diff --git a/.buckconfig b/.buckconfig
index e2e0648b6598d..2bf687637bac5 100644
--- a/.buckconfig
+++ b/.buckconfig
@@ -1,16 +1,10 @@
-[repositories]
-root = .
-prelude = prelude
-shim = shim
+[cells]
+gh_facebook_buck2 = .
+gh_facebook_buck2_shims_meta = shim
 
-[repository_aliases]
-config = prelude
-ovr_config = prelude
-toolchains = shim
-fbcode = shim
-fbcode_macros = shim
-fbsource = shim
-buck = shim
+[cell_aliases]
+root = gh_facebook_buck2
 
-[parser]
-target_platform_detector_spec = target:root//...->prelude//platforms:default target:shim//...->prelude//platforms:default
+[oss]
+internal_cell = fbcode
+stripped_root_dirs = buck2
diff --git a/.buckconfig.d/common.buckconfig b/.buckconfig.d/common.buckconfig
new file mode 100644
index 0000000000000..076a7ca03af40
--- /dev/null
+++ b/.buckconfig.d/common.buckconfig
@@ -0,0 +1,20 @@
+[cells]
+prelude = prelude
+none = none
+
+[cell_aliases]
+config = prelude
+ovr_config = prelude
+bazel_skylib = gh_facebook_buck2_shims_meta
+buck = gh_facebook_buck2_shims_meta
+fbcode = gh_facebook_buck2_shims_meta
+fbcode_macros = gh_facebook_buck2_shims_meta
+fbsource = gh_facebook_buck2_shims_meta
+shim = gh_facebook_buck2_shims_meta
+toolchains = gh_facebook_buck2_shims_meta
+
+[external_cells]
+prelude = bundled
+
+[parser]
+target_platform_detector_spec = target:root//...->prelude//platforms:default target:shim//...->prelude//platforms:default
diff --git a/.cargo/config.toml b/.cargo/config.toml
index 30031963ace7d..37b7a62c3e144 100644
--- a/.cargo/config.toml
+++ b/.cargo/config.toml
@@ -1,10 +1,6 @@
 [build]
-# `cargo_internal_build` is set for cargo builds inside Meta.
-# The flag enables features not available in opensource version,
-# in particular, dependencies on libraries not available in open source.
 rustflags = [
-# @oss-disable: "--cfg", "cargo_internal_build",
-"--cfg", "tokio_unstable",
+    "--cfg=tokio_unstable",
 ]
 
 # @oss-disable: [source.crates-io]
diff --git a/.circleci/config.yml b/.circleci/config.yml
deleted file mode 100644
index 56db3652229ab..0000000000000
--- a/.circleci/config.yml
+++ /dev/null
@@ -1,267 +0,0 @@
-commands:
-  print_versions:
-    description: Version Info
-    steps:
-      - run:
-          name: Version Info
-          command: |
-            rustup show
-            rustc --version
-            cargo --version
-            rustup --version
-            python3 --version
-
-  init_opam:
-    description: Init Opam
-    steps:
-      - run:
-          name: Init opam
-          command: |
-            opam init --compiler=5.1.0 --disable-sandboxing -y
-            opam install menhir ppxlib -y
-      - run:
-          name: OCaml Configuration Info
-          command: |
-            eval $(opam env)
-            ocamlopt.opt -config
-      - run:
-          name: Set OCaml envs
-          command: |
-            echo 'eval $(opam env)' >> "$BASH_ENV"
-
-  setup_linux_env:
-    description: Setup env for Linux
-    steps:
-      - run: sudo apt-get update
-      - run: sudo apt-get install libssl-dev cmake clang lld opam libzstd-dev
-      - run:
-          # the xlarge linux resource class has 8 CPUs, limit the number of jobs to 6 to avoid running out of resources
-          name: "Set CARGO_BUILD_JOBS=6 to limit the number of CPUs used"
-          command: echo 'export CARGO_BUILD_JOBS="6"' >> "$BASH_ENV"
-      - print_versions
-
-  setup_macos_env:
-    description: Setup env for macOS
-    steps:
-      - run:
-          name: Install Rustup
-          command: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
-      - run:
-          name: Brew install
-          command: |
-            brew install cmake coreutils opam llvm protobuf zstd
-            # TODO: Remove once non intel macos platform is supported on https://github.com/stepancheg/rust-protoc-bin-vendored/
-            echo 'export BUCK2_BUILD_PROTOC=/opt/homebrew/opt/protobuf/bin/protoc' >> "$BASH_ENV"
-            echo 'export BUCK2_BUILD_PROTOC_INCLUDE=/opt/homebrew/opt/protobuf/include' >> "$BASH_ENV"
-      - run:
-          # the xlarge linux resource class has 8 CPUs, limit the number of jobs to 6 to avoid running out of resources
-          name: "Set CARGO_BUILD_JOBS=6 to limit the number of CPUs used"
-          command: echo 'export CARGO_BUILD_JOBS="6"' >> "$BASH_ENV"
-      - run:
-          name: "Add LLVM to PATH"
-          command: |
-            echo 'export PATH=/usr/local/opt/llvm/bin:"$PATH"' >> "$BASH_ENV"
-
-      - print_versions
-
-  setup_windows_env:
-    description: Setup env for Windows
-    steps:
-      - run:
-          # Use Rust toolchain installed by Rustup and uninstall default one.
-          name: Install Rustup
-          command: |
-            choco uninstall -y rust
-            choco install -y rustup.install
-            write-output "[net]`ngit-fetch-with-cli = true" | out-file -append -encoding utf8 $Env:USERPROFILE/.cargo/config.toml
-            type $Env:USERPROFILE/.cargo/config.toml
-      - run:
-          name: Create python3 symlink
-          command: |
-            New-Item -ItemType SymbolicLink -Path C:\ProgramData\chocolatey\bin\python3.exe -Target $(Get-Command python).Source
-      - run:
-          name: Write Powershell profile
-          command: |
-            $psProfileContent = @'
-            $vsPath = & "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe" -latest -requires Microsoft.VisualStudio.Component.VC.Llvm.Clang -property installationPath
-            $llvmPath = Join-Path $vsPath "VC\Tools\Llvm\x64\bin"
-            $env:PATH = "$env:USERPROFILE\.cargo\bin;$llvmPath;" + $env:PATH
-            $env:TEMP = "$env:USERPROFILE\temp"
-            $env:TMP = $env:TEMP
-            '@
-            Add-Content "$PsHome\profile.ps1" $psProfileContent
-            New-Item -ItemType Directory -Path "$env:USERPROFILE\temp"
-      - print_versions
-
-  setup_reindeer:
-    description: Install Reindeer
-    steps:
-      - run:
-          name: Install Reindeer
-          command: |
-            cargo install --locked --git https://github.com/facebookincubator/reindeer reindeer
-            reindeer --third-party-dir shim/third-party/rust buckify
-
-  build_debug:
-    description: Build buck2 binary (debug)
-    steps:
-      - run:
-          name: Build buck2 binary (debug)
-          command: |
-            mkdir /tmp/artifacts
-            cargo build --bin=buck2 -Z unstable-options --out-dir=/tmp/artifacts
-
-  build_release:
-    description: Build buck2 binary (release)
-    steps:
-      - run:
-          name: Build buck2 binary (release)
-          command: |
-            mkdir /tmp/artifacts
-            cargo build --bin=buck2 --release -Z unstable-options --out-dir=/tmp/artifacts
-
-  run_test_py:
-    description: Run test.py
-    steps:
-      - run:
-          name: Run test.py
-          command: python3 test.py --ci --git --buck2=/tmp/artifacts/buck2
-
-  build_bootstrap:
-    description: Build `buck2` with `buck2`
-    steps:
-      - run:
-          name: Build `buck2` with `buck2`
-          command: |
-            /tmp/artifacts/buck2 build :buck2 -v 2
-
-  build_example_no_prelude:
-    description: Build example/no_prelude directory
-    steps:
-      - run:
-          name: Build example/no_prelude directory
-          command: |
-            cd examples/no_prelude
-            /tmp/artifacts/buck2 build //... -v 2
-
-
-version: 2.1
-orbs:
-  win: circleci/windows@5.0
-jobs:
-  linux-build-and-test:
-    description: |
-      Build and test all with cargo for Linux
-    docker:
-      - image: cimg/rust:1.65.0
-    resource_class: xlarge
-    steps:
-      - checkout
-      - setup_linux_env
-      - build_debug
-      - run_test_py
-
-  linux-build-examples:
-    description: Build example projects
-    docker:
-      - image: cimg/rust:1.65.0
-    resource_class: xlarge
-    steps:
-      - checkout
-      - setup_linux_env
-      - init_opam
-      - build_release
-      - run:
-          name: Build example/prelude directory
-          command: |
-            cd examples/with_prelude
-            /tmp/artifacts/buck2 init
-            cp -r ../../prelude prelude
-            # Additional setup for ocaml
-            source ./ocaml-setup.sh
-            /tmp/artifacts/buck2 build //... -v 2
-            /tmp/artifacts/buck2 test //... -v 2
-      - build_example_no_prelude
-      - setup_reindeer
-      - build_bootstrap
-
-  macos-build-and-test:
-    description: |
-      Build all with cargo for macOS
-    macos:
-      xcode: "14.2.0"
-    resource_class: macos.m1.medium.gen1
-    steps:
-      - checkout
-      - setup_macos_env
-      - build_debug
-      - run_test_py
-
-  macos-build-examples:
-    description: Build example projects
-    macos:
-      xcode: "14.2.0"
-    resource_class: macos.m1.medium.gen1
-    steps:
-      - checkout
-      - setup_macos_env
-      - init_opam
-      - build_release
-      - run:
-          name: Build example/prelude directory
-          command: |
-            cd examples/with_prelude
-            /tmp/artifacts/buck2 init
-            cp -r ../../prelude prelude
-            # Additional setup for ocaml
-            source ./ocaml-setup.sh
-            /tmp/artifacts/buck2 build //... -v 2
-            /tmp/artifacts/buck2 test //... -v 2
-      - build_example_no_prelude
-      - setup_reindeer
-      - build_bootstrap
-
-  windows-build-and-test:
-    description: |
-      Build and test all with cargo for Windows
-    executor:
-      name: win/default
-      size: "xlarge"
-      shell: powershell.exe
-    steps:
-      - checkout
-      - setup_windows_env
-      - build_debug
-      - run_test_py
-
-  windows-build-examples:
-    description: Build example projects
-    executor:
-      name: win/default
-      size: "xlarge"
-      shell: powershell.exe
-    steps:
-      - checkout
-      - setup_windows_env
-      - build_release
-      - run:
-          name: Build example/prelude directory
-          command: |
-            cd examples/with_prelude
-            /tmp/artifacts/buck2 init
-            copy-item -Path $env:CIRCLE_WORKING_DIRECTORY\prelude -Destination prelude -Recurse
-            /tmp/artifacts/buck2 build //... -v 2
-            /tmp/artifacts/buck2 test //... -v 2
-      - build_example_no_prelude
-      - setup_reindeer
-      - build_bootstrap
-
-workflows:
-  build-and-test:
-    jobs:
-      - linux-build-and-test
-      - linux-build-examples
-      - macos-build-and-test
-      - macos-build-examples
-      - windows-build-and-test
-      - windows-build-examples
diff --git a/.envrc b/.envrc
index e31c30606cd3a..f41a01ce7a0e0 100644
--- a/.envrc
+++ b/.envrc
@@ -1,4 +1,4 @@
-if ! has nix_direnv_version || ! nix_direnv_version 2.2.1; then
-  source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/2.2.1/direnvrc" "sha256-zelF0vLbEl5uaqrfIzbgNzJWGmLzCmYAkInj/LNxvKs="
+if ! has nix_direnv_version || ! nix_direnv_version 3.0.5; then
+  source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.5/direnvrc" "sha256-RuwIS+QKFj/T9M2TFXScjBsLR6V3A17YVoEW/Q6AZ1w="
 fi
 use flake
diff --git a/.github/actions/build_bootstrap/action.yml b/.github/actions/build_bootstrap/action.yml
new file mode 100644
index 0000000000000..ac8d7bda87622
--- /dev/null
+++ b/.github/actions/build_bootstrap/action.yml
@@ -0,0 +1,7 @@
+name: build_bootstrap
+runs:
+  using: composite
+  steps:
+    - name: Build `buck2` with `buck2`
+      run: "$RUNNER_TEMP/artifacts/buck2 build :buck2 -v 2"
+      shell: bash
diff --git a/.github/actions/build_debug/action.yml b/.github/actions/build_debug/action.yml
new file mode 100644
index 0000000000000..aa4c7dc0309ea
--- /dev/null
+++ b/.github/actions/build_debug/action.yml
@@ -0,0 +1,10 @@
+name: build_debug
+description: Build buck2 binary (debug)
+runs:
+  using: composite
+  steps:
+    - name: Build buck2 binary (debug)
+      run: |-
+        mkdir $RUNNER_TEMP/artifacts
+        cargo build --bin=buck2 -Z unstable-options --out-dir=$RUNNER_TEMP/artifacts
+      shell: bash
diff --git a/.github/actions/build_example_conan/action.yml b/.github/actions/build_example_conan/action.yml
new file mode 100644
index 0000000000000..6213ab4a5176f
--- /dev/null
+++ b/.github/actions/build_example_conan/action.yml
@@ -0,0 +1,14 @@
+name: build_example_conan
+runs:
+  using: composite
+  steps:
+    - name: Build examples/toolchains/conan_toolchain
+      run: |-
+        cd examples/toolchains/conan_toolchain
+        $RUNNER_TEMP/artifacts/buck2 init
+        cp -r ../../../prelude prelude
+        # Generate Conan imports. TODO[AH] Make that unnecessary.
+        PATH="$RUNNER_TEMP/artifacts:$PATH" $RUNNER_TEMP/artifacts/buck2 run //cpp/conan:update -v 2
+        $RUNNER_TEMP/artifacts/buck2 build //... -v 2
+        $RUNNER_TEMP/artifacts/buck2 test //... -v 2
+      shell: bash
diff --git a/.github/actions/build_example_no_prelude/action.yml b/.github/actions/build_example_no_prelude/action.yml
new file mode 100644
index 0000000000000..7f3e09a5effa8
--- /dev/null
+++ b/.github/actions/build_example_no_prelude/action.yml
@@ -0,0 +1,9 @@
+name: build_example_no_prelude
+runs:
+  using: composite
+  steps:
+    - name: Build example/no_prelude directory
+      run: |-
+        cd examples/no_prelude
+        $RUNNER_TEMP/artifacts/buck2 build //... -v 2
+      shell: bash
diff --git a/.github/actions/build_release/action.yml b/.github/actions/build_release/action.yml
new file mode 100644
index 0000000000000..d332608a454e4
--- /dev/null
+++ b/.github/actions/build_release/action.yml
@@ -0,0 +1,10 @@
+name: build_release
+description: Build buck2 binary (release)
+runs:
+  using: composite
+  steps:
+    - name: Build buck2 binary (release)
+      run: |-
+        mkdir $RUNNER_TEMP/artifacts
+        cargo build --bin=buck2 --release -Z unstable-options --out-dir=$RUNNER_TEMP/artifacts
+      shell: bash
diff --git a/.github/actions/init_opam/action.yml b/.github/actions/init_opam/action.yml
new file mode 100644
index 0000000000000..5693a2e5be864
--- /dev/null
+++ b/.github/actions/init_opam/action.yml
@@ -0,0 +1,13 @@
+name: init_opam
+description: Setup OPAM
+runs:
+  using: composite
+  steps:
+    - name: Initialize OPAM
+      run: |
+        opam init --compiler=5.1.1 --disable-sandboxing -y
+        opam env | sed -e "s/ export .*//g" -e "s/'//g" -e "s/\;//g" >> $GITHUB_ENV
+      shell: bash
+    - name: Install OPAM packages
+      run: opam install menhir ppxlib -y
+      shell: bash
diff --git a/.github/actions/print_versions/action.yml b/.github/actions/print_versions/action.yml
new file mode 100644
index 0000000000000..df442f55cb416
--- /dev/null
+++ b/.github/actions/print_versions/action.yml
@@ -0,0 +1,12 @@
+name: print_versions
+runs:
+  using: composite
+  steps:
+    - name: Version Info
+      run: |-
+        rustup show
+        rustc --version
+        cargo --version
+        rustup --version
+        python3 --version
+      shell: bash
diff --git a/.github/actions/publish_tag/action.yml b/.github/actions/publish_tag/action.yml
index 4a9423e4b6b6d..d00819a324584 100644
--- a/.github/actions/publish_tag/action.yml
+++ b/.github/actions/publish_tag/action.yml
@@ -19,7 +19,7 @@ runs:
     - shell: bash
      run: |
        mkdir -p ${{github.workspace}}/artifacts
-    - uses: actions/download-artifact@v3
+    - uses: actions/download-artifact@v4
      with:
        path: ${{github.workspace}}/artifacts
    - name: Display structure of downloaded files
@@ -48,8 +48,9 @@ runs:
    - name: Post a summary
      shell: bash
      run: |
+        # GITHUB_REPOSITORY is used below to allow this action to work on forks
        cat <<EOF >> $GITHUB_STEP_SUMMARY
        # `${{ inputs.tag }}` Release Complete! :rocket:
        For the public download links of these build artifacts, please see:
-        <https://github.com/facebook/buck2/releases/tag/${{ inputs.tag }}>
+        <https://github.com/$GITHUB_REPOSITORY/releases/tag/${{ inputs.tag }}>
        EOF
diff --git a/.github/actions/run_test_py/action.yml b/.github/actions/run_test_py/action.yml
new file mode 100644
index 0000000000000..c1957417f59b0
--- /dev/null
+++ b/.github/actions/run_test_py/action.yml
@@ -0,0 +1,7 @@
+name: run_test_py
+runs:
+  using: composite
+  steps:
+    - name: Run test.py
+      run: python3 test.py --ci --git --buck2=$RUNNER_TEMP/artifacts/buck2
+      shell: bash
diff --git a/.github/actions/setup_linux_env/action.yml b/.github/actions/setup_linux_env/action.yml
new file mode 100644
index 0000000000000..08e9a0a8c3f7d
--- /dev/null
+++ b/.github/actions/setup_linux_env/action.yml
@@ -0,0 +1,31 @@
+name: Setup Linux environment
+description: Setup Linux environment
+runs:
+  using: composite
+  steps:
+    - uses: SebRollen/toml-action@v1.0.2
+      id: read_rust_toolchain
+      with:
+        file: rust-toolchain
+        field: toolchain.channel
+    - uses: dtolnay/rust-toolchain@v1
+      with:
+        toolchain: ${{ steps.read_rust_toolchain.outputs.value }}
+        components: clippy
+    - uses: Swatinem/rust-cache@v2
+      with:
+        prefix-key: buck2-upload
+    - run: sudo apt-get update
+      shell: bash
+    - run: sudo apt-get install opam libzstd-dev python3-pip
+      shell: bash
+    - uses: haskell-actions/setup@v2
+      with:
+        ghc-version: '9.10.1'
+    - name: Install conan
+      run: sudo pip3 install conan==1.*
+      shell: bash
+    - uses: actions/checkout@v4
+    - uses: actions/setup-go@v5
+      with:
+        go-version: '~1.22.0'
diff --git a/.github/actions/setup_macos_env/action.yml b/.github/actions/setup_macos_env/action.yml
new file mode 100644
index 0000000000000..8bdc4d04a1aaa
--- /dev/null
+++ b/.github/actions/setup_macos_env/action.yml
@@ -0,0 +1,22 @@
+name: setup_macos_env
+description: Setup macOS environment
+runs:
+  using: composite
+  steps:
+    - name: Install Rustup
+      run: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=none
+      shell: bash
+    - name: Brew install
+      run: brew install cmake python3 coreutils opam llvm protobuf zstd
+      shell: bash
+    - name: Install conan
+      run: sudo pip3 install --break-system-packages conan==1.*
+      shell: bash
+    - uses: "./.github/actions/print_versions"
+    - uses: actions/checkout@v4
+    - uses: actions/setup-go@v5
+      with:
+        go-version: '~1.22.0'
+    - uses: haskell-actions/setup@v2
+      with:
+        ghc-version: '9.10.1'
diff --git a/.github/actions/setup_reindeer/action.yml b/.github/actions/setup_reindeer/action.yml
new file mode 100644
index 0000000000000..5a5d1c217d09b
--- /dev/null
+++ b/.github/actions/setup_reindeer/action.yml
@@ -0,0 +1,11 @@
+name: setup_reindeer
+runs:
+  using: composite
+  steps:
+    - name: Install Reindeer
+      run: |-
+        cargo install --locked --git https://github.com/facebookincubator/reindeer reindeer
+        # Remove any dirty BUCK and Cargo.lock files
+        rm -f shim/third-party/rust/Cargo.lock shim/third-party/rust/BUCK
+        reindeer --third-party-dir shim/third-party/rust buckify
+      shell: bash
diff --git a/.github/actions/setup_windows_env/action.yml b/.github/actions/setup_windows_env/action.yml
new file mode 100644
index 0000000000000..4264009fdcb34
--- /dev/null
+++ b/.github/actions/setup_windows_env/action.yml
@@ -0,0 +1,31 @@
+name: setup_windows_env
+description: Setup Windows environment for building and testing
+runs:
+  using: composite
+  steps:
+    - name: Install Rustup
+      run: |-
+        choco install -y rustup.install
+        write-output "[net]`ngit-fetch-with-cli = true" | out-file -append -encoding utf8 $Env:USERPROFILE/.cargo/config.toml
+        type $Env:USERPROFILE/.cargo/config.toml
+      shell: pwsh
+    - name: Create python3 symlink
+      run: New-Item -ItemType SymbolicLink -Path C:\ProgramData\chocolatey\bin\python3.exe -Target $(Get-Command python).Source
+      shell: pwsh
+    - name: Write Powershell profile
+      run: |-
+        $psProfileContent = @'
+        $vsPath = & "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe" -latest -requires Microsoft.VisualStudio.Component.VC.Llvm.Clang -property installationPath
+        $llvmPath = Join-Path $vsPath "VC\Tools\Llvm\x64\bin"
+        $env:PATH = "$env:USERPROFILE\.cargo\bin;$llvmPath;" + $env:PATH
+        $env:TEMP = "$env:USERPROFILE\temp"
+        $env:TMP = $env:TEMP
+        '@
+        Add-Content "$PsHome\profile.ps1" $psProfileContent
+        New-Item -ItemType Directory -Path "$env:USERPROFILE\temp"
+      shell: pwsh
+    - uses: "./.github/actions/print_versions"
+    - uses: actions/checkout@v4
+    - uses: actions/setup-go@v5
+      with:
+        go-version: '~1.22.0'
diff --git a/.github/dotslash-config.json b/.github/dotslash-config.json
new file mode 100644
index 0000000000000..f5b264efb1f02
--- /dev/null
+++ b/.github/dotslash-config.json
@@ -0,0 +1,53 @@
+{
+  "exclude-github-release-provider": true,
+  "outputs": {
+    "buck2": {
+      "platforms": {
+        "macos-aarch64": {
+          "regex": "^buck2-aarch64-apple-darwin.zst$",
+          "path": "buck2-aarch64-apple-darwin"
+        },
+        "linux-aarch64": {
+          "regex": "^buck2-aarch64-unknown-linux-musl.zst$",
+          "path": "buck2-aarch64-unknown-linux-musl"
+        },
+        "macos-x86_64": {
+          "regex": "^buck2-x86_64-apple-darwin.zst$",
+          "path": "buck2-x86_64-apple-darwin"
+        },
+        "windows-x86_64": {
+          "regex": "^buck2-x86_64-pc-windows-msvc.exe.zst$",
+          "path": "buck2-x86_64-pc-windows-msvc.exe"
+        },
+        "linux-x86_64": {
+          "regex": "^buck2-x86_64-unknown-linux-musl.zst$",
+          "path": "buck2-x86_64-unknown-linux-musl"
+        }
+      }
+    },
+    "rust-project": {
+      "platforms": {
+        "macos-aarch64": {
+          "regex": "^rust-project-aarch64-apple-darwin.zst$",
+          "path": "rust-project-aarch64-apple-darwin"
+        },
+        "linux-aarch64": {
+          "regex": "^rust-project-aarch64-unknown-linux-musl.zst$",
+          "path": "rust-project-aarch64-unknown-linux-musl"
+        },
+        "macos-x86_64": {
+          "regex": "^rust-project-x86_64-apple-darwin.zst$",
+          "path": "rust-project-x86_64-apple-darwin"
+        },
+        "windows-x86_64": {
+          "regex": "^rust-project-x86_64-pc-windows-msvc.exe.zst$",
+          "path": "rust-project-x86_64-pc-windows-msvc.exe"
+        },
+        "linux-x86_64": {
+          "regex": "^rust-project-x86_64-unknown-linux-musl.zst$",
+          "path": "rust-project-x86_64-unknown-linux-musl"
+        }
+      }
+    }
+  }
+}
diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml
new file mode 100644
index 0000000000000..568e8535542ae
--- /dev/null
+++ b/.github/workflows/build-and-test.yml
@@ -0,0 +1,93 @@
+name: Build and test
+on:
+  push:
+  pull_request:
+jobs:
+  linux-build-and-test:
+    runs-on: 4-core-ubuntu
+    steps:
+      - uses: actions/checkout@v4.1.0
+      - uses: ./.github/actions/setup_linux_env
+      - uses: ./.github/actions/build_debug
+      - uses: ./.github/actions/run_test_py
+  macos-build-and-test:
+    runs-on: macos-latest
+    steps:
+      - uses: maxim-lobanov/setup-xcode@v1.6.0
+        with:
+          xcode-version: 15.3.0
+      - uses: actions/checkout@v4.1.0
+      - uses: ./.github/actions/setup_macos_env
+      - uses: ./.github/actions/build_debug
+      - uses: ./.github/actions/run_test_py
+  windows-build-and-test:
+    runs-on: windows-latest
+    steps:
+      - uses: actions/checkout@v4.1.0
+      - uses: ./.github/actions/setup_windows_env
+      - uses: ./.github/actions/build_debug
+      - uses: ./.github/actions/run_test_py
+  macos-build-examples:
+    runs-on: macos-latest
+    steps:
+      - uses: maxim-lobanov/setup-xcode@v1.6.0
+        with:
+          xcode-version: 15.3.0
+      - uses: actions/checkout@v4.1.0
+      - uses: ./.github/actions/setup_macos_env
+      - uses: ./.github/actions/init_opam
+      - uses: ./.github/actions/build_release
+      - name: Setup the 'example/with_prelude' project
+        run: |-
+          cd examples/with_prelude
+          ./haskell-setup.sh
+          ./ocaml-setup.sh
+      - name: Build the 'example/with_prelude' project
+        run: |-
+          cd examples/with_prelude
+          $RUNNER_TEMP/artifacts/buck2 build //... -v 2
+          $RUNNER_TEMP/artifacts/buck2 test //... -v 2
+      - uses: ./.github/actions/build_example_no_prelude
+      - uses: ./.github/actions/setup_reindeer
+      - uses: ./.github/actions/build_bootstrap
+  linux-build-examples:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4.1.0
+      - uses: ./.github/actions/setup_linux_env
+      - uses: ./.github/actions/init_opam
+      - uses: ./.github/actions/build_release
+      - name: Setup the 'example/with_prelude' project
+        run: |-
+          cd examples/with_prelude
+          ./haskell-setup.sh
+          ./ocaml-setup.sh
+      - name: Build the 'example/with_prelude' project
+        run: |-
+          cd examples/with_prelude
+          $RUNNER_TEMP/artifacts/buck2 build //... -v 2
+          $RUNNER_TEMP/artifacts/buck2 test //... -v 2
+      - uses: ./.github/actions/build_example_conan
+      - uses: ./.github/actions/build_example_no_prelude
+      - uses: ./.github/actions/setup_reindeer
+      - uses: ./.github/actions/build_bootstrap
+  windows-build-examples:
+    runs-on: windows-latest
+    steps:
+      - uses: actions/checkout@v4.1.0
+      - uses: ./.github/actions/setup_windows_env
+      - uses: ./.github/actions/build_release
+      - name: Build example/prelude directory
+        run: |-
+          cd examples/with_prelude
+          & $Env:RUNNER_TEMP/artifacts/buck2 build //... -v 2
+          & $Env:RUNNER_TEMP/artifacts/buck2 test //... -v 2
+      - uses: ./.github/actions/build_example_no_prelude
+      - name: Configure CARGO_HOME
+        run: |-
+          echo CARGO_HOME=$GITHUB_WORKSPACE/.cargo >> $GITHUB_ENV
+          echo $GITHUB_WORKSPACE/.cargo/bin >> $GITHUB_PATH
+        shell:
+          bash
+      - uses: ./.github/actions/setup_reindeer
+      - uses: ./.github/actions/build_bootstrap
diff --git a/.github/workflows/upload_buck2.yml b/.github/workflows/upload_buck2.yml
index a37ee3653819c..b8c43c442f83d 100644
--- a/.github/workflows/upload_buck2.yml
+++ b/.github/workflows/upload_buck2.yml
@@ -22,7 +22,7 @@ jobs:
        git rev-parse HEAD > ../artifacts/prelude_hash
        echo "prelude_hash=$(git rev-parse HEAD)" >> "$GITHUB_OUTPUT"
    - name: Upload prelude_hash
-      uses: actions/upload-artifact@v3
+      uses: actions/upload-artifact@v4
      with:
        path: artifacts/prelude_hash
        name: prelude_hash
@@ -82,44 +82,51 @@
      shell: bash
      run: |
        if [ -n "${{ matrix.target.is_windows }}" ]; then
-          echo "cargo_out=target/${{ matrix.target.triple }}/release/buck2.exe" >> "$GITHUB_OUTPUT"
+          echo "buck2_out=target/${{ matrix.target.triple }}/release/buck2.exe" >> "$GITHUB_OUTPUT"
          echo "buck2_zst=artifacts/buck2-${{ matrix.target.triple }}.exe.zst" >> "$GITHUB_OUTPUT"
+          echo "buck2_rust_project_out=target/${{ matrix.target.triple }}/release/rust-project.exe" >> "$GITHUB_OUTPUT"
+          echo "buck2_rust_project_zst=artifacts/rust-project-${{ matrix.target.triple }}.exe.zst" >> "$GITHUB_OUTPUT"
        else
-          echo "cargo_out=target/${{ matrix.target.triple }}/release/buck2" >> "$GITHUB_OUTPUT"
+          echo "buck2_out=target/${{ matrix.target.triple }}/release/buck2" >> "$GITHUB_OUTPUT"
          echo "buck2_zst=artifacts/buck2-${{ matrix.target.triple }}.zst" >> "$GITHUB_OUTPUT"
+          echo "buck2_rust_project_out=target/${{ matrix.target.triple }}/release/rust-project" >> "$GITHUB_OUTPUT"
+          echo "buck2_rust_project_zst=artifacts/rust-project-${{ matrix.target.triple }}.zst" >> "$GITHUB_OUTPUT"
        fi
    - name: Build
      shell: bash
      env:
-        RUSTFLAGS: "-C strip=debuginfo"
+        RUSTFLAGS: "-C strip=debuginfo -C codegen-units=1"
      run: |
+        # aarch64-linux builds need JEMALLOC_SYS_WITH_LG_PAGE=16
+        # this is for e.g. linux running on apple silicon with native 16k pages
+        if [[ "${{ matrix.target.triple }}" == aarch64-unknown-linux* ]]; then
+          export JEMALLOC_SYS_WITH_LG_PAGE=16
+        fi
+
        if [ -n "${{ matrix.target.cross }}" ]; then
          CARGO=cross
        else
          CARGO=cargo
        fi
-        $CARGO build --release --bin buck2 --target ${{ matrix.target.triple }}
+        $CARGO build --release --bin buck2 --bin rust-project --target ${{ matrix.target.triple }}
    - name: Sanity check with examples/with_prelude
      if: ${{ !matrix.target.cross }}
      shell: bash
      run: |
-        BUCK2="$(pwd)/${{ steps.set_variables.outputs.cargo_out }}"
+        BUCK2="$(pwd)/${{ steps.set_variables.outputs.buck2_out }}"
        cd examples/with_prelude
-        "$BUCK2" init --git
-        cd prelude
-        git checkout ${{ needs.get_prelude_hash.outputs.prelude_hash }}
-        cd ../
        "$BUCK2" build //rust/... //cpp/... //python/... -v=2
    - name: Move binary to artifacts/
      shell: bash
      run: |
        mkdir artifacts
-        zstd -z ${{ steps.set_variables.outputs.cargo_out }} -o ${{ steps.set_variables.outputs.buck2_zst }}
+        zstd -z ${{ steps.set_variables.outputs.buck2_out }} -o ${{ steps.set_variables.outputs.buck2_zst }}
+        zstd -z ${{ steps.set_variables.outputs.buck2_rust_project_out }} -o ${{ steps.set_variables.outputs.buck2_rust_project_zst }}
    - name: Upload
-      uses: actions/upload-artifact@v3
+      uses: actions/upload-artifact@v4
      with:
        name: buck2-${{ matrix.target.triple }}
-        path: ${{ steps.set_variables.outputs.buck2_zst }}
+        path: artifacts/
 
  release_latest:
    name: Release `latest` tag
@@ -149,7 +156,9 @@
      name: Store date information so we stay consistent between the steps
      run: |
        month=$(date +%Y-%m)
-        day=$(date +%d)
+        # The math below uses the day number "XX" as a number, but if it has a leading 0 and the
+        # second digit is larger than 7 this fails as it parses as an octal, so we use `sed` to fix:
+        day=$(date +%d | sed 's/^0*//')
        tag=$(date +%Y-%m-%d)
        echo "month=$month" >> "$GITHUB_OUTPUT"
        echo "day=$day" >> "$GITHUB_OUTPUT"
@@ -158,7 +167,8 @@
    - id: get_tags_count
      name: Count the number of tags already published for this month
      run: |
-        url="https://api.github.com/repos/facebook/buck2/tags"
+        # GITHUB_REPOSITORY is used to allow this action to work on forks
+        url="https://api.github.com/repos/$GITHUB_REPOSITORY/tags"
        curl --retry 5 -fsSL "$url" -o tags.txt
        tags=$(cat tags.txt | jq -r ".[].name")
        tags_count=$(echo "$tags" | grep -c "${{ steps.get_date.outputs.month }}" || true)
@@ -194,6 +204,12 @@
      with:
        tag: ${{ needs.check_for_bi_monthly_release.outputs.tag }}
        github_token: ${{ secrets.GITHUB_TOKEN }}
+    - uses: facebook/dotslash-publish-release@v1
+      env:
+        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      with:
+        config: .github/dotslash-config.json
+        tag: ${{ needs.check_for_bi_monthly_release.outputs.tag }}
 
  build_docs_job:
    name: Publish buck2.build
@@ -204,7 +220,7 @@ steps:
    - name: Checkout
      uses: actions/checkout@v3
    - name: Download Buck2
-      uses: actions/download-artifact@v3
+      uses: actions/download-artifact@v4
      with:
        name: buck2-x86_64-unknown-linux-gnu
        path: artifacts
diff --git a/.gitignore b/.gitignore
index 8a01581b2a073..5ca9410a5c866 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,4 +6,5 @@ buck-out
 # symlinks
 /examples/with_prelude/prelude
 /examples/with_prelude/third-party/ocaml/opam
+/examples/with_prelude/third-party/haskell/ghc
 /examples/with_prelude/third-party/ocaml/standard_library
diff --git a/.taplo.toml b/.taplo.toml
new file mode 100644
index 0000000000000..465b33f5af70e
--- /dev/null
+++ b/.taplo.toml
@@ -0,0 +1,19 @@
+include = [
+    "*.toml",
+]
+
+[formatting]
+align_comments = false
+array_auto_collapse = false
+array_auto_expand = false
+indent_string = "    "
+reorder_arrays = false
+reorder_keys = true
+
+[[rule]]
+keys = [
+    "dependencies",
+    "target.*.dependencies",
+]
+[rule.formatting]
+reorder_arrays = true
diff --git a/.vscode/test_buckconfig.code-snippets b/.vscode/test_buckconfig.code-snippets
new file mode 100644
index 0000000000000..105aa8e41464e
--- /dev/null
+++ b/.vscode/test_buckconfig.code-snippets
@@ -0,0 +1,35 @@
+{
+  "Nanoprelude buckconfig for core test": {
+    "prefix": "",
+    "body": [
+      "[cells]",
+      "  root = .",
+      "  nano_prelude = nano_prelude",
+      "",
+      "[cell_aliases]",
+      "  prelude = nano_prelude",
+      "",
+      "[external_cells]",
+      "  nano_prelude = bundled",
+      "",
+      "[buildfile]",
+      "  name = TARGETS.fixture",
+      "",
+    ],
+    "description": "Inserts a buckconfig that is suitable for use in a core test with the nanoprelude",
+    "isFileTemplate": true,
+  },
+  "Buckconfig for core test": {
+    "prefix": "",
+    "body": [
+      "[cells]",
+      "  root = .",
+      "",
+      "[buildfile]",
+      "  name = TARGETS.fixture",
+      "",
+    ],
+    "description": "Inserts a buckconfig that is suitable for use in a core test",
+    "isFileTemplate": true,
+  },
+}
diff --git a/BUCK b/BUCK
index 1c11f9f249683..1a1d8ec5b1d03 100644
--- a/BUCK
+++ b/BUCK
@@ -1,7 +1,6 @@
 load("@fbcode//target_determinator/macros:ci.bzl", "ci")
 load("@fbcode_macros//build_defs:native_rules.bzl", "alias")
-load("@fbsource//tools/build_defs/buck2:is_buck2.bzl", "is_buck2")
-load(":defs.bzl?v2_only", "symlinked_buck2_and_tpx")
+load(":defs.bzl", "buck2_bundle")
 
 oncall("build_infra")
 
@@ -11,10 +10,16 @@ alias(
     labels = [ci.aarch64(ci.skip_test())],
 )
 
-# buildifier: disable=no-effect
-symlinked_buck2_and_tpx(
-    name = "symlinked_buck2_and_tpx",
+buck2_bundle(
+    name = "buck2_bundle",
     buck2 = "//buck2:buck2",
-    labels = [ci.skip_test(ci.windows(ci.opt()))],
+    buck2_client = "//buck2/app/buck2:buck2_client-bin",
     tpx = "//buck2/buck2_tpx_cli:buck2_tpx_cli",
-) if is_buck2() else None
+    visibility = ["PUBLIC"],
+)
+
+# For backcompat with bash aliases and so forth
+alias(
+    name = "symlinked_buck2_and_tpx",
+    actual = ":buck2_bundle",
+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index bf9dd7dd72ced..72e48e52cbcab 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,3 @@
 # Buck2
 
-* Initial version.
+- Initial version.
diff --git a/Cargo.toml b/Cargo.toml
index feb14d49f2664..4a56c9eff0be4 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,12 +1,10 @@
-cargo-features = ['named-profiles']
-
 [workspace]
-resolver = "2"
 members = [
     "allocative/allocative",
     "allocative/allocative_derive",
     # @oss-disable: "attic/uniplate",
     # @oss-disable: "attic/uniplate_derive",
+    "app/bazel_event_publisher_proto",
     "app/buck2",
     "app/buck2_action_impl",
     "app/buck2_action_impl_tests",
@@ -17,22 +15,34 @@ "app/buck2_audit",
     "app/buck2_audit_server",
     "app/buck2_bxl",
     "app/buck2_build_info",
+    "app/buck2_certs",
     "app/buck2_cfg_constructor",
     "app/buck2_client",
     "app/buck2_client_ctx",
+    "app/buck2_cmd_completion_client",
+    "app/buck2_cmd_docs",
+    "app/buck2_cmd_docs_server",
     "app/buck2_common",
     "app/buck2_configured",
     "app/buck2_core",
     "app/buck2_cli_proto",
+    "app/buck2_daemon",
+    "app/buck2_directory",
     "app/buck2_downward_api",
     "app/buck2_downward_api_proto",
     "app/buck2_error",
     "app/buck2_error_derive",
+    "app/buck2_error_tests",
     "app/buck2_event_observer",
     "app/buck2_events",
+    "app/buck2_event_log",
+    "app/buck2_event_publisher_proto",
     "app/buck2_execute",
     "app/buck2_execute_impl",
+    "app/buck2_external_cells",
+    "app/buck2_external_cells_bundled",
     "app/buck2_grpc",
+    "app/buck2_http",
     "app/buck2_install_proto",
     "app/buck2_interpreter",
     "app/buck2_interpreter_for_build",
@@ -43,13 +53,15 @@
     "app/buck2_node_tests",
     "app/buck2_offline_archive",
     "app/buck2_artifact",
-    "app/buck2_starlark",
+    "app/buck2_cmd_starlark_client",
+    "app/buck2_cmd_starlark_server",
     "app/buck2_test",
     "app/buck2_test_api",
     "app/buck2_test_proto",
     "app/buck2_test_runner",
     "app/buck2_forkserver",
     "app/buck2_forkserver_proto",
+    "app/buck2_futures",
     "app/buck2_profile",
     "app/buck2_protoc_dev",
     "app/buck2_query",
@@ -66,8 +78,6 @@
     "app/buck2_data",
     "app/buck2_worker_proto",
     "app/buck2_wrapper_common",
-    # @oss-disable: "buck2_tpx",
-    # @oss-disable: "buck2_tpx_cli",
     "app/buck2_build_api",
     "app/buck2_build_api_derive",
     "app/buck2_build_api_tests",
@@ -76,19 +86,19 @@ members = [
     "app/buck2_build_signals",
     "app/buck2_build_signals_impl",
     "app/buck2_eden",
+    "app/buck2_validation",
     "dice/dice",
     "dice/dice_examples",
     "dice/dice_tests",
     # @oss-disable: "dice/fuzzy_dice",
-    # @oss-disable: "dice_replay",
+    "dice/read_dump",
+    # @oss-disable: "facebook/ingress",
     # @oss-disable: "host_sharing",
-    # @oss-disable: "gazebo_lint/gazebo_lint",
-    # Uncomment to manually test linter_test
-    # "gazebo_lint/linter_test",
     "gazebo/display_container",
     "gazebo/cmp_any",
     "gazebo/gazebo",
     "gazebo/gazebo_derive",
+    "integrations/resources/rust",
     "integrations/rust-project",
     "remote_execution/oss/re_grpc",
     "remote_execution/oss/re_grpc_proto",
@@ -98,14 +108,18 @@
     "starlark-rust/starlark_lsp",
     "starlark-rust/starlark_map",
     "starlark-rust/starlark_syntax",
-    "shed/internment_tweaks",
+    "shed/static_interner",
     "shed/lock_free_hashtable",
     "shed/lock_free_vec",
-    "shed/more_futures",
     "shed/provider",
     "shed/three_billion_instructions",
     "superconsole",
 ]
+resolver = "2"
+
+[workspace.package]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/facebook/buck2"
 
 [workspace.dependencies]
 anyhow = "1.0.65"
@@ -114,23 +128,25 @@ arc-swap = "1.6.0"
 argfile = "0.1.0"
 assert_matches = "1.5"
 async-compression = { version = "0.4.1", features = ["tokio", "gzip", "zstd"] }
-async-condvar-fair = { version = "0.2.2", features = ["parking_lot_0_11", "tokio"] }
+async-condvar-fair = { version = "1.0", features = ["parking_lot_0_11", "tokio"] }
 async-recursion = "1.0"
-async-scoped = { version = "0.7.1", features = ["use-tokio"] }
+async-scoped = { version = "0.9", features = ["use-tokio"] }
+async-stream = "0.3.6"
 async-trait = "0.1.24"
 atomic = "0.5.1"
 backtrace = "0.3.51"
-base64 = "0.13.0"
+base64 = "0.21.7"
 bincode = "1.3.3"
-blake3 = { version = "1.3.1", features = [ "default", "digest", "rayon", "std", "traits-preview" ] }
+blake3 = { version = "1.3.1", features = ["default", "digest", "rayon", "std", "traits-preview"] }
 bumpalo = "3.11.1"
 byteorder = "1.4.3"
 bytes = "1.0"
 bytesize = "1.1.0"
-chrono = "0.4.28"
-clap = { version = "3.2.24", features = ["derive", "env"] }
+chrono = "0.4.38"
+clap = { version = "4.5.6", features = ["derive", "env", "string"] }
+clap_complete = "4.5.5"
 common-path = "1.0.0"
-compact_str = "0.6.1"
+compact_str = "0.8"
 constant_time_eq = "0.2.4"
 convert_case = "0.4.0"
 criterion = { version = "0.3.1", features = [] }
@@ -140,10 +156,10 @@ crossbeam-epoch = "0.9.7"
 crossterm = "0.27"
 csv = "1.1"
 ctor = "0.1.16"
-dashmap = "4.0.2"
+dashmap = "5.5.3"
 debugserver-types = "0.5.0"
 derivative = "2.2"
-derive_more = "0.99.3"
+derive_more = { version = "1.0.0", features = ["full"] }
 digest = "0.10"
 dirs = "3.0.1"
 dunce = "1.0.2"
@@ -154,47 +170,49 @@ env_logger = "0.9.0"
 equivalent = "1.0.0"
 erased-serde = "0.3.20"
 faccess = "0.2.3"
-fancy-regex = "0.10.0"
+fancy-regex = "0.14.0"
 flate2 = "1.0.22"
-fnv = "1.0.7"
 fs4 = { version = "0.6", features = ["sync"] }
 futures = { version = "0.3.28", features = ["async-await", "compat"] }
 futures-intrusive = "0.4"
+fxhash = "0.2.1"
 glob = "0.3.0"
 globset = "0.4.10"
-hashbrown = { version = "0.12.3", features = ["raw"] }
+hashbrown = { version = "0.14.3", features = ["raw"] }
 hex = "0.4.3"
-higher-order-closure = "0.0.5"
 hostname = "0.3.1"
 http = "0.2"
 httparse = "1.7.1"
 httptest = "0.15"
 humantime = "2.0.1"
 hyper = { version = "0.14.26", features = ["client", "http1", "http2"] }
-hyper-proxy = { git = "https://github.com/get9/hyper-proxy", rev = "205e9fee42d469444d654d9fa207897f4a77d5b6", features = ["rustls"], default_features = false } # branch = tokio-rustls-0.23 Many PRs to bump versions (#28, #30, #31) are several years old, possibly abandoned crate. This fork contains changes from #28 + changes to upgrade rustls to 0.21.
+hyper-proxy = { git = "https://github.com/get9/hyper-proxy", rev = "205e9fee42d469444d654d9fa207897f4a77d5b6", features = ["rustls"], default-features = false } # branch = tokio-rustls-0.23 Many PRs to bump versions (#28, #30, #31) are several years old, possibly abandoned crate. This fork contains changes from #28 + changes to upgrade rustls to 0.21.
 hyper-rustls = { version = "0.24.0", features = ["http2"] }
 hyper-timeout = "0.4"
 hyper-unix-connector = "0.2"
 indent_write = "2.2.0"
 indenter = "0.3.3"
-indexmap = { version = "1.9.1", features = ["serde-1"] }
-indoc = "1.0.3"
-inferno = { version = "0.11.11", default-features = false }
-internment = { version = "0.7", features = ["arc"] }
+indexmap = { version = "2.2.6", features = ["serde"] }
+indoc = "2.0.2"
+inferno = { version = "0.11.19", default-features = false }
+internment = { version = "0.8", features = ["arc"] }
 inventory = "0.3.8"
-ipnetwork = "0.15"
+ipnetwork = "0.20.0"
 is_proc_translated = "0.1.1"
-itertools = "0.10.3"
+itertools = "0.13.0"
 jemallocator = { version = "0.5.0", features = ["profiling"] }
 lalrpop = { version = "0.19.7", artifact = "bin" }
 lalrpop-util = "0.19.7"
 libc = "0.2.147"
 linked-hash-map = { version = "0.5", features = ["serde_impl"] }
+linkme = { version = "0.3.17", features = ["used_linker"] }
 log = "0.4"
 logos = "0.12"
+lru = "0.12.3"
 lsp-server = "0.7.2"
 lsp-types = "0.94.1"
 maplit = "1.0.2"
+mappable-rc = { version = "0.1.1", features = ["std"] }
 memchr = "2.4.1"
 memmap2 = "0.5.0"
 memoffset = "0.6.4"
@@ -208,9 +226,10 @@ num_cpus = "1.11"
 num_enum = "0.5"
 object = "0.29.0"
 once_cell = "1.8"
-os_str_bytes = "6.0"
+os_str_bytes = { version = "6.6.0", features = ["conversions"] }
 parking_lot = { version = "0.11.2", features = ["send_guard"] }
 paste = "1.0"
+pathdiff = "0.2"
 perf-event = "0.4"
 perf-event-open-sys = "4.0"
 pin-project = "0.4.29"
@@ -231,17 +250,18 @@ ref-cast = "1.0.0"
 regex = "1.5.4"
 relative-path = { version = "1.7.0", features = ["serde"] }
 rusqlite = { version = "0.29.0", features = ["bundled"] }
-rustls = "0.21.0"
+rustc-hash = { version = "1.1" }
+rustls = "0.21.5"
 rustls-native-certs = { package = "rustls-native-certs", version = "0.6.2" }
 rustls-pemfile = { package = "rustls-pemfile", version = "1.0.0" }
-rustyline = "11.0"
+rustyline = "14.0"
 scopeguard = "1.0.0"
 sequence_trie = "0.3.6"
-serde = { version = "1.0", features = ["derive"] }
-serde_json = "1.0.48"
+serde = { version = "1.0", features = ["derive", "rc"] }
+serde_json = { version = "1.0.48", features = ["raw_value"] }
 sha1 = "0.10"
 sha2 = "0.10"
-shlex = "1.0"
+shlex = "1.3"
 siphasher = "0.3.3"
 slab = "0.4.7"
 slog = "2.7.0"
@@ -249,14 +269,15 @@ smallvec = { version = "1.10", features = ["const_generics", "const_new", "serde
 static_assertions = "1.1.0"
 strsim = "0.10.0"
 structopt = "0.3.23"
+strum = { version = "0.26.2", features = ["derive", "strum_macros"] }
 syn = { version = "2", features = ["extra-traits", "full", "visit"] }
 sync_wrapper = "0.1.0"
 sys-info = "0.9.1"
-sysinfo = "0.26.8"
+sysinfo = "0.30.11"
 take_mut = "0.2.2"
 tar = "0.4.38"
 tempfile = "3.1.0"
-termimad = "0.20.1"
+termimad = "0.30"
 termios = "0.3"
 termwiz = "0.18"
 test-case = "3.1.0"
@@ -274,112 +295,129 @@ tower = "0.4"
 tower-layer = "0.3.1"
 tower-service = "0.3.2"
 tracing = "0.1.22"
-tracing-subscriber = { version = "0.3", features = ["env-filter"] }
-triomphe = "0.1.8"
+tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] }
+triomphe = "0.1.11"
 trybuild = "1.0.56"
 twox-hash = "1.6.1"
+typed-arena = "2.0"
 unicode-segmentation = "1.7"
 uuid = { version = "1.2", features = ["v4"] }
 walkdir = "2.3.2"
 which = "4.3.0"
-winapi = { version = "0.3", features = ["everything"] }
+winapi = { version = "0.3", features = ["everything", "std"] }
+x509-parser = "0.14.0"
 xattr = "0.2.2"
 zip = "0.5"
-zstd = "0.11.2"
+zstd = "0.13.0"
 
-fbinit.version = "0.1"
 # @oss-disable: fbinit.path = "../common/rust/shed/fbinit"
-sorted_vector_map.version = "0.1"
+fbinit.version = "0.2"
 # @oss-disable: sorted_vector_map.path = "../common/rust/shed/sorted_vector_map"
+sorted_vector_map.version = "0.2"
 
-allocative.version = "0.3"
-allocative.path = "allocative/allocative"
 allocative.features = ["anyhow", "bumpalo", "dashmap", "either", "futures", "hashbrown", "indexmap", "num-bigint", "once_cell", "parking_lot", "prost-types", "relative-path", "serde_json", "slab", "smallvec", "compact_str", "sorted_vector_map", "tokio", "triomphe"]
+allocative.path = "allocative/allocative"
+allocative.version = "0.3.1"
 
-gazebo.version = "0.8.1"
-gazebo.features = ["str_pattern_extensions"]
-gazebo.path = "gazebo/gazebo"
 cmp_any = { path = "gazebo/cmp_any" }
-dupe.version = "0.9.0"
-dupe.path = "gazebo/dupe"
 dice = { path = "dice/dice" }
 display_container = { path = "gazebo/display_container" }
+dupe.path = "gazebo/dupe"
+dupe.version = "0.9.0"
+gazebo.features = ["str_pattern_extensions"]
+gazebo.path = "gazebo/gazebo"
+gazebo.version = "0.8.1"
 host_sharing = { path = "host_sharing" }
-more_futures = { path = "shed/more_futures" }
-lock_free_hashtable = { path = "shed/lock_free_hashtable" }
+lock_free_hashtable = { version = "0.1.0", path = "shed/lock_free_hashtable" }
 lock_free_vec = { path = "shed/lock_free_vec" }
 provider = { path = "shed/provider" }
 remote_execution = { path = "remote_execution/oss/re_grpc" }
-starlark = { version = "0.9.0", path = "starlark-rust/starlark" }
-starlark_lsp = { version = "0.9.0", path = "starlark-rust/starlark_lsp" }
-starlark_map = { version = "0.9.0", path = "starlark-rust/starlark_map" }
-starlark_syntax = { version = "0.9.0", path = "starlark-rust/starlark_syntax" }
+starlark = { version = "0.12.0", path = "starlark-rust/starlark" }
+starlark_lsp = { version = "0.12.0", path = "starlark-rust/starlark_lsp" }
+starlark_map = { version = "0.12.0", path = "starlark-rust/starlark_map" }
+starlark_syntax = { version = "0.12.0", path = "starlark-rust/starlark_syntax" }
+static_interner = { path = "shed/static_interner" }
+three_billion_instructions = { path = "shed/three_billion_instructions" }
+
+bazel_event_publisher_proto = { path = "app/bazel_event_publisher_proto" }
 buck2_action_impl = { path = "app/buck2_action_impl" }
 buck2_action_metadata_proto = { path = "app/buck2_action_metadata_proto" }
 buck2_analysis = { path = "app/buck2_analysis" }
 buck2_anon_target = { path = "app/buck2_anon_target" }
-buck2_bxl = { path = "app/buck2_bxl" }
+buck2_artifact = { path = "app/buck2_artifact" }
+buck2_audit = { path = "app/buck2_audit" }
+buck2_audit_server = { path = "app/buck2_audit_server" }
+buck2_build_api = { path = "app/buck2_build_api" }
+buck2_build_api_derive = { path = "app/buck2_build_api_derive" }
 buck2_build_info = { path = "app/buck2_build_info" }
+buck2_build_signals = { path = "app/buck2_build_signals" }
+buck2_build_signals_impl = { path = "app/buck2_build_signals_impl" }
+buck2_bxl = { path = "app/buck2_bxl" }
+buck2_certs = { path = "app/buck2_certs" }
 buck2_cfg_constructor = { path = "app/buck2_cfg_constructor" }
+buck2_cli_proto = { path = "app/buck2_cli_proto" }
+buck2_client = { path = "app/buck2_client" }
 buck2_client_ctx = { path = "app/buck2_client_ctx" }
+buck2_cmd_completion_client = { path = "app/buck2_cmd_completion_client" }
+buck2_cmd_docs = { path = "app/buck2_cmd_docs" }
+buck2_cmd_docs_server = { path = "app/buck2_cmd_docs_server" }
+buck2_cmd_starlark_client = { path = "app/buck2_cmd_starlark_client" }
+buck2_cmd_starlark_server = { path = "app/buck2_cmd_starlark_server" }
 buck2_common = { path = "app/buck2_common" }
 buck2_configured = { path = "app/buck2_configured" }
 buck2_core = { path = "app/buck2_core" }
+buck2_critical_path = { path = "app/buck2_critical_path" }
+buck2_daemon = { path = "app/buck2_daemon" }
+buck2_data = { path = "app/buck2_data" }
+buck2_directory = { path = "app/buck2_directory" }
 buck2_downward_api = { path = "app/buck2_downward_api" }
 buck2_downward_api_proto = { path = "app/buck2_downward_api_proto" }
-buck2_grpc = { path = "app/buck2_grpc" }
-buck2_interpreter_for_build = { path = "app/buck2_interpreter_for_build" }
-buck2_test = { path = "app/buck2_test" }
-buck2_test_api = { path = "app/buck2_test_api" }
-buck2_test_proto = { path = "app/buck2_test_proto" }
-buck2_test_runner = { path = "app/buck2_test_runner" }
+buck2_eden = { path = "app/buck2_eden" }
+buck2_error = { path = "app/buck2_error" }
+buck2_error_derive = { path = "app/buck2_error_derive" }
+buck2_event_log = { path = "app/buck2_event_log" }
+buck2_event_observer = { path = "app/buck2_event_observer" }
+buck2_event_publisher_proto = { path = "app/buck2_event_publisher_proto" }
+buck2_events = { path = "app/buck2_events" }
+buck2_execute = { path = "app/buck2_execute" }
+buck2_execute_impl = { path = "app/buck2_execute_impl" }
+buck2_external_cells = { path = "app/buck2_external_cells" }
+buck2_external_cells_bundled = { path = "app/buck2_external_cells_bundled" }
 buck2_file_watcher = { path = "app/buck2_file_watcher" }
 buck2_forkserver = { path = "app/buck2_forkserver" }
 buck2_forkserver_proto = { path = "app/buck2_forkserver_proto" }
-buck2_profile = { path = "app/buck2_profile" }
-buck2_protoc_dev = { path = "app/buck2_protoc_dev" }
-buck2_query_parser = { path = "app/buck2_query_parser" }
-buck2_query_derive = { path = "app/buck2_query_derive" }
-buck2_starlark = { path = "app/buck2_starlark" }
-buck2_audit = { path = "app/buck2_audit" }
-buck2_audit_server = { path = "app/buck2_audit_server" }
-buck2_cli_proto = { path = "app/buck2_cli_proto" }
-buck2_data = { path = "app/buck2_data" }
-buck2_event_observer = { path = "app/buck2_event_observer" }
-buck2_events = { path = "app/buck2_events" }
-buck2_query = { path = "app/buck2_query" }
-buck2_query_impls = { path = "app/buck2_query_impls" }
-buck2_tpx = { path = "buck2_tpx" }
-buck2_tpx_cli = { path = "buck2_tpx_cli" }
-buck2_build_api = { path = "app/buck2_build_api" }
-buck2_build_api_derive = { path = "app/buck2_build_api_derive" }
-buck2_client = { path = "app/buck2_client" }
-buck2_install_proto = {path = "app/buck2_install_proto" }
+buck2_futures = { path = "app/buck2_futures" }
+buck2_grpc = { path = "app/buck2_grpc" }
+buck2_http = { path = "app/buck2_http" }
+buck2_install_proto = { path = "app/buck2_install_proto" }
 buck2_interpreter = { path = "app/buck2_interpreter" }
+buck2_interpreter_for_build = { path = "app/buck2_interpreter_for_build" }
 buck2_interpreter_for_build_tests = { path = "app/buck2_interpreter_for_build_tests" }
 buck2_miniperf = { path = "app/buck2_miniperf" }
 buck2_miniperf_proto = { path = "app/buck2_miniperf_proto" }
 buck2_node = { path = "app/buck2_node" }
 buck2_offline_archive = { path = "app/buck2_offline_archive" }
-buck2_artifact = { path = "app/buck2_artifact" }
-buck2_error = { path = "app/buck2_error" }
-buck2_error_derive = { path = "app/buck2_error_derive" }
-buck2_execute = { path = "app/buck2_execute" }
-buck2_execute_impl = { path = "app/buck2_execute_impl" }
+buck2_profile = { path = "app/buck2_profile" }
+buck2_protoc_dev = { path = "app/buck2_protoc_dev" }
+buck2_query = { path = "app/buck2_query" }
+buck2_query_derive = { path = "app/buck2_query_derive" }
+buck2_query_impls = { path = "app/buck2_query_impls" }
+buck2_query_parser = { path = "app/buck2_query_parser" }
+buck2_re_configuration = { path = "app/buck2_re_configuration" }
 buck2_server = { path = "app/buck2_server" }
 buck2_server_commands = { path = "app/buck2_server_commands" }
 buck2_server_ctx = { path = "app/buck2_server_ctx" }
 buck2_server_starlark_debug = { path = "app/buck2_server_starlark_debug" }
-buck2_util = { path = "app/buck2_util" }
-buck2_re_configuration = { path = "app/buck2_re_configuration" }
 buck2_subscription_proto = { path = "app/buck2_subscription_proto" }
+buck2_test = { path = "app/buck2_test" }
+buck2_test_api = { path = "app/buck2_test_api" }
+buck2_test_proto = { path = "app/buck2_test_proto" }
+buck2_test_runner = { path = "app/buck2_test_runner" }
 buck2_transition = { path = "app/buck2_transition" }
+buck2_util = { path = "app/buck2_util" }
+buck2_validation = { path = "app/buck2_validation" }
 buck2_worker_proto = { path = "app/buck2_worker_proto" }
 buck2_wrapper_common = { path = "app/buck2_wrapper_common" }
-buck2_critical_path = { path = "app/buck2_critical_path" }
-buck2_build_signals = { path = "app/buck2_build_signals" }
-buck2_build_signals_impl = { path = "app/buck2_build_signals_impl" }
-buck2_eden = { path = "app/buck2_eden" }
 
 [profile.release]
 incremental = true
@@ -390,16 +428,16 @@ panic = "abort"
 
 # Use with: --profile=release-symbols -Z unstable-options
 [profile.release-symbols]
+debug = true
 incremental = true
 inherits = "release"
-debug = true
 
 [profile.dev]
-incremental = true
 debug = true
-panic = "abort"
-opt-level = 1
+incremental = true
 lto = "off"
+opt-level = 1
+panic = "abort"
 
 [profile.test]
 incremental = true
diff --git a/Cross.toml b/Cross.toml
new file mode 100644
index 0000000000000..2a7689e71c9a9
--- /dev/null
+++ b/Cross.toml
@@ -0,0 +1,2 @@
+[build.env]
+passthrough = ["JEMALLOC_SYS_WITH_LG_PAGE"]
diff --git a/HACKING.md b/HACKING.md
index b1d73bd0f1e4c..40768643956ae 100644
--- a/HACKING.md
+++ b/HACKING.md
@@ -1,7 +1,8 @@
 # Tips and tricks for hacking on Buck2
 
 You might have been lead here by reading [CONTRIBUTING.md](/CONTRIBUTING.md). If
-not, please read that as well! That will give you the high level overview; this document is all about the needed elbow grease you'll have to apply.
+not, please read that as well! That will give you the high level overview; this
+document is all about the needed elbow grease you'll have to apply.
 
 ## Building the code
 
@@ -21,8 +22,8 @@ cargo install --path=app/buck2
 
 Or, alternatively, install it directly from GitHub:
 
 ```sh
-rustup install nightly-2023-07-10
-cargo +nightly-2023-07-10 install --git https://github.com/facebook/buck2.git buck2
+rustup install nightly-2024-07-21
+cargo +nightly-2024-07-21 install --git https://github.com/facebook/buck2.git buck2
 ```
 
 ### Side note: using [Nix] to compile the source
@@ -110,8 +111,8 @@ have written. Some rules:
 ### Error messages
 
 - Names (of variables, targets, files, etc) should be quoted with backticks,
-  e.g. ``Variable `x` not defined``.
-- Lists should use square brackets, e.g. ``Available targets: [`aa`, `bb`]``.
+  e.g. `` Variable `x` not defined ``.
+- Lists should use square brackets, e.g. `` Available targets: [`aa`, `bb`] ``.
 - Error messages should start with an upper case letter. Error messages should
   not end with a period.
@@ -120,10 +121,10 @@ have written. Some rules:
 Most code is shared as-is between open source and the internal Meta version of
 Buck2. However, there are some exceptions:
 
-* The open-source remote execution client is different, because our internal
-  one works with custom servers/infrastructure that is not publicly available.
-* There are places controlled with `is_open_source()` which change configuration
+- The open-source remote execution client is different, because our internal one
+  works with custom servers/infrastructure that is not publicly available.
+- There are places controlled with `is_open_source()` which change configuration
   between the internal and open source versions.
-* Some places use `@oss-enable` or `@oss-disable` to comment/uncomment lines
-  of code. The internal code is visible, but the comment markers are moved
-  during export/import of code.
+- Some places use `@oss-enable` or `@oss-disable` to comment/uncomment lines of
+  code. The internal code is visible, but the comment markers are moved during
+  export/import of code.
diff --git a/NOTICE b/NOTICE
new file mode 100644
index 0000000000000..588a56f89cb26
--- /dev/null
+++ b/NOTICE
@@ -0,0 +1,4 @@
+The code in `app/buck2_error_derive` is adapted from thiserror (https://github.com/dtolnay/thiserror).
+
+The code in `shed/completion_verify/src/runtime` is adapted from `complete_pty`
+(https://crates.io/crates/completest_pty).
diff --git a/README.md b/README.md
index dedf7eab75b63..80e11ec69a21b 100644
--- a/README.md
+++ b/README.md
@@ -2,17 +2,18 @@
 
 # Buck2: fast multi-language build system
 
-![Version]
-![License]
-[![Build Status]][CI]
+![Version] ![License] [![Build Status]][CI]
 
-[Version]: https://img.shields.io/badge/release-unstable,%20"Developer%20Edition"-orange.svg
-[License]: https://img.shields.io/badge/license-MIT%20OR%20Apache--2.0-blueviolet.svg
-[Build Status]: https://img.shields.io/circleci/build/github/facebook/buck2
-[CI]: https://app.circleci.com/pipelines/github/facebook/buck2
+[Version]:
+  https://img.shields.io/badge/release-unstable,%20"Developer%20Edition"-orange.svg
+[License]:
+  https://img.shields.io/badge/license-MIT%20OR%20Apache--2.0-blueviolet.svg
+[Build Status]:
+  https://github.com/facebook/buck2/actions/workflows/build-and-test.yml/badge.svg
+[CI]: https://github.com/facebook/buck2/actions/workflows/build-and-test.yml
 
-  Homepage • Getting Started • Contributing
+  Homepage • Getting Started • Contributing
 
 ---
 
@@ -31,13 +32,13 @@ already exist?
   complete, or 0.1 seconds: when you have to build things, Buck2 doesn't waste
   time — it calculates the critical path and gets out of the way, with minimal
   overhead. It's not just the core design, but also careful attention to
-  detail that makes Buck2 so snappy. Buck2 is up to 2x faster than Buck1 *in
-  practice*[^perf-note]. So you spend more time iterating, and less time
+  detail that makes Buck2 so snappy. Buck2 is up to 2x faster than Buck1 _in
+  practice_[^perf-note]. So you spend more time iterating, and less time
   waiting.
 - **Hermetic**. When using Remote Execution[^hermetic-re-only], Buck2 becomes
-  *hermetic*: it is required for a build rule to correctly declare all of its
-  inputs; if they aren't specified correctly (e.g. a `.c` file neeads a `.h`
-  file that isn't correctly specified), the build will fail. This enforced
+  _hermetic_: it is required for a build rule to correctly declare all of its
+  inputs; if they aren't specified correctly (e.g. a `.c` file needs a `.h` file
+  that isn't correctly specified), the build will fail. This enforced
   correctness helps avoids entire classes of errors that most build systems
   allow, and helps ensure builds work everywhere for all users. And Buck2
   correctly tracks dependencies with far better accuracy than Buck1, in more
@@ -49,30 +50,32 @@ already exist?
   But then how do you run test suites, code coverage, or query code databases?
   Buck2 is designed to support multiple languages from the start, with
   abstractions for interoperation. And because it's completely scriptable, and
-  *users* can implement language support — it's incredibly flexible. Now
+  _users_ can implement language support — it's incredibly flexible. Now
   your Python library can depend on an OCaml library, and your OCaml library
   can depend on a Rust crate — and with a single build tool, you have a
   consistent UX to build and test and integrate all of these components.
 
-[^perf-note]: This number comes from internal usage of Buck1 versus Buck2 at
-    Meta. Please note that *appropriate* comparisons with systems like Bazel
-    have yet to be performed; Buck1 is the baseline because it's simply what
-    existed and what had to be replaced. Please benchmark Buck2 against your
-    favorite tools and let us know how it goes!
+[^perf-note]:
+    This number comes from internal usage of Buck1 versus Buck2 at Meta. Please
+    note that _appropriate_ comparisons with systems like Bazel have yet to be
+    performed; Buck1 is the baseline because it's simply what existed and what
+    had to be replaced. Please benchmark Buck2 against your favorite tools and
+    let us know how it goes!
 
-[^hermetic-re-only]: Buck2 currently does not sandbox *local-only* build steps;
-    in contrast, Buck2 using Remote Execution is *always* hermetic by design.
-    The vast majority of build rules are remote compatible, as well. Despite
-    that, we hope to lift this restriction in the (hopefully short-term) future
-    so that local-only builds are hermetic as well.
+[^hermetic-re-only]:
+    Buck2 currently does not sandbox _local-only_ build steps; in contrast,
+    Buck2 using Remote Execution is _always_ hermetic by design. The vast
+    majority of build rules are remote compatible, as well. Despite that, we
+    hope to lift this restriction in the (hopefully short-term) future so that
+    local-only builds are hermetic as well.
 
 If you're familiar with systems like Buck1, [Bazel](https://bazel.build/), or
 [Pants](https://www.pantsbuild.org/) — then Buck2 will feel warm and cozy,
But then why create Buck2 if those already exist? Because that isn't all — the page -_["Why Buck2?"](https://buck2.build/docs/why/)_ on our website goes into more -detail on several other important design critera that separate Buck2 from the -rest of the pack, including: +_["Why Buck2?"](https://buck2.build/docs/about/why/)_ on our website goes into +more detail on several other important design criteria that separate Buck2 from +the rest of the pack, including: - Support for ultra-large repositories, through filesystem virtualization and watching for changes to the filesystem. @@ -84,14 +87,14 @@ rest of the pack, including: build graph. This allows you to more cleanly support features that need graph introspection, like LSPs or compilation databases. - Support for distributed compilation, using the same Remote Execution API that - is supported by Bazel. Existing solutions like BuildBarn, BuildBuddy, and - EngFlow all work today. + is supported by Bazel. Existing solutions like BuildBarn, BuildBuddy, EngFlow, + and NativeLink all work today. - An efficient, robust, and sound design — inspired by modern theory of build systems and incremental computation. - And more! -If these headline features make you interested — check out the [Getting -Started](https://buck2.build/docs/getting_started/) guide! +If these headline features make you interested — check out the +[Getting Started](https://buck2.build/docs/getting_started/) guide! ## 🚧🚧🚧 **Warning** 🚧🚧🚧 — rough terrain lies ahead @@ -111,14 +114,15 @@ rough edges right now — several features are missing or in progress, some toolchains from Buck1 are missing, and you'll probably have to fiddle with things more than necessary to get it nice and polished. -Please provide feedback by submitting [issues and questions!](/issues) +Please provide feedback by submitting +[issues and questions!](https://github.com/facebook/buck2/issues) ## Installing Buck2 -You can get started by downloading the [latest buck2 -binary](https://github.com/facebook/buck2/releases/tag/latest) for your -platform. The `latest` tag always refers to a recent commit; it is updated on -every single push to the GitHub repository, so it will always be a recent +You can get started by downloading the +[latest buck2 binary](https://github.com/facebook/buck2/releases/tag/latest) for +your platform. The `latest` tag always refers to a recent commit; it is updated +on every single push to the GitHub repository, so it will always be a recent version. You can also compile Buck2 from source, if a binary isn't immediately available @@ -126,8 +130,8 @@ for your use; check out the [HACKING.md](./HACKING.md) file for information. ## Terminology conventions -Frequently used terms and their definitions can be found on the [glossary -page](https://buck2.build/docs/concepts/glossary/). +Frequently used terms and their definitions can be found on the +[glossary page](https://buck2.build/docs/concepts/glossary/). ## License diff --git a/action_error_handler/java/java_error_handler.bzl b/action_error_handler/java/java_error_handler.bzl new file mode 100644 index 0000000000000..bb91c595082b5 --- /dev/null +++ b/action_error_handler/java/java_error_handler.bzl @@ -0,0 +1,15 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree.
+ +load("@fbsource//tools/build_defs/android/action_error_handler:android_di_error_handler.bzl", "android_di_error_handler") + +def java_error_handler(ctx: ActionErrorCtx) -> list[ActionSubError]: + categories = [] + + categories += android_di_error_handler(ctx) + + return categories diff --git a/action_error_handler/kotlin/kotlin_error_handler.bzl b/action_error_handler/kotlin/kotlin_error_handler.bzl new file mode 100644 index 0000000000000..da0bcef797d2c --- /dev/null +++ b/action_error_handler/kotlin/kotlin_error_handler.bzl @@ -0,0 +1,15 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@fbsource//tools/build_defs/android/action_error_handler:android_di_error_handler.bzl", "android_di_error_handler") + +def kotlin_error_handler(ctx: ActionErrorCtx) -> list[ActionSubError]: + categories = [] + + categories += android_di_error_handler(ctx) + + return categories diff --git a/allocative/README.md b/allocative/README.md index 56fe805b18506..2c13bd7b2180e 100644 --- a/allocative/README.md +++ b/allocative/README.md @@ -1,41 +1,42 @@ # Allocative: memory profiler for Rust -This crate implements a lightweight memory profiler which allows -object traversal and memory size introspection. +This crate implements a lightweight memory profiler which allows object +traversal and memory size introspection. ## Usage `Allocative` trait (typically implemented with proc-macro) is introspectable: -`Allocative` values can be traversed and their size and sizes of referenced objects -can be collected. +`Allocative` values can be traversed and their size and the sizes of referenced +objects can be collected. -This crate provides a few utilities to work with such objects, -the main of such utilities is flame graph builder which produces flame graph -(see the crate documentation) like this: +This crate provides a few utilities to work with such objects; the main one is +the flame graph builder, which produces a flame graph (see the crate +documentation) like this: ![sample-flamegraph.png](sample-flamegraph.png) ## How it is different from other call-stack malloc profilers like jemalloc heap profiler -Allocative is not a substitute for call stack malloc profiler, -it provides a different view of memory usage. +Allocative is not a substitute for a call-stack malloc profiler; it provides a +different view of memory usage. Here are some differences between allocative and a call-stack malloc profiler: -* Allocative requires implementation of `Allocative` trait for each type - which needs to be measured, and some setup in the program to enable it is needed -* Allocative flamegraph shows object by object tree, not by call stack -* Allocative shows gaps in allocated memory, - e.g.
spare capacity of collections or too large padding in structs or enums -* Allocative allows profiling of non-malloc allocations - (for example, allocations within [bumpalo](https://github.com/fitzgen/bumpalo) bumps) -* Allocative allows profiling of memory for subset of the process data - (for example, measure the size of RPC response before serialization) +- Allocative requires an implementation of the `Allocative` trait for each type + which needs to be measured, and some setup in the program is needed to enable it +- Allocative flamegraph shows an object-by-object tree, not a call stack +- Allocative shows gaps in allocated memory, e.g. spare capacity of collections + or too large padding in structs or enums +- Allocative allows profiling of non-malloc allocations (for example, + allocations within [bumpalo](https://github.com/fitzgen/bumpalo) bumps) +- Allocative allows profiling of memory for a subset of the process data (for + example, measuring the size of an RPC response before serialization) ## Runtime overhead -When allocative is used, binary size is slightly increased due to implementations -of [`Allocative`] trait, but it has no runtime/memory overhead when it is enabled but not used. +When allocative is used, binary size is slightly increased due to the +implementations of the [`Allocative`] trait, but there is no runtime or memory +overhead when it is enabled but not used. ## Source code @@ -45,5 +46,5 @@ is synchronized to GitHub. The main copy is ## License -Allocative is both MIT and Apache License, Version 2.0 licensed, -as found in the [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) files. +Allocative is both MIT and Apache License, Version 2.0 licensed, as found in the +[LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) files. diff --git a/allocative/allocative/BUCK b/allocative/allocative/BUCK index 8495a1b3fe050..89d09b3391e83 100644 --- a/allocative/allocative/BUCK +++ b/allocative/allocative/BUCK @@ -1,6 +1,5 @@ load("@fbcode_macros//build_defs:native_rules.bzl", "buck_filegroup") load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") diff --git a/allocative/allocative/Cargo.toml b/allocative/allocative/Cargo.toml index db66d97fe2fd7..da7352e5130a6 100644 --- a/allocative/allocative/Cargo.toml +++ b/allocative/allocative/Cargo.toml @@ -1,39 +1,42 @@ [package] -name = "allocative" -version = "0.3.0" -edition = "2021" -description = "Inspect rust object tree and output it as flamegraph" -license = "MIT OR Apache-2.0" authors = ["Meta"] -repository = "https://github.com/facebookexperimental/allocative" +description = "Inspect rust object tree and output it as flamegraph" documentation = "https://docs.rs/allocative" +edition = "2021" +license = { workspace = true } +name = "allocative" +repository = "https://github.com/facebookexperimental/allocative" +version = "0.3.3" [dependencies] -allocative_derive = { path = "../allocative_derive", version = "=0.3.0" } +allocative_derive = { path = "../allocative_derive", version = "=0.3.3" } ctor = { workspace = true } anyhow = { version = "1.0.65", optional = true } bumpalo = { version = "3.11.1", optional = true } -dashmap = { version = "4.0.2", optional = true } +compact_str = { version = "0.8", optional = true } +dashmap = { version = "5.5.3", optional = true } either = { version = "1.8", optional = true } futures = { version = "0.3.24", optional = true } -hashbrown = { version = "0.12.3", optional = true } -indexmap = { version = "1.9.1", optional = true } +hashbrown = { version = "0.14.3", features = ["raw"], optional = true } +indexmap = { version = "2.2.6", optional = true } num-bigint = { version = "0.4.3", optional = true } -parking_lot = { version = "0.11.2", optional = true } -compact_str = { version = "0.6.1", optional = true } once_cell = { version = "1.15.0", optional = true } +parking_lot = { version = "0.11.2", optional = true } prost-types = { version = "0.11.2", optional = true } relative-path = { version = "1.7.0", optional = true } serde_json = { version = "1.0.48", optional = true } slab = { version = "0.4.7", optional = true } smallvec = { version = "1.10.0", optional = true } -tokio = { version = "1.5", optional = true, features = ["sync"] } -triomphe = { version = "0.1.8", optional = true } sorted_vector_map.optional = true -sorted_vector_map.version = "0.1" # @oss-disable: sorted_vector_map.path = "../../../common/rust/shed/sorted_vector_map" +sorted_vector_map.version = "0.2" +tokio = { version = "1.5", optional = true, features = ["sync"] } +triomphe = { version = "0.1.8", optional = true } [dev-dependencies] inferno = { version = "0.11.11", default-features = false } + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ["cfg(rust_nightly)"] } diff --git a/allocative/allocative/src/allocative_trait.rs b/allocative/allocative/src/allocative_trait.rs index 123c5d3d902de..fe83aad436178 100644 --- a/allocative/allocative/src/allocative_trait.rs +++ b/allocative/allocative/src/allocative_trait.rs @@ -25,13 +25,14 @@ use crate::Visitor; /// } /// ``` /// -/// Proc macro supports two attributes: `#[allocative(skip)]` and `#[allocative(bound = "")]`. +/// The proc macro supports three attributes: `#[allocative(skip)]`, +/// `#[allocative(bound = "...")]`, and `#[allocative(visit = ...)]`. /// /// ## `#[allocative(skip)]` /// -/// `#[allocative(skip)]` can be used to skip field from traversal -/// (for example, to skip fields which are not `Allocative`, -/// and can be skipped because they are cheap). +/// `#[allocative(skip)]` can be used to skip a field from traversal (for example, +/// to skip fields which are not `Allocative`, and can be skipped because they +/// are cheap). /// /// ``` /// use allocative::Allocative; @@ -46,13 +47,19 @@ use crate::Visitor; /// } /// ``` /// -/// ## `#[allocative(bound = "")]` +/// ## `#[allocative(bound = "...")]` +/// +/// `#[allocative(bound = "...")]` can be used to override the bounds that are +/// added to the generics of the implementation. +/// +/// An empty string (`#[allocative(bound = "")]`) simply erases all bounds. It +/// adds all type variables found in the type to the list of generics but with +/// an empty bound. As an example, the following: /// -/// `#[allocative(bound = "")]` can be used to not add `T: Allocative` bound -/// to `Allocative` trait implementation, like this: /// /// ``` /// use std::marker::PhantomData; +/// /// use allocative::Allocative; /// /// struct Unsupported; @@ -62,9 +69,71 @@ use crate::Visitor; /// struct Baz<T> { /// _marker: PhantomData<T>, /// } +/// ``` +/// +/// This would generate the implementation +/// +/// ```ignore +/// impl<T> Allocative for Baz<T> { ... } +/// ``` +/// +/// Alternatively you can use the string to provide custom bounds. The string in /// this case is used *verbatim* as the bounds, which affords great flexibility, +/// but also necessitates that all type variables must be mentioned or will be +/// unbound (compile error). As an example, we may derive the size of a `HashMap` +/// by ignoring the hasher type.
+/// +/// ```ignore +/// #[allocative(bound = "K: Allocative, V:Allocative, S")] +/// struct HashMap<K, V, S> { +/// ... +/// } +/// ``` +/// +/// Which generates +/// +/// ```ignore +/// impl<K: Allocative, V: Allocative, S> Allocative for HashMap<K, V, S> { +/// ... +/// } +/// ``` +/// +/// ## `#[allocative(visit = ...)]` +/// +/// This annotation is used to provide a custom visit method for a given field. This +/// is especially useful if the type of the field does not implement `Allocative`. +/// +/// The annotation takes the path to a method with a signature `for<'a, 'b>(&T, &'a +/// mut allocative::Visitor<'b>)` where `T` is the type of the field. The function +/// you provide is basically the same as if you implemented [`Allocative::visit`]. +/// +/// As an example: +/// +/// ``` +/// use allocative::Allocative; +/// use allocative::Key; +/// use allocative::Visitor; +/// // use third_party_lib::Unsupported; +/// # struct Unsupported<T>(T); +/// # impl<T> Unsupported<T> { +/// # fn iter_elems(&self) -> &[T] { &[] } +/// # } +/// +/// #[derive(Allocative)] +/// struct Bar { +/// #[allocative(visit = visit_unsupported)] +/// unsupported: Unsupported<usize>, +/// } /// -/// // So `Baz` is `Allocative` even though `Unsupported` is not. -/// let allocative: &dyn Allocative = &Baz::<Unsupported> { _marker: PhantomData }; +/// fn visit_unsupported<'a, 'b>(u: &Unsupported<usize>, visitor: &'a mut Visitor<'b>) { +/// const ELEM_KEY: Key = Key::new("elements"); +/// let mut visitor = visitor.enter_self(u); +/// for element in u.iter_elems() { +/// visitor.visit_field(ELEM_KEY, element); +/// } +/// visitor.exit() +/// } /// ``` pub trait Allocative { fn visit<'a, 'b: 'a>(&self, visitor: &'a mut Visitor<'b>); diff --git a/allocative/allocative/src/flamegraph.rs b/allocative/allocative/src/flamegraph.rs index 67a63c9f4f15e..05168f8ecde9c 100644 --- a/allocative/allocative/src/flamegraph.rs +++ b/allocative/allocative/src/flamegraph.rs @@ -23,6 +23,8 @@ use crate::visitor::VisitorImpl; use crate::Allocative; /// Node in flamegraph tree. +/// +/// Can be written to flamegraph format with [`write`](FlameGraph::write). #[derive(Debug, Default, Clone)] pub struct FlameGraph { children: HashMap<Key, FlameGraph>, @@ -63,7 +65,6 @@ impl FlameGraph { self.node_size += size; } - #[allow(clippy::from_iter_instead_of_collect)] fn write_flame_graph_impl(&self, stack: &[&str], w: &mut String) { if self.node_size != 0 { if !stack.is_empty() { @@ -82,7 +83,10 @@ impl FlameGraph { } } - /// Write flamegraph in format suitable for `flamegraph.pl` or `inferno`. + /// Write flamegraph in format suitable for [flamegraph.pl] or [inferno].
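+ /// + /// The output is the usual collapsed-stack text format, one `path size` pair per line with sizes in bytes, as in this purely illustrative sketch (the type and field names here are hypothetical, not produced by this crate): + /// + /// ```text + /// MyStruct 16 + /// MyStruct;data 32 + /// MyStruct;data;unused_capacity 8 + /// ```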
+ /// + /// [flamegraph.pl]: https://github.com/brendangregg/FlameGraph + /// [inferno]: https://github.com/jonhoo/inferno pub fn write(&self) -> String { let mut r = String::new(); self.write_flame_graph_impl(&[], &mut r); @@ -276,18 +280,22 @@ unsafe impl Send for VisitedSharedPointer {} /// # Example /// /// ``` -/// use allocative::FlameGraphBuilder; /// use allocative::Allocative; +/// use allocative::FlameGraphBuilder; /// /// #[derive(Allocative)] /// struct Foo { /// data: String, /// }; /// -/// let foo1 = Foo { data: "Hello, world!".to_owned() }; -/// let foo2 = Foo { data: "Another message!".to_owned() }; +/// let foo1 = Foo { +/// data: "Hello, world!".to_owned(), +/// }; +/// let foo2 = Foo { +/// data: "Another message!".to_owned(), +/// }; /// -/// let mut flamegraph = FlameGraphBuilder::default(); +/// let mut flamegraph = FlameGraphBuilder::default(); /// flamegraph.visit_root(&foo1); /// flamegraph.visit_root(&foo2); /// let flamegraph_src = flamegraph.finish().flamegraph(); diff --git a/allocative/allocative/src/golden.rs b/allocative/allocative/src/golden.rs index bee5ab960bd55..6afcde98ec77c 100644 --- a/allocative/allocative/src/golden.rs +++ b/allocative/allocative/src/golden.rs @@ -10,6 +10,7 @@ #![cfg(test)] use std::env; +use std::fmt::Write; use std::fs; use crate::Allocative; @@ -50,8 +51,10 @@ fn make_golden(value: &T) -> (String, String) { "{header}{flamegraph}", header = golden_header() .lines() - .map(|line| format!("# {}\n", line)) - .collect::<String>() + .fold(String::new(), |mut output, line| { + let _ = writeln!(output, "# {}", line); + output + }) ); let flamegraph_svg = flamegraph_svg.replace( diff --git a/allocative/allocative/src/impls/mod.rs b/allocative/allocative/src/impls.rs similarity index 100% rename from allocative/allocative/src/impls/mod.rs rename to allocative/allocative/src/impls.rs diff --git a/allocative/allocative/src/impls/hashbrown.rs b/allocative/allocative/src/impls/hashbrown.rs index fc60e3ce801cf..052b5753f0af3 100644 --- a/allocative/allocative/src/impls/hashbrown.rs +++ b/allocative/allocative/src/impls/hashbrown.rs @@ -12,6 +12,7 @@ use std::mem; use hashbrown::raw::RawTable; +use hashbrown::HashTable; use crate::Allocative; use crate::Key; @@ -41,6 +42,28 @@ impl Allocative for RawTable { } } +impl<T: Allocative> Allocative for HashTable<T> { + fn visit<'a, 'b: 'a>(&self, visitor: &'a mut Visitor<'b>) { + use crate::impls::common::DATA_NAME; + use crate::impls::hashbrown_util; + + let mut visitor = visitor.enter_self_sized::<Self>(); + { + let mut visitor = visitor.enter_unique(DATA_NAME, mem::size_of::<*const T>()); + { + let mut visitor = visitor.enter( + CAPACITY_NAME, + hashbrown_util::raw_table_alloc_size_for_len::<T>(self.capacity()), + ); + visitor.visit_iter::<T, _>(self.iter()); + visitor.exit(); + } + visitor.exit(); + } + visitor.exit(); + } +} + #[cfg(test)] mod tests { use std::collections::hash_map::DefaultHasher; @@ -48,6 +71,7 @@ mod tests { use std::hash::Hasher; use hashbrown::raw::RawTable; + use hashbrown::HashTable; use crate::golden::golden_test; @@ -66,4 +90,14 @@ mod tests { golden_test!(&table); } + + #[test] + fn test_hash_table() { + let mut table = HashTable::with_capacity(100); + for i in 0..100 { + table.insert_unique(hash(&i.to_string()), i.to_string(), hash); + } + + golden_test!(&table); + } } diff --git a/allocative/allocative/src/impls/hashbrown_test_hash_table.src new file mode 100644 index 0000000000000..973aa888fcd83 --- /dev/null +++
b/allocative/allocative/src/impls/hashbrown_test_hash_table.src @@ -0,0 +1,12 @@ +# @generated +# To regenerate, run: +# ``` +# ALLOCATIVE_REGENERATE_TESTS=1 cargo test -p allocative +# ``` +hashbrown::table::HashTable 24 +hashbrown::table::HashTable;data 8 +hashbrown::table::HashTable;data;capacity 800 +hashbrown::table::HashTable;data;capacity;alloc::string::String 1600 +hashbrown::table::HashTable;data;capacity;alloc::string::String;ptr 800 +hashbrown::table::HashTable;data;capacity;alloc::string::String;ptr;capacity;u8 190 +hashbrown::table::HashTable;data;capacity;alloc::string::String;ptr;capacity;unused_capacity 610 diff --git a/allocative/allocative/src/impls/indexmap.rs b/allocative/allocative/src/impls/indexmap.rs index e88ca3823d96e..ff93acc6b9750 100644 --- a/allocative/allocative/src/impls/indexmap.rs +++ b/allocative/allocative/src/impls/indexmap.rs @@ -31,7 +31,7 @@ fn add_raw_table_for_len(visitor: &mut Visitor, len: usize) { } } -impl<T: Allocative> Allocative for IndexSet<T> { +impl<T: Allocative, S> Allocative for IndexSet<T, S> { fn visit<'a, 'b: 'a>(&self, visitor: &'a mut Visitor<'b>) { let mut visitor = visitor.enter_self_sized::<Self>(); { @@ -47,7 +47,7 @@ impl Allocative for IndexSet { } } -impl<K: Allocative, V: Allocative> Allocative for IndexMap<K, V> { +impl<K: Allocative, V: Allocative, S> Allocative for IndexMap<K, V, S> { fn visit<'a, 'b: 'a>(&self, visitor: &'a mut Visitor<'b>) { let mut visitor = visitor.enter_self_sized::<Self>(); { diff --git a/allocative/allocative/src/impls/smallvec.rs b/allocative/allocative/src/impls/smallvec.rs index 2bb13eaf96e3f..8c1f1ab55cd67 100644 --- a/allocative/allocative/src/impls/smallvec.rs +++ b/allocative/allocative/src/impls/smallvec.rs @@ -19,7 +19,7 @@ use crate::visitor::Visitor; impl<A> Allocative for SmallVec<A> where - A: Array + 'static, + A: Array, A::Item: Allocative, { fn visit<'a, 'b: 'a>(&self, visitor: &'a mut Visitor<'b>) { diff --git a/allocative/allocative/src/impls/std.rs b/allocative/allocative/src/impls/std.rs new file mode 100644 index 0000000000000..df566de15c85d --- /dev/null +++ b/allocative/allocative/src/impls/std.rs @@ -0,0 +1,20 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +mod any; +mod cell; +mod collections; +mod function; +mod mem; +mod net; +mod primitive; +mod sync; +mod time; +mod tuple; +mod unsorted; diff --git a/allocative/allocative/src/impls/std/any.rs b/allocative/allocative/src/impls/std/any.rs new file mode 100644 index 0000000000000..1e67f46ee08bf --- /dev/null +++ b/allocative/allocative/src/impls/std/any.rs @@ -0,0 +1,17 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree.
+ */ + +use crate::allocative_trait::Allocative; +use crate::visitor::Visitor; + +impl Allocative for std::any::TypeId { + fn visit<'a, 'b: 'a>(&self, visitor: &'a mut Visitor<'b>) { + visitor.visit_simple_sized::<Self>(); + } +} diff --git a/allocative/allocative/src/impls/std/mod.rs b/allocative/allocative/src/impls/std/mod.rs deleted file mode 100644 index 65f5b4edcbacb..0000000000000 --- a/allocative/allocative/src/impls/std/mod.rs +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -mod cell; -mod collections; -mod function; -mod mem; -mod primitive; -mod sync; -mod time; -mod tuple; -mod unsorted; diff --git a/allocative/allocative/src/impls/std/net.rs b/allocative/allocative/src/impls/std/net.rs new file mode 100644 index 0000000000000..29a2fbfbfb2b7 --- /dev/null +++ b/allocative/allocative/src/impls/std/net.rs @@ -0,0 +1,33 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::Ipv6Addr; + +use crate::allocative_trait::Allocative; +use crate::visitor::Visitor; + +impl Allocative for IpAddr { + fn visit<'a, 'b: 'a>(&self, visitor: &'a mut Visitor<'b>) { + visitor.enter_self_sized::<Self>().exit(); + } +} + +impl Allocative for Ipv4Addr { + fn visit<'a, 'b: 'a>(&self, visitor: &'a mut Visitor<'b>) { + visitor.enter_self_sized::<Self>().exit(); + } +} + +impl Allocative for Ipv6Addr { + fn visit<'a, 'b: 'a>(&self, visitor: &'a mut Visitor<'b>) { + visitor.enter_self_sized::<Self>().exit(); + } +} diff --git a/allocative/allocative/src/impls/std/sync.rs b/allocative/allocative/src/impls/std/sync.rs index 529687ed9b7fa..cd058cad0f0a6 100644 --- a/allocative/allocative/src/impls/std/sync.rs +++ b/allocative/allocative/src/impls/std/sync.rs @@ -52,7 +52,8 @@ impl Allocative for Arc { Arc::as_ptr(self) as *const (), ); if let Some(mut visitor) = visitor { - struct ArcInner(AtomicUsize, AtomicUsize, ()); + #[allow(dead_code)] // Only used for its size + struct ArcInner(AtomicUsize, AtomicUsize); { let val: &T = self; let mut visitor = visitor.enter( @@ -92,6 +93,7 @@ impl Allocative for Rc { Rc::as_ptr(self) as *const (), ); if let Some(mut visitor) = visitor { + #[allow(dead_code)] // fields `0` and `1` are never read struct RcInner(AtomicUsize, AtomicUsize, ()); { let val: &T = self; diff --git a/allocative/allocative/src/lib.rs b/allocative/allocative/src/lib.rs index 388b8829ec14e..3fa8e7a4f1e45 100644 --- a/allocative/allocative/src/lib.rs +++ b/allocative/allocative/src/lib.rs @@ -73,3 +73,37 @@ pub use crate::visitor::Visitor; pub mod __macro_refs { pub use ctor; } + +/// Create a `const` of type `Key` with the provided `ident` as the value and +/// return that value. This allows the keys to be placed conveniently inline +/// without any performance hit because unlike calling `Key::new` this is +/// guaranteed to be evaluated at compile time.
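+/// +/// For example, `ident_key!(foo)` expands (roughly, per the macro body below) to `{ const KEY: allocative::Key = allocative::Key::new("foo"); KEY }`.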
+/// +/// The main use case is manual implementations of [`Allocative`], like so: +/// +/// ``` +/// use allocative::ident_key; +/// use allocative::Allocative; +/// use allocative::Visitor; +/// +/// struct MyStruct { +/// foo: usize, +/// bar: Vec<()>, +/// } +/// +/// impl Allocative for MyStruct { +/// fn visit<'a, 'b: 'a>(&self, visitor: &'a mut Visitor<'b>) { +/// let mut visitor = visitor.enter_self(self); +/// visitor.visit_field(ident_key!(foo), &self.foo); +/// visitor.visit_field(ident_key!(bar), &self.bar); +/// visitor.exit(); +/// } +/// } +/// ``` +#[macro_export] +macro_rules! ident_key { + ($name:ident) => {{ + const KEY: $crate::Key = $crate::Key::new(stringify!($name)); + KEY + }}; +} diff --git a/allocative/allocative/src/rc_str.rs b/allocative/allocative/src/rc_str.rs index 26f3aa253b747..be9fb5db0349e 100644 --- a/allocative/allocative/src/rc_str.rs +++ b/allocative/allocative/src/rc_str.rs @@ -14,6 +14,7 @@ use std::ops::Deref; use std::rc::Rc; #[derive(Clone, Debug, PartialEq, Eq, Ord, PartialOrd)] +#[allow(dead_code)] pub(crate) struct RcStr(Rc<str>); impl<'a> From<&'a str> for RcStr { diff --git a/allocative/allocative/src/size_of.rs b/allocative/allocative/src/size_of.rs index 1662e07305adf..b31110d946629 100644 --- a/allocative/allocative/src/size_of.rs +++ b/allocative/allocative/src/size_of.rs @@ -29,7 +29,12 @@ use crate::Visitor; /// data: Vec<u8>, /// } /// -/// assert_eq!(3, allocative::size_of_unique_allocated_data(&Foo { data: vec![10, 20, 30] })); +/// assert_eq!( +/// 3, +/// allocative::size_of_unique_allocated_data(&Foo { +/// data: vec![10, 20, 30] +/// }) +/// ); /// ``` pub fn size_of_unique_allocated_data(root: &dyn Allocative) -> usize { struct SizeOfUniqueAllocatedDataVisitor { @@ -91,7 +96,12 @@ pub fn size_of_unique_allocated_data(root: &dyn Allocative) -> usize { /// data: Vec<u8>, /// } /// -/// assert_eq!(3 + std::mem::size_of::<Vec<u8>>(), allocative::size_of_unique(&Foo { data: vec![10, 20, 30] })); +/// assert_eq!( +/// 3 + std::mem::size_of::<Vec<u8>>(), +/// allocative::size_of_unique(&Foo { +/// data: vec![10, 20, 30] +/// }) +/// ); /// ``` pub fn size_of_unique<T>(root: &T) -> usize where diff --git a/allocative/allocative/src/test_derive.rs b/allocative/allocative/src/test_derive.rs new file mode 100644 index 0000000000000..6dff32f758f1a --- /dev/null +++ b/allocative/allocative/src/test_derive.rs @@ -0,0 +1,48 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree.
+ */ + +#![cfg(test)] +#![allow(dead_code)] + +mod bounds; +mod dst; +mod skip; +mod visit; +mod with_flamegraph; + +use crate as allocative; +use crate::Allocative; + +#[derive(Allocative)] +struct Empty {} + +#[derive(Allocative)] +struct TupleStruct(u32, String); + +#[derive(Allocative)] +struct RegularStruct { + a: u32, + b: String, +} + +#[derive(Allocative)] +enum Enum { + Unit, + Tuple(u32, String), + Regular { a: u32, b: String }, +} + +#[derive(Allocative)] +enum GenericEnum<T> { + Unit, + Tuple(T, String), +} + +#[derive(Allocative)] +struct StructWithDefaultParam<T = u32>(T); diff --git a/allocative/allocative/src/test_derive/bounds.rs b/allocative/allocative/src/test_derive/bounds.rs new file mode 100644 index 0000000000000..011cd533f8113 --- /dev/null +++ b/allocative/allocative/src/test_derive/bounds.rs @@ -0,0 +1,34 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use crate as allocative; +use crate::Allocative; + +#[derive(Allocative)] +#[allocative(bound = "K: Allocative, V:Allocative, S")] +struct HashMap<K, V, S> { + map: std::collections::HashMap<K, V, S>, +} + +#[derive(Allocative)] +#[allocative(bound = "S: Sized")] +struct CanBeUnsized<S: ?Sized> { + #[allocative(visit = via_sized)] + s: Box<S>, +} + +#[allow(clippy::borrowed_box)] +fn via_sized<S: ?Sized>(s: &Box<S>, visitor: &mut allocative::Visitor) { + visitor + .enter( + allocative::Key::new("s"), + std::mem::size_of_val(Box::as_ref(s)), + ) + .exit() +} diff --git a/allocative/allocative/src/test_derive/mod.rs b/allocative/allocative/src/test_derive/mod.rs deleted file mode 100644 index d3db100c88b87..0000000000000 --- a/allocative/allocative/src/test_derive/mod.rs +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -#![cfg(test)] -#![allow(dead_code)] - -mod dst; -mod skip; -mod visit; -mod with_flamegraph; - -use crate as allocative; -use crate::Allocative; - -#[derive(Allocative)] -struct Empty {} - -#[derive(Allocative)] -struct TupleStruct(u32, String); - -#[derive(Allocative)] -struct RegularStruct { - a: u32, - b: String, -} - -#[derive(Allocative)] -enum Enum { - Unit, - Tuple(u32, String), - Regular { a: u32, b: String }, -} - -#[derive(Allocative)] -enum GenericEnum<T> { - Unit, - Tuple(T, String), -} - -#[derive(Allocative)] -struct StructWithDefaultParam<T = u32>(T); diff --git a/allocative/allocative/src/visitor.rs b/allocative/allocative/src/visitor.rs index 5ec7fd6c39a6f..9fd302e5d5f01 100644 --- a/allocative/allocative/src/visitor.rs +++ b/allocative/allocative/src/visitor.rs @@ -154,8 +154,24 @@ impl<'a> Visitor<'a> { where 'a: 'b, { - let mut visitor = self.enter(name, mem::size_of_val::<T>(field)); - field.visit(&mut visitor); + self.visit_field_with(name, mem::size_of_val::<T>(field), |visitor| { + field.visit(visitor); + }) + } + + /// Similar to `visit_field`, but instead of calling [`Allocative::visit`] for + /// the field's type, you can provide a custom closure to call instead. + /// + /// Useful if the field type does not implement [`Allocative`].
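+ /// + /// A minimal illustrative sketch (the `Opaque` type, the `extra` field, and `opaque_heap_bytes` are hypothetical, not part of this crate): + /// + /// ```ignore + /// visitor.visit_field_with(Key::new("extra"), std::mem::size_of::<Opaque>(), |visitor| { + /// visitor.visit_simple(Key::new("heap"), opaque_heap_bytes(&self.extra)); + /// }); + /// ```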
+ pub fn visit_field_with<'b, 'f, F: for<'c, 'd> FnOnce(&'d mut Visitor<'c>)>( + &'b mut self, + name: Key, + field_size: usize, + visit: F, + ) { + let mut visitor = self.enter(name, field_size); + visit(&mut visitor); visitor.exit(); } @@ -190,25 +206,25 @@ impl<'a> Visitor<'a> { 'a: 'b, T: Allocative, { - let mut visitor = self.enter(CAPACITY_NAME, mem::size_of::<T>() * capacity); - visitor.visit_slice(data); - visitor.visit_simple( - UNUSED_CAPACITY_NAME, - mem::size_of::<T>() * capacity.wrapping_sub(data.len()), - ); - visitor.exit(); + self.visit_field_with(CAPACITY_NAME, mem::size_of::<T>() * capacity, |visitor| { + visitor.visit_slice(data); + visitor.visit_simple( + UNUSED_CAPACITY_NAME, + mem::size_of::<T>() * capacity.wrapping_sub(data.len()), + ); + }) } pub fn visit_generic_map_fields<'b, 'x, K: Allocative + 'x, V: Allocative + 'x>( &'b mut self, entries: impl IntoIterator<Item = (&'x K, &'x V)>, ) { - let mut visitor = self.enter_unique(DATA_NAME, mem::size_of::<*const ()>()); - for (k, v) in entries { - visitor.visit_field(KEY_NAME, k); - visitor.visit_field(VALUE_NAME, v); - } - visitor.exit(); + self.visit_field_with(DATA_NAME, mem::size_of::<*const ()>(), move |visitor| { + for (k, v) in entries { + visitor.visit_field(KEY_NAME, k); + visitor.visit_field(VALUE_NAME, v); + } + }) } pub fn visit_generic_set_fields<'b, 'x, K: Allocative + 'x>( @@ -217,11 +233,11 @@ impl<'a> Visitor<'a> { ) where 'a: 'b, { - let mut visitor = self.enter_unique(DATA_NAME, mem::size_of::<*const ()>()); - for k in entries { - visitor.visit_field(KEY_NAME, k); - } - visitor.exit(); + self.visit_field_with(DATA_NAME, mem::size_of::<*const ()>(), |visitor| { + for k in entries { + visitor.visit_field(KEY_NAME, k); + } + }) } fn exit_impl(&mut self) { diff --git a/allocative/allocative_derive/BUCK b/allocative/allocative_derive/BUCK index c31a4d85881cd..1403900147421 100644 --- a/allocative/allocative_derive/BUCK +++ b/allocative/allocative_derive/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") diff --git a/allocative/allocative_derive/Cargo.toml b/allocative/allocative_derive/Cargo.toml index 3d2ac95c85d09..a30726ce7766b 100644 --- a/allocative/allocative_derive/Cargo.toml +++ b/allocative/allocative_derive/Cargo.toml @@ -1,17 +1,17 @@ [package] -name = "allocative_derive" -version = "0.3.0" -edition = "2021" -description = "Implementation of derive(Allocative) for allocative crate" -license = "MIT OR Apache-2.0" authors = ["Meta"] -repository = "https://github.com/facebookexperimental/allocative" +description = "Implementation of derive(Allocative) for allocative crate" documentation = "https://docs.rs/allocative" +edition = "2021" +license = { workspace = true } +name = "allocative_derive" +repository = "https://github.com/facebookexperimental/allocative" +version = "0.3.3" [lib] proc-macro = true [dependencies] -quote = { workspace = true } proc-macro2 = { workspace = true } +quote = { workspace = true } syn = { workspace = true } diff --git a/allocative/allocative_derive/src/derive_allocative.rs b/allocative/allocative_derive/src/derive_allocative.rs index a8537035c0967..6044c357dc138 100644 --- a/allocative/allocative_derive/src/derive_allocative.rs +++ b/allocative/allocative_derive/src/derive_allocative.rs @@ -7,8 +7,8 @@ * of this source tree.
*/ -use proc_macro::TokenStream; use proc_macro2::Ident; +use proc_macro2::Span; use quote::quote_spanned; use quote::ToTokens; use syn::parse::ParseStream; @@ -41,7 +41,7 @@ const fn hash(s: &str) -> u64 { hash } -pub(crate) fn derive_allocative(input: TokenStream) -> TokenStream { +pub(crate) fn derive_allocative(input: proc_macro::TokenStream) -> proc_macro::TokenStream { match derive_allocative_impl(input.into()) { Ok(tokens) => tokens.into(), Err(err) => err.to_compile_error().into(), @@ -54,10 +54,9 @@ fn impl_generics( ) -> syn::Result { if let Some(bound) = &attrs.bound { if !bound.is_empty() { - return Err(syn::Error::new( - attrs.bound.span(), - "non-empty bound is not implemented", - )); + let span = attrs.span.unwrap_or_else(Span::call_site); + let bound = bound.parse::()?; + return Ok(quote_spanned! { span => < #bound > }); } } @@ -301,13 +300,13 @@ fn gen_visit_field( #[derive(Default)] struct AllocativeAttrs { + span: Option<Span>, skip: bool, bound: Option<String>, visit: Option, } /// Parse an `#[allocative(...)]` annotation. -#[cfg_attr(feature = "gazebo_lint", allow(gazebo_lint_impl_dupe))] // The custom_keyword macro fn extract_attrs(attrs: &[Attribute]) -> syn::Result<AllocativeAttrs> { syn::custom_keyword!(skip); syn::custom_keyword!(bound); @@ -320,6 +319,8 @@ fn extract_attrs(attrs: &[Attribute]) -> syn::Result { continue; } + opts.span = Some(attr.span()); + attr.parse_args_with(|input: ParseStream| { loop { if input.parse::<skip>().is_ok() { diff --git a/app/bazel_event_publisher_proto/Cargo.toml b/app/bazel_event_publisher_proto/Cargo.toml new file mode 100644 index 0000000000000..f7daddf9bf52d --- /dev/null +++ b/app/bazel_event_publisher_proto/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "bazel_event_publisher_proto" + +edition = "2021" +license = { workspace = true } +repository = { workspace = true } +version = "0.1.0" + +[dependencies] +prost = { workspace = true } +prost-types = { workspace = true } +tonic = { workspace = true } + +[build-dependencies] +buck2_protoc_dev = { workspace = true } diff --git a/app/bazel_event_publisher_proto/build.rs b/app/bazel_event_publisher_proto/build.rs new file mode 100644 index 0000000000000..1b59ef89a797f --- /dev/null +++ b/app/bazel_event_publisher_proto/build.rs @@ -0,0 +1,37 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree.
+ */ + +use std::env; +use std::io; +use std::path::PathBuf; + +fn main() -> io::Result<()> { + let proto_files = &[ + "proto/action_cache.proto", + "proto/build_event_stream.proto", + "proto/command_line.proto", + "proto/failure_details.proto", + "proto/invocation_policy.proto", + "proto/option_filters.proto", + "proto/package_load_metrics.proto", + "proto/strategy_policy.proto", + "proto/google/api/annotations.proto", + "proto/google/api/client.proto", + "proto/google/api/field_behavior.proto", + "proto/google/api/http.proto", + "proto/google/api/launch_stage.proto", + "proto/google/devtools/build/v1/build_events.proto", + "proto/google/devtools/build/v1/build_status.proto", + "proto/google/devtools/build/v1/publish_build_event.proto", + ]; + + buck2_protoc_dev::configure() + .setup_protoc() + .compile(proto_files, &["./proto/"]) +} diff --git a/app/bazel_event_publisher_proto/proto/action_cache.proto b/app/bazel_event_publisher_proto/proto/action_cache.proto new file mode 100644 index 0000000000000..a31cba599ed60 --- /dev/null +++ b/app/bazel_event_publisher_proto/proto/action_cache.proto @@ -0,0 +1,63 @@ +// Copyright 2017 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package blaze; + +option java_package = "com.google.devtools.build.lib.actions.cache"; +option java_outer_classname = "Protos"; + +// Information about the action cache behavior during a single build. +message ActionCacheStatistics { + // Size of the action cache in bytes. + // + // This is computed by the code that persists the action cache to disk and + // represents the size of the written files, which has no direct relation to + // the number of entries in the cache. + uint64 size_in_bytes = 1; + + // Time it took to save the action cache to disk. + uint64 save_time_in_ms = 2; + + // Reasons for not finding an action in the cache. + enum MissReason { + DIFFERENT_ACTION_KEY = 0; + DIFFERENT_DEPS = 1; + DIFFERENT_ENVIRONMENT = 2; + DIFFERENT_FILES = 3; + CORRUPTED_CACHE_ENTRY = 4; + NOT_CACHED = 5; + UNCONDITIONAL_EXECUTION = 6; + } + + // Detailed information for a particular miss reason. + message MissDetail { + MissReason reason = 1; + int32 count = 2; + } + + // Cache counters. + int32 hits = 3; + int32 misses = 4; + + // Breakdown of the cache misses based on the reasons behind them. + repeated MissDetail miss_details = 5; + + // Time it took to load the action cache from disk. Reported as 0 if the + // action cache has not been loaded in this invocation. + uint64 load_time_in_ms = 6; + + // NEXT TAG: 7 +} diff --git a/app/bazel_event_publisher_proto/proto/build_event_stream.proto b/app/bazel_event_publisher_proto/proto/build_event_stream.proto new file mode 100644 index 0000000000000..c33b8ba651405 --- /dev/null +++ b/app/bazel_event_publisher_proto/proto/build_event_stream.proto @@ -0,0 +1,1446 @@ +// Copyright 2016 The Bazel Authors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// LINT: LEGACY_NAMES + +syntax = "proto3"; + +package build_event_stream; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "package_load_metrics.proto"; +import "action_cache.proto"; +import "command_line.proto"; +import "failure_details.proto"; +import "invocation_policy.proto"; + +option java_package = "com.google.devtools.build.lib.buildeventstream"; +option java_outer_classname = "BuildEventStreamProtos"; + +// Identifier for a build event. It is deliberately structured to also provide +// information about which build target etc the event is related to. +// +// Events are chained via the event id as follows: each event has an id and a +// set of ids of children events such that apart from the initial event each +// event has an id that is mentioned as child id in an earlier event and a build +// invocation is complete if and only if all direct and indirect children of the +// initial event have been posted. +message BuildEventId { + // Generic identifier for a build event. This is the default type of + // BuildEventId, but should not be used outside testing; nevertheless, + // tools should handle build events with this kind of id gracefully. + message UnknownBuildEventId { + string details = 1; + } + + // Identifier of an event reporting progress. Those events are also used to + // chain in events that come early. + message ProgressId { + // Unique identifier. No assumption should be made about how the ids are + // assigned; the only meaningful operation on this field is test for + // equality. + int32 opaque_count = 1; + } + + // Identifier of an event indicating the beginning of a build; this will + // normally be the first event. + message BuildStartedId {} + + // Identifier on an event indicating the original commandline received by + // the bazel server. + message UnstructuredCommandLineId {} + + // Identifier on an event describing the commandline received by Bazel. + message StructuredCommandLineId { + // A title for this command line value, as there may be multiple. + // For example, a single invocation may wish to report both the literal and + // canonical command lines, and this label would be used to differentiate + // between both versions. + string command_line_label = 1; + } + + // Identifier of an event indicating the workspace status. + message WorkspaceStatusId {} + + // Identifier on an event reporting on the options included in the command + // line, both explicitly and implicitly. + message OptionsParsedId {} + + // Identifier of an event reporting that an external resource was fetched + // from. + message FetchId { + // The external resource that was fetched from. + string url = 1; + } + + // Identifier of an event indicating that a target pattern has been expanded + // further. 
+ // Messages of this shape are also used to describe parts of a pattern that + // have been skipped for some reason, if the actual expansion was still + // carried out (e.g., if keep_going is set). In this case, the + // pattern_skipped choice in the id field is to be made. + message PatternExpandedId { + repeated string pattern = 1; + } + + message WorkspaceConfigId {} + + message BuildMetadataId {} + + // Identifier of an event indicating that a target has been expanded by + // identifying for which configurations it should be built. + message TargetConfiguredId { + string label = 1; + + // If empty, the id refers to the expansion of the target. If not-empty, + // the id refers to the expansion of an aspect applied to the (already + // expanded) target. + // + // For example, when building an apple_binary that depends on proto_library + // "//:foo_proto", there will be two TargetConfigured events for + // "//:foo_proto": + // + // 1. An event with an empty aspect, corresponding to actions producing + // language-agnostic outputs from the proto_library; and + // 2. An event with aspect "ObjcProtoAspect", corresponding to Objective-C + // code generation. + string aspect = 2; + } + + // Identifier of an event introducing a named set of files (usually artifacts) + // to be referred to in later messages. + message NamedSetOfFilesId { + // Identifier of the file set; this is an opaque string valid only for the + // particular instance of the event stream. + string id = 1; + } + + // Identifier of an event introducing a configuration. + message ConfigurationId { + // Identifier of the configuration; users of the protocol should not make + // any assumptions about it having any structure, or equality of the + // identifier between different streams. + // + // A value of "none" means the null configuration. It is used for targets + // that are not configurable, for example, source files. + string id = 1; + } + + // Identifier of an event indicating that a target was built completely; this + // does not include running the test if the target is a test target. + message TargetCompletedId { + string label = 1; + + // The configuration for which the target was built. + ConfigurationId configuration = 3; + + // If empty, the id refers to the completion of the target. If not-empty, + // the id refers to the completion of an aspect applied to the (already + // completed) target. + // + // For example, when building an apple_binary that depends on proto_library + // "//:foo_proto", there will be two TargetCompleted events for + // "//:foo_proto": + // + // 1. An event with an empty aspect, corresponding to actions producing + // language-agnostic outputs from the proto_library; and + // 2. An event with aspect "ObjcProtoAspect", corresponding to Objective-C + // code generation. + string aspect = 2; + } + + // Identifier of an event reporting that an action was completed (not all + // actions are reported, only the ones that can be considered important; + // this includes all failed actions). + message ActionCompletedId { + string primary_output = 1; + // Optional, the label of the owner of the action, for reference. + string label = 2; + // Optional, the id of the configuration of the action owner. + ConfigurationId configuration = 3; + } + + // Identifier of an event reporting an event associated with an unconfigured + // label. Usually, this indicates a failure due to a missing input file.
In + // any case, it will report some form of error (i.e., the payload will be an + // Aborted event); there are no regular events using this identifier. The + // purpose of those events is to serve as the root cause of a failed target. + message UnconfiguredLabelId { + string label = 1; + } + + // Identifier of an event reporting an event associated with a configured + // label, usually a visibility error. In any case, an event with such an + // id will always report some form of error (i.e., the payload will be an + // Aborted event); there are no regular events using this identifier. + message ConfiguredLabelId { + string label = 1; + ConfigurationId configuration = 2; + } + + // Identifier of an event reporting on an individual test run. The label + // identifies the test that is reported about, the remaining fields are + // in such a way as to uniquely identify the action within a build. In fact, + // attempts for the same test, run, shard triple are counted sequentially, + // starting with 1. + message TestResultId { + string label = 1; + ConfigurationId configuration = 5; + int32 run = 2; + int32 shard = 3; + int32 attempt = 4; + } + + // Identifier of an event reporting progress of an individual test run. + message TestProgressId { + // The label of the target for the action. + string label = 1; + // The configuration under which the action is running. + ConfigurationId configuration = 2; + // The run number of the test action (e.g. for runs_per_test > 1). + int32 run = 3; + // For sharded tests, the shard number of the test action. + int32 shard = 4; + // The execution attempt number which may increase due to retries (e.g. for + // flaky tests). + int32 attempt = 5; + // An incrementing count used to differentiate TestProgressIds for the same + // test attempt. + int32 opaque_count = 6; + } + + // Identifier of an event reporting the summary of a test. + message TestSummaryId { + string label = 1; + ConfigurationId configuration = 2; + } + + // Identifier of an event reporting the summary of a target. + message TargetSummaryId { + string label = 1; + ConfigurationId configuration = 2; + } + + // Identifier of the BuildFinished event, indicating the end of a build. + message BuildFinishedId {} + + // Identifier of an event providing additional logs/statistics after + // completion of the build. + message BuildToolLogsId {} + + // Identifier of an event providing build metrics after completion + // of the build. + message BuildMetricsId {} + + // Identifier of an event providing convenience symlinks information. + message ConvenienceSymlinksIdentifiedId {} + + // Identifier of an event providing the ExecRequest of a run command. 
+ message ExecRequestId {} + + reserved 27; + + oneof id { + UnknownBuildEventId unknown = 1; + ProgressId progress = 2; + BuildStartedId started = 3; + UnstructuredCommandLineId unstructured_command_line = 11; + StructuredCommandLineId structured_command_line = 18; + WorkspaceStatusId workspace_status = 14; + OptionsParsedId options_parsed = 12; + FetchId fetch = 17; + ConfigurationId configuration = 15; + TargetConfiguredId target_configured = 16; + PatternExpandedId pattern = 4; + PatternExpandedId pattern_skipped = 10; + NamedSetOfFilesId named_set = 13; + TargetCompletedId target_completed = 5; + ActionCompletedId action_completed = 6; + UnconfiguredLabelId unconfigured_label = 19; + ConfiguredLabelId configured_label = 21; + TestResultId test_result = 8; + TestProgressId test_progress = 29; + TestSummaryId test_summary = 7; + TargetSummaryId target_summary = 26; + BuildFinishedId build_finished = 9; + BuildToolLogsId build_tool_logs = 20; + BuildMetricsId build_metrics = 22; + WorkspaceConfigId workspace = 23; + BuildMetadataId build_metadata = 24; + ConvenienceSymlinksIdentifiedId convenience_symlinks_identified = 25; + ExecRequestId exec_request = 28; + } +} + +// Payload of an event summarizing the progress of the build so far. Those +// events are also used to be parents of events where the more logical parent +// event cannot be posted yet as the needed information is not yet complete. +message Progress { + // The next chunk of stdout that bazel produced since the last progress event + // or the beginning of the build. + // Consumers that need to reason about the relative order of stdout and stderr + // can assume that stderr has been emitted before stdout if both are present, + // on a best-effort basis. + string stdout = 1; + + // The next chunk of stderr that bazel produced since the last progress event + // or the beginning of the build. + // Consumers that need to reason about the relative order of stdout and stderr + // can assume that stderr has been emitted before stdout if both are present, + // on a best-effort basis. + string stderr = 2; +} + +// Payload of an event indicating that an expected event will not come, as +// the build is aborted prematurely for some reason. +message Aborted { + enum AbortReason { + UNKNOWN = 0; + + // The user requested the build to be aborted (e.g., by hitting Ctrl-C). + USER_INTERRUPTED = 1; + + // The user requested that no analysis be performed. + NO_ANALYZE = 8; + + // The user requested that no build be carried out. + NO_BUILD = 9; + + // The build or target was aborted as a timeout was exceeded. + TIME_OUT = 2; + + // The build or target was aborted as some remote environment (e.g., for + // remote execution of actions) was not available in the expected way. + REMOTE_ENVIRONMENT_FAILURE = 3; + + // Failure due to reasons entirely internal to the build tool, i.e. an + // unexpected crash due to programmer error. + INTERNAL = 4; + + // A failure occurred in the loading phase of a target. + LOADING_FAILURE = 5; + + // A failure occurred in the analysis phase of a target. + ANALYSIS_FAILURE = 6; + + // Target build was skipped (e.g. due to incompatible CPU constraints). + SKIPPED = 7; + + // Build incomplete due to an earlier build failure (e.g. --keep_going was + // set to false, causing the build to end upon failure). + INCOMPLETE = 10; + + // The build tool ran out of memory and crashed.
+ OUT_OF_MEMORY = 11; + } + AbortReason reason = 1; + + // A human readable description with more details about the reason, where + // available and useful. + string description = 2; +} + +// Payload of an event indicating the beginning of a new build. Usually, events +// of this type start a new build-event stream. The target pattern requested +// to be built is contained in one of the announced child events; it is an +// invariant that precisely one of the announced child events has a non-empty +// target pattern. +message BuildStarted { + string uuid = 1; + + // Start of the build in ms since the epoch. + // + // Deprecated, use `start_time` instead. + // + // TODO(yannic): Remove. + int64 start_time_millis = 2 [deprecated = true]; + + // Start of the build. + google.protobuf.Timestamp start_time = 9; + + // Version of the build tool that is running. + string build_tool_version = 3; + + // A human-readable description of all the non-default option settings + string options_description = 4; + + // The name of the command that the user invoked. + string command = 5; + + // The working directory from which the build tool was invoked. + string working_directory = 6; + + // The directory of the workspace. + string workspace_directory = 7; + + // The process ID of the Bazel server. + int64 server_pid = 8; +} + +// Configuration related to the blaze workspace and output tree. +message WorkspaceConfig { + // The root of the local blaze exec root. All output files live underneath + // this at "blaze-out/". + string local_exec_root = 1; +} + +// Payload of an event reporting the command-line of the invocation as +// originally received by the server. Note that this is not the command-line +// given by the user, as the client adds information about the invocation, +// like name and relevant entries of rc-files and client environment variables. +// However, it does contain enough information to reproduce the build +// invocation. +message UnstructuredCommandLine { + repeated string args = 1; +} + +// Payload of an event reporting on the parsed options, grouped in various ways. +message OptionsParsed { + repeated string startup_options = 1; + repeated string explicit_startup_options = 2; + repeated string cmd_line = 3; + repeated string explicit_cmd_line = 4; + blaze.invocation_policy.InvocationPolicy invocation_policy = 5; + string tool_tag = 6; +} + +// Payload of an event indicating that an external resource was fetched. This +// event will only occur in streams where an actual fetch happened, not in ones +// where a cached copy of the entity to be fetched was used. +message Fetch { + bool success = 1; +} + +// Payload of an event reporting the workspace status. Key-value pairs can be +// provided by specifying the workspace_status_command to an executable that +// returns one key-value pair per line of output (key and value separated by a +// space). +message WorkspaceStatus { + message Item { + string key = 1; + string value = 2; + } + repeated Item item = 1; +} + +// Payload of an event reporting custom key-value metadata associated with the +// build. +message BuildMetadata { + // Custom metadata for the build. + map<string, string> metadata = 1; +} + +// Payload of an event reporting details of a given configuration. +message Configuration { + string mnemonic = 1; + string platform_name = 2; + string cpu = 3; + map<string, string> make_variable = 4; + // Whether this configuration is used for building tools. + bool is_tool = 5; +} + +// Payload of the event indicating the expansion of a target pattern.
+// The main information is in the chaining part: the id will contain the
+// target pattern that was expanded and the children id will contain the
+// target or target pattern it was expanded to.
+message PatternExpanded {
+  // Represents a test_suite target and the tests that it expanded to. Nested
+  // test suites are recursively expanded. The test labels only contain the
+  // final test targets, not any nested suites.
+  message TestSuiteExpansion {
+    // The label of the test_suite rule.
+    string suite_label = 1;
+    // Labels of the test targets included in the suite. Includes all tests
+    // in the suite regardless of any filters or negative patterns which may
+    // result in the test not actually being run.
+    repeated string test_labels = 2;
+  }
+
+  // All test suites requested via top-level target patterns. Does not
+  // include test suites whose label matched a negative pattern.
+  repeated TestSuiteExpansion test_suite_expansions = 1;
+}
+
+// Enumeration type characterizing the size of a test, as specified by the
+// test rule.
+enum TestSize {
+  UNKNOWN = 0;
+  SMALL = 1;
+  MEDIUM = 2;
+  LARGE = 3;
+  ENORMOUS = 4;
+}
+
+// Payload of the event indicating that the configurations for a target have
+// been identified. As with pattern expansion the main information is in the
+// chaining part: the id will contain the target that was configured and the
+// children id will contain the configured targets it was configured to.
+message TargetConfigured {
+  // The kind of target (e.g., "cc_library rule", "source file",
+  // "generated file") where the completion is reported.
+  string target_kind = 1;
+
+  // The size of the test, if the target is a test target. Unset otherwise.
+  TestSize test_size = 2;
+
+  // List of all tags associated with this target (for all possible
+  // configurations).
+  repeated string tag = 3;
+}
+
+message File {
+  // A sequence of prefixes to apply to the file name to construct a full
+  // path. In most but not all cases, there will be 3 entries:
+  //  1. A root output directory, e.g. "bazel-out"
+  //  2. A configuration mnemonic, e.g. "k8-fastbuild"
+  //  3. An output category, e.g. "genfiles"
+  repeated string path_prefix = 4;
+
+  // Identifier indicating the nature of the file (e.g., "stdout", "stderr")
+  string name = 1;
+
+  oneof file {
+    // A location where the contents of the file can be found. The string is
+    // encoded according to RFC2396.
+    string uri = 2;
+    // The contents of the file, if they are guaranteed to be short.
+    bytes contents = 3;
+    // The symlink target path, if the file is an unresolved symlink.
+    string symlink_target_path = 7;
+  }
+
+  // Digest of the file, using the build tool's configured digest algorithm,
+  // hex-encoded.
+  string digest = 5;
+
+  // Length of the file in bytes.
+  int64 length = 6;
+}
+
+// Payload of a message to describe a set of files, usually build artifacts,
+// to be referred to later by their name. In this way, files that occur
+// identically as outputs of several targets have to be named only once.
+message NamedSetOfFiles {
+  // Files that belong to this named set of files.
+  repeated File files = 1;
+
+  // Other named sets whose members also belong to this set.
+  repeated BuildEventId.NamedSetOfFilesId file_sets = 2;
+}
+
+// Payload of the event indicating the completion of an action. The main
+// purpose of posting those events is to provide details on the root cause of
+// a target failing; however, consumers of the build-event protocol must not
+// assume that only failed actions are posted.
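+// As a purely illustrative sketch (all values hypothetical), a failed
+// compile action might arrive as a payload like the following, in proto text
+// format:
+//
+//   success: false
+//   type: "CppCompile"
+//   exit_code: 1
+//   stderr { name: "stderr" uri: "file:///tmp/action.stderr" }
+//
+// so a consumer interested only in failures must still check the `success`
+// field rather than assume every such event reports a failure.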
+message ActionExecuted {
+  bool success = 1;
+
+  // The mnemonic of the action that was executed.
+  string type = 8;
+
+  // The exit code of the action, if it is available.
+  int32 exit_code = 2;
+
+  // Location where to find the standard output of the action
+  // (e.g., a file path).
+  File stdout = 3;
+
+  // Location where to find the standard error of the action
+  // (e.g., a file path).
+  File stderr = 4;
+
+  // Deprecated. This field is now present on ActionCompletedId.
+  string label = 5 [deprecated = true];
+
+  // Deprecated. This field is now present on ActionCompletedId.
+  BuildEventId.ConfigurationId configuration = 7 [deprecated = true];
+
+  // Primary output; only provided for successful actions.
+  File primary_output = 6;
+
+  // The command-line of the action, if the action is a command.
+  repeated string command_line = 9;
+
+  // List of paths to log files.
+  repeated File action_metadata_logs = 10;
+
+  // Only populated if success = false, and sometimes not even then.
+  failure_details.FailureDetail failure_detail = 11;
+
+  // Start of action execution, before any attempted execution begins.
+  google.protobuf.Timestamp start_time = 12;
+
+  // End of action execution, after all attempted execution completes.
+  google.protobuf.Timestamp end_time = 13;
+
+  // Additional details about action execution supplied by strategies. Bazel
+  // options will determine which strategy details are included when multiple
+  // strategies are involved in a single action's execution.
+  //
+  // The default type will be `tools.proto.SpawnExec` found in `spawn.proto`.
+  repeated google.protobuf.Any strategy_details = 14;
+}
+
+// Collection of all output files belonging to that output group.
+message OutputGroup {
+  // Ids of fields that have been removed.
+  reserved 2;
+
+  // Name of the output group.
+  string name = 1;
+
+  // List of file sets that belong to this output group as well.
+  repeated BuildEventId.NamedSetOfFilesId file_sets = 3;
+
+  // Indicates that one or more of the output group's files were not built
+  // successfully (the generating action failed).
+  bool incomplete = 4;
+
+  // Inlined files that belong to this output group, requested via
+  // --build_event_inline_output_groups.
+  repeated File inline_files = 5;
+}
+
+// Payload of the event indicating the completion of a target. The target is
+// specified in the id. If the target failed the root causes are provided as
+// children events.
+message TargetComplete {
+  bool success = 1;
+
+  // The kind of target (e.g., "cc_library rule", "source file",
+  // "generated file") where the completion is reported.
+  // Deprecated: use the target_kind field in TargetConfigured instead.
+  string target_kind = 5 [deprecated = true];
+
+  // The size of the test, if the target is a test target. Unset otherwise.
+  // Deprecated: use the test_size field in TargetConfigured instead.
+  TestSize test_size = 6 [deprecated = true];
+
+  // The output files are arranged by their output group. If an output file
+  // is part of multiple output groups, it appears once in each output
+  // group.
+  repeated OutputGroup output_group = 2;
+
+  // Temporarily, also report the important outputs directly. This is only to
+  // help existing clients transition to the deduplicated representation;
+  // new clients should not use it.
+  repeated File important_output = 4 [deprecated = true];
+
+  // Report output artifacts (referenced transitively via output_group) which
+  // emit directories instead of singleton files. These directory_output entries
+  // will never include a uri.
+  repeated File directory_output = 8;
+
+  // List of tags associated with this configured target.
+  repeated string tag = 3;
+
+  // The timeout specified for test actions under this configured target.
+  //
+  // Deprecated, use `test_timeout` instead.
+  //
+  // TODO(yannic): Remove.
+  int64 test_timeout_seconds = 7 [deprecated = true];
+
+  // The timeout specified for test actions under this configured target.
+  google.protobuf.Duration test_timeout = 10;
+
+  // Failure information about the target, only populated if success is
+  // false, and sometimes not even then. Equal to one of the ActionExecuted
+  // failure_detail fields for one of the root cause ActionExecuted events.
+  failure_details.FailureDetail failure_detail = 9;
+}
+
+enum TestStatus {
+  NO_STATUS = 0;
+  PASSED = 1;
+  FLAKY = 2;
+  TIMEOUT = 3;
+  FAILED = 4;
+  INCOMPLETE = 5;
+  REMOTE_FAILURE = 6;
+  FAILED_TO_BUILD = 7;
+  TOOL_HALTED_BEFORE_TESTING = 8;
+}
+
+// Payload of events reporting on an individual test action.
+message TestResult {
+  reserved 1;
+
+  // The status of this test.
+  TestStatus status = 5;
+
+  // Additional details about the status of the test. This is intended for
+  // user display and must not be parsed.
+  string status_details = 9;
+
+  // True, if the reported attempt is taken from the tool's local cache.
+  bool cached_locally = 4;
+
+  // Time in milliseconds since the epoch at which the test attempt was
+  // started.
+  // Note: for cached test results, this time can be before the start of the
+  // build.
+  //
+  // Deprecated, use `test_attempt_start` instead.
+  //
+  // TODO(yannic): Remove.
+  int64 test_attempt_start_millis_epoch = 6 [deprecated = true];
+
+  // Time at which the test attempt was started.
+  // Note: for cached test results, this time can be before the start of the
+  // build.
+  google.protobuf.Timestamp test_attempt_start = 10;
+
+  // Time the test took to run. For locally cached results, this is the time
+  // the cached invocation took when it was invoked.
+  //
+  // Deprecated, use `test_attempt_duration` instead.
+  //
+  // TODO(yannic): Remove.
+  int64 test_attempt_duration_millis = 3 [deprecated = true];
+
+  // Time the test took to run. For locally cached results, this is the time
+  // the cached invocation took when it was invoked.
+  google.protobuf.Duration test_attempt_duration = 11;
+
+  // Files (logs, test.xml, undeclared outputs, etc) generated by that test
+  // action.
+  repeated File test_action_output = 2;
+
+  // Warnings generated by that test action.
+  repeated string warning = 7;
+
+  // Message providing optional metadata on the execution of the test action,
+  // if available.
+  message ExecutionInfo {
+    // Deprecated, use TargetComplete.test_timeout instead.
+    int32 timeout_seconds = 1 [deprecated = true];
+
+    // Name of the strategy to execute this test action (e.g., "local",
+    // "remote").
+    string strategy = 2;
+
+    // True, if the reported attempt was a cache hit in a remote cache.
+    bool cached_remotely = 6;
+
+    // The exit code of the test action.
+    int32 exit_code = 7;
+
+    // The hostname of the machine where the test action was executed (in
+    // case of remote execution), if known.
+    string hostname = 3;
+
+    // Represents a hierarchical timing breakdown of an activity.
+    // The top level time should be the total time of the activity.
+    // Invariant: `time` >= sum of `time`s of all direct children.
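+    // For example (hypothetical numbers): a breakdown whose total `time` is
+    // 10s may have direct children with `time` 4s and 5s, since
+    // 10s >= 4s + 5s; any remainder is time not attributed to a child.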
+    message TimingBreakdown {
+      repeated TimingBreakdown child = 1;
+      string name = 2;
+      // Deprecated, use `time` instead.
+      //
+      // TODO(yannic): Remove.
+      int64 time_millis = 3 [deprecated = true];
+      google.protobuf.Duration time = 4;
+    }
+    TimingBreakdown timing_breakdown = 4;
+
+    message ResourceUsage {
+      string name = 1;
+      int64 value = 2;
+    }
+    repeated ResourceUsage resource_usage = 5;
+  }
+  ExecutionInfo execution_info = 8;
+}
+
+// Event payload providing information about an active, individual test run.
+message TestProgress {
+  // Identifies a resource that may provide information about an active test
+  // run. The resource is not necessarily a file and may need to be queried
+  // for information. The URI is not guaranteed to be available after the
+  // test completes. The string is encoded according to RFC2396.
+  string uri = 1;
+}
+
+// Payload of the event summarizing a test.
+message TestSummary {
+  // Wrapper around BlazeTestStatus to support importing that enum to proto3.
+  // Overall status of test, accumulated over all runs, shards, and attempts.
+  TestStatus overall_status = 5;
+
+  // Total number of shard attempts.
+  // E.g., if a target has 4 runs, 3 shards, each with 2 attempts,
+  // then total_run_count will be 4*3*2 = 24.
+  int32 total_run_count = 1;
+
+  // Value of runs_per_test for the test.
+  int32 run_count = 10;
+
+  // Number of attempts.
+  // If there are a different number of attempts per shard, the highest
+  // attempt count across all shards for each run is used.
+  int32 attempt_count = 15;
+
+  // Number of shards.
+  int32 shard_count = 11;
+
+  // Path to logs of passed runs.
+  repeated File passed = 3;
+
+  // Path to logs of failed runs.
+  repeated File failed = 4;
+
+  // Total number of cached test actions.
+  int32 total_num_cached = 6;
+
+  // When the test first started running.
+  //
+  // Deprecated, use `first_start_time` instead.
+  //
+  // TODO(yannic): Remove.
+  int64 first_start_time_millis = 7 [deprecated = true];
+
+  // When the test first started running.
+  google.protobuf.Timestamp first_start_time = 13;
+
+  // When the last test action completed.
+  //
+  // Deprecated, use `last_stop_time` instead.
+  //
+  // TODO(yannic): Remove.
+  int64 last_stop_time_millis = 8 [deprecated = true];
+
+  // When the last test action completed.
+  google.protobuf.Timestamp last_stop_time = 14;
+
+  // The total runtime of the test.
+  //
+  // Deprecated, use `total_run_duration` instead.
+  //
+  // TODO(yannic): Remove.
+  int64 total_run_duration_millis = 9 [deprecated = true];
+
+  // The total runtime of the test.
+  google.protobuf.Duration total_run_duration = 12;
+}
+
+// Payload of the event summarizing a target (test or non-test).
+message TargetSummary {
+  // Conjunction of TargetComplete events for this target, including aspects.
+  bool overall_build_success = 1;
+
+  // Repeats TestSummary's overall_status if available.
+  TestStatus overall_test_status = 2;
+}
+
+// Event indicating the end of a build.
+message BuildFinished {
+  // Exit code of a build. The possible values correspond to the predefined
+  // codes in bazel's lib.ExitCode class, as well as any custom exit code a
+  // module might define. The predefined exit codes are subject to change
+  // (but rarely do) and are not part of the public API.
+  //
+  // A build was successful iff ExitCode.code equals 0.
+  message ExitCode {
+    // The name of the exit code.
+    string name = 1;
+
+    // The exit code.
+    int32 code = 2;
+  }
+
+  // Things that happened during the build that could be of interest.
+  message AnomalyReport {
+    // Whether the build was suspended at any time during the build.
+    // Examples of suspensions are SIGSTOP or the hardware being put to
+    // sleep. If was_suspended is true, then most of the timings for this
+    // build are suspect.
+    // NOTE: This is no longer set and is deprecated.
+    bool was_suspended = 1;
+  }
+
+  // Whether the build succeeded or failed.
+  bool overall_success = 1 [deprecated = true];
+
+  // The overall status of the build. A build was successful iff
+  // ExitCode.code equals 0.
+  ExitCode exit_code = 3;
+
+  // End of the build in ms since the epoch.
+  //
+  // Deprecated, use `finish_time` instead.
+  //
+  // TODO(yannic): Remove.
+  int64 finish_time_millis = 2 [deprecated = true];
+
+  // End of the build.
+  google.protobuf.Timestamp finish_time = 5;
+
+  AnomalyReport anomaly_report = 4 [deprecated = true];
+
+  // Only populated if success = false, and sometimes not even then.
+  failure_details.FailureDetail failure_detail = 6;
+}
+
+message BuildMetrics {
+  message ActionSummary {
+    // The total number of actions created and registered during the build,
+    // including both aspects and configured targets. This metric includes
+    // unused actions that were constructed but not executed during this
+    // build. It does not include actions that were created on prior builds
+    // that are still valid, even if those actions had to be re-executed on
+    // this build. For the total number of actions that would be created if
+    // this invocation were "clean", see BuildGraphMetrics below.
+    int64 actions_created = 1;
+
+    // The total number of actions created this build just by configured
+    // targets. Used mainly to allow consumers of actions_created, which used
+    // to not include aspects' actions, to normalize across the Blaze release
+    // that switched actions_created to include all created actions.
+    int64 actions_created_not_including_aspects = 3;
+
+    // The total number of actions executed during the build. This includes
+    // any remote cache hits, but excludes local action cache hits.
+    int64 actions_executed = 2;
+
+    message ActionData {
+      string mnemonic = 1;
+
+      // The total number of actions of this type executed during the build.
+      // As above, includes remote cache hits but excludes local action cache
+      // hits.
+      int64 actions_executed = 2;
+
+      // When the first action of this type started being executed, in
+      // milliseconds from the epoch.
+      int64 first_started_ms = 3;
+
+      // When the last action of this type ended being executed, in
+      // milliseconds from the epoch.
+      int64 last_ended_ms = 4;
+
+      // Accumulated CPU time of all spawned actions of this type.
+      // This is only set if all the actions reported a time.
+      google.protobuf.Duration system_time = 5;
+      google.protobuf.Duration user_time = 6;
+
+      // The total number of actions of this type registered during the
+      // build.
+      int64 actions_created = 7;
+    }
+    // Contains the top N actions by number of actions executed.
+    repeated ActionData action_data = 4;
+
+    // Deprecated. The total number of remote cache hits.
+    int64 remote_cache_hits = 5 [deprecated = true];
+
+    message RunnerCount {
+      string name = 1;
+      int32 count = 2;
+      string exec_kind = 3;
+    }
+    repeated RunnerCount runner_count = 6;
+
+    blaze.ActionCacheStatistics action_cache_statistics = 7;
+  }
+  ActionSummary action_summary = 1;
+
+  message MemoryMetrics {
+    // Size of the JVM heap post build in bytes. This is only collected if
+    // --memory_profile is set, since it forces a full GC.
+    int64 used_heap_size_post_build = 1;
+
+    // Size of the peak JVM heap size in bytes post GC. Note that this
+    // reports 0 if there was no major GC during the build.
+    int64 peak_post_gc_heap_size = 2;
+
+    // Size of the peak tenured space JVM heap size in bytes post GC. Note
+    // that this reports 0 if there was no major GC during the build.
+    int64 peak_post_gc_tenured_space_heap_size = 4;
+
+    message GarbageMetrics {
+      // Type of garbage collected, e.g. G1 Old Gen.
+      string type = 1;
+      // Number of bytes of garbage of the given type collected during this
+      // invocation.
+      int64 garbage_collected = 2;
+    }
+
+    repeated GarbageMetrics garbage_metrics = 3;
+  }
+  MemoryMetrics memory_metrics = 2;
+
+  message TargetMetrics {
+    // DEPRECATED
+    // No longer populated. It never measured what it was supposed to
+    // (targets loaded): it counted targets that were analyzed even if the
+    // underlying package had not changed.
+    // TODO(janakr): rename and remove.
+    int64 targets_loaded = 1;
+
+    // Number of targets/aspects configured during this build. Does not
+    // include targets/aspects that were configured on prior builds on this
+    // server and were cached. See BuildGraphMetrics below if you need that.
+    int64 targets_configured = 2;
+
+    // Number of configured targets analyzed during this build. Does not
+    // include aspects. Used mainly to allow consumers of targets_configured,
+    // which used to not include aspects, to normalize across the Blaze
+    // release that switched targets_configured to include aspects.
+    int64 targets_configured_not_including_aspects = 3;
+  }
+  TargetMetrics target_metrics = 3;
+
+  message PackageMetrics {
+    // Number of BUILD files (aka packages) successfully loaded during this
+    // build.
+    //
+    // [For Bazel binaries built at source states] Before Dec 2021, this
+    // value was the number of packages attempted to be loaded, for a
+    // particular definition of "attempted".
+    //
+    // After Dec 2021, this value would sometimes overcount because the same
+    // package could sometimes be attempted to be loaded multiple times due
+    // to memory pressure.
+    //
+    // After Feb 2022, this value is the number of packages successfully
+    // loaded.
+    int64 packages_loaded = 1;
+
+    // Loading time metrics per package.
+    repeated devtools.build.lib.packages.metrics.PackageLoadMetrics
+        package_load_metrics = 2;
+  }
+  PackageMetrics package_metrics = 4;
+
+  message TimingMetrics {
+    // For Skymeld, it's possible that
+    // analysis_phase_time_in_ms + execution_phase_time_in_ms >= wall_time_in_ms
+    //
+    // The CPU time in milliseconds consumed during this build.
+    int64 cpu_time_in_ms = 1;
+    // The elapsed wall time in milliseconds during this build.
+    int64 wall_time_in_ms = 2;
+    // The elapsed wall time in milliseconds during the analysis phase.
+    // When analysis and execution phases are interleaved, this measures the
+    // elapsed time from the first analysis work to the last.
+    int64 analysis_phase_time_in_ms = 3;
+    // The elapsed wall time in milliseconds during the execution phase.
+    // When analysis and execution phases are interleaved, this measures the
+    // elapsed time from the first action execution (excluding workspace
+    // status actions) to the last.
+    int64 execution_phase_time_in_ms = 4;
+
+    // The elapsed wall time in milliseconds until the first action execution
+    // started (excluding workspace status actions).
+    int64 actions_execution_start_in_ms = 5;
+  }
+  TimingMetrics timing_metrics = 5;
+
+  message CumulativeMetrics {
+    // One-indexed number of "analyses" the server has run, including the
+    // current one. Will be incremented for every build/test/cquery/etc.
+    // command that reaches the analysis phase.
+    int32 num_analyses = 11;
+    // One-indexed number of "builds" the server has run, including the
+    // current one. Will be incremented for every build/test/run/etc. command
+    // that reaches the execution phase.
+    int32 num_builds = 12;
+  }
+
+  CumulativeMetrics cumulative_metrics = 6;
+
+  message ArtifactMetrics {
+    reserved 1;
+
+    message FilesMetric {
+      int64 size_in_bytes = 1;
+      int32 count = 2;
+    }
+
+    // Measures all source files newly read this build. Does not include
+    // unchanged sources on incremental builds.
+    FilesMetric source_artifacts_read = 2;
+    // Measures all output artifacts from executed actions. This includes
+    // actions that were cached locally (via the action cache) or remotely
+    // (via a remote cache or executor), but does *not* include outputs of
+    // actions that were cached internally in Skyframe.
+    FilesMetric output_artifacts_seen = 3;
+    // Measures all output artifacts from actions that were cached locally
+    // via the action cache. These artifacts were already present on disk at
+    // the start of the build. Does not include Skyframe-cached actions'
+    // outputs.
+    FilesMetric output_artifacts_from_action_cache = 4;
+    // Measures all artifacts that belong to a top-level output group. Does
+    // not deduplicate, so if there are two top-level targets in this build
+    // that share an artifact, it will be counted twice.
+    FilesMetric top_level_artifacts = 5;
+  }
+
+  ArtifactMetrics artifact_metrics = 7;
+
+  // Data about the evaluation of Skyfunctions.
+  message EvaluationStat {
+    // Name of the Skyfunction.
+    string skyfunction_name = 1;
+    // How many times a given operation was carried out on a Skyfunction.
+    int64 count = 2;
+  }
+
+  // Information about the size and shape of the build graph. Some fields may
+  // not be populated if Bazel was able to skip steps due to caching.
+  message BuildGraphMetrics {
+    // How many configured targets/aspects were in this build, including any
+    // that were analyzed on a prior build and are still valid. May not be
+    // populated if analysis phase was fully cached. Note: for historical
+    // reasons this includes input/output files and other configured targets
+    // that do not actually have associated actions.
+    int32 action_lookup_value_count = 1;
+    // How many configured targets alone were in this build: always at most
+    // action_lookup_value_count. Useful mainly for historical comparisons to
+    // TargetMetrics.targets_configured, which used to not count aspects.
+    // This also includes configured targets that do not have associated
+    // actions.
+    int32 action_lookup_value_count_not_including_aspects = 5;
+    // How many actions belonged to the configured targets/aspects above. It
+    // may not be necessary to execute all of these actions to build the
+    // requested targets. May not be populated if analysis phase was fully
+    // cached.
+    int32 action_count = 2;
+    // How many actions belonged to configured targets: always at most
+    // action_count. Useful mainly for historical comparisons to
+    // ActionMetrics.actions_created, which used to not count aspects'
+    // actions.
+    int32 action_count_not_including_aspects = 6;
+    // How many "input file" configured targets there were: one per source
+    // file.
+    // Should agree with artifact_metrics.source_artifacts_read.count above.
+    int32 input_file_configured_target_count = 7;
+    // How many "output file" configured targets there were: output files
+    // that are targets (not implicit outputs).
+    int32 output_file_configured_target_count = 8;
+    // How many "other" configured targets there were (like alias,
+    // package_group, and other non-rule non-file configured targets).
+    int32 other_configured_target_count = 9;
+    // How many artifacts are outputs of the above actions. May not be
+    // populated if analysis phase was fully cached.
+    int32 output_artifact_count = 3;
+    // How many Skyframe nodes there are in memory at the end of the build.
+    // This may underestimate the number of nodes when running with
+    // memory-saving settings or with Skybuild, and may overestimate if there
+    // are nodes from prior evaluations still in the cache.
+    int32 post_invocation_skyframe_node_count = 4;
+    // Number of SkyValues that were dirtied during the build. Dirtied nodes
+    // are those that transitively depend on a node that changed by itself
+    // (e.g. one representing a file in the file system).
+    repeated EvaluationStat dirtied_values = 10;
+    // Number of SkyValues that changed by themselves. For example, when a
+    // file on the file system changes, the SkyValue representing it will
+    // change.
+    repeated EvaluationStat changed_values = 11;
+    // Number of SkyValues that were built. This means that they were
+    // evaluated and were found to have changed from their previous version.
+    repeated EvaluationStat built_values = 12;
+    // Number of SkyValues that were evaluated and found clean, i.e. equal to
+    // their previous version.
+    repeated EvaluationStat cleaned_values = 13;
+    // Number of evaluations to build SkyValues. This includes restarted
+    // evaluations, which means there can be multiple evaluations per built
+    // SkyValue. Subtract built_values from this number to get the number of
+    // restarted evaluations.
+    repeated EvaluationStat evaluated_values = 17;
+
+    // For SkyKeys in 'done values' where the SkyValue is of type
+    // RuleConfiguredTargetValue, we pull those out separately and report the
+    // ruleClass and action count.
+    message RuleClassCount {
+      // Unique key for the rule class.
+      string key = 1;
+
+      // String name of the rule_class (not guaranteed unique).
+      string rule_class = 2;
+
+      // How many rule instances of this type were seen.
+      uint64 count = 3;
+
+      // How many actions were created by this rule class.
+      uint64 action_count = 4;
+    }
+    repeated RuleClassCount rule_class = 14;
+
+    // For SkyKeys whose function name is ASPECT, break out that information.
+    message AspectCount {
+      // Unique key for Aspect.
+      string key = 1;
+
+      // Usually the same as above, but can differ in some cases.
+      string aspect_name = 2;
+
+      // Number of aspects created of this type.
+      uint64 count = 3;
+
+      // Number of actions created by aspects of this type.
+      uint64 action_count = 4;
+    }
+    repeated AspectCount aspect = 15;
+
+    // Removed due to overlap with EvaluationStat.
+    reserved 16;
+  }
+
+  BuildGraphMetrics build_graph_metrics = 8;
+
+  // Information about all workers that were alive during the invocation.
+  message WorkerMetrics {
+    // Deprecated. Use worker_ids instead of this field.
+    int32 worker_id = 1 [deprecated = true];
+
+    // Ids of workers. Could be multiple in the case of multiplex workers.
+    repeated uint32 worker_ids = 8;
+    // Worker process id. If there is no process for the worker, this equals
+    // zero.
+    uint32 process_id = 2;
+    // Mnemonic of running worker.
+    string mnemonic = 3;
+    // Multiplex or singleplex worker.
+    bool is_multiplex = 4;
+    // Whether the worker uses the sandbox file system.
+    bool is_sandbox = 5;
+    // TODO(b/300067854): Deprecate since all worker metrics should have
+    // their WorkerStats set.
+    bool is_measurable = 6;
+    // Hash value of worker key. Needed to distinguish worker pools with the
+    // same mnemonic but with different worker keys.
+    int64 worker_key_hash = 9;
+
+    WorkerStatus worker_status = 10;
+
+    enum WorkerStatus {
+      // Used to indicate a worker instance where the process has not been
+      // created yet. In reality this isn't logged, but leaving this here as
+      // a possible option in the future.
+      NOT_STARTED = 0;
+      ALIVE = 1;
+      KILLED_DUE_TO_MEMORY_PRESSURE = 2;
+      // Indicates that the worker process was killed due to a reason unknown
+      // to Bazel at the point of measurement; if a known cause (below) comes
+      // along later on, this field will be updated.
+      KILLED_UNKNOWN = 3;
+      KILLED_DUE_TO_INTERRUPTED_EXCEPTION = 4;
+      KILLED_DUE_TO_IO_EXCEPTION = 5;
+      KILLED_DUE_TO_USER_EXEC_EXCEPTION = 6;
+    }
+
+    optional failure_details.Worker.Code code = 12;
+
+    int64 actions_executed = 11;
+
+    int64 prior_actions_executed = 13;
+
+    // Information collected from worker at some point.
+    message WorkerStats {
+      // Epoch unix time of collection of metrics.
+      int64 collect_time_in_ms = 1;
+      // Memory usage of worker process at the end of the build.
+      int32 worker_memory_in_kb = 2;
+      // Memory usage of the worker process prior to the invocation.
+      int32 prior_worker_memory_in_kb = 4;
+      // Epoch unix time of last action started on specific worker.
+      int64 last_action_start_time_in_ms = 3;
+    }
+
+    // Combined workers statistics.
+    repeated WorkerStats worker_stats = 7;
+  }
+
+  repeated WorkerMetrics worker_metrics = 9;
+
+  // Information about host network.
+  message NetworkMetrics {
+    // Information for all the network traffic on the host machine during the
+    // invocation.
+    message SystemNetworkStats {
+      // Total bytes sent during the invocation.
+      uint64 bytes_sent = 1;
+      // Total bytes received during the invocation.
+      uint64 bytes_recv = 2;
+      // Total packets sent during the invocation.
+      uint64 packets_sent = 3;
+      // Total packets received during the invocation.
+      uint64 packets_recv = 4;
+      // Peak bytes/sec sent during the invocation.
+      uint64 peak_bytes_sent_per_sec = 5;
+      // Peak bytes/sec received during the invocation.
+      uint64 peak_bytes_recv_per_sec = 6;
+      // Peak packets/sec sent during the invocation.
+      uint64 peak_packets_sent_per_sec = 7;
+      // Peak packets/sec received during the invocation.
+      uint64 peak_packets_recv_per_sec = 8;
+    }
+
+    SystemNetworkStats system_network_stats = 1;
+  }
+
+  NetworkMetrics network_metrics = 10;
+
+  // Information about worker pool actions.
+  message WorkerPoolMetrics {
+    // Statistics of the worker pool, per worker pool hash; effectively a map
+    // from worker pool hash to statistics.
+    repeated WorkerPoolStats worker_pool_stats = 1;
+
+    message WorkerPoolStats {
+      // Hash of the worker pool these stats are for. Contains information
+      // about startup flags.
+      int32 hash = 1;
+      // Mnemonic of the workers these stats are for.
+      string mnemonic = 2;
+      // Number of workers created during a build.
+      int64 created_count = 3;
+      // Number of workers destroyed during a build (sum of all workers
+      // destroyed by eviction, UserExecException, IoException,
+      // InterruptedException and unknown reasons below).
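+      // (Illustrative numbers: evicted_count 2 + user_exec 0 + io 1 +
+      // interrupted 0 + unknown 1 would give destroyed_count = 4.)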
+      int64 destroyed_count = 4;
+      // Number of workers evicted during a build.
+      int64 evicted_count = 5;
+      // Number of workers destroyed due to UserExecExceptions.
+      int64 user_exec_exception_destroyed_count = 6;
+      // Number of workers destroyed due to IoExceptions.
+      int64 io_exception_destroyed_count = 7;
+      // Number of workers destroyed due to InterruptedExceptions.
+      int64 interrupted_exception_destroyed_count = 8;
+      // Number of workers destroyed due to an unknown reason.
+      int64 unknown_destroyed_count = 9;
+      // Number of workers alive at the end of the build.
+      int64 alive_count = 10;
+    }
+  }
+
+  WorkerPoolMetrics worker_pool_metrics = 11;
+
+  // Information about dynamic execution.
+  message DynamicExecutionMetrics {
+    message RaceStatistics {
+      // Mnemonic of the action.
+      string mnemonic = 1;
+      // Name of runner of local branch.
+      string local_runner = 2;
+      // Name of runner of remote branch.
+      string remote_runner = 3;
+      // Number of wins of local branch in race.
+      int32 local_wins = 4;
+      // Number of wins of remote branch in race.
+      int32 remote_wins = 5;
+    }
+    // Race statistics grouped by mnemonic, local_runner, remote_runner.
+    repeated RaceStatistics race_statistics = 1;
+  }
+
+  DynamicExecutionMetrics dynamic_execution_metrics = 12;
+}
+
+// Event providing additional statistics/logs after completion of the build.
+message BuildToolLogs {
+  repeated File log = 1;
+}
+
+// Event describing all convenience symlinks (i.e., workspace symlinks) to be
+// created or deleted once the execution phase has begun. Note that this
+// event does not say anything about whether or not the build tool actually
+// executed these filesystem operations; it only says what logical operations
+// should be performed. This event is emitted exactly once per build; if no
+// symlinks are to be modified, the event is still emitted with empty
+// contents.
+message ConvenienceSymlinksIdentified {
+  repeated ConvenienceSymlink convenience_symlinks = 1;
+}
+
+// The message that contains what type of action to perform on a given path
+// and target of a symlink.
+message ConvenienceSymlink {
+  enum Action {
+    UNKNOWN = 0;
+
+    // Indicates a symlink should be created, or overwritten if it already
+    // exists.
+    CREATE = 1;
+
+    // Indicates a symlink should be deleted if it already exists.
+    DELETE = 2;
+  }
+
+  // The path of the symlink to be created or deleted, absolute or relative
+  // to the workspace, creating any directories necessary. If a symlink
+  // already exists at that location, then it should be replaced by a symlink
+  // pointing to the new target.
+  string path = 1;
+
+  // The operation we are performing on the symlink.
+  Action action = 2;
+
+  // If action is CREATE, this is the target path (relative to the output
+  // base) that the symlink should point to.
+  //
+  // If action is DELETE, this field is not set.
+  string target = 3;
+}
+
+// Event that contains the ExecRequest of a run command announced only after
+// a successful build and before trying to execute the requested
+// command-line.
+message ExecRequestConstructed {
+  bytes working_directory = 1;
+  repeated bytes argv = 2;
+  repeated EnvironmentVariable environment_variable = 3;
+  repeated bytes environment_variable_to_clear = 4;
+  bool should_exec = 5;
+}
+
+// An environment variable provided by a run command after a successful
+// build.
+message EnvironmentVariable {
+  bytes name = 1;
+  bytes value = 2;
+}
+
+// Message describing a build event. Events will have an identifier that
+// is unique within a given build invocation; they also announce follow-up
+// events as children. More details, which are specific to the kind of event
+// that is observed, are provided in the payload. More options for the
+// payload might be added in the future.
+message BuildEvent {
+  reserved 11, 19;
+  BuildEventId id = 1;
+  repeated BuildEventId children = 2;
+  bool last_message = 20;
+  oneof payload {
+    Progress progress = 3;
+    Aborted aborted = 4;
+    BuildStarted started = 5;
+    UnstructuredCommandLine unstructured_command_line = 12;
+    command_line.CommandLine structured_command_line = 22;
+    OptionsParsed options_parsed = 13;
+    WorkspaceStatus workspace_status = 16;
+    Fetch fetch = 21;
+    Configuration configuration = 17;
+    PatternExpanded expanded = 6;
+    TargetConfigured configured = 18;
+    ActionExecuted action = 7;
+    NamedSetOfFiles named_set_of_files = 15;
+    TargetComplete completed = 8;
+    TestResult test_result = 10;
+    TestProgress test_progress = 30;
+    TestSummary test_summary = 9;
+    TargetSummary target_summary = 28;
+    BuildFinished finished = 14;
+    BuildToolLogs build_tool_logs = 23;
+    BuildMetrics build_metrics = 24;
+    WorkspaceConfig workspace_info = 25;
+    BuildMetadata build_metadata = 26;
+    ConvenienceSymlinksIdentified convenience_symlinks_identified = 27;
+    ExecRequestConstructed exec_request = 29;
+  }
+}
diff --git a/app/bazel_event_publisher_proto/proto/command_line.proto b/app/bazel_event_publisher_proto/proto/command_line.proto
new file mode 100644
index 0000000000000..181f5d63147b9
--- /dev/null
+++ b/app/bazel_event_publisher_proto/proto/command_line.proto
@@ -0,0 +1,102 @@
+// Copyright 2017 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+package command_line;
+
+// option java_api_version = 2;
+option java_package = "com.google.devtools.build.lib.runtime.proto";
+
+import "option_filters.proto";
+
+// Representation of a Bazel command line.
+message CommandLine {
+  // A title for this command line value, to differentiate it from others.
+  // In particular, a single invocation may wish to report both the literal
+  // and canonical command lines, and this label would be used to
+  // differentiate between both versions. This is a string for flexibility.
+  string command_line_label = 1;
+
+  // A Bazel command line is made of distinct parts. For example,
+  //   `bazel --nomaster_bazelrc test --nocache_test_results //foo:aTest`
+  // has the executable "bazel", a startup flag, a command "test", a command
+  // flag, and a test target. There could be many more flags and targets, or
+  // none (`bazel info` for example), but the basic structure is there. The
+  // command line should be broken down into these logical sections here.
+  repeated CommandLineSection sections = 2;
+}
+
+// A section of the Bazel command line.
+message CommandLineSection {
+  // The name of this section, such as "startup_option" or "command".
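+  // As a purely illustrative sketch (labels hypothetical) for the example
+  // command line in CommandLine above, the sections might be labeled:
+  // "executable" (chunk list ["bazel"]), "startup options" (option list with
+  // --nomaster_bazelrc), "command" (chunk list ["test"]), "command options"
+  // (option list with --nocache_test_results), and a final chunk list with
+  // the target ["//foo:aTest"].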
+  string section_label = 1;
+
+  oneof section_type {
+    // Sections with non-options, such as the list of targets or the command,
+    // should use simple string chunks.
+    ChunkList chunk_list = 2;
+
+    // Startup and command options are lists of options and belong here.
+    OptionList option_list = 3;
+  }
+}
+
+// Wrapper to allow a list of strings in the "oneof" section_type.
+message ChunkList {
+  repeated string chunk = 1;
+}
+
+// Wrapper to allow a list of options in the "oneof" section_type.
+message OptionList {
+  repeated Option option = 1;
+}
+
+// A single command line option.
+//
+// This represents the option itself, but does not take into account the type
+// of option or how the parser interpreted it. If this option is part of a
+// command line that represents the actual input that Bazel received, it
+// would, for example, include expansion flags as they are. However, if this
+// option represents the canonical form of the command line, with the values
+// as Bazel understands them, then the expansion flag, which has no value,
+// would not appear, and the flags it expands to would.
+message Option {
+  // How the option looks with the option and its value combined. Depending
+  // on the purpose of this command line report, this could be the canonical
+  // form, or the way that the flag was set.
+  //
+  // Some examples: this might be `--foo=bar` form, or `--foo bar` with a
+  // space; for boolean flags, `--nobaz` is accepted on top of `--baz=false`
+  // and other negating values, or for a positive value, the unqualified
+  // `--baz` form is also accepted. This could also be a short `-b`, if the
+  // flag has an abbreviated form.
+  string combined_form = 1;
+
+  // The canonical name of the option, without the preceding dashes.
+  string option_name = 2;
+
+  // The value of the flag, or unset for flags that do not take values.
+  // Especially for boolean flags, this should be in canonical form; the
+  // combined_form field above gives room for showing the flag as it was set,
+  // if that is preferred.
+  string option_value = 3;
+
+  // This flag's tagged effects. See OptionEffectTag's java documentation for
+  // details.
+  repeated options.OptionEffectTag effect_tags = 4;
+
+  // Metadata about the flag. See OptionMetadataTag's java documentation for
+  // details.
+  repeated options.OptionMetadataTag metadata_tags = 5;
+}
diff --git a/app/bazel_event_publisher_proto/proto/failure_details.proto b/app/bazel_event_publisher_proto/proto/failure_details.proto
new file mode 100644
index 0000000000000..7df8ca4ea13e7
--- /dev/null
+++ b/app/bazel_event_publisher_proto/proto/failure_details.proto
@@ -0,0 +1,1378 @@
+// Copyright 2020 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file's messages describe any failure(s) that occurred during Bazel's
The intent is to provide more detail to a Bazel client +// than is conveyed with an exit code, to help those clients decide how to +// respond to, or classify, a failure. + +syntax = "proto3"; + +package failure_details; + +option java_package = "com.google.devtools.build.lib.server"; + +import "google/protobuf/descriptor.proto"; + +message FailureDetailMetadata { + uint32 exit_code = 1; +} + + extend google.protobuf.EnumValueOptions { + FailureDetailMetadata metadata = 1078; +} + +// The FailureDetail message type is designed such that consumers can extract a +// basic classification of a FailureDetail message even if the consumer was +// built with a stale definition. This forward compatibility is implemented via +// conventions on FailureDetail and its submessage types, as follows. +// +// *** FailureDetail field numbers +// +// Field numbers 1 through 100 (inclusive) are reserved for generally applicable +// values. Any number of these fields may be set on a FailureDetail message. +// +// Field numbers 101 through 10,000 (inclusive) are reserved for use inside the +// "oneof" structure. Only one of these values should be set on a FailureDetail +// message. +// +// Additional fields numbers are unlikely to be needed, but, for extreme future- +// proofing purposes, field numbers 10,001 through 1,000,000 (inclusive; +// excluding protobuf's reserved range 19000 through 19999) are reserved for +// additional generally applicable values. +// +// *** FailureDetail's "oneof" submessages +// +// Each field in the "oneof" structure is a submessage corresponding to a +// category of failure. +// +// In each of these submessage types, field number 1 is an enum whose values +// correspond to a subcategory of the failure. Generally, the enum's constant +// which maps to 0 should be interpreted as "unspecified", though this is not +// required. +// +// *** Recommended forward compatibility strategy +// +// The recommended forward compatibility strategy is to reduce a FailureDetail +// message to a pair of integers. +// +// The first integer corresponds to the field number of the submessage set +// inside FailureDetail's "oneof", which corresponds with the failure's +// category. +// +// The second integer corresponds to the value of the enum at field number 1 +// within that submessage, which corresponds with the failure's subcategory. +// +// WARNING: This functionality is experimental and should not be relied on at +// this time. +// TODO(mschaller): remove experimental warning +message FailureDetail { + // A short human-readable message describing the failure, for debugging. + // + // This value is *not* intended to be used algorithmically. + string message = 1; + + // Reserved for future generally applicable values. Any of these may be set. 
+  reserved 2 to 100;
+
+  oneof category {
+    Interrupted interrupted = 101;
+    ExternalRepository external_repository = 103;
+    BuildProgress build_progress = 104;
+    RemoteOptions remote_options = 106;
+    ClientEnvironment client_environment = 107;
+    Crash crash = 108;
+    SymlinkForest symlink_forest = 110;
+    PackageOptions package_options = 114;
+    RemoteExecution remote_execution = 115;
+    Execution execution = 116;
+    Workspaces workspaces = 117;
+    CrashOptions crash_options = 118;
+    Filesystem filesystem = 119;
+    ExecutionOptions execution_options = 121;
+    Command command = 122;
+    Spawn spawn = 123;
+    GrpcServer grpc_server = 124;
+    CanonicalizeFlags canonicalize_flags = 125;
+    BuildConfiguration build_configuration = 126;
+    InfoCommand info_command = 127;
+    MemoryOptions memory_options = 129;
+    Query query = 130;
+    LocalExecution local_execution = 132;
+    ActionCache action_cache = 134;
+    FetchCommand fetch_command = 135;
+    SyncCommand sync_command = 136;
+    Sandbox sandbox = 137;
+    IncludeScanning include_scanning = 139;
+    TestCommand test_command = 140;
+    ActionQuery action_query = 141;
+    TargetPatterns target_patterns = 142;
+    CleanCommand clean_command = 144;
+    ConfigCommand config_command = 145;
+    ConfigurableQuery configurable_query = 146;
+    DumpCommand dump_command = 147;
+    HelpCommand help_command = 148;
+    MobileInstall mobile_install = 150;
+    ProfileCommand profile_command = 151;
+    RunCommand run_command = 152;
+    VersionCommand version_command = 153;
+    PrintActionCommand print_action_command = 154;
+    WorkspaceStatus workspace_status = 158;
+    JavaCompile java_compile = 159;
+    ActionRewinding action_rewinding = 160;
+    CppCompile cpp_compile = 161;
+    StarlarkAction starlark_action = 162;
+    NinjaAction ninja_action = 163;
+    DynamicExecution dynamic_execution = 164;
+    FailAction fail_action = 166;
+    SymlinkAction symlink_action = 167;
+    CppLink cpp_link = 168;
+    LtoAction lto_action = 169;
+    TestAction test_action = 172;
+    Worker worker = 173;
+    Analysis analysis = 174;
+    PackageLoading package_loading = 175;
+    Toolchain toolchain = 177;
+    StarlarkLoading starlark_loading = 179;
+    ExternalDeps external_deps = 181;
+    DiffAwareness diff_awareness = 182;
+    ModCommand mod_command = 183;
+    BuildReport build_report = 184;
+    Skyfocus skyfocus = 185;
+    RemoteAnalysisCaching remote_analysis_caching = 186;
+  }
+
+  reserved 102;  // For internal use
+  reserved 105;  // For internal use
+  reserved 109;  // For internal use
+  reserved 111 to 113;  // For internal use
+  reserved 120;  // For internal use
+  reserved 128;  // For internal use
+  reserved 131;  // For internal use
+  reserved 133;  // For internal use
+  reserved 138;  // For internal use
+  reserved 143;  // For internal use
+  reserved 149;  // For internal use
+  reserved 155 to 157;  // For internal use
+  reserved 165;  // For internal use
+  reserved 170 to 171;  // For internal use
+  reserved 176;  // For internal use
+  reserved 178;  // For internal use
+  reserved 180;  // For internal use
+}
+
+message Interrupted {
+  enum Code {
+    // Unknown interrupt. Avoid using this code; instead use INTERRUPTED.
+    INTERRUPTED_UNKNOWN = 0 [(metadata) = { exit_code: 8 }];
+
+    // Command was interrupted (cancelled).
+    INTERRUPTED = 28 [(metadata) = { exit_code: 8 }];
+
+    // The following more specific interrupt codes have been deprecated and
+    // consolidated into INTERRUPTED.
+    DEPRECATED_BUILD = 4 [(metadata) = { exit_code: 8 }];
+    DEPRECATED_BUILD_COMPLETION = 5 [(metadata) = { exit_code: 8 }];
+    DEPRECATED_PACKAGE_LOADING_SYNC = 6 [(metadata) = { exit_code: 8 }];
+    DEPRECATED_EXECUTOR_COMPLETION = 7 [(metadata) = { exit_code: 8 }];
+    DEPRECATED_COMMAND_DISPATCH = 8 [(metadata) = { exit_code: 8 }];
+    DEPRECATED_INFO_ITEM = 9 [(metadata) = { exit_code: 8 }];
+    DEPRECATED_AFTER_QUERY = 10 [(metadata) = { exit_code: 8 }];
+    DEPRECATED_FETCH_COMMAND = 17 [(metadata) = { exit_code: 8 }];
+    DEPRECATED_SYNC_COMMAND = 18 [(metadata) = { exit_code: 8 }];
+    DEPRECATED_CLEAN_COMMAND = 20 [(metadata) = { exit_code: 8 }];
+    DEPRECATED_MOBILE_INSTALL_COMMAND = 21 [(metadata) = { exit_code: 8 }];
+    DEPRECATED_QUERY = 22 [(metadata) = { exit_code: 8 }];
+    DEPRECATED_RUN_COMMAND = 23 [(metadata) = { exit_code: 8 }];
+    DEPRECATED_OPTIONS_PARSING = 27 [(metadata) = { exit_code: 8 }];
+
+    reserved 1 to 3;  // For internal use
+    reserved 11 to 16;  // For internal use
+    reserved 19;  // For internal use
+    reserved 24 to 26;  // For internal use
+  }
+
+  Code code = 1;
+}
+
+message Spawn {
+  enum Code {
+    SPAWN_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    // See the SpawnResult.Status Java enum for definitions of the following
+    // Spawn failure codes.
+    NON_ZERO_EXIT = 1 [(metadata) = { exit_code: 1 }];
+    TIMEOUT = 2 [(metadata) = { exit_code: 1 }];
+    // Note: Spawn OUT_OF_MEMORY leads to a BUILD_FAILURE exit_code because
+    // the build tool itself did not run out of memory.
+    OUT_OF_MEMORY = 3 [(metadata) = { exit_code: 1 }];
+    EXECUTION_FAILED = 4 [(metadata) = { exit_code: 34 }];
+    EXECUTION_DENIED = 5 [(metadata) = { exit_code: 1 }];
+    REMOTE_CACHE_FAILED = 6 [(metadata) = { exit_code: 34 }];
+    COMMAND_LINE_EXPANSION_FAILURE = 7 [(metadata) = { exit_code: 1 }];
+    EXEC_IO_EXCEPTION = 8 [(metadata) = { exit_code: 36 }];
+    INVALID_TIMEOUT = 9 [(metadata) = { exit_code: 1 }];
+    INVALID_REMOTE_EXECUTION_PROPERTIES = 10 [(metadata) = { exit_code: 1 }];
+    NO_USABLE_STRATEGY_FOUND = 11 [(metadata) = { exit_code: 1 }];
+    // TODO(b/138456686): this code should be deprecated when SpawnResult is
+    // refactored to prohibit undetailed failures
+    UNSPECIFIED_EXECUTION_FAILURE = 12 [(metadata) = { exit_code: 1 }];
+    FORBIDDEN_INPUT = 13 [(metadata) = { exit_code: 1 }];
+    // This also includes other remote cache errors, not just evictions,
+    // if --incompatible_remote_use_new_exit_code_for_lost_inputs is set.
+    // TODO: Rename it to a more general name when
+    // --experimental_remote_cache_eviction_retries is moved to
+    // non-experimental.
+    REMOTE_CACHE_EVICTED = 14 [(metadata) = { exit_code: 39 }];
+    SPAWN_LOG_IO_EXCEPTION = 15 [(metadata) = { exit_code: 36 }];
+  }
+  Code code = 1;
+
+  // For Codes describing generic failure to spawn (e.g., EXECUTION_FAILED
+  // and EXECUTION_DENIED) the `catastrophic` field may be set to true,
+  // indicating a failure that immediately terminated the entire build tool.
+  bool catastrophic = 2;
+
+  // If Code is NON_ZERO_EXIT, the `spawn_exit_code` field may be set to the
+  // non-zero exit code returned by the spawned process to the OS.
+  //
+  // NOTE: This field must not be confused with the build tool's overall
+  // exit code.
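+  // For example (illustrative values): a compiler invocation that exits
+  // with status 2 yields spawn_exit_code = 2, while the build tool's overall
+  // exit code for the NON_ZERO_EXIT case is the one declared in its metadata
+  // above, i.e. exit_code: 1.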
+  int32 spawn_exit_code = 3;
+}
+
+message ExternalRepository {
+  enum Code {
+    EXTERNAL_REPOSITORY_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    OVERRIDE_DISALLOWED_MANAGED_DIRECTORIES = 1 [(metadata) = { exit_code: 2 }];
+    BAD_DOWNLOADER_CONFIG = 2 [(metadata) = { exit_code: 2 }];
+    REPOSITORY_MAPPING_RESOLUTION_FAILED = 3 [(metadata) = { exit_code: 37 }];
+    CREDENTIALS_INIT_FAILURE = 4 [(metadata) = { exit_code: 2 }];
+  }
+  Code code = 1;
+  // Additional data could include external repository names.
+}
+
+message BuildProgress {
+  enum Code {
+    BUILD_PROGRESS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    OUTPUT_INITIALIZATION = 3 [(metadata) = { exit_code: 36 }];
+    BES_RUNS_PER_TEST_LIMIT_UNSUPPORTED = 4 [(metadata) = { exit_code: 2 }];
+    BES_LOCAL_WRITE_ERROR = 5 [(metadata) = { exit_code: 36 }];
+    BES_INITIALIZATION_ERROR = 6 [(metadata) = { exit_code: 36 }];
+    BES_UPLOAD_TIMEOUT_ERROR = 7 [(metadata) = { exit_code: 38 }];
+    BES_FILE_WRITE_TIMEOUT = 8 [(metadata) = { exit_code: 38 }];
+    BES_FILE_WRITE_IO_ERROR = 9 [(metadata) = { exit_code: 38 }];
+    BES_FILE_WRITE_INTERRUPTED = 10 [(metadata) = { exit_code: 38 }];
+    BES_FILE_WRITE_CANCELED = 11 [(metadata) = { exit_code: 38 }];
+    BES_FILE_WRITE_UNKNOWN_ERROR = 12 [(metadata) = { exit_code: 38 }];
+    BES_UPLOAD_LOCAL_FILE_ERROR = 13 [(metadata) = { exit_code: 38 }];
+    BES_STREAM_NOT_RETRYING_FAILURE = 14 [(metadata) = { exit_code: 45 }];
+    BES_STREAM_COMPLETED_WITH_UNACK_EVENTS_ERROR = 15
+        [(metadata) = { exit_code: 45 }];
+    BES_STREAM_COMPLETED_WITH_UNSENT_EVENTS_ERROR = 16
+        [(metadata) = { exit_code: 45 }];
+    BES_STREAM_COMPLETED_WITH_REMOTE_ERROR = 19
+        [(metadata) = { exit_code: 45 }];
+    BES_UPLOAD_RETRY_LIMIT_EXCEEDED_FAILURE = 17
+        [(metadata) = { exit_code: 38 }];
+    reserved 1, 2, 18, 20;  // For internal use
+  }
+  Code code = 1;
+  // Additional data could include the build progress upload endpoint.
+}
+
+message RemoteOptions {
+  enum Code {
+    REMOTE_OPTIONS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    REMOTE_DEFAULT_EXEC_PROPERTIES_LOGIC_ERROR = 1
+        [(metadata) = { exit_code: 2 }];
+    // Credentials could not be read from the requested
+    // file/socket/process/etc.
+    CREDENTIALS_READ_FAILURE = 2 [(metadata) = { exit_code: 36 }];
+    // Credentials could not be written to a shared, temporary file.
+    CREDENTIALS_WRITE_FAILURE = 3 [(metadata) = { exit_code: 36 }];
+    DOWNLOADER_WITHOUT_GRPC_CACHE = 4 [(metadata) = { exit_code: 2 }];
+    EXECUTION_WITH_INVALID_CACHE = 5 [(metadata) = { exit_code: 2 }];
+
+    reserved 6;
+  }
+
+  Code code = 1;
+}
+
+message ClientEnvironment {
+  enum Code {
+    CLIENT_ENVIRONMENT_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    CLIENT_CWD_MALFORMED = 1 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+message Crash {
+  enum Code {
+    CRASH_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    CRASH_OOM = 1 [(metadata) = { exit_code: 33 }];
+  }
+
+  Code code = 1;
+
+  // The cause chain of the crash, with the outermost throwable first.
+  // Limited to the outermost exception and at most 4 nested causes (so, max
+  // size of 5).
+  repeated Throwable causes = 2;
+
+  // True when the root cause of the crash was not an OutOfMemoryError, but
+  // CRASH_OOM was chosen because an OutOfMemoryError was detected prior to
+  // the crash.
+  bool oom_detector_override = 3;
+}
+
+message Throwable {
+  // The class name of the java.lang.Throwable.
+  string throwable_class = 1;
+  // The throwable's message.
+  string message = 2;
+  // The result of calling toString on the deepest (i.e. closest to the
+  // throwable's construction site) 1000 (or fewer) StackTraceElements.
+  // Unstructured to simplify string matching.
+  repeated string stack_trace = 3;
+}
+
+message SymlinkForest {
+  enum Code {
+    SYMLINK_FOREST_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    TOPLEVEL_OUTDIR_PACKAGE_PATH_CONFLICT = 1 [(metadata) = { exit_code: 2 }];
+    TOPLEVEL_OUTDIR_USED_AS_SOURCE = 2 [(metadata) = { exit_code: 2 }];
+    CREATION_FAILED = 3 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+message BuildReport {
+  enum Code {
+    BUILD_REPORT_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    BUILD_REPORT_UPLOADER_NEEDS_PACKAGE_PATHS = 1
+        [(metadata) = { exit_code: 36 }];
+    BUILD_REPORT_WRITE_FAILED = 2 [(metadata) = { exit_code: 36 }];
+  }
+
+  Code code = 1;
+  // Additional data for partial failures might include the build report that
+  // failed to be written.
+}
+
+// Failure details for errors produced when using Skyfocus.
+message Skyfocus {
+  enum Code {
+    // The defined working set cannot be used for the focused targets. For
+    // example, this happens when the intersection of the working set and the
+    // transitive closure of the focused target is empty.
+    INVALID_WORKING_SET = 0 [(metadata) = { exit_code: 2 }];
+    // The user needs to augment their working set to include the new
+    // file(s).
+    NON_WORKING_SET_CHANGE = 1 [(metadata) = { exit_code: 2 }];
+    CONFIGURATION_CHANGE = 2 [(metadata) = { exit_code: 2 }];
+    DISALLOWED_OPERATION_ON_FOCUSED_GRAPH = 3 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for errors produced during remote analysis caching.
+message RemoteAnalysisCaching {
+  enum Code {
+    REMOTE_ANALYSIS_CACHING_UNKNOWN = 0 [(metadata) = { exit_code: 1 }];
+    SERIALIZED_FRONTIER_PROFILE_FAILED = 1 [(metadata) = { exit_code: 1 }];
+    PROJECT_FILE_NOT_FOUND = 2 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+message PackageOptions {
+  enum Code {
+    reserved 2, 3;  // For internal use
+
+    PACKAGE_OPTIONS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    PACKAGE_PATH_INVALID = 1 [(metadata) = { exit_code: 2 }];
+    NONSINGLETON_PACKAGE_PATH = 4 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+message RemoteExecution {
+  // The association of some of these options with exit code 2, "command line
+  // error", seems sketchy. Especially worth reconsidering are the channel
+  // init failure modes, which can correspond to failures occurring in gRPC
+  // setup. These all correspond with current Bazel behavior.
+ enum Code { + REMOTE_EXECUTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + CAPABILITIES_QUERY_FAILURE = 1 [(metadata) = { exit_code: 34 }]; + CREDENTIALS_INIT_FAILURE = 2 [(metadata) = { exit_code: 2 }]; + CACHE_INIT_FAILURE = 3 [(metadata) = { exit_code: 2 }]; + RPC_LOG_FAILURE = 4 [(metadata) = { exit_code: 2 }]; + EXEC_CHANNEL_INIT_FAILURE = 5 [(metadata) = { exit_code: 2 }]; + CACHE_CHANNEL_INIT_FAILURE = 6 [(metadata) = { exit_code: 2 }]; + DOWNLOADER_CHANNEL_INIT_FAILURE = 7 [(metadata) = { exit_code: 2 }]; + LOG_DIR_CLEANUP_FAILURE = 8 [(metadata) = { exit_code: 36 }]; + CLIENT_SERVER_INCOMPATIBLE = 9 [(metadata) = { exit_code: 34 }]; + DOWNLOADED_INPUTS_DELETION_FAILURE = 10 [(metadata) = { exit_code: 34 }]; + REMOTE_DOWNLOAD_OUTPUTS_MINIMAL_WITHOUT_INMEMORY_DOTD = 11 + [(metadata) = { exit_code: 2 }]; + REMOTE_DOWNLOAD_OUTPUTS_MINIMAL_WITHOUT_INMEMORY_JDEPS = 12 + [(metadata) = { exit_code: 2 }]; + INCOMPLETE_OUTPUT_DOWNLOAD_CLEANUP_FAILURE = 13 + [(metadata) = { exit_code: 36 }]; + REMOTE_DEFAULT_PLATFORM_PROPERTIES_PARSE_FAILURE = 14 + [(metadata) = { exit_code: 1 }]; + ILLEGAL_OUTPUT = 15 [(metadata) = { exit_code: 1 }]; + INVALID_EXEC_AND_PLATFORM_PROPERTIES = 16 [(metadata) = { exit_code: 1 }]; + TOPLEVEL_OUTPUTS_DOWNLOAD_FAILURE = 17 [(metadata) = { exit_code: 34 }]; + } + + Code code = 1; +} + +message Execution { + enum Code { + EXECUTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + EXECUTION_LOG_INITIALIZATION_FAILURE = 1 [(metadata) = { exit_code: 2 }]; + EXECUTION_LOG_WRITE_FAILURE = 2 [(metadata) = { exit_code: 36 }]; + EXECROOT_CREATION_FAILURE = 3 [(metadata) = { exit_code: 36 }]; + TEMP_ACTION_OUTPUT_DIRECTORY_DELETION_FAILURE = 4 + [(metadata) = { exit_code: 36 }]; + TEMP_ACTION_OUTPUT_DIRECTORY_CREATION_FAILURE = 5 + [(metadata) = { exit_code: 36 }]; + PERSISTENT_ACTION_OUTPUT_DIRECTORY_CREATION_FAILURE = 6 + [(metadata) = { exit_code: 36 }]; + LOCAL_OUTPUT_DIRECTORY_SYMLINK_FAILURE = 7 [(metadata) = { exit_code: 36 }]; + reserved 8; // was ACTION_INPUT_FILES_MISSING, now mostly + // SOURCE_INPUT_MISSING + LOCAL_TEMPLATE_EXPANSION_FAILURE = 9 [(metadata) = { exit_code: 36 }]; + INPUT_DIRECTORY_CHECK_IO_EXCEPTION = 10 [(metadata) = { exit_code: 36 }]; + EXTRA_ACTION_OUTPUT_CREATION_FAILURE = 11 [(metadata) = { exit_code: 36 }]; + TEST_RUNNER_IO_EXCEPTION = 12 [(metadata) = { exit_code: 36 }]; + FILE_WRITE_IO_EXCEPTION = 13 [(metadata) = { exit_code: 36 }]; + TEST_OUT_ERR_IO_EXCEPTION = 14 [(metadata) = { exit_code: 36 }]; + SYMLINK_TREE_MANIFEST_COPY_IO_EXCEPTION = 15 + [(metadata) = { exit_code: 36 }]; + SYMLINK_TREE_MANIFEST_LINK_IO_EXCEPTION = 16 + [(metadata) = { exit_code: 36 }]; + SYMLINK_TREE_CREATION_IO_EXCEPTION = 17 [(metadata) = { exit_code: 36 }]; + SYMLINK_TREE_CREATION_COMMAND_EXCEPTION = 18 + [(metadata) = { exit_code: 36 }]; + ACTION_INPUT_READ_IO_EXCEPTION = 19 [(metadata) = { exit_code: 36 }]; + ACTION_NOT_UP_TO_DATE = 20 [(metadata) = { exit_code: 1 }]; + PSEUDO_ACTION_EXECUTION_PROHIBITED = 21 [(metadata) = { exit_code: 1 }]; + DISCOVERED_INPUT_DOES_NOT_EXIST = 22 [(metadata) = { exit_code: 36 }]; + ACTION_OUTPUTS_DELETION_FAILURE = 23 [(metadata) = { exit_code: 1 }]; + ACTION_OUTPUTS_NOT_CREATED = 24 [(metadata) = { exit_code: 1 }]; + ACTION_FINALIZATION_FAILURE = 25 [(metadata) = { exit_code: 1 }]; + ACTION_INPUT_LOST = 26 [(metadata) = { exit_code: 1 }]; + FILESYSTEM_CONTEXT_UPDATE_FAILURE = 27 [(metadata) = { exit_code: 1 }]; + ACTION_OUTPUT_CLOSE_FAILURE = 28 [(metadata) = { exit_code: 1 }]; + INPUT_DISCOVERY_IO_EXCEPTION = 29 
[(metadata) = { exit_code: 1 }]; + TREE_ARTIFACT_DIRECTORY_CREATION_FAILURE = 30 + [(metadata) = { exit_code: 1 }]; + ACTION_OUTPUT_DIRECTORY_CREATION_FAILURE = 31 + [(metadata) = { exit_code: 1 }]; + ACTION_FS_OUTPUT_DIRECTORY_CREATION_FAILURE = 32 + [(metadata) = { exit_code: 1 }]; + ACTION_FS_OUT_ERR_DIRECTORY_CREATION_FAILURE = 33 + [(metadata) = { exit_code: 1 }]; + NON_ACTION_EXECUTION_FAILURE = 34 [(metadata) = { exit_code: 1 }]; + CYCLE = 35 [(metadata) = { exit_code: 1 }]; + SOURCE_INPUT_MISSING = 36 [(metadata) = { exit_code: 1 }]; + UNEXPECTED_EXCEPTION = 37 [(metadata) = { exit_code: 1 }]; + reserved 38; + SOURCE_INPUT_IO_EXCEPTION = 39 [(metadata) = { exit_code: 1 }]; + SYMLINK_TREE_DELETION_IO_EXCEPTION = 40 [(metadata) = { exit_code: 36 }]; + } + + Code code = 1; +} + +// Failure details about Bazel's WORKSPACE features. +message Workspaces { + enum Code { + WORKSPACES_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + WORKSPACES_LOG_INITIALIZATION_FAILURE = 1 [(metadata) = { exit_code: 2 }]; + WORKSPACES_LOG_WRITE_FAILURE = 2 [(metadata) = { exit_code: 36 }]; + + // See `managed_directories` in + // https://bazel.build/rules/lib/globals#workspace. + ILLEGAL_WORKSPACE_FILE_SYMLINK_WITH_MANAGED_DIRECTORIES = 3 + [(metadata) = { exit_code: 1 }]; + WORKSPACE_FILE_READ_FAILURE_WITH_MANAGED_DIRECTORIES = 4 + [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message CrashOptions { + enum Code { + CRASH_OPTIONS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + reserved 1; // For internal use + } + + Code code = 1; +} + +message Filesystem { + enum Code { + FILESYSTEM_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + reserved 1; + reserved 2; + EMBEDDED_BINARIES_ENUMERATION_FAILURE = 3 [(metadata) = { exit_code: 36 }]; + SERVER_PID_TXT_FILE_READ_FAILURE = 4 [(metadata) = { exit_code: 36 }]; + SERVER_FILE_WRITE_FAILURE = 5 [(metadata) = { exit_code: 36 }]; + DEFAULT_DIGEST_HASH_FUNCTION_INVALID_VALUE = 6 + [(metadata) = { exit_code: 2 }]; + FILESYSTEM_JNI_NOT_AVAILABLE = 8 [(metadata) = { exit_code: 36 }]; + + reserved 7, 9, 10; // For internal use + } + + Code code = 1; +} + +message ExecutionOptions { + // All numerical exit code associations correspond to pre-existing Bazel + // behavior. These associations are suspicious: + // - REQUESTED_STRATEGY_INCOMPATIBLE_WITH_SANDBOXING (instead: 2?) + // - DEPRECATED_LOCAL_RESOURCES_USED (instead: 2?) + // TODO(b/138456686): Revise these after the (intentionally non-breaking) + // initial rollout of FailureDetail-based encoding. + enum Code { + EXECUTION_OPTIONS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + INVALID_STRATEGY = 3 [(metadata) = { exit_code: 2 }]; + REQUESTED_STRATEGY_INCOMPATIBLE_WITH_SANDBOXING = 4 + [(metadata) = { exit_code: 36 }]; + DEPRECATED_LOCAL_RESOURCES_USED = 5 [(metadata) = { exit_code: 36 }]; + INVALID_CYCLIC_DYNAMIC_STRATEGY = 6 [(metadata) = { exit_code: 36 }]; + RESTRICTION_UNMATCHED_TO_ACTION_CONTEXT = 7 [(metadata) = { exit_code: 2 }]; + REMOTE_FALLBACK_STRATEGY_NOT_ABSTRACT_SPAWN = 8 + [(metadata) = { exit_code: 2 }]; + STRATEGY_NOT_FOUND = 9 [(metadata) = { exit_code: 2 }]; + DYNAMIC_STRATEGY_NOT_SANDBOXED = 10 [(metadata) = { exit_code: 2 }]; + MULTIPLE_EXECUTION_LOG_FORMATS = 11 [(metadata) = { exit_code: 2 }]; + + reserved 1, 2; // For internal use + } + + Code code = 1; +} + +message Command { + enum Code { + // The name "COMMAND_UNKNOWN" might reasonably be interpreted as "command + // not found". The enum's default value should represent a lack of knowledge + // about the failure instead. 
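+    // (Editor's sketch, assuming the FailureDetail wrapper defined earlier
+    // in this file exposes this category through a `command` field: a
+    // mistyped command would surface as, in text format,
+    //
+    //   message: "Command 'biuld' not found."
+    //   command { code: COMMAND_NOT_FOUND }
+    //
+    // which a client then maps to exit code 2 via COMMAND_NOT_FOUND's
+    // metadata option below.)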
+ COMMAND_FAILURE_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + COMMAND_NOT_FOUND = 1 [(metadata) = { exit_code: 2 }]; + ANOTHER_COMMAND_RUNNING = 2 [(metadata) = { exit_code: 9 }]; + PREVIOUSLY_SHUTDOWN = 3 [(metadata) = { exit_code: 36 }]; + STARLARK_CPU_PROFILE_FILE_INITIALIZATION_FAILURE = 4 + [(metadata) = { exit_code: 36 }]; + STARLARK_CPU_PROFILING_INITIALIZATION_FAILURE = 5 + [(metadata) = { exit_code: 36 }]; + STARLARK_CPU_PROFILE_FILE_WRITE_FAILURE = 6 + [(metadata) = { exit_code: 36 }]; + INVOCATION_POLICY_PARSE_FAILURE = 7 [(metadata) = { exit_code: 2 }]; + INVOCATION_POLICY_INVALID = 8 [(metadata) = { exit_code: 2 }]; + OPTIONS_PARSE_FAILURE = 9 [(metadata) = { exit_code: 2 }]; + STARLARK_OPTIONS_PARSE_FAILURE = 10 [(metadata) = { exit_code: 2 }]; + ARGUMENTS_NOT_RECOGNIZED = 11 [(metadata) = { exit_code: 2 }]; + NOT_IN_WORKSPACE = 12 [(metadata) = { exit_code: 2 }]; + reserved 13; + IN_OUTPUT_DIRECTORY = 14 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message GrpcServer { + enum Code { + GRPC_SERVER_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + GRPC_SERVER_NOT_COMPILED_IN = 1 [(metadata) = { exit_code: 37 }]; + SERVER_BIND_FAILURE = 2 [(metadata) = { exit_code: 1 }]; + BAD_COOKIE = 3 [(metadata) = { exit_code: 36 }]; + NO_CLIENT_DESCRIPTION = 4 [(metadata) = { exit_code: 36 }]; + reserved 5; // For internal use + } + + Code code = 1; +} + +message CanonicalizeFlags { + enum Code { + CANONICALIZE_FLAGS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + FOR_COMMAND_INVALID = 1 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +// Failure modes described by this category pertain to the Bazel invocation +// configuration consumed by Bazel's analysis phase. This category is not +// intended as a grab-bag for all Bazel flag value constraint violations, which +// instead generally belong in the category for the subsystem whose flag values +// participate in the constraint. +message BuildConfiguration { + enum Code { + BUILD_CONFIGURATION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + PLATFORM_MAPPING_EVALUATION_FAILURE = 1 [(metadata) = { exit_code: 2 }]; + PLATFORM_MAPPINGS_FILE_IS_DIRECTORY = 2 [(metadata) = { exit_code: 1 }]; + PLATFORM_MAPPINGS_FILE_NOT_FOUND = 3 [(metadata) = { exit_code: 1 }]; + TOP_LEVEL_CONFIGURATION_CREATION_FAILURE = 4 + [(metadata) = { exit_code: 1 }]; + INVALID_CONFIGURATION = 5 [(metadata) = { exit_code: 2 }]; + INVALID_BUILD_OPTIONS = 6 [(metadata) = { exit_code: 2 }]; + MULTI_CPU_PREREQ_UNMET = 7 [(metadata) = { exit_code: 2 }]; + HEURISTIC_INSTRUMENTATION_FILTER_INVALID = 8 + [(metadata) = { exit_code: 2 }]; + CYCLE = 9 [(metadata) = { exit_code: 2 }]; + CONFLICTING_CONFIGURATIONS = 10 [(metadata) = { exit_code: 2 }]; + // This can come from either an invalid user-specified option or a + // configuration transition. There's no sure-fire way to distinguish the two + // possibilities in Bazel, so we go with the more straightforward + // command-line error exit code 2. + INVALID_OUTPUT_DIRECTORY_MNEMONIC = 11 [(metadata) = { exit_code: 2 }]; + CONFIGURATION_DISCARDED_ANALYSIS_CACHE = 12 [(metadata) = { exit_code: 2 }]; + // Failure modes specific to PROJECT.scl files. + INVALID_PROJECT = 13 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message InfoCommand { + // The distinction between a failure to write a single info item and a failure + // to write them all seems sketchy. Why do they have different exit codes? + // This reflects current Bazel behavior, but deserves more thought. 
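+  // (Illustrative note, not upstream text: `bazel info release` writes one
+  // requested info block, and a failed write exits with 7, while bare
+  // `bazel info` writes every key, and a failed write exits with 36, per
+  // the metadata below.)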
+ enum Code { + INFO_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + TOO_MANY_KEYS = 1 [(metadata) = { exit_code: 2 }]; + KEY_NOT_RECOGNIZED = 2 [(metadata) = { exit_code: 2 }]; + INFO_BLOCK_WRITE_FAILURE = 3 [(metadata) = { exit_code: 7 }]; + ALL_INFO_WRITE_FAILURE = 4 [(metadata) = { exit_code: 36 }]; + } + + Code code = 1; +} + +message MemoryOptions { + enum Code { + MEMORY_OPTIONS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + // Deprecated: validation is now implemented by the option converter. + DEPRECATED_EXPERIMENTAL_OOM_MORE_EAGERLY_THRESHOLD_INVALID_VALUE = 1 + [(metadata) = { exit_code: 2 }, deprecated = true]; + // Deprecated: no tenured collectors found is now a crash on startup. + DEPRECATED_EXPERIMENTAL_OOM_MORE_EAGERLY_NO_TENURED_COLLECTORS_FOUND = 2 + [(metadata) = { exit_code: 2 }, deprecated = true]; + } + + Code code = 1; +} + +message Query { + enum Code { + QUERY_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + QUERY_FILE_WITH_COMMAND_LINE_EXPRESSION = 1 [(metadata) = { exit_code: 2 }]; + QUERY_FILE_READ_FAILURE = 2 [(metadata) = { exit_code: 2 }]; + COMMAND_LINE_EXPRESSION_MISSING = 3 [(metadata) = { exit_code: 2 }]; + OUTPUT_FORMAT_INVALID = 4 [(metadata) = { exit_code: 2 }]; + GRAPHLESS_PREREQ_UNMET = 5 [(metadata) = { exit_code: 2 }]; + QUERY_OUTPUT_WRITE_FAILURE = 6 [(metadata) = { exit_code: 36 }]; + QUERY_STDOUT_FLUSH_FAILURE = 13 [(metadata) = { exit_code: 36 }]; + ANALYSIS_QUERY_PREREQ_UNMET = 14 [(metadata) = { exit_code: 2 }]; + QUERY_RESULTS_FLUSH_FAILURE = 15 [(metadata) = { exit_code: 36 }]; + // Deprecated - folded into SYNTAX_ERROR. + DEPRECATED_UNCLOSED_QUOTATION_EXPRESSION_ERROR = 16 + [(metadata) = { exit_code: 2 }]; + VARIABLE_NAME_INVALID = 17 [(metadata) = { exit_code: 7 }]; + VARIABLE_UNDEFINED = 18 [(metadata) = { exit_code: 7 }]; + BUILDFILES_AND_LOADFILES_CANNOT_USE_OUTPUT_LOCATION_ERROR = 19 + [(metadata) = { exit_code: 2 }]; + BUILD_FILE_ERROR = 20 [(metadata) = { exit_code: 7 }]; + CYCLE = 21 [(metadata) = { exit_code: 7 }]; + UNIQUE_SKYKEY_THRESHOLD_EXCEEDED = 22 [(metadata) = { exit_code: 7 }]; + TARGET_NOT_IN_UNIVERSE_SCOPE = 23 [(metadata) = { exit_code: 2 }]; + INVALID_FULL_UNIVERSE_EXPRESSION = 24 [(metadata) = { exit_code: 7 }]; + UNIVERSE_SCOPE_LIMIT_EXCEEDED = 25 [(metadata) = { exit_code: 7 }]; + INVALIDATION_LIMIT_EXCEEDED = 26 [(metadata) = { exit_code: 7 }]; + OUTPUT_FORMAT_PREREQ_UNMET = 27 [(metadata) = { exit_code: 2 }]; + ARGUMENTS_MISSING = 28 [(metadata) = { exit_code: 7 }]; + RBUILDFILES_FUNCTION_REQUIRES_SKYQUERY = 29 [(metadata) = { exit_code: 7 }]; + FULL_TARGETS_NOT_SUPPORTED = 30 [(metadata) = { exit_code: 7 }]; + // Deprecated - folded into SYNTAX_ERROR. + DEPRECATED_UNEXPECTED_TOKEN_ERROR = 31 [(metadata) = { exit_code: 2 }]; + // Deprecated - folded into SYNTAX_ERROR. + DEPRECATED_INTEGER_LITERAL_MISSING = 32 [(metadata) = { exit_code: 2 }]; + // Deprecated - folded into SYNTAX_ERROR. + DEPRECATED_INVALID_STARTING_CHARACTER_ERROR = 33 + [(metadata) = { exit_code: 2 }]; + // Deprecated - folded into SYNTAX_ERROR. + DEPRECATED_PREMATURE_END_OF_INPUT_ERROR = 34 + [(metadata) = { exit_code: 2 }]; + // Indicates the user specified invalid query syntax. 
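+    // (For instance, an unbalanced quotation such as `deps("//foo)`; the
+    // deprecated UNCLOSED_QUOTATION and UNEXPECTED_TOKEN values above were
+    // folded into this code. Illustrative example, not upstream text.)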
+ SYNTAX_ERROR = 35 [(metadata) = { exit_code: 2 }]; + OUTPUT_FORMATTER_IO_EXCEPTION = 36 [(metadata) = { exit_code: 36 }]; + SKYQUERY_TRANSITIVE_TARGET_ERROR = 37 [(metadata) = { exit_code: 7 }]; + SKYQUERY_TARGET_EXCEPTION = 38 [(metadata) = { exit_code: 7 }]; + INVALID_LABEL_IN_TEST_SUITE = 39 [(metadata) = { exit_code: 7 }]; + // Indicates any usage of flags that must not be combined. + ILLEGAL_FLAG_COMBINATION = 40 [(metadata) = { exit_code: 2 }]; + // Indicates a non-detailed exception that halted a query. This is a + // deficiency in Blaze/Bazel and code should be changed to attach a detailed + // exit code to this failure mode. + NON_DETAILED_ERROR = 41 [(metadata) = { exit_code: 1 }]; + + reserved 7 to 12; // For internal use + } + + Code code = 1; +} + +message LocalExecution { + enum Code { + LOCAL_EXECUTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + LOCKFREE_OUTPUT_PREREQ_UNMET = 1 [(metadata) = { exit_code: 2 }]; + UNTRACKED_RESOURCE = 2 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message ActionCache { + enum Code { + ACTION_CACHE_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + INITIALIZATION_FAILURE = 1 [(metadata) = { exit_code: 36 }]; + } + + Code code = 1; +} + +message FetchCommand { + enum Code { + FETCH_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + EXPRESSION_MISSING = 1 [(metadata) = { exit_code: 2 }]; + OPTIONS_INVALID = 2 [(metadata) = { exit_code: 2 }]; + QUERY_PARSE_ERROR = 3 [(metadata) = { exit_code: 2 }]; + QUERY_EVALUATION_ERROR = 4 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message SyncCommand { + enum Code { + SYNC_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + PACKAGE_LOOKUP_ERROR = 1 [(metadata) = { exit_code: 7 }]; + WORKSPACE_EVALUATION_ERROR = 2 [(metadata) = { exit_code: 7 }]; + REPOSITORY_FETCH_ERRORS = 3 [(metadata) = { exit_code: 7 }]; + REPOSITORY_NAME_INVALID = 4 [(metadata) = { exit_code: 7 }]; + } + + Code code = 1; +} + +message Sandbox { + enum Code { + SANDBOX_FAILURE_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + INITIALIZATION_FAILURE = 1 [(metadata) = { exit_code: 36 }]; + EXECUTION_IO_EXCEPTION = 2 [(metadata) = { exit_code: 36 }]; + DOCKER_COMMAND_FAILURE = 3 [(metadata) = { exit_code: 1 }]; + NO_DOCKER_IMAGE = 4 [(metadata) = { exit_code: 1 }]; + DOCKER_IMAGE_PREPARATION_FAILURE = 5 [(metadata) = { exit_code: 1 }]; + BIND_MOUNT_ANALYSIS_FAILURE = 6 [(metadata) = { exit_code: 1 }]; + MOUNT_SOURCE_DOES_NOT_EXIST = 7 [(metadata) = { exit_code: 1 }]; + MOUNT_SOURCE_TARGET_TYPE_MISMATCH = 8 [(metadata) = { exit_code: 1 }]; + MOUNT_TARGET_DOES_NOT_EXIST = 9 [(metadata) = { exit_code: 1 }]; + SUBPROCESS_START_FAILED = 10 [(metadata) = { exit_code: 36 }]; + FORBIDDEN_INPUT = 11 [(metadata) = { exit_code: 1 }]; + COPY_INPUTS_IO_EXCEPTION = 12 [(metadata) = { exit_code: 36 }]; + COPY_OUTPUTS_IO_EXCEPTION = 13 [(metadata) = { exit_code: 36 }]; + } + + Code code = 1; +} + +message IncludeScanning { + enum Code { + INCLUDE_SCANNING_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + INITIALIZE_INCLUDE_HINTS_ERROR = 1 [(metadata) = { exit_code: 36 }]; + SCANNING_IO_EXCEPTION = 2 [(metadata) = { exit_code: 36 }]; + INCLUDE_HINTS_FILE_NOT_IN_PACKAGE = 3 [(metadata) = { exit_code: 36 }]; + INCLUDE_HINTS_READ_FAILURE = 4 [(metadata) = { exit_code: 36 }]; + ILLEGAL_ABSOLUTE_PATH = 5 [(metadata) = { exit_code: 1 }]; + // TODO(b/166268889): this code should be deprecated in favor of more finely + // resolved loading-phase codes. 
+    PACKAGE_LOAD_FAILURE = 6 [(metadata) = { exit_code: 1 }];
+    USER_PACKAGE_LOAD_FAILURE = 7 [(metadata) = { exit_code: 1 }];
+    SYSTEM_PACKAGE_LOAD_FAILURE = 8 [(metadata) = { exit_code: 36 }];
+    UNDIFFERENTIATED_PACKAGE_LOAD_FAILURE = 9 [(metadata) = { exit_code: 1 }];
+  }
+
+  Code code = 1;
+  PackageLoading.Code package_loading_code = 2;
+}
+
+message TestCommand {
+  enum Code {
+    TEST_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    NO_TEST_TARGETS = 1 [(metadata) = { exit_code: 4 }];
+    TEST_WITH_NOANALYZE = 2 [(metadata) = { exit_code: 1 }];
+    TESTS_FAILED = 3 [(metadata) = { exit_code: 3 }];
+  }
+
+  Code code = 1;
+}
+
+message ActionQuery {
+  // All numerical exit code associations correspond to pre-existing Bazel
+  // behavior. These associations are suspicious:
+  // - COMMAND_LINE_EXPANSION_FAILURE: this is associated with 2, the numerical
+  //   exit code for "bad Bazel command line", but is generated when an
+  //   action's command line fails to expand, which sounds similar but is
+  //   completely different.
+  // - OUTPUT_FAILURE: this is associated with 6, an undocumented exit code.
+  // - INVALID_AQUERY_EXPRESSION: this is associated with 1, which is not
+  //   documented for (a)query.
+  // TODO(b/138456686): Revise these after the (intentionally non-breaking)
+  // initial rollout of FailureDetail-based encoding.
+  enum Code {
+    ACTION_QUERY_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    COMMAND_LINE_EXPANSION_FAILURE = 1 [(metadata) = { exit_code: 2 }];
+    OUTPUT_FAILURE = 2 [(metadata) = { exit_code: 6 }];
+    COMMAND_LINE_EXPRESSION_MISSING = 3 [(metadata) = { exit_code: 2 }];
+    EXPRESSION_PARSE_FAILURE = 4 [(metadata) = { exit_code: 2 }];
+    SKYFRAME_STATE_WITH_COMMAND_LINE_EXPRESSION = 5
+        [(metadata) = { exit_code: 2 }];
+    INVALID_AQUERY_EXPRESSION = 6 [(metadata) = { exit_code: 1 }];
+    SKYFRAME_STATE_PREREQ_UNMET = 7 [(metadata) = { exit_code: 2 }];
+    AQUERY_OUTPUT_TOO_BIG = 8 [(metadata) = { exit_code: 7 }];
+    ILLEGAL_PATTERN_SYNTAX = 9 [(metadata) = { exit_code: 2 }];
+    INCORRECT_ARGUMENTS = 10 [(metadata) = { exit_code: 2 }];
+    TOP_LEVEL_TARGETS_WITH_SKYFRAME_STATE_NOT_SUPPORTED = 11
+        [(metadata) = { exit_code: 2 }];
+    SKYFRAME_STATE_AFTER_EXECUTION = 12 [(metadata) = { exit_code: 1 }];
+    LABELS_FUNCTION_NOT_SUPPORTED = 13 [(metadata) = { exit_code: 2 }];
+    TEMPLATE_EXPANSION_FAILURE = 14 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+message TargetPatterns {
+  enum Code {
+    TARGET_PATTERNS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    TARGET_PATTERN_FILE_WITH_COMMAND_LINE_PATTERN = 1
+        [(metadata) = { exit_code: 2 }];
+    TARGET_PATTERN_FILE_READ_FAILURE = 2 [(metadata) = { exit_code: 2 }];
+    TARGET_PATTERN_PARSE_FAILURE = 3 [(metadata) = { exit_code: 1 }];
+    PACKAGE_NOT_FOUND = 4 [(metadata) = { exit_code: 1 }];
+    TARGET_FORMAT_INVALID = 5 [(metadata) = { exit_code: 1 }];
+    ABSOLUTE_TARGET_PATTERN_INVALID = 6 [(metadata) = { exit_code: 1 }];
+    CANNOT_DETERMINE_TARGET_FROM_FILENAME = 7 [(metadata) = { exit_code: 1 }];
+    LABEL_SYNTAX_ERROR = 8 [(metadata) = { exit_code: 1 }];
+    TARGET_CANNOT_BE_EMPTY_STRING = 9 [(metadata) = { exit_code: 1 }];
+    PACKAGE_PART_CANNOT_END_IN_SLASH = 10 [(metadata) = { exit_code: 1 }];
+    CYCLE = 11 [(metadata) = { exit_code: 1 }];
+    CANNOT_PRELOAD_TARGET = 12 [(metadata) = { exit_code: 1 }];
+    TARGETS_MISSING = 13 [(metadata) = { exit_code: 1 }];
+    RECURSIVE_TARGET_PATTERNS_NOT_ALLOWED = 14 [(metadata) = { exit_code: 1 }];
+    UP_LEVEL_REFERENCES_NOT_ALLOWED = 15 [(metadata) = { exit_code: 1 }];
+
NEGATIVE_TARGET_PATTERN_NOT_ALLOWED = 16 [(metadata) = { exit_code: 1 }]; + TARGET_MUST_BE_A_FILE = 17 [(metadata) = { exit_code: 1 }]; + DEPENDENCY_NOT_FOUND = 18 [(metadata) = { exit_code: 1 }]; + PACKAGE_NAME_INVALID = 19 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message CleanCommand { + enum Code { + CLEAN_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + OUTPUT_SERVICE_CLEAN_FAILURE = 1 [(metadata) = { exit_code: 6 }]; + ACTION_CACHE_CLEAN_FAILURE = 2 [(metadata) = { exit_code: 36 }]; + OUT_ERR_CLOSE_FAILURE = 3 [(metadata) = { exit_code: 36 }]; + OUTPUT_BASE_DELETE_FAILURE = 4 [(metadata) = { exit_code: 36 }]; + OUTPUT_BASE_TEMP_MOVE_FAILURE = 5 [(metadata) = { exit_code: 36 }]; + ASYNC_OUTPUT_BASE_DELETE_FAILURE = 6 [(metadata) = { exit_code: 6 }]; + EXECROOT_DELETE_FAILURE = 7 [(metadata) = { exit_code: 36 }]; + EXECROOT_TEMP_MOVE_FAILURE = 8 [(metadata) = { exit_code: 36 }]; + ASYNC_EXECROOT_DELETE_FAILURE = 9 [(metadata) = { exit_code: 6 }]; + ARGUMENTS_NOT_RECOGNIZED = 10 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message ConfigCommand { + enum Code { + CONFIG_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + TOO_MANY_CONFIG_IDS = 1 [(metadata) = { exit_code: 2 }]; + CONFIGURATION_NOT_FOUND = 2 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message ConfigurableQuery { + enum Code { + CONFIGURABLE_QUERY_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + COMMAND_LINE_EXPRESSION_MISSING = 1 [(metadata) = { exit_code: 2 }]; + EXPRESSION_PARSE_FAILURE = 2 [(metadata) = { exit_code: 2 }]; + FILTERS_NOT_SUPPORTED = 3 [(metadata) = { exit_code: 2 }]; + BUILDFILES_FUNCTION_NOT_SUPPORTED = 4 [(metadata) = { exit_code: 2 }]; + SIBLINGS_FUNCTION_NOT_SUPPORTED = 5 [(metadata) = { exit_code: 2 }]; + VISIBLE_FUNCTION_NOT_SUPPORTED = 6 [(metadata) = { exit_code: 2 }]; + ATTRIBUTE_MISSING = 7 [(metadata) = { exit_code: 2 }]; + INCORRECT_CONFIG_ARGUMENT_ERROR = 8 [(metadata) = { exit_code: 2 }]; + TARGET_MISSING = 9 [(metadata) = { exit_code: 2 }]; + STARLARK_SYNTAX_ERROR = 10 [(metadata) = { exit_code: 2 }]; + STARLARK_EVAL_ERROR = 11 [(metadata) = { exit_code: 2 }]; + // Indicates failure to correctly define a format function + FORMAT_FUNCTION_ERROR = 12 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message DumpCommand { + enum Code { + DUMP_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + NO_OUTPUT_SPECIFIED = 1 [(metadata) = { exit_code: 7 }]; + ACTION_CACHE_DUMP_FAILED = 2 [(metadata) = { exit_code: 7 }]; + COMMAND_LINE_EXPANSION_FAILURE = 3 [(metadata) = { exit_code: 7 }]; + ACTION_GRAPH_DUMP_FAILED = 4 [(metadata) = { exit_code: 7 }]; + STARLARK_HEAP_DUMP_FAILED = 5 [(metadata) = { exit_code: 8 }]; + reserved 6; // For internal use + SKYFRAME_MEMORY_DUMP_FAILED = 7 [(metadata) = { exit_code: 7 }]; + // deprecated, moved to the RemoteAnalysisCaching message. 
+ reserved 8; + } + + Code code = 1; +} + +message HelpCommand { + enum Code { + HELP_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + MISSING_ARGUMENT = 1 [(metadata) = { exit_code: 2 }]; + COMMAND_NOT_FOUND = 2 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message MobileInstall { + enum Code { + MOBILE_INSTALL_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + CLASSIC_UNSUPPORTED = 1 [(metadata) = { exit_code: 2 }]; + NO_TARGET_SPECIFIED = 2 [(metadata) = { exit_code: 2 }]; + MULTIPLE_TARGETS_SPECIFIED = 3 [(metadata) = { exit_code: 2 }]; + TARGET_TYPE_INVALID = 4 [(metadata) = { exit_code: 6 }]; + NON_ZERO_EXIT = 5 [(metadata) = { exit_code: 6 }]; + ERROR_RUNNING_PROGRAM = 6 [(metadata) = { exit_code: 6 }]; + } + + Code code = 1; +} + +message ProfileCommand { + enum Code { + PROFILE_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + OLD_BINARY_FORMAT_UNSUPPORTED = 1 [(metadata) = { exit_code: 1 }]; + FILE_READ_FAILURE = 2 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message RunCommand { + enum Code { + RUN_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + NO_TARGET_SPECIFIED = 1 [(metadata) = { exit_code: 2 }]; + TOO_MANY_TARGETS_SPECIFIED = 2 [(metadata) = { exit_code: 2 }]; + TARGET_NOT_EXECUTABLE = 3 [(metadata) = { exit_code: 2 }]; + TARGET_BUILT_BUT_PATH_NOT_EXECUTABLE = 4 [(metadata) = { exit_code: 1 }]; + TARGET_BUILT_BUT_PATH_VALIDATION_FAILED = 5 + [(metadata) = { exit_code: 36 }]; + RUN_UNDER_TARGET_NOT_BUILT = 6 [(metadata) = { exit_code: 2 }]; + RUN_PREREQ_UNMET = 7 [(metadata) = { exit_code: 2 }]; + TOO_MANY_TEST_SHARDS_OR_RUNS = 8 [(metadata) = { exit_code: 2 }]; + TEST_ENVIRONMENT_SETUP_FAILURE = 9 [(metadata) = { exit_code: 36 }]; + COMMAND_LINE_EXPANSION_FAILURE = 10 [(metadata) = { exit_code: 36 }]; + NO_SHELL_SPECIFIED = 11 [(metadata) = { exit_code: 2 }]; + SCRIPT_WRITE_FAILURE = 12 [(metadata) = { exit_code: 6 }]; + RUNFILES_DIRECTORIES_CREATION_FAILURE = 13 [(metadata) = { exit_code: 36 }]; + RUNFILES_SYMLINKS_CREATION_FAILURE = 14 [(metadata) = { exit_code: 36 }]; + TEST_ENVIRONMENT_SETUP_INTERRUPTED = 15 [(metadata) = { exit_code: 8 }]; + } + + Code code = 1; +} + +message VersionCommand { + enum Code { + VERSION_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + NOT_AVAILABLE = 1 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message PrintActionCommand { + enum Code { + PRINT_ACTION_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + TARGET_NOT_FOUND = 1 [(metadata) = { exit_code: 1 }]; + COMMAND_LINE_EXPANSION_FAILURE = 2 [(metadata) = { exit_code: 1 }]; + TARGET_KIND_UNSUPPORTED = 3 [(metadata) = { exit_code: 1 }]; + ACTIONS_NOT_FOUND = 4 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message WorkspaceStatus { + enum Code { + WORKSPACE_STATUS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + NON_ZERO_EXIT = 1 [(metadata) = { exit_code: 1 }]; + ABNORMAL_TERMINATION = 2 [(metadata) = { exit_code: 1 }]; + EXEC_FAILED = 3 [(metadata) = { exit_code: 1 }]; + PARSE_FAILURE = 4 [(metadata) = { exit_code: 36 }]; + VALIDATION_FAILURE = 5 [(metadata) = { exit_code: 1 }]; + CONTENT_UPDATE_IO_EXCEPTION = 6 [(metadata) = { exit_code: 1 }]; + STDERR_IO_EXCEPTION = 7 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message JavaCompile { + enum Code { + JAVA_COMPILE_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + REDUCED_CLASSPATH_FAILURE = 1 [(metadata) = { exit_code: 1 }]; + COMMAND_LINE_EXPANSION_FAILURE = 2 [(metadata) = { exit_code: 1 }]; + JDEPS_READ_IO_EXCEPTION = 3 
[(metadata) = { exit_code: 36 }]; + REDUCED_CLASSPATH_FALLBACK_CLEANUP_FAILURE = 4 + [(metadata) = { exit_code: 36 }]; + } + + Code code = 1; +} + +message ActionRewinding { + enum Code { + ACTION_REWINDING_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + LOST_INPUT_TOO_MANY_TIMES = 1 [(metadata) = { exit_code: 1 }]; + REWIND_LOST_INPUTS_PREREQ_UNMET = 3 [(metadata) = { exit_code: 2 }]; + LOST_OUTPUT_TOO_MANY_TIMES = 4 [(metadata) = { exit_code: 1 }]; + LOST_INPUT_REWINDING_DISABLED = 5 [(metadata) = { exit_code: 1 }]; + LOST_OUTPUT_REWINDING_DISABLED = 6 [(metadata) = { exit_code: 1 }]; + // Deprecated: attempting to rewind a source artifact is now a hard crash. + DEPRECATED_LOST_INPUT_IS_SOURCE = 2 + [(metadata) = { exit_code: 1 }, deprecated = true]; + } + + Code code = 1; +} + +message CppCompile { + enum Code { + CPP_COMPILE_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + FIND_USED_HEADERS_IO_EXCEPTION = 1 [(metadata) = { exit_code: 36 }]; + COPY_OUT_ERR_FAILURE = 2 [(metadata) = { exit_code: 36 }]; + D_FILE_READ_FAILURE = 3 [(metadata) = { exit_code: 36 }]; + COMMAND_GENERATION_FAILURE = 4 [(metadata) = { exit_code: 1 }]; + MODULE_EXPANSION_TIMEOUT = 5 [(metadata) = { exit_code: 1 }]; + INCLUDE_PATH_OUTSIDE_EXEC_ROOT = 6 [(metadata) = { exit_code: 1 }]; + FAKE_COMMAND_GENERATION_FAILURE = 7 [(metadata) = { exit_code: 1 }]; + UNDECLARED_INCLUSIONS = 8 [(metadata) = { exit_code: 1 }]; + D_FILE_PARSE_FAILURE = 9 [(metadata) = { exit_code: 1 }]; + COVERAGE_NOTES_CREATION_FAILURE = 10 [(metadata) = { exit_code: 1 }]; + MODULE_EXPANSION_MISSING_DATA = 11 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message StarlarkAction { + enum Code { + STARLARK_ACTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + UNUSED_INPUT_LIST_READ_FAILURE = 1 [(metadata) = { exit_code: 36 }]; + UNUSED_INPUT_LIST_FILE_NOT_FOUND = 2 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message NinjaAction { + enum Code { + NINJA_ACTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + INVALID_DEPFILE_DECLARED_DEPENDENCY = 1 [(metadata) = { exit_code: 36 }]; + D_FILE_PARSE_FAILURE = 2 [(metadata) = { exit_code: 36 }]; + } + + Code code = 1; +} + +message DynamicExecution { + enum Code { + DYNAMIC_EXECUTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + XCODE_RELATED_PREREQ_UNMET = 1 [(metadata) = { exit_code: 36 }]; + ACTION_LOG_MOVE_FAILURE = 2 [(metadata) = { exit_code: 1 }]; + RUN_FAILURE = 3 [(metadata) = { exit_code: 1 }]; + NO_USABLE_STRATEGY_FOUND = 4 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message FailAction { + enum Code { + FAIL_ACTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + INTENTIONAL_FAILURE = 1 [(metadata) = { exit_code: 1 }]; + INCORRECT_PYTHON_VERSION = 2 [(metadata) = { exit_code: 1 }]; + PROGUARD_SPECS_MISSING = 3 [(metadata) = { exit_code: 1 }]; + DYNAMIC_LINKING_NOT_SUPPORTED = 4 [(metadata) = { exit_code: 1 }]; + SOURCE_FILES_MISSING = 5 [(metadata) = { exit_code: 1 }]; + INCORRECT_TOOLCHAIN = 6 [(metadata) = { exit_code: 1 }]; + FRAGMENT_CLASS_MISSING = 7 [(metadata) = { exit_code: 1 }]; + reserved 8, 9; // For internal use + CANT_BUILD_INCOMPATIBLE_TARGET = 10 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message SymlinkAction { + enum Code { + SYMLINK_ACTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + EXECUTABLE_INPUT_NOT_FILE = 1 [(metadata) = { exit_code: 1 }]; + EXECUTABLE_INPUT_IS_NOT = 2 [(metadata) = { exit_code: 1 }]; + EXECUTABLE_INPUT_CHECK_IO_EXCEPTION = 3 [(metadata) = { exit_code: 1 }]; + 
LINK_CREATION_IO_EXCEPTION = 4 [(metadata) = { exit_code: 1 }]; + LINK_TOUCH_IO_EXCEPTION = 5 [(metadata) = { exit_code: 1 }]; + LINK_LOG_IO_EXCEPTION = 6 [(metadata) = { exit_code: 36 }]; + } + + Code code = 1; +} + +message CppLink { + enum Code { + CPP_LINK_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + COMMAND_GENERATION_FAILURE = 1 [(metadata) = { exit_code: 1 }]; + FAKE_COMMAND_GENERATION_FAILURE = 2 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message LtoAction { + enum Code { + LTO_ACTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + INVALID_ABSOLUTE_PATH_IN_IMPORTS = 1 [(metadata) = { exit_code: 1 }]; + MISSING_BITCODE_FILES = 2 [(metadata) = { exit_code: 1 }]; + IMPORTS_READ_IO_EXCEPTION = 3 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message TestAction { + enum Code { + TEST_ACTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + NO_KEEP_GOING_TEST_FAILURE = 1 [(metadata) = { exit_code: 1 }]; + LOCAL_TEST_PREREQ_UNMET = 2 [(metadata) = { exit_code: 1 }]; + COMMAND_LINE_EXPANSION_FAILURE = 3 [(metadata) = { exit_code: 1 }]; + DUPLICATE_CPU_TAGS = 4 [(metadata) = { exit_code: 1 }]; + INVALID_CPU_TAG = 5 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message Worker { + enum Code { + WORKER_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + MULTIPLEXER_INSTANCE_REMOVAL_FAILURE = 1 [(metadata) = { exit_code: 1 }]; + MULTIPLEXER_DOES_NOT_EXIST = 2 [(metadata) = { exit_code: 1 }]; + NO_TOOLS = 3 [(metadata) = { exit_code: 1 }]; + NO_FLAGFILE = 4 [(metadata) = { exit_code: 1 }]; + VIRTUAL_INPUT_MATERIALIZATION_FAILURE = 5 [(metadata) = { exit_code: 1 }]; + BORROW_FAILURE = 6 [(metadata) = { exit_code: 1 }]; + PREFETCH_FAILURE = 7 [(metadata) = { exit_code: 36 }]; + PREPARE_FAILURE = 8 [(metadata) = { exit_code: 1 }]; + REQUEST_FAILURE = 9 [(metadata) = { exit_code: 1 }]; + PARSE_RESPONSE_FAILURE = 10 [(metadata) = { exit_code: 1 }]; + NO_RESPONSE = 11 [(metadata) = { exit_code: 1 }]; + FINISH_FAILURE = 12 [(metadata) = { exit_code: 1 }]; + FORBIDDEN_INPUT = 13 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message Analysis { + enum Code { + ANALYSIS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + LOAD_FAILURE = 1 [(metadata) = { exit_code: 1 }]; + // TODO(b/138456686): this code should be deprecated in favor of more finely + // resolved loading-phase codes. 
+ GENERIC_LOADING_PHASE_FAILURE = 2 [(metadata) = { exit_code: 1 }]; + NOT_ALL_TARGETS_ANALYZED = 3 [(metadata) = { exit_code: 1 }]; + CYCLE = 4 [(metadata) = { exit_code: 1 }]; + PARAMETERIZED_TOP_LEVEL_ASPECT_INVALID = 5 [(metadata) = { exit_code: 1 }]; + ASPECT_LABEL_SYNTAX_ERROR = 6 [(metadata) = { exit_code: 1 }]; + ASPECT_PREREQ_UNMET = 7 [(metadata) = { exit_code: 1 }]; + ASPECT_NOT_FOUND = 8 [(metadata) = { exit_code: 1 }]; + ACTION_CONFLICT = 9 [(metadata) = { exit_code: 1 }]; + ARTIFACT_PREFIX_CONFLICT = 10 [(metadata) = { exit_code: 1 }]; + UNEXPECTED_ANALYSIS_EXCEPTION = 11 [(metadata) = { exit_code: 1 }]; + TARGETS_MISSING_ENVIRONMENTS = 12 [(metadata) = { exit_code: 1 }]; + INVALID_ENVIRONMENT = 13 [(metadata) = { exit_code: 1 }]; + ENVIRONMENT_MISSING_FROM_GROUPS = 14 [(metadata) = { exit_code: 1 }]; + EXEC_GROUP_MISSING = 15 [(metadata) = { exit_code: 1 }]; + INVALID_EXECUTION_PLATFORM = 16 [(metadata) = { exit_code: 1 }]; + ASPECT_CREATION_FAILED = 17 [(metadata) = { exit_code: 1 }]; + CONFIGURED_VALUE_CREATION_FAILED = 18 [(metadata) = { exit_code: 1 }]; + INCOMPATIBLE_TARGET_REQUESTED = 19 [(metadata) = { exit_code: 1 }]; + ANALYSIS_FAILURE_PROPAGATION_FAILED = 20 [(metadata) = { exit_code: 1 }]; + ANALYSIS_CACHE_DISCARDED = 21 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message PackageLoading { + enum Code { + PACKAGE_LOADING_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + WORKSPACE_FILE_ERROR = 1 [(metadata) = { exit_code: 1 }]; + MAX_COMPUTATION_STEPS_EXCEEDED = 2 [(metadata) = { exit_code: 1 }]; + BUILD_FILE_MISSING = 3 [(metadata) = { exit_code: 1 }]; + REPOSITORY_MISSING = 4 [(metadata) = { exit_code: 1 }]; + PERSISTENT_INCONSISTENT_FILESYSTEM_ERROR = 5 + [(metadata) = { exit_code: 36 }]; + TRANSIENT_INCONSISTENT_FILESYSTEM_ERROR = 6 + [(metadata) = { exit_code: 36 }]; + INVALID_NAME = 7 [(metadata) = { exit_code: 1 }]; + // was: PRELUDE_FILE_READ_ERROR. Replaced by IMPORT_STARLARK_FILE_ERROR + // when the prelude was changed to be loaded as a Starlark module. + reserved 8; + EVAL_GLOBS_SYMLINK_ERROR = 9 [(metadata) = { exit_code: 1 }]; + IMPORT_STARLARK_FILE_ERROR = 10 [(metadata) = { exit_code: 1 }]; + PACKAGE_MISSING = 11 [(metadata) = { exit_code: 1 }]; + TARGET_MISSING = 12 [(metadata) = { exit_code: 1 }]; + NO_SUCH_THING = 13 [(metadata) = { exit_code: 1 }]; + GLOB_IO_EXCEPTION = 14 [(metadata) = { exit_code: 36 }]; + DUPLICATE_LABEL = 15 [(metadata) = { exit_code: 1 }]; + INVALID_PACKAGE_SPECIFICATION = 16 [(metadata) = { exit_code: 1 }]; + SYNTAX_ERROR = 17 [(metadata) = { exit_code: 1 }]; + ENVIRONMENT_IN_DIFFERENT_PACKAGE = 18 [(metadata) = { exit_code: 1 }]; + DEFAULT_ENVIRONMENT_UNDECLARED = 19 [(metadata) = { exit_code: 1 }]; + ENVIRONMENT_IN_MULTIPLE_GROUPS = 20 [(metadata) = { exit_code: 1 }]; + ENVIRONMENT_DOES_NOT_EXIST = 21 [(metadata) = { exit_code: 1 }]; + ENVIRONMENT_INVALID = 22 [(metadata) = { exit_code: 1 }]; + ENVIRONMENT_NOT_IN_GROUP = 23 [(metadata) = { exit_code: 1 }]; + PACKAGE_NAME_INVALID = 24 [(metadata) = { exit_code: 1 }]; + STARLARK_EVAL_ERROR = 25 [(metadata) = { exit_code: 1 }]; + LICENSE_PARSE_FAILURE = 26 [(metadata) = { exit_code: 1 }]; + DISTRIBUTIONS_PARSE_FAILURE = 27 [(metadata) = { exit_code: 1 }]; + LABEL_CROSSES_PACKAGE_BOUNDARY = 28 [(metadata) = { exit_code: 1 }]; + // Failure while evaluating or applying @_builtins injection. Since the + // builtins .bzl files are always packaged with Blaze in production, a + // failure here generally indicates a bug in Blaze. 
+ BUILTINS_INJECTION_FAILURE = 29 [(metadata) = { exit_code: 1 }]; + SYMLINK_CYCLE_OR_INFINITE_EXPANSION = 30 [(metadata) = { exit_code: 1 }]; + OTHER_IO_EXCEPTION = 31 [(metadata) = { exit_code: 36 }]; + BAD_REPO_FILE = 32 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message Toolchain { + enum Code { + TOOLCHAIN_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + MISSING_PROVIDER = 1 [(metadata) = { exit_code: 1 }]; + INVALID_CONSTRAINT_VALUE = 2 [(metadata) = { exit_code: 1 }]; + INVALID_PLATFORM_VALUE = 3 [(metadata) = { exit_code: 1 }]; + INVALID_TOOLCHAIN = 4 [(metadata) = { exit_code: 1 }]; + NO_MATCHING_EXECUTION_PLATFORM = 5 [(metadata) = { exit_code: 1 }]; + NO_MATCHING_TOOLCHAIN = 6 [(metadata) = { exit_code: 1 }]; + INVALID_TOOLCHAIN_TYPE = 7 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message StarlarkLoading { + enum Code { + STARLARK_LOADING_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + CYCLE = 1 [(metadata) = { exit_code: 1 }]; + COMPILE_ERROR = 2 [(metadata) = { exit_code: 1 }]; + PARSE_ERROR = 3 [(metadata) = { exit_code: 1 }]; + EVAL_ERROR = 4 [(metadata) = { exit_code: 1 }]; + CONTAINING_PACKAGE_NOT_FOUND = 5 [(metadata) = { exit_code: 1 }]; + PACKAGE_NOT_FOUND = 6 [(metadata) = { exit_code: 1 }]; + IO_ERROR = 7 [(metadata) = { exit_code: 1 }]; + LABEL_CROSSES_PACKAGE_BOUNDARY = 8 [(metadata) = { exit_code: 1 }]; + BUILTINS_ERROR = 9 [(metadata) = { exit_code: 1 }]; + VISIBILITY_ERROR = 10 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message ExternalDeps { + enum Code { + EXTERNAL_DEPS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + MODULE_NOT_FOUND = 1 [(metadata) = { exit_code: 48 }]; + BAD_MODULE = 2 [(metadata) = { exit_code: 48 }]; + VERSION_RESOLUTION_ERROR = 3 [(metadata) = { exit_code: 48 }]; + INVALID_REGISTRY_URL = 4 [(metadata) = { exit_code: 48 }]; + ERROR_ACCESSING_REGISTRY = 5 [(metadata) = { exit_code: 32 }]; + INVALID_EXTENSION_IMPORT = 6 [(metadata) = { exit_code: 48 }]; + BAD_LOCKFILE = 7 [(metadata) = { exit_code: 48 }]; + } + + Code code = 1; +} + +message DiffAwareness { + enum Code { + DIFF_AWARENESS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + DIFF_STAT_FAILED = 1 [(metadata) = { exit_code: 36 }]; + } + + Code code = 1; +} + +message ModCommand { + enum Code { + MOD_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + MISSING_ARGUMENTS = 1 [(metadata) = { exit_code: 2 }]; + TOO_MANY_ARGUMENTS = 2 [(metadata) = { exit_code: 2 }]; + INVALID_ARGUMENTS = 3 [(metadata) = { exit_code: 2 }]; + BUILDOZER_FAILED = 4 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} diff --git a/app/bazel_event_publisher_proto/proto/google/api/annotations.proto b/app/bazel_event_publisher_proto/proto/google/api/annotations.proto new file mode 100644 index 0000000000000..84c48164aa92b --- /dev/null +++ b/app/bazel_event_publisher_proto/proto/google/api/annotations.proto @@ -0,0 +1,31 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.api; + +import "google/api/http.proto"; +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "AnnotationsProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +extend google.protobuf.MethodOptions { + // See `HttpRule`. + HttpRule http = 72295728; +} diff --git a/app/bazel_event_publisher_proto/proto/google/api/client.proto b/app/bazel_event_publisher_proto/proto/google/api/client.proto new file mode 100644 index 0000000000000..7ba1db3da04f8 --- /dev/null +++ b/app/bazel_event_publisher_proto/proto/google/api/client.proto @@ -0,0 +1,456 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +import "google/api/launch_stage.proto"; +import "google/protobuf/descriptor.proto"; +import "google/protobuf/duration.proto"; + +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "ClientProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +extend google.protobuf.MethodOptions { + // A definition of a client library method signature. + // + // In client libraries, each proto RPC corresponds to one or more methods + // which the end user is able to call, and calls the underlying RPC. + // Normally, this method receives a single argument (a struct or instance + // corresponding to the RPC request object). Defining this field will + // add one or more overloads providing flattened or simpler method signatures + // in some languages. + // + // The fields on the method signature are provided as a comma-separated + // string. + // + // For example, the proto RPC and annotation: + // + // rpc CreateSubscription(CreateSubscriptionRequest) + // returns (Subscription) { + // option (google.api.method_signature) = "name,topic"; + // } + // + // Would add the following Java overload (in addition to the method accepting + // the request object): + // + // public final Subscription createSubscription(String name, String topic) + // + // The following backwards-compatibility guidelines apply: + // + // * Adding this annotation to an unannotated method is backwards + // compatible. + // * Adding this annotation to a method which already has existing + // method signature annotations is backwards compatible if and only if + // the new method signature annotation is last in the sequence. + // * Modifying or removing an existing method signature annotation is + // a breaking change. + // * Re-ordering existing method signature annotations is a breaking + // change. + repeated string method_signature = 1051; +} + +extend google.protobuf.ServiceOptions { + // The hostname for this service. + // This should be specified with no prefix or protocol. 
+ // + // Example: + // + // service Foo { + // option (google.api.default_host) = "foo.googleapi.com"; + // ... + // } + string default_host = 1049; + + // OAuth scopes needed for the client. + // + // Example: + // + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform"; + // ... + // } + // + // If there is more than one scope, use a comma-separated string: + // + // Example: + // + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform," + // "https://www.googleapis.com/auth/monitoring"; + // ... + // } + string oauth_scopes = 1050; + + // The API version of this service, which should be sent by version-aware + // clients to the service. This allows services to abide by the schema and + // behavior of the service at the time this API version was deployed. + // The format of the API version must be treated as opaque by clients. + // Services may use a format with an apparent structure, but clients must + // not rely on this to determine components within an API version, or attempt + // to construct other valid API versions. Note that this is for upcoming + // functionality and may not be implemented for all services. + // + // Example: + // + // service Foo { + // option (google.api.api_version) = "v1_20230821_preview"; + // } + string api_version = 525000001; +} + +// Required information for every language. +message CommonLanguageSettings { + // Link to automatically generated reference documentation. Example: + // https://cloud.google.com/nodejs/docs/reference/asset/latest + string reference_docs_uri = 1 [deprecated = true]; + + // The destination where API teams want this client library to be published. + repeated ClientLibraryDestination destinations = 2; + + // Configuration for which RPCs should be generated in the GAPIC client. + SelectiveGapicGeneration selective_gapic_generation = 3; +} + +// Details about how and where to publish client libraries. +message ClientLibrarySettings { + // Version of the API to apply these settings to. This is the full protobuf + // package for the API, ending in the version element. + // Examples: "google.cloud.speech.v1" and "google.spanner.admin.database.v1". + string version = 1; + + // Launch stage of this version of the API. + LaunchStage launch_stage = 2; + + // When using transport=rest, the client request will encode enums as + // numbers rather than strings. + bool rest_numeric_enums = 3; + + // Settings for legacy Java features, supported in the Service YAML. + JavaSettings java_settings = 21; + + // Settings for C++ client libraries. + CppSettings cpp_settings = 22; + + // Settings for PHP client libraries. + PhpSettings php_settings = 23; + + // Settings for Python client libraries. + PythonSettings python_settings = 24; + + // Settings for Node client libraries. + NodeSettings node_settings = 25; + + // Settings for .NET client libraries. + DotnetSettings dotnet_settings = 26; + + // Settings for Ruby client libraries. + RubySettings ruby_settings = 27; + + // Settings for Go client libraries. + GoSettings go_settings = 28; +} + +// This message configures the settings for publishing [Google Cloud Client +// libraries](https://cloud.google.com/apis/docs/cloud-client-libraries) +// generated from the service config. +message Publishing { + // A list of API method settings, e.g. the behavior for methods that use the + // long-running operation pattern. 
+  repeated MethodSettings method_settings = 2;
+
+  // Link to a *public* URI where users can report issues. Example:
+  // https://issuetracker.google.com/issues/new?component=190865&template=1161103
+  string new_issue_uri = 101;
+
+  // Link to product home page. Example:
+  // https://cloud.google.com/asset-inventory/docs/overview
+  string documentation_uri = 102;
+
+  // Used as a tracking tag when collecting data about the APIs developer
+  // relations artifacts like docs, packages delivered to package managers,
+  // etc. Example: "speech".
+  string api_short_name = 103;
+
+  // GitHub label to apply to issues and pull requests opened for this API.
+  string github_label = 104;
+
+  // GitHub teams to be added to CODEOWNERS in the directory in GitHub
+  // containing source code for the client libraries for this API.
+  repeated string codeowner_github_teams = 105;
+
+  // A prefix used in sample code when demarcating regions to be included in
+  // documentation.
+  string doc_tag_prefix = 106;
+
+  // For whom the client library is being published.
+  ClientLibraryOrganization organization = 107;
+
+  // Client library settings. If the same version string appears multiple
+  // times in this list, then the last one wins. Settings from earlier
+  // settings with the same version string are discarded.
+  repeated ClientLibrarySettings library_settings = 109;
+
+  // Optional link to proto reference documentation. Example:
+  // https://cloud.google.com/pubsub/lite/docs/reference/rpc
+  string proto_reference_documentation_uri = 110;
+
+  // Optional link to REST reference documentation. Example:
+  // https://cloud.google.com/pubsub/lite/docs/reference/rest
+  string rest_reference_documentation_uri = 111;
+}
+
+// Settings for Java client libraries.
+message JavaSettings {
+  // The package name to use in Java. Clobbers the java_package option
+  // set in the protobuf. This should be used **only** by APIs
+  // that have already set the "language_settings.java.package_name" field
+  // in gapic.yaml. API teams should use the protobuf java_package option
+  // where possible.
+  //
+  // Example of a YAML configuration::
+  //
+  //  publishing:
+  //    java_settings:
+  //      library_package: com.google.cloud.pubsub.v1
+  string library_package = 1;
+
+  // Configure the Java class name to use instead of the service's for its
+  // corresponding generated GAPIC client. Keys are fully-qualified
+  // service names as they appear in the protobuf (including the full
+  // protobuf package). This should be used **only** by APIs that have
+  // already set the "language_settings.java.interface_names" field in
+  // gapic.yaml. API teams should otherwise use the service name as it
+  // appears in the protobuf.
+  //
+  // Example of a YAML configuration::
+  //
+  //  publishing:
+  //    java_settings:
+  //      service_class_names:
+  //        - google.pubsub.v1.Publisher: TopicAdmin
+  //        - google.pubsub.v1.Subscriber: SubscriptionAdmin
+  map<string, string> service_class_names = 2;
+
+  // Some settings.
+  CommonLanguageSettings common = 3;
+}
+
+// Settings for C++ client libraries.
+message CppSettings {
+  // Some settings.
+  CommonLanguageSettings common = 1;
+}
+
+// Settings for Php client libraries.
+message PhpSettings {
+  // Some settings.
+  CommonLanguageSettings common = 1;
+}
+
+// Settings for Python client libraries.
+message PythonSettings {
+  // Experimental features to be included during client library generation.
+  // These fields will be deprecated once the feature graduates and is enabled
+  // by default.
+  message ExperimentalFeatures {
+    // Enables generation of asynchronous REST clients if `rest` transport is
+    // enabled. By default, asynchronous REST clients will not be generated.
+    // This feature will be enabled by default 1 month after launching the
+    // feature in preview packages.
+    bool rest_async_io_enabled = 1;
+  }
+
+  // Some settings.
+  CommonLanguageSettings common = 1;
+
+  // Experimental features to be included during client library generation.
+  ExperimentalFeatures experimental_features = 2;
+}
+
+// Settings for Node client libraries.
+message NodeSettings {
+  // Some settings.
+  CommonLanguageSettings common = 1;
+}
+
+// Settings for Dotnet client libraries.
+message DotnetSettings {
+  // Some settings.
+  CommonLanguageSettings common = 1;
+
+  // Map from original service names to renamed versions.
+  // This is used when the default generated types
+  // would cause a naming conflict. (Neither name is
+  // fully-qualified.)
+  // Example: Subscriber to SubscriberServiceApi.
+  map<string, string> renamed_services = 2;
+
+  // Map from full resource types to the effective short name
+  // for the resource. This is used when otherwise resources
+  // named from different services would cause naming collisions.
+  // Example entry:
+  // "datalabeling.googleapis.com/Dataset": "DataLabelingDataset"
+  map<string, string> renamed_resources = 3;
+
+  // List of full resource types to ignore during generation.
+  // This is typically used for API-specific Location resources,
+  // which should be handled by the generator as if they were actually
+  // the common Location resources.
+  // Example entry: "documentai.googleapis.com/Location"
+  repeated string ignored_resources = 4;
+
+  // Namespaces which must be aliased in snippets due to
+  // a known (but non-generator-predictable) naming collision.
+  repeated string forced_namespace_aliases = 5;
+
+  // Method signatures (in the form "service.method(signature)")
+  // which are provided separately, so shouldn't be generated.
+  // Snippets *calling* these methods are still generated, however.
+  repeated string handwritten_signatures = 6;
+}
+
+// Settings for Ruby client libraries.
+message RubySettings {
+  // Some settings.
+  CommonLanguageSettings common = 1;
+}
+
+// Settings for Go client libraries.
+message GoSettings {
+  // Some settings.
+  CommonLanguageSettings common = 1;
+}
+
+// Describes the generator configuration for a method.
+message MethodSettings {
+  // Describes settings to use when generating API methods that use the
+  // long-running operation pattern.
+  // All default values below are from those used in the client library
+  // generators (e.g.
+  // [Java](https://github.com/googleapis/gapic-generator-java/blob/04c2faa191a9b5a10b92392fe8482279c4404803/src/main/java/com/google/api/generator/gapic/composer/common/RetrySettingsComposer.java)).
+  message LongRunning {
+    // Initial delay after which the first poll request will be made.
+    // Default value: 5 seconds.
+    google.protobuf.Duration initial_poll_delay = 1;
+
+    // Multiplier to gradually increase delay between subsequent polls until it
+    // reaches max_poll_delay.
+    // Default value: 1.5.
+    float poll_delay_multiplier = 2;
+
+    // Maximum time between two subsequent poll requests.
+    // Default value: 45 seconds.
+    google.protobuf.Duration max_poll_delay = 3;
+
+    // Total polling timeout.
+    // Default value: 5 minutes.
+    google.protobuf.Duration total_poll_timeout = 4;
+  }
+
+  // The fully qualified name of the method, for which the options below apply.
+  // This is used to find the method to apply the options.
+ // + // Example: + // + // publishing: + // method_settings: + // - selector: google.storage.control.v2.StorageControl.CreateFolder + // # method settings for CreateFolder... + string selector = 1; + + // Describes settings to use for long-running operations when generating + // API methods for RPCs. Complements RPCs that use the annotations in + // google/longrunning/operations.proto. + // + // Example of a YAML configuration:: + // + // publishing: + // method_settings: + // - selector: google.cloud.speech.v2.Speech.BatchRecognize + // long_running: + // initial_poll_delay: 60s # 1 minute + // poll_delay_multiplier: 1.5 + // max_poll_delay: 360s # 6 minutes + // total_poll_timeout: 54000s # 90 minutes + LongRunning long_running = 2; + + // List of top-level fields of the request message, that should be + // automatically populated by the client libraries based on their + // (google.api.field_info).format. Currently supported format: UUID4. + // + // Example of a YAML configuration: + // + // publishing: + // method_settings: + // - selector: google.example.v1.ExampleService.CreateExample + // auto_populated_fields: + // - request_id + repeated string auto_populated_fields = 3; +} + +// The organization for which the client libraries are being published. +// Affects the url where generated docs are published, etc. +enum ClientLibraryOrganization { + // Not useful. + CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED = 0; + + // Google Cloud Platform Org. + CLOUD = 1; + + // Ads (Advertising) Org. + ADS = 2; + + // Photos Org. + PHOTOS = 3; + + // Street View Org. + STREET_VIEW = 4; + + // Shopping Org. + SHOPPING = 5; + + // Geo Org. + GEO = 6; + + // Generative AI - https://developers.generativeai.google + GENERATIVE_AI = 7; +} + +// To where should client libraries be published? +enum ClientLibraryDestination { + // Client libraries will neither be generated nor published to package + // managers. + CLIENT_LIBRARY_DESTINATION_UNSPECIFIED = 0; + + // Generate the client library in a repo under github.com/googleapis, + // but don't publish it to package managers. + GITHUB = 10; + + // Publish the library to package managers like nuget.org and npmjs.com. + PACKAGE_MANAGER = 20; +} + +// This message is used to configure the generation of a subset of the RPCs in +// a service for client libraries. +message SelectiveGapicGeneration { + // An allowlist of the fully qualified names of RPCs that should be included + // on public client surfaces. + repeated string methods = 1; +} diff --git a/app/bazel_event_publisher_proto/proto/google/api/field_behavior.proto b/app/bazel_event_publisher_proto/proto/google/api/field_behavior.proto new file mode 100644 index 0000000000000..2865ba0537393 --- /dev/null +++ b/app/bazel_event_publisher_proto/proto/google/api/field_behavior.proto @@ -0,0 +1,104 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.api; + +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "FieldBehaviorProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +extend google.protobuf.FieldOptions { + // A designation of a specific field behavior (required, output only, etc.) + // in protobuf messages. + // + // Examples: + // + // string name = 1 [(google.api.field_behavior) = REQUIRED]; + // State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + // google.protobuf.Duration ttl = 1 + // [(google.api.field_behavior) = INPUT_ONLY]; + // google.protobuf.Timestamp expire_time = 1 + // [(google.api.field_behavior) = OUTPUT_ONLY, + // (google.api.field_behavior) = IMMUTABLE]; + repeated google.api.FieldBehavior field_behavior = 1052 [packed = false]; +} + +// An indicator of the behavior of a given field (for example, that a field +// is required in requests, or given as output but ignored as input). +// This **does not** change the behavior in protocol buffers itself; it only +// denotes the behavior and may affect how API tooling handles the field. +// +// Note: This enum **may** receive new values in the future. +enum FieldBehavior { + // Conventional default for enums. Do not use this. + FIELD_BEHAVIOR_UNSPECIFIED = 0; + + // Specifically denotes a field as optional. + // While all fields in protocol buffers are optional, this may be specified + // for emphasis if appropriate. + OPTIONAL = 1; + + // Denotes a field as required. + // This indicates that the field **must** be provided as part of the request, + // and failure to do so will cause an error (usually `INVALID_ARGUMENT`). + REQUIRED = 2; + + // Denotes a field as output only. + // This indicates that the field is provided in responses, but including the + // field in a request does nothing (the server *must* ignore it and + // *must not* throw an error as a result of the field's presence). + OUTPUT_ONLY = 3; + + // Denotes a field as input only. + // This indicates that the field is provided in requests, and the + // corresponding field is not included in output. + INPUT_ONLY = 4; + + // Denotes a field as immutable. + // This indicates that the field may be set once in a request to create a + // resource, but may not be changed thereafter. + IMMUTABLE = 5; + + // Denotes that a (repeated) field is an unordered list. + // This indicates that the service may provide the elements of the list + // in any arbitrary order, rather than the order the user originally + // provided. Additionally, the list's order may or may not be stable. + UNORDERED_LIST = 6; + + // Denotes that this field returns a non-empty default value if not set. + // This indicates that if the user provides the empty value in a request, + // a non-empty value will be returned. The user will not be aware of what + // non-empty value to expect. + NON_EMPTY_DEFAULT = 7; + + // Denotes that the field in a resource (a message annotated with + // google.api.resource) is used in the resource name to uniquely identify the + // resource. For AIP-compliant APIs, this should only be applied to the + // `name` field on the resource. + // + // This behavior should not be applied to references to other resources within + // the message. + // + // The identifier field of resources often have different field behavior + // depending on the request it is embedded in (e.g. 
for Create methods name + // is optional and unused, while for Update methods it is required). Instead + // of method-specific annotations, only `IDENTIFIER` is required. + IDENTIFIER = 8; +} diff --git a/app/bazel_event_publisher_proto/proto/google/api/http.proto b/app/bazel_event_publisher_proto/proto/google/api/http.proto new file mode 100644 index 0000000000000..e3270371d4457 --- /dev/null +++ b/app/bazel_event_publisher_proto/proto/google/api/http.proto @@ -0,0 +1,371 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "HttpProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +// Defines the HTTP configuration for an API service. It contains a list of +// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method +// to one or more HTTP REST API methods. +message Http { + // A list of HTTP configuration rules that apply to individual API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" order. + repeated HttpRule rules = 1; + + // When set to true, URL path parameters will be fully URI-decoded except in + // cases of single segment matches in reserved expansion, where "%2F" will be + // left encoded. + // + // The default behavior is to not decode RFC 6570 reserved characters in multi + // segment matches. + bool fully_decode_reserved_expansion = 2; +} + +// gRPC Transcoding +// +// gRPC Transcoding is a feature for mapping between a gRPC method and one or +// more HTTP REST endpoints. It allows developers to build a single API service +// that supports both gRPC APIs and REST APIs. Many systems, including [Google +// APIs](https://github.com/googleapis/googleapis), +// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC +// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), +// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature +// and use it for large scale production services. +// +// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies +// how different portions of the gRPC request message are mapped to the URL +// path, URL query parameters, and HTTP request body. It also controls how the +// gRPC response message is mapped to the HTTP response body. `HttpRule` is +// typically specified as an `google.api.http` annotation on the gRPC method. +// +// Each mapping specifies a URL path template and an HTTP method. The path +// template may refer to one or more fields in the gRPC request message, as long +// as each field is a non-repeated field with a primitive (non-message) type. +// The path template controls how fields of the request message are mapped to +// the URL path. 
+//
+// Example:
+//
+//     service Messaging {
+//       rpc GetMessage(GetMessageRequest) returns (Message) {
+//         option (google.api.http) = {
+//             get: "/v1/{name=messages/*}"
+//         };
+//       }
+//     }
+//     message GetMessageRequest {
+//       string name = 1; // Mapped to URL path.
+//     }
+//     message Message {
+//       string text = 1; // The resource content.
+//     }
+//
+// This enables an HTTP REST to gRPC mapping as below:
+//
+// - HTTP: `GET /v1/messages/123456`
+// - gRPC: `GetMessage(name: "messages/123456")`
+//
+// Any fields in the request message which are not bound by the path template
+// automatically become HTTP query parameters if there is no HTTP request body.
+// For example:
+//
+//     service Messaging {
+//       rpc GetMessage(GetMessageRequest) returns (Message) {
+//         option (google.api.http) = {
+//             get: "/v1/messages/{message_id}"
+//         };
+//       }
+//     }
+//     message GetMessageRequest {
+//       message SubMessage {
+//         string subfield = 1;
+//       }
+//       string message_id = 1; // Mapped to URL path.
+//       int64 revision = 2;    // Mapped to URL query parameter `revision`.
+//       SubMessage sub = 3;    // Mapped to URL query parameter `sub.subfield`.
+//     }
+//
+// This enables an HTTP JSON to RPC mapping as below:
+//
+// - HTTP: `GET /v1/messages/123456?revision=2&sub.subfield=foo`
+// - gRPC: `GetMessage(message_id: "123456" revision: 2 sub:
+//   SubMessage(subfield: "foo"))`
+//
+// Note that fields which are mapped to URL query parameters must have a
+// primitive type or a repeated primitive type or a non-repeated message type.
+// In the case of a repeated type, the parameter can be repeated in the URL
+// as `...?param=A&param=B`. In the case of a message type, each field of the
+// message is mapped to a separate parameter, such as
+// `...?foo.a=A&foo.b=B&foo.c=C`.
+//
+// For HTTP methods that allow a request body, the `body` field
+// specifies the mapping. Consider a REST update method on the
+// message resource collection:
+//
+//     service Messaging {
+//       rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
+//         option (google.api.http) = {
+//           patch: "/v1/messages/{message_id}"
+//           body: "message"
+//         };
+//       }
+//     }
+//     message UpdateMessageRequest {
+//       string message_id = 1; // mapped to the URL
+//       Message message = 2;   // mapped to the body
+//     }
+//
+// The following HTTP JSON to RPC mapping is enabled, where the
+// representation of the JSON in the request body is determined by
+// protos JSON encoding:
+//
+// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }`
+// - gRPC: `UpdateMessage(message_id: "123456" message { text: "Hi!" })`
+//
+// The special name `*` can be used in the body mapping to define that
+// every field not bound by the path template should be mapped to the
+// request body. This enables the following alternative definition of
+// the update method:
+//
+//     service Messaging {
+//       rpc UpdateMessage(Message) returns (Message) {
+//         option (google.api.http) = {
+//           patch: "/v1/messages/{message_id}"
+//           body: "*"
+//         };
+//       }
+//     }
+//     message Message {
+//       string message_id = 1;
+//       string text = 2;
+//     }
+//
+// The following HTTP JSON to RPC mapping is enabled:
+//
+// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }`
+// - gRPC: `UpdateMessage(message_id: "123456" text: "Hi!")`
+//
+// Note that when using `*` in the body mapping, it is not possible to
+// have HTTP parameters, as all fields not bound by the path end in
+// the body. This makes this option more rarely used in practice when
+// defining REST APIs.
The common usage of `*` is in custom methods +// which don't use the URL at all for transferring data. +// +// It is possible to define multiple HTTP methods for one RPC by using +// the `additional_bindings` option. Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/messages/{message_id}" +// additional_bindings { +// get: "/v1/users/{user_id}/messages/{message_id}" +// } +// }; +// } +// } +// message GetMessageRequest { +// string message_id = 1; +// string user_id = 2; +// } +// +// This enables the following two alternative HTTP JSON to RPC mappings: +// +// - HTTP: `GET /v1/messages/123456` +// - gRPC: `GetMessage(message_id: "123456")` +// +// - HTTP: `GET /v1/users/me/messages/123456` +// - gRPC: `GetMessage(user_id: "me" message_id: "123456")` +// +// Rules for HTTP mapping +// +// 1. Leaf request fields (recursive expansion nested messages in the request +// message) are classified into three categories: +// - Fields referred by the path template. They are passed via the URL path. +// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They +// are passed via the HTTP +// request body. +// - All other fields are passed via the URL query parameters, and the +// parameter name is the field path in the request message. A repeated +// field can be represented as multiple query parameters under the same +// name. +// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL +// query parameter, all fields +// are passed via URL path and HTTP request body. +// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP +// request body, all +// fields are passed via URL path and URL query parameters. +// +// Path template syntax +// +// Template = "/" Segments [ Verb ] ; +// Segments = Segment { "/" Segment } ; +// Segment = "*" | "**" | LITERAL | Variable ; +// Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; +// Verb = ":" LITERAL ; +// +// The syntax `*` matches a single URL path segment. The syntax `**` matches +// zero or more URL path segments, which must be the last part of the URL path +// except the `Verb`. +// +// The syntax `Variable` matches part of the URL path as specified by its +// template. A variable template must not contain other variables. If a variable +// matches a single path segment, its template may be omitted, e.g. `{var}` +// is equivalent to `{var=*}`. +// +// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` +// contains any reserved character, such characters should be percent-encoded +// before the matching. +// +// If a variable contains exactly one path segment, such as `"{var}"` or +// `"{var=*}"`, when such a variable is expanded into a URL path on the client +// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The +// server side does the reverse decoding. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{var}`. +// +// If a variable contains multiple path segments, such as `"{var=foo/*}"` +// or `"{var=**}"`, when such a variable is expanded into a URL path on the +// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. +// The server side does the reverse decoding, except "%2F" and "%2f" are left +// unchanged. 
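As a toy illustration of the path template grammar above, here is a hedged Rust sketch restricted to `*`, `**`, literals, and single-segment `{var}` bindings (no `{var=...}` sub-templates and no percent-decoding); it is not how any real transcoder is implemented. The notes on variable encoding continue below.

```rust
/// Matches `path` against `template` and returns `{var}` bindings on success.
fn matches(template: &str, path: &str) -> Option<Vec<(String, String)>> {
    let tmpl: Vec<&str> = template.trim_start_matches('/').split('/').collect();
    let segs: Vec<&str> = path.trim_start_matches('/').split('/').collect();
    let mut bindings = Vec::new();
    let mut i = 0;
    for (t_idx, t) in tmpl.iter().enumerate() {
        if *t == "**" {
            // `**` must be the last template segment; it matches the rest.
            return if t_idx == tmpl.len() - 1 { Some(bindings) } else { None };
        }
        let seg = *segs.get(i)?; // fail if the path is too short
        match *t {
            "*" => {} // any single segment
            v if v.starts_with('{') && v.ends_with('}') => {
                bindings.push((v[1..v.len() - 1].to_string(), seg.to_string()));
            }
            lit => {
                if lit != seg {
                    return None;
                }
            }
        }
        i += 1;
    }
    // Fail if the path has unmatched trailing segments.
    if i == segs.len() { Some(bindings) } else { None }
}

fn main() {
    assert_eq!(
        matches("/v1/messages/{message_id}", "/v1/messages/123456"),
        Some(vec![("message_id".to_string(), "123456".to_string())])
    );
    assert_eq!(matches("/v1/messages/*", "/v1/messages/123456"), Some(vec![]));
    assert!(matches("/v1/{name}/**", "/v1/users/a/b/c").is_some());
    assert!(matches("/v1/messages", "/v1/messages/123456").is_none());
}
```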
+// Such multi-segment variables show up in the
+// [Discovery
+// Document](https://developers.google.com/discovery/v1/reference/apis) as
+// `{+var}`.
+//
+// Using gRPC API Service Configuration
+//
+// gRPC API Service Configuration (service config) is a configuration language
+// for configuring a gRPC service to become a user-facing product. The
+// service config is simply the YAML representation of the `google.api.Service`
+// proto message.
+//
+// As an alternative to annotating your proto file, you can configure gRPC
+// transcoding in your service config YAML files. You do this by specifying a
+// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same
+// effect as the proto annotation. This can be particularly useful if you
+// have a proto that is reused in multiple services. Note that any transcoding
+// specified in the service config will override any matching transcoding
+// configuration in the proto.
+//
+// The following example selects a gRPC method and applies an `HttpRule` to it:
+//
+//     http:
+//       rules:
+//         - selector: example.v1.Messaging.GetMessage
+//           get: /v1/messages/{message_id}/{sub.subfield}
+//
+// Special notes
+//
+// When gRPC Transcoding is used to map gRPC to JSON REST endpoints, the
+// proto to JSON conversion must follow the [proto3
+// specification](https://developers.google.com/protocol-buffers/docs/proto3#json).
+//
+// While the single segment variable follows the semantics of
+// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String
+// Expansion, the multi segment variable **does not** follow RFC 6570 Section
+// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion
+// does not expand special characters like `?` and `#`, which would lead
+// to invalid URLs. As a result, gRPC Transcoding uses a custom encoding
+// for multi segment variables.
+//
+// The path variables **must not** refer to any repeated or mapped field,
+// because client libraries are not capable of handling such variable expansion.
+//
+// The path variables **must not** capture the leading "/" character. The reason
+// is that the most common use case "{var}" does not capture the leading "/"
+// character. For consistency, all path variables must share the same behavior.
+//
+// Repeated message fields must not be mapped to URL query parameters, because
+// no client library can support such complicated mapping.
+//
+// If an API needs to use a JSON array for request or response body, it can map
+// the request or response body to a repeated field. However, some gRPC
+// Transcoding implementations may not support this feature.
+message HttpRule {
+  // Selects a method to which this rule applies.
+  //
+  // Refer to [selector][google.api.DocumentationRule.selector] for syntax
+  // details.
+  string selector = 1;
+
+  // Determines the URL pattern matched by this rule. This pattern can be
+  // used with any of the {get|put|post|delete|patch} methods. A custom method
+  // can be defined using the 'custom' field.
+  oneof pattern {
+    // Maps to HTTP GET. Used for listing and getting information about
+    // resources.
+    string get = 2;
+
+    // Maps to HTTP PUT. Used for replacing a resource.
+    string put = 3;
+
+    // Maps to HTTP POST. Used for creating a resource or performing an action.
+    string post = 4;
+
+    // Maps to HTTP DELETE. Used for deleting a resource.
+    string delete = 5;
+
+    // Maps to HTTP PATCH. Used for updating a resource.
+    string patch = 6;
+
+    // The custom pattern is used for specifying an HTTP method that is not
+    // included in the `pattern` field, such as HEAD, or "*" to leave the
+    // HTTP method unspecified for this rule. The wild-card rule is useful
+    // for services that provide content to Web (HTML) clients.
+    CustomHttpPattern custom = 8;
+  }
+
+  // The name of the request field whose value is mapped to the HTTP request
+  // body, or `*` for mapping all request fields not captured by the path
+  // pattern to the HTTP body, or omitted for not having any HTTP request body.
+  //
+  // NOTE: the referred field must be present at the top-level of the request
+  // message type.
+  string body = 7;
+
+  // Optional. The name of the response field whose value is mapped to the HTTP
+  // response body. When omitted, the entire response message will be used
+  // as the HTTP response body.
+  //
+  // NOTE: The referred field must be present at the top-level of the response
+  // message type.
+  string response_body = 12;
+
+  // Additional HTTP bindings for the selector. Nested bindings must
+  // not contain an `additional_bindings` field themselves (that is,
+  // the nesting may only be one level deep).
+  repeated HttpRule additional_bindings = 11;
+}
+
+// A custom pattern is used for defining a custom HTTP verb.
+message CustomHttpPattern {
+  // The name of this custom HTTP verb.
+  string kind = 1;
+
+  // The path matched by this custom verb.
+  string path = 2;
+}
diff --git a/app/bazel_event_publisher_proto/proto/google/api/launch_stage.proto b/app/bazel_event_publisher_proto/proto/google/api/launch_stage.proto
new file mode 100644
index 0000000000000..9863fc23d4229
--- /dev/null
+++ b/app/bazel_event_publisher_proto/proto/google/api/launch_stage.proto
@@ -0,0 +1,72 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+option go_package = "google.golang.org/genproto/googleapis/api;api";
+option java_multiple_files = true;
+option java_outer_classname = "LaunchStageProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+// The launch stage as defined by [Google Cloud Platform
+// Launch Stages](https://cloud.google.com/terms/launch-stages).
+enum LaunchStage {
+  // Do not use this default value.
+  LAUNCH_STAGE_UNSPECIFIED = 0;
+
+  // The feature is not yet implemented. Users cannot use it.
+  UNIMPLEMENTED = 6;
+
+  // Prelaunch features are hidden from users and are only visible internally.
+  PRELAUNCH = 7;
+
+  // Early Access features are limited to a closed group of testers. To use
+  // these features, you must sign up in advance and sign a Trusted Tester
+  // agreement (which includes confidentiality provisions). These features may
+  // be unstable, changed in backward-incompatible ways, and are not
+  // guaranteed to be released.
+  EARLY_ACCESS = 1;
+
+  // Alpha is a limited availability test for releases before they are cleared
+  // for widespread use.
By Alpha, all significant design issues are resolved + // and we are in the process of verifying functionality. Alpha customers + // need to apply for access, agree to applicable terms, and have their + // projects allowlisted. Alpha releases don't have to be feature complete, + // no SLAs are provided, and there are no technical support obligations, but + // they will be far enough along that customers can actually use them in + // test environments or for limited-use tests -- just like they would in + // normal production cases. + ALPHA = 2; + + // Beta is the point at which we are ready to open a release for any + // customer to use. There are no SLA or technical support obligations in a + // Beta release. Products will be complete from a feature perspective, but + // may have some open outstanding issues. Beta releases are suitable for + // limited production use cases. + BETA = 3; + + // GA features are open to all developers and are considered stable and + // fully qualified for production use. + GA = 4; + + // Deprecated features are scheduled to be shut down and removed. For more + // information, see the "Deprecation Policy" section of our [Terms of + // Service](https://cloud.google.com/terms/) + // and the [Google Cloud Platform Subject to the Deprecation + // Policy](https://cloud.google.com/terms/deprecation) documentation. + DEPRECATED = 5; +} diff --git a/app/bazel_event_publisher_proto/proto/google/devtools/build/v1/build_events.proto b/app/bazel_event_publisher_proto/proto/google/devtools/build/v1/build_events.proto new file mode 100644 index 0000000000000..43cf5e275fb95 --- /dev/null +++ b/app/bazel_event_publisher_proto/proto/google/devtools/build/v1/build_events.proto @@ -0,0 +1,187 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.devtools.build.v1; + +import "google/devtools/build/v1/build_status.proto"; +import "google/protobuf/any.proto"; +import "google/protobuf/timestamp.proto"; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/devtools/build/v1;build"; +option java_multiple_files = true; +option java_outer_classname = "BuildEventProto"; +option java_package = "com.google.devtools.build.v1"; +option php_namespace = "Google\\Cloud\\Build\\V1"; + +// An event representing some state change that occurred in the build. This +// message does not include field for uniquely identifying an event. +message BuildEvent { + // Notification that the build system has attempted to run the build tool. + message InvocationAttemptStarted { + // The number of the invocation attempt, starting at 1 and increasing by 1 + // for each new attempt. Can be used to determine if there is a later + // invocation attempt replacing the current one a client is processing. + int64 attempt_number = 1; + + // Arbitrary details about the invocation attempt. + google.protobuf.Any details = 2; + } + + // Notification that an invocation attempt has finished. 
+ message InvocationAttemptFinished { + // Final status of the invocation. + BuildStatus invocation_status = 3; + + // Arbitrary details about the invocation attempt. + google.protobuf.Any details = 4; + } + + // Notification that the build request is enqueued. + message BuildEnqueued { + // Additional details about the Build. + google.protobuf.Any details = 1; + } + + // Notification that the build request has finished, and no further + // invocations will occur. Note that this applies to the entire Build. + // Individual invocations trigger InvocationFinished when they finish. + message BuildFinished { + // Final status of the build. + BuildStatus status = 1; + + // Additional details about the Build. + google.protobuf.Any details = 2; + } + + // Textual output written to standard output or standard error. + message ConsoleOutput { + // The output stream type. + ConsoleOutputStream type = 1; + + // The output stream content. + oneof output { + // Regular UTF-8 output; normal text. + string text_output = 2; + + // Used if the output is not UTF-8 text (for example, a binary proto). + bytes binary_output = 3; + } + } + + // Notification of the end of a build event stream published by a build + // component other than CONTROLLER (See StreamId.BuildComponents). + message BuildComponentStreamFinished { + // How did the event stream finish. + enum FinishType { + // Unknown or unspecified; callers should never set this value. + FINISH_TYPE_UNSPECIFIED = 0; + + // Set by the event publisher to indicate a build event stream is + // finished. + FINISHED = 1; + + // Set by the WatchBuild RPC server when the publisher of a build event + // stream stops publishing events without publishing a + // BuildComponentStreamFinished event whose type equals FINISHED. + EXPIRED = 2; + } + + // How the event stream finished. + FinishType type = 1; + } + + // This should be precisely the time when this event happened, and not when + // the event proto was created or sent. + google.protobuf.Timestamp event_time = 1; + + // ////////////////////////////////////////////////////////////////////////// + // Events that indicate a state change of a build request in the build + // queue. + oneof event { + // An invocation attempt has started. + InvocationAttemptStarted invocation_attempt_started = 51; + + // An invocation attempt has finished. + InvocationAttemptFinished invocation_attempt_finished = 52; + + // The build is enqueued. + BuildEnqueued build_enqueued = 53; + + // The build has finished. Set when the build is terminated. + BuildFinished build_finished = 55; + + // An event containing printed text. + ConsoleOutput console_output = 56; + + // Indicates the end of a build event stream (with the same StreamId) from + // a build component executing the requested build task. + // *** This field does not indicate the WatchBuild RPC is finished. *** + BuildComponentStreamFinished component_stream_finished = 59; + + // Structured build event generated by Bazel about its execution progress. + google.protobuf.Any bazel_event = 60; + + // An event that contains supplemental tool-specific information about + // build execution. + google.protobuf.Any build_execution_event = 61; + + // An event that contains supplemental tool-specific information about + // source fetching. + google.protobuf.Any source_fetch_event = 62; + } +} + +// Unique identifier for a build event stream. +message StreamId { + // Which build component generates this event stream. Each build component + // may generate one event stream. 
+ enum BuildComponent { + // Unknown or unspecified; callers should never set this value. + UNKNOWN_COMPONENT = 0; + + // A component that coordinates builds. + CONTROLLER = 1; + + // A component that runs executables needed to complete a build. + WORKER = 2; + + // A component that builds something. + TOOL = 3; + } + + // The id of a Build message. + string build_id = 1; + + // The unique invocation ID within this build. + // It should be the same as {invocation} (below) during the migration. + string invocation_id = 6; + + // The component that emitted this event. + BuildComponent component = 3; +} + +// The type of console output stream. +enum ConsoleOutputStream { + // Unspecified or unknown. + UNKNOWN = 0; + + // Normal output stream. + STDOUT = 1; + + // Error output stream. + STDERR = 2; +} diff --git a/app/bazel_event_publisher_proto/proto/google/devtools/build/v1/build_status.proto b/app/bazel_event_publisher_proto/proto/google/devtools/build/v1/build_status.proto new file mode 100644 index 0000000000000..93a525f131cab --- /dev/null +++ b/app/bazel_event_publisher_proto/proto/google/devtools/build/v1/build_status.proto @@ -0,0 +1,77 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.devtools.build.v1; + +import "google/protobuf/any.proto"; +import "google/protobuf/wrappers.proto"; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/devtools/build/v1;build"; +option java_multiple_files = true; +option java_outer_classname = "BuildStatusProto"; +option java_package = "com.google.devtools.build.v1"; +option php_namespace = "Google\\Cloud\\Build\\V1"; + +// Status used for both invocation attempt and overall build completion. +message BuildStatus { + // The end result of the Build. + enum Result { + // Unspecified or unknown. + UNKNOWN_STATUS = 0; + + // Build was successful and tests (if requested) all pass. + COMMAND_SUCCEEDED = 1; + + // Build error and/or test failure. + COMMAND_FAILED = 2; + + // Unable to obtain a result due to input provided by the user. + USER_ERROR = 3; + + // Unable to obtain a result due to a failure within the build system. + SYSTEM_ERROR = 4; + + // Build required too many resources, such as build tool RAM. + RESOURCE_EXHAUSTED = 5; + + // An invocation attempt time exceeded its deadline. + INVOCATION_DEADLINE_EXCEEDED = 6; + + // Build request time exceeded the request_deadline + REQUEST_DEADLINE_EXCEEDED = 8; + + // The build was cancelled by a call to CancelBuild. + CANCELLED = 7; + } + + // The end result. + Result result = 1; + + // Final invocation ID of the build, if there was one. + // This field is only set on a status in BuildFinished event. + string final_invocation_id = 3; + + // Build tool exit code. Integer value returned by the executed build tool. + // Might not be available in some cases, e.g., a build timeout. + google.protobuf.Int32Value build_tool_exit_code = 4; + + // Human-readable error message. 
Do not use for programmatic purposes. + string error_message = 5; + + // Fine-grained diagnostic information to complement the status. + google.protobuf.Any details = 2; +} diff --git a/app/bazel_event_publisher_proto/proto/google/devtools/build/v1/publish_build_event.proto b/app/bazel_event_publisher_proto/proto/google/devtools/build/v1/publish_build_event.proto new file mode 100644 index 0000000000000..641ba5af2e9a4 --- /dev/null +++ b/app/bazel_event_publisher_proto/proto/google/devtools/build/v1/publish_build_event.proto @@ -0,0 +1,187 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.devtools.build.v1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/devtools/build/v1/build_events.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/empty.proto"; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/devtools/build/v1;build"; +option java_multiple_files = true; +option java_outer_classname = "BackendProto"; +option java_package = "com.google.devtools.build.v1"; +option php_namespace = "Google\\Cloud\\Build\\V1"; + +// A service for publishing BuildEvents. BuildEvents are generated by Build +// Systems to record actions taken during a Build. Events occur in streams, +// are identified by a StreamId, and ordered by sequence number in a stream. +// +// A Build may contain several streams of BuildEvents, depending on the systems +// that are involved in the Build. Some BuildEvents are used to declare the +// beginning and end of major portions of a Build; these are called +// LifecycleEvents, and are used (for example) to indicate the beginning or end +// of a Build, and the beginning or end of an Invocation attempt (there can be +// more than 1 Invocation in a Build if, for example, a failure occurs somewhere +// and it needs to be retried). +// +// Other, build-tool events represent actions taken by the Build tool, such as +// target objects produced via compilation, tests run, et cetera. There could be +// more than one build tool stream for an invocation attempt of a build. +service PublishBuildEvent { + option (google.api.default_host) = "buildeventservice.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; + + // Publish a build event stating the new state of a build (typically from the + // build queue). The BuildEnqueued event must be published before all other + // events for the same build ID. + // + // The backend will persist the event and deliver it to registered frontend + // jobs immediately without batching. + // + // The commit status of the request is reported by the RPC's util_status() + // function. The error code is the canonical error code defined in + // //util/task/codes.proto. 
+  rpc PublishLifecycleEvent(PublishLifecycleEventRequest)
+      returns (google.protobuf.Empty) {
+    option (google.api.http) = {
+      post: "/v1/projects/{project_id=*}/lifecycleEvents:publish"
+      body: "*"
+      additional_bindings { post: "/v1/lifecycleEvents:publish" body: "*" }
+    };
+  }
+
+  // Publish build tool events belonging to the same stream to a backend job
+  // using bidirectional streaming.
+  rpc PublishBuildToolEventStream(stream PublishBuildToolEventStreamRequest)
+      returns (stream PublishBuildToolEventStreamResponse) {
+    option (google.api.http) = {
+      post: "/v1/projects/{project_id=*}/events:publish"
+      body: "*"
+      additional_bindings { post: "/v1/events:publish" body: "*" }
+    };
+    option (google.api.method_signature) =
+        "ordered_build_event,notification_keywords,project_id";
+  }
+}
+
+// Publishes 'lifecycle events' that update the high-level state of a build:
+// - BuildEnqueued: When a build is scheduled.
+// - InvocationAttemptStarted: When work for a build starts; there can be
+//   multiple invocations for a build (e.g. retries).
+// - InvocationAttemptCompleted: When work for a build finishes.
+// - BuildFinished: When a build is finished.
+message PublishLifecycleEventRequest {
+  // The service level of the build request. Backends only use this value when
+  // the BuildEnqueued event is published to determine what level of service
+  // this build should receive.
+  enum ServiceLevel {
+    // Non-interactive builds can tolerate longer event latencies. This is the
+    // default ServiceLevel if callers do not specify one.
+    NONINTERACTIVE = 0;
+
+    // The events of an interactive build should be delivered with low latency.
+    INTERACTIVE = 1;
+  }
+
+  // The interactivity of this build.
+  ServiceLevel service_level = 1;
+
+  // Required. The lifecycle build event. If this is a build tool event, the RPC
+  // will fail with INVALID_REQUEST.
+  OrderedBuildEvent build_event = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // If the next event for this build or invocation (depending on the event
+  // type) hasn't been published after this duration from when {build_event}
+  // is written to BES, consider this stream expired. If this field is not set,
+  // BES backend will use its own default value.
+  google.protobuf.Duration stream_timeout = 3;
+
+  // Additional information about a build request. These are defined by the
+  // event publishers, and the Build Event Service does not validate or
+  // interpret them. They are used while notifying internal systems of new
+  // builds and invocations if the OrderedBuildEvent.event type is
+  // BuildEnqueued/InvocationAttemptStarted.
+  repeated string notification_keywords = 4;
+
+  // Required. The project this build is associated with.
+  // This should match the project used for the initial call to
+  // PublishLifecycleEvent (containing a BuildEnqueued message).
+  string project_id = 6 [(google.api.field_behavior) = REQUIRED];
+
+  // Whether to require a previously received matching parent lifecycle event
+  // for the current request's event before continuing processing.
+  // - InvocationAttemptStarted and BuildFinished events require a BuildEnqueued
+  //   parent event.
+  // - InvocationAttemptFinished events require an InvocationAttemptStarted
+  //   parent event.
+  bool check_preceding_lifecycle_events_present = 7;
+}
+
+// States which event has been committed. Any failure to commit will cause
+// RPC errors and is therefore not recorded by this proto.
+message PublishBuildToolEventStreamResponse {
+  // The stream that contains this event.
+ StreamId stream_id = 1; + + // The sequence number of this event that has been committed. + int64 sequence_number = 2; +} + +// Build event with contextual information about the stream it belongs to and +// its position in that stream. +message OrderedBuildEvent { + // Which build event stream this event belongs to. + StreamId stream_id = 1; + + // The position of this event in the stream. The sequence numbers for a build + // event stream should be a sequence of consecutive natural numbers starting + // from one. (1, 2, 3, ...) + int64 sequence_number = 2; + + // The actual event. + BuildEvent event = 3; +} + +// Streaming request message for PublishBuildToolEventStream. +message PublishBuildToolEventStreamRequest { + // Required. The build event with position info. + // New publishing clients should use this field rather than the 3 above. + OrderedBuildEvent ordered_build_event = 4 + [(google.api.field_behavior) = REQUIRED]; + + // The keywords to be attached to the notification which notifies the start + // of a new build event stream. BES only reads this field when sequence_number + // or ordered_build_event.sequence_number is 1 in this message. If this field + // is empty, BES will not publish notification messages for this stream. + repeated string notification_keywords = 5; + + // Required. The project this build is associated with. + // This should match the project used for the initial call to + // PublishLifecycleEvent (containing a BuildEnqueued message). + string project_id = 6 [(google.api.field_behavior) = REQUIRED]; + + // Whether to require a previously received matching InvocationAttemptStarted + // event before continuing event processing for the event in the current + // request. BES only performs this check for events with sequence_number 1 + // i.e. the first event in the stream. + bool check_preceding_lifecycle_events_present = 7; +} diff --git a/app/bazel_event_publisher_proto/proto/invocation_policy.proto b/app/bazel_event_publisher_proto/proto/invocation_policy.proto new file mode 100644 index 0000000000000..a6820f0af580d --- /dev/null +++ b/app/bazel_event_publisher_proto/proto/invocation_policy.proto @@ -0,0 +1,207 @@ +// Copyright 2015 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto2"; + +package blaze.invocation_policy; + +import "strategy_policy.proto"; + +// option java_api_version = 2; +option java_package = "com.google.devtools.build.lib.runtime.proto"; + +// The --invocation_policy flag takes a base64-encoded binary-serialized or text +// formatted InvocationPolicy message. +message InvocationPolicy { + // Order matters. + // After expanding policies on expansion flags or flags with implicit + // requirements, only the final policy on a specific flag will be enforced + // onto the user's command line. + repeated FlagPolicy flag_policies = 1; + + optional blaze.strategy_policy.StrategyPolicy strategy_policy = 2; +} + +// A policy for controlling the value of a flag. 
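Stepping back from the invocation-policy file for a moment: the stream framing defined by `StreamId` and `OrderedBuildEvent` above boils down to per-stream sequence numbers starting at 1. A minimal hand-rolled Rust sketch follows (illustrative only; real code would use the tonic-generated types from this crate):

```rust
// Hand-rolled mirrors of StreamId and OrderedBuildEvent (illustrative).
#[derive(Clone, Debug)]
struct StreamId {
    build_id: String,
    invocation_id: String,
}

#[derive(Debug)]
struct OrderedBuildEvent {
    stream_id: StreamId,
    sequence_number: i64, // consecutive naturals: 1, 2, 3, ...
}

// Stamps each event on one stream with the next sequence number; note that
// BES reads notification keywords only on the event with sequence number 1.
struct StreamPublisher {
    stream_id: StreamId,
    next_sequence_number: i64,
}

impl StreamPublisher {
    fn new(build_id: &str, invocation_id: &str) -> Self {
        StreamPublisher {
            stream_id: StreamId {
                build_id: build_id.to_string(),
                invocation_id: invocation_id.to_string(),
            },
            next_sequence_number: 1,
        }
    }

    fn stamp(&mut self) -> OrderedBuildEvent {
        let event = OrderedBuildEvent {
            stream_id: self.stream_id.clone(),
            sequence_number: self.next_sequence_number,
        };
        self.next_sequence_number += 1;
        event
    }
}

fn main() {
    let mut publisher = StreamPublisher::new("build-123", "inv-1");
    assert_eq!(publisher.stamp().sequence_number, 1);
    assert_eq!(publisher.stamp().sequence_number, 2);
}
```

The `FlagPolicy` message introduced by the comment above follows.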
+message FlagPolicy { + // The name of the flag to enforce this policy on. + // + // Note that this should be the full name of the flag, not the abbreviated + // name of the flag. If the user specifies the abbreviated name of a flag, + // that flag will be matched using its full name. + // + // The "no" prefix will not be parsed, so for boolean flags, use + // the flag's full name and explicitly set it to true or false. + optional string flag_name = 1; + + // If set, this flag policy is applied only if one of the given commands or a + // command that inherits from one of the given commands is being run. For + // instance, if "build" is one of the commands here, then this policy will + // apply to any command that inherits from build, such as info, coverage, or + // test. If empty, this flag policy is applied for all commands. This allows + // the policy setter to add all policies to the proto without having to + // determine which Bazel command the user is actually running. Additionally, + // Bazel allows multiple flags to be defined by the same name, and the + // specific flag definition is determined by the command. + repeated string commands = 2; + + oneof operation { + SetValue set_value = 3; + UseDefault use_default = 4; + DisallowValues disallow_values = 5; + AllowValues allow_values = 6; + } +} + +message SetValue { + // Use this value for the specified flag, overriding any default or user-set + // value (unless behavior = APPEND for repeatable flags). + // + // This field is repeated for repeatable flags. It is an error to set + // multiple values for a flag that is not actually a repeatable flag. + // This requires at least 1 value, if even the empty string. + // + // If the flag allows multiple values, all of its values are replaced with the + // value or values from the policy (i.e., no diffing or merging is performed), + // unless behavior = APPEND (see below). + // + // Note that some flags are tricky. For example, some flags look like boolean + // flags, but are actually Void expansion flags that expand into other flags. + // The Bazel flag parser will accept "--void_flag=false", but because + // the flag is Void, the "=false" is ignored. It can get even trickier, like + // "--novoid_flag" which is also an expansion flag with the type Void whose + // name is explicitly "novoid_flag" and which expands into other flags that + // are the opposite of "--void_flag". For expansion flags, it's best to + // explicitly override the flags they expand into. + // + // Other flags may be differently tricky: A flag could have a converter that + // converts some string to a list of values, but that flag may not itself have + // allowMultiple set to true. + // + // An example is "--test_tag_filters": this flag sets its converter to + // CommaSeparatedOptionListConverter, but does not set allowMultiple to true. + // So "--test_tag_filters=foo,bar" results in ["foo", "bar"], however + // "--test_tag_filters=foo --test_tag_filters=bar" results in just ["bar"] + // since the 2nd value overrides the 1st. + // + // Similarly, "--test_tag_filters=foo,bar --test_tag_filters=baz,qux" results + // in ["baz", "qux"]. For flags like these, the policy should specify + // "foo,bar" instead of separately specifying "foo" and "bar" so that the + // converter is appropriately invoked. + // + // Note that the opposite is not necessarily + // true: for a flag that specifies allowMultiple=true, "--flag=foo,bar" + // may fail to parse or result in an unexpected value. 
+  repeated string flag_value = 1;
+
+  // Obsolete overridable and append fields.
+  reserved 2, 3;
+
+  enum Behavior {
+    UNDEFINED = 0;
+    // Change the flag value but allow it to be overridden by explicit settings
+    // from command line/config expansion/rc files.
+    // Matching old flag values: append = false, overridable = true.
+    ALLOW_OVERRIDES = 1;
+    // Append a new value for a repeatable flag, leave old values and allow
+    // further overrides.
+    // Matching old flag values: append = true, overridable = false.
+    APPEND = 2;
+    // Set a final value of the flag. Any overrides provided by the user for
+    // this flag will be ignored.
+    // Matching old flag values: append = false, overridable = false.
+    FINAL_VALUE_IGNORE_OVERRIDES = 3;
+  }
+
+  // Defines how invocation policy should interact with user settings for the
+  // same flag.
+  optional Behavior behavior = 4;
+}
+
+message UseDefault {
+  // Use the default value of the flag, as defined by Bazel (or equivalently, do
+  // not allow the user to set this flag).
+  //
+  // Note on implementation: UseDefault sets the default by clearing the flag,
+  // so that when the value is requested and no flag is found, the flag parser
+  // returns the default. This is mostly relevant for expansion flags: it will
+  // erase user values in *all* flags that the expansion flag expands to. Only
+  // use this on expansion flags if this is acceptable behavior. Since the last
+  // policy wins, later policies on this same flag will still remove the
+  // expanded UseDefault, so there is a way around, but it's really best not to
+  // use this on expansion flags at all.
+}
+
+message DisallowValues {
+  // Obsolete new_default_value field.
+  reserved 2;
+
+  // It is an error for the user to use any of these values (that is, the Bazel
+  // command will fail), unless new_value or use_default is set.
+  //
+  // For repeatable flags, if any one of the values in the flag matches a value
+  // in the list of disallowed values, an error is thrown.
+  //
+  // Care must be taken for flags with complicated converters. For example,
+  // it's possible for a repeated flag to be of type List<List<String>>, so that
+  // "--foo=a,b --foo=c,d" results in foo=[["a","b"], ["c", "d"]]. In this case,
+  // it is not possible to disallow just "b", nor will ["b", "a"] match, nor
+  // will ["b", "c"] (but ["a", "b"] will still match).
+  repeated string disallowed_values = 1;
+
+  oneof replacement_value {
+    // If set and if the value of the flag is disallowed (including the default
+    // value of the flag if the user doesn't specify a value), use this value as
+    // the value of the flag instead of raising an error. This does not apply to
+    // repeatable flags and is ignored if the flag is a repeatable flag.
+    string new_value = 3;
+
+    // If set and if the value of the flag is disallowed, use the default value
+    // of the flag instead of raising an error. Unlike new_value, this works for
+    // repeatable flags, but note that the default value for repeatable flags is
+    // always empty.
+    //
+    // Note that it is an error to disallow the default value of the flag and
+    // to set use_default, unless the flag is a repeatable flag where the
+    // default value is always the empty list.
+    UseDefault use_default = 4;
+  }
+}
+
+message AllowValues {
+  // Obsolete new_default_value field.
+  reserved 2;
+
+  // It is an error for the user to use any value not in this list, unless
+  // new_value or use_default is set.
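As an aside on the `Behavior` values defined above, here is a hedged Rust sketch of how a policy value could combine with user-supplied values under each mode; it is illustrative only, not Bazel's enforcement code. The `AllowValues` fields continue below.

```rust
#[derive(Clone, Copy)]
enum Behavior {
    AllowOverrides,            // policy is a default; explicit user values win
    Append,                    // repeatable flags: user values plus policy values
    FinalValueIgnoreOverrides, // policy wins unconditionally
}

fn apply_set_value(policy: &[&str], user: &[&str], behavior: Behavior) -> Vec<String> {
    let owned = |vals: &[&str]| vals.iter().map(|v| v.to_string()).collect::<Vec<_>>();
    match behavior {
        Behavior::AllowOverrides if !user.is_empty() => owned(user),
        Behavior::AllowOverrides => owned(policy),
        Behavior::Append => {
            let mut out = owned(user);
            out.extend(owned(policy));
            out
        }
        Behavior::FinalValueIgnoreOverrides => owned(policy),
    }
}

fn main() {
    assert_eq!(
        apply_set_value(&["p"], &["u"], Behavior::AllowOverrides),
        vec!["u".to_string()]
    );
    assert_eq!(
        apply_set_value(&["p"], &["u"], Behavior::FinalValueIgnoreOverrides),
        vec!["p".to_string()]
    );
}
```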
+ repeated string allowed_values = 1; + + oneof replacement_value { + // If set and if the value of the flag is disallowed (including the default + // value of the flag if the user doesn't specify a value), use this value as + // the value of the flag instead of raising an error. This does not apply to + // repeatable flags and is ignored if the flag is a repeatable flag. + string new_value = 3; + + // If set and if the value of the flag is disallowed, use the default value + // of the flag instead of raising an error. Unlike new_value, this works for + // repeatable flags, but note that the default value for repeatable flags is + // always empty. + // + // Note that it is an error to disallow the default value of the flag and + // to set use_default, unless the flag is a repeatable flag where the + // default value is always the empty list. + UseDefault use_default = 4; + } +} diff --git a/app/bazel_event_publisher_proto/proto/option_filters.proto b/app/bazel_event_publisher_proto/proto/option_filters.proto new file mode 100644 index 0000000000000..629e006888a31 --- /dev/null +++ b/app/bazel_event_publisher_proto/proto/option_filters.proto @@ -0,0 +1,61 @@ +// Copyright 2017 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +syntax = "proto3"; + +package options; + +// option java_api_version = 2; +option java_package = "com.google.devtools.common.options.proto"; + +// IMPORTANT NOTE: These two enums must be kept in sync with their Java +// equivalents in src/main/java/com/google/devtools/common/options. +// Changing this proto has specific compatibility requirements, please see the +// Java documentation for details. + +// Docs in java enum. +enum OptionEffectTag { + // This option's effect or intent is unknown. + UNKNOWN = 0; + + // This flag has literally no effect. + NO_OP = 1; + + LOSES_INCREMENTAL_STATE = 2; + CHANGES_INPUTS = 3; + AFFECTS_OUTPUTS = 4; + BUILD_FILE_SEMANTICS = 5; + BAZEL_INTERNAL_CONFIGURATION = 6; + LOADING_AND_ANALYSIS = 7; + EXECUTION = 8; + HOST_MACHINE_RESOURCE_OPTIMIZATIONS = 9; + EAGERNESS_TO_EXIT = 10; + BAZEL_MONITORING = 11; + TERMINAL_OUTPUT = 12; + ACTION_COMMAND_LINES = 13; + TEST_RUNNER = 14; +} + +// Docs in java enum. +enum OptionMetadataTag { + EXPERIMENTAL = 0; + INCOMPATIBLE_CHANGE = 1; + DEPRECATED = 2; + HIDDEN = 3; + INTERNAL = 4; + reserved "TRIGGERED_BY_ALL_INCOMPATIBLE_CHANGES"; + reserved 5; + reserved "EXPLICIT_IN_OUTPUT_PATH"; + reserved 6; + IMMUTABLE = 7; +} diff --git a/app/bazel_event_publisher_proto/proto/package_load_metrics.proto b/app/bazel_event_publisher_proto/proto/package_load_metrics.proto new file mode 100644 index 0000000000000..e27ecceaf4456 --- /dev/null +++ b/app/bazel_event_publisher_proto/proto/package_load_metrics.proto @@ -0,0 +1,44 @@ +// Copyright 2020 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+syntax = "proto2";
+
+package devtools.build.lib.packages.metrics;
+
+import "google/protobuf/duration.proto";
+
+option java_package = "com.google.devtools.build.lib.packages.metrics";
+option java_multiple_files = true;
+
+// Message used to concisely report all package metrics.
+message PackageLoadMetrics {
+  // Name of the package.
+  optional string name = 1;
+
+  // Wall-time duration it took to construct the package.
+  optional google.protobuf.Duration load_duration = 2;
+
+  // Number of targets created in the package.
+  optional uint64 num_targets = 3;
+
+  // Number of Starlark computation steps required to create the package.
+  optional uint64 computation_steps = 4;
+
+  // Number of transitive Starlark load()s required to create the package.
+  optional uint64 num_transitive_loads = 5;
+
+  // Numeric value given to the memory and general accounting costs associated
+  // with a loaded package. Values are an approximate but reasonable proxy for
+  // the real storage costs of a package.
+  optional uint64 package_overhead = 6;
+}
diff --git a/app/bazel_event_publisher_proto/proto/strategy_policy.proto b/app/bazel_event_publisher_proto/proto/strategy_policy.proto
new file mode 100644
index 0000000000000..0f58c9b81f359
--- /dev/null
+++ b/app/bazel_event_publisher_proto/proto/strategy_policy.proto
@@ -0,0 +1,67 @@
+// Copyright 2022 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto2";
+
+package blaze.strategy_policy;
+
+option java_multiple_files = true;
+// option java_api_version = 2;
+option java_package = "com.google.devtools.build.lib.runtime.proto";
+
+// Provides control over what strategies (local, remote, etc) may be used.
+//
+// An empty policy (e.g. unset) implies no enforcement; anything is allowed.
+//
+// Policies are enforced against both user-provided values (flags) and
+// application-internal defaults. The latter is useful for guarding against
+// unexpectedly hard-coded defaults.
+//
+// Sample usage to allow everything to execute remotely, while only allowing
+// genrules to execute locally:
+//
+//   strategy_policy {
+//     mnemonic_policy {
+//       default_allowlist: ["remote"]
+//       strategy_allowlist: [
+//         { mnemonic: "Genrule" strategy: ["local"] }
+//       ]
+//     }
+//   }
+message StrategyPolicy {
+  // Controls per-mnemonic policies for regular spawn/action execution. Relevant
+  // command-line flags this controls include --strategy and --genrule_strategy.
+  optional MnemonicPolicy mnemonic_policy = 1;
+
+  // Controls per-mnemonic policies for the remote execution leg of dynamic
+  // execution.
Relevant flag is --dynamic_remote_strategy. + optional MnemonicPolicy dynamic_remote_policy = 2; + + // Controls per-mnemonic policies for the local execution leg of dynamic + // execution. Relevant flag is --dynamic_local_strategy. + optional MnemonicPolicy dynamic_local_policy = 3; +} + +message MnemonicPolicy { + // Default allowed strategies for mnemonics not present in `strategy` list. + repeated string default_allowlist = 1; + + repeated StrategiesForMnemonic strategy_allowlist = 2; +} + +// Per-mnemonic allowlist settings. +message StrategiesForMnemonic { + optional string mnemonic = 1; + repeated string strategy = 2; +} diff --git a/app/bazel_event_publisher_proto/src/lib.rs b/app/bazel_event_publisher_proto/src/lib.rs new file mode 100644 index 0000000000000..e34b7627fb57b --- /dev/null +++ b/app/bazel_event_publisher_proto/src/lib.rs @@ -0,0 +1,61 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +#![feature(error_generic_member_access)] + +pub mod blaze { + tonic::include_proto!("blaze"); + pub mod invocation_policy { + tonic::include_proto!("blaze.invocation_policy"); + } + pub mod strategy_policy { + tonic::include_proto!("blaze.strategy_policy"); + } +} + +pub mod build_event_stream { + tonic::include_proto!("build_event_stream"); +} + +pub mod command_line { + tonic::include_proto!("command_line"); +} + +pub mod devtools { + pub mod build { + pub mod lib { + pub mod packages { + pub mod metrics { + tonic::include_proto!("devtools.build.lib.packages.metrics"); + } + } + } + } +} + +pub mod failure_details { + tonic::include_proto!("failure_details"); +} + +pub mod google { + pub mod api { + tonic::include_proto!("google.api"); + } + pub mod devtools { + pub mod build { + pub mod v1 { + tonic::include_proto!("google.devtools.build.v1"); + } + } + } +} + +pub mod options { + tonic::include_proto!("options"); +} diff --git a/app/buck2/BUCK b/app/buck2/BUCK index 54b965d7d3642..41ef2c79261b1 100644 --- a/app/buck2/BUCK +++ b/app/buck2/BUCK @@ -1,13 +1,12 @@ load("@fbcode//buck2:buck_rust_binary.bzl", "buck_rust_binary") +# @oss-disable: load("@fbcode_macros//build_defs:platform_utils.bzl", "platform_utils") load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load( - "@fbsource//tools/build_defs:audit_dependencies_test.bzl", - "audit_dependencies_test", -) -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") +load(":transition.bzl", "buck2_client_transition_alias") oncall("build_infra") +prelude = native + rust_library( name = "buck2", srcs = glob(["src/**/*.rs"]), @@ -15,13 +14,17 @@ rust_library( ( "linux", [ + "fbsource//third-party/rust:dirs", "fbsource//third-party/rust:nix", + "//buck2/app/buck2_forkserver:buck2_forkserver", ], ), ( "macos", [ + "fbsource//third-party/rust:dirs", "fbsource//third-party/rust:nix", + "//buck2/app/buck2_forkserver:buck2_forkserver", ], ), ( @@ -31,51 +34,47 @@ rust_library( ], ), ], + rustc_flags = select({ + ":buck2_client_only_build": ["--cfg=client_only"], + "DEFAULT": [], + }), deps = [ "fbsource//third-party/rust:anyhow", - "fbsource//third-party/rust:async-trait", "fbsource//third-party/rust:backtrace", - "fbsource//third-party/rust:clap-3", - "fbsource//third-party/rust:dirs", - "fbsource//third-party/rust:futures", + 
"fbsource//third-party/rust:clap", "fbsource//third-party/rust:libc", - "fbsource//third-party/rust:rand", - "fbsource//third-party/rust:serde", - "fbsource//third-party/rust:serde_json", - "fbsource//third-party/rust:termimad", - "fbsource//third-party/rust:thiserror", "fbsource//third-party/rust:tokio", - "fbsource//third-party/rust:tokio-stream", "fbsource//third-party/rust:tracing", - "//buck2/allocative/allocative:allocative", "//buck2/app/buck2_audit:buck2_audit", - "//buck2/app/buck2_audit_server:buck2_audit_server", - "//buck2/app/buck2_cli_proto:buck2_cli_proto", "//buck2/app/buck2_client:buck2_client", "//buck2/app/buck2_client_ctx:buck2_client_ctx", + "//buck2/app/buck2_cmd_completion_client:buck2_cmd_completion_client", + "//buck2/app/buck2_cmd_docs:buck2_cmd_docs", + "//buck2/app/buck2_cmd_starlark_client:buck2_cmd_starlark_client", "//buck2/app/buck2_common:buck2_common", "//buck2/app/buck2_core:buck2_core", "//buck2/app/buck2_data:buck2_data", + "//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_event_observer:buck2_event_observer", "//buck2/app/buck2_events:buck2_events", - "//buck2/app/buck2_forkserver:buck2_forkserver", - "//buck2/app/buck2_query:buck2_query", - "//buck2/app/buck2_server:buck2_server", - "//buck2/app/buck2_server_ctx:buck2_server_ctx", - "//buck2/app/buck2_starlark:buck2_starlark", - "//buck2/app/buck2_test_runner:buck2_test_runner", "//buck2/app/buck2_util:buck2_util", "//buck2/app/buck2_wrapper_common:buck2_wrapper_common", - "//buck2/dice/dice:dice", "//buck2/gazebo/dupe:dupe", - "//common/rust/gflags:gflags", "//common/rust/shed/fbinit:fbinit", - ], + ] + select({ + ":buck2_client_only_build": [], + "DEFAULT": [ + "//buck2/app/buck2_daemon:buck2_daemon", + "//buck2/app/buck2_server:buck2_server", + "//buck2/app/buck2_test_runner:buck2_test_runner", + ], + }), ) buck_rust_binary( name = "buck2-bin", srcs = ["bin/buck2.rs"], + allow_cache_upload = True, crate = "buck2", crate_root = "bin/buck2.rs", env = { @@ -88,48 +87,63 @@ buck_rust_binary( # This config value is set by the upload script, and controls the output of `--internal-version` on windows "BUCK2_WIN_INTERNAL_VERSION": read_config("buck", "win_internal_version", ""), }, + rustc_flags = select({ + ":buck2_client_only_build": ["--cfg=client_only"], + "DEFAULT": [], + }), unittests = False, deps = [ "fbsource//third-party/rust:anyhow", "fbsource//third-party/rust:tracing", "//buck2/app/buck2:buck2", - "//buck2/app/buck2_action_impl:buck2_action_impl", - "//buck2/app/buck2_analysis:buck2_analysis", - "//buck2/app/buck2_anon_target:buck2_anon_target", - "//buck2/app/buck2_audit_server:buck2_audit_server", - "//buck2/app/buck2_build_api:buck2_build_api", "//buck2/app/buck2_build_info:buck2_build_info", - "//buck2/app/buck2_build_signals_impl:buck2_build_signals_impl", - "//buck2/app/buck2_bxl:buck2_bxl", - "//buck2/app/buck2_cfg_constructor:buck2_cfg_constructor", "//buck2/app/buck2_client_ctx:buck2_client_ctx", - "//buck2/app/buck2_configured:buck2_configured", "//buck2/app/buck2_core:buck2_core", + "//buck2/app/buck2_event_log:buck2_event_log", "//buck2/app/buck2_events:buck2_events", - "//buck2/app/buck2_interpreter_for_build:buck2_interpreter_for_build", - "//buck2/app/buck2_query_impls:buck2_query_impls", - "//buck2/app/buck2_server_commands:buck2_server_commands", - "//buck2/app/buck2_test:buck2_test", - "//buck2/app/buck2_transition:buck2_transition", "//buck2/app/buck2_wrapper_common:buck2_wrapper_common", "//buck2/gazebo/dupe:dupe", - "//common/rust/folly/logging:logging", - 
"//common/rust/gflags:gflags", - "//common/rust/shed/fbinit:fbinit", - ], + ] + select({ + ":buck2_client_only_build": [], + "DEFAULT": [ + "//buck2/app/buck2_action_impl:buck2_action_impl", + "//buck2/app/buck2_analysis:buck2_analysis", + "//buck2/app/buck2_anon_target:buck2_anon_target", + "//buck2/app/buck2_audit_server:buck2_audit_server", + "//buck2/app/buck2_build_api:buck2_build_api", + "//buck2/app/buck2_build_signals_impl:buck2_build_signals_impl", + "//buck2/app/buck2_bxl:buck2_bxl", + "//buck2/app/buck2_cfg_constructor:buck2_cfg_constructor", + "//buck2/app/buck2_cmd_docs_server:buck2_cmd_docs_server", + "//buck2/app/buck2_cmd_starlark_server:buck2_cmd_starlark_server", + "//buck2/app/buck2_configured:buck2_configured", + "//buck2/app/buck2_external_cells:buck2_external_cells", + "//buck2/app/buck2_interpreter_for_build:buck2_interpreter_for_build", + "//buck2/app/buck2_query_impls:buck2_query_impls", + "//buck2/app/buck2_server_commands:buck2_server_commands", + "//buck2/app/buck2_test:buck2_test", + "//buck2/app/buck2_transition:buck2_transition", + "//buck2/app/buck2_validation:buck2_validation", + ], + }), ) -# We use //third-party/blake3/blake3-rust to avoid ODR violations instead. -audit_dependencies_test( - name = "ban_third_party_rust_blake3", - blocklist_patterns = [ - "fbsource//third-party/rust:blake3.*", - ], - # audit_dependencies_test doesn't work on Windows as the macro relies on bash - compatible_with = [ - "ovr_config//os:linux", - "ovr_config//os:macos", - ], - contacts = ["oncall+build_infra@xmail.facebook.com"], - rule = ":buck2", +platform_utils = None # @oss-enable + +_dtp = platform_utils.get_cxx_platform_for_base_path(prelude.package_name()).target_platform if platform_utils else None + +prelude.constraint_setting( + name = "buck2_client_only_setting", +) + +prelude.constraint_value( + name = "buck2_client_only_build", + constraint_setting = ":buck2_client_only_setting", +) + +buck2_client_transition_alias( + name = "buck2_client-bin", + actual = ":buck2-bin", + default_target_platform = _dtp, + visibility = ["PUBLIC"], ) diff --git a/app/buck2/Cargo.toml b/app/buck2/Cargo.toml index f0088ad4781a2..d09175d0a2376 100644 --- a/app/buck2/Cargo.toml +++ b/app/buck2/Cargo.toml @@ -1,40 +1,25 @@ [package] description = "" edition = "2021" +license = { workspace = true } name = "buck2" +repository = { workspace = true } version = "0.1.0" [dependencies] anyhow = { workspace = true } -async-trait = { workspace = true } backtrace = { workspace = true } blake3 = { workspace = true } clap = { workspace = true } -dirs = { workspace = true } -futures = { workspace = true } +fbinit = { workspace = true } hex = { workspace = true } -# @oss-disable: hostcaps = { path = "../../../common/rust/shed/hostcaps" } libc = { workspace = true } -rand = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } -termimad = { workspace = true } -thiserror = { workspace = true } tokio = { workspace = true } -tokio-stream = { workspace = true } tracing = { workspace = true } -which = "4.2.4" -dice = { workspace = true } -fbinit = { workspace = true } -gazebo = { workspace = true } dupe = { workspace = true } -gazebo_lint.version = "0.1" -gazebo_lint.optional = true -# @oss-disable: gazebo_lint.path = "../../gazebo_lint/gazebo_lint" +gazebo = { workspace = true } host_sharing = { workspace = true } -# @oss-disable: user = { path = "../../../common/rust/user", optional = true } -allocative = { workspace = true } buck2_action_impl = { workspace = true } 
buck2_analysis = { workspace = true } @@ -47,35 +32,43 @@ buck2_build_signals = { workspace = true } buck2_build_signals_impl = { workspace = true } buck2_bxl = { workspace = true } buck2_cfg_constructor = { workspace = true } -buck2_configured = { workspace = true } -buck2_core = { workspace = true } -buck2_query = { workspace = true } -buck2_query_impls = { workspace = true } buck2_client = { workspace = true } buck2_client_ctx = { workspace = true } +buck2_cmd_completion_client = { workspace = true } +buck2_cmd_docs = { workspace = true } +buck2_cmd_docs_server = { workspace = true } +buck2_cmd_starlark_client = { workspace = true } +buck2_cmd_starlark_server = { workspace = true } buck2_common = { workspace = true } +buck2_configured = { workspace = true } +buck2_core = { workspace = true } +buck2_daemon = { workspace = true } buck2_data = { workspace = true } +buck2_error = { workspace = true } +buck2_event_log = { workspace = true } buck2_event_observer = { workspace = true } buck2_events = { workspace = true } -buck2_forkserver = { workspace = true } +buck2_external_cells = { workspace = true } buck2_interpreter_for_build = { workspace = true } +buck2_query_impls = { workspace = true } buck2_server = { workspace = true } buck2_server_commands = { workspace = true } -buck2_server_ctx = { workspace = true } -buck2_starlark = { workspace = true } buck2_test = { workspace = true } buck2_test_runner = { workspace = true } buck2_transition = { workspace = true } -buck2_cli_proto = { workspace = true } buck2_util = { workspace = true } +buck2_validation = { workspace = true } buck2_wrapper_common = { workspace = true } [target.'cfg(any(target_os = "linux", target_os = "macos"))'.dependencies] jemallocator = { workspace = true } [target.'cfg(unix)'.dependencies] +dirs = { workspace = true } nix = { workspace = true } +buck2_forkserver = { workspace = true } + [target.'cfg(windows)'.dependencies] libc = { workspace = true } winapi = { workspace = true } @@ -84,5 +77,5 @@ winapi = { workspace = true } name = "buck2" path = "bin/buck2.rs" -[features] -# @oss-disable: default = ["gazebo_lint"] +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ["cfg(buck_build)", "cfg(client_only)", "cfg(fbcode_build)"] } diff --git a/app/buck2/bin/buck2.rs b/app/buck2/bin/buck2.rs index 8d53ff8be98d5..440526af63efd 100644 --- a/app/buck2/bin/buck2.rs +++ b/app/buck2/bin/buck2.rs @@ -7,11 +7,7 @@ * of this source tree. 
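
The [lints.rust] table above registers client_only (alongside buck_build and fbcode_build) with rustc's check-cfg machinery, so the custom --cfg=client_only flag passed by the rustc_flags select in the BUCK file earlier doesn't trip the unexpected_cfgs lint. A minimal sketch of the gating pattern this enables, with illustrative function names:

```rust
// Sketch of the custom-cfg pattern this diff adopts; names are
// illustrative. The build system passes `--cfg=client_only` (see the
// `rustc_flags` select above), and the `check-cfg` lint table keeps
// rustc from warning about the non-standard cfg name.
#[cfg(not(client_only))]
fn init_server_support() {
    // Full builds register server implementations here
    // (cf. the init_late_bindings() calls in main() below).
}

#[cfg(client_only)]
fn init_server_support() {
    // Client-only builds compile a no-op stub instead.
}

fn main() {
    init_server_support();
    println!("client_only: {}", cfg!(client_only));
}
```
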
*/ -#![feature(async_closure)] -// Plugins -#![cfg_attr(feature = "gazebo_lint", feature(plugin))] -#![cfg_attr(feature = "gazebo_lint", allow(deprecated))] // :( -#![cfg_attr(feature = "gazebo_lint", plugin(gazebo_lint))] +#![feature(used_with_arg)] use std::fs; use std::io; @@ -21,33 +17,28 @@ use std::sync::Arc; use buck2::exec; use buck2::panic; use buck2::process_context::ProcessContext; -use buck2::TracingLogFile; use buck2_build_info::Buck2BuildInfo; use buck2_build_info::BUCK2_BUILD_INFO; use buck2_client_ctx::exit_result::ExitResult; use buck2_client_ctx::restarter::Restarter; use buck2_client_ctx::stdin::Stdin; use buck2_client_ctx::stdio; -use buck2_core::env_helper::EnvHelper; +use buck2_core::buck2_env_anyhow; use buck2_core::fs::working_dir::WorkingDir; use buck2_core::logging::init_tracing_for_writer; +use buck2_core::logging::log_file::TracingLogFile; use buck2_core::logging::LogConfigurationReloadHandle; use buck2_wrapper_common::invocation_id::TraceId; use dupe::Dupe; -use fbinit::FacebookInit; // fbcode likes to set its own allocator in fbcode.default_allocator // So when we set our own allocator, buck build buck2 or buck2 build buck2 often breaks. // Making jemalloc the default only when we do a cargo build. #[global_allocator] -#[cfg(all( - any(target_os = "linux", target_os = "macos"), - not(fbcode_build), - not(buck_oss_build) -))] +#[cfg(all(any(target_os = "linux", target_os = "macos"), not(buck_build)))] static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc; -fn init_logging(_fb: FacebookInit) -> anyhow::Result> { +fn init_logging() -> anyhow::Result> { static ENV_TRACING_LOG_FILE_PATH: &str = "BUCK_LOG_TO_FILE_PATH"; let handle = match std::env::var_os(ENV_TRACING_LOG_FILE_PATH) { @@ -66,22 +57,11 @@ fn init_logging(_fb: FacebookInit) -> anyhow::Result anyhow::Result<()> { // As this main() is used as the entry point for the `buck daemon` command, // it must be single-threaded. Commands that want to be multi-threaded/async // will start up their own tokio runtime. -#[fbinit::main] -fn main(init: fbinit::FacebookInit) -> ! { - buck2_analysis::init_late_bindings(); - buck2_anon_target::init_late_bindings(); - buck2_action_impl::init_late_bindings(); - buck2_audit_server::init_late_bindings(); - buck2_build_api::init_late_bindings(); - buck2_transition::init_late_bindings(); - buck2_build_signals_impl::init_late_bindings(); - buck2_bxl::init_late_bindings(); - buck2_cfg_constructor::init_late_bindings(); - buck2_configured::init_late_bindings(); - buck2_query_impls::init_late_bindings(); - buck2_interpreter_for_build::init_late_bindings(); - buck2_server_commands::init_late_bindings(); - buck2_test::init_late_bindings(); +fn main() -> ! 
{ + buck2_core::client_only::CLIENT_ONLY_VAL.init(cfg!(client_only)); + #[cfg(not(client_only))] + { + buck2_analysis::init_late_bindings(); + buck2_anon_target::init_late_bindings(); + buck2_action_impl::init_late_bindings(); + buck2_audit_server::init_late_bindings(); + buck2_build_api::init_late_bindings(); + buck2_cmd_docs_server::init_late_bindings(); + buck2_external_cells::init_late_bindings(); + buck2_transition::init_late_bindings(); + buck2_build_signals_impl::init_late_bindings(); + buck2_bxl::init_late_bindings(); + buck2_cfg_constructor::init_late_bindings(); + buck2_configured::init_late_bindings(); + buck2_query_impls::init_late_bindings(); + buck2_interpreter_for_build::init_late_bindings(); + buck2_server_commands::init_late_bindings(); + buck2_cmd_starlark_server::init_late_bindings(); + buck2_test::init_late_bindings(); + buck2_validation::init_late_bindings(); + } BUCK2_BUILD_INFO.init(Buck2BuildInfo { revision: std::option_env!("BUCK2_SET_EXPLICIT_VERSION"), win_internal_version: std::option_env!("BUCK2_WIN_INTERNAL_VERSION"), release_timestamp: std::option_env!("BUCK2_RELEASE_TIMESTAMP"), }); - fn main_with_result(init: fbinit::FacebookInit) -> ExitResult { - panic::initialize(init)?; + fn main_with_result() -> ExitResult { + panic::initialize()?; check_cargo(); - static FORCE_WANT_RESTART: EnvHelper = EnvHelper::new("FORCE_WANT_RESTART"); + let force_want_restart = buck2_env_anyhow!("FORCE_WANT_RESTART", bool)?; - let force_want_restart = FORCE_WANT_RESTART.get_copied()?.unwrap_or_default(); + let log_reload_handle = init_logging()?; - let log_reload_handle = init_logging(init)?; + // Log the start timestamp + tracing::debug!("Client initialized logging"); let args = std::env::args().collect::>(); let cwd = WorkingDir::current_dir()?; @@ -153,7 +141,6 @@ fn main(init: fbinit::FacebookInit) -> ! { let first_trace_id = TraceId::from_env_or_new()?; let res = exec(ProcessContext { - init, log_reload_handle: &log_reload_handle, stdin: &mut stdin, working_dir: &cwd, @@ -180,7 +167,6 @@ fn main(init: fbinit::FacebookInit) -> ! { } exec(ProcessContext { - init, log_reload_handle: &log_reload_handle, stdin: &mut stdin, working_dir: &cwd, @@ -198,5 +184,5 @@ fn main(init: fbinit::FacebookInit) -> ! { } } - main_with_result(init).report() + main_with_result().report() } diff --git a/app/buck2/daemon_lifecycle.md b/app/buck2/daemon_lifecycle.md deleted file mode 100644 index 201a5296744d9..0000000000000 --- a/app/buck2/daemon_lifecycle.md +++ /dev/null @@ -1,92 +0,0 @@ -# buckd - -Buck runs a persistent daemon process (buckd) to reuse work between commands. -Most work is done by the daemon process. When executing a buck command, the -process running the command is a client to the buckd server. The buckd server -exposes a simple grpc service that the client uses to implement the various -buck commands. - -There's a small set of commands/arguments that don't require the daemon (`buck -help`, cli arg parse failures, `buck version`, ...), but most commands will -require it. - -For almost all commands, buck requires that the client and server are the same -version of buck and may restart buckd to ensure that's the case. - -# daemon process flow - -The daemon process is started with the (hidden) `buck daemon` command. - -The daemon process has a simple startup. It will first daemonize itself and -write its pid to a locked file "buckd.pid" in the "daemon directory" (a -directory in `$HOME/.buck` specific to that repository+output directory). 
The
-file is locked exclusively by the daemon process until it exits. This means that
-only a single daemon is allowed at a time. It redirects its stdout and stderr
-to files in the daemon directory.
-
-The daemon then starts up the grpc DaemonApi server. Once that is running, it will
-write the port it is running on (along with some other information) to the
-"buckd.info" file in the daemon dir. Once that is done, the server is ready to
-be used.
-
-There are 3 ways that the buckd process will shut down:
-
-1. The grpc api includes a `kill()` call that will shut down buckd.
-2. buckd will periodically (every 100s or so) check the "buckd.pid" and
-   "buckd.info" files to ensure that they still match that buckd process.
-3. If buckd hits a rust `panic()`, the buckd process will exit
-
-# client connection and buckd startup
-
-When the client is processing a command that requires communicating with the
-buckd server, it will follow this approach:
-
-1. read the "buckd.info" file to get the port the grpc api is being served on
-2. connect to the api on that port
-3. send a `status()` request to get the version
-
-If there is an error during 1-3, or if there is a version mismatch, the client
-needs to (re)start the buck daemon. Otherwise, the client can continue as it
-now has made a connection with a correctly versioned buckd.
-
-When the client is killing or starting the buckd process, it will grab an
-exclusive lock on the "lifecycle.lock" file in the daemon directory to ensure
-that multiple clients aren't racing with each other.
-
-To start/restart the buckd process, the client does:
-
-1. lock the "lifecycle.lock" file
-2. send a kill command to the existing buckd
-3. ensure the buckd process has exited (based on pid)
-4. run a `buck daemon` command to start buckd
-5. wait for the daemon to start up and the grpc server to be ready
-6. release the "lifecycle.lock" file
-
-After that, it will repeat the connection steps (including verifying the
-version after connecting).
-
-# buck kill and other daemon restarts
-
-If there are other invocations currently using the buck daemon when it is killed or
-restarted by a client, those invocations will fail due to the early disconnection.
-
-Generally, we support concurrent buck invocations using the same buck version, but
-if there are concurrent invocations with different versions, they may unexpectedly
-fail or otherwise work incorrectly. This is sufficient for the normal buck workflow
-where the buckversion is checked into the repo; in that case, it's not expected that
-buck commands will work across a rebase or other operation that changes the buckversion.
-
-# correctness
-
-We have a couple of guarantees here.
-
-1. Only a single buckd is running at a time
-2. Only a single client is killing/starting a buckd at a time
-3. A client only uses a buckd connection after making sure it has a compatible version
-
-The main way that we could run into issues would be if there are multiple clients
-that are racing and they want different versions of buck. In that case, one
-might cause the other to fail to connect to a buckd with the correct version,
-or one of the client's connections may be prematurely disconnected. A client **will not**
-use a server with a mismatched version. While this is a failure, no expected workflow
-would hit this case; all concurrent commands should be using the same buck version.
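
The connect-or-restart protocol the deleted document describes condenses into a short sketch. This is not buck2's real implementation: it assumes the fs2 crate for the advisory file lock, serde_json for buckd.info, and a stand-in DaemonInfo shape; only the file names and the ordering of steps come from the document above.

```rust
// Illustrative sketch of the client-side flow described above.
use std::fs::{self, File};
use std::io;
use std::path::Path;

use fs2::FileExt;
use serde::Deserialize;

#[derive(Deserialize)]
struct DaemonInfo {
    #[allow(dead_code)]
    endpoint: String,
    version: String,
}

fn connect_or_restart(daemon_dir: &Path, my_version: &str) -> io::Result<DaemonInfo> {
    // Steps 1-3: read buckd.info, connect, and compare versions.
    if let Ok(raw) = fs::read_to_string(daemon_dir.join("buckd.info")) {
        if let Ok(info) = serde_json::from_str::<DaemonInfo>(&raw) {
            if info.version == my_version {
                return Ok(info); // a correctly versioned buckd is running
            }
        }
    }

    // Version mismatch or unreadable info: serialize the restart through
    // lifecycle.lock so concurrent clients don't race each other.
    let lock = File::create(daemon_dir.join("lifecycle.lock"))?;
    lock.lock_exclusive()?;
    // ... kill the stale daemon by pid, spawn `buck daemon`,
    //     poll buckd.info until the grpc server is ready ...
    lock.unlock()?;
    Err(io::Error::new(io::ErrorKind::Other, "restart flow elided in this sketch"))
}
```
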
diff --git a/app/buck2/src/check_user_allowed.rs b/app/buck2/src/check_user_allowed.rs index 93626889d924d..84d66d8034140 100644 --- a/app/buck2/src/check_user_allowed.rs +++ b/app/buck2/src/check_user_allowed.rs @@ -9,16 +9,14 @@ #[cfg(windows)] pub(crate) fn check_user_allowed() -> anyhow::Result<()> { - use std::env; - use std::ffi::OsStr; use std::io; use std::mem; use std::mem::MaybeUninit; use std::ptr; use anyhow::Context; - use buck2_core::sandcastle::is_sandcastle; - use buck2_wrapper_common::winapi_handle::WinapiHandle; + use buck2_core::ci::is_ci; + use buck2_wrapper_common::win::winapi_handle::WinapiHandle; use winapi::ctypes::c_void; use winapi::shared::minwindef::DWORD; use winapi::um::processthreadsapi::GetCurrentProcess; @@ -28,22 +26,14 @@ pub(crate) fn check_user_allowed() -> anyhow::Result<()> { use winapi::um::winnt::TOKEN_ELEVATION; use winapi::um::winnt::TOKEN_QUERY; - #[derive(Debug, thiserror::Error)] - enum CheckUserAllowedError { - #[error("OpenProcessToken returned null token handle (unreachable)")] - NullTokenHandle, - } - let mut handle = ptr::null_mut(); let token_ok = unsafe { OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY, &mut handle) }; if token_ok == 0 { return Err(io::Error::last_os_error()).context("OpenProcessToken failed"); } - if handle.is_null() { - return Err(CheckUserAllowedError::NullTokenHandle.into()); - } - let handle = unsafe { WinapiHandle::new(handle) }; + let handle = + unsafe { WinapiHandle::new_check_last_os_error(handle).context("OpenProcessToken")? }; let size = mem::size_of::(); let elevation: MaybeUninit = MaybeUninit::zeroed(); let mut ret_size = 0; @@ -63,21 +53,10 @@ pub(crate) fn check_user_allowed() -> anyhow::Result<()> { let elevation_struct: TOKEN_ELEVATION = unsafe { elevation.assume_init() }; if elevation_struct.TokenIsElevated == 1 { - // The CI environment variable is consistently set by CI providers. - // - // - GitHub Actions: https://docs.github.com/en/actions/learn-github-actions/variables#default-environment-variables - // - GitLab CI/CD: https://docs.gitlab.com/ee/ci/variables/predefined_variables.html - // - CircleCI: https://circleci.com/docs/variables/#built-in-environment-variables - // - many others - // // In CI, if buck2 got run from an admin shell, we need not worry that a // subsequent invocation might come from a non-admin shell. It almost // certainly will not. - // - // Internally, CI should be setting SANDCASTLE env var. - let is_ci = env::var_os("CI").as_deref() == Some(OsStr::new("true")) || is_sandcastle()?; - - if !is_ci { + if !is_ci()? { tracing::warn!( "You're running buck2 from an admin shell. Invocations from non-admin shells will likely fail going forward. To remediate, run `buck2 clean` in this admin shell, then switch to a non-admin shell." ); @@ -95,7 +74,7 @@ pub(crate) fn check_user_allowed() -> anyhow::Result<()> { use buck2_core::fs::paths::abs_path::AbsPath; use buck2_core::soft_error; - #[derive(Debug, thiserror::Error)] + #[derive(Debug, buck2_error::Error)] #[error("buck2 is not allowed to run as root (unless home dir is owned by root)")] struct RootError; diff --git a/app/buck2/src/cli_style.rs b/app/buck2/src/cli_style.rs new file mode 100644 index 0000000000000..544e269ba8bbd --- /dev/null +++ b/app/buck2/src/cli_style.rs @@ -0,0 +1,20 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
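
The deleted block above folds the inline CI probe into a single buck2_core::ci::is_ci() call. A rough sketch of what such a helper checks, based only on the facts in the removed comment (CI providers set CI=true; Meta's internal CI sets SANDCASTLE); the real implementation may well differ:

```rust
// Rough sketch of a consolidated CI probe like buck2_core::ci::is_ci();
// the real helper may consult more signals than these two.
use std::env;
use std::ffi::OsStr;

fn is_ci() -> bool {
    // "CI" is set by GitHub Actions, GitLab CI/CD, CircleCI, and others;
    // SANDCASTLE marks Meta's internal CI.
    env::var_os("CI").as_deref() == Some(OsStr::new("true"))
        || env::var_os("SANDCASTLE").is_some()
}
```
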
+ * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use clap::builder::styling; +use clap::builder::Styles; + +pub(crate) fn get_styles() -> Styles { + let heading = styling::AnsiColor::Yellow.on_default().bold(); + Styles::styled() + .header(heading) + .usage(heading) + .literal(styling::AnsiColor::Green.on_default()) + .placeholder(styling::AnsiColor::Cyan.on_default()) +} diff --git a/app/buck2/src/commands.rs b/app/buck2/src/commands.rs new file mode 100644 index 0000000000000..ad71e1b2213cb --- /dev/null +++ b/app/buck2/src/commands.rs @@ -0,0 +1,13 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +#[cfg(not(client_only))] +pub(crate) mod forkserver; +#[cfg(not(client_only))] +pub(crate) mod internal_test_runner; diff --git a/app/buck2/src/commands/daemon.rs b/app/buck2/src/commands/daemon.rs deleted file mode 100644 index bbeb3ddb2d80c..0000000000000 --- a/app/buck2/src/commands/daemon.rs +++ /dev/null @@ -1,675 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -#![cfg_attr(feature = "gazebo_lint", allow(deprecated))] // :( - -use std::fs::File; -use std::path::PathBuf; -use std::process; -use std::sync::Arc; -use std::thread; -use std::time::Duration; - -use allocative::Allocative; -use anyhow::Context as _; -use async_trait::async_trait; -use buck2_audit_server::server::server_audit_command; -use buck2_cli_proto::DaemonProcessInfo; -use buck2_client_ctx::daemon_constraints::gen_daemon_constraints; -use buck2_client_ctx::version::BuckVersion; -use buck2_common::buckd_connection::ConnectionType; -use buck2_common::daemon_dir::DaemonDir; -use buck2_common::invocation_paths::InvocationPaths; -use buck2_common::legacy_configs::init::DaemonStartupConfig; -use buck2_common::memory; -use buck2_core::env_helper::EnvHelper; -use buck2_core::fs::fs_util; -use buck2_core::logging::LogConfigurationReloadHandle; -use buck2_server::builtin_docs::docs::docs_command; -use buck2_server::daemon::daemon_tcp::create_listener; -use buck2_server::daemon::server::BuckdServer; -use buck2_server::daemon::server::BuckdServerDelegate; -use buck2_server::daemon::server::BuckdServerDependencies; -use buck2_server::daemon::server::BuckdServerInitPreferences; -use buck2_server::profile::profile_command; -use buck2_server_ctx::bxl::BXL_SERVER_COMMANDS; -use buck2_server_ctx::ctx::ServerCommandContextTrait; -use buck2_server_ctx::partial_result_dispatcher::NoPartialResult; -use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; -use buck2_starlark::server::server_starlark_command; -use futures::channel::mpsc; -use futures::channel::mpsc::UnboundedSender; -use futures::pin_mut; -use futures::select; -use futures::FutureExt; -use futures::StreamExt; -use rand::Rng; -use thiserror::Error; -use tokio::runtime::Builder; - -use 
crate::commands::daemon_lower_priority::daemon_lower_priority; -use crate::commands::schedule_termination::maybe_schedule_termination; -use crate::DaemonBeforeSubcommandOptions; - -#[derive(Debug, Error)] -enum DaemonError { - #[error("The buckd pid file at `{0}` had a mismatched pid, expected `{1}`, got `{2}`")] - PidFileMismatch(PathBuf, u32, u32), -} - -/// Start or run buck daemon. -/// -/// This is an internal command, not intended to be used directly. -/// Buck client invokes it to spawn a server process. -#[derive(Clone, Debug, clap::Parser)] -pub(crate) struct DaemonCommand { - /// Sets the interval for how often the daemon performs consistency checks. - /// These are used to ensure that the daemon is still the one referenced - /// by files in the daemon dir. - #[clap(long, default_value("60"))] - checker_interval_seconds: u64, - /// Run buck daemon but do not daemonize the process. - #[clap(long)] - dont_daemonize: bool, - /// This flag is set to prevent infinite recursion when the process is restarted - /// with lower priority. - #[clap(long)] - skip_macos_qos: bool, - /// Early configs that the daemon needs at startup. Those are read by the client then passed to - /// the daemon. The client will restart the daemon if they mismatch. - #[clap(parse(try_from_str = DaemonStartupConfig::deserialize))] - daemon_startup_config: DaemonStartupConfig, -} - -impl DaemonCommand { - /// Command instance for `--no-buckd`. - pub(crate) fn new_in_process(daemon_startup_config: DaemonStartupConfig) -> DaemonCommand { - DaemonCommand { - checker_interval_seconds: 60, - dont_daemonize: true, - skip_macos_qos: true, - daemon_startup_config, - } - } -} - -struct BuckdServerDependenciesImpl; - -#[async_trait] -impl BuckdServerDependencies for BuckdServerDependenciesImpl { - async fn audit( - &self, - ctx: &dyn ServerCommandContextTrait, - partial_result_dispatcher: PartialResultDispatcher, - req: buck2_cli_proto::GenericRequest, - ) -> anyhow::Result { - server_audit_command(ctx, partial_result_dispatcher, req).await - } - async fn starlark( - &self, - ctx: &dyn ServerCommandContextTrait, - partial_result_dispatcher: PartialResultDispatcher, - req: buck2_cli_proto::GenericRequest, - ) -> anyhow::Result { - server_starlark_command(ctx, partial_result_dispatcher, req).await - } - async fn profile( - &self, - ctx: &dyn ServerCommandContextTrait, - partial_result_dispatcher: PartialResultDispatcher, - req: buck2_cli_proto::ProfileRequest, - ) -> anyhow::Result { - match req.profile_opts.as_ref().expect("Missing profile opts") { - buck2_cli_proto::profile_request::ProfileOpts::TargetProfile(_) => { - profile_command(ctx, partial_result_dispatcher, req).await - } - buck2_cli_proto::profile_request::ProfileOpts::BxlProfile(_) => { - BXL_SERVER_COMMANDS - .get()? 
- .bxl_profile(ctx, partial_result_dispatcher, req) - .await - } - } - } - async fn docs( - &self, - ctx: &dyn ServerCommandContextTrait, - partial_result_dispatcher: PartialResultDispatcher, - req: buck2_cli_proto::UnstableDocsRequest, - ) -> anyhow::Result { - docs_command(ctx, partial_result_dispatcher, req).await - } -} - -pub(crate) fn init_listener() -> anyhow::Result<(std::net::TcpListener, ConnectionType)> { - let (endpoint, listener) = create_listener()?; - - tracing::info!("Listener created on {}", &endpoint); - - Ok((listener, endpoint)) -} - -pub(crate) fn write_process_info( - daemon_dir: &DaemonDir, - process_info: &DaemonProcessInfo, -) -> anyhow::Result<()> { - let file = File::create(daemon_dir.buckd_info())?; - serde_json::to_writer(&file, &process_info)?; - Ok(()) -} - -fn verify_current_daemon(daemon_dir: &DaemonDir) -> anyhow::Result<()> { - let file = daemon_dir.buckd_pid(); - let my_pid = process::id(); - - let recorded_pid: u32 = fs_util::read_to_string(&file)?.trim().parse()?; - if recorded_pid != my_pid { - return Err( - DaemonError::PidFileMismatch(file.into_path_buf(), my_pid, recorded_pid).into(), - ); - } - - Ok(()) -} - -fn gen_auth_token() -> String { - (0..20) - .map(|_| rand::thread_rng().gen_range('a'..='z')) - .collect() -} - -fn terminate_on_panic() { - let orig_hook = std::panic::take_hook(); - std::panic::set_hook(Box::new(move |panic_info| { - orig_hook(panic_info); - // We are using `_exit` instead of `exit` to avoid running global destructors. - // This is similar to what default rust panic handler does - // when there `panic=abort`: it does `abort`. - unsafe { libc::_exit(1) } - })); -} - -impl DaemonCommand { - fn run( - self, - fb: fbinit::FacebookInit, - log_reload_handle: Arc, - paths: InvocationPaths, - before_subcommand_options: DaemonBeforeSubcommandOptions, - in_process: bool, - listener_created: impl FnOnce() + Send, - ) -> anyhow::Result<()> { - // NOTE: Do not create any threads before this point. - // Daemonize does not preserve threads. - - let server_init_ctx = BuckdServerInitPreferences { - detect_cycles: before_subcommand_options.detect_cycles, - which_dice: before_subcommand_options.which_dice, - enable_trace_io: before_subcommand_options.enable_trace_io, - reject_materializer_state: before_subcommand_options - .reject_materializer_state - .map(|s| s.into()), - daemon_startup_config: self.daemon_startup_config, - }; - - let span = tracing::info_span!("daemon_listener"); - let span_guard = span.enter(); - - let daemon_dir = paths.daemon_dir()?; - let pid_path = daemon_dir.buckd_pid(); - let stdout_path = daemon_dir.buckd_stdout(); - let stderr_path = daemon_dir.buckd_stderr(); - // Even if we don't redirect output, we still need to create stdout/stderr files, - // because tailer opens them. This is untidy. 
- let stdout = File::create(stdout_path)?; - let stderr = File::create(stderr_path)?; - - let auth_token = gen_auth_token(); - - let (listener, process_info, endpoint) = if !self.dont_daemonize { - // We must create stdout/stderr before creating a listener, - // otherwise it is race: - // * daemon parent process exits - // * client successfully connects to the unix socket - // * but stdout/stderr may be not yet created, so tailer fails to open them - let (listener, endpoint) = init_listener()?; - - Self::daemonize(stdout, stderr)?; - - fs_util::write(&pid_path, format!("{}", process::id()))?; - - let pid = process::id(); - let process_info = DaemonProcessInfo { - pid: pid as i64, - endpoint: endpoint.to_string(), - version: BuckVersion::get().unique_id().to_owned(), - auth_token, - }; - - // TODO(nga): this code is executed after server daemonization, - // so client has to retry to read it. Fix it. - write_process_info(&daemon_dir, &process_info)?; - - tracing::info!("Daemonized."); - - (listener, process_info, endpoint) - } else { - fs_util::write(&pid_path, format!("{}", process::id()))?; - - if !in_process { - Self::redirect_output(stdout, stderr)?; - } - - let (listener, endpoint) = init_listener()?; - - let process_info = DaemonProcessInfo { - pid: process::id() as i64, - endpoint: endpoint.to_string(), - version: BuckVersion::get().unique_id().to_owned(), - auth_token, - }; - - write_process_info(&daemon_dir, &process_info)?; - - (listener, process_info, endpoint) - }; - - tracing::info!("Starting Buck2 daemon"); - tracing::info!("Version: {}", BuckVersion::get_version()); - tracing::info!("PID: {}", process::id()); - tracing::info!("ID: {}", *buck2_events::daemon_id::DAEMON_UUID); - tracing::info!("Endpoint: {}", endpoint); - - listener_created(); - - terminate_on_panic(); - - maybe_schedule_termination()?; - - // Higher performance for jemalloc, recommended (but may not have any effect on Mac) - // https://github.com/jemalloc/jemalloc/blob/dev/TUNING.md#notable-runtime-options-for-performance-tuning - memory::enable_background_threads()?; - - if cfg!(target_os = "linux") { - #[cfg(fbcode_build)] - { - gflags::set_gflag_value( - fb, - "cgroup2_reader_update_interval_ms", - gflags::GflagValue::U32(2000), - ) - .expect("failed to set gflag --cgroup2_reader_update_interval_ms"); - } - } - - let mut builder = Builder::new_multi_thread(); - builder.enable_all(); - builder.thread_name("buck2-rt"); - - static RUNTIME_THREADS: EnvHelper = EnvHelper::new("BUCK2_RUNTIME_THREADS"); - if let Some(threads) = RUNTIME_THREADS.get_copied()? { - builder.worker_threads(threads); - } - - static MAX_BLOCKING_THREADS: EnvHelper = - EnvHelper::new("BUCK2_MAX_BLOCKING_THREADS"); - if let Some(threads) = MAX_BLOCKING_THREADS.get_copied()? { - builder.max_blocking_threads(threads); - } - - tracing::info!("Starting tokio runtime..."); - - let rt = builder.build().context("Error creating Tokio runtime")?; - let handle = rt.handle().clone(); - - let rt = Builder::new_multi_thread() - .enable_all() - .thread_name("buck2-tn") - // These values are arbitrary, but I/O shouldn't take up many threads. - .worker_threads(2) - .max_blocking_threads(2) - .build() - .context("Error creating Tonic Tokio runtime")?; - - rt.block_on(async move { - // Once any item is received on the hard_shutdown_receiver, the daemon process will exit immediately. 
- let (hard_shutdown_sender, mut hard_shutdown_receiver) = mpsc::unbounded(); - - #[derive(Allocative)] - struct Delegate { - #[allocative(skip)] - hard_shutdown_sender: UnboundedSender, - } - - impl BuckdServerDelegate for Delegate { - fn force_shutdown_with_timeout(&self, reason: String, timeout: Duration) { - let sender = self.hard_shutdown_sender.clone(); - tokio::spawn(async move { - tokio::time::sleep(timeout).await; - sender.unbounded_send(reason).expect("Shouldn't happen."); - }); - } - } - - let delegate = Box::new(Delegate { - hard_shutdown_sender: hard_shutdown_sender.clone(), - }); - let daemon_dir = paths.daemon_dir()?; - - listener.set_nonblocking(true)?; - let listener = tokio::net::TcpListener::from_std(listener)?; - let listener = tokio_stream::wrappers::TcpListenerStream::new(listener); - - tracing::info!("Listening."); - - drop(span_guard); - - let daemon_constraints = - gen_daemon_constraints(&server_init_ctx.daemon_startup_config)?; - - let buckd_server = BuckdServer::run( - fb, - log_reload_handle, - paths, - delegate, - server_init_ctx, - process_info, - daemon_constraints, - Box::pin(listener), - &BuckdServerDependenciesImpl, - handle, - ) - .fuse(); - let shutdown_future = async move { hard_shutdown_receiver.next().await }.fuse(); - pin_mut!(buckd_server); - pin_mut!(shutdown_future); - - let checker_interval_seconds = self.checker_interval_seconds; - - thread::Builder::new() - .name("check-daemon-dir".to_owned()) - .spawn(move || { - Self::check_daemon_dir_thread( - checker_interval_seconds, - daemon_dir, - hard_shutdown_sender, - ) - })?; - - tracing::info!("Initialization complete, running the server."); - - select! { - res = buckd_server => { - tracing::info!("server shutdown"); - res - } - reason = shutdown_future => { - let reason = reason.as_deref().unwrap_or("no reason available"); - tracing::info!("server forced shutdown: {}", reason); - anyhow::Ok(()) - }, - } - }) - } - - /// We start a dedicated thread to periodically check that the files in the daemon - /// dir still reflect that we are the current buckd and verify that when you connect - /// to the server it is our server. - /// It gets a dedicated thread so that if somehow the main runtime gets all jammed up, - /// this will still run (and presumably connecting to the server or our request would - /// then fail and we'd do a hard shutdown). - fn check_daemon_dir_thread( - checker_interval_seconds: u64, - daemon_dir: DaemonDir, - hard_shutdown_sender: UnboundedSender, - ) { - let this_rt = Builder::new_current_thread().enable_all().build().unwrap(); - - this_rt.block_on(async move { - loop { - tokio::time::sleep(Duration::from_secs(checker_interval_seconds)).await; - match verify_current_daemon(&daemon_dir) { - Ok(()) => {} - Err(e) => { - // This bit of code cannot relay errors, ignoring that we can't log - // a warning is reasonable. - let _ignored = buck2_client_ctx::eprintln!( - "daemon verification failed, forcing shutdown: {:#}", - e - ); - - // If this is already shutting down, we don't need to do it again. 
- let _ignored = hard_shutdown_sender - .unbounded_send("Daemon verification failed".to_owned()); - } - }; - } - }) - } - - pub(crate) fn exec( - self, - init: fbinit::FacebookInit, - log_reload_handle: Arc, - paths: InvocationPaths, - before_subcommand_options: DaemonBeforeSubcommandOptions, - in_process: bool, - listener_created: impl FnOnce() + Send, - ) -> anyhow::Result<()> { - daemon_lower_priority(self.skip_macos_qos)?; - - let project_root = paths.project_root(); - let daemon_dir = paths.daemon_dir()?; - - if !daemon_dir.path.is_dir() { - fs_util::create_dir_all(&daemon_dir.path)?; - } - - // TODO(nga): this breaks relative paths in `--no-buckd`. - // `--no-buckd` should capture correct directories earlier. - // Or even better, client should set current directory to project root, - // and resolve all paths relative to original cwd. - fs_util::set_current_dir(project_root.root())?; - - self.run( - init, - log_reload_handle, - paths, - before_subcommand_options, - in_process, - listener_created, - )?; - Ok(()) - } - - #[cfg(unix)] - fn redirect_output(stdout: File, stderr: File) -> anyhow::Result<()> { - use std::os::unix::io::AsRawFd; - - nix::unistd::dup2(stdout.as_raw_fd(), nix::libc::STDOUT_FILENO)?; - nix::unistd::dup2(stderr.as_raw_fd(), nix::libc::STDERR_FILENO)?; - Ok(()) - } - - #[cfg(windows)] - fn redirect_output(stdout: File, stderr: File) -> anyhow::Result<()> { - use std::os::windows::io::AsRawHandle; - - unsafe { - let stdout_fd = libc::open_osfhandle(stdout.as_raw_handle() as isize, libc::O_RDWR); - let stderr_fd = libc::open_osfhandle(stderr.as_raw_handle() as isize, libc::O_RDWR); - if stdout_fd == -1 || stderr_fd == -1 { - return Err(anyhow::Error::msg( - "Can't get file descriptors for output files", - )); - } - // MSVC libc doesn't export STDOUT_FILENO and STDERR_FILENO. - let stdout_exit_code = libc::dup2(stdout_fd, 1); - let stderr_exit_code = libc::dup2(stderr_fd, 2); - if stdout_exit_code == -1 || stderr_exit_code == -1 { - return Err(anyhow::Error::msg("Failed to redirect daemon output")); - } - } - Ok(()) - } - - #[cfg(unix)] - fn daemonize(stdout: File, stderr: File) -> anyhow::Result<()> { - // TODO(cjhopman): Daemonize is pretty un-maintained. We may need to move - // to something else or just do it ourselves. - let daemonize = crate::commands::daemonize::Daemonize::new() - .stdout(stdout) - .stderr(stderr); - daemonize.start()?; - Ok(()) - } - - #[cfg(windows)] - /// Restart current process in detached mode with '--dont-daemonize' flag. 
- fn daemonize(_stdout: File, _stderr: File) -> anyhow::Result<()> { - Err(anyhow::anyhow!("Cannot daemonize on Windows")) - } -} - -#[cfg(test)] -mod tests { - use std::process; - use std::time::Duration; - - use allocative::Allocative; - use anyhow::Context; - use buck2_cli_proto::DaemonProcessInfo; - use buck2_cli_proto::KillRequest; - use buck2_cli_proto::PingRequest; - use buck2_client_ctx::daemon::client::connect::new_daemon_api_client; - use buck2_client_ctx::daemon_constraints::gen_daemon_constraints; - use buck2_common::invocation_paths::InvocationPaths; - use buck2_common::invocation_roots::InvocationRoots; - use buck2_common::legacy_configs::init::DaemonStartupConfig; - use buck2_core::fs::paths::file_name::FileNameBuf; - use buck2_core::fs::project::ProjectRootTemp; - use buck2_core::logging::LogConfigurationReloadHandle; - use buck2_server::daemon::daemon_tcp::create_listener; - use buck2_server::daemon::server::BuckdServer; - use buck2_server::daemon::server::BuckdServerDelegate; - use buck2_server::daemon::server::BuckdServerInitPreferences; - use dupe::Dupe; - use rand::RngCore; - use rand::SeedableRng; - use tokio::runtime::Handle; - - use crate::commands::daemon::BuckdServerDependenciesImpl; - - // `fbinit_tokio` is not on crates, so we cannot use `#[fbinit::test]`. - #[tokio::test] - async fn test_daemon_smoke() { - // TODO(nga): this should be `fbinit::perform_init`, but it is not on crates yet. - let fbinit = unsafe { fbinit::assume_init() }; - - let project_root = ProjectRootTemp::new().unwrap(); - - let (endpoint, listener) = create_listener().unwrap(); - listener.set_nonblocking(true).unwrap(); - let listener = tokio::net::TcpListener::from_std(listener).unwrap(); - let listener = tokio_stream::wrappers::TcpListenerStream::new(listener); - - let invocation_paths = InvocationPaths { - roots: InvocationRoots { - cell_root: project_root.path().root().to_buf(), - project_root: project_root.path().dupe(), - }, - isolation: FileNameBuf::try_from("v2".to_owned()).unwrap(), - }; - - #[derive(Allocative)] - struct Delegate; - - impl BuckdServerDelegate for Delegate { - fn force_shutdown_with_timeout(&self, _reason: String, _timeout: Duration) {} - } - - let process_info = DaemonProcessInfo { - endpoint: endpoint.to_string(), - pid: process::id() as i64, - version: "13.17.19".to_owned(), - auth_token: "abc".to_owned(), - }; - - let handle = tokio::spawn(BuckdServer::run( - fbinit, - ::noop(), - invocation_paths, - Box::new(Delegate), - BuckdServerInitPreferences { - detect_cycles: None, - which_dice: None, - enable_trace_io: false, - reject_materializer_state: None, - daemon_startup_config: DaemonStartupConfig::testing_empty(), - }, - process_info.clone(), - gen_daemon_constraints(&DaemonStartupConfig::testing_empty()).unwrap(), - Box::pin(listener), - &BuckdServerDependenciesImpl, - Handle::current(), - )); - - let mut client = new_daemon_api_client(endpoint.clone(), process_info.auth_token) - .await - .unwrap(); - - client.ping(PingRequest::default()).await.unwrap(); - - let mut client_with_wrong_token = new_daemon_api_client(endpoint, "wrong_token".to_owned()) - .await - .unwrap(); - - let err = format!( - "{:#}", - client_with_wrong_token - .ping(PingRequest::default()) - .await - .unwrap_err() - ); - assert!(err.contains("invalid auth token"), "Error is: {}", err); - - client.ping(PingRequest::default()).await.unwrap(); - - for req_size in [0, 1 << 10, 1 << 20, 10 << 20, 100 << 20] { - let mut payload = vec![0; req_size]; - 
rand::rngs::SmallRng::seed_from_u64(20).fill_bytes(&mut payload); - client - .ping(PingRequest { - payload, - ..PingRequest::default() - }) - .await - .context(format!("req_size={}", req_size)) - .unwrap(); - } - - for resp_size in [0, 1 << 10, 1 << 20, 10 << 20, 100 << 20] { - client - .ping(PingRequest { - response_payload_size: resp_size, - ..PingRequest::default() - }) - .await - .context(format!("resp_size={}", resp_size)) - .unwrap(); - } - - client.kill(KillRequest::default()).await.unwrap(); - - handle - .await - .expect("handle join failed") - .expect("daemon returned error"); - } -} diff --git a/app/buck2/src/commands/docs/mod.rs b/app/buck2/src/commands/docs/mod.rs deleted file mode 100644 index b78c9084b01ec..0000000000000 --- a/app/buck2/src/commands/docs/mod.rs +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use buck2_client_ctx::client_ctx::ClientCommandContext; -use buck2_client_ctx::exit_result::ExitResult; -use buck2_client_ctx::streaming::BuckSubcommand; - -use crate::commands::docs::query::DocsAqueryCommand; -use crate::commands::docs::query::DocsCqueryCommand; -use crate::commands::docs::query::DocsUqueryCommand; -use crate::commands::docs::starlark::DocsStarlarkCommand; - -mod output; -mod query; -mod starlark; - -#[allow(clippy::large_enum_variant)] -#[derive(Debug, clap::Parser)] -enum DocsKind { - Starlark(DocsStarlarkCommand), - Uquery(DocsUqueryCommand), - Query(DocsUqueryCommand), - Cquery(DocsCqueryCommand), - Aquery(DocsAqueryCommand), -} - -#[derive(Debug, clap::Parser)] -#[clap(name = "docs", about = "Print documentation of specified symbols")] -pub(crate) struct DocsCommand { - #[clap(subcommand)] - docs_kind: DocsKind, -} - -impl DocsCommand { - pub(crate) fn exec( - self, - matches: &clap::ArgMatches, - ctx: ClientCommandContext<'_>, - ) -> ExitResult { - let submatches = match matches.subcommand().map(|s| s.1) { - Some(submatches) => submatches, - None => panic!("Parsed a subcommand but couldn't extract subcommand argument matches"), - }; - match self.docs_kind { - DocsKind::Starlark(cmd) => cmd.exec(submatches, ctx), - DocsKind::Uquery(cmd) => cmd.exec(submatches, ctx), - DocsKind::Query(cmd) => cmd.exec(submatches, ctx), - DocsKind::Cquery(cmd) => cmd.exec(submatches, ctx), - DocsKind::Aquery(cmd) => cmd.exec(submatches, ctx), - } - } -} diff --git a/app/buck2/src/commands/docs/output.rs b/app/buck2/src/commands/docs/output.rs deleted file mode 100644 index c584db9ac06f8..0000000000000 --- a/app/buck2/src/commands/docs/output.rs +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
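
The deleted docs/mod.rs above (its subcommands now live in the buck2_cmd_docs crates) paired a typed subcommand enum with the raw clap::ArgMatches of the same subcommand, so nested commands could re-inspect flags. A distilled, self-contained sketch of that dispatch pattern with illustrative names:

```rust
// Distilled sketch of the dispatch pattern in the deleted docs/mod.rs.
use clap::{CommandFactory, FromArgMatches, Parser, Subcommand};

#[derive(Debug, Subcommand)]
enum DocsKind {
    Uquery,
    Cquery,
}

#[derive(Debug, Parser)]
struct DocsCommand {
    #[clap(subcommand)]
    docs_kind: DocsKind,
}

fn main() {
    let matches = DocsCommand::command().get_matches_from(["docs", "uquery"]);
    let cmd = DocsCommand::from_arg_matches(&matches).unwrap();
    // clap guarantees a subcommand was parsed, so this lookup cannot fail.
    let (name, _submatches) = matches.subcommand().expect("subcommand parsed");
    println!("dispatching {name} as {:?}", cmd.docs_kind);
}
```
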
- */ - -use dupe::Dupe; - -#[derive(Debug, Clone, Dupe, clap::ArgEnum)] -#[clap(rename_all = "snake_case")] -enum DocsOutputFormatArg { - Markdown, - Rendered, -} - -#[derive(Debug, clap::Parser)] -pub(crate) struct DocsOutputFormatOptions { - /// How to format the documentation - #[clap( - long = "format", - default_value = "rendered", - arg_enum, - ignore_case = true - )] - format: DocsOutputFormatArg, -} - -impl DocsOutputFormatOptions { - pub fn emit_markdown(&self, markdown: &str) -> anyhow::Result<()> { - match self.format { - DocsOutputFormatArg::Markdown => { - buck2_client_ctx::println!("{}", markdown)?; - } - DocsOutputFormatArg::Rendered => { - let skin = termimad::MadSkin::default(); - let area = termimad::Area::full_screen(); - let width = std::cmp::min(100, area.width) as usize; - let rendered = skin.text(markdown, Some(width)); - buck2_client_ctx::println!("{}", rendered)?; - } - } - - Ok(()) - } -} diff --git a/app/buck2/src/commands/docs/query.rs b/app/buck2/src/commands/docs/query.rs deleted file mode 100644 index c5535a3261c97..0000000000000 --- a/app/buck2/src/commands/docs/query.rs +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use buck2_client_ctx::client_ctx::ClientCommandContext; -use buck2_client_ctx::exit_result::ExitResult; -use buck2_query::query::syntax::simple::functions::description::QueryType; -use buck2_query::query::syntax::simple::functions::description::QUERY_ENVIRONMENT_DESCRIPTION_BY_TYPE; -use buck2_query::query::syntax::simple::functions::docs::MarkdownOptions; -use buck2_query::query::syntax::simple::functions::docs::QueryEnvironmentDescription; - -use crate::commands::docs::output::DocsOutputFormatOptions; - -#[derive(Debug, clap::Parser)] -#[clap(name = "docs-uquery", about = "Print documentation for uquery")] -pub(crate) struct DocsUqueryCommand { - #[clap(flatten)] - docs_options: DocsOutputFormatOptions, -} - -#[derive(Debug, clap::Parser)] -#[clap(name = "docs-cquery", about = "Print documentation for cquery")] -pub(crate) struct DocsCqueryCommand { - #[clap(flatten)] - docs_options: DocsOutputFormatOptions, -} - -#[derive(Debug, clap::Parser)] -#[clap(name = "docs-aquery", about = "Print documentation for aquery")] -pub(crate) struct DocsAqueryCommand { - #[clap(flatten)] - docs_options: DocsOutputFormatOptions, -} - -fn output( - options: DocsOutputFormatOptions, - description: QueryEnvironmentDescription, -) -> ExitResult { - let markdown = description.render_markdown(&MarkdownOptions { - include_alt_text: true, - }); - options.emit_markdown(&markdown)?; - ExitResult::success() -} - -impl DocsUqueryCommand { - pub(crate) fn exec( - self, - _matches: &clap::ArgMatches, - _ctx: ClientCommandContext<'_>, - ) -> ExitResult { - let description = (QUERY_ENVIRONMENT_DESCRIPTION_BY_TYPE.get()?)(QueryType::Uquery); - output(self.docs_options, description) - } -} - -impl DocsCqueryCommand { - pub(crate) fn exec( - self, - _matches: &clap::ArgMatches, - _ctx: ClientCommandContext<'_>, - ) -> ExitResult { - let description = (QUERY_ENVIRONMENT_DESCRIPTION_BY_TYPE.get()?)(QueryType::Cquery); - output(self.docs_options, description) - } -} - -impl DocsAqueryCommand { - pub(crate) fn exec( - self, - _matches: &clap::ArgMatches, - _ctx: 
ClientCommandContext<'_>, - ) -> ExitResult { - let description = (QUERY_ENVIRONMENT_DESCRIPTION_BY_TYPE.get()?)(QueryType::Aquery); - output(self.docs_options, description) - } -} diff --git a/app/buck2/src/commands/docs/starlark.rs b/app/buck2/src/commands/docs/starlark.rs deleted file mode 100644 index d42c5c69b3086..0000000000000 --- a/app/buck2/src/commands/docs/starlark.rs +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -mod markdown; - -use anyhow::Context; -use async_trait::async_trait; -use buck2_cli_proto::UnstableDocsRequest; -use buck2_client_ctx::client_ctx::ClientCommandContext; -use buck2_client_ctx::common::CommonBuildConfigurationOptions; -use buck2_client_ctx::common::CommonConsoleOptions; -use buck2_client_ctx::common::CommonDaemonCommandOptions; -use buck2_client_ctx::daemon::client::BuckdClientConnector; -use buck2_client_ctx::daemon::client::NoPartialResultHandler; -use buck2_client_ctx::exit_result::ExitResult; -use buck2_client_ctx::streaming::StreamingCommand; -use dupe::Dupe; - -use crate::commands::docs::starlark::markdown::MarkdownFileOptions; - -#[derive(Debug, Clone, Dupe, clap::ArgEnum)] -#[clap(rename_all = "snake_case")] -enum DocsOutputFormatArg { - Json, - MarkdownFiles, -} - -#[derive(Debug, clap::Parser)] -#[clap( - name = "docs-starlark", - about = "Print documentation of user-defined starlark symbols" -)] -pub(crate) struct DocsStarlarkCommand { - #[clap(flatten)] - pub config_opts: CommonBuildConfigurationOptions, - - #[clap(flatten)] - console_opts: CommonConsoleOptions, - - #[clap(flatten)] - event_log_opts: CommonDaemonCommandOptions, - - #[clap(flatten)] - markdown_file_opts: MarkdownFileOptions, - - #[clap( - long = "format", - help = "how to format the returned documentation", - default_value = "json", - arg_enum, - ignore_case = true - )] - format: DocsOutputFormatArg, - - #[clap( - long = "builtins", - help = "get documentation for built in functions, rules, and providers" - )] - builtins: bool, - - #[clap( - long = "prelude", - help = "get documentation for the prelude, if present" - )] - prelude: bool, - - #[clap( - name = "SYMBOL_PATTERNS", - help = "Patterns to interpret. 
//foo:bar.bzl is 'every symbol in //foo:bar.bzl', //foo:bar.bzl:baz only returns the documentation for the symbol 'baz' in //foo:bar.bzl" - )] - patterns: Vec, -} - -#[async_trait] -impl StreamingCommand for DocsStarlarkCommand { - const COMMAND_NAME: &'static str = "docs starlark"; - async fn exec_impl( - self, - buckd: &mut BuckdClientConnector, - matches: &clap::ArgMatches, - ctx: &mut ClientCommandContext<'_>, - ) -> ExitResult { - let client_context = ctx.client_context(matches, &self)?; - - let response = buckd - .with_flushing() - .unstable_docs( - UnstableDocsRequest { - context: Some(client_context), - symbol_patterns: self.patterns.clone(), - retrieve_builtins: self.builtins, - retrieve_prelude: self.prelude, - format: match self.format { - DocsOutputFormatArg::Json => { - buck2_cli_proto::unstable_docs_request::Format::Json as i32 - } - DocsOutputFormatArg::MarkdownFiles => { - buck2_cli_proto::unstable_docs_request::Format::Markdown as i32 - } - }, - markdown_output_path: self - .markdown_file_opts - .destination_dir - .as_ref() - .map(|d| { - anyhow::Ok( - d.resolve(&ctx.working_dir) - .to_str() - .context("path is not valid")? - .to_owned(), - ) - }) - .transpose()?, - markdown_starlark_subdir: self.markdown_file_opts.starlark_subdir.clone(), - markdown_native_subdir: self.markdown_file_opts.native_subdir.clone(), - }, - ctx.stdin().console_interaction_stream(&self.console_opts), - &mut NoPartialResultHandler, - ) - .await??; - - if let Some(json_output) = response.json_output { - buck2_client_ctx::println!("{}", json_output.trim_end())?; - } - - ExitResult::success() - } - - fn console_opts(&self) -> &CommonConsoleOptions { - &self.console_opts - } - - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { - &self.event_log_opts - } - - fn common_opts(&self) -> &CommonBuildConfigurationOptions { - &self.config_opts - } -} diff --git a/app/buck2/src/commands/docs/starlark/markdown.rs b/app/buck2/src/commands/docs/starlark/markdown.rs deleted file mode 100644 index 97d9068db8951..0000000000000 --- a/app/buck2/src/commands/docs/starlark/markdown.rs +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use buck2_client_ctx::path_arg::PathArg; - -#[derive(Debug, clap::Parser, serde::Serialize, serde::Deserialize)] -pub(crate) struct MarkdownFileOptions { - #[structopt( - long = "markdown-files-destination-dir", - required_if_eq("format", "markdown_files") - )] - pub(crate) destination_dir: Option, - #[structopt(long = "markdown-files-native-subdir", default_value = "native")] - pub(crate) native_subdir: String, - #[structopt(long = "markdown-files-starlark-subdir", default_value = "starlark")] - pub(crate) starlark_subdir: String, -} diff --git a/app/buck2/src/commands/forkserver.rs b/app/buck2/src/commands/forkserver.rs index 7e8f410335d43..bac7a24901218 100644 --- a/app/buck2/src/commands/forkserver.rs +++ b/app/buck2/src/commands/forkserver.rs @@ -7,10 +7,10 @@ * of this source tree. 
*/ -use std::path::PathBuf; use std::sync::Arc; use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_common::init::ResourceControlConfig; use buck2_core::fs::fs_util; use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; use buck2_core::logging::LogConfigurationReloadHandle; @@ -28,7 +28,10 @@ pub(crate) struct ForkserverCommand { fd: RawFd, #[clap(long)] - state_dir: PathBuf, + state_dir: AbsNormPathBuf, + + #[clap(long, value_parser = ResourceControlConfig::deserialize)] + resource_control: ResourceControlConfig, } impl ForkserverCommand { @@ -38,9 +41,9 @@ impl ForkserverCommand { _ctx: ClientCommandContext<'_>, log_reload_handle: Arc, ) -> anyhow::Result<()> { - let state_dir = AbsNormPathBuf::try_from(self.state_dir)?; + fs_util::create_dir_all(&self.state_dir)?; - fs_util::create_dir_all(&state_dir)?; + let _todo_resource_control = self.resource_control; #[cfg(unix)] { @@ -55,7 +58,7 @@ impl ForkserverCommand { rt.block_on(buck2_forkserver::unix::run_forkserver( self.fd, log_reload_handle, - state_dir, + self.state_dir, )) } diff --git a/app/buck2/src/commands/internal_test_runner.rs b/app/buck2/src/commands/internal_test_runner.rs index c1e2b2a05a7de..b086123a15088 100644 --- a/app/buck2/src/commands/internal_test_runner.rs +++ b/app/buck2/src/commands/internal_test_runner.rs @@ -45,9 +45,9 @@ impl InternalTestRunnerCommand { } }) } else { - anyhow::bail!( + Err(anyhow::anyhow!( "Cannot use internal test runner. Config value must be provided for test.v2_test_executor." - ) + )) } } } diff --git a/app/buck2/src/commands/mod.rs b/app/buck2/src/commands/mod.rs deleted file mode 100644 index c79211fd53e15..0000000000000 --- a/app/buck2/src/commands/mod.rs +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -pub mod daemon; -pub(crate) mod daemon_lower_priority; -pub(crate) mod daemonize; -pub mod docs; -pub mod forkserver; -pub mod internal_test_runner; -pub(crate) mod schedule_termination; diff --git a/app/buck2/src/commands/schedule_termination.rs b/app/buck2/src/commands/schedule_termination.rs deleted file mode 100644 index 715f58b713c5b..0000000000000 --- a/app/buck2/src/commands/schedule_termination.rs +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
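
The new --resource-control flag above shows a clap pattern worth calling out: a custom value_parser function that deserializes a structured flag value in one step. A minimal sketch of the same pattern; this ResourceControlConfig is a stand-in, not the real buck2_common::init type, and the JSON wire format is an assumption:

```rust
// Minimal sketch of `value_parser = Type::deserialize` as used above.
use clap::Parser;
use serde::Deserialize;

#[derive(Clone, Debug, Deserialize)]
struct ResourceControlConfig {
    memory_max: Option<String>,
}

impl ResourceControlConfig {
    fn deserialize(s: &str) -> anyhow::Result<Self> {
        Ok(serde_json::from_str(s)?)
    }
}

#[derive(Debug, Parser)]
struct ForkserverArgs {
    #[clap(long, value_parser = ResourceControlConfig::deserialize)]
    resource_control: ResourceControlConfig,
}

fn main() {
    let args = ForkserverArgs::parse_from([
        "forkserver",
        "--resource-control",
        r#"{"memory_max":"50%"}"#,
    ]);
    println!("{:?}", args.resource_control);
}
```
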
- */ - -use std::thread; -use std::time::Duration; - -use buck2_core::env_helper::EnvHelper; -use buck2_util::process_stats::process_cpu_time_us; - -fn elapsed_cpu_time_as_percents( - cpu_time_before_us: Option<u64>, - cpu_time_after_us: Option<u64>, - duration_seconds: u64, -) -> Option<u64> { - let cpu_time_before = cpu_time_before_us?; - let cpu_time_after = cpu_time_after_us?; - let elapsed_cpu_time_us = cpu_time_after.checked_sub(cpu_time_before)?; - let elapsed_cpu_time_us_avg_per_second = elapsed_cpu_time_us.checked_div(duration_seconds)?; - elapsed_cpu_time_us_avg_per_second.checked_div(1_000_000 / 100) -} - -/// Our tests sometimes don't exit Buck 2 cleanly, and they might not get an opportunity to do so -/// if they are terminated. This allows the daemon to self-destruct. -pub(crate) fn maybe_schedule_termination() -> anyhow::Result<()> { - static TERMINATE_AFTER: EnvHelper<u64> = EnvHelper::new("BUCK2_TERMINATE_AFTER"); - - if let Some(duration) = TERMINATE_AFTER.get_copied()? { - thread::Builder::new() - .name("buck2-terminate-after".to_owned()) - .spawn(move || { - const MEASURE_CPU_TIME_FOR: u64 = 10; - let (sleep_before, sleep_after) = match duration.checked_sub(MEASURE_CPU_TIME_FOR) { - Some(sleep_before) => (sleep_before, MEASURE_CPU_TIME_FOR), - None => (0, duration), - }; - - thread::sleep(Duration::from_secs(sleep_before)); - let process_cpu_time_us_before = process_cpu_time_us(); - thread::sleep(Duration::from_secs(sleep_after)); - let process_cpu_time_us_after = process_cpu_time_us(); - - let elapsed_cpu_time_avg_in_percents = elapsed_cpu_time_as_percents( - process_cpu_time_us_before, - process_cpu_time_us_after, - sleep_after, - ); - if let Some(elapsed_cpu_time_avg_in_percents) = elapsed_cpu_time_avg_in_percents { - panic!( - "Buck is exiting after {}s elapsed; avg process CPU in the last {}s is {}%", - duration, sleep_after, elapsed_cpu_time_avg_in_percents - ); - } else { - panic!("Buck is exiting after {}s elapsed", duration); - } - })?; - } - - Ok(()) -} - -#[cfg(test)] -mod tests { - use crate::commands::schedule_termination::elapsed_cpu_time_as_percents; - - #[test] - fn test_elapsed_cpu_time_as_percents() { - // 12 seconds wall time - // 6 seconds of CPU time - // equivalent to 50% CPU usage - assert_eq!( - Some(50), - elapsed_cpu_time_as_percents(Some(1_000_123), Some(7_000_123), 12) - ); - } -} diff --git a/app/buck2/src/lib.rs b/app/buck2/src/lib.rs index 27ab13328c687..c890508f7f209 100644 --- a/app/buck2/src/lib.rs +++ b/app/buck2/src/lib.rs @@ -7,21 +7,21 @@ * of this source tree. */ -//! `buck2 audit` command implementation, both client and server.
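Reviewer note: the deleted `elapsed_cpu_time_as_percents` helper converts a CPU-time delta in microseconds into an average utilization percentage: the delta is averaged per wall-clock second, then divided by 10,000, since 1,000,000 us of CPU per second equals 100%. A self-contained sketch of the same arithmetic (the function name and `main` harness are illustrative, not part of this diff):

fn cpu_percent(before_us: u64, after_us: u64, wall_secs: u64) -> Option<u64> {
    let delta_us = after_us.checked_sub(before_us)?;
    let avg_us_per_sec = delta_us.checked_div(wall_secs)?;
    // 1_000_000 us of CPU per wall second == 100%, so 10_000 us/s per percent.
    avg_us_per_sec.checked_div(1_000_000 / 100)
}

fn main() {
    // 6s of CPU time over 12s of wall time averages to 50%, matching the deleted test.
    assert_eq!(cpu_percent(1_000_123, 7_000_123, 12), Some(50));
}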
+#![feature(error_generic_member_access)] +#![feature(used_with_arg)] -// Plugins -#![cfg_attr(feature = "gazebo_lint", feature(plugin))] -#![cfg_attr(feature = "gazebo_lint", allow(deprecated))] // :( -#![cfg_attr(feature = "gazebo_lint", plugin(gazebo_lint))] +use std::thread; use anyhow::Context as _; use buck2_audit::AuditCommand; -use buck2_client::args::expand_argfiles_with_context; use buck2_client::commands::build::BuildCommand; use buck2_client::commands::bxl::BxlCommand; use buck2_client::commands::clean::CleanCommand; use buck2_client::commands::ctargets::ConfiguredTargetsCommand; use buck2_client::commands::debug::DebugCommand; +use buck2_client::commands::expand_external_cell::ExpandExternalCellCommand; +use buck2_client::commands::explain::ExplainCommand; +use buck2_client::commands::help_env::HelpEnvCommand; use buck2_client::commands::init::InitCommand; use buck2_client::commands::install::InstallCommand; use buck2_client::commands::kill::KillCommand; @@ -40,8 +40,7 @@ use buck2_client::commands::status::StatusCommand; use buck2_client::commands::subscribe::SubscribeCommand; use buck2_client::commands::targets::TargetsCommand; use buck2_client::commands::test::TestCommand; -use buck2_client_ctx::argv::Argv; -use buck2_client_ctx::cleanup_ctx::AsyncCleanupContextGuard; +use buck2_client_ctx::argfiles::expand_argfiles_with_context; use buck2_client_ctx::client_ctx::ClientCommandContext; use buck2_client_ctx::client_metadata::ClientMetadata; use buck2_client_ctx::exit_result::ExitResult; @@ -49,59 +48,52 @@ use buck2_client_ctx::immediate_config::ImmediateConfigContext; use buck2_client_ctx::streaming::BuckSubcommand; use buck2_client_ctx::tokio_runtime_setup::client_tokio_runtime; use buck2_client_ctx::version::BuckVersion; +use buck2_cmd_starlark_client::StarlarkCommand; +use buck2_common::argv::Argv; use buck2_common::invocation_paths::InvocationPaths; +use buck2_common::invocation_paths_result::InvocationPathsResult; use buck2_common::invocation_roots::find_invocation_roots; -use buck2_common::result::ToSharedResultExt; -use buck2_core::env_helper::EnvHelper; +use buck2_common::invocation_roots::BuckCliError; +use buck2_core::buck2_env_anyhow; use buck2_core::fs::paths::file_name::FileNameBuf; use buck2_event_observer::verbosity::Verbosity; -use buck2_starlark::StarlarkCommand; -use clap::AppSettings; -use clap::Parser; -use dice::DetectCycles; -use dice::WhichDice; +use buck2_util::cleanup_ctx::AsyncCleanupContextGuard; +use buck2_util::threads::thread_spawn_scoped; +use clap::CommandFactory; +use clap::FromArgMatches; use dupe::Dupe; -use no_buckd::start_in_process_daemon; use crate::check_user_allowed::check_user_allowed; -use crate::commands::daemon::DaemonCommand; -use crate::commands::docs::DocsCommand; -use crate::commands::forkserver::ForkserverCommand; -use crate::commands::internal_test_runner::InternalTestRunnerCommand; use crate::process_context::ProcessContext; -#[macro_use] -pub mod panic; mod check_user_allowed; - -pub mod commands; -mod no_buckd; +mod cli_style; +pub(crate) mod commands; +pub mod panic; pub mod process_context; fn parse_isolation_dir(s: &str) -> anyhow::Result<FileNameBuf> { FileNameBuf::try_from(s.to_owned()).context("isolation dir must be a directory name") } -pub use buck2_server_ctx::logging::TracingLogFile; - /// Options of `buck2` command, before subcommand.
#[derive(Clone, Debug, clap::Parser)] +#[clap(next_help_heading = "Universal Options")] struct BeforeSubcommandOptions { + /// The name of the directory that Buck2 creates within buck-out for writing outputs and daemon + /// information. If one is not provided, Buck2 creates a directory with the default name. + /// /// Instances of Buck2 share a daemon if and only if their isolation directory is identical. /// The isolation directory also influences the output paths provided by Buck2, /// and as a result using a non-default isolation dir will cause cache misses (and slower builds). #[clap( - parse(try_from_str = parse_isolation_dir), + value_parser = parse_isolation_dir, env("BUCK_ISOLATION_DIR"), long, default_value="v2" )] isolation_dir: FileNameBuf, - // TODO: Those should be on the daemon subcommand. - #[clap(flatten)] - daemon: DaemonBeforeSubcommandOptions, - /// How verbose buck should be while logging. /// /// Values: @@ -118,7 +110,7 @@ struct BeforeSubcommandOptions { long = "verbose", default_value = "1", global = true, - parse(try_from_str = Verbosity::try_from_cli) + value_parser = Verbosity::try_from_cli )] verbosity: Verbosity, @@ -139,7 +131,7 @@ struct BeforeSubcommandOptions { /// running with the same isolation directory. /// /// This is an unsupported option used only for development work. - #[clap(env("BUCK2_NO_BUCKD"), long, global(true), hidden(true))] + #[clap(env("BUCK2_NO_BUCKD"), long, global(true), hide(true))] // Env var is BUCK2_NO_BUCKD instead of NO_BUCKD env var from buck1 because no buckd // is not supported for production work for buck2 and lots of places already set // NO_BUCKD=1 for buck1. @@ -151,24 +143,6 @@ help_wrapper: bool, } -#[derive(Clone, Debug, clap::Parser)] -struct DaemonBeforeSubcommandOptions { - #[clap(env("DICE_DETECT_CYCLES_UNSTABLE"), long, hidden(true))] - detect_cycles: Option<DetectCycles>, - - #[clap(env("WHICH_DICE_UNSTABLE"), long, hidden(true))] - which_dice: Option<WhichDice>, - - #[clap(env("ENABLE_TRACE_IO"), long, hidden(true))] - enable_trace_io: bool, - - /// If passed a given materializer identity, if the materializer state DB matches that - /// identity, the daemon will not use it and will instead create a new empty materializer - /// state. - #[clap(long, hidden(true))] - reject_materializer_state: Option<String>, -} - #[rustfmt::skip] // Formatting in internal and in OSS versions disagree after oss markers applied. fn help() -> &'static str { concat!( @@ -183,13 +157,14 @@ fn help() -> &'static str { #[clap( name = "buck2", about(Some(help())), - version(BuckVersion::get_version()) + version(BuckVersion::get_version()), + styles = cli_style::get_styles(), )] pub(crate) struct Opt { - #[clap(flatten)] - common_opts: BeforeSubcommandOptions, #[clap(subcommand)] cmd: CommandKind, + #[clap(flatten)] + common_opts: BeforeSubcommandOptions, } impl Opt { @@ -222,14 +197,13 @@ pub fn exec(process: ProcessContext<'_>) -> ExitResult { .context("Error expanding argsfiles")?; // Override arg0 in `buck2 help`. - static BUCK2_ARG0: EnvHelper<String> = EnvHelper::new("BUCK2_ARG0"); - if let Some(arg0) = BUCK2_ARG0.get()? { - expanded_args[0] = arg0.clone(); + if let Some(arg0) = buck2_env_anyhow!("BUCK2_ARG0")?
{ + expanded_args[0] = arg0.to_owned(); } - let clap = Opt::clap(); + let clap = Opt::command(); let matches = clap.get_matches_from(&expanded_args); - let opt: Opt = Opt::from_clap(&matches); + let opt: Opt = Opt::from_arg_matches(&matches)?; if opt.common_opts.help_wrapper { return ExitResult::err(anyhow::anyhow!( @@ -238,7 +212,9 @@ pub fn exec(process: ProcessContext<'_>) -> ExitResult { } match &opt.cmd { - CommandKind::Clean(..) | CommandKind::Daemon(..) | CommandKind::Forkserver(..) => {} + #[cfg(not(client_only))] + CommandKind::Daemon(..) | CommandKind::Forkserver(..) => {} + CommandKind::Clean(..) => {} _ => { check_user_allowed()?; } @@ -254,20 +230,29 @@ pub fn exec(process: ProcessContext<'_>) -> ExitResult { #[derive(Debug, clap::Subcommand)] pub(crate) enum CommandKind { - #[clap(setting(AppSettings::Hidden))] - Daemon(DaemonCommand), - #[clap(setting(AppSettings::Hidden))] - Forkserver(ForkserverCommand), - #[clap(setting(AppSettings::Hidden))] - InternalTestRunner(InternalTestRunnerCommand), + #[cfg(not(client_only))] + #[clap(hide = true)] + Daemon(buck2_daemon::daemon::DaemonCommand), + #[cfg(not(client_only))] + #[clap(hide = true)] + Forkserver(crate::commands::forkserver::ForkserverCommand), + #[cfg(not(client_only))] + #[clap(hide = true)] + InternalTestRunner(crate::commands::internal_test_runner::InternalTestRunnerCommand), #[clap(subcommand)] Audit(AuditCommand), Aquery(AqueryCommand), Build(BuildCommand), Bxl(BxlCommand), + // TODO(nga): implement `buck2 help-buckconfig` too + // https://www.internalfb.com/tasks/?t=183528129 + HelpEnv(HelpEnvCommand), Test(TestCommand), Cquery(CqueryCommand), Init(InitCommand), + #[clap(hide = true)] // TODO iguridi: remove + Explain(ExplainCommand), + ExpandExternalCell(ExpandExternalCellCommand), Install(InstallCommand), Kill(KillCommand), Killall(KillallCommand), @@ -284,9 +269,12 @@ pub(crate) enum CommandKind { Utargets(TargetsCommand), Ctargets(ConfiguredTargetsCommand), Uquery(UqueryCommand), - #[clap(subcommand, setting(AppSettings::Hidden))] + #[clap(subcommand, hide = true)] Debug(DebugCommand), - Docs(DocsCommand), + #[clap(hide = true)] + Complete(buck2_cmd_completion_client::complete::CompleteCommand), + Completion(buck2_cmd_completion_client::completion::CompletionCommand), + Docs(buck2_cmd_docs::DocsCommand), #[clap(subcommand)] Profile(ProfileCommand), #[clap(hide(true))] // @oss-enable @@ -308,72 +296,124 @@ impl CommandKind { common_opts: BeforeSubcommandOptions, ) -> ExitResult { let roots = find_invocation_roots(process.working_dir.path()); - let paths = roots - .map(|r| InvocationPaths { - roots: r, - isolation: common_opts.isolation_dir.clone(), - }) - .shared_error(); - + let paths_anyhow = roots.map(|r| InvocationPaths { + roots: r, + isolation: common_opts.isolation_dir.clone(), + }); + + let paths_result = match paths_anyhow { + Ok(paths) => InvocationPathsResult::Paths(paths.clone()), + Err(err) => match err.downcast_ref::<BuckCliError>() { + Some(BuckCliError::NoBuckRoot(_)) => { + InvocationPathsResult::OutsideOfRepo(buck2_error::Error::from(err)) + } + None => InvocationPathsResult::OtherError(buck2_error::Error::from(err)), + }, + }; // Handle the daemon command earlier: it wants to fork, but the things we do below might // want to create threads.
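Reviewer note: the new `InvocationPathsResult` plumbing above classifies the root-finding error instead of failing eagerly, so commands that can run outside a repo still work. The pattern is `anyhow`'s `downcast_ref`, which inspects the concrete error type without consuming the error. A minimal sketch of that pattern with hypothetical stand-in types (`CliError` and `PathsResult` are not the real buck2 types):

use anyhow::anyhow;

#[derive(Debug, thiserror::Error)]
enum CliError {
    #[error("no buck root found")]
    NoRoot,
}

enum PathsResult {
    Paths(String),
    OutsideOfRepo(anyhow::Error),
    OtherError(anyhow::Error),
}

fn classify(res: anyhow::Result<String>) -> PathsResult {
    match res {
        Ok(paths) => PathsResult::Paths(paths),
        // downcast_ref peeks at the concrete error type without consuming the
        // error, so the original error can still be stored in either variant.
        Err(err) => match err.downcast_ref::<CliError>() {
            Some(CliError::NoRoot) => PathsResult::OutsideOfRepo(err),
            None => PathsResult::OtherError(err),
        },
    }
}

fn main() {
    let classified = classify(Err(anyhow!(CliError::NoRoot)));
    assert!(matches!(classified, PathsResult::OutsideOfRepo(_)));
}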
+ #[cfg(not(client_only))] if let CommandKind::Daemon(cmd) = self { return cmd .exec( - process.init, process.log_reload_handle.dupe(), - paths?, - common_opts.daemon, + paths_result.get_result()?, false, || {}, ) .into(); } + thread::scope(|scope| { + // Spawn a thread to have stack size independent on linker/environment. + match thread_spawn_scoped("buck2-main", scope, move || { + self.exec_no_daemon( + common_opts, + process, + immediate_config, + matches, + argv, + paths_result, + ) + }) { + Ok(t) => match t.join() { + Ok(res) => res, + Err(_) => ExitResult::bail("Main thread panicked"), + }, + Err(e) => ExitResult::bail(format_args!("Failed to start main thread: {}", e)), + } + }) + } + + fn exec_no_daemon( + self, + common_opts: BeforeSubcommandOptions, + process: ProcessContext<'_>, + immediate_config: &ImmediateConfigContext, + matches: &clap::ArgMatches, + argv: Argv, + paths: InvocationPathsResult, + ) -> ExitResult { + if common_opts.no_buckd { + // `no_buckd` can't work in a client-only binary + if let Some(res) = ExitResult::retry_command_with_full_binary()? { + return res; + } + } + + let fb = buck2_common::fbinit::get_or_init_fbcode_globals(); let runtime = client_tokio_runtime()?; let async_cleanup = AsyncCleanupContextGuard::new(&runtime); let start_in_process_daemon = if common_opts.no_buckd { - start_in_process_daemon( - process.init, + #[cfg(not(client_only))] + let v = buck2_daemon::no_buckd::start_in_process_daemon( immediate_config.daemon_startup_config()?, - paths.clone()?, - common_opts.daemon, + paths.clone().get_result()?, &runtime, - )? + )?; + #[cfg(client_only)] + let v = unreachable!(); // case covered above + #[allow(dead_code)] + v } else { None }; - let command_ctx = ClientCommandContext { - init: process.init, + let command_ctx = ClientCommandContext::new( + fb, immediate_config, paths, - verbosity: common_opts.verbosity, + process.working_dir.clone(), + common_opts.verbosity, start_in_process_daemon, - working_dir: process.working_dir.clone(), - trace_id: process.trace_id.dupe(), - async_cleanup: async_cleanup.ctx().dupe(), - stdin: process.stdin, - restarter: process.restarter, - restarted_trace_id: process.restarted_trace_id.dupe(), argv, - runtime: &runtime, - oncall: common_opts.oncall, - client_metadata: common_opts.client_metadata, - }; + process.trace_id.dupe(), + async_cleanup.ctx().dupe(), + process.stdin, + process.restarter, + process.restarted_trace_id.dupe(), + &runtime, + common_opts.oncall, + common_opts.client_metadata, + common_opts.isolation_dir, + ); match self { + #[cfg(not(client_only))] CommandKind::Daemon(..) 
=> unreachable!("Checked earlier"), + #[cfg(not(client_only))] CommandKind::Forkserver(cmd) => cmd .exec(matches, command_ctx, process.log_reload_handle.dupe()) .into(), + #[cfg(not(client_only))] CommandKind::InternalTestRunner(cmd) => cmd.exec(matches, command_ctx).into(), CommandKind::Aquery(cmd) => cmd.exec(matches, command_ctx), CommandKind::Build(cmd) => cmd.exec(matches, command_ctx), CommandKind::Bxl(cmd) => cmd.exec(matches, command_ctx), CommandKind::Test(cmd) => cmd.exec(matches, command_ctx), CommandKind::Cquery(cmd) => cmd.exec(matches, command_ctx), + CommandKind::HelpEnv(cmd) => cmd.exec(matches, command_ctx), CommandKind::Kill(cmd) => cmd.exec(matches, command_ctx), CommandKind::Killall(cmd) => cmd.exec(matches, command_ctx), CommandKind::Clean(cmd) => cmd.exec(matches, command_ctx), @@ -394,14 +434,18 @@ impl CommandKind { CommandKind::Run(cmd) => cmd.exec(matches, command_ctx), CommandKind::Uquery(cmd) => cmd.exec(matches, command_ctx), CommandKind::Debug(cmd) => cmd.exec(matches, command_ctx), + CommandKind::Complete(cmd) => cmd.exec(matches, command_ctx), + CommandKind::Completion(cmd) => cmd.exec(Opt::command(), matches, command_ctx), CommandKind::Docs(cmd) => cmd.exec(matches, command_ctx), CommandKind::Profile(cmd) => cmd.exec(matches, command_ctx), CommandKind::Rage(cmd) => cmd.exec(matches, command_ctx), CommandKind::Init(cmd) => cmd.exec(matches, command_ctx), + CommandKind::Explain(cmd) => cmd.exec(matches, command_ctx), CommandKind::Install(cmd) => cmd.exec(matches, command_ctx), CommandKind::Log(cmd) => cmd.exec(matches, command_ctx), CommandKind::Lsp(cmd) => cmd.exec(matches, command_ctx), CommandKind::Subscribe(cmd) => cmd.exec(matches, command_ctx), + CommandKind::ExpandExternalCell(cmd) => cmd.exec(matches, command_ctx), } } } diff --git a/app/buck2/src/panic.rs b/app/buck2/src/panic.rs index 4025af34da3fc..d3f563847a9fa 100644 --- a/app/buck2/src/panic.rs +++ b/app/buck2/src/panic.rs @@ -19,13 +19,15 @@ use anyhow::Context as _; use fbinit::FacebookInit; /// Initializes the panic hook. -pub fn initialize(fb: FacebookInit) -> anyhow::Result<()> { +pub fn initialize() -> anyhow::Result<()> { let hook = panic::take_hook(); panic::set_hook(Box::new(move |info| { + let fb = buck2_common::fbinit::get_or_init_fbcode_globals(); the_panic_hook(fb, info); hook(info); })); buck2_core::error::initialize(Box::new(move |category, err, loc, options| { + let fb = buck2_common::fbinit::get_or_init_fbcode_globals(); imp::write_soft_error( fb, category, @@ -54,15 +56,15 @@ fn the_panic_hook(fb: FacebookInit, info: &PanicInfo) { mod imp { use std::collections::HashMap; use std::panic::PanicInfo; - use std::thread; use std::time::Duration; use backtrace::Backtrace; use buck2_core::error::StructuredErrorOptions; use buck2_data::Location; use buck2_events::metadata; - use buck2_events::sink::scribe::new_thrift_scribe_sink_if_enabled; + use buck2_events::sink::remote::new_remote_event_sink_if_enabled; use buck2_events::BuckEvent; + use buck2_util::threads::thread_spawn; use fbinit::FacebookInit; use tokio::runtime::Builder; @@ -122,16 +124,27 @@ mod imp { } /// Collects metadata from the current environment for use in LogView. 
- fn get_metadata_for_panic() -> HashMap<String, String> { + fn get_metadata_for_panic(options: &StructuredErrorOptions) -> HashMap<String, String> { + #[cfg_attr(client_only, allow(unused_mut))] let mut map = metadata::collect(); + #[cfg(not(client_only))] if let Some(commands) = buck2_server::active_commands::try_active_commands() { let commands = commands.keys().map(|id| id.to_string()).collect::<Vec<_>>(); map.insert("active_commands".to_owned(), commands.join(",")); } + if let Some(logview_key) = options + .low_cardinality_key_for_additional_logview_samples + .as_ref() + { + map.insert( + "low_cardinality_key_for_additional_logview_samples".to_owned(), + logview_key.to_string(), + ); + } map } - /// Writes a representation of the given `PanicInfo` to Scribe, via the `Panic` event. + /// Writes a representation of the given `PanicInfo` to Scribe, via the `StructuredError` event. pub(crate) fn write_panic_to_scribe(fb: FacebookInit, info: &PanicInfo) { let message = get_message_for_panic(info); let location = info.location().map(|loc| Location { @@ -148,7 +161,7 @@ pub(crate) fn write_soft_error( fb: FacebookInit, category: &str, - err: &anyhow::Error, + err: &buck2_error::Error, location: Location, options: StructuredErrorOptions, ) { @@ -168,8 +181,12 @@ dispatcher.instant_event(event.clone()); } None => { - if !buck2_server::active_commands::broadcast_instant_event(&event) && !options.quiet - { + #[cfg(client_only)] + let warn = true; + #[cfg(not(client_only))] + let warn = !buck2_server::active_commands::broadcast_instant_event(&event) + && !options.quiet; + if warn { tracing::warn!("Warning \"{}\": {:#}", category, err); } } @@ -185,7 +202,7 @@ options: &StructuredErrorOptions, soft_error_category: Option<&str>, ) -> buck2_data::StructuredError { - let metadata = get_metadata_for_panic(); + let metadata = get_metadata_for_panic(options); buck2_data::StructuredError { location, payload: message, @@ -197,6 +214,7 @@ daemon_in_memory_state_is_corrupted: options.daemon_in_memory_state_is_corrupted, daemon_materializer_state_is_corrupted: options.daemon_materializer_state_is_corrupted, action_cache_is_corrupted: options.action_cache_is_corrupted, + deprecation: options.deprecation, } } @@ -206,15 +224,15 @@ use buck2_core::facebook_only; use buck2_data::InstantEvent; - use buck2_events::sink::scribe; + use buck2_events::sink::remote; use buck2_wrapper_common::invocation_id::TraceId; facebook_only(); - if !scribe::is_enabled() { + if !remote::is_enabled() { return; } - let sink = match new_thrift_scribe_sink_if_enabled( + let sink = match new_remote_event_sink_if_enabled( fb, /* buffer size */ 100, /* retry_backoff */ Duration::from_millis(500), @@ -238,23 +256,22 @@ // on that thread. // // Note that if we fail to spawn a writer thread, then we just won't log.
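Reviewer note: for context on the surrounding file, `panic.rs` chains its reporting hook in front of the previously installed one via `std::panic::take_hook`/`set_hook`, so the default backtrace printing still runs afterwards. A minimal standalone sketch of that chaining (not buck2 code):

use std::panic;

fn main() {
    // Take the currently installed hook, then install a closure that reports
    // first and delegates afterwards, so default behaviour is preserved.
    let previous = panic::take_hook();
    panic::set_hook(Box::new(move |info| {
        eprintln!("custom reporting before the chained hook runs: {info}");
        previous(info);
    }));

    let _ = panic::catch_unwind(|| panic!("boom"));
    println!("still running after the caught panic");
}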
- let _err = thread::Builder::new() - .spawn(move || { - let runtime = Builder::new_current_thread().enable_all().build().unwrap(); - runtime.block_on( - sink.send_now(BuckEvent::new( - SystemTime::now(), - TraceId::new(), - None, - None, - InstantEvent { - data: Some(data.into()), - } - .into(), - )), - ); - }) - .map_err(|_| ()) - .and_then(|t| t.join().map_err(|_| ())); + let _err = thread_spawn("buck2-write-panic-to-scribe", move || { + let runtime = Builder::new_current_thread().enable_all().build().unwrap(); + runtime.block_on( + sink.send_now(BuckEvent::new( + SystemTime::now(), + TraceId::new(), + None, + None, + InstantEvent { + data: Some(data.into()), + } + .into(), + )), + ); + }) + .map_err(|_| ()) + .and_then(|t| t.join().map_err(|_| ())); } } diff --git a/app/buck2/src/process_context.rs b/app/buck2/src/process_context.rs index eb52c2ac9d900..9d882c1a14dda 100644 --- a/app/buck2/src/process_context.rs +++ b/app/buck2/src/process_context.rs @@ -17,7 +17,6 @@ use buck2_wrapper_common::invocation_id::TraceId; /// State passed down from `main` to this crate. pub struct ProcessContext<'a> { - pub init: fbinit::FacebookInit, pub log_reload_handle: &'a Arc, pub stdin: &'a mut Stdin, pub working_dir: &'a WorkingDir, diff --git a/app/buck2/transition.bzl b/app/buck2/transition.bzl new file mode 100644 index 0000000000000..4d56297c7585b --- /dev/null +++ b/app/buck2/transition.bzl @@ -0,0 +1,35 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _transition_impl(platform: PlatformInfo, refs: struct) -> PlatformInfo: + val = refs.val[ConstraintValueInfo] + new_cfg = ConfigurationInfo( + constraints = platform.configuration.constraints | {val.setting.label: val}, + values = platform.configuration.values, + ) + return PlatformInfo( + label = platform.label, + configuration = new_cfg, + ) + +_transition_func = transition( + impl = _transition_impl, + refs = { + "val": "//buck2/app/buck2:buck2_client_only_build", + }, +) + +def _rule_impl(ctx: AnalysisContext) -> list[Provider]: + return ctx.attrs.actual.providers + +buck2_client_transition_alias = rule( + impl = _rule_impl, + attrs = { + "actual": attrs.dep(), + }, + cfg = _transition_func, +) diff --git a/app/buck2_action_impl/BUCK b/app/buck2_action_impl/BUCK index ae0605b50432f..a172776e5615a 100644 --- a/app/buck2_action_impl/BUCK +++ b/app/buck2_action_impl/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -21,7 +20,6 @@ rust_library( "fbsource//third-party/rust:once_cell", "fbsource//third-party/rust:parking_lot", "fbsource//third-party/rust:relative-path", - "fbsource//third-party/rust:serde", "fbsource//third-party/rust:serde_json", "fbsource//third-party/rust:sha1", "fbsource//third-party/rust:thiserror", @@ -30,13 +28,19 @@ rust_library( "//buck2/app/buck2_action_metadata_proto:buck2_action_metadata_proto", "//buck2/app/buck2_artifact:buck2_artifact", "//buck2/app/buck2_build_api:buck2_build_api", + "//buck2/app/buck2_build_signals:buck2_build_signals", "//buck2/app/buck2_common:buck2_common", "//buck2/app/buck2_core:buck2_core", "//buck2/app/buck2_data:buck2_data", + "//buck2/app/buck2_directory:buck2_directory", + 
"//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_events:buck2_events", "//buck2/app/buck2_execute:buck2_execute", "//buck2/app/buck2_file_watcher:buck2_file_watcher", + "//buck2/app/buck2_futures:buck2_futures", + "//buck2/app/buck2_http:buck2_http", "//buck2/app/buck2_interpreter:buck2_interpreter", + "//buck2/app/buck2_util:buck2_util", "//buck2/dice/dice:dice", "//buck2/gazebo/dupe:dupe", "//buck2/gazebo/gazebo:gazebo", diff --git a/app/buck2_action_impl/Cargo.toml b/app/buck2_action_impl/Cargo.toml index f367b3bb11196..cc395c7e2acb9 100644 --- a/app/buck2_action_impl/Cargo.toml +++ b/app/buck2_action_impl/Cargo.toml @@ -1,8 +1,10 @@ [package] +description = "Action implementations and context functions" +edition = "2021" +license = { workspace = true } name = "buck2_action_impl" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Action implementations and context functions" [dependencies] anyhow = { workspace = true } @@ -19,9 +21,8 @@ indexmap = { workspace = true } itertools = { workspace = true } once_cell = { workspace = true } parking_lot = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } relative-path = { workspace = true } +serde_json = { workspace = true } sha1 = { workspace = true } thiserror = { workspace = true } tracing = { workspace = true } @@ -34,14 +35,21 @@ starlark = { workspace = true } starlark_map = { workspace = true } buck2_action_metadata_proto = { workspace = true } +buck2_artifact = { workspace = true } buck2_build_api = { workspace = true } +buck2_build_signals = { workspace = true } buck2_common = { workspace = true } buck2_core = { workspace = true } buck2_data = { workspace = true } -buck2_execute = { workspace = true } +buck2_directory = { workspace = true } +buck2_error = { workspace = true } buck2_events = { workspace = true } +buck2_execute = { workspace = true } buck2_file_watcher = { workspace = true } +buck2_futures = { workspace = true } +buck2_http = { workspace = true } buck2_interpreter = { workspace = true } -buck2_artifact = { workspace = true } +buck2_node = { workspace = true } +buck2_util = { workspace = true } host_sharing = { workspace = true } remote_execution = { workspace = true } diff --git a/app/buck2_action_impl/src/actions.rs b/app/buck2_action_impl/src/actions.rs new file mode 100644 index 0000000000000..f82749ddda895 --- /dev/null +++ b/app/buck2_action_impl/src/actions.rs @@ -0,0 +1,10 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub(crate) mod impls; diff --git a/app/buck2_action_impl/src/actions/impls.rs b/app/buck2_action_impl/src/actions/impls.rs new file mode 100644 index 0000000000000..cccd8e5e79ad4 --- /dev/null +++ b/app/buck2_action_impl/src/actions/impls.rs @@ -0,0 +1,18 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +pub(crate) mod cas_artifact; +pub(crate) mod copy; +pub(crate) mod download_file; +pub(crate) mod offline; +pub(crate) mod run; +pub(crate) mod symlinked_dir; +pub(crate) mod write; +pub(crate) mod write_json; +pub(crate) mod write_macros; diff --git a/app/buck2_action_impl/src/actions/impls/cas_artifact.rs b/app/buck2_action_impl/src/actions/impls/cas_artifact.rs index e4cdc59cc01a3..933dc334ef9f5 100644 --- a/app/buck2_action_impl/src/actions/impls/cas_artifact.rs +++ b/app/buck2_action_impl/src/actions/impls/cas_artifact.rs @@ -18,6 +18,7 @@ use buck2_artifact::artifact::build_artifact::BuildArtifact; use buck2_build_api::actions::execute::action_executor::ActionExecutionKind; use buck2_build_api::actions::execute::action_executor::ActionExecutionMetadata; use buck2_build_api::actions::execute::action_executor::ActionOutputs; +use buck2_build_api::actions::execute::error::ExecuteError; use buck2_build_api::actions::Action; use buck2_build_api::actions::ActionExecutable; use buck2_build_api::actions::ActionExecutionCtx; @@ -28,7 +29,7 @@ use buck2_common::file_ops::FileDigest; use buck2_common::file_ops::FileMetadata; use buck2_common::file_ops::TrackedFileDigest; use buck2_common::io::trace::TracingIoProvider; -use buck2_core::category::Category; +use buck2_core::category::CategoryRef; use buck2_core::execution_types::executor_config::RemoteExecutorUseCase; use buck2_execute::artifact_value::ArtifactValue; use buck2_execute::digest::CasDigestToReExt; @@ -43,14 +44,12 @@ use chrono::TimeZone; use chrono::Utc; use dupe::Dupe; use indexmap::IndexSet; -use once_cell::sync::Lazy; use remote_execution as RE; use starlark::values::OwnedFrozenValue; -use thiserror::Error; use crate::actions::impls::offline; -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum CasArtifactActionDeclarationError { #[error("CAS artifact action should not have inputs, got {0}")] WrongNumberOfInputs(usize), WrongNumberOfOutputs(usize), } -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum CasArtifactActionExecutionError { #[error("Error accessing digest expiration for: `{0}`")] GetDigestExpirationError(FileDigest), @@ -108,6 +107,7 @@ impl UnregisteredAction for UnregisteredCasArtifactAction { inputs: IndexSet<ArtifactGroup>, outputs: IndexSet<BuildArtifact>, _starlark_data: Option<OwnedFrozenValue>, + _error_handler: Option<OwnedFrozenValue>, ) -> anyhow::Result<Box<dyn Action>> { Ok(Box::new(CasArtifactAction::new(inputs, outputs, *self)?)) } @@ -157,6 +157,7 @@ ActionExecutionMetadata { execution_kind: ActionExecutionKind::Deferred, timing: ActionExecutionTimingData::default(), + input_files_bytes: None, }, )) } @@ -172,18 +173,20 @@ Ok(Cow::Borrowed(&[])) } - fn outputs(&self) -> anyhow::Result<Cow<'_, [BuildArtifact]>> { - Ok(Cow::Borrowed(slice::from_ref(&self.output))) + fn outputs(&self) -> Cow<'_, [BuildArtifact]> { + Cow::Borrowed(slice::from_ref(&self.output)) + } + + fn first_output(&self) -> &BuildArtifact { + &self.output } fn as_executable(&self) -> ActionExecutable<'_> { ActionExecutable::Incremental(self) } - fn category(&self) -> &Category { - static CAS_ARTIFACT_CATEGORY: Lazy<Category> = - Lazy::new(|| Category::try_from("cas_artifact").unwrap()); - &CAS_ARTIFACT_CATEGORY + fn category(&self) -> CategoryRef { + CategoryRef::unchecked_new("cas_artifact") } fn identifier(&self) -> Option<&str> { @@ -196,11 +199,11 @@ impl IncrementalActionExecutable for CasArtifactAction { async fn execute( &self, ctx: &mut dyn ActionExecutionCtx, - ) ->
anyhow::Result<(ActionOutputs, ActionExecutionMetadata)> { + ) -> Result<(ActionOutputs, ActionExecutionMetadata), ExecuteError> { // If running in offline environment, try to restore from cached outputs // first. Fallthrough to normal operation if unsuccessful. if ctx.run_action_knobs().use_network_action_output_cache { - return self.execute_for_offline(ctx).await; + return self.execute_for_offline(ctx).await.map_err(Into::into); } let expiration = ctx @@ -216,12 +219,14 @@ .1; if expiration < self.inner.expires_after { - return Err(CasArtifactActionExecutionError::InvalidExpiration { - digest: self.inner.digest.dupe(), - declared_expiration: self.inner.expires_after, - effective_expiration: expiration, - } - .into()); + return Err( + anyhow::Error::new(CasArtifactActionExecutionError::InvalidExpiration { + digest: self.inner.digest.dupe(), + declared_expiration: self.inner.expires_after, + effective_expiration: expiration, + }) + .into(), + ); } let value = match self.inner.kind { @@ -230,6 +235,7 @@ DirectoryKind::Tree => ctx .re_client() .download_typed_blobs::<RE::Tree>( + None, vec![self.inner.digest.to_re()], self.inner.re_use_case, ) @@ -243,6 +249,7 @@ let re_client = ctx.re_client(); let root_directory = re_client .download_typed_blobs::<RE::Directory>( + None, vec![self.inner.digest.to_re()], self.inner.re_use_case, ) @@ -301,8 +308,7 @@ .await?; let io_provider = ctx.io_provider(); - let maybe_io_tracer = io_provider.as_any().downcast_ref::<TracingIoProvider>(); - if let Some(tracer) = maybe_io_tracer { + if let Some(tracer) = TracingIoProvider::from_io(&*io_provider) { let offline_cache_path = offline::declare_copy_to_offline_output_cache(ctx, &self.output, value.dupe()) .await?; @@ -314,6 +320,7 @@ ActionExecutionMetadata { execution_kind: ActionExecutionKind::Deferred, timing: ActionExecutionTimingData::default(), + input_files_bytes: None, }, )) } diff --git a/app/buck2_action_impl/src/actions/impls/copy.rs b/app/buck2_action_impl/src/actions/impls/copy.rs index 50f9acefdf02a..14d5765c51e63 100644 --- a/app/buck2_action_impl/src/actions/impls/copy.rs +++ b/app/buck2_action_impl/src/actions/impls/copy.rs @@ -17,13 +17,14 @@ use buck2_build_api::actions::box_slice_set::BoxSliceSet; use buck2_build_api::actions::execute::action_executor::ActionExecutionKind; use buck2_build_api::actions::execute::action_executor::ActionExecutionMetadata; use buck2_build_api::actions::execute::action_executor::ActionOutputs; +use buck2_build_api::actions::execute::error::ExecuteError; use buck2_build_api::actions::Action; use buck2_build_api::actions::ActionExecutable; use buck2_build_api::actions::ActionExecutionCtx; use buck2_build_api::actions::IncrementalActionExecutable; use buck2_build_api::actions::UnregisteredAction; use buck2_build_api::artifact_groups::ArtifactGroup; -use buck2_core::category::Category; +use buck2_core::category::CategoryRef; use buck2_execute::artifact::artifact_dyn::ArtifactDyn; use buck2_execute::artifact_utils::ArtifactValueBuilder; use buck2_execute::execute::command_executor::ActionExecutionTimingData; @@ -31,11 +32,9 @@ use buck2_execute::materialize::materializer::CopiedArtifact; use dupe::Dupe; use gazebo::prelude::*; use indexmap::IndexSet; -use once_cell::sync::Lazy; use starlark::values::OwnedFrozenValue; -use
thiserror::Error; -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum CopyActionValidationError { #[error("Exactly one input file must be specified for a copy action, got {0}")] WrongNumberOfInputs(usize), @@ -68,6 +67,7 @@ impl UnregisteredAction for UnregisteredCopyAction { inputs: IndexSet<ArtifactGroup>, outputs: IndexSet<BuildArtifact>, _starlark_data: Option<OwnedFrozenValue>, + _error_handler: Option<OwnedFrozenValue>, ) -> anyhow::Result<Box<dyn Action>> { Ok(Box::new(CopyAction::new(self.copy, inputs, outputs)?)) } @@ -133,18 +133,20 @@ impl Action for CopyAction { Ok(Cow::Borrowed(self.inputs.as_slice())) } - fn outputs(&self) -> anyhow::Result<Cow<'_, [BuildArtifact]>> { - Ok(Cow::Borrowed(self.outputs.as_slice())) + fn outputs(&self) -> Cow<'_, [BuildArtifact]> { + Cow::Borrowed(self.outputs.as_slice()) + } + + fn first_output(&self) -> &BuildArtifact { + self.output() } fn as_executable(&self) -> ActionExecutable<'_> { ActionExecutable::Incremental(self) } - fn category(&self) -> &Category { - static COPY_CATEGORY: Lazy<Category> = Lazy::new(|| Category::try_from("copy").unwrap()); - - &COPY_CATEGORY + fn category(&self) -> CategoryRef { + CategoryRef::unchecked_new("copy") } fn identifier(&self) -> Option<&str> { @@ -157,7 +159,7 @@ impl IncrementalActionExecutable for CopyAction { async fn execute( &self, ctx: &mut dyn ActionExecutionCtx, - ) -> anyhow::Result<(ActionOutputs, ActionExecutionMetadata)> { + ) -> Result<(ActionOutputs, ActionExecutionMetadata), ExecuteError> { let (input, src_value) = ctx .artifact_values(self.input()) .iter() @@ -187,6 +189,9 @@ .declare_copy( dest.clone(), value.dupe(), + // FIXME(JakobDegen): This is wrong in cases where the input artifact is a source + // directory with ignored paths, as the materializer will incorrectly assume that + // the source directory matches the artifact value when it doesn't.
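Reviewer note: this hunk, like the cas_artifact and download_file ones, replaces a `Lazy<Category>` static with `CategoryRef::unchecked_new`, trading one-time validation plus an owned static for a zero-cost borrowed wrapper around a known-good literal. A sketch contrasting the two shapes, using `once_cell` as the old code did; the types here are hypothetical stand-ins for the `buck2_core` ones:

use once_cell::sync::Lazy;

#[allow(dead_code)]
struct Category(String);

impl Category {
    fn try_from_str(s: &str) -> Result<Self, String> {
        if s.chars().all(|c| c.is_ascii_lowercase() || c == '_') {
            Ok(Category(s.to_owned()))
        } else {
            Err(format!("invalid category: {s}"))
        }
    }
}

#[allow(dead_code)]
#[derive(Clone, Copy)]
struct CategoryRef<'a>(&'a str);

impl<'a> CategoryRef<'a> {
    // "unchecked" skips validation, which is fine for known-good string literals.
    const fn unchecked_new(s: &'a str) -> Self {
        CategoryRef(s)
    }
}

fn main() {
    // Old shape: validate once on first use, cache the owned value in a static.
    static COPY_CATEGORY: Lazy<Category> = Lazy::new(|| Category::try_from_str("copy").unwrap());
    let _old: &Category = &COPY_CATEGORY;
    // New shape: a cheap borrowed wrapper, no static and no allocation.
    let _new: CategoryRef<'static> = CategoryRef::unchecked_new("copy");
}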
vec![CopiedArtifact::new( src, dest, @@ -201,6 +206,7 @@ ActionExecutionMetadata { execution_kind: ActionExecutionKind::Simple, timing: ActionExecutionTimingData::default(), + input_files_bytes: None, }, )) } diff --git a/app/buck2_action_impl/src/actions/impls/download_file.rs b/app/buck2_action_impl/src/actions/impls/download_file.rs index 0d87fd52f9b63..06ad43390b78f 100644 --- a/app/buck2_action_impl/src/actions/impls/download_file.rs +++ b/app/buck2_action_impl/src/actions/impls/download_file.rs @@ -17,6 +17,7 @@ use buck2_artifact::artifact::build_artifact::BuildArtifact; use buck2_build_api::actions::execute::action_executor::ActionExecutionKind; use buck2_build_api::actions::execute::action_executor::ActionExecutionMetadata; use buck2_build_api::actions::execute::action_executor::ActionOutputs; +use buck2_build_api::actions::execute::error::ExecuteError; use buck2_build_api::actions::Action; use buck2_build_api::actions::ActionExecutable; use buck2_build_api::actions::ActionExecutionCtx; @@ -27,10 +28,9 @@ use buck2_common::cas_digest::RawDigest; use buck2_common::file_ops::FileDigest; use buck2_common::file_ops::FileMetadata; use buck2_common::file_ops::TrackedFileDigest; -use buck2_common::http::HttpClient; -use buck2_common::http::HttpError; use buck2_common::io::trace::TracingIoProvider; -use buck2_core::category::Category; +use buck2_core::category::CategoryRef; +use buck2_error::ErrorTag; use buck2_execute::artifact_value::ArtifactValue; use buck2_execute::digest_config::DigestConfig; use buck2_execute::execute::command_executor::ActionExecutionTimingData; @@ -38,22 +38,19 @@ use buck2_execute::materialize::http::http_download; use buck2_execute::materialize::http::http_head; use buck2_execute::materialize::http::Checksum; use buck2_execute::materialize::materializer::HttpDownloadInfo; +use buck2_http::HttpClient; use dupe::Dupe; use indexmap::IndexSet; -use once_cell::sync::Lazy; use starlark::values::OwnedFrozenValue; -use thiserror::Error; use crate::actions::impls::offline; -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum DownloadFileActionError { #[error("download file action should not have inputs, got {0}")] WrongNumberOfInputs(usize), #[error("Exactly one output file must be specified for a download file action, got {0}")] WrongNumberOfOutputs(usize), - #[error(transparent)] - Http(#[from] HttpError), } #[derive(Debug, Allocative)] @@ -89,6 +86,7 @@ impl UnregisteredAction for UnregisteredDownloadFileAction { inputs: IndexSet<ArtifactGroup>, outputs: IndexSet<BuildArtifact>, _starlark_data: Option<OwnedFrozenValue>, + _error_handler: Option<OwnedFrozenValue>, ) -> anyhow::Result<Box<dyn Action>> { Ok(Box::new(DownloadFileAction::new(inputs, outputs, *self)?)) } @@ -169,7 +167,9 @@ impl DownloadFileAction { }; let url = self.url(client); - let head = http_head(client, url).await?; + let head = http_head(client, url) + .await + .map_err(|e| buck2_error::Error::from(e).tag([ErrorTag::DownloadFileHeadRequest]))?; let content_length = head .headers() @@ -219,6 +219,7 @@ impl DownloadFileAction { ActionExecutionMetadata { execution_kind: ActionExecutionKind::Simple, timing: ActionExecutionTimingData::default(), + input_files_bytes: None, }, )) } @@ -234,19 +235,20 @@ impl Action for DownloadFileAction { Ok(Cow::Borrowed(&self.inputs)) } - fn outputs(&self) -> anyhow::Result<Cow<'_, [BuildArtifact]>> { - Ok(Cow::Borrowed(&self.outputs)) + fn outputs(&self) -> Cow<'_, [BuildArtifact]> { + Cow::Borrowed(&self.outputs) + } + + fn first_output(&self) -> &BuildArtifact { + self.output() } fn as_executable(&self) ->
ActionExecutable<'_> { ActionExecutable::Incremental(self) } - fn category(&self) -> &Category { - static DOWNLOAD_FILE_CATEGORY: Lazy<Category> = - Lazy::new(|| Category::try_from("download_file").unwrap()); - - &DOWNLOAD_FILE_CATEGORY + fn category(&self) -> CategoryRef { + CategoryRef::unchecked_new("download_file") } fn identifier(&self) -> Option<&str> { @@ -262,14 +264,14 @@ impl IncrementalActionExecutable for DownloadFileAction { async fn execute( &self, ctx: &mut dyn ActionExecutionCtx, - ) -> anyhow::Result<(ActionOutputs, ActionExecutionMetadata)> { + ) -> Result<(ActionOutputs, ActionExecutionMetadata), ExecuteError> { // Early return - if this path exists, it's because we're running in a // special offline mode where the HEAD request below will likely fail. // Shortcut and just return this path as the action output. // // This mostly looks like a "copy" action. if ctx.run_action_knobs().use_network_action_output_cache { - return self.execute_for_offline(ctx).await; + return self.execute_for_offline(ctx).await.map_err(Into::into); } let client = ctx.http_client(); @@ -332,8 +334,7 @@ impl IncrementalActionExecutable for DownloadFileAction { // If we're tracing I/O, get the materializer to copy to the offline cache // so we can include it in the offline archive manifest later. let io_provider = ctx.io_provider(); - let maybe_io_tracer = io_provider.as_any().downcast_ref::<TracingIoProvider>(); - if let Some(tracer) = maybe_io_tracer { + if let Some(tracer) = TracingIoProvider::from_io(&*io_provider) { let offline_cache_path = offline::declare_copy_to_offline_output_cache(ctx, self.output(), value.dupe()) .await?; @@ -345,6 +346,7 @@ impl IncrementalActionExecutable for DownloadFileAction { ActionExecutionMetadata { execution_kind, timing: ActionExecutionTimingData::default(), + input_files_bytes: None, }, )) } diff --git a/app/buck2_action_impl/src/actions/impls/mod.rs b/app/buck2_action_impl/src/actions/impls/mod.rs deleted file mode 100644 index 340eb4e4c609d..0000000000000 --- a/app/buck2_action_impl/src/actions/impls/mod.rs +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree.
- */ - -pub(crate) mod cas_artifact; -pub(crate) mod copy; -pub(crate) mod download_file; -pub(crate) mod offline; -pub mod run; -pub(crate) mod symlinked_dir; -pub(crate) mod write; -pub(crate) mod write_json; -pub(crate) mod write_macros; diff --git a/app/buck2_action_impl/src/actions/impls/offline.rs b/app/buck2_action_impl/src/actions/impls/offline.rs index 2bb216a6565a8..13a86ae4a52f8 100644 --- a/app/buck2_action_impl/src/actions/impls/offline.rs +++ b/app/buck2_action_impl/src/actions/impls/offline.rs @@ -47,15 +47,13 @@ pub(crate) async fn declare_copy_from_offline_cache( .fs() .resolve_offline_output_cache_path(output.get_path()); - let (value, _hashing_time) = ctx - .blocking_executor() - .execute_io_inline(|| { - build_entry_from_disk( - ctx.fs().fs().resolve(&offline_cache_path), - FileDigestConfig::build(ctx.digest_config().cas_digest_config()), - ) - }) - .await?; + let (value, _hashing_time) = build_entry_from_disk( + ctx.fs().fs().resolve(&offline_cache_path), + FileDigestConfig::build(ctx.digest_config().cas_digest_config()), + ctx.blocking_executor(), + ctx.fs().fs().root(), + ) + .await?; let entry = value .ok_or_else(|| anyhow::anyhow!("Missing offline cache entry: `{}`", offline_cache_path))? diff --git a/app/buck2_action_impl/src/actions/impls/run.rs b/app/buck2_action_impl/src/actions/impls/run.rs new file mode 100644 index 0000000000000..2ac04d2028c51 --- /dev/null +++ b/app/buck2_action_impl/src/actions/impls/run.rs @@ -0,0 +1,849 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use std::borrow::Cow; +use std::ops::ControlFlow; + +use allocative::Allocative; +use anyhow::Context; +use async_trait::async_trait; +use buck2_artifact::artifact::build_artifact::BuildArtifact; +use buck2_build_api::actions::box_slice_set::BoxSliceSet; +use buck2_build_api::actions::execute::action_executor::ActionExecutionMetadata; +use buck2_build_api::actions::execute::action_executor::ActionOutputs; +use buck2_build_api::actions::execute::error::ExecuteError; +use buck2_build_api::actions::impls::expanded_command_line::ExpandedCommandLine; +use buck2_build_api::actions::Action; +use buck2_build_api::actions::ActionExecutable; +use buck2_build_api::actions::ActionExecutionCtx; +use buck2_build_api::actions::IncrementalActionExecutable; +use buck2_build_api::actions::UnregisteredAction; +use buck2_build_api::artifact_groups::ArtifactGroup; +use buck2_build_api::artifact_groups::ArtifactGroupValues; +use buck2_build_api::interpreter::rule_defs::cmd_args::space_separated::SpaceSeparatedCommandLineBuilder; +use buck2_build_api::interpreter::rule_defs::cmd_args::value_as::ValueAsCommandLineLike; +use buck2_build_api::interpreter::rule_defs::cmd_args::CommandLineArgLike; +use buck2_build_api::interpreter::rule_defs::cmd_args::CommandLineArtifactVisitor; +use buck2_build_api::interpreter::rule_defs::cmd_args::CommandLineContext; +use buck2_build_api::interpreter::rule_defs::cmd_args::DefaultCommandLineContext; +use buck2_build_api::interpreter::rule_defs::cmd_args::FrozenStarlarkCmdArgs; +use buck2_build_api::interpreter::rule_defs::cmd_args::SimpleCommandLineArtifactVisitor; +use buck2_build_api::interpreter::rule_defs::cmd_args::StarlarkCmdArgs; +use buck2_build_api::interpreter::rule_defs::provider::builtin::worker_info::FrozenWorkerInfo; +use buck2_build_api::interpreter::rule_defs::provider::builtin::worker_info::WorkerInfo; +use buck2_core::category::CategoryRef; +use buck2_core::execution_types::executor_config::RemoteExecutorDependency; +use buck2_core::fs::buck_out_path::BuckOutPath; +use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; +use buck2_error::BuckErrorContext; +use buck2_events::dispatch::span_async_simple; +use buck2_execute::artifact::fs::ExecutorFs; +use buck2_execute::execute::action_digest::ActionDigest; +use buck2_execute::execute::cache_uploader::force_cache_upload; +use buck2_execute::execute::environment_inheritance::EnvironmentInheritance; +use buck2_execute::execute::manager::CommandExecutionManager; +use buck2_execute::execute::prepared::PreparedAction; +use buck2_execute::execute::request::ActionMetadataBlob; +use buck2_execute::execute::request::CommandExecutionInput; +use buck2_execute::execute::request::CommandExecutionOutput; +use buck2_execute::execute::request::CommandExecutionPaths; +use buck2_execute::execute::request::CommandExecutionRequest; +use buck2_execute::execute::request::ExecutorPreference; +use buck2_execute::execute::request::WorkerId; +use buck2_execute::execute::request::WorkerSpec; +use buck2_execute::execute::result::CommandExecutionResult; +use derive_more::Display; +use dupe::Dupe; +use gazebo::prelude::*; +use host_sharing::HostSharingRequirements; +use host_sharing::WeightClass; +use indexmap::indexmap; +use indexmap::IndexSet; +use itertools::Itertools; +use serde_json::json; +use sorted_vector_map::SortedVectorMap; +use starlark::values::dict::DictRef; +use starlark::values::dict::DictType; +use starlark::values::starlark_value; +use starlark::values::Freeze; +use starlark::values::Freezer; +use 
starlark::values::FrozenStringValue; +use starlark::values::FrozenValueOfUnchecked; +use starlark::values::FrozenValueTyped; +use starlark::values::NoSerialize; +use starlark::values::OwnedFrozenValue; +use starlark::values::OwnedFrozenValueTyped; +use starlark::values::ProvidesStaticType; +use starlark::values::StarlarkValue; +use starlark::values::StringValue; +use starlark::values::Trace; +use starlark::values::UnpackValue; +use starlark::values::ValueOf; +use starlark::values::ValueOfUnchecked; +use starlark::values::ValueTyped; +use starlark::values::ValueTypedComplex; + +use self::dep_files::DepFileBundle; +use crate::actions::impls::run::dep_files::make_dep_file_bundle; +use crate::actions::impls::run::dep_files::populate_dep_files; +use crate::actions::impls::run::dep_files::DepFilesCommandLineVisitor; +use crate::actions::impls::run::dep_files::RunActionDepFiles; +use crate::actions::impls::run::metadata::metadata_content; +use crate::context::run::RunActionError; + +pub(crate) mod audit_dep_files; +pub(crate) mod dep_files; +mod metadata; + +#[derive(Debug, Allocative)] +pub(crate) struct MetadataParameter { + /// Name of the environment variable which is set to contain + /// resolved path of the metadata file when requested by user. + pub(crate) env_var: String, + /// User-defined path in the output directory of the metadata file. + pub(crate) path: ForwardRelativePathBuf, +} + +impl Display for MetadataParameter { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let json = json!({ + "env_var": self.env_var, + "path": self.path, + }); + write!(f, "{}", json) + } +} + +#[derive(Debug, buck2_error::Error)] +enum LocalPreferenceError { + #[error("cannot have `local_only = True` and `prefer_local = True` at the same time")] + LocalOnlyAndPreferLocal, + #[error("cannot have `local_only = True` and `prefer_remote = True` at the same time")] + LocalOnlyAndPreferRemote, + #[error( + "cannot have `local_only = True`, `prefer_local = True` and `prefer_remote = True` at the same time" + )] + LocalOnlyAndPreferLocalAndPreferRemote, + #[error("cannot have `prefer_local = True` and `prefer_remote = True` at the same time")] + PreferLocalAndPreferRemote, +} + +pub(crate) fn new_executor_preference( + local_only: bool, + prefer_local: bool, + prefer_remote: bool, +) -> anyhow::Result { + match (local_only, prefer_local, prefer_remote) { + (true, false, false) => Ok(ExecutorPreference::LocalRequired), + (true, false, true) => Err(anyhow::anyhow!( + LocalPreferenceError::LocalOnlyAndPreferRemote + )), + (false, true, false) => Ok(ExecutorPreference::LocalPreferred), + (false, true, true) => Err(anyhow::anyhow!( + LocalPreferenceError::PreferLocalAndPreferRemote + )), + (false, false, false) => Ok(ExecutorPreference::Default), + (false, false, true) => Ok(ExecutorPreference::RemotePreferred), + (true, true, false) => Err(anyhow::anyhow!( + LocalPreferenceError::LocalOnlyAndPreferLocal + )), + (true, true, true) => Err(anyhow::anyhow!( + LocalPreferenceError::LocalOnlyAndPreferLocalAndPreferRemote + )), + } +} + +#[derive(Debug, Allocative)] +pub(crate) struct UnregisteredRunAction { + pub(crate) executor_preference: ExecutorPreference, + pub(crate) always_print_stderr: bool, + pub(crate) weight: WeightClass, + pub(crate) low_pass_filter: bool, + pub(crate) dep_files: RunActionDepFiles, + pub(crate) metadata_param: Option, + pub(crate) no_outputs_cleanup: bool, + pub(crate) allow_cache_upload: bool, + pub(crate) allow_dep_file_cache_upload: bool, + pub(crate) 
force_full_hybrid_if_capable: bool, + pub(crate) unique_input_inodes: bool, + pub(crate) remote_execution_dependencies: Vec, +} + +impl UnregisteredAction for UnregisteredRunAction { + fn register( + self: Box, + _: IndexSet, + outputs: IndexSet, + starlark_data: Option, + error_handler: Option, + ) -> anyhow::Result> { + let starlark_values = starlark_data.internal_error_anyhow("module data to be present")?; + let run_action = RunAction::new(*self, starlark_values, outputs, error_handler)?; + Ok(Box::new(run_action)) + } +} + +#[derive(Debug, Display, Trace, ProvidesStaticType, NoSerialize, Allocative)] +#[display("run_action_values")] +pub(crate) struct StarlarkRunActionValues<'v> { + pub(crate) exe: ValueTyped<'v, StarlarkCmdArgs<'v>>, + pub(crate) args: ValueTyped<'v, StarlarkCmdArgs<'v>>, + pub(crate) env: Option>>>, + pub(crate) worker: Option>>, + pub(crate) category: StringValue<'v>, + pub(crate) identifier: Option>, +} + +#[derive(Debug, Display, Trace, ProvidesStaticType, NoSerialize, Allocative)] +#[display("run_action_values")] +pub(crate) struct FrozenStarlarkRunActionValues { + pub(crate) exe: FrozenValueTyped<'static, FrozenStarlarkCmdArgs>, + pub(crate) args: FrozenValueTyped<'static, FrozenStarlarkCmdArgs>, + pub(crate) env: + Option>>>, + pub(crate) worker: Option>, + pub(crate) category: FrozenStringValue, + pub(crate) identifier: Option, +} + +#[starlark_value(type = "run_action_values")] +impl<'v> StarlarkValue<'v> for StarlarkRunActionValues<'v> {} + +#[starlark_value(type = "run_action_values")] +impl<'v> StarlarkValue<'v> for FrozenStarlarkRunActionValues { + type Canonical = StarlarkRunActionValues<'v>; +} + +impl<'v> Freeze for StarlarkRunActionValues<'v> { + type Frozen = FrozenStarlarkRunActionValues; + fn freeze(self, freezer: &Freezer) -> anyhow::Result { + let StarlarkRunActionValues { + exe, + args, + env, + worker, + category, + identifier, + } = self; + Ok(FrozenStarlarkRunActionValues { + exe: FrozenValueTyped::new_err(exe.to_value().freeze(freezer)?)?, + args: FrozenValueTyped::new_err(args.to_value().freeze(freezer)?)?, + env: env.freeze(freezer)?, + worker: worker.freeze(freezer)?, + category: category.freeze(freezer)?, + identifier: identifier.freeze(freezer)?, + }) + } +} + +impl FrozenStarlarkRunActionValues { + pub(crate) fn worker<'v>(&'v self) -> anyhow::Result>>> { + let Some(worker) = self.worker else { + return Ok(None); + }; + ValueOf::unpack_value_err(worker.to_value()).map(Some) + } +} + +struct UnpackedWorkerValues<'v> { + exe: &'v dyn CommandLineArgLike, + id: WorkerId, + concurrency: Option, +} + +struct UnpackedRunActionValues<'v> { + exe: &'v dyn CommandLineArgLike, + args: &'v dyn CommandLineArgLike, + env: Vec<(&'v str, &'v dyn CommandLineArgLike)>, + worker: Option>, +} + +#[derive(Debug, Allocative)] +pub(crate) struct RunAction { + inner: UnregisteredRunAction, + starlark_values: OwnedFrozenValueTyped, + outputs: BoxSliceSet, + error_handler: Option, + input_files_bytes: u64, +} + +enum ExecuteResult { + LocalDepFileHit(ActionOutputs, ActionExecutionMetadata), + ExecutedOrReHit { + result: CommandExecutionResult, + dep_file_bundle: Option, + executor_preference: ExecutorPreference, + prepared_action: PreparedAction, + input_files_bytes: u64, + }, +} + +impl RunAction { + fn unpack( + values: &OwnedFrozenValueTyped, + ) -> anyhow::Result { + let exe: &dyn CommandLineArgLike = &*values.exe; + let args: &dyn CommandLineArgLike = &*values.args; + let env = match values.env { + None => Vec::new(), + Some(env) => { + let d = 
DictRef::from_value(env.to_value().get()).context("expecting dict")?; + let mut res = Vec::with_capacity(d.len()); + for (k, v) in d.iter() { + res.push(( + k.unpack_str().context("expecting string")?, + ValueAsCommandLineLike::unpack_value_err(v)?.0, + )); + } + res + } + }; + let worker: Option<&WorkerInfo> = values.worker()?.map(|v| v.typed); + + let worker = worker.map(|worker| UnpackedWorkerValues { + exe: worker.exe_command_line(), + id: WorkerId(worker.id), + concurrency: worker.concurrency(), + }); + + Ok(UnpackedRunActionValues { + exe, + args, + env, + worker, + }) + } + + /// Get the command line expansion for this RunAction. + fn expand_command_line_and_worker( + &self, + fs: &ExecutorFs, + artifact_visitor: &mut impl CommandLineArtifactVisitor, + ) -> anyhow::Result<(ExpandedCommandLine, Option)> { + let mut ctx = DefaultCommandLineContext::new(fs); + let values = Self::unpack(&self.starlark_values)?; + + let mut exe_rendered = Vec::::new(); + values + .exe + .add_to_command_line(&mut exe_rendered, &mut ctx)?; + values.exe.visit_artifacts(artifact_visitor)?; + + let worker = if let Some(worker) = values.worker { + let mut worker_rendered = Vec::::new(); + worker + .exe + .add_to_command_line(&mut worker_rendered, &mut ctx)?; + worker.exe.visit_artifacts(artifact_visitor)?; + Some(WorkerSpec { + exe: worker_rendered, + id: worker.id, + concurrency: worker.concurrency, + }) + } else { + None + }; + + let mut args_rendered = Vec::::new(); + values + .args + .add_to_command_line(&mut args_rendered, &mut ctx)?; + values.args.visit_artifacts(artifact_visitor)?; + + let cli_env: anyhow::Result> = values + .env + .into_iter() + .map(|(k, v)| { + let mut env = String::new(); + let mut ctx = DefaultCommandLineContext::new(fs); + v.add_to_command_line( + &mut SpaceSeparatedCommandLineBuilder::wrap_string(&mut env), + &mut ctx, + )?; + v.visit_artifacts(artifact_visitor)?; + Ok((k.to_owned(), env)) + }) + .collect(); + + Ok(( + ExpandedCommandLine { + exe: exe_rendered, + args: args_rendered, + env: cli_env?, + }, + worker, + )) + } + + pub(crate) fn new( + inner: UnregisteredRunAction, + starlark_values: OwnedFrozenValue, + outputs: IndexSet, + error_handler: Option, + ) -> anyhow::Result { + let starlark_values = starlark_values + .downcast_anyhow() + .internal_error_anyhow("Must be `run_action_values`")?; + + Self::unpack(&starlark_values)?; + + // This is checked when declared, but we depend on it so make it clear that it's enforced. + if outputs.is_empty() { + return Err(RunActionError::NoOutputsSpecified.into()); + } + + Ok(RunAction { + inner, + starlark_values, + outputs: BoxSliceSet::from(outputs), + error_handler, + input_files_bytes: 0, + }) + } + + fn prepare( + &self, + visitor: &mut impl RunActionVisitor, + ctx: &mut dyn ActionExecutionCtx, + ) -> anyhow::Result { + let executor_fs = ctx.executor_fs(); + let fs = executor_fs.fs(); + + let (expanded, worker) = + self.expand_command_line_and_worker(&ctx.executor_fs(), visitor)?; + + // TODO (@torozco): At this point, might as well just receive the list already. Finding + // those things in a HashMap is just not very useful. + let artifact_inputs: Vec<&ArtifactGroupValues> = visitor + .inputs() + .map(|group| ctx.artifact_values(group)) + .collect(); + + let mut inputs: Vec = + artifact_inputs[..].map(|&i| CommandExecutionInput::Artifact(Box::new(i.dupe()))); + + // Handle case when user requested file with action metadata to be generated. + // Generate content and output path for the file. 
It will be either passed
+        // to RE as a blob or written to disk by the local executor.
+        // The path to this file is passed to the user in an environment variable whose name the user selects.
+        let cli_ctx = DefaultCommandLineContext::new(&executor_fs);
+
+        let mut extra_env = Vec::new();
+
+        if let Some(metadata_param) = &self.inner.metadata_param {
+            let path = BuckOutPath::new(ctx.target().owner().dupe(), metadata_param.path.clone());
+            let env = cli_ctx
+                .resolve_project_path(fs.buck_out_path_resolver().resolve_gen(&path))?
+                .into_string();
+            let (data, digest) = metadata_content(fs, &artifact_inputs, ctx.digest_config())?;
+            inputs.push(CommandExecutionInput::ActionMetadata(ActionMetadataBlob {
+                data,
+                digest,
+                path,
+            }));
+            extra_env.push((metadata_param.env_var.to_owned(), env));
+        }
+
+        let scratch = ctx.target().scratch_path();
+        let scratch_path = fs.buck_out_path_resolver().resolve_scratch(&scratch);
+        extra_env.push((
+            "BUCK_SCRATCH_PATH".to_owned(),
+            cli_ctx.resolve_project_path(scratch_path)?.into_string(),
+        ));
+        inputs.push(CommandExecutionInput::ScratchPath(scratch));
+
+        let paths = CommandExecutionPaths::new(
+            inputs,
+            self.outputs
+                .iter()
+                .map(|b| CommandExecutionOutput::BuildArtifact {
+                    path: b.get_path().dupe(),
+                    output_type: b.output_type(),
+                })
+                .collect(),
+            ctx.fs(),
+            ctx.digest_config(),
+        )?;
+
+        Ok(PreparedRunAction {
+            expanded,
+            extra_env,
+            paths,
+            worker,
+        })
+    }
+
+    pub(crate) async fn check_cache_result_is_useable(
+        &self,
+        ctx: &mut dyn ActionExecutionCtx,
+        request: &CommandExecutionRequest,
+        action_digest: &ActionDigest,
+        result: CommandExecutionResult,
+        dep_file_bundle: &Option<DepFileBundle>,
+    ) -> anyhow::Result<ControlFlow<CommandExecutionResult, CommandExecutionManager>> {
+        // If it's served by the regular action cache, there is no need to verify anything here.
+        if !result.was_served_by_remote_dep_file_cache() {
+            return Ok(ControlFlow::Break(result));
+        }
+
+        if let Some(bundle) = dep_file_bundle {
+            if let Some(found_dep_file_entry) = &result.dep_file_metadata {
+                let can_use = span_async_simple(
+                    buck2_data::MatchDepFilesStart {
+                        checking_filtered_inputs: true,
+                        remote_cache: true,
+                    },
+                    bundle.check_remote_dep_file_entry(
+                        ctx.digest_config(),
+                        ctx.fs(),
+                        ctx.materializer(),
+                        found_dep_file_entry,
+                    ),
+                    buck2_data::MatchDepFilesEnd {},
+                )
+                .await?;
+
+                if can_use {
+                    tracing::info!(
+                        "Action result is cached via remote dep file cache, skipping execution of :\n```\n$ {}\n```\n for action `{}` with remote dep file key `{}`",
+                        request.all_args_str(),
+                        action_digest,
+                        bundle.remote_dep_file_action.action,
+                    );
+                    return Ok(ControlFlow::Break(result));
+                }
+            } else {
+                // This should not happen, as we check for the metadata on the cache querier side.
+                tracing::debug!(
+                    "The remote dep file cache returned a hit for `{}`, but there is no metadata",
+                    bundle.remote_dep_file_action.action
+                );
+            }
+        }
+        // Continue through the other options below.
+        Ok(ControlFlow::Continue(ctx.command_execution_manager()))
+    }
+
+    async fn execute_inner(
+        &self,
+        ctx: &mut dyn ActionExecutionCtx,
+    ) -> Result<ExecuteResult, ExecuteError> {
+        let knobs = ctx.run_action_knobs();
+        let process_dep_files = !self.inner.dep_files.labels.is_empty() || knobs.hash_all_commands;
+        let (prepared_run_action, dep_file_visitor) = if !process_dep_files {
+            (
+                self.prepare(&mut SimpleCommandLineArtifactVisitor::new(), ctx)?,
+                None,
+            )
+        } else {
+            let mut visitor = DepFilesCommandLineVisitor::new(&self.inner.dep_files);
+            let prepared = self.prepare(&mut visitor, ctx)?;
+            (prepared, Some(visitor))
+        };
+        let cmdline_digest = prepared_run_action.expanded.fingerprint();
+        let input_files_bytes = prepared_run_action.paths.input_files_bytes();
+        // Run actions are assumed to be shared.
+        let host_sharing_requirements = HostSharingRequirements::Shared(self.inner.weight);
+
+        let req = prepared_run_action
+            .into_command_execution_request()
+            .with_prefetch_lossy_stderr(true)
+            .with_executor_preference(self.inner.executor_preference)
+            .with_host_sharing_requirements(host_sharing_requirements.into())
+            .with_low_pass_filter(self.inner.low_pass_filter)
+            .with_outputs_cleanup(!self.inner.no_outputs_cleanup)
+            .with_local_environment_inheritance(EnvironmentInheritance::local_command_exclusions())
+            .with_force_full_hybrid_if_capable(self.inner.force_full_hybrid_if_capable)
+            .with_unique_input_inodes(self.inner.unique_input_inodes)
+            .with_remote_execution_dependencies(self.inner.remote_execution_dependencies.clone());
+
+        let (dep_file_bundle, req) = if let Some(visitor) = dep_file_visitor {
+            let bundle = make_dep_file_bundle(ctx, visitor, cmdline_digest, req.paths())?;
+            // Enable remote dep file cache lookup for actions that have remote dep file uploads enabled.
+            let req = if self.inner.allow_dep_file_cache_upload {
+                req.with_remote_dep_file_key(&bundle.remote_dep_file_action.action.coerce())
+            } else {
+                req
+            };
+            (Some(bundle), req)
+        } else {
+            (None, req)
+        };
+
+        // First, check in the local dep file cache whether an identical action can be found there.
+        // Do this before checking the action cache, as we can avoid a potentially large download.
+        // Once the action cache lookup misses, we will do the full dep file cache lookup.
+        let should_fully_check_dep_file_cache = if let Some(dep_file_bundle) = &dep_file_bundle {
+            let (outputs, should_fully_check_dep_file_cache) = dep_file_bundle
+                .check_local_dep_file_cache_for_identical_action(ctx, self.outputs.as_slice())
+                .await?;
+            if let Some((outputs, metadata)) = outputs {
+                return Ok(ExecuteResult::LocalDepFileHit(outputs, metadata));
+            }
+            should_fully_check_dep_file_cache
+        } else {
+            false
+        };
+
+        // Prepare the action, check the action cache, fully check the local dep file cache if
+        // needed, then execute the command.
+        let prepared_action = ctx.prepare_action(&req)?;
+        let manager = ctx.command_execution_manager();
+
+        let action_cache_result = ctx.action_cache(manager, &req, &prepared_action).await;
+
+        // If the result was served by the remote dep file cache, we can't use it just yet. We need
+        // to verify that the inputs tracked by the dep file that was actually used for the cache
+        // hit are identical to the inputs we have for this action.
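// A minimal, self-contained sketch of the `std::ops::ControlFlow` pattern the code
// above relies on: a cache lookup either short-circuits with a usable result
// (`Break`) or hands the execution manager back so the caller can fall through to
// the next tier (`Continue`). All types and names below are illustrative stand-ins,
// not buck2 API.
use std::ops::ControlFlow;

struct CachedResult {
    verified: bool,
}
struct Manager;

fn check_hit(hit: Option<CachedResult>, manager: Manager) -> ControlFlow<CachedResult, Manager> {
    match hit {
        // Only a verified hit may short-circuit the pipeline.
        Some(result) if result.verified => ControlFlow::Break(result),
        // Otherwise return the manager so the caller can keep going.
        _ => ControlFlow::Continue(manager),
    }
}

fn main() {
    let outcome = match check_hit(None, Manager) {
        ControlFlow::Break(result) => result,
        ControlFlow::Continue(_manager) => CachedResult { verified: true }, // execute for real
    };
    assert!(outcome.verified);
}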
+        let result = if let ControlFlow::Break(res) = action_cache_result {
+            self.check_cache_result_is_useable(
+                ctx,
+                &req,
+                &prepared_action.action_and_blobs.action,
+                res,
+                &dep_file_bundle,
+            )
+            .await?
+        } else {
+            action_cache_result
+        };
+
+        // If the cache queries did not yield a result, fall back to the local dep file query (continuation), then execution.
+        let mut result = match result {
+            ControlFlow::Break(res) => res,
+            ControlFlow::Continue(manager) => {
+                if let Some(dep_file_bundle) = &dep_file_bundle {
+                    if should_fully_check_dep_file_cache {
+                        let lookup = dep_file_bundle
+                            .check_local_dep_file_cache(ctx, self.outputs.as_slice())
+                            .await?;
+                        if let Some((outputs, metadata)) = lookup {
+                            return Ok(ExecuteResult::LocalDepFileHit(outputs, metadata));
+                        }
+                    }
+                };
+
+                ctx.exec_cmd(manager, &req, &prepared_action).await
+            }
+        };
+
+        // If the action has a dep file, log the remote dep file key so we can look out for collisions.
+        if let Some(bundle) = &dep_file_bundle {
+            result.dep_file_key = Some(bundle.remote_dep_file_action.action.coerce())
+        }
+
+        Ok(ExecuteResult::ExecutedOrReHit {
+            result,
+            dep_file_bundle,
+            // Dropping the rest of `req` to avoid holding paths longer than necessary.
+            executor_preference: req.executor_preference,
+            prepared_action,
+            input_files_bytes,
+        })
+    }
+}
+
+pub(crate) struct PreparedRunAction {
+    expanded: ExpandedCommandLine,
+    extra_env: Vec<(String, String)>,
+    paths: CommandExecutionPaths,
+    worker: Option<WorkerSpec>,
+}
+
+impl PreparedRunAction {
+    fn into_command_execution_request(self) -> CommandExecutionRequest {
+        let Self {
+            expanded: ExpandedCommandLine { exe, args, mut env },
+            extra_env,
+            paths,
+            worker,
+        } = self;
+
+        for (k, v) in extra_env {
+            env.insert(k, v);
+        }
+
+        CommandExecutionRequest::new(exe, args, paths, env).with_worker(worker)
+    }
+}
+
+trait RunActionVisitor: CommandLineArtifactVisitor {
+    type Iter<'a>: Iterator<Item = &'a ArtifactGroup>
+    where
+        Self: 'a;
+
+    fn inputs<'a>(&'a self) -> Self::Iter<'a>;
+}
+
+impl RunActionVisitor for SimpleCommandLineArtifactVisitor {
+    type Iter<'a> = impl Iterator<Item = &'a ArtifactGroup> where Self: 'a;
+
+    fn inputs<'a>(&'a self) -> Self::Iter<'a> {
+        self.inputs.iter()
+    }
+}
+
+impl RunActionVisitor for DepFilesCommandLineVisitor<'_> {
+    type Iter<'a> = impl Iterator<Item = &'a ArtifactGroup> where Self: 'a;
+
+    fn inputs<'a>(&'a self) -> Self::Iter<'a> {
+        self.inputs.iter().flat_map(|g| g.iter())
+    }
+}
+
+#[async_trait]
+impl Action for RunAction {
+    fn kind(&self) -> buck2_data::ActionKind {
+        buck2_data::ActionKind::Run
+    }
+
+    fn inputs(&self) -> anyhow::Result<Cow<'_, [ArtifactGroup]>> {
+        let values = Self::unpack(&self.starlark_values)?;
+        let mut artifact_visitor = SimpleCommandLineArtifactVisitor::new();
+        values.args.visit_artifacts(&mut artifact_visitor)?;
+        values.exe.visit_artifacts(&mut artifact_visitor)?;
+        if let Some(worker) = values.worker {
+            worker.exe.visit_artifacts(&mut artifact_visitor)?;
+        }
+        for (_, v) in values.env.iter() {
+            v.visit_artifacts(&mut artifact_visitor)?;
+        }
+        Ok(Cow::Owned(artifact_visitor.inputs.into_iter().collect()))
+    }
+
+    fn outputs(&self) -> Cow<'_, [BuildArtifact]> {
+        Cow::Borrowed(self.outputs.as_slice())
+    }
+
+    fn first_output(&self) -> &BuildArtifact {
+        // Required to have outputs on construction.
+        &self.outputs.as_slice()[0]
+    }
+
+    fn as_executable(&self) -> ActionExecutable<'_> {
+        ActionExecutable::Incremental(self)
+    }
+
+    fn category(&self) -> CategoryRef {
+        CategoryRef::unchecked_new(self.starlark_values.category.as_str())
+    }
+
+    fn identifier(&self) -> Option<&str> {
+        self.starlark_values.identifier.map(|x|
x.as_str()) + } + + fn always_print_stderr(&self) -> bool { + self.inner.always_print_stderr + } + + fn aquery_attributes(&self, fs: &ExecutorFs) -> indexmap::IndexMap { + let mut cli_rendered = Vec::::new(); + let mut ctx = DefaultCommandLineContext::new(fs); + let values = Self::unpack(&self.starlark_values).unwrap(); + values + .exe + .add_to_command_line(&mut cli_rendered, &mut ctx) + .unwrap(); + values + .args + .add_to_command_line(&mut cli_rendered, &mut ctx) + .unwrap(); + let cmd = format!("[{}]", cli_rendered.iter().join(", ")); + indexmap! { + "cmd".to_owned() => cmd, + "executor_preference".to_owned() => self.inner.executor_preference.to_string(), + "always_print_stderr".to_owned() => self.inner.always_print_stderr.to_string(), + "weight".to_owned() => self.inner.weight.to_string(), + "dep_files".to_owned() => self.inner.dep_files.to_string(), + "metadata_param".to_owned() => match &self.inner.metadata_param { + None => "None".to_owned(), + Some(x) => x.to_string(), + }, + "no_outputs_cleanup".to_owned() => self.inner.no_outputs_cleanup.to_string(), + "allow_cache_upload".to_owned() => self.inner.allow_cache_upload.to_string(), + "allow_dep_file_cache_upload".to_owned() => self.inner.allow_dep_file_cache_upload.to_string(), + } + } + + fn error_handler(&self) -> Option { + self.error_handler.clone() + } +} + +#[async_trait] +impl IncrementalActionExecutable for RunAction { + async fn execute( + &self, + ctx: &mut dyn ActionExecutionCtx, + ) -> Result<(ActionOutputs, ActionExecutionMetadata), ExecuteError> { + let ( + mut result, + mut dep_file_bundle, + executor_preference, + prepared_action, + input_files_bytes, + ) = match self.execute_inner(ctx).await? { + ExecuteResult::LocalDepFileHit(outputs, metadata) => { + return Ok((outputs, metadata)); + } + ExecuteResult::ExecutedOrReHit { + result, + dep_file_bundle, + executor_preference, + prepared_action, + input_files_bytes, + } => ( + result, + dep_file_bundle, + executor_preference, + prepared_action, + input_files_bytes, + ), + }; + + // If there is a dep file entry AND if dep file cache upload is enabled, upload it + let upload_dep_file = self.inner.allow_dep_file_cache_upload && dep_file_bundle.is_some(); + if result.was_success() + && !result.was_served_by_remote_dep_file_cache() + && (self.inner.allow_cache_upload || upload_dep_file || force_cache_upload()?) 
+ { + let re_result = result.action_result.take(); + let upload_result = ctx + .cache_upload( + &prepared_action.action_and_blobs, + &result, + re_result, + // match needed for coercion, https://github.com/rust-lang/rust/issues/108999 + match dep_file_bundle.as_mut() { + Some(dep_file_bundle) if self.inner.allow_dep_file_cache_upload => { + Some(dep_file_bundle) + } + _ => None, + }, + ) + .await?; + + result.did_cache_upload = upload_result.did_cache_upload; + result.did_dep_file_cache_upload = upload_result.did_dep_file_cache_upload; + } + + let was_locally_executed = result.was_locally_executed(); + let (outputs, metadata) = ctx.unpack_command_execution_result( + executor_preference, + result, + self.inner.allow_cache_upload, + self.inner.allow_dep_file_cache_upload, + Some(input_files_bytes), + )?; + + if let Some(dep_file_bundle) = dep_file_bundle { + populate_dep_files(ctx, dep_file_bundle, &outputs, was_locally_executed).await?; + } + + Ok((outputs, metadata)) + } +} diff --git a/app/buck2_action_impl/src/actions/impls/run/audit_dep_files.rs b/app/buck2_action_impl/src/actions/impls/run/audit_dep_files.rs index 0a96f6a202ccd..bc96d00f9fb88 100644 --- a/app/buck2_action_impl/src/actions/impls/run/audit_dep_files.rs +++ b/app/buck2_action_impl/src/actions/impls/run/audit_dep_files.rs @@ -15,9 +15,9 @@ use buck2_build_api::actions::artifact::get_artifact_fs::GetArtifactFs; use buck2_build_api::audit_dep_files::AUDIT_DEP_FILES; use buck2_core::base_deferred_key::BaseDeferredKey; use buck2_core::category::Category; -use buck2_core::directory::Directory; -use buck2_core::directory::DirectoryIterator; use buck2_core::target::configured_target_label::ConfiguredTargetLabel; +use buck2_directory::directory::directory::Directory; +use buck2_directory::directory::directory_iterator::DirectoryIterator; use buck2_execute::digest_config::HasDigestConfig; use buck2_execute::materialize::materializer::HasMaterializer; use dice::DiceTransaction; @@ -42,12 +42,13 @@ async fn audit_dep_files( ) -> anyhow::Result<()> { let key = DepFilesKey::new(BaseDeferredKey::TargetLabel(label), category, identifier); - let state = get_dep_files(&key).context("Failed to find dep files")?; + let state = get_dep_files(&key) + .with_context(|| format!("Failed to find dep files for key `{}`", key))?; let dep_files = read_dep_files( state.has_signatures(), state.declared_dep_files(), - &ctx.get_artifact_fs().await?, + &ctx.clone().get_artifact_fs().await?, ctx.per_transaction_data().get_materializer().as_ref(), ) .await @@ -63,26 +64,19 @@ async fn audit_dep_files( let dirs = match &*fingerprints { StoredFingerprints::Digests(..) => { // This is bit awkward but this only for testing right now so that's OK - return Err(anyhow::anyhow!("Fingerprints were stored as digests!")); + return Err(anyhow::anyhow!( + "Fingerprints were stored as digests! You probably need to use BUCK2_KEEP_DEP_FILE_DIRECTORIES=true" + )); } StoredFingerprints::Dirs(dirs) => dirs, }; - for (path, ..) in dirs - .untagged - .ordered_walk() - .with_paths() - .filter_map(|(p, e)| Some((p, e.into_leaf()?))) - { + for path in dirs.untagged.ordered_walk_leaves().paths() { writeln!(stdout, "untagged\t{}", path)?; } for (tag, dir) in dirs.tagged.iter() { - for (path, ..) 
in dir - .ordered_walk() - .with_paths() - .filter_map(|(p, e)| Some((p, e.into_leaf()?))) - { + for path in dir.ordered_walk_leaves().paths() { writeln!(stdout, "{}\t{}", tag, path)?; } } diff --git a/app/buck2_action_impl/src/actions/impls/run/dep_files.rs b/app/buck2_action_impl/src/actions/impls/run/dep_files.rs index e00e506c9a28e..06c220ea7fbd9 100644 --- a/app/buck2_action_impl/src/actions/impls/run/dep_files.rs +++ b/app/buck2_action_impl/src/actions/impls/run/dep_files.rs @@ -10,11 +10,11 @@ use std::borrow::Cow; use std::collections::HashMap; use std::collections::HashSet; -use std::fmt::Display; use std::sync::Arc; use allocative::Allocative; use anyhow::Context as _; +use async_trait::async_trait; use buck2_action_metadata_proto::DepFileInputs; use buck2_action_metadata_proto::RemoteDepFile; use buck2_artifact::artifact::artifact_type::Artifact; @@ -29,34 +29,38 @@ use buck2_build_api::actions::ActionExecutionCtx; use buck2_build_api::artifact_groups::ArtifactGroup; use buck2_build_api::interpreter::rule_defs::artifact_tagging::ArtifactTag; use buck2_build_api::interpreter::rule_defs::cmd_args::CommandLineArtifactVisitor; +use buck2_common::cas_digest::CasDigestData; use buck2_common::file_ops::FileDigest; use buck2_common::file_ops::TrackedFileDigest; use buck2_core::base_deferred_key::BaseDeferredKey; +use buck2_core::buck2_env_anyhow; use buck2_core::category::Category; -use buck2_core::directory::DirectorySelector; -use buck2_core::directory::FingerprintedDirectory; -use buck2_core::env_helper::EnvHelper; use buck2_core::fs::artifact_path_resolver::ArtifactFs; use buck2_core::fs::fs_util; -use buck2_core::fs::project_rel_path::ProjectRelativePath; +use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathNormalizer; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; use buck2_core::soft_error; -use buck2_events::dispatch::span_async; +use buck2_directory::directory::directory_selector::DirectorySelector; +use buck2_directory::directory::fingerprinted_directory::FingerprintedDirectory; +use buck2_events::dispatch::span_async_simple; use buck2_execute::artifact::artifact_dyn::ArtifactDyn; +use buck2_execute::digest::CasDigestToReExt; use buck2_execute::digest_config::DigestConfig; use buck2_execute::directory::expand_selector_for_dependencies; use buck2_execute::directory::ActionDirectoryBuilder; use buck2_execute::directory::ActionImmutableDirectory; use buck2_execute::directory::ActionSharedDirectory; use buck2_execute::directory::INTERNER; -use buck2_execute::execute::cache_uploader::DepFileEntry; +use buck2_execute::execute::action_digest_and_blobs::ActionDigestAndBlobs; +use buck2_execute::execute::action_digest_and_blobs::ActionDigestAndBlobsBuilder; +use buck2_execute::execute::cache_uploader::IntoRemoteDepFile; use buck2_execute::execute::dep_file_digest::DepFileDigest; use buck2_execute::execute::request::CommandExecutionPaths; use buck2_execute::execute::request::OutputType; use buck2_execute::materialize::materializer::MaterializationError; use buck2_execute::materialize::materializer::Materializer; use buck2_file_watcher::dep_files::FLUSH_DEP_FILES; -use buck2_file_watcher::mergebase::Mergebase; +use buck2_file_watcher::dep_files::FLUSH_NON_LOCAL_DEP_FILES; use dashmap::DashMap; use derive_more::Display; use dupe::Dupe; @@ -66,25 +70,43 @@ use parking_lot::MappedMutexGuard; use parking_lot::Mutex; use parking_lot::MutexGuard; use starlark_map::ordered_map::OrderedMap; -use thiserror::Error; use tracing::instrument; #[allocative::root] static 
DEP_FILES: Lazy<DashMap<DepFilesKey, Arc<DepFileState>>> = Lazy::new(DashMap::new);
 
-/// When this is set, we retain directories after fingerprintig, so that we can output them later
+/// When this is set, we retain directories after fingerprinting, so that we can output them later
 /// for debugging via `buck2 audit dep-files`.
-static KEEP_DIRECTORIES: EnvHelper<bool> = EnvHelper::new("BUCK2_KEEP_DEP_FILE_DIRECTORIES");
+fn keep_directories() -> anyhow::Result<bool> {
+    buck2_env_anyhow!("BUCK2_KEEP_DEP_FILE_DIRECTORIES", bool)
+}
 
 /// Forget about all dep files. This isn't really meant to be commonly used, but if an invalid dep
 /// file was produced and the user wants unblocking, this will provide it.
 fn flush_dep_files() {
-    tracing::info!("Flushing {} dep files", DEP_FILES.len());
+    tracing::info!("Flushing all {} dep files", DEP_FILES.len());
     DEP_FILES.clear();
 }
 
+/// Flush all dep files that were not produced locally.
+/// In general we may want to retain dep files that were produced locally for longer, since (a) they are
+/// already on disk and we don't need to download them, and (b) since they were produced locally they are
+/// not cached elsewhere, so there is more value in retaining them.
+fn flush_non_local_dep_files() {
+    tracing::info!(
+        "Flushing non-local dep files, current size is: {}",
+        DEP_FILES.len()
+    );
+    DEP_FILES.retain(|_, dep_file_state| dep_file_state.was_produced_locally);
+    tracing::info!(
+        "Number of remaining local dep files is: {}",
+        DEP_FILES.len()
+    );
+}
+
 pub(crate) fn init_flush_dep_files() {
     FLUSH_DEP_FILES.init(flush_dep_files);
+    FLUSH_NON_LOCAL_DEP_FILES.init(flush_non_local_dep_files);
 }
 
 pub(crate) fn get_dep_files(key: &DepFilesKey) -> Option<Arc<DepFileState>> {
@@ -94,19 +116,23 @@ pub(crate) fn get_dep_files(key: &DepFilesKey) -> Option<Arc<DepFileState>> {
 
 /// A key used to associate a RunAction with a possible previous dep file.
 #[derive(Eq, PartialEq, Hash, Display, Allocative)]
 #[display(
-    fmt = "{} {} {}",
+    "{} {} {}",
     owner,
     category,
-    "identifier.as_deref().unwrap_or(\"\")"
+    identifier.as_deref().unwrap_or("")
 )]
-pub struct DepFilesKey {
+pub(crate) struct DepFilesKey {
     owner: BaseDeferredKey,
     category: Category,
     identifier: Option<String>,
 }
 
 impl DepFilesKey {
-    pub fn new(owner: BaseDeferredKey, category: Category, identifier: Option<String>) -> Self {
+    pub(crate) fn new(
+        owner: BaseDeferredKey,
+        category: Category,
+        identifier: Option<String>,
+    ) -> Self {
         Self {
             owner,
             category,
@@ -114,10 +140,10 @@ impl DepFilesKey {
         }
     }
 
-    pub fn from_action_execution_target(target: ActionExecutionTarget<'_>) -> Self {
+    pub(crate) fn from_action_execution_target(target: ActionExecutionTarget<'_>) -> Self {
         Self {
             owner: target.owner().dupe(),
-            category: target.category().clone(),
+            category: target.category().to_owned(),
             identifier: target.identifier().map(|t| t.to_owned()),
         }
     }
@@ -144,6 +170,7 @@ impl StoredFingerprints {
         digests: CommandDigests,
         declared_dep_files: DeclaredDepFiles,
         result: &ActionOutputs,
+        was_produced_locally: bool,
     ) -> DepFileState {
         let input_signatures = Mutex::new(DepFileStateInputSignatures::Computed(self));
         DepFileState {
@@ -151,12 +178,13 @@ impl StoredFingerprints {
             input_signatures,
             declared_dep_files,
             result: result.dupe(),
+            was_produced_locally,
         }
     }
 }
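// A sketch of the retain-based eviction that `flush_non_local_dep_files` above
// performs, with a plain HashMap standing in for the DashMap; types and keys are
// illustrative, not buck2 API.
use std::collections::HashMap;

struct DepFileStateStub {
    was_produced_locally: bool,
}

fn flush_non_local(cache: &mut HashMap<String, DepFileStateStub>) {
    // Keep only locally produced entries; remote-backed ones can be fetched again if needed.
    cache.retain(|_key, state| state.was_produced_locally);
}

fn main() {
    let mut cache = HashMap::new();
    cache.insert("local".to_owned(), DepFileStateStub { was_produced_locally: true });
    cache.insert("remote".to_owned(), DepFileStateStub { was_produced_locally: false });
    flush_non_local(&mut cache);
    assert!(cache.contains_key("local"));
    assert!(!cache.contains_key("remote"));
}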
 
 #[derive(Allocative)]
-pub enum StoredFingerprints {
+pub(crate) enum StoredFingerprints {
     /// Store only digests. This is what we use in prod because it is small.
     Digests(PartitionedInputs),
@@ -180,17 +208,18 @@ impl PartialEq> for StoredFingerprin
 /// contains everything we need to determine whether re-evaluation is necessary (and if it isn't,
 /// to return the previous value).
 #[derive(Allocative)]
-pub struct DepFileState {
+pub(crate) struct DepFileState {
     digests: CommandDigests,
     input_signatures: Mutex<DepFileStateInputSignatures>,
     declared_dep_files: DeclaredDepFiles,
     result: ActionOutputs,
+    was_produced_locally: bool,
 }
 
 #[derive(Allocative)]
-pub struct CommandDigests {
-    pub cli: ExpandedCommandLineDigest,
-    pub directory: FileDigest,
+pub(crate) struct CommandDigests {
+    pub(crate) cli: ExpandedCommandLineDigest,
+    pub(crate) directory: FileDigest,
 }
 
 impl DepFileState {
@@ -257,7 +286,7 @@ impl Display for RunActionDepFiles {
 }
 
 impl RunActionDepFiles {
-    pub fn new() -> Self {
+    pub(crate) fn new() -> Self {
         Self {
             labels: OrderedMap::new(),
         }
@@ -267,8 +296,8 @@ impl RunActionDepFiles {
 fn get_output_path_digest(
     digest_config: DigestConfig,
     output_paths: &[(ProjectRelativePathBuf, OutputType)],
-) -> DepFileDigest {
-    let mut digester = DepFileDigest::digester(digest_config.cas_digest_config());
+) -> CasDigestData {
+    let mut digester = CasDigestData::digester(digest_config.cas_digest_config());
     digester.update(&output_paths.len().to_le_bytes());
     for (output_path, output_type) in output_paths.iter() {
         digester.update(output_path.as_str().as_bytes());
@@ -278,17 +307,17 @@ fn get_output_path_digest(
 }
 
 // A utility struct to hold digests that are included in both the remote dep file key and value.
-pub struct CommonDigests {
+pub(crate) struct CommonDigests {
     commandline_cli_digest: ExpandedCommandLineDigest,
     // A digest of all output paths for this action
-    output_paths_digest: DepFileDigest,
+    output_paths_digest: CasDigestData,
     // A digest of inputs that are untagged (not tied to a dep file)
     untagged_inputs_digest: TrackedFileDigest,
 }
 
 impl CommonDigests {
     // Take the digest of everything in the structure.
-    fn fingerprint(&self, digest_config: DigestConfig) -> DepFileDigest {
-        let mut digester = DepFileDigest::digester(digest_config.cas_digest_config());
+    fn fingerprint(&self, digest_config: DigestConfig) -> CasDigestData {
+        let mut digester = CasDigestData::digester(digest_config.cas_digest_config());
 
         digester.update(self.output_paths_digest.raw_digest().as_bytes());
         digester.update(self.commandline_cli_digest.as_bytes());
@@ -297,26 +326,42 @@ impl CommonDigests {
         digester.finalize()
     }
 
-    fn make_remote_dep_file_key(
+    // Construct an action so that the action digest can be used as a remote dep file key,
+    // which is the key to an action result with dep file metadata.
+    // The action itself is unused, but we need to upload some action to avoid permission
+    // errors when uploading the action result, and the digest needs to match that action.
+    fn make_remote_dep_file_action(
         &self,
-        digest_config: DigestConfig,
-        mergebase: &Mergebase,
-    ) -> DepFileDigest {
+        ctx: &mut dyn ActionExecutionCtx,
+    ) -> ActionDigestAndBlobs {
+        let digest_config = ctx.digest_config();
         let mut digester = DepFileDigest::digester(digest_config.cas_digest_config());
-
         digester.update(self.fingerprint(digest_config).raw_digest().as_bytes());
 
         // Take the digest of the mergebase to get the closest hit.
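// A schematic sketch of how this hunk derives the remote dep file key: the combined
// fingerprint is hashed together with the mergebase, and the resulting key is then
// wrapped in a synthetic RE action (the key becomes the action's only argument) so
// the action digest can serve as the cache key. A toy hasher stands in for the CAS
// digester; everything below is illustrative, not buck2 API.
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn inner_remote_dep_file_key(fingerprint: &[u8], mergebase: Option<&str>) -> String {
    let mut hasher = DefaultHasher::new();
    fingerprint.hash(&mut hasher);
    // Mixing in the mergebase scopes keys to a revision window, so lookups find
    // the closest hit for the current checkout.
    if let Some(m) = mergebase {
        m.as_bytes().hash(&mut hasher);
    }
    format!("{:016x}", hasher.finish())
}

fn main() {
    let key = inner_remote_dep_file_key(b"combined-digests", Some("abc123"));
    // The real code now builds an RE Action whose arguments are just `[key]` and
    // uses that action's digest as the remote dep file key.
    println!("inner remote dep file key: {key}");
}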
- match mergebase.0.as_ref() { + match ctx.mergebase().0.as_ref() { Some(m) => digester.update(m.as_bytes()), None => (), }; + let inner_remote_dep_file_key = digester.finalize().to_string(); + + let mut blobs = ActionDigestAndBlobsBuilder::new(digest_config); + let command = blobs.add_command(&remote_execution::Command { + // Instead of using the digest directly, we could use the constituent digests, or constituent paths + // which might be useful for debugging. + arguments: vec![inner_remote_dep_file_key], + platform: Some(ctx.re_platform().clone()), + ..Default::default() + }); - digester.finalize() + blobs.build(&remote_execution::Action { + command_digest: Some(command.to_grpc()), + ..Default::default() + }) } /// Take a list of declared dep files (label, artifact) and filtered inputs (StoredFingerprints) - /// and create a list of dep file path and filterd inputs for that input + /// and create a list of dep file path and filtered inputs for that input fn get_dep_file_inputs( &self, declared_dep_files: &DeclaredDepFiles, @@ -360,7 +405,7 @@ impl CommonDigests { pub(crate) struct DepFileBundle { dep_files_key: DepFilesKey, - pub remote_dep_file_key: DepFileDigest, + pub(crate) remote_dep_file_action: ActionDigestAndBlobs, input_directory_digest: FileDigest, shared_declared_inputs: PartitionedInputs, declared_dep_files: DeclaredDepFiles, @@ -368,19 +413,25 @@ pub(crate) struct DepFileBundle { common_digests: CommonDigests, } -impl DepFileBundle { - pub async fn make_remote_dep_file_entry( +#[async_trait] +impl IntoRemoteDepFile for DepFileBundle { + fn remote_dep_file_action(&self) -> &ActionDigestAndBlobs { + &self.remote_dep_file_action + } + + async fn make_remote_dep_file( &mut self, - ctx: &dyn ActionExecutionCtx, - ) -> anyhow::Result { - let digest_config = ctx.digest_config(); + digest_config: DigestConfig, + fs: &ArtifactFs, + materializer: &dyn Materializer, + ) -> anyhow::Result { // Compute the input fingerprint digest if it hasn't been computed already. if self.filtered_input_fingerprints.is_none() { self.filtered_input_fingerprints = Some( eagerly_compute_fingerprints( digest_config, - ctx.fs(), - ctx.materializer(), + fs, + materializer, &self.shared_declared_inputs, &self.declared_dep_files, ) @@ -388,45 +439,34 @@ impl DepFileBundle { ); } - let entry = self.common_digests.make_dep_file_entry_proto( + Ok(self.common_digests.make_dep_file_entry_proto( &self.declared_dep_files, self.filtered_input_fingerprints.as_ref().unwrap(), - ); - - let res = DepFileEntry { - key: self.remote_dep_file_key.dupe(), - entry, - }; - - Ok(res) + )) } +} - pub async fn check_local_dep_file_cache_for_identical_action( +impl DepFileBundle { + pub(crate) async fn check_local_dep_file_cache_for_identical_action( &self, ctx: &mut dyn ActionExecutionCtx, declared_outputs: &[BuildArtifact], ) -> anyhow::Result<(Option<(ActionOutputs, ActionExecutionMetadata)>, bool)> { // Get the action outputs (if cache hit) and an indicator on whether a full lookup operation should be performed - let (outputs, check_filterd_inputs) = span_async( + let (outputs, check_filtered_inputs) = span_async_simple( buck2_data::MatchDepFilesStart { checking_filtered_inputs: false, remote_cache: false, }, - async { - let res: anyhow::Result<_> = try { - match_if_identical_action( - ctx, - &self.dep_files_key, - &self.input_directory_digest, - &self.common_digests.commandline_cli_digest, - declared_outputs, - &self.declared_dep_files, - ) - .await? 
- }; - - (res, buck2_data::MatchDepFilesEnd {}) - }, + match_if_identical_action( + ctx, + &self.dep_files_key, + &self.input_directory_digest, + &self.common_digests.commandline_cli_digest, + declared_outputs, + &self.declared_dep_files, + ), + buck2_data::MatchDepFilesEnd {}, ) .await?; let outputs = outputs.map(|o| { @@ -435,38 +475,33 @@ impl DepFileBundle { ActionExecutionMetadata { execution_kind: ActionExecutionKind::LocalDepFile, timing: Default::default(), + input_files_bytes: None, }, ) }); - Ok((outputs, check_filterd_inputs)) + Ok((outputs, check_filtered_inputs)) } - pub async fn check_local_dep_file_cache( + pub(crate) async fn check_local_dep_file_cache( &self, ctx: &mut dyn ActionExecutionCtx, declared_outputs: &[BuildArtifact], ) -> anyhow::Result> { - let matching_result = span_async( + let matching_result = span_async_simple( buck2_data::MatchDepFilesStart { checking_filtered_inputs: true, remote_cache: false, }, - async { - let res: anyhow::Result<_> = try { - match_or_clear_dep_file( - ctx, - &self.dep_files_key, - &self.input_directory_digest, - &self.common_digests.commandline_cli_digest, - &self.shared_declared_inputs, - declared_outputs, - &self.declared_dep_files, - ) - .await? - }; - - (res, buck2_data::MatchDepFilesEnd {}) - }, + match_or_clear_dep_file( + ctx, + &self.dep_files_key, + &self.input_directory_digest, + &self.common_digests.commandline_cli_digest, + &self.shared_declared_inputs, + declared_outputs, + &self.declared_dep_files, + ), + buck2_data::MatchDepFilesEnd {}, ) .await?; @@ -476,13 +511,14 @@ impl DepFileBundle { ActionExecutionMetadata { execution_kind: ActionExecutionKind::LocalDepFile, timing: Default::default(), + input_files_bytes: None, }, ) }); Ok(matching_result) } - pub async fn check_remote_dep_file_entry( + pub(crate) async fn check_remote_dep_file_entry( &self, digest_config: DigestConfig, fs: &ArtifactFs, @@ -624,12 +660,11 @@ pub(crate) fn make_dep_file_bundle<'a>( execution_paths.output_paths(), ), }; - let remote_dep_file_key = - common_digests.make_remote_dep_file_key(ctx.digest_config(), ctx.mergebase()); + let remote_dep_file_action = common_digests.make_remote_dep_file_action(ctx); Ok(DepFileBundle { dep_files_key, - remote_dep_file_key, + remote_dep_file_action, input_directory_digest, shared_declared_inputs, declared_dep_files, @@ -656,6 +691,7 @@ pub(crate) async fn match_if_identical_action( }; let actions_match = check_action( + key, &previous_state, input_directory_digest, cli_digest, @@ -695,6 +731,7 @@ pub(crate) async fn match_or_clear_dep_file( }; let dep_files_match = dep_files_match( + key, &previous_state, input_directory_digest, cli_digest, @@ -746,6 +783,7 @@ async fn outputs_match( } fn check_action( + key: &DepFilesKey, previous_state: &DepFileState, input_directory_digest: &FileDigest, cli_digest: &ExpandedCommandLineDigest, @@ -756,16 +794,19 @@ fn check_action( // We first need to check if the same dep files existed before or not. If not, then we // can't assume they'll still be on disk, and we have to bail. 
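// A sketch of the shape of the staged miss checks that follow: each check compares
// one aspect of the previous state (dep file declaration, outputs, command line)
// and, per the `DEP_FILES.remove(key)` calls added in this hunk, a definitive miss
// now also evicts the stale entry. Types and digests below are illustrative.
use std::collections::HashMap;

enum Lookup {
    Hit,
    Miss,
}

fn check_entry(
    cache: &mut HashMap<String, (u64, u64)>, // key -> (declaration digest, command line digest)
    key: &str,
    decl_digest: u64,
    cli_digest: u64,
) -> Lookup {
    match cache.get(key) {
        Some(&(d, c)) if d == decl_digest && c == cli_digest => Lookup::Hit,
        Some(_) => {
            // The action changed, so the stored state can never match again; drop it.
            cache.remove(key);
            Lookup::Miss
        }
        None => Lookup::Miss,
    }
}

fn main() {
    let mut cache = HashMap::from([("cell//pkg:foo cxx_compile".to_owned(), (1, 2))]);
    let lookup = check_entry(&mut cache, "cell//pkg:foo cxx_compile", 1, 9);
    assert!(matches!(lookup, Lookup::Miss));
    assert!(cache.is_empty()); // the stale entry was evicted
}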
tracing::trace!("Dep files miss: Dep files declaration has changed"); + DEP_FILES.remove(key); return InitialDepFileLookupResult::Miss; } if !outputs_are_reusable(declared_outputs, &previous_state.result) { tracing::trace!("Dep files miss: Output declaration has changed"); + DEP_FILES.remove(key); return InitialDepFileLookupResult::Miss; } if *cli_digest != previous_state.digests.cli { tracing::trace!("Dep files miss: Command line has changed"); + DEP_FILES.remove(key); return InitialDepFileLookupResult::Miss; } @@ -778,6 +819,7 @@ fn check_action( } async fn dep_files_match( + key: &DepFilesKey, previous_state: &DepFileState, input_directory_digest: &FileDigest, cli_digest: &ExpandedCommandLineDigest, @@ -787,6 +829,7 @@ async fn dep_files_match( ctx: &dyn ActionExecutionCtx, ) -> anyhow::Result { let initial_check = check_action( + key, previous_state, input_directory_digest, cli_digest, @@ -797,6 +840,11 @@ async fn dep_files_match( return Ok(true); } + // We didn't get an exact match, and we don't have any dep files, so we're done. + if declared_dep_files.tagged.is_empty() { + return Ok(false); + } + let dep_files = read_dep_files( previous_state.has_signatures(), previous_state.declared_dep_files(), @@ -828,7 +876,7 @@ async fn dep_files_match( // so), because this Mutex won't be contended: only one action will look at its value. let previous_fingerprints = previous_state.locked_compute_fingerprints( Cow::Borrowed(&dep_files), - KEEP_DIRECTORIES.get_copied()?.unwrap_or_default(), + keep_directories()?, digest_config, ); @@ -915,7 +963,7 @@ async fn eagerly_compute_fingerprints( shared_declared_inputs.clone().unshare(), dep_files, digest_config, - KEEP_DIRECTORIES.get_copied()?.unwrap_or_default(), + keep_directories()?, ); Ok(fingerprints) } @@ -925,6 +973,7 @@ pub(crate) async fn populate_dep_files( ctx: &dyn ActionExecutionCtx, dep_file_bundle: DepFileBundle, result: &ActionOutputs, + was_produced_locally: bool, ) -> anyhow::Result<()> { let DepFileBundle { declared_dep_files, @@ -943,7 +992,12 @@ pub(crate) async fn populate_dep_files( }; let state = match filtered_input_fingerprints { - Some(fingerprints) => fingerprints.to_dep_file_state(digests, declared_dep_files, result), + Some(fingerprints) => fingerprints.to_dep_file_state( + digests, + declared_dep_files, + result, + was_produced_locally, + ), None if should_compute_fingerprints => { let fingerprints = eagerly_compute_fingerprints( ctx.digest_config(), @@ -953,7 +1007,12 @@ pub(crate) async fn populate_dep_files( &declared_dep_files, ) .await?; - fingerprints.to_dep_file_state(digests, declared_dep_files, result) + fingerprints.to_dep_file_state( + digests, + declared_dep_files, + result, + was_produced_locally, + ) } None => DepFileState { digests, @@ -962,6 +1021,7 @@ pub(crate) async fn populate_dep_files( ))), declared_dep_files, result: result.dupe(), + was_produced_locally, }, }; @@ -971,13 +1031,13 @@ pub(crate) async fn populate_dep_files( /// Inputs partitioned by tag. `D` is the representation of the set of inputs. 
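// A minimal generic sketch of the partition-by-tag layout that `PartitionedInputs`
// below generalizes, with `iter()` chaining the untagged set ahead of every tagged
// set; names are illustrative.
use std::collections::BTreeMap;

struct Partitioned<D> {
    untagged: D,
    tagged: BTreeMap<String, D>,
}

impl<D> Partitioned<D> {
    // Yield the untagged partition first, then each tagged partition in order.
    fn iter(&self) -> impl Iterator<Item = &D> {
        std::iter::once(&self.untagged).chain(self.tagged.values())
    }
}

fn main() {
    let p = Partitioned {
        untagged: vec!["a.h"],
        tagged: BTreeMap::from([("headers".to_owned(), vec!["b.h"])]),
    };
    assert_eq!(p.iter().count(), 2);
}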
#[derive(Clone, PartialEq, Eq, Allocative)] -pub struct PartitionedInputs { - pub untagged: D, - pub tagged: OrderedMap, D>, +pub(crate) struct PartitionedInputs { + pub(crate) untagged: D, + pub(crate) tagged: OrderedMap, D>, } impl PartitionedInputs { - pub fn iter(&self) -> impl Iterator { + pub(crate) fn iter(&self) -> impl Iterator { std::iter::once(&self.untagged).chain(self.tagged.values()) } } @@ -1226,7 +1286,7 @@ impl DeclaredDepFiles { None => { soft_error!( "missing_dep_file", - anyhow::anyhow!("Dep file is missing at {}", dep_file_path) + anyhow::anyhow!("Dep file is missing at {}", dep_file_path).into() )?; return Ok(None); } @@ -1237,10 +1297,11 @@ impl DeclaredDepFiles { if line.is_empty() { continue; } - let path = ProjectRelativePath::new(line) - .context("Invalid line encountered in dep file")?; - selector.select(path); + // On windows, valid dep files can contain backslashes in paths, normalize them. + let path = ForwardRelativePathNormalizer::normalize_path(line) + .context("Invalid line encountered in dep file")?; + selector.select(path.as_ref()); } }; @@ -1267,7 +1328,7 @@ impl DeclaredDepFiles { } } -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] enum MaterializeDepFilesError { #[error("Error materializing dep file")] MaterializationFailed { @@ -1282,15 +1343,15 @@ enum MaterializeDepFilesError { /// A set of concrete dep files. That is, given a label, a selector that represents the subset of /// files whose tags matches this label that should be considered relevant. #[derive(Clone)] -pub struct ConcreteDepFiles { +pub(crate) struct ConcreteDepFiles { contents: HashMap, DirectorySelector>, } /// A command line visitor to collect inputs and outputs in a form relevant for dep files /// computations. pub(crate) struct DepFilesCommandLineVisitor<'a> { - pub inputs: PartitionedInputs>, - pub tagged_outputs: OrderedMap, Option)>, + pub(crate) inputs: PartitionedInputs>, + pub(crate) tagged_outputs: OrderedMap, Option)>, dep_files: &'a RunActionDepFiles, } @@ -1342,12 +1403,11 @@ impl CommandLineArtifactVisitor for DepFilesCommandLineVisitor<'_> { } #[cfg(test)] -mod test { +mod tests { + use buck2_artifact::actions::key::ActionIndex; use buck2_artifact::artifact::artifact_type::testing::BuildArtifactTestingExt; - use buck2_artifact::deferred::id::DeferredId; use buck2_core::configuration::data::ConfigurationData; - use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; use buck2_core::target::configured_target_label::ConfiguredTargetLabel; use super::*; @@ -1363,28 +1423,28 @@ mod test { ConfiguredTargetLabel::testing_parse("cell//pkg:foo", ConfigurationData::testing_new()); let artifact1 = Artifact::from(BuildArtifact::testing_new( target.dupe(), - ForwardRelativePathBuf::unchecked_new("foo/bar1.h".to_owned()), - DeferredId::testing_new(0), + "foo/bar1.h", + ActionIndex::new(0), )); let artifact2 = Artifact::from(BuildArtifact::testing_new( target.dupe(), - ForwardRelativePathBuf::unchecked_new("foo/bar2.h".to_owned()), - DeferredId::testing_new(0), + "foo/bar2.h", + ActionIndex::new(0), )); let artifact3 = Artifact::from(BuildArtifact::testing_new( target.dupe(), - ForwardRelativePathBuf::unchecked_new("foo/bar3.h".to_owned()), - DeferredId::testing_new(0), + "foo/bar3.h", + ActionIndex::new(0), )); let artifact4 = Artifact::from(BuildArtifact::testing_new( target.dupe(), - ForwardRelativePathBuf::unchecked_new("foo/bar4.h".to_owned()), - DeferredId::testing_new(0), + "foo/bar4.h", + ActionIndex::new(0), )); let artifact5 = 
Artifact::from(BuildArtifact::testing_new( target.dupe(), - ForwardRelativePathBuf::unchecked_new("foo/bar5.h".to_owned()), - DeferredId::testing_new(0), + "foo/bar5.h", + ActionIndex::new(0), )); let dep_files = RunActionDepFiles { @@ -1417,14 +1477,14 @@ mod test { let artifact1 = Artifact::from(BuildArtifact::testing_new( target.dupe(), - ForwardRelativePathBuf::unchecked_new("foo/bar1.h".to_owned()), - DeferredId::testing_new(0), + "foo/bar1.h", + ActionIndex::new(0), )); let artifact2 = Artifact::from(BuildArtifact::testing_new( target.dupe(), - ForwardRelativePathBuf::unchecked_new("foo/bar2.h".to_owned()), - DeferredId::testing_new(0), + "foo/bar2.h", + ActionIndex::new(0), )); let depfile1 = DeclaredDepFile { diff --git a/app/buck2_action_impl/src/actions/impls/run/metadata.rs b/app/buck2_action_impl/src/actions/impls/run/metadata.rs index b33823445a666..0b120e0f561ce 100644 --- a/app/buck2_action_impl/src/actions/impls/run/metadata.rs +++ b/app/buck2_action_impl/src/actions/impls/run/metadata.rs @@ -7,80 +7,45 @@ * of this source tree. */ -use std::fmt::Display; - use buck2_build_api::artifact_groups::ArtifactGroupValues; -use buck2_common::file_ops::FileDigest; use buck2_common::file_ops::TrackedFileDigest; -use buck2_core::directory::Directory; -use buck2_core::directory::DirectoryEntry; -use buck2_core::directory::DirectoryIterator; use buck2_core::fs::artifact_path_resolver::ArtifactFs; -use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; +use buck2_directory::directory::directory::Directory; +use buck2_directory::directory::directory_iterator::DirectoryIterator; +use buck2_directory::directory::directory_iterator::DirectoryIteratorPathStack; use buck2_execute::digest_config::DigestConfig; use buck2_execute::directory::ActionDirectoryBuilder; use buck2_execute::directory::ActionDirectoryMember; -use serde::Serialize; -use serde::Serializer; - -fn stringify(value: &T, serializer: S) -> Result -where - T: Display, - S: Serializer, -{ - serializer.collect_str(value) -} +use buck2_execute::execute::paths_with_digest::PathsWithDigestBlobData; +use buck2_execute::execute::paths_with_digest::PathsWithDigestBuilder; pub(crate) fn metadata_content( fs: &ArtifactFs, inputs: &[&ArtifactGroupValues], digest_config: DigestConfig, -) -> anyhow::Result<(Vec, TrackedFileDigest)> { +) -> anyhow::Result<(PathsWithDigestBlobData, TrackedFileDigest)> { + let mut blob_builder = PathsWithDigestBuilder::default(); + let mut builder = ActionDirectoryBuilder::empty(); for &group in inputs { group.add_to_directory(&mut builder, fs)?; } - #[derive(Serialize)] - struct PathWithDigest<'a> { - path: ForwardRelativePathBuf, - #[serde(serialize_with = "stringify")] - digest: &'a FileDigest, - } - - #[derive(Serialize)] - struct MetadataJson<'a> { - version: i32, - digests: Vec>, - } - - let mut digests = Vec::new(); - let mut walk = builder.ordered_walk(); + let mut walk = builder.ordered_walk_leaves(); while let Some((path, item)) = walk.next() { match item { - DirectoryEntry::Leaf(ActionDirectoryMember::File(metadata)) => { - digests.push(PathWithDigest { - path: path.get(), - digest: metadata.digest.data(), - }); + ActionDirectoryMember::File(metadata) => { + blob_builder.add(path.get(), metadata.digest.data()); } // Omit symlinks and let user script detect and handle symlinks in inputs. // Metadata will contain artifacts which are symlinked, meaning the user // can resolve the symlink and get the digest of the symlinked artifact. 
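// A sketch of what a consumer of the action metadata file might do. The serde code
// removed above wrote JSON of the form
// {"version": 1, "digests": [{"path": "...", "digest": "..."}]}, and the new
// PathsWithDigestBuilder carries the same path/digest pairs. Parsing is kept
// deliberately loose via serde_json::Value; error handling is minimal and the
// example values are made up.
use std::collections::HashMap;

fn parse_metadata(json_text: &str) -> serde_json::Result<HashMap<String, String>> {
    let value: serde_json::Value = serde_json::from_str(json_text)?;
    let mut digests = HashMap::new();
    if let Some(entries) = value["digests"].as_array() {
        for entry in entries {
            if let (Some(path), Some(digest)) = (entry["path"].as_str(), entry["digest"].as_str()) {
                // Symlinked inputs appear under their resolved artifacts, per the
                // comment above; symlinks themselves are omitted.
                digests.insert(path.to_owned(), digest.to_owned());
            }
        }
    }
    Ok(digests)
}

fn main() {
    let text = r#"{"version": 1, "digests": [{"path": "foo/bar.h", "digest": "aabb:16"}]}"#;
    let digests = parse_metadata(text).expect("valid metadata JSON");
    assert_eq!(digests["foo/bar.h"], "aabb:16");
}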
- DirectoryEntry::Leaf(ActionDirectoryMember::Symlink(_)) - | DirectoryEntry::Leaf(ActionDirectoryMember::ExternalSymlink(_)) => {} - // Only interested in actual content, skip. - DirectoryEntry::Dir(_) => {} + ActionDirectoryMember::Symlink(_) | ActionDirectoryMember::ExternalSymlink(_) => {} } } - let json = MetadataJson { - digests, - // Increment this version if format changes - version: 1, - }; - let json_string = serde_json::to_string(&json)?; - let digest = - TrackedFileDigest::from_content(json_string.as_bytes(), digest_config.cas_digest_config()); - Ok((json_string.into(), digest)) + let blob = blob_builder.build()?; + + let digest = TrackedFileDigest::from_content(&blob.0.0, digest_config.cas_digest_config()); + Ok((blob, digest)) } diff --git a/app/buck2_action_impl/src/actions/impls/run/mod.rs b/app/buck2_action_impl/src/actions/impls/run/mod.rs deleted file mode 100644 index 919aba80ecb98..0000000000000 --- a/app/buck2_action_impl/src/actions/impls/run/mod.rs +++ /dev/null @@ -1,744 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::borrow::Cow; -use std::fmt::Display; -use std::ops::ControlFlow; - -use allocative::Allocative; -use anyhow::Context; -use async_trait::async_trait; -use buck2_artifact::artifact::build_artifact::BuildArtifact; -use buck2_build_api::actions::box_slice_set::BoxSliceSet; -use buck2_build_api::actions::execute::action_executor::ActionExecutionMetadata; -use buck2_build_api::actions::execute::action_executor::ActionOutputs; -use buck2_build_api::actions::impls::expanded_command_line::ExpandedCommandLine; -use buck2_build_api::actions::Action; -use buck2_build_api::actions::ActionExecutable; -use buck2_build_api::actions::ActionExecutionCtx; -use buck2_build_api::actions::IncrementalActionExecutable; -use buck2_build_api::actions::UnregisteredAction; -use buck2_build_api::artifact_groups::ArtifactGroup; -use buck2_build_api::artifact_groups::ArtifactGroupValues; -use buck2_build_api::interpreter::rule_defs::cmd_args::space_separated::SpaceSeparatedCommandLineBuilder; -use buck2_build_api::interpreter::rule_defs::cmd_args::value_as::ValueAsCommandLineLike; -use buck2_build_api::interpreter::rule_defs::cmd_args::CommandLineArgLike; -use buck2_build_api::interpreter::rule_defs::cmd_args::CommandLineArtifactVisitor; -use buck2_build_api::interpreter::rule_defs::cmd_args::CommandLineContext; -use buck2_build_api::interpreter::rule_defs::cmd_args::DefaultCommandLineContext; -use buck2_build_api::interpreter::rule_defs::cmd_args::SimpleCommandLineArtifactVisitor; -use buck2_build_api::interpreter::rule_defs::provider::builtin::worker_info::WorkerInfo; -use buck2_core::category::Category; -use buck2_core::fs::buck_out_path::BuckOutPath; -use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; -use buck2_events::dispatch::span_async; -use buck2_execute::artifact::fs::ExecutorFs; -use buck2_execute::execute::action_digest::ActionDigest; -use buck2_execute::execute::environment_inheritance::EnvironmentInheritance; -use buck2_execute::execute::manager::CommandExecutionManager; -use buck2_execute::execute::request::ActionMetadataBlob; -use buck2_execute::execute::request::CommandExecutionInput; -use buck2_execute::execute::request::CommandExecutionOutput; -use 
buck2_execute::execute::request::CommandExecutionPaths; -use buck2_execute::execute::request::CommandExecutionRequest; -use buck2_execute::execute::request::ExecutorPreference; -use buck2_execute::execute::request::WorkerId; -use buck2_execute::execute::request::WorkerSpec; -use buck2_execute::execute::result::CommandExecutionResult; -use derive_more::Display; -use dupe::Dupe; -use gazebo::prelude::*; -use host_sharing::HostSharingRequirements; -use host_sharing::WeightClass; -use indexmap::indexmap; -use indexmap::IndexSet; -use itertools::Itertools; -use serde_json::json; -use sorted_vector_map::SortedVectorMap; -use starlark::coerce::Coerce; -use starlark::starlark_complex_value; -use starlark::values::dict::DictRef; -use starlark::values::none::NoneOr; -use starlark::values::starlark_value; -use starlark::values::Freeze; -use starlark::values::NoSerialize; -use starlark::values::OwnedFrozenValue; -use starlark::values::OwnedFrozenValueTyped; -use starlark::values::ProvidesStaticType; -use starlark::values::StarlarkValue; -use starlark::values::Trace; -use starlark::values::UnpackValue; -use starlark::values::ValueLike; -use starlark::values::ValueOf; -use thiserror::Error; - -use self::dep_files::DepFileBundle; -use crate::actions::impls::run::dep_files::make_dep_file_bundle; -use crate::actions::impls::run::dep_files::populate_dep_files; -use crate::actions::impls::run::dep_files::DepFilesCommandLineVisitor; -use crate::actions::impls::run::dep_files::RunActionDepFiles; -use crate::actions::impls::run::metadata::metadata_content; - -pub(crate) mod audit_dep_files; -pub mod dep_files; -mod metadata; - -#[derive(Debug, Error)] -enum RunActionValidationError { - #[error("Expected command line value, got {0}")] - ContentsNotCommandLineValue(String), -} - -#[derive(Debug, Allocative)] -pub(crate) struct MetadataParameter { - /// Name of the environment variable which is set to contain - /// resolved path of the metadata file when requested by user. - pub(crate) env_var: String, - /// User-defined path in the output directory of the metadata file. 
- pub(crate) path: ForwardRelativePathBuf, -} - -impl Display for MetadataParameter { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let json = json!({ - "env_var": self.env_var, - "path": self.path, - }); - write!(f, "{}", json) - } -} - -#[derive(Debug, Error)] -enum LocalPreferenceError { - #[error("cannot have `local_only = True` and `prefer_local = True` at the same time")] - LocalOnlyAndPreferLocal, - #[error("cannot have `local_only = True` and `prefer_remote = True` at the same time")] - LocalOnlyAndPreferRemote, - #[error( - "cannot have `local_only = True`, `prefer_local = True` and `prefer_remote = True` at the same time" - )] - LocalOnlyAndPreferLocalAndPreferRemote, - #[error("cannot have `prefer_local = True` and `prefer_remote = True` at the same time")] - PreferLocalAndPreferRemote, -} - -pub(crate) fn new_executor_preference( - local_only: bool, - prefer_local: bool, - prefer_remote: bool, -) -> anyhow::Result { - match (local_only, prefer_local, prefer_remote) { - (true, false, false) => Ok(ExecutorPreference::LocalRequired), - (true, false, true) => Err(anyhow::anyhow!( - LocalPreferenceError::LocalOnlyAndPreferRemote - )), - (false, true, false) => Ok(ExecutorPreference::LocalPreferred), - (false, true, true) => Err(anyhow::anyhow!( - LocalPreferenceError::PreferLocalAndPreferRemote - )), - (false, false, false) => Ok(ExecutorPreference::Default), - (false, false, true) => Ok(ExecutorPreference::RemotePreferred), - (true, true, false) => Err(anyhow::anyhow!( - LocalPreferenceError::LocalOnlyAndPreferLocal - )), - (true, true, true) => Err(anyhow::anyhow!( - LocalPreferenceError::LocalOnlyAndPreferLocalAndPreferRemote - )), - } -} - -#[derive(Debug, Allocative)] -pub(crate) struct UnregisteredRunAction { - pub(crate) category: Category, - pub(crate) identifier: Option, - pub(crate) executor_preference: ExecutorPreference, - pub(crate) always_print_stderr: bool, - pub(crate) weight: WeightClass, - pub(crate) low_pass_filter: bool, - pub(crate) dep_files: RunActionDepFiles, - pub(crate) metadata_param: Option, - pub(crate) no_outputs_cleanup: bool, - pub(crate) allow_cache_upload: bool, - pub(crate) allow_dep_file_cache_upload: bool, - pub(crate) force_full_hybrid_if_capable: bool, - pub(crate) unique_input_inodes: bool, -} - -impl UnregisteredAction for UnregisteredRunAction { - fn register( - self: Box, - _: IndexSet, - outputs: IndexSet, - starlark_data: Option, - ) -> anyhow::Result> { - let starlark_values = - starlark_data.context("module data to be present (internal error)")?; - let run_action = RunAction::new(*self, starlark_values, outputs)?; - Ok(Box::new(run_action)) - } -} - -#[derive( - Debug, - Display, - Trace, - ProvidesStaticType, - NoSerialize, - Allocative, - Freeze, - Coerce -)] -#[display(fmt = "run_action_values")] -#[repr(C)] -pub(crate) struct StarlarkRunActionValuesGen { - pub(crate) exe: V, - pub(crate) args: V, - pub(crate) env: V, - /// `WorkerInfo` or `None`. 
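// The removed `new_executor_preference` above is a pure truth table over the three
// user flags; any combination that sets two or more is rejected. A condensed sketch
// (enum and error collapsed for brevity; the real code returns one specific error
// per conflicting combination):
#[derive(Debug, PartialEq)]
enum Preference {
    Default,
    LocalRequired,
    LocalPreferred,
    RemotePreferred,
}

fn new_preference(local_only: bool, prefer_local: bool, prefer_remote: bool) -> Result<Preference, String> {
    match (local_only, prefer_local, prefer_remote) {
        (true, false, false) => Ok(Preference::LocalRequired),
        (false, true, false) => Ok(Preference::LocalPreferred),
        (false, false, true) => Ok(Preference::RemotePreferred),
        (false, false, false) => Ok(Preference::Default),
        _ => Err("conflicting executor preference flags".to_owned()),
    }
}

fn main() {
    assert_eq!(new_preference(true, false, false), Ok(Preference::LocalRequired));
    assert!(new_preference(true, false, true).is_err());
}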
- pub(crate) worker: V, -} - -#[starlark_value(type = "run_action_values")] -impl<'v, V: ValueLike<'v> + 'v> StarlarkValue<'v> for StarlarkRunActionValuesGen where - Self: ProvidesStaticType<'v> -{ -} - -starlark_complex_value!(pub(crate) StarlarkRunActionValues); - -impl<'v, V: ValueLike<'v>> StarlarkRunActionValuesGen { - pub(crate) fn worker(&self) -> anyhow::Result>>> { - ValueOf::unpack_value_err(self.worker.to_value()) - } -} - -struct UnpackedWorkerValues<'v> { - exe: &'v dyn CommandLineArgLike, - id: WorkerId, - concurrency: Option, -} - -struct UnpackedRunActionValues<'v> { - exe: &'v dyn CommandLineArgLike, - args: &'v dyn CommandLineArgLike, - env: Vec<(&'v str, &'v dyn CommandLineArgLike)>, - worker: Option>, -} - -#[derive(Debug, Allocative)] -pub(crate) struct RunAction { - inner: UnregisteredRunAction, - starlark_values: OwnedFrozenValueTyped, - outputs: BoxSliceSet, -} - -impl RunAction { - fn unpack( - values: &OwnedFrozenValueTyped, - ) -> anyhow::Result { - let exe = values.exe.to_value().as_command_line_err()?; - let args = values.args.to_value().as_command_line_err()?; - let env = if values.env.is_none() { - Vec::new() - } else { - let d = DictRef::from_value(values.env.to_value()).context("expecting dict")?; - let mut res = Vec::with_capacity(d.len()); - for (k, v) in d.iter() { - res.push(( - k.unpack_str().context("expecting string")?, - v.as_command_line_err()?, - )); - } - res - }; - let worker: NoneOr<&WorkerInfo> = values.worker()?.typed; - - let worker = worker.into_option().map(|worker| UnpackedWorkerValues { - exe: worker.exe_command_line(), - id: WorkerId(worker.id), - concurrency: worker.concurrency(), - }); - - Ok(UnpackedRunActionValues { - exe, - args, - env, - worker, - }) - } - - /// Get the command line expansion for this RunAction. 
- fn expand_command_line_and_worker( - &self, - fs: &ExecutorFs, - artifact_visitor: &mut impl CommandLineArtifactVisitor, - ) -> anyhow::Result<(ExpandedCommandLine, Option)> { - let mut ctx = DefaultCommandLineContext::new(fs); - let values = Self::unpack(&self.starlark_values)?; - - let mut exe_rendered = Vec::::new(); - values - .exe - .add_to_command_line(&mut exe_rendered, &mut ctx)?; - values.exe.visit_artifacts(artifact_visitor)?; - - let worker = if let Some(worker) = values.worker { - let mut worker_rendered = Vec::::new(); - worker - .exe - .add_to_command_line(&mut worker_rendered, &mut ctx)?; - worker.exe.visit_artifacts(artifact_visitor)?; - Some(WorkerSpec { - exe: worker_rendered, - id: worker.id, - concurrency: worker.concurrency, - }) - } else { - None - }; - - let mut args_rendered = Vec::::new(); - values - .args - .add_to_command_line(&mut args_rendered, &mut ctx)?; - values.args.visit_artifacts(artifact_visitor)?; - - let cli_env: anyhow::Result> = values - .env - .into_iter() - .map(|(k, v)| { - let mut env = String::new(); - let mut ctx = DefaultCommandLineContext::new(fs); - v.add_to_command_line( - &mut SpaceSeparatedCommandLineBuilder::wrap_string(&mut env), - &mut ctx, - )?; - v.visit_artifacts(artifact_visitor)?; - Ok((k.to_owned(), env)) - }) - .collect(); - - Ok(( - ExpandedCommandLine { - exe: exe_rendered, - args: args_rendered, - env: cli_env?, - }, - worker, - )) - } - - pub(crate) fn new( - inner: UnregisteredRunAction, - starlark_values: OwnedFrozenValue, - outputs: IndexSet, - ) -> anyhow::Result { - let starlark_values = match starlark_values.downcast() { - Ok(starlark_values) => starlark_values, - Err(starlark_values) => { - return Err(RunActionValidationError::ContentsNotCommandLineValue( - starlark_values.value().to_repr(), - ) - .into()); - } - }; - - Self::unpack(&starlark_values)?; - - Ok(RunAction { - inner, - starlark_values, - outputs: BoxSliceSet::from(outputs), - }) - } - - fn prepare( - &self, - visitor: &mut impl RunActionVisitor, - ctx: &mut dyn ActionExecutionCtx, - ) -> anyhow::Result { - let executor_fs = ctx.executor_fs(); - let fs = executor_fs.fs(); - - let (expanded, worker) = - self.expand_command_line_and_worker(&ctx.executor_fs(), visitor)?; - - // TODO (@torozco): At this point, might as well just receive the list already. Finding - // those things in a HashMap is just not very useful. - let artifact_inputs: Vec<&ArtifactGroupValues> = visitor - .inputs() - .map(|group| ctx.artifact_values(group)) - .collect(); - - let mut inputs: Vec = - artifact_inputs[..].map(|&i| CommandExecutionInput::Artifact(Box::new(i.dupe()))); - - // Handle case when user requested file with action metadata to be generated. - // Generate content and output path for the file. It will be either passed - // to RE as a blob or written to disk in local executor. - // Path to this file is passed to user in environment variable which is selected by user. - let cli_ctx = DefaultCommandLineContext::new(&executor_fs); - - let mut extra_env = Vec::new(); - - if let Some(metadata_param) = &self.inner.metadata_param { - let path = BuckOutPath::new(ctx.target().owner().dupe(), metadata_param.path.clone()); - let env = cli_ctx - .resolve_project_path(fs.buck_out_path_resolver().resolve_gen(&path))? 
- .into_string(); - let (data, digest) = metadata_content(fs, &artifact_inputs, ctx.digest_config())?; - inputs.push(CommandExecutionInput::ActionMetadata(ActionMetadataBlob { - data, - digest, - path, - })); - extra_env.push((metadata_param.env_var.to_owned(), env)); - } - - let scratch = ctx.target().scratch_path(); - let scratch_path = fs.buck_out_path_resolver().resolve_scratch(&scratch); - extra_env.push(( - "BUCK_SCRATCH_PATH".to_owned(), - cli_ctx.resolve_project_path(scratch_path)?.into_string(), - )); - inputs.push(CommandExecutionInput::ScratchPath(scratch)); - - let paths = CommandExecutionPaths::new( - inputs, - self.outputs - .iter() - .map(|b| CommandExecutionOutput::BuildArtifact { - path: b.get_path().dupe(), - output_type: b.output_type(), - }) - .collect(), - ctx.fs(), - ctx.digest_config(), - )?; - - Ok(PreparedRunAction { - expanded, - extra_env, - paths, - worker, - }) - } - - pub async fn check_cache_result_is_useable( - &self, - ctx: &mut dyn ActionExecutionCtx, - request: &CommandExecutionRequest, - action_digest: &ActionDigest, - result: CommandExecutionResult, - dep_file_bundle: &Option, - ) -> anyhow::Result> { - // If it's served by the regular action cache no need to verify anything here. - if !result.was_served_by_remote_dep_file_cache() { - return Ok(ControlFlow::Break(result)); - } - - if let Some(bundle) = dep_file_bundle { - if let Some(found_dep_file_entry) = &result.dep_file_metadata { - let can_use = span_async( - buck2_data::MatchDepFilesStart { - checking_filtered_inputs: true, - remote_cache: true, - }, - async { - let res: anyhow::Result<_> = try { - bundle - .check_remote_dep_file_entry( - ctx.digest_config(), - ctx.fs(), - ctx.materializer(), - found_dep_file_entry, - ) - .await? - }; - - (res, buck2_data::MatchDepFilesEnd {}) - }, - ) - .await?; - - if can_use { - tracing::info!( - "Action result is cached via remote dep file cache, skipping execution of :\n```\n$ {}\n```\n for action `{}` with remote dep file key `{}`", - request.all_args_str(), - action_digest, - bundle.remote_dep_file_key, - ); - return Ok(ControlFlow::Break(result)); - } - } else { - // This should not happen as we check for the metadata on the cache querier side. 
-                tracing::debug!(
-                    "The remote dep file cache returned a hit for `{}`, but there is no metadata",
-                    bundle.remote_dep_file_key
-                );
-            }
-        }
-        // Continue through other options below
-        Ok(ControlFlow::Continue(ctx.command_execution_manager()))
-    }
-}
-
-pub(crate) struct PreparedRunAction {
-    expanded: ExpandedCommandLine,
-    extra_env: Vec<(String, String)>,
-    paths: CommandExecutionPaths,
-    worker: Option<WorkerSpec>,
-}
-
-impl PreparedRunAction {
-    fn into_command_execution_request(self) -> CommandExecutionRequest {
-        let Self {
-            expanded: ExpandedCommandLine { exe, args, mut env },
-            extra_env,
-            paths,
-            worker,
-        } = self;
-
-        for (k, v) in extra_env {
-            env.insert(k, v);
-        }
-
-        CommandExecutionRequest::new(exe, args, paths, env).with_worker(worker)
-    }
-}
-
-trait RunActionVisitor: CommandLineArtifactVisitor {
-    type Iter<'a>: Iterator<Item = &'a ArtifactGroup>
-    where
-        Self: 'a;
-
-    fn inputs<'a>(&'a self) -> Self::Iter<'a>;
-}
-
-impl RunActionVisitor for SimpleCommandLineArtifactVisitor {
-    type Iter<'a> = impl Iterator<Item = &'a ArtifactGroup> where Self: 'a;
-
-    fn inputs<'a>(&'a self) -> Self::Iter<'a> {
-        self.inputs.iter()
-    }
-}
-
-impl RunActionVisitor for DepFilesCommandLineVisitor<'_> {
-    type Iter<'a> = impl Iterator<Item = &'a ArtifactGroup> where Self: 'a;
-
-    fn inputs<'a>(&'a self) -> Self::Iter<'a> {
-        self.inputs.iter().flat_map(|g| g.iter())
-    }
-}
-
-#[async_trait]
-impl Action for RunAction {
-    fn kind(&self) -> buck2_data::ActionKind {
-        buck2_data::ActionKind::Run
-    }
-
-    fn inputs(&self) -> anyhow::Result<Cow<'_, [ArtifactGroup]>> {
-        let values = Self::unpack(&self.starlark_values)?;
-        let mut artifact_visitor = SimpleCommandLineArtifactVisitor::new();
-        values.args.visit_artifacts(&mut artifact_visitor)?;
-        values.exe.visit_artifacts(&mut artifact_visitor)?;
-        if let Some(worker) = values.worker {
-            worker.exe.visit_artifacts(&mut artifact_visitor)?;
-        }
-        for (_, v) in values.env.iter() {
-            v.visit_artifacts(&mut artifact_visitor)?;
-        }
-        Ok(Cow::Owned(artifact_visitor.inputs.into_iter().collect()))
-    }
-
-    fn outputs(&self) -> anyhow::Result<Cow<'_, [BuildArtifact]>> {
-        Ok(Cow::Borrowed(self.outputs.as_slice()))
-    }
-
-    fn as_executable(&self) -> ActionExecutable<'_> {
-        ActionExecutable::Incremental(self)
-    }
-
-    fn category(&self) -> &Category {
-        &self.inner.category
-    }
-
-    fn identifier(&self) -> Option<&str> {
-        self.inner.identifier.as_deref()
-    }
-
-    fn always_print_stderr(&self) -> bool {
-        self.inner.always_print_stderr
-    }
-
-    fn aquery_attributes(&self, fs: &ExecutorFs) -> indexmap::IndexMap<String, String> {
-        let mut cli_rendered = Vec::<String>::new();
-        let mut ctx = DefaultCommandLineContext::new(fs);
-        let values = Self::unpack(&self.starlark_values).unwrap();
-        values
-            .exe
-            .add_to_command_line(&mut cli_rendered, &mut ctx)
-            .unwrap();
-        values
-            .args
-            .add_to_command_line(&mut cli_rendered, &mut ctx)
-            .unwrap();
-        let cmd = format!("[{}]", cli_rendered.iter().join(", "));
-        indexmap!
{ - "cmd".to_owned() => cmd, - "executor_preference".to_owned() => self.inner.executor_preference.to_string(), - "always_print_stderr".to_owned() => self.inner.always_print_stderr.to_string(), - "weight".to_owned() => self.inner.weight.to_string(), - "dep_files".to_owned() => self.inner.dep_files.to_string(), - "metadata_param".to_owned() => match &self.inner.metadata_param { - None => "None".to_owned(), - Some(x) => x.to_string(), - }, - "no_outputs_cleanup".to_owned() => self.inner.no_outputs_cleanup.to_string(), - "allow_cache_upload".to_owned() => self.inner.allow_cache_upload.to_string(), - "allow_dep_file_cache_upload".to_owned() => self.inner.allow_dep_file_cache_upload.to_string(), - } - } -} - -#[async_trait] -impl IncrementalActionExecutable for RunAction { - async fn execute( - &self, - ctx: &mut dyn ActionExecutionCtx, - ) -> anyhow::Result<(ActionOutputs, ActionExecutionMetadata)> { - let knobs = ctx.run_action_knobs(); - let process_dep_files = !self.inner.dep_files.labels.is_empty() || knobs.hash_all_commands; - let (prepared_run_action, dep_file_visitor) = if !process_dep_files { - ( - self.prepare(&mut SimpleCommandLineArtifactVisitor::new(), ctx)?, - None, - ) - } else { - let mut visitor = DepFilesCommandLineVisitor::new(&self.inner.dep_files); - let prepared = self.prepare(&mut visitor, ctx)?; - (prepared, Some(visitor)) - }; - let cmdline_digest = prepared_run_action.expanded.fingerprint(); - - // Run actions are assumed to be shared - let host_sharing_requirements = HostSharingRequirements::Shared(self.inner.weight); - - let req = prepared_run_action - .into_command_execution_request() - .with_prefetch_lossy_stderr(true) - .with_executor_preference(self.inner.executor_preference) - .with_host_sharing_requirements(host_sharing_requirements) - .with_low_pass_filter(self.inner.low_pass_filter) - .with_outputs_cleanup(!self.inner.no_outputs_cleanup) - .with_local_environment_inheritance(EnvironmentInheritance::local_command_exclusions()) - .with_force_full_hybrid_if_capable(self.inner.force_full_hybrid_if_capable) - .with_unique_input_inodes(self.inner.unique_input_inodes); - - let (mut dep_file_bundle, req) = if let Some(visitor) = dep_file_visitor { - let bundle = make_dep_file_bundle(ctx, visitor, cmdline_digest, req.paths())?; - // Enable remote dep file cache lookup - let req = req.with_remote_dep_file_key(&bundle.remote_dep_file_key); - (Some(bundle), req) - } else { - (None, req) - }; - - // First, check in the local dep file cache if an identical action can be found there. - // Do this before checking the action cache as we can avoid a potentially large download. - // Once the action cache lookup misses, we will do the full dep file cache look up. - let should_fully_check_dep_file_cache = if let Some(dep_file_bundle) = &dep_file_bundle { - let (outputs, should_fully_check_dep_file_cache) = dep_file_bundle - .check_local_dep_file_cache_for_identical_action(ctx, self.outputs.as_slice()) - .await?; - if let Some(m) = outputs { - return Ok(m); - } - should_fully_check_dep_file_cache - } else { - false - }; - - // Prepare the action, check the action cache, fully check the local dep file cache if needed, then execute the command - let prepared_action = ctx.prepare_action(&req)?; - let manager = ctx.command_execution_manager(); - - let action_cache_result = ctx.action_cache(manager, &req, &prepared_action).await; - - // If the result was served by the remote dep file cache, we can't use the result just yet. 
We need to verify that
-        // the inputs tracked by a depfile that was actually used in the cache hit are identical to the inputs we have for this action.
-        let result = if let ControlFlow::Break(res) = action_cache_result {
-            self.check_cache_result_is_useable(
-                ctx,
-                &req,
-                &prepared_action.action,
-                res,
-                &dep_file_bundle,
-            )
-            .await?
-        } else {
-            action_cache_result
-        };
-
-        // If the cache queries did not yield a result, fall back to the local dep file query (continuation), then execution.
-        let mut result = match result {
-            ControlFlow::Break(res) => res,
-            ControlFlow::Continue(manager) => {
-                if let Some(dep_file_bundle) = &dep_file_bundle {
-                    if should_fully_check_dep_file_cache {
-                        let lookup = dep_file_bundle
-                            .check_local_dep_file_cache(ctx, self.outputs.as_slice())
-                            .await?;
-                        if let Some(m) = lookup {
-                            return Ok(m);
-                        }
-                    }
-                };
-
-                ctx.exec_cmd(manager, &req, &prepared_action).await
-            }
-        };
-
-        // If the action has a dep file, log the remote dep file key so we can look out for collisions
-        if let Some(bundle) = &dep_file_bundle {
-            result.dep_file_key = Some(bundle.remote_dep_file_key.to_string())
-        }
-
-        // If there is a dep file entry AND if dep file cache upload is enabled, upload it
-        let upload_dep_file = self.inner.allow_dep_file_cache_upload && dep_file_bundle.is_some();
-        if result.was_success() && (self.inner.allow_cache_upload || upload_dep_file) {
-            let dep_file_entry = match &mut dep_file_bundle {
-                Some(dep_file_bundle) if self.inner.allow_dep_file_cache_upload => {
-                    let entry = dep_file_bundle.make_remote_dep_file_entry(ctx).await?;
-                    Some(entry)
-                }
-                _ => None,
-            };
-            let upload_result = ctx
-                .cache_upload(prepared_action.action.dupe(), &result, dep_file_entry)
-                .await?;
-
-            result.did_cache_upload = upload_result.did_cache_upload;
-            result.did_dep_file_cache_upload = upload_result.did_dep_file_cache_upload;
-        }
-
-        let (outputs, metadata) = ctx.unpack_command_execution_result(
-            &req,
-            result,
-            self.inner.allow_cache_upload,
-            self.inner.allow_dep_file_cache_upload,
-        )?;
-
-        if let Some(dep_file_bundle) = dep_file_bundle {
-            populate_dep_files(ctx, dep_file_bundle, &outputs).await?;
-        }
-
-        Ok((outputs, metadata))
-    }
-}
diff --git a/app/buck2_action_impl/src/actions/impls/symlinked_dir.rs b/app/buck2_action_impl/src/actions/impls/symlinked_dir.rs
index 3a19fd408ce13..e5af9df77daf0 100644
--- a/app/buck2_action_impl/src/actions/impls/symlinked_dir.rs
+++ b/app/buck2_action_impl/src/actions/impls/symlinked_dir.rs
@@ -17,6 +17,7 @@ use buck2_build_api::actions::box_slice_set::BoxSliceSet;
 use buck2_build_api::actions::execute::action_executor::ActionExecutionKind;
 use buck2_build_api::actions::execute::action_executor::ActionExecutionMetadata;
 use buck2_build_api::actions::execute::action_executor::ActionOutputs;
+use buck2_build_api::actions::execute::error::ExecuteError;
 use buck2_build_api::actions::Action;
 use buck2_build_api::actions::ActionExecutable;
 use buck2_build_api::actions::ActionExecutionCtx;
@@ -25,7 +26,7 @@ use buck2_build_api::actions::UnregisteredAction;
 use buck2_build_api::artifact_groups::ArtifactGroup;
 use buck2_build_api::interpreter::rule_defs::artifact::associated::AssociatedArtifacts;
 use buck2_build_api::interpreter::rule_defs::artifact::starlark_artifact_like::ValueAsArtifactLike;
-use buck2_core::category::Category;
+use buck2_core::category::CategoryRef;
 use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath;
 use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf;
 use
buck2_execute::artifact::artifact_dyn::ArtifactDyn; @@ -36,14 +37,12 @@ use dupe::Dupe; use gazebo::prelude::*; use indexmap::IndexSet; use itertools::Itertools; -use once_cell::sync::Lazy; -use starlark::values::dict::DictOf; +use starlark::values::dict::UnpackDictEntries; use starlark::values::OwnedFrozenValue; use starlark::values::ValueError; use starlark_map::small_set::SmallSet; -use thiserror::Error; -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum SymlinkedDirError { #[error("Paths to symlink_dir must be non-overlapping, but got `{0}` and `{1}`")] OverlappingPaths(Box, Box), @@ -94,14 +93,15 @@ impl UnregisteredSymlinkedDirAction { // Map each artifact into an optional tuple of (artifact, path) and associated_artifacts, then collect // them into an optional tuple of vector and an index set respectively fn unpack_args<'v>( - srcs: DictOf<'v, &'v str, ValueAsArtifactLike<'v>>, + srcs: UnpackDictEntries<&'v str, ValueAsArtifactLike<'v>>, ) -> anyhow::Result<( Vec<(ArtifactGroup, Box)>, SmallSet, )> { // This assignment doesn't look like it should be necessary, but we get an error if we // don't do it. - srcs.collect_entries() + let len = srcs.entries.len(); + srcs.entries .into_iter() .map(|(k, as_artifact)| { let associates = as_artifact.0.get_associated_artifacts(); @@ -116,7 +116,7 @@ impl UnregisteredSymlinkedDirAction { )) }) .fold_ok( - (Vec::with_capacity(srcs.len()), SmallSet::new()), + (Vec::with_capacity(len), SmallSet::new()), |(mut aps, mut assocs), (ap, assoc)| { aps.push(ap); assoc.iter().flat_map(|v| v.iter()).for_each(|a| { @@ -129,7 +129,7 @@ impl UnregisteredSymlinkedDirAction { pub(crate) fn new<'v>( copy: bool, - srcs: DictOf<'v, &'v str, ValueAsArtifactLike<'v>>, + srcs: UnpackDictEntries<&'v str, ValueAsArtifactLike<'v>>, ) -> anyhow::Result { let (mut args, unioned_associated_artifacts) = Self::unpack_args(srcs) // FIXME: This warning is talking about the Starlark-level argument name `srcs`. 
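For orientation, the action refactored in this file backs the Starlark `ctx.actions.symlinked_dir` API, whose `srcs` dict is exactly what `unpack_args` consumes above. A minimal, illustrative call site (rule attribute names are made up; the dict keys are paths relative to the output directory):

    def _impl(ctx):
        out = ctx.actions.symlinked_dir(
            "tree",
            {
                "a/one.txt": ctx.attrs.first,
                "b/two.txt": ctx.attrs.second,
            },
        )
        return [DefaultInfo(default_output = out)]

Overlapping keys (for example `a` and `a/one.txt`) are rejected with the `OverlappingPaths` error above.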
@@ -162,6 +162,7 @@ impl UnregisteredAction for UnregisteredSymlinkedDirAction { inputs: IndexSet, outputs: IndexSet, _starlark_data: Option, + _error_handler: Option, ) -> anyhow::Result> { Ok(Box::new(SymlinkedDirAction { copy: self.copy, @@ -199,19 +200,20 @@ impl Action for SymlinkedDirAction { Ok(Cow::Borrowed(self.inputs.as_slice())) } - fn outputs(&self) -> anyhow::Result> { - Ok(Cow::Borrowed(self.outputs.as_slice())) + fn outputs(&self) -> Cow<'_, [BuildArtifact]> { + Cow::Borrowed(self.outputs.as_slice()) + } + + fn first_output(&self) -> &BuildArtifact { + self.output() } fn as_executable(&self) -> ActionExecutable<'_> { ActionExecutable::Incremental(self) } - fn category(&self) -> &Category { - static SYMLINKED_DIR_CATEGORY: Lazy = - Lazy::new(|| Category::try_from("symlinked_dir").unwrap()); - - &SYMLINKED_DIR_CATEGORY + fn category(&self) -> CategoryRef { + CategoryRef::unchecked_new("symlinked_dir") } fn identifier(&self) -> Option<&str> { @@ -224,7 +226,7 @@ impl IncrementalActionExecutable for SymlinkedDirAction { async fn execute( &self, ctx: &mut dyn ActionExecutionCtx, - ) -> anyhow::Result<(ActionOutputs, ActionExecutionMetadata)> { + ) -> Result<(ActionOutputs, ActionExecutionMetadata), ExecuteError> { let fs = ctx.fs().fs(); let output = ctx.fs().resolve_build(self.output().get_path()); let mut builder = ArtifactValueBuilder::new(fs, ctx.digest_config()); @@ -261,6 +263,7 @@ impl IncrementalActionExecutable for SymlinkedDirAction { ActionExecutionMetadata { execution_kind: ActionExecutionKind::Simple, timing: ActionExecutionTimingData::default(), + input_files_bytes: None, }, )) } @@ -270,16 +273,12 @@ impl IncrementalActionExecutable for SymlinkedDirAction { mod tests { use buck2_artifact::artifact::artifact_type::Artifact; use buck2_artifact::artifact::source_artifact::SourceArtifact; - use buck2_core::buck_path::path::BuckPath; - use buck2_core::package::package_relative_path::PackageRelativePathBuf; - use buck2_core::package::PackageLabel; + use buck2_core::package::source_path::SourcePath; use super::*; fn mk_artifact() -> Artifact { - let pkg = PackageLabel::testing_parse("cell//pkg"); - let path = PackageRelativePathBuf::unchecked_new("".to_owned()); - let buck_path = BuckPath::testing_new(pkg, path); + let buck_path = SourcePath::testing_new("cell//pkg", ""); Artifact::from(SourceArtifact::new(buck_path)) } diff --git a/app/buck2_action_impl/src/actions/impls/write.rs b/app/buck2_action_impl/src/actions/impls/write.rs index 25ab49f0f3df0..eeb7516dd87e4 100644 --- a/app/buck2_action_impl/src/actions/impls/write.rs +++ b/app/buck2_action_impl/src/actions/impls/write.rs @@ -19,6 +19,7 @@ use buck2_artifact::artifact::build_artifact::BuildArtifact; use buck2_build_api::actions::execute::action_executor::ActionExecutionKind; use buck2_build_api::actions::execute::action_executor::ActionExecutionMetadata; use buck2_build_api::actions::execute::action_executor::ActionOutputs; +use buck2_build_api::actions::execute::error::ExecuteError; use buck2_build_api::actions::Action; use buck2_build_api::actions::ActionExecutable; use buck2_build_api::actions::ActionExecutionCtx; @@ -28,7 +29,7 @@ use buck2_build_api::artifact_groups::ArtifactGroup; use buck2_build_api::interpreter::rule_defs::cmd_args::value_as::ValueAsCommandLineLike; use buck2_build_api::interpreter::rule_defs::cmd_args::AbsCommandLineContext; use buck2_build_api::interpreter::rule_defs::cmd_args::DefaultCommandLineContext; -use buck2_core::category::Category; +use buck2_core::category::CategoryRef; use 
buck2_execute::artifact::fs::ExecutorFs; use buck2_execute::execute::command_executor::ActionExecutionTimingData; use buck2_execute::materialize::materializer::WriteRequest; @@ -36,11 +37,11 @@ use dupe::Dupe; use indexmap::indexmap; use indexmap::IndexMap; use indexmap::IndexSet; -use once_cell::sync::Lazy; use starlark::values::OwnedFrozenValue; -use thiserror::Error; +use starlark::values::UnpackValue; +use starlark::StarlarkResultExt; -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum WriteActionValidationError { #[error("WriteAction received inputs")] TooManyInputs, @@ -65,6 +66,7 @@ impl UnregisteredAction for UnregisteredWriteAction { inputs: IndexSet, outputs: IndexSet, starlark_data: Option, + _error_handler: Option, ) -> anyhow::Result> { let contents = starlark_data.expect("module data to be present"); @@ -99,7 +101,10 @@ impl WriteAction { return Err(WriteActionValidationError::TooManyInputs.into()); } - if contents.value().as_command_line().is_none() { + if ValueAsCommandLineLike::unpack_value(contents.value()) + .into_anyhow_result()? + .is_none() + { return Err(WriteActionValidationError::ContentsNotCommandLineValue( contents.value().to_repr(), ) @@ -131,10 +136,9 @@ impl WriteAction { &mut ctx as _ }; - self.contents - .value() - .as_command_line() + ValueAsCommandLineLike::unpack_value_err(self.contents.value()) .unwrap() + .0 .add_to_command_line(&mut cli, ctx)?; Ok(cli.join("\n")) @@ -151,18 +155,20 @@ impl Action for WriteAction { Ok(Cow::Borrowed(&[])) } - fn outputs(&self) -> anyhow::Result> { - Ok(Cow::Borrowed(slice::from_ref(&self.output))) + fn outputs(&self) -> Cow<'_, [BuildArtifact]> { + Cow::Borrowed(slice::from_ref(&self.output)) + } + + fn first_output(&self) -> &BuildArtifact { + &self.output } fn as_executable(&self) -> ActionExecutable<'_> { ActionExecutable::Incremental(self) } - fn category(&self) -> &Category { - static WRITE_CATEGORY: Lazy = Lazy::new(|| Category::try_from("write").unwrap()); - - &WRITE_CATEGORY + fn category(&self) -> CategoryRef { + CategoryRef::unchecked_new("write") } fn identifier(&self) -> Option<&str> { @@ -186,7 +192,7 @@ impl IncrementalActionExecutable for WriteAction { async fn execute( &self, ctx: &mut dyn ActionExecutionCtx, - ) -> anyhow::Result<(ActionOutputs, ActionExecutionMetadata)> { + ) -> Result<(ActionOutputs, ActionExecutionMetadata), ExecuteError> { let fs = ctx.fs(); let mut execution_start = None; @@ -216,6 +222,7 @@ impl IncrementalActionExecutable for WriteAction { ActionExecutionMetadata { execution_kind: ActionExecutionKind::Simple, timing: ActionExecutionTimingData { wall_time }, + input_files_bytes: None, }, )) } diff --git a/app/buck2_action_impl/src/actions/impls/write_json.rs b/app/buck2_action_impl/src/actions/impls/write_json.rs index e9375ddbe705a..ffdc93f64b1b3 100644 --- a/app/buck2_action_impl/src/actions/impls/write_json.rs +++ b/app/buck2_action_impl/src/actions/impls/write_json.rs @@ -8,7 +8,6 @@ */ use std::borrow::Cow; -use std::convert::TryFrom; use std::fmt; use std::slice; use std::time::Instant; @@ -20,21 +19,24 @@ use buck2_artifact::artifact::build_artifact::BuildArtifact; use buck2_build_api::actions::execute::action_executor::ActionExecutionKind; use buck2_build_api::actions::execute::action_executor::ActionExecutionMetadata; use buck2_build_api::actions::execute::action_executor::ActionOutputs; +use buck2_build_api::actions::execute::error::ExecuteError; use buck2_build_api::actions::impls::json; use buck2_build_api::actions::impls::json::validate_json; +use 
buck2_build_api::actions::impls::json::JsonUnpack; use buck2_build_api::actions::Action; use buck2_build_api::actions::ActionExecutable; use buck2_build_api::actions::ActionExecutionCtx; use buck2_build_api::actions::IncrementalActionExecutable; use buck2_build_api::actions::UnregisteredAction; use buck2_build_api::artifact_groups::ArtifactGroup; +use buck2_build_api::command_line_arg_like_impl; use buck2_build_api::interpreter::rule_defs::cmd_args::value_as::ValueAsCommandLineLike; use buck2_build_api::interpreter::rule_defs::cmd_args::CommandLineArgLike; use buck2_build_api::interpreter::rule_defs::cmd_args::CommandLineArtifactVisitor; use buck2_build_api::interpreter::rule_defs::cmd_args::CommandLineBuilder; use buck2_build_api::interpreter::rule_defs::cmd_args::CommandLineContext; use buck2_build_api::interpreter::rule_defs::cmd_args::WriteToFileMacroVisitor; -use buck2_core::category::Category; +use buck2_core::category::CategoryRef; use buck2_execute::artifact::fs::ExecutorFs; use buck2_execute::execute::command_executor::ActionExecutionTimingData; use buck2_execute::materialize::materializer::WriteRequest; @@ -42,22 +44,23 @@ use dupe::Dupe; use indexmap::indexmap; use indexmap::IndexMap; use indexmap::IndexSet; -use once_cell::sync::Lazy; use starlark::any::ProvidesStaticType; use starlark::coerce::Coerce; use starlark::starlark_complex_value; use starlark::values::starlark_value; +use starlark::values::type_repr::StarlarkTypeRepr; use starlark::values::Demand; use starlark::values::Freeze; use starlark::values::NoSerialize; use starlark::values::OwnedFrozenValue; use starlark::values::StarlarkValue; use starlark::values::Trace; +use starlark::values::UnpackValue; use starlark::values::Value; +use starlark::values::ValueLifetimeless; use starlark::values::ValueLike; -use thiserror::Error; -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum WriteJsonActionValidationError { #[error("WriteJsonAction received inputs")] TooManyInputs, @@ -67,14 +70,15 @@ enum WriteJsonActionValidationError { TooManyOutputs, } -#[derive(Allocative)] +#[derive(Allocative, Debug)] pub(crate) struct UnregisteredWriteJsonAction { pretty: bool, + absolute: bool, } impl UnregisteredWriteJsonAction { - pub(crate) fn new(pretty: bool) -> Self { - Self { pretty } + pub(crate) fn new(pretty: bool, absolute: bool) -> Self { + Self { pretty, absolute } } pub(crate) fn cli<'v>( @@ -91,9 +95,10 @@ impl UnregisteredAction for UnregisteredWriteJsonAction { inputs: IndexSet, outputs: IndexSet, starlark_data: Option, + _error_handler: Option, ) -> anyhow::Result> { let contents = starlark_data.expect("module data to be present"); - let action = WriteJsonAction::new(contents, inputs, outputs, self.pretty)?; + let action = WriteJsonAction::new(contents, inputs, outputs, *self)?; Ok(Box::new(action)) } } @@ -102,7 +107,7 @@ impl UnregisteredAction for UnregisteredWriteJsonAction { struct WriteJsonAction { contents: OwnedFrozenValue, // JSON value output: BuildArtifact, - pretty: bool, + inner: UnregisteredWriteJsonAction, } impl WriteJsonAction { @@ -110,9 +115,9 @@ impl WriteJsonAction { contents: OwnedFrozenValue, inputs: IndexSet, outputs: IndexSet, - pretty: bool, + inner: UnregisteredWriteJsonAction, ) -> anyhow::Result { - validate_json(contents.value())?; + validate_json(JsonUnpack::unpack_value_err(contents.value())?)?; let mut outputs = outputs.into_iter(); @@ -131,13 +136,19 @@ impl WriteJsonAction { Ok(WriteJsonAction { contents, output, - pretty, + inner, }) } fn get_contents(&self, fs: 
&ExecutorFs) -> anyhow::Result> { let mut writer = Vec::new(); - json::write_json(self.contents.value(), Some(fs), &mut writer, self.pretty)?; + json::write_json( + JsonUnpack::unpack_value_err(self.contents.value())?, + Some(fs), + &mut writer, + self.inner.pretty, + self.inner.absolute, + )?; Ok(writer) } } @@ -152,19 +163,20 @@ impl Action for WriteJsonAction { Ok(Cow::Borrowed(&[])) } - fn outputs(&self) -> anyhow::Result> { - Ok(Cow::Borrowed(slice::from_ref(&self.output))) + fn outputs(&self) -> Cow<'_, [BuildArtifact]> { + Cow::Borrowed(slice::from_ref(&self.output)) + } + + fn first_output(&self) -> &BuildArtifact { + &self.output } fn as_executable(&self) -> ActionExecutable<'_> { ActionExecutable::Incremental(self) } - fn category(&self) -> &Category { - static WRITE_CATEGORY: Lazy = - Lazy::new(|| Category::try_from("write_json").unwrap()); - - &WRITE_CATEGORY + fn category(&self) -> CategoryRef { + CategoryRef::unchecked_new("write_json") } fn identifier(&self) -> Option<&str> { @@ -178,7 +190,8 @@ impl Action for WriteJsonAction { "contents".to_owned() => match res { Ok(v) => v, Err(e) => format!("ERROR: constructing contents ({})", e) - } + }, + "absolute".to_owned() => self.inner.absolute.to_string(), } } } @@ -188,7 +201,7 @@ impl IncrementalActionExecutable for WriteJsonAction { async fn execute( &self, ctx: &mut dyn ActionExecutionCtx, - ) -> anyhow::Result<(ActionOutputs, ActionExecutionMetadata)> { + ) -> Result<(ActionOutputs, ActionExecutionMetadata), ExecuteError> { let fs = ctx.fs(); let mut execution_start = None; @@ -218,6 +231,7 @@ impl IncrementalActionExecutable for WriteJsonAction { ActionExecutionMetadata { execution_kind: ActionExecutionKind::Simple, timing: ActionExecutionTimingData { wall_time }, + input_files_bytes: None, }, )) } @@ -229,7 +243,7 @@ impl IncrementalActionExecutable for WriteJsonAction { #[derive(Debug, Clone, Trace, Coerce, Freeze, ProvidesStaticType, Allocative)] #[derive(NoSerialize)] // TODO we should probably have a serialization for transitive set #[repr(C)] -pub(crate) struct WriteJsonCommandLineArgGen { +pub(crate) struct WriteJsonCommandLineArgGen { artifact: V, // The list of artifacts here could be large and we don't want to hold those explicitly (due to // the memory cost) and so we hold the same content value that the write_json action itself will and @@ -246,7 +260,7 @@ impl<'v, V: ValueLike<'v>> fmt::Display for WriteJsonCommandLineArgGen { starlark_complex_value!(pub(crate) WriteJsonCommandLineArg); #[starlark_value(type = "write_json_cli_args")] -impl<'v, V: ValueLike<'v> + 'v> StarlarkValue<'v> for WriteJsonCommandLineArgGen +impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for WriteJsonCommandLineArgGen where Self: ProvidesStaticType<'v>, { @@ -256,21 +270,26 @@ where } impl<'v, V: ValueLike<'v>> CommandLineArgLike for WriteJsonCommandLineArgGen { + fn register_me(&self) { + command_line_arg_like_impl!(WriteJsonCommandLineArg::starlark_type_repr()); + } + fn add_to_command_line( &self, builder: &mut dyn CommandLineBuilder, context: &mut dyn CommandLineContext, ) -> anyhow::Result<()> { - self.artifact - .to_value() - .as_command_line_err()? + ValueAsCommandLineLike::unpack_value_err(self.artifact.to_value())? 
+            .0
             .add_to_command_line(builder, context)
     }

     fn visit_artifacts(&self, visitor: &mut dyn CommandLineArtifactVisitor) -> anyhow::Result<()> {
         let artifact = self.artifact.to_value();
         let content = self.content.to_value();
-        artifact.as_command_line_err()?.visit_artifacts(visitor)?;
+        ValueAsCommandLineLike::unpack_value_err(artifact)?
+            .0
+            .visit_artifacts(visitor)?;
         json::visit_json_artifacts(content, visitor)
     }
diff --git a/app/buck2_action_impl/src/actions/impls/write_macros.rs b/app/buck2_action_impl/src/actions/impls/write_macros.rs
index 4b06de0005103..31f6cbcb29f30 100644
--- a/app/buck2_action_impl/src/actions/impls/write_macros.rs
+++ b/app/buck2_action_impl/src/actions/impls/write_macros.rs
@@ -17,6 +17,7 @@ use buck2_artifact::artifact::build_artifact::BuildArtifact;
 use buck2_build_api::actions::execute::action_executor::ActionExecutionKind;
 use buck2_build_api::actions::execute::action_executor::ActionExecutionMetadata;
 use buck2_build_api::actions::execute::action_executor::ActionOutputs;
+use buck2_build_api::actions::execute::error::ExecuteError;
 use buck2_build_api::actions::Action;
 use buck2_build_api::actions::ActionExecutable;
 use buck2_build_api::actions::ActionExecutionCtx;
@@ -30,17 +31,18 @@ use buck2_build_api::interpreter::rule_defs::cmd_args::CommandLineLocation;
 use buck2_build_api::interpreter::rule_defs::cmd_args::DefaultCommandLineContext;
 use buck2_build_api::interpreter::rule_defs::cmd_args::WriteToFileMacroVisitor;
 use buck2_build_api::interpreter::rule_defs::resolved_macro::ResolvedMacro;
-use buck2_core::category::Category;
+use buck2_core::category::CategoryRef;
 use buck2_core::fs::paths::RelativePathBuf;
 use buck2_core::fs::project_rel_path::ProjectRelativePathBuf;
+use buck2_error::internal_error_anyhow;
 use buck2_execute::artifact::fs::ExecutorFs;
 use buck2_execute::execute::command_executor::ActionExecutionTimingData;
 use buck2_execute::materialize::materializer::WriteRequest;
 use dupe::Dupe;
 use indexmap::IndexSet;
-use once_cell::sync::Lazy;
 use starlark::values::OwnedFrozenValue;
-use thiserror::Error;
+use starlark::values::UnpackValue;
+use starlark::StarlarkResultExt;

 #[derive(Allocative)]
 pub(crate) struct UnregisteredWriteMacrosToFileAction {
@@ -59,14 +61,23 @@ impl UnregisteredAction for UnregisteredWriteMacrosToFileAction {
         inputs: IndexSet<ArtifactGroup>,
         outputs: IndexSet<BuildArtifact>,
         starlark_data: Option<OwnedFrozenValue>,
+        _error_handler: Option<OwnedFrozenValue>,
     ) -> anyhow::Result<Box<dyn Action>> {
         let contents = starlark_data.expect("Action data should be present");
-        let action = WriteMacrosToFileAction::new(self.identifier, contents, inputs, outputs)?;
+
+        if !inputs.is_empty() {
+            return Err(internal_error_anyhow!(
+                "Input artifacts must be empty for write macros action"
+            ));
+        }
+
+        let action = WriteMacrosToFileAction::new(self.identifier, contents, outputs)?;
+
         Ok(Box::new(action))
     }
 }

-#[derive(Debug, Error)]
+#[derive(Debug, buck2_error::Error)]
 enum WriteMacrosActionValidationError {
     #[error("At least one output file must be specified for a write macros action")]
     NoOutputsSpecified,
@@ -82,7 +93,6 @@ enum WriteMacrosActionValidationError {
 struct WriteMacrosToFileAction {
     identifier: String,
     contents: OwnedFrozenValue, // StarlarkCmdArgs
-    inputs: Box<[ArtifactGroup]>,
     outputs: Box<[BuildArtifact]>,
 }

@@ -90,14 +100,16 @@ impl WriteMacrosToFileAction {
     fn new(
         identifier: String,
         contents: OwnedFrozenValue,
-        inputs: IndexSet<ArtifactGroup>,
         outputs: IndexSet<BuildArtifact>,
     ) -> anyhow::Result<Self> {
         if outputs.is_empty() {
             Err(anyhow::anyhow!(
                 WriteMacrosActionValidationError::NoOutputsSpecified
             ))
-        } else if
contents.value().as_command_line().is_none() { + } else if ValueAsCommandLineLike::unpack_value(contents.value()) + .into_anyhow_result()? + .is_none() + { Err(anyhow::anyhow!( WriteMacrosActionValidationError::ContentsNotCommandLineValue( contents.value().to_repr() @@ -107,7 +119,6 @@ impl WriteMacrosToFileAction { Ok(Self { identifier, contents, - inputs: inputs.into_iter().collect(), outputs: outputs.into_iter().collect(), }) } @@ -121,22 +132,24 @@ impl Action for WriteMacrosToFileAction { } fn inputs(&self) -> anyhow::Result> { - Ok(Cow::Borrowed(&self.inputs)) + Ok(Cow::Borrowed(&[])) + } + + fn outputs(&self) -> Cow<'_, [BuildArtifact]> { + Cow::Borrowed(&self.outputs) } - fn outputs(&self) -> anyhow::Result> { - Ok(Cow::Borrowed(&self.outputs)) + fn first_output(&self) -> &BuildArtifact { + // Required to have outputs on construction + &self.outputs[0] } fn as_executable(&self) -> ActionExecutable<'_> { ActionExecutable::Incremental(self) } - fn category(&self) -> &Category { - static WRITE_MACROS_CATEGORY: Lazy = - Lazy::new(|| Category::try_from("write_macros_to_file").unwrap()); - - &WRITE_MACROS_CATEGORY + fn category(&self) -> CategoryRef { + CategoryRef::unchecked_new("write_macros_to_file") } fn identifier(&self) -> Option<&str> { @@ -149,7 +162,7 @@ impl IncrementalActionExecutable for WriteMacrosToFileAction { async fn execute( &self, ctx: &mut dyn ActionExecutionCtx, - ) -> anyhow::Result<(ActionOutputs, ActionExecutionMetadata)> { + ) -> Result<(ActionOutputs, ActionExecutionMetadata), ExecuteError> { let mut execution_start = None; let values = ctx @@ -162,16 +175,15 @@ impl IncrementalActionExecutable for WriteMacrosToFileAction { let mut output_contents = Vec::with_capacity(self.outputs.len()); let mut macro_writer = MacroToFileWriter::new(&fs, &mut output_contents); - self.contents - .value() - .as_command_line() - .unwrap() + ValueAsCommandLineLike::unpack_value_err(self.contents.value())? + .0 .visit_write_to_file_macros(&mut macro_writer)?; if self.outputs.len() != output_contents.len() { - return Err( - WriteMacrosActionValidationError::InconsistentNumberOfMacroArtifacts.into(), - ); + return Err(anyhow::Error::new( + WriteMacrosActionValidationError::InconsistentNumberOfMacroArtifacts, + ) + .into()); } Ok( @@ -199,6 +211,7 @@ impl IncrementalActionExecutable for WriteMacrosToFileAction { ActionExecutionMetadata { execution_kind: ActionExecutionKind::Simple, timing: ActionExecutionTimingData { wall_time }, + input_files_bytes: None, }, )) } diff --git a/app/buck2_action_impl/src/actions/mod.rs b/app/buck2_action_impl/src/actions/mod.rs deleted file mode 100644 index 9eadc96e15e5f..0000000000000 --- a/app/buck2_action_impl/src/actions/mod.rs +++ /dev/null @@ -1,10 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -pub mod impls; diff --git a/app/buck2_action_impl/src/context.rs b/app/buck2_action_impl/src/context.rs index 323a808e43b72..1eb547c150ee7 100644 --- a/app/buck2_action_impl/src/context.rs +++ b/app/buck2_action_impl/src/context.rs @@ -7,996 +7,35 @@ * of this source tree. 
*/ -use std::collections::HashMap; -use std::sync::Arc; - -use anyhow::Context; -use buck2_artifact::artifact::artifact_type::OutputArtifact; -use buck2_build_api::actions::impls::json::validate_json; -use buck2_build_api::artifact_groups::ArtifactGroup; -use buck2_build_api::interpreter::rule_defs::artifact::associated::AssociatedArtifacts; -use buck2_build_api::interpreter::rule_defs::artifact::output_artifact_like::OutputArtifactArg; -use buck2_build_api::interpreter::rule_defs::artifact::starlark_artifact_like::ValueAsArtifactLike; -use buck2_build_api::interpreter::rule_defs::artifact::StarlarkArtifact; -use buck2_build_api::interpreter::rule_defs::artifact::StarlarkDeclaredArtifact; -use buck2_build_api::interpreter::rule_defs::artifact::StarlarkOutputOrDeclaredArtifact; -use buck2_build_api::interpreter::rule_defs::artifact_tagging::ArtifactTag; -use buck2_build_api::interpreter::rule_defs::cmd_args::value_as::ValueAsCommandLineLike; -use buck2_build_api::interpreter::rule_defs::cmd_args::CommandLineArgLike; -use buck2_build_api::interpreter::rule_defs::cmd_args::CommandLineArtifactVisitor; -use buck2_build_api::interpreter::rule_defs::cmd_args::CommandLineContext; -use buck2_build_api::interpreter::rule_defs::cmd_args::SimpleCommandLineArtifactVisitor; -use buck2_build_api::interpreter::rule_defs::cmd_args::StarlarkCmdArgs; -use buck2_build_api::interpreter::rule_defs::cmd_args::WriteToFileMacroVisitor; -use buck2_build_api::interpreter::rule_defs::context::AnalysisActions; use buck2_build_api::interpreter::rule_defs::context::ANALYSIS_ACTIONS_METHODS_ACTIONS; -use buck2_build_api::interpreter::rule_defs::digest_config::StarlarkDigestConfig; -use buck2_build_api::interpreter::rule_defs::provider::builtin::run_info::RunInfo; -use buck2_build_api::interpreter::rule_defs::provider::builtin::worker_info::WorkerInfo; -use buck2_build_api::interpreter::rule_defs::provider::builtin::worker_run_info::WorkerRunInfo; -use buck2_build_api::interpreter::rule_defs::resolved_macro::ResolvedMacro; -use buck2_build_api::interpreter::rule_defs::transitive_set::TransitiveSet; -use buck2_build_api::interpreter::rule_defs::transitive_set::TransitiveSetDefinition; -use buck2_common::cas_digest::CasDigest; -use buck2_core::category::Category; -use buck2_core::execution_types::executor_config::RemoteExecutorUseCase; -use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; -use buck2_execute::execute::request::OutputType; -use buck2_execute::materialize::http::Checksum; -use chrono::TimeZone; -use chrono::Utc; -use dupe::Dupe; -use dupe::OptionDupedExt; -use either::Either; -use host_sharing::WeightClass; -use host_sharing::WeightPercentage; -use indexmap::indexset; -use indexmap::IndexSet; -use relative_path::RelativePathBuf; -use sha1::Digest; -use sha1::Sha1; -use starlark::environment::MethodsBuilder; -use starlark::eval::Evaluator; -use starlark::starlark_module; -use starlark::values::dict::DictOf; -use starlark::values::function::FUNCTION_TYPE; -use starlark::values::none::NoneOr; -use starlark::values::none::NoneType; -use starlark::values::typing::StarlarkIter; -use starlark::values::AllocValue; -use starlark::values::Heap; -use starlark::values::Value; -use starlark::values::ValueOf; -use starlark::values::ValueOfUnchecked; -use starlark::values::ValueTyped; -use starlark::values::ValueTypedComplex; -use starlark_map::small_map; -use starlark_map::small_map::SmallMap; -use starlark_map::small_set::SmallSet; - -use crate::actions::impls::cas_artifact::ArtifactKind; -use 
crate::actions::impls::cas_artifact::DirectoryKind; -use crate::actions::impls::cas_artifact::UnregisteredCasArtifactAction; -use crate::actions::impls::copy::CopyMode; -use crate::actions::impls::copy::UnregisteredCopyAction; -use crate::actions::impls::download_file::UnregisteredDownloadFileAction; -use crate::actions::impls::run::dep_files::RunActionDepFiles; -use crate::actions::impls::run::new_executor_preference; -use crate::actions::impls::run::MetadataParameter; -use crate::actions::impls::run::StarlarkRunActionValues; -use crate::actions::impls::run::UnregisteredRunAction; -use crate::actions::impls::symlinked_dir::UnregisteredSymlinkedDirAction; -use crate::actions::impls::write::UnregisteredWriteAction; -use crate::actions::impls::write_json::UnregisteredWriteJsonAction; -use crate::actions::impls::write_macros::UnregisteredWriteMacrosToFileAction; - -#[derive(thiserror::Error, Debug)] -enum DownloadFileError { - #[error("Must pass in at least one checksum (e.g. `sha1 = ...`)")] - MissingChecksum, -} - -#[derive(thiserror::Error, Debug)] -enum DynamicOutputError { - #[error("Output list may not be empty")] - EmptyOutput, - #[error("List of dynamic inputs may not be empty")] - EmptyDynamic, - #[error("Final argument must be a function, got `{0}`")] - NotAFunction(String), -} - -#[derive(thiserror::Error, Debug)] -enum CasArtifactError { - #[error("Not a valid RE digest: `{0}`")] - InvalidDigest(String), - #[error("is_tree and is_directory are mutually exclusive")] - TreeAndDirectory, -} - -#[derive(Debug, thiserror::Error)] -enum RunActionError { - #[error("expected at least one output artifact, did not get any")] - NoOutputsSpecified, - #[error("`weight` must be a positive integer, got `{0}`")] - InvalidWeight(i32), - #[error("`weight` and `weight_percentage` cannot both be passed")] - DuplicateWeightsSpecified, - #[error("`dep_files` value with key `{}` has an invalid count of associated outputs. Expected 1, got {}.", .key, .count)] - InvalidDepFileOutputs { key: String, count: usize }, - #[error("`dep_files` with keys `{}` and {} are using the same tag", .first, .second)] - ConflictingDepFiles { first: String, second: String }, - #[error( - "missing `metadata_path` parameter which is required when `metadata_env_var` parameter is present" - )] - MetadataPathMissing, - #[error( - "missing `metadata_env_var` parameter which is required when `metadata_path` parameter is present" - )] - MetadataEnvVarMissing, - #[error( - "Recursion limit exceeded when visiting artifacts: do you have a cycle in your inputs or outputs?" - )] - ArtifactVisitRecursionLimitExceeded, -} - -#[derive(Debug, thiserror::Error)] -enum WriteActionError { - #[error( - "Argument type attributes detected in a content to be written into a file, but support for arguments was not turned on. Use `allow_args` parameter to turn on the support for arguments." 
- )] - ArgAttrsDetectedButNotAllowed, -} -fn create_dir_tree<'v>( - eval: &mut Evaluator<'v, '_>, - this: &AnalysisActions<'v>, - output: OutputArtifactArg<'v>, - srcs: DictOf<'v, &'v str, ValueAsArtifactLike<'v>>, - copy: bool, -) -> anyhow::Result> { - // validate that the moves are valid, and move them into inputs - let action = UnregisteredSymlinkedDirAction::new(copy, srcs)?; - let inputs = action.inputs(); - let unioned_associated_artifacts = action.unioned_associated_artifacts(); - - let mut this = this.state(); - let (declaration, output_artifact) = - this.get_or_declare_output(eval, output, OutputType::Directory)?; - this.register_action(inputs, indexset![output_artifact], action, None)?; - - Ok(declaration.into_declared_artifact(unioned_associated_artifacts)) -} - -fn copy_file_impl<'v>( - eval: &mut Evaluator<'v, '_>, - this: &AnalysisActions<'v>, - dest: OutputArtifactArg<'v>, - src: ValueAsArtifactLike<'v>, - copy: CopyMode, - output_type: OutputType, -) -> anyhow::Result> { - let src = src.0; - - let artifact = src.get_artifact_group()?; - let associated_artifacts = src.get_associated_artifacts(); - let mut this = this.state(); - let (declaration, output_artifact) = this.get_or_declare_output(eval, dest, output_type)?; - - this.register_action( - indexset![artifact], - indexset![output_artifact], - UnregisteredCopyAction::new(copy), - None, - )?; - - Ok(declaration.into_declared_artifact( - associated_artifacts - .duped() - .unwrap_or_else(AssociatedArtifacts::new), - )) -} - -/// Functions to allow users to interact with the Actions registry. -/// Accessed via `ctx.actions.`. +use crate::context::copy::analysis_actions_methods_copy; +use crate::context::download::analysis_actions_methods_download; +use crate::context::dynamic_output::analysis_actions_methods_dynamic_output; +use crate::context::run::analysis_actions_methods_run; +use crate::context::unsorted::analysis_actions_methods_unsorted; +use crate::context::write::analysis_actions_methods_write; + +mod copy; +mod download; +pub(crate) mod dynamic_output; +pub(crate) mod run; +mod unsorted; +mod write; + +/// Functions to allow users to interact with the Actions registry. Accessed via +/// `ctx.actions.`. /// -/// Actions take inputs and produce outputs, mostly using the `artifact` type. -/// Most output filenames can either be artifacts created with `declare_output` or strings that are implicitly converted to output artifacts. -#[starlark_module] -fn analysis_actions_methods_actions(builder: &mut MethodsBuilder) { - /// Returns an unbound `artifact` which must be bound before analysis terminates. The usual way of binding an artifact is - /// with `ctx.actions.run`. - /// - /// To construct an artifact with the name `foo`, call `ctx.actions.declare_output("foo")`. Artifacts from a single target may not - /// have the same name, so if you then want a second artifact also named `foo` you need to supply a prefix, e.g. - /// `ctx.actions.declare_output("directory", "foo")`. The artifact will still report it has name `foo`, but will be located at - /// `directory/foo`. - /// - /// The `dir` argument should be set to `True` if the binding will be a directory. - fn declare_output<'v>( - this: &AnalysisActions<'v>, - #[starlark(require = pos)] prefix: &str, - #[starlark(require = pos)] filename: Option<&str>, - #[starlark(require = named, default = false)] dir: bool, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result { - // We take either one or two positional arguments, namely (filename) or (prefix, filename). 
- // The prefix argument is optional, but first, so we pretend the filename is optional - // and fix them up here. - let (prefix, filename) = match filename { - None => (None, prefix), - Some(filename) => (Some(prefix), filename), - }; - - let output_type = if dir { - OutputType::Directory - } else { - OutputType::FileOrDirectory - }; - let artifact = this.state().declare_output( - prefix, - filename, - output_type, - eval.call_stack_top_location(), - )?; - - Ok(StarlarkDeclaredArtifact::new( - eval.call_stack_top_location(), - artifact, - AssociatedArtifacts::new(), - )) - } - - /// Returns an `artifact` whose contents are content written as a JSON value. - /// - /// * `filename`: can be a string, or an existing artifact created with `declare_output` - /// * `content`: must be composed of the basic json types (boolean, number, string, list/tuple, dictionary) plus artifacts and command lines - /// * An artifact will be written as a string containing the path - /// * A command line will be written as a list of strings, unless `joined=True` is set, in which case it will be a string - /// * If you pass `with_inputs = True`, you'll get back a `cmd_args` that expands to the JSON file but carries all the underlying inputs as dependencies (so you don't have to use, for example, `hidden` for them to be added to an action that already receives the JSON file) - /// * `pretty` (optional): write formatted JSON (defaults to `False`) - fn write_json<'v>( - this: &AnalysisActions<'v>, - #[starlark(require = pos)] output: OutputArtifactArg<'v>, - #[starlark(require = pos)] content: Value<'v>, - #[starlark(require = named, default = false)] with_inputs: bool, - #[starlark(require = named, default = false)] pretty: bool, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { - let mut this = this.state(); - let (declaration, output_artifact) = - this.get_or_declare_output(eval, output, OutputType::File)?; - - validate_json(content)?; - this.register_action( - IndexSet::new(), - indexset![output_artifact], - UnregisteredWriteJsonAction::new(pretty), - Some(content), - )?; - - let value = declaration.into_declared_artifact(AssociatedArtifacts::new()); - // TODO(cjhopman): The with_inputs thing can go away once we have artifact dependencies (we'll still - // need the UnregisteredWriteJsonAction::cli() to represent the dependency though). - if with_inputs { - // TODO(nga): we use `AllocValue`, so this function return type for this branch - // is `write_json_cli_args`. We want just `cmd_args`, - // because users don't care about precise type. - // Do it when we migrate to new types not based on strings. - let cli = UnregisteredWriteJsonAction::cli(value.to_value(), content)?; - Ok(Either::Right(cli)) - } else { - Ok(Either::Left(value)) - } - } - - /// Returns an `artifact` whose contents are content - /// - /// * `is_executable` (optional): indicates whether the resulting file should be marked with executable permissions - /// * `allow_args` (optional): must be set to `True` if you want to write parameter arguments to the file (in particular, macros that write to file) - /// * If it is true, the result will be a pair of the `artifact` containing content and a list of artifact values that were written by macros, which should be used in hidden fields or similar - /// * `absolute` (optional): if set, this action will produce absolute paths in its output when - /// rendering artifact paths. 
You generally shouldn't use this if you plan to use this action - /// as the input for anything else, as this would effectively result in losing all shared - /// caching. - fn write<'v>( - this: &AnalysisActions<'v>, - #[starlark(require = pos)] output: OutputArtifactArg<'v>, - #[starlark(require = pos)] content: Value<'v>, - #[starlark(require = named, default = false)] is_executable: bool, - #[starlark(require = named, default = false)] allow_args: bool, - // If set, add artifacts in content as associated artifacts of the output. This will only work for bound artifacts. - #[starlark(require = named, default = false)] with_inputs: bool, - #[starlark(require = named, default = false)] absolute: bool, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result< - Either< - ValueTyped<'v, StarlarkDeclaredArtifact>, - ( - ValueTyped<'v, StarlarkDeclaredArtifact>, - Vec, - ), - >, - > { - fn count_write_to_file_macros( - args_allowed: bool, - cli: &dyn CommandLineArgLike, - ) -> anyhow::Result { - if !args_allowed && cli.contains_arg_attr() { - return Err(anyhow::anyhow!( - WriteActionError::ArgAttrsDetectedButNotAllowed - )); - } - - struct WriteToFileMacrosCounter { - count: u32, - } - - impl WriteToFileMacroVisitor for WriteToFileMacrosCounter { - fn visit_write_to_file_macro(&mut self, _m: &ResolvedMacro) -> anyhow::Result<()> { - self.count += 1; - Ok(()) - } - - fn set_current_relative_to_path( - &mut self, - _gen: &dyn Fn( - &dyn CommandLineContext, - ) -> anyhow::Result>, - ) -> anyhow::Result<()> { - Ok(()) - } - } - - let mut counter = WriteToFileMacrosCounter { count: 0 }; - cli.visit_write_to_file_macros(&mut counter)?; - Ok(counter.count) - } - - fn get_cli_inputs( - with_inputs: bool, - cli: &dyn CommandLineArgLike, - ) -> anyhow::Result> { - if !with_inputs { - return Ok(Default::default()); - } - - #[derive(Default)] - struct CommandLineInputVisitor { - inputs: SmallSet, - } - impl CommandLineArtifactVisitor for CommandLineInputVisitor { - fn visit_input(&mut self, input: ArtifactGroup, _tag: Option<&ArtifactTag>) { - self.inputs.insert(input); - } - - fn visit_output(&mut self, _artifact: OutputArtifact, _tag: Option<&ArtifactTag>) {} - } - - let mut visitor = CommandLineInputVisitor::default(); - cli.visit_artifacts(&mut visitor)?; - Ok(visitor.inputs) - } - - let mut this = this.state(); - let (declaration, output_artifact) = - this.get_or_declare_output(eval, output, OutputType::File)?; - - let (content_cli, written_macro_count, mut associated_artifacts) = - if let Some(content_arg) = content.as_command_line() { - let count = count_write_to_file_macros(allow_args, content_arg)?; - let cli_inputs = get_cli_inputs(with_inputs, content_arg)?; - (content, count, cli_inputs) - } else { - let cli = StarlarkCmdArgs::try_from_value(content)?; - let count = count_write_to_file_macros(allow_args, &cli)?; - let cli_inputs = get_cli_inputs(with_inputs, &cli)?; - (eval.heap().alloc(cli), count, cli_inputs) - }; - - let written_macro_files = if written_macro_count > 0 { - let macro_directory_path = { - // There might be several write actions at once, use write action output hash to deterministically avoid collisions for .macro files. 
- let digest = output_artifact - .get_path() - .with_full_path(|path| Sha1::digest(path.as_str().as_bytes())); - let sha = hex::encode(digest); - format!("__macros/{}", sha) - }; - - let mut written_macro_files = indexset![]; - for i in 0..written_macro_count { - let macro_file = this.declare_output( - None, - &format!("{}/{}.macro", ¯o_directory_path, i), - OutputType::File, - eval.call_stack_top_location(), - )?; - written_macro_files.insert(macro_file); - } - - let state = &mut *this; - let action = UnregisteredWriteMacrosToFileAction::new( - output_artifact - .get_path() - .with_short_path(|p| p.to_string()), - ); - state.register_action( - indexset![], - written_macro_files.iter().map(|a| a.as_output()).collect(), - action, - Some(eval.heap().alloc(content_cli)), - )?; - - written_macro_files - } else { - indexset![] - }; - - let action = { - let maybe_macro_files = if allow_args { - let mut macro_files = indexset![]; - for a in &written_macro_files { - macro_files.insert(a.dupe().ensure_bound()?.into_artifact()); - } - Some(macro_files) - } else { - None - }; - UnregisteredWriteAction { - is_executable, - macro_files: maybe_macro_files, - absolute, - } - }; - this.register_action( - indexset![], - indexset![output_artifact], - action, - Some(content_cli), - )?; - - if allow_args { - for a in &written_macro_files { - associated_artifacts.insert(ArtifactGroup::Artifact( - a.dupe().ensure_bound()?.into_artifact(), - )); - } - } - - let value = - declaration.into_declared_artifact(AssociatedArtifacts::from(associated_artifacts)); - if allow_args { - let macro_files: Vec = written_macro_files - .into_iter() - .map(|a| StarlarkDeclaredArtifact::new(None, a, AssociatedArtifacts::new())) - .collect(); - Ok(Either::Right((value, macro_files))) - } else { - // Prefer simpler API when there is no possibility for write-to-file macros to be present in a content - Ok(Either::Left(value)) - } - } - - /// Copies the source `artifact` to the destination (which can be a string representing a filename or an output `artifact`) and returns the output `artifact`. - /// The copy works for files or directories. - fn copy_file<'v>( - this: &AnalysisActions<'v>, - #[starlark(require = pos)] dest: OutputArtifactArg<'v>, - #[starlark(require = pos)] src: ValueAsArtifactLike<'v>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { - // `copy_file` can copy either a file or a directory, even though its name has the word `file` in it - copy_file_impl( - eval, - this, - dest, - src, - CopyMode::Copy, - OutputType::FileOrDirectory, - ) - } - - /// Creates a symlink to the source `artifact` at the destination (which can be a string representing a filename or an output `artifact`) and returns the output `artifact`. - /// The symlink works for files or directories. - fn symlink_file<'v>( - this: &AnalysisActions<'v>, - #[starlark(require = pos)] dest: OutputArtifactArg<'v>, - #[starlark(require = pos)] src: ValueAsArtifactLike<'v>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { - // `copy_file` can copy either a file or a directory, even though its name has the word `file` in it - copy_file_impl( - eval, - this, - dest, - src, - CopyMode::Symlink, - OutputType::FileOrDirectory, - ) - } - - /// Make a copy of a directory. 
-    fn copy_dir<'v>(
-        this: &AnalysisActions<'v>,
-        #[starlark(require = pos)] dest: OutputArtifactArg<'v>,
-        #[starlark(require = pos)] src: ValueAsArtifactLike<'v>,
-        eval: &mut Evaluator<'v, '_>,
-    ) -> anyhow::Result<ValueTyped<'v, StarlarkDeclaredArtifact>> {
-        copy_file_impl(eval, this, dest, src, CopyMode::Copy, OutputType::Directory)
-    }
-
-    /// Create a symlink to a directory.
-    fn symlink_dir<'v>(
-        this: &AnalysisActions<'v>,
-        #[starlark(require = pos)] dest: OutputArtifactArg<'v>,
-        #[starlark(require = pos)] src: ValueAsArtifactLike<'v>,
-        eval: &mut Evaluator<'v, '_>,
-    ) -> anyhow::Result<ValueTyped<'v, StarlarkDeclaredArtifact>> {
-        copy_file_impl(
-            eval,
-            this,
-            dest,
-            src,
-            CopyMode::Symlink,
-            OutputType::Directory,
-        )
-    }
-
-    /// Returns an `artifact` that is a directory containing symlinks.
-    /// The srcs must be a dictionary of path (as string, relative to the result directory) to bound `artifact`, which will be laid out in the directory.
-    fn symlinked_dir<'v>(
-        this: &AnalysisActions<'v>,
-        #[starlark(require = pos)] output: OutputArtifactArg<'v>,
-        #[starlark(require = pos)] srcs: DictOf<'v, &'v str, ValueAsArtifactLike<'v>>,
-        eval: &mut Evaluator<'v, '_>,
-    ) -> anyhow::Result<ValueTyped<'v, StarlarkDeclaredArtifact>> {
-        create_dir_tree(eval, this, output, srcs, false)
-    }
-
-    /// Returns an `artifact` which is a directory containing copied files.
-    /// The srcs must be a dictionary of path (as string, relative to the result directory) to the bound `artifact`, which will be laid out in the directory.
-    fn copied_dir<'v>(
-        this: &AnalysisActions<'v>,
-        #[starlark(require = pos)] output: OutputArtifactArg<'v>,
-        #[starlark(require = pos)] srcs: DictOf<'v, &'v str, ValueAsArtifactLike<'v>>,
-        eval: &mut Evaluator<'v, '_>,
-    ) -> anyhow::Result<ValueTyped<'v, StarlarkDeclaredArtifact>> {
-        create_dir_tree(eval, this, output, srcs, true)
-    }
-
-    /// Runs a command
-    ///
-    /// * `arguments`: must be of type `cmd_args`, or a type convertible to such (such as a list of strings and artifacts) and must contain at least one `.as_output()` artifact
-    /// * `category`: category and identifier - when used together, identify the action in Buck2's event stream, and must be unique for a given target
-    /// * `weight`: used to note how heavy the command is and will typically be set to a higher value to indicate that fewer such commands should be run in parallel (if running locally)
-    /// * `no_outputs_cleanup`: if this flag is set then Buck2 won't clean the outputs of a previous build that might be present on a disk; in which case, the command from arguments should be responsible for the cleanup (that is useful, for example, when an action is supporting incremental mode and its outputs are based on result from a previous build)
-    /// * `metadata_env_var` and `metadata_path` should be used together: both set or both unset
-    ///   * `metadata_path`: defines a path relative to the result directory for a file with action metadata, which will be created right before the command will be run.
-    ///   * Metadata contains the path relative to the Buck2 project root and hash digest for every action input (this excludes symlinks as they could be resolved by a user script if needed).
-    ///     The resolved path relative to the Buck2 project for the metadata file will be passed to the command from arguments, via the environment variable, with its name set by `metadata_env_var`
-    ///   * Both `metadata_env_var` and `metadata_path` are useful when making actions behave in an incremental manner (for details, see [Incremental Actions](https://buck2.build/docs/rule_authors/incremental_actions/))
-    /// * The `prefer_local`, `prefer_remote` and `local_only` options allow selecting where the
-    ///   action should run if the executor selected for this target is a hybrid executor.
-    ///     * All those options disable concurrent execution: the action will run on the preferred
-    ///       platform first (concurrent execution only happens with a "full" hybrid executor).
-    ///     * Execution may be retried on the "non-preferred" platform if it fails due to a
-    ///       transient error, except for `local_only`, which does not allow this.
-    ///     * If the executor selected is a remote-only executor and you use `local_only`, that's
-    ///       an error. The other options will not raise errors.
-    ///     * Setting more than one of those options is an error.
-    ///     * Those flags behave the same way as the equivalent `--prefer-remote`, `--prefer-local`
-    ///       and `--local-only` CLI flags. The CLI flags take precedence.
-    ///     * The `force_full_hybrid_if_capable` option overrides the `use_limited_hybrid` setting.
-    ///       The options listed above take precedence if set.
-    ///
-    /// When actions execute, they'll do so from the root of the repository. As they execute,
-    /// actions have exclusive access to their output directory.
-    ///
-    /// Actions also get exclusive access to a "scratch" path that is exposed via the environment
-    /// variable `BUCK_SCRATCH_PATH`. This path is expressed as a path relative to the working
-    /// directory (i.e. relative to the project). This path is guaranteed to exist when the action
-    /// executes.
-    ///
-    /// When actions run locally, the scratch path is also used as the `TMPDIR`.
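For illustration, a minimal `ctx.actions.run` call site exercising the parameters documented above (the shell script and all names are made up):

    def _impl(ctx):
        out = ctx.actions.declare_output("greeting.txt")
        ctx.actions.run(
            # "$0" is the "--" placeholder; "$1" resolves to the output path.
            cmd_args(["sh", "-c", 'echo hello > "$1"', "--", out.as_output()]),
            category = "demo",
            identifier = "greeting",
            prefer_local = True,
        )
        return [DefaultInfo(default_output = out)]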
-    fn run<'v>(
-        this: &AnalysisActions<'v>,
-        #[starlark(require = pos)] arguments: Value<'v>,
-        #[starlark(require = named)] category: String,
-        #[starlark(require = named, default = NoneOr::None)] identifier: NoneOr<String>,
-        #[starlark(require = named)] env: Option<ValueOf<'v, SmallMap<&'v str, Value<'v>>>>,
-        #[starlark(require = named, default = false)] local_only: bool,
-        #[starlark(require = named, default = false)] prefer_local: bool,
-        #[starlark(require = named, default = false)] prefer_remote: bool,
-        #[starlark(require = named, default = true)] low_pass_filter: bool,
-        #[starlark(require = named, default = false)] always_print_stderr: bool,
-        #[starlark(require = named)] weight: Option<i32>,
-        #[starlark(require = named)] weight_percentage: Option<i32>,
-        #[starlark(require = named)] dep_files: Option<DictOf<'v, &'v str, &'v ArtifactTag>>,
-        #[starlark(require = named)] metadata_env_var: Option<String>,
-        #[starlark(require = named)] metadata_path: Option<String>,
-        // TODO(scottcao): Refactor `no_outputs_cleanup` to `outputs_cleanup`
-        #[starlark(require = named, default = false)] no_outputs_cleanup: bool,
-        #[starlark(require = named, default = false)] allow_cache_upload: bool,
-        #[starlark(require = named, default = false)] allow_dep_file_cache_upload: bool,
-        #[starlark(require = named, default = false)] force_full_hybrid_if_capable: bool,
-        #[starlark(require = named)] exe: Option<
-            Either<ValueOf<'v, &'v WorkerRunInfo<'v>>, ValueOf<'v, &'v RunInfo<'v>>>,
-        >,
-        #[starlark(require = named, default = false)] unique_input_inodes: bool,
-        eval: &mut Evaluator<'v, '_>,
-    ) -> anyhow::Result<NoneType> {
-        struct RunCommandArtifactVisitor {
-            inner: SimpleCommandLineArtifactVisitor,
-            tagged_outputs: HashMap<ArtifactTag, Vec<OutputArtifact>>,
-            depth: u64,
-        }
-
-        impl RunCommandArtifactVisitor {
-            fn new() -> Self {
-                Self {
-                    inner: SimpleCommandLineArtifactVisitor::new(),
-                    tagged_outputs: HashMap::new(),
-                    depth: 0,
-                }
-            }
-        }
-
-        impl CommandLineArtifactVisitor for RunCommandArtifactVisitor {
-            fn visit_input(&mut self, input: ArtifactGroup, tag: Option<&ArtifactTag>) {
-                self.inner.visit_input(input, tag);
-            }
-
-            fn visit_output(&mut self, artifact: OutputArtifact, tag: Option<&ArtifactTag>) {
-                match tag {
-                    None => {}
-                    Some(tag) => {
-                        self.tagged_outputs
-                            .entry(tag.dupe())
-                            .or_default()
-                            .push(artifact.dupe());
-                    }
-                }
-
-                self.inner.visit_output(artifact, tag);
-            }
-
-            fn push_frame(&mut self) -> anyhow::Result<()> {
-                self.depth += 1;
-                if self.depth > 1000 {
-                    return Err(RunActionError::ArtifactVisitRecursionLimitExceeded.into());
-                }
-                Ok(())
-            }
-
-            fn pop_frame(&mut self) {
-                self.depth = self.depth.saturating_sub(1);
-            }
-        }
-
-        let executor_preference = new_executor_preference(local_only, prefer_local, prefer_remote)?;
-
-        let mut artifact_visitor = RunCommandArtifactVisitor::new();
-
-        let starlark_args = StarlarkCmdArgs::try_from_value(arguments)?;
-        starlark_args.visit_artifacts(&mut artifact_visitor)?;
-
-        let (starlark_exe, starlark_worker) = match exe {
-            Some(Either::Left(worker_run)) => {
-                let worker: ValueOf<&WorkerInfo> = worker_run.typed.worker();
-                let worker_exe = worker_run.typed.exe();
-                worker_exe.as_ref().visit_artifacts(&mut artifact_visitor)?;
-                let starlark_exe = StarlarkCmdArgs::try_from_value(worker_exe.to_value())?;
-                starlark_exe.visit_artifacts(&mut artifact_visitor)?;
-                (starlark_exe, NoneOr::Other(worker))
-            }
-            Some(Either::Right(exe)) => {
-                let starlark_exe = StarlarkCmdArgs::try_from_value(*exe)?;
-                starlark_exe.visit_artifacts(&mut artifact_visitor)?;
-                (starlark_exe, NoneOr::None)
-            }
-            None => (StarlarkCmdArgs::default(), NoneOr::None),
-        };
-
-        let weight = match (weight, weight_percentage) {
-            (None, None) => WeightClass::Permits(1),
-            (Some(v), None) => {
-                if v < 1 {
-                    return Err(RunActionError::InvalidWeight(v).into());
-                } else {
-                    WeightClass::Permits(v as usize)
-                }
-            }
-            (None, Some(v)) => WeightClass::Percentage(
-                WeightPercentage::try_new(v).context("Invalid `weight_percentage`")?,
-            ),
-            (Some(..), Some(..)) => {
-                return Err(RunActionError::DuplicateWeightsSpecified.into());
-            }
-        };
-
-        let starlark_env = match env {
-            None => Value::new_none(),
-            Some(env) => {
-                for v in env.typed.values() {
-                    v.as_command_line_err()?
-                        .visit_artifacts(&mut artifact_visitor)?;
-                }
-                env.value
-            }
-        };
-
-        let RunCommandArtifactVisitor {
-            inner: artifacts,
-            tagged_outputs,
-            depth: _,
-        } = artifact_visitor;
-
-        let mut dep_files_configuration = RunActionDepFiles::new();
-
-        if let Some(dep_files) = dep_files {
-            for (key, tag) in dep_files {
-                let tagged = tagged_outputs.get(tag);
-                let count = tagged.map_or(0, |t| t.len());
-
-                if count != 1 {
-                    return Err(RunActionError::InvalidDepFileOutputs {
-                        key: (*key).to_owned(),
-                        count,
-                    }
-                    .into());
-                }
-
-                match dep_files_configuration.labels.entry(tag.dupe()) {
-                    small_map::Entry::Vacant(v) => {
-                        v.insert(Arc::from(key));
-                    }
-                    small_map::Entry::Occupied(o) => {
-                        return Err(RunActionError::ConflictingDepFiles {
-                            first: (**o.get()).to_owned(),
-                            second: (*key).to_owned(),
-                        }
-                        .into());
-                    }
-                }
-            }
-        }
-
-        let category = Category::try_from(category)?;
-        let identifier = identifier.into_option();
-
-        let metadata_param = match (metadata_env_var, metadata_path) {
-            (Some(env_var), Some(path)) => {
-                let path: ForwardRelativePathBuf = path.try_into()?;
-                this.state().claim_output_path(eval, &path)?;
-                Ok(Some(MetadataParameter { env_var, path }))
-            }
-            (Some(_), None) => Err(anyhow::anyhow!(RunActionError::MetadataPathMissing)),
-            (None, Some(_)) => Err(anyhow::anyhow!(RunActionError::MetadataEnvVarMissing)),
-            (None, None) => Ok(None),
-        }?;
-
-        if artifacts.outputs.is_empty() {
-            return Err(RunActionError::NoOutputsSpecified.into());
-        }
-        let heap = eval.heap();
-
-        let starlark_values = heap.alloc(StarlarkRunActionValues {
-            exe: heap.alloc(starlark_exe),
-            args: heap.alloc(starlark_args),
-            env: starlark_env,
-            worker: heap.alloc(starlark_worker),
-        });
-
-        let action = UnregisteredRunAction {
-            category,
-            identifier,
-            executor_preference,
-            always_print_stderr,
-            weight,
-            low_pass_filter,
-            dep_files: dep_files_configuration,
-            metadata_param,
-            no_outputs_cleanup,
-            allow_cache_upload,
-            allow_dep_file_cache_upload,
-            force_full_hybrid_if_capable,
-            unique_input_inodes,
-        };
-        this.state().register_action(
-            artifacts.inputs,
-            artifacts.outputs,
-            action,
-            Some(starlark_values),
-        )?;
-        Ok(NoneType)
-    }
-
-    /// Downloads a URL to an output (filename as string or output artifact).
-    /// The file at the URL must have the given sha1 or the command will fail.
-    /// The optional parameter is_executable indicates whether the resulting file should be marked with executable permissions.
-    /// (Meta-internal) The optional parameter vpnless_url indicates a url from which this resource can be downloaded off VPN; this has the same restrictions as `url` above.
-    fn download_file<'v>(
-        this: &AnalysisActions<'v>,
-        #[starlark(require = pos)] output: OutputArtifactArg<'v>,
-        #[starlark(require = pos)] url: &str,
-        #[starlark(require = named, default = NoneOr::None)] vpnless_url: NoneOr<&str>,
-        #[starlark(require = named, default = NoneOr::None)] sha1: NoneOr<&str>,
-        #[starlark(require = named, default = NoneOr::None)] sha256: NoneOr<&str>,
-        #[starlark(require = named, default = false)] is_executable: bool,
-        #[starlark(require = named, default = false)] is_deferrable: bool,
-        eval: &mut Evaluator<'v, '_>,
-    ) -> anyhow::Result<ValueTyped<'v, StarlarkDeclaredArtifact>> {
-        let mut this = this.state();
-        let (declaration, output_artifact) =
-            this.get_or_declare_output(eval, output, OutputType::File)?;
-
-        let checksum = match (
-            sha1.into_option().map(Arc::from),
-            sha256.into_option().map(Arc::from),
-        ) {
-            (Some(sha1), None) => Checksum::Sha1(sha1),
-            (None, Some(sha256)) => Checksum::Sha256(sha256),
-            (Some(sha1), Some(sha256)) => Checksum::Both { sha1, sha256 },
-            (None, None) => return Err(DownloadFileError::MissingChecksum.into()),
-        };
-
-        this.register_action(
-            IndexSet::new(),
-            indexset![output_artifact],
-            UnregisteredDownloadFileAction::new(
-                checksum,
-                Arc::from(url),
-                vpnless_url.into_option().map(Arc::from),
-                is_executable,
-                is_deferrable,
-            ),
-            None,
-        )?;
-
-        Ok(declaration.into_declared_artifact(AssociatedArtifacts::new()))
-    }
-
-    /// Downloads a CAS artifact to an output
-    ///
-    /// * `digest`: must look like `SHA1:SIZE`
-    /// * `use_case`: your RE use case
-    /// * `expires_after_timestamp`: must be a UNIX timestamp. Your digest's TTL must exceed this timestamp. Your build will break once the digest expires, so make sure the expiry is long enough (preferably, in years).
-    /// * `is_executable` (optional): indicates the resulting file should be marked with executable permissions
-    fn cas_artifact<'v>(
-        this: &AnalysisActions<'v>,
-        #[starlark(require = pos)] output: OutputArtifactArg<'v>,
-        #[starlark(require = pos)] digest: &str,
-        #[starlark(require = pos)] use_case: &str,
-        #[starlark(require = named)] expires_after_timestamp: i64,
-        #[starlark(require = named, default = false)] is_executable: bool,
-        #[starlark(require = named, default = false)] is_tree: bool,
-        #[starlark(require = named, default = false)] is_directory: bool,
-        eval: &mut Evaluator<'v, '_>,
-    ) -> anyhow::Result<ValueTyped<'v, StarlarkDeclaredArtifact>> {
-        let mut registry = this.state();
-
-        let digest = CasDigest::parse_digest(digest, this.digest_config.cas_digest_config())
-            .with_context(|| CasArtifactError::InvalidDigest(digest.to_owned()))?
-            .0;
-
-        let use_case = RemoteExecutorUseCase::new(use_case.to_owned());
-
-        let expires_after_timestamp = Utc.timestamp_opt(expires_after_timestamp, 0).unwrap();
-
-        let kind = match (is_tree, is_directory) {
-            (true, true) => return Err(CasArtifactError::TreeAndDirectory.into()),
-            (false, true) => ArtifactKind::Directory(DirectoryKind::Directory),
-            (true, false) => ArtifactKind::Directory(DirectoryKind::Tree),
-            (false, false) => ArtifactKind::File,
-        };
-
-        let output_type = match kind {
-            ArtifactKind::Directory(_) => OutputType::Directory,
-            ArtifactKind::File => OutputType::File,
-        };
-        let (output_value, output_artifact) =
-            registry.get_or_declare_output(eval, output, output_type)?;
-
-        registry.register_action(
-            IndexSet::new(),
-            indexset![output_artifact],
-            UnregisteredCasArtifactAction {
-                digest,
-                re_use_case: use_case,
-                expires_after: expires_after_timestamp,
-                executable: is_executable,
-                kind,
-            },
-            None,
-        )?;
-
-        Ok(output_value.into_declared_artifact(AssociatedArtifacts::new()))
-    }
-
-    /// Creates a new transitive set. For details, see https://buck2.build/docs/rule_authors/transitive_sets/.
-    fn tset<'v>(
-        this: &AnalysisActions<'v>,
-        #[starlark(require = pos)] definition: ValueTypedComplex<'v, TransitiveSetDefinition<'v>>,
-        value: Option<Value<'v>>,
-        children: Option<ValueOfUnchecked<'v, StarlarkIter<Value<'v>>>>,
-        eval: &mut Evaluator<'v, '_>,
-    ) -> anyhow::Result<ValueTyped<'v, TransitiveSet<'v>>> {
-        let mut this = this.state();
-        this.create_transitive_set(
-            definition.to_value(),
-            value,
-            children.map(|v| v.get()),
-            eval,
-        )
-    }
-
-    /// `dynamic_output` allows a rule to use information that was not available when the rule was first run at analysis time.
-    /// Examples include things like Distributed ThinLTO (where the index file is created by another action) or OCaml builds
-    /// (where the dependencies are created by `ocamldeps`).
-    ///
-    /// The arguments are:
-    ///
-    /// * `dynamic` - a list of artifacts whose values will be available in the function. These will be built before the function is run.
-    /// * `inputs` - a container of artifacts (`cmd_args`, list of artifacts, and so on).
-    ///   * These inputs must include all the inputs that are referenced by the body of the function argument, apart from those listed in `dynamic` and `outputs`: extra inputs may be passed that are not used.
-    ///   * The inputs are used for `buck2 aquery` functionality, but do not cause speculative building. In fact, these inputs may form a cycle with other `dynamic_output` actions if they were all required.
-    ///   * In the future, it may be possible to not pass all the inputs if the repo is set to permissive mode, allowing a more powerful form of dynamic dependencies.
-    /// * `outputs` - a list of unbound artifacts (created with `declare_artifact`) which will be bound by the function.
-    /// * The function argument is given 3 arguments:
-    ///   * `ctx` (context) - which is the same as that passed to the initial rule analysis.
-    ///   * `artifacts` - using one of the artifacts from `dynamic` (example usage: `artifacts[artifact_from_dynamic])` gives an artifact value containing the methods `read_string`, `read_lines`, and `read_json` to obtain the values from the disk in various formats. Anything too complex should be piped through a Python script for transformation to JSON.
-    ///   * `outputs` - using one of the artifacts from the `dynamic_output`'s `outputs` (example usage: `outputs[artifact_from_dynamic_output_outputs]`) gives an unbounded artifact. The function argument must use its `outputs` argument to bind output artifacts, rather than reusing artifacts from the outputs passed into `dynamic_output` directly.
-    ///   * The function must call `ctx.actions` (probably `ctx.actions.run`) to bind all outputs. It can examine the values of the dynamic variables and depends on the inputs.
-    ///   * The function will usually be a `def`, as `lambda` in Starlark does not allow statements, making it quite underpowered.
-    /// For full details see https://buck2.build/docs/rule_authors/dynamic_dependencies/.
-    fn dynamic_output<'v>(
-        this: &'v AnalysisActions<'v>,
-        #[starlark(require = named)] dynamic: Vec<StarlarkArtifact>,
-        #[starlark(require = named)] inputs: Vec<StarlarkArtifact>,
-        #[starlark(require = named)] outputs: Vec<StarlarkOutputOrDeclaredArtifact>,
-        #[starlark(require = named)] f: Value<'v>,
-        heap: &'v Heap,
-    ) -> anyhow::Result<NoneType> {
-        // Parameter validation
-        let lambda_type = f.get_type();
-        if lambda_type != FUNCTION_TYPE {
-            return Err(DynamicOutputError::NotAFunction(lambda_type.to_owned()).into());
-        }
-        if dynamic.is_empty() {
-            return Err(DynamicOutputError::EmptyDynamic.into());
-        }
-        if outputs.is_empty() {
-            return Err(DynamicOutputError::EmptyOutput.into());
-        }
-
-        // Conversion
-        let dynamic = dynamic.iter().map(|x| x.artifact()).collect();
-        let inputs = inputs.iter().map(|x| x.artifact()).collect();
-        let outputs = outputs.iter().map(|x| x.0.artifact()).collect();
-
-        // Registration
-        let attributes_plugins_lambda = heap.alloc((this.attributes, this.plugins, f));
-        let mut this = this.state();
-        this.register_dynamic_output(dynamic, inputs, outputs, attributes_plugins_lambda)?;
-        Ok(NoneType)
-    }
-
-    /// Allocate a new input tag. Used with the `dep_files` argument to `run`.
-    fn artifact_tag<'v>(this: &AnalysisActions<'v>) -> anyhow::Result<ArtifactTag> {
-        let _ = this;
-        Ok(ArtifactTag::new())
-    }
-
-    /// Obtain this daemon's digest configuration. This allows rules to discover what digests the
-    /// daemon may be able to e.g. defer download because they conform to its RE backend's expected
-    /// digest format.
-    fn digest_config<'v>(this: &AnalysisActions<'v>) -> anyhow::Result<StarlarkDigestConfig> {
-        Ok(StarlarkDigestConfig {
-            digest_config: this.digest_config,
-        })
-    }
-}
-
+/// Actions take inputs and produce outputs, mostly using the `artifact` type. Most output filenames
+/// can either be artifacts created with `declare_output` or strings that are implicitly converted
+/// to output artifacts.
 pub(crate) fn init_analysis_action_methods_actions() {
-    ANALYSIS_ACTIONS_METHODS_ACTIONS.init(analysis_actions_methods_actions);
+    ANALYSIS_ACTIONS_METHODS_ACTIONS.init(|methods| {
+        analysis_actions_methods_copy(methods);
+        analysis_actions_methods_download(methods);
+        analysis_actions_methods_dynamic_output(methods);
+        analysis_actions_methods_run(methods);
+        analysis_actions_methods_unsorted(methods);
+        analysis_actions_methods_write(methods);
+    });
 }
diff --git a/app/buck2_action_impl/src/context/copy.rs b/app/buck2_action_impl/src/context/copy.rs
new file mode 100644
index 0000000000000..688aa9604cbc6
--- /dev/null
+++ b/app/buck2_action_impl/src/context/copy.rs
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use buck2_build_api::interpreter::rule_defs::artifact::associated::AssociatedArtifacts;
+use buck2_build_api::interpreter::rule_defs::artifact::output_artifact_like::OutputArtifactArg;
+use buck2_build_api::interpreter::rule_defs::artifact::starlark_artifact_like::ValueAsArtifactLike;
+use buck2_build_api::interpreter::rule_defs::artifact::starlark_declared_artifact::StarlarkDeclaredArtifact;
+use buck2_build_api::interpreter::rule_defs::context::AnalysisActions;
+use buck2_execute::execute::request::OutputType;
+use dupe::OptionDupedExt;
+use indexmap::indexset;
+use starlark::environment::MethodsBuilder;
+use starlark::eval::Evaluator;
+use starlark::starlark_module;
+use starlark::values::dict::UnpackDictEntries;
+use starlark::values::ValueTyped;
+
+use crate::actions::impls::copy::CopyMode;
+use crate::actions::impls::copy::UnregisteredCopyAction;
+use crate::actions::impls::symlinked_dir::UnregisteredSymlinkedDirAction;
+
+fn create_dir_tree<'v>(
+    eval: &mut Evaluator<'v, '_, '_>,
+    this: &AnalysisActions<'v>,
+    output: OutputArtifactArg<'v>,
+    srcs: UnpackDictEntries<&'v str, ValueAsArtifactLike<'v>>,
+    copy: bool,
+) -> anyhow::Result<ValueTyped<'v, StarlarkDeclaredArtifact>> {
+    // validate that the moves are valid, and move them into inputs
+    let action = UnregisteredSymlinkedDirAction::new(copy, srcs)?;
+    let inputs = action.inputs();
+    let unioned_associated_artifacts = action.unioned_associated_artifacts();
+
+    let mut this = this.state()?;
+    let (declaration, output_artifact) =
+        this.get_or_declare_output(eval, output, OutputType::Directory)?;
+    this.register_action(inputs, indexset![output_artifact], action, None, None)?;
+
+    Ok(declaration.into_declared_artifact(unioned_associated_artifacts))
+}
+
+fn copy_file_impl<'v>(
+    eval: &mut Evaluator<'v, '_, '_>,
+    this: &AnalysisActions<'v>,
+    dest: OutputArtifactArg<'v>,
+    src: ValueAsArtifactLike<'v>,
+    copy: CopyMode,
+    output_type: OutputType,
+) -> anyhow::Result<ValueTyped<'v, StarlarkDeclaredArtifact>> {
+    let src = src.0;
+
+    let artifact = src.get_artifact_group()?;
+    let associated_artifacts = src.get_associated_artifacts();
+    let mut this = this.state()?;
+    let (declaration, output_artifact) = this.get_or_declare_output(eval, dest, output_type)?;
+
+    this.register_action(
+        indexset![artifact],
+        indexset![output_artifact],
+        UnregisteredCopyAction::new(copy),
+        None,
+        None,
+    )?;
+
+    Ok(declaration.into_declared_artifact(
+        associated_artifacts
+            .duped()
+            .unwrap_or_else(AssociatedArtifacts::new),
+    ))
+}
+
+#[starlark_module]
+pub(crate) fn analysis_actions_methods_copy(methods: &mut MethodsBuilder) {
+    /// Copies the source `artifact` to the destination (which can be a string representing a
+    /// filename or an output `artifact`) and returns the output `artifact`. The copy works for
+    /// files or directories.
+    fn copy_file<'v>(
+        this: &AnalysisActions<'v>,
+        #[starlark(require = pos)] dest: OutputArtifactArg<'v>,
+        #[starlark(require = pos)] src: ValueAsArtifactLike<'v>,
+        eval: &mut Evaluator<'v, '_, '_>,
+    ) -> anyhow::Result<ValueTyped<'v, StarlarkDeclaredArtifact>> {
+        // `copy_file` can copy either a file or a directory, even though its name has the word
+        // `file` in it
+        copy_file_impl(
+            eval,
+            this,
+            dest,
+            src,
+            CopyMode::Copy,
+            OutputType::FileOrDirectory,
+        )
+    }
+
+    /// Creates a symlink to the source `artifact` at the destination (which can be a string
+    /// representing a filename or an output `artifact`) and returns the output `artifact`. The
+    /// symlink works for files or directories.
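+    ///
+    /// A minimal illustrative sketch (assumes `src` is an artifact already in scope, e.g. taken
+    /// from `ctx.attrs.srcs`):
+    ///
+    /// ```python
+    /// linked = ctx.actions.symlink_file("link.txt", src)
+    /// ```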
+    fn symlink_file<'v>(
+        this: &AnalysisActions<'v>,
+        #[starlark(require = pos)] dest: OutputArtifactArg<'v>,
+        #[starlark(require = pos)] src: ValueAsArtifactLike<'v>,
+        eval: &mut Evaluator<'v, '_, '_>,
+    ) -> anyhow::Result<ValueTyped<'v, StarlarkDeclaredArtifact>> {
+        // Despite the word `file` in its name, `symlink_file` can symlink either a file or a
+        // directory
+        copy_file_impl(
+            eval,
+            this,
+            dest,
+            src,
+            CopyMode::Symlink,
+            OutputType::FileOrDirectory,
+        )
+    }
+
+    /// Make a copy of a directory.
+    fn copy_dir<'v>(
+        this: &AnalysisActions<'v>,
+        #[starlark(require = pos)] dest: OutputArtifactArg<'v>,
+        #[starlark(require = pos)] src: ValueAsArtifactLike<'v>,
+        eval: &mut Evaluator<'v, '_, '_>,
+    ) -> anyhow::Result<ValueTyped<'v, StarlarkDeclaredArtifact>> {
+        copy_file_impl(eval, this, dest, src, CopyMode::Copy, OutputType::Directory)
+    }
+
+    /// Returns an `artifact` that is a directory containing symlinks.
+    /// The srcs must be a dictionary of path (as string, relative to the result directory) to bound `artifact`, which will be laid out in the directory.
+    fn symlinked_dir<'v>(
+        this: &AnalysisActions<'v>,
+        #[starlark(require = pos)] output: OutputArtifactArg<'v>,
+        #[starlark(require = pos)] srcs: UnpackDictEntries<&'v str, ValueAsArtifactLike<'v>>,
+        eval: &mut Evaluator<'v, '_, '_>,
+    ) -> anyhow::Result<ValueTyped<'v, StarlarkDeclaredArtifact>> {
+        create_dir_tree(eval, this, output, srcs, false)
+    }
+
+    /// Returns an `artifact` which is a directory containing copied files.
+    /// The srcs must be a dictionary of path (as string, relative to the result directory) to the bound `artifact`, which will be laid out in the directory.
+    fn copied_dir<'v>(
+        this: &AnalysisActions<'v>,
+        #[starlark(require = pos)] output: OutputArtifactArg<'v>,
+        #[starlark(require = pos)] srcs: UnpackDictEntries<&'v str, ValueAsArtifactLike<'v>>,
+        eval: &mut Evaluator<'v, '_, '_>,
+    ) -> anyhow::Result<ValueTyped<'v, StarlarkDeclaredArtifact>> {
+        create_dir_tree(eval, this, output, srcs, true)
+    }
+}
diff --git a/app/buck2_action_impl/src/context/download.rs b/app/buck2_action_impl/src/context/download.rs
new file mode 100644
index 0000000000000..f2236546d799f
--- /dev/null
+++ b/app/buck2_action_impl/src/context/download.rs
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::sync::Arc;
+
+use anyhow::Context;
+use buck2_build_api::interpreter::rule_defs::artifact::associated::AssociatedArtifacts;
+use buck2_build_api::interpreter::rule_defs::artifact::output_artifact_like::OutputArtifactArg;
+use buck2_build_api::interpreter::rule_defs::artifact::starlark_declared_artifact::StarlarkDeclaredArtifact;
+use buck2_build_api::interpreter::rule_defs::context::AnalysisActions;
+use buck2_common::cas_digest::CasDigest;
+use buck2_core::execution_types::executor_config::RemoteExecutorUseCase;
+use buck2_execute::execute::request::OutputType;
+use buck2_execute::materialize::http::Checksum;
+use chrono::TimeZone;
+use chrono::Utc;
+use indexmap::indexset;
+use indexmap::IndexSet;
+use starlark::environment::MethodsBuilder;
+use starlark::eval::Evaluator;
+use starlark::starlark_module;
+use starlark::values::none::NoneOr;
+use starlark::values::ValueTyped;
+
+use crate::actions::impls::cas_artifact::ArtifactKind;
+use crate::actions::impls::cas_artifact::DirectoryKind;
+use crate::actions::impls::cas_artifact::UnregisteredCasArtifactAction;
+use crate::actions::impls::download_file::UnregisteredDownloadFileAction;
+
+#[derive(buck2_error::Error, Debug)]
+enum CasArtifactError {
+    #[error("Not a valid RE digest: `{0}`")]
+    InvalidDigest(String),
+    #[error("is_tree and is_directory are mutually exclusive")]
+    TreeAndDirectory,
+}
+
+#[starlark_module]
+pub(crate) fn analysis_actions_methods_download(methods: &mut MethodsBuilder) {
+    /// Downloads a URL to an output (filename as string or output artifact). The file at the URL
+    /// must match the given `sha1` or `sha256` checksum or the command will fail. The optional
+    /// parameter is_executable indicates whether the resulting file should be marked with
+    /// executable permissions. (Meta-internal) The optional parameter vpnless_url indicates a url
+    /// from which this resource can be downloaded off VPN; this has the same restrictions as
+    /// `url` above.
+    fn download_file<'v>(
+        this: &AnalysisActions<'v>,
+        #[starlark(require = pos)] output: OutputArtifactArg<'v>,
+        #[starlark(require = pos)] url: &str,
+        #[starlark(require = named, default = NoneOr::None)] vpnless_url: NoneOr<&str>,
+        #[starlark(require = named, default = NoneOr::None)] sha1: NoneOr<&str>,
+        #[starlark(require = named, default = NoneOr::None)] sha256: NoneOr<&str>,
+        #[starlark(require = named, default = false)] is_executable: bool,
+        #[starlark(require = named, default = false)] is_deferrable: bool,
+        eval: &mut Evaluator<'v, '_, '_>,
+    ) -> anyhow::Result<ValueTyped<'v, StarlarkDeclaredArtifact>> {
+        let mut this = this.state()?;
+        let (declaration, output_artifact) =
+            this.get_or_declare_output(eval, output, OutputType::File)?;
+
+        let checksum = Checksum::new(sha1.into_option(), sha256.into_option())?;
+
+        this.register_action(
+            IndexSet::new(),
+            indexset![output_artifact],
+            UnregisteredDownloadFileAction::new(
+                checksum,
+                Arc::from(url),
+                vpnless_url.into_option().map(Arc::from),
+                is_executable,
+                is_deferrable,
+            ),
+            None,
+            None,
+        )?;
+
+        Ok(declaration.into_declared_artifact(AssociatedArtifacts::new()))
+    }
+
+    /// Downloads a CAS artifact to an output
+    ///
+    /// * `digest`: must look like `SHA1:SIZE`
+    /// * `use_case`: your RE use case
+    /// * `expires_after_timestamp`: must be a UNIX timestamp. Your digest's TTL must exceed this
+    ///   timestamp. Your build will break once the digest expires, so make sure the expiry is long
+    ///   enough (preferably, in years).
+    /// * `is_executable`: indicates the resulting file should be marked with executable
+    ///   permissions
+    /// * `is_tree`: digest must point to a blob of type
+    ///   [RE.Tree](https://fburl.com/code/95rqgju0)
+    /// * `is_directory`: digest must point to a blob of type
+    ///   [RE.Directory](https://fburl.com/code/4eg40nnp)
+    fn cas_artifact<'v>(
+        this: &AnalysisActions<'v>,
+        #[starlark(require = pos)] output: OutputArtifactArg<'v>,
+        #[starlark(require = pos)] digest: &str,
+        #[starlark(require = pos)] use_case: &str,
+        #[starlark(require = named)] expires_after_timestamp: i64,
+        #[starlark(require = named, default = false)] is_executable: bool,
+        #[starlark(require = named, default = false)] is_tree: bool,
+        #[starlark(require = named, default = false)] is_directory: bool,
+        eval: &mut Evaluator<'v, '_, '_>,
+    ) -> anyhow::Result<ValueTyped<'v, StarlarkDeclaredArtifact>> {
+        let mut registry = this.state()?;
+
+        let digest = CasDigest::parse_digest(digest, this.digest_config.cas_digest_config())
+            .with_context(|| CasArtifactError::InvalidDigest(digest.to_owned()))?
+            .0;
+
+        let use_case = RemoteExecutorUseCase::new(use_case.to_owned());
+
+        let expires_after_timestamp = Utc.timestamp_opt(expires_after_timestamp, 0).unwrap();
+
+        let kind = match (is_tree, is_directory) {
+            (true, true) => return Err(CasArtifactError::TreeAndDirectory.into()),
+            (false, true) => ArtifactKind::Directory(DirectoryKind::Directory),
+            (true, false) => ArtifactKind::Directory(DirectoryKind::Tree),
+            (false, false) => ArtifactKind::File,
+        };
+
+        let output_type = match kind {
+            ArtifactKind::Directory(_) => OutputType::Directory,
+            ArtifactKind::File => OutputType::File,
+        };
+        let (output_value, output_artifact) =
+            registry.get_or_declare_output(eval, output, output_type)?;
+
+        registry.register_action(
+            IndexSet::new(),
+            indexset![output_artifact],
+            UnregisteredCasArtifactAction {
+                digest,
+                re_use_case: use_case,
+                expires_after: expires_after_timestamp,
+                executable: is_executable,
+                kind,
+            },
+            None,
+            None,
+        )?;
+
+        Ok(output_value.into_declared_artifact(AssociatedArtifacts::new()))
+    }
+}
diff --git a/app/buck2_action_impl/src/context/dynamic_output.rs b/app/buck2_action_impl/src/context/dynamic_output.rs
new file mode 100644
index 0000000000000..501f2adb7670f
--- /dev/null
+++ b/app/buck2_action_impl/src/context/dynamic_output.rs
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::sync::Arc;
+
+use anyhow::Context;
+use buck2_artifact::actions::key::ActionIndex;
+use buck2_artifact::actions::key::ActionKey;
+use buck2_artifact::artifact::artifact_type::BoundBuildArtifact;
+use buck2_artifact::artifact::artifact_type::OutputArtifact;
+use buck2_artifact::deferred::key::DeferredHolderKey;
+use buck2_artifact::dynamic::DynamicLambdaResultsKey;
+use buck2_build_api::dynamic_value::DynamicValue;
+use buck2_build_api::interpreter::rule_defs::artifact::starlark_artifact::StarlarkArtifact;
+use buck2_build_api::interpreter::rule_defs::artifact::starlark_artifact_value::StarlarkArtifactValue;
+use buck2_build_api::interpreter::rule_defs::artifact::starlark_declared_artifact::StarlarkDeclaredArtifact;
+use buck2_build_api::interpreter::rule_defs::artifact::starlark_output_artifact::StarlarkOutputArtifact;
+use buck2_build_api::interpreter::rule_defs::artifact::unpack_artifact::UnpackArtifactOrDeclaredArtifact;
+use buck2_build_api::interpreter::rule_defs::context::AnalysisActions;
+use dupe::Dupe;
+use indexmap::IndexSet;
+use starlark::environment::MethodsBuilder;
+use starlark::starlark_module;
+use starlark::values::list_or_tuple::UnpackListOrTuple;
+use starlark::values::none::NoneType;
+use starlark::values::typing::StarlarkCallable;
+use starlark::values::FrozenValue;
+use starlark::values::ValueTyped;
+use starlark_map::small_map::SmallMap;
+
+use crate::dynamic::dynamic_actions::StarlarkDynamicActions;
+use crate::dynamic::dynamic_actions::StarlarkDynamicActionsData;
+use crate::dynamic::dynamic_value::StarlarkDynamicValue;
+use crate::dynamic::params::DynamicLambdaParams;
+use crate::dynamic::params::DynamicLambdaStaticFields;
+use crate::dynamic::storage::DynamicLambdaParamsStorageImpl;
+
+#[derive(buck2_error::Error, Debug)]
+enum DynamicOutputError {
+    #[error("Output list may not be empty")]
+    EmptyOutput,
+}
+
+pub(crate) struct DynamicActionsOutputArtifactBinder {
+    key: DeferredHolderKey,
+    index: u32,
+}
+
+impl DynamicActionsOutputArtifactBinder {
+    pub(crate) fn new(key: &DynamicLambdaResultsKey) -> Self {
+        DynamicActionsOutputArtifactBinder {
+            key: DeferredHolderKey::DynamicLambda(Arc::new(key.dupe())),
+            index: 0,
+        }
+    }
+
+    pub(crate) fn bind(&mut self, output: OutputArtifact) -> anyhow::Result<BoundBuildArtifact> {
+        // We create ActionKeys that point directly to the dynamic_lambda's
+        // output rather than our own. This saves the resolution of the key from
+        // needing to first lookup our result just to get forwarded to the lambda's result.
+        //
+        // This means that we are creating ActionKeys for the lambda and it needs to offset
+        // its key's index to account for this (see ActionRegistry where this is done).
+        //
+        // TODO(cjhopman): We should probably combine ActionRegistry and DynamicRegistry (and
+        // probably ArtifactGroupRegistry too).
+        let bound = output
+            .bind(ActionKey::new(
+                self.key.dupe(),
+                ActionIndex::new(self.index),
+            ))?
+            .dupe();
+        self.index += 1;
+        Ok(bound)
+    }
+}
+
+fn output_artifacts_to_lambda_build_artifacts(
+    dynamic_key: &DynamicLambdaResultsKey,
+    outputs: IndexSet<OutputArtifact>,
+) -> anyhow::Result<Vec<BoundBuildArtifact>> {
+    let mut bind = DynamicActionsOutputArtifactBinder::new(dynamic_key);
+
+    outputs
+        .into_iter()
+        .map(|output| bind.bind(output))
+        .collect::<anyhow::Result<_>>()
+}
+
+#[starlark_module]
+pub(crate) fn analysis_actions_methods_dynamic_output(methods: &mut MethodsBuilder) {
+    /// `dynamic_output` allows a rule to use information that was not available when the rule was
+    /// first run at analysis time. Examples include things like Distributed ThinLTO (where the
+    /// index file is created by another action) or OCaml builds (where the dependencies are
+    /// created by `ocamldep`).
+    ///
+    /// The arguments are:
+    ///
+    /// * `dynamic` - a list of artifacts whose values will be available in the function. These will
+    ///   be built before the function is run.
+    /// * `inputs` - this parameter is ignored.
+    /// * `outputs` - a list of unbound artifacts (created with `declare_artifact`) which will be
+    ///   bound by the function.
+    /// * The function argument is given 3 arguments:
+    ///   * `ctx` (context) - which is the same as that passed to the initial rule analysis.
+    ///   * `artifacts` - using one of the artifacts from `dynamic` (example usage:
+    ///     `artifacts[artifact_from_dynamic]`) gives an artifact value containing the methods
+    ///     `read_string`, `read_lines`, and `read_json` to obtain the values from the disk in
+    ///     various formats. Anything too complex should be piped through a Python script for
+    ///     transformation to JSON.
+    ///   * `outputs` - using one of the artifacts from the `dynamic_output`'s `outputs` (example
+    ///     usage: `outputs[artifact_from_dynamic_output_outputs]`) gives an unbound artifact. The
+    ///     function argument must use its `outputs` argument to bind output artifacts, rather than
+    ///     reusing artifacts from the outputs passed into `dynamic_output` directly.
+    /// * The function must call `ctx.actions` (probably `ctx.actions.run`) to bind all outputs. It
+    ///   can examine the values of the dynamic variables and depends on the inputs.
+    /// * The function will usually be a `def`, as `lambda` in Starlark does not allow statements,
+    ///   making it quite underpowered. For full details see
+    ///   https://buck2.build/docs/rule_authors/dynamic_dependencies/.
+    ///
+    /// Besides dynamic dependencies, there is a second use case for `dynamic_output`: say that you
+    /// have some output artifact, and that the analysis to produce the action that outputs that
+    /// artifact is expensive, i.e. takes a lot of CPU time; you would like to skip that work in
+    /// builds that do not actually use that artifact.
+    ///
+    /// This can be accomplished by putting the analysis for that artifact behind a `dynamic_output`
+    /// with an empty `dynamic` list. The `dynamic_output`'s function will not be run unless one of
+    /// the actions it outputs is actually requested as part of the build.
+    fn dynamic_output<'v>(
+        this: &'v AnalysisActions<'v>,
+        #[starlark(require = named)] dynamic: UnpackListOrTuple<UnpackArtifactOrDeclaredArtifact>,
+        #[starlark(require = named)] inputs: Option<
+            UnpackListOrTuple<UnpackArtifactOrDeclaredArtifact>,
+        >,
+        #[starlark(require = named)] outputs: UnpackListOrTuple<&'v StarlarkOutputArtifact>,
+        #[starlark(require = named)] f: StarlarkCallable<
+            'v,
+            (
+                FrozenValue,
+                SmallMap<StarlarkArtifact, StarlarkArtifactValue>,
+                SmallMap<StarlarkArtifact, StarlarkOutputArtifact>,
+            ),
+            NoneType,
+        >,
+    ) -> anyhow::Result<NoneType> {
+        // TODO(nga): delete.
+        let _unused = inputs;
+
+        // Parameter validation
+        if outputs.items.is_empty() {
+            return Err(DynamicOutputError::EmptyOutput.into());
+        }
+
+        // Conversion
+        let artifact_values = dynamic
+            .items
+            .iter()
+            .map(|x| x.artifact())
+            .collect::<anyhow::Result<_>>()?;
+        let outputs = outputs
+            .items
+            .iter()
+            .map(|x| x.artifact())
+            .collect::<anyhow::Result<_>>()?;
+
+        let attributes = this.attributes;
+        let plugins = this.plugins;
+
+        let mut this = this.state()?;
+
+        let execution_platform = this.actions.execution_platform.dupe();
+
+        let lambda_params_storage =
+            DynamicLambdaParamsStorageImpl::get(&mut this.analysis_value_storage)?;
+
+        let key = lambda_params_storage.next_dynamic_actions_key()?;
+        let outputs = output_artifacts_to_lambda_build_artifacts(&key, outputs)?;
+
+        // Registration
+        let lambda_params = DynamicLambdaParams {
+            attributes,
+            plugins,
+            lambda: f.erase(),
+            attr_values: None,
+            static_fields: DynamicLambdaStaticFields {
+                owner: key.owner().dupe(),
+                artifact_values,
+                dynamic_values: IndexSet::new(),
+                outputs,
+                execution_platform,
+            },
+        };
+        lambda_params_storage.set_dynamic_actions(key, lambda_params)?;
+        Ok(NoneType)
+    }
+
+    /// New version of `dynamic_output`.
+    ///
+    /// This is work in progress, and will eventually replace the old `dynamic_output`.
+    fn dynamic_output_new<'v>(
+        this: &'v AnalysisActions<'v>,
+        #[starlark(require = pos)] dynamic_actions: ValueTyped<'v, StarlarkDynamicActions<'v>>,
+    ) -> anyhow::Result<StarlarkDynamicValue> {
+        let dynamic_actions = dynamic_actions
+            .data
+            .try_borrow_mut()?
+            .take()
+            .context("dynamic_action data can be used only in one `dynamic_output_new` call")?;
+        let StarlarkDynamicActionsData {
+            attr_values,
+            callable,
+        } = dynamic_actions;
+
+        let mut this = this.state()?;
+
+        let execution_platform = this.actions.execution_platform.dupe();
+
+        let lambda_params_storage =
+            DynamicLambdaParamsStorageImpl::get(&mut this.analysis_value_storage)?;
+        let key = lambda_params_storage.next_dynamic_actions_key()?;
+
+        let attr_values = attr_values.bind(&key)?;
+
+        let outputs = attr_values.outputs().into_iter().collect();
+        let artifact_values = attr_values.artifact_values();
+        let dynamic_values = attr_values.dynamic_values();
+
+        // Registration
+        let lambda_params = DynamicLambdaParams {
+            attributes: None,
+            plugins: None,
+            lambda: callable.implementation.erase().to_callable(),
+            attr_values: Some((attr_values, callable)),
+            static_fields: DynamicLambdaStaticFields {
+                owner: key.owner().dupe(),
+                artifact_values,
+                dynamic_values,
+                outputs,
+                execution_platform,
+            },
+        };
+
+        lambda_params_storage.set_dynamic_actions(key.dupe(), lambda_params)?;
+
+        Ok(StarlarkDynamicValue {
+            dynamic_value: DynamicValue {
+                dynamic_lambda_results_key: key,
+            },
+        })
+    }
+}
diff --git a/app/buck2_action_impl/src/context/run.rs b/app/buck2_action_impl/src/context/run.rs
new file mode 100644
index 0000000000000..d48a979cca6a7
--- /dev/null
+++ b/app/buck2_action_impl/src/context/run.rs
@@ -0,0 +1,356 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::collections::HashMap;
+use std::sync::Arc;
+
+use anyhow::Context;
+use buck2_artifact::artifact::artifact_type::OutputArtifact;
+use buck2_build_api::artifact_groups::ArtifactGroup;
+use buck2_build_api::interpreter::rule_defs::artifact_tagging::ArtifactTag;
+use buck2_build_api::interpreter::rule_defs::cmd_args::value_as::ValueAsCommandLineLike;
+use buck2_build_api::interpreter::rule_defs::cmd_args::CommandLineArgLike;
+use buck2_build_api::interpreter::rule_defs::cmd_args::CommandLineArtifactVisitor;
+use buck2_build_api::interpreter::rule_defs::cmd_args::SimpleCommandLineArtifactVisitor;
+use buck2_build_api::interpreter::rule_defs::cmd_args::StarlarkCmdArgs;
+use buck2_build_api::interpreter::rule_defs::cmd_args::StarlarkCommandLineValueUnpack;
+use buck2_build_api::interpreter::rule_defs::context::AnalysisActions;
+use buck2_build_api::interpreter::rule_defs::provider::builtin::run_info::RunInfo;
+use buck2_build_api::interpreter::rule_defs::provider::builtin::worker_info::WorkerInfo;
+use buck2_build_api::interpreter::rule_defs::provider::builtin::worker_run_info::WorkerRunInfo;
+use buck2_core::category::CategoryRef;
+use buck2_core::execution_types::executor_config::RemoteExecutorDependency;
+use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf;
+use dupe::Dupe;
+use either::Either;
+use host_sharing::WeightClass;
+use host_sharing::WeightPercentage;
+use starlark::environment::MethodsBuilder;
+use starlark::eval::Evaluator;
+use starlark::starlark_module;
+use starlark::values::dict::UnpackDictEntries;
+use starlark::values::list::UnpackList;
+use starlark::values::none::NoneOr;
+use starlark::values::none::NoneType;
+use starlark::values::typing::StarlarkCallable;
+use starlark::values::StringValue;
+use starlark::values::UnpackAndDiscard;
+use starlark::values::ValueOf;
+use starlark::values::ValueTypedComplex;
+use starlark_map::small_map;
+use starlark_map::small_map::SmallMap;
+
+use crate::actions::impls::run::dep_files::RunActionDepFiles;
+use crate::actions::impls::run::new_executor_preference;
+use crate::actions::impls::run::MetadataParameter;
+use crate::actions::impls::run::StarlarkRunActionValues;
+use crate::actions::impls::run::UnregisteredRunAction;
+
+#[derive(Debug, buck2_error::Error)]
+pub(crate) enum RunActionError {
+    #[error("expected at least one output artifact, did not get any")]
+    NoOutputsSpecified,
+    #[error("`weight` must be a positive integer, got `{0}`")]
+    InvalidWeight(i32),
+    #[error("`weight` and `weight_percentage` cannot both be passed")]
+    DuplicateWeightsSpecified,
+    #[error("`dep_files` value with key `{}` has an invalid count of associated outputs. Expected 1, got {}.", .key, .count)]
+    InvalidDepFileOutputs { key: String, count: usize },
+    #[error("`dep_files` with keys `{}` and `{}` are using the same tag", .first, .second)]
+    ConflictingDepFiles { first: String, second: String },
+    #[error(
+        "missing `metadata_path` parameter which is required when `metadata_env_var` parameter is present"
+    )]
+    MetadataPathMissing,
+    #[error(
+        "missing `metadata_env_var` parameter which is required when `metadata_path` parameter is present"
+    )]
+    MetadataEnvVarMissing,
+    #[error(
+        "Recursion limit exceeded when visiting artifacts: do you have a cycle in your inputs or outputs?"
+    )]
+    ArtifactVisitRecursionLimitExceeded,
+}
+
+#[starlark_module]
+pub(crate) fn analysis_actions_methods_run(methods: &mut MethodsBuilder) {
+    /// Runs a command
+    ///
+    /// * `arguments`: must be of type `cmd_args`, or a type convertible to such (such as a list of
+    ///   strings and artifacts) and must contain at least one `.as_output()` artifact
+    /// * `category`: category and identifier - when used together, identify the action in Buck2's
+    ///   event stream, and must be unique for a given target
+    /// * `weight`: used to note how heavy the command is and will typically be set to a higher
+    ///   value to indicate that fewer such commands should be run in parallel (if running locally)
+    /// * `no_outputs_cleanup`: if this flag is set then Buck2 won't clean the outputs of a previous
+    ///   build that might be present on a disk; in which case, command from arguments should be
+    ///   responsible for the cleanup (that is useful, for example, when an action is supporting
+    ///   incremental mode and its outputs are based on result from a previous build)
+    /// * `metadata_env_var` and `metadata_path` should be used together: both set or both unset
+    ///   * `metadata_path`: defines a path relative to the result directory for a file with
+    ///     action metadata, which will be created right before the command will be run.
+    ///   * Metadata contains the path relative to the Buck2 project root and hash digest for
+    ///     every action input (this excludes symlinks as they could be resolved by a user script
+    ///     if needed). The resolved path relative to the Buck2 project for the metadata file will
+    ///     be passed to command from arguments, via the environment variable, with its name set
+    ///     by `metadata_env_var`
+    ///   * Both `metadata_env_var` and `metadata_path` are useful when making actions behave in
+    ///     an incremental manner (for details, see [Incremental
+    ///     Actions](https://buck2.build/docs/rule_authors/incremental_actions/))
+    /// * The `prefer_local`, `prefer_remote` and `local_only` options allow selecting where the
+    ///   action should run if the executor selected for this target is a hybrid executor.
+    ///   * All those options disable concurrent execution: the action will run on the preferred
+    ///     platform first (concurrent execution only happens with a "full" hybrid executor).
+    ///   * Execution may be retried on the "non-preferred" platform if it fails due to a
+    ///     transient error, except for `local_only`, which does not allow this.
+    ///   * If the executor selected is a remote-only executor and you use `local_only`, that's an
+    ///     error. The other options will not raise errors.
+    ///   * Setting more than one of those options is an error.
+    ///   * Those flags behave the same way as the equivalent `--prefer-remote`, `--prefer-local`
+    ///     and `--local-only` CLI flags. The CLI flags take precedence.
+    ///   * The `force_full_hybrid_if_capable` option overrides the `use_limited_hybrid` setting.
+    ///     The options listed above take precedence if set.
+    /// * `remote_execution_dependencies`: list of dependencies which are passed to Remote
+    ///   Execution. Each dependency is a dictionary with the following keys:
+    ///   * `smc_tier`: name of the SMC tier to call by RE Scheduler.
+    ///   * `id`: name of the dependency.
+    ///
+    /// When actions execute, they'll do so from the root of the repository. As they execute,
+    /// actions have exclusive access to their output directory.
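+    ///
+    /// As an illustrative sketch (`my_script` is an assumed artifact already in scope):
+    ///
+    /// ```python
+    /// out = ctx.actions.declare_output("out.txt")
+    /// ctx.actions.run(
+    ///     cmd_args(my_script, out.as_output()),
+    ///     category = "demo",
+    /// )
+    /// ```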
+    ///
+    /// Actions also get exclusive access to a "scratch" path that is exposed via the environment
+    /// variable `BUCK_SCRATCH_PATH`. This path is expressed as a path relative to the working
+    /// directory (i.e. relative to the project). This path is guaranteed to exist when the action
+    /// executes.
+    ///
+    /// When actions run locally, the scratch path is also used as the `TMPDIR`.
+    fn run<'v>(
+        this: &AnalysisActions<'v>,
+        #[starlark(require = pos)] arguments: StarlarkCommandLineValueUnpack<'v>,
+        #[starlark(require = named)] category: StringValue<'v>,
+        #[starlark(require = named, default = NoneOr::None)] identifier: NoneOr<StringValue<'v>>,
+        #[starlark(require = named)] env: Option<
+            ValueOf<'v, UnpackDictEntries<UnpackAndDiscard<StringValue<'v>>, ValueAsCommandLineLike<'v>>>,
+        >,
+        #[starlark(require = named, default = false)] local_only: bool,
+        #[starlark(require = named, default = false)] prefer_local: bool,
+        #[starlark(require = named, default = false)] prefer_remote: bool,
+        #[starlark(require = named, default = true)] low_pass_filter: bool,
+        #[starlark(require = named, default = false)] always_print_stderr: bool,
+        #[starlark(require = named)] weight: Option<i32>,
+        #[starlark(require = named)] weight_percentage: Option<i32>,
+        #[starlark(require = named)] dep_files: Option<SmallMap<&'v str, &'v ArtifactTag>>,
+        #[starlark(require = named)] metadata_env_var: Option<String>,
+        #[starlark(require = named)] metadata_path: Option<String>,
+        // TODO(scottcao): Refactor `no_outputs_cleanup` to `outputs_cleanup`
+        #[starlark(require = named, default = false)] no_outputs_cleanup: bool,
+        #[starlark(require = named, default = false)] allow_cache_upload: bool,
+        #[starlark(require = named, default = false)] allow_dep_file_cache_upload: bool,
+        #[starlark(require = named, default = false)] force_full_hybrid_if_capable: bool,
+        #[starlark(require = named)] exe: Option<
+            Either<ValueOf<'v, &'v WorkerRunInfo<'v>>, ValueOf<'v, &'v RunInfo<'v>>>,
+        >,
+        #[starlark(require = named, default = false)] unique_input_inodes: bool,
+        #[starlark(require = named)] error_handler: Option<StarlarkCallable<'v>>,
+        eval: &mut Evaluator<'v, '_, '_>,
+        #[starlark(require = named, default=UnpackList::default())]
+        remote_execution_dependencies: UnpackList<SmallMap<String, String>>,
+    ) -> anyhow::Result<NoneType> {
+        struct RunCommandArtifactVisitor {
+            inner: SimpleCommandLineArtifactVisitor,
+            tagged_outputs: HashMap<ArtifactTag, Vec<OutputArtifact>>,
+            depth: u64,
+        }
+
+        impl RunCommandArtifactVisitor {
+            fn new() -> Self {
+                Self {
+                    inner: SimpleCommandLineArtifactVisitor::new(),
+                    tagged_outputs: HashMap::new(),
+                    depth: 0,
+                }
+            }
+        }
+
+        impl CommandLineArtifactVisitor for RunCommandArtifactVisitor {
+            fn visit_input(&mut self, input: ArtifactGroup, tag: Option<&ArtifactTag>) {
+                self.inner.visit_input(input, tag);
+            }
+
+            fn visit_output(&mut self, artifact: OutputArtifact, tag: Option<&ArtifactTag>) {
+                match tag {
+                    None => {}
+                    Some(tag) => {
+                        self.tagged_outputs
+                            .entry(tag.dupe())
+                            .or_default()
+                            .push(artifact.dupe());
+                    }
+                }
+
+                self.inner.visit_output(artifact, tag);
+            }
+
+            fn push_frame(&mut self) -> anyhow::Result<()> {
+                self.depth += 1;
+                if self.depth > 1000 {
+                    return Err(RunActionError::ArtifactVisitRecursionLimitExceeded.into());
+                }
+                Ok(())
+            }
+
+            fn pop_frame(&mut self) {
+                self.depth = self.depth.saturating_sub(1);
+            }
+        }
+
+        let executor_preference = new_executor_preference(local_only, prefer_local, prefer_remote)?;
+
+        let mut artifact_visitor = RunCommandArtifactVisitor::new();
+
+        let starlark_args = StarlarkCmdArgs::try_from_value_typed(arguments)?;
+        starlark_args.visit_artifacts(&mut artifact_visitor)?;
+
+        let (starlark_exe, starlark_worker) = match exe {
+            Some(Either::Left(worker_run)) => {
+                let worker: ValueTypedComplex<WorkerInfo> = worker_run.typed.worker();
+                let worker_exe = worker_run.typed.exe();
+                worker_exe.as_ref().visit_artifacts(&mut artifact_visitor)?;
+                let starlark_exe = StarlarkCmdArgs::try_from_value(worker_exe.to_value())?;
+                starlark_exe.visit_artifacts(&mut artifact_visitor)?;
+                (starlark_exe, Some(worker))
+            }
+            Some(Either::Right(exe)) => {
+                let starlark_exe = StarlarkCmdArgs::try_from_value(*exe)?;
+                starlark_exe.visit_artifacts(&mut artifact_visitor)?;
+                (starlark_exe, None)
+            }
+            None => (StarlarkCmdArgs::default(), None),
+        };
+
+        let weight = match (weight, weight_percentage) {
+            (None, None) => WeightClass::Permits(1),
+            (Some(v), None) => {
+                if v < 1 {
+                    return Err(RunActionError::InvalidWeight(v).into());
+                } else {
+                    WeightClass::Permits(v as usize)
+                }
+            }
+            (None, Some(v)) => WeightClass::Percentage(
+                WeightPercentage::try_new(v).context("Invalid `weight_percentage`")?,
+            ),
+            (Some(..), Some(..)) => {
+                return Err(RunActionError::DuplicateWeightsSpecified.into());
+            }
+        };
+
+        let starlark_env = match &env {
+            None => None,
+            Some(env) => {
+                for (_k, v) in &env.typed.entries {
+                    v.0.visit_artifacts(&mut artifact_visitor)?;
+                }
+                Some(env.as_unchecked().cast())
+            }
+        };
+
+        let RunCommandArtifactVisitor {
+            inner: artifacts,
+            tagged_outputs,
+            depth: _,
+        } = artifact_visitor;
+
+        let mut dep_files_configuration = RunActionDepFiles::new();
+
+        if let Some(dep_files) = dep_files {
+            for (key, tag) in dep_files {
+                let tagged = tagged_outputs.get(tag);
+                let count = tagged.map_or(0, |t| t.len());
+
+                if count != 1 {
+                    return Err(RunActionError::InvalidDepFileOutputs {
+                        key: (*key).to_owned(),
+                        count,
+                    }
+                    .into());
+                }
+
+                match dep_files_configuration.labels.entry(tag.dupe()) {
+                    small_map::Entry::Vacant(v) => {
+                        v.insert(Arc::from(key));
+                    }
+                    small_map::Entry::Occupied(o) => {
+                        return Err(RunActionError::ConflictingDepFiles {
+                            first: (**o.get()).to_owned(),
+                            second: (*key).to_owned(),
+                        }
+                        .into());
+                    }
+                }
+            }
+        }
+
+        let metadata_param = match (metadata_env_var, metadata_path) {
+            (Some(env_var), Some(path)) => {
+                let path: ForwardRelativePathBuf = path.try_into()?;
+                this.state()?.claim_output_path(eval, &path)?;
+                Ok(Some(MetadataParameter { env_var, path }))
+            }
+            (Some(_), None) => Err(anyhow::anyhow!(RunActionError::MetadataPathMissing)),
+            (None, Some(_)) => Err(anyhow::anyhow!(RunActionError::MetadataEnvVarMissing)),
+            (None, None) => Ok(None),
+        }?;
+
+        if artifacts.outputs.is_empty() {
+            return Err(RunActionError::NoOutputsSpecified.into());
+        }
+        let heap = eval.heap();
+
+        let starlark_values = heap.alloc_complex(StarlarkRunActionValues {
+            exe: heap.alloc_typed(starlark_exe),
+            args: heap.alloc_typed(starlark_args),
+            env: starlark_env,
+            worker: starlark_worker,
+            category: {
+                CategoryRef::new(category.as_str())?;
+                category
+            },
+            identifier: identifier.into_option(),
+        });
+
+        let re_dependencies = remote_execution_dependencies
+            .into_iter()
+            .map(RemoteExecutorDependency::parse)
+            .collect::<anyhow::Result<Vec<RemoteExecutorDependency>>>()?;
+
+        let action = UnregisteredRunAction {
+            executor_preference,
+            always_print_stderr,
+            weight,
+            low_pass_filter,
+            dep_files: dep_files_configuration,
+            metadata_param,
+            no_outputs_cleanup,
+            allow_cache_upload,
+            allow_dep_file_cache_upload,
+            force_full_hybrid_if_capable,
+            unique_input_inodes,
+            remote_execution_dependencies: re_dependencies,
+        };
+        this.state()?.register_action(
+            artifacts.inputs,
+            artifacts.outputs,
+            action,
+            Some(starlark_values),
+            error_handler,
+        )?;
+        Ok(NoneType)
+    }
+}
diff --git a/app/buck2_action_impl/src/context/unsorted.rs b/app/buck2_action_impl/src/context/unsorted.rs
new file mode 100644
index 0000000000000..4888b50a20f9f
--- /dev/null
+++ b/app/buck2_action_impl/src/context/unsorted.rs
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use buck2_build_api::interpreter::rule_defs::artifact::associated::AssociatedArtifacts;
+use buck2_build_api::interpreter::rule_defs::artifact::starlark_declared_artifact::StarlarkDeclaredArtifact;
+use buck2_build_api::interpreter::rule_defs::artifact_tagging::ArtifactTag;
+use buck2_build_api::interpreter::rule_defs::context::AnalysisActions;
+use buck2_build_api::interpreter::rule_defs::digest_config::StarlarkDigestConfig;
+use buck2_build_api::interpreter::rule_defs::transitive_set::FrozenTransitiveSetDefinition;
+use buck2_build_api::interpreter::rule_defs::transitive_set::TransitiveSet;
+use buck2_execute::execute::request::OutputType;
+use starlark::environment::MethodsBuilder;
+use starlark::eval::Evaluator;
+use starlark::starlark_module;
+use starlark::values::typing::StarlarkIter;
+use starlark::values::FrozenValueTyped;
+use starlark::values::Value;
+use starlark::values::ValueOfUnchecked;
+use starlark::values::ValueTyped;
+
+#[starlark_module]
+pub(crate) fn analysis_actions_methods_unsorted(builder: &mut MethodsBuilder) {
+    /// Returns an unbound `artifact`, representing where a file will go, which must be bound before analysis terminates.
+    /// The usual way of binding an artifact is with `ctx.actions.run`. As an example:
+    ///
+    /// ```python
+    /// my_output = ctx.actions.declare_output("output.o")
+    /// ctx.actions.run(["gcc", "-c", my_source, "-o", my_output.as_output()], category = "compile")
+    /// ```
+    ///
+    /// This snippet declares an output with the filename `output.o` (it will be located in the output directory
+    /// for this target). Note the use of `as_output` to tag this artifact as being an output in
+    /// the action. After binding the artifact you can subsequently use `my_output` as either an
+    /// input for subsequent actions, or as the result in a provider.
+    ///
+    /// Artifacts from a single target may not have the same name, so if you then want a second
+    /// artifact also named `output.o` you need to supply a prefix, e.g.
+    /// `ctx.actions.declare_output("directory", "output.o")`. The artifact will still report having
+    /// name `output.o`, but will be located at `directory/output.o`.
+    ///
+    /// The `dir` argument should be set to `True` if the binding will be a directory.
+    fn declare_output<'v>(
+        this: &AnalysisActions<'v>,
+        #[starlark(require = pos)] prefix: &str,
+        #[starlark(require = pos)] filename: Option<&str>,
+        #[starlark(require = named, default = false)] dir: bool,
+        eval: &mut Evaluator<'v, '_, '_>,
+    ) -> anyhow::Result<StarlarkDeclaredArtifact> {
+        // We take either one or two positional arguments, namely (filename) or (prefix, filename).
+        // The prefix argument is optional, but first, so we pretend the filename is optional
+        // and fix them up here.
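+        // For example, `declare_output("out.txt")` arrives here as prefix = "out.txt" and
+        // filename = None, while `declare_output("dir", "out.txt")` arrives as prefix = "dir"
+        // and filename = Some("out.txt").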
+        let (prefix, filename) = match filename {
+            None => (None, prefix),
+            Some(filename) => (Some(prefix), filename),
+        };
+
+        let output_type = if dir {
+            OutputType::Directory
+        } else {
+            OutputType::FileOrDirectory
+        };
+        let artifact = this.state()?.declare_output(
+            prefix,
+            filename,
+            output_type,
+            eval.call_stack_top_location(),
+        )?;
+
+        Ok(StarlarkDeclaredArtifact::new(
+            eval.call_stack_top_location(),
+            artifact,
+            AssociatedArtifacts::new(),
+        ))
+    }
+
+    /// Creates a new transitive set. For details, see https://buck2.build/docs/rule_authors/transitive_sets/.
+    fn tset<'v>(
+        this: &AnalysisActions<'v>,
+        #[starlark(require = pos)] definition: FrozenValueTyped<'v, FrozenTransitiveSetDefinition>,
+        #[starlark(require = named)] value: Option<Value<'v>>,
+        #[starlark(require = named)] children: Option<
+            ValueOfUnchecked<'v, StarlarkIter<Value<'v>>>,
+        >,
+        eval: &mut Evaluator<'v, '_, '_>,
+    ) -> starlark::Result<ValueTyped<'v, TransitiveSet<'v>>> {
+        let mut this = this.state()?;
+        this.create_transitive_set(definition, value, children.map(|v| v.get()), eval)
+    }
+
+    /// Allocate a new input tag. Used with the `dep_files` argument to `run`.
+    fn artifact_tag<'v>(this: &AnalysisActions<'v>) -> anyhow::Result<ArtifactTag> {
+        let _ = this;
+        Ok(ArtifactTag::new())
+    }
+
+    /// Obtain this daemon's digest configuration. This allows rules to discover what digests the
+    /// daemon may be able to e.g. defer download because they conform to its RE backend's expected
+    /// digest format.
+    fn digest_config<'v>(this: &AnalysisActions<'v>) -> anyhow::Result<StarlarkDigestConfig> {
+        Ok(StarlarkDigestConfig {
+            digest_config: this.digest_config,
+        })
+    }
+}
diff --git a/app/buck2_action_impl/src/context/write.rs b/app/buck2_action_impl/src/context/write.rs
new file mode 100644
index 0000000000000..ab2e8c7794440
--- /dev/null
+++ b/app/buck2_action_impl/src/context/write.rs
@@ -0,0 +1,320 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use buck2_artifact::artifact::artifact_type::OutputArtifact;
+use buck2_build_api::actions::impls::json::JsonUnpack;
+use buck2_build_api::artifact_groups::ArtifactGroup;
+use buck2_build_api::interpreter::rule_defs::artifact::associated::AssociatedArtifacts;
+use buck2_build_api::interpreter::rule_defs::artifact::output_artifact_like::OutputArtifactArg;
+use buck2_build_api::interpreter::rule_defs::artifact::starlark_declared_artifact::StarlarkDeclaredArtifact;
+use buck2_build_api::interpreter::rule_defs::artifact_tagging::ArtifactTag;
+use buck2_build_api::interpreter::rule_defs::cmd_args::value::CommandLineArg;
+use buck2_build_api::interpreter::rule_defs::cmd_args::CommandLineArgLike;
+use buck2_build_api::interpreter::rule_defs::cmd_args::CommandLineArtifactVisitor;
+use buck2_build_api::interpreter::rule_defs::cmd_args::CommandLineContext;
+use buck2_build_api::interpreter::rule_defs::cmd_args::StarlarkCmdArgs;
+use buck2_build_api::interpreter::rule_defs::cmd_args::StarlarkCommandLineValueUnpack;
+use buck2_build_api::interpreter::rule_defs::cmd_args::WriteToFileMacroVisitor;
+use buck2_build_api::interpreter::rule_defs::context::AnalysisActions;
+use buck2_build_api::interpreter::rule_defs::resolved_macro::ResolvedMacro;
+use buck2_execute::execute::request::OutputType;
+use dupe::Dupe;
+use either::Either;
+use indexmap::indexset;
+use indexmap::IndexSet;
+use relative_path::RelativePathBuf;
+use sha1::Digest;
+use sha1::Sha1;
+use starlark::environment::MethodsBuilder;
+use starlark::eval::Evaluator;
+use starlark::starlark_module;
+use starlark::values::type_repr::StarlarkTypeRepr;
+use starlark::values::AllocValue;
+use starlark::values::UnpackValue;
+use starlark::values::ValueOf;
+use starlark::values::ValueTyped;
+use starlark_map::small_set::SmallSet;
+
+use crate::actions::impls::write::UnregisteredWriteAction;
+use crate::actions::impls::write_json::UnregisteredWriteJsonAction;
+use crate::actions::impls::write_macros::UnregisteredWriteMacrosToFileAction;
+
+#[derive(Debug, buck2_error::Error)]
+enum WriteActionError {
+    #[error(
+        "Argument type attributes detected in content to be written into a file, but support for arguments was not turned on. Use the `allow_args` parameter to turn on support for arguments."
+    )]
+    ArgAttrsDetectedButNotAllowed,
+}
+
+#[derive(UnpackValue, StarlarkTypeRepr)]
+enum WriteContentArg<'v> {
+    CommandLineArg(CommandLineArg<'v>),
+    StarlarkCommandLineValueUnpack(StarlarkCommandLineValueUnpack<'v>),
+}
+
+#[starlark_module]
+pub(crate) fn analysis_actions_methods_write(methods: &mut MethodsBuilder) {
+    /// Returns an `artifact` whose contents are `content` written as a JSON value.
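+    ///
+    /// A small usage sketch (hypothetical rule code):
+    ///
+    /// ```python
+    /// manifest = ctx.actions.write_json("manifest.json", {"srcs": ctx.attrs.srcs}, pretty = True)
+    /// ```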
+    ///
+    /// * `output`: can be a string, or an existing artifact created with `declare_output`
+    /// * `content`: must be composed of the basic json types (boolean, number, string, list/tuple,
+    ///   dictionary) plus artifacts and command lines
+    ///     * An artifact will be written as a string containing the path
+    ///     * A command line will be written as a list of strings, unless `joined=True` is set, in
+    ///       which case it will be a string
+    /// * If you pass `with_inputs = True`, you'll get back a `cmd_args` that expands to the JSON
+    ///   file but carries all the underlying inputs as dependencies (so you don't have to use, for
+    ///   example, `hidden` for them to be added to an action that already receives the JSON file)
+    /// * `pretty` (optional): write formatted JSON (defaults to `False`)
+    /// * `absolute` (optional): if set, this action will produce absolute paths in its output when
+    ///   rendering artifact paths. You generally shouldn't use this if you plan to use this action
+    ///   as the input for anything else, as this would effectively result in losing all shared
+    ///   caching. (defaults to `False`)
+    fn write_json<'v>(
+        this: &AnalysisActions<'v>,
+        #[starlark(require = pos)] output: OutputArtifactArg<'v>,
+        #[starlark(require = pos)] content: ValueOf<'v, JsonUnpack<'v>>,
+        #[starlark(require = named, default = false)] with_inputs: bool,
+        #[starlark(require = named, default = false)] pretty: bool,
+        #[starlark(require = named, default = false)] absolute: bool,
+        eval: &mut Evaluator<'v, '_, '_>,
+    ) -> anyhow::Result<Either<ValueTyped<'v, StarlarkDeclaredArtifact>, impl AllocValue<'v>>> {
+        let mut this = this.state()?;
+        let (declaration, output_artifact) =
+            this.get_or_declare_output(eval, output, OutputType::File)?;
+
+        this.register_action(
+            IndexSet::new(),
+            indexset![output_artifact],
+            UnregisteredWriteJsonAction::new(pretty, absolute),
+            Some(content.value),
+            None,
+        )?;
+
+        let value = declaration.into_declared_artifact(AssociatedArtifacts::new());
+        // TODO(cjhopman): The with_inputs thing can go away once we have artifact dependencies
+        // (we'll still need the UnregisteredWriteJsonAction::cli() to represent the dependency
+        // though).
+        if with_inputs {
+            // TODO(nga): we use `AllocValue`, so this function's return type for this branch
+            //   is `write_json_cli_args`. We want just `cmd_args`,
+            //   because users don't care about the precise type.
+            //   Do it when we migrate to new types not based on strings.
+            let cli = UnregisteredWriteJsonAction::cli(value.to_value(), content.value)?;
+            Ok(Either::Right(cli))
+        } else {
+            Ok(Either::Left(value))
+        }
+    }
+
+    /// Returns an `artifact` whose contents are `content`.
+    ///
+    /// * `is_executable` (optional): indicates whether the resulting file should be marked with
+    ///   executable permissions
+    /// * `allow_args` (optional): must be set to `True` if you want to write parameter arguments
+    ///   to the file (in particular, macros that write to file)
+    ///     * If it is true, the result will be a pair of the `artifact` containing content and a
+    ///       list of artifact values that were written by macros, which should be used in hidden
+    ///       fields or similar
+    /// * `absolute` (optional): if set, this action will produce absolute paths in its output when
+    ///   rendering artifact paths. You generally shouldn't use this if you plan to use this action
+    ///   as the input for anything else, as this would effectively result in losing all shared
+    ///   caching.
+    ///
+    /// The content is often a string, but can be any `ArgLike` value.
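+    /// For instance (a sketch):
+    ///
+    /// ```python
+    /// script = ctx.actions.write("gen.sh", cmd_args("echo", "hi"), is_executable = True)
+    /// ```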
+    /// As the sketch suggests, this is occasionally useful for generating scripts to run as
+    /// part of another action. `cmd_args` in the content are newline separated unless another
+    /// delimiter is explicitly specified.
+    fn write<'v>(
+        this: &AnalysisActions<'v>,
+        #[starlark(require = pos)] output: OutputArtifactArg<'v>,
+        #[starlark(require = pos)] content: WriteContentArg<'v>,
+        #[starlark(require = named, default = false)] is_executable: bool,
+        #[starlark(require = named, default = false)] allow_args: bool,
+        // If set, add artifacts in content as associated artifacts of the output. This will only
+        // work for bound artifacts.
+        #[starlark(require = named, default = false)] with_inputs: bool,
+        #[starlark(require = named, default = false)] absolute: bool,
+        eval: &mut Evaluator<'v, '_, '_>,
+    ) -> anyhow::Result<
+        Either<
+            ValueTyped<'v, StarlarkDeclaredArtifact>,
+            (
+                ValueTyped<'v, StarlarkDeclaredArtifact>,
+                Vec<StarlarkDeclaredArtifact>,
+            ),
+        >,
+    > {
+        fn count_write_to_file_macros(
+            args_allowed: bool,
+            cli: &dyn CommandLineArgLike,
+        ) -> anyhow::Result<u32> {
+            if !args_allowed && cli.contains_arg_attr() {
+                return Err(anyhow::anyhow!(
+                    WriteActionError::ArgAttrsDetectedButNotAllowed
+                ));
+            }
+
+            struct WriteToFileMacrosCounter {
+                count: u32,
+            }
+
+            impl WriteToFileMacroVisitor for WriteToFileMacrosCounter {
+                fn visit_write_to_file_macro(&mut self, _m: &ResolvedMacro) -> anyhow::Result<()> {
+                    self.count += 1;
+                    Ok(())
+                }
+
+                fn set_current_relative_to_path(
+                    &mut self,
+                    _gen: &dyn Fn(
+                        &dyn CommandLineContext,
+                    ) -> anyhow::Result<Option<RelativePathBuf>>,
+                ) -> anyhow::Result<()> {
+                    Ok(())
+                }
+            }
+
+            let mut counter = WriteToFileMacrosCounter { count: 0 };
+            cli.visit_write_to_file_macros(&mut counter)?;
+            Ok(counter.count)
+        }
+
+        fn get_cli_inputs(
+            with_inputs: bool,
+            cli: &dyn CommandLineArgLike,
+        ) -> anyhow::Result<SmallSet<ArtifactGroup>> {
+            if !with_inputs {
+                return Ok(Default::default());
+            }
+
+            #[derive(Default)]
+            struct CommandLineInputVisitor {
+                inputs: SmallSet<ArtifactGroup>,
+            }
+            impl CommandLineArtifactVisitor for CommandLineInputVisitor {
+                fn visit_input(&mut self, input: ArtifactGroup, _tag: Option<&ArtifactTag>) {
+                    self.inputs.insert(input);
+                }
+
+                fn visit_output(&mut self, _artifact: OutputArtifact, _tag: Option<&ArtifactTag>) {}
+            }
+
+            let mut visitor = CommandLineInputVisitor::default();
+            cli.visit_artifacts(&mut visitor)?;
+            Ok(visitor.inputs)
+        }
+
+        let mut this = this.state()?;
+        let (declaration, output_artifact) =
+            this.get_or_declare_output(eval, output, OutputType::File)?;
+
+        let (content_cli, written_macro_count, mut associated_artifacts) = match content {
+            WriteContentArg::CommandLineArg(content) => {
+                let content_arg = content.as_command_line_arg();
+                let count = count_write_to_file_macros(allow_args, content_arg)?;
+                let cli_inputs = get_cli_inputs(with_inputs, content_arg)?;
+                (content, count, cli_inputs)
+            }
+            WriteContentArg::StarlarkCommandLineValueUnpack(content) => {
+                let cli = StarlarkCmdArgs::try_from_value_typed(content)?;
+                let count = count_write_to_file_macros(allow_args, &cli)?;
+                let cli_inputs = get_cli_inputs(with_inputs, &cli)?;
+                (
+                    CommandLineArg::from_cmd_args(eval.heap().alloc_typed(cli)),
+                    count,
+                    cli_inputs,
+                )
+            }
+        };
+
+        let written_macro_files = if written_macro_count > 0 {
+            let macro_directory_path = {
+                // There might be several write actions at once; use the write action's output
+                // hash to deterministically avoid collisions for .macro files.
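+                // The resulting layout (derived from the code below) is
+                // `__macros/<sha1-of-output-path>/<index>.macro`, one file per
+                // write-to-file macro found in the content.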
+                let digest = output_artifact
+                    .get_path()
+                    .with_full_path(|path| Sha1::digest(path.as_str().as_bytes()));
+                let sha = hex::encode(digest);
+                format!("__macros/{}", sha)
+            };
+
+            let mut written_macro_files = indexset![];
+            for i in 0..written_macro_count {
+                let macro_file = this.declare_output(
+                    None,
+                    &format!("{}/{}.macro", &macro_directory_path, i),
+                    OutputType::File,
+                    eval.call_stack_top_location(),
+                )?;
+                written_macro_files.insert(macro_file);
+            }
+
+            let state = &mut *this;
+            let action = UnregisteredWriteMacrosToFileAction::new(
+                output_artifact
+                    .get_path()
+                    .with_short_path(|p| p.to_string()),
+            );
+            state.register_action(
+                indexset![],
+                written_macro_files.iter().map(|a| a.as_output()).collect(),
+                action,
+                Some(content_cli.to_value()),
+                None,
+            )?;
+
+            written_macro_files
+        } else {
+            indexset![]
+        };
+
+        let action = {
+            let maybe_macro_files = if allow_args {
+                let mut macro_files = indexset![];
+                for a in &written_macro_files {
+                    macro_files.insert(a.dupe().ensure_bound()?.into_artifact());
+                }
+                Some(macro_files)
+            } else {
+                None
+            };
+            UnregisteredWriteAction {
+                is_executable,
+                macro_files: maybe_macro_files,
+                absolute,
+            }
+        };
+        this.register_action(
+            indexset![],
+            indexset![output_artifact],
+            action,
+            Some(content_cli.to_value()),
+            None,
+        )?;
+
+        if allow_args {
+            for a in &written_macro_files {
+                associated_artifacts.insert(ArtifactGroup::Artifact(
+                    a.dupe().ensure_bound()?.into_artifact(),
+                ));
+            }
+        }
+
+        let value =
+            declaration.into_declared_artifact(AssociatedArtifacts::from(associated_artifacts));
+        if allow_args {
+            let macro_files: Vec<StarlarkDeclaredArtifact> = written_macro_files
+                .into_iter()
+                .map(|a| StarlarkDeclaredArtifact::new(None, a, AssociatedArtifacts::new()))
+                .collect();
+            Ok(Either::Right((value, macro_files)))
+        } else {
+            // Prefer the simpler API when there is no possibility of write-to-file macros
+            // being present in the content.
+            Ok(Either::Left(value))
+        }
+    }
+}
diff --git a/app/buck2_action_impl/src/dynamic.rs b/app/buck2_action_impl/src/dynamic.rs
new file mode 100644
index 0000000000000..a7b49fca49c46
--- /dev/null
+++ b/app/buck2_action_impl/src/dynamic.rs
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+pub(crate) mod attrs;
+pub(crate) mod attrs_starlark;
+pub mod bxl;
+pub mod calculation;
+pub mod deferred;
+pub(crate) mod dynamic_actions;
+pub(crate) mod dynamic_actions_callable;
+pub(crate) mod dynamic_actions_globals;
+pub(crate) mod dynamic_value;
+pub mod params;
+pub(crate) mod resolved_dynamic_value;
+pub(crate) mod storage;
diff --git a/app/buck2_action_impl/src/dynamic/attrs.rs b/app/buck2_action_impl/src/dynamic/attrs.rs
new file mode 100644
index 0000000000000..086c2ae3885d1
--- /dev/null
+++ b/app/buck2_action_impl/src/dynamic/attrs.rs
@@ -0,0 +1,375 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use allocative::Allocative;
+use buck2_artifact::artifact::artifact_type::Artifact;
+use buck2_artifact::artifact::artifact_type::BoundBuildArtifact;
+use buck2_artifact::artifact::artifact_type::OutputArtifact;
+use buck2_artifact::dynamic::DynamicLambdaResultsKey;
+use buck2_build_api::dynamic_value::DynamicValue;
+use buck2_build_api::interpreter::rule_defs::artifact::starlark_artifact_value::StarlarkArtifactValue;
+use buck2_build_api::interpreter::rule_defs::artifact::starlark_output_artifact::StarlarkOutputArtifact;
+use buck2_build_api::interpreter::rule_defs::artifact::unpack_artifact::UnpackArtifactOrDeclaredArtifact;
+use buck2_error::buck2_error_anyhow;
+use dupe::Dupe;
+use indexmap::IndexSet;
+use starlark::typing::Ty;
+use starlark::values::dict::DictRef;
+use starlark::values::list::ListRef;
+use starlark::values::tuple::TupleRef;
+use starlark::values::type_repr::StarlarkTypeRepr;
+use starlark::values::typing::TypeCompiled;
+use starlark::values::Freeze;
+use starlark::values::Freezer;
+use starlark::values::FrozenValue;
+use starlark::values::Trace;
+use starlark::values::Tracer;
+use starlark::values::UnpackValue;
+use starlark::values::Value;
+use starlark::values::ValueLifetimeless;
+use starlark_map::small_map::SmallMap;
+
+use crate::context::dynamic_output::DynamicActionsOutputArtifactBinder;
+use crate::dynamic::dynamic_value::StarlarkDynamicValue;
+use crate::dynamic::resolved_dynamic_value::StarlarkResolvedDynamicValue;
+
+#[derive(Clone, Debug, derive_more::Display, Allocative)]
+pub(crate) enum DynamicAttrType {
+    /// `OutputArtifact`.
+    #[display("dynattrs.output()")]
+    Output,
+    /// Take `Artifact`, provide `ArtifactValue`.
+    #[display("dynattrs.artifact_value()")]
+    ArtifactValue,
+    /// Take `DynamicValue`, provide `ResolvedDynamicValue`.
+    #[display("dynattrs.dynamic_value()")]
+    DynamicValue,
+    /// Pass an arbitrary Starlark value through unchanged.
+    #[display("dynattrs.value({})", _0)]
+    Value(TypeCompiled<FrozenValue>),
+    /// List.
+    #[display("dynattrs.list({})", _0)]
+    List(Box<DynamicAttrType>),
+    /// Tuple.
+    #[display("dynattrs.tuple({})", _0.iter().map(|x| format!("{}", x)).collect::<Vec<_>>().join(", "))]
+    Tuple(Box<[DynamicAttrType]>),
+    /// Value or `None`.
+    #[display("dynattrs.option({})", _0)]
+    Option(Box<DynamicAttrType>),
+    /// Dict.
+    #[display("dynattrs.dict({}, {})", _0.0, _0.1)]
+    Dict(Box<(TypeCompiled<FrozenValue>, DynamicAttrType)>),
+}
+
+#[derive(Debug, Trace, Allocative)]
+#[trace(bound = "V: Trace<'v>, O: 'static")]
+pub(crate) enum DynamicAttrValue<
+    // Starlark value passed as is from dynamic actions creation site to impl.
+    V: ValueLifetimeless,
+    // `OutputArtifact` during creation, and `BoundBuildArtifact` after artifact is bound.
+    O,
+> {
+    Output(#[trace(static)] O),
+    ArtifactValue(Artifact),
+    DynamicValue(DynamicValue),
+    Value(V),
+    List(Box<[DynamicAttrValue<V, O>]>),
+    Tuple(Box<[DynamicAttrValue<V, O>]>),
+    Dict(SmallMap<V, DynamicAttrValue<V, O>>),
+    Option(Option<Box<DynamicAttrValue<V, O>>>),
+}
+
+// We implement `Freeze` manually because starlark `derive(Freeze)` does not support custom bounds.
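+// Freezing maps the Starlark payload `V` to `V::Frozen` (e.g. `Value` to `FrozenValue`)
+// while the artifact parameter `O` is carried through unchanged.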
+impl<V: ValueLifetimeless, O> Freeze for DynamicAttrValue<V, O> {
+    type Frozen = DynamicAttrValue<V::Frozen, O>;
+
+    fn freeze(self, freezer: &Freezer) -> anyhow::Result<Self::Frozen> {
+        match self {
+            DynamicAttrValue::Output(o) => Ok(DynamicAttrValue::Output(o)),
+            DynamicAttrValue::ArtifactValue(a) => Ok(DynamicAttrValue::ArtifactValue(a)),
+            DynamicAttrValue::DynamicValue(d) => Ok(DynamicAttrValue::DynamicValue(d)),
+            DynamicAttrValue::Value(v) => Ok(DynamicAttrValue::Value(v.freeze(freezer)?)),
+            DynamicAttrValue::List(l) => Ok(DynamicAttrValue::List(l.freeze(freezer)?)),
+            DynamicAttrValue::Dict(d) => Ok(DynamicAttrValue::Dict(d.freeze(freezer)?)),
+            DynamicAttrValue::Tuple(t) => Ok(DynamicAttrValue::Tuple(t.freeze(freezer)?)),
+            DynamicAttrValue::Option(o) => Ok(DynamicAttrValue::Option(o.freeze(freezer)?)),
+        }
+    }
+}
+
+#[derive(Debug, Allocative)]
+pub struct DynamicAttrValues<V: ValueLifetimeless, O> {
+    /// Indexed by attrs definitions in `DynamicActionCallable`.
+    pub(crate) values: Box<[DynamicAttrValue<V, O>]>,
+}
+
+unsafe impl<'v, V: ValueLifetimeless + Trace<'v>, O: 'static> Trace<'v> for DynamicAttrValues<V, O> {
+    fn trace(&mut self, tracer: &Tracer<'v>) {
+        let DynamicAttrValues { values } = self;
+        values.trace(tracer);
+    }
+}
+
+impl<V: ValueLifetimeless, O> Freeze for DynamicAttrValues<V, O> {
+    type Frozen = DynamicAttrValues<V::Frozen, O>;
+
+    fn freeze(self, freezer: &Freezer) -> anyhow::Result<Self::Frozen> {
+        Ok(DynamicAttrValues {
+            values: self.values.freeze(freezer)?,
+        })
+    }
+}
+
+impl<'v> DynamicAttrValue<Value<'v>, BoundBuildArtifact> {
+    fn for_each_node(&self, f: &mut impl FnMut(&Self)) {
+        f(self);
+        match self {
+            DynamicAttrValue::Output(_)
+            | DynamicAttrValue::ArtifactValue(_)
+            | DynamicAttrValue::DynamicValue(_)
+            | DynamicAttrValue::Value(_) => {}
+            DynamicAttrValue::List(xs) => {
+                for x in xs.iter() {
+                    x.for_each_node(f);
+                }
+            }
+            DynamicAttrValue::Tuple(xs) => {
+                for x in xs.iter() {
+                    x.for_each_node(f);
+                }
+            }
+            DynamicAttrValue::Dict(xs) => {
+                for x in xs.values() {
+                    x.for_each_node(f);
+                }
+            }
+            DynamicAttrValue::Option(x) => {
+                if let Some(x) = x {
+                    x.for_each_node(f);
+                }
+            }
+        }
+    }
+}
+
+impl<'v> DynamicAttrValue<Value<'v>, OutputArtifact> {
+    fn bind(
+        self,
+        bind: &mut DynamicActionsOutputArtifactBinder,
+    ) -> anyhow::Result<DynamicAttrValue<Value<'v>, BoundBuildArtifact>> {
+        match self {
+            DynamicAttrValue::Output(output) => Ok(DynamicAttrValue::Output(bind.bind(output)?)),
+            DynamicAttrValue::ArtifactValue(v) => Ok(DynamicAttrValue::ArtifactValue(v)),
+            DynamicAttrValue::DynamicValue(v) => Ok(DynamicAttrValue::DynamicValue(v)),
+            DynamicAttrValue::Value(v) => Ok(DynamicAttrValue::Value(v)),
+            DynamicAttrValue::List(xs) => Ok(DynamicAttrValue::List(
+                xs.into_vec()
+                    .into_iter()
+                    .map(|x| x.bind(bind))
+                    .collect::<anyhow::Result<_>>()?,
+            )),
+            DynamicAttrValue::Tuple(xs) => Ok(DynamicAttrValue::Tuple(
+                xs.into_vec()
+                    .into_iter()
+                    .map(|x| x.bind(bind))
+                    .collect::<anyhow::Result<_>>()?,
+            )),
+            DynamicAttrValue::Dict(xs) => {
+                let mut r = SmallMap::with_capacity(xs.len());
+                for (k, v) in xs.into_iter_hashed() {
+                    r.insert_hashed_unique_unchecked(k, v.bind(bind)?);
+                }
+                Ok(DynamicAttrValue::Dict(r))
+            }
+            DynamicAttrValue::Option(x) => match x {
+                Some(x) => Ok(DynamicAttrValue::Option(Some(Box::new(x.bind(bind)?)))),
+                None => Ok(DynamicAttrValue::Option(None)),
+            },
+        }
+    }
+}
+
+impl<'v> DynamicAttrValues<Value<'v>, BoundBuildArtifact> {
+    fn for_each_node(
+        &self,
+        f: &mut impl FnMut(&DynamicAttrValue<Value<'v>, BoundBuildArtifact>),
+    ) {
+        for value in &self.values {
+            value.for_each_node(f);
+        }
+    }
+
+    pub(crate) fn outputs(&self) -> IndexSet<BoundBuildArtifact> {
+        let mut outputs = IndexSet::new();
+        self.for_each_node(&mut |value| {
+            if let DynamicAttrValue::Output(output) = value {
+                outputs.insert(output.dupe());
+            }
+        });
+        outputs
+    }
+
+    pub(crate) fn artifact_values(&self) -> IndexSet<Artifact> {
+        let mut artifact_values = IndexSet::new();
+        self.for_each_node(&mut |value| {
+            if let DynamicAttrValue::ArtifactValue(artifact) = value {
+                artifact_values.insert(artifact.dupe());
+            }
+        });
+        artifact_values
+    }
+
+    pub(crate) fn dynamic_values(&self) -> IndexSet<DynamicValue> {
+        let mut dynamic_values = IndexSet::new();
+        self.for_each_node(&mut |value| {
+            if let DynamicAttrValue::DynamicValue(dynamic_value) = value {
+                dynamic_values.insert(dynamic_value.dupe());
+            }
+        });
+        dynamic_values
+    }
+}
+
+impl<'v> DynamicAttrValues<Value<'v>, OutputArtifact> {
+    pub(crate) fn bind(
+        self,
+        key: &DynamicLambdaResultsKey,
+    ) -> anyhow::Result<DynamicAttrValues<Value<'v>, BoundBuildArtifact>> {
+        let DynamicAttrValues { values } = self;
+        let mut bind = DynamicActionsOutputArtifactBinder::new(key);
+        Ok(DynamicAttrValues {
+            values: values
+                .into_vec()
+                .into_iter()
+                .map(|v| v.bind(&mut bind))
+                .collect::<anyhow::Result<_>>()?,
+        })
+    }
+}
+
+impl DynamicAttrType {
+    /// Parameter type of the `impl` function.
+    pub(crate) fn impl_param_ty(&self) -> Ty {
+        match self {
+            DynamicAttrType::Output => StarlarkOutputArtifact::starlark_type_repr(),
+            DynamicAttrType::ArtifactValue => StarlarkArtifactValue::starlark_type_repr(),
+            DynamicAttrType::DynamicValue => StarlarkResolvedDynamicValue::starlark_type_repr(),
+            DynamicAttrType::Value(ty) => ty.as_ty().dupe(),
+            DynamicAttrType::List(item_ty) => Ty::list(item_ty.impl_param_ty()),
+            DynamicAttrType::Tuple(item_tys) => {
+                Ty::tuple(item_tys.iter().map(|ty| ty.impl_param_ty()).collect())
+            }
+            DynamicAttrType::Option(ty) => Ty::union2(ty.impl_param_ty(), Ty::none()),
+            DynamicAttrType::Dict(k_v) => {
+                let (k, v) = &**k_v;
+                Ty::dict(k.as_ty().dupe(), v.impl_param_ty())
+            }
+        }
+    }
+
+    /// Parameter type of the callable created by `dynamic_actions`.
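+    ///
+    /// For example (a sketch): for `dynattrs.artifact_value()`, the callable accepts an
+    /// `artifact`, while the `impl` function receives the resolved `ArtifactValue` for it;
+    /// likewise `dynattrs.dynamic_value()` takes a `DynamicValue` and provides a
+    /// `ResolvedDynamicValue`.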
+    pub(crate) fn callable_param_ty(&self) -> Ty {
+        match self {
+            DynamicAttrType::Output => StarlarkOutputArtifact::starlark_type_repr(),
+            DynamicAttrType::ArtifactValue => {
+                UnpackArtifactOrDeclaredArtifact::starlark_type_repr()
+            }
+            DynamicAttrType::DynamicValue => StarlarkDynamicValue::starlark_type_repr(),
+            DynamicAttrType::Value(ty) => ty.as_ty().dupe(),
+            DynamicAttrType::List(item_ty) => Ty::list(item_ty.callable_param_ty()),
+            DynamicAttrType::Tuple(item_tys) => {
+                Ty::tuple(item_tys.iter().map(|ty| ty.callable_param_ty()).collect())
+            }
+            DynamicAttrType::Option(ty) => Ty::union2(ty.callable_param_ty(), Ty::none()),
+            DynamicAttrType::Dict(k_v) => {
+                let (k, v) = &**k_v;
+                Ty::dict(k.as_ty().dupe(), v.callable_param_ty())
+            }
+        }
+    }
+
+    pub(crate) fn coerce<'v>(
+        &self,
+        value: Value<'v>,
+    ) -> anyhow::Result<DynamicAttrValue<Value<'v>, OutputArtifact>> {
+        match self {
+            DynamicAttrType::Output => {
+                let artifact = <&StarlarkOutputArtifact>::unpack_value_err(value)?;
+                Ok(DynamicAttrValue::Output(artifact.artifact()?))
+            }
+            DynamicAttrType::ArtifactValue => {
+                let artifact = UnpackArtifactOrDeclaredArtifact::unpack_value_err(value)?;
+                Ok(DynamicAttrValue::ArtifactValue(artifact.artifact()?))
+            }
+            DynamicAttrType::DynamicValue => {
+                let dynamic_value = <&StarlarkDynamicValue>::unpack_value_err(value)?;
+                Ok(DynamicAttrValue::DynamicValue(
+                    dynamic_value.dynamic_value.dupe(),
+                ))
+            }
+            DynamicAttrType::Value(ty) => {
+                if !ty.matches(value) {
+                    return Err(buck2_error_anyhow!(
+                        [],
+                        "Expecting a value of type `{}`, got: {}",
+                        ty,
+                        value.to_string_for_type_error()
+                    ));
+                }
+                Ok(DynamicAttrValue::Value(value))
+            }
+            DynamicAttrType::List(elem_ty) => {
+                let list = <&ListRef>::unpack_value_err(value)?;
+                let mut res = Vec::with_capacity(list.len());
+                for elem in list.iter() {
+                    res.push(elem_ty.coerce(elem)?);
+                }
+                Ok(DynamicAttrValue::List(res.into_boxed_slice()))
+            }
+            DynamicAttrType::Dict(elem_ty) => {
+                let (key_ty, value_ty) = &**elem_ty;
+                let dict = DictRef::unpack_value_err(value)?;
+                let mut res = SmallMap::with_capacity(dict.len());
+                for (key, value) in dict.iter_hashed() {
+                    if !key_ty.matches(key.into_key()) {
+                        return Err(buck2_error_anyhow!(
+                            [],
+                            "Expecting a key of type `{}`, got: {}",
+                            key_ty,
+                            key.to_string_for_type_error()
+                        ));
+                    }
+                    res.insert_hashed_unique_unchecked(key, value_ty.coerce(value)?);
+                }
+                Ok(DynamicAttrValue::Dict(res))
+            }
+            DynamicAttrType::Tuple(elem_tys) => {
+                let tuple = <&TupleRef>::unpack_value_err(value)?;
+                if tuple.len() != elem_tys.len() {
+                    return Err(buck2_error_anyhow!(
+                        [],
+                        "Expecting a tuple of length {}, got: {}",
+                        elem_tys.len(),
+                        tuple.len()
+                    ));
+                }
+                let mut res = Vec::with_capacity(elem_tys.len());
+                for (elem, ty) in tuple.iter().zip(elem_tys.iter()) {
+                    res.push(ty.coerce(elem)?);
+                }
+                Ok(DynamicAttrValue::Tuple(res.into_boxed_slice()))
+            }
+            DynamicAttrType::Option(elem_ty) => {
+                if value.is_none() {
+                    Ok(DynamicAttrValue::Option(None))
+                } else {
+                    let elem = elem_ty.coerce(value)?;
+                    Ok(DynamicAttrValue::Option(Some(Box::new(elem))))
+                }
+            }
+        }
+    }
+}
diff --git a/app/buck2_action_impl/src/dynamic/attrs_starlark.rs b/app/buck2_action_impl/src/dynamic/attrs_starlark.rs
new file mode 100644
index 0000000000000..0ee5cdd53ded2
--- /dev/null
+++ b/app/buck2_action_impl/src/dynamic/attrs_starlark.rs
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use allocative::Allocative;
+use starlark::any::ProvidesStaticType;
+use starlark::environment::GlobalsBuilder;
+use starlark::eval::Evaluator;
+use starlark::starlark_module;
+use starlark::values::starlark_value;
+use starlark::values::starlark_value_as_type::StarlarkValueAsType;
+use starlark::values::tuple::UnpackTuple;
+use starlark::values::typing::TypeCompiled;
+use starlark::values::typing::TypeType;
+use starlark::values::AllocValue;
+use starlark::values::Heap;
+use starlark::values::NoSerialize;
+use starlark::values::StarlarkValue;
+use starlark::values::Value;
+use starlark::values::ValueOf;
+
+use crate::dynamic::attrs::DynamicAttrType;
+
+/// Attribute type for dynamic actions. Created from the `dynattrs` module.
+#[derive(
+    Debug,
+    derive_more::Display,
+    ProvidesStaticType,
+    Allocative,
+    NoSerialize
+)]
+#[display("{}", ty)]
+pub(crate) struct StarlarkDynamicAttrType {
+    pub(crate) ty: DynamicAttrType,
+}
+
+#[starlark_value(type = "DynamicAttrType", StarlarkTypeRepr, UnpackValue)]
+impl<'v> StarlarkValue<'v> for StarlarkDynamicAttrType {}
+
+impl<'v> AllocValue<'v> for StarlarkDynamicAttrType {
+    fn alloc_value(self, heap: &'v Heap) -> Value<'v> {
+        heap.alloc_simple(self)
+    }
+}
+
+#[starlark_module]
+fn struct_dynattrs(globals: &mut GlobalsBuilder) {
+    fn output() -> anyhow::Result<StarlarkDynamicAttrType> {
+        Ok(StarlarkDynamicAttrType {
+            ty: DynamicAttrType::Output,
+        })
+    }
+
+    fn artifact_value() -> anyhow::Result<StarlarkDynamicAttrType> {
+        Ok(StarlarkDynamicAttrType {
+            ty: DynamicAttrType::ArtifactValue,
+        })
+    }
+
+    fn dynamic_value() -> anyhow::Result<StarlarkDynamicAttrType> {
+        Ok(StarlarkDynamicAttrType {
+            ty: DynamicAttrType::DynamicValue,
+        })
+    }
+
+    fn value<'v>(
+        #[starlark(require = pos)] ty: ValueOf<'v, TypeType>,
+        eval: &mut Evaluator<'v, '_, '_>,
+    ) -> anyhow::Result<StarlarkDynamicAttrType> {
+        let ty = TypeCompiled::new(ty.value, eval.heap())?;
+        // We allocate a type in the frozen heap (which is not garbage collected),
+        // which is fine because this code is not meant to be executed outside of top-level code.
+        let ty = ty.to_frozen(eval.frozen_heap());
+        Ok(StarlarkDynamicAttrType {
+            ty: DynamicAttrType::Value(ty),
+        })
+    }
+
+    fn list<'v>(
+        #[starlark(require = pos)] ty: &'v StarlarkDynamicAttrType,
+    ) -> anyhow::Result<StarlarkDynamicAttrType> {
+        let ty = ty.ty.clone();
+        Ok(StarlarkDynamicAttrType {
+            ty: DynamicAttrType::List(Box::new(ty)),
+        })
+    }
+
+    fn dict<'v>(
+        #[starlark(require = pos)] key: ValueOf<'v, TypeType>,
+        #[starlark(require = pos)] value: &'v StarlarkDynamicAttrType,
+        eval: &mut Evaluator<'v, '_, '_>,
+    ) -> anyhow::Result<StarlarkDynamicAttrType> {
+        let key = TypeCompiled::new(key.value, eval.heap())?;
+        // See the comment above about the frozen heap.
+        let key = key.to_frozen(eval.frozen_heap());
+        let value = value.ty.clone();
+        Ok(StarlarkDynamicAttrType {
+            ty: DynamicAttrType::Dict(Box::new((key, value))),
+        })
+    }
+
+    fn tuple<'v>(
+        #[starlark(args)] args: UnpackTuple<&'v StarlarkDynamicAttrType>,
+    ) -> anyhow::Result<StarlarkDynamicAttrType> {
+        let items = args.items.into_iter().map(|x| x.ty.clone()).collect();
+        Ok(StarlarkDynamicAttrType {
+            ty: DynamicAttrType::Tuple(items),
+        })
+    }
+
+    fn option<'v>(
+        #[starlark(require = pos)] ty: &'v StarlarkDynamicAttrType,
+    ) -> anyhow::Result<StarlarkDynamicAttrType> {
+        let ty = ty.ty.clone();
+        Ok(StarlarkDynamicAttrType {
+            ty: DynamicAttrType::Option(Box::new(ty)),
+        })
+    }
+
+    const DynamicAttrType: StarlarkValueAsType<StarlarkDynamicAttrType> =
+        StarlarkValueAsType::new();
+}
+
+pub(crate) fn register_dynamic_attrs(globals: &mut GlobalsBuilder) {
+    globals.namespace("dynattrs", struct_dynattrs);
+}
diff --git a/app/buck2_action_impl/src/dynamic/bxl.rs b/app/buck2_action_impl/src/dynamic/bxl.rs
new file mode 100644
index 0000000000000..4036dab33dbf4
--- /dev/null
+++ b/app/buck2_action_impl/src/dynamic/bxl.rs
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::collections::HashMap;
+use std::pin::Pin;
+
+use buck2_artifact::dynamic::DynamicLambdaResultsKey;
+use buck2_build_api::analysis::registry::RecordedAnalysisValues;
+use buck2_build_api::dynamic_value::DynamicValue;
+use buck2_build_api::interpreter::rule_defs::provider::collection::FrozenProviderCollectionValue;
+use buck2_core::base_deferred_key::BaseDeferredKeyBxl;
+use buck2_execute::digest_config::DigestConfig;
+use buck2_futures::cancellable_future::CancellationObserver;
+use buck2_util::late_binding::LateBinding;
+use dice::DiceComputations;
+use futures::Future;
+use starlark::values::OwnedRefFrozenRef;
+
+use crate::dynamic::deferred::InputArtifactsMaterialized;
+use crate::dynamic::params::FrozenDynamicLambdaParams;
+
+pub static EVAL_BXL_FOR_DYNAMIC_OUTPUT: LateBinding<
+    for<'v> fn(
+        &'v BaseDeferredKeyBxl,
+        DynamicLambdaResultsKey,
+        OwnedRefFrozenRef<'v, FrozenDynamicLambdaParams>,
+        &'v mut DiceComputations,
+        String,
+        InputArtifactsMaterialized,
+        HashMap<DynamicValue, FrozenProviderCollectionValue>,
+        DigestConfig,
+        CancellationObserver,
+    ) -> Pin<Box<dyn Future<Output = anyhow::Result<RecordedAnalysisValues>> + Send + 'v>>,
> = LateBinding::new("EVAL_BXL_FOR_DYNAMIC_OUTPUT");
+
+pub(crate) async fn eval_bxl_for_dynamic_output<'v>(
+    base_deferred_key: &'v BaseDeferredKeyBxl,
+    self_key: DynamicLambdaResultsKey,
+    dynamic_lambda: OwnedRefFrozenRef<'_, FrozenDynamicLambdaParams>,
+    dice_ctx: &'v mut DiceComputations<'_>,
+    action_key: String,
+    input_artifacts_materialized: InputArtifactsMaterialized,
+    resolved_dynamic_values: HashMap<DynamicValue, FrozenProviderCollectionValue>,
+    digest_config: DigestConfig,
+    liveness: CancellationObserver,
+) -> anyhow::Result<RecordedAnalysisValues> {
+    (EVAL_BXL_FOR_DYNAMIC_OUTPUT.get()?)(
+        base_deferred_key,
+        self_key,
+        dynamic_lambda,
+        dice_ctx,
+        action_key,
+        input_artifacts_materialized,
+        resolved_dynamic_values,
+        digest_config,
+        liveness,
+    )
+    .await
+}
diff --git a/app/buck2_action_impl/src/dynamic/calculation.rs b/app/buck2_action_impl/src/dynamic/calculation.rs
new file mode 100644
index 0000000000000..25b6a9b60869f
--- /dev/null
+++ b/app/buck2_action_impl/src/dynamic/calculation.rs
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::sync::Arc;
+
+use allocative::Allocative;
+use async_trait::async_trait;
+use buck2_artifact::dynamic::DynamicLambdaResultsKey;
+use buck2_build_api::deferred::calculation::lookup_deferred_holder;
+use buck2_build_api::dynamic::calculation::DynamicLambdaCalculation;
+use buck2_build_api::dynamic::calculation::DynamicLambdaResult;
+use buck2_build_api::dynamic::calculation::DYNAMIC_LAMBDA_CALCULATION_IMPL;
+use buck2_build_signals::node_key::BuildSignalsNodeKey;
+use buck2_build_signals::node_key::BuildSignalsNodeKeyImpl;
+use dice::CancellationContext;
+use dice::Demand;
+use dice::DiceComputations;
+use dice::Key;
+use dupe::Dupe;
+
+use crate::dynamic::deferred::prepare_and_execute_lambda;
+use crate::dynamic::storage::FrozenDynamicLambdaParamsStorageImpl;
+
+struct DynamicLambdaCalculationImpl;
+
+#[async_trait]
+impl DynamicLambdaCalculation for DynamicLambdaCalculationImpl {
+    async fn dynamic_lambda_result(
+        &self,
+        dice: &mut DiceComputations<'_>,
+        key: &DynamicLambdaResultsKey,
+    ) -> anyhow::Result<Arc<DynamicLambdaResult>> {
+        Ok(dice.compute(&DynamicLambdaDiceKey(key.dupe())).await??)
+    }
+}
+
+pub(crate) fn init_dynamic_lambda_calculation() {
+    DYNAMIC_LAMBDA_CALCULATION_IMPL.init(&DynamicLambdaCalculationImpl)
+}
+
+#[derive(
+    Debug,
+    derive_more::Display,
+    Dupe,
+    Clone,
+    Allocative,
+    Hash,
+    Eq,
+    PartialEq
+)]
+pub struct DynamicLambdaDiceKey(DynamicLambdaResultsKey);
+
+#[async_trait]
+impl Key for DynamicLambdaDiceKey {
+    type Value = buck2_error::Result<Arc<DynamicLambdaResult>>;
+
+    async fn compute(
+        &self,
+        ctx: &mut DiceComputations,
+        cancellation: &CancellationContext,
+    ) -> Self::Value {
+        let deferred_holder = lookup_deferred_holder(ctx, self.0.holder_key()).await?;
+        let lambda = FrozenDynamicLambdaParamsStorageImpl::lookup_lambda(
+            deferred_holder.analysis_values().analysis_storage()?,
+            &self.0,
+        )?;
+
+        let analysis_values = prepare_and_execute_lambda(
+            ctx,
+            cancellation,
+            lambda,
+            self.0.dupe(),
+            self.0.action_key(),
+        )
+        .await?;
+        Ok(Arc::new(DynamicLambdaResult { analysis_values }))
+    }
+
+    fn equality(_x: &Self::Value, _y: &Self::Value) -> bool {
+        false
+    }
+
+    fn validity(x: &Self::Value) -> bool {
+        x.is_ok()
+    }
+
+    fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
+        demand.provide_value_with(|| BuildSignalsNodeKey::new(self.dupe()));
+    }
+}
+
+impl BuildSignalsNodeKeyImpl for DynamicLambdaDiceKey {}
diff --git a/app/buck2_action_impl/src/dynamic/deferred.rs b/app/buck2_action_impl/src/dynamic/deferred.rs
new file mode 100644
index 0000000000000..971d0ab008f31
--- /dev/null
+++ b/app/buck2_action_impl/src/dynamic/deferred.rs
@@ -0,0 +1,679 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::collections::HashMap;
+use std::iter;
+use std::sync::Arc;
+
+use buck2_artifact::artifact::artifact_type::Artifact;
+use buck2_artifact::artifact::artifact_type::BoundBuildArtifact;
+use buck2_artifact::deferred::key::DeferredHolderKey;
+use buck2_artifact::dynamic::DynamicLambdaResultsKey;
+use buck2_build_api::actions::artifact::get_artifact_fs::GetArtifactFs;
+use buck2_build_api::analysis::registry::AnalysisRegistry;
+use buck2_build_api::analysis::registry::RecordedAnalysisValues;
+use buck2_build_api::artifact_groups::calculation::ArtifactGroupCalculation;
+use buck2_build_api::artifact_groups::ArtifactGroup;
+use buck2_build_api::dynamic::calculation::dynamic_lambda_result;
+use buck2_build_api::dynamic_value::DynamicValue;
+use buck2_build_api::interpreter::rule_defs::artifact::associated::AssociatedArtifacts;
+use buck2_build_api::interpreter::rule_defs::artifact::starlark_artifact::StarlarkArtifact;
+use buck2_build_api::interpreter::rule_defs::artifact::starlark_artifact_value::StarlarkArtifactValue;
+use buck2_build_api::interpreter::rule_defs::artifact::starlark_declared_artifact::StarlarkDeclaredArtifact;
+use buck2_build_api::interpreter::rule_defs::artifact::starlark_output_artifact::StarlarkOutputArtifact;
+use buck2_build_api::interpreter::rule_defs::context::AnalysisActions;
+use buck2_build_api::interpreter::rule_defs::context::AnalysisContext;
+use buck2_build_api::interpreter::rule_defs::provider::collection::FrozenProviderCollectionValue;
+use buck2_build_api::interpreter::rule_defs::provider::collection::ProviderCollection;
+use buck2_core::base_deferred_key::BaseDeferredKey;
+use buck2_core::fs::artifact_path_resolver::ArtifactFs;
+use buck2_error::buck2_error_anyhow;
+use buck2_error::internal_error_anyhow;
+use buck2_error::starlark_error::from_starlark;
+use buck2_error::BuckErrorContext;
+use buck2_events::dispatch::get_dispatcher;
+use buck2_events::dispatch::span_async;
+use buck2_events::dispatch::span_async_simple;
+use buck2_execute::artifact::artifact_dyn::ArtifactDyn;
+use buck2_execute::digest_config::DigestConfig;
+use buck2_execute::digest_config::HasDigestConfig;
+use buck2_execute::materialize::materializer::HasMaterializer;
+use buck2_futures::cancellable_future::CancellationObserver;
+use buck2_interpreter::print_handler::EventDispatcherPrintHandler;
+use buck2_interpreter::soft_error::Buck2StarlarkSoftErrorHandler;
+use dice::CancellationContext;
+use dice::DiceComputations;
+use dupe::Dupe;
+use futures::FutureExt;
+use indexmap::IndexSet;
+use starlark::environment::Module;
+use starlark::eval::Evaluator;
+use starlark::values::dict::AllocDict;
+use starlark::values::dict::DictType;
+use starlark::values::list::AllocList;
+use starlark::values::tuple::AllocTuple;
+use starlark::values::FrozenValue;
+use starlark::values::Heap;
+use starlark::values::OwnedRefFrozenRef;
+use starlark::values::Value;
+use starlark::values::ValueOfUnchecked;
+use starlark::values::ValueTyped;
+use starlark::values::ValueTypedComplex;
+use starlark::StarlarkResultExt;
+use starlark_map::small_map::SmallMap;
+
+use crate::dynamic::attrs::DynamicAttrValue;
+use crate::dynamic::attrs::DynamicAttrValues;
+use crate::dynamic::bxl::eval_bxl_for_dynamic_output;
+use crate::dynamic::dynamic_actions_callable::FrozenStarlarkDynamicActionsCallable;
+use crate::dynamic::dynamic_actions_callable::P_ACTIONS;
+use crate::dynamic::params::FrozenDynamicLambdaParams;
+use crate::dynamic::resolved_dynamic_value::StarlarkResolvedDynamicValue;
+
+pub enum DynamicLambdaArgs<'v> {
+    OldPositional {
+        ctx: Value<'v>,
+        artifact_values: ValueOfUnchecked<'v, DictType<StarlarkArtifact, StarlarkArtifactValue>>,
+        outputs: ValueOfUnchecked<'v, DictType<StarlarkArtifact, StarlarkDeclaredArtifact>>,
+    },
+    DynamicActionsNamed {
+        actions: ValueTyped<'v, AnalysisActions<'v>>,
+        attr_values: Box<[(String, Value<'v>)]>,
+    },
+}
+
+pub fn invoke_dynamic_output_lambda<'v>(
+    eval: &mut Evaluator<'v, '_, '_>,
+    lambda: Value<'v>,
+    args: DynamicLambdaArgs<'v>,
+) -> anyhow::Result<ProviderCollection<'v>> {
+    let pos;
+    let named;
+    let (pos, named): (&[_], &[(_, _)]) = match &args {
+        DynamicLambdaArgs::OldPositional {
+            ctx,
+            artifact_values,
+            outputs,
+        } => {
+            pos = [*ctx, artifact_values.get(), outputs.get()];
+            (&pos, &[])
+        }
+        DynamicLambdaArgs::DynamicActionsNamed {
+            actions,
+            attr_values,
+        } => {
+            named = iter::once((P_ACTIONS.name, actions.to_value()))
+                .chain(attr_values.iter().map(|(k, v)| (k.as_str(), *v)))
+                .collect::<Vec<_>>();
+            (&[], &named)
+        }
+    };
+    let return_value = eval
+        .eval_function(lambda, pos, named)
+        .map_err(from_starlark)?;
+
+    let provider_collection = match args {
+        DynamicLambdaArgs::OldPositional { .. } => {
+            if !return_value.is_none() {
+                return Err(buck2_error_anyhow!(
+                    [],
+                    "dynamic_output lambda must return `None`, got: `{0}`",
+                    return_value.to_string_for_type_error()
+                ));
+            }
+            ProviderCollection::try_from_value_dynamic_output(
+                FrozenValue::new_empty_list().to_value(),
+            )?
+        }
+        DynamicLambdaArgs::DynamicActionsNamed { .. } => {
+            ProviderCollection::try_from_value_dynamic_output(return_value)?
+        }
+    };
+
+    Ok(provider_collection)
+}
+
+async fn execute_lambda(
+    lambda: OwnedRefFrozenRef<'_, FrozenDynamicLambdaParams>,
+    dice: &mut DiceComputations<'_>,
+    self_key: DynamicLambdaResultsKey,
+    action_key: String,
+    resolved_dynamic_values: HashMap<DynamicValue, FrozenProviderCollectionValue>,
+    input_artifacts_materialized: InputArtifactsMaterialized,
+    digest_config: DigestConfig,
+    liveness: CancellationObserver,
+) -> anyhow::Result<RecordedAnalysisValues> {
+    if let BaseDeferredKey::BxlLabel(key) = &lambda.as_ref().static_fields.owner {
+        eval_bxl_for_dynamic_output(
+            key,
+            self_key,
+            lambda,
+            dice,
+            action_key,
+            input_artifacts_materialized,
+            resolved_dynamic_values,
+            digest_config,
+            liveness,
+        )
+        .await
+    } else {
+        let proto_rule = "dynamic_lambda".to_owned();
+
+        let start_event = buck2_data::AnalysisStart {
+            target: Some(buck2_data::analysis_start::Target::DynamicLambda(
+                lambda.as_ref().static_fields.owner.to_proto().into(),
+            )),
+            rule: proto_rule.clone(),
+        };
+
+        let artifact_fs = dice.get_artifact_fs().await?;
+
+        span_async(start_event, async {
+            let mut declared_actions = None;
+            let mut declared_artifacts = None;
+
+            let output: anyhow::Result<_> = try {
+                let env = Module::new();
+
+                let analysis_registry = {
+                    let heap = env.heap();
+                    let print = EventDispatcherPrintHandler(get_dispatcher());
+                    let mut eval = Evaluator::new(&env);
+                    eval.set_print_handler(&print);
+                    eval.set_soft_error_handler(&Buck2StarlarkSoftErrorHandler);
+                    let dynamic_lambda_ctx_data = dynamic_lambda_ctx_data(
+                        lambda,
+                        self_key,
+                        &action_key,
+                        input_artifacts_materialized,
+                        &resolved_dynamic_values,
+                        &artifact_fs,
+                        digest_config,
+                        &env,
+                    )?;
+                    let ctx = AnalysisContext::prepare(
+                        heap,
+                        dynamic_lambda_ctx_data.lambda.attributes()?,
+                        lambda.as_ref().static_fields.owner.configured_label(),
+                        dynamic_lambda_ctx_data.lambda.plugins()?,
+                        dynamic_lambda_ctx_data.registry,
+                        dynamic_lambda_ctx_data.digest_config,
+                    );
+
+                    let args = match (
+                        &dynamic_lambda_ctx_data.lambda.attr_values,
+                        &dynamic_lambda_ctx_data.spec,
+                    ) {
+                        (
+                            None,
+                            DynamicLambdaCtxDataSpec::Old {
+                                outputs,
+                                artifact_values,
+                            },
+                        ) => DynamicLambdaArgs::OldPositional {
+                            ctx: ctx.to_value(),
+                            artifact_values: *artifact_values,
+                            outputs: *outputs,
+                        },
+                        (Some(_arg), DynamicLambdaCtxDataSpec::New { attr_values }) => {
+                            DynamicLambdaArgs::DynamicActionsNamed {
+                                // TODO(nga): no need to create `ctx`
+                                //   because we only need `actions` here.
+                                actions: ctx.actions,
+                                attr_values: attr_values.clone(),
+                            }
+                        }
+                        (None, DynamicLambdaCtxDataSpec::New { .. })
+                        | (Some(_), DynamicLambdaCtxDataSpec::Old { .. }) => {
+                            Err(internal_error_anyhow!(
+                                "Unexpected combination of attr_values and spec"
+                            ))?;
+                            unreachable!();
+                        }
+                    };
+
+                    let providers: ProviderCollection = invoke_dynamic_output_lambda(
+                        &mut eval,
+                        dynamic_lambda_ctx_data.lambda.lambda(),
+                        args,
+                    )?;
+                    let providers = eval.heap().alloc(providers);
+                    let providers = ValueTypedComplex::<ProviderCollection>::new(providers)
+                        .internal_error_anyhow("Just allocated ProviderCollection")?;
+
+                    ctx.assert_no_promises()?;
+
+                    let registry = ctx.take_state();
+
+                    registry
+                        .analysis_value_storage
+                        .set_result_value(providers)?;
+
+                    registry
+                };
+
+                declared_actions = Some(analysis_registry.num_declared_actions());
+                declared_artifacts = Some(analysis_registry.num_declared_artifacts());
+                let (_frozen_env, recorded_values) = analysis_registry.finalize(&env)?(env)?;
+                recorded_values
+            };
+
+            (
+                output,
+                buck2_data::AnalysisEnd {
+                    target: Some(buck2_data::analysis_end::Target::DynamicLambda(
+                        lambda.as_ref().static_fields.owner.to_proto().into(),
+                    )),
+                    rule: proto_rule,
+                    profile: None,
+                    declared_actions,
+                    declared_artifacts,
+                },
+            )
+        })
+        .await
+    }
+}
+
+pub(crate) async fn prepare_and_execute_lambda(
+    ctx: &mut DiceComputations<'_>,
+    cancellation: &CancellationContext<'_>,
+    lambda: OwnedRefFrozenRef<'_, FrozenDynamicLambdaParams>,
+    self_holder_key: DynamicLambdaResultsKey,
+    action_key: String,
+) -> buck2_error::Result<RecordedAnalysisValues> {
+    // This is a bit suboptimal: we wait for all artifacts to be ready in order to
+    // materialize any of them. However that is how we execute *all* local actions so in
+    // the grand scheme of things that's probably not a huge deal.
+    ensure_artifacts_built(&lambda.as_ref().static_fields.artifact_values, ctx).await?;
+
+    Ok(span_async_simple(
+        buck2_data::DynamicLambdaStart {
+            owner: Some(lambda.as_ref().static_fields.owner.to_proto().into()),
+        },
+        async move {
+            let (input_artifacts_materialized, resolved_dynamic_values) = span_async_simple(
+                buck2_data::DeferredPreparationStageStart {
+                    stage: Some(buck2_data::MaterializedArtifacts {}.into()),
+                },
+                ctx.try_compute2(
+                    |ctx| {
+                        Box::pin(materialize_inputs(
+                            &lambda.as_ref().static_fields.artifact_values,
+                            ctx,
+                        ))
+                    },
+                    |ctx| {
+                        Box::pin(resolve_dynamic_values(
+                            &lambda.as_ref().static_fields.dynamic_values,
+                            ctx,
+                        ))
+                    },
+                ),
+                buck2_data::DeferredPreparationStageEnd {},
+            )
+            .await?;
+
+            cancellation
+                .with_structured_cancellation(|observer| {
+                    execute_lambda(
+                        lambda,
+                        ctx,
+                        self_holder_key,
+                        action_key,
+                        resolved_dynamic_values,
+                        input_artifacts_materialized,
+                        ctx.global_data().get_digest_config(),
+                        observer,
+                    )
+                    .boxed()
+                })
+                .await
+        },
+        buck2_data::DeferredEvaluationEnd {},
+    )
+    .await?)
+}
+
+async fn ensure_artifacts_built(
+    materialized_artifacts: &IndexSet<Artifact>,
+    ctx: &mut DiceComputations<'_>,
+) -> anyhow::Result<()> {
+    if materialized_artifacts.is_empty() {
+        return Ok(());
+    }
+    ctx.try_compute_join(materialized_artifacts, |ctx, artifact| {
+        async move {
+            ctx.ensure_artifact_group(&ArtifactGroup::Artifact(artifact.dupe()))
+                .await
+        }
+        .boxed()
+    })
+    .await?;
+
+    Ok(())
+}
+
+/// Marker to indicate that the artifacts we pass to dynamic actions are materialized,
+/// so we do not forget to materialize them after refactoring.
+#[derive(Copy, Clone, Dupe)]
+pub struct InputArtifactsMaterialized(());
+
+async fn materialize_inputs(
+    materialized_artifacts: &IndexSet<Artifact>,
+    ctx: &mut DiceComputations<'_>,
+) -> anyhow::Result<InputArtifactsMaterialized> {
+    if materialized_artifacts.is_empty() {
+        return Ok(InputArtifactsMaterialized(()));
+    }
+
+    let artifact_fs = ctx.get_artifact_fs().await?;
+
+    let mut paths = Vec::with_capacity(materialized_artifacts.len());
+
+    for artifact in materialized_artifacts {
+        let path = artifact.resolve_path(&artifact_fs)?;
+        paths.push(path.clone());
+    }
+
+    ctx.per_transaction_data()
+        .get_materializer()
+        .ensure_materialized(paths)
+        .await?;
+
+    Ok(InputArtifactsMaterialized(()))
+}
+
+async fn resolve_dynamic_values(
+    dynamic_values: &IndexSet<DynamicValue>,
+    ctx: &mut DiceComputations<'_>,
+) -> anyhow::Result<HashMap<DynamicValue, FrozenProviderCollectionValue>> {
+    if dynamic_values.is_empty() {
+        return Ok(HashMap::new());
+    }
+
+    let providers = ctx
+        .try_compute_join(dynamic_values, |ctx, dynamic_value| {
+            Box::pin(async {
+                let result = dynamic_lambda_result(ctx, &dynamic_value.dynamic_lambda_results_key)
+                    .await?
+                    .analysis_values
+                    .provider_collection()?
+                    .to_owned();
+                anyhow::Ok((dynamic_value.dupe(), result))
+            })
+        })
+        .await?;
+
+    Ok(HashMap::from_iter(providers))
+}
+
+pub enum DynamicLambdaCtxDataSpec<'v> {
+    Old {
+        outputs: ValueOfUnchecked<'v, DictType<StarlarkArtifact, StarlarkDeclaredArtifact>>,
+        artifact_values: ValueOfUnchecked<'v, DictType<StarlarkArtifact, StarlarkArtifactValue>>,
+    },
+    New {
+        attr_values: Box<[(String, Value<'v>)]>,
+    },
+}
+
+/// Data used to construct an `AnalysisContext` or `BxlContext` for the dynamic lambda.
+pub struct DynamicLambdaCtxData<'v> {
+    pub lambda: &'v FrozenDynamicLambdaParams,
+    pub key: &'v BaseDeferredKey,
+    pub spec: DynamicLambdaCtxDataSpec<'v>,
+    pub digest_config: DigestConfig,
+    pub registry: AnalysisRegistry<'v>,
+}
+
+/// Prepare the dict of artifact values for dynamic actions.
+fn artifact_values<'v>(
+    artifact_values: &IndexSet<Artifact>,
+    _: InputArtifactsMaterialized,
+    artifact_fs: &ArtifactFs,
+    heap: &'v Heap,
+) -> anyhow::Result<ValueOfUnchecked<'v, DictType<StarlarkArtifact, StarlarkArtifactValue>>> {
+    let mut artifact_values_dict = Vec::with_capacity(artifact_values.len());
+    for x in artifact_values {
+        let k = StarlarkArtifact::new(x.dupe());
+        let path = x.get_path().resolve(artifact_fs)?;
+        // The `InputArtifactsMaterialized` marker indicates that the artifact is materialized.
+        let v = StarlarkArtifactValue::new(x.dupe(), path.to_owned(), artifact_fs.fs().dupe());
+        artifact_values_dict.push((k, v));
+    }
+    Ok(heap
+        .alloc_typed_unchecked(AllocDict(artifact_values_dict))
+        .cast())
+}
+
+/// Prepare the dict of output artifacts for dynamic actions.
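+///
+/// Each bound output is re-declared via `declare_dynamic_output`, and the resulting
+/// declared artifact is keyed by the original artifact so the lambda can look up the
+/// output it has to bind.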
+fn outputs<'v>(
+    outputs: &[BoundBuildArtifact],
+    registry: &mut AnalysisRegistry<'v>,
+    heap: &'v Heap,
+) -> anyhow::Result<ValueOfUnchecked<'v, DictType<StarlarkArtifact, StarlarkDeclaredArtifact>>> {
+    let mut outputs_dict = Vec::with_capacity(outputs.len());
+    for x in outputs {
+        let k = StarlarkArtifact::new(x.dupe().into_artifact());
+        let declared = registry.declare_dynamic_output(x.as_base_artifact())?;
+        let v = StarlarkDeclaredArtifact::new(None, declared, AssociatedArtifacts::new());
+        outputs_dict.push((k, v));
+    }
+
+    Ok(heap.alloc_typed_unchecked(AllocDict(outputs_dict)).cast())
+}
+
+fn new_attr_value<'v>(
+    value: &DynamicAttrValue<FrozenValue, BoundBuildArtifact>,
+    _input_artifacts_materialized: InputArtifactsMaterialized,
+    artifact_fs: &ArtifactFs,
+    registry: &mut AnalysisRegistry<'v>,
+    resolved_dynamic_values: &HashMap<DynamicValue, FrozenProviderCollectionValue>,
+    env: &'v Module,
+) -> anyhow::Result<Value<'v>> {
+    match value {
+        DynamicAttrValue::Output(artifact) => {
+            let declared = registry.declare_dynamic_output(artifact.as_base_artifact())?;
+            let artifact = env.heap().alloc_typed(StarlarkDeclaredArtifact::new(
+                None,
+                declared,
+                AssociatedArtifacts::new(),
+            ));
+            Ok(env.heap().alloc(StarlarkOutputArtifact::new(artifact)))
+        }
+        DynamicAttrValue::ArtifactValue(artifact) => {
+            let path = artifact.get_path().resolve(&artifact_fs)?;
+            // The `InputArtifactsMaterialized` marker indicates that the artifact is materialized.
+            Ok(env.heap().alloc(StarlarkArtifactValue::new(
+                Artifact::from(artifact.dupe()),
+                path.to_owned(),
+                artifact_fs.fs().dupe(),
+            )))
+        }
+        DynamicAttrValue::DynamicValue(v) => {
+            let v = resolved_dynamic_values
+                .get(v)
+                .internal_error_anyhow("Missing resolved dynamic value")?;
+            Ok(env.heap().alloc(StarlarkResolvedDynamicValue {
+                value: v.add_heap_ref_static(env.frozen_heap()),
+            }))
+        }
+        DynamicAttrValue::Value(v) => Ok(v.to_value()),
+        DynamicAttrValue::List(xs) => {
+            let xs = xs
+                .iter()
+                .map(|x| {
+                    new_attr_value(
+                        x,
+                        _input_artifacts_materialized,
+                        artifact_fs,
+                        registry,
+                        resolved_dynamic_values,
+                        env,
+                    )
+                })
+                .collect::<anyhow::Result<Vec<_>>>()?;
+            Ok(env.heap().alloc(AllocList(xs)))
+        }
+        DynamicAttrValue::Dict(xs) => {
+            let mut r = SmallMap::with_capacity(xs.len());
+            for (k, v) in xs {
+                let prev = r.insert_hashed(
+                    k.to_value().get_hashed().into_anyhow_result()?,
+                    new_attr_value(
+                        v,
+                        _input_artifacts_materialized,
+                        artifact_fs,
+                        registry,
+                        resolved_dynamic_values,
+                        env,
+                    )?,
+                );
+                if prev.is_some() {
+                    return Err(buck2_error_anyhow!([], "Duplicate key in dict"));
+                }
+            }
+            Ok(env.heap().alloc(AllocDict(r)))
+        }
+        DynamicAttrValue::Tuple(xs) => {
+            let xs = xs
+                .iter()
+                .map(|x| {
+                    new_attr_value(
+                        x,
+                        _input_artifacts_materialized,
+                        artifact_fs,
+                        registry,
+                        resolved_dynamic_values,
+                        env,
+                    )
+                })
+                .collect::<anyhow::Result<Vec<_>>>()?;
+            Ok(env.heap().alloc(AllocTuple(xs)))
+        }
+        DynamicAttrValue::Option(option) => match option {
+            Some(v) => new_attr_value(
+                v,
+                _input_artifacts_materialized,
+                artifact_fs,
+                registry,
+                resolved_dynamic_values,
+                env,
+            ),
+            None => Ok(Value::new_none()),
+        },
+    }
+}
+
+fn new_attr_values<'v>(
+    values: &DynamicAttrValues<FrozenValue, BoundBuildArtifact>,
+    callable: &FrozenStarlarkDynamicActionsCallable,
+    input_artifacts_materialized: InputArtifactsMaterialized,
+    artifact_fs: &ArtifactFs,
+    registry: &mut AnalysisRegistry<'v>,
+    resolved_dynamic_values: &HashMap<DynamicValue, FrozenProviderCollectionValue>,
+    env: &'v Module,
+) -> anyhow::Result<Box<[(String, Value<'v>)]>> {
+    if values.values.len() != callable.attrs.len() {
+        return Err(internal_error_anyhow!("Parameter count mismatch"));
+    }
+    callable
+        .attrs
+        .keys()
+        .zip(values.values.iter())
+        .map(|(name, value)| {
+            Ok((
+                name.clone(),
+                new_attr_value(
+                    value,
+                    input_artifacts_materialized,
+                    artifact_fs,
+                    registry,
+                    resolved_dynamic_values,
+                    env,
+                )?,
+            ))
+        })
+        .collect()
+}
+
+/// Sets up the data needed to create the dynamic lambda ctx and evaluate the lambda.
+pub fn dynamic_lambda_ctx_data<'v>(
+    dynamic_lambda: OwnedRefFrozenRef<'_, FrozenDynamicLambdaParams>,
+    self_key: DynamicLambdaResultsKey,
+    action_key: &str,
+    input_artifacts_materialized: InputArtifactsMaterialized,
+    resolved_dynamic_values: &HashMap<DynamicValue, FrozenProviderCollectionValue>,
+    artifact_fs: &ArtifactFs,
+    digest_config: DigestConfig,
+    env: &'v Module,
+) -> anyhow::Result<DynamicLambdaCtxData<'v>> {
+    let self_key = Arc::new(self_key);
+
+    if &dynamic_lambda.as_ref().static_fields.owner != self_key.owner() {
+        return Err(internal_error_anyhow!(
+            "Dynamic lambda owner `{}` does not match self key `{}`",
+            dynamic_lambda.as_ref().static_fields.owner,
+            self_key
+        ));
+    }
+
+    let dynamic_lambda = dynamic_lambda.add_heap_ref(env.frozen_heap());
+
+    let mut registry = AnalysisRegistry::new_from_owner_and_deferred(
+        dynamic_lambda.static_fields.execution_platform.dupe(),
+        DeferredHolderKey::DynamicLambda(self_key),
+        Some(Arc::from(action_key)),
+    )?;
+
+    let spec = match &dynamic_lambda.attr_values {
+        None => {
+            let artifact_values = artifact_values(
+                &dynamic_lambda.static_fields.artifact_values,
+                input_artifacts_materialized,
+                artifact_fs,
+                env.heap(),
+            )?;
+            let outputs = outputs(
+                &dynamic_lambda.static_fields.outputs,
+                &mut registry,
+                env.heap(),
+            )?;
+            if !dynamic_lambda.static_fields.dynamic_values.is_empty() {
+                return Err(internal_error_anyhow!(
+                    "Non-empty `dynamic_value` for `dynamic_output`"
+                ));
+            }
+            DynamicLambdaCtxDataSpec::Old {
+                outputs,
+                artifact_values,
+            }
+        }
+        Some((attr_values, callable)) => DynamicLambdaCtxDataSpec::New {
+            attr_values: new_attr_values(
+                attr_values,
+                callable.as_ref(),
+                input_artifacts_materialized,
+                artifact_fs,
+                &mut registry,
+                resolved_dynamic_values,
+                env,
+            )?,
+        },
+    };
+
+    Ok(DynamicLambdaCtxData {
+        lambda: dynamic_lambda,
+        spec,
+        key: &dynamic_lambda.static_fields.owner,
+        digest_config,
+        registry,
+    })
+}
diff --git a/app/buck2_action_impl/src/dynamic/dynamic_actions.rs b/app/buck2_action_impl/src/dynamic/dynamic_actions.rs
new file mode 100644
index 0000000000000..00a96a071c05e
--- /dev/null
+++ b/app/buck2_action_impl/src/dynamic/dynamic_actions.rs
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::cell::RefCell;
+
+use allocative::Allocative;
+use buck2_artifact::artifact::artifact_type::OutputArtifact;
+use starlark::any::ProvidesStaticType;
+use starlark::values::starlark_value;
+use starlark::values::AllocValue;
+use starlark::values::FrozenValueTyped;
+use starlark::values::Heap;
+use starlark::values::NoSerialize;
+use starlark::values::StarlarkValue;
+use starlark::values::Trace;
+use starlark::values::Value;
+
+use crate::dynamic::attrs::DynamicAttrValues;
+use crate::dynamic::dynamic_actions_callable::FrozenStarlarkDynamicActionsCallable;
+
+#[derive(Debug, Trace, Allocative)]
+pub(crate) struct StarlarkDynamicActionsData<'v> {
+    pub(crate) callable: FrozenValueTyped<'v, FrozenStarlarkDynamicActionsCallable>,
+    pub(crate) attr_values: DynamicAttrValues<Value<'v>, OutputArtifact>,
+}
+
+#[derive(
+    Debug,
+    NoSerialize,
+    ProvidesStaticType,
+    derive_more::Display,
+    Trace,
+    Allocative
+)]
+#[display("DynamicActions<...>")]
+pub(crate) struct StarlarkDynamicActions<'v> {
+    pub(crate) data: RefCell<Option<StarlarkDynamicActionsData<'v>>>,
+}
+
+#[starlark_value(type = "DynamicAction")]
+impl<'v> StarlarkValue<'v> for StarlarkDynamicActions<'v> {}
+
+impl<'v> AllocValue<'v> for StarlarkDynamicActions<'v> {
+    fn alloc_value(self, heap: &'v Heap) -> Value<'v> {
+        // No need to freeze: we unpack the contents of this struct
+        // in the `ctx.actions.dynamic_output` call.
+        heap.alloc_complex_no_freeze(self)
+    }
+}
diff --git a/app/buck2_action_impl/src/dynamic/dynamic_actions_callable.rs b/app/buck2_action_impl/src/dynamic/dynamic_actions_callable.rs
new file mode 100644
index 0000000000000..ae9a2907efb3e
--- /dev/null
+++ b/app/buck2_action_impl/src/dynamic/dynamic_actions_callable.rs
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::cell::OnceCell;
+use std::cell::RefCell;
+use std::sync::LazyLock;
+
+use allocative::Allocative;
+use anyhow::Context;
+use buck2_artifact::artifact::artifact_type::OutputArtifact;
+use buck2_build_api::interpreter::rule_defs::context::AnalysisActions;
+use buck2_build_api::interpreter::rule_defs::provider::ty::abstract_provider::AbstractProvider;
+use buck2_error::BuckErrorContext;
+use dupe::Dupe;
+use starlark::any::ProvidesStaticType;
+use starlark::eval::Arguments;
+use starlark::eval::Evaluator;
+use starlark::eval::ParametersSpec;
+use starlark::eval::ParametersSpecParam;
+use starlark::typing::ParamIsRequired;
+use starlark::typing::ParamSpec;
+use starlark::typing::Ty;
+use starlark::util::ArcStr;
+use starlark::values::list::ListType;
+use starlark::values::starlark_value;
+use starlark::values::type_repr::StarlarkTypeRepr;
+use starlark::values::typing::FrozenStarlarkCallable;
+use starlark::values::typing::StarlarkCallable;
+use starlark::values::typing::StarlarkCallableParamSpec;
+use starlark::values::AllocValue;
+use starlark::values::Freeze;
+use starlark::values::Freezer;
+use starlark::values::FrozenValue;
+use starlark::values::FrozenValueTyped;
+use starlark::values::Heap;
+use starlark::values::NoSerialize;
+use starlark::values::StarlarkValue;
+use starlark::values::Trace;
+use starlark::values::Value;
+use starlark_map::small_map::SmallMap;
+
+use crate::dynamic::attrs::DynamicAttrType;
+use crate::dynamic::attrs::DynamicAttrValues;
+use crate::dynamic::dynamic_actions::StarlarkDynamicActions;
+use crate::dynamic::dynamic_actions::StarlarkDynamicActionsData;
+
+pub(crate) struct DynamicActionsCallbackParamSpec;
+
+pub(crate) struct DynamicActionsCallbackParam {
+    pub(crate) name: &'static str,
+    pub(crate) ty: LazyLock<Ty>,
+}
+
+pub(crate) static P_ACTIONS: DynamicActionsCallbackParam = DynamicActionsCallbackParam {
+    name: "actions",
+    ty: LazyLock::new(AnalysisActions::starlark_type_repr),
+};
+
+impl StarlarkCallableParamSpec for DynamicActionsCallbackParamSpec {
+    fn params() -> ParamSpec {
+        ParamSpec::new_parts(
+            [],
+            [],
+            None,
+            [(
+                ArcStr::new_static(P_ACTIONS.name),
+                ParamIsRequired::Yes,
+                P_ACTIONS.ty.dupe(),
+            )],
+            Some(Ty::any()),
+        )
+        .unwrap()
+    }
+}
+
+pub(crate) type DynamicActionsCallbackReturnType = ListType<AbstractProvider>;
+
+#[derive(Debug, thiserror::Error)]
+enum DynamicActionCallableError {
+    #[error("DynamicActionCallable can be called only if frozen")]
+    NotFrozen,
+    #[error("DynamicActionCallable must be exported (assigned to a global variable)")]
+    NotExported,
+}
+
+/// Result of a `dynamic_actions` rule invocation.
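+///
+/// The callable is inert until it has been exported (assigned to a global) and frozen;
+/// calling it before that point fails with `DynamicActionCallableError::NotFrozen`.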
+
+/// Result of `dynamic_actions` rule invocation.
+#[derive(
+    Debug,
+    NoSerialize,
+    ProvidesStaticType,
+    Allocative,
+    derive_more::Display,
+    Trace
+)]
+#[display(
+    "DynamicActionCallable[{}]",
+    self.name.get().map(|s| s.as_str()).unwrap_or("(unbound)")
+)]
+pub(crate) struct DynamicActionsCallable<'v> {
+    pub(crate) self_ty: Ty,
+    pub(crate) implementation:
+        StarlarkCallable<'v, DynamicActionsCallbackParamSpec, DynamicActionsCallbackReturnType>,
+    pub(crate) attrs: SmallMap<String, DynamicAttrType>,
+    pub(crate) name: OnceCell<String>,
+}
+
+#[derive(
+    Debug,
+    NoSerialize,
+    ProvidesStaticType,
+    Allocative,
+    derive_more::Display
+)]
+#[display("DynamicActionsCallable[{}]", name)]
+pub struct FrozenStarlarkDynamicActionsCallable {
+    pub(crate) self_ty: Ty,
+    pub(crate) implementation:
+        FrozenStarlarkCallable<DynamicActionsCallbackParamSpec, DynamicActionsCallbackReturnType>,
+    pub(crate) attrs: SmallMap<String, DynamicAttrType>,
+    name: String,
+    signature: ParametersSpec<FrozenValue>,
+}
+
+#[starlark_value(type = "DynamicActionCallable")]
+impl<'v> StarlarkValue<'v> for DynamicActionsCallable<'v> {
+    type Canonical = FrozenStarlarkDynamicActionsCallable;
+
+    fn export_as(
+        &self,
+        variable_name: &str,
+        _eval: &mut Evaluator<'v, '_, '_>,
+    ) -> starlark::Result<()> {
+        // First wins.
+        self.name.get_or_init(|| variable_name.to_owned());
+        Ok(())
+    }
+
+    fn invoke(
+        &self,
+        _me: Value<'v>,
+        _args: &Arguments<'v, '_>,
+        _eval: &mut Evaluator<'v, '_, '_>,
+    ) -> starlark::Result<Value<'v>> {
+        Err(starlark::Error::new_other(
+            DynamicActionCallableError::NotFrozen,
+        ))
+    }
+
+    fn typechecker_ty(&self) -> Option<Ty> {
+        Some(self.self_ty.dupe())
+    }
+}
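`export_as` above implements a "first assignment wins" naming rule via `OnceCell`: the callable permanently takes the name of the first global it is bound to. A runnable sketch of just that behavior (the `Callable` struct and names here are illustrative, not buck2 API):

use std::cell::OnceCell;

struct Callable {
    name: OnceCell<String>,
}

fn main() {
    let c = Callable { name: OnceCell::new() };
    c.name.get_or_init(|| "my_dynamic_action".to_owned()); // first assignment
    c.name.get_or_init(|| "an_alias".to_owned());          // ignored: first wins
    assert_eq!(c.name.get().map(String::as_str), Some("my_dynamic_action"));
}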
+
+#[starlark_value(type = "DynamicActionCallable")]
+impl<'v> StarlarkValue<'v> for FrozenStarlarkDynamicActionsCallable {
+    type Canonical = Self;
+
+    fn invoke(
+        &self,
+        me: Value<'v>,
+        args: &Arguments<'v, '_>,
+        eval: &mut Evaluator<'v, '_, '_>,
+    ) -> starlark::Result<Value<'v>> {
+        let me = me
+            .unpack_frozen()
+            .internal_error_anyhow("me must be frozen")?;
+        let me = FrozenValueTyped::new_err(me)?;
+        let attr_values: DynamicAttrValues<Value<'v>, OutputArtifact> =
+            self.signature.parser(args, eval, |parser, _eval| {
+                let mut attr_values = Vec::with_capacity(self.attrs.len());
+                for (name, attr_ty) in &self.attrs {
+                    let value = attr_ty
+                        .coerce(parser.next()?)
+                        .with_context(|| format!("Error coercing attribute `{name}`"))?;
+                    attr_values.push(value);
+                }
+                Ok(DynamicAttrValues {
+                    values: attr_values.into_boxed_slice(),
+                })
+            })?;
+        Ok(eval.heap().alloc(StarlarkDynamicActions {
+            data: RefCell::new(Some(StarlarkDynamicActionsData {
+                callable: me,
+                attr_values,
+            })),
+        }))
+    }
+
+    fn typechecker_ty(&self) -> Option<Ty> {
+        Some(self.self_ty.dupe())
+    }
+}
+
+impl<'v> AllocValue<'v> for DynamicActionsCallable<'v> {
+    fn alloc_value(self, heap: &'v Heap) -> Value<'v> {
+        heap.alloc_complex(self)
+    }
+}
+
+impl<'v> Freeze for DynamicActionsCallable<'v> {
+    type Frozen = FrozenStarlarkDynamicActionsCallable;
+
+    fn freeze(self, freezer: &Freezer) -> anyhow::Result<Self::Frozen> {
+        let DynamicActionsCallable {
+            self_ty,
+            implementation,
+            name,
+            attrs,
+        } = self;
+
+        let name = name
+            .into_inner()
+            .context(DynamicActionCallableError::NotExported)?;
+
+        let signature = ParametersSpec::new_named_only(
+            &name,
+            attrs
+                .keys()
+                .map(|s| (s.as_str(), ParametersSpecParam::Required)),
+        );
+
+        Ok(FrozenStarlarkDynamicActionsCallable {
+            self_ty,
+            implementation: implementation.freeze(freezer)?,
+            name,
+            attrs,
+            signature,
+        })
+    }
+}
diff --git a/app/buck2_action_impl/src/dynamic/dynamic_actions_globals.rs b/app/buck2_action_impl/src/dynamic/dynamic_actions_globals.rs
new file mode 100644
index 0000000000000..69a6e69945034
--- /dev/null
+++ b/app/buck2_action_impl/src/dynamic/dynamic_actions_globals.rs
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::cell::OnceCell;
+use std::iter;
+
+use anyhow::Context;
+use buck2_error::buck2_error_anyhow;
+use buck2_error::BuckErrorContext;
+use starlark::environment::GlobalsBuilder;
+use starlark::starlark_module;
+use starlark::typing::ParamIsRequired;
+use starlark::typing::ParamSpec;
+use starlark::typing::Ty;
+use starlark::util::ArcStr;
+use starlark::values::starlark_value_as_type::StarlarkValueAsType;
+use starlark::values::type_repr::StarlarkTypeRepr;
+use starlark::values::typing::StarlarkCallableChecked;
+use starlark::StarlarkResultExt;
+use starlark_map::small_map::SmallMap;
+
+use crate::dynamic::attrs::DynamicAttrType;
+use crate::dynamic::attrs_starlark::StarlarkDynamicAttrType;
+use crate::dynamic::dynamic_actions::StarlarkDynamicActions;
+use crate::dynamic::dynamic_actions_callable::DynamicActionsCallable;
+use crate::dynamic::dynamic_actions_callable::DynamicActionsCallbackParamSpec;
+use crate::dynamic::dynamic_actions_callable::DynamicActionsCallbackReturnType;
+use crate::dynamic::dynamic_actions_callable::FrozenStarlarkDynamicActionsCallable;
+use crate::dynamic::dynamic_actions_callable::P_ACTIONS;
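Before the registration function itself, it may help to see the shape of what the frozen callable's `invoke` (above) does with its signature: every declared attribute becomes a required named parameter, and each supplied argument is run through that attribute's coercion. A toy, self-contained model of that parse-and-coerce loop; the `AttrTy`/`Coerced` types are invented for illustration:

use std::collections::BTreeMap;

#[derive(Debug, PartialEq)]
enum Coerced {
    Int(i64),
    Str(String),
}

enum AttrTy {
    Int,
    Str,
}

impl AttrTy {
    fn coerce(&self, raw: &str) -> Result<Coerced, String> {
        match self {
            AttrTy::Int => raw.parse().map(Coerced::Int).map_err(|e| e.to_string()),
            AttrTy::Str => Ok(Coerced::Str(raw.to_owned())),
        }
    }
}

// Every declared attr is a required named parameter; values are stored
// positionally in declaration order, as in `DynamicAttrValues`.
fn parse_named_only(
    attrs: &[(&str, AttrTy)],
    args: &BTreeMap<&str, &str>,
) -> Result<Vec<Coerced>, String> {
    let mut values = Vec::with_capacity(attrs.len());
    for (name, ty) in attrs {
        let raw = args
            .get(name)
            .ok_or_else(|| format!("missing named argument `{name}`"))?;
        let value = ty
            .coerce(raw)
            .map_err(|e| format!("Error coercing attribute `{name}`: {e}"))?;
        values.push(value);
    }
    Ok(values)
}

fn main() {
    let attrs = [("srcs", AttrTy::Str), ("level", AttrTy::Int)];
    let args = BTreeMap::from([("srcs", "foo.c"), ("level", "3")]);
    let parsed = parse_named_only(&attrs, &args).unwrap();
    assert_eq!(parsed[1], Coerced::Int(3));
}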
+
+#[starlark_module]
+pub(crate) fn register_dynamic_actions(globals: &mut GlobalsBuilder) {
+    /// Create a new dynamic action callable. The returned object will be callable,
+    /// and the result of calling it can be passed to `ctx.actions.dynamic_output_new`.
+    fn dynamic_actions<'v>(
+        #[starlark(require = named)] r#impl: StarlarkCallableChecked<
+            'v,
+            DynamicActionsCallbackParamSpec,
+            DynamicActionsCallbackReturnType,
+        >,
+        #[starlark(require = named)] attrs: SmallMap<String, &'v StarlarkDynamicAttrType>,
+    ) -> anyhow::Result<DynamicActionsCallable<'v>> {
+        if attrs.contains_key(P_ACTIONS.name) {
+            return Err(buck2_error_anyhow!([], "Cannot define `actions` attribute"));
+        }
+        let attrs: SmallMap<String, DynamicAttrType> = attrs
+            .into_iter()
+            .map(|(name, ty)| (name, ty.ty.clone()))
+            .collect();
+
+        let attr_args = attrs
+            .iter()
+            .map(|(name, ty)| (name.as_str(), ty.impl_param_ty()))
+            .collect::<Vec<_>>();
+
+        r#impl
+            .0
+            .check_callable_with(
+                [],
+                iter::once((P_ACTIONS.name, &*P_ACTIONS.ty))
+                    .chain(attr_args.iter().map(|(name, ty)| (*name, ty))),
+                None,
+                None,
+                &DynamicActionsCallbackReturnType::starlark_type_repr(),
+            )
+            .into_anyhow_result()
+            .context("`impl` function must be callable with given params")?;
+
+        let callable_ty = Ty::function(
+            ParamSpec::new_named_only(attrs.iter().map(|(name, ty)| {
+                (
+                    ArcStr::from(name.as_str()),
+                    ParamIsRequired::Yes,
+                    ty.callable_param_ty(),
+                )
+            }))
+            .into_anyhow_result()
+            .internal_error("Signature must be correct")?,
+            StarlarkDynamicActions::starlark_type_repr(),
+        );
+
+        Ok(DynamicActionsCallable {
+            self_ty: callable_ty,
+            implementation: r#impl.to_unchecked(),
+            name: OnceCell::new(),
+            attrs,
+        })
+    }
+
+    const DynamicActions: StarlarkValueAsType<StarlarkDynamicActions> = StarlarkValueAsType::new();
+    const DynamicActionsCallable: StarlarkValueAsType<FrozenStarlarkDynamicActionsCallable> =
+        StarlarkValueAsType::new();
+}
diff --git a/app/buck2_action_impl/src/dynamic/dynamic_value.rs b/app/buck2_action_impl/src/dynamic/dynamic_value.rs
new file mode 100644
index 0000000000000..d47adc2e1ae42
--- /dev/null
+++ b/app/buck2_action_impl/src/dynamic/dynamic_value.rs
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::hash::Hash;
+
+use allocative::Allocative;
+use buck2_build_api::dynamic_value::DynamicValue;
+use starlark::any::ProvidesStaticType;
+use starlark::environment::GlobalsBuilder;
+use starlark::starlark_module;
+use starlark::values::starlark_value;
+use starlark::values::starlark_value_as_type::StarlarkValueAsType;
+use starlark::values::AllocValue;
+use starlark::values::Heap;
+use starlark::values::NoSerialize;
+use starlark::values::StarlarkValue;
+use starlark::values::Value;
+use starlark::values::ValueTyped;
+use starlark_map::StarlarkHasher;
+
+#[derive(
+    Debug,
+    ProvidesStaticType,
+    derive_more::Display,
+    Allocative,
+    NoSerialize
+)]
+#[display("DynamicValue<{}>", self.dynamic_value)]
+pub struct StarlarkDynamicValue {
+    pub(crate) dynamic_value: DynamicValue,
+}
+
+#[starlark_value(type = "DynamicValue", StarlarkTypeRepr, UnpackValue)]
+impl<'v> StarlarkValue<'v> for StarlarkDynamicValue {
+    fn write_hash(&self, hasher: &mut StarlarkHasher) -> starlark::Result<()> {
+        self.dynamic_value.hash(hasher);
+        Ok(())
+    }
+
+    fn equals(&self, other: Value<'v>) -> starlark::Result<bool> {
+        let Some(other) = ValueTyped::<StarlarkDynamicValue>::new(other) else {
+            return Ok(false);
+        };
+        Ok(self.dynamic_value == other.dynamic_value)
+    }
+}
+
+impl<'v> AllocValue<'v> for StarlarkDynamicValue {
+    fn alloc_value(self, heap: &'v Heap) -> Value<'v> {
+        heap.alloc_simple(self)
+    }
+}
+
+#[starlark_module]
+pub(crate) fn register_dynamic_value(globals: &mut GlobalsBuilder) {
+    const DynamicValue: StarlarkValueAsType<StarlarkDynamicValue> = StarlarkValueAsType::new();
+}
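`equals` above only compares against another `StarlarkDynamicValue`, answering `false` for every other type rather than erroring. The same shape, sketched with `std::any::Any` in place of starlark's typed-value machinery (the `DynValue` type is invented for illustration):

use std::any::Any;

#[derive(PartialEq)]
struct DynValue(u64);

fn starlark_equals(me: &DynValue, other: &dyn Any) -> bool {
    match other.downcast_ref::<DynValue>() {
        Some(other) => me == other,
        None => false, // a string, an int, etc. is simply not equal
    }
}

fn main() {
    assert!(starlark_equals(&DynValue(1), &DynValue(1)));
    assert!(!starlark_equals(&DynValue(1), &"something else"));
}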
diff --git a/app/buck2_action_impl/src/dynamic/params.rs b/app/buck2_action_impl/src/dynamic/params.rs
new file mode 100644
index 0000000000000..d921c78f9cfea
--- /dev/null
+++ b/app/buck2_action_impl/src/dynamic/params.rs
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use allocative::Allocative;
+use buck2_artifact::artifact::artifact_type::Artifact;
+use buck2_artifact::artifact::artifact_type::BoundBuildArtifact;
+use buck2_build_api::dynamic_value::DynamicValue;
+use buck2_build_api::interpreter::rule_defs::plugins::AnalysisPlugins;
+use buck2_build_api::interpreter::rule_defs::plugins::FrozenAnalysisPlugins;
+use buck2_core::base_deferred_key::BaseDeferredKey;
+use buck2_core::execution_types::execution::ExecutionPlatformResolution;
+use buck2_error::BuckErrorContext;
+use gazebo::prelude::OptionExt;
+use indexmap::IndexSet;
+use starlark::any::ProvidesStaticType;
+use starlark::values::structs::StructRef;
+use starlark::values::typing::FrozenStarlarkCallable;
+use starlark::values::typing::StarlarkCallable;
+use starlark::values::Freeze;
+use starlark::values::Freezer;
+use starlark::values::FrozenValue;
+use starlark::values::FrozenValueOfUnchecked;
+use starlark::values::FrozenValueTyped;
+use starlark::values::Trace;
+use starlark::values::Value;
+use starlark::values::ValueOfUnchecked;
+use starlark::values::ValueTypedComplex;
+
+use crate::dynamic::attrs::DynamicAttrValues;
+use crate::dynamic::dynamic_actions_callable::FrozenStarlarkDynamicActionsCallable;
+
+#[derive(Allocative, Debug)]
+pub(crate) struct DynamicLambdaStaticFields {
+    /// The owner that defined this lambda.
+    pub(crate) owner: BaseDeferredKey,
+    /// Input artifacts required to be materialized by the lambda.
+    pub(crate) artifact_values: IndexSet<Artifact>,
+    /// Dynamic values I depend on.
+    pub(crate) dynamic_values: IndexSet<DynamicValue>,
+    /// Things I produce.
+    pub(crate) outputs: Box<[BoundBuildArtifact]>,
+    /// Execution platform inherited from the owner to use for actions.
+    pub(crate) execution_platform: ExecutionPlatformResolution,
+}
+
+#[derive(Allocative, Trace, Debug, ProvidesStaticType)]
+pub(crate) struct DynamicLambdaParams<'v> {
+    pub(crate) attributes: Option<ValueOfUnchecked<'v, StructRef<'v>>>,
+    pub(crate) plugins: Option<ValueTypedComplex<'v, AnalysisPlugins<'v>>>,
+    pub(crate) lambda: StarlarkCallable<'v>,
+    pub(crate) attr_values: Option<(
+        DynamicAttrValues<Value<'v>, BoundBuildArtifact>,
+        FrozenValueTyped<'v, FrozenStarlarkDynamicActionsCallable>,
+    )>,
+    pub(crate) static_fields: DynamicLambdaStaticFields,
+}
+
+#[derive(Allocative, Debug, ProvidesStaticType)]
+pub struct FrozenDynamicLambdaParams {
+    pub(crate) attributes: Option<FrozenValueOfUnchecked<'static, StructRef<'static>>>,
+    pub(crate) plugins: Option<FrozenValueTyped<'static, FrozenAnalysisPlugins>>,
+    pub(crate) lambda: FrozenStarlarkCallable,
+    pub attr_values: Option<(
+        DynamicAttrValues<FrozenValue, BoundBuildArtifact>,
+        FrozenValueTyped<'static, FrozenStarlarkDynamicActionsCallable>,
+    )>,
+    pub(crate) static_fields: DynamicLambdaStaticFields,
+}
+
+impl FrozenDynamicLambdaParams {
+    pub(crate) fn attributes<'v>(
+        &'v self,
+    ) -> anyhow::Result<Option<ValueOfUnchecked<'v, StructRef<'v>>>> {
+        let Some(attributes) = self.attributes else {
+            return Ok(None);
+        };
+        Ok(Some(attributes.to_value().cast()))
+    }
+
+    pub(crate) fn plugins<'v>(
+        &'v self,
+    ) -> anyhow::Result<Option<ValueTypedComplex<'v, AnalysisPlugins<'v>>>> {
+        let Some(plugins) = self.plugins else {
+            return Ok(None);
+        };
+        Ok(Some(
+            ValueTypedComplex::new(plugins.to_value())
+                .internal_error_anyhow("plugins must be AnalysisPlugins")?,
+        ))
+    }
+
+    pub fn lambda<'v>(&'v self) -> Value<'v> {
+        self.lambda.0.to_value()
+    }
+}
+
+impl<'v> Freeze for DynamicLambdaParams<'v> {
+    type Frozen = FrozenDynamicLambdaParams;
+
+    fn freeze(self, freezer: &Freezer) -> anyhow::Result<Self::Frozen> {
+        let attr_values = match self.attr_values {
+            None => None,
+            Some((attr_values, callable)) => Some((
+                attr_values.freeze(freezer)?,
+                // Change lifetime.
+                FrozenValueTyped::new_err(callable.to_frozen_value())?,
+            )),
+        };
+        Ok(FrozenDynamicLambdaParams {
+            attributes: self
+                .attributes
+                .try_map(|a| anyhow::Ok(a.freeze(freezer)?.cast()))?,
+            plugins: self.plugins.freeze(freezer)?,
+            lambda: self.lambda.freeze(freezer)?,
+            attr_values,
+            static_fields: self.static_fields,
+        })
+    }
+}
diff --git a/app/buck2_action_impl/src/dynamic/resolved_dynamic_value.rs b/app/buck2_action_impl/src/dynamic/resolved_dynamic_value.rs
new file mode 100644
index 0000000000000..fff3e0dd106e5
--- /dev/null
+++ b/app/buck2_action_impl/src/dynamic/resolved_dynamic_value.rs
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use allocative::Allocative;
+use buck2_build_api::interpreter::rule_defs::provider::collection::FrozenProviderCollection;
+use starlark::any::ProvidesStaticType;
+use starlark::environment::GlobalsBuilder;
+use starlark::environment::Methods;
+use starlark::environment::MethodsBuilder;
+use starlark::environment::MethodsStatic;
+use starlark::starlark_module;
+use starlark::values::starlark_value;
+use starlark::values::starlark_value_as_type::StarlarkValueAsType;
+use starlark::values::AllocValue;
+use starlark::values::FrozenValueTyped;
+use starlark::values::Heap;
+use starlark::values::NoSerialize;
+use starlark::values::StarlarkValue;
+use starlark::values::Value;
+use starlark::values::ValueTyped;
+
+#[derive(
+    Debug,
+    derive_more::Display,
+    Allocative,
+    NoSerialize,
+    ProvidesStaticType
+)]
+#[display("ResolvedDynamicValue<{}>", self.value)]
+pub struct StarlarkResolvedDynamicValue {
+    pub(crate) value: FrozenValueTyped<'static, FrozenProviderCollection>,
+}
+
+#[starlark_value(type = "ResolvedDynamicValue")]
+impl<'v> StarlarkValue<'v> for StarlarkResolvedDynamicValue
+where
+    Self: ProvidesStaticType<'v>,
+{
+    fn get_methods() -> Option<&'static Methods> {
+        static RES: MethodsStatic = MethodsStatic::new();
+        RES.methods(resolved_dynamic_value_methods)
+    }
+}
+
+impl<'v> AllocValue<'v> for StarlarkResolvedDynamicValue {
+    fn alloc_value(self, heap: &'v Heap) -> Value<'v> {
+        heap.alloc_simple(self)
+    }
+}
+
+#[starlark_module]
+fn resolved_dynamic_value_methods(method: &mut MethodsBuilder) {
+    /// Get providers from the resolved dynamic value.
+    #[starlark(attribute)]
+    fn providers<'v>(
+        this: ValueTyped<'v, StarlarkResolvedDynamicValue>,
+    ) -> anyhow::Result<FrozenValueTyped<'v, FrozenProviderCollection>> {
+        Ok(this.value)
+    }
+}
+
+#[starlark_module]
+pub(crate) fn register_resolved_dynamic_value(globals: &mut GlobalsBuilder) {
+    const ResolvedDynamicValue: StarlarkValueAsType<StarlarkResolvedDynamicValue> =
+        StarlarkValueAsType::new();
+}
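The storage module that follows keeps per-analysis lambda params behind a type-erased trait object and recovers the concrete type with an `Any` downcast (`as_any_mut`/`downcast_mut`). A self-contained sketch of that pattern, with invented `ParamsStorage`/`MyStorage` types standing in for the buck2 traits:

use std::any::Any;
use std::collections::HashMap;

trait ParamsStorage: Any {
    fn as_any_mut(&mut self) -> &mut dyn Any;
}

#[derive(Default)]
struct MyStorage {
    lambda_params: HashMap<u32, String>,
}

impl ParamsStorage for MyStorage {
    fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }
}

// The concrete module downcasts the erased storage back to its own type;
// a `None` here would mean some other implementation was registered.
fn get_mut(storage: &mut dyn ParamsStorage) -> Option<&mut MyStorage> {
    storage.as_any_mut().downcast_mut::<MyStorage>()
}

fn main() {
    let mut erased: Box<dyn ParamsStorage> = Box::new(MyStorage::default());
    let storage = get_mut(erased.as_mut()).expect("wrong type for lambda params storage");
    storage.lambda_params.insert(0, "lambda".to_owned());
    assert_eq!(storage.lambda_params.len(), 1);
}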
diff --git a/app/buck2_action_impl/src/dynamic/storage.rs b/app/buck2_action_impl/src/dynamic/storage.rs
new file mode 100644
index 0000000000000..c56dd217a1d7b
--- /dev/null
+++ b/app/buck2_action_impl/src/dynamic/storage.rs
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use allocative::Allocative;
+use buck2_artifact::artifact::build_artifact::BuildArtifact;
+use buck2_artifact::deferred::key::DeferredHolderKey;
+use buck2_artifact::dynamic::DynamicLambdaIndex;
+use buck2_artifact::dynamic::DynamicLambdaResultsKey;
+use buck2_build_api::analysis::registry::AnalysisValueStorage;
+use buck2_build_api::analysis::registry::FrozenAnalysisValueStorage;
+use buck2_build_api::dynamic::storage::DynamicLambdaParamStorages;
+use buck2_build_api::dynamic::storage::DynamicLambdaParamsStorage;
+use buck2_build_api::dynamic::storage::FrozenDynamicLambdaParamsStorage;
+use buck2_build_api::dynamic::storage::DYNAMIC_LAMBDA_PARAMS_STORAGES;
+use buck2_error::internal_error_anyhow;
+use buck2_error::BuckErrorContext;
+use dupe::Dupe;
+use starlark::any::AnyLifetime;
+use starlark::any::ProvidesStaticType;
+use starlark::values::Freeze;
+use starlark::values::Freezer;
+use starlark::values::OwnedRefFrozenRef;
+use starlark::values::Trace;
+use starlark::values::Tracer;
+use starlark_map::small_map::SmallMap;
+
+use crate::dynamic::params::DynamicLambdaParams;
+use crate::dynamic::params::FrozenDynamicLambdaParams;
+
+#[derive(Debug, Allocative, ProvidesStaticType)]
+pub(crate) struct DynamicLambdaParamsStorageImpl<'v> {
+    self_key: DeferredHolderKey,
+    lambda_params: SmallMap<DynamicLambdaResultsKey, DynamicLambdaParams<'v>>,
+}
+
+#[derive(Debug, Allocative, ProvidesStaticType)]
+pub(crate) struct FrozenDynamicLambdaParamsStorageImpl {
+    self_key: DeferredHolderKey,
+    lambda_params: SmallMap<DynamicLambdaResultsKey, FrozenDynamicLambdaParams>,
+}
+
+impl<'v> DynamicLambdaParamsStorageImpl<'v> {
+    pub(crate) fn get<'a>(
+        storage: &'a mut AnalysisValueStorage<'v>,
+    ) -> anyhow::Result<&'a mut DynamicLambdaParamsStorageImpl<'v>> {
+        storage
+            .lambda_params
+            .as_any_mut()
+            .downcast_mut()
+            .internal_error_anyhow("Wrong type for lambda params storage")
+    }
+
+    pub fn next_dynamic_actions_key(&self) -> anyhow::Result<DynamicLambdaResultsKey> {
+        let index = DynamicLambdaIndex::new(self.lambda_params.len().try_into()?);
+        Ok(DynamicLambdaResultsKey::new(self.self_key.dupe(), index))
+    }
+
+    pub fn set_dynamic_actions(
+        &mut self,
+        key: DynamicLambdaResultsKey,
+        lambda_params: DynamicLambdaParams<'v>,
+    ) -> anyhow::Result<()> {
+        if &self.self_key != key.holder_key() {
+            return Err(internal_error_anyhow!(
+                "Wrong lambda owner: expecting `{}`, got `{}`",
+                self.self_key,
+                key
+            ));
+        }
+        self.lambda_params.insert(key, lambda_params);
+        Ok(())
+    }
+}
+
+impl FrozenDynamicLambdaParamsStorageImpl {
+    pub(crate) fn lookup_lambda<'f>(
+        storage: OwnedRefFrozenRef<'f, FrozenAnalysisValueStorage>,
+        key: &DynamicLambdaResultsKey,
+    ) -> anyhow::Result<OwnedRefFrozenRef<'f, FrozenDynamicLambdaParams>> {
+        if key.holder_key() != &storage.as_ref().self_key {
+            return Err(internal_error_anyhow!(
+                "Wrong owner for lambda: expecting `{}`, got `{}`",
+                storage.as_ref().self_key,
+                key
+            ));
+        }
+        storage.try_map_result(|s| {
+            s.lambda_params
+                .as_any()
+                .downcast_ref::<FrozenDynamicLambdaParamsStorageImpl>()
+                .internal_error_anyhow("Wrong type for lambda params storage")?
+                .lambda_params
+                .get(key)
+                .with_internal_error_anyhow(|| format!("missing lambda `{}`", key))
+        })
+    }
+}
+
+unsafe impl<'v> Trace<'v> for DynamicLambdaParamsStorageImpl<'v> {
+    fn trace(&mut self, tracer: &Tracer<'v>) {
+        let DynamicLambdaParamsStorageImpl {
+            self_key,
+            lambda_params,
+        } = self;
+        tracer.trace_static(self_key);
+        for (k, v) in lambda_params.iter_mut() {
+            tracer.trace_static(k);
+            v.trace(tracer);
+        }
+    }
+}
+
+impl<'v> DynamicLambdaParamsStorage<'v> for DynamicLambdaParamsStorageImpl<'v> {
+    fn as_any_mut(&mut self) -> &mut dyn AnyLifetime<'v> {
+        self
+    }
+
+    fn freeze(
+        self: Box<Self>,
+        freezer: &Freezer,
+    ) -> anyhow::Result<Box<dyn FrozenDynamicLambdaParamsStorage>> {
+        let DynamicLambdaParamsStorageImpl {
+            self_key,
+            lambda_params,
+        } = *self;
+        let lambda_params = lambda_params
+            .into_iter_hashed()
+            .map(|(k, v)| Ok((k, v.freeze(freezer)?)))
+            .collect::<anyhow::Result<_>>()?;
+        Ok(Box::new(FrozenDynamicLambdaParamsStorageImpl {
+            self_key,
+            lambda_params,
+        }))
+    }
+}
+
+impl FrozenDynamicLambdaParamsStorage for FrozenDynamicLambdaParamsStorageImpl {
+    fn as_any(&self) -> &dyn AnyLifetime<'static> {
+        self
+    }
+
+    fn iter_dynamic_lambda_outputs(&self) -> Box<dyn Iterator<Item = BuildArtifact> + Send + '_> {
+        Box::new(self.lambda_params.values().flat_map(|v| {
+            v.static_fields
+                .outputs
+                .iter()
+                .map(|a| a.as_base_artifact().dupe())
+        }))
+    }
+}
+
+pub(crate) fn init_dynamic_lambda_params_storages() {
+    struct Impl;
+
+    impl DynamicLambdaParamStorages for Impl {
+        fn new_dynamic_lambda_params_storage<'v>(
+            &self,
+            self_key: DeferredHolderKey,
+        ) -> Box<dyn DynamicLambdaParamsStorage<'v>> {
+            Box::new(DynamicLambdaParamsStorageImpl {
+                self_key,
+                lambda_params: SmallMap::new(),
+            })
+        }
+
+        fn new_frozen_dynamic_lambda_params_storage(
+            &self,
+            self_key: DeferredHolderKey,
+        ) -> Box<dyn FrozenDynamicLambdaParamsStorage> {
+            Box::new(FrozenDynamicLambdaParamsStorageImpl {
+                self_key,
+                lambda_params: SmallMap::new(),
+            })
+        }
+    }
+
+    DYNAMIC_LAMBDA_PARAMS_STORAGES.init(&Impl);
+}
diff --git a/app/buck2_action_impl/src/lib.rs b/app/buck2_action_impl/src/lib.rs
index 02be6f3da19fb..cacb934176629 100644
--- a/app/buck2_action_impl/src/lib.rs
+++ b/app/buck2_action_impl/src/lib.rs
@@ -7,14 +7,17 @@
  * of this source tree.
  */
 
+#![feature(error_generic_member_access)]
 #![feature(try_blocks)]
-#![feature(type_alias_impl_trait)]
 #![feature(impl_trait_in_assoc_type)]
+#![feature(used_with_arg)]
 
 use std::sync::Once;
 
 mod actions;
 mod context;
+pub mod dynamic;
+mod starlark_defs;
 
 pub fn init_late_bindings() {
     static ONCE: Once = Once::new();
@@ -22,5 +25,8 @@ pub fn init_late_bindings() {
         actions::impls::run::audit_dep_files::init_audit_dep_files();
         actions::impls::run::dep_files::init_flush_dep_files();
         context::init_analysis_action_methods_actions();
+        dynamic::calculation::init_dynamic_lambda_calculation();
+        dynamic::storage::init_dynamic_lambda_params_storages();
+        starlark_defs::init_register_buck2_action_impl_globals();
     });
 }
diff --git a/app/buck2_action_impl/src/starlark_defs.rs b/app/buck2_action_impl/src/starlark_defs.rs
new file mode 100644
index 0000000000000..dc27370e7f464
--- /dev/null
+++ b/app/buck2_action_impl/src/starlark_defs.rs
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */ + +use buck2_interpreter::downstream_crate_starlark_defs::REGISTER_BUCK2_ACTION_IMPL_GLOBALS; + +use crate::dynamic::attrs_starlark::register_dynamic_attrs; +use crate::dynamic::dynamic_actions_globals::register_dynamic_actions; +use crate::dynamic::dynamic_value::register_dynamic_value; +use crate::dynamic::resolved_dynamic_value::register_resolved_dynamic_value; + +pub(crate) fn init_register_buck2_action_impl_globals() { + REGISTER_BUCK2_ACTION_IMPL_GLOBALS.init(|globals| { + register_dynamic_actions(globals); + register_dynamic_value(globals); + register_dynamic_attrs(globals); + register_resolved_dynamic_value(globals); + }); +} diff --git a/app/buck2_action_impl_tests/BUCK b/app/buck2_action_impl_tests/BUCK index 295b00d26fd24..443d41d7e967f 100644 --- a/app/buck2_action_impl_tests/BUCK +++ b/app/buck2_action_impl_tests/BUCK @@ -1,12 +1,11 @@ -load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") +load("@fbcode_macros//build_defs:rust_unittest.bzl", "rust_unittest") oncall("build_infra") -rust_library( +rust_unittest( name = "buck2_action_impl_tests", srcs = glob(["src/**/*.rs"]), - test_deps = [ + deps = [ "fbsource//third-party/rust:anyhow", "fbsource//third-party/rust:ctor", "fbsource//third-party/rust:indoc", diff --git a/app/buck2_action_impl_tests/Cargo.toml b/app/buck2_action_impl_tests/Cargo.toml index 4d5e955060942..cf2a27c87c6d7 100644 --- a/app/buck2_action_impl_tests/Cargo.toml +++ b/app/buck2_action_impl_tests/Cargo.toml @@ -1,10 +1,12 @@ [package] +description = "Tests for buck2_action_impl" +edition = "2021" +license = { workspace = true } name = "buck2_action_impl_tests" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Tests for buck2_action_impl" -[dependencies] +[dev-dependencies] anyhow = { workspace = true } ctor = { workspace = true } indoc = { workspace = true } diff --git a/app/buck2_action_impl_tests/src/context.rs b/app/buck2_action_impl_tests/src/context.rs index af7dd3b7ac9bb..be3efc94744ed 100644 --- a/app/buck2_action_impl_tests/src/context.rs +++ b/app/buck2_action_impl_tests/src/context.rs @@ -14,11 +14,9 @@ use buck2_build_api::interpreter::rule_defs::register_rule_defs; use buck2_core::base_deferred_key::BaseDeferredKey; use buck2_core::configuration::data::ConfigurationData; use buck2_core::execution_types::execution::ExecutionPlatformResolution; -use buck2_core::provider::label::ConfiguredProvidersLabel; -use buck2_core::provider::label::ProvidersName; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use buck2_execute::digest_config::DigestConfig; -use buck2_interpreter::types::configured_providers_label::StarlarkConfiguredProvidersLabel; +use buck2_interpreter::file_type::StarlarkFileType; use dupe::Dupe; use indoc::indoc; use maplit::hashmap; @@ -28,10 +26,10 @@ use starlark::environment::Module; use starlark::eval::Evaluator; use starlark::eval::ReturnFileLoader; use starlark::syntax::AstModule; -use starlark::syntax::Dialect; use starlark::values::structs::AllocStruct; use starlark::values::UnpackValue; use starlark::values::Value; +use starlark::StarlarkResultExt; fn run_ctx_test( content: &str, @@ -50,7 +48,12 @@ fn run_ctx_test( { let mut eval = Evaluator::new(&func_mod); - let ast = AstModule::parse("foo.bzl", full_content, &Dialect::Extended).unwrap(); + let ast = AstModule::parse( + "foo.bzl", + full_content, + &StarlarkFileType::Bzl.dialect(false), + ) + .unwrap(); 
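For orientation, here is a minimal stand-alone version of the parse-then-eval flow used by `run_ctx_test`, assuming the open-source `starlark` crate with the stock `Dialect::Standard` in place of buck2's `StarlarkFileType::Bzl.dialect(false)`; error types differ across starlark-rust releases, so errors are converted through strings here:

use starlark::environment::{Globals, Module};
use starlark::eval::Evaluator;
use starlark::syntax::{AstModule, Dialect};

fn main() -> anyhow::Result<()> {
    // Parse the source text into an AST under a chosen dialect.
    let ast = AstModule::parse("demo.star", "x = 1 + 2".to_owned(), &Dialect::Standard)
        .map_err(|e| anyhow::anyhow!(e.to_string()))?;
    // Evaluate the module against the standard globals.
    let globals = Globals::standard();
    let module = Module::new();
    let mut eval = Evaluator::new(&module);
    eval.eval_module(ast, &globals)
        .map_err(|e| anyhow::anyhow!(e.to_string()))?;
    Ok(())
}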
eval.eval_module(ast, &globals).unwrap(); }; let frozen_func_mod = func_mod.freeze()?; @@ -69,27 +72,27 @@ fn run_ctx_test( BaseDeferredKey::TargetLabel(label.dupe()), ExecutionPlatformResolution::unspecified(), )?; - let attributes = eval.heap().alloc(AllocStruct([("name", "some_name")])); + let attributes = eval + .heap() + .alloc_typed_unchecked(AllocStruct([("name", "some_name")])) + .cast(); let plugins = eval .heap() .alloc_typed(AnalysisPlugins::new(SmallMap::new())) .into(); - let ctx = eval.heap().alloc(AnalysisContext::new( + let ctx = eval.heap().alloc(AnalysisContext::prepare( eval.heap(), - attributes, - Some( - eval.heap() - .alloc_typed(StarlarkConfiguredProvidersLabel::new( - ConfiguredProvidersLabel::new(label, ProvidersName::Default), - )), - ), - plugins, + Some(attributes), + Some(label), + Some(plugins), registry, DigestConfig::testing_default(), )); - let returned = eval.eval_function(test_function, &[ctx], &[]); + let returned = eval + .eval_function(test_function, &[ctx], &[]) + .into_anyhow_result(); result_handler(returned) } @@ -121,7 +124,7 @@ fn declare_output_declares_outputs() -> anyhow::Result<()> { ); run_ctx_test(content, |ret| { - let a = <(&str, &str)>::unpack_value(ret.unwrap()).unwrap(); + let a = <(&str, &str)>::unpack_value(ret.unwrap()).unwrap().unwrap(); assert_eq!("bar.cpp", a.0); assert_eq!("foo/bar.cpp", a.1); Ok(()) @@ -139,7 +142,7 @@ fn declare_output_with_prefix() -> anyhow::Result<()> { ); run_ctx_test(content, |ret| { - let a = <(&str, &str)>::unpack_value(ret.unwrap()).unwrap(); + let a = <(&str, &str)>::unpack_value(ret.unwrap()).unwrap().unwrap(); assert_eq!("bar.cpp", a.0); assert_eq!("foo/bar.cpp", a.1); Ok(()) diff --git a/app/buck2_action_impl_tests/src/lib.rs b/app/buck2_action_impl_tests/src/lib.rs index 353ed01469aef..706a036596a02 100644 --- a/app/buck2_action_impl_tests/src/lib.rs +++ b/app/buck2_action_impl_tests/src/lib.rs @@ -8,6 +8,7 @@ */ #![cfg(test)] +#![feature(error_generic_member_access)] mod context; diff --git a/app/buck2_action_metadata_proto/BUCK b/app/buck2_action_metadata_proto/BUCK index 2deacbbe6ad59..5334fc9d1d6ab 100644 --- a/app/buck2_action_metadata_proto/BUCK +++ b/app/buck2_action_metadata_proto/BUCK @@ -1,5 +1,4 @@ load("@fbcode//buck2:proto_defs.bzl", "rust_protobuf_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -7,7 +6,6 @@ rust_protobuf_library( name = "buck2_action_metadata_proto", srcs = glob(["src/**/*.rs"]), build_script = "build.rs", - doctests = False, # FIXME protos = ["action_metadata.proto"], deps = [ "fbsource//third-party/rust:tonic", diff --git a/app/buck2_action_metadata_proto/Cargo.toml b/app/buck2_action_metadata_proto/Cargo.toml index d7c8c84fde0b0..046b973da909c 100644 --- a/app/buck2_action_metadata_proto/Cargo.toml +++ b/app/buck2_action_metadata_proto/Cargo.toml @@ -2,11 +2,12 @@ name = "buck2_action_metadata_proto" edition = "2021" +license = { workspace = true } +repository = { workspace = true } version = "0.1.0" [dependencies] prost = { workspace = true } -prost-types = { workspace = true } tonic = { workspace = true } [build-dependencies] diff --git a/app/buck2_action_metadata_proto/src/lib.rs b/app/buck2_action_metadata_proto/src/lib.rs index 5d5aa5510b03b..6007097d75289 100644 --- a/app/buck2_action_metadata_proto/src/lib.rs +++ b/app/buck2_action_metadata_proto/src/lib.rs @@ -7,6 +7,8 @@ * of this source tree. 
*/ +#![feature(error_generic_member_access)] + tonic::include_proto!("action_metadata"); pub static REMOTE_DEP_FILE_KEY: &str = "remote_dep_file"; diff --git a/app/buck2_analysis/BUCK b/app/buck2_analysis/BUCK index a42be19c95d09..a2a4f93584b46 100644 --- a/app/buck2_analysis/BUCK +++ b/app/buck2_analysis/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -13,18 +12,18 @@ rust_library( "fbsource//third-party/rust:either", "fbsource//third-party/rust:futures", "fbsource//third-party/rust:smallvec", - "fbsource//third-party/rust:thiserror", "//buck2/allocative/allocative:allocative", "//buck2/app/buck2_artifact:buck2_artifact", "//buck2/app/buck2_build_api:buck2_build_api", - "//buck2/app/buck2_common:buck2_common", "//buck2/app/buck2_core:buck2_core", "//buck2/app/buck2_data:buck2_data", + "//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_events:buck2_events", "//buck2/app/buck2_execute:buck2_execute", "//buck2/app/buck2_interpreter:buck2_interpreter", "//buck2/app/buck2_node:buck2_node", "//buck2/app/buck2_query:buck2_query", + "//buck2/app/buck2_util:buck2_util", "//buck2/dice/dice:dice", "//buck2/gazebo/dupe:dupe", "//buck2/gazebo/gazebo:gazebo", diff --git a/app/buck2_analysis/Cargo.toml b/app/buck2_analysis/Cargo.toml index c82b39939bc44..a34d11ddfae4f 100644 --- a/app/buck2_analysis/Cargo.toml +++ b/app/buck2_analysis/Cargo.toml @@ -1,8 +1,10 @@ [package] +description = "Execute rule function" +edition = "2021" +license = { workspace = true } name = "buck2_analysis" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Execute rule function" [dependencies] anyhow = { workspace = true } @@ -11,7 +13,6 @@ derive_more = { workspace = true } either = { workspace = true } futures = { workspace = true } smallvec = { workspace = true } -thiserror = { workspace = true } allocative = { workspace = true } dice = { workspace = true } @@ -22,11 +23,12 @@ starlark_map = { workspace = true } buck2_artifact = { workspace = true } buck2_build_api = { workspace = true } -buck2_common = { workspace = true } buck2_core = { workspace = true } buck2_data = { workspace = true } +buck2_error = { workspace = true } buck2_events = { workspace = true } buck2_execute = { workspace = true } buck2_interpreter = { workspace = true } buck2_node = { workspace = true } buck2_query = { workspace = true } +buck2_util = { workspace = true } diff --git a/app/buck2_analysis/src/analysis/mod.rs b/app/buck2_analysis/src/analysis.rs similarity index 100% rename from app/buck2_analysis/src/analysis/mod.rs rename to app/buck2_analysis/src/analysis.rs diff --git a/app/buck2_analysis/src/analysis/calculation.rs b/app/buck2_analysis/src/analysis/calculation.rs index 1274a2acb712f..c2916517efea4 100644 --- a/app/buck2_analysis/src/analysis/calculation.rs +++ b/app/buck2_analysis/src/analysis/calculation.rs @@ -20,22 +20,26 @@ use buck2_build_api::analysis::calculation::RuleAnalysisCalculation; use buck2_build_api::analysis::calculation::EVAL_ANALYSIS_QUERY; use buck2_build_api::analysis::calculation::RULE_ANALYSIS_CALCULATION; use buck2_build_api::analysis::AnalysisResult; -use buck2_build_api::keep_going; -use buck2_common::result::SharedResult; -use buck2_common::result::ToUnsharedResultExt; +use buck2_build_api::keep_going::KeepGoing; use buck2_core::configuration::compatibility::MaybeCompatible; use buck2_core::provider::label::ConfiguredProvidersLabel; use 
buck2_core::target::configured_target_label::ConfiguredTargetLabel; +use buck2_data::error::ErrorTag; use buck2_data::ToProtoMessage; +use buck2_error::internal_error_anyhow; +use buck2_error::BuckErrorContext; use buck2_events::dispatch::async_record_root_spans; +use buck2_events::dispatch::record_root_spans; use buck2_events::dispatch::span_async; +use buck2_events::dispatch::span_async_simple; use buck2_events::span::SpanId; -use buck2_interpreter::dice::starlark_profiler::GetStarlarkProfilerInstrumentation; use buck2_interpreter::load_module::InterpreterCalculation; -use buck2_interpreter::starlark_profiler::StarlarkProfileDataAndStats; -use buck2_interpreter::starlark_profiler::StarlarkProfileModeOrInstrumentation; +use buck2_interpreter::starlark_profiler::config::GetStarlarkProfilerInstrumentation; +use buck2_interpreter::starlark_profiler::data::StarlarkProfileDataAndStats; +use buck2_interpreter::starlark_profiler::mode::StarlarkProfileMode; use buck2_node::attrs::attr_type::query::ResolvedQueryLiterals; use buck2_node::nodes::configured::ConfiguredTargetNode; +use buck2_node::nodes::configured::ConfiguredTargetNodeRef; use buck2_node::nodes::configured_frontend::ConfiguredTargetNodeCalculation; use buck2_node::rule_type::RuleType; use buck2_node::rule_type::StarlarkRuleType; @@ -46,23 +50,14 @@ use dice::DiceComputations; use dice::Key; use dupe::Dupe; use dupe::IterDupedExt; -use futures::stream::FuturesOrdered; -use futures::stream::FuturesUnordered; -use futures::StreamExt; +use futures::FutureExt; use smallvec::SmallVec; -use starlark::eval::ProfileMode; -use crate::analysis::env::get_user_defined_rule_impl; +use crate::analysis::env::get_user_defined_rule_spec; use crate::analysis::env::run_analysis; -use crate::analysis::env::RuleImplFunction; +use crate::analysis::env::RuleSpec; use crate::attrs::resolve::ctx::AnalysisQueryResult; -#[derive(Debug, thiserror::Error)] -enum AnalysisCalculationError { - #[error("Internal error: literal `{0}` not found in `deps`")] - LiteralNotFoundInDeps(String), -} - struct RuleAnalysisCalculationInstance; #[derive( @@ -75,50 +70,49 @@ struct RuleAnalysisCalculationInstance; PartialEq, Allocative )] -#[display(fmt = "{}", "_0")] +#[display("{}", _0)] pub struct AnalysisKey(pub ConfiguredTargetLabel); pub(crate) fn init_rule_analysis_calculation() { RULE_ANALYSIS_CALCULATION.init(&RuleAnalysisCalculationInstance); } +#[async_trait] +impl Key for AnalysisKey { + type Value = buck2_error::Result>; + async fn compute( + &self, + ctx: &mut DiceComputations, + _cancellation: &CancellationContext, + ) -> Self::Value { + Ok(get_analysis_result(ctx, &self.0) + .await + .with_context(|| format!("Error running analysis for `{}`", &self.0))?) + } + + fn equality(_: &Self::Value, _: &Self::Value) -> bool { + // analysis result is not comparable + // TODO consider if we want analysis result to be eq + false + } +} + #[async_trait] impl RuleAnalsysisCalculationImpl for RuleAnalysisCalculationInstance { async fn get_analysis_result( &self, - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, target: &ConfiguredTargetLabel, ) -> anyhow::Result> { - #[async_trait] - impl Key for AnalysisKey { - type Value = SharedResult>; - async fn compute( - &self, - ctx: &mut DiceComputations, - _cancellation: &CancellationContext, - ) -> Self::Value { - let profile_mode = ctx.get_profile_mode_for_intermediate_analysis().await?; - Ok(get_analysis_result(ctx, &self.0, &profile_mode) - .await - .with_context(|| format!("Error running analysis for `{}`", &self.0))?) 
- } - - fn equality(_: &Self::Value, _: &Self::Value) -> bool { - // analysis result is not comparable - // TODO consider if we want analysis result to be eq - false - } - } - ctx.compute(&AnalysisKey(target.dupe())) .await? - .unshared_error() + .map_err(anyhow::Error::from) } } pub async fn resolve_queries( - ctx: &DiceComputations, - configured_node: &ConfiguredTargetNode, + ctx: &mut DiceComputations<'_>, + configured_node: ConfiguredTargetNodeRef<'_>, ) -> anyhow::Result>> { let mut queries = configured_node.queries().peekable(); @@ -126,59 +120,69 @@ pub async fn resolve_queries( return Ok(Default::default()); } - span_async( - buck2_data::AnalysisStageStart { - stage: Some(buck2_data::analysis_stage_start::Stage::ResolveQueries(())), - }, - async { - ( - resolve_queries_impl(ctx, configured_node, queries).await, - buck2_data::AnalysisStageEnd {}, - ) + span_async_simple( + buck2_data::AnalysisResolveQueriesStart { + standard_target: Some(configured_node.label().as_proto().into()), }, + resolve_queries_impl(ctx, configured_node, queries), + buck2_data::AnalysisResolveQueriesEnd {}, ) .await } async fn resolve_queries_impl( - ctx: &DiceComputations, - configured_node: &ConfiguredTargetNode, - queries: impl Iterator)>, + ctx: &mut DiceComputations<'_>, + configured_node: ConfiguredTargetNodeRef<'_>, + queries: impl IntoIterator)>, ) -> anyhow::Result>> { let deps: TargetSet<_> = configured_node.deps().duped().collect(); - let query_results = - futures::future::try_join_all(queries.map(|(query, resolved_literals_labels)| { - let ctx = ctx; - let deps = &deps; - async move { - let mut resolved_literals = - HashMap::with_capacity(resolved_literals_labels.0.len()); - for (literal, label) in resolved_literals_labels.0 { - let node = deps.get(label.target()).ok_or_else(|| { - AnalysisCalculationError::LiteralNotFoundInDeps(literal.clone()) - })?; - resolved_literals.insert(literal, node.dupe()); - } - - let result = (EVAL_ANALYSIS_QUERY.get()?)(ctx, &query, resolved_literals).await?; - - // analysis for all the deps in the query result should already have been run since they must - // be in our dependency graph, and so we don't worry about parallelizing these lookups. - let mut query_results = Vec::new(); - for node in result.iter() { - let label = node.label(); - query_results.push(( - label.dupe(), - ctx.get_analysis_result(label) - .await? - .require_compatible()? - .provider_collection, + let query_results = ctx + .try_compute_join( + queries, + |ctx, + (query, resolved_literals_labels): ( + String, + ResolvedQueryLiterals, + )| { + let deps = &deps; + async move { + let mut resolved_literals = + HashMap::with_capacity(resolved_literals_labels.0.len()); + for (literal, label) in resolved_literals_labels.0 { + let node = deps.get(label.target()).with_internal_error_anyhow(|| { + format!("Literal `{literal}` not found in `deps`") + })?; + resolved_literals.insert(literal, node.dupe()); + } + + let result = + (EVAL_ANALYSIS_QUERY.get()?)(ctx, &query, resolved_literals).await?; + + // analysis for all the deps in the query result should already have been run since they must + // be in our dependency graph, and so we don't worry about parallelizing these lookups. + let mut query_results = Vec::new(); + for node in result.iter() { + let label = node.label(); + query_results.push(( + label.dupe(), + ctx.get_analysis_result(label) + .await? + .require_compatible()? + .providers()? 
+ .to_owned(), + )) + } + + anyhow::Ok(( + query.to_owned(), + Arc::new(AnalysisQueryResult { + result: query_results, + }), )) } - - anyhow::Ok((query.to_owned(), Arc::new(query_results))) - } - })) + .boxed() + }, + ) .await?; let query_results: HashMap<_, _> = query_results.into_iter().collect(); @@ -186,39 +190,44 @@ async fn resolve_queries_impl( } pub async fn get_dep_analysis<'v>( - configured_node: &'v ConfiguredTargetNode, - ctx: &DiceComputations, + configured_node: ConfiguredTargetNodeRef<'v>, + ctx: &mut DiceComputations<'_>, ) -> anyhow::Result> { - keep_going::try_join_all( - ctx, - configured_node - .deps() - .map(async move |dep| { - let res = ctx - .get_analysis_result(dep.label()) - .await - .and_then(|v| v.require_compatible()); - res.map(|x| (dep.label(), x)) - }) - .collect::>(), - ) + KeepGoing::try_compute_join_all(ctx, configured_node.deps(), |ctx, dep| { + async move { + let res = ctx + .get_analysis_result(dep.label()) + .await + .and_then(|v| v.require_compatible()); + res.map(|x| (dep.label(), x)) + } + .boxed() + }) .await } -pub async fn get_rule_impl( - ctx: &DiceComputations, +pub async fn get_rule_spec( + ctx: &mut DiceComputations<'_>, func: &StarlarkRuleType, -) -> anyhow::Result { +) -> anyhow::Result { let module = ctx .get_loaded_module_from_import_path(&func.import_path) .await?; - Ok(get_user_defined_rule_impl(module.env().dupe(), func)) + Ok(get_user_defined_rule_spec(module.env().dupe(), func)) } async fn get_analysis_result( - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, + target: &ConfiguredTargetLabel, +) -> anyhow::Result> { + get_analysis_result_inner(ctx, target) + .await + .tag_anyhow(ErrorTag::Analysis) +} + +async fn get_analysis_result_inner( + ctx: &mut DiceComputations<'_>, target: &ConfiguredTargetLabel, - profile_mode: &StarlarkProfileModeOrInstrumentation, ) -> anyhow::Result> { let configured_node: MaybeCompatible = ctx.get_configured_target_node(target).await?; @@ -229,16 +238,24 @@ async fn get_analysis_result( MaybeCompatible::Compatible(configured_node) => configured_node, }; - let configured_node = &configured_node; - let mut dep_analysis = get_dep_analysis(configured_node, ctx).await?; - - let now = Instant::now(); - - let (res, spans) = async_record_root_spans(async move { - let func = configured_node.rule_type(); - match func { - RuleType::Starlark(func) => { - let rule_impl = get_rule_impl(ctx, func).await?; + let configured_node = configured_node.as_ref(); + + let ((res, now), spans): ((anyhow::Result<_>, Instant), _) = match configured_node.rule_type() { + RuleType::Starlark(func) => { + let (dep_analysis, query_results, profile_mode) = ctx + .try_compute3( + |ctx| get_dep_analysis(configured_node, ctx).boxed(), + |ctx| resolve_queries(ctx, configured_node).boxed(), + |ctx| { + ctx.get_profile_mode_for_analysis(configured_node.label()) + .boxed() + }, + ) + .await?; + + let now = Instant::now(); + let (res, spans) = async_record_root_spans(async { + let rule_spec = get_rule_spec(ctx, func).await?; let start_event = buck2_data::AnalysisStart { target: Some(target.as_proto().into()), rule: func.to_string(), @@ -246,36 +263,33 @@ async fn get_analysis_result( span_async(start_event, async { let mut profile = None; + let mut declared_artifacts = None; + let mut declared_actions = None; let result: anyhow::Result<_> = try { - let query_results = resolve_queries(ctx, configured_node).await?; - - let result = span_async( + let result = span_async_simple( buck2_data::AnalysisStageStart { stage: 
Some(buck2_data::analysis_stage_start::Stage::EvaluateRule( (), )), }, - async { - ( - run_analysis( - ctx, - target, - dep_analysis, - query_results, - configured_node.execution_platform_resolution(), - &rule_impl, - configured_node, - profile_mode, - ) - .await, - buck2_data::AnalysisStageEnd {}, - ) - }, + run_analysis( + ctx, + target, + dep_analysis, + query_results, + configured_node.execution_platform_resolution(), + &rule_spec, + configured_node, + &profile_mode, + ), + buck2_data::AnalysisStageEnd {}, ) .await?; - profile = Some(make_analysis_profile(&result)); + profile = Some(make_analysis_profile(&result)?); + declared_artifacts = Some(result.num_declared_artifacts); + declared_actions = Some(result.num_declared_actions); MaybeCompatible::Compatible(result) }; @@ -286,18 +300,35 @@ async fn get_analysis_result( target: Some(target.as_proto().into()), rule: func.to_string(), profile, + declared_actions, + declared_artifacts, }, ) }) .await - } - RuleType::Forward => { - assert!(dep_analysis.len() == 1); - Ok(MaybeCompatible::Compatible(dep_analysis.pop().unwrap().1)) - } + }) + .await; + + ((res, now), spans) } - }) - .await; + RuleType::Forward => { + let mut dep_analysis = get_dep_analysis(configured_node, ctx).await?; + let now = Instant::now(); + let (res, spans) = record_root_spans(|| { + let one_dep_analysis = dep_analysis + .pop() + .internal_error_anyhow("Forward node analysis produced no results")?; + if !dep_analysis.is_empty() { + return Err(internal_error_anyhow!( + "Forward node analysis produced more than one result" + )); + } + Ok(MaybeCompatible::Compatible(one_dep_analysis.1)) + }); + + ((res, now), spans) + } + }; ctx.store_evaluation_data(AnalysisKeyActivationData { duration: now.elapsed(), @@ -307,86 +338,85 @@ async fn get_analysis_result( res } -fn make_analysis_profile(res: &AnalysisResult) -> buck2_data::AnalysisProfile { - let heap = res.providers().value().owner(); +fn make_analysis_profile(res: &AnalysisResult) -> anyhow::Result { + let heap = res.providers()?.owner(); - buck2_data::AnalysisProfile { + Ok(buck2_data::AnalysisProfile { starlark_allocated_bytes: heap.allocated_bytes() as u64, starlark_available_bytes: heap.available_bytes() as u64, - } -} - -#[derive(Debug, thiserror::Error)] -enum ProfileAnalysisError { - #[error("recursive analysis configured incorrectly (internal error)")] - RecursiveProfileConfiguredIncorrectly, -} - -/// Run get_analysis_result but discard the results (public outside the `analysis` module, unlike -/// get_analysis_result) -pub async fn profile_analysis( - ctx: &DiceComputations, - target: &ConfiguredTargetLabel, - profile_mode: &ProfileMode, -) -> anyhow::Result> { - get_analysis_result( - ctx, - target, - &StarlarkProfileModeOrInstrumentation::Profile(profile_mode.dupe()), - ) - .await? - .require_compatible()? - .profile_data - .context("profile_data not set (internal error)") + }) } -fn all_deps(node: ConfiguredTargetNode) -> LabelIndexedSet { - let mut stack = vec![node]; +fn all_deps(nodes: &[ConfiguredTargetNode]) -> LabelIndexedSet { + let mut stack = nodes.to_vec(); let mut visited = LabelIndexedSet::new(); + let mut result = LabelIndexedSet::new(); while let Some(node) = stack.pop() { if visited.insert(node.dupe()) { + match node.rule_type() { + RuleType::Starlark(_) => { + result.insert(node.dupe()); + } + RuleType::Forward => { + // No starlark code ran on forward node. 
+ } + } + stack.extend(node.deps().duped()); } } - visited + result } -pub async fn profile_analysis_recursively( - ctx: &DiceComputations, - target: &ConfiguredTargetLabel, +pub async fn profile_analysis( + ctx: &mut DiceComputations<'_>, + targets: &[ConfiguredTargetLabel], ) -> anyhow::Result { // Self check. - let profile_mode = ctx.get_profile_mode_for_intermediate_analysis().await?; - if !matches!( - profile_mode, - StarlarkProfileModeOrInstrumentation::Profile(_) - ) { - return Err(ProfileAnalysisError::RecursiveProfileConfiguredIncorrectly.into()); + for target in targets { + let profile_mode = ctx.get_profile_mode_for_analysis(target).await?; + if !matches!(profile_mode, StarlarkProfileMode::Profile(_)) { + return Err(internal_error_anyhow!( + "recursive analysis configured incorrectly" + )); + } } - let node = ctx - .get_configured_target_node(target) - .await? - .require_compatible()?; - - let all_deps = all_deps(node); - - let mut futures = all_deps - .iter() - .map(|node| ctx.get_analysis_result(node.label())) - .collect::>(); - - let mut profile_datas: Vec> = Vec::new(); - while let Some(result) = futures.next().await { - profile_datas.push( - result? - .require_compatible()? - .profile_data - .context("profile_data not set (internal error)")?, - ); - } + let nodes: Vec = ctx + .try_compute_join(targets.iter(), |ctx, target| { + async move { + let node = ctx + .get_configured_target_node(target) + .await? + .require_compatible()?; + anyhow::Ok(node) + } + .boxed() + }) + .await?; + + let all_deps = all_deps(&nodes); + + let profile_datas = ctx + .try_compute_join(all_deps.iter(), |ctx, node| { + async move { + let result = ctx + .get_analysis_result(node.label()) + .await? + .require_compatible()?; + // This may be `None` if we are running profiling for a subset of the targets. 
+ anyhow::Ok(result.profile_data) + } + .boxed() + }) + .await?; - StarlarkProfileDataAndStats::merge(profile_datas.iter().map(|x| &**x)) + StarlarkProfileDataAndStats::merge( + profile_datas + .iter() + .filter_map(|o| o.as_ref()) + .map(|x| &**x), + ) } pub struct AnalysisKeyActivationData { diff --git a/app/buck2_analysis/src/analysis/env.rs b/app/buck2_analysis/src/analysis/env.rs index 0d7c21278b6cf..8b6dc230ed79d 100644 --- a/app/buck2_analysis/src/analysis/env.rs +++ b/app/buck2_analysis/src/analysis/env.rs @@ -8,50 +8,60 @@ */ use std::collections::HashMap; -use std::future::Future; use std::sync::Arc; use anyhow::Context; use buck2_build_api::analysis::registry::AnalysisRegistry; use buck2_build_api::analysis::AnalysisResult; -use buck2_build_api::deferred::types::DeferredTable; use buck2_build_api::interpreter::rule_defs::cmd_args::value::FrozenCommandLineArg; use buck2_build_api::interpreter::rule_defs::context::AnalysisContext; use buck2_build_api::interpreter::rule_defs::provider::builtin::template_placeholder_info::FrozenTemplatePlaceholderInfo; +use buck2_build_api::interpreter::rule_defs::provider::builtin::validation_info::FrozenValidationInfo; +use buck2_build_api::interpreter::rule_defs::provider::collection::FrozenProviderCollection; use buck2_build_api::interpreter::rule_defs::provider::collection::FrozenProviderCollectionValue; +use buck2_build_api::interpreter::rule_defs::provider::collection::FrozenProviderCollectionValueRef; use buck2_build_api::interpreter::rule_defs::provider::collection::ProviderCollection; -use buck2_common::result::SharedResult; +use buck2_build_api::validation::transitive_validations::TransitiveValidations; +use buck2_build_api::validation::transitive_validations::TransitiveValidationsData; use buck2_core::base_deferred_key::BaseDeferredKey; use buck2_core::execution_types::execution::ExecutionPlatformResolution; use buck2_core::provider::label::ConfiguredProvidersLabel; -use buck2_core::provider::label::ProvidersName; use buck2_core::target::configured_target_label::ConfiguredTargetLabel; use buck2_core::unsafe_send_future::UnsafeSendFuture; +use buck2_error::starlark_error::from_starlark; +use buck2_error::BuckErrorContext; use buck2_events::dispatch::get_dispatcher; use buck2_execute::digest_config::HasDigestConfig; use buck2_interpreter::dice::starlark_provider::with_starlark_eval_provider; use buck2_interpreter::print_handler::EventDispatcherPrintHandler; -use buck2_interpreter::starlark_profiler::StarlarkProfileModeOrInstrumentation; -use buck2_interpreter::starlark_profiler::StarlarkProfiler; -use buck2_interpreter::starlark_profiler::StarlarkProfilerOrInstrumentation; -use buck2_interpreter::types::configured_providers_label::StarlarkConfiguredProvidersLabel; +use buck2_interpreter::soft_error::Buck2StarlarkSoftErrorHandler; +use buck2_interpreter::starlark_profiler::data::ProfileTarget; +use buck2_interpreter::starlark_profiler::mode::StarlarkProfileMode; +use buck2_interpreter::starlark_profiler::profiler::StarlarkProfiler; +use buck2_interpreter::starlark_profiler::profiler::StarlarkProfilerOpt; +use buck2_interpreter::types::rule::FROZEN_PROMISE_ARTIFACT_MAPPINGS_GET_IMPL; use buck2_interpreter::types::rule::FROZEN_RULE_GET_IMPL; -use buck2_node::nodes::configured::ConfiguredTargetNode; +use buck2_node::nodes::configured::ConfiguredTargetNodeRef; use buck2_node::rule_type::StarlarkRuleType; use dice::DiceComputations; use dupe::Dupe; +use futures::Future; use starlark::environment::FrozenModule; use starlark::environment::Module; 
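The `all_deps` traversal above is an iterative DFS that walks through `Forward` nodes but only collects nodes whose rules actually ran Starlark. The same traversal, sketched over toy node types (everything here is invented for illustration):

use std::collections::HashSet;

#[derive(Clone, Copy)]
enum RuleType {
    Starlark,
    Forward,
}

struct Node {
    id: u32,
    rule: RuleType,
    deps: Vec<u32>,
}

fn all_starlark_deps(nodes: &[Node], roots: &[u32]) -> Vec<u32> {
    let by_id = |id: u32| nodes.iter().find(|n| n.id == id).unwrap();
    let mut stack: Vec<u32> = roots.to_vec();
    let mut visited = HashSet::new();
    let mut result = Vec::new();
    while let Some(id) = stack.pop() {
        if visited.insert(id) {
            let node = by_id(id);
            if matches!(node.rule, RuleType::Starlark) {
                result.push(id);
            }
            // Forward nodes are traversed but not collected:
            // no Starlark code ran on them.
            stack.extend(node.deps.iter().copied());
        }
    }
    result
}

fn main() {
    let nodes = vec![
        Node { id: 0, rule: RuleType::Forward, deps: vec![1] },
        Node { id: 1, rule: RuleType::Starlark, deps: vec![] },
    ];
    assert_eq!(all_starlark_deps(&nodes, &[0]), vec![1]);
}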
use starlark::eval::Evaluator; +use starlark::values::FrozenValueTyped; +use starlark::values::OwnedFrozenRef; use starlark::values::Value; use starlark::values::ValueTyped; +use starlark::values::ValueTypedComplex; +use starlark_map::small_map::SmallMap; use crate::analysis::plugins::plugins_to_starlark_value; use crate::attrs::resolve::ctx::AnalysisQueryResult; use crate::attrs::resolve::ctx::AttrResolutionContext; use crate::attrs::resolve::node_to_attrs_struct::node_to_attrs_struct; -#[derive(thiserror::Error, Debug)] +#[derive(buck2_error::Error, Debug)] enum AnalysisError { #[error( "Analysis context was missing a query result, this shouldn't be possible. Query was `{0}`" @@ -78,7 +88,7 @@ impl<'v> AttrResolutionContext<'v> for RuleAnalysisAttrResolutionContext<'v> { fn get_dep( &self, target: &ConfiguredProvidersLabel, - ) -> anyhow::Result { + ) -> anyhow::Result> { get_dep(&self.dep_analysis_results, target, self.module) } @@ -93,7 +103,7 @@ impl<'v> AttrResolutionContext<'v> for RuleAnalysisAttrResolutionContext<'v> { )) } - fn resolve_query(&self, query: &str) -> SharedResult> { + fn resolve_query(&self, query: &str) -> buck2_error::Result> { resolve_query(&self.query_results, query, self.module) } @@ -103,17 +113,16 @@ impl<'v> AttrResolutionContext<'v> for RuleAnalysisAttrResolutionContext<'v> { } pub fn get_dep<'v>( - dep_analysis_results: &HashMap<&'v ConfiguredTargetLabel, FrozenProviderCollectionValue>, + dep_analysis_results: &HashMap<&'_ ConfiguredTargetLabel, FrozenProviderCollectionValue>, target: &ConfiguredProvidersLabel, module: &'v Module, -) -> anyhow::Result { +) -> anyhow::Result> { match dep_analysis_results.get(target.target()) { - None => Err(AnalysisError::MissingDep(target.clone()).into()), + None => Err(AnalysisError::MissingDep(target.dupe()).into()), Some(x) => { let x = x.lookup_inner(target)?; // IMPORTANT: Anything given back to the user must be kept alive - module.frozen_heap().add_reference(x.value().owner()); - Ok(x.dupe()) + Ok(x.add_heap_ref(module.frozen_heap())) } } } @@ -145,11 +154,11 @@ pub fn resolve_query<'v>( query_results: &HashMap>, query: &str, module: &'v Module, -) -> SharedResult> { +) -> buck2_error::Result> { match query_results.get(query) { None => Err(anyhow::anyhow!(AnalysisError::MissingQuery(query.to_owned())).into()), Some(x) => { - for (_, y) in x.iter() { + for (_, y) in x.result.iter() { // IMPORTANT: Anything given back to the user must be kept alive module.frozen_heap().add_reference(y.value().owner()); } @@ -158,77 +167,68 @@ pub fn resolve_query<'v>( } } -pub trait RuleImplFunction: Sync { +pub trait RuleSpec: Sync { fn invoke<'v>( &self, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ctx: ValueTyped<'v, AnalysisContext<'v>>, ) -> anyhow::Result>; + + fn promise_artifact_mappings<'v>( + &self, + eval: &mut Evaluator<'v, '_, '_>, + ) -> anyhow::Result>>; } /// Container for the environment that analysis implementation functions should run in struct AnalysisEnv<'a> { - impl_function: &'a dyn RuleImplFunction, - deps: HashMap<&'a ConfiguredTargetLabel, FrozenProviderCollectionValue>, + rule_spec: &'a dyn RuleSpec, + deps: Vec<(&'a ConfiguredTargetLabel, AnalysisResult)>, query_results: HashMap>, execution_platform: &'a ExecutionPlatformResolution, label: ConfiguredTargetLabel, } pub(crate) async fn run_analysis<'a>( - dice: &DiceComputations, + dice: &'a mut DiceComputations<'_>, label: &ConfiguredTargetLabel, results: Vec<(&'a ConfiguredTargetLabel, AnalysisResult)>, query_results: HashMap>, 
execution_platform: &'a ExecutionPlatformResolution, - impl_function: &'a dyn RuleImplFunction, - node: &ConfiguredTargetNode, - profile_mode: &StarlarkProfileModeOrInstrumentation, + rule_spec: &'a dyn RuleSpec, + node: ConfiguredTargetNodeRef<'a>, + profile_mode: &'a StarlarkProfileMode, ) -> anyhow::Result { - let analysis_env = AnalysisEnv::new( - label, - results, + let analysis_env = AnalysisEnv { + rule_spec, + deps: results, query_results, execution_platform, - impl_function, - )?; + label: label.dupe(), + }; run_analysis_with_env(dice, analysis_env, node, profile_mode).await } -impl<'a> AnalysisEnv<'a> { - /// Create a new `AnalysisEnv`, ensuring that all heaps are kept alive that need to be - fn new( - label: &ConfiguredTargetLabel, - results: Vec<(&'a ConfiguredTargetLabel, AnalysisResult)>, - query_results: HashMap>, - execution_platform: &'a ExecutionPlatformResolution, - impl_function: &'a dyn RuleImplFunction, - ) -> anyhow::Result { - Ok(AnalysisEnv { - impl_function, - deps: get_deps_from_analysis_results(results)?, - query_results, - execution_platform, - label: label.dupe(), - }) - } -} - -pub fn get_deps_from_analysis_results<'v>( - results: Vec<(&'v ConfiguredTargetLabel, AnalysisResult)>, -) -> anyhow::Result> { +pub fn get_deps_from_analysis_results( + results: Vec<(&ConfiguredTargetLabel, AnalysisResult)>, +) -> anyhow::Result> { results .into_iter() - .map(|(label, result)| Ok((label, result.providers().dupe()))) + .map(|(label, result)| Ok((label, result.providers()?.to_owned()))) .collect::>>() } -fn run_analysis_with_env<'a>( - dice: &'a DiceComputations, +// Used to express that the impl Future below captures multiple named lifetimes. +// See https://github.com/rust-lang/rust/issues/34511#issuecomment-373423999 for more details. 
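The `Captures` trait defined next is the standard workaround for `impl Trait` not capturing a lifetime that appears only in an argument type: naming the lifetime in an extra bound lets the hidden future type hold it. A compilable miniature of the same trick (assuming the `futures` crate for `block_on`; the `Ctx`/`work` names are illustrative):

trait Captures<'x> {}
impl<'x, T: ?Sized> Captures<'x> for T {}

struct Ctx<'d>(&'d str);

// Without `+ Captures<'d>`, the hidden async type would mention 'd
// (via `&'a mut Ctx<'d>`) without the signature allowing it (E0700).
fn work<'a, 'd: 'a>(
    ctx: &'a mut Ctx<'d>,
) -> impl std::future::Future<Output = usize> + 'a + Captures<'d> {
    async move { ctx.0.len() }
}

fn main() {
    let data = String::from("hello");
    let mut ctx = Ctx(&data);
    assert_eq!(futures::executor::block_on(work(&mut ctx)), 5);
}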
+trait Captures<'x> {} +impl<'x, T: ?Sized> Captures<'x> for T {} + +fn run_analysis_with_env<'a, 'd: 'a>( + dice: &'a mut DiceComputations<'d>, analysis_env: AnalysisEnv<'a>, - node: &'a ConfiguredTargetNode, - profile_mode: &'a StarlarkProfileModeOrInstrumentation, -) -> impl Future> + Send + 'a { + node: ConfiguredTargetNodeRef<'a>, + profile_mode: &'a StarlarkProfileMode, +) -> impl Future> + 'a + Captures<'d> { let fut = async move { run_analysis_with_env_underlying(dice, analysis_env, node, profile_mode).await }; @@ -236,18 +236,30 @@ fn run_analysis_with_env<'a>( } async fn run_analysis_with_env_underlying( - dice: &DiceComputations, + dice: &mut DiceComputations<'_>, analysis_env: AnalysisEnv<'_>, - node: &ConfiguredTargetNode, - profile_mode: &StarlarkProfileModeOrInstrumentation, + node: ConfiguredTargetNodeRef<'_>, + profile_mode: &StarlarkProfileMode, ) -> anyhow::Result { let env = Module::new(); let print = EventDispatcherPrintHandler(get_dispatcher()); + let validations_from_deps = analysis_env + .deps + .iter() + .filter_map(|(label, analysis_result)| { + analysis_result + .validations + .dupe() + .map(|v| ((*label).dupe(), v)) + }) + .collect::>(); + let (attributes, plugins) = { + let dep_analysis_results = get_deps_from_analysis_results(analysis_env.deps)?; let resolution_ctx = RuleAnalysisAttrResolutionContext { module: &env, - dep_analysis_results: analysis_env.deps, + dep_analysis_results, query_results: analysis_env.query_results, execution_platform_resolution: node.execution_platform_resolution().clone(), }; @@ -263,41 +275,38 @@ async fn run_analysis_with_env_underlying( analysis_env.execution_platform.dupe(), )?; - let mut profiler_opt = profile_mode - .profile_mode() - .map(|profile_mode| StarlarkProfiler::new(profile_mode.dupe(), true)); + let mut profiler_opt = profile_mode.profile_mode().map(|profile_mode| { + StarlarkProfiler::new( + profile_mode.dupe(), + true, + ProfileTarget::Analysis(node.label().dupe()), + ) + }); let mut profiler = match &mut profiler_opt { - None => StarlarkProfilerOrInstrumentation::disabled(), - Some(profiler) => StarlarkProfilerOrInstrumentation::for_profiler(profiler), + None => StarlarkProfilerOpt::disabled(), + Some(profiler) => StarlarkProfilerOpt::for_profiler(profiler), }; - let (mut eval, ctx, list_res) = with_starlark_eval_provider( + let (dice, mut eval, ctx, list_res) = with_starlark_eval_provider( dice, &mut profiler, format!("analysis:{}", node.label()), |provider, dice| { - let mut eval = provider.make(&env)?; + let (mut eval, _) = provider.make(&env)?; eval.set_print_handler(&print); + eval.set_soft_error_handler(&Buck2StarlarkSoftErrorHandler); - let ctx = env.heap().alloc_typed(AnalysisContext::new( + let ctx = AnalysisContext::prepare( eval.heap(), - attributes, - Some( - eval.heap() - .alloc_typed(StarlarkConfiguredProvidersLabel::new( - ConfiguredProvidersLabel::new( - analysis_env.label, - ProvidersName::Default, - ), - )), - ), - plugins.into(), + Some(attributes), + Some(analysis_env.label), + Some(plugins.into()), registry, dice.global_data().get_digest_config(), - )); + ); - let list_res = analysis_env.impl_function.invoke(&mut eval, ctx)?; + let list_res = analysis_env.rule_spec.invoke(&mut eval, ctx)?; // TODO(cjhopman): This seems quite wrong. This should be happening after run_promises. provider @@ -309,7 +318,7 @@ async fn run_analysis_with_env_underlying( // turn requires those permits). 
We will actually re-enter a provider scope in the // run_promises call when we get back to resolving the promises (and running the starlark // Promise::map() lambdas). - Ok((eval, ctx, list_res)) + Ok((dice, eval, ctx, list_res)) }, ) .await?; @@ -322,15 +331,24 @@ async fn run_analysis_with_env_underlying( ) .await?; + // Pull the ctx object back out, and steal ctx.action's state back + let analysis_registry = ctx.take_state(); + // TODO: Convert the ValueError from `try_from_value` better than just printing its Debug let res_typed = ProviderCollection::try_from_value(list_res)?; - let res = env.heap().alloc(res_typed); - env.set_extra_value(res); + { + let provider_collection = ValueTypedComplex::new_err(env.heap().alloc(res_typed)) + .internal_error_anyhow("Just allocated provider collection")?; + analysis_registry + .analysis_value_storage + .set_result_value(provider_collection)?; + } - // Pull the ctx object back out, and steal ctx.action's state back - let analysis_registry = ctx.take_state(); - std::mem::drop(eval); - let (frozen_env, deferreds) = analysis_registry.finalize(&env)?(env)?; + drop(eval); + + let declared_actions = analysis_registry.num_declared_actions(); + let declared_artifacts = analysis_registry.num_declared_artifacts(); + let (frozen_env, recorded_values) = analysis_registry.finalize(&env)?(env)?; profiler .visit_frozen_module(Some(&frozen_env)) @@ -338,34 +356,58 @@ async fn run_analysis_with_env_underlying( let profile_data = profiler_opt.map(|p| p.finish()).transpose()?.map(Arc::new); - let res = frozen_env - .owned_extra_value() - .context("extra_value not set (internal error)")?; - let provider_collection = FrozenProviderCollectionValue::try_from_value(res) - .expect("just created this, this shouldn't happen"); + let validations = transitive_validations( + validations_from_deps, + recorded_values.provider_collection()?, + ); - // this could look nicer if we had the entire analysis be a deferred - let deferred = DeferredTable::new(deferreds.take_result()?); Ok(AnalysisResult::new( - provider_collection, - deferred, + recorded_values, profile_data, + HashMap::new(), + declared_actions, + declared_artifacts, + validations, )) } -pub fn get_user_defined_rule_impl( +pub fn transitive_validations( + deps: SmallMap, + provider_collection: FrozenProviderCollectionValueRef, +) -> Option { + let info = provider_collection + .value() + .builtin_provider::(); + if info.is_some() || deps.len() > 1 { + let owned_info = info.map(|x| unsafe { + OwnedFrozenRef::new_unchecked(x.as_ref(), provider_collection.owner().dupe()) + }); + Some(TransitiveValidations(Arc::new(TransitiveValidationsData { + info: owned_info, + children: deps.into_keys().collect(), + }))) + } else { + assert!( + deps.len() <= 1, + "Reuse the single element if any from one of the deps for current node." 
+ ); + deps.into_values().next() + } +} + +pub fn get_user_defined_rule_spec( module: FrozenModule, rule_type: &StarlarkRuleType, -) -> impl RuleImplFunction { +) -> impl RuleSpec { struct Impl { module: FrozenModule, name: String, } - impl RuleImplFunction for Impl { + impl RuleSpec for Impl { fn invoke<'v>( &self, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ctx: ValueTyped<'v, AnalysisContext<'v>>, ) -> anyhow::Result> { let rule_callable = self @@ -378,11 +420,39 @@ pub fn get_user_defined_rule_impl( let rule_callable = rule_callable.owned_value(eval.frozen_heap()); let rule_callable = rule_callable .unpack_frozen() - .context("Must be frozen (internal error)")?; + .internal_error_anyhow("Must be frozen")?; (FROZEN_RULE_GET_IMPL.get()?)(rule_callable)? }; - eval.eval_function(rule_impl.to_value(), &[ctx.to_value()], &[]) + eval.eval_function(rule_impl.0.to_value(), &[ctx.to_value()], &[]) + .map_err(|e| from_starlark(e).into()) + } + + fn promise_artifact_mappings<'v>( + &self, + eval: &mut Evaluator<'v, '_, '_>, + ) -> anyhow::Result>> { + let rule_callable = self + .module + .get_any_visibility(&self.name) + .with_context(|| format!("Couldn't find rule `{}`", self.name))? + .0; + let frozen_promise_artifact_mappings = { + // Need to free up the starlark_ctx borrow before we return + let rule_callable = rule_callable.owned_value(eval.frozen_heap()); + let rule_callable = rule_callable + .unpack_frozen() + .internal_error_anyhow("Must be frozen")?; + + (FROZEN_PROMISE_ARTIFACT_MAPPINGS_GET_IMPL.get()?)(rule_callable)? + }; + + Ok(frozen_promise_artifact_mappings + .iter() + .map(|(frozen_string, frozen_func)| { + (frozen_string.to_string(), frozen_func.to_value()) + }) + .collect::>()) } } diff --git a/app/buck2_analysis/src/analysis/plugins.rs b/app/buck2_analysis/src/analysis/plugins.rs index 8c5cb85266d82..53a4e072f7f20 100644 --- a/app/buck2_analysis/src/analysis/plugins.rs +++ b/app/buck2_analysis/src/analysis/plugins.rs @@ -12,7 +12,7 @@ use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_node::attrs::attr_type::dep::DepAttr; use buck2_node::attrs::attr_type::dep::DepAttrTransition; use buck2_node::attrs::attr_type::dep::DepAttrType; -use buck2_node::nodes::configured::ConfiguredTargetNode; +use buck2_node::nodes::configured::ConfiguredTargetNodeRef; use buck2_node::provider_id_set::ProviderIdSet; use dupe::IterDupedExt; use starlark::values::Value; @@ -23,7 +23,7 @@ use crate::attrs::resolve::attr_type::dep::DepAttrTypeExt; use crate::attrs::resolve::ctx::AttrResolutionContext; pub fn plugins_to_starlark_value<'v>( - node: &ConfiguredTargetNode, + node: ConfiguredTargetNodeRef, ctx: &dyn AttrResolutionContext<'v>, ) -> anyhow::Result>> { let mut plugins = SmallMap::new(); diff --git a/app/buck2_analysis/src/attrs/mod.rs b/app/buck2_analysis/src/attrs.rs similarity index 100% rename from app/buck2_analysis/src/attrs/mod.rs rename to app/buck2_analysis/src/attrs.rs diff --git a/app/buck2_analysis/src/attrs/resolve/mod.rs b/app/buck2_analysis/src/attrs/resolve.rs similarity index 100% rename from app/buck2_analysis/src/attrs/resolve/mod.rs rename to app/buck2_analysis/src/attrs/resolve.rs diff --git a/app/buck2_analysis/src/attrs/resolve/attr_type/mod.rs b/app/buck2_analysis/src/attrs/resolve/attr_type.rs similarity index 100% rename from app/buck2_analysis/src/attrs/resolve/attr_type/mod.rs rename to app/buck2_analysis/src/attrs/resolve/attr_type.rs diff --git a/app/buck2_analysis/src/attrs/resolve/attr_type/arg.rs 
b/app/buck2_analysis/src/attrs/resolve/attr_type/arg.rs new file mode 100644 index 0000000000000..7fffea4362835 --- /dev/null +++ b/app/buck2_analysis/src/attrs/resolve/attr_type/arg.rs @@ -0,0 +1,192 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use anyhow::Context; +use buck2_artifact::artifact::source_artifact::SourceArtifact; +use buck2_build_api::interpreter::rule_defs::cmd_args::value::FrozenCommandLineArg; +use buck2_build_api::interpreter::rule_defs::provider::builtin::run_info::RunInfoCallable; +use buck2_build_api::interpreter::rule_defs::provider::builtin::template_placeholder_info::FrozenTemplatePlaceholderInfo; +use buck2_build_api::interpreter::rule_defs::resolved_macro::ResolvedMacro; +use buck2_build_api::interpreter::rule_defs::resolved_macro::ResolvedStringWithMacros; +use buck2_build_api::interpreter::rule_defs::resolved_macro::ResolvedStringWithMacrosPart; +use buck2_core::package::source_path::SourcePath; +use buck2_core::package::PackageLabel; +use buck2_core::provider::label::ConfiguredProvidersLabel; +use buck2_node::attrs::attr_type::arg::ConfiguredMacro; +use buck2_node::attrs::attr_type::arg::ConfiguredStringWithMacros; +use buck2_node::attrs::attr_type::arg::ConfiguredStringWithMacrosPart; +use buck2_node::attrs::attr_type::arg::StringWithMacros; +use buck2_node::attrs::attr_type::arg::UnrecognizedMacro; +use dupe::Dupe; +use either::Either; +use starlark::values::Value; + +use crate::attrs::resolve::attr_type::arg::query::ConfiguredQueryMacroBaseExt; +use crate::attrs::resolve::ctx::AttrResolutionContext; + +pub mod query; + +#[derive(Debug, buck2_error::Error)] +enum ResolveMacroError { + #[error( + "The mapping for {0} in the TemplatePlaceholderInfo for {1} was not a dictionary (required because requested arg `{2}`)." + )] + KeyedPlaceholderMappingNotADict(String, ConfiguredProvidersLabel, String), + #[error( + "The mapping for {0} in the TemplatePlaceholderInfo for {1} had no mapping for arg `{2}`." 
+    )]
+    KeyedPlaceholderArgMissing(String, ConfiguredProvidersLabel, String),
+    #[error("There was no mapping for {0} in the TemplatePlaceholderInfo for {1}.")]
+    KeyedPlaceholderMappingMissing(String, ConfiguredProvidersLabel),
+    #[error(
+        "Macro `{0}` is not builtin, target `{1}` must provide `TemplatePlaceholderInfo` to resolve it"
+    )]
+    KeyedPlaceholderInfoMissing(String, ConfiguredProvidersLabel),
+    #[error("There was no mapping for {0}.")]
+    UnkeyedPlaceholderUnresolved(String),
+    #[error("Expected a RunInfo provider from target `{0}`.")]
+    ExpectedRunInfo(String),
+    #[error("Can't expand unrecognized macros (`{0}`).")]
+    UnrecognizedMacroUnimplemented(String),
+}
+
+pub trait ConfiguredStringWithMacrosExt {
+    fn resolve<'v>(
+        &self,
+        ctx: &dyn AttrResolutionContext<'v>,
+        pkg: PackageLabel,
+    ) -> anyhow::Result<Value<'v>>;
+}
+
+impl ConfiguredStringWithMacrosExt for ConfiguredStringWithMacros {
+    fn resolve<'v>(
+        &self,
+        ctx: &dyn AttrResolutionContext<'v>,
+        pkg: PackageLabel,
+    ) -> anyhow::Result<Value<'v>> {
+        let resolved_parts = match &self.string_with_macros {
+            StringWithMacros::StringPart(s) => {
+                vec![ResolvedStringWithMacrosPart::String(s.dupe())]
+            }
+            StringWithMacros::ManyParts(ref parts) => {
+                let mut resolved_parts = Vec::with_capacity(parts.len());
+                for part in parts.iter() {
+                    match part {
+                        ConfiguredStringWithMacrosPart::String(s) => {
+                            resolved_parts.push(ResolvedStringWithMacrosPart::String(s.dupe()));
+                        }
+                        ConfiguredStringWithMacrosPart::Macro(write_to_file, m) => {
+                            resolved_parts.push(ResolvedStringWithMacrosPart::Macro(
+                                *write_to_file,
+                                resolve_configured_macro(m, ctx, pkg)
+                                    .with_context(|| format!("Error resolving `{}`.", part))?,
+                            ));
+                        }
+                    }
+                }
+                resolved_parts
+            }
+        };
+
+        let configured_macros = if self.anon_target_compatible {
+            Some(self)
+        } else {
+            None
+        };
+
+        Ok(ctx.heap().alloc(ResolvedStringWithMacros::new(
+            resolved_parts,
+            configured_macros,
+        )))
+    }
+}
+
+fn resolve_configured_macro(
+    configured_macro: &ConfiguredMacro,
+    ctx: &dyn AttrResolutionContext,
+    pkg: PackageLabel,
+) -> anyhow::Result<ResolvedMacro> {
+    match configured_macro {
+        ConfiguredMacro::Location(target) => {
+            let providers_value = ctx.get_dep(target)?;
+            Ok(ResolvedMacro::Location(providers_value.default_info()?))
+        }
+        ConfiguredMacro::Exe { label, .. } => {
+            // Don't need to consider exec_dep as it already was applied when configuring the label.
+            let providers = ctx.get_dep(label)?;
+            let run_info = match providers.get_provider_raw(RunInfoCallable::provider_id()) {
+                Some(value) => *value,
+                None => {
+                    return Err(ResolveMacroError::ExpectedRunInfo(label.to_string()).into());
+                }
+            };
+            // A RunInfo is an arg-like value.
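As an aside on the `Exe` arm above: the lookup-or-typed-error shape it uses recurs throughout this file. A hedged, self-contained sketch of that shape with stand-in types (`ProviderSet`, `LookupError`, and `require_run_info` are illustrative, not buck2 APIs):

use std::collections::HashMap;

#[derive(Debug)]
enum LookupError {
    // Mirrors ResolveMacroError::ExpectedRunInfo above.
    ExpectedRunInfo(String),
}

struct ProviderSet {
    providers: HashMap<String, String>,
}

impl ProviderSet {
    // Return the provider for an id if present, else None,
    // in the spirit of `get_provider_raw` above.
    fn get_provider_raw(&self, id: &str) -> Option<&String> {
        self.providers.get(id)
    }
}

// Convert absence of the provider into a typed error that names the
// offending target, as the diff does for RunInfo.
fn require_run_info(set: &ProviderSet, label: &str) -> Result<String, LookupError> {
    match set.get_provider_raw("RunInfo") {
        Some(v) => Ok(v.clone()),
        None => Err(LookupError::ExpectedRunInfo(label.to_owned())),
    }
}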
+ Ok(ResolvedMacro::ArgLike(FrozenCommandLineArg::new(run_info)?)) + } + ConfiguredMacro::Source(p) => { + let buck_path = SourcePath::new(pkg.dupe(), p.path().dupe()); + Ok(ResolvedMacro::Source(SourceArtifact::new(buck_path).into())) + } + ConfiguredMacro::UserUnkeyedPlaceholder(name) => { + let provider = ctx.resolve_unkeyed_placeholder(name)?.ok_or_else(|| { + ResolveMacroError::UnkeyedPlaceholderUnresolved((**name).to_owned()) + })?; + Ok(ResolvedMacro::ArgLike(provider)) + } + ConfiguredMacro::UserKeyedPlaceholder(box (name, label, arg)) => { + let providers = ctx.get_dep(label)?; + let placeholder_info = providers + .builtin_provider::() + .ok_or_else(|| { + ResolveMacroError::KeyedPlaceholderInfoMissing( + (**name).to_owned(), + label.dupe(), + ) + })?; + let keyed_variables = placeholder_info.keyed_variables(); + let either_cmd_or_mapping = keyed_variables.get(&**name).ok_or_else(|| { + ResolveMacroError::KeyedPlaceholderMappingMissing( + (**name).to_owned(), + label.to_owned(), + ) + })?; + + let value: FrozenCommandLineArg = match (arg, either_cmd_or_mapping) { + (None, Either::Left(mapping)) => *mapping, + (Some(arg), Either::Left(_)) => { + return Err(ResolveMacroError::KeyedPlaceholderMappingNotADict( + (**name).to_owned(), + label.dupe(), + (**arg).to_owned(), + ) + .into()); + } + (arg, Either::Right(mapping)) => { + let arg = arg.as_deref().unwrap_or("DEFAULT"); + mapping.get(arg).copied().ok_or_else(|| { + ResolveMacroError::KeyedPlaceholderArgMissing( + (**name).to_owned(), + label.dupe(), + arg.to_owned(), + ) + })? + } + }; + + Ok(ResolvedMacro::ArgLike(value)) + } + ConfiguredMacro::Query(query) => Ok(ResolvedMacro::Query(query.resolve(ctx)?)), + ConfiguredMacro::UnrecognizedMacro(box UnrecognizedMacro { + macro_type, + args: _, + }) => Err(anyhow::anyhow!( + ResolveMacroError::UnrecognizedMacroUnimplemented((**macro_type).to_owned()) + )), + } +} diff --git a/app/buck2_analysis/src/attrs/resolve/attr_type/arg/mod.rs b/app/buck2_analysis/src/attrs/resolve/attr_type/arg/mod.rs deleted file mode 100644 index 9d21189699b7d..0000000000000 --- a/app/buck2_analysis/src/attrs/resolve/attr_type/arg/mod.rs +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -use anyhow::Context; -use buck2_build_api::interpreter::rule_defs::cmd_args::value::FrozenCommandLineArg; -use buck2_build_api::interpreter::rule_defs::provider::builtin::run_info::RunInfoCallable; -use buck2_build_api::interpreter::rule_defs::provider::builtin::template_placeholder_info::FrozenTemplatePlaceholderInfo; -use buck2_build_api::interpreter::rule_defs::resolved_macro::ResolvedMacro; -use buck2_build_api::interpreter::rule_defs::resolved_macro::ResolvedStringWithMacros; -use buck2_build_api::interpreter::rule_defs::resolved_macro::ResolvedStringWithMacrosPart; -use buck2_core::provider::label::ConfiguredProvidersLabel; -use buck2_node::attrs::attr_type::arg::ConfiguredMacro; -use buck2_node::attrs::attr_type::arg::ConfiguredStringWithMacros; -use buck2_node::attrs::attr_type::arg::ConfiguredStringWithMacrosPart; -use buck2_node::attrs::attr_type::arg::StringWithMacros; -use buck2_node::attrs::attr_type::arg::UnrecognizedMacro; -use dupe::Dupe; -use either::Either; -use starlark::values::Value; - -use crate::attrs::resolve::attr_type::arg::query::ConfiguredQueryMacroBaseExt; -use crate::attrs::resolve::ctx::AttrResolutionContext; - -pub mod query; - -#[derive(Debug, thiserror::Error)] -enum ResolveMacroError { - #[error( - "The mapping for {0} in the TemplatePlaceholderInfo for {1} was not a dictionary (required because requested arg `{2}`)." - )] - KeyedPlaceholderMappingNotADict(String, ConfiguredProvidersLabel, String), - #[error( - "The mapping for {0} in the TemplatePlaceholderInfo for {1} had no mapping for arg `{2}`." - )] - KeyedPlaceholderArgMissing(String, ConfiguredProvidersLabel, String), - #[error("There was no mapping for {0} in the TemplatePlaceholderInfo for {1}.")] - KeyedPlaceholderMappingMissing(String, ConfiguredProvidersLabel), - #[error("There was no TemplatePlaceholderInfo for {0}.")] - KeyedPlaceholderInfoMissing(ConfiguredProvidersLabel), - #[error("There was no mapping for {0}.")] - UnkeyedPlaceholderUnresolved(String), - #[error("Expected a RunInfo provider from target `{0}`.")] - ExpectedRunInfo(String), - #[error("Can't expand unrecognized macros (`{0}`).")] - UnrecognizedMacroUnimplemented(String), -} - -pub trait ConfiguredStringWithMacrosExt { - fn resolve<'v>(&self, ctx: &dyn AttrResolutionContext<'v>) -> anyhow::Result>; -} - -impl ConfiguredStringWithMacrosExt for ConfiguredStringWithMacros { - fn resolve<'v>(&self, ctx: &dyn AttrResolutionContext<'v>) -> anyhow::Result> { - let resolved_parts = match &self.string_with_macros { - StringWithMacros::StringPart(s) => { - vec![ResolvedStringWithMacrosPart::String(s.dupe())] - } - StringWithMacros::ManyParts(ref parts) => { - let mut resolved_parts = Vec::with_capacity(parts.len()); - for part in parts.iter() { - match part { - ConfiguredStringWithMacrosPart::String(s) => { - resolved_parts.push(ResolvedStringWithMacrosPart::String(s.dupe())); - } - ConfiguredStringWithMacrosPart::Macro(write_to_file, m) => { - resolved_parts.push(ResolvedStringWithMacrosPart::Macro( - *write_to_file, - resolve_configured_macro(m, ctx) - .with_context(|| format!("Error resolving `{}`.", part))?, - )); - } - } - } - resolved_parts - } - }; - - let configured_macros = if self.anon_target_compatible { - Some(self) - } else { - None - }; - - Ok(ctx.heap().alloc(ResolvedStringWithMacros::new( - resolved_parts, - configured_macros, - ))) - } -} - -fn resolve_configured_macro( - configured_macro: &ConfiguredMacro, - ctx: &dyn AttrResolutionContext, -) -> anyhow::Result { - match configured_macro { - 
ConfiguredMacro::Location(target) => { - let providers_value = ctx.get_dep(target)?; - let providers = providers_value.provider_collection(); - Ok(ResolvedMacro::Location(providers.default_info())) - } - ConfiguredMacro::Exe { label, .. } => { - // Don't need to consider exec_dep as it already was applied when configuring the label. - let providers_value = ctx.get_dep(label)?; - let providers = providers_value.provider_collection(); - let run_info = match providers.get_provider_raw(RunInfoCallable::provider_id()) { - Some(value) => *value, - None => { - return Err(ResolveMacroError::ExpectedRunInfo(label.to_string()).into()); - } - }; - // A RunInfo is an arg-like value. - Ok(ResolvedMacro::ArgLike(FrozenCommandLineArg::new(run_info)?)) - } - ConfiguredMacro::UserUnkeyedPlaceholder(name) => { - let provider = ctx.resolve_unkeyed_placeholder(name)?.ok_or_else(|| { - ResolveMacroError::UnkeyedPlaceholderUnresolved((**name).to_owned()) - })?; - Ok(ResolvedMacro::ArgLike(provider)) - } - ConfiguredMacro::UserKeyedPlaceholder(box (name, label, arg)) => { - let providers = ctx.get_dep(label)?; - let placeholder_info = providers - .provider_collection() - .builtin_provider::() - .ok_or_else(|| ResolveMacroError::KeyedPlaceholderInfoMissing(label.clone()))?; - let keyed_variables = placeholder_info.keyed_variables(); - let either_cmd_or_mapping = keyed_variables.get(&**name).ok_or_else(|| { - ResolveMacroError::KeyedPlaceholderMappingMissing( - (**name).to_owned(), - label.to_owned(), - ) - })?; - - let value: FrozenCommandLineArg = match (arg, either_cmd_or_mapping) { - (None, Either::Left(mapping)) => *mapping, - (Some(arg), Either::Left(_)) => { - return Err(ResolveMacroError::KeyedPlaceholderMappingNotADict( - (**name).to_owned(), - label.clone(), - (**arg).to_owned(), - ) - .into()); - } - (arg, Either::Right(mapping)) => { - let arg = arg.as_deref().unwrap_or("DEFAULT"); - mapping.get(arg).copied().ok_or_else(|| { - ResolveMacroError::KeyedPlaceholderArgMissing( - (**name).to_owned(), - label.clone(), - arg.to_owned(), - ) - })? 
- } - }; - - Ok(ResolvedMacro::ArgLike(value)) - } - ConfiguredMacro::Query(query) => Ok(ResolvedMacro::Query(query.resolve(ctx)?)), - ConfiguredMacro::UnrecognizedMacro(box UnrecognizedMacro { - macro_type, - args: _, - }) => Err(anyhow::anyhow!( - ResolveMacroError::UnrecognizedMacroUnimplemented((**macro_type).to_owned()) - )), - } -} diff --git a/app/buck2_analysis/src/attrs/resolve/attr_type/arg/query.rs b/app/buck2_analysis/src/attrs/resolve/attr_type/arg/query.rs index 9db889cac1f08..c56e869a2effe 100644 --- a/app/buck2_analysis/src/attrs/resolve/attr_type/arg/query.rs +++ b/app/buck2_analysis/src/attrs/resolve/attr_type/arg/query.rs @@ -16,7 +16,6 @@ use buck2_node::attrs::attr_type::arg::QueryExpansion; use buck2_node::attrs::attr_type::query::QueryMacroBase; use dupe::Dupe; -use crate::attrs::resolve::attr_type::query::ConfiguredQueryAttrBaseExt; use crate::attrs::resolve::ctx::AnalysisQueryResult; use crate::attrs::resolve::ctx::AttrResolutionContext; @@ -26,23 +25,25 @@ pub(crate) trait ConfiguredQueryMacroBaseExt { impl ConfiguredQueryMacroBaseExt for QueryMacroBase { fn resolve(&self, ctx: &dyn AttrResolutionContext) -> anyhow::Result { - let query_result: Arc = self.query.resolve(ctx)?; + let query_result: Arc = ctx.resolve_query(&self.query.query)?; match &self.expansion_type { QueryExpansion::Output => Ok(ResolvedQueryMacro::Outputs( query_result + .result .iter() .map(|(_, providers)| { - providers + Ok(providers .provider_collection() - .default_info() + .default_info()? .default_outputs() - .into_boxed_slice() + .into_boxed_slice()) }) - .collect(), + .collect::>()?, )), QueryExpansion::Target => Ok(ResolvedQueryMacro::Targets( query_result + .result .iter() .map(|(target, _)| target.dupe()) .collect(), @@ -55,18 +56,19 @@ impl ConfiguredQueryMacroBaseExt for QueryMacroBase { None => " ".to_owned().into_boxed_str(), }, list: query_result + .result .iter() .map(|(target, providers)| { - ( + Ok(( target.dupe(), providers .provider_collection() - .default_info() + .default_info()? .default_outputs() .into_boxed_slice(), - ) + )) }) - .collect(), + .collect::>()?, }, ))) } diff --git a/app/buck2_analysis/src/attrs/resolve/attr_type/configuration_dep.rs b/app/buck2_analysis/src/attrs/resolve/attr_type/configuration_dep.rs index c1289597faf8f..a087dafcbcbe1 100644 --- a/app/buck2_analysis/src/attrs/resolve/attr_type/configuration_dep.rs +++ b/app/buck2_analysis/src/attrs/resolve/attr_type/configuration_dep.rs @@ -7,8 +7,8 @@ * of this source tree. 
*/ -use buck2_core::target::label::TargetLabel; use buck2_node::attrs::attr_type::configuration_dep::ConfigurationDepAttrType; +use buck2_node::configuration::resolved::ConfigurationSettingKey; use starlark::values::Value; use crate::attrs::resolve::ctx::AttrResolutionContext; @@ -16,7 +16,7 @@ use crate::attrs::resolve::ctx::AttrResolutionContext; pub(crate) trait ConfigurationDepAttrTypeExt { fn resolve_single<'v>( ctx: &dyn AttrResolutionContext<'v>, - label: &TargetLabel, + label: &ConfigurationSettingKey, ) -> anyhow::Result> { Ok(ctx.heap().alloc(label.to_string())) } diff --git a/app/buck2_analysis/src/attrs/resolve/attr_type/dep.rs b/app/buck2_analysis/src/attrs/resolve/attr_type/dep.rs index 8a4a400367e6a..8bec6e6b4fdee 100644 --- a/app/buck2_analysis/src/attrs/resolve/attr_type/dep.rs +++ b/app/buck2_analysis/src/attrs/resolve/attr_type/dep.rs @@ -8,7 +8,6 @@ */ use buck2_build_api::interpreter::rule_defs::provider::collection::FrozenProviderCollection; -use buck2_build_api::interpreter::rule_defs::provider::collection::FrozenProviderCollectionValue; use buck2_build_api::interpreter::rule_defs::provider::dependency::Dependency; use buck2_core::execution_types::execution::ExecutionPlatformResolution; use buck2_core::provider::label::ConfiguredProvidersLabel; @@ -19,12 +18,12 @@ use buck2_node::attrs::attr_type::dep::DepAttrTransition; use buck2_node::attrs::attr_type::dep::DepAttrType; use buck2_node::provider_id_set::ProviderIdSet; use starlark::environment::Module; +use starlark::values::FrozenValueTyped; use starlark::values::Value; -use thiserror::Error; use crate::attrs::resolve::ctx::AttrResolutionContext; -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] enum ResolutionError { #[error("required provider `{0}` was not found on `{1}`. 
Found these providers: {}", .2.join(", "))] MissingRequiredProvider(String, ConfiguredProvidersLabel, Vec), @@ -40,7 +39,7 @@ pub trait DepAttrTypeExt { fn alloc_dependency<'v>( env: &'v Module, target: &ConfiguredProvidersLabel, - v: &FrozenProviderCollectionValue, + v: FrozenValueTyped<'v, FrozenProviderCollection>, execution_platform_resolution: Option<&ExecutionPlatformResolution>, ) -> Value<'v>; @@ -79,13 +78,13 @@ impl DepAttrTypeExt for DepAttrType { fn alloc_dependency<'v>( env: &'v Module, target: &ConfiguredProvidersLabel, - v: &FrozenProviderCollectionValue, + v: FrozenValueTyped<'v, FrozenProviderCollection>, execution_platform_resolution: Option<&ExecutionPlatformResolution>, ) -> Value<'v> { env.heap().alloc(Dependency::new( env.heap(), target.clone(), - v.value().owned_value(env.frozen_heap()), + v, execution_platform_resolution, )) } @@ -96,9 +95,8 @@ impl DepAttrTypeExt for DepAttrType { required_providers: &ProviderIdSet, is_exec_dep: bool, ) -> anyhow::Result> { - let v = ctx.get_dep(target)?; - let provider_collection = v.provider_collection(); - Self::check_providers(required_providers, provider_collection, target)?; + let provider_collection = ctx.get_dep(target)?; + Self::check_providers(required_providers, provider_collection.as_ref(), target)?; let execution_platform_resolution = if is_exec_dep { Some(ctx.execution_platform_resolution()) } else { @@ -108,7 +106,7 @@ impl DepAttrTypeExt for DepAttrType { Ok(Self::alloc_dependency( ctx.starlark_module(), target, - &v, + provider_collection, execution_platform_resolution, )) } diff --git a/app/buck2_analysis/src/attrs/resolve/attr_type/query.rs b/app/buck2_analysis/src/attrs/resolve/attr_type/query.rs index f94752fc6ea93..899ce8870e0e8 100644 --- a/app/buck2_analysis/src/attrs/resolve/attr_type/query.rs +++ b/app/buck2_analysis/src/attrs/resolve/attr_type/query.rs @@ -7,41 +7,26 @@ * of this source tree. 
*/ -use std::sync::Arc; - -use buck2_common::result::SharedResult; use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_core::provider::label::ProvidersName; use buck2_node::attrs::attr_type::dep::DepAttrType; use buck2_node::attrs::attr_type::query::QueryAttr; -use buck2_node::attrs::attr_type::query::QueryAttrBase; use dupe::Dupe; use starlark::values::Value; use crate::attrs::resolve::attr_type::dep::DepAttrTypeExt; -use crate::attrs::resolve::ctx::AnalysisQueryResult; use crate::attrs::resolve::ctx::AttrResolutionContext; -pub(crate) trait ConfiguredQueryAttrBaseExt { - fn resolve(&self, ctx: &dyn AttrResolutionContext) -> SharedResult>; -} - -impl ConfiguredQueryAttrBaseExt for QueryAttrBase { - fn resolve(&self, ctx: &dyn AttrResolutionContext) -> SharedResult> { - ctx.resolve_query(&self.query) - } -} - pub(crate) trait ConfiguredQueryAttrExt { fn resolve<'v>(&self, ctx: &dyn AttrResolutionContext<'v>) -> anyhow::Result>; } impl ConfiguredQueryAttrExt for QueryAttr { fn resolve<'v>(&self, ctx: &dyn AttrResolutionContext<'v>) -> anyhow::Result> { - let query_results = self.query.resolve(ctx)?; + let query_results = ctx.resolve_query(&self.query.query)?; let mut dependencies = Vec::new(); - for (target, providers) in &*query_results { + for (target, providers) in &query_results.result { let providers_label = ConfiguredProvidersLabel::new(target.dupe(), ProvidersName::Default); if !self.providers.is_empty() { @@ -56,7 +41,7 @@ impl ConfiguredQueryAttrExt for QueryAttr { dependencies.push(DepAttrType::alloc_dependency( ctx.starlark_module(), &providers_label, - providers, + providers.add_heap_ref(ctx.starlark_module().frozen_heap()), None, )); } diff --git a/app/buck2_analysis/src/attrs/resolve/attr_type/source.rs b/app/buck2_analysis/src/attrs/resolve/attr_type/source.rs index dcfd2bceaf8cc..8e45dd8e05ac7 100644 --- a/app/buck2_analysis/src/attrs/resolve/attr_type/source.rs +++ b/app/buck2_analysis/src/attrs/resolve/attr_type/source.rs @@ -8,8 +8,8 @@ */ use buck2_artifact::artifact::source_artifact::SourceArtifact; -use buck2_build_api::interpreter::rule_defs::artifact::StarlarkArtifact; -use buck2_core::buck_path::path::BuckPath; +use buck2_build_api::interpreter::rule_defs::artifact::starlark_artifact::StarlarkArtifact; +use buck2_core::package::source_path::SourcePath; use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_node::attrs::attr_type::source::SourceAttrType; use starlark::values::list::ListRef; @@ -17,14 +17,14 @@ use starlark::values::Value; use crate::attrs::resolve::ctx::AttrResolutionContext; -#[derive(thiserror::Error, Debug)] +#[derive(buck2_error::Error, Debug)] enum SourceLabelResolutionError { #[error("Expected a single artifact from {0}, but it returned {1} artifacts")] ExpectedSingleValue(String, usize), } pub(crate) trait SourceAttrTypeExt { - fn resolve_single_file<'v>(ctx: &dyn AttrResolutionContext<'v>, path: BuckPath) -> Value<'v> { + fn resolve_single_file<'v>(ctx: &dyn AttrResolutionContext<'v>, path: SourcePath) -> Value<'v> { ctx.heap() .alloc(StarlarkArtifact::new(SourceArtifact::new(path).into())) } @@ -34,10 +34,7 @@ pub(crate) trait SourceAttrTypeExt { label: &ConfiguredProvidersLabel, ) -> anyhow::Result>> { let dep = ctx.get_dep(label)?; - let default_outputs = dep - .provider_collection() - .default_info() - .default_outputs_raw(); + let default_outputs = dep.default_info()?.default_outputs_raw(); let res = ListRef::from_frozen_value(default_outputs) .unwrap() .iter() diff --git 
a/app/buck2_analysis/src/attrs/resolve/configured_attr.rs b/app/buck2_analysis/src/attrs/resolve/configured_attr.rs index 0c6cf72f31552..3a3ee5ebf5b75 100644 --- a/app/buck2_analysis/src/attrs/resolve/configured_attr.rs +++ b/app/buck2_analysis/src/attrs/resolve/configured_attr.rs @@ -9,10 +9,14 @@ use buck2_artifact::artifact::artifact_type::Artifact; use buck2_artifact::artifact::source_artifact::SourceArtifact; -use buck2_build_api::interpreter::rule_defs::artifact::StarlarkArtifact; +use buck2_build_api::actions::query::PackageLabelOption; +use buck2_build_api::actions::query::CONFIGURED_ATTR_TO_VALUE; +use buck2_build_api::interpreter::rule_defs::artifact::starlark_artifact::StarlarkArtifact; use buck2_build_api::interpreter::rule_defs::provider::dependency::DependencyGen; -use buck2_core::buck_path::path::BuckPath; +use buck2_core::package::package_relative_path::PackageRelativePath; +use buck2_core::package::source_path::SourcePath; use buck2_core::package::PackageLabel; +use buck2_error::starlark_error::from_starlark; use buck2_interpreter::types::configured_providers_label::StarlarkConfiguredProvidersLabel; use buck2_interpreter::types::opaque_metadata::OpaqueMetadata; use buck2_interpreter::types::target_label::StarlarkTargetLabel; @@ -25,6 +29,7 @@ use buck2_node::attrs::configured_attr::ConfiguredAttr; use buck2_node::visibility::VisibilityPatternList; use buck2_node::visibility::VisibilitySpecification; use buck2_node::visibility::WithinViewSpecification; +use buck2_util::arc_str::ArcS; use dupe::Dupe; use gazebo::prelude::SliceExt; use starlark::values::dict::Dict; @@ -47,6 +52,12 @@ use crate::attrs::resolve::attr_type::source::SourceAttrTypeExt; use crate::attrs::resolve::attr_type::split_transition_dep::SplitTransitionDepAttrTypeExt; use crate::attrs::resolve::ctx::AttrResolutionContext; +#[derive(Debug, buck2_error::Error)] +enum ConfiguredAttrError { + #[error("Source path `{0}` cannot be used in attributes referenced in transition")] + SourceFileToStarlarkValue(ArcS), +} + pub trait ConfiguredAttrExt { fn resolve<'v>( &self, @@ -62,7 +73,7 @@ pub trait ConfiguredAttrExt { fn starlark_type(&self) -> anyhow::Result<&'static str>; - fn to_value<'v>(&self, pkg: PackageLabel, heap: &'v Heap) -> anyhow::Result>; + fn to_value<'v>(&self, pkg: PackageLabelOption, heap: &'v Heap) -> anyhow::Result>; } impl ConfiguredAttrExt for ConfiguredAttr { @@ -80,6 +91,8 @@ impl ConfiguredAttrExt for ConfiguredAttr { match self { // SourceLabel is special since it is the only type that can be expand to many ConfiguredAttr::SourceLabel(src) => SourceAttrType::resolve_label(ctx, src), + // OneOf could contain a SourceLabel + ConfiguredAttr::OneOf(box l, _) => l.resolve(pkg, ctx), _ => Ok(vec![self.resolve_single(pkg, ctx)?]), } } @@ -100,14 +113,14 @@ impl ConfiguredAttrExt for ConfiguredAttr { ConfiguredAttr::List(list) => { let mut values = Vec::with_capacity(list.len()); for v in list.iter() { - values.append(&mut v.resolve(pkg.dupe(), ctx)?); + values.append(&mut v.resolve(pkg, ctx)?); } Ok(ctx.heap().alloc(values)) } ConfiguredAttr::Tuple(list) => { let mut values = Vec::with_capacity(list.len()); for v in list.iter() { - values.append(&mut v.resolve(pkg.dupe(), ctx)?); + values.push(v.resolve_single(pkg, ctx)?); } Ok(ctx.heap().alloc(AllocTuple(values))) } @@ -115,8 +128,10 @@ impl ConfiguredAttrExt for ConfiguredAttr { let mut res = SmallMap::with_capacity(dict.len()); for (k, v) in dict.iter() { res.insert_hashed( - k.resolve_single(pkg.dupe(), ctx)?.get_hashed()?, - 
v.resolve_single(pkg.dupe(), ctx)?, + k.resolve_single(pkg, ctx)? + .get_hashed() + .map_err(from_starlark)?, + v.resolve_single(pkg, ctx)?, ); } Ok(ctx.heap().alloc(Dict::new(res))) @@ -126,7 +141,7 @@ impl ConfiguredAttrExt for ConfiguredAttr { a @ (ConfiguredAttr::Visibility(_) | ConfiguredAttr::WithinView(_)) => { // TODO(nga): rule implementations should not need visibility attribute. // But adding it here to preserve existing behavior. - a.to_value(pkg, ctx.heap()) + configured_attr_to_value(a, PackageLabelOption::PackageLabel(pkg), ctx.heap()) } ConfiguredAttr::ExplicitConfiguredDep(d) => { ExplicitConfiguredDepAttrType::resolve_single(ctx, d.as_ref()) @@ -135,22 +150,23 @@ impl ConfiguredAttrExt for ConfiguredAttr { SplitTransitionDepAttrType::resolve_single(ctx, d.as_ref()) } ConfiguredAttr::ConfigurationDep(d) => ConfigurationDepAttrType::resolve_single(ctx, d), - ConfiguredAttr::PluginDep(d) => { - Ok(ctx.heap().alloc(StarlarkTargetLabel::new(d.0.dupe()))) + ConfiguredAttr::PluginDep(d, _) => { + Ok(ctx.heap().alloc(StarlarkTargetLabel::new(d.dupe()))) } ConfiguredAttr::Dep(d) => DepAttrType::resolve_single(ctx, d), ConfiguredAttr::SourceLabel(s) => SourceAttrType::resolve_single_label(ctx, s), ConfiguredAttr::Label(label) => { - let label = StarlarkConfiguredProvidersLabel::new(*label.clone()); + let label = StarlarkConfiguredProvidersLabel::new(label.dupe()); Ok(ctx.heap().alloc(label)) } - ConfiguredAttr::Arg(arg) => arg.resolve(ctx), + ConfiguredAttr::Arg(arg) => arg.resolve(ctx, pkg), ConfiguredAttr::Query(query) => query.resolve(ctx), ConfiguredAttr::SourceFile(s) => Ok(SourceAttrType::resolve_single_file( ctx, - BuckPath::new(pkg.dupe(), s.path().dupe()), + SourcePath::new(pkg, s.path().dupe()), )), ConfiguredAttr::Metadata(..) => Ok(ctx.heap().alloc(OpaqueMetadata)), + ConfiguredAttr::TargetModifiers(..) => Ok(ctx.heap().alloc(OpaqueMetadata)), } } @@ -174,7 +190,7 @@ impl ConfiguredAttrExt for ConfiguredAttr { } ConfiguredAttr::SplitTransitionDep(_) => Ok(Dict::TYPE), ConfiguredAttr::ConfigurationDep(_) => Ok(starlark::values::string::STRING_TYPE), - ConfiguredAttr::PluginDep(_) => { + ConfiguredAttr::PluginDep(..) => { Ok(StarlarkTargetLabel::get_type_value_static().as_str()) } ConfiguredAttr::Dep(_) => { @@ -190,76 +206,101 @@ impl ConfiguredAttrExt for ConfiguredAttr { ConfiguredAttr::Query(_) => Ok(starlark::values::string::STRING_TYPE), ConfiguredAttr::SourceFile(_) => Ok(StarlarkArtifact::get_type_value_static().as_str()), ConfiguredAttr::Metadata(..) => Ok(OpaqueMetadata::get_type_value_static().as_str()), + ConfiguredAttr::TargetModifiers(..) => { + Ok(OpaqueMetadata::get_type_value_static().as_str()) + } } } /// Converts the configured attr to a starlark value without fully resolving - fn to_value<'v>(&self, pkg: PackageLabel, heap: &'v Heap) -> anyhow::Result> { - Ok(match &self { - ConfiguredAttr::Bool(v) => heap.alloc(v.0), - ConfiguredAttr::Int(v) => heap.alloc(*v), - ConfiguredAttr::String(s) | ConfiguredAttr::EnumVariant(s) => heap.alloc(s.as_str()), - ConfiguredAttr::List(list) => { - heap.alloc(list.try_map(|v| v.to_value(pkg.dupe(), heap))?) 
- } - ConfiguredAttr::Tuple(v) => { - heap.alloc(AllocTuple(v.try_map(|v| v.to_value(pkg.dupe(), heap))?)) - } - ConfiguredAttr::Dict(map) => { - let mut res = SmallMap::with_capacity(map.len()); + fn to_value<'v>(&self, pkg: PackageLabelOption, heap: &'v Heap) -> anyhow::Result> { + configured_attr_to_value(self, pkg, heap) + } +} - for (k, v) in map.iter() { - res.insert_hashed( - k.to_value(pkg.dupe(), heap)?.get_hashed()?, - v.to_value(pkg.dupe(), heap)?, - ); - } +fn configured_attr_to_value<'v>( + this: &ConfiguredAttr, + pkg: PackageLabelOption, + heap: &'v Heap, +) -> anyhow::Result> { + Ok(match this { + ConfiguredAttr::Bool(v) => heap.alloc(v.0), + ConfiguredAttr::Int(v) => heap.alloc(*v), + ConfiguredAttr::String(s) | ConfiguredAttr::EnumVariant(s) => heap.alloc(s.as_str()), + ConfiguredAttr::List(list) => { + heap.alloc(list.try_map(|v| configured_attr_to_value(&v, pkg, heap))?) + } + ConfiguredAttr::Tuple(v) => heap.alloc(AllocTuple( + v.try_map(|v| configured_attr_to_value(&v, pkg, heap))?, + )), + ConfiguredAttr::Dict(map) => { + let mut res = SmallMap::with_capacity(map.len()); - heap.alloc(Dict::new(res)) + for (k, v) in map.iter() { + res.insert_hashed( + configured_attr_to_value(&k, pkg, heap)? + .get_hashed() + .map_err(from_starlark)?, + configured_attr_to_value(&v, pkg, heap)?, + ); } - ConfiguredAttr::None => Value::new_none(), - ConfiguredAttr::OneOf(box l, _) => l.to_value(pkg, heap)?, - ConfiguredAttr::Visibility(VisibilitySpecification(specs)) - | ConfiguredAttr::WithinView(WithinViewSpecification(specs)) => match specs { - VisibilityPatternList::Public => heap.alloc(AllocList(["PUBLIC"])), - VisibilityPatternList::List(specs) => { - heap.alloc(AllocList(specs.iter().map(|s| s.to_string()))) - } - }, - ConfiguredAttr::ExplicitConfiguredDep(d) => heap.alloc( - StarlarkConfiguredProvidersLabel::new(d.as_ref().label.clone()), - ), - ConfiguredAttr::SplitTransitionDep(t) => { - let mut map = SmallMap::with_capacity(t.deps.len()); - for (trans, p) in t.deps.iter() { - map.insert_hashed( - heap.alloc(trans).get_hashed()?, - heap.alloc(StarlarkConfiguredProvidersLabel::new(p.clone())), - ); - } - - heap.alloc(Dict::new(map)) - } - ConfiguredAttr::ConfigurationDep(c) => { - heap.alloc(StarlarkTargetLabel::new(c.as_ref().dupe())) + heap.alloc(Dict::new(res)) + } + ConfiguredAttr::None => Value::new_none(), + ConfiguredAttr::OneOf(box l, _) => configured_attr_to_value(&l, pkg, heap)?, + ConfiguredAttr::Visibility(VisibilitySpecification(specs)) + | ConfiguredAttr::WithinView(WithinViewSpecification(specs)) => match specs { + VisibilityPatternList::Public => heap.alloc(AllocList(["PUBLIC"])), + VisibilityPatternList::List(specs) => { + heap.alloc(AllocList(specs.iter().map(|s| s.to_string()))) } - ConfiguredAttr::PluginDep(d) => heap.alloc(StarlarkTargetLabel::new(d.0.dupe())), - ConfiguredAttr::Dep(d) => { - heap.alloc(StarlarkConfiguredProvidersLabel::new(d.label.clone())) + }, + ConfiguredAttr::ExplicitConfiguredDep(d) => heap.alloc( + StarlarkConfiguredProvidersLabel::new(d.as_ref().label.dupe()), + ), + ConfiguredAttr::SplitTransitionDep(t) => { + let mut map = SmallMap::with_capacity(t.deps.len()); + + for (trans, p) in t.deps.iter() { + map.insert_hashed( + heap.alloc(trans).get_hashed().map_err(from_starlark)?, + heap.alloc(StarlarkConfiguredProvidersLabel::new(p.dupe())), + ); } - ConfiguredAttr::SourceLabel(s) => { - heap.alloc(StarlarkConfiguredProvidersLabel::new(*s.clone())) + + heap.alloc(Dict::new(map)) + } + ConfiguredAttr::ConfigurationDep(c) => 
heap.alloc(StarlarkTargetLabel::new(c.0.dupe())), + ConfiguredAttr::PluginDep(d, _) => heap.alloc(StarlarkTargetLabel::new(d.dupe())), + ConfiguredAttr::Dep(d) => heap.alloc(StarlarkConfiguredProvidersLabel::new(d.label.dupe())), + ConfiguredAttr::SourceLabel(s) => { + heap.alloc(StarlarkConfiguredProvidersLabel::new(s.dupe())) + } + ConfiguredAttr::Label(l) => heap.alloc(StarlarkConfiguredProvidersLabel::new(l.dupe())), + ConfiguredAttr::Arg(arg) => heap.alloc(arg.to_string()), + ConfiguredAttr::Query(query) => heap.alloc(&query.query.query), + ConfiguredAttr::SourceFile(f) => match pkg { + PackageLabelOption::PackageLabel(pkg) => { + heap.alloc(StarlarkArtifact::new(Artifact::from(SourceArtifact::new( + SourcePath::new(pkg.to_owned(), f.path().dupe()), + )))) } - ConfiguredAttr::Label(l) => { - heap.alloc(StarlarkConfiguredProvidersLabel::new(*l.clone())) + // We don't store package label in transition key for better caching of transition between packages. + // (This is not inherent requirement, + // but it was easier to implement this ways, + // and probably transitions do not need access to sources anyway). + // So package label is not available. If the need arises, we can store package label along with source attributes. + // TODO(romanp): add earlier check during rule function construction to prevent using source attributes in transitions. + PackageLabelOption::TransitionAttr => { + return Err(ConfiguredAttrError::SourceFileToStarlarkValue(f.path().dupe()).into()); } - ConfiguredAttr::Arg(arg) => heap.alloc(arg.to_string()), - ConfiguredAttr::Query(query) => heap.alloc(query.query.query()), - ConfiguredAttr::SourceFile(f) => heap.alloc(StarlarkArtifact::new(Artifact::from( - SourceArtifact::new(BuckPath::new(pkg.to_owned(), f.path().dupe())), - ))), - ConfiguredAttr::Metadata(..) => heap.alloc(OpaqueMetadata), - }) - } + }, + ConfiguredAttr::Metadata(data) => heap.alloc(data.to_value()), + ConfiguredAttr::TargetModifiers(data) => heap.alloc(data.to_value()), + }) +} + +pub(crate) fn init_configured_attr_to_value() { + CONFIGURED_ATTR_TO_VALUE.init(configured_attr_to_value); } diff --git a/app/buck2_analysis/src/attrs/resolve/ctx.rs b/app/buck2_analysis/src/attrs/resolve/ctx.rs index db02f976273e5..ccdb1d7f05376 100644 --- a/app/buck2_analysis/src/attrs/resolve/ctx.rs +++ b/app/buck2_analysis/src/attrs/resolve/ctx.rs @@ -10,15 +10,25 @@ use std::sync::Arc; use buck2_build_api::interpreter::rule_defs::cmd_args::value::FrozenCommandLineArg; +use buck2_build_api::interpreter::rule_defs::provider::collection::FrozenProviderCollection; use buck2_build_api::interpreter::rule_defs::provider::collection::FrozenProviderCollectionValue; -use buck2_common::result::SharedResult; use buck2_core::execution_types::execution::ExecutionPlatformResolution; use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_core::target::configured_target_label::ConfiguredTargetLabel; use starlark::environment::Module; +use starlark::values::FrozenValueTyped; use starlark::values::Heap; -pub type AnalysisQueryResult = Vec<(ConfiguredTargetLabel, FrozenProviderCollectionValue)>; +/// Result of query evaluation from queries referenced in target nodes. +/// +/// Queries are: +/// * `attrs.query()` queries +/// * macro queries like `$(query_outputs ...)` +pub struct AnalysisQueryResult { + // TODO(nga): we perform analysis even when providers are not needed, + // for example, `$(query_targets ...)` only needs target labels. 
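To make the motivation for this new wrapper concrete, a hedged standalone sketch (all names here are illustrative stand-ins, not the real buck2 types): wrapping the bare `Vec` in a named struct lets fields be added later without retyping every signature, at the cost of the `.result` indirection visible throughout this patch.

struct TargetLabel(String);
struct Providers(String);

struct QueryResult {
    // The diff's `AnalysisQueryResult` keeps its payload in a named
    // field for the same reason.
    result: Vec<(TargetLabel, Providers)>,
}

fn first_label(r: &QueryResult) -> Option<&TargetLabel> {
    // Call sites now read `r.result.iter()` rather than `r.iter()`,
    // matching the `x.result.iter()` changes elsewhere in this patch.
    r.result.iter().map(|(label, _)| label).next()
}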
+    pub result: Vec<(ConfiguredTargetLabel, FrozenProviderCollectionValue)>,
+}

 /// The context for attribute resolution. Provides access to the providers from
 /// dependents.
@@ -34,7 +44,7 @@ pub trait AttrResolutionContext<'v> {
     fn get_dep(
         &self,
         target: &ConfiguredProvidersLabel,
-    ) -> anyhow::Result<FrozenProviderCollectionValue>;
+    ) -> anyhow::Result<FrozenValueTyped<'v, FrozenProviderCollection>>;

     fn resolve_unkeyed_placeholder(
         &self,
@@ -43,7 +53,7 @@
     /// Provides the result of the query. This will only provide results for queries that are reported during the configured attr traversal.
     // TODO(cjhopman): Ideally, we wouldn't need to split query attr resolution in this way, but processing queries is an async operation and the starlark Heap cannot be used in async code.
-    fn resolve_query(&self, query: &str) -> SharedResult<Arc<AnalysisQueryResult>>;
+    fn resolve_query(&self, query: &str) -> buck2_error::Result<Arc<AnalysisQueryResult>>;

     fn execution_platform_resolution(&self) -> &ExecutionPlatformResolution;
 }
diff --git a/app/buck2_analysis/src/attrs/resolve/node_to_attrs_struct.rs b/app/buck2_analysis/src/attrs/resolve/node_to_attrs_struct.rs
index 6eee0b1ee7291..8fd275ce33643 100644
--- a/app/buck2_analysis/src/attrs/resolve/node_to_attrs_struct.rs
+++ b/app/buck2_analysis/src/attrs/resolve/node_to_attrs_struct.rs
@@ -8,22 +8,26 @@
  */

 use buck2_node::attrs::inspect_options::AttrInspectOptions;
-use buck2_node::nodes::configured::ConfiguredTargetNode;
+use buck2_node::nodes::configured::ConfiguredTargetNodeRef;
 use starlark::values::structs::AllocStruct;
-use starlark::values::Value;
+use starlark::values::structs::StructRef;
+use starlark::values::ValueOfUnchecked;

 use crate::attrs::resolve::configured_attr::ConfiguredAttrExt;
 use crate::attrs::resolve::ctx::AttrResolutionContext;

 /// Prepare `ctx.attrs` for rule impl.
 pub(crate) fn node_to_attrs_struct<'v>(
-    node: &ConfiguredTargetNode,
+    node: ConfiguredTargetNodeRef,
     ctx: &dyn AttrResolutionContext<'v>,
-) -> anyhow::Result<Value<'v>> {
+) -> anyhow::Result<ValueOfUnchecked<'v, StructRef<'v>>> {
     let attrs_iter = node.attrs(AttrInspectOptions::All);
     let mut resolved_attrs = Vec::with_capacity(attrs_iter.size_hint().0);
     for a in attrs_iter {
         resolved_attrs.push((a.name, a.value.resolve_single(node.label().pkg(), ctx)?));
     }
-    Ok(ctx.heap().alloc(AllocStruct(resolved_attrs)))
+    Ok(ctx
+        .heap()
+        .alloc_typed_unchecked(AllocStruct(resolved_attrs))
+        .cast())
 }
diff --git a/app/buck2_analysis/src/lib.rs b/app/buck2_analysis/src/lib.rs
index 566beeeb469e2..9526893f7afb2 100644
--- a/app/buck2_analysis/src/lib.rs
+++ b/app/buck2_analysis/src/lib.rs
@@ -7,7 +7,7 @@
  * of this source tree.
*/ -#![feature(async_closure)] +#![feature(error_generic_member_access)] #![feature(box_patterns)] #![feature(try_blocks)] @@ -15,5 +15,6 @@ pub mod analysis; pub mod attrs; pub fn init_late_bindings() { + attrs::resolve::configured_attr::init_configured_attr_to_value(); analysis::calculation::init_rule_analysis_calculation(); } diff --git a/app/buck2_anon_target/BUCK b/app/buck2_anon_target/BUCK index f25ff54e9f9e9..d302159adbcf4 100644 --- a/app/buck2_anon_target/BUCK +++ b/app/buck2_anon_target/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -12,31 +11,27 @@ rust_library( "fbsource//third-party/rust:derive_more", "fbsource//third-party/rust:either", "fbsource//third-party/rust:futures", - "fbsource//third-party/rust:once_cell", "fbsource//third-party/rust:serde", "fbsource//third-party/rust:serde_json", - "fbsource//third-party/rust:thiserror", "//buck2/allocative/allocative:allocative", "//buck2/app/buck2_analysis:buck2_analysis", "//buck2/app/buck2_artifact:buck2_artifact", "//buck2/app/buck2_build_api:buck2_build_api", - "//buck2/app/buck2_common:buck2_common", "//buck2/app/buck2_configured:buck2_configured", "//buck2/app/buck2_core:buck2_core", "//buck2/app/buck2_data:buck2_data", + "//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_events:buck2_events", "//buck2/app/buck2_execute:buck2_execute", + "//buck2/app/buck2_futures:buck2_futures", "//buck2/app/buck2_interpreter:buck2_interpreter", "//buck2/app/buck2_interpreter_for_build:buck2_interpreter_for_build", "//buck2/app/buck2_node:buck2_node", - "//buck2/app/buck2_query:buck2_query", - "//buck2/app/buck2_query_parser:buck2_query_parser", "//buck2/app/buck2_util:buck2_util", "//buck2/dice/dice:dice", "//buck2/gazebo/cmp_any:cmp_any", "//buck2/gazebo/dupe:dupe", "//buck2/gazebo/gazebo:gazebo", - "//buck2/shed/more_futures:more_futures", "//buck2/starlark-rust/starlark:starlark", "//buck2/starlark-rust/starlark_map:starlark_map", ], diff --git a/app/buck2_anon_target/Cargo.toml b/app/buck2_anon_target/Cargo.toml index e5d32d72cd427..391d5cfb152ae 100644 --- a/app/buck2_anon_target/Cargo.toml +++ b/app/buck2_anon_target/Cargo.toml @@ -1,8 +1,10 @@ [package] +description = "Anonymous targets" +edition = "2021" +license = { workspace = true } name = "buck2_anon_target" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Anonymous targets" [dependencies] anyhow = { workspace = true } @@ -10,32 +12,28 @@ async-trait = { workspace = true } derive_more = { workspace = true } either = { workspace = true } futures = { workspace = true } -once_cell = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } -thiserror = { workspace = true } allocative = { workspace = true } +cmp_any = { workspace = true } dice = { workspace = true } dupe = { workspace = true } gazebo = { workspace = true } -cmp_any = { workspace = true } -more_futures = { workspace = true } starlark = { workspace = true } starlark_map = { workspace = true } buck2_analysis = { workspace = true } buck2_artifact = { workspace = true } buck2_build_api = { workspace = true } -buck2_common = { workspace = true } +buck2_configured = { workspace = true } +buck2_core = { workspace = true } buck2_data = { workspace = true } +buck2_error = { workspace = true } buck2_events = { workspace = true } buck2_execute = { workspace = true } -buck2_configured = { workspace = true } -buck2_core = { workspace 
= true } +buck2_futures = { workspace = true } buck2_interpreter = { workspace = true } buck2_interpreter_for_build = { workspace = true } buck2_node = { workspace = true } -buck2_query = { workspace = true } -buck2_query_parser = { workspace = true } buck2_util = { workspace = true } diff --git a/app/buck2_anon_target/src/anon_promises.rs b/app/buck2_anon_target/src/anon_promises.rs index 913fc749fedbb..00b0a9ded03dd 100644 --- a/app/buck2_anon_target/src/anon_promises.rs +++ b/app/buck2_anon_target/src/anon_promises.rs @@ -11,11 +11,11 @@ use allocative::Allocative; use async_trait::async_trait; use buck2_build_api::analysis::anon_promises_dyn::AnonPromisesDyn; use buck2_interpreter::dice::starlark_provider::with_starlark_eval_provider; -use buck2_interpreter::starlark_profiler::StarlarkProfilerOrInstrumentation; +use buck2_interpreter::starlark_profiler::profiler::StarlarkProfilerOpt; use buck2_interpreter::starlark_promise::StarlarkPromise; use dice::DiceComputations; use either::Either; -use futures::future; +use futures::FutureExt; use starlark::eval::Evaluator; use starlark::values::list::AllocList; use starlark::values::Trace; @@ -51,8 +51,8 @@ impl<'v> AnonPromises<'v> { impl<'v> AnonPromisesDyn<'v> for AnonPromises<'v> { async fn run_promises( self: Box, - dice: &DiceComputations, - eval: &mut Evaluator<'v, '_>, + dice: &mut DiceComputations, + eval: &mut Evaluator<'v, '_, '_>, description: String, ) -> anyhow::Result<()> { // Resolve all the targets in parallel @@ -72,32 +72,30 @@ impl<'v> AnonPromisesDyn<'v> for AnonPromises<'v> { } } - let values = - future::try_join_all(targets.iter().map(|target| target.resolve(dice))).await?; + let values = dice + .try_compute_join(targets.iter(), |ctx, target| { + async move { target.resolve(ctx).await }.boxed() + }) + .await?; + with_starlark_eval_provider( dice, - &mut StarlarkProfilerOrInstrumentation::disabled(), + &mut StarlarkProfilerOpt::disabled(), description, |_provider, _| { // But must bind the promises sequentially for (promise, xs) in shape { match xs { Either::Left(i) => { - let val = values[i] - .provider_collection - .value() - .owned_value(eval.frozen_heap()); - promise.resolve(val, eval)? + let val = values[i].providers()?.add_heap_ref(eval.frozen_heap()); + promise.resolve(val.to_value(), eval)? } Either::Right(is) => { let xs: Vec<_> = is .map(|i| { - values[i] - .provider_collection - .value() - .owned_value(eval.frozen_heap()) + Ok(values[i].providers()?.add_heap_ref(eval.frozen_heap())) }) - .collect(); + .collect::>()?; let list = eval.heap().alloc(AllocList(xs)); promise.resolve(list, eval)? 
} diff --git a/app/buck2_anon_target/src/anon_target_attr.rs b/app/buck2_anon_target/src/anon_target_attr.rs index 2fcfd03bc7255..7478ddd06710f 100644 --- a/app/buck2_anon_target/src/anon_target_attr.rs +++ b/app/buck2_anon_target/src/anon_target_attr.rs @@ -8,11 +8,9 @@ */ use std::fmt::Debug; -use std::fmt::Display; use allocative::Allocative; use buck2_artifact::artifact::artifact_type::Artifact; -use buck2_build_api::interpreter::rule_defs::artifact::StarlarkPromiseArtifact; use buck2_core::package::PackageLabel; use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_core::provider::label::ProvidersLabel; @@ -37,6 +35,9 @@ use serde::Serialize; use serde::Serializer; use serde_json::to_value; +use crate::anon_target_attr_resolve::AnonTargetAttrTraversal; +use crate::promise_artifacts::PromiseArtifactAttr; + #[derive(Debug, Clone, PartialEq, Eq, Hash, Allocative)] pub enum AnonTargetAttr { Bool(BoolLiteral), @@ -64,7 +65,7 @@ pub enum AnonTargetAttr { // Accepts any bound artifacts. Maps to `attr.source()`. Artifact(Artifact), // Accepts unresolved promise artifacts. Maps to `attr.source()`. - PromiseArtifact(StarlarkPromiseArtifact), + PromiseArtifact(PromiseArtifactAttr), Arg(ConfiguredStringWithMacros), Label(ProvidersLabel), } @@ -90,7 +91,9 @@ impl AttrDisplayWithContext for AnonTargetAttr { AnonTargetAttr::Int(v) => { write!(f, "{}", v) } - AnonTargetAttr::String(v) | AnonTargetAttr::EnumVariant(v) => Display::fmt(v, f), + AnonTargetAttr::String(v) | AnonTargetAttr::EnumVariant(v) => { + AttrDisplayWithContext::fmt(v, ctx, f) + } AnonTargetAttr::List(list) => AttrDisplayWithContext::fmt(list, ctx, f), AnonTargetAttr::Tuple(v) => AttrDisplayWithContext::fmt(v, ctx, f), AnonTargetAttr::Dict(v) => AttrDisplayWithContext::fmt(v, ctx, f), @@ -105,7 +108,7 @@ impl AttrDisplayWithContext for AnonTargetAttr { } } -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum AnonTargetAttrError { #[error("Inconsistent number of elements in tuple")] InconsistentTupleLength, @@ -131,7 +134,7 @@ impl ToJsonWithContext for AnonTargetAttr { } } -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] pub(crate) enum AnonTargetFromCoercedAttrError { #[error("Anon targets do not support default values for `{0}`")] DefaultAttrTypeNotSupported(String), @@ -151,20 +154,20 @@ impl AnonTargetAttr { AnonTargetAttr::EnumVariant(_) => Ok(()), AnonTargetAttr::List(list) => { for v in list.iter() { - v.traverse(pkg.dupe(), traversal)?; + v.traverse(pkg, traversal)?; } Ok(()) } AnonTargetAttr::Tuple(list) => { for v in list.iter() { - v.traverse(pkg.dupe(), traversal)?; + v.traverse(pkg, traversal)?; } Ok(()) } AnonTargetAttr::Dict(dict) => { for (k, v) in dict.iter() { - k.traverse(pkg.dupe(), traversal)?; - v.traverse(pkg.dupe(), traversal)?; + k.traverse(pkg, traversal)?; + v.traverse(pkg, traversal)?; } Ok(()) } @@ -172,12 +175,53 @@ impl AnonTargetAttr { AnonTargetAttr::OneOf(l, _) => l.traverse(pkg, traversal), AnonTargetAttr::Dep(dep) => traversal.dep(&dep.label), AnonTargetAttr::Artifact(_) => Ok(()), - AnonTargetAttr::Arg(e) => e.string_with_macros.traverse(traversal), + AnonTargetAttr::Arg(e) => e.string_with_macros.traverse(traversal, pkg), AnonTargetAttr::PromiseArtifact(..) 
=> Ok(()), AnonTargetAttr::Label(_) => Ok(()), } } + #[allow(unused)] + pub fn traverse_anon_attr<'a>( + &'a self, + traversal: &mut dyn AnonTargetAttrTraversal, + ) -> anyhow::Result<()> { + match self { + AnonTargetAttr::Bool(_) => Ok(()), + AnonTargetAttr::Int(_) => Ok(()), + AnonTargetAttr::String(_) => Ok(()), + AnonTargetAttr::EnumVariant(_) => Ok(()), + AnonTargetAttr::List(list) => { + for v in list.iter() { + v.traverse_anon_attr(traversal)?; + } + Ok(()) + } + AnonTargetAttr::Tuple(list) => { + for v in list.iter() { + v.traverse_anon_attr(traversal)?; + } + Ok(()) + } + AnonTargetAttr::Dict(dict) => { + for (k, v) in dict.iter() { + k.traverse_anon_attr(traversal)?; + v.traverse_anon_attr(traversal)?; + } + Ok(()) + } + AnonTargetAttr::None => Ok(()), + AnonTargetAttr::OneOf(l, _) => l.traverse_anon_attr(traversal), + AnonTargetAttr::Dep(_) => Ok(()), + AnonTargetAttr::Artifact(_) => Ok(()), + AnonTargetAttr::Arg(_) => Ok(()), + AnonTargetAttr::PromiseArtifact(promise_artifact) => { + traversal.promise_artifact(promise_artifact) + } + AnonTargetAttr::Label(_) => Ok(()), + } + } + pub fn _unpack_list(&self) -> Option<&[AnonTargetAttr]> { match self { AnonTargetAttr::List(list) => Some(list), diff --git a/app/buck2_anon_target/src/anon_target_attr_coerce.rs b/app/buck2_anon_target/src/anon_target_attr_coerce.rs index d96741af9b1d5..19e891449be45 100644 --- a/app/buck2_anon_target/src/anon_target_attr_coerce.rs +++ b/app/buck2_anon_target/src/anon_target_attr_coerce.rs @@ -12,7 +12,7 @@ use std::fmt::Debug; use std::iter; use buck2_build_api::interpreter::rule_defs::artifact::starlark_artifact_like::ValueAsArtifactLike; -use buck2_build_api::interpreter::rule_defs::artifact::StarlarkPromiseArtifact; +use buck2_build_api::interpreter::rule_defs::artifact::starlark_promise_artifact::StarlarkPromiseArtifact; use buck2_build_api::interpreter::rule_defs::provider::dependency::Dependency; use buck2_build_api::interpreter::rule_defs::resolved_macro::ResolvedStringWithMacros; use buck2_core::provider::label::ProvidersLabel; @@ -32,7 +32,6 @@ use buck2_node::attrs::attr_type::tuple::TupleAttrType; use buck2_node::attrs::attr_type::tuple::TupleLiteral; use buck2_node::attrs::attr_type::AttrType; use buck2_node::attrs::attr_type::AttrTypeInner; -use buck2_node::attrs::coercion_context::AttrCoercionContext; use dupe::Dupe; use dupe::IterDupedExt; use gazebo::prelude::SliceExt; @@ -43,9 +42,11 @@ use starlark::values::string::STRING_TYPE; use starlark::values::tuple::TupleRef; use starlark::values::UnpackValue; use starlark::values::Value; +use starlark::StarlarkResultExt; use crate::anon_target_attr::AnonTargetAttr; use crate::anon_targets::AnonAttrCtx; +use crate::promise_artifacts::PromiseArtifactAttr; pub trait AnonTargetAttrTypeCoerce { fn coerce_item(&self, ctx: &AnonAttrCtx, value: Value) -> anyhow::Result; @@ -53,7 +54,7 @@ pub trait AnonTargetAttrTypeCoerce { impl AnonTargetAttrTypeCoerce for AttrType { fn coerce_item(&self, ctx: &AnonAttrCtx, value: Value) -> anyhow::Result { - match self.0.as_ref() { + match &self.0.inner { AttrTypeInner::Any(_) => to_anon_target_any(value, ctx), AttrTypeInner::Bool(_) => match value.unpack_bool() { Some(s) => Ok(AnonTargetAttr::Bool(BoolLiteral(s))), @@ -61,7 +62,7 @@ impl AnonTargetAttrTypeCoerce for AttrType { "bool", value ))), }, - AttrTypeInner::Int(_) => match i64::unpack_value(value) { + AttrTypeInner::Int(_) => match i64::unpack_value(value).into_anyhow_result()? 
{ Some(x) => Ok(AnonTargetAttr::Int(x)), None => Err(anyhow::anyhow!(AnonTargetCoercionError::type_error( "int", value @@ -102,12 +103,12 @@ impl AnonTargetAttrTypeCoerce for AttrType { AttrTypeInner::Dep(x) => { match Dependency::from_value(value) { Some(dep) => { - let label = dep.label().inner().clone(); + let label = dep.label().inner().dupe(); let attr_type = match x.transition { DepAttrTransition::Identity(..) => x.clone(), DepAttrTransition::Exec => { - match dep.execution_platform() { + match dep.execution_platform()? { Some(exec_dep_resolution) => { if !exec_dep_resolution.eq(&ctx.execution_platform_resolution) { return Err(AnonTargetCoercionError::ExecDepPlatformMismatch(exec_dep_resolution.platform()?.id(), ctx.execution_platform_resolution.platform()?.id()).into()); @@ -134,11 +135,21 @@ impl AnonTargetAttrTypeCoerce for AttrType { // Check if this is a StarlarkPromiseArtifact first before checking other artifact types to // allow anon targets to accept unresolved promise artifacts. if let Some(promise_artifact) = StarlarkPromiseArtifact::from_value(value) { - Ok(AnonTargetAttr::PromiseArtifact(promise_artifact.clone())) - } else if let Some(artifact_like) = ValueAsArtifactLike::unpack_value(value) { + Ok(AnonTargetAttr::PromiseArtifact(PromiseArtifactAttr { + id: promise_artifact.artifact.id.as_ref().clone(), + short_path: promise_artifact.short_path.clone(), + })) + } else if let Some(artifact_like) = + ValueAsArtifactLike::unpack_value(value).into_anyhow_result()? + { let artifact = artifact_like.0.get_bound_artifact()?; Ok(AnonTargetAttr::Artifact(artifact)) } else { + // TODO(nga): `EnsuredArtifact` gets here with unhelpful error message like: + // ``` + // Expected value of type `artifact`, got value with type `ensured_artifact` + // (value was ` { if let Some(label) = StarlarkProvidersLabel::from_value(value) { - Ok(AnonTargetAttr::Label(label.label().clone())) + Ok(AnonTargetAttr::Label(label.label().dupe())) } else if let Some(label) = StarlarkTargetLabel::from_value(value) { Ok(AnonTargetAttr::Label(ProvidersLabel::default_for( - label.label().clone(), + label.label().dupe(), ))) } else { Err(AnonTargetCoercionError::type_error( @@ -169,7 +180,7 @@ impl AnonTargetAttrTypeCoerce for AttrType { } } -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] pub(crate) enum AnonTargetCoercionError { #[error("Expected value of type `{0}`, got value with type `{1}` (value was `{2}`)")] TypeError(String, String, String), @@ -222,7 +233,7 @@ fn to_anon_target_any(value: Value, ctx: &AnonAttrCtx) -> anyhow::Result { + pub(crate) promised_artifacts_map: HashMap<&'v PromiseArtifactAttr, Artifact>, + pub(crate) rule_analysis_attr_resolution_ctx: RuleAnalysisAttrResolutionContext<'v>, +} -pub trait AnonTargetAttrExt { +pub trait AnonTargetAttrResolution { fn resolve<'v>( &self, pkg: PackageLabel, - ctx: &dyn AttrResolutionContext<'v>, + ctx: &AnonTargetAttrResolutionContext<'v>, ) -> anyhow::Result>>; fn resolve_single<'v>( &self, pkg: PackageLabel, - ctx: &dyn AttrResolutionContext<'v>, + ctx: &AnonTargetAttrResolutionContext<'v>, ) -> anyhow::Result>; } -impl AnonTargetAttrExt for AnonTargetAttr { +impl AnonTargetAttrResolution for AnonTargetAttr { /// "Resolves" the anon target attr value to the resolved value provided to the rule implementation. /// /// `resolve` may return multiple values. 
It is up to the caller to fail if @@ -46,7 +76,7 @@ impl AnonTargetAttrExt for AnonTargetAttr { fn resolve<'v>( &self, pkg: PackageLabel, - ctx: &dyn AttrResolutionContext<'v>, + ctx: &AnonTargetAttrResolutionContext<'v>, ) -> anyhow::Result>> { Ok(vec![self.resolve_single(pkg, ctx)?]) } @@ -56,8 +86,9 @@ impl AnonTargetAttrExt for AnonTargetAttr { fn resolve_single<'v>( &self, pkg: PackageLabel, - ctx: &dyn AttrResolutionContext<'v>, + anon_resolution_ctx: &AnonTargetAttrResolutionContext<'v>, ) -> anyhow::Result> { + let ctx = &anon_resolution_ctx.rule_analysis_attr_resolution_ctx; match self { AnonTargetAttr::Bool(v) => Ok(Value::new_bool(v.0)), AnonTargetAttr::Int(v) => Ok(ctx.heap().alloc(*v)), @@ -67,14 +98,14 @@ impl AnonTargetAttrExt for AnonTargetAttr { AnonTargetAttr::List(list) => { let mut values = Vec::with_capacity(list.len()); for v in list.iter() { - values.append(&mut v.resolve(pkg.dupe(), ctx)?); + values.append(&mut v.resolve(pkg, anon_resolution_ctx)?); } Ok(ctx.heap().alloc(values)) } AnonTargetAttr::Tuple(list) => { let mut values = Vec::with_capacity(list.len()); for v in list.iter() { - values.append(&mut v.resolve(pkg.dupe(), ctx)?); + values.append(&mut v.resolve(pkg, anon_resolution_ctx)?); } Ok(ctx.heap().alloc(AllocTuple(values))) } @@ -82,21 +113,162 @@ impl AnonTargetAttrExt for AnonTargetAttr { let mut res = SmallMap::with_capacity(dict.len()); for (k, v) in dict.iter() { res.insert_hashed( - k.resolve_single(pkg.dupe(), ctx)?.get_hashed()?, - v.resolve_single(pkg.dupe(), ctx)?, + k.resolve_single(pkg, anon_resolution_ctx)? + .get_hashed() + .map_err(from_starlark)?, + v.resolve_single(pkg, anon_resolution_ctx)?, ); } Ok(ctx.heap().alloc(Dict::new(res))) } AnonTargetAttr::None => Ok(Value::new_none()), - AnonTargetAttr::OneOf(box l, _) => l.resolve_single(pkg, ctx), + AnonTargetAttr::OneOf(box l, _) => l.resolve_single(pkg, anon_resolution_ctx), AnonTargetAttr::Dep(d) => DepAttrType::resolve_single(ctx, d), AnonTargetAttr::Artifact(d) => Ok(ctx.heap().alloc(StarlarkArtifact::new(d.clone()))), - AnonTargetAttr::Arg(a) => a.resolve(ctx), - AnonTargetAttr::PromiseArtifact(artifact) => Ok(ctx.heap().alloc(artifact.clone())), + AnonTargetAttr::Arg(a) => a.resolve(ctx, pkg), + AnonTargetAttr::PromiseArtifact(promise_artifact_attr) => { + let promise_id = promise_artifact_attr.id.clone(); + // We validated that the analysis contains the promise artifact id earlier + let artifact = anon_resolution_ctx + .promised_artifacts_map + .get(&promise_artifact_attr) + .unwrap(); + + // Assert the short path, since we have the real artifact now + if let Some(expected_short_path) = &promise_artifact_attr.short_path { + artifact.get_path().with_short_path(|artifact_short_path| { + if artifact_short_path != expected_short_path { + Err(anyhow::Error::from( + PromiseArtifactResolveError::ShortPathMismatch( + expected_short_path.clone(), + artifact_short_path.to_string(), + ), + )) + } else { + Ok(()) + } + })?; + } + + let fulfilled = OnceLock::new(); + fulfilled.set(artifact.clone()).unwrap(); + + let fulfilled_promise_inner = + PromiseArtifact::new(Arc::new(fulfilled), Arc::new(promise_id)); + + let fulfilled_promise_artifact = StarlarkPromiseArtifact::new( + None, + fulfilled_promise_inner, + promise_artifact_attr.short_path.clone(), + ); + + // To resolve the promise artifact attr, we end up creating a new `StarlarkPromiseArtifact` with the `OnceLock` set + // with the artifact that was found from the upstream analysis. 
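As an aside, the fulfillment step described in the comment above can be illustrated in isolation. The sketch below is a minimal stand-in with invented types (plain `String` instead of buck2's artifact types), showing the pattern of pre-setting a shared `OnceLock` with the artifact recovered from the upstream analysis:

```rust
// Minimal sketch, assuming invented types: a "fulfilled" promise is a shared
// OnceLock that is set exactly once, up front, with the real artifact.
use std::sync::{Arc, OnceLock};

#[derive(Clone)]
struct FulfilledPromise(Arc<OnceLock<String>>);

impl FulfilledPromise {
    fn new(artifact_path: String) -> Self {
        let cell = OnceLock::new();
        // A freshly created cell cannot already be set, so this never fails.
        assert!(cell.set(artifact_path).is_ok());
        FulfilledPromise(Arc::new(cell))
    }

    fn get(&self) -> &str {
        self.0.get().expect("constructed fulfilled")
    }
}

fn main() {
    let p = FulfilledPromise::new("buck-out/gen/lib.a".to_string());
    assert_eq!(p.get(), "buck-out/gen/lib.a");
}
```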
+ Ok(ctx.heap().alloc(fulfilled_promise_artifact)) + } AnonTargetAttr::Label(label) => { - Ok(ctx.heap().alloc(StarlarkProvidersLabel::new(label.clone()))) + Ok(ctx.heap().alloc(StarlarkProvidersLabel::new(label.dupe()))) + } + } + } +} + +// Container for things that require looking up analysis results in order to resolve the attribute. +pub(crate) struct AnonTargetDependents { + pub(crate) deps: Vec<ConfiguredTargetLabel>, + pub(crate) promise_artifacts: Vec<PromiseArtifactAttr>, +} + +// Container for analysis results of the anon target dependents. +pub(crate) struct AnonTargetDependentAnalysisResults<'v> { + pub(crate) dep_analysis_results: Vec<(&'v ConfiguredTargetLabel, AnalysisResult)>, + pub(crate) promised_artifacts: HashMap<&'v PromiseArtifactAttr, Artifact>, +} + +pub(crate) trait AnonTargetAttrTraversal { + fn promise_artifact(&mut self, promise_artifact: &PromiseArtifactAttr) -> anyhow::Result<()>; +} + +impl AnonTargetDependents { + pub(crate) fn get_dependents( + anon_target: &AnonTargetKey, + ) -> anyhow::Result<AnonTargetDependents> { + struct DepTraversal(Vec<ConfiguredTargetLabel>); + struct PromiseArtifactTraversal(Vec<PromiseArtifactAttr>); + + impl ConfiguredAttrTraversal for DepTraversal { + fn dep(&mut self, dep: &ConfiguredProvidersLabel) -> anyhow::Result<()> { + self.0.push(dep.target().dupe()); + Ok(()) + } + + fn query( + &mut self, + _query: &str, + _resolved_literals: &ResolvedQueryLiterals<ConfiguredProvidersLabel>, + ) -> anyhow::Result<()> { + Err(AnonTargetsError::QueryMacroNotSupported.into()) } } + + impl AnonTargetAttrTraversal for PromiseArtifactTraversal { + fn promise_artifact( + &mut self, + promise_artifact: &PromiseArtifactAttr, + ) -> anyhow::Result<()> { + self.0.push(promise_artifact.clone()); + Ok(()) + } + } + + let mut dep_traversal = DepTraversal(Vec::new()); + let mut promise_artifact_traversal = PromiseArtifactTraversal(Vec::new()); + for x in anon_target.0.attrs().values() { + x.traverse(anon_target.0.name().pkg(), &mut dep_traversal)?; + x.traverse_anon_attr(&mut promise_artifact_traversal)?; + } + Ok(AnonTargetDependents { + deps: dep_traversal.0, + promise_artifacts: promise_artifact_traversal.0, + }) + } + + pub(crate) async fn get_analysis_results<'v>( + &'v self, + dice: &mut DiceComputations<'_>, + ) -> anyhow::Result<AnonTargetDependentAnalysisResults<'v>> { + let dep_analysis_results = + KeepGoing::try_compute_join_all(dice, self.deps.iter(), |ctx, dep| { + async move { + ctx.get_analysis_result(dep) + .await + .and_then(|v| v.require_compatible()) + .map(|r| (dep, r)) + } + .boxed() + }) + .await?; + let promised_artifacts: HashMap<_, _> = { + KeepGoing::try_compute_join_all( + dice, + self.promise_artifacts.iter(), + |ctx, promise_artifact_attr| { + async move { + get_artifact_from_anon_target_analysis(&promise_artifact_attr.id, ctx) + .await + .map(|artifact| (promise_artifact_attr, artifact)) + } + .boxed() + }, + ) + } + .await?
+ .into_iter() + .collect(); + + Ok(AnonTargetDependentAnalysisResults { + dep_analysis_results, + promised_artifacts, + }) } } diff --git a/app/buck2_anon_target/src/anon_target_node.rs b/app/buck2_anon_target/src/anon_target_node.rs index 0f6f306664976..3654d6d7244db 100644 --- a/app/buck2_anon_target/src/anon_target_node.rs +++ b/app/buck2_anon_target/src/anon_target_node.rs @@ -23,7 +23,7 @@ use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; use buck2_core::fs::project_rel_path::ProjectRelativePath; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; use buck2_core::target::configured_target_label::ConfiguredTargetLabel; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use buck2_data::action_key_owner::BaseDeferredKeyProto; use buck2_data::ToProtoMessage; use buck2_node::rule_type::StarlarkRuleType; diff --git a/app/buck2_anon_target/src/anon_targets.rs b/app/buck2_anon_target/src/anon_targets.rs index 257c3381c5524..b1ebe00d4014b 100644 --- a/app/buck2_anon_target/src/anon_targets.rs +++ b/app/buck2_anon_target/src/anon_targets.rs @@ -13,96 +13,86 @@ use std::mem; use std::sync::Arc; use allocative::Allocative; -use anyhow::Context as _; +use anyhow::Context; use async_trait::async_trait; -use buck2_analysis::analysis::calculation::get_rule_impl; +use buck2_analysis::analysis::calculation::get_rule_spec; +use buck2_analysis::analysis::env::get_deps_from_analysis_results; +use buck2_analysis::analysis::env::transitive_validations; use buck2_analysis::analysis::env::RuleAnalysisAttrResolutionContext; -use buck2_analysis::analysis::env::RuleImplFunction; +use buck2_analysis::analysis::env::RuleSpec; +use buck2_artifact::artifact::artifact_type::Artifact; use buck2_build_api::analysis::anon_promises_dyn::AnonPromisesDyn; use buck2_build_api::analysis::anon_targets_registry::AnonTargetsRegistryDyn; use buck2_build_api::analysis::anon_targets_registry::ANON_TARGET_REGISTRY_NEW; -use buck2_build_api::analysis::calculation::RuleAnalysisCalculation; use buck2_build_api::analysis::registry::AnalysisRegistry; use buck2_build_api::analysis::AnalysisResult; use buck2_build_api::artifact_groups::promise::PromiseArtifact; use buck2_build_api::artifact_groups::promise::PromiseArtifactId; +use buck2_build_api::artifact_groups::promise::PromiseArtifactResolveError; use buck2_build_api::deferred::calculation::EVAL_ANON_TARGET; -use buck2_build_api::deferred::types::DeferredTable; +use buck2_build_api::deferred::calculation::GET_PROMISED_ARTIFACT; +use buck2_build_api::interpreter::rule_defs::artifact::starlark_artifact_like::ValueAsArtifactLike; use buck2_build_api::interpreter::rule_defs::context::AnalysisContext; use buck2_build_api::interpreter::rule_defs::plugins::AnalysisPlugins; -use buck2_build_api::interpreter::rule_defs::provider::collection::FrozenProviderCollectionValue; use buck2_build_api::interpreter::rule_defs::provider::collection::ProviderCollection; -use buck2_build_api::keep_going; -use buck2_common::result::SharedResult; use buck2_configured::nodes::calculation::find_execution_platform_by_configuration; use buck2_core::base_deferred_key::BaseDeferredKey; use buck2_core::base_deferred_key::BaseDeferredKeyDyn; use buck2_core::cells::name::CellName; use buck2_core::cells::paths::CellRelativePath; -use buck2_core::configuration::config_setting::ConfigSettingData; -use buck2_core::configuration::data::ConfigurationData; -use buck2_core::configuration::pair::ConfigurationNoExec; -use 
buck2_core::configuration::pair::ConfigurationWithExec; -use buck2_core::configuration::transition::applied::TransitionApplied; -use buck2_core::configuration::transition::id::TransitionId; use buck2_core::execution_types::execution::ExecutionPlatformResolution; -use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; use buck2_core::package::PackageLabel; -use buck2_core::pattern::lex_target_pattern; +use buck2_core::pattern::pattern::lex_target_pattern; +use buck2_core::pattern::pattern::PatternData; use buck2_core::pattern::pattern_type::TargetPatternExtra; -use buck2_core::pattern::ParsedPattern; -use buck2_core::pattern::PatternData; -use buck2_core::provider::label::ConfiguredProvidersLabel; -use buck2_core::provider::label::ProvidersLabel; -use buck2_core::provider::label::ProvidersName; -use buck2_core::target::configured_target_label::ConfiguredTargetLabel; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use buck2_core::target::name::TargetNameRef; use buck2_core::unsafe_send_future::UnsafeSendFuture; +use buck2_error::starlark_error::from_starlark; +use buck2_error::BuckErrorContext; use buck2_events::dispatch::get_dispatcher; use buck2_events::dispatch::span_async; use buck2_execute::digest_config::HasDigestConfig; +use buck2_futures::cancellation::CancellationContext; use buck2_interpreter::dice::starlark_provider::with_starlark_eval_provider; use buck2_interpreter::print_handler::EventDispatcherPrintHandler; -use buck2_interpreter::starlark_profiler::StarlarkProfilerOrInstrumentation; +use buck2_interpreter::soft_error::Buck2StarlarkSoftErrorHandler; +use buck2_interpreter::starlark_profiler::profiler::StarlarkProfilerOpt; use buck2_interpreter::starlark_promise::StarlarkPromise; use buck2_interpreter::types::configured_providers_label::StarlarkConfiguredProvidersLabel; use buck2_interpreter_for_build::rule::FrozenRuleCallable; -use buck2_node::attrs::attr_type::query::ResolvedQueryLiterals; use buck2_node::attrs::attr_type::AttrType; use buck2_node::attrs::coerced_attr::CoercedAttr; -use buck2_node::attrs::coerced_path::CoercedPath; -use buck2_node::attrs::coercion_context::AttrCoercionContext; -use buck2_node::attrs::configuration_context::AttrConfigurationContext; -use buck2_node::attrs::configured_traversal::ConfiguredAttrTraversal; use buck2_node::attrs::internal::internal_attrs; -use buck2_util::arc_str::ArcSlice; use buck2_util::arc_str::ArcStr; use derive_more::Display; use dice::DiceComputations; use dice::Key; use dupe::Dupe; -use futures::stream::FuturesUnordered; -use futures::Future; +use futures::future::BoxFuture; use futures::FutureExt; -use more_futures::cancellation::CancellationContext; use starlark::any::AnyLifetime; use starlark::any::ProvidesStaticType; use starlark::codemap::FileSpan; use starlark::environment::Module; -use starlark::values::dict::DictOf; +use starlark::eval::Evaluator; +use starlark::values::dict::UnpackDictEntries; use starlark::values::structs::AllocStruct; use starlark::values::Trace; +use starlark::values::UnpackValue; use starlark::values::Value; use starlark::values::ValueTyped; +use starlark::values::ValueTypedComplex; +use starlark::StarlarkResultExt; use starlark_map::ordered_map::OrderedMap; use starlark_map::small_map::SmallMap; -use thiserror::Error; use crate::anon_promises::AnonPromises; use crate::anon_target_attr::AnonTargetAttr; use crate::anon_target_attr_coerce::AnonTargetAttrTypeCoerce; -use crate::anon_target_attr_resolve::AnonTargetAttrExt; +use 
crate::anon_target_attr_resolve::AnonTargetAttrResolution; +use crate::anon_target_attr_resolve::AnonTargetAttrResolutionContext; +use crate::anon_target_attr_resolve::AnonTargetDependents; use crate::anon_target_node::AnonTarget; use crate::promise_artifacts::PromiseArtifactRegistry; @@ -111,21 +101,17 @@ pub struct AnonTargetsRegistry<'v> { // We inherit the execution platform of our parent execution_platform: ExecutionPlatformResolution, promises: AnonPromises<'v>, - promise_artifact_registry: PromiseArtifactRegistry<'v>, + promise_artifact_registry: PromiseArtifactRegistry, } -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] pub enum AnonTargetsError { #[error("Not allowed to call `anon_targets` in this context")] AssertNoPromisesFailed, - #[error( - "Invalid `name` attribute, must be a label or a string, got `{value}` of type `{typ}`" - )] + #[error("Invalid `name` attribute, must be a label or a string, got `{value}` of type `{typ}`")] InvalidNameType { typ: String, value: String }, #[error("`name` attribute must be a valid target label, got `{0}`")] NotTargetLabel(String), - #[error("can't parse strings during `anon_targets` coercion, got `{0}`")] - CantParseDuringCoerce(String), #[error("Unknown attribute `{0}`")] UnknownAttribute(String), #[error("Internal attribute `{0}` not allowed as argument to `anon_targets`")] @@ -139,25 +125,42 @@ pub enum AnonTargetsError #[derive(Hash, Eq, PartialEq, Clone, Dupe, Debug, Display, Trace, Allocative)] pub(crate) struct AnonTargetKey(pub(crate) Arc<AnonTarget>); +#[async_trait] +impl Key for AnonTargetKey { + type Value = buck2_error::Result<AnalysisResult>; + + async fn compute( + &self, + ctx: &mut DiceComputations, + _cancellation: &CancellationContext, + ) -> Self::Value { + Ok(self.run_analysis(ctx).await?) + } + + fn equality(_: &Self::Value, _: &Self::Value) -> bool { + false + } +} + impl AnonTargetKey { fn downcast(key: Arc<dyn BaseDeferredKeyDyn>) -> anyhow::Result<Self> { Ok(AnonTargetKey( key.into_any() .downcast() .ok() - .context("Expecting AnonTarget (internal error)")?, + .internal_error_anyhow("Expecting AnonTarget")?, )) } pub(crate) fn new<'v>( execution_platform: &ExecutionPlatformResolution, rule: ValueTyped<'v, FrozenRuleCallable>, - attributes: DictOf<'v, &'v str, Value<'v>>, + attributes: UnpackDictEntries<&'v str, Value<'v>>, ) -> anyhow::Result<AnonTargetKey> { let mut name = None; let internal_attrs = internal_attrs(); - let entries = attributes.collect_entries(); + let entries = attributes.entries; let attrs_spec = rule.attributes(); let mut attrs = OrderedMap::with_capacity(attrs_spec.len()); @@ -266,74 +269,37 @@ impl AnonTargetKey { AnonTargetAttr::from_coerced_attr(x, ty) } - pub(crate) async fn resolve(&self, dice: &DiceComputations) -> anyhow::Result<AnalysisResult> { - #[async_trait] - impl Key for AnonTargetKey { - type Value = SharedResult<AnalysisResult>; - - async fn compute( - &self, - ctx: &mut DiceComputations, - _cancellation: &CancellationContext, - ) -> Self::Value { - Ok(self.run_analysis(ctx).await?) - } - - fn equality(_: &Self::Value, _: &Self::Value) -> bool { - false - } - } - + pub(crate) async fn resolve( + &self, + dice: &mut DiceComputations<'_>, + ) -> anyhow::Result<AnalysisResult> { Ok(dice.compute(self).await??)
} fn run_analysis<'a>( &'a self, - dice: &'a DiceComputations, - ) -> impl Future> + Send + 'a { + dice: &'a mut DiceComputations<'_>, + ) -> BoxFuture<'a, anyhow::Result> { let fut = async move { self.run_analysis_impl(dice).await }; - unsafe { UnsafeSendFuture::new_encapsulates_starlark(fut) } + Box::pin(unsafe { UnsafeSendFuture::new_encapsulates_starlark(fut) }) } - fn deps(&self) -> anyhow::Result> { - struct Traversal(Vec); - - impl ConfiguredAttrTraversal for Traversal { - fn dep(&mut self, dep: &ConfiguredProvidersLabel) -> anyhow::Result<()> { - self.0.push(dep.target().dupe()); - Ok(()) - } - - fn query_macro( - &mut self, - _query: &str, - _resolved_literals: &ResolvedQueryLiterals, - ) -> anyhow::Result<()> { - Err(AnonTargetsError::QueryMacroNotSupported.into()) - } - } - - let mut traversal = Traversal(Vec::new()); - for x in self.0.attrs().values() { - x.traverse(self.0.name().pkg(), &mut traversal)?; - } - Ok(traversal.0) - } - async fn run_analysis_impl(&self, dice: &DiceComputations) -> anyhow::Result { - let deps = self.deps()?; - let dep_analysis_results: HashMap<_, _> = keep_going::try_join_all( - dice, - deps.iter() - .map(async move |dep| { - let res = dice - .get_analysis_result(dep) - .await - .and_then(|v| v.require_compatible()); - res.map(|x| (dep, x.providers().dupe())) - }) - .collect::>(), - ) - .await?; + async fn run_analysis_impl( + &self, + dice: &mut DiceComputations<'_>, + ) -> anyhow::Result { + let dependents = AnonTargetDependents::get_dependents(self)?; + let dependents_analyses = dependents.get_analysis_results(dice).await?; + let validations_from_deps = dependents_analyses + .dep_analysis_results + .iter() + .filter_map(|(label, analysis_result)| { + analysis_result + .validations + .dupe() + .map(|v| ((*label).dupe(), v)) + }) + .collect::>(); let exec_resolution = ExecutionPlatformResolution::new( Some( @@ -347,7 +313,7 @@ impl AnonTargetKey { Vec::new(), ); - let rule_impl = get_rule_impl(dice, self.0.rule_type()).await?; + let rule_impl = get_rule_spec(dice, self.0.rule_type()).await?; let env = Module::new(); let print = EventDispatcherPrintHandler(get_dispatcher()); @@ -357,22 +323,32 @@ impl AnonTargetKey { rule: self.0.rule_type().to_string(), }, async move { - let (mut eval, ctx, list_res) = with_starlark_eval_provider( + let (dice, mut eval, ctx, list_res) = with_starlark_eval_provider( dice, - &mut StarlarkProfilerOrInstrumentation::disabled(), + &mut StarlarkProfilerOpt::disabled(), format!("anon_analysis:{}", self), |provider, dice| { - let mut eval = provider.make(&env)?; + let (mut eval, _) = provider.make(&env)?; eval.set_print_handler(&print); + eval.set_soft_error_handler(&Buck2StarlarkSoftErrorHandler); + + let dep_analysis_results = get_deps_from_analysis_results( + dependents_analyses.dep_analysis_results, + )?; // No attributes are allowed to contain macros or other stuff, so an empty resolution context works - let resolution_ctx = RuleAnalysisAttrResolutionContext { + let rule_analysis_attr_resolution_ctx = RuleAnalysisAttrResolutionContext { module: &env, dep_analysis_results, query_results: HashMap::new(), execution_platform_resolution: exec_resolution.clone(), }; + let resolution_ctx = AnonTargetAttrResolutionContext { + promised_artifacts_map: dependents_analyses.promised_artifacts, + rule_analysis_attr_resolution_ctx, + }; + let mut resolved_attrs = Vec::with_capacity(self.0.attrs().len()); for (name, attr) in self.0.attrs().iter() { resolved_attrs.push(( @@ -380,36 +356,33 @@ impl AnonTargetKey { 
attr.resolve_single(self.0.name().pkg(), &resolution_ctx)?, )); } - let attributes = env.heap().alloc(AllocStruct(resolved_attrs)); + let attributes = env + .heap() + .alloc_typed_unchecked(AllocStruct(resolved_attrs)) + .cast(); let registry = AnalysisRegistry::new_from_owner( BaseDeferredKey::AnonTarget(self.0.dupe()), exec_resolution, )?; - let ctx = env.heap().alloc_typed(AnalysisContext::new( + let ctx = AnalysisContext::prepare( eval.heap(), - attributes, + Some(attributes), + Some(self.0.configured_label()), + // FIXME(JakobDegen): There should probably be a way to pass plugins + // into anon targets Some( eval.heap() - .alloc_typed(StarlarkConfiguredProvidersLabel::new( - ConfiguredProvidersLabel::new( - self.0.configured_label(), - ProvidersName::Default, - ), - )), + .alloc_typed(AnalysisPlugins::new(SmallMap::new())) + .into(), ), - // FIXME(JakobDegen): There should probably be a way to pass plugins - // into anon targets - eval.heap() - .alloc_typed(AnalysisPlugins::new(SmallMap::new())) - .into(), registry, dice.global_data().get_digest_config(), - )); + ); let list_res = rule_impl.invoke(&mut eval, ctx)?; - Ok((eval, ctx, list_res)) + Ok((dice, eval, ctx, list_res)) }, ) .await?; @@ -419,130 +392,104 @@ impl AnonTargetKey { .await?; let res_typed = ProviderCollection::try_from_value(list_res)?; let res = env.heap().alloc(res_typed); - env.set("", res); + + let fulfilled_artifact_mappings = { + let promise_artifact_mappings = + rule_impl.promise_artifact_mappings(&mut eval)?; + + self.get_fulfilled_promise_artifacts(promise_artifact_mappings, res, &mut eval)? + }; + + let res = ValueTypedComplex::new(res) + .internal_error_anyhow("Just allocated the provider collection")?; // Pull the ctx object back out, and steal ctx.action's state back let analysis_registry = ctx.take_state(); + analysis_registry + .analysis_value_storage + .set_result_value(res)?; std::mem::drop(eval); + let num_declared_actions = analysis_registry.num_declared_actions(); + let num_declared_artifacts = analysis_registry.num_declared_artifacts(); + let (_frozen_env, recorded_values) = analysis_registry.finalize(&env)?(env)?; - let (frozen_env, deferreds) = analysis_registry.finalize(&env)?(env)?; - - let res = frozen_env.get("").unwrap(); - let provider_collection = FrozenProviderCollectionValue::try_from_value(res) - .expect("just created this, this shouldn't happen"); + let validations = transitive_validations( + validations_from_deps, + recorded_values.provider_collection()?, + ); - // this could look nicer if we had the entire analysis be a deferred - let deferred = DeferredTable::new(deferreds.take_result()?); - Ok(AnalysisResult::new(provider_collection, deferred, None)) + Ok(AnalysisResult::new( + recorded_values, + None, + fulfilled_artifact_mappings, + num_declared_actions, + num_declared_artifacts, + validations, + )) } .map(|res| { - ( - res, - buck2_data::AnalysisEnd { - target: Some(self.0.as_proto().into()), - rule: self.0.rule_type().to_string(), - profile: None, // Not implemented for anon targets - }, - ) + let end = buck2_data::AnalysisEnd { + target: Some(self.0.as_proto().into()), + rule: self.0.rule_type().to_string(), + profile: None, // Not implemented for anon targets + declared_actions: res.as_ref().ok().map(|v| v.num_declared_actions), + declared_artifacts: res.as_ref().ok().map(|v| v.num_declared_artifacts), + }; + (res, end) }), ) .await } + + fn get_fulfilled_promise_artifacts<'v>( + &self, + promise_artifact_mappings: SmallMap>, + anon_target_result: Value<'v>, + eval: &mut 
Evaluator<'v, '_, '_>, + ) -> anyhow::Result> { + let mut fulfilled_artifact_mappings = HashMap::new(); + + for (id, func) in promise_artifact_mappings.values().enumerate() { + let artifact = eval + .eval_function(*func, &[anon_target_result], &[]) + .map_err(from_starlark)?; + + let promise_id = + PromiseArtifactId::new(BaseDeferredKey::AnonTarget(self.0.clone()), id); + + match ValueAsArtifactLike::unpack_value(artifact).into_anyhow_result()? { + Some(artifact) => { + fulfilled_artifact_mappings + .insert(promise_id.clone(), artifact.0.get_bound_artifact()?); + } + None => { + return Err( + PromiseArtifactResolveError::NotAnArtifact(artifact.to_repr()).into(), + ); + } + } + } + + Ok(fulfilled_artifact_mappings) + } } /// Several attribute functions need a context, make one that is mostly useless. -pub struct AnonAttrCtx { - cfg: ConfigurationData, - transitions: OrderedMap, Arc>, - pub execution_platform_resolution: ExecutionPlatformResolution, +pub(crate) struct AnonAttrCtx { + pub(crate) execution_platform_resolution: ExecutionPlatformResolution, } impl AnonAttrCtx { fn new(execution_platform_resolution: &ExecutionPlatformResolution) -> Self { Self { - cfg: ConfigurationData::unspecified(), - transitions: OrderedMap::new(), execution_platform_resolution: execution_platform_resolution.clone(), } } -} - -impl AttrCoercionContext for AnonAttrCtx { - fn coerce_providers_label(&self, value: &str) -> anyhow::Result { - Err(AnonTargetsError::CantParseDuringCoerce(value.to_owned()).into()) - } - fn intern_str(&self, value: &str) -> ArcStr { + pub(crate) fn intern_str(&self, value: &str) -> ArcStr { // TODO(scottcao): do intern. ArcStr::from(value) } - - fn intern_list(&self, value: Vec) -> ArcSlice { - // TODO(scottcao): do intern. - value.into() - } - - fn intern_dict( - &self, - value: Vec<(CoercedAttr, CoercedAttr)>, - ) -> ArcSlice<(CoercedAttr, CoercedAttr)> { - // TODO(scottcao): do intern. - value.into() - } - - fn intern_select( - &self, - value: Vec<(TargetLabel, CoercedAttr)>, - ) -> ArcSlice<(TargetLabel, CoercedAttr)> { - // TODO(scottcao): do intern. 
- value.into() - } - - fn coerce_path(&self, value: &str, _allow_directory: bool) -> anyhow::Result { - Err(AnonTargetsError::CantParseDuringCoerce(value.to_owned()).into()) - } - - fn coerce_target_pattern( - &self, - pattern: &str, - ) -> anyhow::Result> { - Err(AnonTargetsError::CantParseDuringCoerce(pattern.to_owned()).into()) - } - - fn visit_query_function_literals( - &self, - _visitor: &mut dyn buck2_query::query::syntax::simple::functions::QueryLiteralVisitor, - _expr: &buck2_query_parser::spanned::Spanned, - query: &str, - ) -> anyhow::Result<()> { - Err(AnonTargetsError::CantParseDuringCoerce(query.to_owned()).into()) - } -} - -impl AttrConfigurationContext for AnonAttrCtx { - fn matches<'a>(&'a self, _label: &TargetLabel) -> Option<&'a ConfigSettingData> { - None - } - - fn cfg(&self) -> ConfigurationNoExec { - ConfigurationNoExec::new(self.cfg.dupe()) - } - - fn exec_cfg(&self) -> ConfigurationNoExec { - ConfigurationNoExec::new(self.cfg.dupe()) - } - - fn toolchain_cfg(&self) -> ConfigurationWithExec { - ConfigurationWithExec::new(self.cfg.dupe(), self.cfg.dupe()) - } - - fn platform_cfg(&self, _label: &TargetLabel) -> anyhow::Result { - Ok(self.cfg.dupe()) - } - - fn resolved_transitions(&self) -> &OrderedMap, Arc> { - &self.transitions - } } pub(crate) fn init_eval_anon_target() { @@ -550,6 +497,43 @@ pub(crate) fn init_eval_anon_target() { .init(|ctx, key| Box::pin(async move { AnonTargetKey::downcast(key)?.resolve(ctx).await })); } +pub(crate) fn init_get_promised_artifact() { + GET_PROMISED_ARTIFACT.init(|promise_artifact, ctx| { + Box::pin( + async move { get_artifact_from_anon_target_analysis(promise_artifact.id(), ctx).await }, + ) + }); +} + +pub(crate) async fn get_artifact_from_anon_target_analysis<'v>( + promise_id: &'v PromiseArtifactId, + ctx: &mut DiceComputations<'_>, +) -> anyhow::Result { + let owner = promise_id.owner(); + let analysis_result = match owner { + BaseDeferredKey::AnonTarget(anon_target) => { + AnonTargetKey::downcast(anon_target.dupe())? + .resolve(ctx) + .await? + } + _ => { + return Err(PromiseArtifactResolveError::OwnerIsNotAnonTarget( + promise_id.clone(), + owner.clone(), + ) + .into()); + } + }; + + analysis_result + .promise_artifact_map() + .get(promise_id) + .context(PromiseArtifactResolveError::NotFoundInAnalysis( + promise_id.clone(), + )) + .cloned() +} + pub(crate) fn init_anon_target_registry_new() { ANON_TARGET_REGISTRY_NEW.init(|_phantom, execution_platform| { Box::new(AnonTargetsRegistry { @@ -567,7 +551,7 @@ impl<'v> AnonTargetsRegistry<'v> { let registry: &mut AnonTargetsRegistry = registry .as_any_mut() .downcast_mut::() - .context("AnonTargetsRegistryDyn is not an AnonTargetsRegistry (internal error)")?; + .internal_error_anyhow("AnonTargetsRegistryDyn is not an AnonTargetsRegistry")?; unsafe { // It is hard or impossible to express this safely with the borrow checker. // Has something to do with 'v being invariant. 
@@ -581,7 +565,7 @@ impl<'v> AnonTargetsRegistry<'v> { pub(crate) fn anon_target_key( &self, rule: ValueTyped<'v, FrozenRuleCallable>, - attributes: DictOf<'v, &'v str, Value<'v>>, + attributes: UnpackDictEntries<&'v str, Value<'v>>, ) -> anyhow::Result { AnonTargetKey::new(&self.execution_platform, rule, attributes) } @@ -598,15 +582,13 @@ impl<'v> AnonTargetsRegistry<'v> { pub(crate) fn register_artifact( &mut self, - promise: ValueTyped<'v, StarlarkPromise<'v>>, location: Option, anon_target_key: AnonTargetKey, id: usize, ) -> anyhow::Result { let anon_target_key = BaseDeferredKey::AnonTarget(anon_target_key.0.dupe()); let id = PromiseArtifactId::new(anon_target_key, id); - self.promise_artifact_registry - .register(promise, location, id) + self.promise_artifact_registry.register(location, id) } } @@ -615,11 +597,8 @@ impl<'v> AnonTargetsRegistryDyn<'v> for AnonTargetsRegistry<'v> { self } - fn resolve_artifacts( - &self, - short_paths: &HashMap, - ) -> anyhow::Result<()> { - self.promise_artifact_registry.resolve_all(short_paths) + fn consumer_analysis_artifacts(&self) -> Vec { + self.promise_artifact_registry.consumer_analysis_artifacts() } fn take_promises(&mut self) -> Option>> { @@ -652,7 +631,7 @@ impl<'v> AnonTargetsRegistryDyn<'v> for AnonTargetsRegistry<'v> { } #[cfg(test)] -mod test { +mod tests { use super::*; #[test] diff --git a/app/buck2_anon_target/src/lib.rs b/app/buck2_anon_target/src/lib.rs index b4aea2f4b1e88..8a17b601486d3 100644 --- a/app/buck2_anon_target/src/lib.rs +++ b/app/buck2_anon_target/src/lib.rs @@ -7,7 +7,7 @@ * of this source tree. */ -#![feature(async_closure)] +#![feature(error_generic_member_access)] #![feature(box_patterns)] use std::sync::Once; @@ -26,6 +26,7 @@ pub fn init_late_bindings() { ONCE.call_once(|| { anon_targets::init_anon_target_registry_new(); anon_targets::init_eval_anon_target(); + anon_targets::init_get_promised_artifact(); starlark_defs::init_analysis_actions_methods_anon_target(); starlark_defs::init_register_anon_target_types(); }); diff --git a/app/buck2_anon_target/src/promise_artifacts.rs b/app/buck2_anon_target/src/promise_artifacts.rs index fddae01220481..43f71e04a7199 100644 --- a/app/buck2_anon_target/src/promise_artifacts.rs +++ b/app/buck2_anon_target/src/promise_artifacts.rs @@ -7,7 +7,7 @@ * of this source tree. */ -use std::collections::HashMap; +use std::fmt; use std::fmt::Debug; use std::sync::Arc; use std::sync::OnceLock; @@ -15,15 +15,11 @@ use std::sync::OnceLock; use allocative::Allocative; use buck2_build_api::artifact_groups::promise::PromiseArtifact; use buck2_build_api::artifact_groups::promise::PromiseArtifactId; -use buck2_build_api::artifact_groups::promise::PromiseArtifactResolveError; -use buck2_build_api::interpreter::rule_defs::artifact::ValueAsArtifactLike; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; -use buck2_interpreter::starlark_promise::StarlarkPromise; use dupe::Dupe; +use gazebo::prelude::SliceExt; use starlark::codemap::FileSpan; use starlark::values::Trace; -use starlark::values::UnpackValue; -use starlark::values::ValueTyped; #[derive(Debug, Trace, Allocative)] struct PromiseArtifactEntry { @@ -35,60 +31,35 @@ struct PromiseArtifactEntry { /// corresponding internal PromiseArtifact. At the end of analysis (after promises have been resolved), /// all PromiseArtifact will be updated to have the resolved artifact from the corresponding starlark promise. 
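To make the registry behavior described in that comment concrete, here is a toy version with invented names: `register` hands out a cloneable handle whose backing `OnceLock` is still empty, and the registry keeps its own clone of each handle so all declared promise artifacts can be collected later for the consuming analysis:

```rust
// Toy sketch of a promise-artifact registry; not the buck2 implementation.
use std::sync::{Arc, OnceLock};

#[derive(Clone)]
struct PromiseHandle(Arc<OnceLock<String>>);

#[derive(Default)]
struct PromiseRegistry {
    artifacts: Vec<PromiseHandle>,
}

impl PromiseRegistry {
    fn register(&mut self) -> PromiseHandle {
        let handle = PromiseHandle(Arc::new(OnceLock::new()));
        // Keep a clone so the registry can enumerate everything declared.
        self.artifacts.push(handle.clone());
        handle
    }

    fn consumer_analysis_artifacts(&self) -> Vec<PromiseHandle> {
        self.artifacts.clone()
    }
}

fn main() {
    let mut reg = PromiseRegistry::default();
    let h = reg.register();
    assert!(h.0.get().is_none()); // unresolved at registration time
    assert_eq!(reg.consumer_analysis_artifacts().len(), 1);
}
```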
#[derive(Debug, Trace, Allocative)] -pub(crate) struct PromiseArtifactRegistry<'v> { - promises: Vec<ValueTyped<'v, StarlarkPromise<'v>>>, +pub(crate) struct PromiseArtifactRegistry { artifacts: Vec<PromiseArtifactEntry>, } -impl<'v> PromiseArtifactRegistry<'v> { +impl PromiseArtifactRegistry { pub(crate) fn new() -> Self { Self { - promises: Vec::new(), artifacts: Vec::new(), } } - pub(crate) fn resolve_all( - &self, - short_paths: &HashMap<PromiseArtifactId, ForwardRelativePathBuf>, - ) -> anyhow::Result<()> { - for (promise, artifact_entry) in std::iter::zip(&self.promises, &self.artifacts) { - match promise.get() { - Some(v) => match ValueAsArtifactLike::unpack_value(v) { - Some(v) => { - let short_path = short_paths.get(artifact_entry.artifact.id()).cloned(); - - artifact_entry.artifact.resolve(v.0, &short_path)?; - } - None => { - return Err(PromiseArtifactResolveError::NotAnArtifact( - artifact_entry.location.clone(), - v.to_repr(), - ) - .into()); - } - }, - None => { - return Err(PromiseArtifactResolveError::PromiseNotResolved( - artifact_entry.location.clone(), - promise.to_string(), - ) - .into()); - } - } - } - Ok(()) + /// The consumer analysis is the analysis that calls the anon target and uses the resulting + /// promised artifacts. It could be a normal rule analysis, an analysis from BXL, or an anon + /// target analysis. These promised artifacts are the ones that will have their short paths + /// asserted. During promise resolution, we use the promised artifact's owner (the anon target + /// key) to look up the owner's analysis results via DICE (which will be blocking) to ensure + /// that any dependent anon target analyses are finished first. + pub(crate) fn consumer_analysis_artifacts(&self) -> Vec<PromiseArtifact> { + self.artifacts.map(|e| e.artifact.clone()) } pub(crate) fn register( &mut self, - promise: ValueTyped<'v, StarlarkPromise<'v>>, location: Option<FileSpan>, id: PromiseArtifactId, ) -> anyhow::Result<PromiseArtifact> { - let artifact = PromiseArtifact::new(Arc::new(OnceLock::new()), Arc::new(id)); + let artifact: PromiseArtifact = + PromiseArtifact::new(Arc::new(OnceLock::new()), Arc::new(id)); - self.promises.push(promise); self.artifacts.push(PromiseArtifactEntry { location, artifact: artifact.dupe(), @@ -96,3 +67,27 @@ impl<'v> PromiseArtifactRegistry<'v> { Ok(artifact) } } + +// When passing promise artifacts into anon targets, we will coerce them into this type. +// During resolve, we look up the analysis of the target that produced the promise artifact, +// assert short paths, and produce a new `StarlarkPromiseArtifact` with the `OnceLock` resolved. +#[allow(unused)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Allocative)] +pub(crate) struct PromiseArtifactAttr { + pub(crate) id: PromiseArtifactId, + pub(crate) short_path: Option<ForwardRelativePathBuf>, +} + +impl fmt::Display for PromiseArtifactAttr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // TODO(@wendyy) - we should figure out what to do about the declaration location. + // It's possible that 2 targets produce the same promise artifact and try to pass + // it into a downstream target, so then there would be 2 declaration locations.
+ write!(f, "")?; + Ok(()) + } +} diff --git a/app/buck2_anon_target/src/starlark_defs.rs b/app/buck2_anon_target/src/starlark_defs.rs index 299be10128fd2..390dd05d4eb44 100644 --- a/app/buck2_anon_target/src/starlark_defs.rs +++ b/app/buck2_anon_target/src/starlark_defs.rs @@ -13,11 +13,11 @@ use std::fmt::Display; use allocative::Allocative; use buck2_build_api::artifact_groups::promise::PromiseArtifact; -use buck2_build_api::interpreter::rule_defs::artifact::StarlarkPromiseArtifact; +use buck2_build_api::interpreter::rule_defs::artifact::starlark_promise_artifact::StarlarkPromiseArtifact; use buck2_build_api::interpreter::rule_defs::context::AnalysisActions; use buck2_build_api::interpreter::rule_defs::context::ANALYSIS_ACTIONS_METHODS_ANON_TARGET; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; -use buck2_interpreter::anon_targets::REGISTER_ANON_TARGETS; +use buck2_interpreter::downstream_crate_starlark_defs::REGISTER_BUCK2_ANON_TARGETS_GLOBALS; use buck2_interpreter::starlark_promise::StarlarkPromise; use buck2_interpreter_for_build::rule::FrozenArtifactPromiseMappings; use buck2_interpreter_for_build::rule::FrozenRuleCallable; @@ -31,8 +31,9 @@ use starlark::environment::MethodsStatic; use starlark::eval::Evaluator; use starlark::starlark_module; use starlark::values::dict::Dict; -use starlark::values::dict::DictOf; +use starlark::values::dict::UnpackDictEntries; use starlark::values::list::AllocList; +use starlark::values::list_or_tuple::UnpackListOrTuple; use starlark::values::starlark_value; use starlark::values::starlark_value_as_type::StarlarkValueAsType; use starlark::values::AllocValue; @@ -44,12 +45,11 @@ use starlark::values::Trace; use starlark::values::Value; use starlark::values::ValueTyped; use starlark_map::small_map::SmallMap; -use thiserror::Error; use crate::anon_targets::AnonTargetKey; use crate::anon_targets::AnonTargetsRegistry; -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] pub enum AnonTargetsError { #[error("artifact with name `{0}` was not found")] ArtifactNotFound(String), @@ -68,28 +68,22 @@ struct StarlarkAnonTarget<'v> { impl<'v> StarlarkAnonTarget<'v> { fn new( declaration_location: Option, - res: ValueTyped<'v, StarlarkPromise<'v>>, + anon_target_promise: ValueTyped<'v, StarlarkPromise<'v>>, frozen_artifact_mappings: &Option, key: AnonTargetKey, registry: &mut AnonTargetsRegistry<'v>, - eval: &mut Evaluator<'v, '_>, ) -> anyhow::Result> { let mut artifacts_map = SmallMap::new(); if let Some(artifacts) = frozen_artifact_mappings { - for (id, (name, func)) in artifacts.mappings.iter().enumerate() { - let promise = StarlarkPromise::map(res, func.to_value(), eval)?; - let artifact = registry.register_artifact( - promise, - declaration_location.clone(), - key.clone(), - id, - )?; + for (id, name) in artifacts.mappings.keys().enumerate() { + let artifact = + registry.register_artifact(declaration_location.clone(), key.clone(), id)?; artifacts_map.insert(*name, artifact); } } let anon_target = StarlarkAnonTarget { - promise: res, + promise: anon_target_promise, artifacts: artifacts_map, declaration_location, }; @@ -131,8 +125,8 @@ fn anon_target_methods(builder: &mut MethodsBuilder) { /// pass in the artifact retrieved from this dict. 
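For intuition about the dict documented above, here is a rough stand-in in plain Rust with hypothetical types; each name declared by the rule's artifact promise mappings maps to a still-unresolved promise artifact:

```rust
// Rough stand-in for the name-to-promise-artifact dict; not the real types.
use std::collections::BTreeMap;
use std::sync::{Arc, OnceLock};

type UnresolvedArtifact = Arc<OnceLock<String>>;

fn artifacts_dict(names: &[&str]) -> BTreeMap<String, UnresolvedArtifact> {
    names
        .iter()
        .map(|n| (n.to_string(), Arc::new(OnceLock::new())))
        .collect()
}

fn main() {
    let d = artifacts_dict(&["out", "dep_file"]);
    assert!(d.contains_key("out"));
    assert!(d["out"].get().is_none()); // resolved only after analysis completes
}
```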
fn artifacts<'v>( this: &StarlarkAnonTarget<'v>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> starlark::Result> { Ok(eval.heap().alloc(Dict::new( this.artifacts .iter() @@ -146,7 +140,7 @@ fn anon_target_methods(builder: &mut MethodsBuilder) { )), )) }) - .collect::>()?, + .collect::>()?, ))) } @@ -155,7 +149,7 @@ fn anon_target_methods(builder: &mut MethodsBuilder) { fn artifact<'v>( this: &StarlarkAnonTarget<'v>, name: &'v str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result> { match this.artifacts.get(name) { Some(v) => Ok(eval.heap().alloc(StarlarkPromiseArtifact::new( @@ -241,7 +235,7 @@ pub(crate) fn register_anon_target_types(globals: &mut GlobalsBuilder) { } pub(crate) fn init_register_anon_target_types() { - REGISTER_ANON_TARGETS.init(register_anon_target_types); + REGISTER_BUCK2_ANON_TARGETS_GLOBALS.init(register_anon_target_types); } #[starlark_module] @@ -251,73 +245,57 @@ fn analysis_actions_methods_anon_target(builder: &mut MethodsBuilder) { /// Two distinct rules might ask for the same anonymous target, sharing the work it performs. /// /// For more details see https://buck2.build/docs/rule_authors/anon_targets/ - /// - /// `with_artifacts` is a temporary flag for migration purposes. If `with_artifacts` is false, the return type is just the - /// promise. If set to true, the return type will be an object containing a `promise` attribute with accessors to get the - /// promise artifacts. This will be the eventual return type after the migration. fn anon_target<'v>( this: &AnalysisActions<'v>, + // TODO(nga): this should be either positional or named, not both. rule: ValueTyped<'v, FrozenRuleCallable>, - attrs: DictOf<'v, &'v str, Value<'v>>, - #[starlark(require = named, default = false)] with_artifacts: bool, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { - // TODO - remove after migration - let _with_artifacts = with_artifacts; - let res = eval.heap().alloc_typed(StarlarkPromise::new_unresolved()); - let mut this = this.state(); + attrs: UnpackDictEntries<&'v str, Value<'v>>, + eval: &mut Evaluator<'v, '_, '_>, + ) -> anyhow::Result> { + let anon_target_promise = eval.heap().alloc_typed(StarlarkPromise::new_unresolved()); + let mut this = this.state()?; let registry = AnonTargetsRegistry::downcast_mut(&mut *this.anon_targets)?; let key = registry.anon_target_key(rule, attrs)?; - registry.register_one(res, key.clone())?; + registry.register_one(anon_target_promise, key.clone())?; - let anon_target = StarlarkAnonTarget::new( + StarlarkAnonTarget::new( eval.call_stack_top_location(), - res, + anon_target_promise, rule.artifact_promise_mappings(), key, registry, - eval, - )?; - - Ok(eval.heap().alloc(anon_target)) + ) } /// Generate a series of anonymous targets. - /// - /// `with_artifacts` is a temporary flag for migration purposes. If `with_artifacts` is false, the return type is just the - /// promise. If set to true, the return type will be an object containing a `promise` attribute with accessors to get the - /// promise artifacts. This will be the eventual return type after the migration. fn anon_targets<'v>( this: &AnalysisActions<'v>, - rules: Vec<( + // TODO(nga): this should be either positional or named, not both. 
+ rules: UnpackListOrTuple<( ValueTyped<'v, FrozenRuleCallable>, - DictOf<'v, &'v str, Value<'v>>, + UnpackDictEntries<&'v str, Value<'v>>, )>, - #[starlark(require = named, default = false)] with_artifacts: bool, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { - // TODO - remove after migration - let _with_artifacts = with_artifacts; - let mut this = this.state(); + eval: &mut Evaluator<'v, '_, '_>, + ) -> anyhow::Result> { + let mut this = this.state()?; let registry = AnonTargetsRegistry::downcast_mut(&mut *this.anon_targets)?; let declaration_location = eval.call_stack_top_location(); let mut anon_targets = Vec::new(); let mut promises_to_join = Vec::new(); - rules.into_try_map(|(rule, attributes)| { + rules.items.into_try_map(|(rule, attributes)| { let key = registry.anon_target_key(rule, attributes)?; - let res = eval.heap().alloc_typed(StarlarkPromise::new_unresolved()); + let anon_target_promise = eval.heap().alloc_typed(StarlarkPromise::new_unresolved()); - promises_to_join.push(res); + promises_to_join.push(anon_target_promise); - registry.register_one(res, key.clone())?; + registry.register_one(anon_target_promise, key.clone())?; let anon_target = StarlarkAnonTarget::new( declaration_location.clone(), - res, + anon_target_promise, rule.artifact_promise_mappings(), key.clone(), registry, - eval, )?; anon_targets.push(anon_target); @@ -325,13 +303,11 @@ fn analysis_actions_methods_anon_target(builder: &mut MethodsBuilder) { anyhow::Ok(key) })?; - let anon_targets = StarlarkAnonTargets { + Ok(StarlarkAnonTargets { promise: StarlarkPromise::join(promises_to_join, eval.heap()), anon_targets, declaration_location, - }; - - Ok(eval.heap().alloc(anon_targets)) + }) } /// Generate a promise artifact that has short path accessible on it. The short path's correctness will @@ -341,10 +317,11 @@ fn analysis_actions_methods_anon_target(builder: &mut MethodsBuilder) { /// we cannot support this until we can get access to the `AnalysisContext` without passing it into this method. fn assert_short_path<'v>( this: &AnalysisActions<'v>, + // TODO(nga): this should be either positional or named, not both. 
artifact: ValueTyped<'v, StarlarkPromiseArtifact>, short_path: &'v str, ) -> anyhow::Result { - let mut this = this.state(); + let mut this = this.state()?; let promise = artifact.artifact.clone(); let short_path = ForwardRelativePathBuf::new(short_path.to_owned())?; diff --git a/app/buck2_artifact/BUCK b/app/buck2_artifact/BUCK index 90c7fc93b313f..7899391f210d9 100644 --- a/app/buck2_artifact/BUCK +++ b/app/buck2_artifact/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -8,19 +7,23 @@ rust_library( srcs = glob(["src/**/*.rs"]), test_deps = [ "fbsource//third-party/rust:assert_matches", + "fbsource//third-party/rust:serde_json", ], deps = [ "fbsource//third-party/rust:anyhow", "fbsource//third-party/rust:derivative", "fbsource//third-party/rust:derive_more", "fbsource//third-party/rust:either", - "fbsource//third-party/rust:itertools", + "fbsource//third-party/rust:serde", + "fbsource//third-party/rust:static_assertions", "fbsource//third-party/rust:take_mut", - "fbsource//third-party/rust:thiserror", "//buck2/allocative/allocative:allocative", + "//buck2/app/buck2_common:buck2_common", "//buck2/app/buck2_core:buck2_core", "//buck2/app/buck2_data:buck2_data", + "//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_execute:buck2_execute", + "//buck2/app/buck2_util:buck2_util", "//buck2/gazebo/dupe:dupe", "//buck2/gazebo/gazebo:gazebo", "//buck2/starlark-rust/starlark_map:starlark_map", diff --git a/app/buck2_artifact/Cargo.toml b/app/buck2_artifact/Cargo.toml index 4b7bbe540f119..585197f730912 100644 --- a/app/buck2_artifact/Cargo.toml +++ b/app/buck2_artifact/Cargo.toml @@ -1,26 +1,32 @@ [package] +description = "Artifact types" +edition = "2021" +license = { workspace = true } name = "buck2_artifact" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Artifact types" [dependencies] anyhow = { workspace = true } derivative = { workspace = true } derive_more = { workspace = true } either = { workspace = true } -itertools = { workspace = true } +serde = { workspace = true } +static_assertions = { workspace = true } take_mut = { workspace = true } -thiserror = { workspace = true } allocative = { workspace = true } dupe = { workspace = true } gazebo = { workspace = true } starlark_map = { workspace = true } +buck2_common = { workspace = true } buck2_core = { workspace = true } buck2_data = { workspace = true } +buck2_error = { workspace = true } buck2_execute = { workspace = true } +buck2_util = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } +serde_json = { workspace = true } diff --git a/app/buck2_artifact/src/actions/mod.rs b/app/buck2_artifact/src/actions.rs similarity index 100% rename from app/buck2_artifact/src/actions/mod.rs rename to app/buck2_artifact/src/actions.rs diff --git a/app/buck2_artifact/src/actions/key.rs b/app/buck2_artifact/src/actions/key.rs index 0eb3ba27e04db..3d2cd615bc917 100644 --- a/app/buck2_artifact/src/actions/key.rs +++ b/app/buck2_artifact/src/actions/key.rs @@ -7,12 +7,14 @@ * of this source tree. */ +use std::fmt::Write; + use allocative::Allocative; use buck2_core::base_deferred_key::BaseDeferredKey; use buck2_data::ToProtoMessage; use dupe::Dupe; -use crate::deferred::key::DeferredKey; +use crate::deferred::key::DeferredHolderKey; /// A key to look up an 'Action' from the 'ActionAnalysisResult'. 
/// Since 'Action's are registered as 'Deferred's @@ -26,22 +28,56 @@ use crate::deferred::key::DeferredKey; derive_more::Display, Allocative )] -pub struct ActionKey( - /// `DeferredData>`. - DeferredKey, -); +#[display("(target: `{parent}`, id: `{id}`)")] +pub struct ActionKey { + parent: DeferredHolderKey, + id: ActionIndex, +} + +/// An unique identifier for different actions with the same parent. +#[derive( + Debug, + Eq, + PartialEq, + Hash, + Clone, + Dupe, + Copy, + derive_more::Display, + Allocative +)] +pub struct ActionIndex(u32); +impl ActionIndex { + pub fn new(v: u32) -> ActionIndex { + Self(v) + } +} impl ActionKey { - pub fn unchecked_new(key: DeferredKey) -> ActionKey { - ActionKey(key) + pub fn unchecked_new(parent: DeferredHolderKey, id: ActionIndex) -> ActionKey { + ActionKey { parent, id } + } + + pub fn new(parent: DeferredHolderKey, id: ActionIndex) -> ActionKey { + ActionKey { parent, id } + } + + pub fn holder_key(&self) -> &DeferredHolderKey { + &self.parent } - pub fn deferred_key(&self) -> &DeferredKey { - &self.0 + pub fn action_index(&self) -> ActionIndex { + self.id } pub fn owner(&self) -> &BaseDeferredKey { - self.deferred_key().owner() + self.parent.owner() + } + + pub fn action_key(&self) -> String { + let mut v = self.parent.action_key(); + write!(&mut v, "_{}", self.id).unwrap(); + v } } @@ -50,9 +86,9 @@ impl ToProtoMessage for ActionKey { fn as_proto(&self) -> Self::Message { buck2_data::ActionKey { - id: self.deferred_key().id().as_usize().to_ne_bytes().to_vec(), - owner: Some(self.deferred_key().owner().to_proto().into()), - key: self.deferred_key().action_key(), + id: (self.id.0 as usize).to_ne_bytes().to_vec(), + owner: Some(self.owner().to_proto().into()), + key: self.action_key(), } } } diff --git a/app/buck2_artifact/src/artifact.rs b/app/buck2_artifact/src/artifact.rs new file mode 100644 index 0000000000000..72654952112b4 --- /dev/null +++ b/app/buck2_artifact/src/artifact.rs @@ -0,0 +1,23 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! An 'Artifact' represents a File to be used as part of the build. It can be either a file in the +//! source tree, or a file to be generated as part of the build. +//! +//! The existence of an Artifact does not mean the actual file exists. Artifacts needs to be +//! 'made available' before its existence on the filesystem is guaranteed. +//! +//! An 'Artifact' is first "declared" by rule implementation as a 'DeclaredArtifact'. The artifact +//! will need to be "bound" to an 'Action' through being used as an 'OutputArtifact'. Once bound, +//! it becomes a 'BuildArtifact' that can be available. + +pub mod artifact_dump; +pub mod artifact_type; +pub mod build_artifact; +pub mod source_artifact; diff --git a/app/buck2_artifact/src/artifact/artifact_dump.rs b/app/buck2_artifact/src/artifact/artifact_dump.rs new file mode 100644 index 0000000000000..11a4341b8de58 --- /dev/null +++ b/app/buck2_artifact/src/artifact/artifact_dump.rs @@ -0,0 +1,147 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+#![allow(clippy::ref_option_ref)] // within Serialize
+
+use std::fmt::Display;
+use std::path::Path;
+
+use buck2_common::cas_digest::CasDigest;
+use buck2_common::cas_digest::DigestAlgorithmFamily;
+use buck2_common::file_ops::FileDigestKind;
+use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath;
+use buck2_core::fs::paths::RelativePath;
+use serde::Serialize;
+use serde::Serializer;
+
+fn stringify<T, S>(value: &T, serializer: S) -> Result<S::Ok, S::Error>
+where
+    T: Display,
+    S: Serializer,
+{
+    serializer.collect_str(value)
+}
+
+#[derive(Serialize)]
+pub struct DirectoryInfo {}
+
+#[derive(Serialize)]
+pub struct FileInfo<'a> {
+    #[serde(serialize_with = "stringify")]
+    pub digest: &'a CasDigest<FileDigestKind>,
+    #[serde(serialize_with = "stringify")]
+    pub digest_kind: DigestAlgorithmFamily,
+    pub is_exec: bool,
+}
+
+#[derive(Serialize)]
+pub struct SymlinkInfo<'a> {
+    #[serde(serialize_with = "stringify")]
+    pub symlink_rel_path: &'a RelativePath,
+}
+
+#[derive(Serialize)]
+pub struct ExternalSymlinkInfo<'a> {
+    pub target: &'a Path,
+    pub remaining_path: Option<&'a ForwardRelativePath>,
+}
+
+#[derive(Serialize)]
+#[serde(tag = "kind")]
+#[serde(rename_all = "snake_case")]
+pub enum ArtifactInfo<'a> {
+    Directory(DirectoryInfo),
+    File(FileInfo<'a>),
+    Symlink(SymlinkInfo<'a>),
+    ExternalSymlink(ExternalSymlinkInfo<'a>),
+}
+
+#[derive(Serialize)]
+pub struct ArtifactMetadataJson<'a> {
+    pub path: &'a ForwardRelativePath,
+    #[serde(flatten)]
+    pub info: ArtifactInfo<'a>,
+}
+
+#[cfg(test)]
+mod tests {
+    use buck2_common::cas_digest::CasDigestConfig;
+
+    use super::*;
+
+    #[test]
+    fn test_dir_json() {
+        let path = ForwardRelativePath::unchecked_new("test");
+        let metadata = ArtifactMetadataJson {
+            path,
+            info: ArtifactInfo::Directory(DirectoryInfo {}),
+        };
+        let json = serde_json::to_string(&metadata).expect("failed to serialize");
+        assert_eq!(json, r#"{"path":"test","kind":"directory"}"#,);
+    }
+
+    #[test]
+    fn test_file_json() {
+        let path = ForwardRelativePath::unchecked_new("test.txt");
+        let digest = CasDigest::parse_digest(
+            "fb19d5b1546753df5f7741efbabd0d24dcaacd65:20",
+            CasDigestConfig::testing_default(),
+        )
+        .expect("failed to create digest")
+        .0;
+        let metadata = ArtifactMetadataJson {
+            path,
+            info: ArtifactInfo::File(FileInfo {
+                digest: &digest,
+                digest_kind: DigestAlgorithmFamily::Sha1,
+                is_exec: false,
+            }),
+        };
+        let json = serde_json::to_string(&metadata).expect("failed to serialize");
+        assert_eq!(
+            json,
+            r#"{"path":"test.txt","kind":"file","digest":"fb19d5b1546753df5f7741efbabd0d24dcaacd65:20","digest_kind":"SHA1","is_exec":false}"#,
+        );
+    }
+
+    #[test]
+    fn test_symlink_json() {
+        let path = ForwardRelativePath::unchecked_new("test.txt");
+        let symlink_rel_path = RelativePath::new("../test.txt");
+        let metadata = ArtifactMetadataJson {
+            path,
+            info: ArtifactInfo::Symlink(SymlinkInfo { symlink_rel_path }),
+        };
+        let json = serde_json::to_string(&metadata).expect("failed to serialize");
+        assert_eq!(
+            json,
+            r#"{"path":"test.txt","kind":"symlink","symlink_rel_path":"../test.txt"}"#,
+        );
+    }
+
+    #[test]
+    fn test_external_symlink_json() {
+        let path = ForwardRelativePath::unchecked_new("test.txt");
+        let target = Path::new("/mnt/gvfs");
+        let remaining =
ForwardRelativePath::new("test.txt").expect("failed to make remaining path"); + let metadata = ArtifactMetadataJson { + path, + info: ArtifactInfo::ExternalSymlink(ExternalSymlinkInfo { + target, + remaining_path: Some(remaining), + }), + }; + let json = serde_json::to_string(&metadata).expect("failed to serialize"); + assert_eq!( + json, + r#"{"path":"test.txt","kind":"external_symlink","target":"/mnt/gvfs","remaining_path":"test.txt"}"#, + ); + } +} diff --git a/app/buck2_artifact/src/artifact/artifact_type.rs b/app/buck2_artifact/src/artifact/artifact_type.rs index b50e9951df52e..20c64775c7646 100644 --- a/app/buck2_artifact/src/artifact/artifact_type.rs +++ b/app/buck2_artifact/src/artifact/artifact_type.rs @@ -9,6 +9,8 @@ use std::cell::Ref; use std::cell::RefCell; +use std::fmt; +use std::fmt::Formatter; use std::hash::Hash; use std::hash::Hasher; use std::ops::Deref; @@ -20,11 +22,11 @@ use buck2_core::base_deferred_key::BaseDeferredKey; use buck2_core::fs::artifact_path_resolver::ArtifactFs; use buck2_core::fs::buck_out_path::BuckOutPath; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; -use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; use buck2_execute::artifact::artifact_dyn::ArtifactDyn; use buck2_execute::execute::request::OutputType; use buck2_execute::path::artifact_path::ArtifactPath; +use buck2_util::arc_str::ThinArcS; use derivative::Derivative; use derive_more::Display; use derive_more::From; @@ -32,10 +34,10 @@ use dupe::Dupe; use either::Either; use gazebo::cell::ARef; use starlark_map::Hashed; +use static_assertions::assert_eq_size; use crate::actions::key::ActionKey; use crate::artifact::build_artifact::BuildArtifact; -use crate::artifact::projected_artifact::ProjectedArtifact; use crate::artifact::source_artifact::SourceArtifact; /// An 'Artifact' that can be materialized at its path. The underlying data is not very large here, @@ -45,29 +47,28 @@ use crate::artifact::source_artifact::SourceArtifact; )] pub struct Artifact(Arc); -#[derive(Clone, Debug, Display, Dupe, Allocative, Derivative)] -#[derivative(Hash, Eq, PartialEq)] -#[display(fmt = "{}", data)] +#[derive(Clone, Debug, Display, Dupe, Allocative, Hash, Eq, PartialEq)] +#[display("{}", data)] struct ArtifactData { data: Hashed, /// The number of components at the prefix of that path that are internal details to the rule, - /// not returned by `.short_path`. Omitted from Eq and Hash comparisons. - #[derivative(Hash = "ignore", PartialEq = "ignore")] + /// not returned by `.short_path`. 
hidden_components_count: usize, } +assert_eq_size!(ArtifactData, [usize; 9]); + impl Artifact { pub fn new( artifact: impl Into, - projected_path: Option>, + projected_path: ThinArcS, hidden_components_count: usize, ) -> Self { - let artifact = match projected_path { - Some(path) => ArtifactKind::Projected(ProjectedArtifact::new(artifact.into(), path)), - None => ArtifactKind::Base(artifact.into()), + let artifact = ArtifactKind { + base: artifact.into(), + path: projected_path, }; - Self(Arc::new(ArtifactData { data: Hashed::new(artifact), hidden_components_count, @@ -75,16 +76,13 @@ impl Artifact { } pub fn as_output_artifact(&self) -> Option { - let (kind, projected_path) = match self.0.data.key() { - ArtifactKind::Base(a) => (a, None), - ArtifactKind::Projected(a) => (a.base(), Some(a.path_shared())), - }; - match kind { + let key = self.0.data.key(); + match &key.base { BaseArtifactKind::Source(_) => None, BaseArtifactKind::Build(artifact) => { let bound = BoundBuildArtifact { artifact: artifact.dupe(), - projected_path: projected_path.cloned(), + projected_path: key.path.dupe(), hidden_components_count: self.0.hidden_components_count, }; Some(bound.into_declared_artifact().into()) @@ -126,11 +124,9 @@ impl Artifact { } } - pub fn as_parts(&self) -> (&BaseArtifactKind, Option<&ForwardRelativePath>) { - match self.0.data.key() { - ArtifactKind::Base(a) => (a, None), - ArtifactKind::Projected(a) => (a.base(), Some(a.path())), - } + pub fn as_parts(&self) -> (&BaseArtifactKind, &ForwardRelativePath) { + let key = self.0.data.key(); + (&key.base, &key.path) } pub fn get_path(&self) -> ArtifactPath<'_> { @@ -147,6 +143,29 @@ impl Artifact { hidden_components_count: self.0.hidden_components_count, } } + + pub fn project(&self, path: &ForwardRelativePath, hide_prefix: bool) -> Artifact { + if path.is_empty() { + return self.dupe(); + } + + let hidden_components_count = self.0.hidden_components_count + + if hide_prefix { + self.get_path().with_short_path(|p| p.iter().count()) + } else { + 0 + }; + + let (base, already_projected) = self.as_parts(); + + let projected = already_projected.join(path); + + Self::new( + base.dupe(), + ThinArcS::from(projected.as_ref()), + hidden_components_count, + ) + } } impl ArtifactDyn for Artifact { @@ -154,8 +173,16 @@ impl ArtifactDyn for Artifact { self.get_path().resolve(fs) } - fn is_source(&self) -> bool { - self.is_source() + fn requires_materialization(&self, fs: &ArtifactFs) -> bool { + let Some(source_artifact) = self.get_source() else { + return true; + }; + let path = source_artifact.get_path(); + fs.cell_resolver() + .get(path.package().cell_name()) + .unwrap() + .external() + .is_some() } } @@ -165,32 +192,45 @@ pub enum BaseArtifactKind { Build(BuildArtifact), } -#[derive(Clone, Debug, Display, Dupe, PartialEq, Eq, Hash, Allocative)] -pub enum ArtifactKind { - Base(BaseArtifactKind), - Projected(ProjectedArtifact), +assert_eq_size!(BaseArtifactKind, [usize; 6]); + +#[derive(Clone, Debug, Dupe, PartialEq, Eq, Hash, Allocative)] +pub struct ArtifactKind { + pub base: BaseArtifactKind, + /// When non-empty, the artifact is considered "projected". 
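// Illustrative sketch of the projection semantics above, assuming `artifact`
// is not already projected; module paths follow this diff.
use buck2_artifact::artifact::artifact_type::Artifact;
use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath;

fn projection_demo(artifact: &Artifact) {
    let inner = ForwardRelativePath::new("lib/libfoo.so").unwrap();
    // An empty projected path means "not projected", so projecting here
    // simply joins `inner` onto the (empty) existing projection.
    let projected = artifact.project(inner, /* hide_prefix */ false);
    let (_base, path) = projected.as_parts();
    assert_eq!(path, inner);
}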
+    pub path: ThinArcS<ForwardRelativePath>,
+}
+
+impl Display for ArtifactKind {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        if self.path.is_empty() {
+            write!(f, "{}", self.base)
+        } else {
+            write!(f, "{}/{}", self.base, self.path)
+        }
+    }
+}

+assert_eq_size!(ArtifactKind, [usize; 7]);
+
impl From<SourceArtifact> for Artifact {
    fn from(a: SourceArtifact) -> Self {
-        Self::new(a, None, 0)
+        Self::new(a, ThinArcS::from(ForwardRelativePath::empty()), 0)
    }
}

impl From<BuildArtifact> for Artifact {
    fn from(a: BuildArtifact) -> Self {
-        Self::new(a, None, 0)
+        Self::new(a, ThinArcS::from(ForwardRelativePath::empty()), 0)
    }
}

/// An intermediate struct to respond to calls to `ensure_bound`.
-#[derive(Clone, Dupe, Debug, Display, Allocative, Derivative)]
-#[derivative(Hash, Eq, PartialEq)]
-#[display(fmt = "{}", "self.get_path()")]
+#[derive(Clone, Dupe, Debug, Display, Allocative, Hash, Eq, PartialEq)]
+#[display("{}", self.get_path())]
pub struct BoundBuildArtifact {
    artifact: BuildArtifact,
-    projected_path: Option<Arc<ForwardRelativePathBuf>>,
-    #[derivative(Hash = "ignore", PartialEq = "ignore")]
+    projected_path: ThinArcS<ForwardRelativePath>,
    hidden_components_count: usize,
}
@@ -222,10 +262,7 @@ impl BoundBuildArtifact {
    pub fn get_path(&self) -> ArtifactPath<'_> {
        ArtifactPath {
            base_path: Either::Left(ARef::new_ptr(self.artifact.get_path())),
-            projected_path: self
-                .projected_path
-                .as_ref()
-                .map(|p| AsRef::<ForwardRelativePath>::as_ref(&**p)),
+            projected_path: &self.projected_path,
            hidden_components_count: self.hidden_components_count,
        }
    }
@@ -242,10 +279,11 @@ impl BoundBuildArtifact {
///
/// All 'DeclaredArtifact's are forced to be bound at the end of the analysis phase.
#[derive(Clone, Debug, Dupe, Display, Allocative)]
-#[display(fmt = "{}", "self.get_path()")]
+#[display("{}", self.get_path())]
pub struct DeclaredArtifact {
+    /// `Rc` here is not an optimization: `DeclaredArtifactKind` is shared mutable state.
    artifact: Rc<RefCell<DeclaredArtifactKind>>,
-    projected_path: Option<Arc<ForwardRelativePathBuf>>,
+    projected_path: ThinArcS<ForwardRelativePath>,
    hidden_components_count: usize,
}
@@ -259,7 +297,7 @@ impl DeclaredArtifact {
            artifact: Rc::new(RefCell::new(DeclaredArtifactKind::Unbound(
                UnboundArtifact(path, output_type),
            ))),
-            projected_path: None,
+            projected_path: ThinArcS::from(ForwardRelativePath::empty()),
            hidden_components_count,
        }
    }
@@ -278,10 +316,7 @@ impl DeclaredArtifact {
        Self {
            artifact: self.artifact.dupe(),
-            projected_path: Some(Arc::new(match self.projected_path.as_ref() {
-                Some(existing_path) => existing_path.join(path),
-                None => path.to_owned(),
-            })),
+            projected_path: ThinArcS::from(self.projected_path.join(path).as_ref()),
            hidden_components_count,
        }
    }
@@ -293,10 +328,7 @@ impl DeclaredArtifact {
    pub fn get_path(&self) -> ArtifactPath<'_> {
        let borrow = self.artifact.borrow();

-        let projected_path = self
-            .projected_path
-            .as_ref()
-            .map(|p| AsRef::<ForwardRelativePath>::as_ref(&**p));
+        let projected_path = &self.projected_path;

        let base_path = Ref::map(borrow, |a| match &a {
            DeclaredArtifactKind::Bound(a) => a.get_path(),
@@ -312,7 +344,7 @@ impl DeclaredArtifact {
    pub fn output_type(&self) -> OutputType {
        match &*self.artifact.borrow() {
-            DeclaredArtifactKind::Bound(x) => x.output_type,
+            DeclaredArtifactKind::Bound(x) => x.output_type(),
            DeclaredArtifactKind::Unbound(x) => x.1,
        }
    }
@@ -369,7 +401,7 @@ impl PartialEq for DeclaredArtifact {
impl Eq for DeclaredArtifact {}

/// A 'DeclaredArtifact' can be either "bound" to an 'Action', or "unbound"
-#[derive(Clone, Dupe, Debug, Display, Allocative)]
+#[derive(Debug, Display, Allocative)]
enum DeclaredArtifactKind {
    Bound(BuildArtifact),
    Unbound(UnboundArtifact),
@@ -384,12 +416,15 @@ impl DeclaredArtifactKind {
    }
}

-#[derive(thiserror::Error, Debug)]
+#[derive(buck2_error::Error, Debug)]
+#[buck2(input)]
pub enum ArtifactErrors {
-    #[error("artifact `{0}` was already bound, but attempted to bind to action id `{1}`")]
+    #[error(
+        "Attempted to bind an artifact which was already bound\n Artifact: {0}\n Attempted to bind to an action: {1}"
+    )]
    DuplicateBind(BuildArtifact, ActionKey),
    #[error(
-        "artifact `{0}` must be bound by now. If you are intending to use this artifact as the output of `run`, are you missing an `.as_output()` call?"
+        "Artifact must be bound by now. 
If you are intending to use this artifact as the output of `run`, are you missing an `.as_output()` call?\n Artifact: {0}" )] UnboundArtifact(UnboundArtifact), } @@ -421,7 +456,7 @@ impl OutputArtifact { } a => take_mut::take(a, |artifact| match artifact { DeclaredArtifactKind::Unbound(unbound) => { - DeclaredArtifactKind::Bound(unbound.bind(key)) + DeclaredArtifactKind::Bound(unbound.bind(key).unwrap()) } DeclaredArtifactKind::Bound(_) => { unreachable!("should already be verified to be unbound") @@ -455,11 +490,11 @@ impl Deref for OutputArtifact { } #[derive(Clone, Dupe, Debug, Display, Allocative)] -#[display(fmt = "{}", "self.0")] +#[display("{}", self.0)] pub struct UnboundArtifact(BuckOutPath, OutputType); impl UnboundArtifact { - fn bind(self, key: ActionKey) -> BuildArtifact { + fn bind(self, key: ActionKey) -> anyhow::Result { BuildArtifact::new(self.0, key, self.1) } } @@ -467,17 +502,17 @@ impl UnboundArtifact { pub mod testing { use buck2_core::base_deferred_key::BaseDeferredKey; use buck2_core::fs::buck_out_path::BuckOutPath; - use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; + use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; use buck2_core::target::configured_target_label::ConfiguredTargetLabel; use buck2_execute::execute::request::OutputType; use dupe::Dupe; + use crate::actions::key::ActionIndex; use crate::actions::key::ActionKey; use crate::artifact::artifact_type::DeclaredArtifact; use crate::artifact::artifact_type::DeclaredArtifactKind; use crate::artifact::build_artifact::BuildArtifact; - use crate::deferred::id::DeferredId; - use crate::deferred::key::DeferredKey; + use crate::deferred::key::DeferredHolderKey; pub trait ArtifactTestingExt { fn testing_is_bound(&self) -> bool; @@ -495,7 +530,7 @@ pub mod testing { fn testing_action_key(&self) -> Option { match &*self.artifact.borrow() { - DeclaredArtifactKind::Bound(built) => Some(built.key.dupe()), + DeclaredArtifactKind::Bound(built) => Some(built.key().dupe()), DeclaredArtifactKind::Unbound(_) => None, } } @@ -507,45 +542,41 @@ pub mod testing { } fn testing_action_key(&self) -> Option { - Some(self.key.dupe()) + Some(self.key().dupe()) } } pub trait BuildArtifactTestingExt { - fn testing_new( - target: ConfiguredTargetLabel, - path: ForwardRelativePathBuf, - id: DeferredId, - ) -> BuildArtifact; + fn testing_new(target: ConfiguredTargetLabel, path: &str, id: ActionIndex) + -> BuildArtifact; } impl BuildArtifactTestingExt for BuildArtifact { fn testing_new( target: ConfiguredTargetLabel, - path: ForwardRelativePathBuf, - id: DeferredId, + path: &str, + id: ActionIndex, ) -> BuildArtifact { BuildArtifact::new( - BuckOutPath::new(BaseDeferredKey::TargetLabel(target.dupe()), path), - ActionKey::unchecked_new(DeferredKey::Base( - BaseDeferredKey::TargetLabel(target), + BuckOutPath::new( + BaseDeferredKey::TargetLabel(target.dupe()), + ForwardRelativePath::new(path).unwrap().to_buf(), + ), + ActionKey::unchecked_new( + DeferredHolderKey::Base(BaseDeferredKey::TargetLabel(target)), id, - )), + ), OutputType::File, ) + .unwrap() } } } #[cfg(test)] mod tests { - use std::collections::hash_map::DefaultHasher; - use std::hash::Hash; - use std::hash::Hasher; - use assert_matches::assert_matches; use buck2_core::base_deferred_key::BaseDeferredKey; - use buck2_core::buck_path::path::BuckPath; use buck2_core::cells::cell_root_path::CellRootPathBuf; use buck2_core::cells::name::CellName; use buck2_core::cells::CellResolver; @@ -555,17 +586,19 @@ mod tests { use 
buck2_core::fs::buck_out_path::BuckOutPathResolver; use buck2_core::fs::fs_util; use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; + use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; use buck2_core::fs::project::ProjectRoot; use buck2_core::fs::project::ProjectRootTemp; use buck2_core::fs::project_rel_path::ProjectRelativePath; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; - use buck2_core::package::package_relative_path::PackageRelativePathBuf; - use buck2_core::package::PackageLabel; + use buck2_core::package::source_path::SourcePath; use buck2_core::target::configured_target_label::ConfiguredTargetLabel; use buck2_execute::execute::request::OutputType; + use buck2_util::arc_str::ThinArcS; use dupe::Dupe; + use crate::actions::key::ActionIndex; use crate::actions::key::ActionKey; use crate::artifact::artifact_type::testing::BuildArtifactTestingExt; use crate::artifact::artifact_type::Artifact; @@ -573,8 +606,7 @@ mod tests { use crate::artifact::artifact_type::DeclaredArtifactKind; use crate::artifact::build_artifact::BuildArtifact; use crate::artifact::source_artifact::SourceArtifact; - use crate::deferred::id::DeferredId; - use crate::deferred::key::DeferredKey; + use crate::deferred::key::DeferredHolderKey; #[test] fn artifact_binding() -> anyhow::Result<()> { @@ -588,10 +620,10 @@ mod tests { OutputType::File, 0, ); - let key = ActionKey::unchecked_new(DeferredKey::Base( - BaseDeferredKey::TargetLabel(target.dupe()), - DeferredId::testing_new(0), - )); + let key = ActionKey::unchecked_new( + DeferredHolderKey::Base(BaseDeferredKey::TargetLabel(target.dupe())), + ActionIndex::new(0), + ); let out = declared.as_output(); let bound = out.bind(key.dupe())?; @@ -610,10 +642,10 @@ mod tests { out.bind(key)?; // Binding again to a different key should fail - let other_key = ActionKey::unchecked_new(DeferredKey::Base( - BaseDeferredKey::TargetLabel(target), - DeferredId::testing_new(1), - )); + let other_key = ActionKey::unchecked_new( + DeferredHolderKey::Base(BaseDeferredKey::TargetLabel(target)), + ActionIndex::new(1), + ); assert_matches!(out.bind(other_key), Err(..)); @@ -622,10 +654,7 @@ mod tests { #[test] fn resolve_artifact() -> anyhow::Result<()> { - let source = SourceArtifact::new(BuckPath::testing_new( - PackageLabel::testing_parse("cell//pkg"), - PackageRelativePathBuf::unchecked_new("src.cpp".into()), - )); + let source = SourceArtifact::new(SourcePath::testing_new("cell//pkg", "src.cpp")); let project_fs = ProjectRoot::new(AbsNormPathBuf::try_from(std::env::current_dir().unwrap()).unwrap()) @@ -655,21 +684,11 @@ mod tests { let target = ConfiguredTargetLabel::testing_parse("cell//pkg:foo", ConfigurationData::testing_new()); - let artifact1 = BuildArtifact::testing_new( - target.dupe(), - ForwardRelativePathBuf::unchecked_new("foo/bar.cpp".to_owned()), - DeferredId::testing_new(0), - ); - let artifact2 = BuildArtifact::testing_new( - target.dupe(), - ForwardRelativePathBuf::unchecked_new("foo/bar.h".to_owned()), - DeferredId::testing_new(0), - ); - let artifact3 = BuildArtifact::testing_new( - target, - ForwardRelativePathBuf::unchecked_new("foo/bar.cpp/invalid_file.txt".to_owned()), - DeferredId::testing_new(1), - ); + let artifact1 = + BuildArtifact::testing_new(target.dupe(), "foo/bar.cpp", ActionIndex::new(0)); + let artifact2 = BuildArtifact::testing_new(target.dupe(), "foo/bar.h", ActionIndex::new(0)); + let artifact3 = + BuildArtifact::testing_new(target, 
"foo/bar.cpp/invalid_file.txt", ActionIndex::new(1)); let fs = ArtifactFs::new( CellResolver::testing_with_name_and_path( @@ -714,52 +733,20 @@ mod tests { Ok(()) } - #[test] - fn test_eq_hash() -> anyhow::Result<()> { - let target = - ConfiguredTargetLabel::testing_parse("cell//pkg:foo", ConfigurationData::testing_new()); - - let artifact = BuildArtifact::testing_new( - target.dupe(), - ForwardRelativePathBuf::unchecked_new("foo/bar.cpp".to_owned()), - DeferredId::testing_new(0), - ); - - let full = Artifact::new(artifact.clone(), None, 0); - let hidden = Artifact::new(artifact, None, 1); - - assert_eq!(full, hidden); - - let hash_full = { - let mut hasher = DefaultHasher::new(); - full.hash(&mut hasher); - hasher.finish() - }; - - let hash_hidden = { - let mut hasher = DefaultHasher::new(); - hidden.hash(&mut hasher); - hasher.finish() - }; - - assert_eq!(hash_full, hash_hidden); - - Ok(()) - } - #[test] fn test_short_path() -> anyhow::Result<()> { let target = ConfiguredTargetLabel::testing_parse("cell//pkg:foo", ConfigurationData::testing_new()); - let artifact = BuildArtifact::testing_new( - target.dupe(), - ForwardRelativePathBuf::unchecked_new("foo/bar.cpp".to_owned()), - DeferredId::testing_new(0), - ); + let artifact = + BuildArtifact::testing_new(target.dupe(), "foo/bar.cpp", ActionIndex::new(0)); - let full = Artifact::new(artifact.clone(), None, 0); - let hidden = Artifact::new(artifact, None, 1); + let full = Artifact::new( + artifact.clone(), + ThinArcS::from(ForwardRelativePath::empty()), + 0, + ); + let hidden = Artifact::new(artifact, ThinArcS::from(ForwardRelativePath::empty()), 1); full.get_path() .with_full_path(|p| assert_eq!(p, "foo/bar.cpp")); diff --git a/app/buck2_artifact/src/artifact/build_artifact.rs b/app/buck2_artifact/src/artifact/build_artifact.rs index 087a7d1d077b4..efaa4b5509c52 100644 --- a/app/buck2_artifact/src/artifact/build_artifact.rs +++ b/app/buck2_artifact/src/artifact/build_artifact.rs @@ -10,10 +10,12 @@ use allocative::Allocative; use buck2_core::fs::buck_out_path::BuckOutPath; use buck2_data::ToProtoMessage; +use buck2_error::internal_error_anyhow; use buck2_execute::execute::request::OutputType; use derivative::Derivative; use derive_more::Display; use dupe::Dupe; +use static_assertions::assert_eq_size; use crate::actions::key::ActionKey; @@ -21,25 +23,25 @@ use crate::actions::key::ActionKey; #[allow(clippy::derived_hash_with_manual_eq)] // The Eq is equivalent to what would have been generated #[derive(Clone, Debug, Dupe, Display, Derivative, Allocative)] #[derivative(PartialEq, Eq, Hash)] -#[display(fmt = "`{}`, action: {}", path, key)] +#[display("`{}`, action: {}", path, key)] pub struct BuildArtifact { - pub path: BuckOutPath, - // If two BuildArtifact's have the same path then they are basically the same, - // even if the ActionKey differs due to things like `dynamic_output`. - // TODO(ndmitchell): Clean this up by making it more explicit in ActionKey. 
-    #[derivative(PartialEq = "ignore", Hash = "ignore")]
-    pub key: ActionKey,
-    #[derivative(PartialEq = "ignore", Hash = "ignore")]
-    pub output_type: OutputType,
+    path: BuckOutPath,
+    key: ActionKey,
+    output_type: OutputType,
}

+assert_eq_size!(BuildArtifact, [usize; 6]);
+
impl BuildArtifact {
-    pub fn new(path: BuckOutPath, key: ActionKey, output_type: OutputType) -> Self {
-        BuildArtifact {
+    pub fn new(path: BuckOutPath, key: ActionKey, output_type: OutputType) -> anyhow::Result<Self> {
+        if key.holder_key().owner() != path.owner() {
+            return Err(internal_error_anyhow!("BaseDeferredKey mismatch"));
+        }
+        Ok(BuildArtifact {
            path,
            key,
            output_type,
-        }
+        })
    }

    pub fn get_path(&self) -> &BuckOutPath {
diff --git a/app/buck2_artifact/src/artifact/mod.rs b/app/buck2_artifact/src/artifact/mod.rs
deleted file mode 100644
index afcc5e106037e..0000000000000
--- a/app/buck2_artifact/src/artifact/mod.rs
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */
-
-//! An 'Artifact' represents a File to be used as part of the build. It can be either a file in the
-//! source tree, or a file to be generated as part of the build.
-//!
-//! The existence of an Artifact does not mean the actual file exists. Artifacts needs to be
-//! 'made available' before its existence on the filesystem is guaranteed.
-//!
-//! An 'Artifact' is first "declared" by rule implementation as a 'DeclaredArtifact'. The artifact
-//! will need to be "bound" to an 'Action' through being used as an 'OutputArtifact'. Once bound,
-//! it becomes a 'BuildArtifact' that can be available.
-
-pub mod artifact_type;
-pub mod build_artifact;
-pub mod projected_artifact;
-pub mod provide_outputs;
-pub mod source_artifact;
diff --git a/app/buck2_artifact/src/artifact/projected_artifact.rs b/app/buck2_artifact/src/artifact/projected_artifact.rs
deleted file mode 100644
index 570d107c5789b..0000000000000
--- a/app/buck2_artifact/src/artifact/projected_artifact.rs
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */
-
-use std::sync::Arc;
-
-use allocative::Allocative;
-use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath;
-use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf;
-use derive_more::Display;
-use dupe::Dupe;
-
-use crate::artifact::artifact_type::BaseArtifactKind;
-
-/// A path within another Artifact.
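// Illustrative sketch: `BuildArtifact::new` is now fallible and checks that
// the output path and the action key agree on the owning target.
// `make_output` is a hypothetical helper, not part of the diff.
use buck2_artifact::actions::key::ActionKey;
use buck2_artifact::artifact::build_artifact::BuildArtifact;
use buck2_core::fs::buck_out_path::BuckOutPath;
use buck2_execute::execute::request::OutputType;

fn make_output(path: BuckOutPath, key: ActionKey) -> anyhow::Result<BuildArtifact> {
    // Fails with an internal error on a `BaseDeferredKey` mismatch instead
    // of silently constructing an artifact owned by the wrong target.
    BuildArtifact::new(path, key, OutputType::File)
}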
-#[derive(Clone, Debug, Display, Dupe, Hash, PartialEq, Eq, Allocative)] -#[display(fmt = "{}/{}", base, path)] -pub struct ProjectedArtifact { - base: BaseArtifactKind, - path: Arc, -} - -impl ProjectedArtifact { - pub fn new(base: BaseArtifactKind, path: Arc) -> Self { - Self { base, path } - } - - pub fn base(&self) -> &BaseArtifactKind { - &self.base - } - - pub fn path(&self) -> &ForwardRelativePath { - &self.path - } - - pub fn path_shared(&self) -> &Arc { - &self.path - } -} diff --git a/app/buck2_artifact/src/artifact/provide_outputs.rs b/app/buck2_artifact/src/artifact/provide_outputs.rs deleted file mode 100644 index a560c3060ba67..0000000000000 --- a/app/buck2_artifact/src/artifact/provide_outputs.rs +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use crate::actions::key::ActionKey; -use crate::artifact::build_artifact::BuildArtifact; - -/// Dynamic data requested from deferreds. -/// -/// Newtype to grep easier and to provide some type safety. -pub struct ProvideOutputs(pub anyhow::Result>); - -/// Dynamic data requested from deferreds, allows access to the action key for registered actions. -pub struct ProvideActionKey(pub ActionKey); diff --git a/app/buck2_artifact/src/artifact/source_artifact.rs b/app/buck2_artifact/src/artifact/source_artifact.rs index d6e66427e2643..34b1a86c55a8e 100644 --- a/app/buck2_artifact/src/artifact/source_artifact.rs +++ b/app/buck2_artifact/src/artifact/source_artifact.rs @@ -11,8 +11,8 @@ use std::hash::Hash; use std::sync::Arc; use allocative::Allocative; -use buck2_core::buck_path::path::BuckPath; -use buck2_core::buck_path::path::BuckPathRef; +use buck2_core::package::source_path::SourcePath; +use buck2_core::package::source_path::SourcePathRef; use derive_more::Display; use dupe::Dupe; @@ -23,14 +23,14 @@ use dupe::Dupe; pub struct SourceArtifact(Arc); #[derive(Debug, Display, Hash, PartialEq, Eq, PartialOrd, Ord, Allocative)] -struct SourceArtifactData(BuckPath); +struct SourceArtifactData(SourcePath); impl SourceArtifact { - pub fn new(path: BuckPath) -> Self { + pub fn new(path: SourcePath) -> Self { Self(Arc::new(SourceArtifactData(path))) } - pub fn get_path(&self) -> BuckPathRef { + pub fn get_path(&self) -> SourcePathRef { self.0.0.as_ref() } } diff --git a/app/buck2_artifact/src/deferred.rs b/app/buck2_artifact/src/deferred.rs new file mode 100644 index 0000000000000..6338744c4e7e2 --- /dev/null +++ b/app/buck2_artifact/src/deferred.rs @@ -0,0 +1,10 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */
+
+pub mod key;
diff --git a/app/buck2_artifact/src/deferred/data.rs b/app/buck2_artifact/src/deferred/data.rs
index 5e1ea90432dbd..ca89abe88c26f 100644
--- a/app/buck2_artifact/src/deferred/data.rs
+++ b/app/buck2_artifact/src/deferred/data.rs
@@ -12,14 +12,14 @@ use std::marker::PhantomData;
use allocative::Allocative;
use derivative::Derivative;
use dupe::Clone_;
-use dupe::Dupe;
+use dupe::Dupe_;

use crate::deferred::key::DeferredKey;

/// A value to be stored in 'Provider' fields representing an asynchronously computed value
-#[derive(Clone_, Dupe, derive_more::Display, Derivative, Allocative)]
+#[derive(Clone_, Dupe_, derive_more::Display, Derivative, Allocative)]
#[derivative(Debug, Eq, Hash, PartialEq)]
-#[display(fmt = "{}", key)]
+#[display("{}", key)]
#[allocative(bound = "")]
#[repr(C)]
pub struct DeferredData<T> {
diff --git a/app/buck2_artifact/src/deferred/id.rs b/app/buck2_artifact/src/deferred/id.rs
index 9e74efbf20b64..76ae189ae8e41 100644
--- a/app/buck2_artifact/src/deferred/id.rs
+++ b/app/buck2_artifact/src/deferred/id.rs
@@ -14,7 +14,7 @@ use dupe::Dupe;
#[derive(Clone, Copy, Debug, Dupe, derive_more::Display, Allocative)]
// comment because linters and fmt don't agree
#[derive(Eq, Hash, Ord, PartialEq, PartialOrd)]
-#[display(fmt = "{}", id)]
+#[display("{}", id)]
pub struct DeferredId {
    pub id: u32,
    pub trivial: bool,
diff --git a/app/buck2_artifact/src/deferred/key.rs b/app/buck2_artifact/src/deferred/key.rs
index 54a2120c1c93c..8fdbf586ddf70 100644
--- a/app/buck2_artifact/src/deferred/key.rs
+++ b/app/buck2_artifact/src/deferred/key.rs
@@ -11,71 +11,55 @@ use std::sync::Arc;
use allocative::Allocative;
use buck2_core::base_deferred_key::BaseDeferredKey;
+use buck2_core::configuration::data::ConfigurationData;
+use buck2_core::target::configured_target_label::ConfiguredTargetLabel;
use dupe::Dupe;
-use itertools::Itertools;

-use crate::deferred::id::DeferredId;
+use crate::dynamic::DynamicLambdaResultsKey;

-/// A key to lookup a 'Deferred' of any result type
+/// The base key. We can actually get rid of this and just use 'DeferredKey' if rule analysis is a
+/// 'Deferred' itself. This is used to construct the composed 'DeferredKey::Deferred' or
+/// 'DeferredKey::Base' type.
#[derive(
+    Hash,
+    Eq,
+    PartialEq,
    Clone,
    Dupe,
    derive_more::Display,
    Debug,
-    Eq,
-    Hash,
-    PartialEq,
    Allocative
)]
-#[cfg_attr(feature = "gazebo_lint", allow(gazebo_lint_arc_on_dupe))] // Recursive type
-pub enum DeferredKey {
-    /// Base means it's the first deferred registered that can be looked up via the ID based on
-    /// analysis of the 'ConfiguredTargetLabel'.
-    #[display(fmt = "(target: `{}`, id: `{}`)", _0, _1)]
-    Base(BaseDeferredKey, DeferredId),
-    /// Points to a 'Deferred' that is generated from another 'Deferred'. The 'DeferredID' can only
-    /// be looked up based on the results of executing the deferred at 'DeferredKey'
-    #[display(fmt = "(target: `{}`, id: `{}`)", _0, _1)]
-    Deferred(Arc<DeferredKey>, DeferredId),
+
+pub enum DeferredHolderKey {
+    Base(BaseDeferredKey),
+    DynamicLambda(Arc<DynamicLambdaResultsKey>),
}

-impl DeferredKey {
-    pub fn id(&self) -> DeferredId {
-        *match self {
-            DeferredKey::Base(_, id) | DeferredKey::Deferred(_, id) => id,
+impl DeferredHolderKey {
+    pub fn testing_new(target_label: &str) -> DeferredHolderKey {
+        let target =
+            ConfiguredTargetLabel::testing_parse(target_label, ConfigurationData::testing_new());
+        let deferred_key = BaseDeferredKey::TargetLabel(target);
+        DeferredHolderKey::Base(deferred_key)
+    }
+
+    pub fn owner(&self) -> &BaseDeferredKey {
+        match self {
+            DeferredHolderKey::Base(base) => base,
+            DeferredHolderKey::DynamicLambda(lambda) => lambda.owner(),
        }
    }

    /// Create action_key information from the ids, uniquely
    /// identifying this action within this target.
    pub fn action_key(&self) -> String {
-        let mut ids = Vec::new();
-        let mut x = self;
-        loop {
-            match x {
-                DeferredKey::Base(_, id) => {
-                    ids.push(id);
-                    break;
-                }
-                DeferredKey::Deferred(base, id) => {
-                    ids.push(id);
-                    x = base
-                }
-            }
-        }
        // FIXME(ndmitchell): We'd like to have some kind of user supplied name/category here,
        // rather than using the usize ids, so things are a bit more stable and as these strings
        // are likely to come up in error messages users might see (e.g. with paths).
-        ids.iter().rev().map(|x| x.as_usize().to_string()).join("_")
-    }
-
-    pub fn owner(&self) -> &BaseDeferredKey {
-        let mut x = self;
-        loop {
-            match x {
-                DeferredKey::Base(base, _) => return base,
-                DeferredKey::Deferred(base, _) => x = base,
-            }
+        match self {
+            DeferredHolderKey::Base(_) => String::new(),
+            DeferredHolderKey::DynamicLambda(lambda) => lambda.action_key(),
        }
    }
}
diff --git a/app/buck2_artifact/src/deferred/mod.rs b/app/buck2_artifact/src/deferred/mod.rs
deleted file mode 100644
index d9a2f30665f33..0000000000000
--- a/app/buck2_artifact/src/deferred/mod.rs
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */
-
-pub mod data;
-pub mod id;
-pub mod key;
diff --git a/app/buck2_artifact/src/dynamic.rs b/app/buck2_artifact/src/dynamic.rs
new file mode 100644
index 0000000000000..8dead4e6fff85
--- /dev/null
+++ b/app/buck2_artifact/src/dynamic.rs
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::fmt::Write;
+
+use allocative::Allocative;
+use buck2_core::base_deferred_key::BaseDeferredKey;
+use dupe::Dupe;
+
+use crate::deferred::key::DeferredHolderKey;
+
+/// The base key. We can actually get rid of this and just use 'DeferredKey' if rule analysis is a
+/// 'Deferred' itself. This is used to construct the composed 'DeferredKey::Deferred' or
+/// 'DeferredKey::Base' type.
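// Illustrative sketch of how the new keys compose for nested dynamic outputs,
// using only constructors shown in this diff; the target label is an example.
use std::sync::Arc;

use buck2_artifact::actions::key::{ActionIndex, ActionKey};
use buck2_artifact::deferred::key::DeferredHolderKey;
use buck2_artifact::dynamic::{DynamicLambdaIndex, DynamicLambdaResultsKey};

fn nested_action_key_demo() {
    let base = DeferredHolderKey::testing_new("cell//pkg:foo");
    // A dynamic lambda registered during the base analysis...
    let lambda = DynamicLambdaResultsKey::new(base, DynamicLambdaIndex::new(1));
    // ...becomes the holder key for the actions it registers.
    let holder = DeferredHolderKey::DynamicLambda(Arc::new(lambda));
    let action = ActionKey::new(holder, ActionIndex::new(0));
    // Base contributes "", the lambda appends "_1", the action appends "_0",
    // replacing the old walk over the recursive `DeferredKey` chain.
    assert_eq!(action.action_key(), "_1_0");
}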
+#[derive( + Hash, + Eq, + PartialEq, + Clone, + Dupe, + derive_more::Display, + Debug, + Allocative +)] +#[display("{_0}_{_1}")] +pub struct DynamicLambdaResultsKey(DeferredHolderKey, DynamicLambdaIndex); + +impl DynamicLambdaResultsKey { + pub fn new(key: DeferredHolderKey, idx: DynamicLambdaIndex) -> Self { + Self(key, idx) + } + + pub fn owner(&self) -> &BaseDeferredKey { + self.0.owner() + } + + pub fn holder_key(&self) -> &DeferredHolderKey { + &self.0 + } + + pub fn action_key(&self) -> String { + let mut v = self.0.action_key(); + write!(&mut v, "_{}", self.1).unwrap(); + v + } +} + +#[derive( + Debug, + Eq, + PartialEq, + Hash, + Clone, + Dupe, + Copy, + derive_more::Display, + Allocative +)] +pub struct DynamicLambdaIndex(u32); + +impl DynamicLambdaIndex { + pub fn new(v: u32) -> Self { + Self(v) + } +} diff --git a/app/buck2_artifact/src/lib.rs b/app/buck2_artifact/src/lib.rs index 0dce129ac25aa..4941e4bf262e3 100644 --- a/app/buck2_artifact/src/lib.rs +++ b/app/buck2_artifact/src/lib.rs @@ -7,6 +7,9 @@ * of this source tree. */ +#![feature(error_generic_member_access)] + pub mod actions; pub mod artifact; pub mod deferred; +pub mod dynamic; diff --git a/app/buck2_audit/BUCK b/app/buck2_audit/BUCK index 173abbee499e7..be826a2e8f257 100644 --- a/app/buck2_audit/BUCK +++ b/app/buck2_audit/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -8,7 +7,7 @@ rust_library( srcs = glob(["src/**/*.rs"]), deps = [ "fbsource//third-party/rust:async-trait", - "fbsource//third-party/rust:clap-3", + "fbsource//third-party/rust:clap", "fbsource//third-party/rust:serde", "fbsource//third-party/rust:serde_json", "//buck2/app/buck2_cli_proto:buck2_cli_proto", diff --git a/app/buck2_audit/Cargo.toml b/app/buck2_audit/Cargo.toml index 63755afc502ed..acca3d7d46f18 100644 --- a/app/buck2_audit/Cargo.toml +++ b/app/buck2_audit/Cargo.toml @@ -1,8 +1,10 @@ [package] +description = "Client side of `buck2 audit`" +edition = "2021" +license = { workspace = true } name = "buck2_audit" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Client side of `buck2 audit`" [dependencies] async-trait = { workspace = true } @@ -12,5 +14,5 @@ serde_json = { workspace = true } dupe = { workspace = true } -buck2_client_ctx = { workspace = true } buck2_cli_proto = { workspace = true } +buck2_client_ctx = { workspace = true } diff --git a/app/buck2_audit/src/analysis_queries.rs b/app/buck2_audit/src/analysis_queries.rs index b44f89e03e662..2caa269b6836f 100644 --- a/app/buck2_audit/src/analysis_queries.rs +++ b/app/buck2_audit/src/analysis_queries.rs @@ -8,6 +8,7 @@ */ use async_trait::async_trait; +use buck2_client_ctx::common::target_cfg::TargetCfgWithUniverseOptions; use buck2_client_ctx::common::CommonCommandOptions; use crate::AuditSubcommand; @@ -18,8 +19,11 @@ use crate::AuditSubcommand; about = "buck audit analysis resolving query attrs" )] pub struct AuditAnalysisQueriesCommand { - #[clap(flatten)] - common_opts: CommonCommandOptions, + #[clap( + long, + help = "Enable to print the outputs for the targets in the resolved queries" + )] + pub include_outputs: bool, #[clap( name = "TARGET_PATTERNS", @@ -27,11 +31,11 @@ pub struct AuditAnalysisQueriesCommand { )] pub patterns: Vec, - #[clap( - long, - help = "Enable to print the outputs for the targets in the resolved queries" - )] - pub include_outputs: bool, + #[clap(flatten)] + pub target_cfg: 
TargetCfgWithUniverseOptions, + + #[clap(flatten)] + pub common_opts: CommonCommandOptions, } #[async_trait] diff --git a/app/buck2_audit/src/cell.rs b/app/buck2_audit/src/cell.rs index 9bc2b4e2be55a..dc26329e45725 100644 --- a/app/buck2_audit/src/cell.rs +++ b/app/buck2_audit/src/cell.rs @@ -8,6 +8,7 @@ */ use async_trait::async_trait; +use buck2_client_ctx::common::target_cfg::TargetCfgUnusedOptions; use buck2_client_ctx::common::CommonCommandOptions; use crate::AuditSubcommand; @@ -15,12 +16,9 @@ use crate::AuditSubcommand; #[derive(Debug, clap::Parser, serde::Serialize, serde::Deserialize)] #[clap( name = "audit-cell", - about = "Query information about the [repositories] list in .buckconfig." + about = "Query information about the [cells] list in .buckconfig." )] pub struct AuditCellCommand { - #[clap(flatten)] - common_opts: CommonCommandOptions, - #[clap(long = "json", help = "Output in JSON format")] pub json: bool, @@ -41,6 +39,13 @@ pub struct AuditCellCommand { help = "Cell aliases to query. These aliases will be resolved in the working directory cell." )] pub aliases_to_resolve: Vec, + + /// Command doesn't need these flags, but they are used in mode files, so we need to keep them. + #[clap(flatten)] + _target_cfg: TargetCfgUnusedOptions, + + #[clap(flatten)] + common_opts: CommonCommandOptions, } #[async_trait] diff --git a/app/buck2_audit/src/classpath.rs b/app/buck2_audit/src/classpath.rs index 2b0490babe7c6..af2d32cc7975e 100644 --- a/app/buck2_audit/src/classpath.rs +++ b/app/buck2_audit/src/classpath.rs @@ -8,6 +8,7 @@ */ use async_trait::async_trait; +use buck2_client_ctx::common::target_cfg::TargetCfgOptions; use buck2_client_ctx::common::CommonCommandOptions; use crate::AuditSubcommand; @@ -20,16 +21,18 @@ use crate::AuditSubcommand; We will replace this command with something that can audit the entire `TemplatePlaceholderInfo` in the future." 
)] pub struct AuditClasspathCommand { - #[clap(flatten)] - common_opts: CommonCommandOptions, - - #[clap(name = "TARGET_PATTERNS", help = "Target patterns to audit")] - pub patterns: Vec, - /// Output in JSON format #[clap(long)] pub json: bool, // TODO(scottcao): Add --show-targets, --dot, and other relevant flags + #[clap(name = "TARGET_PATTERNS", help = "Target patterns to audit")] + pub patterns: Vec, + + #[clap(flatten)] + pub target_cfg: TargetCfgOptions, + + #[clap(flatten)] + pub common_opts: CommonCommandOptions, } #[async_trait] diff --git a/app/buck2_audit/src/config.rs b/app/buck2_audit/src/config.rs index 316ea1f92e759..105ade5eb3175 100644 --- a/app/buck2_audit/src/config.rs +++ b/app/buck2_audit/src/config.rs @@ -10,6 +10,7 @@ use std::str::FromStr; use async_trait::async_trait; +use buck2_client_ctx::common::target_cfg::TargetCfgUnusedOptions; use buck2_client_ctx::common::CommonCommandOptions; use dupe::Dupe; @@ -20,9 +21,11 @@ use crate::AuditSubcommand; Dupe, Clone, Copy, + PartialEq, + Eq, serde::Serialize, serde::Deserialize, - clap::ArgEnum + clap::ValueEnum )] #[clap(rename_all = "snake_case")] pub enum OutputFormat { @@ -30,7 +33,15 @@ pub enum OutputFormat { Json, } -#[derive(Debug, Clone, Copy, Dupe, serde::Serialize, serde::Deserialize)] +#[derive( + Debug, + Clone, + Copy, + Dupe, + serde::Serialize, + serde::Deserialize, + clap::ValueEnum +)] pub enum LocationStyle { None, Direct, @@ -49,7 +60,15 @@ impl FromStr for LocationStyle { } } -#[derive(Debug, Clone, Copy, Dupe, serde::Serialize, serde::Deserialize)] +#[derive( + Debug, + Clone, + Copy, + Dupe, + serde::Serialize, + serde::Deserialize, + clap::ValueEnum +)] pub enum ValueStyle { Resolved, Raw, @@ -71,22 +90,33 @@ impl FromStr for ValueStyle { #[derive(Debug, clap::Parser, serde::Serialize, serde::Deserialize)] #[clap(name = "audit-config", about = "buck audit config")] pub struct AuditConfigCommand { - #[clap(flatten)] - common_opts: CommonCommandOptions, - #[clap(long = "cell")] pub cell: Option, - #[clap(long, alias = "style", ignore_case = true, arg_enum)] + /// Produce information for all cells that Buck2 knows about. + #[clap(long, conflicts_with = "cell")] + pub all_cells: bool, + + #[clap(long, alias = "style", ignore_case = true, value_enum)] pub output_format: Option, #[clap(long)] pub json: bool, - #[clap(long = "location", default_value = "none", possible_values=&["none", "direct", "extended"])] + #[clap( + long = "location", + default_value = "none", + ignore_case = true, + value_enum + )] pub location_style: LocationStyle, - #[clap(long = "value", default_value = "resolved", possible_values=&["resolved", "raw", "both"])] + #[clap( + long = "value", + default_value = "resolved", + ignore_case = true, + value_enum + )] pub value_style: ValueStyle, /// config section/key specs of the form `section` or `section.key`. @@ -94,6 +124,13 @@ pub struct AuditConfigCommand { /// (section headers will be printed only for sections with a key matching the spec). #[clap(name = "SPECS")] pub specs: Vec, + + /// Command doesn't need these flags, but they are used in mode files, so we need to keep them. 
+ #[clap(flatten)] + _target_cfg: TargetCfgUnusedOptions, + + #[clap(flatten)] + common_opts: CommonCommandOptions, } impl AuditConfigCommand { diff --git a/app/buck2_audit/src/configurations.rs b/app/buck2_audit/src/configurations.rs index 5c8819314e165..dbbf252f9d112 100644 --- a/app/buck2_audit/src/configurations.rs +++ b/app/buck2_audit/src/configurations.rs @@ -8,6 +8,7 @@ */ use async_trait::async_trait; +use buck2_client_ctx::common::target_cfg::TargetCfgUnusedOptions; use buck2_client_ctx::common::CommonCommandOptions; use crate::AuditSubcommand; @@ -18,15 +19,19 @@ use crate::AuditSubcommand; about = "prints the constraints for configuration IDs" )] pub struct AuditConfigurationsCommand { - #[clap(flatten)] - common_opts: CommonCommandOptions, - #[clap( name = "configurations", - multiple_values = true, + num_args = 1.., help = "configurations to audit (example: `cell//package:target-105fe3389fc7e436`). If none provided, will print information about all known configurations." )] pub configs: Vec, + + /// Command doesn't need these flags, but they are used in mode files, so we need to keep them. + #[clap(flatten)] + _target_cfg: TargetCfgUnusedOptions, + + #[clap(flatten)] + common_opts: CommonCommandOptions, } #[async_trait] diff --git a/app/buck2_audit/src/deferred_materializer.rs b/app/buck2_audit/src/deferred_materializer.rs index 9c8e4633d74d6..e0cf5ee3eda86 100644 --- a/app/buck2_audit/src/deferred_materializer.rs +++ b/app/buck2_audit/src/deferred_materializer.rs @@ -8,6 +8,7 @@ */ use async_trait::async_trait; +use buck2_client_ctx::common::target_cfg::TargetCfgUnusedOptions; use buck2_client_ctx::common::CommonCommandOptions; use crate::AuditSubcommand; @@ -18,16 +19,21 @@ use crate::AuditSubcommand; about = "Access and interact with the deferred materializer" )] pub struct DeferredMaterializerCommand { - #[clap(flatten)] - pub common_opts: CommonCommandOptions, - #[clap(subcommand)] pub subcommand: DeferredMaterializerSubcommand, + + /// Command doesn't need these flags, but they are used in mode files, so we need to keep them. + #[clap(flatten)] + pub _target_cfg: TargetCfgUnusedOptions, + + #[clap(flatten)] + pub common_opts: CommonCommandOptions, } #[derive(Debug, clap::Subcommand, serde::Serialize, serde::Deserialize)] pub enum DeferredMaterializerSubcommand { List, + ListSubscriptions, Fsck, Refresh { /// Minimum TTL to require for actions. 
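// Illustrative sketch of the clap 3 -> 4 migration pattern used across these
// commands: a `ValueEnum` derive plus `value_enum` replaces `arg_enum` and
// hand-written `possible_values`, and `num_args = 1..` replaces
// `multiple_values = true`. All names here are hypothetical.
use clap::{Parser, ValueEnum};

#[derive(Clone, Copy, Debug, ValueEnum)]
#[clap(rename_all = "snake_case")]
enum Style {
    Simple,
    Json,
}

#[derive(Debug, Parser)]
struct Demo {
    #[clap(long, ignore_case = true, value_enum, default_value = "simple")]
    style: Style,

    #[clap(name = "PATTERNS", num_args = 1..)]
    patterns: Vec<String>,
}

fn parse_demo() -> Demo {
    // Equivalent to: demo --style json cell//a/... cell//b/...
    Demo::parse_from(["demo", "--style", "json", "cell//a/...", "cell//b/..."])
}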
diff --git a/app/buck2_audit/src/dep_files.rs b/app/buck2_audit/src/dep_files.rs index 50101b5493259..a96aaf25e2fc2 100644 --- a/app/buck2_audit/src/dep_files.rs +++ b/app/buck2_audit/src/dep_files.rs @@ -8,6 +8,7 @@ */ use async_trait::async_trait; +use buck2_client_ctx::common::target_cfg::TargetCfgOptions; use buck2_client_ctx::common::CommonCommandOptions; use crate::AuditSubcommand; @@ -18,9 +19,6 @@ use crate::AuditSubcommand; about = "prints out the select files for a command" )] pub struct AuditDepFilesCommand { - #[clap(flatten)] - common_opts: CommonCommandOptions, - #[clap(help = "Target to query dep files for")] pub pattern: String, @@ -29,6 +27,12 @@ pub struct AuditDepFilesCommand { #[clap(help = "Action identifier")] pub identifier: Option, + + #[clap(flatten)] + pub target_cfg: TargetCfgOptions, + + #[clap(flatten)] + pub common_opts: CommonCommandOptions, } #[async_trait] diff --git a/app/buck2_audit/src/execution_platform_resolution.rs b/app/buck2_audit/src/execution_platform_resolution.rs index 3f21a229fbbbd..addb95490a3cd 100644 --- a/app/buck2_audit/src/execution_platform_resolution.rs +++ b/app/buck2_audit/src/execution_platform_resolution.rs @@ -8,6 +8,7 @@ */ use async_trait::async_trait; +use buck2_client_ctx::common::target_cfg::TargetCfgWithUniverseOptions; use buck2_client_ctx::common::CommonCommandOptions; use crate::AuditSubcommand; @@ -18,11 +19,14 @@ use crate::AuditSubcommand; about = "prints out information about execution platform resolution" )] pub struct AuditExecutionPlatformResolutionCommand { - #[clap(flatten)] - common_opts: CommonCommandOptions, - #[clap(name = "TARGET_PATTERNS", help = "Patterns to analyze")] pub patterns: Vec, + + #[clap(flatten)] + pub target_cfg: TargetCfgWithUniverseOptions, + + #[clap(flatten)] + pub common_opts: CommonCommandOptions, } #[async_trait] diff --git a/app/buck2_audit/src/includes.rs b/app/buck2_audit/src/includes.rs index 82948b1abda71..d2eb2688dcd2c 100644 --- a/app/buck2_audit/src/includes.rs +++ b/app/buck2_audit/src/includes.rs @@ -8,6 +8,7 @@ */ use async_trait::async_trait; +use buck2_client_ctx::common::target_cfg::TargetCfgUnusedOptions; use buck2_client_ctx::common::CommonCommandOptions; use crate::AuditSubcommand; @@ -18,9 +19,6 @@ use crate::AuditSubcommand; about = "list build file extensions imported at parse time." )] pub struct AuditIncludesCommand { - #[clap(flatten)] - common_opts: CommonCommandOptions, - /// Print json representation of outputs #[clap(long)] pub json: bool, @@ -30,6 +28,13 @@ pub struct AuditIncludesCommand { help = "Build files to audit. These are expected to be relative paths from the working dir cell." )] pub patterns: Vec, + + /// Command doesn't need these flags, but they are used in mode files, so we need to keep them. + #[clap(flatten)] + _target_cfg: TargetCfgUnusedOptions, + + #[clap(flatten)] + common_opts: CommonCommandOptions, } #[async_trait] diff --git a/app/buck2_audit/src/lib.rs b/app/buck2_audit/src/lib.rs index c3d17a12be3ea..7fd67677c6088 100644 --- a/app/buck2_audit/src/lib.rs +++ b/app/buck2_audit/src/lib.rs @@ -7,17 +7,17 @@ * of this source tree. 
*/ -#![feature(async_closure)] +#![feature(error_generic_member_access)] #![feature(try_blocks)] -#![feature(provide_any)] use async_trait::async_trait; use buck2_cli_proto::GenericRequest; use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::common::ui::CommonConsoleOptions; use buck2_client_ctx::common::CommonBuildConfigurationOptions; use buck2_client_ctx::common::CommonCommandOptions; -use buck2_client_ctx::common::CommonConsoleOptions; -use buck2_client_ctx::common::CommonDaemonCommandOptions; +use buck2_client_ctx::common::CommonEventLogOptions; +use buck2_client_ctx::common::CommonStarlarkOptions; use buck2_client_ctx::daemon::client::BuckdClientConnector; use buck2_client_ctx::daemon::client::StdoutPartialResultHandler; use buck2_client_ctx::exit_result::ExitResult; @@ -156,11 +156,15 @@ impl StreamingCommand for AuditCommand { &self.as_subcommand().common_opts().console_opts } - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { + fn event_log_opts(&self) -> &CommonEventLogOptions { &self.as_subcommand().common_opts().event_log_opts } - fn common_opts(&self) -> &CommonBuildConfigurationOptions { + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions { &self.as_subcommand().common_opts().config_opts } + + fn starlark_opts(&self) -> &CommonStarlarkOptions { + &self.as_subcommand().common_opts().starlark_opts + } } diff --git a/app/buck2_audit/src/output/mod.rs b/app/buck2_audit/src/output.rs similarity index 100% rename from app/buck2_audit/src/output/mod.rs rename to app/buck2_audit/src/output.rs diff --git a/app/buck2_audit/src/output/command.rs b/app/buck2_audit/src/output/command.rs index 72c85b0b62902..4391ed89403fd 100644 --- a/app/buck2_audit/src/output/command.rs +++ b/app/buck2_audit/src/output/command.rs @@ -8,6 +8,7 @@ */ use async_trait::async_trait; +use buck2_client_ctx::common::target_cfg::TargetCfgOptions; use buck2_client_ctx::common::CommonCommandOptions; use buck2_client_ctx::query_args::CommonAttributeArgs; @@ -19,9 +20,6 @@ use crate::AuditSubcommand; about = "Query the action that produced the output artifact. Does not support BXL, test, scratch, or anon artifacts. If the configuration hash of the output path does not match the current platform configuration, the unconfigured target label will be returned." )] pub struct AuditOutputCommand { - #[clap(flatten)] - common_opts: CommonCommandOptions, - #[clap( name = "OUTPUT_PATH", help = "The buck-out path to the build artifact, starting with `buck-out` and including the configuration platform." @@ -33,6 +31,12 @@ pub struct AuditOutputCommand { #[clap(flatten)] pub query_attributes: CommonAttributeArgs, + + #[clap(flatten)] + pub target_cfg: TargetCfgOptions, + + #[clap(flatten)] + pub common_opts: CommonCommandOptions, } #[async_trait] diff --git a/app/buck2_audit/src/output/parse.rs b/app/buck2_audit/src/output/parse.rs index 7cea56091960b..5fe8a7a70b4a4 100644 --- a/app/buck2_audit/src/output/parse.rs +++ b/app/buck2_audit/src/output/parse.rs @@ -8,6 +8,7 @@ */ use async_trait::async_trait; +use buck2_client_ctx::common::target_cfg::TargetCfgUnusedOptions; use buck2_client_ctx::common::CommonCommandOptions; use crate::AuditSubcommand; @@ -21,6 +22,10 @@ pub struct AuditParseCommand { #[clap(flatten)] common_opts: CommonCommandOptions, + /// Command doesn't need these flags, but they are used in mode files, so we need to keep them. 
+ #[clap(flatten)] + _target_cfg: TargetCfgUnusedOptions, + #[clap( name = "OUTPUT_PATH", help = "The buck-out path to the build artifact, starting with `buck-out` and including the configuration platform." diff --git a/app/buck2_audit/src/package_values.rs b/app/buck2_audit/src/package_values.rs index 5235e59624428..185fa3d755508 100644 --- a/app/buck2_audit/src/package_values.rs +++ b/app/buck2_audit/src/package_values.rs @@ -7,6 +7,7 @@ * of this source tree. */ +use buck2_client_ctx::common::target_cfg::TargetCfgUnusedOptions; use buck2_client_ctx::common::CommonCommandOptions; use crate::AuditSubcommand; @@ -18,11 +19,15 @@ use crate::AuditSubcommand; #[derive(Debug, clap::Parser, serde::Serialize, serde::Deserialize)] #[clap(name = "package-values")] pub struct PackageValuesCommand { - #[clap(flatten)] - common_opts: CommonCommandOptions, - /// Package names to inspect (like `//foo/bar`, no trailing colon). pub packages: Vec, + + /// Command doesn't need these flags, but they are used in mode files, so we need to keep them. + #[clap(flatten)] + _target_cfg: TargetCfgUnusedOptions, + + #[clap(flatten)] + common_opts: CommonCommandOptions, } impl AuditSubcommand for PackageValuesCommand { diff --git a/app/buck2_audit/src/prelude.rs b/app/buck2_audit/src/prelude.rs index 1a8396a05dbc8..7517fab76d653 100644 --- a/app/buck2_audit/src/prelude.rs +++ b/app/buck2_audit/src/prelude.rs @@ -8,6 +8,7 @@ */ use async_trait::async_trait; +use buck2_client_ctx::common::target_cfg::TargetCfgUnusedOptions; use buck2_client_ctx::common::CommonCommandOptions; use crate::AuditSubcommand; @@ -18,6 +19,10 @@ use crate::AuditSubcommand; about = "print the interpreter prelude to stdout" )] pub struct AuditPreludeCommand { + /// Command doesn't need these flags, but they are used in mode files, so we need to keep them. 
+ #[clap(flatten)] + _target_cfg: TargetCfgUnusedOptions, + #[clap(flatten)] common_opts: CommonCommandOptions, } diff --git a/app/buck2_audit/src/providers.rs b/app/buck2_audit/src/providers.rs index d16114cbb7250..add6451e7628a 100644 --- a/app/buck2_audit/src/providers.rs +++ b/app/buck2_audit/src/providers.rs @@ -8,6 +8,7 @@ */ use async_trait::async_trait; +use buck2_client_ctx::common::target_cfg::TargetCfgWithUniverseOptions; use buck2_client_ctx::common::CommonCommandOptions; use crate::AuditSubcommand; @@ -18,19 +19,13 @@ use crate::AuditSubcommand; about = "prints out the providers for a target pattern" )] pub struct AuditProvidersCommand { - #[clap(flatten)] - common_opts: CommonCommandOptions, - - #[clap(name = "TARGET_PATTERNS", help = "Patterns to analyze")] - pub patterns: Vec, - - #[clap(long, conflicts_with_all=&["list", "print-debug"])] + #[clap(long, conflicts_with_all=&["list", "print_debug"])] pub quiet: bool, #[clap( long, short = 'l', - help = "List the available providers", conflicts_with_all=&["print-debug", "quiet"] + help = "List the available providers", conflicts_with_all=&["print_debug", "quiet"] )] pub list: bool, @@ -40,6 +35,19 @@ pub struct AuditProvidersCommand { conflicts_with_all=&["list", "quiet"] )] pub print_debug: bool, + + #[clap( + name = "TARGET_PATTERNS", + help = "Patterns to analyze", + required = true + )] + pub patterns: Vec, + + #[clap(flatten)] + pub target_cfg: TargetCfgWithUniverseOptions, + + #[clap(flatten)] + pub common_opts: CommonCommandOptions, } #[async_trait] diff --git a/app/buck2_audit/src/starlark/mod.rs b/app/buck2_audit/src/starlark.rs similarity index 100% rename from app/buck2_audit/src/starlark/mod.rs rename to app/buck2_audit/src/starlark.rs diff --git a/app/buck2_audit/src/starlark/module.rs b/app/buck2_audit/src/starlark/module.rs index e58ea8f3e4db5..8325b20a500d9 100644 --- a/app/buck2_audit/src/starlark/module.rs +++ b/app/buck2_audit/src/starlark/module.rs @@ -7,6 +7,7 @@ * of this source tree. */ +use buck2_client_ctx::common::target_cfg::TargetCfgUnusedOptions; use buck2_client_ctx::common::CommonCommandOptions; #[derive(Debug, clap::Parser, serde::Serialize, serde::Deserialize)] @@ -18,6 +19,10 @@ pub struct StarlarkModuleCommand { #[clap(name = "IMPORT_PATH", help = "Module import path")] pub import_path: String, + /// Command doesn't need these flags, but they are used in mode files, so we need to keep them. + #[clap(flatten)] + _target_cfg: TargetCfgUnusedOptions, + #[clap(flatten)] pub(crate) common_opts: CommonCommandOptions, } diff --git a/app/buck2_audit/src/starlark/package_deps.rs b/app/buck2_audit/src/starlark/package_deps.rs index 55caf084e21e2..381407101ec67 100644 --- a/app/buck2_audit/src/starlark/package_deps.rs +++ b/app/buck2_audit/src/starlark/package_deps.rs @@ -7,6 +7,7 @@ * of this source tree. */ +use buck2_client_ctx::common::target_cfg::TargetCfgUnusedOptions; use buck2_client_ctx::common::CommonCommandOptions; #[derive(Debug, clap::Parser, serde::Serialize, serde::Deserialize)] @@ -18,6 +19,10 @@ pub struct StarlarkPackageDepsCommand { #[clap(name = "PACKAGE", help = "Package")] pub package: String, + /// Command doesn't need these flags, but they are used in mode files, so we need to keep them. 
diff --git a/app/buck2_audit/src/starlark/package_deps.rs b/app/buck2_audit/src/starlark/package_deps.rs
index 55caf084e21e2..381407101ec67 100644
--- a/app/buck2_audit/src/starlark/package_deps.rs
+++ b/app/buck2_audit/src/starlark/package_deps.rs
@@ -7,6 +7,7 @@
  * of this source tree.
  */
 
+use buck2_client_ctx::common::target_cfg::TargetCfgUnusedOptions;
 use buck2_client_ctx::common::CommonCommandOptions;
 
 #[derive(Debug, clap::Parser, serde::Serialize, serde::Deserialize)]
@@ -18,6 +19,10 @@ pub struct StarlarkPackageDepsCommand {
     #[clap(name = "PACKAGE", help = "Package")]
     pub package: String,
 
+    /// Command doesn't need these flags, but they are used in mode files, so we need to keep them.
+    #[clap(flatten)]
+    _target_cfg: TargetCfgUnusedOptions,
+
     #[clap(flatten)]
     pub(crate) common_opts: CommonCommandOptions,
 }
diff --git a/app/buck2_audit/src/subtargets.rs b/app/buck2_audit/src/subtargets.rs
index 860181389a71b..226cbb9f63ccf 100644
--- a/app/buck2_audit/src/subtargets.rs
+++ b/app/buck2_audit/src/subtargets.rs
@@ -8,6 +8,7 @@
  */
 
 use async_trait::async_trait;
+use buck2_client_ctx::common::target_cfg::TargetCfgWithUniverseOptions;
 use buck2_client_ctx::common::CommonCommandOptions;
 
 use crate::AuditSubcommand;
@@ -16,13 +17,6 @@ use crate::AuditSubcommand;
 #[derive(Debug, clap::Parser, serde::Serialize, serde::Deserialize)]
 #[clap(name = "audit-subtargets")]
 pub struct AuditSubtargetsCommand {
-    #[clap(flatten)]
-    common_opts: CommonCommandOptions,
-
-    /// Patterns to analyze.
-    #[clap(name = "TARGET_PATTERNS")]
-    pub patterns: Vec<String>,
-
     /// Do not recursively print all nested subtargets; print only
     /// the first level. This is set to false by default.
     #[clap(long)]
@@ -31,6 +25,16 @@ pub struct AuditSubtargetsCommand {
     /// Print subtargets as JSON.
     #[clap(long)]
     pub json: bool,
+
+    /// Patterns to analyze.
+    #[clap(name = "TARGET_PATTERNS", required = true)]
+    pub patterns: Vec<String>,
+
+    #[clap(flatten)]
+    pub target_cfg: TargetCfgWithUniverseOptions,
+
+    #[clap(flatten)]
+    pub common_opts: CommonCommandOptions,
 }
 
 #[async_trait]
diff --git a/app/buck2_audit/src/visibility.rs b/app/buck2_audit/src/visibility.rs
index 87f9a762145b6..0458ded48f48d 100644
--- a/app/buck2_audit/src/visibility.rs
+++ b/app/buck2_audit/src/visibility.rs
@@ -8,6 +8,7 @@
  */
 
 use async_trait::async_trait;
+use buck2_client_ctx::common::target_cfg::TargetCfgUnusedOptions;
 use buck2_client_ctx::common::CommonCommandOptions;
 
 use crate::AuditSubcommand;
@@ -18,11 +19,15 @@ use crate::AuditSubcommand;
     about = "Verify the visibility for transitive deps of the specified target(s) on the unconfigured target graph"
 )]
 pub struct AuditVisibilityCommand {
-    #[clap(flatten)]
-    common_opts: CommonCommandOptions,
-
     #[clap(name = "TARGET_PATTERNS", help = "Target pattern(s) to analyze.")]
     pub patterns: Vec<String>,
+
+    /// Command doesn't need these flags, but they are used in mode files, so we need to keep them.
+ #[clap(flatten)] + _target_cfg: TargetCfgUnusedOptions, + + #[clap(flatten)] + common_opts: CommonCommandOptions, } #[async_trait] diff --git a/app/buck2_audit_server/BUCK b/app/buck2_audit_server/BUCK index cbbcb15fd5c32..34b63f87552d9 100644 --- a/app/buck2_audit_server/BUCK +++ b/app/buck2_audit_server/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -18,7 +17,6 @@ rust_library( "fbsource//third-party/rust:regex", "fbsource//third-party/rust:serde", "fbsource//third-party/rust:serde_json", - "fbsource//third-party/rust:thiserror", "//buck2/app/buck2_analysis:buck2_analysis", "//buck2/app/buck2_audit:buck2_audit", "//buck2/app/buck2_build_api:buck2_build_api", @@ -27,7 +25,9 @@ rust_library( "//buck2/app/buck2_common:buck2_common", "//buck2/app/buck2_core:buck2_core", "//buck2/app/buck2_data:buck2_data", + "//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_events:buck2_events", + "//buck2/app/buck2_execute:buck2_execute", "//buck2/app/buck2_interpreter:buck2_interpreter", "//buck2/app/buck2_node:buck2_node", "//buck2/app/buck2_query:buck2_query", diff --git a/app/buck2_audit_server/Cargo.toml b/app/buck2_audit_server/Cargo.toml index f9702dbccdea5..da974a7e6497e 100644 --- a/app/buck2_audit_server/Cargo.toml +++ b/app/buck2_audit_server/Cargo.toml @@ -1,8 +1,10 @@ [package] +description = "Server side of `buck2 audit`" +edition = "2021" +license = { workspace = true } name = "buck2_audit_server" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Server side of `buck2 audit`" [dependencies] anyhow = { workspace = true } @@ -13,25 +15,26 @@ indent_write = { workspace = true } indexmap = { workspace = true } itertools = { workspace = true } ref-cast = { workspace = true } -thiserror = { workspace = true } regex = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } starlark_map = { workspace = true } dice = { workspace = true } -gazebo = { workspace = true } dupe = { workspace = true } +gazebo = { workspace = true } buck2_analysis = { workspace = true } buck2_audit = { workspace = true } buck2_build_api = { workspace = true } -buck2_client_ctx = { workspace = true } buck2_cli_proto = { workspace = true } +buck2_client_ctx = { workspace = true } buck2_common = { workspace = true } buck2_core = { workspace = true } buck2_data = { workspace = true } +buck2_error = { workspace = true } buck2_events = { workspace = true } +buck2_execute = { workspace = true } buck2_interpreter = { workspace = true } buck2_node = { workspace = true } buck2_query = { workspace = true } diff --git a/app/buck2_audit_server/src/analysis_queries.rs b/app/buck2_audit_server/src/analysis_queries.rs index 7db2fb6bf6261..0b03eefbefa30 100644 --- a/app/buck2_audit_server/src/analysis_queries.rs +++ b/app/buck2_audit_server/src/analysis_queries.rs @@ -13,80 +13,74 @@ use async_trait::async_trait; use buck2_analysis::analysis::calculation::resolve_queries; use buck2_audit::analysis_queries::AuditAnalysisQueriesCommand; use buck2_cli_proto::ClientContext; -use buck2_common::dice::cells::HasCellResolver; -use buck2_common::dice::file_ops::HasFileOps; -use buck2_common::pattern::resolve::resolve_target_patterns; +use buck2_common::pattern::parse_from_cli::parse_and_resolve_patterns_from_cli_args; use buck2_core::pattern::pattern_type::TargetPatternExtra; -use buck2_core::target::label::TargetLabel; +use 
buck2_core::target::label::label::TargetLabel; use buck2_node::nodes::configured_frontend::ConfiguredTargetNodeCalculation; -use buck2_node::target_calculation::ConfiguredTargetCalculation; use buck2_server_ctx::ctx::ServerCommandContextTrait; use buck2_server_ctx::ctx::ServerCommandDiceContext; use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; -use buck2_server_ctx::pattern::parse_patterns_from_cli_args; -use buck2_server_ctx::pattern::target_platform_from_client_context; use dupe::Dupe; -use gazebo::prelude::*; -use crate::AuditSubcommand; +use crate::common::target_resolution_config::audit_command_target_resolution_config; +use crate::ServerAuditSubcommand; #[async_trait] -impl AuditSubcommand for AuditAnalysisQueriesCommand { +impl ServerAuditSubcommand for AuditAnalysisQueriesCommand { async fn server_execute( &self, server_ctx: &dyn ServerCommandContextTrait, mut stdout: PartialResultDispatcher, - client_ctx: ClientContext, + _client_ctx: ClientContext, ) -> anyhow::Result<()> { server_ctx - .with_dice_ctx(async move |server_ctx, mut ctx| { - let cells = ctx.get_cell_resolver().await?; + .with_dice_ctx(|server_ctx, mut ctx| async move { + let target_resolution_config = + audit_command_target_resolution_config(&mut ctx, &self.target_cfg, server_ctx) + .await?; - let global_target_platform = - target_platform_from_client_context(&client_ctx, server_ctx, &mut ctx).await?; - - let parsed_patterns = parse_patterns_from_cli_args::( - &mut ctx, - &self - .patterns - .map(|pat| buck2_data::TargetPattern { value: pat.clone() }), - server_ctx.working_dir(), - ) - .await?; let resolved_pattern = - resolve_target_patterns(&cells, &parsed_patterns, &ctx.file_ops()).await?; + parse_and_resolve_patterns_from_cli_args::( + &mut ctx, + &self.patterns, + server_ctx.working_dir(), + ) + .await?; let mut stdout = stdout.as_writer(); for (package, spec) in resolved_pattern.specs { match spec { - buck2_core::pattern::PackageSpec::Targets(targets) => { + buck2_core::pattern::pattern::PackageSpec::Targets(targets) => { for (target, TargetPatternExtra) in targets { let label = TargetLabel::new(package.dupe(), target.as_ref()); - let configured_target = ctx - .get_configured_target(&label, global_target_platform.as_ref()) - .await?; - let node = - ctx.get_configured_target_node(&configured_target).await?; - let node = node.require_compatible()?; - let query_results = resolve_queries(&ctx, &node).await?; - writeln!(stdout, "{}:", label)?; - for (query, result) in &query_results { - writeln!(stdout, " {}", query)?; - for (target, providers) in &**result { - writeln!(stdout, " {}", target.unconfigured())?; - if self.include_outputs { - let outputs = providers - .provider_collection() - .default_info() - .default_outputs_raw(); - writeln!(stdout, " {}", outputs)?; + for configured_target in target_resolution_config + .get_configured_target(&mut ctx, &label) + .await? + { + let node = + ctx.get_configured_target_node(&configured_target).await?; + let node = node.require_compatible()?; + let query_results = + resolve_queries(&mut ctx, node.as_ref()).await?; + writeln!(stdout, "{}:", label)?; + for (query, result) in &query_results { + writeln!(stdout, " {}", query)?; + for (target, providers) in &result.result { + writeln!(stdout, " {}", target.unconfigured())?; + if self.include_outputs { + let outputs = providers + .provider_collection() + .default_info()? 
+ .default_outputs_raw(); + writeln!(stdout, " {}", outputs)?; + } } } } } } - buck2_core::pattern::PackageSpec::All => { + buck2_core::pattern::pattern::PackageSpec::All => { unimplemented!() } } diff --git a/app/buck2_audit_server/src/cell.rs b/app/buck2_audit_server/src/cell.rs index 06138eca7dc23..c3b4c449af755 100644 --- a/app/buck2_audit_server/src/cell.rs +++ b/app/buck2_audit_server/src/cell.rs @@ -14,19 +14,20 @@ use buck2_audit::cell::AuditCellCommand; use buck2_build_api::audit_cell::AUDIT_CELL; use buck2_cli_proto::ClientContext; use buck2_common::dice::cells::HasCellResolver; -use buck2_core::cells::CellResolver; use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; use buck2_core::fs::project::ProjectRoot; use buck2_core::fs::project_rel_path::ProjectRelativePath; use buck2_server_ctx::ctx::ServerCommandContextTrait; use buck2_server_ctx::ctx::ServerCommandDiceContext; use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; +use dice::DiceComputations; +use futures::FutureExt; use indexmap::IndexMap; -use crate::AuditSubcommand; +use crate::ServerAuditSubcommand; #[async_trait] -impl AuditSubcommand for AuditCellCommand { +impl ServerAuditSubcommand for AuditCellCommand { async fn server_execute( &self, server_ctx: &dyn ServerCommandContextTrait, @@ -34,12 +35,12 @@ impl AuditSubcommand for AuditCellCommand { _client_ctx: ClientContext, ) -> anyhow::Result<()> { server_ctx - .with_dice_ctx(async move |server_ctx, ctx| { - let cells = ctx.get_cell_resolver().await?; + .with_dice_ctx(|server_ctx, mut ctx| async move { let fs = server_ctx.project_root(); let cwd = server_ctx.working_dir(); - let mappings = audit_cell(&self.aliases_to_resolve, self.aliases, &cells, cwd, fs)?; + let mappings = + audit_cell(&mut ctx, &self.aliases_to_resolve, self.aliases, cwd, fs).await?; let mut stdout = stdout.as_writer(); if self.paths_only { @@ -65,19 +66,19 @@ impl AuditSubcommand for AuditCellCommand { } } -pub(crate) fn audit_cell( - aliases_to_resolve: &Vec, +pub(crate) async fn audit_cell( + ctx: &mut DiceComputations<'_>, + aliases_to_resolve: &[String], aliases: bool, - cells: &CellResolver, cwd: &ProjectRelativePath, fs: &ProjectRoot, ) -> anyhow::Result> { - let this_cell = cells.get(cells.find(cwd)?).unwrap(); + let cells = ctx.get_cell_resolver().await?; + let this_resolver = ctx.get_cell_alias_resolver_for_dir(cwd).await?; let mappings: IndexMap<_, _> = { if aliases_to_resolve.is_empty() { if aliases { - this_cell - .cell_alias_resolver() + this_resolver .mappings() .map(|(alias, cell_name)| { ( @@ -104,7 +105,6 @@ pub(crate) fn audit_cell( .collect() } } else { - let cell_alias_resolver = this_cell.cell_alias_resolver(); aliases_to_resolve .iter() .map(|alias| { @@ -112,7 +112,7 @@ pub(crate) fn audit_cell( alias.to_owned(), fs.resolve( cells - .get(cell_alias_resolver.resolve(alias)?) + .get(this_resolver.resolve(alias)?) 
.unwrap() .path() .as_project_relative_path(), @@ -126,7 +126,7 @@ pub(crate) fn audit_cell( } pub(crate) fn init_audit_cell() { - AUDIT_CELL.init(|aliases_to_resolve, aliases, cells, cwd, fs| { - audit_cell(aliases_to_resolve, aliases, cells, cwd, fs) + AUDIT_CELL.init(|ctx, aliases_to_resolve, aliases, cwd, fs| { + audit_cell(ctx, aliases_to_resolve, aliases, cwd, fs).boxed() }); } diff --git a/app/buck2_audit_server/src/classpath.rs b/app/buck2_audit_server/src/classpath.rs index 2d5139c2048cb..6b4f11437377b 100644 --- a/app/buck2_audit_server/src/classpath.rs +++ b/app/buck2_audit_server/src/classpath.rs @@ -15,45 +15,47 @@ use buck2_build_api::actions::artifact::get_artifact_fs::GetArtifactFs; use buck2_build_api::configure_targets::load_compatible_patterns; use buck2_build_api::query::analysis::CLASSPATH_FOR_TARGETS; use buck2_cli_proto::ClientContext; +use buck2_common::pattern::parse_from_cli::parse_patterns_from_cli_args; use buck2_core::pattern::pattern_type::TargetPatternExtra; use buck2_node::load_patterns::MissingTargetBehavior; use buck2_server_ctx::ctx::ServerCommandContextTrait; use buck2_server_ctx::ctx::ServerCommandDiceContext; +use buck2_server_ctx::global_cfg_options::global_cfg_options_from_client_context; use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; -use buck2_server_ctx::pattern::parse_patterns_from_cli_args; -use buck2_server_ctx::pattern::target_platform_from_client_context; use dupe::Dupe; -use gazebo::prelude::SliceExt; +use futures::FutureExt; use indexmap::IndexMap; -use crate::AuditSubcommand; +use crate::ServerAuditSubcommand; #[async_trait] -impl AuditSubcommand for AuditClasspathCommand { +impl ServerAuditSubcommand for AuditClasspathCommand { async fn server_execute( &self, server_ctx: &dyn ServerCommandContextTrait, mut stdout: PartialResultDispatcher, - client_ctx: ClientContext, + _client_ctx: ClientContext, ) -> anyhow::Result<()> { server_ctx - .with_dice_ctx(async move |server_ctx, mut ctx| { + .with_dice_ctx(|server_ctx, mut ctx| async move { let cwd = server_ctx.working_dir(); let parsed_patterns = parse_patterns_from_cli_args::( &mut ctx, - &self - .patterns - .map(|pat| buck2_data::TargetPattern { value: pat.clone() }), + &self.patterns, cwd, ) .await?; - let target_platform = - target_platform_from_client_context(&client_ctx, server_ctx, &mut ctx).await?; + let global_cfg_options = global_cfg_options_from_client_context( + &self.target_cfg.target_cfg(), + server_ctx, + &mut ctx, + ) + .await?; // Incompatible targets are skipped because this is an audit command let targets = load_compatible_patterns( - &ctx, + &mut ctx, parsed_patterns, - target_platform, + &global_cfg_options, MissingTargetBehavior::Fail, ) .await?; @@ -64,16 +66,16 @@ impl AuditSubcommand for AuditClasspathCommand { // Json prints a map of targets to list of classpaths while default prints // classpaths for all targets. 
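
The JSON branch that follows fans out one analysis query per target and fails fast on the first error; the diff moves it off a bare `futures::future::try_join_all` onto DICE's `try_compute_join`, which performs the same fan-out while keeping each computation tied to its own DICE context. As a rough, buck2-free sketch of the fan-out shape being replaced (stub lookup; `futures` and `anyhow` only):

use futures::executor::block_on;
use futures::future::try_join_all;

// Stub for the per-target lookup; the real code goes through
// CLASSPATH_FOR_TARGETS via DICE. Names here are illustrative.
async fn classpath_for(target: &str) -> anyhow::Result<(String, Vec<String>)> {
    Ok((target.to_owned(), vec![format!("{target}.jar")]))
}

fn main() -> anyhow::Result<()> {
    let targets = ["//a:lib", "//b:lib"];
    // One future per target; the first Err cancels the whole join.
    let pairs = block_on(try_join_all(targets.iter().map(|t| classpath_for(t))))?;
    assert_eq!(pairs.len(), 2);
    Ok(())
}
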
if self.json { - let target_to_artifacts = - futures::future::try_join_all(targets.into_iter().map(|target| { - let ctx = &ctx; + let target_to_artifacts = ctx + .try_compute_join(targets.into_iter(), |ctx, target| { async move { let label = target.label().dupe(); let label_to_artifact = (CLASSPATH_FOR_TARGETS.get()?)(ctx, vec![label.dupe()]).await?; anyhow::Ok((label, label_to_artifact)) } - })) + .boxed() + }) .await?; let target_to_classpaths: anyhow::Result> = target_to_artifacts .into_iter() @@ -99,7 +101,7 @@ impl AuditSubcommand for AuditClasspathCommand { )?; } else { let label_to_artifact = (CLASSPATH_FOR_TARGETS.get()?)( - &ctx, + &mut ctx, targets.into_iter().map(|t| t.label().dupe()).collect(), ) .await?; diff --git a/app/buck2_audit_server/src/common.rs b/app/buck2_audit_server/src/common.rs new file mode 100644 index 0000000000000..26a2f4cf4f55c --- /dev/null +++ b/app/buck2_audit_server/src/common.rs @@ -0,0 +1,11 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub mod configured_target_labels; +pub mod target_resolution_config; diff --git a/app/buck2_audit_server/src/common/configured_target_labels.rs b/app/buck2_audit_server/src/common/configured_target_labels.rs new file mode 100644 index 0000000000000..8e0a81ef9cf96 --- /dev/null +++ b/app/buck2_audit_server/src/common/configured_target_labels.rs @@ -0,0 +1,46 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use buck2_client_ctx::common::target_cfg::TargetCfgWithUniverseOptions; +use buck2_core::pattern::pattern_type::ConfiguredTargetPatternExtra; +use buck2_core::target::configured_target_label::ConfiguredTargetLabel; +use buck2_server_ctx::ctx::ServerCommandContextTrait; +use buck2_server_ctx::pattern_parse_and_resolve::parse_and_resolve_patterns_to_targets_from_cli_args; +use dice::DiceComputations; + +use crate::common::target_resolution_config::audit_command_target_resolution_config; + +pub(crate) async fn audit_command_configured_target_labels( + ctx: &mut DiceComputations<'_>, + patterns: &[String], + target_cfg: &TargetCfgWithUniverseOptions, + server_ctx: &dyn ServerCommandContextTrait, +) -> anyhow::Result> { + let targets = + parse_and_resolve_patterns_to_targets_from_cli_args::( + ctx, + &patterns, + server_ctx.working_dir(), + ) + .await?; + + let target_resolution_config = + audit_command_target_resolution_config(ctx, target_cfg, server_ctx).await?; + + let mut configured_target_labels: Vec = Vec::new(); + for target in targets { + configured_target_labels.extend( + target_resolution_config + .get_configured_targets_for_configured_target_literals(ctx, &target) + .await?, + ); + } + + Ok(configured_target_labels) +} diff --git a/app/buck2_audit_server/src/common/target_resolution_config.rs b/app/buck2_audit_server/src/common/target_resolution_config.rs new file mode 100644 index 0000000000000..888aa80498578 --- /dev/null +++ b/app/buck2_audit_server/src/common/target_resolution_config.rs @@ -0,0 +1,27 @@ +/* + * Copyright (c) Meta Platforms, Inc. 
and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use buck2_client_ctx::common::target_cfg::TargetCfgWithUniverseOptions; +use buck2_server_ctx::ctx::ServerCommandContextTrait; +use buck2_server_ctx::target_resolution_config::TargetResolutionConfig; +use dice::DiceComputations; + +pub(crate) async fn audit_command_target_resolution_config( + ctx: &mut DiceComputations<'_>, + target_cfg: &TargetCfgWithUniverseOptions, + server_ctx: &dyn ServerCommandContextTrait, +) -> anyhow::Result { + TargetResolutionConfig::from_args( + ctx, + &target_cfg.target_cfg.target_cfg(), + server_ctx, + &target_cfg.target_universe, + ) + .await +} diff --git a/app/buck2_audit_server/src/config.rs b/app/buck2_audit_server/src/config.rs index f3a1304e83cb3..b9e7a94bcc51f 100644 --- a/app/buck2_audit_server/src/config.rs +++ b/app/buck2_audit_server/src/config.rs @@ -7,6 +7,7 @@ * of this source tree. */ +use std::collections::BTreeSet; use std::collections::HashMap; use std::io::Write; @@ -17,17 +18,20 @@ use buck2_audit::config::OutputFormat; use buck2_audit::config::ValueStyle; use buck2_cli_proto::ClientContext; use buck2_common::dice::cells::HasCellResolver; +use buck2_common::legacy_configs::configs::LegacyBuckConfig; +use buck2_common::legacy_configs::configs::LegacyBuckConfigLocation; +use buck2_common::legacy_configs::configs::LegacyBuckConfigValue; use buck2_common::legacy_configs::dice::HasLegacyConfigs; -use buck2_common::legacy_configs::LegacyBuckConfigLocation; -use buck2_common::legacy_configs::LegacyBuckConfigValue; use buck2_core::cells::name::CellName; +use buck2_core::cells::CellAliasResolver; use buck2_server_ctx::ctx::ServerCommandContextTrait; use buck2_server_ctx::ctx::ServerCommandDiceContext; use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; +use buck2_server_ctx::stdout_partial_output::StdoutPartialOutput; use gazebo::prelude::*; use serde_json::json; -use crate::AuditSubcommand; +use crate::ServerAuditSubcommand; fn print_location_string( writer: &mut impl Write, @@ -92,8 +96,218 @@ fn print_value( Ok(()) } +struct Match<'a> { + /// The original specification - important if we want to produce JSON keys. + /// Not present if it wasn't a key match, as then we can't express it. + spec: Option<&'a str>, + /// The cell, or otherwise require the cell passed in by the filterer. + cell: Option, + /// The section. + section: &'a str, + /// The key. Might be optional if the user did `foo` as their match. 
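
`Match` models one `buck2 audit config` spec: an optional `cell//` prefix followed by `section` or `section.key`. A standalone sketch of just the string-splitting half, with cell alias resolution stubbed out (buck2 runs the cell through `CellAliasResolver`):

// Returns (cell, section, key); a `None` cell means "default to the current cell".
fn parse_spec(spec: &str) -> (Option<&str>, &str, Option<&str>) {
    let (cell, config) = match spec.split_once("//") {
        Some((cell, config)) => (Some(cell), config),
        None => (None, spec),
    };
    match config.split_once('.') {
        Some((section, key)) if !key.is_empty() => (cell, section, Some(key)),
        Some((section, _)) => (cell, section, None),
        None => (cell, config, None),
    }
}

fn main() {
    assert_eq!(parse_spec("repo//buildfile.name"), (Some("repo"), "buildfile", Some("name")));
    assert_eq!(parse_spec("project"), (None, "project", None));
}
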
+ key: Option<&'a str>, +} + +impl<'a> Match<'a> { + fn parse(resolver: &CellAliasResolver, spec: &'a str) -> anyhow::Result { + let (cell, config) = match spec.split_once("//") { + Some((cell, config)) => (Some(resolver.resolve(cell)?), config), + None => (None, spec), + }; + let (section, key) = config.split1("."); + Ok(Self { + spec: if key == "" { None } else { Some(spec) }, + cell, + section, + key: if key == "" { None } else { Some(key) }, + }) + } + + fn filter( + &self, + default_cell: CellName, + cell: CellName, + section: &str, + key: &str, + ) -> Option { + if cell == self.cell.unwrap_or(default_cell) + && section == self.section + && self.key.map_or(true, |k| k == key) + { + Some( + self.spec + .map_or_else(|| format!("{section}.{key}"), str::to_owned), + ) + } else { + None + } + } +} + +struct Matches<'a> { + matches: Vec>, +} + +impl<'a> Matches<'a> { + fn parse(resolver: &CellAliasResolver, specs: &'a [String]) -> anyhow::Result { + Ok(Self { + matches: specs.try_map(|x| Match::parse(resolver, x))?, + }) + } + + fn filter( + &self, + default_cell: CellName, + cell: CellName, + section: &str, + key: &str, + ) -> Option { + if self.matches.is_empty() { + if cell == default_cell { + Some(format!("{section}.{key}")) + } else { + None + } + } else { + self.matches + .iter() + .find_map(|x| x.filter(default_cell, cell, section, key)) + } + } + + fn cells(&self) -> BTreeSet { + self.matches.iter().filter_map(|x| x.cell).collect() + } +} + +trait CellConfigRenderer { + fn render_cell_header(&mut self, cell: CellName) -> anyhow::Result<()>; + fn render_section_header(&mut self, section: &str) -> anyhow::Result<()>; + fn render_config_key( + &mut self, + spec: &str, + cell: CellName, + section: &str, + key: &str, + value: LegacyBuckConfigValue<'_>, + ) -> anyhow::Result<()>; + fn flush(&mut self) -> anyhow::Result<()>; +} + +struct SimpleCellConfigRenderer<'a> { + stdout: StdoutPartialOutput<'a>, + render_cell_headers: bool, + value_style: ValueStyle, + location_style: LocationStyle, +} + +impl<'a> CellConfigRenderer for SimpleCellConfigRenderer<'a> { + fn render_cell_header(&mut self, cell: CellName) -> anyhow::Result<()> { + if self.render_cell_headers { + writeln!(&mut self.stdout, "# Cell: {cell}")?; + } + + Ok(()) + } + + fn render_section_header(&mut self, section: &str) -> anyhow::Result<()> { + writeln!(&mut self.stdout, "[{section}]")?; + + Ok(()) + } + + fn render_config_key( + &mut self, + _spec: &str, + _cell: CellName, + _section: &str, + key: &str, + value: LegacyBuckConfigValue<'_>, + ) -> anyhow::Result<()> { + print_value(&mut self.stdout, key, &value, self.value_style)?; + print_location(&mut self.stdout, &value, self.location_style)?; + + Ok(()) + } + + fn flush(&mut self) -> anyhow::Result<()> { + Ok(()) + } +} + +struct JsonCellConfigRenderer<'a> { + stdout: StdoutPartialOutput<'a>, + scope_keys_to_cell: bool, + json_output: HashMap, +} + +impl<'a> CellConfigRenderer for JsonCellConfigRenderer<'a> { + fn render_cell_header(&mut self, _cell: CellName) -> anyhow::Result<()> { + Ok(()) + } + + fn render_section_header(&mut self, _section: &str) -> anyhow::Result<()> { + Ok(()) + } + + fn render_config_key( + &mut self, + spec: &str, + cell: CellName, + _section: &str, + _key: &str, + value: LegacyBuckConfigValue<'_>, + ) -> anyhow::Result<()> { + let key = if self.scope_keys_to_cell && !spec.contains("//") { + format!("{cell}//{spec}") + } else { + spec.to_owned() + }; + + self.json_output.insert(key, value.as_str().to_owned()); + + Ok(()) + } + + fn flush(&mut 
self) -> anyhow::Result<()> { + writeln!(&mut self.stdout, "{}", json!(self.json_output))?; + + Ok(()) + } +} + +fn render_cell_config( + renderer: &mut dyn CellConfigRenderer, + relevant_cell: Option, + cell: CellName, + cell_config: LegacyBuckConfig, + specs: &Matches<'_>, +) -> anyhow::Result<()> { + let mut rendered_cell_header = false; + for (section, values) in cell_config.all_sections() { + let mut rendered_section_header = false; + for (key, value) in values.iter() { + if let Some(spec) = specs.filter(relevant_cell.unwrap_or(cell), cell, section, key) { + if !rendered_cell_header { + renderer.render_cell_header(cell)?; + rendered_cell_header = true; + } + + if !rendered_section_header { + renderer.render_section_header(section.as_str())?; + rendered_section_header = true; + } + + renderer.render_config_key(&spec, cell, section, key, value)?; + } + } + } + + Ok(()) +} + #[async_trait] -impl AuditSubcommand for AuditConfigCommand { +impl ServerAuditSubcommand for AuditConfigCommand { async fn server_execute( &self, server_ctx: &dyn ServerCommandContextTrait, @@ -101,102 +315,67 @@ impl AuditSubcommand for AuditConfigCommand { _client_ctx: ClientContext, ) -> anyhow::Result<()> { server_ctx - .with_dice_ctx(async move |server_ctx, ctx| { + .with_dice_ctx(|server_ctx, mut ctx| async move { let cwd = server_ctx.working_dir(); let cell_resolver = ctx.get_cell_resolver().await?; + let cell_alias_resolver = ctx.get_cell_alias_resolver_for_dir(cwd).await?; - let working_dir_cell = cell_resolver.find(cwd)?; - let cell_alias_resolver = cell_resolver - .get(working_dir_cell) - .unwrap() - .cell_alias_resolver(); - - let relevant_cell = match &self.cell { - Some(v) => v, - None => "", + let stdout = stdout.as_writer(); + let mut renderer: Box = match self.output_format() { + OutputFormat::Simple => Box::new(SimpleCellConfigRenderer { + stdout, + render_cell_headers: self.all_cells, + value_style: self.value_style, + location_style: self.location_style, + }), + OutputFormat::Json => Box::new(JsonCellConfigRenderer { + stdout, + scope_keys_to_cell: self.all_cells, + json_output: HashMap::new(), + }), }; - let resolved_relevant_cell = cell_alias_resolver.resolve(relevant_cell)?; - - let config = ctx.get_legacy_configs().await?; - - let specs = self.specs.try_map(|v| { - let (cell, config) = match v.split_once("//") { - Some((cell, config)) => (cell_alias_resolver.resolve(cell)?, config), - None => (resolved_relevant_cell, v.as_str()), - }; - let (section, key) = config.split1("."); - anyhow::Ok((cell, section, key, v)) - })?; - - let filter = move |cell: CellName, section: &str, key: &str| { - if specs.is_empty() { - if cell == resolved_relevant_cell { - Some(format!("{}.{}", section, key)) - } else { - None - } - } else { - for (filter_cell, filter_section, filter_key, spec) in &specs { - if cell == *filter_cell - && §ion == filter_section - && (filter_key == &"" || &key == filter_key) - { - return if filter_key == &"" { - Some(format!("{}.{}", section, key)) - } else { - Some((*spec).to_owned()) - }; - } - } - None + let specs = Matches::parse(&cell_alias_resolver, &self.specs)?; + + if self.all_cells { + for (cell, _) in cell_resolver.cells() { + let cell_config = ctx.get_legacy_config_for_cell(cell).await?; + render_cell_config(renderer.as_mut(), None, cell, cell_config, &specs)?; } - }; + } else { + let cell = + cell_alias_resolver.resolve(self.cell.as_deref().unwrap_or_default())?; + + { + // Render the target cell first + let cell_config = ctx.get_legacy_config_for_cell(cell).await?; + 
render_cell_config( + renderer.as_mut(), + Some(cell), + cell, + cell_config, + &specs, + )?; + } + + // Allow callers to specify a "cell//" spec without --all-cells + let mut cells_to_render = specs.cells(); + cells_to_render.remove(&cell); + let relevant_cell = Some(cell); - let mut stdout = stdout.as_writer(); - - match self.output_format() { - OutputFormat::Json => writeln!( - &mut stdout, - "{}", - json!( - config - .iter() - .flat_map(|(cell, cell_config)| cell_config - .all_sections() - .map(move |(section, cfg)| (cell, section, cfg))) - .flat_map(|(cell, section, cfg)| { - cfg.iter() - .filter_map(|(key, value)| { - filter(cell, section, key) - .map(|spec| (spec, value.as_str().to_owned())) - }) - .collect::>() - }) - .collect::>() - ) - )?, - OutputFormat::Simple => { - for (cell, cell_config) in config.iter() { - for section in cell_config.sections() { - let mut printed_section = false; - let values = cell_config.get_section(section).unwrap(); - for (key, value) in values.iter() { - if filter(cell, section, key).is_some() { - if !printed_section { - writeln!(&mut stdout, "[{}]", section)?; - printed_section = true; - } - print_value(&mut stdout, key, &value, self.value_style)?; - print_location(&mut stdout, &value, self.location_style)?; - } - } - } - } + for cell in cells_to_render { + let cell_config = ctx.get_legacy_config_for_cell(cell).await?; + render_cell_config( + renderer.as_mut(), + relevant_cell, + cell, + cell_config, + &specs, + )?; } } - Ok(()) + renderer.flush() }) .await } diff --git a/app/buck2_audit_server/src/configurations.rs b/app/buck2_audit_server/src/configurations.rs index a6c92d2cd94d7..2ca5269c45190 100644 --- a/app/buck2_audit_server/src/configurations.rs +++ b/app/buck2_audit_server/src/configurations.rs @@ -18,10 +18,10 @@ use buck2_server_ctx::ctx::ServerCommandContextTrait; use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; use itertools::Itertools; -use crate::AuditSubcommand; +use crate::ServerAuditSubcommand; #[async_trait] -impl AuditSubcommand for AuditConfigurationsCommand { +impl ServerAuditSubcommand for AuditConfigurationsCommand { async fn server_execute( &self, _server_ctx: &dyn ServerCommandContextTrait, diff --git a/app/buck2_audit_server/src/deferred_materializer.rs b/app/buck2_audit_server/src/deferred_materializer.rs index e386151679961..aa5b89d4c0dc7 100644 --- a/app/buck2_audit_server/src/deferred_materializer.rs +++ b/app/buck2_audit_server/src/deferred_materializer.rs @@ -14,14 +14,15 @@ use async_trait::async_trait; use buck2_audit::deferred_materializer::DeferredMaterializerCommand; use buck2_audit::deferred_materializer::DeferredMaterializerSubcommand; use buck2_cli_proto::ClientContext; +use buck2_execute::materialize::materializer::DeferredMaterializerIterItem; use buck2_server_ctx::ctx::ServerCommandContextTrait; use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; use futures::stream::StreamExt; -use crate::AuditSubcommand; +use crate::ServerAuditSubcommand; #[async_trait] -impl AuditSubcommand for DeferredMaterializerCommand { +impl ServerAuditSubcommand for DeferredMaterializerCommand { async fn server_execute( &self, server_ctx: &dyn ServerCommandContextTrait, @@ -41,8 +42,26 @@ impl AuditSubcommand for DeferredMaterializerCommand { .iterate() .context("Failed to start iterating")?; - while let Some((path, entry)) = stream.next().await { - writeln!(stdout, "{}\t{}", path, entry)?; + while let Some(DeferredMaterializerIterItem { + artifact_path, + artifact_display, + deps, + 
}) = stream.next().await + { + writeln!(stdout, "{artifact_path}\t{artifact_display}")?; + writeln!(stdout, " deps: {}", deps.len())?; + for (dep_path, dep_kind) in deps { + writeln!(stdout, " {dep_path} {dep_kind}")?; + } + } + } + DeferredMaterializerSubcommand::ListSubscriptions => { + let mut stream = deferred_materializer + .list_subscriptions() + .context("Failed to start listing subscriptions")?; + + while let Some(path) = stream.next().await { + writeln!(stdout, "{}", path)?; } } DeferredMaterializerSubcommand::Fsck => { diff --git a/app/buck2_audit_server/src/dep_files.rs b/app/buck2_audit_server/src/dep_files.rs index 25a440026d320..7aaf73552e1d1 100644 --- a/app/buck2_audit_server/src/dep_files.rs +++ b/app/buck2_audit_server/src/dep_files.rs @@ -12,35 +12,37 @@ use async_trait::async_trait; use buck2_audit::dep_files::AuditDepFilesCommand; use buck2_build_api::audit_dep_files::AUDIT_DEP_FILES; use buck2_cli_proto::ClientContext; -use buck2_core::category::Category; +use buck2_common::pattern::parse_from_cli::parse_patterns_from_cli_args; +use buck2_core::category::CategoryRef; use buck2_core::pattern::pattern_type::TargetPatternExtra; use buck2_node::target_calculation::ConfiguredTargetCalculation; use buck2_server_ctx::ctx::ServerCommandContextTrait; use buck2_server_ctx::ctx::ServerCommandDiceContext; +use buck2_server_ctx::global_cfg_options::global_cfg_options_from_client_context; use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; -use buck2_server_ctx::pattern::parse_patterns_from_cli_args; -use buck2_server_ctx::pattern::target_platform_from_client_context; -use crate::AuditSubcommand; +use crate::ServerAuditSubcommand; #[async_trait] -impl AuditSubcommand for AuditDepFilesCommand { +impl ServerAuditSubcommand for AuditDepFilesCommand { async fn server_execute( &self, server_ctx: &dyn ServerCommandContextTrait, mut stdout: PartialResultDispatcher, - client_ctx: ClientContext, + _client_ctx: ClientContext, ) -> anyhow::Result<()> { server_ctx - .with_dice_ctx(async move |server_ctx, mut ctx| { - let target_platform = - target_platform_from_client_context(&client_ctx, server_ctx, &mut ctx).await?; + .with_dice_ctx(|server_ctx, mut ctx| async move { + let global_cfg_options = global_cfg_options_from_client_context( + &self.target_cfg.target_cfg(), + server_ctx, + &mut ctx, + ) + .await?; let label = parse_patterns_from_cli_args::( &mut ctx, - &[buck2_data::TargetPattern { - value: self.pattern.clone(), - }], + &[self.pattern.clone()], server_ctx.working_dir(), ) .await? 
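
`audit dep-files` takes exactly one pattern and, as the next hunk shows, narrows it to a single `TargetLabel` via `as_target_label`. Outside buck2's real parser (which also handles cell aliases, relative paths, and provider syntax), the check amounts to something like this hypothetical simplification:

// Illustrative only: "cell//pkg:name" -> (cell, pkg, name).
fn as_target_label(pattern: &str) -> anyhow::Result<(String, String, String)> {
    let (cell_and_pkg, name) = pattern
        .rsplit_once(':')
        .ok_or_else(|| anyhow::anyhow!("`{pattern}` does not name a target"))?;
    let (cell, pkg) = cell_and_pkg
        .split_once("//")
        .ok_or_else(|| anyhow::anyhow!("`{pattern}` is missing `//`"))?;
    // An empty name ("cell//pkg:") would match every target in the package.
    anyhow::ensure!(!name.is_empty(), "`{pattern}` must name a single target");
    Ok((cell.to_owned(), pkg.to_owned(), name.to_owned()))
}

fn main() -> anyhow::Result<()> {
    let (cell, pkg, name) = as_target_label("fbcode//buck2/app:buck2_audit")?;
    assert_eq!(
        (cell.as_str(), pkg.as_str(), name.as_str()),
        ("fbcode", "buck2/app", "buck2_audit")
    );
    Ok(())
}
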
@@ -50,10 +52,10 @@ impl AuditSubcommand for AuditDepFilesCommand { .as_target_label(&self.pattern)?; let label = ctx - .get_configured_target(&label, target_platform.as_ref()) + .get_configured_target_post_transition(&label, &global_cfg_options) .await?; - let category = Category::try_from(self.category.as_str())?; + let category = CategoryRef::new(self.category.as_str())?.to_owned(); (AUDIT_DEP_FILES.get()?)( &ctx, diff --git a/app/buck2_audit_server/src/execution_platform_resolution.rs b/app/buck2_audit_server/src/execution_platform_resolution.rs index 521d747aef9e9..70d4ab88c321d 100644 --- a/app/buck2_audit_server/src/execution_platform_resolution.rs +++ b/app/buck2_audit_server/src/execution_platform_resolution.rs @@ -12,99 +12,73 @@ use std::io::Write; use async_trait::async_trait; use buck2_audit::execution_platform_resolution::AuditExecutionPlatformResolutionCommand; use buck2_cli_proto::ClientContext; -use buck2_core::configuration::bound_id::BoundConfigurationId; -use buck2_core::configuration::data::ConfigurationData; -use buck2_core::pattern::pattern_type::ConfigurationPredicate; -use buck2_core::pattern::pattern_type::ConfiguredTargetPatternExtra; -use buck2_core::pattern::ParsedPattern; -use buck2_core::target::label::TargetLabel; -use buck2_node::load_patterns::load_patterns; -use buck2_node::load_patterns::MissingTargetBehavior; +use buck2_node::execution::GetExecutionPlatforms; +use buck2_node::execution::EXECUTION_PLATFORMS_BUCKCONFIG; use buck2_node::nodes::configured_frontend::ConfiguredTargetNodeCalculation; -use buck2_node::target_calculation::ConfiguredTargetCalculation; use buck2_server_ctx::ctx::ServerCommandContextTrait; use buck2_server_ctx::ctx::ServerCommandDiceContext; use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; -use buck2_server_ctx::pattern::target_platform_from_client_context; -use buck2_server_ctx::pattern::PatternParser; use indent_write::io::IndentWriter; -use crate::AuditSubcommand; - -#[derive(Debug, thiserror::Error)] -enum AuditExecutionPlatformResolutionCommandError { - #[error("Builtin configurations are not supported: `{0}`")] - BuiltinConfigurationsNotSupported(String), - #[error( - "Patterns with configuration label without configuration hash are not supported: `{0}`" - )] - ConfigurationLabelWithoutHashNotSupported(String), -} +use crate::common::configured_target_labels::audit_command_configured_target_labels; +use crate::ServerAuditSubcommand; #[async_trait] -impl AuditSubcommand for AuditExecutionPlatformResolutionCommand { +impl ServerAuditSubcommand for AuditExecutionPlatformResolutionCommand { async fn server_execute( &self, server_ctx: &dyn ServerCommandContextTrait, mut stdout: PartialResultDispatcher, - client_ctx: ClientContext, + _client_ctx: ClientContext, ) -> anyhow::Result<()> { - server_ctx.with_dice_ctx( - async move |server_ctx, mut ctx| { - let pattern_parser = PatternParser::new( + server_ctx + .with_dice_ctx(|server_ctx, mut ctx| async move { + let configured_patterns = audit_command_configured_target_labels( &mut ctx, - server_ctx.working_dir(), - ).await?; - - let mut configured_patterns = Vec::new(); - let mut target_patterns = Vec::new(); - for pat in self.patterns.iter() { - let pat = pattern_parser.parse_pattern::(pat)?; - match pat.clone() { - ParsedPattern::Package(pkg) => target_patterns.push(ParsedPattern::Package(pkg)), - ParsedPattern::Recursive(path) => target_patterns.push(ParsedPattern::Recursive(path)), - ParsedPattern::Target(pkg, target_name, extra) => { - match extra.cfg { - 
ConfigurationPredicate::Any => target_patterns.push(ParsedPattern::Target(pkg, target_name, extra)), - ConfigurationPredicate::Builtin(_) => return Err(AuditExecutionPlatformResolutionCommandError::BuiltinConfigurationsNotSupported(pat.to_string()).into()), - ConfigurationPredicate::Bound(_label, None) => return Err(AuditExecutionPlatformResolutionCommandError::ConfigurationLabelWithoutHashNotSupported(pat.to_string()).into()), - ConfigurationPredicate::Bound(label, Some(hash)) => { - let cfg = ConfigurationData::lookup_bound(BoundConfigurationId { label, hash })?; - configured_patterns.push(TargetLabel::new(pkg, target_name.as_ref()).configure(cfg)); - } - } - } - } - } - - let loaded_patterns = load_patterns(&ctx, target_patterns, MissingTargetBehavior::Fail).await?; - let target_platform = target_platform_from_client_context( - &client_ctx, + &self.patterns, + &self.target_cfg, server_ctx, - &mut ctx, ) .await?; - for (_, targets) in loaded_patterns.into_iter() { - for (_, node) in targets? { - configured_patterns.push( - ctx.get_configured_target(node.label(), target_platform.as_ref()) - .await?, - ); + let mut stdout = stdout.as_writer(); + + match ctx.get_execution_platforms().await? { + None => { + writeln!( + stdout, + "Execution platforms are not configured: {} unset", + EXECUTION_PLATFORMS_BUCKCONFIG + )?; + writeln!(stdout, "Using legacy execution platform")?; + } + Some(platforms) => { + writeln!( + stdout, + "Checking each target against execution platforms defined by {}", + platforms.execution_platforms_target() + )?; } } - let mut stdout = stdout.as_writer(); - for configured_target in configured_patterns { - let configured_node = ctx.get_configured_target_node(&configured_target).await?; + // This calls `get_internal_configured_target_node` rather than + // `get_configured_target_node` because exec platform resolution operates + // on `get_internal_configured_target_node`. 
+ let configured_node = ctx + .get_internal_configured_target_node(&configured_target) + .await?; let configured_node = configured_node.require_compatible()?; writeln!(stdout, "{}:", configured_target)?; let resolution = configured_node.execution_platform_resolution(); match resolution.platform() { Ok(platform) => { writeln!(stdout, " Execution platform: {}", platform.id())?; - writeln!(stdout, " Execution platform configuration: {}", platform.cfg())?; + writeln!( + stdout, + " Execution platform configuration: {}", + platform.cfg() + )?; writeln!(stdout, " Execution deps:")?; for execution_dep in configured_node.exec_deps() { writeln!(stdout, " {}", execution_dep.label())?; @@ -113,6 +87,10 @@ impl AuditSubcommand for AuditExecutionPlatformResolutionCommand { for toolchain_dep in configured_node.toolchain_deps() { writeln!(stdout, " {}", toolchain_dep.label())?; } + writeln!(stdout, " Configuration deps:")?; + for config_dep in configured_node.configuration_deps() { + writeln!(stdout, " {}", config_dep.label())?; + } for (label, reason) in resolution.skipped() { writeln!(stdout, " Skipped {}", label)?; writeln!(IndentWriter::new(" ", &mut stdout), "{:#}", reason)?; @@ -124,6 +102,6 @@ impl AuditSubcommand for AuditExecutionPlatformResolutionCommand { Ok(()) }) - .await + .await } } diff --git a/app/buck2_audit_server/src/includes.rs b/app/buck2_audit_server/src/includes.rs index 517619e7f188c..55337c539dfb7 100644 --- a/app/buck2_audit_server/src/includes.rs +++ b/app/buck2_audit_server/src/includes.rs @@ -13,8 +13,6 @@ use async_trait::async_trait; use buck2_audit::includes::AuditIncludesCommand; use buck2_cli_proto::ClientContext; use buck2_common::dice::cells::HasCellResolver; -use buck2_common::result::SharedResult; -use buck2_common::result::ToSharedResultExt; use buck2_core::bzl::ImportPath; use buck2_core::cells::cell_path::CellPath; use buck2_core::cells::CellResolver; @@ -29,17 +27,18 @@ use buck2_interpreter::load_module::InterpreterCalculation; use buck2_interpreter::paths::module::StarlarkModulePath; use buck2_node::nodes::eval_result::EvaluationResult; use buck2_node::nodes::frontend::TargetGraphCalculation; -use buck2_query::query::environment::LabeledNode; -use buck2_query::query::environment::NodeLabel; +use buck2_query::query::graph::node::LabeledNode; +use buck2_query::query::graph::node::NodeKey; +use buck2_query::query::graph::successors::AsyncChildVisitor; use buck2_query::query::traversal::async_depth_first_postorder_traversal; use buck2_query::query::traversal::AsyncNodeLookup; -use buck2_query::query::traversal::AsyncTraversalDelegate; use buck2_query::query::traversal::ChildVisitor; use buck2_server_ctx::ctx::ServerCommandContextTrait; use buck2_server_ctx::ctx::ServerCommandDiceContext; use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; use derive_more::Display; use dice::DiceComputations; +use dice::LinearRecomputeDiceComputations; use dupe::Dupe; use futures::stream::FuturesOrdered; use futures::StreamExt; @@ -50,11 +49,10 @@ use ref_cast::RefCast; use serde::ser::SerializeMap; use serde::Serialize; use serde::Serializer; -use thiserror::Error; -use crate::AuditSubcommand; +use crate::ServerAuditSubcommand; -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum AuditIncludesError { #[error("Error loading buildfile for `{0}` found a mismatched buildfile name (`{1}`)")] WrongBuildfilePath(CellPath, FileNameBuf), @@ -63,7 +61,7 @@ enum AuditIncludesError { } async fn get_transitive_includes( - ctx: &DiceComputations, + ctx: &mut 
DiceComputations<'_>, load_result: &EvaluationResult, ) -> anyhow::Result> { // We define a simple graph of LoadedModules to traverse. @@ -83,70 +81,72 @@ async fn get_transitive_includes( #[repr(transparent)] struct NodeRef(ImportPath); - impl NodeLabel for NodeRef {} + impl NodeKey for NodeRef {} impl LabeledNode for Node { - type NodeRef = NodeRef; + type Key = NodeRef; - fn node_ref(&self) -> &NodeRef { + fn node_key(&self) -> &NodeRef { NodeRef::ref_cast(self.import_path()) } } - struct Lookup<'a> { - ctx: &'a DiceComputations, + struct Lookup<'a, 'd> { + ctx: &'a LinearRecomputeDiceComputations<'d>, } #[async_trait] - impl AsyncNodeLookup for Lookup<'_> { + impl AsyncNodeLookup for Lookup<'_, '_> { async fn get(&self, label: &NodeRef) -> anyhow::Result { Ok(Node( self.ctx + .get() .get_loaded_module(StarlarkModulePath::LoadFile(&label.0)) .await?, )) } } - struct Delegate { - imports: Vec, - } + let mut imports: Vec = Vec::new(); + struct Delegate; - #[async_trait] - impl AsyncTraversalDelegate for Delegate { - fn visit(&mut self, target: Node) -> anyhow::Result<()> { - self.imports.push(target.import_path().clone()); - Ok(()) - } + let visit = |target: Node| { + imports.push(target.import_path().clone()); + Ok(()) + }; + impl AsyncChildVisitor for Delegate { async fn for_each_child( - &mut self, + &self, target: &Node, - func: &mut dyn ChildVisitor, + mut func: impl ChildVisitor, ) -> anyhow::Result<()> { for import in target.0.imports() { - func.visit(NodeRef(import.clone()))?; + func.visit(&NodeRef(import.clone()))?; } Ok(()) } } - let mut delegate = Delegate { imports: vec![] }; - let lookup = Lookup { ctx }; + ctx.with_linear_recompute(|ctx| async move { + let lookup = Lookup { ctx: &ctx }; - async_depth_first_postorder_traversal( - &lookup, - load_result.imports().map(NodeRef::ref_cast), - &mut delegate, - ) + async_depth_first_postorder_traversal( + &lookup, + load_result.imports().map(NodeRef::ref_cast), + Delegate, + visit, + ) + .await + }) .await?; - Ok(delegate.imports) + Ok(imports) } async fn load_and_collect_includes( - ctx: &mut DiceComputations, + ctx: &mut DiceComputations<'_>, path: &CellPath, -) -> SharedResult> { +) -> buck2_error::Result> { let parent = path .parent() .ok_or_else(|| anyhow::anyhow!(AuditIncludesError::InvalidPath(path.clone())))?; @@ -163,8 +163,8 @@ async fn load_and_collect_includes( return Err(anyhow::anyhow!(AuditIncludesError::WrongBuildfilePath( path.clone(), buildfile_name.to_owned(), - ))) - .shared_error(); + )) + .into()); } Ok(get_transitive_includes(ctx, &load_result).await?) @@ -191,7 +191,7 @@ fn resolve_path( } #[async_trait] -impl AuditSubcommand for AuditIncludesCommand { +impl ServerAuditSubcommand for AuditIncludesCommand { async fn server_execute( &self, server_ctx: &dyn ServerCommandContextTrait, @@ -199,7 +199,7 @@ impl AuditSubcommand for AuditIncludesCommand { _client_ctx: ClientContext, ) -> anyhow::Result<()> { server_ctx - .with_dice_ctx(async move |server_ctx, ctx| { + .with_dice_ctx(|server_ctx, mut ctx| async move { let cells = ctx.get_cell_resolver().await?; let cwd = server_ctx.working_dir(); let current_cell = cells.get(cells.find(cwd)?)?; @@ -225,7 +225,7 @@ impl AuditSubcommand for AuditIncludesCommand { }) .collect(); - let results: Vec<(_, SharedResult>)> = futures.collect().await; + let results: Vec<(_, buck2_error::Result>)> = futures.collect().await; // This is expected to not return any errors, and so we're not careful about not propagating it. 
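
The traversal above is buck2's generic async depth-first postorder walk, now driven by an `AsyncChildVisitor` plus a plain `visit` closure instead of the old `AsyncTraversalDelegate`. The ordering contract, that a module is emitted only after everything it loads, is easiest to see in a synchronous miniature (std only; the module names are made up):

use std::collections::{HashMap, HashSet};

fn postorder(graph: &HashMap<&str, Vec<&str>>, roots: &[&str]) -> Vec<String> {
    fn visit<'a>(
        graph: &HashMap<&'a str, Vec<&'a str>>,
        node: &'a str,
        seen: &mut HashSet<&'a str>,
        out: &mut Vec<String>,
    ) {
        // Each node is visited at most once, even with shared imports.
        if !seen.insert(node) {
            return;
        }
        for &child in graph.get(node).into_iter().flatten() {
            visit(graph, child, seen, out);
        }
        // Postorder: emit a module only after its transitive imports.
        out.push(node.to_owned());
    }
    let mut seen = HashSet::new();
    let mut out = Vec::new();
    for &root in roots {
        visit(graph, root, &mut seen, &mut out);
    }
    out
}

fn main() {
    let graph = HashMap::from([
        ("BUCK", vec!["defs.bzl"]),
        ("defs.bzl", vec!["util.bzl"]),
        ("util.bzl", vec![]),
    ]);
    assert_eq!(postorder(&graph, &["BUCK"]), ["util.bzl", "defs.bzl", "BUCK"]);
}
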
let to_absolute_path = move |include: ImportPath| -> anyhow::Result<_> { let include = include.path(); @@ -234,10 +234,10 @@ impl AuditSubcommand for AuditIncludesCommand { Ok(fs.resolve(&path)) }; let absolutize_paths = - |paths: Vec| -> SharedResult> { + |paths: Vec| -> buck2_error::Result> { Ok(paths.into_try_map(&to_absolute_path)?) }; - let results: Vec<(String, SharedResult>)> = results + let results: Vec<(String, buck2_error::Result>)> = results .into_map(|(path, includes)| (path, includes.and_then(absolutize_paths))); let mut stdout = stdout.as_writer(); diff --git a/app/buck2_audit_server/src/lib.rs b/app/buck2_audit_server/src/lib.rs index f9df75d946e46..f21841b17cf23 100644 --- a/app/buck2_audit_server/src/lib.rs +++ b/app/buck2_audit_server/src/lib.rs @@ -7,9 +7,9 @@ * of this source tree. */ -#![feature(async_closure)] +#![feature(error_generic_member_access)] #![feature(try_blocks)] -#![feature(provide_any)] +#![allow(clippy::comparison_to_empty)] use std::sync::Once; @@ -22,6 +22,7 @@ use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; mod analysis_queries; mod cell; mod classpath; +mod common; mod config; mod configurations; pub mod deferred_materializer; @@ -32,7 +33,7 @@ pub mod output; mod package_values; mod prelude; mod providers; -pub mod server; +mod server; mod starlark; mod subtargets; mod visibility; @@ -46,7 +47,7 @@ mod visibility; /// Audit subcommands implement this trait so that we can handle the entire client side /// logic here and to support that serialization to the daemon. #[async_trait] -pub trait AuditSubcommand: Send + Sync + 'static { +pub trait ServerAuditSubcommand: Send + Sync + 'static { async fn server_execute( &self, server_ctx: &dyn ServerCommandContextTrait, @@ -63,7 +64,7 @@ pub trait AuditCommandExt { stdout: PartialResultDispatcher, client_server_ctx: ClientContext, ) -> anyhow::Result<()>; - fn as_subcommand(&self) -> &dyn AuditSubcommand; + fn as_subcommand(&self) -> &dyn ServerAuditSubcommand; } #[async_trait] @@ -78,7 +79,7 @@ impl AuditCommandExt for AuditCommand { .server_execute(server_ctx, stdout, client_server_ctx) .await } - fn as_subcommand(&self) -> &dyn AuditSubcommand { + fn as_subcommand(&self) -> &dyn ServerAuditSubcommand { match self { AuditCommand::Cell(cmd) => cmd, AuditCommand::Classpath(cmd) => cmd, @@ -106,5 +107,6 @@ pub fn init_late_bindings() { ONCE.call_once(|| { output::command::init_audit_output(); cell::init_audit_cell(); + server::init_audit_server_command(); }) } diff --git a/app/buck2_audit_server/src/output/mod.rs b/app/buck2_audit_server/src/output.rs similarity index 100% rename from app/buck2_audit_server/src/output/mod.rs rename to app/buck2_audit_server/src/output.rs diff --git a/app/buck2_audit_server/src/output/buck_out_path_parser.rs b/app/buck2_audit_server/src/output/buck_out_path_parser.rs index 2465ab2b28bb9..31eda772ad593 100644 --- a/app/buck2_audit_server/src/output/buck_out_path_parser.rs +++ b/app/buck2_audit_server/src/output/buck_out_path_parser.rs @@ -19,14 +19,14 @@ use buck2_core::fs::paths::file_name::FileName; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; use buck2_core::package::PackageLabel; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use buck2_core::target::name::TargetNameRef; use buck2_core::target::name::EQ_SIGN_SUBST; use buck2_interpreter::paths::bxl::BxlFilePath; use dupe::Dupe; use itertools::Itertools; 
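
The parser below (rewritten to scan with `let`-`else` and `peek` instead of deeply nested matches) decodes `gen` paths of the shape `buck-out/v2/gen/<cell>/<config hash>/<package path>/__<target name>__/<outputs>`, as its tests exercise. A std-only sketch of that split; the real parser also handles the `tmp`, `test`, `gen-anon`, and `gen-bxl` prefixes, anon-target hashes, target names containing `/`, and the `_eqsb_` substitution for `=`:

fn parse_gen_path(path: &str) -> Option<(&str, &str, Vec<&str>, &str)> {
    let mut it = path.split('/');
    if (it.next()?, it.next()?, it.next()?) != ("buck-out", "v2", "gen") {
        return None;
    }
    let cell = it.next()?;
    let config_hash = it.next()?;
    let mut package = Vec::new();
    for part in it {
        // The first `__...__` segment marks the end of the package path.
        if let Some(target) = part.strip_prefix("__").and_then(|p| p.strip_suffix("__")) {
            return Some((cell, config_hash, package, target));
        }
        package.push(part);
    }
    None
}

fn main() {
    let (cell, _hash, package, target) =
        parse_gen_path("buck-out/v2/gen/bar/cfghash/path/to/target/__target_name__/out").unwrap();
    assert_eq!((cell, target), ("bar", "target_name"));
    assert_eq!(package, ["path", "to", "target"]);
}
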
-#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum BuckOutPathParserError { #[error( "Malformed buck-out path. Expected format: `buck-out//////____/<__action__id__?>/`. Actual path was: `{0}`" @@ -79,8 +79,8 @@ pub(crate) enum BuckOutPathType { }, } -pub(crate) struct BuckOutPathParser<'v> { - cell_resolver: &'v CellResolver, +pub(crate) struct BuckOutPathParser { + cell_resolver: CellResolver, } fn validate_buck_out_and_isolation_prefix<'v>( @@ -130,91 +130,71 @@ fn get_cell_path<'v>( let is_anon = generated_prefix == "gen-anon"; let is_test = generated_prefix == "test"; // Get cell name and validate it exists - match iter.next() { - Some(cell_name) => { - let cell_name = CellName::unchecked_new(cell_name.as_str())?; - let mut raw_path_to_output = ForwardRelativePath::new(cell_name.as_str())?.to_buf(); - - cell_resolver.get(cell_name)?; - - // Advance iterator to the config hash - let config_hash = match iter.next() { - Some(config_hash) => config_hash, - None => { - return Err(anyhow::anyhow!( - "Path does not have a platform configuration" - )); - } - }; - - iter.clone().for_each(|f| { - raw_path_to_output.push(f); - }); - - // Get cell relative path and construct the cell path - let mut cell_relative_path = CellRelativePath::unchecked_new("").to_owned(); - - while let Some(part) = iter.next() { - cell_relative_path = cell_relative_path.join(part).to_owned(); - - // We make sure not to consume the target name part via the iterator. - match iter.peek() { - Some(maybe_target_name) => { - let maybe_target_name = maybe_target_name.as_str(); - // TODO(@wendyy) We assume that the first string that we find that starts with "__" - // is the target name. There is a small risk of naming collisions (ex: if there's a directory - // name that follows this convention that contains a build file), but I will fix this at a - // later date. - if (*maybe_target_name).starts_with("__") { - // If it's an anonymous target, then the last part before the target name is actually the - // hash, and not part of the cell relative path. - let cell_path = if is_anon { - CellPath::new( - cell_name, - cell_relative_path - .parent() - .with_context(|| "Invalid path for anonymous target")? - .to_buf(), - ) - } else { - CellPath::new(cell_name, cell_relative_path.to_buf()) - }; - - let anon_hash = if is_anon { - // Iterator is pointing to the part right before the target name, aka the attr - // hash for the anonymous target. 
- Some(part.to_string()) - } else { - None - }; - - let buck_out_path_data = BuckOutPathData { - cell_path, - config_hash: config_hash.to_string(), - anon_hash, - raw_path_to_output: raw_path_to_output.to_buf(), - }; - - return Ok(buck_out_path_data); - } - } - None => (), - } - } + let Some(cell_name) = iter.next() else { + return Err(anyhow::anyhow!("Invalid cell name")); + }; - if is_test { - let buck_out_path_data = BuckOutPathData { - cell_path: CellPath::new(cell_name, cell_relative_path.to_buf()), - config_hash: config_hash.to_string(), - anon_hash: None, - raw_path_to_output: raw_path_to_output.to_buf(), - }; - Ok(buck_out_path_data) - } else { - Err(anyhow::anyhow!("Invalid target name")) - } + let cell_name = CellName::unchecked_new(cell_name.as_str())?; + let mut raw_path_to_output = ForwardRelativePath::new(cell_name.as_str())?.to_buf(); + + cell_resolver.get(cell_name)?; + + // Advance iterator to the config hash + let Some(config_hash) = iter.next() else { + return Err(anyhow::anyhow!( + "Path does not have a platform configuration" + )); + }; + + iter.clone().for_each(|f| { + raw_path_to_output.push(f); + }); + + // Get cell relative path and construct the cell path + let mut cell_relative_path = CellRelativePath::unchecked_new("").to_owned(); + + while let Some(maybe_target_name) = iter.peek() { + if !maybe_target_name.as_str().starts_with("__") { + cell_relative_path.push(maybe_target_name); + iter.next(); + continue; } - None => Err(anyhow::anyhow!("Invalid cell name")), + // Intentionally leave the target label on the iterator + + // If it's an anonymous target, then the last part before the target name is actually the + // hash, and not part of the cell relative path. + let (cell_relative_path, anon_hash) = if is_anon { + let path = cell_relative_path + .parent() + .with_context(|| "Invalid path for anonymous target")? 
+ .to_buf(); + let anon_hash = cell_relative_path.file_name().unwrap().as_str().to_owned(); + (path, Some(anon_hash)) + } else { + (cell_relative_path.to_buf(), None) + }; + let cell_path = CellPath::new(cell_name, cell_relative_path); + + let buck_out_path_data = BuckOutPathData { + cell_path, + config_hash: config_hash.to_string(), + anon_hash, + raw_path_to_output: raw_path_to_output.to_buf(), + }; + + return Ok(buck_out_path_data); + } + + if is_test { + let buck_out_path_data = BuckOutPathData { + cell_path: CellPath::new(cell_name, cell_relative_path.to_buf()), + config_hash: config_hash.to_string(), + anon_hash: None, + raw_path_to_output: raw_path_to_output.to_buf(), + }; + Ok(buck_out_path_data) + } else { + Err(anyhow::anyhow!("Invalid target name")) } } @@ -270,8 +250,8 @@ fn get_bxl_function_label<'v>( Ok(bxl_function_label) } -impl<'v> BuckOutPathParser<'v> { - pub(crate) fn new(cell_resolver: &'v CellResolver) -> BuckOutPathParser { +impl BuckOutPathParser { + pub(crate) fn new(cell_resolver: CellResolver) -> BuckOutPathParser { BuckOutPathParser { cell_resolver } } @@ -306,7 +286,7 @@ impl<'v> BuckOutPathParser<'v> { let result = match part.as_str() { "tmp" => { let buck_out_path_data = - get_cell_path(&mut iter, self.cell_resolver, "tmp")?; + get_cell_path(&mut iter, &self.cell_resolver, "tmp")?; let target_label = get_target_label(&mut iter, buck_out_path_data.cell_path.clone())?; @@ -323,7 +303,7 @@ impl<'v> BuckOutPathParser<'v> { } "test" => { let buck_out_path_data = - get_cell_path(&mut iter, self.cell_resolver, "test")?; + get_cell_path(&mut iter, &self.cell_resolver, "test")?; let common_attrs = BuckOutPathTypeCommon { config_hash: buck_out_path_data.config_hash, @@ -337,7 +317,7 @@ impl<'v> BuckOutPathParser<'v> { } "gen" => { let buck_out_path_data = - get_cell_path(&mut iter, self.cell_resolver, "gen")?; + get_cell_path(&mut iter, &self.cell_resolver, "gen")?; let target_label = get_target_label(&mut iter, buck_out_path_data.cell_path.clone())?; let path_after_target_name = @@ -356,7 +336,7 @@ impl<'v> BuckOutPathParser<'v> { } "gen-anon" => { let buck_out_path_data = - get_cell_path(&mut iter, self.cell_resolver, "gen-anon")?; + get_cell_path(&mut iter, &self.cell_resolver, "gen-anon")?; let target_label = get_target_label(&mut iter, buck_out_path_data.cell_path.clone())?; let common_attrs = BuckOutPathTypeCommon { @@ -375,7 +355,7 @@ impl<'v> BuckOutPathParser<'v> { } "gen-bxl" => { let buck_out_path_data = - get_cell_path(&mut iter, self.cell_resolver, "gen-bxl")?; + get_cell_path(&mut iter, &self.cell_resolver, "gen-bxl")?; let bxl_function_label = get_bxl_function_label(&mut iter, buck_out_path_data.cell_path)?; let common_attrs = BuckOutPathTypeCommon { @@ -420,26 +400,22 @@ mod tests { use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; use buck2_core::fs::project_rel_path::ProjectRelativePath; use buck2_core::package::PackageLabel; - use buck2_core::target::label::TargetLabel; + use buck2_core::target::label::label::TargetLabel; use buck2_core::target::name::TargetNameRef; use buck2_interpreter::paths::bxl::BxlFilePath; + use dupe::Dupe; use crate::output::buck_out_path_parser::BuckOutPathParser; use crate::output::buck_out_path_parser::BuckOutPathType; - fn get_parse_test_cell_resolver() -> anyhow::Result { - let cell_path = CellRootPath::new(ProjectRelativePath::new("foo/bar")?); - + fn get_test_data() -> (BuckOutPathParser, String, TargetLabel, CellPath) { + let cell_path = 
CellRootPath::new(ProjectRelativePath::new("foo/bar").unwrap()); let cell_resolver = CellResolver::testing_with_name_and_path( CellName::testing_new("bar"), cell_path.to_buf(), ); + let parser = BuckOutPathParser::new(cell_resolver); - Ok(cell_resolver) - } - - #[test] - fn test_buck_path_parser_validation() -> anyhow::Result<()> { let configuration = ConfigurationData::from_platform( "cfg_for//:testing_exec".to_owned(), ConfigurationDataData { @@ -447,8 +423,30 @@ mod tests { }, ) .unwrap(); - let cell_resolver = get_parse_test_cell_resolver()?; - let buck_out_parser = BuckOutPathParser::new(&cell_resolver); + let config_hash = configuration.output_hash().to_string(); + + let pkg = PackageLabel::new( + CellName::testing_new("bar"), + CellRelativePath::unchecked_new("path/to/target"), + ); + let expected_target_label = + TargetLabel::new(pkg, TargetNameRef::new("target_name").unwrap()); + let expected_cell_path = CellPath::new( + CellName::testing_new("bar"), + CellRelativePath::unchecked_new("path/to/target").to_owned(), + ); + + ( + parser, + config_hash, + expected_target_label, + expected_cell_path, + ) + } + + #[test] + fn test_validation() -> anyhow::Result<()> { + let (buck_out_parser, config_hash, _, _) = get_test_data(); let malformed_path1 = "does/not/start/with/buck-out/blah/blah"; let malformed_path2 = "buck-out/v2/invalid_buck_prefix/blah/blah/blah/blah"; @@ -477,7 +475,6 @@ mod tests { let res = buck_out_parser.parse(cell_does_not_exist); assert!(res.err().unwrap().to_string().contains("Malformed")); - let config_hash = configuration.output_hash(); let no_artifacts_after_target_name = &format!( "buck-out/v2/gen/bar/{}/path/to/target/__target_name__", config_hash @@ -489,31 +486,9 @@ mod tests { } #[test] - fn test_buck_path_parser() -> anyhow::Result<()> { - let configuration = ConfigurationData::from_platform( - "cfg_for//:testing_exec".to_owned(), - ConfigurationDataData { - constraints: BTreeMap::new(), - }, - ) - .unwrap(); - let cell_resolver = get_parse_test_cell_resolver()?; - let buck_out_parser = BuckOutPathParser::new(&cell_resolver); - - let pkg = PackageLabel::new( - CellName::testing_new("bar"), - CellRelativePath::unchecked_new("path/to/target"), - ); - - let expected_target_label = - TargetLabel::new(pkg.clone(), TargetNameRef::new("target_name")?); - - let expected_cell_path = CellPath::new( - CellName::testing_new("bar"), - CellRelativePath::unchecked_new("path/to/target").to_owned(), - ); - - let expected_config_hash = configuration.output_hash(); + fn test_target_output() -> anyhow::Result<()> { + let (buck_out_parser, expected_config_hash, expected_target_label, expected_cell_path) = + get_test_data(); let rule_path = format!( "buck-out/v2/gen/bar/{}/path/to/target/__target_name__/output", @@ -544,6 +519,14 @@ mod tests { _ => panic!("Should have parsed buck-out path successfully"), } + Ok(()) + } + + #[test] + fn test_target_output_with_slashes() -> anyhow::Result<()> { + let (buck_out_parser, expected_config_hash, expected_target_label, expected_cell_path) = + get_test_data(); + let rule_path_target_label_with_slashes = format!( "buck-out/v2/gen/bar/{}/path/to/target/__target_name_start/target_name_end__/output", expected_config_hash @@ -552,7 +535,7 @@ mod tests { let res = buck_out_parser.parse(&rule_path_target_label_with_slashes)?; let expected_target_label_with_slashes = TargetLabel::new( - pkg.clone(), + expected_target_label.pkg().dupe(), TargetNameRef::new("target_name_start/target_name_end")?, ); @@ -578,6 +561,14 @@ mod tests { _ => 
panic!("Should have parsed buck-out path successfully"), } + Ok(()) + } + + #[test] + fn test_target_output_with_eq_sign() -> anyhow::Result<()> { + let (buck_out_parser, expected_config_hash, expected_target_label, expected_cell_path) = + get_test_data(); + let rule_path_with_equal_sign = format!( "buck-out/v2/gen/bar/{}/path/to/target/__target_name_eqsb_out__/output", expected_config_hash @@ -585,8 +576,10 @@ mod tests { let res = buck_out_parser.parse(&rule_path_with_equal_sign)?; - let expected_target_label_with_equal_sign = - TargetLabel::new(pkg, TargetNameRef::new("target_name=out")?); + let expected_target_label_with_equal_sign = TargetLabel::new( + expected_target_label.pkg(), + TargetNameRef::new("target_name=out")?, + ); match res { BuckOutPathType::RuleOutput { @@ -610,6 +603,14 @@ mod tests { _ => panic!("Should have parsed buck-out path successfully"), } + Ok(()) + } + + #[test] + fn test_tmp_output() -> anyhow::Result<()> { + let (buck_out_parser, expected_config_hash, expected_target_label, expected_cell_path) = + get_test_data(); + let tmp_path = format!( "buck-out/v2/tmp/bar/{}/path/to/target/__target_name__/output", expected_config_hash @@ -634,6 +635,13 @@ mod tests { _ => panic!("Should have parsed buck-out path successfully"), } + Ok(()) + } + + #[test] + fn test_test_output() -> anyhow::Result<()> { + let (buck_out_parser, expected_config_hash, _, _) = get_test_data(); + let test_path = format!( "buck-out/v2/test/bar/{}/path/to/target/test/output", expected_config_hash @@ -658,6 +666,14 @@ mod tests { _ => panic!("Should have parsed buck-out path successfully"), } + Ok(()) + } + + #[test] + fn test_anon_output() -> anyhow::Result<()> { + let (buck_out_parser, expected_config_hash, expected_target_label, expected_cell_path) = + get_test_data(); + let anon_path = format!( "buck-out/v2/gen-anon/bar/{}/path/to/target/anon_hash/__target_name__/output", expected_config_hash @@ -684,6 +700,13 @@ mod tests { _ => panic!("Should have parsed buck-out path successfully"), } + Ok(()) + } + + #[test] + fn test_bxl_output() -> anyhow::Result<()> { + let (buck_out_parser, expected_config_hash, _, _) = get_test_data(); + let path = format!( "buck-out/v2/gen-bxl/bar/{}/path/to/function.bxl/__function_name__/output", expected_config_hash @@ -719,4 +742,26 @@ mod tests { Ok(()) } + + #[test] + fn test_empty_package_path() -> anyhow::Result<()> { + let (buck_out_parser, expected_config_hash, _, _) = get_test_data(); + + let target_path = format!( + "buck-out/v2/gen/bar/{}/__target_name__/output", + expected_config_hash + ); + + let BuckOutPathType::RuleOutput { + path, target_label, .. + } = buck_out_parser.parse(&target_path)? 
+ else { + panic!("Should have parsed buck-out path successfully") + }; + + assert!(path.path().is_empty()); + assert_eq!(target_label.name().as_str(), "target_name"); + + Ok(()) + } } diff --git a/app/buck2_audit_server/src/output/command.rs b/app/buck2_audit_server/src/output/command.rs index 47738d45e2a9a..2da43bfe0abec 100644 --- a/app/buck2_audit_server/src/output/command.rs +++ b/app/buck2_audit_server/src/output/command.rs @@ -18,22 +18,22 @@ use buck2_build_api::audit_output::AuditOutputResult; use buck2_build_api::audit_output::AUDIT_OUTPUT; use buck2_cli_proto::ClientContext; use buck2_common::dice::cells::HasCellResolver; +use buck2_common::global_cfg_options::GlobalCfgOptions; use buck2_core::cells::CellResolver; use buck2_core::fs::project_rel_path::ProjectRelativePath; -use buck2_core::target::label::TargetLabel; use buck2_node::target_calculation::ConfiguredTargetCalculation; use buck2_server_ctx::ctx::ServerCommandContextTrait; use buck2_server_ctx::ctx::ServerCommandDiceContext; +use buck2_server_ctx::global_cfg_options::global_cfg_options_from_client_context; use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; -use buck2_server_ctx::pattern::target_platform_from_client_context; use dice::DiceComputations; -use thiserror::Error; +use dupe::Dupe; use crate::output::buck_out_path_parser::BuckOutPathParser; use crate::output::buck_out_path_parser::BuckOutPathType; -use crate::AuditSubcommand; +use crate::ServerAuditSubcommand; -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] pub(crate) enum AuditOutputError { #[error( "BXL, anonymous target, test, and tmp artifacts are not supported for audit output. Only rule output artifacts are supported. Path: `{0}`" )] @@ -44,9 +44,9 @@ async fn audit_output<'v>( output_path: &'v str, working_dir: &'v ProjectRelativePath, - cell_resolver: &'v CellResolver, - dice_ctx: &'v DiceComputations, - global_target_platform: Option<TargetLabel>, + cell_resolver: CellResolver, + dice_ctx: &'v mut DiceComputations<'_>, + global_cfg_options: &'v GlobalCfgOptions, ) -> anyhow::Result<Option<AuditOutputResult>> { let buck_out_parser = BuckOutPathParser::new(cell_resolver); let parsed = buck_out_parser.parse(output_path)?; @@ -70,7 +70,7 @@ async fn audit_output<'v>( }; let configured_target_label = dice_ctx - .get_configured_target(&target_label, global_target_platform.as_ref()) + .get_configured_target(&target_label, global_cfg_options) .await?; let command_config = configured_target_label.cfg(); @@ -87,7 +87,7 @@ async fn audit_output<'v>( Ok(FIND_MATCHING_ACTION.get()?( dice_ctx, working_dir, - global_target_platform, + global_cfg_options, &analysis, path_after_target_name, ) @@ -97,28 +97,28 @@ pub(crate) fn init_audit_output() { AUDIT_OUTPUT.init( - |output_path, working_dir, cell_resolver, dice_ctx, global_target_platform| { + |output_path, working_dir, cell_resolver, dice_ctx, global_cfg_options| { Box::pin(audit_output( output_path, working_dir, - cell_resolver, + cell_resolver.dupe(), dice_ctx, - global_target_platform, + global_cfg_options, )) }, ); } #[async_trait] -impl AuditSubcommand for AuditOutputCommand { +impl ServerAuditSubcommand for AuditOutputCommand { async fn server_execute( &self, server_ctx: &dyn ServerCommandContextTrait, mut stdout: PartialResultDispatcher<buck2_cli_proto::StdoutBytes>, - client_ctx: ClientContext, + _client_ctx: ClientContext, ) -> anyhow::Result<()> { server_ctx - .with_dice_ctx(async move |server_ctx, mut dice_ctx| { + .with_dice_ctx(|server_ctx, mut dice_ctx| async move { //
First, we parse the buck-out path to get a target label. Next, we configure the target // label and run analysis on it to get the `DeferredTable`. Then, we iterate through the // deferred table's entries and look at their build outputs (if they have any) to try to @@ -129,14 +129,14 @@ impl AuditSubcommand for AuditOutputCommand { let working_dir = server_ctx.working_dir(); let cell_resolver = dice_ctx.get_cell_resolver().await?; - let global_target_platform = target_platform_from_client_context( - &client_ctx, + let global_cfg_options = global_cfg_options_from_client_context( + &self.target_cfg.target_cfg(), server_ctx, &mut dice_ctx, ) .await?; - let result = audit_output(&self.output_path, working_dir, &cell_resolver, &dice_ctx, global_target_platform).await?; + let result = audit_output(&self.output_path, working_dir, cell_resolver.dupe(), &mut dice_ctx, &global_cfg_options).await?; let mut stdout = stdout.as_writer(); diff --git a/app/buck2_audit_server/src/output/parse.rs b/app/buck2_audit_server/src/output/parse.rs index 904739f034179..b5d07d8a19c22 100644 --- a/app/buck2_audit_server/src/output/parse.rs +++ b/app/buck2_audit_server/src/output/parse.rs @@ -17,10 +17,10 @@ use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; use super::buck_out_path_parser::BuckOutPathParser; use super::buck_out_path_type_printer::BuckOutPathTypePrinter; -use crate::AuditSubcommand; +use crate::ServerAuditSubcommand; #[async_trait] -impl AuditSubcommand for AuditParseCommand { +impl ServerAuditSubcommand for AuditParseCommand { async fn server_execute( &self, server_ctx: &dyn ServerCommandContextTrait, @@ -28,9 +28,9 @@ impl AuditSubcommand for AuditParseCommand { _client_ctx: ClientContext, ) -> anyhow::Result<()> { server_ctx - .with_dice_ctx(async move |_server_ctx, dice_ctx| { + .with_dice_ctx(|_server_ctx, mut dice_ctx| async move { let cell_resolver = dice_ctx.get_cell_resolver().await?; - let buck_out_parser = BuckOutPathParser::new(&cell_resolver); + let buck_out_parser = BuckOutPathParser::new(cell_resolver); let parsed_path = buck_out_parser.parse(&self.output_path)?; let printer = BuckOutPathTypePrinter::new(self.json, &self.output_attribute)?; diff --git a/app/buck2_audit_server/src/package_values.rs b/app/buck2_audit_server/src/package_values.rs index 63a90a0b3e992..b2eefc9351938 100644 --- a/app/buck2_audit_server/src/package_values.rs +++ b/app/buck2_audit_server/src/package_values.rs @@ -21,14 +21,14 @@ use buck2_server_ctx::ctx::ServerCommandContextTrait; use buck2_server_ctx::ctx::ServerCommandDiceContext; use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; use dupe::Dupe; +use futures::FutureExt; use gazebo::prelude::SliceExt; -use gazebo::prelude::VecExt; use starlark_map::small_map::SmallMap; -use crate::AuditSubcommand; +use crate::ServerAuditSubcommand; #[async_trait] -impl AuditSubcommand for PackageValuesCommand { +impl ServerAuditSubcommand for PackageValuesCommand { async fn server_execute( &self, server_ctx: &dyn ServerCommandContextTrait, @@ -40,31 +40,31 @@ impl AuditSubcommand for PackageValuesCommand { } server_ctx - .with_dice_ctx(async move |server_ctx, dice_ctx| { - let cell_resolver = dice_ctx.get_cell_resolver().await?; - let current_cell_path = cell_resolver.get_cell_path(server_ctx.working_dir())?; - - let cell_alias_resolver = cell_resolver - .get(current_cell_path.cell())? 
- .cell_alias_resolver(); + .with_dice_ctx(|server_ctx, mut dice_ctx| async move { + let cell_alias_resolver = dice_ctx + .get_cell_alias_resolver_for_dir(server_ctx.working_dir()) + .await?; let packages = self .packages - .try_map(|package| parse_package(package.dupe(), cell_alias_resolver))?; + .try_map(|package| parse_package(package.dupe(), &cell_alias_resolver))?; - let package_values_by_package = packages.into_map(|package| async { - let package_values = PACKAGE_VALUES_CALCULATION - .get()? - .package_values(&dice_ctx, package.dupe()) - .await?; - anyhow::Ok((package, package_values)) - }); + let package_values_by_package = dice_ctx + .try_compute_join(packages, |ctx, package| { + async move { + let package_values = PACKAGE_VALUES_CALCULATION + .get()? + .package_values(ctx, package.dupe()) + .await?; + anyhow::Ok((package, package_values)) + } + .boxed() + }) + .await?; let package_values_by_package: SmallMap< PackageLabel, SmallMap, - > = SmallMap::from_iter( - futures::future::try_join_all(package_values_by_package).await?, - ); + > = package_values_by_package.into_iter().collect(); let mut stdout = stdout.as_writer(); serde_json::to_writer_pretty(&mut stdout, &package_values_by_package)?; diff --git a/app/buck2_audit_server/src/prelude.rs b/app/buck2_audit_server/src/prelude.rs index 43ce5d2da3490..9f30824357886 100644 --- a/app/buck2_audit_server/src/prelude.rs +++ b/app/buck2_audit_server/src/prelude.rs @@ -13,7 +13,6 @@ use async_trait::async_trait; use buck2_audit::prelude::AuditPreludeCommand; use buck2_cli_proto::ClientContext; use buck2_common::dice::cells::HasCellResolver; -use buck2_interpreter::file_type::StarlarkFileType; use buck2_interpreter::load_module::InterpreterCalculation; use buck2_interpreter::load_module::INTERPRETER_CALCULATION_IMPL; use buck2_interpreter::prelude_path::prelude_path; @@ -21,10 +20,16 @@ use buck2_server_ctx::ctx::ServerCommandContextTrait; use buck2_server_ctx::ctx::ServerCommandDiceContext; use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; -use crate::AuditSubcommand; +use crate::ServerAuditSubcommand; + +#[derive(buck2_error::Error, Debug)] +enum AuditPreludeError { + #[error("Project has no prelude")] + NoPrelude, +} #[async_trait] -impl AuditSubcommand for AuditPreludeCommand { +impl ServerAuditSubcommand for AuditPreludeCommand { async fn server_execute( &self, server_ctx: &dyn ServerCommandContextTrait, @@ -32,17 +37,19 @@ impl AuditSubcommand for AuditPreludeCommand { _client_ctx: ClientContext, ) -> anyhow::Result<()> { server_ctx - .with_dice_ctx(async move |_server_ctx, ctx| { + .with_dice_ctx(|_server_ctx, mut ctx| async move { let mut stdout = stdout.as_writer(); // Print out all the Prelude-like stuff that is loaded into each module let cell_resolver = ctx.get_cell_resolver().await?; - let prelude_path = prelude_path(&cell_resolver)?; + let Some(prelude_path) = prelude_path(&cell_resolver)? else { + return Err(AuditPreludeError::NoPrelude.into()); + }; writeln!( stdout, "{}", INTERPRETER_CALCULATION_IMPL .get()? - .global_env_for_file_type(&ctx, StarlarkFileType::Buck) + .global_env(&mut ctx) .await? 
.describe() )?; diff --git a/app/buck2_audit_server/src/providers.rs b/app/buck2_audit_server/src/providers.rs index 84dd4cfed9d44..c333c0db139a2 100644 --- a/app/buck2_audit_server/src/providers.rs +++ b/app/buck2_audit_server/src/providers.rs @@ -14,44 +14,38 @@ use buck2_audit::providers::AuditProvidersCommand; use buck2_build_api::analysis::calculation::RuleAnalysisCalculation; use buck2_build_api::interpreter::rule_defs::provider::collection::FrozenProviderCollectionValue; use buck2_cli_proto::ClientContext; -use buck2_common::dice::cells::HasCellResolver; -use buck2_common::dice::file_ops::HasFileOps; -use buck2_common::pattern::resolve::resolve_target_patterns; -use buck2_core::pattern::pattern_type::ProvidersPatternExtra; -use buck2_core::provider::label::ProvidersName; -use buck2_node::nodes::frontend::TargetGraphCalculation; -use buck2_node::target_calculation::ConfiguredTargetCalculation; use buck2_server_ctx::ctx::ServerCommandContextTrait; use buck2_server_ctx::ctx::ServerCommandDiceContext; use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; -use buck2_server_ctx::pattern::parse_patterns_from_cli_args; -use buck2_server_ctx::pattern::target_platform_from_client_context; +use buck2_server_ctx::pattern_parse_and_resolve::parse_and_resolve_provider_labels_from_cli_args; use buck2_util::indent::indent; +use dice::DiceComputations; use dice::DiceTransaction; -use dupe::Dupe; use futures::stream::FuturesOrdered; +use futures::FutureExt; use futures::StreamExt; -use gazebo::prelude::*; -use crate::AuditSubcommand; +use crate::common::target_resolution_config::audit_command_target_resolution_config; +use crate::ServerAuditSubcommand; #[async_trait] -impl AuditSubcommand for AuditProvidersCommand { +impl ServerAuditSubcommand for AuditProvidersCommand { async fn server_execute( &self, server_ctx: &dyn ServerCommandContextTrait, stdout: PartialResultDispatcher<buck2_cli_proto::StdoutBytes>, - client_ctx: ClientContext, + _client_ctx: ClientContext, ) -> anyhow::Result<()> { server_ctx .with_dice_ctx(move |server_ctx, ctx| { - server_execute_with_dice(self, client_ctx, server_ctx, stdout, ctx) + server_execute_with_dice(self, server_ctx, stdout, ctx) }) .await } } -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] +#[buck2(input)] enum AuditProvidersError { #[error("Evaluation of at least one target providers failed")] AtLeastOneFailed, @@ -59,64 +53,38 @@ async fn server_execute_with_dice( command: &AuditProvidersCommand, - client_ctx: ClientContext, server_ctx: &dyn ServerCommandContextTrait, mut stdout: PartialResultDispatcher<buck2_cli_proto::StdoutBytes>, mut ctx: DiceTransaction, ) -> anyhow::Result<()> { - let cells = ctx.get_cell_resolver().await?; - let target_platform = - target_platform_from_client_context(&client_ctx, server_ctx, &mut ctx).await?; + let target_resolution_config = + audit_command_target_resolution_config(&mut ctx, &command.target_cfg, server_ctx).await?; - let parsed_patterns = parse_patterns_from_cli_args::<ProvidersPatternExtra>( + let provider_labels = parse_and_resolve_provider_labels_from_cli_args( &mut ctx, - &command - .patterns - .map(|pat| buck2_data::TargetPattern { value: pat.clone() }), + &command.patterns, server_ctx.working_dir(), ) .await?; - let resolved_pattern = - resolve_target_patterns(&cells, &parsed_patterns, &ctx.file_ops()).await?; - let mut futs = FuturesOrdered::new(); - for (package, spec) in resolved_pattern.specs { - let ctx = &ctx; - let targets = match spec { - buck2_core::pattern::PackageSpec::Targets(targets) => targets, -
buck2_core::pattern::PackageSpec::All => { - let interpreter_results = ctx.get_interpreter_results(package.dupe()).await?; - interpreter_results - .targets() - .keys() - .map(|target| { - ( - target.to_owned(), - ProvidersPatternExtra { - providers: ProvidersName::Default, - }, - ) - }) - .collect() - } - }; - - for (target_name, providers) in targets { - let label = providers.into_providers_label(package.dupe(), target_name.as_ref()); - let providers_label = ctx - .get_configured_provider_label(&label, target_platform.as_ref()) - .await?; - - // `.push` is deprecated in newer `futures`, - // but we have not updated the vendored `futures` yet. - #[allow(deprecated)] - futs.push(async move { - let result = ctx.get_providers(&providers_label).await; - (providers_label, result) - }); + let mut futs = Vec::new(); + for label in provider_labels { + for providers_label in target_resolution_config + .get_configured_provider_label(&mut ctx, &label) + .await? + { + futs.push(DiceComputations::declare_closure(|ctx| { + async move { + let result = ctx.get_providers(&providers_label).await; + (providers_label, result) + } + .boxed() + })); + } } + let mut futs: FuturesOrdered<_> = ctx.compute_many(futs).into_iter().collect(); + let mut stdout = stdout.as_writer(); let mut stderr = server_ctx.stderr()?; @@ -174,6 +142,7 @@ async fn server_execute_with_dice( stdout.flush()?; stderr.flush()?; + // FIXME(JakobDegen): We should try preserving error metadata here if at_least_one_error { Err(AuditProvidersError::AtLeastOneFailed.into()) } else { diff --git a/app/buck2_audit_server/src/server.rs b/app/buck2_audit_server/src/server.rs index 0f3831765ec65..7e061f4ed38a0 100644 --- a/app/buck2_audit_server/src/server.rs +++ b/app/buck2_audit_server/src/server.rs @@ -8,28 +8,40 @@ */ use buck2_events::dispatch::span_async; -use buck2_server_ctx::command_end::command_end; +use buck2_server_ctx::commands::command_end; use buck2_server_ctx::ctx::ServerCommandContextTrait; +use buck2_server_ctx::late_bindings::AuditServerCommand; +use buck2_server_ctx::late_bindings::AUDIT_SERVER_COMMAND; use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; use crate::AuditCommand; use crate::AuditCommandExt; -pub async fn server_audit_command( - ctx: &dyn ServerCommandContextTrait, - partial_result_dispatcher: PartialResultDispatcher<buck2_cli_proto::StdoutBytes>, - req: buck2_cli_proto::GenericRequest, -) -> anyhow::Result<buck2_cli_proto::GenericResponse> { - let start_event = buck2_data::CommandStart { - metadata: ctx.request_metadata().await?, - data: Some(buck2_data::AuditCommandStart {}.into()), - }; +pub(crate) fn init_audit_server_command() { + AUDIT_SERVER_COMMAND.init(&AuditServerCommandImpl); +} + +struct AuditServerCommandImpl; - span_async( - start_event, - server_audit_command_inner(ctx, partial_result_dispatcher, req), - ) - .await +#[async_trait::async_trait] +impl AuditServerCommand for AuditServerCommandImpl { + async fn audit( + &self, + ctx: &dyn ServerCommandContextTrait, + partial_result_dispatcher: PartialResultDispatcher<buck2_cli_proto::StdoutBytes>, + req: buck2_cli_proto::GenericRequest, + ) -> anyhow::Result<buck2_cli_proto::GenericResponse> { + let start_event = buck2_data::CommandStart { + metadata: ctx.request_metadata().await?, + data: Some(buck2_data::AuditCommandStart {}.into()), + }; + + span_async( + start_event, + server_audit_command_inner(ctx, partial_result_dispatcher, req), + ) + .await + } } async fn server_audit_command_inner( @@ -40,10 +52,14 @@ anyhow::Result<buck2_cli_proto::GenericResponse>, buck2_data::CommandEnd, ) { - let result = parse_command_and_execute(context, partial_result_dispatcher,
req).await; + let result = parse_command_and_execute(context, partial_result_dispatcher, req) + .await + .map_err(Into::into); let end_event = command_end(&result, buck2_data::AuditCommandEnd {}); - let result = result.map(|()| buck2_cli_proto::GenericResponse {}); + let result = result + .map(|()| buck2_cli_proto::GenericResponse {}) + .map_err(Into::into); (result, end_event) } diff --git a/app/buck2_audit_server/src/starlark.rs b/app/buck2_audit_server/src/starlark.rs new file mode 100644 index 0000000000000..bd17fb28429f0 --- /dev/null +++ b/app/buck2_audit_server/src/starlark.rs @@ -0,0 +1,40 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! Starlark debugging. + +mod module; +mod package_deps; + +use async_trait::async_trait; +use buck2_audit::starlark::StarlarkCommand; +use buck2_cli_proto::ClientContext; +use buck2_server_ctx::ctx::ServerCommandContextTrait; +use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; + +use crate::ServerAuditSubcommand; + +#[async_trait] +impl ServerAuditSubcommand for StarlarkCommand { + async fn server_execute( + &self, + server_ctx: &dyn ServerCommandContextTrait, + stdout: PartialResultDispatcher, + client_ctx: ClientContext, + ) -> anyhow::Result<()> { + match self { + StarlarkCommand::Module(cmd) => { + module::server_execute(cmd, server_ctx, stdout, client_ctx).await + } + StarlarkCommand::PackageDeps(cmd) => { + package_deps::server_execute(cmd, server_ctx, stdout, client_ctx).await + } + } + } +} diff --git a/app/buck2_audit_server/src/starlark/mod.rs b/app/buck2_audit_server/src/starlark/mod.rs deleted file mode 100644 index 3713e0fa407b4..0000000000000 --- a/app/buck2_audit_server/src/starlark/mod.rs +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! Starlark debugging. 
- -mod module; -mod package_deps; - -use async_trait::async_trait; -use buck2_audit::starlark::StarlarkCommand; -use buck2_cli_proto::ClientContext; -use buck2_server_ctx::ctx::ServerCommandContextTrait; -use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; - -use crate::AuditSubcommand; - -#[async_trait] -impl AuditSubcommand for StarlarkCommand { - async fn server_execute( - &self, - server_ctx: &dyn ServerCommandContextTrait, - stdout: PartialResultDispatcher, - client_ctx: ClientContext, - ) -> anyhow::Result<()> { - match self { - StarlarkCommand::Module(cmd) => { - module::server_execute(cmd, server_ctx, stdout, client_ctx).await - } - StarlarkCommand::PackageDeps(cmd) => { - package_deps::server_execute(cmd, server_ctx, stdout, client_ctx).await - } - } - } -} diff --git a/app/buck2_audit_server/src/starlark/module.rs b/app/buck2_audit_server/src/starlark/module.rs index ac529b4715213..2af8fca50c868 100644 --- a/app/buck2_audit_server/src/starlark/module.rs +++ b/app/buck2_audit_server/src/starlark/module.rs @@ -29,17 +29,17 @@ pub(crate) async fn server_execute( _client_ctx: ClientContext, ) -> anyhow::Result<()> { server_ctx - .with_dice_ctx(async move |server_ctx, dice_ctx| { + .with_dice_ctx(|server_ctx, mut dice_ctx| async move { let cell_resolver = dice_ctx.get_cell_resolver().await?; - let current_cell_path = cell_resolver.get_cell_path(server_ctx.working_dir())?; + let cwd = server_ctx.working_dir(); + let current_cell_path = cell_resolver.get_cell_path(cwd)?; let current_cell = BuildFileCell::new(current_cell_path.cell()); - - let cell_alias_resolver = cell_resolver - .get(current_cell_path.cell())? - .cell_alias_resolver(); + let cell_alias_resolver = dice_ctx + .get_cell_alias_resolver(current_cell_path.cell()) + .await?; let import_path = parse_bzl_path_with_config( - cell_alias_resolver, + &cell_alias_resolver, &command.import_path, &ParseImportOptions { relative_import_option: RelativeImports::Allow { diff --git a/app/buck2_audit_server/src/starlark/package_deps.rs b/app/buck2_audit_server/src/starlark/package_deps.rs index 7832510dd9514..abf7c69a783db 100644 --- a/app/buck2_audit_server/src/starlark/package_deps.rs +++ b/app/buck2_audit_server/src/starlark/package_deps.rs @@ -30,20 +30,20 @@ pub(crate) async fn server_execute( _client_ctx: ClientContext, ) -> anyhow::Result<()> { server_ctx - .with_dice_ctx(async move |server_ctx, dice_ctx| { + .with_dice_ctx(|server_ctx, mut dice_ctx| async move { let cell_resolver = dice_ctx.get_cell_resolver().await?; - let current_cell_path = cell_resolver.get_cell_path(server_ctx.working_dir())?; + let cwd = server_ctx.working_dir(); + let current_cell_path = cell_resolver.get_cell_path(cwd)?; let current_cell = BuildFileCell::new(current_cell_path.cell()); + let cell_alias_resolver = dice_ctx + .get_cell_alias_resolver(current_cell_path.cell()) + .await?; - let cell_alias_resolver = cell_resolver - .get(current_cell_path.cell())? - .cell_alias_resolver(); - - let package = parse_package(&command.package, cell_alias_resolver)?; + let package = parse_package(&command.package, &cell_alias_resolver)?; let module_deps = INTERPRETER_CALCULATION_IMPL .get()? 
- .get_module_deps(&dice_ctx, package, current_cell) + .get_module_deps(&mut dice_ctx, package, current_cell) .await?; let mut stdout = stdout.as_writer(); diff --git a/app/buck2_audit_server/src/subtargets.rs b/app/buck2_audit_server/src/subtargets.rs index a46e95eef7a2c..a9ad36b9cabf5 100644 --- a/app/buck2_audit_server/src/subtargets.rs +++ b/app/buck2_audit_server/src/subtargets.rs @@ -16,40 +16,31 @@ use buck2_audit::subtargets::AuditSubtargetsCommand; use buck2_build_api::analysis::calculation::RuleAnalysisCalculation; use buck2_build_api::interpreter::rule_defs::provider::collection::FrozenProviderCollection; use buck2_cli_proto::ClientContext; -use buck2_common::dice::cells::HasCellResolver; -use buck2_common::dice::file_ops::HasFileOps; -use buck2_common::pattern::resolve::resolve_target_patterns; -use buck2_core::pattern::pattern_type::ProvidersPatternExtra; use buck2_core::provider::label::ConfiguredProvidersLabel; -use buck2_core::provider::label::ProvidersName; -use buck2_node::nodes::frontend::TargetGraphCalculation; -use buck2_node::target_calculation::ConfiguredTargetCalculation; use buck2_server_ctx::ctx::ServerCommandContextTrait; use buck2_server_ctx::ctx::ServerCommandDiceContext; use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; -use buck2_server_ctx::pattern::parse_patterns_from_cli_args; -use buck2_server_ctx::pattern::target_platform_from_client_context; +use buck2_server_ctx::pattern_parse_and_resolve::parse_and_resolve_provider_labels_from_cli_args; use buck2_server_ctx::stdout_partial_output::StdoutPartialOutput; use buck2_util::indent::indent; use dice::DiceTransaction; -use dupe::Dupe; use futures::stream::FuturesOrdered; use futures::StreamExt; -use gazebo::prelude::*; -use crate::AuditSubcommand; +use crate::common::target_resolution_config::audit_command_target_resolution_config; +use crate::ServerAuditSubcommand; #[async_trait] -impl AuditSubcommand for AuditSubtargetsCommand { +impl ServerAuditSubcommand for AuditSubtargetsCommand { async fn server_execute( &self, server_ctx: &dyn ServerCommandContextTrait, stdout: PartialResultDispatcher<buck2_cli_proto::StdoutBytes>, - client_ctx: ClientContext, + _client_ctx: ClientContext, ) -> anyhow::Result<()> { server_ctx .with_dice_ctx(move |server_ctx, ctx| { - server_execute_with_dice(self, client_ctx, server_ctx, stdout, ctx) + server_execute_with_dice(self, server_ctx, stdout, ctx) }) .await } @@ -57,58 +48,31 @@ impl AuditSubcommand for AuditSubtargetsCommand { async fn server_execute_with_dice( command: &AuditSubtargetsCommand, - client_ctx: ClientContext, server_ctx: &dyn ServerCommandContextTrait, mut stdout: PartialResultDispatcher<buck2_cli_proto::StdoutBytes>, mut ctx: DiceTransaction, ) -> anyhow::Result<()> { // TODO(raulgarcia4): Extract a function where possible; this shares a lot of code with audit providers (the shared flow is sketched below).
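// In outline, the resolution flow now shared with audit providers is roughly the
// following (a sketch only; it assumes `parse_and_resolve_provider_labels_from_cli_args`
// also expands package patterns such as `//pkg:` into individual labels, which the old
// inline `PackageSpec::All` branch below used to do by hand):
//
//     let provider_labels = parse_and_resolve_provider_labels_from_cli_args(
//         &mut ctx, &command.patterns, server_ctx.working_dir()).await?;
//     for label in provider_labels {
//         for providers_label in target_resolution_config
//             .get_configured_provider_label(&mut ctx, &label)
//             .await?
//         {
//             // queue a future that computes the providers of `providers_label`
//         }
//     }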
- let cells = ctx.get_cell_resolver().await?; - let target_platform = - target_platform_from_client_context(&client_ctx, server_ctx, &mut ctx).await?; + let target_resolution_config = + audit_command_target_resolution_config(&mut ctx, &command.target_cfg, server_ctx).await?; - let parsed_patterns = parse_patterns_from_cli_args::<ProvidersPatternExtra>( + let provider_labels = parse_and_resolve_provider_labels_from_cli_args( &mut ctx, - command - .patterns - .map(|pat| buck2_data::TargetPattern { value: pat.clone() }) - .as_slice(), + &command.patterns, server_ctx.working_dir(), ) .await?; - let resolved_pattern = - resolve_target_patterns(&cells, &parsed_patterns, &ctx.file_ops()).await?; let mut futs = FuturesOrdered::new(); - for (package, spec) in resolved_pattern.specs { - let ctx = &ctx; - let targets = match spec { - buck2_core::pattern::PackageSpec::Targets(targets) => targets, - buck2_core::pattern::PackageSpec::All => { - let interpreter_results = ctx.get_interpreter_results(package.dupe()).await?; - interpreter_results - .targets() - .keys() - .map(|target| { - ( - target.to_owned(), - ProvidersPatternExtra { - providers: ProvidersName::Default, - }, - ) - }) - .collect() - } - }; - - for (target_name, providers) in targets { - let label = providers.into_providers_label(package.dupe(), target_name.as_ref()); - let providers_label = ctx - .get_configured_provider_label(&label, target_platform.as_ref()) - .await?; + for label in provider_labels { + for providers_label in target_resolution_config + .get_configured_provider_label(&mut ctx, &label) + .await? + { // `.push` is deprecated in newer `futures`, // but we have not updated the vendored `futures` yet. + let mut ctx = ctx.clone(); #[allow(deprecated)] futs.push(async move { let result = ctx.get_providers(&providers_label).await; @@ -131,23 +95,23 @@ async fn server_execute_with_dice( if json_format { fn serialize_nested_subtargets( providers: &FrozenProviderCollection, - ) -> serde_json::Value { + ) -> anyhow::Result<serde_json::Value> { let mut entries = serde_json::Map::new(); for (subtarget, providers) in - providers.default_info().sub_targets().iter() + providers.default_info()?.sub_targets().iter() { entries.insert( subtarget.to_string(), - serialize_nested_subtargets(providers), + serialize_nested_subtargets(providers)?, ); } - serde_json::Value::Object(entries) + Ok(serde_json::Value::Object(entries)) } subtargets_map.insert( target.to_string(), serialize_nested_subtargets( v.require_compatible()?.provider_collection(), - ), + )?, ); } else { fn recursive_iterate( @@ -156,7 +120,7 @@ async fn server_execute_with_dice( label: &mut Subtarget, ) -> anyhow::Result<()> { for (subtarget, providers) in - providers.default_info().sub_targets().iter() + providers.default_info()?.sub_targets().iter() { label.push(subtarget.to_string()); writeln!(stdout, "{}", label)?; @@ -176,7 +140,7 @@ async fn server_execute_with_dice( for sub in v .require_compatible()? .provider_collection() - .default_info() + .default_info()?
.sub_targets() .keys() { diff --git a/app/buck2_audit_server/src/visibility.rs b/app/buck2_audit_server/src/visibility.rs index 23dbfd228f1bf..354e86a3b00fa 100644 --- a/app/buck2_audit_server/src/visibility.rs +++ b/app/buck2_audit_server/src/visibility.rs @@ -10,29 +10,25 @@ use async_trait::async_trait; use buck2_audit::visibility::AuditVisibilityCommand; use buck2_cli_proto::ClientContext; -use buck2_common::result::SharedResult; -use buck2_common::result::ToUnsharedResultExt; +use buck2_common::pattern::parse_from_cli::parse_patterns_from_cli_args; use buck2_core::pattern::pattern_type::TargetPatternExtra; use buck2_node::load_patterns::load_patterns; use buck2_node::load_patterns::MissingTargetBehavior; use buck2_node::nodes::lookup::TargetNodeLookup; use buck2_node::nodes::unconfigured::TargetNode; use buck2_node::visibility::VisibilityError; +use buck2_query::query::environment::QueryTargetDepsSuccessors; use buck2_query::query::syntax::simple::eval::set::TargetSet; use buck2_query::query::traversal::async_depth_first_postorder_traversal; -use buck2_query::query::traversal::AsyncTraversalDelegate; -use buck2_query::query::traversal::ChildVisitor; use buck2_server_ctx::ctx::ServerCommandContextTrait; use buck2_server_ctx::ctx::ServerCommandDiceContext; use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; -use buck2_server_ctx::pattern::parse_patterns_from_cli_args; use dice::DiceTransaction; use dupe::Dupe; -use gazebo::prelude::SliceExt; -use crate::AuditSubcommand; +use crate::ServerAuditSubcommand; -#[derive(thiserror::Error, Debug)] +#[derive(buck2_error::Error, Debug)] enum VisibilityCommandError { #[error( "Internal Error: The dependency `{0}` of the target `{1}` was not found during the traversal." )] @@ -41,44 +37,34 @@ } async fn verify_visibility( - ctx: DiceTransaction, + mut ctx: DiceTransaction, targets: TargetSet<TargetNode>, ) -> anyhow::Result<()> { - struct Delegate { - targets: TargetSet<TargetNode>, - } + let mut new_targets: TargetSet<TargetNode> = TargetSet::new(); - #[async_trait] - impl AsyncTraversalDelegate for Delegate { - fn visit(&mut self, target: TargetNode) -> anyhow::Result<()> { - self.targets.insert(target); - Ok(()) - } - async fn for_each_child( - &mut self, - target: &TargetNode, - func: &mut dyn ChildVisitor, - ) -> anyhow::Result<()> { - for dep in target.deps() { - func.visit(dep.dupe())?; - } - Ok(()) - } - } - - let lookup = TargetNodeLookup(&ctx); - - let mut delegate = Delegate { - targets: TargetSet::<TargetNode>::new(), + let visit = |target| { + new_targets.insert(target); + Ok(()) }; - async_depth_first_postorder_traversal(&lookup, targets.iter_names(), &mut delegate).await?; + ctx.with_linear_recompute(|ctx| async move { + let lookup = TargetNodeLookup(&ctx); + + async_depth_first_postorder_traversal( + &lookup, + targets.iter_names(), + QueryTargetDepsSuccessors, + visit, + ) + .await + }) + .await?; let mut visibility_errors = Vec::new(); - for target in delegate.targets.iter() { + for target in new_targets.iter() { for dep in target.deps() { - match delegate.targets.get(dep) { + match new_targets.get(dep) { Some(val) => { if !val.is_visible_to(target.label())?
{ visibility_errors.push(VisibilityError::NotVisibleTo( @@ -88,10 +74,12 @@ async fn verify_visibility( } } None => { - return Err(anyhow::Error::new(VisibilityCommandError::DepNodeNotFound( - dep.to_string(), - target.label().name().to_string(), - ))); + return Err(anyhow::Error::from( + VisibilityCommandError::DepNodeNotFound( + dep.to_string(), + target.label().name().to_string(), + ), + )); } } } @@ -110,7 +98,7 @@ async fn verify_visibility( } #[async_trait] -impl AuditSubcommand for AuditVisibilityCommand { +impl ServerAuditSubcommand for AuditVisibilityCommand { async fn server_execute( &self, server_ctx: &dyn ServerCommandContextTrait, @@ -118,29 +106,21 @@ impl AuditSubcommand for AuditVisibilityCommand { _client_ctx: ClientContext, ) -> anyhow::Result<()> { server_ctx - .with_dice_ctx(async move |server_ctx, mut ctx| { + .with_dice_ctx(|server_ctx, mut ctx| async move { let parsed_patterns = parse_patterns_from_cli_args::<TargetPatternExtra>( &mut ctx, - &self - .patterns - .map(|pat| buck2_data::TargetPattern { value: pat.clone() }), + &self.patterns, server_ctx.working_dir(), ) .await?; let parsed_target_patterns = - load_patterns(&ctx, parsed_patterns, MissingTargetBehavior::Fail).await?; + load_patterns(&mut ctx, parsed_patterns, MissingTargetBehavior::Fail).await?; let mut nodes = TargetSet::<TargetNode>::new(); for (_package, result) in parsed_target_patterns.iter() { - match result { - Ok(res) => { - nodes.extend(res.values()); - } - Err(e) => { - return SharedResult::unshared_error(Err(e.dupe())); - } - } + let res = result.as_ref().map_err(Dupe::dupe)?; + nodes.extend(res.values().map(|n| n.to_owned())); } verify_visibility(ctx, nodes).await?; diff --git a/app/buck2_build_api/BUCK b/app/buck2_build_api/BUCK index d8ce2d72e81e6..8274dfb8bb314 100644 --- a/app/buck2_build_api/BUCK +++ b/app/buck2_build_api/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -10,27 +9,25 @@ rust_library( "//buck2/app/buck2_wrapper_common:buck2_wrapper_common", ], deps = [ - "fbsource//third-party/blake3:blake3-rust", "fbsource//third-party/rust:anyhow", "fbsource//third-party/rust:async-recursion", "fbsource//third-party/rust:async-trait", + "fbsource//third-party/rust:blake3", "fbsource//third-party/rust:dashmap", "fbsource//third-party/rust:derivative", "fbsource//third-party/rust:derive_more", "fbsource//third-party/rust:either", - "fbsource//third-party/rust:fnv", "fbsource//third-party/rust:futures", - "fbsource//third-party/rust:higher-order-closure", "fbsource//third-party/rust:indexmap", "fbsource//third-party/rust:internment", "fbsource//third-party/rust:inventory", "fbsource//third-party/rust:itertools", + "fbsource//third-party/rust:linkme", "fbsource//third-party/rust:once_cell", "fbsource//third-party/rust:ref-cast", "fbsource//third-party/rust:regex", "fbsource//third-party/rust:serde", "fbsource//third-party/rust:serde_json", - "fbsource//third-party/rust:shlex", "fbsource//third-party/rust:smallvec", "fbsource//third-party/rust:static_assertions", "fbsource//third-party/rust:thiserror", @@ -45,20 +42,25 @@ rust_library( "//buck2/app/buck2_common:buck2_common", "//buck2/app/buck2_core:buck2_core", "//buck2/app/buck2_data:buck2_data", + "//buck2/app/buck2_directory:buck2_directory", + "//buck2/app/buck2_error:buck2_error", + "//buck2/app/buck2_event_observer:buck2_event_observer", "//buck2/app/buck2_events:buck2_events", "//buck2/app/buck2_execute:buck2_execute",
"//buck2/app/buck2_file_watcher:buck2_file_watcher", + "//buck2/app/buck2_futures:buck2_futures", + "//buck2/app/buck2_http:buck2_http", "//buck2/app/buck2_interpreter:buck2_interpreter", "//buck2/app/buck2_node:buck2_node", "//buck2/app/buck2_query:buck2_query", "//buck2/app/buck2_test_api:buck2_test_api", "//buck2/app/buck2_util:buck2_util", + "//buck2/app/buck2_wrapper_common:buck2_wrapper_common", "//buck2/dice/dice:dice", "//buck2/gazebo/display_container:display_container", "//buck2/gazebo/dupe:dupe", "//buck2/gazebo/gazebo:gazebo", - "//buck2/shed/more_futures:more_futures", - "//buck2/shed/provider:provider", + "//buck2/remote_execution:remote_execution", "//buck2/starlark-rust/starlark:starlark", "//buck2/starlark-rust/starlark_map:starlark_map", "//common/rust/shed/sorted_vector_map:sorted_vector_map", diff --git a/app/buck2_build_api/Cargo.toml b/app/buck2_build_api/Cargo.toml index 811ce482273dd..f4fbee76d273d 100644 --- a/app/buck2_build_api/Cargo.toml +++ b/app/buck2_build_api/Cargo.toml @@ -1,76 +1,69 @@ [package] +description = "Build related interpreter defs, coercion logic, some build rule impelementations, and handing actions off to DICE for caching and execution. A large chunk of the 'build' part of BuckV2" +edition = "2021" +license = { workspace = true } name = "buck2_build_api" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Build related interpreter defs, coercion logic, some build rule impelementations, and handing actions off to DICE for caching and execution. A large chunk of the 'build' part of BuckV2" [dependencies] -dashmap = { workspace = true } anyhow = { workspace = true } async-recursion = { workspace = true } async-trait = { workspace = true } blake3 = { workspace = true } -derive_more = { workspace = true } +dashmap = { workspace = true } derivative = { workspace = true } -once_cell = { workspace = true } -higher-order-closure = { workspace = true } -inventory = { workspace = true } +derive_more = { workspace = true } +either = { workspace = true } futures = { workspace = true } +indexmap = { workspace = true } internment = { workspace = true } +inventory = { workspace = true } itertools = { workspace = true } -pin-project = { workspace = true } -regex = { workspace = true } -tracing = { workspace = true } -tracing-subscriber = { workspace = true } +linkme = { workspace = true } +once_cell = { workspace = true } ref-cast = { workspace = true } -shlex = { workspace = true } -static_assertions = { workspace = true } -structopt = { workspace = true } -thiserror = { workspace = true } +regex = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } -tokio = { workspace = true } -glob = { workspace = true } -indexmap = { workspace = true } -either = { workspace = true } smallvec = { workspace = true } -crossbeam-epoch = { workspace = true } -fnv = { workspace = true } +static_assertions = { workspace = true } +thiserror = { workspace = true } +tokio = { workspace = true } +tracing = { workspace = true } allocative = { workspace = true } dice = { workspace = true } -fbinit = { workspace = true } display_container = { workspace = true } -gazebo = { workspace = true } dupe = { workspace = true } -gazebo_lint.version = "0.1" -gazebo_lint.optional = true -# @oss-disable: gazebo_lint.path = "../../gazebo_lint/gazebo_lint" -more_futures = { workspace = true } -provider = { workspace = true } +gazebo = { workspace = true } +remote_execution = { workspace = true } sorted_vector_map = { workspace = true 
} starlark = { workspace = true } starlark_map = { workspace = true } +buck2_artifact = { workspace = true } buck2_build_api_derive = { workspace = true } buck2_build_info = { workspace = true } -buck2_core = { workspace = true } +buck2_build_signals = { workspace = true } +buck2_cli_proto = { workspace = true } buck2_common = { workspace = true } +buck2_core = { workspace = true } buck2_data = { workspace = true } +buck2_directory = { workspace = true } +buck2_error = { workspace = true } +buck2_event_observer = { workspace = true } buck2_events = { workspace = true } buck2_execute = { workspace = true } +buck2_file_watcher = { workspace = true } +buck2_futures = { workspace = true } +buck2_http = { workspace = true } buck2_interpreter = { workspace = true } buck2_node = { workspace = true } -buck2_artifact = { workspace = true } buck2_query = { workspace = true } buck2_test_api = { workspace = true } -buck2_cli_proto = { workspace = true } buck2_util = { workspace = true } -buck2_build_signals = { workspace = true } -buck2_file_watcher = { workspace = true } +buck2_wrapper_common = { workspace = true } [dev-dependencies] buck2_wrapper_common = { workspace = true } - -[features] -# @oss-disable: default = ["gazebo_lint"] diff --git a/app/buck2_build_api/src/actions.rs b/app/buck2_build_api/src/actions.rs new file mode 100644 index 0000000000000..f34a2b6299a7b --- /dev/null +++ b/app/buck2_build_api/src/actions.rs @@ -0,0 +1,416 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! This module contains support for running actions and asynchronous providers +//! +//! An 'Action' is a unit of work with a set of input files known as 'Artifact's that are required +//! for its execution, and a set of output files called 'BuildArtifact's that are created by its +//! execution. Each 'Action' registered by a rule will only be executed when it's 'BuildArtifact's +//! are requested to be available. It will be guaranteed by the action system that all input +//! 'Artifact's are available before the execution of an 'Action'. +//! +//! 'Actions' struct will act as a general registry where users can create new 'Artifact's that +//! represent the outputs of the execution of their 'Action'. These are 'DeclaredArtifact's that +//! are yet bound to any 'Action's. When 'Action's are registered, they will be bound to their +//! appropriate 'DeclaredArtifact' to create a 'BuildArtifact' +//! +//! An 'Action' can be bound to multiple 'BuildArtifact's, but each 'BuildArtifact' can only be +//! bound to a particular 'Action'. 
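+//!
+//! A tiny self-contained sketch of that binding invariant (the names here are
+//! illustrative only, not this crate's API):
+//!
+//! ```
+//! use std::collections::HashMap;
+//!
+//! struct Registry {
+//!     // Output path -> id of the action that produces it.
+//!     bound: HashMap<String, usize>,
+//! }
+//!
+//! impl Registry {
+//!     // One action may produce many outputs, but each output may be bound
+//!     // to exactly one action; a second binding is an error.
+//!     fn register(&mut self, action_id: usize, outputs: &[&str]) -> Result<(), String> {
+//!         for o in outputs {
+//!             if self.bound.insert(o.to_string(), action_id).is_some() {
+//!                 return Err(format!("output {o} is already bound"));
+//!             }
+//!         }
+//!         Ok(())
+//!     }
+//! }
+//! ```
+//!
+//! Registering the same output twice is the situation reported by
+//! 'ActionErrors::ConflictingOutputPath' further down in this module.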
+ +use std::borrow::Cow; +use std::fmt::Debug; +use std::ops::ControlFlow; +use std::sync::Arc; + +use allocative::Allocative; +use async_trait::async_trait; +use buck2_artifact::actions::key::ActionKey; +use buck2_artifact::artifact::build_artifact::BuildArtifact; +use buck2_common::io::IoProvider; +use buck2_core::base_deferred_key::BaseDeferredKey; +use buck2_core::category::Category; +use buck2_core::category::CategoryRef; +use buck2_core::execution_types::executor_config::CommandExecutorConfig; +use buck2_core::fs::artifact_path_resolver::ArtifactFs; +use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; +use buck2_events::dispatch::EventDispatcher; +use buck2_execute::artifact::fs::ExecutorFs; +use buck2_execute::digest_config::DigestConfig; +use buck2_execute::execute::action_digest_and_blobs::ActionDigestAndBlobs; +use buck2_execute::execute::blocking::BlockingExecutor; +use buck2_execute::execute::cache_uploader::CacheUploadResult; +use buck2_execute::execute::cache_uploader::IntoRemoteDepFile; +use buck2_execute::execute::manager::CommandExecutionManager; +use buck2_execute::execute::prepared::PreparedAction; +use buck2_execute::execute::request::CommandExecutionRequest; +use buck2_execute::execute::request::ExecutorPreference; +use buck2_execute::execute::result::CommandExecutionResult; +use buck2_execute::materialize::materializer::Materializer; +use buck2_execute::re::manager::ManagedRemoteExecutionClient; +use buck2_file_watcher::mergebase::Mergebase; +use buck2_futures::cancellation::CancellationContext; +use buck2_http::HttpClient; +use derivative::Derivative; +use derive_more::Display; +use indexmap::indexmap; +use indexmap::IndexMap; +use indexmap::IndexSet; +use remote_execution::TActionResult2; +use starlark::values::OwnedFrozenValue; +use static_assertions::_core::ops::Deref; + +use crate::actions::execute::action_execution_target::ActionExecutionTarget; +use crate::actions::execute::action_executor::ActionExecutionMetadata; +use crate::actions::execute::action_executor::ActionOutputs; +use crate::actions::execute::error::ExecuteError; +use crate::actions::impls::run_action_knobs::RunActionKnobs; +use crate::artifact_groups::ArtifactGroup; +use crate::artifact_groups::ArtifactGroupValues; + +pub mod artifact; +pub mod box_slice_set; +pub mod calculation; +mod error; +pub mod error_handler; +pub mod execute; +pub mod impls; +pub mod query; +pub mod registry; + +/// Represents an unregistered 'Action' that will be registered into the 'Actions' module. +/// The 'UnregisteredAction' is not executable until it is registered, upon which it becomes an +/// 'Action' that is executable. +pub trait UnregisteredAction: Allocative { + /// Consumes self and becomes a registered 'Action'. The 'Action' will be executable + /// and no longer bindable to any other 'Artifact's. + fn register( + self: Box<Self>, + inputs: IndexSet<ArtifactGroup>, + outputs: IndexSet<BuildArtifact>, + starlark_data: Option<OwnedFrozenValue>, + error_handler: Option<OwnedFrozenValue>, + ) -> anyhow::Result<Box<dyn Action>>; +} + +/// A registered, immutable 'Action' that is fully bound. All its 'Artifact's, both inputs and +/// outputs, are verified to exist. +/// +/// The 'Action' can be executed to produce the set of 'BuildArtifact's it declares. Before +/// execution, all input 'Artifact's will be made available to access. +#[async_trait] +pub trait Action: Allocative + Debug + Send + Sync + 'static { + /// A machine-readable kind identifying this type of action.
+ fn kind(&self) -> buck2_data::ActionKind; + + /// All the input 'Artifact's, both sources and built artifacts, that are required for + /// executing this action. While nothing enforces it, this should be a pure function. + fn inputs(&self) -> anyhow::Result<Cow<'_, [ArtifactGroup]>>; + + /// All the outputs this 'Action' will generate. Just like inputs, this should be a pure + /// function. + fn outputs(&self) -> Cow<'_, [BuildArtifact]>; + + /// Returns a reference to an output of the action. All actions are required to have at least one output. + fn first_output(&self) -> &BuildArtifact; + + /// Obtains an executable for this action. + fn as_executable(&self) -> ActionExecutable<'_>; + + /// A machine-readable category for this action, intended to be used when analyzing actions outside of buck2 itself. + /// + /// A category provides a namespace for identifiers within the rule that produced this action. Examples of + /// categories would be things such as `cxx_compile`, `cxx_link`, and so on. Categories are user-specified in the + /// rule implementation; however, buck2 enforces some restrictions on category names. + fn category(&self) -> CategoryRef; + + /// A machine-readable identifier for this action. Required (but as of now, not yet enforced) to be unique within + /// a category within a single invocation of a rule. Like categories, identifiers are also user-specified and buck2 + /// ascribes no semantics to them. Examples of category-identifier pairs would be `cxx_compile` + `MyCppFile.cpp`, + /// reflecting a C++ compiler invocation for a file `MyCppFile.cpp`. + /// + /// Not required; if None, there may be only one action in the given category. The user should + /// be given control over either the identifier or the category. + fn identifier(&self) -> Option<&str>; + + /// Whether to always print stderr, or only print when a user asks for it. + fn always_print_stderr(&self) -> bool { + false + } + + /// Provides a string name for this action, obtained by combining the provided category and identifier. + fn name(&self) -> String { + if let Some(identifier) = self.identifier() { + format!("{} {}", self.category(), identifier) + } else { + self.category().to_string() + } + } + + fn aquery_attributes(&self, _fs: &ExecutorFs) -> IndexMap<String, String> { + indexmap! {} + } + + /// The error handler registered for this action, if any. + fn error_handler(&self) -> Option<OwnedFrozenValue> { + None + } + + // TODO this probably wants more data for execution, like printing a short_name and the target +} + +pub enum ActionExecutable<'a> { + // FIXME(JakobDegen): This is only used in tests. Delete? + Pristine(&'a dyn PristineActionExecutable), + Incremental(&'a dyn IncrementalActionExecutable), +} + +#[async_trait] +pub trait PristineActionExecutable: Send + Sync + 'static { + /// Runs the 'Action', where all inputs are available and the output directory has been cleaned + /// up. Upon success, it is expected that all outputs will be available. + async fn execute( + &self, + ctx: &mut dyn ActionExecutionCtx, + ) -> Result<(ActionOutputs, ActionExecutionMetadata), ExecuteError>; +} + +#[async_trait] +pub trait IncrementalActionExecutable: Send + Sync + 'static { + /// Runs the 'Action', where all inputs are available but the output directory may not have + /// been cleaned up.
Upon success, it is expected that all outputs will be available. + async fn execute( + &self, + ctx: &mut dyn ActionExecutionCtx, + ) -> Result<(ActionOutputs, ActionExecutionMetadata), ExecuteError>; +} + +/// The context for actions to use when executing +#[async_trait] +pub trait ActionExecutionCtx: Send + Sync { + fn target(&self) -> ActionExecutionTarget<'_>; + + /// An 'ArtifactFs' to be used for managing 'Artifact's + fn fs(&self) -> &ArtifactFs; + + fn executor_fs(&self) -> ExecutorFs; + + /// A `Materializer` used for expensive materializations + fn materializer(&self) -> &dyn Materializer; + + fn events(&self) -> &EventDispatcher; + + fn command_execution_manager(&self) -> CommandExecutionManager; + + fn mergebase(&self) -> &Mergebase; + + fn prepare_action( + &mut self, + request: &CommandExecutionRequest, + ) -> anyhow::Result<PreparedAction>; + + async fn action_cache( + &mut self, + manager: CommandExecutionManager, + request: &CommandExecutionRequest, + prepared_action: &PreparedAction, + ) -> ControlFlow<CommandExecutionResult, CommandExecutionManager>; + + async fn cache_upload( + &mut self, + action: &ActionDigestAndBlobs, + execution_result: &CommandExecutionResult, + re_result: Option<TActionResult2>, + dep_file_entry: Option<&mut dyn IntoRemoteDepFile>, + ) -> anyhow::Result<CacheUploadResult>; + + /// Executes a command + /// TODO(bobyf) this seems like it deserves critical sections? + async fn exec_cmd( + &mut self, + manager: CommandExecutionManager, + request: &CommandExecutionRequest, + prepared_action: &PreparedAction, + ) -> CommandExecutionResult; + + fn unpack_command_execution_result( + &mut self, + executor_preference: ExecutorPreference, + result: CommandExecutionResult, + allows_cache_upload: bool, + allows_dep_file_cache_upload: bool, + input_files_bytes: Option<u64>, + ) -> Result<(ActionOutputs, ActionExecutionMetadata), ExecuteError>; + + /// Clean up all the output directories for this action. This requires a mutable reference + /// because you shouldn't be doing anything else with the ActionExecutionCtx while cleaning the + /// outputs. + async fn cleanup_outputs(&mut self) -> anyhow::Result<()>; + + /// Get the value of an Artifact. This Artifact _must_ have been declared + /// as an input to the associated action or a panic will be raised. + fn artifact_values(&self, input: &ArtifactGroup) -> &ArtifactGroupValues; + + fn blocking_executor(&self) -> &dyn BlockingExecutor; + + fn re_client(&self) -> ManagedRemoteExecutionClient; + + fn re_platform(&self) -> &remote_execution::Platform; + + fn digest_config(&self) -> DigestConfig; + + /// Obtain per-command knobs for RunAction. + fn run_action_knobs(&self) -> RunActionKnobs; + + fn cancellation_context(&self) -> &CancellationContext; + + /// I/O layer access to add non-source files (e.g. downloaded files) to + /// the offline archive trace. If None, tracing is not enabled. + fn io_provider(&self) -> Arc<dyn IoProvider>; + + /// Http client used for fetching and downloading remote artifacts. + fn http_client(&self) -> HttpClient; +} + +#[derive(buck2_error::Error, Debug)] +#[buck2(input)] +pub enum ActionErrors { + #[error("Output path for artifact or metadata file cannot be empty.")] + EmptyOutputPath, + #[error( + "Multiple artifacts and/or metadata files are declared at the same output location `{0}` declared at `{1}`." + )] + ConflictingOutputPath(ForwardRelativePathBuf, String), + #[error( + "Multiple artifacts and/or metadata files are declared at conflicting output locations. Output path `{0}` conflicts with the following output paths: {1:?}."
+    #[error(
+        "Multiple artifacts and/or metadata files are declared at conflicting output locations. Output path `{0}` conflicts with the following output paths: {1:?}."
+    )]
+    ConflictingOutputPaths(ForwardRelativePathBuf, Vec<String>),
+    #[error(
+        "Action category `{0}` contains duplicate identifier `{1}`; category-identifier pairs must be unique within a rule"
+    )]
+    ActionCategoryIdentifierNotUnique(Category, String),
+    #[error(
+        "Analysis produced multiple actions with category `{0}` and at least one of them had no identifier. Add an identifier to these actions to disambiguate them"
+    )]
+    ActionCategoryDuplicateSingleton(Category),
+}
+
+#[derive(Derivative, Debug, Display, Allocative)]
+#[derivative(Eq, Hash, PartialEq)]
+#[display("Action(key={}, name={})", key, action.name())]
+pub struct RegisteredAction {
+    /// The key uniquely identifies a registered action; keys map one-to-one to actions.
+    key: ActionKey,
+    #[derivative(Hash = "ignore", PartialEq = "ignore")]
+    action: Box<dyn Action>,
+    #[derivative(Hash = "ignore", PartialEq = "ignore")]
+    executor_config: Arc<CommandExecutorConfig>,
+}
+
+impl RegisteredAction {
+    pub fn new(
+        key: ActionKey,
+        action: Box<dyn Action>,
+        executor_config: Arc<CommandExecutorConfig>,
+    ) -> Self {
+        Self {
+            key,
+            action,
+            executor_config,
+        }
+    }
+
+    pub fn action(&self) -> &dyn Action {
+        self.action.as_ref()
+    }
+
+    /// Gets the target label of the rule that created this action.
+    pub fn owner(&self) -> &BaseDeferredKey {
+        self.key.owner()
+    }
+
+    /// Gets the action key, uniquely identifying this action in a target.
+    pub fn action_key(&self, new_style: bool) -> String {
+        if new_style {
+            // We want the action key to not cause instability in the RE action.
+            // As an artifact can only be bound as an output to one action, we know it uniquely
+            // identifies the action, so we can derive the scratch path from it, and that path will
+            // be no more unstable than the artifact already is.
+            let output_path = self.action.first_output().get_path();
+            match output_path.dynamic_actions_action_key() {
+                Some(k) => format!("{}/{}", k, output_path.path()),
+                None => output_path.path().to_string(),
+            }
+        } else {
+            self.key.action_key()
+        }
+    }
+
+    pub fn key(&self) -> &ActionKey {
+        &self.key
+    }
+
+    pub fn execution_config(&self) -> &CommandExecutorConfig {
+        &self.executor_config
+    }
+
+    pub fn category(&self) -> CategoryRef {
+        self.action.category()
+    }
+
+    pub fn identifier(&self) -> Option<&str> {
+        self.action.identifier()
+    }
+}
+
+impl Deref for RegisteredAction {
+    type Target = dyn Action;
+
+    fn deref(&self) -> &Self::Target {
+        self.action.as_ref()
+    }
+}
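The new-style branch of `action_key` above derives the key from the action's first output rather than from the registration-order key. A minimal sketch of that derivation, with plain strings standing in for the real output-path accessors (`dynamic_actions_action_key` and `path` are assumed shapes here, not the actual `BuckOutPath` API):

    // Sketch only: mirrors the `new_style` branch of `RegisteredAction::action_key`.
    fn new_style_action_key(dynamic_actions_key: Option<&str>, output_path: &str) -> String {
        match dynamic_actions_key {
            // Outputs bound inside a dynamic_output carry its key as a prefix.
            Some(k) => format!("{}/{}", k, output_path),
            // Otherwise the output path alone already uniquely identifies the action.
            None => output_path.to_owned(),
        }
    }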
+/// An 'UnregisteredAction' that is stored by the 'ActionRegistry' to be registered. The stored
+/// inputs have not yet been validated as bound, but will be validated upon registering.
+#[derive(Allocative)]
+struct ActionToBeRegistered {
+    key: ActionKey,
+    inputs: IndexSet<ArtifactGroup>,
+    outputs: IndexSet<BuildArtifact>,
+    action: Box<dyn UnregisteredAction>,
+}
+
+impl ActionToBeRegistered {
+    fn new<A: UnregisteredAction + 'static>(
+        key: ActionKey,
+        inputs: IndexSet<ArtifactGroup>,
+        outputs: IndexSet<BuildArtifact>,
+        a: A,
+    ) -> Self {
+        Self {
+            key,
+            inputs,
+            outputs,
+            action: Box::new(a),
+        }
+    }
+
+    pub fn key(&self) -> &ActionKey {
+        &self.key
+    }
+
+    fn register(
+        self,
+        starlark_data: Option<OwnedFrozenValue>,
+        error_handler: Option<OwnedFrozenValue>,
+    ) -> anyhow::Result<Box<dyn Action>> {
+        self.action
+            .register(self.inputs, self.outputs, starlark_data, error_handler)
+    }
+}
diff --git a/app/buck2_build_api/src/actions/artifact/mod.rs b/app/buck2_build_api/src/actions/artifact.rs
similarity index 100%
rename from app/buck2_build_api/src/actions/artifact/mod.rs
rename to app/buck2_build_api/src/actions/artifact.rs
diff --git a/app/buck2_build_api/src/actions/artifact/get_artifact_fs.rs b/app/buck2_build_api/src/actions/artifact/get_artifact_fs.rs
index 92e385e324aae..ee1f0156585a7 100644
--- a/app/buck2_build_api/src/actions/artifact/get_artifact_fs.rs
+++ b/app/buck2_build_api/src/actions/artifact/get_artifact_fs.rs
@@ -19,12 +19,12 @@ use crate::context::HasBuildContextData;
 #[async_trait]
 pub trait GetArtifactFs {
     /// Get the configured ArtifactFs.
-    async fn get_artifact_fs(&self) -> anyhow::Result<ArtifactFs>;
+    async fn get_artifact_fs(&mut self) -> anyhow::Result<ArtifactFs>;
 }
 
 #[async_trait]
-impl GetArtifactFs for DiceComputations {
-    async fn get_artifact_fs(&self) -> anyhow::Result<ArtifactFs> {
+impl GetArtifactFs for DiceComputations<'_> {
+    async fn get_artifact_fs(&mut self) -> anyhow::Result<ArtifactFs> {
         let buck_out_path_resolver = self.get_buck_out_path().await?;
         let project_filesystem = self.global_data().get_io_provider().project_root().dupe();
         let buck_path_resolver = self.get_cell_resolver().await?;
diff --git a/app/buck2_build_api/src/actions/artifact/materializer.rs b/app/buck2_build_api/src/actions/artifact/materializer.rs
index b0e7007f04b59..7abbd46b95da5 100644
--- a/app/buck2_build_api/src/actions/artifact/materializer.rs
+++ b/app/buck2_build_api/src/actions/artifact/materializer.rs
@@ -12,11 +12,11 @@ use std::time::Instant;
 use async_trait::async_trait;
 use buck2_artifact::artifact::artifact_type::Artifact;
 use buck2_artifact::artifact::build_artifact::BuildArtifact;
-use buck2_build_signals::NodeDuration;
+use buck2_build_signals::env::NodeDuration;
 use buck2_core::fs::project_rel_path::ProjectRelativePathBuf;
 use buck2_data::ToProtoMessage;
 use buck2_events::dispatch::current_span;
-use buck2_events::dispatch::span_async;
+use buck2_events::dispatch::span_async_simple;
 use buck2_execute::artifact::artifact_dyn::ArtifactDyn;
 use buck2_execute::materialize::materializer::HasMaterializer;
 use dice::DiceComputations;
@@ -27,20 +27,20 @@ use crate::build_signals::HasBuildSignals;
 
 #[async_trait]
 pub trait ArtifactMaterializer {
-    async fn materialize(&self, artifact: &Artifact) -> anyhow::Result<ProjectRelativePathBuf>;
+    async fn materialize(&mut self, artifact: &Artifact) -> anyhow::Result<ProjectRelativePathBuf>;
 
     /// Called to materialize the final set of requested artifacts for the build of a target.
/// This method will render events in superconsole async fn try_materialize_requested_artifact( - &self, + &mut self, artifact: &BuildArtifact, required: bool, ) -> anyhow::Result<()>; } #[async_trait] -impl ArtifactMaterializer for DiceComputations { - async fn materialize(&self, artifact: &Artifact) -> anyhow::Result { +impl ArtifactMaterializer for DiceComputations<'_> { + async fn materialize(&mut self, artifact: &Artifact) -> anyhow::Result { let materializer = self.per_transaction_data().get_materializer(); let artifact_fs = self.get_artifact_fs().await?; let path = artifact.resolve_path(&artifact_fs)?; @@ -49,7 +49,7 @@ impl ArtifactMaterializer for DiceComputations { } async fn try_materialize_requested_artifact( - &self, + &mut self, artifact: &BuildArtifact, required: bool, ) -> anyhow::Result<()> { @@ -61,37 +61,39 @@ impl ArtifactMaterializer for DiceComputations { artifact: Some(artifact.as_proto()), }; - span_async(start_event, async move { - let now = Instant::now(); + span_async_simple( + start_event, + async move { + let now = Instant::now(); - let result: anyhow::Result<_> = try { - if required { - materializer.ensure_materialized(vec![path]).await?; - } else { - materializer.try_materialize_final_artifact(path).await?; - } - }; + let result: anyhow::Result<_> = try { + if required { + materializer.ensure_materialized(vec![path]).await?; + } else { + materializer.try_materialize_final_artifact(path).await?; + } + }; - if let Some(signals) = self.per_transaction_data().get_build_signals() { - let duration = now.elapsed(); + if let Some(signals) = self.per_transaction_data().get_build_signals() { + let duration = now.elapsed(); - signals.final_materialization( - artifact.dupe(), - NodeDuration { - user: duration, - total: duration, - }, - current_span(), - ); - } + signals.final_materialization( + artifact.dupe(), + NodeDuration { + user: duration, + total: duration, + queue: None, + }, + current_span(), + ); + } - ( - result, - buck2_data::MaterializeRequestedArtifactEnd { - artifact: Some(artifact.as_proto()), - }, - ) - }) + result + }, + buck2_data::MaterializeRequestedArtifactEnd { + artifact: Some(artifact.as_proto()), + }, + ) .await } } diff --git a/app/buck2_build_api/src/actions/calculation.rs b/app/buck2_build_api/src/actions/calculation.rs index f778f7764ed27..ad45af975802b 100644 --- a/app/buck2_build_api/src/actions/calculation.rs +++ b/app/buck2_build_api/src/actions/calculation.rs @@ -7,6 +7,7 @@ * of this source tree. 
*/ +use std::future::Future; use std::iter::zip; use std::sync::Arc; use std::time::Instant; @@ -16,47 +17,62 @@ use anyhow::Context; use async_trait::async_trait; use buck2_artifact::actions::key::ActionKey; use buck2_artifact::artifact::build_artifact::BuildArtifact; -use buck2_build_signals::NodeDuration; -use buck2_common::result::SharedResult; -use buck2_common::result::ToSharedResultExt; -use buck2_common::result::ToUnsharedResultExt; +use buck2_build_signals::env::NodeDuration; +use buck2_common::events::HasEvents; +use buck2_core::base_deferred_key::BaseDeferredKey; +use buck2_core::target::configured_target_label::ConfiguredTargetLabel; +use buck2_data::ActionErrorDiagnostics; +use buck2_data::ActionSubErrors; use buck2_data::ToProtoMessage; +use buck2_error::starlark_error::from_starlark; +use buck2_event_observer::action_util::get_action_digest; use buck2_events::dispatch::async_record_root_spans; +use buck2_events::dispatch::get_dispatcher; use buck2_events::dispatch::span_async; use buck2_events::span::SpanId; use buck2_execute::execute::result::CommandExecutionReport; use buck2_execute::execute::result::CommandExecutionStatus; use buck2_execute::output_size::OutputSize; +use buck2_futures::cancellation::CancellationContext; +use buck2_interpreter::print_handler::EventDispatcherPrintHandler; +use buck2_interpreter::soft_error::Buck2StarlarkSoftErrorHandler; +use buck2_node::nodes::configured_frontend::ConfiguredTargetNodeCalculation; use derive_more::Display; use dice::DiceComputations; +use dice::DiceTrackedInvalidationPath; use dice::Key; use dupe::Dupe; -use futures::future; -use futures::stream::FuturesOrdered; +use futures::future::BoxFuture; +use futures::future::{self}; use futures::FutureExt; use indexmap::IndexMap; -use more_futures::cancellation::CancellationContext; use ref_cast::RefCast; use smallvec::SmallVec; +use starlark::environment::Module; +use starlark::eval::Evaluator; use tracing::debug; +use crate::actions::error::ActionError; +use crate::actions::error_handler::ActionErrorHandlerError; +use crate::actions::error_handler::ActionSubErrorResult; +use crate::actions::error_handler::StarlarkActionErrorContext; use crate::actions::execute::action_executor::ActionOutputs; +use crate::actions::execute::action_executor::BuckActionExecutor; use crate::actions::execute::action_executor::HasActionExecutor; -use crate::actions::key::ActionKeyExt; use crate::actions::RegisteredAction; use crate::artifact_groups::calculation::ensure_artifact_group_staged; -use crate::deferred::calculation::DeferredCalculation; -use crate::keep_going; +use crate::artifact_groups::ArtifactGroup; +use crate::artifact_groups::ArtifactGroupValues; +use crate::deferred::calculation::lookup_deferred_holder; +use crate::deferred::calculation::ActionLookup; +use crate::keep_going::KeepGoing; +use crate::starlark::values::type_repr::StarlarkTypeRepr; +use crate::starlark::values::UnpackValue; -#[async_trait] -pub trait ActionCalculation { - async fn get_action(&self, action_key: &ActionKey) -> anyhow::Result>; - async fn build_action(&self, action_key: &ActionKey) -> anyhow::Result; - async fn build_artifact(&self, artifact: &BuildArtifact) -> anyhow::Result; -} +pub struct ActionCalculation; async fn build_action_impl( - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, cancellation: &CancellationContext<'_>, key: &ActionKey, ) -> anyhow::Result { @@ -80,23 +96,33 @@ async fn build_action_impl( } async fn build_action_no_redirect( - ctx: &DiceComputations, + ctx: &mut 
DiceComputations<'_>, cancellation: &CancellationContext<'_>, action: Arc, ) -> anyhow::Result { let materialized_inputs = { let inputs = action.inputs()?; - let ensure_futs: FuturesOrdered<_> = inputs - .iter() - .map(|v| ensure_artifact_group_staged(ctx, v)) - .collect(); - let ready_inputs: Vec<_> = - tokio::task::unconstrained(keep_going::try_join_all(ctx, ensure_futs)).await?; + let ready_inputs: Vec<_> = tokio::task::unconstrained(KeepGoing::try_compute_join_all( + ctx, + inputs.iter(), + |ctx, v| { + async move { + let resolved = v.resolved_artifact(ctx).await?; + anyhow::Ok( + ensure_artifact_group_staged(ctx, resolved.clone()) + .await? + .to_group_values(&resolved)?, + ) + } + .boxed() + }, + )) + .await?; let mut results = IndexMap::with_capacity(inputs.len()); for (artifact, ready) in zip(inputs.iter(), ready_inputs) { - results.insert(artifact.clone(), ready.to_group_values(artifact)?); + results.insert(artifact.clone(), ready); } results }; @@ -118,175 +144,376 @@ async fn build_action_no_redirect( let now = Instant::now(); let action = &action; - let fut = async move { - let (execute_result, command_reports) = executor - .execute(materialized_inputs, action, cancellation) - .await; - - let allow_omit_details = execute_result.is_ok(); - - let commands = future::join_all( - command_reports - .iter() - .map(|r| command_execution_report_to_proto(r, allow_omit_details)), - ) - .await; + let target = match action.key().owner() { + BaseDeferredKey::TargetLabel(target_label) => Some(target_label.dupe()), + _ => None, + }; - let action_result; - let execution_kind; - let wall_time; - let error; - let output_size; - - let mut prefers_local = None; - let mut requires_local = None; - let mut allows_cache_upload = None; - let mut did_cache_upload = None; - let mut allows_dep_file_cache_upload = None; - let mut did_dep_file_cache_upload = None; - let mut dep_file_key = None; - let mut eligible_for_full_hybrid = None; - - let mut buck2_revision = None; - let mut buck2_build_time = None; - let mut hostname = None; - - match execute_result { - Ok((outputs, meta)) => { - output_size = outputs.calc_output_count_and_bytes().bytes; - action_result = Ok(outputs); - execution_kind = Some(meta.execution_kind.as_enum()); - wall_time = Some(meta.timing.wall_time); - error = None; - - if let Some(command) = meta.execution_kind.command() { - prefers_local = Some(command.prefers_local); - requires_local = Some(command.requires_local); - allows_cache_upload = Some(command.allows_cache_upload); - did_cache_upload = Some(command.did_cache_upload); - allows_dep_file_cache_upload = Some(command.allows_dep_file_cache_upload); - did_dep_file_cache_upload = Some(command.did_dep_file_cache_upload); - dep_file_key = command.dep_file_key.clone(); - eligible_for_full_hybrid = Some(command.eligible_for_full_hybrid); - } - } - Err(e) => { - // Because we already are sending the error message in the - // ActionExecutionEnd event, we slim the error down in the result. - // We can then unconditionally print the error message for compute(), - // including ones near the beginning of this method, and also not - // duplicate any error messages. - action_result = Err(anyhow::anyhow!("Failed to build '{}'", action.owner())); - // TODO (torozco): Remove (see protobuf file)? 
- execution_kind = command_reports - .last() - .and_then(|r| r.status.execution_kind()) - .map(|e| e.as_enum()); - wall_time = None; - error = Some(e.as_proto()); - output_size = 0; - // We define the below fields only in the instance of an action error - // so as to reduce Scribe traffic and log it in buck2_action_errors - buck2_revision = buck2_build_info::revision().map(|s| s.to_owned()); - buck2_build_time = buck2_build_info::time_iso8601().map(|s| s.to_owned()); - hostname = buck2_events::metadata::hostname(); - } - }; - - let outputs = action_result - .as_ref() - .map(|outputs| { - outputs - .iter() - .filter_map(|(_artifact, value)| { - Some(buck2_data::ActionOutput { - tiny_digest: value.digest()?.tiny_digest().to_string(), - }) - }) - .collect() - }) - .unwrap_or_default(); - - ( - (action_result, wall_time), - Box::new(buck2_data::ActionExecutionEnd { - key: Some(action.key().as_proto()), - kind: action.kind().into(), - name: Some(buck2_data::ActionName { - category: action.category().as_str().to_owned(), - identifier: action.identifier().unwrap_or("").to_owned(), - }), - failed: error.is_some(), - error, - always_print_stderr: action.always_print_stderr(), - wall_time: wall_time.and_then(|d| d.try_into().ok()), - execution_kind: execution_kind.unwrap_or(buck2_data::ActionExecutionKind::NotSet) - as i32, - output_size, - commands, - outputs, - prefers_local: prefers_local.unwrap_or_default(), - requires_local: requires_local.unwrap_or_default(), - allows_cache_upload: allows_cache_upload.unwrap_or_default(), - did_cache_upload: did_cache_upload.unwrap_or_default(), - allows_dep_file_cache_upload: allows_dep_file_cache_upload.unwrap_or_default(), - did_dep_file_cache_upload: did_dep_file_cache_upload.unwrap_or_default(), - dep_file_key, - eligible_for_full_hybrid, - buck2_revision, - buck2_build_time, - hostname, - }), - ) + let target_rule_type_name = match target { + Some(label) => Some(get_target_rule_type_name(ctx, &label).await?), + None => None, }; + let fut = build_action_inner( + ctx, + cancellation, + &executor, + materialized_inputs, + action, + target_rule_type_name, + ); + // boxed() the future so that we don't need to allocate space for it while waiting on input dependencies. - let ((res, wall_time), spans) = + let (action_execution_data, spans) = async_record_root_spans(span_async(start_event, fut.boxed())).await; - // TODO: This wall time is rather wrong. We should report a wall time on failures too. 
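For context on the duration bookkeeping introduced just below: `user` is the wall time of the command itself, `total` additionally covers the staging and dispatch work around it, and `queue` is newly split out. A self-contained sketch of that accounting, re-declaring a simplified `NodeDuration` locally since the real type lives in `buck2_build_signals::env`:

    use std::time::{Duration, Instant};

    struct NodeDuration {
        user: Duration,          // wall time attributed to the command itself
        total: Duration,         // end-to-end time spent building this node
        queue: Option<Duration>, // time spent queued, when the executor reports it
    }

    fn timed<T>(run: impl FnOnce() -> (T, Duration, Option<Duration>)) -> (T, NodeDuration) {
        let started = Instant::now();
        let (value, wall_time, queue) = run();
        let total = started.elapsed();
        (value, NodeDuration { user: wall_time, total, queue })
    }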
ctx.store_evaluation_data(BuildKeyActivationData { - action: action.dupe(), + action_with_extra_data: ActionWithExtraData { + action: action.dupe(), + execution_kind: action_execution_data + .execution_kind + .unwrap_or(buck2_data::ActionExecutionKind::NotSet), + target_rule_type_name: action_execution_data.target_rule_type_name, + action_digest: action_execution_data.action_digest, + }, duration: NodeDuration { - user: wall_time.unwrap_or_default(), + user: action_execution_data.wall_time.unwrap_or_default(), total: now.elapsed(), + queue: action_execution_data.queue_duration, }, spans, })?; - res + action_execution_data.action_result +} + +async fn build_action_inner( + ctx: &mut DiceComputations<'_>, + cancellation: &CancellationContext<'_>, + executor: &BuckActionExecutor, + materialized_inputs: IndexMap, + action: &Arc, + target_rule_type_name: Option, +) -> (ActionExecutionData, Box) { + let (execute_result, command_reports) = executor + .execute(materialized_inputs, action, cancellation) + .await; + + let allow_omit_details = execute_result.is_ok(); + + let commands = future::join_all( + command_reports + .iter() + .map(|r| command_execution_report_to_proto(r, allow_omit_details)), + ) + .await; + + let action_digest = get_action_digest(&commands); + + let queue_duration = command_reports.last().and_then(|r| r.timing.queue_duration); + + let action_key = action.key().as_proto(); + + let action_name = buck2_data::ActionName { + category: action.category().as_str().to_owned(), + identifier: action.identifier().unwrap_or("").to_owned(), + }; + + let action_result; + let execution_kind; + let wall_time; + let error; + let output_size; + + let mut prefers_local = None; + let mut requires_local = None; + let mut allows_cache_upload = None; + let mut did_cache_upload = None; + let mut allows_dep_file_cache_upload = None; + let mut did_dep_file_cache_upload = None; + let mut dep_file_key = None; + let mut eligible_for_full_hybrid = None; + + let mut buck2_revision = None; + let mut buck2_build_time = None; + let mut hostname = None; + let mut input_files_bytes = None; + let error_diagnostics = match execute_result { + Ok((outputs, meta)) => { + output_size = outputs.calc_output_count_and_bytes().bytes; + action_result = Ok(outputs); + execution_kind = Some(meta.execution_kind.as_enum()); + wall_time = Some(meta.timing.wall_time); + error = None; + input_files_bytes = meta.input_files_bytes; + + if let Some(command) = meta.execution_kind.command() { + prefers_local = Some(command.prefers_local); + requires_local = Some(command.requires_local); + allows_cache_upload = Some(command.allows_cache_upload); + did_cache_upload = Some(command.did_cache_upload); + allows_dep_file_cache_upload = Some(command.allows_dep_file_cache_upload); + did_dep_file_cache_upload = Some(command.did_dep_file_cache_upload); + dep_file_key = *command.dep_file_key; + eligible_for_full_hybrid = Some(command.eligible_for_full_hybrid); + } + + None + } + Err(e) => { + // TODO (torozco): Remove (see protobuf file)? 
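In the Err branch continuing below, the old approach of swapping in a slimmed-down "Failed to build" message is superseded by `mark_emitted`: the full error is kept, but flagged as already shown so the console does not print it twice, while later surfaces such as the build report render only the short form. A reduced sketch of that pattern, with a hypothetical wrapper standing in for `buck2_error::Error`:

    use std::sync::Arc;

    type LateFormat = Arc<dyn Fn(&mut dyn std::fmt::Write) -> std::fmt::Result + Send + Sync>;

    // Hypothetical stand-in for `buck2_error::Error` and its `mark_emitted`.
    struct EmittedError {
        full: String,
        late_format: Option<LateFormat>,
    }

    impl EmittedError {
        fn mark_emitted(mut self, late: LateFormat) -> Self {
            // The error body was already printed once; remember the short
            // display for any later rendering.
            self.late_format = Some(late);
            self
        }

        fn render_short(&self) -> String {
            let mut out = String::new();
            match &self.late_format {
                Some(f) => f(&mut out).map(|_| out).unwrap_or_default(),
                None => self.full.clone(),
            }
        }
    }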
+ execution_kind = command_reports + .last() + .and_then(|r| r.status.execution_kind()) + .map(|e| e.as_enum()); + wall_time = command_reports.last().map(|r| r.timing.wall_time); + output_size = 0; + // We define the below fields only in the instance of an action error + // so as to reduce Scribe traffic and log it in buck2_action_errors + buck2_revision = buck2_build_info::revision().map(|s| s.to_owned()); + buck2_build_time = buck2_build_info::time_iso8601().map(|s| s.to_owned()); + hostname = buck2_events::metadata::hostname(); + + let last_command = commands.last().cloned(); + + let error_diagnostics = try_run_error_handler(action.dupe(), last_command.as_ref()); + + let e = ActionError::new( + e, + action_name.clone(), + action_key.clone(), + last_command.clone(), + error_diagnostics.clone(), + ); + + error = Some(e.as_proto_field()); + + ctx.per_transaction_data() + .get_dispatcher() + .instant_event(e.as_proto_event()); + + action_result = Err(buck2_error::Error::from(e) + // Make sure to mark the error as emitted so that it is not printed out to console + // again in this command. We still need to keep it around for the build report (and + // in the future) other commands + .mark_emitted({ + let owner = action.owner().dupe(); + Arc::new(move |f| write!(f, "Failed to build '{}'", owner)) + }) + .into()); + + error_diagnostics + } + }; + + let outputs = action_result + .as_ref() + .map(|outputs| { + outputs + .iter() + .filter_map(|(_artifact, value)| { + Some(buck2_data::ActionOutput { + tiny_digest: value.digest()?.tiny_digest().to_string(), + }) + }) + .collect() + }) + .unwrap_or_default(); + + let invalidation_info = if executor.invalidation_tracking_enabled() { + fn to_proto( + invalidation_path: &DiceTrackedInvalidationPath, + ) -> Option { + match invalidation_path { + dice::DiceTrackedInvalidationPath::Clean + | dice::DiceTrackedInvalidationPath::Unknown => None, + dice::DiceTrackedInvalidationPath::Invalidated(_) => { + Some(buck2_data::command_invalidation_info::InvalidationSource {}) + } + } + } + let invalidation_paths = ctx.get_invalidation_paths(); + Some(buck2_data::CommandInvalidationInfo { + changed_any: to_proto(&invalidation_paths.normal_priority_path), + changed_file: to_proto(&invalidation_paths.high_priority_path), + }) + } else { + None + }; + + ( + ActionExecutionData { + action_result, + wall_time, + queue_duration, + execution_kind, + target_rule_type_name, + action_digest, + }, + Box::new(buck2_data::ActionExecutionEnd { + key: Some(action_key), + kind: action.kind().into(), + name: Some(action_name), + failed: error.is_some(), + error, + always_print_stderr: action.always_print_stderr(), + wall_time: wall_time.and_then(|d| d.try_into().ok()), + execution_kind: execution_kind.unwrap_or(buck2_data::ActionExecutionKind::NotSet) + as i32, + output_size, + commands, + outputs, + prefers_local: prefers_local.unwrap_or_default(), + requires_local: requires_local.unwrap_or_default(), + allows_cache_upload: allows_cache_upload.unwrap_or_default(), + did_cache_upload: did_cache_upload.unwrap_or_default(), + allows_dep_file_cache_upload: allows_dep_file_cache_upload.unwrap_or_default(), + did_dep_file_cache_upload: did_dep_file_cache_upload.unwrap_or_default(), + dep_file_key: dep_file_key.map(|d| d.to_string()), + eligible_for_full_hybrid, + buck2_revision, + buck2_build_time, + hostname, + error_diagnostics, + input_files_bytes, + invalidation_info, + }), + ) +} + +// Attempt to run the error handler if one was specified. 
Returns either the error diagnostics, or +// an actual error if the handler failed to run successfully. +fn try_run_error_handler( + action: Arc, + last_command: Option<&buck2_data::CommandExecution>, +) -> Option { + use buck2_data::action_error_diagnostics::Data; + + match action.action.error_handler() { + Some(error_handler) => { + let dispatcher = get_dispatcher(); + dispatcher + .clone() + .span(buck2_data::ActionErrorHandlerExecutionStart {}, || { + let env = Module::new(); + let heap = env.heap(); + let print = EventDispatcherPrintHandler(get_dispatcher()); + let mut eval = Evaluator::new(&env); + eval.set_print_handler(&print); + eval.set_soft_error_handler(&Buck2StarlarkSoftErrorHandler); + + let error_handler_ctx = + StarlarkActionErrorContext::new_from_command_execution(last_command); + + let error_handler_result = eval.eval_function( + error_handler.value(), + &[heap.alloc(error_handler_ctx)], + &[], + ); + + let data = match error_handler_result { + Ok(result) => match ActionSubErrorResult::unpack_value_err(result) { + Ok(result) => Data::SubErrors(ActionSubErrors { + sub_errors: result + .items + .into_iter() + .map(|s| s.to_proto()) + .collect(), + }), + Err(_) => Data::HandlerInvocationError(format!( + "{}", + ActionErrorHandlerError::TypeError( + ActionSubErrorResult::starlark_type_repr(), + result.get_type().to_owned() + ) + )), + }, + Err(e) => { + let e = from_starlark(e).context("Error handler failed"); + Data::HandlerInvocationError(format!("{:#}", e)) + } + }; + ( + Some(ActionErrorDiagnostics { data: Some(data) }), + buck2_data::ActionErrorHandlerExecutionEnd {}, + ) + }) + } + None => None, + } } pub struct BuildKeyActivationData { - pub action: Arc, + pub action_with_extra_data: ActionWithExtraData, pub duration: NodeDuration, pub spans: SmallVec<[SpanId; 1]>, } +#[derive(Clone)] +pub struct ActionWithExtraData { + pub action: Arc, + pub execution_kind: buck2_data::ActionExecutionKind, + pub target_rule_type_name: Option, + pub action_digest: Option, +} + +struct ActionExecutionData { + action_result: anyhow::Result, + wall_time: Option, + queue_duration: Option, + execution_kind: Option, + target_rule_type_name: Option, + action_digest: Option, +} + /// The cost of these calls are particularly critical. To control the cost (particularly size) of these calls /// we drop the `async_trait` common in other `*Calculation` types and avoid `async fn` (for /// build_action/build_artifact at least). -#[async_trait] -impl ActionCalculation for DiceComputations { - async fn get_action(&self, action_key: &ActionKey) -> anyhow::Result> { - // TODO add async/deferred stuff - self.compute_deferred_data(action_key.deferred_data()) - .await - .map(|a| (*a).dupe()) - .with_context(|| format!("for action key `{}`", action_key)) +impl ActionCalculation { + pub async fn get_action( + ctx: &mut DiceComputations<'_>, + action_key: &ActionKey, + ) -> anyhow::Result> { + // In the typical case, this lookup is only going to require a single deferred holder lookup. There's three cases: + // 1. a normal action defined in analysis: lookup the holder for that analysis, get the action + // 2. an action bound to a dynamic_output and then bound to an action there: the initial holder_key will actually + // point to the dynamic_output (not the analysis that first created the action key) and then the action will be found there + // 3. 
an action bound to a dynamic_output, and then in that dynamic_output bound to another dynamic_output: only in this case + // will the initial lookup not find the key and we'll recurse. + // + // We could introduce a dice key to cache the recursive resolution, but that would only be valuable if we had long nested chains + // of dynamic_output that were re-binding artifacts. In practice we've not yet encountered that. + let deferred_holder = lookup_deferred_holder(ctx, action_key.holder_key()).await?; + match deferred_holder.lookup_action(action_key)? { + ActionLookup::Action(action) => Ok(action), + ActionLookup::Deferred(action_key) => { + fn get_action_recurse<'a>( + ctx: &'a mut DiceComputations<'_>, + action_key: &'a ActionKey, + ) -> BoxFuture<'a, anyhow::Result>> { + async move { ActionCalculation::get_action(ctx, action_key).await }.boxed() + } + get_action_recurse(ctx, &action_key).await + } + } } - async fn build_action(&self, action_key: &ActionKey) -> anyhow::Result { + pub fn build_action<'a>( + ctx: &'a mut DiceComputations<'_>, + action_key: &ActionKey, + ) -> impl Future> + 'a { // build_action is called for every action key. We don't use `async fn` to ensure that it has minimal cost. // We don't currently consume this in buck_e2e but it's good to log for debugging purposes. debug!("build_action {}", action_key); - self.compute(BuildKey::ref_cast(action_key)) - .map(|v| v?.unshared_error()) - .await + ctx.compute(BuildKey::ref_cast(action_key)) + .map(|v| v?.map_err(anyhow::Error::from)) } - async fn build_artifact(&self, artifact: &BuildArtifact) -> anyhow::Result { - self.build_action(artifact.key()).await + pub fn build_artifact<'a>( + ctx: &'a mut DiceComputations<'_>, + artifact: &BuildArtifact, + ) -> impl Future> + 'a { + Self::build_action(ctx, artifact.key()) } } @@ -296,7 +523,7 @@ pub struct BuildKey(pub ActionKey); #[async_trait] impl Key for BuildKey { - type Value = SharedResult; + type Value = buck2_error::Result; async fn compute( &self, @@ -305,7 +532,7 @@ impl Key for BuildKey { ) -> Self::Value { build_action_impl(ctx, cancellation, &self.0) .await - .shared_error() + .map_err(buck2_error::Error::from) } fn equality(x: &Self::Value, y: &Self::Value) -> bool { @@ -340,11 +567,13 @@ async fn command_execution_report_to_proto( } .into() } - CommandExecutionStatus::Error { stage, error } => buck2_data::command_execution::Error { - stage: (*stage).to_owned(), - error: format!("{:#}", error), + CommandExecutionStatus::Error { stage, error, .. } => { + buck2_data::command_execution::Error { + stage: (*stage).to_owned(), + error: format!("{:#}", error), + } + .into() } - .into(), }; buck2_data::CommandExecution { @@ -390,3 +619,16 @@ pub async fn command_details( metadata: Some(command.timing.to_proto()), } } + +pub async fn get_target_rule_type_name( + ctx: &mut DiceComputations<'_>, + label: &ConfiguredTargetLabel, +) -> anyhow::Result { + Ok(ctx + .get_configured_target_node(label) + .await? + .require_compatible()? + .rule_type() + .name() + .to_owned()) +} diff --git a/app/buck2_build_api/src/actions/error.rs b/app/buck2_build_api/src/actions/error.rs new file mode 100644 index 0000000000000..d8cc7a41c4843 --- /dev/null +++ b/app/buck2_build_api/src/actions/error.rs @@ -0,0 +1,218 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::fmt;
+
+use buck2_event_observer::display::display_action_error;
+use buck2_event_observer::display::TargetDisplayOptions;
+
+use crate::actions::execute::error::ExecuteError;
+
+#[derive(Debug)]
+pub struct ActionError {
+    execute_error: ExecuteError,
+    name: buck2_data::ActionName,
+    key: buck2_data::ActionKey,
+    last_command: Option<buck2_data::CommandExecution>,
+    error_diagnostics: Option<buck2_data::ActionErrorDiagnostics>,
+}
+
+impl std::error::Error for ActionError {
+    fn provide<'a>(&'a self, request: &mut std::error::Request<'a>) {
+        if let ExecuteError::Error { error } = &self.execute_error {
+            error.provide(request);
+        }
+
+        let is_command_failure = self.last_command.as_ref().is_some_and(|c| {
+            matches!(
+                c.status,
+                Some(buck2_data::command_execution::Status::Failure { .. })
+            )
+        });
+
+        let mut tags = vec![buck2_error::ErrorTag::AnyActionExecution];
+
+        let category = match &self.execute_error {
+            ExecuteError::CommandExecutionError { error } => {
+                let category = if let Some(err) = error {
+                    tags.extend(err.tags());
+                    err.get_tier()
+                } else {
+                    None
+                };
+
+                if is_command_failure {
+                    Some(buck2_error::Tier::Input)
+                } else {
+                    category
+                }
+            }
+            // Returning extra outputs is a bug in the executor
+            ExecuteError::MismatchedOutputs { .. } => Some(buck2_error::Tier::Tier0),
+            // However, outputs may be legitimately missing if the action didn't produce them
+            ExecuteError::MissingOutputs { .. } => Some(buck2_error::Tier::Input),
+            // Or the action may have produced an output of the wrong type
+            ExecuteError::WrongOutputType { .. } => Some(buck2_error::Tier::Input),
+            ExecuteError::Error { error } => {
+                let err = buck2_error::Error::from_anyhow_ref(error);
+                tags.extend(err.tags());
+                err.get_tier()
+            }
+        };
+
+        buck2_error::provide_metadata(
+            request,
+            category,
+            tags,
+            std::file!(),
+            Some("ActionError"),
+            Some(self.as_proto_event()),
+        );
+    }
+
+    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
+        match &self.execute_error {
+            ExecuteError::Error { error } => error.source(),
+            _ => None,
+        }
+    }
+}
+
+impl fmt::Display for ActionError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let s = display_action_error(&self.as_proto_event(), TargetDisplayOptions::for_log())
+            .expect("Action key is always present in `ActionError`")
+            .simple_format_for_build_report();
+        write!(f, "{}", s)
+    }
+}
+
+impl ActionError {
+    pub(crate) fn new(
+        execute_error: ExecuteError,
+        name: buck2_data::ActionName,
+        key: buck2_data::ActionKey,
+        last_command: Option<buck2_data::CommandExecution>,
+        error_diagnostics: Option<buck2_data::ActionErrorDiagnostics>,
+    ) -> Self {
+        Self {
+            execute_error,
+            name,
+            key,
+            last_command,
+            error_diagnostics,
+        }
+    }
+
+    pub(crate) fn as_proto_field(&self) -> buck2_data::action_execution_end::Error {
+        match &self.execute_error {
+            ExecuteError::MissingOutputs { declared } => buck2_data::CommandOutputsMissing {
+                message: format!("Action failed to produce outputs: {}", error_items(declared)),
+            }
+            .into(),
+            ExecuteError::MismatchedOutputs { declared, real } => buck2_data::CommandOutputsMissing {
+                message: format!(
+                    "Action didn't produce the right set of outputs.\nExpected {}\nreal {}",
+                    error_items(declared),
+                    error_items(real)
+                ),
+            }
+            .into(),
+            ExecuteError::WrongOutputType { path, declared, real } => buck2_data::CommandOutputsMissing {
+                message: format!(
+                    "Action didn't produce output of the right type.\nExpected {path} to be {declared:?}\nreal {real:?}",
+                ),
+            }
+            .into(),
+            ExecuteError::Error { error } => format!("{:#}", error).into(),
+            ExecuteError::CommandExecutionError { .. } => {
+                buck2_data::CommandExecutionError {}.into()
+            }
+        }
+    }
+
+    pub(crate) fn as_proto_event(&self) -> buck2_data::ActionError {
+        let field = match self.as_proto_field() {
+            buck2_data::action_execution_end::Error::Unknown(e) => e.into(),
+            buck2_data::action_execution_end::Error::MissingOutputs(e) => e.into(),
+            buck2_data::action_execution_end::Error::CommandExecutionError(e) => e.into(),
+        };
+        buck2_data::ActionError {
+            error: Some(field),
+            name: Some(self.name.clone()),
+            key: Some(self.key.clone()),
+            last_command: self.last_command.clone(),
+            error_diagnostics: self.error_diagnostics.clone(),
+        }
+    }
+}
+
+fn error_items<T: fmt::Display>(xs: &[T]) -> String {
+    use fmt::Write;
+
+    if xs.is_empty() {
+        return "none".to_owned();
+    }
+    let mut res = String::new();
+    for (i, x) in xs.iter().enumerate() {
+        if i != 0 {
+            res.push_str(", ");
+        }
+        write!(res, "`{}`", x).unwrap();
+    }
+    res
+}
+
+#[cfg(test)]
+mod tests {
+    use buck2_error::buck2_error;
+    use buck2_error::ErrorTag;
+
+    use super::*;
+
+    #[test]
+    fn test_error_conversion() {
+        let error = buck2_error!([ErrorTag::Http], "error");
+
+        let execute_error = ExecuteError::Error {
+            error: error.into(),
+        };
+
+        let action_error = ActionError::new(
+            execute_error,
+            buck2_data::ActionName {
+                category: "category".to_owned(),
+                identifier: "identifier".to_owned(),
+            },
+            buck2_data::ActionKey {
+                id: vec![],
+                key: "key".to_owned(),
+                owner: Some(buck2_data::action_key::Owner::TargetLabel(
+                    buck2_data::ConfiguredTargetLabel {
+                        label: Some(buck2_data::TargetLabel {
+                            package: "package".to_owned(),
+                            name: "name".to_owned(),
+                        }),
+                        configuration: Some(buck2_data::Configuration {
+                            full_name: "conf".into(),
+                        }),
+                        execution_configuration: None,
+                    },
+                )),
+            },
+            None,
+            None,
+        );
+
+        let buck2_error: buck2_error::Error = action_error.into();
+
+        assert_eq!(
+            buck2_error.tags(),
+            vec![ErrorTag::AnyActionExecution, ErrorTag::Http]
+        );
+    }
+}
diff --git a/app/buck2_build_api/src/actions/error_handler.rs b/app/buck2_build_api/src/actions/error_handler.rs
new file mode 100644
index 0000000000000..8988efce71b3a
--- /dev/null
+++ b/app/buck2_build_api/src/actions/error_handler.rs
@@ -0,0 +1,373 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */ + +use allocative::Allocative; +use buck2_data::ActionErrorLocation; +use buck2_data::ActionErrorLocations; +use buck2_data::ActionSubError; +use buck2_data::CommandExecution; +use derive_more::Display; +use display_container::fmt_container; +use gazebo::prelude::SliceClonedExt; +use starlark::environment::GlobalsBuilder; +use starlark::environment::Methods; +use starlark::environment::MethodsBuilder; +use starlark::environment::MethodsStatic; +use starlark::starlark_module; +use starlark::starlark_simple_value; +use starlark::typing::Ty; +use starlark::values::list::UnpackList; +use starlark::values::list_or_tuple::UnpackListOrTuple; +use starlark::values::none::NoneOr; +use starlark::values::starlark_value; +use starlark::values::starlark_value_as_type::StarlarkValueAsType; +use starlark::values::AllocValue; +use starlark::values::Heap; +use starlark::values::NoSerialize; +use starlark::values::ProvidesStaticType; +use starlark::values::StarlarkValue; +use starlark::values::Trace; +use starlark::values::Value; +use starlark::values::ValueError; + +use crate::starlark::values::ValueLike; + +pub(crate) type ActionSubErrorResult<'a> = UnpackList<&'a StarlarkActionSubError<'a>>; + +#[derive(Debug, buck2_error::Error)] +pub(crate) enum ActionErrorHandlerError { + #[error("Error handler failed. Expected return type `{0}`, got value with type `{1}`")] + TypeError(Ty, String), +} + +#[derive( + ProvidesStaticType, + Trace, + Allocative, + Debug, + Display, + NoSerialize, + Clone +)] +#[display( + "ActionErrorCtx(stderr: {}, stdout: {})", + self.stderr, + self.stdout +)] +pub struct StarlarkActionErrorContext { + stderr: String, + stdout: String, +} + +impl StarlarkActionErrorContext { + pub(crate) fn new_from_command_execution(command: Option<&CommandExecution>) -> Self { + let stderr = command.map_or(String::default(), |c| { + c.details + .as_ref() + .map_or(String::default(), |c| c.stderr.clone()) + }); + let stdout = command.map_or(String::default(), |c| { + c.details + .as_ref() + .map_or(String::default(), |c| c.stdout.clone()) + }); + + StarlarkActionErrorContext { stderr, stdout } + } +} + +starlark_simple_value!(StarlarkActionErrorContext); + +#[starlark_value(type = "ActionErrorCtx")] +impl<'v> StarlarkValue<'v> for StarlarkActionErrorContext { + fn get_methods() -> Option<&'static Methods> { + static RES: MethodsStatic = MethodsStatic::new(); + RES.methods(action_error_context_methods) + } +} + +/// Methods available on `ActionErrorCtx` to help categorize the action failure. These +/// categorizations should be finer grain, and most likely language specific. +#[starlark_module] +fn action_error_context_methods(builder: &mut MethodsBuilder) { + /// The stderr of the failed action. + #[starlark(attribute)] + fn stderr<'v>(this: &'v StarlarkActionErrorContext) -> anyhow::Result<&'v str> { + Ok(&this.stderr) + } + + /// The stdout of the failed action. + #[starlark(attribute)] + fn stdout<'v>(this: &'v StarlarkActionErrorContext) -> anyhow::Result<&'v str> { + Ok(&this.stdout) + } + + /// Create a new error location, specifying a file path and an optional line number. + /// + /// The file path should be either a project-relative path, or an absolute path. + fn new_error_location<'v>( + #[starlark(this)] _this: &'v StarlarkActionErrorContext, + #[starlark(require = named)] file: String, + #[starlark(require = named, default = NoneOr::None)] line: NoneOr, + ) -> anyhow::Result { + // @TODO(wendyy) - actually enforce/validate the path types. 
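The TODO above leaves the documented path constraint (project-relative or absolute) unchecked. One plausible shape for that validation, as a standalone helper; the name and the exact policy here are hypothetical, not buck2's:

    use std::path::{Component, Path};

    // Hypothetical validator: accept absolute paths, or relative paths that
    // cannot escape the project root via `..` components.
    fn is_valid_error_location_path(file: &str) -> bool {
        let path = Path::new(file);
        path.is_absolute() || path.components().all(|c| !matches!(c, Component::ParentDir))
    }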
+ Ok(StarlarkActionErrorLocation { + file, + line: line.into_option(), + }) + } + + /// Create a new sub error, specifying an error category name, optional message, and + /// an optional list of error locations. + /// + /// The category should be finer grain error categorizations provided by the rule authors, + /// and tend to be language specific. These should not be any kind of shared concepts + /// among all errors for all languages/rules. For example, timeouts and infra errors + /// should not go here - buck2 tries to categorize these types of errors automatically. + /// An example of a finer grain error category may be the error code for rustc outputs. + /// + /// The message will be emitted to the build report, and to the stderr in the error diagnostics + /// section. + fn new_sub_error<'v>( + #[starlark(this)] _this: &'v StarlarkActionErrorContext, + #[starlark(require = named)] category: String, + #[starlark(require = named, default = NoneOr::None)] message: NoneOr, + #[starlark(require = named, default = NoneOr::None)] locations: NoneOr< + UnpackListOrTuple<&'v StarlarkActionErrorLocation>, + >, + ) -> anyhow::Result> { + Ok(StarlarkActionSubError { + category, + message: message.into_option(), + locations: locations.into_option(), + }) + } +} + +#[derive( + ProvidesStaticType, + Trace, + Allocative, + Debug, + Display, + NoSerialize, + Clone, + Default, + Ord, + PartialOrd, + Eq, + PartialEq +)] +#[display( + "ActionErrorLocation(file={}, line={})", + self.file, + self.line.map_or("None".to_owned(), |l| l.to_string()) +)] +pub struct StarlarkActionErrorLocation { + file: String, + line: Option, +} + +#[starlark_value(type = "ActionErrorLocation", StarlarkTypeRepr, UnpackValue)] +impl<'v> StarlarkValue<'v> for StarlarkActionErrorLocation { + fn get_methods() -> Option<&'static Methods> { + static RES: MethodsStatic = MethodsStatic::new(); + RES.methods(action_error_location_methods) + } + + fn equals(&self, other: Value<'v>) -> starlark::Result { + if let Some(other) = other.downcast_ref::() { + Ok(self.eq(other)) + } else { + Ok(false) + } + } + + fn compare(&self, other: Value<'v>) -> starlark::Result { + if let Some(other) = other.downcast_ref::() { + Ok(self.cmp(other)) + } else { + ValueError::unsupported_with(self, "compare", other) + } + } +} + +impl<'v> AllocValue<'v> for StarlarkActionErrorLocation { + fn alloc_value(self, heap: &'v Heap) -> Value<'v> { + heap.alloc_complex_no_freeze(self) + } +} + +/// Methods available on `StarlarkActionErrorLocation` to help with testing the error +/// handler implementation +#[starlark_module] +fn action_error_location_methods(builder: &mut MethodsBuilder) { + /// The file of the error location. This is only needed for action error handler + /// unit testing. + #[starlark(attribute)] + fn file<'v>(this: &'v StarlarkActionErrorLocation) -> anyhow::Result<&'v str> { + Ok(&this.file) + } + + /// The line of the error location. This is only needed for action error handler + /// unit testing. 
+ #[starlark(attribute)] + fn line<'v>(this: &'v StarlarkActionErrorLocation) -> anyhow::Result> { + Ok(NoneOr::from_option(this.line)) + } +} + +#[derive( + ProvidesStaticType, + Trace, + Allocative, + Debug, + NoSerialize, + Clone, + Ord, + PartialOrd, + Eq, + PartialEq +)] +pub(crate) struct StarlarkActionSubError<'v> { + category: String, + message: Option, + #[allocative(skip)] + #[trace(unsafe_ignore)] + locations: Option>, +} + +impl<'v> Display for StarlarkActionSubError<'v> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let prefix = format!( + "ActionSubError(category={}, message={}, locations=[", + self.category, + self.message.clone().unwrap_or_default() + ); + fmt_container( + f, + &prefix, + "])", + self.locations + .as_ref() + .map_or(Vec::new(), |l| l.items.iter().collect()), + ) + } +} + +impl<'v> AllocValue<'v> for StarlarkActionSubError<'v> { + fn alloc_value(self, heap: &'v Heap) -> Value<'v> { + heap.alloc_complex_no_freeze(self) + } +} + +#[starlark_value(type = "ActionSubError", StarlarkTypeRepr, UnpackValue)] +impl<'v> StarlarkValue<'v> for StarlarkActionSubError<'v> { + fn get_methods() -> Option<&'static Methods> { + static RES: MethodsStatic = MethodsStatic::new(); + RES.methods(action_sub_error_methods) + } + + fn equals(&self, other: Value<'v>) -> starlark::Result { + if let Some(other) = other.downcast_ref::() { + Ok(self.eq(other)) + } else { + Ok(false) + } + } + + fn compare(&self, other: Value<'v>) -> starlark::Result { + if let Some(other) = other.downcast_ref::() { + Ok(self.cmp(other)) + } else { + ValueError::unsupported_with(self, "compare", other) + } + } +} + +/// Methods available on `StarlarkActionSubError` to help with testing the error +/// handler implementation +#[starlark_module] +fn action_sub_error_methods(builder: &mut MethodsBuilder) { + /// The category name of this sub error. This function is only needed for action + /// error handler unit testing. + #[starlark(attribute)] + fn category<'v>(this: &'v StarlarkActionSubError) -> anyhow::Result<&'v str> { + Ok(&this.category) + } + + /// The optional message associated with this sub error. This function is only + /// needed for action error handler unit testing. + #[starlark(attribute)] + fn message<'v>(this: &'v StarlarkActionSubError) -> anyhow::Result> { + Ok(match &this.message { + Some(message) => NoneOr::Other(message.as_str()), + None => NoneOr::None, + }) + } + + /// Any locations associated with this sub error. This function is only needed + /// for action error handler unit testing. 
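The Starlark values in this file implement `equals`/`compare` with one shared pattern: attempt a typed downcast of the other value, then fall back to "not equal" (for `equals`) or an "unsupported" error (for `compare`). The shape of that idiom, reduced to plain Rust with `std::any::Any` standing in for the Starlark `Value` downcast:

    use std::any::Any;
    use std::cmp::Ordering;

    #[derive(PartialEq, Eq, PartialOrd, Ord)]
    struct SubError {
        category: String,
    }

    // Values of a different type compare unequal rather than erroring,
    // mirroring `other.downcast_ref::<Self>()` in the impls above and below.
    fn equals(this: &SubError, other: &dyn Any) -> bool {
        other.downcast_ref::<SubError>().is_some_and(|o| this == o)
    }

    // Ordering against a different type is an error, mirroring
    // `ValueError::unsupported_with(self, "compare", other)`.
    fn compare(this: &SubError, other: &dyn Any) -> Result<Ordering, String> {
        match other.downcast_ref::<SubError>() {
            Some(o) => Ok(this.cmp(o)),
            None => Err("unsupported comparison".to_owned()),
        }
    }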
+ #[starlark(attribute)] + fn locations<'v>( + this: &'v StarlarkActionSubError, + ) -> anyhow::Result>> { + match &this.locations { + None => Ok(NoneOr::None), + Some(locations) => Ok(NoneOr::Other(locations.items.cloned())), + } + } +} + +impl<'v> StarlarkActionSubError<'v> { + pub(crate) fn to_proto(&self) -> ActionSubError { + ActionSubError { + category: self.category.clone(), + message: self.message.clone(), + locations: self + .locations + .clone() + .map(|locations| ActionErrorLocations { + locations: locations + .items + .iter() + .map(|l| ActionErrorLocation { + file: l.file.clone(), + line: l.line, + }) + .collect(), + }), + } + } +} + +#[starlark_module] +pub(crate) fn register_action_error_types(globals: &mut GlobalsBuilder) { + const ActionSubError: StarlarkValueAsType = StarlarkValueAsType::new(); + const ActionErrorCtx: StarlarkValueAsType = + StarlarkValueAsType::new(); + const ActionErrorLocation: StarlarkValueAsType = + StarlarkValueAsType::new(); +} + +/// Global methods for testing starlark action error handler. +#[starlark_module] +pub(crate) fn register_action_error_handler_for_testing(builder: &mut GlobalsBuilder) { + /// Global function to create a new `ActionErrorContext` for testing a starlark action error + /// handler via `bxl_test`. + fn new_test_action_error_ctx( + #[starlark(require=named, default = "")] stderr: &str, + #[starlark(require=named, default = "")] stdout: &str, + ) -> anyhow::Result { + Ok(StarlarkActionErrorContext { + stderr: stderr.to_owned(), + stdout: stdout.to_owned(), + }) + } +} diff --git a/app/buck2_build_api/src/actions/execute.rs b/app/buck2_build_api/src/actions/execute.rs new file mode 100644 index 0000000000000..b6edd438f1e93 --- /dev/null +++ b/app/buck2_build_api/src/actions/execute.rs @@ -0,0 +1,13 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +pub mod action_execution_target; +pub mod action_executor; +pub mod dice_data; +pub mod error; diff --git a/app/buck2_build_api/src/actions/execute/action_execution_target.rs b/app/buck2_build_api/src/actions/execute/action_execution_target.rs index 85c32921c5220..334295fe77c4e 100644 --- a/app/buck2_build_api/src/actions/execute/action_execution_target.rs +++ b/app/buck2_build_api/src/actions/execute/action_execution_target.rs @@ -10,7 +10,7 @@ use std::fmt::Write; use buck2_core::base_deferred_key::BaseDeferredKey; -use buck2_core::category::Category; +use buck2_core::category::CategoryRef; use buck2_core::fs::buck_out_path::BuckOutScratchPath; use buck2_data::ToProtoMessage; use buck2_execute::execute::target::CommandExecutionTarget; @@ -24,18 +24,22 @@ use crate::actions::RegisteredAction; #[derivative(Debug)] pub struct ActionExecutionTarget<'a> { action: &'a RegisteredAction, + new_style_scratch_path: bool, } impl<'a> ActionExecutionTarget<'a> { - pub(crate) fn new(action: &'a RegisteredAction) -> Self { - Self { action } + pub(crate) fn new(action: &'a RegisteredAction, new_style_scratch_path: bool) -> Self { + Self { + action, + new_style_scratch_path, + } } pub fn owner(&self) -> &'a BaseDeferredKey { self.action.owner() } - pub fn category(&self) -> &'a Category { + pub fn category(&self) -> CategoryRef<'a> { self.action.category() } @@ -48,6 +52,7 @@ impl<'a> ActionExecutionTarget<'a> { self.action.owner().dupe(), self.action.category(), self.action.identifier(), + self.action.action_key(self.new_style_scratch_path), ) .unwrap() } diff --git a/app/buck2_build_api/src/actions/execute/action_executor.rs b/app/buck2_build_api/src/actions/execute/action_executor.rs index 25e62ab297856..99e383672c245 100644 --- a/app/buck2_build_api/src/actions/execute/action_executor.rs +++ b/app/buck2_build_api/src/actions/execute/action_executor.rs @@ -18,7 +18,6 @@ use buck2_artifact::artifact::build_artifact::BuildArtifact; use buck2_common::dice::data::HasIoProvider; use buck2_common::events::HasEvents; use buck2_common::http::HasHttpClient; -use buck2_common::http::HttpClient; use buck2_common::io::IoProvider; use buck2_common::liveliness_observer::NoopLivelinessObserver; use buck2_core::execution_types::executor_config::CommandExecutorConfig; @@ -29,24 +28,23 @@ use buck2_execute::artifact::fs::ExecutorFs; use buck2_execute::artifact_value::ArtifactValue; use buck2_execute::digest_config::DigestConfig; use buck2_execute::digest_config::HasDigestConfig; -use buck2_execute::execute::action_digest::ActionDigest; +use buck2_execute::execute::action_digest_and_blobs::ActionDigestAndBlobs; use buck2_execute::execute::blocking::BlockingExecutor; use buck2_execute::execute::blocking::HasBlockingExecutor; use buck2_execute::execute::cache_uploader::CacheUploadInfo; use buck2_execute::execute::cache_uploader::CacheUploadResult; -use buck2_execute::execute::cache_uploader::DepFileEntry; +use buck2_execute::execute::cache_uploader::IntoRemoteDepFile; use buck2_execute::execute::claim::MutexClaimManager; use buck2_execute::execute::clean_output_paths::CleanOutputPaths; use buck2_execute::execute::command_executor::ActionExecutionTimingData; use buck2_execute::execute::command_executor::CommandExecutor; -use buck2_execute::execute::dice_data::CommandExecutorResponse; -use buck2_execute::execute::dice_data::GetReClient; -use buck2_execute::execute::dice_data::HasCommandExecutor; +use buck2_execute::execute::dep_file_digest::DepFileDigest; use buck2_execute::execute::kind::CommandExecutionKind; use 
buck2_execute::execute::manager::CommandExecutionManager; use buck2_execute::execute::prepared::PreparedAction; use buck2_execute::execute::prepared::PreparedCommand; use buck2_execute::execute::request::CommandExecutionRequest; +use buck2_execute::execute::request::ExecutorPreference; use buck2_execute::execute::request::OutputType; use buck2_execute::execute::result::CommandExecutionReport; use buck2_execute::execute::result::CommandExecutionResult; @@ -58,6 +56,8 @@ use buck2_execute::output_size::OutputSize; use buck2_execute::re::manager::ManagedRemoteExecutionClient; use buck2_file_watcher::mergebase::GetMergebase; use buck2_file_watcher::mergebase::Mergebase; +use buck2_futures::cancellation::CancellationContext; +use buck2_http::HttpClient; use derivative::Derivative; use derive_more::Display; use dice::DiceComputations; @@ -65,11 +65,14 @@ use dupe::Dupe; use indexmap::indexmap; use indexmap::IndexMap; use itertools::Itertools; -use more_futures::cancellation::CancellationContext; +use remote_execution::TActionResult2; use crate::actions::artifact::get_artifact_fs::GetArtifactFs; use crate::actions::execute::action_execution_target::ActionExecutionTarget; -use crate::actions::execute::error::CommandExecutionErrorMarker; +use crate::actions::execute::dice_data::CommandExecutorResponse; +use crate::actions::execute::dice_data::DiceHasCommandExecutor; +use crate::actions::execute::dice_data::GetInvalidationTrackingConfig; +use crate::actions::execute::dice_data::GetReClient; use crate::actions::execute::error::ExecuteError; use crate::actions::impls::run_action_knobs::HasRunActionKnobs; use crate::actions::impls::run_action_knobs::RunActionKnobs; @@ -110,14 +113,15 @@ struct ActionOutputsData { pub struct ActionExecutionMetadata { pub execution_kind: ActionExecutionKind, pub timing: ActionExecutionTimingData, + pub input_files_bytes: Option, } /// The *way* that a particular action was executed. #[derive(Debug, Display, Clone)] pub enum ActionExecutionKind { - #[display(fmt = "command({})", kind)] + #[display("command({})", kind)] Command { - kind: CommandExecutionKind, + kind: Box, prefers_local: bool, requires_local: bool, allows_cache_upload: bool, @@ -125,16 +129,16 @@ pub enum ActionExecutionKind { allows_dep_file_cache_upload: bool, did_dep_file_cache_upload: bool, eligible_for_full_hybrid: bool, - dep_file_key: Option, + dep_file_key: Option, }, /// This action is simple and executed inline within buck2 (e.g. write, symlink_dir) - #[display(fmt = "simple")] + #[display("simple")] Simple, /// This action logically executed, but didn't do all the work. - #[display(fmt = "deferred")] + #[display("deferred")] Deferred, /// This action was served by the local dep file cache and not executed. 
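The `#[display(fmt = ...)]` to `#[display(...)]` rewrites through the enum below track the attribute-syntax change in derive_more 1.0. A minimal standalone example of the new syntax, assuming only the `derive_more` crate (with its `display` feature) as a dependency:

    use derive_more::Display;

    #[derive(Display)]
    enum ExecutionKind {
        // derive_more 1.0 drops the `fmt =` key: the format string and its
        // arguments are passed to the attribute directly; `_0` is field 0.
        #[display("command({})", _0)]
        Command(String),
        #[display("simple")]
        Simple,
    }

    fn main() {
        assert_eq!(ExecutionKind::Command("cc".to_owned()).to_string(), "command(cc)");
        assert_eq!(ExecutionKind::Simple.to_string(), "simple");
    }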
- #[display(fmt = "local_dep_files")] + #[display("local_dep_files")] LocalDepFile, } @@ -147,7 +151,7 @@ pub struct CommandExecutionRef<'a> { pub allows_dep_file_cache_upload: bool, pub did_dep_file_cache_upload: bool, pub eligible_for_full_hybrid: bool, - pub dep_file_key: &'a Option, + pub dep_file_key: &'a Option, } impl ActionExecutionKind { @@ -210,34 +214,20 @@ impl ActionOutputs { } } -/// Executes 'Actions' -#[async_trait] -pub trait ActionExecutor: Send + Sync { - async fn execute( - &self, - inputs: IndexMap, - action: &RegisteredAction, - cancellation: &CancellationContext, - ) -> ( - Result<(ActionOutputs, ActionExecutionMetadata), ExecuteError>, - Vec, - ); -} - #[async_trait] pub trait HasActionExecutor { async fn get_action_executor( - &self, + &mut self, config: &CommandExecutorConfig, - ) -> anyhow::Result>; + ) -> anyhow::Result>; } #[async_trait] -impl HasActionExecutor for DiceComputations { +impl HasActionExecutor for DiceComputations<'_> { async fn get_action_executor( - &self, + &mut self, executor_config: &CommandExecutorConfig, - ) -> anyhow::Result> { + ) -> anyhow::Result> { let artifact_fs = self.get_artifact_fs().await?; let digest_config = self.global_data().get_digest_config(); @@ -246,7 +236,7 @@ impl HasActionExecutor for DiceComputations { platform, cache_checker, cache_uploader, - } = self.get_command_executor(&artifact_fs, executor_config)?; + } = self.get_command_executor_from_dice(executor_config).await?; let blocking_executor = self.get_blocking_executor(); let materializer = self.per_transaction_data().get_materializer(); let events = self.per_transaction_data().get_dispatcher().dupe(); @@ -255,6 +245,7 @@ impl HasActionExecutor for DiceComputations { let io_provider = self.global_data().get_io_provider(); let http_client = self.per_transaction_data().get_http_client(); let mergebase = self.per_transaction_data().get_mergebase(); + let invalidation_tracking_enabled = self.get_invalidation_tracking_config().enabled; Ok(Arc::new(BuckActionExecutor::new( CommandExecutor::new( @@ -274,6 +265,7 @@ impl HasActionExecutor for DiceComputations { io_provider, http_client, mergebase, + invalidation_tracking_enabled, ))) } } @@ -289,6 +281,7 @@ pub struct BuckActionExecutor { io_provider: Arc, http_client: HttpClient, mergebase: Mergebase, + invalidation_tracking_enabled: bool, } impl BuckActionExecutor { @@ -303,6 +296,7 @@ impl BuckActionExecutor { io_provider: Arc, http_client: HttpClient, mergebase: Mergebase, + invalidation_tracking_enabled: bool, ) -> Self { Self { command_executor, @@ -315,6 +309,7 @@ impl BuckActionExecutor { io_provider, http_client, mergebase, + invalidation_tracking_enabled, } } } @@ -331,7 +326,8 @@ struct BuckActionExecutionContext<'a> { #[async_trait] impl ActionExecutionCtx for BuckActionExecutionContext<'_> { fn target(&self) -> ActionExecutionTarget<'_> { - ActionExecutionTarget::new(self.action) + let new_style_scratch_path = self.run_action_knobs().new_style_scratch_path; + ActionExecutionTarget::new(self.action, new_style_scratch_path) } fn fs(&self) -> &ArtifactFs { @@ -359,7 +355,7 @@ impl ActionExecutionCtx for BuckActionExecutionContext<'_> { } fn artifact_values(&self, artifact: &ArtifactGroup) -> &ArtifactGroupValues { - self.inputs.get(artifact).unwrap_or_else(|| panic!("Internal error: action {:?} tried to grab the artifact {:?} even though it was not an input.", self.action.owner(), artifact)) + self.inputs.get(artifact).unwrap_or_else(|| panic!("Internal error: action {} tried to grab the artifact {} even though 
it was not an input.", self.action.owner(), artifact)) } fn blocking_executor(&self) -> &dyn BlockingExecutor { @@ -370,6 +366,10 @@ impl ActionExecutionCtx for BuckActionExecutionContext<'_> { self.executor.re_client.dupe() } + fn re_platform(&self) -> &remote_execution::Platform { + self.executor.command_executor.re_platform() + } + fn digest_config(&self) -> DigestConfig { self.executor.digest_config } @@ -419,11 +419,12 @@ impl ActionExecutionCtx for BuckActionExecutionContext<'_> { fn unpack_command_execution_result( &mut self, - request: &CommandExecutionRequest, + executor_preference: ExecutorPreference, result: CommandExecutionResult, allows_cache_upload: bool, allows_dep_file_cache_upload: bool, - ) -> anyhow::Result<(ActionOutputs, ActionExecutionMetadata)> { + input_files_bytes: Option, + ) -> Result<(ActionOutputs, ActionExecutionMetadata), ExecuteError> { let CommandExecutionResult { outputs, report, @@ -432,7 +433,7 @@ impl ActionExecutionCtx for BuckActionExecutionContext<'_> { did_dep_file_cache_upload, dep_file_key, eligible_for_full_hybrid, - dep_file_metadata: _, + .. } = result; // TODO (@torozco): The execution kind should be made to come via the command reports too. let res = match &report.status { @@ -449,9 +450,9 @@ impl ActionExecutionCtx for BuckActionExecutionContext<'_> { ), ActionExecutionMetadata { execution_kind: ActionExecutionKind::Command { - kind: execution_kind.clone(), - prefers_local: request.executor_preference().prefers_local(), - requires_local: request.executor_preference().requires_local(), + kind: Box::new(execution_kind.clone()), + prefers_local: executor_preference.prefers_local(), + requires_local: executor_preference.requires_local(), allows_cache_upload, did_cache_upload, allows_dep_file_cache_upload, @@ -460,11 +461,17 @@ impl ActionExecutionCtx for BuckActionExecutionContext<'_> { eligible_for_full_hybrid, }, timing: report.timing.into(), + input_files_bytes, }, ); Ok(result) } - _ => Err(CommandExecutionErrorMarker.into()), + CommandExecutionStatus::Error { error, .. 
} => { + Err(ExecuteError::CommandExecutionError { + error: Some(error.clone()), + }) + } + _ => Err(ExecuteError::CommandExecutionError { error: None }), }; self.command_reports.extend(rejected_execution); self.command_reports.push(report); @@ -495,9 +502,10 @@ impl ActionExecutionCtx for BuckActionExecutionContext<'_> { async fn cache_upload( &mut self, - action_digest: ActionDigest, + action_digest_and_blobs: &ActionDigestAndBlobs, execution_result: &CommandExecutionResult, - dep_file_entry: Option, + re_result: Option, + dep_file_bundle: Option<&mut dyn IntoRemoteDepFile>, ) -> anyhow::Result { let action = self.target(); self.executor @@ -505,11 +513,12 @@ impl ActionExecutionCtx for BuckActionExecutionContext<'_> { .cache_upload( &CacheUploadInfo { target: &action as _, - action_digest, digest_config: self.digest_config(), }, execution_result, - dep_file_entry, + re_result, + dep_file_bundle, + action_digest_and_blobs, ) .await } @@ -533,24 +542,16 @@ impl ActionExecutionCtx for BuckActionExecutionContext<'_> { .await .context("Failed to invalidate output directory")?; - // Use Eden's clean up API if possible, it is significantly faster on Eden compared with - // the native method as the API does not load and materialize files or folders - if let Some(eden_buck_out) = self.executor.materializer.eden_buck_out() { - eden_buck_out - .remove_paths_recursive(self.fs().fs(), output_paths, self.cancellations) - .await?; - } else { - self.executor - .blocking_executor - .execute_io( - Box::new(CleanOutputPaths { - paths: output_paths, - }), - self.cancellations, - ) - .await - .context("Failed to cleanup output directory")?; - } + self.executor + .blocking_executor + .execute_io( + Box::new(CleanOutputPaths { + paths: output_paths, + }), + self.cancellations, + ) + .await + .context("Failed to cleanup output directory")?; Ok(()) } @@ -564,13 +565,12 @@ impl ActionExecutionCtx for BuckActionExecutionContext<'_> { } } -#[async_trait] -impl ActionExecutor for BuckActionExecutor { - async fn execute( +impl BuckActionExecutor { + pub(crate) async fn execute( &self, inputs: IndexMap, action: &RegisteredAction, - cancellations: &CancellationContext, + cancellations: &CancellationContext<'_>, ) -> ( Result<(ActionOutputs, ActionExecutionMetadata), ExecuteError>, Vec, @@ -578,7 +578,7 @@ impl ActionExecutor for BuckActionExecutor { let mut command_reports = Vec::new(); let res = async { - let outputs = action.outputs()?; + let outputs = action.outputs(); let mut ctx = BuckActionExecutionContext { executor: self, @@ -657,6 +657,10 @@ impl ActionExecutor for BuckActionExecutor { (res, command_reports) } + + pub fn invalidation_tracking_enabled(&self) -> bool { + self.invalidation_tracking_enabled + } } #[cfg(test)] @@ -669,23 +673,19 @@ mod tests { use allocative::Allocative; use async_trait::async_trait; + use buck2_artifact::actions::key::ActionIndex; use buck2_artifact::actions::key::ActionKey; use buck2_artifact::artifact::artifact_type::testing::BuildArtifactTestingExt; use buck2_artifact::artifact::artifact_type::Artifact; use buck2_artifact::artifact::build_artifact::BuildArtifact; use buck2_artifact::artifact::source_artifact::SourceArtifact; - use buck2_artifact::deferred::data::DeferredData; - use buck2_artifact::deferred::id::DeferredId; - use buck2_artifact::deferred::key::DeferredKey; + use buck2_artifact::deferred::key::DeferredHolderKey; use buck2_common::cas_digest::CasDigestConfig; - use buck2_common::http::HttpClientBuilder; use buck2_common::io::fs::FsIoProvider; use 
buck2_core::base_deferred_key::BaseDeferredKey; - use buck2_core::buck_path::path::BuckPath; - use buck2_core::category::Category; + use buck2_core::category::CategoryRef; use buck2_core::cells::cell_root_path::CellRootPathBuf; use buck2_core::cells::name::CellName; - use buck2_core::cells::paths::CellRelativePath; use buck2_core::cells::CellResolver; use buck2_core::configuration::data::ConfigurationData; use buck2_core::execution_types::executor_config::CommandExecutorConfig; @@ -694,14 +694,11 @@ mod tests { use buck2_core::fs::artifact_path_resolver::ArtifactFs; use buck2_core::fs::buck_out_path::BuckOutPathResolver; use buck2_core::fs::fs_util; - use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; use buck2_core::fs::project::ProjectRootTemp; use buck2_core::fs::project_rel_path::ProjectRelativePath; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; - use buck2_core::package::package_relative_path::PackageRelativePathBuf; - use buck2_core::package::PackageLabel; - use buck2_core::target::label::TargetLabel; - use buck2_core::target::name::TargetNameRef; + use buck2_core::package::source_path::SourcePath; + use buck2_core::target::label::label::TargetLabel; use buck2_events::dispatch::with_dispatcher_async; use buck2_events::dispatch::EventDispatcher; use buck2_execute::artifact_value::ArtifactValue; @@ -720,26 +717,26 @@ mod tests { use buck2_execute::execute::testing_dry_run::DryRunExecutor; use buck2_execute::materialize::nodisk::NoDiskMaterializer; use buck2_execute::re::manager::ManagedRemoteExecutionClient; + use buck2_futures::cancellation::CancellationContext; + use buck2_http::HttpClientBuilder; use dupe::Dupe; use indexmap::indexset; - use more_futures::cancellation::CancellationContext; - use once_cell::sync::Lazy; use sorted_vector_map::SortedVectorMap; use crate::actions::box_slice_set::BoxSliceSet; use crate::actions::execute::action_executor::ActionExecutionKind; use crate::actions::execute::action_executor::ActionExecutionMetadata; - use crate::actions::execute::action_executor::ActionExecutor; use crate::actions::execute::action_executor::ActionOutputs; use crate::actions::execute::action_executor::BuckActionExecutor; - use crate::actions::key::ActionKeyExt; use crate::actions::Action; use crate::actions::ActionExecutable; use crate::actions::ActionExecutionCtx; + use crate::actions::ExecuteError; use crate::actions::PristineActionExecutable; use crate::actions::RegisteredAction; use crate::artifact_groups::ArtifactGroup; use crate::artifact_groups::ArtifactGroupValues; + #[tokio::test] async fn can_execute_some_action() { let cells = CellResolver::testing_with_name_and_path( @@ -785,9 +782,11 @@ mod tests { CasDigestConfig::testing_default(), )), HttpClientBuilder::https_with_system_roots() + .await .unwrap() .build(), Default::default(), + true, ); #[derive(Debug, Allocative)] @@ -807,19 +806,20 @@ mod tests { Ok(Cow::Borrowed(self.inputs.as_slice())) } - fn outputs(&self) -> anyhow::Result> { - Ok(Cow::Borrowed(self.outputs.as_slice())) + fn outputs(&self) -> Cow<'_, [BuildArtifact]> { + Cow::Borrowed(self.outputs.as_slice()) + } + + fn first_output(&self) -> &BuildArtifact { + &self.outputs.as_slice()[0] } fn as_executable(&self) -> ActionExecutable<'_> { ActionExecutable::Pristine(self) } - fn category(&self) -> &Category { - static TEST_CATEGORY: Lazy = - Lazy::new(|| Category::try_from("testing").unwrap()); - - &TEST_CATEGORY + fn category(&self) -> CategoryRef { + CategoryRef::new("testing").unwrap() } fn identifier(&self) -> 
Option<&str> { @@ -832,7 +832,7 @@ mod tests { async fn execute( &self, ctx: &mut dyn ActionExecutionCtx, - ) -> anyhow::Result<(ActionOutputs, ActionExecutionMetadata)> { + ) -> Result<(ActionOutputs, ActionExecutionMetadata), ExecuteError> { self.ran.store(true, Ordering::SeqCst); let req = CommandExecutionRequest::new( @@ -875,7 +875,13 @@ mod tests { ctx.fs().fs().write_file(&dest_path, "", false)? } - ctx.unpack_command_execution_result(&req, res, false, false)?; + ctx.unpack_command_execution_result( + req.executor_preference, + res, + false, + false, + None, + )?; let outputs = self .outputs .iter() @@ -891,35 +897,28 @@ mod tests { ActionExecutionMetadata { execution_kind: ActionExecutionKind::Simple, timing: ActionExecutionTimingData::default(), + input_files_bytes: None, }, )) } } - let pkg = PackageLabel::new( - CellName::testing_new("cell"), - CellRelativePath::unchecked_new("pkg"), - ); - let inputs = indexset![ArtifactGroup::Artifact(Artifact::from( - SourceArtifact::new(BuckPath::testing_new( - pkg.dupe(), - PackageRelativePathBuf::unchecked_new("source".into()), - )) + SourceArtifact::new(SourcePath::testing_new("cell//pkg", "source")) ))]; - let label = TargetLabel::new(pkg, TargetNameRef::unchecked_new("foo")) - .configure(ConfigurationData::testing_new()); + let label = + TargetLabel::testing_parse("cell//pkg:foo").configure(ConfigurationData::testing_new()); let outputs = indexset![BuildArtifact::testing_new( label.dupe(), - ForwardRelativePathBuf::unchecked_new("output".into()), - DeferredId::testing_new(0), + "output", + ActionIndex::new(0), )]; let action = RegisteredAction::new( - ActionKey::new(DeferredData::unchecked_new(DeferredKey::Base( - BaseDeferredKey::TargetLabel(label.dupe()), - DeferredId::testing_new(0), - ))), + ActionKey::new( + DeferredHolderKey::Base(BaseDeferredKey::TargetLabel(label.dupe())), + ActionIndex::new(0), + ), Box::new(TestingAction { inputs: BoxSliceSet::from(inputs), outputs: BoxSliceSet::from(outputs.clone()), diff --git a/app/buck2_build_api/src/actions/execute/dice_data.rs b/app/buck2_build_api/src/actions/execute/dice_data.rs new file mode 100644 index 0000000000000..d4cbfca27e67c --- /dev/null +++ b/app/buck2_build_api/src/actions/execute/dice_data.rs @@ -0,0 +1,153 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! 
Attaching command execution related data to Dice
+
+use std::sync::Arc;
+
+use anyhow::Context;
+use async_trait::async_trait;
+use buck2_core::execution_types::executor_config::CommandExecutorConfig;
+use buck2_core::fs::artifact_path_resolver::ArtifactFs;
+use buck2_execute::execute::cache_uploader::UploadCache;
+use buck2_execute::execute::prepared::PreparedCommandExecutor;
+use buck2_execute::execute::prepared::PreparedCommandOptionalExecutor;
+use buck2_execute::re::manager::ManagedRemoteExecutionClient;
+use dice::DiceComputations;
+use dice::DiceData;
+use dice::DiceDataBuilder;
+use dice::UserComputationData;
+use dupe::Dupe;
+use remote_execution as RE;
+
+use crate::actions::artifact::get_artifact_fs::GetArtifactFs;
+
+pub struct CommandExecutorResponse {
+    pub executor: Arc<dyn PreparedCommandExecutor>,
+    pub platform: RE::Platform,
+    pub cache_checker: Arc<dyn PreparedCommandOptionalExecutor>,
+    pub cache_uploader: Arc<dyn UploadCache>,
+}
+
+pub trait SetCommandExecutor {
+    fn set_command_executor(&mut self, init: Box<dyn HasCommandExecutor + Send + Sync + 'static>);
+}
+
+pub trait HasCommandExecutor {
+    fn get_command_executor(
+        &self,
+        artifact_fs: &ArtifactFs,
+        config: &CommandExecutorConfig,
+    ) -> anyhow::Result<CommandExecutorResponse>;
+}
+
+impl SetCommandExecutor for UserComputationData {
+    fn set_command_executor(
+        &mut self,
+        delegate: Box<dyn HasCommandExecutor + Send + Sync + 'static>,
+    ) {
+        self.data.set(HasCommandExecutorHolder { delegate })
+    }
+}
+
+#[async_trait]
+pub trait DiceHasCommandExecutor {
+    async fn get_command_executor_from_dice(
+        &mut self,
+        config: &CommandExecutorConfig,
+    ) -> anyhow::Result<CommandExecutorResponse>;
+}
+
+#[async_trait]
+impl DiceHasCommandExecutor for DiceComputations<'_> {
+    async fn get_command_executor_from_dice(
+        &mut self,
+        config: &CommandExecutorConfig,
+    ) -> anyhow::Result<CommandExecutorResponse> {
+        let artifact_fs = self.get_artifact_fs().await?;
+        let holder = self
+            .per_transaction_data()
+            .data
+            .get::<HasCommandExecutorHolder>()
+            .context("CommandExecutorDelegate should be set")?;
+        holder.delegate.get_command_executor(&artifact_fs, config)
+    }
+}
+
+struct HasCommandExecutorHolder {
+    delegate: Box<dyn HasCommandExecutor + Send + Sync + 'static>,
+}
+
+pub trait HasFallbackExecutorConfig {
+    fn get_fallback_executor_config(&self) -> &Arc<CommandExecutorConfig>;
+}
+
+impl HasFallbackExecutorConfig for DiceComputations<'_> {
+    fn get_fallback_executor_config(&self) -> &Arc<CommandExecutorConfig> {
+        self.per_transaction_data()
+            .data
+            .get::<Arc<CommandExecutorConfig>>()
+            .expect("CommandExecutorConfig should be set")
+    }
+}
+
+pub fn set_fallback_executor_config(data: &mut DiceData, config: Arc<CommandExecutorConfig>) {
+    data.set(config)
+}
+
+pub trait SetReClient {
+    fn set_re_client(&mut self, re_client: ManagedRemoteExecutionClient);
+}
+
+pub trait GetReClient {
+    fn get_re_client(&self) -> ManagedRemoteExecutionClient;
+}
+
+impl SetReClient for UserComputationData {
+    fn set_re_client(&mut self, re_client: ManagedRemoteExecutionClient) {
+        self.data.set(re_client);
+    }
+}
+
+impl GetReClient for UserComputationData {
+    fn get_re_client(&self) -> ManagedRemoteExecutionClient {
+        self.data
+            .get::<ManagedRemoteExecutionClient>()
+            .expect("Re client should be set")
+            .dupe()
+    }
+}
+
+#[derive(Debug, Clone, Copy, Dupe)]
+pub struct InvalidationTrackingConfig {
+    pub enabled: bool,
+}
+
+pub trait SetInvalidationTrackingConfig {
+    fn set_invalidation_tracking_config(&mut self, enabled: bool);
+}
+
+pub trait GetInvalidationTrackingConfig {
+    fn get_invalidation_tracking_config(&self) -> InvalidationTrackingConfig;
+}
+
+impl SetInvalidationTrackingConfig for DiceDataBuilder {
+    fn set_invalidation_tracking_config(&mut self, enabled: bool) {
+        self.set(InvalidationTrackingConfig { enabled });
+    }
+}
+
+impl GetInvalidationTrackingConfig for DiceComputations<'_> {
+    fn get_invalidation_tracking_config(&self) -> 
InvalidationTrackingConfig { + *self + .global_data() + .get::() + .expect("InvalidationTrackingConfig should be set") + } +} diff --git a/app/buck2_build_api/src/actions/execute/error.rs b/app/buck2_build_api/src/actions/execute/error.rs index 26dd4a0d54833..8f2308974da25 100644 --- a/app/buck2_build_api/src/actions/execute/error.rs +++ b/app/buck2_build_api/src/actions/execute/error.rs @@ -7,13 +7,12 @@ * of this source tree. */ -use std::fmt::Display; -use std::fmt::Write; - use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; use buck2_execute::execute::request::OutputType; -use thiserror::Error; +/// This type intentionally does not implement `std::error::Error`. That's because it represents an +/// "incomplete" error - it needs more information like the command results, action keys, etc. +/// before it can be turned into a `buck2_build_api::actions::error::ActionError`. #[derive(Debug)] pub enum ExecuteError { MissingOutputs { @@ -31,59 +30,13 @@ pub enum ExecuteError { Error { error: anyhow::Error, }, - CommandExecutionError, -} - -impl ExecuteError { - pub(crate) fn as_proto(&self) -> buck2_data::action_execution_end::Error { - match self { - ExecuteError::MissingOutputs { declared } => buck2_data::CommandOutputsMissing { - message: format!("Action failed to produce outputs: {}", error_items(declared)), - } - .into(), - ExecuteError::MismatchedOutputs { declared, real } => buck2_data::CommandOutputsMissing { - message: format!( - "Action didn't produce the right set of outputs.\nExpected {}`\nreal {}", - error_items(declared), - error_items(real) - ), - } - .into(), - ExecuteError::WrongOutputType {path, declared, real} => buck2_data::CommandOutputsMissing { - message: format!( - "Action didn't produce output of the right type.\nExpected {path} to be {declared:?}\nreal {real:?}", - ), - } - .into(), - ExecuteError::Error { error } => format!("{:#}", error).into(), - ExecuteError::CommandExecutionError => buck2_data::CommandExecutionError {}.into(), - } - } -} - -fn error_items(xs: &[T]) -> String { - if xs.is_empty() { - return "none".to_owned(); - } - let mut res = String::new(); - for (i, x) in xs.iter().enumerate() { - if i != 0 { - res.push_str(", "); - } - write!(res, "`{}`", x).unwrap(); - } - res + CommandExecutionError { + error: Option, + }, } impl From for ExecuteError { fn from(error: anyhow::Error) -> Self { - if error.is::() { - return Self::CommandExecutionError; - } Self::Error { error } } } - -#[derive(Error, Debug)] -#[error("Command execution failed. Details are in the command report.")] -pub struct CommandExecutionErrorMarker; diff --git a/app/buck2_build_api/src/actions/execute/mod.rs b/app/buck2_build_api/src/actions/execute/mod.rs deleted file mode 100644 index 11b3c9b1702c3..0000000000000 --- a/app/buck2_build_api/src/actions/execute/mod.rs +++ /dev/null @@ -1,12 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -pub mod action_execution_target; -pub mod action_executor; -pub(crate) mod error; diff --git a/app/buck2_build_api/src/actions/impls/mod.rs b/app/buck2_build_api/src/actions/impls.rs similarity index 100% rename from app/buck2_build_api/src/actions/impls/mod.rs rename to app/buck2_build_api/src/actions/impls.rs diff --git a/app/buck2_build_api/src/actions/impls/expanded_command_line.rs b/app/buck2_build_api/src/actions/impls/expanded_command_line.rs index 004738abeb77d..465a327068873 100644 --- a/app/buck2_build_api/src/actions/impls/expanded_command_line.rs +++ b/app/buck2_build_api/src/actions/impls/expanded_command_line.rs @@ -67,9 +67,8 @@ impl ExpandedCommandLine { } #[cfg(test)] -mod test { +mod tests { use sorted_vector_map::sorted_vector_map; - use sorted_vector_map::SortedVectorMap; use super::*; diff --git a/app/buck2_build_api/src/actions/impls/json.rs b/app/buck2_build_api/src/actions/impls/json.rs index 0caefa0a5b7a1..215e91e2efe83 100644 --- a/app/buck2_build_api/src/actions/impls/json.rs +++ b/app/buck2_build_api/src/actions/impls/json.rs @@ -9,6 +9,7 @@ use std::io::sink; use std::io::Write; +use std::sync::Arc; use anyhow::Context; use buck2_artifact::artifact::artifact_type::Artifact; @@ -17,45 +18,69 @@ use buck2_execute::artifact::fs::ExecutorFs; use buck2_interpreter::types::configured_providers_label::StarlarkConfiguredProvidersLabel; use buck2_interpreter::types::target_label::StarlarkTargetLabel; use dupe::Dupe; +use either::Either; use serde::Serialize; use serde::Serializer; use starlark::values::dict::DictRef; use starlark::values::enumeration::EnumValue; use starlark::values::list::ListRef; +use starlark::values::none::NoneType; use starlark::values::record::Record; use starlark::values::structs::StructRef; use starlark::values::tuple::TupleRef; +use starlark::values::type_repr::StarlarkTypeRepr; use starlark::values::UnpackValue; use starlark::values::Value; use starlark::values::ValueLike; +use starlark::values::ValueTypedComplex; use crate::artifact_groups::ArtifactGroup; -use crate::interpreter::rule_defs::artifact::FrozenStarlarkOutputArtifact; -use crate::interpreter::rule_defs::artifact::StarlarkArtifactLike; -use crate::interpreter::rule_defs::artifact::StarlarkOutputArtifact; -use crate::interpreter::rule_defs::artifact::ValueAsArtifactLike; +use crate::interpreter::rule_defs::artifact::starlark_artifact_like::StarlarkArtifactLike; +use crate::interpreter::rule_defs::artifact::starlark_artifact_like::ValueAsArtifactLike; +use crate::interpreter::rule_defs::artifact::starlark_output_artifact::StarlarkOutputArtifact; use crate::interpreter::rule_defs::artifact_tagging::TaggedValue; +use crate::interpreter::rule_defs::cmd_args::value::CommandLineArg; use crate::interpreter::rule_defs::cmd_args::value_as::ValueAsCommandLineLike; -use crate::interpreter::rule_defs::cmd_args::CommandLineArgLike; +use crate::interpreter::rule_defs::cmd_args::AbsCommandLineContext; use crate::interpreter::rule_defs::cmd_args::CommandLineArtifactVisitor; +use crate::interpreter::rule_defs::cmd_args::CommandLineContext; use crate::interpreter::rule_defs::cmd_args::DefaultCommandLineContext; use crate::interpreter::rule_defs::cmd_args::FrozenStarlarkCmdArgs; use crate::interpreter::rule_defs::cmd_args::StarlarkCmdArgs; -use crate::interpreter::rule_defs::provider::ProviderLike; use crate::interpreter::rule_defs::provider::ValueAsProviderLike; use crate::interpreter::rule_defs::transitive_set::TransitiveSetJsonProjection; /// A wrapper with a Serialize instance so we can 
pass down the necessary context. pub struct SerializeValue<'a, 'v> { - pub value: Value<'v>, + pub value: JsonUnpack<'v>, pub fs: Option<&'a ExecutorFs<'a>>, + pub absolute: bool, +} + +struct AnyhowResultOfSerializedValue<'a, 'v> { + result: anyhow::Result>, +} + +impl<'a, 'v> Serialize for AnyhowResultOfSerializedValue<'a, 'v> { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match &self.result { + Ok(v) => v.serialize(serializer), + Err(e) => Err(serde::ser::Error::custom(format!("{:#}", e))), + } + } } impl<'a, 'v> SerializeValue<'a, 'v> { - fn with_value(&self, x: Value<'v>) -> Self { - Self { - value: x, - fs: self.fs, + fn with_value(&self, x: Value<'v>) -> AnyhowResultOfSerializedValue<'a, 'v> { + AnyhowResultOfSerializedValue { + result: JsonUnpack::unpack_value_err(x).map(|value| SerializeValue { + value, + fs: self.fs, + absolute: self.absolute, + }), } } } @@ -67,24 +92,50 @@ fn err(res: anyhow::Result) -> Result { } } +fn with_command_line_context(fs: &ExecutorFs<'_>, absolute: bool, f: F) -> T +where + F: FnOnce(&mut dyn CommandLineContext) -> T, +{ + let mut ctx = DefaultCommandLineContext::new(fs); + let mut abs; + let ctx = if absolute { + abs = AbsCommandLineContext::wrap(ctx); + &mut abs as _ + } else { + &mut ctx as _ + }; + + f(ctx) +} + /// Grab the value as an artifact, if you can. /// We want to deal with both normal artifacts, and .as_output() artifacts, /// since otherwise the .as_output ones will fall through as a cmd_args /// and end up getting wrapped in a list below. -fn get_artifact<'v>(x: Value<'v>) -> Option anyhow::Result + 'v>> { - if let Some(x) = ValueAsArtifactLike::unpack_value(x) { - Some(Box::new(|| Ok(x.0.get_bound_artifact()?.dupe()))) - } else if let Some(x) = x.downcast_ref::() { - Some(Box::new(|| Ok(((*x.inner()).get_bound_artifact())?.dupe()))) - } else if let Some(x) = x.downcast_ref::() { - Some(Box::new(|| Ok(x.inner().artifact()))) - } else { - None +#[derive(UnpackValue, StarlarkTypeRepr)] +pub enum JsonArtifact<'v> { + ValueAsArtifactLike(ValueAsArtifactLike<'v>), + StarlarkOutputArtifact(ValueTypedComplex<'v, StarlarkOutputArtifact<'v>>), +} + +impl<'v> JsonArtifact<'v> { + fn artifact(&self) -> anyhow::Result { + match self { + JsonArtifact::ValueAsArtifactLike(x) => Ok(x.0.get_bound_artifact()?.dupe()), + JsonArtifact::StarlarkOutputArtifact(x) => match x.unpack() { + Either::Left(x) => Ok((*x.inner()?).get_bound_artifact()?.dupe()), + Either::Right(x) => Ok(x.inner()?.artifact()), + }, + } } } -enum JsonUnpack<'v> { - None, +/// Partially unpack the value into JSON writable with `ctx.actions.write_json`. +/// This does not help typechecker much (because it only validates top-level types), +/// but it provides better documentation. 
+#[derive(UnpackValue, StarlarkTypeRepr)] +pub enum JsonUnpack<'v> { + None(NoneType), String(&'v str), Number(i64), Bool(bool), @@ -97,51 +148,10 @@ enum JsonUnpack<'v> { TransitiveSetJsonProjection(&'v TransitiveSetJsonProjection<'v>), TargetLabel(&'v StarlarkTargetLabel), ConfiguredProvidersLabel(&'v StarlarkConfiguredProvidersLabel), - Artifact(Box anyhow::Result + 'v>), - CommandLine(&'v dyn CommandLineArgLike), - Provider(&'v dyn ProviderLike<'v>), + Artifact(JsonArtifact<'v>), + CommandLine(CommandLineArg<'v>), + Provider(ValueAsProviderLike<'v>), TaggedValue(&'v TaggedValue<'v>), - Unsupported, -} - -fn unpack<'v>(value: Value<'v>) -> JsonUnpack<'v> { - if value.is_none() { - JsonUnpack::None - } else if let Some(x) = value.unpack_str() { - JsonUnpack::String(x) - } else if let Some(x) = i64::unpack_value(value) { - JsonUnpack::Number(x) - } else if let Some(x) = value.unpack_bool() { - JsonUnpack::Bool(x) - } else if let Some(x) = ListRef::from_value(value) { - JsonUnpack::List(x) - } else if let Some(x) = TupleRef::from_value(value) { - JsonUnpack::Tuple(x) - } else if let Some(x) = DictRef::from_value(value) { - JsonUnpack::Dict(x) - } else if let Some(x) = StructRef::from_value(value) { - JsonUnpack::Struct(x) - } else if let Some(x) = Record::from_value(value) { - JsonUnpack::Record(x) - } else if let Some(x) = EnumValue::from_value(value) { - JsonUnpack::Enum(x) - } else if let Some(x) = TransitiveSetJsonProjection::from_value(value) { - JsonUnpack::TransitiveSetJsonProjection(x) - } else if let Some(x) = StarlarkTargetLabel::from_value(value) { - JsonUnpack::TargetLabel(x) - } else if let Some(x) = StarlarkConfiguredProvidersLabel::from_value(value) { - JsonUnpack::ConfiguredProvidersLabel(x) - } else if let Some(x) = get_artifact(value) { - JsonUnpack::Artifact(x) - } else if let Some(x) = value.as_command_line() { - JsonUnpack::CommandLine(x) - } else if let Some(x) = value.as_provider() { - JsonUnpack::Provider(x) - } else if let Some(x) = TaggedValue::from_value(value) { - JsonUnpack::TaggedValue(x) - } else { - JsonUnpack::Unsupported - } } impl<'a, 'v> Serialize for SerializeValue<'a, 'v> { @@ -149,11 +159,11 @@ impl<'a, 'v> Serialize for SerializeValue<'a, 'v> { where S: Serializer, { - match unpack(self.value) { - JsonUnpack::None => serializer.serialize_none(), + match &self.value { + JsonUnpack::None(_) => serializer.serialize_none(), JsonUnpack::String(x) => serializer.serialize_str(x), - JsonUnpack::Number(x) => serializer.serialize_i64(x), - JsonUnpack::Bool(x) => serializer.serialize_bool(x), + JsonUnpack::Number(x) => serializer.serialize_i64(*x), + JsonUnpack::Bool(x) => serializer.serialize_bool(*x), JsonUnpack::List(x) => serializer.collect_seq(x.iter().map(|v| self.with_value(v))), JsonUnpack::Tuple(x) => serializer.collect_seq(x.iter().map(|v| self.with_value(v))), JsonUnpack::Dict(x) => serializer.collect_map( @@ -188,12 +198,16 @@ impl<'a, 'v> Serialize for SerializeValue<'a, 'v> { serializer.serialize_str("") } Some(fs) => { - serializer.serialize_str(err(err(x())?.resolve_path(fs.fs()))?.as_str()) + let path = err(err(x.artifact())?.resolve_path(fs.fs()))?; + let path = with_command_line_context(fs, self.absolute, |ctx| { + err(ctx.resolve_project_path(path)).map(|loc| loc.into_string()) + })?; + serializer.serialize_str(&path) } } } JsonUnpack::CommandLine(x) => { - let singleton = is_singleton_cmdargs(self.value); + let singleton = is_singleton_cmdargs(*x); match self.fs { None => { // See a few lines up for fs == None details. 
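The `AnyhowResultOfSerializedValue` wrapper introduced above defers unpack failures to serialization time instead of propagating them eagerly, so a bad nested value only errors if it is actually written out. A minimal standalone sketch of the same pattern, assuming `serde` and `anyhow` as dependencies (the `DeferredResult` name is hypothetical, not buck2 API):

use serde::ser::{Error as _, Serialize, Serializer};

// Wrap a fallible computation so the failure only surfaces as a serde
// error if and when the value is actually serialized.
struct DeferredResult<T>(anyhow::Result<T>);

impl<T: Serialize> Serialize for DeferredResult<T> {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        match &self.0 {
            // The happy path serializes the inner value transparently.
            Ok(v) => v.serialize(serializer),
            // The error path converts the anyhow chain into a serde error.
            Err(e) => Err(S::Error::custom(format!("{:#}", e))),
        }
    }
}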
@@ -207,8 +221,11 @@ impl<'a, 'v> Serialize for SerializeValue<'a, 'v> { // WriteJsonCommandLineArgGen assumes that any args/write-to-file macros are // rejected here and needs to be updated if that changes. let mut items = Vec::::new(); - let mut ctx = DefaultCommandLineContext::new(fs); - err(x.add_to_command_line(&mut items, &mut ctx))?; + + with_command_line_context(fs, self.absolute, |ctx| { + err(x.as_command_line_arg().add_to_command_line(&mut items, ctx)) + })?; + // We change the type, based on the value - singleton = String, otherwise list. // That's a little annoying (type based on value), but otherwise there would be // no way to produce a cmd_args as a single string. @@ -221,38 +238,39 @@ impl<'a, 'v> Serialize for SerializeValue<'a, 'v> { } } JsonUnpack::Provider(x) => { - serializer.collect_map(x.items().iter().map(|(k, v)| (k, self.with_value(*v)))) + serializer.collect_map(x.0.items().iter().map(|(k, v)| (k, self.with_value(*v)))) } JsonUnpack::TaggedValue(x) => self.with_value(*x.value()).serialize(serializer), - JsonUnpack::Unsupported => Err(serde::ser::Error::custom(format!( - "Type `{}` is not supported by `write_json`", - self.value.get_type() - ))), } } } -fn is_singleton_cmdargs(x: Value) -> bool { - if let Some(x) = x.downcast_ref::() { +fn is_singleton_cmdargs(x: CommandLineArg) -> bool { + if let Some(x) = x.to_value().downcast_ref::() { x.is_concat() - } else if let Some(x) = x.downcast_ref::() { + } else if let Some(x) = x.to_value().downcast_ref::() { x.is_concat() } else { false } } -pub fn validate_json(x: Value) -> anyhow::Result<()> { - write_json(x, None, &mut sink(), false) +pub fn validate_json(x: JsonUnpack) -> anyhow::Result<()> { + write_json(x, None, &mut sink(), false, false) } pub fn write_json( - x: Value, + value: JsonUnpack, fs: Option<&ExecutorFs>, mut writer: &mut dyn Write, pretty: bool, + absolute: bool, ) -> anyhow::Result<()> { - let value = SerializeValue { value: x, fs }; + let value = SerializeValue { + value, + fs, + absolute, + }; (|| { if pretty { serde_json::to_writer_pretty(&mut writer, &value)?; @@ -271,8 +289,8 @@ pub fn visit_json_artifacts( v: Value, visitor: &mut dyn CommandLineArtifactVisitor, ) -> anyhow::Result<()> { - match unpack(v) { - JsonUnpack::None + match JsonUnpack::unpack_value_err(v)? { + JsonUnpack::None(_) | JsonUnpack::String(_) | JsonUnpack::Number(_) | JsonUnpack::Bool(_) @@ -307,24 +325,20 @@ pub fn visit_json_artifacts( } } JsonUnpack::TransitiveSetJsonProjection(x) => visitor.visit_input( - ArtifactGroup::TransitiveSetProjection(x.to_projection_key()?), + ArtifactGroup::TransitiveSetProjection(Arc::new(x.to_projection_key()?)), None, ), JsonUnpack::Artifact(_x) => { // The _x function requires that the artifact is already bound, but we may need to visit artifacts // before that happens. Treating it like an opaque command_line works as we want for any artifact // type. - v.as_command_line_err()?.visit_artifacts(visitor)?; - } - JsonUnpack::CommandLine(x) => x.visit_artifacts(visitor)?, - JsonUnpack::Unsupported => { - return Err(anyhow::anyhow!( - "Type `{}` is not supported by `write_json` (this should be unreachable)", - v.get_type() - )); + ValueAsCommandLineLike::unpack_value_err(v)? 
+ .0 + .visit_artifacts(visitor)?; } + JsonUnpack::CommandLine(x) => x.as_command_line_arg().visit_artifacts(visitor)?, JsonUnpack::Provider(x) => { - for (_, v) in x.items() { + for (_, v) in x.0.items() { visit_json_artifacts(v, visitor)?; } } diff --git a/app/buck2_build_api/src/actions/impls/run_action_knobs.rs b/app/buck2_build_api/src/actions/impls/run_action_knobs.rs index 6518e0eeb37da..9cb57a3a46fd4 100644 --- a/app/buck2_build_api/src/actions/impls/run_action_knobs.rs +++ b/app/buck2_build_api/src/actions/impls/run_action_knobs.rs @@ -24,6 +24,9 @@ pub struct RunActionKnobs { /// for network actions (download_file, cas_artifact). Used to support offline /// builds. pub use_network_action_output_cache: bool, + + /// TODO(cjhopman): Modifies action digest, remove after migration + pub new_style_scratch_path: bool, } pub trait HasRunActionKnobs { diff --git a/app/buck2_build_api/src/actions/key.rs b/app/buck2_build_api/src/actions/key.rs deleted file mode 100644 index d7283606e9c58..0000000000000 --- a/app/buck2_build_api/src/actions/key.rs +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::sync::Arc; - -use buck2_artifact::actions::key::ActionKey; -use buck2_artifact::deferred::data::DeferredData; - -use crate::actions::RegisteredAction; - -pub trait ActionKeyExt { - #[allow(clippy::new_ret_no_self)] - fn new(key: DeferredData>) -> ActionKey; - fn deferred_data(&self) -> &DeferredData>; -} - -impl ActionKeyExt for ActionKey { - fn new(key: DeferredData>) -> ActionKey { - ActionKey::unchecked_new(key.into_deferred_key()) - } - - fn deferred_data(&self) -> &DeferredData> { - DeferredData::unchecked_new_ref(self.deferred_key()) - } -} diff --git a/app/buck2_build_api/src/actions/mod.rs b/app/buck2_build_api/src/actions/mod.rs deleted file mode 100644 index d57ebc9cbb513..0000000000000 --- a/app/buck2_build_api/src/actions/mod.rs +++ /dev/null @@ -1,392 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! This module contains support for running actions and asynchronous providers -//! -//! An 'Action' is a unit of work with a set of input files known as 'Artifact's that are required -//! for its execution, and a set of output files called 'BuildArtifact's that are created by its -//! execution. Each 'Action' registered by a rule will only be executed when it's 'BuildArtifact's -//! are requested to be available. It will be guaranteed by the action system that all input -//! 'Artifact's are available before the execution of an 'Action'. -//! -//! 'Actions' struct will act as a general registry where users can create new 'Artifact's that -//! represent the outputs of the execution of their 'Action'. These are 'DeclaredArtifact's that -//! are yet bound to any 'Action's. When 'Action's are registered, they will be bound to their -//! appropriate 'DeclaredArtifact' to create a 'BuildArtifact' -//! -//! An 'Action' can be bound to multiple 'BuildArtifact's, but each 'BuildArtifact' can only be -//! 
bound to a particular 'Action'. - -pub mod artifact; -pub mod box_slice_set; -pub mod calculation; -pub mod execute; -pub mod impls; -pub mod key; -pub mod query; -pub mod registry; - -use std::borrow::Cow; -use std::fmt::Debug; -use std::ops::ControlFlow; -use std::sync::Arc; - -use allocative::Allocative; -use async_trait::async_trait; -use buck2_artifact::actions::key::ActionKey; -use buck2_artifact::artifact::build_artifact::BuildArtifact; -use buck2_artifact::artifact::provide_outputs::ProvideActionKey; -use buck2_artifact::artifact::provide_outputs::ProvideOutputs; -use buck2_common::http::HttpClient; -use buck2_common::io::IoProvider; -use buck2_core::base_deferred_key::BaseDeferredKey; -use buck2_core::category::Category; -use buck2_core::execution_types::executor_config::CommandExecutorConfig; -use buck2_core::fs::artifact_path_resolver::ArtifactFs; -use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; -use buck2_events::dispatch::EventDispatcher; -use buck2_execute::artifact::fs::ExecutorFs; -use buck2_execute::digest_config::DigestConfig; -use buck2_execute::execute::action_digest::ActionDigest; -use buck2_execute::execute::blocking::BlockingExecutor; -use buck2_execute::execute::cache_uploader::CacheUploadResult; -use buck2_execute::execute::cache_uploader::DepFileEntry; -use buck2_execute::execute::manager::CommandExecutionManager; -use buck2_execute::execute::prepared::PreparedAction; -use buck2_execute::execute::request::CommandExecutionRequest; -use buck2_execute::execute::result::CommandExecutionResult; -use buck2_execute::materialize::materializer::Materializer; -use buck2_execute::re::manager::ManagedRemoteExecutionClient; -use buck2_file_watcher::mergebase::Mergebase; -use derivative::Derivative; -use derive_more::Display; -use dupe::Dupe; -use indexmap::indexmap; -use indexmap::IndexMap; -use indexmap::IndexSet; -use more_futures::cancellation::CancellationContext; -use starlark::values::OwnedFrozenValue; -use static_assertions::_core::ops::Deref; -use thiserror::Error; - -use crate::actions::execute::action_execution_target::ActionExecutionTarget; -use crate::actions::execute::action_executor::ActionExecutionMetadata; -use crate::actions::execute::action_executor::ActionOutputs; -use crate::actions::impls::run_action_knobs::RunActionKnobs; -use crate::artifact_groups::ArtifactGroup; -use crate::artifact_groups::ArtifactGroupValues; -use crate::deferred::types::AnyValue; -use crate::deferred::types::TrivialDeferred; - -/// Represents an unregistered 'Action' that will be registered into the 'Actions' module. -/// The 'UnregisteredAction' is not executable until it is registered, upon which it becomes an -/// 'Action' that is executable. -pub trait UnregisteredAction: Allocative { - /// consumes the self and becomes a registered 'Action'. The 'Action' will be executable - /// and no longer bindable to any other 'Artifact's. - fn register( - self: Box, - inputs: IndexSet, - outputs: IndexSet, - starlark_data: Option, - ) -> anyhow::Result>; -} - -/// A registered, immutable 'Action' that is fully bound. All it's 'Artifact's, both inputs and -/// outputs are verified to exist. -/// -/// The 'Action' can be executed to produce the set of 'BuildArtifact's it declares. Before -/// execution, all input 'Artifact's will be made available to access. -#[async_trait] -pub trait Action: Allocative + Debug + Send + Sync + 'static { - /// A machine readable kind identifying this type of action. 
- fn kind(&self) -> buck2_data::ActionKind; - - /// All the input 'Artifact's, both sources and built artifacts, that are required for - /// executing this artifact. While nothing enforces it, this should be a pure function. - fn inputs(&self) -> anyhow::Result>; - - /// All the outputs this 'Artifact' will generate. Just like inputs, this should be a pure - /// function. - fn outputs(&self) -> anyhow::Result>; - - /// Obtains an executable for this action. - fn as_executable(&self) -> ActionExecutable<'_>; - - /// A machine-readable category for this action, intended to be used when analyzing actions outside of buck2 itself. - /// - /// A category provides a namespace for identifiers within the rule that produced this action. Examples of - /// categories would be things such as `cxx_compile`, `cxx_link`, and so on. Categories are user-specified in the - /// rule implementation; however, buck2 enforces some restrictions on category names. - fn category(&self) -> &Category; - - /// A machine-readable identifier for this action. Required (but as of now, not yet enforced) to be unique within - /// a category within a single invocation of a rule. Like categories, identifiers are also user-specified and buck2 - /// ascribes no semantics to them. Examples of category-identifier pairs would be `cxx_compile` + `MyCppFile.cpp`, - /// reflecting a C++ compiler invocation for a file `MyCppFile.cpp`. - /// - /// Not required; if None, only one action will be given in the given category. The user should - /// be given either control over the identifier or the category. - fn identifier(&self) -> Option<&str>; - - /// Whether to always print stderr, or only print when a user asks for it. - fn always_print_stderr(&self) -> bool { - false - } - - /// Provides a string name for this action, obtained by combining the provided category and identifier. - fn name(&self) -> String { - if let Some(identifier) = self.identifier() { - format!("{} {}", self.category(), identifier) - } else { - self.category().to_string() - } - } - - fn aquery_attributes(&self, _fs: &ExecutorFs) -> IndexMap { - indexmap! {} - } - - // TODO this probably wants more data for execution, like printing a short_name and the target -} - -pub enum ActionExecutable<'a> { - Pristine(&'a dyn PristineActionExecutable), - Incremental(&'a dyn IncrementalActionExecutable), -} - -#[async_trait] -pub trait PristineActionExecutable: Send + Sync + 'static { - /// Runs the 'Action', where all inputs are available and the output directory has been cleaned - /// up. Upon success, it is expected that all outputs will be available - async fn execute( - &self, - ctx: &mut dyn ActionExecutionCtx, - ) -> anyhow::Result<(ActionOutputs, ActionExecutionMetadata)>; -} - -#[async_trait] -pub trait IncrementalActionExecutable: Send + Sync + 'static { - /// Runs the 'Action', where all inputs are available but the output directory may not have - /// been cleaned up. 
Upon success, it is expected that all outputs will be available - async fn execute( - &self, - ctx: &mut dyn ActionExecutionCtx, - ) -> anyhow::Result<(ActionOutputs, ActionExecutionMetadata)>; -} - -/// The context for actions to use when executing -#[async_trait] -pub trait ActionExecutionCtx: Send + Sync { - fn target(&self) -> ActionExecutionTarget<'_>; - - /// An 'ArtifactFs' to be used for managing 'Artifact's - fn fs(&self) -> &ArtifactFs; - - fn executor_fs(&self) -> ExecutorFs; - - /// A `Materializer` used for expensive materializations - fn materializer(&self) -> &dyn Materializer; - - fn events(&self) -> &EventDispatcher; - - fn command_execution_manager(&self) -> CommandExecutionManager; - - fn mergebase(&self) -> &Mergebase; - - fn prepare_action( - &mut self, - request: &CommandExecutionRequest, - ) -> anyhow::Result; - - async fn action_cache( - &mut self, - manager: CommandExecutionManager, - request: &CommandExecutionRequest, - prepared_action: &PreparedAction, - ) -> ControlFlow; - - async fn cache_upload( - &mut self, - action_digest: ActionDigest, - execution_result: &CommandExecutionResult, - dep_file_entry: Option, - ) -> anyhow::Result; - - /// Executes a command - /// TODO(bobyf) this seems like it deserves critical sections? - async fn exec_cmd( - &mut self, - manager: CommandExecutionManager, - request: &CommandExecutionRequest, - prepared_action: &PreparedAction, - ) -> CommandExecutionResult; - - fn unpack_command_execution_result( - &mut self, - request: &CommandExecutionRequest, - result: CommandExecutionResult, - allows_cache_upload: bool, - allows_dep_file_cache_upload: bool, - ) -> anyhow::Result<(ActionOutputs, ActionExecutionMetadata)>; - - /// Clean up all the output directories for this action. This requires a mutable reference - /// because you shouldn't be doing anything else with the ActionExecutionCtx while cleaning the - /// outputs. - async fn cleanup_outputs(&mut self) -> anyhow::Result<()>; - - /// Get the value of an Artifact. This Artifact _must_ have been declared - /// as an input to the associated action or a panic will be raised. - fn artifact_values(&self, input: &ArtifactGroup) -> &ArtifactGroupValues; - - fn blocking_executor(&self) -> &dyn BlockingExecutor; - - fn re_client(&self) -> ManagedRemoteExecutionClient; - - fn digest_config(&self) -> DigestConfig; - - /// Obtain per-command knobs for RunAction. - fn run_action_knobs(&self) -> RunActionKnobs; - - fn cancellation_context(&self) -> &CancellationContext; - - /// I/O layer access to add non-source files (e.g. downloaded files) to - /// offline archive trace. If None, tracing is not enabled. - fn io_provider(&self) -> Arc; - - /// Http client used for fetching and downloading remote artifacts. - fn http_client(&self) -> HttpClient; -} - -#[derive(Error, Debug)] -pub enum ActionErrors { - #[error("Output path for artifact or metadata file cannot be empty.")] - EmptyOutputPath, - #[error( - "Multiple artifacts and/or metadata files are declared at the same output location `{0}` declared at `{1}`." - )] - ConflictingOutputPath(ForwardRelativePathBuf, String), - #[error( - "Multiple artifacts and/or metadata files are declared at conflicting output locations. Output path `{0}` conflicts with the following output paths: {1:?}." 
- )] - ConflictingOutputPaths(ForwardRelativePathBuf, Vec), - #[error( - "Action category `{0}` contains duplicate identifier `{1}`; category-identifier pairs must be unique within a rule" - )] - ActionCategoryIdentifierNotUnique(Category, String), - #[error( - "Analysis produced multiple actions with category `{0}` and at least one of them had no identifier. Add an identifier to these actions to disambiguate them" - )] - ActionCategoryDuplicateSingleton(Category), -} - -#[derive(Derivative, Debug, Display, Allocative)] -#[derivative(Eq, Hash, PartialEq)] -#[display(fmt = "Action(key={}, name={})", key, "action.name()")] -pub struct RegisteredAction { - /// The key uniquely identifies a registered action. - /// The key to the action is a one to one mapping. - key: ActionKey, - #[derivative(Hash = "ignore", PartialEq = "ignore")] - action: Box, - #[derivative(Hash = "ignore", PartialEq = "ignore")] - executor_config: Arc, -} - -impl TrivialDeferred for Arc { - fn as_any_value(&self) -> &dyn AnyValue { - self - } - - fn provide<'a>(&'a self, demand: &mut provider::Demand<'a>) { - demand.provide_value_with(|| { - ProvideOutputs( - self.action - .outputs() - .map(|outputs| outputs.iter().cloned().collect()), - ) - }); - demand.provide_value_with(|| ProvideActionKey(self.key.dupe())); - } -} - -impl RegisteredAction { - pub fn new( - key: ActionKey, - action: Box, - executor_config: Arc, - ) -> Self { - Self { - key, - action, - executor_config, - } - } - - pub fn action(&self) -> &dyn Action { - self.action.as_ref() - } - - /// Gets the target label to the rule that created this action. - pub fn owner(&self) -> &BaseDeferredKey { - self.key.deferred_key().owner() - } - - pub fn key(&self) -> &ActionKey { - &self.key - } - - pub fn execution_config(&self) -> &CommandExecutorConfig { - &self.executor_config - } - - pub fn category(&self) -> &Category { - self.action.category() - } - - pub fn identifier(&self) -> Option<&str> { - self.action.identifier() - } -} - -impl Deref for RegisteredAction { - type Target = dyn Action; - - fn deref(&self) -> &Self::Target { - self.action.as_ref() - } -} - -/// An 'UnregisteredAction' that is stored by the 'ActionRegistry' to be registered. -/// The stored inputs have not yet been validated as bound, but will be validated upon registering. 
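`RegisteredAction` above uses `derivative` to ignore the action and executor-config fields in `Hash`/`PartialEq`, so identity is the `ActionKey` alone. A rough hand-rolled equivalent of that identity-by-key choice, with simplified placeholder types rather than the real buck2 definitions:

use std::hash::{Hash, Hasher};

// Identity is defined by the unique key; the payload is ignored, mirroring
// the `#[derivative(Hash = "ignore", PartialEq = "ignore")]` fields above.
struct Registered<K: Eq + Hash, A> {
    key: K,
    action: A,
}

impl<K: Eq + Hash, A> PartialEq for Registered<K, A> {
    fn eq(&self, other: &Self) -> bool {
        self.key == other.key
    }
}

impl<K: Eq + Hash, A> Eq for Registered<K, A> {}

impl<K: Eq + Hash, A> Hash for Registered<K, A> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.key.hash(state);
    }
}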
-#[derive(Allocative)] -struct ActionToBeRegistered { - inputs: IndexSet, - outputs: IndexSet, - action: Box, -} - -impl ActionToBeRegistered { - fn new( - inputs: IndexSet, - outputs: IndexSet, - a: A, - ) -> Self { - Self { - inputs, - outputs, - action: Box::new(a), - } - } - - fn register(self, starlark_data: Option) -> anyhow::Result> { - self.action - .register(self.inputs, self.outputs, starlark_data) - } -} diff --git a/app/buck2_build_api/src/actions/query.rs b/app/buck2_build_api/src/actions/query.rs index 6ccbb5747eb7f..1935f600e838e 100644 --- a/app/buck2_build_api/src/actions/query.rs +++ b/app/buck2_build_api/src/actions/query.rs @@ -18,28 +18,32 @@ use std::sync::Arc; use allocative::Allocative; use buck2_artifact::actions::key::ActionKey; +use buck2_common::global_cfg_options::GlobalCfgOptions; use buck2_core::build_file_path::BuildFilePath; use buck2_core::cells::cell_path::CellPath; use buck2_core::cells::CellResolver; use buck2_core::fs::artifact_path_resolver::ArtifactFs; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; use buck2_core::fs::project_rel_path::ProjectRelativePath; +use buck2_core::package::PackageLabel; use buck2_core::provider::label::ConfiguredProvidersLabel; -use buck2_core::target::label::TargetLabel; use buck2_execute::artifact::fs::ExecutorFs; -use buck2_query::query::environment::LabeledNode; -use buck2_query::query::environment::NodeLabel; +use buck2_node::attrs::configured_attr::ConfiguredAttr; use buck2_query::query::environment::QueryTarget; +use buck2_query::query::graph::node::LabeledNode; +use buck2_query::query::graph::node::NodeKey; use buck2_util::late_binding::LateBinding; use derivative::Derivative; use dice::DiceComputations; use dupe::Dupe; +use either::Either; use gazebo::variants::VariantName; use indexmap::IndexMap; use internment::ArcIntern; use ref_cast::RefCast; use serde::Serialize; -use serde::Serializer; +use starlark::values::Heap; +use starlark::values::Value; use crate::actions::RegisteredAction; use crate::analysis::AnalysisResult; @@ -61,10 +65,6 @@ impl ActionAttr { ActionAttr::ref_cast(x) } - pub fn value(&self) -> &str { - &self.0 - } - pub fn to_owned(&self) -> OwnedActionAttr { OwnedActionAttr(self.0.to_owned()) } @@ -184,9 +184,9 @@ impl ActionQueryNode { } impl LabeledNode for ActionQueryNode { - type NodeRef = ActionQueryNodeRef; + type Key = ActionQueryNodeRef; - fn node_ref(&self) -> &Self::NodeRef { + fn node_key(&self) -> &Self::Key { &self.key } } @@ -251,7 +251,13 @@ pub enum ActionQueryNodeRef { Action(ActionKey), } -impl NodeLabel for ActionQueryNodeRef {} +#[derive(Copy, Clone)] +pub enum PackageLabelOption { + PackageLabel(PackageLabel), + TransitionAttr, +} + +impl NodeKey for ActionQueryNodeRef {} impl ActionQueryNodeRef { pub fn require_action(&self) -> anyhow::Result<&ActionKey> { @@ -274,32 +280,44 @@ impl QueryTarget for ActionQueryNode { } } + fn name(&self) -> Cow { + Cow::Owned(self.node_key().to_string()) + } + /// Return the path to the buildfile that defines this target, e.g. `fbcode//foo/bar/TARGETS` fn buildfile_path(&self) -> &BuildFilePath { // TODO(cjhopman): In addition to implementing this, we should be able to return an anyhow::Error here rather than panicking. unimplemented!("buildfile not yet implemented in aquery") } - // TODO(cjhopman): Use existential traits to remove the Box<> once they are stabilized. 
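The `deps` change just below replaces boxed trait-object iterators with `impl Iterator`, using `either::Either` to give the two branches a single concrete return type. A small sketch of that trick, assuming the `either` crate and toy types:

use either::Either;

// Both arms must have the same type when returning `impl Iterator`;
// `Either` implements `Iterator` when both sides do, unifying the branches
// without a heap allocation.
fn deps<'a>(values: Option<&'a [u32]>) -> impl Iterator<Item = &'a u32> + 'a {
    match values {
        None => Either::Left(std::iter::empty()),
        Some(xs) => Either::Right(xs.iter()),
    }
}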
- fn deps<'a>(&'a self) -> Box + Send + 'a> { + fn deps<'a>(&'a self) -> impl Iterator + Send + 'a { // When traversing deps in aquery, we do *not* traverse deps for the target nodes, since // those are just for literals let action = match &self.data { ActionQueryNodeData::Action(action) => action, - ActionQueryNodeData::Analysis(..) => return Box::new(std::iter::empty()), + ActionQueryNodeData::Analysis(..) => return Either::Left(std::iter::empty()), }; - Box::new(iter_action_inputs(&action.deps)) + Either::Right(iter_action_inputs(&action.deps)) } - fn exec_deps<'a>(&'a self) -> Box + Send + 'a> { - Box::new(std::iter::empty()) + fn exec_deps<'a>(&'a self) -> impl Iterator + Send + 'a { + std::iter::empty() } - fn target_deps<'a>(&'a self) -> Box + Send + 'a> { + fn target_deps<'a>(&'a self) -> impl Iterator + Send + 'a { self.deps() } + fn configuration_deps<'a>(&'a self) -> impl Iterator + Send + 'a { + std::iter::empty() + } + + fn toolchain_deps<'a>(&'a self) -> impl Iterator + Send + 'a { + // TODO(ezgi): implement toolchain deps for aquery + std::iter::empty() + } + fn attr_any_matches( attr: &Self::Attr<'_>, filter: &dyn Fn(&str) -> anyhow::Result, @@ -333,9 +351,8 @@ impl QueryTarget for ActionQueryNode { "identifier", ActionAttr::new(action.action.identifier().unwrap_or("")), )?; - // TODO(cjhopman): impl inputs/outputs for actions in aquery - func("inputs", ActionAttr::new(""))?; - func("outputs", ActionAttr::new(""))?; + + // inputs and outputs are not supported for aquery for (k, v) in action.attrs() { func(&k, ActionAttr::new(&v))?; @@ -343,6 +360,13 @@ impl QueryTarget for ActionQueryNode { Ok(()) } + fn defined_attrs_for_each) -> Result<(), E>>( + &self, + func: F, + ) -> Result<(), E> { + self.attrs_for_each(func) + } + fn map_attr>) -> R>(&self, key: &str, mut func: F) -> R { let mut res = None; @@ -366,22 +390,6 @@ impl QueryTarget for ActionQueryNode { // TODO(cjhopman): In addition to implementing this, we should be able to return an anyhow::Error here rather than panicking. unimplemented!("inputs not yet implemented in aquery") } - - fn call_stack(&self) -> Option { - None - } - - fn attr_to_string_alternate(&self, attr: &Self::Attr<'_>) -> String { - format!("{:#}", attr) - } - - fn attr_serialize( - &self, - attr: &Self::Attr<'_>, - serializer: S, - ) -> Result { - attr.serialize(serializer) - } } pub fn iter_action_inputs<'a>( @@ -409,14 +417,12 @@ pub fn iter_action_inputs<'a>( type Item = &'a SetProjectionInputs; fn next(&mut self) -> Option { - self.queue.pop_front().map(|node| { + self.queue.pop_front().inspect(|node| { for child in &*node.node.children { if self.visited.insert(child) { self.queue.push_back(child); } } - - node }) } } @@ -438,11 +444,11 @@ pub fn iter_action_inputs<'a>( pub static FIND_MATCHING_ACTION: LateBinding< for<'c> fn( - &'c DiceComputations, + &'c mut DiceComputations, // Working dir. &'c ProjectRelativePath, - // global_target_platform - Option, + // target cfg info (target platform, cli modifiers) + &'c GlobalCfgOptions, &'c AnalysisResult, // path_after_target_name ForwardRelativePathBuf, @@ -461,3 +467,12 @@ pub static PRINT_ACTION_NODE: LateBinding< cell_resolver: &'a CellResolver, ) -> Pin> + Send + 'a>>, > = LateBinding::new("PRINT_ACTION_NODE"); + +/// Use of "configured_attr_to_value" in `buck2_transition` from `buck2_analysis`. 
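`LateBinding`, as used for the slot declared just below, is how buck2 calls across crate-dependency boundaries: a lower-level crate declares a named slot and a higher-level crate fills it during startup. A rough standalone analogue using `std::sync::OnceLock` (the function, signature, and names here are hypothetical, not the real API):

use std::sync::OnceLock;

// Declared in the low-level crate: a slot for a function provided later.
static ATTR_TO_VALUE: OnceLock<fn(&str) -> String> = OnceLock::new();

// Called once at startup by the crate that owns the implementation,
// breaking what would otherwise be a circular crate dependency.
fn init_late_bindings() {
    ATTR_TO_VALUE
        .set(|attr| format!("value of {attr}"))
        .expect("late binding initialized twice");
}

// Callers resolve the function at runtime instead of linking directly.
fn attr_to_value(attr: &str) -> String {
    (ATTR_TO_VALUE.get().expect("late binding not initialized"))(attr)
}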
+pub static CONFIGURED_ATTR_TO_VALUE: LateBinding< + for<'v> fn( + this: &ConfiguredAttr, + pkg: PackageLabelOption, + heap: &'v Heap, + ) -> anyhow::Result>, +> = LateBinding::new("CONFIGURED_ATTR_TO_VALUE"); diff --git a/app/buck2_build_api/src/actions/registry.rs b/app/buck2_build_api/src/actions/registry.rs index 53023020d7059..334097d0d8a65 100644 --- a/app/buck2_build_api/src/actions/registry.rs +++ b/app/buck2_build_api/src/actions/registry.rs @@ -12,77 +12,102 @@ use std::collections::HashSet; use std::sync::Arc; use allocative::Allocative; +use buck2_artifact::actions::key::ActionIndex; use buck2_artifact::actions::key::ActionKey; use buck2_artifact::artifact::artifact_type::DeclaredArtifact; use buck2_artifact::artifact::artifact_type::OutputArtifact; -use buck2_artifact::deferred::id::DeferredId; -use buck2_core::base_deferred_key::BaseDeferredKey; +use buck2_artifact::artifact::build_artifact::BuildArtifact; +use buck2_artifact::deferred::key::DeferredHolderKey; use buck2_core::category::Category; -use buck2_core::directory; -use buck2_core::directory::Directory; -use buck2_core::directory::DirectoryBuilder; -use buck2_core::directory::DirectoryEntry; -use buck2_core::directory::DirectoryInsertError; -use buck2_core::directory::DirectoryIterator; -use buck2_core::directory::NoDigest; use buck2_core::execution_types::execution::ExecutionPlatformResolution; use buck2_core::fs::buck_out_path::BuckOutPath; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; +use buck2_directory::directory; +use buck2_directory::directory::builder::DirectoryBuilder; +use buck2_directory::directory::builder::DirectoryInsertError; +use buck2_directory::directory::directory::Directory; +use buck2_directory::directory::directory_hasher::NoDigest; +use buck2_directory::directory::directory_iterator::DirectoryIterator; +use buck2_directory::directory::entry::DirectoryEntry; +use buck2_error::internal_error_anyhow; +use buck2_error::BuckErrorContext; use buck2_execute::execute::request::OutputType; use dupe::Dupe; +use dupe::OptionDupedExt; +use gazebo::prelude::SliceExt; use indexmap::IndexSet; use starlark::codemap::FileSpan; +use starlark::collections::SmallMap; +use starlark_map::Hashed; -use crate::actions::key::ActionKeyExt; use crate::actions::ActionErrors; use crate::actions::ActionToBeRegistered; use crate::actions::RegisteredAction; use crate::actions::UnregisteredAction; use crate::analysis::registry::AnalysisValueFetcher; use crate::artifact_groups::ArtifactGroup; -use crate::deferred::types::DeferredRegistry; -use crate::deferred::types::ReservedTrivialDeferredData; +use crate::deferred::calculation::ActionLookup; -/// The actions registry for a particular analysis of a rule implementation +/// The actions registry for a particular analysis of a rule, dynamic actions, anon target, BXL. #[derive(Allocative)] pub struct ActionsRegistry { - owner: BaseDeferredKey, - action_key: Option>, + owner: DeferredHolderKey, + dynamic_actions_action_key: Option>, artifacts: IndexSet, - pending: Vec<( - ReservedTrivialDeferredData>, - ActionToBeRegistered, - )>, - execution_platform: ExecutionPlatformResolution, + + // For a dynamic_output, maps the ActionKeys for the outputs that have been bound + // to this dynamic_output to the DeclaredArtifact created in the dynamic_output. 
+ declared_dynamic_outputs: SmallMap, + pending: Vec, + pub execution_platform: ExecutionPlatformResolution, claimed_output_paths: DirectoryBuilder, NoDigest>, } impl ActionsRegistry { - pub fn new(owner: BaseDeferredKey, execution_platform: ExecutionPlatformResolution) -> Self { + pub fn new( + owner: DeferredHolderKey, + execution_platform: ExecutionPlatformResolution, + dynamic_actions_action_key: Option>, + ) -> Self { Self { owner, - action_key: None, + dynamic_actions_action_key, artifacts: Default::default(), + declared_dynamic_outputs: SmallMap::new(), pending: Default::default(), execution_platform, claimed_output_paths: DirectoryBuilder::empty(), } } - pub fn set_action_key(&mut self, action_key: Arc) { - self.action_key = Some(action_key); - } - pub fn declare_dynamic_output( &mut self, - path: BuckOutPath, - output_type: OutputType, - ) -> DeclaredArtifact { - // We don't want to claim path, because the output belongs to different (outer) context. We - // also don't care to keep track of the hidden components count since this output will + artifact: &BuildArtifact, + ) -> anyhow::Result { + if !self.pending.is_empty() { + return Err(internal_error_anyhow!( + "output for dynamic_output/actions declared after actions: {}, {:?}", + artifact, + self.pending.map(|v| v.key()) + )); + } + + // We don't want to claim path, because the output belongs to different (outer) context. + + // We also don't care to keep track of the hidden components count since this output will // never escape the dynamic lambda. - DeclaredArtifact::new(path, output_type, 0) + // TODO(cjhopman): dynamic values mean this can escape. does this need to be updated for that? + let new_artifact = + DeclaredArtifact::new(artifact.get_path().dupe(), artifact.output_type(), 0); + + assert!( + self.declared_dynamic_outputs + .insert(artifact.key().dupe(), new_artifact.dupe()) + .is_none() + ); + + Ok(new_artifact) } pub fn claim_output_path( @@ -108,15 +133,14 @@ impl ActionsRegistry { } DirectoryEntry::Dir(conflict_dir) => { let conflicting_paths = conflict_dir - .ordered_walk() + .ordered_walk_leaves() .with_paths() - .filter_map(|(p, entry)| match entry { - DirectoryEntry::Leaf(location) => Some(format!( + .map(|(p, location)| { + format!( "{} declared at {}", path.join(p), display_location_opt(location.as_ref()), - )), - _ => None, + ) }) .collect::>(); Err(anyhow::anyhow!(ActionErrors::ConflictingOutputPaths( @@ -129,10 +153,11 @@ impl ActionsRegistry { Err(anyhow::anyhow!(ActionErrors::EmptyOutputPath)) } Err(DirectoryInsertError::CannotTraverseLeaf { path: conflict }) => { - let location = match directory::find(&self.claimed_output_paths, &conflict) { - Ok(Some(DirectoryEntry::Leaf(l))) => l.as_ref(), - _ => None, - }; + let location = + match directory::find::find(self.claimed_output_paths.as_ref(), &conflict) { + Ok(Some(DirectoryEntry::Leaf(l))) => l.as_ref(), + _ => None, + }; let conflict = format!( "{} declared at {}", @@ -161,8 +186,11 @@ impl ActionsRegistry { Some(prefix) => (prefix.join(path), prefix.iter().count()), }; self.claim_output_path(&path, declaration_location)?; - let out_path = - BuckOutPath::with_action_key(self.owner.dupe(), path, self.action_key.dupe()); + let out_path = BuckOutPath::with_dynamic_actions_action_key( + self.owner.owner().dupe(), + path, + self.dynamic_actions_action_key.dupe(), + ); let declared = DeclaredArtifact::new(out_path, output_type, hidden); if !self.artifacts.insert(declared.dupe()) { panic!("not expected duplicate artifact after output path was successfully 
claimed"); @@ -173,57 +201,67 @@ impl ActionsRegistry { /// Registers the supplied action pub fn register( &mut self, - registry: &mut DeferredRegistry, + self_key: &DeferredHolderKey, inputs: IndexSet, outputs: IndexSet, action: A, - ) -> anyhow::Result { - let reserved = registry.reserve_trivial::>(); - + ) -> anyhow::Result { + let key = ActionKey::new( + self_key.dupe(), + // If there are declared_dynamic_outputs, then the analysis that created this one has + // already created ActionKeys for each of those declared outputs and so we offset the + // index by that number. + ActionIndex::new( + (self.declared_dynamic_outputs.len() + self.pending.len()).try_into()?, + ), + ); let mut bound_outputs = IndexSet::with_capacity(outputs.len()); for output in outputs { - let bound = output - .bind(ActionKey::new(reserved.data().dupe()))? - .as_base_artifact() - .dupe(); + let bound = output.bind(key.dupe())?.as_base_artifact().dupe(); bound_outputs.insert(bound); } - let id = reserved.data().deferred_key().id(); - self.pending.push(( - reserved, - ActionToBeRegistered::new(inputs, bound_outputs, action), + self.pending.push(ActionToBeRegistered::new( + key.dupe(), + inputs, + bound_outputs, + action, )); - Ok(id) + Ok(key) } /// Consumes the registry so no more 'Action's can be registered. This returns /// an 'ActionAnalysisResult' that holds all the registered 'Action's pub fn ensure_bound( self, - registry: &mut DeferredRegistry, analysis_value_fetcher: &AnalysisValueFetcher, - ) -> anyhow::Result<()> { + ) -> anyhow::Result { for artifact in self.artifacts { artifact.ensure_bound()?; } + let mut actions = RecordedActions::new(); + + for (key, artifact) in self.declared_dynamic_outputs.into_iter_hashed() { + actions.insert_dynamic_output(key, artifact.ensure_bound()?.action_key().dupe()); + } + // Buck2 has an invariant that pairs of categories and identifiers are unique throughout a build. That // invariant is enforced here, using observed_names to keep track of the categories and identifiers that we've seen. 
let mut observed_names: HashMap> = HashMap::new(); - for (key, a) in self.pending.into_iter() { - let starlark_data = analysis_value_fetcher.get(key.data().deferred_key().id())?; - let action_key = ActionKey::new(key.data().dupe()); - let action = a.register(starlark_data)?; + for a in self.pending.into_iter() { + let key = a.key().dupe(); + let (starlark_data, error_handler) = analysis_value_fetcher.get_action_data(&key)?; + let action = a.register(starlark_data, error_handler)?; match (action.category(), action.identifier()) { (category, Some(identifier)) => { let existing_identifiers = observed_names - .entry(category.clone()) + .entry(category.to_owned()) .or_insert_with(HashSet::::new); // false -> identifier was already present in the set if !existing_identifiers.insert(identifier.to_owned()) { return Err(ActionErrors::ActionCategoryIdentifierNotUnique( - category.clone(), + category.to_owned(), identifier.to_owned(), ) .into()); @@ -231,41 +269,118 @@ impl ActionsRegistry { } (category, None) => { if observed_names - .insert(category.clone(), HashSet::new()) + .insert(category.to_owned(), HashSet::new()) .is_some() { return Err(ActionErrors::ActionCategoryDuplicateSingleton( - category.clone(), + category.to_owned(), ) .into()); }; } } - registry.bind_trivial( - key, + actions.insert( + key.dupe(), Arc::new(RegisteredAction::new( - action_key, + key, action, (*self.execution_platform.executor_config()?).dupe(), )), ); } - Ok(()) + Ok(actions) } pub fn testing_artifacts(&self) -> &IndexSet { &self.artifacts } - pub fn testing_pending( - &self, - ) -> impl Iterator>> { - self.pending.iter().map(|(reserved, _)| reserved) + pub fn testing_pending_action_keys(&self) -> Vec { + self.pending.map(|a| a.key().dupe()) } pub(crate) fn execution_platform(&self) -> &ExecutionPlatformResolution { &self.execution_platform } + + pub(crate) fn actions_len(&self) -> usize { + self.pending.len() + } + + pub(crate) fn artifacts_len(&self) -> usize { + self.artifacts.len() + } +} + +#[derive(Debug, Allocative)] +pub struct RecordedActions { + /// ActionLookup::Action would indicate that this analysis created the action. + /// + /// It's possible for an Action to appear in this map multiple times. That can + /// happen for a dynamic_outputs' "outputs" argument when the output is bound to + /// an action created in that dynamic_output. + /// + /// ActionLookup::Deferred is only used for a dynamic_outputs "outputs" argument + /// that has been re-bound to another dynamic_output. + actions: SmallMap, +} + +impl RecordedActions { + pub fn new() -> Self { + Self { + actions: SmallMap::new(), + } + } + + pub fn insert(&mut self, key: ActionKey, action: Arc) { + assert!( + self.actions + .insert(key, ActionLookup::Action(action)) + .is_none() + ); + } + + /// Inserts a binding for a dynamic_outputs' "outputs" arg. 
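`RecordedActions`, together with the `insert_dynamic_output` method documented above, forms a one-hop alias table: a key resolves either directly to an action this analysis created or to another key owned by a different analysis. A toy model of that resolution, with `u32` keys and `String` payloads standing in for `ActionKey` and `Arc<RegisteredAction>`:

```rust
use std::collections::HashMap;

#[derive(Clone, Debug, PartialEq)]
enum Lookup {
    Action(String), // stand-in for Arc<RegisteredAction>
    Deferred(u32),  // points at an ActionKey recorded elsewhere
}

struct Recorded {
    actions: HashMap<u32, Lookup>,
}

impl Recorded {
    fn insert(&mut self, key: u32, action: String) {
        assert!(self.actions.insert(key, Lookup::Action(action)).is_none());
    }

    fn insert_dynamic_output(&mut self, key: u32, bound_to: u32) {
        // If the target is already a concrete action we created, alias it
        // directly so later lookups need no extra hop; otherwise record the
        // indirection for the deferred case.
        let entry = match self.actions.get(&bound_to) {
            Some(Lookup::Action(a)) => Lookup::Action(a.clone()),
            _ => Lookup::Deferred(bound_to),
        };
        assert!(self.actions.insert(key, entry).is_none());
    }

    fn lookup(&self, key: u32) -> Option<&Lookup> {
        self.actions.get(&key)
    }
}

fn main() {
    let mut r = Recorded { actions: HashMap::new() };
    r.insert(0, "copy foo -> bar".to_owned());
    r.insert_dynamic_output(1, 0); // bound to an action created here
    assert_eq!(r.lookup(1), Some(&Lookup::Action("copy foo -> bar".into())));
    r.insert_dynamic_output(2, 9); // bound to an action from another analysis
    assert_eq!(r.lookup(2), Some(&Lookup::Deferred(9)));
}
```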
+ pub(crate) fn insert_dynamic_output( + &mut self, + key: Hashed, + bound_to_key: ActionKey, + ) { + match self.actions.get(&bound_to_key) { + Some(ActionLookup::Action(v)) => { + // indicates that a dynamic_output "outputs" has been bound to an action it created + assert!( + self.actions + .insert_hashed(key, ActionLookup::Action(v.dupe())) + .is_none() + ); + } + _ => { + assert!( + self.actions + .insert_hashed(key, ActionLookup::Deferred(bound_to_key)) + .is_none() + ); + } + } + } + + pub fn lookup(&self, key: &ActionKey) -> anyhow::Result { + self.actions + .get(key) + .duped() + .with_internal_error_anyhow(|| { + format!("action key missing in recorded actions {}", key) + }) + } + + /// Iterates over the actions created in this analysis. + pub fn iter_actions(&self) -> impl Iterator> + '_ { + self.actions.values().filter_map(|v| match v { + ActionLookup::Action(a) => Some(a), + ActionLookup::Deferred(_) => None, + }) + } } diff --git a/app/buck2_build_api/src/analysis.rs b/app/buck2_build_api/src/analysis.rs new file mode 100644 index 0000000000000..2ecb5779a8bbb --- /dev/null +++ b/app/buck2_build_api/src/analysis.rs @@ -0,0 +1,89 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::collections::HashMap; +use std::fmt::Debug; +use std::sync::Arc; + +use buck2_artifact::artifact::artifact_type::Artifact; +use buck2_core::provider::label::ConfiguredProvidersLabel; +use buck2_interpreter::starlark_profiler::data::StarlarkProfileDataAndStats; + +use crate::analysis::registry::RecordedAnalysisValues; +use crate::artifact_groups::promise::PromiseArtifactId; + +// TODO(@wendyy) move into `buck2_node` +pub mod anon_promises_dyn; +// TODO(@wendyy) move into `buck2_interpreter_for_build` +pub mod anon_targets_registry; +pub mod calculation; +pub mod extra_v; +pub mod registry; + +use allocative::Allocative; +use dupe::Dupe; + +use crate::interpreter::rule_defs::provider::collection::FrozenProviderCollectionValue; +use crate::interpreter::rule_defs::provider::collection::FrozenProviderCollectionValueRef; +use crate::validation::transitive_validations::TransitiveValidations; + +#[derive(Debug, Clone, Dupe, Allocative)] +pub struct AnalysisResult { + analysis_values: Arc, + /// Profiling data after running analysis, for this analysis only, without dependencies. + /// `None` when profiling is disabled. + /// For forward node, this value is shared with underlying analysis (including this field). + pub profile_data: Option>, + promise_artifact_map: Arc>, + pub num_declared_actions: u64, + pub num_declared_artifacts: u64, + /// `None` means there are no `ValidationInfo` providers in transitive dependencies. 
+    pub validations: Option<TransitiveValidations>,
+}
+
+impl AnalysisResult {
+    /// Create a new AnalysisResult
+    pub fn new(
+        analysis_values: RecordedAnalysisValues,
+        profile_data: Option<Arc<StarlarkProfileDataAndStats>>,
+        promise_artifact_map: HashMap<PromiseArtifactId, Artifact>,
+        num_declared_actions: u64,
+        num_declared_artifacts: u64,
+        validations: Option<TransitiveValidations>,
+    ) -> Self {
+        Self {
+            analysis_values: Arc::new(analysis_values),
+            profile_data,
+            promise_artifact_map: Arc::new(promise_artifact_map),
+            num_declared_actions,
+            num_declared_artifacts,
+            validations,
+        }
+    }
+
+    pub fn providers(&self) -> anyhow::Result<FrozenProviderCollectionValueRef<'_>> {
+        self.analysis_values.provider_collection()
+    }
+
+    pub fn promise_artifact_map(&self) -> &Arc<HashMap<PromiseArtifactId, Artifact>> {
+        &self.promise_artifact_map
+    }
+
+    /// Used to look up an inner named provider result.
+    pub fn lookup_inner(
+        &self,
+        label: &ConfiguredProvidersLabel,
+    ) -> anyhow::Result<FrozenProviderCollectionValue> {
+        Ok(self.providers()?.lookup_inner(label)?.to_owned())
+    }
+
+    pub fn analysis_values(&self) -> &RecordedAnalysisValues {
+        &self.analysis_values
+    }
+}
diff --git a/app/buck2_build_api/src/analysis/anon_promises_dyn.rs b/app/buck2_build_api/src/analysis/anon_promises_dyn.rs
index d10d87cf3e5ed..83bdfdf55ea8c 100644
--- a/app/buck2_build_api/src/analysis/anon_promises_dyn.rs
+++ b/app/buck2_build_api/src/analysis/anon_promises_dyn.rs
@@ -15,8 +15,8 @@ use starlark::eval::Evaluator;
 pub trait AnonPromisesDyn<'v>: 'v {
     async fn run_promises(
         self: Box<Self>,
-        dice: &DiceComputations,
-        eval: &mut Evaluator<'v, '_>,
+        dice: &mut DiceComputations,
+        eval: &mut Evaluator<'v, '_, '_>,
         description: String,
     ) -> anyhow::Result<()>;
 }
diff --git a/app/buck2_build_api/src/analysis/anon_targets_registry.rs b/app/buck2_build_api/src/analysis/anon_targets_registry.rs
index fb23b690e2cbb..bb71e5e1e22fe 100644
--- a/app/buck2_build_api/src/analysis/anon_targets_registry.rs
+++ b/app/buck2_build_api/src/analysis/anon_targets_registry.rs
@@ -7,20 +7,18 @@
  * of this source tree.
*/ -use std::collections::HashMap; use std::fmt::Debug; use std::marker::PhantomData; use allocative::Allocative; use buck2_core::execution_types::execution::ExecutionPlatformResolution; -use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; use buck2_util::late_binding::LateBinding; use starlark::any::AnyLifetime; use starlark::values::Trace; use starlark::values::Value; use crate::analysis::anon_promises_dyn::AnonPromisesDyn; -use crate::artifact_groups::promise::PromiseArtifactId; +use crate::artifact_groups::promise::PromiseArtifact; pub static ANON_TARGET_REGISTRY_NEW: LateBinding< for<'v> fn( @@ -34,9 +32,6 @@ pub trait AnonTargetsRegistryDyn<'v>: { fn as_any_mut(&mut self) -> &mut dyn AnyLifetime<'v>; fn take_promises(&mut self) -> Option>>; - fn resolve_artifacts( - &self, - short_paths: &HashMap, - ) -> anyhow::Result<()>; + fn consumer_analysis_artifacts(&self) -> Vec; fn assert_no_promises(&self) -> anyhow::Result<()>; } diff --git a/app/buck2_build_api/src/analysis/calculation.rs b/app/buck2_build_api/src/analysis/calculation.rs index ddfda61062afc..5bfe5eb8ad1ab 100644 --- a/app/buck2_build_api/src/analysis/calculation.rs +++ b/app/buck2_build_api/src/analysis/calculation.rs @@ -16,8 +16,8 @@ use async_trait::async_trait; use buck2_core::configuration::compatibility::MaybeCompatible; use buck2_core::configuration::pair::ConfigurationNoExec; use buck2_core::provider::label::ConfiguredProvidersLabel; +use buck2_core::provider::label::ProvidersLabel; use buck2_core::target::configured_target_label::ConfiguredTargetLabel; -use buck2_core::target::label::TargetLabel; use buck2_node::nodes::configured::ConfiguredTargetNode; use buck2_node::nodes::configured_ref::ConfiguredGraphNodeRef; use buck2_query::query::syntax::simple::eval::set::TargetSet; @@ -27,10 +27,11 @@ use dupe::Dupe; use crate::analysis::AnalysisResult; use crate::interpreter::rule_defs::provider::collection::FrozenProviderCollectionValue; +use crate::validation::transitive_validations::TransitiveValidations; pub static EVAL_ANALYSIS_QUERY: LateBinding< for<'a> fn( - &'a DiceComputations, + &'a mut DiceComputations, &'a str, HashMap, ) -> Pin< @@ -42,7 +43,7 @@ pub static EVAL_ANALYSIS_QUERY: LateBinding< pub trait RuleAnalsysisCalculationImpl: Send + Sync + 'static { async fn get_analysis_result( &self, - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, target: &ConfiguredTargetLabel, ) -> anyhow::Result>; } @@ -55,29 +56,34 @@ pub trait RuleAnalysisCalculation { /// Returns the analysis result for a ConfiguredTargetLabel. This is the full set of Providers /// returned by the target's rule implementation function. async fn get_analysis_result( - &self, + &mut self, target: &ConfiguredTargetLabel, ) -> anyhow::Result>; /// Return the analysis result for a configuration rule `TargetLabel` /// (e. g. `constraint_value`). async fn get_configuration_analysis_result( - &self, - target: &TargetLabel, - ) -> anyhow::Result; + &mut self, + target: &ProvidersLabel, + ) -> anyhow::Result; /// Returns the provider collection for a ConfiguredProvidersLabel. This is the full set of Providers /// returned by the target's rule implementation function. 
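`EVAL_ANALYSIS_QUERY` above and `RULE_ANALYSIS_CALCULATION` here are two more uses of the `LateBinding` idiom that recurs throughout this diff: a low-level crate declares a typed slot, and a higher-level crate fills it during startup, breaking what would otherwise be a crate dependency cycle. The sketch below shows only the shape of the idea and is not buck2's actual `LateBinding` implementation:

```rust
use std::sync::OnceLock;

// A named, set-once slot for a value (typically a function pointer) that a
// higher layer provides at startup.
pub struct LateBinding<T> {
    name: &'static str,
    slot: OnceLock<T>,
}

impl<T> LateBinding<T> {
    pub const fn new(name: &'static str) -> Self {
        Self { name, slot: OnceLock::new() }
    }

    pub fn init(&self, value: T) {
        if self.slot.set(value).is_err() {
            panic!("{} already initialized", self.name);
        }
    }

    pub fn get(&self) -> Result<&T, String> {
        self.slot.get().ok_or_else(|| format!("{} not initialized", self.name))
    }
}

// The low-level crate declares the slot; a high-level crate fills it.
static ANALYZE: LateBinding<fn(&str) -> usize> = LateBinding::new("ANALYZE");

fn main() {
    ANALYZE.init(|target| target.len());
    assert_eq!((ANALYZE.get().unwrap())("cell//pkg:foo"), 13);
}
```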
async fn get_providers( - &self, + &mut self, target: &ConfiguredProvidersLabel, ) -> anyhow::Result>; + + async fn get_validations( + &mut self, + target: &ConfiguredTargetLabel, + ) -> anyhow::Result>>; } #[async_trait] -impl RuleAnalysisCalculation for DiceComputations { +impl RuleAnalysisCalculation for DiceComputations<'_> { async fn get_analysis_result( - &self, + &mut self, target: &ConfiguredTargetLabel, ) -> anyhow::Result> { RULE_ANALYSIS_CALCULATION @@ -87,23 +93,28 @@ impl RuleAnalysisCalculation for DiceComputations { } async fn get_configuration_analysis_result( - &self, - target: &TargetLabel, - ) -> anyhow::Result { + &mut self, + target: &ProvidersLabel, + ) -> anyhow::Result { // Analysis for configuration nodes is always done with the unbound configuration. let target = target.configure_pair(ConfigurationNoExec::unbound().cfg_pair().dupe()); - Ok(self - .get_analysis_result(&target) - .await? - .require_compatible()?) + Ok(self.get_providers(&target).await?.require_compatible()?) } async fn get_providers( - &self, + &mut self, target: &ConfiguredProvidersLabel, ) -> anyhow::Result> { let analysis = self.get_analysis_result(target.target()).await?; analysis.try_map(|analysis| analysis.lookup_inner(target)) } + + async fn get_validations( + &mut self, + target: &ConfiguredTargetLabel, + ) -> anyhow::Result>> { + let analysis = self.get_analysis_result(target).await?; + Ok(analysis.map(|x| x.validations)) + } } diff --git a/app/buck2_build_api/src/analysis/extra_v.rs b/app/buck2_build_api/src/analysis/extra_v.rs new file mode 100644 index 0000000000000..0c197f7d8b565 --- /dev/null +++ b/app/buck2_build_api/src/analysis/extra_v.rs @@ -0,0 +1,94 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::cell::OnceCell; + +use allocative::Allocative; +use buck2_error::BuckErrorContext; +use gazebo::prelude::OptionExt; +use starlark::any::ProvidesStaticType; +use starlark::environment::FrozenModule; +use starlark::environment::Module; +use starlark::values::any_complex::StarlarkAnyComplex; +use starlark::values::Freeze; +use starlark::values::Freezer; +use starlark::values::FrozenValueTyped; +use starlark::values::OwnedFrozenValueTyped; +use starlark::values::Trace; +use starlark::values::ValueLike; +use starlark::values::ValueTyped; + +use crate::analysis::registry::AnalysisValueStorage; +use crate::analysis::registry::FrozenAnalysisValueStorage; + +#[derive(Default, Debug, ProvidesStaticType, Allocative, Trace)] +pub struct AnalysisExtraValue<'v> { + pub analysis_value_storage: + OnceCell>>>, +} + +#[derive(Debug, ProvidesStaticType, Allocative)] +pub struct FrozenAnalysisExtraValue { + pub(crate) analysis_value_storage: + Option>>, +} + +impl<'v> Freeze for AnalysisExtraValue<'v> { + type Frozen = FrozenAnalysisExtraValue; + fn freeze(self, freezer: &Freezer) -> anyhow::Result { + let AnalysisExtraValue { + analysis_value_storage, + } = self; + let analysis_value_storage = + analysis_value_storage + .into_inner() + .try_map(|analysis_value_storage| { + FrozenValueTyped::new_err(analysis_value_storage.to_value().freeze(freezer)?) 
+ })?; + Ok(FrozenAnalysisExtraValue { + analysis_value_storage, + }) + } +} + +impl<'v> AnalysisExtraValue<'v> { + pub fn get(module: &'v Module) -> anyhow::Result>> { + let Some(extra) = module.extra_value() else { + return Ok(None); + }; + Ok(Some( + &extra + .downcast_ref_err::>()? + .value, + )) + } + + pub fn get_or_init(module: &'v Module) -> anyhow::Result<&'v AnalysisExtraValue<'v>> { + if let Some(extra) = Self::get(module)? { + return Ok(extra); + } + module.set_extra_value_no_overwrite( + module + .heap() + .alloc(StarlarkAnyComplex::new(AnalysisExtraValue::default())), + )?; + Self::get(module)?.internal_error_anyhow("extra_value must be set") + } +} + +impl FrozenAnalysisExtraValue { + pub fn get( + module: &FrozenModule, + ) -> anyhow::Result>> { + module + .owned_extra_value() + .internal_error_anyhow("extra_value not set")? + .downcast_anyhow() + } +} diff --git a/app/buck2_build_api/src/analysis/mod.rs b/app/buck2_build_api/src/analysis/mod.rs deleted file mode 100644 index 97aa48373093b..0000000000000 --- a/app/buck2_build_api/src/analysis/mod.rs +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::fmt::Debug; -use std::sync::Arc; - -use buck2_artifact::deferred::id::DeferredId; -use buck2_core::provider::label::ConfiguredProvidersLabel; -use buck2_interpreter::starlark_profiler::StarlarkProfileDataAndStats; - -use crate::deferred::types::DeferredLookup; -use crate::deferred::types::DeferredTable; - -// TODO(@wendyy) move into `buck2_node` -pub mod anon_promises_dyn; -// TODO(@wendyy) move into `buck2_interpreter_for_build` -pub mod anon_targets_registry; -pub mod calculation; -pub mod registry; - -use allocative::Allocative; -use dupe::Dupe; - -use crate::interpreter::rule_defs::provider::collection::FrozenProviderCollectionValue; - -#[derive(Debug, Clone, Dupe, Allocative)] -pub struct AnalysisResult { - /// The actual provider collection, validated to be the correct type (`FrozenProviderCollection`) - pub provider_collection: FrozenProviderCollectionValue, - deferred: DeferredTable, - pub profile_data: Option>, -} - -impl AnalysisResult { - /// Create a new AnalysisResult - pub fn new( - provider_collection: FrozenProviderCollectionValue, - deferred: DeferredTable, - profile_data: Option>, - ) -> Self { - Self { - provider_collection, - deferred, - profile_data, - } - } - - pub fn providers(&self) -> &FrozenProviderCollectionValue { - &self.provider_collection - } - - /// Used to lookup an inner named provider result. - pub fn lookup_inner( - &self, - label: &ConfiguredProvidersLabel, - ) -> anyhow::Result { - self.provider_collection.lookup_inner(label) - } - - pub fn lookup_deferred(&self, id: DeferredId) -> anyhow::Result> { - self.deferred.lookup_deferred(id) - } - - pub fn iter_deferreds(&self) -> impl Iterator> { - self.deferred.iter() - } - - pub fn testing_deferred(&self) -> &DeferredTable { - &self.deferred - } -} diff --git a/app/buck2_build_api/src/analysis/registry.rs b/app/buck2_build_api/src/analysis/registry.rs index a3169c7b341c8..c3290b77ceb17 100644 --- a/app/buck2_build_api/src/analysis/registry.rs +++ b/app/buck2_build_api/src/analysis/registry.rs @@ -7,69 +7,95 @@ * of this source tree. 
*/ +use std::cell::OnceCell; use std::collections::HashMap; use std::fmt::Debug; use std::marker::PhantomData; use std::sync::Arc; use allocative::Allocative; -use buck2_artifact::artifact::artifact_type::Artifact; +use buck2_artifact::actions::key::ActionKey; use buck2_artifact::artifact::artifact_type::DeclaredArtifact; use buck2_artifact::artifact::artifact_type::OutputArtifact; -use buck2_artifact::deferred::id::DeferredId; +use buck2_artifact::artifact::build_artifact::BuildArtifact; +use buck2_artifact::deferred::key::DeferredHolderKey; use buck2_core::base_deferred_key::BaseDeferredKey; use buck2_core::execution_types::execution::ExecutionPlatformResolution; -use buck2_core::fs::buck_out_path::BuckOutPath; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; +use buck2_error::internal_error_anyhow; +use buck2_error::BuckErrorContext; use buck2_execute::execute::request::OutputType; use derivative::Derivative; use dupe::Dupe; use indexmap::IndexSet; +use starlark::any::ProvidesStaticType; use starlark::codemap::FileSpan; use starlark::environment::FrozenModule; use starlark::environment::Module; use starlark::eval::Evaluator; +use starlark::values::any_complex::StarlarkAnyComplex; +use starlark::values::typing::FrozenStarlarkCallable; +use starlark::values::typing::StarlarkCallable; +use starlark::values::Freeze; +use starlark::values::Freezer; +use starlark::values::FrozenHeap; +use starlark::values::FrozenHeapRef; +use starlark::values::FrozenValue; +use starlark::values::FrozenValueTyped; use starlark::values::Heap; use starlark::values::OwnedFrozenValue; +use starlark::values::OwnedFrozenValueTyped; +use starlark::values::OwnedRefFrozenRef; use starlark::values::Trace; use starlark::values::Tracer; use starlark::values::Value; use starlark::values::ValueTyped; +use starlark::values::ValueTypedComplex; +use starlark_map::small_map::SmallMap; use crate::actions::registry::ActionsRegistry; +use crate::actions::registry::RecordedActions; +use crate::actions::RegisteredAction; use crate::actions::UnregisteredAction; use crate::analysis::anon_promises_dyn::AnonPromisesDyn; use crate::analysis::anon_targets_registry::AnonTargetsRegistryDyn; use crate::analysis::anon_targets_registry::ANON_TARGET_REGISTRY_NEW; +use crate::analysis::extra_v::AnalysisExtraValue; +use crate::analysis::extra_v::FrozenAnalysisExtraValue; +use crate::artifact_groups::deferred::TransitiveSetIndex; +use crate::artifact_groups::deferred::TransitiveSetKey; +use crate::artifact_groups::promise::PromiseArtifact; use crate::artifact_groups::promise::PromiseArtifactId; use crate::artifact_groups::registry::ArtifactGroupRegistry; use crate::artifact_groups::ArtifactGroup; -use crate::deferred::types::BaseKey; -use crate::deferred::types::DeferredRegistry; -use crate::dynamic::registry::DynamicRegistry; +use crate::deferred::calculation::ActionLookup; +use crate::dynamic::storage::DynamicLambdaParamsStorage; +use crate::dynamic::storage::FrozenDynamicLambdaParamsStorage; +use crate::dynamic::storage::DYNAMIC_LAMBDA_PARAMS_STORAGES; use crate::interpreter::rule_defs::artifact::associated::AssociatedArtifacts; use crate::interpreter::rule_defs::artifact::output_artifact_like::OutputArtifactArg; -use crate::interpreter::rule_defs::artifact::StarlarkDeclaredArtifact; +use crate::interpreter::rule_defs::artifact::starlark_declared_artifact::StarlarkDeclaredArtifact; +use 
crate::interpreter::rule_defs::provider::collection::FrozenProviderCollection; +use crate::interpreter::rule_defs::provider::collection::FrozenProviderCollectionValueRef; +use crate::interpreter::rule_defs::provider::collection::ProviderCollection; +use crate::interpreter::rule_defs::transitive_set::FrozenTransitiveSet; +use crate::interpreter::rule_defs::transitive_set::FrozenTransitiveSetDefinition; use crate::interpreter::rule_defs::transitive_set::TransitiveSet; #[derive(Derivative, Trace, Allocative)] #[derivative(Debug)] pub struct AnalysisRegistry<'v> { #[derivative(Debug = "ignore")] - deferred: DeferredRegistry, - #[derivative(Debug = "ignore")] - actions: ActionsRegistry, + pub actions: ActionsRegistry, #[derivative(Debug = "ignore")] artifact_groups: ArtifactGroupRegistry, - #[derivative(Debug = "ignore")] - dynamic: DynamicRegistry, pub anon_targets: Box>, - analysis_value_storage: AnalysisValueStorage<'v>, + pub analysis_value_storage: AnalysisValueStorage<'v>, pub short_path_assertions: HashMap, } -#[derive(thiserror::Error, Debug)] +#[derive(buck2_error::Error, Debug)] enum DeclaredArtifactError { #[error("Can't declare an artifact with an empty filename component")] DeclaredEmptyFileName, @@ -80,51 +106,44 @@ impl<'v> AnalysisRegistry<'v> { owner: BaseDeferredKey, execution_platform: ExecutionPlatformResolution, ) -> anyhow::Result> { - Self::new_from_owner_and_deferred( - owner.dupe(), - execution_platform, - DeferredRegistry::new(BaseKey::Base(owner)), - ) + Self::new_from_owner_and_deferred(execution_platform, DeferredHolderKey::Base(owner), None) } - pub(crate) fn new_from_owner_and_deferred( - owner: BaseDeferredKey, + pub fn new_from_owner_and_deferred( execution_platform: ExecutionPlatformResolution, - deferred: DeferredRegistry, + self_key: DeferredHolderKey, + dynamic_actions_action_key: Option>, ) -> anyhow::Result { Ok(AnalysisRegistry { - deferred, - actions: ActionsRegistry::new(owner.dupe(), execution_platform.dupe()), + actions: ActionsRegistry::new( + self_key.dupe(), + execution_platform.dupe(), + dynamic_actions_action_key, + ), artifact_groups: ArtifactGroupRegistry::new(), - dynamic: DynamicRegistry::new(owner.dupe()), anon_targets: (ANON_TARGET_REGISTRY_NEW.get()?)(PhantomData, execution_platform), - analysis_value_storage: AnalysisValueStorage::new(), + analysis_value_storage: AnalysisValueStorage::new(self_key), short_path_assertions: HashMap::new(), }) } - pub(crate) fn set_action_key(&mut self, action_key: Arc) { - self.actions.set_action_key(action_key); - } - /// Reserves a path in an output directory. Doesn't declare artifact, /// but checks that there is no previously declared artifact with a path /// which is in conflict with claimed `path`. pub fn claim_output_path( &mut self, - eval: &Evaluator<'_, '_>, + eval: &Evaluator<'_, '_, '_>, path: &ForwardRelativePath, ) -> anyhow::Result<()> { let declaration_location = eval.call_stack_top_location(); self.actions.claim_output_path(path, declaration_location) } - pub(crate) fn declare_dynamic_output( + pub fn declare_dynamic_output( &mut self, - path: BuckOutPath, - output_type: OutputType, - ) -> DeclaredArtifact { - self.actions.declare_dynamic_output(path, output_type) + artifact: &BuildArtifact, + ) -> anyhow::Result { + self.actions.declare_dynamic_output(artifact) } pub fn declare_output( @@ -165,7 +184,7 @@ impl<'v> AnalysisRegistry<'v> { /// - `StarlarkArtifact`/`StarlarkDeclaredArtifact`: If the artifact is already bound, an error is raised. Otherwise we proceed with the original artifact. 
pub fn get_or_declare_output<'v2>( &mut self, - eval: &Evaluator<'v2, '_>, + eval: &Evaluator<'v2, '_, '_>, value: OutputArtifactArg<'v2>, output_type: OutputType, ) -> anyhow::Result<(ArtifactDeclaration<'v2>, OutputArtifact)> { @@ -181,7 +200,7 @@ impl<'v> AnalysisRegistry<'v> { AssociatedArtifacts::new(), )) } - OutputArtifactArg::OutputArtifact(output) => output.inner(), + OutputArtifactArg::OutputArtifact(output) => output.inner()?, OutputArtifactArg::DeclaredArtifact(artifact) => artifact, OutputArtifactArg::WrongArtifact(artifact) => { return Err(artifact.0.as_output_error()); @@ -205,64 +224,41 @@ impl<'v> AnalysisRegistry<'v> { outputs: IndexSet, action: A, associated_value: Option>, + error_handler: Option>, ) -> anyhow::Result<()> { - let id = self - .actions - .register(&mut self.deferred, inputs, outputs, action)?; - if let Some(value) = associated_value { - self.analysis_value_storage.set_value(id, value); - } + let id = self.actions.register( + &self.analysis_value_storage.self_key, + inputs, + outputs, + action, + )?; + self.analysis_value_storage + .set_action_data(id, (associated_value, error_handler))?; Ok(()) } pub fn create_transitive_set( &mut self, - definition: Value<'v>, + definition: FrozenValueTyped<'v, FrozenTransitiveSetDefinition>, value: Option>, children: Option>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result>> { - let set = self.artifact_groups.create_transitive_set( + eval: &mut Evaluator<'v, '_, '_>, + ) -> starlark::Result>> { + self.artifact_groups.create_transitive_set( definition, value, children, - &mut self.deferred, + &mut self.analysis_value_storage, eval, - )?; - - let key = set.key().deferred_key().id(); - let set = eval.heap().alloc_complex(set); - let set = ValueTyped::::new_err(set)?; - - self.analysis_value_storage.set_value(key, set.to_value()); - - Ok(set) - } - - pub fn register_dynamic_output( - &mut self, - dynamic: IndexSet, - inputs: IndexSet, - outputs: IndexSet, - attributes_plugins_lambda: Value<'v>, - ) -> anyhow::Result<()> { - let id = self - .dynamic - .register(dynamic, inputs, outputs, &mut self.deferred)?; - self.analysis_value_storage - .set_value(id, attributes_plugins_lambda); - Ok(()) + ) } pub(crate) fn take_promises(&mut self) -> Option>> { self.anon_targets.take_promises() } - pub fn resolve_artifacts( - &self, - short_paths: &HashMap, - ) -> anyhow::Result<()> { - self.anon_targets.resolve_artifacts(short_paths) + pub fn consumer_analysis_artifacts(&self) -> Vec { + self.anon_targets.consumer_analysis_artifacts() } pub fn record_short_path_assertion( @@ -278,17 +274,23 @@ impl<'v> AnalysisRegistry<'v> { self.anon_targets.assert_no_promises() } + pub fn num_declared_actions(&self) -> u64 { + self.actions.actions_len() as u64 + } + + pub fn num_declared_artifacts(&self) -> u64 { + self.actions.artifacts_len() as u64 + } + /// You MUST pass the same module to both the first function and the second one. /// It requires both to get the lifetimes to line up. 
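The `finalize` method that follows returns a closure, splitting analysis teardown into a phase that borrows the module and a later phase that consumes it; that split is why the comment above insists the same module be passed to both. A reduced sketch of the two-phase shape, with all types invented:

```rust
struct Env { values: Vec<String> }
struct Frozen { values: Vec<String> }

struct Registry { pending: Vec<String> }

impl Registry {
    // Phase 1 borrows `env` to flush recorded state into it, then returns a
    // closure for phase 2. The closure captures nothing from `env`, so the
    // mutable borrow has ended by the time the caller consumes `env`.
    fn finalize(self, env: &mut Env) -> impl FnOnce(Env) -> Frozen {
        env.values.extend(self.pending);
        move |env: Env| Frozen { values: env.values }
    }
}

fn main() {
    let mut env = Env { values: vec![] };
    let reg = Registry { pending: vec!["action_1".into(), "tset_0".into()] };
    let finish = reg.finalize(&mut env);
    let frozen = finish(env); // phase 2: consume and "freeze"
    assert_eq!(frozen.values, ["action_1", "tset_0"]);
}
```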
pub fn finalize( self, env: &'v Module, ) -> anyhow::Result< - impl FnOnce(Module) -> anyhow::Result<(FrozenModule, DeferredRegistry)> + 'static, + impl FnOnce(Module) -> anyhow::Result<(FrozenModule, RecordedAnalysisValues)> + 'static, > { let AnalysisRegistry { - mut deferred, - dynamic, actions, artifact_groups, anon_targets: _, @@ -296,16 +298,19 @@ impl<'v> AnalysisRegistry<'v> { short_path_assertions: _, } = self; - analysis_value_storage.write_to_module(env); + let self_key = analysis_value_storage.self_key.dupe(); + analysis_value_storage.write_to_module(env)?; Ok(move |env: Module| { let frozen_env = env.freeze()?; let analysis_value_fetcher = AnalysisValueFetcher { + self_key, frozen_module: Some(frozen_env.dupe()), }; - actions.ensure_bound(&mut deferred, &analysis_value_fetcher)?; - artifact_groups.ensure_bound(&mut deferred, &analysis_value_fetcher)?; - dynamic.ensure_bound(&mut deferred, &analysis_value_fetcher)?; - Ok((frozen_env, deferred)) + let actions = actions.ensure_bound(&analysis_value_fetcher)?; + artifact_groups.ensure_bound(&analysis_value_fetcher)?; + let recorded_values = analysis_value_fetcher.get_recorded_values(actions)?; + + Ok((frozen_env, recorded_values)) }) } @@ -333,27 +338,80 @@ impl<'v> ArtifactDeclaration<'v> { /// Store `Value<'v>` values for actions registered in an implementation function /// -/// Threading lifetimes through the various action registries is kind of a pain. So instead, -/// store the starlark values in this struct, using the `DeferredId` as the key. -/// /// These values eventually are written into the mutable `Module`, and a wrapper is /// made available to get the `OwnedFrozenValue` back out after that `Module` is frozen. /// /// Note that this object has internal mutation and is only expected to live for the duration /// of impl function execution. /// -/// At the end of impl function execution, `write_to_module` should be called to ensure -/// that the values are written the top level of the `Module`. -#[derive(Debug, Allocative)] -struct AnalysisValueStorage<'v> { - values: HashMap>, +/// At the end of impl function execution, `write_to_module` should be called +/// to write this object to `Module` extra value to get the values frozen. 
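The `Trace` and `Freeze` implementations for this storage (below) destructure `self` into every field before visiting them, so adding a field fails to compile until the new field is traced and frozen as well. The same exhaustiveness idiom in isolation:

```rust
struct Storage {
    actions: Vec<String>,
    tsets: Vec<String>,
}

fn visit_all(s: &Storage, mut f: impl FnMut(&str)) {
    // Exhaustive destructuring: adding a field to `Storage` is a compile
    // error here until the new field is handled, unlike `s.actions` access,
    // which would silently skip it.
    let Storage { actions, tsets } = s;
    actions.iter().for_each(|v| f(v));
    tsets.iter().for_each(|v| f(v));
}

fn main() {
    let s = Storage { actions: vec!["a".into()], tsets: vec!["t".into()] };
    let mut seen = vec![];
    visit_all(&s, |v| seen.push(v.to_owned()));
    assert_eq!(seen, ["a", "t"]);
}
```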
+#[derive(Debug, Allocative, ProvidesStaticType)] +pub struct AnalysisValueStorage<'v> { + pub self_key: DeferredHolderKey, + action_data: SmallMap>, Option>)>, + transitive_sets: SmallMap>>, + pub lambda_params: Box>, + result_value: OnceCell>>, +} + +#[derive(Debug, Allocative, ProvidesStaticType)] +pub struct FrozenAnalysisValueStorage { + pub self_key: DeferredHolderKey, + action_data: SmallMap, Option)>, + transitive_sets: SmallMap>, + pub lambda_params: Box, + result_value: Option>, } unsafe impl<'v> Trace<'v> for AnalysisValueStorage<'v> { fn trace(&mut self, tracer: &Tracer<'v>) { - for v in self.values.values_mut() { - tracer.trace(v) + let AnalysisValueStorage { + action_data, + transitive_sets, + lambda_params, + self_key, + result_value, + } = self; + for (k, v) in action_data.iter_mut() { + tracer.trace_static(k); + v.trace(tracer); } + for (k, v) in transitive_sets.iter_mut() { + tracer.trace_static(k); + v.trace(tracer); + } + lambda_params.trace(tracer); + tracer.trace_static(self_key); + result_value.trace(tracer); + } +} + +impl<'v> Freeze for AnalysisValueStorage<'v> { + type Frozen = FrozenAnalysisValueStorage; + + fn freeze(self, freezer: &Freezer) -> anyhow::Result { + let AnalysisValueStorage { + self_key, + action_data, + transitive_sets, + lambda_params, + result_value, + } = self; + + Ok(FrozenAnalysisValueStorage { + self_key, + action_data: action_data + .into_iter() + .map(|(k, v)| Ok((k, v.freeze(freezer)?))) + .collect::>()?, + transitive_sets: transitive_sets + .into_iter() + .map(|(k, v)| Ok((k, FrozenValueTyped::new_err(v.to_value().freeze(freezer)?)?))) + .collect::>()?, + lambda_params: lambda_params.freeze(freezer)?, + result_value: result_value.freeze(freezer)?, + }) } } @@ -362,43 +420,274 @@ unsafe impl<'v> Trace<'v> for AnalysisValueStorage<'v> { /// These values are pulled from the `FrozenModule` that results from `env.freeze()`. /// This is used by the action registry to make an `OwnedFrozenValue` available to /// Actions' register function. -#[derive(Default)] pub struct AnalysisValueFetcher { + self_key: DeferredHolderKey, frozen_module: Option, } +impl AnalysisValueFetcher { + pub fn testing_new(self_key: DeferredHolderKey) -> Self { + AnalysisValueFetcher { + self_key, + frozen_module: None, + } + } +} + impl<'v> AnalysisValueStorage<'v> { - fn new() -> Self { + fn new(self_key: DeferredHolderKey) -> Self { Self { - values: HashMap::new(), + self_key: self_key.dupe(), + action_data: SmallMap::new(), + transitive_sets: SmallMap::new(), + lambda_params: DYNAMIC_LAMBDA_PARAMS_STORAGES + .get() + .unwrap() + .new_dynamic_lambda_params_storage(self_key), + result_value: OnceCell::new(), } } - /// Write all of the values to `module` using an internal name - fn write_to_module(&self, module: &'v Module) { - for (id, v) in self.values.iter() { - let starlark_key = format!("$action_key_{}", id); - module.set(&starlark_key, *v); + /// Write self to `module` extra value. 
+ fn write_to_module(self, module: &'v Module) -> anyhow::Result<()> { + let extra_v = AnalysisExtraValue::get_or_init(module)?; + let res = extra_v.analysis_value_storage.set( + module + .heap() + .alloc_typed(StarlarkAnyComplex { value: self }), + ); + if res.is_err() { + return Err(internal_error_anyhow!( + "analysis_value_storage is already set" + )); } + Ok(()) } - /// Add a value to the internal hash map that maps ids -> values - fn set_value(&mut self, id: DeferredId, value: Value<'v>) { - self.values.insert(id, value); + pub(crate) fn register_transitive_set< + F: FnOnce(TransitiveSetKey) -> anyhow::Result>>, + >( + &mut self, + func: F, + ) -> anyhow::Result>> { + let key = TransitiveSetKey::new( + self.self_key.dupe(), + TransitiveSetIndex(self.transitive_sets.len().try_into()?), + ); + let set = func(key.dupe())?; + self.transitive_sets.insert(key, set.dupe()); + Ok(set) + } + + fn set_action_data( + &mut self, + id: ActionKey, + action_data: (Option>, Option>), + ) -> anyhow::Result<()> { + if &self.self_key != id.holder_key() { + return Err(internal_error_anyhow!( + "Wrong action owner: expecting `{}`, got `{}`", + self.self_key, + id + )); + } + self.action_data.insert(id, action_data); + Ok(()) + } + + pub fn set_result_value( + &self, + providers: ValueTypedComplex<'v, ProviderCollection<'v>>, + ) -> anyhow::Result<()> { + if self.result_value.set(providers).is_err() { + return Err(internal_error_anyhow!("result_value is already set")); + } + Ok(()) } } impl AnalysisValueFetcher { - /// Get the `OwnedFrozenValue` that corresponds to a `DeferredId`, if present - pub(crate) fn get(&self, id: DeferredId) -> anyhow::Result> { + fn extra_value(&self) -> anyhow::Result> { match &self.frozen_module { None => Ok(None), Some(module) => { - let starlark_key = format!("$action_key_{}", id); - // This return `Err` is the symbol is private. - // It is never private, but error is better than panic. - module.get_option(&starlark_key) + let analysis_extra_value = FrozenAnalysisExtraValue::get(module)? + .value + .analysis_value_storage + .internal_error_anyhow("analysis_value_storage not set")? + .as_ref(); + Ok(Some((&analysis_extra_value.value, module.frozen_heap()))) } } } + + /// Get the `OwnedFrozenValue` that corresponds to a `DeferredId`, if present + pub fn get_action_data( + &self, + id: &ActionKey, + ) -> anyhow::Result<(Option, Option)> { + let Some((storage, heap_ref)) = self.extra_value()? else { + return Ok((None, None)); + }; + + if id.holder_key() != &storage.self_key { + return Err(internal_error_anyhow!( + "Wrong action owner: expecting `{}`, got `{}`", + storage.self_key, + id + )); + } + + let Some(value) = storage.action_data.get(id) else { + return Ok((None, None)); + }; + + unsafe { + Ok(( + value.0.map(|v| OwnedFrozenValue::new(heap_ref.dupe(), v)), + value.1.map(|v| OwnedFrozenValue::new(heap_ref.dupe(), v.0)), + )) + } + } + + pub(crate) fn get_recorded_values( + &self, + actions: RecordedActions, + ) -> anyhow::Result { + let analysis_storage = match &self.frozen_module { + None => None, + Some(module) => Some(FrozenAnalysisExtraValue::get(module)?.try_map(|v| { + v.value + .analysis_value_storage + .internal_error_anyhow("analysis_value_storage not set") + })?), + }; + + Ok(RecordedAnalysisValues { + self_key: self.self_key.dupe(), + analysis_storage, + actions, + }) + } +} + +/// The analysis values stored in DeferredHolder. 
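`result_value` in this storage is a set-once cell whose second write becomes an internal error instead of a silent overwrite. The pattern in miniature (names invented):

```rust
use std::cell::OnceCell;

#[derive(Debug)]
struct Storage {
    result_value: OnceCell<String>,
}

impl Storage {
    fn set_result_value(&self, providers: String) -> Result<(), String> {
        // OnceCell::set fails on the second write; surface that as an error
        // rather than replacing the already-recorded value.
        self.result_value
            .set(providers)
            .map_err(|_| "result_value is already set".to_owned())
    }
}

fn main() {
    let s = Storage { result_value: OnceCell::new() };
    assert!(s.set_result_value("providers".into()).is_ok());
    assert!(s.set_result_value("again".into()).is_err());
}
```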
+#[derive(Debug, Allocative)] +pub struct RecordedAnalysisValues { + self_key: DeferredHolderKey, + analysis_storage: Option>>, + actions: RecordedActions, +} + +impl RecordedAnalysisValues { + pub fn testing_new( + self_key: DeferredHolderKey, + transitive_sets: Vec<(TransitiveSetKey, OwnedFrozenValueTyped)>, + actions: RecordedActions, + ) -> Self { + let heap = FrozenHeap::new(); + let mut alloced_tsets = SmallMap::new(); + for (key, tset) in transitive_sets { + heap.add_reference(tset.owner()); + let tset = tset.owned_frozen_value_typed(&heap); + alloced_tsets.insert(key, tset); + } + + let providers = FrozenProviderCollection::testing_new_default(&heap); + + let value = heap.alloc_simple(StarlarkAnyComplex { + value: FrozenAnalysisValueStorage { + self_key: self_key.dupe(), + action_data: SmallMap::new(), + transitive_sets: alloced_tsets, + lambda_params: DYNAMIC_LAMBDA_PARAMS_STORAGES + .get() + .unwrap() + .new_frozen_dynamic_lambda_params_storage(self_key.dupe()), + result_value: Some( + FrozenValueTyped::::new(heap.alloc(providers)) + .unwrap(), + ), + }, + }); + Self { + self_key, + analysis_storage: Some( + unsafe { OwnedFrozenValue::new(heap.into_ref(), value) } + .downcast() + .unwrap(), + ), + actions, + } + } + + pub(crate) fn lookup_transitive_set( + &self, + key: &TransitiveSetKey, + ) -> anyhow::Result> { + if key.holder_key() != &self.self_key { + return Err(internal_error_anyhow!( + "Wrong owner for transitive set: expecting `{}`, got `{}`", + self.self_key, + key + )); + } + self.analysis_storage + .as_ref() + .with_internal_error_anyhow(|| format!("Missing analysis storage for `{key}`"))? + .maybe_map(|v| v.value.transitive_sets.get(key).copied()) + .with_internal_error_anyhow(|| format!("Missing transitive set `{key}`")) + } + + pub fn lookup_action(&self, key: &ActionKey) -> anyhow::Result { + if key.holder_key() != &self.self_key { + return Err(internal_error_anyhow!( + "Wrong owner for action: expecting `{}`, got `{}`", + self.self_key, + key + )); + } + self.actions.lookup(key) + } + + /// Iterates over the actions created in this analysis. + pub fn iter_actions(&self) -> impl Iterator> + '_ { + self.actions.iter_actions() + } + + pub fn analysis_storage( + &self, + ) -> anyhow::Result> { + Ok(self + .analysis_storage + .as_ref() + .internal_error_anyhow("missing analysis storage")? + .as_owned_ref_frozen_ref() + .map(|v| &v.value)) + } + + /// Iterates over the declared dynamic_output/actions. + pub fn iter_dynamic_lambda_outputs(&self) -> impl Iterator + '_ { + self.analysis_storage + .iter() + .flat_map(|v| v.value.lambda_params.iter_dynamic_lambda_outputs()) + } + + pub fn provider_collection(&self) -> anyhow::Result> { + let analysis_storage = self + .analysis_storage + .as_ref() + .internal_error_anyhow("missing analysis storage")?; + let value = analysis_storage + .as_ref() + .value + .result_value + .internal_error_anyhow("missing provider collection")?; + unsafe { + Ok(FrozenProviderCollectionValueRef::new( + analysis_storage.owner(), + value, + )) + } + } } diff --git a/app/buck2_build_api/src/artifact_groups.rs b/app/buck2_build_api/src/artifact_groups.rs new file mode 100644 index 0000000000000..08590e7dd1eff --- /dev/null +++ b/app/buck2_build_api/src/artifact_groups.rs @@ -0,0 +1,103 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +mod artifact_group_values; +pub mod calculation; +pub mod deferred; +pub mod promise; + +use crate::actions::calculation::BuildKey; +use crate::deferred::calculation::GET_PROMISED_ARTIFACT; + +pub mod registry; + +use std::hash::Hash; +use std::sync::Arc; + +use allocative::Allocative; +pub use artifact_group_values::ArtifactGroupValues; +use buck2_artifact::artifact::artifact_type::Artifact; +use derive_more::Display; +use dice::DiceComputations; +use dupe::Dupe; +use gazebo::variants::UnpackVariants; +use static_assertions::assert_eq_size; + +use self::calculation::EnsureTransitiveSetProjectionKey; +use crate::artifact_groups::deferred::TransitiveSetKey; +use crate::artifact_groups::promise::PromiseArtifact; + +/// An [ArtifactGroup] can expand to one or more [Artifact]. Those Artifacts will be made available +/// to Actions when they execute. +#[derive( + Clone, + Debug, + Display, + Dupe, + PartialEq, + Eq, + Hash, + UnpackVariants, + Allocative +)] +pub enum ArtifactGroup { + Artifact(Artifact), + TransitiveSetProjection(Arc), + Promise(Arc), +} + +assert_eq_size!(ArtifactGroup, [usize; 2]); + +impl ArtifactGroup { + /// Gets the resolved artifact group, which is used further downstream to use DICE to get + /// or compute the actual artifact values. For the `Artifact` variant, we will get the results + /// via the base or projected artifact key. For the `TransitiveSetProjection` variant, we will + /// look get the results via the `EnsureTransitiveSetProjectionKey`, which expands the underlying + /// tset. For the `Promise` variant, we will look up the promised artifact values by getting + /// the analysis results of the owning anon target's analysis. + pub async fn resolved_artifact( + &self, + ctx: &mut DiceComputations<'_>, + ) -> anyhow::Result { + Ok(match self { + ArtifactGroup::Artifact(a) => ResolvedArtifactGroup::Artifact(a.clone()), + ArtifactGroup::TransitiveSetProjection(a) => { + ResolvedArtifactGroup::TransitiveSetProjection(a) + } + ArtifactGroup::Promise(p) => match p.get() { + Some(a) => ResolvedArtifactGroup::Artifact(a.clone()), + None => { + let artifact = (GET_PROMISED_ARTIFACT.get()?)(p, ctx).await?; + ResolvedArtifactGroup::Artifact(artifact) + } + }, + }) + } +} + +// TODO(@wendyy) if we move PromiseArtifact into ArtifactKind someday, we should probably +// split the Artifact variant into two cases (artifact by ref and by value) to prevent memory +// regressions. 
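`assert_eq_size!(ArtifactGroup, [usize; 2])` above, and the memory note in this TODO, pin the size of a hot type at compile time so accidental growth fails the build instead of quietly costing memory. A tiny example of the same guard, using the `static_assertions` crate as the diff itself does (the type here is invented):

```rust
use static_assertions::assert_eq_size;

// A compact key: if someone adds a field or widens one, the build breaks
// at the assertion below rather than regressing memory at runtime.
struct Key {
    index: u32,
    generation: u32,
}

// Checked at compile time; no code is generated for this.
assert_eq_size!(Key, u64);

fn main() {
    let k = Key { index: 7, generation: 1 };
    assert_eq!(std::mem::size_of_val(&k), 8);
    println!("index={} generation={}", k.index, k.generation);
}
```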
+#[derive(Clone)] +pub enum ResolvedArtifactGroup<'a> { + Artifact(Artifact), + TransitiveSetProjection(&'a TransitiveSetProjectionKey), +} + +pub enum ResolvedArtifactGroupBuildSignalsKey { + EnsureTransitiveSetProjectionKey(EnsureTransitiveSetProjectionKey), + BuildKey(BuildKey), +} + +#[derive(Clone, Debug, Display, Dupe, PartialEq, Eq, Hash, Allocative)] +#[display("TransitiveSetProjection({}, {})", key, projection)] +pub struct TransitiveSetProjectionKey { + pub key: TransitiveSetKey, + pub projection: usize, +} diff --git a/app/buck2_build_api/src/artifact_groups/artifact_group_values.rs b/app/buck2_build_api/src/artifact_groups/artifact_group_values.rs index aca158be86731..497f711c93c77 100644 --- a/app/buck2_build_api/src/artifact_groups/artifact_group_values.rs +++ b/app/buck2_build_api/src/artifact_groups/artifact_group_values.rs @@ -14,8 +14,8 @@ use std::sync::Arc; use allocative::Allocative; use anyhow::Context as _; use buck2_artifact::artifact::artifact_type::Artifact; -use buck2_core::directory::Directory; use buck2_core::fs::artifact_path_resolver::ArtifactFs; +use buck2_directory::directory::directory::Directory; use buck2_execute::artifact::artifact_dyn::ArtifactDyn; use buck2_execute::artifact::group::artifact_group_values_dyn::ArtifactGroupValuesDyn; use buck2_execute::artifact_value::ArtifactValue; @@ -240,13 +240,11 @@ impl ArtifactGroupValuesDyn for ArtifactGroupValues { #[cfg(test)] mod tests { + use buck2_artifact::actions::key::ActionIndex; use buck2_artifact::artifact::artifact_type::testing::BuildArtifactTestingExt; use buck2_artifact::artifact::build_artifact::BuildArtifact; - use buck2_artifact::deferred::id::DeferredId; use buck2_core::configuration::data::ConfigurationData; - use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; use buck2_core::target::configured_target_label::ConfiguredTargetLabel; - use buck2_execute::artifact_value::ArtifactValue; use super::*; @@ -254,11 +252,7 @@ mod tests { let target = ConfiguredTargetLabel::testing_parse("cell//pkg:foo", ConfigurationData::testing_new()); - let artifact = BuildArtifact::testing_new( - target.dupe(), - ForwardRelativePathBuf::unchecked_new(name.to_owned()), - DeferredId::testing_new(0), - ); + let artifact = BuildArtifact::testing_new(target.dupe(), name, ActionIndex::new(0)); let value = ArtifactValue::file(DigestConfig::testing_default().empty_file()); diff --git a/app/buck2_build_api/src/artifact_groups/calculation.rs b/app/buck2_build_api/src/artifact_groups/calculation.rs index 9c7a642895839..56f0f72fe8f38 100644 --- a/app/buck2_build_api/src/artifact_groups/calculation.rs +++ b/app/buck2_build_api/src/artifact_groups/calculation.rs @@ -18,15 +18,14 @@ use buck2_artifact::artifact::artifact_type::Artifact; use buck2_artifact::artifact::artifact_type::ArtifactKind; use buck2_artifact::artifact::artifact_type::BaseArtifactKind; use buck2_artifact::artifact::build_artifact::BuildArtifact; -use buck2_artifact::artifact::projected_artifact::ProjectedArtifact; use buck2_artifact::artifact::source_artifact::SourceArtifact; -use buck2_common::dice::file_ops::HasFileOps; -use buck2_common::file_ops::FileOps; +use buck2_common::dice::file_ops::DiceFileComputations; use buck2_common::file_ops::PathMetadata; use buck2_common::file_ops::PathMetadataOrRedirection; -use buck2_common::result::SharedResult; use buck2_core::cells::cell_path::CellPath; -use buck2_core::directory::DirectoryData; +use buck2_directory::directory::directory_data::DirectoryData; +use buck2_error::internal_error_anyhow; 
+use buck2_error::BuckErrorContext; use buck2_execute::artifact_value::ArtifactValue; use buck2_execute::digest_config::HasDigestConfig; use buck2_execute::directory::extract_artifact_value; @@ -36,18 +35,16 @@ use buck2_execute::directory::ActionDirectoryEntry; use buck2_execute::directory::ActionDirectoryMember; use buck2_execute::directory::ActionSharedDirectory; use buck2_execute::directory::INTERNER; +use buck2_futures::cancellation::CancellationContext; use derive_more::Display; use dice::DiceComputations; use dice::Key; use dupe::Dupe; -use futures::future; -use futures::stream::FuturesOrdered; use futures::Future; use futures::FutureExt; -use more_futures::cancellation::CancellationContext; +use itertools::Itertools; use ref_cast::RefCast; use smallvec::SmallVec; -use thiserror::Error; use crate::actions::artifact::get_artifact_fs::GetArtifactFs; use crate::actions::calculation::ActionCalculation; @@ -56,29 +53,29 @@ use crate::artifact_groups::ArtifactGroup; use crate::artifact_groups::ArtifactGroupValues; use crate::artifact_groups::ResolvedArtifactGroup; use crate::artifact_groups::TransitiveSetProjectionKey; -use crate::deferred::calculation::DeferredCalculation; -use crate::keep_going; +use crate::keep_going::KeepGoing; #[async_trait] pub trait ArtifactGroupCalculation { /// Makes an 'Artifact' available to be accessed async fn ensure_artifact_group( - &self, + &mut self, input: &ArtifactGroup, ) -> anyhow::Result; } #[async_trait] -impl ArtifactGroupCalculation for DiceComputations { +impl ArtifactGroupCalculation for DiceComputations<'_> { /// makes the 'Artifact' available to be accessed async fn ensure_artifact_group( - &self, + &mut self, input: &ArtifactGroup, ) -> anyhow::Result { // TODO consider if we need to cache this - ensure_artifact_group_staged(self, input) + let resolved_artifacts = input.resolved_artifact(self).await?; + ensure_artifact_group_staged(self, resolved_artifacts.clone()) .await? - .to_group_values(input) + .to_group_values(&resolved_artifacts) } } @@ -101,12 +98,12 @@ impl ArtifactGroupCalculation for DiceComputations { /// on many inputs, this allows them to only allocate those large values only after all /// inputs are ready. pub(crate) fn ensure_artifact_group_staged<'a>( - ctx: &'a DiceComputations, - input: &'a ArtifactGroup, + ctx: &'a mut DiceComputations, + input: ResolvedArtifactGroup<'a>, ) -> impl Future> + 'a { - match input.assert_resolved() { + match input { ResolvedArtifactGroup::Artifact(artifact) => { - ensure_artifact_staged(ctx, artifact).left_future() + ensure_artifact_staged(ctx, artifact.clone()).left_future() } ResolvedArtifactGroup::TransitiveSetProjection(key) => ctx .compute(EnsureTransitiveSetProjectionKey::ref_cast(key)) @@ -117,8 +114,8 @@ pub(crate) fn ensure_artifact_group_staged<'a>( /// See [ensure_artifact_group_staged]. pub(super) fn ensure_base_artifact_staged<'a>( - dice: &'a DiceComputations, - artifact: &'a BaseArtifactKind, + dice: &'a mut DiceComputations, + artifact: BaseArtifactKind, ) -> impl Future> + 'a { match artifact { BaseArtifactKind::Build(built) => ensure_build_artifact_staged(dice, built).left_future(), @@ -130,21 +127,22 @@ pub(super) fn ensure_base_artifact_staged<'a>( /// See [ensure_artifact_group_staged]. 
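The `left_future()`/`right_future()` combinators used in these staging functions are the futures-crate analogue of the `Either` iterator trick earlier in this diff: each branch of a match produces a different concrete future type, and `futures::future::Either` unifies them without boxing. A runnable sketch with invented functions (requires the `futures` crate):

```rust
use futures::future::FutureExt;

async fn cheap() -> u32 { 1 }
async fn expensive() -> u32 { 2 }

// `cheap()` and `expensive()` have distinct anonymous future types;
// left_future()/right_future() wrap them in Either so both branches share
// one `impl Future` return type.
fn choose(fast_path: bool) -> impl std::future::Future<Output = u32> {
    if fast_path {
        cheap().left_future()
    } else {
        expensive().right_future()
    }
}

fn main() {
    assert_eq!(futures::executor::block_on(choose(true)), 1);
    assert_eq!(futures::executor::block_on(choose(false)), 2);
}
```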
pub(super) fn ensure_artifact_staged<'a>( - dice: &'a DiceComputations, - artifact: &'a Artifact, + dice: &'a mut DiceComputations, + artifact: Artifact, ) -> impl Future> + 'a { - match artifact.data() { - ArtifactKind::Base(base) => ensure_base_artifact_staged(dice, base).left_future(), - ArtifactKind::Projected(projected) => dice - .compute(EnsureProjectedArtifactKey::ref_cast(projected)) + let ArtifactKind { base, path } = artifact.data(); + match path.is_empty() { + true => ensure_base_artifact_staged(dice, base.clone()).left_future(), + false => dice + .compute(EnsureProjectedArtifactKey::ref_cast(artifact.data())) .map(|v| Ok(EnsureArtifactGroupReady::Single(v??))) .right_future(), } } fn ensure_build_artifact_staged<'a>( - dice: &'a DiceComputations, - built: &'a BuildArtifact, + dice: &'a mut DiceComputations, + built: BuildArtifact, ) -> impl Future> + 'a { ActionCalculation::build_action(dice, built.key()).map(move |action_outputs| { let action_outputs = action_outputs?; @@ -160,8 +158,8 @@ fn ensure_build_artifact_staged<'a>( } fn ensure_source_artifact_staged<'a>( - dice: &'a DiceComputations, - source: &'a SourceArtifact, + dice: &'a mut DiceComputations, + source: SourceArtifact, ) -> impl Future> + 'a { async move { Ok(EnsureArtifactGroupReady::Single( @@ -175,17 +173,28 @@ fn ensure_source_artifact_staged<'a>( // These errors should be unreachable, they indicate misuse of the staged ensure artifact (or other buck // invariant violations), but it's still better to propagate them as Error than to panic!(). -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] pub enum EnsureArtifactStagedError { #[error("Tried to unpack single artifact, but got transitive set")] UnpackSingleTransitiveSet, #[error("Expected a transitive set, got a single artifact")] ExpectedTransitiveSet, // This one could probably be a panic! if DICE didn't eagerly re-evaluate all deps. - #[error("Building an artifact didn't produce it. Expected `{0}` but only have `{1:?}`")] + #[error("Building an artifact didn't produce it. Expected `{}` but only have `{}`", .0.get_path(), display_outputs(.1))] BuildArtifactMissing(BuildArtifact, ActionOutputs), } +fn display_outputs(outputs: &ActionOutputs) -> String { + format!( + "({})", + outputs + .iter() + .map(|(path, _)| path.path()) + .sorted() + .join(", ") + ) +} + /// Represents the "ready" stage of an ensure_artifact_*() call. At this point the /// ArtifactValue/ArtifactGroupValues can be synchronously accessed/constructed. pub(crate) enum EnsureArtifactGroupReady { @@ -196,13 +205,13 @@ pub(crate) enum EnsureArtifactGroupReady { impl EnsureArtifactGroupReady { /// Converts the ensured artifact to an ArtifactGroupValues. The caller must ensure that the passed in artifact /// is the same one that was used to ensure this. - pub(crate) fn to_group_values( + pub(crate) fn to_group_values<'v>( self, - artifact: &ArtifactGroup, + resolved_artifact_group: &ResolvedArtifactGroup<'v>, ) -> anyhow::Result { match self { EnsureArtifactGroupReady::TransitiveSet(values) => Ok(values), - EnsureArtifactGroupReady::Single(value) => match artifact.assert_resolved() { + EnsureArtifactGroupReady::Single(value) => match resolved_artifact_group { ResolvedArtifactGroup::Artifact(artifact) => { Ok(ArtifactGroupValues::from_artifact(artifact.clone(), value)) } @@ -229,13 +238,42 @@ static_assertions::assert_eq_size!(EnsureArtifactGroupReady, [usize; 3]); // TODO(cjhopman): We should be able to wrap this in a convenient assertion macro. 
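`_assert_ensure_artifact_group_future_size` below pins the sizes of these futures, because a future's state machine is materialized by every caller that polls it. A minimal illustration of measuring a future's size; the bound used here is arbitrary, whereas the real check below asserts exact sizes (requires the `futures` crate for the executor):

```rust
async fn small() -> u64 {
    1 + 1
}

fn main() {
    let fut = small();
    // The exact size is an implementation detail of the compiler, so a
    // portable test asserts an upper bound rather than an exact value.
    assert!(std::mem::size_of_val(&fut) <= 16);
    assert_eq!(futures::executor::block_on(fut), 2);
}
```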
 #[allow(unused, clippy::diverging_sub_expression)]
 fn _assert_ensure_artifact_group_future_size() {
-    let v = ensure_artifact_group_staged(panic!(), panic!());
+    let mut ctx: DiceComputations = panic!();
+
+    // These first two are the important ones to track and not regress.
+    let v = ctx.ensure_artifact_group(panic!());
+    let e = [0u8; 128 / 8];
+    static_assertions::assert_eq_size_ptr!(&v, &e);
+
+    let v = ensure_artifact_group_staged(&mut ctx, panic!());
+    let e = [0u8; 1088 / 8];
+    static_assertions::assert_eq_size_ptr!(&v, &e);
+
+    // The rest of these are to help understand how changes are impacting the important ones above. Regressing these
+    // is generally okay if the above don't regress.
+    let v = ensure_artifact_staged(&mut ctx, panic!());
+    let e = [0u8; 1088 / 8];
+    static_assertions::assert_eq_size_ptr!(&v, &e);
+
+    let v = ensure_base_artifact_staged(&mut ctx, panic!());
+    let e = [0u8; 1088 / 8];
+    static_assertions::assert_eq_size_ptr!(&v, &e);
+
+    let v = ensure_build_artifact_staged(&mut ctx, panic!());
+    let e = [0u8; 1088 / 8];
+    static_assertions::assert_eq_size_ptr!(&v, &e);
+
+    let v = ActionCalculation::build_action(&mut ctx, panic!());
     let e = [0u8; 704 / 8];
     static_assertions::assert_eq_size_ptr!(&v, &e);
+
+    let v = ensure_source_artifact_staged(&mut ctx, panic!());
+    let e = [0u8; 128 / 8];
+    static_assertions::assert_eq_size_ptr!(&v, &e);
 }
 
 async fn dir_artifact_value(
-    ctx: &DiceComputations,
+    ctx: &mut DiceComputations<'_>,
     cell_path: Arc<CellPath>,
 ) -> anyhow::Result<ActionDirectoryEntry<ActionSharedDirectory>> {
     // We keep running into this performance footgun where a large directory is declared
@@ -246,31 +284,36 @@ async fn dir_artifact_value(
     // using that directory now only depends on one DirArtifactValueKey, and that DirArtifactValueKey
     // depends on the PathMetadataKey of every member of the directory.
     #[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)]
-    #[display(fmt = "dir_artifact_value({})", .0)]
+    #[display("dir_artifact_value({})", _0)]
    struct DirArtifactValueKey(Arc<CellPath>);

    #[async_trait]
    impl Key for DirArtifactValueKey {
-        type Value = SharedResult<ActionDirectoryEntry<ActionSharedDirectory>>;
+        type Value = buck2_error::Result<ActionDirectoryEntry<ActionSharedDirectory>>;

        async fn compute(
            &self,
            ctx: &mut DiceComputations,
            _cancellation: &CancellationContext,
        ) -> Self::Value {
-            let file_ops = ctx.file_ops();
-            let files = file_ops.read_dir(self.0.as_ref().as_ref()).await?.included;
-
-            let entries = files.iter().map(|x| async {
-                // TODO(scottcao): This currently creates a `DirArtifactValueKey` for each subdir of a source directory.
-                // Instead, this should be 1 key for the entire top-level directory since there's almost
-                // no chance of getting cache hit with a sub-directory.
-                let value =
-                    path_artifact_value(ctx, Arc::new(self.0.as_ref().join(&x.file_name))).await?;
-                anyhow::Ok((x.file_name.clone(), value))
-            });
-
-            let entries = future::try_join_all(entries).await?;
+            let files = DiceFileComputations::read_dir(ctx, self.0.as_ref().as_ref())
+                .await?
+                .included;
+
+            let entries = ctx
+                .try_compute_join(files.iter(), |ctx, x| {
+                    async move {
+                        // TODO(scottcao): This currently creates a `DirArtifactValueKey` for each subdir of a source directory.
+                        // Instead, this should be 1 key for the entire top-level directory since there's almost
+                        // no chance of getting cache hit with a sub-directory.
+ let value = + path_artifact_value(ctx, Arc::new(self.0.as_ref().join(&x.file_name))) + .await?; + anyhow::Ok((x.file_name.clone(), value)) + } + .boxed() + }) + .await?; let entries = entries.into_iter().collect(); let digest_config = ctx.global_data().get_digest_config(); @@ -293,13 +336,10 @@ async fn dir_artifact_value( #[async_recursion] async fn path_artifact_value( - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, cell_path: Arc, ) -> anyhow::Result> { - let file_ops = &ctx.file_ops() as &dyn FileOps; - let raw = file_ops - .read_path_metadata(cell_path.as_ref().as_ref()) - .await?; + let raw = DiceFileComputations::read_path_metadata(ctx, cell_path.as_ref().as_ref()).await?; match PathMetadataOrRedirection::from(raw) { PathMetadataOrRedirection::PathMetadata(meta) => match meta { PathMetadata::ExternalSymlink(symlink) => Ok(ActionDirectoryEntry::Leaf( @@ -319,25 +359,34 @@ async fn path_artifact_value( #[derive(Clone, Dupe, Eq, PartialEq, Hash, Display, Debug, Allocative, RefCast)] #[repr(transparent)] -pub struct EnsureProjectedArtifactKey(pub(crate) ProjectedArtifact); +pub struct EnsureProjectedArtifactKey(pub(crate) ArtifactKind); #[async_trait] impl Key for EnsureProjectedArtifactKey { - type Value = SharedResult; + type Value = buck2_error::Result; async fn compute( &self, ctx: &mut DiceComputations, _cancellation: &CancellationContext, ) -> Self::Value { - let base_value = ensure_base_artifact_staged(ctx, self.0.base()) + let ArtifactKind { base, path } = &self.0; + + if path.is_empty() { + return Err(internal_error_anyhow!( + "EnsureProjectedArtifactKey with non-empty projected path" + ) + .into()); + } + + let base_value = ensure_base_artifact_staged(ctx, base.dupe()) .await? .unpack_single()?; let artifact_fs = ctx.get_artifact_fs().await?; let digest_config = ctx.global_data().get_digest_config(); - let base_path = match self.0.base() { + let base_path = match base { BaseArtifactKind::Build(built) => artifact_fs.resolve_build(built.get_path()), BaseArtifactKind::Source(source) => artifact_fs.resolve_source(source.get_path())?, }; @@ -345,21 +394,12 @@ impl Key for EnsureProjectedArtifactKey { let mut builder = ActionDirectoryBuilder::empty(); insert_artifact(&mut builder, base_path.as_ref(), &base_value)?; - let value = extract_artifact_value(&builder, &base_path.join(self.0.path()), digest_config) + let value = extract_artifact_value(&builder, &base_path.join(path), digest_config) .with_context(|| { - format!( - "The path `{}` cannot be projected in the artifact `{}`", - self.0.path(), - self.0.base() - ) + format!("The path `{path}` cannot be projected in the artifact `{base}`") })? 
- .with_context(|| { - format!( - "The path `{}` does not exist in the artifact `{}`", - self.0.path(), - self.0.base() - ) - })?; + .with_context(|| format!("The path `{path}` does not exist in the artifact `{base}`")) + .tag_anyhow(buck2_error::ErrorTag::ProjectMissingPath)?; Ok(value) } @@ -378,41 +418,40 @@ pub struct EnsureTransitiveSetProjectionKey(pub TransitiveSetProjectionKey); #[async_trait] impl Key for EnsureTransitiveSetProjectionKey { - type Value = SharedResult; + type Value = buck2_error::Result; async fn compute( &self, ctx: &mut DiceComputations, _cancellation: &CancellationContext, ) -> Self::Value { - let set = ctx - .compute_deferred_data(&self.0.key) - .await - .context("Failed to compute deferred")?; + let set = self.0.key.lookup(ctx).await?; let artifact_fs = ctx.get_artifact_fs().await?; - let sub_inputs = set - .as_transitive_set() - .get_projection_sub_inputs(self.0.projection)?; + let projection_sub_inputs = set.get_projection_sub_inputs(self.0.projection)?; + + let sub_inputs: Vec<_> = tokio::task::unconstrained(KeepGoing::try_compute_join_all( + ctx, + projection_sub_inputs.iter(), + |ctx, a| async move { a.resolved_artifact(ctx).await }.boxed(), + )) + .await?; let (values, children) = { // Compute the new inputs. Note that ordering here (and below) is important to ensure - // stability of the ArtifactGroupValues we produce across executions, so we use - // FuturesOrdered. - - let ensure_futs: FuturesOrdered<_> = sub_inputs - .iter() - .map(|v| ensure_artifact_group_staged(ctx, v)) - .collect(); - - let ready_inputs: Vec<_> = - tokio::task::unconstrained(keep_going::try_join_all(ctx, ensure_futs)).await?; + // stability of the ArtifactGroupValues we produce across executions, which try_compute_join_all preserves. + let ready_inputs: Vec<_> = tokio::task::unconstrained(KeepGoing::try_compute_join_all( + ctx, + sub_inputs.iter(), + |ctx, v| async move { ensure_artifact_group_staged(ctx, v.clone()).await }.boxed(), + )) + .await?; // Partition our inputs in artifacts and projections. let mut values_count = 0; for input in sub_inputs.iter() { - if let ArtifactGroup::Artifact(..) = input { + if let ResolvedArtifactGroup::Artifact(..) 
= input { values_count += 1; } } @@ -421,7 +460,7 @@ impl Key for EnsureTransitiveSetProjectionKey { let mut children = Vec::with_capacity(sub_inputs.len() - values_count); for (group, ready) in zip(sub_inputs.iter(), ready_inputs) { - match group.assert_resolved() { + match group { ResolvedArtifactGroup::Artifact(artifact) => { values.push((artifact.dupe(), ready.unpack_single()?)) } diff --git a/app/buck2_build_api/src/artifact_groups/deferred.rs b/app/buck2_build_api/src/artifact_groups/deferred.rs index bc502f449e87b..ca8f72da6bd78 100644 --- a/app/buck2_build_api/src/artifact_groups/deferred.rs +++ b/app/buck2_build_api/src/artifact_groups/deferred.rs @@ -8,42 +8,51 @@ */ use allocative::Allocative; -use buck2_artifact::deferred::data::DeferredData; +use buck2_artifact::deferred::key::DeferredHolderKey; +use derive_more::Display; +use dice::DiceComputations; use dupe::Dupe; -use starlark::values::OwnedFrozenValue; use starlark::values::OwnedFrozenValueTyped; -use starlark::values::Value; -use crate::deferred::types::AnyValue; -use crate::deferred::types::TrivialDeferred; +use crate::deferred::calculation::lookup_deferred_holder; use crate::interpreter::rule_defs::transitive_set::FrozenTransitiveSet; -pub type TransitiveSetKey = DeferredData; +#[derive(Hash, Eq, PartialEq, Clone, Dupe, Display, Debug, Allocative)] +#[display("{:?}", self)] +pub struct TransitiveSetKey(DeferredHolderKey, TransitiveSetIndex); -#[derive(Clone, Dupe, Debug, Allocative)] -pub struct DeferredTransitiveSetData( - #[allocative(skip)] // TODO(nga): visit heap. - pub(super) OwnedFrozenValueTyped, -); - -impl TrivialDeferred for DeferredTransitiveSetData { - fn as_any_value(&self) -> &dyn AnyValue { - self +impl TransitiveSetKey { + pub fn new(key: DeferredHolderKey, id: TransitiveSetIndex) -> Self { + Self(key, id) } - fn provide<'a>(&'a self, _demand: &mut provider::Demand<'a>) {} -} + pub fn holder_key(&self) -> &DeferredHolderKey { + &self.0 + } -impl DeferredTransitiveSetData { - pub fn testing_new(value: OwnedFrozenValue) -> DeferredTransitiveSetData { - DeferredTransitiveSetData(value.downcast_anyhow().unwrap()) + pub fn index(&self) -> TransitiveSetIndex { + self.1 } +} - pub fn as_transitive_set(&self) -> OwnedFrozenValueTyped { - self.0.dupe() +#[derive(Hash, Eq, PartialEq, Clone, Dupe, Copy, Display, Debug, Allocative)] +#[display("{:?}", self)] +/// Index for the transitive set data in the analysis result +pub struct TransitiveSetIndex(pub(crate) u32); + +impl TransitiveSetKey { + pub async fn lookup( + &self, + ctx: &mut DiceComputations<'_>, + ) -> anyhow::Result> { + lookup_deferred_holder(ctx, &self.0) + .await? + .lookup_transitive_set(self) } +} - pub fn as_value(&self) -> Value { - self.0.to_value() +impl TransitiveSetIndex { + pub fn testing_new(v: u32) -> Self { + Self(v) } } diff --git a/app/buck2_build_api/src/artifact_groups/mod.rs b/app/buck2_build_api/src/artifact_groups/mod.rs deleted file mode 100644 index 252684afde838..0000000000000 --- a/app/buck2_build_api/src/artifact_groups/mod.rs +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -mod artifact_group_values; -pub mod calculation; -pub mod deferred; -pub mod promise; -pub mod registry; - -use std::hash::Hash; - -use allocative::Allocative; -pub use artifact_group_values::ArtifactGroupValues; -use buck2_artifact::artifact::artifact_type::Artifact; -use derive_more::Display; -use dupe::Dupe; -use gazebo::variants::UnpackVariants; - -use crate::artifact_groups::deferred::TransitiveSetKey; -use crate::artifact_groups::promise::PromiseArtifact; - -/// An [ArtifactGroup] can expand to one or more [Artifact]. Those Artifacts will be made available -/// to Actions when they execute. -#[derive( - Clone, - Debug, - Display, - Dupe, - PartialEq, - Eq, - Hash, - UnpackVariants, - Allocative -)] -pub enum ArtifactGroup { - Artifact(Artifact), - TransitiveSetProjection(TransitiveSetProjectionKey), - Promise(PromiseArtifact), -} - -impl ArtifactGroup { - pub fn assert_resolved(&self) -> ResolvedArtifactGroup { - self.resolved().unwrap() - } - - pub fn resolved(&self) -> anyhow::Result { - Ok(match self { - ArtifactGroup::Artifact(a) => ResolvedArtifactGroup::Artifact(a), - ArtifactGroup::TransitiveSetProjection(a) => { - ResolvedArtifactGroup::TransitiveSetProjection(a) - } - ArtifactGroup::Promise(p) => ResolvedArtifactGroup::Artifact(p.get_err()?), - }) - } -} -pub enum ResolvedArtifactGroup<'a> { - Artifact(&'a Artifact), - TransitiveSetProjection(&'a TransitiveSetProjectionKey), -} - -#[derive(Clone, Debug, Display, Dupe, PartialEq, Eq, Hash, Allocative)] -#[display(fmt = "TransitiveSetProjection({}, {})", key, projection)] -pub struct TransitiveSetProjectionKey { - pub key: TransitiveSetKey, - pub projection: usize, -} diff --git a/app/buck2_build_api/src/artifact_groups/promise.rs b/app/buck2_build_api/src/artifact_groups/promise.rs index dc3e2c992a3d7..1bc97fba23933 100644 --- a/app/buck2_build_api/src/artifact_groups/promise.rs +++ b/app/buck2_build_api/src/artifact_groups/promise.rs @@ -13,23 +13,16 @@ use std::sync::Arc; use std::sync::OnceLock; use allocative::Allocative; -use anyhow::Context; use buck2_artifact::artifact::artifact_type::Artifact; use buck2_core::base_deferred_key::BaseDeferredKey; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; use dupe::Dupe; use starlark::codemap::FileSpan; -use thiserror::Error; -use crate::interpreter::rule_defs::artifact::StarlarkArtifactLike; - -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] pub enum PromiseArtifactResolveError { - #[error( - "Resolved promise of the artifact promise {} was not an artifact (was `{1}`)", - maybe_declared_at(_0) - )] - NotAnArtifact(Option, String), + #[error("Resolved promise of the artifact promise was not an artifact (was `{0}`)")] + NotAnArtifact(String), #[error("Artifact promise {1} {} wasn't resolved", maybe_declared_at(_0))] PromiseNotResolved(Option, String), #[error("Artifact promise was resolved multiple times")] @@ -48,6 +41,12 @@ pub enum PromiseArtifactResolveError { "assert_short_path() was called with `short_path = {0}`, but it did not match the artifact's actual short path: `{1}`" )] ShortPathMismatch(ForwardRelativePathBuf, String), + #[error("Internal error: analysis result did not contain promise with ID ({0})")] + NotFoundInAnalysis(PromiseArtifactId), + #[error( + "Internal error: promise artifact (id: {0}) owner is ({1}), which is not an anon target" + )] + OwnerIsNotAnonTarget(PromiseArtifactId, BaseDeferredKey), } fn maybe_declared_at(location: &Option) -> String { @@ -77,6 +76,16 @@ impl PromiseArtifactId { pub fn new(owner: 
BaseDeferredKey, id: usize) -> PromiseArtifactId { Self { owner, id } } + + pub fn owner(&self) -> &BaseDeferredKey { + &self.owner + } +} + +impl Display for PromiseArtifactId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}#{}", &self.owner, self.id) + } } impl PromiseArtifact { @@ -101,24 +110,17 @@ impl PromiseArtifact { pub fn resolve( &self, - artifact: &dyn StarlarkArtifactLike, + artifact: Artifact, expected_short_path: &Option, ) -> anyhow::Result<()> { - if let Some(v) = artifact.get_associated_artifacts() { - if !v.is_empty() { - return Err(PromiseArtifactResolveError::HasAssociatedArtifacts.into()); - } - } - let bound = artifact - .get_bound_artifact() - .context("expected bound artifact for promise_artifact resolve")?; + let bound = artifact; if bound.is_source() { return Err(PromiseArtifactResolveError::SourceArtifact.into()); } if let Some(expected_short_path) = expected_short_path { bound.get_path().with_short_path(|artifact_short_path| { if artifact_short_path != expected_short_path { - Err(anyhow::Error::new( + Err(anyhow::Error::from( PromiseArtifactResolveError::ShortPathMismatch( expected_short_path.clone(), artifact_short_path.to_string(), @@ -138,6 +140,10 @@ impl PromiseArtifact { pub fn id(&self) -> &PromiseArtifactId { self.id.as_ref() } + + pub fn owner(&self) -> &BaseDeferredKey { + &self.id.owner + } } impl Display for PromiseArtifact { diff --git a/app/buck2_build_api/src/artifact_groups/registry.rs b/app/buck2_build_api/src/artifact_groups/registry.rs index 90190cf7d9071..6a9a77ee10333 100644 --- a/app/buck2_build_api/src/artifact_groups/registry.rs +++ b/app/buck2_build_api/src/artifact_groups/registry.rs @@ -8,66 +8,44 @@ */ use allocative::Allocative; -use anyhow::Context as _; use dupe::Dupe; use starlark::eval::Evaluator; +use starlark::values::FrozenValueTyped; use starlark::values::Value; +use starlark::values::ValueTyped; use crate::analysis::registry::AnalysisValueFetcher; -use crate::artifact_groups::deferred::DeferredTransitiveSetData; -use crate::deferred::types::DeferredRegistry; -use crate::deferred::types::ReservedTrivialDeferredData; -use crate::interpreter::rule_defs::transitive_set::FrozenTransitiveSet; +use crate::analysis::registry::AnalysisValueStorage; +use crate::interpreter::rule_defs::transitive_set::FrozenTransitiveSetDefinition; use crate::interpreter::rule_defs::transitive_set::TransitiveSet; #[derive(Allocative)] -pub struct ArtifactGroupRegistry { - pending: Vec>, -} +pub struct ArtifactGroupRegistry; impl ArtifactGroupRegistry { pub fn new() -> Self { - Self { - pending: Vec::new(), - } + Self } - pub fn create_transitive_set<'v>( + pub(crate) fn create_transitive_set<'v>( &mut self, - definition: Value<'v>, + definition: FrozenValueTyped<'v, FrozenTransitiveSetDefinition>, value: Option>, children: Option>, - deferred: &mut DeferredRegistry, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { - let reserved = deferred.reserve_trivial::(); - let set = TransitiveSet::new_from_values( - reserved.data().dupe(), - definition, - value, - children, - eval, - )?; - self.pending.push(reserved); - Ok(set) + analysis_value_storage: &mut AnalysisValueStorage<'v>, + eval: &mut Evaluator<'v, '_, '_>, + ) -> starlark::Result>> { + Ok(analysis_value_storage.register_transitive_set(move |key| { + let set = TransitiveSet::new_from_values(key.dupe(), definition, value, children, eval) + .map_err(|e| e.into_anyhow())?; + Ok(eval.heap().alloc_typed(set)) + })?) 
} pub(crate) fn ensure_bound( self, - registry: &mut DeferredRegistry, - analysis_value_fetcher: &AnalysisValueFetcher, + _analysis_value_fetcher: &AnalysisValueFetcher, ) -> anyhow::Result<()> { - for key in self.pending { - let id = key.data().deferred_key().id(); - - let set = analysis_value_fetcher - .get(id)? - .with_context(|| format!("Key is missing in AnalysisValueFetcher: {:?}", id))?; - - let set = set.downcast_anyhow::()?; - registry.bind_trivial(key, DeferredTransitiveSetData(set)); - } - Ok(()) } } diff --git a/app/buck2_build_api/src/attrs/mod.rs b/app/buck2_build_api/src/attrs.rs similarity index 100% rename from app/buck2_build_api/src/attrs/mod.rs rename to app/buck2_build_api/src/attrs.rs diff --git a/app/buck2_build_api/src/audit_cell.rs b/app/buck2_build_api/src/audit_cell.rs index f98f5a38872e8..9838061a4809d 100644 --- a/app/buck2_build_api/src/audit_cell.rs +++ b/app/buck2_build_api/src/audit_cell.rs @@ -7,29 +7,36 @@ * of this source tree. */ -use buck2_core::cells::CellResolver; use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; use buck2_core::fs::project::ProjectRoot; use buck2_core::fs::project_rel_path::ProjectRelativePath; use buck2_util::late_binding::LateBinding; +use dice::DiceComputations; +use futures::future::BoxFuture; use indexmap::IndexMap; pub static AUDIT_CELL: LateBinding< for<'v> fn( + ctx: &'v mut DiceComputations<'_>, aliases_to_resolve: &'v Vec, aliases: bool, - cells: &'v CellResolver, cwd: &'v ProjectRelativePath, fs: &'v ProjectRoot, - ) -> anyhow::Result>, + ) -> BoxFuture<'v, anyhow::Result>>, > = LateBinding::new("AUDIT_CELL"); pub fn audit_cell<'v>( + ctx: &'v mut DiceComputations<'_>, aliases_to_resolve: &'v Vec, aliases: bool, - cells: &'v CellResolver, cwd: &'v ProjectRelativePath, fs: &'v ProjectRoot, -) -> anyhow::Result> { - (AUDIT_CELL.get()?)(aliases_to_resolve, aliases, cells, cwd, fs) +) -> anyhow::Result>>> { + Ok((AUDIT_CELL.get()?)( + ctx, + aliases_to_resolve, + aliases, + cwd, + fs, + )) } diff --git a/app/buck2_build_api/src/audit_output.rs b/app/buck2_build_api/src/audit_output.rs index 5c0d7cb303d1f..c61abd9c84a50 100644 --- a/app/buck2_build_api/src/audit_output.rs +++ b/app/buck2_build_api/src/audit_output.rs @@ -10,9 +10,10 @@ use std::future::Future; use std::pin::Pin; +use buck2_common::global_cfg_options::GlobalCfgOptions; use buck2_core::cells::CellResolver; use buck2_core::fs::project_rel_path::ProjectRelativePath; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use buck2_util::late_binding::LateBinding; use dice::DiceComputations; @@ -32,8 +33,8 @@ pub static AUDIT_OUTPUT: LateBinding< &'v str, &'v ProjectRelativePath, &'v CellResolver, - &'v DiceComputations, - global_target_platform: Option, + &'v mut DiceComputations, + &'v GlobalCfgOptions, ) -> Pin>> + 'v>>, > = LateBinding::new("AUDIT_OUTPUT"); @@ -42,15 +43,15 @@ pub async fn audit_output<'v>( output_path: &'v str, working_dir: &'v ProjectRelativePath, cell_resolver: &'v CellResolver, - dice_ctx: &'v DiceComputations, - global_target_platform: Option, + dice_ctx: &'v mut DiceComputations<'_>, + global_cfg_options: &'v GlobalCfgOptions, ) -> anyhow::Result> { (AUDIT_OUTPUT.get()?)( output_path, working_dir, cell_resolver, dice_ctx, - global_target_platform, + global_cfg_options, ) .await } diff --git a/app/buck2_build_api/src/build.rs b/app/buck2_build_api/src/build.rs new file mode 100644 index 0000000000000..3290ae748e643 --- /dev/null +++ b/app/buck2_build_api/src/build.rs @@ -0,0 +1,593 @@ +/* + 
* Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::collections::BTreeMap; +use std::collections::HashMap; +use std::fmt::Debug; +use std::fmt::Formatter; +use std::sync::Arc; + +use allocative::Allocative; +use buck2_core::configuration::compatibility::MaybeCompatible; +use buck2_core::execution_types::executor_config::PathSeparatorKind; +use buck2_core::provider::label::ConfiguredProvidersLabel; +use buck2_core::provider::label::ProvidersLabel; +use buck2_core::target::configured_target_label::ConfiguredTargetLabel; +use buck2_error::BuckErrorContext; +use buck2_events::dispatch::console_message; +use buck2_execute::artifact::fs::ExecutorFs; +use dice::LinearRecomputeDiceComputations; +use dice::UserComputationData; +use dupe::Dupe; +use dupe::OptionDupedExt; +use futures::stream; +use futures::stream::BoxStream; +use futures::stream::FuturesUnordered; +use futures::stream::Stream; +use futures::stream::StreamExt; +use futures::FutureExt; +use itertools::Itertools; +use tokio::sync::Mutex; + +use crate::actions::artifact::get_artifact_fs::GetArtifactFs; +use crate::actions::calculation::get_target_rule_type_name; +use crate::actions::calculation::BuildKey; +use crate::analysis::calculation::RuleAnalysisCalculation; +use crate::artifact_groups::calculation::EnsureTransitiveSetProjectionKey; +use crate::artifact_groups::ArtifactGroup; +use crate::artifact_groups::ArtifactGroupValues; +use crate::artifact_groups::ResolvedArtifactGroup; +use crate::artifact_groups::ResolvedArtifactGroupBuildSignalsKey; +use crate::build_signals::HasBuildSignals; +use crate::interpreter::rule_defs::cmd_args::AbsCommandLineContext; +use crate::interpreter::rule_defs::cmd_args::CommandLineArgLike; +use crate::interpreter::rule_defs::cmd_args::SimpleCommandLineArtifactVisitor; +use crate::interpreter::rule_defs::provider::builtin::run_info::FrozenRunInfo; +use crate::interpreter::rule_defs::provider::test_provider::TestProvider; +use crate::keep_going::KeepGoing; +use crate::materialize::materialize_artifact_group; +use crate::materialize::MaterializationContext; +use crate::validation::validation_impl::VALIDATION_IMPL; + +mod action_error; +pub mod build_report; +mod graph_size; + +/// The types of provider to build on the configured providers label +#[derive(Debug, Clone, Dupe, Allocative)] +pub enum BuildProviderType { + Default, + DefaultOther, + Run, + Test, +} + +#[derive(Clone, Debug, Allocative)] +pub struct ConfiguredBuildTargetResultGen { + pub outputs: Vec, + pub run_args: Option>, + pub target_rule_type_name: Option, + pub configured_graph_size: Option>>, + pub errors: Vec, +} + +pub type ConfiguredBuildTargetResult = + ConfiguredBuildTargetResultGen>; + +pub struct BuildTargetResult { + pub configured: BTreeMap>, + /// Errors that could not be associated with a specific configured target. These errors may be + /// associated with a providers label, or might not be associated with any target at all. 
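
The collection logic that follows (in `collect_stream`) folds a stream of per-label events into these maps, marking `build_failed` and optionally stopping at the first error. A compact, std-only sketch of that accumulation under a `fail_fast` flag; the `Event` enum and `String` labels are simplified stand-ins for `BuildEvent` and `ConfiguredProvidersLabel`:

```rust
use std::collections::BTreeMap;

/// Simplified stand-ins for the event variants used by `collect_stream`.
enum Event {
    Prepared { label: String },
    Output { label: String, index: usize, output: Result<String, String> },
}

type Results = BTreeMap<String, Vec<(usize, Result<String, String>)>>;

/// Fold events into per-label results; flag failure and optionally stop
/// at the first error, mirroring the `fail_fast` handling.
fn collect(events: Vec<Event>, fail_fast: bool) -> (Results, bool) {
    let mut res = Results::new();
    let mut build_failed = false;
    for event in events {
        match event {
            Event::Prepared { label } => {
                // Register the target even if it never produces outputs.
                res.entry(label).or_default();
            }
            Event::Output { label, index, output } => {
                let is_err = output.is_err();
                res.entry(label).or_default().push((index, output));
                if is_err {
                    build_failed = true;
                    if fail_fast {
                        break;
                    }
                }
            }
        }
    }
    (res, build_failed)
}

fn main() {
    let events = vec![
        Event::Prepared { label: "//app:bin".to_owned() },
        Event::Output { label: "//app:bin".to_owned(), index: 0, output: Ok("out/bin".to_owned()) },
    ];
    let (res, failed) = collect(events, true);
    assert!(!failed);
    assert_eq!(res["//app:bin"].len(), 1);
}
```
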
+ pub other_errors: BTreeMap, Vec>, + pub build_failed: bool, +} + +impl BuildTargetResult { + pub fn new() -> Self { + Self { + configured: BTreeMap::new(), + other_errors: BTreeMap::new(), + build_failed: false, + } + } + + pub fn extend(&mut self, other: BuildTargetResult) { + self.configured.extend(other.configured); + self.other_errors.extend(other.other_errors); + } + + pub fn is_empty(&self) -> bool { + self.configured.is_empty() && self.other_errors.is_empty() + } + + pub async fn collect_stream( + mut stream: impl Stream + Unpin, + fail_fast: bool, + ) -> anyhow::Result { + // Create a map of labels to outputs, but retain the expected index of each output. + let mut res = HashMap::< + ConfiguredProvidersLabel, + Option)>>, + >::new(); + let mut other_errors = BTreeMap::<_, Vec<_>>::new(); + let mut build_failed = false; + + while let Some(event) = stream.next().await { + let ConfiguredBuildEvent { variant, label } = match event { + BuildEvent::Configured(variant) => variant, + BuildEvent::OtherError { label: target, err } => { + other_errors.entry(target).or_default().push(err); + build_failed = true; + continue; + } + }; + match variant { + ConfiguredBuildEventVariant::SkippedIncompatible => { + res.entry((*label).dupe()).or_insert(None); + } + ConfiguredBuildEventVariant::Prepared { + run_args, + target_rule_type_name, + } => { + res.entry((*label).dupe()) + .or_insert(Some(ConfiguredBuildTargetResultGen { + outputs: Vec::new(), + run_args, + target_rule_type_name: Some(target_rule_type_name), + configured_graph_size: None, + errors: Vec::new(), + })); + } + ConfiguredBuildEventVariant::Validation { result } => { + if let Err(e) = result { + build_failed = true; + res.get_mut(label.as_ref()) + .with_internal_error_anyhow(|| format!("BuildEventVariant::Validation before BuildEventVariant::Prepared for `{}`", label))? + .as_mut() + .with_internal_error_anyhow(|| format!("BuildEventVariant::Validation for a skipped target: `{}`", label))? + .errors + .push(e); + if fail_fast { + break; + } + } + } + ConfiguredBuildEventVariant::Output { index, output } => { + let is_err = output.is_err(); + + res.get_mut(label.as_ref()) + .with_internal_error_anyhow(|| format!("BuildEventVariant::Output before BuildEventVariant::Prepared for {}", label))? + .as_mut() + .with_internal_error_anyhow(|| format!("BuildEventVariant::Output for a skipped target: `{}`", label))? + .outputs + .push((index, output)); + + if is_err { + build_failed = true; + if fail_fast { + break; + } + } + } + ConfiguredBuildEventVariant::GraphSize { + configured_graph_size, + } => { + res.get_mut(label.as_ref()) + .with_internal_error_anyhow(|| format!("BuildEventVariant::GraphSize before BuildEventVariant::Prepared for {}", label))? + .as_mut() + .with_internal_error_anyhow(|| format!("BuildEventVariant::GraphSize for a skipped target: `{}`", label))? + .configured_graph_size = Some(configured_graph_size); + } + ConfiguredBuildEventVariant::Error { err } => { + build_failed = true; + res.entry((*label).dupe()) + .or_insert(Some(ConfiguredBuildTargetResultGen { + outputs: Vec::new(), + run_args: None, + target_rule_type_name: None, + configured_graph_size: None, + errors: Vec::new(), + })) + .as_mut() + .unwrap() + .errors + .push(err); + if fail_fast { + break; + } + } + } + } + + // Sort our outputs within each individual BuildTargetResult, then return those. + // Also, turn our HashMap into a BTreeMap. 
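
The sort-and-dedupe step at the end of `collect_stream` leans on two properties called out in the comments: indices are unique per distinct output, and equal indices always carry equal outputs. The same `sort_unstable_by_key` + `unique_by` combination in isolation, assuming the `itertools` crate this module already imports:

```rust
use itertools::Itertools;

fn main() {
    // (index, output) pairs, possibly duplicated because the same target
    // can be requested more than once.
    let mut outputs = vec![(2, "c"), (0, "a"), (2, "c"), (1, "b")];

    // No need for a stable sort: equal indices carry equal outputs.
    outputs.sort_unstable_by_key(|(index, _)| *index);

    let deduped: Vec<&str> = outputs
        .into_iter()
        .unique_by(|(index, _)| *index)
        .map(|(_, output)| output)
        .collect();

    assert_eq!(deduped, ["a", "b", "c"]);
}
```
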
+ let res = res + .into_iter() + .map(|(label, result)| { + let result = result.map(|result| { + let ConfiguredBuildTargetResultGen { + mut outputs, + run_args, + target_rule_type_name, + configured_graph_size, + errors, + } = result; + + // No need for a stable sort: the indices are unique (see below). + outputs.sort_unstable_by_key(|(index, _outputs)| *index); + + // TODO: This whole building thing needs quite a bit of refactoring. We might + // request the same targets multiple times here, but since we know that + // ConfiguredTargetLabel -> Output is going to be deterministic, we just dedupe + // them using the index. + ConfiguredBuildTargetResult { + outputs: outputs + .into_iter() + .unique_by(|(index, _outputs)| *index) + .map(|(_index, outputs)| outputs) + .collect(), + run_args, + target_rule_type_name, + configured_graph_size, + errors, + } + }); + + (label, result) + }) + .collect(); + + Ok(Self { + configured: res, + other_errors, + build_failed, + }) + } +} + +pub enum ConfiguredBuildEventVariant { + SkippedIncompatible, + Prepared { + run_args: Option>, + target_rule_type_name: String, + }, + Output { + output: buck2_error::Result, + /// Ensure a stable ordering of outputs. + index: usize, + }, + Validation { + result: buck2_error::Result<()>, + }, + GraphSize { + configured_graph_size: buck2_error::Result>, + }, + Error { + /// An error that can't be associated with a single artifact. + err: buck2_error::Error, + }, +} + +/// Events to be accumulated using BuildTargetResult::collect_stream. +pub struct ConfiguredBuildEvent { + label: Arc, + variant: ConfiguredBuildEventVariant, +} + +pub enum BuildEvent { + Configured(ConfiguredBuildEvent), + // An error that cannot be associated with a specific configured target + OtherError { + label: Option, + err: buck2_error::Error, + }, +} + +impl BuildEvent { + pub fn new_configured( + label: ConfiguredProvidersLabel, + variant: ConfiguredBuildEventVariant, + ) -> Self { + Self::Configured(ConfiguredBuildEvent { + label: Arc::new(label), + variant, + }) + } +} + +#[derive(Copy, Clone, Dupe, Debug)] +pub struct BuildConfiguredLabelOptions { + pub skippable: bool, + pub want_configured_graph_size: bool, +} + +pub async fn build_configured_label<'a>( + ctx: &'a LinearRecomputeDiceComputations<'_>, + materialization: &'a MaterializationContext, + providers_label: ConfiguredProvidersLabel, + providers_to_build: &ProvidersToBuild, + opts: BuildConfiguredLabelOptions, +) -> BoxStream<'a, ConfiguredBuildEvent> { + let providers_label = Arc::new(providers_label); + build_configured_label_inner( + ctx, + materialization, + providers_label.dupe(), + providers_to_build, + opts, + ) + .await + .unwrap_or_else(|e| { + futures::stream::once(futures::future::ready(ConfiguredBuildEvent { + label: providers_label, + variant: ConfiguredBuildEventVariant::Error { err: e.into() }, + })) + .boxed() + }) +} + +async fn build_configured_label_inner<'a>( + ctx: &'a LinearRecomputeDiceComputations<'_>, + materialization: &'a MaterializationContext, + providers_label: Arc, + providers_to_build: &ProvidersToBuild, + opts: BuildConfiguredLabelOptions, +) -> anyhow::Result> { + let artifact_fs = ctx.get().get_artifact_fs().await?; + + let (outputs, run_args, target_rule_type_name) = { + // A couple of these objects aren't Send and so scope them here so async transform doesn't get concerned. + let providers = match ctx.get().get_providers(providers_label.as_ref()).await? 
{
+            MaybeCompatible::Incompatible(reason) => {
+                return if opts.skippable {
+                    console_message(reason.skipping_message(providers_label.target()));
+                    Ok(
+                        futures::stream::once(futures::future::ready(ConfiguredBuildEvent {
+                            label: providers_label.dupe(),
+                            variant: ConfiguredBuildEventVariant::SkippedIncompatible,
+                        }))
+                        .boxed(),
+                    )
+                } else {
+                    Err(reason.to_err())
+                };
+            }
+            MaybeCompatible::Compatible(v) => v,
+        };
+
+        // Important we use ordered collections, so the order matches the order the rule
+        // author wrote.
+        let mut outputs = Vec::new();
+        // Providers that produced each output, in the order of outputs above. We use a separate collection
+        // otherwise we'd build the same output twice when it's both in DefaultInfo and RunInfo
+        let collection = providers.provider_collection();
+
+        let mut run_args: Option<Vec<String>> = None;
+
+        if providers_to_build.default {
+            collection
+                .default_info()?
+                .for_each_default_output_artifact_only(&mut |o| {
+                    outputs.push((ArtifactGroup::Artifact(o), BuildProviderType::Default))
+                })?;
+        }
+        if providers_to_build.default_other {
+            collection
+                .default_info()?
+                .for_each_default_output_other_artifacts_only(&mut |o| {
+                    outputs.push((o, BuildProviderType::DefaultOther))
+                })?;
+            collection.default_info()?.for_each_other_output(&mut |o| {
+                outputs.push((o, BuildProviderType::DefaultOther))
+            })?;
+        }
+        if providers_to_build.run {
+            if let Some(runinfo) = providers
+                .provider_collection()
+                .builtin_provider::<FrozenRunInfo>()
+            {
+                let mut artifact_visitor = SimpleCommandLineArtifactVisitor::new();
+                runinfo.visit_artifacts(&mut artifact_visitor)?;
+                for input in artifact_visitor.inputs {
+                    outputs.push((input, BuildProviderType::Run));
+                }
+                // Produce arguments to run on a local machine.
+                let path_separator = if cfg!(windows) {
+                    PathSeparatorKind::Windows
+                } else {
+                    PathSeparatorKind::Unix
+                };
+                let executor_fs = ExecutorFs::new(&artifact_fs, path_separator);
+                let mut cli = Vec::<String>::new();
+                let mut ctx = AbsCommandLineContext::new(&executor_fs);
+                runinfo.add_to_command_line(&mut cli, &mut ctx)?;
+                run_args = Some(cli);
+            }
+        }
+        if providers_to_build.tests {
+            if let Some(test_provider) = <dyn TestProvider>::from_collection(collection) {
+                let mut artifact_visitor = SimpleCommandLineArtifactVisitor::new();
+                test_provider.visit_artifacts(&mut artifact_visitor)?;
+                for input in artifact_visitor.inputs {
+                    outputs.push((input, BuildProviderType::Test));
+                }
+            }
+        }
+
+        let target_rule_type_name =
+            get_target_rule_type_name(&mut ctx.get(), providers_label.target()).await?;
+
+        (outputs, run_args, target_rule_type_name)
+    };
+
+    if let Some(signals) = ctx
+        .get()
+        .per_transaction_data()
+        .get_build_signals()
+        .cloned()
+    {
+        let resolved_artifacts: Vec<_> =
+            tokio::task::unconstrained(KeepGoing::try_compute_join_all(
+                &mut ctx.get(),
+                outputs.iter(),
+                |ctx, (output, _type)| async move { output.resolved_artifact(ctx).await }.boxed(),
+            ))
+            .await?;
+        let node_keys = resolved_artifacts
+            .iter()
+            .filter_map(|resolved| match resolved.dupe() {
+                ResolvedArtifactGroup::Artifact(artifact) => artifact
+                    .action_key()
+                    .duped()
+                    .map(BuildKey)
+                    .map(ResolvedArtifactGroupBuildSignalsKey::BuildKey),
+                ResolvedArtifactGroup::TransitiveSetProjection(key) => Some(
+                    ResolvedArtifactGroupBuildSignalsKey::EnsureTransitiveSetProjectionKey(
+                        EnsureTransitiveSetProjectionKey(key.dupe().dupe()),
+                    ),
+                ),
+            })
+            .collect();
+
+        signals.top_level_target(providers_label.target().dupe(), node_keys);
+    }
+
+    if !opts.skippable && outputs.is_empty() {
+        let docs =
"https://buck2.build/docs/users/faq/common_issues/#why-does-my-target-not-have-any-outputs"; // @oss-enable + // @oss-disable: let docs = "https://www.internalfb.com/intern/staticdocs/buck2/docs/users/faq/common_issues/#why-does-my-target-not-have-any-outputs"; + console_message(format!( + "Target {} does not have any outputs. This means the rule did not define any outputs. See {} for more information", + providers_label.target(), + docs, + )); + } + + let validation_result = { + async fn linear_validate( + ctx: &LinearRecomputeDiceComputations<'_>, + target: ConfiguredTargetLabel, + ) -> Result<(), buck2_error::Error> { + let mut ctx = ctx.get(); + VALIDATION_IMPL + .get()? + .validate_target_node_transitively(&mut ctx, target) + .await + } + linear_validate(&ctx, providers_label.target().dupe()).map({ + let providers_label = providers_label.dupe(); + move |result| ConfiguredBuildEvent { + label: providers_label, + variant: ConfiguredBuildEventVariant::Validation { result }, + } + }) + }; + + let outputs = outputs + .into_iter() + .enumerate() + .map({ + |(index, (output, provider_type))| { + let materialization = materialization.dupe(); + async move { + let res = + match materialize_artifact_group(&mut ctx.get(), &output, &materialization) + .await + { + Ok(values) => Ok(ProviderArtifacts { + values, + provider_type, + }), + Err(e) => Err(buck2_error::Error::from(e)), + }; + (index, res) + } + } + }) + .collect::>() + .map({ + let providers_label = providers_label.dupe(); + move |(index, output)| ConfiguredBuildEvent { + label: providers_label.dupe(), + variant: ConfiguredBuildEventVariant::Output { index, output }, + } + }); + + let stream = futures::stream::once(futures::future::ready(ConfiguredBuildEvent { + label: providers_label.dupe(), + variant: ConfiguredBuildEventVariant::Prepared { + run_args, + target_rule_type_name, + }, + })) + .chain(outputs) + .chain(stream::once(validation_result)); + + if opts.want_configured_graph_size { + let stream = stream.chain(futures::stream::once(async move { + let configured_graph_size = + graph_size::get_configured_graph_size(&mut ctx.get(), providers_label.target()) + .await + .map_err(|e| e.into()); + + ConfiguredBuildEvent { + label: providers_label, + variant: ConfiguredBuildEventVariant::GraphSize { + configured_graph_size, + }, + } + })); + + Ok(stream.boxed()) + } else { + Ok(stream.boxed()) + } +} + +#[derive(Clone, Allocative)] +pub struct ProviderArtifacts { + pub values: ArtifactGroupValues, + pub provider_type: BuildProviderType, +} + +// what type of artifacts to build based on the provider it came from +#[derive(Default, Clone)] +pub struct ProvidersToBuild { + pub default: bool, + pub default_other: bool, + pub run: bool, + pub tests: bool, +} + +impl Debug for ProviderArtifacts { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ProviderArtifacts") + .field("values", &self.values.iter().collect::>()) + .field("provider_type", &self.provider_type) + .finish() + } +} + +pub trait HasCreateUnhashedSymlinkLock { + fn set_create_unhashed_symlink_lock(&mut self, lock: Arc>); + + fn get_create_unhashed_symlink_lock(&self) -> Arc>; +} + +impl HasCreateUnhashedSymlinkLock for UserComputationData { + fn set_create_unhashed_symlink_lock(&mut self, lock: Arc>) { + self.data.set(lock); + } + + fn get_create_unhashed_symlink_lock(&self) -> Arc> { + self.data + .get::>>() + .expect("Lock for creating unhashed symlinks should be set") + .dupe() + } +} diff --git a/app/buck2_build_api/src/build/action_error.rs 
b/app/buck2_build_api/src/build/action_error.rs new file mode 100644 index 0000000000000..2b2030dd91059 --- /dev/null +++ b/app/buck2_build_api/src/build/action_error.rs @@ -0,0 +1,163 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! Schema for the structured action error within the build report. + +use buck2_data::command_execution_kind::Command; +use buck2_data::ActionError; +use buck2_data::CommandExecutionDetails; +use buck2_event_observer::display::display_action_owner; +use buck2_event_observer::display::get_action_error_reason; +use buck2_event_observer::display::TargetDisplayOptions; +use serde::Serialize; + +use crate::build::build_report::BuildReportCollector; + +#[derive(Debug, Clone, Serialize, PartialOrd, Ord, PartialEq, Eq)] +struct BuildReportActionName { + category: String, + identifier: String, +} + +#[derive(Debug, Clone, Serialize, PartialOrd, Ord, PartialEq, Eq)] +struct BuildReportActionKey { + owner: String, +} + +#[derive(Debug, Clone, Serialize, PartialOrd, Ord, PartialEq, Eq)] +enum BuildReportActionErrorDiagnostics { + #[serde(rename = "sub_errors")] + SubErrors(Vec), + #[serde(rename = "handler_invocation_error")] + HandlerInvocationError(String), +} + +#[derive(Debug, Clone, Serialize, PartialOrd, Ord, PartialEq, Eq)] +struct BuildReportActionSubError { + category: String, + message_content: Option, + locations: Option>, +} + +#[derive(Debug, Clone, Serialize, PartialOrd, Ord, PartialEq, Eq)] +struct BuildReportActionErrorLocation { + file: String, + line: Option, +} + +/// DO NOT UPDATE WITHOUT UPDATING `docs/users/build_observability/build_report.md`! 
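
For readers tracking the JSON shape: with serde's default externally tagged representation, each renamed variant of a diagnostics enum like the one above becomes the key of a one-entry object. A minimal sketch, assuming `serde` (with the `derive` feature) and `serde_json`; the payload types are simplified:

```rust
use serde::Serialize;

#[derive(Serialize)]
enum Diagnostics {
    #[serde(rename = "sub_errors")]
    SubErrors(Vec<String>),
    #[serde(rename = "handler_invocation_error")]
    HandlerInvocationError(String),
}

fn main() {
    let d = Diagnostics::SubErrors(vec!["USER_ERROR".to_owned()]);
    // Externally tagged (serde's default): {"sub_errors":["USER_ERROR"]}
    println!("{}", serde_json::to_string(&d).unwrap());
}
```
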
+#[derive(Debug, Clone, Serialize, PartialOrd, Ord, PartialEq, Eq)] +pub(crate) struct BuildReportActionError { + name: BuildReportActionName, + key: BuildReportActionKey, + digest: String, + error_content: String, + stderr_content: String, + stdout_content: String, + error_diagnostics: Option, +} + +impl BuildReportActionError { + pub(crate) fn new<'a>(error: &ActionError, collector: &mut BuildReportCollector<'a>) -> Self { + let reason = get_action_error_reason(error).ok().unwrap_or_default(); + + let command_details = error.last_command.as_ref().and_then(|c| c.details.as_ref()); + + let owner = error.key.as_ref().map_or(String::default(), |key| { + key.owner.as_ref().map_or(String::default(), |owner| { + display_action_owner(owner, TargetDisplayOptions::for_build_report()) + .unwrap_or_default() + }) + }); + + let key = BuildReportActionKey { owner }; + + let name = BuildReportActionName { + category: error + .name + .as_ref() + .map_or(String::default(), |name| name.category.clone()), + identifier: error + .name + .as_ref() + .map_or(String::default(), |name| name.identifier.clone()), + }; + + let error_diagnostics = error.error_diagnostics.clone().map(|error_diagnostics| { + match error_diagnostics.data.unwrap() { + buck2_data::action_error_diagnostics::Data::SubErrors(sub_errors) => { + let sub_errors = sub_errors + .sub_errors + .iter() + .map(|s| { + let locations = s.locations.as_ref().map(|locations| { + locations + .locations + .iter() + .map(|l| BuildReportActionErrorLocation { + file: l.file.clone(), + line: l.line, + }) + .collect() + }); + BuildReportActionSubError { + category: s.category.clone(), + message_content: s + .message + .clone() + .map(|m| collector.update_string_cache(m)), + locations, + } + }) + .collect(); + BuildReportActionErrorDiagnostics::SubErrors(sub_errors) + } + buck2_data::action_error_diagnostics::Data::HandlerInvocationError( + invocation_failure, + ) => BuildReportActionErrorDiagnostics::HandlerInvocationError( + collector.update_string_cache(invocation_failure.clone()), + ), + } + }); + + let stderr = command_details.map_or(String::default(), |c| c.stderr.clone()); + let stdout = command_details.map_or(String::default(), |c| c.stdout.clone()); + + let error_content = collector.update_string_cache(reason); + let stderr_content = collector.update_string_cache(stderr); + let stdout_content = collector.update_string_cache(stdout); + + Self { + key, + name, + error_content, + stderr_content, + stdout_content, + digest: get_action_digest(command_details).unwrap_or_default(), + error_diagnostics, + } + } +} + +fn get_action_digest(command_details: Option<&CommandExecutionDetails>) -> Option { + command_details.and_then(|command_details| { + if let Some(command_kind) = &command_details.command_kind { + match command_kind.command.as_ref() { + Some(Command::LocalCommand(c)) => Some(c.action_digest.clone()), + Some(Command::OmittedLocalCommand(c)) => Some(c.action_digest.clone()), + Some(Command::WorkerCommand(c)) => Some(c.action_digest.clone()), + Some(Command::WorkerInitCommand(_)) => None, + Some(Command::RemoteCommand(c)) => Some(c.action_digest.clone()), + None => None, + } + } else { + None + } + }) +} diff --git a/app/buck2_build_api/src/build/build_report.rs b/app/buck2_build_api/src/build/build_report.rs new file mode 100644 index 0000000000000..c641706130c7d --- /dev/null +++ b/app/buck2_build_api/src/build/build_report.rs @@ -0,0 +1,617 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+//! Processing and reporting the results of the build
+
+use std::collections::hash_map::DefaultHasher;
+use std::collections::BTreeMap;
+use std::collections::HashMap;
+use std::collections::HashSet;
+use std::hash::Hash;
+use std::hash::Hasher;
+use std::io::BufWriter;
+use std::sync::Arc;
+
+use anyhow::Context as _;
+use buck2_cli_proto::CommonBuildOptions;
+use buck2_common::legacy_configs::dice::HasLegacyConfigs;
+use buck2_common::legacy_configs::key::BuckconfigKeyRef;
+use buck2_core::cells::CellResolver;
+use buck2_core::configuration::compatibility::MaybeCompatible;
+use buck2_core::configuration::data::ConfigurationData;
+use buck2_core::fs::artifact_path_resolver::ArtifactFs;
+use buck2_core::fs::fs_util;
+use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf;
+use buck2_core::fs::project::ProjectRoot;
+use buck2_core::fs::project_rel_path::ProjectRelativePath;
+use buck2_core::fs::project_rel_path::ProjectRelativePathBuf;
+use buck2_core::provider::label::ConfiguredProvidersLabel;
+use buck2_core::provider::label::NonDefaultProvidersName;
+use buck2_core::provider::label::ProvidersLabel;
+use buck2_core::provider::label::ProvidersName;
+use buck2_core::target::label::label::TargetLabel;
+use buck2_error::UniqueRootId;
+use buck2_events::errors::create_error_report;
+use buck2_execute::artifact::artifact_dyn::ArtifactDyn;
+use buck2_wrapper_common::invocation_id::TraceId;
+use derivative::Derivative;
+use dice::DiceComputations;
+use dupe::Dupe;
+use itertools::Either;
+use itertools::EitherOrBoth;
+use itertools::Itertools;
+use serde::Serialize;
+use starlark_map::small_set::SmallSet;
+
+use crate::build::action_error::BuildReportActionError;
+use crate::build::BuildProviderType;
+use crate::build::ConfiguredBuildTargetResult;
+
+#[derive(Debug, Serialize)]
+#[allow(clippy::upper_case_acronyms)] // We care about how they serialise
+enum BuildOutcome {
+    SUCCESS,
+    FAIL,
+    #[allow(dead_code)] // Part of the spec, but not yet used
+    CANCELED,
+}
+
+impl Default for BuildOutcome {
+    fn default() -> Self {
+        Self::SUCCESS
+    }
+}
+
+/// DO NOT UPDATE WITHOUT UPDATING `docs/users/build_observability/build_report.md`!
+#[derive(Debug, Serialize)]
+pub struct BuildReport {
+    trace_id: TraceId,
+    success: bool,
+    results: HashMap<EntryLabel, BuildReportEntry>,
+    /// filled only when fill-out-failures is passed for Buck1 backcompat only
+    failures: HashMap<EntryLabel, String>,
+    project_root: AbsNormPathBuf,
+    truncated: bool,
+    strings: BTreeMap<String, String>,
+}
+
+/// The fields that are stored in the unconfigured `BuildReportEntry` for buck1 backcompat.
+///
+/// Do not put new fields in here. Put them in `ConfiguredBuildReportEntry`
+#[derive(Default, Debug, Serialize)]
+struct MaybeConfiguredBuildReportEntry {
+    /// whether this particular target was successful
+    success: BuildOutcome,
+    /// a map of each subtarget of the current target (outputted as a `|` delimited list) to
+    /// the default exposed output of the subtarget
+    outputs: HashMap<Arc<str>, SmallSet<ProjectRelativePathBuf>>,
+    /// a map of each subtarget of the current target (outputted as a `|` delimited list) to
+    /// the hidden, implicitly built outputs of the subtarget.
There are multiple outputs + /// per subtarget + /// + /// FIXME(JakobDegen): This should be in `ConfiguredBuildReportEntry` + other_outputs: HashMap, SmallSet>, + /// The size of the graph for this target, if it was produced + /// + /// FIXME(JakobDegen): This should be in `ConfiguredBuildReportEntry` + configured_graph_size: Option, +} + +/// DO NOT UPDATE WITHOUT UPDATING `docs/users/build_observability/build_report.md`! +#[derive(Default, Debug, Serialize)] +pub(crate) struct ConfiguredBuildReportEntry { + /// A list of errors that occurred while building this target + errors: Vec, + #[serde(flatten)] + inner: MaybeConfiguredBuildReportEntry, +} + +/// DO NOT UPDATE WITHOUT UPDATING `docs/users/build_observability/build_report.md`! +#[derive(Debug, Serialize)] +struct BuildReportEntry { + /// The buck1 build report did not support multiple configurations of the same target. We + /// do, which is why we have the `configured` field below, which users should ideally use. + /// This field is kept around for buck1 compatibility only and should ideally be removed. + /// + /// We avoid the `WithErrors` variant here, to keep the errors field from conflicting with + /// the one on this struct. + #[serde(flatten)] + #[serde(skip_serializing_if = "Option::is_none")] + compatible: Option, + + /// the configured entry + configured: HashMap, + + /// Errors that could not be associated with a particular configured version of the target, + /// typically because they happened before configuration. + errors: Vec, + + /// The path to the package where this target is defined, relative to the project root. + #[serde(skip_serializing_if = "Option::is_none")] + package_project_relative_path: Option, +} + +/// DO NOT UPDATE WITHOUT UPDATING `docs/users/build_observability/build_report.md`! +#[derive(Debug, Clone, Serialize, PartialOrd, Ord, PartialEq, Eq)] +struct BuildReportError { + message_content: String, + action_error: Option, + /// An opaque index that can be use to de-duplicate errors. Two errors with the same + /// cause index have the same cause + /// + /// For example, two targets in different packages may have the same cause (evaluation of + /// common bzl file), but error stack will be different. 
+ cause_index: usize, +} + +#[derive(Derivative, Serialize, Eq, PartialEq, Hash)] +#[derivative(Debug)] +#[serde(untagged)] +enum EntryLabel { + #[derivative(Debug = "transparent")] + Target(TargetLabel), +} + +pub struct BuildReportOpts { + pub print_unconfigured_section: bool, + pub unstable_include_other_outputs: bool, + pub unstable_include_failures_build_report: bool, + pub unstable_include_package_project_relative_paths: bool, + pub unstable_build_report_filename: String, +} + +pub struct BuildReportCollector<'a> { + artifact_fs: &'a ArtifactFs, + cell_resolver: &'a CellResolver, + overall_success: bool, + include_unconfigured_section: bool, + include_other_outputs: bool, + error_cause_cache: HashMap, + next_cause_index: usize, + strings: BTreeMap, + failures: HashMap, + include_failures: bool, + include_package_project_relative_paths: bool, +} + +impl<'a> BuildReportCollector<'a> { + pub fn convert( + trace_id: &TraceId, + artifact_fs: &'a ArtifactFs, + cell_resolver: &'a CellResolver, + project_root: &ProjectRoot, + include_unconfigured_section: bool, + include_other_outputs: bool, + include_failures: bool, + include_package_project_relative_paths: bool, + configured: &BTreeMap>, + other_errors: &BTreeMap, Vec>, + ) -> BuildReport { + let mut this: BuildReportCollector<'_> = Self { + artifact_fs, + cell_resolver, + overall_success: true, + include_unconfigured_section, + include_other_outputs, + error_cause_cache: HashMap::default(), + next_cause_index: 0, + strings: BTreeMap::default(), + failures: HashMap::default(), + include_failures, + include_package_project_relative_paths, + }; + let mut entries = HashMap::new(); + + if other_errors.values().flatten().next().is_some() { + // Do this check ahead of time. We don't check for errors that aren't associated + // with a target below, so we'd miss this otherwise. + this.overall_success = false; + } + + // The `BuildTargetResult` doesn't group errors by their unconfigured target, so we need + // to do a little iterator munging to achieve that ourselves + let results_by_unconfigured = configured + .iter() + .chunk_by(|x| x.0.target().unconfigured().dupe()); + let errors_by_unconfigured = other_errors + .iter() + .filter_map(|(l, e)| Some((l.as_ref()?.target().dupe(), e))); + for i in Itertools::merge_join_by( + IntoIterator::into_iter(&results_by_unconfigured), + errors_by_unconfigured, + |(l1, _), (l2, _)| Ord::cmp(l1, l2), + ) { + let (label, results, errors) = match i { + EitherOrBoth::Both((label, results), (_, errors)) => { + (label, Either::Left(results), &**errors) + } + EitherOrBoth::Left((label, results)) => (label, Either::Left(results), &[][..]), + EitherOrBoth::Right((label, errors)) => { + (label, Either::Right(std::iter::empty()), &**errors) + } + }; + let entry = this.collect_results_for_unconfigured(label.dupe(), results, errors); + entries.insert(EntryLabel::Target(label), entry); + } + + BuildReport { + trace_id: trace_id.dupe(), + success: this.overall_success, + results: entries, + failures: this.failures, + project_root: project_root.root().to_owned(), + // In buck1 we may truncate build report for a large number of targets. + // Setting this to false since we don't currently truncate buck2's build report. 
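
`update_string_cache`, defined just below, is how potentially large strings (stderr, error messages) land in the top-level `strings` table exactly once, with the rest of the report referring to them by hash key. A std-only sketch of the same scheme; the `StringCache` type is illustrative:

```rust
use std::collections::hash_map::DefaultHasher;
use std::collections::BTreeMap;
use std::hash::{Hash, Hasher};

struct StringCache {
    strings: BTreeMap<String, String>,
}

impl StringCache {
    /// Store `string` under the decimal form of its 64-bit hash and return
    /// that key; callers embed the key instead of repeating the content.
    fn update(&mut self, string: String) -> String {
        let mut hasher = DefaultHasher::new();
        string.hash(&mut hasher);
        let key = hasher.finish().to_string();
        self.strings.insert(key.clone(), string);
        key
    }
}

fn main() {
    let mut cache = StringCache { strings: BTreeMap::new() };
    let k1 = cache.update("very long stderr ...".to_owned());
    let k2 = cache.update("very long stderr ...".to_owned());
    // Identical content hashes to the same key, so it is stored once.
    assert_eq!(k1, k2);
    assert_eq!(cache.strings.len(), 1);
}
```
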
+ truncated: false, + strings: this.strings, + } + } + + pub(crate) fn update_string_cache(&mut self, string: String) -> String { + let mut hasher = DefaultHasher::new(); + string.hash(&mut hasher); + let hash = hasher.finish().to_string(); + self.strings.insert(hash.clone(), string); + hash + } + + /// Always called for one unconfigured target at a time + fn collect_results_for_unconfigured<'b>( + &mut self, + target: TargetLabel, + results: impl IntoIterator< + Item = ( + &'b ConfiguredProvidersLabel, + &'b Option, + ), + >, + errors: &[buck2_error::Error], + ) -> BuildReportEntry { + // NOTE: if we're actually building a thing, then the package path must exist, but be + // conservative and don't crash the overall processing if that happens. + let package_project_relative_path = if self.include_package_project_relative_paths { + self.cell_resolver + .resolve_path(target.pkg().as_cell_path()) + .ok() + } else { + None + }; + + let mut unconfigured_report = if self.include_unconfigured_section { + Some(MaybeConfiguredBuildReportEntry::default()) + } else { + None + }; + let mut configured_reports = HashMap::new(); + + for (label, results) in &results + .into_iter() + // We omit skipped targets here. + .filter_map(|(label, result)| Some((label, result.as_ref()?))) + .chunk_by(|x| x.0.target().dupe()) + { + let configured_report = self.collect_results_for_configured(target.dupe(), results); + if let Some(report) = unconfigured_report.as_mut() { + if !configured_report.errors.is_empty() { + report.success = BuildOutcome::FAIL; + } + + // FIXME(JakobDegen): This potentially overwrites entries from other + // configurations. Is that intended? Send a diff with a comment if you know + report.outputs.extend( + configured_report + .inner + .outputs + .iter() + .map(|(k, v)| (k.clone(), v.clone())), + ); + report.other_outputs.extend( + configured_report + .inner + .other_outputs + .iter() + .map(|(k, v)| (k.clone(), v.clone())), + ); + if let Some(configured_graph_size) = configured_report.inner.configured_graph_size { + report.configured_graph_size = Some(configured_graph_size); + } + } + + configured_reports.insert(label.cfg().dupe(), configured_report); + } + + let errors = self.convert_error_list(errors, target); + if !errors.is_empty() { + if let Some(report) = unconfigured_report.as_mut() { + report.success = BuildOutcome::FAIL; + } + } + + BuildReportEntry { + compatible: unconfigured_report, + configured: configured_reports, + errors, + package_project_relative_path, + } + } + + fn collect_results_for_configured<'b>( + &mut self, + target: TargetLabel, + results: impl IntoIterator< + Item = ( + &'b ConfiguredProvidersLabel, + &'b ConfiguredBuildTargetResult, + ), + >, + ) -> ConfiguredBuildReportEntry { + let mut configured_report = ConfiguredBuildReportEntry::default(); + let mut errors = Vec::new(); + for (label, result) in results { + let provider_name: Arc = report_providers_name(label).into(); + + result.outputs.iter().for_each(|res| { + match res { + Ok(artifacts) => { + let mut is_default = false; + let mut is_other = false; + + match artifacts.provider_type { + BuildProviderType::Default => { + // as long as we have requested it as a default info, it should be + // considered a default output whether or not it also appears as an other + // non-main output + is_default = true; + } + BuildProviderType::DefaultOther + | BuildProviderType::Run + | BuildProviderType::Test => { + // as long as the output isn't the default, we add it to other outputs. 
+                                // This means that the same artifact may appear twice if it's part of the
+                                // default AND the other outputs, but this is intended as it accurately
+                                // describes the type of the artifact
+                                is_other = true;
+                            }
+                        }
+
+                        for (artifact, _value) in artifacts.values.iter() {
+                            if is_default {
+                                configured_report
+                                    .inner
+                                    .outputs
+                                    .entry(provider_name.clone())
+                                    .or_default()
+                                    .insert(artifact.resolve_path(self.artifact_fs).unwrap());
+                            }
+
+                            if is_other && self.include_other_outputs {
+                                configured_report
+                                    .inner
+                                    .other_outputs
+                                    .entry(provider_name.clone())
+                                    .or_default()
+                                    .insert(artifact.resolve_path(self.artifact_fs).unwrap());
+                            }
+                        }
+                    }
+                    Err(e) => errors.push(e.dupe()),
+                }
+            });
+
+            errors.extend(result.errors.iter().cloned());
+
+            if let Some(Ok(MaybeCompatible::Compatible(configured_graph_size))) =
+                result.configured_graph_size
+            {
+                configured_report.inner.configured_graph_size = Some(configured_graph_size);
+            }
+        }
+        configured_report.errors = self.convert_error_list(&errors, target);
+        if !configured_report.errors.is_empty() {
+            configured_report.inner.success = BuildOutcome::FAIL;
+        }
+        configured_report
+    }
+
+    /// Note: In order for production of the build report to be deterministic, the order in
+    /// which this function is called, and which errors it is called with, must be
+    /// deterministic. The particular order of the errors need not be.
+    fn convert_error_list(
+        &mut self,
+        errors: &[buck2_error::Error],
+        target: TargetLabel,
+    ) -> Vec<BuildReportError> {
+        if errors.is_empty() {
+            return Vec::new();
+        }
+        self.overall_success = false;
+
+        struct ExpandedErrorInfo {
+            root: UniqueRootId,
+            cause_index: Option<usize>,
+            message: String,
+            action_error: Option<BuildReportActionError>,
+        }
+
+        let mut temp = Vec::with_capacity(errors.len());
+        for e in errors {
+            // We initially avoid assigning new cause indexes and instead use a sentinel value.
+            // This is to make sure that we can be deterministic.
+            let root = e.root_id();
+            let error_report = create_error_report(e);
+            let message = if let Some(telemetry_message) = error_report.telemetry_message {
+                telemetry_message
+            } else {
+                error_report.message
+            };
+            temp.push(ExpandedErrorInfo {
+                root,
+                cause_index: self.error_cause_cache.get(&root).copied(),
+                message,
+                action_error: e
+                    .action_error()
+                    .map(|e| BuildReportActionError::new(e, self)),
+            });
+        }
+        // Sort the errors. This sort *almost* guarantees full determinism, but unfortunately
+        // not quite; it is hypothetically non-deterministic if the same configured target has
+        // two errors with different error roots but the same error message. Probably unlikely?
+        temp.sort_unstable_by(|x, y| {
+            Ord::cmp(&(x.cause_index, &x.message), &(y.cause_index, &y.message))
+        });
+
+        // Deduplicate errors with the same root. We have to do this after sorting to retain
+        // determinism.
+        //
+        // FIXME(JakobDegen): Ideally we wouldn't need this. It originally wasn't here, but this
+        // caused the size of the build report to grow very large in some cases. I suspect this
+        // is the result of some rules producing large amounts of `other_outputs`. Because those
+        // are all top level artifacts that get their own `BuildEvent`, if they all fail, they
+        // all get their own error in the build report. Completing the migration to artifact
+        // groups would likely let us get rid of this.
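Since the sort-then-deduplicate dance above is subtle, here is a minimal, self-contained sketch of the same pattern with made-up data (only the sort-then-`retain` shape matches the surrounding code):

```rust
use std::collections::HashSet;

fn main() {
    // (root id, message) pairs standing in for the expanded error infos.
    let mut errors = vec![(1, "err B"), (2, "err C"), (1, "err A")];

    // Sorting first makes the outcome deterministic: for a given root,
    // the smallest entry always survives.
    errors.sort_unstable();

    // `HashSet::insert` returns false for a root that was already seen,
    // so `retain` keeps only the first error per root.
    let mut seen = HashSet::new();
    errors.retain(|(root, _)| seen.insert(*root));

    assert_eq!(errors, vec![(1, "err A"), (2, "err C")]);
}
```

Without the sort, which duplicate survives would depend on the original event order, which is exactly the non-determinism the surrounding code is trying to rule out.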
+        let mut found_roots = HashSet::new();
+        temp.retain(|info| found_roots.insert(info.root));
+
+        let mut out = Vec::with_capacity(temp.len());
+        // Now assign new cause indexes if we haven't yet
+        for info in temp {
+            let cause_index = match info.cause_index {
+                Some(i) => i,
+                None => {
+                    // We need to recheck the cache first, as a previous iteration of this loop
+                    // may have inserted our root
+                    self.error_cause_cache
+                        .get(&info.root)
+                        .copied()
+                        .unwrap_or_else(|| {
+                            let index = self.next_cause_index;
+                            self.next_cause_index += 1;
+                            self.error_cause_cache.insert(info.root, index);
+                            index
+                        })
+                }
+            };
+
+            let message_content = self.update_string_cache(info.message.clone());
+
+            out.push(BuildReportError {
+                message_content,
+                action_error: info.action_error,
+                cause_index,
+            });
+        }
+
+        if self.include_failures {
+            // Order is deterministic now, so picking the last one is fine. Also, we checked that
+            // there was at least one error above.
+            //
+            // This both omits errors and overwrites previous ones. That's the price you pay for
+            // using buck1
+            self.failures.insert(
+                EntryLabel::Target(target),
+                self.strings
+                    .get(&out.last().unwrap().message_content)
+                    .unwrap()
+                    .to_string(),
+            );
+        }
+
+        out
+    }
+}
+
+fn report_providers_name(label: &ConfiguredProvidersLabel) -> String {
+    match label.name() {
+        ProvidersName::Default => "DEFAULT".to_owned(),
+        ProvidersName::NonDefault(flavor) => match flavor.as_ref() {
+            NonDefaultProvidersName::Named(names) => names.iter().join("|"),
+            NonDefaultProvidersName::UnrecognizedFlavor(s) => {
+                format!("#{}", s)
+            }
+        },
+    }
+}
+
+pub async fn build_report_opts<'a>(
+    ctx: &mut DiceComputations<'a>,
+    cell_resolver: &CellResolver,
+    build_opts: &CommonBuildOptions,
+) -> anyhow::Result<BuildReportOpts> {
+    let esto = &build_opts.unstable_build_report_filename;
+    let build_report_opts = BuildReportOpts {
+        print_unconfigured_section: ctx
+            .parse_legacy_config_property(
+                cell_resolver.root_cell(),
+                BuckconfigKeyRef {
+                    section: "build_report",
+                    property: "print_unconfigured_section",
+                },
+            )
+            .await?
+            .unwrap_or(true),
+        unstable_include_other_outputs: ctx
+            .parse_legacy_config_property(
+                cell_resolver.root_cell(),
+                BuckconfigKeyRef {
+                    section: "build_report",
+                    property: "unstable_include_other_outputs",
+                },
+            )
+            .await?
+            .unwrap_or(false),
+        unstable_include_failures_build_report: build_opts.unstable_include_failures_build_report,
+        unstable_include_package_project_relative_paths: build_opts
+            .unstable_include_package_project_relative_paths,
+        unstable_build_report_filename: esto.clone(),
+    };
+
+    Ok(build_report_opts)
+}
+
+pub fn generate_build_report(
+    opts: BuildReportOpts,
+    artifact_fs: &ArtifactFs,
+    cell_resolver: &CellResolver,
+    project_root: &ProjectRoot,
+    cwd: &ProjectRelativePath,
+    trace_id: &TraceId,
+    configured: &BTreeMap<ConfiguredProvidersLabel, Option<ConfiguredBuildTargetResult>>,
+    other_errors: &BTreeMap<Option<ProvidersLabel>, Vec<buck2_error::Error>>,
+) -> Result<Option<String>, buck2_error::Error> {
+    let build_report = BuildReportCollector::convert(
+        trace_id,
+        artifact_fs,
+        cell_resolver,
+        project_root,
+        opts.print_unconfigured_section,
+        opts.unstable_include_other_outputs,
+        opts.unstable_include_failures_build_report,
+        opts.unstable_include_package_project_relative_paths,
+        configured,
+        other_errors,
+    );
+
+    let mut serialized_build_report = None;
+
+    if !opts.unstable_build_report_filename.is_empty() {
+        let file = fs_util::create_file(
+            project_root
+                .resolve(cwd)
+                .as_abs_path()
+                .join(opts.unstable_build_report_filename),
+        )
+        .context("Error writing build report")?;
+        let mut file = BufWriter::new(file);
+        serde_json::to_writer_pretty(&mut file, &build_report)?
+    } else {
+        serialized_build_report = Some(serde_json::to_string(&build_report)?);
+    };
+
+    Ok(serialized_build_report)
+}
diff --git a/app/buck2_build_api/src/build/graph_size.rs b/app/buck2_build_api/src/build/graph_size.rs
index aebfecf020db7..9e020f972566d 100644
--- a/app/buck2_build_api/src/build/graph_size.rs
+++ b/app/buck2_build_api/src/build/graph_size.rs
@@ -11,7 +11,6 @@ use std::collections::HashSet;
 
 use allocative::Allocative;
 use async_trait::async_trait;
-use buck2_common::result::SharedResult;
 use buck2_core::configuration::compatibility::MaybeCompatible;
 use buck2_core::target::configured_target_label::ConfiguredTargetLabel;
 use buck2_node::nodes::configured_frontend::ConfiguredTargetNodeCalculation;
@@ -34,7 +33,7 @@ struct GraphSizeKey(ConfiguredTargetLabel);
 
 #[async_trait]
 impl Key for GraphSizeKey {
-    type Value = SharedResult<MaybeCompatible<u64>>;
+    type Value = buck2_error::Result<MaybeCompatible<u64>>;
 
     async fn compute(
         &self,
@@ -68,7 +67,7 @@ impl Key for GraphSizeKey {
 /// Returns the total graph size for all dependencies of a target.
 pub async fn get_configured_graph_size(
-    ctx: &DiceComputations,
+    ctx: &mut DiceComputations<'_>,
     key: &ConfiguredTargetLabel,
 ) -> anyhow::Result<MaybeCompatible<u64>> {
     Ok(ctx.compute(&GraphSizeKey(key.dupe())).await??)
diff --git a/app/buck2_build_api/src/build/mod.rs b/app/buck2_build_api/src/build/mod.rs
deleted file mode 100644
index caafabe9388b7..0000000000000
--- a/app/buck2_build_api/src/build/mod.rs
+++ /dev/null
@@ -1,499 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */
-
-use std::collections::BTreeMap;
-use std::collections::HashMap;
-use std::fmt::Debug;
-use std::fmt::Formatter;
-use std::sync::Arc;
-
-use allocative::Allocative;
-use anyhow::Context;
-use buck2_artifact::artifact::artifact_type::BaseArtifactKind;
-use buck2_artifact::artifact::build_artifact::BuildArtifact;
-use buck2_cli_proto::build_request::Materializations;
-use buck2_common::result::SharedResult;
-use buck2_common::result::ToSharedResultExt;
-use buck2_core::configuration::compatibility::MaybeCompatible;
-use buck2_core::execution_types::executor_config::PathSeparatorKind;
-use buck2_core::provider::label::ConfiguredProvidersLabel;
-use buck2_events::dispatch::console_message;
-use buck2_execute::artifact::fs::ExecutorFs;
-use dashmap::mapref::entry::Entry;
-use dashmap::DashMap;
-use dice::DiceComputations;
-use dice::UserComputationData;
-use dupe::Dupe;
-use futures::future;
-use futures::stream::BoxStream;
-use futures::stream::FuturesUnordered;
-use futures::stream::Stream;
-use futures::stream::StreamExt;
-use futures::stream::TryStreamExt;
-use futures::FutureExt;
-use itertools::Itertools;
-use tokio::sync::Mutex;
-
-use crate::actions::artifact::get_artifact_fs::GetArtifactFs;
-use crate::actions::artifact::materializer::ArtifactMaterializer;
-use crate::analysis::calculation::RuleAnalysisCalculation;
-use crate::artifact_groups::calculation::ArtifactGroupCalculation;
-use crate::artifact_groups::ArtifactGroup;
-use crate::artifact_groups::ArtifactGroupValues;
-use crate::build_signals::HasBuildSignals;
-use crate::interpreter::rule_defs::cmd_args::AbsCommandLineContext;
-use crate::interpreter::rule_defs::cmd_args::CommandLineArgLike;
-use crate::interpreter::rule_defs::cmd_args::SimpleCommandLineArtifactVisitor;
-use crate::interpreter::rule_defs::provider::builtin::run_info::FrozenRunInfo;
-use crate::interpreter::rule_defs::provider::test_provider::TestProvider;
-
-mod graph_size;
-
-/// The types of provider to build on the configured providers label
-#[derive(Debug, Clone, Dupe, Allocative)]
-pub enum BuildProviderType {
-    Default,
-    DefaultOther,
-    Run,
-    Test,
-}
-
-#[derive(Clone, Debug, Allocative)]
-pub struct BuildTargetResultGen<T> {
-    pub outputs: Vec<T>,
-    pub run_args: Option<Vec<String>>,
-    pub configured_graph_size: Option<SharedResult<MaybeCompatible<u64>>>,
-}
-
-pub type BuildTargetResult = BuildTargetResultGen<SharedResult<ProviderArtifacts>>;
-
-impl BuildTargetResult {
-    pub async fn collect_stream(
-        mut stream: impl Stream<Item = anyhow::Result<BuildEvent>> + Unpin,
-        fail_fast: bool,
-    ) -> anyhow::Result<BTreeMap<ConfiguredProvidersLabel, Option<BuildTargetResult>>> {
-        // Create a map of labels to outputs, but retain the expected index of each output.
-        let mut res = HashMap::<
-            ConfiguredProvidersLabel,
-            Option<BuildTargetResultGen<(usize, SharedResult<ProviderArtifacts>)>>,
-        >::new();
-
-        while let Some(BuildEvent { label, variant }) = stream.try_next().await? {
-            match variant {
-                BuildEventVariant::SkippedIncompatible => {
-                    res.entry((*label).clone()).or_insert(None);
-                }
-                BuildEventVariant::Prepared { run_args } => {
-                    res.entry((*label).clone())
-                        .or_insert(Some(BuildTargetResultGen {
-                            outputs: Vec::new(),
-                            run_args,
-                            configured_graph_size: None,
-                        }));
-                }
-                BuildEventVariant::Output { index, output } => {
-                    let is_err = output.is_err();
-
-                    res.get_mut(label.as_ref())
-                        .with_context(|| format!("BuildEventVariant::Output before BuildEventVariant::Prepared for {} (internal error)", label))?
-                        .as_mut()
-                        .with_context(|| format!("BuildEventVariant::Output for a skipped target: `{}` (internal error)", label))?
- .outputs - .push((index, output)); - - if is_err && fail_fast { - break; - } - } - BuildEventVariant::GraphSize { - configured_graph_size, - } => { - res.get_mut(label.as_ref()) - .with_context(|| format!("BuildEventVariant::GraphSize before BuildEventVariant::Prepared for {} (internal error)", label))? - .as_mut() - .with_context(|| format!("BuildEventVariant::GraphSize for a skipped target: `{}` (internal error)", label))? - .configured_graph_size = Some(configured_graph_size); - } - } - } - - // Sort our outputs within each individual BuildTargetResult, then return those. - // Also, turn our HashMap into a BTreeMap. - let res = res - .into_iter() - .map(|(label, result)| { - let result = result.map(|result| { - let BuildTargetResultGen { - mut outputs, - run_args, - configured_graph_size, - } = result; - - // No need for a stable sort: the indices are unique (see below). - outputs.sort_unstable_by_key(|(index, _outputs)| *index); - - // TODO: This whole building thing needs quite a bit of refactoring. We might - // request the same targets multiple times here, but since we know that - // ConfiguredTargetLabel -> Output is going to be deterministic, we just dedupe - // them using the index. - BuildTargetResult { - outputs: outputs - .into_iter() - .unique_by(|(index, _outputs)| *index) - .map(|(_index, outputs)| outputs) - .collect(), - run_args, - configured_graph_size, - } - }); - - (label, result) - }) - .collect(); - - Ok(res) - } -} - -enum BuildEventVariant { - SkippedIncompatible, - Prepared { - run_args: Option>, - }, - Output { - output: SharedResult, - /// Ensure a stable ordering of outputs. - index: usize, - }, - GraphSize { - configured_graph_size: SharedResult>, - }, -} - -/// Events to be accumulated using BuildTargetResult::collect_stream. -pub struct BuildEvent { - label: Arc, - variant: BuildEventVariant, -} - -#[derive(Copy, Clone, Dupe, Debug)] -pub struct BuildConfiguredLabelOptions { - pub skippable: bool, - pub want_configured_graph_size: bool, -} - -pub async fn build_configured_label<'a>( - ctx: &'a DiceComputations, - materialization_context: &MaterializationContext, - providers_label: ConfiguredProvidersLabel, - providers_to_build: &ProvidersToBuild, - opts: BuildConfiguredLabelOptions, -) -> anyhow::Result> { - let providers_label = Arc::new(providers_label); - - let artifact_fs = ctx.get_artifact_fs().await?; - - let (outputs, run_args) = { - // A couple of these objects aren't Send and so scope them here so async transform doesn't get concerned. - let providers = match ctx.get_providers(providers_label.as_ref()).await? { - MaybeCompatible::Incompatible(reason) => { - if opts.skippable { - console_message(reason.skipping_message(providers_label.target())); - return Ok(futures::stream::once(futures::future::ready(BuildEvent { - label: providers_label.dupe(), - variant: BuildEventVariant::SkippedIncompatible, - })) - .boxed()); - } else { - return Err(reason.to_err()); - } - } - MaybeCompatible::Compatible(v) => v, - }; - - // Important we use an an ordered collections, so the order matches the order the rule - // author wrote. - let mut outputs = Vec::new(); - // Providers that produced each output, in the order of outputs above. 
We use a separate collection - // otherwise we'd build the same output twice when it's both in DefaultInfo and RunInfo - let collection = providers.provider_collection(); - - let mut run_args: Option> = None; - - if providers_to_build.default { - collection - .default_info() - .for_each_default_output_artifact_only(&mut |o| { - outputs.push((ArtifactGroup::Artifact(o), BuildProviderType::Default)); - Ok(()) - })?; - } - if providers_to_build.default_other { - collection - .default_info() - .for_each_default_output_other_artifacts_only(&mut |o| { - outputs.push((o, BuildProviderType::DefaultOther)); - Ok(()) - })?; - // TODO(marwhal): We can remove this once we migrate all other outputs to be handled with Artifacts directly - collection.default_info().for_each_other_output(&mut |o| { - outputs.push((o, BuildProviderType::DefaultOther)); - Ok(()) - })?; - } - if providers_to_build.run { - if let Some(runinfo) = providers - .provider_collection() - .builtin_provider::() - { - let mut artifact_visitor = SimpleCommandLineArtifactVisitor::new(); - runinfo.visit_artifacts(&mut artifact_visitor)?; - for input in artifact_visitor.inputs { - outputs.push((input, BuildProviderType::Run)); - } - // Produce arguments to run on a local machine. - let path_separator = if cfg!(windows) { - PathSeparatorKind::Windows - } else { - PathSeparatorKind::Unix - }; - let executor_fs = ExecutorFs::new(&artifact_fs, path_separator); - let mut cli = Vec::::new(); - let mut ctx = AbsCommandLineContext::new(&executor_fs); - runinfo.add_to_command_line(&mut cli, &mut ctx)?; - run_args = Some(cli); - } - } - if providers_to_build.tests { - if let Some(test_provider) = ::from_collection(collection) { - let mut artifact_visitor = SimpleCommandLineArtifactVisitor::new(); - test_provider.visit_artifacts(&mut artifact_visitor)?; - for input in artifact_visitor.inputs { - outputs.push((input, BuildProviderType::Test)); - } - } - } - - (outputs, run_args) - }; - - if let Some(signals) = ctx.per_transaction_data().get_build_signals() { - signals.top_level_target( - providers_label.target().dupe(), - outputs - .iter() - .map(|(output, _type)| output.dupe()) - .collect(), - ); - } - - if !opts.skippable && outputs.is_empty() { - console_message(format!( - "target {} does not have any outputs: building it does nothing", - providers_label.target() - )); - } - - let outputs = outputs - .into_iter() - .enumerate() - .map({ - |(index, (output, provider_type))| { - let materialization_context = materialization_context.dupe(); - materialize_artifact_group_owned(ctx, output, materialization_context).map( - move |res| { - let res = res.shared_error().map(|values| ProviderArtifacts { - values, - provider_type, - }); - - (index, res) - }, - ) - } - }) - .collect::>() - .map({ - let providers_label = providers_label.dupe(); - move |(index, output)| BuildEvent { - label: providers_label.dupe(), - variant: BuildEventVariant::Output { index, output }, - } - }); - - let stream = futures::stream::once(futures::future::ready(BuildEvent { - label: providers_label.dupe(), - variant: BuildEventVariant::Prepared { run_args }, - })) - .chain(outputs); - - if opts.want_configured_graph_size { - let stream = stream.chain(futures::stream::once(async move { - let configured_graph_size = - graph_size::get_configured_graph_size(ctx, providers_label.target()) - .await - .map_err(|e| e.into()); - - BuildEvent { - label: providers_label, - variant: BuildEventVariant::GraphSize { - configured_graph_size, - }, - } - })); - - Ok(stream.boxed()) - } else { - 
Ok(stream.boxed()) - } -} -pub async fn materialize_artifact_group_owned( - ctx: &DiceComputations, - artifact_group: ArtifactGroup, - materialization_context: MaterializationContext, -) -> anyhow::Result { - materialize_artifact_group(ctx, &artifact_group, &materialization_context).await -} - -#[derive(Clone, Allocative)] -pub struct ProviderArtifacts { - pub values: ArtifactGroupValues, - pub provider_type: BuildProviderType, -} - -// what type of artifacts to build based on the provider it came from -#[derive(Default, Clone)] -pub struct ProvidersToBuild { - pub default: bool, - pub default_other: bool, - pub run: bool, - pub tests: bool, -} - -impl Debug for ProviderArtifacts { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - f.debug_struct("ProviderArtifacts") - .field("values", &self.values.iter().collect::>()) - .field("provider_type", &self.provider_type) - .finish() - } -} - -pub async fn materialize_artifact_group( - ctx: &DiceComputations, - artifact_group: &ArtifactGroup, - materialization_context: &MaterializationContext, -) -> anyhow::Result { - let values = ctx.ensure_artifact_group(artifact_group).await?; - - if let MaterializationContext::Materialize { map, force } = materialization_context { - future::try_join_all(values.iter().filter_map(|(artifact, _value)| { - match artifact.as_parts().0 { - BaseArtifactKind::Build(artifact) => { - match map.entry(artifact.dupe()) { - Entry::Vacant(v) => { - // Ensure we won't request this artifact elsewhere, and proceed to request - // it. - v.insert(()); - } - Entry::Occupied(..) => { - // We've already requested this artifact, no use requesting it again. - return None; - } - } - - Some(ctx.try_materialize_requested_artifact(artifact, *force)) - } - BaseArtifactKind::Source(..) => None, - } - })) - .await - .context("Failed to materialize artifacts")?; - } - - Ok(values) -} - -#[derive(Clone, Dupe)] -pub enum MaterializationContext { - Skip, - Materialize { - /// This map contains all the artifacts that we enqueued for materialization. This ensures - /// we don't enqueue the same thing more than once. - map: Arc>, - /// Whether we should force the materialization of requested artifacts, or defer to the - /// config. - force: bool, - }, -} - -impl MaterializationContext { - /// Create a new MaterializationContext that will force all materializations. 
- pub fn force_materializations() -> Self { - Self::Materialize { - map: Arc::new(DashMap::new()), - force: true, - } - } -} - -pub trait ConvertMaterializationContext { - fn from(self) -> MaterializationContext; - - fn with_existing_map(self, map: &Arc>) -> MaterializationContext; -} - -impl ConvertMaterializationContext for Materializations { - fn from(self) -> MaterializationContext { - match self { - Materializations::Skip => MaterializationContext::Skip, - Materializations::Default => MaterializationContext::Materialize { - map: Arc::new(DashMap::new()), - force: false, - }, - Materializations::Materialize => MaterializationContext::Materialize { - map: Arc::new(DashMap::new()), - force: true, - }, - } - } - - fn with_existing_map(self, map: &Arc>) -> MaterializationContext { - match self { - Materializations::Skip => MaterializationContext::Skip, - Materializations::Default => MaterializationContext::Materialize { - map: map.dupe(), - force: false, - }, - Materializations::Materialize => MaterializationContext::Materialize { - map: map.dupe(), - force: true, - }, - } - } -} - -pub trait HasCreateUnhashedSymlinkLock { - fn set_create_unhashed_symlink_lock(&mut self, lock: Arc>); - - fn get_create_unhashed_symlink_lock(&self) -> Arc>; -} - -impl HasCreateUnhashedSymlinkLock for UserComputationData { - fn set_create_unhashed_symlink_lock(&mut self, lock: Arc>) { - self.data.set(lock); - } - - fn get_create_unhashed_symlink_lock(&self) -> Arc> { - self.data - .get::>>() - .expect("Lock for creating unhashed symlinks should be set") - .dupe() - } -} diff --git a/app/buck2_build_api/src/build_signals.rs b/app/buck2_build_api/src/build_signals.rs index a1bfead86da1c..5b5c157acf62c 100644 --- a/app/buck2_build_api/src/build_signals.rs +++ b/app/buck2_build_api/src/build_signals.rs @@ -10,8 +10,8 @@ use std::sync::Arc; use buck2_artifact::artifact::build_artifact::BuildArtifact; -use buck2_build_signals::DeferredBuildSignals; -use buck2_build_signals::NodeDuration; +use buck2_build_signals::env::DeferredBuildSignals; +use buck2_build_signals::env::NodeDuration; use buck2_core::target::configured_target_label::ConfiguredTargetLabel; use buck2_events::span::SpanId; use buck2_util::late_binding::LateBinding; @@ -19,7 +19,7 @@ use dice::ActivationTracker; use dice::UserComputationData; use dupe::Dupe; -use crate::artifact_groups::ArtifactGroup; +use crate::artifact_groups::ResolvedArtifactGroupBuildSignalsKey; pub static CREATE_BUILD_SIGNALS: LateBinding< fn() -> (BuildSignalsInstaller, Box), @@ -38,7 +38,11 @@ pub struct BuildSignalsInstaller { /// Send notifications to the build signals backend. 
 pub trait BuildSignals: Send + Sync + 'static {
-    fn top_level_target(&self, label: ConfiguredTargetLabel, artifacts: Vec<ArtifactGroup>);
+    fn top_level_target(
+        &self,
+        label: ConfiguredTargetLabel,
+        artifacts: Vec<ResolvedArtifactGroupBuildSignalsKey>,
+    );
 
     fn final_materialization(
         &self,
@@ -59,14 +63,11 @@ impl SetBuildSignals for UserComputationData {
 }
 
 pub trait HasBuildSignals {
-    fn get_build_signals(&self) -> Option<&dyn BuildSignals>;
+    fn get_build_signals(&self) -> Option<&Arc<dyn BuildSignals>>;
 }
 
 impl HasBuildSignals for UserComputationData {
-    fn get_build_signals(&self) -> Option<&dyn BuildSignals> {
-        self.data
-            .get::<Arc<dyn BuildSignals>>()
-            .ok()
-            .map(|build_signals| build_signals.as_ref())
+    fn get_build_signals(&self) -> Option<&Arc<dyn BuildSignals>> {
+        self.data.get::<Arc<dyn BuildSignals>>().ok()
     }
 }
diff --git a/app/buck2_build_api/src/bxl.rs b/app/buck2_build_api/src/bxl.rs
new file mode 100644
index 0000000000000..b5a560d85034e
--- /dev/null
+++ b/app/buck2_build_api/src/bxl.rs
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+//!
+//! bxl is the Buck Extension Language, allowing any integrator to write Starlark code that
+//! introspects buck2 internal graphs in a safe, incremental way to perform more complex operations
+
+pub mod build_result;
+
+pub mod calculation;
+pub mod result;
+pub mod types;
diff --git a/app/buck2_build_api/src/bxl/build_result.rs b/app/buck2_build_api/src/bxl/build_result.rs
index 3cdd864ebb6d6..348e93ef74436 100644
--- a/app/buck2_build_api/src/bxl/build_result.rs
+++ b/app/buck2_build_api/src/bxl/build_result.rs
@@ -7,23 +7,44 @@
  * of this source tree.
  */
 
+use std::fmt::Display;
+
 use allocative::Allocative;
+use buck2_core::provider::label::ConfiguredProvidersLabel;
 use gazebo::variants::UnpackVariants;
 
-use crate::build::BuildTargetResult;
-
-#[derive(Clone, Debug, derive_more::Display, UnpackVariants, Allocative)]
+use crate::build::ConfiguredBuildTargetResult;
+#[derive(Clone, Debug, UnpackVariants, Allocative)]
 pub enum BxlBuildResult {
     None,
-    #[display(fmt = "build result")]
-    Built(BuildTargetResult),
+    Built {
+        label: ConfiguredProvidersLabel,
+        result: ConfiguredBuildTargetResult,
+    },
 }
 
 impl BxlBuildResult {
-    pub fn new(result: Option<BuildTargetResult>) -> Self {
+    pub fn new(
+        label: ConfiguredProvidersLabel,
+        result: Option<ConfiguredBuildTargetResult>,
+    ) -> Self {
         match result {
-            Some(result) => Self::Built(result),
+            Some(result) => Self::Built { label, result },
             None => Self::None,
         }
     }
 }
+
+impl Display for BxlBuildResult {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            BxlBuildResult::None => write!(f, "BxlBuildResult::None"),
+            BxlBuildResult::Built { result, ..
} => write!( + f, + "BxlBuildResult::Built({} outputs, {} errors)", + result.outputs.len(), + result.errors.len() + ), + } + } +} diff --git a/app/buck2_build_api/src/bxl/calculation.rs b/app/buck2_build_api/src/bxl/calculation.rs index 18b06be56247e..5543c1c861313 100644 --- a/app/buck2_build_api/src/bxl/calculation.rs +++ b/app/buck2_build_api/src/bxl/calculation.rs @@ -14,10 +14,8 @@ use std::sync::Arc; use allocative::Allocative; use async_trait::async_trait; -use buck2_artifact::artifact::build_artifact::BuildArtifact; -use buck2_core::base_deferred_key::BaseDeferredKeyDyn; +use buck2_core::base_deferred_key::BaseDeferredKeyBxl; use buck2_util::late_binding::LateBinding; -use dashmap::DashMap; use dice::DiceComputations; use dupe::Dupe; @@ -27,16 +25,13 @@ use crate::bxl::result::BxlResult; pub trait BxlCalculationDyn: Debug + Send + Sync + 'static { async fn eval_bxl( &self, - ctx: &DiceComputations, - bxl: Arc, + ctx: &mut DiceComputations<'_>, + bxl: BaseDeferredKeyBxl, ) -> anyhow::Result; } #[derive(Allocative, Clone, Dupe)] -pub struct BxlComputeResult { - pub bxl_result: Arc, - pub materializations: Arc>, -} +pub struct BxlComputeResult(pub Arc); /// Dependency injection for BXL. /// diff --git a/app/buck2_build_api/src/bxl/mod.rs b/app/buck2_build_api/src/bxl/mod.rs deleted file mode 100644 index c00a4a907e6fe..0000000000000 --- a/app/buck2_build_api/src/bxl/mod.rs +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! -//! bxl is the Buck Extension Language, allowing any integrator to write Starlark code that -//! introspects buck2 internal graphs in a safe, incremental way to perform more complex operations -//! 
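The hand-written `Display` impl above replaces the old `derive_more::Display` derive, which had nothing useful to say once the `Built` variant gained fields. The same pattern on a toy enum (all names hypothetical) looks like this:

```rust
use std::fmt;

enum Outcome {
    None,
    Built { outputs: usize, errors: usize },
}

impl fmt::Display for Outcome {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Outcome::None => write!(f, "Outcome::None"),
            Outcome::Built { outputs, errors } => {
                write!(f, "Outcome::Built({} outputs, {} errors)", outputs, errors)
            }
        }
    }
}

fn main() {
    let built = Outcome::Built { outputs: 3, errors: 1 };
    assert_eq!(built.to_string(), "Outcome::Built(3 outputs, 1 errors)");
    println!("{}", Outcome::None);
}
```

A manual impl also lets the summary print counts rather than the payload itself, which keeps log lines bounded no matter how many outputs a build produced.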
- -pub mod build_result; - -pub mod calculation; -pub mod result; -pub mod types; diff --git a/app/buck2_build_api/src/bxl/result.rs b/app/buck2_build_api/src/bxl/result.rs index fb28daffce2cd..d52f376acc5e5 100644 --- a/app/buck2_build_api/src/bxl/result.rs +++ b/app/buck2_build_api/src/bxl/result.rs @@ -8,14 +8,12 @@ */ use allocative::Allocative; -use buck2_artifact::deferred::id::DeferredId; use buck2_core::fs::buck_out_path::BuckOutPath; use indexmap::IndexSet; +use crate::analysis::registry::RecordedAnalysisValues; use crate::artifact_groups::ArtifactGroup; use crate::bxl::build_result::BxlBuildResult; -use crate::deferred::types::DeferredLookup; -use crate::deferred::types::DeferredTable; /// The result of evaluating a bxl function #[derive(Allocative)] @@ -24,6 +22,7 @@ pub enum BxlResult { None { output_loc: BuckOutPath, error_loc: BuckOutPath, + analysis_values: RecordedAnalysisValues, }, /// a bxl that deals with builds BuildsArtifacts { @@ -31,7 +30,7 @@ pub enum BxlResult { error_loc: BuckOutPath, built: Vec, artifacts: Vec, - deferred: DeferredTable, + analysis_values: RecordedAnalysisValues, }, } @@ -40,12 +39,13 @@ impl BxlResult { output_loc: BuckOutPath, error_loc: BuckOutPath, ensured_artifacts: IndexSet, - deferred: DeferredTable, + analysis_values: RecordedAnalysisValues, ) -> Self { if ensured_artifacts.is_empty() { Self::None { output_loc, error_loc, + analysis_values, } } else { Self::BuildsArtifacts { @@ -53,16 +53,19 @@ impl BxlResult { error_loc, built: vec![], artifacts: ensured_artifacts.into_iter().collect(), - deferred, + analysis_values, } } } - /// looks up an 'Deferred' given the id - pub fn lookup_deferred(&self, id: DeferredId) -> anyhow::Result> { + pub(crate) fn analysis_values(&self) -> &RecordedAnalysisValues { match self { - BxlResult::None { .. } => Err(anyhow::anyhow!("Bxl never attempted to build anything")), - BxlResult::BuildsArtifacts { deferred, .. } => deferred.lookup_deferred(id), + BxlResult::None { + analysis_values, .. + } => analysis_values, + BxlResult::BuildsArtifacts { + analysis_values, .. + } => analysis_values, } } @@ -79,4 +82,18 @@ impl BxlResult { BxlResult::BuildsArtifacts { error_loc, .. } => error_loc, } } + + pub fn get_artifacts_opt(&self) -> Option<&Vec> { + match self { + BxlResult::None { .. } => None, + BxlResult::BuildsArtifacts { artifacts, .. } => Some(artifacts), + } + } + + pub fn get_build_result_opt(&self) -> Option<&Vec> { + match self { + BxlResult::None { .. } => None, + BxlResult::BuildsArtifacts { built, .. 
} => Some(built), + } + } } diff --git a/app/buck2_build_api/src/bxl/types.rs b/app/buck2_build_api/src/bxl/types.rs index 1a2e57c8a8c52..262fedad0f3f6 100644 --- a/app/buck2_build_api/src/bxl/types.rs +++ b/app/buck2_build_api/src/bxl/types.rs @@ -20,7 +20,7 @@ use serde::Serializer; #[derive( Debug, Clone, Display, Eq, PartialEq, Hash, Ord, PartialOrd, Allocative )] -#[display(fmt = "{}:{}", bxl_path, name)] +#[display("{}:{}", bxl_path, name)] pub struct BxlFunctionLabel { /// The cell, package, and file that contains the output of `bxl()` pub bxl_path: BxlFilePath, diff --git a/app/buck2_build_api/src/configure_dice.rs b/app/buck2_build_api/src/configure_dice.rs index 8b9556e2dc929..c1ccca3382c5d 100644 --- a/app/buck2_build_api/src/configure_dice.rs +++ b/app/buck2_build_api/src/configure_dice.rs @@ -12,14 +12,21 @@ use std::sync::Arc; use buck2_common::dice::cells::SetCellResolver; use buck2_common::dice::data::SetIoProvider; use buck2_common::io::IoProvider; +use buck2_common::legacy_configs::configs::LegacyBuckConfig; use buck2_common::legacy_configs::dice::SetLegacyConfigs; -use buck2_common::legacy_configs::LegacyBuckConfig; +use buck2_common::legacy_configs::key::BuckconfigKeyRef; +use buck2_core::rollout_percentage::RolloutPercentage; use buck2_execute::digest_config::DigestConfig; use buck2_execute::digest_config::SetDigestConfig; use dice::DetectCycles; use dice::Dice; use dice::WhichDice; +use crate::actions::execute::dice_data::SetInvalidationTrackingConfig; + +/// This is just a simple version number to allow us to more easily rollout modern dice. +const CURRENT_MODERN_DICE_VERSION: u32 = 3; + /// Utility to configure the dice globals. /// One place to not forget to initialize something in all places. pub async fn configure_dice_for_buck( @@ -33,35 +40,235 @@ pub async fn configure_dice_for_buck( || { root_config .and_then(|c| { - c.parse::("buck2", "detect_cycles") - .transpose() + c.parse::(BuckconfigKeyRef { + section: "buck2", + property: "detect_cycles", + }) + .transpose() }) .unwrap_or(Ok(DetectCycles::Enabled)) }, Ok, )?; - let which_dice = which_dice.map_or_else( - || { - root_config - .and_then(|c| c.parse::("buck2", "dice").transpose()) - .unwrap_or(Ok(WhichDice::Legacy)) - }, - Ok, - )?; - - let mut dice = match which_dice { + let mut dice = match determine_which_dice(root_config, which_dice)? { WhichDice::Legacy => Dice::builder(), WhichDice::Modern => Dice::modern(), }; dice.set_io_provider(io); dice.set_digest_config(digest_config); + let invalidation_tracking_enabled = match root_config { + Some(c) => c + .parse::(BuckconfigKeyRef { + section: "buck2", + property: "invalidation_tracking_enabled", + })? + .map_or(false, |v| v.roll()), + None => false, + }; + dice.set_invalidation_tracking_config(invalidation_tracking_enabled); let dice = dice.build(detect_cycles); let mut dice_ctx = dice.updater(); dice_ctx.set_none_cell_resolver()?; - dice_ctx.set_none_legacy_configs()?; + dice_ctx.set_none_legacy_config_external_data()?; dice_ctx.commit().await; Ok(dice) } + +fn determine_which_dice( + root_config: Option<&LegacyBuckConfig>, + which_dice: Option, +) -> anyhow::Result { + if let Some(v) = which_dice { + return Ok(v); + } + + if let Some(cfg) = root_config { + if let Some(v) = cfg.parse::(BuckconfigKeyRef { + section: "buck2", + property: "dice", + })? { + return Ok(v); + } + + if let Some(minimum_dice_version) = cfg.parse::(BuckconfigKeyRef { + section: "buck2", + property: "modern_dice_min_version", + })? 
{ + if CURRENT_MODERN_DICE_VERSION >= minimum_dice_version { + return Ok(WhichDice::Modern); + } + } + + if let Some(rollout) = cfg.parse::(BuckconfigKeyRef { + section: "buck2", + property: "modern_dice_rollout", + })? { + if rollout.roll() { + return Ok(WhichDice::Modern); + } + } + } + + Ok(WhichDice::Modern) +} + +#[cfg(test)] +mod tests { + use buck2_common::legacy_configs::configs::testing::parse; + + use super::*; + + struct Cfg { + dice: &'static str, + modern_dice_min_version: &'static str, + modern_dice_rollout: &'static str, + } + + impl Cfg { + fn to_cfg(self) -> LegacyBuckConfig { + let mut section = String::new(); + if self.dice != "" { + section += &format!("dice = {}\n", self.dice); + } + if self.modern_dice_min_version != "" { + section += &format!( + "modern_dice_min_version = {}\n", + self.modern_dice_min_version + ); + } + if self.modern_dice_rollout != "" { + section += &format!("modern_dice_rollout = {}\n", self.modern_dice_rollout); + } + + parse(&[("config", &format!("[buck2]\n{}", section))], "config").unwrap() + } + } + + #[test] + fn test_determine_which_dice() -> anyhow::Result<()> { + assert_eq!( + WhichDice::Modern, + determine_which_dice( + Some( + &Cfg { + dice: "", + modern_dice_min_version: "", + modern_dice_rollout: "", + } + .to_cfg() + ), + None + )? + ); + + assert_eq!( + WhichDice::Legacy, + determine_which_dice( + Some( + &Cfg { + dice: "modern", + modern_dice_min_version: "", + modern_dice_rollout: "", + } + .to_cfg() + ), + Some(WhichDice::Legacy) + )? + ); + + assert_eq!( + WhichDice::Modern, + determine_which_dice( + Some( + &Cfg { + dice: "legacy", + modern_dice_min_version: "", + modern_dice_rollout: "", + } + .to_cfg() + ), + Some(WhichDice::Modern) + )? + ); + + assert_eq!( + WhichDice::Legacy, + determine_which_dice( + Some( + &Cfg { + dice: "legacy", + modern_dice_min_version: "", + modern_dice_rollout: "", + } + .to_cfg() + ), + None + )? + ); + + assert_eq!( + WhichDice::Modern, + determine_which_dice( + Some( + &Cfg { + dice: "modern", + modern_dice_min_version: "", + modern_dice_rollout: "", + } + .to_cfg() + ), + None + )? + ); + + assert_eq!( + WhichDice::Modern, + determine_which_dice( + Some( + &Cfg { + dice: "", + modern_dice_min_version: "100000", + modern_dice_rollout: "", + } + .to_cfg() + ), + None + )? + ); + + assert_eq!( + WhichDice::Modern, + determine_which_dice( + Some( + &Cfg { + dice: "", + modern_dice_min_version: "1", + modern_dice_rollout: "", + } + .to_cfg() + ), + None + )? + ); + + assert_eq!( + WhichDice::Modern, + determine_which_dice( + Some( + &Cfg { + dice: "", + modern_dice_min_version: "", + modern_dice_rollout: "hostname:1", + } + .to_cfg() + ), + None + )? + ); + + Ok(()) + } +} diff --git a/app/buck2_build_api/src/configure_targets.rs b/app/buck2_build_api/src/configure_targets.rs index 650ee83a58888..444d4b5e78f8e 100644 --- a/app/buck2_build_api/src/configure_targets.rs +++ b/app/buck2_build_api/src/configure_targets.rs @@ -7,13 +7,13 @@ * of this source tree. 
*/ +use buck2_common::global_cfg_options::GlobalCfgOptions; use buck2_core::configuration::compatibility::IncompatiblePlatformReason; use buck2_core::configuration::compatibility::MaybeCompatible; use buck2_core::package::PackageLabel; +use buck2_core::pattern::pattern::ParsedPattern; use buck2_core::pattern::pattern_type::TargetPatternExtra; -use buck2_core::pattern::ParsedPattern; use buck2_core::target::configured_target_label::ConfiguredTargetLabel; -use buck2_core::target::label::TargetLabel; use buck2_events::dispatch::console_message; use buck2_node::load_patterns::load_patterns; use buck2_node::load_patterns::MissingTargetBehavior; @@ -23,16 +23,14 @@ use buck2_node::nodes::unconfigured::TargetNode; use buck2_node::target_calculation::ConfiguredTargetCalculation; use buck2_query::query::syntax::simple::eval::set::TargetSet; use dice::DiceComputations; -use dice::DiceComputationsParallel; use dupe::Dupe; -use futures::future::BoxFuture; use futures::FutureExt; use gazebo::prelude::VecExt; use starlark_map::small_set::SmallSet; // Returns a tuple of compatible and incompatible targets. fn split_compatible_incompatible( - targets: impl Iterator>>, + targets: impl IntoIterator>>, ) -> anyhow::Result<( TargetSet, SmallSet, @@ -54,31 +52,39 @@ fn split_compatible_incompatible( } pub async fn get_maybe_compatible_targets<'a>( - ctx: &'a DiceComputations, + ctx: &'a mut DiceComputations<'_>, loaded_targets: impl IntoIterator>)>, - global_target_platform: Option<&TargetLabel>, + global_cfg_options: &GlobalCfgOptions, + keep_going: bool, ) -> anyhow::Result>>> { let mut by_package_fns: Vec<_> = Vec::new(); for (_package, result) in loaded_targets { - let targets = result?; + match result { + Ok(targets) => { + by_package_fns.extend({ + let target_fns: Vec<_> = targets.into_map(|target| { + DiceComputations::declare_closure(|ctx| { + async move { + let target = ctx + .get_configured_target(target.label(), global_cfg_options) + .await?; + anyhow::Ok(ctx.get_configured_target_node(&target).await?) + } + .boxed() + }) + }); - by_package_fns.extend({ - let target_fns: Vec<_> = targets.into_map(|target| - higher_order_closure! { - #![with<'a>] - for<'x> |ctx: &'x mut DiceComputationsParallel<'a>| -> BoxFuture<'x, anyhow::Result>> { - async move { - let target = ctx - .get_configured_target(target.label(), global_target_platform) - .await?; - anyhow::Ok(ctx.get_configured_target_node(&target).await?) - }.boxed() - } + target_fns }); - - target_fns - }); + } + Err(e) => { + // TODO(@wendyy) - log the error + if !keep_going { + return Err(e); + } + } + } } Ok(futures::future::join_all(ctx.compute_many(by_package_fns)) @@ -88,12 +94,12 @@ pub async fn get_maybe_compatible_targets<'a>( /// Converts target nodes to a set of compatible configured target nodes. 
pub async fn get_compatible_targets( - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, loaded_targets: impl IntoIterator>)>, - global_target_platform: Option, + global_cfg_options: &GlobalCfgOptions, ) -> anyhow::Result> { let maybe_compatible_targets = - get_maybe_compatible_targets(ctx, loaded_targets, global_target_platform.as_ref()).await?; + get_maybe_compatible_targets(ctx, loaded_targets, global_cfg_options, false).await?; let (compatible_targets, incompatible_targets) = split_compatible_incompatible(maybe_compatible_targets)?; @@ -108,16 +114,16 @@ pub async fn get_compatible_targets( } pub async fn load_compatible_patterns( - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, parsed_patterns: Vec>, - global_target_platform: Option, + global_cfg_options: &GlobalCfgOptions, skip_missing_targets: MissingTargetBehavior, ) -> anyhow::Result> { let loaded_patterns = load_patterns(ctx, parsed_patterns, skip_missing_targets).await?; get_compatible_targets( ctx, loaded_patterns.iter_loaded_targets_by_package(), - global_target_platform, + global_cfg_options, ) .await } diff --git a/app/buck2_build_api/src/context.rs b/app/buck2_build_api/src/context.rs index 04de138fb123c..b97f947fe2d1f 100644 --- a/app/buck2_build_api/src/context.rs +++ b/app/buck2_build_api/src/context.rs @@ -23,7 +23,7 @@ use dupe::Dupe; #[async_trait] pub trait HasBuildContextData { - async fn get_buck_out_path(&self) -> anyhow::Result; + async fn get_buck_out_path(&mut self) -> anyhow::Result; } pub trait SetBuildContextData { @@ -36,7 +36,7 @@ pub struct BuildData { } #[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] struct BuildDataKey; impl InjectedKey for BuildDataKey { @@ -48,8 +48,8 @@ impl InjectedKey for BuildDataKey { } #[async_trait] -impl HasBuildContextData for DiceComputations { - async fn get_buck_out_path(&self) -> anyhow::Result { +impl HasBuildContextData for DiceComputations<'_> { + async fn get_buck_out_path(&mut self) -> anyhow::Result { let data = self.compute(&BuildDataKey).await?; Ok(BuckOutPathResolver::new(data.buck_out_path.to_buf())) } diff --git a/app/buck2_configured/src/configuration/mod.rs b/app/buck2_build_api/src/deferred.rs similarity index 100% rename from app/buck2_configured/src/configuration/mod.rs rename to app/buck2_build_api/src/deferred.rs diff --git a/app/buck2_build_api/src/deferred/calculation.rs b/app/buck2_build_api/src/deferred/calculation.rs index 7d343562363cf..b2cbc3be19a25 100644 --- a/app/buck2_build_api/src/deferred/calculation.rs +++ b/app/buck2_build_api/src/deferred/calculation.rs @@ -9,110 +9,50 @@ //! 
Dice calculations relating to deferreds -use std::collections::HashMap; -use std::hash::Hash; use std::pin::Pin; use std::sync::Arc; use allocative::Allocative; -use anyhow::Context as _; -use async_trait::async_trait; +use buck2_artifact::actions::key::ActionKey; use buck2_artifact::artifact::artifact_type::Artifact; -use buck2_artifact::deferred::data::DeferredData; -use buck2_artifact::deferred::id::DeferredId; -use buck2_artifact::deferred::key::DeferredKey; -use buck2_common::dice::data::HasIoProvider; -use buck2_common::result::SharedResult; -use buck2_common::result::ToSharedResultExt; -use buck2_common::result::ToUnsharedResultExt; +use buck2_artifact::deferred::key::DeferredHolderKey; use buck2_core::base_deferred_key::BaseDeferredKey; use buck2_core::base_deferred_key::BaseDeferredKeyDyn; -use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; -use buck2_events::dispatch::create_span; -use buck2_events::dispatch::Span; -use buck2_execute::artifact::artifact_dyn::ArtifactDyn; -use buck2_execute::digest_config::HasDigestConfig; -use buck2_execute::materialize::materializer::HasMaterializer; -use buck2_node::nodes::configured_frontend::ConfiguredTargetNodeCalculation; use buck2_util::late_binding::LateBinding; -use derive_more::Display; use dice::DiceComputations; -use dice::Key; use dupe::Dupe; -use futures::stream::FuturesUnordered; use futures::Future; -use futures::FutureExt; -use futures::StreamExt; -use futures::TryFutureExt; -use futures::TryStreamExt; -use more_futures::cancellation::CancellationContext; -use once_cell::sync::Lazy; +use starlark::values::OwnedFrozenValueTyped; -use crate::actions::artifact::get_artifact_fs::GetArtifactFs; +use crate::actions::RegisteredAction; use crate::analysis::calculation::RuleAnalysisCalculation; +use crate::analysis::registry::RecordedAnalysisValues; use crate::analysis::AnalysisResult; -use crate::artifact_groups::calculation::ArtifactGroupCalculation; -use crate::artifact_groups::ArtifactGroup; +use crate::artifact_groups::deferred::TransitiveSetKey; +use crate::artifact_groups::promise::PromiseArtifact; use crate::bxl::calculation::BXL_CALCULATION_IMPL; use crate::bxl::result::BxlResult; -use crate::deferred::types::BaseKey; -use crate::deferred::types::DeferredInput; -use crate::deferred::types::DeferredLookup; -use crate::deferred::types::DeferredRegistry; -use crate::deferred::types::DeferredResult; -use crate::deferred::types::DeferredValueAny; -use crate::deferred::types::DeferredValueAnyReady; -use crate::deferred::types::DeferredValueReady; -use crate::deferred::types::ResolveDeferredCtx; - -#[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)] -#[display(fmt = "ResolveDeferred({})", _0)] -pub struct DeferredResolve(pub DeferredKey); - -#[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)] -#[display(fmt = "ComputeDeferred({})", _0)] -pub struct DeferredCompute(pub DeferredKey); - -#[async_trait] -pub trait DeferredCalculation { - /// Computes and returns the evaluated value of an 'DeferredData' - async fn compute_deferred_data( - &self, - data: &DeferredData, - ) -> anyhow::Result>; -} - -#[async_trait] -impl DeferredCalculation for DiceComputations { - async fn compute_deferred_data( - &self, - data: &DeferredData, - ) -> anyhow::Result> { - if data.deferred_key().id().is_trivial() { - let deferred = lookup_deferred(self, data.deferred_key()).await?; - let deferred = deferred - .get()? - .as_trivial() - .context("Invalid deferred")? 
- .dupe(); - return DeferredValueAnyReady::TrivialDeferred(deferred).resolve(data); - } - - let deferred = resolve_deferred(self, data.deferred_key()).await?; - deferred.resolve(data) - } -} +use crate::dynamic::calculation::dynamic_lambda_result; +use crate::dynamic::calculation::DynamicLambdaResult; +use crate::interpreter::rule_defs::transitive_set::FrozenTransitiveSet; pub static EVAL_ANON_TARGET: LateBinding< for<'c> fn( - &'c DiceComputations, + &'c mut DiceComputations, Arc, ) -> Pin> + Send + 'c>>, > = LateBinding::new("EVAL_ANON_TARGET"); +pub static GET_PROMISED_ARTIFACT: LateBinding< + for<'c> fn( + &'c PromiseArtifact, + &'c mut DiceComputations, + ) -> Pin> + Send + 'c>>, +> = LateBinding::new("GET_PROMISED_ARTIFACT"); + async fn lookup_deferred_inner( key: &BaseDeferredKey, - dice: &DiceComputations, + dice: &mut DiceComputations<'_>, ) -> anyhow::Result { match key { BaseDeferredKey::TargetLabel(target) => { @@ -128,7 +68,7 @@ async fn lookup_deferred_inner( .get()? .eval_bxl(dice, bxl.dupe()) .await? - .bxl_result; + .0; Ok(DeferredHolder::Bxl(bxl_result)) } @@ -138,254 +78,49 @@ async fn lookup_deferred_inner( } } -struct PartialLookup { - holder: DeferredHolder, - id: DeferredId, -} - -impl PartialLookup { - fn get(&self) -> anyhow::Result> { - self.holder.lookup_deferred(self.id) - } -} - -/// looks up an deferred -async fn lookup_deferred( - dice: &DiceComputations, - key: &DeferredKey, -) -> anyhow::Result { +pub async fn lookup_deferred_holder( + dice: &mut DiceComputations<'_>, + key: &DeferredHolderKey, +) -> anyhow::Result { Ok(match key { - DeferredKey::Base(target, id) => { - let holder = lookup_deferred_inner(target, dice).await?; - PartialLookup { holder, id: *id } - } - DeferredKey::Deferred(key, id) => { - let deferred = compute_deferred(dice, key).await?; - PartialLookup { - holder: DeferredHolder::Deferred(deferred), - id: *id, - } + DeferredHolderKey::Base(key) => lookup_deferred_inner(key, dice).await?, + DeferredHolderKey::DynamicLambda(lambda) => { + DeferredHolder::DynamicLambda(dynamic_lambda_result(dice, lambda).await?) } }) } -/// Fully resolve the deferred, including any deferreds it might have return when attempting -/// to calculate it. -async fn resolve_deferred( - dice: &DiceComputations, - deferred: &DeferredKey, -) -> anyhow::Result { - #[async_trait] - impl Key for DeferredResolve { - type Value = SharedResult; - - async fn compute( - &self, - ctx: &mut DiceComputations, - _cancellation: &CancellationContext, - ) -> Self::Value { - let result = compute_deferred(ctx, &self.0).await?; - match result.value() { - DeferredValueAny::Ready(value) => Ok(value.dupe()), - DeferredValueAny::Deferred(key) => resolve_deferred(ctx, key).await.shared_error(), - } - } - - fn equality(_: &Self::Value, _: &Self::Value) -> bool { - // TODO(bobyf) consider if we want deferreds to be eq - false - } - } - - dice.compute(&DeferredResolve(deferred.dupe())) - .await? - .unshared_error() -} - -/// Computes and returns the untyped deferred at the given key. This does not fully resolve -/// the deferred as another deferred may be returned. 
-async fn compute_deferred( - dice: &DiceComputations, - deferred: &DeferredKey, -) -> anyhow::Result { - #[async_trait] - impl Key for DeferredCompute { - type Value = SharedResult; - - async fn compute( - &self, - ctx: &mut DiceComputations, - cancellation: &CancellationContext, - ) -> Self::Value { - let deferred = lookup_deferred(ctx, &self.0).await?; - let deferred = deferred.get()?.as_complex(); - - // We'll create the Span lazily when materialization hits it. - let span = Lazy::new(|| deferred.span().map(create_span)); - - let target_node_futs = FuturesUnordered::new(); - let deferreds_futs = FuturesUnordered::new(); - let mut materialized_artifacts = Vec::new(); - - deferred.inputs().iter().for_each(|input| match input { - DeferredInput::ConfiguredTarget(target) => target_node_futs.push( - ctx.get_configured_target_node(target) - .map(|res| anyhow::Ok((target.dupe(), res?.require_compatible()?))), - ), - DeferredInput::Deferred(deferred_key) => deferreds_futs.push( - resolve_deferred(ctx, deferred_key) - .map(|res| anyhow::Ok((deferred_key.dupe(), res?))), - ), - DeferredInput::MaterializedArtifact(artifact) => { - materialized_artifacts.push(ArtifactGroup::Artifact(artifact.dupe())); - } - }); - - let materialized_artifacts_fut = - self.create_materializer_futs(&materialized_artifacts, ctx, &span); - - // TODO(nga): do we need to compute artifacts? - let (targets, deferreds, materialized_artifacts) = futures::future::join3( - futures_pair_to_map(target_node_futs), - futures_pair_to_map(deferreds_futs), - materialized_artifacts_fut, - ) - .await; - - let mut registry = DeferredRegistry::new(BaseKey::Deferred(Arc::new(self.0.dupe()))); - - cancellation - .with_structured_cancellation(|observer| { - async move { - let mut deferred_ctx = ResolveDeferredCtx::new( - self.0.dupe(), - targets?, - deferreds?, - materialized_artifacts?, - &mut registry, - ctx.global_data().get_io_provider().project_root().dupe(), - ctx.global_data().get_digest_config(), - observer, - ); - - let execute = deferred.execute(&mut deferred_ctx, ctx); - - let res = match Lazy::into_value(span).unwrap_or_else(|init| init()) { - Some(span) => { - span.wrap_future(async { - (execute.await, buck2_data::DeferredEvaluationEnd {}) - }) - .await - } - None => execute.await, - }; - - // TODO populate the deferred map - Ok(DeferredResult::new(res?, registry.take_result()?)) - } - .boxed() - }) - .await - } - - fn equality(_: &Self::Value, _: &Self::Value) -> bool { - false - } - - fn validity(x: &Self::Value) -> bool { - x.is_ok() - } - } - - impl DeferredCompute { - fn create_materializer_futs<'a>( - &'a self, - materialized_artifacts: &'a Vec, - ctx: &'a DiceComputations, - span: &'a Lazy, impl FnOnce() -> Option>, - ) -> impl Future>> + 'a - { - if materialized_artifacts.is_empty() { - return async move { Ok(HashMap::new()) }.left_future(); - } - // This is a bit suboptimal: we wait for all artifacts to be ready in order to - // materialize any of them. However that is how we execute *all* local actions so in - // the grand scheme of things that's probably not a huge deal. 
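The "ensure everything, then materialize" batching described in the comment above is the standard `try_join_all` shape. A minimal sketch with stand-in functions; `ensure` and `materialize` here are hypothetical placeholders, not the real buck2 APIs:

```rust
use futures::future::try_join_all;

// Stand-in for ensuring one input artifact is ready.
async fn ensure(i: u32) -> Result<u32, String> {
    Ok(i * 2)
}

// Stand-in for materializing one artifact on disk.
async fn materialize(path: u32) -> Result<(), String> {
    println!("materializing {path}");
    Ok(())
}

async fn demo() -> Result<(), String> {
    // Wait for *all* inputs to become ready first...
    let ready = try_join_all((0..4).map(ensure)).await?;

    // ...and only then materialize them, so a failure in any `ensure`
    // future prevents all materialization work from starting.
    for path in ready {
        materialize(path).await?;
    }
    Ok(())
}

fn main() {
    futures::executor::block_on(demo()).unwrap();
}
```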
- let materialized_artifacts_fut = { - let artifact_futs = futures::future::try_join_all( - materialized_artifacts - .iter() - .map(|artifact| ctx.ensure_artifact_group(artifact)), - ); - - artifact_futs.and_then(move |_| async move { - let materializer = ctx.per_transaction_data().get_materializer(); - let artifact_fs = ctx.get_artifact_fs().await?; - - let fut = materialized_artifacts - .iter() - .map(|artifact| async { - let artifact = artifact - .unpack_artifact() - .expect("we only put Artifacts into this list") - .dupe(); - let path = artifact.resolve_path(&artifact_fs)?; - materializer.ensure_materialized(vec![path.clone()]).await?; - - anyhow::Ok((artifact, path)) - }) - .collect::>() - .try_collect::>(); - - match span.as_ref() { - Some(span) => { - span.create_child(buck2_data::DeferredPreparationStageStart { - stage: Some(buck2_data::MaterializedArtifacts {}.into()), - }) - .wrap_future( - fut.map(|r| (r, buck2_data::DeferredPreparationStageEnd {})), - ) - .await - } - None => fut.await, - } - }) - }; - materialized_artifacts_fut.right_future() - } - } - - Ok(dice.compute(&DeferredCompute(deferred.dupe())).await??) -} - -async fn futures_pair_to_map( - mut futs: FuturesUnordered>>, -) -> anyhow::Result> { - let mut res = HashMap::with_capacity(futs.len()); - while let Some(p) = futs.next().await { - // terminate immediately once a fail occurs - // TODO support keep going? - - let (key, value) = p?; - res.insert(key, value); - } - Ok(res) -} - /// Represents an Analysis or Deferred result. Technically, we can treat analysis as a 'Deferred' /// and get rid of this enum -enum DeferredHolder { +pub enum DeferredHolder { Analysis(AnalysisResult), Bxl(Arc), - Deferred(DeferredResult), + DynamicLambda(Arc), } impl DeferredHolder { - fn lookup_deferred(&self, id: DeferredId) -> anyhow::Result> { + pub(crate) fn lookup_transitive_set( + &self, + key: &TransitiveSetKey, + ) -> anyhow::Result> { + self.analysis_values().lookup_transitive_set(key) + } + + pub(crate) fn lookup_action(&self, key: &ActionKey) -> anyhow::Result { + self.analysis_values().lookup_action(key) + } + + pub fn analysis_values(&self) -> &RecordedAnalysisValues { match self { - DeferredHolder::Analysis(result) => result.lookup_deferred(id), - DeferredHolder::Deferred(result) => result.lookup_deferred(id), - DeferredHolder::Bxl(result) => result.lookup_deferred(id), + DeferredHolder::Analysis(result) => result.analysis_values(), + DeferredHolder::Bxl(result) => result.analysis_values(), + DeferredHolder::DynamicLambda(result) => result.analysis_values(), } } } + +#[derive(Debug, Allocative, Clone, Dupe)] +pub enum ActionLookup { + Action(Arc), + Deferred(ActionKey), +} diff --git a/app/buck2_build_api/src/deferred/mod.rs b/app/buck2_build_api/src/deferred/mod.rs deleted file mode 100644 index fc528bc7dcb7a..0000000000000 --- a/app/buck2_build_api/src/deferred/mod.rs +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! 'DeferredData's are deferred work registered by a rule implementation that can be stored in -//! fields of providers. -//! -//! These are usually slow operations required to generate the information on 'Provider's. -//! 
For example, information that depend on the execution of some actions, or information that -//! require reading the filesystem. -//! -//! We model these asynchronous data to be stored on fields of 'Provider's as 'DeferredData'. Each -//! 'DeferredData' will hold an 'DeferredKey'. That key that maps to an implementation of -//! 'Deferred', which is the actual work to be ran when execution of the deferred is needed. - -pub mod calculation; -pub mod types; diff --git a/app/buck2_build_api/src/deferred/types.rs b/app/buck2_build_api/src/deferred/types.rs deleted file mode 100644 index 16155fa5aa9fc..0000000000000 --- a/app/buck2_build_api/src/deferred/types.rs +++ /dev/null @@ -1,1374 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::any::type_name; -use std::any::Any; -use std::collections::HashMap; -use std::fmt; -use std::fmt::Debug; -use std::fmt::Formatter; -use std::marker::PhantomData; -use std::sync::Arc; - -use allocative::Allocative; -use async_trait::async_trait; -use buck2_artifact::artifact::artifact_type::Artifact; -use buck2_artifact::deferred::data::DeferredData; -use buck2_artifact::deferred::id::DeferredId; -use buck2_artifact::deferred::key::DeferredKey; -use buck2_core::base_deferred_key::BaseDeferredKey; -use buck2_core::fs::project::ProjectRoot; -use buck2_core::fs::project_rel_path::ProjectRelativePath; -use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; -use buck2_core::target::configured_target_label::ConfiguredTargetLabel; -use buck2_execute::digest_config::DigestConfig; -use buck2_node::nodes::configured::ConfiguredTargetNode; -use dice::DiceComputations; -use dupe::Dupe; -use gazebo::variants::VariantName; -use indexmap::indexset; -use indexmap::IndexSet; -use itertools::Itertools; -use more_futures::cancellable_future::CancellationObserver; -use once_cell::sync::Lazy; -use thiserror::Error; - -/// An asynchronous chunk of work that will be executed when requested. -/// The 'Deferred' can have "inputs" which are values that will be guaranteed to be ready to use -/// before the 'Deferred' is actually executed. These can be 'Artifact's, which means that those -/// 'Artifact's will be materialized and its corresponding 'Action's executed, or other -/// 'DeferredData', which means those 'Deferred' will be computed first. -/// -/// `any::Provider` can be used to obtain data for introspection. -#[async_trait] -pub trait Deferred: Allocative + provider::Provider { - type Output; - - /// the set of 'Deferred's that should be computed first before executing this 'Deferred' - fn inputs(&self) -> &IndexSet; - - /// executes this 'Deferred', assuming all inputs and input artifacts are already computed - async fn execute( - &self, - ctx: &mut dyn DeferredCtx, - dice: &mut DiceComputations, - ) -> anyhow::Result>; - - /// An optional stage to wrap execution in. - fn span(&self) -> Option { - None - } -} - -/// The context for executing a 'Deferred'. 
-pub trait DeferredCtx: Send { - fn get_configured_target(&self, label: &ConfiguredTargetLabel) - -> Option<&ConfiguredTargetNode>; - - fn get_action_key(&self) -> String; - - fn get_deferred_data(&self, key: &DeferredKey) -> Option; - - fn get_materialized_artifact(&self, artifact: &Artifact) -> Option<&ProjectRelativePath>; - - fn registry(&mut self) -> &mut DeferredRegistry; - - fn project_filesystem(&self) -> &ProjectRoot; - - fn digest_config(&self) -> DigestConfig; - - fn liveness(&self) -> CancellationObserver; -} - -/// DeferredCtx with already resolved values -pub struct ResolveDeferredCtx<'a> { - key: DeferredKey, - configured_targets: HashMap, - deferreds: HashMap, - materialized_artifacts: HashMap, - registry: &'a mut DeferredRegistry, - project_filesystem: ProjectRoot, - digest_config: DigestConfig, - liveness: CancellationObserver, -} - -impl<'a> ResolveDeferredCtx<'a> { - pub fn new( - key: DeferredKey, - configured_targets: HashMap, - deferreds: HashMap, - materialized_artifacts: HashMap, - registry: &'a mut DeferredRegistry, - project_filesystem: ProjectRoot, - digest_config: DigestConfig, - liveness: CancellationObserver, - ) -> Self { - Self { - key, - configured_targets, - deferreds, - materialized_artifacts, - registry, - project_filesystem, - digest_config, - liveness, - } - } -} - -impl<'a> DeferredCtx for ResolveDeferredCtx<'a> { - fn get_configured_target( - &self, - label: &ConfiguredTargetLabel, - ) -> Option<&ConfiguredTargetNode> { - self.configured_targets.get(label) - } - - fn get_action_key(&self) -> String { - self.key.action_key() - } - - fn get_deferred_data(&self, key: &DeferredKey) -> Option { - self.deferreds.get(key).map(|b| b.dupe()) - } - - fn get_materialized_artifact(&self, artifact: &Artifact) -> Option<&ProjectRelativePath> { - self.materialized_artifacts - .get(artifact) - .map(|x| x.as_ref()) - } - - fn registry(&mut self) -> &mut DeferredRegistry { - self.registry - } - - fn project_filesystem(&self) -> &ProjectRoot { - &self.project_filesystem - } - - fn digest_config(&self) -> DigestConfig { - self.digest_config - } - - fn liveness(&self) -> CancellationObserver { - self.liveness.dupe() - } -} - -/// input to a deferred that needs to be computed first before executing -#[derive(Clone, Debug, Eq, PartialEq, Hash, Allocative)] -pub enum DeferredInput { - ConfiguredTarget(ConfiguredTargetLabel), - Deferred(DeferredKey), - MaterializedArtifact(Artifact), -} - -/// The base key. We can actually get rid of this and just use 'DeferredKey' if rule analysis is an -/// 'Deferred' itself. This is used to construct the composed 'DeferredKey::Deferred' or -/// 'DeferredKey::Base' type. -#[derive(Allocative)] -pub enum BaseKey { - Base(BaseDeferredKey), - // While DeferredKey is Dupe, it has quite a lot of Arc's inside it, so maybe an Arc here makes sense? - // Maybe not? - #[cfg_attr(feature = "gazebo_lint", allow(gazebo_lint_arc_on_dupe))] - Deferred(Arc), -} - -impl BaseKey { - fn make_key(&self, id: DeferredId) -> DeferredKey { - match self { - BaseKey::Base(base) => DeferredKey::Base(base.dupe(), id), - BaseKey::Deferred(base) => DeferredKey::Deferred(base.dupe(), id), - } - } -} - -/// Implemented by all trivial deferreds. -pub trait TrivialDeferred: Allocative + AnyValue + Debug + Send + Sync { - /// Convert the object to an AnyValue object - fn as_any_value(&self) -> &dyn AnyValue; - - /// Obtain deferred-specific debug data. - /// - /// This function is copied from `any::Provider` trait, which cannot be implemented - /// for `Arc`. 
- fn provide<'a>(&'a self, demand: &mut provider::Demand<'a>); -} - -#[derive(Allocative)] -pub struct TrivialDeferredValue(pub Arc); - -impl provider::Provider for TrivialDeferredValue { - fn provide<'a>(&'a self, demand: &mut provider::Demand<'a>) { - self.0.provide(demand) - } -} - -#[async_trait] -impl DeferredAny for TrivialDeferredValue { - fn inputs(&self) -> &IndexSet { - static INPUTS: Lazy> = Lazy::new(IndexSet::new); - &INPUTS - } - - async fn execute( - &self, - _ctx: &mut dyn DeferredCtx, - _dice: &mut DiceComputations, - ) -> anyhow::Result { - Ok(DeferredValueAny::Ready( - DeferredValueAnyReady::TrivialDeferred(self.0.dupe()), - )) - } - - fn as_any(&self) -> &dyn Any { - self.0.into_any() - } - - fn type_name(&self) -> &str { - self.0.type_name() - } - - fn span(&self) -> Option { - // No evaluation Span if you never evaluate. - None - } -} - -#[derive(Allocative)] -pub enum DeferredTableEntry { - Trivial(TrivialDeferredValue), - Complex(Box), -} - -impl provider::Provider for DeferredTableEntry { - fn provide<'a>(&'a self, demand: &mut provider::Demand<'a>) { - match self { - Self::Trivial(v) => v.provide(demand), - Self::Complex(v) => v.provide(demand), - } - } -} - -#[async_trait] -impl DeferredAny for DeferredTableEntry { - fn inputs(&self) -> &IndexSet { - match self { - Self::Trivial(v) => v.inputs(), - Self::Complex(v) => v.inputs(), - } - } - - async fn execute( - &self, - ctx: &mut dyn DeferredCtx, - dice: &mut DiceComputations, - ) -> anyhow::Result { - match self { - Self::Trivial(v) => v.execute(ctx, dice).await, - Self::Complex(v) => v.execute(ctx, dice).await, - } - } - - fn as_any(&self) -> &dyn Any { - match self { - Self::Trivial(v) => v.as_any(), - Self::Complex(v) => v.as_any(), - } - } - - fn type_name(&self) -> &str { - match self { - Self::Trivial(v) => v.type_name(), - Self::Complex(v) => v.type_name(), - } - } - - fn span(&self) -> Option { - match self { - Self::Trivial(..) => None, - Self::Complex(v) => v.span(), - } - } -} - -#[derive(Allocative)] -enum DeferredRegistryEntry { - Set(DeferredTableEntry), - Pending, -} - -/// The registry for creating 'DeferredData's and registering 'Deferred's -#[derive(Allocative)] -pub struct DeferredRegistry { - base_key: BaseKey, - registry: Vec, -} - -#[derive(Allocative)] -#[allocative(bound = "")] -pub struct ReservedDeferredData(DeferredData); - -impl<'a, T> ReservedDeferredData -where - T: Clone + Send + Sync + 'static, -{ - fn new(id: DeferredData) -> Self { - Self(id) - } - - pub fn data(&'a self) -> &'a DeferredData { - &self.0 - } -} - -#[derive(Allocative)] -#[allocative(bound = "")] -pub struct ReservedTrivialDeferredData(DeferredData); - -impl<'a, T> ReservedTrivialDeferredData -where - T: Clone + Send + Sync + 'static, -{ - fn new(id: DeferredData) -> Self { - Self(id) - } - - pub fn data(&'a self) -> &'a DeferredData { - &self.0 - } -} - -impl DeferredRegistry { - pub fn new(base_key: BaseKey) -> Self { - Self { - base_key, - registry: Vec::new(), - } - } - - /// Reserves a 'DeferredData', with it's underlying key, on the promise that it should be bound - /// to a 'Deferred' before 'take_result' is called. 
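A usage sketch of the reserve/bind promise described in the comment above, against this now-removed API (`FakeDeferred` is the test helper defined further down; type parameters the diff stripped are reconstructed here):

    fn reserve_then_bind(mut registry: DeferredRegistry) -> anyhow::Result<()> {
        // Phase 1: reserve a stable key before the work exists, so other
        // deferreds can already reference it.
        let reserved = registry.reserve::<u32>();
        let _key = reserved.data().deferred_key().dupe();
        // Phase 2: bind the actual work; an unbound reservation makes
        // take_result() fail with UnboundReservedDeferred.
        registry.bind(
            reserved,
            FakeDeferred { inputs: IndexSet::new(), val: 42u32 },
        );
        let _entries = registry.take_result()?;
        Ok(())
    }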
- pub fn reserve(&mut self) -> ReservedDeferredData - where - D: Clone + Send + Sync + 'static, - { - let id = DeferredId { - id: self.registry.len().try_into().unwrap(), - trivial: false, - }; - self.registry.push(DeferredRegistryEntry::Pending); - ReservedDeferredData::new(DeferredData::unchecked_new(self.base_key.make_key(id))) - } - - /// Reserves a 'DeferredData', with it's underlying key, on the promise that it should be bound - /// to a 'Deferred' before 'take_result' is called. - pub fn reserve_trivial(&mut self) -> ReservedTrivialDeferredData - where - D: Clone + Send + Sync + 'static, - { - let id = DeferredId { - id: self.registry.len().try_into().unwrap(), - trivial: true, - }; - self.registry.push(DeferredRegistryEntry::Pending); - ReservedTrivialDeferredData::new(DeferredData::unchecked_new(self.base_key.make_key(id))) - } - - /// binds a reserved 'ReservedDeferredData' to a 'Deferred' - pub fn bind(&mut self, reserved: ReservedDeferredData, d: D) -> DeferredData - where - D: Deferred + Send + Sync + 'static, - T: Allocative + Clone + Debug + Send + Sync + 'static, - { - let id = reserved.0.deferred_key().id().as_usize(); - - match self.registry.get_mut(id) { - Some(entry @ DeferredRegistryEntry::Pending) => { - *entry = DeferredRegistryEntry::Set(DeferredTableEntry::Complex(Box::new(d))); - } - _ => { - panic!("the reserved should always be in pending"); - } - } - - reserved.0 - } - - /// binds a reserved 'ReservedDeferredData' to a 'Deferred' - pub fn bind_trivial( - &mut self, - reserved: ReservedTrivialDeferredData, - d: D, - ) -> DeferredData - where - D: TrivialDeferred + Clone + 'static, - { - let id = reserved.0.deferred_key().id().as_usize(); - - match self.registry.get_mut(id) { - Some(entry @ DeferredRegistryEntry::Pending) => { - *entry = DeferredRegistryEntry::Set(DeferredTableEntry::Trivial( - TrivialDeferredValue(Arc::new(d) as _), - )); - } - _ => { - panic!("the reserved should always be in pending"); - } - } - - reserved.0 - } - - /// creates a new 'DeferredData' - pub fn defer< - D: Deferred + Send + Sync + 'static, - T: Allocative + Clone + Debug + Send + Sync + 'static, - >( - &mut self, - d: D, - ) -> DeferredData { - let id = DeferredId { - id: self.registry.len().try_into().unwrap(), - trivial: false, - }; - self.registry - .push(DeferredRegistryEntry::Set(DeferredTableEntry::Complex( - Box::new(d), - ))); - DeferredData::unchecked_new(self.base_key.make_key(id)) - } - - /// creates a new 'DeferredData' - pub fn defer_trivial(&mut self, d: D) -> DeferredData - where - D: TrivialDeferred + Clone + 'static, - { - let id = DeferredId { - id: self.registry.len().try_into().unwrap(), - trivial: true, - }; - self.registry - .push(DeferredRegistryEntry::Set(DeferredTableEntry::Trivial( - TrivialDeferredValue(Arc::new(d) as _), - ))); - DeferredData::unchecked_new(self.base_key.make_key(id)) - } - - /// performs a mapping function over an 'DeferredData' - pub fn map(&mut self, orig: &DeferredData, f: F) -> DeferredData - where - T: Allocative + Clone + Debug + Send + Sync + 'static, - U: Allocative + Clone + Debug + Send + Sync + 'static, - F: Fn(&T, &mut dyn DeferredCtx) -> DeferredValue + Send + Sync + 'static, - { - #[derive(Allocative)] - #[allocative(bound = "")] - struct Map { - orig: IndexSet, - #[allocative(skip)] - f: F, - p: PhantomData<(T, U)>, - } - - impl provider::Provider for Map - where - T: Allocative + Send + Sync + 'static, - F: Fn(&T, &mut dyn DeferredCtx) -> DeferredValue + Send + Sync + 'static, - U: Allocative + 'static, - { - fn 
provide<'a>(&'a self, _demand: &mut provider::Demand<'a>) {} - } - - #[async_trait] - impl Deferred for Map - where - T: Allocative + Send + Sync + 'static, - F: Fn(&T, &mut dyn DeferredCtx) -> DeferredValue + Send + Sync + 'static, - U: Allocative + Send + Sync + 'static, - { - type Output = U; - - fn inputs(&self) -> &IndexSet { - &self.orig - } - - async fn execute( - &self, - ctx: &mut dyn DeferredCtx, - _dice: &mut DiceComputations, - ) -> anyhow::Result> { - let orig = match self.orig.iter().exactly_one() { - Ok(DeferredInput::Deferred(orig)) => orig, - _ => unreachable!(), - }; - Ok((self.f)( - ctx.get_deferred_data(orig) - .unwrap() - .downcast::() - .unwrap(), - ctx, - )) - } - } - - self.defer(Map { - orig: indexset![DeferredInput::Deferred(orig.deferred_key().dupe())], - f, - p: PhantomData, - }) - } - - pub fn take_result(self) -> anyhow::Result> { - self.registry - .into_iter() - .enumerate() - .map(|(i, e)| match e { - DeferredRegistryEntry::Set(e) => anyhow::Ok(e), - DeferredRegistryEntry::Pending => { - Err(DeferredErrors::UnboundReservedDeferred(i).into()) - } - }) - .collect() - } -} - -#[derive(Debug, Error)] -pub enum DeferredErrors { - #[error("no deferred found for deferred id `{0}`")] - DeferredNotFound(u32), - #[error("reserved deferred id of `{0:?}` was never bound")] - UnboundReservedDeferred(usize), -} - -pub enum DeferredLookup<'a> { - Trivial(&'a TrivialDeferredValue), - Complex(&'a (dyn DeferredAny + 'static)), -} - -impl<'a> DeferredLookup<'a> { - pub fn as_complex(&self) -> &'a (dyn DeferredAny + 'static) { - match self { - Self::Trivial(v) => *v as _, - Self::Complex(v) => *v, - } - } - - pub fn as_trivial(&self) -> Option<&'a Arc> { - match self { - Self::Trivial(v) => Some(&v.0), - Self::Complex(..) => None, - } - } -} - -/// Contains all the deferreds generated by analyzing a particular rule implementation -#[derive(Clone, Debug, Dupe, Allocative)] -pub struct DeferredResult(Arc); - -#[derive(Allocative)] -struct DeferredResultData { - deferreds: Vec, - value: DeferredValueAny, -} - -#[derive(Clone, Dupe, Allocative)] -pub struct DeferredTable(Arc>); - -impl Debug for DeferredResultData { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!( - f, - "DeferredResult(value=`{:?}`, {} deferreds)", - self.value, - self.deferreds.len(), - ) - } -} - -impl Debug for DeferredTable { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "DeferredTable({} deferreds)", self.0.len()) - } -} - -impl DeferredTable { - pub fn new(deferreds: Vec) -> Self { - Self(Arc::new(deferreds)) - } - - /// looks up an 'Deferred' given the id - pub fn lookup_deferred(&self, id: DeferredId) -> anyhow::Result> { - match self.0.get(id.as_usize()) { - Some(DeferredTableEntry::Complex(deferred)) => Ok(DeferredLookup::Complex(&**deferred)), - Some(DeferredTableEntry::Trivial(value)) => Ok(DeferredLookup::Trivial(value)), - None => Err(anyhow::anyhow!(DeferredErrors::DeferredNotFound(id.id))), - } - } - - /// Iterator on the DeferredTable which converts a `DeferredTableEntry` to a `DeferredLookup` - pub fn iter(&self) -> impl Iterator> { - self.0.iter().map(|deferred| match deferred { - DeferredTableEntry::Complex(deferred) => DeferredLookup::Complex(&**deferred), - DeferredTableEntry::Trivial(value) => DeferredLookup::Trivial(value), - }) - } -} - -impl DeferredResult { - pub fn new(value: DeferredValueAny, deferreds: Vec) -> Self { - Self(Arc::new(DeferredResultData { deferreds, value })) - } - - /// looks up an 'Deferred' given the id - pub fn 
lookup_deferred(&self, id: DeferredId) -> anyhow::Result> { - match self.0.deferreds.get(id.as_usize()) { - Some(DeferredTableEntry::Complex(deferred)) => Ok(DeferredLookup::Complex(&**deferred)), - Some(DeferredTableEntry::Trivial(value)) => Ok(DeferredLookup::Trivial(value)), - None => Err(anyhow::anyhow!(DeferredErrors::DeferredNotFound(id.id))), - } - } - - pub fn value(&self) -> &DeferredValueAny { - &self.0.value - } -} - -/// typed value computed by a deferred. This can either be a completed calculation, in which case -/// a value is returned, or defer to another 'Deferred' computation. -#[derive(Allocative)] -pub enum DeferredValue { - Ready(T), - Deferred(DeferredData), -} - -/// Enum of AnyValue or TrivialDeferreds. -#[derive(Allocative, Debug, Dupe, Clone)] -pub enum DeferredValueAnyReady { - AnyValue(Arc), - TrivialDeferred(Arc), -} - -impl DeferredValueAnyReady { - pub fn downcast(&self) -> anyhow::Result<&T> { - match self { - Self::AnyValue(v) => v.downcast(), - Self::TrivialDeferred(v) => v.as_any_value().downcast(), - } - } - - pub fn downcast_into(self) -> anyhow::Result> { - // Check if it can downcast to T - self.downcast::()?; - - Ok(DeferredValueReady { - inner: self, - _type: PhantomData, - }) - } - - pub fn resolve( - self, - _data: &DeferredData, - ) -> anyhow::Result> { - self.downcast_into() - } -} - -/// This is a `Any` that has been checked to contain a T and can therefore provide &T infallibly -#[derive(Allocative, Debug, Dupe, Clone)] -pub struct DeferredValueReady { - inner: DeferredValueAnyReady, - _type: PhantomData, -} - -impl std::ops::Deref for DeferredValueReady -where - T: Send + 'static, -{ - type Target = T; - - fn deref(&self) -> &Self::Target { - // This was checked earlier - self.inner.downcast::().unwrap() - } -} - -/// untyped value computed by the deferred. This is same as 'DeferredValue', but with 'T' as an -/// 'ValueAny' -#[derive(Debug, VariantName, Clone, Dupe, Allocative)] -pub enum DeferredValueAny { - Ready(DeferredValueAnyReady), - Deferred(DeferredKey), -} - -impl DeferredValueAny { - fn ready(t: T) -> Self { - Self::Ready(DeferredValueAnyReady::AnyValue(Arc::new(t))) - } - - fn defer(k: DeferredData) -> Self { - Self::Deferred(k.into_deferred_key()) - } -} - -/// An 'Any' that is the return type of a 'Deferred'. This is box cloneable, and castable. -pub trait AnyValue: Allocative + Any + Debug + Send + Sync { - fn into_any(&self) -> &(dyn Any); - - fn type_name(&self) -> &str; -} - -impl AnyValue for T -where - T: Allocative + Any + Debug + Send + Sync, -{ - fn into_any(&self) -> &dyn Any { - self - } - - fn type_name(&self) -> &str { - type_name::() - } -} - -impl dyn AnyValue { - pub(crate) fn downcast(&self) -> anyhow::Result<&T> { - match self.into_any().downcast_ref::() { - Some(t) => Ok(t), - None => Err(anyhow::anyhow!( - "Cannot cast Deferred of value type `{}` into type `{}`", - self.type_name(), - type_name::() - )), - } - } -} - -/// untyped deferred -#[async_trait] -pub trait DeferredAny: Allocative + provider::Provider + Send + Sync { - /// the set of 'Deferred's that should be computed first before executing this 'Deferred' - fn inputs(&self) -> &IndexSet; - - /// executes this 'Deferred', assuming all inputs and input artifacts are already computed - async fn execute( - &self, - ctx: &mut dyn DeferredCtx, - dice: &mut DiceComputations, - ) -> anyhow::Result; - - fn as_any(&self) -> &dyn Any; - - fn type_name(&self) -> &str; - - /// An optional stage to wrap execution in. 
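The `downcast` above is the standard `Any` type-erasure check, dressed up with a friendlier error that names both types. Reduced to std, the same shape is:

    use std::any::{type_name, Any};

    fn downcast_checked<T: 'static>(v: &dyn Any, actual_type: &str) -> anyhow::Result<&T> {
        match v.downcast_ref::<T>() {
            Some(t) => Ok(t),
            None => Err(anyhow::anyhow!(
                "Cannot cast value of type `{}` into type `{}`",
                actual_type,
                type_name::<T>()
            )),
        }
    }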
- fn span(&self) -> Option; -} - -impl dyn DeferredAny { - pub fn downcast(&self) -> anyhow::Result<&T> { - match self.as_any().downcast_ref::() { - Some(t) => Ok(t), - None => Err(anyhow::anyhow!( - "Cannot cast Deferred of value type `{}` into type `{}`", - self.type_name(), - type_name::() - )), - } - } -} - -#[async_trait] -impl DeferredAny for D -where - D: Deferred + Send + Sync + Any + 'static, - T: Allocative + Clone + Debug + Send + Sync + 'static, -{ - fn inputs(&self) -> &IndexSet { - self.inputs() - } - - async fn execute( - &self, - ctx: &mut dyn DeferredCtx, - dice: &mut DiceComputations, - ) -> anyhow::Result { - match self.execute(ctx, dice).await? { - DeferredValue::Ready(t) => Ok(DeferredValueAny::ready(t)), - DeferredValue::Deferred(d) => Ok(DeferredValueAny::defer(d)), - } - } - - fn as_any(&self) -> &dyn Any { - self - } - - fn type_name(&self) -> &str { - type_name::() - } - - fn span(&self) -> Option { - D::span(self) - } -} - -pub mod testing { - - use buck2_artifact::deferred::key::DeferredKey; - use gazebo::variants::VariantName; - - use crate::deferred::types::AnyValue; - use crate::deferred::types::DeferredResult; - use crate::deferred::types::DeferredTable; - use crate::deferred::types::DeferredTableEntry; - use crate::deferred::types::DeferredValueAny; - use crate::deferred::types::DeferredValueAnyReady; - - pub trait DeferredValueAnyExt { - fn assert_ready(self) -> DeferredValueAnyReady; - fn assert_deferred(self) -> DeferredKey; - } - - impl DeferredValueAnyExt for DeferredValueAny { - fn assert_ready(self) -> DeferredValueAnyReady { - match self { - DeferredValueAny::Ready(v) => v, - x => panic!("Expected deferred to be Ready but was {}", x.variant_name()), - } - } - - fn assert_deferred(self) -> DeferredKey { - match self { - DeferredValueAny::Deferred(k) => k, - x => panic!( - "Expected deferred to be Deferred but was {}", - x.variant_name() - ), - } - } - } - - pub trait AnyValueExt { - /// tests if the any is equal to 't' - fn eq(&self, t: &T) -> bool; - } - - impl AnyValueExt for dyn AnyValue { - fn eq(&self, t: &T) -> bool { - self.into_any() - .downcast_ref::() - .map_or(false, |x| x == t) - } - } - - pub trait DeferredAnalysisResultExt { - fn get_registered(&self) -> &Vec; - } - - impl DeferredAnalysisResultExt for DeferredResult { - fn get_registered(&self) -> &Vec { - &self.0.deferreds - } - } - - impl DeferredAnalysisResultExt for DeferredTable { - fn get_registered(&self) -> &Vec { - &self.0 - } - } -} - -#[cfg(test)] -mod tests { - - use std::fmt; - use std::fmt::Debug; - use std::fmt::Formatter; - use std::marker::Send; - use std::sync::Arc; - - use allocative::Allocative; - use async_trait::async_trait; - use buck2_artifact::deferred::data::DeferredData; - use buck2_artifact::deferred::id::DeferredId; - use buck2_artifact::deferred::key::DeferredKey; - use buck2_core::base_deferred_key::BaseDeferredKey; - use buck2_core::configuration::data::ConfigurationData; - use buck2_core::fs::paths::abs_norm_path::AbsNormPath; - use buck2_core::fs::project::ProjectRoot; - use buck2_core::target::configured_target_label::ConfiguredTargetLabel; - use buck2_execute::digest_config::DigestConfig; - use dice::CancellationContext; - use dice::DetectCycles; - use dice::Dice; - use dice::DiceComputations; - use dice::DiceTransaction; - use dupe::Dupe; - use indexmap::indexset; - use indexmap::IndexSet; - - use super::AnyValue; - use super::TrivialDeferred; - use crate::deferred::types::testing::DeferredValueAnyExt; - use crate::deferred::types::BaseKey; - use 
crate::deferred::types::Deferred; - use crate::deferred::types::DeferredAny; - use crate::deferred::types::DeferredCtx; - use crate::deferred::types::DeferredInput; - use crate::deferred::types::DeferredRegistry; - use crate::deferred::types::DeferredTable; - use crate::deferred::types::DeferredValue; - use crate::deferred::types::DeferredValueAny; - use crate::deferred::types::DeferredValueAnyReady; - use crate::deferred::types::ResolveDeferredCtx; - - #[derive(Clone, PartialEq, Eq, Allocative)] - #[allocative(bound = "")] - struct FakeDeferred { - inputs: IndexSet, - #[allocative(skip)] - val: T, - } - - impl Debug for FakeDeferred { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "FakeDeferred") - } - } - - impl provider::Provider for FakeDeferred { - fn provide<'a>(&'a self, _demand: &mut provider::Demand<'a>) {} - } - - #[async_trait] - impl Deferred for FakeDeferred { - type Output = T; - - fn inputs(&self) -> &IndexSet { - &self.inputs - } - - async fn execute( - &self, - _ctx: &mut dyn DeferredCtx, - _dice: &mut DiceComputations, - ) -> anyhow::Result> { - Ok(DeferredValue::Ready(self.val.clone())) - } - } - - impl TrivialDeferred for FakeDeferred { - fn as_any_value(&self) -> &dyn AnyValue { - self - } - - fn provide<'a>(&'a self, _demand: &mut provider::Demand<'a>) {} - } - - #[derive(Clone, Debug, PartialEq, Eq, Allocative)] - #[allocative(bound = "")] - struct DeferringDeferred { - inputs: IndexSet, - #[allocative(skip)] - defer: FakeDeferred, - } - - impl provider::Provider - for DeferringDeferred - { - fn provide<'a>(&'a self, _demand: &mut provider::Demand<'a>) {} - } - - #[async_trait] - impl Deferred for DeferringDeferred { - type Output = T; - - fn inputs(&self) -> &IndexSet { - &self.inputs - } - - async fn execute( - &self, - ctx: &mut dyn DeferredCtx, - _dice: &mut DiceComputations, - ) -> anyhow::Result> { - Ok(DeferredValue::Deferred( - ctx.registry().defer(self.defer.clone()), - )) - } - } - - fn make_resolved( - data: &DeferredData, - deferred: &FakeDeferred, - ) -> (DeferredKey, DeferredValueAnyReady) { - ( - data.deferred_key().dupe(), - DeferredValueAnyReady::AnyValue(Arc::new(deferred.val.clone())), - ) - } - - fn dummy_base() -> BaseDeferredKey { - BaseDeferredKey::TargetLabel(ConfiguredTargetLabel::testing_parse( - "cell//pkg:foo", - ConfigurationData::testing_new(), - )) - } - - fn dummy_project_filesystem() -> ProjectRoot { - let cwd = if cfg!(windows) { - AbsNormPath::new("c:/tmp").unwrap().to_owned() - } else { - AbsNormPath::new("/dev/null").unwrap().to_owned() - }; - ProjectRoot::new_unchecked(cwd) - } - - async fn dummy_dice_transaction() -> anyhow::Result { - let dice = Dice::modern().build(DetectCycles::Enabled); - let res = dice.updater().commit().await; - Ok(res) - } - - #[tokio::test] - async fn register_deferred() -> anyhow::Result<()> { - let target = dummy_base(); - let mut registry = DeferredRegistry::new(BaseKey::Base(target.dupe())); - - let deferred = FakeDeferred { - inputs: IndexSet::new(), - val: 2, - }; - - let deferred_data = registry.defer(deferred); - - let result = registry.take_result()?; - - let mut ctx = DeferredRegistry::new(BaseKey::Deferred(Arc::new( - deferred_data.deferred_key().dupe(), - ))); - - let mut dummy_dice_transaction = dummy_dice_transaction().await?; - - CancellationContext::testing() - .with_structured_cancellation(|observer| async move { - assert_eq!( - *result - .get(deferred_data.deferred_key().id().as_usize()) - .unwrap() - .execute( - &mut ResolveDeferredCtx::new( - 
deferred_data.deferred_key().dupe(), - Default::default(), - Default::default(), - Default::default(), - &mut ctx, - dummy_project_filesystem(), - DigestConfig::testing_default(), - observer - ), - &mut dummy_dice_transaction - ) - .await - .unwrap() - .assert_ready() - .resolve(&deferred_data)?, - 2 - ); - Ok(()) - }) - .await - } - - #[tokio::test] - async fn mapping_async_data() -> anyhow::Result<()> { - let base = BaseDeferredKey::TargetLabel(ConfiguredTargetLabel::testing_parse( - "cell//pkg:foo", - ConfigurationData::testing_new(), - )); - let mut registry = DeferredRegistry::new(BaseKey::Base(base.dupe())); - - let deferred = FakeDeferred { - inputs: IndexSet::new(), - val: 0, - }; - - let deferred_data = registry.defer(deferred.clone()); - let mapped = registry.map(&deferred_data, |x, _ctx| DeferredValue::Ready(x + 1)); - - let result = registry.take_result()?; - let mapped_deferred = result.get(mapped.deferred_key().id().as_usize()).unwrap(); - - assert_eq!( - mapped_deferred.inputs(), - &indexset![DeferredInput::Deferred(deferred_data.deferred_key().dupe())] - ); - - let mut registry = DeferredRegistry::new(BaseKey::Deferred(Arc::new( - deferred_data.deferred_key().dupe(), - ))); - - let mut dummy_dice_transaction = dummy_dice_transaction().await?; - - CancellationContext::testing() - .with_structured_cancellation(|observer| async move { - let mut resolved = ResolveDeferredCtx::new( - deferred_data.deferred_key().dupe(), - Default::default(), - vec![make_resolved(&deferred_data, &deferred)] - .into_iter() - .collect(), - Default::default(), - &mut registry, - dummy_project_filesystem(), - DigestConfig::testing_default(), - observer, - ); - - assert_eq!( - *mapped_deferred - .execute(&mut resolved, &mut dummy_dice_transaction) - .await - .unwrap() - .assert_ready() - .downcast::()?, - 1 - ); - Ok(()) - }) - .await - } - - #[tokio::test] - async fn register_nested_deferred() -> anyhow::Result<()> { - let target = - ConfiguredTargetLabel::testing_parse("cell//pkg:foo", ConfigurationData::testing_new()); - let id = DeferredId { - id: 1, - trivial: false, - }; - - let base = DeferredKey::Base(BaseDeferredKey::TargetLabel(target.dupe()), id); - let mut registry = DeferredRegistry::new(BaseKey::Deferred(Arc::new(base))); - - let deferred = FakeDeferred { - inputs: IndexSet::new(), - val: 2, - }; - - let deferring_deferred = DeferringDeferred { - inputs: Default::default(), - defer: deferred, - }; - - let deferring_deferred_data = registry.defer(deferring_deferred); - let result = registry.take_result()?; - - let mut registry = DeferredRegistry::new(BaseKey::Deferred(Arc::new( - deferring_deferred_data.deferred_key().dupe(), - ))); - - let mut dummy_dice_transaction = dummy_dice_transaction().await?; - - CancellationContext::testing() - .with_structured_cancellation(|observer| async move { - let exec_result = result - .get(deferring_deferred_data.deferred_key().id().as_usize()) - .unwrap() - .execute( - &mut ResolveDeferredCtx::new( - deferring_deferred_data.deferred_key().dupe(), - Default::default(), - Default::default(), - Default::default(), - &mut registry, - dummy_project_filesystem(), - DigestConfig::testing_default(), - observer.dupe(), - ), - &mut dummy_dice_transaction, - ) - .await - .unwrap(); - - let deferred_key = match exec_result { - DeferredValueAny::Ready(_) => panic!("expected a deferred"), - DeferredValueAny::Deferred(deferred) => deferred, - }; - - assert_eq!( - deferred_key, - DeferredKey::Deferred( - Arc::new(deferring_deferred_data.deferred_key().dupe()), - 
DeferredId { - id: 0, - trivial: false - } - ) - ); - - let result = registry.take_result()?; - let deferred = result.get(deferred_key.id().as_usize()).unwrap(); - - let mut registry = - DeferredRegistry::new(BaseKey::Deferred(Arc::new(deferred_key.dupe()))); - assert_eq!( - *deferred - .execute( - &mut ResolveDeferredCtx::new( - deferred_key, - Default::default(), - Default::default(), - Default::default(), - &mut registry, - dummy_project_filesystem(), - DigestConfig::testing_default(), - observer, - ), - &mut dummy_dice_transaction - ) - .await - .unwrap() - .assert_ready() - .downcast::()?, - 2 - ); - - Ok(()) - }) - .await - } - - #[tokio::test] - async fn reserving_deferred() -> anyhow::Result<()> { - let base = BaseDeferredKey::TargetLabel(ConfiguredTargetLabel::testing_parse( - "cell//pkg:foo", - ConfigurationData::testing_new(), - )); - let mut registry = DeferredRegistry::new(BaseKey::Base(base)); - - let deferred = FakeDeferred { - inputs: IndexSet::new(), - val: 2, - }; - - let reserved = registry.reserve(); - let reserved_deferred_data = reserved.data().dupe(); - - let deferred_data = registry.bind(reserved, deferred); - - assert_eq!(deferred_data, reserved_deferred_data); - - let result = registry.take_result()?; - - let mut registry = DeferredRegistry::new(BaseKey::Deferred(Arc::new( - deferred_data.deferred_key().dupe(), - ))); - - let mut dummy_dice_transaction = dummy_dice_transaction().await?; - - let key = deferred_data.deferred_key().dupe(); - - CancellationContext::testing() - .with_structured_cancellation(|observer| async move { - assert_eq!( - *result - .get(deferred_data.deferred_key().id().as_usize()) - .unwrap() - .execute( - &mut ResolveDeferredCtx::new( - key, - Default::default(), - Default::default(), - Default::default(), - &mut registry, - dummy_project_filesystem(), - DigestConfig::testing_default(), - observer, - ), - &mut dummy_dice_transaction, - ) - .await - .unwrap() - .assert_ready() - .resolve(&deferred_data)?, - 2 - ); - - Ok(()) - }) - .await - } - - #[test] - fn reserving_deferred_unbound() { - let base = BaseDeferredKey::TargetLabel(ConfiguredTargetLabel::testing_parse( - "cell//pkg:foo", - ConfigurationData::testing_new(), - )); - let mut registry = DeferredRegistry::new(BaseKey::Base(base)); - - let _reserved = registry.reserve::>(); - let _reserved1 = registry.reserve::>(); - - assert_eq!(registry.take_result().is_err(), true); - } - - #[test] - fn trivial_deferred() -> anyhow::Result<()> { - let base = BaseDeferredKey::TargetLabel(ConfiguredTargetLabel::testing_parse( - "cell//pkg:foo", - ConfigurationData::testing_new(), - )); - let mut registry = DeferredRegistry::new(BaseKey::Base(base)); - - let deferred0 = FakeDeferred { - inputs: IndexSet::new(), - val: 123, - }; - - let deferred1 = FakeDeferred { - inputs: IndexSet::new(), - val: 456, - }; - let deferred_data0 = registry.defer_trivial(deferred0.clone()); - let deferred_data1 = registry.reserve_trivial(); - let deferred_data1 = registry.bind_trivial(deferred_data1, deferred1.clone()); - - let result = DeferredTable::new(registry.take_result()?); - - assert_eq!( - *DeferredValueAnyReady::TrivialDeferred( - result - .lookup_deferred(deferred_data0.deferred_key().id())? - .as_trivial() - .unwrap() - .dupe() - ) - .resolve(&deferred_data0)?, - deferred0 - ); - - assert_eq!( - *DeferredValueAnyReady::TrivialDeferred( - result - .lookup_deferred(deferred_data1.deferred_key().id())? 
- .as_trivial() - .unwrap() - .dupe() - ) - .resolve(&deferred_data1)?, - deferred1 - ); - - Ok(()) - } -} diff --git a/app/buck2_build_api/src/dynamic.rs b/app/buck2_build_api/src/dynamic.rs new file mode 100644 index 0000000000000..664f09a75de27 --- /dev/null +++ b/app/buck2_build_api/src/dynamic.rs @@ -0,0 +1,12 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub mod calculation; +pub mod lambda; +pub mod storage; diff --git a/app/buck2_build_api/src/dynamic/bxl.rs b/app/buck2_build_api/src/dynamic/bxl.rs deleted file mode 100644 index ce609d93af017..0000000000000 --- a/app/buck2_build_api/src/dynamic/bxl.rs +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::pin::Pin; -use std::sync::Arc; - -use buck2_artifact::actions::key::ActionKey; -use buck2_core::base_deferred_key::BaseDeferredKeyDyn; -use buck2_util::late_binding::LateBinding; -use dice::DiceComputations; -use futures::Future; - -use super::deferred::DynamicLambda; -use crate::deferred::types::DeferredCtx; - -pub static EVAL_BXL_FOR_DYNAMIC_OUTPUT: LateBinding< - for<'v> fn( - &'v Arc, - &'v DynamicLambda, - &'v mut dyn DeferredCtx, - &'v mut DiceComputations, - ) -> Pin>> + Send + 'v>>, -> = LateBinding::new("EVAL_BXL_FOR_DYNAMIC_OUTPUT"); - -#[allow(unused)] -pub(crate) async fn eval_bxl_for_dynamic_output<'v>( - base_deferred_key: &'v Arc, - dynamic_lambda: &'v DynamicLambda, - deferred_ctx: &'v mut dyn DeferredCtx, - dice_ctx: &'v mut DiceComputations, -) -> anyhow::Result> { - (EVAL_BXL_FOR_DYNAMIC_OUTPUT.get()?)(base_deferred_key, dynamic_lambda, deferred_ctx, dice_ctx) - .await -} diff --git a/app/buck2_build_api/src/dynamic/calculation.rs b/app/buck2_build_api/src/dynamic/calculation.rs new file mode 100644 index 0000000000000..47ddbfc741d3f --- /dev/null +++ b/app/buck2_build_api/src/dynamic/calculation.rs @@ -0,0 +1,51 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use std::sync::Arc; + +use allocative::Allocative; +use async_trait::async_trait; +use buck2_artifact::dynamic::DynamicLambdaResultsKey; +use buck2_util::late_binding::LateBinding; +use dice::DiceComputations; + +use crate::analysis::registry::RecordedAnalysisValues; + +#[derive(Allocative)] +pub struct DynamicLambdaResult { + pub analysis_values: RecordedAnalysisValues, +} + +impl DynamicLambdaResult { + pub(crate) fn analysis_values(&self) -> &crate::analysis::registry::RecordedAnalysisValues { + &self.analysis_values + } +} + +#[async_trait] +pub trait DynamicLambdaCalculation: Sync + 'static { + async fn dynamic_lambda_result( + &self, + dice: &mut DiceComputations<'_>, + key: &DynamicLambdaResultsKey, + ) -> anyhow::Result>; +} + +pub static DYNAMIC_LAMBDA_CALCULATION_IMPL: LateBinding<&'static dyn DynamicLambdaCalculation> = + LateBinding::new("DYNAMIC_LAMBDA_CALCULATION_IMPL"); + +pub async fn dynamic_lambda_result( + dice: &mut DiceComputations<'_>, + key: &DynamicLambdaResultsKey, +) -> anyhow::Result> { + DYNAMIC_LAMBDA_CALCULATION_IMPL + .get()? + .dynamic_lambda_result(dice, key) + .await +} diff --git a/app/buck2_build_api/src/dynamic/deferred.rs b/app/buck2_build_api/src/dynamic/deferred.rs deleted file mode 100644 index 91ad2a2dea447..0000000000000 --- a/app/buck2_build_api/src/dynamic/deferred.rs +++ /dev/null @@ -1,378 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::mem; -use std::sync::Arc; - -use allocative::Allocative; -use async_trait::async_trait; -use buck2_artifact::actions::key::ActionKey; -use buck2_artifact::artifact::artifact_type::Artifact; -use buck2_artifact::artifact::artifact_type::DeclaredArtifact; -use buck2_artifact::artifact::build_artifact::BuildArtifact; -use buck2_artifact::artifact::provide_outputs::ProvideOutputs; -use buck2_artifact::deferred::data::DeferredData; -use buck2_core::base_deferred_key::BaseDeferredKey; -use buck2_core::provider::label::ConfiguredProvidersLabel; -use buck2_core::provider::label::ProvidersName; -use buck2_events::dispatch::get_dispatcher; -use buck2_execute::digest_config::DigestConfig; -use buck2_interpreter::print_handler::EventDispatcherPrintHandler; -use buck2_interpreter::types::configured_providers_label::StarlarkConfiguredProvidersLabel; -use dice::DiceComputations; -use dupe::Dupe; -use gazebo::prelude::*; -use indexmap::indexset; -use indexmap::IndexSet; -use starlark::collections::SmallMap; -use starlark::environment::Module; -use starlark::eval::Evaluator; -use starlark::values::dict::Dict; -use starlark::values::tuple::TupleRef; -use starlark::values::OwnedFrozenValue; -use starlark::values::Value; -use starlark::values::ValueTypedComplex; -use thiserror::Error; - -use crate::actions::key::ActionKeyExt; -use crate::actions::RegisteredAction; -use crate::analysis::registry::AnalysisRegistry; -use crate::deferred::types::BaseKey; -use crate::deferred::types::Deferred; -use crate::deferred::types::DeferredCtx; -use crate::deferred::types::DeferredInput; -use crate::deferred::types::DeferredRegistry; -use crate::deferred::types::DeferredValue; -use crate::dynamic::bxl::eval_bxl_for_dynamic_output; -use crate::interpreter::rule_defs::artifact::associated::AssociatedArtifacts; -use 
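`DYNAMIC_LAMBDA_CALCULATION_IMPL` is the seam that lets this crate call into the crate that can actually evaluate a lambda, without a dependency cycle. A sketch of how a downstream crate might plug itself in, assuming the `init`/`get` contract used above and reconstructing the elided return type as `Arc<DynamicLambdaResult>`:

    use std::sync::Arc;

    use async_trait::async_trait;
    use buck2_artifact::dynamic::DynamicLambdaResultsKey;
    use dice::DiceComputations;

    struct DynamicLambdaCalculationImpl;

    #[async_trait]
    impl DynamicLambdaCalculation for DynamicLambdaCalculationImpl {
        async fn dynamic_lambda_result(
            &self,
            dice: &mut DiceComputations<'_>,
            key: &DynamicLambdaResultsKey,
        ) -> anyhow::Result<Arc<DynamicLambdaResult>> {
            // The real implementation evaluates the lambda under DICE; elided.
            let _ = (dice, key);
            anyhow::bail!("sketch only")
        }
    }

    pub(crate) fn init_late_bindings() {
        // Called once at startup; the free dynamic_lambda_result() then routes here.
        DYNAMIC_LAMBDA_CALCULATION_IMPL.init(&DynamicLambdaCalculationImpl);
    }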
crate::interpreter::rule_defs::artifact::StarlarkArtifact; -use crate::interpreter::rule_defs::artifact::StarlarkArtifactValue; -use crate::interpreter::rule_defs::artifact::StarlarkDeclaredArtifact; -use crate::interpreter::rule_defs::context::AnalysisContext; -use crate::interpreter::rule_defs::plugins::AnalysisPlugins; - -/// The artifacts that are returned are dynamic actions, which depend on the `DynamicLambda` -/// to get their real `RegisteredAction`. -#[derive(Clone, Debug, Allocative)] -pub(crate) struct DynamicAction { - // A singleton pointing at a DynamicLambda - inputs: IndexSet, - index: usize, -} - -impl DynamicAction { - pub fn new(deferred: &DeferredData, index: usize) -> Self { - Self { - inputs: indexset![DeferredInput::Deferred(deferred.deferred_key().dupe())], - index, - } - } -} - -/// The lambda captured by `dynamic_output`, alongside the other required data. -#[derive(Clone, Debug, Allocative)] -pub struct DynamicLambda { - /// the owner that defined this lambda - owner: BaseDeferredKey, - /// Things required by the lambda (wrapped in DeferredInput) - dynamic: IndexSet, - /// Things I am allowed to use as inputs, but don't wait for - inputs: IndexSet, - /// Things I produce - outputs: Vec, - /// A Starlark pair of the attributes and a lambda function that binds the outputs given a context - attributes_lambda: OwnedFrozenValue, -} - -impl DynamicLambda { - pub(crate) fn new( - owner: BaseDeferredKey, - dynamic: IndexSet, - inputs: IndexSet, - outputs: Vec, - ) -> Self { - let mut depends = IndexSet::with_capacity(dynamic.len() + 1); - match &owner { - BaseDeferredKey::TargetLabel(target) => { - depends.insert(DeferredInput::ConfiguredTarget(target.dupe())); - } - BaseDeferredKey::BxlLabel(_) => { - // Execution platform resolution is handled when we execute the DynamicLambda - } - BaseDeferredKey::AnonTarget(_) => { - // This will return an error later, so doesn't need to have the dependency - } - } - depends.extend(dynamic.into_iter().map(DeferredInput::MaterializedArtifact)); - Self { - owner, - dynamic: depends, - inputs, - outputs, - attributes_lambda: Default::default(), - } - } - - pub(crate) fn bind(&mut self, attributes_lambda: OwnedFrozenValue) { - self.attributes_lambda = attributes_lambda; - } -} - -/// The `Output` from `DynamicLambda`. -#[derive(Clone, Debug, Allocative)] -pub struct DynamicLambdaOutput { - /// The actions the DynamicLambda produces, in the right order. - /// `DynamicAction.index` is an index into this Vec. - output: Vec, -} - -impl provider::Provider for DynamicAction { - fn provide<'a>(&'a self, _demand: &mut provider::Demand<'a>) {} -} - -#[async_trait] -impl Deferred for DynamicAction { - type Output = Arc; - - fn inputs(&self) -> &IndexSet { - &self.inputs - } - - async fn execute( - &self, - ctx: &mut dyn DeferredCtx, - _dice: &mut DiceComputations, - ) -> anyhow::Result> { - let id = match self.inputs.iter().into_singleton() { - Some(DeferredInput::Deferred(x)) => x, - _ => unreachable!("DynamicAction must have a single Deferred as as inputs"), - }; - let val = ctx.get_deferred_data(id).unwrap(); - let key = val - .downcast::()? 
- .output - .get(self.index) - .map_or_else( - || { - Err(anyhow::anyhow!( - "Unexpected index in DynamicAction (internal error)" - )) - }, - Ok, - )?; - Ok(DeferredValue::Deferred(key.deferred_data().dupe())) - } -} - -#[derive(Debug, Error)] -enum DynamicLambdaError { - #[error("dynamic_output and anon_target cannot be used together (yet)")] - AnonTargetIncompatible, -} - -impl provider::Provider for DynamicLambda { - fn provide<'a>(&'a self, demand: &mut provider::Demand<'a>) { - demand.provide_value_with(|| ProvideOutputs(Ok(self.outputs.clone()))); - } -} - -#[async_trait] -impl Deferred for DynamicLambda { - type Output = DynamicLambdaOutput; - - fn inputs(&self) -> &IndexSet { - &self.dynamic - } - - async fn execute( - &self, - deferred_ctx: &mut dyn DeferredCtx, - dice: &mut DiceComputations, - ) -> anyhow::Result> { - let output = if let BaseDeferredKey::BxlLabel(key) = &self.owner { - eval_bxl_for_dynamic_output(key, self, deferred_ctx, dice).await - } else { - let env = Module::new(); - - let (analysis_registry, declared_outputs) = { - let heap = env.heap(); - let print = EventDispatcherPrintHandler(get_dispatcher()); - let mut eval = Evaluator::new(&env); - eval.set_print_handler(&print); - let dynamic_lambda_ctx_data = dynamic_lambda_ctx_data(self, deferred_ctx, &env)?; - let ctx = heap.alloc_typed(AnalysisContext::new( - heap, - dynamic_lambda_ctx_data.attributes, - match &self.owner { - BaseDeferredKey::TargetLabel(target) => Some(heap.alloc_typed( - StarlarkConfiguredProvidersLabel::new(ConfiguredProvidersLabel::new( - target.dupe(), - ProvidersName::Default, - )), - )), - BaseDeferredKey::BxlLabel(target) | BaseDeferredKey::AnonTarget(target) => { - target.configured_label().map(|configured_target_label| { - heap.alloc_typed(StarlarkConfiguredProvidersLabel::new( - ConfiguredProvidersLabel::new( - configured_target_label, - ProvidersName::Default, - ), - )) - }) - } - }, - dynamic_lambda_ctx_data.plugins, - dynamic_lambda_ctx_data.registry, - dynamic_lambda_ctx_data.digest_config, - )); - - eval.eval_function( - dynamic_lambda_ctx_data.lambda, - &[ - ctx.to_value(), - dynamic_lambda_ctx_data.artifacts, - dynamic_lambda_ctx_data.outputs, - ], - &[], - )?; - ctx.assert_no_promises()?; - - (ctx.take_state(), dynamic_lambda_ctx_data.declared_outputs) - }; - - let (_frozen_env, deferred) = analysis_registry.finalize(&env)?(env)?; - let _fake_registry = mem::replace(deferred_ctx.registry(), deferred); - - // TODO(ndmitchell): Check we don't use anything not in `inputs` - - let output: anyhow::Result> = declared_outputs - .into_iter() - .map(|x| anyhow::Ok(x.ensure_bound()?.action_key().dupe())) - .collect(); - output - }; - Ok(DeferredValue::Ready(DynamicLambdaOutput { - output: output?, - })) - } - - fn span(&self) -> Option { - let owner = self.owner.to_proto().into(); - Some(buck2_data::DynamicLambdaStart { owner: Some(owner) }.into()) - } -} - -/// Data used to construct an `AnalysisContext` or `BxlContext` for the dynamic lambda. -pub struct DynamicLambdaCtxData<'v> { - pub attributes: Value<'v>, - pub lambda: Value<'v>, - pub outputs: Value<'v>, - pub plugins: ValueTypedComplex<'v, AnalysisPlugins<'v>>, - pub artifacts: Value<'v>, - pub key: &'v BaseDeferredKey, - pub digest_config: DigestConfig, - pub declared_outputs: IndexSet, - pub registry: AnalysisRegistry<'v>, -} - -/// Sets up the data needed to create the dynamic lambda ctx and evaluate the lambda. 
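At its core, the `DynamicLambda::execute` path above is the standard starlark-rust evaluate-then-call pattern. A standalone miniature (a recent starlark-rust API is assumed; error plumbing is elided with unwraps):

    use starlark::environment::{Globals, Module};
    use starlark::eval::Evaluator;
    use starlark::syntax::{AstModule, Dialect};

    fn call_starlark_lambda() {
        let ast = AstModule::parse(
            "lambda.star",
            "def f(x): return x + 1".to_owned(),
            &Dialect::Standard,
        )
        .unwrap();
        let module = Module::new();
        let mut eval = Evaluator::new(&module);
        // Evaluate the module to define `f`...
        eval.eval_module(ast, &Globals::standard()).unwrap();
        // ...then call it, much as execute() calls dynamic_lambda_ctx_data.lambda.
        let f = module.get("f").unwrap();
        let result = eval
            .eval_function(f, &[module.heap().alloc(41)], &[])
            .unwrap();
        assert_eq!(result.to_string(), "42");
    }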
-pub fn dynamic_lambda_ctx_data<'v>( - dynamic_lambda: &'v DynamicLambda, - deferred_ctx: &mut dyn DeferredCtx, - env: &'v Module, -) -> anyhow::Result> { - let heap = env.heap(); - let mut outputs = SmallMap::with_capacity(dynamic_lambda.outputs.len()); - let mut declared_outputs = IndexSet::with_capacity(dynamic_lambda.outputs.len()); - - let data = TupleRef::from_value( - dynamic_lambda - .attributes_lambda - .owned_value(env.frozen_heap()), - ) - .unwrap(); - assert_eq!(data.len(), 3); - let attributes = data.content()[0]; - let plugins = ValueTypedComplex::new(data.content()[1]).unwrap(); - let lambda = data.content()[2]; - - let execution_platform = { - match &dynamic_lambda.owner { - BaseDeferredKey::TargetLabel(target) => { - let configured_target = deferred_ctx.get_configured_target(target).unwrap(); - - configured_target.execution_platform_resolution().dupe() - } - BaseDeferredKey::BxlLabel(k) => k.execution_platform_resolution().clone(), - BaseDeferredKey::AnonTarget(_) => { - return Err(DynamicLambdaError::AnonTargetIncompatible.into()); - } - } - }; - - // The DeferredCtx has a registry it wants us to use as &mut. - // The AnalysisRegistry wants ownership of a registry. - // To overcome the difference, we create a fake registry, swap it with the one in deferred, - // and swap back after AnalysisRegistry completes. - - let fake_registry = DeferredRegistry::new(BaseKey::Base(dynamic_lambda.owner.dupe())); - - let deferred = mem::replace(deferred_ctx.registry(), fake_registry); - let mut registry = AnalysisRegistry::new_from_owner_and_deferred( - dynamic_lambda.owner.dupe(), - execution_platform, - deferred, - )?; - registry.set_action_key(Arc::from(deferred_ctx.get_action_key())); - - let mut artifacts = SmallMap::with_capacity(dynamic_lambda.inputs.len()); - let fs = deferred_ctx.project_filesystem(); - for x in &dynamic_lambda.dynamic { - let x = match x { - DeferredInput::MaterializedArtifact(x) => x, - DeferredInput::ConfiguredTarget(_) => continue, - _ => unreachable!("DynamicLambda only depends on artifact and target"), - }; - let k = heap.alloc(StarlarkArtifact::new(x.dupe())); - let path = deferred_ctx.get_materialized_artifact(x).unwrap(); - let v = heap.alloc(StarlarkArtifactValue::new( - x.dupe(), - path.to_owned(), - fs.dupe(), - )); - artifacts.insert_hashed(k.get_hashed()?, v); - } - - for x in &dynamic_lambda.outputs { - let k = heap.alloc(StarlarkArtifact::new(Artifact::from(x.dupe()))); - let declared = registry.declare_dynamic_output(x.get_path().dupe(), x.output_type()); - declared_outputs.insert(declared.dupe()); - let v = heap.alloc(StarlarkDeclaredArtifact::new( - None, - declared, - AssociatedArtifacts::new(), - )); - outputs.insert_hashed(k.get_hashed()?, v); - } - - let artifacts = Dict::new(artifacts); - let outputs = Dict::new(outputs); - - Ok(DynamicLambdaCtxData { - attributes, - lambda, - plugins, - outputs: heap.alloc(outputs), - artifacts: heap.alloc(artifacts), - key: &dynamic_lambda.owner, - digest_config: deferred_ctx.digest_config(), - declared_outputs, - registry, - }) -} diff --git a/app/buck2_build_api/src/dynamic/lambda.rs b/app/buck2_build_api/src/dynamic/lambda.rs new file mode 100644 index 0000000000000..63e09ef58f46a --- /dev/null +++ b/app/buck2_build_api/src/dynamic/lambda.rs @@ -0,0 +1,14 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
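The fake-registry dance in `dynamic_lambda_ctx_data` is the classic take-by-placeholder trick for getting ownership out of a `&mut`. Reduced to std (`mem::take` is the `Default`-based special case of the `mem::replace` used above, which constructs its placeholder by hand because `DeferredRegistry` has no `Default`):

    use std::mem;

    fn with_owned<T: Default, R>(slot: &mut T, f: impl FnOnce(T) -> (T, R)) -> R {
        // Park a cheap placeholder (the "fake registry") behind the &mut.
        let owned = mem::take(slot);
        // Operate on the real value by value...
        let (restored, result) = f(owned);
        // ...and swap it back before the borrow ends.
        *slot = restored;
        result
    }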
+ * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +#[derive(Debug, buck2_error::Error)] +pub enum DynamicLambdaError { + #[error("dynamic_output and anon_target cannot be used together (yet)")] + AnonTargetIncompatible, +} diff --git a/app/buck2_build_api/src/dynamic/mod.rs b/app/buck2_build_api/src/dynamic/mod.rs deleted file mode 100644 index a4f1dcdbe82fa..0000000000000 --- a/app/buck2_build_api/src/dynamic/mod.rs +++ /dev/null @@ -1,12 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -pub mod bxl; -pub mod deferred; -pub(crate) mod registry; diff --git a/app/buck2_build_api/src/dynamic/registry.rs b/app/buck2_build_api/src/dynamic/registry.rs deleted file mode 100644 index 1fe3f7a88e568..0000000000000 --- a/app/buck2_build_api/src/dynamic/registry.rs +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use allocative::Allocative; -use anyhow::Context; -use buck2_artifact::actions::key::ActionKey; -use buck2_artifact::artifact::artifact_type::Artifact; -use buck2_artifact::artifact::artifact_type::OutputArtifact; -use buck2_artifact::deferred::id::DeferredId; -use buck2_core::base_deferred_key::BaseDeferredKey; -use dupe::Dupe; -use indexmap::IndexSet; - -use crate::actions::key::ActionKeyExt; -use crate::analysis::registry::AnalysisValueFetcher; -use crate::deferred::types::DeferredRegistry; -use crate::deferred::types::ReservedDeferredData; -use crate::dynamic::deferred::DynamicAction; -use crate::dynamic::deferred::DynamicLambda; -use crate::dynamic::deferred::DynamicLambdaOutput; - -#[derive(Allocative)] -pub(crate) struct DynamicRegistry { - owner: BaseDeferredKey, - pending: Vec<(ReservedDeferredData, DynamicLambda)>, -} - -impl DynamicRegistry { - pub fn new(owner: BaseDeferredKey) -> Self { - Self { - owner, - pending: Vec::new(), - } - } - - pub fn register( - &mut self, - dynamic: IndexSet, - inputs: IndexSet, - outputs: IndexSet, - registry: &mut DeferredRegistry, - ) -> anyhow::Result { - let reserved = registry.reserve::(); - let outputs = outputs - .iter() - .enumerate() - .map(|(i, output)| { - let output_id = registry.defer(DynamicAction::new(reserved.data(), i)); - let bound = output - .bind(ActionKey::new(output_id))? - .as_base_artifact() - .dupe(); - Ok(bound) - }) - .collect::>()?; - let lambda = DynamicLambda::new(self.owner.dupe(), dynamic, inputs, outputs); - let lambda_id = reserved.data().deferred_key().id(); - self.pending.push((reserved, lambda)); - Ok(lambda_id) - } - - pub fn ensure_bound( - self, - registry: &mut DeferredRegistry, - analysis_value_fetcher: &AnalysisValueFetcher, - ) -> anyhow::Result<()> { - for (key, mut data) in self.pending { - let id = key.data().deferred_key().id(); - - let fv = analysis_value_fetcher - .get(id)? 
- .with_context(|| format!("Key is missing in AnalysisValueFetcher: {:?}", id))?; - - data.bind(fv); - registry.bind(key, data); - } - Ok(()) - } -} diff --git a/app/buck2_build_api/src/dynamic/storage.rs b/app/buck2_build_api/src/dynamic/storage.rs new file mode 100644 index 0000000000000..7b0f5a90d69fe --- /dev/null +++ b/app/buck2_build_api/src/dynamic/storage.rs @@ -0,0 +1,47 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::fmt::Debug; + +use allocative::Allocative; +use buck2_artifact::artifact::build_artifact::BuildArtifact; +use buck2_artifact::deferred::key::DeferredHolderKey; +use buck2_util::late_binding::LateBinding; +use starlark::any::AnyLifetime; +use starlark::values::Freezer; +use starlark::values::Trace; + +pub trait DynamicLambdaParamsStorage<'v>: Trace<'v> + Debug + Allocative + Send + 'v { + fn as_any_mut(&mut self) -> &mut dyn AnyLifetime<'v>; + + fn freeze( + self: Box, + freezer: &Freezer, + ) -> anyhow::Result>; +} + +pub trait FrozenDynamicLambdaParamsStorage: Debug + Allocative + Send + Sync + 'static { + fn as_any(&self) -> &dyn AnyLifetime<'static>; + + fn iter_dynamic_lambda_outputs(&self) -> Box + Send + '_>; +} + +pub trait DynamicLambdaParamStorages: Send + Sync + 'static { + fn new_dynamic_lambda_params_storage<'v>( + &self, + self_key: DeferredHolderKey, + ) -> Box>; + fn new_frozen_dynamic_lambda_params_storage( + &self, + self_key: DeferredHolderKey, + ) -> Box; +} + +pub static DYNAMIC_LAMBDA_PARAMS_STORAGES: LateBinding<&'static dyn DynamicLambdaParamStorages> = + LateBinding::new("DYNAMIC_LAMBDA_PARAMS_STORAGES"); diff --git a/app/buck2_build_api/src/dynamic_value.rs b/app/buck2_build_api/src/dynamic_value.rs new file mode 100644 index 0000000000000..86cff76699f1a --- /dev/null +++ b/app/buck2_build_api/src/dynamic_value.rs @@ -0,0 +1,27 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use allocative::Allocative; +use buck2_artifact::dynamic::DynamicLambdaResultsKey; +use dupe::Dupe; + +#[derive( + Clone, + Dupe, + Eq, + PartialEq, + Hash, + Debug, + Allocative, + derive_more::Display +)] +#[display("{}", self.dynamic_lambda_results_key)] +pub struct DynamicValue { + pub dynamic_lambda_results_key: DynamicLambdaResultsKey, +} diff --git a/app/buck2_build_api/src/interpreter/mod.rs b/app/buck2_build_api/src/interpreter.rs similarity index 100% rename from app/buck2_build_api/src/interpreter/mod.rs rename to app/buck2_build_api/src/interpreter.rs diff --git a/app/buck2_build_api/src/interpreter/more.rs b/app/buck2_build_api/src/interpreter/more.rs index c9d6691780112..fec43cc765d10 100644 --- a/app/buck2_build_api/src/interpreter/more.rs +++ b/app/buck2_build_api/src/interpreter/more.rs @@ -7,9 +7,12 @@ * of this source tree. 
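A skeletal no-op implementation of the storage seam above, showing what a provider of `DYNAMIC_LAMBDA_PARAMS_STORAGES` must supply (hypothetical types; the boxed return types were stripped from this hunk and are reconstructed assumptions, and the `Trace`/`ProvidesStaticType` derives from starlark are assumed to apply to field-less structs):

    use allocative::Allocative;
    use buck2_artifact::artifact::build_artifact::BuildArtifact;
    use buck2_artifact::deferred::key::DeferredHolderKey;
    use starlark::any::{AnyLifetime, ProvidesStaticType};
    use starlark::values::{Freezer, Trace};

    #[derive(ProvidesStaticType, Trace, Debug, Allocative)]
    struct NoLambdaParams;

    impl<'v> DynamicLambdaParamsStorage<'v> for NoLambdaParams {
        fn as_any_mut(&mut self) -> &mut dyn AnyLifetime<'v> {
            self
        }
        fn freeze(
            self: Box<Self>,
            _freezer: &Freezer,
        ) -> anyhow::Result<Box<dyn FrozenDynamicLambdaParamsStorage>> {
            Ok(Box::new(FrozenNoLambdaParams))
        }
    }

    #[derive(ProvidesStaticType, Debug, Allocative)]
    struct FrozenNoLambdaParams;

    impl FrozenDynamicLambdaParamsStorage for FrozenNoLambdaParams {
        fn as_any(&self) -> &dyn AnyLifetime<'static> {
            self
        }
        fn iter_dynamic_lambda_outputs(
            &self,
        ) -> Box<dyn Iterator<Item = BuildArtifact> + Send + '_> {
            Box::new(std::iter::empty())
        }
    }

    struct NoLambdaParamStorages;

    impl DynamicLambdaParamStorages for NoLambdaParamStorages {
        fn new_dynamic_lambda_params_storage<'v>(
            &self,
            _self_key: DeferredHolderKey,
        ) -> Box<dyn DynamicLambdaParamsStorage<'v>> {
            Box::new(NoLambdaParams)
        }
        fn new_frozen_dynamic_lambda_params_storage(
            &self,
            _self_key: DeferredHolderKey,
        ) -> Box<dyn FrozenDynamicLambdaParamsStorage> {
            Box::new(FrozenNoLambdaParams)
        }
    }

    // Registered once at startup, mirroring the other late bindings in this diff:
    // DYNAMIC_LAMBDA_PARAMS_STORAGES.init(&NoLambdaParamStorages);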
*/ -use buck2_interpreter::functions::more::REGISTER_BUCK2_BUILD_API_GLOBALS; +use buck2_interpreter::downstream_crate_starlark_defs::REGISTER_BUCK2_BUILD_API_GLOBALS; +use buck2_interpreter::downstream_crate_starlark_defs::REGISTER_BUCK2_BUILD_API_INTERNALS; use starlark::environment::GlobalsBuilder; +use crate::actions::error_handler::register_action_error_handler_for_testing; +use crate::actions::error_handler::register_action_error_types; use crate::interpreter::rule_defs::artifact::artifact_type::register_artifact; use crate::interpreter::rule_defs::artifact::starlark_artifact_value::register_artifact_value; use crate::interpreter::rule_defs::artifact::starlark_output_artifact::register_output_artifact; @@ -22,9 +25,11 @@ use crate::interpreter::rule_defs::provider::collection::register_provider_colle use crate::interpreter::rule_defs::provider::dependency::register_dependency; use crate::interpreter::rule_defs::provider::registration::register_builtin_providers; use crate::interpreter::rule_defs::register_rule_defs; +use crate::interpreter::rule_defs::required_test_local_resource::register_required_test_local_resource; use crate::interpreter::rule_defs::resolved_macro::register_string_with_macros; use crate::interpreter::rule_defs::transitive_set::globals::register_transitive_set_types; use crate::interpreter::rule_defs::transitive_set::transitive_set_definition::register_transitive_set; +use crate::interpreter::rule_defs::validation_spec::register_validation_spec; fn register_build_api_globals(globals: &mut GlobalsBuilder) { register_builtin_providers(globals); @@ -42,8 +47,16 @@ fn register_build_api_globals(globals: &mut GlobalsBuilder) { register_artifact_tag(globals); register_artifact_value(globals); register_output_artifact(globals); + register_action_error_types(globals); + register_validation_spec(globals); + register_required_test_local_resource(globals); +} + +fn register_build_api_internals(globals: &mut GlobalsBuilder) { + register_action_error_handler_for_testing(globals); } pub(crate) fn init_register_build_api_globals() { REGISTER_BUCK2_BUILD_API_GLOBALS.init(register_build_api_globals); + REGISTER_BUCK2_BUILD_API_INTERNALS.init(register_build_api_internals); } diff --git a/app/buck2_build_api/src/interpreter/rule_defs.rs b/app/buck2_build_api/src/interpreter/rule_defs.rs new file mode 100644 index 0000000000000..a9d5dbbd31c4c --- /dev/null +++ b/app/buck2_build_api/src/interpreter/rule_defs.rs @@ -0,0 +1,32 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
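All of the `register_*` functions threaded through `register_build_api_globals` share one idiom: a `#[starlark_module]` function contributes definitions to a `GlobalsBuilder`. A toy example (illustration only; the global name is made up):

    use starlark::environment::GlobalsBuilder;
    use starlark::starlark_module;

    #[starlark_module]
    fn register_example(globals: &mut GlobalsBuilder) {
        /// Exposed to Starlark as `example_increment()`.
        fn example_increment(x: i32) -> anyhow::Result<i32> {
            Ok(x + 1)
        }
    }

    // Composed the same way register_build_api_globals composes its pieces:
    fn register_all(globals: &mut GlobalsBuilder) {
        register_example(globals);
    }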
+ */ + +use starlark::environment::GlobalsBuilder; + +use crate::interpreter::rule_defs::provider::registration::register_builtin_providers; + +pub mod artifact; +pub mod artifact_tagging; +pub mod cmd_args; +pub mod command_executor_config; +pub mod context; +pub mod digest_config; +pub mod label_relative_path; +pub mod plugins; +pub mod provider; +pub mod required_test_local_resource; +pub mod resolve_query_macro; +pub mod resolved_macro; +pub mod transitive_set; +pub mod validation_spec; + +pub fn register_rule_defs(globals: &mut GlobalsBuilder) { + cmd_args::register_cmd_args(globals); + register_builtin_providers(globals); +} diff --git a/app/buck2_build_api/src/interpreter/rule_defs/artifact.rs b/app/buck2_build_api/src/interpreter/rule_defs/artifact.rs new file mode 100644 index 0000000000000..0bb0d39d7d206 --- /dev/null +++ b/app/buck2_build_api/src/interpreter/rule_defs/artifact.rs @@ -0,0 +1,48 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub(crate) mod artifact_type; +pub mod associated; +pub(crate) mod methods; +pub mod output_artifact_like; +pub mod starlark_artifact; +pub mod starlark_artifact_like; +pub mod starlark_artifact_value; +pub mod starlark_declared_artifact; +pub mod starlark_output_artifact; +pub mod starlark_promise_artifact; +pub mod unpack_artifact; + +use std::fmt::Debug; + +use buck2_core::base_deferred_key::BaseDeferredKey; + +#[derive(Debug, buck2_error::Error)] +pub(crate) enum ArtifactError { + #[error("expected artifact {repr} to be used as the output of an action, but it was not")] + DeclaredArtifactWasNotBound { repr: String }, + #[error( + "attempted to use source artifact {repr} as the output of an action. Source \ + artifacts may not be outputs." 
+ )] + SourceArtifactAsOutput { repr: String }, + #[error( + "attempted to use artifact {artifact_repr} as the output of an action, but \ + it was already used by another action in {existing_owner}" + )] + BoundArtifactAsOutput { + artifact_repr: String, + existing_owner: BaseDeferredKey, + }, + #[error( + "attempted to use promise artifact {artifact_repr} as the output of an action, but \ + only declared artifacts can be used as an output" + )] + PromiseArtifactAsOutput { artifact_repr: String }, +} diff --git a/app/buck2_build_api/src/interpreter/rule_defs/artifact/artifact_type.rs b/app/buck2_build_api/src/interpreter/rule_defs/artifact/artifact_type.rs index a078838ebb29a..9b76e34d6b48d 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/artifact/artifact_type.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/artifact/artifact_type.rs @@ -12,7 +12,7 @@ use starlark::environment::GlobalsBuilder; use starlark::values::starlark_value_as_type::StarlarkValueAsType; -use crate::interpreter::rule_defs::artifact::StarlarkArtifact; +use crate::interpreter::rule_defs::artifact::starlark_artifact::StarlarkArtifact; #[starlark_module] pub(crate) fn register_artifact(globals: &mut GlobalsBuilder) { diff --git a/app/buck2_build_api/src/interpreter/rule_defs/artifact/methods.rs b/app/buck2_build_api/src/interpreter/rule_defs/artifact/methods.rs new file mode 100644 index 0000000000000..9a23c3a85c035 --- /dev/null +++ b/app/buck2_build_api/src/interpreter/rule_defs/artifact/methods.rs @@ -0,0 +1,158 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */
+
+use std::convert::Infallible;
+
+use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath;
+use buck2_interpreter::types::configured_providers_label::StarlarkConfiguredProvidersLabel;
+use starlark::environment::MethodsBuilder;
+use starlark::typing::Ty;
+use starlark::values::list::UnpackList;
+use starlark::values::none::NoneOr;
+use starlark::values::type_repr::StarlarkTypeRepr;
+use starlark::values::AllocValue;
+use starlark::values::Heap;
+use starlark::values::StringValue;
+use starlark::values::UnpackValue;
+use starlark::values::Value;
+use starlark::values::ValueOf;
+
+use crate::interpreter::rule_defs::artifact::starlark_artifact::StarlarkArtifact;
+use crate::interpreter::rule_defs::artifact::starlark_artifact_like::StarlarkArtifactLike;
+use crate::interpreter::rule_defs::artifact::starlark_artifact_like::ValueAsArtifactLike;
+use crate::interpreter::rule_defs::artifact::starlark_declared_artifact::StarlarkDeclaredArtifact;
+use crate::interpreter::rule_defs::artifact::starlark_output_artifact::StarlarkOutputArtifact;
+use crate::interpreter::rule_defs::artifact::starlark_promise_artifact::StarlarkPromiseArtifact;
+
+#[derive(StarlarkTypeRepr, UnpackValue)]
+pub enum EitherArtifactRef<'v> {
+    Artifact(&'v StarlarkArtifact),
+    DeclaredArtifact(&'v StarlarkDeclaredArtifact),
+    PromiseArtifact(&'v StarlarkPromiseArtifact),
+}
+
+impl<'v> StarlarkTypeRepr for &'v dyn StarlarkArtifactLike {
+    type Canonical = <EitherArtifactRef<'v> as StarlarkTypeRepr>::Canonical;
+
+    fn starlark_type_repr() -> Ty {
+        EitherArtifactRef::starlark_type_repr()
+    }
+}
+
+impl<'v> UnpackValue<'v> for &'v dyn StarlarkArtifactLike {
+    type Error = Infallible;
+
+    fn unpack_value_impl(value: Value<'v>) -> Result<Option<Self>, Self::Error> {
+        match EitherArtifactRef::unpack_value_opt(value) {
+            Some(EitherArtifactRef::Artifact(artifact)) => Ok(Some(artifact)),
+            Some(EitherArtifactRef::DeclaredArtifact(artifact)) => Ok(Some(artifact)),
+            Some(EitherArtifactRef::PromiseArtifact(artifact)) => Ok(Some(artifact)),
+            None => Ok(None),
+        }
+    }
+}
+
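The `EitherArtifactRef` dance exists because `UnpackValue` is derived for an enum of concrete types, while the method table wants a single `&dyn StarlarkArtifactLike` receiver. A standalone sketch of the pattern, with hypothetical `SourceFile`/`BuildOutput` types standing in for the real artifact values:

```rust
trait PathLike {
    fn short_path(&self) -> &str;
}

struct SourceFile { path: String }
struct BuildOutput { path: String }

impl PathLike for SourceFile {
    fn short_path(&self) -> &str { &self.path }
}
impl PathLike for BuildOutput {
    fn short_path(&self) -> &str { &self.path }
}

// Mirrors `EitherArtifactRef`: one variant per concrete implementation.
enum EitherRef<'a> {
    Source(&'a SourceFile),
    Output(&'a BuildOutput),
}

impl<'a> EitherRef<'a> {
    // Mirrors the `UnpackValue` impl: collapse the enum into `&dyn Trait`
    // so every method body is written once against the trait object.
    fn as_path_like(&self) -> &'a dyn PathLike {
        match self {
            EitherRef::Source(x) => *x,
            EitherRef::Output(x) => *x,
        }
    }
}

fn main() {
    let src = SourceFile { path: "foo/bar.sh".to_owned() };
    let out = BuildOutput { path: "out/bar".to_owned() };
    assert_eq!(EitherRef::Source(&src).as_path_like().short_path(), "foo/bar.sh");
    assert_eq!(EitherRef::Output(&out).as_path_like().short_path(), "out/bar");
}
```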
+#[derive(StarlarkTypeRepr, AllocValue)]
+pub enum EitherStarlarkArtifact {
+    Artifact(StarlarkArtifact),
+    DeclaredArtifact(StarlarkDeclaredArtifact),
+    PromiseArtifact(StarlarkPromiseArtifact),
+}
+
+/// A single input or output file for an action.
+///
+/// There is no `.parent` method on `artifact`, but in most cases
+/// `cmd_args(my_artifact, parent = 1)` can be used to similar effect.
+#[starlark_module]
+pub(crate) fn artifact_methods(builder: &mut MethodsBuilder) {
+    /// The base name of this artifact. e.g. for an artifact at `foo/bar`, this is `bar`
+    #[starlark(attribute)]
+    fn basename<'v>(
+        this: &'v dyn StarlarkArtifactLike,
+        heap: &'v Heap,
+    ) -> anyhow::Result<StringValue<'v>> {
+        this.basename(heap)
+    }
+
+    /// The file extension of this artifact. e.g. for an artifact at foo/bar.sh,
+    /// this is `.sh`. If no extension is present, `""` is returned.
+    #[starlark(attribute)]
+    fn extension<'v>(
+        this: &'v dyn StarlarkArtifactLike,
+        heap: &'v Heap,
+    ) -> anyhow::Result<StringValue<'v>> {
+        this.extension(heap)
+    }
+
+    /// Whether the artifact represents a source file
+    #[starlark(attribute)]
+    fn is_source<'v>(this: &'v dyn StarlarkArtifactLike) -> anyhow::Result<bool> {
+        this.is_source()
+    }
+
+    /// The `Label` of the rule that originally created this artifact. May also be None in
+    /// the case of source files, or if the artifact has not been used in an action, or if the
+    /// action was not created by a rule.
+    #[starlark(attribute)]
+    fn owner<'v>(
+        this: &'v dyn StarlarkArtifactLike,
+    ) -> anyhow::Result<NoneOr<StarlarkConfiguredProvidersLabel>> {
+        Ok(NoneOr::from_option(this.owner()?))
+    }
+
+    /// The interesting part of the path, relative to somewhere in the output directory.
+    /// For an artifact declared as `foo/bar`, this is `foo/bar`.
+    #[starlark(attribute)]
+    fn short_path<'v>(
+        this: &'v dyn StarlarkArtifactLike,
+        heap: &Heap,
+    ) -> anyhow::Result<StringValue<'v>> {
+        this.short_path(heap)
+    }
+
+    /// Returns a `StarlarkOutputArtifact` instance, or fails if the artifact is
+    /// either a source `Artifact` or an already-bound `Artifact` (you cannot bind twice).
+    fn as_output<'v>(
+        this: ValueOf<'v, &'v dyn StarlarkArtifactLike>,
+    ) -> anyhow::Result<StarlarkOutputArtifact<'v>> {
+        this.typed.as_output(this.value)
+    }
+
+    /// Create an artifact that lives at a path relative to this artifact.
+    ///
+    /// For example, if artifact foo is a directory containing a file bar, then `foo.project("bar")`
+    /// yields the file bar. It is possible for projected artifacts to hide the prefix in order to
+    /// have the short name of the resulting artifact only contain the projected path, by passing
+    /// `hide_prefix = True` to `project()`.
+    fn project<'v>(
+        this: &'v dyn StarlarkArtifactLike,
+        #[starlark(require = pos)] path: &str,
+        #[starlark(require = named, default = false)] hide_prefix: bool,
+    ) -> anyhow::Result<EitherStarlarkArtifact> {
+        let path = ForwardRelativePath::new(path)?;
+        this.project(path, hide_prefix)
+    }
+
+    /// Returns a `StarlarkArtifact` instance which is identical to the original artifact, except
+    /// with no associated artifacts
+    fn without_associated_artifacts<'v>(
+        this: &'v dyn StarlarkArtifactLike,
+    ) -> anyhow::Result<EitherStarlarkArtifact> {
+        this.without_associated_artifacts()
+    }
+
+    /// Returns a `StarlarkArtifact` instance which is identical to the original artifact, but with
+    /// potentially additional artifacts. The artifacts must be bound.
+    fn with_associated_artifacts<'v>(
+        this: &'v dyn StarlarkArtifactLike,
+        artifacts: UnpackList<ValueAsArtifactLike<'v>>,
+    ) -> anyhow::Result<EitherStarlarkArtifact> {
+        this.with_associated_artifacts(artifacts)
+    }
+}
diff --git a/app/buck2_build_api/src/interpreter/rule_defs/artifact/mod.rs b/app/buck2_build_api/src/interpreter/rule_defs/artifact/mod.rs
deleted file mode 100644
index deba9d8572023..0000000000000
--- a/app/buck2_build_api/src/interpreter/rule_defs/artifact/mod.rs
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */ - -pub(crate) mod artifact_type; -pub mod associated; -pub mod output_artifact_like; -mod starlark_artifact; -pub mod starlark_artifact_like; -pub(crate) mod starlark_artifact_value; -mod starlark_declared_artifact; -pub(crate) mod starlark_output_artifact; -mod starlark_promise_artifact; - -use std::fmt::Debug; - -use buck2_core::base_deferred_key::BaseDeferredKey; - -pub use self::starlark_artifact::StarlarkArtifact; -pub(crate) use self::starlark_artifact_like::StarlarkArtifactLike; -pub use self::starlark_artifact_like::ValueAsArtifactLike; -pub use self::starlark_artifact_value::StarlarkArtifactValue; -pub use self::starlark_declared_artifact::StarlarkDeclaredArtifact; -pub use self::starlark_output_artifact::FrozenStarlarkOutputArtifact; -pub use self::starlark_output_artifact::StarlarkOutputArtifact; -pub use self::starlark_output_artifact::StarlarkOutputOrDeclaredArtifact; -pub use self::starlark_promise_artifact::StarlarkPromiseArtifact; - -#[derive(Debug, thiserror::Error)] -pub(crate) enum ArtifactError { - #[error("expected artifact {repr} to be used as the output of an action, but it was not")] - DeclaredArtifactWasNotBound { repr: String }, - #[error( - "attempted to use source artifact {repr} as the output of an action. Source \ - artifacts may not be outputs." - )] - SourceArtifactAsOutput { repr: String }, - #[error( - "attempted to use artifact {artifact_repr} as the output of an action, but \ - it was already used by another action in {existing_owner}" - )] - BoundArtifactAsOutput { - artifact_repr: String, - existing_owner: BaseDeferredKey, - }, - #[error( - "attempted to use promise artifact {artifact_repr} as the output of an action, but \ - only declared artifacts can be used as an output" - )] - PromiseArtifactAsOutput { artifact_repr: String }, -} diff --git a/app/buck2_build_api/src/interpreter/rule_defs/artifact/output_artifact_like.rs b/app/buck2_build_api/src/interpreter/rule_defs/artifact/output_artifact_like.rs index 496de5f708277..977216da8e01a 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/artifact/output_artifact_like.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/artifact/output_artifact_like.rs @@ -11,9 +11,9 @@ use starlark::values::type_repr::StarlarkTypeRepr; use starlark::values::UnpackValue; use starlark::values::ValueTyped; -use crate::interpreter::rule_defs::artifact::StarlarkDeclaredArtifact; -use crate::interpreter::rule_defs::artifact::StarlarkOutputArtifact; -use crate::interpreter::rule_defs::artifact::ValueAsArtifactLike; +use crate::interpreter::rule_defs::artifact::starlark_artifact_like::ValueAsArtifactLike; +use crate::interpreter::rule_defs::artifact::starlark_declared_artifact::StarlarkDeclaredArtifact; +use crate::interpreter::rule_defs::artifact::starlark_output_artifact::StarlarkOutputArtifact; #[derive(StarlarkTypeRepr, UnpackValue)] pub enum OutputArtifactArg<'v> { diff --git a/app/buck2_build_api/src/interpreter/rule_defs/artifact/starlark_artifact.rs b/app/buck2_build_api/src/interpreter/rule_defs/artifact/starlark_artifact.rs index 0fa88618620c9..f9f313e50a6b0 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/artifact/starlark_artifact.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/artifact/starlark_artifact.rs @@ -13,6 +13,7 @@ use allocative::Allocative; use buck2_artifact::artifact::artifact_type::Artifact; use buck2_artifact::artifact::artifact_type::BaseArtifactKind; use buck2_core::base_deferred_key::BaseDeferredKey; +use 
buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_core::provider::label::ProvidersName; use buck2_execute::path::artifact_path::ArtifactPath; @@ -23,28 +24,26 @@ use serde::Serializer; use starlark::any::ProvidesStaticType; use starlark::collections::StarlarkHasher; use starlark::environment::Methods; -use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; -use starlark::typing::Ty; -use starlark::values::list::ListOf; +use starlark::values::list::UnpackList; use starlark::values::starlark_value; +use starlark::values::type_repr::StarlarkTypeRepr; use starlark::values::Demand; use starlark::values::Heap; use starlark::values::StarlarkValue; use starlark::values::StringValue; -use starlark::values::UnpackValue; use starlark::values::Value; -use starlark::values::ValueLike; -use thiserror::Error; use crate::artifact_groups::ArtifactGroup; use crate::interpreter::rule_defs::artifact::associated::AssociatedArtifacts; +use crate::interpreter::rule_defs::artifact::methods::artifact_methods; +use crate::interpreter::rule_defs::artifact::methods::EitherStarlarkArtifact; use crate::interpreter::rule_defs::artifact::starlark_artifact_like::ArtifactFingerprint; +use crate::interpreter::rule_defs::artifact::starlark_artifact_like::StarlarkArtifactLike; use crate::interpreter::rule_defs::artifact::starlark_artifact_like::ValueAsArtifactLike; +use crate::interpreter::rule_defs::artifact::starlark_output_artifact::StarlarkOutputArtifact; use crate::interpreter::rule_defs::artifact::ArtifactError; -use crate::interpreter::rule_defs::artifact::StarlarkArtifactLike; -use crate::interpreter::rule_defs::artifact::StarlarkDeclaredArtifact; -use crate::interpreter::rule_defs::artifact::StarlarkOutputArtifact; +use crate::interpreter::rule_defs::cmd_args::command_line_arg_like_type::command_line_arg_like_impl; use crate::interpreter::rule_defs::cmd_args::CommandLineArgLike; use crate::interpreter::rule_defs::cmd_args::CommandLineArtifactVisitor; use crate::interpreter::rule_defs::cmd_args::CommandLineBuilder; @@ -53,15 +52,7 @@ use crate::interpreter::rule_defs::cmd_args::WriteToFileMacroVisitor; /// A wrapper for an `Artifact` that is guaranteed to be bound, such as outputs /// from dependencies, or source files. 
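This refactor replaces three near-identical method tables with the one `artifact_methods` table over `&dyn StarlarkArtifactLike`; what still differs per type is the binding discipline enforced by `as_output`, visible in the implementations below. A toy model of the bind rules that `ArtifactError` describes, with simplified hypothetical types:

```rust
// Simplified stand-ins for the real artifact states and `ArtifactError` cases.
#[derive(Debug, PartialEq)]
enum BindError {
    SourceAsOutput,
    AlreadyBound { owner: String },
}

enum Artifact {
    Source,
    Declared,
    Bound { owner: String },
}

impl Artifact {
    // Mirrors `as_output`: only an unbound declared artifact may become an
    // action output; binding records the owning action.
    fn as_output(&mut self, action: &str) -> Result<(), BindError> {
        match self {
            Artifact::Source => Err(BindError::SourceAsOutput),
            Artifact::Bound { owner } => Err(BindError::AlreadyBound { owner: owner.clone() }),
            Artifact::Declared => {
                *self = Artifact::Bound { owner: action.to_owned() };
                Ok(())
            }
        }
    }
}

fn main() {
    let mut a = Artifact::Declared;
    assert_eq!(a.as_output("action1"), Ok(()));
    // Binding twice is the `BoundArtifactAsOutput` case.
    assert!(matches!(a.as_output("action2"), Err(BindError::AlreadyBound { .. })));
}
```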
-#[derive( - Debug, - Dupe, - Clone, - PartialEq, - ProvidesStaticType, - Allocative, - StarlarkDocs -)] +#[derive(Debug, Dupe, Clone, PartialEq, ProvidesStaticType, Allocative)] pub struct StarlarkArtifact { pub(crate) artifact: Artifact, // A set of ArtifactGroups that should be materialized along with the main artifact @@ -70,29 +61,6 @@ pub struct StarlarkArtifact { starlark_simple_value!(StarlarkArtifact); -impl<'v> UnpackValue<'v> for StarlarkArtifact { - fn expected() -> String { - format!( - "either {} or {}", - StarlarkArtifact::get_type_value_static().as_str(), - StarlarkDeclaredArtifact::get_type_value_static().as_str() - ) - } - - fn unpack_value(value: Value<'v>) -> Option { - if let Some(x) = value.downcast_ref::() { - Some(x.dupe()) - } else if let Some(x) = value.downcast_ref::() { - x.get_bound_artifact().ok().map(|a| StarlarkArtifact { - artifact: a, - associated_artifacts: x.associated_artifacts.dupe(), - }) - } else { - None - } - } -} - impl StarlarkArtifact { pub fn new(artifact: Artifact) -> Self { StarlarkArtifact { @@ -188,9 +156,83 @@ impl StarlarkArtifactLike for StarlarkArtifact { fn get_artifact_group(&self) -> anyhow::Result { Ok(ArtifactGroup::Artifact(self.get_bound_artifact()?)) } + + fn basename<'v>(&'v self, heap: &'v Heap) -> anyhow::Result> { + StarlarkArtifactHelpers::basename(&self.artifact, heap) + } + + fn extension<'v>(&'v self, heap: &'v Heap) -> anyhow::Result> { + StarlarkArtifactHelpers::extension(&self.artifact, heap) + } + + fn is_source<'v>(&'v self) -> anyhow::Result { + Ok(self.artifact.is_source()) + } + + fn owner<'v>(&'v self) -> anyhow::Result> { + StarlarkArtifactHelpers::owner(&self.artifact) + } + + fn short_path<'v>(&'v self, heap: &'v Heap) -> anyhow::Result> { + StarlarkArtifactHelpers::short_path(&self.artifact, heap) + } + + fn as_output<'v>(&'v self, _this: Value<'v>) -> anyhow::Result> { + match self.artifact.as_parts().0 { + BaseArtifactKind::Source(_) => Err(ArtifactError::SourceArtifactAsOutput { + repr: self.to_string(), + } + .into()), + BaseArtifactKind::Build(b) => Err(ArtifactError::BoundArtifactAsOutput { + artifact_repr: self.to_string(), + existing_owner: b.get_path().owner().dupe(), + } + .into()), + } + } + + fn project<'v>( + &'v self, + path: &ForwardRelativePath, + hide_prefix: bool, + ) -> anyhow::Result { + Ok(EitherStarlarkArtifact::Artifact(StarlarkArtifact { + artifact: self.artifact.dupe().project(path, hide_prefix), + associated_artifacts: self.associated_artifacts.dupe(), + })) + } + + fn without_associated_artifacts<'v>(&'v self) -> anyhow::Result { + Ok(EitherStarlarkArtifact::Artifact(StarlarkArtifact { + artifact: self.artifact.dupe(), + associated_artifacts: AssociatedArtifacts::new(), + })) + } + + fn with_associated_artifacts<'v>( + &'v self, + artifacts: UnpackList>, + ) -> anyhow::Result { + let artifacts = artifacts + .items + .iter() + .map(|a| a.0.get_artifact_group()) + .collect::, _>>()?; + + let artifacts = AssociatedArtifacts::from(artifacts); + + Ok(EitherStarlarkArtifact::Artifact(StarlarkArtifact { + artifact: self.artifact.dupe(), + associated_artifacts: self.associated_artifacts.union(artifacts), + })) + } } impl CommandLineArgLike for StarlarkArtifact { + fn register_me(&self) { + command_line_arg_like_impl!(StarlarkArtifact::starlark_type_repr()); + } + fn add_to_command_line( &self, cli: &mut dyn CommandLineBuilder, @@ -227,29 +269,17 @@ impl<'v> StarlarkValue<'v> for StarlarkArtifact { RES.methods(artifact_methods) } - fn equals(&self, other: Value<'v>) -> anyhow::Result { + fn 
equals(&self, other: Value<'v>) -> starlark::Result { StarlarkArtifactLike::equals(self, other) } - fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { + fn write_hash(&self, hasher: &mut StarlarkHasher) -> starlark::Result<()> { StarlarkArtifactLike::write_hash(self, hasher) } fn provide(&'v self, demand: &mut Demand<'_, 'v>) { demand.provide_value::<&dyn CommandLineArgLike>(self); } - - fn get_type_starlark_repr() -> Ty { - Ty::starlark_value::() - } -} - -#[derive(Error, Debug)] -enum CannotProject { - #[error("Source artifacts cannot be projected")] - SourceArtifact, - #[error("This artifact was declared by another rule: `{0}`")] - DeclaredElsewhere(BaseDeferredKey), } pub(crate) struct StarlarkArtifactHelpers; @@ -313,112 +343,3 @@ impl StarlarkArtifactHelpers { } } } - -/// A single input or output file for an action. -/// -/// There is no `.parent` method on `artifact`, but in most cases -/// `cmd_args(my_artifact).parent()` can be used to similar effect. -#[starlark_module] -fn artifact_methods(builder: &mut MethodsBuilder) { - /// The base name of this artifact. e.g. for an artifact at `foo/bar`, this is `bar` - #[starlark(attribute)] - fn basename<'v>(this: &'v StarlarkArtifact, heap: &Heap) -> anyhow::Result> { - StarlarkArtifactHelpers::basename(&this.artifact, heap) - } - - /// The file extension of this artifact. e.g. for an artifact at foo/bar.sh, - /// this is `.sh`. If no extension is present, `""` is returned. - #[starlark(attribute)] - fn extension<'v>(this: &StarlarkArtifact, heap: &Heap) -> anyhow::Result> { - StarlarkArtifactHelpers::extension(&this.artifact, heap) - } - - /// Whether the artifact represents a source file - #[starlark(attribute)] - fn is_source(this: &StarlarkArtifact) -> anyhow::Result { - Ok(this.artifact.is_source()) - } - - /// The `Label` of the rule that originally created this artifact. May also be None in - /// the case of source files, or if the artifact has not be used in an action, or if the - /// action was not created by a rule. - #[starlark(attribute)] - fn owner<'v>( - this: &StarlarkArtifact, - ) -> anyhow::Result> { - StarlarkArtifactHelpers::owner(&this.artifact) - } - - /// The interesting part of the path, relative to somewhere in the output directory. - /// For an artifact declared as `foo/bar`, this is `foo/bar`. - #[starlark(attribute)] - fn short_path<'v>(this: &'v StarlarkArtifact, heap: &Heap) -> anyhow::Result> { - StarlarkArtifactHelpers::short_path(&this.artifact, heap) - } - - /// Returns a `StarlarkOutputArtifact` instance, or fails if the artifact is - /// either an `Artifact`, or is a bound `Artifact` (You cannot bind twice) - fn as_output<'v>(this: &'v StarlarkArtifact) -> anyhow::Result> { - match this.artifact.as_parts().0 { - BaseArtifactKind::Source(_) => Err(ArtifactError::SourceArtifactAsOutput { - repr: this.to_string(), - } - .into()), - BaseArtifactKind::Build(b) => Err(ArtifactError::BoundArtifactAsOutput { - artifact_repr: this.to_string(), - existing_owner: b.get_path().owner().dupe(), - } - .into()), - } - } - - /// Create an artifact that lives at path relative from this artifact. - /// For example, if artifact foo is a directory containing a file bar, then foo.project("bar") yields the file bar. - /// It is possible for projected artifacts to hide the prefix in order to have the short name of the resulting artifact only contain the projected path, by passing hide_prefix = True to project(). 
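For intuition, `project` is pure path arithmetic on the artifact's path; a toy model, assuming (as the docs above describe) that `hide_prefix` keeps only the projected component in the short name:

```rust
// Toy model: an artifact reduced to its short path.
struct Artifact {
    short_path: String,
}

impl Artifact {
    // `hide_prefix = true` drops the existing short path from the projected
    // artifact's name, keeping only the projected component.
    fn project(&self, rel: &str, hide_prefix: bool) -> Artifact {
        let short_path = if hide_prefix {
            rel.to_owned()
        } else {
            format!("{}/{}", self.short_path, rel)
        };
        Artifact { short_path }
    }
}

fn main() {
    let dir = Artifact { short_path: "foo".to_owned() };
    assert_eq!(dir.project("bar", false).short_path, "foo/bar");
    assert_eq!(dir.project("bar", true).short_path, "bar");
}
```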
- fn project<'v>( - this: &'v StarlarkArtifact, - #[starlark(require = pos)] path: &str, - #[starlark(require = named, default = false)] hide_prefix: bool, - ) -> anyhow::Result { - let _ignored = hide_prefix; - - let err = anyhow::Error::from(match this.artifact.owner() { - Some(owner) => CannotProject::DeclaredElsewhere(owner.dupe()), - None => CannotProject::SourceArtifact, - }); - - Err(err.context(format!( - "Cannot project path `{}` in artifact `{}`", - path, this - ))) - } - - /// Returns a `StarlarkArtifact` instance which is identical to the original artifact, except - /// with no associated artifacts - fn without_associated_artifacts(this: &StarlarkArtifact) -> anyhow::Result { - Ok(StarlarkArtifact { - artifact: this.artifact.dupe(), - associated_artifacts: AssociatedArtifacts::new(), - }) - } - - /// Returns a `StarlarkArtifact` instance which is identical to the original artifact, but with - /// potentially additional artifacts. The artifacts must be bound. - fn with_associated_artifacts<'v>( - this: &'v StarlarkArtifact, - artifacts: ListOf<'v, ValueAsArtifactLike<'v>>, - ) -> anyhow::Result { - let artifacts = artifacts - .to_vec() - .iter() - .map(|a| a.0.get_artifact_group()) - .collect::, _>>()?; - - let artifacts = AssociatedArtifacts::from(artifacts); - - Ok(StarlarkArtifact { - artifact: this.artifact.dupe(), - associated_artifacts: this.associated_artifacts.union(artifacts), - }) - } -} diff --git a/app/buck2_build_api/src/interpreter/rule_defs/artifact/starlark_artifact_like.rs b/app/buck2_build_api/src/interpreter/rule_defs/artifact/starlark_artifact_like.rs index 3f3bc2695d45f..7242831628d80 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/artifact/starlark_artifact_like.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/artifact/starlark_artifact_like.rs @@ -7,24 +7,32 @@ * of this source tree. 
*/ +use std::convert::Infallible; use std::fmt::Display; use std::hash::Hash; use std::hash::Hasher; use buck2_artifact::artifact::artifact_type::Artifact; +use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; use buck2_execute::path::artifact_path::ArtifactPath; +use buck2_interpreter::types::configured_providers_label::StarlarkConfiguredProvidersLabel; use starlark::collections::StarlarkHasher; use starlark::typing::Ty; +use starlark::values::list::UnpackList; use starlark::values::type_repr::StarlarkTypeRepr; +use starlark::values::Heap; +use starlark::values::StringValue; use starlark::values::UnpackValue; use starlark::values::Value; use crate::artifact_groups::promise::PromiseArtifactId; use crate::artifact_groups::ArtifactGroup; use crate::interpreter::rule_defs::artifact::associated::AssociatedArtifacts; -use crate::interpreter::rule_defs::artifact::StarlarkArtifact; -use crate::interpreter::rule_defs::artifact::StarlarkDeclaredArtifact; -use crate::interpreter::rule_defs::artifact::StarlarkPromiseArtifact; +use crate::interpreter::rule_defs::artifact::methods::EitherStarlarkArtifact; +use crate::interpreter::rule_defs::artifact::starlark_artifact::StarlarkArtifact; +use crate::interpreter::rule_defs::artifact::starlark_declared_artifact::StarlarkDeclaredArtifact; +use crate::interpreter::rule_defs::artifact::starlark_output_artifact::StarlarkOutputArtifact; +use crate::interpreter::rule_defs::artifact::starlark_promise_artifact::StarlarkPromiseArtifact; use crate::interpreter::rule_defs::cmd_args::CommandLineArgLike; /// The Starlark representation of an `Artifact` @@ -63,12 +71,12 @@ pub trait StarlarkArtifactLike: Display { /// as the inputs to Hash/Eq to ensure they are consistent fn fingerprint(&self) -> ArtifactFingerprint<'_>; - fn equals<'v>(&self, other: Value<'v>) -> anyhow::Result { - Ok(ValueAsArtifactLike::unpack_value(other) + fn equals<'v>(&self, other: Value<'v>) -> starlark::Result { + Ok(ValueAsArtifactLike::unpack_value(other)? .map_or(false, |other| self.fingerprint() == other.0.fingerprint())) } - fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { + fn write_hash(&self, hasher: &mut StarlarkHasher) -> starlark::Result<()> { self.fingerprint().hash(hasher); Ok(()) } @@ -86,11 +94,36 @@ pub trait StarlarkArtifactLike: Display { /// Gets the artifact group. fn get_artifact_group(&self) -> anyhow::Result; + + fn basename<'v>(&'v self, heap: &'v Heap) -> anyhow::Result>; + + fn extension<'v>(&'v self, heap: &'v Heap) -> anyhow::Result>; + + fn is_source<'v>(&'v self) -> anyhow::Result; + + fn owner<'v>(&'v self) -> anyhow::Result>; + + fn short_path<'v>(&'v self, heap: &'v Heap) -> anyhow::Result>; + + fn as_output<'v>(&'v self, this: Value<'v>) -> anyhow::Result>; + + fn project<'v>( + &'v self, + path: &ForwardRelativePath, + hide_prefix: bool, + ) -> anyhow::Result; + + fn without_associated_artifacts<'v>(&'v self) -> anyhow::Result; + + fn with_associated_artifacts<'v>( + &'v self, + artifacts: UnpackList>, + ) -> anyhow::Result; } /// Helper type to unpack artifacts. 
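Note how, earlier in this hunk, the trait provides `equals` and `write_hash` once, both routed through `fingerprint`, so equality and hashing cannot drift apart for any implementation. The shape of that pattern in isolation, with a hypothetical simplified fingerprint:

```rust
use std::hash::{Hash, Hasher};

// Hypothetical artifact with a derived identity key.
struct Artifact {
    path: String,
}

// Borrowed key type, like `ArtifactFingerprint<'_>`.
#[derive(PartialEq, Hash)]
struct Fingerprint<'a> {
    path: &'a str,
}

impl Artifact {
    fn fingerprint(&self) -> Fingerprint<'_> {
        Fingerprint { path: &self.path }
    }

    // Both operations use the same key, so equal values always hash equally.
    fn equals(&self, other: &Artifact) -> bool {
        self.fingerprint() == other.fingerprint()
    }

    fn write_hash(&self, hasher: &mut impl Hasher) {
        self.fingerprint().hash(hasher);
    }
}

fn main() {
    let a = Artifact { path: "foo/bar".into() };
    let b = Artifact { path: "foo/bar".into() };
    let mut h1 = std::collections::hash_map::DefaultHasher::new();
    let mut h2 = h1.clone();
    a.write_hash(&mut h1);
    b.write_hash(&mut h2);
    assert!(a.equals(&b) && h1.finish() == h2.finish());
}
```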
#[derive(StarlarkTypeRepr, UnpackValue)] -enum ValueAsArtifactLikeUnpack<'v> { +pub enum ValueAsArtifactLikeUnpack<'v> { Artifact(&'v StarlarkArtifact), DeclaredArtifact(&'v StarlarkDeclaredArtifact), PromiseArtifact(&'v StarlarkPromiseArtifact), @@ -99,23 +132,28 @@ enum ValueAsArtifactLikeUnpack<'v> { pub struct ValueAsArtifactLike<'v>(pub &'v dyn StarlarkArtifactLike); impl<'v> StarlarkTypeRepr for ValueAsArtifactLike<'v> { + type Canonical = as StarlarkTypeRepr>::Canonical; + fn starlark_type_repr() -> Ty { ValueAsArtifactLikeUnpack::starlark_type_repr() } } impl<'v> UnpackValue<'v> for ValueAsArtifactLike<'v> { - fn unpack_value(value: Value<'v>) -> Option { - match ValueAsArtifactLikeUnpack::unpack_value(value)? { - ValueAsArtifactLikeUnpack::Artifact(a) => { - Some(ValueAsArtifactLike(a as &dyn StarlarkArtifactLike)) + type Error = Infallible; + + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + match ValueAsArtifactLikeUnpack::unpack_value_opt(value) { + Some(ValueAsArtifactLikeUnpack::Artifact(a)) => { + Ok(Some(ValueAsArtifactLike(a as &dyn StarlarkArtifactLike))) } - ValueAsArtifactLikeUnpack::DeclaredArtifact(a) => { - Some(ValueAsArtifactLike(a as &dyn StarlarkArtifactLike)) + Some(ValueAsArtifactLikeUnpack::DeclaredArtifact(a)) => { + Ok(Some(ValueAsArtifactLike(a as &dyn StarlarkArtifactLike))) } - ValueAsArtifactLikeUnpack::PromiseArtifact(a) => { - Some(ValueAsArtifactLike(a as &dyn StarlarkArtifactLike)) + Some(ValueAsArtifactLikeUnpack::PromiseArtifact(a)) => { + Ok(Some(ValueAsArtifactLike(a as &dyn StarlarkArtifactLike))) } + None => Ok(None), } } } diff --git a/app/buck2_build_api/src/interpreter/rule_defs/artifact/starlark_artifact_value.rs b/app/buck2_build_api/src/interpreter/rule_defs/artifact/starlark_artifact_value.rs index 619ad233d3422..8abcb8754d6a9 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/artifact/starlark_artifact_value.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/artifact/starlark_artifact_value.rs @@ -32,7 +32,6 @@ use starlark::values::Heap; use starlark::values::NoSerialize; use starlark::values::StarlarkValue; use starlark::values::Value; -use thiserror::Error; /// The Starlark representation of an `Artifact` on disk which can be accessed. 
#[derive(Debug, ProvidesStaticType, NoSerialize, Allocative)] @@ -66,13 +65,13 @@ impl<'v> StarlarkValue<'v> for StarlarkArtifactValue { } } -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum JsonError { #[error("JSON number is outside the bounds that Starlark supports, `{0}`")] NumberOutOfBounds(String), } -fn json_convert<'v>(v: serde_json::Value, heap: &'v Heap) -> anyhow::Result> { +fn json_convert<'v>(v: serde_json::Value, heap: &'v Heap) -> starlark::Result> { match v { serde_json::Value::Null => Ok(Value::new_none()), serde_json::Value::Bool(x) => Ok(Value::new_bool(x)), @@ -82,7 +81,9 @@ fn json_convert<'v>(v: serde_json::Value, heap: &'v Heap) -> anyhow::Result Ok(heap.alloc(x)), @@ -101,10 +102,10 @@ fn json_convert<'v>(v: serde_json::Value, heap: &'v Heap) -> anyhow::Result anyhow::Result { let path = this.fs.resolve(&this.path); - fs_util::read_to_string(path) + fs_util::read_to_string(path).map_err(Into::into) } - fn read_json<'v>(this: &StarlarkArtifactValue, heap: &'v Heap) -> anyhow::Result> { + fn read_json<'v>(this: &StarlarkArtifactValue, heap: &'v Heap) -> starlark::Result> { let path = this.fs.resolve(&this.path); let file = File::open(&path).with_context(|| format!("Error opening file `{}`", path))?; let reader = BufReader::new(file); @@ -121,7 +122,6 @@ pub(crate) fn register_artifact_value(globals: &mut GlobalsBuilder) { #[cfg(test)] mod tests { - use starlark::values::Heap; use super::*; diff --git a/app/buck2_build_api/src/interpreter/rule_defs/artifact/starlark_declared_artifact.rs b/app/buck2_build_api/src/interpreter/rule_defs/artifact/starlark_declared_artifact.rs index 724525505457c..00633e91f6130 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/artifact/starlark_declared_artifact.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/artifact/starlark_declared_artifact.rs @@ -19,6 +19,7 @@ use buck2_core::base_deferred_key::BaseDeferredKey; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_core::provider::label::ProvidersName; +use buck2_error::BuckErrorContext; use buck2_execute::path::artifact_path::ArtifactPath; use buck2_interpreter::types::configured_providers_label::StarlarkConfiguredProvidersLabel; use dupe::Dupe; @@ -26,11 +27,10 @@ use starlark::any::ProvidesStaticType; use starlark::codemap::FileSpan; use starlark::collections::StarlarkHasher; use starlark::environment::Methods; -use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; -use starlark::typing::Ty; -use starlark::values::list::ListOf; +use starlark::values::list::UnpackList; use starlark::values::starlark_value; +use starlark::values::type_repr::StarlarkTypeRepr; use starlark::values::AllocValue; use starlark::values::Demand; use starlark::values::Freeze; @@ -45,13 +45,16 @@ use starlark::values::ValueTyped; use crate::artifact_groups::ArtifactGroup; use crate::interpreter::rule_defs::artifact::associated::AssociatedArtifacts; +use crate::interpreter::rule_defs::artifact::methods::artifact_methods; +use crate::interpreter::rule_defs::artifact::methods::EitherStarlarkArtifact; +use crate::interpreter::rule_defs::artifact::starlark_artifact::StarlarkArtifact; use crate::interpreter::rule_defs::artifact::starlark_artifact::StarlarkArtifactHelpers; use crate::interpreter::rule_defs::artifact::starlark_artifact_like::ArtifactFingerprint; +use crate::interpreter::rule_defs::artifact::starlark_artifact_like::StarlarkArtifactLike; use 
crate::interpreter::rule_defs::artifact::starlark_artifact_like::ValueAsArtifactLike; +use crate::interpreter::rule_defs::artifact::starlark_output_artifact::StarlarkOutputArtifact; use crate::interpreter::rule_defs::artifact::ArtifactError; -use crate::interpreter::rule_defs::artifact::StarlarkArtifact; -use crate::interpreter::rule_defs::artifact::StarlarkArtifactLike; -use crate::interpreter::rule_defs::artifact::StarlarkOutputArtifact; +use crate::interpreter::rule_defs::cmd_args::command_line_arg_like_type::command_line_arg_like_impl; use crate::interpreter::rule_defs::cmd_args::CommandLineArgLike; use crate::interpreter::rule_defs::cmd_args::CommandLineArtifactVisitor; use crate::interpreter::rule_defs::cmd_args::CommandLineBuilder; @@ -154,9 +157,103 @@ impl StarlarkArtifactLike for StarlarkDeclaredArtifact { fn get_artifact_group(&self) -> anyhow::Result { Ok(ArtifactGroup::Artifact(self.get_bound_artifact()?)) } + + fn basename<'v>(&'v self, heap: &'v Heap) -> anyhow::Result> { + self.artifact + .get_path() + .with_filename(|filename| Ok(heap.alloc_str(filename?.as_str()))) + } + + fn extension<'v>(&'v self, heap: &'v Heap) -> anyhow::Result> { + self.artifact.get_path().with_filename(|filename| { + Ok(StarlarkArtifactHelpers::alloc_extension( + filename?.extension(), + heap, + )) + }) + } + + fn is_source<'v>(&'v self) -> anyhow::Result { + Ok(false) + } + + fn owner<'v>(&'v self) -> anyhow::Result> { + match self.artifact.owner() { + None => Ok(None), + Some(x) => Ok(match x { + BaseDeferredKey::TargetLabel(t) => Some(StarlarkConfiguredProvidersLabel::new( + ConfiguredProvidersLabel::new(t, ProvidersName::Default), + )), + BaseDeferredKey::AnonTarget(_) | BaseDeferredKey::BxlLabel(_) => None, + }), + } + } + + fn short_path<'v>(&'v self, heap: &'v Heap) -> anyhow::Result> { + self.artifact + .get_path() + .with_short_path(|short_path| Ok(heap.alloc_str(short_path.as_str()))) + } + + fn as_output<'v>(&'v self, this: Value<'v>) -> anyhow::Result> { + Ok(StarlarkOutputArtifact::new( + ValueTyped::::new_err(this) + .internal_error_anyhow("Type must have been checked earlier")?, + )) + } + + fn project<'v>( + &'v self, + path: &ForwardRelativePath, + hide_prefix: bool, + ) -> anyhow::Result { + // Not sure if this.declaration_location is or the project() call is more appropriate here. 
+ Ok(EitherStarlarkArtifact::DeclaredArtifact( + StarlarkDeclaredArtifact { + declaration_location: self.declaration_location.dupe(), + artifact: self.artifact.project(path, hide_prefix), + associated_artifacts: self.associated_artifacts.dupe(), + }, + )) + } + + fn without_associated_artifacts<'v>(&'v self) -> anyhow::Result { + Ok(EitherStarlarkArtifact::DeclaredArtifact( + StarlarkDeclaredArtifact { + declaration_location: self.declaration_location.dupe(), + artifact: self.artifact.dupe(), + associated_artifacts: AssociatedArtifacts::new(), + }, + )) + } + + fn with_associated_artifacts<'v>( + &'v self, + artifacts: UnpackList>, + ) -> anyhow::Result { + let artifacts = artifacts + .items + .iter() + .map(|a| a.0.get_artifact_group()) + .collect::, _>>()?; + + let artifacts = AssociatedArtifacts::from(artifacts); + + Ok(EitherStarlarkArtifact::DeclaredArtifact( + StarlarkDeclaredArtifact { + declaration_location: self.declaration_location.dupe(), + artifact: self.artifact.dupe(), + associated_artifacts: self.associated_artifacts.union(artifacts), + }, + )) + } } impl CommandLineArgLike for StarlarkDeclaredArtifact { + fn register_me(&self) { + command_line_arg_like_impl!(StarlarkDeclaredArtifact::starlark_type_repr()); + } + fn add_to_command_line( &self, _cli: &mut dyn CommandLineBuilder, @@ -222,146 +319,18 @@ impl<'v> StarlarkValue<'v> for StarlarkDeclaredArtifact { fn get_methods() -> Option<&'static Methods> { static RES: MethodsStatic = MethodsStatic::new(); - RES.methods(declared_artifact_methods) + RES.methods(artifact_methods) } - fn equals(&self, other: Value<'v>) -> anyhow::Result { + fn equals(&self, other: Value<'v>) -> starlark::Result { StarlarkArtifactLike::equals(self, other) } - fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { + fn write_hash(&self, hasher: &mut StarlarkHasher) -> starlark::Result<()> { StarlarkArtifactLike::write_hash(self, hasher) } fn provide(&'v self, demand: &mut Demand<'_, 'v>) { demand.provide_value::<&dyn CommandLineArgLike>(self); } - - fn get_type_starlark_repr() -> Ty { - Ty::starlark_value::() - } -} - -/// A single input or output for an action -#[starlark_module] -fn declared_artifact_methods(builder: &mut MethodsBuilder) { - /// The base name of this artifact. e.g. for an artifact at `foo/bar`, this is `bar` - #[starlark(attribute)] - fn basename<'v>( - this: &StarlarkDeclaredArtifact, - heap: &Heap, - ) -> anyhow::Result> { - this.artifact - .get_path() - .with_filename(|filename| Ok(heap.alloc_str(filename?.as_str()))) - } - - /// The file extension of this artifact. e.g. for an artifact at foo/bar.sh, - /// this is `.sh`. If no extension is present, `""` is returned. - #[starlark(attribute)] - fn extension<'v>( - this: &StarlarkDeclaredArtifact, - heap: &Heap, - ) -> anyhow::Result> { - this.artifact.get_path().with_filename(|filename| { - Ok(StarlarkArtifactHelpers::alloc_extension( - filename?.extension(), - heap, - )) - }) - } - - /// Whether the artifact represents a source file - #[starlark(attribute)] - fn is_source(this: &StarlarkDeclaredArtifact) -> anyhow::Result { - Ok(false) - } - - /// The `Label` of the rule that originally created this artifact. May also be None in - /// the case of source files, or if the artifact has not be used in an action, or if the - /// action was not created by a rule. 
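The `owner` implementation above maps the artifact's `BaseDeferredKey` to an optional label: only regular configured targets expose one, while anon targets and BXL keys intentionally yield `None` rather than an error. A condensed sketch with stand-in types:

```rust
// Hypothetical stand-in for `BaseDeferredKey`.
enum OwnerKey {
    TargetLabel(String),
    AnonTarget(u64),
    BxlLabel(String),
}

// Mirrors `owner`: anon targets and BXL keys are reported as `None`.
fn owner_label(key: Option<&OwnerKey>) -> Option<String> {
    match key? {
        OwnerKey::TargetLabel(t) => Some(t.clone()),
        OwnerKey::AnonTarget(_) | OwnerKey::BxlLabel(_) => None,
    }
}

fn main() {
    assert_eq!(owner_label(None), None); // e.g. a source file
    assert_eq!(owner_label(Some(&OwnerKey::AnonTarget(1))), None);
    assert_eq!(
        owner_label(Some(&OwnerKey::TargetLabel("root//:lib".to_owned()))),
        Some("root//:lib".to_owned())
    );
}
```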
- #[starlark(attribute)] - fn owner<'v>( - this: &StarlarkDeclaredArtifact, - ) -> anyhow::Result> { - match this.artifact.owner() { - None => Ok(None), - Some(x) => Ok(match x { - BaseDeferredKey::TargetLabel(t) => Some(StarlarkConfiguredProvidersLabel::new( - ConfiguredProvidersLabel::new(t, ProvidersName::Default), - )), - BaseDeferredKey::AnonTarget(_) | BaseDeferredKey::BxlLabel(_) => None, - }), - } - } - - /// Returns a `StarlarkOutputArtifact` instance, or fails if the artifact is - /// either an `Artifact`, or is a bound `DeclaredArtifact` (You cannot bind twice) - fn as_output<'v>( - this: ValueTyped<'v, StarlarkDeclaredArtifact>, - ) -> anyhow::Result> { - Ok(StarlarkOutputArtifact::new(this)) - } - - /// The interesting part of the path, relative to somewhere in the output directory. - /// For an artifact declared as `foo/bar`, this is `foo/bar`. - #[starlark(attribute)] - fn short_path<'v>( - this: &StarlarkDeclaredArtifact, - heap: &Heap, - ) -> anyhow::Result> { - this.artifact - .get_path() - .with_short_path(|short_path| Ok(heap.alloc_str(short_path.as_str()))) - } - - /// Create an artifact that lives at path relative from this artifact - /// For example, if artifact foo is a directory containing a file bar, then foo.project("bar") yields the file bar. - /// It is possible for projected artifacts to hide the prefix in order to have the short name of the resulting artifact only contain the projected path, by passing hide_prefix = True to project(). - fn project<'v>( - this: &'v StarlarkDeclaredArtifact, - path: &str, - #[starlark(require = named, default = false)] hide_prefix: bool, - ) -> anyhow::Result { - let path = ForwardRelativePath::new(path)?; - // Not sure if this.declaration_location is or the project() call is more appropriate here. - Ok(StarlarkDeclaredArtifact { - declaration_location: this.declaration_location.dupe(), - artifact: this.artifact.project(path, hide_prefix), - associated_artifacts: this.associated_artifacts.dupe(), - }) - } - - /// Returns a `StarlarkDeclaredArtifact` instance which is identical to the original artifact, - /// except with no associated artifacts - fn without_associated_artifacts( - this: &StarlarkDeclaredArtifact, - ) -> anyhow::Result { - Ok(StarlarkDeclaredArtifact { - declaration_location: this.declaration_location.dupe(), - artifact: this.artifact.dupe(), - associated_artifacts: AssociatedArtifacts::new(), - }) - } - - /// Returns a `StarlarkArtifact` instance which is identical to the original artifact, but with - /// potentially additional artifacts. The artifacts must be bound. 
- fn with_associated_artifacts<'v>( - this: &'v StarlarkDeclaredArtifact, - artifacts: ListOf<'v, ValueAsArtifactLike<'v>>, - ) -> anyhow::Result { - let artifacts = artifacts - .to_vec() - .iter() - .map(|a| a.0.get_artifact_group()) - .collect::, _>>()?; - - let artifacts = AssociatedArtifacts::from(artifacts); - - Ok(StarlarkDeclaredArtifact { - declaration_location: this.declaration_location.dupe(), - artifact: this.artifact.dupe(), - associated_artifacts: this.associated_artifacts.union(artifacts), - }) - } } diff --git a/app/buck2_build_api/src/interpreter/rule_defs/artifact/starlark_output_artifact.rs b/app/buck2_build_api/src/interpreter/rule_defs/artifact/starlark_output_artifact.rs index 5667bc3fbeece..70127be6d769f 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/artifact/starlark_output_artifact.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/artifact/starlark_output_artifact.rs @@ -13,11 +13,10 @@ use std::fmt::Display; use allocative::Allocative; use buck2_artifact::artifact::artifact_type::OutputArtifact; +use buck2_error::BuckErrorContext; use dupe::Dupe; -use either::Either; use starlark::any::ProvidesStaticType; use starlark::environment::GlobalsBuilder; -use starlark::typing::Ty; use starlark::values::starlark_value; use starlark::values::starlark_value_as_type::StarlarkValueAsType; use starlark::values::type_repr::StarlarkTypeRepr; @@ -28,13 +27,14 @@ use starlark::values::FrozenValueTyped; use starlark::values::NoSerialize; use starlark::values::StarlarkValue; use starlark::values::Trace; -use starlark::values::UnpackValue; -use starlark::values::Value; +use starlark::values::ValueLifetimeless; use starlark::values::ValueLike; +use starlark::values::ValueOfUncheckedGeneric; use starlark::values::ValueTyped; -use crate::interpreter::rule_defs::artifact::StarlarkArtifact; -use crate::interpreter::rule_defs::artifact::StarlarkDeclaredArtifact; +use crate::interpreter::rule_defs::artifact::starlark_artifact::StarlarkArtifact; +use crate::interpreter::rule_defs::artifact::starlark_declared_artifact::StarlarkDeclaredArtifact; +use crate::interpreter::rule_defs::cmd_args::command_line_arg_like_type::command_line_arg_like_impl; use crate::interpreter::rule_defs::cmd_args::CommandLineArgLike; use crate::interpreter::rule_defs::cmd_args::CommandLineArtifactVisitor; use crate::interpreter::rule_defs::cmd_args::CommandLineBuilder; @@ -57,51 +57,26 @@ use crate::interpreter::rule_defs::cmd_args::WriteToFileMacroVisitor; Coerce )] #[repr(C)] -pub struct StarlarkOutputArtifactGen { - pub(super) declared_artifact: V, +pub struct StarlarkOutputArtifactGen { + pub(super) declared_artifact: ValueOfUncheckedGeneric, } starlark_complex_value!(pub StarlarkOutputArtifact); impl<'v> Display for StarlarkOutputArtifact<'v> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "", - self.inner().get_artifact_path() - ) + match self.inner() { + Ok(inner) => write!(f, "", inner.get_artifact_path()), + Err(_) => write!(f, ""), + } } } impl Display for FrozenStarlarkOutputArtifact { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "", - self.inner().get_artifact_path() - ) - } -} - -/// A wrapper for `UnpackValue` that accepts either an output artifact, -/// or an artifact declared in the same rule. 
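In the `StarlarkOutputArtifact` changes below, `inner()` stops `unwrap()`ing the stored value and instead returns a labeled internal error, so a type-confusion bug surfaces as a diagnosable message rather than a panic. The general pattern, sketched with `anyhow` and hypothetical types:

```rust
use anyhow::Context;

#[derive(Debug)]
enum Value {
    Declared(String),
    Other,
}

// Stand-in for `ValueTyped::new_err`: a fallible downcast.
fn downcast_declared(v: &Value) -> anyhow::Result<&String> {
    match v {
        Value::Declared(path) => Ok(path),
        other => Err(anyhow::anyhow!("expected a declared artifact, got {other:?}")),
    }
}

// Stand-in for `inner()`: attach context marking this as an internal
// invariant violation rather than a user error.
fn inner(v: &Value) -> anyhow::Result<&String> {
    downcast_declared(v)
        .with_context(|| format!("internal error: must be a declared artifact: `{v:?}`"))
}

fn main() {
    assert!(inner(&Value::Declared("out/a".to_owned())).is_ok());
    let err = inner(&Value::Other).unwrap_err();
    assert!(format!("{err:#}").contains("internal error"));
}
```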
-pub struct StarlarkOutputOrDeclaredArtifact<'v>(pub StarlarkOutputArtifact<'v>); - -impl<'v> StarlarkTypeRepr for StarlarkOutputOrDeclaredArtifact<'v> { - fn starlark_type_repr() -> Ty { - Either::::starlark_type_repr() - } -} - -impl<'v> UnpackValue<'v> for StarlarkOutputOrDeclaredArtifact<'v> { - fn unpack_value(value: Value<'v>) -> Option { - #[allow(clippy::manual_map)] - if let Some(x) = value.downcast_ref::() { - Some(Self(x.dupe())) - } else if let Some(x) = ValueTyped::::new(value) { - Some(Self(StarlarkOutputArtifact::new(x))) - } else { - None + match self.inner() { + Ok(inner) => write!(f, "", inner.get_artifact_path()), + Err(_) => write!(f, ""), } } } @@ -109,30 +84,52 @@ impl<'v> UnpackValue<'v> for StarlarkOutputOrDeclaredArtifact<'v> { impl<'v> StarlarkOutputArtifact<'v> { pub fn new(v: ValueTyped<'v, StarlarkDeclaredArtifact>) -> Self { Self { - declared_artifact: v.to_value(), + declared_artifact: v.to_value_of_unchecked(), } } - pub(crate) fn inner(&self) -> ValueTyped<'v, StarlarkDeclaredArtifact> { - ValueTyped::new(self.declared_artifact).unwrap() + pub(crate) fn inner(&self) -> anyhow::Result> { + ValueTyped::new_err(self.declared_artifact.get()).with_internal_error_anyhow(|| { + format!( + "Must be a declared artifact: `{}`", + self.declared_artifact.get().to_string_for_type_error() + ) + }) } - pub fn artifact(&self) -> OutputArtifact { - self.inner().output_artifact() + pub fn artifact(&self) -> anyhow::Result { + Ok(self.inner()?.output_artifact()) } } impl FrozenStarlarkOutputArtifact { - pub(crate) fn inner(&self) -> FrozenValueTyped { - FrozenValueTyped::new(self.declared_artifact).unwrap() - } - - pub fn artifact(&self) -> OutputArtifact { - self.inner().artifact().as_output_artifact().unwrap() + pub(crate) fn inner(&self) -> anyhow::Result> { + FrozenValueTyped::new_err(self.declared_artifact.get()).with_internal_error_anyhow(|| { + format!( + "Must be a declared artifact: `{}`", + self.declared_artifact + .get() + .to_value() + .to_string_for_type_error() + ) + }) + } + + pub fn artifact(&self) -> anyhow::Result { + let artifact = self.inner()?.artifact(); + artifact + .as_output_artifact() + .with_internal_error_anyhow(|| { + format!("Expecting artifact to be output artifact, got {artifact}") + }) } } impl<'v> CommandLineArgLike for StarlarkOutputArtifact<'v> { + fn register_me(&self) { + command_line_arg_like_impl!(StarlarkOutputArtifact::starlark_type_repr()); + } + fn add_to_command_line( &self, _cli: &mut dyn CommandLineBuilder, @@ -145,7 +142,7 @@ impl<'v> CommandLineArgLike for StarlarkOutputArtifact<'v> { } fn visit_artifacts(&self, visitor: &mut dyn CommandLineArtifactVisitor) -> anyhow::Result<()> { - visitor.visit_output(self.artifact(), None); + visitor.visit_output(self.artifact()?, None); Ok(()) } @@ -162,7 +159,7 @@ impl<'v> CommandLineArgLike for StarlarkOutputArtifact<'v> { } #[starlark_value(type = "output_artifact")] -impl<'v, V: ValueLike<'v> + 'v> StarlarkValue<'v> for StarlarkOutputArtifactGen +impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for StarlarkOutputArtifactGen where Self: ProvidesStaticType<'v> + Display + CommandLineArgLike, { @@ -172,20 +169,24 @@ where } impl CommandLineArgLike for FrozenStarlarkOutputArtifact { + fn register_me(&self) { + command_line_arg_like_impl!(FrozenStarlarkOutputArtifact::starlark_type_repr()); + } + fn add_to_command_line( &self, cli: &mut dyn CommandLineBuilder, ctx: &mut dyn CommandLineContext, ) -> anyhow::Result<()> { cli.push_arg( - ctx.resolve_artifact(&self.inner().artifact())? 
+ ctx.resolve_artifact(&self.inner()?.artifact())? .into_string(), ); Ok(()) } fn visit_artifacts(&self, visitor: &mut dyn CommandLineArtifactVisitor) -> anyhow::Result<()> { - visitor.visit_output(self.artifact(), None); + visitor.visit_output(self.artifact()?, None); Ok(()) } diff --git a/app/buck2_build_api/src/interpreter/rule_defs/artifact/starlark_promise_artifact.rs b/app/buck2_build_api/src/interpreter/rule_defs/artifact/starlark_promise_artifact.rs index 8f2d91dbeba0a..3b5f5aeded687 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/artifact/starlark_promise_artifact.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/artifact/starlark_promise_artifact.rs @@ -10,6 +10,7 @@ use std::fmt; use std::fmt::Debug; use std::fmt::Display; +use std::sync::Arc; use allocative::Allocative; use anyhow::Context as _; @@ -23,11 +24,10 @@ use starlark::any::ProvidesStaticType; use starlark::codemap::FileSpan; use starlark::collections::StarlarkHasher; use starlark::environment::Methods; -use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; -use starlark::typing::Ty; -use starlark::values::list::ListOf; +use starlark::values::list::UnpackList; use starlark::values::starlark_value; +use starlark::values::type_repr::StarlarkTypeRepr; use starlark::values::Demand; use starlark::values::Heap; use starlark::values::NoSerialize; @@ -35,26 +35,27 @@ use starlark::values::StarlarkValue; use starlark::values::StringValue; use starlark::values::Trace; use starlark::values::Value; -use thiserror::Error; use crate::artifact_groups::promise::PromiseArtifact; use crate::artifact_groups::ArtifactGroup; use crate::interpreter::rule_defs::artifact::associated::AssociatedArtifacts; +use crate::interpreter::rule_defs::artifact::methods::artifact_methods; +use crate::interpreter::rule_defs::artifact::methods::EitherStarlarkArtifact; +use crate::interpreter::rule_defs::artifact::starlark_artifact::StarlarkArtifact; use crate::interpreter::rule_defs::artifact::starlark_artifact::StarlarkArtifactHelpers; use crate::interpreter::rule_defs::artifact::starlark_artifact_like::ArtifactFingerprint; +use crate::interpreter::rule_defs::artifact::starlark_artifact_like::StarlarkArtifactLike; +use crate::interpreter::rule_defs::artifact::starlark_artifact_like::ValueAsArtifactLike; +use crate::interpreter::rule_defs::artifact::starlark_output_artifact::StarlarkOutputArtifact; use crate::interpreter::rule_defs::artifact::ArtifactError; -use crate::interpreter::rule_defs::artifact::StarlarkArtifact; -use crate::interpreter::rule_defs::artifact::StarlarkArtifactLike; -use crate::interpreter::rule_defs::artifact::StarlarkDeclaredArtifact; -use crate::interpreter::rule_defs::artifact::StarlarkOutputArtifact; -use crate::interpreter::rule_defs::artifact::ValueAsArtifactLike; +use crate::interpreter::rule_defs::cmd_args::command_line_arg_like_type::command_line_arg_like_impl; use crate::interpreter::rule_defs::cmd_args::CommandLineArgLike; use crate::interpreter::rule_defs::cmd_args::CommandLineArtifactVisitor; use crate::interpreter::rule_defs::cmd_args::CommandLineBuilder; use crate::interpreter::rule_defs::cmd_args::CommandLineContext; use crate::interpreter::rule_defs::cmd_args::WriteToFileMacroVisitor; -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum PromiseArtifactError { #[error("cannot access {1} on unresolved promise artifact ({0})")] MethodUnsupported(StarlarkPromiseArtifact, &'static str), @@ -92,7 +93,7 @@ enum PromiseArtifactError { pub struct 
StarlarkPromiseArtifact { pub declaration_location: Option, pub artifact: PromiseArtifact, - short_path: Option, + pub short_path: Option, } starlark_simple_value!(StarlarkPromiseArtifact); @@ -130,7 +131,7 @@ impl StarlarkPromiseArtifact { pub fn as_artifact(&self) -> ArtifactGroup { match self.artifact.get() { Some(artifact) => ArtifactGroup::Artifact(artifact.dupe()), - None => ArtifactGroup::Promise(self.artifact.dupe()), + None => ArtifactGroup::Promise(Arc::new(self.artifact.dupe())), } } @@ -185,9 +186,73 @@ impl StarlarkArtifactLike for StarlarkPromiseArtifact { fn get_artifact_group(&self) -> anyhow::Result { Ok(self.as_artifact()) } + + fn basename<'v>(&'v self, heap: &'v Heap) -> anyhow::Result> { + match self.artifact.get() { + Some(v) => StarlarkArtifactHelpers::basename(v, heap), + None => Ok(heap.alloc_str(self.file_name_err()?.as_str())), + } + } + + fn extension<'v>(&'v self, heap: &'v Heap) -> anyhow::Result> { + match self.artifact.get() { + Some(v) => StarlarkArtifactHelpers::extension(v, heap), + None => Ok(StarlarkArtifactHelpers::alloc_extension( + self.file_name_err()?.extension(), + heap, + )), + } + } + + fn is_source<'v>(&'v self) -> anyhow::Result { + Ok(false) + } + + fn owner<'v>(&'v self) -> anyhow::Result> { + match self.artifact.get() { + Some(v) => StarlarkArtifactHelpers::owner(v), + None => Err(PromiseArtifactError::MethodUnsupported(self.clone(), "owner").into()), + } + } + + fn short_path<'v>(&'v self, heap: &'v Heap) -> anyhow::Result> { + match self.artifact.get() { + Some(v) => StarlarkArtifactHelpers::short_path(v, heap), + None => Ok(heap.alloc_str(self.short_path_err()?.as_str())), + } + } + + fn as_output<'v>(&'v self, _this: Value<'v>) -> anyhow::Result> { + Err(self.as_output_error()) + } + + fn project<'v>( + &'v self, + path: &ForwardRelativePath, + hide_prefix: bool, + ) -> anyhow::Result { + let _ = (path, hide_prefix); + Err(PromiseArtifactError::CannotProject(self.clone()).into()) + } + + fn without_associated_artifacts<'v>(&'v self) -> anyhow::Result { + Ok(EitherStarlarkArtifact::PromiseArtifact(self.clone())) + } + + fn with_associated_artifacts<'v>( + &'v self, + artifacts: UnpackList>, + ) -> anyhow::Result { + let _unused = artifacts; + Err(PromiseArtifactError::CannotAddAssociatedArtifacts.into()) + } } impl CommandLineArgLike for StarlarkPromiseArtifact { + fn register_me(&self) { + command_line_arg_like_impl!(StarlarkPromiseArtifact::starlark_type_repr()); + } + fn add_to_command_line( &self, cli: &mut dyn CommandLineBuilder, @@ -225,128 +290,18 @@ impl<'v> StarlarkValue<'v> for StarlarkPromiseArtifact { fn get_methods() -> Option<&'static Methods> { static RES: MethodsStatic = MethodsStatic::new(); - RES.methods(promise_artifact_methods) + RES.methods(artifact_methods) } - fn equals(&self, other: Value<'v>) -> anyhow::Result { + fn equals(&self, other: Value<'v>) -> starlark::Result { StarlarkArtifactLike::equals(self, other) } - fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { + fn write_hash(&self, hasher: &mut StarlarkHasher) -> starlark::Result<()> { StarlarkArtifactLike::write_hash(self, hasher) } fn provide(&'v self, demand: &mut Demand<'_, 'v>) { demand.provide_value::<&dyn CommandLineArgLike>(self); } - - fn matches_type(&self, ty: &str) -> bool { - Self::TYPE == ty || ty == "artifact" - } - - fn get_type_starlark_repr() -> Ty { - Ty::starlark_value::() - } -} - -/// A single input or output for an action -#[starlark_module] -fn promise_artifact_methods(builder: &mut MethodsBuilder) { - /// The 
base name of this artifact. e.g. for an artifact at `foo/bar`, this is `bar` - #[starlark(attribute)] - fn basename<'v>( - this: &StarlarkPromiseArtifact, - heap: &Heap, - ) -> anyhow::Result> { - match this.artifact.get() { - Some(v) => StarlarkArtifactHelpers::basename(v, heap), - None => Ok(heap.alloc_str(this.file_name_err()?.as_str())), - } - } - - /// The file extension of this artifact. e.g. for an artifact at foo/bar.sh, - /// this is `.sh`. If no extension is present, `""` is returned. - #[starlark(attribute)] - fn extension<'v>( - this: &StarlarkPromiseArtifact, - heap: &Heap, - ) -> anyhow::Result> { - match this.artifact.get() { - Some(v) => StarlarkArtifactHelpers::extension(v, heap), - None => Ok(StarlarkArtifactHelpers::alloc_extension( - this.file_name_err()?.extension(), - heap, - )), - } - } - - /// Whether the artifact represents a source file - #[starlark(attribute)] - fn is_source(this: &StarlarkPromiseArtifact) -> anyhow::Result { - Ok(false) - } - - /// The `Label` of the rule that originally created this artifact. May also be None in - /// the case of source files, or if the artifact has not be used in an action, or if the - /// action was not created by a rule. - #[starlark(attribute)] - fn owner<'v>( - this: &StarlarkPromiseArtifact, - ) -> anyhow::Result> { - match this.artifact.get() { - Some(v) => StarlarkArtifactHelpers::owner(v), - None => Err(PromiseArtifactError::MethodUnsupported(this.clone(), "owner").into()), - } - } - - /// Returns a `StarlarkOutputArtifact` instance, or fails if the artifact is - /// either an `Artifact`, or is a bound `DeclaredArtifact` (You cannot bind twice) - fn as_output<'v>( - this: &'v StarlarkPromiseArtifact, - ) -> anyhow::Result> { - Err(this.as_output_error()) - } - - /// The interesting part of the path, relative to somewhere in the output directory. - /// For an artifact declared as `foo/bar`, this is `foo/bar`. - #[starlark(attribute)] - fn short_path<'v>( - this: &StarlarkPromiseArtifact, - heap: &Heap, - ) -> anyhow::Result> { - match this.artifact.get() { - Some(v) => StarlarkArtifactHelpers::short_path(v, heap), - None => Ok(heap.alloc_str(this.short_path_err()?.as_str())), - } - } - - /// Create an artifact that lives at path relative from this artifact - /// For example, if artifact foo is a directory containing a file bar, then foo.project("bar") yields the file bar. - /// It is possible for projected artifacts to hide the prefix in order to have the short name of the resulting artifact only contain the projected path, by passing hide_prefix = True to project(). - fn project<'v>( - this: &'v StarlarkPromiseArtifact, - path: &str, - #[starlark(require = named, default = false)] hide_prefix: bool, - ) -> anyhow::Result { - let _ = (path, hide_prefix); - Err(PromiseArtifactError::CannotProject(this.clone()).into()) - } - - /// Returns a `StarlarkPromiseArtifact` instance which is identical to the original artifact, - /// except with no associated artifacts - fn without_associated_artifacts( - this: &StarlarkPromiseArtifact, - ) -> anyhow::Result { - Ok(this.clone()) - } - - /// Returns a `StarlarkArtifact` instance which is identical to the original artifact, but with - /// potentially additional artifacts. The artifacts must be bound. 
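The promise-artifact accessors above all share one shape: use the resolved artifact if the promise has completed, otherwise fall back to metadata captured at declaration time, or fail with a `PromiseArtifactError` where no fallback exists. A sketch of that fallback using a write-once cell and hypothetical fields:

```rust
use std::sync::OnceLock;

struct PromiseArtifact {
    resolved: OnceLock<String>,          // resolved path, set at most once
    declared_file_name: Option<String>,  // metadata known before resolution
}

impl PromiseArtifact {
    // Prefer the resolved artifact; otherwise fall back to declared metadata.
    fn basename(&self) -> Result<&str, &'static str> {
        if let Some(path) = self.resolved.get() {
            return Ok(path.rsplit('/').next().unwrap_or(path));
        }
        self.declared_file_name
            .as_deref()
            .ok_or("file name not known on unresolved promise artifact")
    }

    fn resolve(&self, path: String) {
        let _ = self.resolved.set(path);
    }
}

fn main() {
    let p = PromiseArtifact {
        resolved: OnceLock::new(),
        declared_file_name: Some("bar.sh".to_owned()),
    };
    assert_eq!(p.basename().unwrap(), "bar.sh"); // before resolution
    p.resolve("foo/out/bar.sh".to_owned());
    assert_eq!(p.basename().unwrap(), "bar.sh"); // after resolution
}
```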
- fn with_associated_artifacts<'v>( - this: &'v StarlarkDeclaredArtifact, - artifacts: ListOf<'v, ValueAsArtifactLike<'v>>, - ) -> anyhow::Result { - let _unused = (this, artifacts); - Err(PromiseArtifactError::CannotAddAssociatedArtifacts.into()) - } } diff --git a/app/buck2_build_api/src/interpreter/rule_defs/artifact/unpack_artifact.rs b/app/buck2_build_api/src/interpreter/rule_defs/artifact/unpack_artifact.rs new file mode 100644 index 0000000000000..6b4256cc95081 --- /dev/null +++ b/app/buck2_build_api/src/interpreter/rule_defs/artifact/unpack_artifact.rs @@ -0,0 +1,32 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use buck2_artifact::artifact::artifact_type::Artifact; +use dupe::Dupe; +use starlark::values::type_repr::StarlarkTypeRepr; +use starlark::values::UnpackValue; + +use crate::interpreter::rule_defs::artifact::starlark_artifact::StarlarkArtifact; +use crate::interpreter::rule_defs::artifact::starlark_artifact_like::StarlarkArtifactLike; +use crate::interpreter::rule_defs::artifact::starlark_declared_artifact::StarlarkDeclaredArtifact; + +#[derive(StarlarkTypeRepr, UnpackValue)] +pub enum UnpackArtifactOrDeclaredArtifact<'v> { + Artifact(&'v StarlarkArtifact), + DeclaredArtifact(&'v StarlarkDeclaredArtifact), +} + +impl<'v> UnpackArtifactOrDeclaredArtifact<'v> { + pub fn artifact(&self) -> anyhow::Result { + match self { + UnpackArtifactOrDeclaredArtifact::Artifact(x) => Ok(x.artifact.dupe()), + UnpackArtifactOrDeclaredArtifact::DeclaredArtifact(x) => x.get_bound_artifact(), + } + } +} diff --git a/app/buck2_build_api/src/interpreter/rule_defs/artifact_tagging/mod.rs b/app/buck2_build_api/src/interpreter/rule_defs/artifact_tagging.rs similarity index 100% rename from app/buck2_build_api/src/interpreter/rule_defs/artifact_tagging/mod.rs rename to app/buck2_build_api/src/interpreter/rule_defs/artifact_tagging.rs diff --git a/app/buck2_build_api/src/interpreter/rule_defs/artifact_tagging/artifact_tag.rs b/app/buck2_build_api/src/interpreter/rule_defs/artifact_tagging/artifact_tag.rs index c6d0e97b6da0c..eb90ef86ca207 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/artifact_tagging/artifact_tag.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/artifact_tagging/artifact_tag.rs @@ -21,15 +21,16 @@ use starlark::environment::GlobalsBuilder; use starlark::environment::Methods; use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; -use starlark::typing::Ty; use starlark::values::starlark_value; use starlark::values::starlark_value_as_type::StarlarkValueAsType; use starlark::values::Freeze; use starlark::values::NoSerialize; use starlark::values::StarlarkValue; use starlark::values::Trace; +use starlark::values::UnpackValue; use starlark::values::Value; use starlark::values::ValueLike; +use starlark::StarlarkResultExt; use crate::interpreter::rule_defs::artifact_tagging::TaggedCommandLine; use crate::interpreter::rule_defs::artifact_tagging::TaggedValue; @@ -51,7 +52,6 @@ use crate::interpreter::rule_defs::cmd_args::value_as::ValueAsCommandLineLike; Allocative )] pub struct ArtifactTag { - #[cfg_attr(feature = "gazebo_lint", allow(gazebo_lint_arc_on_dupe))] #[freeze(identity)] identity: Arc<()>, } @@ -93,21 +93,17 @@ impl<'v> StarlarkValue<'v> for ArtifactTag { 
RES.methods(artifact_tag_methods) } - fn equals(&self, other: Value<'v>) -> anyhow::Result { + fn equals(&self, other: Value<'v>) -> starlark::Result { Ok(match other.downcast_ref::() { Some(other) => self == other, None => false, }) } - fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { + fn write_hash(&self, hasher: &mut StarlarkHasher) -> starlark::Result<()> { Hash::hash(self, hasher); Ok(()) } - - fn get_type_starlark_repr() -> Ty { - Ty::starlark_value::() - } } #[starlark_module] @@ -118,11 +114,16 @@ fn artifact_tag_methods(_: &mut MethodsBuilder) { ) -> anyhow::Result, TaggedCommandLine<'v>>> { let value = TaggedValue::new(inner, this.dupe()); - Ok(if inner.as_command_line().is_some() { - Either::Right(TaggedCommandLine::new(value)) - } else { - Either::Left(value) - }) + Ok( + if ValueAsCommandLineLike::unpack_value(inner) + .into_anyhow_result()? + .is_some() + { + Either::Right(TaggedCommandLine::new(value)) + } else { + Either::Left(value) + }, + ) } fn tag_inputs<'v>( @@ -131,11 +132,16 @@ fn artifact_tag_methods(_: &mut MethodsBuilder) { ) -> anyhow::Result, TaggedCommandLine<'v>>> { let value = TaggedValue::inputs_only(inner, this.dupe()); - Ok(if inner.as_command_line().is_some() { - Either::Right(TaggedCommandLine::new(value)) - } else { - Either::Left(value) - }) + Ok( + if ValueAsCommandLineLike::unpack_value(inner) + .into_anyhow_result()? + .is_some() + { + Either::Right(TaggedCommandLine::new(value)) + } else { + Either::Left(value) + }, + ) } } diff --git a/app/buck2_build_api/src/interpreter/rule_defs/artifact_tagging/tagged_command_line.rs b/app/buck2_build_api/src/interpreter/rule_defs/artifact_tagging/tagged_command_line.rs index 49904f9b0f502..d018329f96459 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/artifact_tagging/tagged_command_line.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/artifact_tagging/tagged_command_line.rs @@ -11,16 +11,19 @@ use allocative::Allocative; use derive_more::Display; use starlark::any::ProvidesStaticType; use starlark::coerce::Coerce; -use starlark::typing::Ty; use starlark::values::starlark_value; +use starlark::values::type_repr::StarlarkTypeRepr; use starlark::values::Demand; use starlark::values::Freeze; use starlark::values::NoSerialize; use starlark::values::StarlarkValue; use starlark::values::Trace; +use starlark::values::UnpackValue; +use starlark::values::ValueLifetimeless; use starlark::values::ValueLike; use super::TaggedValueGen; +use crate::interpreter::rule_defs::cmd_args::command_line_arg_like_type::command_line_arg_like_impl; use crate::interpreter::rule_defs::cmd_args::value_as::ValueAsCommandLineLike; use crate::interpreter::rule_defs::cmd_args::CommandLineArgLike; use crate::interpreter::rule_defs::cmd_args::CommandLineArtifactVisitor; @@ -42,12 +45,12 @@ use crate::interpreter::rule_defs::cmd_args::WriteToFileMacroVisitor; )] #[derive(NoSerialize)] // TODO make artifacts serializable #[repr(C)] -#[display(fmt = "TaggedCommandLine({})", inner)] -pub struct TaggedCommandLineGen { +#[display("TaggedCommandLine({})", inner)] +pub struct TaggedCommandLineGen { inner: TaggedValueGen, } -impl TaggedCommandLineGen { +impl TaggedCommandLineGen { pub fn new(inner: TaggedValueGen) -> Self { Self { inner } } @@ -56,58 +59,49 @@ impl TaggedCommandLineGen { starlark_complex_value!(pub TaggedCommandLine); #[starlark_value(type = "tagged_command_line")] -impl<'v, V: ValueLike<'v> + 'v> StarlarkValue<'v> for TaggedCommandLineGen +impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for 
TaggedCommandLineGen where Self: ProvidesStaticType<'v>, { fn provide(&'v self, demand: &mut Demand<'_, 'v>) { demand.provide_value::<&dyn CommandLineArgLike>(self); } - - fn get_type_starlark_repr() -> Ty { - Ty::starlark_value::() - } } impl<'v, V: ValueLike<'v>> CommandLineArgLike for TaggedCommandLineGen { + fn register_me(&self) { + command_line_arg_like_impl!(TaggedCommandLine::starlark_type_repr()); + } + fn add_to_command_line( &self, cli: &mut dyn CommandLineBuilder, context: &mut dyn CommandLineContext, ) -> anyhow::Result<()> { - self.inner - .value() - .to_value() - .as_command_line_err()? + ValueAsCommandLineLike::unpack_value_err(self.inner.value().to_value())? + .0 .add_to_command_line(cli, context) } fn visit_artifacts(&self, visitor: &mut dyn CommandLineArtifactVisitor) -> anyhow::Result<()> { let mut visitor = self.inner.wrap_visitor(visitor); - self.inner - .value() - .to_value() - .as_command_line_err()? + ValueAsCommandLineLike::unpack_value_err(self.inner.value().to_value())? + .0 .visit_artifacts(&mut visitor) } fn contains_arg_attr(&self) -> bool { - self.inner - .value() - .to_value() - .as_command_line() - .map_or(false, |inner| inner.contains_arg_attr()) + ValueAsCommandLineLike::unpack(self.inner.value().to_value()) + .map_or(false, |inner| inner.0.contains_arg_attr()) } fn visit_write_to_file_macros( &self, visitor: &mut dyn WriteToFileMacroVisitor, ) -> anyhow::Result<()> { - self.inner - .value() - .to_value() - .as_command_line_err()? + ValueAsCommandLineLike::unpack_value_err(self.inner.value().to_value())? + .0 .visit_write_to_file_macros(visitor) } } diff --git a/app/buck2_build_api/src/interpreter/rule_defs/artifact_tagging/tagged_value.rs b/app/buck2_build_api/src/interpreter/rule_defs/artifact_tagging/tagged_value.rs index cf279826f584e..5fb553e9816a9 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/artifact_tagging/tagged_value.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/artifact_tagging/tagged_value.rs @@ -11,13 +11,13 @@ use allocative::Allocative; use derive_more::Display; use starlark::any::ProvidesStaticType; use starlark::coerce::Coerce; -use starlark::typing::Ty; use starlark::values::starlark_value; use starlark::values::Freeze; use starlark::values::NoSerialize; use starlark::values::StarlarkValue; use starlark::values::Trace; use starlark::values::Value; +use starlark::values::ValueLifetimeless; use starlark::values::ValueLike; use super::ArtifactTag; @@ -39,8 +39,8 @@ use crate::interpreter::rule_defs::cmd_args::CommandLineArtifactVisitor; )] #[derive(NoSerialize)] // TODO make artifacts serializable #[repr(C)] -#[display(fmt = "TaggedValue({}, tagged {})", inner, tag)] -pub struct TaggedValueGen { +#[display("TaggedValue({}, tagged {})", inner, tag)] +pub struct TaggedValueGen { inner: V, tag: ArtifactTag, inputs_only: bool, @@ -67,16 +67,10 @@ impl<'v> TaggedValue<'v> { starlark_complex_value!(pub TaggedValue); #[starlark_value(type = "tagged_value")] -impl<'v, V: ValueLike<'v> + 'v> StarlarkValue<'v> for TaggedValueGen -where - Self: ProvidesStaticType<'v>, -{ - fn get_type_starlark_repr() -> Ty { - Ty::starlark_value::() - } -} +impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for TaggedValueGen where Self: ProvidesStaticType<'v> +{} -impl TaggedValueGen { +impl TaggedValueGen { pub fn value(&self) -> &V { &self.inner } diff --git a/app/buck2_build_api/src/interpreter/rule_defs/cmd_args.rs b/app/buck2_build_api/src/interpreter/rule_defs/cmd_args.rs new file mode 100644 index 0000000000000..fb47964beda52 --- /dev/null 
+++ b/app/buck2_build_api/src/interpreter/rule_defs/cmd_args.rs @@ -0,0 +1,24 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub mod arg_builder; +mod builder; +pub mod command_line_arg_like_type; +mod options; +pub(crate) mod regex; +pub(crate) mod shlex_quote; +pub mod space_separated; +mod traits; +mod typ; +pub mod value; +pub mod value_as; + +pub use builder::*; +pub use traits::*; +pub use typ::*; diff --git a/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/builder.rs b/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/builder.rs index 92f26ed624564..048769ec4792e 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/builder.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/builder.rs @@ -15,12 +15,11 @@ use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; use buck2_execute::artifact::artifact_dyn::ArtifactDyn; use buck2_execute::artifact::fs::ExecutorFs; use indexmap::IndexSet; -use thiserror::Error; use crate::interpreter::rule_defs::cmd_args::traits::CommandLineContext; use crate::interpreter::rule_defs::cmd_args::traits::CommandLineLocation; -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] pub enum CommandLineBuilderErrors { #[error( "write-to-file macro is only supported as a part of command line argument which is written to a file" @@ -149,10 +148,9 @@ mod tests { use buck2_core::fs::buck_out_path::BuckOutPathResolver; use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; use buck2_core::fs::project::ProjectRoot; - use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; + use buck2_core::fs::project_rel_path::ProjectRelativePath; use super::*; - use crate::interpreter::rule_defs::cmd_args::builder::DefaultCommandLineContext; use crate::interpreter::rule_defs::cmd_args::traits::CommandLineArgLike; #[test] @@ -174,9 +172,8 @@ mod tests { let mut ctx = DefaultCommandLineContext::new(&executor_fs); "foo".add_to_command_line(&mut cli, &mut ctx)?; - "bar".to_owned().add_to_command_line(&mut cli, &mut ctx)?; - assert_eq!(&["foo".to_owned(), "bar".to_owned()], cli.as_slice()); + assert_eq!(&["foo".to_owned()], cli.as_slice()); Ok(()) } @@ -216,9 +213,12 @@ mod tests { ); } - #[cfg(not(unix))] #[test] fn test_abs_command_line_location_windows() { + if !cfg!(windows) { + return; + } + let root = ProjectRoot::new_unchecked(AbsNormPathBuf::unchecked_new(PathBuf::from( "C:\\foo\\bar", ))); @@ -226,7 +226,7 @@ mod tests { assert_eq!( CommandLineLocation::from_root( &root, - RelativePathBuf::new(), + ProjectRelativePath::empty().to_buf(), PathSeparatorKind::Windows ) .into_string(), @@ -236,7 +236,7 @@ mod tests { assert_eq!( CommandLineLocation::from_root( &root, - RelativePathBuf::from("baz"), + ProjectRelativePathBuf::testing_new("baz"), PathSeparatorKind::Windows ) .into_string(), @@ -246,7 +246,7 @@ mod tests { assert_eq!( CommandLineLocation::from_root( &root, - RelativePathBuf::from("baz/qux"), + ProjectRelativePathBuf::testing_new("baz/qux"), PathSeparatorKind::Windows ) .into_string(), @@ -254,22 +254,29 @@ mod tests { ); } - #[cfg(unix)] #[test] fn test_abs_command_line_location_unix() { + if !cfg!(unix) { + return; + } + let root = ProjectRoot::new_unchecked(AbsNormPathBuf::unchecked_new(PathBuf::from("/foo/bar"))); assert_eq!( - 
CommandLineLocation::from_root(&root, RelativePathBuf::new(), PathSeparatorKind::Unix) - .into_string(), + CommandLineLocation::from_root( + &root, + ProjectRelativePath::empty().to_buf(), + PathSeparatorKind::Unix + ) + .into_string(), "/foo/bar", ); assert_eq!( CommandLineLocation::from_root( &root, - RelativePathBuf::from("baz"), + ProjectRelativePathBuf::testing_new("baz"), PathSeparatorKind::Unix ) .into_string(), @@ -279,7 +286,7 @@ mod tests { assert_eq!( CommandLineLocation::from_root( &root, - RelativePathBuf::from("baz/qux"), + ProjectRelativePathBuf::testing_new("baz/qux"), PathSeparatorKind::Unix ) .into_string(), diff --git a/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/command_line_arg_like_type.rs b/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/command_line_arg_like_type.rs new file mode 100644 index 0000000000000..f4d31be27af5b --- /dev/null +++ b/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/command_line_arg_like_type.rs @@ -0,0 +1,67 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use once_cell::sync::Lazy; +use starlark::typing::Ty; + +pub struct CommandLineArgLikeImpl { + pub ty: fn() -> Ty, +} + +pub mod __macro_refs { + pub use linkme; +} + +#[macro_export] +macro_rules! command_line_arg_like_impl { + ($ty:expr) => { + { + use $crate::interpreter::rule_defs::cmd_args::command_line_arg_like_type::__macro_refs::linkme; + + #[linkme::distributed_slice( + $crate::interpreter::rule_defs::cmd_args::command_line_arg_like_type::COMMAND_LINE_ARG_LIKE_IMPLS + )] + #[linkme(crate = $crate::interpreter::rule_defs::cmd_args::command_line_arg_like_type::__macro_refs::linkme)] + static IMPL: $crate::interpreter::rule_defs::cmd_args::command_line_arg_like_type::CommandLineArgLikeImpl = + $crate::interpreter::rule_defs::cmd_args::command_line_arg_like_type::CommandLineArgLikeImpl { ty: || $ty }; + } + }; +} + +pub use command_line_arg_like_impl; + +#[linkme::distributed_slice] +pub static COMMAND_LINE_ARG_LIKE_IMPLS: [CommandLineArgLikeImpl] = [..]; + +pub(crate) fn command_line_arg_like_ty() -> &'static Ty { + static TY: Lazy = Lazy::new(|| { + assert!(!COMMAND_LINE_ARG_LIKE_IMPLS.is_empty()); + let ty = Ty::unions( + COMMAND_LINE_ARG_LIKE_IMPLS + .iter() + .map(|impl_| (impl_.ty)()) + .collect(), + ); + assert_ne!(ty, Ty::any()); + assert_ne!(ty, Ty::never()); + ty + }); + &TY +} + +#[cfg(test)] +mod tests { + use crate::interpreter::rule_defs::cmd_args::command_line_arg_like_type::command_line_arg_like_ty; + + #[test] + fn test_command_arg_like_ty() { + // Trigger assertions. + command_line_arg_like_ty(); + } +} diff --git a/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/mod.rs b/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/mod.rs deleted file mode 100644 index 948d132a3c08a..0000000000000 --- a/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/mod.rs +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -pub mod arg_builder; -mod builder; -mod options; -pub(crate) mod regex; -pub mod space_separated; -mod traits; -mod typ; -pub mod value; -pub mod value_as; - -pub use builder::*; -pub use traits::*; -pub use typ::*; diff --git a/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/options.rs b/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/options.rs index 91d01288cc676..503a656080590 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/options.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/options.rs @@ -10,7 +10,6 @@ use std::borrow::Cow; use std::fmt; use std::fmt::Debug; -use std::fmt::Display; use std::fmt::Formatter; use allocative::Allocative; @@ -18,43 +17,45 @@ use buck2_core::fs::paths::RelativePath; use buck2_core::fs::paths::RelativePathBuf; use buck2_core::fs::project_rel_path::ProjectRelativePath; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; +use buck2_error::BuckErrorContext; use buck2_execute::artifact::fs::ExecutorFs; use buck2_interpreter::types::cell_root::CellRoot; -use buck2_interpreter::types::project_root::ProjectRoot; +use buck2_interpreter::types::project_root::StarlarkProjectRoot; use buck2_interpreter::types::regex::BuckStarlarkRegex; -use buck2_util::commas::commas; use buck2_util::thin_box::ThinBoxSlice; use derive_more::Display; +use display_container::fmt_container; use dupe::Dupe; use either::Either; use gazebo::prelude::*; use regex::Regex; use serde::Serialize; use serde::Serializer; -use starlark::typing::Ty; +use starlark::values::string::StarlarkStr; use starlark::values::type_repr::StarlarkTypeRepr; use starlark::values::Freeze; use starlark::values::Freezer; use starlark::values::FrozenStringValue; -use starlark::values::FrozenValue; +use starlark::values::FrozenValueOfUnchecked; use starlark::values::StringValue; use starlark::values::StringValueLike; use starlark::values::Trace; use starlark::values::UnpackValue; use starlark::values::Value; -use starlark::values::ValueLike; +use starlark::values::ValueOfUnchecked; use static_assertions::assert_eq_size; -use crate::interpreter::rule_defs::artifact::StarlarkArtifactLike; -use crate::interpreter::rule_defs::artifact::ValueAsArtifactLike; +use crate::interpreter::rule_defs::artifact::starlark_artifact_like::StarlarkArtifactLike; use crate::interpreter::rule_defs::cmd_args::regex::CmdArgsRegex; use crate::interpreter::rule_defs::cmd_args::regex::FrozenCmdArgsRegex; +use crate::interpreter::rule_defs::cmd_args::shlex_quote::shlex_quote; use crate::interpreter::rule_defs::cmd_args::traits::CommandLineContext; use crate::interpreter::rule_defs::cmd_args::CommandLineBuilder; use crate::interpreter::rule_defs::cmd_args::CommandLineLocation; +use crate::starlark::StarlarkResultExt; /// Supported ways of quoting arguments. 
-#[derive(Debug, Clone, Dupe, Trace, Freeze, Serialize, Allocative)] +#[derive(Debug, Clone, Copy, Dupe, Trace, Freeze, Serialize, Allocative)] pub enum QuoteStyle { /// Quote arguments for Unix shell: /// @@ -69,7 +70,7 @@ impl Display for QuoteStyle { } } -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum CommandLineArgError { #[error("Unknown quoting style `{0}`")] UnknownQuotingStyle(String), @@ -88,7 +89,7 @@ impl QuoteStyle { } } -pub(crate) trait CommandLineOptionsTrait<'v>: Display { +pub(crate) trait CommandLineOptionsTrait<'v> { fn ignore_artifacts(&self) -> bool; fn delimiter(&self) -> Option>; @@ -99,8 +100,7 @@ pub(crate) trait CommandLineOptionsTrait<'v>: Display { #[repr(C)] pub(crate) struct CommandLineOptions<'v> { // These impact how artifacts are rendered - /// The value of V must be convertible to a `RelativeOrigin` - pub(crate) relative_to: Option<(Value<'v>, usize)>, + pub(crate) relative_to: Option<(ValueOfUnchecked<'v, RelativeOrigin<'v>>, u32)>, pub(crate) absolute_prefix: Option>, pub(crate) absolute_suffix: Option>, pub(crate) parent: u32, @@ -115,11 +115,32 @@ pub(crate) struct CommandLineOptions<'v> { pub(crate) replacements: Option, StringValue<'v>)>>>, } +#[derive(Clone, Copy, Dupe)] pub(crate) enum OptionsReplacementsRef<'v, 'a> { Unfrozen(&'a [(CmdArgsRegex<'v>, StringValue<'v>)]), Frozen(&'a [(FrozenCmdArgsRegex, FrozenStringValue)]), } +impl<'v, 'a> Display for OptionsReplacementsRef<'v, 'a> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + let iter = self.iter(); + fmt_container( + f, + "[", + "]", + iter.map(|(r, s)| { + struct D<'v>(CmdArgsRegex<'v>, StringValue<'v>); + impl<'v> Display for D<'v> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "({}, {})", StarlarkStr::repr(self.0.as_str()), self.1) + } + } + D(r, s) + }), + ) + } +} + impl<'v, 'a> OptionsReplacementsRef<'v, 'a> { pub(crate) fn is_empty(&self) -> bool { match self { @@ -167,7 +188,7 @@ impl<'v, 'a> Serialize for OptionsReplacementsRef<'v, 'a> { #[derive(Default, Serialize)] pub(crate) struct CommandLineOptionsRef<'v, 'a> { #[serde(serialize_with = "serialize_opt_display")] - pub(crate) relative_to: Option<(Value<'v>, usize)>, + pub(crate) relative_to: Option<(ValueOfUnchecked<'v, RelativeOrigin<'v>>, u32)>, pub(crate) absolute_prefix: Option>, pub(crate) absolute_suffix: Option>, pub(crate) parent: u32, @@ -231,7 +252,10 @@ impl<'v> CommandLineOptionsTrait<'v> for CommandLineOptions<'v> { #[derive(Debug, Allocative)] enum FrozenCommandLineOption { - RelativeTo(FrozenValue, u32), + RelativeTo( + FrozenValueOfUnchecked<'static, RelativeOrigin<'static>>, + u32, + ), AbsolutePrefix(FrozenStringValue), AbsoluteSuffix(FrozenStringValue), Parent(u32), @@ -252,6 +276,12 @@ pub(crate) struct FrozenCommandLineOptions { } impl FrozenCommandLineOptions { + pub const fn empty() -> Self { + FrozenCommandLineOptions { + options: ThinBoxSlice::empty(), + } + } + pub(crate) fn is_empty(&self) -> bool { self.options.is_empty() } @@ -281,7 +311,7 @@ impl<'v> CommandLineOptionsTrait<'v> for FrozenCommandLineOptions { for option in &*self.options { match option { FrozenCommandLineOption::RelativeTo(value, parent) => { - options.relative_to = Some((value.to_value(), *parent as usize)); + options.relative_to = Some((value.to_value(), *parent)); } FrozenCommandLineOption::AbsolutePrefix(value) => { options.absolute_prefix = Some(value.to_string_value()); @@ -334,18 +364,6 @@ impl Serialize for FrozenCommandLineOptions { } } -impl<'v> Display for 
CommandLineOptions<'v> { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - Display::fmt(&self.to_command_line_options(), f) - } -} - -impl Display for FrozenCommandLineOptions { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - Display::fmt(&self.to_command_line_options(), f) - } -} - impl<'v> Freeze for CommandLineOptions<'v> { type Frozen = FrozenCommandLineOptions; @@ -364,10 +382,9 @@ impl<'v> Freeze for CommandLineOptions<'v> { } = self; let mut options = Vec::new(); - if let Some(relative_to) = relative_to { - let (relative, parent) = relative_to.freeze(freezer)?; - let parent: u32 = parent.try_into()?; - options.push(FrozenCommandLineOption::RelativeTo(relative, parent)); + if let Some((relative_to, parent)) = relative_to { + let relative_to = relative_to.cast().freeze(freezer)?; + options.push(FrozenCommandLineOption::RelativeTo(relative_to, parent)); } if let Some(absolute_prefix) = absolute_prefix { let absolute_prefix = absolute_prefix.freeze(freezer)?; @@ -411,7 +428,7 @@ impl<'v> Freeze for CommandLineOptions<'v> { } } -fn serialize_opt_display(v: &Option<(V, usize)>, s: S) -> Result +fn serialize_opt_display(v: &Option<(V, u32)>, s: S) -> Result where S: Serializer, { @@ -421,99 +438,14 @@ where } } -impl<'v, 'a> Display for CommandLineOptionsRef<'v, 'a> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut comma = commas(); - if let Some((v, i)) = &self.relative_to { - comma(f)?; - write!(f, "relative_to = {}", v)?; - if *i != 0 { - comma(f)?; - write!(f, "relative_to_parent = {}", i)?; - } - } - if let Some(v) = &self.absolute_prefix { - comma(f)?; - write!(f, "absolute_prefix = {}", v)?; - } - if let Some(v) = &self.absolute_suffix { - comma(f)?; - write!(f, "absolute_suffix = {}", v)?; - } - if self.parent != 0 { - comma(f)?; - write!(f, "parent = {}", self.parent)?; - } - if self.ignore_artifacts { - comma(f)?; - write!(f, "ignore_artifacts = True")?; - } - if let Some(v) = &self.delimiter { - comma(f)?; - write!(f, "delimiter = {:?}", v)?; - } - if let Some(v) = &self.format { - comma(f)?; - write!(f, "format = {:?}", v)?; - } - if let Some(v) = &self.prepend { - comma(f)?; - write!(f, "prepend = {:?}", v)?; - } - if let Some(v) = &self.quote { - comma(f)?; - write!(f, "quote = \"{}\"", v)?; - } - if !self.replacements.is_empty() { - comma(f)?; - write!(f, "replacements = [")?; - let mut vec_comma = commas(); - for p in self.replacements.iter() { - vec_comma(f)?; - write!(f, "({:?}, {:?})", p.0, p.1)?; - } - write!(f, "]")?; - } - Ok(()) - } -} - // NOTE: This is an enum as opposed to a trait because of the `C` parameter on (which is required // because upcasting is not stable). -#[derive(Display)] +#[derive(Display, StarlarkTypeRepr, UnpackValue)] pub(crate) enum RelativeOrigin<'v> { Artifact(&'v dyn StarlarkArtifactLike), CellRoot(&'v CellRoot), /// Bit of a useless variant since this is simply the default, but we allow it for consistency. 
- ProjectRoot, -} - -impl<'v> StarlarkTypeRepr for RelativeOrigin<'v> { - fn starlark_type_repr() -> Ty { - Ty::unions(vec![ - ValueAsArtifactLike::starlark_type_repr(), - CellRoot::starlark_type_repr(), - ProjectRoot::starlark_type_repr(), - ]) - } -} - -impl<'v> UnpackValue<'v> for RelativeOrigin<'v> { - fn unpack_value(value: Value<'v>) -> Option { - if let Some(v) = ValueAsArtifactLike::unpack_value(value) { - return Some(RelativeOrigin::Artifact(v.0)); - } - - if let Some(v) = value.downcast_ref::() { - return Some(RelativeOrigin::CellRoot(v)); - } - - if value.downcast_ref::().is_some() { - return Some(RelativeOrigin::ProjectRoot); - } - - None - } + ProjectRoot(&'v StarlarkProjectRoot), } impl<'v> RelativeOrigin<'v> { @@ -529,7 +461,7 @@ impl<'v> RelativeOrigin<'v> { ctx.resolve_artifact(&artifact)? } Self::CellRoot(cell_root) => ctx.resolve_cell_path(cell_root.cell_path())?, - Self::ProjectRoot => { + Self::ProjectRoot(_) => { ctx.resolve_project_path(ProjectRelativePath::empty().to_owned())? } }; @@ -692,7 +624,8 @@ impl<'v, 'x> CommandLineOptionsRef<'v, 'x> { } match &self.opts.quote { Some(QuoteStyle::Shell) => { - arg = shlex::quote(&arg).into_owned(); + let quoted = shlex_quote(&arg); + arg = quoted.into_owned(); } _ => {} } @@ -748,8 +681,9 @@ impl<'v, 'x> CommandLineOptionsRef<'v, 'x> { None => return Ok(None), }; - let origin = RelativeOrigin::unpack_value(value) - .expect("Must be a valid RelativeOrigin as this was checked in the setter"); + let origin = value.unpack().into_anyhow_result().internal_error_anyhow( + "Must be a valid RelativeOrigin as this was checked in the setter", + )?; let mut relative_path = origin.resolve(ctx)?; for _ in 0..parent { if !relative_path.pop() { @@ -764,4 +698,90 @@ impl<'v, 'x> CommandLineOptionsRef<'v, 'x> { Ok(Some(relative_path)) } + + pub(crate) fn iter_fields_display( + &self, + ) -> impl Iterator)> { + let CommandLineOptionsRef { + relative_to, + absolute_prefix, + absolute_suffix, + parent, + ignore_artifacts, + delimiter, + format, + prepend, + quote, + replacements, + } = self; + + // This can be implemented without allocation, + // but generic version with iterator chain chain chain... + // or either either either... leads to compilation of the crate slowing down + // from 15s to minutes, and a fight against the borrow checker. 
+ let mut iter = Vec::new(); + + if let Some((value, index)) = relative_to { + iter.push(( + "relative_to", + CommandLineOptionsIterItem::Value(value.get()), + )); + if *index != 0 { + iter.push(( + "relative_to_parent", + CommandLineOptionsIterItem::U32(*index), + )); + } + } + + if let Some(value) = absolute_prefix { + iter.push(( + "absolute_prefix", + CommandLineOptionsIterItem::StringValue(*value), + )); + } + if let Some(value) = absolute_suffix { + iter.push(( + "absolute_suffix", + CommandLineOptionsIterItem::StringValue(*value), + )); + } + if *parent != 0 { + iter.push(("parent", CommandLineOptionsIterItem::U32(*parent))); + } + if *ignore_artifacts { + iter.push(("ignore_artifacts", CommandLineOptionsIterItem::Str("True"))); + } + if let Some(value) = delimiter { + iter.push(("delimiter", CommandLineOptionsIterItem::StringValue(*value))); + } + if let Some(value) = format { + iter.push(("format", CommandLineOptionsIterItem::StringValue(*value))); + } + if let Some(value) = prepend { + iter.push(("prepend", CommandLineOptionsIterItem::StringValue(*value))); + } + if let Some(value) = quote { + iter.push(("quote", CommandLineOptionsIterItem::QuoteStyle(*value))); + } + if !replacements.is_empty() { + iter.push(( + "replacements", + CommandLineOptionsIterItem::Replacements(*replacements), + )); + } + + iter.into_iter() + } +} + +#[derive(derive_more::Display)] +pub(crate) enum CommandLineOptionsIterItem<'v, 'a> { + U32(u32), + Value(Value<'v>), + Str(&'static str), + StringValue(StringValue<'v>), + Replacements(OptionsReplacementsRef<'v, 'a>), + #[display("\"{}\"", _0)] + QuoteStyle(QuoteStyle), } diff --git a/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/regex.rs b/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/regex.rs index abd03f836669a..1bc07bce0a5c5 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/regex.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/regex.rs @@ -8,9 +8,9 @@ */ use allocative::Allocative; -use anyhow::Context; use buck2_interpreter::types::regex::BuckStarlarkRegex; use dupe::Dupe; +use regex::Regex; use serde::Serialize; use serde::Serializer; use starlark::values::type_repr::StarlarkTypeRepr; @@ -41,22 +41,43 @@ pub(crate) enum CmdArgsRegex<'v> { Regex(ValueTyped<'v, BuckStarlarkRegex>), } +impl<'v> CmdArgsRegex<'v> { + pub(crate) fn validate(&self) -> anyhow::Result<()> { + match self { + CmdArgsRegex::Str(pattern) => { + // Validate that regex is valid + Regex::new(pattern.as_str())?; + } + CmdArgsRegex::Regex(_) => {} + } + Ok(()) + } +} + #[derive(Debug, Clone, Dupe, Copy, Allocative)] pub(crate) enum FrozenCmdArgsRegex { Str(FrozenStringValue), Regex(FrozenValueTyped<'static, BuckStarlarkRegex>), } +impl<'v> CmdArgsRegex<'v> { + pub(crate) fn as_str(&self) -> &str { + match self { + Self::Str(s) => s.as_str(), + Self::Regex(r) => r.as_ref().as_str(), + } + } +} + impl<'v> Freeze for CmdArgsRegex<'v> { type Frozen = FrozenCmdArgsRegex; fn freeze(self, freezer: &Freezer) -> anyhow::Result { Ok(match self { Self::Str(s) => FrozenCmdArgsRegex::Str(s.freeze(freezer)?), - Self::Regex(r) => FrozenCmdArgsRegex::Regex( - FrozenValueTyped::new(r.to_value().freeze(freezer)?) - .context("frozen to the wrong type (internal error)")?, - ), + Self::Regex(r) => { + FrozenCmdArgsRegex::Regex(FrozenValueTyped::new_err(r.to_value().freeze(freezer)?)?) 
+ } }) } } diff --git a/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/shlex_quote.rs b/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/shlex_quote.rs new file mode 100644 index 0000000000000..4076385058d13 --- /dev/null +++ b/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/shlex_quote.rs @@ -0,0 +1,62 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::borrow::Cow; + +/// Quote string for shell. +/// +/// This is copy-pasted from [`shlex`](https://github.com/comex/rust-shlex/) 1.0. +/// +/// Generally, shell quoting is unspecified; for example, `a` can be quoted as: +/// - `a` +/// - `"a"` +/// - `'a'` +/// - and many more +/// +/// all of which may be used in a shell. But we use shell quoting to generate arguments for argfiles. +/// And certain programs expect a certain quoting style (for example, `cl.exe` expects double quotes). +/// Additionally, we probably also incorrectly use shell quoting for `cmd.exe`. +/// +/// Long story short, we should not depend on a possible (even correct) behavior change in the `shlex` crate. +pub(crate) fn shlex_quote(in_str: &str) -> Cow<str> { + if in_str.is_empty() { + "\"\"".into() + } else if in_str.bytes().any(|c| match c as char { + '|' | '&' | ';' | '<' | '>' | '(' | ')' | '$' | '`' | '\\' | '"' | '\'' | ' ' | '\t' + | '\r' | '\n' | '*' | '?' | '[' | '#' | '~' | '=' | '%' => true, + _ => false, + }) { + let mut out: Vec<u8> = Vec::new(); + out.push(b'"'); + for c in in_str.bytes() { + match c as char { + '$' | '`' | '"' | '\\' => out.push(b'\\'), + _ => (), + } + out.push(c); + } + out.push(b'"'); + unsafe { String::from_utf8_unchecked(out) }.into() + } else { + in_str.into() + } +} + +#[cfg(test)] +mod tests { + use crate::interpreter::rule_defs::cmd_args::shlex_quote::shlex_quote; + + #[test] + fn test_quote() { + assert_eq!(shlex_quote("foobar"), "foobar"); + assert_eq!(shlex_quote("foo bar"), "\"foo bar\""); + assert_eq!(shlex_quote("\""), "\"\\\"\""); + assert_eq!(shlex_quote(""), "\"\""); + } } diff --git a/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/traits.rs b/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/traits.rs index bae020a949831..79a0ad41de39c 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/traits.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/traits.rs @@ -23,14 +23,17 @@ use buck2_execute::artifact::artifact_dyn::ArtifactDyn; use buck2_execute::artifact::fs::ExecutorFs; use buck2_interpreter::types::cell_root::CellRoot; use buck2_interpreter::types::configured_providers_label::StarlarkConfiguredProvidersLabel; -use buck2_interpreter::types::project_root::ProjectRoot as StarlarkProjectRoot; +use buck2_interpreter::types::project_root::StarlarkProjectRoot; use buck2_interpreter::types::target_label::StarlarkTargetLabel; use indexmap::IndexSet; use starlark::any::ProvidesStaticType; +use starlark::typing::Ty; use starlark::values::string::StarlarkStr; +use starlark::values::type_repr::StarlarkTypeRepr; use crate::artifact_groups::ArtifactGroup; use crate::interpreter::rule_defs::artifact_tagging::ArtifactTag; +use crate::interpreter::rule_defs::cmd_args::command_line_arg_like_type::command_line_arg_like_impl; use crate::interpreter::rule_defs::resolved_macro::ResolvedMacro; pub trait CommandLineArtifactVisitor { @@
-85,6 +88,9 @@ pub trait WriteToFileMacroVisitor { /// /// Certain operations on `CommandLineBuilder` can fail, so propagate those upward pub trait CommandLineArgLike { + /// Call `command_line_arg_like_impl!` to register the type with the interpreter typechecker. + fn register_me(&self); + fn add_to_command_line( &self, cli: &mut dyn CommandLineBuilder, @@ -109,6 +115,10 @@ unsafe impl<'v> ProvidesStaticType<'v> for &'v dyn CommandLineArgLike { } impl CommandLineArgLike for &str { + fn register_me(&self) { + command_line_arg_like_impl!(Ty::string()); + } + fn add_to_command_line( &self, cli: &mut dyn CommandLineBuilder, @@ -131,34 +141,16 @@ impl CommandLineArgLike for &str { } impl CommandLineArgLike for StarlarkStr { - fn add_to_command_line( - &self, - cli: &mut dyn CommandLineBuilder, - _context: &mut dyn CommandLineContext, - ) -> anyhow::Result<()> { - cli.push_arg(self.as_str().to_owned()); - Ok(()) - } - - fn contains_arg_attr(&self) -> bool { - false + fn register_me(&self) { + command_line_arg_like_impl!(StarlarkStr::starlark_type_repr()); } - fn visit_write_to_file_macros( - &self, - _visitor: &mut dyn WriteToFileMacroVisitor, - ) -> anyhow::Result<()> { - Ok(()) - } -} - -impl CommandLineArgLike for String { fn add_to_command_line( &self, cli: &mut dyn CommandLineBuilder, _context: &mut dyn CommandLineContext, ) -> anyhow::Result<()> { - cli.push_arg(self.clone()); + cli.push_arg(self.as_str().to_owned()); Ok(()) } @@ -175,6 +167,10 @@ impl CommandLineArgLike for String { } impl CommandLineArgLike for StarlarkTargetLabel { + fn register_me(&self) { + command_line_arg_like_impl!(StarlarkTargetLabel::starlark_type_repr()); + } + fn add_to_command_line( &self, cli: &mut dyn CommandLineBuilder, @@ -197,6 +193,10 @@ impl CommandLineArgLike for StarlarkTargetLabel { } impl CommandLineArgLike for StarlarkConfiguredProvidersLabel { + fn register_me(&self) { + command_line_arg_like_impl!(StarlarkConfiguredProvidersLabel::starlark_type_repr()); + } + fn add_to_command_line( &self, cli: &mut dyn CommandLineBuilder, @@ -219,6 +219,10 @@ impl CommandLineArgLike for StarlarkConfiguredProvidersLabel { } impl CommandLineArgLike for CellRoot { + fn register_me(&self) { + command_line_arg_like_impl!(CellRoot::starlark_type_repr()); + } + fn add_to_command_line( &self, cli: &mut dyn CommandLineBuilder, @@ -241,6 +245,10 @@ impl CommandLineArgLike for CellRoot { } impl CommandLineArgLike for StarlarkProjectRoot { + fn register_me(&self) { + command_line_arg_like_impl!(StarlarkProjectRoot::starlark_type_repr()); + } + fn add_to_command_line( &self, cli: &mut dyn CommandLineBuilder, @@ -328,12 +336,12 @@ impl CommandLineLocation<'_> { impl<'a> CommandLineLocation<'a> { pub fn from_root( root: &'a ProjectRoot, - path: RelativePathBuf, + path: ProjectRelativePathBuf, path_separator: PathSeparatorKind, ) -> Self { Self { root: Some(root), - path, + path: path.into(), path_separator, } } diff --git a/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/typ.rs b/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/typ.rs index 914aa069e18ca..5e89d56cbd248 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/typ.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/typ.rs @@ -10,6 +10,7 @@ use std::cell::Ref; use std::cell::RefCell; use std::cell::RefMut; +use std::convert::Infallible; use std::fmt; use std::fmt::Debug; use std::fmt::Display; @@ -18,7 +19,6 @@ use std::marker::PhantomData; use allocative::Allocative; use buck2_core::fs::paths::RelativePathBuf; -use 
buck2_util::commas::commas; use buck2_util::thin_box::ThinBoxSlice; use display_container::display_pair; use display_container::fmt_container; @@ -27,24 +27,26 @@ use dupe::Dupe; use either::Either; use gazebo::prelude::*; use indexmap::IndexSet; -use regex::Regex; use serde::Serialize; use serde::Serializer; use starlark::any::ProvidesStaticType; use starlark::coerce::coerce; -use starlark::docs::StarlarkDocs; use starlark::environment::GlobalsBuilder; use starlark::environment::Methods; use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; use starlark::typing::Ty; use starlark::values::list::ListRef; +use starlark::values::list::UnpackList; use starlark::values::starlark_value; +use starlark::values::tuple::UnpackTuple; use starlark::values::type_repr::StarlarkTypeRepr; +use starlark::values::AllocStaticSimple; use starlark::values::AllocValue; use starlark::values::Demand; use starlark::values::Freeze; use starlark::values::Freezer; +use starlark::values::FrozenValue; use starlark::values::Heap; use starlark::values::NoSerialize; use starlark::values::StarlarkValue; @@ -52,15 +54,15 @@ use starlark::values::StringValue; use starlark::values::Trace; use starlark::values::UnpackValue; use starlark::values::Value; -use starlark::values::ValueError; use starlark::values::ValueLike; use starlark::values::ValueOf; use static_assertions::assert_eq_size; use crate::artifact_groups::ArtifactGroup; use crate::interpreter::rule_defs::artifact::associated::AssociatedArtifacts; -use crate::interpreter::rule_defs::artifact::StarlarkDeclaredArtifact; -use crate::interpreter::rule_defs::artifact::StarlarkOutputArtifact; +use crate::interpreter::rule_defs::artifact::starlark_declared_artifact::StarlarkDeclaredArtifact; +use crate::interpreter::rule_defs::artifact::starlark_output_artifact::StarlarkOutputArtifact; +use crate::interpreter::rule_defs::cmd_args::command_line_arg_like_type::command_line_arg_like_impl; use crate::interpreter::rule_defs::cmd_args::options::CommandLineOptions; use crate::interpreter::rule_defs::cmd_args::options::CommandLineOptionsRef; use crate::interpreter::rule_defs::cmd_args::options::CommandLineOptionsTrait; @@ -87,12 +89,21 @@ trait Fields<'v> { /// Wrapper because we cannot implement traits for traits. struct FieldsRef<'v, F: Fields<'v>>(F, PhantomData>); +/// There's no good reason for a user to write `cmd_args` as JSON in analysis or BXL. +/// +/// This implementation exists for operations such as: +/// +/// ```ignore +/// buck2 cquery :buck2 --providers +/// ``` +/// +/// which must not fail if a provider contains `cmd_args` (D34887765). impl<'v, F: Fields<'v>> Serialize for FieldsRef<'v, F> { fn serialize(&self, serializer: S) -> Result where S: Serializer, { - /// Make sure mutable mutable and frozen `cmd_args` are serialized identically + /// Make sure mutable and frozen `cmd_args` are serialized identically /// by routing through this struct. 
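The comment above (and the `Mirror` struct that follows) is the whole trick: the mutable and the frozen representations of `cmd_args` serialize through one shared view, so their output can never diverge. A self-contained sketch of that pattern; it assumes `serde` and `serde_json` as dependencies, and the type names are illustrative:

```rust
use serde::{Serialize, Serializer};

struct Mutable {
    items: Vec<String>,
}

struct Frozen {
    items: Box<[String]>,
}

// One shared borrowed view; both impls route through it, so the two
// representations can never drift apart in their serialized output.
#[derive(Serialize)]
struct Mirror<'a> {
    items: &'a [String],
}

impl Serialize for Mutable {
    fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
        Mirror { items: &self.items[..] }.serialize(s)
    }
}

impl Serialize for Frozen {
    fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
        Mirror { items: &self.items[..] }.serialize(s)
    }
}

fn main() {
    let m = Mutable { items: vec!["a".to_owned()] };
    let f = Frozen { items: vec!["a".to_owned()].into_boxed_slice() };
    assert_eq!(
        serde_json::to_string(&m).unwrap(),
        serde_json::to_string(&f).unwrap()
    );
}
```

The same shape works for any pair of hot/frozen representations that must present an identical wire format.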
#[derive(Serialize)] struct Mirror<'v, 'a> { @@ -132,8 +143,10 @@ impl<'v, F: Fields<'v>> Display for FieldsRef<'v, F> { }), self.0 .options() - .iter() - .map(|options| display_pair("options", "=", options)), + .map(|o| o.to_command_line_options()) + .unwrap_or_default() + .iter_fields_display() + .map(|(k, v)| display_pair(k, "=", v)), ), ), ) @@ -179,6 +192,10 @@ impl<'v, F: Fields<'v>> FieldsRef<'v, F> { } impl<'v, F: Fields<'v>> CommandLineArgLike for FieldsRef<'v, F> { + fn register_me(&self) { + command_line_arg_like_impl!(StarlarkCmdArgs::starlark_type_repr()); + } + fn add_to_command_line( &self, cli: &mut dyn CommandLineBuilder, @@ -265,15 +282,7 @@ pub struct StarlarkCommandLineData<'v> { options: Option>>, } -#[derive( - Debug, - Default, - Clone, - Trace, - ProvidesStaticType, - StarlarkDocs, - Allocative -)] +#[derive(Debug, Default, Clone, Trace, ProvidesStaticType, Allocative)] pub struct StarlarkCmdArgs<'v>(RefCell>); impl<'v> Serialize for StarlarkCmdArgs<'v> { @@ -412,6 +421,10 @@ impl FrozenStarlarkCmdArgs { pub(crate) fn is_concat(&self) -> bool { FieldsRef(self, PhantomData).is_concat() } + + pub(crate) fn is_empty(&self) -> bool { + self.items.is_empty() + } } impl<'v> StarlarkCmdArgs<'v> { @@ -430,6 +443,25 @@ impl<'v> StarlarkValue<'v> for StarlarkCmdArgs<'v> { fn provide(&'v self, demand: &mut Demand<'_, 'v>) { demand.provide_value::<&dyn CommandLineArgLike>(self); } + + fn try_freeze_static(&self) -> Option { + let StarlarkCommandLineData { + items, + hidden, + options, + } = &*self.0.borrow(); + if items.is_empty() && hidden.is_empty() && options.is_none() { + static EMPTY: AllocStaticSimple = + AllocStaticSimple::alloc(FrozenStarlarkCmdArgs { + items: ThinBoxSlice::empty(), + hidden: ThinBoxSlice::empty(), + options: FrozenCommandLineOptions::empty(), + }); + Some(EMPTY.unpack().to_frozen_value()) + } else { + None + } + } } #[starlark_value(type = "cmd_args")] @@ -455,6 +487,10 @@ impl<'v> AllocValue<'v> for StarlarkCmdArgs<'v> { } impl<'v> CommandLineArgLike for StarlarkCmdArgs<'v> { + fn register_me(&self) { + command_line_arg_like_impl!(StarlarkCmdArgs::starlark_type_repr()); + } + fn add_to_command_line( &self, cli: &mut dyn CommandLineBuilder, @@ -480,6 +516,10 @@ impl<'v> CommandLineArgLike for StarlarkCmdArgs<'v> { } impl CommandLineArgLike for FrozenStarlarkCmdArgs { + fn register_me(&self) { + command_line_arg_like_impl!(FrozenStarlarkCmdArgs::starlark_type_repr()); + } + fn add_to_command_line( &self, cli: &mut dyn CommandLineBuilder, @@ -533,39 +573,32 @@ impl<'v> StarlarkCmdArgs<'v> { } pub fn try_from_value(value: Value<'v>) -> anyhow::Result { + Self::try_from_value_typed(StarlarkCommandLineValueUnpack::unpack_value_err(value)?) 
+ } + + pub fn try_from_value_typed(value: StarlarkCommandLineValueUnpack<'v>) -> anyhow::Result { let mut builder = Self::new(); - builder.0.get_mut().add_value(value)?; + builder.0.get_mut().add_value_typed(value)?; Ok(builder) } +} - fn try_from_values_with_options( - value: &[Value<'v>], - delimiter: Option>, - format: Option>, - prepend: Option>, - quote: Option, - ) -> anyhow::Result { - let mut builder = StarlarkCommandLineData::default(); - if delimiter.is_some() || format.is_some() || prepend.is_some() || quote.is_some() { - let opts = builder.options_mut(); - opts.delimiter = delimiter; - opts.format = format; - opts.prepend = prepend; - opts.quote = quote; - } - for v in value { - builder.add_value(*v)?; - } - Ok(Self(RefCell::new(builder))) - } +#[derive(UnpackValue, StarlarkTypeRepr)] +pub enum StarlarkCommandLineValueUnpack<'v> { + // This should be `list[Self]`, but we cannot express it. + List(&'v ListRef<'v>), + CommandLineArg(CommandLineArg<'v>), } impl<'v> StarlarkCommandLineData<'v> { fn add_value(&mut self, value: Value<'v>) -> anyhow::Result<()> { - if let Some(values) = ListRef::from_value(value) { - self.add_values(values.content())?; - } else { - self.items.push(CommandLineArg::try_from_value(value)?); + self.add_value_typed(StarlarkCommandLineValueUnpack::unpack_value_err(value)?) + } + + fn add_value_typed(&mut self, value: StarlarkCommandLineValueUnpack<'v>) -> anyhow::Result<()> { + match value { + StarlarkCommandLineValueUnpack::List(values) => self.add_values(values.content())?, + StarlarkCommandLineValueUnpack::CommandLineArg(value) => self.items.push(value), } Ok(()) } @@ -582,12 +615,15 @@ impl<'v> StarlarkCommandLineData<'v> { } /// Add values to the artifact that don't show up on the command line, but do for dependency - fn add_hidden(&mut self, values: &[Value<'v>]) -> anyhow::Result<()> { - for value in values { - if let Some(values) = ListRef::from_value(*value) { - self.add_hidden(values.content())?; - } else { - self.hidden.push(CommandLineArg::try_from_value(*value)?); + fn add_hidden(&mut self, value: StarlarkCommandLineValueUnpack<'v>) -> anyhow::Result<()> { + match value { + StarlarkCommandLineValueUnpack::List(values) => { + for value in values.content() { + self.add_hidden(StarlarkCommandLineValueUnpack::unpack_value_err(*value)?)? + } + } + StarlarkCommandLineValueUnpack::CommandLineArg(arg) => { + self.hidden.push(arg); } } Ok(()) @@ -600,21 +636,21 @@ struct StarlarkCommandLineMut<'v> { } impl<'v> StarlarkTypeRepr for StarlarkCommandLineMut<'v> { + type Canonical = as StarlarkTypeRepr>::Canonical; + fn starlark_type_repr() -> Ty { StarlarkCmdArgs::starlark_type_repr() } } impl<'v> UnpackValue<'v> for StarlarkCommandLineMut<'v> { - fn expected() -> String { - "command line builder; frozen command line cannot be mutated".to_owned() - } + type Error = Infallible; - fn unpack_value(value: Value<'v>) -> Option { - value.downcast_ref::().map(|v| Self { + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + Ok(value.downcast_ref::().map(|v| Self { value, borrow: v.0.borrow_mut(), - }) + })) } } @@ -638,57 +674,29 @@ fn cmd_args<'v>(x: Value<'v>) -> FieldsRef<'v, impl Fields<'v>> { /// The type is a mutable collection of strings and `artifact` values. /// In general, command lines, artifacts, strings, `RunInfo` and lists thereof can be added to or used to construct a `cmd_args` value. /// All these methods operate mutably on `cmd` and return that value too. 
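The docs above note that these methods mutate `cmd` and return it; the TODO just after proposes an immutable, copy-returning style instead. A std-only sketch contrasting the two conventions (`Args` is illustrative, not the real type):

```rust
#[derive(Clone, Debug, PartialEq)]
struct Args(Vec<String>);

impl Args {
    // Mutate-and-return (current behavior): every handle sees the change.
    fn add_mut(&mut self, s: &str) -> &mut Self {
        self.0.push(s.to_owned());
        self
    }

    // Copy-on-write (proposed): the original value is left untouched.
    fn add_copy(&self, s: &str) -> Self {
        let mut next = self.clone();
        next.0.push(s.to_owned());
        next
    }
}

fn main() {
    let mut a = Args(vec![]);
    a.add_mut("x");
    let b = a.add_copy("y");
    assert_eq!(a, Args(vec!["x".into()]));
    assert_eq!(b, Args(vec!["x".into(), "y".into()]));
}
```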
+// TODO(nga): `cmd_args` should be immutable, so that all parameters should be +// either set in constructor, or operations like `hidden` should return a copy +// rather than modify this. https://fburl.com/workplace/ihkplvbn #[starlark_module] fn cmd_args_methods(builder: &mut MethodsBuilder) { /// A list of arguments to be added to the command line, which may including `cmd_args`, artifacts, strings, `RunInfo` or lists thereof. /// Note that this operation mutates the input `cmd_args`. fn add<'v>( mut this: StarlarkCommandLineMut<'v>, - #[starlark(args)] args: Vec>, - ) -> anyhow::Result> { - this.borrow.add_values(&args)?; - Ok(this) - } - - /// Things to add to the command line which do not show up but are added as dependencies. - /// The values can be anything normally permissible to pass to `add`. - /// - /// Typically used if the command you are running implicitly depends on files that are not - /// passed on the command line, e.g. headers in the case of a C compilation. - fn hidden<'v>( - mut this: StarlarkCommandLineMut<'v>, - #[starlark(args)] args: Vec>, - ) -> anyhow::Result> { - this.borrow.add_hidden(&args)?; - Ok(this) - } - - /// Causes this `cmd_args` to have no declared dependencies. Allows you to reference the path of an artifact _without_ - /// introducing dependencies on it. - /// - /// As an example where this can be useful, consider passing a dependency that is only accessed at runtime, but whose path - /// must be baked into the binary. As an example: - /// - /// ```python - /// resources = cmd_args(resource_file, format = "-DFOO={}").ignore_artifacts() - /// ctx.actions.run(cmd_args("gcc", "-c", source_file, resources)) - /// ``` - /// - /// Note that `ignore_artifacts` sets all artifacts referenced by this `cmd_args` to be ignored, including those added afterwards, - /// so generally create a special `cmd_args` and scope it quite tightly. - /// - /// If you actually do use the inputs referenced by this command, you will either error out due to missing dependencies (if running actions remotely) - /// or have untracked dependencies that will fail to rebuild when it should. - fn ignore_artifacts<'v>( - mut this: StarlarkCommandLineMut<'v>, + #[starlark(args)] args: UnpackTuple>, ) -> anyhow::Result> { - this.borrow.options_mut().ignore_artifacts = true; + this.borrow.add_values(&args.items)?; Ok(this) } /// Make all artifact paths relative to a given location. Typically used when the command /// you are running changes directory. /// + /// By default, the paths are relative to the artifacts themselves (equivalent to + /// `parent = 0`). Use `parent` to make the paths relative to an ancestor directory. + /// For example `parent = 1` would make all paths relative to the containing dirs + /// of any artifacts in the `cmd_args`. + /// /// ```python /// dir = symlinked_dir(...) /// script = [ @@ -699,75 +707,9 @@ fn cmd_args_methods(builder: &mut MethodsBuilder) { fn relative_to<'v>( mut this: StarlarkCommandLineMut<'v>, #[starlark(require = pos)] directory: ValueOf<'v, RelativeOrigin<'v>>, - #[starlark(require = named, default = 0i32)] parent: i32, - ) -> anyhow::Result> { - if parent < 0 { - return Err(ValueError::IncorrectParameterTypeNamed("parent".to_owned()).into()); - } - this.borrow.options_mut().relative_to = Some((directory.value, parent as usize)); - Ok(this) - } - - /// Adds a prefix to the end of start artifact. Often used if you have a `$ROOT` variable - /// in a shell script and want to use it to make files absolute. 
- /// - /// ```python - /// cmd_args(script).absolute_prefix("$ROOT/") - /// ``` - fn absolute_prefix<'v>( - mut this: StarlarkCommandLineMut<'v>, - prefix: StringValue<'v>, + #[starlark(require = named, default = 0u32)] parent: u32, ) -> anyhow::Result<StarlarkCommandLineMut<'v>> { - this.borrow.options_mut().absolute_prefix = Some(prefix); - Ok(this) - } - - /// Adds a suffix to the end of every artifact. Useful in conjunction with `absolute_prefix` to wrap - /// artifacts in function calls. - /// - /// ```python - /// cmd_args(script).absolute_prefix("call(").absolute_suffix(")") - /// ``` - fn absolute_suffix<'v>( - mut this: StarlarkCommandLineMut<'v>, - suffix: StringValue<'v>, - ) -> anyhow::Result<StarlarkCommandLineMut<'v>> { - this.borrow.options_mut().absolute_suffix = Some(suffix); - Ok(this) - } - - /// For all the artifacts listed in this `cmd_args`, use their parent directory. - /// - /// Typically used when the file name is passed one way, and the directory another, - /// e.g. `cmd_args(artifact, format="-L{}").parent()`. - fn parent<'v>( - mut this: StarlarkCommandLineMut<'v>, - #[starlark(require = pos, default = 1u32)] count: u32, - ) -> anyhow::Result<StarlarkCommandLineMut<'v>> { - this.borrow.options_mut().parent += count; - Ok(this) - } - - /// Replaces all parts matching pattern regular expression in each argument with replacement string. - /// Several replacements can be added by multiple replace_regex calls. - fn replace_regex<'v>( - mut this: StarlarkCommandLineMut<'v>, - #[starlark(require = pos)] pattern: CmdArgsRegex<'v>, - #[starlark(require = pos)] replacement: StringValue<'v>, - ) -> anyhow::Result<StarlarkCommandLineMut<'v>> { - let options = this.borrow.options_mut(); - match &pattern { - CmdArgsRegex::Str(pattern) => { - // Validate that regex is valid - Regex::new(pattern.as_str())?; - } - CmdArgsRegex::Regex(_) => {} - } - if let Some(replacements) = &mut options.replacements { - replacements.push((pattern, replacement)); - } else { - options.replacements = Some(Box::new(vec![(pattern, replacement)])); - } + this.borrow.options_mut().relative_to = Some((directory.as_unchecked(), parent)); Ok(this) } @@ -824,20 +766,146 @@ pub fn register_cmd_args(builder: &mut GlobalsBuilder) { /// * `delimiter` - added between arguments to join them together. For example, `cmd_args(["--args=",x], delimiter="")` would produce a single argument to the underlying tool. /// * `prepend` - added as a separate argument before each argument. /// * `quote` - indicates whether quoting is to be applied to each argument. The only current valid value is `"shell"`. + /// * `ignore_artifacts` - if `True`, artifact paths are used, but the artifacts are not pulled. + /// * `hidden` - artifacts not present on the command line, but added as dependencies. + /// * `absolute_prefix` and `absolute_suffix` - added to the start and end of each artifact. + /// * `parent` - for all the artifacts use their `parent`th directory (e.g. `parent = 1` for the directory the artifact is located in, `parent = 2` for that directory's parent, etc.). + /// * `relative_to` - make all artifact paths relative to a given location. + /// * `replace_regex` - replaces the parts of each argument that match a regular expression. + /// + /// ### `ignore_artifacts` + /// + /// `ignore_artifacts=True` makes `cmd_args` have no declared dependencies. + /// Allows you to reference the path of an artifact _without_ introducing dependencies on it. + /// + /// As an example where this can be useful, consider passing a dependency that is only accessed at runtime, but whose path + /// must be baked into the binary. As an example: + /// + /// ```python + /// resources = cmd_args(resource_file, format = "-DFOO={}", ignore_artifacts = True) + /// ctx.actions.run(cmd_args("gcc", "-c", source_file, resources)) + /// ``` + /// + /// Note that `ignore_artifacts` sets all artifacts referenced by this `cmd_args` to be ignored, including those added afterwards, + /// so generally create a special `cmd_args` and scope it quite tightly. + /// + /// If you actually do use the inputs referenced by this command, + /// you will either error out due to missing dependencies (if running actions remotely) + /// or have untracked dependencies that will fail to rebuild when they should. + /// + /// ### `hidden` + /// + /// Things to add to the command line which do not show up but are added as dependencies. + /// The values can be anything normally permissible to pass to `add`. + /// + /// Typically used if the command you are running implicitly depends on files that are not + /// passed on the command line, e.g. headers in the case of a C compilation. + /// + /// ### `absolute_prefix` and `absolute_suffix` + /// + /// Adds a prefix to the start or a suffix to the end of every artifact path. + /// + /// Prefix is often used if you have a `$ROOT` variable + /// in a shell script and want to use it to make files absolute. + /// + /// Suffix is often used in conjunction with `absolute_prefix` + /// to wrap artifacts in function calls. + /// + /// ```python + /// cmd_args(script, absolute_prefix = "$ROOT/") + /// cmd_args(script, absolute_prefix = "call(", absolute_suffix = ")") + /// ``` + /// + /// ### `parent` + /// + /// For all the artifacts, use their parent directory. + /// + /// Typically used when the file name is passed one way, and the directory another, + /// e.g. `cmd_args(artifact, format="-L{}", parent=1)`. + /// + /// ### `relative_to=dir` or `relative_to=(dir, parent)` + /// + /// Make all artifact paths relative to a given location. Typically used when the command + /// you are running changes directory. + /// + /// By default, the paths are relative to the artifacts themselves (equivalent to + /// `parent = 0`). Use `parent` to make the paths relative to an ancestor directory. + /// For example, `parent = 1` would make all paths relative to the containing dirs + /// of any artifacts in the `cmd_args`. + /// + /// ```python + /// dir = symlinked_dir(...) + /// script = [ + /// cmd_args(dir, format = "cd {}", relative_to=dir), + /// ] + /// ``` + /// + /// ### `replace_regex` + /// + /// Replaces all parts matching the given regular expression (or expressions) + /// in each argument with the corresponding replacement strings.
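The `parent` counting described above (both the standalone `parent` option and the `relative_to=(dir, parent)` form) is plain path arithmetic: resolve the origin, then pop `parent` components, failing if the walk leaves the root. A std-only sketch mirroring the `relative_path.pop()` loop shown earlier in `options.rs`, with hypothetical paths:

```rust
use std::path::PathBuf;

// Pop `parent` components off the origin path; popping past the top is an
// error, as in the real `if !relative_path.pop()` check.
fn relative_origin(origin: &str, parent: u32) -> Result<PathBuf, String> {
    let mut path = PathBuf::from(origin);
    for _ in 0..parent {
        if !path.pop() {
            return Err(format!(
                "`relative_to` with `parent = {parent}` walks above the origin"
            ));
        }
    }
    Ok(path)
}

fn main() {
    assert_eq!(relative_origin("gen/app/out", 0), Ok(PathBuf::from("gen/app/out")));
    assert_eq!(relative_origin("gen/app/out", 1), Ok(PathBuf::from("gen/app")));
    assert!(relative_origin("gen", 2).is_err());
}
```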
fn cmd_args<'v>( - #[starlark(args)] args: Vec>, + #[starlark(args)] args: UnpackTuple>, + hidden: Option>, delimiter: Option>, format: Option>, prepend: Option>, quote: Option<&str>, + #[starlark(default = false)] ignore_artifacts: bool, + absolute_prefix: Option>, + absolute_suffix: Option>, + #[starlark(default = 0)] parent: u32, + relative_to: Option< + Either>, (ValueOf<'v, RelativeOrigin<'v>>, u32)>, + >, + #[starlark(default = Either::Right(UnpackList::default()))] replace_regex: Either< + (CmdArgsRegex<'v>, StringValue<'v>), + UnpackList<(CmdArgsRegex<'v>, StringValue<'v>)>, + >, ) -> anyhow::Result> { - StarlarkCmdArgs::try_from_values_with_options( - &args, - delimiter, - format, - prepend, - quote.try_map(QuoteStyle::parse)?, - ) + let quote = quote.try_map(QuoteStyle::parse)?; + let mut builder = StarlarkCommandLineData::default(); + if delimiter.is_some() + || format.is_some() + || prepend.is_some() + || quote.is_some() + || ignore_artifacts + || absolute_prefix.is_some() + || absolute_suffix.is_some() + || parent != 0 + || relative_to.is_some() + { + let opts = builder.options_mut(); + opts.delimiter = delimiter; + opts.format = format; + opts.prepend = prepend; + opts.quote = quote; + opts.ignore_artifacts = ignore_artifacts; + opts.absolute_prefix = absolute_prefix; + opts.absolute_suffix = absolute_suffix; + opts.parent = parent; + opts.relative_to = relative_to.map(|either| { + let (relative_to, parent) = either.map_left(|o| (o, 0)).into_inner(); + (relative_to.as_unchecked(), parent) + }); + } + let replace_regex: Vec<(CmdArgsRegex, StringValue)> = replace_regex + .map_left(|x| vec![x]) + .map_right(|x| x.items) + .into_inner(); + if !replace_regex.is_empty() { + for (pattern, _replacement) in &replace_regex { + pattern.validate()?; + } + builder.options_mut().replacements = Some(Box::new(replace_regex)); + } + for v in args.items { + builder.add_value_typed(v)?; + } + if let Some(hidden) = hidden { + builder.add_hidden(hidden)?; + } + Ok(StarlarkCmdArgs(RefCell::new(builder))) } } @@ -852,23 +920,20 @@ starlark_simple_value!(StarlarkCommandLineInputs); impl Display for StarlarkCommandLineInputs { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let mut comma = commas(); - write!(f, "command_line_inputs(")?; - for v in self.inputs.iter() { - comma(f)?; - Display::fmt(v, f)?; - } - write!(f, ")") + fmt_container(f, "command_line_inputs(", ")", self.inputs.iter()) } } #[starlark_value(type = "command_line_inputs")] impl<'v> StarlarkValue<'v> for StarlarkCommandLineInputs { - fn length(&self) -> anyhow::Result { - Ok(self.inputs.len().try_into()?) + fn length(&self) -> starlark::Result { + self.inputs + .len() + .try_into() + .map_err(starlark::Error::new_other) } - fn equals(&self, other: Value<'v>) -> anyhow::Result { + fn equals(&self, other: Value<'v>) -> starlark::Result { if let Some(other) = other.downcast_ref::() { Ok(self.inputs == other.inputs) } else { diff --git a/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/value.rs b/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/value.rs index bef00e6146966..7a872a82e07ec 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/value.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/value.rs @@ -7,6 +7,7 @@ * of this source tree. 
*/ +use std::convert::Infallible; use std::fmt::Display; use allocative::Allocative; @@ -14,15 +15,19 @@ use dupe::Dupe; use serde::Serializer; use starlark::__derive_refs::serde::Serialize; use starlark::coerce::Coerce; +use starlark::typing::Ty; +use starlark::values::type_repr::StarlarkTypeRepr; use starlark::values::Freeze; use starlark::values::Freezer; use starlark::values::FrozenValue; use starlark::values::Trace; +use starlark::values::UnpackValue; use starlark::values::Value; -use starlark::values::ValueLike; +use starlark::values::ValueTyped; use crate::interpreter::rule_defs::cmd_args::value_as::ValueAsCommandLineLike; use crate::interpreter::rule_defs::cmd_args::CommandLineArgLike; +use crate::interpreter::rule_defs::cmd_args::StarlarkCmdArgs; fn serialize_as_display(v: &V, s: S) -> Result where @@ -44,8 +49,7 @@ where Trace, derive_more::Display, Serialize, - Allocative, - Coerce + Allocative )] #[serde(transparent)] #[repr(transparent)] @@ -59,16 +63,40 @@ impl<'v> Freeze for CommandLineArg<'v> { } } +impl<'v> StarlarkTypeRepr for CommandLineArg<'v> { + type Canonical = as StarlarkTypeRepr>::Canonical; + + fn starlark_type_repr() -> Ty { + ValueAsCommandLineLike::starlark_type_repr() + } +} + +impl<'v> UnpackValue<'v> for CommandLineArg<'v> { + type Error = Infallible; + + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + if ValueAsCommandLineLike::unpack_value_opt(value).is_some() { + Ok(Some(CommandLineArg(value))) + } else { + Ok(None) + } + } +} + impl<'v> CommandLineArg<'v> { - pub(crate) fn try_from_value(value: Value<'v>) -> anyhow::Result { - value.to_value().as_command_line_err()?; - Ok(Self(value)) + pub fn from_cmd_args(cmd_args: ValueTyped<'v, StarlarkCmdArgs<'v>>) -> Self { + let _no_check_needed: &dyn CommandLineArgLike = cmd_args.as_ref(); + CommandLineArg(cmd_args.to_value()) } - pub(crate) fn as_command_line_arg(self) -> &'v dyn CommandLineArgLike { - self.0 - .as_command_line_err() + pub fn as_command_line_arg(self) -> &'v dyn CommandLineArgLike { + ValueAsCommandLineLike::unpack_value_err(self.0) .expect("checked type in constructor") + .0 + } + + pub fn to_value(self) -> Value<'v> { + self.0 } } @@ -89,7 +117,7 @@ unsafe impl<'v> Coerce> for FrozenCommandLineArg {} impl FrozenCommandLineArg { pub fn new(value: FrozenValue) -> anyhow::Result { - value.to_value().as_command_line_err()?; + ValueAsCommandLineLike::unpack_value_err(value.to_value())?; Ok(FrozenCommandLineArg(value)) } diff --git a/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/value_as.rs b/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/value_as.rs index 61d55150cad95..22212caa142c3 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/value_as.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/cmd_args/value_as.rs @@ -7,48 +7,45 @@ * of this source tree. 
*/ +use std::convert::Infallible; + use buck2_interpreter::types::cell_path::StarlarkCellPath; use buck2_interpreter::types::cell_root::CellRoot; use buck2_interpreter::types::configured_providers_label::StarlarkConfiguredProvidersLabel; -use buck2_interpreter::types::project_root::ProjectRoot; +use buck2_interpreter::types::project_root::StarlarkProjectRoot; use buck2_interpreter::types::target_label::StarlarkTargetLabel; +use dupe::Dupe; +use starlark::typing::Ty; +use starlark::values::type_repr::StarlarkTypeRepr; +use starlark::values::UnpackValue; use starlark::values::Value; use starlark::values::ValueLike; +use crate::interpreter::rule_defs::cmd_args::command_line_arg_like_type::command_line_arg_like_ty; use crate::interpreter::rule_defs::cmd_args::CommandLineArgLike; use crate::interpreter::rule_defs::provider::builtin::run_info::FrozenRunInfo; use crate::interpreter::rule_defs::provider::builtin::run_info::RunInfo; -use crate::starlark::values::StarlarkValue; -#[derive(Debug, thiserror::Error)] -enum CommandLineArgError { - #[error( - "expected command line item to be a string, artifact, {}, or {}, or list thereof, \ - not `{repr}` (`{type_name}`)", - StarlarkConfiguredProvidersLabel::TYPE, - StarlarkTargetLabel::TYPE - )] - InvalidItemType { - repr: String, - type_name: &'static str, - }, -} +pub struct ValueAsCommandLineLike<'v>(pub &'v dyn CommandLineArgLike); + +impl<'v> StarlarkTypeRepr for ValueAsCommandLineLike<'v> { + type Canonical = ValueAsCommandLineLike<'static>; -pub trait ValueAsCommandLineLike<'v> { - fn as_command_line(&self) -> Option<&'v dyn CommandLineArgLike>; - fn as_command_line_err(&self) -> anyhow::Result<&'v dyn CommandLineArgLike>; + fn starlark_type_repr() -> Ty { + command_line_arg_like_ty().dupe() + } } -impl<'v> ValueAsCommandLineLike<'v> for Value<'v> { - fn as_command_line(&self) -> Option<&'v dyn CommandLineArgLike> { - if let Some(x) = self.to_value().unpack_starlark_str() { - return Some(x as &dyn CommandLineArgLike); +impl<'v> ValueAsCommandLineLike<'v> { + pub(crate) fn unpack(value: Value<'v>) -> Option { + if let Some(x) = value.unpack_starlark_str() { + return Some(ValueAsCommandLineLike(x as &dyn CommandLineArgLike)); } macro_rules! 
check { ($t:ty) => { - if let Some(v) = self.to_value().downcast_ref::<$t>() { - return Some(v as &dyn CommandLineArgLike); + if let Some(v) = value.downcast_ref::<$t>() { + return Some(ValueAsCommandLineLike(v as &dyn CommandLineArgLike)); } }; } @@ -65,18 +62,16 @@ impl<'v> ValueAsCommandLineLike<'v> for Value<'v> { check!(StarlarkTargetLabel); check!(StarlarkConfiguredProvidersLabel); check!(CellRoot); - check!(ProjectRoot); + check!(StarlarkProjectRoot); - self.request_value() + Some(ValueAsCommandLineLike(value.request_value()?)) } +} + +impl<'v> UnpackValue<'v> for ValueAsCommandLineLike<'v> { + type Error = Infallible; - fn as_command_line_err(&self) -> anyhow::Result<&'v dyn CommandLineArgLike> { - self.as_command_line().ok_or_else(|| { - CommandLineArgError::InvalidItemType { - repr: self.to_value().to_repr(), - type_name: self.to_value().get_type(), - } - .into() - }) + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + Ok(Self::unpack(value)) } } diff --git a/app/buck2_build_api/src/interpreter/rule_defs/command_executor_config.rs b/app/buck2_build_api/src/interpreter/rule_defs/command_executor_config.rs index e7941adf48be6..035743cb83519 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/command_executor_config.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/command_executor_config.rs @@ -18,13 +18,18 @@ use buck2_core::execution_types::executor_config::Executor; use buck2_core::execution_types::executor_config::HybridExecutionLevel; use buck2_core::execution_types::executor_config::LocalExecutorOptions; use buck2_core::execution_types::executor_config::PathSeparatorKind; +use buck2_core::execution_types::executor_config::RePlatformFields; use buck2_core::execution_types::executor_config::RemoteEnabledExecutor; +use buck2_core::execution_types::executor_config::RemoteEnabledExecutorOptions; +use buck2_core::execution_types::executor_config::RemoteExecutorDependency; use buck2_core::execution_types::executor_config::RemoteExecutorOptions; use buck2_core::execution_types::executor_config::RemoteExecutorUseCase; use derive_more::Display; use starlark::any::ProvidesStaticType; +use starlark::collections::SmallMap; use starlark::environment::GlobalsBuilder; use starlark::values::dict::DictRef; +use starlark::values::list::UnpackList; use starlark::values::none::NoneOr; use starlark::values::none::NoneType; use starlark::values::starlark_value; @@ -32,9 +37,8 @@ use starlark::values::NoSerialize; use starlark::values::StarlarkValue; use starlark::values::Value; use starlark::values::ValueLike; -use thiserror::Error; -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum CommandExecutorConfigErrors { #[error("expected a dict, got `{0}` (type `{1}`)")] RePropertiesNotADict(String, String), @@ -49,7 +53,7 @@ enum CommandExecutorConfigErrors { } #[derive(Debug, Display, NoSerialize, ProvidesStaticType, Allocative)] -#[display(fmt = "{:?}", _0)] +#[display("{:?}", _0)] pub struct StarlarkCommandExecutorConfig(pub Arc); starlark_simple_value!(StarlarkCommandExecutorConfig); @@ -82,6 +86,8 @@ pub fn register_command_executor_config(builder: &mut GlobalsBuilder) { /// * `max_cache_upload_mebibytes`: Maximum size to upload in cache uploads /// * `experimental_low_pass_filter`: Whether to use the experimental low pass filter /// * `remote_output_paths`: How to express output paths to RE + /// * `remote_execution_resource_units`: The resources (eg. 
GPUs) to use for remote execution + /// * `remote_execution_dependencies`: Dependencies for remote execution for this platform #[starlark(as_type = StarlarkCommandExecutorConfig)] fn CommandExecutorConfig<'v>( #[starlark(require = named)] local_enabled: bool, @@ -106,6 +112,10 @@ pub fn register_command_executor_config(builder: &mut GlobalsBuilder) { >, #[starlark(default = false, require = named)] experimental_low_pass_filter: bool, #[starlark(default = NoneOr::None, require = named)] remote_output_paths: NoneOr<&str>, + #[starlark(default = NoneOr::None, require = named)] + remote_execution_resource_units: NoneOr, + #[starlark(default=UnpackList::default(), require = named)] + remote_execution_dependencies: UnpackList>, ) -> anyhow::Result { let command_executor_config = { let remote_execution_max_input_files_mebibytes = @@ -114,6 +124,8 @@ pub fn register_command_executor_config(builder: &mut GlobalsBuilder) { let remote_execution_queue_time_threshold_s = remote_execution_queue_time_threshold_s.into_option(); + let re_resource_units = remote_execution_resource_units.into_option(); + let max_cache_upload_mebibytes = max_cache_upload_mebibytes.into_option(); let re_properties = if remote_execution_properties.is_none() { @@ -127,14 +139,21 @@ pub fn register_command_executor_config(builder: &mut GlobalsBuilder) { ) })?; - Some( - re_properties - .iter() - .map(|(k, v)| (k.to_str(), v.to_str())) - .collect(), - ) + Some(RePlatformFields { + properties: Arc::new( + re_properties + .iter() + .map(|(k, v)| (k.to_str(), v.to_str())) + .collect(), + ), + }) }; + let re_dependencies = remote_execution_dependencies + .into_iter() + .map(RemoteExecutorDependency::parse) + .collect::>>()?; + let re_use_case = if remote_execution_use_case.is_none() { None } else { @@ -176,6 +195,7 @@ pub fn register_command_executor_config(builder: &mut GlobalsBuilder) { Some(RemoteExecutorOptions { re_max_input_files_bytes, re_max_queue_time_ms, + re_resource_units, }) } else { None @@ -231,7 +251,7 @@ pub fn register_command_executor_config(builder: &mut GlobalsBuilder) { None => RemoteEnabledExecutor::Remote(remote), }; - Executor::RemoteEnabled { + Executor::RemoteEnabled(RemoteEnabledExecutorOptions { executor, re_properties: re_properties.context( CommandExecutorConfigErrors::MissingField( @@ -244,10 +264,11 @@ pub fn register_command_executor_config(builder: &mut GlobalsBuilder) { cache_upload_behavior, remote_cache_enabled, remote_dep_file_cache_enabled, - } + dependencies: re_dependencies, + }) } (Some(local), None, true) => { - Executor::RemoteEnabled { + Executor::RemoteEnabled(RemoteEnabledExecutorOptions { executor: RemoteEnabledExecutor::Local(local), // FIXME: We need a migration flip the default for remote_cache_enabled to // remote_enabled first. 
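To make the two parameters added in this hunk concrete, here is a hedged Starlark sketch of a `CommandExecutorConfig` call; the property values and the dependency dict shape are illustrative assumptions, not confirmed API:

```python
CommandExecutorConfig(
    local_enabled = True,
    remote_enabled = True,
    remote_execution_properties = {"platform": "linux-remote-execution"},  # illustrative
    remote_execution_use_case = "buck2-default",  # illustrative
    # Resource units (e.g. GPUs) to request for remote execution.
    remote_execution_resource_units = 1,
    # Per-platform RE dependencies; each entry is handed to
    # RemoteExecutorDependency::parse (the dict keys here are assumed).
    remote_execution_dependencies = [{"smc_tier": "example_tier", "id": "example_id"}],
)
```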
@@ -258,7 +279,8 @@ pub fn register_command_executor_config(builder: &mut GlobalsBuilder) { cache_upload_behavior, remote_cache_enabled: true, remote_dep_file_cache_enabled, - } + dependencies: re_dependencies, + }) } // If remote cache is disabled, also disable the remote dep file cache as well (Some(local), None, false) => Executor::Local(local), diff --git a/app/buck2_build_api/src/interpreter/rule_defs/context.rs b/app/buck2_build_api/src/interpreter/rule_defs/context.rs index 70b58fd829b3c..56cacdf76faa5 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/context.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/context.rs @@ -9,12 +9,18 @@ use std::cell::RefCell; use std::cell::RefMut; +use std::convert::Infallible; use std::fmt; -use std::fmt::Display; use std::fmt::Formatter; use allocative::Allocative; +use anyhow::Context; +use buck2_core::provider::label::ConfiguredProvidersLabel; +use buck2_core::provider::label::ProvidersName; +use buck2_core::target::configured_target_label::ConfiguredTargetLabel; +use buck2_error::BuckErrorContext; use buck2_execute::digest_config::DigestConfig; +use buck2_interpreter::late_binding_ty::AnalysisContextReprLate; use buck2_interpreter::types::configured_providers_label::StarlarkConfiguredProvidersLabel; use buck2_util::late_binding::LateBinding; use derive_more::Display; @@ -26,6 +32,7 @@ use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; use starlark::eval::Evaluator; use starlark::typing::Ty; +use starlark::values::none::NoneOr; use starlark::values::starlark_value; use starlark::values::starlark_value_as_type::StarlarkValueAsType; use starlark::values::structs::StructRef; @@ -43,49 +50,46 @@ use starlark::values::ValueTyped; use starlark::values::ValueTypedComplex; use crate::analysis::registry::AnalysisRegistry; +use crate::deferred::calculation::GET_PROMISED_ARTIFACT; use crate::interpreter::rule_defs::plugins::AnalysisPlugins; /// Functions to allow users to interact with the Actions registry. /// /// Accessed via `ctx.actions.` -#[derive( - ProvidesStaticType, - Debug, - Display, - Trace, - NoSerialize, - Allocative, - StarlarkDocs -)] -#[display(fmt = "")] +#[derive(ProvidesStaticType, Debug, Display, Trace, NoSerialize, Allocative)] +#[display("")] pub struct AnalysisActions<'v> { /// Use a RefCell/Option so when we are done with it, without obtaining exclusive access, /// we can take the internal state without having to clone it. pub state: RefCell>>, /// Copies from the ctx, so we can capture them for `dynamic`. - pub attributes: ValueOfUnchecked<'v, StructRef<'v>>, - pub plugins: ValueTypedComplex<'v, AnalysisPlugins<'v>>, + pub attributes: Option>>, + pub plugins: Option>>, /// Digest configuration to use when interpreting digests passed in analysis. 
pub digest_config: DigestConfig, } impl<'v> AnalysisActions<'v> { - pub fn state(&self) -> RefMut> { - RefMut::map(self.state.borrow_mut(), |x| { - x.as_mut().expect("state to be present during execution") - }) + pub fn state(&self) -> anyhow::Result>> { + let state = self + .state + .try_borrow_mut() + .internal_error_anyhow("AnalysisActions.state is already borrowed")?; + RefMut::filter_map(state, |x| x.as_mut()) + .ok() + .internal_error_anyhow("state to be present during execution") } pub async fn run_promises( &self, - dice: &DiceComputations, - eval: &mut Evaluator<'v, '_>, + dice: &mut DiceComputations<'_>, + eval: &mut Evaluator<'v, '_, '_>, description: String, ) -> anyhow::Result<()> { // We need to loop here because running the promises evaluates promise.map, which might produce more promises. // We keep going until there are no promises left. loop { - let promises = self.state().take_promises(); + let promises = self.state()?.take_promises(); if let Some(promises) = promises { promises .run_promises(dice, eval, description.clone()) @@ -95,10 +99,29 @@ impl<'v> AnalysisActions<'v> { } } - // Resolve promise artifacts after running the promises - let state = self.state(); - let short_path_assertions = &state.short_path_assertions; - state.resolve_artifacts(short_path_assertions)?; + self.assert_short_paths_and_resolve(dice).await?; + + Ok(()) + } + + // Called after `run_promises()` to assert short paths and resolve consumer's promise artifacts. + pub async fn assert_short_paths_and_resolve( + &self, + dice: &mut DiceComputations<'_>, + ) -> anyhow::Result<()> { + let (short_path_assertions, consumer_analysis_artifacts) = { + let state = self.state()?; + ( + state.short_path_assertions.clone(), + state.consumer_analysis_artifacts(), + ) + }; + + for consumer_artifact in consumer_analysis_artifacts { + let artifact = (GET_PROMISED_ARTIFACT.get()?)(&consumer_artifact, dice).await?; + let short_path = short_path_assertions.get(consumer_artifact.id()).cloned(); + consumer_artifact.resolve(artifact.clone(), &short_path)?; + } Ok(()) } } @@ -112,10 +135,6 @@ impl<'v> StarlarkValue<'v> for AnalysisActions<'v> { (ANALYSIS_ACTIONS_METHODS_ANON_TARGET.get().unwrap())(builder); }) } - - fn get_type_starlark_repr() -> Ty { - Ty::starlark_value::() - } } impl<'v> AllocValue<'v> for AnalysisActions<'v> { @@ -124,36 +143,35 @@ impl<'v> AllocValue<'v> for AnalysisActions<'v> { } } +#[allow(dead_code)] // field `0` is never read struct RefAnalysisAction<'v>(&'v AnalysisActions<'v>); impl<'v> StarlarkTypeRepr for RefAnalysisAction<'v> { + type Canonical = as StarlarkTypeRepr>::Canonical; + fn starlark_type_repr() -> Ty { AnalysisActions::starlark_type_repr() } } impl<'v> UnpackValue<'v> for RefAnalysisAction<'v> { - fn unpack_value(value: Value<'v>) -> Option { - Some(RefAnalysisAction( - value.downcast_ref::().unwrap(), - )) + type Error = Infallible; + + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + let Some(analysis_actions) = value.downcast_ref::() else { + return Ok(None); + }; + Ok(Some(RefAnalysisAction(analysis_actions))) } } -#[derive( - ProvidesStaticType, - Debug, - Trace, - NoSerialize, - Allocative, - StarlarkDocs -)] +#[derive(ProvidesStaticType, Debug, Trace, NoSerialize, Allocative)] pub struct AnalysisContext<'v> { - attrs: ValueOfUnchecked<'v, StructRef<'v>>, + attrs: Option>>, pub actions: ValueTyped<'v, AnalysisActions<'v>>, /// Only `None` when running a `dynamic_output` action from Bxl. 
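From the Starlark side, this promise machinery is driven by anon targets. A rough sketch follows; it assumes the `anon_target(...).promise` and `artifact_promise` APIs, whose exact names vary across buck2 versions, so treat it as illustrative:

```python
def _impl(ctx):
    # Queues a promise; it is resolved by the run_promises() loop above,
    # not at the point of this call.
    promise = ctx.actions.anon_target(_helper_rule, {"src": ctx.attrs.src}).promise
    # The promise artifact is resolved (and any short-path assertion checked)
    # only after all promises have run.
    out = ctx.actions.artifact_promise(promise.map(lambda ps: ps[DefaultInfo].default_outputs[0]))
    return [DefaultInfo(default_outputs = [out])]
```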
label: Option>, - plugins: ValueTypedComplex<'v, AnalysisPlugins<'v>>, + plugins: Option>>, } impl<'v> Display for AnalysisContext<'v> { @@ -171,16 +189,14 @@ impl<'v> Display for AnalysisContext<'v> { impl<'v> AnalysisContext<'v> { /// The context that is provided to users' UDR implementation functions. Comprised of things like attribute values, actions, etc - pub fn new( + fn new( heap: &'v Heap, - attrs: Value<'v>, + attrs: Option>>, label: Option>, - plugins: ValueTypedComplex<'v, AnalysisPlugins<'v>>, + plugins: Option>>, registry: AnalysisRegistry<'v>, digest_config: DigestConfig, ) -> Self { - let attrs = ValueOfUnchecked::new_checked(attrs).unwrap(); - Self { attrs, actions: heap.alloc_typed(AnalysisActions { @@ -194,8 +210,26 @@ impl<'v> AnalysisContext<'v> { } } - pub(crate) fn assert_no_promises(&self) -> anyhow::Result<()> { - self.actions.state().assert_no_promises() + pub fn prepare( + heap: &'v Heap, + attrs: Option>>, + label: Option, + plugins: Option>>, + registry: AnalysisRegistry<'v>, + digest_config: DigestConfig, + ) -> ValueTyped<'v, AnalysisContext<'v>> { + let label = label.map(|label| { + heap.alloc_typed(StarlarkConfiguredProvidersLabel::new( + ConfiguredProvidersLabel::new(label, ProvidersName::Default), + )) + }); + + let analysis_context = Self::new(heap, attrs, label, plugins, registry, digest_config); + heap.alloc_typed(analysis_context) + } + + pub fn assert_no_promises(&self) -> anyhow::Result<()> { + self.actions.state()?.assert_no_promises() } /// Must take an `AnalysisContext` which has never had `take_state` called on it before. @@ -214,10 +248,6 @@ impl<'v> StarlarkValue<'v> for AnalysisContext<'v> { static RES: MethodsStatic = MethodsStatic::new(); RES.methods(analysis_context_methods) } - - fn get_type_starlark_repr() -> Ty { - Ty::starlark_value::() - } } impl<'v> AllocValue<'v> for AnalysisContext<'v> { @@ -229,16 +259,21 @@ impl<'v> AllocValue<'v> for AnalysisContext<'v> { struct RefAnalysisContext<'v>(&'v AnalysisContext<'v>); impl<'v> StarlarkTypeRepr for RefAnalysisContext<'v> { + type Canonical = as StarlarkTypeRepr>::Canonical; + fn starlark_type_repr() -> Ty { AnalysisContext::starlark_type_repr() } } impl<'v> UnpackValue<'v> for RefAnalysisContext<'v> { - fn unpack_value(value: Value<'v>) -> Option { - Some(RefAnalysisContext( - value.downcast_ref::().unwrap(), - )) + type Error = Infallible; + + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + let Some(analysis_context) = value.downcast_ref::() else { + return Ok(None); + }; + Ok(Some(RefAnalysisContext(analysis_context))) } } @@ -256,8 +291,12 @@ fn analysis_context_methods(builder: &mut MethodsBuilder) { /// As an example, given a rule with the `attrs` argument of `{"foo": attrs.string()}`, this field will be /// a `struct` containing a field `foo` of type string. #[starlark(attribute)] - fn attrs<'v>(this: RefAnalysisContext) -> anyhow::Result>> { - Ok(this.0.attrs) + fn attrs<'v>( + this: RefAnalysisContext, + ) -> anyhow::Result>> { + this.0 + .attrs + .context("`attrs` is not available for `dynamic_output` or BXL") } /// Returns an `actions` value containing functions to define actual actions that are run. 
@@ -274,8 +313,8 @@ fn analysis_context_methods(builder: &mut MethodsBuilder) { #[starlark(attribute)] fn label<'v>( this: RefAnalysisContext, - ) -> anyhow::Result>> { - Ok(this.0.label) + ) -> anyhow::Result>> { + Ok(NoneOr::from_option(this.0.label)) } /// An opaque value that can be indexed with a plugin kind to get a list of the available plugin @@ -285,7 +324,9 @@ fn analysis_context_methods(builder: &mut MethodsBuilder) { fn plugins<'v>( this: RefAnalysisContext, ) -> anyhow::Result>> { - Ok(this.0.plugins) + this.0 + .plugins + .context("`plugins` is not available for `dynamic_output` or BXL") } } @@ -299,3 +340,7 @@ pub static ANALYSIS_ACTIONS_METHODS_ACTIONS: LateBinding = LateBinding::new("ANALYSIS_ACTIONS_METHODS_ANON_TARGET"); + +pub(crate) fn init_analysis_context_ty() { + AnalysisContextReprLate::init(AnalysisContext::starlark_type_repr()); +} diff --git a/app/buck2_build_api/src/interpreter/rule_defs/digest_config.rs b/app/buck2_build_api/src/interpreter/rule_defs/digest_config.rs index b79fb243d9c87..064912b9bc0b7 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/digest_config.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/digest_config.rs @@ -36,10 +36,9 @@ use starlark::values::Value; Trace, ProvidesStaticType, NoSerialize, - Allocative, - StarlarkDocs + Allocative )] -#[display(fmt = "{}", "self.digest_config")] +#[display("{}", self.digest_config)] pub struct StarlarkDigestConfig { #[freeze(identity)] pub digest_config: DigestConfig, diff --git a/app/buck2_build_api/src/interpreter/rule_defs/label_relative_path.rs b/app/buck2_build_api/src/interpreter/rule_defs/label_relative_path.rs index 777099b6f1400..0042d374be8e4 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/label_relative_path.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/label_relative_path.rs @@ -8,12 +8,18 @@ */ use buck2_interpreter::types::cell_path::StarlarkCellPath; +use starlark::values::type_repr::StarlarkTypeRepr; +use crate::interpreter::rule_defs::cmd_args::command_line_arg_like_type::command_line_arg_like_impl; use crate::interpreter::rule_defs::cmd_args::CommandLineArgLike; use crate::interpreter::rule_defs::cmd_args::CommandLineBuilder; use crate::interpreter::rule_defs::cmd_args::CommandLineContext; impl CommandLineArgLike for StarlarkCellPath { + fn register_me(&self) { + command_line_arg_like_impl!(StarlarkCellPath::starlark_type_repr()); + } + fn add_to_command_line( &self, cli: &mut dyn CommandLineBuilder, diff --git a/app/buck2_build_api/src/interpreter/rule_defs/mod.rs b/app/buck2_build_api/src/interpreter/rule_defs/mod.rs deleted file mode 100644 index 39d7b507c7230..0000000000000 --- a/app/buck2_build_api/src/interpreter/rule_defs/mod.rs +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -use starlark::environment::GlobalsBuilder; - -use crate::interpreter::rule_defs::provider::registration::register_builtin_providers; - -pub mod artifact; -pub mod artifact_tagging; -pub mod cmd_args; -pub mod command_executor_config; -pub mod context; -pub mod digest_config; -pub mod label_relative_path; -pub mod plugins; -pub mod provider; -pub mod resolve_query_macro; -pub mod resolved_macro; -pub mod transitive_set; - -pub fn register_rule_defs(globals: &mut GlobalsBuilder) { - cmd_args::register_cmd_args(globals); - register_builtin_providers(globals); -} diff --git a/app/buck2_build_api/src/interpreter/rule_defs/plugins.rs b/app/buck2_build_api/src/interpreter/rule_defs/plugins.rs index 0b2caa96483ea..995a30a629af7 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/plugins.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/plugins.rs @@ -17,7 +17,6 @@ use dupe::Dupe; use starlark::coerce::coerce; use starlark::coerce::CoerceKey; use starlark::starlark_complex_value; -use starlark::typing::Ty; use starlark::values::starlark_value; use starlark::values::Coerce; use starlark::values::Freeze; @@ -27,6 +26,7 @@ use starlark::values::ProvidesStaticType; use starlark::values::StarlarkValue; use starlark::values::Trace; use starlark::values::Value; +use starlark::values::ValueLifetimeless; use starlark::values::ValueLike; use starlark_map::small_map::SmallMap; @@ -35,12 +35,7 @@ use starlark_map::small_map::SmallMap; Clone, Dupe, Debug, Display, Eq, PartialEq, Hash, Ord, PartialOrd, Allocative, Freeze, Trace )] #[repr(transparent)] -struct PluginKindWrapper( - #[freeze(identity)] - // SAFETY: `PluginKind` does not contain any starlark values - #[trace(unsafe_ignore)] - PluginKind, -); +struct PluginKindWrapper(#[freeze(identity)] PluginKind); // SAFETY: Trivial coercion is always correct unsafe impl Coerce for PluginKindWrapper {} @@ -66,41 +61,39 @@ impl Borrow for PluginKindWrapper { NoSerialize, Allocative )] -#[display(fmt = "")] +#[display("")] #[repr(transparent)] -pub struct AnalysisPluginsGen { +pub struct AnalysisPluginsGen { plugins: SmallMap, } starlark_complex_value!(pub AnalysisPlugins); -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum AnalysisPluginsError { #[error("The rule did not declare that it uses plugins of kind {0}")] PluginKindNotUsed(PluginKind), } #[starlark_value(type = "AnalysisPlugins")] -impl<'v, V: ValueLike<'v> + 'v> StarlarkValue<'v> for AnalysisPluginsGen +impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for AnalysisPluginsGen where Self: ProvidesStaticType<'v>, { - fn at(&self, index: Value<'v>, _heap: &'v Heap) -> anyhow::Result> { + fn at(&self, index: Value<'v>, _heap: &'v Heap) -> starlark::Result> { let kind = (PLUGIN_KIND_FROM_VALUE.get()?)(index)?; match self.plugins.get(&kind) { Some(v) => Ok(v.to_value()), - None => Err(AnalysisPluginsError::PluginKindNotUsed(kind).into()), + None => Err(starlark::Error::new_other( + AnalysisPluginsError::PluginKindNotUsed(kind), + )), } } - fn is_in(&self, other: Value<'v>) -> anyhow::Result { + fn is_in(&self, other: Value<'v>) -> starlark::Result { let kind = (PLUGIN_KIND_FROM_VALUE.get()?)(other)?; Ok(self.plugins.contains_key(&kind)) } - - fn get_type_starlark_repr() -> Ty { - Ty::starlark_value::() - } } impl<'v> AnalysisPlugins<'v> { diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider.rs new file mode 100644 index 0000000000000..4e88a46bdc567 --- /dev/null +++ 
b/app/buck2_build_api/src/interpreter/rule_defs/provider.rs
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+//! Providers are the data returned from a rule, and are the only way that information from this
+//! rule is available to rules that depend on it. Every rule must return at least the `DefaultInfo`
+//! provider, but most will also return either `RunInfo` (because they are executable) or some
+//! custom provider (because they are incorporated into something that is ultimately executable).
+//!
+//! Internal providers (those defined and used by buck itself) can be defined easily using the
+//! #[internal_provider(creator_func)] macro. This will generate all the code needed for that
+//! provider to be used in starlark and to be treated as a provider in the various rust utilities
+//! we have for providers.
+//!
+//! For an internal provider like:
+//! ```skip
+//! #[internal_provider(create_my_prov)]
+//! #[derive(Clone, Debug, Trace, Coerce)]
+//! #[repr(transparent)]
+//! pub struct MyProviderGen<V> {
+//! field1: V,
+//! field2: V,
+//! }
+//!
+//! #[starlark_module]
+//! fn create_my_prov(globals: &mut GlobalsBuilder) {
+//! fn NameDoesntMatter(
+//! // It's not enforced that the args here match the fields, but it's generally the user expectation that they do.
+//! field1: Value<'v>,
+//! field2: Value<'v>,
+//! ) -> MyProvider<'v> {
+//! // Can do some arg validation or computation here, just need to construct the provider.
+//! Ok(MyProvider {
+//! field1,
+//! field2
+//! })
+//! }
+//! }
+//! ```
+//!
+//! This will generate a "ProviderCallable" starlark type named (in starlark) `MyProvider` that acts like
+//! the instance returned by a `provider()` call in starlark (so can be used to construct instances of the
+//! provider or used in places like `attrs.dep(required_providers=[MyProvider])`).
+//!
+//! For provider instances, in starlark all of their fields will be accessible by the field name.
+//!
+//! In rust, a StarlarkValue can be converted to the provider like normal with `MyProvider::from_value()`.
+//! Often internally we'd have the analysis result (`FrozenProviderCollection`) and want to get the
+//! provider out of there, so there's a convenience function for that: `MyProvider::from_providers(collect)`.
+
+use std::convert::Infallible;
+// TODO(cjhopman): That last one would be more discoverable if we moved it onto the
+// `FrozenProviderCollectionValue` itself so you could do `collection.get::<MyProvider>()`.
+use std::fmt::Debug; +use std::sync::Arc; + +use buck2_core::provider::id::ProviderId; +use starlark::any::ProvidesStaticType; +use starlark::typing::Ty; +use starlark::values::type_repr::StarlarkTypeRepr; +use starlark::values::StarlarkValue; +use starlark::values::UnpackValue; +use starlark::values::Value; + +use crate::interpreter::rule_defs::provider::builtin::default_info::DefaultInfo; +use crate::interpreter::rule_defs::provider::builtin::default_info::DefaultInfoCallable; +use crate::interpreter::rule_defs::provider::builtin::default_info::FrozenDefaultInfo; +use crate::interpreter::rule_defs::provider::collection::ProviderCollection; +use crate::interpreter::rule_defs::provider::ty::abstract_provider::AbstractProvider; + +pub mod builtin; +pub mod callable; +pub mod collection; +pub mod dependency; +pub(crate) mod doc; +pub mod execution_platform; +pub mod registration; +pub mod test_provider; +pub mod ty; +pub(crate) mod user; + +/// Implemented by providers (builtin or user defined). +pub trait ProviderLike<'v>: Debug { + /// The ID. Guaranteed to be set on the `ProviderCallable` before constructing this object + fn id(&self) -> &Arc; + /// Returns a list of all the keys and values. + // TODO(cjhopman): I'd rather return an iterator. I couldn't get that to work, though. + fn items(&self) -> Vec<(&str, Value<'v>)>; +} + +/// Implemented by frozen builtin providers. +pub trait FrozenBuiltinProviderLike: ProviderLike<'static> + StarlarkValue<'static> { + fn builtin_provider_id() -> &'static Arc; +} + +unsafe impl<'v> ProvidesStaticType<'v> for &'v dyn ProviderLike<'v> { + type StaticType = &'static dyn ProviderLike<'static>; +} + +pub struct ValueAsProviderLike<'v>(pub(crate) &'v dyn ProviderLike<'v>); + +impl<'v> StarlarkTypeRepr for ValueAsProviderLike<'v> { + type Canonical = ::Canonical; + + fn starlark_type_repr() -> Ty { + AbstractProvider::starlark_type_repr() + } +} + +impl<'v> ValueAsProviderLike<'v> { + pub(crate) fn unpack(value: Value<'v>) -> Option { + Some(ValueAsProviderLike(value.request_value()?)) + } +} + +impl<'v> UnpackValue<'v> for ValueAsProviderLike<'v> { + type Error = Infallible; + + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + Ok(ValueAsProviderLike::unpack(value)) + } +} diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin.rs new file mode 100644 index 0000000000000..a304067fe1f7f --- /dev/null +++ b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin.rs @@ -0,0 +1,27 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! Builtin providers. 
+ +pub mod configuration_info; +pub mod constraint_setting_info; +pub mod constraint_value_info; +pub mod default_info; +pub mod execution_platform_info; +pub mod execution_platform_registration_info; +pub mod external_runner_test_info; +pub mod install_info; +pub mod local_resource_info; +pub mod platform_info; +pub mod run_info; +pub mod template_placeholder_info; +pub(crate) mod ty; +pub mod validation_info; +pub mod worker_info; +pub mod worker_run_info; diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/configuration_info.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/configuration_info.rs index 53d63945441d8..43e213688310b 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/configuration_info.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/configuration_info.rs @@ -12,7 +12,7 @@ use std::fmt::Debug; use allocative::Allocative; use buck2_build_api_derive::internal_provider; -use buck2_common::legacy_configs::parse_config_section_and_key; +use buck2_common::legacy_configs::configs::parse_config_section_and_key; use buck2_core::configuration::config_setting::ConfigSettingData; use buck2_core::configuration::constraints::ConstraintKey; use buck2_core::configuration::constraints::ConstraintValue; @@ -25,19 +25,22 @@ use starlark::collections::SmallMap; use starlark::environment::GlobalsBuilder; use starlark::eval::Evaluator; use starlark::values::dict::AllocDict; -use starlark::values::dict::Dict; -use starlark::values::dict::DictOf; use starlark::values::dict::DictRef; -use starlark::values::type_repr::DictType; +use starlark::values::dict::DictType; +use starlark::values::dict::UnpackDictEntries; use starlark::values::Freeze; use starlark::values::Heap; use starlark::values::Trace; +use starlark::values::UnpackAndDiscard; +use starlark::values::ValueLifetimeless; use starlark::values::ValueLike; use starlark::values::ValueOf; -use thiserror::Error; +use starlark::values::ValueOfUnchecked; +use starlark::values::ValueOfUncheckedGeneric; use crate::interpreter::rule_defs::provider::builtin::constraint_setting_info::ConstraintSettingInfo; use crate::interpreter::rule_defs::provider::builtin::constraint_value_info::ConstraintValueInfo; +use crate::interpreter::rule_defs::provider::builtin::constraint_value_info::FrozenConstraintValueInfo; /// Provider that signals that a rule contains configuration info. 
This is used both as part of /// defining configurations (`platform()`, `constraint_value()`) and defining whether a target "matches" @@ -45,17 +48,16 @@ use crate::interpreter::rule_defs::provider::builtin::constraint_value_info::Con #[internal_provider(configuration_info_creator)] #[derive(Debug, Trace, Coerce, Freeze, ProvidesStaticType, Allocative)] #[repr(C)] -pub struct ConfigurationInfoGen { - #[provider(field_type = DictType>)] - constraints: V, - #[provider(field_type = DictType)] - values: V, +pub struct ConfigurationInfoGen { + constraints: + ValueOfUncheckedGeneric>, + values: ValueOfUncheckedGeneric>, } impl<'v, V: ValueLike<'v>> ConfigurationInfoGen { pub fn to_config_setting_data(&self) -> ConfigSettingData { - let constraints = - DictRef::from_value(self.constraints.to_value()).expect("type checked on construction"); + let constraints = DictRef::from_value(self.constraints.get().to_value()) + .expect("type checked on construction"); let mut converted_constraints = BTreeMap::new(); for (k, v) in constraints.iter() { let key_target = StarlarkTargetLabel::from_value(k.to_value()) @@ -68,8 +70,8 @@ impl<'v, V: ValueLike<'v>> ConfigurationInfoGen { ); } - let values = - DictRef::from_value(self.values.to_value()).expect("type checked on construction"); + let values = DictRef::from_value(self.values.get().to_value()) + .expect("type checked on construction"); let mut converted_values = BTreeMap::new(); for (k, v) in values.iter() { let key_config = k.to_value().to_str(); @@ -117,13 +119,13 @@ impl<'v> ConfigurationInfo<'v> { } ConfigurationInfoGen { - constraints: heap.alloc(Dict::new(constraints)), - values: heap.alloc(AllocDict::EMPTY), + constraints: ValueOfUnchecked::new(heap.alloc(constraints)), + values: heap.alloc_typed_unchecked(AllocDict([("", ""); 0])).cast(), } } } -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum ConfigurationInfoError { #[error("key `{0}` in constraints dict does not match constraint value `{1}`")] ConstraintsKeyValueMismatch(String, String), @@ -135,16 +137,18 @@ enum ConfigurationInfoError { fn configuration_info_creator(globals: &mut GlobalsBuilder) { #[starlark(as_type = FrozenConfigurationInfo)] fn ConfigurationInfo<'v>( - #[starlark(require = named)] constraints: DictOf< - 'v, + #[starlark(require = named)] constraints: UnpackDictEntries< ValueOf<'v, &'v StarlarkTargetLabel>, ValueOf<'v, &'v ConstraintValueInfo<'v>>, >, - #[starlark(require = named)] values: DictOf<'v, &'v str, &'v str>, - eval: &mut Evaluator<'v, '_>, + #[starlark(require = named)] values: ValueOf< + 'v, + UnpackDictEntries<&'v str, UnpackAndDiscard<&'v str>>, + >, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result> { let mut new_constraints = SmallMap::new(); - for (constraint_setting, constraint_value) in constraints.collect_entries() { + for (constraint_setting, constraint_value) in constraints.entries { let constraint_setting_hashed = constraint_setting .value .get_hashed() @@ -162,13 +166,13 @@ fn configuration_info_creator(globals: &mut GlobalsBuilder) { new_constraints.insert_hashed(constraint_setting_hashed, constraint_value.value); assert!(prev.is_none()); } - for (k, _) in values.collect_entries() { + for (k, _) in &values.typed.entries { // Validate the config section and key can be parsed correctly parse_config_section_and_key(k, None)?; } Ok(ConfigurationInfo { - constraints: eval.heap().alloc(Dict::new(new_constraints)), - values: *values, + constraints: ValueOfUnchecked::new(eval.heap().alloc(new_constraints)), + values: 
values.as_unchecked().cast(), }) } } diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/constraint_setting_info.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/constraint_setting_info.rs index 9e9b1166c64a9..10ca2615e7629 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/constraint_setting_info.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/constraint_setting_info.rs @@ -23,8 +23,10 @@ use starlark::coerce::Coerce; use starlark::environment::GlobalsBuilder; use starlark::values::Freeze; use starlark::values::Trace; +use starlark::values::ValueLifetimeless; use starlark::values::ValueLike; use starlark::values::ValueOf; +use starlark::values::ValueOfUncheckedGeneric; use starlark::values::ValueTyped; /// Provider that signals that a target can be used as a constraint key. This is the only provider @@ -32,20 +34,21 @@ use starlark::values::ValueTyped; #[internal_provider(constraint_info_creator)] #[derive(Clone, Debug, Trace, Coerce, Freeze, ProvidesStaticType, Allocative)] #[repr(transparent)] -pub(crate) struct ConstraintSettingInfoGen { - #[provider(field_type = StarlarkTargetLabel)] - label: V, +pub(crate) struct ConstraintSettingInfoGen { + label: ValueOfUncheckedGeneric, } impl<'v, V: ValueLike<'v>> ConstraintSettingInfoGen { pub(crate) fn label(&self) -> ValueTyped<'v, StarlarkTargetLabel> { - ValueTyped::new(self.label.to_value()).expect("validated at construction") + ValueTyped::new_err(self.label.get().to_value()).expect("validated at construction") } } impl<'v> ConstraintSettingInfo<'v> { pub(crate) fn new(label: ValueOf<'v, &'v StarlarkTargetLabel>) -> ConstraintSettingInfo<'v> { - ConstraintSettingInfoGen { label: label.value } + ConstraintSettingInfoGen { + label: label.as_unchecked().cast(), + } } } @@ -55,6 +58,8 @@ fn constraint_info_creator(globals: &mut GlobalsBuilder) { fn ConstraintSettingInfo<'v>( #[starlark(require = named)] label: ValueOf<'v, &'v StarlarkTargetLabel>, ) -> anyhow::Result> { - Ok(ConstraintSettingInfo { label: *label }) + Ok(ConstraintSettingInfo { + label: label.as_unchecked().cast(), + }) } } diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/constraint_value_info.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/constraint_value_info.rs index 991cb6ecb1900..65fe2273f3337 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/constraint_value_info.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/constraint_value_info.rs @@ -20,31 +20,33 @@ use starlark::environment::GlobalsBuilder; use starlark::values::Freeze; use starlark::values::Trace; use starlark::values::UnpackValue; +use starlark::values::ValueLifetimeless; use starlark::values::ValueLike; use starlark::values::ValueOf; +use starlark::values::ValueOfUnchecked; +use starlark::values::ValueOfUncheckedGeneric; use starlark::values::ValueTyped; use crate::interpreter::rule_defs::provider::builtin::constraint_setting_info::ConstraintSettingInfo; +use crate::interpreter::rule_defs::provider::builtin::constraint_setting_info::FrozenConstraintSettingInfo; /// Provider that signals that a target can be used as a constraint key. This is the only provider /// returned by a `constraint_value()` target. 
#[internal_provider(constraint_value_info_creator)] #[derive(Clone, Debug, Trace, Coerce, Freeze, ProvidesStaticType, Allocative)] #[repr(C)] -pub(crate) struct ConstraintValueInfoGen { - #[provider(field_type = ConstraintSettingInfo<'v>)] - setting: V, - #[provider(field_type = StarlarkTargetLabel)] - label: V, +pub(crate) struct ConstraintValueInfoGen { + setting: ValueOfUncheckedGeneric, + label: ValueOfUncheckedGeneric, } impl<'v, V: ValueLike<'v>> ConstraintValueInfoGen { pub(crate) fn setting(&self) -> ValueOf<'v, &'v ConstraintSettingInfo<'v>> { - ValueOf::unpack_value(self.setting.to_value()).expect("validated at construction") + ValueOf::unpack_value_err(self.setting.get().to_value()).expect("validated at construction") } pub(crate) fn label(&self) -> ValueTyped<'v, StarlarkTargetLabel> { - ValueTyped::new(self.label.to_value()).expect("validated at construction") + ValueTyped::new_err(self.label.get().to_value()).expect("validated at construction") } } @@ -54,8 +56,8 @@ impl<'v> ConstraintValueInfo<'v> { label: ValueOf<'v, &'v StarlarkTargetLabel>, ) -> ConstraintValueInfo<'v> { ConstraintValueInfoGen { - setting: setting.value, - label: label.value, + setting: ValueOfUnchecked::new(setting.value), + label: label.as_unchecked().cast(), } } } @@ -68,8 +70,8 @@ fn constraint_value_info_creator(globals: &mut GlobalsBuilder) { #[starlark(require = named)] label: ValueOf<'v, &'v StarlarkTargetLabel>, ) -> anyhow::Result> { Ok(ConstraintValueInfo { - setting: *setting, - label: *label, + setting: ValueOfUnchecked::new(setting.value), + label: label.as_unchecked().cast(), }) } } diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/default_info.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/default_info.rs index b37f5c4e65d01..a36c56381e710 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/default_info.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/default_info.rs @@ -8,11 +8,13 @@ */ use std::fmt::Debug; +use std::iter; use std::ptr; use allocative::Allocative; use anyhow::Context; use buck2_artifact::artifact::artifact_type::Artifact; +use buck2_artifact::artifact::artifact_type::OutputArtifact; use buck2_build_api_derive::internal_provider; use dupe::Dupe; use starlark::any::ProvidesStaticType; @@ -20,30 +22,40 @@ use starlark::coerce::Coerce; use starlark::collections::SmallMap; use starlark::environment::GlobalsBuilder; use starlark::eval::Evaluator; -use starlark::values::dict::Dict; +use starlark::values::dict::AllocDict; +use starlark::values::dict::DictType; use starlark::values::dict::FrozenDictRef; +use starlark::values::dict::UnpackDictEntries; use starlark::values::list::AllocList; use starlark::values::list::ListRef; -use starlark::values::none::NoneType; -use starlark::values::type_repr::DictType; +use starlark::values::list::ListType; +use starlark::values::list::UnpackList; +use starlark::values::none::NoneOr; use starlark::values::Freeze; +use starlark::values::FrozenHeap; use starlark::values::FrozenRef; use starlark::values::FrozenValue; +use starlark::values::FrozenValueOfUnchecked; use starlark::values::FrozenValueTyped; +use starlark::values::Heap; +use starlark::values::StringValue; use starlark::values::Trace; +use starlark::values::UnpackAndDiscard; use starlark::values::UnpackValue; use starlark::values::Value; -use starlark::values::ValueError; +use starlark::values::ValueLifetimeless; use starlark::values::ValueLike; -use thiserror::Error; +use 
starlark::values::ValueOf;
+use starlark::values::ValueOfUnchecked;
+use starlark::values::ValueOfUncheckedGeneric;
+use starlark::StarlarkResultExt;
use crate::artifact_groups::ArtifactGroup;
-use crate::interpreter::rule_defs::artifact::StarlarkArtifact;
-use crate::interpreter::rule_defs::artifact::StarlarkArtifactLike;
-use crate::interpreter::rule_defs::artifact::ValueAsArtifactLike;
+use crate::interpreter::rule_defs::artifact::starlark_artifact::StarlarkArtifact;
+use crate::interpreter::rule_defs::artifact::starlark_artifact_like::ValueAsArtifactLike;
+use crate::interpreter::rule_defs::artifact_tagging::ArtifactTag;
use crate::interpreter::rule_defs::cmd_args::value_as::ValueAsCommandLineLike;
-use crate::interpreter::rule_defs::cmd_args::CommandLineArgLike;
-use crate::interpreter::rule_defs::cmd_args::SimpleCommandLineArtifactVisitor;
+use crate::interpreter::rule_defs::cmd_args::CommandLineArtifactVisitor;
use crate::interpreter::rule_defs::provider::collection::FrozenProviderCollection;
use crate::interpreter::rule_defs::provider::ProviderCollection;
@@ -115,30 +127,29 @@ use crate::interpreter::rule_defs::provider::ProviderCollection;
#[derive(Clone, Debug, Freeze, Trace, Coerce, ProvidesStaticType, Allocative)]
#[freeze(validator = validate_default_info, bounds = "V: ValueLike<'freeze>")]
#[repr(C)]
-pub struct DefaultInfoGen {
+pub struct DefaultInfoGen {
/// A mapping of names to `ProviderCollection`s. The keys are used when resolving the
/// `ProviderName` portion of a `ProvidersLabel` in order to access the providers for a
/// subtarget, such as when doing `buck2 build cell//foo:bar[baz]`. Just like any
/// `ProviderCollection`, this collection must include at least a `DefaultInfo` provider. The
/// subtargets can have their own subtargets as well, which can be accessed by chaining them,
/// e.g.: `buck2 build cell//foo:bar[baz][qux]`.
- #[provider(field_type = DictType>)]
- sub_targets: V,
+ sub_targets: ValueOfUncheckedGeneric>,
/// A list of `Artifact`s that are built by default if this rule is requested
- /// explicitly, or depended on as as a "source".
- #[provider(field_type = Vec)]
- default_outputs: V,
+ /// explicitly (via CLI or `$(location)` etc.), or depended on as a "source"
+ /// (i.e., `attrs.source()`).
+ default_outputs: ValueOfUncheckedGeneric>>,
/// A list of `ArtifactTraversable`. The underlying `Artifact`s they define will
- /// be built by default if this rule is requested, but _not_ when it's depended
- /// on as as a "source". `ArtifactTraversable` can be an `Artifact` (which yields
- /// itself), or `cmd_args`, which expand to all their inputs.
- #[provider(field_type = Vec)]
- other_outputs: V,
+ /// be built by default if this rule is requested (via CLI or `$(location)` etc.),
+ /// but _not_ when it's depended on as a "source" (i.e., `attrs.source()`).
+ /// `ArtifactTraversable` can be an `Artifact` (which yields itself), or
+ /// `cmd_args`, which expand to all their inputs.
+    other_outputs: ValueOfUncheckedGeneric>>,
 }

 fn validate_default_info(info: &FrozenDefaultInfo) -> anyhow::Result<()> {
     // Check length of default outputs
-    let default_output_list = ListRef::from_value(info.default_outputs.to_value())
+    let default_output_list = ListRef::from_value(info.default_outputs.get().to_value())
         .expect("should be a list from constructor");
     if default_output_list.len() > 1 {
         tracing::info!("DefaultInfo.default_output should only have a maximum of 1 item.");
@@ -160,16 +171,47 @@ fn validate_default_info(info: &FrozenDefaultInfo) -> anyhow::Result<()> {
     Ok(())
 }

+impl<'v> DefaultInfo<'v> {
+    pub fn empty(heap: &'v Heap) -> Self {
+        let sub_targets = ValueOfUnchecked::>::new(heap.alloc(AllocDict::EMPTY));
+        let default_outputs = ValueOfUnchecked::>::new(heap.alloc(AllocList::EMPTY));
+        let other_outputs = ValueOfUnchecked::>::new(heap.alloc(AllocList::EMPTY));
+        DefaultInfo {
+            sub_targets,
+            default_outputs,
+            other_outputs,
+        }
+    }
+}
+
 impl FrozenDefaultInfo {
+    pub(crate) fn testing_empty(heap: &FrozenHeap) -> FrozenValueTyped<'static, FrozenDefaultInfo> {
+        let sub_targets = heap
+            .alloc_typed_unchecked(AllocDict(
+                iter::empty::<(String, FrozenProviderCollection)>(),
+            ))
+            .cast();
+        let default_outputs =
+            FrozenValueOfUnchecked::>::new(heap.alloc(AllocList::EMPTY));
+        let other_outputs =
+            FrozenValueOfUnchecked::>::new(heap.alloc(AllocList::EMPTY));
+        FrozenValueTyped::new_err(heap.alloc(FrozenDefaultInfo {
+            sub_targets,
+            default_outputs,
+            other_outputs,
+        }))
+        .unwrap()
+    }
+
     fn get_sub_target_providers_impl(
         &self,
         name: &str,
     ) -> anyhow::Result>> {
-        FrozenDictRef::from_frozen_value(self.sub_targets)
+        FrozenDictRef::from_frozen_value(self.sub_targets.get())
             .context("sub_targets should be a dict-like object")?
             .get_str(name)
             .map(|v| {
-                FrozenValueTyped::new(v).context(
+                FrozenValueTyped::new_err(v).context(
                     "Values inside of a frozen provider should be frozen provider collection",
                 )
             })
@@ -186,7 +228,7 @@ impl FrozenDefaultInfo {
     fn default_outputs_impl(
         &self,
     ) -> anyhow::Result> + '_> {
-        let list = ListRef::from_frozen_value(self.default_outputs)
+        let list = ListRef::from_frozen_value(self.default_outputs.get())
             .context("Should be list of artifacts")?;

         Ok(list.iter().map(|v| {
@@ -198,6 +240,7 @@ impl FrozenDefaultInfo {
             } else {
                 // This code path is for StarlarkPromiseArtifact. We have to create a `StarlarkArtifact` object here.
                 let artifact_like = ValueAsArtifactLike::unpack_value(frozen_value.to_value())
+                    .into_anyhow_result()?
                     .context("Should be list of artifacts")?;
                 artifact_like.0.get_bound_starlark_artifact()?
             },
@@ -213,7 +256,7 @@ impl FrozenDefaultInfo {
     }

     pub fn default_outputs_raw(&self) -> FrozenValue {
-        self.default_outputs
+        self.default_outputs.get()
     }

     fn sub_targets_impl(
@@ -221,7 +264,7 @@ impl FrozenDefaultInfo {
     ) -> anyhow::Result<
         impl Iterator)>> + '_,
     > {
-        let sub_targets = FrozenDictRef::from_frozen_value(self.sub_targets)
+        let sub_targets = FrozenDictRef::from_frozen_value(self.sub_targets.get())
             .context("sub_targets should be a dict-like object")?;

         Ok(sub_targets.iter().map(|(k, v)| {
@@ -245,60 +288,63 @@ impl FrozenDefaultInfo {
     }

     pub fn sub_targets_raw(&self) -> FrozenValue {
-        self.sub_targets
+        self.sub_targets.get()
     }

     pub fn for_each_default_output_artifact_only(
         &self,
-        processor: &mut dyn FnMut(Artifact) -> anyhow::Result<()>,
+        processor: &mut dyn FnMut(Artifact),
     ) -> anyhow::Result<()> {
-        self.for_each_in_list(self.default_outputs, |value| {
+        self.for_each_in_list(self.default_outputs.get(), |value| {
             processor(
-                ValueAsArtifactLike::unpack_value(value)
-                    .ok_or_else(|| anyhow::anyhow!("not an artifact"))?
+                ValueAsArtifactLike::unpack_value_err(value)?
                     .0
                     .get_bound_artifact()?,
-            )
+            );
+            Ok(())
         })
     }

     pub fn for_each_default_output_other_artifacts_only(
         &self,
-        processor: &mut dyn FnMut(ArtifactGroup) -> anyhow::Result<()>,
+        processor: &mut dyn FnMut(ArtifactGroup),
     ) -> anyhow::Result<()> {
-        self.for_each_in_list(self.default_outputs, |value| {
-            let others = ValueAsArtifactLike::unpack_value(value)
-                .ok_or_else(|| anyhow::anyhow!("not an artifact"))?
+        self.for_each_in_list(self.default_outputs.get(), |value| {
+            let others = ValueAsArtifactLike::unpack_value_err(value)?
                 .0
                 .get_associated_artifacts();
             others
                 .iter()
                 .flat_map(|v| v.iter())
-                .for_each(|other| processor(other.dupe()).unwrap());
+                .for_each(|other| processor(other.dupe()));
             Ok(())
         })
     }

-    // TODO(marwhal): We can remove this once we migrate all other outputs to be handled with Artifacts directly
     pub fn for_each_other_output(
         &self,
-        processor: &mut dyn FnMut(ArtifactGroup) -> anyhow::Result<()>,
+        processor: &mut dyn FnMut(ArtifactGroup),
     ) -> anyhow::Result<()> {
-        self.for_each_in_list(self.other_outputs, |value| {
-            value
-                .as_artifact_traversable()
-                .with_context(|| format!("Expected artifact traversable, got: {:?}", value))?
-                .traverse(processor)
+        struct Visitor<'x>(&'x mut dyn FnMut(ArtifactGroup));
+
+        impl CommandLineArtifactVisitor for Visitor<'_> {
+            fn visit_input(&mut self, input: ArtifactGroup, _: Option<&ArtifactTag>) {
+                (self.0)(input);
+            }
+
+            fn visit_output(&mut self, _artifact: OutputArtifact, _tag: Option<&ArtifactTag>) {}
+        }
+
+        self.for_each_in_list(self.other_outputs.get(), |value| {
+            let arg_like = ValueAsCommandLineLike::unpack_value_err(value)?.0;
+            arg_like.visit_artifacts(&mut Visitor(processor))?;
+            Ok(())
         })
     }

-    pub fn for_each_output(
-        &self,
-        processor: &mut dyn FnMut(ArtifactGroup) -> anyhow::Result<()>,
-    ) -> anyhow::Result<()> {
+    pub fn for_each_output(&self, processor: &mut dyn FnMut(ArtifactGroup)) -> anyhow::Result<()> {
         self.for_each_default_output_artifact_only(&mut |a| processor(ArtifactGroup::Artifact(a)))?;
         self.for_each_default_output_other_artifacts_only(processor)?;
-        // TODO(marwhal): We can remove this once we migrate all other outputs to be handled with Artifacts directly
         self.for_each_other_output(processor)
     }

@@ -325,59 +371,7 @@ impl PartialEq for FrozenDefaultInfo {
     }
 }

-trait ArtifactTraversable {
-    fn traverse(
-        &self,
-        processor: &mut dyn FnMut(ArtifactGroup) -> anyhow::Result<()>,
-    ) -> anyhow::Result<()>;
-}
-
-// TODO: This is a hack. We need a way to express "the inputs of that other thing", but at the
-// moment we don't have one, so we allow adding a command line (which is often the input container
-// we care about) as an "other" output on DefaultInfo. We could use a better abstraction for this.
-impl ArtifactTraversable for &dyn CommandLineArgLike {
-    fn traverse(
-        &self,
-        processor: &mut dyn FnMut(ArtifactGroup) -> anyhow::Result<()>,
-    ) -> anyhow::Result<()> {
-        let mut acc = SimpleCommandLineArtifactVisitor::new();
-        CommandLineArgLike::visit_artifacts(*self, &mut acc)?;
-        for input in acc.inputs {
-            processor(input)?;
-        }
-        Ok(())
-    }
-}
-
-impl ArtifactTraversable for &dyn StarlarkArtifactLike {
-    fn traverse(
-        &self,
-        processor: &mut dyn FnMut(ArtifactGroup) -> anyhow::Result<()>,
-    ) -> anyhow::Result<()> {
-        processor(ArtifactGroup::Artifact(self.get_bound_artifact()?))?;
-        Ok(())
-    }
-}
-
-trait ValueAsArtifactTraversable<'v> {
-    fn as_artifact_traversable(&self) -> Option>;
-}
-
-impl<'v, V: ValueLike<'v>> ValueAsArtifactTraversable<'v> for V {
-    fn as_artifact_traversable(&self) -> Option> {
-        if let Some(artifact) = ValueAsArtifactLike::unpack_value(self.to_value()) {
-            return Some(Box::new(artifact.0));
-        }
-
-        if let Some(cli) = self.to_value().as_command_line() {
-            return Some(Box::new(cli));
-        }
-
-        None
-    }
-}
-
-#[derive(Debug, Error)]
+#[derive(Debug, buck2_error::Error)]
 enum DefaultOutputError {
     #[error("Cannot specify both `default_output` and `default_outputs`.")]
     ConflictingArguments,
@@ -387,91 +381,65 @@ enum DefaultOutputError {
 fn default_info_creator(builder: &mut GlobalsBuilder) {
     #[starlark(as_type = FrozenDefaultInfo)]
     fn DefaultInfo<'v>(
-        #[starlark(default = NoneType)] default_output: Value<'v>,
-        #[starlark(default = NoneType)] default_outputs: Value<'v>,
-        #[starlark(default = AllocList::EMPTY)] other_outputs: Value<'v>,
-        #[starlark(default = SmallMap::new())] sub_targets: SmallMap>,
-        eval: &mut Evaluator<'v, '_>,
+        // TODO(nga): parameters must be named only.
+        #[starlark(default = NoneOr::None)] default_output: NoneOr<
+            ValueOf<'v, ValueAsArtifactLike<'v>>,
+        >,
+        #[starlark(default = NoneOr::None)] default_outputs: NoneOr<
+            ValueOf<'v, UnpackList>>>,
+        >,
+        #[starlark(default = ValueOf { value: FrozenValue::new_empty_list().to_value(), typed: UnpackList::default()})]
+        other_outputs: ValueOf<
+            'v,
+            UnpackList>>,
+        >,
+        #[starlark(default = UnpackDictEntries::default())] sub_targets: UnpackDictEntries<
+            StringValue<'v>,
+            Value<'v>,
+        >,
+        eval: &mut Evaluator<'v, '_, '_>,
     ) -> anyhow::Result> {
         let heap = eval.heap();
-        let default_info_creator = || {
-            let default_outputs = heap.alloc(AllocList::EMPTY);
-            let other_outputs = heap.alloc(AllocList::EMPTY);
-            let sub_targets = heap.alloc(Dict::default());
-            heap.alloc(DefaultInfo {
-                sub_targets,
-                default_outputs,
-                other_outputs,
-            })
-        };

         // support both list and singular options for now until we migrate all the rules.
-        let valid_default_outputs = if !default_outputs.is_none() {
-            match ListRef::from_value(default_outputs) {
-                Some(list) => {
-                    if !default_output.is_none() {
-                        return Err(anyhow::anyhow!(DefaultOutputError::ConflictingArguments));
-                    }
-
-                    if list
-                        .iter()
-                        .all(|v| ValueAsArtifactLike::unpack_value(v).is_some())
-                    {
-                        default_outputs
-                    } else {
-                        return Err(anyhow::anyhow!(ValueError::IncorrectParameterTypeNamed(
-                            "default_outputs".to_owned()
-                        )));
-                    }
+        let valid_default_outputs: ValueOfUnchecked> =
+            match (default_outputs.into_option(), default_output.into_option()) {
+                (Some(list), None) => list.as_unchecked().cast(),
+                (None, Some(default_output)) => {
+                    // handle where we didn't specify `default_outputs`, which means we should use the new
+                    // `default_output`.
+                    eval.heap()
+                        .alloc_typed_unchecked(AllocList([default_output.as_unchecked()]))
+                        .cast()
                 }
-                None => {
-                    return Err(anyhow::anyhow!(ValueError::IncorrectParameterTypeNamed(
-                        "default_outputs".to_owned()
-                    )));
+                (None, None) => {
+                    ValueOfUnchecked::>::new(eval.heap().alloc(AllocList::EMPTY))
                 }
-            }
-        } else {
-            // handle where we didn't specify `default_outputs`, which means we should use the new
-            // `default_output`.
-            if default_output.is_none() {
-                eval.heap().alloc(AllocList::EMPTY)
-            } else if ValueAsArtifactLike::unpack_value(default_output).is_some() {
-                eval.heap().alloc(AllocList([default_output]))
-            } else {
-                return Err(anyhow::anyhow!(ValueError::IncorrectParameterTypeNamed(
-                    "default_output".to_owned()
-                )));
-            }
-        };
-
-        let valid_other_outputs = match ListRef::from_value(other_outputs) {
-            Some(list) => {
-                if list.iter().all(|v| v.as_artifact_traversable().is_some()) {
-                    Ok(other_outputs)
-                } else {
-                    Err(())
+                (Some(_), Some(_)) => {
+                    return Err(DefaultOutputError::ConflictingArguments.into());
                 }
-            }
-            None => Err(()),
-        }
-        .map_err(|_| ValueError::IncorrectParameterTypeNamed("other_outputs".to_owned()))?;
+            };

         let valid_sub_targets = sub_targets
+            .entries
             .into_iter()
             .map(|(k, v)| {
-                let as_provider_collection =
-                    ProviderCollection::try_from_value_with_default_info(v, default_info_creator)?;
+                let as_provider_collection = ProviderCollection::try_from_value_subtarget(v, heap)?;
                 Ok((
-                    heap.alloc_str(&k).get_hashed_value(),
-                    heap.alloc(as_provider_collection),
+                    k,
+                    ValueOfUnchecked::::new(
+                        heap.alloc(as_provider_collection),
+                    ),
                 ))
             })
-            .collect::, Value<'v>>>>()?;
+            .collect::, _)>>>()?;

         Ok(DefaultInfo {
             default_outputs: valid_default_outputs,
-            other_outputs: valid_other_outputs,
-            sub_targets: heap.alloc(Dict::new(valid_sub_targets)),
+            other_outputs: other_outputs.as_unchecked().cast(),
+            sub_targets: heap
+                .alloc_typed_unchecked(AllocDict(valid_sub_targets))
+                .cast(),
         })
     }
 }
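The rewritten `DefaultInfo` constructor above collapses the old ad-hoc `Value` probing into a single match over the typed `default_output` / `default_outputs` parameters. A rough standalone sketch of that merge logic, using plain `Option`/`Vec` as stand-ins for the Starlark unpacking types (nothing below is the buck2 API):

```rust
/// Hypothetical stand-in for the `default_output` / `default_outputs` merge:
/// at most one of the two optional parameters may be set, and the singular
/// form is normalized into a one-element list.
fn merge_outputs<T>(singular: Option<T>, plural: Option<Vec<T>>) -> Result<Vec<T>, String> {
    match (plural, singular) {
        // `default_outputs` wins when it is the only one given.
        (Some(list), None) => Ok(list),
        // The singular form becomes a one-element list.
        (None, Some(one)) => Ok(vec![one]),
        // Neither given: empty output list.
        (None, None) => Ok(Vec::new()),
        // Both given: reject, mirroring `DefaultOutputError::ConflictingArguments`.
        (Some(_), Some(_)) => {
            Err("Cannot specify both `default_output` and `default_outputs`.".to_owned())
        }
    }
}
```

Exhausting the four cases in one match is what lets the diff delete the nested `if`/`else` validation entirely.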
diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/execution_platform_info.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/execution_platform_info.rs
index 546d6c9eb9a40..670064e4ee444 100644
--- a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/execution_platform_info.rs
+++ b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/execution_platform_info.rs
@@ -13,7 +13,7 @@ use allocative::Allocative;
 use buck2_build_api_derive::internal_provider;
 use buck2_core::configuration::data::ConfigurationData;
 use buck2_core::execution_types::execution::ExecutionPlatform;
-use buck2_core::target::label::TargetLabel;
+use buck2_core::target::label::label::TargetLabel;
 use buck2_interpreter::types::target_label::StarlarkTargetLabel;
 use dupe::Dupe;
 use starlark::any::ProvidesStaticType;
@@ -21,17 +21,20 @@ use starlark::coerce::Coerce;
 use starlark::environment::GlobalsBuilder;
 use starlark::values::Freeze;
 use starlark::values::Trace;
-use starlark::values::Value;
+use starlark::values::ValueLifetimeless;
 use starlark::values::ValueLike;
-use thiserror::Error;
+use starlark::values::ValueOfUnchecked;
+use starlark::values::ValueOfUncheckedGeneric;
+use starlark::values::ValueTyped;
+use starlark::values::ValueTypedComplex;
+use starlark::StarlarkResultExt;

 use crate::interpreter::rule_defs::command_executor_config::StarlarkCommandExecutorConfig;
 use crate::interpreter::rule_defs::provider::builtin::configuration_info::ConfigurationInfo;
+use crate::interpreter::rule_defs::provider::builtin::configuration_info::FrozenConfigurationInfo;

-#[derive(Debug, Error)]
+#[derive(Debug, buck2_error::Error)]
 enum ExecutionPlatformProviderErrors {
-    #[error("expected a label, got `{0}` (type `{1}`)")]
-    ExpectedLabel(String, String),
     #[error("expected a ConfigurationInfo, got `{0}` (type `{1}`)")]
     ExpectedConfigurationInfo(String, String),
     #[error("expected a CommandExecutorConfig, got `{0}` (type `{1}`)")]
@@ -42,63 +45,61 @@ enum ExecutionPlatformProviderErrors {

 #[internal_provider(info_creator)]
 #[derive(Clone, Debug, Trace, Coerce, Freeze, ProvidesStaticType, Allocative)]
 #[repr(C)]
-pub struct ExecutionPlatformInfoGen {
+pub struct ExecutionPlatformInfoGen {
     /// label of the defining rule, used in informative messages
-    #[provider(field_type = StarlarkTargetLabel)]
-    label: V,
+    label: ValueOfUncheckedGeneric,
     /// The configuration of the execution platform
-    #[provider(field_type = ConfigurationInfo<'v>)]
-    configuration: V,
+    configuration: ValueOfUncheckedGeneric,
     /// The executor config
-    #[provider(field_type = StarlarkCommandExecutorConfig)]
-    executor_config: V,
+    executor_config: ValueOfUncheckedGeneric,
 }

 impl<'v, V: ValueLike<'v>> ExecutionPlatformInfoGen {
     pub fn to_execution_platform(&self) -> anyhow::Result {
-        let target = StarlarkTargetLabel::from_value(self.label.to_value())
-            .ok_or_else(|| {
-                ExecutionPlatformProviderErrors::ExpectedLabel(
-                    self.label.to_value().to_repr(),
-                    self.label.to_value().get_type().to_owned(),
-                )
-            })?
-            .label()
-            .dupe();
-        let cfg = ConfigurationInfo::from_value(self.configuration.to_value())
+        let target = self
+            .label
+            .cast::<&StarlarkTargetLabel>()
+            .unpack()
+            .into_anyhow_result()?
+            .label();
+        let cfg = ConfigurationInfo::from_value(self.configuration.get().to_value())
             .ok_or_else(|| {
                 ExecutionPlatformProviderErrors::ExpectedConfigurationInfo(
-                    self.configuration.to_value().to_repr(),
-                    self.configuration.to_value().get_type().to_owned(),
+                    self.configuration.to_value().get().to_repr(),
+                    self.configuration.to_value().get().get_type().to_owned(),
                 )
             })?
             .to_configuration_data()?;
-        let cfg = ConfigurationData::from_platform(TargetLabel::to_string(&target), cfg)?;
+        let cfg = ConfigurationData::from_platform(TargetLabel::to_string(target), cfg)?;
         let executor_config =
-            StarlarkCommandExecutorConfig::from_value(self.executor_config.to_value())
+            StarlarkCommandExecutorConfig::from_value(self.executor_config.get().to_value())
                 .ok_or_else(|| {
                     ExecutionPlatformProviderErrors::ExpectedCommandExecutorConfig(
-                        self.configuration.to_value().to_repr(),
-                        self.configuration.to_value().get_type().to_owned(),
+                        self.configuration.get().to_value().to_repr(),
+                        self.configuration.get().to_value().get_type().to_owned(),
                     )
                 })?
                 .0
                 .dupe();
-        Ok(ExecutionPlatform::platform(target, cfg, executor_config))
+        Ok(ExecutionPlatform::platform(
+            target.dupe(),
+            cfg,
+            executor_config,
+        ))
     }
 }

 #[starlark_module]
 fn info_creator(globals: &mut GlobalsBuilder) {
     fn ExecutionPlatformInfo<'v>(
-        #[starlark(require = named)] label: Value<'v>,
-        #[starlark(require = named)] configuration: Value<'v>,
-        #[starlark(require = named)] executor_config: Value<'v>,
+        #[starlark(require = named)] label: ValueTyped<'v, StarlarkTargetLabel>,
+        #[starlark(require = named)] configuration: ValueTypedComplex<'v, ConfigurationInfo<'v>>,
+        #[starlark(require = named)] executor_config: ValueTyped<'v, StarlarkCommandExecutorConfig>,
     ) -> anyhow::Result> {
         let info = ExecutionPlatformInfo {
-            label,
-            configuration,
-            executor_config,
+            label: label.to_value_of_unchecked(),
+            configuration: ValueOfUnchecked::new(configuration.to_value()),
+            executor_config: executor_config.to_value_of_unchecked(),
         };
         // This checks that the values are valid.
         info.to_execution_platform()?;
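The creator above now takes typed parameters and immediately exercises `to_execution_platform()`, so a bad value fails at construction rather than at first use. A minimal sketch of that construct-then-validate shape; every name and check below is an invented stand-in, not the buck2 implementation:

```rust
/// Stand-in for a provider that validates eagerly in its constructor.
struct PlatformSketch {
    label: String,
}

impl PlatformSketch {
    fn new(label: String) -> Result<PlatformSketch, String> {
        let info = PlatformSketch { label };
        // Run the same conversion consumers will run later; discard the result.
        info.to_platform()?;
        Ok(info)
    }

    /// The conversion that downstream code depends on.
    fn to_platform(&self) -> Result<(), String> {
        if self.label.is_empty() {
            return Err("expected a label".to_owned());
        }
        Ok(())
    }
}
```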
diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/execution_platform_registration_info.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/execution_platform_registration_info.rs
index bff0de4c7bc26..3bcbe2d8105ee 100644
--- a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/execution_platform_registration_info.rs
+++ b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/execution_platform_registration_info.rs
@@ -16,16 +16,23 @@ use starlark::any::ProvidesStaticType;
 use starlark::coerce::Coerce;
 use starlark::environment::GlobalsBuilder;
 use starlark::values::list::ListRef;
+use starlark::values::list::ListType;
+use starlark::values::none::NoneOr;
 use starlark::values::Freeze;
 use starlark::values::FrozenRef;
+use starlark::values::FrozenValue;
 use starlark::values::Trace;
 use starlark::values::Value;
-use thiserror::Error;
+use starlark::values::ValueLifetimeless;
+use starlark::values::ValueOf;
+use starlark::values::ValueOfUnchecked;
+use starlark::values::ValueOfUncheckedGeneric;
+use starlark::values::ValueTypedComplex;

 use crate::interpreter::rule_defs::provider::builtin::execution_platform_info::ExecutionPlatformInfo;
 use crate::interpreter::rule_defs::provider::builtin::execution_platform_info::FrozenExecutionPlatformInfo;

-#[derive(Debug, Error)]
+#[derive(Debug, buck2_error::Error)]
 enum ExecutionPlatformRegistrationTypeError {
     #[error("expected a list of ExecutionPlatformInfo, got `{0}` (type `{1}`)")]
     ExpectedListOfPlatforms(String, String),
@@ -41,13 +48,11 @@ enum ExecutionPlatformRegistrationTypeError {
 #[internal_provider(info_creator)]
 #[derive(Clone, Debug, Trace, Coerce, Freeze, ProvidesStaticType, Allocative)]
 #[repr(C)]
-pub struct ExecutionPlatformRegistrationInfoGen {
-    #[provider(field_type = Vec)]
-    platforms: V,
+pub struct ExecutionPlatformRegistrationInfoGen {
+    platforms: ValueOfUncheckedGeneric>,
     // OneOf
     // TODO(nga): specify type more precisely.
-    #[provider(field_type = Value<'v>)]
-    fallback: V,
+    fallback: ValueOfUncheckedGeneric,
 }

 impl FrozenExecutionPlatformRegistrationInfo {
@@ -55,11 +60,11 @@ impl FrozenExecutionPlatformRegistrationInfo {
     pub fn platforms(
         &self,
     ) -> anyhow::Result>> {
-        ListRef::from_frozen_value(self.platforms)
+        ListRef::from_frozen_value(self.platforms.get())
             .ok_or_else(|| {
                 ExecutionPlatformRegistrationTypeError::ExpectedListOfPlatforms(
-                    self.platforms.to_value().to_repr(),
-                    self.platforms.to_value().get_type().to_owned(),
+                    self.platforms.to_value().get().to_repr(),
+                    self.platforms.to_value().get().get_type().to_owned(),
                 )
             })?
             .iter()
@@ -78,10 +83,10 @@ impl FrozenExecutionPlatformRegistrationInfo {
     }

     pub fn fallback(&self) -> anyhow::Result {
-        if self.fallback.is_none() {
+        if self.fallback.get().is_none() {
             return Ok(ExecutionPlatformFallback::UseUnspecifiedExec);
         }
-        let fallback = self.fallback.to_value();
+        let fallback = self.fallback.get().to_value();
         if let Some(v) = ExecutionPlatformInfo::from_value(fallback) {
             return Ok(ExecutionPlatformFallback::Platform(
                 v.to_execution_platform()?,
@@ -104,12 +109,18 @@ impl FrozenExecutionPlatformRegistrationInfo {
 #[starlark_module]
 fn info_creator(globals: &mut GlobalsBuilder) {
     fn ExecutionPlatformRegistrationInfo<'v>(
-        #[starlark(require = named)] platforms: Value<'v>,
-        #[starlark(require = named)] fallback: Option>,
+        #[starlark(require = named)] platforms: ValueOf<
+            'v,
+            ListType>>,
+        >,
+        #[starlark(require = named, default = NoneOr::None)] fallback: NoneOr>,
     ) -> anyhow::Result> {
         Ok(ExecutionPlatformRegistrationInfo {
-            platforms,
-            fallback: fallback.unwrap_or_else(Value::new_none),
+            platforms: ValueOfUnchecked::new(platforms.value),
+            fallback: ValueOfUnchecked::new(match fallback {
+                NoneOr::None => Value::new_none(),
+                NoneOr::Other(v) => v,
+            }),
         })
     }
 }
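The registration provider stores its optional `fallback` as a concrete "none" value rather than as a Rust `Option`, so the frozen provider always has a field it can match on later. A small stand-in model of that normalization; the `Value` enum below is hypothetical and unrelated to the Starlark `Value`:

```rust
/// Hypothetical sentinel type: `None` here plays the role of Starlark's
/// `NoneType` stored inside the provider.
#[derive(Debug, PartialEq)]
enum Value {
    None,
    Platform(&'static str),
}

/// Normalize an optional input into a value that is always present.
fn store_fallback(fallback: Option<&'static str>) -> Value {
    match fallback {
        None => Value::None,
        Some(v) => Value::Platform(v),
    }
}

fn main() {
    assert_eq!(store_fallback(None), Value::None);
    assert_eq!(store_fallback(Some("exec")), Value::Platform("exec"));
}
```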
diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/external_runner_test_info.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/external_runner_test_info.rs
index c094c5cd33e11..e9cfa0444e685 100644
--- a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/external_runner_test_info.rs
+++ b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/external_runner_test_info.rs
@@ -21,17 +21,21 @@ use starlark::any::ProvidesStaticType;
 use starlark::coerce::Coerce;
 use starlark::environment::GlobalsBuilder;
 use starlark::values::dict::DictRef;
+use starlark::values::dict::DictType;
 use starlark::values::list::ListRef;
 use starlark::values::none::NoneOr;
 use starlark::values::none::NoneType;
 use starlark::values::tuple::TupleRef;
-use starlark::values::type_repr::DictType;
 use starlark::values::Freeze;
 use starlark::values::FrozenValue;
 use starlark::values::Trace;
 use starlark::values::UnpackValue;
 use starlark::values::Value;
+use starlark::values::ValueLifetimeless;
 use starlark::values::ValueLike;
+use starlark::values::ValueOfUnchecked;
+use starlark::values::ValueOfUncheckedGeneric;
+use starlark::StarlarkResultExt;

 use crate::interpreter::rule_defs::cmd_args::value_as::ValueAsCommandLineLike;
 use crate::interpreter::rule_defs::cmd_args::CommandLineArgLike;
@@ -39,6 +43,9 @@ use crate::interpreter::rule_defs::cmd_args::CommandLineArtifactVisitor;
 use crate::interpreter::rule_defs::cmd_args::CommandLineBuilder;
 use crate::interpreter::rule_defs::cmd_args::CommandLineContext;
 use crate::interpreter::rule_defs::command_executor_config::StarlarkCommandExecutorConfig;
+use crate::interpreter::rule_defs::provider::builtin::worker_info::FrozenWorkerInfo;
+use crate::interpreter::rule_defs::provider::builtin::worker_info::WorkerInfo;
+use crate::interpreter::rule_defs::required_test_local_resource::StarlarkRequiredTestLocalResource;
 use crate::interpreter::rule_defs::resolved_macro::ResolvedStringWithMacros;

 /// Provider that signals that a rule can be tested using an external runner. This is the
@@ -47,107 +54,134 @@ use crate::interpreter::rule_defs::resolved_macro::ResolvedStringWithMacros;
 #[derive(Clone, Debug, Trace, Coerce, Freeze, ProvidesStaticType, Allocative)]
 #[freeze(validator = validate_external_runner_test_info, bounds = "V: ValueLike<'freeze>")]
 #[repr(C)]
-pub struct ExternalRunnerTestInfoGen {
+pub struct ExternalRunnerTestInfoGen {
     /// A Starlark value representing the type of this test.
-    #[provider(field_type = String)]
-    test_type: V,
+    test_type: ValueOfUncheckedGeneric,

     /// A Starlark value representing the command for this test. The external test runner is what
     /// gives meaning to this command.
-    /// This is of type `list[str | ArgLike]`.
-    #[provider(field_type = Vec>)]
-    command: V,
+    command: ValueOfUncheckedGeneric>>,

     /// A Starlark value representing the environment for this test. Here again, the external test
     /// runner is what will this meaning.
     /// This is of type `dict[str, ArgLike]`.
-    #[provider(field_type = DictType)]
-    env: V,
+    env: ValueOfUncheckedGeneric>,

     /// A starlark value representing the labels for this test.
-    #[provider(field_type = Vec)]
-    labels: V,
+    labels: ValueOfUncheckedGeneric>,

     /// A starlark value representing the contacts for this test. This is largely expected to be an
     /// oncall, though it's not validated in any way.
-    #[provider(field_type = Vec)]
-    contacts: V,
+    contacts: ValueOfUncheckedGeneric>,

     /// Whether this test should use relative paths
-    #[provider(field_type = Vec)]
-    use_project_relative_paths: V,
+    use_project_relative_paths: ValueOfUncheckedGeneric,

     /// Whether this test should run from the project root, as opposed to the cell root
-    #[provider(field_type = Vec)]
-    run_from_project_root: V,
+    ///
+    /// Defaults to `True`.
+    run_from_project_root: ValueOfUncheckedGeneric,

     /// Default executor to use to run tests. If none is
     /// passed we will default to the execution platform.
-    #[provider(field_type = StarlarkCommandExecutorConfig)]
-    default_executor: V,
+    default_executor: ValueOfUncheckedGeneric,

     /// Executors that Tpx can use to override the default executor.
-    #[provider(field_type = DictType)]
-    executor_overrides: V,
+    executor_overrides: ValueOfUncheckedGeneric>,

     /// Mapping from a local resource type to a target with a corresponding provider.
     /// Required types are passed from test runner.
     /// If the value for a corresponding type is omitted it means local resource
     /// should be ignored when executing tests even if those are passed as required from test runner.
-    #[provider(field_type = DictType>)]
-    local_resources: V,
+    local_resources:
+        ValueOfUncheckedGeneric>>,
+
+    /// List of local resource types which should be set up additionally to those which are
+    /// passed from test runner. Allows specifying local resources on a per-rule basis.
+    required_local_resources: ValueOfUncheckedGeneric>,
+
+    /// Configuration needed to spawn a new worker. This worker will be used to run every single
+    /// command related to test execution, including listing.
+    worker: ValueOfUncheckedGeneric,
 }

 // NOTE: All the methods here unwrap because we validate at freeze time.
 impl FrozenExternalRunnerTestInfo {
     pub fn test_type(&self) -> &str {
-        self.test_type.to_value().unpack_str().unwrap()
+        self.test_type.to_value().get().unpack_str().unwrap()
     }

     pub fn command(&self) -> impl Iterator> {
-        unwrap_all(iter_test_command(self.command.to_value()))
+        unwrap_all(iter_test_command(self.command.get().to_value()))
     }

     pub fn env(&self) -> impl Iterator {
-        unwrap_all(iter_test_env(self.env.to_value()))
+        unwrap_all(iter_test_env(self.env.get().to_value()))
     }

     pub fn labels(&self) -> impl Iterator {
-        unwrap_all(iter_opt_str_list(self.labels.to_value(), "labels"))
+        unwrap_all(iter_opt_str_list(self.labels.get().to_value(), "labels"))
     }

     pub fn contacts(&self) -> impl Iterator {
-        unwrap_all(iter_opt_str_list(self.contacts.to_value(), "contacts"))
+        unwrap_all(iter_opt_str_list(
+            self.contacts.get().to_value(),
+            "contacts",
+        ))
     }

     pub fn use_project_relative_paths(&self) -> bool {
-        NoneOr::::unpack_value(self.use_project_relative_paths.to_value())
+        NoneOr::::unpack_value(self.use_project_relative_paths.get().to_value())
+            .unwrap()
             .unwrap()
             .into_option()
             .unwrap_or_else(buck2_core::is_open_source)
     }

     pub fn run_from_project_root(&self) -> bool {
-        NoneOr::::unpack_value(self.run_from_project_root.to_value())
+        NoneOr::::unpack_value(self.run_from_project_root.get().to_value())
+            .unwrap()
             .unwrap()
             .into_option()
-            .unwrap_or_else(buck2_core::is_open_source)
+            .unwrap_or(true)
     }

     pub fn default_executor(&self) -> Option<&StarlarkCommandExecutorConfig> {
-        unpack_opt_executor(self.default_executor.to_value()).unwrap()
+        unpack_opt_executor(self.default_executor.get().to_value()).unwrap()
     }

     /// Access a specific executor override.
     pub fn executor_override(&self, key: &str) -> Option<&StarlarkCommandExecutorConfig> {
-        let executor_overrides = DictRef::from_value(self.executor_overrides.to_value()).unwrap();
+        let executor_overrides =
+            DictRef::from_value(self.executor_overrides.get().to_value()).unwrap();
         executor_overrides
             .get_str(key)
             .map(|v| StarlarkCommandExecutorConfig::from_value(v.to_value()).unwrap())
     }

     pub fn local_resources(&self) -> IndexMap<&str, Option<&ConfiguredProvidersLabel>> {
-        unwrap_all(iter_local_resources(self.local_resources.to_value())).collect()
+        unwrap_all(iter_local_resources(self.local_resources.get().to_value())).collect()
+    }
+
+    pub fn required_local_resources(
+        &self,
+    ) -> impl Iterator {
+        let val = self.required_local_resources.get().to_value();
+        if val.is_none() {
+            return Either::Left(empty());
+        }
+        Either::Right(
+            iter_value(val)
+                .expect("checked during construction")
+                .map(|v| {
+                    StarlarkRequiredTestLocalResource::from_value(v)
+                        .expect("checked during construction")
+                }),
+        )
+    }
+
+    pub fn worker(&self) -> Option<&WorkerInfo> {
+        unpack_opt_worker(self.worker.get().to_value()).unwrap()
     }

     pub fn visit_artifacts(
@@ -192,13 +226,9 @@ impl<'v> TestCommandMember<'v> {
     }

 fn iter_value<'v>(value: Value<'v>) -> anyhow::Result> + 'v> {
-    match (ListRef::from_value(value), TupleRef::from_value(value)) {
-        (Some(list), None) => Ok(Either::Left(list.iter())),
-        (None, Some(tuple)) => Ok(Either::Right(tuple.iter())),
-        _ => Err(anyhow::anyhow!(
-            "Expected a list or a tuple, got `{}`",
-            value
-        )),
+    match Either::<&ListRef, &TupleRef>::unpack_value_err(value)? {
+        Either::Left(list) => Ok(list.iter()),
+        Either::Right(tuple) => Ok(tuple.iter()),
     }
 }

@@ -227,9 +257,9 @@ fn iter_test_command<'v>(
         }
     }

-    let arglike = item
-        .as_command_line_err()
-        .context("Invalid item in `command`")?;
+    let arglike = ValueAsCommandLineLike::unpack_value_err(item)
+        .with_context(|| format!("Invalid item in `command`: {}", item))?
+        .0;

     Ok(TestCommandMember::Arglike(arglike))
 }))
@@ -252,9 +282,6 @@ fn iter_test_env<'v>(
         }
     };

-    // TODO: In an ideal world this wouldnt be necessary, but env's lifetime is bound by this
-    // function.
-    #[allow(clippy::needless_collect)]
     let env = env.iter().collect::>();

     Either::Right(env.into_iter().map(|(key, value)| {
@@ -262,9 +289,9 @@ fn iter_test_env<'v>(
             .unpack_str()
             .with_context(|| format!("Invalid key in `env`: Expected a str, got: `{}`", key))?;

-        let arglike = value
-            .as_command_line_err()
-            .with_context(|| format!("Invalid value in `env` for key `{}`", key))?;
+        let arglike = ValueAsCommandLineLike::unpack_value_err(value)
+            .with_context(|| format!("Invalid value in `env` for key `{}`", key))?
+            .0;

         Ok((key, arglike))
     }))
@@ -313,9 +340,6 @@ fn iter_executor_overrides<'v>(
         }
     };

-    // TODO: In an ideal world this wouldnt be necessary, but executor_overrides's lifetime is
-    // bound by this function.
-    #[allow(clippy::needless_collect)]
     let executor_overrides = executor_overrides.iter().collect::>();

     Either::Right(executor_overrides.into_iter().map(|(key, value)| {
@@ -350,9 +374,6 @@ fn iter_local_resources<'v>(
         }
     };

-    // TODO: In an ideal world this wouldnt be necessary, but local_resources's lifetime is
-    // bound by this function.
-    #[allow(clippy::needless_collect)]
     let local_resources = local_resources.iter().collect::>();

     Either::Right(local_resources.into_iter().map(|(key, value)| {
@@ -395,6 +416,17 @@ fn unpack_opt_executor<'v>(
     Ok(Some(executor))
 }

+fn unpack_opt_worker<'v>(worker: Value<'v>) -> anyhow::Result>> {
+    if worker.is_none() {
+        return Ok(None);
+    }
+
+    let worker = WorkerInfo::from_value(worker)
+        .with_context(|| format!("Value is not a worker: `{}`", worker))?;
+
+    Ok(Some(worker))
+}
+
 fn check_all(it: I) -> anyhow::Result<()>
 where
     I: IntoIterator>,
@@ -418,18 +450,46 @@ fn validate_external_runner_test_info<'v, V>(
 where
     V: ValueLike<'v>,
 {
-    check_all(iter_test_command(info.command.to_value()))?;
-    check_all(iter_test_env(info.env.to_value()))?;
-    check_all(iter_opt_str_list(info.labels.to_value(), "labels"))?;
-    check_all(iter_opt_str_list(info.contacts.to_value(), "contacts"))?;
-    check_all(iter_executor_overrides(info.executor_overrides.to_value()))?;
-    check_all(iter_local_resources(info.local_resources.to_value()))?;
-    NoneOr::::unpack_value(info.use_project_relative_paths.to_value())
+    check_all(iter_test_command(info.command.get().to_value()))?;
+    check_all(iter_test_env(info.env.get().to_value()))?;
+    check_all(iter_opt_str_list(info.labels.get().to_value(), "labels"))?;
+    check_all(iter_opt_str_list(
+        info.contacts.get().to_value(),
+        "contacts",
+    ))?;
+    check_all(iter_executor_overrides(
+        info.executor_overrides.get().to_value(),
+    ))?;
+
+    let provided_local_resources =
+        iter_local_resources(info.local_resources.get().to_value())
+            .collect::>>>()?;
+
+    let required_local_resources = info.required_local_resources.get().to_value();
+    if !required_local_resources.is_none() {
+        for resource_type in iter_value(required_local_resources).context("`required_local_resources` should be a list or a tuple of `RequiredTestLocalResource` objects")? {
+            let resource_type = StarlarkRequiredTestLocalResource::from_value(resource_type)
+                .ok_or_else(|| anyhow::anyhow!("`required_local_resources` should only contain `RequiredTestLocalResource` values, got {}", resource_type))?;
+            if !provided_local_resources.contains_key(&resource_type.name as &str) {
+                return Err(anyhow::anyhow!(
+                    "`required_local_resources` contains `{}` which is not present in `local_resources`",
+                    resource_type.name
+                ));
+            }
+        }
+    }
+
+    NoneOr::::unpack_value(info.use_project_relative_paths.get().to_value())
+        .into_anyhow_result()?
         .context("`use_project_relative_paths` must be a bool if provided")?;
-    NoneOr::::unpack_value(info.run_from_project_root.to_value())
+    NoneOr::::unpack_value(info.run_from_project_root.get().to_value())
+        .into_anyhow_result()?
         .context("`run_from_project_root` must be a bool if provided")?;
-    unpack_opt_executor(info.default_executor.to_value()).context("Invalid `default_executor`")?;
+    unpack_opt_executor(info.default_executor.get().to_value())
+        .context("Invalid `default_executor`")?;
+    unpack_opt_worker(info.worker.get().to_value()).context("Invalid `worker`")?;

     info.test_type
+        .get()
         .to_value()
         .unpack_str()
         .context("`type` must be a str")?;
@@ -441,6 +501,8 @@ fn external_runner_test_info_creator(globals: &mut GlobalsBuilder) {
     #[starlark(as_type = FrozenExternalRunnerTestInfo)]
     fn ExternalRunnerTestInfo<'v>(
         r#type: Value<'v>,
+        // TODO(nga): these need types.
+        // TODO(nga): parameters should be either named or positional, not both.
         #[starlark(default = NoneType)] command: Value<'v>,
         #[starlark(default = NoneType)] env: Value<'v>,
         #[starlark(default = NoneType)] labels: Value<'v>,
@@ -450,18 +512,22 @@ fn external_runner_test_info_creator(globals: &mut GlobalsBuilder) {
         #[starlark(default = NoneType)] default_executor: Value<'v>,
         #[starlark(default = NoneType)] executor_overrides: Value<'v>,
         #[starlark(default = NoneType)] local_resources: Value<'v>,
+        #[starlark(default = NoneType)] required_local_resources: Value<'v>,
+        #[starlark(default = NoneType)] worker: Value<'v>,
     ) -> anyhow::Result> {
         let res = ExternalRunnerTestInfo {
-            test_type: r#type,
-            command,
-            env,
-            labels,
-            contacts,
-            use_project_relative_paths,
-            run_from_project_root,
-            default_executor,
-            executor_overrides,
-            local_resources,
+            test_type: ValueOfUnchecked::new(r#type),
+            command: ValueOfUnchecked::new(command),
+            env: ValueOfUnchecked::new(env),
+            labels: ValueOfUnchecked::new(labels),
+            contacts: ValueOfUnchecked::new(contacts),
+            use_project_relative_paths: ValueOfUnchecked::new(use_project_relative_paths),
+            run_from_project_root: ValueOfUnchecked::new(run_from_project_root),
+            default_executor: ValueOfUnchecked::new(default_executor),
+            executor_overrides: ValueOfUnchecked::new(executor_overrides),
+            local_resources: ValueOfUnchecked::new(local_resources),
+            required_local_resources: ValueOfUnchecked::new(required_local_resources),
+            worker: ValueOfUnchecked::new(worker),
         };
         validate_external_runner_test_info(&res)?;
         Ok(res)
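The rewritten `iter_value` above unpacks "list or tuple" through `Either` in one step instead of probing both shapes by hand. A compact sketch of the same trick, with slices standing in for the Starlark list and tuple refs (the `either` crate is already a dependency in this diff; everything else is a stand-in):

```rust
use either::Either;

/// Accept two container shapes and expose them as one iterator. `Either`
/// itself implements `Iterator` when both sides do, so both shapes share a
/// single downstream code path.
fn iter_items<'a, T>(value: Either<&'a [T], &'a (T, T)>) -> impl Iterator<Item = &'a T> {
    match value {
        // "List" side: iterate the slice directly.
        Either::Left(list) => Either::Left(list.iter()),
        // "Tuple" side: adapt the pair into a two-element iterator.
        Either::Right(pair) => Either::Right([&pair.0, &pair.1].into_iter()),
    }
}

fn main() {
    let list = [1, 2, 3];
    assert_eq!(iter_items::<i32>(Either::Left(&list)).count(), 3);
    assert_eq!(iter_items(Either::Right(&(4, 5))).count(), 2);
}
```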
diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/install_info.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/install_info.rs
index 6ddee557038d4..8cf581088f77f 100644
--- a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/install_info.rs
+++ b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/install_info.rs
@@ -17,21 +17,22 @@ use starlark::any::ProvidesStaticType;
 use starlark::collections::SmallMap;
 use starlark::environment::GlobalsBuilder;
 use starlark::values::dict::DictRef;
-use starlark::values::type_repr::DictType;
+use starlark::values::dict::DictType;
 use starlark::values::Coerce;
 use starlark::values::Freeze;
 use starlark::values::Trace;
 use starlark::values::UnpackValue;
-use starlark::values::Value;
+use starlark::values::ValueLifetimeless;
 use starlark::values::ValueLike;
 use starlark::values::ValueOf;
-use thiserror::Error;
+use starlark::values::ValueOfUncheckedGeneric;
+use starlark::StarlarkResultExt;
+
+use crate::interpreter::rule_defs::artifact::starlark_artifact_like::ValueAsArtifactLike;

-use crate::interpreter::rule_defs::artifact::StarlarkArtifact;
-use crate::interpreter::rule_defs::artifact::ValueAsArtifactLike;
 // Provider that signals a rule is installable (ex. android_binary)
-#[derive(Debug, Error)]
+#[derive(Debug, buck2_error::Error)]
 enum InstallInfoProviderErrors {
     #[error("expected a label, got `{0}` (type `{1}`)")]
     ExpectedLabel(String, String),
@@ -47,22 +48,20 @@ enum InstallInfoProviderErrors {
 #[derive(Clone, Coerce, Debug, Freeze, Trace, ProvidesStaticType, Allocative)]
 #[repr(C)]
 #[freeze(validator = validate_install_info, bounds = "V: ValueLike<'freeze>")]
-pub struct InstallInfoGen {
+pub struct InstallInfoGen {
     // Label for the installer
-    #[provider(field_type = StarlarkConfiguredProvidersLabel)]
-    installer: V,
+    installer: ValueOfUncheckedGeneric,
     // list of files that need to be installed
-    #[provider(field_type = DictType)]
-    files: V,
+    files: ValueOfUncheckedGeneric>>,
 }

 impl<'v, V: ValueLike<'v>> InstallInfoGen {
     pub fn get_installer(&self) -> anyhow::Result {
-        let label = StarlarkConfiguredProvidersLabel::from_value(self.installer.to_value())
+        let label = StarlarkConfiguredProvidersLabel::from_value(self.installer.get().to_value())
             .ok_or_else(|| {
                 InstallInfoProviderErrors::ExpectedLabel(
-                    self.installer.to_value().to_repr(),
-                    self.installer.to_value().get_type().to_owned(),
+                    self.installer.get().to_value().to_repr(),
+                    self.installer.get().to_value().get_type().to_owned(),
                 )
             })?
             .label()
@@ -71,7 +70,7 @@ impl<'v, V: ValueLike<'v>> InstallInfoGen {
     }

     fn get_files_dict(&self) -> DictRef<'v> {
-        DictRef::from_value(self.files.to_value()).expect("Value is a Dict")
+        DictRef::from_value(self.files.get().to_value()).expect("Value is a Dict")
     }

     fn get_files_iter<'a>(
@@ -83,12 +82,12 @@ impl<'v, V: ValueLike<'v>> InstallInfoGen {
                 .ok_or_else(|| InstallInfoProviderErrors::ExpectedStringKey(k.to_string()))?;
             Ok((
                 k,
-                ValueAsArtifactLike::unpack_value(v).ok_or_else(|| {
-                    InstallInfoProviderErrors::ExpectedArtifact {
+                ValueAsArtifactLike::unpack_value(v)
+                    .into_anyhow_result()?
+                    .ok_or_else(|| InstallInfoProviderErrors::ExpectedArtifact {
                         key: k.to_owned(),
                         got: v.get_type().to_owned(),
-                    }
-                })?,
+                    })?,
             ))
         })
     }
@@ -111,11 +110,11 @@ impl<'v, V: ValueLike<'v>> InstallInfoGen {
 fn install_info_creator(globals: &mut GlobalsBuilder) {
     fn InstallInfo<'v>(
         installer: ValueOf<'v, &'v StarlarkConfiguredProvidersLabel>,
-        files: ValueOf<'v, SmallMap<&'v str, Value<'v>>>,
+        files: ValueOf<'v, DictType<&'v str, ValueAsArtifactLike<'v>>>,
     ) -> anyhow::Result> {
         let info = InstallInfo {
-            installer: *installer,
-            files: files.value,
+            installer: installer.as_unchecked().cast(),
+            files: files.as_unchecked().cast(),
         };
         validate_install_info(&info)?;
         Ok(info)
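`InstallInfo.files` is now declared as a typed dict of string keys to artifact-like values, which pushes most per-entry checking into the unpacker while `get_files_iter` keeps reporting precise errors. A toy version of the same per-entry validation over an ordinary map; the trait and error strings below are stand-ins, not the buck2 types:

```rust
use std::collections::BTreeMap;

/// Hypothetical stand-in for "artifact-like" values.
trait ArtifactLike {
    fn is_bound(&self) -> bool;
}

/// Walk every entry and surface the first invalid one with its key, roughly
/// how `InstallInfo` reports `ExpectedArtifact { key, got }`.
fn validate_files<A: ArtifactLike>(files: &BTreeMap<String, A>) -> Result<(), String> {
    for (key, value) in files {
        if !value.is_bound() {
            return Err(format!("expected a bound artifact for key `{key}`"));
        }
    }
    Ok(())
}
```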
diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/local_resource_info.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/local_resource_info.rs
index b38a415b82d72..e6cf13b9b076e 100644
--- a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/local_resource_info.rs
+++ b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/local_resource_info.rs
@@ -10,23 +10,34 @@ use std::time::Duration;

 use allocative::Allocative;
-use anyhow::Context;
 use buck2_build_api_derive::internal_provider;
+use buck2_error::BuckErrorContext;
+use either::Either;
 use indexmap::IndexMap;
 use starlark::any::ProvidesStaticType;
 use starlark::environment::GlobalsBuilder;
 use starlark::eval::Evaluator;
 use starlark::values::dict::DictRef;
+use starlark::values::dict::DictType;
+use starlark::values::dict::UnpackDictEntries;
+use starlark::values::float::UnpackFloat;
 use starlark::values::none::NoneOr;
-use starlark::values::type_repr::DictType;
 use starlark::values::Coerce;
 use starlark::values::Freeze;
 use starlark::values::Trace;
 use starlark::values::Value;
+use starlark::values::ValueLifetimeless;
+use starlark::values::ValueOf;
+use starlark::values::ValueOfUnchecked;
+use starlark::values::ValueOfUncheckedGeneric;
+use starlark::values::ValueTypedComplex;
+use starlark::StarlarkResultExt;

 use crate::interpreter::rule_defs::cmd_args::value_as::ValueAsCommandLineLike;
 use crate::interpreter::rule_defs::cmd_args::CommandLineArgLike;
+use crate::interpreter::rule_defs::cmd_args::FrozenStarlarkCmdArgs;
 use crate::interpreter::rule_defs::cmd_args::StarlarkCmdArgs;
+use crate::interpreter::rule_defs::cmd_args::StarlarkCommandLineValueUnpack;
 use crate::starlark::values::UnpackValue;
 use crate::starlark::values::ValueLike;

@@ -34,11 +45,14 @@ use crate::starlark::values::ValueLike;
 #[derive(Clone, Debug, Freeze, Coerce, Trace, ProvidesStaticType, Allocative)]
 #[freeze(validator = validate_local_resource_info, bounds = "V: ValueLike<'freeze>")]
 #[repr(C)]
-pub struct LocalResourceInfoGen {
+pub struct LocalResourceInfoGen {
     /// Command to run to initialize a local resource.
+    ///
     /// Running this command writes a JSON to stdout.
     /// This JSON represents a pool of local resources which are ready to be used.
     /// Example JSON would be:
+    ///
+    /// ```text
     /// {
     ///   "pid": 42,
     ///   "resources": [
@@ -46,79 +60,51 @@ pub struct LocalResourceInfoGen {
     ///     {"socket_address": "bar:2"}
     ///   ]
     /// }
-    /// Where '"pid"` maps to a PID of a process which should be sent SIGTERM to release the pool of resources
+    /// ```
+    ///
+    /// Where `"pid"` maps to a PID of a process which should be sent SIGTERM to release the pool of resources
     /// when they are no longer needed. `"resources"` maps to the pool of resources.
     /// When a local resource from this particular pool is needed for an execution command, single entity
     /// will be reserved from the pool, for example `{"socket_address": "bar:2"}` and environment variable with
     /// name resolved using mapping in `resource_env_vars` field and `"socket_address"` key will be added to
     /// execution command.
-    #[provider(field_type = StarlarkCmdArgs<'v>)]
-    setup: V,
+    setup: ValueOfUncheckedGeneric,
     /// Mapping from environment variable (appended to an execution command which is dependent on this local resource)
     /// to keys in setup command JSON output.
-    #[provider(field_type = DictType)]
-    resource_env_vars: V,
+    resource_env_vars: ValueOfUncheckedGeneric>,
     /// Timeout in seconds for `setup` command.
-    #[provider(field_type = NoneOr)]
-    setup_timeout_seconds: V,
+    setup_timeout_seconds: ValueOfUncheckedGeneric>,
 }

 fn validate_local_resource_info<'v, V>(info: &LocalResourceInfoGen) -> anyhow::Result<()>
 where
     V: ValueLike<'v>,
 {
-    let setup = StarlarkCmdArgs::try_from_value(info.setup.to_value()).with_context(|| {
-        format!(
-            "Value for `setup` field is not a command line: `{}`",
-            info.setup
-        )
-    })?;
-    if setup.is_empty() {
-        return Err(anyhow::anyhow!(
-            "Value for `setup` field is an empty command line: `{}`",
-            info.setup
-        ));
-    }
-
-    let env_vars = DictRef::from_value(info.resource_env_vars.to_value()).with_context(|| {
-        format!(
-            "Value for `resource_env_vars` field is not a dictionary: `{}`",
-            info.resource_env_vars
-        )
-    })?;
-
-    if env_vars.iter().count() == 0 {
+    let env_vars = info
+        .resource_env_vars
+        .cast::>()
+        .unpack()
+        .into_anyhow_result()?;
+    if env_vars.entries.is_empty() {
         return Err(anyhow::anyhow!(
             "Value for `resource_env_vars` field is an empty dictionary: `{}`",
             info.resource_env_vars
         ));
     }

-    let validation_iter = env_vars.iter().map(|(key, value)| {
-        _ = key.unpack_str().with_context(|| {
-            format!(
-                "Invalid key in `resource_env_vars`: Expected a str, got: `{}`",
-                key
-            )
-        })?;
-
-        _ = value.unpack_str().with_context(|| {
-            format!(
-                "Invalid value in `resource_env_vars`: Expected a str, got: `{}`",
-                value
-            )
-        })?;
-
-        Ok::<(), anyhow::Error>(())
-    });
-
-    for validation_item in validation_iter {
-        validation_item?;
+    let setup = ValueTypedComplex::::new(info.setup.get().to_value())
+        .internal_error_anyhow("Validated in constructor")?;
+    let setup_is_empty = match setup.unpack() {
+        Either::Left(a) => a.is_empty(),
+        Either::Right(b) => b.is_empty(),
+    };
+    if setup_is_empty {
+        return Err(anyhow::anyhow!(
+            "Value for `setup` field is an empty command line: `{}`",
+            info.setup
+        ));
     }

-    NoneOr::::unpack_value(info.setup_timeout_seconds.to_value())
-        .context("`setup_timeout_seconds` must be a number if provided")?;
-
     Ok(())
 }

@@ -126,17 +112,24 @@ where
 fn local_resource_info_creator(globals: &mut GlobalsBuilder) {
     #[starlark(as_type = FrozenLocalResourceInfo)]
     fn LocalResourceInfo<'v>(
-        #[starlark(require = named)] setup: Value<'v>,
-        #[starlark(require = named)] resource_env_vars: Value<'v>,
+        #[starlark(require = named)] setup: StarlarkCommandLineValueUnpack<'v>,
+        #[starlark(require = named)] resource_env_vars: ValueOf<
+            'v,
+            UnpackDictEntries<&'v str, &'v str>,
+        >,
         #[starlark(require = named, default = NoneOr::None)] setup_timeout_seconds: NoneOr<
-            Value<'v>,
+            ValueOf<'v, UnpackFloat>,
         >,
-        eval: &mut Evaluator<'v, '_>,
+        eval: &mut Evaluator<'v, '_, '_>,
     ) -> anyhow::Result> {
+        let setup = StarlarkCmdArgs::try_from_value_typed(setup)?;
         let result = LocalResourceInfo {
-            setup,
-            resource_env_vars,
-            setup_timeout_seconds: eval.heap().alloc(setup_timeout_seconds),
+            setup: ValueOfUnchecked::::new(eval.heap().alloc(setup)),
+            resource_env_vars: resource_env_vars.as_unchecked().cast(),
+            setup_timeout_seconds: match setup_timeout_seconds {
+                NoneOr::None => ValueOfUnchecked::new(Value::new_none()),
+                NoneOr::Other(s) => ValueOfUnchecked::new(s.value),
+            },
         };
         validate_local_resource_info(&result)?;
         Ok(result)
@@ -147,7 +140,7 @@ impl FrozenLocalResourceInfo {
     /// Mapping from keys in setup command JSON output to environment variables keys which
     /// should be appended to execution commands dependent on this local resource.
     pub fn env_var_mapping(&self) -> IndexMap {
-        let env_vars = DictRef::from_value(self.resource_env_vars.to_value()).unwrap();
+        let env_vars = DictRef::from_value(self.resource_env_vars.to_value().get()).unwrap();
         env_vars
             .iter()
             .map(|(k, v)| {
@@ -160,13 +153,16 @@ impl FrozenLocalResourceInfo {
     }

     pub fn setup_command_line(&self) -> &dyn CommandLineArgLike {
-        self.setup.to_value().as_command_line().unwrap()
+        ValueAsCommandLineLike::unpack_value_err(self.setup.to_value().get())
+            .unwrap()
+            .0
     }

     pub fn setup_timeout(&self) -> Option {
-        NoneOr::::unpack_value(self.setup_timeout_seconds.to_value())
+        self.setup_timeout_seconds
+            .unpack()
             .unwrap()
             .into_option()
-            .map(Duration::from_secs_f64)
+            .map(|f| Duration::from_secs_f64(f.0))
     }
 }
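`setup_timeout` above now goes through `UnpackFloat`, but the core of the change is still the same conversion: an optional float number of seconds becomes an `Option<Duration>`. Isolated, assuming the float has already been unpacked:

```rust
use std::time::Duration;

/// Optional seconds-as-float to `Option<Duration>`, as `setup_timeout` does.
fn setup_timeout(seconds: Option<f64>) -> Option<Duration> {
    seconds.map(Duration::from_secs_f64)
}

fn main() {
    assert_eq!(setup_timeout(Some(1.5)), Some(Duration::from_millis(1500)));
    assert_eq!(setup_timeout(None), None);
}
```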
diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/mod.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/mod.rs
deleted file mode 100644
index d7e440a3ecc0c..0000000000000
--- a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/mod.rs
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */
-
-//! Builtin providers.
-
-pub mod configuration_info;
-pub mod constraint_setting_info;
-pub mod constraint_value_info;
-pub mod default_info;
-pub mod execution_platform_info;
-pub mod execution_platform_registration_info;
-pub mod external_runner_test_info;
-pub mod install_info;
-pub mod local_resource_info;
-pub mod platform_info;
-pub mod run_info;
-pub mod template_placeholder_info;
-pub(crate) mod ty;
-pub mod worker_info;
-pub mod worker_run_info;
diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/platform_info.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/platform_info.rs
index 7701cce6516b7..f416ca96e5c3d 100644
--- a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/platform_info.rs
+++ b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/platform_info.rs
@@ -19,19 +19,21 @@ use starlark::values::Freeze;
 use starlark::values::Heap;
 use starlark::values::StringValue;
 use starlark::values::Trace;
+use starlark::values::ValueLifetimeless;
 use starlark::values::ValueLike;
 use starlark::values::ValueOf;
+use starlark::values::ValueOfUnchecked;
+use starlark::values::ValueOfUncheckedGeneric;

 use crate::interpreter::rule_defs::provider::builtin::configuration_info::ConfigurationInfo;
+use crate::interpreter::rule_defs::provider::builtin::configuration_info::FrozenConfigurationInfo;

 #[internal_provider(platform_info_creator)]
 #[derive(Clone, Debug, Trace, Coerce, Freeze, ProvidesStaticType, Allocative)]
 #[repr(C)]
-pub struct PlatformInfoGen {
-    #[provider(field_type = String)]
-    label: V,
-    #[provider(field_type = ConfigurationInfo<'v>)]
-    configuration: V,
+pub struct PlatformInfoGen {
+    label: ValueOfUncheckedGeneric,
+    configuration: ValueOfUncheckedGeneric,
 }

 impl<'v, V: ValueLike<'v>> PlatformInfoGen {
@@ -39,10 +41,11 @@ impl<'v, V: ValueLike<'v>> PlatformInfoGen {
         ConfigurationData::from_platform(
             self.label
                 .to_value()
+                .get()
                 .unpack_str()
                 .expect("type checked during construction")
                 .to_owned(),
-            ConfigurationInfo::from_value(self.configuration.to_value())
+            ConfigurationInfo::from_value(self.configuration.get().to_value())
                 .expect("type checked during construction")
                 .to_configuration_data()?,
         )
@@ -54,14 +57,14 @@ impl<'v> PlatformInfo<'v> {
         cfg: &ConfigurationData,
         heap: &'v Heap,
     ) -> anyhow::Result> {
-        let label = heap.alloc_str(cfg.label()?).to_value();
+        let label = heap.alloc_str(cfg.label()?);
         let configuration = heap.alloc(ConfigurationInfo::from_configuration_data(
             cfg.data()?,
             heap,
         ));
         Ok(PlatformInfoGen {
-            label,
-            configuration,
+            label: label.to_value_of_unchecked().cast(),
+            configuration: ValueOfUnchecked::::new(configuration),
         })
     }
 }
@@ -74,8 +77,8 @@ fn platform_info_creator(globals: &mut GlobalsBuilder) {
         #[starlark(require = named)] configuration: ValueOf<'v, &'v ConfigurationInfo<'v>>,
     ) -> anyhow::Result> {
         Ok(PlatformInfo {
-            label: label.to_value(),
-            configuration: *configuration,
+            label: label.to_value_of_unchecked().cast(),
+            configuration: ValueOfUnchecked::::new(configuration.value),
         })
     }
 }
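`PlatformInfo` keeps its label as a plain string and re-derives configuration data from it on demand, which is why the constructor type-checks once and later accessors can simply `expect`. A stand-in model of that check-once, trust-later shape; all names below are hypothetical:

```rust
/// Stand-in for configuration data derived from a stored label string.
#[derive(Debug)]
struct ConfigurationSketch {
    label: String,
}

impl ConfigurationSketch {
    /// Validate at the single entry point...
    fn from_platform(label: String) -> Result<ConfigurationSketch, String> {
        if label.is_empty() {
            return Err("platform label may not be empty".to_owned());
        }
        Ok(ConfigurationSketch { label })
    }

    /// ...so later reads can assume the invariant holds.
    fn label(&self) -> &str {
        debug_assert!(!self.label.is_empty(), "type checked during construction");
        &self.label
    }
}
```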
diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/run_info.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/run_info.rs
index a202e58860f05..451dc6f05dd08 100644
--- a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/run_info.rs
+++ b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/run_info.rs
@@ -15,73 +15,82 @@ use starlark::any::ProvidesStaticType;
 use starlark::coerce::Coerce;
 use starlark::environment::GlobalsBuilder;
 use starlark::eval::Evaluator;
-use starlark::values::list::AllocList;
+use starlark::values::list::ListRef;
+use starlark::values::type_repr::StarlarkTypeRepr;
 use starlark::values::Freeze;
 use starlark::values::Trace;
-use starlark::values::Value;
+use starlark::values::UnpackValue;
+use starlark::values::ValueLifetimeless;
 use starlark::values::ValueLike;
+use starlark::values::ValueOfUnchecked;
+use starlark::values::ValueOfUncheckedGeneric;

+use crate::interpreter::rule_defs::cmd_args::command_line_arg_like_type::command_line_arg_like_impl;
 use crate::interpreter::rule_defs::cmd_args::value_as::ValueAsCommandLineLike;
 use crate::interpreter::rule_defs::cmd_args::CommandLineArgLike;
 use crate::interpreter::rule_defs::cmd_args::CommandLineArtifactVisitor;
 use crate::interpreter::rule_defs::cmd_args::CommandLineBuilder;
 use crate::interpreter::rule_defs::cmd_args::CommandLineContext;
+use crate::interpreter::rule_defs::cmd_args::FrozenStarlarkCmdArgs;
 use crate::interpreter::rule_defs::cmd_args::StarlarkCmdArgs;
+use crate::interpreter::rule_defs::cmd_args::StarlarkCommandLineValueUnpack;
 use crate::interpreter::rule_defs::cmd_args::WriteToFileMacroVisitor;

 /// Provider that signals that a rule is runnable
 #[internal_provider(run_info_creator)]
 #[derive(Clone, Debug, Trace, Coerce, Freeze, ProvidesStaticType, Allocative)]
 #[repr(transparent)]
-pub struct RunInfoGen {
+pub struct RunInfoGen {
     /// The command to run, stored as CommandLine
-    #[provider(field_type = StarlarkCmdArgs<'v>)]
-    args: V,
+    args: ValueOfUncheckedGeneric,
 }

 #[starlark_module]
 fn run_info_creator(globals: &mut GlobalsBuilder) {
     #[starlark(as_type = FrozenRunInfo)]
     fn RunInfo<'v>(
-        #[starlark(default = AllocList::EMPTY)] args: Value<'v>,
-        eval: &mut Evaluator<'v, '_>,
+        // TODO(nga): make the argument either named or positional.
+        #[starlark(default = StarlarkCommandLineValueUnpack::List(ListRef::empty()))]
+        args: StarlarkCommandLineValueUnpack<'v>,
+        eval: &mut Evaluator<'v, '_, '_>,
     ) -> anyhow::Result> {
         let heap = eval.heap();
-        let valid_args = StarlarkCmdArgs::try_from_value(args)?;
+        let valid_args = StarlarkCmdArgs::try_from_value_typed(args)?;
         Ok(RunInfo {
-            args: heap.alloc(valid_args),
+            args: ValueOfUnchecked::::new(heap.alloc(valid_args)),
         })
     }
 }

 impl<'v, V: ValueLike<'v>> CommandLineArgLike for RunInfoGen {
+    fn register_me(&self) {
+        command_line_arg_like_impl!(RunInfo::starlark_type_repr());
+    }
+
     fn add_to_command_line(
         &self,
         cli: &mut dyn CommandLineBuilder,
         context: &mut dyn CommandLineContext,
     ) -> anyhow::Result<()> {
-        self.args
-            .to_value()
-            .as_command_line()
+        ValueAsCommandLineLike::unpack_value_err(self.args.get().to_value())
             .expect("a command line from construction")
+            .0
             .add_to_command_line(cli, context)?;
         Ok(())
     }

     fn visit_artifacts(&self, visitor: &mut dyn CommandLineArtifactVisitor) -> anyhow::Result<()> {
-        self.args
-            .to_value()
-            .as_command_line()
+        ValueAsCommandLineLike::unpack_value_err(self.args.get().to_value())
             .expect("a command line from construction")
+            .0
             .visit_artifacts(visitor)?;
         Ok(())
     }

     fn contains_arg_attr(&self) -> bool {
-        self.args
-            .to_value()
-            .as_command_line()
+        ValueAsCommandLineLike::unpack_value_err(self.args.get().to_value())
             .expect("a command line from construction")
+            .0
             .contains_arg_attr()
     }
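`RunInfo`'s `CommandLineArgLike` impl above is pure delegation: every method unpacks the wrapped `args` value and forwards the call. A minimal sketch of the forwarding pattern with a simplified trait; all types here are stand-ins for illustration:

```rust
/// Hypothetical, heavily simplified version of a command-line-like trait.
trait CmdLike {
    fn add_to(&self, out: &mut Vec<String>);
}

struct Args(Vec<String>);

impl CmdLike for Args {
    fn add_to(&self, out: &mut Vec<String>) {
        out.extend(self.0.iter().cloned());
    }
}

/// The provider wraps an `Args` and implements the trait by forwarding.
struct RunInfoSketch {
    args: Args,
}

impl CmdLike for RunInfoSketch {
    fn add_to(&self, out: &mut Vec<String>) {
        self.args.add_to(out); // pure forwarding, no extra behavior
    }
}
```

Because validation happens when the provider is constructed, the forwarding methods can treat unpack failures as impossible, which is what the `expect("a command line from construction")` calls encode.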
diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/template_placeholder_info.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/template_placeholder_info.rs
index 289e2bccb348b..81f91a2cb19c4 100644
--- a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/template_placeholder_info.rs
+++ b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/template_placeholder_info.rs
@@ -18,20 +18,24 @@ use starlark::collections::SmallMap;
 use starlark::environment::GlobalsBuilder;
 use starlark::values::dict::AllocDict;
 use starlark::values::dict::DictRef;
+use starlark::values::dict::DictType;
 use starlark::values::dict::FrozenDictRef;
-use starlark::values::type_repr::DictType;
 use starlark::values::Freeze;
 use starlark::values::FrozenRef;
 use starlark::values::FrozenValue;
 use starlark::values::Trace;
+use starlark::values::UnpackValue;
 use starlark::values::Value;
-use thiserror::Error;
+use starlark::values::ValueLifetimeless;
+use starlark::values::ValueOfUnchecked;
+use starlark::values::ValueOfUncheckedGeneric;
+use starlark::StarlarkResultExt;

 use crate::interpreter::rule_defs::cmd_args::value::FrozenCommandLineArg;
 use crate::interpreter::rule_defs::cmd_args::value_as::ValueAsCommandLineLike;
 use crate::interpreter::rule_defs::provider::collection::FrozenProviderCollectionValue;

-#[derive(Debug, Error)]
+#[derive(Debug, buck2_error::Error)]
 enum TemplatePlaceholderInfoError {
     #[error(
         "Expected TemplatePlaceholderInfo.{field_key} to be a dict of String->arg-like Value, got `{value_repr}`."
@@ -78,18 +82,19 @@ enum TemplatePlaceholderInfoError {
 #[internal_provider(template_placeholder_info_creator)]
 #[derive(Clone, Debug, Trace, Coerce, Freeze, ProvidesStaticType, Allocative)]
 #[repr(C)]
-pub struct TemplatePlaceholderInfoGen {
+pub struct TemplatePlaceholderInfoGen {
     // `Value` in both fields is command line arg.
     // TODO(nga): specify type more precisely.
-    #[provider(field_type = DictType>)]
-    unkeyed_variables: V,
-    #[provider(field_type = DictType, DictType>>>)]
-    keyed_variables: V,
+    unkeyed_variables: ValueOfUncheckedGeneric>,
+    keyed_variables: ValueOfUncheckedGeneric<
+        V,
+        DictType>>,
+    >,
 }

 impl FrozenTemplatePlaceholderInfo {
     pub fn unkeyed_variables(&self) -> SmallMap, FrozenCommandLineArg> {
-        FrozenDictRef::from_frozen_value(self.unkeyed_variables)
+        FrozenDictRef::from_frozen_value(self.unkeyed_variables.get())
             .expect("should be a dict-like object")
             .iter()
             .map(|(k, v)| {
@@ -107,7 +112,7 @@ impl FrozenTemplatePlaceholderInfo {
         FrozenRef<'static, str>,
         Either, FrozenCommandLineArg>>,
     > {
-        FrozenDictRef::from_frozen_value(self.keyed_variables)
+        FrozenDictRef::from_frozen_value(self.keyed_variables.get())
             .expect("should be a dict-like object")
             .iter()
             .map(|(k, v)| {
@@ -150,13 +155,19 @@ fn verify_variables_type(field_key: &str, variables: Value) -> anyhow::Result<()
             .into()),
         Some(dict) => {
             for (key, value) in dict.iter() {
-                if value.as_command_line().is_some() {
+                if ValueAsCommandLineLike::unpack_value(value)
+                    .into_anyhow_result()?
+                    .is_some()
+                {
                     continue;
                 }

                 if let Some(dict) = DictRef::from_value(value) {
                     for (inner_key, value) in dict.iter() {
-                        if value.as_command_line().is_none() {
+                        if ValueAsCommandLineLike::unpack_value(value)
+                            .into_anyhow_result()?
+                            .is_none()
+                        {
                             return Err(
                                 TemplatePlaceholderInfoError::InnerValueNotCommandLineLike {
                                     field_key: field_key.to_owned(),
@@ -189,8 +200,8 @@ impl<'v> TemplatePlaceholderInfo<'v> {
         verify_variables_type("unkeyed_variables", unkeyed_variables)?;
         verify_variables_type("keyed_variables", keyed_variables)?;
         Ok(Self {
-            unkeyed_variables,
-            keyed_variables,
+            unkeyed_variables: ValueOfUnchecked::new(unkeyed_variables),
+            keyed_variables: ValueOfUnchecked::new(keyed_variables),
         })
     }
 }
@@ -199,6 +210,7 @@ impl<'v> TemplatePlaceholderInfo<'v> {
 fn template_placeholder_info_creator(builder: &mut GlobalsBuilder) {
     #[starlark(as_type = FrozenTemplatePlaceholderInfo)]
     fn TemplatePlaceholderInfo<'v>(
+        // TODO(nga): specify parameter types.
         #[starlark(default = AllocDict::EMPTY)] unkeyed_variables: Value<'v>,
         #[starlark(default = AllocDict::EMPTY)] keyed_variables: Value<'v>,
     ) -> anyhow::Result> {
diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/ty.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/ty.rs
index 384db849a6d38..241c48131291e 100644
--- a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/ty.rs
+++ b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/ty.rs
@@ -15,7 +15,7 @@ use dupe::Dupe;
 use starlark::environment::GlobalsBuilder;
 use starlark::typing::Ty;
 use starlark::typing::TyStarlarkValue;
-use starlark::values::function::NativeFunction;
+use starlark::values::function::FUNCTION_TYPE;
 use starlark::values::typing::TypeInstanceId;
 use starlark::values::StarlarkValue;
 use starlark_map::sorted_map::SortedMap;
@@ -87,13 +87,14 @@ fn builtin_provider_typechecker_ty<'v, C: StarlarkValue<'v> + ProviderCallableLi
     if iter.next().is_some() {
         panic!("more then one global in creator func globals");
     }
-    if first.1.to_value().get_type() != NativeFunction::TYPE {
+    if first.1.to_value().get_type() != FUNCTION_TYPE {
         panic!("creator func is not a function");
     }
     let ty = Ty::of_value(first.1.to_value());
-    let ty_function = ty
+    let ty_callable = ty
         .as_function()
         .expect("creator func is not a function")
-        .clone();
-    ty_provider_callable::(ty_function).unwrap()
+        .callable()
+        .dupe();
+    ty_provider_callable::(ty_callable).unwrap()
 }
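`builtin_provider_typechecker_ty` above insists that the creator module registers exactly one global and that the global is a function. The "exactly one entry" extraction, sketched over an ordinary map; the map and value types are stand-ins:

```rust
use std::collections::BTreeMap;

/// Pull out the single registered global, panicking (as the original does)
/// if the module registered zero or more than one.
fn single_creator(globals: &BTreeMap<String, u32>) -> u32 {
    let mut iter = globals.iter();
    let first = iter.next().expect("globals do not contain creator func");
    assert!(
        iter.next().is_none(),
        "more than one global in creator func globals"
    );
    *first.1
}

fn main() {
    let mut globals = BTreeMap::new();
    globals.insert("ProviderSketch".to_owned(), 1);
    assert_eq!(single_creator(&globals), 1);
}
```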
+/// Validations are run when a target with the `ValidationInfo` provider is a
+/// transitive dependency of a requested target.
+#[internal_provider(validation_info_creator)]
+#[derive(Clone, Debug, Trace, Coerce, Freeze, ProvidesStaticType, Allocative)]
+#[freeze(validator = validate_validation_info, bounds = "V: ValueLike<'freeze>")]
+#[repr(transparent)]
+pub struct ValidationInfoGen<V: ValueLifetimeless> {
+    /// List of `ValidationSpec` values each representing a single validation.
+    validations: ValueOfUncheckedGeneric<V, ListType<FrozenStarlarkValidationSpec>>,
+}
+
+fn validate_validation_info<'v, V>(info: &ValidationInfoGen<V>) -> anyhow::Result<()>
+where
+    V: ValueLike<'v>,
+{
+    let values = ListRef::from_value(info.validations.get().to_value())
+        .ok_or(ValidationInfoError::ValidationsAreNotListOfSpecs)?
+        .iter();
+    let mut spec_names = HashSet::new();
+    for value in values {
+        let wrong_type_error = || ValidationInfoError::WrongSpecType(format!("{}", value));
+        let name = if let Some(frozen_value) = value.unpack_frozen() {
+            let spec = frozen_value
+                .downcast_ref::<FrozenStarlarkValidationSpec>()
+                .ok_or_else(wrong_type_error)?;
+            spec.name()
+        } else {
+            let spec = value
+                .downcast_ref::<StarlarkValidationSpec>()
+                .ok_or_else(wrong_type_error)?;
+            spec.name()
+        };
+        if !spec_names.insert(name) {
+            return Err(ValidationInfoError::SpecsWithDuplicateName(name.to_owned()).into());
+        }
+    }
+    if spec_names.is_empty() {
+        return Err(ValidationInfoError::ValidationSpecsEmpty.into());
+    }
+    Ok(())
+}
+
+#[starlark_module]
+fn validation_info_creator(globals: &mut GlobalsBuilder) {
+    #[starlark(as_type = FrozenValidationInfo)]
+    fn ValidationInfo<'v>(
+        #[starlark(require = named)] validations: ValueOf<
+            'v,
+            ListType<&'v StarlarkValidationSpec<'v>>,
+        >,
+    ) -> anyhow::Result<ValidationInfo<'v>> {
+        let result = ValidationInfo {
+            validations: ValueOfUnchecked::new(validations.value),
+        };
+        validate_validation_info(&result)?;
+        Ok(result)
+    }
+}
+
+impl FrozenValidationInfo {
+    pub fn validations(&self) -> impl Iterator<Item = &FrozenStarlarkValidationSpec> {
+        let it = ListRef::from_value(self.validations.get().to_value())
+            .expect("type checked during construction or freezing")
+            .iter();
+        it.map(|x| {
+            x.downcast_ref::<FrozenStarlarkValidationSpec>()
+                .expect("type checked during construction or freezing")
+        })
+    }
+}
diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/worker_info.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/worker_info.rs
index 27d334728ff43..9109016537d7d 100644
--- a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/worker_info.rs
+++ b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/worker_info.rs
@@ -24,10 +24,15 @@ use starlark::values::Freeze;
use starlark::values::Trace;
use starlark::values::UnpackValue;
use starlark::values::Value;
+use starlark::values::ValueLifetimeless;
use starlark::values::ValueLike;
+use starlark::values::ValueOf;
+use starlark::values::ValueOfUnchecked;
+use starlark::values::ValueOfUncheckedGeneric;

use crate::interpreter::rule_defs::cmd_args::value_as::ValueAsCommandLineLike;
use crate::interpreter::rule_defs::cmd_args::CommandLineArgLike;
+use crate::interpreter::rule_defs::cmd_args::FrozenStarlarkCmdArgs;
use crate::interpreter::rule_defs::cmd_args::StarlarkCmdArgs;

/// Provider that signals that a rule is a worker tool
@@ -35,13 +40,11 @@ use crate::interpreter::rule_defs::cmd_args::StarlarkCmdArgs;
#[derive(Clone, Debug, Trace, Coerce, Freeze, ProvidesStaticType, Allocative)]
#[freeze(validator = validate_worker_info, bounds = "V: ValueLike<'freeze>")]
#[repr(C)]
-pub struct WorkerInfoGen<V> {
+pub struct WorkerInfoGen<V: ValueLifetimeless> {
    // Command to spawn a new worker
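The freeze-time validator above boils down to two checks over the spec list: spec names must be unique, and the list must be non-empty. Here is a self-contained sketch of the same control flow, with a hypothetical `Spec` standing in for `StarlarkValidationSpec`:

```rust
use std::collections::HashSet;

// Hypothetical stand-in for a validation spec; only the name matters here.
struct Spec {
    name: String,
}

// Same shape as `validate_validation_info`: reject a duplicate name as soon
// as one is seen, then reject the empty list.
fn validate(specs: &[Spec]) -> Result<(), String> {
    let mut seen = HashSet::new();
    for spec in specs {
        if !seen.insert(spec.name.as_str()) {
            return Err(format!("Multiple specs with the same name `{}`", spec.name));
        }
    }
    if seen.is_empty() {
        return Err("`ValidationInfo` should contain at least one validation.".to_owned());
    }
    Ok(())
}

fn main() {
    let specs = vec![Spec { name: "a".to_owned() }, Spec { name: "a".to_owned() }];
    assert!(validate(&specs).is_err());
    assert!(validate(&[]).is_err());
}
```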
-    #[provider(field_type = StarlarkCmdArgs<'v>)]
-    pub exe: V,
+    pub exe: ValueOfUncheckedGeneric<V, FrozenStarlarkCmdArgs>,
    // Maximum number of concurrent commands to execute on a worker instance without queuing
-    #[provider(field_type = NoneOr<usize>)]
-    pub concurrency: V,
+    pub concurrency: ValueOfUncheckedGeneric<V, NoneOr<usize>>,
    pub id: u64,
}
@@ -56,31 +59,34 @@ fn worker_info_creator(globals: &mut GlobalsBuilder) {
    #[starlark(as_type = FrozenWorkerInfo)]
    fn WorkerInfo<'v>(
        #[starlark(default = AllocList::EMPTY)] exe: Value<'v>,
-        #[starlark(require = named, default = NoneOr::None)] concurrency: NoneOr<usize>,
-        eval: &mut Evaluator<'v, '_>,
+        #[starlark(require = named, default = NoneOr::None)] concurrency: NoneOr<
+            ValueOf<'v, usize>,
+        >,
+        eval: &mut Evaluator<'v, '_, '_>,
    ) -> anyhow::Result<WorkerInfo<'v>> {
        let heap = eval.heap();
        let valid_exe = StarlarkCmdArgs::try_from_value(exe)?;
-        let exe = heap.alloc(valid_exe);
+        let exe = ValueOfUnchecked::new(heap.alloc(valid_exe));
        let id = next_id();
        Ok(WorkerInfo {
            exe,
            id,
-            concurrency: heap.alloc(concurrency),
+            concurrency: heap.alloc_typed_unchecked(concurrency).cast(),
        })
    }
}

impl<'v, V: ValueLike<'v>> WorkerInfoGen<V> {
    pub fn exe_command_line(&self) -> &'v dyn CommandLineArgLike {
-        self.exe
-            .to_value()
-            .as_command_line()
+        ValueAsCommandLineLike::unpack_value_err(self.exe.get().to_value())
            .expect("validated at construction")
+            .0
    }

    pub fn concurrency(&self) -> Option<usize> {
-        NoneOr::<usize>::unpack_value(self.concurrency.to_value())
+        self.concurrency
+            .to_value()
+            .unpack()
            .expect("validated at construction")
            .into_option()
    }
@@ -90,7 +96,7 @@ fn validate_worker_info<'v, V>(info: &WorkerInfoGen<V>) -> anyhow::Result<()>
where
    V: ValueLike<'v>,
{
-    let exe = StarlarkCmdArgs::try_from_value(info.exe.to_value()).with_context(|| {
+    let exe = StarlarkCmdArgs::try_from_value(info.exe.get().to_value()).with_context(|| {
        format!(
            "Value for `exe` field is not a command line: `{}`",
            info.exe
diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/worker_run_info.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/worker_run_info.rs
index 974cd7a574fbd..f0462e5deb8cb 100644
--- a/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/worker_run_info.rs
+++ b/app/buck2_build_api/src/interpreter/rule_defs/provider/builtin/worker_run_info.rs
@@ -18,27 +18,30 @@ use starlark::eval::Evaluator;
use starlark::values::list::AllocList;
use starlark::values::Freeze;
use starlark::values::Trace;
-use starlark::values::UnpackValue;
use starlark::values::Value;
+use starlark::values::ValueLifetimeless;
use starlark::values::ValueLike;
use starlark::values::ValueOf;
+use starlark::values::ValueOfUnchecked;
+use starlark::values::ValueOfUncheckedGeneric;
use starlark::values::ValueTyped;
+use starlark::values::ValueTypedComplex;

+use crate::interpreter::rule_defs::cmd_args::FrozenStarlarkCmdArgs;
use crate::interpreter::rule_defs::cmd_args::StarlarkCmdArgs;
+use crate::interpreter::rule_defs::provider::builtin::worker_info::FrozenWorkerInfo;
use crate::interpreter::rule_defs::provider::builtin::worker_info::WorkerInfo;

/// Provider that signals that a rule can run using a worker
#[internal_provider(worker_run_info_creator)]
#[derive(Clone, Debug, Coerce, Trace, Freeze, ProvidesStaticType, Allocative)]
#[repr(C)]
-pub struct WorkerRunInfoGen<V> {
+pub struct WorkerRunInfoGen<V: ValueLifetimeless> {
    // Configuration needed to spawn a new worker
-    #[provider(field_type = WorkerInfo<'v>)]
-    worker: V,
+    worker: ValueOfUncheckedGeneric<V, FrozenWorkerInfo>,
    // Command to execute without spawning a worker, when the build
environment or configuration does not support workers - #[provider(field_type = StarlarkCmdArgs<'v>)] - exe: V, + exe: ValueOfUncheckedGeneric, } #[starlark_module] @@ -47,23 +50,23 @@ fn worker_run_info_creator(globals: &mut GlobalsBuilder) { fn WorkerRunInfo<'v>( #[starlark(require = named)] worker: ValueOf<'v, &'v WorkerInfo<'v>>, #[starlark(require = named, default = AllocList::EMPTY)] exe: Value<'v>, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result> { let heap = eval.heap(); let valid_exe = StarlarkCmdArgs::try_from_value(exe)?; Ok(WorkerRunInfo { - worker: *worker, - exe: heap.alloc(valid_exe), + worker: ValueOfUnchecked::new(worker.to_value()), + exe: ValueOfUnchecked::new(heap.alloc(valid_exe)), }) } } impl<'v, V: ValueLike<'v>> WorkerRunInfoGen { - pub fn worker(&self) -> ValueOf<'v, &'v WorkerInfo<'v>> { - ValueOf::unpack_value(self.worker.to_value()).expect("validated at construction") + pub fn worker(&self) -> ValueTypedComplex<'v, WorkerInfo<'v>> { + ValueTypedComplex::new(self.worker.get().to_value()).expect("validated at construction") } pub fn exe(&self) -> ValueTyped<'v, StarlarkCmdArgs<'v>> { - ValueTyped::new(self.exe.to_value()).expect("validated at construction") + ValueTyped::new_err(self.exe.get().to_value()).expect("validated at construction") } } diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider/callable.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider/callable.rs index 5bad01f8af952..71c7f796a3fef 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/provider/callable.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/provider/callable.rs @@ -11,36 +11,41 @@ use std::cell::OnceCell; use std::fmt; use std::fmt::Display; use std::fmt::Formatter; +use std::hash::BuildHasher; +use std::hash::Hasher; use std::sync::Arc; use allocative::Allocative; use anyhow::Context; use buck2_core::cells::cell_path::CellPath; use buck2_core::provider::id::ProviderId; +use buck2_error::BuckErrorContext; use buck2_interpreter::build_context::starlark_path_from_build_context; use buck2_interpreter::types::provider::callable::ProviderCallableLike; use dupe::Dupe; use either::Either; +use indexmap::IndexMap; use itertools::Itertools; use starlark::any::ProvidesStaticType; use starlark::docs::DocItem; +use starlark::docs::DocMember; +use starlark::docs::DocProperty; use starlark::docs::DocString; use starlark::docs::DocStringKind; use starlark::environment::GlobalsBuilder; -use starlark::environment::Methods; -use starlark::environment::MethodsBuilder; -use starlark::environment::MethodsStatic; +use starlark::eval::param_specs; use starlark::eval::Arguments; use starlark::eval::Evaluator; use starlark::eval::ParametersSpec; -use starlark::typing::Param; +use starlark::eval::ParametersSpecParam; use starlark::typing::Ty; -use starlark::typing::TyFunction; +use starlark::typing::TyCallable; use starlark::typing::TyStarlarkValue; use starlark::values::dict::AllocDict; use starlark::values::dict::DictRef; use starlark::values::list::AllocList; use starlark::values::list::ListRef; +use starlark::values::list_or_tuple::UnpackListOrTuple; use starlark::values::starlark_value; use starlark::values::starlark_value_as_type::StarlarkValueAsType; use starlark::values::typing::TypeCompiled; @@ -59,8 +64,11 @@ use starlark::values::StarlarkValue; use starlark::values::Trace; use starlark::values::Value; use starlark::values::ValueLike; +use starlark::StarlarkResultExt; use starlark_map::small_map::SmallMap; use 
starlark_map::small_set::SmallSet;
+use starlark_map::StarlarkHasher;
+use starlark_map::StarlarkHasherBuilder;

use crate::interpreter::rule_defs::provider::doc::provider_callable_documentation;
use crate::interpreter::rule_defs::provider::ty::abstract_provider::AbstractProvider;
@@ -69,7 +77,7 @@ use crate::interpreter::rule_defs::provider::ty::provider_callable::ty_provider_
use crate::interpreter::rule_defs::provider::user::user_provider_creator;
use crate::interpreter::rule_defs::provider::user::UserProvider;

-#[derive(Debug, thiserror::Error)]
+#[derive(Debug, buck2_error::Error)]
enum ProviderCallableError {
    #[error(
        "The result of `provider()` must be assigned to a top-level variable before it can be called"
    )]
@@ -87,26 +95,58 @@ enum ProviderCallableError {
    InvalidDefaultValueType(String, &'static str, Ty),
}

+/// `Hashed` from starlark contains the small hash;
+/// we get it in `UserProvider::get_hashed`.
+/// To look up in `IndexMap` we can promote it to `u64`.
+/// This is what this hasher does.
+#[derive(Default, Debug, Clone, Copy, Dupe)]
+pub(crate) struct StarlarkHasherSmallPromoteBuilder(StarlarkHasherBuilder);
+pub(crate) struct StarlarkHasherSmallPromote(StarlarkHasher);
+
+impl BuildHasher for StarlarkHasherSmallPromoteBuilder {
+    type Hasher = StarlarkHasherSmallPromote;
+
+    fn build_hasher(&self) -> Self::Hasher {
+        StarlarkHasherSmallPromote(self.0.build_hasher())
+    }
+}
+
+impl Hasher for StarlarkHasherSmallPromote {
+    fn finish(&self) -> u64 {
+        self.0.finish_small().promote()
+    }
+
+    fn write(&mut self, bytes: &[u8]) {
+        self.0.write(bytes)
+    }
+}
+
fn create_callable_function_signature(
    function_name: &str,
-    fields: &SmallMap<String, UserProviderField>,
+    fields: &IndexMap<String, UserProviderField, StarlarkHasherSmallPromoteBuilder>,
    ret_ty: Ty,
-) -> (ParametersSpec<FrozenValue>, TyFunction) {
-    let mut signature = ParametersSpec::with_capacity(function_name.to_owned(), fields.len());
-    let mut ty_params = Vec::with_capacity(fields.len());
-    // TODO(nmj): Should double check we don't actually need positional args in-repo
-    signature.no_more_positional_args();
-    for (name, field) in fields {
-        if field.default.is_some() {
-            signature.optional(name);
-            ty_params.push(Param::name_only(name, field.ty.as_ty().dupe()).optional());
-        } else {
-            signature.required(name);
-            ty_params.push(Param::name_only(name, field.ty.as_ty().dupe()));
-        }
-    }
+) -> anyhow::Result<(ParametersSpec<FrozenValue>, TyCallable)> {
+    let (parameters_spec, param_spec) = param_specs(
+        function_name,
+        [],
+        [],
+        None,
+        fields.iter().map(|(name, field)| {
+            (
+                name.as_str(),
+                match field.default {
+                    None => ParametersSpecParam::Required,
+                    Some(default) => ParametersSpecParam::Defaulted(default),
+                },
+                field.ty.as_ty().dupe(),
+            )
+        }),
+        None,
+    )
+    .into_anyhow_result()
+    .internal_error_anyhow("Must have created correct signature")?;

-    (signature.finish(), TyFunction::new(ty_params, ret_ty))
+    Ok((parameters_spec, TyCallable::new(param_spec, ret_ty)))
}

#[derive(Debug, Allocative)]
@@ -114,7 +154,7 @@ pub(crate) struct UserProviderCallableData {
    pub(crate) provider_id: Arc<ProviderId>,
    /// Type id of provider callable instance.
    pub(crate) ty_provider_type_instance_id: TypeInstanceId,
-    pub(crate) fields: SmallMap<String, UserProviderField>,
+    pub(crate) fields: IndexMap<String, UserProviderField, StarlarkHasherSmallPromoteBuilder>,
}

/// Initialized after the name is assigned to the provider.
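The hasher pair above exists so the `IndexMap` can consume starlark's 32-bit "small" hash by widening it in `finish()` while delegating `write`. The same wrapper shape over std types, where truncate-then-widen stands in for `finish_small().promote()`:

```rust
use std::collections::hash_map::RandomState;
use std::hash::{BuildHasher, Hash, Hasher};

// Wrapper pair mirroring StarlarkHasherSmallPromoteBuilder/-Promote: delegate
// `write`, but post-process the final hash so it matches the width the map
// expects.
#[derive(Default)]
struct SmallPromoteBuilder(RandomState);
struct SmallPromote<H>(H);

impl BuildHasher for SmallPromoteBuilder {
    type Hasher = SmallPromote<<RandomState as BuildHasher>::Hasher>;

    fn build_hasher(&self) -> Self::Hasher {
        SmallPromote(self.0.build_hasher())
    }
}

impl<H: Hasher> Hasher for SmallPromote<H> {
    fn finish(&self) -> u64 {
        // Truncate to a 32-bit "small" hash, then widen back to u64.
        self.0.finish() as u32 as u64
    }

    fn write(&mut self, bytes: &[u8]) {
        self.0.write(bytes)
    }
}

fn main() {
    let mut hasher = SmallPromoteBuilder::default().build_hasher();
    "FooInfo".hash(&mut hasher);
    assert!(hasher.finish() <= u64::from(u32::MAX));
}
```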
@@ -136,10 +176,10 @@ impl UserProviderCallableNamed { fn invoke<'v>( &self, args: &Arguments<'v, '_>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> starlark::Result> { self.signature.parser(args, eval, |parser, eval| { - user_provider_creator(self.data, eval, parser) + user_provider_creator(self.data, eval, parser).map_err(Into::into) }) } } @@ -195,14 +235,14 @@ pub struct UserProviderCallable { /// The docstring for this provider docs: Option, /// The names of the fields used in `callable` - fields: SmallMap, + fields: IndexMap, /// Field is initialized after the provider is assigned to a variable. callable: OnceCell, } fn user_provider_callable_display( id: Option<&Arc>, - fields: &SmallMap, + fields: &IndexMap, f: &mut Formatter, ) -> fmt::Result { write!(f, "provider")?; @@ -226,7 +266,7 @@ fn user_provider_callable_display( impl Display for UserProviderCallable { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - user_provider_callable_display(self.id(), &self.fields, f) + user_provider_callable_display(self.callable.get().map(|x| &x.id), &self.fields, f) } } @@ -234,7 +274,7 @@ impl UserProviderCallable { fn new( path: CellPath, docs: Option, - fields: SmallMap, + fields: IndexMap, ) -> Self { Self { callable: OnceCell::new(), @@ -246,8 +286,11 @@ impl UserProviderCallable { } impl ProviderCallableLike for UserProviderCallable { - fn id(&self) -> Option<&Arc> { - self.callable.get().map(|x| &x.id) + fn id(&self) -> anyhow::Result<&Arc> { + self.callable + .get() + .map(|x| &x.id) + .ok_or(ProviderCallableError::NotBound.into()) } } @@ -303,7 +346,11 @@ impl TypeMatcher for UserProviderMatcher { impl<'v> StarlarkValue<'v> for UserProviderCallable { type Canonical = FrozenUserProviderCallable; - fn export_as(&self, variable_name: &str, eval: &mut Evaluator<'v, '_>) -> anyhow::Result<()> { + fn export_as( + &self, + variable_name: &str, + eval: &mut Evaluator<'v, '_, '_>, + ) -> starlark::Result<()> { // First export wins self.callable.get_or_try_init(|| { let provider_id = Arc::new(ProviderId { @@ -327,18 +374,16 @@ impl<'v> StarlarkValue<'v> for UserProviderCallable { &provider_id.name, &self.fields, ty_provider.clone(), - ); + )?; let ty_callable = ty_provider_callable::(creator_func)?; anyhow::Ok(UserProviderCallableNamed { id: provider_id.dupe(), signature, - data: eval - .frozen_heap() - .alloc_any_display_from_debug(UserProviderCallableData { - provider_id, - fields: self.fields.clone(), - ty_provider_type_instance_id, - }), + data: eval.frozen_heap().alloc_any(UserProviderCallableData { + provider_id, + fields: self.fields.clone(), + ty_provider_type_instance_id, + }), ty_provider, ty_callable, }) @@ -346,20 +391,15 @@ impl<'v> StarlarkValue<'v> for UserProviderCallable { Ok(()) } - fn get_methods() -> Option<&'static Methods> { - static RES: MethodsStatic = MethodsStatic::new(); - RES.methods(provider_callable_methods) - } - fn invoke( &self, _me: Value<'v>, args: &Arguments<'v, '_>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> starlark::Result> { match self.callable.get() { Some(callable) => callable.invoke(args, eval), - None => Err(ProviderCallableError::NotBound.into()), + None => Err(starlark::Error::new_other(ProviderCallableError::NotBound)), } } @@ -371,16 +411,25 @@ impl<'v> StarlarkValue<'v> for UserProviderCallable { self.callable.get().map(|named| named.ty_provider.dupe()) } - fn documentation(&self) -> Option { + fn documentation(&self) -> DocItem { 
let return_types = vec![Ty::any(); self.fields.len()]; - Some(provider_callable_documentation( + let Some(callable) = self.callable.get() else { + // This shouldn't really happen, we mostly don't even ask for documentation on + // non-frozen things + return DocItem::Member(DocMember::Property(DocProperty { + docs: None, + typ: Ty::any(), + })); + }; + provider_callable_documentation( None, + callable.ty_callable.dupe(), &self.docs, &self.fields.keys().map(|x| x.as_str()).collect::>(), // TODO(nga): types. &vec![None; self.fields.len()], &return_types, - )) + ) } fn typechecker_ty(&self) -> Option { @@ -393,7 +442,7 @@ pub struct FrozenUserProviderCallable { /// The docstring for this provider docs: Option, /// The names of the fields used in `callable` - fields: SmallMap, + fields: IndexMap, /// The actual callable that creates instances of `UserProvider` callable: UserProviderCallableNamed, } @@ -408,7 +457,7 @@ impl Display for FrozenUserProviderCallable { impl FrozenUserProviderCallable { fn new( docs: Option, - fields: SmallMap, + fields: IndexMap, callable: UserProviderCallableNamed, ) -> Self { Self { @@ -420,8 +469,8 @@ impl FrozenUserProviderCallable { } impl ProviderCallableLike for FrozenUserProviderCallable { - fn id(&self) -> Option<&Arc> { - Some(&self.callable.id) + fn id(&self) -> anyhow::Result<&Arc> { + Ok(&self.callable.id) } } @@ -429,17 +478,12 @@ impl ProviderCallableLike for FrozenUserProviderCallable { impl<'v> StarlarkValue<'v> for FrozenUserProviderCallable { type Canonical = Self; - fn get_methods() -> Option<&'static Methods> { - static RES: MethodsStatic = MethodsStatic::new(); - RES.methods(provider_callable_methods) - } - fn invoke( &self, _me: Value<'v>, args: &Arguments<'v, '_>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> starlark::Result> { self.callable.invoke(args, eval) } @@ -447,15 +491,16 @@ impl<'v> StarlarkValue<'v> for FrozenUserProviderCallable { demand.provide_value::<&dyn ProviderCallableLike>(self); } - fn documentation(&self) -> Option { + fn documentation(&self) -> DocItem { let return_types = vec![Ty::any(); self.fields.len()]; - Some(provider_callable_documentation( + provider_callable_documentation( None, + self.callable.ty_callable.dupe(), &self.docs, &self.fields.keys().map(|x| x.as_str()).collect::>(), &vec![None; self.fields.len()], &return_types, - )) + ) } fn typechecker_ty(&self) -> Option { @@ -467,27 +512,11 @@ impl<'v> StarlarkValue<'v> for FrozenUserProviderCallable { } } -#[starlark_module] -fn provider_callable_methods(builder: &mut MethodsBuilder) { - #[starlark(attribute)] - fn r#type<'v>(this: Value<'v>, heap: &Heap) -> anyhow::Result> { - if let Some(x) = this.downcast_ref::() { - match x.callable.get() { - None => Err(ProviderCallableError::ProviderNotAssigned( - x.fields.keys().cloned().collect(), - ) - .into()), - Some(named) => Ok(heap.alloc(named.id.name.as_str())), - } - } else if let Some(x) = this.downcast_ref::() { - Ok(heap.alloc(x.callable.id.name.as_str())) - } else { - unreachable!( - "This parameter must be one of the types, but got `{}`", - this.get_type() - ) - } - } +fn provider_field_parse_type<'v>( + ty: Value<'v>, + eval: &mut Evaluator<'v, '_, '_>, +) -> anyhow::Result> { + TypeCompiled::new(ty, eval.heap()).map(|ty| ty.to_frozen(eval.frozen_heap())) } #[starlark_module] @@ -496,9 +525,9 @@ pub fn register_provider(builder: &mut GlobalsBuilder) { fn provider_field<'v>( #[starlark(require=pos)] ty: Value<'v>, #[starlark(require=named)] default: 
Option>, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { - let ty = TypeCompiled::new(ty, eval.heap())?.to_frozen(eval.frozen_heap()); + let ty = provider_field_parse_type(ty, eval)?; let default = match default { None => None, Some(x) => { @@ -544,32 +573,42 @@ pub fn register_provider(builder: &mut GlobalsBuilder) { /// For providers that accumulate upwards a transitive set is often a good choice. fn provider<'v>( #[starlark(require=named, default = "")] doc: &str, - #[starlark(require=named)] fields: Either, SmallMap>>, - eval: &mut Evaluator<'v, '_>, + #[starlark(require=named)] fields: Either< + UnpackListOrTuple, + SmallMap>, + >, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { let docstring = DocString::from_docstring(DocStringKind::Starlark, doc); let path = starlark_path_from_build_context(eval)?.path(); let fields = match fields { Either::Left(fields) => { - let new_fields: SmallMap = fields + let new_fields: IndexMap< + String, + UserProviderField, + StarlarkHasherSmallPromoteBuilder, + > = fields + .items .iter() .map(|name| (name.clone(), UserProviderField::default())) .collect(); - if new_fields.len() != fields.len() { - return Err(ProviderCallableError::NonUniqueFields(fields).into()); + if new_fields.len() != fields.items.len() { + return Err(ProviderCallableError::NonUniqueFields(fields.items).into()); } new_fields } Either::Right(fields) => { - let mut new_fields = SmallMap::with_capacity(fields.len()); + let mut new_fields = IndexMap::with_capacity_and_hasher( + fields.len(), + StarlarkHasherSmallPromoteBuilder::default(), + ); for (name, field) in fields { if let Some(field) = field.downcast_ref::() { new_fields.insert(name, field.dupe()); } else { - let ty = TypeCompiled::new(field, eval.heap()) - .with_context(|| format!("Field `{name}` type `{field}` is not created with `provider_field`, and cannot be evaluated as a type"))? - .to_frozen(eval.frozen_heap()); + let ty = provider_field_parse_type(field, eval) + .with_context(|| format!("Field `{name}` type `{field}` is not created with `provider_field`, and cannot be evaluated as a type"))?; new_fields.insert(name, UserProviderField { ty, default: None }); } } @@ -591,5 +630,5 @@ pub fn register_provider(builder: &mut GlobalsBuilder) { /// def foo() -> list[Provider]: /// return [DefaultInfo()] /// ``` - const Provider: StarlarkValueAsType = StarlarkValueAsType::new(); + const Provider: StarlarkValueAsType = StarlarkValueAsType::new_no_docs(); } diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider/collection.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider/collection.rs index ce98fadadff0c..e44f8ebeea4e0 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/provider/collection.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/provider/collection.rs @@ -7,8 +7,10 @@ * of this source tree. 
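In `provider()` above, the list form of `fields` is collected into a map of `UserProviderField::default()` entries, and a length mismatch signals duplicate names. A std-only sketch of that duplicate check follows; a `Vec` of pairs plus a `HashSet` stands in for the hashed `IndexMap`, and it reports the first duplicate rather than comparing lengths:

```rust
use std::collections::HashSet;

#[derive(Debug, Default)]
struct Field; // stand-in for `UserProviderField::default()`

// Build (name, field) pairs from a list of names, rejecting duplicates the
// same way `provider(fields = [...])` does above.
fn fields_from_list(names: &[&str]) -> Result<Vec<(String, Field)>, String> {
    let mut seen = HashSet::new();
    let fields: Vec<(String, Field)> = names
        .iter()
        .map(|name| (name.to_string(), Field::default()))
        .collect();
    for (name, _) in &fields {
        if !seen.insert(name.clone()) {
            return Err(format!("provider has non-unique field `{name}`"));
        }
    }
    Ok(fields)
}

fn main() {
    assert!(fields_from_list(&["bar", "baz"]).is_ok());
    assert!(fields_from_list(&["bar", "bar"]).is_err());
}
```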
*/ +use std::convert::Infallible; use std::fmt; use std::fmt::Display; +use std::mem; use std::sync::Arc; use allocative::Allocative; @@ -17,6 +19,7 @@ use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_core::provider::label::NonDefaultProvidersName; use buck2_core::provider::label::ProviderName; use buck2_core::provider::label::ProvidersName; +use buck2_error::BuckErrorContext; use buck2_interpreter::starlark_promise::StarlarkPromise; use buck2_interpreter::types::provider::callable::ValueAsProviderCallableLike; use display_container::fmt_container; @@ -25,29 +28,41 @@ use either::Either; use serde::Serialize; use serde::Serializer; use starlark::any::ProvidesStaticType; +use starlark::coerce::coerce; use starlark::coerce::Coerce; use starlark::collections::SmallMap; use starlark::environment::GlobalsBuilder; use starlark::environment::Methods; use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; +use starlark::typing::Ty; use starlark::values::list::ListRef; use starlark::values::none::NoneOr; use starlark::values::starlark_value; use starlark::values::starlark_value_as_type::StarlarkValueAsType; +use starlark::values::type_repr::StarlarkTypeRepr; +use starlark::values::AllocFrozenValue; +use starlark::values::AllocStaticSimple; +use starlark::values::AllocValue; use starlark::values::Freeze; use starlark::values::Freezer; +use starlark::values::FrozenHeap; +use starlark::values::FrozenHeapRef; use starlark::values::FrozenRef; use starlark::values::FrozenValue; +use starlark::values::FrozenValueTyped; use starlark::values::Heap; use starlark::values::OwnedFrozenValue; use starlark::values::OwnedFrozenValueTyped; use starlark::values::StarlarkValue; use starlark::values::Trace; use starlark::values::Tracer; +use starlark::values::UnpackValue; use starlark::values::Value; +use starlark::values::ValueLifetimeless; use starlark::values::ValueLike; use starlark::values::ValueOfUnchecked; +use starlark::StarlarkResultExt; use crate::interpreter::rule_defs::provider::ty::abstract_provider::AbstractProvider; use crate::interpreter::rule_defs::provider::DefaultInfo; @@ -66,19 +81,20 @@ fn format_provider_keys_for_error(keys: &[String]) -> String { ) } -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] +#[buck2(input)] enum ProviderCollectionError { #[error("expected a list of Provider objects, got {repr}")] CollectionNotAList { repr: String }, #[error("expected a Provider object, got {repr}")] CollectionElementNotAProvider { repr: String }, - #[error("provider of type {provider_name} specified twice ({original_repr} and {new_repr})")] + #[error("provider of type `{provider_name}` specified twice ({original_repr} and {new_repr})")] CollectionSpecifiedProviderTwice { provider_name: String, original_repr: String, new_repr: String, }, - #[error("collection {repr} did not receive a DefaultInfo provider")] + #[error("collection {repr} did not receive a `DefaultInfo` provider")] CollectionMissingDefaultInfo { repr: String }, #[error( "requested sub target named `{0}` of target `{1}` is not available. Available subtargets are: `{2:?}`" @@ -88,10 +104,6 @@ enum ProviderCollectionError { "Cannot handle flavor `{flavor}` on target `{target}`. Most flavors are unsupported in Buck2." )] UnknownFlavors { target: String, flavor: String }, - #[error( - "provider value that should have been `DefaultInfo` was not. It was `{repr}`. This is an internal error." 
- )] - ValueIsNotDefaultInfo { repr: String }, #[error( "provider collection operation {0} parameter type must be a provider type \ but not and instance of provider (for example, `RunInfo` or user defined provider type), \ @@ -105,32 +117,78 @@ enum ProviderCollectionError { AtNotFound(String, Vec), } -/// Holds a collection of `UserProvider`s. These can be accessed in Starlark by indexing on -/// a `ProviderCallable` object. -/// -/// e.g. -/// ```ignore -/// FooInfo = provider(fields=["bar"]) -/// .... -/// collection[FooInfo] # None if absent, a FooInfo instance if present -/// ``` -/// -/// This is the result of all UDR implementation functions #[derive(Debug, ProvidesStaticType, Allocative)] #[repr(C)] -pub struct ProviderCollectionGen { +pub struct ProviderCollectionGen { pub(crate) providers: SmallMap, V>, } +pub type ProviderCollection<'v> = ProviderCollectionGen>; +pub type FrozenProviderCollection = ProviderCollectionGen; + // Can't derive this since no instance for Arc -unsafe impl, To> Coerce> - for ProviderCollectionGen +unsafe impl + ValueLifetimeless, To: ValueLifetimeless> + Coerce> for ProviderCollectionGen { } -starlark_complex_value!(pub ProviderCollection); +fn empty_provider_collection_value() -> FrozenValueTyped<'static, FrozenProviderCollection> { + static EMPTY: AllocStaticSimple = + AllocStaticSimple::alloc(FrozenProviderCollection { + providers: SmallMap::new(), + }); + EMPTY.unpack() +} + +impl<'v> AllocValue<'v> for ProviderCollectionGen> { + fn alloc_value(self, heap: &'v Heap) -> Value<'v> { + if self.providers.is_empty() { + // Provider collection is immutable, so it's OK to return frozen value here. + empty_provider_collection_value().to_value() + } else { + heap.alloc_complex(self) + } + } +} + +impl AllocFrozenValue for ProviderCollectionGen { + fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue { + if self.providers.is_empty() { + empty_provider_collection_value().to_frozen_value() + } else { + heap.alloc_simple(self) + } + } +} + +impl<'v> ProviderCollection<'v> { + #[inline] + pub fn from_value(x: Value<'v>) -> Option<&'v Self> { + if let Some(x) = x.unpack_frozen() { + ValueLike::downcast_ref::(x).map(coerce) + } else { + ValueLike::downcast_ref::>(x) + } + } +} + +impl<'v> StarlarkTypeRepr for &'v ProviderCollection<'v> { + type Canonical = as StarlarkValue<'v>>::Canonical; -impl Display for ProviderCollectionGen { + fn starlark_type_repr() -> Ty { + ::get_type_starlark_repr() + } +} + +impl<'v> UnpackValue<'v> for &'v ProviderCollection<'v> { + type Error = Infallible; + + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + Ok(ProviderCollection::from_value(value)) + } +} + +impl Display for ProviderCollectionGen { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt_container( f, @@ -153,11 +211,11 @@ impl<'v, V: ValueLike<'v>> Serialize for ProviderCollectionGen { /// Provider collection access operator. #[derive(derive_more::Display, Debug)] enum GetOp { - #[display(fmt = "[]")] + #[display("[]")] At, - #[display(fmt = "in")] + #[display("in")] In, - #[display(fmt = ".get")] + #[display(".get")] Get, } @@ -182,11 +240,11 @@ impl<'v, V: ValueLike<'v>> ProviderCollectionGen { let mut providers = SmallMap::with_capacity(list.len()); for value in list.iter() { - match value.as_provider() { + match ValueAsProviderLike::unpack_value(value).into_anyhow_result()? 
{ Some(provider) => { - if let Some(existing_value) = providers.insert(provider.id().dupe(), value) { + if let Some(existing_value) = providers.insert(provider.0.id().dupe(), value) { return Err(ProviderCollectionError::CollectionSpecifiedProviderTwice { - provider_name: provider.id().name.clone(), + provider_name: provider.0.id().name.clone(), original_repr: existing_value.to_repr(), new_repr: value.to_repr(), } @@ -223,32 +281,41 @@ impl<'v, V: ValueLike<'v>> ProviderCollectionGen { Ok(ProviderCollection::<'v> { providers }) } - /// Takes a value, e.g. a return from a `rule()` implementation function, and builds a `ProviderCollection` from it. + /// Takes a value, e.g. a value passed to `DefaultInfo(subtargets)`, and builds a `ProviderCollection` from it. /// /// An error is returned if: /// - `value` is not a list /// - Two instances of the same provider are provided /// - /// `default_info_creator` is only invoked if `DefaultInfo` was not in the collection - pub fn try_from_value_with_default_info( + /// Should only be used for subtargets, where an empty `DefaultInfo` can be inferred. + pub fn try_from_value_subtarget( value: Value<'v>, - default_info_creator: impl FnOnce() -> Value<'v>, + heap: &'v Heap, ) -> anyhow::Result> { let mut providers = Self::try_from_value_impl(value)?; if !providers.contains_key(DefaultInfoCallable::provider_id()) { - let di_value = default_info_creator(); - if DefaultInfo::from_value(di_value).is_none() { - return Err(ProviderCollectionError::ValueIsNotDefaultInfo { - repr: di_value.to_repr(), - } - .into()); - } - providers.insert(DefaultInfoCallable::provider_id().dupe(), di_value); + providers.insert( + DefaultInfoCallable::provider_id().dupe(), + heap.alloc(DefaultInfo::empty(heap)), + ); } Ok(ProviderCollection::<'v> { providers }) } + /// Takes a value, e.g. a return from a `dynamic_output` function, and builds a `ProviderCollection` from it. + /// + /// An error is returned if: + /// - `value` is not a list + /// - Two instances of the same provider are provided + pub fn try_from_value_dynamic_output( + value: Value<'v>, + ) -> anyhow::Result> { + let providers = Self::try_from_value_impl(value)?; + + Ok(ProviderCollection::<'v> { providers }) + } + /// Common implementation of `[]`, `in`, and `.get`. fn get_impl( &self, @@ -257,7 +324,7 @@ impl<'v, V: ValueLike<'v>> ProviderCollectionGen { ) -> anyhow::Result, Arc>> { match index.as_provider_callable() { Some(callable) => { - let provider_id = callable.require_id()?; + let provider_id = callable.id()?.dupe(); match self.providers.get(&provider_id) { Some(v) => Ok(Either::Left(v.to_value())), None => Ok(Either::Right(provider_id)), @@ -279,6 +346,31 @@ impl<'v, V: ValueLike<'v>> ProviderCollectionGen { } } +impl FrozenProviderCollection { + pub fn testing_new_default( + heap: &FrozenHeap, + ) -> FrozenValueTyped<'static, FrozenProviderCollection> { + FrozenValueTyped::new_err(heap.alloc(FrozenProviderCollection { + providers: SmallMap::from_iter([( + DefaultInfoCallable::provider_id().dupe(), + FrozenDefaultInfo::testing_empty(heap).to_frozen_value(), + )]), + })) + .unwrap() + } +} + +/// Holds a collection of `UserProvider`s. These can be accessed in Starlark by indexing on +/// a `ProviderCallable` object. +/// +/// e.g. +/// ```ignore +/// FooInfo = provider(fields=["bar"]) +/// .... 
+/// collection.get(FooInfo) # None if absent, a FooInfo instance if present +/// ``` +/// +/// This is the result of all UDR implementation functions #[starlark_module] fn provider_collection_methods(builder: &mut MethodsBuilder) { fn get<'v>( @@ -290,22 +382,23 @@ fn provider_collection_methods(builder: &mut MethodsBuilder) { } #[starlark_value(type = "provider_collection")] -impl<'v, V: ValueLike<'v> + 'v> StarlarkValue<'v> for ProviderCollectionGen +impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for ProviderCollectionGen where Self: ProvidesStaticType<'v>, { - fn at(&self, index: Value<'v>, _heap: &'v Heap) -> anyhow::Result> { + fn at(&self, index: Value<'v>, _heap: &'v Heap) -> starlark::Result> { match self.get_impl(index, GetOp::At)? { Either::Left(v) => Ok(v), - Either::Right(provider_id) => Err(ProviderCollectionError::AtNotFound( - provider_id.name.clone(), - self.providers.keys().map(|k| k.name.clone()).collect(), - ) - .into()), + Either::Right(provider_id) => Err(starlark::Error::new_other( + ProviderCollectionError::AtNotFound( + provider_id.name.clone(), + self.providers.keys().map(|k| k.name.clone()).collect(), + ), + )), } } - fn is_in(&self, other: Value<'v>) -> anyhow::Result { + fn is_in(&self, other: Value<'v>) -> starlark::Result { Ok(self.get_impl(other, GetOp::In)?.is_left()) } @@ -336,22 +429,11 @@ impl<'v> Freeze for ProviderCollection<'v> { } } -impl<'v> ProviderCollection<'v> { - pub fn default_info(&self) -> FrozenRef<'static, FrozenDefaultInfo> { - self.providers - .get(DefaultInfoCallable::provider_id()) - .expect("DefaultInfo should always be set") - .unpack_frozen() - .expect("Provider collections are always frozen") - .downcast_frozen_ref::() - .expect("DefaultInfo should be of the right type") - } -} - impl FrozenProviderCollection { - pub fn default_info(&self) -> FrozenRef<'static, FrozenDefaultInfo> { - self.builtin_provider() - .expect("DefaultInfo should always be set") + pub fn default_info(&self) -> anyhow::Result> { + self.builtin_provider().internal_error_anyhow( + "DefaultInfo should always be set for providers returned from rule function", + ) } pub fn contains_provider(&self, provider_id: &ProviderId) -> bool { @@ -383,7 +465,14 @@ impl FrozenProviderCollection { #[derive(Debug, Clone, Dupe, Allocative)] pub struct FrozenProviderCollectionValue { #[allocative(skip)] // TODO(nga): do not skip. - value: OwnedFrozenValueTyped, + pub value: OwnedFrozenValueTyped, +} + +#[derive(Clone, Copy, Dupe)] +pub struct FrozenProviderCollectionValueRef<'f> { + /// Heap that owns the value. 
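`FrozenProviderCollectionValueRef`, introduced here, is the borrowed counterpart of `FrozenProviderCollectionValue`: the value is only meaningful alongside a handle to the heap that owns it, and `to_owned` duplicates that handle to keep the allocation alive. A rough std-only analogue of the ownership pairing, with `Arc` standing in for `FrozenHeapRef`:

```rust
use std::sync::Arc;

// Stand-ins: the arena that owns allocations, and an index into it.
struct Heap {
    cells: Vec<String>,
}

// Borrowed pairing: value plus a handle to its owning heap (cf. the
// `heap`/`value` fields above).
struct ValueRef<'f> {
    heap: &'f Arc<Heap>,
    index: usize,
}

// Owned pairing: keeps the heap alive for as long as the value is held.
struct OwnedValue {
    heap: Arc<Heap>,
    index: usize,
}

impl<'f> ValueRef<'f> {
    fn get(&self) -> &str {
        &self.heap.cells[self.index]
    }

    // Mirrors `FrozenProviderCollectionValueRef::to_owned`: clone the heap
    // handle so the referent cannot be dropped out from under the value.
    fn to_owned(&self) -> OwnedValue {
        OwnedValue { heap: Arc::clone(self.heap), index: self.index }
    }
}

fn main() {
    let heap = Arc::new(Heap { cells: vec!["DefaultInfo".to_owned()] });
    let value = ValueRef { heap: &heap, index: 0 };
    assert_eq!(value.get(), "DefaultInfo");
    let owned = value.to_owned();
    drop(heap);
    assert_eq!(owned.heap.cells[owned.index], "DefaultInfo");
}
```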
+ heap: &'f FrozenHeapRef, + value: FrozenValueTyped<'f, FrozenProviderCollection>, } impl Serialize for FrozenProviderCollectionValue { @@ -414,52 +503,131 @@ impl FrozenProviderCollectionValue { self.value.as_ref() } - pub fn lookup_inner(&self, label: &ConfiguredProvidersLabel) -> anyhow::Result { + pub fn as_ref(&self) -> FrozenProviderCollectionValueRef<'_> { + FrozenProviderCollectionValueRef { + heap: self.value.owner(), + value: unsafe { self.value.value_typed() }, + } + } + + pub fn add_heap_ref<'v>( + &self, + heap: &'v FrozenHeap, + ) -> FrozenValueTyped<'v, FrozenProviderCollection> { + self.as_ref().add_heap_ref(heap) + } + + pub fn add_heap_ref_static( + &self, + heap: &FrozenHeap, + ) -> FrozenValueTyped<'static, FrozenProviderCollection> { + unsafe { + mem::transmute::< + FrozenValueTyped<'_, FrozenProviderCollection>, + FrozenValueTyped<'_, FrozenProviderCollection>, + >(self.add_heap_ref(heap)) + } + } + + pub fn lookup_inner<'f>( + &'f self, + label: &ConfiguredProvidersLabel, + ) -> anyhow::Result> { + self.as_ref().lookup_inner(label) + } +} + +impl<'f> FrozenProviderCollectionValueRef<'f> { + pub unsafe fn new( + heap: &'f FrozenHeapRef, + value: FrozenValueTyped<'f, FrozenProviderCollection>, + ) -> Self { + FrozenProviderCollectionValueRef { heap, value } + } + + pub fn value(self) -> FrozenValueTyped<'f, FrozenProviderCollection> { + self.value + } + + pub fn owner(self) -> &'f FrozenHeapRef { + self.heap + } + + pub fn to_owned(self) -> FrozenProviderCollectionValue { + unsafe { + // Cast lifetime. + let value = mem::transmute::< + FrozenValueTyped, + FrozenValueTyped, + >(self.value); + FrozenProviderCollectionValue { + value: OwnedFrozenValueTyped::new(self.heap.dupe(), value), + } + } + } + + pub fn add_heap_ref<'v>( + self, + heap: &'v FrozenHeap, + ) -> FrozenValueTyped<'v, FrozenProviderCollection> { + heap.add_reference(self.heap); + unsafe { + mem::transmute::< + FrozenValueTyped<'_, FrozenProviderCollection>, + FrozenValueTyped<'_, FrozenProviderCollection>, + >(self.value) + } + } + + pub fn lookup_inner( + self, + label: &ConfiguredProvidersLabel, + ) -> anyhow::Result> { match label.name() { - ProvidersName::Default => anyhow::Ok(self.dupe()), - ProvidersName::NonDefault(box NonDefaultProvidersName::Named(provider_names)) => { - Ok(FrozenProviderCollectionValue::from_value( - self.value().try_map(|v| { - let mut collection_value = v; - - for provider_name in &**provider_names { - let maybe_di = collection_value - .default_info() - .get_sub_target_providers(provider_name.as_str()); - - match maybe_di { - // The inner values should all be frozen if in a frozen provider collection - Some(inner) => { - collection_value = inner; - } - None => { - return Err(anyhow::anyhow!( - ProviderCollectionError::RequestedInvalidSubTarget( - provider_name.clone(), - label.clone(), - collection_value - .default_info() - .sub_targets() - .keys() - .map(|s| (*s).to_owned()) - .collect() - ) - )); - } + ProvidersName::Default => anyhow::Ok(self), + ProvidersName::NonDefault(flavor) => match flavor.as_ref() { + NonDefaultProvidersName::Named(provider_names) => { + let mut collection_value = self.value; + + for provider_name in &**provider_names { + let maybe_di = collection_value + .default_info()? 
+ .get_sub_target_providers(provider_name.as_str()); + + match maybe_di { + // The inner values should all be frozen if in a frozen provider collection + Some(inner) => { + collection_value = inner; + } + None => { + return Err(anyhow::anyhow!( + ProviderCollectionError::RequestedInvalidSubTarget( + provider_name.clone(), + label.dupe(), + collection_value + .default_info()? + .sub_targets() + .keys() + .map(|s| (*s).to_owned()) + .collect() + ) + )); } } - - Ok(collection_value) - })?, - )) - } - ProvidersName::NonDefault(box NonDefaultProvidersName::UnrecognizedFlavor(flavor)) => { - Err(ProviderCollectionError::UnknownFlavors { - target: label.unconfigured().to_string(), - flavor: (**flavor).to_owned(), + } + Ok(FrozenProviderCollectionValueRef { + heap: self.heap, + value: collection_value, + }) } - .into()) - } + NonDefaultProvidersName::UnrecognizedFlavor(flavor) => { + Err(ProviderCollectionError::UnknownFlavors { + target: label.unconfigured().to_string(), + flavor: (**flavor).to_owned(), + } + .into()) + } + }, } } } @@ -488,7 +656,7 @@ pub mod tester { .downcast_ref::() .ok_or_else(|| anyhow::anyhow!("{:?} was not a FrozenProviderCollection", value))?; - let ret = collection.default_info().default_outputs_raw().to_value(); + let ret = collection.default_info()?.default_outputs_raw().to_value(); Ok(ret) } @@ -500,7 +668,7 @@ pub mod tester { .downcast_ref::() .ok_or_else(|| anyhow::anyhow!("{:?} was not a FrozenProviderCollection", value))?; - let ret = collection.default_info().sub_targets_raw().to_value(); + let ret = collection.default_info()?.sub_targets_raw().to_value(); Ok(ret) } diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider/dependency.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider/dependency.rs index 692064d144e1a..2a70bf04ba7fa 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/provider/dependency.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/provider/dependency.rs @@ -9,11 +9,15 @@ use std::fmt; use std::fmt::Display; +use std::mem; use allocative::Allocative; +use anyhow::Context; use buck2_core::execution_types::execution::ExecutionPlatformResolution; use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_core::provider::label::ProviderName; +use buck2_error::starlark_error::from_starlark; +use buck2_error::AnyhowContextForError; use buck2_interpreter::types::configured_providers_label::StarlarkConfiguredProvidersLabel; use starlark::any::ProvidesStaticType; use starlark::coerce::Coerce; @@ -27,22 +31,23 @@ use starlark::values::starlark_value; use starlark::values::starlark_value_as_type::StarlarkValueAsType; use starlark::values::Freeze; use starlark::values::FrozenValue; +use starlark::values::FrozenValueTyped; use starlark::values::Heap; use starlark::values::NoSerialize; use starlark::values::StarlarkValue; use starlark::values::Trace; -use starlark::values::UnpackValue; use starlark::values::Value; +use starlark::values::ValueLifetimeless; use starlark::values::ValueLike; use starlark::values::ValueOfUnchecked; -use starlark::values::ValueTyped; -use thiserror::Error; +use starlark::values::ValueOfUncheckedGeneric; +use starlark::StarlarkResultExt; +use crate::interpreter::rule_defs::provider::collection::FrozenProviderCollection; use crate::interpreter::rule_defs::provider::execution_platform::StarlarkExecutionPlatformResolution; use crate::interpreter::rule_defs::provider::ty::abstract_provider::AbstractProvider; -use crate::interpreter::rule_defs::provider::ProviderCollection; -#[derive(Debug, 
Error)]
+#[derive(Debug, buck2_error::Error)]
enum DependencyError {
    #[error("Unknown subtarget, could not find `{0}`")]
    UnknownSubtarget(String),
@@ -59,22 +64,22 @@ enum DependencyError {
    Freeze,
    ProvidesStaticType,
    NoSerialize,
-    Allocative,
-    StarlarkDocs
+    Allocative
)]
#[repr(C)]
-pub struct DependencyGen<V> {
-    label: V,
-    providers_collection: V,
-    execution_platform: V,
+pub struct DependencyGen<V: ValueLifetimeless> {
+    label: ValueOfUncheckedGeneric<V, StarlarkConfiguredProvidersLabel>,
+    provider_collection: FrozenValueTyped<'static, FrozenProviderCollection>,
+    // This could be `Option<...>`, but that breaks `Coerce`.
+    execution_platform: ValueOfUncheckedGeneric<V, NoneOr<StarlarkExecutionPlatformResolution>>,
}

starlark_complex_value!(pub Dependency);

-impl<V> Display for DependencyGen<V> {
+impl<V: ValueLifetimeless> Display for DependencyGen<V> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "<dependency>")
    }
}
@@ -83,45 +88,44 @@ impl<'v> Dependency<'v> {
    pub fn new(
        heap: &'v Heap,
        label: ConfiguredProvidersLabel,
-        providers_collection: Value<'v>,
+        provider_collection: FrozenValueTyped<'v, FrozenProviderCollection>,
        execution_platform: Option<&ExecutionPlatformResolution>,
    ) -> Self {
-        let execution_platform = match execution_platform {
-            Some(e) => NoneOr::Other(StarlarkExecutionPlatformResolution(e.clone())),
-            None => NoneOr::None,
-        };
+        let execution_platform: ValueOfUnchecked<NoneOr<StarlarkExecutionPlatformResolution>> =
+            match execution_platform {
+                Some(e) => ValueOfUnchecked::new(
+                    heap.alloc(StarlarkExecutionPlatformResolution(e.clone())),
+                ),
+                None => ValueOfUnchecked::new(Value::new_none()),
+            };
        Dependency {
-            label: heap.alloc(StarlarkConfiguredProvidersLabel::new(label)),
-            providers_collection,
-            execution_platform: heap.alloc(execution_platform),
+            label: heap.alloc_typed_unchecked(StarlarkConfiguredProvidersLabel::new(label)),
+            provider_collection: unsafe {
+                mem::transmute::<
+                    FrozenValueTyped<'_, FrozenProviderCollection>,
+                    FrozenValueTyped<'_, FrozenProviderCollection>,
+                >(provider_collection)
+            },
+            execution_platform,
        }
    }

    pub fn label(&self) -> &StarlarkConfiguredProvidersLabel {
-        StarlarkConfiguredProvidersLabel::from_value(self.label).unwrap()
+        StarlarkConfiguredProvidersLabel::from_value(self.label.get()).unwrap()
    }

-    pub fn execution_platform(&self) -> Option<&ExecutionPlatformResolution> {
-        match NoneOr::unpack_value(self.execution_platform)
-            .unwrap()
-            .into_option()
-        {
-            Some(v) => match StarlarkExecutionPlatformResolution::from_value(v) {
-                Some(starlark_execution_platform) => Some(&starlark_execution_platform.0),
-                None => None,
-            },
-            None => None,
+    pub fn execution_platform(&self) -> anyhow::Result<Option<&ExecutionPlatformResolution>> {
+        let execution_platform: ValueOfUnchecked<NoneOr<&StarlarkExecutionPlatformResolution>> =
+            self.execution_platform.cast();
+        match execution_platform.unpack().into_anyhow_result()?
{ + NoneOr::None => Ok(None), + NoneOr::Other(e) => Ok(Some(&e.0)), } } - - fn provider_collection(&self) -> anyhow::Result<&ProviderCollection<'v>> { - ProviderCollection::from_value(self.providers_collection) - .ok_or_else(|| anyhow::anyhow!("internal error: not a ProviderCollection")) - } } #[starlark_value(type = "dependency")] -impl<'v, V: ValueLike<'v> + 'v> StarlarkValue<'v> for DependencyGen +impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for DependencyGen where Self: ProvidesStaticType<'v>, { @@ -134,12 +138,17 @@ where RES.methods(dependency_methods) } - fn at(&self, index: Value<'v>, heap: &'v Heap) -> anyhow::Result> { - self.providers_collection.to_value().at(index, heap) + fn at(&self, index: Value<'v>, heap: &'v Heap) -> starlark::Result> { + self.provider_collection + .to_value() + .at(index, heap) + .map_err(from_starlark) + .with_context(|| format!("Error accessing dependencies of `{}`", self.label)) + .map_err(Into::into) } - fn is_in(&self, other: Value<'v>) -> anyhow::Result { - self.providers_collection.to_value().is_in(other) + fn is_in(&self, other: Value<'v>) -> starlark::Result { + self.provider_collection.to_value().is_in(other) } } @@ -149,15 +158,15 @@ fn dependency_methods(builder: &mut MethodsBuilder) { #[starlark(attribute)] fn label<'v>( this: &Dependency, - ) -> anyhow::Result> { - Ok(ValueTyped::new(this.label).unwrap()) + ) -> anyhow::Result> { + Ok(this.label) } // TODO(nga): should return provider collection. #[starlark(attribute)] - fn providers<'v>(this: &Dependency) -> anyhow::Result>> { + fn providers<'v>(this: &Dependency) -> anyhow::Result> { Ok(this - .provider_collection()? + .provider_collection .providers .values() .copied() @@ -173,25 +182,35 @@ fn dependency_methods(builder: &mut MethodsBuilder) { #[starlark(require = pos)] subtarget: &str, heap: &'v Heap, ) -> anyhow::Result> { - let di = this.provider_collection()?.default_info(); + let di = this.provider_collection.default_info()?; let providers = di .get_sub_target_providers(subtarget) .ok_or_else(|| DependencyError::UnknownSubtarget(subtarget.to_owned()))?; - let lbl = StarlarkConfiguredProvidersLabel::from_value(this.label) + let lbl = StarlarkConfiguredProvidersLabel::from_value(this.label.get()) .unwrap() .inner(); let lbl = ConfiguredProvidersLabel::new( lbl.target().clone(), lbl.name().push(ProviderName::new(subtarget.to_owned())?), ); - Ok(Dependency::new(heap, lbl, providers.to_value(), None)) + Ok(Dependency::new(heap, lbl, providers, None)) } + /// Gets a provider by indexing on a `ProviderCallable` object. + /// + /// e.g. + /// ```ignore + /// FooInfo = provider(fields=["bar"]) + /// .... 
+ /// collection.get(FooInfo) # None if absent, a FooInfo instance if present + /// ``` fn get<'v>( this: &Dependency<'v>, index: Value<'v>, ) -> anyhow::Result>> { - this.provider_collection()?.get(index) + this.provider_collection + .get(index) + .with_context(|| format!("Error accessing dependencies of `{}`", this.label)) } } diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider/doc.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider/doc.rs index fbd542f0bf42a..ef463705b2d6f 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/provider/doc.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/provider/doc.rs @@ -11,14 +11,15 @@ use itertools::Itertools; use starlark::docs::DocFunction; use starlark::docs::DocItem; use starlark::docs::DocMember; -use starlark::docs::DocObject; use starlark::docs::DocProperty; use starlark::docs::DocString; +use starlark::docs::DocType; use starlark::environment::GlobalsBuilder; use starlark::typing::Ty; pub fn provider_callable_documentation( creator: Option fn(&'a mut GlobalsBuilder)>, + self_ty: Ty, overall: &Option, fields: &[&str], field_docs: &[Option], @@ -39,7 +40,7 @@ pub fn provider_callable_documentation( let docs = GlobalsBuilder::new().with(creator).build().documentation(); if docs.members.len() == 1 { match docs.members.into_iter().next() { - Some((name, DocMember::Function(x))) => Some((name, x)), + Some((name, DocItem::Member(DocMember::Function(x)))) => Some((name, x)), _ => None, } } else { @@ -50,22 +51,16 @@ pub fn provider_callable_documentation( }; match ctor { - None => DocItem::Object(DocObject { + None => DocItem::Type(DocType { docs: overall.clone(), + ty: self_ty, members: members .into_iter() .map(|(a, b)| (a.to_owned(), DocMember::Property(b))) .collect(), + constructor: None, }), - Some(( - _name, - DocFunction { - docs, - params, - ret, - as_type, - }, - )) => { + Some((_name, DocFunction { docs, params, ret })) => { let summary = if let Some(x) = &docs { x.summary.clone() } else if let Some(x) = &overall { @@ -95,12 +90,7 @@ pub fn provider_callable_documentation( summary, details: Some(details.iter().flatten().join("\n\n")), }); - DocItem::Function(DocFunction { - docs, - params, - ret, - as_type, - }) + DocItem::Member(DocMember::Function(DocFunction { docs, params, ret })) } } } diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider/execution_platform.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider/execution_platform.rs index 8e21b35983f94..0b537df976522 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/provider/execution_platform.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/provider/execution_platform.rs @@ -17,9 +17,8 @@ use starlark::starlark_simple_value; use starlark::values::starlark_value; use starlark::values::NoSerialize; use starlark::values::StarlarkValue; -use starlark::StarlarkDocs; -#[derive(ProvidesStaticType, Debug, NoSerialize, StarlarkDocs, Allocative)] +#[derive(ProvidesStaticType, Debug, NoSerialize, Allocative)] pub struct StarlarkExecutionPlatformResolution(pub ExecutionPlatformResolution); starlark_simple_value!(StarlarkExecutionPlatformResolution); diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider/mod.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider/mod.rs deleted file mode 100644 index 9094546207d65..0000000000000 --- a/app/buck2_build_api/src/interpreter/rule_defs/provider/mod.rs +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. 
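Both the collection's `get_impl` and the `Dependency` methods above funnel `[]`, `in`, and `.get` through a single lookup; only the error surface differs, with `at` reporting the available provider names on a miss (`AtNotFound`) while `get` simply yields `None`. A small sketch of that split over a plain `HashMap`:

```rust
use std::collections::HashMap;

// One lookup, two surfaces: `at` (the `[]` operator) turns a miss into an
// error listing what *is* available; `get` leaves the miss to the caller.
fn at<'a>(providers: &'a HashMap<String, u32>, key: &str) -> Result<&'a u32, String> {
    providers.get(key).ok_or_else(|| {
        let available: Vec<&String> = providers.keys().collect();
        format!("provider `{key}` not found; available: {available:?}")
    })
}

fn get<'a>(providers: &'a HashMap<String, u32>, key: &str) -> Option<&'a u32> {
    providers.get(key)
}

fn main() {
    let mut providers = HashMap::new();
    providers.insert("DefaultInfo".to_owned(), 1);
    assert!(at(&providers, "RunInfo").is_err());
    assert!(get(&providers, "RunInfo").is_none());
    assert_eq!(get(&providers, "DefaultInfo"), Some(&1));
}
```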
- * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! Providers are the data returned from a rule, and are the only way that information from this -//! rule is available to rules that depend on it. Every rule must return at least the `DefaultInfo` -//! provider, but most will also return either `RunInfo` (because they are executable) or some -//! custom provider (because they are incorporated into something that is ultimately executable). -//! -//! Internal providers (those defined and used by buck itself) can be defined easily using the -//! #[internal_provider(creator_func)] macro. This will generate all the code needed for that -//! provider to be used in starlark and to be treated as a provider in the various rust utilities -//! we have for providers. -//! -//! For an internal provider like: -//! ```skip -//! #[internal_provider(create_my_prov)] -//! #[derive(Clone, Debug, Trace, Coerce)] -//! #[repr(transparent)] -//! pub struct MyProviderGen { -//! field1: V, -//! field2: V, -//! } -//! -//! #[starlark_module] -//! fn create_my_prov(globals: &mut GlobalsBuilder) { -//! fn NameDoesntMatter( -//! // It's not enforced that the args here match the fields, but it's generally the user expectation that they do. -//! field1: Value<'v>, -//! field2: Value<'v>, -//! ) -> MyProvider<'v> { -//! // Can do some arg validation or computation here, just need to construct the provider. -//! Ok(MyProvider { -//! field1, -//! field2 -//! }) -//! } -//! } -//! ``` -//! -//! This will generate a "ProviderCallable" starlark type named (in starlark) `MyProvider` that acts like -//! the instance returned by a `provider()` call in starlark (so can be used to construct instances of the -//! provider or used in places like `attrs.dep(required_providers=[MyProvider]))`. -//! -//! For provider instances, in starlark all of their fields will be accessible by the field name. -//! -//! In rust, a StarlarkValue can be converted to the provider like normal with `MyProvider::from_value()`. -//! Often internally we'd have the analysis result (`FrozenProviderCollection`) and want to get the -//! provider out of their so there's a convenience function for that: `MyProvider::from_providers(collect)`. -// TODO(cjhopman): That last one would be more discoverable if we moved it onto the -// `FrozenProviderCollectionValue` itself so you could do `collection.get::()`. -use std::fmt::Debug; -use std::sync::Arc; - -use buck2_core::provider::id::ProviderId; -use starlark::any::ProvidesStaticType; -use starlark::environment::MethodsBuilder; -use starlark::values::StarlarkValue; -use starlark::values::Value; -use starlark::values::ValueLike; - -use crate::interpreter::rule_defs::provider::builtin::default_info::DefaultInfo; -use crate::interpreter::rule_defs::provider::builtin::default_info::DefaultInfoCallable; -use crate::interpreter::rule_defs::provider::builtin::default_info::FrozenDefaultInfo; -use crate::interpreter::rule_defs::provider::collection::ProviderCollection; - -pub mod builtin; -pub mod callable; -pub mod collection; -pub mod dependency; -pub(crate) mod doc; -pub mod execution_platform; -pub mod registration; -pub mod test_provider; -pub(crate) mod ty; -pub(crate) mod user; - -/// Implemented by providers (builtin or user defined). -pub trait ProviderLike<'v>: Debug { - /// The ID. 
Guaranteed to be set on the `ProviderCallable` before constructing this object - fn id(&self) -> &Arc; - /// Gets the value for a given field. - fn get_field(&self, name: &str) -> Option>; - /// Returns a list of all the keys and values. - // TODO(cjhopman): I'd rather return an iterator. I couldn't get that to work, though. - fn items(&self) -> Vec<(&str, Value<'v>)>; -} - -/// Implemented by frozen builtin providers. -pub trait FrozenBuiltinProviderLike: ProviderLike<'static> + StarlarkValue<'static> { - fn builtin_provider_id() -> &'static Arc; -} - -unsafe impl<'v> ProvidesStaticType<'v> for &'v dyn ProviderLike<'v> { - type StaticType = &'static dyn ProviderLike<'static>; -} - -/// Common methods on user and builtin providers. -#[starlark_module] -pub(crate) fn provider_methods(builder: &mut MethodsBuilder) { - fn to_json(this: Value) -> anyhow::Result { - this.to_json() - } -} - -pub(crate) trait ValueAsProviderLike<'v> { - fn as_provider(&self) -> Option<&'v dyn ProviderLike<'v>>; -} - -impl<'v, V: ValueLike<'v>> ValueAsProviderLike<'v> for V { - fn as_provider(&self) -> Option<&'v dyn ProviderLike<'v>> { - self.to_value().request_value() - } -} diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider/ty.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider/ty.rs new file mode 100644 index 0000000000000..8b126f062973d --- /dev/null +++ b/app/buck2_build_api/src/interpreter/rule_defs/provider/ty.rs @@ -0,0 +1,14 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! Compile type typechecking for providers. + +pub mod abstract_provider; +pub(crate) mod provider; +pub(crate) mod provider_callable; diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider/ty/abstract_provider.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider/ty/abstract_provider.rs index 72357fb3c71ba..0cc813379a405 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/provider/ty/abstract_provider.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/provider/ty/abstract_provider.rs @@ -10,6 +10,7 @@ use std::sync::OnceLock; use allocative::Allocative; +use buck2_interpreter::late_binding_ty::ProviderReprLate; use dupe::Dupe; use starlark::typing::Ty; use starlark::typing::TyStarlarkValue; @@ -31,7 +32,7 @@ struct ProviderMatcher; impl TypeMatcher for ProviderMatcher { fn matches(&self, value: Value) -> bool { - value.as_provider().is_some() + ValueAsProviderLike::unpack(value).is_some() } } @@ -50,11 +51,17 @@ fn mk_ty_provider() -> anyhow::Result { } /// Type of any provider instance. In Starlark it is available as `Provider`. 
-pub(crate) struct AbstractProvider; +pub struct AbstractProvider; impl StarlarkTypeRepr for AbstractProvider { + type Canonical = Self; + fn starlark_type_repr() -> Ty { static TY: OnceLock<Ty> = OnceLock::new(); TY.get_or_init(|| mk_ty_provider().unwrap()).dupe() } } + +pub(crate) fn init_provider_ty() { + ProviderReprLate::init(AbstractProvider::starlark_type_repr()) +} diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider/ty/mod.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider/ty/mod.rs deleted file mode 100644 index 038523db66d95..0000000000000 --- a/app/buck2_build_api/src/interpreter/rule_defs/provider/ty/mod.rs +++ /dev/null @@ -1,14 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! Compile-time typechecking for providers. - -pub(crate) mod abstract_provider; -pub(crate) mod provider; -pub(crate) mod provider_callable; diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider/ty/provider_callable.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider/ty/provider_callable.rs index 417a6b947721f..f67a2c3513b6d 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/provider/ty/provider_callable.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/provider/ty/provider_callable.rs @@ -9,7 +9,7 @@ use buck2_interpreter::types::provider::callable::ProviderCallableLike; use starlark::typing::Ty; -use starlark::typing::TyFunction; +use starlark::typing::TyCallable; use starlark::typing::TyStarlarkValue; use starlark::typing::TyUser; use starlark::typing::TyUserParams; @@ -17,7 +17,7 @@ use starlark::values::typing::TypeInstanceId; use starlark::values::StarlarkValue; pub(crate) fn ty_provider_callable<'v, C: StarlarkValue<'v> + ProviderCallableLike>( - creator_func: TyFunction, + creator_func: TyCallable, ) -> anyhow::Result<Ty> { Ok(Ty::custom(TyUser::new( C::TYPE.to_owned(), diff --git a/app/buck2_build_api/src/interpreter/rule_defs/provider/user.rs b/app/buck2_build_api/src/interpreter/rule_defs/provider/user.rs index 30e929ef297e1..aa05d869cfda6 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/provider/user.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/provider/user.rs @@ -18,14 +18,13 @@ use allocative::Allocative; use buck2_core::provider::id::ProviderId; use display_container::fmt_keyed_container; use dupe::Dupe; +use indexmap::map::RawEntryApiV1; use serde::Serializer; use starlark::any::ProvidesStaticType; use starlark::coerce::coerce; use starlark::coerce::Coerce; use starlark::collections::Hashed; use starlark::collections::StarlarkHasher; -use starlark::environment::Methods; -use starlark::environment::MethodsStatic; use starlark::eval::Evaluator; use starlark::eval::ParametersParser; use starlark::typing::Ty; @@ -40,10 +39,9 @@ use starlark::values::Value; use starlark::values::ValueLike; use crate::interpreter::rule_defs::provider::callable::UserProviderCallableData; -use crate::interpreter::rule_defs::provider::provider_methods; use crate::interpreter::rule_defs::provider::ProviderLike; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum UserProviderError { #[error("Value for parameter `{0}` mismatches type `{1}`: `{2}`")] MismatchedType(String, Ty, String), @@ -88,14 +86,10 @@ impl<'v, V: ValueLike<'v>>
Display for UserProviderGen<'v, V> { } #[starlark_value(type = "provider")] -impl<'v, V: ValueLike<'v> + 'v> StarlarkValue<'v> for UserProviderGen<'v, V> +impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for UserProviderGen<'v, V> where Self: ProvidesStaticType<'v>, { - fn matches_type(&self, ty: &str) -> bool { - ty == self.callable.provider_id.name - } - fn dir_attr(&self) -> Vec<String> { self.callable.fields.keys().cloned().collect() } @@ -105,16 +99,15 @@ where } fn get_attr_hashed(&self, attribute: Hashed<&str>, _heap: &'v Heap) -> Option<Value<'v>> { - let index = self.callable.fields.get_index_of_hashed(attribute)?; + let index = self + .callable + .fields + .raw_entry_v1() + .index_from_hash(attribute.hash().promote(), |k| k == attribute.key())?; Some(self.attributes[index].to_value()) } - fn get_methods() -> Option<&'static Methods> { - static RES: MethodsStatic = MethodsStatic::new(); - RES.methods(provider_methods) - } - - fn equals(&self, other: Value<'v>) -> anyhow::Result<bool> { + fn equals(&self, other: Value<'v>) -> starlark::Result<bool> { let this: &UserProvider = coerce(self); let other: &UserProvider = match UserProvider::from_value(other) { Some(other) => other, @@ -141,7 +134,7 @@ where Ok(true) } - fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { + fn write_hash(&self, hasher: &mut StarlarkHasher) -> starlark::Result<()> { self.callable.provider_id.hash(hasher); for (k, v) in self.iter_items() { k.hash(hasher); @@ -169,11 +162,6 @@ impl<'v, V: ValueLike<'v>> ProviderLike<'v> for UserProviderGen<'v, V> { &self.callable.provider_id } - fn get_field(&self, name: &str) -> Option<Value<'v>> { - let index = self.callable.fields.get_index_of(name)?; - Some(self.attributes[index].to_value()) - } - fn items(&self) -> Vec<(&str, Value<'v>)> { self.iter_items().map(|(k, v)| (k, v.to_value())).collect() } @@ -182,14 +170,14 @@ impl<'v, V: ValueLike<'v>> ProviderLike<'v> for UserProviderGen<'v, V> { /// Creates instances of mutable `UserProvider`s; called from a `NativeFunction` pub(crate) fn user_provider_creator<'v>( callable: FrozenRef<'static, UserProviderCallableData>, - eval: &Evaluator<'v, '_>, - mut param_parser: ParametersParser<'v, '_>, + eval: &Evaluator<'v, '_, '_>, + param_parser: &mut ParametersParser<'v, '_>, ) -> anyhow::Result<Value<'v>> { let heap = eval.heap(); let values = callable .fields .iter() - .map(|(name, field)| match param_parser.next_opt(name)? { + .map(|(name, field)| match param_parser.next_opt()? { Some(value) => { if !field.ty.matches(value) { return Err(UserProviderError::MismatchedType( diff --git a/app/buck2_build_api/src/interpreter/rule_defs/required_test_local_resource.rs b/app/buck2_build_api/src/interpreter/rule_defs/required_test_local_resource.rs new file mode 100644 index 0000000000000..6f202b3eba13d --- /dev/null +++ b/app/buck2_build_api/src/interpreter/rule_defs/required_test_local_resource.rs @@ -0,0 +1,88 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree.
+ */ + +use allocative::Allocative; +use derive_more::Display; +use starlark::any::ProvidesStaticType; +use starlark::environment::GlobalsBuilder; +use starlark::environment::Methods; +use starlark::environment::MethodsBuilder; +use starlark::environment::MethodsStatic; +use starlark::values::starlark_value; +use starlark::values::NoSerialize; +use starlark::values::StarlarkValue; + +/// Object describing which local resources are needed for a given test rule. +#[derive(Debug, Display, NoSerialize, ProvidesStaticType, Allocative)] +#[display( + "RequiredTestLocalResource(name: {}, listing: {}, execution: {})", + self.name, + self.listing, + self.execution +)] +pub struct StarlarkRequiredTestLocalResource { + /// Local resource type + pub name: String, + /// Is it needed for test listing + pub listing: bool, + /// Is it needed for test execution + pub execution: bool, +} + +starlark_simple_value!(StarlarkRequiredTestLocalResource); + +#[starlark_value(type = "RequiredTestLocalResource")] +impl<'v> StarlarkValue<'v> for StarlarkRequiredTestLocalResource { + fn get_methods() -> Option<&'static Methods> { + static RES: MethodsStatic = MethodsStatic::new(); + RES.methods(required_test_local_resource_methods) + } +} + +#[starlark_module] +pub fn register_required_test_local_resource(builder: &mut GlobalsBuilder) { + #[starlark(as_type = StarlarkRequiredTestLocalResource)] + fn RequiredTestLocalResource<'v>( + #[starlark(require = pos)] name: String, + #[starlark(require = named, default = true)] listing: bool, + #[starlark(require = named, default = true)] execution: bool, + ) -> anyhow::Result<StarlarkRequiredTestLocalResource> { + if !(listing || execution) { + return Err(anyhow::anyhow!( + "`RequiredTestLocalResource` should not be disabled for both listing and execution stages", + )); + } + Ok(StarlarkRequiredTestLocalResource { + name, + listing, + execution, + }) + } +} + +#[starlark_module] +fn required_test_local_resource_methods(builder: &mut MethodsBuilder) { + #[starlark(attribute)] + /// Local resource type + fn name<'v>(this: &'v StarlarkRequiredTestLocalResource) -> anyhow::Result<&'v str> { + Ok(&this.name) + } + + #[starlark(attribute)] + /// Is this resource type needed for test listing? + fn listing<'v>(this: &'v StarlarkRequiredTestLocalResource) -> anyhow::Result<bool> { + Ok(this.listing) + } + + #[starlark(attribute)] + /// Is this resource type needed for test execution?
+ fn execution<'v>(this: &'v StarlarkRequiredTestLocalResource) -> anyhow::Result<bool> { + Ok(this.execution) + } +} diff --git a/app/buck2_build_api/src/interpreter/rule_defs/resolve_query_macro.rs b/app/buck2_build_api/src/interpreter/rule_defs/resolve_query_macro.rs index d507edc8915ef..12334ae11a1e5 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/resolve_query_macro.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/resolve_query_macro.rs @@ -15,8 +15,8 @@ use buck2_core::target::configured_target_label::ConfiguredTargetLabel; use buck2_util::thin_box::ThinBoxSlice; use static_assertions::assert_eq_size; +use crate::interpreter::rule_defs::artifact::starlark_artifact::StarlarkArtifact; use crate::interpreter::rule_defs::artifact::starlark_artifact_like::StarlarkArtifactLike; -use crate::interpreter::rule_defs::artifact::StarlarkArtifact; use crate::interpreter::rule_defs::cmd_args::arg_builder::ArgBuilder; use crate::interpreter::rule_defs::cmd_args::CommandLineArtifactVisitor; use crate::interpreter::rule_defs::cmd_args::CommandLineContext; diff --git a/app/buck2_build_api/src/interpreter/rule_defs/resolved_macro.rs b/app/buck2_build_api/src/interpreter/rule_defs/resolved_macro.rs index 3d796bf078d62..240d35d6a6408 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/resolved_macro.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/resolved_macro.rs @@ -14,12 +14,15 @@ use std::fmt::Debug; use std::fmt::Display; use allocative::Allocative; +use buck2_artifact::artifact::artifact_type::Artifact; use buck2_node::attrs::attr_type::arg::ConfiguredStringWithMacros; use buck2_util::arc_str::ArcStr; +use dupe::Dupe; use starlark::any::ProvidesStaticType; use starlark::environment::GlobalsBuilder; use starlark::values::starlark_value; use starlark::values::starlark_value_as_type::StarlarkValueAsType; +use starlark::values::type_repr::StarlarkTypeRepr; use starlark::values::Demand; use starlark::values::FrozenRef; use starlark::values::NoSerialize; @@ -27,9 +30,11 @@ use starlark::values::StarlarkValue; use starlark::values::Value; use static_assertions::assert_eq_size; -use crate::interpreter::rule_defs::artifact::StarlarkArtifact; -use crate::interpreter::rule_defs::artifact::StarlarkArtifactLike; +use crate::artifact_groups::ArtifactGroup; +use crate::interpreter::rule_defs::artifact::starlark_artifact::StarlarkArtifact; +use crate::interpreter::rule_defs::artifact::starlark_artifact_like::StarlarkArtifactLike; use crate::interpreter::rule_defs::cmd_args::arg_builder::ArgBuilder; +use crate::interpreter::rule_defs::cmd_args::command_line_arg_like_type::command_line_arg_like_impl; use crate::interpreter::rule_defs::cmd_args::space_separated::SpaceSeparatedCommandLineBuilder; use crate::interpreter::rule_defs::cmd_args::value::FrozenCommandLineArg; use crate::interpreter::rule_defs::cmd_args::CommandLineArgLike; @@ -50,6 +55,7 @@ use crate::interpreter::rule_defs::resolve_query_macro::ResolvedQueryMacro; #[derive(Debug, PartialEq, Allocative)] pub enum ResolvedMacro { Location(FrozenRef<'static, FrozenDefaultInfo>), + Source(Artifact), /// Holds an arg-like value ArgLike(FrozenCommandLineArg), /// Holds a resolved query placeholder @@ -65,6 +71,7 @@ impl Display for ResolvedMacro { // Unfortunately we don't keep the location here, which makes it harder to show write!(f, "$(location ...)") } + ResolvedMacro::Source(a) => write!(f, "$(source {})", a), ResolvedMacro::ArgLike(x) => Display::fmt(x, f), ResolvedMacro::Query(x) => Display::fmt(x, f), } } @@ -104,6 +111,10 @@ impl
ResolvedMacro { ctx: &mut dyn CommandLineContext, ) -> anyhow::Result<()> { match self { + Self::Source(artifact) => { + let s = ctx.resolve_artifact(artifact)?.into_string(); + builder.push_str(&s); + } Self::Location(info) => { let outputs = &info.default_outputs(); @@ -124,10 +135,7 @@ impl ResolvedMacro { fn visit_artifacts(&self, visitor: &mut dyn CommandLineArtifactVisitor) -> anyhow::Result<()> { match self { Self::Location(info) => { - info.for_each_output(&mut |i| { - visitor.visit_input(i, None); - Ok(()) - })?; + info.for_each_output(&mut |i| visitor.visit_input(i, None))?; } Self::ArgLike(command_line_like) => { command_line_like .as_command_line_arg() .visit_artifacts(visitor)?; } Self::Query(value) => value.visit_artifacts(visitor)?, + Self::Source(artifact) => { + visitor.visit_input(ArtifactGroup::Artifact(artifact.dupe()), None) + } } Ok(()) } @@ -205,6 +216,10 @@ impl ResolvedStringWithMacros { } impl CommandLineArgLike for ResolvedStringWithMacros { + fn register_me(&self) { + command_line_arg_like_impl!(ResolvedStringWithMacros::starlark_type_repr()); + } + fn add_to_command_line( &self, cmdline_builder: &mut dyn CommandLineBuilder, @@ -290,7 +305,7 @@ impl CommandLineArgLike for ResolvedStringWithMacros { #[starlark_value(type = "resolved_macro")] impl<'v> StarlarkValue<'v> for ResolvedStringWithMacros { - fn equals(&self, other: Value<'v>) -> anyhow::Result<bool> { + fn equals(&self, other: Value<'v>) -> starlark::Result<bool> { match ResolvedStringWithMacros::from_value(other) { None => Ok(false), Some(other) => Ok(*self == *other), diff --git a/app/buck2_build_api/src/interpreter/rule_defs/transitive_set.rs b/app/buck2_build_api/src/interpreter/rule_defs/transitive_set.rs new file mode 100644 index 0000000000000..16340ce187941 --- /dev/null +++ b/app/buck2_build_api/src/interpreter/rule_defs/transitive_set.rs @@ -0,0 +1,111 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree.
+ */ + +pub mod globals; +mod transitive_set; +mod transitive_set_args_projection; +pub mod transitive_set_definition; +mod transitive_set_iterator; +mod transitive_set_json_projection; +mod traversal; + +pub use self::transitive_set::FrozenTransitiveSet; +pub use self::transitive_set::TransitiveSet; +use self::transitive_set::TransitiveSetGen; +pub use self::transitive_set::TransitiveSetLike; +pub use self::transitive_set_args_projection::FrozenTransitiveSetArgsProjection; +pub use self::transitive_set_args_projection::TransitiveSetArgsProjection; +pub use self::transitive_set_definition::FrozenTransitiveSetDefinition; +pub use self::transitive_set_definition::TransitiveSetDefinition; +pub use self::transitive_set_definition::TransitiveSetOperations; +pub use self::transitive_set_definition::TransitiveSetProjectionKind; +pub use self::transitive_set_definition::TransitiveSetProjectionSpec; +use self::transitive_set_iterator::BfsTransitiveSetIteratorGen; +use self::transitive_set_iterator::PostorderTransitiveSetIteratorGen; +use self::transitive_set_iterator::PreorderTransitiveSetIteratorGen; +use self::transitive_set_iterator::TopologicalTransitiveSetIteratorGen; +use self::transitive_set_iterator::TransitiveSetIteratorLike; +pub use self::transitive_set_json_projection::FrozenTransitiveSetJsonProjection; +pub use self::transitive_set_json_projection::TransitiveSetJsonProjection; +pub use self::traversal::TransitiveSetOrdering; + +#[derive(Debug, buck2_error::Error)] +pub(crate) enum TransitiveSetError { + #[error( + "Transitive set type must be assigned to a top-level variable, e.g. `MySet = transitive_set()`" + )] + TransitiveSetNotAssigned, + + #[error( + "Transitive set was used before being assigned to a top-level variable, e.g. `MySet = transitive_set()`" + )] + TransitiveSetUsedBeforeAssignment, + + #[error("Transitive set transitive values must be transitive sets, got `{}`", .got)] + TransitiveValueIsNotTransitiveSet { got: String }, + + #[error( + "Transitive set transitive values must be of the same transitive set type (expected: `{}`, got: `{}`)", + .expected, + .got + )] + TransitiveValueIsOfWrongType { expected: String, got: String }, + + #[error( + "The requested projection `{}` does not exist. Valid projections: {}", + .projection, + .valid_projections.join(", "), + )] + ProjectionDoesNotExist { + projection: String, + valid_projections: Vec<String>, + }, + + #[error( + "Requested a {} projection, but `{}` is a `{}` projection (and should use `{}` instead)", + .expected_kind.short_name(), + .projection, + .actual_kind.short_name(), + .actual_kind.function_name(), + )] + ProjectionKindMismatch { + projection: String, + expected_kind: TransitiveSetProjectionKind, + actual_kind: TransitiveSetProjectionKind, + }, + + #[error("Error evaluating transitive set projection {}", .name)] + ProjectionError { + name: String, + + #[source] + error: anyhow::Error, + }, + + #[error("Error evaluating transitive set reduction {}", .name)] + ReductionError { + name: String, + + #[source] + error: anyhow::Error, + }, + + #[error( + "The requested reduction `{}` does not exist.
Valid reductions: {}", + .reduction, + .valid_reductions.join(", "), + )] + ReductionDoesNotExist { + reduction: String, + valid_reductions: Vec<String>, + }, + + #[error("Expected ordering to be one of `preorder`, `postorder`, `topological`, or `bfs`, but got `{0}`", .ordering)] + OrderingUnexpectedValue { ordering: String }, +} diff --git a/app/buck2_build_api/src/interpreter/rule_defs/transitive_set/mod.rs b/app/buck2_build_api/src/interpreter/rule_defs/transitive_set/mod.rs deleted file mode 100644 index 09b10d723fe66..0000000000000 --- a/app/buck2_build_api/src/interpreter/rule_defs/transitive_set/mod.rs +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -#![allow(clippy::module_inception)] - -pub mod globals; -mod transitive_set; -mod transitive_set_args_projection; -pub mod transitive_set_definition; -mod transitive_set_iterator; -mod transitive_set_json_projection; -mod traversal; - -use thiserror::Error; - -pub use self::transitive_set::FrozenTransitiveSet; -pub use self::transitive_set::TransitiveSet; -use self::transitive_set::TransitiveSetGen; -pub use self::transitive_set::TransitiveSetLike; -pub use self::transitive_set_args_projection::FrozenTransitiveSetArgsProjection; -pub use self::transitive_set_args_projection::TransitiveSetArgsProjection; -pub use self::transitive_set_definition::transitive_set_definition_from_value; -pub use self::transitive_set_definition::FrozenTransitiveSetDefinition; -pub use self::transitive_set_definition::TransitiveSetDefinition; -pub use self::transitive_set_definition::TransitiveSetOperations; -pub use self::transitive_set_definition::TransitiveSetProjectionKind; -pub use self::transitive_set_definition::TransitiveSetProjectionSpec; -use self::transitive_set_iterator::BfsTransitiveSetIteratorGen; -use self::transitive_set_iterator::PostorderTransitiveSetIteratorGen; -use self::transitive_set_iterator::PreorderTransitiveSetIteratorGen; -use self::transitive_set_iterator::TopologicalTransitiveSetIteratorGen; -use self::transitive_set_iterator::TransitiveSetIteratorLike; -pub use self::transitive_set_json_projection::FrozenTransitiveSetJsonProjection; -pub use self::transitive_set_json_projection::TransitiveSetJsonProjection; -pub use self::traversal::TransitiveSetOrdering; - -#[derive(Debug, Error)] -pub(crate) enum TransitiveSetError { - #[error( - "Transitive set type must be assigned to a top-level variable, e.g. `MySet = transitive_set()`" - )] - TransitiveSetNotAssigned, - - #[error( - "Transitive set was provided with a definition that is not the output of transitive_set()" - )] - TransitiveSetDefinitionWasInvalid, - - #[error( - "Transitive set was used before being assigned to a top-level variable, e.g. `MySet = transitive_set()`" - )] - TransitiveSetUsedBeforeAssignment, - - #[error("Transitive set transitive values must be transitive sets, got `{}`", .got)] - TransitiveValueIsNotTransitiveSet { got: String }, - - #[error( - "Transitive set transitive values must be of the same transitive set type (expected: `{}`, got: `{}`)", - .expected, - .got - )] - TransitiveValueIsOfWrongType { expected: String, got: String }, - - #[error( - "The requested projection `{}` does not exist.
Valid projections: {}", - .projection, - .valid_projections.join(", "), - )] - ProjectionDoesNotExist { - projection: String, - valid_projections: Vec<String>, - }, - - #[error( - "Requested a {} projection, but `{}` is a `{}` projection (and should use `{}` instead)", - .expected_kind.short_name(), - .projection, - .actual_kind.short_name(), - .actual_kind.function_name(), - )] - ProjectionKindMismatch { - projection: String, - expected_kind: TransitiveSetProjectionKind, - actual_kind: TransitiveSetProjectionKind, - }, - - #[error("Error evaluating transitive set projection {}", .name)] - ProjectionError { - name: String, - - #[source] - error: anyhow::Error, - }, - - #[error("Expected args_projection `{}` function to take a single argument", .name)] - ProjectionSignatureError { name: String }, - - #[error("Error evaluating transitive set reduction {}", .name)] - ReductionError { - name: String, - - #[source] - error: anyhow::Error, - }, - - #[error( - "The requested reduction `{}` does not exist. Valid reductions: {}", - .reduction, - .valid_reductions.join(", "), - )] - ReductionDoesNotExist { - reduction: String, - valid_reductions: Vec<String>, - }, - - #[error("Expected ordering to be one of `preorder`, `postorder`, `topological`, or `bfs`, but got `{0}`", .ordering)] - OrderingUnexpectedValue { ordering: String }, -} diff --git a/app/buck2_build_api/src/interpreter/rule_defs/transitive_set/transitive_set.rs b/app/buck2_build_api/src/interpreter/rule_defs/transitive_set/transitive_set.rs index d88f4eeba9599..154a143a4f77d 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/transitive_set/transitive_set.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/transitive_set/transitive_set.rs @@ -9,9 +9,12 @@ use std::fmt; use std::iter; +use std::sync::Arc; use allocative::Allocative; -use anyhow::Context as _; +use anyhow::Context; +use buck2_error::starlark_error::from_starlark; +use buck2_error::BuckErrorContext; use display_container::display_pair; use display_container::fmt_container; use display_container::iter_display_chain; @@ -27,7 +30,6 @@ use starlark::environment::Methods; use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; use starlark::eval::Evaluator; -use starlark::typing::Ty; use starlark::values::list::AllocList; use starlark::values::starlark_value; use starlark::values::typing::TypeInstanceId; @@ -35,25 +37,29 @@ use starlark::values::typing::TypeMatcher; use starlark::values::Freeze; use starlark::values::Freezer; use starlark::values::FrozenValue; +use starlark::values::FrozenValueTyped; use starlark::values::StarlarkValue; use starlark::values::Trace; use starlark::values::UnpackValue; use starlark::values::Value; +use starlark::values::ValueLifetimeless; use starlark::values::ValueLike; use starlark::values::ValueOf; use starlark::values::ValueTypedComplex; use crate::actions::impls::json::validate_json; use crate::actions::impls::json::visit_json_artifacts; +use crate::actions::impls::json::JsonUnpack; use crate::artifact_groups::deferred::TransitiveSetKey; use crate::artifact_groups::ArtifactGroup; use crate::artifact_groups::TransitiveSetProjectionKey; use crate::interpreter::rule_defs::cmd_args::SimpleCommandLineArtifactVisitor; +use crate::interpreter::rule_defs::transitive_set::transitive_set_definition::TransitiveSetDefinitionLike; use crate::interpreter::rule_defs::transitive_set::transitive_set_definition::TransitiveSetProjectionKind; -use crate::interpreter::rule_defs::transitive_set::transitive_set_definition_from_value; use
crate::interpreter::rule_defs::transitive_set::traversal::TransitiveSetOrdering; use crate::interpreter::rule_defs::transitive_set::traversal::TransitiveSetTraversal; use crate::interpreter::rule_defs::transitive_set::BfsTransitiveSetIteratorGen; +use crate::interpreter::rule_defs::transitive_set::FrozenTransitiveSetDefinition; use crate::interpreter::rule_defs::transitive_set::PostorderTransitiveSetIteratorGen; use crate::interpreter::rule_defs::transitive_set::PreorderTransitiveSetIteratorGen; use crate::interpreter::rule_defs::transitive_set::TopologicalTransitiveSetIteratorGen; @@ -93,12 +99,12 @@ impl TypeMatcher for TransitiveSetMatcher { #[derive(Debug, Clone, Trace, ProvidesStaticType, Allocative)] #[repr(C)] -pub struct TransitiveSetGen<V> { +pub struct TransitiveSetGen<V: ValueLifetimeless> { /// A Deferred key that maps back to this set. This is used to compute its inputs. pub key: TransitiveSetKey, /// The TransitiveSetCallable that this set uses. - pub(crate) definition: V, + pub(crate) definition: FrozenValueTyped<'static, FrozenTransitiveSetDefinition>, /// The immediate value of this node. If None, then this node will not yield anything when /// iterated over (but we'll still traverse to its children). @@ -113,7 +119,7 @@ pub struct TransitiveSetGen<V: ValueLifetimeless> { #[derive(Debug, Clone, Trace, Allocative)] #[repr(C)] -pub struct NodeGen<V> { +pub struct NodeGen<V: ValueLifetimeless> { /// The value pub value: V, @@ -123,7 +129,7 @@ unsafe impl<'v> Coerce<TransitiveSetGen<Value<'v>>> for TransitiveSetGen<FrozenValue> {} -impl<V: Display> fmt::Display for TransitiveSetGen<V> { +impl<V: ValueLifetimeless + Display> fmt::Display for TransitiveSetGen<V> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt_container( f, @@ -155,7 +161,7 @@ impl<'v, V: ValueLike<'v>> Serialize for TransitiveSetGen<V> { } } -impl<V> TransitiveSetGen<V> { +impl<V: ValueLifetimeless> TransitiveSetGen<V> { pub fn key(&self) -> &TransitiveSetKey { &self.key } @@ -173,13 +179,15 @@ impl<'v> NodeGen<Value<'v>> { } impl<'v, V: ValueLike<'v>> TransitiveSetGen<V> { - fn matches_definition(&self, definition: Value<'v>) -> bool { - definition.ptr_eq(self.definition.to_value()) + fn matches_definition( + &self, + definition: FrozenValueTyped<'v, FrozenTransitiveSetDefinition>, + ) -> bool { + definition.to_value().ptr_eq(self.definition.to_value()) } pub fn projection_name(&'v self, projection: usize) -> anyhow::Result<&'v str> { - let def = transitive_set_definition_from_value(self.definition.to_value()) - .context("Invalid definition")?; + let def = self.definition.as_ref(); Ok(def .operations() @@ -212,7 +220,8 @@ impl<'v, V: ValueLike<'v>> TransitiveSetGen<V> { pub(crate) fn definition( &self, ) -> anyhow::Result<ValueTypedComplex<'v, TransitiveSetDefinition<'v>>> { - ValueTypedComplex::unpack_value_err(self.definition.to_value()).context("(internal error)") + ValueTypedComplex::unpack_value_err(self.definition.to_value()) + .internal_error_anyhow("Must be a TransitiveSetDefinition") } } @@ -233,12 +242,12 @@ impl FrozenTransitiveSet { // Reuse the same projection for children sets.
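// Each child contributes one input keyed by its own TransitiveSetKey plus the same
// projection index, so the projection is resolved per node rather than recomputed.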
for v in self.children.iter() { let v = TransitiveSet::from_value(v.to_value()).context("Invalid deferred")?; - sub_inputs.push(ArtifactGroup::TransitiveSetProjection( + sub_inputs.push(ArtifactGroup::TransitiveSetProjection(Arc::new( TransitiveSetProjectionKey { key: v.key().dupe(), projection, }, - )); + ))); } Ok(sub_inputs) } @@ -329,7 +338,7 @@ impl<'v> TransitiveSetLike<'v> for FrozenTransitiveSet { starlark_complex_value!(pub TransitiveSet); #[starlark_value(type = "transitive_set")] -impl<'v, V: ValueLike<'v> + 'v> StarlarkValue<'v> for TransitiveSetGen<V> +impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for TransitiveSetGen<V> where Self: ProvidesStaticType<'v> + TransitiveSetLike<'v>, { @@ -337,19 +346,6 @@ where static RES: MethodsStatic = MethodsStatic::new(); RES.methods(transitive_set_methods) } - - fn matches_type(&self, ty: &str) -> bool { - if ty == "transitive_set" { - return true; - } - - transitive_set_definition_from_value(self.definition.to_value()) - .map_or(false, |d| d.matches_type(ty)) - } - - fn get_type_starlark_repr() -> Ty { - Ty::starlark_value::<Self>() - } } impl<'v> Freeze for TransitiveSet<'v> { @@ -380,34 +376,26 @@ impl<'v> Freeze for TransitiveSet<'v> { impl<'v> TransitiveSet<'v> { pub fn new( key: TransitiveSetKey, - definition: Value<'v>, + definition: FrozenValueTyped<'v, FrozenTransitiveSetDefinition>, value: Option<Value<'v>>, children: impl IntoIterator<Item = Value<'v>>, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result<Self> { - let def = match transitive_set_definition_from_value(definition.to_value()) { - Some(def) if def.has_id() => def, - Some(..) => { - return Err(TransitiveSetError::TransitiveSetUsedBeforeAssignment.into()); - } - None => { - return Err(TransitiveSetError::TransitiveSetDefinitionWasInvalid.into()); - } - }; + let def: &dyn TransitiveSetDefinitionLike = &*definition; + if !def.has_id() { + return Err(TransitiveSetError::TransitiveSetUsedBeforeAssignment.into()); + } let children = children.into_iter().collect::<Box<[_]>>(); let children_sets = children.try_map(|v| match TransitiveSet::from_value(*v) { Some(set) if set.matches_definition(definition) => Ok(set), Some(set) => { - fn format_def(def: Value<'_>) -> String { - match transitive_set_definition_from_value(def) { - Some(def) => format!("{:?}", def.as_debug()), - None => "".to_owned(), - } + fn format_def(def: &FrozenTransitiveSetDefinition) -> String { + format!("{:?}", def.as_debug()) } Err(TransitiveSetError::TransitiveValueIsOfWrongType { - expected: format_def(definition), - got: format_def(set.definition), + expected: format_def(&definition), + got: format_def(&set.definition), }) } None => { @@ -422,9 +410,9 @@ impl<'v> TransitiveSet<'v> { .iter() .map(|(name, spec)| { let projected_value = eval - .eval_function(spec.projection, &[value], &[]) + .eval_function(spec.projection.get(), &[value], &[]) .map_err(|error| TransitiveSetError::ProjectionError { - error, + error: from_starlark(error).into(), name: name.clone(), })?; match spec.kind { @@ ... TransitiveSetProjectionKind::Args => { TransitiveSetArgsProjection::as_command_line(projected_value)?; } TransitiveSetProjectionKind::Json => { - validate_json(projected_value)?; + validate_json(JsonUnpack::unpack_value_err(projected_value)?)?; } } anyhow::Ok(projected_value) @@ -459,9 +447,9 @@ impl<'v> TransitiveSet<'v> { let value = value.unwrap_or_else(Value::new_none); let reduced = eval - .eval_function(*reduce, &[children_values, value], &[]) + .eval_function(reduce.get(), &[children_values, value], &[]) .map_err(|error|
TransitiveSetError::ReductionError { - error, + error: from_starlark(error).into(), name: name.clone(), })?; @@ -471,7 +459,9 @@ impl<'v> TransitiveSet<'v> { Ok(Self { key, - definition, + definition: + // Cast lifetime from 'v to 'static + FrozenValueTyped::<FrozenTransitiveSetDefinition>::new(FrozenValueTyped::<FrozenTransitiveSetDefinition>::to_frozen_value(definition)).context("internal error")?, node, reductions, children, @@ -480,18 +470,18 @@ impl<'v> TransitiveSet<'v> { pub fn new_from_values( key: TransitiveSetKey, - definition: Value<'v>, + definition: FrozenValueTyped<'v, FrozenTransitiveSetDefinition>, value: Option<Value<'v>>, children: Option<Value<'v>>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result<Self> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> starlark::Result<Self> { let children = children .map(|v| v.iterate(eval.heap())) .transpose()? .into_iter() .flatten(); - Self::new(key, definition, value, children, eval) + Self::new(key, definition, value, children, eval).map_err(Into::into) } } @@ -502,8 +492,7 @@ fn transitive_set_methods(builder: &mut MethodsBuilder) { fn project_as_json<'v>( this: ValueOf<'v, &'v TransitiveSet<'v>>, projection: &str, #[starlark(require = named, default = "preorder")] ordering: &str, ) -> anyhow::Result<Value<'v>> { - let def = transitive_set_definition_from_value(this.typed.definition) - .context("Invalid this.definition")?; + let def = this.typed.definition; let index = def .operations() @@ -521,8 +510,7 @@ fn transitive_set_methods(builder: &mut MethodsBuilder) { fn project_as_args<'v>( this: ValueOf<'v, &'v TransitiveSet<'v>>, projection: &str, #[starlark(require = named, default = "preorder")] ordering: &str, ) -> anyhow::Result<Value<'v>> { - let def = transitive_set_definition_from_value(this.typed.definition) - .context("Invalid this.definition")?; + let def = this.typed.definition; let index = def .operations() @@ -539,8 +527,7 @@ fn transitive_set_methods(builder: &mut MethodsBuilder) { fn reduce<'v>( this: ValueOf<'v, &'v TransitiveSet<'v>>, reduction: &str, ) -> anyhow::Result<Value<'v>> { - let def = transitive_set_definition_from_value(this.typed.definition) - .context("Invalid this.definition")?; + let def = this.typed.definition; let index = match def.operations().reductions.get_index_of(reduction) { Some(index) => index, @@ -589,4 +576,8 @@ fn transitive_set_methods(builder: &mut MethodsBuilder) { None => Value::new_none(), }) } + #[starlark(attribute)] + fn children<'v>(this: ValueOf<'v, &'v TransitiveSet<'v>>) -> anyhow::Result<Vec<Value<'v>>> { + Ok(this.typed.children.to_vec()) + } } diff --git a/app/buck2_build_api/src/interpreter/rule_defs/transitive_set/transitive_set_args_projection.rs b/app/buck2_build_api/src/interpreter/rule_defs/transitive_set/transitive_set_args_projection.rs index 1bc7271c2b03e..431e8ab4059aa 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/transitive_set/transitive_set_args_projection.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/transitive_set/transitive_set_args_projection.rs @@ -10,6 +10,7 @@ use std::fmt; use std::fmt::Display; use std::iter; +use std::sync::Arc; use allocative::Allocative; use anyhow::Context as _; @@ -24,6 +25,7 @@ use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; use starlark::values::list::ListRef; use starlark::values::starlark_value; +use starlark::values::type_repr::StarlarkTypeRepr; use starlark::values::Demand; use starlark::values::Freeze; use starlark::values::Heap; @@ -31,12 +33,15 @@ use starlark::values::NoSerialize; use starlark::values::StarlarkValue; use starlark::values::StringValue; use starlark::values::Trace; +use starlark::values::UnpackValue; use starlark::values::Value; +use starlark::values::ValueLifetimeless; use starlark::values::ValueLike; use
starlark::values::ValueOf; use crate::artifact_groups::ArtifactGroup; use crate::artifact_groups::TransitiveSetProjectionKey; +use crate::interpreter::rule_defs::cmd_args::command_line_arg_like_type::command_line_arg_like_impl; use crate::interpreter::rule_defs::cmd_args::value_as::ValueAsCommandLineLike; use crate::interpreter::rule_defs::cmd_args::CommandLineArgLike; use crate::interpreter::rule_defs::cmd_args::CommandLineArtifactVisitor; @@ -55,7 +60,7 @@ use crate::interpreter::rule_defs::transitive_set::TransitiveSet; #[derive(Debug, Clone, Trace, Coerce, Freeze, ProvidesStaticType, Allocative)] #[derive(NoSerialize)] // TODO we should probably have a serialization for transitive set #[repr(C)] -pub struct TransitiveSetArgsProjectionGen<V> { +pub struct TransitiveSetArgsProjectionGen<V: ValueLifetimeless> { pub(super) transitive_set: V, /// The index of the projection. Once transitive sets are defined, their projections never @@ -100,6 +105,10 @@ impl<'v, V: ValueLike<'v>> TransitiveSetArgsProjectionGen<V> { List(&'v [Value<'v>]), } impl<'v> CommandLineArgLike for Impl<'v> { + fn register_me(&self) { + // No need because this is not proper implementation. + } + fn add_to_command_line( &self, cli: &mut dyn CommandLineBuilder, @@ -109,8 +118,8 @@ impl<'v, V: ValueLike<'v>> TransitiveSetArgsProjectionGen<V> { Impl::Item(v) => v.add_to_command_line(cli, context), Impl::List(items) => { for v in *items { - v.as_command_line() - .unwrap() + ValueAsCommandLineLike::unpack_value_err(*v)? + .0 .add_to_command_line(cli, context)?; } Ok(()) @@ -123,7 +132,11 @@ impl<'v, V: ValueLike<'v>> TransitiveSetArgsProjectionGen<V> { Impl::Item(v) => v.contains_arg_attr(), Impl::List(items) => { for v in *items { - if v.as_command_line().unwrap().contains_arg_attr() { + if ValueAsCommandLineLike::unpack_value_err(*v) + .unwrap() + .0 + .contains_arg_attr() + { return true; } } @@ -140,8 +153,8 @@ impl<'v, V: ValueLike<'v>> TransitiveSetArgsProjectionGen<V> { Impl::Item(v) => v.visit_write_to_file_macros(visitor), Impl::List(items) => { for v in *items { - v.as_command_line() - .unwrap() + ValueAsCommandLineLike::unpack_value_err(*v)? + .0 .visit_write_to_file_macros(visitor)?; } Ok(()) @@ -157,7 +170,9 @@ impl<'v, V: ValueLike<'v>> TransitiveSetArgsProjectionGen<V> { Impl::Item(v) => v.visit_artifacts(visitor), Impl::List(items) => { for v in *items { - v.as_command_line().unwrap().visit_artifacts(visitor)?; + ValueAsCommandLineLike::unpack_value_err(*v)?
+ .0 + .visit_artifacts(visitor)?; } Ok(()) } } } } @@ -168,11 +183,13 @@ impl<'v, V: ValueLike<'v>> TransitiveSetArgsProjectionGen<V> { let value = v.to_value(); if let Some(values) = ListRef::from_value(value) { for v in values.content() { - v.as_command_line_err()?; + ValueAsCommandLineLike::unpack_value_err(*v)?; } Ok(Impl::List(values.content())) } else { - Ok(Impl::Item(value.as_command_line_err()?)) + Ok(Impl::Item( + ValueAsCommandLineLike::unpack_value_err(value)?.0, + )) } } } @@ -180,7 +197,7 @@ impl<'v, V: ValueLike<'v>> TransitiveSetArgsProjectionGen<V> { starlark_complex_value!(pub TransitiveSetArgsProjection); #[starlark_value(type = "transitive_set_args_projection")] -impl<'v, V: ValueLike<'v> + 'v> StarlarkValue<'v> for TransitiveSetArgsProjectionGen<V> +impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for TransitiveSetArgsProjectionGen<V> where Self: ProvidesStaticType<'v>, { @@ -195,6 +212,10 @@ where } impl<'v, V: ValueLike<'v>> CommandLineArgLike for TransitiveSetArgsProjectionGen<V> { + fn register_me(&self) { + command_line_arg_like_impl!(TransitiveSetArgsProjection::starlark_type_repr()); + } + fn add_to_command_line( &self, builder: &mut dyn CommandLineBuilder, @@ -221,10 +242,10 @@ impl<'v, V: ValueLike<'v>> CommandLineArgLike for TransitiveSetArgsProjectionGen<V> .context("Invalid transitive_set")?; visitor.visit_input( - ArtifactGroup::TransitiveSetProjection(TransitiveSetProjectionKey { + ArtifactGroup::TransitiveSetProjection(Arc::new(TransitiveSetProjectionKey { key: set.key().dupe(), projection: self.projection, - }), + })), None, ); diff --git a/app/buck2_build_api/src/interpreter/rule_defs/transitive_set/transitive_set_definition.rs b/app/buck2_build_api/src/interpreter/rule_defs/transitive_set/transitive_set_definition.rs index 0f7e34aa81241..a12f89d98a3fd 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/transitive_set/transitive_set_definition.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/transitive_set/transitive_set_definition.rs @@ -8,7 +8,6 @@ */ use std::fmt; -use std::fmt::Display; use std::hash::Hash; use std::sync::Arc; @@ -32,7 +31,10 @@ use starlark::typing::Ty; use starlark::typing::TyStarlarkValue; use starlark::typing::TyUser; use starlark::typing::TyUserParams; +use starlark::values::list::ListType; use starlark::values::starlark_value; +use starlark::values::typing::FrozenStarlarkCallable; +use starlark::values::typing::StarlarkCallableChecked; use starlark::values::typing::TypeInstanceId; use starlark::values::typing::TypeMatcherFactory; use starlark::values::AllocValue; @@ -43,13 +45,14 @@ use starlark::values::Heap; use starlark::values::StarlarkValue; use starlark::values::Trace; use starlark::values::Value; -use starlark::values::ValueLike; +use starlark::values::ValueLifetimeless; +use starlark::values::ValueOfUncheckedGeneric; use crate::interpreter::rule_defs::transitive_set::transitive_set::TransitiveSetMatcher; use crate::interpreter::rule_defs::transitive_set::TransitiveSet; use crate::interpreter::rule_defs::transitive_set::TransitiveSetError; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum TransitiveSetDefinitionError { #[error("`transitive_set()` can only be used in `bzl` files")] TransitiveSetOnlyInBzl, @@ -80,14 +83,14 @@ impl TransitiveSetProjectionKind { // The Coerce derivation doesn't work if this is just a tuple in the SmallMap value.
#[derive(Debug, Clone, Trace, Coerce, Freeze, Allocative)] #[repr(C)] -pub struct TransitiveSetProjectionSpec<V> { +pub struct TransitiveSetProjectionSpec<V: ValueLifetimeless> { pub kind: TransitiveSetProjectionKind, - pub projection: V, + pub projection: ValueOfUncheckedGeneric<V, FrozenStarlarkCallable<(FrozenValue,), FrozenValue>>, } /// A unique identity for a given [`TransitiveSetDefinition`]. #[derive(Debug, Clone, Display, Allocative, Hash)] -#[display(fmt = "{}", "name")] +#[display("{}", name)] struct TransitiveSetId { module_id: ImportPath, name: String, } @@ ... pub struct TransitiveSetDefinition<'v> { ... } #[derive(Debug, Clone, Trace, Coerce, Freeze, Allocative)] #[repr(C)] -pub struct TransitiveSetOperationsGen<V> { +pub struct TransitiveSetOperationsGen<V: ValueLifetimeless> { /// Callables that will project the values contained in transitive sets of this type to /// cmd_args or json. This can be used to include a transitive set into a command or json file. pub(crate) projections: SmallMap<String, TransitiveSetProjectionSpec<V>>, /// Callables that will reduce the values contained in transitive sets to a single value per /// node. This can be used to e.g. aggregate flags throughout a transitive set; - pub(crate) reductions: SmallMap<String, V>, + pub(crate) reductions: SmallMap< + String, + ValueOfUncheckedGeneric< + V, + FrozenStarlarkCallable<(ListType<FrozenValue>, FrozenValue), FrozenValue>, + >, + >, } pub type TransitiveSetOperations<'v> = TransitiveSetOperationsGen<Value<'v>>; -impl<V> TransitiveSetOperationsGen<V> { +impl<V: ValueLifetimeless> TransitiveSetOperationsGen<V> { pub fn valid_projections(&self, kind: TransitiveSetProjectionKind) -> Vec<String> { self.projections .iter() @@ -216,7 +225,11 @@ impl<'v> AllocValue<'v> for TransitiveSetDefinition<'v> { impl<'v> StarlarkValue<'v> for TransitiveSetDefinition<'v> { type Canonical = FrozenTransitiveSetDefinition; - fn export_as(&self, variable_name: &str, _: &mut Evaluator<'v, '_>) -> anyhow::Result<()> { + fn export_as( + &self, + variable_name: &str, + _: &mut Evaluator<'v, '_, '_>, + ) -> starlark::Result<()> { // First export wins self.exported.get_or_try_init(|| { let id = Arc::new(TransitiveSetId { @@ -267,7 +280,7 @@ impl<'v> StarlarkValue<'v> for TransitiveSetDefinition<'v> { } } - fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { + fn write_hash(&self, hasher: &mut StarlarkHasher) -> starlark::Result<()> { let exported = self .exported .get() @@ -276,17 +289,13 @@ impl<'v> StarlarkValue<'v> for TransitiveSetDefinition<'v> { Ok(()) } - fn get_type_starlark_repr() -> Ty { - Ty::starlark_value::<Self>() - } - fn eval_type(&self) -> Option<Ty> { self.exported.get().map(|exported| exported.set_ty.dupe()) } } #[derive(Display, ProvidesStaticType, Allocative)] -#[display(fmt = "{}", "exported.id")] +#[display("{}", exported.id)] pub struct FrozenTransitiveSetDefinition { pub(crate) exported: TransitiveSetDefinitionExported, @@ -333,7 +342,7 @@ impl<'v> StarlarkValue<'v> for FrozenTransitiveSetDefinition { } } - fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { + fn write_hash(&self, hasher: &mut StarlarkHasher) -> starlark::Result<()> { self.exported.id.hash(hasher); Ok(()) } @@ -372,25 +381,11 @@ impl<'v> Freeze for TransitiveSetDefinition<'v> { } } -pub fn transitive_set_definition_from_value<'v>( - x: Value<'v>, -) -> Option<&dyn TransitiveSetDefinitionLike<'v>> { - if let Some(x) = x.downcast_ref::<TransitiveSetDefinition>() { - Some(x as &dyn TransitiveSetDefinitionLike<'v>) - } else if let Some(x) = x.downcast_ref::<FrozenTransitiveSetDefinition>() { - Some(x as &dyn TransitiveSetDefinitionLike<'v>) - } else { - None - } -} pub trait TransitiveSetDefinitionLike<'v> { fn has_id(&self) -> bool; fn as_debug(&self) -> &dyn
fmt::Debug; - fn matches_type(&self, ty: &str) -> bool; - fn operations(&self) -> &TransitiveSetOperations<'v>; } @@ -403,13 +398,6 @@ impl<'v> TransitiveSetDefinitionLike<'v> for TransitiveSetDefinition<'v> { self } - fn matches_type(&self, ty: &str) -> bool { - self.exported - .get() - .as_ref() - .map_or(false, |exported| exported.id.name == ty) - } - fn operations(&self) -> &TransitiveSetOperations<'v> { &self.operations } @@ -424,10 +412,6 @@ impl<'v> TransitiveSetDefinitionLike<'v> for FrozenTransitiveSetDefinition { self } - fn matches_type(&self, ty: &str) -> bool { - self.exported.id.name == ty - } - fn operations(&self) -> &TransitiveSetOperations<'v> { coerce(&self.operations) } } #[starlark_module] pub fn register_transitive_set(builder: &mut GlobalsBuilder) { fn transitive_set<'v>( - #[starlark(require = named)] args_projections: Option<SmallMap<String, Value<'v>>>, - #[starlark(require = named)] json_projections: Option<SmallMap<String, Value<'v>>>, - #[starlark(require = named)] reductions: Option<SmallMap<String, Value<'v>>>, + #[starlark(require = named)] args_projections: Option< + SmallMap<String, StarlarkCallableChecked<'v, (Value<'v>,), Value<'v>>>, + >, + #[starlark(require = named)] json_projections: Option< + SmallMap<String, StarlarkCallableChecked<'v, (Value<'v>,), Value<'v>>>, + >, + #[starlark(require = named)] reductions: Option< + SmallMap< + String, + StarlarkCallableChecked<'v, (ListType<Value<'v>>, Value<'v>), Value<'v>>, + >, + >, eval: &mut Evaluator, ) -> anyhow::Result<TransitiveSetDefinition<'v>> { - // TODO(cjhopman): Reductions could do similar signature checking. let projections: SmallMap<_, _> = args_projections .into_iter() .flat_map(|v| v.into_iter()) .map(|(k, v)| { ( k, TransitiveSetProjectionSpec { kind: TransitiveSetProjectionKind::Args, - projection: v, + projection: ValueOfUncheckedGeneric::new(v.0), }, ) }) .chain( json_projections ... .map(|(k, v)| { ( k, TransitiveSetProjectionSpec { kind: TransitiveSetProjectionKind::Json, - projection: v, + projection: ValueOfUncheckedGeneric::new(v.0), }, ) }), ) .collect(); - // Both kinds of projections take functions with the same signature. - for (name, spec) in projections.iter() { - // We should probably be able to require that the projection returns a parameters_spec, but - // we don't depend on this type-checking and we'd just error out later when calling it if it - // were wrong.
- if let Some(v) = spec.projection.parameters_spec() { - if v.len() != 1 { - return Err(TransitiveSetError::ProjectionSignatureError { - name: name.clone(), - } - .into()); - } - }; - } + let reductions = reductions + .unwrap_or_default() + .into_iter() + .map(|(k, v)| (k, ValueOfUncheckedGeneric::new(v.0))) + .collect(); let starlark_path: StarlarkPath = starlark_path_from_build_context(eval)?; Ok(TransitiveSetDefinition::new( @@ -493,7 +476,7 @@ pub fn register_transitive_set(builder: &mut GlobalsBuilder) { }, TransitiveSetOperations { projections, - reductions: reductions.unwrap_or_default(), + reductions, }, )) } diff --git a/app/buck2_build_api/src/interpreter/rule_defs/transitive_set/transitive_set_iterator.rs b/app/buck2_build_api/src/interpreter/rule_defs/transitive_set/transitive_set_iterator.rs index e3b5dac60983a..312a29bff0fcf 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/transitive_set/transitive_set_iterator.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/transitive_set/transitive_set_iterator.rs @@ -13,7 +13,7 @@ use std::collections::HashSet; use std::collections::VecDeque; use anyhow::Context as _; -use fnv::FnvBuildHasher; +use buck2_util::hash::BuckHasherBuilder; use starlark::values::Value; use starlark::values::ValueIdentity; use starlark::values::ValueLike; @@ -48,9 +48,9 @@ where } /// A DFS, left-to-right iterator over a TransitiveSet. -pub struct PreorderTransitiveSetIteratorGen<'a, 'v, V> { +pub struct PreorderTransitiveSetIteratorGen<'a, 'v, V: ValueLike<'v>> { stack: Vec<&'a TransitiveSetGen<V>>, - seen: HashSet<ValueIdentity<'v>, FnvBuildHasher>, + seen: HashSet<ValueIdentity<'v>, BuckHasherBuilder>, } impl<'a, 'v, V> PreorderTransitiveSetIteratorGen<'a, 'v, V> where @@ -108,10 +108,10 @@ where /// A postorder traversal iterator over a TransitiveSet. /// Traverses by children left-to-right, and then visits the current node. -pub struct PostorderTransitiveSetIteratorGen<'a, 'v, V> { +pub struct PostorderTransitiveSetIteratorGen<'a, 'v, V: ValueLike<'v>> { stack: Vec>>, parent_stack: Vec<&'a TransitiveSetGen<V>>, - seen: HashSet<ValueIdentity<'v>, FnvBuildHasher>, + seen: HashSet<ValueIdentity<'v>, BuckHasherBuilder>, } impl<'a, 'v, V> PostorderTransitiveSetIteratorGen<'a, 'v, V> where @@ -183,9 +183,9 @@ where /// /// This is equivalent to a pre-order traversal, except that when nodes are shared with more than /// one parent it is returned in the order of its last occurrence. -pub struct TopologicalTransitiveSetIteratorGen<'a, 'v, V> { +pub struct TopologicalTransitiveSetIteratorGen<'a, 'v, V: ValueLike<'v>> { output_stack: Vec<&'a TransitiveSetGen<V>>, - instance_counts: HashMap<ValueIdentity<'v>, u32, FnvBuildHasher>, + instance_counts: HashMap<ValueIdentity<'v>, u32, BuckHasherBuilder>, } impl<'a, 'v, V> TopologicalTransitiveSetIteratorGen<'a, 'v, V> where @@ -203,9 +203,9 @@ where fn count_instances( set: &'a TransitiveSetGen<V>, - ) -> HashMap<ValueIdentity<'v>, u32, FnvBuildHasher> { + ) -> HashMap<ValueIdentity<'v>, u32, BuckHasherBuilder> { let mut stack = vec![set]; - let mut instance_counts = HashMap::<ValueIdentity<'v>, u32, FnvBuildHasher>::default(); + let mut instance_counts = HashMap::<ValueIdentity<'v>, u32, BuckHasherBuilder>::default(); while let Some(next) = stack.pop() { for child in next.children.iter().rev() { @@ -278,9 +278,9 @@ where } /// A breadth-first-search (BFS), left-to-right iterator over a TransitiveSet.
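/// As a hedged usage sketch (mirroring the `ordering` values accepted by the Starlark
/// traversal methods over transitive sets):
///
/// ```python
/// for value in my_tset.traverse(ordering = "bfs"):
///     ...
/// ```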
-pub struct BfsTransitiveSetIteratorGen<'a, 'v, V> { +pub struct BfsTransitiveSetIteratorGen<'a, 'v, V: ValueLike<'v>> { queue: VecDeque<&'a TransitiveSetGen<V>>, - seen: HashSet<ValueIdentity<'v>, FnvBuildHasher>, + seen: HashSet<ValueIdentity<'v>, BuckHasherBuilder>, } impl<'a, 'v, V> BfsTransitiveSetIteratorGen<'a, 'v, V> where @@ -336,7 +336,7 @@ where } } -pub struct TransitiveSetValuesIteratorGen<'a, 'v, V> { +pub struct TransitiveSetValuesIteratorGen<'a, 'v, V: ValueLike<'v>> { inner: Box<dyn TransitiveSetIteratorLike<'a, 'v, V> + 'a>, } diff --git a/app/buck2_build_api/src/interpreter/rule_defs/transitive_set/transitive_set_json_projection.rs b/app/buck2_build_api/src/interpreter/rule_defs/transitive_set/transitive_set_json_projection.rs index d0584785c7475..dd245a6fd8a0c 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/transitive_set/transitive_set_json_projection.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/transitive_set/transitive_set_json_projection.rs @@ -30,6 +30,7 @@ use starlark::values::StarlarkValue; use starlark::values::StringValue; use starlark::values::Trace; use starlark::values::Value; +use starlark::values::ValueLifetimeless; use starlark::values::ValueLike; use starlark::values::ValueOf; @@ -46,7 +47,7 @@ use crate::interpreter::rule_defs::transitive_set::TransitiveSet; #[derive(Debug, Clone, Coerce, Trace, Freeze, ProvidesStaticType, Allocative)] #[derive(NoSerialize)] // TODO we should probably have a serialization for transitive set #[repr(C)] -pub struct TransitiveSetJsonProjectionGen<V> { +pub struct TransitiveSetJsonProjectionGen<V: ValueLifetimeless> { pub(super) transitive_set: V, /// The index of the projection. Once transitive sets are defined, their projections never @@ -104,7 +105,7 @@ impl<'v, V: ValueLike<'v>> TransitiveSetJsonProjectionGen<V> { starlark_complex_value!(pub TransitiveSetJsonProjection); #[starlark_value(type = "transitive_set_json_projection")] -impl<'v, V: ValueLike<'v> + 'v> StarlarkValue<'v> for TransitiveSetJsonProjectionGen<V> +impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for TransitiveSetJsonProjectionGen<V> where Self: ProvidesStaticType<'v>, { diff --git a/app/buck2_build_api/src/interpreter/rule_defs/transitive_set/traversal.rs b/app/buck2_build_api/src/interpreter/rule_defs/transitive_set/traversal.rs index 0c364decc81b9..a8b359de58be2 100644 --- a/app/buck2_build_api/src/interpreter/rule_defs/transitive_set/traversal.rs +++ b/app/buck2_build_api/src/interpreter/rule_defs/transitive_set/traversal.rs @@ -20,6 +20,7 @@ use starlark::values::NoSerialize; use starlark::values::StarlarkValue; use starlark::values::Trace; use starlark::values::Value; +use starlark::values::ValueLifetimeless; use starlark::values::ValueLike; use crate::interpreter::rule_defs::transitive_set::TransitiveSet; @@ -65,9 +66,9 @@ impl TransitiveSetOrdering { NoSerialize, Allocative )] -#[display(fmt = "Traversal({})", inner)] +#[display("Traversal({})", inner)] #[repr(C)] -pub struct TransitiveSetTraversalGen<V> { +pub struct TransitiveSetTraversalGen<V: ValueLifetimeless> { pub(super) inner: V, pub ordering: TransitiveSetOrdering, } @@ -75,11 +76,11 @@ pub struct TransitiveSetTraversalGen<V: ValueLifetimeless> { starlark_complex_value!(pub TransitiveSetTraversal); #[starlark_value(type = "transitive_set_iterator")] -impl<'v, V: ValueLike<'v> + 'v> StarlarkValue<'v> for TransitiveSetTraversalGen<V> +impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for TransitiveSetTraversalGen<V> where Self: ProvidesStaticType<'v>, { - fn iterate_collect(&self, _heap: &'v Heap) -> anyhow::Result<Vec<Value<'v>>> { + fn iterate_collect(&self, _heap: &'v Heap) -> starlark::Result<Vec<Value<'v>>> { let tset =
TransitiveSet::from_value(self.inner.to_value()).context("Invalid inner")?; Ok(tset.iter_values(self.ordering)?.collect()) } @@ -98,9 +99,9 @@ where NoSerialize, Allocative )] -#[display(fmt = "Traversal({}[\"{}\"])", transitive_set, projection)] +#[display("Traversal({}[\"{}\"])", transitive_set, projection)] #[repr(C)] -pub struct TransitiveSetProjectionTraversalGen<V> { +pub struct TransitiveSetProjectionTraversalGen<V: ValueLifetimeless> { pub(super) transitive_set: V, pub projection: usize, pub ordering: TransitiveSetOrdering, } @@ -109,11 +110,11 @@ pub struct TransitiveSetProjectionTraversalGen<V: ValueLifetimeless> { starlark_complex_value!(pub TransitiveSetProjectionTraversal); #[starlark_value(type = "transitive_set_args_projection_iterator")] -impl<'v, V: ValueLike<'v> + 'v> StarlarkValue<'v> for TransitiveSetProjectionTraversalGen<V> +impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for TransitiveSetProjectionTraversalGen<V> where Self: ProvidesStaticType<'v>, { - fn iterate_collect(&self, _heap: &'v Heap) -> anyhow::Result<Vec<Value<'v>>> { + fn iterate_collect(&self, _heap: &'v Heap) -> starlark::Result<Vec<Value<'v>>> { let set = TransitiveSet::from_value(self.transitive_set.to_value()).context("Invalid inner")?; Ok(set diff --git a/app/buck2_build_api/src/interpreter/rule_defs/validation_spec.rs b/app/buck2_build_api/src/interpreter/rule_defs/validation_spec.rs new file mode 100644 index 0000000000000..842cc548dccbc --- /dev/null +++ b/app/buck2_build_api/src/interpreter/rule_defs/validation_spec.rs @@ -0,0 +1,183 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::fmt::Display; +use std::fmt::Formatter; + +use allocative::Allocative; +use anyhow::Context; +use starlark::any::ProvidesStaticType; +use starlark::coerce::Coerce; +use starlark::environment::GlobalsBuilder; +use starlark::environment::Methods; +use starlark::environment::MethodsBuilder; +use starlark::environment::MethodsStatic; +use starlark::values::starlark_value; +use starlark::values::Freeze; +use starlark::values::Heap; +use starlark::values::NoSerialize; +use starlark::values::StarlarkValue; +use starlark::values::StringValue; +use starlark::values::Trace; +use starlark::values::ValueLifetimeless; +use starlark::values::ValueLike; +use starlark::values::ValueOf; +use starlark::values::ValueOfUncheckedGeneric; +use starlark::StarlarkResultExt; + +use crate::interpreter::rule_defs::artifact::starlark_artifact::StarlarkArtifact; +use crate::interpreter::rule_defs::artifact::starlark_artifact_like::StarlarkArtifactLike; +use crate::interpreter::rule_defs::artifact::starlark_artifact_like::ValueAsArtifactLike; + +#[derive(Debug, thiserror::Error)] +enum ValidationSpecError { + #[error("Name of validation spec should not be empty")] + EmptyName, + #[error("Validation result artifact should be a build artifact, not a source one.")] + ValidationResultIsSourceArtifact, + #[error("Validation result artifact should be bound.")] + ValidationResultIsNotBound, +} + +/// Value describing a single identifiable validation. +/// Validation is represented by a build artifact with defined structure. +/// Content of such an artifact determines whether the validation is successful. +/// A collection of such objects forms a `ValidationInfo` provider +/// which describes how a given target should be validated.
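+///
+/// A hedged Starlark sketch (names are illustrative, not from this diff); the artifact
+/// passed as `validation_result` must already be bound to an action, e.g.:
+///
+/// ```python
+/// result = ctx.actions.write_json("validation.json", {"version": 1})
+/// spec = ValidationSpec(name = "lint", validation_result = result)
+/// ```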
+#[derive(
+    Debug,
+    Trace,
+    NoSerialize,
+    Coerce,
+    ProvidesStaticType,
+    Allocative,
+    Freeze
+)]
+#[freeze(validator = validate_validation_spec, bounds = "V: ValueLike<'freeze>")]
+#[repr(C)]
+pub struct StarlarkValidationSpecGen<V: ValueLifetimeless> {
+    /// Name used to identify validation. Should be unique per target node.
+    name: ValueOfUncheckedGeneric<V, String>,
+    /// Build artifact which is the result of running a validation.
+    /// Should contain JSON of defined schema setting API between Buck2 and user-created validators/scripts.
+    validation_result: ValueOfUncheckedGeneric<V, ValueAsArtifactLike<'static>>,
+
+    /// Is validation optional, i.e., should it be skipped by default?
+    /// By default validations are required unless this flag is specified.
+    /// Optional validations are only run when explicitly requested via CLI arguments.
+    optional: bool,
+}
+
+starlark_complex_value!(pub(crate) StarlarkValidationSpec);
+
+impl<'v, V: ValueLike<'v>> StarlarkValidationSpecGen<V> {
+    pub fn name(&self) -> &'v str {
+        self.name
+            .cast::<&str>()
+            .unpack()
+            .expect("type checked during construction or freezing")
+    }
+
+    pub fn validation_result(&self) -> &'v dyn StarlarkArtifactLike {
+        self.validation_result
+            .unpack()
+            .expect("type checked during construction or freezing")
+            .0
+    }
+
+    pub fn optional(&self) -> bool {
+        self.optional
+    }
+}
+
+impl<'v, V: ValueLike<'v>> Display for StarlarkValidationSpecGen<V>
+where
+    Self: ProvidesStaticType<'v>,
+{
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        write!(f, "ValidationSpec(name={}, validation_result=", self.name)?;
+        Display::fmt(&self.validation_result, f)?;
+        write!(f, ")")
+    }
+}
+
+fn validate_validation_spec<'v, V>(spec: &StarlarkValidationSpecGen<V>) -> anyhow::Result<()>
+where
+    V: ValueLike<'v>,
+{
+    let name = spec.name.unpack().into_anyhow_result()?;
+    if name.is_empty() {
+        return Err(ValidationSpecError::EmptyName.into());
+    }
+    let artifact = spec.validation_result.unpack().into_anyhow_result()?;
+    let artifact = match artifact.0.get_bound_artifact() {
+        Ok(bound_artifact) => bound_artifact,
+        Err(e) => {
+            return Err(e).context(ValidationSpecError::ValidationResultIsNotBound);
+        }
+    };
+    if artifact.is_source() {
+        return Err(ValidationSpecError::ValidationResultIsSourceArtifact.into());
+    }
+    Ok(())
+}
+
+#[starlark_value(type = "ValidationSpec")]
+impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for StarlarkValidationSpecGen<V>
+where
+    Self: ProvidesStaticType<'v>,
+{
+    fn get_methods() -> Option<&'static Methods> {
+        static RES: MethodsStatic = MethodsStatic::new();
+        RES.methods(validation_spec_methods)
+    }
+}
+
+#[starlark_module]
+fn validation_spec_methods(builder: &mut MethodsBuilder) {
+    #[starlark(attribute)]
+    /// Name identifying validation.
+    fn name<'v>(
+        this: &'v StarlarkValidationSpec,
+        heap: &'v Heap,
+    ) -> anyhow::Result<StringValue<'v>> {
+        Ok(heap.alloc_str_intern(this.name()))
+    }
+
+    #[starlark(attribute)]
+    /// Is validation optional.
+    fn optional<'v>(this: &'v StarlarkValidationSpec) -> anyhow::Result<bool> {
+        Ok(this.optional())
+    }
+
+    #[starlark(attribute)]
+    /// Artifact which is the result of running a validation.
+    fn validation_result<'v>(this: &'v StarlarkValidationSpec) -> anyhow::Result<StarlarkArtifact> {
+        let artifact = this.validation_result.unpack().into_anyhow_result()?;
+        artifact.0.get_bound_starlark_artifact()
+    }
+}
+
+#[starlark_module]
+pub fn register_validation_spec(builder: &mut GlobalsBuilder) {
+    #[starlark(as_type = FrozenStarlarkValidationSpec)]
+    fn ValidationSpec<'v>(
+        #[starlark(require = named)] name: StringValue<'v>,
+        #[starlark(require = named)] validation_result: ValueOf<'v, ValueAsArtifactLike<'v>>,
+        #[starlark(require = named, default = false)] optional: bool,
+    ) -> anyhow::Result<StarlarkValidationSpec<'v>> {
+        let result = StarlarkValidationSpec {
+            name: name.to_value_of_unchecked().cast(),
+            validation_result: validation_result.as_unchecked().cast(),
+            optional,
+        };
+        validate_validation_spec(&result)?;
+        Ok(result)
+    }
+}
diff --git a/app/buck2_build_api/src/keep_going.rs b/app/buck2_build_api/src/keep_going.rs
index 71c33244c53f2..9685d48efde34 100644
--- a/app/buck2_build_api/src/keep_going.rs
+++ b/app/buck2_build_api/src/keep_going.rs
@@ -7,103 +7,43 @@
  * of this source tree.
  */
 
-use std::collections::HashMap;
-use std::hash::Hash;
-
 use dice::DiceComputations;
 use dice::UserComputationData;
-use futures::Stream;
-use futures::StreamExt;
-use indexmap::IndexMap;
-use smallvec::SmallVec;
-
-/// Evaluate a series of futures, returning a series of results.
-/// If any future fails, it will fail.
-/// If KEEP_GOING is true, it will first make all others continue.
-pub async fn try_join_all<C, T, E>(
-    ctx: &DiceComputations,
-    mut inputs: impl Stream<Item = Result<T, E>> + Unpin,
-) -> Result<C, E>
-where
-    C: KeepGoingCollectable<T>,
-{
-    let keep_going = ctx.per_transaction_data().get_keep_going();
-
-    let size = inputs.size_hint().0;
-    let mut res = C::with_capacity(size);
-    let mut err = None;
-    while let Some(x) = inputs.next().await {
-        match x {
-            Ok(x) => res.push(x),
-            Err(e) => {
-                if keep_going {
-                    err = Some(e);
-                } else {
-                    return Err(e);
-                }
-            }
+use futures::future::BoxFuture;
+use futures::Future;
+
+pub struct KeepGoing;
+
+impl KeepGoing {
+    pub fn try_compute_join_all<'a, T: Send, R: 'a, E: 'a>(
+        ctx: &'a mut DiceComputations<'_>,
+        items: impl IntoIterator<Item = T>,
+        mapper: (
+            impl for<'x> FnOnce(&'x mut DiceComputations<'a>, T) -> BoxFuture<'x, Result<R, E>>
+            + Send
+            + Sync
+            + Copy
+        ),
+    ) -> impl Future<Output = Result<Vec<R>, E>> + 'a {
+        let keep_going = ctx.per_transaction_data().get_keep_going();
+
+        let futs = ctx.compute_many(items.into_iter().map(move |v| {
+            DiceComputations::declare_closure(
+                move |ctx: &mut DiceComputations| -> BoxFuture<Result<R, E>> { mapper(ctx, v) },
+            )
+        }));
+
+        async move {
+            Ok(if keep_going {
+                futures::future::join_all(futs)
+                    .await
+                    .into_iter()
+                    .try_collect::<Vec<_>>()?
+            } else {
+                buck2_util::future::try_join_all(futs).await?
+            })
         }
     }
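+
+    // Illustrative usage sketch (assumed names `compute_one` and `Out`; not
+    // part of this change): each item is mapped against its own fresh
+    // `&mut DiceComputations`, and the mapper returns a boxed future, e.g.
+    //
+    //     let results: Vec<Out> = KeepGoing::try_compute_join_all(
+    //         ctx,
+    //         items,
+    //         |ctx, item| compute_one(ctx, item).boxed(),
+    //     )
+    //     .await?;
+    //
+    // With keep-going enabled on the transaction, sibling computations run to
+    // completion before the first error (if any) is surfaced.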
-
-    if let Some(err) = err {
-        return Err(err);
-    }
-
-    Ok(res)
-}
-
-pub trait KeepGoingCollectable<I> {
-    fn with_capacity(cap: usize) -> Self;
-
-    fn push(&mut self, item: I);
-}
-
-impl<K, V> KeepGoingCollectable<(K, V)> for IndexMap<K, V>
-where
-    K: PartialEq + Eq + Hash,
-{
-    fn with_capacity(cap: usize) -> Self {
-        IndexMap::with_capacity(cap)
-    }
-
-    fn push(&mut self, item: (K, V)) {
-        let (k, v) = item;
-        IndexMap::insert(self, k, v);
-    }
-}
-
-impl<K, V> KeepGoingCollectable<(K, V)> for HashMap<K, V>
-where
-    K: PartialEq + Eq + Hash,
-{
-    fn with_capacity(cap: usize) -> Self {
-        HashMap::with_capacity(cap)
-    }
-
-    fn push(&mut self, item: (K, V)) {
-        let (k, v) = item;
-        HashMap::insert(self, k, v);
-    }
-}
-
-impl<I> KeepGoingCollectable<I> for Vec<I> {
-    fn with_capacity(cap: usize) -> Self {
-        Vec::with_capacity(cap)
-    }
-
-    fn push(&mut self, item: I) {
-        Vec::push(self, item);
-    }
-}
-
-impl<I> KeepGoingCollectable<I> for SmallVec<[I; 1]> {
-    fn with_capacity(cap: usize) -> Self {
-        SmallVec::with_capacity(cap)
-    }
-
-    fn push(&mut self, item: I) {
-        SmallVec::push(self, item);
-    }
 }
 
 pub struct KeepGoingHolder(bool);
diff --git a/app/buck2_build_api/src/lib.rs b/app/buck2_build_api/src/lib.rs
index dae44b33965b8..a38c526979c6c 100644
--- a/app/buck2_build_api/src/lib.rs
+++ b/app/buck2_build_api/src/lib.rs
@@ -7,23 +7,16 @@
  * of this source tree.
  */
 
-#![feature(async_closure)]
+#![feature(error_generic_member_access)]
 #![feature(box_patterns)]
 #![feature(iter_order_by)]
 #![feature(try_blocks)]
-#![feature(try_trait_v2)]
-#![feature(never_type)]
-#![feature(provide_any)]
 #![feature(once_cell_try)]
-// Plugins
-#![cfg_attr(feature = "gazebo_lint", feature(plugin))]
-#![cfg_attr(feature = "gazebo_lint", allow(deprecated))] // :(
-#![cfg_attr(feature = "gazebo_lint", plugin(gazebo_lint))]
+#![feature(used_with_arg)]
+#![feature(iterator_try_collect)]
 
 #[macro_use]
 extern crate starlark;
-#[macro_use]
-extern crate higher_order_closure;
 
 use std::sync::Once;
 
@@ -42,15 +35,20 @@ pub mod configure_targets;
 pub mod context;
 pub mod deferred;
 pub mod dynamic;
+pub mod dynamic_value;
 pub mod interpreter;
 pub mod keep_going;
+pub mod materialize;
 pub mod query;
 pub mod spawner;
 pub mod transition;
+pub mod validation;
 
 pub fn init_late_bindings() {
     static ONCE: Once = Once::new();
     ONCE.call_once(|| {
         interpreter::more::init_register_build_api_globals();
+        interpreter::rule_defs::context::init_analysis_context_ty();
+        interpreter::rule_defs::provider::ty::abstract_provider::init_provider_ty();
     });
 }
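A hedged usage sketch for the new `materialize` module introduced below (the
wrapper name `ensure_and_materialize` is assumed, not part of this change; it
only combines items this diff defines):

    use buck2_build_api::artifact_groups::ArtifactGroup;
    use buck2_build_api::artifact_groups::ArtifactGroupValues;
    use buck2_build_api::materialize::materialize_artifact_group;
    use buck2_build_api::materialize::MaterializationContext;
    use buck2_cli_proto::build_request::Materializations;
    use dice::DiceComputations;

    async fn ensure_and_materialize(
        ctx: &mut DiceComputations<'_>,
        group: &ArtifactGroup,
    ) -> anyhow::Result<ArtifactGroupValues> {
        // `Default` ensures the group and defers forced materialization to the
        // config; `Skip` would ensure the values without materializing anything.
        let mctx = MaterializationContext::from(Materializations::Default);
        materialize_artifact_group(ctx, group, &mctx).await
    }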
diff --git a/app/buck2_build_api/src/materialize.rs b/app/buck2_build_api/src/materialize.rs
new file mode 100644
index 0000000000000..35a39376f42bc
--- /dev/null
+++ b/app/buck2_build_api/src/materialize.rs
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::sync::Arc;
+
+use anyhow::Context;
+use buck2_artifact::artifact::artifact_type::BaseArtifactKind;
+use buck2_artifact::artifact::build_artifact::BuildArtifact;
+use buck2_cli_proto::build_request::Materializations;
+use dashmap::DashSet;
+use dice::DiceComputations;
+use dice::UserComputationData;
+use dupe::Dupe;
+use futures::FutureExt;
+
+use crate::actions::artifact::materializer::ArtifactMaterializer;
+use crate::artifact_groups::calculation::ArtifactGroupCalculation;
+use crate::artifact_groups::ArtifactGroup;
+use crate::artifact_groups::ArtifactGroupValues;
+
+pub async fn materialize_artifact_group(
+    ctx: &mut DiceComputations<'_>,
+    artifact_group: &ArtifactGroup,
+    materialization_context: &MaterializationContext,
+) -> anyhow::Result<ArtifactGroupValues> {
+    let values = ctx.ensure_artifact_group(artifact_group).await?;
+
+    if let MaterializationContext::Materialize { force } = materialization_context {
+        let queue_tracker = ctx
+            .per_transaction_data()
+            .get_materialization_queue_tracker();
+        let mut artifacts_to_materialize = Vec::new();
+        for (artifact, _value) in values.iter() {
+            if let BaseArtifactKind::Build(artifact) = artifact.as_parts().0 {
+                if !queue_tracker.insert(artifact.dupe()) {
+                    // We've already requested this artifact, no use requesting it again.
+                    continue;
+                }
+                artifacts_to_materialize.push(artifact);
+            }
+        }
+
+        ctx.try_compute_join(artifacts_to_materialize, |ctx, artifact| {
+            async move {
+                ctx.try_materialize_requested_artifact(artifact, *force)
+                    .await
+            }
+            .boxed()
+        })
+        .await
+        .context("Failed to materialize artifacts")?;
+    }
+
+    Ok(values)
+}
+
+#[derive(Clone, Dupe)]
+pub enum MaterializationContext {
+    Skip,
+    Materialize {
+        /// Whether we should force the materialization of requested artifacts, or defer to the
+        /// config.
+        force: bool,
+    },
+}
+
+impl From<Materializations> for MaterializationContext {
+    fn from(value: Materializations) -> Self {
+        match value {
+            Materializations::Skip => MaterializationContext::Skip,
+            Materializations::Default => MaterializationContext::Materialize { force: false },
+            Materializations::Materialize => MaterializationContext::Materialize { force: true },
+        }
+    }
+}
+
+/// This map contains all the artifacts that we enqueued for materialization. This ensures
+/// we don't enqueue the same thing more than once. Should be shared across work done
+/// in a single DICE transaction.
+pub struct MaterializationQueueTrackerHolder(Arc<DashSet<BuildArtifact>>);
+
+pub trait HasMaterializationQueueTracker {
+    fn init_materialization_queue_tracker(&mut self);
+
+    fn get_materialization_queue_tracker(&self) -> Arc<DashSet<BuildArtifact>>;
+}
+
+impl HasMaterializationQueueTracker for UserComputationData {
+    fn init_materialization_queue_tracker(&mut self) {
+        self.data
+            .set(MaterializationQueueTrackerHolder(Arc::new(DashSet::new())));
+    }
+
+    fn get_materialization_queue_tracker(&self) -> Arc<DashSet<BuildArtifact>> {
+        self.data
+            .get::<MaterializationQueueTrackerHolder>()
+            .expect("MaterializationQueueTracker should be set")
+            .0
+            .dupe()
+    }
+}
diff --git a/app/buck2_build_api/src/query/mod.rs b/app/buck2_build_api/src/query.rs
similarity index 100%
rename from app/buck2_build_api/src/query/mod.rs
rename to app/buck2_build_api/src/query.rs
diff --git a/app/buck2_build_api/src/query/analysis.rs b/app/buck2_build_api/src/query/analysis.rs
index 5b7ace5120cff..3f9dffd9604a3 100644
--- a/app/buck2_build_api/src/query/analysis.rs
+++ b/app/buck2_build_api/src/query/analysis.rs
@@ -19,7 +19,7 @@ use indexmap::IndexMap;
 
 /// Used by `audit classpath`.
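 /// Late-bound (via `buck2_util::late_binding::LateBinding`) so the audit
 /// command can call into the implementing crate without taking a direct
 /// dependency on it.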
pub static CLASSPATH_FOR_TARGETS: LateBinding< for<'c> fn( - &'c DiceComputations, + &'c mut DiceComputations, Vec, ) -> Pin< Box< diff --git a/app/buck2_build_api/src/query/bxl.rs b/app/buck2_build_api/src/query/bxl.rs index 79db98dfb23f4..810887249b9f2 100644 --- a/app/buck2_build_api/src/query/bxl.rs +++ b/app/buck2_build_api/src/query/bxl.rs @@ -11,13 +11,13 @@ use std::future::Future; use std::pin::Pin; use async_trait::async_trait; +use buck2_common::global_cfg_options::GlobalCfgOptions; use buck2_core::cells::name::CellName; use buck2_core::cells::CellResolver; use buck2_core::configuration::compatibility::MaybeCompatible; use buck2_core::fs::project::ProjectRoot; use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_core::target::configured_target_label::ConfiguredTargetLabel; -use buck2_core::target::label::TargetLabel; use buck2_node::nodes::configured::ConfiguredTargetNode; use buck2_node::nodes::unconfigured::TargetNode; use buck2_query::query::syntax::simple::eval::file_set::FileSet; @@ -32,44 +32,47 @@ use crate::actions::query::ActionQueryNode; pub trait BxlCqueryFunctions: Send { async fn allpaths( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, from: &TargetSet, to: &TargetSet, + captured_expr: Option<&CapturedExpr>, ) -> anyhow::Result>; async fn somepath( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, from: &TargetSet, to: &TargetSet, + captured_expr: Option<&CapturedExpr>, ) -> anyhow::Result>; async fn owner( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, file_set: &FileSet, target_universe: Option<&TargetSet>, ) -> anyhow::Result>; async fn deps( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, targets: &TargetSet, deps: Option, captured_expr: Option<&CapturedExpr>, ) -> anyhow::Result>; async fn rdeps( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, universe: &TargetSet, targets: &TargetSet, depth: Option, + captured_expr: Option<&CapturedExpr>, ) -> anyhow::Result>; async fn testsof( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, targets: &TargetSet, ) -> anyhow::Result>; async fn testsof_with_default_target_platform( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, targets: &TargetSet, ) -> anyhow::Result>>; } @@ -78,38 +81,46 @@ pub trait BxlCqueryFunctions: Send { pub trait BxlUqueryFunctions: Send { async fn allpaths( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, from: &TargetSet, to: &TargetSet, + captured_expr: Option<&CapturedExpr>, ) -> anyhow::Result>; async fn somepath( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, from: &TargetSet, to: &TargetSet, + captured_expr: Option<&CapturedExpr>, ) -> anyhow::Result>; async fn deps( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, targets: &TargetSet, deps: Option, captured_expr: Option<&CapturedExpr>, ) -> anyhow::Result>; async fn rdeps( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, universe: &TargetSet, targets: &TargetSet, depth: Option, + captured_expr: Option<&CapturedExpr>, ) -> anyhow::Result>; async fn testsof( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, targets: &TargetSet, ) -> anyhow::Result>; async fn owner( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, + file_set: &FileSet, + ) -> anyhow::Result>; + async fn targets_in_buildfile( + &self, + dice: &mut 
DiceComputations<'_>, file_set: &FileSet, ) -> anyhow::Result>; } @@ -118,61 +129,64 @@ pub trait BxlUqueryFunctions: Send { pub trait BxlAqueryFunctions: Send { async fn allpaths( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, from: &TargetSet, to: &TargetSet, + captured_expr: Option<&CapturedExpr>, ) -> anyhow::Result>; async fn somepath( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, from: &TargetSet, to: &TargetSet, + captured_expr: Option<&CapturedExpr>, ) -> anyhow::Result>; async fn deps( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, targets: &TargetSet, deps: Option, captured_expr: Option<&CapturedExpr>, ) -> anyhow::Result>; async fn rdeps( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, universe: &TargetSet, targets: &TargetSet, depth: Option, + captured_expr: Option<&CapturedExpr>, ) -> anyhow::Result>; async fn testsof( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, targets: &TargetSet, ) -> anyhow::Result>; async fn owner( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, file_set: &FileSet, ) -> anyhow::Result>; async fn get_target_set( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, configured_labels: Vec, ) -> anyhow::Result<(Vec, TargetSet)>; async fn all_outputs( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, targets: &TargetSet, ) -> anyhow::Result>; async fn all_actions( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, targets: &TargetSet, ) -> anyhow::Result>; } pub static NEW_BXL_CQUERY_FUNCTIONS: LateBinding< fn( - // Target platform - Option, + // Target configuration info (target platform + cli modifiers) + GlobalCfgOptions, ProjectRoot, CellName, CellResolver, @@ -189,8 +203,8 @@ pub static NEW_BXL_UQUERY_FUNCTIONS: LateBinding< pub static NEW_BXL_AQUERY_FUNCTIONS: LateBinding< fn( - // Target platform - Option, + // Target configuration info (target platform + cli modifiers) + GlobalCfgOptions, ProjectRoot, CellName, CellResolver, diff --git a/app/buck2_build_api/src/query/oneshot.rs b/app/buck2_build_api/src/query/oneshot.rs index 35f4e402a3720..58d142bb8ce58 100644 --- a/app/buck2_build_api/src/query/oneshot.rs +++ b/app/buck2_build_api/src/query/oneshot.rs @@ -7,64 +7,52 @@ * of this source tree. */ +use std::sync::Arc; + use async_trait::async_trait; +use buck2_common::global_cfg_options::GlobalCfgOptions; use buck2_core::fs::project_rel_path::ProjectRelativePath; -use buck2_core::target::label::TargetLabel; use buck2_node::configured_universe::CqueryUniverse; use buck2_node::nodes::configured::ConfiguredTargetNode; use buck2_node::nodes::unconfigured::TargetNode; use buck2_query::query::syntax::simple::eval::values::QueryEvaluationResult; use buck2_util::late_binding::LateBinding; use dice::DiceComputations; -use dupe::Dupe; use crate::actions::query::ActionQueryNode; -/// [Context](https://fburl.com/adiagq2f). 
-#[derive(Copy, Clone, Dupe)] -pub enum CqueryOwnerBehavior { - Deprecated, - Correct, -} - #[async_trait] pub trait QueryFrontend: Send + Sync + 'static { async fn eval_uquery( &self, - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, working_dir: &ProjectRelativePath, query: &str, query_args: &[String], - global_target_platform: Option, ) -> anyhow::Result>; async fn eval_cquery( &self, - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, working_dir: &ProjectRelativePath, - owner_behavior: CqueryOwnerBehavior, query: &str, query_args: &[String], - global_target_platform: Option, + global_cfg_options: GlobalCfgOptions, target_universe: Option<&[String]>, - ) -> anyhow::Result>; + collect_universes: bool, + ) -> anyhow::Result<( + QueryEvaluationResult, + Option>>, + )>; async fn eval_aquery( &self, - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, working_dir: &ProjectRelativePath, query: &str, query_args: &[String], - global_target_platform: Option, + global_cfg_options: GlobalCfgOptions, ) -> anyhow::Result>; - - async fn universe_from_literals( - &self, - ctx: &DiceComputations, - cwd: &ProjectRelativePath, - literals: &[String], - global_target_platform: Option, - ) -> anyhow::Result; } pub static QUERY_FRONTEND: LateBinding<&'static dyn QueryFrontend> = diff --git a/app/buck2_build_api/src/spawner.rs b/app/buck2_build_api/src/spawner.rs index dea483d10fb13..3d87afd39f88f 100644 --- a/app/buck2_build_api/src/spawner.rs +++ b/app/buck2_build_api/src/spawner.rs @@ -12,9 +12,9 @@ use std::any::Any; use allocative::Allocative; use buck2_common::events::HasEvents; use buck2_events::dispatch::with_dispatcher_async; +use buck2_futures::spawner::Spawner; use dupe::Dupe; use futures::future::BoxFuture; -use more_futures::spawner::Spawner; use tokio::runtime::Handle; use tokio::task::JoinHandle; @@ -59,11 +59,11 @@ mod tests { use buck2_events::dispatch::EventDispatcher; use buck2_events::source::ChannelEventSource; use buck2_events::BuckEvent; + use buck2_futures::spawn::spawn_dropcancel_with_preamble; use buck2_wrapper_common::invocation_id::TraceId; use dice::DiceData; use dice::UserComputationData; use futures::future::FutureExt; - use more_futures::spawn::spawn_dropcancel_with_preamble; use super::*; @@ -91,7 +91,7 @@ mod tests { let end = CommandEnd { data: Default::default(), is_success: true, - error_messages: vec![], + errors: vec![], }; (start, end) diff --git a/app/buck2_build_api/src/transition.rs b/app/buck2_build_api/src/transition.rs index c63d4ddc6f032..054a66939056c 100644 --- a/app/buck2_build_api/src/transition.rs +++ b/app/buck2_build_api/src/transition.rs @@ -13,17 +13,18 @@ use async_trait::async_trait; use buck2_core::configuration::data::ConfigurationData; use buck2_core::configuration::transition::applied::TransitionApplied; use buck2_core::configuration::transition::id::TransitionId; -use buck2_node::nodes::unconfigured::TargetNode; +use buck2_node::attrs::configured_attr::ConfiguredAttr; use buck2_util::late_binding::LateBinding; use dice::DiceComputations; +use starlark_map::ordered_map::OrderedMap; #[async_trait] pub trait TransitionCalculation: Send + Sync + 'static { /// Apply transition function to configuration and cache the result. 
     async fn apply_transition(
         &self,
-        ctx: &DiceComputations,
-        target_node: &TargetNode,
+        ctx: &mut DiceComputations<'_>,
+        attrs: &OrderedMap<&str, Arc<ConfiguredAttr>>,
         conf: &ConfigurationData,
         transition_id: &TransitionId,
     ) -> anyhow::Result<Arc<TransitionApplied>>;
@@ -31,3 +32,21 @@ pub trait TransitionCalculation: Send + Sync + 'static {
 
 pub static TRANSITION_CALCULATION: LateBinding<&'static dyn TransitionCalculation> =
     LateBinding::new("TRANSITION_CALCULATION");
+
+pub static TRANSITION_ATTRS_PROVIDER: LateBinding<&'static dyn TransitionAttrProvider> =
+    LateBinding::new("TRANSITION_ATTRS_PROVIDER");
+
+//TODO transition attributes can be added to Rule. Basic idea is this:
+// * in RuleCallable fetch TransitionId from transition value using TransitionValue trait
+// * add a function like attrs to TransitionValue,
+// * call it from RuleCallable, and store in Rule.
+// * in TargetNode we have access to Rule.
+#[async_trait]
+pub trait TransitionAttrProvider: Send + Sync + 'static {
+    /// Fetch attribute names accessed by transition function.
+    async fn transition_attrs(
+        &self,
+        ctx: &mut DiceComputations<'_>,
+        transition_id: &TransitionId,
+    ) -> anyhow::Result<Option<Arc<[String]>>>;
+}
diff --git a/app/buck2_build_api/src/validation.rs b/app/buck2_build_api/src/validation.rs
new file mode 100644
index 0000000000000..8a713a8c269f5
--- /dev/null
+++ b/app/buck2_build_api/src/validation.rs
@@ -0,0 +1,11 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+pub mod transitive_validations;
+pub mod validation_impl;
diff --git a/app/buck2_build_api/src/validation/transitive_validations.rs b/app/buck2_build_api/src/validation/transitive_validations.rs
new file mode 100644
index 0000000000000..79304c6d66aaa
--- /dev/null
+++ b/app/buck2_build_api/src/validation/transitive_validations.rs
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::sync::Arc;
+
+use allocative::Allocative;
+use buck2_core::target::configured_target_label::ConfiguredTargetLabel;
+use dupe::Dupe;
+use starlark::values::OwnedFrozenRef;
+use starlark_map::small_set::SmallSet;
+
+use crate::interpreter::rule_defs::provider::builtin::validation_info::FrozenValidationInfo;
+
+/// Efficiently encoded collection of `ValidationInfo` providers for a given target node
+/// and all of its recursive dependencies. Forms an optimized/sparse graph tracking
+/// only those providers.
+#[derive(Debug, Allocative, Dupe, Clone)]
+pub struct TransitiveValidations(pub Arc<TransitiveValidationsData>);
+
+#[derive(Debug, Allocative)]
+pub struct TransitiveValidationsData {
+    /// `ValidationInfo` provider if the current node contains it
+    pub info: Option<OwnedFrozenRef<FrozenValidationInfo>>,
+    /// If empty it means that there are no transitive dependencies of current node
+    /// which contain `ValidationInfo` providers.
+    pub children: SmallSet<ConfiguredTargetLabel>,
+}
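A hedged sketch of walking the sparse graph defined above (the `lookup`
callback resolving a child label to its `TransitiveValidations` is an assumed
helper, and deduplication of shared children is omitted for brevity):

    fn visit_validations(
        node: &TransitiveValidations,
        lookup: &impl Fn(&ConfiguredTargetLabel) -> TransitiveValidations,
        on_info: &mut impl FnMut(&OwnedFrozenRef<FrozenValidationInfo>),
    ) {
        // Report this node's own `ValidationInfo`, if present.
        if let Some(info) = &node.0.info {
            on_info(info);
        }
        // Recurse only into children known to carry `ValidationInfo`
        // somewhere below them; that is what keeps the graph sparse.
        for child in node.0.children.iter() {
            visit_validations(&lookup(child), lookup, on_info);
        }
    }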
diff --git a/app/buck2_build_api/src/validation/validation_impl.rs b/app/buck2_build_api/src/validation/validation_impl.rs
new file mode 100644
index 0000000000000..3dd0a3aab6a3e
--- /dev/null
+++ b/app/buck2_build_api/src/validation/validation_impl.rs
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use async_trait::async_trait;
+use buck2_core::target::configured_target_label::ConfiguredTargetLabel;
+use buck2_util::late_binding::LateBinding;
+use dice::DiceComputations;
+
+#[async_trait]
+pub trait ValidationImpl: Send + Sync + 'static {
+    /// Validate a given configured target node and any transitive nodes.
+    async fn validate_target_node_transitively(
+        &self,
+        ctx: &mut DiceComputations<'_>,
+        target: ConfiguredTargetLabel,
+    ) -> Result<(), buck2_error::Error>;
+}
+
+pub static VALIDATION_IMPL: LateBinding<&'static dyn ValidationImpl> =
+    LateBinding::new("VALIDATION_IMPL");
diff --git a/app/buck2_build_api_derive/BUCK b/app/buck2_build_api_derive/BUCK
index 0f5e345683464..9218e03ba5bc8 100644
--- a/app/buck2_build_api_derive/BUCK
+++ b/app/buck2_build_api_derive/BUCK
@@ -1,5 +1,4 @@
 load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library")
-load("@fbsource//tools/build_defs:glob_defs.bzl", "glob")
 
 oncall("build_infra")
diff --git a/app/buck2_build_api_derive/Cargo.toml b/app/buck2_build_api_derive/Cargo.toml
index c32b50d704ed1..ad51c20f936e1 100644
--- a/app/buck2_build_api_derive/Cargo.toml
+++ b/app/buck2_build_api_derive/Cargo.toml
@@ -1,10 +1,11 @@
 [package]
-name = "buck2_build_api_derive"
-version = "0.4.1"
-license = "MIT OR Apache-2.0"
 authors = ["Facebook"]
-edition = "2021"
 description = "Derive macros for the Buck's buck2_build_api"
+edition = "2021"
+license = { workspace = true }
+name = "buck2_build_api_derive"
+repository = { workspace = true }
+version = "0.4.1"
 
 [lib]
 proc-macro = true
@@ -12,14 +13,7 @@ proc-macro = true
 [dependencies]
 convert_case = { workspace = true }
 proc-macro2 = { workspace = true }
-syn = { workspace = true }
 quote = { workspace = true }
+syn = { workspace = true }
 
 gazebo = { workspace = true }
-
-gazebo_lint.version = "0.1"
-gazebo_lint.optional = true
-# @oss-disable: gazebo_lint.path = "../../gazebo_lint/gazebo_lint"
-
-[features]
-# @oss-disable: default = ["gazebo_lint"]
diff --git a/app/buck2_build_api_derive/src/lib.rs b/app/buck2_build_api_derive/src/lib.rs
index e76a91a768aa4..f9e674d35cb5c 100644
--- a/app/buck2_build_api_derive/src/lib.rs
+++ b/app/buck2_build_api_derive/src/lib.rs
@@ -7,9 +7,7 @@
  * of this source tree.
  */
 
-#![cfg_attr(feature = "gazebo_lint", feature(plugin))]
-#![cfg_attr(feature = "gazebo_lint", allow(deprecated))] // :(
-#![cfg_attr(feature = "gazebo_lint", plugin(gazebo_lint))]
+#![feature(error_generic_member_access)]
 
 #[allow(unused_extern_crates)] // proc_macro is very special
 extern crate proc_macro;
diff --git a/app/buck2_build_api_derive/src/provider.rs b/app/buck2_build_api_derive/src/provider.rs
index 27bd3a5c73850..b0ef61c8c2bec 100644
--- a/app/buck2_build_api_derive/src/provider.rs
+++ b/app/buck2_build_api_derive/src/provider.rs
@@ -7,19 +7,14 @@
  * of this source tree.
*/ -use std::collections::HashMap; - use convert_case::Case; use convert_case::Casing; use gazebo::prelude::*; use quote::format_ident; use quote::quote; use quote::ToTokens; -use syn::parse::ParseStream; -use syn::Attribute; use syn::Fields; - -const PROVIDER_IDENT: &str = "provider"; +use syn::TypeParamBound; pub(crate) struct InternalProviderArgs { creator_func: syn::Ident, @@ -57,7 +52,6 @@ struct ProviderCodegen { span: proc_macro2::Span, input: syn::ItemStruct, args: InternalProviderArgs, - field_attr_providers: HashMap, } impl ProviderCodegen { @@ -66,33 +60,15 @@ impl ProviderCodegen { /// This modifies the original input and removes any instances of `#[provider()]` macros /// on fields of the provided structs, and saves them into `field_attr_providers`. fn new(mut input: syn::ItemStruct, args: InternalProviderArgs) -> syn::Result { - let mut provider_attrs = HashMap::new(); if let Fields::Named(fields_named) = &mut input.fields { for field in fields_named.named.iter_mut() { - let (attrs, mut provider_attr): (Vec, Vec) = field - .attrs - .clone() - .into_iter() - .partition(|a| !a.path().is_ident(PROVIDER_IDENT)); - field.attrs = attrs; - if provider_attr.len() > 1 { - return Err(syn::Error::new_spanned( - field.to_token_stream(), - format!("{} attribute can only be specified once", PROVIDER_IDENT), - )); - } else if !provider_attr.is_empty() { - provider_attrs.insert( - field.ident.as_ref().unwrap().to_owned(), - provider_attr.remove(0), - ); - } + field.attrs = field.attrs.clone(); } }; Ok(Self { span: input.ident.span(), input, args, - field_attr_providers: provider_attrs, }) } @@ -180,36 +156,54 @@ impl ProviderCodegen { )); } - syn::custom_keyword!(field_type); - - let name = field.ident.as_ref().unwrap().to_owned(); + let error = "Field type must be `ValueOfUncheckedGeneric`"; - let Some(field_type_attr) = self.field_attr_providers.get(&name) else { - return Err(syn::Error::new_spanned( - field, - "field should have a `#[provider(field_type = SomeType)]` attribute", - )); + let syn::Type::Path(ty) = &field.ty else { + return Err(syn::Error::new_spanned(field, error)); + }; + let syn::TypePath { + qself: None, + path: + syn::Path { + leading_colon: None, + segments, + }, + } = ty + else { + return Err(syn::Error::new_spanned(field, error)); }; + let [ + syn::PathSegment { + ident, + arguments: syn::PathArguments::AngleBracketed(args), + }, + ] = Vec::from_iter(segments).as_slice() + else { + return Err(syn::Error::new_spanned(field, error)); + }; + if ident != "ValueOfUncheckedGeneric" { + return Err(syn::Error::new_spanned(field, error)); + } + let [ + syn::GenericArgument::Type(v), + syn::GenericArgument::Type(field_type), + ] = Vec::from_iter(&args.args).as_slice() + else { + return Err(syn::Error::new_spanned(field, error)); + }; + let expected_v: syn::Type = syn::parse_quote!(V); + if v != &expected_v { + return Err(syn::Error::new_spanned(field, error)); + } - let field_type: syn::Type = - field_type_attr.parse_args_with(|input: ParseStream| -> syn::Result { - if input.parse::().is_ok() { - input.parse::()?; - input.parse::() - } else { - Err(syn::Error::new_spanned( - field_type_attr, - "expected `field_type = SomeType`", - )) - } - })?; + let name = field.ident.as_ref().unwrap().to_owned(); let docstring = self.get_docstring_impl(&field.attrs); Ok(Field { name, docstring, - field_type, + field_type: field_type.clone(), }) } @@ -250,7 +244,7 @@ impl ProviderCodegen { } Ok(syn::parse_quote_spanned! 
{self.span=> - fn documentation(&self) -> Option { + fn documentation(&self) -> starlark::docs::DocItem { let docstring = #provider_docstring; let field_names = [ #(#field_names),* @@ -261,13 +255,14 @@ impl ProviderCodegen { let field_types = [ #(#field_types),* ]; - Some(crate::interpreter::rule_defs::provider::doc::provider_callable_documentation( + crate::interpreter::rule_defs::provider::doc::provider_callable_documentation( Some(#create_func), + BUILTIN_PROVIDER_TY.instance(), &docstring, &field_names, &field_docs, &field_types, - )) + ) } }) } @@ -299,7 +294,7 @@ impl ProviderCodegen { let name_str = self.name_str()?; let field_names = self.field_names()?; Ok(syn::parse_quote_spanned! { self.span=> - impl std::fmt::Display for #gen_name { + impl std::fmt::Display for #gen_name { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { display_container::fmt_keyed_container( f, @@ -307,8 +302,8 @@ impl ProviderCodegen { ")", "=", [ - #((stringify!(#field_names), &self.#field_names)),* - ].into_iter() + #((stringify!(#field_names), &self.#field_names.get())),* + ] ) } } @@ -328,7 +323,7 @@ impl ProviderCodegen { }, syn::parse_quote_spanned! { self.span=> #[starlark::values::starlark_value(type = #name_str)] - impl<'v, V: starlark::values::ValueLike<'v> + 'v> starlark::values::StarlarkValue<'v> + impl<'v, V: starlark::values::ValueLike<'v>> starlark::values::StarlarkValue<'v> for #gen_name where Self: starlark::any::ProvidesStaticType<'v>, @@ -338,7 +333,6 @@ impl ProviderCodegen { starlark::environment::MethodsStatic::new(); RES.methods(|x| { - crate::interpreter::rule_defs::provider::provider_methods(x); #provider_methods_func_name(x); }) } @@ -348,7 +342,7 @@ impl ProviderCodegen { &dyn crate::interpreter::rule_defs::provider::ProviderLike>(self); } - fn equals(&self, other: starlark::values::Value<'v>) -> anyhow::Result { + fn equals(&self, other: starlark::values::Value<'v>) -> starlark::Result { let this: &#name = starlark::coerce::coerce(self); let other: &#name = match #name::from_value(other) { Some(other) => other, @@ -356,7 +350,7 @@ impl ProviderCodegen { }; #( - if !this.#field_names.equals(other.#field_names)? { + if !this.#field_names.to_value().get().equals(other.#field_names.to_value().get())? { return Ok(false); } )* @@ -388,7 +382,7 @@ impl ProviderCodegen { #( s.serialize_entry( stringify!(#field_names), - &self.#field_names + &self.#field_names.get().to_value() )?; )* s.end() @@ -402,7 +396,7 @@ impl ProviderCodegen { let field_names = self.field_names()?; let callable_name = self.callable_name()?; Ok(syn::parse_quote_spanned! 
{ self.span=> - impl<'v, V: starlark::values::ValueLike<'v> + 'v> crate::interpreter::rule_defs::provider::ProviderLike<'v> for #gen_name + impl<'v, V: starlark::values::ValueLike<'v>> crate::interpreter::rule_defs::provider::ProviderLike<'v> for #gen_name where Self: std::fmt::Debug, { @@ -410,16 +404,9 @@ impl ProviderCodegen { #callable_name::provider_id() } - fn get_field(&self, name: &str) -> Option> { - match name { - #(stringify!(#field_names) => Some(self.#field_names.to_value()),)* - _ => None, - } - } - fn items(&self) -> Vec<(&str, starlark::values::Value<'v>)> { vec![ - #((stringify!(#field_names), self.#field_names.to_value())),* + #((stringify!(#field_names), self.#field_names.get().to_value())),* ] } } @@ -450,7 +437,6 @@ impl ProviderCodegen { } fn callable_impl_starlark_value(&self) -> syn::Result> { - let name_str = self.name_str()?; let callable_name = self.callable_name()?; let documentation_function = self.documentation_function()?; let typechecker_ty_function = self.typechecker_ty_function()?; @@ -465,20 +451,12 @@ impl ProviderCodegen { #[starlark::values::starlark_value(type = #callable_name_snake_str)] impl<'v> starlark::values::StarlarkValue<'v> for #callable_name { - fn get_methods() -> Option<&'static starlark::environment::Methods> { - static RES: starlark::environment::MethodsStatic = - starlark::environment::MethodsStatic::new(); - // TODO(nmj): This should use the docstring from the attribute, rather than - // None - RES.methods(|x| x.set_attribute("type", #name_str, None)) - } - fn invoke( &self, _me: starlark::values::Value<'v>, args: &starlark::eval::Arguments<'v, '_>, - eval: &mut starlark::eval::Evaluator<'v, '_>, - ) -> anyhow::Result> { + eval: &mut starlark::eval::Evaluator<'v, '_, '_>, + ) -> starlark::Result> { static RES: starlark::environment::GlobalsStatic = starlark::environment::GlobalsStatic::new(); starlark::values::ValueLike::invoke( @@ -505,8 +483,8 @@ impl ProviderCodegen { let callable_name = self.callable_name()?; Ok(syn::parse_quote_spanned! { self.span=> impl buck2_interpreter::types::provider::callable::ProviderCallableLike for #callable_name { - fn id(&self) -> Option<&std::sync::Arc> { - Some(self.id) + fn id(&self) -> anyhow::Result<&std::sync::Arc> { + Ok(self.id) } } }) @@ -597,7 +575,7 @@ impl ProviderCodegen { fn #field_names<'v>(this: & #name) -> anyhow::Result> { - Ok(starlark::values::ValueOfUnchecked::new(this.#field_names)) + Ok(this.#field_names.to_value()) } )* } @@ -654,17 +632,23 @@ pub(crate) fn define_provider( "should have exactly one type param", )); } + + let type_bound_error = "type param should be V: ValueLifetimeless"; let type_param = type_params.pop().unwrap(); - if let Some(bound) = type_param.bounds.iter().next() { - return Err(syn::Error::new_spanned( - bound, - "type param should have no bounds", - )); + let Some(bound) = type_param.bounds.iter().into_singleton() else { + return Err(syn::Error::new_spanned(type_param, type_bound_error)); + }; + match bound { + TypeParamBound::Trait(b) => { + if b.to_token_stream().to_string() != "ValueLifetimeless" { + return Err(syn::Error::new_spanned(b, type_bound_error)); + } + } + _ => { + return Err(syn::Error::new_spanned(bound, type_bound_error)); + } } - // TODO(cjhopman): Verify `V` type param as expected - // TODO(cjhopman): Verify all fields are type `V` - let input = &codegen.input; let input: syn::Item = syn::parse_quote_spanned! 
{ codegen.span=> #input diff --git a/app/buck2_build_api_tests/BUCK b/app/buck2_build_api_tests/BUCK index ff274ed7f7390..929cd9bbbaae6 100644 --- a/app/buck2_build_api_tests/BUCK +++ b/app/buck2_build_api_tests/BUCK @@ -1,12 +1,11 @@ -load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") +load("@fbcode_macros//build_defs:rust_unittest.bzl", "rust_unittest") oncall("build_infra") -rust_library( +rust_unittest( name = "buck2_build_api_tests", srcs = glob(["src/**/*.rs"]), - test_deps = [ + deps = [ "fbsource//third-party/rust:anyhow", "fbsource//third-party/rust:assert_matches", "fbsource//third-party/rust:async-trait", @@ -19,6 +18,7 @@ rust_library( "fbsource//third-party/rust:serde_json", "fbsource//third-party/rust:tokio", "//buck2/allocative/allocative:allocative", + "//buck2/app/buck2_action_impl:buck2_action_impl", "//buck2/app/buck2_analysis:buck2_analysis", "//buck2/app/buck2_anon_target:buck2_anon_target", "//buck2/app/buck2_artifact:buck2_artifact", @@ -27,17 +27,20 @@ rust_library( "//buck2/app/buck2_configured:buck2_configured", "//buck2/app/buck2_core:buck2_core", "//buck2/app/buck2_data:buck2_data", + "//buck2/app/buck2_directory:buck2_directory", + "//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_events:buck2_events", "//buck2/app/buck2_execute:buck2_execute", "//buck2/app/buck2_file_watcher:buck2_file_watcher", + "//buck2/app/buck2_http:buck2_http", "//buck2/app/buck2_interpreter:buck2_interpreter", "//buck2/app/buck2_interpreter_for_build:buck2_interpreter_for_build", "//buck2/app/buck2_node:buck2_node", + "//buck2/app/buck2_transition:buck2_transition", "//buck2/app/buck2_util:buck2_util", "//buck2/dice/dice:dice", "//buck2/gazebo/dupe:dupe", "//buck2/gazebo/gazebo:gazebo", - "//buck2/shed/provider:provider", "//buck2/starlark-rust/starlark:starlark", "//buck2/starlark-rust/starlark_map:starlark_map", "//common/rust/shed/sorted_vector_map:sorted_vector_map", diff --git a/app/buck2_build_api_tests/Cargo.toml b/app/buck2_build_api_tests/Cargo.toml index 5060b68c914aa..3293e335d2acc 100644 --- a/app/buck2_build_api_tests/Cargo.toml +++ b/app/buck2_build_api_tests/Cargo.toml @@ -1,8 +1,10 @@ [package] +description = "Tests for buck2_build_api crate" +edition = "2021" +license = { workspace = true } name = "buck2_build_api_tests" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Tests for buck2_build_api crate" [dev-dependencies] anyhow = { workspace = true } @@ -19,26 +21,30 @@ tokio = { workspace = true } sorted_vector_map = { workspace = true } allocative = { workspace = true } -dupe = { workspace = true } dice = { workspace = true } +dupe = { workspace = true } gazebo = { workspace = true } itertools = { workspace = true } -provider = { workspace = true } starlark = { workspace = true } starlark_map = { workspace = true } +buck2_action_impl = { workspace = true } buck2_analysis = { workspace = true } -buck2_artifact = { workspace = true } buck2_anon_target = { workspace = true } +buck2_artifact = { workspace = true } buck2_build_api = { workspace = true } buck2_common = { workspace = true } buck2_configured = { workspace = true } buck2_core = { workspace = true } buck2_data = { workspace = true } +buck2_directory = { workspace = true } +buck2_error = { workspace = true } +buck2_events = { workspace = true } +buck2_execute = { workspace = true } buck2_file_watcher = { workspace = true } +buck2_http = { workspace = true } buck2_interpreter = { workspace = 
true } buck2_interpreter_for_build = { workspace = true } buck2_node = { workspace = true } -buck2_events = { workspace = true } -buck2_execute = { workspace = true } +buck2_transition = { workspace = true } buck2_util = { workspace = true } diff --git a/app/buck2_build_api_tests/src/actions/mod.rs b/app/buck2_build_api_tests/src/actions.rs similarity index 100% rename from app/buck2_build_api_tests/src/actions/mod.rs rename to app/buck2_build_api_tests/src/actions.rs diff --git a/app/buck2_build_api_tests/src/actions/calculation.rs b/app/buck2_build_api_tests/src/actions/calculation.rs index 4651dc64be6c8..f7153e46e55f9 100644 --- a/app/buck2_build_api_tests/src/actions/calculation.rs +++ b/app/buck2_build_api_tests/src/actions/calculation.rs @@ -7,58 +7,66 @@ * of this source tree. */ +use std::collections::HashMap; use std::path::PathBuf; use std::sync::Arc; use std::sync::Mutex; use assert_matches::assert_matches; +use buck2_analysis::analysis::calculation::AnalysisKey; +use buck2_artifact::actions::key::ActionIndex; +use buck2_artifact::actions::key::ActionKey; use buck2_artifact::artifact::artifact_type::testing::BuildArtifactTestingExt; use buck2_artifact::artifact::artifact_type::Artifact; use buck2_artifact::artifact::build_artifact::BuildArtifact; use buck2_artifact::artifact::source_artifact::SourceArtifact; -use buck2_artifact::deferred::id::DeferredId; +use buck2_artifact::deferred::key::DeferredHolderKey; use buck2_build_api::actions::calculation::command_details; use buck2_build_api::actions::calculation::ActionCalculation; +use buck2_build_api::actions::execute::dice_data::set_fallback_executor_config; +use buck2_build_api::actions::execute::dice_data::CommandExecutorResponse; +use buck2_build_api::actions::execute::dice_data::HasCommandExecutor; +use buck2_build_api::actions::execute::dice_data::SetCommandExecutor; +use buck2_build_api::actions::execute::dice_data::SetInvalidationTrackingConfig; +use buck2_build_api::actions::execute::dice_data::SetReClient; use buck2_build_api::actions::impls::run_action_knobs::RunActionKnobs; +use buck2_build_api::actions::registry::RecordedActions; use buck2_build_api::actions::Action; use buck2_build_api::actions::RegisteredAction; +use buck2_build_api::analysis::registry::RecordedAnalysisValues; +use buck2_build_api::analysis::AnalysisResult; use buck2_build_api::artifact_groups::calculation::ArtifactGroupCalculation; use buck2_build_api::artifact_groups::ArtifactGroup; use buck2_build_api::context::SetBuildContextData; -use buck2_build_api::deferred::calculation::DeferredResolve; -use buck2_build_api::deferred::types::AnyValue; -use buck2_build_api::deferred::types::DeferredValueAnyReady; use buck2_build_api::keep_going::HasKeepGoing; use buck2_build_api::spawner::BuckSpawner; use buck2_common::dice::cells::SetCellResolver; use buck2_common::dice::data::testing::SetTestingIoProvider; -use buck2_common::dice::file_ops::keys::FileOpsValue; -use buck2_common::dice::file_ops::testing::FileOpsKey; use buck2_common::external_symlink::ExternalSymlink; use buck2_common::file_ops::testing::TestFileOps; use buck2_common::file_ops::FileMetadata; use buck2_common::file_ops::TrackedFileDigest; -use buck2_common::http::HttpClientBuilder; use buck2_common::http::SetHttpClient; -use buck2_common::result::ToSharedResultExt; -use buck2_core::buck_path::path::BuckPath; -use buck2_core::category::Category; +use buck2_configured::nodes::calculation::ConfiguredTargetNodeKey; +use buck2_core::base_deferred_key::BaseDeferredKey; +use 
buck2_core::category::CategoryRef; use buck2_core::cells::cell_path::CellPath; use buck2_core::cells::cell_root_path::CellRootPathBuf; use buck2_core::cells::name::CellName; use buck2_core::cells::paths::CellRelativePathBuf; use buck2_core::cells::CellResolver; +use buck2_core::configuration::compatibility::MaybeCompatible; use buck2_core::configuration::data::ConfigurationData; -use buck2_core::directory::DirectoryEntry; +use buck2_core::execution_types::execution::ExecutionPlatformResolution; use buck2_core::execution_types::executor_config::CommandExecutorConfig; use buck2_core::fs::artifact_path_resolver::ArtifactFs; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; use buck2_core::fs::project::ProjectRootTemp; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; -use buck2_core::package::package_relative_path::PackageRelativePathBuf; -use buck2_core::package::PackageLabel; -use buck2_core::target::label::TargetLabel; -use buck2_core::target::name::TargetNameRef; +use buck2_core::package::source_path::SourcePath; +use buck2_core::target::configured_target_label::ConfiguredTargetLabel; +use buck2_core::target::label::label::TargetLabel; +use buck2_directory::directory::entry::DirectoryEntry; use buck2_events::dispatch::with_dispatcher_async; use buck2_events::dispatch::EventDispatcher; use buck2_execute::artifact_value::ArtifactValue; @@ -69,11 +77,6 @@ use buck2_execute::execute::action_digest::ActionDigest; use buck2_execute::execute::blocking::testing::DummyBlockingExecutor; use buck2_execute::execute::blocking::SetBlockingExecutor; use buck2_execute::execute::cache_uploader::NoOpCacheUploader; -use buck2_execute::execute::dice_data::set_fallback_executor_config; -use buck2_execute::execute::dice_data::CommandExecutorResponse; -use buck2_execute::execute::dice_data::HasCommandExecutor; -use buck2_execute::execute::dice_data::SetCommandExecutor; -use buck2_execute::execute::dice_data::SetReClient; use buck2_execute::execute::kind::CommandExecutionKind; use buck2_execute::execute::output::CommandStdStreams; use buck2_execute::execute::prepared::NoOpCommandOptionalExecutor; @@ -87,6 +90,8 @@ use buck2_execute::materialize::materializer::SetMaterializer; use buck2_execute::materialize::nodisk::NoDiskMaterializer; use buck2_execute::re::manager::ManagedRemoteExecutionClient; use buck2_file_watcher::mergebase::SetMergebase; +use buck2_http::HttpClientBuilder; +use buck2_node::nodes::configured::ConfiguredTargetNode; use dice::testing::DiceBuilder; use dice::DiceTransaction; use dice::UserComputationData; @@ -97,34 +102,18 @@ use sorted_vector_map::sorted_vector_map; use crate::actions::testings::SimpleAction; -fn create_test_build_artifact( - package_cell: &str, - package_path: &str, - target_name: &str, -) -> BuildArtifact { - let configured_target_label = TargetLabel::new( - PackageLabel::testing_new(package_cell, package_path), - TargetNameRef::unchecked_new(target_name), - ) - .configure(ConfigurationData::testing_new()); - let forward_relative_path_buf = ForwardRelativePathBuf::unchecked_new("bar.out".into()); - let deferred_id = DeferredId::testing_new(0); - BuildArtifact::testing_new( - configured_target_label, - forward_relative_path_buf, - deferred_id, - ) +fn create_test_configured_target_label() -> ConfiguredTargetLabel { + TargetLabel::testing_parse("cell//pkg:foo").configure(ConfigurationData::testing_new()) } -fn create_test_source_artifact( - package_cell: &str, - package_path: &str, - target_name: &str, -) -> SourceArtifact { - 
SourceArtifact::new(BuckPath::testing_new( - PackageLabel::testing_new(package_cell, package_path), - PackageRelativePathBuf::unchecked_new(target_name.into()), - )) +fn create_test_build_artifact() -> BuildArtifact { + let configured_target_label = create_test_configured_target_label(); + let deferred_id = ActionIndex::new(0); + BuildArtifact::testing_new(configured_target_label, "bar.out", deferred_id) +} + +fn create_test_source_artifact(package_label: &str, target_name: &str) -> SourceArtifact { + SourceArtifact::new(SourcePath::testing_new(package_label, target_name)) } fn registered_action( @@ -139,14 +128,51 @@ fn registered_action( Arc::new(registered_action) } -fn mock_deferred_resolution_calculation( - dice_builder: DiceBuilder, - deferred_resolve: DeferredResolve, +fn mock_analysis_for_action_resolution( + mut dice_builder: DiceBuilder, + action_key: &ActionKey, registered_action_arc: Arc, ) -> DiceBuilder { - let arc_any: Arc = Arc::new(registered_action_arc); - let an_any = DeferredValueAnyReady::AnyValue(arc_any); - dice_builder.mock_and_return(deferred_resolve, anyhow::Ok(an_any).shared_error()) + let configured_target_label = create_test_configured_target_label(); + let configured_node_key = ConfiguredTargetNodeKey(configured_target_label.dupe()); + + assert_eq!( + &DeferredHolderKey::Base(BaseDeferredKey::TargetLabel(configured_target_label.dupe())), + action_key.holder_key() + ); + + let mut actions = RecordedActions::new(); + actions.insert(action_key.dupe(), registered_action_arc); + + dice_builder = dice_builder.mock_and_return( + AnalysisKey(configured_target_label.dupe()), + buck2_error::Ok(MaybeCompatible::Compatible(AnalysisResult::new( + RecordedAnalysisValues::testing_new( + action_key.holder_key().dupe(), + Vec::new(), + actions, + ), + None, + HashMap::new(), + 0, + 0, + None, + ))), + ); + + dice_builder.mock_and_return( + configured_node_key, + Ok(MaybeCompatible::Compatible( + ConfiguredTargetNode::testing_new( + configured_target_label, + "foo_lib", + ExecutionPlatformResolution::new(None, Vec::new()), + vec![], + vec![], + None, + ), + )), + ) } async fn make_default_dice_state( @@ -166,6 +192,7 @@ async fn make_default_dice_state( dice_builder = dice_builder.set_data(|data| { data.set_testing_io_provider(temp_fs); data.set_digest_config(DigestConfig::testing_default()); + data.set_invalidation_tracking_config(true); }); for mock in mocks.into_iter() { @@ -201,7 +228,7 @@ async fn make_default_dice_state( extra.set_blocking_executor(Arc::new(DummyBlockingExecutor { fs })); extra.set_materializer(Arc::new(NoDiskMaterializer)); extra.set_re_client(ManagedRemoteExecutionClient::testing_new_dummy()); - extra.set_http_client(HttpClientBuilder::https_with_system_roots()?.build()); + extra.set_http_client(HttpClientBuilder::https_with_system_roots().await?.build()); extra.set_mergebase(Default::default()); extra.data.set(EventDispatcher::null()); extra.data.set(RunActionKnobs::default()); @@ -216,33 +243,32 @@ async fn make_default_dice_state( #[tokio::test] async fn test_get_action_for_artifact() -> anyhow::Result<()> { - let build_artifact = create_test_build_artifact("cell", "pkg", "foo"); - let deferred_resolve = DeferredResolve(build_artifact.key().deferred_key().dupe()); + let build_artifact = create_test_build_artifact(); let registered_action = registered_action( build_artifact.dupe(), Box::new(SimpleAction::new( indexset![], indexset![build_artifact.dupe()], vec![], - Category::try_from("fake_action").unwrap(), + 
CategoryRef::new("fake_action").unwrap().to_owned(), None, )), ); let mut dice_builder = DiceBuilder::new(); - dice_builder = mock_deferred_resolution_calculation( + dice_builder = mock_analysis_for_action_resolution( dice_builder, - deferred_resolve, + build_artifact.key(), registered_action.dupe(), ); - let dice_computations = dice_builder + let mut dice_computations = dice_builder .build(UserComputationData::new())? .commit() .await; let result = with_dispatcher_async( EventDispatcher::null(), - dice_computations.get_action(build_artifact.key()), + ActionCalculation::get_action(&mut dice_computations, build_artifact.key()), ) .await; assert_eq!(result?, registered_action); @@ -252,35 +278,34 @@ async fn test_get_action_for_artifact() -> anyhow::Result<()> { #[tokio::test] async fn test_build_action() -> anyhow::Result<()> { let temp_fs = ProjectRootTemp::new()?; - let build_artifact = create_test_build_artifact("cell", "pkg", "foo"); - let deferred_resolve = DeferredResolve(build_artifact.key().deferred_key().dupe()); + let build_artifact = create_test_build_artifact(); let registered_action = registered_action( build_artifact.dupe(), Box::new(SimpleAction::new( indexset![], indexset![build_artifact.dupe()], vec!["foo".to_owned(), "cmd".to_owned()], - Category::try_from("fake_action").unwrap(), + CategoryRef::new("fake_action").unwrap().to_owned(), None, )), ); let dry_run_tracker = Arc::new(Mutex::new(vec![])); - let dice_computations = make_default_dice_state( + let mut dice_computations = make_default_dice_state( dry_run_tracker.dupe(), &temp_fs, vec![{ let action = registered_action.dupe(); + let action_key = build_artifact.key().dupe(); Box::new(move |builder| { - mock_deferred_resolution_calculation(builder, deferred_resolve, action) + mock_analysis_for_action_resolution(builder, &action_key, action) }) }], ) .await?; - let result = dice_computations - .build_action(registered_action.key()) - .await; + let result = + ActionCalculation::build_action(&mut dice_computations, registered_action.key()).await; assert!(result.is_ok()); @@ -302,31 +327,31 @@ async fn test_build_action() -> anyhow::Result<()> { #[tokio::test] async fn test_build_artifact() -> anyhow::Result<()> { let temp_fs = ProjectRootTemp::new()?; - let build_artifact = create_test_build_artifact("cell", "pkg", "foo"); - let deferred_resolve = DeferredResolve(build_artifact.key().deferred_key().dupe()); + let build_artifact = create_test_build_artifact(); let registered_action = registered_action( build_artifact.dupe(), Box::new(SimpleAction::new( indexset![], indexset![build_artifact.dupe()], vec!["bar".to_owned(), "cmd".to_owned()], - Category::try_from("fake_action").unwrap(), + CategoryRef::new("fake_action").unwrap().to_owned(), None, )), ); let dry_run_tracker = Arc::new(Mutex::new(vec![])); - let dice_computations = make_default_dice_state(dry_run_tracker.dupe(), &temp_fs, { + let mut dice_computations = make_default_dice_state(dry_run_tracker.dupe(), &temp_fs, { let registered_action = registered_action.dupe(); + let action_key = build_artifact.key().dupe(); vec![Box::new(move |builder| { - mock_deferred_resolution_calculation(builder, deferred_resolve, registered_action) + mock_analysis_for_action_resolution(builder, &action_key, registered_action) })] }) .await?; let result = with_dispatcher_async( EventDispatcher::null(), - dice_computations.build_artifact(&build_artifact), + ActionCalculation::build_artifact(&mut dice_computations, &build_artifact), ) .await; @@ -349,24 +374,24 @@ async fn 
test_build_artifact() -> anyhow::Result<()> { #[tokio::test] async fn test_ensure_artifact_build_artifact() -> anyhow::Result<()> { let temp_fs = ProjectRootTemp::new()?; - let build_artifact = create_test_build_artifact("cell", "pkg", "foo"); - let deferred_resolve = DeferredResolve(build_artifact.key().deferred_key().dupe()); + let build_artifact = create_test_build_artifact(); let registered_action = registered_action( build_artifact.dupe(), Box::new(SimpleAction::new( indexset![], indexset![build_artifact.dupe()], vec!["ensure".to_owned(), "cmd".to_owned()], - Category::try_from("fake_action").unwrap(), + CategoryRef::new("fake_action").unwrap().to_owned(), None, )), ); let dry_run_tracker = Arc::new(Mutex::new(vec![])); - let dice_computations = make_default_dice_state(dry_run_tracker.dupe(), &temp_fs, { + let mut dice_computations = make_default_dice_state(dry_run_tracker.dupe(), &temp_fs, { let registered_action = registered_action.dupe(); + let action_key = build_artifact.key().dupe(); vec![Box::new(move |builder| { - mock_deferred_resolution_calculation(builder, deferred_resolve, registered_action) + mock_analysis_for_action_resolution(builder, &action_key, registered_action) })] }) .await?; @@ -403,7 +428,7 @@ async fn test_ensure_artifact_source_artifact() -> anyhow::Result<()> { CellName::testing_new("cell"), CellRelativePathBuf::unchecked_new("pkg/src.cpp".to_owned()), ); - let source_artifact = create_test_source_artifact("cell", "pkg", "src.cpp"); + let source_artifact = create_test_source_artifact("cell//pkg", "src.cpp"); let metadata = FileMetadata { digest: TrackedFileDigest::from_content(b"content", digest_config.cas_digest_config()), is_executable: true, @@ -412,13 +437,9 @@ async fn test_ensure_artifact_source_artifact() -> anyhow::Result<()> { let dice_builder = DiceBuilder::new().set_data(|data| { data.set_digest_config(DigestConfig::testing_default()); }); - let dice_computations = dice_builder - .mock_and_return( - FileOpsKey(), - Ok(FileOpsValue(Arc::new( - TestFileOps::new_with_files_metadata(btreemap![path => metadata.dupe()]), - ))), - ) + let file_ops = TestFileOps::new_with_files_metadata(btreemap![path => metadata.dupe()]); + let mut dice_computations = file_ops + .mock_in_cell(CellName::testing_new("cell"), dice_builder) .build(UserComputationData::new())? 
.commit() .await; @@ -453,11 +474,11 @@ async fn test_ensure_artifact_external_symlink() -> anyhow::Result<()> { CellName::testing_new("cell"), CellRelativePathBuf::unchecked_new("proj/to_gvfs/include".to_owned()), ); - let source_artifact = create_test_source_artifact("cell", "proj/to_gvfs", "include"); + let source_artifact = create_test_source_artifact("cell//proj/to_gvfs", "include"); let symlink = Arc::new( ExternalSymlink::new( PathBuf::from("/mnt/gvfs"), - Some(ForwardRelativePathBuf::unchecked_new("include".to_owned())), + ForwardRelativePathBuf::new("include".to_owned()).unwrap(), ) .unwrap(), ); @@ -465,13 +486,9 @@ async fn test_ensure_artifact_external_symlink() -> anyhow::Result<()> { let dice_builder = DiceBuilder::new().set_data(|data| { data.set_digest_config(DigestConfig::testing_default()); }); - let dice_computations = dice_builder - .mock_and_return( - FileOpsKey(), - Ok(FileOpsValue(Arc::new(TestFileOps::new_with_symlinks( - btreemap![path => symlink.dupe()], - )))), - ) + let file_ops = TestFileOps::new_with_symlinks(btreemap![path => symlink.dupe()]); + let mut dice_computations = file_ops + .mock_in_cell(CellName::testing_new("cell"), dice_builder) .build(UserComputationData::new())? .commit() .await; diff --git a/app/buck2_build_api_tests/src/actions/impls/mod.rs b/app/buck2_build_api_tests/src/actions/impls.rs similarity index 100% rename from app/buck2_build_api_tests/src/actions/impls/mod.rs rename to app/buck2_build_api_tests/src/actions/impls.rs diff --git a/app/buck2_build_api_tests/src/actions/impls/json.rs b/app/buck2_build_api_tests/src/actions/impls/json.rs index 8288b1264cb1e..7d05e34959398 100644 --- a/app/buck2_build_api_tests/src/actions/impls/json.rs +++ b/app/buck2_build_api_tests/src/actions/impls/json.rs @@ -11,6 +11,7 @@ use anyhow::Context; use buck2_artifact::artifact::artifact_type::Artifact; use buck2_artifact::artifact::artifact_type::OutputArtifact; use buck2_build_api::actions::impls::json::visit_json_artifacts; +use buck2_build_api::actions::impls::json::JsonUnpack; use buck2_build_api::actions::impls::json::SerializeValue; use buck2_build_api::artifact_groups::ArtifactGroup; use buck2_build_api::interpreter::rule_defs::artifact::starlark_artifact_like::ValueAsArtifactLike; @@ -21,6 +22,7 @@ use dupe::Dupe; use indoc::indoc; use starlark::environment::GlobalsBuilder; use starlark::starlark_module; +use starlark::values::UnpackValue; use starlark::values::Value; use crate::interpreter::rule_defs::artifact::testing::artifactory; @@ -63,11 +65,16 @@ fn test_tagging() -> anyhow::Result<()> { fn check_passthrough<'v>(tagged: Value<'v>, value: Value<'v>) -> anyhow::Result> { let json1 = serde_json::to_string(&SerializeValue { - value: tagged, + value: JsonUnpack::unpack_value_err(tagged)?, fs: None, + absolute: false, })?; - let json2 = serde_json::to_string(&SerializeValue { value, fs: None })?; + let json2 = serde_json::to_string(&SerializeValue { + value: JsonUnpack::unpack_value_err(value)?, + fs: None, + absolute: false, + })?; assert_eq!(json1, json2); diff --git a/app/buck2_build_api_tests/src/actions/registry.rs b/app/buck2_build_api_tests/src/actions/registry.rs index eea6b1ee72f35..54de58a60bb15 100644 --- a/app/buck2_build_api_tests/src/actions/registry.rs +++ b/app/buck2_build_api_tests/src/actions/registry.rs @@ -8,19 +8,18 @@ */ use assert_matches::assert_matches; +use buck2_artifact::actions::key::ActionIndex; use buck2_artifact::artifact::artifact_type::testing::ArtifactTestingExt; use 
buck2_artifact::artifact::artifact_type::testing::BuildArtifactTestingExt; use buck2_artifact::artifact::build_artifact::BuildArtifact; -use buck2_artifact::deferred::id::DeferredId; -use buck2_build_api::actions::key::ActionKeyExt; +use buck2_artifact::deferred::key::DeferredHolderKey; use buck2_build_api::actions::registry::ActionsRegistry; use buck2_build_api::actions::ActionErrors; use buck2_build_api::analysis::registry::AnalysisValueFetcher; use buck2_build_api::artifact_groups::ArtifactGroup; -use buck2_build_api::deferred::types::BaseKey; -use buck2_build_api::deferred::types::DeferredRegistry; use buck2_core::base_deferred_key::BaseDeferredKey; use buck2_core::category::Category; +use buck2_core::category::CategoryRef; use buck2_core::configuration::data::ConfigurationData; use buck2_core::configuration::pair::ConfigurationNoExec; use buck2_core::execution_types::execution::ExecutionPlatform; @@ -32,6 +31,7 @@ use buck2_core::target::configured_target_label::ConfiguredTargetLabel; use buck2_execute::execute::request::OutputType; use dupe::Dupe; use indexmap::indexset; +use itertools::Itertools; use crate::actions::testings::SimpleUnregisteredAction; @@ -41,7 +41,11 @@ fn declaring_artifacts() -> anyhow::Result<()> { "cell//pkg:foo", ConfigurationData::testing_new(), )); - let mut actions = ActionsRegistry::new(base.dupe(), ExecutionPlatformResolution::unspecified()); + let mut actions = ActionsRegistry::new( + DeferredHolderKey::Base(base.dupe()), + ExecutionPlatformResolution::unspecified(), + None, + ); let out1 = ForwardRelativePathBuf::unchecked_new("bar.out".into()); let buckout1 = BuckOutPath::new(base.dupe(), out1.clone()); let declared1 = actions.declare_artifact(None, out1.clone(), OutputType::File, None)?; @@ -71,13 +75,10 @@ fn declaring_artifacts() -> anyhow::Result<()> { #[test] fn claiming_conflicting_path() -> anyhow::Result<()> { - let target = ConfiguredTargetLabel::testing_parse( - "cell//pkg:my_target", - ConfigurationData::testing_new(), - ); let mut actions = ActionsRegistry::new( - BaseDeferredKey::TargetLabel(target.dupe()), + DeferredHolderKey::testing_new("cell//pkg:my_target"), ExecutionPlatformResolution::unspecified(), + None, ); let out1 = ForwardRelativePathBuf::unchecked_new("foo/a/1".into()); @@ -140,39 +141,39 @@ fn register_actions() -> anyhow::Result<()> { "cell//pkg:foo", ConfigurationData::testing_new(), )); - let mut deferreds = DeferredRegistry::new(BaseKey::Base(base.dupe())); - let mut actions = ActionsRegistry::new(base.dupe(), ExecutionPlatformResolution::unspecified()); + let mut actions = ActionsRegistry::new( + DeferredHolderKey::Base(base.dupe()), + ExecutionPlatformResolution::unspecified(), + None, + ); let out = ForwardRelativePathBuf::unchecked_new("bar.out".into()); let declared = actions.declare_artifact(None, out, OutputType::File, None)?; let inputs = indexset![ArtifactGroup::Artifact( BuildArtifact::testing_new( base.unpack_target_label().unwrap().dupe(), - ForwardRelativePathBuf::unchecked_new("input".into()), - DeferredId::testing_new(1), + "input", + ActionIndex::new(1), ) .into() )]; let outputs = indexset![declared.as_output()]; - let unregistered_action = - SimpleUnregisteredAction::new(vec![], Category::try_from("fake_action").unwrap(), None); - assert_eq!( - actions - .register(&mut deferreds, inputs, outputs, unregistered_action) - .is_ok(), - true + let unregistered_action = SimpleUnregisteredAction::new( + vec![], + CategoryRef::new("fake_action").unwrap().to_owned(), + None, ); - 
assert_eq!(actions.testing_pending().count(), 1); + let key = actions.register( + &DeferredHolderKey::Base(base.dupe()), + inputs, + outputs, + unregistered_action.clone(), + )?; + + assert_eq!(actions.testing_pending_action_keys(), vec![key]); assert_eq!(declared.testing_is_bound(), true); - assert_eq!( - actions - .testing_pending() - .any(|reserved| reserved.data() - == declared.testing_action_key().unwrap().deferred_data()), - true - ); Ok(()) } @@ -183,9 +184,8 @@ fn finalizing_actions() -> anyhow::Result<()> { "cell//pkg:foo", ConfigurationData::testing_new(), )); - let mut deferreds = DeferredRegistry::new(BaseKey::Base(base.dupe())); let mut actions = ActionsRegistry::new( - base.dupe(), + DeferredHolderKey::Base(base.dupe()), ExecutionPlatformResolution::new( Some(ExecutionPlatform::legacy_execution_platform( CommandExecutorConfig::testing_local(), @@ -193,6 +193,7 @@ fn finalizing_actions() -> anyhow::Result<()> { )), Vec::new(), ), + None, ); let out = ForwardRelativePathBuf::unchecked_new("bar.out".into()); let declared = actions.declare_artifact(None, out, OutputType::File, None)?; @@ -200,36 +201,31 @@ fn finalizing_actions() -> anyhow::Result<()> { let inputs = indexset![ArtifactGroup::Artifact( BuildArtifact::testing_new( base.unpack_target_label().unwrap().dupe(), - ForwardRelativePathBuf::unchecked_new("input".into()), - DeferredId::testing_new(1), + "input", + ActionIndex::new(1), ) .into() )]; let outputs = indexset![declared.as_output()]; - let unregistered_action = - SimpleUnregisteredAction::new(vec![], Category::try_from("fake_action").unwrap(), None); - actions.register(&mut deferreds, inputs, outputs, unregistered_action)?; - - let result = actions.ensure_bound(&mut deferreds, &AnalysisValueFetcher::default()); - assert_eq!(result.is_ok(), true, "Expected Ok(_), got `{:?}`", result); - - let registered_deferreds = deferreds.take_result()?; + let unregistered_action = SimpleUnregisteredAction::new( + vec![], + CategoryRef::new("fake_action").unwrap().to_owned(), + None, + ); + let holder_key = DeferredHolderKey::Base(base.dupe()); + actions.register(&holder_key, inputs, outputs, unregistered_action)?; - assert_eq!(registered_deferreds.len(), 1); + let result = actions.ensure_bound(&AnalysisValueFetcher::testing_new(holder_key))?; assert_eq!( - registered_deferreds - .get( - declared - .testing_action_key() - .unwrap() - .deferred_key() - .id() - .as_usize() - ) - .is_some(), - true + result + .lookup(&declared.testing_action_key().unwrap()) + .is_ok(), + true, + "Expected results to contain `{}`, had `[{}]`", + declared.testing_action_key().unwrap(), + result.iter_actions().map(|v| v.key()).join(", ") ); Ok(()) @@ -268,11 +264,7 @@ fn duplicate_category_identifier() { fn category_identifier_test( action_names: &[(&'static str, Option<&'static str>)], ) -> anyhow::Result<()> { - let base = BaseDeferredKey::TargetLabel(ConfiguredTargetLabel::testing_parse( - "cell//pkg:foo", - ConfigurationData::testing_new(), - )); - let mut deferreds = DeferredRegistry::new(BaseKey::Base(base.dupe())); + let base = DeferredHolderKey::testing_new("cell//pkg:foo"); let mut actions = ActionsRegistry::new( base.dupe(), ExecutionPlatformResolution::new( @@ -282,21 +274,18 @@ fn category_identifier_test( )), Vec::new(), ), + None, ); for (category, identifier) in action_names { let unregistered_action = SimpleUnregisteredAction::new( vec![], - Category::try_from(category.to_owned()).unwrap(), + Category::new((*category).to_owned()).unwrap(), identifier.map(|i| i.to_owned()), ); - 
actions.register( - &mut deferreds, - indexset![], - indexset![], - unregistered_action, - )?; + actions.register(&base, indexset![], indexset![], unregistered_action)?; } - actions.ensure_bound(&mut deferreds, &AnalysisValueFetcher::default()) + actions.ensure_bound(&AnalysisValueFetcher::testing_new(base))?; + Ok(()) } diff --git a/app/buck2_build_api_tests/src/actions/testings.rs b/app/buck2_build_api_tests/src/actions/testings.rs index 923507a76aabf..82a9d13e467c0 100644 --- a/app/buck2_build_api_tests/src/actions/testings.rs +++ b/app/buck2_build_api_tests/src/actions/testings.rs @@ -15,6 +15,7 @@ use buck2_artifact::artifact::build_artifact::BuildArtifact; use buck2_build_api::actions::box_slice_set::BoxSliceSet; use buck2_build_api::actions::execute::action_executor::ActionExecutionMetadata; use buck2_build_api::actions::execute::action_executor::ActionOutputs; +use buck2_build_api::actions::execute::error::ExecuteError; use buck2_build_api::actions::Action; use buck2_build_api::actions::ActionExecutable; use buck2_build_api::actions::ActionExecutionCtx; @@ -22,6 +23,7 @@ use buck2_build_api::actions::PristineActionExecutable; use buck2_build_api::actions::UnregisteredAction; use buck2_build_api::artifact_groups::ArtifactGroup; use buck2_core::category::Category; +use buck2_core::category::CategoryRef; use buck2_execute::execute::request::CommandExecutionOutput; use buck2_execute::execute::request::CommandExecutionPaths; use buck2_execute::execute::request::CommandExecutionRequest; @@ -37,7 +39,7 @@ use starlark::values::OwnedFrozenValue; /// /// This action is for testing, and bypasses the need to create starlark values and frozen /// modules -#[derive(Allocative)] +#[derive(Allocative, Clone, PartialEq)] pub(crate) struct SimpleUnregisteredAction { cmd: Vec<String>, category: Category, @@ -89,6 +91,7 @@ impl UnregisteredAction for SimpleUnregisteredAction { inputs: IndexSet<ArtifactGroup>, outputs: IndexSet<BuildArtifact>, _starlark_data: Option<OwnedFrozenValue>, + _error_handler: Option<OwnedFrozenValue>, ) -> anyhow::Result<Box<dyn Action>> { Ok(Box::new(SimpleAction { inputs: BoxSliceSet::from(inputs), @@ -110,16 +113,20 @@ impl Action for SimpleAction { Ok(Cow::Borrowed(self.inputs.as_slice())) } - fn outputs(&self) -> anyhow::Result<Cow<'_, [BuildArtifact]>> { - Ok(Cow::Borrowed(self.outputs.as_slice())) + fn outputs(&self) -> Cow<'_, [BuildArtifact]> { + Cow::Borrowed(self.outputs.as_slice()) + } + + fn first_output(&self) -> &BuildArtifact { + &self.outputs.as_slice()[0] } fn as_executable(&self) -> ActionExecutable<'_> { ActionExecutable::Pristine(self) } - fn category(&self) -> &Category { - &self.category + fn category(&self) -> CategoryRef { + self.category.as_ref() } fn identifier(&self) -> Option<&str> { @@ -132,7 +139,7 @@ impl PristineActionExecutable for SimpleAction { async fn execute( &self, ctx: &mut dyn ActionExecutionCtx, - ) -> anyhow::Result<(ActionOutputs, ActionExecutionMetadata)> { + ) -> Result<(ActionOutputs, ActionExecutionMetadata), ExecuteError> { let req = CommandExecutionRequest::new( vec![], self.cmd.clone(), @@ -154,7 +161,13 @@ impl PristineActionExecutable for SimpleAction { let prepared_action = ctx.prepare_action(&req)?; let manager = ctx.command_execution_manager(); let result = ctx.exec_cmd(manager, &req, &prepared_action).await; - let (outputs, meta) = ctx.unpack_command_execution_result(&req, result, false, false)?; + let (outputs, meta) = ctx.unpack_command_execution_result( + req.executor_preference, + result, + false, + false, + None, + )?; Ok((outputs, meta)) } diff --git a/app/buck2_build_api_tests/src/analysis/mod.rs
b/app/buck2_build_api_tests/src/analysis.rs similarity index 100% rename from app/buck2_build_api_tests/src/analysis/mod.rs rename to app/buck2_build_api_tests/src/analysis.rs diff --git a/app/buck2_build_api_tests/src/analysis/calculation.rs b/app/buck2_build_api_tests/src/analysis/calculation.rs index 0e93126a2bc29..39b22c7de62e7 100644 --- a/app/buck2_build_api_tests/src/analysis/calculation.rs +++ b/app/buck2_build_api_tests/src/analysis/calculation.rs @@ -10,44 +10,40 @@ use std::collections::HashMap; use std::sync::Arc; +use buck2_build_api::actions::execute::dice_data::set_fallback_executor_config; use buck2_build_api::analysis::calculation::RuleAnalysisCalculation; -use buck2_build_api::deferred::types::testing::DeferredAnalysisResultExt; use buck2_build_api::interpreter::rule_defs::provider::builtin::default_info::DefaultInfoCallable; use buck2_build_api::interpreter::rule_defs::provider::callable::register_provider; use buck2_build_api::interpreter::rule_defs::provider::registration::register_builtin_providers; use buck2_build_api::keep_going::HasKeepGoing; use buck2_build_api::spawner::BuckSpawner; use buck2_common::dice::data::testing::SetTestingIoProvider; -use buck2_common::legacy_configs::LegacyBuckConfig; -use buck2_common::legacy_configs::LegacyBuckConfigs; +use buck2_common::legacy_configs::configs::LegacyBuckConfig; use buck2_common::package_listing::listing::testing::PackageListingExt; use buck2_common::package_listing::listing::PackageListing; use buck2_configured::configuration::calculation::ExecutionPlatformsKey; use buck2_core::build_file_path::BuildFilePath; use buck2_core::bzl::ImportPath; -use buck2_core::cells::alias::NonEmptyCellAlias; use buck2_core::cells::cell_root_path::CellRootPathBuf; use buck2_core::cells::name::CellName; use buck2_core::cells::CellAliasResolver; -use buck2_core::cells::CellsAggregator; +use buck2_core::cells::CellResolver; use buck2_core::configuration::data::ConfigurationData; use buck2_core::execution_types::executor_config::CommandExecutorConfig; use buck2_core::fs::project::ProjectRootTemp; -use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; use buck2_core::package::PackageLabel; use buck2_core::provider::id::testing::ProviderIdExt; use buck2_core::provider::id::ProviderId; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::interner::ConcurrentTargetLabelInterner; +use buck2_core::target::label::label::TargetLabel; use buck2_events::dispatch::EventDispatcher; use buck2_execute::digest_config::DigestConfig; use buck2_execute::digest_config::SetDigestConfig; -use buck2_execute::execute::dice_data::set_fallback_executor_config; use buck2_interpreter::dice::starlark_debug::SetStarlarkDebugger; use buck2_interpreter::extra::InterpreterHostArchitecture; use buck2_interpreter::extra::InterpreterHostPlatform; use buck2_interpreter::file_loader::LoadedModules; use buck2_interpreter::paths::module::OwnedStarlarkModulePath; -use buck2_interpreter_for_build::attrs::attrs_global::register_attrs; use buck2_interpreter_for_build::interpreter::calculation::InterpreterResultsKey; use buck2_interpreter_for_build::interpreter::configuror::BuildInterpreterConfiguror; use buck2_interpreter_for_build::interpreter::dice_calculation_delegate::testing::EvalImportKey; @@ -59,41 +55,29 @@ use dice::UserComputationData; use dupe::Dupe; use indoc::indoc; use itertools::Itertools; -use maplit::hashmap; use starlark_map::ordered_map::OrderedMap; #[tokio::test] async fn test_analysis_calculation() -> anyhow::Result<()> { let 
bzlfile = ImportPath::testing_new("cell//pkg:foo.bzl"); - let resolver = { - let mut cells = CellsAggregator::new(); - cells.add_cell_entry( - CellRootPathBuf::new(ProjectRelativePathBuf::unchecked_new("cell".to_owned())), - NonEmptyCellAlias::new("root".to_owned()).unwrap(), - CellRootPathBuf::new(ProjectRelativePathBuf::unchecked_new("".to_owned())), - )?; - cells.add_cell_entry( - CellRootPathBuf::new(ProjectRelativePathBuf::unchecked_new("cell".to_owned())), - NonEmptyCellAlias::new("cell".to_owned()).unwrap(), - CellRootPathBuf::new(ProjectRelativePathBuf::unchecked_new("cell".to_owned())), - )?; - cells.make_cell_resolver()? - }; - let configs = LegacyBuckConfigs::new(hashmap![ - CellName::testing_new("root") => - LegacyBuckConfig::empty(), - CellName::testing_new("cell") => - LegacyBuckConfig::empty(), + let resolver = CellResolver::testing_with_names_and_paths(&[ + ( + CellName::testing_new("root"), + CellRootPathBuf::testing_new(""), + ), + ( + CellName::testing_new("cell"), + CellRootPathBuf::testing_new("cell"), + ), ]); let mut interpreter = Tester::with_cells(( CellAliasResolver::new(CellName::testing_new("cell"), HashMap::new())?, resolver.dupe(), - configs.dupe(), + LegacyBuckConfig::empty(), ))?; interpreter.additional_globals(register_rule_function); interpreter.additional_globals(register_provider); interpreter.additional_globals(register_builtin_providers); - interpreter.additional_globals(register_attrs); let module = interpreter .eval_import( &bzlfile, @@ -177,15 +161,11 @@ async fn test_analysis_calculation() -> anyhow::Result<()> { None, false, false, - |_| {}, - |_| {}, - |_| {}, - |_| {}, None, + Arc::new(ConcurrentTargetLabelInterner::default()), )?, - configs, )?; - let dice = dice.commit().await; + let mut dice = dice.commit().await; let analysis = dice .get_analysis_result( @@ -195,12 +175,13 @@ async fn test_analysis_calculation() -> anyhow::Result<()> { .await? 
.require_compatible()?; - assert_eq!(analysis.testing_deferred().get_registered().len(), 0); + assert_eq!(analysis.analysis_values().iter_actions().count(), 0); assert_eq!( analysis .providers() - .provider_collection() + .unwrap() + .value() .provider_names() .iter() .sorted() @@ -211,7 +192,8 @@ async fn test_analysis_calculation() -> anyhow::Result<()> { assert_eq!( analysis .providers() - .provider_collection() + .unwrap() + .value() .get_provider_raw(&ProviderId::testing_new(bzlfile.path().clone(), "FooInfo")) .is_some(), true @@ -219,7 +201,8 @@ async fn test_analysis_calculation() -> anyhow::Result<()> { assert_eq!( analysis .providers() - .provider_collection() + .unwrap() + .value() .get_provider_raw(DefaultInfoCallable::provider_id()) .is_some(), true diff --git a/app/buck2_build_api_tests/src/artifact_groups/mod.rs b/app/buck2_build_api_tests/src/artifact_groups.rs similarity index 100% rename from app/buck2_build_api_tests/src/artifact_groups/mod.rs rename to app/buck2_build_api_tests/src/artifact_groups.rs diff --git a/app/buck2_build_api_tests/src/artifact_groups/calculation.rs b/app/buck2_build_api_tests/src/artifact_groups/calculation.rs index 155a53bc1fb64..2430f4f7e0a19 100644 --- a/app/buck2_build_api_tests/src/artifact_groups/calculation.rs +++ b/app/buck2_build_api_tests/src/artifact_groups/calculation.rs @@ -10,36 +10,36 @@ use std::collections::HashMap; use std::sync::Arc; +use buck2_analysis::analysis::calculation::AnalysisKey; use buck2_artifact::artifact::artifact_type::Artifact; use buck2_artifact::artifact::source_artifact::SourceArtifact; +use buck2_artifact::deferred::key::DeferredHolderKey; +use buck2_build_api::actions::registry::RecordedActions; +use buck2_build_api::analysis::registry::RecordedAnalysisValues; +use buck2_build_api::analysis::AnalysisResult; use buck2_build_api::artifact_groups::calculation::ArtifactGroupCalculation; -use buck2_build_api::artifact_groups::deferred::DeferredTransitiveSetData; +use buck2_build_api::artifact_groups::deferred::TransitiveSetKey; use buck2_build_api::artifact_groups::ArtifactGroup; use buck2_build_api::artifact_groups::TransitiveSetProjectionKey; use buck2_build_api::context::SetBuildContextData; -use buck2_build_api::deferred::calculation::DeferredResolve; -use buck2_build_api::deferred::types::AnyValue; -use buck2_build_api::deferred::types::DeferredValueAnyReady; -use buck2_build_api::interpreter::rule_defs::transitive_set::TransitiveSet; +use buck2_build_api::interpreter::rule_defs::transitive_set::FrozenTransitiveSet; use buck2_build_api::interpreter::rule_defs::transitive_set::TransitiveSetOrdering; use buck2_build_api::keep_going::HasKeepGoing; use buck2_common::dice::cells::SetCellResolver; use buck2_common::dice::data::testing::SetTestingIoProvider; -use buck2_common::dice::file_ops::keys::FileOpsKey; -use buck2_common::dice::file_ops::keys::FileOpsValue; use buck2_common::file_ops::testing::TestFileOps; use buck2_common::file_ops::FileMetadata; use buck2_common::file_ops::TrackedFileDigest; -use buck2_common::result::ToSharedResultExt; -use buck2_core::buck_path::path::BuckPath; +use buck2_core::base_deferred_key::BaseDeferredKey; use buck2_core::cells::cell_path::CellPath; use buck2_core::cells::cell_root_path::CellRootPathBuf; use buck2_core::cells::name::CellName; use buck2_core::cells::paths::CellRelativePathBuf; use buck2_core::cells::CellResolver; +use buck2_core::configuration::compatibility::MaybeCompatible; use buck2_core::fs::project::ProjectRootTemp; -use 
buck2_core::package::package_relative_path::PackageRelativePathBuf; -use buck2_core::package::PackageLabel; +use buck2_core::package::source_path::SourcePath; +use buck2_core::target::configured_target_label::ConfiguredTargetLabel; use buck2_execute::artifact_value::ArtifactValue; use buck2_execute::digest_config::DigestConfig; use buck2_execute::digest_config::SetDigestConfig; @@ -49,16 +49,51 @@ use dupe::Dupe; use indoc::indoc; use maplit::btreemap; use starlark::values::OwnedFrozenValue; +use starlark::values::OwnedFrozenValueTyped; use crate::interpreter::transitive_set::testing::new_transitive_set; -fn mock_deferred_tset(dice_builder: DiceBuilder, value: OwnedFrozenValue) -> DiceBuilder { - let tset = TransitiveSet::from_value(value.value()).unwrap(); - let resolve = DeferredResolve(tset.key().deferred_key().dupe()); +fn mock_analysis_for_tsets( + mut dice_builder: DiceBuilder, + tsets: Vec<OwnedFrozenValueTyped<FrozenTransitiveSet>>, +) -> DiceBuilder { + let mut by_target: HashMap< + ConfiguredTargetLabel, + Vec<(TransitiveSetKey, OwnedFrozenValueTyped<FrozenTransitiveSet>)>, + > = HashMap::new(); + + for value in tsets { + let key = value.key().dupe(); + match key.holder_key() { + DeferredHolderKey::Base(BaseDeferredKey::TargetLabel(target)) => { + by_target + .entry(target.dupe()) + .or_insert(Vec::new()) + .push((key, value.dupe())); + } + _ => unreachable!("we only make fake tsets with configured targets `{}`", key), + } + } + + for (target, tsets) in by_target.into_iter() { + dice_builder = dice_builder.mock_and_return( + AnalysisKey(target.dupe()), + buck2_error::Ok(MaybeCompatible::Compatible(AnalysisResult::new( + RecordedAnalysisValues::testing_new( + DeferredHolderKey::Base(BaseDeferredKey::TargetLabel(target)), + tsets, + RecordedActions::new(), + ), + None, + HashMap::new(), + 0, + 0, + None, + ))), + ); + } - let data: Arc<dyn AnyValue> = Arc::new(DeferredTransitiveSetData::testing_new(value)); - let any = DeferredValueAnyReady::AnyValue(data); - dice_builder.mock_and_return(resolve, anyhow::Ok(any).shared_error()) + dice_builder } #[tokio::test] @@ -77,23 +112,22 @@ async fn test_ensure_artifact_group() -> anyhow::Result<()> { foo = source_artifact("foo", "foo") bar = source_artifact("bar", "bar") - s1 = make_tset(TestSet, value = foo) - make_tset(TestSet, value = bar, children = [s1]) + def make(): + s1 = make_tset(TestSet, value = foo) + return make_tset(TestSet, value = bar, children = [s1]) "# ))?; let heap = set.owner(); - let cell_resolver = CellResolver::testing_with_names_and_paths_with_alias(&[ + let cell_resolver = CellResolver::testing_with_names_and_paths(&[ ( CellName::testing_new("root"), CellRootPathBuf::testing_new("cell-path"), - HashMap::new(), ), ( CellName::testing_new("parent"), CellRootPathBuf::testing_new(""), - HashMap::new(), ), ]); @@ -102,9 +136,9 @@ async fn test_ensure_artifact_group() -> anyhow::Result<()> { CellRelativePathBuf::unchecked_new("foo/foo".to_owned()), ); - let foo_artifact = Artifact::from(SourceArtifact::new(BuckPath::testing_new( - PackageLabel::testing_parse("root//foo"), - PackageRelativePathBuf::unchecked_new("foo".to_owned()), + let foo_artifact = Artifact::from(SourceArtifact::new(SourcePath::testing_new( + "root//foo", + "foo", ))); let foo_meta = FileMetadata { digest: TrackedFileDigest::from_content(b"content", digest_config.cas_digest_config()), is_executable: true, }; - let bar_artifact = Artifact::from(SourceArtifact::new(BuckPath::testing_new( - PackageLabel::testing_parse("root//bar"), - PackageRelativePathBuf::unchecked_new("bar".to_owned()), + let bar_artifact =
Artifact::from(SourceArtifact::new(SourcePath::testing_new( + "root//bar", + "bar", ))); let bar = CellPath::new( @@ -134,41 +168,44 @@ async fn test_ensure_artifact_group() -> anyhow::Result<()> { let fs = ProjectRootTemp::new()?; - let mut dice_builder = DiceBuilder::new() - .mock_and_return(FileOpsKey(), Ok(FileOpsValue(Arc::new(files)))) - .set_data(|data| { - data.set_testing_io_provider(&fs); - data.set_digest_config(DigestConfig::testing_default()); - }); - - // Register all the sets as deferreds. - dice_builder = mock_deferred_tset(dice_builder, set.to_owned_frozen_value()); + let cell_root = CellName::testing_new("root"); + let cell_parent = CellName::testing_new("parent"); + let dice_builder = files.mock_in_cell(cell_root, DiceBuilder::new()); + let dice_builder = files.mock_in_cell(cell_parent, dice_builder); + let mut dice_builder = dice_builder.set_data(|data| { + data.set_testing_io_provider(&fs); + data.set_digest_config(DigestConfig::testing_default()); + }); + let mut all_tsets = vec![set.dupe()]; // This is kinda clowny, but we can't upcast the TransitiveSetGen back to a Value so we // have to access Values from their parents. for set in set.as_ref().iter(TransitiveSetOrdering::Preorder) { for child in set.children.iter() { // Safety: We know the entire set came from the same heap. let child = unsafe { OwnedFrozenValue::new(heap.dupe(), *child) }; - dice_builder = mock_deferred_tset(dice_builder, child); + all_tsets.push(child.downcast().unwrap()); } } + // Register all the sets as deferreds. + dice_builder = mock_analysis_for_tsets(dice_builder, all_tsets); + let mut extra = UserComputationData::new(); extra.set_keep_going(true); let mut dice = dice_builder.build(extra)?; dice.set_cell_resolver(cell_resolver)?; dice.set_buck_out_path(None)?; - let dice = dice.commit().await; + let mut dice = dice.commit().await; let result = dice - .ensure_artifact_group(&ArtifactGroup::TransitiveSetProjection( + .ensure_artifact_group(&ArtifactGroup::TransitiveSetProjection(Arc::new( TransitiveSetProjectionKey { key: set.key.dupe(), projection: 0, }, - )) + ))) .await? 
.iter() .cloned() diff --git a/app/buck2_build_api_tests/src/attrs/mod.rs b/app/buck2_build_api_tests/src/attrs.rs similarity index 100% rename from app/buck2_build_api_tests/src/attrs/mod.rs rename to app/buck2_build_api_tests/src/attrs.rs diff --git a/app/buck2_build_api_tests/src/attrs/resolve/mod.rs b/app/buck2_build_api_tests/src/attrs/resolve.rs similarity index 100% rename from app/buck2_build_api_tests/src/attrs/resolve/mod.rs rename to app/buck2_build_api_tests/src/attrs/resolve.rs diff --git a/app/buck2_build_api_tests/src/attrs/resolve/testing.rs b/app/buck2_build_api_tests/src/attrs/resolve/testing.rs index 185ee062cbde8..25aed6f369291 100644 --- a/app/buck2_build_api_tests/src/attrs/resolve/testing.rs +++ b/app/buck2_build_api_tests/src/attrs/resolve/testing.rs @@ -9,14 +9,15 @@ use std::sync::Arc; +use anyhow::Context; use buck2_analysis::attrs::resolve::ctx::AnalysisQueryResult; use buck2_analysis::attrs::resolve::ctx::AttrResolutionContext; use buck2_build_api::interpreter::rule_defs::cmd_args::value::FrozenCommandLineArg; use buck2_build_api::interpreter::rule_defs::provider::builtin::template_placeholder_info::FrozenTemplatePlaceholderInfo; use buck2_build_api::interpreter::rule_defs::provider::callable::register_provider; +use buck2_build_api::interpreter::rule_defs::provider::collection::FrozenProviderCollection; use buck2_build_api::interpreter::rule_defs::provider::collection::FrozenProviderCollectionValue; use buck2_build_api::interpreter::rule_defs::provider::registration::register_builtin_providers; -use buck2_common::result::SharedResult; use buck2_core::configuration::data::ConfigurationData; use buck2_core::execution_types::execution::ExecutionPlatformResolution; use buck2_core::provider::label::ConfiguredProvidersLabel; @@ -33,6 +34,7 @@ use starlark::environment::Globals; use starlark::environment::GlobalsBuilder; use starlark::environment::Module; use starlark::values::dict::FrozenDictRef; +use starlark::values::FrozenValueTyped; use starlark_map::small_map::SmallMap; use starlark_map::smallmap; @@ -215,11 +217,13 @@ pub(crate) fn resolution_ctx_with_providers<'v>( fn get_dep( &self, target: &ConfiguredProvidersLabel, - ) -> anyhow::Result<FrozenProviderCollectionValue> { - self.deps + ) -> anyhow::Result<FrozenValueTyped<'v, FrozenProviderCollection>> { + Ok(self + .deps .get(target) .duped() - .ok_or_else(|| anyhow::anyhow!("missing dep")) + .context("missing dep")?
+ .add_heap_ref(self.module.frozen_heap())) } fn resolve_unkeyed_placeholder( @@ -239,7 +243,7 @@ pub(crate) fn resolution_ctx_with_providers<'v>( Ok(None) } - fn resolve_query(&self, _query: &str) -> SharedResult<Arc<AnalysisQueryResult>> { + fn resolve_query(&self, _query: &str) -> buck2_error::Result<Arc<AnalysisQueryResult>> { unimplemented!("This test resolution context doesn't handle queries") } diff --git a/app/buck2_build_api_tests/src/attrs/tests.rs b/app/buck2_build_api_tests/src/attrs/tests.rs index bfb14a90eaa71..701464d30f353 100644 --- a/app/buck2_build_api_tests/src/attrs/tests.rs +++ b/app/buck2_build_api_tests/src/attrs/tests.rs @@ -35,7 +35,7 @@ use buck2_node::attrs::attr_type::AttrType; use buck2_node::attrs::coerced_deps_collector::CoercedDepsCollector; use buck2_node::attrs::configurable::AttrIsConfigurable; use buck2_node::attrs::configuration_context::AttrConfigurationContext; -use buck2_node::attrs::configured_info::ConfiguredAttrInfo; +use buck2_node::attrs::configured_attr_info_for_tests::ConfiguredAttrInfoForTests; use buck2_node::attrs::display::AttrDisplayWithContextExt; use buck2_node::attrs::fmt_context::AttrFmtContext; use buck2_node::attrs::testing::configuration_ctx; @@ -46,6 +46,7 @@ use indoc::indoc; use starlark::environment::GlobalsBuilder; use starlark::environment::Module; use starlark::values::Heap; +use starlark::values::UnpackValue; use starlark::values::Value; use crate::attrs::resolve::testing::resolution_ctx; @@ -82,13 +83,13 @@ fn test() -> anyhow::Result<()> { let coerced = attr.coerce(AttrIsConfigurable::Yes, &coercion_ctx(), value)?; assert_eq!( - "[[[\"hello\",\"world!\"]+select(\"root//some:config\"=[\"some\"],\"DEFAULT\"=[\"okay\"]+select(\"root//other:config\"=[\"other\"],\"DEFAULT\"=[\"default\",\"for\",\"realz\"]))+[\"...\"]+[\"...\"]]]", + "[[[\"hello\", \"world!\"]+select({\"root//some:config\": [\"some\"], \"DEFAULT\": [\"okay\"]+select({\"root//other:config\": [\"other\"], \"DEFAULT\": [\"default\", \"for\", \"realz\"]})})+[\"...\"]+[\"...\"]]]", coerced.as_display_no_ctx().to_string() ); let configured = coerced.configure(&attr, &configuration_ctx())?; assert_eq!( - "[[[\"hello\",\"world!\",\"okay\",\"other\",\"...\",\"...\"]]]", + "[[[\"hello\", \"world!\", \"okay\", \"other\", \"...\", \"...\"]]]", configured.as_display_no_ctx().to_string() ); @@ -173,7 +174,7 @@ fn test_concat_option_one_of() { .unwrap(); let configured = coerced.configure(&attr, &configuration_ctx()).unwrap(); assert_eq!( - r#"["foo","bar"]"#, + r#"["foo", "bar"]"#, configured.as_display_no_ctx().to_string() ); } @@ -186,12 +187,12 @@ fn test_any() -> anyhow::Result<()> { let coerced = attr.coerce(AttrIsConfigurable::Yes, &coercion_ctx(), value)?; assert_eq!( - "[\"//some:target\",\"cell1//named:target[foo]\"]", + "[\"//some:target\", \"cell1//named:target[foo]\"]", coerced.as_display_no_ctx().to_string() ); let configured = coerced.configure(&attr, &configuration_ctx())?; assert_eq!( - "[\"//some:target\",\"cell1//named:target[foo]\"]", + "[\"//some:target\", \"cell1//named:target[foo]\"]", configured.as_display_no_ctx().to_string() ); @@ -224,12 +225,12 @@ fn test_option() -> anyhow::Result<()> { let value = heap.alloc(vec!["string1", "string2"]); let coerced = attr.coerce(AttrIsConfigurable::Yes, &coercion_ctx(), value)?; assert_eq!( - "[\"string1\",\"string2\"]", + "[\"string1\", \"string2\"]", coerced.as_display_no_ctx().to_string() ); let configured = coerced.configure(&attr, &configuration_ctx())?; assert_eq!( - "[\"string1\",\"string2\"]", + "[\"string1\", \"string2\"]",
configured.as_display_no_ctx().to_string() ); @@ -251,12 +252,12 @@ fn test_dict() -> anyhow::Result<()> { let attr = AttrType::dict(AttrType::string(), AttrType::list(AttrType::string()), true); let coerced = attr.coerce(AttrIsConfigurable::Yes, &coercion_ctx(), value)?; assert_eq!( - "{\"a\": [],\"b\": [\"1\"]}", + "{\"a\": [], \"b\": [\"1\"]}", coerced.as_display_no_ctx().to_string() ); let configured = coerced.configure(&attr, &configuration_ctx())?; assert_eq!( - "{\"a\": [],\"b\": [\"1\"]}", + "{\"a\": [], \"b\": [\"1\"]}", configured.as_display_no_ctx().to_string() ); @@ -267,24 +268,24 @@ fn test_dict() -> anyhow::Result<()> { ); let coerced = attr.coerce(AttrIsConfigurable::Yes, &coercion_ctx(), value)?; assert_eq!( - "{\"b\": [\"1\"],\"a\": []}", + "{\"b\": [\"1\"], \"a\": []}", coerced.as_display_no_ctx().to_string() ); let configured = coerced.configure(&attr, &configuration_ctx())?; assert_eq!( - "{\"b\": [\"1\"],\"a\": []}", + "{\"b\": [\"1\"], \"a\": []}", configured.as_display_no_ctx().to_string() ); let value = to_value( &env, &globals, - r#"{"b":["1"],"a":[]} + select({"DEFAULT": { "c": []}})"#, + r#"{"b":["1"], "a":[]} + select({"DEFAULT": { "c": []}})"#, ); let coerced = attr.coerce(AttrIsConfigurable::Yes, &coercion_ctx(), value)?; let configured = coerced.configure(&attr, &configuration_ctx())?; assert_eq!( - r#"{"b": ["1"],"a": [],"c": []}"#, + r#"{"b": ["1"], "a": [], "c": []}"#, configured.as_display_no_ctx().to_string() ); @@ -305,12 +306,12 @@ fn test_one_of() -> anyhow::Result<()> { let coerced = attr.coerce(AttrIsConfigurable::Yes, &coercion_ctx(), values)?; assert_eq!( - "[\"test\",\"extra\"]", + "[\"test\", \"extra\"]", coerced.as_display_no_ctx().to_string() ); let configured = coerced.configure(&attr, &configuration_ctx())?; assert_eq!( - "[\"test\",\"extra\"]", + "[\"test\", \"extra\"]", configured.as_display_no_ctx().to_string() ); @@ -330,14 +331,14 @@ fn test_label() -> anyhow::Result<()> { let coerced = attr.coerce(AttrIsConfigurable::Yes, &coercion_ctx(), value)?; assert_eq!( - "[\"root//some:target\",\"cell1//named:target[foo]\"]", + "[\"root//some:target\", \"cell1//named:target[foo]\"]", coerced.as_display_no_ctx().to_string() ); let configured = coerced.configure(&attr, &configuration_ctx())?; assert_eq!( format!( - "[\"root//some:target ({})\",\"cell1//named:target[foo] ({})\"]", + "[\"root//some:target ({})\", \"cell1//named:target[foo] ({})\"]", ConfigurationData::testing_new(), ConfigurationData::testing_new() ), @@ -420,7 +421,7 @@ fn test_configured_deps() -> anyhow::Result<()> { let coerced = attr.coerce(AttrIsConfigurable::Yes, &coercion_ctx(), value)?; let configured = coerced.configure(&attr, &configuration_ctx())?; - let mut info = ConfiguredAttrInfo::new(); + let mut info = ConfiguredAttrInfoForTests::new(); configured.traverse(PackageLabel::testing(), &mut info)?; let expected_deps = [ @@ -446,13 +447,14 @@ fn test_configured_deps() -> anyhow::Result<()> { let attr_exec = AttrType::list(AttrType::exec_dep(ProviderIdSet::EMPTY)); let coerced_exec = attr_exec.coerce(AttrIsConfigurable::Yes, &coercion_ctx(), value)?; let configured_exec = coerced_exec.configure(&attr_exec, &configuration_ctx())?; - let mut info = ConfiguredAttrInfo::new(); + let mut info = ConfiguredAttrInfoForTests::new(); configured_exec.traverse(PackageLabel::testing(), &mut info)?; eprintln!("{:?}", info); + let exec_cfg = configuration_ctx().exec_cfg()?; assert_eq!( expected_deps .to_vec() - .map(|s| format!("{} ({})", s, configuration_ctx().exec_cfg())), + .map(|s| 
format!("{} ({})", s, exec_cfg)), info.execution_deps .iter() .map(ToString::to_string) @@ -581,10 +583,11 @@ fn test_source_label() -> anyhow::Result<()> { value, )?; assert_eq!( - "[\"root//some:target\",\"cell1//named:target[foo]\",\"root//package/subdir/foo/bar.cpp\"]", + "[\"root//some:target\", \"cell1//named:target[foo]\", \"root//package/subdir/foo/bar.cpp\"]", coerced .as_display(&AttrFmtContext { - package: Some(PackageLabel::testing()) + package: Some(PackageLabel::testing()), + options: Default::default(), }) .to_string(), ); @@ -592,7 +595,7 @@ fn test_source_label() -> anyhow::Result<()> { let configured = coerced.configure(&attr, &configuration_ctx())?; assert_eq!( format!( - "[{},{},{}]", + "[{}, {}, {}]", format_args!( "\"root//some:target ({})\"", ConfigurationData::testing_new(), ), @@ -605,7 +608,8 @@ fn test_source_label() -> anyhow::Result<()> { ), configured .as_display(&AttrFmtContext { - package: Some(PackageLabel::testing()) + package: Some(PackageLabel::testing()), + options: Default::default(), }) .to_string(), ); @@ -784,7 +788,7 @@ fn test_arg() -> anyhow::Result<()> { assert_eq!( format!( "\"$(exe root//some:exe ({})) --file=$(location root//some:location ({}))\"", - configuration_ctx().exec_cfg(), + configuration_ctx().exec_cfg()?, ConfigurationData::testing_new(), ), configured.as_display_no_ctx().to_string() @@ -798,7 +802,7 @@ let deps: Vec<_> = deps.iter().map(|t| t.to_string()).collect(); let exec_deps: Vec<_> = exec_deps.iter().map(|t| t.to_string()).collect(); - let mut info = ConfiguredAttrInfo::new(); + let mut info = ConfiguredAttrInfoForTests::new(); configured.traverse(PackageLabel::testing(), &mut info)?; let expected_deps = vec!["root//some:location"]; @@ -809,7 +813,7 @@ )]; let expected_configured_exec_deps = vec![format!( "root//some:exe ({})", - configuration_ctx().exec_cfg() + configuration_ctx().exec_cfg()? )]; assert_eq!(expected_deps, deps); @@ -859,13 +863,13 @@ fn test_bool() -> anyhow::Result<()> { let coerced = attr.coerce(AttrIsConfigurable::Yes, &coercion_ctx(), value)?; assert_eq!( - "[True,False]+select(\"root//some:config\"=[True],\"DEFAULT\"=[False])+[True]", + "[True, False]+select({\"root//some:config\": [True], \"DEFAULT\": [False]})+[True]", coerced.as_display_no_ctx().to_string() ); let configured = coerced.configure(&attr, &configuration_ctx())?; assert_eq!( - "[True,False,False,True]", + "[True, False, False, True]", configured.as_display_no_ctx().to_string() ); @@ -918,8 +922,9 @@ fn test_user_placeholders() -> anyhow::Result<()> { let mut cli = Vec::<String>::new(); let mut ctx = DefaultCommandLineContext::new(&executor_fs); - v.as_command_line() + ValueAsCommandLineLike::unpack_value_err(v) .unwrap() + .0 .add_to_command_line(&mut cli, &mut ctx) .unwrap(); cli.join(" ") diff --git a/app/buck2_build_api_tests/src/deferred/mod.rs b/app/buck2_build_api_tests/src/deferred.rs similarity index 100% rename from app/buck2_build_api_tests/src/deferred/mod.rs rename to app/buck2_build_api_tests/src/deferred.rs diff --git a/app/buck2_build_api_tests/src/deferred/calculation.rs b/app/buck2_build_api_tests/src/deferred/calculation.rs index 19648f6b88225..2a7f4754534b6 100644 --- a/app/buck2_build_api_tests/src/deferred/calculation.rs +++ b/app/buck2_build_api_tests/src/deferred/calculation.rs @@ -7,34 +7,35 @@ * of this source tree.
*/ +use std::collections::HashMap; use std::sync::atomic::AtomicBool; use std::sync::atomic::Ordering; use std::sync::Arc; use allocative::Allocative; -use async_trait::async_trait; use buck2_analysis::analysis::calculation::AnalysisKey; +use buck2_artifact::deferred::key::DeferredHolderKey; +use buck2_build_api::actions::execute::dice_data::set_fallback_executor_config; use buck2_build_api::analysis::AnalysisResult; use buck2_build_api::deferred::calculation::DeferredCalculation; -use buck2_build_api::deferred::types::BaseKey; use buck2_build_api::deferred::types::Deferred; use buck2_build_api::deferred::types::DeferredCtx; use buck2_build_api::deferred::types::DeferredInput; +use buck2_build_api::deferred::types::DeferredInputsRef; +use buck2_build_api::deferred::types::DeferredOutput; use buck2_build_api::deferred::types::DeferredRegistry; -use buck2_build_api::deferred::types::DeferredTable; use buck2_build_api::deferred::types::DeferredValue; use buck2_common::dice::data::testing::SetTestingIoProvider; -use buck2_common::result::ToSharedResultExt; use buck2_configured::nodes::calculation::ConfiguredTargetNodeKey; use buck2_core::base_deferred_key::BaseDeferredKey; use buck2_core::configuration::compatibility::MaybeCompatible; use buck2_core::configuration::data::ConfigurationData; +use buck2_core::execution_types::execution::ExecutionPlatformResolution; use buck2_core::execution_types::executor_config::CommandExecutorConfig; use buck2_core::fs::project::ProjectRootTemp; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use buck2_execute::digest_config::DigestConfig; use buck2_execute::digest_config::SetDigestConfig; -use buck2_execute::execute::dice_data::set_fallback_executor_config; use buck2_node::nodes::configured::ConfiguredTargetNode; use dice::testing::DiceBuilder; use dice::DiceComputations; @@ -45,28 +46,32 @@ use indoc::indoc; use crate::interpreter::rule_defs::provider::testing::FrozenProviderCollectionValueExt; -#[derive(Allocative)] +#[derive(Debug, Allocative)] struct FakeDeferred(usize, IndexSet<DeferredInput>, Arc<AtomicBool>); impl provider::Provider for FakeDeferred { fn provide<'a>(&'a self, _demand: &mut provider::Demand<'a>) {} } -#[async_trait] +#[derive(Allocative, Clone, Debug, Eq, PartialEq)] +struct UsizeOutput(usize); + +impl DeferredOutput for UsizeOutput {} + impl Deferred for FakeDeferred { - type Output = usize; + type Output = UsizeOutput; - fn inputs(&self) -> &IndexSet<DeferredInput> { - &self.1 + fn inputs(&self) -> DeferredInputsRef<'_> { + DeferredInputsRef::IndexSet(&self.1) } async fn execute( &self, _ctx: &mut dyn DeferredCtx, - _dice: &mut DiceComputations, + _dice: &mut DiceComputations<'_>, ) -> anyhow::Result<DeferredValue<Self::Output>> { self.2.store(true, Ordering::SeqCst); - Ok(DeferredValue::Ready(self.0)) + Ok(DeferredValue::Ready(UsizeOutput(self.0))) } } @@ -84,14 +89,15 @@ async fn lookup_deferred_from_analysis() -> anyhow::Result<()> { "# )); - let mut deferred = - DeferredRegistry::new(BaseKey::Base(BaseDeferredKey::TargetLabel(target.dupe()))); + let mut deferred = DeferredRegistry::new(DeferredHolderKey::Base( + BaseDeferredKey::TargetLabel(target.dupe()), + )); let executed0 = Arc::new(AtomicBool::new(false)); let executed1 = Arc::new(AtomicBool::new(false)); let data0 = deferred.defer(FakeDeferred(1, IndexSet::new(), executed0.dupe())); let data1 = deferred.defer(FakeDeferred(5, IndexSet::new(), executed1.dupe())); - let deferred_result = DeferredTable::new(deferred.take_result()?); + let (deferred_result, analysis_values) = deferred.take_result()?;
let fs = ProjectRootTemp::new()?; let dice = DiceBuilder::new() @@ -101,39 +107,48 @@ async fn lookup_deferred_from_analysis() -> anyhow::Result<()> { }) .mock_and_return( analysis_key, - anyhow::Ok(MaybeCompatible::Compatible(AnalysisResult::new( + buck2_error::Ok(MaybeCompatible::Compatible(AnalysisResult::new( provider_collection, deferred_result, + analysis_values, None, - ))) - .shared_error(), + HashMap::new(), + 0, + 0, + ))), ) .mock_and_return( configured_node_key, Ok(MaybeCompatible::Compatible( - ConfiguredTargetNode::testing_new(target.dupe(), "foo_lib"), + ConfiguredTargetNode::testing_new( + target.dupe(), + "foo_lib", + ExecutionPlatformResolution::new(None, Vec::new()), + vec![], + vec![], + ), )), ); let mut dice_data = UserComputationData::new(); set_fallback_executor_config(&mut dice_data.data, CommandExecutorConfig::testing_local()); - let dice = dice.build(dice_data)?.commit().await; + let mut dice = dice.build(dice_data)?.commit().await; let deferred_result = dice.compute_deferred_data(&data0).await?; - assert_eq!(*deferred_result, 1); + assert_eq!(deferred_result.0, 1); assert_eq!(executed0.load(Ordering::SeqCst), true); // we should cache deferred execution executed0.store(false, Ordering::SeqCst); let deferred_result = dice.compute_deferred_data(&data0).await?; - assert_eq!(*deferred_result, 1); + assert_eq!(deferred_result.0, 1); assert_eq!(executed0.load(Ordering::SeqCst), false); let deferred_result = dice.compute_deferred_data(&data1).await?; - assert_eq!(*deferred_result, 5); + assert_eq!(deferred_result.0, 5); assert_eq!(executed1.load(Ordering::SeqCst), true); // we should cache deferred execution executed1.store(false, Ordering::SeqCst); - assert_eq!(*deferred_result, 5); + assert_eq!(deferred_result.0, 5); assert_eq!(executed1.load(Ordering::SeqCst), false); Ok(()) @@ -141,25 +156,24 @@ async fn lookup_deferred_from_analysis() -> anyhow::Result<()> { #[tokio::test] async fn lookup_deferred_that_has_deferreds() -> anyhow::Result<()> { - #[derive(Allocative)] - struct DeferringDeferred(usize, IndexSet<DeferredInput>, Arc<AtomicBool>); + #[derive(Debug, Allocative)] + struct TestDeferringDeferred(usize, IndexSet<DeferredInput>, Arc<AtomicBool>); - impl provider::Provider for DeferringDeferred { + impl provider::Provider for TestDeferringDeferred { fn provide<'a>(&'a self, _demand: &mut provider::Demand<'a>) {} } - #[async_trait] - impl Deferred for DeferringDeferred { - type Output = usize; + impl Deferred for TestDeferringDeferred { + type Output = UsizeOutput; - fn inputs(&self) -> &IndexSet<DeferredInput> { - &self.1 + fn inputs(&self) -> DeferredInputsRef<'_> { + DeferredInputsRef::IndexSet(&self.1) } async fn execute( &self, ctx: &mut dyn DeferredCtx, - _dice: &mut DiceComputations, + _dice: &mut DiceComputations<'_>, ) -> anyhow::Result<DeferredValue<Self::Output>> { let data = ctx .registry() @@ -180,12 +194,13 @@ async fn lookup_deferred_that_has_deferreds() -> anyhow::Result<()> { "# )); - let mut deferred = - DeferredRegistry::new(BaseKey::Base(BaseDeferredKey::TargetLabel(target.dupe()))); + let mut deferred = DeferredRegistry::new(DeferredHolderKey::Base( + BaseDeferredKey::TargetLabel(target.dupe()), + )); let executed = Arc::new(AtomicBool::new(false)); - let data = deferred.defer(DeferringDeferred(8, IndexSet::new(), executed.dupe())); + let data = deferred.defer(TestDeferringDeferred(8, IndexSet::new(), executed.dupe())); - let deferred_result = DeferredTable::new(deferred.take_result()?); + let (deferred_result, analysis_values) = deferred.take_result()?; let fs = ProjectRootTemp::new()?; let dice = DiceBuilder::new() @@ -195,31 +210,40
@@ async fn lookup_deferred_that_has_deferreds() -> anyhow::Result<()> { }) .mock_and_return( analysis_key, - anyhow::Ok(MaybeCompatible::Compatible(AnalysisResult::new( + buck2_error::Ok(MaybeCompatible::Compatible(AnalysisResult::new( provider_collection, deferred_result, + analysis_values, None, - ))) - .shared_error(), + HashMap::new(), + 0, + 0, + ))), ) .mock_and_return( configured_node_key, Ok(MaybeCompatible::Compatible( - ConfiguredTargetNode::testing_new(target.dupe(), "foo_lib"), + ConfiguredTargetNode::testing_new( + target.dupe(), + "foo_lib", + ExecutionPlatformResolution::new(None, Vec::new()), + vec![], + vec![], + ), )), ); let mut dice_data = UserComputationData::new(); set_fallback_executor_config(&mut dice_data.data, CommandExecutorConfig::testing_local()); - let dice = dice.build(dice_data)?.commit().await; + let mut dice = dice.build(dice_data)?.commit().await; let deferred_result = dice.compute_deferred_data(&data).await?; - assert_eq!(*deferred_result, 8); + assert_eq!(deferred_result.0, 8); assert_eq!(executed.load(Ordering::SeqCst), true); // we should cache deferred execution executed.store(false, Ordering::SeqCst); let deferred_result = dice.compute_deferred_data(&data).await?; - assert_eq!(*deferred_result, 8); + assert_eq!(deferred_result.0, 8); assert_eq!(executed.load(Ordering::SeqCst), false); Ok(()) diff --git a/app/buck2_build_api_tests/src/interpreter/mod.rs b/app/buck2_build_api_tests/src/interpreter.rs similarity index 100% rename from app/buck2_build_api_tests/src/interpreter/mod.rs rename to app/buck2_build_api_tests/src/interpreter.rs diff --git a/app/buck2_build_api_tests/src/interpreter/rule_defs/mod.rs b/app/buck2_build_api_tests/src/interpreter/rule_defs.rs similarity index 100% rename from app/buck2_build_api_tests/src/interpreter/rule_defs/mod.rs rename to app/buck2_build_api_tests/src/interpreter/rule_defs.rs diff --git a/app/buck2_build_api_tests/src/interpreter/rule_defs/artifact.rs b/app/buck2_build_api_tests/src/interpreter/rule_defs/artifact.rs new file mode 100644 index 0000000000000..1cd2b4f5e7fd3 --- /dev/null +++ b/app/buck2_build_api_tests/src/interpreter/rule_defs/artifact.rs @@ -0,0 +1,355 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use buck2_interpreter_for_build::interpreter::testing::expect_error; +use buck2_interpreter_for_build::interpreter::testing::Tester; +use indoc::indoc; + +use crate::interpreter::rule_defs::artifact::testing::artifactory; + +pub(crate) mod testing; + +#[test] +fn source_artifact() -> buck2_error::Result<()> { + let mut tester = Tester::new()?; + tester.additional_globals(artifactory); + tester.run_starlark_bzl_test(indoc!( + r#" + a1 = source_artifact("foo/bar", "baz/quz.h") + a2 = source_artifact("foo/bar", "baz/file1") + + def test(): + a3 = source_artifact("foo/bar", "baz/quz.cpp") + a4 = source_artifact("foo/bar", "baz/file2") + + assert_eq("", repr(a1)) + assert_eq("quz.h", a1.basename) + assert_eq("baz/quz.h", a1.short_path) + assert_eq(".h", a1.extension) + assert_eq(True, a1.is_source) + assert_eq(None, a1.owner) + + assert_eq("", repr(a2)) + assert_eq("file1", a2.basename) + assert_eq("baz/file1", a2.short_path) + assert_eq("", a2.extension) + assert_eq(True, a2.is_source) + assert_eq(None, a2.owner) + + assert_eq("", repr(a3)) + assert_eq("quz.cpp", a3.basename) + assert_eq("baz/quz.cpp", a3.short_path) + assert_eq(".cpp", a3.extension) + assert_eq(True, a3.is_source) + assert_eq(None, a3.owner) + + assert_eq("", repr(a4)) + assert_eq("file2", a4.basename) + assert_eq("baz/file2", a4.short_path) + assert_eq("", a4.extension) + assert_eq(True, a4.is_source) + assert_eq(None, a4.owner) + + # Validate that attrs are setup properly + for a in (a1, a2, a3, a4): + for prop in dir(a): + assert_eq(True, hasattr(a, prop)) + if prop != "as_output": + getattr(a, prop) + "# + ))?; + + let as_output = indoc!( + r#" + def test(): + source_artifact("foo/bar", "baz/quz.cpp").as_output() + "# + ); + expect_error( + tester.run_starlark_bzl_test(as_output), + as_output, + "Source artifacts may not be outputs", + ); + Ok(()) +} + +#[test] +fn bound_artifact() -> buck2_error::Result<()> { + let mut tester = Tester::new()?; + tester.additional_globals(buck2_build_api::interpreter::rule_defs::register_rule_defs); + tester.additional_globals(artifactory); + tester.run_starlark_bzl_test(indoc!( + r#" + a1 = bound_artifact("//foo:bar", "baz/quz.h") + a2 = bound_artifact("//foo:bar", "baz/file1") + + def test(): + a3 = bound_artifact("//foo:bar", "baz/quz.cpp") + a4 = bound_artifact("//foo:bar", "baz/file2") + + assert_eq_ignore_hash("#)>", repr(a1)) + assert_eq("quz.h", a1.basename) + assert_eq("baz/quz.h", a1.short_path) + assert_eq(".h", a1.extension) + assert_eq(False, a1.is_source) + assert_eq("bar", a1.owner.name) + + assert_eq_ignore_hash("#)>", repr(a2)) + assert_eq("file1", a2.basename) + assert_eq("baz/file1", a2.short_path) + assert_eq("", a2.extension) + assert_eq(False, a2.is_source) + assert_eq("bar", a2.owner.name) + + assert_eq_ignore_hash("#)>", repr(a3)) + assert_eq("quz.cpp", a3.basename) + assert_eq("baz/quz.cpp", a3.short_path) + assert_eq(".cpp", a3.extension) + assert_eq(False, a3.is_source) + assert_eq("bar", a3.owner.name) + + assert_eq_ignore_hash("#)>", repr(a4)) + assert_eq("file2", a4.basename) + assert_eq("baz/file2", a4.short_path) + assert_eq("", a4.extension) + assert_eq(False, a4.is_source) + assert_eq("bar", a4.owner.name) + + # Validate that attrs are setup properly + for a in (a1, a2, a3, a4): + for prop in dir(a): + assert_eq(True, hasattr(a, prop)) + if prop != "as_output": + getattr(a, prop) + "# + ))?; + + let as_output = indoc!( + r#" + def test(): + bound_artifact("//foo:bar", "baz/quz.cpp").as_output() + "# + ); + expect_error( + 
tester.run_starlark_bzl_test(as_output), + as_output, + "already used", + ); + Ok(()) +} + +#[test] +fn declared_artifact() -> buck2_error::Result<()> { + let mut tester = Tester::new()?; + tester.additional_globals(artifactory); + tester.run_starlark_bzl_test(indoc!( + r#" + def test(): + a1 = declared_artifact("baz/quz.cpp") + a2 = declared_artifact("baz/file2") + + assert_eq("", repr(a1)) + assert_eq("quz.cpp", a1.basename) + assert_eq(".cpp", a1.extension) + assert_eq(False, a1.is_source) + assert_eq(None, a1.owner) + assert_eq("", repr(a1.as_output())) + + assert_eq("", repr(a2)) + assert_eq("file2", a2.basename) + assert_eq("", a2.extension) + assert_eq(False, a2.is_source) + assert_eq(None, a2.owner) + assert_eq("", repr(a2.as_output())) + + # Validate that attrs are setup properly + for a in (a1, a2): + for prop in dir(a): + assert_eq(True, hasattr(a, prop)) + if prop != "as_output": + getattr(a, prop) + "# + ))?; + Ok(()) +} + +#[test] +fn declared_bound() -> buck2_error::Result<()> { + let mut tester = Tester::new()?; + tester.additional_globals(buck2_build_api::interpreter::rule_defs::register_rule_defs); + tester.additional_globals(artifactory); + tester.run_starlark_bzl_test(indoc!( + r#" + a1 = declared_bound_artifact("//foo:bar", "baz/quz.h") + a2 = declared_bound_artifact("//foo:bar", "baz/file1") + + def test(): + a3 = declared_bound_artifact("//foo:bar", "baz/quz.cpp") + a4 = declared_bound_artifact("//foo:bar", "baz/file2") + + assert_eq_ignore_hash("#)>", repr(a1)) + assert_eq("quz.h", a1.basename) + assert_eq("baz/quz.h", a1.short_path) + assert_eq(".h", a1.extension) + assert_eq(False, a1.is_source) + assert_eq("bar", a1.owner.name) + + assert_eq_ignore_hash("#)>", repr(a2)) + assert_eq("file1", a2.basename) + assert_eq("baz/file1", a2.short_path) + assert_eq("", a2.extension) + assert_eq(False, a2.is_source) + assert_eq("bar", a2.owner.name) + + assert_eq_ignore_hash("#)>", repr(a3)) + assert_eq("quz.cpp", a3.basename) + assert_eq("baz/quz.cpp", a3.short_path) + assert_eq(".cpp", a3.extension) + assert_eq(False, a3.is_source) + assert_eq("bar", a3.owner.name) + + assert_eq_ignore_hash("#)>", repr(a4)) + assert_eq("file2", a4.basename) + assert_eq("baz/file2", a4.short_path) + assert_eq("", a4.extension) + assert_eq(False, a4.is_source) + assert_eq("bar", a4.owner.name) + + # Validate that attrs are setup properly + for a in (a1, a2, a3, a4): + for prop in dir(a): + assert_eq(True, hasattr(a, prop)) + if prop != "as_output": + getattr(a, prop) + "# + ))?; + + Ok(()) +} + +#[test] +fn project_declared_artifact() -> buck2_error::Result<()> { + let mut tester = Tester::new()?; + tester.additional_globals(artifactory); + tester.run_starlark_bzl_test(indoc!( + r#" + def test(): + source = source_artifact("foo/bar", "src").project("baz.cpp") + assert_eq("", repr(source)) + assert_eq("baz.cpp", source.basename) + assert_eq(".cpp", source.extension) + + bound = bound_artifact("//foo:bar", "baz").project("quz.h") + assert_eq_ignore_hash("#)>", repr(bound)) + assert_eq("quz.h", bound.basename) + assert_eq(".h", bound.extension) + + bound = declared_bound_artifact("//foo:bar", "out").project("baz.o") + assert_eq_ignore_hash("#)>", repr(bound)) + assert_eq("baz.o", bound.basename) + assert_eq(".o", bound.extension) + + unbound = declared_artifact("out").project("qux.so") + assert_eq("", repr(unbound)) + assert_eq("", repr(unbound.as_output())) + assert_eq("qux.so", unbound.basename) + assert_eq(".so", unbound.extension) + "# + ))?; + Ok(()) +} + +#[test] +fn test_short_path() -> 
buck2_error::Result<()> { + let mut tester = Tester::new()?; + tester.additional_globals(artifactory); + tester.run_starlark_bzl_test(indoc!( + r#" + def test(): + test = declared_artifact("foo/bar/baz") + assert_eq("foo/bar/baz", test.short_path) + + test = declared_artifact("foo").project("bar/baz") + assert_eq("foo/bar/baz", test.short_path) + + test = declared_artifact("foo").project("bar").project("baz") + assert_eq("foo/bar/baz", test.short_path) + + test = declared_artifact("foo").project("bar/baz", hide_prefix=True) + assert_eq("bar/baz", test.short_path) + + test = declared_artifact("foo").project("bar").project("baz", hide_prefix=True) + assert_eq("baz", test.short_path) + + test = declared_artifact("foo").project("bar", hide_prefix=True).project("baz") + assert_eq("bar/baz", test.short_path) + + test = declared_artifact("foo").project("bar", hide_prefix=True).project("baz", hide_prefix=True) + assert_eq("baz", test.short_path) + "# + ))?; + Ok(()) +} + +#[test] +fn stringifies_for_command_line() -> buck2_error::Result<()> { + let mut tester = Tester::new()?; + tester.additional_globals(artifactory); + tester.run_starlark_bzl_test(indoc!( + r#" + a1 = bound_artifact("//foo:bar", "baz/quz.h") + a2 = source_artifact("foo/bar", "baz/file1") + + def test(): + a3 = bound_artifact("//foo:bar", "baz/quz.cpp") + a4 = source_artifact("foo/bar", "baz/file2") + + assert_eq_ignore_hash("buck-out/v2/gen/root//foo/__bar__/baz/quz.h", stringify_for_cli(a1)) + assert_eq("foo/bar/baz/file1", stringify_for_cli(a2)) + assert_eq_ignore_hash("buck-out/v2/gen/root//foo/__bar__/baz/quz.cpp", stringify_for_cli(a3)) + assert_eq("foo/bar/baz/file2", stringify_for_cli(a4)) + "# + )) +} + +#[test] +fn bound_artifact_with_associated_artifacts() -> buck2_error::Result<()> { + let mut tester = Tester::new()?; + tester.additional_globals(buck2_build_api::interpreter::rule_defs::register_rule_defs); + tester.additional_globals(artifactory); + tester.run_starlark_bzl_test(indoc!( + r#" + def test(): + # declare an artifact (a2) with string and add an associated artifact (a1) + a1 = source_artifact("foo/bar", "baz/file1") + a2 = declared_bound_artifact_with_associated_artifacts("baz/quz.h", [a1]) + assert_eq(a2.short_path, "baz/quz.h") + assert_eq(get_associated_artifacts_as_string(a1), "") + assert_eq(get_associated_artifacts_as_string(a2), "root//foo/bar/baz/file1") + + # use a predeclared artifact (a3) and add an associated artifact (a4) + a3 = declared_artifact("wom/bat.h") + a4 = source_artifact("foo/bar", "baz/file2") + a5 = declared_bound_artifact_with_associated_artifacts(a3, [a4]) + assert_eq(a3.short_path, "wom/bat.h") + assert_eq(a5.short_path, "wom/bat.h") + assert_eq(get_associated_artifacts_as_string(a3), "") + assert_eq(get_associated_artifacts_as_string(a5), "root//foo/bar/baz/file2") + + # use a predeclared artifact (a3) with no associated artifacts + a6 = declared_bound_artifact_with_associated_artifacts(a3, []) + assert_eq(a6.short_path, "wom/bat.h") + assert_eq(get_associated_artifacts_as_string(a6), "") + + a7 = a5.without_associated_artifacts() + assert_eq(a5.short_path, a7.short_path) + assert_eq(get_associated_artifacts_as_string(a7), "") + "# + )) +} diff --git a/app/buck2_build_api_tests/src/interpreter/rule_defs/artifact/mod.rs b/app/buck2_build_api_tests/src/interpreter/rule_defs/artifact/mod.rs deleted file mode 100644 index 208fee4205be1..0000000000000 --- a/app/buck2_build_api_tests/src/interpreter/rule_defs/artifact/mod.rs +++ /dev/null @@ -1,382 +0,0 @@ -/* - * Copyright (c) Meta 
Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use buck2_common::result::SharedResult; -use buck2_interpreter_for_build::interpreter::testing::expect_error; -use buck2_interpreter_for_build::interpreter::testing::Tester; -use indoc::indoc; - -use crate::interpreter::rule_defs::artifact::testing::artifactory; - -pub(crate) mod testing; - -#[test] -fn source_artifact() -> SharedResult<()> { - let mut tester = Tester::new()?; - tester.additional_globals(artifactory); - tester.run_starlark_bzl_test(indoc!( - r#" - a1 = source_artifact("foo/bar", "baz/quz.h") - a2 = source_artifact("foo/bar", "baz/file1") - - def test(): - a3 = source_artifact("foo/bar", "baz/quz.cpp") - a4 = source_artifact("foo/bar", "baz/file2") - - assert_eq("", repr(a1)) - assert_eq("quz.h", a1.basename) - assert_eq("baz/quz.h", a1.short_path) - assert_eq(".h", a1.extension) - assert_eq(True, a1.is_source) - assert_eq(None, a1.owner) - - assert_eq("", repr(a2)) - assert_eq("file1", a2.basename) - assert_eq("baz/file1", a2.short_path) - assert_eq("", a2.extension) - assert_eq(True, a2.is_source) - assert_eq(None, a2.owner) - - assert_eq("", repr(a3)) - assert_eq("quz.cpp", a3.basename) - assert_eq("baz/quz.cpp", a3.short_path) - assert_eq(".cpp", a3.extension) - assert_eq(True, a3.is_source) - assert_eq(None, a3.owner) - - assert_eq("", repr(a4)) - assert_eq("file2", a4.basename) - assert_eq("baz/file2", a4.short_path) - assert_eq("", a4.extension) - assert_eq(True, a4.is_source) - assert_eq(None, a4.owner) - - # Validate that attrs are setup properly - for a in (a1, a2, a3, a4): - for prop in dir(a): - assert_eq(True, hasattr(a, prop)) - if prop != "as_output": - getattr(a, prop) - "# - ))?; - - let as_output = indoc!( - r#" - def test(): - source_artifact("foo/bar", "baz/quz.cpp").as_output() - "# - ); - expect_error( - tester.run_starlark_bzl_test(as_output), - as_output, - "Source artifacts may not be outputs", - ); - Ok(()) -} - -#[test] -fn bound_artifact() -> SharedResult<()> { - let mut tester = Tester::new()?; - tester.additional_globals(buck2_build_api::interpreter::rule_defs::register_rule_defs); - tester.additional_globals(artifactory); - tester.run_starlark_bzl_test(indoc!( - r#" - a1 = bound_artifact("//foo:bar", "baz/quz.h") - a2 = bound_artifact("//foo:bar", "baz/file1") - - def test(): - a3 = bound_artifact("//foo:bar", "baz/quz.cpp") - a4 = bound_artifact("//foo:bar", "baz/file2") - - assert_eq_ignore_hash("#)>", repr(a1)) - assert_eq("quz.h", a1.basename) - assert_eq("baz/quz.h", a1.short_path) - assert_eq(".h", a1.extension) - assert_eq(False, a1.is_source) - assert_eq("bar", a1.owner.name) - - assert_eq_ignore_hash("#)>", repr(a2)) - assert_eq("file1", a2.basename) - assert_eq("baz/file1", a2.short_path) - assert_eq("", a2.extension) - assert_eq(False, a2.is_source) - assert_eq("bar", a2.owner.name) - - assert_eq_ignore_hash("#)>", repr(a3)) - assert_eq("quz.cpp", a3.basename) - assert_eq("baz/quz.cpp", a3.short_path) - assert_eq(".cpp", a3.extension) - assert_eq(False, a3.is_source) - assert_eq("bar", a3.owner.name) - - assert_eq_ignore_hash("#)>", repr(a4)) - assert_eq("file2", a4.basename) - assert_eq("baz/file2", a4.short_path) - assert_eq("", a4.extension) - assert_eq(False, a4.is_source) - assert_eq("bar", a4.owner.name) - - # Validate that attrs 
are setup properly - for a in (a1, a2, a3, a4): - for prop in dir(a): - assert_eq(True, hasattr(a, prop)) - if prop != "as_output": - getattr(a, prop) - "# - ))?; - - let as_output = indoc!( - r#" - def test(): - bound_artifact("//foo:bar", "baz/quz.cpp").as_output() - "# - ); - expect_error( - tester.run_starlark_bzl_test(as_output), - as_output, - "already used", - ); - Ok(()) -} - -#[test] -fn declared_artifact() -> SharedResult<()> { - let mut tester = Tester::new()?; - tester.additional_globals(artifactory); - tester.run_starlark_bzl_test(indoc!( - r#" - def test(): - a1 = declared_artifact("baz/quz.cpp") - a2 = declared_artifact("baz/file2") - - assert_eq("", repr(a1)) - assert_eq("quz.cpp", a1.basename) - assert_eq(".cpp", a1.extension) - assert_eq(False, a1.is_source) - assert_eq(None, a1.owner) - assert_eq("", repr(a1.as_output())) - - assert_eq("", repr(a2)) - assert_eq("file2", a2.basename) - assert_eq("", a2.extension) - assert_eq(False, a2.is_source) - assert_eq(None, a2.owner) - assert_eq("", repr(a2.as_output())) - - # Validate that attrs are setup properly - for a in (a1, a2): - for prop in dir(a): - assert_eq(True, hasattr(a, prop)) - if prop != "as_output": - getattr(a, prop) - "# - ))?; - Ok(()) -} - -#[test] -fn declared_bound() -> SharedResult<()> { - let mut tester = Tester::new()?; - tester.additional_globals(buck2_build_api::interpreter::rule_defs::register_rule_defs); - tester.additional_globals(artifactory); - tester.run_starlark_bzl_test(indoc!( - r#" - a1 = declared_bound_artifact("//foo:bar", "baz/quz.h") - a2 = declared_bound_artifact("//foo:bar", "baz/file1") - - def test(): - a3 = declared_bound_artifact("//foo:bar", "baz/quz.cpp") - a4 = declared_bound_artifact("//foo:bar", "baz/file2") - - assert_eq_ignore_hash("#)>", repr(a1)) - assert_eq("quz.h", a1.basename) - assert_eq("baz/quz.h", a1.short_path) - assert_eq(".h", a1.extension) - assert_eq(False, a1.is_source) - assert_eq("bar", a1.owner.name) - - assert_eq_ignore_hash("#)>", repr(a2)) - assert_eq("file1", a2.basename) - assert_eq("baz/file1", a2.short_path) - assert_eq("", a2.extension) - assert_eq(False, a2.is_source) - assert_eq("bar", a2.owner.name) - - assert_eq_ignore_hash("#)>", repr(a3)) - assert_eq("quz.cpp", a3.basename) - assert_eq("baz/quz.cpp", a3.short_path) - assert_eq(".cpp", a3.extension) - assert_eq(False, a3.is_source) - assert_eq("bar", a3.owner.name) - - assert_eq_ignore_hash("#)>", repr(a4)) - assert_eq("file2", a4.basename) - assert_eq("baz/file2", a4.short_path) - assert_eq("", a4.extension) - assert_eq(False, a4.is_source) - assert_eq("bar", a4.owner.name) - - # Validate that attrs are setup properly - for a in (a1, a2, a3, a4): - for prop in dir(a): - assert_eq(True, hasattr(a, prop)) - if prop != "as_output": - getattr(a, prop) - "# - ))?; - - Ok(()) -} - -#[test] -fn project_declared_artifact() -> SharedResult<()> { - let mut tester = Tester::new()?; - tester.additional_globals(artifactory); - tester.run_starlark_bzl_test(indoc!( - r#" - def test(): - bound = declared_bound_artifact("//foo:bar", "out").project("baz.o") - assert_eq_ignore_hash("#)>", repr(bound)) - assert_eq("baz.o", bound.basename) - assert_eq(".o", bound.extension) - - unbound = declared_artifact("out").project("qux.so") - assert_eq("", repr(unbound)) - assert_eq("", repr(unbound.as_output())) - assert_eq("qux.so", unbound.basename) - assert_eq(".so", unbound.extension) - "# - ))?; - Ok(()) -} - -#[test] -fn test_short_path() -> SharedResult<()> { - let mut tester = Tester::new()?; - 
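
// Illustrative sketch, not part of the diff: the test_short_path cases in this diff
// pin down project() semantics: projecting appends to short_path, and
// hide_prefix=True drops everything declared before the projection. Assuming the
// same helpers these tests use (Tester, artifactory, declared_artifact):

#[test]
fn short_path_hide_prefix_sketch() -> buck2_error::Result<()> {
    let mut tester = Tester::new()?;
    tester.additional_globals(artifactory);
    tester.run_starlark_bzl_test(indoc!(
        r#"
        def test():
            # "gen" is hidden by the projection, so only the projected suffix remains.
            a = declared_artifact("gen").project("pkg/lib.a", hide_prefix=True)
            assert_eq("pkg/lib.a", a.short_path)
        "#
    ))
}
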
tester.additional_globals(artifactory); - tester.run_starlark_bzl_test(indoc!( - r#" - def test(): - test = declared_artifact("foo/bar/baz") - assert_eq("foo/bar/baz", test.short_path) - - test = declared_artifact("foo").project("bar/baz") - assert_eq("foo/bar/baz", test.short_path) - - test = declared_artifact("foo").project("bar").project("baz") - assert_eq("foo/bar/baz", test.short_path) - - test = declared_artifact("foo").project("bar/baz", hide_prefix=True) - assert_eq("bar/baz", test.short_path) - - test = declared_artifact("foo").project("bar").project("baz", hide_prefix=True) - assert_eq("baz", test.short_path) - - test = declared_artifact("foo").project("bar", hide_prefix=True).project("baz") - assert_eq("bar/baz", test.short_path) - - test = declared_artifact("foo").project("bar", hide_prefix=True).project("baz", hide_prefix=True) - assert_eq("baz", test.short_path) - "# - ))?; - Ok(()) -} - -#[test] -fn project_source_artifact() -> SharedResult<()> { - let mut tester = Tester::new()?; - tester.additional_globals(artifactory); - let test = indoc!( - r#" - def test(): - source_artifact("foo/bar", "baz").project("foo") - "# - ); - expect_error( - tester.run_starlark_bzl_test(test), - test, - "Source artifacts cannot be projected", - ); - Ok(()) -} - -#[test] -fn project_artifact() -> SharedResult<()> { - let mut tester = Tester::new()?; - tester.additional_globals(artifactory); - let test = indoc!( - r#" - def test(): - bound_artifact("//foo:bar", "baz").project("foo") - "# - ); - expect_error( - tester.run_starlark_bzl_test(test), - test, - "This artifact was declared by another rule", - ); - Ok(()) -} - -#[test] -fn stringifies_for_command_line() -> SharedResult<()> { - let mut tester = Tester::new()?; - tester.additional_globals(artifactory); - tester.run_starlark_bzl_test(indoc!( - r#" - a1 = bound_artifact("//foo:bar", "baz/quz.h") - a2 = source_artifact("foo/bar", "baz/file1") - - def test(): - a3 = bound_artifact("//foo:bar", "baz/quz.cpp") - a4 = source_artifact("foo/bar", "baz/file2") - - assert_eq_ignore_hash("buck-out/v2/gen/root//foo/__bar__/baz/quz.h", stringify_for_cli(a1)) - assert_eq("foo/bar/baz/file1", stringify_for_cli(a2)) - assert_eq_ignore_hash("buck-out/v2/gen/root//foo/__bar__/baz/quz.cpp", stringify_for_cli(a3)) - assert_eq("foo/bar/baz/file2", stringify_for_cli(a4)) - "# - )) -} - -#[test] -fn bound_artifact_with_associated_artifacts() -> SharedResult<()> { - let mut tester = Tester::new()?; - tester.additional_globals(buck2_build_api::interpreter::rule_defs::register_rule_defs); - tester.additional_globals(artifactory); - tester.run_starlark_bzl_test(indoc!( - r#" - def test(): - # declare an artifact (a2) with string and add an associated artifact (a1) - a1 = source_artifact("foo/bar", "baz/file1") - a2 = declared_bound_artifact_with_associated_artifacts("baz/quz.h", [a1]) - assert_eq(a2.short_path, "baz/quz.h") - assert_eq(get_associated_artifacts_as_string(a1), "") - assert_eq(get_associated_artifacts_as_string(a2), "root//foo/bar/baz/file1") - - # use a predeclared artifact (a3) and add an associated artifact (a4) - a3 = declared_artifact("wom/bat.h") - a4 = source_artifact("foo/bar", "baz/file2") - a5 = declared_bound_artifact_with_associated_artifacts(a3, [a4]) - assert_eq(a3.short_path, "wom/bat.h") - assert_eq(a5.short_path, "wom/bat.h") - assert_eq(get_associated_artifacts_as_string(a3), "") - assert_eq(get_associated_artifacts_as_string(a5), "root//foo/bar/baz/file2") - - # use a predeclared artifact (a3) with no associated artifacts - a6 = 
declared_bound_artifact_with_associated_artifacts(a3, []) - assert_eq(a6.short_path, "wom/bat.h") - assert_eq(get_associated_artifacts_as_string(a6), "") - - a7 = a5.without_associated_artifacts() - assert_eq(a5.short_path, a7.short_path) - assert_eq(get_associated_artifacts_as_string(a7), "") - "# - )) -} diff --git a/app/buck2_build_api_tests/src/interpreter/rule_defs/artifact/testing.rs b/app/buck2_build_api_tests/src/interpreter/rule_defs/artifact/testing.rs index 0a031b785b820..ab6073ef30272 100644 --- a/app/buck2_build_api_tests/src/interpreter/rule_defs/artifact/testing.rs +++ b/app/buck2_build_api_tests/src/interpreter/rule_defs/artifact/testing.rs @@ -7,25 +7,24 @@ * of this source tree. */ +use buck2_artifact::actions::key::ActionIndex; use buck2_artifact::artifact::artifact_type::testing::BuildArtifactTestingExt; use buck2_artifact::artifact::artifact_type::Artifact; use buck2_artifact::artifact::build_artifact::BuildArtifact; use buck2_artifact::artifact::source_artifact::SourceArtifact; -use buck2_artifact::deferred::id::DeferredId; +use buck2_artifact::deferred::key::DeferredHolderKey; use buck2_build_api::actions::registry::ActionsRegistry; use buck2_build_api::analysis::registry::AnalysisRegistry; use buck2_build_api::artifact_groups::ArtifactGroup; -use buck2_build_api::deferred::types::BaseKey; -use buck2_build_api::deferred::types::DeferredRegistry; use buck2_build_api::interpreter::rule_defs::artifact::associated::AssociatedArtifacts; use buck2_build_api::interpreter::rule_defs::artifact::output_artifact_like::OutputArtifactArg; +use buck2_build_api::interpreter::rule_defs::artifact::starlark_artifact::StarlarkArtifact; use buck2_build_api::interpreter::rule_defs::artifact::starlark_artifact_like::ValueAsArtifactLike; -use buck2_build_api::interpreter::rule_defs::artifact::StarlarkArtifact; -use buck2_build_api::interpreter::rule_defs::artifact::StarlarkDeclaredArtifact; +use buck2_build_api::interpreter::rule_defs::artifact::starlark_declared_artifact::StarlarkDeclaredArtifact; +use buck2_build_api::interpreter::rule_defs::artifact::unpack_artifact::UnpackArtifactOrDeclaredArtifact; use buck2_build_api::interpreter::rule_defs::cmd_args::DefaultCommandLineContext; use buck2_core::base_deferred_key::BaseDeferredKey; -use buck2_core::buck_path::path::BuckPath; -use buck2_core::category::Category; +use buck2_core::category::CategoryRef; use buck2_core::cells::paths::CellRelativePath; use buck2_core::configuration::data::ConfigurationData; use buck2_core::execution_types::execution::ExecutionPlatformResolution; @@ -36,22 +35,25 @@ use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; use buck2_core::fs::project::ProjectRoot; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; -use buck2_core::package::package_relative_path::PackageRelativePathBuf; +use buck2_core::package::package_relative_path::PackageRelativePath; +use buck2_core::package::source_path::SourcePath; use buck2_core::package::PackageLabel; +use buck2_core::pattern::pattern::ParsedPattern; use buck2_core::pattern::pattern_type::TargetPatternExtra; -use buck2_core::pattern::ParsedPattern; use buck2_core::target::configured_target_label::ConfiguredTargetLabel; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use buck2_execute::artifact::fs::ExecutorFs; use buck2_execute::execute::request::OutputType; use buck2_interpreter_for_build::interpreter::build_context::BuildContext; use 
buck2_interpreter_for_build::interpreter::testing::cells; +use buck2_util::arc_str::ArcS; use dupe::Dupe; use indexmap::indexset; use indexmap::IndexSet; use starlark::environment::GlobalsBuilder; use starlark::eval::Evaluator; use starlark::starlark_module; +use starlark::values::list_or_tuple::UnpackListOrTuple; use starlark::values::Value; use crate::actions::testings::SimpleUnregisteredAction; @@ -62,6 +64,7 @@ fn get_label(eval: &Evaluator, target: &str) -> anyhow::Result<TargetLabel> { Ok(TargetLabel::new(package, target_name.as_ref()) @@ -83,10 +86,7 @@ pub(crate) fn artifactory(builder: &mut GlobalsBuilder) { ctx.build_file_cell().name(), CellRelativePath::from_path(package).unwrap(), ); - let path = BuckPath::testing_new( - package, - PackageRelativePathBuf::try_from(path.to_owned()).unwrap(), - ); + let path = SourcePath::new(package, ArcS::from(PackageRelativePath::new(path)?)); Ok(StarlarkArtifact::new(SourceArtifact::new(path).into())) } @@ -96,12 +96,8 @@ pub(crate) fn artifactory(builder: &mut GlobalsBuilder) { eval: &mut Evaluator, ) -> anyhow::Result<StarlarkArtifact> { let target_label = get_label(eval, target)?; - let id = DeferredId::testing_new(0); - let artifact = Artifact::from(BuildArtifact::testing_new( - target_label, - ForwardRelativePathBuf::try_from(path.to_owned()).unwrap(), - id, - )); + let id = ActionIndex::new(0); + let artifact = Artifact::from(BuildArtifact::testing_new(target_label, path, id)); Ok(StarlarkArtifact::new(artifact)) } @@ -111,8 +107,9 @@ pub(crate) fn artifactory(builder: &mut GlobalsBuilder) { ) -> anyhow::Result<StarlarkDeclaredArtifact> { let target_label = get_label(eval, "//foo:bar")?; let mut registry = ActionsRegistry::new( - BaseDeferredKey::TargetLabel(target_label), + DeferredHolderKey::Base(BaseDeferredKey::TargetLabel(target_label)), ExecutionPlatformResolution::unspecified(), + None, ); let artifact = registry.declare_artifact( None, @@ -133,12 +130,10 @@ pub(crate) fn artifactory(builder: &mut GlobalsBuilder) { eval: &mut Evaluator, ) -> anyhow::Result<StarlarkDeclaredArtifact> { let target_label = get_label(eval, target)?; - let mut deferred = DeferredRegistry::new(BaseKey::Base(BaseDeferredKey::TargetLabel( - target_label.dupe(), - ))); let mut registry = ActionsRegistry::new( - BaseDeferredKey::TargetLabel(target_label), + DeferredHolderKey::Base(BaseDeferredKey::TargetLabel(target_label.dupe())), ExecutionPlatformResolution::unspecified(), + None, ); let artifact = registry.declare_artifact( None, @@ -148,10 +143,14 @@ pub(crate) fn artifactory(builder: &mut GlobalsBuilder) { )?; let outputs = indexset![artifact.as_output()]; registry.register( - &mut deferred, + &DeferredHolderKey::Base(BaseDeferredKey::TargetLabel(target_label.dupe())), IndexSet::new(), outputs, - SimpleUnregisteredAction::new(vec![], Category::try_from("fake_action").unwrap(), None), + SimpleUnregisteredAction::new( + vec![], + CategoryRef::new("fake_action").unwrap().to_owned(), + None, + ), )?; Ok(StarlarkDeclaredArtifact::new( None, @@ -181,16 +180,16 @@ pub(crate) fn artifactory(builder: &mut GlobalsBuilder) { .add_to_command_line(&mut cli, &mut ctx) .unwrap(); assert_eq!(1, cli.len()); - Ok(cli.get(0).unwrap().to_owned()) + Ok(cli.first().unwrap().to_owned()) }
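
// Hedged sketch of why the signature below migrates from Vec<...> to
// UnpackListOrTuple<...>: the latter unpacks both a Starlark list and a tuple,
// and exposes the unpacked elements through its `items` field (as the `.items`
// change further down shows). A minimal standalone global, illustrative only:

#[starlark_module]
fn list_or_tuple_example(builder: &mut GlobalsBuilder) {
    // Callable from Starlark as concat_all(["a", "b"]) or concat_all(("a", "b")).
    fn concat_all<'v>(values: UnpackListOrTuple<&'v str>) -> anyhow::Result<String> {
        Ok(values.items.concat())
    }
}
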
// Mainly tests get_or_declare_output function that can transfer associated artifacts // artifact parameter can be either string or artifact - #[allow(clippy::from_iter_instead_of_collect)] fn declared_bound_artifact_with_associated_artifacts<'v>( + // TODO(nga): parameters should be either positional or named, not both. artifact: OutputArtifactArg<'v>, - associated_artifacts: Vec, - eval: &mut Evaluator<'v, '_>, + associated_artifacts: UnpackListOrTuple<ValueAsArtifactLike<'v>>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result<Value<'v>> { let target_label = get_label(eval, "//foo:bar")?; let mut analysis_registry = AnalysisRegistry::new_from_owner( @@ -198,26 +197,29 @@ pub(crate) fn artifactory(builder: &mut GlobalsBuilder) { ExecutionPlatformResolution::unspecified(), )?; let mut actions_registry = ActionsRegistry::new( - BaseDeferredKey::TargetLabel(target_label.dupe()), + DeferredHolderKey::Base(BaseDeferredKey::TargetLabel(target_label.dupe())), ExecutionPlatformResolution::unspecified(), + None, ); - let mut deferred = DeferredRegistry::new(BaseKey::Base(BaseDeferredKey::TargetLabel( - target_label.dupe(), - ))); let associated_artifacts = AssociatedArtifacts::from( associated_artifacts + .items .iter() - .map(|a| ArtifactGroup::Artifact(a.artifact())), + .map(|a| ArtifactGroup::Artifact(a.artifact().unwrap())), ); let (declaration, output_artifact) = analysis_registry.get_or_declare_output(eval, artifact, OutputType::File)?; actions_registry.register( - &mut deferred, + &DeferredHolderKey::Base(BaseDeferredKey::TargetLabel(target_label.dupe())), IndexSet::new(), indexset![output_artifact], - SimpleUnregisteredAction::new(vec![], Category::try_from("fake_action").unwrap(), None), + SimpleUnregisteredAction::new( + vec![], + CategoryRef::new("fake_action").unwrap().to_owned(), + None, + ), )?; let value = declaration diff --git a/app/buck2_build_api_tests/src/interpreter/rule_defs/artifact_tagging/mod.rs b/app/buck2_build_api_tests/src/interpreter/rule_defs/artifact_tagging.rs similarity index 100% rename from app/buck2_build_api_tests/src/interpreter/rule_defs/artifact_tagging/mod.rs rename to app/buck2_build_api_tests/src/interpreter/rule_defs/artifact_tagging.rs diff --git a/app/buck2_build_api_tests/src/interpreter/rule_defs/cmd_args/mod.rs b/app/buck2_build_api_tests/src/interpreter/rule_defs/cmd_args.rs similarity index 100% rename from app/buck2_build_api_tests/src/interpreter/rule_defs/cmd_args/mod.rs rename to app/buck2_build_api_tests/src/interpreter/rule_defs/cmd_args.rs diff --git a/app/buck2_build_api_tests/src/interpreter/rule_defs/cmd_args/testing.rs b/app/buck2_build_api_tests/src/interpreter/rule_defs/cmd_args/testing.rs index 9d99f97c25b05..71ce60c290f13 100644 --- a/app/buck2_build_api_tests/src/interpreter/rule_defs/cmd_args/testing.rs +++ b/app/buck2_build_api_tests/src/interpreter/rule_defs/cmd_args/testing.rs @@ -19,7 +19,9 @@ use buck2_execute::artifact::fs::ExecutorFs; use buck2_interpreter_for_build::interpreter::testing::cells; use starlark::environment::GlobalsBuilder; use starlark::starlark_module; +use starlark::values::UnpackValue; use starlark::values::Value; +use starlark::StarlarkResultExt; fn artifact_fs() -> ArtifactFs { let cell_info = cells(None).unwrap(); @@ -39,10 +41,10 @@ fn get_command_line(value: Value) -> anyhow::Result<Vec<String>> { let mut cli = Vec::<String>::new(); let mut ctx = DefaultCommandLineContext::new(&executor_fs); - match value.as_command_line() { - Some(v) => v.add_to_command_line(&mut cli, &mut ctx), - None => value - .as_command_line_err()? + match ValueAsCommandLineLike::unpack_value(value).into_anyhow_result()? { + Some(v) => v.0.add_to_command_line(&mut cli, &mut ctx), + None => ValueAsCommandLineLike::unpack_value_err(value)? + .0 .add_to_command_line(&mut cli, &mut ctx), }?; Ok(cli) @@ -59,10 +61,10 @@ pub(crate) fn command_line_stringifier(builder: &mut GlobalsBuilder) { let executor_fs = ExecutorFs::new(&fs, PathSeparatorKind::Unix); let mut cli = Vec::<String>::new(); let mut ctx = DefaultCommandLineContext::new(&executor_fs); - value - .as_command_line_err()? + ValueAsCommandLineLike::unpack_value_err(value)? + .0 .add_to_command_line(&mut cli, &mut ctx)?; assert_eq!(1, cli.len()); - Ok(cli.get(0).unwrap().clone()) + Ok(cli.first().unwrap().clone()) } }
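
// The cmd_args/tests.rs changes below migrate cmd_args from mutating builder
// methods (cli.hidden(...), cli.relative_to(...), cli.parent(), ...) to keyword
// arguments on the constructor. A hedged sketch in the same Tester style,
// assuming the tester() and get_args helpers those tests already use:

#[test]
fn cmd_args_kwargs_sketch() -> buck2_error::Result<()> {
    let mut tester = tester()?;
    tester.run_starlark_bzl_test(indoc!(
        r#"
        def test():
            # One constructor call instead of cmd_args() plus cli.hidden("bar");
            # hidden inputs are tracked but not rendered into the argument list.
            cli = cmd_args("foo", hidden="bar")
            assert_eq(["foo"], get_args(cli))
        "#
    ))
}
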
diff --git a/app/buck2_build_api_tests/src/interpreter/rule_defs/cmd_args/tests.rs b/app/buck2_build_api_tests/src/interpreter/rule_defs/cmd_args/tests.rs index 31060c538ce23..205f86f756118 --- a/app/buck2_build_api_tests/src/interpreter/rule_defs/cmd_args/tests.rs +++ b/app/buck2_build_api_tests/src/interpreter/rule_defs/cmd_args/tests.rs @@ -11,7 +11,6 @@ use buck2_build_api::interpreter::rule_defs::cmd_args::value_as::ValueAsCommandLineLike; use buck2_build_api::interpreter::rule_defs::cmd_args::SimpleCommandLineArtifactVisitor; use buck2_build_api::interpreter::rule_defs::cmd_args::StarlarkCommandLineInputs; use buck2_build_api::interpreter::rule_defs::register_rule_defs; -use buck2_common::result::SharedResult; use buck2_core::bzl::ImportPath; use buck2_interpreter::types::regex::register_buck_regex; use buck2_interpreter_for_build::interpreter::testing::expect_error; @@ -20,6 +19,8 @@ use buck2_interpreter_for_build::label::testing::label_creator; use indoc::indoc; use starlark::environment::GlobalsBuilder; use starlark::starlark_module; +use starlark::values::list_or_tuple::UnpackListOrTuple; +use starlark::values::UnpackValue; use starlark::values::Value; use crate::interpreter::rule_defs::artifact::testing::artifactory; @@ -27,10 +28,12 @@ use crate::interpreter::rule_defs::cmd_args::testing; #[starlark_module] pub(crate) fn inputs_helper(builder: &mut GlobalsBuilder) { - fn make_inputs<'v>(values: Vec<Value<'v>>) -> anyhow::Result<StarlarkCommandLineInputs> { + fn make_inputs<'v>( + values: UnpackListOrTuple<Value<'v>>, + ) -> anyhow::Result<StarlarkCommandLineInputs> { let mut visitor = SimpleCommandLineArtifactVisitor::new(); for v in values { - let cli = v.as_command_line_err()?; + let cli = ValueAsCommandLineLike::unpack_value_err(v)?.0; cli.visit_artifacts(&mut visitor)?; } @@ -52,7 +55,7 @@ fn tester() -> anyhow::Result<Tester> { } #[test] -fn stringifies_correctly() -> SharedResult<()> { +fn stringifies_correctly() -> buck2_error::Result<()> { let mut tester = tester()?; tester.run_starlark_bzl_test(indoc!( r#" @@ -92,23 +95,23 @@ fn stringifies_correctly() -> SharedResult<()> { expect_error( tester.run_starlark_bzl_test(contents), contents, - "expected command line item to be a string", + "Expected `CellPath | artifact", ); Ok(()) } #[test] -fn displays_correctly() -> SharedResult<()> { +fn displays_correctly() -> buck2_error::Result<()> { let mut tester = tester()?; tester.run_starlark_bzl_test(indoc!( r#" def test(): - cli = cmd_args() + cli = cmd_args(format="x{}y", quote="shell", hidden="bar") cli.add("foo") - cli.hidden("bar") - assert_eq('cmd_args("foo", hidden=["bar"])', str(cli)) - assert_eq('cmd_args(\n "foo",\n hidden=[ "bar" ]\n)', pprint_str(cli)) + # TODO(nga): fix options formatting. 
+ assert_eq('cmd_args("foo", hidden=["bar"], format="x{}y", quote="shell")', str(cli)) + assert_eq('cmd_args(\n "foo",\n hidden=[ "bar" ],\n format="x{}y",\n quote="shell"\n)', prepr(cli)) "# ))?; @@ -116,7 +119,21 @@ fn displays_correctly() -> SharedResult<()> { } #[test] -fn command_line_builder() -> SharedResult<()> { +fn displays_correctly_replace_regex() { + let mut tester = tester().unwrap(); + tester + .run_starlark_bzl_test(indoc!( + r#" + def test(): + cli = cmd_args(replace_regex=(regex("foo"), "bar")) + assert_eq('cmd_args(replacements=[("foo", "bar")])', str(cli)) + "# + )) + .unwrap(); +} + +#[test] +fn command_line_builder() -> buck2_error::Result<()> { let mut tester = tester()?; let content = indoc!( r#" @@ -235,30 +252,27 @@ fn command_line_builder() -> SharedResult<()> { expect_error( tester.run_starlark_bzl_test(content_invalid_type_1), content_invalid_type_1, - "expected command line item", + "Expected `CellPath | artifact", ); expect_error( tester.run_starlark_bzl_test(content_invalid_type_3), content_invalid_type_3, - "expected command line item", + "Expected `CellPath | artifact", ); Ok(()) } #[test] -fn test_relative_absolute() -> anyhow::Result<()> { +fn test_relative_absolute_old() -> anyhow::Result<()> { let mut tester = tester()?; let contents = indoc!( r#" def test(): - args = cmd_args() + args = cmd_args(absolute_prefix="$ABSOLUTE/", absolute_suffix="!") args.add(source_artifact("foo","bar/baz/qux.h")) args.relative_to(source_artifact("foo", "bar/foo")) - args.absolute_prefix("$ABSOLUTE/") - assert_eq(get_args(args), ["$ABSOLUTE/../baz/qux.h"]) - args.absolute_suffix("!") assert_eq(get_args(args), ["$ABSOLUTE/../baz/qux.h!"]) args = cmd_args() @@ -271,15 +285,78 @@ fn test_relative_absolute() -> anyhow::Result<()> { Ok(()) } +#[test] +fn test_relative_absolute() -> anyhow::Result<()> { + let mut tester = tester()?; + let contents = indoc!( + r#" + def test(): + args = cmd_args(absolute_prefix="$ABSOLUTE/", absolute_suffix="!", relative_to=source_artifact("foo", "bar/foo")) + args.add(source_artifact("foo","bar/baz/qux.h")) + + assert_eq(get_args(args), ["$ABSOLUTE/../baz/qux.h!"]) + + args = cmd_args(relative_to=(source_artifact("foo", "bar/baz"), 1)) + args.add(source_artifact("foo","bar/baz/qux.h")) + assert_eq(get_args(args), ["baz/qux.h"]) + "# + ); + tester.run_starlark_bzl_test(contents)?; + Ok(()) +} + +#[test] +fn test_relative_to_propagated_up_and_down() -> anyhow::Result<()> { + let mut tester = tester()?; + let contents = indoc!( + r#" + def test(): + args = cmd_args(source_artifact("foo", "bar.h")) + # Self check + assert_eq(get_args(args), ["foo/bar.h"]) + + # `relative_to` is propagated down to `args` + args2 = cmd_args(args, relative_to=(source_artifact("foo", "baz.c"), 1)) + assert_eq(get_args(args2), ["./bar.h"]) + + # `relative_to` is propagated up to `args3` + args3 = cmd_args(args2) + assert_eq(get_args(args3), ["./bar.h"]) + "# + ); + tester.run_starlark_bzl_test(contents)?; + Ok(()) +} + +#[test] +fn test_relative_to_does_not_affect_new_artifacts() -> anyhow::Result<()> { + let mut tester = tester().unwrap(); + let content = indoc!( + r#" + def test(): + args = cmd_args( + source_artifact("foo", "bar.h"), + relative_to=(source_artifact("foo", "baz.c"), 1), + ) + # Self check + assert_eq(get_args(args), ["./bar.h"]) + + args2 = cmd_args(args, source_artifact("foo", "bar2.h")) + assert_eq(get_args(args2), ["./bar.h", "foo/bar2.h"]) + "# + ); + tester.run_starlark_bzl_test(content)?; + Ok(()) +} + #[test] fn test_parent() -> 
anyhow::Result<()> { let mut tester = tester()?; let contents = indoc!( r#" def test(): - args = cmd_args() + args = cmd_args(absolute_suffix="!", parent=1) args.add(source_artifact("foo","bar/baz/qux.h")) - args.parent().absolute_suffix("!") assert_eq(get_args(args), ["foo/bar/baz!"]) "# ); @@ -288,9 +365,8 @@ fn test_parent() -> anyhow::Result<()> { let too_many_parent_calls = indoc!( r#" def test(): - args = cmd_args() + args = cmd_args(parent=3) args.add(source_artifact("foo","qux.h")) - args.parent().parent().parent() get_args(args) "# ); @@ -309,20 +385,30 @@ fn test_parent_n() -> anyhow::Result<()> { let contents = indoc!( r#" def test(): - args = cmd_args() - args.add(source_artifact("foo","bar/baz/qux.h")) - args.parent(2).absolute_suffix("!") + args = cmd_args( + source_artifact("foo","bar/baz/qux.h"), + absolute_suffix="!", + parent=2, + ) assert_eq(get_args(args), ["foo/bar!"]) "# ); tester.run_starlark_bzl_test(contents)?; + Ok(()) +} + +#[test] +fn test_parent_n_too_many_parents() -> anyhow::Result<()> { + let mut tester = tester()?; + let too_many_parent_calls = indoc!( r#" def test(): - args = cmd_args() - args.add(source_artifact("foo","qux.h")) - args.parent(3) + args = cmd_args( + source_artifact("foo","qux.h"), + parent=3, + ) get_args(args) "# ); @@ -332,19 +418,27 @@ fn test_parent_n() -> anyhow::Result<()> { "too many .parent() calls", ); + Ok(()) +} + +#[test] +fn test_parent_n_parent_type() -> anyhow::Result<()> { + let mut tester = tester()?; + let bad_count = indoc!( r#" def test(): - args = cmd_args() - args.add(source_artifact("foo","qux.h")) - args.parent(-12) + args = cmd_args( + source_artifact("foo","qux.h"), + parent=-12, + ) get_args(args) "# ); expect_error( tester.run_starlark_bzl_test(bad_count), bad_count, - "Type of parameter `count` doesn't match", + "Integer value is too big to fit in u32", ); Ok(()) @@ -431,11 +525,10 @@ fn test_inputs_outputs() -> anyhow::Result<()> { artifact4 = bound_artifact("//:dep2", "dir/quz.h") artifact5 = declared_artifact("declared") - cli = cmd_args() + cli = cmd_args(hidden=artifact1) cli.add(artifact3) cli.add("just a string") cli.add(artifact4) - cli.hidden(artifact1) cli.add(artifact5.as_output()) assert_eq(make_inputs([artifact3, artifact4, artifact1]), cli.inputs) @@ -458,9 +551,8 @@ fn test_ignore_artifacts() -> anyhow::Result<()> { def test(): artifact = bound_artifact("//:dep2", "dir/quz.h") - cli = cmd_args() + cli = cmd_args(ignore_artifacts=True) cli.add(artifact) - cli.ignore_artifacts() assert_eq(make_inputs([]), cli.inputs) assert_eq([], cli.outputs) @@ -593,13 +685,45 @@ fn test_replace_regex() -> anyhow::Result<()> { let contents = indoc!( r#" def test(): - args = cmd_args("$OUT", "$OUTPUT", "$SRCS", format="$OUT: {}") - args.replace_regex("\\$OUT\\b", "%OUT%") - args.replace_regex("\\$SRCS\\b", "%SRCS%") + args = cmd_args( + "$OUT", + "$OUTPUT", + "$SRCS", + format="$OUT: {}", + replace_regex=[("\\$OUT\\b", "%OUT%"), ("\\$SRCS\\b", "%SRCS%")], + ) assert_eq(["$OUT: %OUT%", "$OUT: $OUTPUT", "$OUT: %SRCS%"], get_args(args)) - args = cmd_args("\\n\n") - args.replace_regex("\\\\n", "\\\n").replace_regex("\\n", "\\n") + args = cmd_args( + "\\n\n", + replace_regex=[("\\\\n", "\\\n"), ("\\n", "\\n")], + ) + assert_eq(["\\\\n\\n"], get_args(args)) + "# + ); + tester.run_starlark_bzl_test(contents)?; + Ok(()) +} + +#[test] +fn test_replace_regex_old() -> anyhow::Result<()> { + let mut tester = tester()?; + let contents = indoc!( + r#" + def test(): + args = cmd_args( + "$OUT", + "$OUTPUT", + "$SRCS", + 
format="$OUT: {}", + replace_regex=[("\\$OUT\\b", "%OUT%"), ("\\$SRCS\\b", "%SRCS%")], + ) + assert_eq(["$OUT: %OUT%", "$OUT: $OUTPUT", "$OUT: %SRCS%"], get_args(args)) + + args = cmd_args( + "\\n\n", + replace_regex=[("\\\\n", "\\\n"), ("\\n", "\\n")], + ) assert_eq(["\\\\n\\n"], get_args(args)) "# ); @@ -613,13 +737,12 @@ fn test_replace_regex_regex() -> anyhow::Result<()> { let contents = indoc!( r#" def test(): - args = cmd_args("$OUT", "$OUTPUT", "$SRCS", format="$OUT: {}") - args.replace_regex(regex("\\$OUT\\b"), "%OUT%") - args.replace_regex(regex("\\$SRCS\\b"), "%SRCS%") + args = cmd_args("$OUT", "$OUTPUT", "$SRCS", format="$OUT: {}", + replace_regex=[(regex("\\$OUT\\b"), "%OUT%"), (regex("\\$SRCS\\b"), "%SRCS%")]) assert_eq(["$OUT: %OUT%", "$OUT: $OUTPUT", "$OUT: %SRCS%"], get_args(args)) - args = cmd_args("\\n\n") - args.replace_regex(regex("\\\\n"), "\\\n").replace_regex("\\n", "\\n") + args = cmd_args("\\n\n", + replace_regex = [(regex("\\\\n"), "\\\n"), ("\\n", "\\n")]) assert_eq(["\\\\n\\n"], get_args(args)) "# ); diff --git a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider.rs b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider.rs new file mode 100644 index 0000000000000..b7154642455fd --- /dev/null +++ b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider.rs @@ -0,0 +1,15 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +mod build_defs; +mod builtin; +mod collection; +mod field_types; +mod provider_symbol; +mod tests; diff --git a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin.rs b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin.rs new file mode 100644 index 0000000000000..a96a2f2d526a0 --- /dev/null +++ b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin.rs @@ -0,0 +1,20 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
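
// Hedged sketch of the replace_regex constructor form exercised in the
// cmd_args tests above: it takes a list of (pattern, replacement) pairs that
// are applied in order to every rendered argument. Assuming the same
// tester()/get_args helpers as those tests:

#[test]
fn replace_regex_pairs_sketch() -> anyhow::Result<()> {
    let mut tester = tester()?;
    tester.run_starlark_bzl_test(indoc!(
        r#"
        def test():
            # Rewrites the ".c" suffix of each rendered argument.
            args = cmd_args("foo.c", replace_regex=[("\\.c$", ".o")])
            assert_eq(["foo.o"], get_args(args))
        "#
    ))?;
    Ok(())
}
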
+ */ + +mod configuration_info; +mod default_info; +mod dependency; +mod external_runner_test_info; +mod install_info; +mod local_resource_info; +mod run_info; +mod tests; +mod validation_info; +mod validation_spec; +mod worker_info; diff --git a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/configuration_info.rs b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/configuration_info.rs index c81c5cc3e562a..e66221509133f 100644 --- a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/configuration_info.rs +++ b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/configuration_info.rs @@ -8,12 +8,11 @@ */ use buck2_build_api::interpreter::rule_defs::register_rule_defs; -use buck2_common::result::SharedResult; use buck2_interpreter_for_build::interpreter::testing::Tester; use indoc::indoc; #[test] -fn configuration_info_validates_buckconfigs() -> SharedResult<()> { +fn configuration_info_validates_buckconfigs() -> buck2_error::Result<()> { let mut tester = Tester::new().unwrap(); tester.additional_globals(register_rule_defs); tester.run_starlark_bzl_test_expecting_error( diff --git a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/default_info.rs b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/default_info.rs index 2ce8416979bd2..ab995f8a23ae7 100644 --- a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/default_info.rs +++ b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/default_info.rs @@ -10,7 +10,6 @@ use buck2_build_api::interpreter::rule_defs::provider::callable::register_provider; use buck2_build_api::interpreter::rule_defs::provider::registration::register_builtin_providers; use buck2_build_api::interpreter::rule_defs::register_rule_defs; -use buck2_common::result::SharedResult; use buck2_core::bzl::ImportPath; use buck2_interpreter_for_build::interpreter::testing::Tester; use indoc::indoc; @@ -18,7 +17,7 @@ use indoc::indoc; use crate::interpreter::rule_defs::artifact::testing::artifactory; #[test] -fn default_info_is_available() -> SharedResult<()> { +fn default_info_is_available() -> buck2_error::Result<()> { let mut tester = Tester::new()?; tester.additional_globals(artifactory); tester.additional_globals(register_rule_defs); @@ -65,8 +64,7 @@ fn default_info_is_available() -> SharedResult<()> { } #[test] -fn default_info_validates_types() -> SharedResult<()> { - // TODO(nmj): More complex types +fn default_info_validates_types_1() -> buck2_error::Result<()> { let mut tester = Tester::new().unwrap(); tester.additional_globals(register_rule_defs); tester.run_starlark_bzl_test_expecting_error( @@ -81,6 +79,13 @@ fn default_info_validates_types() -> SharedResult<()> { "Type of parameter", ); + Ok(()) +} + +#[test] +fn default_info_validates_types_2() -> buck2_error::Result<()> { + let mut tester = Tester::new().unwrap(); + tester.additional_globals(register_rule_defs); tester.run_starlark_bzl_test_expecting_error( indoc!( r#" @@ -90,15 +95,10 @@ fn default_info_validates_types() -> SharedResult<()> { DefaultInfo(sub_targets=hide_type([]), default_outputs=["foo"]) "# ), - "Type of parameter", + "Expected type", ); - tester.run_starlark_bzl_test(indoc!( - r#" - def test(): - assert_eq(DefaultInfo.type, "DefaultInfo") - "# - )) + Ok(()) } #[test] @@ -114,7 +114,7 @@ fn test_to_json() { ) assert_eq( 
'{"sub_targets":{"foo":{"DefaultInfo":{"sub_targets":{},"default_outputs":[],"other_outputs":[]}}},"default_outputs":[],"other_outputs":[]}', - default.to_json(), + json.encode(default), ) "# )) diff --git a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/dependency.rs b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/dependency.rs index 9e2e54f56a7e6..9a4f62250309f 100644 --- a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/dependency.rs +++ b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/dependency.rs @@ -7,32 +7,30 @@ * of this source tree. */ -use buck2_build_api::interpreter::rule_defs::provider::collection::ProviderCollection; +use buck2_build_api::interpreter::rule_defs::provider::collection::FrozenProviderCollection; use buck2_build_api::interpreter::rule_defs::provider::dependency::Dependency; -use buck2_common::result::SharedResult; use buck2_core::configuration::data::ConfigurationData; +use buck2_core::pattern::pattern::ParsedPattern; use buck2_core::pattern::pattern_type::ProvidersPatternExtra; -use buck2_core::pattern::ParsedPattern; use buck2_interpreter_for_build::interpreter::build_context::BuildContext; use buck2_interpreter_for_build::interpreter::testing::Tester; use indoc::indoc; use starlark::environment::GlobalsBuilder; use starlark::eval::Evaluator; use starlark::starlark_module; -use starlark::values::Value; #[starlark_module] fn dependency_creator(builder: &mut GlobalsBuilder) { fn create_collection<'v>( s: &str, - providers: Value<'v>, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result> { let c = BuildContext::from_context(eval)?; let label = match ParsedPattern::::parse_precise( s, c.build_file_cell().name(), c.cell_resolver(), + c.cell_info.cell_alias_resolver(), ) { Ok(ParsedPattern::Target(package, target_name, providers)) => providers .into_providers_label(package, target_name.as_ref()) @@ -42,24 +40,22 @@ fn dependency_creator(builder: &mut GlobalsBuilder) { panic!(); } }; - let collection = eval - .heap() - .alloc(ProviderCollection::try_from_value(providers)?); + let collection = FrozenProviderCollection::testing_new_default(eval.frozen_heap()); Ok(Dependency::new(eval.heap(), label, collection, None)) } } #[test] -fn dependency_works() -> SharedResult<()> { +fn dependency_works() -> buck2_error::Result<()> { let mut tester = Tester::new()?; tester.additional_globals(buck2_build_api::interpreter::rule_defs::register_rule_defs); tester.additional_globals(dependency_creator); tester.run_starlark_bzl_test(indoc!( r#" - frozen = create_collection("root//foo:bar[baz]", [DefaultInfo()]) + frozen = create_collection("root//foo:bar[baz]") def test(): - notfrozen = create_collection("root//foo:bar[baz]", [DefaultInfo()]) + notfrozen = create_collection("root//foo:bar[baz]") expect = "#)>" assert_eq_ignore_hash(expect, repr(notfrozen)) diff --git a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/external_runner_test_info.rs b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/external_runner_test_info.rs index 12ddcfa9160c2..fd555f054a359 100644 --- a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/external_runner_test_info.rs +++ b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/external_runner_test_info.rs @@ -8,6 +8,7 @@ */ use buck2_build_api::interpreter::rule_defs::register_rule_defs; +use 
buck2_build_api::interpreter::rule_defs::required_test_local_resource::register_required_test_local_resource; use buck2_core::bzl::ImportPath; use buck2_interpreter_for_build::interpreter::testing::Tester; use indoc::indoc; @@ -15,6 +16,7 @@ use indoc::indoc; fn tester() -> Tester { let mut tester = Tester::new().unwrap(); tester.additional_globals(register_rule_defs); + tester.additional_globals(register_required_test_local_resource); tester } @@ -34,6 +36,7 @@ fn test_construction() -> anyhow::Result<()> { ExternalRunnerTestInfo(type = "foo", labels = ("foo",)) ExternalRunnerTestInfo(type = "foo", use_project_relative_paths = True) ExternalRunnerTestInfo(type = "foo", run_from_project_root = True) + ExternalRunnerTestInfo(type = "foo", local_resources = {"bar": None}, required_local_resources = [RequiredTestLocalResource("bar", listing=False)]) "# ); let mut tester = tester(); @@ -184,6 +187,26 @@ fn test_validation() -> anyhow::Result<()> { "`executor_overrides`", ); + tester.run_starlark_bzl_test_expecting_error( + indoc!( + r#" + def test(): + ExternalRunnerTestInfo(type = "foo", required_local_resources = ["bar"]) + "# + ), + "`required_local_resources` should only contain `RequiredTestLocalResource` values, got \"bar\"", + ); + + tester.run_starlark_bzl_test_expecting_error( + indoc!( + r#" + def test(): + ExternalRunnerTestInfo(type = "foo", required_local_resources = [RequiredTestLocalResource("bar")]) + "# + ), + "`required_local_resources` contains `bar` which is not present in `local_resources`", + ); + Ok(()) } diff --git a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/install_info.rs b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/install_info.rs index f31131ed402f5..cd3aa01229691 100644 --- a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/install_info.rs +++ b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/install_info.rs @@ -9,7 +9,6 @@ use buck2_build_api::interpreter::rule_defs::provider::collection::tester::collection_creator; use buck2_build_api::interpreter::rule_defs::register_rule_defs; -use buck2_common::result::SharedResult; use buck2_interpreter_for_build::interpreter::testing::Tester; use buck2_interpreter_for_build::label::testing::label_creator; use indoc::indoc; @@ -26,7 +25,7 @@ fn tester() -> Tester { } #[test] -fn install_info_works_as_provider_key() -> SharedResult<()> { +fn install_info_works_as_provider_key() -> buck2_error::Result<()> { let content = indoc!( r#" installer_app = label("//foo:bar[quz]") @@ -40,7 +39,7 @@ fn install_info_works_as_provider_key() -> SharedResult<()> { } #[test] -fn info_validator_succeeds_for_artifacts_without_additional_artifacts() -> SharedResult<()> { +fn info_validator_succeeds_for_artifacts_without_additional_artifacts() -> buck2_error::Result<()> { let content = indoc!( r#" a1 = source_artifact("foo/bar", "baz.h") diff --git a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/local_resource_info.rs b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/local_resource_info.rs index 1d1f669599bbf..01b740cd3b8ad 100644 --- a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/local_resource_info.rs +++ b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/local_resource_info.rs @@ -44,7 +44,6 @@ fn test_missing_fields_validation() -> anyhow::Result<()> { let test = indoc!( r#" def test(): - target = label("//:foobar") LocalResourceInfo(resource_env_vars={}) "# ); @@ 
-54,7 +53,6 @@ fn test_missing_fields_validation() -> anyhow::Result<()> { let test = indoc!( r#" def test(): - target = label("//:foobar") LocalResourceInfo(setup=cmd_args()) "# ); @@ -68,109 +66,123 @@ fn test_missing_fields_validation() -> anyhow::Result<()> { } #[test] -fn test_validation() -> anyhow::Result<()> { +fn test_validation_1() { let mut tester = new_tester(); - { - let test = indoc!( - r#" - def test(): - target = label("//:foobar") - wrong_setup = {5:6} - LocalResourceInfo(setup=wrong_setup, resource_env_vars={"RESOURCE_ENV_VAR": "json_key"}) - "# - ); - expect_error( - tester.run_starlark_bzl_test(test), - test, - "Value for `setup` field is not a command line", - ); - } - { - let test = indoc!( - r#" + let test = indoc!( + r#" + def test(): + wrong_setup = {5:6} + LocalResourceInfo(setup=wrong_setup, resource_env_vars={"RESOURCE_ENV_VAR": "json_key"}) + "# + ); + expect_error( + tester.run_starlark_bzl_test(test), + test, + "Expected type `CellPath | artifact", + ); +} + +#[test] +fn test_validation_2() { + let mut tester = new_tester(); + let test = indoc!( + r#" def test(): wrong_setup = [] LocalResourceInfo(setup=wrong_setup, resource_env_vars={"RESOURCE_ENV_VAR": "json_key"}) "# - ); - expect_error( - tester.run_starlark_bzl_test(test), - test, - "Value for `setup` field is an empty command line", - ); - } - { - let test = indoc!( - r#" + ); + expect_error( + tester.run_starlark_bzl_test(test), + test, + "Value for `setup` field is an empty command line", + ); +} + +#[test] +fn test_validation_3() { + let mut tester = new_tester(); + let test = indoc!( + r#" def test(): wrong_env_vars = "baz" LocalResourceInfo(setup=["/foo", "--resource"], resource_env_vars=wrong_env_vars) "# - ); - expect_error( - tester.run_starlark_bzl_test(test), - test, - "Value for `resource_env_vars` field is not a dictionary", - ); - } - { - let test = indoc!( - r#" + ); + expect_error( + tester.run_starlark_bzl_test(test), + test, + "Expected type `dict[str, str]`", + ); +} + +#[test] +fn test_validation_4() { + let mut tester = new_tester(); + let test = indoc!( + r#" def test(): wrong_env_vars = {} LocalResourceInfo(setup=["/foo", "--resource"], resource_env_vars=wrong_env_vars) "# - ); - expect_error( - tester.run_starlark_bzl_test(test), - test, - "Value for `resource_env_vars` field is an empty dictionary", - ); - } - { - let test = indoc!( - r#" + ); + expect_error( + tester.run_starlark_bzl_test(test), + test, + "Value for `resource_env_vars` field is an empty dictionary", + ); +} + +#[test] +fn test_validation_5() { + let mut tester = new_tester(); + let test = indoc!( + r#" def test(): wrong_env_vars = {1:"one"} LocalResourceInfo(setup=["/foo", "--resource"], resource_env_vars=wrong_env_vars) "# - ); - expect_error( - tester.run_starlark_bzl_test(test), - test, - "Invalid key in `resource_env_vars`: Expected a str, got", - ); - } - { - let test = indoc!( - r#" + ); + expect_error( + tester.run_starlark_bzl_test(test), + test, + "Expected type `dict[str, str]`", + ); +} + +#[test] +fn test_validation_6() { + let mut tester = new_tester(); + let test = indoc!( + r#" def test(): wrong_env_vars = {"one":1} LocalResourceInfo(setup=["/foo", "--resource"], resource_env_vars=wrong_env_vars) "# - ); - expect_error( - tester.run_starlark_bzl_test(test), - test, - "Invalid value in `resource_env_vars`: Expected a str, got", - ); - } - { - let test = indoc!( - r#" + ); + expect_error( + tester.run_starlark_bzl_test(test), + test, + "Expected type `dict[str, str]`", + ); +} + +#[test] +fn 
test_validation_7() { + let mut tester = new_tester(); + let test = indoc!( + r#" def test(): wrong_env_vars = {"one":"1"} LocalResourceInfo(setup=["/foo", "--resource"], resource_env_vars=wrong_env_vars, setup_timeout_seconds="42") "# - ); - expect_error( - tester.run_starlark_bzl_test(test), - test, - "`setup_timeout_seconds` must be a number if provided", - ); - } - Ok(()) + ); + expect_error( + tester.run_starlark_bzl_test(test), + test, + "Expected type `None | float | int`", + ); } #[test] diff --git a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/mod.rs b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/mod.rs deleted file mode 100644 index 34d8b8b1b0468..0000000000000 --- a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/mod.rs +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -mod configuration_info; -mod default_info; -mod dependency; -mod external_runner_test_info; -mod install_info; -mod local_resource_info; -mod run_info; -mod tests; -mod worker_info; diff --git a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/run_info.rs b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/run_info.rs index 8a57c9abc13b7..ce1f1fe6633d1 100644 --- a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/run_info.rs +++ b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/run_info.rs @@ -9,7 +9,6 @@ use buck2_build_api::interpreter::rule_defs::provider::collection::tester::collection_creator; use buck2_build_api::interpreter::rule_defs::register_rule_defs; -use buck2_common::result::SharedResult; use buck2_interpreter_for_build::interpreter::testing::Tester; use indoc::indoc; @@ -26,7 +25,7 @@ fn run_info_tester() -> Tester { } #[test] -fn run_info_stringifies() -> SharedResult<()> { +fn run_info_stringifies() -> buck2_error::Result<()> { let mut tester = run_info_tester(); let content = indoc!( r#" @@ -93,7 +92,7 @@ fn run_info_stringifies() -> SharedResult<()> { } #[test] -fn run_info_validates_types() { +fn run_info_validates_types_1() { let content_bad_args1 = indoc!( r#" def test(): @@ -101,8 +100,14 @@ fn run_info_validates_types() { "# ); let mut tester = run_info_tester(); - tester.run_starlark_bzl_test_expecting_error(content_bad_args1, "expected command line item"); + tester.run_starlark_bzl_test_expecting_error( + content_bad_args1, + "Expected type `CellPath | artifact", + ); +} +#[test] +fn run_info_validates_types_2() { let content_bad_args2 = indoc!( r#" def test(): @@ -110,11 +115,14 @@ fn run_info_validates_types() { "# ); let mut tester = run_info_tester(); - tester.run_starlark_bzl_test_expecting_error(content_bad_args2, "expected command line item"); + tester.run_starlark_bzl_test_expecting_error( + content_bad_args2, + "Expected type `CellPath | artifact", + ); } #[test] -fn run_info_works_as_provider_key() -> SharedResult<()> { +fn run_info_works_as_provider_key() -> buck2_error::Result<()> { let mut tester = run_info_tester(); let content = indoc!( diff --git a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/validation_info.rs b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/validation_info.rs 
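
// Context for the new test file below, as a hedged sketch: ValidationInfo
// bundles named ValidationSpec entries, each pointing at a bound build
// artifact whose contents carry a validation verdict. In a rule
// implementation this looks roughly like (illustrative Starlark; the rule
// and output names here are assumptions, not part of this diff):
//
//     def _impl(ctx):
//         report = ctx.actions.declare_output("validation.json")
//         # ... an action that writes the report ...
//         return [
//             DefaultInfo(),
//             ValidationInfo(validations = [
//                 ValidationSpec(name = "lint", validation_result = report),
//             ]),
//         ]
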
new file mode 100644 index 0000000000000..96ebce654c3ab --- /dev/null +++ b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/validation_info.rs @@ -0,0 +1,103 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use buck2_build_api::interpreter::rule_defs::register_rule_defs; +use buck2_build_api::interpreter::rule_defs::validation_spec; +use buck2_interpreter_for_build::interpreter::testing::expect_error; +use buck2_interpreter_for_build::interpreter::testing::Tester; +use indoc::indoc; + +use crate::interpreter::rule_defs::artifact::testing::artifactory; + +fn new_tester() -> Tester { + let mut tester = Tester::new().unwrap(); + tester.additional_globals(register_rule_defs); + tester.additional_globals(validation_spec::register_validation_spec); + tester.additional_globals(artifactory); + tester +} + +#[test] +fn test_construction() -> anyhow::Result<()> { + let mut tester = new_tester(); + let test = indoc!( + r#" + def test(): + a = declared_bound_artifact("//foo:bar", "baz/quz.h") + ValidationInfo(validations=[ValidationSpec(name="foo", validation_result=a)]) + "# + ); + tester.run_starlark_bzl_test(test)?; + Ok(()) +} + +#[test] +fn test_missing_fields_validation() -> anyhow::Result<()> { + let mut tester = new_tester(); + { + let test = indoc!( + r#" + def test(): + ValidationInfo() + "# + ); + expect_error( + tester.run_starlark_bzl_test(test), + test, + "Missing required parameter `validations`", + ); + } + Ok(()) +} + +#[test] +fn test_validation_failure() -> anyhow::Result<()> { + let mut tester = new_tester(); + { + let test = indoc!( + r#" + def test(): + ValidationInfo(validations=[1, 2]) + "# + ); + expect_error( + tester.run_starlark_bzl_test(test), + test, + "Expected type `list[ValidationSpec]` but got `list[int]`", + ); + } + { + let test = indoc!( + r#" + def test(): + a = declared_bound_artifact("//foo:bar", "baz/quz.h") + ValidationInfo(validations=[ValidationSpec(name="foo", validation_result=a), ValidationSpec(name="foo", validation_result=a)]) + "# + ); + expect_error( + tester.run_starlark_bzl_test(test), + test, + "Multiple specs with same name `foo` which is not allowed.", + ); + } + { + let test = indoc!( + r#" + def test(): + ValidationInfo(validations=[]) + "# + ); + expect_error( + tester.run_starlark_bzl_test(test), + test, + "`ValidationInfo` should contain at least one validation.", + ); + } + Ok(()) +} diff --git a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/validation_spec.rs b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/validation_spec.rs new file mode 100644 index 0000000000000..0e85fb4a166a3 --- /dev/null +++ b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/builtin/validation_spec.rs @@ -0,0 +1,189 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
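
// Hedged summary of the constraints the tests below pin down: ValidationSpec
// needs a non-empty name and a bound build artifact as validation_result
// (source and unbound artifacts are rejected), and optional defaults to
// False. A minimal positive case in the same style as this file:

#[test]
fn validation_spec_defaults_sketch() -> anyhow::Result<()> {
    let mut tester = new_tester();
    tester.run_starlark_bzl_test(indoc!(
        r#"
        def test():
            a = declared_bound_artifact("//foo:bar", "report.json")
            s = ValidationSpec(name="lint", validation_result=a)
            # `optional` was not passed, so it defaults to False.
            assert_eq(s.optional, False)
        "#
    ))?;
    Ok(())
}
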
+ */ + +use buck2_build_api::interpreter::rule_defs::validation_spec; +use buck2_interpreter_for_build::interpreter::testing::expect_error; +use buck2_interpreter_for_build::interpreter::testing::Tester; +use indoc::indoc; + +use crate::interpreter::rule_defs::artifact::testing::artifactory; + +fn new_tester() -> Tester { + let mut tester = Tester::new().unwrap(); + tester.additional_globals(validation_spec::register_validation_spec); + tester.additional_globals(artifactory); + tester +} + +#[test] +fn test_construction() -> anyhow::Result<()> { + let mut tester = new_tester(); + { + let test = indoc!( + r#" + def test(): + a = declared_bound_artifact("//foo:bar", "baz/quz.h") + ValidationSpec(name="foo", validation_result=a) + "# + ); + tester.run_starlark_bzl_test(test)?; + } + { + let test = indoc!( + r#" + def test(): + a = declared_bound_artifact("//foo:bar", "baz/quz.h") + ValidationSpec(name="foo", validation_result=a, optional=True) + "# + ); + tester.run_starlark_bzl_test(test)?; + } + Ok(()) +} + +#[test] +fn test_missing_fields_validation() -> anyhow::Result<()> { + let mut tester = new_tester(); + { + let test = indoc!( + r#" + def test(): + ValidationSpec(name="foo") + "# + ); + expect_error( + tester.run_starlark_bzl_test(test), + test, + "Missing required parameter `validation_result`", + ); + } + { + let test = indoc!( + r#" + def test(): + a = declared_bound_artifact("//foo:bar", "baz/quz.h") + ValidationSpec(validation_result=a) + "# + ); + expect_error( + tester.run_starlark_bzl_test(test), + test, + "Missing required parameter `name`", + ); + } + Ok(()) +} + +#[test] +fn test_validation_failure() -> anyhow::Result<()> { + let mut tester = new_tester(); + { + let test = indoc!( + r#" + def test(): + a = declared_bound_artifact("//foo:bar", "baz/quz.h") + ValidationSpec(name=1, validation_result=a) + "# + ); + expect_error( + tester.run_starlark_bzl_test(test), + test, + "Expected type `str` but got `int`", + ); + } + { + let test = indoc!( + r#" + def test(): + a = declared_bound_artifact("//foo:bar", "baz/quz.h") + ValidationSpec(name="", validation_result=a) + "# + ); + expect_error( + tester.run_starlark_bzl_test(test), + test, + "Name of validation spec should not be empty", + ); + } + { + let test = indoc!( + r#" + def test(): + ValidationSpec(name="foo", validation_result="bar") + "# + ); + expect_error( + tester.run_starlark_bzl_test(test), + test, + "Expected type `artifact` but got `str`", + ); + } + { + let test = indoc!( + r#" + def test(): + bar = declared_artifact("baz/quz.h") + ValidationSpec(name="foo", validation_result=bar) + "# + ); + expect_error( + tester.run_starlark_bzl_test(test), + test, + "Validation result artifact should be bound", + ); + } + { + let test = indoc!( + r#" + def test(): + bar = source_artifact("foo/bar", "baz/quz.h") + ValidationSpec(name="foo", validation_result=bar) + "# + ); + expect_error( + tester.run_starlark_bzl_test(test), + test, + "Validation result artifact should be a build artifact", + ); + } + { + let test = indoc!( + r#" + def test(): + a = declared_bound_artifact("//foo:bar", "baz/quz.h") + ValidationSpec(name="test", validation_result=a, optional="invalid") + "# + ); + expect_error( + tester.run_starlark_bzl_test(test), + test, + "Expected type `bool` but got `str`", + ); + } + Ok(()) +} + +#[test] +fn test_attributes() -> anyhow::Result<()> { + let mut tester = new_tester(); + { + let test = indoc!( + r#" + def test(): + a = declared_bound_artifact("//foo:bar", "baz/quz.h") + s = ValidationSpec(name="foo", 
validation_result=a) + assert_eq(s.name, "foo") + assert_eq_ignore_hash("<build artifact baz/quz.h bound to root//foo:bar (<testing>#<HASH>)>", repr(s.validation_result)) + assert_eq(s.optional, False) + "# + ); + tester.run_starlark_bzl_test(test)?; + } + Ok(()) +} diff --git a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/collection.rs b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/collection.rs index e37d3362ede1d..dbe5afc4f9777 100644 --- a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/collection.rs +++ b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/collection.rs @@ -10,7 +10,6 @@ use buck2_build_api::interpreter::rule_defs::provider::callable::register_provider; use buck2_build_api::interpreter::rule_defs::provider::collection::tester::collection_creator; use buck2_build_api::interpreter::rule_defs::register_rule_defs; -use buck2_common::result::SharedResult; use buck2_core::bzl::ImportPath; use buck2_interpreter_for_build::interpreter::testing::expect_error; use buck2_interpreter_for_build::interpreter::testing::Tester; @@ -18,7 +17,7 @@ use indoc::indoc; use crate::interpreter::rule_defs::artifact::testing::artifactory; -fn provider_collection_tester() -> SharedResult<Tester> { +fn provider_collection_tester() -> buck2_error::Result<Tester> { let mut tester = Tester::new()?; tester.additional_globals(collection_creator); tester.additional_globals(artifactory); @@ -51,7 +50,7 @@ fn provider_collection_tester() -> SharedResult<Tester> { } #[test] -fn provider_collection_constructs_properly() -> SharedResult<()> { +fn provider_collection_constructs_properly() -> buck2_error::Result<()> { let mut tester = provider_collection_tester()?; tester.run_starlark_bzl_test(indoc!( r#" @@ -69,7 +68,7 @@ fn provider_collection_constructs_properly() -> SharedResult<()> { } #[test] -fn provider_collection_fails_to_construct_on_bad_data() -> SharedResult<()> { +fn provider_collection_fails_to_construct_on_bad_data() -> buck2_error::Result<()> { let mut tester = provider_collection_tester()?; let not_a_list = indoc!( r#" @@ -123,13 +122,13 @@ fn provider_collection_fails_to_construct_on_bad_data() -> SharedResult<()> { expect_error( tester.run_starlark_bzl_test(missing_default_info), missing_default_info, - "did not receive a DefaultInfo", + "did not receive a `DefaultInfo`", ); Ok(()) } #[test] -fn returns_default_info() -> SharedResult<()> { +fn returns_default_info() -> buck2_error::Result<()> { let mut tester = provider_collection_tester()?; tester.run_starlark_bzl_test(indoc!( r#" @@ -150,7 +149,7 @@ fn returns_default_info() -> SharedResult<()> { } #[test] -fn provider_collection_contains_methods_and_in_operator() -> SharedResult<()> { +fn provider_collection_contains_methods_and_in_operator() -> buck2_error::Result<()> { let mut tester = provider_collection_tester()?; tester.add_import( &ImportPath::testing_new("root//providers:defs.bzl"), @@ -177,7 +176,7 @@ fn provider_collection_contains_methods_and_in_operator() -> SharedResult<()> { } #[test] -fn provider_collection_get() -> SharedResult<()> { +fn provider_collection_get() -> buck2_error::Result<()> { let mut tester = provider_collection_tester()?; tester.add_import( &ImportPath::testing_new("root//providers:defs.bzl"), diff --git a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/field_types.rs b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/field_types.rs index 6704ebd693f38..9ad8108281cc2 100644 --- a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/field_types.rs +++ 
b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/field_types.rs @@ -59,7 +59,7 @@ def p(): def test(): p()(x = 1) "#, - "Missing parameter `y`", + "Missing named-only parameter `y`", ); } diff --git a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/mod.rs b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/mod.rs deleted file mode 100644 index fba7213ab1021..0000000000000 --- a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/mod.rs +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -mod build_defs; -mod builtin; -mod collection; -mod field_types; -mod provider_symbol; -pub(crate) mod testing; -mod tests; diff --git a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/testing.rs b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/testing.rs deleted file mode 100644 index 26e058b74b23e..0000000000000 --- a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/testing.rs +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use buck2_build_api::interpreter::rule_defs::provider::callable::register_provider; -use buck2_build_api::interpreter::rule_defs::provider::collection::FrozenProviderCollectionValue; -use buck2_build_api::interpreter::rule_defs::provider::registration::register_builtin_providers; -use buck2_interpreter_for_build::attrs::coerce; -use starlark::environment::GlobalsBuilder; -use starlark::environment::Module; - -pub trait FrozenProviderCollectionValueExt { - /// Creates a `FrozenProviderCollectionValue` for testing. The given string should be - /// Starlark code that returns a list of providers. The built in providers are available. 
- fn testing_new(providers: &str) -> Self; -} - -impl FrozenProviderCollectionValueExt for FrozenProviderCollectionValue { - fn testing_new(providers: &str) -> Self { - let env = Module::new(); - let globals = GlobalsBuilder::standard() - .with(register_builtin_providers) - .with(register_provider) - .build(); - let value = coerce::testing::to_value(&env, &globals, providers); - let res_typed = - buck2_build_api::interpreter::rule_defs::provider::collection::ProviderCollection::try_from_value(value) - .map_err(|e| anyhow::anyhow!("{:?}", e)) - .unwrap(); - - let res = env.heap().alloc(res_typed); - env.set("", res); - - let frozen_env = env.freeze().expect("should freeze successfully"); - let res = frozen_env.get("").unwrap(); - - FrozenProviderCollectionValue::try_from_value(res) - .expect("just created this, this shouldn't happen") - } -} diff --git a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/tests.rs b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/tests.rs index d958179bcb291..958b94f437e74 100644 --- a/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/tests.rs +++ b/app/buck2_build_api_tests/src/interpreter/rule_defs/provider/tests.rs @@ -39,7 +39,6 @@ fn creates_providers() -> anyhow::Result<()> { assert_eq('provider[FooInfo](fields={"bar": provider_field(typing.Any, default=None), "baz": provider_field(typing.Any, default=None)})', repr(FooInfo2)) def test(): - assert_eq(FooInfo.type, "FooInfo") assert_eq('provider[FooInfo](fields={"bar": provider_field(typing.Any, default=None), "baz": provider_field(typing.Any, default=None)})', repr(FooInfo)) assert_eq('provider[FooInfo](fields={"bar": provider_field(typing.Any, default=None), "baz": provider_field(typing.Any, default=None)})', repr(FooInfo2)) @@ -61,7 +60,7 @@ fn creates_providers() -> anyhow::Result<()> { assert_eq("bar_2", foo_2.bar) assert_eq(None, foo_2.baz) - assert_eq("{\"bar\":\"bar_1\",\"baz\":\"baz_1\"}", foo_1.to_json()) + assert_eq("{\"bar\":\"bar_1\",\"baz\":\"baz_1\"}", json.encode(foo_1)) "# ))?; diff --git a/app/buck2_build_api_tests/src/interpreter/transitive_set/mod.rs b/app/buck2_build_api_tests/src/interpreter/transitive_set.rs similarity index 100% rename from app/buck2_build_api_tests/src/interpreter/transitive_set/mod.rs rename to app/buck2_build_api_tests/src/interpreter/transitive_set.rs diff --git a/app/buck2_build_api_tests/src/interpreter/transitive_set/testing.rs b/app/buck2_build_api_tests/src/interpreter/transitive_set/testing.rs index cae54d11b7f77..112d01b7ba8f1 100644 --- a/app/buck2_build_api_tests/src/interpreter/transitive_set/testing.rs +++ b/app/buck2_build_api_tests/src/interpreter/transitive_set/testing.rs @@ -11,45 +11,40 @@ use std::sync::atomic::AtomicU32; use std::sync::atomic::Ordering; use anyhow::Context as _; -use buck2_artifact::deferred::data::DeferredData; -use buck2_artifact::deferred::id::DeferredId; -use buck2_artifact::deferred::key::DeferredKey; +use buck2_artifact::deferred::key::DeferredHolderKey; +use buck2_build_api::artifact_groups::deferred::TransitiveSetIndex; +use buck2_build_api::artifact_groups::deferred::TransitiveSetKey; use buck2_build_api::interpreter::rule_defs::transitive_set::transitive_set_definition::register_transitive_set; use buck2_build_api::interpreter::rule_defs::transitive_set::FrozenTransitiveSet; +use buck2_build_api::interpreter::rule_defs::transitive_set::FrozenTransitiveSetDefinition; use buck2_build_api::interpreter::rule_defs::transitive_set::TransitiveSet; use 
buck2_build_api::interpreter::rule_defs::transitive_set::TransitiveSetOrdering; -use buck2_core::base_deferred_key::BaseDeferredKey; -use buck2_core::configuration::data::ConfigurationData; -use buck2_core::target::configured_target_label::ConfiguredTargetLabel; use indoc::indoc; use starlark::environment::GlobalsBuilder; use starlark::environment::Module; use starlark::eval::Evaluator; use starlark::starlark_module; +use starlark::values::FrozenValueTyped; use starlark::values::OwnedFrozenValueTyped; use starlark::values::Value; +use starlark::StarlarkResultExt; use crate::interpreter::rule_defs::artifact::testing::artifactory; #[starlark_module] pub(crate) fn tset_factory(builder: &mut GlobalsBuilder) { fn make_tset<'v>( - definition: Value<'v>, + definition: FrozenValueTyped<'v, FrozenTransitiveSetDefinition>, value: Option<Value<'v>>, children: Option<Value<'v>>, // An iterable. - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result<Value<'v>> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> starlark::Result<Value<'v>> { static LAST_ID: AtomicU32 = AtomicU32::new(0); - let target = ConfiguredTargetLabel::testing_parse( - "cell//path:target", - ConfigurationData::testing_new(), - ); - let deferred_id = DeferredId::testing_new(LAST_ID.fetch_add(1, Ordering::Relaxed)); - let deferred_key = DeferredKey::Base(BaseDeferredKey::TargetLabel(target), deferred_id); + let tset_id = TransitiveSetIndex::testing_new(LAST_ID.fetch_add(1, Ordering::Relaxed)); let set = TransitiveSet::new_from_values( - DeferredData::unchecked_new(deferred_key), + TransitiveSetKey::new(DeferredHolderKey::testing_new("cell//more:tsets"), tset_id), definition, value, children, @@ -71,13 +66,25 @@ pub(crate) fn new_transitive_set( .with(artifactory) .build(); - let val = buck2_interpreter_for_build::attrs::coerce::testing::to_value(&env, &globals, code); + buck2_interpreter_for_build::attrs::coerce::testing::to_value(&env, &globals, code); - env.set("", val); let frozen = env.freeze().context("Freeze failed")?; - let value = frozen.get("").context("Frozen tset was not found!")?; - value.downcast_anyhow() + let make = frozen.get("make").context("`make` was not found")?; + + let env2 = Module::new(); + let ret = Evaluator::new(&env2) + .eval_function(make.owned_value(&env2.frozen_heap()), &[], &[]) + .into_anyhow_result()?; + + env2.set_extra_value(ret); + + let frozen = env2.freeze()?; + + frozen + .owned_extra_value() + .context("Frozen value must be in extra value")?
+ .downcast_anyhow() } #[test] @@ -85,8 +92,10 @@ fn test_new_transitive_set() -> anyhow::Result<()> { let set = new_transitive_set(indoc!( r#" FooSet = transitive_set() - s1 = make_tset(FooSet, value = "foo") - make_tset(FooSet, value = "bar", children = [s1]) + + def make(): + s1 = make_tset(FooSet, value = "foo") + return make_tset(FooSet, value = "bar", children = [s1]) "# ))?; diff --git a/app/buck2_build_api_tests/src/interpreter/transitive_set/tests.rs b/app/buck2_build_api_tests/src/interpreter/transitive_set/tests.rs index 849a91c0be5e9..b02327c9c1ffe 100644 --- a/app/buck2_build_api_tests/src/interpreter/transitive_set/tests.rs +++ b/app/buck2_build_api_tests/src/interpreter/transitive_set/tests.rs @@ -181,48 +181,14 @@ fn test_transitive_set_display() -> anyhow::Result<()> { assert_eq("FooSet(value=2, 1 children)", repr(f2)) assert_eq("FooSet(1 children)", repr(f3)) - assert_eq("FooSet(\n value=1,\n 0 children\n)", pprint_str(f1)) - assert_eq("FooSet( 1 children )", pprint_str(f3)) + assert_eq("FooSet(\n value=1,\n 0 children\n)", prepr(f1)) + assert_eq("FooSet( 1 children )", prepr(f3)) "# ))?; Ok(()) } -#[test] -fn test_transitive_sets_validation() -> anyhow::Result<()> { - let mut tester = transitive_set_tester(); - - let contents = indoc!( - r#" - def test(): - FooSet = transitive_set() - make_tset(FooSet, value = None) - "# - ); - - expect_error( - tester.run_starlark_bzl_test(contents), - contents, - "used before being assigned", - ); - - let contents = indoc!( - r#" - def test(): - make_tset(123, value = None) - "# - ); - - expect_error( - tester.run_starlark_bzl_test(contents), - contents, - "not the output of transitive_set", - ); - - Ok(()) -} - #[test] fn test_transitive_sets_projection() -> anyhow::Result<()> { let mut tester = transitive_set_tester(); @@ -721,7 +687,23 @@ fn test_accessors() -> anyhow::Result<()> { s2 = make_tset(FooSet, value = 1) assert_eq(s2.value, 1) - "# + + f4 = make_tset(FooSet, value = "baz") + assert_eq([], f4.children) + assert_eq([], [x.value for x in f4.children]) + + f3 = make_tset(FooSet, value = "bar", children = [f4]) + assert_eq([f4], f3.children) + assert_eq(["baz"], [x.value for x in f3.children]) + + f2 = make_tset(FooSet, children = [f4, f3]) + assert_eq([f4, f3], f2.children) + assert_eq(["baz", "bar"], [x.value for x in f2.children]) + + f1 = make_tset(FooSet, children = [f4, f3, f2]) + assert_eq([f4, f3, f2], f1.children) + assert_eq(["baz", "bar"], filter(None, [x.value for x in f1.children])) + "# ))?; Ok(()) diff --git a/app/buck2_build_api_tests/src/lib.rs b/app/buck2_build_api_tests/src/lib.rs index 5e106974d9d20..3b27c3ee2acf2 100644 --- a/app/buck2_build_api_tests/src/lib.rs +++ b/app/buck2_build_api_tests/src/lib.rs @@ -7,14 +7,14 @@ * of this source tree. 
*/ +#![feature(error_generic_member_access)] #![cfg(test)] -#![feature(provide_any)] +#![allow(clippy::bool_assert_comparison)] mod actions; mod analysis; mod artifact_groups; mod attrs; -mod deferred; mod interpreter; mod nodes; @@ -22,9 +22,12 @@ mod nodes; fn init_late_bindings_for_test() { #[ctor::ctor] fn init() { + buck2_action_impl::init_late_bindings(); buck2_analysis::init_late_bindings(); buck2_anon_target::init_late_bindings(); buck2_configured::init_late_bindings(); buck2_interpreter_for_build::init_late_bindings(); + buck2_build_api::init_late_bindings(); + buck2_transition::init_late_bindings(); } } diff --git a/app/buck2_build_api_tests/src/nodes/mod.rs b/app/buck2_build_api_tests/src/nodes.rs similarity index 100% rename from app/buck2_build_api_tests/src/nodes/mod.rs rename to app/buck2_build_api_tests/src/nodes.rs diff --git a/app/buck2_build_api_tests/src/nodes/calculation.rs b/app/buck2_build_api_tests/src/nodes/calculation.rs index b20ee08a5b8ec..1c0e7882b52f5 100644 --- a/app/buck2_build_api_tests/src/nodes/calculation.rs +++ b/app/buck2_build_api_tests/src/nodes/calculation.rs @@ -9,6 +9,7 @@ use std::sync::Arc; +use buck2_build_api::actions::execute::dice_data::set_fallback_executor_config; use buck2_configured::configuration::calculation::ExecutionPlatformsKey; use buck2_core::build_file_path::BuildFilePath; use buck2_core::bzl::ImportPath; @@ -19,9 +20,8 @@ use buck2_core::package::PackageLabel; use buck2_core::plugins::PluginKindSet; use buck2_core::provider::label::ProvidersLabel; use buck2_core::provider::label::ProvidersName; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use buck2_core::target::name::TargetName; -use buck2_execute::execute::dice_data::set_fallback_executor_config; use buck2_interpreter_for_build::interpreter::calculation::InterpreterResultsKey; use buck2_interpreter_for_build::super_package::package_value::SuperPackageValuesImpl; use buck2_node::attrs::attr::Attribute; @@ -59,10 +59,10 @@ async fn test_get_node() -> anyhow::Result<()> { let cfg = ConfigurationData::testing_new(); let pkg = PackageLabel::testing_parse("cell//foo/bar"); - let name1 = TargetName::unchecked_new("t1"); + let name1 = TargetName::testing_new("t1"); let label1 = TargetLabel::new(pkg.dupe(), name1.as_ref()); - let name2 = TargetName::unchecked_new("t2"); + let name2 = TargetName::testing_new("t2"); let label2 = TargetLabel::new(pkg.dupe(), name2.as_ref()); let rule_type = RuleType::Starlark(Arc::new(StarlarkRuleType { @@ -93,7 +93,7 @@ async fn test_get_node() -> anyhow::Result<()> { ), ]; - let node1 = TargetNode::testing_new(label1.dupe(), rule_type.dupe(), attrs1); + let node1 = TargetNode::testing_new(label1.dupe(), rule_type.dupe(), attrs1, vec![], None); let attrs2 = vec![ ( @@ -117,7 +117,7 @@ async fn test_get_node() -> anyhow::Result<()> { ), ]; - let node2 = TargetNode::testing_new(label2.dupe(), rule_type.dupe(), attrs2); + let node2 = TargetNode::testing_new(label2.dupe(), rule_type.dupe(), attrs2, vec![], None); let eval_result = EvaluationResult::new( Arc::new(BuildFilePath::new( @@ -135,13 +135,13 @@ async fn test_get_node() -> anyhow::Result<()> { .mock_and_return(InterpreterResultsKey(pkg), Ok(Arc::new(eval_result))) .mock_and_return(ExecutionPlatformsKey, Ok(None)) .build(data)?; - let computations = computations.commit().await; + let mut computations = computations.commit().await; let node = computations.get_target_node(&label1).await?; - assert_eq!(node.0, node1.0); + assert_eq!(node, node1); let 
node = computations.get_target_node(&label2).await?; - assert_eq!(node.0, node2.0); + assert_eq!(node, node2); let conf_attrs1 = smallmap![ "bool_field" => ConfiguredAttr::Bool(BoolLiteral(false)), @@ -167,10 +167,10 @@ async fn test_get_node() -> anyhow::Result<()> { ]; let node = computations.get_target_node(&label1).await?; - assert_eq!(node.0, node1.0); + assert_eq!(node, node1); let node = computations.get_target_node(&label2).await?; - assert_eq!(node.0, node2.0); + assert_eq!(node, node2); let node = computations .get_configured_target_node(&label1.configure(cfg.dupe())) diff --git a/app/buck2_build_info/BUCK b/app/buck2_build_info/BUCK index a963f12ea2b34..97027f53f7d32 100644 --- a/app/buck2_build_info/BUCK +++ b/app/buck2_build_info/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") diff --git a/app/buck2_build_info/Cargo.toml b/app/buck2_build_info/Cargo.toml index ac32d0d587e11..e1aacc06a5a8e 100644 --- a/app/buck2_build_info/Cargo.toml +++ b/app/buck2_build_info/Cargo.toml @@ -1,8 +1,12 @@ [package] +edition = "2021" +license = { workspace = true } name = "buck2_build_info" +repository = { workspace = true } version = "0.1.0" -edition = "2021" [dependencies] buck2_util = { workspace = true } -# @oss-disable: build_info = { path = "../../../common/rust/build_info" } + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ["cfg(fbcode_build)"] } diff --git a/app/buck2_build_info/src/lib.rs b/app/buck2_build_info/src/lib.rs index 856d9bab39978..c39699240fa57 100644 --- a/app/buck2_build_info/src/lib.rs +++ b/app/buck2_build_info/src/lib.rs @@ -7,6 +7,8 @@ * of this source tree. */ +#![feature(error_generic_member_access)] + use buck2_util::late_binding::LateBinding; pub struct Buck2BuildInfo { @@ -38,12 +40,12 @@ pub fn win_internal_version() -> Option<&'static str> { /// Get the time at which this binary was built, if available. 
pub fn time_iso8601() -> Option<&'static str> { - #[cfg(any(fbcode_build, cargo_internal_build))] + #[cfg(fbcode_build)] { Some(build_info::BuildInfo::get_time_iso8601()) } - #[cfg(not(any(fbcode_build, cargo_internal_build)))] + #[cfg(not(fbcode_build))] { None } } diff --git a/app/buck2_build_signals/BUCK b/app/buck2_build_signals/BUCK index cc3de136ec3de..7b7dbb0d94fc7 100644 --- a/app/buck2_build_signals/BUCK +++ b/app/buck2_build_signals/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -12,11 +11,13 @@ rust_library( "fbsource//third-party/rust:anyhow", "fbsource//third-party/rust:async-trait", "fbsource//third-party/rust:derive_more", - "fbsource//third-party/rust:futures", "//buck2/allocative/allocative:allocative", "//buck2/app/buck2_core:buck2_core", + "//buck2/app/buck2_data:buck2_data", "//buck2/app/buck2_events:buck2_events", + "//buck2/app/buck2_util:buck2_util", "//buck2/dice/dice:dice", + "//buck2/gazebo/cmp_any:cmp_any", "//buck2/gazebo/dupe:dupe", ], ) diff --git a/app/buck2_build_signals/Cargo.toml b/app/buck2_build_signals/Cargo.toml index 9b1e81af1ef55..5a9f7cc48b3f6 100644 --- a/app/buck2_build_signals/Cargo.toml +++ b/app/buck2_build_signals/Cargo.toml @@ -1,18 +1,21 @@ [package] +authors = ["Meta"] +description = "Interface for build signals" +edition = "2021" +license = { workspace = true } name = "buck2_build_signals" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Interface for build signals" -license = "MIT OR Apache-2.0" -authors = ["Meta"] [dependencies] +allocative = { workspace = true } anyhow = { workspace = true } async-trait = { workspace = true } -derive_more = { workspace = true } -futures = { workspace = true } -allocative = { workspace = true } buck2_core = { workspace = true } +buck2_data = { workspace = true } buck2_events = { workspace = true } +buck2_util = { workspace = true } +cmp_any = { workspace = true } +derive_more = { workspace = true } dice = { workspace = true } dupe = { workspace = true } diff --git a/app/buck2_build_signals/src/env.rs b/app/buck2_build_signals/src/env.rs new file mode 100644 index 0000000000000..cc27d4f806a10 --- /dev/null +++ b/app/buck2_build_signals/src/env.rs @@ -0,0 +1,160 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::collections::HashMap; +use std::future::Future; +use std::str::FromStr; +use std::time::Duration; + +use allocative::Allocative; +use anyhow::Context as _; +use async_trait::async_trait; +use buck2_core::fs::paths::file_name::FileNameBuf; +use buck2_core::soft_error; +use buck2_events::dispatch::EventDispatcher; +use dice::UserComputationData; +use dupe::Dupe; + +#[derive(Copy, Clone, Dupe)] +pub struct NodeDuration { + /// The amount of time for this node that corresponds to something the user might be able to + /// improve. We should better break this down. + pub user: Duration, + /// The total duration for this node. + pub total: Duration, + /// The waiting duration for this node. + pub queue: Option<Duration>, +} + +impl NodeDuration { + /// Returns the duration we are using in our critical path calculation.
This doesn't really + /// *need* to be a function but right now we use user and want to switch to total so it's + /// easier to do that if this is in a single function. + pub fn critical_path_duration(&self) -> Duration { + self.total + } + + pub fn zero() -> Self { + Self { + user: Duration::from_secs(0), + total: Duration::from_secs(0), + queue: None, + } + } +} + +#[derive(Copy, Clone, Dupe, derive_more::Display, Allocative)] +pub enum CriticalPathBackendName { + #[display("longest-path-graph")] + LongestPathGraph, + #[display("default")] + Default, + #[display("logging")] + Logging, +} + +impl FromStr for CriticalPathBackendName { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + if s == "longest-path-graph" { + return Ok(Self::LongestPathGraph); + } + + if s == "default" { + return Ok(Self::Default); + } + + if s == "logging" { + return Ok(Self::Logging); + } + + Err(anyhow::anyhow!("Invalid backend name: `{}`", s)) + } +} + +pub struct BuildSignalsContext { + pub command_name: String, + pub metadata: HashMap<String, String>, + pub isolation_prefix: FileNameBuf, +} + +/// Created along with the BuildSignalsInstaller (ideally, BuildSignalsInstaller's definition would +/// live here, but that can't be done for now because it has some dependencies on buck2_build_api). +/// +/// This can be started to actually start processing build signals. +pub trait DeferredBuildSignals: Send { + fn start( + self: Box<Self>, + events: EventDispatcher, + backend: CriticalPathBackendName, + ctx: BuildSignalsContext, + ) -> Box<dyn FinishBuildSignals>; +} + +/// Returned by DeferredBuildSignals once started. Lets us report that we finished. +#[async_trait] +pub trait FinishBuildSignals: Send { + async fn finish(self: Box<Self>) -> anyhow::Result<()>; +} + +/// Start the backend for a DeferredBuildSignals instance. +/// +/// Build listeners operate by creating a matched pair of signal senders and signal receivers. +/// Senders are Dupe and allow for arbitrarily many writers. Receivers are not Dupe and are +/// expected to be driven by a single thread. This implies that, in order for the receiver to +/// function correctly and dispatch to build listeners, it must be run in a background task that is +/// periodically polled. +/// +/// This function arranges for a background task to be spawned that drives the receiver, while +/// invoking the called function with a live BuildSignalSender that can be used to send events to +/// the listening receiver. Upon return of `scope`, the sender terminates the receiver by sending a +/// `BuildFinished` signal and joins the receiver task.
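+///
+/// A minimal usage sketch (editorial illustration, not part of this change; `deferred`,
+/// `dispatcher`, `ctx`, and `run_build` stand in for values the caller already has):
+///
+/// ```ignore
+/// let result = scope(deferred, dispatcher, CriticalPathBackendName::Default, ctx, || async {
+///     // The build runs here while the backend drains signals in the background.
+///     run_build().await
+/// })
+/// .await?;
+/// ```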
+pub async fn scope<F, Fut, R>( + deferred: Box<dyn DeferredBuildSignals>, + events: EventDispatcher, + backend: CriticalPathBackendName, + ctx: BuildSignalsContext, + func: F, +) -> anyhow::Result<R> +where + F: FnOnce() -> Fut + Send, + Fut: Future<Output = anyhow::Result<R>> + Send, + R: Send, +{ + let handle = deferred.start(events, backend, ctx); + let result = func().await; + let res = handle + .finish() + .await + .context("Error computing critical path"); + if let Err(e) = res { + soft_error!("critical_path_computation_failed", e.into())?; + } + result +} + +pub trait HasCriticalPathBackend { + fn set_critical_path_backend(&mut self, backend: CriticalPathBackendName); + + fn get_critical_path_backend(&self) -> CriticalPathBackendName; +} + +impl HasCriticalPathBackend for UserComputationData { + fn set_critical_path_backend(&mut self, backend: CriticalPathBackendName) { + self.data.set(backend); + } + + fn get_critical_path_backend(&self) -> CriticalPathBackendName { + *self + .data + .get::<CriticalPathBackendName>() + .expect("CriticalPathBackendName should be set") + } +} diff --git a/app/buck2_build_signals/src/lib.rs b/app/buck2_build_signals/src/lib.rs index 793af29e20888..91e70167dc804 100644 --- a/app/buck2_build_signals/src/lib.rs +++ b/app/buck2_build_signals/src/lib.rs @@ -7,145 +7,7 @@ * of this source tree. */ -use std::collections::HashMap; -use std::str::FromStr; -use std::time::Duration; +#![feature(error_generic_member_access)] -use allocative::Allocative; -use anyhow::Context as _; -use async_trait::async_trait; -use buck2_core::fs::paths::file_name::FileNameBuf; -use buck2_core::soft_error; -use buck2_events::dispatch::EventDispatcher; -use dice::UserComputationData; -use dupe::Dupe; -use futures::future::Future; - -#[derive(Copy, Clone, Dupe)] -pub struct NodeDuration { - /// The amount of time for this node that corresponds to something the user might be able to - /// improve. We should better break this down. - pub user: Duration, - /// The total duration for this node. - pub total: Duration, -} - -impl NodeDuration { - /// Returns the duration we are using in our critical path calculation. This doesn't really - /// *need* to be a function but right now we use user and want to switch to total so it's - /// easier to do that if this is in a single function. - pub fn critical_path_duration(&self) -> Duration { - self.total - } - - pub fn zero() -> Self { - Self { - user: Duration::from_secs(0), - total: Duration::from_secs(0), - } - } -} - -#[derive(Copy, Clone, Dupe, derive_more::Display, Allocative)] -pub enum CriticalPathBackendName { - #[display(fmt = "longest-path-graph")] - LongestPathGraph, - #[display(fmt = "default")] - Default, -} - -impl FromStr for CriticalPathBackendName { - type Err = anyhow::Error; - - fn from_str(s: &str) -> Result<Self, Self::Err> { - if s == "longest-path-graph" { - return Ok(Self::LongestPathGraph); - } - - if s == "default" { - return Ok(Self::Default); - } - - Err(anyhow::anyhow!("Invalid backend name: `{}`", s)) - } -} - -pub struct BuildSignalsContext { - pub command_name: String, - pub metadata: HashMap<String, String>, - pub isolation_prefix: FileNameBuf, -} - -/// Created along with the BuildSignalsInstaller (ideally, BuildSignalsInstaller's definition would -/// live here, but that can't be done for now because it has some dependencies on buck2_build_api). -/// -/// This can be started to actually start processing build signals.
-pub trait DeferredBuildSignals: Send { - fn start( - self: Box<Self>, - events: EventDispatcher, - backend: CriticalPathBackendName, - ctx: BuildSignalsContext, - ) -> Box<dyn FinishBuildSignals>; -} - -/// Returned by DeferredBuildSignals once started. Lets us report that we finished. -#[async_trait] -pub trait FinishBuildSignals: Send { - async fn finish(self: Box<Self>) -> anyhow::Result<()>; -} - -/// Start the backend for a DeferredBuildSignals instance. -/// -/// Build listeners operate by creating a matched pair of signal senders and signal receivers. -/// Senders are Dupe and allow for arbitrarily many writeres. Receivers are not Dupe and are -/// expected to be driven by a single thread. This implies that, in order for the receiver to -/// function correctly and dispatch to build listeners, it must be run in a background task that is -/// periodically polled. -/// -/// This function arranges for a background task to be spawned that drives the receiver, while -/// invoking the called function with a live BuildSignalSender that can be used to send events to -/// the listening receiver. Upon return of `scope`, the sender terminates the receiver by sending a -/// `BuildFinished` signal and joins the receiver task. -pub async fn scope<F, Fut, R>( - deferred: Box<dyn DeferredBuildSignals>, - events: EventDispatcher, - backend: CriticalPathBackendName, - ctx: BuildSignalsContext, - func: F, -) -> anyhow::Result<R> -where - F: FnOnce() -> Fut + Send, - Fut: Future<Output = anyhow::Result<R>> + Send, - R: Send, -{ - let handle = deferred.start(events, backend, ctx); - let result = func().await; - let res = handle - .finish() - .await - .context("Error computing critical path"); - if let Err(e) = res { - soft_error!("critical_path_computation_failed", e)?; - } - result -} - -pub trait HasCriticalPathBackend { - fn set_critical_path_backend(&mut self, backend: CriticalPathBackendName); - - fn get_critical_path_backend(&self) -> CriticalPathBackendName; -} - -impl HasCriticalPathBackend for UserComputationData { - fn set_critical_path_backend(&mut self, backend: CriticalPathBackendName) { - self.data.set(backend); - } - - fn get_critical_path_backend(&self) -> CriticalPathBackendName { - *self - .data - .get::<CriticalPathBackendName>() - .expect("CriticalPathBackendName should be set") - } -} +pub mod env; +pub mod node_key; diff --git a/app/buck2_build_signals/src/node_key.rs b/app/buck2_build_signals/src/node_key.rs new file mode 100644 index 0000000000000..842d9305c36ae --- /dev/null +++ b/app/buck2_build_signals/src/node_key.rs @@ -0,0 +1,98 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::fmt::Debug; +use std::fmt::Display; +use std::hash::Hash; +use std::hash::Hasher; +use std::sync::Arc; + +use buck2_util::hash::BuckHasher; +use cmp_any::PartialEqAny; +use dupe::Dupe; + +/// `Display` should not include type name (like `dice::Key`).
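+///
+/// A minimal implementor sketch (editorial illustration; `MyNodeKey` is a hypothetical key type
+/// that relies on the default `critical_path_entry_proto`):
+///
+/// ```ignore
+/// #[derive(Debug, derive_more::Display, Hash, Eq, PartialEq)]
+/// #[display("{_0}")] // No type name here; `NodeKey::Dyn` adds it when printing.
+/// struct MyNodeKey(String);
+///
+/// impl BuildSignalsNodeKeyImpl for MyNodeKey {}
+/// ```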
+pub trait BuildSignalsNodeKeyImpl: + Eq + PartialEq + Hash + Display + Debug + Send + Sync + 'static +{ + fn critical_path_entry_proto(&self) -> Option<buck2_data::critical_path_entry2::Entry> { + None + } +} + +pub trait BuildSignalsNodeKeyDyn: Send + Sync + 'static { + fn eq_token(&self) -> PartialEqAny; + fn display(&self) -> &dyn Display; + fn debug(&self) -> &dyn Debug; + fn critical_path_entry_proto(&self) -> Option<buck2_data::critical_path_entry2::Entry>; + fn hash(&self) -> u64; +} + +impl<T: BuildSignalsNodeKeyImpl> BuildSignalsNodeKeyDyn for T { + fn eq_token(&self) -> PartialEqAny { + PartialEqAny::new(self) + } + + fn display(&self) -> &dyn Display { + self + } + + fn debug(&self) -> &dyn Debug { + self + } + + fn critical_path_entry_proto(&self) -> Option<buck2_data::critical_path_entry2::Entry> { + self.critical_path_entry_proto() + } + + fn hash(&self) -> u64 { + let mut hasher = BuckHasher::new(); + self.hash(&mut hasher); + hasher.finish() + } +} + +#[derive(Clone, Dupe)] +pub struct BuildSignalsNodeKey(Arc<dyn BuildSignalsNodeKeyDyn>); + +impl BuildSignalsNodeKey { + pub fn new<T: BuildSignalsNodeKeyImpl>(key: T) -> Self { + BuildSignalsNodeKey(Arc::new(key)) + } + + pub fn critical_path_entry_proto(&self) -> Option<buck2_data::critical_path_entry2::Entry> { + self.0.critical_path_entry_proto() + } +} + +impl PartialEq for BuildSignalsNodeKey { + fn eq(&self, other: &Self) -> bool { + self.0.eq_token() == other.0.eq_token() + } +} + +impl Eq for BuildSignalsNodeKey {} + +impl Display for BuildSignalsNodeKey { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + Display::fmt(self.0.display(), f) + } +} + +impl Debug for BuildSignalsNodeKey { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + Debug::fmt(self.0.debug(), f) + } +} + +impl Hash for BuildSignalsNodeKey { + fn hash<H: Hasher>(&self, state: &mut H) { + state.write_u64(self.0.hash()); + } +} diff --git a/app/buck2_build_signals_impl/BUCK b/app/buck2_build_signals_impl/BUCK index dcc4fca901291..9807e6c40bb5c 100644 --- a/app/buck2_build_signals_impl/BUCK +++ b/app/buck2_build_signals_impl/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -11,8 +10,9 @@ rust_library( deps = [ "fbsource//third-party/rust:anyhow", "fbsource//third-party/rust:async-trait", - "fbsource//third-party/rust:derive_more", "fbsource//third-party/rust:itertools", + "fbsource//third-party/rust:serde", + "fbsource//third-party/rust:serde_json", "fbsource//third-party/rust:smallvec", "fbsource//third-party/rust:static_assertions", "fbsource//third-party/rust:tokio", @@ -22,7 +22,6 @@ rust_library( "//buck2/app/buck2_build_api:buck2_build_api", "//buck2/app/buck2_build_signals:buck2_build_signals", "//buck2/app/buck2_common:buck2_common", - "//buck2/app/buck2_configured:buck2_configured", "//buck2/app/buck2_core:buck2_core", "//buck2/app/buck2_critical_path:buck2_critical_path", "//buck2/app/buck2_data:buck2_data", diff --git a/app/buck2_build_signals_impl/Cargo.toml b/app/buck2_build_signals_impl/Cargo.toml index 1ab09f5fb74f4..682847223a807 100644 --- a/app/buck2_build_signals_impl/Cargo.toml +++ b/app/buck2_build_signals_impl/Cargo.toml @@ -1,25 +1,20 @@ [package] +authors = ["Meta"] +description = "Capture build signals during a build to produce a critical path" +edition = "2021" +license = { workspace = true } name = "buck2_build_signals_impl" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Capture build signals during a build to produce a critical path" -license = "MIT OR Apache-2.0" -authors = ["Meta"] [dependencies] anyhow = { workspace = true } async-trait = { workspace = true } 
-derive_more = { workspace = true } -itertools = { workspace = true } -smallvec = { workspace = true } -tokio = { workspace = true } -tokio-stream = { workspace = true } buck2_analysis = { workspace = true } buck2_artifact = { workspace = true } buck2_build_api = { workspace = true } buck2_build_signals = { workspace = true } buck2_common = { workspace = true } -buck2_configured = { workspace = true } buck2_core = { workspace = true } buck2_critical_path = { workspace = true } buck2_data = { workspace = true } @@ -29,4 +24,10 @@ buck2_node = { workspace = true } dice = { workspace = true } dupe = { workspace = true } gazebo = { workspace = true } +itertools = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +smallvec = { workspace = true } static_assertions = { workspace = true } +tokio = { workspace = true } +tokio-stream = { workspace = true } diff --git a/app/buck2_build_signals_impl/src/backend.rs b/app/buck2_build_signals_impl/src/backend.rs new file mode 100644 index 0000000000000..bb444d9436d67 --- /dev/null +++ b/app/buck2_build_signals_impl/src/backend.rs @@ -0,0 +1,13 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub(crate) mod backend; +pub(crate) mod default; +pub(crate) mod logging; +pub(crate) mod longest_path_graph; diff --git a/app/buck2_build_signals_impl/src/backend/backend.rs b/app/buck2_build_signals_impl/src/backend/backend.rs index 9705fd4d3bb93..066bbb26bd1c2 100644 --- a/app/buck2_build_signals_impl/src/backend/backend.rs +++ b/app/buck2_build_signals_impl/src/backend/backend.rs @@ -7,11 +7,9 @@ * of this source tree. 
*/ -use std::sync::Arc; - -use buck2_build_api::actions::RegisteredAction; -use buck2_build_signals::CriticalPathBackendName; -use buck2_build_signals::NodeDuration; +use buck2_build_api::actions::calculation::ActionWithExtraData; +use buck2_build_signals::env::CriticalPathBackendName; +use buck2_build_signals::env::NodeDuration; use buck2_events::span::SpanId; use smallvec::SmallVec; @@ -22,16 +20,16 @@ pub(crate) trait BuildListenerBackend { fn process_node( &mut self, key: NodeKey, - value: Option<Arc<RegisteredAction>>, + value: Option<ActionWithExtraData>, duration: NodeDuration, - dep_keys: impl Iterator<Item = NodeKey>, + dep_keys: impl IntoIterator<Item = NodeKey>, span_ids: SmallVec<[SpanId; 1]>, ); fn process_top_level_target( &mut self, analysis: NodeKey, - artifacts: impl Iterator<Item = NodeKey>, + artifacts: impl IntoIterator<Item = NodeKey>, ); fn finish(self) -> anyhow::Result<BuildInfo>; diff --git a/app/buck2_build_signals_impl/src/backend/default.rs b/app/buck2_build_signals_impl/src/backend/default.rs index e9c0011ba5a34..ce94317644fbd 100644 --- a/app/buck2_build_signals_impl/src/backend/default.rs +++ b/app/buck2_build_signals_impl/src/backend/default.rs @@ -11,13 +11,12 @@ use std::collections::HashMap; use std::collections::HashSet; use std::fmt::Display; use std::hash::Hash; -use std::sync::Arc; use std::time::Duration; use anyhow::Context as _; -use buck2_build_api::actions::RegisteredAction; -use buck2_build_signals::CriticalPathBackendName; -use buck2_build_signals::NodeDuration; +use buck2_build_api::actions::calculation::ActionWithExtraData; +use buck2_build_signals::env::CriticalPathBackendName; +use buck2_build_signals::env::NodeDuration; use buck2_events::span::SpanId; use dupe::Dupe; use gazebo::prelude::VecExt; @@ -32,10 +31,10 @@ use crate::NodeKey; #[derive(Clone, Dupe)] struct CriticalPathNode<TKey, TValue> { /// The aggregated duration of this critical path. - pub duration: Duration, + pub(crate) duration: Duration, /// The value of this node. If None, this node just won't be included when displaying. - pub value: TValue, - pub prev: Option<TKey>, + pub(crate) value: TValue, + pub(crate) prev: Option<TKey>, } fn extract_critical_path( @@ -95,12 +94,13 @@ impl BuildListenerBackend for DefaultBackend { fn process_node( &mut self, key: NodeKey, - value: Option<Arc<RegisteredAction>>, + value: Option<ActionWithExtraData>, duration: NodeDuration, - dep_keys: impl Iterator<Item = NodeKey>, + dep_keys: impl IntoIterator<Item = NodeKey>, span_ids: SmallVec<[SpanId; 1]>, ) { let longest_ancestor = dep_keys + .into_iter() .unique() .filter_map(|node_key| { self.num_edges += 1; @@ -110,7 +110,7 @@ impl BuildListenerBackend for DefaultBackend { .max_by_key(|d| d.1); let value = NodeData { - action: value, + action_with_extra_data: value, duration, span_ids, }; @@ -135,7 +135,7 @@ impl BuildListenerBackend for DefaultBackend { fn process_top_level_target( &mut self, _analysis: NodeKey, - _artifacts: impl Iterator<Item = NodeKey>, + _artifacts: impl IntoIterator<Item = NodeKey>, ) { } @@ -177,6 +177,7 @@ mod tests { }, ); } + #[test] fn empty_path() { let predecessors = CriticalPathMap::new(); diff --git a/app/buck2_build_signals_impl/src/backend/logging.rs b/app/buck2_build_signals_impl/src/backend/logging.rs new file mode 100644 index 0000000000000..ead1eb0559216 --- /dev/null +++ b/app/buck2_build_signals_impl/src/backend/logging.rs @@ -0,0 +1,77 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree.
+ */ + +use buck2_build_api::actions::calculation::ActionWithExtraData; +use buck2_build_signals::env::CriticalPathBackendName; +use buck2_build_signals::env::NodeDuration; +use buck2_data::QuickUnstableE2eData; +use buck2_events::dispatch::EventDispatcher; +use buck2_events::span::SpanId; +use serde::Deserialize; +use serde::Serialize; +use smallvec::SmallVec; + +use crate::backend::backend::BuildListenerBackend; +use crate::BuildInfo; +use crate::NodeKey; + +pub(crate) struct LoggingBackend { + events: EventDispatcher, +} + +impl LoggingBackend { + pub(crate) fn new(events: EventDispatcher) -> Self { + Self { events } + } +} + +#[derive(Serialize, Deserialize)] +struct Node { + key: String, + deps: Vec<String>, +} + +impl BuildListenerBackend for LoggingBackend { + fn process_node( + &mut self, + key: NodeKey, + _value: Option<ActionWithExtraData>, + _duration: NodeDuration, + dep_keys: impl IntoIterator<Item = NodeKey>, + _span_ids: SmallVec<[SpanId; 1]>, + ) { + self.events.instant_event(QuickUnstableE2eData { + key: "critical_path_logging_node".to_owned(), + data: serde_json::to_string(&Node { + key: key.to_string(), + deps: dep_keys.into_iter().map(|v| v.to_string()).collect(), + }) + .unwrap(), + }); + } + + fn process_top_level_target( + &mut self, + _analysis: NodeKey, + _artifacts: impl IntoIterator<Item = NodeKey>, + ) { + } + + fn finish(self) -> anyhow::Result<BuildInfo> { + Ok(BuildInfo { + critical_path: Vec::new(), + num_nodes: 0, + num_edges: 0, + }) + } + + fn name() -> CriticalPathBackendName { + CriticalPathBackendName::Logging + } +} diff --git a/app/buck2_build_signals_impl/src/backend/longest_path_graph.rs b/app/buck2_build_signals_impl/src/backend/longest_path_graph.rs index b5d1b7217c912..737a13cbef9cf 100644 --- a/app/buck2_build_signals_impl/src/backend/longest_path_graph.rs +++ b/app/buck2_build_signals_impl/src/backend/longest_path_graph.rs @@ -7,13 +7,12 @@ * of this source tree.
*/ -use std::sync::Arc; use std::time::Duration; use anyhow::Context as _; -use buck2_build_api::actions::RegisteredAction; -use buck2_build_signals::CriticalPathBackendName; -use buck2_build_signals::NodeDuration; +use buck2_build_api::actions::calculation::ActionWithExtraData; +use buck2_build_signals::env::CriticalPathBackendName; +use buck2_build_signals::env::NodeDuration; use buck2_core::soft_error; use buck2_critical_path::compute_critical_path_potentials; use buck2_critical_path::GraphBuilder; @@ -54,9 +53,9 @@ impl BuildListenerBackend for LongestPathGraphBackend { fn process_node( &mut self, key: NodeKey, - action: Option<Arc<RegisteredAction>>, + action_with_extra_data: Option<ActionWithExtraData>, duration: NodeDuration, - dep_keys: impl Iterator<Item = NodeKey>, + dep_keys: impl IntoIterator<Item = NodeKey>, span_ids: SmallVec<[SpanId; 1]>, ) { let builder = match self.builder.as_mut() { @@ -68,7 +67,7 @@ impl BuildListenerBackend for LongestPathGraphBackend { key, dep_keys, NodeData { - action, + action_with_extra_data, duration, span_ids, }, @@ -91,11 +90,11 @@ impl BuildListenerBackend for LongestPathGraphBackend { fn process_top_level_target( &mut self, analysis: NodeKey, - artifacts: impl Iterator<Item = NodeKey>, + artifacts: impl IntoIterator<Item = NodeKey>, ) { self.top_level_analysis.push(VisibilityEdge { node: analysis, - makes_visible: artifacts.collect(), + makes_visible: artifacts.into_iter().collect(), }) } @@ -188,7 +187,7 @@ impl BuildListenerBackend for LongestPathGraphBackend { let data = std::mem::replace( &mut data[vertex_idx], NodeData { - action: None, + action_with_extra_data: None, duration: NodeDuration::zero(), span_ids: Default::default(), }, diff --git a/app/buck2_build_signals_impl/src/backend/mod.rs b/app/buck2_build_signals_impl/src/backend/mod.rs deleted file mode 100644 index 34de68bc0dcd3..0000000000000 --- a/app/buck2_build_signals_impl/src/backend/mod.rs +++ /dev/null @@ -1,14 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -#![allow(clippy::module_inception)] - -pub mod backend; -pub mod default; -pub mod longest_path_graph; diff --git a/app/buck2_build_signals_impl/src/lib.rs b/app/buck2_build_signals_impl/src/lib.rs index c36ae5581a4af..a3ebd6fcd8259 100644 --- a/app/buck2_build_signals_impl/src/lib.rs +++ b/app/buck2_build_signals_impl/src/lib.rs @@ -7,6 +7,8 @@ * of this source tree.
*/ +#![feature(error_generic_member_access)] + use std::any::Any; use std::collections::HashMap; use std::fmt; @@ -20,26 +22,23 @@ use async_trait::async_trait; use buck2_analysis::analysis::calculation::AnalysisKey; use buck2_analysis::analysis::calculation::AnalysisKeyActivationData; use buck2_artifact::artifact::build_artifact::BuildArtifact; +use buck2_build_api::actions::calculation::ActionWithExtraData; use buck2_build_api::actions::calculation::BuildKey; use buck2_build_api::actions::calculation::BuildKeyActivationData; -use buck2_build_api::actions::RegisteredAction; use buck2_build_api::artifact_groups::calculation::EnsureProjectedArtifactKey; use buck2_build_api::artifact_groups::calculation::EnsureTransitiveSetProjectionKey; -use buck2_build_api::artifact_groups::ArtifactGroup; -use buck2_build_api::artifact_groups::ResolvedArtifactGroup; +use buck2_build_api::artifact_groups::ResolvedArtifactGroupBuildSignalsKey; use buck2_build_api::build_signals::BuildSignals; use buck2_build_api::build_signals::BuildSignalsInstaller; use buck2_build_api::build_signals::CREATE_BUILD_SIGNALS; -use buck2_build_api::deferred::calculation::DeferredCompute; -use buck2_build_api::deferred::calculation::DeferredResolve; -use buck2_build_signals::BuildSignalsContext; -use buck2_build_signals::CriticalPathBackendName; -use buck2_build_signals::DeferredBuildSignals; -use buck2_build_signals::FinishBuildSignals; -use buck2_build_signals::NodeDuration; +use buck2_build_signals::env::BuildSignalsContext; +use buck2_build_signals::env::CriticalPathBackendName; +use buck2_build_signals::env::DeferredBuildSignals; +use buck2_build_signals::env::FinishBuildSignals; +use buck2_build_signals::env::NodeDuration; +use buck2_build_signals::node_key::BuildSignalsNodeKey; use buck2_common::package_listing::dice::PackageListingKey; use buck2_common::package_listing::dice::PackageListingKeyActivationData; -use buck2_configured::nodes::calculation::ConfiguredTargetNodeKey; use buck2_core::package::PackageLabel; use buck2_core::target::configured_target_label::ConfiguredTargetLabel; use buck2_data::ToProtoMessage; @@ -50,11 +49,11 @@ use buck2_events::span::SpanId; use buck2_interpreter_for_build::interpreter::calculation::IntepreterResultsKeyActivationData; use buck2_interpreter_for_build::interpreter::calculation::InterpreterResultsKey; use buck2_node::nodes::eval_result::EvaluationResult; -use derive_more::From; use dice::ActivationData; use dice::ActivationTracker; +use dice::DynKey; use dupe::Dupe; -use dupe::OptionDupedExt; +use gazebo::prelude::SliceExt; use itertools::Itertools; use smallvec::SmallVec; use static_assertions::assert_eq_size; @@ -66,26 +65,27 @@ use tokio_stream::StreamExt; use crate::backend::backend::BuildListenerBackend; use crate::backend::default::DefaultBackend; +use crate::backend::logging::LoggingBackend; use crate::backend::longest_path_graph::LongestPathGraphBackend; mod backend; /// A node in our critical path graph. -#[derive(Hash, Eq, PartialEq, Clone, Dupe, Debug, From)] +#[derive(Hash, Eq, PartialEq, Clone, Dupe, Debug)] enum NodeKey { // Those are DICE keys. BuildKey(BuildKey), AnalysisKey(AnalysisKey), EnsureProjectedArtifactKey(EnsureProjectedArtifactKey), EnsureTransitiveSetProjectionKey(EnsureTransitiveSetProjectionKey), - DeferredCompute(DeferredCompute), - DeferredResolve(DeferredResolve), - ConfiguredTargetNodeKey(ConfiguredTargetNodeKey), InterpreterResultsKey(InterpreterResultsKey), PackageListingKey(PackageListingKey), // This one is not a DICE key. 
Materialization(BuildArtifact), + + // Dynamically-typed. + Dyn(&'static str, BuildSignalsNodeKey), } // Explain the sizeof this struct (and avoid regressing it since we store it in the longest path @@ -95,16 +95,13 @@ assert_eq_size!(BuildKey, [usize; 4]); assert_eq_size!(AnalysisKey, [usize; 2]); assert_eq_size!(EnsureTransitiveSetProjectionKey, [usize; 5]); assert_eq_size!(EnsureProjectedArtifactKey, [usize; 7]); -assert_eq_size!(DeferredCompute, [usize; 4]); -assert_eq_size!(DeferredResolve, [usize; 4]); -assert_eq_size!(ConfiguredTargetNodeKey, [usize; 2]); assert_eq_size!(InterpreterResultsKey, [usize; 1]); assert_eq_size!(PackageListingKey, [usize; 1]); assert_eq_size!(BuildArtifact, [usize; 6]); assert_eq_size!(NodeKey, [usize; 7]); impl NodeKey { - fn from_any(key: &dyn Any) -> Option<Self> { + fn from_dyn_key(key: &DynKey) -> Option<Self> { let key = if let Some(key) = key.downcast_ref::<BuildKey>() { Self::BuildKey(key.dupe()) } else if let Some(key) = key.downcast_ref::<AnalysisKey>() { @@ -113,16 +110,12 @@ impl NodeKey { Self::EnsureProjectedArtifactKey(key.dupe()) } else if let Some(key) = key.downcast_ref::<EnsureTransitiveSetProjectionKey>() { Self::EnsureTransitiveSetProjectionKey(key.dupe()) - } else if let Some(key) = key.downcast_ref::<DeferredCompute>() { - Self::DeferredCompute(key.dupe()) - } else if let Some(key) = key.downcast_ref::<DeferredResolve>() { - Self::DeferredResolve(key.dupe()) - } else if let Some(key) = key.downcast_ref::<ConfiguredTargetNodeKey>() { - Self::ConfiguredTargetNodeKey(key.dupe()) } else if let Some(key) = key.downcast_ref::<InterpreterResultsKey>() { Self::InterpreterResultsKey(key.dupe()) } else if let Some(key) = key.downcast_ref::<PackageListingKey>() { Self::PackageListingKey(key.dupe()) + } else if let Some(node_key) = key.request_value::<BuildSignalsNodeKey>() { + Self::Dyn(key.key_type_name(), node_key) } else { return None; }; @@ -140,25 +133,23 @@ impl fmt::Display for NodeKey { Self::EnsureTransitiveSetProjectionKey(k) => { write!(f, "EnsureTransitiveSetProjectionKey({})", k) } - Self::DeferredCompute(k) => write!(f, "DeferredCompute({})", k), - Self::DeferredResolve(k) => write!(f, "DeferredResolve({})", k), - Self::ConfiguredTargetNodeKey(k) => write!(f, "ConfiguredTargetNodeKey({})", k), Self::InterpreterResultsKey(k) => write!(f, "InterpreterResultsKey({})", k), Self::PackageListingKey(k) => write!(f, "PackageListingKey({})", k), Self::Materialization(k) => write!(f, "Materialization({})", k), + Self::Dyn(name, k) => write!(f, "{name}({k})"), } } } struct TopLevelTargetSignal { - pub label: ConfiguredTargetLabel, - pub artifacts: Vec<ArtifactGroup>, + pub(crate) label: ConfiguredTargetLabel, + pub(crate) artifacts: Vec<ResolvedArtifactGroupBuildSignalsKey>, } struct FinalMaterializationSignal { - pub artifact: BuildArtifact, - pub duration: NodeDuration, - pub span_id: Option<SpanId>, + pub(crate) artifact: BuildArtifact, + pub(crate) duration: NodeDuration, + pub(crate) span_id: Option<SpanId>, } /* These signals are distinct from the main Buck event bus because some @@ -166,7 +157,6 @@ struct FinalMaterializationSignal { * entire build graph isn't feasible - therefore, we have these signals * with an unserializable but lightweight handle on a RegisteredAction. */ -#[derive(From)] enum BuildSignal { Evaluation(Evaluation), TopLevelTarget(TopLevelTargetSignal), @@ -175,7 +165,7 @@ enum BuildSignal { } /// Data for a BuildSignal that is the result of a DICE key evaluation. -pub struct Evaluation { +pub(crate) struct Evaluation { /// The key we evaluated. key: NodeKey, /// The duration. By default this'll be zero, unless activation data says otherwise. @@ -189,22 +179,30 @@ pub struct Evaluation { // now) to have them not tied to the right variant.
/// The RegisteredAction that corresponds to this Evaluation (this will only be present for /// NodeKey::BuildKey). - action: Option<Arc<RegisteredAction>>, + action_with_extra_data: Option<ActionWithExtraData>, /// The Load result that corresponds to this Evaluation (this will only be present for /// InterpreterResultsKey). load_result: Option<Arc<EvaluationResult>>, } -pub struct BuildSignalSender { +#[derive(Clone)] +pub(crate) struct BuildSignalSender { sender: UnboundedSender<BuildSignal>, } impl BuildSignals for BuildSignalSender { - fn top_level_target(&self, label: ConfiguredTargetLabel, artifacts: Vec<ArtifactGroup>) { + fn top_level_target( + &self, + label: ConfiguredTargetLabel, + artifacts: Vec<ResolvedArtifactGroupBuildSignalsKey>, + ) { let _ignored = self .sender - .send(TopLevelTargetSignal { label, artifacts }.into()); + .send(BuildSignal::TopLevelTarget(TopLevelTargetSignal { + label, + artifacts, + })); } fn final_materialization( @@ -213,14 +211,13 @@ impl BuildSignals for BuildSignalSender { duration: NodeDuration, span_id: Option<SpanId>, ) { - let _ignored = self.sender.send( + let _ignored = self.sender.send(BuildSignal::FinalMaterialization( FinalMaterializationSignal { artifact, duration, span_id, - } - .into(), - ); + }, + )); } } @@ -230,20 +227,20 @@ impl ActivationTracker for BuildSignalSender { /// (if any). fn key_activated( &self, - key: &dyn Any, - deps: &mut dyn Iterator<Item = &dyn Any>, + key: &DynKey, + deps: &mut dyn Iterator<Item = &DynKey>, activation_data: ActivationData, ) { - let key = match NodeKey::from_any(key) { + let key = match NodeKey::from_dyn_key(key) { Some(key) => key, None => return, }; let mut signal = Evaluation { key, - action: None, + action_with_extra_data: None, duration: NodeDuration::zero(), - dep_keys: deps.into_iter().filter_map(NodeKey::from_any).collect(), + dep_keys: deps.into_iter().filter_map(NodeKey::from_dyn_key).collect(), spans: Default::default(), load_result: None, }; @@ -262,12 +259,12 @@ impl ActivationTracker for BuildSignalSender { if let ActivationData::Evaluated(mut activation_data) = activation_data { if let Some(BuildKeyActivationData { - action, + action_with_extra_data, duration, spans, }) = downcast_and_take(&mut activation_data) { - signal.action = Some(action); + signal.action_with_extra_data = Some(action_with_extra_data); signal.duration = duration; signal.spans = spans; } else if let Some(AnalysisKeyActivationData { duration, spans }) = @@ -276,6 +273,7 @@ signal.duration = NodeDuration { user: duration, total: duration, + queue: None, }; signal.spans = spans; } else if let Some(IntepreterResultsKeyActivationData { @@ -287,6 +285,7 @@ signal.duration = NodeDuration { user: duration, total: duration, + queue: None, }; signal.load_result = result.ok(); @@ -297,16 +296,17 @@ signal.duration = NodeDuration { user: duration, total: duration, + queue: None, }; signal.spans = spans; } } - let _ignored = self.sender.send(signal.into()); + let _ignored = self.sender.send(BuildSignal::Evaluation(signal)); } } -pub struct DeferredBuildSignalsImpl { +pub(crate) struct DeferredBuildSignalsImpl { sender: Arc<BuildSignalSender>, receiver: UnboundedReceiver<BuildSignal>, } @@ -325,6 +325,12 @@ impl DeferredBuildSignals for 
DeferredBuildSignalsImpl { } } -pub struct FinishBuildSignalsImpl { +pub(crate) struct FinishBuildSignalsImpl { sender: Arc, handle: JoinHandle>, } @@ -383,7 +389,7 @@ where } } - pub async fn run_and_log(mut self, ctx: BuildSignalsContext) -> anyhow::Result<()> { + pub(crate) async fn run_and_log(mut self, ctx: BuildSignalsContext) -> anyhow::Result<()> { while let Some(event) = self.receiver.next().await { match event { BuildSignal::Evaluation(eval) => self.process_evaluation(eval), @@ -408,10 +414,11 @@ where let compute_elapsed = now.elapsed(); let meta_entry_data = NodeData { - action: None, + action_with_extra_data: None, duration: NodeDuration { user: Duration::ZERO, total: compute_elapsed, + queue: None, }, span_ids: Default::default(), }; @@ -432,14 +439,27 @@ where // If we have a NodeKey that's an ActionKey we'd expect to have an `action` // in our data (unless we didn't actually run it because of e.g. early // cutoff, in which case omitting it is what we want). - let action = data.action.as_ref()?; + let action_with_extra_data = data.action_with_extra_data.as_ref()?; buck2_data::critical_path_entry2::ActionExecution { owner: Some(owner), name: Some(buck2_data::ActionName { - category: action.category().as_str().to_owned(), - identifier: action.identifier().unwrap_or("").to_owned(), + category: action_with_extra_data + .action + .category() + .as_str() + .to_owned(), + identifier: action_with_extra_data + .action + .identifier() + .unwrap_or("") + .to_owned(), }), + execution_kind: action_with_extra_data.execution_kind.into(), + target_rule_type_name: action_with_extra_data + .target_rule_type_name + .to_owned(), + action_digest: action_with_extra_data.action_digest.to_owned(), } .into() } @@ -466,9 +486,7 @@ where .into(), NodeKey::EnsureProjectedArtifactKey(..) => return None, NodeKey::EnsureTransitiveSetProjectionKey(..) => return None, - NodeKey::DeferredCompute(..) => return None, - NodeKey::DeferredResolve(..) => return None, - NodeKey::ConfiguredTargetNodeKey(..) 
=> return None, + NodeKey::Dyn(_, d) => d.critical_path_entry_proto()?, }; Some((entry, data, potential_improvement)) @@ -483,6 +501,7 @@ where .collect(), duration: Some(data.duration.critical_path_duration().try_into()?), user_duration: Some(data.duration.user.try_into()?), + queue_duration: data.duration.queue.map(|d| d.try_into()).transpose()?, total_duration: Some(data.duration.total.try_into()?), potential_improvement_duration: potential_improvement .map(|p| p.try_into()) @@ -513,7 +532,7 @@ where self.backend.process_node( evaluation.key, - evaluation.action, + evaluation.action_with_extra_data, evaluation.duration, evaluation.dep_keys.into_iter(), evaluation.spans, @@ -564,26 +583,14 @@ where &mut self, top_level: TopLevelTargetSignal, ) -> Result<(), anyhow::Error> { - let artifact_keys = - top_level - .artifacts - .into_iter() - .filter_map(|dep| match dep.assert_resolved() { - ResolvedArtifactGroup::Artifact(artifact) => artifact - .action_key() - .duped() - .map(BuildKey) - .map(NodeKey::BuildKey), - ResolvedArtifactGroup::TransitiveSetProjection(key) => { - Some(NodeKey::EnsureTransitiveSetProjectionKey( - EnsureTransitiveSetProjectionKey(key.dupe()), - )) - } - }); - self.backend.process_top_level_target( NodeKey::AnalysisKey(AnalysisKey(top_level.label)), - artifact_keys, + top_level.artifacts.map(|k| match k { + ResolvedArtifactGroupBuildSignalsKey::BuildKey(b) => NodeKey::BuildKey(b.clone()), + ResolvedArtifactGroupBuildSignalsKey::EnsureTransitiveSetProjectionKey(e) => { + NodeKey::EnsureTransitiveSetProjectionKey(e.clone()) + } + }), ); Ok(()) @@ -607,7 +614,7 @@ where } } -pub struct BuildInfo { +pub(crate) struct BuildInfo { // Node, its data, and its potential for improvement critical_path: Vec<(NodeKey, NodeData, Option)>, num_nodes: u64, @@ -616,12 +623,12 @@ pub struct BuildInfo { #[derive(Clone)] struct NodeData { - action: Option>, + action_with_extra_data: Option, duration: NodeDuration, span_ids: SmallVec<[SpanId; 1]>, } -assert_eq_size!(NodeData, [usize; 8]); +assert_eq_size!(NodeData, [usize; 17]); fn create_build_signals() -> (BuildSignalsInstaller, Box) { let (sender, receiver) = tokio::sync::mpsc::unbounded_channel(); diff --git a/app/buck2_bxl/BUCK b/app/buck2_bxl/BUCK index 82440695db706..61c6faee60fb3 100644 --- a/app/buck2_bxl/BUCK +++ b/app/buck2_bxl/BUCK @@ -1,49 +1,46 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") rust_library( name = "buck2_bxl", srcs = glob(["src/**/*.rs"]), - doctests = False, # FIXME test_deps = [ "fbsource//third-party/rust:ctor", "fbsource//third-party/rust:maplit", "//buck2/app/buck2_interpreter:buck2_interpreter", - "//buck2/shed/provider:provider", ], deps = [ "fbsource//third-party/rust:anyhow", "fbsource//third-party/rust:async-recursion", - "fbsource//third-party/rust:async-scoped", "fbsource//third-party/rust:async-trait", - "fbsource//third-party/rust:clap-3", - "fbsource//third-party/rust:dashmap", + "fbsource//third-party/rust:clap", "fbsource//third-party/rust:derivative", "fbsource//third-party/rust:derive_more", "fbsource//third-party/rust:either", "fbsource//third-party/rust:futures", - "fbsource//third-party/rust:higher-order-closure", "fbsource//third-party/rust:indexmap", "fbsource//third-party/rust:itertools", "fbsource//third-party/rust:num-bigint", "fbsource//third-party/rust:serde", "fbsource//third-party/rust:serde_json", - "fbsource//third-party/rust:thiserror", "fbsource//third-party/rust:tokio", + 
"fbsource//third-party/rust:tracing", "//buck2/allocative/allocative:allocative", + "//buck2/app/buck2_action_impl:buck2_action_impl", "//buck2/app/buck2_analysis:buck2_analysis", "//buck2/app/buck2_artifact:buck2_artifact", "//buck2/app/buck2_build_api:buck2_build_api", "//buck2/app/buck2_cli_proto:buck2_cli_proto", "//buck2/app/buck2_common:buck2_common", - "//buck2/app/buck2_configured:buck2_configured", "//buck2/app/buck2_core:buck2_core", "//buck2/app/buck2_data:buck2_data", + "//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_events:buck2_events", "//buck2/app/buck2_execute:buck2_execute", + "//buck2/app/buck2_futures:buck2_futures", "//buck2/app/buck2_interpreter:buck2_interpreter", + "//buck2/app/buck2_interpreter_for_build:buck2_interpreter_for_build", "//buck2/app/buck2_node:buck2_node", "//buck2/app/buck2_profile:buck2_profile", "//buck2/app/buck2_query:buck2_query", @@ -55,7 +52,6 @@ rust_library( "//buck2/gazebo/display_container:display_container", "//buck2/gazebo/dupe:dupe", "//buck2/gazebo/gazebo:gazebo", - "//buck2/shed/more_futures:more_futures", "//buck2/starlark-rust/starlark:starlark", "//buck2/starlark-rust/starlark_map:starlark_map", ], diff --git a/app/buck2_bxl/Cargo.toml b/app/buck2_bxl/Cargo.toml index 765f143294f64..a9b5f19744229 100644 --- a/app/buck2_bxl/Cargo.toml +++ b/app/buck2_bxl/Cargo.toml @@ -1,63 +1,57 @@ [package] +description = "Buck V2 extension language" +edition = "2021" +license = { workspace = true } name = "buck2_bxl" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Buck V2 extension language" [dependencies] +allocative = { workspace = true } anyhow = { workspace = true } async-recursion = { workspace = true } async-trait = { workspace = true } -async-scoped = { workspace = true } +buck2_events = { workspace = true } clap = { workspace = true } -dashmap = { workspace = true } +cmp_any = { workspace = true } derivative = { workspace = true } derive_more = { workspace = true } +dice = { workspace = true } +display_container = { workspace = true } +dupe = { workspace = true } either = { workspace = true } futures = { workspace = true } -higher-order-closure = { workspace = true } +gazebo = { workspace = true } indexmap = { workspace = true } itertools = { workspace = true } num-bigint = { workspace = true } -once_cell = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } -structopt = { workspace = true } -thiserror = { workspace = true } -tokio = { workspace = true } - -fbinit = { workspace = true } - -allocative = { workspace = true } -gazebo = { workspace = true } -display_container = { workspace = true } -dupe = { workspace = true } -dice = { workspace = true } -cmp_any = { workspace = true } -buck2_events = { workspace = true } starlark = { workspace = true } starlark_map = { workspace = true } -more_futures = { workspace = true } +tokio = { workspace = true } +tracing = { workspace = true } +buck2_action_impl = { workspace = true } buck2_analysis = { workspace = true } buck2_artifact = { workspace = true } buck2_build_api = { workspace = true } -buck2_configured = { workspace = true } +buck2_cli_proto = { workspace = true } buck2_common = { workspace = true } buck2_core = { workspace = true } buck2_data = { workspace = true } +buck2_error = { workspace = true } buck2_execute = { workspace = true } +buck2_futures = { workspace = true } buck2_interpreter = { workspace = true } +buck2_interpreter_for_build = { workspace = true } buck2_node = { workspace = true } 
buck2_profile = { workspace = true } buck2_query = { workspace = true } buck2_query_parser = { workspace = true } buck2_server_ctx = { workspace = true } buck2_util = { workspace = true } -buck2_cli_proto = { workspace = true } [dev-dependencies] -provider = { workspace = true } - ctor = { workspace = true } maplit = { workspace = true } diff --git a/app/buck2_bxl/src/bxl.rs b/app/buck2_bxl/src/bxl.rs new file mode 100644 index 0000000000000..47909c6c8a605 --- /dev/null +++ b/app/buck2_bxl/src/bxl.rs @@ -0,0 +1,14 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub(crate) mod calculation; +pub(crate) mod eval; +pub(crate) mod key; +pub(crate) mod starlark_defs; +pub(crate) mod value_as_starlark_target_label; diff --git a/app/buck2_bxl/src/bxl/calculation.rs b/app/buck2_bxl/src/bxl/calculation.rs index 77aee0c5ab82c..691aa2a62f8ba 100644 --- a/app/buck2_bxl/src/bxl/calculation.rs +++ b/app/buck2_bxl/src/bxl/calculation.rs @@ -13,16 +13,13 @@ use async_trait::async_trait; use buck2_build_api::bxl::calculation::BxlCalculationDyn; use buck2_build_api::bxl::calculation::BxlComputeResult; use buck2_build_api::bxl::calculation::BXL_CALCULATION_IMPL; -use buck2_common::result::SharedResult; -use buck2_common::result::ToSharedResultExt; -use buck2_common::result::ToUnsharedResultExt; -use buck2_core::base_deferred_key::BaseDeferredKeyDyn; -use buck2_interpreter::dice::starlark_profiler::GetStarlarkProfilerInstrumentation; +use buck2_core::base_deferred_key::BaseDeferredKeyBxl; +use buck2_futures::cancellation::CancellationContext; +use buck2_interpreter::starlark_profiler::mode::StarlarkProfileMode; use dice::DiceComputations; use dice::Key; use dupe::Dupe; use futures::future::FutureExt; -use more_futures::cancellation::CancellationContext; use crate::bxl::eval::eval; use crate::bxl::key::BxlKey; @@ -34,8 +31,8 @@ struct BxlCalculationImpl; impl BxlCalculationDyn for BxlCalculationImpl { async fn eval_bxl( &self, - ctx: &DiceComputations, - bxl: Arc, + ctx: &mut DiceComputations<'_>, + bxl: BaseDeferredKeyBxl, ) -> anyhow::Result { eval_bxl(ctx, BxlKey::from_base_deferred_key_dyn_impl_err(bxl)?).await } @@ -46,17 +43,17 @@ pub(crate) fn init_bxl_calculation_impl() { } pub(crate) async fn eval_bxl( - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, bxl: BxlKey, ) -> anyhow::Result { ctx.compute(&internal::BxlComputeKey(bxl)) .await? 
- .unshared_error() + .map_err(anyhow::Error::from) } #[async_trait] impl Key for internal::BxlComputeKey { - type Value = SharedResult; + type Value = buck2_error::Result; async fn compute( &self, @@ -65,17 +62,13 @@ impl Key for internal::BxlComputeKey { ) -> Self::Value { let key = self.0.dupe(); - let profiler = ctx.get_profile_mode_for_intermediate_analysis().await?; - cancellation .with_structured_cancellation(|observer| { async move { - eval(ctx, key, profiler, observer).await.shared_error().map( - |(result, _, materializations)| BxlComputeResult { - bxl_result: Arc::new(result), - materializations, - }, - ) + eval(ctx, key, StarlarkProfileMode::None, observer) + .await + .map_err(buck2_error::Error::from) + .map(|(result, _)| BxlComputeResult(Arc::new(result))) } .boxed() }) @@ -97,8 +90,3 @@ mod internal { #[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)] pub(crate) struct BxlComputeKey(pub(crate) BxlKey); } - -#[cfg(test)] -pub(crate) mod testing { - pub(crate) use crate::bxl::calculation::internal::BxlComputeKey; -} diff --git a/app/buck2_bxl/src/bxl/deferred.rs b/app/buck2_bxl/src/bxl/deferred.rs index b4263f3dd58ea..9bf0eb39d4a9d 100644 --- a/app/buck2_bxl/src/bxl/deferred.rs +++ b/app/buck2_bxl/src/bxl/deferred.rs @@ -15,27 +15,27 @@ mod tests { use std::sync::Arc; use allocative::Allocative; - use async_trait::async_trait; + use buck2_artifact::deferred::key::DeferredHolderKey; + use buck2_build_api::actions::execute::dice_data::set_fallback_executor_config; use buck2_build_api::bxl::calculation::BxlComputeResult; use buck2_build_api::bxl::result::BxlResult; use buck2_build_api::bxl::types::BxlFunctionLabel; use buck2_build_api::deferred::calculation::DeferredCalculation; - use buck2_build_api::deferred::types::BaseKey; use buck2_build_api::deferred::types::Deferred; use buck2_build_api::deferred::types::DeferredCtx; use buck2_build_api::deferred::types::DeferredInput; + use buck2_build_api::deferred::types::DeferredInputsRef; + use buck2_build_api::deferred::types::DeferredOutput; use buck2_build_api::deferred::types::DeferredRegistry; - use buck2_build_api::deferred::types::DeferredTable; use buck2_build_api::deferred::types::DeferredValue; use buck2_common::dice::data::testing::SetTestingIoProvider; - use buck2_common::result::ToSharedResultExt; + use buck2_common::global_cfg_options::GlobalCfgOptions; use buck2_core::base_deferred_key::BaseDeferredKey; use buck2_core::execution_types::execution::ExecutionPlatformResolution; use buck2_core::execution_types::executor_config::CommandExecutorConfig; use buck2_core::fs::project::ProjectRootTemp; use buck2_execute::digest_config::DigestConfig; use buck2_execute::digest_config::SetDigestConfig; - use buck2_execute::execute::dice_data::set_fallback_executor_config; use buck2_interpreter::paths::bxl::BxlFilePath; use dice::testing::DiceBuilder; use dice::DiceComputations; @@ -48,28 +48,32 @@ mod tests { use crate::bxl::eval::mk_stream_cache; use crate::bxl::key::BxlKey; - #[derive(Allocative)] + #[derive(Allocative, Clone, Debug, Eq, PartialEq)] + struct FakeDeferredOutput(usize); + + impl DeferredOutput for FakeDeferredOutput {} + + #[derive(Debug, Allocative)] struct FakeDeferred(usize, IndexSet, Arc); impl provider::Provider for FakeDeferred { fn provide<'a>(&'a self, _demand: &mut provider::Demand<'a>) {} } - #[async_trait] impl Deferred for FakeDeferred { - type Output = usize; + type Output = FakeDeferredOutput; - fn inputs(&self) -> &IndexSet { - &self.1 + fn inputs(&self) -> DeferredInputsRef<'_> { + 
DeferredInputsRef::IndexSet(&self.1) } async fn execute( &self, _ctx: &mut dyn DeferredCtx, - _dice: &mut DiceComputations, + _dice: &mut DiceComputations<'_>, ) -> anyhow::Result> { self.2.store(true, Ordering::SeqCst); - Ok(DeferredValue::Ready(self.0)) + Ok(DeferredValue::Ready(FakeDeferredOutput(self.0))) } } @@ -81,22 +85,23 @@ mod tests { name: "foo".to_owned(), }, Arc::new(OrderedMap::new()), - None, + false, + GlobalCfgOptions::default(), ); - let mut deferred = DeferredRegistry::new(BaseKey::Base(BaseDeferredKey::BxlLabel( - bxl.dupe().into_base_deferred_key_dyn_impl( + let mut deferred = DeferredRegistry::new(DeferredHolderKey::Base( + BaseDeferredKey::BxlLabel(bxl.dupe().into_base_deferred_key_dyn_impl( ExecutionPlatformResolution::unspecified(), Vec::new(), Vec::new(), - ), - ))); + )), + )); let executed0 = Arc::new(AtomicBool::new(false)); let executed1 = Arc::new(AtomicBool::new(false)); let data0 = deferred.defer(FakeDeferred(1, IndexSet::new(), executed0.dupe())); let data1 = deferred.defer(FakeDeferred(5, IndexSet::new(), executed1.dupe())); - let deferred_result = DeferredTable::new(deferred.take_result()?); + let (deferred_result, analysis_values) = deferred.take_result()?; let fs = ProjectRootTemp::new()?; let dice = DiceBuilder::new() @@ -106,38 +111,35 @@ mod tests { }) .mock_and_return( BxlComputeKey(bxl.dupe()), - anyhow::Ok(BxlComputeResult { - bxl_result: Arc::new(BxlResult::BuildsArtifacts { - output_loc: mk_stream_cache("test", &bxl), - error_loc: mk_stream_cache("errortest", &bxl), - built: vec![], - artifacts: vec![], - deferred: deferred_result, - }), - materializations: Arc::new(Default::default()), - }) - .shared_error(), + buck2_error::Ok(BxlComputeResult(Arc::new(BxlResult::BuildsArtifacts { + output_loc: mk_stream_cache("test", &bxl), + error_loc: mk_stream_cache("errortest", &bxl), + built: vec![], + artifacts: vec![], + deferred: deferred_result, + analysis_values, + }))), ); let mut dice_data = UserComputationData::new(); set_fallback_executor_config(&mut dice_data.data, CommandExecutorConfig::testing_local()); - let dice = dice.build(dice_data)?.commit().await; + let mut dice = dice.build(dice_data)?.commit().await; let deferred_result = dice.compute_deferred_data(&data0).await?; - assert_eq!(*deferred_result, 1); + assert_eq!(deferred_result.0, 1); assert!(executed0.load(Ordering::SeqCst)); // we should cache deferred execution executed0.store(false, Ordering::SeqCst); let deferred_result = dice.compute_deferred_data(&data0).await?; - assert_eq!(*deferred_result, 1); + assert_eq!(deferred_result.0, 1); assert!(!executed0.load(Ordering::SeqCst)); let deferred_result = dice.compute_deferred_data(&data1).await?; - assert_eq!(*deferred_result, 5); + assert_eq!(deferred_result.0, 5); assert!(executed1.load(Ordering::SeqCst)); // we should cache deferred execution executed1.store(false, Ordering::SeqCst); - assert_eq!(*deferred_result, 5); + assert_eq!(deferred_result.0, 5); assert!(!executed1.load(Ordering::SeqCst)); Ok(()) diff --git a/app/buck2_bxl/src/bxl/eval.rs b/app/buck2_bxl/src/bxl/eval.rs index 227c991be1e9e..241732bab5946 100644 --- a/app/buck2_bxl/src/bxl/eval.rs +++ b/app/buck2_bxl/src/bxl/eval.rs @@ -8,73 +8,73 @@ */ use std::cell::RefCell; -use std::sync::Arc; +use std::rc::Rc; use anyhow::Context; -use buck2_artifact::artifact::build_artifact::BuildArtifact; -use buck2_build_api::actions::artifact::get_artifact_fs::GetArtifactFs; use buck2_build_api::bxl::result::BxlResult; use buck2_build_api::bxl::types::BxlFunctionLabel; -use 
buck2_build_api::deferred::types::DeferredTable; -use buck2_common::dice::cells::HasCellResolver; -use buck2_common::dice::data::HasIoProvider; use buck2_common::events::HasEvents; +use buck2_common::scope::scope_and_collect_with_dice; use buck2_common::target_aliases::BuckConfigTargetAliasResolver; -use buck2_common::target_aliases::HasTargetAliasResolver; -use buck2_core::base_deferred_key::BaseDeferredKey; +use buck2_core::cells::CellAliasResolver; use buck2_core::cells::CellResolver; -use buck2_core::execution_types::execution::ExecutionPlatformResolution; use buck2_core::fs::buck_out_path::BuckOutPath; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; use buck2_core::package::PackageLabel; use buck2_data::BxlExecutionEnd; use buck2_data::BxlExecutionStart; +use buck2_data::StarlarkFailNoStacktrace; +use buck2_error::starlark_error::from_starlark_with_options; use buck2_events::dispatch::console_message; +use buck2_events::dispatch::get_dispatcher; use buck2_events::dispatch::with_dispatcher; -use buck2_events::dispatch::with_dispatcher_async; +use buck2_events::dispatch::EventDispatcher; +use buck2_execute::digest_config::DigestConfig; use buck2_execute::digest_config::HasDigestConfig; +use buck2_futures::cancellable_future::CancellationObserver; use buck2_interpreter::dice::starlark_provider::with_starlark_eval_provider; use buck2_interpreter::factory::StarlarkEvaluatorProvider; use buck2_interpreter::file_loader::LoadedModule; use buck2_interpreter::load_module::InterpreterCalculation; use buck2_interpreter::paths::module::StarlarkModulePath; use buck2_interpreter::print_handler::EventDispatcherPrintHandler; -use buck2_interpreter::starlark_profiler::StarlarkProfileDataAndStats; -use buck2_interpreter::starlark_profiler::StarlarkProfileModeOrInstrumentation; -use buck2_interpreter::starlark_profiler::StarlarkProfiler; -use buck2_interpreter::starlark_profiler::StarlarkProfilerOrInstrumentation; -use clap::ErrorKind; -use dashmap::DashMap; +use buck2_interpreter::soft_error::Buck2StarlarkSoftErrorHandler; +use buck2_interpreter::starlark_profiler::data::ProfileTarget; +use buck2_interpreter::starlark_profiler::data::StarlarkProfileDataAndStats; +use buck2_interpreter::starlark_profiler::mode::StarlarkProfileMode; +use buck2_interpreter::starlark_profiler::profiler::StarlarkProfiler; +use buck2_interpreter::starlark_profiler::profiler::StarlarkProfilerOpt; +use clap::error::ErrorKind; use dice::DiceComputations; use dice::DiceTransaction; use dupe::Dupe; use itertools::Itertools; -use more_futures::cancellable_future::CancellationObserver; use starlark::environment::Module; use starlark::eval::Evaluator; use starlark::values::structs::AllocStruct; +use starlark::values::structs::StructRef; use starlark::values::OwnedFrozenValueTyped; -use starlark::values::Value; +use starlark::values::UnpackValue; +use starlark::values::ValueOfUnchecked; use starlark::values::ValueTyped; use starlark_map::ordered_map::OrderedMap; -use thiserror::Error; use crate::bxl::key::BxlKey; use crate::bxl::starlark_defs::bxl_function::FrozenBxlFunction; use crate::bxl::starlark_defs::cli_args::CliArgValue; +use crate::bxl::starlark_defs::context::actions::BxlExecutionResolution; use crate::bxl::starlark_defs::context::starlark_async::BxlSafeDiceComputations; use crate::bxl::starlark_defs::context::BxlContext; +use crate::bxl::starlark_defs::context::BxlContextCoreData; +use crate::bxl::starlark_defs::eval_extra::BxlEvalExtra; +use 
crate::bxl::starlark_defs::functions::BxlErrorWithoutStacktrace; pub(crate) async fn eval( - ctx: &mut DiceComputations, + ctx: &mut DiceComputations<'_>, key: BxlKey, - profile_mode_or_instrumentation: StarlarkProfileModeOrInstrumentation, + profile_mode_or_instrumentation: StarlarkProfileMode, liveness: CancellationObserver, -) -> anyhow::Result<( - BxlResult, - Option, - Arc>, -)> { +) -> anyhow::Result<(BxlResult, Option)> { // Note: because we use `block_in_place`, that will prevent the inner future from being polled // and yielded. So, for cancellation observers to work properly within the dice cancellable // future context, we need the future that it's attached to the cancellation context can @@ -91,201 +91,15 @@ pub(crate) async fn eval( // on the scope will be dropped at the earliest await point. If we are within the blocking // section of bxl, the cancellation observer will be notified and cause the blocking calls // to terminate. - async_scoped::TokioScope::scope_and_collect(|s| { + scope_and_collect_with_dice(ctx, |ctx, s| { s.spawn_cancellable( - with_dispatcher_async(dispatcher.dupe(), async move { - let bxl_module = ctx - .get_loaded_module(StarlarkModulePath::BxlFile(&key.label().bxl_path)) - .await?; - - let cell_resolver = ctx.get_cell_resolver().await?; - - let bxl_cell = cell_resolver - .get(key.label().bxl_path.cell()) - .with_context(|| { - format!("Cell does not exist: `{}`", key.label().bxl_path.cell()) - })? - .dupe(); - - let target_alias_resolver = ctx - .target_alias_resolver_for_cell(key.label().bxl_path.cell()) - .await?; - - let project_fs = ctx.global_data().get_io_provider().project_root().dupe(); - let artifact_fs = ctx.get_artifact_fs().await?; - - let digest_config = ctx.global_data().get_digest_config(); - - // The bxl function may trigger async operations like builds, analysis, parsing etc, but those - // will be blocking calls so that starlark can remain synchronous. - // So indicate to tokio that this may block in place to avoid starvation. Ideally we use - // spawn_blocking but that requires a static lifetime. 
There is no `join`s of multiple - // futures that requires work to be done on the current thread, so using block_in_place - // should have no noticeable different compared to spawn_blocking - - let output_stream = mk_stream_cache("output", &key); - let file_path = artifact_fs - .buck_out_path_resolver() - .resolve_gen(&output_stream); - - let file = RefCell::new(Box::new( - project_fs - .create_file(&file_path, false) - .context("Failed to create output cache for BXL")?, - )); - - let error_stream = mk_stream_cache("error", &key); - let error_file_path = artifact_fs - .buck_out_path_resolver() - .resolve_gen(&error_stream); - - let error_file = RefCell::new(Box::new( - project_fs - .create_file(&error_file_path, false) - .context("Failed to create error cache for BXL")?, - )); - - let print = EventDispatcherPrintHandler(dispatcher.clone()); - - let mut profiler_opt = profile_mode_or_instrumentation - .profile_mode() - .map(|profile_mode| StarlarkProfiler::new(profile_mode.dupe(), true)); - - let mut profiler = match &mut profiler_opt { - None => StarlarkProfilerOrInstrumentation::disabled(), - Some(profiler) => StarlarkProfilerOrInstrumentation::for_profiler(profiler), - }; - - let global_target_platform = key.global_target_platform().clone(); - - let (bxl_result, materializations) = with_starlark_eval_provider( - ctx, - &mut profiler, - format!("bxl:{}", key), - move |provider, ctx| { - let env = Module::new(); - - let resolved_args = env.heap().alloc(AllocStruct( - key.cli_args() - .iter() - .map(|(k, v)| (k, v.as_starlark(env.heap()))), - )); - - let mut eval = provider.make(&env)?; - let bxl_function_name = key.label().name.clone(); - let frozen_callable = get_bxl_callable(key.label(), &bxl_module)?; - eval.set_print_handler(&print); - - let bxl_dice = BxlSafeDiceComputations::new(ctx, liveness); - - let bxl_ctx = BxlContext::new( - eval.heap(), - key, - resolved_args, - target_alias_resolver, - project_fs, - artifact_fs, - cell_resolver, - bxl_cell.name(), - bxl_dice, - file, - error_file, - digest_config, - global_target_platform, - )?; - - let bxl_ctx = - ValueTyped::::new(env.heap().alloc(bxl_ctx)).unwrap(); - - let result = tokio::task::block_in_place(|| { - with_dispatcher(dispatcher.clone(), || { - dispatcher.clone().span( - BxlExecutionStart { - name: bxl_function_name, - }, - || { - ( - eval_bxl( - &mut eval, - frozen_callable, - bxl_ctx.to_value(), - provider, - ), - BxlExecutionEnd {}, - ) - }, - ) - }) - })?; - if !result.is_none() { - return Err(anyhow::anyhow!(NotAValidReturnType( - result.get_type() - ))); - } - - let (actions, ensured_artifacts, materializations) = - BxlContext::take_state(bxl_ctx)?; - std::mem::drop(eval); - - let (actions_finalizer, ensured_artifacts, materializations) = { - // help rust understand that actions is consumed here. 
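Aside: both the code being removed here and its replacement in `do_eval` below share the same two-phase finalization shape, `finalize` borrows the live module while the action registry is wired up, then returns a closure that consumes the module and freezes it. A stripped-down sketch of that pattern (the types and the `finalize` signature are illustrative, not the real registry API):

```rust
struct Module {
    values: Vec<String>,
}

struct FrozenModule {
    values: Vec<String>,
}

struct ActionsRegistry {
    actions: Vec<String>,
}

impl ActionsRegistry {
    // Phase one borrows the module; phase two (the returned closure)
    // consumes it, so the borrow is provably over before freezing.
    fn finalize(self, _env: &Module) -> impl FnOnce(Module) -> (FrozenModule, Vec<String>) {
        move |env| (FrozenModule { values: env.values }, self.actions)
    }
}
```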
- let actions = actions; - match actions { - Some(registry) => ( - Some(registry.finalize(&env)?), - ensured_artifacts, - materializations, - ), - None => (None, ensured_artifacts, materializations), - } - }; - - let (frozen_module, bxl_result) = match actions_finalizer { - Some(actions_finalizer) => { - // this bxl registered actions, so extract the deferreds from it - let (frozen_module, deferred) = actions_finalizer(env)?; - - let deferred_table = - DeferredTable::new(deferred.take_result()?); - - ( - frozen_module, - BxlResult::new( - output_stream, - error_stream, - ensured_artifacts, - deferred_table, - ), - ) - } - None => { - let frozen_module = env.freeze()?; - - // this bxl did not try to build anything, so we don't have any deferreds - ( - frozen_module, - BxlResult::new( - output_stream, - error_stream, - ensured_artifacts, - DeferredTable::new(Vec::new()), - ), - ) - } - }; - - provider - .visit_frozen_module(Some(&frozen_module)) - .context("Profiler heap visitation failed")?; - - Ok((bxl_result, materializations)) - }, - ) - .await?; - - let profile_data = profiler_opt.map(|p| p.finish()).transpose()?; - Ok((bxl_result, profile_data, materializations)) - }), + eval_bxl_inner( + ctx, + dispatcher, + key, + profile_mode_or_instrumentation, + liveness, + ), || Err(anyhow::anyhow!("cancelled")), ) }) @@ -298,6 +112,186 @@ pub(crate) async fn eval( } } +struct BxlInnerEvaluator { + data: BxlContextCoreData, + module: LoadedModule, + liveness: CancellationObserver, + digest_config: DigestConfig, + dispatcher: EventDispatcher, +} + +impl BxlInnerEvaluator { + fn do_eval<'a>( + self, + provider: &mut dyn StarlarkEvaluatorProvider, + dice: &'a mut DiceComputations, + ) -> anyhow::Result { + let BxlInnerEvaluator { + data, + module, + liveness, + digest_config, + dispatcher, + } = self; + let bxl_dice = BxlSafeDiceComputations::new(dice, liveness); + let bxl_dice = Rc::new(RefCell::new(bxl_dice)); + let data = Rc::new(data); + + let env = Module::new(); + let key = data.key().dupe(); + + let output_stream = mk_stream_cache("output", &key); + let file_path = data + .artifact_fs() + .buck_out_path_resolver() + .resolve_gen(&output_stream); + + let file = Rc::new(RefCell::new( + data.project_fs() + .create_file(&file_path, false) + .context("Failed to create output cache for BXL")?, + )); + + let error_stream = mk_stream_cache("error", &key); + let error_file_path = data + .artifact_fs() + .buck_out_path_resolver() + .resolve_gen(&error_stream); + + let error_file = Rc::new(RefCell::new( + data.project_fs() + .create_file(&error_file_path, false) + .context("Failed to create error cache for BXL")?, + )); + + let (actions, ensured_artifacts) = { + let resolved_args = ValueOfUnchecked::::unpack_value_err( + env.heap().alloc(AllocStruct( + key.cli_args() + .iter() + .map(|(k, v)| (k, v.as_starlark(env.heap()))), + )), + )?; + + let print = EventDispatcherPrintHandler(dispatcher.clone()); + let extra = BxlEvalExtra::new(bxl_dice.dupe(), data.dupe(), error_file.dupe()); + + let (mut eval, _) = provider.make(&env)?; + let bxl_function_name = key.label().name.clone(); + let frozen_callable = get_bxl_callable(key.label(), &module)?; + eval.set_print_handler(&print); + eval.set_soft_error_handler(&Buck2StarlarkSoftErrorHandler); + + eval.extra = Some(&extra); + + let force_print_stacktrace = key.force_print_stacktrace(); + let bxl_ctx = BxlContext::new( + eval.heap(), + data, + resolved_args, + bxl_dice, + file, + error_file, + digest_config, + )?; + + let bxl_ctx = 
ValueTyped::<BxlContext>::new_err(env.heap().alloc(bxl_ctx))?;
+
+            tokio::task::block_in_place(|| {
+                with_dispatcher(dispatcher.clone(), || {
+                    dispatcher.clone().span(
+                        BxlExecutionStart {
+                            name: bxl_function_name,
+                        },
+                        || {
+                            (
+                                eval_bxl(
+                                    &mut eval,
+                                    frozen_callable,
+                                    bxl_ctx,
+                                    provider,
+                                    force_print_stacktrace,
+                                ),
+                                BxlExecutionEnd {},
+                            )
+                        },
+                    )
+                })
+            })?;
+
+            BxlContext::take_state(bxl_ctx)?
+        };
+
+        let actions_finalizer = actions.finalize(&env)?;
+        let (frozen_module, recorded_values) = actions_finalizer(env)?;
+
+        let bxl_result = BxlResult::new(
+            output_stream,
+            error_stream,
+            ensured_artifacts,
+            recorded_values,
+        );
+
+        provider
+            .visit_frozen_module(Some(&frozen_module))
+            .context("Profiler heap visitation failed")?;
+
+        Ok(bxl_result)
+    }
+}
+
+async fn eval_bxl_inner(
+    ctx: &mut DiceComputations<'_>,
+    dispatcher: EventDispatcher,
+    key: BxlKey,
+    profile_mode_or_instrumentation: StarlarkProfileMode,
+    liveness: CancellationObserver,
+) -> anyhow::Result<(BxlResult, Option<StarlarkProfileDataAndStats>)> {
+    let bxl_module = ctx
+        .get_loaded_module(StarlarkModulePath::BxlFile(&key.label().bxl_path))
+        .await?;
+
+    let digest_config = ctx.global_data().get_digest_config();
+    let core_data = BxlContextCoreData::new(key.dupe(), ctx).await?;
+
+    // The bxl function may trigger async operations like builds, analysis, parsing etc, but those
+    // will be blocking calls so that starlark can remain synchronous.
+    // So indicate to tokio that this may block in place to avoid starvation. Ideally we use
+    // spawn_blocking but that requires a static lifetime. There are no `join`s of multiple
+    // futures that require work to be done on the current thread, so using block_in_place
+    // should have no noticeable difference compared to spawn_blocking.
+
+    let mut profiler_opt = profile_mode_or_instrumentation
+        .profile_mode()
+        .map(|profile_mode| StarlarkProfiler::new(profile_mode.dupe(), true, ProfileTarget::Bxl));
+
+    let mut profiler = match &mut profiler_opt {
+        None => StarlarkProfilerOpt::disabled(),
+        Some(profiler) => StarlarkProfilerOpt::for_profiler(profiler),
+    };
+
+    let starlark_eval_description = format!("bxl:{}", core_data.key());
+
+    let eval_ctx = BxlInnerEvaluator {
+        data: core_data,
+        module: bxl_module,
+        liveness,
+        digest_config,
+        dispatcher,
+    };
+
+    let bxl_result = with_starlark_eval_provider(
+        ctx,
+        &mut profiler,
+        starlark_eval_description,
+        move |provider, ctx| eval_ctx.do_eval(provider, ctx),
+    )
+    .await?;
+
+    let profile_data = profiler_opt.map(|p| p.finish()).transpose()?;
+    Ok((bxl_result, profile_data))
+}
+
 // We use a file as our output/error stream cache. The file is associated with the `BxlDynamicKey` (created from `BxlKey`),
 // which is super important, as it HAS to be the SAME as the DiceKey so that DICE is keeping the output file
 // cache up to date. `BxlDynamicKey` requires an execution platform. We set the execution platform to be unspecified here
@@ -305,11 +299,8 @@
 // are empty here for the same reason.
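The comment above is the key constraint on `mk_stream_cache`: the output and error stream paths must be a pure function of exactly the data DICE keys on, so a re-evaluation of an equal key reads and writes the same file rather than a stale one. A hypothetical std-only sketch of that idea (this is not the real `mk_stream_cache`, which hashes through `BxlDynamicKey` and `BuckOutPath`):

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// The path is derived only from the key, never from ambient state, so two
// evaluations with equal keys always land on the same cache file.
fn stream_cache_path(stream_type: &str, key: &impl Hash) -> String {
    let mut hasher = DefaultHasher::new();
    key.hash(&mut hasher);
    format!(
        "__bxl_internal__/{}stream_cache/{:x}",
        stream_type,
        hasher.finish()
    )
}
```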
pub(crate) fn mk_stream_cache(stream_type: &str, key: &BxlKey) -> BuckOutPath { BuckOutPath::new( - BaseDeferredKey::BxlLabel(key.dupe().into_base_deferred_key_dyn_impl( - ExecutionPlatformResolution::unspecified(), - Vec::new(), - Vec::new(), - )), + key.dupe() + .into_base_deferred_key(BxlExecutionResolution::unspecified()), ForwardRelativePathBuf::unchecked_new(format!( "__bxl_internal__/{}stream_cache", stream_type @@ -317,28 +308,64 @@ pub(crate) fn mk_stream_cache(stream_type: &str, key: &BxlKey) -> BuckOutPath { ) } -fn eval_bxl<'a>( - eval: &mut Evaluator<'a, '_>, +fn eval_bxl<'v>( + eval: &mut Evaluator<'v, '_, '_>, frozen_callable: OwnedFrozenValueTyped, - ctx: Value<'a>, + ctx: ValueTyped<'v, BxlContext<'v>>, provider: &mut dyn StarlarkEvaluatorProvider, -) -> anyhow::Result> { + force_print_stacktrace: bool, +) -> anyhow::Result<()> { let bxl_impl = frozen_callable.implementation(); - let result = eval.eval_function(bxl_impl.to_value(), &[ctx], &[]); + let result = eval.eval_function(bxl_impl.to_value(), &[ctx.to_value()], &[]); provider .evaluation_complete(eval) .context("Profiler finalization failed")?; - result + + let e = match result { + Ok(v) => { + if !v.is_none() { + return Err(anyhow::anyhow!(NotAValidReturnType(v.get_type()))); + } + + return Ok(()); + } + Err(e) => e, + }; + + let should_skip_backtrace = !force_print_stacktrace + && match e.kind() { + starlark::ErrorKind::Native(e) => { + e.downcast_ref::().is_some() + } + _ => false, + }; + + let e = from_starlark_with_options( + e, + buck2_error::starlark_error::NativeErrorHandling::Unknown, + should_skip_backtrace, + ); + if should_skip_backtrace { + let dispatcher = get_dispatcher(); + dispatcher.instant_event(StarlarkFailNoStacktrace { + trace: format!("{}", e), + }); + dispatcher + .console_message("Re-run the script with `-v5` to show the full stacktrace".to_owned()); + } + + Err(e.into()) } -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] #[error("Expected {0} to be a bxl function, was a {1}")] +#[allow(dead_code)] struct NotABxlFunction(String, &'static str); -pub(crate) fn get_bxl_callable<'a>( +pub(crate) fn get_bxl_callable( spec: &BxlFunctionLabel, - bxl_module: &'a LoadedModule, + bxl_module: &LoadedModule, ) -> anyhow::Result> { let callable = bxl_module.env().get_any_visibility(&spec.name)?.0; @@ -348,6 +375,7 @@ pub(crate) fn get_bxl_callable<'a>( pub(crate) struct CliResolutionCtx<'a> { pub(crate) target_alias_resolver: BuckConfigTargetAliasResolver, pub(crate) cell_resolver: CellResolver, + pub(crate) cell_alias_resolver: CellAliasResolver, pub(crate) relative_dir: PackageLabel, pub(crate) dice: &'a DiceTransaction, } @@ -389,6 +417,6 @@ pub(crate) async fn resolve_cli_args<'a>( } } -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] #[error("Expected `NoneType` to be returned from bxl. 
Got return value `{0}`")] struct NotAValidReturnType(&'static str); diff --git a/app/buck2_bxl/src/bxl/key.rs b/app/buck2_bxl/src/bxl/key.rs index 657930e536c6f..dafd39ed974b3 100644 --- a/app/buck2_bxl/src/bxl/key.rs +++ b/app/buck2_bxl/src/bxl/key.rs @@ -14,23 +14,25 @@ use std::hash::Hasher; use std::sync::Arc; use allocative::Allocative; -use anyhow::Context; use buck2_build_api::bxl::types::BxlFunctionLabel; +use buck2_common::global_cfg_options::GlobalCfgOptions; +use buck2_core::base_deferred_key::BaseDeferredKey; +use buck2_core::base_deferred_key::BaseDeferredKeyBxl; use buck2_core::base_deferred_key::BaseDeferredKeyDyn; use buck2_core::execution_types::execution::ExecutionPlatformResolution; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; use buck2_core::fs::project_rel_path::ProjectRelativePath; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; -use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_core::target::configured_target_label::ConfiguredTargetLabel; -use buck2_core::target::label::TargetLabel; use buck2_data::action_key_owner::BaseDeferredKeyProto; use buck2_data::ToProtoMessage; +use buck2_error::BuckErrorContext; use cmp_any::PartialEqAny; use dupe::Dupe; use starlark_map::ordered_map::OrderedMap; use crate::bxl::starlark_defs::cli_args::CliArgValue; +use crate::bxl::starlark_defs::context::actions::BxlExecutionResolution; #[derive( Clone, @@ -50,12 +52,14 @@ impl BxlKey { pub(crate) fn new( spec: BxlFunctionLabel, bxl_args: Arc>, - global_target_platform: Option, + force_print_stacktrace: bool, + global_cfg_options: GlobalCfgOptions, ) -> Self { Self(Arc::new(BxlKeyData { spec, bxl_args, - global_target_platform, + force_print_stacktrace, + global_cfg_options, })) } @@ -67,30 +71,39 @@ impl BxlKey { &self.0.bxl_args } - pub(crate) fn into_base_deferred_key_dyn_impl( + fn into_base_deferred_key_dyn_impl( self, - execution_platform_resolution: ExecutionPlatformResolution, - exec_deps: Vec, - toolchains: Vec, + execution_resolution: BxlExecutionResolution, ) -> Arc { Arc::new(BxlDynamicKeyData { key: self.0, - execution_platform_resolution, - exec_deps, - toolchains, + execution_resolution, }) } + pub(crate) fn into_base_deferred_key( + self, + execution_resolution: BxlExecutionResolution, + ) -> BaseDeferredKey { + BaseDeferredKey::BxlLabel(BaseDeferredKeyBxl( + self.into_base_deferred_key_dyn_impl(execution_resolution), + )) + } + pub(crate) fn from_base_deferred_key_dyn_impl_err( - key: Arc, + key: BaseDeferredKeyBxl, ) -> anyhow::Result { BxlDynamicKey::from_base_deferred_key_dyn_impl(key) .map(|k| BxlKey(k.0.key.dupe())) - .context("Not BxlKey (internal error)") + .internal_error_anyhow("Not BxlKey") + } + + pub(crate) fn global_cfg_options(&self) -> &GlobalCfgOptions { + &self.0.global_cfg_options } - pub(crate) fn global_target_platform(&self) -> &Option { - &self.0.global_target_platform + pub(crate) fn force_print_stacktrace(&self) -> bool { + self.0.force_print_stacktrace } } @@ -105,11 +118,15 @@ impl BxlKey { PartialOrd, Allocative )] -#[display(fmt = "{}", "spec")] +#[display("{}", spec)] struct BxlKeyData { spec: BxlFunctionLabel, bxl_args: Arc>, - global_target_platform: Option, + /// Overrides `fail_no_stacktrace` to print a stacktrace anyway. FIXME(JakobDegen): Might be + /// better to put this on the `UserComputationData` instead, to keep this from invalidating the + /// dice node. A bit hard to wire up though, so just leave it here for now. 
+ force_print_stacktrace: bool, + global_cfg_options: GlobalCfgOptions, } impl BxlKeyData { @@ -126,12 +143,10 @@ impl BxlKeyData { // construct the hashed path. However, we still need to include them in the BxlDynamicKeyData so that we can pass // them from the root BXL to the dynamic BXL context, and then access them on the dynamic BXL context's actions factory. #[derive(Clone, derive_more::Display, Debug, Eq, Hash, PartialEq, Allocative)] -#[display(fmt = "{}", "key")] +#[display("{}", key)] pub(crate) struct BxlDynamicKeyData { key: Arc, - execution_platform_resolution: ExecutionPlatformResolution, - pub(crate) exec_deps: Vec, - pub(crate) toolchains: Vec, + pub(crate) execution_resolution: BxlExecutionResolution, } pub(crate) struct BxlDynamicKey(pub(crate) Arc); @@ -141,14 +156,14 @@ impl BxlDynamicKey { BxlKey(self.0.key.dupe()) } - fn from_base_deferred_key_dyn_impl(key: Arc) -> Option { - key.into_any().downcast().ok().map(BxlDynamicKey) + fn from_base_deferred_key_dyn_impl(key: BaseDeferredKeyBxl) -> Option { + key.0.into_any().downcast().ok().map(BxlDynamicKey) } pub(crate) fn from_base_deferred_key_dyn_impl_err( - key: Arc, + key: BaseDeferredKeyBxl, ) -> anyhow::Result { - Self::from_base_deferred_key_dyn_impl(key).context("Not BxlDynamicKey (internal error)") + Self::from_base_deferred_key_dyn_impl(key).internal_error_anyhow("Not BxlDynamicKey") } } @@ -176,13 +191,16 @@ impl BaseDeferredKeyDyn for BxlDynamicKeyData { let output_hash = { let mut hasher = DefaultHasher::new(); self.key.bxl_args.hash(&mut hasher); + self.key.global_cfg_options.hash(&mut hasher); let output_hash = hasher.finish(); format!("{:x}", output_hash) }; let exec_platform = { let mut hasher = DefaultHasher::new(); - self.execution_platform_resolution.hash(&mut hasher); + self.execution_resolution + .resolved_execution + .hash(&mut hasher); let output_hash = hasher.finish(); format!("{:x}", output_hash) }; @@ -232,6 +250,6 @@ impl BaseDeferredKeyDyn for BxlDynamicKeyData { } fn execution_platform_resolution(&self) -> &ExecutionPlatformResolution { - &self.execution_platform_resolution + &self.execution_resolution.resolved_execution } } diff --git a/app/buck2_bxl/src/bxl/mod.rs b/app/buck2_bxl/src/bxl/mod.rs deleted file mode 100644 index 96a0a55a1afd5..0000000000000 --- a/app/buck2_bxl/src/bxl/mod.rs +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -pub(crate) mod calculation; -mod deferred; -pub(crate) mod eval; -pub(crate) mod key; -pub(crate) mod starlark_defs; -pub(crate) mod value_as_starlark_target_label; diff --git a/app/buck2_bxl/src/bxl/starlark_defs.rs b/app/buck2_bxl/src/bxl/starlark_defs.rs new file mode 100644 index 0000000000000..5eab186a9e870 --- /dev/null +++ b/app/buck2_bxl/src/bxl/starlark_defs.rs @@ -0,0 +1,39 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! 
Definitions of core functionality just for bxl functions to access + +pub(crate) mod alloc_node; +pub(crate) mod analysis_result; +pub(crate) mod aquery; +pub(crate) mod artifacts; +pub(crate) mod audit; +pub(crate) mod build_result; +pub(crate) mod bxl_function; +pub(crate) mod cli_args; +pub(crate) mod context; +pub(crate) mod cquery; +pub(crate) mod eval_extra; +pub(crate) mod event; +pub(crate) mod file_expr; +pub(crate) mod file_set; +pub(crate) mod functions; +pub(crate) mod globals; +pub(crate) mod lazy_ctx; +pub(crate) mod nodes; +pub(crate) mod providers_expr; +mod query_util; +pub(crate) mod result; +pub(crate) mod target_expr; +pub(crate) mod target_list_expr; +pub(crate) mod target_universe; +pub(crate) mod targetset; +pub(crate) mod time; +pub(crate) mod type_names; +pub(crate) mod uquery; diff --git a/app/buck2_bxl/src/bxl/starlark_defs/analysis_result.rs b/app/buck2_bxl/src/bxl/starlark_defs/analysis_result.rs index bd125b8e92993..b04a469ef0fd1 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/analysis_result.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/analysis_result.rs @@ -7,49 +7,61 @@ * of this source tree. */ +use std::fmt; + use allocative::Allocative; use buck2_build_api::analysis::AnalysisResult; use buck2_build_api::interpreter::rule_defs::provider::dependency::Dependency; use buck2_core::provider::label::ConfiguredProvidersLabel; -use derive_more::Display; +use dupe::Dupe; use starlark::any::ProvidesStaticType; use starlark::environment::Methods; use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; +use starlark::eval::Evaluator; use starlark::starlark_module; use starlark::starlark_simple_value; use starlark::values::starlark_value; use starlark::values::FrozenValue; -use starlark::values::Heap; use starlark::values::NoSerialize; use starlark::values::StarlarkValue; -use starlark::values::Value; -use starlark::StarlarkDocs; +use starlark::values::ValueTyped; -#[derive( - ProvidesStaticType, - Debug, - Display, - NoSerialize, - StarlarkDocs, - Allocative -)] -#[display(fmt = "{:?}", self)] -#[starlark_docs(directory = "bxl")] +#[derive(ProvidesStaticType, Debug, NoSerialize, Allocative)] pub(crate) struct StarlarkAnalysisResult { + // Invariant: The subtarget specified on the label is present in the analysis result. analysis: AnalysisResult, label: ConfiguredProvidersLabel, } +impl fmt::Display for StarlarkAnalysisResult { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "AnalysisResult(")?; + fmt::Display::fmt( + self.analysis + .lookup_inner(&self.label) + .unwrap() + .provider_collection(), + f, + )?; + write!(f, ")") + } +} + impl StarlarkAnalysisResult { - pub(crate) fn new(analysis: AnalysisResult, label: ConfiguredProvidersLabel) -> Self { - Self { analysis, label } + pub(crate) fn new( + analysis: AnalysisResult, + label: ConfiguredProvidersLabel, + ) -> anyhow::Result { + // Check that the specified subtarget actually exists + drop(analysis.lookup_inner(&label)?); + Ok(Self { analysis, label }) } } starlark_simple_value!(StarlarkAnalysisResult); -#[starlark_value(type = "analysis_result")] +#[starlark_value(type = "bxl.AnalysisResult")] impl<'v> StarlarkValue<'v> for StarlarkAnalysisResult { fn get_methods() -> Option<&'static Methods> { static RES: MethodsStatic = MethodsStatic::new(); @@ -60,7 +72,7 @@ impl<'v> StarlarkValue<'v> for StarlarkAnalysisResult { /// The result of running an analysis in bxl. 
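Note on the `StarlarkAnalysisResult` change above: it moves the fallibility forward. `new` now proves `lookup_inner` succeeds once (that is the stated invariant), which is what lets the `Display` impl call `.unwrap()` safely. The establish-in-constructor, rely-in-`Display` pattern reduced to a sketch with illustrative types:

```rust
use std::fmt;

struct Checked {
    items: Vec<String>,
    index: usize,
}

impl Checked {
    // The constructor proves the index is valid exactly once...
    fn new(items: Vec<String>, index: usize) -> Result<Self, String> {
        if index >= items.len() {
            return Err(format!("no item at {index}"));
        }
        Ok(Self { items, index })
    }
}

impl fmt::Display for Checked {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // ...so formatting can index infallibly, mirroring the `.unwrap()`
        // on `lookup_inner` in the impl above.
        write!(f, "AnalysisResult({})", self.items[self.index])
    }
}
```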
#[starlark_module] fn starlark_analysis_result_methods(builder: &mut MethodsBuilder) { - /// Access the providers of the rule. Returns a `[ProviderCollection]` the same as accessing + /// Access the providers of the rule. Returns a `provider_collection` the same as accessing /// providers of dependencies within a rule implementation. /// /// Sample usage: @@ -88,7 +100,6 @@ fn starlark_analysis_result_methods(builder: &mut MethodsBuilder) { /// transitions. This means that you cannot create an exec dep or toolchain from an analysis result. /// We may support other dependency transition types in the future. - /// /// This is useful for passing in the results of `ctx.analysis()` into anon targets. /// @@ -100,21 +111,16 @@ fn starlark_analysis_result_methods(builder: &mut MethodsBuilder) { /// ``` fn as_dependency<'v>( this: &'v StarlarkAnalysisResult, - heap: &'v Heap, - ) -> anyhow::Result> { - unsafe { - // SAFETY:: this actually just returns a FrozenValue from in the StarlarkAnalysisResult - // which is kept alive for 'v - Ok(heap.alloc(Dependency::new( - heap, - this.label.clone(), - this.analysis - .lookup_inner(&this.label)? - .value() - .to_frozen_value() - .to_value(), - None, - ))) - } + eval: &mut Evaluator<'v, '_, '_>, + ) -> anyhow::Result>> { + Ok(eval.heap().alloc_typed(Dependency::new( + eval.heap(), + this.label.dupe(), + this.analysis + .lookup_inner(&this.label)? + .value() + .owned_frozen_value_typed(eval.frozen_heap()), + None, + ))) } } diff --git a/app/buck2_bxl/src/bxl/starlark_defs/aquery.rs b/app/buck2_bxl/src/bxl/starlark_defs/aquery.rs index e3ef73c7590a3..6eddb65f2bfe0 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/aquery.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/aquery.rs @@ -8,17 +8,16 @@ */ use allocative::Allocative; -use anyhow::Context; use buck2_build_api::actions::query::ActionQueryNode; use buck2_build_api::query::bxl::BxlAqueryFunctions; use buck2_build_api::query::bxl::NEW_BXL_AQUERY_FUNCTIONS; use buck2_build_api::query::oneshot::QUERY_FRONTEND; +use buck2_common::global_cfg_options::GlobalCfgOptions; use buck2_core::configuration::compatibility::IncompatiblePlatformReason; use buck2_core::provider::label::ConfiguredProvidersLabel; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use buck2_node::nodes::configured::ConfiguredTargetNode; use buck2_query::query::syntax::simple::eval::set::TargetSet; -use buck2_query::query::syntax::simple::eval::set::TargetSetExt; use buck2_query::query::syntax::simple::functions::helpers::CapturedExpr; use derivative::Derivative; use derive_more::Display; @@ -32,8 +31,10 @@ use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; use starlark::eval::Evaluator; use starlark::starlark_module; +use starlark::values::list::UnpackList; use starlark::values::none::NoneOr; use starlark::values::starlark_value; +use starlark::values::type_repr::StarlarkTypeRepr; use starlark::values::AllocValue; use starlark::values::Heap; use starlark::values::NoSerialize; @@ -41,18 +42,19 @@ use starlark::values::StarlarkValue; use starlark::values::Trace; use starlark::values::UnpackValue; use starlark::values::Value; -use starlark::values::ValueError; -use starlark::values::ValueLike; -use starlark::StarlarkDocs; -use thiserror::Error; +use starlark::values::ValueTyped; use crate::bxl::starlark_defs::context::BxlContext; use crate::bxl::starlark_defs::context::BxlContextNoDice; +use crate::bxl::starlark_defs::context::ErrorPrinter; +use 
crate::bxl::starlark_defs::nodes::action::StarlarkActionQueryNode; +use crate::bxl::starlark_defs::providers_expr::AnyProvidersExprArg; use crate::bxl::starlark_defs::providers_expr::ProvidersExpr; use crate::bxl::starlark_defs::query_util::parse_query_evaluation_result; -use crate::bxl::starlark_defs::target_expr::TargetExpr; +use crate::bxl::starlark_defs::target_list_expr::ConfiguredTargetListExprArg; +use crate::bxl::starlark_defs::target_list_expr::TargetListExpr; use crate::bxl::starlark_defs::targetset::StarlarkTargetSet; -use crate::bxl::starlark_defs::uquery::unpack_unconfigured_query_args; +use crate::bxl::starlark_defs::uquery::UnpackUnconfiguredQueryArgs; use crate::bxl::value_as_starlark_target_label::ValueAsStarlarkTargetLabel; #[derive( @@ -61,22 +63,20 @@ use crate::bxl::value_as_starlark_target_label::ValueAsStarlarkTargetLabel; Display, Trace, NoSerialize, - StarlarkDocs, Allocative )] -#[starlark_docs(directory = "bxl")] #[derivative(Debug)] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] #[allocative(skip)] pub(crate) struct StarlarkAQueryCtx<'v> { - #[trace(unsafe_ignore)] #[derivative(Debug = "ignore")] - ctx: &'v BxlContext<'v>, + ctx: ValueTyped<'v, BxlContext<'v>>, #[derivative(Debug = "ignore")] - target_platform: Option, + // Overrides the GlobalCfgOptions in the BxlContext + global_cfg_options_override: GlobalCfgOptions, } -#[starlark_value(type = "aqueryctx", StarlarkTypeRepr, UnpackValue)] +#[starlark_value(type = "bxl.AqueryContext", StarlarkTypeRepr, UnpackValue)] impl<'v> StarlarkValue<'v> for StarlarkAQueryCtx<'v> { fn get_methods() -> Option<&'static Methods> { static RES: MethodsStatic = MethodsStatic::new(); @@ -92,43 +92,47 @@ impl<'v> AllocValue<'v> for StarlarkAQueryCtx<'v> { impl<'v> StarlarkAQueryCtx<'v> { pub(crate) fn new( - ctx: &'v BxlContext<'v>, - global_target_platform: Value<'v>, + ctx: ValueTyped<'v, BxlContext<'v>>, + global_target_platform: ValueAsStarlarkTargetLabel<'v>, default_target_platform: &Option, ) -> anyhow::Result> { let target_platform = global_target_platform.parse_target_platforms( - &ctx.data.target_alias_resolver, - &ctx.data.cell_resolver, - ctx.data.cell_name, + ctx.target_alias_resolver(), + ctx.cell_resolver(), + ctx.cell_alias_resolver(), + ctx.cell_name(), default_target_platform, )?; Ok(Self { ctx, - target_platform, + global_cfg_options_override: GlobalCfgOptions { + target_platform, + cli_modifiers: vec![].into(), + }, }) } } pub(crate) async fn get_aquery_env( ctx: &BxlContextNoDice<'_>, - target_platform: Option, + global_cfg_options_override: &GlobalCfgOptions, ) -> anyhow::Result> { (NEW_BXL_AQUERY_FUNCTIONS.get()?)( - target_platform, + global_cfg_options_override.clone(), ctx.project_root().dupe(), - ctx.cell_name, - ctx.cell_resolver.dupe(), + ctx.cell_name(), + ctx.cell_resolver().dupe(), ) .await } -#[derive(Debug, Error)] -pub(crate) enum BxlAqueryError { - #[error( - "Expected a list of target-like or provider-like items, or a target set of action query nodes, but was `{0}`" - )] - InvalidInputs(String), +#[derive(StarlarkTypeRepr, UnpackValue)] +enum UnpackActionNodes<'v> { + ActionQueryNodes(UnpackList), + ActionQueryNodesSet(&'v StarlarkTargetSet), + ConfiguredProviders(AnyProvidersExprArg<'v>), + ConfiguredTargets(ConfiguredTargetListExprArg<'v>), } // Aquery operates on `ActionQueryNode`s. 
Under the hood, the target set of action query nodes is obtained @@ -137,84 +141,78 @@ pub(crate) enum BxlAqueryError { // and `ProvidersExpr`, we need to pass the aquery delegate a list of configured providers labels, and it will // run analysis on them to construct the `ActionQueryNode`s. async fn unpack_action_nodes<'v>( - expr: Value<'v>, - target_platform: &Option, - ctx: &BxlContextNoDice<'v>, - dice: &mut DiceComputations, - aquery_env: &dyn BxlAqueryFunctions, - eval: &mut Evaluator<'v, '_>, + this: &StarlarkAQueryCtx<'v>, + dice: &mut DiceComputations<'_>, + expr: UnpackActionNodes<'v>, ) -> anyhow::Result> { - if let Some(action_nodes) = expr.downcast_ref::>() { - return Ok(action_nodes.0.clone()); - } - - let providers = if let Some(providers) = ProvidersExpr::::unpack_opt( - expr, - target_platform.clone(), - ctx, - dice, - eval, - ) - .await? - { - providers.labels().cloned().collect() - } else if let Some(targets) = - TargetExpr::::unpack_opt(expr, target_platform, ctx, dice, eval, true) + let aquery_env = get_aquery_env(&this.ctx.data, &this.global_cfg_options_override).await?; + let providers = match expr { + UnpackActionNodes::ActionQueryNodes(action_nodes) => { + return Ok(action_nodes.into_iter().map(|v| v.0).collect()); + } + UnpackActionNodes::ActionQueryNodesSet(action_nodes) => return Ok(action_nodes.0.clone()), + UnpackActionNodes::ConfiguredProviders(arg) => { + ProvidersExpr::::unpack( + arg, + &this.global_cfg_options_override, + &this.ctx.data, + dice, + ) + .await? + .labels() + .cloned() + .collect() + } + UnpackActionNodes::ConfiguredTargets(arg) => { + TargetListExpr::::unpack_opt( + arg, + &this.global_cfg_options_override, + &this.ctx.data, + dice, + true, + ) .await? - { - targets.as_provider_labels() - } else { - return Err(anyhow::anyhow!(BxlAqueryError::InvalidInputs( - expr.to_repr() - ))); + .as_provider_labels() + } }; let (incompatible_targets, result) = aquery_env.get_target_set(dice, providers).await?; if !incompatible_targets.is_empty() { - ctx.print_to_error_stream(IncompatiblePlatformReason::skipping_message_for_multiple( - incompatible_targets.iter(), - ))?; + this.ctx.data.print_to_error_stream( + IncompatiblePlatformReason::skipping_message_for_multiple(incompatible_targets.iter()), + )?; } Ok(result) } - /// The context for performing `aquery` operations in bxl. The functions offered on this ctx are /// the same behaviour as the query functions available within aquery command. /// -/// Query results are `[StarlarkTargetSet]`s of `[ActionQueryNode]`s, which supports iteration, +/// Query results are `target_set`s of `action_query_node`s, which supports iteration, /// indexing, `len()`, set addition/subtraction, and `equals()`. #[starlark_module] fn aquery_methods(builder: &mut MethodsBuilder) { /// The deps query for finding the transitive closure of dependencies. fn deps<'v>( this: &StarlarkAQueryCtx<'v>, - universe: Value<'v>, + // TODO(nga): parameters should be either positional or named, not both. 
+ universe: UnpackActionNodes<'v>, #[starlark(default = NoneOr::None)] depth: NoneOr, #[starlark(default = NoneOr::None)] filter: NoneOr<&'v str>, - eval: &mut Evaluator<'v, '_>, ) -> anyhow::Result> { this.ctx - .via_dice(|mut dice, ctx| { + .via_dice(|dice, ctx| { dice.via(|dice| { async { - let aquery_env = get_aquery_env(ctx, this.target_platform.dupe()).await?; - let filter = filter .into_option() .try_map(buck2_query_parser::parse_expr)?; - let universe = unpack_action_nodes( - universe, - &this.target_platform, - ctx, - dice, - aquery_env.as_ref(), - eval, - ) - .await?; + let universe = unpack_action_nodes(this, dice, universe).await?; + let aquery_env = + get_aquery_env(ctx, &this.global_cfg_options_override).await?; aquery_env .deps( dice, @@ -239,26 +237,15 @@ fn aquery_methods(builder: &mut MethodsBuilder) { /// an action). fn all_actions<'v>( this: &StarlarkAQueryCtx<'v>, - targets: Value<'v>, - eval: &mut Evaluator<'v, '_>, + // TODO(nga): parameters should be either positional or named, not both. + targets: UnpackActionNodes<'v>, ) -> anyhow::Result> { this.ctx - .via_dice(|mut dice, ctx| { + .via_dice(|dice, ctx| { dice.via(|dice| { async { - let aquery_env = get_aquery_env(ctx, this.target_platform.dupe()).await?; - - let targets = unpack_action_nodes( - targets, - &this.target_platform, - ctx, - dice, - aquery_env.as_ref(), - eval, - ) - .await?; - - get_aquery_env(ctx, this.target_platform.dupe()) + let targets = unpack_action_nodes(this, dice, targets).await?; + get_aquery_env(ctx, &this.global_cfg_options_override) .await? .all_actions(dice, &targets) .await @@ -276,26 +263,16 @@ fn aquery_methods(builder: &mut MethodsBuilder) { /// else). fn all_outputs<'v>( this: &StarlarkAQueryCtx<'v>, - targets: Value<'v>, - eval: &mut Evaluator<'v, '_>, + // TODO(nga): parameters should be either positional or named, not both. + targets: UnpackActionNodes<'v>, ) -> anyhow::Result> { this.ctx - .via_dice(|mut dice, ctx| { + .via_dice(|dice, ctx| { dice.via(|dice| { async { - let aquery_env = get_aquery_env(ctx, this.target_platform.dupe()).await?; - - let targets = unpack_action_nodes( - targets, - &this.target_platform, - ctx, - dice, - aquery_env.as_ref(), - eval, - ) - .await?; + let targets = unpack_action_nodes(this, dice, targets).await?; - get_aquery_env(ctx, this.target_platform.dupe()) + get_aquery_env(ctx, &this.global_cfg_options_override) .await? .all_outputs(dice, &targets) .await @@ -309,25 +286,15 @@ fn aquery_methods(builder: &mut MethodsBuilder) { /// The attrfilter query for rule attribute filtering. fn attrfilter<'v>( this: &StarlarkAQueryCtx<'v>, + // TODO(nga): parameters should be either positional or named, not both. attr: &str, value: &str, - targets: Value<'v>, - eval: &mut Evaluator<'v, '_>, + targets: UnpackActionNodes<'v>, ) -> anyhow::Result> { - this.ctx.via_dice(|mut dice, ctx| { + this.ctx.via_dice(|dice, _| { dice.via(|dice| { async { - let aquery_env = get_aquery_env(ctx, this.target_platform.dupe()).await?; - - let targets = unpack_action_nodes( - targets, - &this.target_platform, - ctx, - dice, - aquery_env.as_ref(), - eval, - ) - .await?; + let targets = unpack_action_nodes(this, dice, targets).await?; targets .attrfilter(attr, &|v| Ok(v == value)) @@ -339,39 +306,27 @@ fn aquery_methods(builder: &mut MethodsBuilder) { } /// Evaluates some general query string. `query_args` can be a target_set of unconfigured nodes, or - /// a list of strings. + /// a list of strings. 
Returns a `dict` of target labels mapped to their `target_set` results if `query_args` + /// was passed in, otherwise returns a single `target_set`. + /// + /// Sample usage: + /// ```text + /// def _impl_eval(ctx): + /// result = ctx.aquery().eval(":foo") + /// ctx.output.print(result) + /// ``` fn eval<'v>( this: &StarlarkAQueryCtx<'v>, query: &'v str, - #[starlark(default = NoneOr::None)] query_args: NoneOr>, - eval: &mut Evaluator<'v, '_>, + #[starlark(default = NoneOr::None)] query_args: NoneOr>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result> { - let query_args = if query_args.is_none() { - Vec::new() - } else { - let unwrapped_query_args = query_args.into_option().unwrap(); - if let Some(query_args) = unpack_unconfigured_query_args(unwrapped_query_args)? { - query_args - } else { - // TODO(@wendyy) - we probably also want to support subtargets here - let err = Err(ValueError::IncorrectParameterTypeWithExpected( - "list of strings, or a target_set of unconfigured nodes".to_owned(), - query_args.into_option().unwrap().get_type().to_owned(), - ) - .into()); - - if <&StarlarkTargetSet>::unpack_value(unwrapped_query_args) - .is_some() - { - return err - .context("target_set with configured nodes are currently not supported"); - } - - return err; - } + let query_args = match query_args { + NoneOr::None => Vec::new(), + NoneOr::Other(query_args) => query_args.into_strings(), }; - this.ctx.via_dice(|mut dice, ctx| { + this.ctx.via_dice(|dice, ctx| { dice.via(|dice| { async { parse_query_evaluation_result( @@ -382,10 +337,10 @@ fn aquery_methods(builder: &mut MethodsBuilder) { &ctx.working_dir()?, query, &query_args, - this.target_platform.dupe(), + this.global_cfg_options_override.clone(), ) .await?, - eval, + eval.heap(), ) } .boxed_local() diff --git a/app/buck2_bxl/src/bxl/starlark_defs/artifacts.rs b/app/buck2_bxl/src/bxl/starlark_defs/artifacts.rs index f4ab844f0a824..eb37ba7a965bf 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/artifacts.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/artifacts.rs @@ -15,13 +15,11 @@ use std::hash::Hash; use std::hash::Hasher; use allocative::Allocative; -use anyhow::Context as _; use buck2_build_api::artifact_groups::ArtifactGroup; use buck2_build_api::artifact_groups::ResolvedArtifactGroup; -use buck2_build_api::deferred::calculation::DeferredCalculation; +use buck2_build_api::interpreter::rule_defs::artifact::starlark_artifact::StarlarkArtifact; use buck2_build_api::interpreter::rule_defs::artifact::starlark_artifact_like::StarlarkArtifactLike; -use buck2_build_api::interpreter::rule_defs::artifact::StarlarkArtifact; -use buck2_build_api::interpreter::rule_defs::artifact::StarlarkDeclaredArtifact; +use buck2_build_api::interpreter::rule_defs::artifact::starlark_declared_artifact::StarlarkDeclaredArtifact; use buck2_execute::path::artifact_path::ArtifactPath; use dice::DiceComputations; use dupe::Dupe; @@ -31,12 +29,12 @@ use serde::Serializer; use starlark::any::ProvidesStaticType; use starlark::collections::SmallSet; use starlark::collections::StarlarkHasher; -use starlark::docs::StarlarkDocs; use starlark::environment::Methods; use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; use starlark::starlark_module; use starlark::values::starlark_value; +use starlark::values::type_repr::StarlarkTypeRepr; use starlark::values::AllocValue; use starlark::values::Heap; use starlark::values::StarlarkValue; @@ -44,11 +42,10 @@ use starlark::values::Trace; use starlark::values::UnpackValue; use starlark::values::Value; 
use starlark::values::ValueLike; -use starlark::values::ValueOf; +use starlark::values::ValueTyped; -#[derive(Clone, Debug, Trace, ProvidesStaticType, StarlarkDocs, Allocative)] +#[derive(Clone, Debug, Trace, ProvidesStaticType, Allocative)] #[repr(C)] -#[starlark_docs(directory = "bxl")] pub(crate) enum EnsuredArtifact { Artifact { artifact: StarlarkArtifact, @@ -60,7 +57,7 @@ pub(crate) enum EnsuredArtifact { }, } -#[derive(Clone, Debug, Trace, ProvidesStaticType, StarlarkDocs, Allocative)] +#[derive(Clone, Debug, Trace, ProvidesStaticType, Allocative)] #[repr(C)] pub(crate) struct EnsuredArtifactGroupInner { pub(crate) ags: Vec, @@ -70,7 +67,7 @@ pub(crate) async fn visit_artifact_path_without_associated_deduped( ags: &[ArtifactGroup], abs: bool, mut visitor: impl FnMut(ArtifactPath, bool) -> anyhow::Result<()>, - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, ) -> anyhow::Result<()> { // If there's a case where a tset projection returns a projection, we want to make sure // we are not reprocessing the nested projection over again. Since we are using @@ -86,18 +83,12 @@ pub(crate) async fn visit_artifact_path_without_associated_deduped( if !visited.insert(ag.dupe()) { continue; } - match ag.resolved()? { + match ag.resolved_artifact(ctx).await? { ResolvedArtifactGroup::Artifact(a) => { visitor(a.get_path(), abs)?; } ResolvedArtifactGroup::TransitiveSetProjection(t) => { - let set = ctx - .compute_deferred_data(&t.key) - .await - .context("Failed to compute deferred for transitive set projection key")?; - - let set = set.as_transitive_set(); - + let set = t.key.lookup(ctx).await?; todo.extend(set.get_projection_sub_inputs(t.projection)?); } } @@ -106,9 +97,8 @@ pub(crate) async fn visit_artifact_path_without_associated_deduped( Ok(()) } -#[derive(Clone, Debug, Trace, ProvidesStaticType, StarlarkDocs, Allocative)] +#[derive(Clone, Debug, Trace, ProvidesStaticType, Allocative)] #[repr(C)] -#[starlark_docs(directory = "bxl")] pub(crate) struct EnsuredArtifactGroup<'v> { // Have `EnsuredArtifactGroup` be a wrapper around `EnsuredArtifactGroupInner` as a Starlark `Value` // so that we don't have to copy all of its artifact groups whenever we call `abs_path()` or `rel_path()`, @@ -128,6 +118,7 @@ impl<'v> EnsuredArtifactGroup<'v> { pub(crate) fn inner(&self) -> &Vec { &<&EnsuredArtifactGroupInner>::unpack_value(self.inner) + .unwrap() .unwrap() .ags } @@ -135,7 +126,7 @@ impl<'v> EnsuredArtifactGroup<'v> { pub(crate) async fn visit_artifact_path_without_associated_deduped( &self, visitor: impl FnMut(ArtifactPath, bool) -> anyhow::Result<()>, - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, ) -> anyhow::Result<()> { visit_artifact_path_without_associated_deduped(self.inner(), self.abs, visitor, ctx).await } @@ -151,7 +142,7 @@ where RES.methods(artifact_group_methods) } - fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { + fn write_hash(&self, hasher: &mut StarlarkHasher) -> starlark::Result<()> { let _hash = self.inner.write_hash(hasher); self.abs.hash(hasher); Ok(()) @@ -163,7 +154,7 @@ impl<'v> StarlarkValue<'v> for EnsuredArtifactGroupInner where Self: ProvidesStaticType<'v>, { - fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { + fn write_hash(&self, hasher: &mut StarlarkHasher) -> starlark::Result<()> { self.ags.hash(hasher); Ok(()) } @@ -184,29 +175,28 @@ impl PartialEq for EnsuredArtifact { impl Eq for EnsuredArtifact {} -impl EnsuredArtifact { - pub(crate) fn new<'v>(artifact: Value<'v>) -> anyhow::Result { - let 
artifact = artifact - .downcast_ref::() - .map(|o| EnsuredArtifact::Artifact { - artifact: o.dupe(), - abs: false, - }) - .or_else(|| { - artifact - .downcast_ref::() - .map(|o| EnsuredArtifact::DeclaredArtifact { - artifact: o.dupe(), - abs: false, - }) - }); +#[derive(StarlarkTypeRepr, UnpackValue)] +pub(crate) enum EnsuredArtifactArg<'v> { + Artifact(&'v StarlarkArtifact), + DeclaredArtifact(&'v StarlarkDeclaredArtifact), +} - match artifact { - Some(artifact) => Ok(artifact), - None => Err(anyhow::anyhow!("must be artifact like")), +impl<'v> EnsuredArtifactArg<'v> { + pub(crate) fn into_ensured_artifact(self) -> EnsuredArtifact { + match self { + EnsuredArtifactArg::Artifact(artifact) => EnsuredArtifact::Artifact { + artifact: artifact.dupe(), + abs: false, + }, + EnsuredArtifactArg::DeclaredArtifact(artifact) => EnsuredArtifact::DeclaredArtifact { + artifact: artifact.dupe(), + abs: false, + }, } } +} +impl EnsuredArtifact { pub(crate) fn as_artifact(&self) -> &dyn StarlarkArtifactLike { match self { EnsuredArtifact::Artifact { artifact, .. } => artifact as &dyn StarlarkArtifactLike, @@ -303,7 +293,7 @@ impl Serialize for EnsuredArtifactGroupInner { } } -#[starlark_value(type = "ensured_artifact", StarlarkTypeRepr, UnpackValue)] +#[starlark_value(type = "bxl.EnsuredArtifact", StarlarkTypeRepr, UnpackValue)] impl<'v> StarlarkValue<'v> for EnsuredArtifact where Self: ProvidesStaticType<'v>, @@ -313,7 +303,7 @@ where RES.methods(ensured_artifact_methods) } - fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { + fn write_hash(&self, hasher: &mut StarlarkHasher) -> starlark::Result<()> { let _artifact_write_hash = self.as_artifact().write_hash(hasher); self.abs().hash(hasher); Ok(()) @@ -343,13 +333,13 @@ fn ensured_artifact_methods(builder: &mut MethodsBuilder) { /// ctx.output.print(ensured_with_abs_path) # should return the absolute path of the artifact /// ``` fn abs_path<'v>( - this: ValueOf<'v, &'v EnsuredArtifact>, + this: ValueTyped<'v, EnsuredArtifact>, heap: &'v Heap, - ) -> anyhow::Result> { - if this.typed.abs() { - Ok(this.value) + ) -> anyhow::Result> { + if this.abs() { + Ok(this) } else { - let artifact = match this.typed { + let artifact = match &*this { EnsuredArtifact::Artifact { artifact, .. } => EnsuredArtifact::Artifact { artifact: artifact.dupe(), abs: true, @@ -362,7 +352,7 @@ fn ensured_artifact_methods(builder: &mut MethodsBuilder) { } }; - Ok(heap.alloc(artifact)) + Ok(heap.alloc_typed(artifact)) } } @@ -382,13 +372,13 @@ fn ensured_artifact_methods(builder: &mut MethodsBuilder) { /// ctx.output.print(ensured_with_rel_path) # should return the relative path of the artifact /// ``` fn rel_path<'v>( - this: ValueOf<'v, &'v EnsuredArtifact>, + this: ValueTyped<'v, EnsuredArtifact>, heap: &'v Heap, - ) -> anyhow::Result> { - if !this.typed.abs() { - Ok(this.value) + ) -> anyhow::Result> { + if !this.abs() { + Ok(this) } else { - let artifact = match this.typed { + let artifact = match &*this { EnsuredArtifact::Artifact { artifact, .. 
} => EnsuredArtifact::Artifact { artifact: artifact.dupe(), abs: false, @@ -401,7 +391,7 @@ fn ensured_artifact_methods(builder: &mut MethodsBuilder) { } }; - Ok(heap.alloc(artifact)) + Ok(heap.alloc_typed(artifact)) } } } @@ -429,18 +419,18 @@ fn artifact_group_methods(builder: &mut MethodsBuilder) { /// ctx.output.print(ensured_with_abs_path) # should return the absolute path of the artifact /// ``` fn abs_path<'v>( - this: ValueOf<'v, &'v EnsuredArtifactGroup<'v>>, + this: ValueTyped<'v, EnsuredArtifactGroup<'v>>, heap: &'v Heap, - ) -> anyhow::Result> { - if this.typed.abs { - Ok(this.value) + ) -> anyhow::Result>> { + if this.abs { + Ok(this) } else { let artifact = EnsuredArtifactGroup { - inner: this.typed.inner, + inner: this.inner, abs: true, }; - Ok(heap.alloc(artifact)) + Ok(heap.alloc_typed(artifact)) } } @@ -460,18 +450,18 @@ fn artifact_group_methods(builder: &mut MethodsBuilder) { /// ctx.output.print(ensured_with_rel_path) # should return the relative path of the artifact /// ``` fn rel_path<'v>( - this: ValueOf<'v, &'v EnsuredArtifactGroup<'v>>, + this: ValueTyped<'v, EnsuredArtifactGroup<'v>>, heap: &'v Heap, - ) -> anyhow::Result> { - if !this.typed.abs { - Ok(this.value) + ) -> anyhow::Result>> { + if !this.abs { + Ok(this) } else { let artifact = EnsuredArtifactGroup { - inner: this.typed.inner, + inner: this.inner, abs: false, }; - Ok(heap.alloc(artifact)) + Ok(heap.alloc_typed(artifact)) } } } diff --git a/app/buck2_bxl/src/bxl/starlark_defs/audit.rs b/app/buck2_bxl/src/bxl/starlark_defs/audit.rs index 78fc52e58f3eb..3bef3afab4cd0 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/audit.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/audit.rs @@ -12,9 +12,9 @@ use anyhow::Context as _; use buck2_build_api::audit_cell::audit_cell; use buck2_build_api::audit_output::audit_output; use buck2_build_api::audit_output::AuditOutputResult; +use buck2_common::global_cfg_options::GlobalCfgOptions; use buck2_core::cells::CellResolver; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; -use buck2_core::target::label::TargetLabel; use buck2_interpreter::types::target_label::StarlarkTargetLabel; use derivative::Derivative; use derive_more::Display; @@ -25,8 +25,9 @@ use starlark::environment::Methods; use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; use starlark::starlark_module; -use starlark::values::dict::Dict; -use starlark::values::none::NoneType; +use starlark::values::dict::AllocDict; +use starlark::values::list_or_tuple::UnpackListOrTuple; +use starlark::values::none::NoneOr; use starlark::values::starlark_value; use starlark::values::AllocValue; use starlark::values::Heap; @@ -34,7 +35,7 @@ use starlark::values::NoSerialize; use starlark::values::StarlarkValue; use starlark::values::Trace; use starlark::values::Value; -use starlark::StarlarkDocs; +use starlark::values::ValueTyped; use crate::bxl::starlark_defs::context::BxlContext; use crate::bxl::starlark_defs::nodes::action::StarlarkAction; @@ -46,27 +47,23 @@ use crate::bxl::value_as_starlark_target_label::ValueAsStarlarkTargetLabel; Display, Trace, NoSerialize, - Allocative, - StarlarkDocs + Allocative )] -#[starlark_docs(directory = "bxl")] #[derivative(Debug)] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] #[allocative(skip)] pub(crate) struct StarlarkAuditCtx<'v> { - #[trace(unsafe_ignore)] #[derivative(Debug = "ignore")] - ctx: &'v BxlContext<'v>, + ctx: ValueTyped<'v, BxlContext<'v>>, #[trace(unsafe_ignore)] #[derivative(Debug = "ignore")] working_dir: 
ProjectRelativePathBuf, #[trace(unsafe_ignore)] #[derivative(Debug = "ignore")] cell_resolver: CellResolver, - global_target_platform: Option<TargetLabel>, } -#[starlark_value(type = "audit_ctx", StarlarkTypeRepr, UnpackValue)] +#[starlark_value(type = "bxl.AuditContext", StarlarkTypeRepr, UnpackValue)] impl<'v> StarlarkValue<'v> for StarlarkAuditCtx<'v> { fn get_methods() -> Option<&'static Methods> { static RES: MethodsStatic = MethodsStatic::new(); @@ -82,16 +79,14 @@ impl<'v> AllocValue<'v> for StarlarkAuditCtx<'v> { impl<'v> StarlarkAuditCtx<'v> { pub(crate) fn new( - ctx: &'v BxlContext<'v>, + ctx: ValueTyped<'v, BxlContext<'v>>, working_dir: ProjectRelativePathBuf, cell_resolver: CellResolver, - global_target_platform: Option<TargetLabel>, ) -> anyhow::Result<Self> { Ok(Self { ctx, working_dir, cell_resolver, - global_target_platform, }) } } @@ -101,8 +96,8 @@ impl<'v> StarlarkAuditCtx<'v> { #[starlark_module] fn audit_methods(builder: &mut MethodsBuilder) { /// Returns either: - /// - The `StarlarkAction` which created the buck-out path, if exists. - /// - The `StarlarkTargetLabel` (unconfigured target label) constructed from the buck-out path, if the configuration hashes do not match. + /// - The `action` which created the buck-out path, if it exists. + /// - The `unconfigured_target_label` constructed from the buck-out path, if the configuration hashes do not match. /// - None, if the configuration hash of the buck-out path matches the one passed into this function, or the default target /// configuration, but no action could be found that generated the buck-out path. /// @@ -117,29 +112,39 @@ fn audit_methods(builder: &mut MethodsBuilder) { /// ``` fn output<'v>( this: &StarlarkAuditCtx<'v>, + // TODO(nga): parameters should be either positional or named, not both. output_path: &'v str, - #[starlark(default = NoneType)] target_platform: Value<'v>, + #[starlark(default = ValueAsStarlarkTargetLabel::NONE)] + target_platform: ValueAsStarlarkTargetLabel<'v>, heap: &'v Heap, - ) -> anyhow::Result<Option<Value<'v>>> { + ) -> anyhow::Result< + // TODO(nga): used precise type. + NoneOr<Value<'v>>, + > { let target_platform = target_platform.parse_target_platforms( - &this.ctx.data.target_alias_resolver, - &this.ctx.data.cell_resolver, - this.ctx.data.cell_name, - &this.global_target_platform, + this.ctx.target_alias_resolver(), + this.ctx.cell_resolver(), + this.ctx.cell_alias_resolver(), + this.ctx.cell_name(), + &this.ctx.global_cfg_options().target_platform, )?; this.ctx.async_ctx.borrow_mut().via(|ctx| { async move { - audit_output( + let output = audit_output( output_path, &this.working_dir, &this.cell_resolver, ctx, - target_platform, + &GlobalCfgOptions { + target_platform, + cli_modifiers: vec![].into(), + }, ) - .await? - .map(|result| { - anyhow::Ok(match result { + .await?; + match output { + None => Ok(NoneOr::None), + Some(result) => anyhow::Ok(NoneOr::Other(match result { AuditOutputResult::Match(action) => heap.alloc(StarlarkAction( action .action() @@ -149,15 +154,14 @@ fn audit_methods(builder: &mut MethodsBuilder) { AuditOutputResult::MaybeRelevant(label) => { heap.alloc(StarlarkTargetLabel::new(label)) } - }) - }) - .transpose() + })), + } } .boxed_local() }) } - /// Query information about the [repositories] list in .buckconfig. + /// Query information about the [cells] list in .buckconfig. /// /// Takes the following parameters: /// * `aliases_to_resolve` - list of cell aliases to query. These aliases will be resolved in the root cell of the BXL script.
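The `output()` hunk above is one instance of a pattern repeated through this diff (see `build_result.rs` below): `Option`/`NoneType` at the Starlark boundary is replaced by `NoneOr`, so the possibility of returning `None` is explicit in the declared type. A standalone sketch of such a type, assuming it mirrors `starlark::values::none::NoneOr` in spirit (the real one additionally implements the alloc/unpack traits so it can cross into Starlark):

```rust
// Illustrative NoneOr-style type; not the starlark crate's definition.
#[derive(Debug, PartialEq)]
enum NoneOr<T> {
    None,
    Other(T),
}

impl<T> NoneOr<T> {
    // Conversions of the kind the diff relies on, e.g. `NoneOr::from_option`
    // on the Rust side and `into_option()` when consuming arguments.
    fn from_option(opt: Option<T>) -> Self {
        match opt {
            Some(x) => NoneOr::Other(x),
            None => NoneOr::None,
        }
    }
    fn into_option(self) -> Option<T> {
        match self {
            NoneOr::None => None,
            NoneOr::Other(x) => Some(x),
        }
    }
    fn is_none(&self) -> bool {
        matches!(self, NoneOr::None)
    }
}

fn main() {
    let v = NoneOr::from_option(Some(42));
    assert!(!v.is_none());
    assert_eq!(v.into_option(), Some(42));
}
```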
@@ -173,29 +177,27 @@ fn audit_methods(builder: &mut MethodsBuilder) { /// ``` fn cell<'v>( this: &StarlarkAuditCtx<'v>, - #[starlark(default = Vec::new())] aliases_to_resolve: Vec, + // TODO(nga): parameters should be either positional or named, not both. + #[starlark(default = UnpackListOrTuple::default())] aliases_to_resolve: UnpackListOrTuple< + String, + >, #[starlark(require = named, default = false)] aliases: bool, - heap: &'v Heap, - ) -> anyhow::Result> { - audit_cell( - &aliases_to_resolve, - aliases, - &this.cell_resolver, - &this.working_dir, - this.ctx.project_root(), - ) - .map(|result| { - Ok(heap.alloc(Dict::new( - result - .into_iter() - .map(|(k, v)| { - Ok(( - heap.alloc_str(&k).to_value().get_hashed()?, - heap.alloc_str(&v.to_string()).to_value(), - )) - }) - .collect::>()?, - ))) - })? + ) -> anyhow::Result>> { + this.ctx.async_ctx.borrow_mut().via(|ctx| { + async { + let result = audit_cell( + ctx, + &aliases_to_resolve.items, + aliases, + &this.working_dir, + this.ctx.project_root(), + )? + .await?; + Ok(AllocDict( + result.into_iter().map(|(k, v)| (k, v.to_string())), + )) + } + .boxed_local() + }) } } diff --git a/app/buck2_bxl/src/bxl/starlark_defs/build_result.rs b/app/buck2_bxl/src/bxl/starlark_defs/build_result.rs index cf07096550147..5008cd19f3d5e 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/build_result.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/build_result.rs @@ -14,13 +14,13 @@ use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; use starlark::starlark_module; use starlark::starlark_simple_value; +use starlark::values::none::NoneOr; use starlark::values::starlark_value; use starlark::values::NoSerialize; use starlark::values::ProvidesStaticType; use starlark::values::StarlarkValue; use starlark::values::Value; use starlark::values::ValueLike; -use starlark::StarlarkDocs; use crate::bxl::starlark_defs::context::build::StarlarkFailedArtifactIterable; use crate::bxl::starlark_defs::context::build::StarlarkFailedArtifactIterableGen; @@ -34,10 +34,8 @@ use crate::bxl::starlark_defs::context::build::StarlarkProvidersArtifactIterable derive_more::Display, ProvidesStaticType, NoSerialize, - StarlarkDocs, Allocative )] -#[starlark_docs(directory = "bxl")] pub(crate) struct StarlarkBxlBuildResult(pub(crate) BxlBuildResult); /// The result of building in bxl. @@ -54,10 +52,12 @@ fn starlark_build_result_methods(builder: &mut MethodsBuilder) { /// ``` fn artifacts<'v>( this: Value<'v>, - ) -> anyhow::Result>> { + ) -> anyhow::Result>> { match &this.downcast_ref::().unwrap().0 { - BxlBuildResult::None => Ok(None), - BxlBuildResult::Built { .. } => Ok(Some(StarlarkProvidersArtifactIterableGen(this))), + BxlBuildResult::None => Ok(NoneOr::None), + BxlBuildResult::Built { .. } => { + Ok(NoneOr::Other(StarlarkProvidersArtifactIterableGen(this))) + } } } @@ -70,17 +70,19 @@ fn starlark_build_result_methods(builder: &mut MethodsBuilder) { /// for target, value in ctx.build(ctx.cli_args.target).items(): /// ctx.output.print(value.failures()) /// ``` - fn failures<'v>(this: Value<'v>) -> anyhow::Result>> { + fn failures<'v>(this: Value<'v>) -> anyhow::Result>> { match &this.downcast_ref::().unwrap().0 { - BxlBuildResult::None => Ok(None), - BxlBuildResult::Built { .. } => Ok(Some(StarlarkFailedArtifactIterableGen(this))), + BxlBuildResult::None => Ok(NoneOr::None), + BxlBuildResult::Built { .. 
} => { + Ok(NoneOr::Other(StarlarkFailedArtifactIterableGen(this))) + } } } } starlark_simple_value!(StarlarkBxlBuildResult); -#[starlark_value(type = "bxl_build_result")] +#[starlark_value(type = "bxl.BuildResult")] impl<'v> StarlarkValue<'v> for StarlarkBxlBuildResult { fn get_methods() -> Option<&'static Methods> { static RES: MethodsStatic = MethodsStatic::new(); diff --git a/app/buck2_bxl/src/bxl/starlark_defs/bxl_function.rs b/app/buck2_bxl/src/bxl/starlark_defs/bxl_function.rs index 1f2000f38d973..7ec6dbe105ee7 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/bxl_function.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/bxl_function.rs @@ -9,7 +9,6 @@ use std::cell::RefCell; use std::fmt; -use std::fmt::Display; use std::sync::Arc; use allocative::Allocative; @@ -26,7 +25,7 @@ use starlark::environment::GlobalsBuilder; use starlark::eval::Evaluator; use starlark::starlark_module; use starlark::starlark_simple_value; -use starlark::values::dict::DictOf; +use starlark::values::dict::UnpackDictEntries; use starlark::values::starlark_value; use starlark::values::typing::StarlarkCallable; use starlark::values::AllocValue; @@ -39,7 +38,6 @@ use starlark::values::StarlarkValue; use starlark::values::Trace; use starlark::values::Value; use starlark_map::ordered_map::OrderedMap; -use thiserror::Error; use crate::bxl::eval::CliResolutionCtx; use crate::bxl::starlark_defs::cli_args; @@ -48,12 +46,24 @@ use crate::bxl::starlark_defs::cli_args::CliArgError; use crate::bxl::starlark_defs::cli_args::CliArgValue; #[starlark_module] -pub(crate) fn register_bxl_function(builder: &mut GlobalsBuilder) { +pub(crate) fn register_bxl_prefixed_main_function(builder: &mut GlobalsBuilder) { fn bxl_main<'v>( #[starlark(require = named)] r#impl: StarlarkCallable<'v>, - #[starlark(require = named)] cli_args: DictOf<'v, &'v str, &'v CliArgs>, + #[starlark(require = named)] cli_args: UnpackDictEntries<&'v str, &'v CliArgs>, #[starlark(require = named, default = "")] doc: &str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, + ) -> anyhow::Result> { + bxl_impl(r#impl, cli_args, doc, eval) + } +} + +#[starlark_module] +pub(crate) fn register_bxl_main_function(builder: &mut GlobalsBuilder) { + fn main<'v>( + #[starlark(require = named)] r#impl: StarlarkCallable<'v>, + #[starlark(require = named)] cli_args: UnpackDictEntries<&'v str, &'v CliArgs>, + #[starlark(require = named, default = "")] doc: &str, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result> { bxl_impl(r#impl, cli_args, doc, eval) } @@ -61,9 +71,9 @@ pub(crate) fn register_bxl_function(builder: &mut GlobalsBuilder) { fn bxl_impl<'v>( r#impl: StarlarkCallable<'v>, - cli_args: DictOf<'v, &'v str, &'v CliArgs>, + cli_args: UnpackDictEntries<&'v str, &'v CliArgs>, doc: &str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result> { let implementation = r#impl.0; @@ -75,7 +85,7 @@ fn bxl_impl<'v>( let mut unresolved_cli_args = SmallMap::new(); let mut short_args = SmallSet::new(); - for (arg, def) in cli_args.to_dict().into_iter() { + for (arg, def) in cli_args.entries { if let Some(short) = def.short { if short_args.contains(&short) { return Err(CliArgError::DuplicateShort(short.to_owned()).into()); @@ -96,7 +106,7 @@ fn bxl_impl<'v>( } /// Errors around rule declaration, instantiation, validation, etc -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum BxlError { #[error("Bxl defined in `{0}` must be assigned to a variable, e.g. 
`my_bxl = bxl_main(...)`")] BxlNotAssigned(String), @@ -135,7 +145,11 @@ impl<'v> AllocValue<'v> for BxlFunction<'v> { #[starlark_value(type = "bxl")] impl<'v> StarlarkValue<'v> for BxlFunction<'v> { - fn export_as(&self, variable_name: &str, _eval: &mut Evaluator<'v, '_>) -> anyhow::Result<()> { + fn export_as( + &self, + variable_name: &str, + _eval: &mut Evaluator<'v, '_, '_>, + ) -> starlark::Result<()> { *self.id.borrow_mut() = Some(BxlFunctionLabel { bxl_path: self.bxl_path.clone(), name: variable_name.to_owned(), @@ -165,7 +179,7 @@ impl<'v> Freeze for BxlFunction<'v> { } #[derive(Debug, Display, ProvidesStaticType, NoSerialize, Allocative)] -#[display(fmt = "{}()", "bxl_id.name")] +#[display("{}()", bxl_id.name)] pub(crate) struct FrozenBxlFunction { implementation: FrozenValue, cli_args: SmallMap, @@ -184,13 +198,13 @@ impl FrozenBxlFunction { self.implementation } - pub(crate) fn to_clap<'v>(&'v self, mut clap: clap::Command<'v>) -> clap::Command<'v> { + pub(crate) fn to_clap<'v>(&'v self, mut clap: clap::Command) -> clap::Command { if let Some(docs) = self.docs.as_ref() { - clap = clap.about(docs.as_str()) + clap = clap.about(docs.clone()) } for (arg, def) in self.cli_args.iter() { - clap = clap.arg(def.to_clap(clap::Arg::new(arg.as_str()).long(arg.as_str()))) + clap = clap.arg(def.to_clap(clap::Arg::new(arg.clone()).long(arg.clone()))) } clap diff --git a/app/buck2_bxl/src/bxl/starlark_defs/cli_args.rs b/app/buck2_bxl/src/bxl/starlark_defs/cli_args.rs index ef64f264b586d..c0b71226d528c 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/cli_args.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/cli_args.rs @@ -10,24 +10,23 @@ //! Command line arguments definition for bxl functions use std::collections::HashSet; -use std::fmt::Display; use std::fmt::Formatter; use std::hash::Hash; use std::sync::Arc; use allocative::Allocative; use anyhow::Context as _; -use buck2_common::result::SharedResult; -use buck2_core::pattern::lex_target_pattern; +use buck2_core::pattern::pattern::lex_target_pattern; +use buck2_core::pattern::pattern::ParsedPattern; use buck2_core::pattern::pattern_type::ProvidersPatternExtra; use buck2_core::pattern::pattern_type::TargetPatternExtra; -use buck2_core::pattern::ParsedPattern; use buck2_core::provider::label::ProvidersLabel; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use buck2_interpreter::types::configured_providers_label::StarlarkProvidersLabel; use buck2_interpreter::types::target_label::StarlarkTargetLabel; use buck2_node::load_patterns::load_patterns; use buck2_node::load_patterns::MissingTargetBehavior; +use clap::ArgAction; use derive_more::Display; use dupe::Dupe; use futures::future::BoxFuture; @@ -44,6 +43,7 @@ use starlark::values::dict::Dict; use starlark::values::float::StarlarkFloat; use starlark::values::list::AllocList; use starlark::values::list::ListRef; +use starlark::values::list_or_tuple::UnpackListOrTuple; use starlark::values::none::NoneType; use starlark::values::starlark_value; use starlark::values::Heap; @@ -53,15 +53,15 @@ use starlark::values::UnpackValue; use starlark::values::Value; use starlark::values::ValueError; use starlark::values::ValueLike; +use starlark::StarlarkResultExt; use starlark_map::ordered_map::OrderedMap; use starlark_map::small_map::SmallMap; -use thiserror::Error; use crate::bxl::eval::CliResolutionCtx; /// Defines the cli args for the bxl function #[derive(Clone, Debug, Display, ProvidesStaticType, NoSerialize, Allocative)] -#[display(fmt = "{:?}", self)] 
+#[display("{:?}", self)] pub(crate) struct CliArgs { /// The default value. If None, the value is not optional and must be provided by the user pub(crate) default: Option>, @@ -77,7 +77,7 @@ pub(crate) struct CliArgs { starlark_simple_value!(CliArgs); -#[starlark_value(type = "bxl_cli_args")] +#[starlark_value(type = "bxl.CliArgs")] impl<'v> StarlarkValue<'v> for CliArgs {} impl CliArgs { @@ -118,8 +118,8 @@ impl CliArgs { }) } - pub(crate) fn to_clap<'a>(&'a self, arg: clap::Arg<'a>) -> clap::Arg<'a> { - let mut arg = self.coercer.to_clap(arg.help(self.doc.as_str())); + pub(crate) fn to_clap<'a>(&'a self, arg: clap::Arg) -> clap::Arg { + let mut arg = self.coercer.to_clap(arg.help(self.doc.clone())); if let Some(short) = self.short { arg = arg.short(short); } @@ -164,11 +164,11 @@ pub(crate) enum JsonCliArgValueData { Float(String), Int(String), String(String), - #[display(fmt = "{}", "_0.iter().map(|v| v.to_string()).join(\",\")")] + #[display("{}", _0.iter().map(|v| v.to_string()).join(","))] List(Vec), #[display( - fmt = "{}", - "_0.iter().map(|(k, v)| format!(\"(k={},v={})\", k, v.to_string())).join(\",\")" + "{}", + _0.iter().map(|(k, v)| format!("(k={},v={})", k, v)).join(",") )] Object(OrderedMap), } @@ -250,7 +250,7 @@ pub(crate) enum CliArgValue { // Type of list elements is used to verify that concatenation is valid. // That only can be checked after configuration took place, // so pass the type info together with values to be used later. - #[display(fmt = "{}", "_0.iter().map(|v| v.to_string()).join(\",\")")] + #[display("{}", _0.iter().map(|v| v.to_string()).join(","))] List(Vec), None, TargetLabel(TargetLabel), @@ -277,7 +277,6 @@ impl CliArgValue { } #[derive(Debug, VariantName, Clone, Dupe, Allocative)] -#[cfg_attr(feature = "gazebo_lint", allow(gazebo_lint_arc_on_dupe))] // recursive type pub(crate) enum CliArgType { Bool, Int, @@ -365,7 +364,8 @@ impl CliArgType { } } -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] +#[buck2(input)] pub(crate) enum CliArgError { #[error("Expected default value of type `{}`, but got `{}`", _0, _1)] DefaultValueTypeError(CliArgType, String), @@ -389,11 +389,16 @@ impl CliArgType { CliArgType::Bool => CliArgValue::Bool(value.unpack_bool().ok_or_else(|| { CliArgError::DefaultValueTypeError(self.dupe(), value.get_type().to_owned()) })?), - CliArgType::Int => CliArgValue::Int(BigInt::unpack_value(value).ok_or_else(|| { - CliArgError::DefaultValueTypeError(self.dupe(), value.get_type().to_owned()) - })?), + CliArgType::Int => CliArgValue::Int( + BigInt::unpack_value(value) + .into_anyhow_result()? + .ok_or_else(|| { + CliArgError::DefaultValueTypeError(self.dupe(), value.get_type().to_owned()) + })?, + ), CliArgType::Float => CliArgValue::Float( StarlarkFloat::unpack_value(value) + .into_anyhow_result()? .ok_or_else(|| { CliArgError::DefaultValueTypeError(self.dupe(), value.get_type().to_owned()) })? @@ -474,19 +479,24 @@ impl CliArgType { }) } - #[allow(deprecated)] // TODO(nga): fix. 
- pub(crate) fn to_clap<'a>(&'a self, clap: clap::Arg<'a>) -> clap::Arg<'a> { + pub(crate) fn to_clap<'a>(&'a self, clap: clap::Arg) -> clap::Arg { match self { - CliArgType::Bool => clap.takes_value(true).validator(|x| x.parse::()), - CliArgType::Int => clap.takes_value(true).validator(|x| x.parse::()), - CliArgType::Float => clap.takes_value(true).validator(|x| x.parse::()), - CliArgType::String => clap.takes_value(true), + CliArgType::Bool => clap + .num_args(1) + .value_parser(|x: &str| x.parse::().map(|_| x.to_owned())), + CliArgType::Int => clap + .num_args(1) + .value_parser(|x: &str| x.parse::().map(|_| x.to_owned())), + CliArgType::Float => clap + .num_args(1) + .value_parser(|x: &str| x.parse::().map(|_| x.to_owned())), + CliArgType::String => clap.num_args(1), CliArgType::Enumeration(variants) => clap - .takes_value(true) - .possible_values(variants.iter().map(String::as_str)), - CliArgType::List(inner) => inner.to_clap(clap.takes_value(true).multiple(true)), - CliArgType::Option(inner) => inner.to_clap(clap.required(false)), - CliArgType::TargetLabel => clap.takes_value(true).validator(|x| { + .num_args(1) + .value_parser(variants.iter().cloned().collect::>()), + CliArgType::List(inner) => inner.to_clap(clap).num_args(0..).action(ArgAction::Append), + CliArgType::Option(inner) => inner.to_clap(clap).required(false), + CliArgType::TargetLabel => clap.num_args(1).value_parser(|x: &str| { lex_target_pattern::(x, false) .and_then(|parsed| parsed.pattern.infer_target()) .and_then(|parsed| { @@ -495,8 +505,9 @@ impl CliArgType { .context(CliArgError::NotALabel(x.to_owned(), "target")) .map(|_| ()) }) + .map(|_| x.to_owned()) }), - CliArgType::SubTarget => clap.takes_value(true).validator(|x| { + CliArgType::SubTarget => clap.num_args(1).value_parser(|x: &str| { lex_target_pattern::(x, false) .and_then(|parsed| parsed.pattern.infer_target()) .and_then(|parsed| { @@ -505,10 +516,11 @@ impl CliArgType { .context(CliArgError::NotALabel(x.to_owned(), "target")) .map(|_| ()) }) + .map(|_| x.to_owned()) }), - CliArgType::TargetExpr => clap.takes_value(true), - CliArgType::SubTargetExpr => clap.takes_value(true), - CliArgType::Json => clap.takes_value(true), + CliArgType::TargetExpr => clap.num_args(1), + CliArgType::SubTargetExpr => clap.num_args(1), + CliArgType::Json => clap.num_args(1), } } @@ -544,11 +556,13 @@ impl CliArgType { CliArgType::List(inner) => match clap.values_of() { None => None, Some(values) => Some(CliArgValue::List( - futures::future::join_all(values.map(async move |v| try { - inner - .parse_clap(ArgAccessor::Literal(v), ctx) - .await? - .expect("shouldn't be empty when parsing list items") + futures::future::join_all(values.map(|v| async move { + try { + inner + .parse_clap(ArgAccessor::Literal(v), ctx) + .await? + .expect("shouldn't be empty when parsing list items") + } })) .await .into_iter() @@ -571,6 +585,7 @@ impl CliArgType { ctx.relative_dir.as_cell_path(), x, &ctx.cell_resolver, + &ctx.cell_alias_resolver, )? .as_target_label(x)?, ) @@ -585,6 +600,7 @@ impl CliArgType { ctx.relative_dir.as_cell_path(), x, &ctx.cell_resolver, + &ctx.cell_alias_resolver, )? 
.as_providers_label(x)?, ) @@ -598,14 +614,19 @@ impl CliArgType { ctx.relative_dir.as_cell_path(), x, &ctx.cell_resolver, + &ctx.cell_alias_resolver, )?; - let loaded = - load_patterns(ctx.dice, vec![pattern], MissingTargetBehavior::Fail).await?; + let loaded = load_patterns( + &mut ctx.dice.clone(), + vec![pattern], + MissingTargetBehavior::Fail, + ) + .await?; Some(CliArgValue::List( loaded .iter_loaded_targets() .map_ok(|t| CliArgValue::TargetLabel(t.label().dupe())) - .collect::>()?, + .collect::>()?, )) } CliArgType::SubTargetExpr => { @@ -615,9 +636,14 @@ impl CliArgType { ctx.relative_dir.as_cell_path(), x, &ctx.cell_resolver, + &ctx.cell_alias_resolver, )?; - let loaded = - load_patterns(ctx.dice, vec![pattern], MissingTargetBehavior::Fail).await?; + let loaded = load_patterns( + &mut ctx.dice.clone(), + vec![pattern], + MissingTargetBehavior::Fail, + ) + .await?; Some(CliArgValue::List( loaded @@ -635,7 +661,7 @@ impl CliArgType { .collect::>(), Err(e) => vec![Err(e.dupe())], }) - .collect::>>()?, + .collect::>>()?, )) } CliArgType::Json => match clap.value_of() { @@ -711,7 +737,7 @@ pub(crate) fn cli_args_module(registry: &mut GlobalsBuilder) { } fn r#enum<'v>( - #[starlark(require = pos)] variants: Vec, + #[starlark(require = pos)] variants: UnpackListOrTuple, default: Option>, #[starlark(default = "")] doc: &str, #[starlark(require = named)] short: Option>, @@ -767,13 +793,39 @@ pub(crate) fn register_cli_args_module(registry: &mut GlobalsBuilder) { cli_args_module(registry) } +pub(crate) enum ArgAccessor<'a> { + Clap { + clap: &'a clap::ArgMatches, + arg: &'a str, + }, + Literal(&'a str), +} + +impl<'a> ArgAccessor<'a> { + fn value_of(&self) -> Option<&str> { + match self { + ArgAccessor::Clap { clap, arg } => clap.get_one::(arg).map(|x| x.as_str()), + ArgAccessor::Literal(s) => Some(s), + } + } + + fn values_of(&self) -> Option> { + match self { + ArgAccessor::Clap { clap, arg } => clap + .get_many::(arg) + .map(|x| itertools::Either::Left(x.map(|y| y.as_str()))), + ArgAccessor::Literal(s) => Some(itertools::Either::Right(std::iter::once(*s))), + } + } +} + #[cfg(test)] mod tests { use std::collections::HashSet; use buck2_core::provider::label::testing::ProvidersLabelTestExt; use buck2_core::provider::label::ProvidersLabel; - use buck2_core::target::label::TargetLabel; + use buck2_core::target::label::label::TargetLabel; use buck2_interpreter::types::configured_providers_label::StarlarkProvidersLabel; use buck2_interpreter::types::target_label::StarlarkTargetLabel; use num_bigint::BigInt; @@ -911,28 +963,3 @@ mod tests { Ok(()) } } - -pub(crate) enum ArgAccessor<'a> { - Clap { - clap: &'a clap::ArgMatches, - arg: &'a str, - }, - Literal(&'a str), -} - -#[allow(deprecated)] // TODO(nga): fix. -impl<'a> ArgAccessor<'a> { - fn value_of(&self) -> Option<&str> { - match self { - ArgAccessor::Clap { clap, arg } => clap.value_of(arg), - ArgAccessor::Literal(s) => Some(s), - } - } - - fn values_of(&self) -> Option> { - match self { - ArgAccessor::Clap { clap, arg } => clap.values_of(arg).map(itertools::Either::Left), - ArgAccessor::Literal(s) => Some(itertools::Either::Right(std::iter::once(*s))), - } - } -} diff --git a/app/buck2_bxl/src/bxl/starlark_defs/context.rs b/app/buck2_bxl/src/bxl/starlark_defs/context.rs index b23022ff257af..49db35f511817 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/context.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/context.rs @@ -8,87 +8,48 @@ */ //! The context containing the available buck commands and query operations for `bxl` functions. -//! 
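Before moving into the `context.rs` rewrite: the `to_clap`/`ArgAccessor` changes in `cli_args.rs` above are a clap 3 to clap 4 migration, where `takes_value(true)` becomes `num_args(1)`, `validator` becomes `value_parser` (returning the owned value instead of `()`), `multiple(true)` becomes `num_args(0..)` with `ArgAction::Append`, and `value_of`/`values_of` become `get_one`/`get_many`. A self-contained sketch of that mapping, using the same `value_parser` idiom as the diff (argument names are illustrative):

```rust
use clap::{Arg, ArgAction, Command};

fn main() {
    let cmd = Command::new("bxl-args-demo")
        .arg(
            Arg::new("depth")
                .long("depth")
                // clap 3: .takes_value(true).validator(|x| x.parse::<i32>())
                .num_args(1)
                .value_parser(|x: &str| x.parse::<i32>().map(|_| x.to_owned())),
        )
        .arg(
            Arg::new("targets")
                .long("targets")
                // clap 3: .takes_value(true).multiple(true)
                .num_args(0..)
                .action(ArgAction::Append),
        );

    let m = cmd.get_matches_from(["bxl-args-demo", "--depth", "3", "--targets", "//a:b", "//c:d"]);
    // clap 4 accessors, as used by the new ArgAccessor impl:
    assert_eq!(m.get_one::<String>("depth").map(String::as_str), Some("3"));
    let targets: Vec<&str> = m
        .get_many::<String>("targets")
        .unwrap()
        .map(String::as_str)
        .collect();
    assert_eq!(targets, ["//a:b", "//c:d"]);
}
```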
use std::cell::RefCell; -use std::cell::RefMut; -use std::fmt::Display; use std::io::Write; use std::iter; +use std::ops::Deref; use std::rc::Rc; -use std::sync::Arc; use allocative::Allocative; use anyhow::Context; -use buck2_artifact::actions::key::ActionKey; -use buck2_artifact::artifact::build_artifact::BuildArtifact; use buck2_build_api::actions::artifact::get_artifact_fs::GetArtifactFs; use buck2_build_api::analysis::registry::AnalysisRegistry; use buck2_build_api::artifact_groups::ArtifactGroup; -use buck2_build_api::deferred::types::DeferredCtx; -use buck2_build_api::dynamic::bxl::EVAL_BXL_FOR_DYNAMIC_OUTPUT; -use buck2_build_api::dynamic::deferred::dynamic_lambda_ctx_data; -use buck2_build_api::dynamic::deferred::DynamicLambda; use buck2_build_api::interpreter::rule_defs::context::AnalysisActions; -use buck2_build_api::interpreter::rule_defs::plugins::AnalysisPlugins; -use buck2_cli_proto::build_request::Materializations; use buck2_common::dice::cells::HasCellResolver; use buck2_common::dice::data::HasIoProvider; -use buck2_common::events::HasEvents; +use buck2_common::global_cfg_options::GlobalCfgOptions; use buck2_common::target_aliases::BuckConfigTargetAliasResolver; use buck2_common::target_aliases::HasTargetAliasResolver; -use buck2_core::base_deferred_key::BaseDeferredKeyDyn; use buck2_core::cells::cell_path::CellPath; -use buck2_core::cells::cell_path::CellPathRef; use buck2_core::cells::name::CellName; -use buck2_core::cells::paths::CellRelativePath; +use buck2_core::cells::CellAliasResolver; use buck2_core::cells::CellResolver; +use buck2_core::execution_types::execution::ExecutionPlatformResolution; use buck2_core::fs::artifact_path_resolver::ArtifactFs; use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; use buck2_core::fs::project::ProjectRoot; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; -use buck2_core::pattern::pattern_type::TargetPatternExtra; use buck2_core::pattern::query_file_literal::parse_query_file_literal; -use buck2_core::pattern::ParsedPattern; use buck2_core::provider::label::ConfiguredProvidersLabel; -use buck2_core::provider::label::ProvidersLabel; -use buck2_core::target::label::TargetLabel; use buck2_events::dispatch::console_message; -use buck2_events::dispatch::with_dispatcher_async; use buck2_execute::digest_config::DigestConfig; -use buck2_execute::digest_config::HasDigestConfig; -use buck2_interpreter::dice::starlark_provider::with_starlark_eval_provider; -use buck2_interpreter::print_handler::EventDispatcherPrintHandler; -use buck2_interpreter::starlark_profiler::StarlarkProfilerOrInstrumentation; -use buck2_interpreter::starlark_promise::StarlarkPromise; -use buck2_interpreter::types::configured_providers_label::StarlarkConfiguredProvidersLabel; -use buck2_interpreter::types::configured_providers_label::StarlarkProvidersLabel; -use buck2_node::nodes::configured::ConfiguredTargetNode; -use buck2_node::nodes::configured_frontend::ConfiguredTargetNodeCalculation; -use buck2_node::nodes::frontend::TargetGraphCalculation; -use buck2_node::nodes::unconfigured::TargetNode; -use buck2_query::query::syntax::simple::eval::set::TargetSet; -use dashmap::DashMap; use derivative::Derivative; use derive_more::Display; use dice::DiceComputations; use dupe::Dupe; -use either::Either; -use futures::FutureExt; use indexmap::IndexSet; use itertools::Itertools; use starlark::any::ProvidesStaticType; -use starlark::collections::SmallMap; use starlark::environment::Methods; -use starlark::environment::MethodsBuilder; use 
starlark::environment::MethodsStatic; -use starlark::environment::Module; -use starlark::eval::Evaluator; -use starlark::starlark_module; -use starlark::values::dict::Dict; -use starlark::values::none::NoneType; use starlark::values::starlark_value; -use starlark::values::structs::AllocStruct; +use starlark::values::structs::StructRef; use starlark::values::AllocValue; use starlark::values::Heap; use starlark::values::NoSerialize; @@ -97,39 +58,24 @@ use starlark::values::Trace; use starlark::values::Value; use starlark::values::ValueOfUnchecked; use starlark::values::ValueTyped; -use starlark::StarlarkDocs; -use thiserror::Error; -use super::target_universe::StarlarkTargetUniverse; -use crate::bxl::key::BxlDynamicKey; use crate::bxl::key::BxlKey; -use crate::bxl::starlark_defs::alloc_node::AllocNode; -use crate::bxl::starlark_defs::aquery::StarlarkAQueryCtx; -use crate::bxl::starlark_defs::audit::StarlarkAuditCtx; -use crate::bxl::starlark_defs::context::actions::resolve_bxl_execution_platform; -use crate::bxl::starlark_defs::context::actions::validate_action_instantiation; -use crate::bxl::starlark_defs::context::actions::BxlActions; -use crate::bxl::starlark_defs::context::fs::BxlFilesystem; +use crate::bxl::starlark_defs::context::actions::BxlExecutionResolution; use crate::bxl::starlark_defs::context::output::EnsuredArtifactOrGroup; use crate::bxl::starlark_defs::context::output::OutputStream; +use crate::bxl::starlark_defs::context::starlark_async::BxlDiceComputations; use crate::bxl::starlark_defs::context::starlark_async::BxlSafeDiceComputations; -use crate::bxl::starlark_defs::cquery::StarlarkCQueryCtx; -use crate::bxl::starlark_defs::event::StarlarkUserEventParser; -use crate::bxl::starlark_defs::providers_expr::ProvidersExpr; -use crate::bxl::starlark_defs::target_expr::filter_incompatible; -use crate::bxl::starlark_defs::target_expr::TargetExpr; -use crate::bxl::starlark_defs::targetset::StarlarkTargetSet; -use crate::bxl::starlark_defs::uquery::StarlarkUQueryCtx; -use crate::bxl::value_as_starlark_target_label::ValueAsStarlarkTargetLabel; pub(crate) mod actions; pub(crate) mod analysis; pub(crate) mod build; +pub(crate) mod dynamic; pub(crate) mod fs; +pub(crate) mod methods; pub(crate) mod output; pub(crate) mod starlark_async; -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] enum BxlContextDynamicError { #[error("`{0}()` is unsupported")] Unsupported(String), @@ -137,21 +83,22 @@ enum BxlContextDynamicError { RequireSameExecutionPlatformAsRoot, } -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] #[error("Expected a single target as a string literal, not a target pattern")] struct NotATargetLabelString; +#[derive(buck2_error::Error, Debug)] +#[error( + "Unconfigured target label(s)/node(s) was passed into analysis. Targets passed into analysis should be configured." +)] +struct UnconfiguredTargetInAnalysis; + /// Data object for `BxlContextType::Root`. #[derive(ProvidesStaticType, Trace, NoSerialize, Allocative, Debug, Derivative)] pub(crate) struct RootBxlContextData<'v> { - output_stream: ValueTyped<'v, OutputStream<'v>>, - error_stream: ValueTyped<'v, OutputStream<'v>>, - cli_args: Value<'v>, - /// Use a RefCell/Option so when we are done with it, without obtaining exclusive access, - /// we can take the internal state without having to clone it. 
- #[derivative(Debug = "ignore")] - #[allocative(skip)] - materializations: Arc>, + output_stream: ValueTyped<'v, OutputStream>, + error_stream: ValueTyped<'v, OutputStream>, + cli_args: ValueOfUnchecked<'v, StructRef<'v>>, } /// Data object for `BxlContextType::Dynamic`. @@ -198,55 +145,76 @@ impl<'v> Display for BxlContextType<'v> { Display, Trace, NoSerialize, - StarlarkDocs, Allocative )] -#[starlark_docs(directory = "bxl")] #[derivative(Debug)] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] pub(crate) struct BxlContext<'v> { #[trace(unsafe_ignore)] #[derivative(Debug = "ignore")] #[allocative(skip)] - pub(crate) async_ctx: Rc>>, + pub(crate) async_ctx: Rc>, pub(crate) data: BxlContextNoDice<'v>, } +impl<'v> Deref for BxlContext<'v> { + type Target = BxlContextNoDice<'v>; + + fn deref(&self) -> &Self::Target { + &self.data + } +} + #[derive(Derivative, Display, Trace, NoSerialize, Allocative)] #[derivative(Debug)] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] pub(crate) struct BxlContextNoDice<'v> { - pub(crate) current_bxl: BxlKey, + state: ValueTyped<'v, AnalysisActions<'v>>, + context_type: BxlContextType<'v>, + core: Rc, +} + +impl Deref for BxlContextNoDice<'_> { + type Target = BxlContextCoreData; + + fn deref(&self) -> &Self::Target { + &self.core + } +} + +#[derive(Derivative, Display, Trace, Allocative)] +#[derivative(Debug)] +#[display("{:?}", self)] +pub(crate) struct BxlContextCoreData { + current_bxl: BxlKey, #[derivative(Debug = "ignore")] - pub(crate) target_alias_resolver: BuckConfigTargetAliasResolver, - pub(crate) cell_name: CellName, - pub(crate) cell_root_abs: AbsNormPathBuf, + target_alias_resolver: BuckConfigTargetAliasResolver, + cell_name: CellName, + cell_root_abs: AbsNormPathBuf, #[derivative(Debug = "ignore")] - pub(crate) cell_resolver: CellResolver, - pub(crate) state: ValueTyped<'v, AnalysisActions<'v>>, - pub(crate) global_target_platform: Option, - pub(crate) context_type: BxlContextType<'v>, - pub(crate) project_fs: ProjectRoot, + cell_resolver: CellResolver, #[derivative(Debug = "ignore")] - pub(crate) artifact_fs: ArtifactFs, + cell_alias_resolver: CellAliasResolver, + project_fs: ProjectRoot, + #[derivative(Debug = "ignore")] + artifact_fs: ArtifactFs, } -impl<'v> BxlContext<'v> { - pub(crate) fn new( - heap: &'v Heap, - current_bxl: BxlKey, - cli_args: Value<'v>, - target_alias_resolver: BuckConfigTargetAliasResolver, - project_fs: ProjectRoot, - artifact_fs: ArtifactFs, - cell_resolver: CellResolver, - cell_name: CellName, - async_ctx: BxlSafeDiceComputations<'v>, - output_sink: RefCell>, - error_sink: RefCell>, - digest_config: DigestConfig, - global_target_platform: Option, - ) -> anyhow::Result { +impl BxlContextCoreData { + pub(crate) async fn new(key: BxlKey, dice: &mut DiceComputations<'_>) -> anyhow::Result { + let label = key.label(); + let cell_resolver = dice.get_cell_resolver().await?; + let cell = label.bxl_path.cell(); + let bxl_cell = cell_resolver + .get(cell) + .with_context(|| format!("Cell does not exist: `{}`", cell))? + .dupe(); + let cell_name = bxl_cell.name(); + let target_alias_resolver = dice.target_alias_resolver().await?; + let cell_alias_resolver = dice.get_cell_alias_resolver(cell).await?; + let artifact_fs = dice.get_artifact_fs().await?; + let project_fs = dice.global_data().get_io_provider().project_root().dupe(); + let cell_root_abs = project_fs.root().join( cell_resolver .get(cell_name)? 
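The `BxlContextCoreData` extraction above leans on a two-hop `Deref` chain: `BxlContext` derefs to `BxlContextNoDice`, which derefs to the `Rc`-shared core data, so accessors like `cell_resolver()` stay callable from whichever layer a call site holds. A dependency-free sketch of that layering (illustrative names):

```rust
use std::ops::Deref;
use std::rc::Rc;

// Cheap-to-share core data, analogous to BxlContextCoreData.
struct Core {
    cell_name: String,
}

impl Core {
    fn cell_name(&self) -> &str {
        &self.cell_name
    }
}

// Analogous to BxlContextNoDice: owns an Rc of the core.
struct NoDice {
    core: Rc<Core>,
}

impl Deref for NoDice {
    type Target = Core;
    fn deref(&self) -> &Core {
        &self.core
    }
}

// Analogous to BxlContext: wraps the dice-free layer.
struct Full {
    data: NoDice,
}

impl Deref for Full {
    type Target = NoDice;
    fn deref(&self) -> &NoDice {
        &self.data
    }
}

fn main() {
    let ctx = Full {
        data: NoDice {
            core: Rc::new(Core { cell_name: "root".to_owned() }),
        },
    };
    // Method resolution walks Full -> NoDice -> Core automatically.
    println!("{}", ctx.cell_name());
}
```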
@@ -254,96 +222,144 @@ impl<'v> BxlContext<'v> { .as_project_relative_path(), ); - let async_ctx = Rc::new(RefCell::new(async_ctx)); + Ok(Self { + current_bxl: key, + target_alias_resolver, + cell_name, + cell_root_abs, + cell_resolver, + cell_alias_resolver, + project_fs, + artifact_fs, + }) + } + + pub(crate) fn key(&self) -> &BxlKey { + &self.current_bxl + } + + pub(crate) fn project_root(&self) -> &ProjectRoot { + &self.project_fs + } + + pub(crate) fn global_cfg_options(&self) -> &GlobalCfgOptions { + self.current_bxl.global_cfg_options() + } + + pub(crate) fn target_alias_resolver(&self) -> &BuckConfigTargetAliasResolver { + &self.target_alias_resolver + } + + pub(crate) fn cell_resolver(&self) -> &CellResolver { + &self.cell_resolver + } + + pub(crate) fn cell_name(&self) -> CellName { + self.cell_name + } + + pub(crate) fn cell_root_abs(&self) -> &AbsNormPathBuf { + &self.cell_root_abs + } + + pub(crate) fn cell_alias_resolver(&self) -> &CellAliasResolver { + &self.cell_alias_resolver + } + + pub(crate) fn current_bxl(&self) -> &BxlKey { + &self.current_bxl + } + + pub(crate) fn project_fs(&self) -> &ProjectRoot { + &self.project_fs + } + + pub(crate) fn artifact_fs(&self) -> &ArtifactFs { + &self.artifact_fs + } + + /// Working dir for resolving literals. + /// Note, unlike buck2 command line UI, we resolve targets and literals + /// against the cell root instead of user working dir. + pub(crate) fn working_dir(&self) -> anyhow::Result { + let cell = self.cell_resolver().get(self.cell_name())?; + Ok(cell.path().as_project_relative_path().to_owned()) + } + + pub(crate) fn parse_query_file_literal(&self, literal: &str) -> anyhow::Result { + parse_query_file_literal( + literal, + self.cell_alias_resolver(), + self.cell_resolver(), + // NOTE(nga): we pass cell root as working directory here, + // which is inconsistent with the rest of buck2: + // The same query `owner(foo.h)` is resolved using + // current directory in `buck2 query`, but relative to cell root in BXL. + self.cell_root_abs(), + self.project_root(), + ) + } +} +impl<'v> BxlContext<'v> { + pub(crate) fn new( + heap: &'v Heap, + core: Rc, + cli_args: ValueOfUnchecked<'v, StructRef<'v>>, + async_ctx: Rc>>, + output_sink: Rc>, + error_sink: Rc>, + digest_config: DigestConfig, + ) -> anyhow::Result { let root_data = RootBxlContextData { cli_args, output_stream: heap.alloc_typed(OutputStream::new( - project_fs.clone(), - artifact_fs.clone(), + core.project_fs.clone(), + core.artifact_fs.clone(), output_sink, - async_ctx.dupe(), )), error_stream: heap.alloc_typed(OutputStream::new( - project_fs.clone(), - artifact_fs.clone(), + core.project_fs.clone(), + core.artifact_fs.clone(), error_sink, - async_ctx.dupe(), )), - materializations: Arc::new(DashMap::new()), }; - let context_type = BxlContextType::Root(root_data); Ok(Self { async_ctx: async_ctx.clone(), data: BxlContextNoDice { - current_bxl, - target_alias_resolver, - cell_name, - cell_root_abs, - cell_resolver, state: heap.alloc_typed(AnalysisActions { state: RefCell::new(None), - // TODO(nga): attributes struct should not be accessible to BXL. 
- attributes: ValueOfUnchecked::new_checked(heap.alloc(AllocStruct::EMPTY)) - .unwrap(), - plugins: heap - .alloc_typed(AnalysisPlugins::new(SmallMap::new())) - .into(), + attributes: None, + plugins: None, digest_config, }), - global_target_platform, context_type, - project_fs, - artifact_fs, + core, }, }) } pub(crate) fn new_dynamic( heap: &'v Heap, - current_bxl: BxlKey, - target_alias_resolver: BuckConfigTargetAliasResolver, - project_fs: ProjectRoot, - artifact_fs: ArtifactFs, - cell_resolver: CellResolver, - cell_name: CellName, - async_ctx: Rc>>, + core: Rc, + async_ctx: Rc>>, digest_config: DigestConfig, - global_target_platform: Option, analysis_registry: AnalysisRegistry<'v>, dynamic_data: DynamicBxlContextData, ) -> anyhow::Result { - let cell_root_abs = project_fs.root().join( - cell_resolver - .get(cell_name)? - .path() - .as_project_relative_path(), - ); - Ok(Self { async_ctx, data: BxlContextNoDice { - current_bxl, - target_alias_resolver, - cell_name, - cell_root_abs, - cell_resolver, state: heap.alloc_typed(AnalysisActions { state: RefCell::new(Some(analysis_registry)), - // TODO(nga): attributes struct should not be accessible to BXL. - attributes: ValueOfUnchecked::new_checked(heap.alloc(AllocStruct::EMPTY)) - .unwrap(), - plugins: heap - .alloc_typed(AnalysisPlugins::new(SmallMap::new())) - .into(), + attributes: None, + plugins: None, digest_config, }), - global_target_platform, context_type: BxlContextType::Dynamic(dynamic_data), - project_fs, - artifact_fs, + core, }, }) } @@ -352,10 +368,10 @@ impl<'v> BxlContext<'v> { /// This should generally only be called at the top level functions in bxl. /// Within the lambdas, use the existing reference to Dice provided instead of calling nested /// via_dice, as that breaks borrow invariants of the dice computations. - pub fn via_dice<'a, 's, T>( + pub(crate) fn via_dice<'a, 's, T>( &'a self, f: impl for<'x> FnOnce( - RefMut<'x, BxlSafeDiceComputations<'v>>, + &'x mut dyn BxlDiceComputations, &'a BxlContextNoDice<'v>, ) -> anyhow::Result, ) -> anyhow::Result @@ -363,65 +379,68 @@ impl<'v> BxlContext<'v> { 'v: 'a, { let data = &self.data; - f(self.async_ctx.borrow_mut(), data) - } - - pub(crate) fn project_root(&self) -> &ProjectRoot { - self.data.project_root() - } - - /// Working dir for resolving literals. - /// Note, unlike buck2 command line UI, we resolve targets and literals - /// against the cell root instead of user working dir. - pub(crate) fn working_dir(&self) -> anyhow::Result { - self.data.working_dir() + f(&mut *self.async_ctx.borrow_mut(), data) } /// Must take an `AnalysisContext` and `OutputStream` which has never had `take_state` called on it before. 
pub(crate) fn take_state( value: ValueTyped<'v, BxlContext<'v>>, - ) -> anyhow::Result<( - Option>, - IndexSet, - Arc>, - )> { + ) -> anyhow::Result<(AnalysisRegistry<'v>, IndexSet)> { let this = value.as_ref(); let root_data = this.data.context_type.unpack_root()?; let output_stream = &root_data.output_stream; - let materializations = &root_data.materializations; - Ok(( - this.data.state.as_ref().state.borrow_mut().take(), - // artifacts should be bound by now as the bxl has finished running - output_stream - .as_ref() - .take_artifacts() - .into_iter() - .map(|ensured_artifact_type| match ensured_artifact_type { - EnsuredArtifactOrGroup::Artifact(artifact) => { - let as_artifact = artifact.as_artifact(); - let bound_artifact = as_artifact.get_bound_artifact()?; - let associated_artifacts = as_artifact.get_associated_artifacts(); + let analysis_registry = this + .data + .state + .as_ref() + .state + .borrow_mut() + .take() + .map(Ok) + .unwrap_or_else(|| { + // BXL did not request actions, so we don't know execution platform. + // It doesn't matter what owner/platform we put here because + // the registry is empty, nothing will be fetched from it. + AnalysisRegistry::new_from_owner( + this.core + .current_bxl + .dupe() + .into_base_deferred_key(BxlExecutionResolution::unspecified()), + ExecutionPlatformResolution::unspecified(), + ) + })?; + + // artifacts should be bound by now as the bxl has finished running + let artifacts = output_stream + .as_ref() + .take_artifacts() + .into_iter() + .map(|ensured_artifact_type| match ensured_artifact_type { + EnsuredArtifactOrGroup::Artifact(artifact) => { + let as_artifact = artifact.as_artifact(); + let bound_artifact = as_artifact.get_bound_artifact()?; + let associated_artifacts = as_artifact.get_associated_artifacts(); + + Ok(associated_artifacts + .iter() + .flat_map(|v| v.iter()) + .cloned() + .chain(iter::once(ArtifactGroup::Artifact(bound_artifact))) + .collect::>()) + } + EnsuredArtifactOrGroup::ArtifactGroup(ag) => Ok(vec![ag]), + }) + .flatten_ok() + .collect::>>()?; - Ok(associated_artifacts - .iter() - .flat_map(|v| v.iter()) - .cloned() - .chain(iter::once(ArtifactGroup::Artifact(bound_artifact))) - .collect::>()) - } - EnsuredArtifactOrGroup::ArtifactGroup(ag) => Ok(vec![ag]), - }) - .flatten_ok() - .collect::>>()?, - materializations.dupe(), - )) + Ok((analysis_registry, artifacts)) } /// Must take an `AnalysisContext` which has never had `take_state` called on it before. pub(crate) fn take_state_dynamic(&self) -> anyhow::Result> { let state = self.data.state.as_ref(); - state.state().assert_no_promises()?; + state.state()?.assert_no_promises()?; Ok(state .state @@ -431,200 +450,26 @@ impl<'v> BxlContext<'v> { } } -impl<'v> BxlContextNoDice<'v> { +pub(crate) trait ErrorPrinter { + fn print_to_error_stream(&self, msg: String) -> anyhow::Result<()>; +} + +impl<'v> ErrorPrinter for BxlContextNoDice<'v> { // Used for caching error logs emitted from within the BXL core. - pub(crate) fn print_to_error_stream(&self, msg: String) -> anyhow::Result<()> { + fn print_to_error_stream(&self, msg: String) -> anyhow::Result<()> { match &self.context_type { BxlContextType::Root(root) => writeln!(root.error_stream.sink.borrow_mut(), "{}", msg)?, BxlContextType::Dynamic(_) => console_message(msg), } Ok(()) } - - pub(crate) fn project_root(&self) -> &ProjectRoot { - &self.project_fs - } - - /// Working dir for resolving literals. 
- /// Note, unlike buck2 command line UI, we resolve targets and literals - /// against the cell root instead of user working dir. - pub(crate) fn working_dir(&self) -> anyhow::Result { - let cell = self.cell_resolver.get(self.cell_name)?; - Ok(cell.path().as_project_relative_path().to_owned()) - } - - pub(crate) fn parse_query_file_literal(&self, literal: &str) -> anyhow::Result { - parse_query_file_literal( - literal, - self.cell_resolver - .get(self.cell_name)? - .cell_alias_resolver(), - &self.cell_resolver, - // NOTE(nga): we pass cell root as working directory here, - // which is inconsistent with the rest of buck2: - // The same query `owner(foo.h)` is resolved using - // current directory in `buck2 query`, but relative to cell root in BXL. - &self.cell_root_abs, - self.project_root(), - ) - } -} - -pub(crate) async fn eval_bxl_for_dynamic_output<'v>( - base_deferred_key: &'v Arc, - dynamic_lambda: &'v DynamicLambda, - deferred_ctx: &'v mut dyn DeferredCtx, - dice_ctx: &'v mut DiceComputations, -) -> anyhow::Result> { - // TODO(wendyy) emit telemetry, support profiler - let env = Module::new(); - let liveness = deferred_ctx.liveness(); - let dynamic_key = - BxlDynamicKey::from_base_deferred_key_dyn_impl_err(base_deferred_key.clone())?; - let key = dynamic_key.key(); - let dynamic_data = DynamicBxlContextData { - exec_deps: dynamic_key.0.exec_deps.clone(), - toolchains: dynamic_key.0.toolchains.clone(), - }; - let global_target_platform = key.global_target_platform().dupe(); - let label = key.label(); - let cell_resolver = dice_ctx.get_cell_resolver().await?; - let cell = label.bxl_path.cell(); - let bxl_cell = cell_resolver - .get(cell) - .with_context(|| format!("Cell does not exist: `{}`", cell))? - .dupe(); - let cell_name = bxl_cell.name(); - let target_alias_resolver = dice_ctx.target_alias_resolver_for_cell(cell_name).await?; - let artifact_fs = dice_ctx.get_artifact_fs().await?; - let digest_config = dice_ctx.global_data().get_digest_config(); - let project_fs = dice_ctx - .global_data() - .get_io_provider() - .project_root() - .dupe(); - - let dispatcher = dice_ctx.per_transaction_data().get_dispatcher().dupe(); - let print = EventDispatcherPrintHandler(dispatcher.dupe()); - - // Note: because we use `block_in_place`, that will prevent the inner future from being polled - // and yielded. So, for cancellation observers to work properly within the dice cancellable - // future context, we need the future that it's attached to the cancellation context can - // yield and be polled. To ensure that, we have to spawn the future that then enters block_in_place - let (_, futs) = unsafe { - // SAFETY: as long as we don't `forget` the return object from `scope_and_collect`, it is safe - - // Additional cancellation notes: - // the `scope_and_collect` will block on drop, but it will move the blocking to a tokio - // blocking thread, freeing up the main worker threads. Additionally, the `spawn_cancellable` - // on the scope will be dropped at the earliest await point. If we are within the blocking - // section of bxl, the cancellation observer will be notified and cause the blocking calls - // to terminate. 
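This evaluation path services `dynamic_output` lambdas written in BXL. A rough, hypothetical sketch of the Starlark side, assuming the `dynamic`/`inputs`/`outputs`/`f` parameter shape of `dynamic_output` and illustrative file names:

```python
def _impl(ctx):
    actions = ctx.bxl_actions().actions
    src = actions.write("input.txt", "hello")
    out = actions.declare_output("output.txt")

    # This lambda is what the dynamic-output evaluation above runs later; it
    # receives a bxl_ctx sharing the parent's execution platform resolution.
    def _dyn(bxl_ctx, artifacts, outputs):
        content = artifacts[src].read_string()
        bxl_ctx.bxl_actions().actions.write(outputs[out], content.upper())

    actions.dynamic_output(dynamic = [src], inputs = [], outputs = [out.as_output()], f = _dyn)
    ctx.output.ensure(out)
```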
- async_scoped::TokioScope::scope_and_collect(|s| { - s.spawn_cancellable( - with_dispatcher_async(dispatcher.dupe(), async move { - with_starlark_eval_provider( - dice_ctx, - &mut StarlarkProfilerOrInstrumentation::disabled(), - format!("bxl_dynamic:{}", "foo"), - move |provider, dice_ctx| { - tokio::task::block_in_place(|| { - let mut eval = provider.make(&env)?; - eval.set_print_handler(&print); - - let (analysis_registry, declared_outputs) = { - let dynamic_lambda_ctx_data = dynamic_lambda_ctx_data( - dynamic_lambda, - deferred_ctx, - &env, - )?; - - let async_ctx = Rc::new(RefCell::new( - BxlSafeDiceComputations::new(dice_ctx, liveness), - )); - - let bxl_dynamic_ctx = BxlContext::new_dynamic( - env.heap(), - key, - target_alias_resolver, - project_fs, - artifact_fs, - cell_resolver, - cell_name, - async_ctx, - digest_config, - global_target_platform, - dynamic_lambda_ctx_data.registry, - dynamic_data, - )?; - - let ctx = ValueTyped::::new( - env.heap().alloc(bxl_dynamic_ctx), - ) - .unwrap(); - - eval.eval_function( - dynamic_lambda_ctx_data.lambda, - &[ - ctx.to_value(), - dynamic_lambda_ctx_data.artifacts, - dynamic_lambda_ctx_data.outputs, - ], - &[], - )?; - - ( - ctx.take_state_dynamic()?, - dynamic_lambda_ctx_data.declared_outputs, - ) - }; - - std::mem::drop(eval); - - let (_frozen_env, deferred) = - analysis_registry.finalize(&env)?(env)?; - let _fake_registry = - std::mem::replace(deferred_ctx.registry(), deferred); - let output: anyhow::Result> = declared_outputs - .into_iter() - .map(|x| anyhow::Ok(x.ensure_bound()?.action_key().dupe())) - .collect(); - output - }) - }, - ) - .await - }), - || Err(anyhow::anyhow!("cancelled")), - ) - }) - } - .await; - - match futs.into_iter().exactly_one() { - Ok(res) => res?, - Err(_) => panic!("only spawned one task"), - } -} - -pub(crate) fn init_eval_bxl_for_dynamic_output() { - EVAL_BXL_FOR_DYNAMIC_OUTPUT.init( - |base_deferred_key, dynamic_lambda, deferred_ctx, dice_ctx| { - Box::pin(eval_bxl_for_dynamic_output( - base_deferred_key, - dynamic_lambda, - deferred_ctx, - dice_ctx, - )) - }, - ); } -#[starlark_value(type = "bxl_ctx", StarlarkTypeRepr, UnpackValue)] +#[starlark_value(type = "bxl.Context", StarlarkTypeRepr, UnpackValue)] impl<'v> StarlarkValue<'v> for BxlContext<'v> { fn get_methods() -> Option<&'static Methods> { static RES: MethodsStatic = MethodsStatic::new(); - RES.methods(context_methods) + RES.methods(methods::bxl_context_methods) } } @@ -633,712 +478,3 @@ impl<'v> AllocValue<'v> for BxlContext<'v> { heap.alloc_complex_no_freeze(self) } } - -/// The bxl context that the top level bxl implementation receives as parameter. -/// This context contains all the core bxl functions to query, build, create actions, etc. -#[starlark_module] -fn context_methods(builder: &mut MethodsBuilder) { - /// Gets the output stream to the console via stdout. Items written to the output stream - /// are considered to be the results of a bxl script, which will be displayed to stdout by - /// buck2 even when the script is cached. - /// - /// Prints that are not result of the bxl should be printed via stderr via the stdlib `print` - /// and `pprint`. - /// - /// This function is not available on the `bxl_ctx` when called from `dynamic_output`. - #[starlark(attribute)] - fn output<'v>(this: &'v BxlContext) -> anyhow::Result> { - let output_stream = this - .data - .context_type - .unpack_root() - .context(BxlContextDynamicError::Unsupported("output".to_owned()))? 
- .output_stream; - Ok(output_stream.to_value()) - } - - /// Returns the absolute path to the root of the repository - /// - /// This function is not available on the `bxl_ctx` when called from `dynamic_output`. - fn root<'v>(this: &'v BxlContext<'v>) -> anyhow::Result { - let _root_type = this - .data - .context_type - .unpack_root() - .context(BxlContextDynamicError::Unsupported("root".to_owned()))?; - Ok(this - .async_ctx - .borrow() - .global_data() - .get_io_provider() - .project_root() - .root() - .to_str()? - .to_owned()) - } - - /// Returns the absolute path to the cell of the repository - /// - /// This function is not available on the `bxl_ctx` when called from `dynamic_output`. - fn cell_root<'v>(this: &'v BxlContext<'v>) -> anyhow::Result { - let _root_type = this - .data - .context_type - .unpack_root() - .context(BxlContextDynamicError::Unsupported("root".to_owned()))?; - Ok(this.data.cell_root_abs.to_owned().to_string()) - } - - /// Gets the target nodes for the `labels`, accepting an optional `target_platform` which is the - /// target platform configuration used to resolve configurations of any unconfigured target - /// nodes. - /// The `target_platform` is either a string that can be parsed as a target label, or a - /// target label. - /// - /// The given `labels` is a [`TargetExpr`], which is either: - /// - a single string that is a `target pattern`. - /// - a single target node or label, configured or unconfigured - /// - a list of the two options above. - /// - /// Note that this function does not accept `Label` (which is a configured provider label), since this - /// is the label of a subtarget. You can get the underlying configured target label on the `Label` - /// using `configured_targets()` (ex: `my_label.configured_target()`). - /// - /// This returns either a single [`StarlarkConfiguredTargetNode`] if the given `labels` - /// is "singular", a dict keyed by target labels of [`StarlarkConfiguredTargetNode`] if the - /// given `labels` is list-like - fn configured_targets<'v>( - this: &'v BxlContext<'v>, - #[starlark(require = pos)] labels: Value<'v>, - #[starlark(default = NoneType)] target_platform: Value<'v>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { - let target_platform = target_platform.parse_target_platforms( - &this.data.target_alias_resolver, - &this.data.cell_resolver, - this.data.cell_name, - &this.data.global_target_platform, - )?; - - let res: anyhow::Result> = this.via_dice(|mut dice, this| { - dice.via(|ctx| { - async move { - let target_expr = - TargetExpr::<'v, ConfiguredTargetNode>::unpack_allow_unconfigured( - labels, - &target_platform, - this, - ctx, - eval, - ) - .await?; - - Ok(match target_expr { - TargetExpr::Label(label) => { - let set = filter_incompatible( - iter::once(ctx.get_configured_target_node(&label).await?), - this, - )?; - - // When a target label is passed in, we should only get one target node. - // filter_incompatible() returns a set, so lets assert the size - assert!(set.len() <= 1); - - if let Some(node) = set.iter().next() { - node.clone().alloc(eval.heap()) - } else { - Value::new_none() - } - } - - TargetExpr::Node(node) => node.alloc(eval.heap()), - multi => eval - .heap() - .alloc(StarlarkTargetSet::from(filter_incompatible( - multi.get(ctx).await?.into_iter(), - this, - )?)), - }) - } - .boxed_local() - }) - }); - - res - } - - /// Gets the unconfigured target nodes for the `labels` - /// - /// The given `labels` is either: - /// - a single string that is a `target pattern`. 
- /// - a single unconfigured target node or label - /// - a list of the two options above. - /// - /// This returns either a single [`StarlarkTargetNode`] if the given `labels` - /// is "singular", a dict keyed by target labels of [`StarlarkTargetNode`] if the - /// given `labels` is list-like - fn unconfigured_targets<'v>( - this: &'v BxlContext<'v>, - labels: Value<'v>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { - let res: anyhow::Result> = this.via_dice(|mut ctx, this| { - ctx.via(|ctx| { - async move { - Ok( - match TargetExpr::<'v, TargetNode>::unpack(labels, this, ctx, eval).await? { - TargetExpr::Label(label) => { - let node = ctx.get_target_node(&label).await?; - - node.alloc(eval.heap()) - } - - TargetExpr::Node(node) => node.alloc(eval.heap()), - multi => eval - .heap() - .alloc(StarlarkTargetSet::from(multi.get(ctx).await?.into_owned())), - }, - ) - } - .boxed_local() - }) - }); - - res - } - - /// Gets the unconfigured subtargets for the given `labels` - /// - /// The given `labels` is a providers expression, which is either: - /// - a single string that is a `target pattern`. - /// - a single target node or label, configured or unconfigured - /// - a single subtarget label, configured or unconfigured - /// - a list of the two options above. - /// - /// This returns either a single [`StarlarkProvidersLabel`] if the given `labels` - /// is "singular", or dict of the subtarget string representation to the - /// [`StarlarkProvidersLabel`] if the given `labels` is list-like. - /// - /// Note that this function does not check that this subtarget exists in the repo. - fn unconfigured_sub_targets<'v>( - this: &BxlContext<'v>, - labels: Value<'v>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { - let providers = this.via_dice(|mut dice, this| { - dice.via(|_| ProvidersExpr::::unpack(labels, this, eval).boxed_local()) - })?; - - let res = match providers { - ProvidersExpr::Literal(provider) => { - eval.heap().alloc(StarlarkProvidersLabel::new(provider)) - } - ProvidersExpr::Iterable(providers) => eval.heap().alloc(Dict::new( - providers - .into_iter() - .map(|p| { - Ok(( - eval.heap() - .alloc_str(&p.to_string()) - .to_value() - .get_hashed()?, - eval.heap().alloc(StarlarkProvidersLabel::new(p)), - )) - }) - .collect::>()?, - )), - }; - - Ok(res) - } - - /// Returns the [`StarlarkTargetUniverse`] that can lookup valid configured nodes in the universe. - /// - /// The given `labels` is a target expression, which is either: - /// - a single string that is a `target pattern`. - /// - a single target node or label, configured or unconfigured - /// - a single subtarget label, configured or unconfigured - /// - a list of the two options above. - /// - /// Also takes in an optional `target_platform` param to configure the nodes with. 
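A short, hypothetical sketch of these target expression forms in use; the labels, platform, and `[shared]` subtarget are illustrative, not part of this change:

```python
def _impl(ctx):
    # Singular input -> a single node; list input -> a target set / dict.
    node = ctx.configured_targets("root//:main", target_platform = "root//platforms:default")
    nodes = ctx.unconfigured_targets(["root//:main", "root//lib/..."])
    sub = ctx.unconfigured_sub_targets("root//:main[shared]")
    universe = ctx.target_universe("root//...")
    ctx.output.print(node.label)
```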
- fn target_universe<'v>( - this: &'v BxlContext<'v>, - labels: Value<'v>, - #[starlark(default = NoneType)] target_platform: Value<'v>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { - let target_platform = target_platform.parse_target_platforms( - &this.data.target_alias_resolver, - &this.data.cell_resolver, - this.data.cell_name, - &this.data.global_target_platform, - )?; - - this.via_dice(|mut ctx, this_no_dice: &BxlContextNoDice<'_>| { - ctx.via(|ctx| { - async move { - let target_expr = - TargetExpr::<'v, ConfiguredTargetNode>::unpack_allow_unconfigured( - labels, - &target_platform, - this_no_dice, - ctx, - eval, - ) - .await?; - - let target_set = match target_expr { - TargetExpr::Label(label) => filter_incompatible( - iter::once(ctx.get_configured_target_node(&label).await?), - this_no_dice, - )?, - TargetExpr::Node(node) => { - let mut set = TargetSet::new(); - set.insert(node); - set - } - multi => { - filter_incompatible(multi.get(ctx).await?.into_iter(), this_no_dice)? - } - }; - - StarlarkTargetUniverse::new(this, target_set).await - } - .boxed_local() - }) - }) - } - - /// Returns the [`StarlarkUQueryCtx`] that holds all uquery functions. - fn uquery<'v>(this: &'v BxlContext<'v>) -> anyhow::Result> { - StarlarkUQueryCtx::new(this) - } - - /// Returns the [`StarlarkCQueryCtx`] that holds all the cquery functions. - /// This function takes an optional parameter `target_platform`, which is the target platform - /// configuration used to configured any unconfigured target nodes. - /// - /// The `target_platform` is a target label, or a string that is a target label. - fn cquery<'v>( - this: &'v BxlContext<'v>, - // TODO(brasselsprouts): I would like to strongly type this. - #[starlark(default = NoneType)] target_platform: Value<'v>, - ) -> anyhow::Result> { - StarlarkCQueryCtx::new(this, target_platform, &this.data.global_target_platform) - } - - /// Returns the [`StarlarkAQueryCtx`] that holds all the aquery functions. - /// This function takes an optional parameter `target_platform`, which is the target platform - /// configuration used to configured any unconfigured target nodes. - /// - /// The `target_platform` is a target label, or a string that is a target label. - fn aquery<'v>( - this: &'v BxlContext<'v>, - #[starlark(default = NoneType)] target_platform: Value<'v>, - ) -> anyhow::Result> { - StarlarkAQueryCtx::new(this, target_platform, &this.data.global_target_platform) - } - - /// Returns the bxl actions to create and register actions for this - /// bxl function. This will have the execution platform resolved according to the execution - /// deps and toolchains you pass into this function. - /// You'll be able to access the analysis action factory of the correct execution platform, - /// toolchains, and execution deps of the corresponding configuration via this context. - /// - /// Actions created by bxl will not be built by default. Instead, they are marked to be built - /// by `ctx.output.ensure(artifact)` on the output module of the [`BxlContext`]. Only artifacts - /// marked by ensure will be built. - /// - /// Sample usage: - /// ```python - /// def _impl_write_action(ctx): - /// bxl_actions = ctx.bxl_actions() - /// output = bxl_actions.actions.write("my_output", "my_content") - /// ensured = ctx.output.ensure(output) - /// ctx.output.print(ensured) - /// ``` - /// - /// There are several optional named parameters: - /// - /// `exec_deps` - These are dependencies you wish to access as executables for creating the action. 
- /// This is usually the same set of targets one would pass to rule's `attr.exec_dep`. - /// `toolchains` - The set of toolchains needed for the actions you intend to create. - /// `target_platform` - The intended target platform for your toolchains - /// `exec_compatible_with` - Explicit list of configuration nodes (like platforms or constraints) - /// that these actions are compatible with. This is the 'exec_compatible_with' attribute of a target. - /// - /// If you passed in `exec_deps` or `toolchains`, you can access the resolved dependencies using the `exec_deps` - /// and `toolchains` attributes on the `bxl_actions`, which both return a `dict` of unconfigured subtarget labels - /// and their configured/resolved `dependency` objects. - /// - /// Note that the keys of `exec_deps` and `toolchains` must be unconfigured subtarget labels (`StarlarkProvidersLabel`), - /// and not unconfigured target labels. You can use `ctx.unconfigured_sub_targets(...)` or `with_sub_target()` on - /// `target_label` to create the label. - /// - /// ```python - /// def _impl_run_action(ctx): - /// my_exec_dep = ctx.unconfigured_sub_targets("foo//bar:baz") # has some provider that you would use in the action - /// bxl_actions = ctx.bxl_actions(exec_deps = [my_exec_dep]) # call once, reuse wherever needed - /// output = bxl_actions.actions.run( - /// [ - /// "python3", - /// bxl_actions.exec_deps[my_exec_dep][RunInfo], # access resolved exec_deps on the `bxl_actions` - /// out.as_output(), - /// ], - /// category = "command", - /// local_only = True, - /// ) - /// ctx.output.ensure(output) - /// ``` - /// - /// When called from a `dynamic_output`, `bxl_actions()` cannot be configured with a different execution - /// platform resolution from the parent BXL. - fn bxl_actions<'v>( - this: &'v BxlContext<'v>, - #[starlark(require = named, default = NoneType)] exec_deps: Value<'v>, - #[starlark(require = named, default = NoneType)] toolchains: Value<'v>, - #[starlark(require = named, default = NoneType)] target_platform: Value<'v>, - #[starlark(require = named, default = NoneType)] exec_compatible_with: Value<'v>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { - this.via_dice(|mut ctx, this| { - ctx.via(|ctx| { - async { - let (exec_deps, toolchains) = match &this.context_type { - BxlContextType::Root { .. } => { - let target_platform = target_platform.parse_target_platforms( - &this.target_alias_resolver, - &this.cell_resolver, - this.cell_name, - &this.global_target_platform, - )?; - let exec_deps = if exec_deps.is_none() { - Vec::new() - } else { - ProvidersExpr::::unpack(exec_deps, this, eval) - .await? - .labels() - .cloned() - .collect() - }; - - let toolchains = if toolchains.is_none() { - Vec::new() - } else { - ProvidersExpr::::unpack(toolchains, this, eval) - .await? - .labels() - .cloned() - .collect() - }; - - let exec_compatible_with = if exec_compatible_with.is_none() { - Vec::new() - } else { - TargetExpr::::unpack( - exec_compatible_with, - this, - ctx, - eval, - ) - .await? - .get(ctx) - .await? 
- .iter() - .map(|n| n.label().dupe()) - .collect() - }; - - let execution_resolution = resolve_bxl_execution_platform( - ctx, - this.cell_name, - exec_deps, - toolchains, - target_platform.clone(), - exec_compatible_with.clone(), - eval.module(), - ) - .await?; - - validate_action_instantiation(this, &execution_resolution)?; - - ( - execution_resolution.exec_deps_configured, - execution_resolution.toolchain_deps_configured, - ) - } - BxlContextType::Dynamic(data) => { - if !exec_deps.is_none() - || !toolchains.is_none() - || !target_platform.is_none() - || !exec_compatible_with.is_none() - { - return Err( - BxlContextDynamicError::RequireSameExecutionPlatformAsRoot - .into(), - ); - } - (data.exec_deps.clone(), data.toolchains.clone()) - } - }; - - BxlActions::new( - this.state, - exec_deps.to_vec(), - toolchains.to_vec(), - eval, - ctx, - ) - .await - } - .boxed_local() - }) - }) - } - - /// Runs analysis on the given `labels`, accepting an optional `target_platform` which is the - /// target platform configuration used to resolve configurations of any unconfigured target - /// nodes, and an optional `skip_incompatible` boolean that indicates whether to skip analysis - /// of nodes that are incompatible with the target platform. - /// The `target_platform` is either a string that can be parsed as a target label, or a - /// target label. - /// - /// The given `labels` is a providers expression, which is either: - /// - a single string that is a `target pattern`. - /// - a single target node or label, configured or unconfigured - /// - a single sub target label, configured or unconfigured - /// - a list of the two options above. - /// - /// This returns either a single [`StarlarkAnalysisResult`] if the given `labels` is "singular", - /// or a dict keyed by sub target labels of [`StarlarkAnalysisResult`] if the given `labels` - /// is list-like - fn analysis<'v>( - this: &BxlContext<'v>, - labels: Value<'v>, - #[starlark(default = NoneType)] target_platform: Value<'v>, - #[starlark(require = named, default = true)] skip_incompatible: bool, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { - let target_platform = target_platform.parse_target_platforms( - &this.data.target_alias_resolver, - &this.data.cell_resolver, - this.data.cell_name, - &this.data.global_target_platform, - )?; - - let res: anyhow::Result<_> = this.via_dice(|mut dice, ctx| { - dice.via(|dice| { - async { - let providers = ProvidersExpr::::unpack( - labels, - target_platform, - ctx, - dice, - eval, - ) - .await?; - analysis::analysis(dice, ctx, providers, skip_incompatible).await - } - .boxed_local() - }) - }); - - Ok(match res? { - Either::Left(single) => eval.heap().alloc(single), - Either::Right(many) => eval.heap().alloc(Dict::new( - many.into_iter() - .map(|(t, v)| { - Ok(( - eval.heap() - .alloc(StarlarkConfiguredProvidersLabel::new(t)) - .get_hashed()?, - eval.heap().alloc(v), - )) - }) - .collect::>()?, - )), - }) - } - - /// Runs a build on the given `labels`, accepting an optional `target_platform` which is the - /// target platform configuration used to resolve configurations. Note that when `build()` is called, - /// the artifacts are materialized without needing to additionally call `ensure()` on them. - /// - /// The given `labels` is a providers expression, which is either: - /// - a single string that is a `target pattern`. - /// - a single target node or label, configured or unconfigured - /// - a single provider label, configured or unconfigured - /// - a list of the two options above. 
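A minimal, hypothetical sketch combining `analysis()` (documented above) with `build()`; the label, platform, and `materializations` value are illustrative:

```python
def _impl(ctx):
    result = ctx.analysis("root//:main", target_platform = "root//platforms:default")
    ctx.output.print(result.providers())

    built = ctx.build("root//:main", materializations = "materialize")
    for label in built:
        ctx.output.print(label)
```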
- /// - /// This returns a dict keyed by sub target labels of [`StarlarkBuildResult`] if the - /// given `labels` is list-like - /// - /// This function is not available on the `bxl_ctx` when called from `dynamic_output`. - fn build<'v>( - this: &'v BxlContext<'v>, - labels: Value<'v>, - #[starlark(default = NoneType)] target_platform: Value<'v>, - #[starlark(require = named, default = "default")] materializations: &str, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { - let materialization_setting = materializations; - let materializations = &this - .data - .context_type - .unpack_root() - .context(BxlContextDynamicError::Unsupported("build".to_owned()))? - .materializations; - Ok(eval.heap().alloc(Dict::new(build::build( - this, - materializations, - labels, - target_platform, - Materializations::from_str_name(&materialization_setting.to_uppercase()).ok_or_else( - || { - anyhow::anyhow!( - "Unknown materialization setting `{}`", - materialization_setting - ) - }, - )?, - eval, - )?))) - } - - /// A struct of the command line args as declared using the [`cli_args`] module. - /// These command lines are resolved per the users input on the cli when invoking the bxl script. - /// - /// If you wish to pass in a kebab-cased arg, the arg accessed from the BXL context's `cli_args` - /// attrbute will always be in snakecase. For example, if you passed in `my-arg`, accessing it - /// within BXL would look like `ctx.cli_args.my_arg`. - /// - /// This attribute is not available on the bxl context within the a dynamic lambda. - #[starlark(attribute)] - fn cli_args<'v>(this: &BxlContext<'v>) -> anyhow::Result> { - let cli_args = this - .data - .context_type - .unpack_root() - .context(BxlContextDynamicError::Unsupported("cli_args".to_owned()))? - .cli_args; - - Ok(cli_args) - } - - /// Returns the [`BxlFilesystem`] for performing a basic set of filesystem operations within bxl - #[starlark(attribute)] - fn fs<'v>(this: &BxlContext<'v>) -> anyhow::Result> { - Ok(BxlFilesystem::new(this)) - } - - /// Checks if a target label exists. Target label must be a string literal, and an exact target. - fn target_exists<'v>(this: &'v BxlContext<'v>, label: &'v str) -> anyhow::Result { - this.via_dice(|mut ctx, this_no_dice: &BxlContextNoDice<'_>| { - ctx.via(|ctx| { - async move { - match ParsedPattern::::parse_relaxed( - &this_no_dice.target_alias_resolver, - CellPathRef::new(this_no_dice.cell_name, CellRelativePath::empty()), - label, - &this_no_dice.cell_resolver, - )? { - ParsedPattern::Target(pkg, name, TargetPatternExtra) => { - let target_label = TargetLabel::new(pkg, name.as_ref()); - Ok(ctx.get_target_node(&target_label).await.ok().is_some()) - } - _ => Err(anyhow::anyhow!(NotATargetLabelString)), - } - } - .boxed_local() - }) - }) - } - - /// Returns the [`StarlarkAuditCtx`] that holds all the audit functions. - fn audit<'v>(this: &'v BxlContext<'v>) -> anyhow::Result> { - let (working_dir, cell_resolver) = this.via_dice(|mut ctx, this| { - ctx.via(|ctx| { - async move { - Ok(( - this.cell_resolver - .get(this.cell_name)? - .path() - .as_project_relative_path() - .to_buf(), - ctx.get_cell_resolver().await?, - )) - } - .boxed_local() - }) - })?; - - StarlarkAuditCtx::new( - this, - working_dir, - cell_resolver, - this.data.global_target_platform.clone(), - ) - } - - /// Awaits a promise and returns an optional value of the promise. 
- /// - /// Sample usage: - /// ```python - /// load("//path/to/rules:rules.bzl", "my_anon_targets_rule", "my_map_function") - /// - /// def _resolve_impl(ctx): - /// actions = ctx.bxl_actions().actions - /// my_attrs = { - /// "false": False, - /// "int": 42, - /// "list_string": ["a", "b", "c"], - /// "string": "a-string", - /// "true": True, - /// } - /// - /// promise = actions.anon_target(my_anon_targets_rule, attrs).promise.map(my_map_function) - /// providers_result = ctx.resolve(actions, promise) # result is `provider_collection` type, which is a collection of `provider`s - /// ctx.output.print(providers_result[0].my_field) - /// ``` - fn resolve<'v>( - this: &'v BxlContext<'v>, - action_factory: ValueTyped<'v, AnalysisActions<'v>>, - promise: ValueTyped<'v, StarlarkPromise<'v>>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result>> { - this.via_dice(|mut dice, this| { - dice.via(|dice| { - action_factory - .run_promises(dice, eval, format!("bxl$promises:{}", &this.current_bxl)) - .boxed_local() - }) - })?; - Ok(promise.get()) - } - - /// Emits a user-defined instant event, taking in a required string id and a metadata dictionary where the - /// keys are strings, and values are either strings, bools, or ints. The id is user-supplied, and used to - /// identify the instant events in the event logs more easily. - /// - /// You may pass in an ensured artifact as a value in the metadata. The resulting output would be the ensured - /// artifact's relative or absolute path as a string. - fn instant_event<'v>( - this: &'v BxlContext<'v>, - #[starlark(require = named)] id: &str, - #[starlark(require = named)] metadata: Value<'v>, - ) -> anyhow::Result { - let parser = StarlarkUserEventParser { - artifact_fs: &this.data.artifact_fs, - project_fs: &this.data.project_fs, - }; - let event = parser.parse(id, metadata)?; - - this.async_ctx - .borrow() - .per_transaction_data() - .get_dispatcher() - .instant_event(event); - - Ok(NoneType) - } -} diff --git a/app/buck2_bxl/src/bxl/starlark_defs/context/actions.rs b/app/buck2_bxl/src/bxl/starlark_defs/context/actions.rs index f81fb6309de57..2945d9b54d60b 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/context/actions.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/context/actions.rs @@ -8,40 +8,41 @@ */ //! Starlark Actions API for bxl functions -//! 
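One more sketch for the context methods removed above: `instant_event`, whose doc had no sample. The id and metadata values are illustrative:

```python
def _impl(ctx):
    # Keys must be strings; values may be strings, bools, or ints.
    ctx.instant_event(
        id = "my_step",
        metadata = {"phase": "analysis", "attempts": 1, "cached": False},
    )
```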
+use std::sync::Arc;
+
 use allocative::Allocative;
 use buck2_build_api::analysis::calculation::RuleAnalysisCalculation;
 use buck2_build_api::analysis::registry::AnalysisRegistry;
 use buck2_build_api::interpreter::rule_defs::context::AnalysisActions;
 use buck2_build_api::interpreter::rule_defs::provider::dependency::Dependency;
-use buck2_configured::configuration::calculation::ConfigurationCalculation;
-use buck2_configured::nodes::calculation::ExecutionPlatformConstraints;
-use buck2_core::base_deferred_key::BaseDeferredKey;
 use buck2_core::cells::name::CellName;
 use buck2_core::configuration::data::ConfigurationData;
 use buck2_core::configuration::pair::ConfigurationNoExec;
 use buck2_core::execution_types::execution::ExecutionPlatformResolution;
 use buck2_core::provider::label::ConfiguredProvidersLabel;
 use buck2_core::provider::label::ProvidersLabel;
-use buck2_core::target::label::TargetLabel;
+use buck2_core::target::label::label::TargetLabel;
+use buck2_core::target::target_configured_target_label::TargetConfiguredTargetLabel;
 use buck2_interpreter::types::configured_providers_label::StarlarkProvidersLabel;
 use buck2_node::attrs::configuration_context::AttrConfigurationContext;
 use buck2_node::attrs::configuration_context::AttrConfigurationContextImpl;
+use buck2_node::configuration::calculation::CONFIGURATION_CALCULATION;
+use buck2_node::configuration::resolved::ConfigurationSettingKey;
+use buck2_node::execution::GET_EXECUTION_PLATFORMS;
 use derivative::Derivative;
 use derive_more::Display;
 use dice::DiceComputations;
 use dupe::Dupe;
+use futures::FutureExt;
 use gazebo::prelude::SliceExt;
 use starlark::any::ProvidesStaticType;
-use starlark::collections::SmallMap;
 use starlark::environment::Methods;
 use starlark::environment::MethodsBuilder;
 use starlark::environment::MethodsStatic;
-use starlark::environment::Module;
 use starlark::eval::Evaluator;
 use starlark::starlark_module;
-use starlark::values::dict::Dict;
-use starlark::values::dict::DictRef;
+use starlark::values::dict::AllocDict;
+use starlark::values::dict::DictType;
 use starlark::values::starlark_value;
 use starlark::values::AllocValue;
 use starlark::values::Heap;
@@ -51,13 +52,11 @@ use starlark::values::Trace;
 use starlark::values::Value;
 use starlark::values::ValueOfUnchecked;
 use starlark::values::ValueTyped;
-use starlark::StarlarkDocs;
 use starlark_map::ordered_map::OrderedMap;
-use thiserror::Error;

 use crate::bxl::starlark_defs::context::BxlContextNoDice;

-#[derive(Debug, Error)]
+#[derive(Debug, buck2_error::Error)]
 enum BxlActionsError {
     #[error(
         "An action registry was already requested via `ctx.bxl_actions().actions`. Only one action registry is allowed"
     )]
     RegistryAlreadyCreated,
 }

-#[allow(unused)]
 pub(crate) async fn resolve_bxl_execution_platform(
-    ctx: &DiceComputations,
+    ctx: &mut DiceComputations<'_>,
     cell: CellName,
     exec_deps: Vec<ProvidersLabel>,
     toolchain_deps: Vec<ProvidersLabel>,
     target_platform: Option<TargetLabel>,
-    exec_compatible_with: Vec<TargetLabel>,
-    module: &Module,
+    exec_compatible_with: Arc<[ConfigurationSettingKey]>,
 ) -> anyhow::Result<BxlExecutionResolution> {
     // bxl has no transitions
     let resolved_transitions = OrderedMap::new();

     let platform_configuration = match target_platform.as_ref() {
         Some(global_target_platform) => {
-            ctx.get_platform_configuration(global_target_platform)
+            CONFIGURATION_CALCULATION
+                .get()?
+                .get_platform_configuration(ctx, global_target_platform)
                 .await?
        }
         None => ConfigurationData::unspecified(),
     };

-    let resolved_configuration = {
-        ctx.get_resolved_configuration(&platform_configuration, cell, &exec_compatible_with)
-            .await?
-    };
+    let resolved_configuration = CONFIGURATION_CALCULATION
+        .get()?
+        .get_resolved_configuration(ctx, &platform_configuration, cell, &exec_compatible_with)
+        .await?;

     // there are no explicit configured deps, so platforms is empty
     let platform_cfgs = OrderedMap::new();
@@ -109,21 +108,24 @@ pub(crate) async fn resolve_bxl_execution_platform(
         .map(|t| configuration_ctx.configure_toolchain_target(t))
         .collect();

-    let execution_constraints = ExecutionPlatformConstraints::new_constraints(
-        exec_deps
-            .iter()
-            .map(|label| label.target().dupe())
-            .collect(),
-        toolchain_deps_configured
-            .iter()
-            .map(|dep| dep.target().clone())
-            .collect(),
-        exec_compatible_with,
-    );
-
-    let resolved_execution = execution_constraints.one_for_cell(ctx, cell).await?;
+    let resolved_execution = GET_EXECUTION_PLATFORMS
+        .get()?
+        .execution_platform_resolution_one_for_cell(
+            ctx,
+            exec_deps
+                .iter()
+                .map(|label| label.target().dupe())
+                .collect(),
+            toolchain_deps_configured
+                .iter()
+                .map(|dep| TargetConfiguredTargetLabel::new_without_exec_cfg(dep.target().dupe()))
+                .collect(),
+            exec_compatible_with,
+            cell,
+        )
+        .await?;

-    let mut exec_deps_configured = exec_deps.try_map(|e| {
+    let exec_deps_configured = exec_deps.try_map(|e| {
         let label =
             e.configure_pair_no_exec(resolved_execution.platform()?.cfg_pair_no_exec().dupe());
         anyhow::Ok(label)
@@ -136,34 +138,23 @@ pub(crate) async fn resolve_bxl_execution_platform(
     })
 }

-pub(crate) async fn get_dependency_for_label<'v>(
-    configured: ConfiguredProvidersLabel,
-    ctx: &DiceComputations,
-    module: &'v Module,
-) -> anyhow::Result<Dependency<'v>> {
-    let analysis_result = ctx
-        .get_analysis_result(configured.target())
-        .await?
- .require_compatible()?; - - let v = analysis_result.lookup_inner(&configured)?; - - let dependency = Dependency::new( - module.heap(), - configured, - v.value().owned_value(module.frozen_heap()), - None, - ); - - Ok(dependency) -} - +#[derive(Clone, Debug, Eq, PartialEq, Hash, Allocative)] pub(crate) struct BxlExecutionResolution { pub(crate) resolved_execution: ExecutionPlatformResolution, pub(crate) exec_deps_configured: Vec, pub(crate) toolchain_deps_configured: Vec, } +impl BxlExecutionResolution { + pub(crate) fn unspecified() -> BxlExecutionResolution { + BxlExecutionResolution { + resolved_execution: ExecutionPlatformResolution::unspecified(), + exec_deps_configured: Vec::new(), + toolchain_deps_configured: Vec::new(), + } + } +} + pub(crate) fn validate_action_instantiation( this: &BxlContextNoDice<'_>, bxl_execution_resolution: &BxlExecutionResolution, @@ -175,11 +166,9 @@ pub(crate) fn validate_action_instantiation( } else { let execution_platform = bxl_execution_resolution.resolved_execution.clone(); let analysis_registry = AnalysisRegistry::new_from_owner( - BaseDeferredKey::BxlLabel(this.current_bxl.dupe().into_base_deferred_key_dyn_impl( - execution_platform.clone(), - bxl_execution_resolution.exec_deps_configured.clone(), - bxl_execution_resolution.toolchain_deps_configured.clone(), - )), + this.current_bxl() + .dupe() + .into_base_deferred_key(bxl_execution_resolution.clone()), execution_platform, )?; @@ -195,16 +184,14 @@ pub(crate) fn validate_action_instantiation( Display, Trace, NoSerialize, - StarlarkDocs, Allocative )] -#[starlark_docs(directory = "bxl")] #[derivative(Debug)] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] pub(crate) struct BxlActions<'v> { actions: ValueTyped<'v, AnalysisActions<'v>>, - exec_deps: ValueOfUnchecked<'v, DictRef<'v>>, - toolchains: ValueOfUnchecked<'v, DictRef<'v>>, + exec_deps: ValueOfUnchecked<'v, DictType>>, + toolchains: ValueOfUnchecked<'v, DictType>>, } impl<'v> BxlActions<'v> { @@ -212,8 +199,8 @@ impl<'v> BxlActions<'v> { actions: ValueTyped<'v, AnalysisActions<'v>>, exec_deps: Vec, toolchains: Vec, - eval: &mut Evaluator<'v, '_>, - ctx: &'c DiceComputations, + eval: &mut Evaluator<'v, '_, '_>, + ctx: &'c mut DiceComputations<'_>, ) -> anyhow::Result> { let exec_deps = alloc_deps(exec_deps, eval, ctx).await?; let toolchains = alloc_deps(toolchains, eval, ctx).await?; @@ -227,32 +214,43 @@ impl<'v> BxlActions<'v> { async fn alloc_deps<'v, 'c>( deps: Vec, - eval: &mut Evaluator<'v, '_>, - ctx: &'c DiceComputations, -) -> anyhow::Result>> { - let deps: Vec<_> = deps - .into_iter() - .map(|k| async { - let unconfigured = k.unconfigured(); - let dep = get_dependency_for_label(k, ctx, eval.module()).await?; - anyhow::Ok(( - eval.heap() - .alloc(StarlarkProvidersLabel::new(unconfigured)) - .get_hashed()?, - eval.heap().alloc(dep), - )) + eval: &mut Evaluator<'v, '_, '_>, + ctx: &'c mut DiceComputations<'_>, +) -> anyhow::Result>>> { + let analysis_results: Vec<_> = ctx + .try_compute_join(deps, |ctx, target| { + async move { + let res = ctx + .get_analysis_result(target.target()) + .await? + .require_compatible()?; + anyhow::Ok((target, res)) + } + .boxed() }) - .collect(); - let deps: SmallMap<_, _> = futures::future::try_join_all(deps) - .await? 
+ .await?; + + let deps: Vec<(StarlarkProvidersLabel, Dependency)> = analysis_results .into_iter() - .collect(); - let deps = eval.heap().alloc(Dict::new(deps)); + .map(|(configured, analysis_result)| { + let v = analysis_result.lookup_inner(&configured)?; + + let starlark_label = StarlarkProvidersLabel::new(configured.unconfigured()); + let dependency = Dependency::new( + eval.heap(), + configured, + v.value().owned_frozen_value_typed(eval.frozen_heap()), + None, + ); + + anyhow::Ok((starlark_label, dependency)) + }) + .collect::>()?; - ValueOfUnchecked::new_checked(deps) + Ok(eval.heap().alloc_typed_unchecked(AllocDict(deps)).cast()) } -#[starlark_value(type = "bxl_actions", StarlarkTypeRepr, UnpackValue)] +#[starlark_value(type = "bxl.Actions", StarlarkTypeRepr, UnpackValue)] impl<'v> StarlarkValue<'v> for BxlActions<'v> { fn get_methods() -> Option<&'static Methods> { static RES: MethodsStatic = MethodsStatic::new(); @@ -282,13 +280,19 @@ fn bxl_actions_methods(builder: &mut MethodsBuilder) { /// Gets the execution deps requested correctly configured for the current execution platform #[starlark(attribute)] - fn exec_deps<'v>(this: &'v BxlActions) -> anyhow::Result>> { + fn exec_deps<'v>( + this: &'v BxlActions, + ) -> anyhow::Result>>> + { Ok(this.exec_deps) } /// Gets the toolchains requested configured for the current execution platform #[starlark(attribute)] - fn toolchains<'v>(this: &'v BxlActions) -> anyhow::Result>> { + fn toolchains<'v>( + this: &'v BxlActions, + ) -> anyhow::Result>>> + { Ok(this.toolchains) } } diff --git a/app/buck2_bxl/src/bxl/starlark_defs/context/analysis.rs b/app/buck2_bxl/src/bxl/starlark_defs/context/analysis.rs index 2246c00ac4817..6baf2a2e05acf 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/context/analysis.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/context/analysis.rs @@ -13,48 +13,57 @@ use buck2_core::configuration::compatibility::MaybeCompatible; use buck2_core::provider::label::ConfiguredProvidersLabel; use dice::DiceComputations; use either::Either; +use futures::FutureExt; use gazebo::prelude::*; use crate::bxl::starlark_defs::analysis_result::StarlarkAnalysisResult; use crate::bxl::starlark_defs::context::BxlContextNoDice; +use crate::bxl::starlark_defs::context::ErrorPrinter; use crate::bxl::starlark_defs::providers_expr::ProvidersExpr; pub(crate) async fn analysis<'v>( - dice: &DiceComputations, + dice: &mut DiceComputations<'_>, ctx: &BxlContextNoDice<'v>, expr: ProvidersExpr, skip_incompatible: bool, ) -> anyhow::Result< Either, Vec<(ConfiguredProvidersLabel, StarlarkAnalysisResult)>>, > { - let analysis = futures::future::join_all(expr.labels().map(async move |label| { - let maybe_result = dice.get_analysis_result(label.target()).await?; - - match maybe_result { - MaybeCompatible::Incompatible(reason) => { - if skip_incompatible { - ctx.print_to_error_stream(IncompatiblePlatformReason::skipping_message( - &reason, - label.target(), - ))?; - Ok(None) - } else { - Err(reason.to_err()) + let analysis = dice + .compute_join(expr.labels(), |dice, label| { + async move { + let maybe_result = dice.get_analysis_result(label.target()).await?; + anyhow::Ok((label, maybe_result)) + } + .boxed() + }) + .await + .into_iter() + .map(|res| { + let (label, maybe_result) = res?; + match maybe_result { + MaybeCompatible::Incompatible(reason) => { + if skip_incompatible { + ctx.print_to_error_stream(IncompatiblePlatformReason::skipping_message( + &reason, + label.target(), + ))?; + Ok(None) + } else { + Err(reason.to_err()) + } } + 
MaybeCompatible::Compatible(result) => Ok(Some(( + label.clone(), + StarlarkAnalysisResult::new(result, label.clone())?, + ))), } - MaybeCompatible::Compatible(result) => Ok(Some(( - label.clone(), - StarlarkAnalysisResult::new(result, label.clone()), - ))), - } - })) - .await - .into_iter() - .filter_map(|r| match r { - Ok(r) => r.map(Ok), - Err(e) => Some(Err(e)), - }) - .collect::>>()?; + }) + .filter_map(|r| match r { + Ok(r) => r.map(Ok), + Err(e) => Some(Err(e)), + }) + .collect::>>()?; match expr { ProvidersExpr::Literal(_) => { diff --git a/app/buck2_bxl/src/bxl/starlark_defs/context/build.rs b/app/buck2_bxl/src/bxl/starlark_defs/context/build.rs index bce58d4158086..6489bc9ac4f3c 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/context/build.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/context/build.rs @@ -9,29 +9,23 @@ //! //! Implements the ability for bxl to build targets -use std::sync::Arc; use allocative::Allocative; use buck2_artifact::artifact::artifact_type::Artifact; -use buck2_artifact::artifact::build_artifact::BuildArtifact; use buck2_build_api::build::build_configured_label; use buck2_build_api::build::BuildConfiguredLabelOptions; use buck2_build_api::build::BuildEvent; use buck2_build_api::build::BuildTargetResult; -use buck2_build_api::build::ConvertMaterializationContext; +use buck2_build_api::build::ConfiguredBuildEvent; use buck2_build_api::build::ProvidersToBuild; use buck2_build_api::bxl::build_result::BxlBuildResult; -use buck2_build_api::interpreter::rule_defs::artifact::StarlarkArtifact; +use buck2_build_api::interpreter::rule_defs::artifact::starlark_artifact::StarlarkArtifact; use buck2_cli_proto::build_request::Materializations; -use buck2_common::result::SharedError; +use buck2_common::global_cfg_options::GlobalCfgOptions; use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_interpreter::types::configured_providers_label::StarlarkConfiguredProvidersLabel; -use dashmap::DashMap; use derive_more::Display; -use dice::DiceComputationsParallel; use dupe::Dupe; -use futures::future::BoxFuture; -use futures::stream::FuturesUnordered; use futures::FutureExt; use futures::StreamExt; use itertools::Itertools; @@ -48,11 +42,14 @@ use starlark::values::Trace; use starlark::values::UnpackValue as _; use starlark::values::Value; use starlark::values::ValueError; +use starlark::values::ValueLifetimeless; use starlark::values::ValueLike; +use starlark::values::ValueTyped; use starlark_map::small_map::SmallMap; use crate::bxl::starlark_defs::build_result::StarlarkBxlBuildResult; use crate::bxl::starlark_defs::context::BxlContext; +use crate::bxl::starlark_defs::providers_expr::AnyProvidersExprArg; use crate::bxl::starlark_defs::providers_expr::ProvidersExpr; use crate::bxl::value_as_starlark_target_label::ValueAsStarlarkTargetLabel; @@ -68,11 +65,11 @@ use crate::bxl::value_as_starlark_target_label::ValueAsStarlarkTargetLabel; Allocative )] #[repr(C)] -pub(crate) struct StarlarkProvidersArtifactIterableGen(pub(crate) V); +pub(crate) struct StarlarkProvidersArtifactIterableGen(pub(crate) V); starlark_complex_value!(pub(crate) StarlarkProvidersArtifactIterable); -impl<'v, V: ValueLike<'v> + 'v> StarlarkProvidersArtifactIterableGen +impl<'v, V: ValueLike<'v>> StarlarkProvidersArtifactIterableGen where Self: ProvidesStaticType<'v>, { @@ -83,6 +80,7 @@ where .0 .unpack_built() .unwrap() + .1 .outputs .iter() .filter_map(|built| built.as_ref().ok()) @@ -91,29 +89,29 @@ where } #[starlark_value(type = "bxl_built_artifacts_iterable")] -impl<'v, V: ValueLike<'v> 
+ 'v> StarlarkValue<'v> for StarlarkProvidersArtifactIterableGen +impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for StarlarkProvidersArtifactIterableGen where Self: ProvidesStaticType<'v>, { - fn iterate_collect(&self, heap: &'v Heap) -> anyhow::Result>> { + fn iterate_collect(&self, heap: &'v Heap) -> starlark::Result>> { Ok(self .iter() .map(|artifact| heap.alloc(StarlarkArtifact::new(artifact.dupe()))) .collect()) } - fn at(&self, index: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + fn at(&self, index: Value<'v>, heap: &'v Heap) -> starlark::Result> { let i = i32::unpack_value_err(index)?; if let Ok(i) = usize::try_from(i) { if let Some(artifact) = self.iter().nth(i) { return Ok(heap.alloc(StarlarkArtifact::new(artifact.dupe()))); } } - Err(anyhow::anyhow!(ValueError::IndexOutOfBound(i))) + Err(ValueError::IndexOutOfBound(i).into()) } - fn length(&self) -> anyhow::Result { - i32::try_from(self.iter().count()).map_err(|e| e.into()) + fn length(&self) -> starlark::Result { + i32::try_from(self.iter().count()).map_err(starlark::Error::new_other) } } @@ -129,21 +127,22 @@ where Allocative )] #[repr(C)] -pub(crate) struct StarlarkFailedArtifactIterableGen(pub(crate) V); +pub(crate) struct StarlarkFailedArtifactIterableGen(pub(crate) V); starlark_complex_value!(pub(crate) StarlarkFailedArtifactIterable); -impl<'v, V: ValueLike<'v> + 'v> StarlarkFailedArtifactIterableGen +impl<'v, V: ValueLike<'v>> StarlarkFailedArtifactIterableGen where Self: ProvidesStaticType<'v>, { - fn iter(&self) -> impl Iterator { + fn iter(&self) -> impl Iterator { self.0 .downcast_ref::() .unwrap() .0 .unpack_built() .unwrap() + .1 .outputs .iter() .filter_map(|built| built.as_ref().err()) @@ -151,70 +150,72 @@ where } #[starlark_value(type = "bxl_failed_artifacts_iterable")] -impl<'v, V: ValueLike<'v> + 'v> StarlarkValue<'v> for StarlarkFailedArtifactIterableGen +impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for StarlarkFailedArtifactIterableGen where Self: ProvidesStaticType<'v>, { - fn iterate_collect(&self, heap: &'v Heap) -> anyhow::Result>> { + fn iterate_collect(&self, heap: &'v Heap) -> starlark::Result>> { Ok(self.iter().map(|e| heap.alloc(format!("{}", e))).collect()) } - fn at(&self, index: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + fn at(&self, index: Value<'v>, heap: &'v Heap) -> starlark::Result> { let i = i32::unpack_value_err(index)?; if let Ok(i) = usize::try_from(i) { if let Some(e) = self.iter().nth(i) { return Ok(heap.alloc(format!("{}", e))); } } - Err(anyhow::anyhow!(ValueError::IndexOutOfBound(i))) + Err(ValueError::IndexOutOfBound(i).into()) } - fn length(&self) -> anyhow::Result { - i32::try_from(self.iter().count()).map_err(|e| e.into()) + fn length(&self) -> starlark::Result { + i32::try_from(self.iter().count()).map_err(starlark::Error::new_other) } } pub(crate) fn build<'v>( ctx: &BxlContext<'v>, - materializations_map: &Arc>, - spec: Value<'v>, - target_platform: Value<'v>, + spec: AnyProvidersExprArg<'v>, + target_platform: ValueAsStarlarkTargetLabel<'v>, materializations: Materializations, - eval: &Evaluator<'v, '_>, -) -> anyhow::Result, Value<'v>>> { - let materializations = - ConvertMaterializationContext::with_existing_map(materializations, materializations_map); - + eval: &Evaluator<'v, '_, '_>, +) -> anyhow::Result< + SmallMap< + ValueTyped<'v, StarlarkConfiguredProvidersLabel>, + ValueTyped<'v, StarlarkBxlBuildResult>, + >, +> { let target_platform = target_platform.parse_target_platforms( - &ctx.data.target_alias_resolver, - &ctx.data.cell_resolver, - 
ctx.data.cell_name, - &ctx.data.global_target_platform, + ctx.target_alias_resolver(), + ctx.cell_resolver(), + ctx.cell_alias_resolver(), + ctx.cell_name(), + &ctx.data.global_cfg_options().target_platform, )?; - let build_result = ctx.via_dice( - |mut dice, ctx| - dice.via(|dice| - async { - let build_spec = ProvidersExpr::::unpack( - spec, - target_platform, - ctx, - dice, - eval, - ) - .await?; + let build_result = ctx.via_dice(|dice, ctx| { + dice.via(|dice| { + async { + let build_spec = ProvidersExpr::::unpack( + spec, + &GlobalCfgOptions { + target_platform, + cli_modifiers: vec![].into(), + }, + ctx, + dice, + ) + .await?; + + let per_spec_results: Vec> = dice + .compute_join(build_spec.labels().unique(), |ctx, target| { + async move { + let target = target.clone(); - let stream = dice - .compute_many(build_spec.labels().unique().map(|target| { - let target = target.clone(); - let materializations = materializations.dupe(); - higher_order_closure! { - for <'x> move |dice: &'x mut DiceComputationsParallel<'_>| -> BoxFuture<'x, Vec>> { - async move { + ctx.with_linear_recompute(|ctx| async move { build_configured_label( - dice, - &materializations, + &ctx, + &materializations.into(), target, &ProvidersToBuild { default: true, @@ -226,34 +227,59 @@ pub(crate) fn build<'v>( skippable: false, want_configured_graph_size: false, }, - ).await - }.then(|res| async move { - match res { - Ok(stream) => stream.map(Ok).collect::>().await, - Err(e) => vec![Err(e)], - } - }).boxed() + ) + .await + .collect::>() + .await + }) + .await } - } - })) - .into_iter().collect::>().map(|v| v.into_iter().map(futures::future::ready).collect::>()).flatten(); + .boxed() + }) + .await; - // TODO (torozco): support --fail-fast in BXL. - BuildTargetResult::collect_stream(stream, false).await - }.boxed_local()) - )?; + // TODO (torozco): support --fail-fast in BXL. + BuildTargetResult::collect_stream( + futures::stream::iter( + per_spec_results + .into_iter() + .flatten() + .map(BuildEvent::Configured), + ), + false, + ) + .await + } + .boxed_local() + }) + })?; + + if let Some(err) = build_result + .configured + .values() + .flatten() + .flat_map(|r| &r.errors) + .chain(build_result.other_errors.values().flatten()) + .next() + { + return Err(err.dupe().into()); + } - build_result + Ok(build_result + .configured .into_iter() .map(|(target, result)| { - Ok(( + ( eval.heap() - .alloc(StarlarkConfiguredProvidersLabel::new(target)) - .get_hashed() + .alloc_typed(StarlarkConfiguredProvidersLabel::new(target.clone())) + .hashed() .unwrap(), eval.heap() - .alloc(StarlarkBxlBuildResult(BxlBuildResult::new(result))), - )) + .alloc_typed(StarlarkBxlBuildResult(BxlBuildResult::new( + target.clone(), + result, + ))), + ) }) - .collect() + .collect()) } diff --git a/app/buck2_bxl/src/bxl/starlark_defs/context/dynamic.rs b/app/buck2_bxl/src/bxl/starlark_defs/context/dynamic.rs new file mode 100644 index 0000000000000..b8d51834afdf1 --- /dev/null +++ b/app/buck2_bxl/src/bxl/starlark_defs/context/dynamic.rs @@ -0,0 +1,254 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */
+
+use std::cell::RefCell;
+use std::collections::HashMap;
+use std::rc::Rc;
+
+use buck2_action_impl::dynamic::bxl::EVAL_BXL_FOR_DYNAMIC_OUTPUT;
+use buck2_action_impl::dynamic::deferred::dynamic_lambda_ctx_data;
+use buck2_action_impl::dynamic::deferred::invoke_dynamic_output_lambda;
+use buck2_action_impl::dynamic::deferred::DynamicLambdaArgs;
+use buck2_action_impl::dynamic::deferred::DynamicLambdaCtxDataSpec;
+use buck2_action_impl::dynamic::deferred::InputArtifactsMaterialized;
+use buck2_action_impl::dynamic::params::FrozenDynamicLambdaParams;
+use buck2_artifact::dynamic::DynamicLambdaResultsKey;
+use buck2_build_api::actions::artifact::get_artifact_fs::GetArtifactFs;
+use buck2_build_api::analysis::registry::RecordedAnalysisValues;
+use buck2_build_api::dynamic_value::DynamicValue;
+use buck2_build_api::interpreter::rule_defs::provider::collection::FrozenProviderCollectionValue;
+use buck2_common::events::HasEvents;
+use buck2_common::scope::scope_and_collect_with_dice;
+use buck2_core::base_deferred_key::BaseDeferredKeyBxl;
+use buck2_core::fs::artifact_path_resolver::ArtifactFs;
+use buck2_error::internal_error_anyhow;
+use buck2_execute::digest_config::DigestConfig;
+use buck2_execute::digest_config::HasDigestConfig;
+use buck2_futures::cancellable_future::CancellationObserver;
+use buck2_interpreter::dice::starlark_provider::with_starlark_eval_provider;
+use buck2_interpreter::factory::StarlarkEvaluatorProvider;
+use buck2_interpreter::print_handler::EventDispatcherPrintHandler;
+use buck2_interpreter::soft_error::Buck2StarlarkSoftErrorHandler;
+use buck2_interpreter::starlark_profiler::profiler::StarlarkProfilerOpt;
+use dice::DiceComputations;
+use dupe::Dupe;
+use itertools::Itertools;
+use starlark::environment::Module;
+use starlark::values::OwnedRefFrozenRef;
+use starlark::values::ValueTyped;
+
+use crate::bxl::key::BxlDynamicKey;
+use crate::bxl::starlark_defs::context::starlark_async::BxlSafeDiceComputations;
+use crate::bxl::starlark_defs::context::BxlContext;
+use crate::bxl::starlark_defs::context::BxlContextCoreData;
+use crate::bxl::starlark_defs::context::DynamicBxlContextData;
+use crate::bxl::starlark_defs::eval_extra::BxlEvalExtra;
+
+pub(crate) async fn eval_bxl_for_dynamic_output<'v>(
+    base_deferred_key: &'v BaseDeferredKeyBxl,
+    self_key: DynamicLambdaResultsKey,
+    dynamic_lambda: OwnedRefFrozenRef<'v, FrozenDynamicLambdaParams>,
+    dice_ctx: &'v mut DiceComputations<'_>,
+    action_key: String,
+    input_artifacts_materialized: InputArtifactsMaterialized,
+    resolved_dynamic_values: HashMap<DynamicValue, FrozenProviderCollectionValue>,
+    _digest_config: DigestConfig,
+    liveness: CancellationObserver,
+) -> anyhow::Result<RecordedAnalysisValues> {
+    // TODO(wendyy) emit telemetry, support profiler
+    let dynamic_key =
+        BxlDynamicKey::from_base_deferred_key_dyn_impl_err(base_deferred_key.clone())?;
+    let key = dynamic_key.key();
+    let dynamic_data = DynamicBxlContextData {
+        exec_deps: dynamic_key
+            .0
+            .execution_resolution
+            .exec_deps_configured
+            .clone(),
+        toolchains: dynamic_key
+            .0
+            .execution_resolution
+            .toolchain_deps_configured
+            .clone(),
+    };
+    // TODO(cjhopman): Why does this get the digest_config from dice???
+    let digest_config = dice_ctx.global_data().get_digest_config();
+    let dispatcher = dice_ctx.per_transaction_data().get_dispatcher().dupe();
+    let artifact_fs = dice_ctx.get_artifact_fs().await?;
+    let eval_ctx = BxlDynamicOutputEvaluator {
+        data: BxlContextCoreData::new(key, dice_ctx).await?,
+        self_key,
+        liveness,
+        dynamic_lambda,
+        dynamic_data,
+        digest_config,
+        action_key,
+        input_artifacts_materialized,
+        resolved_dynamic_values,
+        artifact_fs,
+
+        print: EventDispatcherPrintHandler(dispatcher.dupe()),
+    };
+
+    // Note: because we use `block_in_place`, the inner future will not be polled or able to
+    // yield. So, for cancellation observers to work properly within the dice cancellable
+    // future context, the future that the cancellation context is attached to must be able
+    // to yield and be polled. To ensure that, we spawn a future which then enters
+    // `block_in_place`.
+    let (_, futs) = unsafe {
+        // SAFETY: as long as we don't `forget` the return object from `scope_and_collect`, it is safe
+
+        // Additional cancellation notes:
+        // the `scope_and_collect` will block on drop, but it will move the blocking to a tokio
+        // blocking thread, freeing up the main worker threads. Additionally, the `spawn_cancellable`
+        // on the scope will be dropped at the earliest await point. If we are within the blocking
+        // section of bxl, the cancellation observer will be notified and cause the blocking calls
+        // to terminate.
+        scope_and_collect_with_dice(dice_ctx, |dice_ctx, s| {
+            s.spawn_cancellable(
+                async move {
+                    with_starlark_eval_provider(
+                        dice_ctx,
+                        &mut StarlarkProfilerOpt::disabled(),
+                        format!("bxl_dynamic:{}", "foo"),
+                        move |provider, dice_ctx| {
+                            tokio::task::block_in_place(|| eval_ctx.do_eval(provider, dice_ctx))
+                        },
+                    )
+                    .await
+                },
+                || Err(anyhow::anyhow!("cancelled")),
+            )
+        })
+    }
+    .await;
+
+    match futs.into_iter().exactly_one() {
+        Ok(res) => res?,
+        Err(_) => panic!("only spawned one task"),
+    }
+}
+
+struct BxlDynamicOutputEvaluator<'f> {
+    data: BxlContextCoreData,
+    self_key: DynamicLambdaResultsKey,
+    liveness: CancellationObserver,
+    dynamic_lambda: OwnedRefFrozenRef<'f, FrozenDynamicLambdaParams>,
+    dynamic_data: DynamicBxlContextData,
+    digest_config: DigestConfig,
+    action_key: String,
+    input_artifacts_materialized: InputArtifactsMaterialized,
+    resolved_dynamic_values: HashMap<DynamicValue, FrozenProviderCollectionValue>,
+    artifact_fs: ArtifactFs,
+    print: EventDispatcherPrintHandler,
+}
+
+impl BxlDynamicOutputEvaluator<'_> {
+    fn do_eval(
+        self,
+        provider: &mut dyn StarlarkEvaluatorProvider,
+        dice: &mut DiceComputations<'_>,
+    ) -> anyhow::Result<RecordedAnalysisValues> {
+        let env = Module::new();
+
+        let bxl_dice = Rc::new(RefCell::new(BxlSafeDiceComputations::new(
+            dice,
+            self.liveness,
+        )));
+
+        let analysis_registry = {
+            let data = Rc::new(self.data);
+            let extra = BxlEvalExtra::new_dynamic(bxl_dice.dupe(), data.dupe());
+            let (mut eval, _) = provider.make(&env)?;
+            eval.set_print_handler(&self.print);
+            eval.set_soft_error_handler(&Buck2StarlarkSoftErrorHandler);
+            eval.extra = Some(&extra);
+
+            let dynamic_lambda_ctx_data = dynamic_lambda_ctx_data(
+                self.dynamic_lambda,
+                self.self_key.dupe(),
+                &self.action_key,
+                self.input_artifacts_materialized,
+                &self.resolved_dynamic_values,
+                &self.artifact_fs,
+                self.digest_config,
+                &env,
+            )?;
+
+            let bxl_dynamic_ctx = BxlContext::new_dynamic(
+                env.heap(),
+                data,
+                bxl_dice,
+                self.digest_config,
+                dynamic_lambda_ctx_data.registry,
+                self.dynamic_data,
+            )?;
+
+            let ctx = ValueTyped::<BxlContext>::new_err(env.heap().alloc(bxl_dynamic_ctx))?;
+
+            let
args = match ( + &dynamic_lambda_ctx_data.lambda.attr_values, + &dynamic_lambda_ctx_data.spec, + ) { + ( + None, + DynamicLambdaCtxDataSpec::Old { + outputs, + artifact_values, + }, + ) => DynamicLambdaArgs::OldPositional { + ctx: ctx.to_value(), + artifact_values: *artifact_values, + outputs: *outputs, + }, + (Some(_arg), DynamicLambdaCtxDataSpec::New { .. }) => { + return Err(anyhow::anyhow!( + "New `dynamic_actions` API is not implemented for BXL" + )); + } + (None, DynamicLambdaCtxDataSpec::New { .. }) + | (Some(_), DynamicLambdaCtxDataSpec::Old { .. }) => { + return Err(internal_error_anyhow!("Inconsistent")); + } + }; + + invoke_dynamic_output_lambda(&mut eval, dynamic_lambda_ctx_data.lambda.lambda(), args)?; + + ctx.take_state_dynamic()? + }; + + let (_frozen_env, recorded_values) = analysis_registry.finalize(&env)?(env)?; + Ok(recorded_values) + } +} + +pub(crate) fn init_eval_bxl_for_dynamic_output() { + EVAL_BXL_FOR_DYNAMIC_OUTPUT.init( + |base_deferred_key, + self_key, + dynamic_lambda, + dice_ctx, + action_key, + input_artifacts_materialized, + resolved_dynamic_values, + digest_config, + liveness| { + Box::pin(eval_bxl_for_dynamic_output( + base_deferred_key, + self_key, + dynamic_lambda, + dice_ctx, + action_key, + input_artifacts_materialized, + resolved_dynamic_values, + digest_config, + liveness, + )) + }, + ); +} diff --git a/app/buck2_bxl/src/bxl/starlark_defs/context/fs.rs b/app/buck2_bxl/src/bxl/starlark_defs/context/fs.rs index 2ce52b061bf23..b421a4ec859f4 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/context/fs.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/context/fs.rs @@ -7,19 +7,14 @@ * of this source tree. */ -use std::ops::DerefMut; - use allocative::Allocative; use async_recursion::async_recursion; use buck2_artifact::artifact::source_artifact::SourceArtifact; -use buck2_build_api::interpreter::rule_defs::artifact::StarlarkArtifact; -use buck2_common::dice::file_ops::DiceFileOps; -use buck2_common::dice::file_ops::HasFileOps; -use buck2_common::file_ops::FileOps; +use buck2_build_api::interpreter::rule_defs::artifact::starlark_artifact::StarlarkArtifact; +use buck2_common::dice::file_ops::DiceFileComputations; use buck2_common::file_ops::PathMetadataOrRedirection; -use buck2_common::package_listing::dice::HasPackageListingResolver; +use buck2_common::package_listing::dice::DicePackageListingResolver; use buck2_common::package_listing::resolver::PackageListingResolver; -use buck2_core::buck_path::path::BuckPath; use buck2_core::cells::cell_path::CellPath; use buck2_core::cells::cell_path::CellPathRef; use buck2_core::cells::instance::CellInstance; @@ -28,10 +23,12 @@ use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; use buck2_core::fs::project::ProjectRoot; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; use buck2_core::package::package_relative_path::PackageRelativePath; +use buck2_core::package::source_path::SourcePath; use buck2_core::package::PackageLabel; use buck2_node::nodes::unconfigured::TargetNode; use derivative::Derivative; use derive_more::Display; +use dice::DiceComputations; use futures::FutureExt; use starlark::any::ProvidesStaticType; use starlark::environment::Methods; @@ -39,7 +36,7 @@ use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; use starlark::eval::Evaluator; use starlark::starlark_module; -use starlark::values::none::NoneType; +use starlark::values::none::NoneOr; use starlark::values::starlark_value; use starlark::values::AllocValue; use starlark::values::Heap; @@ -48,13 +45,14 
@@ use starlark::values::StarlarkValue; use starlark::values::StringValue; use starlark::values::Trace; use starlark::values::Value; -use starlark::StarlarkDocs; -use thiserror::Error; +use starlark::values::ValueOf; +use starlark::values::ValueTyped; use super::BxlContext; use crate::bxl::starlark_defs::file_expr::FileExpr; use crate::bxl::starlark_defs::file_set::StarlarkReadDirSet; -use crate::bxl::starlark_defs::target_expr::TargetExpr; +use crate::bxl::starlark_defs::target_list_expr::TargetListExpr; +use crate::bxl::starlark_defs::target_list_expr::TargetListExprArg; #[derive( ProvidesStaticType, @@ -62,38 +60,35 @@ use crate::bxl::starlark_defs::target_expr::TargetExpr; Display, Trace, NoSerialize, - StarlarkDocs, Allocative )] #[derivative(Debug)] -#[starlark_docs(directory = "bxl")] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] #[allocative(skip)] pub(crate) struct BxlFilesystem<'v> { - #[trace(unsafe_ignore)] #[derivative(Debug = "ignore")] - ctx: &'v BxlContext<'v>, + ctx: ValueTyped<'v, BxlContext<'v>>, } impl<'v> BxlFilesystem<'v> { - pub(crate) fn new(ctx: &'v BxlContext<'v>) -> Self { + pub(crate) fn new(ctx: ValueTyped<'v, BxlContext<'v>>) -> Self { Self { ctx } } fn artifact_fs(&self) -> &ArtifactFs { - &self.ctx.data.artifact_fs + self.ctx.artifact_fs() } fn project_fs(&self) -> &ProjectRoot { - &self.ctx.data.project_fs + self.ctx.project_fs() } fn cell(&self) -> anyhow::Result<&CellInstance> { - self.ctx.data.cell_resolver.get(self.ctx.data.cell_name) + self.ctx.cell_resolver().get(self.ctx.cell_name()) } } -#[starlark_value(type = "fs", StarlarkTypeRepr, UnpackValue)] +#[starlark_value(type = "bxl.Filesystem", StarlarkTypeRepr, UnpackValue)] impl<'v> StarlarkValue<'v> for BxlFilesystem<'v> { fn get_methods() -> Option<&'static Methods> { static RES: MethodsStatic = MethodsStatic::new(); @@ -107,7 +102,7 @@ impl<'v> AllocValue<'v> for BxlFilesystem<'v> { } } -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] pub(crate) enum BxlFilesystemError { #[error("Inferred package path `{0}` is not a valid package within the given file path `{1}`")] PackageMismatch(PackageLabel, CellPath), @@ -127,22 +122,24 @@ impl<'v> BxlFilesystem<'v> { &'v self, expr: FileExpr<'v>, ) -> anyhow::Result { - let cell_path = - self.ctx.async_ctx.borrow_mut().via(|dice| { - async { expr.get(dice.deref_mut(), self.cell()?).await }.boxed_local() - })?; + let cell_path = self + .ctx + .async_ctx + .borrow_mut() + .via(|dice| async { expr.get(dice, self.cell()?).await }.boxed_local())?; self.artifact_fs().resolve_cell_path(cell_path.as_ref()) } } #[async_recursion] -async fn try_exists<'v>(file_ops: &DiceFileOps<'v>, path: CellPathRef<'v>) -> anyhow::Result { - match file_ops.read_path_metadata_if_exists(path).await? { +async fn try_exists( + ctx: &mut DiceComputations<'_>, + path: CellPathRef<'async_recursion>, +) -> anyhow::Result { + match DiceFileComputations::read_path_metadata_if_exists(ctx, path).await? 
{
        Some(path) => match PathMetadataOrRedirection::from(path) {
            PathMetadataOrRedirection::PathMetadata(_) => Ok(true),
-            PathMetadataOrRedirection::Redirection(r) => {
-                try_exists(file_ops, r.as_ref().as_ref()).await
-            }
+            PathMetadataOrRedirection::Redirection(r) => try_exists(ctx, r.as_ref().as_ref()).await,
        },
        None => Ok(false),
    }
}
@@ -153,7 +150,7 @@ async fn try_exists<'v>(file_ops: &DiceFileOps<'v>, path: CellPathRef<'v>) -> an
 #[starlark_module]
 fn fs_operations(builder: &mut MethodsBuilder) {
     /// Check if a path exists on disk, taking advantage of Buck's cached filesystem.
-    /// Takes in a literal, a source artifact (via `[StarlarkArtifact]`), or a `[StarlarkFileNode]`.
+    /// Takes in a literal, a source artifact (via `artifact`), or a `file_node`.
     ///
     /// Sample usage:
     /// ```text
@@ -166,7 +163,7 @@ fn fs_operations(builder: &mut MethodsBuilder) {
                 let path = expr.get(dice, this.cell()?).await;
 
                 match path {
-                    Ok(p) => try_exists(&dice.file_ops(), p.as_ref()).await,
+                    Ok(p) => try_exists(dice, p.as_ref()).await,
                     Err(e) => Err(e),
                 }
             }
@@ -177,7 +174,7 @@ fn fs_operations(builder: &mut MethodsBuilder) {
     /// Returns all the contents of the given input that points to a directory.
     /// Errors if the given path is a file. Takes an optional boolean `dirs_only` to only return directories, defaults to false.
     ///
-    /// The input is a either a literal, a source artifact (via `[StarlarkArtifact]`), or a `[StarlarkFileNode]`.
+    /// The input is either a literal, a source artifact (via `artifact`), or a `file_node`.
     ///
     /// Sample usage:
     /// ```text
@@ -197,7 +194,8 @@ fn fs_operations(builder: &mut MethodsBuilder) {
 
             match path {
                 Ok(path) => {
-                    let read_dir_output = dice.file_ops().read_dir(path.as_ref()).await?;
+                    let read_dir_output =
+                        DiceFileComputations::read_dir(dice, path.as_ref()).await?;
                     Ok(StarlarkReadDirSet {
                         cell_path: path,
                         included: read_dir_output.included,
@@ -212,7 +210,7 @@ fn fs_operations(builder: &mut MethodsBuilder) {
     }
 
     /// Returns whether the provided path is a dir. Returns false is the dir does not exist.
-    /// The input is a either a literal, a source artifact (via `[StarlarkArtifact]`), or a `[StarlarkFileNode]`.
+    /// The input is either a literal, a source artifact (via `artifact`), or a `file_node`.
     ///
     /// Sample usage:
     /// ```text
@@ -224,7 +222,7 @@ fn fs_operations(builder: &mut MethodsBuilder) {
     }
 
     /// Returns whether the provided path is a file. Returns false is the file does not exist.
-    /// The input is a either a literal, a source artifact (via `[StarlarkArtifact]`), or a `[StarlarkFileNode]`.
+    /// The input is either a literal, a source artifact (via `artifact`), or a `file_node`.
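Putting the filesystem methods documented above together (their individual sample-usage blocks continue below), a small illustrative BXL script might read as follows. The cell and paths are hypothetical, and `list` is assumed to be the Starlark name of the directory-listing method described above.

```python
def _impl(ctx):
    fs = ctx.fs
    if fs.exists("root//some/pkg"):
        # `dirs_only = True` would restrict the listing to directories.
        for entry in fs.list("root//some/pkg", dirs_only = False):
            ctx.output.print(entry)
    ctx.output.print(fs.is_dir("root//some/pkg"))
    ctx.output.print(fs.is_file("root//some/pkg/BUCK"))
```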
/// /// Sample usage: /// ```text @@ -274,10 +272,10 @@ fn fs_operations(builder: &mut MethodsBuilder) { fn source<'v>( this: &'v BxlFilesystem<'v>, expr: FileExpr<'v>, - #[starlark(default = NoneType)] target_hint: Value<'v>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { - let buck_path = this.ctx.via_dice(|mut dice, ctx| { + #[starlark(default = NoneOr::None)] target_hint: NoneOr>>, + eval: &mut Evaluator<'v, '_, '_>, + ) -> anyhow::Result> { + let buck_path = this.ctx.via_dice(|dice, ctx| { dice.via(|dice| { async { let file_path_as_cell_path = expr.get(dice, this.cell()?).await?; @@ -285,22 +283,28 @@ fn fs_operations(builder: &mut MethodsBuilder) { .artifact_fs() .resolve_cell_path(file_path_as_cell_path.as_ref())?; - let package_label = if target_hint.is_none() { - dice.get_package_listing_resolver() - .get_enclosing_package(file_path_as_cell_path.as_ref()) - .await - } else { - let target_expr = - TargetExpr::<'v, TargetNode>::unpack(target_hint, ctx, dice, eval) - .await?; - match target_expr { - TargetExpr::Node(node) => Ok(node.label().pkg()), - TargetExpr::Label(label) => Ok(label.as_ref().pkg()), - _ => Err(anyhow::anyhow!( - BxlFilesystemError::MultipleTargetHintsNotSupported( - target_hint.to_repr() - ) - )), + let package_label = match target_hint { + NoneOr::None => { + DicePackageListingResolver(dice) + .get_enclosing_package(file_path_as_cell_path.as_ref()) + .await + } + NoneOr::Other(target_hint) => { + let target_expr = TargetListExpr::<'v, TargetNode>::unpack( + target_hint.typed, + ctx, + dice, + ) + .await?; + if let Some(node) = target_expr.get_one(dice).await? { + Ok(node.label().pkg()) + } else { + Err(anyhow::anyhow!( + BxlFilesystemError::MultipleTargetHintsNotSupported( + target_hint.value.to_repr() + ) + )) + } } }?; @@ -320,7 +324,7 @@ fn fs_operations(builder: &mut MethodsBuilder) { let package_relative_path = PackageRelativePath::new(forward_relative_path.as_path())?; - Ok(BuckPath::new(package_label, package_relative_path.into())) + Ok(SourcePath::new(package_label, package_relative_path.into())) } .boxed_local() }) @@ -328,6 +332,6 @@ fn fs_operations(builder: &mut MethodsBuilder) { Ok(eval .heap() - .alloc(StarlarkArtifact::new(SourceArtifact::new(buck_path).into()))) + .alloc_typed(StarlarkArtifact::new(SourceArtifact::new(buck_path).into()))) } } diff --git a/app/buck2_bxl/src/bxl/starlark_defs/context/methods.rs b/app/buck2_bxl/src/bxl/starlark_defs/context/methods.rs new file mode 100644 index 0000000000000..ab8980cccadf8 --- /dev/null +++ b/app/buck2_bxl/src/bxl/starlark_defs/context/methods.rs @@ -0,0 +1,830 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use std::iter; +use std::sync::Arc; + +use anyhow::Context; +use buck2_build_api::interpreter::rule_defs::context::AnalysisActions; +use buck2_cli_proto::build_request::Materializations; +use buck2_common::dice::cells::HasCellResolver; +use buck2_common::dice::data::HasIoProvider; +use buck2_common::events::HasEvents; +use buck2_common::global_cfg_options::GlobalCfgOptions; +use buck2_core::cells::cell_path::CellPathRef; +use buck2_core::cells::paths::CellRelativePath; +use buck2_core::pattern::pattern::ParsedPattern; +use buck2_core::pattern::pattern_type::TargetPatternExtra; +use buck2_core::provider::label::ConfiguredProvidersLabel; +use buck2_core::provider::label::ProvidersLabel; +use buck2_core::soft_error; +use buck2_core::target::label::label::TargetLabel; +use buck2_interpreter::starlark_promise::StarlarkPromise; +use buck2_interpreter::types::configured_providers_label::StarlarkConfiguredProvidersLabel; +use buck2_interpreter::types::configured_providers_label::StarlarkProvidersLabel; +use buck2_interpreter::types::target_label::StarlarkTargetLabel; +use buck2_node::configuration::resolved::ConfigurationSettingKey; +use buck2_node::nodes::configured::ConfiguredTargetNode; +use buck2_node::nodes::frontend::TargetGraphCalculation; +use buck2_node::nodes::unconfigured::TargetNode; +use dupe::Dupe; +use either::Either; +use futures::FutureExt; +use starlark::collections::SmallMap; +use starlark::environment::MethodsBuilder; +use starlark::eval::Evaluator; +use starlark::starlark_module; +use starlark::values::list::UnpackList; +use starlark::values::none::NoneOr; +use starlark::values::none::NoneType; +use starlark::values::structs::StructRef; +use starlark::values::Value; +use starlark::values::ValueOfUnchecked; +use starlark::values::ValueTyped; + +use crate::bxl::starlark_defs::analysis_result::StarlarkAnalysisResult; +use crate::bxl::starlark_defs::aquery::StarlarkAQueryCtx; +use crate::bxl::starlark_defs::audit::StarlarkAuditCtx; +use crate::bxl::starlark_defs::build_result::StarlarkBxlBuildResult; +use crate::bxl::starlark_defs::context::actions::resolve_bxl_execution_platform; +use crate::bxl::starlark_defs::context::actions::validate_action_instantiation; +use crate::bxl::starlark_defs::context::actions::BxlActions; +use crate::bxl::starlark_defs::context::analysis; +use crate::bxl::starlark_defs::context::build; +use crate::bxl::starlark_defs::context::fs::BxlFilesystem; +use crate::bxl::starlark_defs::context::output::OutputStream; +use crate::bxl::starlark_defs::context::BxlContext; +use crate::bxl::starlark_defs::context::BxlContextDynamicError; +use crate::bxl::starlark_defs::context::BxlContextNoDice; +use crate::bxl::starlark_defs::context::BxlContextType; +use crate::bxl::starlark_defs::context::NotATargetLabelString; +use crate::bxl::starlark_defs::context::UnconfiguredTargetInAnalysis; +use crate::bxl::starlark_defs::cquery::StarlarkCQueryCtx; +use crate::bxl::starlark_defs::event::StarlarkUserEventParser; +use crate::bxl::starlark_defs::lazy_ctx::StarlarkLazyCtx; +use crate::bxl::starlark_defs::nodes::configured::StarlarkConfiguredTargetNode; +use crate::bxl::starlark_defs::nodes::unconfigured::StarlarkTargetNode; +use crate::bxl::starlark_defs::providers_expr::AnyProvidersExprArg; +use crate::bxl::starlark_defs::providers_expr::ProvidersExpr; +use crate::bxl::starlark_defs::providers_expr::ProvidersExprArg; +use crate::bxl::starlark_defs::target_list_expr::filter_incompatible; +use crate::bxl::starlark_defs::target_list_expr::ConfiguredTargetListExprArg; 
+use crate::bxl::starlark_defs::target_list_expr::TargetListExpr; +use crate::bxl::starlark_defs::target_list_expr::TargetListExprArg; +use crate::bxl::starlark_defs::target_universe::StarlarkTargetUniverse; +use crate::bxl::starlark_defs::targetset::StarlarkTargetSet; +use crate::bxl::starlark_defs::uquery::StarlarkUQueryCtx; +use crate::bxl::value_as_starlark_target_label::ValueAsStarlarkTargetLabel; + +/// The bxl context that the top level bxl implementation receives as parameter. +/// This context contains all the core bxl functions to query, build, create actions, etc. +#[starlark_module] +pub(crate) fn bxl_context_methods(builder: &mut MethodsBuilder) { + /// Gets the output stream to the console via stdout. Items written to the output stream + /// are considered to be the results of a bxl script, which will be displayed to stdout by + /// buck2 even when the script is cached. + /// + /// Prints that are not result of the bxl should be printed via stderr via the stdlib `print` + /// and `pprint`. + /// + /// This function is not available on the `bxl_ctx` when called from `dynamic_output`. + #[starlark(attribute)] + fn output<'v>(this: &'v BxlContext) -> anyhow::Result> { + let output_stream = this + .data + .context_type + .unpack_root() + .context(BxlContextDynamicError::Unsupported("output".to_owned()))? + .output_stream; + Ok(output_stream) + } + + /// Returns the absolute path to the root of the repository + /// + /// This function is not available on the `bxl_ctx` when called from `dynamic_output`. + fn root<'v>(this: &'v BxlContext<'v>) -> anyhow::Result { + let _root_type = this + .data + .context_type + .unpack_root() + .context(BxlContextDynamicError::Unsupported("root".to_owned()))?; + Ok(this + .async_ctx + .borrow() + .global_data() + .get_io_provider() + .project_root() + .root() + .to_str()? + .to_owned()) + } + + /// Returns the absolute path to the cell of the repository + /// + /// This function is not available on the `bxl_ctx` when called from `dynamic_output`. + fn cell_root<'v>(this: &'v BxlContext<'v>) -> anyhow::Result { + let _root_type = this + .data + .context_type + .unpack_root() + .context(BxlContextDynamicError::Unsupported("root".to_owned()))?; + Ok(this.cell_root_abs().to_owned().to_string()) + } + + /// Gets the target nodes for the `labels`, accepting an optional `target_platform` which is the + /// target platform configuration used to resolve configurations of any unconfigured target + /// nodes. + /// The `target_platform` is either a string that can be parsed as a target label, or a + /// target label. + /// + /// The given `labels` is a [`TargetListExpr`], which is either: + /// - a single string that is a `target pattern`. + /// - a single target node or label, configured or unconfigured + /// - a list of the two options above. + /// + /// Note that this function does not accept `Label` (which is a configured provider label), since this + /// is the label of a subtarget. You can get the underlying configured target label on the `Label` + /// using `configured_targets()` (ex: `my_label.configured_target()`). 
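As a quick illustration of the label forms just described (the doc continues below with the return shapes), a hypothetical sketch; all cell, package, and platform names are invented:

```python
def _impl(ctx):
    # Single string pattern plus an explicit target platform.
    one = ctx.configured_targets(
        "root//pkg:app",
        target_platform = "root//platforms:default",
    )
    # List form returns a target set instead of a single node.
    many = ctx.configured_targets(["root//pkg:a", "root//pkg:b"])
    ctx.output.print(one.label)
    ctx.output.print(len(many))
```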
+ /// + /// This returns either a single `target_node` if the given `labels` + /// is "singular", a dict keyed by target labels of `target_node` if the + /// given `labels` is list-like + fn configured_targets<'v>( + this: &'v BxlContext<'v>, + #[starlark(require = pos)] labels: ConfiguredTargetListExprArg<'v>, + #[starlark(default = ValueAsStarlarkTargetLabel::NONE)] + target_platform: ValueAsStarlarkTargetLabel<'v>, + #[starlark(require = named, default = NoneOr::None)] modifiers: NoneOr>, + ) -> anyhow::Result< + Either, StarlarkTargetSet>, + > { + let target_platform = target_platform.parse_target_platforms( + this.target_alias_resolver(), + this.cell_resolver(), + this.cell_alias_resolver(), + this.cell_name(), + &this.global_cfg_options().target_platform, + )?; + let cli_modifiers = match modifiers.into_option() { + Some(cli_modifiers) => cli_modifiers.items, + None => Vec::new(), + }; + + this.via_dice(|dice, this| { + dice.via(|ctx| { + async move { + let target_expr = + TargetListExpr::<'v, ConfiguredTargetNode>::unpack_allow_unconfigured( + labels, + &GlobalCfgOptions { + target_platform, + cli_modifiers: Arc::new(cli_modifiers), + }, + this, + ctx, + ) + .await?; + + if let Some(one) = target_expr.get_one(ctx).await? { + let result = filter_incompatible(iter::once(one), this)?; + if let Some(node) = result.iter().next() { + Ok(Either::Left(NoneOr::Other(StarlarkConfiguredTargetNode( + node.dupe(), + )))) + } else { + Ok(Either::Left(NoneOr::None)) + } + } else { + Ok(Either::Right(StarlarkTargetSet(filter_incompatible( + target_expr.get(ctx).await?, + this, + )?))) + } + } + .boxed_local() + }) + }) + } + + /// Gets the unconfigured target nodes for the `labels` + /// + /// The given `labels` is either: + /// - a single string that is a `target pattern`. + /// - a single unconfigured target node or label + /// - a list of the two options above. + /// + /// This returns either a single [`StarlarkTargetNode`] if the given `labels` + /// is "singular", a dict keyed by target labels of [`StarlarkTargetNode`] if the + /// given `labels` is list-like + fn unconfigured_targets<'v>( + this: &'v BxlContext<'v>, + labels: TargetListExprArg<'v>, + ) -> anyhow::Result>> { + this.via_dice(|ctx, this| { + ctx.via(|ctx| { + async move { + let expr = TargetListExpr::<'v, TargetNode>::unpack(labels, this, ctx).await?; + if let Some(one) = expr.get_one(ctx).await? { + Ok(Either::Left(StarlarkTargetNode(one))) + } else { + Ok(Either::Right(StarlarkTargetSet( + expr.get(ctx).await?.into_owned(), + ))) + } + } + .boxed_local() + }) + }) + } + + /// Gets the unconfigured subtargets for the given `labels` + /// + /// The given `labels` is a providers expression, which is either: + /// - a single string that is a `target pattern`. + /// - a single target node or label, configured or unconfigured + /// - a single subtarget label, configured or unconfigured + /// - a list of the two options above. + /// + /// This returns either a single `providers_label` if the given `labels` argument + /// is "singular", or dict of the subtarget string representation to the + /// `providers_label` if the given `labels` argument is list-like. + /// + /// Note that this function does not check that this subtarget exists in the repo. + fn unconfigured_sub_targets<'v>( + this: &BxlContext<'v>, + // TODO(nga): parameter should be either positional or named, not both. 
+ labels: ProvidersExprArg<'v>, + ) -> anyhow::Result>> + { + let providers = + this.via_dice(|_dice, this| ProvidersExpr::::unpack(labels, this))?; + + match providers { + ProvidersExpr::Literal(provider) => { + Ok(Either::Left(StarlarkProvidersLabel::new(provider))) + } + ProvidersExpr::Iterable(providers) => Ok(Either::Right( + providers + .into_iter() + .map(|p| (p.to_string(), StarlarkProvidersLabel::new(p))) + .collect(), + )), + } + } + + /// Returns the `target_universe` that can lookup valid configured nodes in the universe. + /// + /// The given `labels` is a target expression, which is either: + /// - a single string that is a `target pattern`. + /// - a single target node or label, configured or unconfigured + /// - a single subtarget label, configured or unconfigured + /// - a list of the two options above. + /// + /// Also takes in an optional `target_platform` param to configure the nodes with, and a `keep_going`` + /// flag to skip any loading or configuration errors. Note that `keep_going` currently can only be used + /// if the input labels is a single target pattern as a string literal. + fn target_universe<'v>( + this: ValueTyped<'v, BxlContext<'v>>, + labels: ConfiguredTargetListExprArg<'v>, + #[starlark(default = ValueAsStarlarkTargetLabel::NONE)] + target_platform: ValueAsStarlarkTargetLabel<'v>, + #[starlark(require = named, default = false)] keep_going: bool, + ) -> anyhow::Result> { + let target_platform = target_platform.parse_target_platforms( + this.target_alias_resolver(), + this.cell_resolver(), + this.cell_alias_resolver(), + this.cell_name(), + &this.global_cfg_options().target_platform, + )?; + + this.via_dice(|ctx, this_no_dice: &BxlContextNoDice<'_>| { + ctx.via(|ctx| { + async move { + let target_expr = if keep_going { + TargetListExpr::<'v, ConfiguredTargetNode>::unpack_keep_going( + labels, + &GlobalCfgOptions { + target_platform, + cli_modifiers: vec![].into(), + }, + this_no_dice, + ctx, + ) + .await? + } else { + TargetListExpr::<'v, ConfiguredTargetNode>::unpack_allow_unconfigured( + labels, + &GlobalCfgOptions { + target_platform, + cli_modifiers: vec![].into(), + }, + this_no_dice, + ctx, + ) + .await? + }; + + let maybe_compatible_set = target_expr.get(ctx).await?; + + let target_set = filter_incompatible(maybe_compatible_set, this_no_dice)?; + + StarlarkTargetUniverse::new(this, target_set).await + } + .boxed_local() + }) + }) + } + + /// Returns the `uqueryctx` that holds all uquery functions. + fn uquery<'v>(this: ValueTyped<'v, BxlContext<'v>>) -> anyhow::Result> { + StarlarkUQueryCtx::new(this) + } + + /// Returns the `cqueryctx` that holds all the cquery functions. + /// This function takes an optional parameter `target_platform`, which is the target platform + /// configuration used to configured any unconfigured target nodes. + /// + /// The `target_platform` is a target label, or a string that is a target label. + fn cquery<'v>( + this: ValueTyped<'v, BxlContext<'v>>, + // TODO(nga): parameter should be either positional or named, not both. + #[starlark(default = ValueAsStarlarkTargetLabel::NONE)] + target_platform: ValueAsStarlarkTargetLabel<'v>, + ) -> anyhow::Result> { + StarlarkCQueryCtx::new(this, target_platform, this.data.global_cfg_options()) + } + + /// Returns the `aqueryctx` that holds all the aquery functions. + /// This function takes an optional parameter `target_platform`, which is the target platform + /// configuration used to configured any unconfigured target nodes. 
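A rough sketch of how `target_universe` combines with the `uquery` and `cquery` contexts above and below; the patterns, labels, and the `target_set()` accessor are assumptions for illustration:

```python
def _impl(ctx):
    universe = ctx.target_universe("root//app/...")
    cquery = ctx.cquery()
    # Reverse deps of one target, searched within the universe.
    rdeps = cquery.rdeps(universe.target_set(), "root//lib:core")
    uquery = ctx.uquery()
    owners = uquery.owner("app/src/main.rs")
    ctx.output.print([t.label for t in rdeps])
    ctx.output.print(owners)
```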
+ /// + /// The `target_platform` is a target label, or a string that is a target label. + fn aquery<'v>( + this: ValueTyped<'v, BxlContext<'v>>, + #[starlark(default = ValueAsStarlarkTargetLabel::NONE)] + target_platform: ValueAsStarlarkTargetLabel<'v>, + ) -> anyhow::Result> { + StarlarkAQueryCtx::new( + this, + target_platform, + &this.data.global_cfg_options().target_platform, + ) + } + + /// Returns the bxl actions to create and register actions for this + /// bxl function. This will have the execution platform resolved according to the execution + /// deps and toolchains you pass into this function. + /// You'll be able to access the analysis action factory of the correct execution platform, + /// toolchains, and execution deps of the corresponding configuration via this context. + /// + /// Actions created by bxl will not be built by default. Instead, they are marked to be built + /// by `ctx.output.ensure(artifact)` on the output module of the `bxl_ctx`. Only artifacts + /// marked by ensure will be built. + /// + /// Sample usage: + /// ```python + /// def _impl_write_action(ctx): + /// bxl_actions = ctx.bxl_actions() + /// output = bxl_actions.actions.write("my_output", "my_content") + /// ensured = ctx.output.ensure(output) + /// ctx.output.print(ensured) + /// ``` + /// + /// There are several optional named parameters: + /// + /// `exec_deps` - These are dependencies you wish to access as executables for creating the action. + /// This is usually the same set of targets one would pass to rule's `attr.exec_dep`. + /// `toolchains` - The set of toolchains needed for the actions you intend to create. + /// `target_platform` - The intended target platform for your toolchains + /// `exec_compatible_with` - Explicit list of configuration nodes (like platforms or constraints) + /// that these actions are compatible with. This is the 'exec_compatible_with' attribute of a target. + /// + /// If you passed in `exec_deps` or `toolchains`, you can access the resolved dependencies using the `exec_deps` + /// and `toolchains` attributes on the `bxl_actions`, which both return a `dict` of unconfigured subtarget labels + /// and their configured/resolved `dependency` objects. + /// + /// Note that the keys of `exec_deps` and `toolchains` must be unconfigured subtarget labels (`providers_label`s), + /// and not unconfigured target labels. You can use `ctx.unconfigured_sub_targets(...)` or `with_sub_target()` on + /// `target_label` to create the label. + /// + /// ```python + /// def _impl_run_action(ctx): + /// my_exec_dep = ctx.unconfigured_sub_targets("foo//bar:baz") # has some provider that you would use in the action + /// bxl_actions = ctx.bxl_actions(exec_deps = [my_exec_dep]) # call once, reuse wherever needed + /// output = bxl_actions.actions.run( + /// [ + /// "python3", + /// bxl_actions.exec_deps[my_exec_dep][RunInfo], # access resolved exec_deps on the `bxl_actions` + /// out.as_output(), + /// ], + /// category = "command", + /// local_only = True, + /// ) + /// ctx.output.ensure(output) + /// ``` + /// + /// When called from a `dynamic_output`, `bxl_actions()` cannot be configured with a different execution + /// platform resolution from the parent BXL. 
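To round out the examples above, the optional named parameters of `bxl_actions` can be combined; in this hypothetical sketch the subtarget, platform, and constraint labels are invented:

```python
def _impl(ctx):
    my_toolchain = ctx.unconfigured_sub_targets("toolchains//:cxx")
    bxl_actions = ctx.bxl_actions(
        toolchains = [my_toolchain],
        target_platform = "root//platforms:linux-x86_64",
        exec_compatible_with = ["root//constraints:local-only"],
    )
    out = bxl_actions.actions.write("note.txt", "configured")
    ctx.output.print(ctx.output.ensure(out))
```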
+ fn bxl_actions<'v>( + this: &'v BxlContext<'v>, + #[starlark(require = named, default = NoneOr::None)] exec_deps: NoneOr< + ProvidersExprArg<'v>, + >, + #[starlark(require = named, default = NoneOr::None)] toolchains: NoneOr< + ProvidersExprArg<'v>, + >, + #[starlark(require = named, default = ValueAsStarlarkTargetLabel::NONE)] + target_platform: ValueAsStarlarkTargetLabel<'v>, + #[starlark(require = named, default = NoneOr::None)] exec_compatible_with: NoneOr< + TargetListExprArg<'v>, + >, + eval: &mut Evaluator<'v, '_, '_>, + ) -> anyhow::Result> { + this.via_dice(|ctx, this| { + ctx.via(|ctx| { + async { + let (exec_deps, toolchains) = match &this.context_type { + BxlContextType::Root { .. } => { + let target_platform = target_platform.parse_target_platforms( + this.target_alias_resolver(), + this.cell_resolver(), + this.cell_alias_resolver(), + this.cell_name(), + &this.global_cfg_options().target_platform, + )?; + let exec_deps = match exec_deps { + NoneOr::None => Vec::new(), + NoneOr::Other(exec_deps) => { + ProvidersExpr::::unpack(exec_deps, this)? + .labels() + .cloned() + .collect() + } + }; + + let toolchains = match toolchains { + NoneOr::None => Vec::new(), + NoneOr::Other(toolchains) => { + ProvidersExpr::::unpack(toolchains, this)? + .labels() + .cloned() + .collect() + } + }; + + let exec_compatible_with: Arc<[_]> = match exec_compatible_with { + NoneOr::None => Arc::new([]), + NoneOr::Other(exec_compatible_with) => { + TargetListExpr::::unpack( + exec_compatible_with, + this, + ctx, + ) + .await? + .get(ctx) + .await? + .iter() + .map(|n| ConfigurationSettingKey(n.label().dupe())) + .collect() + } + }; + + let execution_resolution = resolve_bxl_execution_platform( + ctx, + this.cell_name(), + exec_deps, + toolchains, + target_platform.clone(), + exec_compatible_with.clone(), + ) + .await?; + + validate_action_instantiation(this, &execution_resolution)?; + + ( + execution_resolution.exec_deps_configured, + execution_resolution.toolchain_deps_configured, + ) + } + BxlContextType::Dynamic(data) => { + if !exec_deps.is_none() + || !toolchains.is_none() + || !target_platform.is_none() + || !exec_compatible_with.is_none() + { + return Err( + BxlContextDynamicError::RequireSameExecutionPlatformAsRoot + .into(), + ); + } + (data.exec_deps.clone(), data.toolchains.clone()) + } + }; + + BxlActions::new( + this.state, + exec_deps.to_vec(), + toolchains.to_vec(), + eval, + ctx, + ) + .await + } + .boxed_local() + }) + }) + } + + /// Runs analysis on the given `labels`, accepting an optional `target_platform` which is the + /// target platform configuration used to resolve configurations of any unconfigured target + /// nodes, and an optional `skip_incompatible` boolean that indicates whether to skip analysis + /// of nodes that are incompatible with the target platform. + /// The `target_platform` is either a string that can be parsed as a target label, or a + /// target label. + /// + /// The given `labels` is a providers expression, which is either: + /// - a single string that is a `target pattern`. + /// - a single target node or label, configured or unconfigured + /// - a single sub target label, configured or unconfigured + /// - a list of the two options above. 
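Since `analysis` here and `build` further below accept the same providers-expression forms, a combined sketch may help; the labels are hypothetical and error handling is elided:

```python
def _impl(ctx):
    # Analyze one target and inspect its providers.
    result = ctx.analysis("root//pkg:app")
    ctx.output.print(result.providers()[DefaultInfo])

    # Build it; artifacts are materialized per the `materializations` setting.
    built = ctx.build("root//pkg:app", materializations = "materialize")
    for _label, build_result in built.items():
        ctx.output.print(ctx.output.ensure_multiple(build_result))
```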
+ /// + /// This returns either a single `analysis_result` if the given `labels` argument is "singular", + /// or a dict keyed by sub target labels of `analysis` if the given `labels` argument + /// is list-like + fn analysis<'v>( + this: &BxlContext<'v>, + // TODO(nga): these parameters should be either position or named, not both. + labels: AnyProvidersExprArg<'v>, + #[starlark(default = ValueAsStarlarkTargetLabel::NONE)] + target_platform: ValueAsStarlarkTargetLabel<'v>, + #[starlark(require = named, default = true)] skip_incompatible: bool, + eval: &mut Evaluator<'v, '_, '_>, + ) -> anyhow::Result< + Either< + NoneOr, + SmallMap< + ValueTyped<'v, StarlarkConfiguredProvidersLabel>, + ValueTyped<'v, StarlarkAnalysisResult>, + >, + >, + > { + if labels.contains_unconfigured() { + soft_error!( + "bxl_unconfigured_target_in_analysis", + UnconfiguredTargetInAnalysis.into(), + quiet: true + )?; + } + + let target_platform = target_platform.parse_target_platforms( + this.data.target_alias_resolver(), + this.data.cell_resolver(), + this.cell_alias_resolver(), + this.data.cell_name(), + &this.data.global_cfg_options().target_platform, + )?; + + let res: anyhow::Result<_> = this.via_dice(|dice, ctx| { + dice.via(|dice| { + async { + let providers = ProvidersExpr::::unpack( + labels, + &GlobalCfgOptions { + target_platform, + cli_modifiers: vec![].into(), + }, + ctx, + dice, + ) + .await?; + analysis::analysis(dice, ctx, providers, skip_incompatible).await + } + .boxed_local() + }) + }); + + Ok(match res? { + Either::Left(single) => Either::Left(NoneOr::from_option(single)), + Either::Right(many) => Either::Right( + many.into_iter() + .map(|(t, v)| { + Ok(( + eval.heap() + .alloc_typed(StarlarkConfiguredProvidersLabel::new(t)) + .hashed() + .unwrap(), + eval.heap().alloc_typed(v), + )) + }) + .collect::>()?, + ), + }) + } + + /// Runs a build on the given `labels`, accepting an optional `target_platform` which is the + /// target platform configuration used to resolve configurations. Note that when `build()` is called, + /// the artifacts are materialized without needing to additionally call `ensure()` on them. + /// + /// The given `labels` is a providers expression, which is either: + /// - a single string that is a `target pattern`. + /// - a single target node or label, configured or unconfigured + /// - a single provider label, configured or unconfigured + /// - a list of the two options above. + /// + /// This returns a dict keyed by sub target labels mapped to `bxl_build_result`s if the + /// given `labels` argument is list-like. + /// + /// This function is not available on the `bxl_ctx` when called from `dynamic_output`. + fn build<'v>( + this: &'v BxlContext<'v>, + // TODO(nga): parameter should be either positional or named, not both. + labels: AnyProvidersExprArg<'v>, + #[starlark(default = ValueAsStarlarkTargetLabel::NONE)] + target_platform: ValueAsStarlarkTargetLabel<'v>, + #[starlark(require = named, default = "default")] materializations: &str, + eval: &mut Evaluator<'v, '_, '_>, + ) -> anyhow::Result< + SmallMap< + ValueTyped<'v, StarlarkConfiguredProvidersLabel>, + ValueTyped<'v, StarlarkBxlBuildResult>, + >, + > { + build::build( + this, + labels, + target_platform, + Materializations::from_str_name(&materializations.to_uppercase()).ok_or_else(|| { + anyhow::anyhow!("Unknown materialization setting `{}`", materializations) + })?, + eval, + ) + } + + /// A struct of the command line args as declared using the [`cli_args`] module. 
+    /// These command lines are resolved per the user's input on the cli when invoking the bxl script.
+    ///
+    /// If you wish to pass in a kebab-cased arg, the arg accessed from the BXL context's `cli_args`
+    /// attribute will always be in snakecase. For example, if you passed in `my-arg`, accessing it
+    /// within BXL would look like `ctx.cli_args.my_arg`.
+    ///
+    /// This attribute is not available on the bxl context within a dynamic lambda.
+    #[starlark(attribute)]
+    fn cli_args<'v>(this: &BxlContext<'v>) -> anyhow::Result<ValueOfUnchecked<'v, StructRef<'static>>> {
+        let cli_args = this
+            .data
+            .context_type
+            .unpack_root()
+            .context(BxlContextDynamicError::Unsupported("cli_args".to_owned()))?
+            .cli_args;
+
+        Ok(cli_args)
+    }
+
+    /// Returns the `bxl.Filesystem` for performing a basic set of filesystem operations within bxl
+    #[starlark(attribute)]
+    fn fs<'v>(this: ValueTyped<'v, BxlContext<'v>>) -> anyhow::Result<BxlFilesystem<'v>> {
+        Ok(BxlFilesystem::new(this))
+    }
+
+    /// Checks if a target label exists. Target label must be a string literal, and an exact target.
+    fn target_exists<'v>(this: &'v BxlContext<'v>, label: &'v str) -> anyhow::Result<bool> {
+        this.via_dice(|ctx, this_no_dice: &BxlContextNoDice<'_>| {
+            ctx.via(|ctx| {
+                async move {
+                    match ParsedPattern::<TargetPatternExtra>::parse_relaxed(
+                        this_no_dice.target_alias_resolver(),
+                        CellPathRef::new(this_no_dice.cell_name(), CellRelativePath::empty()),
+                        label,
+                        this_no_dice.cell_resolver(),
+                        this_no_dice.cell_alias_resolver(),
+                    )? {
+                        ParsedPattern::Target(pkg, name, TargetPatternExtra) => {
+                            let target_label = TargetLabel::new(pkg, name.as_ref());
+                            Ok(ctx.get_target_node(&target_label).await.ok().is_some())
+                        }
+                        _ => Err(anyhow::anyhow!(NotATargetLabelString)),
+                    }
+                }
+                .boxed_local()
+            })
+        })
+    }
+
+    /// Returns the `audit_ctx` that holds all the audit functions.
+    fn audit<'v>(this: ValueTyped<'v, BxlContext<'v>>) -> anyhow::Result<StarlarkAuditCtx<'v>> {
+        let (working_dir, cell_resolver) = this.via_dice(|ctx, this| {
+            ctx.via(|ctx| {
+                async move {
+                    Ok((
+                        this.cell_resolver()
+                            .get(this.cell_name())?
+                            .path()
+                            .as_project_relative_path()
+                            .to_buf(),
+                        ctx.get_cell_resolver().await?,
+                    ))
+                }
+                .boxed_local()
+            })
+        })?;
+
+        StarlarkAuditCtx::new(this, working_dir, cell_resolver)
+    }
+
+    /// Awaits a promise and returns an optional value of the promise.
+    ///
+    /// Sample usage:
+    /// ```python
+    /// load("//path/to/rules:rules.bzl", "my_anon_targets_rule", "my_map_function")
+    ///
+    /// def _resolve_impl(ctx):
+    ///     actions = ctx.bxl_actions().actions
+    ///     my_attrs = {
+    ///         "false": False,
+    ///         "int": 42,
+    ///         "list_string": ["a", "b", "c"],
+    ///         "string": "a-string",
+    ///         "true": True,
+    ///     }
+    ///
+    ///     promise = actions.anon_target(my_anon_targets_rule, my_attrs).promise.map(my_map_function)
+    ///     providers_result = ctx.resolve(actions, promise) # result is `provider_collection` type, which is a collection of `provider`s
+    ///     ctx.output.print(providers_result[0].my_field)
+    /// ```
+    fn resolve<'v>(
+        this: &'v BxlContext<'v>,
+        action_factory: ValueTyped<'v, AnalysisActions<'v>>,
+        promise: ValueTyped<'v, StarlarkPromise<'v>>,
+        eval: &mut Evaluator<'v, '_, '_>,
+    ) -> anyhow::Result<NoneOr<Value<'v>>> {
+        this.via_dice(|dice, this| {
+            dice.via(|dice| {
+                action_factory
+                    .run_promises(dice, eval, format!("bxl$promises:{}", this.current_bxl()))
+                    .boxed_local()
+            })
+        })?;
+        Ok(match promise.get() {
+            Some(v) => NoneOr::Other(v),
+            None => NoneOr::None,
+        })
+    }
+
+    /// Emits a user-defined instant event, taking in a required string id and a metadata dictionary where the
+    /// keys are strings, and values are either strings, bools, or ints. The id is user-supplied, and used to
+    /// identify the instant events in the event logs more easily.
+    ///
+    /// You may pass in an ensured artifact as a value in the metadata. The resulting output would be the ensured
+    /// artifact's relative or absolute path as a string.
+    fn instant_event<'v>(
+        this: &'v BxlContext<'v>,
+        #[starlark(require = named)] id: &str,
+        #[starlark(require = named)] metadata: Value<'v>,
+    ) -> anyhow::Result<NoneType> {
+        let parser = StarlarkUserEventParser {
+            artifact_fs: this.artifact_fs(),
+            project_fs: this.project_fs(),
+        };
+        let event = parser.parse(id, metadata)?;
+
+        this.async_ctx
+            .borrow()
+            .per_transaction_data()
+            .get_dispatcher()
+            .instant_event(event);
+
+        Ok(NoneType)
+    }
+
+    /// Lazy/batch/error handling operations.
+    #[starlark(attribute)]
+    fn lazy<'v>(this: ValueTyped<'v, BxlContext<'v>>) -> anyhow::Result<StarlarkLazyCtx<'v>> {
+        Ok(StarlarkLazyCtx::new(this))
+    }
+
+    /// The target_platform from the bxl invocation. It is from the `--target-platforms` flag.
+    #[starlark(attribute)]
+    fn target_platform<'v>(
+        this: &'v BxlContext<'v>,
+    ) -> anyhow::Result<NoneOr<StarlarkTargetLabel>> {
+        Ok(NoneOr::from_option(
+            this.global_cfg_options()
+                .target_platform
+                .dupe()
+                .map(StarlarkTargetLabel::new),
+        ))
+    }
+
+    /// The modifiers from the bxl invocation. It is from the `--modifier` flag.
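For completeness, a short hypothetical sketch of the event and invocation-introspection APIs in this stretch of the module:

```python
def _impl(ctx):
    # `id` and `metadata` are both required named parameters.
    ctx.instant_event(id = "my_step", metadata = {"phase": "start", "ok": True})
    ctx.output.print(ctx.target_platform)  # from --target-platforms, or None
    ctx.output.print(ctx.modifiers)        # from --modifier, possibly empty
```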
+ #[starlark(attribute)] + fn modifiers<'v>(this: &'v BxlContext<'v>) -> anyhow::Result> { + Ok((*this.global_cfg_options().cli_modifiers).clone()) + } +} diff --git a/app/buck2_bxl/src/bxl/starlark_defs/context/output.rs b/app/buck2_bxl/src/bxl/starlark_defs/context/output.rs index c5a1db3267d1d..3d3035fe24fbe 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/context/output.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/context/output.rs @@ -8,7 +8,6 @@ */ use std::cell::RefCell; -use std::fmt::Display; use std::io::Write; use std::ops::DerefMut; use std::rc::Rc; @@ -17,19 +16,20 @@ use allocative::Allocative; use anyhow::Context; use buck2_build_api::artifact_groups::ArtifactGroup; use buck2_build_api::bxl::build_result::BxlBuildResult; -use buck2_build_api::interpreter::rule_defs::artifact::StarlarkArtifact; +use buck2_build_api::interpreter::rule_defs::artifact::starlark_artifact::StarlarkArtifact; use buck2_build_api::interpreter::rule_defs::cmd_args::value_as::ValueAsCommandLineLike; use buck2_build_api::interpreter::rule_defs::cmd_args::CommandLineArgLike; use buck2_build_api::interpreter::rule_defs::cmd_args::SimpleCommandLineArtifactVisitor; use buck2_build_api::interpreter::rule_defs::cmd_args::StarlarkCommandLineInputs; use buck2_core::fs::artifact_path_resolver::ArtifactFs; use buck2_core::fs::project::ProjectRoot; +use buck2_error::starlark_error::from_starlark_with_options; use buck2_execute::path::artifact_path::ArtifactPath; use derivative::Derivative; use derive_more::Display; use dupe::Dupe; use futures::FutureExt; -use gazebo::prelude::SliceExt; +use gazebo::prelude::VecExt; use serde::ser::SerializeSeq; use serde::Serialize; use serde::Serializer; @@ -38,15 +38,20 @@ use starlark::collections::SmallSet; use starlark::environment::Methods; use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; +use starlark::eval::Evaluator; use starlark::starlark_module; use starlark::values::dict::Dict; use starlark::values::dict::DictRef; +use starlark::values::dict::UnpackDictEntries; use starlark::values::list::ListRef; +use starlark::values::list::UnpackList; use starlark::values::none::NoneType; use starlark::values::record::Record; use starlark::values::starlark_value; use starlark::values::structs::StructRef; use starlark::values::tuple::TupleRef; +use starlark::values::tuple::UnpackTuple; +use starlark::values::type_repr::StarlarkTypeRepr; use starlark::values::AllocValue; use starlark::values::Heap; use starlark::values::NoSerialize; @@ -54,15 +59,16 @@ use starlark::values::StarlarkValue; use starlark::values::Trace; use starlark::values::UnpackValue; use starlark::values::Value; -use starlark::values::ValueError; use starlark::values::ValueLike; -use starlark::StarlarkDocs; +use starlark::StarlarkResultExt; -use super::starlark_async::BxlSafeDiceComputations; use crate::bxl::starlark_defs::artifacts::EnsuredArtifact; +use crate::bxl::starlark_defs::artifacts::EnsuredArtifactArg; use crate::bxl::starlark_defs::artifacts::EnsuredArtifactGroup; use crate::bxl::starlark_defs::build_result::StarlarkBxlBuildResult; use crate::bxl::starlark_defs::context::build::StarlarkProvidersArtifactIterable; +use crate::bxl::starlark_defs::context::starlark_async::BxlDiceComputations; +use crate::bxl::starlark_defs::eval_extra::BxlEvalExtra; #[derive( ProvidesStaticType, @@ -70,27 +76,21 @@ use crate::bxl::starlark_defs::context::build::StarlarkProvidersArtifactIterable Display, Trace, NoSerialize, - StarlarkDocs, Allocative )] -#[display(fmt = "{:?}", self)] 
-#[starlark_docs(directory = "bxl")] +#[display("{:?}", self)] #[derivative(Debug)] -pub(crate) struct OutputStream<'v> { +pub(crate) struct OutputStream { #[derivative(Debug = "ignore")] #[trace(unsafe_ignore)] #[allocative(skip)] - pub(crate) sink: RefCell>, + pub(crate) sink: Rc>, #[trace(unsafe_ignore)] artifacts_to_ensure: RefCell>>, #[derivative(Debug = "ignore")] pub(crate) project_fs: ProjectRoot, #[derivative(Debug = "ignore")] pub(crate) artifact_fs: ArtifactFs, - #[trace(unsafe_ignore)] - #[derivative(Debug = "ignore")] - #[allocative(skip)] - pub(crate) async_ctx: Rc>>, } /// We can ensure either an `Artifact` or an `ArtifactGroup`. When we want to ensure a `CommandLineArgLike` object, @@ -102,19 +102,17 @@ pub(crate) enum EnsuredArtifactOrGroup { ArtifactGroup(ArtifactGroup), } -impl<'v> OutputStream<'v> { +impl OutputStream { pub(crate) fn new( project_fs: ProjectRoot, artifact_fs: ArtifactFs, - sink: RefCell>, - async_ctx: Rc>>, + sink: Rc>, ) -> Self { Self { sink, artifacts_to_ensure: RefCell::new(Some(Default::default())), project_fs, artifact_fs, - async_ctx, } } @@ -123,20 +121,30 @@ impl<'v> OutputStream<'v> { } } -#[starlark_value(type = "bxl_output_stream", StarlarkTypeRepr, UnpackValue)] -impl<'v> StarlarkValue<'v> for OutputStream<'v> { +#[starlark_value(type = "bxl.OutputStream", StarlarkTypeRepr, UnpackValue)] +impl<'v> StarlarkValue<'v> for OutputStream { fn get_methods() -> Option<&'static Methods> { static RES: MethodsStatic = MethodsStatic::new(); RES.methods(output_stream_methods) } } -impl<'v> AllocValue<'v> for OutputStream<'v> { +impl<'v> AllocValue<'v> for OutputStream { fn alloc_value(self, heap: &'v Heap) -> Value<'v> { heap.alloc_complex_no_freeze(self) } } +#[derive(StarlarkTypeRepr, UnpackValue)] +enum EnsureMultipleArtifactsArg<'v> { + None(NoneType), + EnsuredArtifactArgs(UnpackList>), + ProvidersArtifactIterable(&'v StarlarkProvidersArtifactIterable<'v>), + BxlBuildResult(&'v StarlarkBxlBuildResult), + Dict(UnpackDictEntries, &'v StarlarkBxlBuildResult>), + CmdLine(ValueAsCommandLineLike<'v>), +} + /// The output stream for bxl to print values to the console as their result #[starlark_module] fn output_stream_methods(builder: &mut MethodsBuilder) { @@ -154,9 +162,10 @@ fn output_stream_methods(builder: &mut MethodsBuilder) { /// ctx.output.print("test") /// ``` fn print<'v>( - this: &'v OutputStream<'v>, - #[starlark(args)] args: Vec>, + this: &'v OutputStream, + #[starlark(args)] args: UnpackTuple>, #[starlark(default = " ")] sep: &'v str, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { let mut first = true; let mut write = |d: &dyn Display| -> anyhow::Result<()> { @@ -170,7 +179,7 @@ fn output_stream_methods(builder: &mut MethodsBuilder) { }; for arg in args { - if let Some(ensured) = <&EnsuredArtifact>::unpack_value(arg) { + if let Some(ensured) = <&EnsuredArtifact>::unpack_value(arg).into_anyhow_result()? 
{ let path = get_artifact_path_display( ensured.get_artifact_path(), ensured.abs(), @@ -178,23 +187,28 @@ fn output_stream_methods(builder: &mut MethodsBuilder) { &this.artifact_fs, )?; write(&path)?; - } else if let Some(ensured) = <&EnsuredArtifactGroup>::unpack_value(arg) { - this.async_ctx.borrow_mut().via(|dice| { - ensured - .visit_artifact_path_without_associated_deduped( - |artifact_path, abs| { - let path = get_artifact_path_display( - artifact_path, - abs, - &this.project_fs, - &this.artifact_fs, - )?; - write(&path) - }, - dice, - ) - .boxed_local() - })?; + } else if let Some(ensured) = + <&EnsuredArtifactGroup>::unpack_value(arg).into_anyhow_result()? + { + BxlEvalExtra::from_context(eval)? + .dice + .borrow_mut() + .via(|dice| { + ensured + .visit_artifact_path_without_associated_deduped( + |artifact_path, abs| { + let path = get_artifact_path_display( + artifact_path, + abs, + &this.project_fs, + &this.artifact_fs, + )?; + write(&path) + }, + dice, + ) + .boxed_local() + })?; } else { write(&arg.to_str())?; } @@ -221,19 +235,20 @@ fn output_stream_methods(builder: &mut MethodsBuilder) { /// ctx.output.print_json("test") /// ``` fn print_json<'v>( - this: &'v OutputStream<'v>, + this: &'v OutputStream, value: Value<'v>, #[starlark(require=named, default=true)] pretty: bool, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { /// A wrapper with a Serialize instance so we can pass down the necessary context. - struct SerializeValue<'a, 'v> { + struct SerializeValue<'a, 'v, 'd> { value: Value<'v>, artifact_fs: &'a ArtifactFs, project_fs: &'a ProjectRoot, - async_ctx: &'a Rc>>, + async_ctx: &'a Rc>, } - impl<'a, 'v> SerializeValue<'a, 'v> { + impl<'v> SerializeValue<'_, 'v, '_> { fn with_value(&self, x: Value<'v>) -> Self { Self { value: x, @@ -244,12 +259,14 @@ fn output_stream_methods(builder: &mut MethodsBuilder) { } } - impl<'a, 'v> Serialize for SerializeValue<'a, 'v> { + impl Serialize for SerializeValue<'_, '_, '_> { fn serialize(&self, serializer: S) -> Result where S: Serializer, { - if let Some(ensured) = <&EnsuredArtifact>::unpack_value(self.value) { + if let Some(ensured) = <&EnsuredArtifact>::unpack_value(self.value) + .map_err(|e| serde::ser::Error::custom(format!("{:#}", e)))? + { let path = get_artifact_path_display( ensured.get_artifact_path(), ensured.abs(), @@ -258,7 +275,9 @@ fn output_stream_methods(builder: &mut MethodsBuilder) { ) .map_err(|err| serde::ser::Error::custom(format!("{:#}", err)))?; serializer.serialize_str(&path) - } else if let Some(ensured) = <&EnsuredArtifactGroup>::unpack_value(self.value) { + } else if let Some(ensured) = <&EnsuredArtifactGroup>::unpack_value(self.value) + .map_err(|e| serde::ser::Error::custom(format!("{:#}", e)))? 
+ { let mut seq_ser = serializer.serialize_seq(None)?; self.async_ctx @@ -314,7 +333,7 @@ fn output_stream_methods(builder: &mut MethodsBuilder) { value, artifact_fs: &this.artifact_fs, project_fs: &this.project_fs, - async_ctx: &this.async_ctx, + async_ctx: &BxlEvalExtra::from_context(eval)?.dice, }, ) .context("Error writing to JSON for `write_json`")?; @@ -338,8 +357,11 @@ fn output_stream_methods(builder: &mut MethodsBuilder) { /// ensured = ctx.output.ensure(output) /// ctx.output.print(ensured) /// ``` - fn ensure<'v>(this: &OutputStream, artifact: Value<'v>) -> anyhow::Result { - let artifact = EnsuredArtifact::new(artifact)?; + fn ensure<'v>( + this: &OutputStream, + artifact: EnsuredArtifactArg<'v>, + ) -> anyhow::Result { + let artifact = artifact.into_ensured_artifact(); populate_ensured_artifacts(this, EnsuredArtifactOrGroup::Artifact(artifact.clone()))?; Ok(artifact) @@ -347,7 +369,7 @@ fn output_stream_methods(builder: &mut MethodsBuilder) { /// Same as `ensure`, but for multiple artifacts. Will preserve the shape of the inputs (i.e. if the resulting /// `Dict` of a `ctx.build()` is passed in, the output will be a `Dict` where the key is preserved, - /// and the values are converted to `EnsuredArtifact`s). + /// and the values are converted to `ensured_artifact`s). /// /// Note that is slower to loop through objects and ensure them one by one than it is to call `ensure_multiple()` /// on all the objects at once (if possible). @@ -362,73 +384,77 @@ fn output_stream_methods(builder: &mut MethodsBuilder) { /// ctx.output.print_json(outputs) /// ``` fn ensure_multiple<'v>( - this: &'v OutputStream<'v>, - artifacts: Value<'v>, + this: &'v OutputStream, + // TODO(nga): must be either positional or named. + artifacts: EnsureMultipleArtifactsArg<'v>, heap: &'v Heap, ) -> anyhow::Result> { - if artifacts.is_none() { - Ok(heap.alloc(Vec::::new())) - } else if let Some(list) = <&ListRef>::unpack_value(artifacts) { - let artifacts: Vec = list.content().try_map(|artifact| { - let artifact = EnsuredArtifact::new(*artifact)?; + match artifacts { + EnsureMultipleArtifactsArg::None(_) => Ok(heap.alloc(Vec::::new())), + EnsureMultipleArtifactsArg::EnsuredArtifactArgs(list) => { + let artifacts: Vec = list.items.into_try_map(|artifact| { + let artifact = artifact.into_ensured_artifact(); + populate_ensured_artifacts( + this, + EnsuredArtifactOrGroup::Artifact(artifact.clone()), + )?; + + Ok::(artifact) + })?; - populate_ensured_artifacts( + Ok(heap.alloc(artifacts)) + } + EnsureMultipleArtifactsArg::ProvidersArtifactIterable(artifact_gen) => { + Ok(heap.alloc(get_artifacts_from_bxl_build_result( + artifact_gen + .0 + .downcast_ref::() + .unwrap(), this, - EnsuredArtifactOrGroup::Artifact(artifact.clone()), - )?; - - Ok::(artifact) - })?; - - Ok(heap.alloc(artifacts)) - } else if let Some(artifact_gen) = - <&StarlarkProvidersArtifactIterable>::unpack_value(artifacts) - { - Ok(heap.alloc(get_artifacts_from_bxl_build_result( - artifact_gen - .0 - .downcast_ref::() - .unwrap(), - this, - )?)) - } else if let Some(bxl_build_result) = <&StarlarkBxlBuildResult>::unpack_value(artifacts) { - Ok(heap.alloc(get_artifacts_from_bxl_build_result(bxl_build_result, this)?)) - } else if let Some(build_result_dict) = ::unpack_value(artifacts) { - Ok(heap.alloc(Dict::new( + )?)) + } + EnsureMultipleArtifactsArg::BxlBuildResult(bxl_build_result) => { + Ok(heap.alloc(get_artifacts_from_bxl_build_result(bxl_build_result, this)?)) + } + EnsureMultipleArtifactsArg::Dict(build_result_dict) => Ok(heap.alloc(Dict::new( 
build_result_dict - .iter() - .map(|(label, value)| { - if let Some(bxl_build_result) = - <&StarlarkBxlBuildResult>::unpack_value(value) - { - Ok(( - label.get_hashed()?, - heap.alloc(get_artifacts_from_bxl_build_result( - bxl_build_result, - this, - )?), - )) - } else { - Err(anyhow::anyhow!(incorrect_parameter_type_error(artifacts))) - } + .entries + .into_iter() + .map(|(label, bxl_build_result)| { + Ok(( + label.get_hashed().map_err(|e| { + from_starlark_with_options( + e, + buck2_error::starlark_error::NativeErrorHandling::Unknown, + false, + ) + })?, + heap.alloc(get_artifacts_from_bxl_build_result( + bxl_build_result, + this, + )?), + )) }) .collect::>()?, - ))) - } else if let Some(cmd_line) = artifacts.as_command_line() { - let inputs = get_cmd_line_inputs(cmd_line)?; - let mut result = Vec::new(); + ))), + EnsureMultipleArtifactsArg::CmdLine(cmd_line) => { + // TODO(nga): we should not be doing that here. + // If we pass random string to this function, + // it will be interpreted as a command line without inputs, + // and this function will return empty `EnsuredArtifactGroup`. + let inputs = get_cmd_line_inputs(cmd_line.0)?; + let mut result = Vec::new(); + + for artifact_group in &inputs.inputs { + populate_ensured_artifacts( + this, + EnsuredArtifactOrGroup::ArtifactGroup(artifact_group.dupe()), + )?; + result.push(artifact_group.dupe()); + } - for artifact_group in &inputs.inputs { - populate_ensured_artifacts( - this, - EnsuredArtifactOrGroup::ArtifactGroup(artifact_group.dupe()), - )?; - result.push(artifact_group.dupe()); + Ok(heap.alloc(EnsuredArtifactGroup::new(result, false, heap))) } - - Ok(heap.alloc(EnsuredArtifactGroup::new(result, false, heap))) - } else { - Err(anyhow::anyhow!(incorrect_parameter_type_error(artifacts))) } } } @@ -458,13 +484,6 @@ pub(crate) fn get_artifact_path_display( }) } -fn incorrect_parameter_type_error(artifacts: Value) -> ValueError { - ValueError::IncorrectParameterTypeWithExpected( - "list of artifacts, bxl_built_artifacts_iterable, or command-line-arg-like".to_owned(), - artifacts.get_type().to_owned(), - ) -} - fn populate_ensured_artifacts( output_stream: &OutputStream, ensured: EnsuredArtifactOrGroup, @@ -484,7 +503,7 @@ fn get_artifacts_from_bxl_build_result( ) -> anyhow::Result> { match &bxl_build_result.0 { BxlBuildResult::None => Ok(Vec::new()), - BxlBuildResult::Built(result) => result + BxlBuildResult::Built { result, .. } => result .outputs .iter() .filter_map(|built| { diff --git a/app/buck2_bxl/src/bxl/starlark_defs/context/starlark_async.rs b/app/buck2_bxl/src/bxl/starlark_defs/context/starlark_async.rs index 4495468080455..c196b0c151be5 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/context/starlark_async.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/context/starlark_async.rs @@ -7,13 +7,14 @@ * of this source tree. 
*/

-use std::ops::Deref;
-use std::ops::DerefMut;
+use std::cell::OnceCell;
+use std::rc::Rc;

 use buck2_common::events::HasEvents;
 use buck2_data::BxlDiceInvocationEnd;
 use buck2_data::BxlDiceInvocationStart;
 use buck2_events::dispatch::with_dispatcher_async;
+use buck2_futures::cancellable_future::CancellationObserver;
 use dice::DiceComputations;
 use dice::DiceData;
 use dice::UserComputationData;
@@ -21,10 +22,9 @@ use dupe::Dupe;
 use futures::future::select;
 use futures::future::Either;
 use futures::future::LocalBoxFuture;
-use more_futures::cancellable_future::CancellationObserver;
-use thiserror::Error;
+use futures::FutureExt;

-#[derive(Error, Debug)]
+#[derive(buck2_error::Error, Debug)]
 enum ViaError {
     #[error("The owning DICE evaluation has been cancelled")]
     Cancelled,
@@ -36,52 +36,62 @@ enum ViaError {
 /// This is not exposed to starlark but rather, used by operations exposed to starlark to run
 /// code.
 /// This also provides a handle for dice.
-pub struct BxlSafeDiceComputations<'a>(pub(super) &'a mut DiceComputations, CancellationObserver);
-
-/// For a `via_dice`, the DiceComputations provided to each lambda is a reference that's only
-/// available for some specific lifetime `'x`. This is express as a higher rank lifetime bound
-/// `for <'x>` in rust. However, `for <'x>` bounds do not have constraints on them so rust infers
-/// them to be any lifetime, including 'static, which is wrong. So, we introduce an extra lifetime
-/// here which forces rust compiler to infer additional bounds on the `for <'x>` as a
-/// `&'x DiceComputationRef<'a>` cannot live more than `'a`, so using this type as the argument
-/// to the closure forces the correct lifetime bounds to be inferred by rust.
-pub struct DiceComputationsRef<'s>(&'s mut DiceComputations);
-
-impl<'s> Deref for DiceComputationsRef<'s> {
-    type Target = DiceComputations;
-
-    fn deref(&self) -> &Self::Target {
-        self.0
-    }
-}
-
-impl<'s> DerefMut for DiceComputationsRef<'s> {
-    fn deref_mut(&mut self) -> &mut Self::Target {
-        self.0
-    }
+pub(crate) trait BxlDiceComputations {
+    // via() below provides a more useful API for consumers.
+    fn via_impl<'a: 'b, 'b>(
+        &'a mut self,
+        f: Box<
+            dyn for<'d> FnOnce(
+                    &'a mut DiceComputations<'d>,
+                ) -> LocalBoxFuture<'a, anyhow::Result<()>>
+                + 'b,
+        >,
+    ) -> anyhow::Result<()>;
+
+    fn global_data(&self) -> &DiceData;
+
+    fn per_transaction_data(&self) -> &UserComputationData;
 }

-impl<'a> BxlSafeDiceComputations<'a> {
-    pub fn new(dice: &'a mut DiceComputations, cancellation: CancellationObserver) -> Self {
-        Self(dice, cancellation)
+impl dyn BxlDiceComputations + '_ {
+    // We require that BxlDiceComputations be object-safe, but that means we can't have a type parameter in `via_impl`.
+    // It's really inconvenient to not have that, though, so we provide an implementation here that supports it.
+    pub(crate) fn via<'a, T: 'a>(
+        &'a mut self,
+        // The returned future has a 'a lifetime to allow people to capture things in the future with a matching lifetime to self.
+        f: impl for<'d> FnOnce(&'a mut DiceComputations<'d>) -> LocalBoxFuture<'a, anyhow::Result<T>>
+            + 'a,
+    ) -> anyhow::Result<T> {
+        // We can't capture a &mut res here in the closure, unfortunately, so we need to do this little dance to get values out.
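The `Rc<OnceCell<..>>` dance mentioned in the comment above is easier to see in isolation. Below is a minimal, synchronous sketch of the same pattern with hypothetical helper names (`run_erased`, `via_typed`) that are not part of this diff; the real async implementation follows in the hunk:

```rust
use std::cell::OnceCell;
use std::rc::Rc;

// An erased API in the spirit of `via_impl`: being object-safe, it can only
// run closures that return `anyhow::Result<()>`.
fn run_erased(f: Box<dyn FnOnce() -> anyhow::Result<()> + '_>) -> anyhow::Result<()> {
    f()
}

// A typed wrapper in the spirit of `via`: the result is smuggled out of the
// unit-returning closure through a shared write-once cell.
fn via_typed<T>(f: impl FnOnce() -> anyhow::Result<T>) -> anyhow::Result<T> {
    let res: Rc<OnceCell<T>> = Rc::new(OnceCell::new());
    let res2 = res.clone();
    run_erased(Box::new(move || {
        // The closure runs at most once, so the cell cannot already be set.
        res2.set(f()?).ok().unwrap();
        Ok(())
    }))?;
    // The closure's clone of the `Rc` is gone by now, so unwrapping is
    // infallible, and the cell was filled on the success path we are on.
    Ok(Rc::try_unwrap(res).ok().unwrap().into_inner().unwrap())
}
```

Calling `via_typed(|| Ok(42))` returns `Ok(42)`, while the erased layer only ever sees `Ok(())`.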
+ let res: Rc> = Rc::new(OnceCell::new()); + let res2 = res.clone(); + self.via_impl(Box::new(move |dice| { + async move { + res2.set(f(dice).await?).ok().unwrap(); + Ok(()) + } + .boxed_local() + }))?; + Ok(Rc::try_unwrap(res).ok().unwrap().take().unwrap()) } +} - /// runs any async computation - pub fn via<'s, T>( - &'s mut self, - f: impl for<'x> FnOnce(&'x mut DiceComputationsRef<'s>) -> LocalBoxFuture<'x, anyhow::Result>, - ) -> anyhow::Result - where - 'a: 's, - { +impl BxlDiceComputations for BxlSafeDiceComputations<'_, '_> { + fn via_impl<'a: 'b, 'b>( + &'a mut self, + f: Box< + dyn for<'d> FnOnce( + &'a mut DiceComputations<'d>, + ) -> LocalBoxFuture<'a, anyhow::Result<()>> + + 'b, + >, + ) -> anyhow::Result<()> { let dispatcher = self.0.per_transaction_data().get_dispatcher().dupe(); dispatcher.span(BxlDiceInvocationStart {}, || { let liveness = self.1.dupe(); - let fut = with_dispatcher_async(dispatcher.clone(), async move { - let mut ctx = DiceComputationsRef(self.0); - f(&mut ctx).await - }); + let fut = with_dispatcher_async(dispatcher.clone(), async move { f(self.0).await }); let fut = async move { futures::pin_mut!(fut); @@ -98,11 +108,25 @@ impl<'a> BxlSafeDiceComputations<'a> { }) } - pub fn global_data(&self) -> &DiceData { + fn global_data(&self) -> &DiceData { self.0.global_data() } - pub fn per_transaction_data(&self) -> &UserComputationData { + fn per_transaction_data(&self) -> &UserComputationData { self.0.per_transaction_data() } } + +pub(crate) struct BxlSafeDiceComputations<'a, 'd>( + &'a mut DiceComputations<'d>, + CancellationObserver, +); + +impl<'a, 'd> BxlSafeDiceComputations<'a, 'd> { + pub(crate) fn new( + dice: &'a mut DiceComputations<'d>, + cancellation: CancellationObserver, + ) -> Self { + Self(dice, cancellation) + } +} diff --git a/app/buck2_bxl/src/bxl/starlark_defs/cquery.rs b/app/buck2_bxl/src/bxl/starlark_defs/cquery.rs index abb876af714e4..7fb1f06891ca9 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/cquery.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/cquery.rs @@ -8,17 +8,16 @@ */ use allocative::Allocative; -use anyhow::Context; use buck2_build_api::query::bxl::BxlCqueryFunctions; use buck2_build_api::query::bxl::NEW_BXL_CQUERY_FUNCTIONS; -use buck2_build_api::query::oneshot::CqueryOwnerBehavior; use buck2_build_api::query::oneshot::QUERY_FRONTEND; -use buck2_core::target::label::TargetLabel; +use buck2_common::global_cfg_options::GlobalCfgOptions; use buck2_node::nodes::configured::ConfiguredTargetNode; -use buck2_query::query::syntax::simple::eval::set::TargetSetExt; +use buck2_query::query::syntax::simple::eval::set::TargetSet; use buck2_query::query::syntax::simple::functions::helpers::CapturedExpr; use derivative::Derivative; use derive_more::Display; +use dice::DiceComputations; use dupe::Dupe; use futures::FutureExt; use gazebo::prelude::*; @@ -28,6 +27,7 @@ use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; use starlark::eval::Evaluator; use starlark::starlark_module; +use starlark::values::list_or_tuple::UnpackListOrTuple; use starlark::values::none::NoneOr; use starlark::values::starlark_value; use starlark::values::AllocValue; @@ -35,20 +35,19 @@ use starlark::values::Heap; use starlark::values::NoSerialize; use starlark::values::StarlarkValue; use starlark::values::Trace; -use starlark::values::UnpackValue; use starlark::values::Value; -use starlark::values::ValueError; -use starlark::StarlarkDocs; +use starlark::values::ValueTyped; use crate::bxl::starlark_defs::context::BxlContext; use 
crate::bxl::starlark_defs::context::BxlContextNoDice; use crate::bxl::starlark_defs::file_set::FileSetExpr; use crate::bxl::starlark_defs::file_set::StarlarkFileSet; use crate::bxl::starlark_defs::query_util::parse_query_evaluation_result; -use crate::bxl::starlark_defs::target_expr::filter_incompatible; -use crate::bxl::starlark_defs::target_expr::TargetExpr; +use crate::bxl::starlark_defs::target_list_expr::filter_incompatible; +use crate::bxl::starlark_defs::target_list_expr::ConfiguredTargetListExprArg; +use crate::bxl::starlark_defs::target_list_expr::TargetListExpr; use crate::bxl::starlark_defs::targetset::StarlarkTargetSet; -use crate::bxl::starlark_defs::uquery::unpack_unconfigured_query_args; +use crate::bxl::starlark_defs::uquery::UnpackUnconfiguredQueryArgs; use crate::bxl::value_as_starlark_target_label::ValueAsStarlarkTargetLabel; #[derive( @@ -57,22 +56,20 @@ use crate::bxl::value_as_starlark_target_label::ValueAsStarlarkTargetLabel; Display, Trace, NoSerialize, - StarlarkDocs, Allocative )] -#[starlark_docs(directory = "bxl")] #[derivative(Debug)] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] #[allocative(skip)] pub(crate) struct StarlarkCQueryCtx<'v> { - #[trace(unsafe_ignore)] #[derivative(Debug = "ignore")] - ctx: &'v BxlContext<'v>, + ctx: ValueTyped<'v, BxlContext<'v>>, #[derivative(Debug = "ignore")] - target_platform: Option, + // Overrides the GlobalCfgOptions in the BxlContext + global_cfg_options_override: GlobalCfgOptions, } -#[starlark_value(type = "cqueryctx", StarlarkTypeRepr, UnpackValue)] +#[starlark_value(type = "bxl.CqueryContext", StarlarkTypeRepr, UnpackValue)] impl<'v> StarlarkValue<'v> for StarlarkCQueryCtx<'v> { fn get_methods() -> Option<&'static Methods> { static RES: MethodsStatic = MethodsStatic::new(); @@ -88,33 +85,56 @@ impl<'v> AllocValue<'v> for StarlarkCQueryCtx<'v> { pub(crate) async fn get_cquery_env( ctx: &BxlContextNoDice<'_>, - target_platform: Option, + global_cfg_options_override: &GlobalCfgOptions, ) -> anyhow::Result> { (NEW_BXL_CQUERY_FUNCTIONS.get()?)( - target_platform, + global_cfg_options_override.clone(), ctx.project_root().dupe(), - ctx.cell_name, - ctx.cell_resolver.dupe(), + ctx.cell_name(), + ctx.cell_resolver().dupe(), ) .await } +async fn unpack_targets<'v>( + this: &StarlarkCQueryCtx<'v>, + dice: &mut DiceComputations<'_>, + targets: ConfiguredTargetListExprArg<'v>, +) -> anyhow::Result> { + filter_incompatible( + TargetListExpr::<'v, ConfiguredTargetNode>::unpack( + targets, + &this.global_cfg_options_override, + &this.ctx.data, + dice, + ) + .await? + .get(dice) + .await?, + &this.ctx.data, + ) +} + impl<'v> StarlarkCQueryCtx<'v> { pub(crate) fn new( - ctx: &'v BxlContext<'v>, - global_target_platform: Value<'v>, - default_target_platform: &Option, + ctx: ValueTyped<'v, BxlContext<'v>>, + global_target_platform: ValueAsStarlarkTargetLabel<'v>, + global_cfg_options: &GlobalCfgOptions, ) -> anyhow::Result> { let target_platform = global_target_platform.parse_target_platforms( - &ctx.data.target_alias_resolver, - &ctx.data.cell_resolver, - ctx.data.cell_name, - default_target_platform, + ctx.target_alias_resolver(), + ctx.cell_resolver(), + ctx.cell_alias_resolver(), + ctx.cell_name(), + &global_cfg_options.target_platform, )?; Ok(Self { ctx, - target_platform, + global_cfg_options_override: GlobalCfgOptions { + target_platform, + cli_modifiers: vec![].into(), + }, }) } } @@ -122,51 +142,33 @@ impl<'v> StarlarkCQueryCtx<'v> { /// The context for performing `cquery` operations in bxl. 
The functions offered on this ctx are /// the same behaviour as the query functions available within cquery command. /// -/// Query results are `[StarlarkTargetSet]`s of `[ConfiguredTargetNode]`s, which supports iteration, +/// Query results are `target_set`s of `target_node`s, which supports iteration, /// indexing, `len()`, set addition/subtraction, and `equals()`. #[starlark_module] fn cquery_methods(builder: &mut MethodsBuilder) { /// The `allpaths` query for computing all dependency paths. fn allpaths<'v>( this: &StarlarkCQueryCtx<'v>, - from: Value<'v>, - to: Value<'v>, - eval: &mut Evaluator<'v, '_>, + from: ConfiguredTargetListExprArg<'v>, + to: ConfiguredTargetListExprArg<'v>, + #[starlark(default = NoneOr::None)] filter: NoneOr<&'v str>, ) -> anyhow::Result> { - this.ctx.via_dice(move |mut dice, ctx| { + this.ctx.via_dice(move |dice, ctx| { dice.via(|dice| { async move { - let from = filter_incompatible( - TargetExpr::<'v, ConfiguredTargetNode>::unpack( - from, - &this.target_platform, - ctx, - dice, - eval, - ) + let filter = filter + .into_option() + .try_map(buck2_query_parser::parse_expr)?; + let from = unpack_targets(this, dice, from).await?; + let to = unpack_targets(this, dice, to).await?; + get_cquery_env(ctx, &this.global_cfg_options_override) .await? - .get(dice) - .await? - .into_iter(), - ctx, - )?; - let to = filter_incompatible( - TargetExpr::<'v, ConfiguredTargetNode>::unpack( - to, - &this.target_platform, - ctx, + .allpaths( dice, - eval, + &from, + &to, + filter.as_ref().map(|expr| CapturedExpr { expr }).as_ref(), ) - .await? - .get(dice) - .await? - .into_iter(), - ctx, - )?; - get_cquery_env(ctx, this.target_platform.dupe()) - .await? - .allpaths(dice, &from, &to) .await .map(StarlarkTargetSet::from) } @@ -178,44 +180,27 @@ fn cquery_methods(builder: &mut MethodsBuilder) { // The somepath query. fn somepath<'v>( this: &StarlarkCQueryCtx<'v>, - from: Value<'v>, - to: Value<'v>, - eval: &mut Evaluator<'v, '_>, + from: ConfiguredTargetListExprArg<'v>, + to: ConfiguredTargetListExprArg<'v>, + #[starlark(default = NoneOr::None)] filter: NoneOr<&'v str>, ) -> anyhow::Result> { - this.ctx.via_dice(|mut dice, ctx| { + this.ctx.via_dice(|dice, ctx| { dice.via(|dice| { async { - let from = filter_incompatible( - TargetExpr::<'v, ConfiguredTargetNode>::unpack( - from, - &this.target_platform, - ctx, - dice, - eval, - ) - .await? - .get(dice) + let filter = filter + .into_option() + .try_map(buck2_query_parser::parse_expr)?; + + let from = unpack_targets(this, dice, from).await?; + let to = unpack_targets(this, dice, to).await?; + get_cquery_env(ctx, &this.global_cfg_options_override) .await? - .into_iter(), - ctx, - )?; - let to = filter_incompatible( - TargetExpr::<'v, ConfiguredTargetNode>::unpack( - to, - &this.target_platform, - ctx, + .somepath( dice, - eval, + &from, + &to, + filter.as_ref().map(|expr| CapturedExpr { expr }).as_ref(), ) - .await? - .get(dice) - .await? - .into_iter(), - ctx, - )?; - get_cquery_env(ctx, this.target_platform.dupe()) - .await? 
- .somepath(dice, &from, &to) .await .map(StarlarkTargetSet::from) } @@ -229,28 +214,36 @@ fn cquery_methods(builder: &mut MethodsBuilder) { this: &StarlarkCQueryCtx<'v>, attr: &str, value: &str, - targets: Value<'v>, - eval: &mut Evaluator<'v, '_>, + targets: ConfiguredTargetListExprArg<'v>, ) -> anyhow::Result> { - this.ctx.via_dice(|mut dice, ctx| { + this.ctx.via_dice(|dice, _| { dice.via(|dice| { async { - filter_incompatible( - TargetExpr::<'v, ConfiguredTargetNode>::unpack( - targets, - &this.target_platform, - ctx, - dice, - eval, - ) + unpack_targets(this, dice, targets) .await? - .get(dice) + .attrfilter(attr, &|v| Ok(v == value)) + .map(StarlarkTargetSet::from) + } + .boxed_local() + }) + }) + } + + /// The nattrfilter query for rule attribute filtering. + /// It is the opposite of `attrfilter`, i.e. it filters targets by attribute but excludes those that match. + fn nattrfilter<'v>( + this: &StarlarkCQueryCtx<'v>, + attr: &str, + value: &str, + targets: ConfiguredTargetListExprArg<'v>, + ) -> anyhow::Result> { + this.ctx.via_dice(|dice, _| { + dice.via(|dice| { + async { + unpack_targets(this, dice, targets) .await? - .into_iter(), - ctx, - )? - .attrfilter(attr, &|v| Ok(v == value)) - .map(StarlarkTargetSet::from) + .nattrfilter(attr, &|v| Ok(v == value)) + .map(StarlarkTargetSet::from) } .boxed_local() }) @@ -268,28 +261,15 @@ fn cquery_methods(builder: &mut MethodsBuilder) { fn kind<'v>( this: &StarlarkCQueryCtx<'v>, regex: &str, - targets: Value<'v>, - eval: &mut Evaluator<'v, '_>, + targets: ConfiguredTargetListExprArg<'v>, ) -> anyhow::Result> { - this.ctx.via_dice(|mut dice, ctx| { + this.ctx.via_dice(|dice, _| { dice.via(|dice| { async { - filter_incompatible( - TargetExpr::<'v, ConfiguredTargetNode>::unpack( - targets, - &this.target_platform, - ctx, - dice, - eval, - ) + unpack_targets(this, dice, targets) .await? - .get(dice) - .await? - .into_iter(), - ctx, - )? - .kind(regex) - .map(StarlarkTargetSet::from) + .kind(regex) + .map(StarlarkTargetSet::from) } .boxed_local() }) @@ -308,28 +288,15 @@ fn cquery_methods(builder: &mut MethodsBuilder) { this: &StarlarkCQueryCtx<'v>, attribute: &str, value: &str, - targets: Value<'v>, - eval: &mut Evaluator<'v, '_>, + targets: ConfiguredTargetListExprArg<'v>, ) -> anyhow::Result> { - this.ctx.via_dice(|mut dice, ctx| { + this.ctx.via_dice(|dice, _| { dice.via(|dice| { async { - filter_incompatible( - TargetExpr::<'v, ConfiguredTargetNode>::unpack( - targets, - &this.target_platform, - ctx, - dice, - eval, - ) - .await? - .get(dice) + unpack_targets(this, dice, targets) .await? - .into_iter(), - ctx, - )? - .attrregexfilter(attribute, value) - .map(StarlarkTargetSet::from) + .attrregexfilter(attribute, value) + .map(StarlarkTargetSet::from) } .boxed_local() }) @@ -350,32 +317,18 @@ fn cquery_methods(builder: &mut MethodsBuilder) { fn owner<'v>( this: &StarlarkCQueryCtx<'v>, files: FileSetExpr, - #[starlark(default = NoneOr::None)] universe: NoneOr>, - eval: &mut Evaluator<'v, '_>, + #[starlark(default = NoneOr::None)] universe: NoneOr>, ) -> anyhow::Result> { this.ctx - .via_dice(|mut dice, ctx| { + .via_dice(|dice, ctx| { dice.via(|dice| { async { let universe = match universe.into_option() { - Some(universe) => Some(filter_incompatible( - TargetExpr::<'v, ConfiguredTargetNode>::unpack( - universe, - &this.target_platform, - ctx, - dice, - eval, - ) - .await? - .get(dice) - .await? 
- .into_iter(), - ctx, - )?), + Some(universe) => Some(unpack_targets(this, dice, universe).await?), None => None, }; - get_cquery_env(ctx, this.target_platform.dupe()) + get_cquery_env(ctx, &this.global_cfg_options_override) .await? .owner(dice, files.get(ctx).await?.as_ref(), universe.as_ref()) .await @@ -396,44 +349,27 @@ fn cquery_methods(builder: &mut MethodsBuilder) { /// ``` fn deps<'v>( this: &StarlarkCQueryCtx<'v>, - universe: Value<'v>, + universe: ConfiguredTargetListExprArg<'v>, #[starlark(default = NoneOr::None)] depth: NoneOr, #[starlark(default = NoneOr::None)] filter: NoneOr<&'v str>, - eval: &mut Evaluator<'v, '_>, ) -> anyhow::Result> { this.ctx - .via_dice(|mut dice, ctx| { + .via_dice(|dice, ctx| { dice.via(|dice| { async { let filter = filter .into_option() .try_map(buck2_query_parser::parse_expr)?; - let targets = filter_incompatible( - TargetExpr::<'v, ConfiguredTargetNode>::unpack( - universe, - &this.target_platform, - ctx, - dice, - eval, - ) - .await? - .get(dice) - .await? - .into_iter(), - ctx, - )?; + let targets = unpack_targets(this, dice, universe).await?; - get_cquery_env(ctx, this.target_platform.dupe()) + get_cquery_env(ctx, &this.global_cfg_options_override) .await? .deps( dice, &targets, depth.into_option(), - filter - .as_ref() - .map(|span| CapturedExpr { expr: span }) - .as_ref(), + filter.as_ref().map(|expr| CapturedExpr { expr }).as_ref(), ) .await } @@ -454,28 +390,15 @@ fn cquery_methods(builder: &mut MethodsBuilder) { fn filter<'v>( this: &StarlarkCQueryCtx<'v>, regex: &str, - targets: Value<'v>, - eval: &mut Evaluator<'v, '_>, + targets: ConfiguredTargetListExprArg<'v>, ) -> anyhow::Result> { this.ctx - .via_dice(|mut dice, ctx| { + .via_dice(|dice, _| { dice.via(|dice| { async { - filter_incompatible( - TargetExpr::<'v, ConfiguredTargetNode>::unpack( - targets, - &this.target_platform, - ctx, - dice, - eval, - ) + unpack_targets(this, dice, targets) .await? - .get(dice) - .await? - .into_iter(), - ctx, - )? - .filter_name(regex) + .filter_name(regex) } .boxed_local() }) @@ -493,30 +416,12 @@ fn cquery_methods(builder: &mut MethodsBuilder) { /// ``` fn inputs<'v>( this: &StarlarkCQueryCtx<'v>, - targets: Value<'v>, - eval: &mut Evaluator<'v, '_>, + targets: ConfiguredTargetListExprArg<'v>, ) -> anyhow::Result { this.ctx - .via_dice(|mut dice, ctx| { + .via_dice(|dice, _| { dice.via(|dice| { - async { - filter_incompatible( - TargetExpr::<'v, ConfiguredTargetNode>::unpack( - targets, - &this.target_platform, - ctx, - dice, - eval, - ) - .await? - .get(dice) - .await? - .into_iter(), - ctx, - )? - .inputs() - } - .boxed_local() + async { unpack_targets(this, dice, targets).await?.inputs() }.boxed_local() }) }) .map(StarlarkFileSet::from) @@ -525,28 +430,14 @@ fn cquery_methods(builder: &mut MethodsBuilder) { /// The testsof query for listing the tests of the specified targets. fn testsof<'v>( this: &StarlarkCQueryCtx<'v>, - targets: Value<'v>, - eval: &mut Evaluator<'v, '_>, + targets: ConfiguredTargetListExprArg<'v>, ) -> anyhow::Result> { this.ctx - .via_dice(|mut dice, ctx| { + .via_dice(|dice, ctx| { dice.via(|dice| { async { - let targets = filter_incompatible( - TargetExpr::<'v, ConfiguredTargetNode>::unpack( - targets, - &this.target_platform, - ctx, - dice, - eval, - ) - .await? - .get(dice) - .await? - .into_iter(), - ctx, - )?; - get_cquery_env(ctx, this.target_platform.dupe()) + let targets = unpack_targets(this, dice, targets).await?; + get_cquery_env(ctx, &this.global_cfg_options_override) .await? 
.testsof(dice, &targets) .await @@ -561,33 +452,20 @@ fn cquery_methods(builder: &mut MethodsBuilder) { /// resolution under the hood for the tests found. fn testsof_with_default_target_platform<'v>( this: &StarlarkCQueryCtx<'v>, - targets: Value<'v>, - eval: &mut Evaluator<'v, '_>, + targets: ConfiguredTargetListExprArg<'v>, ) -> anyhow::Result> { this.ctx - .via_dice(|mut dice, ctx| { + .via_dice(|dice, ctx| { dice.via(|dice| { async { - let targets = filter_incompatible( - TargetExpr::<'v, ConfiguredTargetNode>::unpack_allow_unconfigured( - targets, - &this.target_platform, - ctx, - dice, - eval, - ) - .await? - .get(dice) - .await? - .into_iter(), - ctx, - )?; - let maybe_compatibles = get_cquery_env(ctx, this.target_platform.dupe()) - .await? - .testsof_with_default_target_platform(dice, &targets) - .await?; + let targets = unpack_targets(this, dice, targets).await?; + let maybe_compatibles = + get_cquery_env(ctx, &this.global_cfg_options_override) + .await? + .testsof_with_default_target_platform(dice, &targets) + .await?; - filter_incompatible(maybe_compatibles.into_iter(), ctx) + filter_incompatible(maybe_compatibles, ctx) } .boxed_local() }) @@ -605,46 +483,29 @@ fn cquery_methods(builder: &mut MethodsBuilder) { /// ``` fn rdeps<'v>( this: &StarlarkCQueryCtx<'v>, - universe: Value<'v>, - from: Value<'v>, + universe: ConfiguredTargetListExprArg<'v>, + from: ConfiguredTargetListExprArg<'v>, depth: Option, - eval: &mut Evaluator<'v, '_>, + #[starlark(default = NoneOr::None)] filter: NoneOr<&'v str>, ) -> anyhow::Result> { this.ctx - .via_dice(|mut dice, ctx| { + .via_dice(|dice, ctx| { dice.via(|dice| { async { - let universe = filter_incompatible( - TargetExpr::<'v, ConfiguredTargetNode>::unpack( - universe, - &this.target_platform, - ctx, - dice, - eval, - ) - .await? - .get(dice) + let filter = filter + .into_option() + .try_map(buck2_query_parser::parse_expr)?; + let universe = unpack_targets(this, dice, universe).await?; + let targets = unpack_targets(this, dice, from).await?; + get_cquery_env(ctx, &this.global_cfg_options_override) .await? - .into_iter(), - ctx, - )?; - let targets = filter_incompatible( - TargetExpr::<'v, ConfiguredTargetNode>::unpack( - from, - &this.target_platform, - ctx, + .rdeps( dice, - eval, + &universe, + &targets, + depth, + filter.as_ref().map(|expr| CapturedExpr { expr }).as_ref(), ) - .await? - .get(dice) - .await? - .into_iter(), - ctx, - )?; - get_cquery_env(ctx, this.target_platform.dupe()) - .await? - .rdeps(dice, &universe, &targets, depth) .await } .boxed_local() @@ -654,7 +515,8 @@ fn cquery_methods(builder: &mut MethodsBuilder) { } /// Evaluates some general query string. `query_args` can be a target_set of unconfigured nodes, or - /// a list of strings. + /// a list of strings. Returns a `dict` of target labels mapped to their `target_set` results if `query_args` + /// was passed in, otherwise returns a single `target_set`. 
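The rewritten `eval` below switches from hand-rolled `unpack_value` checks (and the removed `incorrect_parameter_type_error` fallback) to derived argument unpacking. A standalone sketch of that pattern, using a plain list of strings where the real signature uses `UnpackUnconfiguredQueryArgs`:

```rust
use starlark::values::list_or_tuple::UnpackListOrTuple;
use starlark::values::none::NoneOr;

// Sketch: a `NoneOr<UnpackListOrTuple<String>>` parameter accepts `None`, a
// list, or a tuple of strings; matching on it replaces manual type checks.
fn collect_query_args(query_args: NoneOr<UnpackListOrTuple<String>>) -> Vec<String> {
    match query_args {
        NoneOr::None => Vec::new(),
        NoneOr::Other(args) => args.items,
    }
}
```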
/// /// Sample usage: /// ```text @@ -668,35 +530,16 @@ fn cquery_methods(builder: &mut MethodsBuilder) { fn eval<'v>( this: &StarlarkCQueryCtx<'v>, query: &'v str, - #[starlark(default = NoneOr::None)] query_args: NoneOr>, - #[starlark(default = NoneOr::None)] target_universe: NoneOr>, - eval: &mut Evaluator<'v, '_>, + #[starlark(default = NoneOr::None)] query_args: NoneOr>, + #[starlark(default = NoneOr::None)] target_universe: NoneOr>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result> { - let query_args = if query_args.is_none() { - Vec::new() - } else { - let unwrapped_query_args = query_args.into_option().unwrap(); - if let Some(query_args) = unpack_unconfigured_query_args(unwrapped_query_args)? { - query_args - } else { - let err = Err(ValueError::IncorrectParameterTypeWithExpected( - "list of strings, or a target_set of unconfigured nodes".to_owned(), - query_args.into_option().unwrap().get_type().to_owned(), - ) - .into()); - - if <&StarlarkTargetSet>::unpack_value(unwrapped_query_args) - .is_some() - { - return err - .context("target_set with configured nodes are currently not supported"); - } - - return err; - } + let query_args = match query_args { + NoneOr::None => Vec::new(), + NoneOr::Other(query_args) => query_args.into_strings(), }; - this.ctx.via_dice(|mut dice, ctx| { + this.ctx.via_dice(|dice, ctx| { dice.via(|dice| { async { parse_query_evaluation_result( @@ -705,14 +548,15 @@ fn cquery_methods(builder: &mut MethodsBuilder) { .eval_cquery( dice, &ctx.working_dir()?, - CqueryOwnerBehavior::Correct, query, &query_args, - this.target_platform.dupe(), - target_universe.into_option().as_ref().map(|v| &v[..]), + this.global_cfg_options_override.clone(), + target_universe.into_option().as_ref().map(|v| &v.items[..]), + false, ) - .await?, - eval, + .await? + .0, + eval.heap(), ) } .boxed_local() @@ -731,28 +575,13 @@ fn cquery_methods(builder: &mut MethodsBuilder) { /// ``` fn buildfile<'v>( this: &StarlarkCQueryCtx<'v>, - targets: Value<'v>, - eval: &mut Evaluator<'v, '_>, + targets: ConfiguredTargetListExprArg<'v>, ) -> anyhow::Result { this.ctx - .via_dice(|mut dice, ctx| { + .via_dice(|dice, _| { dice.via(|dice| { async { - let targets = &filter_incompatible( - TargetExpr::<'v, ConfiguredTargetNode>::unpack( - targets, - &this.target_platform, - ctx, - dice, - eval, - ) - .await? - .get(dice) - .await? - .into_iter(), - ctx, - )?; - + let targets = unpack_targets(this, dice, targets).await?; Ok(targets.buildfile()) } .boxed_local() diff --git a/app/buck2_bxl/src/bxl/starlark_defs/eval_extra.rs b/app/buck2_bxl/src/bxl/starlark_defs/eval_extra.rs new file mode 100644 index 0000000000000..47ac62a2d8c71 --- /dev/null +++ b/app/buck2_bxl/src/bxl/starlark_defs/eval_extra.rs @@ -0,0 +1,93 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use std::cell::RefCell; +use std::io::Write; +use std::rc::Rc; + +use buck2_events::dispatch::console_message; +use starlark::eval::Evaluator; +use starlark::values::ProvidesStaticType; + +use crate::bxl::starlark_defs::context::starlark_async::BxlDiceComputations; +use crate::bxl::starlark_defs::context::BxlContextCoreData; +use crate::bxl::starlark_defs::context::ErrorPrinter; + +enum BxlEvalExtraType { + Root { error_sink: Rc> }, + Dynamic, +} + +/// A tag that is only available when running in Bxl, to guard Bxl +/// functions from a non-Bxl context. +#[derive(ProvidesStaticType)] +pub(crate) struct BxlEvalExtra<'e> { + pub(crate) dice: Rc>, + core: Rc, + eval_extra_type: BxlEvalExtraType, +} + +#[derive(Debug, buck2_error::Error)] +pub(crate) enum BxlContextError { + #[error("This function can only be called from Bxl")] + UnavailableOutsideBxl, +} + +impl<'e> BxlEvalExtra<'e> { + pub(crate) fn new( + dice: Rc>, + core: Rc, + error_sink: Rc>, + ) -> Self { + Self { + dice, + core, + eval_extra_type: BxlEvalExtraType::Root { error_sink }, + } + } + + pub(crate) fn new_dynamic( + dice: Rc>, + core: Rc, + ) -> Self { + Self { + dice, + core, + eval_extra_type: BxlEvalExtraType::Dynamic, + } + } + + pub(crate) fn from_context<'v, 'a>( + eval: &Evaluator<'v, 'a, 'e>, + ) -> anyhow::Result<&'a BxlEvalExtra<'e>> { + let f = || eval.extra?.downcast_ref::(); + f().ok_or_else(|| BxlContextError::UnavailableOutsideBxl.into()) + } + + pub(crate) fn via_dice<'a, T>( + &'a self, + f: impl for<'x> FnOnce( + &'x mut dyn BxlDiceComputations, + &'a BxlContextCoreData, + ) -> anyhow::Result, + ) -> anyhow::Result { + let core = &self.core; + f(&mut *self.dice.borrow_mut(), core) + } +} + +impl<'e> ErrorPrinter for BxlEvalExtra<'e> { + fn print_to_error_stream(&self, msg: String) -> anyhow::Result<()> { + match &self.eval_extra_type { + BxlEvalExtraType::Root { error_sink } => writeln!(error_sink.borrow_mut(), "{}", msg)?, + BxlEvalExtraType::Dynamic => console_message(msg), + } + Ok(()) + } +} diff --git a/app/buck2_bxl/src/bxl/starlark_defs/event.rs b/app/buck2_bxl/src/bxl/starlark_defs/event.rs index 7ec6ae548b49c..98172c11d437b 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/event.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/event.rs @@ -8,7 +8,6 @@ */ //! Parse some inputs to a `[`StarlarkUserEvent`]. -//! use std::collections::HashMap; @@ -24,15 +23,16 @@ use buck2_data::StarlarkUserMetadataDictValue; use buck2_data::StarlarkUserMetadataListValue; use buck2_data::StarlarkUserMetadataValue; use starlark::values::dict::DictRef; +use starlark::values::float::UnpackFloat; use starlark::values::list::ListRef; use starlark::values::UnpackValue; use starlark::values::Value; -use thiserror::Error; +use starlark::StarlarkResultExt; use super::artifacts::EnsuredArtifact; use super::context::output::get_artifact_path_display; -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] enum StarlarkUserEventUnpack { #[error( "Metadata should be a dict where keys are strings, and values are strings, ints, bools, or dicts/lists of the mentioned types. Got type: `{0}`" @@ -109,11 +109,11 @@ impl<'v> StarlarkUserEventParser<'v> { value: Some(IntValue(v)), }) // Let's also accept floats since `instant()` methods return floats, but cast them to ints - } else if let Some(v) = f64::unpack_value(v) { + } else if let Some(v) = UnpackFloat::unpack_value(v).into_anyhow_result()? 
{ Ok(StarlarkUserMetadataValue { - value: Some(IntValue(v as i32)), + value: Some(IntValue(v.0 as i32)), }) - } else if let Some(v) = <&EnsuredArtifact>::unpack_value(v) { + } else if let Some(v) = <&EnsuredArtifact>::unpack_value(v).into_anyhow_result()? { let path = get_artifact_path_display( v.get_artifact_path(), v.abs(), diff --git a/app/buck2_bxl/src/bxl/starlark_defs/file_expr.rs b/app/buck2_bxl/src/bxl/starlark_defs/file_expr.rs index 42ed682bd0efe..153d510d7e173 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/file_expr.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/file_expr.rs @@ -8,11 +8,12 @@ */ use std::borrow::Cow; +use std::convert::Infallible; use std::path::Path; use buck2_artifact::artifact::source_artifact::SourceArtifact; +use buck2_build_api::interpreter::rule_defs::artifact::starlark_artifact::StarlarkArtifact; use buck2_build_api::interpreter::rule_defs::artifact::starlark_artifact_like::ValueAsArtifactLike; -use buck2_build_api::interpreter::rule_defs::artifact::StarlarkArtifact; use buck2_common::dice::cells::HasCellResolver; use buck2_common::dice::data::HasIoProvider; use buck2_core::cells::cell_path::CellPath; @@ -21,7 +22,7 @@ use buck2_core::cells::paths::CellRelativePath; use buck2_core::cells::CellAliasResolver; use buck2_core::fs::paths::abs_path::AbsPath; use buck2_core::fs::project_rel_path::ProjectRelativePath; -use buck2_core::pattern::maybe_split_cell_alias_and_relative_path; +use buck2_core::pattern::pattern::maybe_split_cell_alias_and_relative_path; use derive_more::Display; use dice::DiceComputations; use dupe::Dupe; @@ -38,17 +39,27 @@ pub(crate) struct SourceArtifactUnpack { } impl StarlarkTypeRepr for SourceArtifactUnpack { + type Canonical = ::Canonical; + fn starlark_type_repr() -> Ty { StarlarkArtifact::starlark_type_repr() } } impl<'v> UnpackValue<'v> for SourceArtifactUnpack { - fn unpack_value(value: Value<'v>) -> Option { - let v = ValueAsArtifactLike::unpack_value(value)?; - Some(SourceArtifactUnpack { - artifact: v.0.get_bound_artifact().ok()?.get_source()?, - }) + type Error = Infallible; + + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + let Some(v) = ValueAsArtifactLike::unpack_value_opt(value) else { + return Ok(None); + }; + let Some(bound_artifact) = v.0.get_bound_artifact().ok() else { + return Ok(None); + }; + let Some(artifact) = bound_artifact.get_source() else { + return Ok(None); + }; + Ok(Some(SourceArtifactUnpack { artifact })) } } @@ -82,15 +93,14 @@ fn parse_cell_path_as_file_expr_literal( impl<'a> FileExpr<'a> { pub(crate) async fn get( self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, cell_instance: &CellInstance, ) -> anyhow::Result { match self { FileExpr::Literal(val) => { - match parse_cell_path_as_file_expr_literal( - val, - cell_instance.cell_alias_resolver(), - )? { + let cell_alias_resolver = + dice.get_cell_alias_resolver(cell_instance.name()).await?; + match parse_cell_path_as_file_expr_literal(val, &cell_alias_resolver)? 
{ Some(cell_path) => Ok(cell_path), None => { let fs = dice.global_data().get_io_provider().project_root().dupe(); @@ -115,7 +125,6 @@ mod tests { use buck2_core::cells::alias::NonEmptyCellAlias; use buck2_core::cells::name::CellName; - use buck2_core::cells::CellAliasResolver; use maplit::hashmap; use super::*; diff --git a/app/buck2_bxl/src/bxl/starlark_defs/file_set.rs b/app/buck2_bxl/src/bxl/starlark_defs/file_set.rs index 32bfcceb19e33..ed0f9117292fe 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/file_set.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/file_set.rs @@ -22,7 +22,12 @@ use display_container::fmt_container; use gazebo::prelude::VecExt; use indexmap::IndexSet; use starlark::any::ProvidesStaticType; +use starlark::environment::Methods; +use starlark::environment::MethodsBuilder; +use starlark::environment::MethodsStatic; +use starlark::starlark_module; use starlark::starlark_simple_value; +use starlark::values::list_or_tuple::UnpackListOrTuple; use starlark::values::starlark_value; use starlark::values::type_repr::StarlarkTypeRepr; use starlark::values::Heap; @@ -31,7 +36,7 @@ use starlark::values::StarlarkValue; use starlark::values::UnpackValue; use starlark::values::Value; use starlark::values::ValueError; -use starlark::StarlarkDocs; +use starlark::values::ValueLike; use crate::bxl::starlark_defs::context::BxlContextNoDice; @@ -42,7 +47,7 @@ use crate::bxl::starlark_defs::context::BxlContextNoDice; #[derive(StarlarkTypeRepr, UnpackValue)] pub(crate) enum FileSetExpr<'v> { Literal(&'v str), - Literals(Vec<&'v str>), + Literals(UnpackListOrTuple<&'v str>), FileSet(&'v StarlarkFileSet), } @@ -54,7 +59,7 @@ impl<'a> FileSetExpr<'a> { )])), FileSetExpr::Literals(val) => { let mut file_set = FileSet::new(IndexSet::new()); - for arg in val.iter() { + for arg in &val { file_set.insert(FileNode(bxl.parse_query_file_literal(arg)?)); } Cow::Owned(file_set) @@ -65,9 +70,8 @@ impl<'a> FileSetExpr<'a> { } } -#[derive(Debug, Display, ProvidesStaticType, Allocative, StarlarkDocs)] +#[derive(Debug, Display, ProvidesStaticType, Allocative)] #[derive(NoSerialize)] // TODO maybe this should be -#[starlark_docs(directory = "bxl")] pub(crate) struct StarlarkFileSet( /// Set of files or directories. 
pub(crate) FileSet, @@ -77,7 +81,7 @@ starlark_simple_value!(StarlarkFileSet); #[starlark_value(type = "file_set")] impl<'v> StarlarkValue<'v> for StarlarkFileSet { - fn iterate_collect(&self, heap: &'v Heap) -> anyhow::Result>> { + fn iterate_collect(&self, heap: &'v Heap) -> starlark::Result>> { Ok(self .0 .iter() @@ -85,24 +89,60 @@ impl<'v> StarlarkValue<'v> for StarlarkFileSet { .collect()) } - fn at(&self, index: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + fn at(&self, index: Value<'v>, heap: &'v Heap) -> starlark::Result> { let i = i32::unpack_value_err(index)?; if let Ok(i) = usize::try_from(i) { if let Some(cell_path) = self.0.get_index(i) { return Ok(heap.alloc(StarlarkFileNode(cell_path.clone()))); } } - Err(anyhow::anyhow!(ValueError::IndexOutOfBound(i))) + Err(ValueError::IndexOutOfBound(i).into()) } - fn length(&self) -> anyhow::Result { - match i32::try_from(self.0.len()) { - Ok(l) => Ok(l), - Err(e) => Err(e.into()), + fn length(&self) -> starlark::Result { + i32::try_from(self.0.len()).map_err(starlark::Error::new_other) + } + + fn add(&self, other: Value<'v>, heap: &'v Heap) -> Option>> { + let other = other.downcast_ref::()?; + let union = self.0.union(&other.0); + Some(Ok(heap.alloc(Self(union)))) + } + + fn sub(&self, other: Value<'v>, heap: &'v Heap) -> starlark::Result> { + let Some(other) = other.downcast_ref::() else { + return ValueError::unsupported_with(self, "-", other); + }; + let difference = self.0.difference(&other.0)?; + Ok(heap.alloc(Self(difference))) + } + + fn equals(&self, other: Value<'v>) -> starlark::Result { + match other.downcast_ref::() { + Some(other) => Ok(self.0 == other.0), + None => Ok(false), } } + + fn bit_and(&self, other: Value<'v>, heap: &'v Heap) -> starlark::Result> { + let Some(other) = other.downcast_ref::() else { + return ValueError::unsupported_with(self, "&", other); + }; + let intersect = self.0.intersect(&other.0)?; + Ok(heap.alloc(Self(intersect))) + } + + fn get_methods() -> Option<&'static Methods> { + static RES: MethodsStatic = MethodsStatic::new(); + RES.methods(register_file_set) + } } +/// A set of `file_node`s. Supports the operations such as set addition/subtraction, length, +/// iteration, equality and indexing. +#[starlark_module] +pub(crate) fn register_file_set(globals: &mut MethodsBuilder) {} + impl From for StarlarkFileSet { fn from(v: FileSet) -> Self { Self(v) @@ -117,22 +157,38 @@ impl Deref for StarlarkFileSet { } } -#[derive(Debug, Display, ProvidesStaticType, Clone, Allocative, StarlarkDocs)] +#[derive(Debug, Display, ProvidesStaticType, Clone, Allocative)] #[derive(NoSerialize)] -#[starlark_docs(directory = "bxl")] -pub(crate) struct StarlarkFileNode( - /// Cell path to the file or directory. - pub(crate) CellPath, -); +pub(crate) struct StarlarkFileNode(pub(crate) CellPath); starlark_simple_value!(StarlarkFileNode); -#[starlark_value(type = "file_node")] -impl<'v> StarlarkValue<'v> for StarlarkFileNode {} +#[starlark_value(type = "bxl.FileNode")] +impl<'v> StarlarkValue<'v> for StarlarkFileNode { + fn get_methods() -> Option<&'static Methods> { + static RES: MethodsStatic = MethodsStatic::new(); + RES.methods(file_node_methods) + } +} + +/// Wrapper around the cell relative path to the file or directory. +#[starlark_module] +pub(crate) fn file_node_methods(methods: &mut MethodsBuilder) { + /// The cell relative path as a string. + #[starlark(attribute)] + fn path<'v>(this: &StarlarkFileNode) -> anyhow::Result<&'v str> { + Ok(this.0.path().as_str()) + } + + /// The cell name for the file_node. 
+ #[starlark(attribute)] + fn cell<'v>(this: &StarlarkFileNode) -> anyhow::Result<&'v str> { + Ok(this.0.cell().as_str()) + } +} -#[derive(Debug, ProvidesStaticType, Clone, Allocative, StarlarkDocs)] +#[derive(Debug, ProvidesStaticType, Clone, Allocative)] #[derive(NoSerialize)] -#[starlark_docs(directory = "bxl")] pub(crate) struct StarlarkReadDirSet { /// Cell path to the directory/files. pub(crate) cell_path: CellPath, @@ -171,7 +227,7 @@ impl fmt::Display for StarlarkReadDirSet { #[starlark_value(type = "read_dir_set")] impl<'v> StarlarkValue<'v> for StarlarkReadDirSet { - fn iterate_collect(&self, heap: &'v Heap) -> anyhow::Result>> { + fn iterate_collect(&self, heap: &'v Heap) -> starlark::Result>> { Ok(self .children()? .into_map(|cell_path| heap.alloc(StarlarkFileNode(cell_path)))) diff --git a/app/buck2_bxl/src/bxl/starlark_defs/functions.rs b/app/buck2_bxl/src/bxl/starlark_defs/functions.rs index 02e5a5a39fd34..98ed2c40c1e87 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/functions.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/functions.rs @@ -9,65 +9,109 @@ use std::time::Instant; -use buck2_build_api::interpreter::rule_defs::artifact::StarlarkArtifact; +use buck2_build_api::interpreter::rule_defs::artifact::starlark_artifact_like::ValueAsArtifactLikeUnpack; use buck2_build_api::interpreter::rule_defs::cmd_args::value_as::ValueAsCommandLineLike; use buck2_node::nodes::configured::ConfiguredTargetNode; use buck2_node::nodes::unconfigured::TargetNode; -use buck2_query::query::syntax::simple::eval::set::TargetSet; +use buck2_query::query::syntax::simple::eval::file_set::FileSet; use dupe::Dupe; use futures::FutureExt; +use indexmap::IndexSet; use starlark::environment::GlobalsBuilder; +use starlark::eval::Evaluator; use starlark::starlark_module; +use starlark::values::list::UnpackList; use starlark::values::none::NoneType; +use starlark::values::tuple::UnpackTuple; use starlark::values::Heap; use starlark::values::StringValue; use starlark::values::Value; use starlark::values::ValueLike; -use thiserror::Error; use super::artifacts::visit_artifact_path_without_associated_deduped; use super::context::output::get_artifact_path_display; use super::context::output::get_cmd_line_inputs; +use super::nodes::unconfigured::StarlarkTargetNode; use crate::bxl::starlark_defs::context::BxlContext; +use crate::bxl::starlark_defs::eval_extra::BxlEvalExtra; +use crate::bxl::starlark_defs::file_set::StarlarkFileSet; +use crate::bxl::starlark_defs::nodes::configured::StarlarkConfiguredTargetNode; use crate::bxl::starlark_defs::targetset::StarlarkTargetSet; use crate::bxl::starlark_defs::time::StarlarkInstant; /// Global methods on the target set. #[starlark_module] pub(crate) fn register_target_function(builder: &mut GlobalsBuilder) { - /// Creates an empty target set for configured nodes. + /// Creates a target set from a list of configured nodes. /// /// Sample usage: /// ```text /// def _impl_ctarget_set(ctx): - /// targets = ctarget_set() + /// targets = bxl.ctarget_set([cnode_a, cnode_b]) /// ctx.output.print(type(targets)) /// ctx.output.print(len(targets)) /// ``` - fn ctarget_set() -> anyhow::Result> { - Ok(StarlarkTargetSet::from(TargetSet::new())) + fn ctarget_set( + nodes: Option>, + ) -> anyhow::Result> { + Ok(StarlarkTargetSet::from_iter( + nodes + .unwrap_or(UnpackList::default()) + .items + .into_iter() + .map(|node| node.0), + )) } - /// Creates an empty target set for unconfigured nodes. + /// Creates a target set from a list of unconfigured nodes. 
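Both `ctarget_set` and `utarget_set` now share the same from-list shape, shown in the sketch below (relying on the same imports as this file, e.g. `UnpackList` and `StarlarkTargetSet`):

```rust
// Sketch: an omitted `nodes` argument behaves like the old zero-argument
// constructor and yields an empty target set.
fn to_target_set(
    nodes: Option<UnpackList<StarlarkTargetNode>>,
) -> StarlarkTargetSet<TargetNode> {
    StarlarkTargetSet::from_iter(
        nodes
            .unwrap_or_default()
            .items
            .into_iter()
            .map(|node| node.0),
    )
}
```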
 ///
     /// Sample usage:
     /// ```text
     /// def _impl_utarget_set(ctx):
-    ///     targets = utarget_set()
+    ///     targets = bxl.utarget_set([unode_a, unode_b])
     ///     ctx.output.print(type(targets))
     ///     ctx.output.print(len(targets))
     /// ```
-    fn utarget_set() -> anyhow::Result<StarlarkTargetSet<TargetNode>> {
-        Ok(StarlarkTargetSet::from(TargetSet::new()))
+    fn utarget_set(
+        nodes: Option<UnpackList<StarlarkTargetNode>>,
+    ) -> anyhow::Result<StarlarkTargetSet<TargetNode>> {
+        Ok(StarlarkTargetSet::from_iter(
+            nodes
+                .unwrap_or(UnpackList::default())
+                .items
+                .into_iter()
+                .map(|node| node.0),
+        ))
     }
 }

+/// Global methods on the file set.
+#[starlark_module]
+pub(crate) fn register_file_set_function(builder: &mut GlobalsBuilder) {
+    /// Creates an empty file set.
+    ///
+    /// Sample usage:
+    /// ```text
+    /// def _impl_file_set(ctx):
+    ///     files = file_set()
+    ///     ctx.output.print(type(files))
+    ///     ctx.output.print(len(files))
+    /// ```
+    fn file_set() -> anyhow::Result<StarlarkFileSet> {
+        Ok(StarlarkFileSet(FileSet::new(IndexSet::new())))
+    }
+}
+
+#[derive(Debug, buck2_error::Error, Clone)]
+#[error("Promise artifacts are not supported in `get_path_without_materialization()`")]
+pub(crate) struct PromiseArtifactsNotSupported;
+
 /// Global methods on artifacts.
 #[starlark_module]
 pub(crate) fn register_artifact_function(builder: &mut GlobalsBuilder) {
-    /// The output path of a source or build artifact. Takes an optional boolean to print the absolute or relative path.
-    /// Note that this method returns an artifact path without asking for the artifact to be materialized,
-    /// (i.e. it may not actually exist on the disk yet).
+    /// The output path of an artifact-like (source, build, declared). Takes an optional boolean to print the
+    /// absolute or relative path. Note that this method returns an artifact path without asking for the artifact
+    /// to be materialized (i.e. it may not actually exist on the disk yet).
     ///
     /// This is a risky function to call because you may accidentally pass this path to further BXL actions
     /// that expect the artifact to be materialized. If this happens, the BXL script will error out.
@@ -83,17 +127,29 @@ pub(crate) fn register_artifact_function(builder: &mut GlobalsBuilder) {
     ///     ctx.output.print(source_artifact_project_rel_path) # Note this artifact is NOT ensured or materialized
     /// ```
     fn get_path_without_materialization<'v>(
-        this: &'v StarlarkArtifact,
-        ctx: &'v BxlContext<'v>,
+        #[starlark(require=pos)] this: ValueAsArtifactLikeUnpack<'v>,
+        #[starlark(require=pos)] ctx: &'v BxlContext<'v>,
         #[starlark(require = named, default = false)] abs: bool,
         heap: &'v Heap,
     ) -> anyhow::Result<StringValue<'v>> {
-        let path = get_artifact_path_display(
-            this.artifact().get_path(),
-            abs,
-            &ctx.data.project_fs,
-            &ctx.data.artifact_fs,
-        )?;
+        let path = match this {
+            ValueAsArtifactLikeUnpack::Artifact(a) => {
+                let artifact = a.artifact();
+                get_artifact_path_display(
+                    artifact.get_path(),
+                    abs,
+                    ctx.project_fs(),
+                    ctx.artifact_fs(),
+                )?
+ } + ValueAsArtifactLikeUnpack::DeclaredArtifact(a) => get_artifact_path_display( + a.get_artifact_path(), + abs, + ctx.project_fs(), + ctx.artifact_fs(), + )?, + _ => return Err(PromiseArtifactsNotSupported.into()), + }; Ok(heap.alloc_str(&path)) } @@ -117,47 +173,42 @@ pub(crate) fn register_artifact_function(builder: &mut GlobalsBuilder) { /// ctx.output.print(path) /// ``` fn get_paths_without_materialization<'v>( - this: Value<'v>, - ctx: &'v BxlContext<'v>, + #[starlark(require=pos)] cmd_line: ValueAsCommandLineLike<'v>, + #[starlark(require=pos)] ctx: &'v BxlContext<'v>, #[starlark(require = named, default = false)] abs: bool, heap: &'v Heap, ) -> anyhow::Result> { - match this.as_command_line() { - Some(cmd_line) => { - let inputs = get_cmd_line_inputs(cmd_line)?; - let mut result = Vec::new(); + let inputs = get_cmd_line_inputs(cmd_line.0)?; + let mut result = Vec::new(); - for artifact_group in &inputs.inputs { - result.push(artifact_group.dupe()); - } + for artifact_group in &inputs.inputs { + result.push(artifact_group.dupe()); + } - let mut paths = Vec::new(); + let mut paths = Vec::new(); - let _result = ctx.via_dice(|mut dice_ctx, bxl_ctx| { - dice_ctx.via(|dice_ctx| { - visit_artifact_path_without_associated_deduped( - &result, + ctx.via_dice(|dice_ctx, bxl_ctx| { + dice_ctx.via(|dice_ctx| { + visit_artifact_path_without_associated_deduped( + &result, + abs, + |artifact_path, abs| { + let path = get_artifact_path_display( + artifact_path, abs, - |artifact_path, abs| { - let path = get_artifact_path_display( - artifact_path, - abs, - &bxl_ctx.project_fs, - &bxl_ctx.artifact_fs, - )?; - - paths.push(path); - Ok(()) - }, - dice_ctx, - ) - .boxed_local() - }) - }); - Ok(heap.alloc(paths)) - } - None => Err(anyhow::anyhow!("Expected a cmd_args()")), - } + bxl_ctx.project_fs(), + bxl_ctx.artifact_fs(), + )?; + + paths.push(path); + Ok(()) + }, + dice_ctx, + ) + .boxed_local() + }) + })?; + Ok(heap.alloc(paths)) } } @@ -177,7 +228,10 @@ pub(crate) fn register_instant_function(builder: &mut GlobalsBuilder) { /// ctx.output.print(time_a) /// ctx.output.print(time_b) /// ``` - fn now() -> anyhow::Result { + /// + /// This function is only accessible through Bxl. + fn now(eval: &mut Evaluator) -> anyhow::Result { + BxlEvalExtra::from_context(eval)?; Ok(StarlarkInstant(Instant::now())) } } @@ -185,14 +239,14 @@ pub(crate) fn register_instant_function(builder: &mut GlobalsBuilder) { /// This is used to mark the error returned by `fail_no_stacktrace()` (via context chaining). /// We check if this marker is present after finishing BXL evaluation. If this marker is present, /// then we hide the stacktrace. Otherwise, we emit the stacktrace to users. -#[derive(Debug, Error, Clone)] +#[derive(Debug, buck2_error::Error, Clone)] #[error("fail:{0}")] pub(crate) struct BxlErrorWithoutStacktrace(String); /// Global method for error handling. #[starlark_module] pub(crate) fn register_error_handling_function(builder: &mut GlobalsBuilder) { - fn fail_no_stacktrace(#[starlark(args)] args: Vec) -> anyhow::Result { + fn fail_no_stacktrace(#[starlark(args)] args: UnpackTuple) -> anyhow::Result { let mut s = String::new(); for x in args { s.push(' '); diff --git a/app/buck2_bxl/src/bxl/starlark_defs/globals.rs b/app/buck2_bxl/src/bxl/starlark_defs/globals.rs index 65ef8f4646c03..d2c1c5e6c39c4 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/globals.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/globals.rs @@ -9,38 +9,41 @@ //! Starlark globals for BXL. 
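The hunk below replaces `g.struct_(...)` with `g.namespace(...)` when grouping globals. A small self-contained sketch of namespace registration with this builder API, using hypothetical `demo`/`hello` names:

```rust
use starlark::environment::GlobalsBuilder;
use starlark::starlark_module;

#[starlark_module]
fn hello_funcs(g: &mut GlobalsBuilder) {
    /// A throwaway function, reachable as `demo.hello()` from Starlark.
    fn hello() -> anyhow::Result<String> {
        Ok("hello".to_owned())
    }
}

fn build_globals() -> starlark::environment::Globals {
    let mut g = GlobalsBuilder::standard();
    // Group the functions under a `demo` namespace instead of a struct value.
    g.namespace("demo", hello_funcs);
    g.build()
}
```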
-use buck2_interpreter::bxl::BXL_SPECIFIC_GLOBALS; +use buck2_interpreter::downstream_crate_starlark_defs::REGISTER_BUCK2_BXL_GLOBALS; use starlark::environment::GlobalsBuilder; -use crate::bxl::starlark_defs::bxl_function::register_bxl_function; +use crate::bxl::starlark_defs::bxl_function::register_bxl_main_function; +use crate::bxl::starlark_defs::bxl_function::register_bxl_prefixed_main_function; use crate::bxl::starlark_defs::cli_args; use crate::bxl::starlark_defs::functions::register_artifact_function; use crate::bxl::starlark_defs::functions::register_error_handling_function; +use crate::bxl::starlark_defs::functions::register_file_set_function; use crate::bxl::starlark_defs::functions::register_instant_function; use crate::bxl::starlark_defs::functions::register_target_function; -use crate::bxl::starlark_defs::type_names::register_bxl_type_names; use crate::bxl::starlark_defs::type_names::register_bxl_type_names_in_bxl_namespace; fn bxl_namespace(g: &mut GlobalsBuilder) { - g.struct_("cli_args", cli_args::register_cli_args_module); + register_bxl_main_function(g); + g.namespace("cli_args", cli_args::register_cli_args_module); // TODO(nga): add `main` function here. register_artifact_function(g); register_target_function(g); + register_file_set_function(g); register_instant_function(g); register_error_handling_function(g); register_bxl_type_names_in_bxl_namespace(g); } pub(crate) fn init_bxl_specific_globals() { - BXL_SPECIFIC_GLOBALS.init(|g| { - g.struct_("bxl", bxl_namespace); + REGISTER_BUCK2_BXL_GLOBALS.init(|g| { + g.namespace("bxl", bxl_namespace); // TODO(nga): move these into `bxl` namespace. - g.struct_("cli_args", cli_args::register_cli_args_module); - register_bxl_function(g); + g.namespace("cli_args", cli_args::register_cli_args_module); + register_bxl_prefixed_main_function(g); register_artifact_function(g); register_target_function(g); + register_file_set_function(g); register_instant_function(g); register_error_handling_function(g); - register_bxl_type_names(g); }); } diff --git a/app/buck2_bxl/src/bxl/starlark_defs/lazy_ctx.rs b/app/buck2_bxl/src/bxl/starlark_defs/lazy_ctx.rs new file mode 100644 index 0000000000000..e5a1b53cc6f78 --- /dev/null +++ b/app/buck2_bxl/src/bxl/starlark_defs/lazy_ctx.rs @@ -0,0 +1,213 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use std::sync::Arc; + +use allocative::Allocative; +use buck2_common::global_cfg_options::GlobalCfgOptions; +use derivative::Derivative; +use derive_more::Display; +use dupe::Dupe; +use operation::StarlarkLazy; +use starlark::any::ProvidesStaticType; +use starlark::environment::Methods; +use starlark::environment::MethodsBuilder; +use starlark::environment::MethodsStatic; +use starlark::starlark_module; +use starlark::values::list::UnpackList; +use starlark::values::starlark_value; +use starlark::values::AllocValue; +use starlark::values::Heap; +use starlark::values::NoSerialize; +use starlark::values::StarlarkValue; +use starlark::values::Trace; +use starlark::values::Value; +use starlark::values::ValueTyped; + +use crate::bxl::starlark_defs::context::BxlContext; +use crate::bxl::starlark_defs::providers_expr::ConfiguredProvidersLabelArg; +use crate::bxl::starlark_defs::target_list_expr::ConfiguredTargetNodeArg; +use crate::bxl::starlark_defs::target_list_expr::OwnedConfiguredTargetNodeArg; +use crate::bxl::starlark_defs::target_list_expr::OwnedTargetNodeArg; +use crate::bxl::starlark_defs::target_list_expr::TargetNodeOrTargetLabelOrStr; +use crate::bxl::value_as_starlark_target_label::ValueAsStarlarkTargetLabel; + +pub(crate) mod operation; + +/// Context for lazy/batch/error handling operations. +/// Available as `ctx.lazy`, has type `bxl.LazyContext`. +#[derive( + ProvidesStaticType, + Derivative, + Display, + Trace, + NoSerialize, + Allocative +)] +#[derivative(Debug)] +#[display("bxl.LazyContext")] +#[allocative(skip)] +pub(crate) struct StarlarkLazyCtx<'v> { + #[derivative(Debug = "ignore")] + ctx: ValueTyped<'v, BxlContext<'v>>, +} + +impl<'v> AllocValue<'v> for StarlarkLazyCtx<'v> { + fn alloc_value(self, heap: &'v Heap) -> Value<'v> { + heap.alloc_complex_no_freeze(self) + } +} + +impl<'v> StarlarkLazyCtx<'v> { + pub(crate) fn new(ctx: ValueTyped<'v, BxlContext<'v>>) -> Self { + Self { ctx } + } +} + +#[starlark_value(type = "bxl.LazyContext", StarlarkTypeRepr, UnpackValue)] +impl<'v> StarlarkValue<'v> for StarlarkLazyCtx<'v> { + fn get_methods() -> Option<&'static Methods> { + static RES: MethodsStatic = MethodsStatic::new(); + RES.methods(lazy_ctx_methods) + } +} + +#[starlark_module] +fn lazy_ctx_methods(builder: &mut MethodsBuilder) { + /// Join two lazy operations into a single operation that can be evaluated. + /// + /// Example: + /// ```text + /// def _impl(ctx): + /// ... + /// joined = ctx.lazy.join(ctx.lazy.analysis(t1), ctx.lazy.analysis(t2)) + /// (res1, res2) = joined.resolve() + /// ctx.output.print(res1) + /// ctx.output.print(res2) + /// ``` + fn join<'v>( + #[starlark(this)] _this: &'v StarlarkLazyCtx, + #[starlark(require = pos)] lazy0: &'v StarlarkLazy, + #[starlark(require = pos)] lazy1: &'v StarlarkLazy, + ) -> anyhow::Result { + Ok(StarlarkLazy::new_join(lazy0.dupe(), lazy1.dupe())) + } + + /// Join a list of lazy operations into a single operation that can be evaluated. + /// This is useful when you want to evaluate multiple operations in parallel. + /// Using `.try_resolve()` can catch errors for the individual operations. + /// + /// Example: + /// ```text + /// def _impl(ctx): + /// ... 
+    ///     joined = ctx.lazy.join_all([ctx.lazy.analysis(t) for t in targets])
+    ///     analysis_results = joined.resolve()
+    ///     ctx.output.print(analysis_results)
+    /// ```
+    fn join_all<'v>(
+        #[starlark(this)] _this: &'v StarlarkLazyCtx,
+        #[starlark(require = pos)] operations: UnpackList<&StarlarkLazy>,
+    ) -> anyhow::Result<StarlarkLazy> {
+        Ok(StarlarkLazy::new_batch(
+            operations.into_iter().map(|o| o.dupe()),
+        ))
+    }
+
+    /// Analyze a target lazily. This will return a lazy operation that can be evaluated later.
+    /// The target should be a ConfiguredTargetLabel, a ConfiguredProvidersLabel, or a ConfiguredTargetNode.
+    ///
+    /// Example:
+    /// ```text
+    /// def _impl(ctx):
+    ///     target = ctx.configured_targets("cell//path/to:target")
+    ///     analysis_result = ctx.lazy.analysis(target).resolve()
+    ///     (analysis_result, err) = ctx.lazy.analysis(target).try_resolve()
+    /// ```
+    fn analysis<'v>(
+        #[starlark(this)] _this: &'v StarlarkLazyCtx,
+        #[starlark(require = pos)] label: ConfiguredProvidersLabelArg<'v>,
+    ) -> anyhow::Result<StarlarkLazy> {
+        let configured_providers_label = label.configured_providers_label();
+        Ok(StarlarkLazy::new_analysis(configured_providers_label))
+    }
+
+    /// Gets the configured target node for the `expr`.
+    /// If given a string target pattern, it will resolve to a target set of configured target nodes.
+    /// It also accepts an optional `target_platform` and an optional modifiers list, which are used
+    /// to resolve configurations of any unconfigured target nodes.
+    /// The `target_platform` is either a string that can be parsed as a target label, or a
+    /// target label.
+    ///
+    /// The given `expr` is either:
+    ///     - a single string that is a target or a target pattern.
+    ///     - a single target node or label, configured or unconfigured
+    ///
+    /// Note that this function does not accept `ConfiguredProviderLabel`, since this
+    /// is the label of a subtarget. You can get the underlying configured target label on the `Label`
+    /// using `configured_target()` (ex: `my_label.configured_target()`).
+    ///
+    /// This returns either a target set of `ConfiguredTargetNode`s if the given `expr` is a target pattern string,
+    /// else a single `ConfiguredTargetNode`.
+    ///
+    /// When given a target pattern (returning a target set), it will print a warning message for any incompatible targets.
+    /// Otherwise (returning a single `ConfiguredTargetNode`), it will raise an error on resolve if the target is incompatible. Use `Lazy.catch()` to catch the error.
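The `catch` behavior just described can be modeled with toy synchronous types (this is not the real `bxl.Lazy`; the actual async implementation appears in `operation.rs` later in this diff): resolving a caught operation yields the error as a value instead of propagating it.

```rust
// Toy model: `catch` wraps an operation so that resolving it returns the
// inner outcome, success or failure, as an ordinary value.
enum Lazy {
    Value(i32),
    Fail(String),
    Catch(Box<Lazy>),
}

fn resolve(op: &Lazy) -> anyhow::Result<String> {
    match op {
        Lazy::Value(v) => Ok(v.to_string()),
        Lazy::Fail(msg) => Err(anyhow::anyhow!(msg.clone())),
        // The inner error is captured into the returned value, mirroring
        // `LazyOperation::Catch` below.
        Lazy::Catch(inner) => Ok(match resolve(inner) {
            Ok(v) => format!("Ok({v})"),
            Err(e) => format!("Err({e})"),
        }),
    }
}
```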
+    ///
+    /// Example:
+    /// ```text
+    /// def _impl(ctx):
+    ///     # returns a single `ConfiguredTargetNode`
+    ///     node = ctx.lazy.configured_target_node("cell//path/to:target").resolve()
+    ///
+    ///     # returns a target set of `ConfiguredTargetNode`s
+    ///     target_set = ctx.lazy.configured_target_node("cell//path/to:").resolve()
+    /// ```
+    fn configured_target_node<'v>(
+        #[starlark(this)] this: &'v StarlarkLazyCtx,
+        #[starlark(require = pos)] expr: ConfiguredTargetNodeArg<'v>,
+        #[starlark(require = named, default = ValueAsStarlarkTargetLabel::NONE)]
+        target_platform: ValueAsStarlarkTargetLabel<'v>,
+        #[starlark(require = named, default = UnpackList::default())] modifiers: UnpackList<String>,
+    ) -> anyhow::Result<StarlarkLazy> {
+        let bxl_ctx = this.ctx;
+        let target_platform = target_platform.parse_target_platforms(
+            bxl_ctx.target_alias_resolver(),
+            bxl_ctx.cell_resolver(),
+            bxl_ctx.cell_alias_resolver(),
+            bxl_ctx.cell_name(),
+            &bxl_ctx.global_cfg_options().target_platform,
+        );
+        let cli_modifiers = modifiers.items;
+        let global_cfg_options = target_platform.map(|target_platform| GlobalCfgOptions {
+            target_platform,
+            cli_modifiers: Arc::new(cli_modifiers),
+        });
+        let owned = OwnedConfiguredTargetNodeArg::from_ref(expr);
+        Ok(StarlarkLazy::new_configured_target_node(
+            owned,
+            global_cfg_options,
+        ))
+    }
+
+    /// Gets the unconfigured target node(s) for the `expr`.
+    ///
+    /// The given `expr` is either:
+    ///     - a single string that is a target or a target pattern.
+    ///     - a single unconfigured target node or label
+    ///
+    /// This returns either a target set of `UnconfiguredTargetNode`s if the given `expr` is a target pattern string,
+    /// else a single `UnconfiguredTargetNode`.
+    fn unconfigured_target_node<'v>(
+        #[starlark(this)] _this: &'v StarlarkLazyCtx,
+        #[starlark(require = pos)] expr: TargetNodeOrTargetLabelOrStr<'v>,
+    ) -> anyhow::Result<StarlarkLazy> {
+        let expr = OwnedTargetNodeArg::from_ref(&expr);
+        Ok(StarlarkLazy::new_unconfigured_target_node(expr))
+    }
+}
diff --git a/app/buck2_bxl/src/bxl/starlark_defs/lazy_ctx/operation.rs b/app/buck2_bxl/src/bxl/starlark_defs/lazy_ctx/operation.rs
new file mode 100644
index 0000000000000..bcb11f1d2856e
--- /dev/null
+++ b/app/buck2_bxl/src/bxl/starlark_defs/lazy_ctx/operation.rs
@@ -0,0 +1,268 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */ + +use std::sync::Arc; + +use allocative::Allocative; +use async_recursion::async_recursion; +use buck2_build_api::analysis::calculation::RuleAnalysisCalculation; +use buck2_common::global_cfg_options::GlobalCfgOptions; +use buck2_core::configuration::compatibility::MaybeCompatible; +use buck2_core::provider::label::ConfiguredProvidersLabel; +use buck2_node::nodes::unconfigured::TargetNode; +use derivative::Derivative; +use derive_more::Display; +use dice::DiceComputations; +use dupe::Dupe; +use either::Either; +use futures::FutureExt; +use starlark::any::ProvidesStaticType; +use starlark::environment::Methods; +use starlark::environment::MethodsBuilder; +use starlark::environment::MethodsStatic; +use starlark::eval::Evaluator; +use starlark::starlark_module; +use starlark::starlark_simple_value; +use starlark::values::list::AllocList; +use starlark::values::starlark_value; +use starlark::values::Heap; +use starlark::values::NoSerialize; +use starlark::values::StarlarkValue; +use starlark::values::Trace; +use starlark::values::Value; + +use crate::bxl::starlark_defs::analysis_result::StarlarkAnalysisResult; +use crate::bxl::starlark_defs::context::BxlContextCoreData; +use crate::bxl::starlark_defs::eval_extra::BxlEvalExtra; +use crate::bxl::starlark_defs::nodes::unconfigured::StarlarkTargetNode; +use crate::bxl::starlark_defs::result::StarlarkResultGen; +use crate::bxl::starlark_defs::target_list_expr::OwnedConfiguredTargetNodeArg; +use crate::bxl::starlark_defs::target_list_expr::OwnedTargetNodeArg; +use crate::bxl::starlark_defs::target_list_expr::SingleOrCompatibleConfiguredTargets; +use crate::bxl::starlark_defs::targetset::StarlarkTargetSet; + +#[derive(Derivative, Debug, Allocative)] +enum LazyOperation { + Analysis(ConfiguredProvidersLabel), + ConfiguredTargetNode { + arg: OwnedConfiguredTargetNodeArg, + global_cfg_options: buck2_error::Result, + }, + UnconfiguredTargetNode(OwnedTargetNodeArg), + Join(Arc, Arc), + Batch(Vec>), + Catch(Arc), +} + +#[derive(Allocative)] +enum LazyResult { + Analysis(StarlarkAnalysisResult), + ConfiguredTargetNode(SingleOrCompatibleConfiguredTargets), + UnconfiguredTargetNode(Either>), + Join(Box<(LazyResult, LazyResult)>), + Batch(Vec), + Catch(Box>), +} + +impl LazyResult { + fn into_value<'v>( + self, + heap: &'v Heap, + bxl_eval_extra: &BxlEvalExtra, + ) -> anyhow::Result> { + match self { + LazyResult::Analysis(analysis_res) => Ok(heap.alloc(analysis_res)), + LazyResult::ConfiguredTargetNode(res) => res.into_value(heap, bxl_eval_extra), + LazyResult::UnconfiguredTargetNode(node) => Ok(heap.alloc(node)), + LazyResult::Join(res) => Ok(heap.alloc(( + res.0.into_value(heap, bxl_eval_extra)?, + res.1.into_value(heap, bxl_eval_extra)?, + ))), + LazyResult::Batch(res) => Ok(heap.alloc(AllocList( + res.into_iter() + .map(|v| v.into_value(heap, bxl_eval_extra)) + .collect::>>()?, + ))), + LazyResult::Catch(res) => { + let val = match *res { + Ok(res) => Ok(res.into_value(heap, bxl_eval_extra)?), + Err(e) => Err(e), + }; + Ok(heap.alloc(StarlarkResultGen::from_result(val))) + } + } + } +} + +impl LazyOperation { + #[async_recursion] + async fn resolve( + &self, + dice: &mut DiceComputations<'_>, + core_data: &BxlContextCoreData, + ) -> anyhow::Result { + match self { + LazyOperation::Analysis(label) => { + Ok(LazyResult::Analysis(analysis(dice, label).await?)) + } + LazyOperation::ConfiguredTargetNode { + arg, + global_cfg_options, + } => { + let global_cfg_options = global_cfg_options.as_ref().map_err(|e| e.clone())?; + let res = arg + 
.to_configured_target_node(global_cfg_options, core_data, dice) + .await?; + Ok(LazyResult::ConfiguredTargetNode(res)) + } + LazyOperation::UnconfiguredTargetNode(expr) => { + let node = expr.to_unconfigured_target_node(core_data, dice).await?; + Ok(LazyResult::UnconfiguredTargetNode(node)) + } + LazyOperation::Join(lazy0, lazy1) => { + let compute0 = DiceComputations::declare_closure(|dice| { + async move { lazy0.resolve(dice, core_data).await }.boxed() + }); + let compute1 = DiceComputations::declare_closure(|dice| { + async move { lazy1.resolve(dice, core_data).await }.boxed() + }); + let (res0, res1) = dice.try_compute2(compute0, compute1).await?; + Ok(LazyResult::Join(Box::new((res0, res1)))) + } + LazyOperation::Batch(lazies) => { + let res = dice + .try_compute_join(lazies, |dice, lazy| { + async move { lazy.resolve(dice, core_data).await }.boxed() + }) + .await?; + Ok(LazyResult::Batch(res)) + } + LazyOperation::Catch(lazy) => { + let res = lazy.resolve(dice, core_data).await; + Ok(LazyResult::Catch(Box::new(res))) + } + } + } +} + +#[derive( + ProvidesStaticType, + Derivative, + Display, + Trace, + NoSerialize, + Allocative, + Clone, + Dupe +)] +#[derivative(Debug)] +#[display("{:?}", self)] +pub(crate) struct StarlarkLazy { + lazy: Arc<LazyOperation>, +} + +starlark_simple_value!(StarlarkLazy); + +impl StarlarkLazy { + pub(crate) fn new_analysis(label: ConfiguredProvidersLabel) -> Self { + Self { + lazy: Arc::new(LazyOperation::Analysis(label)), + } + } + + pub(crate) fn new_configured_target_node( + arg: OwnedConfiguredTargetNodeArg, + global_cfg_options: anyhow::Result<GlobalCfgOptions>, + ) -> Self { + Self { + lazy: Arc::new(LazyOperation::ConfiguredTargetNode { + arg, + global_cfg_options: global_cfg_options.map_err(buck2_error::Error::from), + }), + } + } + + pub(crate) fn new_unconfigured_target_node(expr: OwnedTargetNodeArg) -> Self { + Self { + lazy: Arc::new(LazyOperation::UnconfiguredTargetNode(expr)), + } + } + + pub(crate) fn new_batch<I: IntoIterator<Item = StarlarkLazy>>(lazies: I) -> Self { + Self { + lazy: Arc::new(LazyOperation::Batch( + lazies.into_iter().map(|v| v.lazy).collect(), + )), + } + } + + pub(crate) fn new_join(lazy0: StarlarkLazy, lazy1: StarlarkLazy) -> Self { + Self { + lazy: Arc::new(LazyOperation::Join(lazy0.lazy, lazy1.lazy)), + } + } +} + +#[starlark_value(type = "bxl.Lazy")] +impl<'v> StarlarkValue<'v> for StarlarkLazy { + fn get_methods() -> Option<&'static Methods> { + static RES: MethodsStatic = MethodsStatic::new(); + RES.methods(lazy_operation_methods) + } +} + +async fn analysis<'v>( + dice: &mut DiceComputations<'_>, + label: &ConfiguredProvidersLabel, +) -> anyhow::Result<StarlarkAnalysisResult> { + let maybe_result = dice.get_analysis_result(label.target()).await?; + match maybe_result { + MaybeCompatible::Incompatible(reason) => Err(reason.to_err()), + MaybeCompatible::Compatible(result) => StarlarkAnalysisResult::new(result, label.dupe()), + } +} + +/// `bxl.Lazy` can be resolved to the actual result. The computation only happens when `.resolve()` or `.try_resolve()` is called. +#[starlark_module] +fn lazy_operation_methods(builder: &mut MethodsBuilder) { + /// Resolve the operation to the actual result without catching the error.
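+ ///
+ /// If the underlying computation fails, the error is raised at this call; chain `.catch()`
+ /// first (see below) to get an inspectable result instead. A hedged sketch reusing the
+ /// hypothetical target from the surrounding examples:
+ /// ```text
+ /// def _impl(ctx):
+ ///     target = ctx.configured_targets("cell//path/to:target")
+ ///     res = ctx.lazy.analysis(target).catch().resolve()
+ ///     ctx.output.print(res)
+ /// ```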
+ /// + /// Example: + /// ```text + /// def _impl(ctx): + /// target = ctx.configured_targets("cell//path/to:target") + /// analysis_result = ctx.lazy.analysis(target).resolve() + /// ``` + fn resolve<'v>( + this: &StarlarkLazy, + eval: &mut Evaluator<'v, '_, '_>, + ) -> anyhow::Result<Value<'v>> { + let bxl_eval_extra = BxlEvalExtra::from_context(eval)?; + let lazy = this.lazy.clone(); + let res = bxl_eval_extra.via_dice(|dice, core_data| { + dice.via(|dice| async { lazy.resolve(dice, core_data).await }.boxed_local()) + }); + + let heap = eval.heap(); + res.and_then(|v| v.into_value(heap, bxl_eval_extra)) + } + + /// Makes the `Lazy` catch the error, so that resolving it later returns a result object instead of raising the error. + /// + /// Example: + /// ```text + /// def _impl(ctx): + /// target = ctx.configured_targets("cell//path/to:target") + /// analysis_result = ctx.lazy.analysis(target).catch().resolve() + /// ``` + fn catch(this: &StarlarkLazy) -> anyhow::Result<StarlarkLazy> { + let lazy = Arc::new(LazyOperation::Catch(this.lazy.dupe())); + Ok(StarlarkLazy { lazy }) + } +} diff --git a/app/buck2_bxl/src/bxl/starlark_defs/mod.rs b/app/buck2_bxl/src/bxl/starlark_defs/mod.rs deleted file mode 100644 index 954660924e698..0000000000000 --- a/app/buck2_bxl/src/bxl/starlark_defs/mod.rs +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! Definitions of core functionality just for bxl functions to access - -pub(crate) mod alloc_node; -pub(crate) mod analysis_result; -pub(crate) mod aquery; -pub(crate) mod artifacts; -pub(crate) mod audit; -pub(crate) mod build_result; -pub(crate) mod bxl_function; -pub(crate) mod cli_args; -pub(crate) mod context; -pub(crate) mod cquery; -pub(crate) mod event; -pub(crate) mod file_expr; -pub(crate) mod file_set; -pub(crate) mod functions; -pub(crate) mod globals; -pub(crate) mod nodes; -pub(crate) mod providers_expr; -mod query_util; -pub(crate) mod target_expr; -pub(crate) mod target_universe; -pub(crate) mod targetset; -pub(crate) mod time; -pub(crate) mod type_names; -pub(crate) mod uquery; diff --git a/app/buck2_bxl/src/bxl/starlark_defs/nodes.rs b/app/buck2_bxl/src/bxl/starlark_defs/nodes.rs index dc409ef22a8a4..cd72c533a5125 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/nodes.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/nodes.rs @@ -9,4 +9,5 @@ pub(crate) mod action; pub(crate) mod configured; +pub(crate) mod node_attrs; pub(crate) mod unconfigured; diff --git a/app/buck2_bxl/src/bxl/starlark_defs/nodes/action.rs b/app/buck2_bxl/src/bxl/starlark_defs/nodes/action.rs index dbd5dff85794b..5dbaecb0fc0b2 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/nodes/action.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/nodes/action.rs @@ -7,6 +7,7 @@ * of this source tree.
*/ +use std::convert::Infallible; use std::sync::Arc; use allocative::Allocative; @@ -25,6 +26,7 @@ use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; use starlark::starlark_module; use starlark::starlark_simple_value; +use starlark::values::none::NoneOr; use starlark::values::starlark_value; use starlark::values::structs::AllocStruct; use starlark::values::Heap; @@ -35,14 +37,12 @@ use starlark::values::UnpackValue; use starlark::values::Value; use starlark::values::ValueLike; use starlark::values::ValueTyped; -use starlark::StarlarkDocs; use crate::bxl::starlark_defs::analysis_result::StarlarkAnalysisResult; -#[derive(Debug, Display, ProvidesStaticType, Allocative, StarlarkDocs)] +#[derive(Debug, Display, ProvidesStaticType, Allocative)] #[derive(NoSerialize)] -#[display(fmt = "{}", "self.0")] -#[starlark_docs(directory = "bxl")] +#[display("{}", self.0)] pub(crate) struct StarlarkAction(pub(crate) Arc); starlark_simple_value!(StarlarkAction); @@ -56,14 +56,12 @@ impl<'v> StarlarkValue<'v> for StarlarkAction { } impl<'a> UnpackValue<'a> for StarlarkAction { - fn expected() -> String { - "action".to_owned() - } + type Error = Infallible; - fn unpack_value(value: starlark::values::Value<'a>) -> Option { - value + fn unpack_value_impl(value: Value<'a>) -> Result, Self::Error> { + Ok(value .downcast_ref::() - .map(|value| Self(value.0.dupe())) + .map(|value| Self(value.0.dupe()))) } } @@ -88,15 +86,14 @@ fn action_methods(builder: &mut MethodsBuilder) { } } -#[derive(Debug, Display, ProvidesStaticType, Allocative, StarlarkDocs)] +#[derive(Debug, Display, ProvidesStaticType, Allocative)] #[derive(NoSerialize)] -#[display(fmt = "{}", "self.0.key()")] -#[starlark_docs(directory = "bxl")] +#[display("{}", self.0.key())] pub(crate) struct StarlarkActionQueryNode(pub(crate) ActionQueryNode); starlark_simple_value!(StarlarkActionQueryNode); -#[starlark_value(type = "action_query_node")] +#[starlark_value(type = "bxl.ActionQueryNode")] impl<'v> StarlarkValue<'v> for StarlarkActionQueryNode { fn get_methods() -> Option<&'static Methods> { static RES: MethodsStatic = MethodsStatic::new(); @@ -105,14 +102,12 @@ impl<'v> StarlarkValue<'v> for StarlarkActionQueryNode { } impl<'a> UnpackValue<'a> for StarlarkActionQueryNode { - fn expected() -> String { - "action query node".to_owned() - } + type Error = Infallible; - fn unpack_value(value: starlark::values::Value<'a>) -> Option { - value + fn unpack_value_impl(value: Value<'a>) -> Result, Self::Error> { + Ok(value .downcast_ref::() - .map(|value| Self(value.0.dupe())) + .map(|value| Self(value.0.dupe()))) } } @@ -135,24 +130,28 @@ fn action_query_node_value_methods(builder: &mut MethodsBuilder) { fn action<'v>( this: &StarlarkActionQueryNode, heap: &'v Heap, - ) -> anyhow::Result>> { - Ok(this - .0 - .action() - .map(|a| heap.alloc_typed(StarlarkAction(a.clone())))) + ) -> anyhow::Result>> { + let action = this.0.action(); + match action { + None => Ok(NoneOr::None), + Some(a) => Ok(NoneOr::Other(heap.alloc_typed(StarlarkAction(a.dupe())))), + } } /// Gets optional analysis from the action query target node. 
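+ /// Returns `None` when the node has no associated analysis. A rough usage sketch,
+ /// assuming the aquery context's `eval` is available and with a hypothetical query literal:
+ /// ```text
+ /// def _impl(ctx):
+ ///     for node in ctx.aquery().eval("deps('cell//path/to:target')"):
+ ///         if node.analysis() != None:
+ ///             ctx.output.print(node.analysis())
+ /// ```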
fn analysis<'v>( this: &StarlarkActionQueryNode, heap: &'v Heap, - ) -> anyhow::Result>> { - Ok(this.0.analysis_opt().map(|a| { - heap.alloc_typed(StarlarkAnalysisResult::new( - a.analysis_result().clone(), - a.target().as_ref().clone(), - )) - })) + ) -> anyhow::Result>> { + match this.0.analysis_opt() { + Some(a) => Ok(NoneOr::Other(heap.alloc_typed( + StarlarkAnalysisResult::new( + a.analysis_result().clone(), + a.target().as_ref().clone(), + )?, + ))), + None => Ok(NoneOr::None), + } } /// Gets the kind of action query node, either analysis or action kind. @@ -162,17 +161,9 @@ fn action_query_node_value_methods(builder: &mut MethodsBuilder) { } } -#[derive( - Debug, - ProvidesStaticType, - Allocative, - StarlarkDocs, - derive_more::Display, - Serialize -)] +#[derive(Debug, ProvidesStaticType, Allocative, derive_more::Display, Serialize)] #[repr(transparent)] #[serde(transparent)] -#[starlark_docs(directory = "bxl")] pub(crate) struct StarlarkActionAttr(pub(crate) OwnedActionAttr); starlark_simple_value!(StarlarkActionAttr); diff --git a/app/buck2_bxl/src/bxl/starlark_defs/nodes/configured.rs b/app/buck2_bxl/src/bxl/starlark_defs/nodes/configured.rs index 0f38e6538af12..1674a4cd7e42b 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/nodes/configured.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/nodes/configured.rs @@ -8,8 +8,9 @@ */ use std::borrow::Cow; +use std::collections::HashMap; +use std::convert::Infallible; use std::fmt; -use std::fmt::Display; use std::path::Path; use std::sync::OnceLock; @@ -22,14 +23,15 @@ use buck2_analysis::analysis::env::RuleAnalysisAttrResolutionContext; use buck2_analysis::attrs::resolve::configured_attr::ConfiguredAttrExt; use buck2_artifact::artifact::artifact_type::Artifact; use buck2_artifact::artifact::source_artifact::SourceArtifact; +use buck2_build_api::actions::query::PackageLabelOption; use buck2_build_api::analysis::AnalysisResult; -use buck2_build_api::interpreter::rule_defs::artifact::StarlarkArtifact; +use buck2_build_api::interpreter::rule_defs::artifact::starlark_artifact::StarlarkArtifact; use buck2_common::dice::cells::HasCellResolver; use buck2_common::dice::data::HasIoProvider; -use buck2_core::buck_path::path::BuckPathRef; use buck2_core::cells::cell_path::CellPath; use buck2_core::fs::paths::abs_path::AbsPath; use buck2_core::fs::project_rel_path::ProjectRelativePath; +use buck2_core::package::source_path::SourcePathRef; use buck2_core::package::PackageLabel; use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_core::target::configured_target_label::ConfiguredTargetLabel; @@ -48,6 +50,7 @@ use futures::FutureExt; use serde::Serialize; use serde::Serializer; use starlark::any::ProvidesStaticType; +use starlark::collections::SmallMap; use starlark::environment::Methods; use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; @@ -55,33 +58,36 @@ use starlark::environment::Module; use starlark::eval::Evaluator; use starlark::starlark_module; use starlark::starlark_simple_value; +use starlark::values::list::AllocList; +use starlark::values::none::NoneOr; use starlark::values::starlark_value; use starlark::values::structs::AllocStruct; use starlark::values::AllocValue; use starlark::values::Heap; use starlark::values::NoSerialize; use starlark::values::StarlarkValue; +use starlark::values::StringValue; use starlark::values::Trace; use starlark::values::UnpackValue; use starlark::values::Value; use starlark::values::ValueLike; -use starlark::StarlarkDocs; +use starlark::values::ValueTyped; +use 
super::node_attrs::NodeAttributeGetter; use crate::bxl::starlark_defs::context::BxlContext; use crate::bxl::starlark_defs::file_set::StarlarkFileNode; use crate::bxl::starlark_defs::nodes::configured::attr_resolution_ctx::LazyAttrResolutionContext; mod attr_resolution_ctx; -#[derive(Debug, Display, ProvidesStaticType, StarlarkDocs, Allocative)] +#[derive(Debug, Display, ProvidesStaticType, Allocative, Clone, Dupe)] #[derive(NoSerialize)] // TODO probably should be serializable the same as how queries serialize -#[display(fmt = "configured_target_node(name = {}, ...)", "self.0.label()")] -#[starlark_docs(directory = "bxl")] +#[display("configured_target_node(name = {}, ...)", self.0.label())] pub(crate) struct StarlarkConfiguredTargetNode(pub(crate) ConfiguredTargetNode); starlark_simple_value!(StarlarkConfiguredTargetNode); -#[starlark_value(type = "target_node")] +#[starlark_value(type = "bxl.ConfiguredTargetNode")] impl<'v> StarlarkValue<'v> for StarlarkConfiguredTargetNode { fn get_methods() -> Option<&'static Methods> { static RES: MethodsStatic = MethodsStatic::new(); @@ -90,14 +96,12 @@ impl<'v> StarlarkValue<'v> for StarlarkConfiguredTargetNode { } impl<'a> UnpackValue<'a> for StarlarkConfiguredTargetNode { - fn expected() -> String { - "target node".to_owned() - } + type Error = Infallible; - fn unpack_value(value: starlark::values::Value<'a>) -> Option<Self> { - value + fn unpack_value_impl(value: Value<'a>) -> Result<Option<Self>, Self::Error> { + Ok(value .downcast_ref::<Self>() - .map(|value| Self(value.0.dupe())) + .map(|value| Self(value.0.dupe()))) } } @@ -132,6 +136,60 @@ fn configured_target_node_value_methods(builder: &mut MethodsBuilder) { Ok(StarlarkFileNode(this.0.buildfile_path().path())) } + /// Gets the attribute from the configured target node. + /// If the attribute is unset, returns the default value. + /// If the attribute is not defined by the rule, returns `None`. + /// It will not return special attributes (attributes that start with 'buck.' in the `buck2 cquery -A` command). + /// + /// Sample usage: + /// ```text + /// def _impl_attributes(ctx): + /// target_node = ctx.cquery().eval("//foo:bar")[0] + /// ctx.output.print(target_node.get_attr('my_attr')) + /// ``` + fn get_attr<'v>( + this: &StarlarkConfiguredTargetNode, + #[starlark(require=pos)] key: &str, + heap: &'v Heap, + ) -> anyhow::Result<NoneOr<Value<'v>>> { + NodeAttributeGetter::get_attr(this, key, heap) + } + + /// Gets all the attributes (not including special attributes) from the configured target node. + /// For attributes that are not explicitly set, the default value is returned. + /// + /// Sample usage: + /// ```text + /// def _impl_attributes(ctx): + /// target_node = ctx.cquery().eval("//foo:bar")[0] + /// ctx.output.print(target_node.get_attrs()) + /// ``` + fn get_attrs<'v>( + this: &StarlarkConfiguredTargetNode, + heap: &'v Heap, + ) -> anyhow::Result<SmallMap<StringValue<'v>, Value<'v>>> { + NodeAttributeGetter::get_attrs(this, heap) + } + + /// Checks whether the rule has the given attribute.
+ /// + /// A known attribute is always either set explicitly or takes its default value + /// (otherwise the target would not have been created). + /// For special attributes, it returns `False`. + /// + /// Sample usage: + /// ```text + /// def _impl_has_attr(ctx): + /// target_node = ctx.cquery().eval("//foo:bar")[0] + /// ctx.output.print(target_node.has_attr('my_attr')) + /// ``` + fn has_attr<'v>( + this: &StarlarkConfiguredTargetNode, + #[starlark(require=pos)] key: &str, + ) -> anyhow::Result<bool> { + Ok(NodeAttributeGetter::has_attr(this, key)) + } + /// Returns a struct of all the attributes of this target node. The structs fields are the /// attributes names, and the values are [`StarlarkConfiguredAttr`]. /// @@ -143,6 +201,9 @@ fn configured_target_node_value_methods(builder: &mut MethodsBuilder) { /// You should store the result of this function call for further usage in the code rather than calling /// `attrs_eager()` each time you need to access the attrs. /// + /// Right now, it is not recommended to use this method. Instead, use the `get_attr` and `get_attrs` methods. + /// We will deprecate this method in the future. + /// /// Sample usage: /// ```text /// def _impl_attrs_eager(ctx): @@ -175,8 +236,7 @@ fn configured_target_node_value_methods(builder: &mut MethodsBuilder) { Ok(heap.alloc(AllocStruct(attrs))) } - /// Gets a `StarlarkLazyAttrs` for getting attrs lazily. Returns a `StarlarkLazyAttrs` object - /// that you can call `get()` on that gets an attr one at a time. + /// Returns a `lazy_attrs` object that you can call `get()` on that gets an attr one at a time. /// /// If you need to access only few attrs on the same node, then this is the preferred way. Otherwise, /// using `attrs_eager()` would be a better option for accessing many or all attrs, although this really @@ -184,8 +244,11 @@ fn configured_target_node_value_methods(builder: &mut MethodsBuilder) { /// indication on which method to use. /// /// You should store the result of this function call for further usage in the code rather than calling - /// `attrs_lazy()` each time to get the `StarlarkLazyAttrs` object. Note that if the `get()` is `NoneType`, - /// then any methods called on `NoneType` will result in an error. + /// `attrs_lazy()` each time to get the `lazy_attrs` object. Note that if the `get()` is `None`, + /// then any methods called on `None` will result in an error. + /// + /// Right now, it is not recommended to use this method. Instead, use the `get_attr` and `get_attrs` methods. + /// We will deprecate this method in the future. /// /// Sample usage: /// ```text @@ -201,8 +264,7 @@ fn configured_target_node_value_methods(builder: &mut MethodsBuilder) { Ok(StarlarkLazyAttrs::new(this)) } - /// Gets a `StarlarkLazyResolvedAttrs` for getting resolved attrs lazily. Returns a `StarlarkLazyResolvedAttrs` object - /// that you can call `get()` on that gets a resolved attr one at a time. + /// Returns a `lazy_resolved_attrs` object that you can call `get()` on that gets a resolved attr one at a time. /// /// If you need to access only few resolved attrs on the same node, then this is the preferred way. Otherwise, /// using `resolved_attrs_eager()` would be a better option for accessing many or all resolved attrs, although this really @@ -210,8 +272,11 @@ fn configured_target_node_value_methods(builder: &mut MethodsBuilder) { /// indication on which method to use.
/// + /// You should store the result of this function call for further usage in the code rather than calling - /// `resolved_attrs_lazy()` each time to get the `StarlarkResolvedLazyAttrs` object. Note that if the `get()` is `NoneType`, - /// then any methods called on `NoneType` will result in an error. + /// `resolved_attrs_lazy()` each time to get the `lazy_resolved_attrs` object. Note that if the `get()` is `None`, + /// then any methods called on `None` will result in an error. + /// + /// Right now, it is not recommended to use this method. Instead, use the `get_attr` and `get_attrs` methods. + /// We will deprecate this method in the future. /// /// Sample usage: /// ```text @@ -224,13 +289,13 @@ fn configured_target_node_value_methods(builder: &mut MethodsBuilder) { fn resolved_attrs_lazy<'v>( this: &'v StarlarkConfiguredTargetNode, ctx: &'v BxlContext<'v>, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result<StarlarkLazyResolvedAttrs<'v>> { Ok(StarlarkLazyResolvedAttrs::new(this, ctx, eval.module())) } /// Returns a struct of all the resolved attributes of this target node. The structs fields are the - /// attributes names, and the values are Starlark `[Value]`. + /// attributes names, and the values are the underlying Starlark values of the attributes. /// /// If you need to access many or all resolved attrs on the same node, then this is the preferred way. Otherwise, /// using `resolved_attrs_lazy()` would be a better option for accessing only a few resolved attrs, although this really @@ -240,20 +305,23 @@ fn configured_target_node_value_methods(builder: &mut MethodsBuilder) { /// You should store the result of this function call for further usage in the code rather than calling /// `resolved_attrs_eager()` each time you need all the resolved attrs. /// + /// Right now, it is not recommended to use this method. Instead, use the `get_attr` and `get_attrs` methods. + /// We will deprecate this method in the future. + /// /// Sample usage: /// ```text /// def _impl_resolved_attrs_eager(ctx): /// node = ctx.cquery().owner("cell//path/to/TARGETS")[0] - /// attrs = node.resolved_attrs_eager() # cache once + /// attrs = node.resolved_attrs_eager(ctx) # cache once /// ctx.output.print(attrs) /// # do more stuff with attrs /// ``` fn resolved_attrs_eager<'v>( this: &'v StarlarkConfiguredTargetNode, ctx: &'v BxlContext<'v>, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result<Value<'v>> { - let configured_node = &this.0; + let configured_node = this.0.as_ref(); + let dep_analysis: anyhow::Result, _> = ctx .async_ctx @@ -286,6 +354,27 @@ fn configured_target_node_value_methods(builder: &mut MethodsBuilder) { Ok(eval.heap().alloc(AllocStruct(resolved_attrs))) } + /// Skips the incoming transition forward node. + /// If a target is a forward node, which is created by applying an incoming configuration transition, + /// this returns the transition target; otherwise it returns the node itself. + /// This is particularly useful when you don't care about 'forward' nodes. + /// + /// Example usage: + /// ```text + /// def _impl_unwrap_forward(ctx): + /// node = ctx.configured_targets("my_cell//bin:the_binary") + /// actual_node = node.unwrap_forward() + /// ``` + fn unwrap_forward<'v>( + this: ValueTyped<'v, StarlarkConfiguredTargetNode>, + heap: &'v Heap, + ) -> anyhow::Result<ValueTyped<'v, StarlarkConfiguredTargetNode>> { + match this.0.forward_target() { + Some(n) => Ok(heap.alloc_typed(StarlarkConfiguredTargetNode(n.dupe()))), + None => Ok(this), + } + } + + /// Gets the target's corresponding rule's name.
This is the fully qualified rule name including /// the import path. /// @@ -296,8 +385,30 @@ fn configured_target_node_value_methods(builder: &mut MethodsBuilder) { /// ctx.output.print(node.rule_type) /// ``` #[starlark(attribute)] - fn rule_type(this: &StarlarkConfiguredTargetNode) -> anyhow::Result<String> { - Ok(this.0.rule_type().to_string()) + fn rule_type<'v>( + this: &'v StarlarkConfiguredTargetNode, + heap: &'v Heap, + ) -> anyhow::Result<StringValue<'v>> { + Ok(heap.alloc_str_intern(this.0.rule_type().to_string().as_str())) + } + + /// Gets the target's corresponding rule's kind, which is one of + /// - normal (with no special properties) + /// - configured (usable in a configuration context) + /// - toolchain (only usable as a toolchain dep) + /// + /// Sample usage: + /// ```text + /// def _impl_rule_kind(ctx): + /// node = ctx.configured_targets("my_cell//bin:the_binary") + /// ctx.output.print(node.rule_kind) + /// ``` + #[starlark(attribute)] + fn rule_kind<'v>( + this: &'v StarlarkConfiguredTargetNode, + heap: &'v Heap, + ) -> anyhow::Result<StringValue<'v>> { + Ok(heap.alloc_str_intern(this.0.rule_kind().as_str())) } /// Returns a List of all the sources used by this node. @@ -317,10 +428,10 @@ fn configured_target_node_value_methods(builder: &mut MethodsBuilder) { Ok(()) } - fn input(&mut self, path: BuckPathRef) -> anyhow::Result<()> { + fn input(&mut self, path: SourcePathRef) -> anyhow::Result<()> { self.inputs .push(StarlarkArtifact::new(Artifact::from(SourceArtifact::new( - path.to_buck_path(), + path.to_owned(), )))); Ok(()) } @@ -332,7 +443,7 @@ fn configured_target_node_value_methods(builder: &mut MethodsBuilder) { Ok(traversal.inputs) } - /// Gets the `SourceArtifact` that corresponds to the given `path`, given a context. The path should be the + /// Gets the source `Artifact` that corresponds to the given `path`, given a context. The path should be the /// project relative path to the file, or an absolute path. /// /// Sample usage: @@ -346,7 +457,7 @@ fn configured_target_node_value_methods(builder: &mut MethodsBuilder) { this: &StarlarkConfiguredTargetNode, path: &str, ctx: &BxlContext, - ) -> anyhow::Result<Option<StarlarkArtifact>> { + ) -> anyhow::Result<NoneOr<StarlarkArtifact>> { let path = Path::new(path); let fs = ctx .async_ctx @@ -379,10 +490,10 @@ fn configured_target_node_value_methods(builder: &mut MethodsBuilder) { Ok(()) } - fn input(&mut self, path: BuckPathRef) -> anyhow::Result<()> { + fn input(&mut self, path: SourcePathRef) -> anyhow::Result<()> { if path.to_cell_path() == self.target { self.found = Some(StarlarkArtifact::new(Artifact::from(SourceArtifact::new( - path.to_buck_path(), + path.to_owned(), )))); } Ok(()) @@ -396,16 +507,56 @@ fn configured_target_node_value_methods(builder: &mut MethodsBuilder) { a.traverse(this.0.label().pkg(), &mut traversal)?; if let Some(found) = traversal.found { - return Ok(Some(found)); + return Ok(NoneOr::Other(found)); } } - Ok(None) + Ok(NoneOr::None) + } + + /// Gets the target's special attr `oncall`. + /// + /// Sample usage: + /// ```text + /// def _impl_get_oncall(ctx): + /// target_node = ctx.cquery().eval("//foo:bar")[0] + /// ctx.output.print(target_node.oncall) + /// ``` + #[starlark(attribute)] + fn oncall<'v>( + this: &'v StarlarkConfiguredTargetNode, + heap: &'v Heap, + ) -> anyhow::Result<NoneOr<StringValue<'v>>> { + match this.0.oncall() { + Some(oncall) => Ok(NoneOr::Other(heap.alloc_str_intern(oncall))), + None => Ok(NoneOr::None), + } + } + + /// Gets all deps for this target. + /// The result is a list of `ConfiguredTargetNode`.
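+ ///
+ /// In addition to the sample below, the returned nodes can be inspected directly;
+ /// a hedged sketch (the target label is hypothetical):
+ /// ```text
+ /// def _impl_deps_labels(ctx):
+ ///     node = ctx.configured_targets("my_cell//bin:the_binary")
+ ///     ctx.output.print([d.label for d in node.deps()])
+ /// ```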
+ /// + /// Sample usage: + /// ```text + /// def _impl_get_deps(ctx): + /// target_node = ctx.uquery().eval("//foo:bar")[0] + /// ctx.output.print(target_node.deps()) + /// ``` + fn deps<'v>( + this: &'v StarlarkConfiguredTargetNode, + // ) -> anyhow::Result> { + ) -> anyhow::Result + 'v>> + { + Ok(AllocList( + this.0 + .deps() + .map(|node| StarlarkConfiguredTargetNode(node.dupe())) + .into_iter(), + )) } } -#[derive(Debug, Clone, ProvidesStaticType, StarlarkDocs, Allocative)] +#[derive(Debug, Clone, ProvidesStaticType, Allocative)] #[repr(C)] -#[starlark_docs(directory = "bxl")] pub(crate) struct StarlarkConfiguredAttr(ConfiguredAttr, PackageLabel); impl Display for StarlarkConfiguredAttr { @@ -413,6 +564,7 @@ impl Display for StarlarkConfiguredAttr { self.0.fmt( &AttrFmtContext { package: Some(self.1.dupe()), + options: Default::default(), }, f, ) @@ -427,6 +579,7 @@ impl Serialize for StarlarkConfiguredAttr { self.0.serialize_with_ctx( &AttrFmtContext { package: Some(self.1.dupe()), + options: Default::default(), }, serializer, ) @@ -470,7 +623,8 @@ fn configured_attr_methods(builder: &mut MethodsBuilder) { /// ctx.output.print(attrs.name.value()) /// ``` fn value<'v>(this: &StarlarkConfiguredAttr, heap: &'v Heap) -> anyhow::Result> { - this.0.to_value(this.1.dupe(), heap) + this.0 + .to_value(PackageLabelOption::PackageLabel(this.1.dupe()), heap) } } @@ -481,12 +635,10 @@ fn configured_attr_methods(builder: &mut MethodsBuilder) { Display, Trace, NoSerialize, - StarlarkDocs, Allocative )] -#[starlark_docs(directory = "bxl")] #[derivative(Debug)] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] pub(crate) struct StarlarkLazyAttrs<'v> { #[trace(unsafe_ignore)] #[derivative(Debug = "ignore")] @@ -518,11 +670,12 @@ impl<'v> StarlarkLazyAttrs<'v> { } } -/// The context for getting attrs lazily on a `StarlarkConfiguredTargetNode`. +/// The context for getting attrs lazily on a `target_node`. #[starlark_module] fn lazy_attrs_methods(builder: &mut MethodsBuilder) { - /// Gets a single attribute. Returns an optional `[StarlarkConfiguredAttr]`. + /// Gets a single attribute. Returns an optional `[configured_attr]`. 
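+ /// Special attributes (the `buck.`-prefixed ones shown by `buck2 cquery -A`) are looked up
+ /// here as well; a hedged sketch, assuming the `buck.`-prefixed name is the lookup key:
+ /// ```text
+ /// attrs = node.attrs_lazy()
+ /// ctx.output.print(attrs.get("buck.type"))
+ /// ```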
/// + /// ```text /// def _impl_attrs_lazy(ctx): /// node = ctx.cquery().owner("cell//path/to/TARGETS")[0] /// attrs = node.attrs_lazy() # cache once @@ -532,14 +685,35 @@ fn lazy_attrs_methods(builder: &mut MethodsBuilder) { fn get<'v>( this: &StarlarkLazyAttrs<'v>, attr: &str, - ) -> anyhow::Result> { - Ok(this - .configured_target_node - .0 - .get(attr, AttrInspectOptions::All) - .map(|a| { - StarlarkConfiguredAttr(a.value, this.configured_target_node.0.label().pkg().dupe()) - })) + ) -> anyhow::Result> { + Ok( + match this + .configured_target_node + .0 + .get(attr, AttrInspectOptions::All) + { + Some(attr) => NoneOr::Other(StarlarkConfiguredAttr( + attr.value, + this.configured_target_node.0.label().pkg().dupe(), + )), + None => { + // Check special attrs + let special_attrs = this + .configured_target_node + .0 + .special_attrs() + .collect::>(); + let attr = special_attrs.get(attr); + match attr { + None => NoneOr::None, + Some(attr) => NoneOr::Other(StarlarkConfiguredAttr( + attr.clone(), + this.configured_target_node.0.label().pkg().dupe(), + )), + } + } + }, + ) } } @@ -549,12 +723,10 @@ fn lazy_attrs_methods(builder: &mut MethodsBuilder) { Display, Trace, NoSerialize, - StarlarkDocs, Allocative )] -#[starlark_docs(directory = "bxl")] #[derivative(Debug)] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] pub(crate) struct StarlarkLazyResolvedAttrs<'v> { #[trace(unsafe_ignore)] #[derivative(Debug = "ignore")] @@ -566,7 +738,7 @@ pub(crate) struct StarlarkLazyResolvedAttrs<'v> { resolution_ctx: LazyAttrResolutionContext<'v>, } -#[starlark_value(type = "lazy_resolved_attrs", StarlarkTypeRepr, UnpackValue)] +#[starlark_value(type = "bxl.LazyResolvedAttrs", StarlarkTypeRepr, UnpackValue)] impl<'v> StarlarkValue<'v> for StarlarkLazyResolvedAttrs<'v> { fn get_methods() -> Option<&'static Methods> { static RES: MethodsStatic = MethodsStatic::new(); @@ -602,13 +774,14 @@ impl<'v> StarlarkLazyResolvedAttrs<'v> { } } -/// The context for getting resolved attrs lazily on a `StarlarkConfiguredTargetNode`. +/// The context for getting resolved attrs lazily on a `target_node`. #[starlark_module] fn lazy_resolved_attrs_methods(builder: &mut MethodsBuilder) { /// Gets a single resolved attribute. Returns an optional configured attribute. /// /// Gets a single attribute. 
/// + /// ```text /// def _impl_resolved_attrs_lazy(ctx): /// node = ctx.cquery().owner("cell//path/to/TARGETS")[0] /// attrs = node.resolved_attrs_lazy(ctx) # cache once @@ -618,14 +791,28 @@ fn lazy_resolved_attrs_methods(builder: &mut MethodsBuilder) { fn get<'v>( this: &StarlarkLazyResolvedAttrs<'v>, attr: &str, - ) -> anyhow::Result>> { + ) -> anyhow::Result>> { Ok( match this.configured_node.get(attr, AttrInspectOptions::All) { - Some(attr) => Some( + Some(attr) => NoneOr::Other( attr.value .resolve_single(this.configured_node.label().pkg(), &this.resolution_ctx)?, ), - None => None, + None => { + // Check special attrs + let special_attrs = this + .configured_node + .special_attrs() + .collect::>(); + let attr = special_attrs.get(attr); + match attr { + None => NoneOr::None, + Some(attr) => NoneOr::Other(attr.resolve_single( + this.configured_node.label().pkg(), + &this.resolution_ctx, + )?), + } + } }, ) } diff --git a/app/buck2_bxl/src/bxl/starlark_defs/nodes/configured/attr_resolution_ctx.rs b/app/buck2_bxl/src/bxl/starlark_defs/nodes/configured/attr_resolution_ctx.rs index bd00cfc32b82c..69a52b0b6eee4 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/nodes/configured/attr_resolution_ctx.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/nodes/configured/attr_resolution_ctx.rs @@ -20,15 +20,15 @@ use buck2_analysis::analysis::env::resolve_unkeyed_placeholder; use buck2_analysis::attrs::resolve::ctx::AnalysisQueryResult; use buck2_analysis::attrs::resolve::ctx::AttrResolutionContext; use buck2_build_api::interpreter::rule_defs::cmd_args::value::FrozenCommandLineArg; +use buck2_build_api::interpreter::rule_defs::provider::collection::FrozenProviderCollection; use buck2_build_api::interpreter::rule_defs::provider::collection::FrozenProviderCollectionValue; -use buck2_common::result::SharedResult; -use buck2_common::result::ToSharedResultExt; use buck2_core::execution_types::execution::ExecutionPlatformResolution; use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_core::target::configured_target_label::ConfiguredTargetLabel; use buck2_node::nodes::configured::ConfiguredTargetNode; use futures::FutureExt; use starlark::environment::Module; +use starlark::values::FrozenValueTyped; use crate::bxl::starlark_defs::context::BxlContext; @@ -48,11 +48,9 @@ impl<'v> LazyAttrResolutionContext<'v> { &self, ) -> &anyhow::Result> { self.dep_analysis_results.get_or_init(|| { - get_deps_from_analysis_results( - self.ctx.async_ctx.borrow_mut().via(|dice_ctx| { - get_dep_analysis(self.configured_node, dice_ctx).boxed_local() - })?, - ) + get_deps_from_analysis_results(self.ctx.async_ctx.borrow_mut().via(|dice_ctx| { + get_dep_analysis(self.configured_node.as_ref(), dice_ctx).boxed_local() + })?) 
}) } @@ -60,10 +58,9 @@ impl<'v> LazyAttrResolutionContext<'v> { &self, ) -> &anyhow::Result>> { self.query_results.get_or_init(|| { - self.ctx - .async_ctx - .borrow_mut() - .via(|dice_ctx| resolve_queries(dice_ctx, self.configured_node).boxed_local()) + self.ctx.async_ctx.borrow_mut().via(|dice_ctx| { + resolve_queries(dice_ctx, self.configured_node.as_ref()).boxed_local() + }) }) } } @@ -76,7 +73,7 @@ impl<'v> AttrResolutionContext<'v> for LazyAttrResolutionContext<'v> { fn get_dep( &self, target: &ConfiguredProvidersLabel, - ) -> anyhow::Result { + ) -> anyhow::Result> { match self.dep_analysis_results() { Ok(deps) => get_dep(deps, target, self.module), Err(e) => Err(anyhow::anyhow!("Error getting deps from analysis: `{}`", e)), @@ -96,10 +93,11 @@ impl<'v> AttrResolutionContext<'v> for LazyAttrResolutionContext<'v> { } } - fn resolve_query(&self, query: &str) -> SharedResult> { + fn resolve_query(&self, query: &str) -> buck2_error::Result> { match self.query_results() { Ok(res) => resolve_query(res, query, self.module), - Err(e) => Err(anyhow::anyhow!("Error resolving query: `{}`", e)).shared_error(), + Err(e) => Err(anyhow::anyhow!("Error resolving query: `{}`", e)) + .map_err(buck2_error::Error::from), } } diff --git a/app/buck2_bxl/src/bxl/starlark_defs/nodes/node_attrs.rs b/app/buck2_bxl/src/bxl/starlark_defs/nodes/node_attrs.rs new file mode 100644 index 0000000000000..81d19980f6c83 --- /dev/null +++ b/app/buck2_bxl/src/bxl/starlark_defs/nodes/node_attrs.rs @@ -0,0 +1,93 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use buck2_analysis::attrs::resolve::configured_attr::ConfiguredAttrExt; +use buck2_build_api::actions::query::PackageLabelOption; +use buck2_node::attrs::inspect_options::AttrInspectOptions; +use starlark::collections::SmallMap; +use starlark::values::none::NoneOr; +use starlark::values::Heap; +use starlark::values::StringValue; +use starlark::values::Value; + +use crate::bxl::starlark_defs::nodes::configured::StarlarkConfiguredTargetNode; +use crate::bxl::starlark_defs::nodes::unconfigured::attribute::CoercedAttrExt; +use crate::bxl::starlark_defs::nodes::unconfigured::StarlarkTargetNode; + +pub(crate) trait NodeAttributeGetter { + fn get_attr<'v>(&self, key: &str, heap: &'v Heap) -> anyhow::Result>>; + fn get_attrs<'v>(&self, heap: &'v Heap) + -> anyhow::Result, Value<'v>>>; + fn has_attr(&self, key: &str) -> bool; +} + +impl NodeAttributeGetter for StarlarkTargetNode { + fn get_attr<'v>(&self, key: &str, heap: &'v Heap) -> anyhow::Result>> { + let node = &self.0; + let pkg = node.label().pkg(); + match node.attr_or_none(key, AttrInspectOptions::All) { + Some(attr) => Ok(NoneOr::Other(attr.value.to_value(pkg, heap)?)), + None => Ok(NoneOr::None), + } + } + + fn get_attrs<'v>( + &self, + heap: &'v Heap, + ) -> anyhow::Result, Value<'v>>> { + let node = &self.0; + let pkg = node.label().pkg(); + let attrs_iter = node.attrs(AttrInspectOptions::All); + attrs_iter + .map(|attr| { + let name = heap.alloc_str_intern(attr.name); + let value = attr.value.to_value(pkg, heap)?; + Ok((name, value)) + }) + .collect::>>() + } + + fn has_attr(&self, key: &str) -> bool { + let node = &self.0; + node.attr_or_none(key, AttrInspectOptions::All).is_some() + } +} + +impl NodeAttributeGetter for StarlarkConfiguredTargetNode { + fn get_attr<'v>(&self, key: &str, heap: &'v Heap) -> anyhow::Result>> { + let node = &self.0; + let pkg = PackageLabelOption::PackageLabel(node.label().pkg()); + match node.get(key, AttrInspectOptions::All) { + Some(attr) => Ok(NoneOr::Other(attr.value.to_value(pkg, heap)?)), + None => Ok(NoneOr::None), + } + } + + fn get_attrs<'v>( + &self, + heap: &'v Heap, + ) -> anyhow::Result, Value<'v>>> { + let node = &self.0; + let pkg = PackageLabelOption::PackageLabel(node.label().pkg()); + let attrs_iter = node.attrs(AttrInspectOptions::All); + attrs_iter + .map(|attr| { + let name = heap.alloc_str_intern(attr.name); + let value = attr.value.to_value(pkg, heap)?; + Ok((name, value)) + }) + .collect::>>() + } + + fn has_attr(&self, key: &str) -> bool { + let node = &self.0; + // attr coercion here is somewhat expensive, we need a more efficient way to check if an attr exists + node.get(key, AttrInspectOptions::All).is_some() + } +} diff --git a/app/buck2_bxl/src/bxl/starlark_defs/nodes/unconfigured.rs b/app/buck2_bxl/src/bxl/starlark_defs/nodes/unconfigured.rs index cdcf22ae6ab9a..4593ac784ab08 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/nodes/unconfigured.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/nodes/unconfigured.rs @@ -7,6 +7,8 @@ * of this source tree. 
*/ +use std::convert::Infallible; + use allocative::Allocative; use buck2_interpreter::types::target_label::StarlarkTargetLabel; use buck2_node::attrs::inspect_options::AttrInspectOptions; @@ -14,35 +16,38 @@ use buck2_node::nodes::unconfigured::TargetNode; use derive_more::Display; use dupe::Dupe; use starlark::any::ProvidesStaticType; +use starlark::collections::SmallMap; use starlark::environment::Methods; use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; use starlark::starlark_module; use starlark::starlark_simple_value; +use starlark::values::list::AllocList; +use starlark::values::none::NoneOr; use starlark::values::starlark_value; use starlark::values::structs::AllocStruct; use starlark::values::Heap; use starlark::values::NoSerialize; use starlark::values::StarlarkValue; +use starlark::values::StringValue; use starlark::values::UnpackValue; use starlark::values::Value; use starlark::values::ValueLike; -use starlark::StarlarkDocs; +use super::node_attrs::NodeAttributeGetter; use crate::bxl::starlark_defs::file_set::StarlarkFileNode; use crate::bxl::starlark_defs::nodes::unconfigured::attribute::StarlarkCoercedAttr; pub(crate) mod attribute; -#[derive(Debug, Display, ProvidesStaticType, Allocative, StarlarkDocs)] +#[derive(Debug, Display, ProvidesStaticType, Allocative, Clone, Dupe)] #[derive(NoSerialize)] // TODO probably should be serializable the same as how queries serialize -#[display(fmt = "{:?}", self)] -#[starlark_docs(directory = "bxl")] +#[display("{:?}", self)] pub(crate) struct StarlarkTargetNode(pub(crate) TargetNode); starlark_simple_value!(StarlarkTargetNode); -#[starlark_value(type = "unconfigured_target_node")] +#[starlark_value(type = "bxl.UnconfiguredTargetNode")] impl<'v> StarlarkValue<'v> for StarlarkTargetNode { fn get_methods() -> Option<&'static Methods> { static RES: MethodsStatic = MethodsStatic::new(); @@ -51,14 +56,12 @@ impl<'v> StarlarkValue<'v> for StarlarkTargetNode { } impl<'a> UnpackValue<'a> for StarlarkTargetNode { - fn expected() -> String { - "target node".to_owned() - } + type Error = Infallible; - fn unpack_value(value: starlark::values::Value<'a>) -> Option<Self> { - value + fn unpack_value_impl(value: Value<'a>) -> Result<Option<Self>, Self::Error> { + Ok(value .downcast_ref::<Self>() - .map(|value| Self(value.0.dupe())) + .map(|value| Self(value.0.dupe()))) } } @@ -66,11 +69,13 @@ impl<'a> UnpackValue<'a> for StarlarkTargetNode { #[starlark_module] fn target_node_value_methods(builder: &mut MethodsBuilder) { /// Gets the coerced attributes from the unconfigured target node. Returns a struct. + /// Right now, it is not recommended to use this method. Instead, use the `get_attr` and `get_attrs` methods. + /// We will deprecate this method in the future. /// /// Sample usage: /// ```text /// def _impl_attributes(ctx): - /// target_node = ctx.uquery().eval("owner('path/to/file')")[0] + /// target_node = ctx.uquery().eval("//foo:bar")[0] /// ctx.output.print(target_node.attrs.my_attr) /// ``` #[starlark(attribute)] @@ -91,12 +96,67 @@ fn target_node_value_methods(builder: &mut MethodsBuilder) { Ok(heap.alloc(AllocStruct(attrs))) } + /// Gets the attribute from the unconfigured target node. + /// If the attribute is unset, returns the default value. + /// If the attribute is not defined by the rule, returns `None`. + /// It will not return special attributes (attributes that start with 'buck.' in the `buck2 uquery -A` command).
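+ ///
+ /// In addition to the sample below, a missing attribute can be guarded with `has_attr`;
+ /// a hedged sketch (target and attribute names are hypothetical):
+ /// ```text
+ /// def _impl(ctx):
+ ///     node = ctx.uquery().eval("//foo:bar")[0]
+ ///     if node.has_attr("my_attr"):
+ ///         ctx.output.print(node.get_attr("my_attr"))
+ /// ```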
+ /// + /// Sample usage: + /// ```text + /// def _impl_attributes(ctx): + /// target_node = ctx.uquery().eval("//foo:bar")[0] + /// ctx.output.print(target_node.get_attr('my_attr')) + /// ``` + fn get_attr<'v>( + this: &StarlarkTargetNode, + #[starlark(require=pos)] key: &str, + heap: &'v Heap, + ) -> anyhow::Result<NoneOr<Value<'v>>> { + NodeAttributeGetter::get_attr(this, key, heap) + } + + /// Gets all the attributes (not including special attributes) from the unconfigured target node. + /// For attributes that are not explicitly set, the default value is returned. + /// + /// Sample usage: + /// ```text + /// def _impl_attributes(ctx): + /// target_node = ctx.uquery().eval("//foo:bar")[0] + /// ctx.output.print(target_node.get_attrs()) + /// ``` + fn get_attrs<'v>( + this: &StarlarkTargetNode, + heap: &'v Heap, + ) -> anyhow::Result<SmallMap<StringValue<'v>, Value<'v>>> { + NodeAttributeGetter::get_attrs(this, heap) + } + + /// Checks whether the rule has the given attribute. + /// + /// A known attribute is always either set explicitly or takes its default value + /// (otherwise the target would not have been created). + /// For special attributes, it returns `False`. + /// + /// Sample usage: + /// ```text + /// def _impl_attributes(ctx): + /// target_node = ctx.uquery().eval("//foo:bar")[0] + /// ctx.output.print(target_node.has_attr('my_attr')) + /// ``` + fn has_attr<'v>( + this: &StarlarkTargetNode, + #[starlark(require=pos)] key: &str, + ) -> anyhow::Result<bool> { + Ok(NodeAttributeGetter::has_attr(this, key)) + } + /// Gets the label from the unconfigured target node. /// /// Sample usage: /// ```text /// def _impl_label(ctx): - /// target_node = ctx.uquery().eval("owner('path/to/file')")[0] + /// target_node = ctx.uquery().eval("//foo:bar")[0] /// ctx.output.print(target_node.label) /// ``` #[starlark(attribute)] @@ -109,7 +169,7 @@ fn target_node_value_methods(builder: &mut MethodsBuilder) { /// Sample usage: /// ```text /// def _impl_label(ctx): - /// target_node = ctx.uquery().eval("owner('path/to/file')")[0] + /// target_node = ctx.uquery().eval("//foo:bar")[0] /// ctx.output.print(target_node.buildfile_path) /// ``` #[starlark(attribute)] @@ -123,11 +183,72 @@ fn target_node_value_methods(builder: &mut MethodsBuilder) { /// Sample usage: /// ```text /// def _impl_rule_type(ctx): - /// target_node = ctx.uquery().owner('path/to/file')[0] + /// target_node = ctx.uquery().eval("//foo:bar")[0] /// ctx.output.print(target_node.rule_type) /// ``` #[starlark(attribute)] - fn rule_type(this: &StarlarkTargetNode) -> anyhow::Result<String> { - Ok(this.0.rule_type().to_string()) + fn rule_type<'v>( + this: &'v StarlarkTargetNode, + heap: &'v Heap, + ) -> anyhow::Result<StringValue<'v>> { + Ok(heap.alloc_str_intern(this.0.rule_type().to_string().as_str())) + } + + /// Gets the target's corresponding rule's kind, which is one of + /// - normal (with no special properties) + /// - configured (usable in a configuration context) + /// - toolchain (only usable as a toolchain dep) + /// + /// Sample usage: + /// ```text + /// def _impl_rule_kind(ctx): + /// target_node = ctx.uquery().eval("//foo:bar")[0] + /// ctx.output.print(target_node.rule_kind) + /// ``` + #[starlark(attribute)] + fn rule_kind<'v>( + this: &'v StarlarkTargetNode, + heap: &'v Heap, + ) -> anyhow::Result<StringValue<'v>> { + Ok(heap.alloc_str_intern(this.0.rule_kind().as_str())) + } + + /// Gets the target's special attr `oncall`. + /// + /// Sample usage: + /// ```text + /// def _impl_get_oncall(ctx): + /// target_node = ctx.uquery().eval("//foo:bar")[0] + /// ctx.output.print(target_node.oncall) + /// ``` + #[starlark(attribute)] + fn
oncall<'v>( + this: &'v StarlarkTargetNode, + heap: &'v Heap, + ) -> anyhow::Result>> { + match this.0.oncall() { + None => Ok(NoneOr::None), + Some(oncall) => Ok(NoneOr::Other(heap.alloc_str_intern(oncall))), + } + } + + /// Gets all deps for this target. + /// The result is a list of `UnconfiguredTargetLabel`. + /// + /// Sample usage: + /// ```text + /// def _impl_get_deps(ctx): + /// target_node = ctx.uquery().eval("//foo:bar")[0] + /// ctx.output.print(target_node.deps()) + /// ``` + fn deps<'v>( + this: &'v StarlarkTargetNode, + ) -> anyhow::Result + 'v>> { + Ok(AllocList( + this.0 + .deps() + .map(|label| StarlarkTargetLabel::new(label.dupe())) + .into_iter(), + )) } } diff --git a/app/buck2_bxl/src/bxl/starlark_defs/nodes/unconfigured/attribute.rs b/app/buck2_bxl/src/bxl/starlark_defs/nodes/unconfigured/attribute.rs index ba4f2f5c636fd..9d8f9533dea29 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/nodes/unconfigured/attribute.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/nodes/unconfigured/attribute.rs @@ -13,14 +13,16 @@ use std::fmt::Formatter; use allocative::Allocative; use buck2_artifact::artifact::artifact_type::Artifact; use buck2_artifact::artifact::source_artifact::SourceArtifact; -use buck2_build_api::interpreter::rule_defs::artifact::StarlarkArtifact; +use buck2_build_api::interpreter::rule_defs::artifact::starlark_artifact::StarlarkArtifact; use buck2_build_api::interpreter::rule_defs::provider::dependency::DependencyGen; -use buck2_core::buck_path::path::BuckPath; +use buck2_core::package::source_path::SourcePath; use buck2_core::package::PackageLabel; +use buck2_error::starlark_error::from_starlark_with_options; use buck2_interpreter::types::configured_providers_label::StarlarkConfiguredProvidersLabel; use buck2_interpreter::types::configured_providers_label::StarlarkProvidersLabel; use buck2_interpreter::types::opaque_metadata::OpaqueMetadata; use buck2_interpreter::types::target_label::StarlarkTargetLabel; +use buck2_interpreter_for_build::interpreter::selector::StarlarkSelector; use buck2_node::attrs::coerced_attr::CoercedAttr; use buck2_node::attrs::display::AttrDisplayWithContext; use buck2_node::attrs::fmt_context::AttrFmtContext; @@ -49,11 +51,9 @@ use starlark::values::FrozenValue; use starlark::values::Heap; use starlark::values::StarlarkValue; use starlark::values::Value; -use starlark::StarlarkDocs; use starlark_map::small_map::SmallMap; -#[derive(Debug, ProvidesStaticType, From, Allocative, StarlarkDocs)] -#[starlark_docs(directory = "bxl")] +#[derive(Debug, ProvidesStaticType, From, Allocative)] pub(crate) struct StarlarkCoercedAttr(pub(crate) CoercedAttr, pub(crate) PackageLabel); starlark_simple_value!(StarlarkCoercedAttr); @@ -63,6 +63,7 @@ impl Display for StarlarkCoercedAttr { self.0.fmt( &AttrFmtContext { package: Some(self.1.dupe()), + options: Default::default(), }, f, ) @@ -77,6 +78,7 @@ impl Serialize for StarlarkCoercedAttr { self.0.serialize_with_ctx( &AttrFmtContext { package: Some(self.1.dupe()), + options: Default::default(), }, serializer, ) @@ -122,7 +124,7 @@ fn coerced_attr_methods(builder: &mut MethodsBuilder) { } } -pub trait CoercedAttrExt { +pub(crate) trait CoercedAttrExt { fn starlark_type(&self) -> anyhow::Result<&'static str>; fn to_value<'v>(&self, pkg: PackageLabel, heap: &'v Heap) -> anyhow::Result>; @@ -159,6 +161,9 @@ impl CoercedAttrExt for CoercedAttr { CoercedAttr::Query(_) => Ok(starlark::values::string::STRING_TYPE), CoercedAttr::SourceFile(_) => Ok(StarlarkArtifact::get_type_value_static().as_str()), 
CoercedAttr::Metadata(..) => Ok(OpaqueMetadata::get_type_value_static().as_str()), + CoercedAttr::TargetModifiers(..) => { + Ok(OpaqueMetadata::get_type_value_static().as_str()) + } // TODO(@wendyy) - should return the starlark selector type. CoercedAttr::Selector(_) => Ok("selector"), // TODO(@wendyy) - starlark concat is not implemented. @@ -184,7 +189,13 @@ impl CoercedAttrExt for CoercedAttr { for (k, v) in map.iter() { res.insert_hashed( - k.to_value(pkg.dupe(), heap)?.get_hashed()?, + k.to_value(pkg.dupe(), heap)?.get_hashed().map_err(|e| { + from_starlark_with_options( + e, + buck2_error::starlark_error::NativeErrorHandling::Unknown, + false, + ) + })?, v.to_value(pkg.dupe(), heap)?, ); } @@ -202,32 +213,34 @@ impl CoercedAttrExt for CoercedAttr { }, CoercedAttr::ExplicitConfiguredDep(d) => heap.alloc( // TODO(@wendyy) - this needs better support - StarlarkProvidersLabel::new(d.as_ref().label.clone()), + StarlarkProvidersLabel::new(d.as_ref().label.dupe()), ), CoercedAttr::ConfiguredDep(d) => heap.alloc(StarlarkConfiguredProvidersLabel::new( - d.as_ref().label.clone(), + d.as_ref().label.dupe(), )), - CoercedAttr::SplitTransitionDep(d) => { - heap.alloc(StarlarkProvidersLabel::new(d.clone())) - } - CoercedAttr::ConfigurationDep(c) => heap.alloc(StarlarkTargetLabel::new(c.dupe())), + CoercedAttr::SplitTransitionDep(d) => heap.alloc(StarlarkProvidersLabel::new(d.dupe())), + CoercedAttr::ConfigurationDep(c) => heap.alloc(StarlarkTargetLabel::new(c.0.dupe())), CoercedAttr::PluginDep(d) => heap.alloc(StarlarkTargetLabel::new(d.dupe())), - CoercedAttr::Dep(d) => heap.alloc(StarlarkProvidersLabel::new(d.clone())), - CoercedAttr::SourceLabel(s) => heap.alloc(StarlarkProvidersLabel::new(s.clone())), - CoercedAttr::Label(l) => heap.alloc(StarlarkProvidersLabel::new(l.clone())), + CoercedAttr::Dep(d) => heap.alloc(StarlarkProvidersLabel::new(d.dupe())), + CoercedAttr::SourceLabel(s) => heap.alloc(StarlarkProvidersLabel::new(s.dupe())), + CoercedAttr::Label(l) => heap.alloc(StarlarkProvidersLabel::new(l.dupe())), CoercedAttr::Arg(arg) => heap.alloc(arg.to_string()), - CoercedAttr::Query(query) => heap.alloc(query.query.query()), + CoercedAttr::Query(query) => heap.alloc(&query.query.query), CoercedAttr::SourceFile(f) => heap.alloc(StarlarkArtifact::new(Artifact::from( - SourceArtifact::new(BuckPath::new(pkg.to_owned(), f.path().dupe())), + SourceArtifact::new(SourcePath::new(pkg.to_owned(), f.path().dupe())), ))), - CoercedAttr::Metadata(..) 
=> heap.alloc(OpaqueMetadata),
-            CoercedAttr::Selector(_) => {
-                // TODO(@wendyy) - this needs better support
-                heap.alloc_str("selector(...)").to_value()
+            CoercedAttr::Metadata(data) => heap.alloc(data.to_value()),
+            CoercedAttr::TargetModifiers(data) => heap.alloc(data.to_value()),
+            CoercedAttr::Selector(selector) => {
+                let map: SmallMap<String, Value> = selector
+                    .all_entries()
+                    .map(|(k, v)| v.to_value(pkg.dupe(), heap).map(|v| (k.to_string(), v)))
+                    .collect::<anyhow::Result<_>>()?;
+                heap.alloc(StarlarkSelector::new(heap.alloc(map)))
             }
-            CoercedAttr::Concat(_) => {
-                // TODO(@wendyy) - this needs better support
-                heap.alloc_str("concat(...)").to_value()
+            CoercedAttr::Concat(l) => {
+                let list = l.as_ref().try_map(|attr| attr.to_value(pkg.dupe(), heap))?;
+                StarlarkSelector::from_concat(list, heap)
             }
         })
     }
diff --git a/app/buck2_bxl/src/bxl/starlark_defs/providers_expr.rs b/app/buck2_bxl/src/bxl/starlark_defs/providers_expr.rs
index 692aacb255d70..01369763315f1 100644
--- a/app/buck2_bxl/src/bxl/starlark_defs/providers_expr.rs
+++ b/app/buck2_bxl/src/bxl/starlark_defs/providers_expr.rs
@@ -7,15 +7,15 @@
  * of this source tree.
  */

+use buck2_common::global_cfg_options::GlobalCfgOptions;
 use buck2_core::cells::cell_path::CellPathRef;
 use buck2_core::cells::paths::CellRelativePath;
+use buck2_core::pattern::pattern::ParsedPattern;
 use buck2_core::pattern::pattern_type::ProvidersPatternExtra;
-use buck2_core::pattern::ParsedPattern;
 use buck2_core::provider::label::ConfiguredProvidersLabel;
 use buck2_core::provider::label::ProvidersLabel;
 use buck2_core::provider::label::ProvidersLabelMaybeConfigured;
 use buck2_core::provider::label::ProvidersName;
-use buck2_core::target::label::TargetLabel;
 use buck2_interpreter::types::configured_providers_label::StarlarkConfiguredProvidersLabel;
 use buck2_interpreter::types::configured_providers_label::StarlarkProvidersLabel;
 use buck2_interpreter::types::target_label::StarlarkConfiguredTargetLabel;
@@ -25,29 +25,17 @@ use buck2_node::nodes::unconfigured::TargetNode;
 use buck2_node::target_calculation::ConfiguredTargetCalculation;
 use dice::DiceComputations;
 use dupe::Dupe;
-use futures::future::BoxFuture;
-use futures::future::LocalBoxFuture;
 use futures::FutureExt;
 use itertools::Either;
-use starlark::eval::Evaluator;
-use starlark::values::list::ListRef;
-use starlark::values::Value;
-use starlark::values::ValueLike;
-use thiserror::Error;
+use starlark::values::list::UnpackList;
+use starlark::values::type_repr::StarlarkTypeRepr;
+use starlark::values::UnpackValue;

 use crate::bxl::starlark_defs::context::BxlContextNoDice;
 use crate::bxl::starlark_defs::nodes::configured::StarlarkConfiguredTargetNode;
 use crate::bxl::starlark_defs::nodes::unconfigured::StarlarkTargetNode;
 use crate::bxl::starlark_defs::targetset::StarlarkTargetSet;

-#[derive(Debug, Error)]
-enum ProviderExprError {
-    #[error("Expected a list of target like items, but was `{0}`")]
-    NotAListOfTargets(String),
-    #[error("Expected a single target like item, but was `{0}`")]
-    NotATarget(String),
-}
-
 /// ProvidersExpr is just a simple type that can be used in starlark_module
 /// functions for arguments that should be a set of provider labels. It will accept a
 /// literal (like `//some:target[subtarget]`) or list of literals or a single provider label
@@ -56,115 +44,212 @@ pub(crate) enum ProvidersExpr<P: ProvidersLabelMaybeConfigured> {
     Iterable(Vec<P>),
 }

-impl ProvidersExpr<ConfiguredProvidersLabel> {
-    pub(crate) async fn unpack_opt<'v, 'c>(
-        value: Value<'v>,
-        target_platform: Option<TargetLabel>,
-        ctx: &BxlContextNoDice<'_>,
-        dice: &'c DiceComputations,
-        eval: &Evaluator<'v, '_>,
-    ) -> anyhow::Result<Option<Self>> {
-        Ok(
-            if let Some(resolved) = Self::unpack_literal(value, &target_platform, ctx, dice).await?
-            {
-                Some(resolved)
-            } else {
-                Self::unpack_iterable(value, ctx, eval, |v, ctx| {
-                    Self::unpack_literal(v, &target_platform, ctx, dice)
-                })
-                .await?
-            },
-        )
+/// ProvidersLabelArg is a type that can be used as an argument in the Starlark API for
+/// an unconfigured provider label.
+#[derive(StarlarkTypeRepr, UnpackValue)]
+pub(crate) enum ProvidersLabelArg<'v> {
+    Str(&'v str),
+    StarlarkTargetLabel(&'v StarlarkTargetLabel),
+    StarlarkProvidersLabel(&'v StarlarkProvidersLabel),
+    StarlarkTargetNode(&'v StarlarkTargetNode),
+}
+
+#[derive(StarlarkTypeRepr, UnpackValue)]
+pub(crate) enum ProvidersLabelListArg<'v> {
+    List(UnpackList<ProvidersLabelArg<'v>>),
+    TargetSet(&'v StarlarkTargetSet<TargetNode>),
+}
+
+#[derive(StarlarkTypeRepr, UnpackValue)]
+pub(crate) enum ProvidersExprArg<'v> {
+    One(ProvidersLabelArg<'v>),
+    List(ProvidersLabelListArg<'v>),
+}
+
+/// ConfiguredProvidersLabelArg is a type that can be used as an argument in the Starlark API for
+/// a configured provider label.
+#[derive(StarlarkTypeRepr, UnpackValue)]
+pub(crate) enum ConfiguredProvidersLabelArg<'v> {
+    Node(&'v StarlarkConfiguredTargetNode),
+    Label(&'v StarlarkConfiguredTargetLabel),
+    ProvidersLabel(&'v StarlarkConfiguredProvidersLabel),
+}
+
+/// AnyProvidersLabelArg is a type that can be used as an argument in the Starlark API for
+/// either a configured or an unconfigured provider label.
+#[derive(StarlarkTypeRepr, UnpackValue)]
+pub(crate) enum AnyProvidersLabelArg<'v> {
+    Configured(ConfiguredProvidersLabelArg<'v>),
+    Unconfigured(ProvidersLabelArg<'v>),
+}
+
+#[derive(StarlarkTypeRepr, UnpackValue)]
+pub(crate) enum AnyProvidersLabelListArg<'v> {
+    StarlarkTargetSet(&'v StarlarkTargetSet<TargetNode>),
+    StarlarkConfiguredTargetSet(&'v StarlarkTargetSet<ConfiguredTargetNode>),
+    List(UnpackList<AnyProvidersLabelArg<'v>>),
+}
+
+#[derive(StarlarkTypeRepr, UnpackValue)]
+pub(crate) enum AnyProvidersExprArg<'v> {
+    One(AnyProvidersLabelArg<'v>),
+    List(AnyProvidersLabelListArg<'v>),
+}
+
+impl<'v> ConfiguredProvidersLabelArg<'v> {
+    pub(crate) fn configured_providers_label(&self) -> ConfiguredProvidersLabel {
+        match self {
+            ConfiguredProvidersLabelArg::Node(node) => {
+                ConfiguredProvidersLabel::default_for(node.0.label().dupe())
+            }
+            ConfiguredProvidersLabelArg::Label(label) => {
+                ConfiguredProvidersLabel::default_for(label.label().dupe())
+            }
+            ConfiguredProvidersLabelArg::ProvidersLabel(providers_label) => {
+                providers_label.label().dupe()
+            }
+        }
+    }
+}
+
+impl<'v> AnyProvidersExprArg<'v> {
+    pub(crate) fn contains_unconfigured(&self) -> bool {
+        match self {
+            AnyProvidersExprArg::One(arg) => arg.is_unconfigured(),
+            AnyProvidersExprArg::List(arg) => arg.contains_unconfigured(),
+        }
+    }
+}
+
+impl<'v> AnyProvidersLabelArg<'v> {
+    fn is_unconfigured(&self) -> bool {
+        matches!(self, AnyProvidersLabelArg::Unconfigured(_))
+    }
+}
+
+impl<'v> AnyProvidersLabelListArg<'v> {
+    fn contains_unconfigured(&self) -> bool {
+        match self {
+            AnyProvidersLabelListArg::List(args) => {
+                args.items.iter().any(|arg| arg.is_unconfigured())
+            }
+            AnyProvidersLabelListArg::StarlarkTargetSet(_) => true,
+            _ => false,
+        }
     }
+}

+impl ProvidersExpr<ConfiguredProvidersLabel> {
     pub(crate) async fn unpack<'v, 'c>(
-        value: Value<'v>,
-        target_platform: Option<TargetLabel>,
+        arg: AnyProvidersExprArg<'v>,
+        global_cfg_options_override:
&GlobalCfgOptions, ctx: &BxlContextNoDice<'_>, - dice: &'c DiceComputations, - eval: &Evaluator<'v, '_>, + dice: &'c mut DiceComputations<'_>, ) -> anyhow::Result { - Ok( - if let Some(resolved) = - Self::unpack_opt(value, target_platform, ctx, dice, eval).await? - { - resolved - } else { - return Err(anyhow::anyhow!(ProviderExprError::NotAListOfTargets( - value.to_repr() - ))); - }, - ) + match arg { + AnyProvidersExprArg::One(arg) => Ok(ProvidersExpr::Literal( + Self::unpack_literal(arg, global_cfg_options_override, ctx, dice).await?, + )), + AnyProvidersExprArg::List(arg) => { + Ok(Self::unpack_iterable(arg, global_cfg_options_override, ctx, dice).await?) + } + } } - fn unpack_literal<'v, 'c>( - value: Value<'v>, - target_platform: &'c Option, + async fn unpack_literal<'v, 'c>( + arg: AnyProvidersLabelArg<'v>, + global_cfg_options_override: &'c GlobalCfgOptions, ctx: &BxlContextNoDice<'_>, - dice: &'c DiceComputations, - ) -> BoxFuture<'c, anyhow::Result>> { - if let Some(configured_target) = value.downcast_ref::() { - futures::future::ready(Ok(Some(Self::Literal(ConfiguredProvidersLabel::new( - configured_target.0.label().dupe(), - ProvidersName::Default, - ))))) - .boxed() - } else if let Some(configured_target) = - value.downcast_ref::() - { - futures::future::ready(Ok(Some(Self::Literal(ConfiguredProvidersLabel::new( - configured_target.label().dupe(), - ProvidersName::Default, - ))))) - .boxed() - } else if let Some(configured_target) = - value.downcast_ref::() - { - futures::future::ready(Ok(Some(Self::Literal(configured_target.label().clone())))) - .boxed() - } else { - match Self::unpack_providers_label(value, ctx) { - Ok(Some(label)) => async move { - dice.get_configured_provider_label(&label, target_platform.as_ref()) - .map(|res| res.map(|r| Some(Self::Literal(r)))) + dice: &'c mut DiceComputations<'_>, + ) -> anyhow::Result { + match arg { + AnyProvidersLabelArg::Configured(arg) => Ok(arg.configured_providers_label()), + AnyProvidersLabelArg::Unconfigured(arg) => { + let label = Self::unpack_providers_label(arg, ctx)?; + dice.get_configured_provider_label(&label, global_cfg_options_override) + .await + } + } + } + + async fn unpack_iterable<'c, 'v: 'c>( + arg: AnyProvidersLabelListArg<'v>, + global_cfg_options_override: &'c GlobalCfgOptions, + ctx: &'c BxlContextNoDice<'_>, + dice: &'c mut DiceComputations<'_>, + ) -> anyhow::Result> { + match arg { + AnyProvidersLabelListArg::StarlarkTargetSet(s) => Ok(ProvidersExpr::Iterable( + dice.try_compute_join(s.0.iter(), |dice, node| { + async move { + let providers_label = ProvidersLabel::default_for(node.label().dupe()); + dice.get_configured_provider_label( + &providers_label, + global_cfg_options_override, + ) .await + } + .boxed() + }) + .await?, + )), + AnyProvidersLabelListArg::StarlarkConfiguredTargetSet(s) => { + Ok(ProvidersExpr::Iterable( + s.0.iter() + .map(|node| ConfiguredProvidersLabel::default_for(node.label().dupe())) + .collect(), + )) + } + AnyProvidersLabelListArg::List(iterable) => { + let mut res = Vec::new(); + for arg in iterable.items { + res.push( + Self::unpack_literal(arg, global_cfg_options_override, ctx, dice).await?, + ); } - .boxed(), - Ok(None) => futures::future::ready(Ok(None)).boxed(), - Err(e) => futures::future::ready(Err(e)).boxed(), + + Ok(Self::Iterable(res)) } } } } impl ProvidersExpr { - pub(crate) async fn unpack<'v>( - value: Value<'v>, + pub(crate) fn unpack<'v>( + arg: ProvidersExprArg<'v>, ctx: &BxlContextNoDice<'_>, - eval: &Evaluator<'v, '_>, ) -> anyhow::Result { - Ok(if let 
Some(resolved) = Self::unpack_literal(value, ctx)? { - resolved - } else if let Some(resolved) = Self::unpack_iterable(value, ctx, eval, |value, ctx| { - let res = Self::unpack_literal(value, ctx); - async move { res }.boxed() - }) - .await? - { - resolved - } else { - return Err(anyhow::anyhow!(ProviderExprError::NotAListOfTargets( - value.to_repr() - ))); - }) + match arg { + ProvidersExprArg::One(arg) => Self::unpack_literal(arg, ctx), + ProvidersExprArg::List(arg) => Self::unpack_iterable(arg, ctx), + } } fn unpack_literal<'v>( - value: Value<'v>, + value: ProvidersLabelArg<'v>, ctx: &BxlContextNoDice<'_>, - ) -> anyhow::Result> { - Self::unpack_providers_label(value, ctx)? - .map_or(Ok(None), |label| Ok(Some(Self::Literal(label)))) + ) -> anyhow::Result { + Ok(Self::Literal(Self::unpack_providers_label(value, ctx)?)) + } + + fn unpack_iterable<'c, 'v: 'c>( + arg: ProvidersLabelListArg<'v>, + ctx: &'c BxlContextNoDice<'_>, + ) -> anyhow::Result> { + match arg { + ProvidersLabelListArg::TargetSet(s) => Ok(ProvidersExpr::Iterable( + s.0.iter() + .map(|node| ProvidersLabel::default_for(node.label().dupe())) + .collect(), + )), + ProvidersLabelListArg::List(iterable) => { + let mut res = Vec::new(); + for val in iterable.items { + res.push(Self::unpack_providers_label(val, ctx)?) + } + Ok(ProvidersExpr::Iterable(res)) + } + } } } @@ -177,70 +262,30 @@ impl ProvidersExpr

{ } fn unpack_providers_label<'v>( - value: Value<'v>, + arg: ProvidersLabelArg<'v>, ctx: &BxlContextNoDice<'_>, - ) -> anyhow::Result> { - #[allow(clippy::manual_map)] // `if else if` looks better here - Ok(if let Some(s) = value.unpack_str() { - Some( - ParsedPattern::::parse_relaxed( - &ctx.target_alias_resolver, + ) -> anyhow::Result { + match arg { + ProvidersLabelArg::Str(s) => { + Ok(ParsedPattern::::parse_relaxed( + ctx.target_alias_resolver(), // TODO(nga): Parse relaxed relative to cell root is incorrect. - CellPathRef::new(ctx.cell_name, CellRelativePath::empty()), + CellPathRef::new(ctx.cell_name(), CellRelativePath::empty()), s, - &ctx.cell_resolver, + ctx.cell_resolver(), + ctx.cell_alias_resolver(), )? - .as_providers_label(s)?, - ) - } else if let Some(target) = value.downcast_ref::() { - Some(ProvidersLabel::new( + .as_providers_label(s)?) + } + ProvidersLabelArg::StarlarkTargetLabel(target) => Ok(ProvidersLabel::new( target.label().dupe(), ProvidersName::Default, - )) - } else if let Some(label) = value.downcast_ref::() { - Some(label.label().clone()) - } else if let Some(node) = value.downcast_ref::() { - Some(ProvidersLabel::new( + )), + ProvidersLabelArg::StarlarkProvidersLabel(label) => Ok(label.label().dupe()), + ProvidersLabelArg::StarlarkTargetNode(node) => Ok(ProvidersLabel::new( node.0.label().dupe(), ProvidersName::Default, - )) - } else { - None - }) - } - - async fn unpack_iterable<'c, 'v: 'c>( - value: Value<'v>, - ctx: &'c BxlContextNoDice<'_>, - eval: &Evaluator<'v, '_>, - unpack_literal: impl Fn( - Value<'v>, - &'c BxlContextNoDice<'_>, - ) -> LocalBoxFuture<'c, anyhow::Result>>>, - ) -> anyhow::Result>> { - #[allow(clippy::manual_map)] // `if else if` looks better here - let iterable = if let Some(s) = value.downcast_ref::>() { - Some(Either::Left(Either::Left(s.iter(eval.heap())))) - } else if let Some(s) = value.downcast_ref::>() { - Some(Either::Left(Either::Right(s.iter(eval.heap())))) - } else if let Some(iterable) = ListRef::from_value(value) { - Some(Either::Right(iterable.iter())) - } else { - None - } - .ok_or_else(|| ProviderExprError::NotATarget(value.to_repr()))?; - - let mut res = Vec::new(); - for val in iterable { - if let Some(ProvidersExpr::Literal(resolved_val)) = unpack_literal(val, ctx).await? { - res.push(resolved_val) - } else { - return Err(anyhow::anyhow!(ProviderExprError::NotATarget( - val.to_repr() - ))); - } + )), } - - Ok(Some(Self::Iterable(res))) } } diff --git a/app/buck2_bxl/src/bxl/starlark_defs/query_util.rs b/app/buck2_bxl/src/bxl/starlark_defs/query_util.rs index 9b69355491f5d..95b457ac3cec7 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/query_util.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/query_util.rs @@ -7,10 +7,11 @@ * of this source tree. 
*/ +use buck2_error::starlark_error::from_starlark_with_options; use buck2_query::query::syntax::simple::eval::values::QueryEvaluationResult; use buck2_query::query::syntax::simple::eval::values::QueryEvaluationValue; -use starlark::eval::Evaluator; use starlark::values::dict::Dict; +use starlark::values::Heap; use starlark::values::Value; use super::targetset::NodeLike; @@ -19,28 +20,34 @@ use crate::bxl::starlark_defs::targetset::StarlarkTargetSet; pub(crate) fn parse_query_evaluation_result<'v, T: NodeLike>( result: QueryEvaluationResult, - eval: &mut Evaluator<'v, '_>, + heap: &'v Heap, ) -> anyhow::Result> { Ok(match result { QueryEvaluationResult::Single(result) => match result { QueryEvaluationValue::TargetSet(targets) => { - eval.heap().alloc(StarlarkTargetSet::from(targets)) + heap.alloc(StarlarkTargetSet::from(targets)) } - QueryEvaluationValue::FileSet(files) => eval.heap().alloc(StarlarkFileSet::from(files)), + QueryEvaluationValue::FileSet(files) => heap.alloc(StarlarkFileSet::from(files)), }, - QueryEvaluationResult::Multiple(multi) => eval.heap().alloc(Dict::new( + QueryEvaluationResult::Multiple(multi) => heap.alloc(Dict::new( multi .0 .into_iter() .map(|(q, res)| { Ok(( - eval.heap().alloc(q).get_hashed()?, + heap.alloc(q).get_hashed().map_err(|e| { + from_starlark_with_options( + e, + buck2_error::starlark_error::NativeErrorHandling::Unknown, + false, + ) + })?, match res? { QueryEvaluationValue::TargetSet(targets) => { - eval.heap().alloc(StarlarkTargetSet::from(targets)) + heap.alloc(StarlarkTargetSet::from(targets)) } QueryEvaluationValue::FileSet(files) => { - eval.heap().alloc(StarlarkFileSet::from(files)) + heap.alloc(StarlarkFileSet::from(files)) } }, )) diff --git a/app/buck2_bxl/src/bxl/starlark_defs/result.rs b/app/buck2_bxl/src/bxl/starlark_defs/result.rs new file mode 100644 index 0000000000000..d406c4e44bf01 --- /dev/null +++ b/app/buck2_bxl/src/bxl/starlark_defs/result.rs @@ -0,0 +1,190 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use allocative::Allocative; +use derivative::Derivative; +use derive_more::Display; +use display_container::fmt_container; +use dupe::Dupe; +use starlark::any::ProvidesStaticType; +use starlark::environment::Methods; +use starlark::environment::MethodsBuilder; +use starlark::environment::MethodsStatic; +use starlark::starlark_complex_values; +use starlark::starlark_module; +use starlark::starlark_simple_value; +use starlark::values::starlark_value; +use starlark::values::string::StarlarkStr; +use starlark::values::Freeze; +use starlark::values::FrozenValue; +use starlark::values::NoSerialize; +use starlark::values::StarlarkValue; +use starlark::values::Trace; +use starlark::values::Value; +use starlark::values::ValueLike; +use starlark::values::ValueTypedComplex; + +#[derive(Debug, buck2_error::Error)] +enum BxlResultError { + #[error("called `bxl.Result.unwrap()` on an `Err` value: {0}")] + UnwrapOnError(buck2_error::Error), + #[error("called `bxl.Result.unwrap_err()` on an `Ok` value: {0}")] + UnwrapErrOnOk(String), +} + +/// Error value object returned by fallible BXL operation. 
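// Illustrative sketch (editor's addition, not part of the original patch):
// how a fallible native result is meant to surface through the `bxl.Result`
// and `bxl.Error` values defined below -- the anyhow error is captured into
// the result object instead of aborting the whole script. `run_fallible_op`
// is a hypothetical stand-in, not a buck2 API.
//
//     fn expose<'v>(heap: &'v Heap) -> Value<'v> {
//         let res: anyhow::Result<Value<'v>> = run_fallible_op(heap);
//         // Ok(v) displays as Result(Ok = v); Err(e) as Result(Err = "...")
//         heap.alloc(StarlarkResultGen::from_result(res))
//     }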
+#[derive(
+    Debug,
+    ProvidesStaticType,
+    Derivative,
+    Display,
+    // TODO(nero): implement Serialize for StarlarkError
+    NoSerialize,
+    Allocative,
+    Trace
+)]
+#[display("bxl.Error({})", StarlarkStr::repr(&format!("{:?}", err)))]
+pub(crate) struct StarlarkError {
+    err: buck2_error::Error,
+}
+
+starlark_simple_value!(StarlarkError);
+
+#[starlark_value(type = "bxl.Error")]
+impl<'v> StarlarkValue<'v> for StarlarkError {
+    fn get_methods() -> Option<&'static Methods> {
+        static RES: MethodsStatic = MethodsStatic::new();
+        RES.methods(error_methods)
+    }
+}
+
+/// The error type for bxl
+#[starlark_module]
+fn error_methods(builder: &mut MethodsBuilder) {
+    /// The error message
+    #[starlark(attribute)]
+    fn message<'v>(this: &'v StarlarkError) -> anyhow::Result<String> {
+        Ok(format!("{:?}", this.err))
+    }
+}
+
+#[derive(
+    Debug,
+    // TODO(nero): implement Serialize for StarlarkResult
+    NoSerialize,
+    Trace,
+    Freeze,
+    ProvidesStaticType,
+    Allocative
+)]
+#[repr(C)]
+pub(crate) enum StarlarkResultGen<T> {
+    Ok(T),
+    Err(#[freeze(identity)] buck2_error::Error),
+}
+
+pub(crate) type StarlarkResult<'v> = StarlarkResultGen<Value<'v>>;
+pub(crate) type FrozenStarlarkResult = StarlarkResultGen<FrozenValue>;
+
+starlark_complex_values!(StarlarkResult);
+
+impl<T: Display> Display for StarlarkResultGen<T> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            StarlarkResultGen::Ok(val) => fmt_container(f, "Result(Ok = ", ")", [val]),
+            StarlarkResultGen::Err(err) => fmt_container(
+                f,
+                "Result(Err = ",
+                ")",
+                // TODO(nero): implement multiline when multiline is requested
+                [StarlarkStr::repr(&format!("{:?}", err))],
+            ),
+        }
+    }
+}
+
+#[starlark_value(type = "bxl.Result")]
+impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for StarlarkResultGen<V>
+where
+    Self: ProvidesStaticType<'v>,
+{
+    fn get_methods() -> Option<&'static Methods>
+    where
+        Self: Sized,
+    {
+        static RES: MethodsStatic = MethodsStatic::new();
+        RES.methods(result_methods)
+    }
+}
+
+#[starlark_module]
+fn result_methods(builder: &mut MethodsBuilder) {
+    /// Returns true if the result is an `Ok` value, false if it is an Error
+    fn is_ok<'v>(this: ValueTypedComplex<'v, StarlarkResult<'v>>) -> anyhow::Result<bool> {
+        Ok(match this.unpack() {
+            either::Either::Left(x) => x.is_ok(),
+            either::Either::Right(x) => x.is_ok(),
+        })
+    }
+
+    /// Unwrap the result, returning the inner value if the result is `Ok`.
+    /// If the result is an `Error`, it will fail
+    fn unwrap<'v>(this: ValueTypedComplex<'v, StarlarkResult<'v>>) -> anyhow::Result<Value<'v>> {
+        match this.unpack() {
+            either::Either::Left(x) => x.unwrap(),
+            either::Either::Right(x) => x.unwrap(),
+        }
+    }
+
+    /// Unwrap the error, returning the inner error if the result is `Err`.
+ /// If the result is an `Ok`, it will fail + fn unwrap_err<'v>( + this: ValueTypedComplex<'v, StarlarkResult<'v>>, + ) -> anyhow::Result { + match this.unpack() { + either::Either::Left(x) => x.unwrap_err(), + either::Either::Right(x) => x.unwrap_err(), + } + } +} + +impl StarlarkResultGen { + pub(crate) fn from_result(res: anyhow::Result) -> Self { + match res { + Ok(val) => Self::Ok(val), + Err(err) => Self::Err(buck2_error::Error::from(err)), + } + } + + fn is_ok(&self) -> bool { + match self { + StarlarkResultGen::Ok(_) => true, + StarlarkResultGen::Err(_) => false, + } + } +} + +impl<'v, V: ValueLike<'v>> StarlarkResultGen { + fn unwrap(&self) -> anyhow::Result> { + match self { + StarlarkResultGen::Ok(val) => Ok(val.to_value()), + StarlarkResultGen::Err(err) => Err(BxlResultError::UnwrapOnError(err.dupe()).into()), + } + } + + fn unwrap_err(&self) -> anyhow::Result { + match self { + StarlarkResultGen::Ok(val) => { + let display_str = format!("{}", val); + Err(BxlResultError::UnwrapErrOnOk(display_str).into()) + } + StarlarkResultGen::Err(err) => Ok(StarlarkError { err: err.dupe() }), + } + } +} diff --git a/app/buck2_bxl/src/bxl/starlark_defs/target_expr.rs b/app/buck2_bxl/src/bxl/starlark_defs/target_expr.rs index f4026d7719c48..4e3f0ffdfd38c 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/target_expr.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/target_expr.rs @@ -9,497 +9,35 @@ use std::borrow::Cow; -use buck2_build_api::configure_targets::get_maybe_compatible_targets; -use buck2_core::cells::cell_path::CellPathRef; -use buck2_core::cells::paths::CellRelativePath; -use buck2_core::configuration::compatibility::IncompatiblePlatformReason; -use buck2_core::configuration::compatibility::MaybeCompatible; -use buck2_core::pattern::pattern_type::TargetPatternExtra; -use buck2_core::pattern::ParsedPattern; -use buck2_core::provider::label::ConfiguredProvidersLabel; -use buck2_core::soft_error; -use buck2_core::target::label::TargetLabel; -use buck2_interpreter::types::target_label::StarlarkConfiguredTargetLabel; -use buck2_interpreter::types::target_label::StarlarkTargetLabel; -use buck2_node::load_patterns::load_patterns; -use buck2_node::load_patterns::MissingTargetBehavior; -use buck2_node::nodes::configured::ConfiguredTargetNode; -use buck2_node::nodes::configured_frontend::ConfiguredTargetNodeCalculation; use buck2_node::nodes::frontend::TargetGraphCalculation; use buck2_node::nodes::unconfigured::TargetNode; -use buck2_node::target_calculation::ConfiguredTargetCalculation; use buck2_query::query::environment::QueryTarget; -use buck2_query::query::syntax::simple::eval::set::TargetSet; -use buck2_util::truncate::truncate; use dice::DiceComputations; use dupe::Dupe; -use dupe::IterDupedExt; -use either::Either; -use futures::TryFutureExt; -use starlark::collections::SmallSet; -use starlark::eval::Evaluator; -use starlark::values::list::ListRef; -use starlark::values::Value; -use starlark::values::ValueLike; -use thiserror::Error; -use crate::bxl::starlark_defs::context::BxlContextNoDice; -use crate::bxl::starlark_defs::nodes::configured::StarlarkConfiguredTargetNode; -use crate::bxl::starlark_defs::nodes::unconfigured::StarlarkTargetNode; -use crate::bxl::starlark_defs::targetset::StarlarkTargetSet; - -/// TargetExpr is just a simple type that can be used in starlark_module -/// functions for arguments that should be target sets. It will accept a -/// literal (like `//some:target`) or list of literals or a TargetSet Value (from one of the -/// BXL functions that return them). 
It can be resolved to a `&TargetSet` with -/// the help of the `targets!()` macro. +#[derive(Clone)] pub(crate) enum TargetExpr<'v, Node: QueryTarget> { Node(Node), - Label(Cow<'v, Node::NodeRef>), - Iterable(Vec>>), - TargetSet(Cow<'v, TargetSet>), + Label(Cow<'v, Node::Key>), } -impl<'v> TargetExpr<'v, ConfiguredTargetNode> { - /// Get a vector of maybe compatible `ConfiguredTargetNode`s from the `TargetExpr`. - /// Any callers of this function will need to call `filter_incompatible()` on the result - /// in order to get the `TargetSet`. - pub(crate) async fn get( - self, - dice: &mut DiceComputations, - ) -> anyhow::Result>> { +impl<'v, Node: QueryTarget> TargetExpr<'v, Node> { + pub(crate) fn node_ref(&self) -> &Node::Key { match self { - TargetExpr::Node(val) => Ok(vec![dice.get_configured_target_node(val.label()).await?]), - TargetExpr::Label(label) => { - Ok(vec![dice.get_configured_target_node(label.as_ref()).await?]) - } - TargetExpr::Iterable(val) => { - let futs = val.into_iter().map(|node_or_ref| async { - match node_or_ref { - Either::Left(node) => dice.get_configured_target_node(node.label()).await, - Either::Right(label) => { - dice.get_configured_target_node(label.as_ref()).await - } - } - }); - - futures::future::join_all(futs).await.into_iter().collect() - } - TargetExpr::TargetSet(val) => futures::future::join_all(val.iter().map(|node| { - dice.get_configured_target_node(node.label()) - .map_err(anyhow::Error::from) - })) - .await - .into_iter() - .collect(), - } - } -} - -// Filters out incompatible targets and emits the error message -pub(crate) fn filter_incompatible( - targets: impl Iterator>, - bxl_ctx: &BxlContextNoDice, -) -> anyhow::Result> { - let mut target_set = TargetSet::new(); - let mut incompatible_targets = SmallSet::new(); - - for res in targets { - match res { - MaybeCompatible::Incompatible(reason) => { - incompatible_targets.insert(reason.target.dupe()); - } - MaybeCompatible::Compatible(target) => { - target_set.insert(target); - } + TargetExpr::Node(node) => node.node_key(), + TargetExpr::Label(label) => label, } } - - if !incompatible_targets.is_empty() { - bxl_ctx.print_to_error_stream( - IncompatiblePlatformReason::skipping_message_for_multiple(incompatible_targets.iter()), - )?; - } - - Ok(target_set) -} - -fn unpack_target_label<'v>(value: Value<'v>) -> Option<&'v TargetLabel> { - if let Some(target) = value.downcast_ref::() { - Some(target.label()) - } else if let Some(node) = value.downcast_ref::() { - Some(node.0.label()) - } else { - None - } } impl<'v> TargetExpr<'v, TargetNode> { - /// Get a `TargetSet` from the `TargetExpr` - pub(crate) async fn get( - self, - ctx: &DiceComputations, - ) -> anyhow::Result>> { + pub(crate) async fn get_from_dice( + &self, + dice: &mut DiceComputations<'_>, + ) -> anyhow::Result { match self { - TargetExpr::Node(val) => { - let mut set = TargetSet::new(); - set.insert(val); - Ok(Cow::Owned(set)) - } - TargetExpr::Label(label) => { - let node = ctx.get_target_node(&label).await?; - let mut set = TargetSet::new(); - set.insert(node); - Ok(Cow::Owned(set)) - } - TargetExpr::Iterable(val) => { - let mut set = TargetSet::new(); - let futs = val.into_iter().map(|node_or_ref| async { - match node_or_ref { - Either::Left(node) => Ok(node), - Either::Right(node_ref) => ctx.get_target_node(&node_ref).await, - } - }); - - for node in futures::future::join_all(futs).await { - set.insert(node?); - } - - Ok(Cow::Owned(set)) - } - TargetExpr::TargetSet(val) => Ok(val), - } - } -} - -#[derive(Debug, Error)] -pub(crate) 
enum TargetExprError { - #[error( - "Expected a list of target like items, but was `{0}`. If you have passed in a list of `label`s, make sure to call `configured_target()` to get the underlying configured target label." - )] - NotAListOfTargets(String), - #[error( - "Expected a single target like item, but was `{0}`. If you have passed in a `label`, make sure to call `configured_target()` to get the underlying configured target label." - )] - NotATarget(String), - #[error( - "Unconfigured target with label `{0}` was passed into cquery. Targets passed into cquery should be configured (recommendation is to use `ctx.target_universe()`)." - )] - UnconfiguredTargetInCquery(String), -} - -impl<'v> TargetExpr<'v, ConfiguredTargetNode> { - pub(crate) fn as_provider_labels(&self) -> Vec { - match &self { - TargetExpr::Iterable(i) => i - .iter() - .map(|e| match e { - Either::Left(node) => { - ConfiguredProvidersLabel::default_for(node.label().dupe()) - } - Either::Right(label) => { - ConfiguredProvidersLabel::default_for(label.as_ref().clone()) - } - }) - .collect(), - TargetExpr::Label(l) => vec![ConfiguredProvidersLabel::default_for(l.as_ref().clone())], - TargetExpr::Node(n) => vec![ConfiguredProvidersLabel::default_for(n.label().dupe())], - TargetExpr::TargetSet(t) => t - .iter() - .map(|n| ConfiguredProvidersLabel::default_for(n.label().dupe())) - .collect(), - } - } - - pub(crate) async fn unpack_opt<'c>( - value: Value<'v>, - target_platform: &Option, - ctx: &BxlContextNoDice<'v>, - dice: &mut DiceComputations, - eval: &Evaluator<'v, 'c>, - allow_unconfigured: bool, - ) -> anyhow::Result>> { - Ok( - if let Some(resolved) = - Self::unpack_literal(value, target_platform, ctx, dice, allow_unconfigured).await? - { - Some(resolved) - } else { - Self::unpack_iterable(value, target_platform, ctx, dice, eval, allow_unconfigured) - .await? - }, - ) - } - - pub(crate) async fn unpack<'c>( - value: Value<'v>, - target_platform: &Option, - ctx: &BxlContextNoDice<'v>, - dice: &mut DiceComputations, - eval: &Evaluator<'v, 'c>, - ) -> anyhow::Result> { - Ok( - if let Some(resolved) = - Self::unpack_opt(value, target_platform, ctx, dice, eval, false).await? - { - resolved - } else { - return Err(anyhow::anyhow!(TargetExprError::NotAListOfTargets( - value.to_repr() - ))); - }, - ) - } - - pub(crate) async fn unpack_allow_unconfigured<'c>( - value: Value<'v>, - target_platform: &Option, - ctx: &BxlContextNoDice<'v>, - dice: &mut DiceComputations, - eval: &Evaluator<'v, 'c>, - ) -> anyhow::Result> { - Ok( - if let Some(resolved) = - Self::unpack_opt(value, target_platform, ctx, dice, eval, true).await? - { - resolved - } else { - return Err(anyhow::anyhow!(TargetExprError::NotAListOfTargets( - value.to_repr() - ))); - }, - ) - } - - async fn unpack_literal( - value: Value<'v>, - target_platform: &Option, - ctx: &BxlContextNoDice<'_>, - dice: &mut DiceComputations, - allow_unconfigured: bool, - ) -> anyhow::Result>> { - if let Some(configured_target) = value.downcast_ref::() { - Ok(Some(Self::Node(configured_target.0.dupe()))) - } else if let Some(configured_target) = - value.downcast_ref::() - { - Ok(Some(Self::Label(Cow::Borrowed(configured_target.label())))) - } else { - // Handle the unconfigured case - let mut unconfigured_label = None; - let result = if let Some(s) = value.unpack_str() { - unconfigured_label = Some(s.to_owned()); - - match ParsedPattern::::parse_relaxed( - &ctx.target_alias_resolver, - // TODO(nga): Parse relaxed relative to cell root is incorrect. 
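// Illustrative sketch (editor's addition, not part of the original patch):
// the relaxed pattern parse used in this code resolves a bare string against
// the cell root, which is what the TODO above flags as incorrect.
// Schematically, with hypothetical `alias_resolver`, `cell`, and
// `cell_resolver` values:
//
//     match ParsedPattern::<TargetPatternExtra>::parse_relaxed(
//         &alias_resolver,
//         CellPathRef::new(cell, CellRelativePath::empty()),
//         "//some:target",
//         &cell_resolver,
//     )? {
//         ParsedPattern::Target(pkg, name, TargetPatternExtra) => {
//             // a single label: TargetLabel::new(pkg, name.as_ref())
//         }
//         pattern => {
//             // anything else (e.g. `//some/...`) expands via load_patterns()
//         }
//     }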
- CellPathRef::new(ctx.cell_name, CellRelativePath::empty()), - s, - &ctx.cell_resolver, - )? { - ParsedPattern::Target(pkg, name, TargetPatternExtra) => { - Ok(Some(Self::Label(Cow::Owned( - dice.get_configured_target( - &TargetLabel::new(pkg, name.as_ref()), - target_platform.as_ref(), - ) - .await?, - )))) - } - pattern => { - let loaded_patterns = - load_patterns(dice, vec![pattern], MissingTargetBehavior::Fail).await?; - - let maybe_compatible = get_maybe_compatible_targets( - dice, - loaded_patterns.iter_loaded_targets_by_package(), - target_platform.as_ref(), - ) - .await? - .collect::, _>>()?; - - let result = filter_incompatible(maybe_compatible.into_iter(), ctx)?; - Ok(Some(Self::TargetSet(Cow::Owned(result)))) - } - } - } else { - match unpack_target_label(value) { - None => Ok(None), - Some(label) => { - unconfigured_label = Some(label.to_string()); - Ok(Some(Self::Label(Cow::Owned( - dice.get_configured_target(label, target_platform.as_ref()) - .await?, - )))) - } - } - }; - - if !allow_unconfigured { - if let Some(unconfigured_label) = unconfigured_label { - if target_platform.is_none() { - soft_error!( - "bxl_unconfigured_target_in_cquery", - TargetExprError::UnconfiguredTargetInCquery(unconfigured_label).into() - )?; - } - } - } - - result - } - } - - async fn unpack_iterable<'c>( - value: Value<'v>, - target_platform: &Option, - ctx: &BxlContextNoDice<'_>, - dice: &mut DiceComputations, - eval: &Evaluator<'v, 'c>, - allow_unconfigured: bool, - ) -> anyhow::Result>> { - if let Some(s) = value.downcast_ref::>() { - return Ok(Some(Self::TargetSet(Cow::Borrowed(s)))); - } - - #[allow(clippy::manual_map)] // `if else if` looks better here - let items = if let Some(s) = value.downcast_ref::>() { - Some(Either::Left(s.iter(eval.heap()))) - } else if let Some(iterable) = ListRef::from_value(value) { - Some(Either::Right(iterable.iter())) - } else { - None - } - .ok_or_else(|| TargetExprError::NotAListOfTargets(value.to_repr()))?; - - let mut resolved = vec![]; - - for item in items { - let unpacked = - Self::unpack_literal(item, target_platform, ctx, dice, allow_unconfigured).await?; - - match unpacked { - Some(TargetExpr::Node(node)) => resolved.push(Either::Left(node)), - Some(TargetExpr::Label(label)) => resolved.push(Either::Right(label)), - Some(TargetExpr::TargetSet(set)) => match set { - Cow::Borrowed(s) => itertools::Either::Left(s.iter().duped()), - Cow::Owned(s) => itertools::Either::Right(s.into_iter()), - } - .for_each(|t| resolved.push(Either::Left(t))), - _ => { - return Err(anyhow::anyhow!(TargetExprError::NotATarget(item.to_repr())) - .context(format!( - "Error resolving list `{}`", - truncate(&value.to_repr(), 150) - ))); - } - } - } - - Ok(Some(Self::Iterable(resolved))) - } -} - -impl<'v> TargetExpr<'v, TargetNode> { - pub(crate) async fn unpack<'c>( - value: Value<'v>, - ctx: &BxlContextNoDice<'_>, - dice: &mut DiceComputations, - eval: &Evaluator<'v, 'c>, - ) -> anyhow::Result> { - Ok( - if let Some(resolved) = Self::unpack_literal(value, ctx, dice).await? { - resolved - } else if let Some(resolved) = Self::unpack_iterable(value, ctx, dice, eval).await? 
{ - resolved - } else { - return Err(anyhow::anyhow!(TargetExprError::NotAListOfTargets( - value.to_repr() - ))); - }, - ) - } - - async fn unpack_literal( - value: Value<'v>, - ctx: &BxlContextNoDice<'_>, - dice: &mut DiceComputations, - ) -> anyhow::Result>> { - if let Some(target) = value.downcast_ref::() { - Ok(Some(Self::Node(target.0.dupe()))) - } else if let Some(label) = value.downcast_ref::() { - Ok(Some(Self::Label(Cow::Borrowed(label.label())))) - } else if let Some(s) = value.unpack_str() { - match ParsedPattern::::parse_relaxed( - &ctx.target_alias_resolver, - // TODO(nga): Parse relaxed relative to cell root is incorrect. - CellPathRef::new(ctx.cell_name, CellRelativePath::empty()), - s, - &ctx.cell_resolver, - )? { - ParsedPattern::Target(pkg, name, TargetPatternExtra) => Ok(Some(Self::Label( - Cow::Owned(TargetLabel::new(pkg, name.as_ref())), - ))), - pattern => { - let loaded_patterns = - load_patterns(dice, vec![pattern], MissingTargetBehavior::Fail).await?; - let mut target_set = TargetSet::new(); - for (_package, results) in loaded_patterns.into_iter() { - target_set.extend(results?.into_values()); - } - Ok(Some(Self::TargetSet(Cow::Owned(target_set)))) - } - } - } else { - match unpack_target_label(value) { - None => Ok(None), - Some(label) => Ok(Some(Self::Label(Cow::Borrowed(label)))), - } - } - } - - async fn unpack_iterable<'c>( - value: Value<'v>, - ctx: &BxlContextNoDice<'_>, - dice: &mut DiceComputations, - eval: &Evaluator<'v, 'c>, - ) -> anyhow::Result>> { - if let Some(s) = value.downcast_ref::>() { - return Ok(Some(Self::TargetSet(Cow::Borrowed(s)))); - } - - #[allow(clippy::manual_map)] // `if else if` looks better here - let items = if let Some(s) = value.downcast_ref::>() { - Some(Either::Left(s.iter(eval.heap()))) - } else if let Some(iterable) = ListRef::from_value(value) { - Some(Either::Right(iterable.iter())) - } else { - None - } - .ok_or_else(|| TargetExprError::NotAListOfTargets(value.to_repr()))?; - - let mut resolved = vec![]; - - for item in items { - let unpacked = Self::unpack_literal(item, ctx, dice).await?; - - match unpacked { - Some(TargetExpr::Node(node)) => resolved.push(Either::Left(node)), - Some(TargetExpr::Label(label)) => resolved.push(Either::Right(label)), - Some(TargetExpr::TargetSet(set)) => match set { - Cow::Borrowed(s) => itertools::Either::Left(s.iter().duped()), - Cow::Owned(s) => itertools::Either::Right(s.into_iter()), - } - .for_each(|t| resolved.push(Either::Left(t))), - _ => { - return Err(anyhow::anyhow!(TargetExprError::NotATarget(item.to_repr())) - .context(format!( - "Error resolving list `{}`", - truncate(&value.to_repr(), 150) - ))); - } - } + TargetExpr::Node(node) => Ok(node.dupe()), + TargetExpr::Label(label) => dice.get_target_node(label).await, } - Ok(Some(Self::Iterable(resolved))) } } diff --git a/app/buck2_bxl/src/bxl/starlark_defs/target_list_expr.rs b/app/buck2_bxl/src/bxl/starlark_defs/target_list_expr.rs new file mode 100644 index 0000000000000..78ac813c70320 --- /dev/null +++ b/app/buck2_bxl/src/bxl/starlark_defs/target_list_expr.rs @@ -0,0 +1,815 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use std::borrow::Cow; +use std::iter; + +use allocative::Allocative; +use anyhow::Context; +use buck2_build_api::configure_targets::get_maybe_compatible_targets; +use buck2_common::global_cfg_options::GlobalCfgOptions; +use buck2_core::cells::cell_path::CellPathRef; +use buck2_core::cells::paths::CellRelativePath; +use buck2_core::configuration::compatibility::IncompatiblePlatformReason; +use buck2_core::configuration::compatibility::MaybeCompatible; +use buck2_core::pattern::pattern::ParsedPattern; +use buck2_core::pattern::pattern_type::TargetPatternExtra; +use buck2_core::provider::label::ConfiguredProvidersLabel; +use buck2_core::soft_error; +use buck2_core::target::label::label::TargetLabel; +use buck2_interpreter::types::target_label::StarlarkConfiguredTargetLabel; +use buck2_interpreter::types::target_label::StarlarkTargetLabel; +use buck2_node::load_patterns::load_patterns; +use buck2_node::load_patterns::MissingTargetBehavior; +use buck2_node::nodes::configured::ConfiguredTargetNode; +use buck2_node::nodes::configured_frontend::ConfiguredTargetNodeCalculation; +use buck2_node::nodes::frontend::TargetGraphCalculation; +use buck2_node::nodes::unconfigured::TargetNode; +use buck2_node::target_calculation::ConfiguredTargetCalculation; +use buck2_query::query::environment::QueryTarget; +use buck2_query::query::syntax::simple::eval::set::TargetSet; +use buck2_util::truncate::truncate; +use dice::DiceComputations; +use dupe::Dupe; +use dupe::IterDupedExt; +use either::Either; +use futures::FutureExt; +use starlark::collections::SmallSet; +use starlark::values::list::UnpackList; +use starlark::values::type_repr::StarlarkTypeRepr; +use starlark::values::Heap; +use starlark::values::UnpackValue; +use starlark::values::Value; +use starlark::values::ValueOf; + +use crate::bxl::starlark_defs::context::BxlContextCoreData; +use crate::bxl::starlark_defs::context::BxlContextNoDice; +use crate::bxl::starlark_defs::context::ErrorPrinter; +use crate::bxl::starlark_defs::eval_extra::BxlEvalExtra; +use crate::bxl::starlark_defs::nodes::configured::StarlarkConfiguredTargetNode; +use crate::bxl::starlark_defs::nodes::unconfigured::StarlarkTargetNode; +use crate::bxl::starlark_defs::target_expr::TargetExpr; +use crate::bxl::starlark_defs::targetset::StarlarkTargetSet; + +/// TargetExpr is just a simple type that can be used in starlark_module +/// functions for arguments that should be target sets. It will accept a +/// literal (like `//some:target`) or list of literals or a TargetSet Value (from one of the +/// BXL functions that return them). 
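// Illustrative note (editor's addition, not part of the original patch): at
// the Starlark call site the enum below accepts a single literal like
// "//some:target", a node or label value, a list mixing those, or an existing
// target_set. Each shape lands in one variant; `describe` is a hypothetical
// helper shown only to make the mapping concrete:
//
//     fn describe<Node: QueryTarget>(expr: &TargetListExpr<'_, Node>) -> &'static str {
//         match expr {
//             TargetListExpr::One(_) => "a single node or label",
//             TargetListExpr::Iterable(_) => "a list of nodes or labels",
//             TargetListExpr::TargetSet(_) => "an existing target_set",
//         }
//     }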
+pub(crate) enum TargetListExpr<'v, Node: QueryTarget> { + One(TargetExpr<'v, Node>), + Iterable(Vec>), + TargetSet(Cow<'v, TargetSet>), +} + +// Filters out incompatible targets and emits the error message +pub(crate) fn filter_incompatible( + targets: impl IntoIterator>, + error_printer: &T, +) -> anyhow::Result> { + let mut target_set = TargetSet::new(); + let mut incompatible_targets = SmallSet::new(); + + for res in targets { + match res { + MaybeCompatible::Incompatible(reason) => { + incompatible_targets.insert(reason.target.dupe()); + } + MaybeCompatible::Compatible(target) => { + target_set.insert(target); + } + } + } + + if !incompatible_targets.is_empty() { + error_printer.print_to_error_stream( + IncompatiblePlatformReason::skipping_message_for_multiple(incompatible_targets.iter()), + )?; + } + + Ok(target_set) +} + +#[derive(StarlarkTypeRepr, UnpackValue)] +pub(crate) enum TargetNodeOrTargetLabel<'v> { + TargetNode(&'v StarlarkTargetNode), + TargetLabel(&'v StarlarkTargetLabel), +} + +impl<'v> TargetNodeOrTargetLabel<'v> { + fn label(&self) -> &'v TargetLabel { + match self { + TargetNodeOrTargetLabel::TargetNode(node) => node.0.label(), + TargetNodeOrTargetLabel::TargetLabel(label) => label.label(), + } + } +} + +#[derive(StarlarkTypeRepr, UnpackValue)] +pub(crate) enum TargetNodeOrTargetLabelOrStr<'v> { + TargetNode(&'v StarlarkTargetNode), + TargetLabel(&'v StarlarkTargetLabel), + Str(&'v str), +} + +#[derive(StarlarkTypeRepr, UnpackValue)] +pub(crate) enum ConfiguredTargetNodeArg<'v> { + ConfiguredTargetNode(&'v StarlarkConfiguredTargetNode), + ConfiguredTargetLabel(&'v StarlarkConfiguredTargetLabel), + Str(&'v str), + Unconfigured(TargetNodeOrTargetLabel<'v>), +} + +#[derive(StarlarkTypeRepr, UnpackValue)] +pub(crate) enum TargetSetOrTargetList<'v> { + TargetSet(&'v StarlarkTargetSet), + TargetList(UnpackList>>), +} + +#[derive(StarlarkTypeRepr, UnpackValue)] +pub(crate) enum ConfiguredTargetListArg<'v> { + ConfiguredTargetSet(&'v StarlarkTargetSet), + TargetSet(&'v StarlarkTargetSet), + TargetList(UnpackList>), +} + +#[derive(StarlarkTypeRepr, UnpackValue)] +pub(crate) enum TargetListExprArg<'v> { + Target(TargetNodeOrTargetLabelOrStr<'v>), + List(TargetSetOrTargetList<'v>), +} + +#[derive(StarlarkTypeRepr, UnpackValue)] +pub(crate) enum ConfiguredTargetListExprArg<'v> { + Target(ConfiguredTargetNodeArg<'v>), + List(ValueOf<'v, ConfiguredTargetListArg<'v>>), +} + +impl<'v> TargetListExpr<'v, TargetNode> { + pub(crate) fn iter(&self) -> Box> + '_> { + match &self { + Self::One(one) => Box::new(iter::once(one.clone())), + Self::Iterable(iterable) => Box::new(iterable.iter().cloned()), + Self::TargetSet(target_set) => { + Box::new(target_set.iter().map(|s| TargetExpr::Node(s.clone()))) + } + } + } + + /// Get a `TargetSet` from the `TargetExpr` + pub(crate) async fn get( + self, + ctx: &mut DiceComputations<'_>, + ) -> anyhow::Result>> { + let set = ctx + .try_compute_join(self.iter(), |ctx, node_or_ref| { + async move { node_or_ref.get_from_dice(ctx).await }.boxed() + }) + .await? + .into_iter() + .collect(); + Ok(Cow::Owned(set)) + } + + /// Get a single `TargetNode` + pub(crate) async fn get_one( + &self, + ctx: &mut DiceComputations<'_>, + ) -> anyhow::Result> { + Ok(match &self { + Self::One(node_or_ref) => Some(node_or_ref.get_from_dice(ctx).await?), + _ => None, + }) + } +} + +#[derive(Debug, buck2_error::Error)] +pub(crate) enum TargetExprError { + #[error( + "Expected a single target like item, but was `{0}`. 
If you have passed in a `label`, make sure to call `configured_target()` to get the underlying configured target label." + )] + NotATarget(String), + #[error( + "Unconfigured target with label `{0}` was passed into cquery. Targets passed into cquery should be configured (recommendation is to use `ctx.target_universe()`)." + )] + UnconfiguredTargetInCquery(String), + #[error( + "`keep_going` is currently only implemented for a single target pattern as a string literal." + )] + KeepGoingOnlyForStringLiteral, +} + +impl<'v> TargetListExpr<'v, ConfiguredTargetNode> { + fn iter(&self) -> Box> + '_> { + match &self { + Self::One(one) => Box::new(iter::once(one.clone())), + Self::Iterable(iterable) => Box::new(iterable.iter().cloned()), + Self::TargetSet(target_set) => { + Box::new(target_set.iter().map(|s| TargetExpr::Node(s.clone()))) + } + } + } + + /// Get a vector of maybe compatible `ConfiguredTargetNode`s from the `TargetExpr`. + /// Any callers of this function will need to call `filter_incompatible()` on the result + /// in order to get the `TargetSet`. + pub(crate) async fn get( + self, + dice: &mut DiceComputations<'_>, + ) -> anyhow::Result>> { + dice.compute_join(self.iter(), |ctx, node_or_ref| { + async move { ctx.get_configured_target_node(node_or_ref.node_ref()).await }.boxed() + }) + .await + .into_iter() + .collect() + } + + /// Get a single maybe compatible `ConfiguredTargetNode`. + pub(crate) async fn get_one( + &self, + dice: &mut DiceComputations<'_>, + ) -> anyhow::Result>> { + Ok(match &self { + Self::One(node_or_ref) => Some( + dice.get_configured_target_node(node_or_ref.node_ref()) + .await?, + ), + _ => None, + }) + } + + pub(crate) fn as_provider_labels(&self) -> Vec { + self.iter() + .map(|e| ConfiguredProvidersLabel::default_for(e.node_ref().dupe())) + .collect() + } + + pub(crate) async fn unpack_opt<'c>( + arg: ConfiguredTargetListExprArg<'v>, + global_cfg_options: &GlobalCfgOptions, + ctx: &BxlContextNoDice<'v>, + dice: &mut DiceComputations<'_>, + allow_unconfigured: bool, + ) -> anyhow::Result> { + match arg { + ConfiguredTargetListExprArg::Target(arg) => { + Ok( + Self::unpack_literal(arg, global_cfg_options, ctx, dice, allow_unconfigured) + .await?, + ) + } + ConfiguredTargetListExprArg::List(arg) => { + Ok( + Self::unpack_iterable(arg, global_cfg_options, ctx, dice, allow_unconfigured) + .await?, + ) + } + } + } + + pub(crate) async fn unpack<'c>( + // TODO(nga): this does not accept unconfigured targets, so should be narrower type here. 
+ arg: ConfiguredTargetListExprArg<'v>, + global_cfg_options: &GlobalCfgOptions, + ctx: &BxlContextNoDice<'v>, + dice: &mut DiceComputations<'_>, + ) -> anyhow::Result> { + Self::unpack_opt(arg, global_cfg_options, ctx, dice, false).await + } + + pub(crate) async fn unpack_allow_unconfigured<'c>( + arg: ConfiguredTargetListExprArg<'v>, + global_cfg_options: &GlobalCfgOptions, + ctx: &BxlContextNoDice<'v>, + dice: &mut DiceComputations<'_>, + ) -> anyhow::Result> { + Self::unpack_opt(arg, global_cfg_options, ctx, dice, true).await + } + + fn check_allow_unconfigured( + allow_unconfigured: bool, + unconfigured_label: &str, + global_cfg_options: &GlobalCfgOptions, + ) -> anyhow::Result<()> { + if !allow_unconfigured { + if global_cfg_options.target_platform.is_none() { + soft_error!( + "bxl_unconfigured_target_in_cquery", + TargetExprError::UnconfiguredTargetInCquery(unconfigured_label.to_owned()) + .into() + )?; + } + } + Ok(()) + } + + async fn unpack_literal( + arg: ConfiguredTargetNodeArg<'v>, + global_cfg_options: &GlobalCfgOptions, + ctx: &BxlContextNoDice<'_>, + dice: &mut DiceComputations<'_>, + allow_unconfigured: bool, + ) -> anyhow::Result> { + match arg { + ConfiguredTargetNodeArg::ConfiguredTargetNode(configured_target) => { + Ok(Self::One(TargetExpr::Node(configured_target.0.dupe()))) + } + ConfiguredTargetNodeArg::ConfiguredTargetLabel(configured_target) => Ok( + TargetListExpr::One(TargetExpr::Label(Cow::Borrowed(configured_target.label()))), + ), + ConfiguredTargetNodeArg::Str(s) => { + Self::check_allow_unconfigured(allow_unconfigured, s, global_cfg_options)?; + + Self::unpack_string_literal(s, global_cfg_options, ctx, dice, false).await + } + ConfiguredTargetNodeArg::Unconfigured(label) => { + Self::check_allow_unconfigured( + allow_unconfigured, + &label.label().to_string(), + global_cfg_options, + )?; + Ok(TargetListExpr::One(TargetExpr::Label(Cow::Owned( + dice.get_configured_target(label.label(), global_cfg_options) + .await?, + )))) + } + } + } + + // Ideally we refactor the entire unpacking logic for configured targets to make this easier, + // but let's support keep_going for string literals for now. + pub(crate) async fn unpack_keep_going<'c>( + arg: ConfiguredTargetListExprArg<'v>, + global_cfg_options: &GlobalCfgOptions, + ctx: &BxlContextNoDice<'v>, + dice: &mut DiceComputations<'_>, + ) -> anyhow::Result> { + match arg { + ConfiguredTargetListExprArg::Target(ConfiguredTargetNodeArg::Str(val)) => { + Self::unpack_string_literal(val, global_cfg_options, ctx, dice, true).await + } + _ => Err(TargetExprError::KeepGoingOnlyForStringLiteral.into()), + } + } + + // Unpack functionality for a string literal, with keep_going support + async fn unpack_string_literal( + val: &str, + global_cfg_options: &GlobalCfgOptions, + ctx: &BxlContextNoDice<'_>, + dice: &mut DiceComputations<'_>, + keep_going: bool, + ) -> anyhow::Result> { + match ParsedPattern::::parse_relaxed( + ctx.target_alias_resolver(), + // TODO(nga): Parse relaxed relative to cell root is incorrect. + CellPathRef::new(ctx.cell_name(), CellRelativePath::empty()), + val, + ctx.cell_resolver(), + ctx.cell_alias_resolver(), + )? { + ParsedPattern::Target(pkg, name, TargetPatternExtra) => { + let result = match dice + .get_configured_target( + &TargetLabel::new(pkg, name.as_ref()), + global_cfg_options, + ) + .await + { + Ok(label) => { + // check if we can get a maybe compatible configured target node successfully here to make + // sure keep_going works. 
We will try to get the node later, but due to how complex this + // code is, it's much easier to just call it once here as a sanity check. + match dice.get_configured_target_node(&label).await { + Ok(_) => Ok(TargetListExpr::One(TargetExpr::Label(Cow::Owned(label)))), + Err(e) => Err(e), + } + } + Err(e) => Err(e), + }; + + result.or_else(|e| { + if keep_going { + Ok(TargetListExpr::Iterable(Vec::new())) + } else { + Err(e) + } + }) + } + pattern => { + let loaded_patterns = + load_patterns(dice, vec![pattern], MissingTargetBehavior::Fail).await?; + + let maybe_compatible = get_maybe_compatible_targets( + dice, + loaded_patterns.iter_loaded_targets_by_package(), + global_cfg_options, + keep_going, + ) + .await?; + + let maybe_compatible: Vec<_> = if keep_going { + maybe_compatible.filter_map(|r| r.ok()).collect() + } else { + maybe_compatible.collect::>()? + }; + + let result = filter_incompatible(maybe_compatible, ctx)?; + Ok(TargetListExpr::TargetSet(Cow::Owned(result))) + } + } + } + + async fn unpack_iterable<'c>( + value: ValueOf<'v, ConfiguredTargetListArg<'v>>, + global_cfg_options: &GlobalCfgOptions, + ctx: &BxlContextNoDice<'_>, + dice: &mut DiceComputations<'_>, + allow_unconfigured: bool, + ) -> anyhow::Result> { + match value.typed { + ConfiguredTargetListArg::ConfiguredTargetSet(s) => { + return Ok(Self::TargetSet(Cow::Borrowed(s))); + } + ConfiguredTargetListArg::TargetSet(s) => { + return Ok(TargetListExpr::Iterable( + dice.try_compute_join(s.0.iter(), |dice, node| { + async move { + Self::check_allow_unconfigured( + allow_unconfigured, + &node.label().to_string(), + global_cfg_options, + )?; + + anyhow::Ok(TargetExpr::Label(Cow::Owned( + dice.get_configured_target(node.label(), global_cfg_options) + .await?, + ))) + } + .boxed() + }) + .await?, + )); + } + ConfiguredTargetListArg::TargetList(unpack) => { + let mut resolved = vec![]; + + // FIXME(JakobDegen): These iterations don't run in parallel, which is pretty sad. 
+ // Unfortunately, that's also not easy to fix because for some reason this code + // prints to console + for item in unpack.items { + let unpacked = Self::unpack_literal( + item, + global_cfg_options, + ctx, + dice, + allow_unconfigured, + ) + .await?; + + match unpacked { + TargetListExpr::One(node) => resolved.push(node), + TargetListExpr::TargetSet(set) => match set { + Cow::Borrowed(s) => itertools::Either::Left(s.iter().duped()), + Cow::Owned(s) => itertools::Either::Right(s.into_iter()), + } + .for_each(|t| resolved.push(TargetExpr::Node(t))), + _ => { + return Err(anyhow::anyhow!(TargetExprError::NotATarget( + value.value.to_repr() + )) + .context(format!( + "Error resolving list `{}`", + truncate(&value.value.to_repr(), 150) + ))); + } + } + } + + Ok(Self::Iterable(resolved)) + } + } + } +} + +impl<'v> TargetListExpr<'v, TargetNode> { + pub(crate) async fn unpack<'c>( + value: TargetListExprArg<'v>, + ctx: &BxlContextNoDice<'_>, + dice: &mut DiceComputations<'_>, + ) -> anyhow::Result> { + match value { + TargetListExprArg::Target(x) => Self::unpack_literal(x, ctx, dice).await, + TargetListExprArg::List(x) => Self::unpack_iterable(x, ctx, dice).await, + } + } + + async fn unpack_literal( + value: TargetNodeOrTargetLabelOrStr<'v>, + ctx: &BxlContextNoDice<'_>, + dice: &mut DiceComputations<'_>, + ) -> anyhow::Result> { + match value { + TargetNodeOrTargetLabelOrStr::TargetNode(target) => { + Ok(TargetListExpr::One(TargetExpr::Node(target.0.dupe()))) + } + TargetNodeOrTargetLabelOrStr::TargetLabel(target) => Ok(TargetListExpr::One( + TargetExpr::Label(Cow::Borrowed(target.label())), + )), + TargetNodeOrTargetLabelOrStr::Str(s) => { + match ParsedPattern::::parse_relaxed( + ctx.target_alias_resolver(), + // TODO(nga): Parse relaxed relative to cell root is incorrect. + CellPathRef::new(ctx.cell_name(), CellRelativePath::empty()), + s, + ctx.cell_resolver(), + ctx.cell_alias_resolver(), + )? 
{ + ParsedPattern::Target(pkg, name, TargetPatternExtra) => { + Ok(TargetListExpr::One(TargetExpr::Label(Cow::Owned( + TargetLabel::new(pkg, name.as_ref()), + )))) + } + pattern => { + let loaded_patterns = + load_patterns(dice, vec![pattern], MissingTargetBehavior::Fail).await?; + let mut target_set = TargetSet::new(); + for (_package, results) in loaded_patterns.into_iter() { + target_set.extend(results?.into_values()); + } + Ok(Self::TargetSet(Cow::Owned(target_set))) + } + } + } + } + } + + async fn unpack_iterable<'c>( + value: TargetSetOrTargetList<'v>, + ctx: &BxlContextNoDice<'_>, + dice: &mut DiceComputations<'_>, + ) -> anyhow::Result> { + match value { + TargetSetOrTargetList::TargetSet(s) => Ok(Self::TargetSet(Cow::Borrowed(s))), + TargetSetOrTargetList::TargetList(items) => { + let mut resolved = vec![]; + + for item in items.items { + let unpacked = Self::unpack_literal(item.typed, ctx, dice).await?; + + match unpacked { + TargetListExpr::One(node) => resolved.push(node), + TargetListExpr::TargetSet(set) => match set { + Cow::Borrowed(s) => itertools::Either::Left(s.iter().duped()), + Cow::Owned(s) => itertools::Either::Right(s.into_iter()), + } + .for_each(|t| resolved.push(TargetExpr::Node(t))), + TargetListExpr::Iterable(_) => { + return Err(TargetExprError::NotATarget(item.value.to_repr())) + .context("list in a list"); + } + } + } + Ok(Self::Iterable(resolved)) + } + } + } +} + +#[derive(Debug, Allocative)] +pub(crate) enum SingleOrCompatibleConfiguredTargets { + Single(ConfiguredTargetNode), + Compatibles(Vec>), +} + +impl SingleOrCompatibleConfiguredTargets { + pub(crate) fn into_value<'v>( + self, + heap: &'v Heap, + bxl_eval_extra: &BxlEvalExtra, + ) -> anyhow::Result> { + match self { + SingleOrCompatibleConfiguredTargets::Single(node) => { + Ok(heap.alloc(StarlarkConfiguredTargetNode(node))) + } + SingleOrCompatibleConfiguredTargets::Compatibles(compatibles) => { + let target_set = filter_incompatible(compatibles, bxl_eval_extra)?; + Ok(heap.alloc(StarlarkTargetSet(target_set))) + } + } + } +} + +async fn unpack_string_literal<'v>( + val: &str, + global_cfg_options: &GlobalCfgOptions, + ctx: &BxlContextCoreData, + dice: &mut DiceComputations<'_>, +) -> anyhow::Result { + match ParsedPattern::::parse_relaxed( + ctx.target_alias_resolver(), + // TODO(nga): Parse relaxed relative to cell root is incorrect. + CellPathRef::new(ctx.cell_name(), CellRelativePath::empty()), + val, + ctx.cell_resolver(), + ctx.cell_alias_resolver(), + )? 
{ + ParsedPattern::Target(pkg, name, TargetPatternExtra) => { + let label = dice + .get_configured_target(&TargetLabel::new(pkg, name.as_ref()), global_cfg_options) + .await?; + let compatible_node = dice.get_configured_target_node(&label).await?; + compatible_node + .require_compatible() + .map(SingleOrCompatibleConfiguredTargets::Single) + } + pattern => { + let loaded_patterns = + load_patterns(dice, vec![pattern], MissingTargetBehavior::Fail).await?; + + let maybe_compatible = get_maybe_compatible_targets( + dice, + loaded_patterns.iter_loaded_targets_by_package(), + global_cfg_options, + true, + ) + .await?; + + let maybe_compatible = maybe_compatible.collect::>()?; + Ok(SingleOrCompatibleConfiguredTargets::Compatibles( + maybe_compatible, + )) + } + } +} + +#[derive(Debug, Clone, Allocative)] +pub(crate) enum OwnedTargetNodeOrTargetLabel { + TargetNode(StarlarkTargetNode), + TargetLabel(StarlarkTargetLabel), +} + +impl OwnedTargetNodeOrTargetLabel { + pub(crate) fn from_ref(reference: TargetNodeOrTargetLabel<'_>) -> Self { + match reference { + TargetNodeOrTargetLabel::TargetNode(node) => { + OwnedTargetNodeOrTargetLabel::TargetNode(node.dupe()) + } + TargetNodeOrTargetLabel::TargetLabel(label) => { + OwnedTargetNodeOrTargetLabel::TargetLabel(label.dupe()) + } + } + } + + fn label(&self) -> &TargetLabel { + match self { + OwnedTargetNodeOrTargetLabel::TargetNode(node) => node.0.label(), + OwnedTargetNodeOrTargetLabel::TargetLabel(label) => label.label(), + } + } + + pub(crate) async fn to_configured_target_node( + &self, + global_cfg_options: &GlobalCfgOptions, + dice: &mut DiceComputations<'_>, + ) -> anyhow::Result { + let configured_label = dice + .get_configured_target(self.label(), global_cfg_options) + .await?; + dice.get_configured_target_node(&configured_label) + .await? 
+ .require_compatible() + } + + pub(crate) async fn to_unconfigured_target_node( + &self, + dice: &mut DiceComputations<'_>, + ) -> anyhow::Result { + match self { + OwnedTargetNodeOrTargetLabel::TargetNode(node) => Ok(node.0.dupe()), + OwnedTargetNodeOrTargetLabel::TargetLabel(label) => dice + .get_target_node(label.label()) + .await + .map(|node| node.dupe()), + } + } +} + +#[derive(Debug, Clone, Allocative)] +pub(crate) enum OwnedConfiguredTargetNodeArg { + ConfiguredTargetNode(StarlarkConfiguredTargetNode), + ConfiguredTargetLabel(StarlarkConfiguredTargetLabel), + String(String), + Unconfigured(OwnedTargetNodeOrTargetLabel), +} + +impl OwnedConfiguredTargetNodeArg { + pub(crate) fn from_ref(reference: ConfiguredTargetNodeArg<'_>) -> Self { + match reference { + ConfiguredTargetNodeArg::ConfiguredTargetNode(node) => { + OwnedConfiguredTargetNodeArg::ConfiguredTargetNode(node.dupe()) + } + ConfiguredTargetNodeArg::ConfiguredTargetLabel(label) => { + OwnedConfiguredTargetNodeArg::ConfiguredTargetLabel(label.dupe()) + } + ConfiguredTargetNodeArg::Str(str) => { + OwnedConfiguredTargetNodeArg::String(str.to_owned()) + } + ConfiguredTargetNodeArg::Unconfigured(unconfigured) => { + OwnedConfiguredTargetNodeArg::Unconfigured(OwnedTargetNodeOrTargetLabel::from_ref( + unconfigured, + )) + } + } + } + + pub(crate) async fn to_configured_target_node( + &self, + global_cfg_options: &GlobalCfgOptions, + ctx: &BxlContextCoreData, + dice: &mut DiceComputations<'_>, + ) -> anyhow::Result { + match self { + OwnedConfiguredTargetNodeArg::ConfiguredTargetNode(node) => { + Ok(SingleOrCompatibleConfiguredTargets::Single(node.0.dupe())) + } + OwnedConfiguredTargetNodeArg::ConfiguredTargetLabel(label) => { + let compatible = dice.get_configured_target_node(label.label()).await?; + compatible + .require_compatible() + .map(SingleOrCompatibleConfiguredTargets::Single) + } + OwnedConfiguredTargetNodeArg::String(str) => { + unpack_string_literal(str, global_cfg_options, ctx, dice).await + } + OwnedConfiguredTargetNodeArg::Unconfigured(unconfigured) => unconfigured + .to_configured_target_node(global_cfg_options, dice) + .await + .map(SingleOrCompatibleConfiguredTargets::Single), + } + } +} + +#[derive(Debug, Clone, Allocative)] +pub(crate) enum OwnedTargetNodeArg { + Unconfigured(OwnedTargetNodeOrTargetLabel), + String(String), +} + +impl OwnedTargetNodeArg { + pub(crate) fn from_ref(expr: &TargetNodeOrTargetLabelOrStr<'_>) -> Self { + match *expr { + TargetNodeOrTargetLabelOrStr::TargetNode(node) => OwnedTargetNodeArg::Unconfigured( + OwnedTargetNodeOrTargetLabel::TargetNode(node.dupe()), + ), + TargetNodeOrTargetLabelOrStr::TargetLabel(label) => OwnedTargetNodeArg::Unconfigured( + OwnedTargetNodeOrTargetLabel::TargetLabel(label.dupe()), + ), + TargetNodeOrTargetLabelOrStr::Str(str) => OwnedTargetNodeArg::String(str.to_owned()), + } + } + + pub(crate) async fn to_unconfigured_target_node( + &self, + ctx: &BxlContextCoreData, + dice: &mut DiceComputations<'_>, + ) -> anyhow::Result>> { + match self { + OwnedTargetNodeArg::Unconfigured(unconfigured) => unconfigured + .to_unconfigured_target_node(dice) + .await + .map(|node| Either::Left(StarlarkTargetNode(node))), + OwnedTargetNodeArg::String(str) => { + match ParsedPattern::::parse_relaxed( + ctx.target_alias_resolver(), + CellPathRef::new(ctx.cell_name(), CellRelativePath::empty()), + &str, + ctx.cell_resolver(), + ctx.cell_alias_resolver(), + )? 
{ + ParsedPattern::Target(pkg, name, TargetPatternExtra) => { + let label = TargetLabel::new(pkg, name.as_ref()); + dice.get_target_node(&label) + .await + .map(|node| Either::Left(StarlarkTargetNode(node))) + } + pattern => { + let loaded_patterns = + load_patterns(dice, vec![pattern], MissingTargetBehavior::Fail).await?; + let mut target_set = TargetSet::new(); + for (_package, results) in loaded_patterns.into_iter() { + target_set.extend(results?.into_values()); + } + Ok(Either::Right(StarlarkTargetSet(target_set))) + } + } + } + } +} diff --git a/app/buck2_bxl/src/bxl/starlark_defs/target_universe.rs b/app/buck2_bxl/src/bxl/starlark_defs/target_universe.rs index 496999465805a..0bdd34e66f98d 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/target_universe.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/target_universe.rs @@ -29,10 +29,11 @@ use starlark::values::NoSerialize; use starlark::values::StarlarkValue; use starlark::values::Trace; use starlark::values::Value; -use starlark::StarlarkDocs; +use starlark::values::ValueTyped; use crate::bxl::starlark_defs::context::BxlContext; -use crate::bxl::starlark_defs::target_expr::TargetExpr; +use crate::bxl::starlark_defs::target_list_expr::TargetListExpr; +use crate::bxl::starlark_defs::target_list_expr::TargetListExprArg; use crate::bxl::starlark_defs::targetset::StarlarkTargetSet; #[derive( @@ -41,12 +42,10 @@ use crate::bxl::starlark_defs::targetset::StarlarkTargetSet; Display, Trace, NoSerialize, - StarlarkDocs, Allocative )] -#[starlark_docs(directory = "bxl")] #[derivative(Debug)] -#[display(fmt = "", "self.target_universe.len()")] +#[display("", self.target_universe.len())] pub(crate) struct StarlarkTargetUniverse<'v> { // Cquery universe for performing target_universe: CqueryUniverse, @@ -54,13 +53,12 @@ pub(crate) struct StarlarkTargetUniverse<'v> { target_set: TargetSet<ConfiguredTargetNode>, // Trace/Allocative are implemented for BxlContext, but we take a reference here. // This is used in unpacking target expressions for lookups. - #[trace(unsafe_ignore)] #[derivative(Debug = "ignore")] #[allocative(skip)] - ctx: &'v BxlContext<'v>, + ctx: ValueTyped<'v, BxlContext<'v>>, } -#[starlark_value(type = "target_universe", StarlarkTypeRepr, UnpackValue)] +#[starlark_value(type = "bxl.TargetUniverse", StarlarkTypeRepr, UnpackValue)] impl<'v> StarlarkValue<'v> for StarlarkTargetUniverse<'v> { fn get_methods() -> Option<&'static Methods> { static RES: MethodsStatic = MethodsStatic::new(); @@ -76,10 +74,10 @@ impl<'v> AllocValue<'v> for StarlarkTargetUniverse<'v> { impl<'v> StarlarkTargetUniverse<'v> { pub(crate) async fn new( - ctx: &'v BxlContext<'v>, + ctx: ValueTyped<'v, BxlContext<'v>>, target_set: TargetSet<ConfiguredTargetNode>, - ) -> anyhow::Result<StarlarkTargetUniverse<'v>> { - let target_universe = CqueryUniverse::build(&target_set).await?; + ) -> anyhow::Result<Self> { + let target_universe = CqueryUniverse::build(&target_set)?; let target_set = target_universe .get_from_targets(target_set.iter().map(|i| i.label().unconfigured().dupe())); Ok(StarlarkTargetUniverse { @@ -93,25 +91,37 @@ impl<'v> StarlarkTargetUniverse<'v> { /// Target universe in BXL. Used for looking up valid configured targets to use in cquery. This is not needed for uquery.
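For orientation, the universe API described above can be driven from a BXL script roughly as follows. This is a sketch, not part of the diff: the target patterns are made up and `ctx.target_universe(...)` is assumed to be the constructor exposed on the BXL context; only `target_set()`, `universe_target_set()`, and `lookup()` come from the hunks below.

```text
def _universe_demo_impl(ctx):
    # Build a universe from some unconfigured targets (pattern is made up).
    universe = ctx.target_universe("root//mylib/...")
    # The nodes the universe was constructed from:
    ctx.output.print(universe.target_set())
    # Configured nodes found in the universe for unconfigured inputs:
    ctx.output.print(universe.lookup("root//mylib:some_dep"))

universe_demo = bxl_main(impl = _universe_demo_impl, cli_args = {})
```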
#[starlark_module] fn target_universe_methods(builder: &mut MethodsBuilder) { - // The target set of the target universe. + /// The target set of the nodes used to construct the target universe. fn target_set<'v>( this: &'v StarlarkTargetUniverse<'v>, heap: &'v Heap, - ) -> anyhow::Result<Value<'v>> { - Ok(heap.alloc(StarlarkTargetSet::from(this.target_set.clone()))) + ) -> anyhow::Result<ValueTyped<'v, StarlarkTargetSet<ConfiguredTargetNode>>> { + Ok(heap.alloc_typed(StarlarkTargetSet::from(this.target_set.clone()))) } - // Looks up valid configured target nodes within the universe. The targets passed in are either string literals, - // unconfigured target nodes, or unconfigured target labels. + /// The target set of the entire target universe. + fn universe_target_set<'v>( + this: &'v StarlarkTargetUniverse<'v>, + ) -> anyhow::Result<StarlarkTargetSet<ConfiguredTargetNode>> { + Ok(StarlarkTargetSet::from( + this.target_universe + .iter() + .map(|node| node.to_owned()) + .collect::<TargetSet<_>>(), + )) + } + + /// Looks up valid configured target nodes within the universe. The targets passed in are either string literals, + /// unconfigured target nodes, or unconfigured target labels. fn lookup<'v>( this: &'v StarlarkTargetUniverse<'v>, - targets: Value<'v>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result<Value<'v>> { - this.ctx.via_dice(|mut dice, ctx| { + targets: TargetListExprArg<'v>, + eval: &mut Evaluator<'v, '_, '_>, + ) -> anyhow::Result<ValueTyped<'v, StarlarkTargetSet<ConfiguredTargetNode>>> { + this.ctx.via_dice(|dice, ctx| { dice.via(|dice| { async move { - let inputs = &*TargetExpr::<'v, TargetNode>::unpack(targets, ctx, dice, eval) + let inputs = &*TargetListExpr::<'v, TargetNode>::unpack(targets, ctx, dice) .await? .get(dice) .await?; @@ -120,7 +130,7 @@ fn target_universe_methods(builder: &mut MethodsBuilder) { .target_universe .get_from_targets(inputs.iter().map(|i| i.label().dupe())); - Ok(eval.heap().alloc(StarlarkTargetSet::from(result))) + Ok(eval.heap().alloc_typed(StarlarkTargetSet::from(result))) } .boxed_local() })
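The next file, `targetset.rs`, moves `StarlarkTargetSet` onto the newer starlark-rs unpack and error APIs and adds a `FromIterator` impl. As a crate-internal sketch of what that impl enables (names are per this diff; this does not compile outside `buck2_bxl`):

```rust
use buck2_node::nodes::unconfigured::TargetNode;

// Sketch: nodes can now be collected straight into a StarlarkTargetSet,
// relying on the FromIterator impl added in the hunk below.
fn collect_demo(nodes: Vec<TargetNode>) -> StarlarkTargetSet<TargetNode> {
    nodes.into_iter().collect()
}
```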
diff --git a/app/buck2_bxl/src/bxl/starlark_defs/targetset.rs b/app/buck2_bxl/src/bxl/starlark_defs/targetset.rs index 677512502f98e..4f3a420b6f930 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/targetset.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/targetset.rs @@ -7,12 +7,12 @@ * of this source tree. */ +use std::convert::Infallible; use std::ops::Deref; use allocative::Allocative; use buck2_query::query::environment::QueryTarget; use buck2_query::query::syntax::simple::eval::set::TargetSet; -use buck2_query::query::syntax::simple::eval::set::TargetSetExt; use derive_more::Display; use dupe::Dupe; use starlark::any::ProvidesStaticType; @@ -63,14 +63,18 @@ impl<Node: NodeLike> Freeze for StarlarkTargetSet<Node> { } impl<'v, Node: NodeLike> StarlarkTypeRepr for &'v StarlarkTargetSet<Node> { + type Canonical = Self; + fn starlark_type_repr() -> Ty { StarlarkTargetSet::<Node>::starlark_type_repr() } } impl<'v, Node: NodeLike> UnpackValue<'v> for &'v StarlarkTargetSet<Node> { - fn unpack_value(x: Value<'v>) -> Option<Self> { - StarlarkTargetSet::from_value(x) + type Error = Infallible; + + fn unpack_value_impl(x: Value<'v>) -> Result<Option<Self>, Self::Error> { + Ok(StarlarkTargetSet::from_value(x)) } } @@ -84,31 +88,31 @@ impl<'v, Node: NodeLike> AllocValue<'v> for StarlarkTargetSet<Node> { impl<'v, Node: NodeLike> StarlarkValue<'v> for StarlarkTargetSet<Node> { type Canonical = Self; - fn iterate_collect(&self, heap: &'v Heap) -> anyhow::Result<Vec<Value<'v>>> { + fn iterate_collect(&self, heap: &'v Heap) -> starlark::Result<Vec<Value<'v>>> { Ok(self.iter(heap).collect()) } - fn at(&self, index: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { + fn at(&self, index: Value<'v>, heap: &'v Heap) -> starlark::Result<Value<'v>> { let i = i32::unpack_value_err(index)?; if let Ok(i) = usize::try_from(i) { if let Some(node) = self.0.get_index(i) { return Ok(node.dupe().alloc(heap)); } } - Err(anyhow::anyhow!(ValueError::IndexOutOfBound(i))) + Err(ValueError::IndexOutOfBound(i).into()) } - fn length(&self) -> anyhow::Result<i32> { - Ok(self.0.len().try_into()?) + fn length(&self) -> starlark::Result<i32> { + self.0.len().try_into().map_err(starlark::Error::new_other) } - fn add(&self, other: Value<'v>, heap: &'v Heap) -> Option<anyhow::Result<Value<'v>>> { + fn add(&self, other: Value<'v>, heap: &'v Heap) -> Option<starlark::Result<Value<'v>>> { let other = other.downcast_ref::<Self>()?; let union = self.0.union(&other.0); Some(Ok(heap.alloc(Self(union)))) } - fn sub(&self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { + fn sub(&self, other: Value<'v>, heap: &'v Heap) -> starlark::Result<Value<'v>> { let Some(other) = other.downcast_ref::<Self>() else { return ValueError::unsupported_with(self, "-", other); }; @@ -116,14 +120,14 @@ impl<'v, Node: NodeLike> StarlarkValue<'v> for StarlarkTargetSet<Node> { Ok(heap.alloc(Self(difference))) } - fn equals(&self, other: Value<'v>) -> anyhow::Result<bool> { + fn equals(&self, other: Value<'v>) -> starlark::Result<bool> { match other.downcast_ref::<StarlarkTargetSet<Node>>() { Some(other) => Ok(self.0 == other.0), None => Ok(false), } } - fn bit_and(&self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { + fn bit_and(&self, other: Value<'v>, heap: &'v Heap) -> starlark::Result<Value<'v>> { let Some(other) = other.downcast_ref::<Self>() else { return ValueError::unsupported_with(self, "&", other); }; @@ -138,6 +142,13 @@ impl<Node: NodeLike> From<TargetSet<Node>> for StarlarkTargetSet<Node> { } } +impl<Node: NodeLike> FromIterator<Node> for StarlarkTargetSet<Node> { + fn from_iter<Iter: IntoIterator<Item = Node>>(iter: Iter) -> Self { + let targets = TargetSet::from_iter(iter); + Self(targets) + } +} + impl<Node: NodeLike> Deref for StarlarkTargetSet<Node> { type Target = TargetSet<Node>; diff --git a/app/buck2_bxl/src/bxl/starlark_defs/time.rs b/app/buck2_bxl/src/bxl/starlark_defs/time.rs index cafd87e2678c2..72265d55cba26 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/time.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/time.rs @@ -16,13 +16,11 @@ use starlark::environment::MethodsStatic; use starlark::starlark_module; use starlark::starlark_simple_value; use starlark::values::starlark_value; -use 
starlark::values::Heap; use starlark::values::NoSerialize; use starlark::values::ProvidesStaticType; use starlark::values::StarlarkValue; use starlark::values::Value; use starlark::values::ValueLike; -use starlark::StarlarkDocs; /// Starlark object for Instant. #[derive( @@ -31,11 +29,9 @@ use starlark::StarlarkDocs; derive_more::Display, ProvidesStaticType, NoSerialize, - StarlarkDocs, Allocative )] -#[starlark_docs(directory = "bxl")] -#[display(fmt = "{:?}", _0)] +#[display("{:?}", _0)] pub(crate) struct StarlarkInstant(pub(crate) Instant); /// Instant methods, to aid in debugging/timing individual pieces of the bxl script. @@ -54,7 +50,7 @@ fn starlark_instant_methods(builder: &mut MethodsBuilder) { /// ctx.output.print(time_a) /// ctx.output.print(time_b) /// ``` - fn elapsed_secs<'v>(this: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + fn elapsed_secs<'v>(this: Value<'v>) -> anyhow::Result { let secs = this .downcast_ref::() .unwrap() @@ -62,7 +58,7 @@ fn starlark_instant_methods(builder: &mut MethodsBuilder) { .elapsed() .as_secs() as f64; - Ok(heap.alloc(secs)) + Ok(secs) } /// Elapsed time in millis as a float @@ -78,7 +74,7 @@ fn starlark_instant_methods(builder: &mut MethodsBuilder) { /// ctx.output.print(time_a) /// ctx.output.print(time_b) /// ``` - fn elapsed_millis<'v>(this: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + fn elapsed_millis<'v>(this: Value<'v>) -> anyhow::Result { let millis = this .downcast_ref::() .unwrap() @@ -86,7 +82,7 @@ fn starlark_instant_methods(builder: &mut MethodsBuilder) { .elapsed() .as_millis() as f64; - Ok(heap.alloc(millis)) + Ok(millis) } } diff --git a/app/buck2_bxl/src/bxl/starlark_defs/type_names.rs b/app/buck2_bxl/src/bxl/starlark_defs/type_names.rs index ea6b95fb0ee0a..1169dc5a153fe 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/type_names.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/type_names.rs @@ -13,35 +13,60 @@ use starlark::environment::GlobalsBuilder; use starlark::starlark_module; use starlark::values::starlark_value_as_type::StarlarkValueAsType; +use super::nodes::action::StarlarkActionQueryNode; +use crate::bxl::starlark_defs::analysis_result::StarlarkAnalysisResult; +use crate::bxl::starlark_defs::aquery::StarlarkAQueryCtx; +use crate::bxl::starlark_defs::artifacts::EnsuredArtifact; use crate::bxl::starlark_defs::audit::StarlarkAuditCtx; use crate::bxl::starlark_defs::build_result::StarlarkBxlBuildResult; +use crate::bxl::starlark_defs::cli_args::CliArgs; use crate::bxl::starlark_defs::context::actions::BxlActions; use crate::bxl::starlark_defs::context::fs::BxlFilesystem; +use crate::bxl::starlark_defs::context::output::OutputStream; use crate::bxl::starlark_defs::context::BxlContext; use crate::bxl::starlark_defs::cquery::StarlarkCQueryCtx; +use crate::bxl::starlark_defs::file_set::StarlarkFileNode; +use crate::bxl::starlark_defs::lazy_ctx::operation::StarlarkLazy; +use crate::bxl::starlark_defs::lazy_ctx::StarlarkLazyCtx; +use crate::bxl::starlark_defs::nodes::configured::StarlarkConfiguredTargetNode; +use crate::bxl::starlark_defs::nodes::configured::StarlarkLazyResolvedAttrs; +use crate::bxl::starlark_defs::nodes::unconfigured::StarlarkTargetNode; +use crate::bxl::starlark_defs::result::StarlarkError; +use crate::bxl::starlark_defs::result::StarlarkResult; +use crate::bxl::starlark_defs::target_universe::StarlarkTargetUniverse; use crate::bxl::starlark_defs::targetset::StarlarkTargetSet; use crate::bxl::starlark_defs::uquery::StarlarkUQueryCtx; -#[starlark_module] -pub(crate) fn register_bxl_type_names(globals: 
&mut GlobalsBuilder) { - // TODO(nga): remove these. - const BxlContext: StarlarkValueAsType = StarlarkValueAsType::new(); - const BxlActions: StarlarkValueAsType = StarlarkValueAsType::new(); - const BxlFilesystem: StarlarkValueAsType = StarlarkValueAsType::new(); - const BxlBuildResult: StarlarkValueAsType = StarlarkValueAsType::new(); -} - #[starlark_module] pub(crate) fn register_bxl_type_names_in_bxl_namespace(globals: &mut GlobalsBuilder) { + const CliArgs: StarlarkValueAsType = StarlarkValueAsType::new(); const Context: StarlarkValueAsType = StarlarkValueAsType::new(); const AuditContext: StarlarkValueAsType = StarlarkValueAsType::new(); + const AqueryContext: StarlarkValueAsType = StarlarkValueAsType::new(); const CqueryContext: StarlarkValueAsType = StarlarkValueAsType::new(); const UqueryContext: StarlarkValueAsType = StarlarkValueAsType::new(); const Actions: StarlarkValueAsType = StarlarkValueAsType::new(); const Filesystem: StarlarkValueAsType = StarlarkValueAsType::new(); const BuildResult: StarlarkValueAsType = StarlarkValueAsType::new(); - const TargetSet: StarlarkValueAsType> = + const AnalysisResult: StarlarkValueAsType = StarlarkValueAsType::new(); + const EnsuredArtifact: StarlarkValueAsType = StarlarkValueAsType::new(); + const FileNode: StarlarkValueAsType = StarlarkValueAsType::new(); + const ActionQueryNode: StarlarkValueAsType = + StarlarkValueAsType::new(); + const UnconfiguredTargetNode: StarlarkValueAsType = + StarlarkValueAsType::new(); + const ConfiguredTargetNode: StarlarkValueAsType = + StarlarkValueAsType::new(); + const LazyResolvedAttrs: StarlarkValueAsType = + StarlarkValueAsType::new(); + const UnconfiguredTargetSet: StarlarkValueAsType> = StarlarkValueAsType::new(); const ConfiguredTargetSet: StarlarkValueAsType> = StarlarkValueAsType::new(); + const TargetUniverse: StarlarkValueAsType = StarlarkValueAsType::new(); + const OutputStream: StarlarkValueAsType = StarlarkValueAsType::new(); + const LazyContext: StarlarkValueAsType = StarlarkValueAsType::new(); + const Lazy: StarlarkValueAsType = StarlarkValueAsType::new(); + const Error: StarlarkValueAsType = StarlarkValueAsType::new(); + const Result: StarlarkValueAsType = StarlarkValueAsType::new(); } diff --git a/app/buck2_bxl/src/bxl/starlark_defs/uquery.rs b/app/buck2_bxl/src/bxl/starlark_defs/uquery.rs index b8b14b5e48bb6..9b95af529b7b7 100644 --- a/app/buck2_bxl/src/bxl/starlark_defs/uquery.rs +++ b/app/buck2_bxl/src/bxl/starlark_defs/uquery.rs @@ -7,28 +7,31 @@ * of this source tree. 
*/ +use std::borrow::Cow; + use allocative::Allocative; use buck2_build_api::query::bxl::BxlUqueryFunctions; use buck2_build_api::query::bxl::NEW_BXL_UQUERY_FUNCTIONS; use buck2_build_api::query::oneshot::QUERY_FRONTEND; use buck2_node::nodes::unconfigured::TargetNode; -use buck2_query::query::syntax::simple::eval::set::TargetSetExt; +use buck2_query::query::syntax::simple::eval::set::TargetSet; use buck2_query::query::syntax::simple::functions::helpers::CapturedExpr; use derivative::Derivative; use derive_more::Display; +use dice::DiceComputations; use dupe::Dupe; use futures::FutureExt; use gazebo::prelude::OptionExt; -use gazebo::prelude::SliceExt; use starlark::any::ProvidesStaticType; use starlark::environment::Methods; use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; use starlark::eval::Evaluator; use starlark::starlark_module; -use starlark::values::list::ListRef; +use starlark::values::list::UnpackList; use starlark::values::none::NoneOr; use starlark::values::starlark_value; +use starlark::values::type_repr::StarlarkTypeRepr; use starlark::values::AllocValue; use starlark::values::Heap; use starlark::values::NoSerialize; @@ -36,15 +39,15 @@ use starlark::values::StarlarkValue; use starlark::values::Trace; use starlark::values::UnpackValue; use starlark::values::Value; -use starlark::values::ValueError; -use starlark::StarlarkDocs; +use starlark::values::ValueTyped; use super::file_set::StarlarkFileSet; -use super::target_expr::TargetExpr; +use super::target_list_expr::TargetListExpr; use crate::bxl::starlark_defs::context::BxlContext; use crate::bxl::starlark_defs::context::BxlContextNoDice; use crate::bxl::starlark_defs::file_set::FileSetExpr; use crate::bxl::starlark_defs::query_util::parse_query_evaluation_result; +use crate::bxl::starlark_defs::target_list_expr::TargetListExprArg; use crate::bxl::starlark_defs::targetset::StarlarkTargetSet; #[derive( @@ -53,20 +56,17 @@ use crate::bxl::starlark_defs::targetset::StarlarkTargetSet; Display, Trace, NoSerialize, - Allocative, - StarlarkDocs + Allocative )] -#[starlark_docs(directory = "bxl")] #[derivative(Debug)] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] #[allocative(skip)] pub(crate) struct StarlarkUQueryCtx<'v> { - #[trace(unsafe_ignore)] #[derivative(Debug = "ignore")] - ctx: &'v BxlContext<'v>, + ctx: ValueTyped<'v, BxlContext<'v>>, } -#[starlark_value(type = "uqueryctx", StarlarkTypeRepr, UnpackValue)] +#[starlark_value(type = "bxl.UqueryContext", StarlarkTypeRepr, UnpackValue)] impl<'v> StarlarkValue<'v> for StarlarkUQueryCtx<'v> { fn get_methods() -> Option<&'static Methods> { static RES: MethodsStatic = MethodsStatic::new(); @@ -79,8 +79,8 @@ pub(crate) async fn get_uquery_env<'v>( ) -> anyhow::Result> { (NEW_BXL_UQUERY_FUNCTIONS.get()?)( ctx.project_root().dupe(), - ctx.cell_name, - ctx.cell_resolver.dupe(), + ctx.cell_name(), + ctx.cell_resolver().dupe(), ) .await } @@ -92,11 +92,22 @@ impl<'v> AllocValue<'v> for StarlarkUQueryCtx<'v> { } impl<'v> StarlarkUQueryCtx<'v> { - pub(crate) fn new(ctx: &'v BxlContext<'v>) -> anyhow::Result { + pub(crate) fn new(ctx: ValueTyped<'v, BxlContext<'v>>) -> anyhow::Result { Ok(Self { ctx }) } } +async fn unpack_targets<'c, 'v>( + this: &'c StarlarkUQueryCtx<'v>, + dice: &'c mut DiceComputations<'_>, + targets: TargetListExprArg<'v>, +) -> anyhow::Result>> { + TargetListExpr::<'v, TargetNode>::unpack(targets, &this.ctx.data, dice) + .await? + .get(dice) + .await +} + /// The context for performing `uquery` operations in bxl. 
The functions offered on this ctx are /// the same behaviour as the query functions available within uquery command. #[starlark_module] @@ -104,24 +115,26 @@ fn uquery_methods(builder: &mut MethodsBuilder) { /// The `allpaths` query for computing all dependency paths. fn allpaths<'v>( this: &StarlarkUQueryCtx<'v>, - from: Value<'v>, - to: Value<'v>, - eval: &mut Evaluator<'v, '_>, + from: TargetListExprArg<'v>, + to: TargetListExprArg<'v>, + #[starlark(default = NoneOr::None)] filter: NoneOr<&'v str>, ) -> anyhow::Result> { - this.ctx.via_dice(|mut dice, ctx| { + this.ctx.via_dice(|dice, ctx| { dice.via(|dice| { async { - let from = TargetExpr::<'v, TargetNode>::unpack(from, ctx, dice, eval) - .await? - .get(dice) - .await?; - let to = TargetExpr::<'v, TargetNode>::unpack(to, ctx, dice, eval) - .await? - .get(dice) - .await?; + let filter = filter + .into_option() + .try_map(buck2_query_parser::parse_expr)?; + let from = unpack_targets(this, dice, from).await?; + let to = unpack_targets(this, dice, to).await?; get_uquery_env(ctx) .await? - .allpaths(dice, &from, &to) + .allpaths( + dice, + &from, + &to, + filter.as_ref().map(|expr| CapturedExpr { expr }).as_ref(), + ) .await .map(StarlarkTargetSet::from) } @@ -133,24 +146,27 @@ fn uquery_methods(builder: &mut MethodsBuilder) { /// The somepaths query, which returns the graph of nodes on some arbitrary path from a start to destination target. fn somepath<'v>( this: &StarlarkUQueryCtx<'v>, - from: Value<'v>, - to: Value<'v>, - eval: &mut Evaluator<'v, '_>, + from: TargetListExprArg<'v>, + to: TargetListExprArg<'v>, + #[starlark(default = NoneOr::None)] filter: NoneOr<&'v str>, ) -> anyhow::Result> { - this.ctx.via_dice(|mut dice, ctx| { + this.ctx.via_dice(|dice, ctx| { dice.via(|dice| { async { - let from = TargetExpr::<'v, TargetNode>::unpack(from, ctx, dice, eval) - .await? - .get(dice) - .await?; - let to = TargetExpr::<'v, TargetNode>::unpack(to, ctx, dice, eval) - .await? - .get(dice) - .await?; + let filter = filter + .into_option() + .try_map(buck2_query_parser::parse_expr)?; + + let from = unpack_targets(this, dice, from).await?; + let to = unpack_targets(this, dice, to).await?; get_uquery_env(ctx) .await? - .somepath(dice, &from, &to) + .somepath( + dice, + &from, + &to, + filter.as_ref().map(|expr| CapturedExpr { expr }).as_ref(), + ) .await .map(StarlarkTargetSet::from) } @@ -164,16 +180,12 @@ fn uquery_methods(builder: &mut MethodsBuilder) { this: &StarlarkUQueryCtx<'v>, attr: &str, value: &str, - targets: Value<'v>, - eval: &mut Evaluator<'v, '_>, + targets: TargetListExprArg<'v>, ) -> anyhow::Result> { - this.ctx.via_dice(|mut dice, ctx| { + this.ctx.via_dice(|dice, _| { dice.via(|dice| { async { - let targets = TargetExpr::<'v, TargetNode>::unpack(targets, ctx, dice, eval) - .await? - .get(dice) - .await?; + let targets = unpack_targets(this, dice, targets).await?; targets .attrfilter(attr, &|v| Ok(v == value)) .map(StarlarkTargetSet::from) @@ -193,18 +205,13 @@ fn uquery_methods(builder: &mut MethodsBuilder) { /// ``` fn inputs<'v>( this: &StarlarkUQueryCtx<'v>, - targets: Value<'v>, - eval: &mut Evaluator<'v, '_>, + targets: TargetListExprArg<'v>, ) -> anyhow::Result { this.ctx - .via_dice(|mut dice, ctx| { + .via_dice(|dice, _| { dice.via(|dice| { async { - let targets = - TargetExpr::<'v, TargetNode>::unpack(targets, ctx, dice, eval) - .await? 
- .get(dice) - .await?; + let targets = unpack_targets(this, dice, targets).await?; targets.inputs() } .boxed_local() @@ -224,16 +231,12 @@ fn uquery_methods(builder: &mut MethodsBuilder) { fn kind<'v>( this: &StarlarkUQueryCtx<'v>, regex: &str, - targets: Value<'v>, - eval: &mut Evaluator<'v, '_>, + targets: TargetListExprArg<'v>, ) -> anyhow::Result> { - this.ctx.via_dice(|mut dice, ctx| { + this.ctx.via_dice(|dice, _| { dice.via(|dice| { async { - let targets = TargetExpr::<'v, TargetNode>::unpack(targets, ctx, dice, eval) - .await? - .get(dice) - .await?; + let targets = unpack_targets(this, dice, targets).await?; targets.kind(regex).map(StarlarkTargetSet::from) } .boxed_local() @@ -251,24 +254,19 @@ fn uquery_methods(builder: &mut MethodsBuilder) { /// ``` fn deps<'v>( this: &StarlarkUQueryCtx<'v>, - universe: Value<'v>, + universe: TargetListExprArg<'v>, #[starlark(default = NoneOr::None)] depth: NoneOr, #[starlark(default = NoneOr::None)] filter: NoneOr<&'v str>, - eval: &mut Evaluator<'v, '_>, ) -> anyhow::Result> { this.ctx - .via_dice(|mut dice, ctx| { + .via_dice(|dice, ctx| { dice.via(|dice| { async { let filter = filter .into_option() .try_map(buck2_query_parser::parse_expr)?; - let targets = - TargetExpr::<'v, TargetNode>::unpack(universe, ctx, dice, eval) - .await? - .get(dice) - .await?; + let targets = unpack_targets(this, dice, universe).await?; get_uquery_env(ctx) .await? @@ -276,10 +274,7 @@ fn uquery_methods(builder: &mut MethodsBuilder) { dice, &targets, depth.into_option(), - filter - .as_ref() - .map(|span| CapturedExpr { expr: span }) - .as_ref(), + filter.as_ref().map(|expr| CapturedExpr { expr }).as_ref(), ) .await } @@ -299,29 +294,31 @@ fn uquery_methods(builder: &mut MethodsBuilder) { /// ``` fn rdeps<'v>( this: &StarlarkUQueryCtx<'v>, - universe: Value<'v>, - from: Value<'v>, - depth: Option, - eval: &mut Evaluator<'v, '_>, + universe: TargetListExprArg<'v>, + from: TargetListExprArg<'v>, + #[starlark(default = NoneOr::None)] depth: NoneOr, + #[starlark(default = NoneOr::None)] filter: NoneOr<&'v str>, ) -> anyhow::Result> { this.ctx - .via_dice(|mut dice, ctx| { + .via_dice(|dice, ctx| { dice.via(|dice| { async { - let universe = - TargetExpr::<'v, TargetNode>::unpack(universe, ctx, dice, eval) - .await? - .get(dice) - .await?; + let filter = filter + .into_option() + .try_map(buck2_query_parser::parse_expr)?; - let targets = TargetExpr::<'v, TargetNode>::unpack(from, ctx, dice, eval) - .await? - .get(dice) - .await?; + let universe = unpack_targets(this, dice, universe).await?; + let targets = unpack_targets(this, dice, from).await?; get_uquery_env(ctx) .await? - .rdeps(dice, &universe, &targets, depth) + .rdeps( + dice, + &universe, + &targets, + depth.into_option(), + filter.as_ref().map(|expr| CapturedExpr { expr }).as_ref(), + ) .await } .boxed_local() @@ -341,18 +338,13 @@ fn uquery_methods(builder: &mut MethodsBuilder) { fn filter<'v>( this: &StarlarkUQueryCtx<'v>, regex: &str, - targets: Value<'v>, - eval: &mut Evaluator<'v, '_>, + targets: TargetListExprArg<'v>, ) -> anyhow::Result> { this.ctx - .via_dice(|mut dice, ctx| { + .via_dice(|dice, _| { dice.via(|dice| { async { - let targets = - TargetExpr::<'v, TargetNode>::unpack(targets, ctx, dice, eval) - .await? 
- .get(dice) - .await?; + let targets = unpack_targets(this, dice, targets).await?; targets.filter_name(regex) } .boxed_local() @@ -371,18 +363,13 @@ fn uquery_methods(builder: &mut MethodsBuilder) { /// ``` fn testsof<'v>( this: &StarlarkUQueryCtx<'v>, - targets: Value<'v>, - eval: &mut Evaluator<'v, '_>, + targets: TargetListExprArg<'v>, ) -> anyhow::Result> { this.ctx - .via_dice(|mut dice, ctx| { + .via_dice(|dice, ctx| { dice.via(|dice| { async { - let targets = - TargetExpr::<'v, TargetNode>::unpack(targets, ctx, dice, eval) - .await? - .get(dice) - .await?; + let targets = unpack_targets(this, dice, targets).await?; get_uquery_env(ctx).await?.testsof(dice, &targets).await } .boxed_local() @@ -402,19 +389,13 @@ fn uquery_methods(builder: &mut MethodsBuilder) { /// ``` fn buildfile<'v>( this: &StarlarkUQueryCtx<'v>, - targets: Value<'v>, - eval: &mut Evaluator<'v, '_>, + targets: TargetListExprArg<'v>, ) -> anyhow::Result { this.ctx - .via_dice(|mut dice, ctx| { + .via_dice(|dice, _| { dice.via(|dice| { async { - let targets = - &*TargetExpr::<'v, TargetNode>::unpack(targets, ctx, dice, eval) - .await? - .get(dice) - .await?; - + let targets = unpack_targets(this, dice, targets).await?; Ok(targets.buildfile()) } .boxed_local() @@ -439,12 +420,41 @@ fn uquery_methods(builder: &mut MethodsBuilder) { files: FileSetExpr, ) -> anyhow::Result> { this.ctx - .via_dice(|mut dice, ctx| { + .via_dice(|dice, ctx| { + dice.via(|dice| { + async { + get_uquery_env(ctx) + .await? + .owner(dice, (files.get(&this.ctx.data).await?).as_ref()) + .await + } + .boxed_local() + }) + }) + .map(StarlarkTargetSet::from) + } + + /// Given a set of buildfiles, return all targets within those buildfiles. + /// + /// Usage: + /// ```text + /// def _targets_in_buildfile_impl(ctx): + /// targets = ctx.uquery().targets_in_buildfile("bin/TARGETS.fixture") + /// ctx.output.print(targets) + /// ``` + /// + /// This is subject to be removed in future in favor of a more general `targets_in_packages`. + fn targets_in_buildfile<'v>( + this: &StarlarkUQueryCtx, + files: FileSetExpr, + ) -> anyhow::Result> { + this.ctx + .via_dice(|dice, ctx| { dice.via(|dice| { async { get_uquery_env(ctx) .await? - .owner(dice, (files.get(ctx).await?).as_ref()) + .targets_in_buildfile(dice, (files.get(&this.ctx.data).await?).as_ref()) .await } .boxed_local() @@ -465,16 +475,12 @@ fn uquery_methods(builder: &mut MethodsBuilder) { this: &StarlarkUQueryCtx<'v>, attribute: &str, value: &str, - targets: Value<'v>, - eval: &mut Evaluator<'v, '_>, + targets: TargetListExprArg<'v>, ) -> anyhow::Result> { - this.ctx.via_dice(|mut dice, ctx| { + this.ctx.via_dice(|dice, _| { dice.via(|dice| { async { - let targets = TargetExpr::<'v, TargetNode>::unpack(targets, ctx, dice, eval) - .await? - .get(dice) - .await?; + let targets = unpack_targets(this, dice, targets).await?; targets .attrregexfilter(attribute, value) .map(StarlarkTargetSet::from) @@ -485,7 +491,8 @@ fn uquery_methods(builder: &mut MethodsBuilder) { } /// Evaluates some general query string, `query_args` can be a target_set of unconfigured nodes, or - /// a list of strings. + /// a list of strings. Returns a `dict` of target labels mapped to their `target_set` results if `query_args` + /// was passed in, otherwise returns a single `target_set`. 
/// /// Sample usage: /// ```text @@ -499,33 +506,23 @@ fn uquery_methods(builder: &mut MethodsBuilder) { fn eval<'v>( this: &StarlarkUQueryCtx<'v>, query: &'v str, - #[starlark(default = NoneOr::None)] query_args: NoneOr>, - eval: &mut Evaluator<'v, '_>, + #[starlark(default = NoneOr::None)] query_args: NoneOr>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result> { - let query_args = if query_args.is_none() { - Vec::new() - } else { - let unwrapped_query_args = query_args.into_option().unwrap(); - if let Some(query_args) = unpack_unconfigured_query_args(unwrapped_query_args)? { - query_args - } else { - return Err(ValueError::IncorrectParameterTypeWithExpected( - "list of strings, or a target_set with unconfigured nodes".to_owned(), - unwrapped_query_args.get_type().to_owned(), - ) - .into()); - } + let query_args = match query_args { + NoneOr::None => Vec::new(), + NoneOr::Other(query_args) => query_args.into_strings(), }; - this.ctx.via_dice(|mut dice, _| { + this.ctx.via_dice(|dice, _| { dice.via(|dice| { async { parse_query_evaluation_result( QUERY_FRONTEND .get()? - .eval_uquery(dice, &this.ctx.working_dir()?, query, &query_args, None) + .eval_uquery(dice, &this.ctx.working_dir()?, query, &query_args) .await?, - eval, + eval.heap(), ) } .boxed_local() @@ -534,24 +531,21 @@ fn uquery_methods(builder: &mut MethodsBuilder) { } } -pub(crate) fn unpack_unconfigured_query_args<'v>( - query_args: Value<'v>, -) -> anyhow::Result>> { - if let Some(list) = <&ListRef>::unpack_value(query_args) { - Ok(Some(list.content().try_map(|e| match e.unpack_str() { - Some(arg) => Ok(arg.to_owned()), - None => Err(ValueError::IncorrectParameterTypeWithExpected( - "list of strings, or a target_set of unconfigured nodes".to_owned(), - query_args.get_type().to_owned(), - )), - })?)) - } else if let Some(set) = <&StarlarkTargetSet>::unpack_value(query_args) { - // TODO - we really should change eval_query() to handle this, but escaping the unconfigured target label for now - // as a quick solution. - Ok(Some( - set.0.iter_names().map(|e| format!("\"{}\"", e)).collect(), - )) - } else { - Ok(None) +#[derive(StarlarkTypeRepr, UnpackValue)] +pub(crate) enum UnpackUnconfiguredQueryArgs<'v> { + ListOfStrings(UnpackList), + TargetSet(&'v StarlarkTargetSet), +} + +impl<'v> UnpackUnconfiguredQueryArgs<'v> { + pub(crate) fn into_strings(self) -> Vec { + match self { + UnpackUnconfiguredQueryArgs::ListOfStrings(list) => list.items, + UnpackUnconfiguredQueryArgs::TargetSet(set) => { + // TODO - we really should change eval_query() to handle this, but escaping the unconfigured target label for now + // as a quick solution. 
+ set.0.iter_names().map(|e| format!("\"{}\"", e)).collect() + } + } } } diff --git a/app/buck2_bxl/src/bxl/value_as_starlark_target_label.rs b/app/buck2_bxl/src/bxl/value_as_starlark_target_label.rs index c3fe41fd2611b..205d31f868c36 100644 --- a/app/buck2_bxl/src/bxl/value_as_starlark_target_label.rs +++ b/app/buck2_bxl/src/bxl/value_as_starlark_target_label.rs @@ -11,60 +11,55 @@ use buck2_common::target_aliases::BuckConfigTargetAliasResolver; use buck2_core::cells::cell_path::CellPathRef; use buck2_core::cells::name::CellName; use buck2_core::cells::paths::CellRelativePath; +use buck2_core::cells::CellAliasResolver; use buck2_core::cells::CellResolver; +use buck2_core::pattern::pattern::ParsedPattern; use buck2_core::pattern::pattern_type::TargetPatternExtra; -use buck2_core::pattern::ParsedPattern; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use buck2_interpreter::types::target_label::StarlarkTargetLabel; use dupe::Dupe; -use starlark::values::Value; -use starlark::values::ValueLike; +use starlark::values::none::NoneType; +use starlark::values::type_repr::StarlarkTypeRepr; +use starlark::values::UnpackValue; -#[derive(Debug, thiserror::Error)] -enum ValueAsTargetLabelError { - #[error("Expected a single target like item, but was `{0}`")] - NotATarget(String), +#[derive(StarlarkTypeRepr, UnpackValue)] +pub(crate) enum ValueAsStarlarkTargetLabel<'v> { + None(NoneType), + Str(&'v str), + TargetLabel(&'v StarlarkTargetLabel), } -pub(crate) trait ValueAsStarlarkTargetLabel { - fn parse_target_platforms( - self, - target_alias_resolver: &BuckConfigTargetAliasResolver, - cell_resolver: &CellResolver, - cell_name: CellName, - default_target_platform: &Option, - ) -> anyhow::Result>; -} +impl<'v> ValueAsStarlarkTargetLabel<'v> { + pub(crate) const NONE: Self = Self::None(NoneType); -impl<'v> ValueAsStarlarkTargetLabel for Value<'v> { - fn parse_target_platforms( + pub(crate) fn parse_target_platforms( self, target_alias_resolver: &BuckConfigTargetAliasResolver, cell_resolver: &CellResolver, + cell_alias_resolver: &CellAliasResolver, cell_name: CellName, default_target_platform: &Option, ) -> anyhow::Result> { - let target_platform = if self.is_none() { - default_target_platform.clone() - } else if let Some(s) = self.unpack_str() { - Some( - ParsedPattern::::parse_relaxed( - target_alias_resolver, - // TODO(nga): Parse relaxed relative to cell root is incorrect. - CellPathRef::new(cell_name, CellRelativePath::empty()), - s, - cell_resolver, - )? - .as_target_label(s)?, - ) - } else if let Some(target) = self.downcast_ref::() { - Some(target.label().dupe()) - } else { - return Err(anyhow::anyhow!(ValueAsTargetLabelError::NotATarget( - self.to_repr() - ))); - }; + match self { + ValueAsStarlarkTargetLabel::None(_) => Ok(default_target_platform.clone()), + ValueAsStarlarkTargetLabel::Str(s) => { + Ok(Some( + ParsedPattern::::parse_relaxed( + target_alias_resolver, + // TODO(nga): Parse relaxed relative to cell root is incorrect. + CellPathRef::new(cell_name, CellRelativePath::empty()), + s, + cell_resolver, + cell_alias_resolver, + )? 
+ .as_target_label(s)?, + )) + } + ValueAsStarlarkTargetLabel::TargetLabel(target) => Ok(Some(target.label().dupe())), + } + } - Ok(target_platform) + pub(crate) fn is_none(&self) -> bool { + matches!(self, Self::None(_)) } } diff --git a/app/buck2_bxl/src/command.rs b/app/buck2_bxl/src/command.rs index d0fb9da68d780..d72d86c8723c5 100644 --- a/app/buck2_bxl/src/command.rs +++ b/app/buck2_bxl/src/command.rs @@ -7,6 +7,7 @@ * of this source tree. */ +use std::collections::BTreeMap; use std::io; use std::io::Write; use std::sync::Arc; @@ -15,50 +16,52 @@ use anyhow::Context; use async_trait::async_trait; use buck2_build_api::actions::artifact::get_artifact_fs::GetArtifactFs; use buck2_build_api::artifact_groups::ArtifactGroup; -use buck2_build_api::build::materialize_artifact_group; -use buck2_build_api::build::BuildTargetResult; -use buck2_build_api::build::ConvertMaterializationContext; -use buck2_build_api::build::MaterializationContext; +use buck2_build_api::build::build_report::generate_build_report; +use buck2_build_api::build::build_report::BuildReportOpts; +use buck2_build_api::build::ConfiguredBuildTargetResult; use buck2_build_api::bxl::build_result::BxlBuildResult; -use buck2_build_api::bxl::calculation::BxlComputeResult; use buck2_build_api::bxl::types::BxlFunctionLabel; +use buck2_build_api::materialize::materialize_artifact_group; +use buck2_build_api::materialize::MaterializationContext; use buck2_cli_proto::build_request::Materializations; use buck2_cli_proto::BxlRequest; use buck2_cli_proto::BxlResponse; -use buck2_cli_proto::HasClientContext; use buck2_common::dice::cells::HasCellResolver; use buck2_common::dice::data::HasIoProvider; -use buck2_common::result::SharedError; use buck2_common::target_aliases::HasTargetAliasResolver; use buck2_core::cells::cell_path::CellPath; +use buck2_core::cells::CellAliasResolver; use buck2_core::cells::CellResolver; use buck2_core::fs::buck_out_path::BuckOutPath; use buck2_core::fs::fs_util; use buck2_core::fs::project_rel_path::ProjectRelativePath; use buck2_core::package::PackageLabel; +use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_core::soft_error; use buck2_core::tag_result; use buck2_data::BxlEnsureArtifactsEnd; use buck2_data::BxlEnsureArtifactsStart; -use buck2_data::StarlarkFailNoStacktrace; +use buck2_error::BuckErrorContext; use buck2_events::dispatch::get_dispatcher; +use buck2_events::errors::create_error_report; use buck2_interpreter::load_module::InterpreterCalculation; use buck2_interpreter::parse_import::parse_import_with_config; use buck2_interpreter::parse_import::ParseImportOptions; use buck2_interpreter::parse_import::RelativeImports; use buck2_interpreter::paths::bxl::BxlFilePath; use buck2_interpreter::paths::module::StarlarkModulePath; +use buck2_server_ctx::commands::send_target_cfg_event; use buck2_server_ctx::ctx::ServerCommandContextTrait; +use buck2_server_ctx::global_cfg_options::global_cfg_options_from_client_context; use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; -use buck2_server_ctx::pattern::target_platform_from_client_context; use buck2_server_ctx::template::run_server_command; use buck2_server_ctx::template::ServerCommandTemplate; use dice::DiceComputations; use dice::DiceTransaction; use dupe::Dupe; +use dupe::IterDupedExt; use futures::FutureExt; use itertools::Itertools; -use starlark::errors::Diagnostic; use crate::bxl::calculation::eval_bxl; use crate::bxl::eval::get_bxl_callable; @@ -66,7 +69,6 @@ use crate::bxl::eval::resolve_cli_args; use 
crate::bxl::eval::BxlResolvedCliArgs; use crate::bxl::eval::CliResolutionCtx; use crate::bxl::key::BxlKey; -use crate::bxl::starlark_defs::functions::BxlErrorWithoutStacktrace; pub(crate) async fn bxl_command( ctx: &dyn ServerCommandContextTrait, @@ -92,7 +94,7 @@ impl ServerCommandTemplate for BxlServerCommand { buck2_data::BxlCommandStart { bxl_label } } - fn end_event(&self, _response: &anyhow::Result) -> Self::EndEvent { + fn end_event(&self, _response: &buck2_error::Result) -> Self::EndEvent { let bxl_label = self.req.bxl_label.clone(); buck2_data::BxlCommandEnd { bxl_label } } @@ -112,8 +114,15 @@ impl ServerCommandTemplate for BxlServerCommand { .await } + fn additional_telemetry_errors( + &self, + response: &Self::Response, + ) -> Vec { + response.errors.clone() + } + fn is_success(&self, response: &Self::Response) -> bool { - response.error_messages.is_empty() + response.errors.is_empty() } } @@ -125,21 +134,35 @@ async fn bxl( ) -> anyhow::Result { let cwd = server_ctx.working_dir(); let cell_resolver = ctx.get_cell_resolver().await?; - let bxl_label = parse_bxl_label_from_cli(cwd, &request.bxl_label, &cell_resolver)?; + let cell_alias_resolver = ctx.get_cell_alias_resolver_for_dir(cwd).await?; + let bxl_label = parse_bxl_label_from_cli( + cwd, + &request.bxl_label, + &cell_resolver, + &cell_alias_resolver, + )?; let project_root = server_ctx.project_root().to_string(); - let client_ctx = request.client_context()?; - let global_target_platform = - target_platform_from_client_context(client_ctx, server_ctx, &mut ctx).await?; + let global_cfg_options = global_cfg_options_from_client_context( + request + .target_cfg + .as_ref() + .internal_error_anyhow("target_cfg must be set")?, + server_ctx, + &mut ctx, + ) + .await?; let bxl_args = - match get_bxl_cli_args(cwd, &ctx, &bxl_label, &request.bxl_args, &cell_resolver).await? { + match get_bxl_cli_args(cwd, &mut ctx, &bxl_label, &request.bxl_args, &cell_resolver).await? 
+ { BxlResolvedCliArgs::Resolved(bxl_args) => Arc::new(bxl_args), // Return early if user passed in `--help` BxlResolvedCliArgs::Help => { return Ok(BxlResponse { project_root, - error_messages: Vec::new(), + errors: Vec::new(), + serialized_build_report: None, }); } }; @@ -149,73 +172,106 @@ async fn bxl( .with_context(|| "Invalid final_artifact_materializations") .unwrap(); - let bxl_key = BxlKey::new(bxl_label.clone(), bxl_args, global_target_platform); - - let ctx = &ctx; + let bxl_key = BxlKey::new( + bxl_label.clone(), + bxl_args, + request.print_stacktrace, + global_cfg_options, + ); - let BxlComputeResult { - bxl_result, - materializations, - } = match eval_bxl(ctx, bxl_key.clone()).await { - Ok(result) => result, + let bxl_result = match eval_bxl(&mut ctx, bxl_key.clone()).await { + Ok(result) => result.0, Err(e) => { - if !request.print_stacktrace { - if let Some(shared) = e.downcast_ref::() { - if let Some(diag) = shared.inner().downcast_ref::() { - if let Some(fail_no_stacktrace) = - diag.message.downcast_ref::() - { - let dispatcher = get_dispatcher(); - dispatcher.instant_event(StarlarkFailNoStacktrace { - trace: format!("{}", diag), - }); - dispatcher.console_message( - "Re-run the script with `-v5` to show the full stacktrace" - .to_owned(), - ); - return Err((fail_no_stacktrace.clone()).into()); - } - } - } - } - return Err(e); + // `buck2_error::Error` has more reliable downcasting + let e: buck2_error::Error = e.into(); + + return Err(e.into()); } }; - let materialization_context = ConvertMaterializationContext::with_existing_map( - final_artifact_materializations, - // Note: even though we have an Arc of the materialization map, we must actually clone the map - // so that we don't mutate the materialization state stored when materializing the ensured - // artifacts. We need to clone it so that we don't re-materialize what was already done, but - // in a separate instance of the map. 
- &Arc::new((*materializations).clone()), - ); + let build_results: Option<&Vec> = bxl_result.get_build_result_opt(); + let labeled_configured_build_results = filter_bxl_build_results(build_results); + send_bxl_target_cfg_event(server_ctx, request, &labeled_configured_build_results); + let configured_build_results = labeled_configured_build_results.values(); + let build_result = ensure_artifacts( + &mut ctx, + &final_artifact_materializations.into(), + configured_build_results, + bxl_result.get_artifacts_opt(), + ) + .await; + copy_output(stdout, &mut ctx, bxl_result.get_output_loc()).await?; + copy_output(server_ctx.stderr()?, &mut ctx, bxl_result.get_error_loc()).await?; + + let errors = match build_result { + Ok(_) => vec![], + Err(errors) => errors + .iter() + .map(create_error_report) + .unique_by(|e| e.message.clone()) + .collect(), + }; - let build_result = ensure_artifacts(ctx, &materialization_context, &bxl_result).await; - copy_output(stdout, ctx, bxl_result.get_output_loc()).await?; - copy_output(server_ctx.stderr()?, ctx, bxl_result.get_error_loc()).await?; + let bxl_opts = request + .build_opts + .as_ref() + .expect("should have build options"); + + let serialized_build_report = if bxl_opts.unstable_print_build_report { + let artifact_fs = ctx.get_artifact_fs().await?; + let build_report_opts = BuildReportOpts { + // These are all deprecated for `buck2 build`, so don't need to support them + print_unconfigured_section: false, + unstable_include_other_outputs: false, + unstable_include_failures_build_report: false, + unstable_include_package_project_relative_paths: false, + unstable_build_report_filename: bxl_opts.unstable_build_report_filename.clone(), + }; - let error_messages = match build_result { - Ok(_) => vec![], - Err(errors) => errors.iter().map(|e| format!("{:#}", e)).unique().collect(), + generate_build_report( + build_report_opts, + &artifact_fs, + &cell_resolver, + server_ctx.project_root(), + cwd, + server_ctx.events().trace_id(), + &labeled_configured_build_results + .iter() + .map(|(k, v)| (k.to_owned(), Some(v.to_owned()))) + .collect::>(), + &BTreeMap::default(), + )? 
+ } else { + None }; + Ok(BxlResponse { project_root, - error_messages, + errors, + serialized_build_report, }) } +fn send_bxl_target_cfg_event( + server_ctx: &dyn ServerCommandContextTrait, + request: &buck2_cli_proto::BxlRequest, + labels: &BTreeMap, +) { + send_target_cfg_event(server_ctx.events(), labels.keys(), &request.target_cfg); +} + pub(crate) async fn get_bxl_cli_args( cwd: &ProjectRelativePath, - ctx: &DiceTransaction, + ctx: &mut DiceTransaction, bxl_label: &BxlFunctionLabel, bxl_args: &Vec, cell_resolver: &CellResolver, ) -> anyhow::Result { let cur_package = PackageLabel::from_cell_path(cell_resolver.get_cell_path(&cwd)?.as_ref()); let cell_name = cell_resolver.find(&cwd)?; + let cell_alias_resolver = ctx.get_cell_alias_resolver(cell_name).await?; - let target_alias_resolver = ctx.target_alias_resolver_for_cell(cell_name).await?; + let target_alias_resolver = ctx.target_alias_resolver().await?; let bxl_module = ctx .get_loaded_module(StarlarkModulePath::BxlFile(&bxl_label.bxl_path)) @@ -225,6 +281,7 @@ pub(crate) async fn get_bxl_cli_args( let cli_ctx = CliResolutionCtx { target_alias_resolver, cell_resolver: cell_resolver.dupe(), + cell_alias_resolver, relative_dir: cur_package, dice: ctx, }; @@ -234,7 +291,7 @@ pub(crate) async fn get_bxl_cli_args( async fn copy_output( mut output: W, - dice: &DiceComputations, + dice: &mut DiceComputations<'_>, output_loc: &BuckOutPath, ) -> anyhow::Result<()> { let loc = dice.global_data().get_io_provider().project_root().resolve( @@ -249,7 +306,7 @@ async fn copy_output( // DICE. So now we open the file and read it all into the destination stream. let mut file = tag_result!( "bxl_output_missing", - fs_util::open_file(loc), + fs_util::open_file(loc).map_err(Into::into), quiet: true, daemon_in_memory_state_is_corrupted: true, task: false @@ -259,80 +316,67 @@ async fn copy_output( } async fn ensure_artifacts( - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, materialization_ctx: &MaterializationContext, - bxl_result: &buck2_build_api::bxl::result::BxlResult, -) -> Result<(), Vec> { - match bxl_result { - buck2_build_api::bxl::result::BxlResult::None { .. } => Ok(()), - buck2_build_api::bxl::result::BxlResult::BuildsArtifacts { - built, artifacts, .. - } => { + target_results: impl IntoIterator, + artifacts: Option<&Vec>, +) -> Result<(), Vec> { + if let Some(artifacts) = artifacts { + return { get_dispatcher() .span_async(BxlEnsureArtifactsStart {}, async move { ( - ensure_artifacts_inner(ctx, materialization_ctx, built, artifacts).await, + ensure_artifacts_inner(ctx, materialization_ctx, target_results, artifacts) + .await, BxlEnsureArtifactsEnd {}, ) }) .await - } + }; } + Ok(()) } async fn ensure_artifacts_inner( - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, materialization_ctx: &MaterializationContext, - built: &[BxlBuildResult], + target_results: impl IntoIterator, artifacts: &[ArtifactGroup], -) -> Result<(), Vec> { - let mut futs = vec![]; +) -> Result<(), Vec> { + let mut artifacts_to_materialize: Vec<_> = artifacts.iter().duped().collect(); + let mut errors = Vec::new(); - built.iter().for_each(|res| match res { - BxlBuildResult::Built(BuildTargetResult { outputs, .. 
}) => { - outputs.iter().for_each(|res| match res { + for res in target_results { + for output in &res.outputs { + match output { Ok(artifacts) => { for (artifact, _value) in artifacts.values.iter() { - futs.push( - async { - materialize_artifact_group( - ctx, - &ArtifactGroup::Artifact(artifact.dupe()), - materialization_ctx, - ) - .await?; - Ok(()) - } - .boxed(), - ) + artifacts_to_materialize.push(ArtifactGroup::Artifact(artifact.dupe())) } } - Err(e) => futs.push(futures::future::ready(Err(e.dupe())).boxed()), - }); + Err(e) => errors.push(e.dupe()), + } } + } - BxlBuildResult::None => {} - }); - - artifacts.iter().for_each(|a| { - futs.push( + let materialize_errors = ctx + .compute_join(artifacts_to_materialize, |ctx, artifact| { async move { - materialize_artifact_group(ctx, a, materialization_ctx).await?; + materialize_artifact_group(ctx, &artifact, materialization_ctx).await?; Ok(()) } - .boxed(), - ); - }); - - let res = futures::future::join_all(futs) - .await - .into_iter() - .filter_map(|res| res.err()) - .collect::>(); - if res.is_empty() { Ok(()) } else { Err(res) } + .boxed() + }) + .await; + errors.extend(materialize_errors.into_iter().filter_map(|v| v.err())); + if errors.is_empty() { + Ok(()) + } else { + Err(errors) + } } -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum BxlLabelError { #[error( "bxl label should be of format `//path/to/file.bxl:function_name`, but got `{0}`" @@ -349,16 +393,10 @@ pub(crate) fn parse_bxl_label_from_cli( cwd: &ProjectRelativePath, bxl_label: &str, cell_resolver: &CellResolver, + cell_alias_resolver: &CellAliasResolver, ) -> anyhow::Result { let current_cell = cell_resolver.get_cell_path(cwd)?; - // Targets with cell aliases should be resolved against the cell mapping - // as defined the cell derived from the cwd. - let cell_alias_resolver = cell_resolver - .get(current_cell.cell()) - .unwrap() - .cell_alias_resolver(); - let (bxl_path, bxl_fn) = bxl_label .rsplit_once(':') .ok_or_else(|| BxlLabelError::Format(bxl_label.to_owned()))?; @@ -389,3 +427,22 @@ pub(crate) fn parse_bxl_label_from_cli( name: bxl_fn.to_owned(), }) } + +fn filter_bxl_build_results( + build_results: Option<&Vec>, +) -> BTreeMap { + let mut btree = BTreeMap::new(); + if let Some(build_results) = build_results { + for res in build_results { + match res { + BxlBuildResult::Built { label, result } => { + if btree.insert(label.to_owned(), result.to_owned()).is_some() { + tracing::debug!("Found duped bxl result {}", label); + } + } + BxlBuildResult::None => (), + } + } + } + btree +} diff --git a/app/buck2_bxl/src/lib.rs b/app/buck2_bxl/src/lib.rs index f2daead47c4c8..8124a000a4c7a 100644 --- a/app/buck2_bxl/src/lib.rs +++ b/app/buck2_bxl/src/lib.rs @@ -7,13 +7,9 @@ * of this source tree. 
*/ -#![feature(async_closure)] +#![feature(error_generic_member_access)] #![feature(trait_alias)] #![feature(try_blocks)] -#![feature(provide_any)] - -#[macro_use] -extern crate higher_order_closure; use std::sync::Once; @@ -26,7 +22,7 @@ pub fn init_late_bindings() { static ONCE: Once = Once::new(); ONCE.call_once(|| { bxl::starlark_defs::globals::init_bxl_specific_globals(); - bxl::starlark_defs::context::init_eval_bxl_for_dynamic_output(); + bxl::starlark_defs::context::dynamic::init_eval_bxl_for_dynamic_output(); bxl::calculation::init_bxl_calculation_impl(); commands::init_bxl_server_commands(); }); diff --git a/app/buck2_bxl/src/profile_command.rs b/app/buck2_bxl/src/profile_command.rs index ff672f1d189d5..fb31ac4ac6cf6 100644 --- a/app/buck2_bxl/src/profile_command.rs +++ b/app/buck2_bxl/src/profile_command.rs @@ -12,19 +12,20 @@ use std::sync::Arc; use async_trait::async_trait; use buck2_cli_proto::profile_request::ProfileOpts; -use buck2_cli_proto::HasClientContext; use buck2_cli_proto::ProfileRequest; use buck2_cli_proto::ProfileResponse; use buck2_common::dice::cells::HasCellResolver; use buck2_core::fs::paths::abs_path::AbsPath; -use buck2_interpreter::dice::starlark_profiler::StarlarkProfilerConfiguration; -use buck2_interpreter::starlark_profiler::StarlarkProfileModeOrInstrumentation; +use buck2_error::internal_error_anyhow; +use buck2_error::BuckErrorContext; +use buck2_interpreter::starlark_profiler::config::StarlarkProfilerConfiguration; +use buck2_interpreter::starlark_profiler::mode::StarlarkProfileMode; use buck2_profile::get_profile_response; use buck2_profile::starlark_profiler_configuration_from_request; use buck2_server_ctx::ctx::ServerCommandContextTrait; +use buck2_server_ctx::global_cfg_options::global_cfg_options_from_client_context; use buck2_server_ctx::partial_result_dispatcher::NoPartialResult; use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; -use buck2_server_ctx::pattern::target_platform_from_client_context; use buck2_server_ctx::template::run_server_command; use buck2_server_ctx::template::ServerCommandTemplate; use dice::DiceTransaction; @@ -75,19 +76,27 @@ impl ServerCommandTemplate for BxlProfileServerCommand { ProfileOpts::BxlProfile(opts) => { let output = AbsPath::new(Path::new(&self.req.destination_path))?; - let profile_mode = starlark_profiler_configuration_from_request(&self.req)?; + let profile_mode = starlark_profiler_configuration_from_request( + &self.req, + server_ctx.project_root(), + )?; let profile_data = match profile_mode { StarlarkProfilerConfiguration::ProfileBxl(profile_mode) => { let cwd = server_ctx.working_dir(); let cell_resolver = ctx.get_cell_resolver().await?; - let bxl_label = - parse_bxl_label_from_cli(cwd, &opts.bxl_label, &cell_resolver)?; + let cell_alias_resolver = ctx.get_cell_alias_resolver_for_dir(cwd).await?; + let bxl_label = parse_bxl_label_from_cli( + cwd, + &opts.bxl_label, + &cell_resolver, + &cell_alias_resolver, + )?; let bxl_args = match get_bxl_cli_args( cwd, - &ctx, + &mut ctx, &bxl_label, &opts.bxl_args, &cell_resolver, @@ -102,13 +111,21 @@ impl ServerCommandTemplate for BxlProfileServerCommand { } }; - let client_ctx = self.req.client_context()?; - let global_target_platform = - target_platform_from_client_context(client_ctx, server_ctx, &mut ctx) - .await?; + let global_cfg_options = global_cfg_options_from_client_context( + opts.target_cfg + .as_ref() + .internal_error_anyhow("target_cfg must be set")?, + server_ctx, + &mut ctx, + ) + .await?; - let bxl_key = - 
BxlKey::new(bxl_label.clone(), bxl_args, global_target_platform); + let bxl_key = BxlKey::new( + bxl_label.clone(), + bxl_args, + /* force print stacktrace */ false, + global_cfg_options, + ); server_ctx .cancellation_context() @@ -118,9 +135,7 @@ impl ServerCommandTemplate for BxlProfileServerCommand { eval( &mut ctx, bxl_key, - StarlarkProfileModeOrInstrumentation::Profile( - profile_mode, - ), + StarlarkProfileMode::Profile(profile_mode), observer, ) .await? @@ -134,11 +149,11 @@ impl ServerCommandTemplate for BxlProfileServerCommand { .await? } _ => { - return Err(anyhow::anyhow!("Incorrect profile mode (internal error)")); + return Err(internal_error_anyhow!("Incorrect profile mode")); } }; - get_profile_response(profile_data, &self.req, output) + get_profile_response(profile_data, output) } _ => { return Err(anyhow::anyhow!( diff --git a/app/buck2_certs/BUCK b/app/buck2_certs/BUCK new file mode 100644 index 0000000000000..f472c72171b62 --- /dev/null +++ b/app/buck2_certs/BUCK @@ -0,0 +1,24 @@ +load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") + +oncall("build_infra") + +rust_library( + name = "buck2_certs", + srcs = glob(["src/**/*.rs"]), + test_env = {"TEST_CERT_LOCATIONS": "$(source test/testdata)"}, + deps = [ + "fbsource//third-party/rust:anyhow", + "fbsource//third-party/rust:rustls", + "fbsource//third-party/rust:rustls-native-certs", + "fbsource//third-party/rust:rustls-pemfile", + "fbsource//third-party/rust:tokio", + "fbsource//third-party/rust:tracing", + "fbsource//third-party/rust:x509-parser", + "//buck2/app/buck2_error:buck2_error", + "//buck2/app/buck2_util:buck2_util", + # @oss-disable: "//buck2/facebook/find_certs:find_certs", + "//buck2/gazebo/dupe:dupe", + "//buck2/gazebo/gazebo:gazebo", + # @oss-disable: "//common/rust/cpe:cpe", + ], +) diff --git a/app/buck2_certs/Cargo.toml b/app/buck2_certs/Cargo.toml new file mode 100644 index 0000000000000..a82a812826158 --- /dev/null +++ b/app/buck2_certs/Cargo.toml @@ -0,0 +1,25 @@ +[package] +edition = "2021" +license = { workspace = true } +name = "buck2_certs" +repository = { workspace = true } +version = "0.1.0" + +[dependencies] +anyhow = { workspace = true } +rustls = { workspace = true } +rustls-native-certs = { workspace = true } +rustls-pemfile = { workspace = true } +tokio = { workspace = true } + +tracing = { workspace = true } +x509-parser = { workspace = true } + +dupe = { workspace = true } +gazebo = { workspace = true } + +buck2_error = { workspace = true } +buck2_util = { workspace = true } + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ["cfg(fbcode_build)"] } diff --git a/app/buck2_certs/src/certs.rs b/app/buck2_certs/src/certs.rs new file mode 100644 index 0000000000000..4655040cc2b3d --- /dev/null +++ b/app/buck2_certs/src/certs.rs @@ -0,0 +1,164 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::ffi::OsString; +use std::path::Path; + +use anyhow::Context; +use gazebo::prelude::VecExt; +use rustls::Certificate; +use rustls::ClientConfig; +use rustls::PrivateKey; +use rustls::RootCertStore; + +/// Load system root certs, trying a few different methods to get a valid root +/// certificate store. 
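Before the implementation, a consumer-side sketch of how the helpers in this new file compose; the wrapper function is illustrative only, and `tls_config_with_system_roots` is defined further down in the file:

```rust
use buck2_certs::certs::tls_config_with_system_roots;

// Sketch: build a rustls ClientConfig trusted against the system roots.
// Handing the config to an HTTP client (hyper, reqwest, ...) is up to the caller.
async fn client_tls_config() -> anyhow::Result<rustls::ClientConfig> {
    let config = tls_config_with_system_roots().await?;
    Ok(config)
}
```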
+async fn load_system_root_certs() -> anyhow::Result<RootCertStore> { + let native_certs = rustls_native_certs::load_native_certs() + .context("Error loading system root certificates native frameworks"); + + let root_certs = + // Load the system root certificates using native frameworks. + if let Ok(certs) = native_certs { + certs.into_map(|cert| cert.0) + } + else if let Some(path) = find_root_ca_certs() { + tracing::debug!( + "Failed loading certs from native OS, falling back to disk at: {}", + path.to_string_lossy(), + ); + load_certs(&path) + .await + .with_context(|| format!("Loading root certs from: {}", path.to_string_lossy()))? + } else { + if let Err(e) = native_certs { + return Err(e); + } + + return Err(anyhow::anyhow!("Unable to load system root certificates")); + }; + + // According to [`rustls` documentation](https://docs.rs/rustls/latest/rustls/struct.RootCertStore.html#method.add_parsable_certificates), + // it's better to only add parseable certs when loading system certs because + // there are typically many system certs and not all of them can be valid. This + // is pertinent for e.g. macOS which may have a lot of old certificates that may + // not parse correctly. + let mut roots = RootCertStore::empty(); + let (valid, invalid) = roots.add_parsable_certificates(root_certs.as_slice()); + + // But make sure we get at least _one_ valid cert, otherwise we legitimately won't be + // able to make any connections via https. + if valid == 0 { + return Err(anyhow::anyhow!( + "Error loading system certs: unable to find any valid system certs" + )); + } + tracing::debug!("Loaded {} valid system root certs", valid); + tracing::debug!("Loaded {} invalid system root certs", invalid); + Ok(roots) +} + +// Load private key from the given path +async fn load_key<P: AsRef<Path>>(key: P) -> anyhow::Result<PrivateKey> { + let key = key.as_ref(); + + let key_data = tokio::fs::read(key) + .await + .with_context(|| format!("Error opening key file `{}`", key.display()))?; + + let private_key = rustls_pemfile::pkcs8_private_keys(&mut key_data.as_slice()) + .with_context(|| format!("Error parsing key file `{}`", key.display()))? + .pop() + .with_context(|| format!("Found no private key in key file `{}`", key.display()))?; + let key = PrivateKey(private_key); + + Ok(key) +} + +/// Deserialize certificate pair at `cert` and `key` into structures that can +/// be inserted into rustls CertStore.
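The cert/key pair loaded by the helper documented just above feeds the client-auth config builder `tls_config_with_single_cert`, defined later in this file. A hedged usage sketch follows; the paths are made up, and real callers would discover them via `find_internal_cert`:

```rust
use buck2_certs::certs::tls_config_with_single_cert;

// Sketch: mutual-TLS client config from a PEM cert/key pair on disk.
async fn mtls_config() -> anyhow::Result<rustls::ClientConfig> {
    tls_config_with_single_cert("/etc/certs/client.pem", "/etc/certs/client.key").await
}
```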
+
+// Load private key from the given path
+async fn load_key<P: AsRef<Path>>(key: P) -> anyhow::Result<PrivateKey> {
+    let key = key.as_ref();
+
+    let key_data = tokio::fs::read(key)
+        .await
+        .with_context(|| format!("Error opening key file `{}`", key.display()))?;
+
+    let private_key = rustls_pemfile::pkcs8_private_keys(&mut key_data.as_slice())
+        .with_context(|| format!("Error parsing key file `{}`", key.display()))?
+        .pop()
+        .with_context(|| format!("Found no private key in key file `{}`", key.display()))?;
+    let key = PrivateKey(private_key);
+
+    Ok(key)
+}
+
+/// Deserialize certificate pair at `cert` and `key` into structures that can
+/// be inserted into rustls CertStore.
+async fn load_cert_pair<P: AsRef<Path>>(
+    cert: P,
+    key: P,
+) -> anyhow::Result<(Vec<Certificate>, PrivateKey)> {
+    let certs = load_certs(cert).await?.into_map(Certificate);
+    let key = load_key(key).await?;
+
+    Ok((certs, key))
+}
+
+pub async fn tls_config_with_system_roots() -> anyhow::Result<ClientConfig> {
+    let system_roots = load_system_root_certs().await?;
+    Ok(ClientConfig::builder()
+        .with_safe_defaults()
+        .with_root_certificates(system_roots)
+        .with_no_client_auth())
+}
+
+pub async fn tls_config_with_single_cert<P: AsRef<Path>>(
+    cert_path: P,
+    key_path: P,
+) -> anyhow::Result<ClientConfig> {
+    let system_roots = load_system_root_certs().await?;
+    let (cert, key) = load_cert_pair(cert_path, key_path)
+        .await
+        .context("Error loading certificate pair")?;
+    ClientConfig::builder()
+        .with_safe_defaults()
+        .with_root_certificates(system_roots)
+        .with_client_auth_cert(cert, key)
+        .context("Error creating TLS config with cert and key path")
+}
+
+// Load certs from the given path; returns the bytes of the certs so the caller
+// can decide what to do with them
+pub(crate) async fn load_certs<P: AsRef<Path>>(cert_path: P) -> anyhow::Result<Vec<Vec<u8>>> {
+    let cert_path = cert_path.as_ref();
+
+    let cert_data = tokio::fs::read(cert_path)
+        .await
+        .with_context(|| format!("Error reading certificate file `{}`", cert_path.display()))?;
+
+    let certs = rustls_pemfile::certs(&mut cert_data.as_slice())
+        .with_context(|| format!("Error parsing certificate file `{}`", cert_path.display()))?;
+
+    Ok(certs)
+}
+
+/// Find root CA certs.
+///
+/// In OSS or non-fbcode builds, returns None; we do not support hardcoded root
+/// certificates in non-fbcode builds and rely solely on rustls-native-certs.
+pub(crate) fn find_root_ca_certs() -> Option<OsString> {
+    #[cfg(fbcode_build)]
+    return find_certs::find_root_ca_certs();
+
+    #[cfg(not(fbcode_build))]
+    return None;
+}
+
+/// Find TLS certs.
+///
+/// Return `None` in Cargo or open source builds; we do not support internal certs
+/// in these builds.
+pub fn find_internal_cert() -> Option<OsString> {
+    #[cfg(fbcode_build)]
+    return find_certs::find_tls_cert();
+
+    #[cfg(not(fbcode_build))]
+    return None;
+}
+
+/// Whether the machine buck is running on supports vpnless operation.
+pub fn supports_vpnless() -> bool {
+    #[cfg(fbcode_build)]
+    return cpe::x2p::supports_vpnless();
+
+    #[cfg(not(fbcode_build))]
+    return false;
+}
diff --git a/app/buck2_certs/src/lib.rs b/app/buck2_certs/src/lib.rs
new file mode 100644
index 0000000000000..b273018ab3969
--- /dev/null
+++ b/app/buck2_certs/src/lib.rs
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+#![feature(error_generic_member_access)]
+#![feature(if_let_guard)]
+
+pub mod certs;
+pub mod validate;
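Together these helpers give callers a two-tier choice: mutual TLS when an internal client cert can be found, plain server-auth TLS otherwise. A hedged usage sketch; the caller is hypothetical, and it assumes the discovered PEM holds both the certificate and its key:

use std::sync::Arc;

use buck2_certs::certs;

// Hypothetical caller that prefers mutual TLS when an internal cert exists.
async fn make_client_config() -> anyhow::Result<Arc<rustls::ClientConfig>> {
    let config = match certs::find_internal_cert() {
        // Assumption: the discovered PEM bundles the certificate and its key.
        Some(path) => certs::tls_config_with_single_cert(&path, &path).await?,
        None => certs::tls_config_with_system_roots().await?,
    };
    Ok(Arc::new(config))
}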
diff --git a/app/buck2_certs/src/validate.rs b/app/buck2_certs/src/validate.rs
new file mode 100644
index 0000000000000..a51784f0e34f5
--- /dev/null
+++ b/app/buck2_certs/src/validate.rs
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::ffi::OsString;
+use std::sync::Arc;
+
+use buck2_util::process::async_background_command;
+use dupe::Dupe;
+use tokio::sync::Mutex;
+
+use crate::certs;
+use crate::certs::load_certs;
+
+#[derive(Debug, buck2_error::Error)]
+#[buck2(environment, tag = NoValidCerts)]
+enum InvalidCertsError {
+    #[error(
+        "Could not find valid root certs. Please check your machine certificate settings. Instructions: https://fburl.com/devcert\nFailure Reason: {0}"
+    )]
+    SystemCerts(String),
+    #[error(
+        "Could not find valid client certs. Try refreshing your internal certs or re-login and try again. Instructions: https://fburl.com/devcert\nFailure Reason: {0}"
+    )]
+    ClientCerts(String),
+    #[error(
+        "Could not find valid certs for VPNless. Please refresh/renew certs with SKS agent and try again. Instructions: https://fburl.com/devcert"
+    )]
+    VPNlessCerts,
+}
+
+/// Use SKS Agent to check the status of the VPNless cert in the scenario that VPNless is supported.
+/// The SKS Agent binary is named differently on Windows, so we need to use the appropriate command for the OS.
+async fn is_vpnless_cert_valid() -> bool {
+    let sks_agent = if cfg!(target_os = "windows") {
+        "sks-agent"
+    } else {
+        "fb-sks-agent"
+    };
+
+    // Post suggests using the following for the VPN-less scenario
+    // https://fb.workplace.com/groups/382932749004606/permalink/1473311023300101/
+    let cmd_result = async_background_command(sks_agent)
+        .args(["renew", "--status", "--corp-x509"])
+        .output()
+        .await;
+
+    match cmd_result {
+        Ok(cmd_output) => String::from_utf8_lossy(&cmd_output.stdout).starts_with("true"),
+        Err(_) => false,
+    }
+}
+
+/// Check that the provided certs exist and are still valid at the current time.
+async fn verify(path: &OsString) -> anyhow::Result<()> {
+    let certs = load_certs(path).await?;
+    if certs.is_empty() {
+        return Err(anyhow::anyhow!(
+            "Could not find any certs to validate at '{}'",
+            path.to_string_lossy()
+        ));
+    }
+
+    let valid = certs.iter().any(|bytes| {
+        let x509_cert = match x509_parser::parse_x509_certificate(bytes) {
+            Ok((_, x509_cert)) => x509_cert,
+            Err(_) => return false,
+        };
+
+        x509_cert.validity().is_valid()
+    });
+
+    if !valid {
+        return Err(anyhow::anyhow!(
+            "Certificate Expired: expired certs found at '{}'",
+            path.to_string_lossy()
+        ));
+    }
+
+    Ok(())
+}
+
+pub async fn validate_certs() -> anyhow::Result<()> {
+    if cfg!(not(fbcode_build)) {
+        return Ok(());
+    }
+
+    if certs::supports_vpnless() {
+        if is_vpnless_cert_valid().await {
+            return Ok(());
+        }
+
+        return Err(InvalidCertsError::VPNlessCerts.into());
+    } else {
+        let err_msg = "Could not find any files that may contain certificates";
+        // System certs are unlikely to be invalid, but if they are, it's a bigger
+        // issue than invalid client certs, so we check them first
+        match certs::find_root_ca_certs() {
+            Some(root_certs) => {
+                if let Err(e) = verify(&root_certs).await {
+                    return Err(InvalidCertsError::SystemCerts(e.to_string()).into());
+                }
+            }
+            None => return Err(InvalidCertsError::SystemCerts(err_msg.to_owned()).into()),
+        }
+
+        match certs::find_internal_cert() {
+            Some(client_certs) => {
+                if let Err(e) = verify(&client_certs).await {
+                    return Err(InvalidCertsError::ClientCerts(e.to_string()).into());
+                }
+            }
+            None => return Err(InvalidCertsError::ClientCerts(err_msg.to_owned()).into()),
+        }
+    }
+
+    Ok(())
+}
+
+#[derive(Clone, Dupe)]
+pub struct CertState {
+    pub state: Arc<Mutex<bool>>,
+}
+
+impl CertState {
+    pub async fn new() -> Self {
+        Self {
+            state: Arc::new(Mutex::new(validate_certs().await.is_ok())),
+        }
+    }
+}
+
+pub async fn check_cert_state(cert_state: CertState) -> Option<anyhow::Error> {
+    let mut valid = cert_state.state.lock().await;
+
+    // If the previous state was an error, re-check regardless of the current state,
+    // since we expect users to actively fix the issue and retry
+    if !*valid {
+        match validate_certs().await {
+            Ok(_) => *valid = true,
+            Err(e) => return Some(e),
+        }
+    }
+
+    None
+}
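`CertState` caches a single boolean behind an async mutex, and `check_cert_state` re-validates only after a failure, on the theory that users fix their certs and retry. A sketch of how a long-running daemon might consult it; the handler and its wiring are hypothetical:

use anyhow::Context;
use buck2_certs::validate::check_cert_state;
use buck2_certs::validate::CertState;
use dupe::Dupe;

// Hypothetical per-command gate in a long-running daemon.
async fn gate_on_certs(cert_state: &CertState) -> anyhow::Result<()> {
    // Cheap when the cached state is "valid"; re-validates after a failure.
    if let Some(e) = check_cert_state(cert_state.dupe()).await {
        return Err(e).context("Refusing to run command: invalid certs");
    }
    Ok(())
}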
+
+#[cfg(fbcode_build)]
+#[cfg(test)]
+mod tests {
+    use std::env;
+    use std::ffi::OsString;
+
+    use crate::validate::verify;
+
+    #[tokio::test]
+    async fn invalid_certs_test() {
+        let base_path = env::var("TEST_CERT_LOCATIONS").unwrap();
+
+        let empty_path = format!("{}/test_empty.pem", base_path);
+        let empty_res = verify(&OsString::from(empty_path)).await;
+        assert!(empty_res.is_err());
+        let err_msg = empty_res.unwrap_err().to_string();
+        assert!(
+            err_msg.starts_with("Could not find any certs to validate"),
+            "Actual: {}",
+            err_msg
+        );
+
+        let invalid_path = format!("{}/test_invalid.pem", base_path);
+        let invalid_res = verify(&OsString::from(invalid_path)).await;
+        assert!(invalid_res.is_err());
+        let err_msg = invalid_res.unwrap_err().to_string();
+        assert!(
+            err_msg.starts_with("Could not find any certs to validate"),
+            "Actual: {}",
+            err_msg
+        );
+
+        // Self-signed cert for testing. Expired 05/31/2024
+        let expired_path = format!("{}/test_expired.pem", base_path);
+        let expired_res = verify(&OsString::from(expired_path)).await;
+        assert!(expired_res.is_err());
+        let err_msg = expired_res.unwrap_err().to_string();
+        assert!(
+            err_msg.starts_with("Certificate Expired"),
+            "Actual: {}",
+            err_msg
+        );
+    }
+
+    #[tokio::test]
+    async fn valid_cert_test() {
+        // Self-signed cert for testing. Should expire in 100 years if this is around for that long!
+        // Generated using:
+        // 1. openssl genrsa -out mykey.pem 2048
+        // 2. openssl req -new -key mykey.pem -out mycsr.csr
+        // 3.
openssl x509 -req -in mycsr.csr -signkey mykey.pem -out x509.crt -days 36500 + // Copy content in x509.crt + let base_path = env::var("TEST_CERT_LOCATIONS").unwrap(); + let valid_path = format!("{}/test_valid.pem", base_path); + assert_eq!(true, verify(&OsString::from(valid_path)).await.is_ok()); + } +} diff --git a/examples/no_prelude/prelude/prelude.bzl b/app/buck2_certs/test/testdata/test_empty.pem similarity index 100% rename from examples/no_prelude/prelude/prelude.bzl rename to app/buck2_certs/test/testdata/test_empty.pem diff --git a/app/buck2_certs/test/testdata/test_expired.pem b/app/buck2_certs/test/testdata/test_expired.pem new file mode 100644 index 0000000000000..d9f59d50195f1 --- /dev/null +++ b/app/buck2_certs/test/testdata/test_expired.pem @@ -0,0 +1,15 @@ +-----BEGIN CERTIFICATE----- +MIICRjCCAa+gAwIBAgIBADANBgkqhkiG9w0BAQ0FADBAMQswCQYDVQQGEwJ1czET +MBEGA1UECAwKV2FzaGluZ3RvbjENMAsGA1UECgwETWV0YTENMAsGA1UEAwwEQnVj +azAeFw0yNDA1MzExODMzMjFaFw0yNDA2MDExODMzMjFaMEAxCzAJBgNVBAYTAnVz +MRMwEQYDVQQIDApXYXNoaW5ndG9uMQ0wCwYDVQQKDARNZXRhMQ0wCwYDVQQDDARC +dWNrMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDHgu9ZYLZCd6MdI3mLTwCD +La8u8Qqt10rlyUZ7PxivRHVKKa7MtFD9GniYq3KxeSmUG2MCZaqlMRWsef+4tXXy +6jXalPZKEQEqupc9QCBcAeQvWL+wpzRPG4eYambnhMbI+I7qUwb0LKZssV9kxTzm +ulA+OPR78NBOuP2a7HECVwIDAQABo1AwTjAdBgNVHQ4EFgQUvH8Of9v7NJPpufEf +MYigdGf4QCowHwYDVR0jBBgwFoAUvH8Of9v7NJPpufEfMYigdGf4QCowDAYDVR0T +BAUwAwEB/zANBgkqhkiG9w0BAQ0FAAOBgQCCovywmK/CpX/a6Uy/p0NoVBk/Mv7S +rZDz0fxhm4ae0KLTXZVKQb/gHCbbZwfurv1wu2gcYrxSlHOPAC9EhWBq7BSOowZ6 +lXKnDGs/z5T+p7fuwjNj2qqBc3Ap/v430KvLQo5NH3nX0ur3R7J4zFOO2a/uwtpw +Bx4/wCWapqMUyw== +-----END CERTIFICATE----- diff --git a/app/buck2_certs/test/testdata/test_invalid.pem b/app/buck2_certs/test/testdata/test_invalid.pem new file mode 100644 index 0000000000000..217b5d1dfea48 --- /dev/null +++ b/app/buck2_certs/test/testdata/test_invalid.pem @@ -0,0 +1 @@ +This is not valid pem content diff --git a/app/buck2_certs/test/testdata/test_valid.pem b/app/buck2_certs/test/testdata/test_valid.pem new file mode 100644 index 0000000000000..0295abc217c2d --- /dev/null +++ b/app/buck2_certs/test/testdata/test_valid.pem @@ -0,0 +1,15 @@ +-----BEGIN CERTIFICATE----- +MIICSDCCAbGgAwIBAgIBADANBgkqhkiG9w0BAQ0FADBAMQswCQYDVQQGEwJ1czET +MBEGA1UECAwKV2FzaGluZ3RvbjENMAsGA1UECgwETWV0YTENMAsGA1UEAwwEQnVj +azAgFw0yNDA1MzExODM2NTFaGA8yMTI0MDUwNzE4MzY1MVowQDELMAkGA1UEBhMC +dXMxEzARBgNVBAgMCldhc2hpbmd0b24xDTALBgNVBAoMBE1ldGExDTALBgNVBAMM +BEJ1Y2swgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAL3zwyj19w2+Q3WR7S0Y +oZiHp+Yv6YIj824PPyVV/vFQr43BAScCic1nSZynHLmQEQA8EDrdNdQt/XvSW1hk +/IAV+h/9tnt5IlJ4f+GtNDVvYm749N45vnbeIghGqi9a2O5Rq8UbODQxN1dp6/JA +0M2RGIWFuC7J0XyugmZYQ0s1AgMBAAGjUDBOMB0GA1UdDgQWBBSUKFZzjdxaHECE +INHhx66lztPozTAfBgNVHSMEGDAWgBSUKFZzjdxaHECEINHhx66lztPozTAMBgNV +HRMEBTADAQH/MA0GCSqGSIb3DQEBDQUAA4GBAJUtNrGWSCe2B3oh0xTN7ovieFXw +tw4vIDXD37nIRxw3hJEUOy6+/IsyvMK8zKSG1gDfFWsFtFtI1F/g3gqUornjvpHA +E4miAiU9J+PZbNobBKzhYcb6DppuNFr0Q1mNq0oxmodDCR4+pSCZJJETorhtF96z +nzcrwb6QVFOKt510 +-----END CERTIFICATE----- diff --git a/app/buck2_cfg_constructor/BUCK b/app/buck2_cfg_constructor/BUCK index 67f7a4faafc07..7db4272d2600d 100644 --- a/app/buck2_cfg_constructor/BUCK +++ b/app/buck2_cfg_constructor/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -12,11 +11,12 @@ rust_library( "fbsource//third-party/rust:anyhow", "fbsource//third-party/rust:async-trait", "fbsource//third-party/rust:derive_more", - 
"fbsource//third-party/rust:thiserror", + "fbsource//third-party/rust:futures", "//buck2/allocative/allocative:allocative", "//buck2/app/buck2_build_api:buck2_build_api", "//buck2/app/buck2_common:buck2_common", "//buck2/app/buck2_core:buck2_core", + "//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_events:buck2_events", "//buck2/app/buck2_interpreter:buck2_interpreter", "//buck2/app/buck2_interpreter_for_build:buck2_interpreter_for_build", diff --git a/app/buck2_cfg_constructor/Cargo.toml b/app/buck2_cfg_constructor/Cargo.toml index 2e7a8bdfb9b07..c96f4f732cc5b 100644 --- a/app/buck2_cfg_constructor/Cargo.toml +++ b/app/buck2_cfg_constructor/Cargo.toml @@ -1,8 +1,10 @@ [package] +description = "Implementation of configuration constructor for modifier feature" +edition = "2021" +license = { workspace = true } name = "buck2_cfg_constructor" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Implementation of configuration constructor for modifier feature" [dependencies] allocative = { workspace = true } @@ -11,14 +13,14 @@ async-trait = { workspace = true } derive_more = { workspace = true } dice = { workspace = true } dupe = { workspace = true } +futures = { workspace = true } starlark = { workspace = true } -thiserror = { workspace = true } -buck2_common = { workspace = true } -buck2_configured = { workspace = true } -buck2_events = { workspace = true } buck2_build_api = { workspace = true } +buck2_common = { workspace = true } buck2_core = { workspace = true } +buck2_error = { workspace = true } +buck2_events = { workspace = true } buck2_interpreter = { workspace = true } buck2_interpreter_for_build = { workspace = true } buck2_node = { workspace = true } diff --git a/app/buck2_cfg_constructor/src/calculation.rs b/app/buck2_cfg_constructor/src/calculation.rs index f9b12ecf05e59..f0da6c091abf6 100644 --- a/app/buck2_cfg_constructor/src/calculation.rs +++ b/app/buck2_cfg_constructor/src/calculation.rs @@ -10,20 +10,20 @@ use std::sync::Arc; use allocative::Allocative; +use anyhow::Context; use async_trait::async_trait; use buck2_common::dice::cells::HasCellResolver; -use buck2_common::result::SharedResult; -use buck2_common::result::ToSharedResultExt; -use buck2_common::result::ToUnsharedResultExt; -use buck2_core::bzl::ImportPath; -use buck2_core::cells::cell_path::CellPathRef; use buck2_core::cells::paths::CellRelativePath; use buck2_core::configuration::data::ConfigurationData; -use buck2_interpreter::paths::package::PackageFilePath; +use buck2_core::package::PackageLabel; +use buck2_core::target::label::label::TargetLabel; use buck2_interpreter_for_build::interpreter::package_file_calculation::EvalPackageFile; use buck2_node::cfg_constructor::CfgConstructorCalculationImpl; use buck2_node::cfg_constructor::CfgConstructorImpl; -use buck2_node::cfg_constructor::CFG_CONSTRUCTOR_CALCULATION_IMPL; +use buck2_node::metadata::value::MetadataValue; +use buck2_node::nodes::unconfigured::TargetNodeRef; +use buck2_node::rule_type::RuleType; +use buck2_node::super_package::SuperPackage; use derive_more::Display; use dice::CancellationContext; use dice::DiceComputations; @@ -31,79 +31,100 @@ use dice::Key; use dupe::Dupe; use dupe::OptionDupedExt; +#[derive(Debug, buck2_error::Error)] +enum CalculationCfgConstructorError { + #[error( + "Usage of both `modifiers` attribute and modifiers in metadata is not allowed for target `{0}`" + )] + TargetModifiersAttrAndMetadataNotAllowed(TargetLabel), +} + pub struct CfgConstructorCalculationInstance; async fn 
get_cfg_constructor_uncached(
-    ctx: &DiceComputations,
+    ctx: &mut DiceComputations<'_>,
 ) -> anyhow::Result<Option<Arc<dyn CfgConstructorImpl>>> {
     let root_cell = ctx.get_cell_resolver().await?.root_cell();
-    let package_file_path =
-        PackageFilePath::for_dir(CellPathRef::new(root_cell, CellRelativePath::empty()));
+    let package_label = PackageLabel::new(root_cell, CellRelativePath::empty());
     // This returns empty super package if `PACKAGE` file does not exist.
-    let super_package = ctx.eval_package_file(&package_file_path).await?;
+    let super_package = ctx.eval_package_file(package_label).await?;
     Ok(super_package.cfg_constructor().duped())
 }
 
-#[async_trait]
-impl CfgConstructorCalculationImpl for CfgConstructorCalculationInstance {
-    async fn get_cfg_constructor(
-        &self,
-        ctx: &DiceComputations,
-    ) -> anyhow::Result<Option<Arc<dyn CfgConstructorImpl>>> {
-        #[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)]
-        struct GetCfgConstructorKey;
-
-        #[async_trait]
-        impl Key for GetCfgConstructorKey {
-            type Value = SharedResult<Option<Arc<dyn CfgConstructorImpl>>>;
+async fn get_cfg_constructor(
+    ctx: &mut DiceComputations<'_>,
+) -> anyhow::Result<Option<Arc<dyn CfgConstructorImpl>>> {
+    #[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)]
+    struct GetCfgConstructorKey;
 
-            async fn compute(
-                &self,
-                ctx: &mut DiceComputations,
-                _cancellations: &CancellationContext,
-            ) -> Self::Value {
-                get_cfg_constructor_uncached(ctx).await.shared_error()
-            }
+    #[async_trait]
+    impl Key for GetCfgConstructorKey {
+        type Value = buck2_error::Result<Option<Arc<dyn CfgConstructorImpl>>>;
 
-            fn equality(_x: &Self::Value, _y: &Self::Value) -> bool {
-                false
-            }
+        async fn compute(
+            &self,
+            ctx: &mut DiceComputations,
+            _cancellations: &CancellationContext,
+        ) -> Self::Value {
+            get_cfg_constructor_uncached(ctx)
+                .await
+                .map_err(buck2_error::Error::from)
         }
 
-        ctx.compute(&GetCfgConstructorKey).await?.unshared_error()
+        fn equality(_x: &Self::Value, _y: &Self::Value) -> bool {
+            false
+        }
     }
+    ctx.compute(&GetCfgConstructorKey)
+        .await?
+        .map_err(anyhow::Error::from)
+}
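`GetCfgConstructorKey` follows the standard DICE caching pattern: a unit key whose `compute` does the work once per DICE version, with `equality` returning `false` so dependents re-run whenever the key is recomputed. A minimal sketch of the shape with a hypothetical key, assuming the `dice` and `buck2_error` APIs as used in this file:

use allocative::Allocative;
use async_trait::async_trait;
use derive_more::Display;
use dice::CancellationContext;
use dice::DiceComputations;
use dice::Key;
use dupe::Dupe;

// Hypothetical key: DICE caches the result of `compute` against this key.
#[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)]
struct ExampleKey;

#[async_trait]
impl Key for ExampleKey {
    type Value = buck2_error::Result<u64>;

    async fn compute(
        &self,
        _ctx: &mut DiceComputations,
        _cancellations: &CancellationContext,
    ) -> Self::Value {
        // Stand-in for the real work; runs once and is then served from cache.
        Ok(42)
    }

    fn equality(_x: &Self::Value, _y: &Self::Value) -> bool {
        // Treat results as never equal so dependents re-run whenever this
        // key is recomputed.
        false
    }
}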
+
+#[async_trait]
+impl CfgConstructorCalculationImpl for CfgConstructorCalculationInstance {
     async fn eval_cfg_constructor(
         &self,
-        ctx: &DiceComputations,
+        ctx: &mut DiceComputations<'_>,
+        target: TargetNodeRef<'_>,
+        super_package: &SuperPackage,
         cfg: ConfigurationData,
+        cli_modifiers: &Arc<Vec<String>>,
+        rule_type: &RuleType,
     ) -> anyhow::Result<ConfigurationData> {
-        #[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)]
+        #[derive(Clone, Display, Dupe, Debug, Eq, Hash, PartialEq, Allocative)]
+        #[display("CfgConstructorInvocationKey")]
         struct CfgConstructorInvocationKey {
+            package_cfg_modifiers: Option<MetadataValue>,
+            target_cfg_modifiers: Option<MetadataValue>,
             cfg: ConfigurationData,
+            cli_modifiers: Arc<Vec<String>>,
+            rule_type: RuleType,
         }
 
         #[async_trait]
         impl Key for CfgConstructorInvocationKey {
-            type Value = SharedResult<ConfigurationData>;
+            type Value = buck2_error::Result<ConfigurationData>;
 
             async fn compute(
                 &self,
                 ctx: &mut DiceComputations,
                 _cancellations: &CancellationContext,
             ) -> Self::Value {
-                // Invoke eval fn from global instance of cfg constructors
-                match CFG_CONSTRUCTOR_CALCULATION_IMPL
-                    .get()?
-                    .get_cfg_constructor(ctx)
+                let cfg_constructor = get_cfg_constructor(ctx)
                     .await?
-                {
-                    Some(cfg_constructor) => {
-                        cfg_constructor.eval(ctx, &self.cfg).await.shared_error()
-                    }
-                    // By this point we should have already confirmed that the global cfg constructor instance exists
-                    None => unreachable!("Global cfg constructor instance should exist."),
-                }
+                    .context("Internal error: Global cfg constructor instance should exist")?;
+                cfg_constructor
+                    .eval(
+                        ctx,
+                        &self.cfg,
+                        self.package_cfg_modifiers.as_ref(),
+                        self.target_cfg_modifiers.as_ref(),
+                        &self.cli_modifiers,
+                        &self.rule_type,
+                    )
+                    .await
+                    .map_err(buck2_error::Error::from)
             }
 
             fn equality(x: &Self::Value, y: &Self::Value) -> bool {
@@ -114,20 +135,49 @@ impl CfgConstructorCalculationImpl for CfgConstructorCalculationInstance {
             }
         }
 
-        match self.get_cfg_constructor(ctx).await? {
-            Some(_) => {
-                let key = CfgConstructorInvocationKey { cfg };
-                Ok(ctx.compute(&key).await??)
-            }
+        let Some(cfg_constructor) = get_cfg_constructor(ctx).await? else {
             // To facilitate rollout of modifiers, return original configuration if
             // no cfg constructors are available.
-            None => Ok(cfg),
+            return Ok(cfg);
+        };
+        let modifier_key = cfg_constructor.key();
+        let package_cfg_modifiers = super_package
+            .package_values()
+            .get_package_value_json(modifier_key)?
+            .map(MetadataValue::new);
+
+        let metadata_modifiers = target.metadata()?.and_then(|m| m.get(modifier_key));
+        let target_modifiers = target.target_modifiers()?;
+        let target_cfg_modifiers = match (metadata_modifiers, target_modifiers) {
+            (None, Some(t)) if !t.is_empty() => Some(MetadataValue(t.as_json())),
+            (Some(_), Some(t)) if !t.is_empty() => {
+                return Err(
+                    CalculationCfgConstructorError::TargetModifiersAttrAndMetadataNotAllowed(
+                        target.label().dupe(),
+                    )
+                    .into(),
+                );
+            }
+            (Some(m), _) => Some(m.dupe()),
+            _ => None,
+        };
+
+        // If there are no PACKAGE/target/cli modifiers, return the original configuration
+        // without computing the DICE call
+        // TODO(scottcao): This is just for rollout purposes. Remove once modifiers are rolled out
+        if package_cfg_modifiers.is_none()
+            && target_cfg_modifiers.is_none()
+            && cli_modifiers.is_empty()
+        {
+            return Ok(cfg);
         }
-    }
-}
 
-#[derive(Debug, PartialEq, Eq)]
-struct CfgConstructorLocation {
-    pub import_path: ImportPath,
-    pub function: String,
+        let key = CfgConstructorInvocationKey {
+            package_cfg_modifiers,
+            target_cfg_modifiers,
+            cfg,
+            cli_modifiers: cli_modifiers.dupe(),
+            rule_type: rule_type.dupe(),
+        };
+        Ok(ctx.compute(&key).await??)
+    }
 }
diff --git a/app/buck2_cfg_constructor/src/lib.rs b/app/buck2_cfg_constructor/src/lib.rs
index 486fdc663796f..706ddedaab17a 100644
--- a/app/buck2_cfg_constructor/src/lib.rs
+++ b/app/buck2_cfg_constructor/src/lib.rs
@@ -7,92 +7,304 @@
  * of this source tree.
*/ -#![feature(async_closure)] -#![feature(async_fn_in_trait)] +#![feature(error_generic_member_access)] pub(crate) mod calculation; pub(crate) mod registration; +use std::borrow::Borrow; +use std::future::Future; +use std::pin::Pin; + use allocative::Allocative; use async_trait::async_trait; +use buck2_build_api::analysis::calculation::RuleAnalysisCalculation; use buck2_build_api::interpreter::rule_defs::provider::builtin::platform_info::PlatformInfo; +use buck2_build_api::interpreter::rule_defs::provider::collection::FrozenProviderCollectionValue; +use buck2_common::dice::cells::HasCellResolver; use buck2_core::configuration::data::ConfigurationData; +use buck2_core::provider::label::ProvidersLabel; +use buck2_core::unsafe_send_future::UnsafeSendFuture; +use buck2_error::starlark_error::from_starlark; use buck2_events::dispatch::get_dispatcher; use buck2_interpreter::dice::starlark_provider::with_starlark_eval_provider; use buck2_interpreter::print_handler::EventDispatcherPrintHandler; -use buck2_interpreter::starlark_profiler::StarlarkProfilerOrInstrumentation; +use buck2_interpreter::soft_error::Buck2StarlarkSoftErrorHandler; +use buck2_interpreter::starlark_profiler::profiler::StarlarkProfilerOpt; use buck2_node::cfg_constructor::CfgConstructorImpl; use buck2_node::cfg_constructor::CFG_CONSTRUCTOR_CALCULATION_IMPL; +use buck2_node::metadata::key::MetadataKey; +use buck2_node::metadata::key::MetadataKeyRef; +use buck2_node::metadata::value::MetadataValue; +use buck2_node::nodes::frontend::TargetGraphCalculation; +use buck2_node::nodes::unconfigured::RuleKind; +use buck2_node::rule_type::RuleType; use calculation::CfgConstructorCalculationInstance; use dice::DiceComputations; +use futures::FutureExt; +use starlark::collections::SmallMap; use starlark::environment::Module; +use starlark::eval::Evaluator; +use starlark::values::list_or_tuple::UnpackListOrTuple; +use starlark::values::none::NoneOr; use starlark::values::OwnedFrozenValue; use starlark::values::UnpackValue; use starlark::values::Value; use crate::registration::init_registration; +#[derive(Debug, buck2_error::Error)] +enum CfgConstructorError { + #[error( + "Parameter `refs` to post-constraint analysis function must only contain configuration rules. {0} is not a configuration rule." 
+    )]
+    PostConstraintAnalysisRefsMustBeConfigurationRules(String),
+}
+
 #[derive(Allocative, Debug)]
 pub(crate) struct CfgConstructor {
     pub(crate) cfg_constructor_pre_constraint_analysis: OwnedFrozenValue,
     pub(crate) cfg_constructor_post_constraint_analysis: OwnedFrozenValue,
+    pub(crate) key: MetadataKey,
+    pub(crate) aliases: Option<OwnedFrozenValue>,
+    pub(crate) extra_data: Option<OwnedFrozenValue>,
 }
 
-#[async_trait]
-impl CfgConstructorImpl for CfgConstructor {
-    async fn eval(
-        &self,
-        ctx: &DiceComputations,
-        cfg: &ConfigurationData,
-    ) -> anyhow::Result<ConfigurationData> {
-        let module = Module::new();
-        let print = EventDispatcherPrintHandler(get_dispatcher());
-        with_starlark_eval_provider(
-            ctx,
-            // TODO: pass proper profiler (T163570348)
-            &mut StarlarkProfilerOrInstrumentation::disabled(),
-            // TODO: better description
-            format!("cfg constructor invocation for cfg: {}", &cfg),
-            move |provider, _| -> anyhow::Result<ConfigurationData> {
-                let mut eval = provider.make(&module)?;
-                eval.set_print_handler(&print);
-
-                // Pre-constraint analysis
-                let args = vec![(
-                    "platform",
-                    // TODO: should eventually accept cli modifiers, target modifiers, and PACKAGE modifiers (T163570597)
-                    // and unbound platform case will be handled properly
-                    if cfg.is_bound() {
-                        eval.heap()
-                            .alloc_complex(PlatformInfo::from_configuration(cfg, eval.heap())?)
-                    } else {
-                        Value::new_none()
-                    },
-                )];
-                let pre_constraint_analysis_result = eval.eval_function(
-                    self.cfg_constructor_pre_constraint_analysis.value(),
+async fn eval_pre_constraint_analysis<'v>(
+    cfg_constructor_pre_constraint_analysis: Value<'v>,
+    ctx: &mut DiceComputations<'_>,
+    cfg: &ConfigurationData,
+    package_cfg_modifiers: Option<&MetadataValue>,
+    target_cfg_modifiers: Option<&MetadataValue>,
+    cli_modifiers: &[String],
+    rule_type: &RuleType,
+    aliases: Option<&'v OwnedFrozenValue>,
+    extra_data: Option<&'v OwnedFrozenValue>,
+    module: &'v Module,
+    print: &'v EventDispatcherPrintHandler,
+) -> anyhow::Result<(Vec<String>, Value<'v>, Evaluator<'v, 'v, 'v>)> {
+    with_starlark_eval_provider(
+        ctx,
+        // TODO: pass proper profiler (T163570348)
+        &mut StarlarkProfilerOpt::disabled(),
+        "pre constraint-analysis invocation".to_owned(),
+        |provider, _| {
+            let (mut eval, _) = provider.make(module)?;
+            eval.set_print_handler(print);
+            eval.set_soft_error_handler(&Buck2StarlarkSoftErrorHandler);
+
+            let legacy_platform = if cfg.is_bound() {
+                eval.heap()
+                    .alloc_complex(PlatformInfo::from_configuration(cfg, eval.heap())?)
+            } else {
+                Value::new_none()
+            };
+
+            let package_cfg_modifiers = eval.heap().alloc(match package_cfg_modifiers {
+                Some(v) => NoneOr::Other(v.as_json()),
+                None => NoneOr::None,
+            });
+            let target_cfg_modifiers = eval.heap().alloc(match target_cfg_modifiers {
+                Some(v) => NoneOr::Other(v.as_json()),
+                None => NoneOr::None,
+            });
+            let cli_modifiers = eval.heap().alloc(cli_modifiers);
+            let rule_name = eval.heap().alloc(rule_type.name());
+            let aliases = match aliases {
+                Some(v) => v.value(),
+                None => Value::new_none(),
+            };
+            let extra_data = match extra_data {
+                Some(v) => v.value(),
+                None => Value::new_none(),
+            };
+
+            // TODO: should eventually accept cli modifiers and target modifiers (T163570597)
+            let pre_constraint_analysis_args = vec![
+                ("legacy_platform", legacy_platform),
+                ("package_modifiers", package_cfg_modifiers),
+                ("target_modifiers", target_cfg_modifiers),
+                ("cli_modifiers", cli_modifiers),
+                ("rule_name", rule_name),
+                ("aliases", aliases),
+                ("extra_data", extra_data),
+            ];
+
+            // Type check + unpack
+            let (refs, params) = <(UnpackListOrTuple<String>, Value)>::unpack_value_err(
+                eval.eval_function(
+                    cfg_constructor_pre_constraint_analysis,
                     &[],
-                    &args,
+                    &pre_constraint_analysis_args,
+                )
+                .map_err(from_starlark)?,
+            )?;
+
+            // `params` Value lives on eval.heap() so we need to move eval out of the closure to keep it alive
+            Ok((refs.items, params, eval))
+        },
+    )
+    .await
+}
+
+async fn analyze_constraints(
+    ctx: &mut DiceComputations<'_>,
+    refs: Vec<String>,
+) -> anyhow::Result<SmallMap<String, FrozenProviderCollectionValue>> {
+    let cell_resolver = &ctx.get_cell_resolver().await?;
+    let cell_alias_resolver = &ctx
+        .get_cell_alias_resolver(cell_resolver.root_cell())
+        .await?;
+    let res = ctx
+        .try_compute_join(refs, |ctx, label_str| {
+            async move {
+                // Ensure all refs are configuration rules
+                let label = ProvidersLabel::parse(
+                    &label_str,
+                    cell_resolver.root_cell(),
+                    cell_resolver,
+                    cell_alias_resolver,
                 )?;
-                // Check return type
-                drop(<(Vec<String>, Value)>::unpack_value_err(
-                    pre_constraint_analysis_result,
-                )?);
-                // TODO: analysis of constraints (T163226707)
+                if ctx.get_target_node(label.target()).await?.rule_kind() == RuleKind::Configuration
+                {
+                    Ok((
+                        label_str,
+                        ctx.get_configuration_analysis_result(&label).await?,
+                    ))
+                } else {
+                    Err::<_, anyhow::Error>(
+                        CfgConstructorError::PostConstraintAnalysisRefsMustBeConfigurationRules(
+                            label_str,
+                        )
+                        .into(),
+                    )
+                }
+            }
+            .boxed()
+        })
+        .await?;
+    Ok(res.into_iter().collect())
+}
+
+async fn eval_post_constraint_analysis<'v>(
+    cfg_constructor_post_constraint_analysis: Value<'v>,
+    ctx: &mut DiceComputations<'_>,
+    params: Value<'v>,
+    mut eval: Evaluator<'v, '_, '_>,
+    refs_providers_map: SmallMap<String, FrozenProviderCollectionValue>,
+) -> anyhow::Result<ConfigurationData> {
+    with_starlark_eval_provider(
+        ctx,
+        // TODO: pass proper profiler (T163570348)
+        &mut StarlarkProfilerOpt::disabled(),
+        "post constraint-analysis invocation for cfg".to_owned(),
+        |_, _| -> anyhow::Result<ConfigurationData> {
+            let post_constraint_analysis_args = vec![
+                (
+                    "refs",
+                    eval.heap().alloc(
+                        refs_providers_map
+                            .into_iter()
+                            .map(|(label, providers)| {
+                                (label, providers.value().owned_value(eval.frozen_heap()))
+                            })
+                            .collect::<SmallMap<String, Value<'v>>>(),
+                    ),
+                ),
+                ("params", params),
+            ];
 
-                // Post-constraint analysis
-                let post_constraint_analysis_result = eval.eval_function(
-                    self.cfg_constructor_post_constraint_analysis.value(),
+            let post_constraint_analysis_result = eval
+                .eval_function(
+                    cfg_constructor_post_constraint_analysis,
                     &[],
-                    &[("refs", pre_constraint_analysis_result)],
-                )?;
+                    &post_constraint_analysis_args,
+                )
+                .map_err(from_starlark)?;
+
+            // Type check + unpack
+            <&PlatformInfo>::unpack_value_err(post_constraint_analysis_result)?.to_configuration()
+        },
+    )
+    .await
+}
+
+async fn eval_underlying(
+    cfg_constructor: &CfgConstructor,
+    ctx: &mut DiceComputations<'_>,
+    cfg: &ConfigurationData,
+    package_cfg_modifiers: Option<&MetadataValue>,
+    target_cfg_modifiers: Option<&MetadataValue>,
+    cli_modifiers: &[String],
+    rule_type: &RuleType,
+) -> anyhow::Result<ConfigurationData> {
+    let module = Module::new();
+    let print = EventDispatcherPrintHandler(get_dispatcher());
+
+    // Pre constraint-analysis
+    let (refs, params, eval) = eval_pre_constraint_analysis(
+        cfg_constructor
+            .cfg_constructor_pre_constraint_analysis
+            .value(),
+        ctx,
+        cfg,
+        package_cfg_modifiers,
+        target_cfg_modifiers,
+        cli_modifiers,
+        rule_type,
+        cfg_constructor.aliases.as_ref(),
+        cfg_constructor.extra_data.as_ref(),
+        &module,
+        &print,
+    )
+    .await?;
+
+    // Constraint analysis
+    let refs_providers_map = analyze_constraints(ctx, refs).await?;
+
+    // Post constraint-analysis
+    eval_post_constraint_analysis(
+        cfg_constructor
+            .cfg_constructor_post_constraint_analysis
+            .value(),
+        ctx,
+        params,
+        eval,
+        refs_providers_map,
+    )
+    .await
+}
+
+#[async_trait]
+impl CfgConstructorImpl for CfgConstructor {
+    fn eval<'a>(
+        &'a self,
+        ctx: &'a mut DiceComputations,
+        cfg: &'a ConfigurationData,
+        package_cfg_modifiers: Option<&'a MetadataValue>,
+        target_cfg_modifiers: Option<&'a MetadataValue>,
+        cli_modifiers: &'a [String],
+        rule_type: &'a RuleType,
+    ) -> Pin<Box<dyn Future<Output = anyhow::Result<ConfigurationData>> + Send + 'a>> {
+        // Get around the issue of Evaluator not being Send by wrapping the future in UnsafeSendFuture
+        let fut = async move {
+            eval_underlying(
+                self,
+                ctx,
+                cfg,
+                package_cfg_modifiers,
+                target_cfg_modifiers,
+                cli_modifiers,
+                rule_type,
+            )
+            .await
+        };
+        unsafe { Box::pin(UnsafeSendFuture::new_encapsulates_starlark(fut)) }
+    }
 
-                <&PlatformInfo>::unpack_value_err(post_constraint_analysis_result)?
-                    .to_configuration()
-            },
-        )
-        .await
+    fn key(&self) -> &MetadataKeyRef {
+        self.key.borrow()
+    }
 }
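The `eval` signature above returns an explicitly boxed future rather than using `async_trait`, which is what lets it pin the `'a` lifetime and wrap the non-`Send` Starlark evaluation in `UnsafeSendFuture`. A toy illustration of the boxed-future-in-trait shape; the types are illustrative and the unsafe wrapper is deliberately omitted:

use std::future::Future;
use std::pin::Pin;

trait Eval {
    // Boxing by hand (instead of `async_trait`) lets the trait tie the
    // output future's lifetime to `'a` explicitly.
    fn eval<'a>(
        &'a self,
        input: &'a str,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<String>> + Send + 'a>>;
}

struct Upper;

impl Eval for Upper {
    fn eval<'a>(
        &'a self,
        input: &'a str,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<String>> + Send + 'a>> {
        Box::pin(async move { Ok(input.to_uppercase()) })
    }
}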
diff --git a/app/buck2_cfg_constructor/src/registration.rs b/app/buck2_cfg_constructor/src/registration.rs
index 47e51dc395fb3..68daa0fae80c0 100644
--- a/app/buck2_cfg_constructor/src/registration.rs
+++ b/app/buck2_cfg_constructor/src/registration.rs
@@ -12,18 +12,19 @@ use std::sync::Arc;
 use allocative::Allocative;
 use buck2_core::cells::cell_path::CellPathRef;
 use buck2_core::cells::paths::CellRelativePath;
-use buck2_interpreter::cfg_constructor::REGISTER_SET_CFG_CONSTRUCTOR;
-use buck2_interpreter::paths::package::PackageFilePath;
+use buck2_interpreter::downstream_crate_starlark_defs::REGISTER_BUCK2_CFG_CONSTRUCTOR_GLOBALS;
 use buck2_interpreter_for_build::interpreter::build_context::BuildContext;
 use buck2_interpreter_for_build::interpreter::build_context::PerFileTypeContext;
 use buck2_interpreter_for_build::interpreter::package_file_extra::PackageFileExtra;
 use buck2_interpreter_for_build::interpreter::package_file_extra::MAKE_CFG_CONSTRUCTOR;
 use buck2_node::cfg_constructor::CfgConstructorImpl;
+use buck2_node::metadata::key::MetadataKeyRef;
 use dupe::Dupe;
 use starlark::any::ProvidesStaticType;
 use starlark::environment::GlobalsBuilder;
 use starlark::eval::Evaluator;
 use starlark::starlark_module;
+use starlark::values::none::NoneOr;
 use starlark::values::none::NoneType;
 use starlark::values::starlark_value;
 use starlark::values::Freeze;
@@ -37,7 +38,7 @@ use starlark::values::Value;
 
 use crate::CfgConstructor;
 
-#[derive(Debug, thiserror::Error)]
+#[derive(Debug, buck2_error::Error)]
 enum RegisterCfgConstructorError {
     #[error("`set_cfg_constructor()` can only be called from the repository root `PACKAGE` file")]
     NotPackageRoot,
@@ -55,10 +56,13 @@ enum RegisterCfgConstructorError {
     ProvidesStaticType,
     Allocative
 )]
-#[display(fmt = "{:?}", "self")]
+#[display("{:?}", self)]
 struct StarlarkCfgConstructor<'v> {
     stage0: Value<'v>,
     stage1: Value<'v>,
+    key: String,
+    aliases: Option<Value<'v>>,
+    extra_data: Option<Value<'v>>,
 }
 
 #[derive(
@@ -68,10 +72,13 @@ struct StarlarkCfgConstructor<'v> {
     ProvidesStaticType,
     Allocative
 )]
-#[display(fmt = "{:?}", "self")]
+#[display("{:?}", self)]
 struct FrozenStarlarkCfgConstructor {
     stage0: FrozenValue,
     stage1: FrozenValue,
+    key: String,
+    aliases: Option<FrozenValue>,
+    extra_data: Option<FrozenValue>,
 }
 
 #[starlark_value(type = "StarlarkCfgConstructor")]
@@ -86,9 +93,22 @@ impl<'v> Freeze for StarlarkCfgConstructor<'v> {
     type Frozen = FrozenStarlarkCfgConstructor;
 
     fn freeze(self, freezer: &Freezer) -> anyhow::Result<Self::Frozen> {
-        let StarlarkCfgConstructor { stage0, stage1 } = self;
-        let (stage0, stage1) = (stage0, stage1).freeze(freezer)?;
-        Ok(FrozenStarlarkCfgConstructor { stage0, stage1 })
+        let StarlarkCfgConstructor {
+            stage0,
+            stage1,
+            key,
+            aliases,
+            extra_data,
+        } = self;
+        let (stage0, stage1, aliases, extra_data) =
+            (stage0, stage1, aliases, extra_data).freeze(freezer)?;
+        Ok(FrozenStarlarkCfgConstructor {
+            stage0,
+            stage1,
+            key,
+            aliases,
+            extra_data,
+        })
     }
 }
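The freeze dance above generalizes: a `'v` struct holding `Value<'v>`s is converted field-by-field into a parallel struct of `FrozenValue`s, and tuples of `Freeze` types freeze in one call. A minimal sketch with a hypothetical pair type, assuming the starlark API as used in this diff:

use starlark::values::Freeze;
use starlark::values::Freezer;
use starlark::values::FrozenValue;
use starlark::values::Value;

// Hypothetical unfrozen/frozen pair.
struct Pair<'v> {
    a: Value<'v>,
    b: Option<Value<'v>>,
}

struct FrozenPair {
    a: FrozenValue,
    b: Option<FrozenValue>,
}

impl<'v> Freeze for Pair<'v> {
    type Frozen = FrozenPair;

    fn freeze(self, freezer: &Freezer) -> anyhow::Result<Self::Frozen> {
        let Pair { a, b } = self;
        // Tuples of `Freeze` types freeze in one go, as in the diff above.
        let (a, b) = (a, b).freeze(freezer)?;
        Ok(FrozenPair { a, b })
    }
}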
@@ -96,15 +116,30 @@ fn make_cfg_constructor(
     cfg_constructor: OwnedFrozenValue,
 ) -> anyhow::Result<Arc<dyn CfgConstructorImpl>> {
     let cfg_constructor = cfg_constructor.downcast_anyhow::<FrozenStarlarkCfgConstructor>()?;
-    let (cfg_constructor_pre_constraint_analysis, cfg_constructor_post_constraint_analysis) = unsafe {
+    let (
+        cfg_constructor_pre_constraint_analysis,
+        cfg_constructor_post_constraint_analysis,
+        aliases,
+        extra_data,
+    ) = unsafe {
         (
             OwnedFrozenValue::new(cfg_constructor.owner().dupe(), cfg_constructor.stage0),
             OwnedFrozenValue::new(cfg_constructor.owner().dupe(), cfg_constructor.stage1),
+            cfg_constructor
+                .aliases
+                .map(|v| OwnedFrozenValue::new(cfg_constructor.owner().dupe(), v)),
+            cfg_constructor
+                .extra_data
+                .map(|v| OwnedFrozenValue::new(cfg_constructor.owner().dupe(), v)),
         )
     };
+    let key = MetadataKeyRef::new(&cfg_constructor.key)?.to_owned();
     Ok(Arc::new(CfgConstructor {
         cfg_constructor_pre_constraint_analysis,
         cfg_constructor_post_constraint_analysis,
+        key,
+        aliases,
+        extra_data,
     }))
 }
 
@@ -113,21 +148,32 @@ pub(crate) fn register_set_cfg_constructor(globals: &mut GlobalsBuilder) {
     /// Register global cfg constructor.
     ///
     /// This function can only be called from the repository root `PACKAGE` file.
+    ///
+    /// Parameters:
+    ///   stage0: The first cfg constructor that will be invoked before configuration rules are analyzed.
+    ///   stage1: The second cfg constructor that will be invoked after configuration rules are analyzed.
+    ///   key: The key for cfg modifiers on PACKAGE values and metadata.
+    ///   aliases: The aliases map to use for input modifiers.
+    ///   extra_data: Extra data that may be used by the `set_cfg_constructor` implementation; this is
+    ///     custom to our implementation and may not be used in other contexts, such as open source.
     fn set_cfg_constructor<'v>(
         #[starlark(require=named)] stage0: Value<'v>,
         #[starlark(require=named)] stage1: Value<'v>,
-        eval: &mut Evaluator<'v, '_>,
+        #[starlark(require=named)] key: &str,
+        #[starlark(require = named, default = NoneOr::None)] aliases: NoneOr<Value<'v>>,
+        #[starlark(require = named, default = NoneOr::None)] extra_data: NoneOr<Value<'v>>,
+        eval: &mut Evaluator<'v, '_, '_>,
     ) -> anyhow::Result<NoneType> {
         let build_context = BuildContext::from_context(eval)?;
         let ctx = match &build_context.additional {
             PerFileTypeContext::Package(ctx) => ctx,
             _ => return Err(RegisterCfgConstructorError::NotPackageRoot.into()),
         };
-        if ctx.path
-            != PackageFilePath::for_dir(CellPathRef::new(
+        if ctx.path.dir()
+            != CellPathRef::new(
                 build_context.cell_info.cell_resolver().root_cell(),
                 CellRelativePath::empty(),
-            ))
+            )
         {
             return Err(RegisterCfgConstructorError::NotPackageRoot.into());
         }
@@ -136,8 +182,13 @@ pub(crate) fn register_set_cfg_constructor(globals: &mut GlobalsBuilder) {
             return Err(RegisterCfgConstructorError::AlreadyRegistered.into());
         }
         package_file_extra.cfg_constructor.get_or_init(|| {
-            eval.heap()
-                .alloc_complex(StarlarkCfgConstructor { stage0, stage1 })
+            eval.heap().alloc_complex(StarlarkCfgConstructor {
+                stage0,
+                stage1,
+                key: key.to_owned(),
+                aliases: aliases.into_option(),
+                extra_data: extra_data.into_option(),
+            })
         });
         Ok(NoneType)
     }
@@ -145,5 +196,5 @@ pub(crate) fn register_set_cfg_constructor(globals: &mut GlobalsBuilder) {
 
 pub(crate) fn init_registration() {
     MAKE_CFG_CONSTRUCTOR.init(make_cfg_constructor);
-    REGISTER_SET_CFG_CONSTRUCTOR.init(register_set_cfg_constructor);
+    REGISTER_BUCK2_CFG_CONSTRUCTOR_GLOBALS.init(register_set_cfg_constructor);
 }
diff --git a/app/buck2_cli_proto/BUCK b/app/buck2_cli_proto/BUCK
index 033b436bae270..75b44e2169fdf 100644
--- a/app/buck2_cli_proto/BUCK
+++ b/app/buck2_cli_proto/BUCK
@@ -1,5 +1,6 @@
 load("@fbcode//buck2:proto_defs.bzl", "rust_protobuf_library")
-load("@fbsource//tools/build_defs:glob_defs.bzl", "glob")
+load("@fbcode_macros//build_defs:export_files.bzl", "export_file")
+load("@fbcode_macros//build_defs/lib:oss.bzl", "translate_target")
 
 oncall("build_infra")
 
@@ -7,11 +8,14 @@ rust_protobuf_library(
     name = "buck2_cli_proto",
     srcs = glob(["src/**/*.rs"]),
     build_env = {
-
"BUCK_HACK_DATA_PROTOC_INCLUDE": "$(location //buck2/app/buck2_data:data.proto)", - "BUCK_HACK_SUBSCRIPTION_PROTOC_INCLUDE": "$(location //buck2/app/buck2_subscription_proto:subscription.proto)", + "BUCK_HACK_DATA_PROTOC_INCLUDE": "$(location {})".format( + translate_target("//buck2/app/buck2_data:data_proto"), + ), + "BUCK_HACK_SUBSCRIPTION_PROTOC_INCLUDE": "$(location {})".format( + translate_target("//buck2/app/buck2_subscription_proto:subscription.proto"), + ), }, build_script = "build.rs", - doctests = False, # FIXME protos = ["daemon.proto"], test_deps = [ "fbsource//third-party/rust:futures", @@ -22,11 +26,16 @@ rust_protobuf_library( "fbsource//third-party/rust:bytes", "fbsource//third-party/rust:prost-types", "fbsource//third-party/rust:serde", - "fbsource//third-party/rust:thiserror", "fbsource//third-party/rust:tokio-util", "fbsource//third-party/rust:tonic", "//buck2/allocative/allocative:allocative", + "//buck2/app/buck2_core:buck2_core", "//buck2/app/buck2_data:buck2_data", + "//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_subscription_proto:buck2_subscription_proto", ], ) + +export_file( + name = "daemon.proto", +) diff --git a/app/buck2_cli_proto/Cargo.toml b/app/buck2_cli_proto/Cargo.toml index af945ad6555c5..c42f9b0a33f15 100644 --- a/app/buck2_cli_proto/Cargo.toml +++ b/app/buck2_cli_proto/Cargo.toml @@ -2,6 +2,8 @@ name = "buck2_cli_proto" edition = "2021" +license = { workspace = true } +repository = { workspace = true } version = "0.1.0" [dependencies] @@ -11,11 +13,12 @@ bytes = { workspace = true } prost = { workspace = true } prost-types = { workspace = true } serde = { workspace = true } -thiserror = { workspace = true } tokio-util = { workspace = true } tonic = { workspace = true } +buck2_core = { workspace = true } buck2_data = { workspace = true } +buck2_error = { workspace = true } buck2_subscription_proto = { workspace = true } [build-dependencies] diff --git a/app/buck2_cli_proto/daemon.proto b/app/buck2_cli_proto/daemon.proto index f4fcb6a05817a..fce667d73c93c 100644 --- a/app/buck2_cli_proto/daemon.proto +++ b/app/buck2_cli_proto/daemon.proto @@ -75,6 +75,10 @@ message StatusResponse { string isolation_dir = 10; optional uint32 forkserver_pid = 11; optional bool supports_vpnless = 12; + optional bool http2 = 13; + optional bool valid_working_directory = 14; + optional bool valid_buck_out_mount = 15; + optional string io_provider = 16; } message PingRequest { @@ -87,11 +91,17 @@ message PingResponse { bytes payload = 2; } +message TargetCfg { + /// Empty string means not specified. + string target_platform = 1; + repeated string cli_modifiers = 2; +} + message ClientContext { + reserved 5, 21; + // `AbsNormPath`. string working_dir = 1; repeated ConfigOverride config_overrides = 3; - /// Empty string means not specified. - string target_platform = 5; enum HostPlatformOverride { DEFAULT_PLATFORM = 0; LINUX = 1; @@ -126,10 +136,22 @@ message ClientContext { /// Contents of `BUCK2_HARD_ERROR` environment variable. string buck2_hard_error = 20; + + enum PreemptibleWhen { + /// Default + NEVER = 0; + ALWAYS = 1; + ON_DIFFERENT_STATE = 2; + } + /// Whether this invocation is preemptible. If another build attempts to start + // that /would/ block until this build finishes, `preemptible` determines + // whether the new build will preempt (ie kill) the current build and take its + // place. 
+ PreemptibleWhen preemptible = 22; } message TargetsRequest { - reserved 3 to 16, 18, 4242000; + reserved 2, 3 to 16, 18, 4242000; enum TargetHashFileMode { PATHS_ONLY = 0; @@ -151,6 +173,12 @@ message TargetsRequest { STATS = 4; } + enum Compression { + UNCOMPRESSED = 0; + GZIP = 1; + ZSTD = 2; + } + message ResolveAlias {} message Other { @@ -173,7 +201,8 @@ message TargetsRequest { } ClientContext context = 1; - repeated buck.data.TargetPattern target_patterns = 2; + repeated string target_patterns = 102; + TargetCfg target_cfg = 201; optional string output = 17; OutputFormat output_format = 19; @@ -183,6 +212,7 @@ message TargetsRequest { Other other = 21; } Concurrency concurrency = 22; + Compression compression = 23; } message TargetsResponse { @@ -200,9 +230,10 @@ message TargetsShowOutputsResponse { // `buck2 ctargets` command message ConfiguredTargetsRequest { - reserved 10; + reserved 2, 10; ClientContext context = 1; - repeated buck.data.TargetPattern target_patterns = 2; + repeated string target_patterns = 12; + TargetCfg target_cfg = 201; bool skip_missing_targets = 11; } @@ -215,6 +246,7 @@ enum QueryOutputFormat { JSON = 1; DOT = 2; DOT_COMPACT = 3; + STARLARK = 4; } message AqueryRequest { @@ -223,6 +255,7 @@ message AqueryRequest { repeated string output_attributes = 3; // The literals for a repeated query (one containing `%s`). repeated string query_args = 4; + TargetCfg target_cfg = 5; // These should possibly be deleted and never become real options. Let's not // pollute the low ids (and then forever need a comment about them). @@ -234,7 +267,7 @@ message AqueryResponse { } message UqueryRequest { - reserved 6; + reserved 5, 6; ClientContext context = 1; string query = 2; repeated string output_attributes = 3; @@ -251,18 +284,19 @@ message UqueryResponse { } message CqueryRequest { - reserved 6; + reserved 6, 8; ClientContext context = 1; string query = 2; repeated string output_attributes = 3; // The literals for a repeated query (one containing `%s`). repeated string query_args = 4; repeated string target_universe = 5; + TargetCfg target_cfg = 9; bool show_providers = 7; - // Correct or deprecated owner? https://fburl.com/1mf2d2xj - bool correct_owner = 8; + optional ProfileMode profile_mode = 21; + optional string profile_output = 22; // These should possibly be deleted and never become real options. Let's not // pollute the low ids (and then forever need a comment about them). @@ -274,7 +308,12 @@ message CqueryResponse { } message ConfigOverride { - // `override` is reserved keyword in Rust + // A path to a cell root. Interpretation depends on the config type: + // - If its a value, then this is the cell the value should be applied to + // - If its a file, then the file path is relative to this cell + optional string cell = 3; + // The value passed either to `--config` or `--config-file`. This does not + // include the cell, which is instead included above string config_override = 1; enum ConfigType { VALUE = 0; @@ -334,19 +373,28 @@ message CommonBuildOptions { bool skip_missing_targets = 16; bool skip_incompatible_targets = 17; + /// Materializes inputs for failed actions which ran on RE. + bool materialize_failed_inputs = 18; + + /// Validations to run that are marked optional. + repeated string enable_optional_validations = 19; + // These should possibly be deleted and never become real options. Let's not // pollute the low ids (and then forever need a comment about them). 
The only // one of these that might stick around is print_build_report, it's unclear if // that should be handled in the server or the client, though. bool unstable_print_build_report = 4242000; string unstable_build_report_filename = 4242003; + bool unstable_include_failures_build_report = 4242004; + bool unstable_include_package_project_relative_paths = 4242005; } message BuildRequest { - reserved 5, 4242001; + reserved 2, 5, 4242001; ClientContext context = 1; - repeated buck.data.TargetPattern target_patterns = 2; + repeated string target_patterns = 10; + TargetCfg target_cfg = 201; repeated string target_universe = 8; message BuildProviders { @@ -383,6 +431,9 @@ message BuildRequest { } // Materialize final artifacts? Materializations final_artifact_materializations = 7; + + // File name where built artifact hash information should be saved + optional string output_hashes_file = 9; } message TestSessionOptions { @@ -392,10 +443,11 @@ message TestSessionOptions { } message TestRequest { - reserved 10; + reserved 2, 10; ClientContext context = 1; - repeated buck.data.TargetPattern target_patterns = 2; + repeated string target_patterns = 14; + TargetCfg target_cfg = 201; repeated string test_executor_args = 3; @@ -420,6 +472,15 @@ message TestRequest { CommonBuildOptions build_opts = 9; TestSessionOptions session_options = 11; + + // How long to execute tests for? If the timeout is exceeded, Buck2 will exit + // as quickly as possible and not run further tests. In-flight tests will be + // cancelled. The test orchestrator will be allowed to shut down gracefully. + // The exit code will be a user failure. + optional google.protobuf.Duration timeout = 12; + + // Should you add tests that are on the `tests` attribute of the target. + bool ignore_tests_attribute = 13; } message BxlRequest { @@ -431,20 +492,26 @@ message BxlRequest { CommonBuildOptions build_opts = 4; + TargetCfg target_cfg = 201; + BuildRequest.Materializations final_artifact_materializations = 6; bool print_stacktrace = 7; } message BxlResponse { + reserved 101; // Absolute path to the repo root string project_root = 2; - repeated string error_messages = 101; + repeated buck.data.ErrorReport errors = 102; + optional string serialized_build_report = 100; } message InstallRequest { + reserved 2; ClientContext context = 1; - repeated buck.data.TargetPattern target_patterns = 2; + repeated string target_patterns = 12; + TargetCfg target_cfg = 201; CommonBuildOptions build_opts = 3; repeated string installer_run_args = 4; bool installer_debug = 5; @@ -472,17 +539,17 @@ message BuildTarget { // The dependency graph size for this target, if enabled and the target was // not skipped. optional uint64 configured_graph_size = 5; + optional string target_rule_type_name = 6; } message BuildResponse { + reserved 101; repeated BuildTarget build_targets = 1; // Absolute path to the repo root string project_root = 2; - // TODO(nmj): These are temporary until we've moved the - // printing logic for the build report into - // the CLI. 
They *will* be removed - string serialized_build_report = 100; - repeated string error_messages = 101; + + optional string serialized_build_report = 100; + repeated buck.data.ErrorReport errors = 102; } message CounterWithExamples { @@ -492,8 +559,10 @@ message CounterWithExamples { } message TestResponse { + reserved 101; optional int32 exit_code = 1; - repeated string error_messages = 101; + optional string serialized_build_report = 100; + repeated buck.data.ErrorReport errors = 102; message TestStatuses { reserved 1 to 6; CounterWithExamples passed = 10; @@ -531,45 +600,24 @@ message NewGenericResponseMessage { string new_generic_response = 1; } -message UnstableCrashRequest {} - -message UnstableCrashResponse {} - -message SegfaultRequest {} - -message SegfaultResponse {} - -message UnstableDocsRequest { - enum Format { - UNKNOWN = 0; - JSON = 1; - MARKDOWN = 2; +message UnstableCrashRequest { + enum CrashType { + PANIC = 0; + ABORT = 2; } - - ClientContext context = 1; - repeated string symbol_patterns = 2; - bool retrieve_builtins = 3; - bool retrieve_prelude = 4; - Format format = 5; - // `markdown_output_path` must be set when format is Markdown and must be - // unset otherwise. - optional string markdown_output_path = 6; - string markdown_native_subdir = 7; - string markdown_starlark_subdir = 8; + CrashType crash_type = 1; } -message UnstableDocsResponse { - reserved 1, 2; - - // Set when requested format is JSON. - optional string json_output = 3; -} +message UnstableCrashResponse {} message CommandError { - repeated string messages = 1; + reserved 1; + repeated buck.data.ErrorReport errors = 2; } message CommandResult { + reserved 10; + oneof result { CommandError error = 1; // Ideally, this would be an Any, but tonic/prost doesn't support it yet. @@ -581,7 +629,6 @@ message CommandResult { CqueryResponse cquery_response = 7; UqueryResponse uquery_response = 8; TestResponse test_response = 9; - UnstableDocsResponse unstable_docs_response = 10; ProfileResponse profile_response = 11; TargetsShowOutputsResponse targets_show_outputs_response = 12; InstallResponse install_response = 14; @@ -660,6 +707,7 @@ message UnstableHeapDumpRequest { // The path to write the heap dump to. If this path is relative, it is made // absolute relative to the working directory of the daemon. 
string destination_path = 1; + optional string test_executor_destination_path = 2; } message UnstableHeapDumpResponse {} @@ -717,36 +765,42 @@ message DapResponse {} message BxlProfile { string bxl_label = 1; repeated string bxl_args = 2; + TargetCfg target_cfg = 201; } message TargetProfile { + reserved 1; + enum Action { ANALYSIS = 0; LOADING = 1; } - repeated buck.data.TargetPattern target_patterns = 1; + repeated string target_patterns = 4; + TargetCfg target_cfg = 101; + repeated string target_universe = 102; bool recursive = 2; Action action = 3; } -message ProfileRequest { - enum Profiler { - HEAP_FLAME_ALLOCATED = 0; - HEAP_FLAME_RETAINED = 10; - HEAP_SUMMARY_ALLOCATED = 1; - HEAP_SUMMARY_RETAINED = 11; - TIME_FLAME = 2; - STATEMENT = 3; - BYTECODE = 4; - BYTECODE_PAIRS = 5; - TYPECHECK = 6; - } +enum ProfileMode { + HEAP_FLAME_ALLOCATED = 0; + HEAP_FLAME_RETAINED = 10; + HEAP_SUMMARY_ALLOCATED = 1; + HEAP_SUMMARY_RETAINED = 11; + TIME_FLAME = 2; + STATEMENT = 3; + BYTECODE = 4; + BYTECODE_PAIRS = 5; + TYPECHECK = 6; + COVERAGE = 7; +} +message ProfileRequest { ClientContext context = 1; string destination_path = 3; - Profiler profiler = 4; + ProfileMode profile_mode = 4; oneof profile_opts { TargetProfile target_profile = 7; @@ -783,10 +837,12 @@ message FileStatusRequest { // The paths we want to learn about repeated string paths = 2; // Show hashes of files passed in. - bool verbose = 3; + bool show_matches = 3; } -message FlushDepFilesRequest {} +message FlushDepFilesRequest { + bool retain_locally_produced_dep_files = 1; +} message SetLogFilterRequest { string log_filter = 1; @@ -860,7 +916,6 @@ service DaemonApi { rpc Uquery(UqueryRequest) returns (stream MultiCommandProgress); rpc Audit(GenericRequest) returns (stream MultiCommandProgress); rpc Starlark(GenericRequest) returns (stream MultiCommandProgress); - rpc UnstableDocs(UnstableDocsRequest) returns (stream MultiCommandProgress); rpc Install(InstallRequest) returns (stream MultiCommandProgress); rpc CleanStale(CleanStaleRequest) returns (stream MultiCommandProgress); rpc FileStatus(FileStatusRequest) returns (stream MultiCommandProgress); @@ -870,12 +925,8 @@ service DaemonApi { returns (stream MultiCommandProgress); // Crashes the Buck daemon. Unless you are writing tests or checking Buck2's - // panic behavior, you probably don't want this. - rpc Unstable_Crash(UnstableCrashRequest) returns (UnstableCrashResponse); - - // Crashes the Buck daemon with a segfault. Unless you are writing tests or - // checking Buck2's segfault behavior, you probably don't want this. - rpc Segfault(SegfaultRequest) returns (SegfaultResponse); + // behavior when it crashes, you probably don't want this. + rpc Unstable_Crash(UnstableCrashRequest) returns (CommandResult); // Requests the daemon to perform a heap dump and save the dump to a file. rpc Unstable_HeapDump(UnstableHeapDumpRequest) diff --git a/app/buck2_cli_proto/src/lib.rs b/app/buck2_cli_proto/src/lib.rs index 34ec90137e12b..627c955215aa5 100644 --- a/app/buck2_cli_proto/src/lib.rs +++ b/app/buck2_cli_proto/src/lib.rs @@ -7,10 +7,15 @@ * of this source tree. 
*/
+#![feature(error_generic_member_access)]
 #![feature(min_specialization)]
 #![allow(clippy::large_enum_variant)]
 
-use thiserror::Error;
+use buck2_core::cells::cell_root_path::CellRootPath;
+use buck2_core::cells::cell_root_path::CellRootPathBuf;
+use buck2_core::fs::project_rel_path::ProjectRelativePath;
+use buck2_error::internal_error_anyhow;
+use buck2_error::BuckErrorContext;
 
 use crate::BuckDaemonProtoError::MissingClientContext;
@@ -19,12 +24,49 @@ pub mod protobuf_util;
 
 tonic::include_proto!("buck.daemon");
 
-#[derive(Debug, Error)]
+#[derive(Debug, buck2_error::Error)]
 enum BuckDaemonProtoError {
     #[error("daemon request was missing client context")]
     MissingClientContext,
-    #[error("wrong gRPC request message type, expecting {0} (internal error)")]
-    WrongRequestType(&'static str),
+}
+
+#[track_caller]
+fn wrong_request_type(request_type: &'static str) -> anyhow::Error {
+    internal_error_anyhow!("wrong gRPC request message type, expecting {request_type}")
+}
+
+impl ConfigOverride {
+    /// Not `fbcode//config.key=value`
+    pub fn flag_no_cell(s: &str) -> Self {
+        Self::flag(s, None)
+    }
+
+    pub fn flag(s: &str, cell: Option<CellRootPathBuf>) -> Self {
+        Self {
+            cell: cell.map(|c| c.as_str().to_owned()),
+            config_override: s.to_owned(),
+            config_type: crate::config_override::ConfigType::Value.into(),
+        }
+    }
+
+    pub fn file(p: &str, cell: Option<CellRootPathBuf>) -> Self {
+        Self {
+            cell: cell.map(|c| c.as_str().to_owned()),
+            config_override: p.to_owned(),
+            config_type: crate::config_override::ConfigType::File.into(),
+        }
+    }
+
+    pub fn get_cell(&self) -> anyhow::Result<Option<&CellRootPath>> {
+        self.cell
+            .as_ref()
+            .map(|p| {
+                ProjectRelativePath::new(p)
+                    .map(CellRootPath::new)
+                    .internal_error_anyhow("Client should have sent a valid path")
+            })
+            .transpose()
+    }
 }
 
 pub trait HasClientContext {
@@ -52,7 +94,7 @@ impl TryFrom<StreamingRequest> for LspRequest {
     fn try_from(value: StreamingRequest) -> Result<Self, Self::Error> {
         match value.request {
             Some(streaming_request::Request::Lsp(req)) => Ok(req),
-            _ => Err(BuckDaemonProtoError::WrongRequestType("LspRequest").into()),
+            _ => Err(wrong_request_type("LspRequest")),
         }
     }
 }
@@ -71,7 +113,7 @@ impl TryFrom<StreamingRequest> for SubscriptionRequestWrapper {
     fn try_from(value: StreamingRequest) -> Result<Self, Self::Error> {
         match value.request {
             Some(streaming_request::Request::Subscription(req)) => Ok(req),
-            _ => Err(BuckDaemonProtoError::WrongRequestType("SubscriptionRequestWrapper").into()),
+            _ => Err(wrong_request_type("SubscriptionRequestWrapper")),
         }
     }
 }
@@ -90,7 +132,7 @@ impl TryFrom<StreamingRequest> for DapRequest {
     fn try_from(value: StreamingRequest) -> Result<Self, Self::Error> {
         match value.request {
             Some(streaming_request::Request::Dap(req)) => Ok(req),
-            _ => Err(BuckDaemonProtoError::WrongRequestType("DapRequest").into()),
+            _ => Err(wrong_request_type("DapRequest")),
         }
     }
 }
@@ -255,7 +297,6 @@ result_convert!(TargetsResponse);
 result_convert!(TargetsShowOutputsResponse);
 result_convert!(ConfiguredTargetsResponse);
 result_convert!(GenericResponse);
-result_convert!(UnstableDocsResponse);
 result_convert!(ProfileResponse);
 result_convert!(InstallResponse);
 result_convert!(CleanStaleResponse);
@@ -284,7 +325,6 @@ define_request!(CqueryRequest, has(context));
 define_request!(UqueryRequest, has(context));
 define_request!(TestRequest, has(context, build_options));
 define_request!(GenericRequest, has(context));
-define_request!(UnstableDocsRequest, has(context));
 define_request!(ProfileRequest, has(context));
 define_request!(AllocativeRequest, has(context));
 define_request!(CleanStaleRequest, has(context));
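The refactor replaces the `WrongRequestType` error variant with a `#[track_caller]` helper so every call site gets the internal-error tagging and caller location for free. A stripped-down sketch of the conversion pattern with hypothetical stand-ins for the generated proto types (plain `anyhow!` in place of `internal_error_anyhow!` for brevity):

use anyhow::anyhow;

// Hypothetical stand-ins for the prost-generated request types.
enum StreamReq {
    Lsp(String),
    Dap(String),
}

struct LspReq(String);

#[track_caller]
fn wrong_req(expected: &'static str) -> anyhow::Error {
    // The real code routes this through `internal_error_anyhow!` so the
    // error is tagged as internal; `anyhow!` keeps the sketch dependency-free.
    anyhow!("wrong gRPC request message type, expecting {expected}")
}

impl TryFrom<StreamReq> for LspReq {
    type Error = anyhow::Error;

    fn try_from(value: StreamReq) -> Result<Self, Self::Error> {
        match value {
            StreamReq::Lsp(s) => Ok(LspReq(s)),
            _ => Err(wrong_req("LspReq")),
        }
    }
}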
b/app/buck2_cli_proto/src/new_generic.rs index cffa71f7ed7b9..71fd2ece3d38e 100644 --- a/app/buck2_cli_proto/src/new_generic.rs +++ b/app/buck2_cli_proto/src/new_generic.rs @@ -7,19 +7,30 @@ * of this source tree. */ +use buck2_core::fs::paths::abs_path::AbsPathBuf; use serde::Deserialize; use serde::Serialize; +use crate::TargetCfg; + #[derive(Serialize, Deserialize)] pub enum NewGenericRequest { Materialize(MaterializeRequest), DebugEval(DebugEvalRequest), + Explain(ExplainRequest), + ExpandExternalCell(ExpandExternalCellRequest), + Complete(CompleteRequest), + Docs(DocsRequest), } #[derive(Serialize, Deserialize)] pub enum NewGenericResponse { Materialize(MaterializeResponse), DebugEval(DebugEvalResponse), + Explain(ExplainResponse), + ExpandExternalCell(ExpandExternalCellResponse), + Complete(CompleteResponse), + Docs(DocsResponse), } #[derive(Serialize, Deserialize)] @@ -38,3 +49,70 @@ pub struct DebugEvalRequest { #[derive(Serialize, Deserialize)] pub struct DebugEvalResponse {} + +#[derive(Serialize, Deserialize)] +pub struct ExplainRequest { + pub output: Option, + pub target: String, + pub fbs_dump: Option, + pub manifold_path: Option, + // build options + pub target_universe: Vec, + pub target_cfg: TargetCfg, +} + +#[derive(Serialize, Deserialize)] +pub struct ExplainResponse {} + +#[derive(Serialize, Deserialize)] +pub struct ExpandExternalCellRequest { + pub cell_name: String, +} + +#[derive(Serialize, Deserialize)] +pub struct ExpandExternalCellResponse { + pub path: String, +} + +#[derive(Serialize, Deserialize)] +pub struct CompleteRequest { + pub target_cfg: TargetCfg, + /// The label with partial target we want to complete \[\[cell\]//\]package:\[partial_target\] + pub partial_target: String, +} + +#[derive(Serialize, Deserialize)] +pub struct CompleteResponse { + /// Completions matching the partial input. + pub completions: Vec, +} + +#[derive(Serialize, Deserialize)] +pub enum DocsOutputFormat { + Json, + /// Contains the markdown output path + Markdown(AbsPathBuf), +} + +#[derive(Serialize, Deserialize)] +pub struct DocsStarlarkRequest { + pub symbol_patterns: Vec, + pub format: DocsOutputFormat, +} + +#[derive(Serialize, Deserialize)] +pub struct DocsStarlarkBuiltinsRequest { + pub path: String, +} + +#[derive(Serialize, Deserialize)] +pub enum DocsRequest { + Starlark(DocsStarlarkRequest), + StarlarkBuiltins(DocsStarlarkBuiltinsRequest), +} + +#[derive(Serialize, Deserialize)] +pub struct DocsResponse { + // Set when requested format is JSON. 
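+    // For Markdown, nothing is set here: the docs are written to the path carried
+    // in `DocsOutputFormat::Markdown` instead.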
+ pub json_output: Option, +} diff --git a/app/buck2_cli_proto/src/protobuf_util.rs b/app/buck2_cli_proto/src/protobuf_util.rs index 61d760053da34..4a5740062f5cd 100644 --- a/app/buck2_cli_proto/src/protobuf_util.rs +++ b/app/buck2_cli_proto/src/protobuf_util.rs @@ -45,7 +45,7 @@ impl Decoder for ProtobufSplitter { } #[cfg(test)] -mod test { +mod tests { use anyhow::Context as _; use futures::stream::StreamExt; use prost::Message; diff --git a/app/buck2_client/BUCK b/app/buck2_client/BUCK index e43f97f1d0b3c..317554e9d3263 100644 --- a/app/buck2_client/BUCK +++ b/app/buck2_client/BUCK @@ -1,11 +1,12 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") rust_library( name = "buck2_client", - srcs = glob(["src/**/*.rs"]), + srcs = glob([ + "src/**/*.rs", + ]), test_deps = [ "fbsource//third-party/rust:assert_matches", "fbsource//third-party/rust:tempfile", @@ -17,16 +18,15 @@ rust_library( "fbsource//third-party/rust:async-trait", "fbsource//third-party/rust:bytesize", "fbsource//third-party/rust:chrono", - "fbsource//third-party/rust:clap-3", + "fbsource//third-party/rust:clap", "fbsource//third-party/rust:csv", "fbsource//third-party/rust:derive_more", "fbsource//third-party/rust:futures", "fbsource//third-party/rust:humantime", "fbsource//third-party/rust:indexmap", "fbsource//third-party/rust:libc", + "fbsource//third-party/rust:linked-hash-map", "fbsource//third-party/rust:lsp-server", - "fbsource//third-party/rust:maplit", - "fbsource//third-party/rust:multimap", "fbsource//third-party/rust:num_cpus", "fbsource//third-party/rust:once_cell", "fbsource//third-party/rust:prost", @@ -35,8 +35,6 @@ rust_library( "fbsource//third-party/rust:serde", "fbsource//third-party/rust:serde_json", "fbsource//third-party/rust:shlex", - "fbsource//third-party/rust:termwiz", - "fbsource//third-party/rust:thiserror", "fbsource//third-party/rust:threadpool", "fbsource//third-party/rust:tokio", "fbsource//third-party/rust:tokio-stream", @@ -51,9 +49,10 @@ rust_library( "//buck2/app/buck2_common:buck2_common", "//buck2/app/buck2_core:buck2_core", "//buck2/app/buck2_data:buck2_data", + "//buck2/app/buck2_error:buck2_error", + "//buck2/app/buck2_event_log:buck2_event_log", "//buck2/app/buck2_event_observer:buck2_event_observer", "//buck2/app/buck2_events:buck2_events", - "//buck2/app/buck2_execute:buck2_execute", "//buck2/app/buck2_offline_archive:buck2_offline_archive", "//buck2/app/buck2_query_parser:buck2_query_parser", "//buck2/app/buck2_subscription_proto:buck2_subscription_proto", diff --git a/app/buck2_client/Cargo.toml b/app/buck2_client/Cargo.toml index 417f62639a5e8..71c9116820d3f 100644 --- a/app/buck2_client/Cargo.toml +++ b/app/buck2_client/Cargo.toml @@ -1,7 +1,9 @@ [package] description = "Some parts of Buck client" edition = "2021" +license = { workspace = true } name = "buck2_client" +repository = { workspace = true } version = "0.1.0" [dependencies] @@ -11,53 +13,52 @@ async-compression = { workspace = true } async-recursion = { workspace = true } async-trait = { workspace = true } bytesize = { workspace = true } -clap = { workspace = true } chrono = { workspace = true } +clap = { workspace = true } csv = { workspace = true } derive_more = { workspace = true } +dupe = { workspace = true } futures = { workspace = true } +gazebo = { workspace = true } humantime = { workspace = true } indexmap = { workspace = true } libc = { workspace = true } +linked-hash-map = { workspace = true } lsp-server = { 
workspace = true } maplit = { workspace = true } multimap = { workspace = true } +num_cpus = { workspace = true } once_cell = { workspace = true } -prost-types = { workspace = true } prost = { workspace = true } +prost-types = { workspace = true } rand = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } shlex = { workspace = true } -termwiz = { workspace = true } -thiserror = { workspace = true } +superconsole = { version = "0.2.0", path = "../../superconsole" } +threadpool = { workspace = true } tokio = { workspace = true } tokio-stream = { workspace = true } tokio-util = { workspace = true } +tonic = { workspace = true } tracing = { workspace = true } walkdir = { workspace = true } -num_cpus = { workspace = true } -threadpool = { workspace = true } -dice = { workspace = true } -dupe = { workspace = true } -gazebo = { workspace = true } -superconsole = { version = "0.2.0", path = "../../superconsole" } -tonic = { workspace = true } # Please do not add dependency on `buck2_build_api`. buck2_audit = { workspace = true } +buck2_cli_proto = { workspace = true } buck2_client_ctx = { workspace = true } buck2_common = { workspace = true } buck2_core = { workspace = true } buck2_data = { workspace = true } -buck2_execute = { workspace = true } +buck2_error = { workspace = true } +buck2_event_log = { workspace = true } buck2_event_observer = { workspace = true } buck2_events = { workspace = true } -buck2_cli_proto = { workspace = true } +buck2_offline_archive = { workspace = true } buck2_query_parser = { workspace = true } -buck2_util = { workspace = true } buck2_subscription_proto = { workspace = true } -buck2_offline_archive = { workspace = true } +buck2_util = { workspace = true } buck2_wrapper_common = { workspace = true } [dev-dependencies] diff --git a/app/buck2_client/src/args.rs b/app/buck2_client/src/args.rs deleted file mode 100644 index f2b3e4ae6a264..0000000000000 --- a/app/buck2_client/src/args.rs +++ /dev/null @@ -1,305 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -use std::fs::File; -use std::io; -use std::io::BufRead; -use std::path::Path; -use std::process::Command; -use std::str; - -use anyhow::Context as _; -use buck2_client_ctx::immediate_config::ImmediateConfigContext; -use buck2_core::fs::fs_util; -use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; -use buck2_core::is_open_source; -use buck2_util::process::background_command; -use termwiz::istty::IsTty; -use thiserror::Error; - -#[derive(Error, Debug)] -enum ArgExpansionError { - #[error("Missing flag file path after --flagfile argument")] - MissingFlagFilePath, - #[error("Unable to read flag file at `{path}`")] - MissingFlagFileOnDisk { source: anyhow::Error, path: String }, - #[error("Unable to read line in flag file `{path}`")] - FlagFileReadError { source: anyhow::Error, path: String }, - #[error("Python mode file `{path}` output is not UTF-8")] - PythonOutputNotUtf8 { path: String }, - #[error("No flag file path after @ symbol in argfile argument")] - MissingFlagFilePathInArgfile, - #[error("Python argfile at `{path}` exited with non-zero status, stderr: {err:?}")] - PythonExecutableFailed { path: String, err: String }, - #[error("Python argfile command ({cmd:?}) execution failed")] - PythonExecutionFailed { source: io::Error, cmd: Command }, - #[error("Unable to read line from stdin")] - StdinReadError { source: anyhow::Error }, -} - -/// Log that a relative flag file was not found in CWD, but was found, and used, from the cell root -/// -/// This prints directly to stderr (sometimes in color). This should be safe, because flagfile -/// expansion runs *very* early in the CLI process lifetime. -pub fn log_relative_path_from_cell_root(requested_path: &str) -> anyhow::Result<()> { - let (prefix, reset) = if io::stderr().is_tty() { - ("\x1b[33m", "\x1b[0m") - } else { - ("WARNING: ", "") - }; - buck2_client_ctx::eprintln!( - "{}`@{}` was specified, but not found. Using file at `//{}`.", - prefix, - requested_path, - requested_path - )?; - buck2_client_ctx::eprintln!( - "This behavior is being deprecated. Please use `@//{}` instead{}", - requested_path, - reset - )?; - Ok(()) -} - -#[derive(Clone, Debug)] -enum ArgFile { - PythonExecutable(AbsNormPathBuf, Option), - Path(AbsNormPathBuf), - Stdin, -} - -// Expands any argfiles passed as command line parameters. There are -// two ways to do: `@argfile` or `--flagfile PATH`. -// -// Caveats: -// - `--` and `--flagfile` cannot be values of other options -// - `--flagfile=X` is _not_ supported, you need to pass -// `--flagfile X` instead. -// - `--flagfil` is _not_ supported. -// -// TODO: This function should also return tracking information, so -// that we know where args come from. This would be useful -// in cases where the argfiles contain `--config` flags. 
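-//
-// Example invocations of the two supported forms (paths here are illustrative):
-//
-//   buck2 build @mode/opt //some:target
-//   buck2 build --flagfile mode/opt //some:target
-//
-// `--flagfile=mode/opt`, by contrast, is not expanded and is passed through as a
-// regular argument.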
-pub fn expand_argfiles_with_context( - args: Vec, - context: &mut ImmediateConfigContext, -) -> anyhow::Result> { - let mut expanded_args = Vec::new(); - let mut arg_iterator = args.into_iter(); - - while let Some(next_arg) = arg_iterator.next() { - match next_arg.as_str() { - "--" => { - expanded_args.push(next_arg); - expanded_args.extend(arg_iterator); - break; - } - "--flagfile" => { - let flagfile = match arg_iterator.next() { - Some(val) => val, - None => return Err(anyhow::anyhow!(ArgExpansionError::MissingFlagFilePath)), - }; - // TODO: We want to detect cyclic inclusion - let expanded_flagfile_args = resolve_and_expand_argfile(&flagfile, context)?; - expanded_args.extend(expanded_flagfile_args); - } - next_arg if next_arg.starts_with('@') => { - let flagfile = next_arg.strip_prefix('@').unwrap(); - if flagfile.is_empty() { - return Err(anyhow::anyhow!( - ArgExpansionError::MissingFlagFilePathInArgfile - )); - } - // TODO: We want to detect cyclic inclusion - let expanded_flagfile_args = resolve_and_expand_argfile(flagfile, context)?; - expanded_args.extend(expanded_flagfile_args); - } - _ => expanded_args.push(next_arg), - } - } - - Ok(expanded_args) -} - -// Resolves a path argument to an absolute path, reads the flag file and expands -// it into a list of arguments. -fn resolve_and_expand_argfile( - path: &str, - context: &mut ImmediateConfigContext, -) -> anyhow::Result> { - let flagfile = resolve_flagfile(path, context) - .with_context(|| format!("Error resolving flagfile `{}`", path))?; - let flagfile_lines = expand_argfile_contents(&flagfile)?; - expand_argfiles_with_context(flagfile_lines, context) -} - -fn expand_argfile_contents(flagfile: &ArgFile) -> anyhow::Result> { - match flagfile { - ArgFile::Path(path) => { - let mut lines = Vec::new(); - let file = - File::open(path).map_err(|source| ArgExpansionError::MissingFlagFileOnDisk { - source: source.into(), - path: path.to_string_lossy().into_owned(), - })?; - let reader = io::BufReader::new(file); - for line_result in reader.lines() { - let line = line_result.map_err(|source| ArgExpansionError::FlagFileReadError { - source: source.into(), - path: path.to_string_lossy().into_owned(), - })?; - if line.is_empty() { - continue; - } - lines.push(line); - } - Ok(lines) - } - ArgFile::PythonExecutable(path, flag) => { - let mut cmd = background_command(if is_open_source() { - "python3" - } else { - "fbpython" - }); - cmd.env("BUCK2_ARG_FILE", "1"); - cmd.arg(path.as_os_str()); - if let Some(flag) = flag.as_deref() { - cmd.args(["--flavors", flag]); - } - let cmd_out = cmd - .output() - .map_err(|source| ArgExpansionError::PythonExecutionFailed { cmd, source })?; - if cmd_out.status.success() { - Ok(str::from_utf8(&cmd_out.stdout) - .map_err(|_| ArgExpansionError::PythonOutputNotUtf8 { - path: path.to_string_lossy().into_owned(), - })? - .lines() - .filter(|line| !line.is_empty()) - .map(|s| s.to_owned()) - .collect::>()) - } else { - Err(anyhow::anyhow!(ArgExpansionError::PythonExecutableFailed { - path: path.to_string_lossy().into_owned(), - err: String::from_utf8_lossy(&cmd_out.stderr).to_string(), - })) - } - } - ArgFile::Stdin => io::stdin() - .lock() - .lines() - .filter_map(|line| match line { - Ok(x) if x.is_empty() => None, - Ok(x) => Some(Ok(x)), - Err(err) => Some(Err(ArgExpansionError::StdinReadError { - source: err.into(), - } - .into())), - }) - .collect(), - } -} - -// Resolves a path argument to an absolute path, so that it can be read. 
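-// A path of `-` reads the argfile contents from stdin, and a trailing `#flag` on a
-// `.py` argfile is forwarded to the Python script as `--flavors flag`.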
-fn resolve_flagfile(path: &str, context: &mut ImmediateConfigContext) -> anyhow::Result { - if path == "-" { - return Ok(ArgFile::Stdin); - } - - let (path_part, flag) = match path.split_once('#') { - Some((pypath, pyflag)) => (pypath, Some(pyflag)), - None => (path, None), - }; - - let resolved_path = if let Some(cell_resolved_path) = context.resolve_cell_path_arg(path_part) { - cell_resolved_path.context("Error resolving cell path")? - } else { - let p = Path::new(path_part); - if !p.is_absolute() { - match context.canonicalize(p) { - Ok(abs_path) => Ok(abs_path), - Err(original_error) => { - let cell_relative_path = context.resolve_cell_path("", path_part)?; - // If the relative path does not exist relative to the cwd, - // attempt to make it relative to the cell root. If *that* - // doesn't exist, just report the original error back, and - // don't tip users off that they can use relative-to-cell paths. - // We want to deprecate that. - match fs_util::try_exists(&cell_relative_path) { - Ok(true) => { - log_relative_path_from_cell_root(path_part)?; - Ok(cell_relative_path) - } - _ => Err(ArgExpansionError::MissingFlagFileOnDisk { - source: original_error, - path: p.to_string_lossy().into_owned(), - }), - } - } - }? - } else { - AbsNormPathBuf::try_from(p.to_owned())? - } - }; - - context.push_trace(&resolved_path); - if path_part.ends_with(".py") { - Ok(ArgFile::PythonExecutable( - resolved_path, - flag.map(ToOwned::to_owned), - )) - } else { - Ok(ArgFile::Path(resolved_path)) - } -} - -#[cfg(test)] -mod tests { - use buck2_client_ctx::immediate_config::ImmediateConfigContext; - use buck2_core::fs::fs_util; - use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; - use buck2_core::fs::paths::abs_path::AbsPath; - use buck2_core::fs::working_dir::WorkingDir; - - use super::*; - - #[test] - fn test_expand_argfile_content() { - let tempdir = tempfile::tempdir().unwrap(); - let root = AbsPath::new(tempdir.path()).unwrap(); - let mode_file = root.join("mode-file"); - // Test skips empty lines. - fs_util::write(&mode_file, "a\n\nb\n").unwrap(); - let lines = expand_argfile_contents(&ArgFile::Path( - AbsNormPathBuf::from(mode_file.to_string_lossy().into_owned()).unwrap(), - )) - .unwrap(); - assert_eq!(vec!["a".to_owned(), "b".to_owned()], lines); - } - - #[test] - fn test_relative_inclusion() { - // Currently all @-files both on the command line and in files are relative to the current directory. - // This matches gcc/clang, so write a test we don't inadvertantly change it. - let tempdir = tempfile::tempdir().unwrap(); - let root = AbsPath::new(tempdir.path()).unwrap(); - fs_util::create_dir(root.join("foo")).unwrap(); - fs_util::create_dir(root.join("foo/bar")).unwrap(); - fs_util::write(root.join("foo/bar/arg1.txt"), "@bar/arg2.txt").unwrap(); - fs_util::write(root.join("foo/bar/arg2.txt"), "--magic").unwrap(); - fs_util::write(root.join(".buckconfig"), "[repositories]\nroot = .").unwrap(); - let cwd = WorkingDir::unchecked_new( - AbsNormPathBuf::new(root.canonicalize().unwrap().join("foo")).unwrap(), - ); - let mut context = ImmediateConfigContext::new(&cwd); - let res = - expand_argfiles_with_context(vec!["@bar/arg1.txt".to_owned()], &mut context).unwrap(); - assert_eq!(res, vec!["--magic".to_owned()]); - } -} diff --git a/app/buck2_client/src/commands.rs b/app/buck2_client/src/commands.rs new file mode 100644 index 0000000000000..4d0e3977ad0ac --- /dev/null +++ b/app/buck2_client/src/commands.rs @@ -0,0 +1,34 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub mod build; +pub mod bxl; +pub mod clean; +pub mod clean_stale; +pub mod ctargets; +pub mod debug; +pub mod expand_external_cell; +pub mod explain; +pub mod help_env; +pub mod init; +pub mod install; +pub mod kill; +pub mod killall; +pub mod log; +pub mod lsp; +pub mod profile; +pub mod query; +pub mod rage; +pub mod root; +pub mod run; +pub mod server; +pub mod status; +pub mod subscribe; +pub mod targets; +pub mod test; diff --git a/app/buck2_client/src/commands/build.rs b/app/buck2_client/src/commands/build.rs index 830923e3a5cef..b6afe34c86d51 100644 --- a/app/buck2_client/src/commands/build.rs +++ b/app/buck2_client/src/commands/build.rs @@ -7,110 +7,55 @@ * of this source tree. */ -use std::borrow::Cow; -use std::collections::HashMap; -use std::io; use std::io::Write; -use std::path::Path; +use std::path::PathBuf; use anyhow::Context; use async_trait::async_trait; use buck2_cli_proto::build_request::build_providers; use buck2_cli_proto::build_request::BuildProviders; use buck2_cli_proto::build_request::ResponseOptions; -use buck2_cli_proto::build_target::BuildOutput; use buck2_cli_proto::BuildRequest; use buck2_cli_proto::BuildTarget; +use buck2_cli_proto::TargetCfg; use buck2_client_ctx::client_ctx::ClientCommandContext; use buck2_client_ctx::command_outcome::CommandOutcome; +use buck2_client_ctx::common::build::CommonBuildOptions; +use buck2_client_ctx::common::build::CommonOutputOptions; +use buck2_client_ctx::common::target_cfg::TargetCfgWithUniverseOptions; +use buck2_client_ctx::common::ui::CommonConsoleOptions; use buck2_client_ctx::common::CommonBuildConfigurationOptions; -use buck2_client_ctx::common::CommonBuildOptions; use buck2_client_ctx::common::CommonCommandOptions; -use buck2_client_ctx::common::CommonConsoleOptions; -use buck2_client_ctx::common::CommonDaemonCommandOptions; +use buck2_client_ctx::common::CommonEventLogOptions; +use buck2_client_ctx::common::CommonStarlarkOptions; +use buck2_client_ctx::common::PrintOutputsFormat; use buck2_client_ctx::daemon::client::BuckdClientConnector; use buck2_client_ctx::daemon::client::NoPartialResultHandler; use buck2_client_ctx::exit_result::ExitResult; -use buck2_client_ctx::exit_result::FailureExitCode; use buck2_client_ctx::final_console::FinalConsole; use buck2_client_ctx::output_destination_arg::OutputDestinationArg; +use buck2_client_ctx::path_arg::PathArg; use buck2_client_ctx::streaming::StreamingCommand; -use buck2_core::fs::async_fs_util; -use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; -use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; -use buck2_core::fs::project::ProjectRoot; -use buck2_core::fs::working_dir::WorkingDir; +use buck2_core::buck2_env_anyhow; use dupe::Dupe; -use futures::TryStreamExt; -use gazebo::prelude::*; -use multimap::MultiMap; -use serde::Serialize; + +use crate::commands::build::out::copy_to_out; +use crate::print::PrintOutputs; + +mod out; #[derive(Debug, clap::Parser)] #[clap(name = "build", about = "Build the specified targets")] pub struct BuildCommand { #[clap(flatten)] - common_opts: CommonCommandOptions, - - #[clap(flatten)] - build_opts: CommonBuildOptions, - - #[clap( - long, - short = 'u', - use_delimiter = true, - help = "Comma separated list of targets at which to root the queryable 
universe. - This is useful since targets can exist in multiple configurations." - )] - target_universe: Vec, - - /// This option does nothing. It is here to keep compatibility with Buck1 and ci - #[clap(long = "deep")] - #[allow(unused)] // for v1 compat - deep: bool, - - #[clap( - long = "show-output", - help = "Print the path to the output for each of the built rules relative to the project root" - )] - show_output: bool, - - #[clap( - long = "show-full-output", - help = "Print the absolute path to the output for each of the built rules" - )] - show_full_output: bool, - - #[clap( - long = "show-simple-output", - help = "Print only the path to the output for each of the built rules relative to the project root" - )] - show_simple_output: bool, - - #[clap( - long = "show-full-simple-output", - help = "Print only the absolute path to the output for each of the built rules" - )] - show_full_simple_output: bool, - - #[clap( - long = "show-json-output", - help = "Print the output paths relative to the cell, in JSON format" - )] - show_json_output: bool, - - #[clap( - long = "show-full-json-output", - help = "Print the output absolute paths, in JSON format" - )] - show_full_json_output: bool, + show_output: CommonOutputOptions, #[clap( long = "materializations", short = 'M', help = "Materialize (or skip) the final artifacts, bypassing buckconfig.", ignore_case = true, - arg_enum + value_enum )] materializations: Option, @@ -147,7 +92,6 @@ pub struct BuildCommand { #[clap( long, group = "test-info", - alias = "build-test-dependencies", help = "Build tests (this is not the default)" )] build_test_info: bool, @@ -168,6 +112,25 @@ pub struct BuildCommand { #[clap(name = "TARGET_PATTERNS", help = "Patterns to build")] patterns: Vec, + + #[clap( + long, + help = "Experimental: Path to a file where the Buck2 daemon should write a list of produced artifacts in json format" + )] + output_hashes_file: Option, + + /// This option does nothing. 
It is here to keep compatibility with Buck1 and ci + #[clap(long = "deep", hide = true)] + _deep: bool, + + #[clap(flatten)] + build_opts: CommonBuildOptions, + + #[clap(flatten)] + target_cfg: TargetCfgWithUniverseOptions, + + #[clap(flatten)] + common_opts: CommonCommandOptions, } impl BuildCommand { @@ -191,9 +154,21 @@ impl BuildCommand { } build_providers::Action::Skip } + + pub(crate) fn patterns(&self) -> &Vec { + &self.patterns + } + + pub(crate) fn target_universe(&self) -> &Vec { + &self.target_cfg.target_universe + } + + pub(crate) fn target_cfg(&self) -> TargetCfg { + self.target_cfg.target_cfg.target_cfg().clone() + } } -#[derive(Debug, Clone, Dupe, clap::ArgEnum)] +#[derive(Debug, Clone, Dupe, clap::ValueEnum)] #[clap(rename_all = "snake_case")] pub enum FinalArtifactMaterializations { All, @@ -217,9 +192,12 @@ impl MaterializationsToProto for Option { } } -pub fn print_build_result(console: &FinalConsole, error_messages: &[String]) -> anyhow::Result<()> { - for error_message in error_messages { - console.print_error(error_message)?; +pub fn print_build_result( + console: &FinalConsole, + errors: &[buck2_data::ErrorReport], +) -> anyhow::Result<()> { + for error in errors { + console.print_error(&error.message)?; } Ok(()) } @@ -242,27 +220,32 @@ impl StreamingCommand for BuildCommand { .build( BuildRequest { context: Some(context), - target_patterns: self - .patterns - .map(|p| buck2_data::TargetPattern { value: p.clone() }), + target_patterns: self.patterns.clone(), + target_cfg: Some(self.target_cfg.target_cfg.target_cfg()), build_providers: Some(BuildProviders { default_info: self.default_info() as i32, run_info: self.run_info() as i32, test_info: self.test_info() as i32, }), response_options: Some(ResponseOptions { - return_outputs: self.show_output - || self.show_full_output - || self.show_json_output - || self.show_full_json_output - || self.show_simple_output - || self.show_full_simple_output + return_outputs: self.show_output.format().is_some() || self.output_path.is_some(), return_default_other_outputs: show_default_other_outputs, }), build_opts: Some(self.build_opts.to_proto()), final_artifact_materializations: self.materializations.to_proto() as i32, - target_universe: self.target_universe, + target_universe: self.target_cfg.target_universe, + output_hashes_file: self + .output_hashes_file + .map(|p| { + p.resolve(&ctx.working_dir).into_string().with_context(|| { + format!( + "Failed to convert output hashes file path ({}) to string", + p.display() + ) + }) + }) + .transpose()?, }, ctx.stdin() .console_interaction_stream(&self.common_opts.console_opts), @@ -270,7 +253,7 @@ impl StreamingCommand for BuildCommand { ) .await; let success = match &result { - Ok(CommandOutcome::Success(response)) => response.error_messages.is_empty(), + Ok(CommandOutcome::Success(response)) => response.errors.is_empty(), Ok(CommandOutcome::Failure(_)) => false, Err(_) => false, }; @@ -287,16 +270,20 @@ impl StreamingCommand for BuildCommand { print_build_failed(&console)?; } - // Action errors will have already been printed, but any other type - // of error will be printed below the FAILED line here. + if buck2_env_anyhow!("BUCK2_TEST_BUILD_ERROR", bool, applicability = testing)? { + return anyhow::anyhow!("Injected Build Response Error").into(); + } + + // Most build errors are returned in the `result.errors` field, but some are not and printed + // here. 
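+        // (`result??` unwraps twice: the outer `Result` carries client/daemon
+        // communication errors, while the inner `CommandOutcome` early-returns a
+        // failing exit code when the command itself failed.)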
let response = result??; - print_build_result(&console, &response.error_messages)?; + print_build_result(&console, &response.errors)?; let mut stdout = Vec::new(); - if !response.serialized_build_report.is_empty() { - stdout.extend(response.serialized_build_report.as_bytes()); + if let Some(build_report) = response.serialized_build_report { + stdout.extend(build_report.as_bytes()); writeln!(&mut stdout)?; } @@ -312,40 +299,19 @@ impl StreamingCommand for BuildCommand { .context("Error requesting specific output path for --out")?; } - if self.show_output - || self.show_full_output - || self.show_json_output - || self.show_full_json_output - || self.show_simple_output - || self.show_full_simple_output - { + if let Some(format) = self.show_output.format() { print_outputs( &mut stdout, response.build_targets, - if self.show_full_output - || self.show_full_json_output - || self.show_full_simple_output - { - Some(response.project_root) - } else { - None - }, - if self.show_json_output || self.show_full_json_output { - PrintOutputsFormat::Json - } else if self.show_output || self.show_full_output { - PrintOutputsFormat::Plain - } else if self.show_simple_output || self.show_full_simple_output { - PrintOutputsFormat::Simple - } else { - panic!("Unhandled output type"); - }, + self.show_output.is_full().then_some(response.project_root), + format, show_default_other_outputs, )?; } ExitResult::success() } else { - ExitResult::failure() + ExitResult::from_errors(&response.errors) }; res.with_stdout(stdout) @@ -355,13 +321,17 @@ impl StreamingCommand for BuildCommand { &self.common_opts.console_opts } - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { + fn event_log_opts(&self) -> &CommonEventLogOptions { &self.common_opts.event_log_opts } - fn common_opts(&self) -> &CommonBuildConfigurationOptions { + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions { &self.common_opts.config_opts } + + fn starlark_opts(&self) -> &CommonStarlarkOptions { + &self.common_opts.starlark_opts + } } pub(crate) fn print_build_succeeded( @@ -378,77 +348,15 @@ pub(crate) fn print_build_failed(console: &FinalConsole) -> anyhow::Result<()> { console.print_error("BUILD FAILED") } -#[derive(Debug, PartialEq)] -pub(crate) enum PrintOutputsFormat { - Plain, - Json, - Simple, -} - pub(crate) fn print_outputs( - mut out: impl Write, + out: impl Write, targets: Vec, root_path: Option, format: PrintOutputsFormat, show_all_outputs: bool, ) -> anyhow::Result<()> { - #[derive(Serialize)] - #[serde(untagged)] - enum TargetOutputs { - AllOutputs(MultiMap), - DefaultOutput(HashMap), - } - - impl TargetOutputs { - fn all_outputs() -> Self { - Self::AllOutputs(MultiMap::new()) - } - - fn default_output() -> Self { - Self::DefaultOutput(HashMap::new()) - } - - fn insert(&mut self, target: String, output: String) { - match self { - TargetOutputs::AllOutputs(map) => { - map.insert(target, output); - } - TargetOutputs::DefaultOutput(map) => { - map.insert(target, output); - } - } - } - } - - let mut output_map = if show_all_outputs { - TargetOutputs::all_outputs() - } else { - TargetOutputs::default_output() - }; - let mut process_output = |target: &String, output: Option| -> anyhow::Result<()> { - let output = match output { - Some(output) => { - let output = if cfg!(windows) { - output.replace('/', "\\") - } else { - output - }; - match &root_path { - Some(root) => Path::new(&root).join(output).to_string_lossy().into_owned(), - None => output, - } - } - None => "".to_owned(), - }; - - match format { - 
PrintOutputsFormat::Json => output_map.insert(target.clone(), output), - PrintOutputsFormat::Plain => writeln!(&mut out, "{} {}", target, output)?, - PrintOutputsFormat::Simple => writeln!(&mut out, "{}", output)?, - } - - Ok(()) - }; + let root_path = root_path.map(PathBuf::from); + let mut print = PrintOutputs::new(out, root_path, format)?; for build_target in targets { // just print the default info for build command @@ -465,205 +373,19 @@ pub(crate) fn print_outputs( // We only print the default outputs when we don't `show_all_outputs`, // which shouldn't have more than one output. // (although we currently don't yet restrict this, but we should). - process_output(&build_target.target, None)?; + print.output(&build_target.target, None)?; continue; } for output in outputs { - process_output(&build_target.target, Some(output.path))?; - } - } - - if format == PrintOutputsFormat::Json { - serde_json::to_writer(&mut out, &output_map)?; - writeln!(&mut out)?; - } - - Ok(()) -} - -/// Given a list of targets built by this command, extracts a reasonable default output from the list and writes it -/// to the path given by `out`. -/// -/// In order to extract a "reasonable default output", this function will bail if any of the following are true: -/// 1. Multiple top-level targets were built, in which case the correct output to write is ambiguous, -/// 2. A single top-level target was built, but it produced zero default outputs, -/// 3. A single top-level target was built, but it produced more than two default outputs -/// -/// Otherwise, we'll extract the single default output from the single top-level target and copy it to the output -/// path. If the given path is a directory then all output files will be copied inside of it. -/// -/// As a special case, `--out -` is interpreted as `--out /dev/stdout` and allows multiple output files to be -/// written to it. -async fn copy_to_out( - targets: &[BuildTarget], - root_path: &ProjectRoot, - working_dir: &WorkingDir, - out: &OutputDestinationArg, -) -> anyhow::Result<()> { - struct OutputToBeCopied { - from_path: AbsNormPathBuf, - is_dir: bool, - } - - let mut outputs_to_be_copied = Vec::new(); - for target in targets { - let default_outputs: Vec<&BuildOutput> = target - .outputs - .iter() - .filter(|output| { - output - .providers - .as_ref() - .map_or(true, |p| p.default_info && !p.other) - }) - .collect(); - - let single_default_output = match default_outputs.len() { - 0 => { - return Err(anyhow::anyhow!( - "target {} produced zero default outputs", - target.target - )); - } - 1 => &default_outputs[0], - n => { - return Err(anyhow::anyhow!( - "target {} produced {} outputs, choice of output is ambiguous", - target.target, - n - )); - } - }; - - let output_path = root_path - .root() - .join(ForwardRelativePath::new(&single_default_output.path)?); - let output_meta = tokio::fs::metadata(&output_path) - .await - .context("Error inspecting file metadata")?; - let is_dir = output_meta.is_dir(); - - outputs_to_be_copied.push(OutputToBeCopied { - from_path: output_path, - is_dir, - }); - } - - match out { - OutputDestinationArg::Stream => { - // Check no output is a directory. We allow outputting any number of - // files (including 0) to stdout. - if let Some(dir_i) = outputs_to_be_copied.iter().position(|o| o.is_dir) { - return Err(anyhow::anyhow!( - "target {} produces a default output that is a directory, and cannot be sent to stdout", - targets[dir_i].target, - )); - } - } - OutputDestinationArg::Path(..) 
=> { - // Check we are outputting exactly 1 target. Okay if directory. - if outputs_to_be_copied.len() != 1 { - return Err(anyhow::anyhow!( - "build command built multiple top-level targets, choice of output is ambiguous" - )); - } - } - } - - for to_be_copied in outputs_to_be_copied { - match out { - OutputDestinationArg::Stream => { - let mut file = async_fs_util::open(&to_be_copied.from_path).await?; - tokio::io::copy(&mut file, &mut tokio::io::stdout()) - .await - .map_err(convert_broken_pipe_error)?; - } - OutputDestinationArg::Path(path) => { - let path = path.resolve(working_dir); - if to_be_copied.is_dir { - copy_directory(&to_be_copied.from_path, &path).await?; - } else { - copy_file(&to_be_copied.from_path, &path).await?; - } - } - } - } - - Ok(()) -} - -/// Recursively copies a directory to the output path, rooted at `dst`. -#[async_recursion::async_recursion] -async fn copy_directory(src: &Path, dst: &Path) -> anyhow::Result<()> { - tokio::fs::create_dir_all(dst).await?; - let stream = tokio_stream::wrappers::ReadDirStream::new( - tokio::fs::read_dir(src) - .await - .context(format!("reading directory {:?}", src))?, - ) - .err_into::(); - stream - .try_for_each(|entry| async move { - if entry.file_type().await?.is_dir() { - copy_directory(&entry.path(), &dst.join(entry.file_name())) - .await - .context(format!("copying subdirectory {:?}", entry.path())) - } else { - tokio::fs::copy(&entry.path(), &dst.join(entry.file_name())) - .await - .context(format!("copying file {:?}", entry.path())) - .map(|_| ()) - } - }) - .await?; - - Ok(()) -} - -async fn copy_file(src: &Path, dst: &Path) -> anyhow::Result<()> { - if let Some(parent) = dst.parent() { - if !parent.exists() { - return Err(anyhow::anyhow!( - "Directory `{}` does not exist", - parent.display() - )); - } - } - let dest_path = match dst.is_dir() { - true => Cow::Owned(dst.join(src.file_name().context("Failed getting output name")?)), - false => Cow::Borrowed(dst), - }; - - // NOTE: We don't do the overwrite since we might be writing to e.g. a pipe here and we can't - // do an atomic move into it. - match tokio::fs::copy(src, &dest_path).await { - Ok(..) => Ok(()), - Err(e) if e.raw_os_error() == Some(libc::ETXTBSY) => { - let dir = dest_path.parent().context("Output path has no parent")?; - let mut tmp_name = dest_path - .file_name() - .context("Output path has no file name")? - .to_owned(); - tmp_name.push(".buck2.tmp"); - let tmp_path = dir.join(tmp_name); - tokio::fs::copy(src, &tmp_path).await?; - tokio::fs::rename(&tmp_path, dest_path).await?; - Ok(()) + print.output(&build_target.target, Some(&output.path))?; } - Err(e) => Err(convert_broken_pipe_error(e)), } -} -fn convert_broken_pipe_error(e: io::Error) -> anyhow::Error { - if e.kind() == io::ErrorKind::BrokenPipe { - anyhow::Error::new(FailureExitCode::OutputFileBrokenPipe) - } else { - anyhow::Error::new(e).context("Error writing build artifact to --out") - } + print.finish() } #[cfg(test)] -mod test { +mod tests { use assert_matches::assert_matches; use build_providers::Action; use clap::Parser; @@ -671,7 +393,7 @@ mod test { use super::*; fn parse(args: &[&str]) -> anyhow::Result { - Ok(BuildCommand::from_iter_safe( + Ok(BuildCommand::try_parse_from( std::iter::once("program").chain(args.iter().copied()), )?) } @@ -713,10 +435,6 @@ mod test { let opts = parse(&["--build-test-info"])?; assert_eq!(opts.test_info(), Action::BuildIfAvailable); - // Legacy flag from before we could configure the other options. 
-        let opts = parse(&["--build-test-dependencies"])?;
-        assert_eq!(opts.test_info(), Action::BuildIfAvailable);
-
         Ok(())
     }
 
@@ -742,41 +460,4 @@
         Ok(())
     }
-
-    #[cfg(unix)]
-    mod unix {
-        use assert_matches::assert_matches;
-        use tokio::process::Command;
-
-        use super::*;
-
-        #[tokio::test]
-        async fn test_copy_file() -> anyhow::Result<()> {
-            let dir = tempfile::tempdir()?;
-            let out = dir.path().join("sleep");
-
-            let res = Command::new("cp")
-                .arg(Path::new("/bin/sleep"))
-                .arg(&out)
-                .spawn()?
-                .wait()
-                .await?;
-
-            assert!(res.success());
-
-            let mut proc = Command::new(&out)
-                .arg("10000")
-                .kill_on_drop(true)
-                .spawn()
-                .context("Error spawning")?;
-
-            // This will fail if we don't handle ETXTBSY.
-            copy_file(Path::new("/bin/sleep"), &out).await?;
-
-            // Check that our sleep didn't end
-            assert_matches!(proc.try_wait(), Ok(None));
-
-            Ok(())
-        }
-    }
 }
diff --git a/app/buck2_client/src/commands/build/out.rs b/app/buck2_client/src/commands/build/out.rs
new file mode 100644
index 0000000000000..d49e4b3a71fd7
--- /dev/null
+++ b/app/buck2_client/src/commands/build/out.rs
@@ -0,0 +1,552 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::borrow::Cow;
+use std::io;
+use std::path::Path;
+
+use anyhow::Context;
+use buck2_cli_proto::build_target::BuildOutput;
+use buck2_cli_proto::BuildTarget;
+use buck2_client_ctx::exit_result::ClientIoError;
+use buck2_client_ctx::output_destination_arg::OutputDestinationArg;
+use buck2_core::fs::async_fs_util;
+use buck2_core::fs::fs_util;
+use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf;
+use buck2_core::fs::paths::abs_path::AbsPath;
+use buck2_core::fs::paths::abs_path::AbsPathBuf;
+use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath;
+use buck2_core::fs::project::ProjectRoot;
+use buck2_core::fs::working_dir::WorkingDir;
+use futures::TryStreamExt;
+
+#[derive(Clone)]
+struct CopyContext {
+    // Any symlink pointing outside of this path will be copied/recreated as an absolute symlink to the original target.
+    // Should be canonicalized.
+    relative_symlink_boundary: AbsNormPathBuf,
+}
+
+/// Given a list of targets built by this command, extracts a reasonable default output from the list and writes it
+/// to the path given by `out`.
+///
+/// In order to extract a "reasonable default output", this function will bail if any of the following are true:
+/// 1. Multiple top-level targets were built, in which case the correct output to write is ambiguous,
+/// 2. A single top-level target was built, but it produced zero default outputs,
+/// 3. A single top-level target was built, but it produced more than one default output
+///
+/// Otherwise, we'll extract the single default output from the single top-level target and copy it to the output
+/// path. If the given path is a directory then all output files will be copied inside of it.
+///
+/// Symbolic links are preserved. However, if a relative symlink within one of the outputs points outside that output,
+/// it will be converted into an absolute link pointing to the same target as the original link.
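+/// (For example, if a copied output contains a link `a -> ../b` whose target resolves outside
+/// that output, the copy of `a` becomes an absolute symlink to the original `b`.)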
+/// On Windows, relative symlinks are always converted to absolute ones, though the same rule determines whether the
+/// copied link points at the original target or at a copied one.
+///
+/// As a special case, `--out -` is interpreted as `--out /dev/stdout` and allows multiple output files to be
+/// written to it.
+pub(super) async fn copy_to_out(
+    targets: &[BuildTarget],
+    root_path: &ProjectRoot,
+    working_dir: &WorkingDir,
+    out: &OutputDestinationArg,
+) -> anyhow::Result<()> {
+    struct OutputToBeCopied {
+        from_path: AbsNormPathBuf,
+        is_dir: bool,
+    }
+
+    let mut outputs_to_be_copied = Vec::new();
+    for target in targets {
+        let default_outputs: Vec<&BuildOutput> = target
+            .outputs
+            .iter()
+            .filter(|output| {
+                output
+                    .providers
+                    .as_ref()
+                    .map_or(true, |p| p.default_info && !p.other)
+            })
+            .collect();
+
+        let single_default_output = match default_outputs.len() {
+            0 => {
+                return Err(anyhow::anyhow!(
+                    "target {} produced zero default outputs",
+                    target.target
+                ));
+            }
+            1 => &default_outputs[0],
+            n => {
+                return Err(anyhow::anyhow!(
+                    "target {} produced {} outputs, choice of output is ambiguous",
+                    target.target,
+                    n
+                ));
+            }
+        };
+
+        let output_path = root_path
+            .root()
+            .join(ForwardRelativePath::new(&single_default_output.path)?);
+        let output_meta = tokio::fs::metadata(&output_path)
+            .await
+            .context("Error inspecting file metadata")?;
+        let is_dir = output_meta.is_dir();
+
+        outputs_to_be_copied.push(OutputToBeCopied {
+            from_path: output_path,
+            is_dir,
+        });
+    }
+
+    match out {
+        OutputDestinationArg::Stream => {
+            // Check no output is a directory. We allow outputting any number of
+            // files (including 0) to stdout.
+            if let Some(dir_i) = outputs_to_be_copied.iter().position(|o| o.is_dir) {
+                return Err(anyhow::anyhow!(
+                    "target {} produces a default output that is a directory, and cannot be sent to stdout",
+                    targets[dir_i].target,
+                ));
+            }
+        }
+        OutputDestinationArg::Path(..) => {
+            // Check we are outputting exactly 1 target. Okay if directory.
+            if outputs_to_be_copied.len() != 1 {
+                return Err(anyhow::anyhow!(
+                    "build command built multiple top-level targets, choice of output is ambiguous"
+                ));
+            }
+        }
+    }
+
+    for to_be_copied in outputs_to_be_copied {
+        match out {
+            OutputDestinationArg::Stream => {
+                let mut file = async_fs_util::open(&to_be_copied.from_path).await?;
+                tokio::io::copy(&mut file, &mut tokio::io::stdout())
+                    .await
+                    .map_err(convert_broken_pipe_error)?;
+            }
+            OutputDestinationArg::Path(path) => {
+                let path = path.resolve(working_dir);
+                if to_be_copied.is_dir {
+                    let context = CopyContext {
+                        relative_symlink_boundary: fs_util::canonicalize(&to_be_copied.from_path)?,
+                    };
+                    copy_directory(&to_be_copied.from_path, &path, &context).await?;
+                } else {
+                    copy_file(&to_be_copied.from_path, &path).await?;
+                }
+            }
+        }
+    }
+
+    Ok(())
+}
+
+fn copy_symlink<P: AsRef<AbsPath>, Q: AsRef<AbsPath>>(
+    src_path: P,
+    dst_path: Q,
+    context: &CopyContext,
+) -> anyhow::Result<()> {
+    // Make symlinks overwrite items which were already present at destination path
+    fs_util::remove_all(&dst_path).context(format!(
+        "Removing pre-existing item at path {:?}",
+        src_path.as_ref()
+    ))?;
+    let symlink_target_abs_path = fs_util::canonicalize(src_path.as_ref()).context(format!(
+        "Resolving symlink to be copied {:?}",
+        src_path.as_ref()
+    ))?;
+    // Now recreate the symlink
+    let symlink_target = {
+        if symlink_target_abs_path.starts_with(&context.relative_symlink_boundary) {
+            // Symlink is not pointing outside the original output we are copying.
+ // Just keep it as it is. + fs_util::read_link(&src_path).context(format!( + "Reading value of a symlink to be copied {:?}", + src_path.as_ref() + ))? + } else { + // Force "copied" symlink to be absolute as it points outside of original output we are copying. + symlink_target_abs_path.into_path_buf() + } + }; + fs_util::symlink(&symlink_target, &dst_path).context(format!( + "Creating symlink at {:?} pointing to {:?}", + dst_path.as_ref(), + &symlink_target + ))?; + + Ok(()) +} + +/// Recursively copies a directory to the output path, rooted at `dst`. +#[async_recursion::async_recursion] +async fn copy_directory< + P: AsRef + std::marker::Send, + Q: AsRef + std::marker::Send + std::marker::Copy + std::marker::Sync, +>( + src: P, + dst: Q, + context: &CopyContext, +) -> anyhow::Result<()> { + tokio::fs::create_dir_all(dst.as_ref()).await?; + let stream = tokio_stream::wrappers::ReadDirStream::new( + tokio::fs::read_dir(src.as_ref()) + .await + .context(format!("reading directory {:?}", src.as_ref()))?, + ) + .err_into::(); + + stream + .try_for_each(|entry| async move { + let entry_source_path = AbsPathBuf::new(entry.path())?; + let entry_destination_path = dst.as_ref().join(entry.file_name()); + let file_type = entry.file_type().await?; + if file_type.is_dir() { + copy_directory(&entry_source_path, &entry_destination_path, context) + .await + .context(format!("Copying subdirectory {:?}", entry.path())) + } else if file_type.is_symlink() { + let copy_context = context.clone(); + tokio::task::spawn_blocking(move || { + copy_symlink(&entry_source_path, &entry_destination_path, ©_context) + }) + .await + .context(format!("Copying symlink {:?}", &entry.path()))? + } else { + tokio::fs::copy(&entry.path(), &entry_destination_path) + .await + .context(format!("Copying file {:?}", entry.path())) + .map(|_| ()) + } + }) + .await?; + + Ok(()) +} + +async fn copy_file(src: &Path, dst: &Path) -> anyhow::Result<()> { + if let Some(parent) = dst.parent() { + if !parent.exists() { + tokio::fs::create_dir_all(parent).await?; + } + } + let dest_path = match dst.is_dir() { + true => Cow::Owned(dst.join(src.file_name().context("Failed getting output name")?)), + false => Cow::Borrowed(dst), + }; + + // NOTE: We don't do the overwrite since we might be writing to e.g. a pipe here and we can't + // do an atomic move into it. + match tokio::fs::copy(src, &dest_path).await { + Ok(..) => Ok(()), + Err(e) if e.raw_os_error() == Some(libc::ETXTBSY) => { + let dir = dest_path.parent().context("Output path has no parent")?; + let mut tmp_name = dest_path + .file_name() + .context("Output path has no file name")? 
+ .to_owned(); + tmp_name.push(".buck2.tmp"); + let tmp_path = dir.join(tmp_name); + tokio::fs::copy(src, &tmp_path).await?; + tokio::fs::rename(&tmp_path, dest_path).await?; + Ok(()) + } + Err(e) => Err(convert_broken_pipe_error(e)), + } +} + +fn convert_broken_pipe_error(e: io::Error) -> anyhow::Error { + anyhow::Error::new(ClientIoError::new(e)).context("Error writing build artifact to --out") +} + +#[cfg(test)] +mod tests { + #[cfg(unix)] + use std::path::PathBuf; + + use tempfile::TempDir; + + use super::*; + + #[tokio::test] + async fn test_copy_directory() -> anyhow::Result<()> { + let src_dir = tempfile::tempdir()?; + // + // ├── bar + // │ ├── qux + // │ │ └── buzz + // │ ├── foo_in_bar -> ../foo + // │ └── some_file + // ├── bax -> bar/qux + // ├── foo + // ├── foo_abs -> /foo + // └── fool -> foo + std::fs::create_dir_all(src_dir.path().join("bar/qux"))?; + std::fs::write(src_dir.path().join("foo"), "some content")?; + std::fs::write(src_dir.path().join("bar/some_file"), "more content")?; + std::fs::write(src_dir.path().join("bar/qux/buzz"), "even more")?; + let fool_path = src_dir.path().join("fool"); + let foo_abs_path = src_dir.path().join("foo_abs"); + let foo_in_bar_path = src_dir.path().join("bar/foo_in_bar"); + let bax_path = src_dir.path().join("bax"); + #[cfg(unix)] + { + std::os::unix::fs::symlink("foo", &fool_path)?; + std::os::unix::fs::symlink(src_dir.path().join("foo"), &foo_abs_path)?; + std::os::unix::fs::symlink("bar/qux", &bax_path)?; + std::os::unix::fs::symlink("../foo", &foo_in_bar_path)?; + } + #[cfg(windows)] + { + std::os::windows::fs::symlink_file("foo", &fool_path)?; + std::os::windows::fs::symlink_file(src_dir.path().join("foo"), &foo_abs_path)?; + std::os::windows::fs::symlink_dir("bar\\qux", &bax_path)?; + std::os::windows::fs::symlink_file("..\\foo", &foo_in_bar_path)?; + } + + let dst_dir = tempfile::tempdir()?; + let src_path = AbsPath::new(src_dir.path())?; + let dst_path = AbsPath::new(dst_dir.path())?; + let copy_context = CopyContext { + relative_symlink_boundary: fs_util::canonicalize(src_path)?, + }; + copy_directory(src_path, dst_path, ©_context).await?; + + assert!(dst_dir.path().join("foo").is_file()); + assert!(dst_dir.path().join("bar/some_file").is_file()); + assert!(dst_dir.path().join("bar/qux/buzz").is_file()); + + let copied_fool_path = std::fs::read_link(&dst_dir.path().join("fool"))?; + #[cfg(unix)] + { + assert_eq!(PathBuf::from("foo"), copied_fool_path); + } + #[cfg(windows)] + { + // Symlink value is canonicalized on Windows + let dst_dir_canon_path = fs_util::canonicalize(dst_path)?; + assert_eq!(dst_dir_canon_path.as_path().join("foo"), copied_fool_path); + } + + let copied_foo_abs_path = std::fs::read_link(&dst_dir.path().join("foo_abs"))?; + #[cfg(unix)] + { + assert_eq!(src_dir.path().join("foo"), copied_foo_abs_path); + } + #[cfg(windows)] + { + // Symlink value is canonicalized on Windows + let src_dir_canon_path = fs_util::canonicalize(src_path)?; + assert_eq!( + src_dir_canon_path.as_path().join("foo"), + copied_foo_abs_path + ); + } + + let copied_bax_path = std::fs::read_link(&dst_dir.path().join("bax"))?; + #[cfg(unix)] + { + assert_eq!(PathBuf::from("bar/qux"), copied_bax_path); + } + #[cfg(windows)] + { + // Symlink value is canonicalized on Windows + let dst_dir_canon_path = fs_util::canonicalize(dst_path)?; + assert_eq!( + dst_dir_canon_path.as_path().join("bar\\qux"), + copied_bax_path + ); + } + + let copied_foo_in_bar_path = std::fs::read_link(&dst_dir.path().join("bar/foo_in_bar"))?; + #[cfg(unix)] + { + 
assert_eq!(PathBuf::from("../foo"), copied_foo_in_bar_path); + } + #[cfg(windows)] + { + // Symlink value is canonicalized on Windows + let dst_dir_canon_path = fs_util::canonicalize(dst_path)?; + assert_eq!( + dst_dir_canon_path.as_path().join("foo"), + copied_foo_in_bar_path + ); + } + + // Second time to check everything overwrites fine + copy_directory(src_path, dst_path, ©_context) + .await + .context("copy second time")?; + + Ok(()) + } + + #[tokio::test] + async fn test_copy_directory_with_symlink_pointing_externally() -> anyhow::Result<()> { + let src_dir = tempfile::tempdir()?; + let src_path = fs_util::canonicalize(AbsPath::new(src_dir.path())?)?; + // + // ├── qux + // │ ├── foo -> ../foo + // │ └── bar -> ../bar + // ├── bar + // │ └── baz + // └── foo + std::fs::write(src_path.as_path().join("foo"), "some content")?; + std::fs::create_dir_all(src_path.as_path().join("bar"))?; + std::fs::write(src_path.as_path().join("bar/baz"), "some content")?; + std::fs::create_dir_all(src_path.as_path().join("qux"))?; + let foo_link_path = src_path.as_path().join("qux/foo"); + let bar_link_path = src_path.as_path().join("qux/bar"); + #[cfg(unix)] + { + std::os::unix::fs::symlink("../foo", &foo_link_path)?; + std::os::unix::fs::symlink("../bar", &bar_link_path)?; + } + #[cfg(windows)] + { + std::os::windows::fs::symlink_file("..\\foo", &foo_link_path)?; + std::os::windows::fs::symlink_dir("..\\bar", &bar_link_path)?; + } + let dst_dir = tempfile::tempdir()?; + let dst_path = AbsPath::new(dst_dir.path())?; + let copy_context = CopyContext { + relative_symlink_boundary: src_path.join(ForwardRelativePath::unchecked_new("qux")), + }; + copy_directory(&src_path, dst_path, ©_context).await?; + + // Check both symlinks are valid and are absolute. + + let copied_foo_target = std::fs::read_link(&dst_dir.path().join("qux/foo"))?; + assert_eq!(src_path.as_path().join("foo"), copied_foo_target); + + let copied_bar_target = std::fs::read_link(&dst_dir.path().join("qux/bar"))?; + assert_eq!(src_path.as_path().join("bar"), copied_bar_target); + + Ok(()) + } + + #[tokio::test] + async fn test_copy_symlink() -> anyhow::Result<()> { + test_copy_symlink_parametrized(&|_| Ok(()), false)?; + test_copy_symlink_parametrized(&|_| Ok(()), true)?; + // Check that symlink overwrites regular file in output directory + let setup_file_to_overwrite = |dst_dir: &TempDir| { + std::fs::write(dst_dir.path().join("foo_copy"), "some content")?; + Ok(()) + }; + test_copy_symlink_parametrized(&setup_file_to_overwrite, false)?; + test_copy_symlink_parametrized(&setup_file_to_overwrite, true)?; + // Check that symlink overwrites directory in output directory + let setup_directory_with_content_to_overwrite = |dst_dir: &TempDir| { + std::fs::create_dir(dst_dir.path().join("foo_copy"))?; + std::fs::write(dst_dir.path().join("foo_copy/some_file"), "some content")?; + Ok(()) + }; + test_copy_symlink_parametrized(&setup_directory_with_content_to_overwrite, false)?; + test_copy_symlink_parametrized(&setup_directory_with_content_to_overwrite, true)?; + Ok(()) + } + + fn test_copy_symlink_parametrized( + prepare_dst_dir: &dyn Fn(&TempDir) -> anyhow::Result<()>, + is_directory_symlink: bool, + ) -> anyhow::Result<()> { + let src_dir = tempfile::tempdir()?; + let src_symlink_target_path = src_dir.path().join("bar"); + let src_symlink_path = src_dir.path().join("foo"); + + if is_directory_symlink { + std::fs::create_dir(&src_symlink_target_path)?; + #[cfg(windows)] + { + std::os::windows::fs::symlink_dir("bar", &src_symlink_path)?; + } + } 
else { + std::fs::write(&src_symlink_target_path, "some content")?; + #[cfg(windows)] + { + std::os::windows::fs::symlink_file("bar", &src_symlink_path)?; + } + } + #[cfg(unix)] + { + std::os::unix::fs::symlink("bar", &src_symlink_path)?; + } + + let dst_dir = tempfile::tempdir()?; + prepare_dst_dir(&dst_dir)?; + + let dst_symlink_path = dst_dir.path().join("foo_copy"); + + copy_symlink( + AbsPath::new(&src_symlink_path)?, + AbsPath::new(&dst_symlink_path)?, + &CopyContext { + relative_symlink_boundary: fs_util::canonicalize(AbsPath::new(src_dir.path())?)?, + }, + )?; + + let target = std::fs::read_link(&dst_symlink_path)?; + #[cfg(unix)] + { + assert_eq!(PathBuf::from("bar"), target); + } + #[cfg(windows)] + { + // Symlink value is canonicalized on Windows + let dst_dir_canon_path = fs_util::canonicalize(AbsPath::new(dst_dir.path())?)?; + assert_eq!(dst_dir_canon_path.as_path().join("bar"), target); + } + + Ok(()) + } + + #[cfg(unix)] + mod unix { + + use assert_matches::assert_matches; + + use super::super::*; + + #[tokio::test] + async fn test_copy_file() -> anyhow::Result<()> { + let dir = tempfile::tempdir()?; + let out = dir.path().join("sleep"); + + let res = buck2_util::process::async_background_command("cp") + .arg(Path::new("/bin/sleep")) + .arg(&out) + .spawn()? + .wait() + .await?; + + assert!(res.success()); + + let mut proc = buck2_util::process::async_background_command(&out) + .arg("10000") + .kill_on_drop(true) + .spawn() + .context("Error spawning")?; + + // This will fail if we don't handle ETXTBSY. + copy_file(Path::new("/bin/sleep"), &out).await?; + + // Check that our sleep didn't end + assert_matches!(proc.try_wait(), Ok(None)); + + Ok(()) + } + } +} diff --git a/app/buck2_client/src/commands/bxl.rs b/app/buck2_client/src/commands/bxl.rs index 033fbb4117c41..febb1de9691fa 100644 --- a/app/buck2_client/src/commands/bxl.rs +++ b/app/buck2_client/src/commands/bxl.rs @@ -7,15 +7,19 @@ * of this source tree. 
 */
 
+use std::io::Write;
+
 use async_trait::async_trait;
 use buck2_cli_proto::BxlRequest;
 use buck2_client_ctx::client_ctx::ClientCommandContext;
 use buck2_client_ctx::command_outcome::CommandOutcome;
+use buck2_client_ctx::common::build::CommonBuildOptions;
+use buck2_client_ctx::common::target_cfg::TargetCfgOptions;
+use buck2_client_ctx::common::ui::CommonConsoleOptions;
 use buck2_client_ctx::common::CommonBuildConfigurationOptions;
-use buck2_client_ctx::common::CommonBuildOptions;
 use buck2_client_ctx::common::CommonCommandOptions;
-use buck2_client_ctx::common::CommonConsoleOptions;
-use buck2_client_ctx::common::CommonDaemonCommandOptions;
+use buck2_client_ctx::common::CommonEventLogOptions;
+use buck2_client_ctx::common::CommonStarlarkOptions;
 use buck2_client_ctx::daemon::client::BuckdClientConnector;
 use buck2_client_ctx::daemon::client::StdoutPartialResultHandler;
 use buck2_client_ctx::exit_result::ExitResult;
@@ -32,21 +36,21 @@ pub struct BxlCommand {
     #[clap(flatten)]
     bxl_opts: BxlCommandOptions,
 
+    #[clap(flatten)]
+    target_cfg: TargetCfgOptions,
+
     #[clap(flatten)]
     common_ops: CommonCommandOptions,
 }
 
 #[derive(Debug, clap::Parser)]
 pub struct BxlCommandOptions {
-    #[clap(flatten)]
-    build_opts: CommonBuildOptions,
-
     #[clap(
         long = "materializations",
         short = 'M',
         help = "Materialize (or skip) the final artifacts, bypassing buckconfig.",
         ignore_case = true,
-        arg_enum
+        value_enum
     )]
     materializations: Option<FinalArtifactMaterializations>,
 
@@ -68,8 +72,11 @@ pub struct BxlCommandOptions {
     /// Log format is JSONL, uncompressed if no known extensions are detected, or you can explicitly specify
     /// the compression via the file extension (ex: `.json-lines.gz` would be gzip compressed, `.json-lines.zst`
    /// would be zstd compressed). Resulting log is compatible with `buck2 log show-user`.
-    #[clap(value_name = "PATH", long = "--user-event-log")]
+    #[clap(value_name = "PATH", long = "user-event-log")]
    pub user_event_log: Option<PathArg>,
+
+    #[clap(flatten)]
+    build_opts: CommonBuildOptions,
 }
 
 #[async_trait]
@@ -91,6 +98,7 @@ impl StreamingCommand for BxlCommand {
                     bxl_label: self.bxl_opts.bxl_label,
                     bxl_args: self.bxl_opts.bxl_args,
                     build_opts: Some(self.bxl_opts.build_opts.to_proto()),
+                    target_cfg: Some(self.target_cfg.target_cfg()),
                     final_artifact_materializations: self.bxl_opts.materializations.to_proto()
                         as i32,
                     print_stacktrace: ctx.verbosity.print_success_stderr(),
@@ -101,7 +109,7 @@ impl StreamingCommand for BxlCommand {
             )
             .await;
         let success = match &result {
-            Ok(CommandOutcome::Success(response)) => response.error_messages.is_empty(),
+            Ok(CommandOutcome::Success(response)) => response.errors.is_empty(),
             _ => false,
         };
 
@@ -117,27 +125,36 @@
         // of error will be printed below the FAILED line here.
let response = result??; - print_build_result(&console, &response.error_messages)?; + print_build_result(&console, &response.errors)?; + let mut stdout = Vec::new(); + if let Some(build_report) = response.serialized_build_report { + stdout.extend(build_report.as_bytes()); + writeln!(&mut stdout)?; + } if !success { - return ExitResult::failure(); + return ExitResult::from_errors(&response.errors); } - ExitResult::success() + ExitResult::success().with_stdout(stdout) } fn console_opts(&self) -> &CommonConsoleOptions { &self.common_ops.console_opts } - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { + fn event_log_opts(&self) -> &CommonEventLogOptions { &self.common_ops.event_log_opts } - fn common_opts(&self) -> &CommonBuildConfigurationOptions { + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions { &self.common_ops.config_opts } + fn starlark_opts(&self) -> &CommonStarlarkOptions { + &self.common_ops.starlark_opts + } + fn user_event_log(&self) -> &Option { &self.bxl_opts.user_event_log } diff --git a/app/buck2_client/src/commands/clean.rs b/app/buck2_client/src/commands/clean.rs index cf63dd9b962f2..77d9b39a8e521 100644 --- a/app/buck2_client/src/commands/clean.rs +++ b/app/buck2_client/src/commands/clean.rs @@ -12,15 +12,17 @@ use std::sync::Mutex; use std::time::Duration; use anyhow::Context; -use buck2_client_ctx::argv::Argv; -use buck2_client_ctx::argv::SanitizedArgv; use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::common::target_cfg::TargetCfgUnusedOptions; use buck2_client_ctx::common::CommonCommandOptions; +use buck2_client_ctx::daemon::client::kill::kill_command_impl; use buck2_client_ctx::daemon::client::BuckdLifecycleLock; use buck2_client_ctx::exit_result::ExitResult; use buck2_client_ctx::final_console::FinalConsole; use buck2_client_ctx::startup_deadline::StartupDeadline; use buck2_client_ctx::streaming::BuckSubcommand; +use buck2_common::argv::Argv; +use buck2_common::argv::SanitizedArgv; use buck2_common::daemon_dir::DaemonDir; use buck2_core::fs::fs_util; use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; @@ -28,22 +30,17 @@ use buck2_core::fs::paths::abs_path::AbsPath; use buck2_core::fs::paths::abs_path::AbsPathBuf; use dupe::Dupe; use gazebo::prelude::SliceExt; -use humantime; use threadpool::ThreadPool; use walkdir::WalkDir; use crate::commands::clean_stale::parse_clean_stale_args; use crate::commands::clean_stale::CleanStaleCommand; -use crate::commands::kill::kill_command_impl; /// Delete generated files and caches. /// /// The command also kills the buck2 daemon. #[derive(Debug, clap::Parser)] pub struct CleanCommand { - #[clap(flatten)] - common_opts: CommonCommandOptions, - #[clap( long = "dry-run", help = "Performs a dry-run and prints the paths that would be removed." @@ -59,11 +56,24 @@ the specified duration, without killing the daemon", stale: Option>, // Like stale but since a specific timestamp, for testing - #[clap(long = "keep-since-time", conflicts_with = "stale", hidden = true)] + #[clap(long = "keep-since-time", conflicts_with = "stale", hide = true)] keep_since_time: Option, + /// Only considers tracked artifacts for cleanup. 
+ /// + /// `buck-out` can contain untracked artifacts for different reasons: + /// - Outputs from aborted actions + /// - State getting deleted (e.g., new buckversion that changes the on-disk state format) + /// - Writing to `buck-out` without being expected by Buck #[clap(long = "tracked-only", requires = "stale")] tracked_only: bool, + + /// Command doesn't need these flags, but they are used in mode files, so we need to keep them. + #[clap(flatten)] + _target_cfg: TargetCfgUnusedOptions, + + #[clap(flatten)] + common_opts: CommonCommandOptions, } impl CleanCommand { @@ -78,28 +88,33 @@ impl CleanCommand { return cmd.exec(matches, ctx); } - ctx.instant_command("clean", async move |ctx| { - let buck_out_dir = ctx.paths()?.buck_out_path(); - let daemon_dir = ctx.paths()?.daemon_dir()?; - let console = &self.common_opts.console_opts.final_console(); - - if self.dry_run { - return clean(buck_out_dir, daemon_dir, console, None).await; - } + ctx.instant_command( + "clean", + &self.common_opts.event_log_opts, + |ctx| async move { + let buck_out_dir = ctx.paths()?.buck_out_path(); + let daemon_dir = ctx.paths()?.daemon_dir()?; + let console = &self.common_opts.console_opts.final_console(); - // Kill the daemon and make sure a new daemon does not spin up while we're performing clean up operations - // This will ensure we have exclusive access to the directories in question - let lifecycle_lock = BuckdLifecycleLock::lock_with_timeout( - daemon_dir.clone(), - StartupDeadline::duration_from_now(Duration::from_secs(10))?, - ) - .await - .with_context(|| "Error locking buckd lifecycle.lock")?; - - kill_command_impl(&lifecycle_lock, "`buck2 clean` was invoked").await?; + if self.dry_run { + return clean(buck_out_dir, daemon_dir, console, None).await; + } - clean(buck_out_dir, daemon_dir, console, Some(&lifecycle_lock)).await - }) + // Kill the daemon and make sure a new daemon does not spin up while we're performing clean up operations + // This will ensure we have exclusive access to the directories in question + let lifecycle_lock = BuckdLifecycleLock::lock_with_timeout( + daemon_dir.clone(), + StartupDeadline::duration_from_now(Duration::from_secs(10))?, + ) + .await + .with_context(|| "Error locking buckd lifecycle.lock")?; + + kill_command_impl(&lifecycle_lock, "`buck2 clean` was invoked").await?; + + clean(buck_out_dir, daemon_dir, console, Some(&lifecycle_lock)).await + }, + ) + .into() } pub fn sanitize_argv(&self, argv: Argv) -> SanitizedArgv { @@ -115,12 +130,7 @@ async fn clean( lifecycle_lock: Option<&BuckdLifecycleLock>, ) -> anyhow::Result<()> { let mut paths_to_clean = Vec::new(); - // Try to clean EdenFS based buck-out first. For EdenFS based buck-out, "eden rm" - // is efficient. Notice eden rm will remove the buck-out root directory, - // but for the native fs, the buck-out root directory is kept. - if let Some(paths) = try_clean_eden_buck_out(&buck_out_dir, lifecycle_lock.is_none()).await? { - paths_to_clean = paths; - } else if buck_out_dir.exists() { + if buck_out_dir.exists() { paths_to_clean = collect_paths_to_clean(&buck_out_dir)?.map(|path| path.display().to_string()); if lifecycle_lock.is_some() { @@ -192,7 +202,8 @@ fn clean_buck_out(path: &AbsNormPathBuf) -> anyhow::Result<()> { let error = error.dupe(); thread_pool.execute(move || { // The wlak gives us back absolute paths since we give it absolute paths. 
- let res = AbsPath::new(dir_entry.path()).and_then(fs_util::remove_file); + let res = AbsPath::new(dir_entry.path()) + .and_then(|p| fs_util::remove_file(p).map_err(Into::into)); match res { Ok(_) => {} @@ -219,61 +230,3 @@ fn clean_buck_out(path: &AbsNormPathBuf) -> anyhow::Result<()> { Ok(()) } - -#[cfg(any(fbcode_build, cargo_internal_build))] -async fn try_clean_eden_buck_out( - buck_out: &AbsNormPathBuf, - dryrun: bool, -) -> anyhow::Result>> { - use std::process::Stdio; - - use buck2_execute::materialize::eden_api::is_recas_eden_mount; - use buck2_util::process::async_background_command; - - if !cfg!(unix) { - return Ok(None); - } - - if !is_recas_eden_mount(buck_out)? { - return Ok(None); - } - - // Run eden rm to rm a mount - let mut eden_rm_cmd = async_background_command("eden"); - eden_rm_cmd - .arg("rm") - .arg("-y") // No promot - .arg(buck_out.as_os_str()) - .current_dir("/") - .stdout(Stdio::piped()) - .stderr(Stdio::piped()); - - buck2_client_ctx::eprintln!( - "The following command will be executed: `eden rm -y {}`", - buck_out - )?; - - if !dryrun { - eden_rm_cmd - .spawn() - .context("Failed to start to remove EdenFS buck-out mount.")? - .wait() - .await - .context("Failed to remove EdenFS buck-out mount.")?; - - // eden rm might not delete the buck-out completed. - if buck_out.exists() { - fs_util::remove_dir(buck_out)?; - } - } - - Ok(Some(vec![buck_out.display().to_string()])) -} - -#[cfg(not(any(fbcode_build, cargo_internal_build)))] -async fn try_clean_eden_buck_out( - _buck_out: &AbsNormPathBuf, - _dryrun: bool, -) -> anyhow::Result>> { - Ok(None) -} diff --git a/app/buck2_client/src/commands/clean_stale.rs b/app/buck2_client/src/commands/clean_stale.rs index 122f9caf45a54..f7652ae8c0214 100644 --- a/app/buck2_client/src/commands/clean_stale.rs +++ b/app/buck2_client/src/commands/clean_stale.rs @@ -12,10 +12,11 @@ use async_trait::async_trait; use buck2_cli_proto::CleanStaleRequest; use buck2_cli_proto::CleanStaleResponse; use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::common::ui::CommonConsoleOptions; use buck2_client_ctx::common::CommonBuildConfigurationOptions; use buck2_client_ctx::common::CommonCommandOptions; -use buck2_client_ctx::common::CommonConsoleOptions; -use buck2_client_ctx::common::CommonDaemonCommandOptions; +use buck2_client_ctx::common::CommonEventLogOptions; +use buck2_client_ctx::common::CommonStarlarkOptions; use buck2_client_ctx::daemon::client::BuckdClientConnector; use buck2_client_ctx::daemon::client::NoPartialResultHandler; use buck2_client_ctx::exit_result::ExitResult; @@ -24,9 +25,9 @@ use chrono::DateTime; use chrono::Duration; use chrono::TimeZone; use chrono::Utc; -use humantime; /// Clean only old artifacts from a running buck daemon without killing the daemon. +/// This can be interrupted by other commands that run in parallel and request materialization. /// /// This is a separate command from CleanCommand even though it is invoked with /// a flag (--stale) on the clean subcommand, which is a bit weird. 
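To make the `--dry-run` contract above concrete, here is a stripped-down sketch (illustrative names only, not the buck2 implementation) of the collect-first, delete-later shape that `clean` follows once the daemon lifecycle lock is held:

```rust
use std::path::PathBuf;

// Collect everything up front so dry runs and real runs report the same
// paths, then delete only in the non-dry-run case.
fn clean_sketch(paths_to_clean: Vec<PathBuf>, dry_run: bool) -> std::io::Result<()> {
    for path in &paths_to_clean {
        println!("{}", path.display());
    }
    if dry_run {
        return Ok(());
    }
    for path in paths_to_clean {
        if path.is_dir() {
            std::fs::remove_dir_all(&path)?;
        } else {
            std::fs::remove_file(&path)?;
        }
    }
    Ok(())
}
```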
@@ -78,11 +79,8 @@ fn format_result_stats(stats: buck2_data::CleanStaleStats) -> String { stats.untracked_artifact_count, bytesize::to_string(stats.untracked_bytes, true), ); - if stats.cleaned_path_count > 0 || stats.cleaned_bytes > 0 { - output += &format!( - "Cleaned {} paths ({} artifacts)\n", - stats.cleaned_path_count, stats.cleaned_artifact_count, - ); + if stats.cleaned_artifact_count > 0 || stats.cleaned_bytes > 0 { + output += &format!("Cleaned {} paths\n", stats.cleaned_artifact_count,); output += &format!( "{} bytes cleaned ({})\n", stats.cleaned_bytes, @@ -152,11 +150,15 @@ impl StreamingCommand for CleanStaleCommand { &self.common_opts.console_opts } - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { + fn event_log_opts(&self) -> &CommonEventLogOptions { &self.common_opts.event_log_opts } - fn common_opts(&self) -> &CommonBuildConfigurationOptions { + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions { &self.common_opts.config_opts } + + fn starlark_opts(&self) -> &CommonStarlarkOptions { + &self.common_opts.starlark_opts + } } diff --git a/app/buck2_client/src/commands/ctargets.rs b/app/buck2_client/src/commands/ctargets.rs index d45ea3a2a3ef4..5c5b70d711a0c 100644 --- a/app/buck2_client/src/commands/ctargets.rs +++ b/app/buck2_client/src/commands/ctargets.rs @@ -11,23 +11,21 @@ use async_trait::async_trait; use buck2_cli_proto::ConfiguredTargetsRequest; use buck2_cli_proto::ConfiguredTargetsResponse; use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::common::target_cfg::TargetCfgOptions; +use buck2_client_ctx::common::ui::CommonConsoleOptions; use buck2_client_ctx::common::CommonBuildConfigurationOptions; use buck2_client_ctx::common::CommonCommandOptions; -use buck2_client_ctx::common::CommonConsoleOptions; -use buck2_client_ctx::common::CommonDaemonCommandOptions; +use buck2_client_ctx::common::CommonEventLogOptions; +use buck2_client_ctx::common::CommonStarlarkOptions; use buck2_client_ctx::daemon::client::BuckdClientConnector; use buck2_client_ctx::daemon::client::NoPartialResultHandler; use buck2_client_ctx::exit_result::ExitResult; use buck2_client_ctx::streaming::StreamingCommand; -use gazebo::prelude::SliceExt; /// Resolve target patterns to configured targets. #[derive(Debug, clap::Parser)] #[clap(name = "ctargets")] pub struct ConfiguredTargetsCommand { - #[clap(flatten)] - common_opts: CommonCommandOptions, - /// Skip missing targets from `BUCK` files when non-glob pattern is specified. /// This option does not skip missing packages /// and does not ignore errors of `BUCK` file evaluation. @@ -37,6 +35,12 @@ pub struct ConfiguredTargetsCommand { /// Patterns to interpret. 
#[clap(name = "TARGET_PATTERNS")] patterns: Vec<String>, + + #[clap(flatten)] + target_cfg: TargetCfgOptions, + + #[clap(flatten)] + common_opts: CommonCommandOptions, } #[async_trait] @@ -57,9 +61,8 @@ impl StreamingCommand for ConfiguredTargetsCommand { .ctargets( ConfiguredTargetsRequest { context, - target_patterns: self.patterns.map(|pat| buck2_data::TargetPattern { - value: pat.to_owned(), - }), + target_patterns: self.patterns, + target_cfg: Some(self.target_cfg.target_cfg()), skip_missing_targets: self.skip_missing_targets, }, ctx.stdin() @@ -77,11 +80,15 @@ impl StreamingCommand for ConfiguredTargetsCommand { &self.common_opts.console_opts } - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { + fn event_log_opts(&self) -> &CommonEventLogOptions { &self.common_opts.event_log_opts } - fn common_opts(&self) -> &CommonBuildConfigurationOptions { + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions { &self.common_opts.config_opts } + + fn starlark_opts(&self) -> &CommonStarlarkOptions { + &self.common_opts.starlark_opts + } } diff --git a/app/buck2_client/src/commands/debug.rs b/app/buck2_client/src/commands/debug.rs new file mode 100644 index 0000000000000..3300a90bb39be --- /dev/null +++ b/app/buck2_client/src/commands/debug.rs @@ -0,0 +1,137 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use allocator_stats::AllocatorStatsCommand; +use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::exit_result::ExitResult; +use buck2_client_ctx::streaming::BuckSubcommand; +use buck2_common::argv::Argv; +use buck2_common::argv::SanitizedArgv; +use chrome_trace::ChromeTraceCommand; +use crash::CrashCommand; +use dice_dump::DiceDumpCommand; +use file_status::FileStatusCommand; +use flush_dep_files::FlushDepFilesCommand; +use heap_dump::HeapDumpCommand; +use internal_version::InternalVersionCommand; +use materialize::MaterializeCommand; + +use crate::commands::debug::allocative::AllocativeCommand; +use crate::commands::debug::daemon_dir::DaemonDirCommand; +use crate::commands::debug::eval::EvalCommand; +use crate::commands::debug::exe::ExeCommand; +use crate::commands::debug::log_perf::LogPerfCommand; +use crate::commands::debug::paranoid::ParanoidCommand; +use crate::commands::debug::persist_event_logs::PersistEventLogsCommand; +use crate::commands::debug::set_log_filter::SetLogFilterCommand; +use crate::commands::debug::thread_dump::ThreadDumpCommand; +use crate::commands::debug::trace_io::TraceIoCommand; +use crate::commands::debug::upload_re_logs::UploadReLogsCommand; +use crate::commands::log::debug_replay::DebugReplayCommand; +use crate::commands::log::debug_what_ran::DebugWhatRanCommand; + +mod allocative; +mod allocator_stats; +mod chrome_trace; +mod crash; +mod daemon_dir; +mod dice_dump; +mod eval; +mod exe; +mod file_status; +mod flush_dep_files; +mod heap_dump; +mod internal_version; +mod log_perf; +mod materialize; +mod paranoid; +mod persist_event_logs; +mod set_log_filter; +mod thread_dump; +mod trace_io; +pub(crate) mod upload_re_logs; + +#[derive(Debug, clap::Parser)] +#[clap(about = "Hidden debug commands useful for testing buck2")] pub enum DebugCommand { + /// Deliberately crashes the Buck daemon, for testing purposes.
+ Crash(CrashCommand), + HeapDump(HeapDumpCommand), + /// Dumps allocator stats + AllocatorStats(AllocatorStatsCommand), + /// Dumps the DICE graph to a file on disk. + DiceDump(DiceDumpCommand), + #[clap(hide = true)] + Replay(DebugReplayCommand), + /// Prints the hash of the buck2 binary + InternalVersion(InternalVersionCommand), + /// Renders an event-log to a Chrome trace file for inspection with a browser. + ChromeTrace(ChromeTraceCommand), + /// Flushes all dep files known to Buck2. + FlushDepFiles(FlushDepFilesCommand), + /// Forces materialization of a path, even on the deferred materializer + Materialize(MaterializeCommand), + /// Uploads RE logs for a given RE session ID + UploadReLogs(UploadReLogsCommand), + /// Validates that Buck2 and disk agree on the state of files. + FileStatus(FileStatusCommand), + /// Shows the commands that buck ran + #[clap(alias = "whatran", hide = true)] + WhatRan(DebugWhatRanCommand), + /// Prints buck2 daemon directory (`~/.buckd/xxx`). + DaemonDir(DaemonDirCommand), + /// Prints buck2 executable (this executable) path. + Exe(ExeCommand), + Allocative(AllocativeCommand), + SetLogFilter(SetLogFilterCommand), + /// Summarizes event-log read performance + LogPerf(LogPerfCommand), + /// Interact with I/O tracing of the daemon. + TraceIo(TraceIoCommand), + #[doc(hidden)] + PersistEventLogs(PersistEventLogsCommand), + #[clap(subcommand)] + Paranoid(ParanoidCommand), + Eval(EvalCommand), + ThreadDump(ThreadDumpCommand), +} + +impl DebugCommand { + pub fn exec(self, matches: &clap::ArgMatches, ctx: ClientCommandContext<'_>) -> ExitResult { + let matches = matches.subcommand().expect("subcommand not found").1; + match self { + DebugCommand::DiceDump(cmd) => cmd.exec(matches, ctx), + DebugCommand::Crash(cmd) => cmd.exec(matches, ctx), + DebugCommand::HeapDump(cmd) => cmd.exec(matches, ctx), + DebugCommand::AllocatorStats(cmd) => cmd.exec(matches, ctx), + DebugCommand::Replay(cmd) => cmd.exec(matches, ctx), + DebugCommand::InternalVersion(cmd) => cmd.exec(matches, ctx), + DebugCommand::ChromeTrace(cmd) => cmd.exec(matches, ctx), + DebugCommand::FlushDepFiles(cmd) => cmd.exec(matches, ctx), + DebugCommand::WhatRan(cmd) => cmd.exec(matches, ctx), + DebugCommand::Materialize(cmd) => cmd.exec(matches, ctx), + DebugCommand::UploadReLogs(cmd) => cmd.exec(matches, ctx), + DebugCommand::DaemonDir(cmd) => cmd.exec(matches, ctx), + DebugCommand::Exe(cmd) => cmd.exec(matches, ctx), + DebugCommand::Allocative(cmd) => cmd.exec(matches, ctx), + DebugCommand::SetLogFilter(cmd) => cmd.exec(matches, ctx), + DebugCommand::FileStatus(cmd) => cmd.exec(matches, ctx), + DebugCommand::LogPerf(cmd) => cmd.exec(matches, ctx), + DebugCommand::TraceIo(cmd) => cmd.exec(matches, ctx), + DebugCommand::PersistEventLogs(cmd) => cmd.exec(matches, ctx), + DebugCommand::Paranoid(cmd) => cmd.exec(matches, ctx), + DebugCommand::Eval(cmd) => cmd.exec(matches, ctx), + DebugCommand::ThreadDump(cmd) => cmd.exec(matches, ctx), + } + } + + pub fn sanitize_argv(&self, argv: Argv) -> SanitizedArgv { + argv.no_need_to_sanitize() + } +} diff --git a/app/buck2_client/src/commands/debug/allocative.rs b/app/buck2_client/src/commands/debug/allocative.rs index 481af7d4999ab..31299f24bf530 100644 --- a/app/buck2_client/src/commands/debug/allocative.rs +++ b/app/buck2_client/src/commands/debug/allocative.rs @@ -10,9 +10,10 @@ use async_trait::async_trait; use buck2_cli_proto::AllocativeRequest; use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::common::ui::CommonConsoleOptions; use 
buck2_client_ctx::common::CommonBuildConfigurationOptions; -use buck2_client_ctx::common::CommonConsoleOptions; -use buck2_client_ctx::common::CommonDaemonCommandOptions; +use buck2_client_ctx::common::CommonEventLogOptions; +use buck2_client_ctx::common::CommonStarlarkOptions; use buck2_client_ctx::daemon::client::BuckdClientConnector; use buck2_client_ctx::daemon::client::NoPartialResultHandler; use buck2_client_ctx::exit_result::ExitResult; @@ -66,11 +67,15 @@ impl StreamingCommand for AllocativeCommand { CommonConsoleOptions::default_ref() } - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { - CommonDaemonCommandOptions::default_ref() + fn event_log_opts(&self) -> &CommonEventLogOptions { + CommonEventLogOptions::default_ref() } - fn common_opts(&self) -> &CommonBuildConfigurationOptions { + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions { CommonBuildConfigurationOptions::default_ref() } + + fn starlark_opts(&self) -> &CommonStarlarkOptions { + CommonStarlarkOptions::default_ref() + } } diff --git a/app/buck2_client/src/commands/debug/allocator_stats.rs b/app/buck2_client/src/commands/debug/allocator_stats.rs index 25a348456bca1..d7579f894d280 100644 --- a/app/buck2_client/src/commands/debug/allocator_stats.rs +++ b/app/buck2_client/src/commands/debug/allocator_stats.rs @@ -10,9 +10,10 @@ use async_trait::async_trait; use buck2_cli_proto::UnstableAllocatorStatsRequest; use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::common::ui::CommonConsoleOptions; use buck2_client_ctx::common::CommonBuildConfigurationOptions; -use buck2_client_ctx::common::CommonConsoleOptions; -use buck2_client_ctx::common::CommonDaemonCommandOptions; +use buck2_client_ctx::common::CommonEventLogOptions; +use buck2_client_ctx::common::CommonStarlarkOptions; use buck2_client_ctx::daemon::client::BuckdClientConnector; use buck2_client_ctx::exit_result::ExitResult; use buck2_client_ctx::streaming::StreamingCommand; @@ -56,11 +57,15 @@ impl StreamingCommand for AllocatorStatsCommand { CommonConsoleOptions::none_ref() } - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { - CommonDaemonCommandOptions::default_ref() + fn event_log_opts(&self) -> &CommonEventLogOptions { + CommonEventLogOptions::default_ref() } - fn common_opts(&self) -> &CommonBuildConfigurationOptions { + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions { CommonBuildConfigurationOptions::default_ref() } + + fn starlark_opts(&self) -> &CommonStarlarkOptions { + CommonStarlarkOptions::default_ref() + } } diff --git a/app/buck2_client/src/commands/debug/chrome_trace.rs b/app/buck2_client/src/commands/debug/chrome_trace.rs index 6fb4794f6c76a..41e35d72cb55c 100644 --- a/app/buck2_client/src/commands/debug/chrome_trace.rs +++ b/app/buck2_client/src/commands/debug/chrome_trace.rs @@ -21,12 +21,12 @@ use anyhow::Context; use buck2_client_ctx::client_ctx::ClientCommandContext; use buck2_client_ctx::exit_result::ExitResult; use buck2_client_ctx::path_arg::PathArg; -use buck2_client_ctx::stream_value::StreamValue; -use buck2_client_ctx::subscribers::event_log::file_names::retrieve_nth_recent_log; -use buck2_client_ctx::subscribers::event_log::read::EventLogPathBuf; -use buck2_client_ctx::subscribers::event_log::utils::Invocation; use buck2_common::convert::ProstDurationExt; use buck2_core::fs::paths::abs_path::AbsPathBuf; +use buck2_event_log::file_names::retrieve_nth_recent_log; +use buck2_event_log::read::EventLogPathBuf; +use buck2_event_log::stream_value::StreamValue; +use 
buck2_event_log::utils::Invocation; use buck2_event_observer::display; use buck2_event_observer::display::TargetDisplayOptions; use buck2_events::BuckEvent; @@ -67,13 +67,17 @@ struct ChromeTraceFirstPass { /// Track assignment needs to know, when it sees a SpanStart, whether that /// span is going to be included in the final trace. /// But some spans need to be filtered based on later events, like: + /// /// 1. We shouldn't assign tracks to StartLoad events whose SpanEnd records /// a really short duration. + /// /// 2. We shouldn't assign tracks to ActionExecutionStart events which have /// no child LocalStage spans. + /// /// 3. (eventually) We should assign tracks to ActionExecutionStart events /// only if they appear in the CriticalPath, but the CriticalPath is one /// of the last events. + /// /// So this first pass builds up several lists of "interesting" span IDs. pub long_analyses: HashSet<SpanId>, pub long_loads: HashSet<SpanId>, @@ -366,7 +370,7 @@ where for (key, counter) in self.counters.iter_mut() { // TODO: With float counters this equality comparison seems sketchy. if counter.value == self.zero_value { - // If the counter is currently at its zero value, then emit the zero once, and thne + // If the counter is currently at its zero value, then emit the zero once, and then // stop emitting this counter altogether. if !counter.implicitly_zero { counters_to_output[key] = json!(counter.value); @@ -519,15 +523,15 @@ struct ChromeTraceWriter { unused_track_ids: HashMap, // Wrappers to contain values from InstantEvent.Data.Snapshot as a timeseries snapshot_counters: SimpleCounters<u64>, - max_rss_gigabytes_counter: SimpleCounters<f64>, + process_memory_counters: SimpleCounters<f64>, rate_of_change_counters: AverageRateOfChangeCounters, } #[derive(Copy, Clone, Dupe, Debug, Display, Hash, PartialEq, Eq)] enum SpanCategorization { - #[display(fmt = "uncategorized")] + #[display("uncategorized")] Uncategorized, - #[display(fmt = "critical-path")] + #[display("critical-path")] CriticalPath, } @@ -543,7 +547,7 @@ impl ChromeTraceWriter { unused_track_ids: HashMap::new(), span_counters: SpanCounters::new("spans"), snapshot_counters: SimpleCounters::<u64>::new("snapshot_counters", 0), - max_rss_gigabytes_counter: SimpleCounters::<f64>::new("max_rss", 0.0), + process_memory_counters: SimpleCounters::<f64>::new("process_memory", 0.0), rate_of_change_counters: AverageRateOfChangeCounters::new("rate_of_change_counters"), } } @@ -592,7 +596,7 @@ impl ChromeTraceWriter { .flush_all_to(&mut self.trace_events)?; self.snapshot_counters .flush_all_to(&mut self.trace_events)?; - self.max_rss_gigabytes_counter + self.process_memory_counters .flush_all_to(&mut self.trace_events)?; self.rate_of_change_counters .counters @@ -651,7 +655,7 @@ impl ChromeTraceWriter { }); enum Categorization<'a> { - /// Show this node on a speciifc tack + /// Show this node on a specific track Show { category: SpanCategorization, name: Cow<'a, str>, @@ -836,11 +840,18 @@ impl ChromeTraceWriter { data: Some(ref instant_data), }) => { if let buck2_data::instant_event::Data::Snapshot(_snapshot) = instant_data { - self.max_rss_gigabytes_counter.set( + self.process_memory_counters.set( event.timestamp(), "max_rss_gigabyte", (_snapshot.buck2_max_rss) as f64 / Self::BYTES_PER_GIGABYTE, )?; + if let Some(malloc_bytes_active) = _snapshot.malloc_bytes_active { + self.process_memory_counters.set( + event.timestamp(), + "malloc_active_gigabyte", + (malloc_bytes_active) as f64 / Self::BYTES_PER_GIGABYTE, + )?; + } self.rate_of_change_counters .set_average_rate_of_change_per_s(
event.timestamp(), @@ -930,9 +941,11 @@ impl ChromeTraceCommand { log_path: EventLogPathBuf, ) -> anyhow::Result<(Invocation, BoxStream<'static, anyhow::Result<BuckEvent>>)> { let (invocation, stream_values) = log_path.unpack_stream().await?; - let stream = stream_values.try_filter_map(async move |stream_value| match stream_value { - StreamValue::Event(e) => Ok(Some(BuckEvent::try_from(e)?)), - _ => Ok(None), + let stream = stream_values.try_filter_map(|stream_value| async move { + match stream_value { + StreamValue::Event(e) => Ok(Some(BuckEvent::try_from(e)?)), + _ => Ok(None), + } }); Ok((invocation, Box::pin(stream))) @@ -956,28 +969,24 @@ impl ChromeTraceCommand { pub fn exec(self, _matches: &clap::ArgMatches, ctx: ClientCommandContext<'_>) -> ExitResult { let log = match self.path { Some(path) => path.resolve(&ctx.working_dir), - None => retrieve_nth_recent_log(&ctx, self.recent.unwrap_or(0))? - .path() - .to_owned(), + None => retrieve_nth_recent_log( + ctx.paths().context("Error identifying log dir")?, + self.recent.unwrap_or(0), + )? + .path() + .to_owned(), }; let trace_path = self.trace_path.resolve(&ctx.working_dir); - let dest_path_result = if trace_path.is_dir() { - Self::trace_path_from_dir(trace_path, &log) + let dest_path = if trace_path.is_dir() { + Self::trace_path_from_dir(trace_path, &log).context("Could not determine trace path")? } else { - Ok(trace_path) + trace_path }; - let dest_path = match dest_path_result { - Ok(dest_path) => dest_path, - Err(e) => { - buck2_client_ctx::eprintln!("Could not determine trace path, {:#}", e)?; - return ExitResult::failure(); - } - }; let log = EventLogPathBuf::infer(log)?; - let writer = ctx.runtime.block_on(Self::trace_writer(log))?; + let writer = ctx.with_runtime(|_| Self::trace_writer(log))?; let tracefile = std::fs::OpenOptions::new() .create(true) diff --git a/app/buck2_client/src/commands/debug/crash.rs b/app/buck2_client/src/commands/debug/crash.rs index 1b7d52fd4b24d..e83bdf5a27e6c 100644 --- a/app/buck2_client/src/commands/debug/crash.rs +++ b/app/buck2_client/src/commands/debug/crash.rs @@ -10,24 +10,43 @@ use async_trait::async_trait; use buck2_cli_proto::UnstableCrashRequest; use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::common::ui::CommonConsoleOptions; use buck2_client_ctx::common::CommonBuildConfigurationOptions; -use buck2_client_ctx::common::CommonConsoleOptions; -use buck2_client_ctx::common::CommonDaemonCommandOptions; +use buck2_client_ctx::common::CommonEventLogOptions; +use buck2_client_ctx::common::CommonStarlarkOptions; use buck2_client_ctx::daemon::client::BuckdClientConnector; use buck2_client_ctx::exit_result::ExitResult; use buck2_client_ctx::streaming::StreamingCommand; +#[derive(Debug, Clone, clap::ValueEnum)] +enum CrashType { + Panic, + Abort, +} + +impl CrashType { + fn to_proto(&self) -> i32 { + let crash_type = match self { + CrashType::Panic => buck2_cli_proto::unstable_crash_request::CrashType::Panic, + CrashType::Abort => buck2_cli_proto::unstable_crash_request::CrashType::Abort, + }; + crash_type as i32 + } +} + #[derive(Debug, clap::Parser)] -pub struct CrashCommand {} +pub struct CrashCommand { + #[arg(value_enum)] + crash_type: CrashType, + /// Event-log options.
+ #[clap(flatten)] + pub event_log_opts: CommonEventLogOptions, +} #[async_trait] impl StreamingCommand for CrashCommand { const COMMAND_NAME: &'static str = "crash"; - fn existing_only() -> bool { - true - } - async fn exec_impl( self, buckd: &mut BuckdClientConnector, @@ -36,20 +55,26 @@ impl StreamingCommand for CrashCommand { ) -> ExitResult { let _err = buckd .with_flushing() - .unstable_crash(UnstableCrashRequest {}) + .unstable_crash(UnstableCrashRequest { + crash_type: self.crash_type.to_proto(), + }) .await; ExitResult::success() } fn console_opts(&self) -> &CommonConsoleOptions { - CommonConsoleOptions::simple_ref() + CommonConsoleOptions::default_ref() } - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { - CommonDaemonCommandOptions::default_ref() + fn event_log_opts(&self) -> &CommonEventLogOptions { + &self.event_log_opts } - fn common_opts(&self) -> &CommonBuildConfigurationOptions { + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions { CommonBuildConfigurationOptions::default_ref() } + + fn starlark_opts(&self) -> &CommonStarlarkOptions { + CommonStarlarkOptions::default_ref() + } } diff --git a/app/buck2_client/src/commands/debug/daemon_dir.rs b/app/buck2_client/src/commands/debug/daemon_dir.rs index 5af012c5efd52..bedd08e5f8007 100644 --- a/app/buck2_client/src/commands/debug/daemon_dir.rs +++ b/app/buck2_client/src/commands/debug/daemon_dir.rs @@ -16,7 +16,7 @@ pub struct DaemonDirCommand {} impl DaemonDirCommand { pub fn exec(self, _matches: &clap::ArgMatches, ctx: ClientCommandContext<'_>) -> ExitResult { - buck2_client_ctx::println!("{}", ctx.paths?.daemon_dir()?.path.display())?; + buck2_client_ctx::println!("{}", ctx.paths()?.daemon_dir()?.path.display())?; ExitResult::success() } } diff --git a/app/buck2_client/src/commands/debug/dice_dump.rs b/app/buck2_client/src/commands/debug/dice_dump.rs index 4965b96c9145c..1935e1313a410 100644 --- a/app/buck2_client/src/commands/debug/dice_dump.rs +++ b/app/buck2_client/src/commands/debug/dice_dump.rs @@ -11,9 +11,10 @@ use async_trait::async_trait; use buck2_cli_proto::unstable_dice_dump_request::DiceDumpFormat; use buck2_cli_proto::UnstableDiceDumpRequest; use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::common::ui::CommonConsoleOptions; use buck2_client_ctx::common::CommonBuildConfigurationOptions; -use buck2_client_ctx::common::CommonConsoleOptions; -use buck2_client_ctx::common::CommonDaemonCommandOptions; +use buck2_client_ctx::common::CommonEventLogOptions; +use buck2_client_ctx::common::CommonStarlarkOptions; use buck2_client_ctx::daemon::client::BuckdClientConnector; use buck2_client_ctx::exit_result::ExitResult; use buck2_client_ctx::path_arg::PathArg; @@ -65,11 +66,15 @@ impl StreamingCommand for DiceDumpCommand { CommonConsoleOptions::none_ref() } - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { - CommonDaemonCommandOptions::default_ref() + fn event_log_opts(&self) -> &CommonEventLogOptions { + CommonEventLogOptions::default_ref() } - fn common_opts(&self) -> &CommonBuildConfigurationOptions { + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions { CommonBuildConfigurationOptions::default_ref() } + + fn starlark_opts(&self) -> &CommonStarlarkOptions { + CommonStarlarkOptions::default_ref() + } }
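Stepping back to the counter logic in `chrome_trace.rs` above: the "emit the zero once, then stop" rule is what keeps flat counters from bloating the trace. Here is a self-contained sketch of that sparsification (assumed field names, not the buck2 `SimpleCounters` type):

```rust
use std::collections::HashMap;

struct Counter {
    value: u64,
    // True once we have already emitted this counter's zero value.
    implicitly_zero: bool,
}

// Emit each counter's current value, but collapse a run of zeros into a
// single zero sample so the resulting trace stays sparse.
fn flush_counters(
    counters: &mut HashMap<String, Counter>,
    zero_value: u64,
) -> HashMap<String, u64> {
    let mut out = HashMap::new();
    for (key, counter) in counters.iter_mut() {
        if counter.value == zero_value {
            if !counter.implicitly_zero {
                out.insert(key.clone(), counter.value); // emit the zero once
                counter.implicitly_zero = true; // then suppress further zeros
            }
        } else {
            counter.implicitly_zero = false;
            out.insert(key.clone(), counter.value);
        }
    }
    out
}
```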
diff --git a/app/buck2_client/src/commands/debug/eval.rs b/app/buck2_client/src/commands/debug/eval.rs index c2930e2a677ce..c7e23ce049891 100644 --- a/app/buck2_client/src/commands/debug/eval.rs +++ b/app/buck2_client/src/commands/debug/eval.rs @@ -11,10 +11,11 @@ use async_trait::async_trait; use buck2_cli_proto::new_generic::DebugEvalRequest; use buck2_cli_proto::new_generic::NewGenericRequest; use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::common::ui::CommonConsoleOptions; use buck2_client_ctx::common::CommonBuildConfigurationOptions; use buck2_client_ctx::common::CommonCommandOptions; -use buck2_client_ctx::common::CommonConsoleOptions; -use buck2_client_ctx::common::CommonDaemonCommandOptions; +use buck2_client_ctx::common::CommonEventLogOptions; +use buck2_client_ctx::common::CommonStarlarkOptions; use buck2_client_ctx::daemon::client::BuckdClientConnector; use buck2_client_ctx::exit_result::ExitResult; use buck2_client_ctx::path_arg::PathArg; @@ -27,12 +28,12 @@ use gazebo::prelude::SliceExt; /// Just evaluate and check evaluation does not fail. #[derive(Debug, clap::Parser)] pub struct EvalCommand { - #[clap(flatten)] - common_opts: CommonCommandOptions, - /// Module names to evaluate, e.g. `fbsource//foo/bar:baz`. #[clap(value_name = "PATH", required = true)] paths: Vec<PathArg>, + + #[clap(flatten)] + common_opts: CommonCommandOptions, } #[async_trait] @@ -73,11 +74,15 @@ impl StreamingCommand for EvalCommand { &self.common_opts.console_opts } - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { + fn event_log_opts(&self) -> &CommonEventLogOptions { &self.common_opts.event_log_opts } - fn common_opts(&self) -> &CommonBuildConfigurationOptions { + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions { &self.common_opts.config_opts } + + fn starlark_opts(&self) -> &CommonStarlarkOptions { + &self.common_opts.starlark_opts + } } diff --git a/app/buck2_client/src/commands/debug/file_status.rs b/app/buck2_client/src/commands/debug/file_status.rs index 76afa28ecd893..0d9b703b78a99 100644 --- a/app/buck2_client/src/commands/debug/file_status.rs +++ b/app/buck2_client/src/commands/debug/file_status.rs @@ -10,10 +10,11 @@ use async_trait::async_trait; use buck2_cli_proto::FileStatusRequest; use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::common::ui::CommonConsoleOptions; use buck2_client_ctx::common::CommonBuildConfigurationOptions; use buck2_client_ctx::common::CommonCommandOptions; -use buck2_client_ctx::common::CommonConsoleOptions; -use buck2_client_ctx::common::CommonDaemonCommandOptions; +use buck2_client_ctx::common::CommonEventLogOptions; +use buck2_client_ctx::common::CommonStarlarkOptions; use buck2_client_ctx::daemon::client::BuckdClientConnector; use buck2_client_ctx::daemon::client::StdoutPartialResultHandler; use buck2_client_ctx::exit_result::ExitResult; @@ -23,15 +24,15 @@ use gazebo::prelude::*; #[derive(Debug, clap::Parser)] pub struct FileStatusCommand { - #[clap(flatten)] - common_opts: CommonCommandOptions, - /// Paths to validate #[clap(value_name = "PATH", required = true)] paths: Vec<PathArg>, - #[clap(long, short)] - verbose: bool, + #[clap(long, short, help = "Print all matches")] + show_matches: bool, + + #[clap(flatten)] + common_opts: CommonCommandOptions, } #[async_trait] @@ -57,7 +58,7 @@ impl StreamingCommand for FileStatusCommand { paths: self .paths .try_map(|x| x.resolve(&ctx.working_dir).into_string())?, - verbose: self.verbose, + show_matches: self.show_matches, }, ctx.stdin() .console_interaction_stream(&self.common_opts.console_opts), @@ -72,11 +73,15 @@ impl StreamingCommand for FileStatusCommand { &self.common_opts.console_opts } - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { + fn event_log_opts(&self) -> 
&CommonEventLogOptions { &self.common_opts.event_log_opts } - fn common_opts(&self) -> &CommonBuildConfigurationOptions { + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions { &self.common_opts.config_opts } + + fn starlark_opts(&self) -> &CommonStarlarkOptions { + &self.common_opts.starlark_opts + } } diff --git a/app/buck2_client/src/commands/debug/flush_dep_files.rs b/app/buck2_client/src/commands/debug/flush_dep_files.rs index 0be4b7ad2b385..00740c7c61a50 100644 --- a/app/buck2_client/src/commands/debug/flush_dep_files.rs +++ b/app/buck2_client/src/commands/debug/flush_dep_files.rs @@ -10,15 +10,19 @@ use async_trait::async_trait; use buck2_cli_proto::FlushDepFilesRequest; use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::common::ui::CommonConsoleOptions; use buck2_client_ctx::common::CommonBuildConfigurationOptions; -use buck2_client_ctx::common::CommonConsoleOptions; -use buck2_client_ctx::common::CommonDaemonCommandOptions; +use buck2_client_ctx::common::CommonEventLogOptions; +use buck2_client_ctx::common::CommonStarlarkOptions; use buck2_client_ctx::daemon::client::BuckdClientConnector; use buck2_client_ctx::exit_result::ExitResult; use buck2_client_ctx::streaming::StreamingCommand; #[derive(Debug, clap::Parser)] -pub struct FlushDepFilesCommand {} +pub struct FlushDepFilesCommand { + #[clap(long, help = "Whether to retain locally produced dep files")] + retain_local: bool, +} #[async_trait] impl StreamingCommand for FlushDepFilesCommand { @@ -36,7 +40,9 @@ impl StreamingCommand for FlushDepFilesCommand { ) -> ExitResult { buckd .with_flushing() - .flush_dep_files(FlushDepFilesRequest {}) + .flush_dep_files(FlushDepFilesRequest { + retain_locally_produced_dep_files: self.retain_local, + }) .await??; ExitResult::success() } @@ -45,11 +51,15 @@ impl StreamingCommand for FlushDepFilesCommand { CommonConsoleOptions::simple_ref() } - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { - CommonDaemonCommandOptions::default_ref() + fn event_log_opts(&self) -> &CommonEventLogOptions { + CommonEventLogOptions::default_ref() } - fn common_opts(&self) -> &CommonBuildConfigurationOptions { + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions { CommonBuildConfigurationOptions::default_ref() } + + fn starlark_opts(&self) -> &CommonStarlarkOptions { + CommonStarlarkOptions::default_ref() + } } diff --git a/app/buck2_client/src/commands/debug/heap_dump.rs b/app/buck2_client/src/commands/debug/heap_dump.rs index 9e45b10cb6495..88e1bc558873e 100644 --- a/app/buck2_client/src/commands/debug/heap_dump.rs +++ b/app/buck2_client/src/commands/debug/heap_dump.rs @@ -10,9 +10,10 @@ use async_trait::async_trait; use buck2_cli_proto::UnstableHeapDumpRequest; use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::common::ui::CommonConsoleOptions; use buck2_client_ctx::common::CommonBuildConfigurationOptions; -use buck2_client_ctx::common::CommonConsoleOptions; -use buck2_client_ctx::common::CommonDaemonCommandOptions; +use buck2_client_ctx::common::CommonEventLogOptions; +use buck2_client_ctx::common::CommonStarlarkOptions; use buck2_client_ctx::daemon::client::connect::BuckdProcessInfo; use buck2_client_ctx::daemon::client::BuckdClientConnector; use buck2_client_ctx::exit_result::ExitResult; @@ -31,6 +32,10 @@ pub struct HeapDumpCommand { /// The path to write the heap dump to. #[clap(short, long, value_name = "PATH")] path: PathArg, + + /// The path to write the test executor's heap dump to.
+ #[clap(short, long, value_name = "TEST_PATH")] + test_executor_path: Option<PathArg>, } #[async_trait] @@ -48,10 +53,16 @@ impl StreamingCommand for HeapDumpCommand { ctx: &mut ClientCommandContext<'_>, ) -> ExitResult { let path = self.path.resolve(&ctx.working_dir); + let test_executor_path = self + .test_executor_path + .map(|path| path.resolve(&ctx.working_dir)); buckd .with_flushing() .unstable_heap_dump(UnstableHeapDumpRequest { destination_path: path.to_str()?.to_owned(), + test_executor_destination_path: test_executor_path + .map(|v| -> anyhow::Result<String> { Ok(v.to_str()?.to_owned()) }) + .transpose()?, }) .await?; @@ -63,7 +74,7 @@ Consider using this command to upload heap profile to Scuba:\n\ stackstoscuba --heap {} --heap_pid {}\n", path.to_str()?, - process_info.pid(), + process_info.pid()?, )?; } else { buck2_client_ctx::eprintln!("Heap dump written to `{}`", path.to_str()?)?; @@ -76,11 +87,15 @@ CommonConsoleOptions::none_ref() } - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { - CommonDaemonCommandOptions::default_ref() + fn event_log_opts(&self) -> &CommonEventLogOptions { + CommonEventLogOptions::default_ref() } - fn common_opts(&self) -> &CommonBuildConfigurationOptions { + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions { CommonBuildConfigurationOptions::default_ref() } + + fn starlark_opts(&self) -> &CommonStarlarkOptions { + CommonStarlarkOptions::default_ref() + } } diff --git a/app/buck2_client/src/commands/debug/log_perf.rs b/app/buck2_client/src/commands/debug/log_perf.rs index e972b1852511e..7586f820bdb54 100644 --- a/app/buck2_client/src/commands/debug/log_perf.rs +++ b/app/buck2_client/src/commands/debug/log_perf.rs @@ -9,7 +9,7 @@ use buck2_client_ctx::client_ctx::ClientCommandContext; use buck2_client_ctx::exit_result::ExitResult; -use buck2_client_ctx::subscribers::event_log::read::ReaderStats; +use buck2_event_log::read::ReaderStats; use tokio_stream::StreamExt; use crate::commands::log::options::EventLogOptions; @@ -17,12 +17,12 @@ use crate::commands::log::options::EventLogOptions; /// Measures the performance of reading an event log #[derive(Debug, clap::Parser)] pub struct LogPerfCommand { - #[clap(flatten)] - event_log: EventLogOptions, - /// Stats will be emitted every `interval` events.
#[clap(long, default_value = "10000")] interval: u64, + + #[clap(flatten)] + event_log: EventLogOptions, } impl LogPerfCommand { @@ -32,7 +32,7 @@ impl LogPerfCommand { interval, } = self; - ctx.with_runtime(async move |ctx| { + ctx.with_runtime(|ctx| async move { let log_path = event_log.get(&ctx).await?; let mut total_alloc = 0; diff --git a/app/buck2_client/src/commands/debug/materialize.rs b/app/buck2_client/src/commands/debug/materialize.rs index 913deb8026b56..7a8a3a0290867 100644 --- a/app/buck2_client/src/commands/debug/materialize.rs +++ b/app/buck2_client/src/commands/debug/materialize.rs @@ -11,22 +11,23 @@ use async_trait::async_trait; use buck2_cli_proto::new_generic::MaterializeRequest; use buck2_cli_proto::new_generic::NewGenericRequest; use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::common::ui::CommonConsoleOptions; use buck2_client_ctx::common::CommonBuildConfigurationOptions; use buck2_client_ctx::common::CommonCommandOptions; -use buck2_client_ctx::common::CommonConsoleOptions; -use buck2_client_ctx::common::CommonDaemonCommandOptions; +use buck2_client_ctx::common::CommonEventLogOptions; +use buck2_client_ctx::common::CommonStarlarkOptions; use buck2_client_ctx::daemon::client::BuckdClientConnector; use buck2_client_ctx::exit_result::ExitResult; use buck2_client_ctx::streaming::StreamingCommand; #[derive(Debug, clap::Parser)] pub struct MaterializeCommand { - #[clap(flatten)] - common_opts: CommonCommandOptions, - /// Paths to materialize, relative to project root #[clap(value_name = "PATH")] paths: Vec<String>, + + #[clap(flatten)] + common_opts: CommonCommandOptions, } #[async_trait] @@ -61,11 +62,15 @@ impl StreamingCommand for MaterializeCommand { &self.common_opts.console_opts } - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { + fn event_log_opts(&self) -> &CommonEventLogOptions { &self.common_opts.event_log_opts } - fn common_opts(&self) -> &CommonBuildConfigurationOptions { + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions { &self.common_opts.config_opts } + + fn starlark_opts(&self) -> &CommonStarlarkOptions { + &self.common_opts.starlark_opts + } } diff --git a/app/buck2_client/src/commands/debug/mod.rs b/app/buck2_client/src/commands/debug/mod.rs deleted file mode 100644 index 008a8b93579e4..0000000000000 --- a/app/buck2_client/src/commands/debug/mod.rs +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree.
- */ - -use allocator_stats::AllocatorStatsCommand; -use buck2_client_ctx::argv::Argv; -use buck2_client_ctx::argv::SanitizedArgv; -use buck2_client_ctx::client_ctx::ClientCommandContext; -use buck2_client_ctx::exit_result::ExitResult; -use buck2_client_ctx::streaming::BuckSubcommand; -use chrome_trace::ChromeTraceCommand; -use crash::CrashCommand; -use dice_dump::DiceDumpCommand; -use file_status::FileStatusCommand; -use flush_dep_files::FlushDepFilesCommand; -use heap_dump::HeapDumpCommand; -use internal_version::InternalVersionCommand; -use materialize::MaterializeCommand; - -use crate::commands::debug::allocative::AllocativeCommand; -use crate::commands::debug::daemon_dir::DaemonDirCommand; -use crate::commands::debug::eval::EvalCommand; -use crate::commands::debug::exe::ExeCommand; -use crate::commands::debug::log_perf::LogPerfCommand; -use crate::commands::debug::paranoid::ParanoidCommand; -use crate::commands::debug::persist_event_logs::PersistEventLogsCommand; -use crate::commands::debug::segfault::SegfaultCommand; -use crate::commands::debug::set_log_filter::SetLogFilterCommand; -use crate::commands::debug::trace_io::TraceIoCommand; -use crate::commands::debug::upload_re_logs::UploadReLogsCommand; -use crate::commands::log::debug_replay::DebugReplayCommand; -use crate::commands::log::debug_what_ran::DebugWhatRanCommand; - -mod allocative; -mod allocator_stats; -mod chrome_trace; -mod crash; -mod daemon_dir; -mod dice_dump; -mod eval; -mod exe; -mod file_status; -mod flush_dep_files; -mod heap_dump; -mod internal_version; -mod log_perf; -mod materialize; -mod paranoid; -mod persist_event_logs; -mod segfault; -mod set_log_filter; -mod trace_io; -pub(crate) mod upload_re_logs; - -#[derive(Debug, clap::Parser)] -#[clap(about = "Hidden debug commands useful for testing buck2")] -pub enum DebugCommand { - /// Deliberately crashes the Buck daemon, for testing purposes. - Crash(CrashCommand), - /// Causes a segfault in the daemon. - /// - /// Useful to make sure that we're reporting it correctly. - SegFault(SegfaultCommand), - HeapDump(HeapDumpCommand), - /// Dumps allocator stat - AllocatorStats(AllocatorStatsCommand), - /// Dump the DICE graph to a file and saves it to disk. - DiceDump(DiceDumpCommand), - #[clap(setting(clap::AppSettings::Hidden))] - Replay(DebugReplayCommand), - /// Prints the hash of the buck2 binary - InternalVersion(InternalVersionCommand), - /// Renders an event-log to a Chrome trace file for inspection with a browser. - ChromeTrace(ChromeTraceCommand), - /// Flushes all dep files known to Buck2. - FlushDepFiles(FlushDepFilesCommand), - /// Forces materialization of a path, even on the deferred materializer - Materialize(MaterializeCommand), - // Upload RE logs given an RE session ID - UploadReLogs(UploadReLogsCommand), - /// Validates that Buck2 and disk agree on the state of files. - FileStatus(FileStatusCommand), - /// Shows the commands that buck ran - #[clap(alias = "whatran", setting(clap::AppSettings::Hidden))] - WhatRan(DebugWhatRanCommand), - /// Prints buck2 daemon directory (`~/.buckd/xxx`). - DaemonDir(DaemonDirCommand), - /// Prints buck2 executable (this executable) path. - Exe(ExeCommand), - Allocative(AllocativeCommand), - SetLogFilter(SetLogFilterCommand), - /// Make sense of log perf - LogPerf(LogPerfCommand), - /// Interact with I/O tracing of the daemon. 
- TraceIo(TraceIoCommand), - #[doc(hidden)] - PersistEventLogs(PersistEventLogsCommand), - #[clap(subcommand)] - Paranoid(ParanoidCommand), - Eval(EvalCommand), -} - -impl DebugCommand { - pub fn exec(self, matches: &clap::ArgMatches, ctx: ClientCommandContext<'_>) -> ExitResult { - let matches = matches.subcommand().expect("subcommand not found").1; - match self { - DebugCommand::DiceDump(cmd) => cmd.exec(matches, ctx), - DebugCommand::Crash(cmd) => cmd.exec(matches, ctx), - DebugCommand::HeapDump(cmd) => cmd.exec(matches, ctx), - DebugCommand::AllocatorStats(cmd) => cmd.exec(matches, ctx), - DebugCommand::Replay(cmd) => cmd.exec(matches, ctx), - DebugCommand::InternalVersion(cmd) => cmd.exec(matches, ctx), - DebugCommand::ChromeTrace(cmd) => cmd.exec(matches, ctx), - DebugCommand::SegFault(cmd) => cmd.exec(matches, ctx), - DebugCommand::FlushDepFiles(cmd) => cmd.exec(matches, ctx), - DebugCommand::WhatRan(cmd) => cmd.exec(matches, ctx), - DebugCommand::Materialize(cmd) => cmd.exec(matches, ctx), - DebugCommand::UploadReLogs(cmd) => cmd.exec(matches, ctx), - DebugCommand::DaemonDir(cmd) => cmd.exec(matches, ctx), - DebugCommand::Exe(cmd) => cmd.exec(matches, ctx), - DebugCommand::Allocative(cmd) => cmd.exec(matches, ctx), - DebugCommand::SetLogFilter(cmd) => cmd.exec(matches, ctx), - DebugCommand::FileStatus(cmd) => cmd.exec(matches, ctx), - DebugCommand::LogPerf(cmd) => cmd.exec(matches, ctx), - DebugCommand::TraceIo(cmd) => cmd.exec(matches, ctx), - DebugCommand::PersistEventLogs(cmd) => cmd.exec(matches, ctx), - DebugCommand::Paranoid(cmd) => cmd.exec(matches, ctx), - DebugCommand::Eval(cmd) => cmd.exec(matches, ctx), - } - } - - pub fn sanitize_argv(&self, argv: Argv) -> SanitizedArgv { - argv.no_need_to_sanitize() - } -} diff --git a/app/buck2_client/src/commands/debug/persist_event_logs.rs b/app/buck2_client/src/commands/debug/persist_event_logs.rs index 04ccd3f26bc6e..d8a7de6bb7fef 100644 --- a/app/buck2_client/src/commands/debug/persist_event_logs.rs +++ b/app/buck2_client/src/commands/debug/persist_event_logs.rs @@ -10,23 +10,22 @@ use std::time::SystemTime; use anyhow::Context; -use buck2_client_ctx::chunk_reader::ChunkReader; use buck2_client_ctx::client_ctx::ClientCommandContext; use buck2_client_ctx::exit_result::ExitResult; -use buck2_client_ctx::manifold; -use buck2_client_ctx::manifold::ManifoldChunkedUploader; -use buck2_client_ctx::manifold::ManifoldClient; -use buck2_core::env_helper::EnvHelper; +use buck2_common::chunk_reader::ChunkReader; +use buck2_common::manifold; +use buck2_common::manifold::ManifoldChunkedUploader; +use buck2_common::manifold::ManifoldClient; use buck2_core::fs::paths::abs_path::AbsPathBuf; use buck2_core::soft_error; use buck2_data::instant_event::Data; use buck2_data::InstantEvent; -use buck2_data::PersistSubprocess; -use buck2_events::sink::scribe::new_thrift_scribe_sink_if_enabled; -use buck2_events::sink::scribe::ThriftScribeSink; +use buck2_data::PersistEventLogSubprocess; +use buck2_event_log::ttl::manifold_event_log_ttl; +use buck2_events::sink::remote::new_remote_event_sink_if_enabled; +use buck2_events::sink::remote::RemoteEventSink; use buck2_events::BuckEvent; use buck2_wrapper_common::invocation_id::TraceId; -use thiserror::Error; use tokio::fs::File; use tokio::fs::OpenOptions; use tokio::io; @@ -38,10 +37,9 @@ use tokio::time::sleep; use tokio::time::Duration; use tokio::time::Instant; -static MANIFOLD_TTL_S: EnvHelper<u64> = EnvHelper::new("BUCK2_TEST_MANIFOLD_TTL_S"); const MAX_WAIT: Duration = Duration::from_secs(5 * 60);
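The deleted `MANIFOLD_TTL_S` helper above was an env-var override for the upload TTL; its replacement, `manifold_event_log_ttl()`, centralizes that decision in `buck2_event_log`. A rough sketch of the override pattern it replaces (illustrative only, using `std::env` rather than the deleted `EnvHelper` type):

```rust
use std::time::Duration;

// Read an optional TTL override (in seconds) from the environment and fall
// back to a default when the variable is unset or unparsable.
fn manifold_ttl(default: Duration) -> Duration {
    std::env::var("BUCK2_TEST_MANIFOLD_TTL_S")
        .ok()
        .and_then(|raw| raw.parse::<u64>().ok())
        .map(Duration::from_secs)
        .unwrap_or(default)
}
```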
-#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] pub(crate) enum PersistEventLogError { #[error("Read more bytes than are available")] ReadBytesOverflow, @@ -59,49 +57,56 @@ pub struct PersistEventLogsCommand { local_path: AbsPathBuf, #[clap(long, help = "If present, only write to disk and don't upload")] no_upload: bool, - #[clap(long, help = "Allow vpnless")] - allow_vpnless: bool, + #[clap( + long, + help = "UUID of invocation that called this subcommand for logging purposes" + )] + trace_id: TraceId, } impl PersistEventLogsCommand { pub fn exec(self, _matches: &clap::ArgMatches, ctx: ClientCommandContext<'_>) -> ExitResult { buck2_core::facebook_only(); let sink = create_scribe_sink(&ctx)?; - ctx.with_runtime(async move |mut ctx| { + let trace_id = self.trace_id.clone(); + ctx.with_runtime(|mut ctx| async move { let mut stdin = io::BufReader::new(ctx.stdin()); - if let Err(e) = self.write_and_upload(&mut stdin).await { - dispatch_event_to_scribe( - sink.as_ref(), - &ctx.trace_id, - PersistSubprocess { - errors: vec![e.to_string()], - }, - ) - .await; - let _res = soft_error!(categorize_error(&e), e); + let (local_result, remote_result) = self.write_and_upload(&mut stdin).await; + + let (local_error_messages, local_error_category, local_success) = + status_from_result(local_result); + let (remote_error_messages, remote_error_category, remote_success) = + status_from_result(remote_result); + + let event_to_send = PersistEventLogSubprocess { + local_error_messages, + local_error_category, + local_success, + remote_error_messages, + remote_error_category, + remote_success, + metadata: buck2_events::metadata::collect(), }; + dispatch_event_to_scribe(sink.as_ref(), &trace_id, event_to_send).await; }); ExitResult::success() } - async fn write_and_upload(self, stdin: impl io::AsyncBufRead + Unpin) -> anyhow::Result<()> { + async fn write_and_upload( + self, + stdin: impl io::AsyncBufRead + Unpin, + ) -> (anyhow::Result<()>, anyhow::Result<()>) { let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); - let file = Mutex::new(create_log_file(self.local_path).await?); - + let file = match create_log_file(self.local_path).await { + Ok(f) => Mutex::new(f), + Err(e) => return (Err(e), Err(anyhow::anyhow!("Not tried"))), + }; let write = write_task(&file, tx, stdin); - let upload = upload_task( - &file, - rx, - self.manifold_name, - self.no_upload, - self.allow_vpnless, - ); + let upload = upload_task(&file, rx, self.manifold_name, self.no_upload); // Wait for both tasks to finish. 
If the upload fails we want to keep writing to disk let (write_result, upload_result) = tokio::join!(write, upload); - write_result?; - upload_result?; - Ok(()) + (write_result, upload_result) } } @@ -130,6 +135,7 @@ async fn write_task( async fn create_log_file(local_path: AbsPathBuf) -> Result<File, anyhow::Error> { let file = OpenOptions::new() .create(true) + .append(true) .write(true) .read(true) .open(&local_path) @@ -148,13 +154,12 @@ async fn upload_task( mut rx: tokio::sync::mpsc::UnboundedReceiver<u64>, manifold_name: String, no_upload: bool, - allow_vpnless: bool, ) -> anyhow::Result<()> { if no_upload { return Ok(()); } - let manifold_client = ManifoldClient::new(allow_vpnless)?; + let manifold_client = ManifoldClient::new().await?; let manifold_path = format!("flat/{}", manifold_name); let mut uploader = Uploader::new(file_mutex, &manifold_path, &manifold_client)?; @@ -211,12 +216,10 @@ impl<'a> Uploader<'a> { manifold_path: &'a str, manifold_client: &'a ManifoldClient, ) -> anyhow::Result<Self> { - let ttl = MANIFOLD_TTL_S.get_copied()?.map(manifold::Ttl::from_secs); - let manifold = manifold_client.start_chunked_upload( manifold::Bucket::EVENT_LOGS, manifold_path, - ttl.unwrap_or_default(), + manifold_event_log_ttl()?, ); Ok(Self { @@ -275,6 +278,21 @@ async fn write_to_file( Ok(()) } +fn status_from_result(res: anyhow::Result<()>) -> (Vec<String>, Option<String>, bool) { + // Returns a tuple of error messages, error category, and success/failure + if let Err(e) = res { + let status = ( + vec![e.to_string()], + Some(categorize_error(&e).to_owned()), + false, + ); + let _unused = soft_error!(categorize_error(&e), e.into()); + status + } else { + (vec![], None, true) + } +} + fn categorize_error(err: &anyhow::Error) -> &'static str { // This is for internal error tracking in `logview buck2` // Each category should point to 1 root cause @@ -305,11 +323,11 @@ } async fn dispatch_event_to_scribe( - sink: Option<&ThriftScribeSink>, + sink: Option<&RemoteEventSink>, invocation_id: &TraceId, - result: PersistSubprocess, + result: PersistEventLogSubprocess, ) { - let data = Some(Data::PersistSubprocess(result)); + let data = Some(Data::PersistEventLogSubprocess(result)); let event = InstantEvent { data }; if let Some(sink) = sink { sink.send_now(BuckEvent::new( @@ -325,8 +343,8 @@ }; } -fn create_scribe_sink(ctx: &ClientCommandContext) -> anyhow::Result<Option<ThriftScribeSink>> { - new_thrift_scribe_sink_if_enabled( +fn create_scribe_sink(ctx: &ClientCommandContext) -> anyhow::Result<Option<RemoteEventSink>> { + new_remote_event_sink_if_enabled( ctx.fbinit(), /* buffer size */ 100, /* retry_backoff */ Duration::from_millis(500), diff --git a/app/buck2_client/src/commands/debug/segfault.rs b/app/buck2_client/src/commands/debug/segfault.rs deleted file mode 100644 index 52c99023a62be..0000000000000 --- a/app/buck2_client/src/commands/debug/segfault.rs +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree.
- */ - -use async_trait::async_trait; -use buck2_cli_proto::SegfaultRequest; -use buck2_client_ctx::client_ctx::ClientCommandContext; -use buck2_client_ctx::common::CommonBuildConfigurationOptions; -use buck2_client_ctx::common::CommonConsoleOptions; -use buck2_client_ctx::common::CommonDaemonCommandOptions; -use buck2_client_ctx::daemon::client::BuckdClientConnector; -use buck2_client_ctx::exit_result::ExitResult; -use buck2_client_ctx::streaming::StreamingCommand; - -#[derive(Debug, clap::Parser)] -pub struct SegfaultCommand {} - -#[async_trait] -impl StreamingCommand for SegfaultCommand { - const COMMAND_NAME: &'static str = "SegFault"; - - async fn exec_impl( - self, - buckd: &mut BuckdClientConnector, - _matches: &clap::ArgMatches, - _ctx: &mut ClientCommandContext<'_>, - ) -> ExitResult { - let _err = buckd.with_flushing().segfault(SegfaultRequest {}).await; - ExitResult::success() - } - - fn console_opts(&self) -> &CommonConsoleOptions { - CommonConsoleOptions::default_ref() - } - - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { - CommonDaemonCommandOptions::default_ref() - } - - fn common_opts(&self) -> &CommonBuildConfigurationOptions { - CommonBuildConfigurationOptions::default_ref() - } -} diff --git a/app/buck2_client/src/commands/debug/set_log_filter.rs b/app/buck2_client/src/commands/debug/set_log_filter.rs index 652d1e69042ae..7fd05ed8b8109 100644 --- a/app/buck2_client/src/commands/debug/set_log_filter.rs +++ b/app/buck2_client/src/commands/debug/set_log_filter.rs @@ -31,7 +31,7 @@ pub struct SetLogFilterCommand { impl SetLogFilterCommand { pub fn exec(self, _matches: &clap::ArgMatches, ctx: ClientCommandContext<'_>) -> ExitResult { - ctx.with_runtime(async move |ctx| { + ctx.with_runtime(|ctx| async move { let mut buckd = ctx .connect_buckd(BuckdConnectOptions::existing_only_no_console()) .await?; diff --git a/app/buck2_client/src/commands/debug/thread_dump.rs b/app/buck2_client/src/commands/debug/thread_dump.rs new file mode 100644 index 0000000000000..35a9e94c080dd --- /dev/null +++ b/app/buck2_client/src/commands/debug/thread_dump.rs @@ -0,0 +1,45 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use anyhow::Context; +use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::daemon::client::connect::BuckdProcessInfo; +use buck2_client_ctx::exit_result::ExitCode; +use buck2_client_ctx::exit_result::ExitResult; + +use crate::commands::rage::thread_dump::thread_dump_command; + +/// Prints a thread dump of the currently running buck daemon to stdout +#[derive(Debug, clap::Parser)] +pub struct ThreadDumpCommand {} + +impl ThreadDumpCommand { + pub fn exec(self, _matches: &clap::ArgMatches, ctx: ClientCommandContext<'_>) -> ExitResult { + let paths = ctx.paths()?; + let daemon_dir = paths.daemon_dir()?; + let Ok(info) = BuckdProcessInfo::load(&daemon_dir) else { + buck2_client_ctx::eprintln!("No running buck daemon!")?; + return ExitResult::status(ExitCode::UserError); + }; + + ctx.with_runtime(|_| async move { + let status = thread_dump_command(&info)? + .spawn() + .context("Could not run LLDB to grab a thread-dump")? 
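// Editor's sketch: why `async move |ctx| { .. }` becomes `|ctx| async move { .. }`
// in several hunks of this diff. Async closures were unstable at the time, so a
// plain closure returning an `async move` block expresses the same thing on
// stable Rust. `with_runtime_sketch` is a stand-in for buck2's with_runtime;
// the `futures` crate is assumed for block_on.
fn with_runtime_sketch<F, Fut, R>(f: F) -> R
where
    F: FnOnce(u32) -> Fut,
    Fut: std::future::Future<Output = R>,
{
    futures::executor::block_on(f(42)) // a real runtime would live here
}

fn demo() -> u32 {
    with_runtime_sketch(|ctx| async move { ctx + 1 })
}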
+ .wait() + .await?; + if status.success() { + anyhow::Ok(ExitResult::success()) + } else { + // We don't capture stderr, so lldb should have printed an error + anyhow::Ok(ExitResult::status(ExitCode::InfraError)) + } + })? + } +} diff --git a/app/buck2_client/src/commands/debug/trace_io.rs b/app/buck2_client/src/commands/debug/trace_io.rs index ee180d5ad4da3..00e85f6770ef6 100644 --- a/app/buck2_client/src/commands/debug/trace_io.rs +++ b/app/buck2_client/src/commands/debug/trace_io.rs @@ -14,9 +14,10 @@ use buck2_cli_proto::TraceIoRequest; use buck2_cli_proto::TraceIoResponse; use buck2_client_ctx::client_ctx::ClientCommandContext; use buck2_client_ctx::command_outcome::CommandOutcome; +use buck2_client_ctx::common::ui::CommonConsoleOptions; use buck2_client_ctx::common::CommonBuildConfigurationOptions; -use buck2_client_ctx::common::CommonConsoleOptions; -use buck2_client_ctx::common::CommonDaemonCommandOptions; +use buck2_client_ctx::common::CommonEventLogOptions; +use buck2_client_ctx::common::CommonStarlarkOptions; use buck2_client_ctx::daemon::client::connect::DesiredTraceIoState; use buck2_client_ctx::daemon::client::BuckdClientConnector; use buck2_client_ctx::daemon::client::NoPartialResultHandler; @@ -170,11 +171,15 @@ impl StreamingCommand for TraceIoCommand { CommonConsoleOptions::default_ref() } - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { - CommonDaemonCommandOptions::default_ref() + fn event_log_opts(&self) -> &CommonEventLogOptions { + CommonEventLogOptions::default_ref() } - fn common_opts(&self) -> &CommonBuildConfigurationOptions { + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions { CommonBuildConfigurationOptions::default_ref() } + + fn starlark_opts(&self) -> &CommonStarlarkOptions { + CommonStarlarkOptions::default_ref() + } } diff --git a/app/buck2_client/src/commands/debug/upload_re_logs.rs b/app/buck2_client/src/commands/debug/upload_re_logs.rs index ee54d8f6e3989..20ebc9a11eafa 100644 --- a/app/buck2_client/src/commands/debug/upload_re_logs.rs +++ b/app/buck2_client/src/commands/debug/upload_re_logs.rs @@ -10,8 +10,8 @@ use async_compression::tokio::bufread::ZstdEncoder; use buck2_client_ctx::client_ctx::ClientCommandContext; use buck2_client_ctx::exit_result::ExitResult; -use buck2_client_ctx::manifold::Bucket; -use buck2_client_ctx::manifold::ManifoldClient; +use buck2_common::manifold::Bucket; +use buck2_common::manifold::ManifoldClient; use buck2_core::fs::async_fs_util; use buck2_core::fs::paths::abs_norm_path::AbsNormPath; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; @@ -22,9 +22,6 @@ use tokio::io::BufReader; pub struct UploadReLogsCommand { #[clap(long)] session_id: String, - - #[clap(long)] - allow_vpnless: bool, } impl UploadReLogsCommand { @@ -32,8 +29,8 @@ impl UploadReLogsCommand { buck2_core::facebook_only(); // TODO: This should receive the path from the caller. - ctx.with_runtime(async move |ctx| { - let manifold = ManifoldClient::new(self.allow_vpnless)?; + ctx.with_runtime(|ctx| async move { + let manifold = ManifoldClient::new().await?; let re_logs_dir = ctx.paths()?.re_logs_dir(); upload_re_logs( &manifold, diff --git a/app/buck2_client/src/commands/expand_external_cell.rs b/app/buck2_client/src/commands/expand_external_cell.rs new file mode 100644 index 0000000000000..31351bd045ca9 --- /dev/null +++ b/app/buck2_client/src/commands/expand_external_cell.rs @@ -0,0 +1,90 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
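// Editor's sketch of the spawn-and-wait used by ThreadDumpCommand above,
// written directly against tokio's process API (tokio's "process" feature
// assumed). The lldb flags here are illustrative; the real command line comes
// from buck2's thread_dump_command helper.
async fn run_thread_dump(pid: u32) -> anyhow::Result<bool> {
    let status = tokio::process::Command::new("lldb")
        .arg("-p")
        .arg(pid.to_string())
        .args(["--batch", "-o", "thread backtrace all"])
        .spawn()? // stdout/stderr are inherited, as in the hunk above
        .wait()
        .await?;
    Ok(status.success()) // on failure, lldb has already printed its own error
}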
+ * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use buck2_cli_proto::new_generic::ExpandExternalCellRequest; +use buck2_cli_proto::new_generic::NewGenericRequest; +use buck2_cli_proto::new_generic::NewGenericResponse; +use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::common::ui::CommonConsoleOptions; +use buck2_client_ctx::common::CommonBuildConfigurationOptions; +use buck2_client_ctx::common::CommonEventLogOptions; +use buck2_client_ctx::common::CommonStarlarkOptions; +use buck2_client_ctx::daemon::client::BuckdClientConnector; +use buck2_client_ctx::exit_result::ExitResult; +use buck2_client_ctx::streaming::StreamingCommand; +use clap::ArgMatches; + +/// Expand the contents of an external cell into the repo. +/// +/// The contents are placed at the path you specified for this cell in your buckconfig. +/// +/// If you additionally remove the entry from the `external_cells` section of your buckconfig, you +/// can edit the files directly in the repo and see those edits reflected in your build. +/// +/// Note that this creates a point-in-time snapshot. The files in the repo will not be updated if +/// you eg change the git commit of the cell in the future. +#[derive(Debug, clap::Parser)] +#[clap(name = "expand-external-cell")] +pub struct ExpandExternalCellCommand { + cell: String, +} + +const REMINDER_TEXT: &str = "Reminder: For edits to the expanded cell to take effect on \ +your build, you must additionally remove the entry from the `external_cells` section of your \ +buckconfig"; + +#[async_trait::async_trait] +impl StreamingCommand for ExpandExternalCellCommand { + const COMMAND_NAME: &'static str = "expand-external-cell"; + + async fn exec_impl( + self, + buckd: &mut BuckdClientConnector, + matches: &ArgMatches, + ctx: &mut ClientCommandContext<'_>, + ) -> ExitResult { + let context = ctx.client_context(matches, &self)?; + let resp = buckd + .with_flushing() + .new_generic( + context, + NewGenericRequest::ExpandExternalCell(ExpandExternalCellRequest { + cell_name: self.cell, + }), + None, + ) + .await??; + let NewGenericResponse::ExpandExternalCell(resp) = resp else { + return ExitResult::bail("Unexpected response type from generic command"); + }; + + let stdout = format!( + "Expanded external cell to {}.\n\n{}", + resp.path, REMINDER_TEXT, + ); + + ExitResult::success().with_stdout(stdout.into_bytes()) + } + + fn console_opts(&self) -> &CommonConsoleOptions { + CommonConsoleOptions::default_ref() + } + + fn event_log_opts(&self) -> &CommonEventLogOptions { + CommonEventLogOptions::default_ref() + } + + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions { + CommonBuildConfigurationOptions::default_ref() + } + + fn starlark_opts(&self) -> &CommonStarlarkOptions { + CommonStarlarkOptions::default_ref() + } +} diff --git a/app/buck2_client/src/commands/explain.rs b/app/buck2_client/src/commands/explain.rs new file mode 100644 index 0000000000000..9443b257b013a --- /dev/null +++ b/app/buck2_client/src/commands/explain.rs @@ -0,0 +1,171 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
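// Editor's sketch: the `let .. else` guard ExpandExternalCellCommand uses to
// insist on the response variant it asked for. Types here are stand-ins for
// the buck2_cli_proto ones.
enum GenericResponseSketch {
    ExpandExternalCell(String),
    Other,
}

fn unpack(resp: GenericResponseSketch) -> Result<String, &'static str> {
    let GenericResponseSketch::ExpandExternalCell(path) = resp else {
        return Err("Unexpected response type from generic command");
    };
    Ok(path)
}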
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use buck2_cli_proto::new_generic::ExplainRequest;
+use buck2_cli_proto::new_generic::NewGenericRequest;
+use buck2_client_ctx::client_ctx::ClientCommandContext;
+use buck2_client_ctx::common::ui::CommonConsoleOptions;
+use buck2_client_ctx::common::CommonBuildConfigurationOptions;
+use buck2_client_ctx::common::CommonEventLogOptions;
+use buck2_client_ctx::common::CommonStarlarkOptions;
+use buck2_client_ctx::daemon::client::BuckdClientConnector;
+use buck2_client_ctx::exit_result::ExitResult;
+use buck2_client_ctx::path_arg::PathArg;
+use buck2_client_ctx::streaming::StreamingCommand;
+use buck2_event_log::file_names::get_local_logs;
+use clap::ArgMatches;
+use clap::Parser as _;
+use tonic::async_trait;
+
+use crate::commands::build::BuildCommand;
+
+/// Buck2 Explain
+///
+/// This command lets users dive in and understand builds
+/// without requiring a solid grasp of Buck2 concepts.
+#[derive(Debug, clap::Parser)]
+#[clap(name = "explain", group = clap::ArgGroup::new("out").multiple(true).required(true))]
+pub struct ExplainCommand {
+    /// Output file path for profile data.
+    ///
+    /// File will be created if it does not exist, and overwritten if it does.
+    #[clap(long, short = 'o', group = "out")]
+    output: Option<PathArg>,
+    /// Whether to upload the output to Manifold
+    #[clap(long, group = "out")]
+    upload: bool,
+    /// Dev only: dump the flatbuffer info to file path
+    #[clap(long, hide = true)]
+    fbs_dump: Option<PathArg>,
+}
+
+// TODO: not sure I need StreamingCommand
+#[async_trait]
+impl StreamingCommand for ExplainCommand {
+    const COMMAND_NAME: &'static str = "explain";
+
+    async fn exec_impl(
+        self,
+        buckd: &mut BuckdClientConnector,
+        _matches: &ArgMatches,
+        ctx: &mut ClientCommandContext<'_>,
+    ) -> ExitResult {
+        if cfg!(windows) {
+            return ExitResult::bail("Not implemented for windows");
+        }
+
+        let output = self.output.clone().map(|o| o.resolve(&ctx.working_dir));
+
+        // Get the most recent log
+        let paths = ctx.paths()?;
+        let logs = get_local_logs(&paths.log_dir())?; // oldest first
+        let mut logs = logs
+            .into_iter()
+            .filter(|l| match l.command_from_filename().ok() {
+                Some(c) => c == "build" || c == "test" || c == "run" || c == "install",
+                None => false,
+            });
+
+        let build_log = match logs.next_back() {
+            Some(log) => log,
+            None => {
+                return ExitResult::bail(
+                    "No recent build commands found, did you try building something first?",
+                );
+            }
+        };
+
+        // Check things are the same as last build
+        let (invocation, _) = build_log.unpack_stream().await?;
+        buck2_client_ctx::eprintln!(
+            "\nUsing last build invocation `buck2 {}`\n",
+            invocation.command_line_args[1..].join(" ")
+        )?;
+
+        if invocation.working_dir != ctx.working_dir.to_string() {
+            return ExitResult::bail(format!(
+                "working dir mismatch {} and {}",
+                invocation.working_dir, ctx.working_dir,
+            ));
+        }
+
+        let uuid = invocation.trace_id;
+
+        // We are interested in the args passed only to a build command
+        let command = invocation.expanded_command_line_args;
+        let build_index = command.iter().position(|word| word == "build");
+        let index = match build_index {
+            Some(index) => index,
+            None => return ExitResult::bail("Only the build command is supported"),
+        };
+        let command = &command[index..];
+
+        // Parse retrieved args
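// Editor's sketch: how the invocation to explain is chosen above. Local logs
// come back oldest-first, so filter to build-like commands and take the last
// survivor with `next_back` instead of collecting. Strings stand in for
// buck2's event-log handles.
fn most_recent_build_like(commands: Vec<String>) -> Option<String> {
    commands
        .into_iter()
        .filter(|c| matches!(c.as_str(), "build" | "test" | "run" | "install"))
        .next_back() // DoubleEndedIterator: search from the newest end
}

+        let build_args =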
BuildCommand::parse_from(command); + + // TODO iguridi: get things like configs and target universe too + let patterns = build_args.patterns(); + if patterns.len() != 1 { + return ExitResult::bail("Only one target pattern is supported"); + } + let target = patterns[0].to_owned(); + let target_universe = build_args.target_universe().clone(); + let target_cfg = build_args.target_cfg(); + + let manifold_path = if self.upload { + Some(format!("flat/{}-explain.html", uuid)) + } else { + None + }; + + let mut context = ctx.empty_client_context("explain")?; + context.target_call_stacks = true; + + buckd + .with_flushing() + .new_generic( + context, + NewGenericRequest::Explain(ExplainRequest { + output, + target, + fbs_dump: self.fbs_dump.map(|x| x.resolve(&ctx.working_dir)), + manifold_path: manifold_path.clone(), + target_universe, + target_cfg, + }), + None, + ) + .await??; + + if let Some(p) = manifold_path { + buck2_client_ctx::eprintln!( + "\nView html in your browser: https://interncache-all.fbcdn.net/manifold/buck2_logs/{}\n", + p + )?; + } + + ExitResult::success() + } + + fn console_opts(&self) -> &CommonConsoleOptions { + CommonConsoleOptions::default_ref() + } + + fn event_log_opts(&self) -> &CommonEventLogOptions { + CommonEventLogOptions::default_ref() + } + + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions { + CommonBuildConfigurationOptions::default_ref() + } + + fn starlark_opts(&self) -> &CommonStarlarkOptions { + CommonStarlarkOptions::default_ref() + } +} diff --git a/app/buck2_client/src/commands/help_env.rs b/app/buck2_client/src/commands/help_env.rs new file mode 100644 index 0000000000000..c76ea2b32ca6a --- /dev/null +++ b/app/buck2_client/src/commands/help_env.rs @@ -0,0 +1,92 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::cmp; +use std::iter; + +use anyhow::Context; +use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::exit_result::ExitResult; +use buck2_core::env::registry::Applicability; +use buck2_core::env::registry::EnvInfoEntry; +use buck2_core::env::registry::ENV_INFO; + +/// Print help for environment variables used by buck2. +#[derive(Debug, clap::Parser)] +pub struct HelpEnvCommand { + /// Also print those environment variables that are only used for buck2 integration tests. + /// + /// These are all unstable and not meant to be used by most users. + #[clap(long)] + self_testing: bool, +} + +impl HelpEnvCommand { + pub fn exec(self, _matches: &clap::ArgMatches, _ctx: ClientCommandContext<'_>) -> ExitResult { + // TODO(nga): print special buckconfigs too. + + // This command depends on `linkme` aggregating all the environment variables. + if let Some(res) = ExitResult::retry_command_with_full_binary()? 
{
+            return res;
+        }
+
+        let mut env_info: Vec<EnvInfoEntry> = ENV_INFO
+            .iter()
+            .copied()
+            .filter(|x| match x.applicability {
+                Applicability::All => true,
+                Applicability::Testing => self.self_testing,
+                Applicability::Internal => !buck2_core::is_open_source(),
+            })
+            .collect();
+        env_info.sort();
+        env_info.dedup();
+
+        let longest_name = env_info
+            .iter()
+            .map(|e| e.name.len())
+            .max()
+            .context("No environment variables defined; this is a bug")?;
+        let longest_ty = env_info
+            .iter()
+            .map(|e| e.ty_short().len())
+            .max()
+            .context("No environment variables defined; this is a bug")?;
+        let longest_default = env_info
+            .iter()
+            .filter_map(|e| e.default)
+            .map(|d| d.len())
+            .max()
+            .unwrap_or(0);
+        let name_column_title = "Name";
+        let ty_column_title = "Type";
+        let default_column_title = "Default";
+        let name_column_width = cmp::max(longest_name, name_column_title.len());
+        let ty_column_width = cmp::max(longest_ty, ty_column_title.len());
+        let default_column_width = cmp::max(longest_default, default_column_title.len());
+        let rows = iter::once((name_column_title, ty_column_title, default_column_title)).chain(
+            env_info
+                .iter()
+                .map(|e| (e.name, e.ty_short(), e.default.unwrap_or_default())),
+        );
+        for (name, ty, default) in rows {
+            let line = format!(
+                "{name:name_column_width$} {ty:ty_column_width$} {default:default_column_width$}",
+                name = name,
+                ty = ty,
+                default = default,
+                name_column_width = name_column_width,
+                ty_column_width = ty_column_width,
+                default_column_width = default_column_width,
+            );
+            buck2_client_ctx::println!("{}", line.trim_end())?;
+        }
+        ExitResult::success()
+    }
+}
diff --git a/app/buck2_client/src/commands/init.rs b/app/buck2_client/src/commands/init.rs
index 66697b89ab5c5..35035469d63b2 100644
--- a/app/buck2_client/src/commands/init.rs
+++ b/app/buck2_client/src/commands/init.rs
@@ -9,40 +9,29 @@
 use std::io::ErrorKind;
 use std::io::Write;
-use std::process::Command;
 
 use anyhow::Context;
-use buck2_client_ctx::argv::Argv;
-use buck2_client_ctx::argv::SanitizedArgv;
 use buck2_client_ctx::client_ctx::ClientCommandContext;
-use buck2_client_ctx::common::CommonConsoleOptions;
+use buck2_client_ctx::common::ui::CommonConsoleOptions;
+use buck2_client_ctx::exit_result::ExitCode;
 use buck2_client_ctx::exit_result::ExitResult;
 use buck2_client_ctx::final_console::FinalConsole;
 use buck2_client_ctx::path_arg::PathArg;
+use buck2_common::argv::Argv;
+use buck2_common::argv::SanitizedArgv;
 use buck2_core::fs::fs_util;
 use buck2_core::fs::paths::abs_path::AbsPath;
+use buck2_util::process::background_command;
 
-/// Buck2 Init
-///
-/// This command is intended to be part-tutorial part-convenience
-/// for generating buck2 projects. Given a path and optional name
-/// (in the case that the folder name is not desirable).
+/// Initializes a buck2 project at the provided path.
 #[derive(Debug, clap::Parser)]
 #[clap(name = "init", about = "Initialize a buck2 project")]
 pub struct InitCommand {
-    #[clap(flatten)]
-    console_opts: CommonConsoleOptions,
-
     /// The path to initialize the project in. The folder does not need to exist.
     #[clap(default_value = ".")]
     path: PathArg,
 
-    /// The name for the project. If not provided will default to the last segment
-    /// of the path.
-    #[clap(short, long)]
-    name: Option<String>,
-
-    /// Don't generate a prelude or a toolchain.
+    /// Don't include the standard prelude or generate toolchain definitions.
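// Editor's sketch: the runtime-width alignment HelpEnvCommand uses above.
// In `format!`, a `name_w$` width takes its value from a named argument, so
// column widths can be computed from the data rather than hard-coded.
fn render_two_columns(rows: &[(&str, &str)]) -> Vec<String> {
    let name_w = rows.iter().map(|(n, _)| n.len()).max().unwrap_or(0);
    rows.iter()
        .map(|(n, t)| format!("{name:name_w$} {ty}", name = n, ty = t, name_w = name_w))
        .collect()
}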
#[clap(long)] no_prelude: bool, @@ -50,9 +39,13 @@ pub struct InitCommand { #[clap(long)] allow_dirty: bool, - // Use git to initialize the project and pull in buck2-prelude as a submodule + /// Also initialize a git repository at the given path, and set up an appropriate `.gitignore` + /// file. #[clap(long)] git: bool, + + #[clap(flatten)] + console_opts: CommonConsoleOptions, } impl InitCommand { @@ -60,12 +53,12 @@ impl InitCommand { let console = self.console_opts.final_console(); match exec_impl(self, ctx, &console) { - Ok(_) => ExitResult::status(0), + Ok(_) => ExitResult::success(), Err(e) => { // include the backtrace with the error output // (same behaviour as returning the Error from main) console.print_error(&format!("{:?}", e))?; - ExitResult::status(1) + ExitResult::status(ExitCode::UnknownFailure) } } } @@ -93,7 +86,7 @@ fn exec_impl( } if git { - let status = match Command::new("git") + let status = match background_command("git") .args(["status", "--porcelain"]) .current_dir(&absolute) .output() @@ -126,38 +119,51 @@ fn exec_impl( fn initialize_buckconfig(repo_root: &AbsPath, prelude: bool, git: bool) -> anyhow::Result<()> { let mut buckconfig = std::fs::File::create(repo_root.join(".buckconfig"))?; - writeln!(buckconfig, "[repositories]")?; - writeln!(buckconfig, "root = .")?; - writeln!(buckconfig, "prelude = prelude")?; + writeln!(buckconfig, "[cells]")?; + writeln!(buckconfig, " root = .")?; // Add additional configs that depend on prelude / no-prelude mode if prelude { - writeln!(buckconfig, "toolchains = toolchains")?; - writeln!(buckconfig, "none = none")?; + writeln!(buckconfig, " prelude = prelude")?; + writeln!(buckconfig, " toolchains = toolchains")?; + writeln!(buckconfig, " none = none")?; writeln!(buckconfig)?; - writeln!(buckconfig, "[repository_aliases]")?; - writeln!(buckconfig, "config = prelude")?; - writeln!(buckconfig, "ovr_config = prelude")?; - writeln!(buckconfig, "fbcode = none")?; - writeln!(buckconfig, "fbsource = none")?; - writeln!(buckconfig, "fbcode_macros = none")?; - writeln!(buckconfig, "buck = none")?; + writeln!(buckconfig, "[cell_aliases]")?; + writeln!(buckconfig, " config = prelude")?; + writeln!(buckconfig, " ovr_config = prelude")?; + writeln!(buckconfig, " fbcode = none")?; + writeln!(buckconfig, " fbsource = none")?; + writeln!(buckconfig, " fbcode_macros = none")?; + writeln!(buckconfig, " buck = none")?; + writeln!(buckconfig)?; + writeln!( + buckconfig, + "# Uses a copy of the prelude bundled with the buck2 binary. You can alternatively delete this" + )?; + writeln!( + buckconfig, + "# section and vendor a copy of the prelude to the `prelude` directory of your project." + )?; + writeln!(buckconfig, "[external_cells]")?; + writeln!(buckconfig, " prelude = bundled")?; writeln!(buckconfig)?; writeln!(buckconfig, "[parser]")?; writeln!( buckconfig, - "target_platform_detector_spec = target:root//...->prelude//platforms:default" + " target_platform_detector_spec = target:root//...->prelude//platforms:default" + )?; + writeln!(buckconfig)?; + writeln!(buckconfig, "[build]")?; + writeln!( + buckconfig, + " execution_platforms = prelude//platforms:default" )?; - } else { - // For the no-prelude mode, create an empty prelude/prelude.bzl as Buck2 expects one. 
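// Editor's sketch of the git probe above, on std::process::Command; buck2's
// background_command helper is assumed to wrap this same API. An empty
// `git status --porcelain` listing means the working tree is clean.
use std::process::Command;

fn git_status_is_clean(dir: &std::path::Path) -> std::io::Result<bool> {
    let out = Command::new("git")
        .args(["status", "--porcelain"])
        .current_dir(dir)
        .output()?; // captures stdout rather than inheriting it
    Ok(out.status.success() && out.stdout.is_empty())
}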
- let prelude_dir = repo_root.join("prelude"); - fs_util::create_dir(&prelude_dir)?; - fs_util::create_file(prelude_dir.join("prelude.bzl"))?; } + if git { writeln!(buckconfig)?; writeln!(buckconfig, "[project]")?; - writeln!(buckconfig, "ignore = .git")?; + writeln!(buckconfig, " ignore = .git")?; } Ok(()) } @@ -183,7 +189,7 @@ fn initialize_root_buck(repo_root: &AbsPath, prelude: bool) -> anyhow::Result<() if prelude { writeln!( buck, - "# A list of available rules and their signatures can be found here: https://buck2.build/docs/api/rules/" + "# A list of available rules and their signatures can be found here: https://buck2.build/docs/prelude/globals/" )?; writeln!(buck)?; writeln!(buck, "genrule(")?; @@ -196,32 +202,6 @@ fn initialize_root_buck(repo_root: &AbsPath, prelude: bool) -> anyhow::Result<() Ok(()) } -fn set_up_prelude(repo_root: &AbsPath, git: bool) -> anyhow::Result<()> { - if git { - if !Command::new("git") - .args([ - "submodule", - "add", - "https://github.com/facebook/buck2-prelude.git", - "prelude", - ]) - .current_dir(repo_root) - .status()? - .success() - { - return Err(anyhow::anyhow!( - "Unable to clone the prelude. Is the folder in use?" - )); - } - } else { - println!( - "* Download https://github.com/facebookincubator/buck2-prelude.git into `prelude/` with a VCS of your choice." - ); - println!("* If you wish to use git submodule, run the command again with --git"); - } - Ok(()) -} - fn set_up_gitignore(repo_root: &AbsPath) -> anyhow::Result<()> { let gitignore = repo_root.join(".gitignore"); // If .gitignore is empty or doesn't exist, add in buck-out @@ -240,7 +220,7 @@ fn set_up_project(repo_root: &AbsPath, git: bool, prelude: bool) -> anyhow::Resu set_up_buckroot(repo_root)?; if git { - if !Command::new("git") + if !background_command("git") .arg("init") .current_dir(repo_root) .status()? @@ -251,12 +231,11 @@ fn set_up_project(repo_root: &AbsPath, git: bool, prelude: bool) -> anyhow::Resu set_up_gitignore(repo_root)?; } - if prelude { - set_up_prelude(repo_root, git)?; - } - // If the project already contains a .buckconfig, leave it alone if repo_root.join(".buckconfig").exists() { + buck2_client_ctx::println!( + ".buckconfig already exists, not overwriting and not generating toolchains" + )?; return Ok(()); } @@ -342,25 +321,33 @@ mod tests { let buckconfig_path = tempdir_path.join(".buckconfig"); initialize_buckconfig(tempdir_path, true, true)?; let actual_buckconfig = fs_util::read_to_string(buckconfig_path)?; - let expected_buckconfig = "[repositories] -root = . -prelude = prelude -toolchains = toolchains -none = none - -[repository_aliases] -config = prelude -ovr_config = prelude -fbcode = none -fbsource = none -fbcode_macros = none -buck = none + let expected_buckconfig = "[cells] + root = . + prelude = prelude + toolchains = toolchains + none = none + +[cell_aliases] + config = prelude + ovr_config = prelude + fbcode = none + fbsource = none + fbcode_macros = none + buck = none + +# Uses a copy of the prelude bundled with the buck2 binary. You can alternatively delete this +# section and vendor a copy of the prelude to the `prelude` directory of your project. 
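// Editor's sketch: the buckconfig-generation pattern above; stream INI-style
// sections through writeln! so related keys stay grouped. Keys mirror the
// hunk; any io::Write destination works.
use std::io::Write;

fn write_cells_sections(mut out: impl Write) -> std::io::Result<()> {
    writeln!(out, "[cells]")?;
    writeln!(out, " root = .")?;
    writeln!(out)?; // blank line between sections
    writeln!(out, "[external_cells]")?;
    writeln!(out, " prelude = bundled")?;
    Ok(())
}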
+[external_cells]
+ prelude = bundled
 
 [parser]
-target_platform_detector_spec = target:root//...->prelude//platforms:default
+ target_platform_detector_spec = target:root//...->prelude//platforms:default
+
+[build]
+ execution_platforms = prelude//platforms:default
 
 [project]
-ignore = .git
+ ignore = .git
 ";
         assert_eq!(actual_buckconfig, expected_buckconfig);
         Ok(())
@@ -376,14 +363,11 @@ ignore = .git
         let buckconfig_path = tempdir_path.join(".buckconfig");
         initialize_buckconfig(tempdir_path, false, false)?;
         let actual_buckconfig = fs_util::read_to_string(buckconfig_path)?;
-        let expected_buckconfig = "[repositories]
-root = .
-prelude = prelude
+        let expected_buckconfig = "[cells]
+ root = .
 ";
         assert_eq!(actual_buckconfig, expected_buckconfig);
 
-        // Test we have an empty prelude directory and prelude.bzl file
-        assert!(tempdir_path.join("prelude/prelude.bzl").exists());
         Ok(())
     }
 
@@ -397,7 +381,7 @@ prelude = prelude
         let buck_path = tempdir_path.join("BUCK");
         initialize_root_buck(tempdir_path, true)?;
         let actual_buck = fs_util::read_to_string(buck_path)?;
-        let expected_buck = "# A list of available rules and their signatures can be found here: https://buck2.build/docs/api/rules/
+        let expected_buck = "# A list of available rules and their signatures can be found here: https://buck2.build/docs/prelude/globals/
 
 genrule(
     name = \"hello_world\",
diff --git a/app/buck2_client/src/commands/install.rs b/app/buck2_client/src/commands/install.rs
index b65bc1bf69caa..ad8690d9af397 100644
--- a/app/buck2_client/src/commands/install.rs
+++ b/app/buck2_client/src/commands/install.rs
@@ -11,26 +11,21 @@
 use async_trait::async_trait;
 use buck2_cli_proto::InstallRequest;
 use buck2_client_ctx::client_ctx::ClientCommandContext;
 use buck2_client_ctx::command_outcome::CommandOutcome;
+use buck2_client_ctx::common::build::CommonBuildOptions;
+use buck2_client_ctx::common::target_cfg::TargetCfgOptions;
+use buck2_client_ctx::common::ui::CommonConsoleOptions;
 use buck2_client_ctx::common::CommonBuildConfigurationOptions;
-use buck2_client_ctx::common::CommonBuildOptions;
 use buck2_client_ctx::common::CommonCommandOptions;
-use buck2_client_ctx::common::CommonConsoleOptions;
-use buck2_client_ctx::common::CommonDaemonCommandOptions;
+use buck2_client_ctx::common::CommonEventLogOptions;
+use buck2_client_ctx::common::CommonStarlarkOptions;
 use buck2_client_ctx::daemon::client::BuckdClientConnector;
 use buck2_client_ctx::daemon::client::NoPartialResultHandler;
 use buck2_client_ctx::exit_result::ExitResult;
 use buck2_client_ctx::streaming::StreamingCommand;
-use gazebo::prelude::*;
 
 #[derive(Debug, clap::Parser)]
 #[clap(name = "install", about = "Build and install an application")]
 pub struct InstallCommand {
-    #[clap(flatten)]
-    common_opts: CommonCommandOptions,
-
-    #[clap(flatten)]
-    build_opts: CommonBuildOptions,
-
     #[clap(
         long,
         name = "installer-debug",
@@ -50,6 +45,15 @@ pub struct InstallCommand {
         raw = true
     )]
     extra_run_args: Vec<String>,
+
+    #[clap(flatten)]
+    build_opts: CommonBuildOptions,
+
+    #[clap(flatten)]
+    target_cfg: TargetCfgOptions,
+
+    #[clap(flatten)]
+    common_opts: CommonCommandOptions,
 }
 
 /// Defines install options for Android that exist only for compatibility
@@ -119,6 +123,13 @@ struct AndroidInstallOptions {
         help = "Use this option to uninstall an installed app before installing again. Here for compatibility with buck1 - it is automatically forwarded to the installer"
     )]
     uninstall: bool,
+
+    #[clap(
+        short,
+        long,
+        help = "Use this option to keep user data when uninstalling.
Here for compatibility with buck1 - it is automatically forwarded to the installer" + )] + keep: bool, } #[async_trait] @@ -163,15 +174,17 @@ impl StreamingCommand for InstallCommand { if self.android_install_opts.uninstall { extra_run_args.push("-u".to_owned()); } + if self.android_install_opts.keep { + extra_run_args.push("-k".to_owned()); + } let response = buckd .with_flushing() .install( InstallRequest { context: Some(context), - target_patterns: self.patterns.map(|pat| buck2_data::TargetPattern { - value: pat.to_owned(), - }), + target_patterns: self.patterns.clone(), + target_cfg: Some(self.target_cfg.target_cfg()), build_opts: Some(self.build_opts.to_proto()), installer_run_args: extra_run_args, installer_debug: self.installer_debug, @@ -180,17 +193,21 @@ impl StreamingCommand for InstallCommand { .console_interaction_stream(&self.common_opts.console_opts), &mut NoPartialResultHandler, ) - .await; + .await?; let console = self.common_opts.console_opts.final_console(); match response { - Ok(CommandOutcome::Success(_)) => { - console.print_success("INSTALL SUCCEEDED")?; + CommandOutcome::Success(_) => { + if self.patterns.is_empty() { + console.print_warning("NO BUILD TARGET PATTERNS SPECIFIED")?; + } else { + console.print_success("INSTALL SUCCEEDED")?; + } ExitResult::success() } - Ok(CommandOutcome::Failure(_)) | Err(_) => { + CommandOutcome::Failure(exit_result) => { console.print_error("INSTALL FAILED")?; - ExitResult::failure() + exit_result } } } @@ -199,11 +216,15 @@ impl StreamingCommand for InstallCommand { &self.common_opts.console_opts } - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { + fn event_log_opts(&self) -> &CommonEventLogOptions { &self.common_opts.event_log_opts } - fn common_opts(&self) -> &CommonBuildConfigurationOptions { + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions { &self.common_opts.config_opts } + + fn starlark_opts(&self) -> &CommonStarlarkOptions { + &self.common_opts.starlark_opts + } } diff --git a/app/buck2_client/src/commands/kill.rs b/app/buck2_client/src/commands/kill.rs index f3945d37e5d39..18d53a50f29b2 100644 --- a/app/buck2_client/src/commands/kill.rs +++ b/app/buck2_client/src/commands/kill.rs @@ -10,14 +10,13 @@ use std::time::Duration; use anyhow::Context as _; -use buck2_client_ctx::argv::Argv; -use buck2_client_ctx::argv::SanitizedArgv; use buck2_client_ctx::client_ctx::ClientCommandContext; -use buck2_client_ctx::daemon::client::connect::buckd_startup_timeout; -use buck2_client_ctx::daemon::client::connect::BuckdProcessInfo; +use buck2_client_ctx::common::CommonEventLogOptions; use buck2_client_ctx::daemon::client::BuckdLifecycleLock; use buck2_client_ctx::exit_result::ExitResult; use buck2_client_ctx::startup_deadline::StartupDeadline; +use buck2_common::argv::Argv; +use buck2_common::argv::SanitizedArgv; /// Kill the buck daemon. /// @@ -27,11 +26,14 @@ use buck2_client_ctx::startup_deadline::StartupDeadline; /// /// `buck2 clean` kills the buck2 daemon and also deletes the buck2 state files. 
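// Editor's sketch: how the buck1-compatibility booleans above are folded into
// the argv handed to the installer, one short flag per option.
fn installer_compat_args(uninstall: bool, keep: bool) -> Vec<String> {
    let mut args = Vec::new();
    if uninstall {
        args.push("-u".to_owned());
    }
    if keep {
        args.push("-k".to_owned());
    }
    args
}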
#[derive(Debug, clap::Parser)] -pub struct KillCommand {} +pub struct KillCommand { + #[clap(flatten)] + pub(crate) event_log_opts: CommonEventLogOptions, +} impl KillCommand { pub fn exec(self, _matches: &clap::ArgMatches, ctx: ClientCommandContext<'_>) -> ExitResult { - ctx.instant_command("kill", async move |ctx| { + ctx.instant_command("kill", &self.event_log_opts, |ctx| async move { let daemon_dir = ctx.paths()?.daemon_dir()?; let lifecycle_lock = BuckdLifecycleLock::lock_with_timeout( @@ -41,75 +43,16 @@ impl KillCommand { .await .with_context(|| "Error locking buckd lifecycle.lock")?; - kill_command_impl(&lifecycle_lock, "`buck kill` was invoked").await + buck2_client_ctx::daemon::client::kill::kill_command_impl( + &lifecycle_lock, + "`buck kill` was invoked", + ) + .await }) + .into() } pub fn sanitize_argv(&self, argv: Argv) -> SanitizedArgv { argv.no_need_to_sanitize() } } - -pub async fn kill_command_impl( - lifecycle_lock: &BuckdLifecycleLock, - reason: &str, -) -> anyhow::Result<()> { - let process = match BuckdProcessInfo::load(lifecycle_lock.daemon_dir()) { - Ok(p) => p, - Err(e) => { - tracing::debug!("No BuckdProcessInfo: {:#}", e); - buck2_client_ctx::eprintln!("no buckd server running")?; - return Ok(()); - } - }; - - let buckd = tokio::time::timeout(buckd_startup_timeout()?, async { - process.create_channel().await?.upgrade().await - }) - .await; - - let response = match buckd { - Ok(Ok(mut buckd)) => { - buck2_client_ctx::eprintln!("killing buckd server")?; - Some(buckd.kill(reason).await?) - } - Ok(Err(e)) => { - // No time out: we just errored out. This is likely indicative that there is no - // buckd (i.e. our connection got rejected), so let's check for this and then - // provide some information. - - if e.is::() { - // OK, looks like the server - tracing::debug!("Connect failed with a Tonic error: {:#}", e); - buck2_client_ctx::eprintln!("no buckd server running")?; - } else { - buck2_client_ctx::eprintln!( - "unexpected error connecting to Buck2: {:#} \ - (no buckd server running?)", - e - )?; - } - - None - } - Err(e) => { - tracing::debug!("Connect timed out: {:#}", e); - - // If we timeout, then considering the generous timeout we give ourselves, then - // that must mean we're not getting a reply back from Buck, but that we did - // succeed in opening a connection to it (because if we didn't, we'd have - // errored out). - // - // This means the socket is probably open. We can reasonably got and kill this - // process if both the PID and the port exist. - buck2_client_ctx::eprintln!("killing unresponsive buckd server")?; - Some(process.hard_kill().await?) - } - }; - - if let Some(response) = response { - response.log()?; - } - - Ok(()) -} diff --git a/app/buck2_client/src/commands/killall.rs b/app/buck2_client/src/commands/killall.rs index 2d4ea5f8218d9..f696ac8e7dfb1 100644 --- a/app/buck2_client/src/commands/killall.rs +++ b/app/buck2_client/src/commands/killall.rs @@ -7,25 +7,30 @@ * of this source tree. 
*/ -use buck2_client_ctx::argv::Argv; -use buck2_client_ctx::argv::SanitizedArgv; use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::common::CommonEventLogOptions; use buck2_client_ctx::exit_result::ExitResult; +use buck2_common::argv::Argv; +use buck2_common::argv::SanitizedArgv; use buck2_wrapper_common::is_buck2::WhoIsAsking; #[derive(Debug, clap::Parser)] #[clap(about = "Kill all buck2 processes on the machine")] -pub struct KillallCommand {} +pub struct KillallCommand { + #[clap(flatten)] + pub(crate) event_log_opts: CommonEventLogOptions, +} impl KillallCommand { pub fn exec(self, _matches: &clap::ArgMatches, ctx: ClientCommandContext<'_>) -> ExitResult { - ctx.instant_command("killall", async move |_ctx| { + ctx.instant_command("killall", &self.event_log_opts, |_ctx| async move { buck2_wrapper_common::killall(WhoIsAsking::Buck2, |s| { let _ignored = buck2_client_ctx::eprintln!("{}", s); }) .then_some(()) .ok_or(anyhow::anyhow!("Killall command failed")) }) + .into() } pub fn sanitize_argv(&self, argv: Argv) -> SanitizedArgv { diff --git a/app/buck2_client/src/commands/log.rs b/app/buck2_client/src/commands/log.rs new file mode 100644 index 0000000000000..8b0d5d69e6cc5 --- /dev/null +++ b/app/buck2_client/src/commands/log.rs @@ -0,0 +1,121 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +mod critical_path; +pub(crate) mod debug_replay; +pub(crate) mod debug_what_ran; +mod diff; +pub(crate) mod options; +pub(crate) mod path_log; +mod replay; +mod show_log; +mod show_user_log; +mod summary; +mod what_cmd; +mod what_failed; +mod what_materialized; +pub(crate) mod what_ran; +mod what_up; +mod what_uploaded; + +use std::fmt::Debug; + +use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::exit_result::ExitResult; +use buck2_common::argv::Argv; +use buck2_common::argv::SanitizedArgv; +use dupe::Dupe; + +#[derive( + Debug, + serde::Serialize, + serde::Deserialize, + Clone, + Dupe, + clap::ValueEnum +)] +#[clap(rename_all = "snake_case")] +pub(crate) enum LogCommandOutputFormat { + Tabulated, + Json, + Csv, +} + +pub(crate) enum LogCommandOutputFormatWithWriter<'a> { + Tabulated(&'a mut dyn std::io::Write), + Json(&'a mut dyn std::io::Write), + Csv(Box>), +} + +pub(crate) struct OutputFormatWithWriter<'a> { + pub(crate) format: LogCommandOutputFormatWithWriter<'a>, + pub(crate) include_std_err: bool, + pub(crate) omit_empty_std_err: bool, +} + +pub(crate) fn transform_format<'a>( + format: LogCommandOutputFormat, + w: &'a mut (dyn std::io::Write), +) -> LogCommandOutputFormatWithWriter<'a> { + match format { + LogCommandOutputFormat::Tabulated => LogCommandOutputFormatWithWriter::Tabulated(w), + LogCommandOutputFormat::Json => LogCommandOutputFormatWithWriter::Json(w), + LogCommandOutputFormat::Csv => LogCommandOutputFormatWithWriter::Csv(Box::new( + csv::WriterBuilder::new().from_writer(w), + )), + } +} + +#[derive(Debug, clap::Subcommand)] +#[clap(about = "Commands for interacting with buck2 logs")] +pub enum LogCommand { + #[clap(alias = "whatran")] + WhatRan(what_ran::WhatRanCommand), + #[clap(alias = "whatfailed")] + WhatFailed(what_failed::WhatFailedCommand), + #[clap(alias = "last")] + Path(path_log::PathLogCommand), + Show(show_log::ShowLogCommand), + 
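// Editor's sketch of LogCommandOutputFormatWithWriter above: wrap one writer
// per format so row emission is a single match instead of a branch at every
// print site. CSV is omitted here (the real code boxes a csv::Writer) and the
// JSON row is hand-rolled for brevity.
use std::io::Write;

enum RowWriter<'a> {
    Tabulated(&'a mut dyn Write),
    Json(&'a mut dyn Write),
}

fn emit_row(out: &mut RowWriter<'_>, kind: &str, name: &str) -> std::io::Result<()> {
    match out {
        RowWriter::Tabulated(w) => writeln!(w, "{}\t{}", kind, name),
        RowWriter::Json(w) => writeln!(w, "{{\"kind\":{:?},\"name\":{:?}}}", kind, name),
    }
}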
#[clap(alias = "whatcmd", alias = "what-cmd")] + Cmd(what_cmd::WhatCmdCommand), + #[clap(alias = "whatup")] + WhatUp(what_up::WhatUpCommand), + WhatMaterialized(what_materialized::WhatMaterializedCommand), + WhatUploaded(what_uploaded::WhatUploadedCommand), + CriticalPath(critical_path::CriticalPathCommand), + Replay(replay::ReplayCommand), + ShowUser(show_user_log::ShowUserLogCommand), + Summary(summary::SummaryCommand), + #[clap(subcommand)] + Diff(diff::DiffCommand), +} + +impl LogCommand { + pub fn exec(self, matches: &clap::ArgMatches, ctx: ClientCommandContext<'_>) -> ExitResult { + match self { + Self::WhatRan(cmd) => cmd.exec(matches, ctx), + Self::WhatFailed(cmd) => cmd.exec(matches, ctx), + Self::Path(cmd) => cmd.exec(matches, ctx), + Self::Show(cmd) => cmd.exec(matches, ctx), + Self::Cmd(cmd) => cmd.exec(matches, ctx), + Self::WhatUp(cmd) => cmd.exec(matches, ctx), + Self::WhatMaterialized(cmd) => cmd.exec(matches, ctx), + Self::WhatUploaded(cmd) => cmd.exec(matches, ctx), + Self::CriticalPath(cmd) => cmd.exec(matches, ctx), + Self::Replay(cmd) => cmd.exec(matches, ctx), + Self::ShowUser(cmd) => cmd.exec(matches, ctx), + Self::Summary(cmd) => cmd.exec(matches, ctx), + Self::Diff(cmd) => cmd.exec(matches, ctx), + } + } + + pub fn sanitize_argv(&self, argv: Argv) -> SanitizedArgv { + argv.no_need_to_sanitize() + } +} diff --git a/app/buck2_client/src/commands/log/critical_path.rs b/app/buck2_client/src/commands/log/critical_path.rs index d52738e0e9472..fe1f06b77ef55 100644 --- a/app/buck2_client/src/commands/log/critical_path.rs +++ b/app/buck2_client/src/commands/log/critical_path.rs @@ -8,16 +8,21 @@ */ use std::fmt; +use std::io::Write; use std::time::Duration; use buck2_client_ctx::client_ctx::ClientCommandContext; use buck2_client_ctx::exit_result::ExitResult; -use buck2_client_ctx::stream_value::StreamValue; +use buck2_event_log::stream_value::StreamValue; use buck2_event_observer::display; use buck2_event_observer::display::TargetDisplayOptions; +use serde::Serialize; use tokio_stream::StreamExt; use crate::commands::log::options::EventLogOptions; +use crate::commands::log::transform_format; +use crate::commands::log::LogCommandOutputFormat; +use crate::commands::log::LogCommandOutputFormatWithWriter; /// Show the critical path for a selected build. 
/// @@ -32,13 +37,21 @@ use crate::commands::log::options::EventLogOptions; pub struct CriticalPathCommand { #[clap(flatten)] event_log: EventLogOptions, + #[clap( + long, + help = "Which output format to use for this command", + default_value = "tabulated", + ignore_case = true, + value_enum + )] + format: LogCommandOutputFormat, } impl CriticalPathCommand { pub fn exec(self, _matches: &clap::ArgMatches, ctx: ClientCommandContext<'_>) -> ExitResult { - let Self { event_log } = self; + let Self { event_log, format } = self; - ctx.with_runtime(async move |ctx| { + ctx.instant_command_no_log("log-critical-path", |ctx| async move { let log_path = event_log.get(&ctx).await?; let (invocation, mut events) = log_path.unpack_stream().await?; @@ -55,7 +68,7 @@ impl CriticalPathCommand { Some(buck2_data::instant_event::Data::BuildGraphInfo( build_graph, )) => { - log_critical_path(&build_graph)?; + log_critical_path(&build_graph, format.clone())?; } _ => {} } @@ -67,124 +80,180 @@ impl CriticalPathCommand { } anyhow::Ok(()) - })?; + }) + .into() + } +} + +#[derive(Default)] +struct OptionalDuration { + inner: Option, +} - ExitResult::success() +impl OptionalDuration { + fn new(d: Option) -> Result + where + T: TryInto, + { + Ok(Self { + inner: d.map(|d| d.try_into()).transpose()?, + }) } } -fn log_critical_path(critical_path: &buck2_data::BuildGraphExecutionInfo) -> anyhow::Result<()> { - let target_display_options = TargetDisplayOptions::for_log(); +impl fmt::Display for OptionalDuration { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if let Some(inner) = self.inner { + write!(f, "{}", inner.as_micros())?; + } + Ok(()) + } +} + +impl Serialize for OptionalDuration { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + if let Some(micros) = self.inner.map(|d| d.as_micros()) { + serializer.serialize_some(µs) + } else { + serializer.serialize_none() + } + } +} - for entry in &critical_path.critical_path2 { - use buck2_data::critical_path_entry2::Entry; +#[derive(Default, Serialize)] +struct CriticalPathEntry<'a> { + kind: &'a str, + #[serde(skip_serializing_if = "Option::is_none")] + name: Option, + #[serde(skip_serializing_if = "Option::is_none")] + category: Option<&'a str>, + #[serde(skip_serializing_if = "Option::is_none")] + identifier: Option<&'a str>, + #[serde(skip_serializing_if = "Option::is_none")] + execution_kind: Option<&'a str>, + total_duration: OptionalDuration, + user_duration: OptionalDuration, + potential_improvement_duration: OptionalDuration, +} - let kind; - let name; - let mut category = ""; - let mut identifier = ""; +fn log_critical_path( + critical_path: &buck2_data::BuildGraphExecutionInfo, + format: LogCommandOutputFormat, +) -> anyhow::Result<()> { + let target_display_options = TargetDisplayOptions::for_log(); - match &entry.entry { - Some(Entry::Analysis(analysis)) => { - use buck2_data::critical_path_entry2::analysis::Target; + buck2_client_ctx::stdio::print_with_writer::(|w| { + let mut log_writer = transform_format(format, w); - kind = "analysis"; + for entry in &critical_path.critical_path2 { + use buck2_data::critical_path_entry2::Entry; - name = match &analysis.target { - Some(Target::StandardTarget(t)) => { - display::display_configured_target_label(t, target_display_options)? 
- } - None => continue, - }; - } - Some(Entry::ActionExecution(action_execution)) => { - use buck2_data::critical_path_entry2::action_execution::Owner; + let mut critical_path = CriticalPathEntry::default(); - kind = "action"; + match &entry.entry { + Some(Entry::Analysis(analysis)) => { + use buck2_data::critical_path_entry2::analysis::Target; - name = match &action_execution.owner { - Some(Owner::TargetLabel(t)) => { - display::display_configured_target_label(t, target_display_options)? - } - Some(Owner::BxlKey(t)) => display::display_bxl_key(t)?, - Some(Owner::AnonTarget(t)) => display::display_anon_target(t)?, - None => continue, - }; - - match &action_execution.name { - Some(name) => { - category = &name.category; - identifier = &name.identifier; - } - None => {} + critical_path.kind = "analysis"; + + critical_path.name = match &analysis.target { + Some(Target::StandardTarget(t)) => Some( + display::display_configured_target_label(t, target_display_options)?, + ), + None => continue, + }; } - } - Some(Entry::Materialization(materialization)) => { - use buck2_data::critical_path_entry2::materialization::Owner; + Some(Entry::ActionExecution(action_execution)) => { + use buck2_data::critical_path_entry2::action_execution::Owner; - kind = "materialization"; + critical_path.kind = "action"; + + critical_path.name = Some(match &action_execution.owner { + Some(Owner::TargetLabel(t)) => { + display::display_configured_target_label(t, target_display_options)? + } + Some(Owner::BxlKey(t)) => display::display_bxl_key(t)?, + Some(Owner::AnonTarget(t)) => display::display_anon_target(t)?, + None => continue, + }); - name = match &materialization.owner { - Some(Owner::TargetLabel(t)) => { - display::display_configured_target_label(t, target_display_options)? + match &action_execution.name { + Some(name) => { + critical_path.category = Some(&name.category); + critical_path.identifier = Some(&name.identifier); + } + None => {} } - Some(Owner::BxlKey(t)) => display::display_bxl_key(t)?, - Some(Owner::AnonTarget(t)) => display::display_anon_target(t)?, - None => continue, - }; - identifier = &materialization.path; - } - Some(Entry::ComputeCriticalPath(..)) => { - kind = "compute-critical-path"; - name = "".to_owned(); - } - Some(Entry::Load(load)) => { - kind = "load"; - name = load.package.clone(); - } - Some(Entry::Listing(listing)) => { - kind = "listing"; - name = listing.package.clone(); - } - None => continue, - } + critical_path.execution_kind = Some( + buck2_data::ActionExecutionKind::from_i32(action_execution.execution_kind) + .unwrap_or(buck2_data::ActionExecutionKind::NotSet) + .as_str_name(), + ); + } + Some(Entry::Materialization(materialization)) => { + use buck2_data::critical_path_entry2::materialization::Owner; - struct OptionalDuration { - inner: Option, - } + critical_path.kind = "materialization"; - impl OptionalDuration { - fn new(d: Option) -> Result - where - T: TryInto, - { - Ok(Self { - inner: d.map(|d| d.try_into()).transpose()?, - }) - } - } + critical_path.name = Some(match &materialization.owner { + Some(Owner::TargetLabel(t)) => { + display::display_configured_target_label(t, target_display_options)? 
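// Editor's sketch: what OptionalDuration above exists for. A missing duration
// serializes as null and a present one as integer microseconds, so JSON and
// CSV rows render it uniformly (the serde crate is assumed).
use std::time::Duration;

struct MicrosOpt(Option<Duration>);

impl serde::Serialize for MicrosOpt {
    fn serialize<S: serde::Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
        match self.0 {
            Some(d) => s.serialize_u128(d.as_micros()),
            None => s.serialize_none(),
        }
    }
}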
+ } + Some(Owner::BxlKey(t)) => display::display_bxl_key(t)?, + Some(Owner::AnonTarget(t)) => display::display_anon_target(t)?, + None => continue, + }); - impl fmt::Display for OptionalDuration { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if let Some(inner) = self.inner { - write!(f, "{}", inner.as_micros())?; + critical_path.identifier = Some(&materialization.path); } - Ok(()) + Some(Entry::ComputeCriticalPath(..)) => { + critical_path.kind = "compute-critical-path"; + critical_path.name = None; + } + Some(Entry::Load(load)) => { + critical_path.kind = "load"; + critical_path.name = Some(load.package.clone()); + } + Some(Entry::Listing(listing)) => { + critical_path.kind = "listing"; + critical_path.name = Some(listing.package.clone()); + } + None => continue, } - } - buck2_client_ctx::println!( - "{}\t{}\t{}\t{}\t{}\t{}\t{}", - kind, - name, - category, - identifier, - OptionalDuration::new(entry.total_duration.clone())?, - OptionalDuration::new(entry.user_duration.clone())?, - OptionalDuration::new(entry.potential_improvement_duration.clone())?, - )?; - } + critical_path.total_duration = OptionalDuration::new(entry.total_duration.clone())?; + critical_path.user_duration = OptionalDuration::new(entry.user_duration.clone())?; + critical_path.potential_improvement_duration = + OptionalDuration::new(entry.potential_improvement_duration.clone())?; - Ok(()) + match &mut log_writer { + LogCommandOutputFormatWithWriter::Tabulated(writer) => { + writeln!( + writer, + "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}", + critical_path.kind, + critical_path.name.unwrap_or_default(), + critical_path.category.unwrap_or_default(), + critical_path.identifier.unwrap_or_default(), + critical_path.execution_kind.unwrap_or_default(), + critical_path.total_duration, + critical_path.user_duration, + critical_path.potential_improvement_duration + )?; + } + LogCommandOutputFormatWithWriter::Json(writer) => { + serde_json::to_writer(writer.by_ref(), &critical_path)?; + writer.write_all("\n".as_bytes())?; + } + LogCommandOutputFormatWithWriter::Csv(writer) => { + writer.serialize(critical_path)?; + } + } + } + Ok(()) + }) } diff --git a/app/buck2_client/src/commands/log/debug_replay.rs b/app/buck2_client/src/commands/log/debug_replay.rs index 21accd39e3a72..937a6eca1393b 100644 --- a/app/buck2_client/src/commands/log/debug_replay.rs +++ b/app/buck2_client/src/commands/log/debug_replay.rs @@ -13,7 +13,7 @@ use buck2_core::soft_error; use crate::commands::log::replay::ReplayCommand; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum DebugReplayCommandError { #[error("`buck2 debug replay` is deprecated. 
Use `buck2 log replay` instead.")] Deprecated, @@ -31,7 +31,11 @@ impl DebugReplayCommand { matches: &clap::ArgMatches, ctx: ClientCommandContext<'_>, ) -> ExitResult { - soft_error!("debug_replay", DebugReplayCommandError::Deprecated.into())?; + soft_error!( + "debug_replay", + DebugReplayCommandError::Deprecated.into(), + deprecation: true + )?; self.replay.exec(matches, ctx) } } diff --git a/app/buck2_client/src/commands/log/debug_what_ran.rs b/app/buck2_client/src/commands/log/debug_what_ran.rs index ca2f8af8b6995..a808b7d7d0bfa 100644 --- a/app/buck2_client/src/commands/log/debug_what_ran.rs +++ b/app/buck2_client/src/commands/log/debug_what_ran.rs @@ -13,7 +13,7 @@ use buck2_core::soft_error; use crate::commands::log::what_ran::WhatRanCommand; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum DebugWhatRanCommandError { #[error("`buck2 debug what-ran` is deprecated. Use `buck2 log what-ran` instead.")] Deprecated, @@ -33,7 +33,8 @@ impl DebugWhatRanCommand { ) -> ExitResult { soft_error!( "debug_what_ran", - DebugWhatRanCommandError::Deprecated.into() + DebugWhatRanCommandError::Deprecated.into(), + deprecation: true )?; self.what_ran.exec(matches, ctx) } diff --git a/app/buck2_client/src/commands/log/diff.rs b/app/buck2_client/src/commands/log/diff.rs new file mode 100644 index 0000000000000..46aa4ae83e2bf --- /dev/null +++ b/app/buck2_client/src/commands/log/diff.rs @@ -0,0 +1,29 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::exit_result::ExitResult; + +mod action_divergence; + +#[derive(Debug, clap::Subcommand)] +#[clap(about = "Subcommands for diff'ing two buck2 commands")] +pub enum DiffCommand { + ActionDivergence(action_divergence::ActionDivergenceCommand), + Configs, +} + +impl DiffCommand { + pub fn exec(self, matches: &clap::ArgMatches, ctx: ClientCommandContext<'_>) -> ExitResult { + match self { + Self::Configs => ExitResult::bail("Command not implemented yet!"), + Self::ActionDivergence(cmd) => cmd.exec(matches, ctx), + } + } +} diff --git a/app/buck2_client/src/commands/log/diff/action_divergence.rs b/app/buck2_client/src/commands/log/diff/action_divergence.rs new file mode 100644 index 0000000000000..d91a2932b7ca4 --- /dev/null +++ b/app/buck2_client/src/commands/log/diff/action_divergence.rs @@ -0,0 +1,203 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::exit_result::ExitResult; +use buck2_client_ctx::path_arg::PathArg; +use buck2_data::ActionKey; +use buck2_data::ActionName; +use buck2_event_log::stream_value::StreamValue; +use buck2_event_observer::action_util::get_action_digest; +use buck2_event_observer::display::display_action_identity; +use buck2_event_observer::display::TargetDisplayOptions; +use buck2_wrapper_common::invocation_id::TraceId; +use futures::Stream; +use futures::TryStreamExt; +use linked_hash_map::LinkedHashMap; + +use crate::commands::log::options::EventLogOptions; + +/// Identifies the first divergent action between two builds. +/// Divergence is identified by the same action having differing outputs. Useful for identifying non-determinism. +#[derive(Debug, clap::Parser)] +#[clap(group = clap::ArgGroup::new("first").required(true))] +#[clap(group = clap::ArgGroup::new("second").required(true))] +pub struct ActionDivergenceCommand { + /// A path to an event-log file of the first build. + #[clap(long = "path1", group = "first")] + path1: Option, + /// Trace id of the first build. + #[clap(long = "trace-id1", group = "first")] + trace_id1: Option, + /// Open the event-log file from a recent command for the first build. + #[clap(long, group = "first", value_name = "NUMBER")] + recent1: Option, + /// A path to an event-log file of the second build. + #[clap(long = "path2", group = "second")] + path2: Option, + /// Trace id of the second build. + #[clap(long = "trace-id2", group = "second")] + trace_id2: Option, + /// Open the event-log file from a recent command for the second build. + #[clap(long, group = "second", value_name = "NUMBER")] + recent2: Option, +} + +#[derive(Clone, Debug)] +struct ActionExecutionData { + name: Option, + action_digest: Option, + output_tiny_digests: String, +} + +fn get_action_execution_data<'a>( + event: &'a buck2_data::BuckEvent, +) -> Option<(ActionKey, ActionExecutionData)> { + event.data.as_ref().and_then(|data| match data { + buck2_data::buck_event::Data::SpanEnd(end) => { + end.data.as_ref().and_then(|data| match data { + buck2_data::span_end_event::Data::ActionExecution(ref data) => { + data.key.as_ref().map(|key: &ActionKey| { + ( + key.clone(), + ActionExecutionData { + name: data.name.clone(), + action_digest: get_action_digest(&data.commands), + output_tiny_digests: data + .outputs + .iter() + .fold(String::new(), |acc, action_output| { + acc + " " + &action_output.tiny_digest + }), + }, + ) + }) + } + _ => None, + }) + } + _ => None, + }) +} + +async fn get_digest_map( + mut events: impl Stream> + Unpin + Send, +) -> anyhow::Result> { + let mut out = LinkedHashMap::new(); + + while let Some(event) = events.try_next().await? 
{ + match event { + StreamValue::Event(event) => match get_action_execution_data(&event) { + Some((key, action_execution_data)) => { + out.insert(key, action_execution_data); + } + None => { + continue; + } + }, + _ => {} + } + } + Ok(out) +} + +fn print_divergence_msg( + action: &ActionKey, + ad1: Option<&ActionExecutionData>, + ad2: &ActionExecutionData, +) -> anyhow::Result<()> { + let action_identity = display_action_identity( + Some(action), + ad2.name.as_ref(), + TargetDisplayOptions::for_log(), + )?; + let header = match ad1 { + Some(_) => "Present in both builds with differing output digests", + None => "Present in only the second build", + }; + let output = [ + format!("{:-^44}", "First Divergent Action"), + header.to_owned(), + action_identity, + format!("{:-^44}", "Input Digest"), + format!( + "first: {} \t second: {}", + ad1.and_then(|data| data.action_digest.as_deref()) + .unwrap_or(""), + ad2.action_digest.as_deref().unwrap_or(""), + ), + format!("{:-^44}", "Tiny Output Digest(s)"), + format!( + "first: {} \t second: {}", + ad1.map(|data| data.output_tiny_digests.as_ref()) + .unwrap_or(""), + ad2.output_tiny_digests + ), + ]; + buck2_client_ctx::println!("{}", output.join("\n"))?; + + Ok(()) +} + +impl ActionDivergenceCommand { + pub fn exec(self, _matches: &clap::ArgMatches, ctx: ClientCommandContext<'_>) -> ExitResult { + ctx.instant_command_no_log("log-diff-action-divergence", |ctx| async move { + let options1 = EventLogOptions { + recent: self.recent1, + path: self.path1, + trace_id: self.trace_id1, + no_remote: false, + allow_remote: true, + }; + let options2 = EventLogOptions { + recent: self.recent2, + path: self.path2, + trace_id: self.trace_id2, + no_remote: false, + allow_remote: true, + }; + + let log_path1 = EventLogOptions::get(&options1, &ctx).await?; + let log_path2 = EventLogOptions::get(&options2, &ctx).await?; + + let (invocation1, events1) = log_path1.unpack_stream().await?; + let (invocation2, events2) = log_path2.unpack_stream().await?; + + buck2_client_ctx::println!( + "Analyzing divergent actions between: \n{} and \n{}", + invocation1.display_command_line(), + invocation2.display_command_line() + )?; + + let digest_map1 = get_digest_map(events1).await?; + let digest_map2 = get_digest_map(events2).await?; + + let mut divergence_found = false; + + for (action2, ad2) in digest_map2 { + if let Some(ad1) = digest_map1.get(&action2).cloned() { + if ad1.output_tiny_digests == ad2.output_tiny_digests { + continue; + } + divergence_found = true; + print_divergence_msg(&action2, Some(&ad1), &ad2)?; + } else { + divergence_found = true; + print_divergence_msg(&action2, None, &ad2)?; + } + break; + } + if !divergence_found { + buck2_client_ctx::println!("No divergent actions found.")?; + } + anyhow::Ok(()) + }) + .into() + } +} diff --git a/app/buck2_client/src/commands/log/mod.rs b/app/buck2_client/src/commands/log/mod.rs deleted file mode 100644 index d600e1a651117..0000000000000 --- a/app/buck2_client/src/commands/log/mod.rs +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -mod critical_path; -pub(crate) mod debug_replay; -pub(crate) mod debug_what_ran; -pub(crate) mod options; -pub(crate) mod path_log; -mod replay; -mod show_log; -mod show_user_log; -mod what_cmd; -mod what_failed; -mod what_materialized; -pub(crate) mod what_ran; -mod what_up; -mod what_uploaded; - -use buck2_client_ctx::argv::Argv; -use buck2_client_ctx::argv::SanitizedArgv; -use buck2_client_ctx::client_ctx::ClientCommandContext; -use buck2_client_ctx::exit_result::ExitResult; -use dupe::Dupe; - -#[derive( - Debug, - serde::Serialize, - serde::Deserialize, - Clone, - Dupe, - clap::ArgEnum -)] -#[clap(rename_all = "snake_case")] -pub enum LogCommandOutputFormat { - Tabulated, - Json, - Csv, -} - -#[derive(Debug, clap::Subcommand)] -#[clap(about = "Commands for interacting with buck2 logs")] -pub enum LogCommand { - #[clap(alias = "whatran")] - WhatRan(what_ran::WhatRanCommand), - #[clap(alias = "whatfailed")] - WhatFailed(what_failed::WhatFailedCommand), - #[clap(alias = "last")] - Path(path_log::PathLogCommand), - Show(show_log::ShowLogCommand), - #[clap(alias = "whatcmd", alias = "what-cmd")] - Cmd(what_cmd::WhatCmdCommand), - #[clap(alias = "whatup")] - WhatUp(what_up::WhatUpCommand), - WhatMaterialized(what_materialized::WhatMaterializedCommand), - WhatUploaded(what_uploaded::WhatUploadedCommand), - CriticalPath(critical_path::CriticalPathCommand), - Replay(replay::ReplayCommand), - ShowUser(show_user_log::ShowUserLogCommand), -} - -impl LogCommand { - pub fn exec(self, matches: &clap::ArgMatches, ctx: ClientCommandContext<'_>) -> ExitResult { - match self { - Self::WhatRan(cmd) => cmd.exec(matches, ctx), - Self::WhatFailed(cmd) => cmd.exec(matches, ctx), - Self::Path(cmd) => cmd.exec(matches, ctx), - Self::Show(cmd) => cmd.exec(matches, ctx), - Self::Cmd(cmd) => cmd.exec(matches, ctx), - Self::WhatUp(cmd) => cmd.exec(matches, ctx), - Self::WhatMaterialized(cmd) => cmd.exec(matches, ctx), - Self::WhatUploaded(cmd) => cmd.exec(matches, ctx), - Self::CriticalPath(cmd) => cmd.exec(matches, ctx), - Self::Replay(cmd) => cmd.exec(matches, ctx), - Self::ShowUser(cmd) => cmd.exec(matches, ctx), - } - } - - pub fn sanitize_argv(&self, argv: Argv) -> SanitizedArgv { - argv.no_need_to_sanitize() - } -} diff --git a/app/buck2_client/src/commands/log/options.rs b/app/buck2_client/src/commands/log/options.rs index ecf1a85a1d5d4..73c8740719169 100644 --- a/app/buck2_client/src/commands/log/options.rs +++ b/app/buck2_client/src/commands/log/options.rs @@ -12,22 +12,22 @@ use std::process::Stdio; use anyhow::Context; use buck2_client_ctx::client_ctx::ClientCommandContext; use buck2_client_ctx::path_arg::PathArg; -use buck2_client_ctx::subscribers::event_log::file_names::find_log_by_trace_id; -use buck2_client_ctx::subscribers::event_log::file_names::retrieve_nth_recent_log; -use buck2_client_ctx::subscribers::event_log::read::EventLogPathBuf; -use buck2_client_ctx::subscribers::event_log::utils::Encoding; use buck2_common::temp_path::TempPath; use buck2_core::fs::fs_util; use buck2_core::fs::paths::abs_path::AbsPathBuf; use buck2_core::fs::paths::file_name::FileName; use buck2_core::fs::paths::file_name::FileNameBuf; +use buck2_event_log::file_names::find_log_by_trace_id; +use buck2_event_log::file_names::retrieve_nth_recent_log; +use buck2_event_log::read::EventLogPathBuf; +use buck2_event_log::utils::Encoding; use buck2_util::indent::indent; use buck2_util::process::async_background_command; use buck2_wrapper_common::invocation_id::TraceId; use dupe::Dupe; use rand::Rng; -#[derive(Debug, 
thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum EventLogOptionsError { #[error("Manifold failed; stderr:\n{}", indent(" ", _0))] ManifoldFailed(String), @@ -35,28 +35,28 @@ enum EventLogOptionsError { LogNotFoundLocally(TraceId), } -#[derive(Debug, clap::Parser)] -#[clap(group = clap::ArgGroup::with_name("event_log"))] +#[derive(Debug, Clone, clap::Parser)] +#[clap(group = clap::ArgGroup::new("event_log"))] pub(crate) struct EventLogOptions { /// Open the event-log file from a recent command. #[clap(long, group = "event_log", value_name = "NUMBER")] - recent: Option, + pub(crate) recent: Option, /// Show log by trace id. #[clap(long, group = "event_log", value_name = "ID")] - trace_id: Option, + pub(crate) trace_id: Option, /// This option does nothing. - #[clap(long, requires = "trace-id")] - allow_remote: bool, + #[clap(long, requires = "trace_id")] + pub(crate) allow_remote: bool, /// Do not allow downloading the log from manifold if it's not found locally. - #[clap(long, requires = "trace-id")] - no_remote: bool, + #[clap(long, requires = "trace_id")] + pub(crate) no_remote: bool, /// A path to an event-log file to read from. #[clap(group = "event_log", value_name = "PATH")] - path: Option, + pub(crate) path: Option, } impl EventLogOptions { @@ -75,7 +75,10 @@ impl EventLogOptions { return Err(EventLogOptionsError::LogNotFoundLocally(id.dupe()).into()); } } else { - retrieve_nth_recent_log(ctx, self.recent.unwrap_or(0)) + retrieve_nth_recent_log( + ctx.paths().context("Error identifying log dir")?, + self.recent.unwrap_or(0), + ) } } diff --git a/app/buck2_client/src/commands/log/path_log.rs b/app/buck2_client/src/commands/log/path_log.rs index 285fe4f886306..2e6cc94cdbefe 100644 --- a/app/buck2_client/src/commands/log/path_log.rs +++ b/app/buck2_client/src/commands/log/path_log.rs @@ -7,9 +7,10 @@ * of this source tree. */ +use anyhow::Context; use buck2_client_ctx::client_ctx::ClientCommandContext; use buck2_client_ctx::exit_result::ExitResult; -use buck2_client_ctx::subscribers::event_log::file_names::retrieve_all_logs; +use buck2_event_log::file_names::retrieve_all_logs; use crate::commands::log::options::EventLogOptions; @@ -32,9 +33,9 @@ impl PathLogCommand { all, } = self; - ctx.with_runtime(async move |ctx| { + ctx.instant_command_no_log("log-path", |ctx| async move { let paths = if all { - retrieve_all_logs(&ctx)? + retrieve_all_logs(ctx.paths().context("Error identifying log dir")?)? } else { vec![event_log_options.get(&ctx).await?] }; @@ -42,8 +43,7 @@ impl PathLogCommand { buck2_client_ctx::println!("{}", path.path().display())?; } anyhow::Ok(()) - })?; - - ExitResult::success() + }) + .into() } } diff --git a/app/buck2_client/src/commands/log/replay.rs b/app/buck2_client/src/commands/log/replay.rs index ecfd8d7545199..635137ac3f88d 100644 --- a/app/buck2_client/src/commands/log/replay.rs +++ b/app/buck2_client/src/commands/log/replay.rs @@ -7,16 +7,16 @@ * of this source tree. 
*/ -use anyhow::Context as _; use buck2_client_ctx::client_ctx::ClientCommandContext; -use buck2_client_ctx::common::CommonConsoleOptions; +use buck2_client_ctx::common::ui::CommonConsoleOptions; use buck2_client_ctx::daemon::client::NoPartialResultHandler; use buck2_client_ctx::events_ctx::EventsCtx; use buck2_client_ctx::exit_result::ExitResult; -use buck2_client_ctx::exit_result::FailureExitCode; use buck2_client_ctx::replayer::Replayer; use buck2_client_ctx::signal_handler::with_simple_sigint_handler; use buck2_client_ctx::subscribers::get::get_console_with_root; +use buck2_client_ctx::subscribers::subscribers::EventSubscribers; +use buck2_error::buck2_error_anyhow; use crate::commands::log::options::EventLogOptions; @@ -24,9 +24,7 @@ use crate::commands::log::options::EventLogOptions; /// /// This command allows visualizing an existing event log in a Superconsole. #[derive(Debug, clap::Parser)] -#[clap( - setting = clap::AppSettings::TrailingVarArg -)] +#[clap(trailing_var_arg = true)] pub struct ReplayCommand { #[clap(flatten)] event_log: EventLogOptions, @@ -42,11 +40,11 @@ pub struct ReplayCommand { #[clap(long)] preload: bool, - #[clap(flatten)] - console_opts: CommonConsoleOptions, - #[clap(help = "Override the arguments")] pub override_args: Vec, + + #[clap(flatten)] + console_opts: CommonConsoleOptions, } impl ReplayCommand { @@ -59,11 +57,14 @@ impl ReplayCommand { override_args: _, } = self; - ctx.with_runtime(async move |mut ctx| { + ctx.instant_command_no_log("log-replay", |mut ctx| async move { let work = async { let (replayer, invocation) = Replayer::new(event_log.get(&ctx).await?, speed, preload).await?; - + let build_count_dir = match ctx.paths() { + Ok(paths) => Some(paths.build_count_dir()), + Err(_) => None, + }; let console = get_console_with_root( invocation.trace_id, console_opts.console_type, @@ -72,34 +73,46 @@ impl ReplayCommand { speed, "(replay)", // Could be better console_opts.superconsole_config(), - )? - .context("You must request a console for replay")?; + build_count_dir, + )?; - let res = EventsCtx::new(vec![console]) + let res = EventsCtx::new(EventSubscribers::new(vec![console])) .unpack_stream::<_, ReplayResult, _>( &mut NoPartialResultHandler, Box::pin(replayer), None, ctx.stdin().console_interaction_stream(&console_opts), ) - .await??; - + .await; + + if let Err(e) = &res { + let msg = "request finished without returning a CommandResult"; + if e.to_string().contains(msg) { + buck2_client_ctx::eprintln!( + "Warning: Incomplete log. Replay may be inaccurate." 
+ )?; + }; + }; + + let res = res??; for e in &res.errors { - buck2_client_ctx::eprintln!("{}", e)?; + buck2_client_ctx::eprintln!("{}", e.message)?; } - ExitResult::success() + // FIXME(JakobDegen)(easy): This should probably return failures if there were errors + Ok(()) }; with_simple_sigint_handler(work) .await - .unwrap_or_else(|| ExitResult::from(FailureExitCode::SignalInterrupt)) + .unwrap_or_else(|| Err(buck2_error_anyhow!([], "Signal Interrupted"))) }) + .into() } } struct ReplayResult { - errors: Vec, + errors: Vec, } impl TryFrom for ReplayResult { @@ -111,10 +124,10 @@ impl TryFrom for ReplayResult { // It would be good to declare this as a extension trait on our types, but for now to // support Replay this is fine; let errors = match v { - Result::Error(v) => v.messages, - Result::BuildResponse(v) => v.error_messages, - Result::TestResponse(v) => v.error_messages, - Result::BxlResponse(v) => v.error_messages, + Result::Error(v) => v.errors, + Result::BuildResponse(v) => v.errors, + Result::TestResponse(v) => v.errors, + Result::BxlResponse(v) => v.errors, _ => Vec::new(), }; diff --git a/app/buck2_client/src/commands/log/show_log.rs b/app/buck2_client/src/commands/log/show_log.rs index 8f15d16b1581e..43ba0f935a037 100644 --- a/app/buck2_client/src/commands/log/show_log.rs +++ b/app/buck2_client/src/commands/log/show_log.rs @@ -25,7 +25,7 @@ impl ShowLogCommand { pub fn exec(self, _matches: &clap::ArgMatches, ctx: ClientCommandContext<'_>) -> ExitResult { let Self { event_log } = self; - ctx.with_runtime(async move |ctx| { + ctx.instant_command_no_log("log-show", |ctx| async move { let log_path = event_log.get(&ctx).await?; let (invocation, mut events) = log_path.unpack_stream().await?; @@ -44,7 +44,7 @@ impl ShowLogCommand { } anyhow::Ok(()) - })?; - ExitResult::success() + }) + .into() } } diff --git a/app/buck2_client/src/commands/log/show_user_log.rs b/app/buck2_client/src/commands/log/show_user_log.rs index f86e83f379780..2e6c92f9ac3d6 100644 --- a/app/buck2_client/src/commands/log/show_user_log.rs +++ b/app/buck2_client/src/commands/log/show_user_log.rs @@ -10,7 +10,7 @@ use buck2_client_ctx::client_ctx::ClientCommandContext; use buck2_client_ctx::exit_result::ExitResult; use buck2_client_ctx::stdio; -use buck2_client_ctx::subscribers::event_log::user_event_types::try_get_user_event_for_read; +use buck2_event_log::user_event_types::try_get_user_event_for_read; use tokio_stream::StreamExt; use crate::commands::log::options::EventLogOptions; @@ -26,7 +26,7 @@ impl ShowUserLogCommand { pub fn exec(self, _matches: &clap::ArgMatches, ctx: ClientCommandContext<'_>) -> ExitResult { let Self { event_log } = self; - ctx.with_runtime(async move |ctx| { + ctx.instant_command_no_log("log-show-user", |ctx| async move { let log_path = event_log.get(&ctx).await?; let (invocation, mut events) = log_path.unpack_stream().await?; @@ -48,7 +48,7 @@ impl ShowUserLogCommand { } anyhow::Ok(()) - })?; - ExitResult::success() + }) + .into() } } diff --git a/app/buck2_client/src/commands/log/summary.rs b/app/buck2_client/src/commands/log/summary.rs new file mode 100644 index 0000000000000..e29e1bc38c1d6 --- /dev/null +++ b/app/buck2_client/src/commands/log/summary.rs @@ -0,0 +1,282 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
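+ *
+ * Editorial note (not part of the patch): this new module backs
+ * `buck2 log summary`, which scans an event log offline and prints aggregate
+ * build stats. Example, summarizing the most recent build:
+ *
+ *   buck2 log summary --recent 0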
+ */
+
+use std::cmp::max;
+use std::fmt::Display;
+use std::fmt::Formatter;
+use std::time::Duration;
+use std::time::SystemTime;
+
+use buck2_client_ctx::client_ctx::ClientCommandContext;
+use buck2_client_ctx::exit_result::ExitResult;
+use buck2_client_ctx::subscribers::recorder::process_memory;
+use buck2_data::ActionExecutionKind;
+use buck2_event_log::stream_value::StreamValue;
+use buck2_event_observer::fmt_duration;
+use buck2_event_observer::humanized::HumanizedBytes;
+use buck2_event_observer::humanized::HumanizedBytesPerSecond;
+use buck2_util::network_speed_average::NetworkSpeedAverage;
+use buck2_util::sliding_window::SlidingWindow;
+use tokio_stream::StreamExt;
+
+use crate::commands::log::options::EventLogOptions;
+
+#[derive(Default)]
+struct Stats {
+    // TODO(yurysamkevich): add number of file changes since last build once available in log
+    total_bytes_uploaded: u64,
+    total_files_materialized: u64,
+    total_bytes_materialized: u64,
+    total_local_actions: u64,
+    // TODO(yurysamkevich): split by RE platform - mac/windows/linux once available in log
+    total_remote_actions: u64,
+    total_cached_actions: u64,
+    total_other_actions: u64,
+    total_targets_analysed: u64,
+    peak_process_memory_bytes: Option<u64>,
+    re_avg_download_speed: NetworkSpeedAverage,
+    re_avg_upload_speed: NetworkSpeedAverage,
+    duration: Option<prost_types::Duration>,
+    peak_used_disk_space_bytes: Option<u64>,
+    total_disk_space_bytes: Option<u64>,
+    system_total_memory_bytes: Option<u64>,
+    re_max_download_speeds: Vec<SlidingWindow>,
+    re_max_upload_speeds: Vec<SlidingWindow>,
+    hg_revision: Option<String>,
+    has_local_changes: Option<bool>,
+}
+
+impl Stats {
+    fn update_with_event(&mut self, event: &buck2_data::BuckEvent) {
+        match &event.data {
+            Some(buck2_data::buck_event::Data::SpanEnd(end)) => match end.data.as_ref() {
+                Some(buck2_data::span_end_event::Data::ReUpload(ref data)) => {
+                    self.total_bytes_uploaded += data.bytes_uploaded.unwrap_or_default();
+                }
+                Some(buck2_data::span_end_event::Data::Materialization(ref data)) => {
+                    self.total_files_materialized += data.file_count;
+                    self.total_bytes_materialized += data.total_bytes;
+                }
+                Some(buck2_data::span_end_event::Data::ActionExecution(ref data)) => {
+                    match ActionExecutionKind::from_i32(data.execution_kind) {
+                        Some(ActionExecutionKind::Local) => self.total_local_actions += 1,
+                        Some(ActionExecutionKind::Remote) => self.total_remote_actions += 1,
+                        Some(ActionExecutionKind::ActionCache) => self.total_cached_actions += 1,
+                        _ => self.total_other_actions += 1,
+                    }
+                }
+                Some(buck2_data::span_end_event::Data::Analysis(_)) => {
+                    self.total_targets_analysed += 1;
+                }
+                Some(buck2_data::span_end_event::Data::Command(_command)) => {
+                    self.duration = end.duration.clone();
+                }
+                _ => {}
+            },
+            Some(buck2_data::buck_event::Data::Instant(instant_event)) => {
+                match instant_event.data.as_ref() {
+                    Some(buck2_data::instant_event::Data::Snapshot(snapshot)) => {
+                        self.peak_process_memory_bytes =
+                            max(self.peak_process_memory_bytes, process_memory(snapshot));
+                        self.peak_used_disk_space_bytes = max(
+                            self.peak_used_disk_space_bytes,
+                            snapshot.used_disk_space_bytes,
+                        );
+
+                        if let Some(ts) = get_event_timestamp(event) {
+                            self.re_avg_download_speed
+                                .update(ts, snapshot.re_download_bytes);
+                            self.re_avg_upload_speed
+                                .update(ts, snapshot.re_upload_bytes);
+
+                            for s in self.re_max_download_speeds.iter_mut() {
+                                s.update(ts, snapshot.re_download_bytes);
+                            }
+
+                            for s in self.re_max_upload_speeds.iter_mut() {
+                                s.update(ts, snapshot.re_upload_bytes);
+                            }
+                        }
+                    }
+                    Some(buck2_data::instant_event::Data::SystemInfo(system_info)) => {
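+                        // Editorial note (not part of the patch): the Snapshot arm
+                        // above feeds cumulative byte counters into
+                        // NetworkSpeedAverage/SlidingWindow, which derive rates from
+                        // deltas between timestamped snapshots. A minimal,
+                        // hypothetical sketch of that idea:
+                        //
+                        //   struct Rate { last: Option<(SystemTime, u64)> }
+                        //   impl Rate {
+                        //       /// Returns bytes/sec since the previous sample, if any.
+                        //       fn update(&mut self, ts: SystemTime, total: u64) -> Option<u64> {
+                        //           let (t0, b0) = self.last.replace((ts, total))?;
+                        //           let secs = ts.duration_since(t0).ok()?.as_secs().max(1);
+                        //           Some(total.saturating_sub(b0) / secs)
+                        //       }
+                        //   }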
self.total_disk_space_bytes = system_info.total_disk_space_bytes; + self.system_total_memory_bytes = system_info.system_total_memory_bytes; + } + Some(buck2_data::instant_event::Data::VersionControlRevision(vcs)) => { + match vcs.hg_revision { + Some(ref revision) => { + self.hg_revision = Some(revision.clone()); + } + None => {} + } + match vcs.has_local_changes { + Some(ref has_local_changes) => { + self.has_local_changes = Some(*has_local_changes); + } + None => {} + } + } + _ => {} + } + } + + _ => {} + } + } +} + +fn get_event_timestamp(event: &buck2_data::BuckEvent) -> Option { + SystemTime::try_from(event.timestamp.clone()?).ok() +} + +impl Display for Stats { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + writeln!( + f, + "total files materialized: {}", + self.total_files_materialized + )?; + writeln!( + f, + "total bytes materialized: {}", + self.total_bytes_materialized + )?; + writeln!(f, "total bytes uploaded: {}", self.total_bytes_uploaded)?; + writeln!(f, "local actions: {}", self.total_local_actions)?; + writeln!(f, "remote actions: {}", self.total_remote_actions)?; + writeln!(f, "cached actions: {}", self.total_cached_actions)?; + writeln!(f, "other actions: {}", self.total_other_actions)?; + writeln!(f, "targets analysed: {}", self.total_targets_analysed)?; + if let (Some(peak_process_memory_bytes), Some(system_total_memory_bytes)) = ( + self.peak_process_memory_bytes, + self.system_total_memory_bytes, + ) { + writeln!( + f, + "peak process memory: {} out of {}", + HumanizedBytes::fixed_width(peak_process_memory_bytes), + HumanizedBytes::fixed_width(system_total_memory_bytes) + )?; + } + if let (Some(peak_used_disk_space_bytes), Some(total_disk_space_bytes)) = + (self.peak_used_disk_space_bytes, self.total_disk_space_bytes) + { + writeln!( + f, + "peak used disk space: {} out of {}", + HumanizedBytes::fixed_width(peak_used_disk_space_bytes), + HumanizedBytes::fixed_width(total_disk_space_bytes) + )?; + } + if let Some(re_avg_download_speed) = self.re_avg_download_speed.avg_per_second() { + writeln!( + f, + "average download speed: {}", + HumanizedBytesPerSecond::fixed_width(re_avg_download_speed) + )?; + } + if let Some(re_avg_upload_speed) = self.re_avg_upload_speed.avg_per_second() { + writeln!( + f, + "average upload speed: {}", + HumanizedBytesPerSecond::fixed_width(re_avg_upload_speed) + )?; + } + + if let Some(re_max_download_speed) = self + .re_max_download_speeds + .iter() + .map(|w| w.max_per_second().unwrap_or_default()) + .max() + { + writeln!( + f, + "max download speed: {}", + HumanizedBytesPerSecond::fixed_width(re_max_download_speed) + )?; + } + + if let Some(re_max_upload_speed) = self + .re_max_upload_speeds + .iter() + .map(|w| w.max_per_second().unwrap_or_default()) + .max() + { + writeln!( + f, + "max upload speed: {}", + HumanizedBytesPerSecond::fixed_width(re_max_upload_speed) + )?; + } + + if let Some(duration) = &self.duration { + let duration = std::time::Duration::new(duration.seconds as u64, duration.nanos as u32); + writeln!(f, "duration: {}", fmt_duration::fmt_duration(duration, 1.0))?; + } else { + // TODO(ezgi): when there is no CommandEnd, take the timestamp from the last event and calculate the duration + } + if let Some(hg_revision) = &self.hg_revision { + writeln!(f, "hg revision: {}", hg_revision)?; + } + + if let Some(has_local_changes) = self.has_local_changes { + writeln!(f, "has local changes: {}", has_local_changes)?; + } else { + writeln!(f, "has local changes: unknown")?; + } + Ok(()) + } +} + +/// Outputs high 
level statistics about the build +#[derive(Debug, clap::Parser)] +pub struct SummaryCommand { + #[clap(flatten)] + event_log: EventLogOptions, +} + +impl SummaryCommand { + pub fn exec(self, _matches: &clap::ArgMatches, ctx: ClientCommandContext<'_>) -> ExitResult { + ctx.instant_command_no_log("log-summary", |ctx| async move { + let log_path = self.event_log.get(&ctx).await?; + + let (invocation, mut events) = log_path.unpack_stream().await?; + + buck2_client_ctx::eprintln!( + "Showing summary from: {}", + invocation.display_command_line() + )?; + buck2_client_ctx::eprintln!("build ID: {}", invocation.trace_id)?; + + let mut stats = Stats { + re_max_download_speeds: vec![ + SlidingWindow::new(Duration::from_secs(1)), + SlidingWindow::new(Duration::from_secs(5)), + SlidingWindow::new(Duration::from_secs(10)), + ], + re_max_upload_speeds: vec![ + SlidingWindow::new(Duration::from_secs(1)), + SlidingWindow::new(Duration::from_secs(5)), + SlidingWindow::new(Duration::from_secs(10)), + ], + ..Default::default() + }; + + while let Some(event) = events.try_next().await? { + match event { + StreamValue::Event(event) => stats.update_with_event(&event), + StreamValue::Result(..) | StreamValue::PartialResult(..) => {} + } + } + buck2_client_ctx::eprintln!("{}", stats)?; + anyhow::Ok(()) + }) + .into() + } +} diff --git a/app/buck2_client/src/commands/log/what_cmd.rs b/app/buck2_client/src/commands/log/what_cmd.rs index b2ae05e3dd637..4d98a32a8bcbe 100644 --- a/app/buck2_client/src/commands/log/what_cmd.rs +++ b/app/buck2_client/src/commands/log/what_cmd.rs @@ -30,7 +30,7 @@ impl WhatCmdCommand { pub(crate) fn exec(self, _matches: &clap::ArgMatches, ctx: ClientCommandContext) -> ExitResult { let WhatCmdCommand { event_log, expand } = self; - ctx.with_runtime(async move |ctx| { + ctx.instant_command_no_log("log-what-cmd", |ctx| async move { let log_path = event_log.get(&ctx).await?; let (invocation, _events) = log_path.unpack_stream().await?; @@ -40,8 +40,8 @@ impl WhatCmdCommand { } else { buck2_client_ctx::println!("{}", invocation.display_command_line())?; } - - ExitResult::success() + Ok(()) }) + .into() } } diff --git a/app/buck2_client/src/commands/log/what_failed.rs b/app/buck2_client/src/commands/log/what_failed.rs index b2001d083d4fc..7d3a19b2b4c54 100644 --- a/app/buck2_client/src/commands/log/what_failed.rs +++ b/app/buck2_client/src/commands/log/what_failed.rs @@ -27,6 +27,9 @@ impl WhatFailedCommand { WhatRanCommand { common: self.common, failed: true, + incomplete: false, + show_std_err: false, + omit_empty_std_err: false, } .exec(matches, ctx) } diff --git a/app/buck2_client/src/commands/log/what_materialized.rs b/app/buck2_client/src/commands/log/what_materialized.rs index e7efd5158a4fc..4d844df093198 100644 --- a/app/buck2_client/src/commands/log/what_materialized.rs +++ b/app/buck2_client/src/commands/log/what_materialized.rs @@ -7,13 +7,23 @@ * of this source tree. 
*/ +use std::collections::BTreeMap; +use std::ffi::OsStr; +use std::fmt::Display; +use std::fmt::Formatter; +use std::io::Write; +use std::path::Path; + use buck2_client_ctx::client_ctx::ClientCommandContext; use buck2_client_ctx::exit_result::ExitResult; -use buck2_client_ctx::stream_value::StreamValue; +use buck2_event_log::stream_value::StreamValue; +use serde::Serialize; use tokio_stream::StreamExt; use crate::commands::log::options::EventLogOptions; +use crate::commands::log::transform_format; use crate::commands::log::LogCommandOutputFormat; +use crate::commands::log::LogCommandOutputFormatWithWriter; /// Outputs materializations from selected invocation. /// @@ -23,118 +33,197 @@ use crate::commands::log::LogCommandOutputFormat; pub struct WhatMaterializedCommand { #[clap(flatten)] event_log: EventLogOptions, + + #[clap( + long = "sort-by-size", + short = 's', + help = "Sort the output by total bytes in ascending order", + conflicts_with = "aggregate_by_ext" + )] + sort_by_total_bytes: bool, + + /// Aggregates the output by file extension + #[clap(long, conflicts_with = "sort_by_total_bytes")] + aggregate_by_ext: bool, + #[clap( long = "format", help = "Which output format to use for this command", default_value = "tabulated", ignore_case = true, - arg_enum + value_enum )] - pub output: LogCommandOutputFormat, + output: LogCommandOutputFormat, } -fn write_output( - output: &LogCommandOutputFormat, - materialization: &buck2_data::MaterializationEnd, - method: &str, -) -> anyhow::Result<()> { - #[derive(serde::Serialize)] - struct Record<'a> { - path: &'a str, - method: &'a str, - file_count: u64, - total_bytes: u64, +#[derive(serde::Serialize)] +struct Record { + path: String, + method: &'static str, + file_count: u64, + total_bytes: u64, + action_digest: Option, +} + +impl Display for Record { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}\t{}\t{}\t{}\t{}", + self.path, + self.method, + self.action_digest.as_deref().unwrap_or("none"), + self.file_count, + self.total_bytes + ) + } +} + +#[derive(Eq, Ord, PartialEq, PartialOrd)] +struct AggregationKey<'a> { + extension: &'a str, + method: &'static str, +} + +#[derive(serde::Serialize)] +struct AggregatedRecord<'a> { + extension: &'a str, + method: &'static str, + file_count: u64, + total_bytes: u64, +} + +impl<'a> AggregatedRecord<'a> { + fn update(&mut self, value: &Record) { + self.file_count += value.file_count; + self.total_bytes += value.total_bytes; } + fn get_key(&self) -> AggregationKey<'a> { + AggregationKey { + extension: self.extension, + method: self.method, + } + } +} + +impl<'a> Display for AggregatedRecord<'a> { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}\t{}\t{}\t{}", + self.extension, self.method, self.file_count, self.total_bytes + ) + } +} + +impl<'a> From<&'a Record> for AggregatedRecord<'a> { + fn from(value: &'a Record) -> Self { + Self { + extension: Path::new(&value.path) + .extension() + .and_then(OsStr::to_str) + .unwrap_or(""), + method: value.method, + file_count: value.file_count, + total_bytes: value.total_bytes, + } + } +} + +fn write_output( + output: &mut LogCommandOutputFormatWithWriter, + record: &T, +) -> anyhow::Result<()> { match output { - LogCommandOutputFormat::Tabulated => { - buck2_client_ctx::println!( - "{}\t{}\t{}\t{}", - materialization.path, - method, - materialization.file_count, - materialization.total_bytes - ) + LogCommandOutputFormatWithWriter::Tabulated(w) => Ok(writeln!(w, "{}", record)?), + 
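+        // Editorial note (not part of the patch): the Json arm below emits
+        // newline-delimited JSON, one record per line, serialized in struct field
+        // order. For the Record type above (illustrative values):
+        //
+        //   let r = Record { path: "a/b.o".to_owned(), method: "cas",
+        //                    file_count: 1, total_bytes: 4096, action_digest: None };
+        //   assert_eq!(
+        //       serde_json::to_string(&r).unwrap(),
+        //       r#"{"path":"a/b.o","method":"cas","file_count":1,"total_bytes":4096,"action_digest":null}"#
+        //   );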
LogCommandOutputFormatWithWriter::Csv(writer) => Ok(writer.serialize(record)?), + LogCommandOutputFormatWithWriter::Json(w) => { + serde_json::to_writer(w.by_ref(), &record)?; + w.write_all("\n".as_bytes())?; + Ok(()) } - LogCommandOutputFormat::Csv => buck2_client_ctx::stdio::print_with_writer(|w| { - let mut writer = csv::WriterBuilder::new().has_headers(false).from_writer(w); - writer.serialize(Record { - path: &materialization.path, - method, - file_count: materialization.file_count, - total_bytes: materialization.total_bytes, - }) - }), - LogCommandOutputFormat::Json => buck2_client_ctx::stdio::print_with_writer(|mut w| { - let record = Record { - path: &materialization.path, - method, - file_count: materialization.file_count, - total_bytes: materialization.total_bytes, - }; - serde_json::to_writer(&mut w, &record)?; - w.write(b"\n").map(|_| ()) - }), + } +} + +fn get_record(materialization: &buck2_data::MaterializationEnd) -> Record { + let method = match materialization + .method + .and_then(buck2_data::MaterializationMethod::from_i32) + { + Some(buck2_data::MaterializationMethod::CasDownload) => "cas", + Some(buck2_data::MaterializationMethod::LocalCopy) => "copy", + Some(buck2_data::MaterializationMethod::HttpDownload) => "http", + Some(buck2_data::MaterializationMethod::Write) => "write", + _ => "", + }; + Record { + path: materialization.path.clone(), + method, + file_count: materialization.file_count, + total_bytes: materialization.total_bytes, + action_digest: materialization.action_digest.clone(), } } impl WhatMaterializedCommand { pub fn exec(self, _matches: &clap::ArgMatches, ctx: ClientCommandContext<'_>) -> ExitResult { - let Self { event_log, output } = self; - - ctx.with_runtime(async move |ctx| { - let log_path = event_log.get(&ctx).await?; - - let (invocation, mut events) = log_path.unpack_stream().await?; - - buck2_client_ctx::eprintln!( - "Showing materializations from: {}", - invocation.display_command_line() - )?; - - while let Some(event) = events.try_next().await? { - match event { - StreamValue::Event(event) => { - match &event.data { - Some(buck2_data::buck_event::Data::SpanEnd(ref end)) => match end - .data - .as_ref() - { - Some(buck2_data::span_end_event::Data::Materialization(m)) => { - // Only log what has been materialized. - if !m.success { - continue; - } + let Self { + event_log, + output, + sort_by_total_bytes, + aggregate_by_ext, + } = self; + buck2_client_ctx::stdio::print_with_writer::(|w| { + let mut output = transform_format(output, w); + ctx.instant_command_no_log("log-what-materialized", |ctx| async move { + let log_path = event_log.get(&ctx).await?; + + let (invocation, mut events) = log_path.unpack_stream().await?; + + buck2_client_ctx::eprintln!( + "Showing materializations from: {}", + invocation.display_command_line() + )?; - let method = match m - .method - .and_then(buck2_data::MaterializationMethod::from_i32) - { - Some(buck2_data::MaterializationMethod::CasDownload) => { - "cas" - } - Some(buck2_data::MaterializationMethod::LocalCopy) => { - "copy" - } - Some(buck2_data::MaterializationMethod::HttpDownload) => { - "http" - } - Some(buck2_data::MaterializationMethod::Write) => "write", - _ => "", - }; - - write_output(&output, m, method)?; + let mut records: Vec = Vec::new(); + while let Some(event) = events.try_next().await? { + match event { + StreamValue::Event(event) => match &event.data { + Some(buck2_data::buck_event::Data::SpanEnd(buck2_data::SpanEndEvent { + data: Some(buck2_data::span_end_event::Data::Materialization(m)), + .. 
+                            })) if m.success =>
+                            // Only log what has been materialized.
+                            {
+                                let record = get_record(m);
+                                if sort_by_total_bytes || aggregate_by_ext {
+                                    records.push(record);
+                                } else {
+                                    write_output(&mut output, &record)?;
+                                }
                             }
-                                _ => {}
-                            },
                             _ => {}
-                        }
+                        },
+                        StreamValue::Result(..) | StreamValue::PartialResult(..) => {}
+                    };
+                }
+
+                if aggregate_by_ext {
+                    let mut kv: BTreeMap<AggregationKey, AggregatedRecord> = BTreeMap::new();
+                    for r in records.iter() {
+                        let v: AggregatedRecord = r.into();
+                        let k = v.get_key();
+                        kv.entry(k).and_modify(|e| e.update(r)).or_insert(v);
                     }
-                    StreamValue::Result(..) | StreamValue::PartialResult(..) => {}
+                    kv.iter().try_for_each(|(_, v)| write_output(&mut output, v))?;
+                } else if sort_by_total_bytes {
+                    records.sort_by(|a, b| a.total_bytes.cmp(&b.total_bytes));
+                    records.iter().try_for_each(|r| write_output(&mut output, r))?;
                 }
-            }
-            anyhow::Ok(())
+                anyhow::Ok(())
+            })
         })?;
         ExitResult::success()
     }
diff --git a/app/buck2_client/src/commands/log/what_ran.rs b/app/buck2_client/src/commands/log/what_ran.rs
index 4240fa892da7e..abe205170c32b 100644
--- a/app/buck2_client/src/commands/log/what_ran.rs
+++ b/app/buck2_client/src/commands/log/what_ran.rs
@@ -9,12 +9,14 @@
 use std::borrow::Cow;
 use std::collections::HashMap;
+use std::io::Write;
 
-use async_trait::async_trait;
+use anyhow::Context;
 use buck2_client_ctx::client_ctx::ClientCommandContext;
 use buck2_client_ctx::exit_result::ExitResult;
-use buck2_client_ctx::stream_value::StreamValue;
 use buck2_data::re_platform::Property;
+use buck2_event_log::stream_value::StreamValue;
+use buck2_event_observer::fmt_duration;
 use buck2_event_observer::what_ran;
 use buck2_event_observer::what_ran::CommandReproducer;
 use buck2_event_observer::what_ran::WhatRanOptions;
@@ -23,13 +25,16 @@ use buck2_event_observer::what_ran::WhatRanOutputCommandExtra;
 use buck2_event_observer::what_ran::WhatRanOutputWriter;
 use buck2_event_observer::what_ran::WhatRanRelevantAction;
 use buck2_event_observer::what_ran::WhatRanState;
+use buck2_events::span::SpanId;
 use futures::stream::Stream;
 use futures::TryStreamExt;
 use indexmap::IndexMap;
 
 use crate::commands::log::options::EventLogOptions;
+use crate::commands::log::transform_format;
 use crate::commands::log::LogCommandOutputFormat;
-
+use crate::commands::log::LogCommandOutputFormatWithWriter;
+use crate::commands::log::OutputFormatWithWriter;
 /// Output everything Buck2 ran from selected invocation.
 ///
 /// The output is presented as a series of tab-delimited records with the following structure:
@@ -58,8 +63,25 @@ pub struct WhatRanCommand {
     pub common: WhatRanCommandCommon,
 
     /// Show only commands that failed
-    #[clap(long)]
+    #[clap(long, conflicts_with = "incomplete")]
     pub failed: bool,
+
+    /// Show only commands that were not completed; that is, commands that were
+    /// still running when the buck2 process was killed, or commands that are
+    /// running now if buck2 is currently running a build.
+    #[clap(long)]
+    pub incomplete: bool,
+
+    /// Also show std_err from the commands that are run.
+    /// If the command fails before completing, we display "<command did not finish executing>".
+    /// If it finishes but there is no error, we display "<stderr is empty>".
+    /// Otherwise, std_err is shown. For JSON, we show raw values and null for non-completion.
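+    // Editorial note (not part of the patch): example invocations for the new
+    // flags, assuming an event log from a recent build is available:
+    //
+    //   buck2 log what-ran --incomplete
+    //   buck2 log what-ran --failed --show-std-err
+    //   buck2 log what-ran --show-std-err --omit-empty-std-err --format json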
+ #[clap(long, conflicts_with = "incomplete")] + pub show_std_err: bool, + + /// Omit commands if their std_err is empty + #[clap(long, conflicts_with = "incomplete", requires = "show_std_err")] + pub omit_empty_std_err: bool, } #[derive(Debug, clap::Parser)] @@ -72,12 +94,22 @@ pub struct WhatRanCommandCommon { help = "Which output format to use for this command", default_value = "tabulated", ignore_case = true, - arg_enum + value_enum )] - pub output: LogCommandOutputFormat, + output: LogCommandOutputFormat, #[clap(flatten)] - pub options: WhatRanOptions, + options: WhatRanOptions, +} + +struct WhatRanCommandOptions { + options: WhatRanOptions, + + /// Print commands only if they failed. + failed: bool, + + /// Print commands only if they did not finish. + incomplete: bool, } impl WhatRanCommand { @@ -86,144 +118,133 @@ impl WhatRanCommand { common: WhatRanCommandCommon { event_log, - mut output, + output, options, }, failed, + incomplete, + show_std_err, + omit_empty_std_err, } = self; - - ctx.with_runtime(async move |ctx| { - let log_path = event_log.get(&ctx).await?; - - let (invocation, events) = log_path.unpack_stream().await?; - - buck2_client_ctx::eprintln!( - "Showing commands from: {}", - invocation.display_command_line() - )?; - - if failed { - WhatFailedImpl::execute(events, &mut output, &options).await?; - } else { - WhatRanImpl::execute(events, &mut output, &options).await?; + buck2_client_ctx::stdio::print_with_writer::(|w| { + let mut output = OutputFormatWithWriter { + format: transform_format(output, w), + include_std_err: show_std_err, + omit_empty_std_err, }; + ctx.instant_command_no_log("log-what-ran", |ctx| async move { + let log_path = event_log.get(&ctx).await?; + + let (invocation, events) = log_path.unpack_stream().await?; + + buck2_client_ctx::eprintln!( + "Showing commands from: {}{}", + invocation.display_command_line(), + if options.filter_category.is_some() { + ", filtered by action category" + } else { + "" + } + )?; - anyhow::Ok(()) + let options = WhatRanCommandOptions { + options, + failed, + incomplete, + }; + WhatRanCommandState::execute(events, &mut output, &options).await?; + Ok(()) + }) })?; - ExitResult::success() } } -#[async_trait] -trait WhatRanCommandImplementation: Default { - fn event( - &mut self, - event: Box, - output: &mut impl WhatRanOutputWriter, - options: &WhatRanOptions, - ) -> anyhow::Result<()>; +#[allow(clippy::vec_box)] +struct WhatRanEntry { + /// Known to be a WhatRanRelevantAction. + event: Box, - async fn execute( - mut events: impl Stream> + Unpin + Send, - output: &mut (impl WhatRanOutputWriter + Send), - options: &WhatRanOptions, - ) -> anyhow::Result<()> { - let mut cmd = Self::default(); + /// Known to be a CommandReproducer. + reproducers: Vec>, +} - while let Some(event) = events.try_next().await? 
{ - match event { - StreamValue::Event(event) => cmd.event(event, output, options)?, - _ => {} - } +impl WhatRanEntry { + fn emit_what_ran_entry( + &self, + output: &mut impl WhatRanOutputWriter, + data: &Option, + options: &WhatRanCommandOptions, + ) -> anyhow::Result<()> { + let action = WhatRanRelevantAction::from_buck_data( + self.event.data.as_ref().context("Checked above")?, + ); + let options_regex = what_ran::WhatRanOptionsRegex::from_options(&options.options)?; + for repro in self.reproducers.iter() { + what_ran::emit_what_ran_entry( + action, + CommandReproducer::from_buck_data( + repro.data.as_ref().context("Checked above")?, + options_regex.options, + ) + .context("Checked above")?, + data, + output, + &options_regex, + )?; } - Ok(()) } } /// The state for a WhatRan command. This is all the events we have seen that are -/// WhatRanRelevantActions. This emits the actions immediately. +/// we have seen that are WhatRanRelevantActions, and the CommandReproducer associated with them. #[derive(Default)] -pub struct WhatRanImpl { +pub struct WhatRanCommandState { /// Maps action spans to their details. - known_actions: HashMap>, + known_actions: HashMap, } -impl WhatRanState for WhatRanImpl { - fn get(&self, span_id: u64) -> Option> { +impl WhatRanState for WhatRanCommandState { + fn get(&self, span_id: SpanId) -> Option> { self.known_actions .get(&span_id) - .and_then(|e| e.data.as_ref()) + .and_then(|e| e.event.data.as_ref()) .and_then(WhatRanRelevantAction::from_buck_data) } } -impl WhatRanCommandImplementation for WhatRanImpl { - /// Receive a new event. We start by emitting it if it's relevant (since that only takes a - /// borrow), and then if it's relevant as a parent, we store it for latter use. Note that in - /// practice we don't expect the event to be *both* relevant to emit *and* a - /// WhatRanRelevantAction, but it doesn't hurt to always check both. - fn event( - &mut self, - event: Box, +impl WhatRanCommandState { + async fn execute( + mut events: impl Stream> + Unpin + Send, output: &mut impl WhatRanOutputWriter, - options: &WhatRanOptions, + options: &WhatRanCommandOptions, ) -> anyhow::Result<()> { - if let Some(data) = &event.data { - what_ran::emit_event_if_relevant(event.parent_id, data, &*self, output, options)?; + let mut cmd = Self::default(); - if WhatRanRelevantAction::from_buck_data(data).is_some() { - self.known_actions.insert(event.span_id, event); + while let Some(event) = events.try_next().await? { + match event { + StreamValue::Event(event) => cmd.event(event, output, options)?, + _ => {} } } - - Ok(()) + cmd.emit_remaining(output, options) } -} -/// The state for a WhatRan command when only showing actions that failed. This stores all the events -/// we have seen that are WhatRanRelevantActions, and the CommandReproducer associated with them. -#[derive(Default)] -pub struct WhatFailedImpl { - /// Maps action spans to their details. - known_actions: HashMap, -} - -#[allow(clippy::vec_box)] -struct WhatFailedEntry { - /// Known to be a WhatRanRelevantAction. - event: Box, - - /// Known to be a CommandReproducer. - reproducers: Vec>, -} - -impl WhatRanState for WhatFailedImpl { - fn get(&self, span_id: u64) -> Option> { - self.known_actions - .get(&span_id) - .and_then(|e| e.event.data.as_ref()) - .and_then(WhatRanRelevantAction::from_buck_data) - } -} - -impl WhatRanCommandImplementation for WhatFailedImpl { - /// Receive a new event. 
We start by emitting it if it's relevant (since that only takes a
-    /// borrow), and then if it's relevant as a parent, we store it for latter use. Note that in
-    /// practice we don't expect the event to be *both* relevant to emit *and* a
+    /// Receive a new event. We store it if it's relevant and emit it later.
+    /// Note that in practice we don't expect the event to be *both* relevant to emit *and* a
     /// WhatRanRelevantAction, but it doesn't hurt to always check both.
     fn event(
         &mut self,
         event: Box<buck2_data::BuckEvent>,
         output: &mut impl WhatRanOutputWriter,
-        options: &WhatRanOptions,
+        options: &WhatRanCommandOptions,
     ) -> anyhow::Result<()> {
         if let Some(data) = &event.data {
             if WhatRanRelevantAction::from_buck_data(data).is_some() {
                 self.known_actions.insert(
-                    event.span_id,
-                    WhatFailedEntry {
+                    SpanId::from_u64(event.span_id)?,
+                    WhatRanEntry {
                         event,
                         reproducers: Default::default(),
                     },
@@ -231,67 +252,120 @@ impl WhatRanCommandImplementation for WhatFailedImpl {
                 return Ok(());
             }
 
-            if CommandReproducer::from_buck_data(data, options).is_some() {
-                if let Some(entry) = self.known_actions.get_mut(&event.parent_id) {
-                    entry.reproducers.push(event);
+            if CommandReproducer::from_buck_data(data, &options.options).is_some() {
+                if let Some(parent_id) = SpanId::from_u64_opt(event.parent_id) {
+                    if let Some(entry) = self.known_actions.get_mut(&parent_id) {
+                        entry.reproducers.push(event);
+                    }
                 }
                 return Ok(());
             }
 
             match data {
-                buck2_data::buck_event::Data::SpanEnd(span) => match &span.data {
-                    Some(buck2_data::span_end_event::Data::ActionExecution(action))
-                        if action.failed =>
+                buck2_data::buck_event::Data::SpanEnd(span) => {
+                    if let Some(entry) =
+                        self.known_actions.remove(&SpanId::from_u64(event.span_id)?)
                     {
-                        if let Some(entry) = self.known_actions.remove(&event.span_id) {
-                            let action = WhatRanRelevantAction::from_buck_data(
-                                entry.event.data.as_ref().expect("Checked above"),
-                            );
-
-                            for repro in entry.reproducers.iter() {
-                                what_ran::emit_reproducer(
-                                    action,
-                                    CommandReproducer::from_buck_data(
-                                        repro.data.as_ref().expect("Checked above"),
-                                        options,
-                                    )
-                                    .expect("Checked above"),
-                                    output,
-                                )?;
-                            }
+                        if should_emit_finished_action(&span.data, options) {
+                            entry.emit_what_ran_entry(output, &span.data, options)?;
                         }
                     }
-                    _ => {}
-                },
+                }
                 _ => {}
             }
         }
 
         Ok(())
     }
+
+    fn emit_remaining(
+        &self,
+        output: &mut impl WhatRanOutputWriter,
+        options: &WhatRanCommandOptions,
+    ) -> anyhow::Result<()> {
+        for (_, entry) in self.known_actions.iter() {
+            if should_emit_unfinished_action(options) {
+                entry.emit_what_ran_entry(output, &None, options)?;
+            }
+        }
+        Ok(())
+    }
+}
+
+fn should_emit_finished_action(
+    data: &Option<buck2_data::span_end_event::Data>,
+    options: &WhatRanCommandOptions,
+) -> bool {
+    if options.incomplete {
+        return false;
+    }
+
+    match data {
+        Some(buck2_data::span_end_event::Data::ActionExecution(action)) => {
+            action.failed || !options.failed
+        }
+        _ => !options.failed, // This is dead code (this span can only be ActionExecution End given
+                              // its ID must match an ActionExecution start).
+    }
+}
+
+fn should_emit_unfinished_action(options: &WhatRanCommandOptions) -> bool {
+    !options.failed // We don't know if it failed or not.
+}
 
 /// An output that writes to stdout in a tabulated format.
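// Editorial note (not part of the patch): OutputFormatWithWriter is defined in
// log/mod.rs, which is not shown in this hunk. From its construction in
// WhatRanCommand::exec above and the impl below, a plausible shape would be:
//
//   pub struct OutputFormatWithWriter<'a> {
//       pub format: LogCommandOutputFormatWithWriter<'a>,
//       pub include_std_err: bool,
//       pub omit_empty_std_err: bool,
//   }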
-impl WhatRanOutputWriter for LogCommandOutputFormat { +impl WhatRanOutputWriter for OutputFormatWithWriter<'_> { fn emit_command(&mut self, command: WhatRanOutputCommand<'_>) -> anyhow::Result<()> { - match self { - Self::Tabulated => { - buck2_client_ctx::println!( - "{}\t{}\t{}\t{}", - command.reason(), - command.identity(), - command.repro().executor(), - command.repro().as_human_readable() - ) + if self.include_std_err + && self.omit_empty_std_err + && command.std_err.map_or(false, |s| s.is_empty()) + { + return Ok(()); + } + let std_err_formatted = if self.include_std_err { + Some(command.std_err.map_or_else( + || "", + |std_err| { + if std_err.is_empty() { + "" + } else { + std_err + } + }, + )) + } else { + None + }; + + match &mut self.format { + LogCommandOutputFormatWithWriter::Tabulated(w) => { + w.write_all(format!("{}\n", command.as_tabulated_reproducer()).as_bytes())?; + if let Some(std_err) = std_err_formatted { + write!( + w, + "{}{}", + std_err, + if std_err.ends_with('\n') { "" } else { "\n" } + )?; + } + Ok(()) } - Self::Json => { - let reproducer = match command.repro() { + LogCommandOutputFormatWithWriter::Json(w) => { + let reproducer = match command.repro { CommandReproducer::CacheQuery(cache_hit) => JsonReproducer::CacheQuery { digest: &cache_hit.action_digest, }, - CommandReproducer::CacheHit(cache_hit) => JsonReproducer::Cache { - digest: &cache_hit.action_digest, - action_key: cache_hit.action_key.as_deref(), + CommandReproducer::CacheHit(cache_hit) => match cache_hit.cache_type() { + buck2_data::CacheType::ActionCache => JsonReproducer::Cache { + digest: &cache_hit.action_digest, + action_key: cache_hit.action_key.as_deref(), + }, + buck2_data::CacheType::RemoteDepFileCache => { + JsonReproducer::ReDepFileCache { + digest: &cache_hit.action_digest, + action_key: cache_hit.action_key.as_deref(), + } + } }, CommandReproducer::ReExecute(re_execute) => JsonReproducer::Re { digest: &re_execute.action_digest, @@ -340,37 +414,44 @@ impl WhatRanOutputWriter for LogCommandOutputFormat { .collect(), }, }; + let std_err = if self.include_std_err { + Some(command.std_err.unwrap_or("null")) + } else { + None + }; let command = JsonCommand { - reason: command.reason(), - identity: command.identity(), + reason: command.reason, + identity: command.identity, reproducer, - extra: command.extra().map(Into::into), + duration: command + .duration + .map(|duration| fmt_duration::fmt_duration(duration, 1.0)), + extra: command.extra.map(Into::into), + std_err, }; - - buck2_client_ctx::stdio::print_with_writer(|mut w| { - serde_json::to_writer(&mut w, &command)?; - w.write(b"\n").map(|_| ()) - }) + serde_json::to_writer(w.by_ref(), &command)?; + w.write_all("\n".as_bytes())?; + Ok(()) } - Self::Csv => { + LogCommandOutputFormatWithWriter::Csv(writer) => { #[derive(serde::Serialize)] struct Record<'a> { reason: &'a str, identity: &'a str, executor: String, reproducer: String, + #[serde(skip_serializing_if = "Option::is_none")] + std_err: Option<&'a str>, } - - buck2_client_ctx::stdio::print_with_writer(|w| { - let mut writer = csv::WriterBuilder::new().has_headers(false).from_writer(w); - writer.serialize(Record { - reason: command.reason(), - identity: command.identity(), - executor: command.repro().executor(), - reproducer: command.repro().as_human_readable().to_string(), - }) - }) + writer.serialize(Record { + reason: command.reason, + identity: command.identity, + executor: command.repro.executor(), + reproducer: command.repro.as_human_readable().to_string(), + std_err: 
std_err_formatted, + })?; + Ok(()) } } } @@ -391,7 +472,11 @@ struct JsonCommand<'a> { identity: &'a str, reproducer: JsonReproducer<'a>, #[serde(skip_serializing_if = "Option::is_none")] + duration: Option, + #[serde(skip_serializing_if = "Option::is_none")] extra: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + std_err: Option<&'a str>, } mod json_reproducer { @@ -410,6 +495,11 @@ mod json_reproducer { #[serde(skip_serializing_if = "Option::is_none")] action_key: Option<&'a str>, }, + ReDepFileCache { + digest: &'a str, + #[serde(skip_serializing_if = "Option::is_none")] + action_key: Option<&'a str>, + }, Re { digest: &'a str, platform_properties: IndexMap<&'a str, &'a str>, @@ -460,7 +550,9 @@ mod tests { reason: "test.run", identity: "some/target", reproducer: JsonReproducer::Local { command, env }, + duration: Some("1".to_owned()), extra: None, + std_err: None, } } @@ -475,7 +567,9 @@ mod tests { }, action_key: None, }, + duration: Some("1".to_owned()), extra: None, + std_err: None, } } @@ -497,7 +591,8 @@ mod tests { "KEY": "val" } } - } + }, + "duration": "1" }"#; assert_eq!(expected, serde_json::to_string_pretty(&command)?); Ok(()) @@ -524,6 +619,7 @@ mod tests { } } }, + "duration": "1", "extra": { "testcases": [ "case" @@ -549,7 +645,8 @@ mod tests { "platform": "linux-remote-execution" } } - } + }, + "duration": "1" }"#; assert_eq!(expected, serde_json::to_string_pretty(&command)?); Ok(()) diff --git a/app/buck2_client/src/commands/log/what_up.rs b/app/buck2_client/src/commands/log/what_up.rs index 20d145643ecb4..51f855965bc92 100644 --- a/app/buck2_client/src/commands/log/what_up.rs +++ b/app/buck2_client/src/commands/log/what_up.rs @@ -14,13 +14,13 @@ use std::time::SystemTimeError; use buck2_client_ctx::client_ctx::ClientCommandContext; use buck2_client_ctx::exit_result::ExitResult; -use buck2_client_ctx::stream_value::StreamValue; use buck2_client_ctx::subscribers::superconsole::session_info::SessionInfoComponent; use buck2_client_ctx::subscribers::superconsole::timed_list::TimedList; use buck2_client_ctx::subscribers::superconsole::StatefulSuperConsole; use buck2_client_ctx::subscribers::superconsole::SuperConsoleConfig; use buck2_client_ctx::subscribers::superconsole::SuperConsoleState; use buck2_client_ctx::subscribers::superconsole::CUTOFFS; +use buck2_event_log::stream_value::StreamValue; use buck2_event_observer::verbosity::Verbosity; use buck2_events::BuckEvent; use superconsole::components::DrawVertical; @@ -52,7 +52,7 @@ impl WhatUpCommand { let Self { event_log, after } = self; let cutoff_time = after.map(Duration::from_millis); - ctx.with_runtime(async move |ctx| { + ctx.instant_command_no_log("log-what-up", |ctx| async move { let log_path = event_log.get(&ctx).await?; // Get events @@ -61,6 +61,10 @@ impl WhatUpCommand { let mut super_console = StatefulSuperConsole::console_builder() .build_forced(StatefulSuperConsole::FALLBACK_SIZE)?; + let build_count_dir = match ctx.paths() { + Ok(paths) => Some(paths.build_count_dir()), + Err(_) => None, + }; let mut super_console_state = SuperConsoleState::new( None, invocation.trace_id, @@ -70,6 +74,7 @@ impl WhatUpCommand { max_lines: 1000000, ..Default::default() }, + build_count_dir, )?; let mut first_timestamp = None; // Ignore any events that are truncated, hence unreadable @@ -90,7 +95,9 @@ impl WhatUpCommand { _ => (), } - super_console_state.update_event_observer(&Arc::new(e))?; + super_console_state + .update_event_observer(&Arc::new(e)) + .await?; } StreamValue::PartialResult(..) 
=> {} StreamValue::Result(result) => { @@ -129,7 +136,7 @@ impl WhatUpCommand { }, mode, )?; - draw.draw(&TimedList::new(&CUTOFFS, "", self.state), mode)?; + draw.draw(&TimedList::new(&CUTOFFS, self.state), mode)?; Ok(draw.finish()) } } diff --git a/app/buck2_client/src/commands/log/what_uploaded.rs b/app/buck2_client/src/commands/log/what_uploaded.rs index 0729bbfcb99d7..e8af856c3b692 100644 --- a/app/buck2_client/src/commands/log/what_uploaded.rs +++ b/app/buck2_client/src/commands/log/what_uploaded.rs @@ -8,16 +8,22 @@ */ use std::collections::HashMap; +use std::fmt::Display; +use std::fmt::Formatter; +use std::io::Write; use buck2_client_ctx::client_ctx::ClientCommandContext; use buck2_client_ctx::exit_result::ExitResult; -use buck2_client_ctx::stream_value::StreamValue; +use buck2_data::ReUploadMetrics; +use buck2_event_log::stream_value::StreamValue; use buck2_event_observer::display; use buck2_event_observer::display::TargetDisplayOptions; use tokio_stream::StreamExt; use crate::commands::log::options::EventLogOptions; +use crate::commands::log::transform_format; use crate::commands::log::LogCommandOutputFormat; +use crate::commands::log::LogCommandOutputFormatWithWriter; /// Outputs stats about uploads to RE from the selected invocation. #[derive(Debug, clap::Parser)] @@ -29,64 +35,115 @@ pub struct WhatUploadedCommand { help = "Which output format to use for this command", default_value = "tabulated", ignore_case = true, - arg_enum + value_enum )] - pub output: LogCommandOutputFormat, + output: LogCommandOutputFormat, + #[clap( + long = "aggregate-by-ext", + help = "Aggregates the output by file extension" + )] + aggregate_by_extension: bool, } -fn print_uploads( - format: &LogCommandOutputFormat, - upload: ReUploadEvent, +#[derive(serde::Serialize)] +struct ActionRecord { + action: String, + digests_uploaded: u64, + bytes_uploaded: u64, +} + +impl Display for ActionRecord { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}\t{}\t{}", + self.action, self.digests_uploaded, self.bytes_uploaded + ) + } +} + +#[derive(serde::Serialize)] +struct ExtensionRecord { + extension: String, + digests_uploaded: u64, + bytes_uploaded: u64, +} + +impl Display for ExtensionRecord { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}\t{}\t{}", + self.extension, self.digests_uploaded, self.bytes_uploaded + ) + } +} + +fn get_action_record( state: &HashMap, - total_digests: &mut u64, - total_bytes: &mut u64, -) -> anyhow::Result<()> { + upload: &ReUploadEvent, +) -> ActionRecord { let digests_uploaded = upload.inner.digests_uploaded.unwrap_or_default(); let bytes_uploaded = upload.inner.bytes_uploaded.unwrap_or_default(); - *total_digests += digests_uploaded; - *total_bytes += bytes_uploaded; - + let unknown = "unknown action".to_owned(); let action_str = if let Some(action) = state.get(&upload.parent_span_id) { display::display_action_identity( action.key.as_ref(), action.name.as_ref(), TargetDisplayOptions::for_log(), - )? 
+ ) + .unwrap_or(unknown) } else { - "unknown action".to_owned() + unknown }; - - #[derive(serde::Serialize)] - struct Record<'a> { - action: &'a str, - digests_uploaded: u64, - bytes_uploaded: u64, + ActionRecord { + action: action_str, + digests_uploaded, + bytes_uploaded, } +} - match format { - LogCommandOutputFormat::Tabulated => { - buck2_client_ctx::println!("{}\t{}\t{}", action_str, digests_uploaded, bytes_uploaded) +fn print_uploads( + output: &mut LogCommandOutputFormatWithWriter, + record: &ActionRecord, +) -> anyhow::Result<()> { + match output { + LogCommandOutputFormatWithWriter::Tabulated(w) => Ok(writeln!(w, "{}", record)?), + LogCommandOutputFormatWithWriter::Csv(writer) => Ok(writer.serialize(record)?), + LogCommandOutputFormatWithWriter::Json(w) => { + serde_json::to_writer(w.by_ref(), &record)?; + w.write_all("\n".as_bytes())?; + Ok(()) } - LogCommandOutputFormat::Csv => buck2_client_ctx::stdio::print_with_writer(|w| { - let mut writer = csv::WriterBuilder::new().has_headers(false).from_writer(w); - writer.serialize(Record { - action: &action_str, - digests_uploaded, - bytes_uploaded, - }) - }), - LogCommandOutputFormat::Json => { - buck2_client_ctx::stdio::print_with_writer(|w| { - let record = Record { - action: &action_str, - digests_uploaded, - bytes_uploaded, - }; - serde_json::to_writer(w, &record) - })?; - buck2_client_ctx::println!("") + } +} + +fn print_extension_stats( + output: &mut LogCommandOutputFormatWithWriter, + stats_by_extension: &HashMap, +) -> anyhow::Result<()> { + let mut records: Vec = stats_by_extension + .iter() + .map(|(ext, m)| ExtensionRecord { + extension: ext.to_owned(), + bytes_uploaded: m.bytes_uploaded, + digests_uploaded: m.digests_uploaded, + }) + .collect(); + records.sort_by(|a, b| a.bytes_uploaded.cmp(&b.bytes_uploaded)); + for record in records { + match output { + LogCommandOutputFormatWithWriter::Tabulated(w) => { + writeln!(w, "{}", record)?; + } + LogCommandOutputFormatWithWriter::Csv(writer) => writer.serialize(record)?, + LogCommandOutputFormatWithWriter::Json(w) => { + serde_json::to_writer(w.by_ref(), &record)?; + w.write_all("\n".as_bytes())?; + } } } + Ok(()) } struct ReUploadEvent<'a> { @@ -96,61 +153,87 @@ struct ReUploadEvent<'a> { impl WhatUploadedCommand { pub fn exec(self, _matches: &clap::ArgMatches, ctx: ClientCommandContext<'_>) -> ExitResult { - let Self { event_log, output } = self; - - ctx.with_runtime(async move |ctx| { - let log_path = event_log.get(&ctx).await?; - - let (invocation, mut events) = log_path.unpack_stream().await?; - buck2_client_ctx::eprintln!( - "Showing uploads from: {}", - invocation.display_command_line() - )?; - - let mut total_digests_uploaded = 0; - let mut total_bytes_uploaded = 0; - let mut state = HashMap::new(); - while let Some(event) = events.try_next().await? { - match event { - // Insert parent span information so we can refer back to it later. 
- StreamValue::Event(event) => match event.data { - Some(buck2_data::buck_event::Data::SpanStart(start)) => match start.data { - Some(buck2_data::span_start_event::Data::ActionExecution(action)) => { - state.insert(event.span_id, action); + let Self { + event_log, + output, + aggregate_by_extension, + } = self; + + buck2_client_ctx::stdio::print_with_writer::(|w| { + let mut output = transform_format(output, w); + ctx.instant_command_no_log("log-what-uploaded", |ctx| async move { + let log_path = event_log.get(&ctx).await?; + + let (invocation, mut events) = log_path.unpack_stream().await?; + buck2_client_ctx::eprintln!( + "Showing uploads from: {}", + invocation.display_command_line() + )?; + + let mut total_digests_uploaded = 0; + let mut total_bytes_uploaded = 0; + let mut state = HashMap::new(); + let mut stats_by_extension: HashMap = HashMap::new(); + while let Some(event) = events.try_next().await? { + match event { + // Insert parent span information so we can refer back to it later. + StreamValue::Event(event) => match event.data { + Some(buck2_data::buck_event::Data::SpanStart(start)) => { + match start.data { + Some(buck2_data::span_start_event::Data::ActionExecution( + action, + )) => { + state.insert(event.span_id, action); + } + _ => {} + } } - _ => {} - }, - Some(buck2_data::buck_event::Data::SpanEnd(end)) => { - match end.data.as_ref() { - Some(buck2_data::span_end_event::Data::ReUpload(ref u)) => { - print_uploads( - &output, - ReUploadEvent { + + Some(buck2_data::buck_event::Data::SpanEnd(end)) => { + match end.data.as_ref() { + Some(buck2_data::span_end_event::Data::ReUpload(ref u)) => { + let upload = ReUploadEvent { parent_span_id: event.parent_id, inner: u, - }, - &state, - &mut total_digests_uploaded, - &mut total_bytes_uploaded, - )?; + }; + if aggregate_by_extension { + for (extension, metrics) in + &upload.inner.stats_by_extension + { + let entry = stats_by_extension + .entry(extension.to_owned()) + .or_default(); + entry.bytes_uploaded += metrics.bytes_uploaded; + entry.digests_uploaded += metrics.digests_uploaded; + } + } else { + let record = get_action_record(&state, &upload); + total_digests_uploaded += record.digests_uploaded; + total_bytes_uploaded += record.bytes_uploaded; + print_uploads(&mut output, &record)?; + } + } + _ => {} } - _ => {} } - } - _ => {} - }, - StreamValue::Result(..) | StreamValue::PartialResult(..) => {} + _ => {} + }, + StreamValue::Result(..) | StreamValue::PartialResult(..) 
=> {} + } + } + if aggregate_by_extension { + print_extension_stats(&mut output, &stats_by_extension)?; + } else { + buck2_client_ctx::eprintln!( + "total: digests: {}, bytes: {}", + total_digests_uploaded, + total_bytes_uploaded + )?; } - } - buck2_client_ctx::eprintln!( - "total: digests: {}, bytes: {}", - total_digests_uploaded, - total_bytes_uploaded - )?; - anyhow::Ok(()) + anyhow::Ok(()) + }) })?; - ExitResult::success() } } diff --git a/app/buck2_client/src/commands/lsp.rs b/app/buck2_client/src/commands/lsp.rs index f14babdc979a8..46c5eb2f4d282 100644 --- a/app/buck2_client/src/commands/lsp.rs +++ b/app/buck2_client/src/commands/lsp.rs @@ -10,10 +10,11 @@ use async_trait::async_trait; use buck2_cli_proto::LspRequest; use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::common::ui::CommonConsoleOptions; +use buck2_client_ctx::common::ui::ConsoleType; use buck2_client_ctx::common::CommonBuildConfigurationOptions; -use buck2_client_ctx::common::CommonConsoleOptions; -use buck2_client_ctx::common::CommonDaemonCommandOptions; -use buck2_client_ctx::common::ConsoleType; +use buck2_client_ctx::common::CommonEventLogOptions; +use buck2_client_ctx::common::CommonStarlarkOptions; use buck2_client_ctx::daemon::client::BuckdClientConnector; use buck2_client_ctx::events_ctx::PartialResultCtx; use buck2_client_ctx::events_ctx::PartialResultHandler; @@ -32,7 +33,10 @@ pub struct LspCommand { config_opts: CommonBuildConfigurationOptions, #[clap(flatten)] - event_log_opts: CommonDaemonCommandOptions, + starlark_opts: CommonStarlarkOptions, + + #[clap(flatten)] + event_log_opts: CommonEventLogOptions, } #[async_trait] @@ -86,14 +90,18 @@ impl StreamingCommand for LspCommand { &SIMPLE_CONSOLE } - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { + fn event_log_opts(&self) -> &CommonEventLogOptions { &self.event_log_opts } - fn common_opts(&self) -> &CommonBuildConfigurationOptions { + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions { &self.config_opts } + fn starlark_opts(&self) -> &CommonStarlarkOptions { + &self.starlark_opts + } + fn should_expect_spans(&self) -> bool { // If we're running the LSP, do not show "Waiting for daemon..." if we do not get any spans. false diff --git a/app/buck2_client/src/commands/mod.rs b/app/buck2_client/src/commands/mod.rs deleted file mode 100644 index d3fe3b7ab2c69..0000000000000 --- a/app/buck2_client/src/commands/mod.rs +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
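[Reviewer note] `what-uploaded` attributes an RE upload to an action by replaying the event log: `SpanStart(ActionExecution)` events are recorded under their `span_id`, and a later `SpanEnd(ReUpload)` is looked up via its `parent_id`. A minimal sketch of that correlation, with illustrative event shapes (the real ones are `buck2_data` protobuf types):

```rust
use std::collections::HashMap;

// Illustrative event shapes; the real ones are `buck2_data` protobuf types.
enum Event {
    SpanStart { span_id: u64, action: String },
    SpanEnd { parent_id: u64, bytes_uploaded: u64 },
}

// Remember span starts keyed by span ID so a later `ReUpload` span end can be
// attributed to the action that owns it, falling back to "unknown action".
fn attribute(events: Vec<Event>) -> Vec<(String, u64)> {
    let mut state: HashMap<u64, String> = HashMap::new();
    let mut out = Vec::new();
    for event in events {
        match event {
            Event::SpanStart { span_id, action } => {
                state.insert(span_id, action);
            }
            Event::SpanEnd { parent_id, bytes_uploaded } => {
                let action = state
                    .get(&parent_id)
                    .cloned()
                    .unwrap_or_else(|| "unknown action".to_owned());
                out.push((action, bytes_uploaded));
            }
        }
    }
    out
}

fn main() {
    let log = vec![
        Event::SpanStart { span_id: 1, action: "//app:bin (build)".to_owned() },
        Event::SpanEnd { parent_id: 1, bytes_uploaded: 4096 },
    ];
    assert_eq!(attribute(log), vec![("//app:bin (build)".to_owned(), 4096)]);
}
```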
- */ - -pub mod build; -pub mod bxl; -pub mod clean; -pub mod clean_stale; -pub mod ctargets; -pub mod debug; -pub mod init; -pub mod install; -pub mod kill; -pub mod killall; -pub mod log; -pub mod lsp; -pub mod profile; -pub mod query; -pub mod rage; -pub mod root; -pub mod run; -pub mod server; -pub mod status; -pub mod subscribe; -pub mod targets; -pub mod test; diff --git a/app/buck2_client/src/commands/profile.rs b/app/buck2_client/src/commands/profile.rs index 393557c5379d0..7f6b6b329fb39 100644 --- a/app/buck2_client/src/commands/profile.rs +++ b/app/buck2_client/src/commands/profile.rs @@ -12,79 +12,42 @@ use std::time::Duration; use anyhow::Context as _; use async_trait::async_trait; use buck2_cli_proto::profile_request::ProfileOpts; -use buck2_cli_proto::profile_request::Profiler; -use buck2_cli_proto::target_profile::Action; +use buck2_cli_proto::target_profile; use buck2_cli_proto::BxlProfile; use buck2_cli_proto::ProfileRequest; use buck2_cli_proto::ProfileResponse; use buck2_cli_proto::TargetProfile; -use buck2_client_ctx::argv::Argv; -use buck2_client_ctx::argv::SanitizedArgv; use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::common::target_cfg::TargetCfgWithUniverseOptions; +use buck2_client_ctx::common::ui::CommonConsoleOptions; use buck2_client_ctx::common::CommonBuildConfigurationOptions; use buck2_client_ctx::common::CommonCommandOptions; -use buck2_client_ctx::common::CommonConsoleOptions; -use buck2_client_ctx::common::CommonDaemonCommandOptions; +use buck2_client_ctx::common::CommonEventLogOptions; +use buck2_client_ctx::common::CommonStarlarkOptions; use buck2_client_ctx::daemon::client::BuckdClientConnector; use buck2_client_ctx::daemon::client::NoPartialResultHandler; use buck2_client_ctx::exit_result::ExitResult; use buck2_client_ctx::path_arg::PathArg; use buck2_client_ctx::streaming::BuckSubcommand; use buck2_client_ctx::streaming::StreamingCommand; +use buck2_common::argv::Argv; +use buck2_common::argv::SanitizedArgv; use dupe::Dupe; -use gazebo::prelude::VecExt; use super::bxl::BxlCommandOptions; #[derive(Debug, clap::Parser)] -#[clap(about = "Profiling mechanisms")] +#[clap(about = "Run starlark profiler")] pub enum ProfileCommand { - #[clap(about = "Profile analysis")] - Analysis(BuckProfileOptions), - - #[clap(about = "Profile loading")] - Loading(BuckProfileOptions), - - #[clap(about = "Profile BXL script")] - Bxl(BxlProfileOptions), -} - -pub enum ProfileOptionsType { - BuckProfileOptions { - opts: AnalysisLoadProfileOptions, - action: Action, - }, - BxlProfileOptions { - opts: BxlCommandOptions, - }, + Analysis(ProfileAnalysisCommand), + Loading(ProfileLoadingCommand), + Bxl(ProfileBxlCommand), } impl ProfileCommand { pub fn exec(self, matches: &clap::ArgMatches, ctx: ClientCommandContext<'_>) -> ExitResult { let submatches = matches.subcommand().expect("subcommand not found").1; - match self { - Self::Analysis(opts) => ProfileSubcommand { - opts: ProfileOptionsType::BuckProfileOptions { - opts: opts.buck_opts, - action: Action::Analysis, - }, - profile_common_opts: opts.profile_common_opts, - }, - Self::Loading(opts) => ProfileSubcommand { - opts: ProfileOptionsType::BuckProfileOptions { - opts: opts.buck_opts, - action: Action::Loading, - }, - profile_common_opts: opts.profile_common_opts, - }, - Self::Bxl(opts) => ProfileSubcommand { - opts: ProfileOptionsType::BxlProfileOptions { - opts: opts.bxl_opts, - }, - profile_common_opts: opts.profile_common_opts, - }, - } - .exec(submatches, ctx) + ProfileSubcommand { 
subcommand: self }.exec(submatches, ctx) } pub fn sanitize_argv(&self, argv: Argv) -> SanitizedArgv { @@ -92,8 +55,8 @@ impl ProfileCommand { } } -#[derive(clap::ValueEnum, Dupe, Clone, Debug)] -enum BuckProfileMode { +#[derive(clap::ValueEnum, Dupe, Clone, Copy, Debug)] +pub(crate) enum BuckProfileMode { TimeFlame, HeapFlameAllocated, HeapFlameRetained, @@ -103,10 +66,12 @@ enum BuckProfileMode { Bytecode, BytecodePairs, Typecheck, + Coverage, } +/// Profile BXL script. #[derive(Debug, clap::Parser)] -pub struct BxlProfileOptions { +pub struct ProfileBxlCommand { #[clap(flatten)] bxl_opts: BxlCommandOptions, @@ -114,17 +79,29 @@ pub struct BxlProfileOptions { profile_common_opts: ProfileCommonOptions, } +/// Profile `BUCK` file evaluation. +#[derive(Debug, clap::Parser)] +pub struct ProfileLoadingCommand { + #[clap(flatten)] + buck_opts: AnalysisOrLoadProfileOptions, + + #[clap(flatten)] + profile_common_opts: ProfileCommonOptions, +} + +/// Profile analysis. #[derive(Debug, clap::Parser)] -pub struct BuckProfileOptions { +pub struct ProfileAnalysisCommand { #[clap(flatten)] - buck_opts: AnalysisLoadProfileOptions, + buck_opts: AnalysisOrLoadProfileOptions, #[clap(flatten)] profile_common_opts: ProfileCommonOptions, } +/// Common options for `profile loading` and `profile analysis`. #[derive(Debug, clap::Parser)] -pub struct AnalysisLoadProfileOptions { +struct AnalysisOrLoadProfileOptions { #[clap(value_name = "TARGET_PATTERNS")] target_patterns: Vec, @@ -134,11 +111,9 @@ pub struct AnalysisLoadProfileOptions { recursive: bool, } +/// Common options for three profile subcommands. #[derive(Debug, clap::Parser)] -pub struct ProfileCommonOptions { - #[clap(flatten)] - common_opts: CommonCommandOptions, - +struct ProfileCommonOptions { /// Output file path for profile data. /// /// File will be created if it does not exist, and overwritten if it does. @@ -147,7 +122,7 @@ pub struct ProfileCommonOptions { /// Profile mode. /// - /// Memory profiling modes have suffixes either `-allocated` or `retained`. + /// Memory profiling modes have suffixes either `-allocated` or `-retained`. /// /// `-retained` means memory kept in frozen starlark heap after analysis complete. /// `-retained` does not work when profiling loading, @@ -155,26 +130,42 @@ pub struct ProfileCommonOptions { /// This is probably what you want when profiling analysis. /// /// `-allocated` means allocated memory, including memory which is later garbage collected. 
- #[clap(long, short = 'm', value_enum)] + #[clap(long, value_enum)] mode: BuckProfileMode, + + #[clap(flatten)] + target_cfg: TargetCfgWithUniverseOptions, + + #[clap(flatten)] + common_opts: CommonCommandOptions, } -pub struct ProfileSubcommand { - opts: ProfileOptionsType, - profile_common_opts: ProfileCommonOptions, +struct ProfileSubcommand { + subcommand: ProfileCommand, } -fn profile_mode_to_profile(mode: &BuckProfileMode) -> Profiler { +pub(crate) fn profile_mode_to_profile(mode: BuckProfileMode) -> buck2_cli_proto::ProfileMode { match mode { - BuckProfileMode::TimeFlame => Profiler::TimeFlame, - BuckProfileMode::HeapFlameAllocated => Profiler::HeapFlameAllocated, - BuckProfileMode::HeapFlameRetained => Profiler::HeapFlameRetained, - BuckProfileMode::HeapSummaryAllocated => Profiler::HeapSummaryAllocated, - BuckProfileMode::HeapSummaryRetained => Profiler::HeapSummaryRetained, - BuckProfileMode::Statement => Profiler::Statement, - BuckProfileMode::Bytecode => Profiler::Bytecode, - BuckProfileMode::BytecodePairs => Profiler::BytecodePairs, - BuckProfileMode::Typecheck => Profiler::Typecheck, + BuckProfileMode::TimeFlame => buck2_cli_proto::ProfileMode::TimeFlame, + BuckProfileMode::HeapFlameAllocated => buck2_cli_proto::ProfileMode::HeapFlameAllocated, + BuckProfileMode::HeapFlameRetained => buck2_cli_proto::ProfileMode::HeapFlameRetained, + BuckProfileMode::HeapSummaryAllocated => buck2_cli_proto::ProfileMode::HeapSummaryAllocated, + BuckProfileMode::HeapSummaryRetained => buck2_cli_proto::ProfileMode::HeapSummaryRetained, + BuckProfileMode::Statement => buck2_cli_proto::ProfileMode::Statement, + BuckProfileMode::Bytecode => buck2_cli_proto::ProfileMode::Bytecode, + BuckProfileMode::BytecodePairs => buck2_cli_proto::ProfileMode::BytecodePairs, + BuckProfileMode::Typecheck => buck2_cli_proto::ProfileMode::Typecheck, + BuckProfileMode::Coverage => buck2_cli_proto::ProfileMode::Coverage, + } +} + +impl ProfileSubcommand { + fn common_opts(&self) -> &ProfileCommonOptions { + match &self.subcommand { + ProfileCommand::Analysis(analysis) => &analysis.profile_common_opts, + ProfileCommand::Loading(loading) => &loading.profile_common_opts, + ProfileCommand::Bxl(bxl) => &bxl.profile_common_opts, + } } } @@ -190,60 +181,83 @@ impl StreamingCommand for ProfileSubcommand { ) -> ExitResult { let context = ctx.client_context(matches, &self)?; - let destination_path = self.profile_common_opts.output.resolve(&ctx.working_dir); + let destination_path = self.common_opts().output.resolve(&ctx.working_dir); - let profile_mode = &self.profile_common_opts.mode; + let profile_mode = self.common_opts().mode; let destination_path = destination_path.into_string()?; let console_opts = ctx.stdin().console_interaction_stream(self.console_opts()); - let response = match self.opts { - ProfileOptionsType::BuckProfileOptions { opts, action } => { - let target_opts = TargetProfile { - target_patterns: opts - .target_patterns - .into_map(|value| buck2_data::TargetPattern { value }), - recursive: opts.recursive, - action: action.into(), - }; - - buckd - .with_flushing() - .profile( - ProfileRequest { - context: Some(context), - profile_opts: Some(ProfileOpts::TargetProfile(target_opts)), - destination_path, - profiler: profile_mode_to_profile(profile_mode).into(), - }, - console_opts, - &mut NoPartialResultHandler, - ) - .await?? 
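[Reviewer note] The `arg_enum` → `value_enum` renames throughout this patch track clap's `ValueEnum` derive, which exposes kebab-cased variant names as CLI values; that is how `--mode heap-flame-retained` selects `BuckProfileMode::HeapFlameRetained`. A small sketch, assuming clap 4 with the `derive` feature (the enum is a cut-down stand-in, not the real command definition):

```rust
// Assumes clap 4 with the `derive` feature; the enum here is a cut-down
// stand-in for `BuckProfileMode`, not the real command definition.
use clap::{Parser, ValueEnum};

#[derive(Clone, Copy, Debug, ValueEnum)]
enum Mode {
    TimeFlame,
    HeapFlameAllocated,
    HeapFlameRetained,
}

#[derive(Parser)]
struct Args {
    // `value_enum` makes clap parse the kebab-cased variant names.
    #[clap(long, value_enum)]
    mode: Mode,
}

fn main() {
    let args = Args::parse_from(["prog", "--mode", "heap-flame-retained"]);
    println!("{:?}", args.mode); // HeapFlameRetained
}
```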
- } - ProfileOptionsType::BxlProfileOptions { opts } => { - let bxl_opts = BxlProfile { - bxl_label: opts.bxl_label, - bxl_args: opts.bxl_args, - }; - - buckd - .with_flushing() - .profile( - ProfileRequest { - context: Some(context), - profile_opts: Some(ProfileOpts::BxlProfile(bxl_opts)), - destination_path, - profiler: profile_mode_to_profile(profile_mode).into(), - }, - console_opts, - &mut NoPartialResultHandler, - ) - .await?? + let profiler = profile_mode_to_profile(profile_mode); + + let profile_opts = match &self.subcommand { + ProfileCommand::Loading(loading) => ProfileOpts::TargetProfile(TargetProfile { + target_patterns: loading.buck_opts.target_patterns.clone(), + action: target_profile::Action::Loading as i32, + target_cfg: Some( + loading + .profile_common_opts + .target_cfg + .target_cfg + .target_cfg(), + ), + target_universe: loading + .profile_common_opts + .target_cfg + .target_universe + .clone(), + recursive: loading.buck_opts.recursive, + }), + ProfileCommand::Analysis(analysis) => ProfileOpts::TargetProfile(TargetProfile { + target_patterns: analysis.buck_opts.target_patterns.clone(), + action: target_profile::Action::Analysis as i32, + target_cfg: Some( + analysis + .profile_common_opts + .target_cfg + .target_cfg + .target_cfg(), + ), + target_universe: analysis + .profile_common_opts + .target_cfg + .target_universe + .clone(), + recursive: analysis.buck_opts.recursive, + }), + ProfileCommand::Bxl(bxl) => { + if !bxl + .profile_common_opts + .target_cfg + .target_universe + .is_empty() + { + return Err::<(), _>(anyhow::anyhow!( + "BXL profile does not support target universe" + )) + .into(); + } + ProfileOpts::BxlProfile(BxlProfile { + bxl_label: bxl.bxl_opts.bxl_label.clone(), + bxl_args: bxl.bxl_opts.bxl_args.clone(), + target_cfg: Some(bxl.profile_common_opts.target_cfg.target_cfg.target_cfg()), + }) } }; + let request = ProfileRequest { + context: Some(context), + profile_opts: Some(profile_opts), + destination_path, + profile_mode: profiler as i32, + }; + + let response = buckd + .with_flushing() + .profile(request, console_opts, &mut NoPartialResultHandler) + .await??; + let ProfileResponse { elapsed, total_retained_bytes, @@ -259,7 +273,7 @@ impl StreamingCommand for ProfileSubcommand { buck2_client_ctx::println!( "Starlark {:?} profile has been written to {}", profile_mode, - self.profile_common_opts.output.display(), + self.common_opts().output.display(), )?; buck2_client_ctx::println!("Elapsed: {:.3}s", elapsed.as_secs_f64())?; buck2_client_ctx::println!("Total retained bytes: {}", total_retained_bytes)?; @@ -268,14 +282,18 @@ impl StreamingCommand for ProfileSubcommand { } fn console_opts(&self) -> &CommonConsoleOptions { - &self.profile_common_opts.common_opts.console_opts + &self.common_opts().common_opts.console_opts + } + + fn event_log_opts(&self) -> &CommonEventLogOptions { + &self.common_opts().common_opts.event_log_opts } - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { - &self.profile_common_opts.common_opts.event_log_opts + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions { + &self.common_opts().common_opts.config_opts } - fn common_opts(&self) -> &CommonBuildConfigurationOptions { - &self.profile_common_opts.common_opts.config_opts + fn starlark_opts(&self) -> &CommonStarlarkOptions { + &self.common_opts().common_opts.starlark_opts } } diff --git a/app/buck2_client/src/commands/query.rs b/app/buck2_client/src/commands/query.rs new file mode 100644 index 0000000000000..e5053d644cb42 --- /dev/null +++ 
b/app/buck2_client/src/commands/query.rs @@ -0,0 +1,14 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub mod aquery; +pub(crate) mod common; +pub mod cquery; +pub(crate) mod profile; +pub mod uquery; diff --git a/app/buck2_client/src/commands/query/aquery.rs b/app/buck2_client/src/commands/query/aquery.rs index b24eab2f124e3..dac2c5c872cdb 100644 --- a/app/buck2_client/src/commands/query/aquery.rs +++ b/app/buck2_client/src/commands/query/aquery.rs @@ -11,44 +11,72 @@ use async_trait::async_trait; use buck2_cli_proto::AqueryRequest; use buck2_cli_proto::AqueryResponse; use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::common::target_cfg::TargetCfgOptions; +use buck2_client_ctx::common::ui::CommonConsoleOptions; use buck2_client_ctx::common::CommonBuildConfigurationOptions; use buck2_client_ctx::common::CommonCommandOptions; -use buck2_client_ctx::common::CommonConsoleOptions; -use buck2_client_ctx::common::CommonDaemonCommandOptions; +use buck2_client_ctx::common::CommonEventLogOptions; +use buck2_client_ctx::common::CommonStarlarkOptions; use buck2_client_ctx::daemon::client::BuckdClientConnector; use buck2_client_ctx::daemon::client::StdoutPartialResultHandler; use buck2_client_ctx::exit_result::ExitResult; use buck2_client_ctx::streaming::StreamingCommand; +use buck2_core::if_else_opensource; use crate::commands::query::common::CommonQueryOptions; -/// Perform queries on the action graph (experimental). -/// -/// The action graph consists of all the declared actions for a build, with dependencies -/// when one action consumes the outputs of another action. -/// -/// Examples: -/// -/// Print the action producing a target's default output -/// -/// `buck2 aquery //java/com/example/app:amazing` -/// -/// List all the commands for run actions for building a target -/// -/// `buck2 aquery 'kind(run, deps("//java/com/example/app:amazing+more"))' --output-attribute=cmd` -/// -/// Dynamic outputs (`ctx.actions.dynamic_output`): -/// -/// Currently, aquery interacts poorly with dynamic outputs. It may return incorrect results or otherwise -/// behave unexpectedly. +fn help() -> &'static str { + concat!( + r#"Perform queries on the action graph (experimental) + +The action graph consists of all the declared actions for a build, +with dependencies when one action consumes the outputs of another +action. + +Run `buck2 docs aquery` or +"#, + if_else_opensource!( + "https://buck2.build/docs/users/query/aquery/", + "https://www.internalfb.com/intern/staticdocs/buck2/docs/users/query/aquery/", + ), + r#" +for more documentation about the functions available in aquery +expressions. + +Examples: + +Print the action producing a target's default output + +`buck2 aquery //java/com/example/app:amazing` + +List all the commands for run actions for building a target + +`buck2 aquery 'kind(run, deps("//java/com/example/app:amazing+more"))' --output-attribute=cmd` + +Dynamic outputs (`ctx.actions.dynamic_output`): + +Currently, aquery interacts poorly with dynamic outputs. It may +return incorrect results or otherwise behave unexpectedly. 
+"# + ) +} + #[derive(Debug, clap::Parser)] -#[clap(name = "aquery")] +#[clap( + name = "aquery", + about = "Perform queries on the action graph (experimental)", + long_about = help(), + verbatim_doc_comment, +)] pub struct AqueryCommand { #[clap(flatten)] - common_opts: CommonCommandOptions, + query_common: CommonQueryOptions, #[clap(flatten)] - query_common: CommonQueryOptions, + target_cfg: TargetCfgOptions, + + #[clap(flatten)] + common_opts: CommonCommandOptions, } #[async_trait] @@ -72,6 +100,7 @@ impl StreamingCommand for AqueryCommand { AqueryRequest { query, query_args, + target_cfg: Some(self.target_cfg.target_cfg()), context: Some(context), output_attributes, unstable_output_format, @@ -89,11 +118,15 @@ impl StreamingCommand for AqueryCommand { &self.common_opts.console_opts } - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { + fn event_log_opts(&self) -> &CommonEventLogOptions { &self.common_opts.event_log_opts } - fn common_opts(&self) -> &CommonBuildConfigurationOptions { + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions { &self.common_opts.config_opts } + + fn starlark_opts(&self) -> &CommonStarlarkOptions { + &self.common_opts.starlark_opts + } } diff --git a/app/buck2_client/src/commands/query/common.rs b/app/buck2_client/src/commands/query/common.rs index 293124d91d5a9..44ef9852de8a4 100644 --- a/app/buck2_client/src/commands/query/common.rs +++ b/app/buck2_client/src/commands/query/common.rs @@ -16,7 +16,7 @@ use dupe::Dupe; Debug, Clone, Dupe, - clap::ArgEnum, + clap::ValueEnum, serde::Serialize, serde::Deserialize )] @@ -25,6 +25,7 @@ enum QueryOutputFormatArg { Dot, Json, DotCompact, + Starlark, } /// Args common to all the query commands @@ -53,10 +54,11 @@ pub(crate) struct CommonQueryOptions { long_help = "Output format (default: list). \n dot - dot graph format. \n dot_compact - compact alternative to dot format. \n - json - JSON format. + json - JSON format. \n + starlark - targets are printed like starlark code that would produce them. 
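[Reviewer note] With `starlark` added to the output formats, the resolution logic in `CommonQueryOptions` (next hunk) keeps the same precedence: an explicit `--output-format` wins, then the legacy `--json` boolean, then the default list output. A hedged sketch of that precedence with illustrative names:

```rust
// Illustrative names; the real code maps `Option<QueryOutputFormatArg>` plus
// the legacy `--json` bool into `QueryOutputFormat`.
#[derive(Clone, Copy, Debug, PartialEq)]
enum OutputFormat {
    List,
    Json,
    Dot,
    DotCompact,
    Starlark,
}

fn resolve(explicit: Option<OutputFormat>, json_flag: bool) -> OutputFormat {
    match explicit {
        Some(fmt) => fmt,                        // explicit --output-format wins
        None if json_flag => OutputFormat::Json, // then the legacy --json flag
        None => OutputFormat::List,              // otherwise the default list output
    }
}

fn main() {
    assert_eq!(resolve(Some(OutputFormat::Starlark), true), OutputFormat::Starlark);
    assert_eq!(resolve(None, true), OutputFormat::Json);
    assert_eq!(resolve(None, false), OutputFormat::List);
}
```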
", - value_name = "dot|dot_compact|json", - arg_enum + value_name = "dot|dot_compact|json|starlark", + value_enum )] output_format: Option, @@ -87,6 +89,7 @@ impl CommonQueryOptions { Some(QueryOutputFormatArg::Json) => QueryOutputFormat::Json, Some(QueryOutputFormatArg::Dot) => QueryOutputFormat::Dot, Some(QueryOutputFormatArg::DotCompact) => QueryOutputFormat::DotCompact, + Some(QueryOutputFormatArg::Starlark) => QueryOutputFormat::Starlark, None => { if self.json { QueryOutputFormat::Json diff --git a/app/buck2_client/src/commands/query/cquery.rs b/app/buck2_client/src/commands/query/cquery.rs index b98b477b1e46f..43246738eca7b 100644 --- a/app/buck2_client/src/commands/query/cquery.rs +++ b/app/buck2_client/src/commands/query/cquery.rs @@ -11,82 +11,87 @@ use async_trait::async_trait; use buck2_cli_proto::CqueryRequest; use buck2_cli_proto::CqueryResponse; use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::common::target_cfg::TargetCfgWithUniverseOptions; +use buck2_client_ctx::common::ui::CommonConsoleOptions; use buck2_client_ctx::common::CommonBuildConfigurationOptions; use buck2_client_ctx::common::CommonCommandOptions; -use buck2_client_ctx::common::CommonConsoleOptions; -use buck2_client_ctx::common::CommonDaemonCommandOptions; +use buck2_client_ctx::common::CommonEventLogOptions; +use buck2_client_ctx::common::CommonStarlarkOptions; use buck2_client_ctx::daemon::client::BuckdClientConnector; use buck2_client_ctx::daemon::client::StdoutPartialResultHandler; use buck2_client_ctx::exit_result::ExitResult; use buck2_client_ctx::streaming::StreamingCommand; +use buck2_core::if_else_opensource; use crate::commands::query::common::CommonQueryOptions; +use crate::commands::query::profile::QueryProfileOptions; + +fn help() -> &'static str { + concat!( + r#"Perform queries on the configured target graph + +The configured target graph includes information about the configuration +(platforms) and transitions involved in building targets. In the +configured graph, `selects` are fully resolved. The same target may +appear in multiple different configurations (when printed, the +configuration is after the target in parentheses). + +A user can specify a `--target-universe` flag to control how literals +are resolved. When provided, any literals will resolve to all +matching targets within the universe (which includes the targets +passed as the universe and all transitive deps of them). When not +provided, we implicitly set the universe to be rooted at every +target literal in the `cquery`. + +Run `buck2 docs cquery` or +"#, + if_else_opensource!( + "https://buck2.build/docs/users/query/cquery/", + "https://www.internalfb.com/intern/staticdocs/buck2/docs/users/query/cquery/", + ), + r#" +for more documentation about the functions available in cquery +expressions. + +Examples: + +Print all the attributes of a target + +`buck2 cquery //java/com/example/app:amazing --output-all-attributes` + +List the deps of a target (special characters in a target will +require quotes): + +`buck2 cquery 'deps("//java/com/example/app:amazing+more")'` +"# + ) +} -/// Perform queries on the configured target graph. -/// -/// The configured target graph includes information about the configuration (platforms) and -/// transitions involved in building targets. In the configured graph, `selects` are fully -/// resolved. The same target may appear in multiple different configurations (when printed, -/// the configuration is after the target in parentheses). 
-/// -/// A user can specify a `--target-universe` flag to control how literals are resolved. When -/// provided, any literals will resolve to all matching targets within the universe (which -/// includes the targets passed as the universe and all transitive deps of them). -/// When not provided, we implicitly set the universe to be rooted at every target literal -/// in the `cquery`. -/// -/// Run `buck2 docs cquery` for more documentation about the functions available in cquery -/// expressions. -/// -/// Examples: -/// -/// Print all the attributes of a target -/// -/// `buck2 cquery //java/com/example/app:amazing --output-all-attributes` -/// -/// List the deps of a target (special characters in a target will require quotes): -/// -/// `buck2 cquery 'deps("//java/com/example/app:amazing+more")'` #[derive(Debug, clap::Parser)] -#[clap(name = "cquery")] +#[clap( + name = "cquery", + about = "Perform queries on the configured target graph", + long_about = help(), + verbatim_doc_comment, +)] pub struct CqueryCommand { - #[clap(flatten)] - common_opts: CommonCommandOptions, - #[clap(flatten)] query_common: CommonQueryOptions, - #[clap( - long, - short = 'u', - use_delimiter = true, - help = "Comma separated list of targets at which to root the queryable universe. - This is useful since targets can exist in multiple configurations. While - this argument isn't required, it's recommended for most non-trivial queries. - In the absence of this argument, buck2 will use the target literals - in your cquery expression as the argument to this." - )] - target_universe: Vec, - #[clap( long, help = "Show the providers of the query result instead of the attributes and labels" )] show_providers: bool, - #[allow(rustdoc::bare_urls)] - /// Enable deprecated `owner()` function behavior. - /// - /// See this post https://fburl.com/1mf2d2xj for details. - #[clap(long)] - deprecated_owner: bool, - - #[allow(rustdoc::bare_urls)] - /// Enable correct `owner()` function behavior. - /// - /// See this post https://fburl.com/1mf2d2xj for details. 
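[Reviewer note] The help text above describes how cquery picks its target universe; the standalone `--target-universe` flag is folded into `TargetCfgWithUniverseOptions` rather than removed. A toy sketch of the defaulting rule the help describes (when no universe is passed, the query's own target literals become the roots); types here are illustrative:

```rust
// Toy model of the rule: an explicit universe wins; otherwise the target
// literals from the query expression become the universe roots.
fn universe_roots(explicit: Vec<String>, query_literals: Vec<String>) -> Vec<String> {
    if explicit.is_empty() {
        query_literals
    } else {
        explicit
    }
}

fn main() {
    let implicit = universe_roots(vec![], vec!["//app:amazing".to_owned()]);
    assert_eq!(implicit, vec!["//app:amazing".to_owned()]);
}
```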
- #[clap(long)] - correct_owner: bool, + #[clap(flatten)] + target_cfg: TargetCfgWithUniverseOptions, + + #[clap(flatten)] + common_opts: CommonCommandOptions, + + #[clap(flatten)] + profile_options: QueryProfileOptions, } #[async_trait] @@ -104,17 +109,6 @@ impl StreamingCommand for CqueryCommand { let output_attributes = self.query_common.attributes.get()?; let context = ctx.client_context(matches, &self)?; - let correct_owner = match (self.correct_owner, self.deprecated_owner) { - (true, false) => true, - (false, true) => false, - (false, false) => true, - (true, true) => { - return ExitResult::bail( - "Cannot specify both --correct-owner and --deprecated-owner", - ); - } - }; - let CqueryResponse {} = buckd .with_flushing() .cquery( @@ -123,10 +117,17 @@ impl StreamingCommand for CqueryCommand { query_args, context: Some(context), output_attributes, - target_universe: self.target_universe, + target_universe: self.target_cfg.target_universe, + target_cfg: Some(self.target_cfg.target_cfg.target_cfg()), show_providers: self.show_providers, unstable_output_format, - correct_owner, + profile_mode: self.profile_options.profile_mode_proto().map(|m| m as i32), + profile_output: self + .profile_options + .profile_output + .as_ref() + .map(|p| anyhow::Ok(p.resolve(&ctx.working_dir).to_str()?.to_owned())) + .transpose()?, }, ctx.stdin() .console_interaction_stream(&self.common_opts.console_opts), @@ -141,11 +142,15 @@ impl StreamingCommand for CqueryCommand { &self.common_opts.console_opts } - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { + fn event_log_opts(&self) -> &CommonEventLogOptions { &self.common_opts.event_log_opts } - fn common_opts(&self) -> &CommonBuildConfigurationOptions { + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions { &self.common_opts.config_opts } + + fn starlark_opts(&self) -> &CommonStarlarkOptions { + &self.common_opts.starlark_opts + } } diff --git a/app/buck2_client/src/commands/query/mod.rs b/app/buck2_client/src/commands/query/mod.rs deleted file mode 100644 index 1e8b33b152df4..0000000000000 --- a/app/buck2_client/src/commands/query/mod.rs +++ /dev/null @@ -1,13 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -pub mod aquery; -pub(crate) mod common; -pub mod cquery; -pub mod uquery; diff --git a/app/buck2_client/src/commands/query/profile.rs b/app/buck2_client/src/commands/query/profile.rs new file mode 100644 index 0000000000000..f839ef7571d90 --- /dev/null +++ b/app/buck2_client/src/commands/query/profile.rs @@ -0,0 +1,36 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use buck2_client_ctx::path_arg::PathArg; + +use crate::commands::profile::profile_mode_to_profile; +use crate::commands::profile::BuckProfileMode; + +/// Starlark profiling options +#[derive(Debug, Clone, clap::Parser)] +#[clap(next_help_heading = "Starlark Profiling Options")] +pub(crate) struct QueryProfileOptions { + /// Profile target loading. 
+ /// + /// When this option is enabled, Buck will profile every `BUCK` file loaded during the query + /// and merge the results into a single profile. + /// The command may return cached profile data if `BUCK` files were not invalidated. + #[clap(long, requires("profile_output"))] + pub(crate) profile_mode: Option, + + /// Where to write profile output. + #[clap(long)] + pub(crate) profile_output: Option, +} + +impl QueryProfileOptions { + pub(crate) fn profile_mode_proto(&self) -> Option { + self.profile_mode.map(profile_mode_to_profile) + } +} diff --git a/app/buck2_client/src/commands/query/uquery.rs b/app/buck2_client/src/commands/query/uquery.rs index 903f987c5ebd0..a65177bd8f172 100644 --- a/app/buck2_client/src/commands/query/uquery.rs +++ b/app/buck2_client/src/commands/query/uquery.rs @@ -11,54 +11,81 @@ use async_trait::async_trait; use buck2_cli_proto::UqueryRequest; use buck2_cli_proto::UqueryResponse; use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::common::target_cfg::TargetCfgUnusedOptions; +use buck2_client_ctx::common::ui::CommonConsoleOptions; use buck2_client_ctx::common::CommonBuildConfigurationOptions; use buck2_client_ctx::common::CommonCommandOptions; -use buck2_client_ctx::common::CommonConsoleOptions; -use buck2_client_ctx::common::CommonDaemonCommandOptions; +use buck2_client_ctx::common::CommonEventLogOptions; +use buck2_client_ctx::common::CommonStarlarkOptions; use buck2_client_ctx::daemon::client::BuckdClientConnector; use buck2_client_ctx::daemon::client::StdoutPartialResultHandler; use buck2_client_ctx::exit_result::ExitResult; use buck2_client_ctx::streaming::StreamingCommand; +use buck2_core::if_else_opensource; use crate::commands::query::common::CommonQueryOptions; -/// Perform queries on the unconfigured target graph. -/// -/// The unconfigured target graph consists of the targets as they are defined in the build -/// files. In this graph, each target appears exactly once and `select()`s are in the unresolved -/// form. For large queries, the unconfigured graph may be much smaller than the configured -/// graph and queries can be much more efficiently performed there. -/// -/// When querying the unconfigured graph, dependencies appearing in all branches of `select()` -/// dictionaries will be treated as dependencies. -/// -/// Run `buck2 docs uquery` for more documentation about the functions available in cquery -/// expressions. -/// -/// Examples: -/// -/// Print all the attributes of a target -/// -/// `buck2 uquery //java/com/example/app:amazing --output-all-attributes -/// -/// List the deps of a target (special characters in a target will require quotes): -/// `buck2 uquery 'deps("//java/com/example/app:amazing+more")'` -/// -/// select() encoding: -/// -/// When printed, values with `select()`s use a special json encoding. -/// -/// `1 + select({"//:a": 1, "DEFAULT": 2})` will be encoded as: -/// -/// `{"__type": "concat", "items": [1, {"__type": "selector", "entries": {"//:a": 1, "DEFAULT": 2}}]}` +fn help() -> &'static str { + concat!( + "Perform queries on the unconfigured target graph + +The unconfigured target graph consists of the targets as they are +defined in the build files. In this graph, each target appears +exactly once and `select()`s are in the unresolved form. For large +queries, the unconfigured graph may be much smaller than the +configured graph and queries can be much more efficiently performed +there. 
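[Reviewer note] `QueryProfileOptions` ties its two flags together with clap's `requires`, so `--profile-mode` is rejected unless `--profile-output` is also given. A self-contained sketch of that constraint (field names mirror the diff; the `String` payloads are stand-ins for the real enum and path types):

```rust
// Assumes clap 4 with the `derive` feature; field names mirror the diff, the
// `String` payloads are stand-ins for the real enum/path types.
use clap::Parser;

#[derive(Parser)]
struct Opts {
    #[clap(long, requires("profile_output"))]
    profile_mode: Option<String>,

    #[clap(long)]
    profile_output: Option<String>,
}

fn main() {
    // Accepted: both flags present.
    assert!(Opts::try_parse_from([
        "prog", "--profile-mode", "time-flame", "--profile-output", "/tmp/p",
    ]).is_ok());

    // Rejected: --profile-mode without --profile-output.
    assert!(Opts::try_parse_from(["prog", "--profile-mode", "time-flame"]).is_err());
}
```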
+ +When querying the unconfigured graph, dependencies appearing in all +branches of `select()` dictionaries will be treated as dependencies. + +Run `buck2 docs uquery` or +", + if_else_opensource!( + "https://buck2.build/docs/users/query/uquery/", + "https://www.internalfb.com/intern/staticdocs/buck2/docs/users/query/uquery/", + ), + r#" +for more documentation about the functions available in uquery +expressions. + +Examples: + +Print all the attributes of a target + +`buck2 uquery //java/com/example/app:amazing --output-all-attributes + +List the deps of a target (special characters in a target will require quotes): +`buck2 uquery 'deps("//java/com/example/app:amazing+more")'` + +select() encoding: + +When printed, values with `select()`s use a special json encoding. + +`1 + select({"//:a": 1, "DEFAULT": 2})` will be encoded as: + +`{"__type": "concat", "items": [1, {"__type": "selector", "entries": {"//:a": 1, "DEFAULT": 2}}]}` +"# + ) +} + #[derive(Debug, clap::Parser)] -#[clap(name = "uquery")] +#[clap( + name = "uquery", + about = "Perform queries on the unconfigured target graph", + long_about = help(), + verbatim_doc_comment, +)] pub struct UqueryCommand { #[clap(flatten)] - common_opts: CommonCommandOptions, + query_common: CommonQueryOptions, + /// Uquery doesn't need these flags, but they are used in mode files, so we need to keep them. #[clap(flatten)] - query_common: CommonQueryOptions, + _target_cfg: TargetCfgUnusedOptions, + + #[clap(flatten)] + common_opts: CommonCommandOptions, } #[async_trait] @@ -99,14 +126,18 @@ impl StreamingCommand for UqueryCommand { &self.common_opts.console_opts } - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { + fn event_log_opts(&self) -> &CommonEventLogOptions { &self.common_opts.event_log_opts } - fn common_opts(&self) -> &CommonBuildConfigurationOptions { + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions { &self.common_opts.config_opts } + fn starlark_opts(&self) -> &CommonStarlarkOptions { + &self.common_opts.starlark_opts + } + fn logging_name(&self) -> &'static str { // FIXME: Figure out if we can replace this. We used to log this this way in Ingress :/ "query" diff --git a/app/buck2_client/src/commands/rage.rs b/app/buck2_client/src/commands/rage.rs new file mode 100644 index 0000000000000..2cbb3d5de93e1 --- /dev/null +++ b/app/buck2_client/src/commands/rage.rs @@ -0,0 +1,742 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +mod build_info; +mod dice; +mod manifold; +mod materializer; +mod source_control; +mod system_info; +pub(crate) mod thread_dump; + +use std::collections::HashMap; +use std::fmt; +use std::future::Future; +use std::process::Stdio; +use std::time::Duration; +use std::time::SystemTime; + +use anyhow::Context; +use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::daemon::client::connect::BuckdProcessInfo; +use buck2_client_ctx::exit_result::ExitResult; +use buck2_client_ctx::stdin::Stdin; +use buck2_common::argv::Argv; +use buck2_common::argv::SanitizedArgv; +use buck2_common::manifold::Bucket; +use buck2_common::manifold::ManifoldClient; +use buck2_core::fs::paths::abs_norm_path::AbsNormPath; +use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; +use buck2_data::instant_event::Data; +use buck2_data::InstantEvent; +use buck2_data::RageResult; +use buck2_event_log::file_names::do_find_log_by_trace_id; +use buck2_event_log::file_names::get_local_logs; +use buck2_event_log::read::EventLogPathBuf; +use buck2_event_log::read::EventLogSummary; +use buck2_events::sink::remote::new_remote_event_sink_if_enabled; +use buck2_events::sink::remote::RemoteEventSink; +use buck2_events::BuckEvent; +use buck2_util::process::async_background_command; +use buck2_wrapper_common::invocation_id::TraceId; +use chrono::offset::Local; +use chrono::DateTime; +use derive_more::Display; +use dupe::Dupe; +use futures::future::FutureExt; +use futures::future::LocalBoxFuture; +use manifold::file_to_manifold; +use manifold::manifold_leads; +use serde::Serialize; +use tokio::io::AsyncBufRead; +use tokio::io::AsyncBufReadExt; +use tokio::io::AsyncWriteExt; +use tokio::io::BufReader; + +use crate::commands::debug::upload_re_logs; + +#[derive(Debug, buck2_error::Error)] +enum RageError { + #[error("Failed to get a valid user selection")] + InvalidSelectionError, + #[error("Failed to find the logs for command")] + LogNotFoundError, + #[error("Pastry command timeout, make sure you are on Lighthouse/VPN")] + PastryTimeout, + #[error("Failed to spawn pastry")] + PastrySpawnError, + #[error("Error writing to pastry")] + PastryWriteError, + #[error("Error reading pastry output")] + PastryOutputError, + #[error("Pastry command failed with code '{0}' and error '{1}' ")] + PastryCommandError(i32, String), +} + +#[derive(Debug, clap::Parser)] +#[clap( + name = "rage", + about = "Record information about the previous failed buck2 command", + group = clap::ArgGroup::new("invocation").multiple(false) +)] +pub struct RageCommand { + /// Stop collecting information after `` seconds + #[clap(long, default_value = "120")] + timeout: u64, + /// Use value 0 to select last invocation, 1 to select second to last and so on + #[clap(long, group = "invocation")] + invocation_offset: Option, + /// Select invocation directly using the invocation's UUID + #[clap(long, group = "invocation")] + invocation_id: Option, + /// Collect rage report about buck2 in general, not about specific invocation + #[clap(long, group = "invocation")] + no_invocation: bool, + /// We may want to omit paste if this is not a user + /// or is called in a machine with no pastry command + #[clap(long)] + no_paste: bool, +} + +impl RageCommand { + pub fn exec(self, _matches: &clap::ArgMatches, ctx: ClientCommandContext<'_>) -> ExitResult { + buck2_core::facebook_only(); + + ctx.with_runtime(|ctx| async move { + self.exec_impl(ctx).await?; + ExitResult::success() + }) + } + + async fn exec_impl(self, mut ctx: ClientCommandContext<'_>) -> 
anyhow::Result<()> { + let paths = ctx.paths()?; + let daemon_dir = paths.daemon_dir()?; + let stderr_path = daemon_dir.buckd_stderr(); + let re_logs_dir = ctx.paths()?.re_logs_dir(); + let logdir = paths.log_dir(); + let dice_dump_dir = paths.dice_dump_dir(); + + let client_ctx = ctx.empty_client_context("rage")?; + + // Don't fail the rage if you can't figure out whether to do vpnless. + let manifold = ManifoldClient::new().await?; + + let rage_id = TraceId::new(); + let mut manifold_id = format!("{}", rage_id); + let sink = create_scribe_sink(&ctx)?; + + buck2_client_ctx::eprintln!( + "Data collection will terminate after {} seconds (override with --timeout param)", + self.timeout + )?; + + // If there is a daemon, start connecting. + let info = BuckdProcessInfo::load(&daemon_dir).map_err(buck2_error::Error::from); + + let buckd = match &info { + Ok(info) => async { + info.create_channel() + .await? + .upgrade() + .await + .map_err(buck2_error::Error::from) + } + .boxed(), + Err(e) => futures::future::ready(Err(e.dupe())).boxed(), + } + .shared(); + + let selected_invocation = maybe_select_invocation(ctx.stdin(), &logdir, &self).await?; + let invocation_id = get_trace_id(&selected_invocation).await?; + if let Some(ref invocation_id) = invocation_id { + manifold_id = format!("{}_{}", invocation_id, manifold_id); + } + + buck2_client_ctx::eprintln!("Collecting debug info...")?; + + let thread_dump = self.section("Thread dump", || { + thread_dump::upload_thread_dump(&info, &manifold, &manifold_id) + }); + let build_info_command = self.skippable_section( + "Associated invocation info", + selected_invocation + .as_ref() + .map(|inv| || build_info::get(inv)), + ); + + let (thread_dump, build_info) = tokio::join!( + // Get thread dump before making any new connections to daemon (T159606309) + thread_dump, + // We need the RE session ID from here to upload RE logs + build_info_command + ); + + let system_info_command = self.section("System info", system_info::get); + let daemon_stderr_command = self.section("Daemon stderr", || { + upload_daemon_stderr(stderr_path, &manifold, &manifold_id) + }); + let hg_snapshot_id_command = self.section("Source control", source_control::get_info); + let dice_dump_command = self.section("Dice dump", || async { + dice::upload_dice_dump(buckd.clone().await?, dice_dump_dir, &manifold, &manifold_id) + .await + }); + let materializer_state = self.section("Materializer state", || { + materializer::upload_materializer_data( + buckd.clone(), + &client_ctx, + &manifold, + &manifold_id, + MaterializerRageUploadData::State, + ) + }); + let materializer_fsck = self.section("Materializer fsck", || { + materializer::upload_materializer_data( + buckd.clone(), + &client_ctx, + &manifold, + &manifold_id, + MaterializerRageUploadData::Fsck, + ) + }); + let event_log_command = self.skippable_section( + "Event log upload", + selected_invocation + .as_ref() + .map(|path| || upload_event_logs(path, &manifold, &manifold_id)), + ); + + let re_logs_command = self.skippable_section( + "RE logs upload", + build_info + .get_field(|o| o.re_session_id.clone()) + .map(|id| || upload_re_logs_impl(&manifold, &re_logs_dir, id)), + ); + + let ( + system_info, + daemon_stderr_dump, + hg_snapshot_id, + dice_dump, + materializer_state, + materializer_fsck, + event_log_dump, + re_logs, + ) = tokio::join!( + system_info_command, + daemon_stderr_command, + hg_snapshot_id_command, + dice_dump_command, + materializer_state, + materializer_fsck, + event_log_command, + re_logs_command + ); + let sections 
= vec![ + build_info.to_string(), + system_info.to_string(), + daemon_stderr_dump.to_string(), + hg_snapshot_id.to_string(), + dice_dump.to_string(), + materializer_state.to_string(), + materializer_fsck.to_string(), + thread_dump.to_string(), + event_log_dump.to_string(), + re_logs.to_string(), + ]; + output_rage(self.no_paste, §ions.join("")).await?; + + self.send_to_scuba( + sink, + invocation_id, + system_info, + daemon_stderr_dump, + hg_snapshot_id, + dice_dump, + materializer_state, + materializer_fsck, + thread_dump, + event_log_dump, + build_info, + re_logs, + ) + .await?; + Ok(()) + } + + async fn send_to_scuba( + &self, + sink: Option, + invocation_id: Option, + system_info: RageSection, + daemon_stderr_dump: RageSection, + hg_snapshot_id: RageSection, + dice_dump: RageSection, + materializer_state: RageSection, + materializer_fsck: RageSection, + thread_dump: RageSection, + event_log_dump: RageSection, + build_info: RageSection, + re_logs: RageSection, + ) -> anyhow::Result<()> { + let mut string_data: std::collections::HashMap = [ + ("dice_dump", dice_dump.output()), + ("materializer_state", materializer_state.output()), + ("materializer_fsck", materializer_fsck.output()), + ("thread_dump", thread_dump.output()), + ("daemon_stderr_dump", daemon_stderr_dump.output()), + ("hg_snapshot_id", hg_snapshot_id.output()), + ( + "invocation_id", + invocation_id + .clone() + .map(|inv| inv.to_string()) + .unwrap_or_default(), + ), + ("event_log_dump", event_log_dump.output()), + ("re_logs", re_logs.output()), + ] + .iter() + .map(|(k, v)| (k.to_string(), v.clone())) + .collect(); + + let command = build_info.get_field(|o| Some(o.command.to_owned())); + let buck2_revision = build_info.get_field(|o| Some(o.buck2_revision.to_owned())); + let username = system_info.get_field(|o| o.username.to_owned()); + let hostname = system_info.get_field(|o| o.hostname.to_owned()); + let os = system_info.get_field(|o| Some(o.os.to_owned())); + let os_version = system_info.get_field(|o| o.os_version.to_owned()); + + insert_if_some(&mut string_data, "command", command); + insert_if_some(&mut string_data, "buck2_revision", buck2_revision); + insert_if_some(&mut string_data, "username", username); + insert_if_some(&mut string_data, "hostname", hostname); + insert_if_some(&mut string_data, "os", os); + insert_if_some(&mut string_data, "os_version", os_version); + + let mut int_data = HashMap::new(); + let daemon_uptime_s = build_info.get_field(|o| o.daemon_uptime_s); + insert_if_some(&mut int_data, "daemon_uptime_s", daemon_uptime_s); + + let timestamp = build_info.get_field(|o| Some(SystemTime::from(o.timestamp).into())); + let command_duration = build_info.get_field(|o| { + Some(prost_types::Duration { + seconds: o.command_duration?.as_secs() as i64, + nanos: o.command_duration?.subsec_nanos() as i32, + }) + }); + + // We store in Ent via Ingress that rage was run for specific invocation + if let Some(invocation_id) = invocation_id { + dispatch_result_event( + sink.as_ref(), + &invocation_id, + RageResult { + string_data, + int_data, + timestamp, + command_duration, + }, + ) + .await?; + } + Ok(()) + } + + fn section<'a, Fut, T>( + &'a self, + title: &'a str, + command: impl FnOnce() -> Fut, + ) -> LocalBoxFuture> + where + Fut: Future> + 'a, + T: std::fmt::Display + 'a, + { + let timeout = Duration::from_secs(self.timeout); + RageSection::get(title, timeout, command) + } + + fn skippable_section<'a, Fut, T>( + &'a self, + title: &'a str, + command: Option Fut>, + ) -> LocalBoxFuture> + where + Fut: Future> + 
'a, + T: std::fmt::Display + 'a, + { + let timeout = Duration::from_secs(self.timeout); + RageSection::get_skippable(title, timeout, command) + } + + pub fn sanitize_argv(&self, argv: Argv) -> SanitizedArgv { + argv.no_need_to_sanitize() + } +} + +#[derive(Debug, PartialEq, Serialize)] +struct RageSection<T> { + title: String, + status: CommandStatus<T>, +} + +#[derive(Display)] +pub enum MaterializerRageUploadData { + #[display("state")] + State, + #[display("fsck")] + Fsck, +} + +#[derive(Debug, PartialEq, Serialize)] +enum CommandStatus<T> { + Success { output: T }, + Failure { error: String }, + Timeout, + Skipped, +} + +impl<'a, T> RageSection<T> +where + T: std::fmt::Display + 'a, +{ + fn get<Fut>( + title: &str, + timeout: Duration, + command: impl FnOnce() -> Fut, + ) -> LocalBoxFuture<'a, Self> + where + Fut: Future<Output = anyhow::Result<T>> + 'a, + { + let fut = command(); + let title = title.to_owned(); + async move { + let status = match tokio::time::timeout(timeout, fut).await { + Err(_) => CommandStatus::Timeout, + Ok(Ok(output)) => CommandStatus::Success { output }, + Ok(Err(e)) => CommandStatus::Failure { + error: format!("Error: {:?}", e), + }, + }; + RageSection { title, status } + } + .boxed_local() + } + + fn get_skippable<Fut>( + title: &str, + timeout: Duration, + command: Option<impl FnOnce() -> Fut>, + ) -> LocalBoxFuture<'a, Self> + where + Fut: Future<Output = anyhow::Result<T>> + 'a, + { + if let Some(command) = command { + Self::get(title, timeout, command) + } else { + let status = CommandStatus::Skipped; + let title = title.to_owned(); + async { RageSection { title, status } }.boxed_local() + } + } + + fn output(&self) -> String { + match &self.status { + CommandStatus::Success { output } => output.to_string(), + CommandStatus::Failure { error } => error.to_owned(), + CommandStatus::Timeout {} => "Timeout".to_owned(), + CommandStatus::Skipped {} => "Skipped".to_owned(), + } + } + + fn get_field<D>(&self, extract_field: impl FnOnce(&T) -> Option<D>) -> Option<D> { + match &self.status { + CommandStatus::Success { output } => extract_field(output), + _ => None, + } + } + + fn pretty_print_section( + &self, + f: &mut fmt::Formatter, + content: String, + ) -> Result<(), std::fmt::Error> { + let content_divider = "-".repeat(30); + write!( + f, + "{title}\n{content_divider}\n{content}\n\n\n", + title = self.title + ) + } +} + +impl<T> fmt::Display for RageSection<T> +where + T: std::fmt::Display, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.pretty_print_section(f, self.output()) + } +} + +fn insert_if_some<T>(data: &mut HashMap<String, T>, key: &str, value: Option<T>) { + if let Some(value) = value { + data.insert(key.to_owned(), value); + } +} + +async fn upload_daemon_stderr( + path: AbsNormPathBuf, + manifold: &ManifoldClient, + manifold_id: &str, +) -> anyhow::Result<String> { + file_to_manifold(manifold, &path, format!("flat/{}.stderr", manifold_id)).await +} + +async fn upload_event_logs( + path: &EventLogPathBuf, + manifold: &ManifoldClient, + manifold_id: &str, +) -> anyhow::Result<String> { + let filename = format!("flat/{}-event_log{}", manifold_id, path.extension()); + file_to_manifold(manifold, path.path(), filename).await +} + +async fn upload_re_logs_impl( + manifold: &ManifoldClient, + re_logs_dir: &AbsNormPath, + re_session_id: String, +) -> anyhow::Result<String> { + let bucket = Bucket::RAGE_DUMPS; + let filename = format!("flat/{}-re_logs.zst", &re_session_id); + upload_re_logs::upload_re_logs(manifold, bucket, re_logs_dir, &re_session_id, &filename) + .await?; + + Ok(manifold_leads(&bucket, filename)) +} + +async fn dispatch_result_event( + sink:
Option<&RemoteEventSink>, + rage_id: &TraceId, + result: RageResult, +) -> anyhow::Result<()> { + let data = Some(Data::RageResult(result)); + dispatch_event_to_scribe(sink, rage_id, InstantEvent { data }).await?; + Ok(()) +} + +async fn dispatch_event_to_scribe( + sink: Option<&RemoteEventSink>, + trace_id: &TraceId, + event: InstantEvent, +) -> anyhow::Result<()> { + if let Some(sink) = sink { + sink.send_now(BuckEvent::new( + SystemTime::now(), + trace_id.to_owned(), + None, + None, + event.into(), + )) + .await; + } else { + tracing::warn!( + "Couldn't send rage results to scribe, rage ID `{}`", + trace_id + ) + }; + Ok(()) +} + +#[allow(unused_variables)] // Conditional compilation +fn create_scribe_sink(ctx: &ClientCommandContext) -> anyhow::Result<Option<RemoteEventSink>> { + // TODO(swgiillespie) scribe_logging is likely the right feature for this, but we should be able to inject a sink + // without using configurations at the call site + new_remote_event_sink_if_enabled( + ctx.fbinit(), + /* buffer size */ 100, + /* retry_backoff */ Duration::from_millis(500), + /* retry_attempts */ 5, + /* message_batch_size */ None, + ) +} + +async fn maybe_select_invocation( + stdin: &mut Stdin, + logdir: &AbsNormPathBuf, + command: &RageCommand, +) -> anyhow::Result<Option<EventLogPathBuf>> { + if command.no_invocation { + return Ok(None); + }; + + if let Some(trace_id) = &command.invocation_id { + return Ok(Some(do_find_log_by_trace_id(logdir, trace_id)?)); + } + + let logs = get_local_logs(logdir)?; + let mut logs = logs + .into_iter() + .rev() // newest first + .collect::<Vec<_>>(); + if logs.is_empty() { + return Ok(None); + } + let index = log_index(stdin, &logs, command.invocation_offset).await?; + if index >= logs.len() { + return Err(RageError::LogNotFoundError.into()); + } + Ok(Some(logs.swap_remove(index))) +} + +async fn log_index( + stdin: &mut Stdin, + logs: &[EventLogPathBuf], + invocation_offset: Option<usize>, +) -> Result<usize, anyhow::Error> { + let index = match invocation_offset { + Some(i) => i, + None => { + let mut stdin = BufReader::new(stdin); + user_prompt_select_log(&mut stdin, logs).await? + } + }; + Ok(index) +} + +async fn user_prompt_select_log<'a>( + stdin: impl AsyncBufRead + Unpin, + logs: &'a [EventLogPathBuf], +) -> anyhow::Result<usize> { + buck2_client_ctx::eprintln!("Which buck invocation would you like to report?\n")?; + let logs_summary = futures::future::join_all( + logs.iter() + .map(|log_path| async move { log_path.get_summary().await.ok() }), + ) + .await; + for (index, log_summary) in logs_summary.iter().enumerate() { + print_log_summary(index, log_summary)?; + } + buck2_client_ctx::eprintln!()?; + let prompt = format!( + "Invocation: (type a number between 0 and {}) ", + logs_summary.len() - 1 + ); + let selection = get_user_selection(stdin, &prompt, |i| i < logs_summary.len()).await?; + + buck2_client_ctx::eprintln!("Selected invocation {}\n", selection)?; + Ok(selection) +} + +async fn get_user_selection
<P>
( + mut stdin: impl AsyncBufRead + Unpin, + prompt: &str, + predicate: P, +) -> anyhow::Result +where + P: Fn(usize) -> bool, +{ + buck2_client_ctx::eprint!("{}", prompt)?; + + let mut input = String::new(); + stdin.read_line(&mut input).await?; + + match input.trim().parse() { + Ok(selection) if predicate(selection) => Ok(selection), + _ => Err(RageError::InvalidSelectionError.into()), + } +} + +fn print_log_summary(index: usize, log_summary: &Option) -> anyhow::Result<()> { + if let Some(log_summary) = log_summary { + let cmd = build_info::format_cmd(&log_summary.invocation); + + let timestamp: DateTime = log_summary.timestamp.into(); + buck2_client_ctx::eprintln!( + "{:<7} {} {}", + format!("[{}].", index), + timestamp.format("%c %Z"), + cmd + ) + } else { + buck2_client_ctx::eprintln!( + "{:<7} <>", + format!("[{}].", index), + ) + } +} + +async fn output_rage(no_paste: bool, output: &str) -> anyhow::Result<()> { + if no_paste { + buck2_client_ctx::println!("{}", output)?; + } else { + match generate_paste("Buck2 Rage", output).await { + Err(e) => { + buck2_client_ctx::eprintln!( + "Failed to generate paste automatically with error \"{:?}\". + Please create paste manually with `bunnylol paste` using the output below:\n\n\n", + e + )?; + buck2_client_ctx::println!("{}", output)?; + } + Ok(paste) => buck2_client_ctx::eprintln!( + "\nPlease post in https://fb.workplace.com/groups/buck2users with the following link:\n\n{}\n", + paste + )?, + } + }; + Ok(()) +} + +async fn generate_paste(title: &str, content: &str) -> anyhow::Result { + let mut pastry = async_background_command("pastry") + .args(["--title", title]) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .kill_on_drop(true) + .spawn() + .context(RageError::PastrySpawnError)?; + let mut stdin = pastry.stdin.take().expect("Stdin should open"); + + let writer = async move { + stdin + .write_all(content.as_bytes()) + .await + .context(RageError::PastryWriteError) + }; + + let reader = async move { + let output = tokio::time::timeout(Duration::from_secs(10), pastry.wait_with_output()) + .await + .context(RageError::PastryTimeout)? 
+ .context(RageError::PastryOutputError)?; + if !output.status.success() { + let error = String::from_utf8_lossy(&output.stderr).to_string(); + let code = output + .status + .code() + .ok_or_else(|| RageError::PastryCommandError(1, error.clone()))?; + return Err(RageError::PastryCommandError(code, error).into()); + } + let output = String::from_utf8(output.stdout).context(RageError::PastryOutputError)?; + Ok(output) + }; + + let ((), paste) = futures::future::try_join(writer, reader).await?; + + Ok(paste) +} + +async fn get_trace_id(invocation: &Option) -> anyhow::Result> { + let invocation_id = match invocation { + None => None, + Some(invocation) => Some(invocation.uuid_from_filename()?), + }; + Ok(invocation_id) +} diff --git a/app/buck2_client/src/commands/rage/build_info.rs b/app/buck2_client/src/commands/rage/build_info.rs index f31459b8ffdc6..8079568e54496 100644 --- a/app/buck2_client/src/commands/rage/build_info.rs +++ b/app/buck2_client/src/commands/rage/build_info.rs @@ -11,9 +11,9 @@ use std::fmt; use std::time::Duration; use std::time::SystemTime; -use buck2_client_ctx::stream_value::StreamValue; -use buck2_client_ctx::subscribers::event_log::read::EventLogPathBuf; -use buck2_client_ctx::subscribers::event_log::utils::Invocation; +use buck2_event_log::read::EventLogPathBuf; +use buck2_event_log::stream_value::StreamValue; +use buck2_event_log::utils::Invocation; use buck2_events::BuckEvent; use buck2_util::truncate::truncate; use buck2_wrapper_common::invocation_id::TraceId; @@ -21,9 +21,8 @@ use chrono::DateTime; use chrono::Local; use futures::TryStreamExt; use humantime::format_duration; -use thiserror::Error; -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum BuildInfoError { #[error("Failed to read event log")] EventLogReadFail, diff --git a/app/buck2_client/src/commands/rage/dice.rs b/app/buck2_client/src/commands/rage/dice.rs index 3238a6f9e8746..44b7b4dede1c2 100644 --- a/app/buck2_client/src/commands/rage/dice.rs +++ b/app/buck2_client/src/commands/rage/dice.rs @@ -14,8 +14,8 @@ use buck2_cli_proto::unstable_dice_dump_request::DiceDumpFormat; use buck2_cli_proto::UnstableDiceDumpRequest; use buck2_client_ctx::daemon::client::connect::BootstrapBuckdClient; use buck2_client_ctx::daemon::client::BuckdClientConnector; -use buck2_client_ctx::manifold::Bucket; -use buck2_client_ctx::manifold::ManifoldClient; +use buck2_common::manifold::Bucket; +use buck2_common::manifold::ManifoldClient; use buck2_core::fs::fs_util::create_dir_all; use buck2_core::fs::fs_util::remove_all; use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; diff --git a/app/buck2_client/src/commands/rage/manifold.rs b/app/buck2_client/src/commands/rage/manifold.rs index 1b39b1faf2000..4dda82fe5b21b 100644 --- a/app/buck2_client/src/commands/rage/manifold.rs +++ b/app/buck2_client/src/commands/rage/manifold.rs @@ -10,13 +10,12 @@ use std::io::Cursor; use anyhow::Context; -use buck2_client_ctx::manifold::Bucket; -use buck2_client_ctx::manifold::ManifoldClient; +use buck2_common::manifold::Bucket; +use buck2_common::manifold::ManifoldClient; use buck2_core::fs::paths::abs_path::AbsPath; -use thiserror::Error; use tokio::fs::File; -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum ManifoldError { #[error("Failed to open file `{0}`")] OpenFileError(String), diff --git a/app/buck2_client/src/commands/rage/materializer.rs b/app/buck2_client/src/commands/rage/materializer.rs index 5bf2a66d6eb7e..d64efd52ca3bd 100644 --- a/app/buck2_client/src/commands/rage/materializer.rs +++ 
b/app/buck2_client/src/commands/rage/materializer.rs @@ -16,9 +16,9 @@ use buck2_client_ctx::command_outcome::CommandOutcome; use buck2_client_ctx::daemon::client::connect::BootstrapBuckdClient; use buck2_client_ctx::events_ctx::PartialResultCtx; use buck2_client_ctx::events_ctx::PartialResultHandler; -use buck2_client_ctx::manifold::ManifoldClient; use buck2_client_ctx::subscribers::subscriber::EventSubscriber; -use buck2_common::result::SharedResult; +use buck2_client_ctx::subscribers::subscribers::EventSubscribers; +use buck2_common::manifold::ManifoldClient; use futures::future::BoxFuture; use futures::future::Shared; @@ -26,15 +26,16 @@ use crate::commands::rage::manifold::buf_to_manifold; use crate::commands::rage::MaterializerRageUploadData; pub async fn upload_materializer_data( - buckd: Shared>>, + buckd: Shared>>, client_context: &ClientContext, manifold: &ManifoldClient, manifold_id: &String, materializer_data: MaterializerRageUploadData, ) -> anyhow::Result { - let mut buckd = buckd - .await? - .with_subscribers(vec![Box::new(TracingSubscriber) as _]); + let mut buckd = + buckd.await?.with_subscribers(EventSubscribers::new( + vec![Box::new(TracingSubscriber) as _], + )); let mut capture = CaptureStdout::new(); @@ -46,6 +47,7 @@ pub async fn upload_materializer_data( serialized_opts: serde_json::to_string(&AuditCommand::DeferredMaterializer( DeferredMaterializerCommand { common_opts: Default::default(), + _target_cfg: Default::default(), subcommand: match materializer_data { MaterializerRageUploadData::State => { DeferredMaterializerSubcommand::List @@ -105,7 +107,7 @@ impl EventSubscriber for TracingSubscriber { Ok(()) } - async fn handle_error(&mut self, error: &anyhow::Error) -> anyhow::Result<()> { + async fn handle_error(&mut self, error: &buck2_error::Error) -> anyhow::Result<()> { tracing::info!("{:#}", error); Ok(()) } diff --git a/app/buck2_client/src/commands/rage/mod.rs b/app/buck2_client/src/commands/rage/mod.rs deleted file mode 100644 index 870e5243bf1c7..0000000000000 --- a/app/buck2_client/src/commands/rage/mod.rs +++ /dev/null @@ -1,732 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
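
A pattern worth calling out in the hunks above (manifold.rs, build_info.rs, dice.rs, and the deleted mod.rs below): error enums migrate from `#[derive(thiserror::Error)]` to the in-tree `#[derive(buck2_error::Error)]`, while every `#[error("...")]` display attribute stays untouched. A minimal sketch of that shared derive surface, using `thiserror` as a stand-in since `buck2_error` is internal to this repo:

    use anyhow::Context;

    // `thiserror::Error` stands in for the internal `buck2_error::Error` derive;
    // both consume the same `#[error(...)]` attributes, so these hunks can swap
    // the derive without touching the variants.
    #[derive(Debug, thiserror::Error)]
    enum UploadError {
        #[error("Failed to open file `{0}`")]
        OpenFileError(String),
    }

    fn open_for_upload(path: &str) -> anyhow::Result<std::fs::File> {
        std::fs::File::open(path).with_context(|| UploadError::OpenFileError(path.to_owned()))
    }

The payoff of the internal derive is presumably the extra metadata visible elsewhere in this diff, e.g. the `ErrorTag` filtering in the test.rs hunk near the end.
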
- */ - -mod build_info; -mod dice; -mod manifold; -mod materializer; -mod source_control; -mod system_info; -mod thread_dump; - -use std::collections::HashMap; -use std::fmt; -use std::future::Future; -use std::process::Stdio; -use std::time::Duration; -use std::time::SystemTime; - -use anyhow::Context; -use buck2_client_ctx::argv::Argv; -use buck2_client_ctx::argv::SanitizedArgv; -use buck2_client_ctx::client_ctx::ClientCommandContext; -use buck2_client_ctx::daemon::client::connect::BuckdProcessInfo; -use buck2_client_ctx::exit_result::ExitResult; -use buck2_client_ctx::manifold::Bucket; -use buck2_client_ctx::manifold::ManifoldClient; -use buck2_client_ctx::stdin::Stdin; -use buck2_client_ctx::subscribers::event_log::file_names::do_find_log_by_trace_id; -use buck2_client_ctx::subscribers::event_log::file_names::get_local_logs; -use buck2_client_ctx::subscribers::event_log::read::EventLogPathBuf; -use buck2_client_ctx::subscribers::event_log::read::EventLogSummary; -use buck2_common::result::ToSharedResultExt; -use buck2_core::fs::paths::abs_norm_path::AbsNormPath; -use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; -use buck2_data::instant_event::Data; -use buck2_data::InstantEvent; -use buck2_data::RageResult; -use buck2_events::sink::scribe::new_thrift_scribe_sink_if_enabled; -use buck2_events::sink::scribe::ThriftScribeSink; -use buck2_events::BuckEvent; -use buck2_wrapper_common::invocation_id::TraceId; -use chrono::offset::Local; -use chrono::DateTime; -use derive_more::Display; -use dupe::Dupe; -use futures::future::FutureExt; -use futures::future::LocalBoxFuture; -use manifold::file_to_manifold; -use manifold::manifold_leads; -use maplit::convert_args; -use maplit::hashmap; -use serde::Serialize; -use thiserror::Error; -use tokio::io::AsyncBufRead; -use tokio::io::AsyncBufReadExt; -use tokio::io::AsyncWriteExt; -use tokio::io::BufReader; -use tokio::process::Command; - -use crate::commands::debug::upload_re_logs; - -#[derive(Debug, Error)] -enum RageError { - #[error("Failed to get a valid user selection")] - InvalidSelectionError, - #[error("Failed to find the logs for command")] - LogNotFoundError, - #[error("Pastry command timeout, make sure you are on Lighthouse/VPN")] - PastryTimeout, - #[error("Failed to spawn pastry")] - PastrySpawnError, - #[error("Error writing to pastry")] - PastryWriteError, - #[error("Error reading pastry output")] - PastryOutputError, - #[error("Pastry command failed with code '{0}' and error '{1}' ")] - PastryCommandError(i32, String), -} - -#[derive(Debug, clap::Parser)] -#[clap( - name = "rage", - about = "Record information about the previous failed buck2 command", - group = clap::ArgGroup::new("invocation").multiple(false) -)] -pub struct RageCommand { - /// Stop collecting information after `` seconds - #[clap(long, default_value = "120")] - timeout: u64, - /// Use value 0 to select last invocation, 1 to select second to last and so on - #[clap(long, group = "invocation")] - invocation_offset: Option, - /// Select invocation directly using the invocation's UUID - #[clap(long, group = "invocation")] - invocation_id: Option, - /// Collect rage report about buck2 in general, not about specific invocation - #[clap(long, group = "invocation")] - no_invocation: bool, - /// We may want to omit paste if this is not a user - /// or is called in a machine with no pastry command - #[clap(long)] - no_paste: bool, -} - -impl RageCommand { - pub fn exec(self, _matches: &clap::ArgMatches, ctx: ClientCommandContext<'_>) -> ExitResult { - 
buck2_core::facebook_only(); - - ctx.with_runtime(async move |ctx| { - self.exec_impl(ctx).await?; - ExitResult::success() - }) - } - - async fn exec_impl(self, mut ctx: ClientCommandContext<'_>) -> anyhow::Result<()> { - let paths = ctx.paths.as_ref().map_err(|e| e.dupe())?; - let daemon_dir = paths.daemon_dir()?; - let stderr_path = daemon_dir.buckd_stderr(); - let re_logs_dir = ctx.paths()?.re_logs_dir(); - let logdir = paths.log_dir(); - let dice_dump_dir = paths.dice_dump_dir(); - - let client_ctx = ctx.empty_client_context("rage")?; - - // Don't fail the rage if you can't figure out whether to do vpnless. - let manifold = ManifoldClient::new(ctx.allow_vpnless_for_logging().unwrap_or_default())?; - - let rage_id = TraceId::new(); - let mut manifold_id = format!("{}", rage_id); - let sink = create_scribe_sink(&ctx)?; - - buck2_client_ctx::eprintln!( - "Data collection will terminate after {} seconds (override with --timeout param)", - self.timeout - )?; - - // If there is a daemon, start connecting. - let info = BuckdProcessInfo::load(&daemon_dir).shared_error(); - - let buckd = match &info { - Ok(info) => async { - info.create_channel() - .await - .shared_error()? - .upgrade() - .await - .shared_error() - } - .boxed(), - Err(e) => futures::future::ready(Err(e.dupe())).boxed(), - } - .shared(); - - let selected_invocation = maybe_select_invocation(ctx.stdin(), &logdir, &self).await?; - let invocation_id = get_trace_id(&selected_invocation).await?; - if let Some(ref invocation_id) = invocation_id { - manifold_id = format!("{}_{}", invocation_id, manifold_id); - } - - buck2_client_ctx::eprintln!("Collecting debug info...")?; - - let thread_dump = self.section("Thread dump", || { - thread_dump::upload_thread_dump(&info, &manifold, &manifold_id) - }); - let build_info_command = self.skippable_section( - "Associated invocation info", - selected_invocation - .as_ref() - .map(|inv| || build_info::get(inv)), - ); - - let (thread_dump, build_info) = tokio::join!( - // Get thread dump before making any new connections to daemon (T159606309) - thread_dump, - // We need the RE session ID from here to upload RE logs - build_info_command - ); - - let system_info_command = self.section("System info", system_info::get); - let daemon_stderr_command = self.section("Daemon stderr", || { - upload_daemon_stderr(stderr_path, &manifold, &manifold_id) - }); - let hg_snapshot_id_command = self.section("Source control", source_control::get_info); - let dice_dump_command = self.section("Dice dump", || async { - dice::upload_dice_dump(buckd.clone().await?, dice_dump_dir, &manifold, &manifold_id) - .await - }); - let materializer_state = self.section("Materializer state", || { - materializer::upload_materializer_data( - buckd.clone(), - &client_ctx, - &manifold, - &manifold_id, - MaterializerRageUploadData::State, - ) - }); - let materializer_fsck = self.section("Materializer fsck", || { - materializer::upload_materializer_data( - buckd.clone(), - &client_ctx, - &manifold, - &manifold_id, - MaterializerRageUploadData::Fsck, - ) - }); - let event_log_command = self.skippable_section( - "Event log upload", - selected_invocation - .as_ref() - .map(|path| || upload_event_logs(path, &manifold, &manifold_id)), - ); - - let re_logs_command = self.skippable_section( - "RE logs upload", - build_info - .get_field(|o| o.re_session_id.clone()) - .map(|id| || upload_re_logs_impl(&manifold, &re_logs_dir, id)), - ); - - let ( - system_info, - daemon_stderr_dump, - hg_snapshot_id, - dice_dump, - materializer_state, - 
materializer_fsck, - event_log_dump, - re_logs, - ) = tokio::join!( - system_info_command, - daemon_stderr_command, - hg_snapshot_id_command, - dice_dump_command, - materializer_state, - materializer_fsck, - event_log_command, - re_logs_command - ); - let sections = vec![ - build_info.to_string(), - system_info.to_string(), - daemon_stderr_dump.to_string(), - hg_snapshot_id.to_string(), - dice_dump.to_string(), - materializer_state.to_string(), - materializer_fsck.to_string(), - thread_dump.to_string(), - event_log_dump.to_string(), - re_logs.to_string(), - ]; - output_rage(self.no_paste, §ions.join("")).await?; - - self.send_to_scuba( - sink, - invocation_id, - system_info, - daemon_stderr_dump, - hg_snapshot_id, - dice_dump, - materializer_state, - materializer_fsck, - thread_dump, - event_log_dump, - build_info, - re_logs, - ) - .await?; - Ok(()) - } - - async fn send_to_scuba( - &self, - sink: Option, - invocation_id: Option, - system_info: RageSection, - daemon_stderr_dump: RageSection, - hg_snapshot_id: RageSection, - dice_dump: RageSection, - materializer_state: RageSection, - materializer_fsck: RageSection, - thread_dump: RageSection, - event_log_dump: RageSection, - build_info: RageSection, - re_logs: RageSection, - ) -> anyhow::Result<()> { - let mut string_data = convert_args!( - keys = String::from, - hashmap! ( - "dice_dump" => dice_dump.output(), - "materializer_state" => materializer_state.output(), - "materializer_fsck" => materializer_fsck.output(), - "thread_dump" => thread_dump.output(), - "daemon_stderr_dump" => daemon_stderr_dump.output(), - "hg_snapshot_id" => hg_snapshot_id.output(), - "invocation_id" => invocation_id.clone().map(|inv| inv.to_string()).unwrap_or_default(), - "event_log_dump" => event_log_dump.output(), - "re_logs" => re_logs.output(), - ) - ); - - let command = build_info.get_field(|o| Some(o.command.to_owned())); - let buck2_revision = build_info.get_field(|o| Some(o.buck2_revision.to_owned())); - let username = system_info.get_field(|o| o.username.to_owned()); - let hostname = system_info.get_field(|o| o.hostname.to_owned()); - let os = system_info.get_field(|o| Some(o.os.to_owned())); - let os_version = system_info.get_field(|o| o.os_version.to_owned()); - - insert_if_some(&mut string_data, "command", command); - insert_if_some(&mut string_data, "buck2_revision", buck2_revision); - insert_if_some(&mut string_data, "username", username); - insert_if_some(&mut string_data, "hostname", hostname); - insert_if_some(&mut string_data, "os", os); - insert_if_some(&mut string_data, "os_version", os_version); - - let mut int_data = HashMap::new(); - let daemon_uptime_s = build_info.get_field(|o| o.daemon_uptime_s); - insert_if_some(&mut int_data, "daemon_uptime_s", daemon_uptime_s); - - let timestamp = build_info.get_field(|o| Some(SystemTime::from(o.timestamp).into())); - let command_duration = build_info.get_field(|o| { - Some(prost_types::Duration { - seconds: o.command_duration?.as_secs() as i64, - nanos: o.command_duration?.subsec_nanos() as i32, - }) - }); - - // We store in Ent via Ingress that rage was run for specific invocation - if let Some(invocation_id) = invocation_id { - dispatch_result_event( - sink.as_ref(), - &invocation_id, - RageResult { - string_data, - int_data, - timestamp, - command_duration, - }, - ) - .await?; - } - Ok(()) - } - - fn section<'a, Fut, T>( - &'a self, - title: &'a str, - command: impl FnOnce() -> Fut, - ) -> LocalBoxFuture> - where - Fut: Future> + 'a, - T: std::fmt::Display + 'a, - { - let timeout = 
Duration::from_secs(self.timeout); - RageSection::get(title, timeout, command) - } - - fn skippable_section<'a, Fut, T>( - &'a self, - title: &'a str, - command: Option Fut>, - ) -> LocalBoxFuture> - where - Fut: Future> + 'a, - T: std::fmt::Display + 'a, - { - let timeout = Duration::from_secs(self.timeout); - RageSection::get_skippable(title, timeout, command) - } - - pub fn sanitize_argv(&self, argv: Argv) -> SanitizedArgv { - argv.no_need_to_sanitize() - } -} - -#[derive(Debug, PartialEq, Serialize)] -struct RageSection { - title: String, - status: CommandStatus, -} - -#[derive(Display)] -pub enum MaterializerRageUploadData { - #[display(fmt = "state")] - State, - #[display(fmt = "fsck")] - Fsck, -} - -#[derive(Debug, PartialEq, Serialize)] -enum CommandStatus { - Success { output: T }, - Failure { error: String }, - Timeout, - Skipped, -} - -impl<'a, T> RageSection -where - T: std::fmt::Display + 'a, -{ - fn get( - title: &str, - timeout: Duration, - command: impl FnOnce() -> Fut, - ) -> LocalBoxFuture<'a, Self> - where - Fut: Future> + 'a, - { - let fut = command(); - let title = title.to_owned(); - async move { - let status = match tokio::time::timeout(timeout, fut).await { - Err(_) => CommandStatus::Timeout, - Ok(Ok(output)) => CommandStatus::Success { output }, - Ok(Err(e)) => CommandStatus::Failure { - error: format!("Error: {:?}", e), - }, - }; - RageSection { title, status } - } - .boxed_local() - } - - fn get_skippable( - title: &str, - timeout: Duration, - command: Option Fut>, - ) -> LocalBoxFuture<'a, Self> - where - Fut: Future> + 'a, - { - if let Some(command) = command { - Self::get(title, timeout, command) - } else { - let status = CommandStatus::Skipped; - let title = title.to_owned(); - async { RageSection { title, status } }.boxed_local() - } - } - - fn output(&self) -> String { - match &self.status { - CommandStatus::Success { output } => output.to_string(), - CommandStatus::Failure { error } => error.to_owned(), - CommandStatus::Timeout {} => "Timeout".to_owned(), - CommandStatus::Skipped {} => "Skipped".to_owned(), - } - } - - fn get_field(&self, extract_field: impl FnOnce(&T) -> Option) -> Option { - match &self.status { - CommandStatus::Success { output } => extract_field(output), - _ => None, - } - } - - fn pretty_print_section( - &self, - f: &mut fmt::Formatter, - content: String, - ) -> Result<(), std::fmt::Error> { - let content_divider = "-".repeat(30); - write!( - f, - "{title}\n{content_divider}\n{content}\n\n\n", - title = self.title - ) - } -} - -impl fmt::Display for RageSection -where - T: std::fmt::Display, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.pretty_print_section(f, self.output()) - } -} - -fn insert_if_some(data: &mut HashMap, key: &str, value: Option) { - if let Some(value) = value { - data.insert(key.to_owned(), value); - } -} - -async fn upload_daemon_stderr( - path: AbsNormPathBuf, - manifold: &ManifoldClient, - manifold_id: &str, -) -> anyhow::Result { - file_to_manifold(manifold, &path, format!("flat/{}.stderr", manifold_id)).await -} - -async fn upload_event_logs( - path: &EventLogPathBuf, - manifold: &ManifoldClient, - manifold_id: &str, -) -> anyhow::Result { - let filename = format!("flat/{}-event_log{}", manifold_id, path.extension()); - file_to_manifold(manifold, path.path(), filename).await -} - -async fn upload_re_logs_impl( - manifold: &ManifoldClient, - re_logs_dir: &AbsNormPath, - re_session_id: String, -) -> anyhow::Result { - let bucket = Bucket::RAGE_DUMPS; - let filename = 
format!("flat/{}-re_logs.zst", &re_session_id); - upload_re_logs::upload_re_logs(manifold, bucket, re_logs_dir, &re_session_id, &filename) - .await?; - - Ok(manifold_leads(&bucket, filename)) -} - -async fn dispatch_result_event( - sink: Option<&ThriftScribeSink>, - rage_id: &TraceId, - result: RageResult, -) -> anyhow::Result<()> { - let data = Some(Data::RageResult(result)); - dispatch_event_to_scribe(sink, rage_id, InstantEvent { data }).await?; - Ok(()) -} - -async fn dispatch_event_to_scribe( - sink: Option<&ThriftScribeSink>, - trace_id: &TraceId, - event: InstantEvent, -) -> anyhow::Result<()> { - if let Some(sink) = sink { - sink.send_now(BuckEvent::new( - SystemTime::now(), - trace_id.to_owned(), - None, - None, - event.into(), - )) - .await; - } else { - tracing::warn!( - "Couldn't send rage results to scribe, rage ID `{}`", - trace_id - ) - }; - Ok(()) -} - -#[allow(unused_variables)] // Conditional compilation -fn create_scribe_sink(ctx: &ClientCommandContext) -> anyhow::Result> { - // TODO(swgiillespie) scribe_logging is likely the right feature for this, but we should be able to inject a sink - // without using configurations at the call site - new_thrift_scribe_sink_if_enabled( - ctx.fbinit(), - /* buffer size */ 100, - /* retry_backoff */ Duration::from_millis(500), - /* retry_attempts */ 5, - /* message_batch_size */ None, - ) -} - -async fn maybe_select_invocation( - stdin: &mut Stdin, - logdir: &AbsNormPathBuf, - command: &RageCommand, -) -> anyhow::Result> { - if command.no_invocation { - return Ok(None); - }; - - if let Some(trace_id) = &command.invocation_id { - return Ok(Some(do_find_log_by_trace_id(logdir, trace_id)?)); - } - - let logs = get_local_logs(logdir)?; - let mut logs = logs - .into_iter() - .rev() // newest first - .collect::>(); - if logs.is_empty() { - return Ok(None); - } - let index = log_index(stdin, &logs, command.invocation_offset).await?; - if index >= logs.len() { - return Err(RageError::LogNotFoundError.into()); - } - Ok(Some(logs.swap_remove(index))) -} - -async fn log_index( - stdin: &mut Stdin, - logs: &[EventLogPathBuf], - invocation_offset: Option, -) -> Result { - let index = match invocation_offset { - Some(i) => i, - None => { - let mut stdin = BufReader::new(stdin); - user_prompt_select_log(&mut stdin, logs).await? - } - }; - Ok(index) -} - -async fn user_prompt_select_log<'a>( - stdin: impl AsyncBufRead + Unpin, - logs: &'a [EventLogPathBuf], -) -> anyhow::Result { - buck2_client_ctx::eprintln!("Which buck invocation would you like to report?\n")?; - let logs_summary = futures::future::join_all( - logs.iter() - .map(async move |log_path| log_path.get_summary().await.ok()), - ) - .await; - for (index, log_summary) in logs_summary.iter().enumerate() { - print_log_summary(index, log_summary)?; - } - buck2_client_ctx::eprintln!()?; - let prompt = format!( - "Invocation: (type a number between 0 and {}) ", - logs_summary.len() - 1 - ); - let selection = get_user_selection(stdin, &prompt, |i| i < logs_summary.len()).await?; - - buck2_client_ctx::eprintln!("Selected invocation {}\n", selection)?; - Ok(selection) -} - -async fn get_user_selection
<P>
( - mut stdin: impl AsyncBufRead + Unpin, - prompt: &str, - predicate: P, -) -> anyhow::Result -where - P: Fn(usize) -> bool, -{ - buck2_client_ctx::eprint!("{}", prompt)?; - - let mut input = String::new(); - stdin.read_line(&mut input).await?; - - match input.trim().parse() { - Ok(selection) if predicate(selection) => Ok(selection), - _ => Err(RageError::InvalidSelectionError.into()), - } -} - -fn print_log_summary(index: usize, log_summary: &Option) -> anyhow::Result<()> { - if let Some(log_summary) = log_summary { - let cmd = build_info::format_cmd(&log_summary.invocation); - - let timestamp: DateTime = log_summary.timestamp.into(); - buck2_client_ctx::eprintln!( - "{:<7} {} {}", - format!("[{}].", index), - timestamp.format("%c %Z"), - cmd - ) - } else { - buck2_client_ctx::eprintln!( - "{:<7} <>", - format!("[{}].", index), - ) - } -} - -async fn output_rage(no_paste: bool, output: &str) -> anyhow::Result<()> { - if no_paste { - buck2_client_ctx::println!("{}", output)?; - } else { - let paste = generate_paste("Buck2 Rage", output).await?; - buck2_client_ctx::eprintln!( - "\nPlease post in https://fb.workplace.com/groups/buck2users with the following link:\n\n{}\n", - paste - )?; - }; - Ok(()) -} - -async fn generate_paste(title: &str, content: &str) -> anyhow::Result { - let mut pastry = Command::new("pastry") - .args(["--title", title]) - .stdin(Stdio::piped()) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()) - .kill_on_drop(true) - .spawn() - .context(RageError::PastrySpawnError)?; - let mut stdin = pastry.stdin.take().expect("Stdin should open"); - - let writer = async move { - stdin - .write_all(content.as_bytes()) - .await - .context(RageError::PastryWriteError) - }; - - let reader = async move { - let output = tokio::time::timeout(Duration::from_secs(10), pastry.wait_with_output()) - .await - .context(RageError::PastryTimeout)? 
- .context(RageError::PastryOutputError)?; - if !output.status.success() { - let error = String::from_utf8_lossy(&output.stderr).to_string(); - let code = output - .status - .code() - .ok_or_else(|| RageError::PastryCommandError(1, error.clone()))?; - return Err(RageError::PastryCommandError(code, error).into()); - } - let output = String::from_utf8(output.stdout).context(RageError::PastryOutputError)?; - Ok(output) - }; - - let ((), paste) = futures::future::try_join(writer, reader).await?; - - Ok(paste) -} - -async fn get_trace_id(invocation: &Option) -> anyhow::Result> { - let invocation_id = match invocation { - None => None, - Some(invocation) => Some(invocation.uuid_from_filename()?), - }; - Ok(invocation_id) -} diff --git a/app/buck2_client/src/commands/rage/source_control.rs b/app/buck2_client/src/commands/rage/source_control.rs index 2a9f47ad54758..a8baa901fdc6b 100644 --- a/app/buck2_client/src/commands/rage/source_control.rs +++ b/app/buck2_client/src/commands/rage/source_control.rs @@ -8,10 +8,9 @@ */ use anyhow::Context; -use thiserror::Error; -use tokio::process::Command; +use buck2_util::process::async_background_command; -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum SourceControlError { #[error("HG command failed with code '{0}' and error '{1}' ")] HgCommand(i32, String), @@ -41,25 +40,45 @@ pub async fn get_info() -> anyhow::Result { } async fn get_hg_info() -> anyhow::Result { - let result = Command::new("hg") + let result = async_background_command("hg") .args(["snapshot", "create"]) .env("HGPLAIN", "1") .output() .await?; - if result.status.success() { + if !result.status.success() { + let error = from_utf8(result.stderr, "hg snapshot stderr")?; + if error.contains("is not inside a repository") { + return Ok(CommandResult::RepoNotFound); + }; + // On Unix, `code()` will return `None` if the process was terminated by a signal. + let code = result.status.code().unwrap_or(1); + return Err(SourceControlError::HgCommand(code, error).into()); + }; + let snapshot = { let output = from_utf8(result.stdout, "hg snapshot stdout")?; - return Ok(CommandResult::Ok(format!("hg snapshot update {}", output))); + format!("hg snapshot update {}", output) + }; + + let result = async_background_command("hg") + .arg("whereami") + .output() + .await?; + if !result.status.success() { + let error = from_utf8(result.stderr, "hg whereami stderr")?; + // On Unix, `code()` will return `None` if the process was terminated by a signal. 
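
Both versions of `generate_paste` above share one shape: write the child's stdin and await its output concurrently via `futures::future::try_join`, so a full pipe cannot deadlock the process, with `tokio::time::timeout` bounding the read side. A self-contained sketch of the pattern, with `cat` as a stand-in for the real `pastry` binary:

    use std::process::Stdio;
    use std::time::Duration;

    use anyhow::Context;
    use tokio::io::AsyncWriteExt;
    use tokio::process::Command;

    async fn pipe_through(content: &str) -> anyhow::Result<String> {
        let mut child = Command::new("cat") // stand-in for `pastry`
            .stdin(Stdio::piped())
            .stdout(Stdio::piped())
            .kill_on_drop(true)
            .spawn()
            .context("spawn failed")?;
        let mut stdin = child.stdin.take().context("stdin should be piped")?;

        let writer = async move {
            // Dropping `stdin` at the end of this block closes the pipe (EOF).
            stdin.write_all(content.as_bytes()).await.context("write failed")
        };
        let reader = async move {
            let out = tokio::time::timeout(Duration::from_secs(10), child.wait_with_output())
                .await
                .context("timed out")??;
            String::from_utf8(out.stdout).context("output was not utf-8")
        };

        // Polls both sides at once; fails fast if either errors.
        let ((), out) = futures::future::try_join(writer, reader).await?;
        Ok(out)
    }
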
+ let code = result.status.code().unwrap_or(1); + return Err(SourceControlError::HgCommand(code, error).into()); }; - let error = from_utf8(result.stderr, "hg snapshot stderr")?; - if error.contains("is not inside a repository") { - return Ok(CommandResult::RepoNotFound); + let revision = { + let output = from_utf8(result.stdout, "hg whereami stdout")?; + format!("hg revision: {}", output) }; - let code = result.status.code().unwrap_or(1); - Err(SourceControlError::HgCommand(code, error).into()) + + Ok(CommandResult::Ok(format!("{}{}", revision, snapshot))) } async fn get_git_info() -> anyhow::Result { - let commit_hash = Command::new("git") + let commit_hash = async_background_command("git") .args(["log", "-1", "--format=%H"]) .output() .await?; @@ -72,7 +91,10 @@ async fn get_git_info() -> anyhow::Result { return Err(SourceControlError::GitCommand(code, error).into()); }; - let status = Command::new("git").args(["status", "-sb"]).output().await?; + let status = async_background_command("git") + .args(["status", "-sb"]) + .output() + .await?; if !status.status.success() { let error = from_utf8(status.stderr, "git status stderr")?; let code = status.status.code().unwrap_or(1); diff --git a/app/buck2_client/src/commands/rage/thread_dump.rs b/app/buck2_client/src/commands/rage/thread_dump.rs index dfe96da3861a9..3f79863877d8c 100644 --- a/app/buck2_client/src/commands/rage/thread_dump.rs +++ b/app/buck2_client/src/commands/rage/thread_dump.rs @@ -9,25 +9,32 @@ use anyhow::Context; use buck2_client_ctx::daemon::client::connect::BuckdProcessInfo; -use buck2_client_ctx::manifold::ManifoldClient; -use buck2_common::result::SharedResult; +use buck2_common::manifold::ManifoldClient; use buck2_util::process::async_background_command; use crate::commands::rage::manifold::buf_to_manifold; -pub async fn upload_thread_dump( - buckd: &SharedResult>, - manifold: &ManifoldClient, - manifold_id: &String, -) -> anyhow::Result { - let buckd_pid = buckd.as_ref().map_err(|e| e.clone())?.pid(); - let command = async_background_command("lldb") - .arg("-p") - .arg(buckd_pid.to_string()) +pub(crate) fn thread_dump_command( + buckd: &BuckdProcessInfo<'_>, +) -> anyhow::Result { + let pid = buckd.pid()?; + let mut cmd = async_background_command("lldb"); + cmd.arg("-p") + .arg(pid.to_string()) .arg("--batch") .arg("-o") .arg("thread backtrace all") - .stdin(std::process::Stdio::null()) + .stdin(std::process::Stdio::null()); + Ok(cmd) +} + +pub(crate) async fn upload_thread_dump( + buckd: &buck2_error::Result>, + manifold: &ManifoldClient, + manifold_id: &String, +) -> anyhow::Result { + let buckd = buckd.as_ref().map_err(|e| e.clone())?; + let command = thread_dump_command(buckd)? 
.stdout(std::process::Stdio::piped()) .stderr(std::process::Stdio::piped()) .kill_on_drop(true) diff --git a/app/buck2_client/src/commands/root.rs b/app/buck2_client/src/commands/root.rs index 567b32753b050..0bbfbe6ceb799 100644 --- a/app/buck2_client/src/commands/root.rs +++ b/app/buck2_client/src/commands/root.rs @@ -9,14 +9,13 @@ use std::str::FromStr; -use buck2_client_ctx::argv::Argv; -use buck2_client_ctx::argv::SanitizedArgv; use buck2_client_ctx::client_ctx::ClientCommandContext; use buck2_client_ctx::path_arg::PathArg; +use buck2_common::argv::Argv; +use buck2_common::argv::SanitizedArgv; use buck2_common::invocation_roots::find_invocation_roots; -use thiserror::Error; -#[derive(Debug)] +#[derive(Debug, Clone, clap::ValueEnum)] enum RootKind { Package, Cell, @@ -40,7 +39,13 @@ impl FromStr for RootKind { #[derive(Debug, clap::Parser)] #[clap(about = "Find buck cell, project or package root")] pub struct RootCommand { - #[clap(short, long, help("which root to print"), default_value("cell"), possible_values(&["package", "cell", "project", "daemon"]))] + #[clap( + short, + long, + help("which root to print"), + default_value("cell"), + value_enum + )] kind: RootKind, #[clap( help( @@ -52,7 +57,7 @@ pub struct RootCommand { dir: Option, } -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum RootError { #[error("Finding package root isn't yet implemented.")] PackageRootUnimplemented, diff --git a/app/buck2_client/src/commands/run.rs b/app/buck2_client/src/commands/run.rs index 9916a4d194dd9..177a03b2d94e7 100644 --- a/app/buck2_client/src/commands/run.rs +++ b/app/buck2_client/src/commands/run.rs @@ -17,24 +17,25 @@ use buck2_cli_proto::build_request::build_providers; use buck2_cli_proto::build_request::BuildProviders; use buck2_cli_proto::build_request::Materializations; use buck2_cli_proto::BuildRequest; -use buck2_client_ctx::argv::Argv; -use buck2_client_ctx::argv::SanitizedArgv; use buck2_client_ctx::client_ctx::ClientCommandContext; use buck2_client_ctx::command_outcome::CommandOutcome; +use buck2_client_ctx::common::build::CommonBuildOptions; +use buck2_client_ctx::common::target_cfg::TargetCfgOptions; +use buck2_client_ctx::common::ui::CommonConsoleOptions; use buck2_client_ctx::common::CommonBuildConfigurationOptions; -use buck2_client_ctx::common::CommonBuildOptions; use buck2_client_ctx::common::CommonCommandOptions; -use buck2_client_ctx::common::CommonConsoleOptions; -use buck2_client_ctx::common::CommonDaemonCommandOptions; +use buck2_client_ctx::common::CommonEventLogOptions; +use buck2_client_ctx::common::CommonStarlarkOptions; use buck2_client_ctx::daemon::client::BuckdClientConnector; use buck2_client_ctx::daemon::client::NoPartialResultHandler; use buck2_client_ctx::exit_result::ExitResult; use buck2_client_ctx::path_arg::PathArg; use buck2_client_ctx::streaming::StreamingCommand; +use buck2_common::argv::Argv; +use buck2_common::argv::SanitizedArgv; use buck2_wrapper_common::BUCK2_WRAPPER_ENV_VAR; use buck2_wrapper_common::BUCK_WRAPPER_UUID_ENV_VAR; use serde::Serialize; -use thiserror::Error; use crate::commands::build::print_build_failed; use crate::commands::build::print_build_result; @@ -45,17 +46,8 @@ use crate::commands::build::print_build_succeeded; /// The Build ID for the underlying build execution is made available to the target in /// the `BUCK_RUN_BUILD_ID` environment variable. 
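
The thread_dump.rs hunk above factors the lldb invocation into a `thread_dump_command` helper so the rage flow and other callers can share it. A sketch of the extracted builder, substituting plain `tokio::process::Command` for the internal `async_background_command` wrapper:

    use std::process::Stdio;

    use tokio::process::Command;

    // Build (but do not spawn) the lldb batch invocation that dumps all thread
    // backtraces of `pid`; callers decide how to wire stdout/stderr.
    fn thread_dump_command(pid: u32) -> Command {
        let mut cmd = Command::new("lldb");
        cmd.arg("-p")
            .arg(pid.to_string())
            .arg("--batch")
            .arg("-o")
            .arg("thread backtrace all")
            .stdin(Stdio::null());
        cmd
    }
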
#[derive(Debug, clap::Parser)] -#[clap( - name = "run", - setting = clap::AppSettings::TrailingVarArg -)] +#[clap(name = "run", trailing_var_arg = true)] pub struct RunCommand { - #[clap(flatten)] - common_opts: CommonCommandOptions, - - #[clap(flatten)] - build_opts: CommonBuildOptions, - #[clap( long = "command-args-file", help = "Write the command to a file instead of executing it.", @@ -83,6 +75,15 @@ pub struct RunCommand { help = "Additional arguments passed to the target when running it" )] extra_run_args: Vec, + + #[clap(flatten)] + build_opts: CommonBuildOptions, + + #[clap(flatten)] + target_cfg: TargetCfgOptions, + + #[clap(flatten)] + common_opts: CommonCommandOptions, } #[async_trait] @@ -103,9 +104,8 @@ impl StreamingCommand for RunCommand { BuildRequest { context: Some(context), // TODO(wendyy): glob patterns should be prohibited, and command should fail before the build event happens. - target_patterns: vec![buck2_data::TargetPattern { - value: self.target.clone(), - }], + target_patterns: vec![self.target.clone()], + target_cfg: Some(self.target_cfg.target_cfg()), build_providers: Some(BuildProviders { default_info: build_providers::Action::Skip as i32, run_info: build_providers::Action::Build as i32, @@ -115,6 +115,7 @@ impl StreamingCommand for RunCommand { build_opts: Some(self.build_opts.to_proto()), final_artifact_materializations: Materializations::Materialize as i32, target_universe: Vec::new(), + output_hashes_file: None, }, ctx.stdin() .console_interaction_stream(&self.common_opts.console_opts), @@ -124,7 +125,7 @@ impl StreamingCommand for RunCommand { let console = self.common_opts.console_opts.final_console(); let success = match &response { - Ok(CommandOutcome::Success(response)) => response.error_messages.is_empty(), + Ok(CommandOutcome::Success(response)) => response.errors.is_empty(), Ok(CommandOutcome::Failure(_)) => false, Err(_) => false, }; @@ -132,10 +133,10 @@ impl StreamingCommand for RunCommand { print_build_failed(&console)?; } let response = response??; - print_build_result(&console, &response.error_messages)?; + print_build_result(&console, &response.errors)?; if !success { - return ExitResult::failure(); + return ExitResult::from_errors(&response.errors); } if response.build_targets.len() > 1 { @@ -180,7 +181,10 @@ impl StreamingCommand for RunCommand { if self.emit_shell { if cfg!(unix) { - buck2_client_ctx::println!("{}", shlex::join(run_args.iter().map(|a| a.as_str())))?; + buck2_client_ctx::println!( + "{}", + shlex::try_join(run_args.iter().map(|a| a.as_str()))? 
+ )?; return ExitResult::success(); } else { return ExitResult::err(RunCommandError::EmitShellNotSupportedOnWindows.into()); @@ -190,8 +194,8 @@ impl StreamingCommand for RunCommand { let chdir = self.chdir.map(|chdir| chdir.resolve(&ctx.working_dir)); ExitResult::exec( - run_args[0].clone(), - run_args, + run_args[0].clone().into(), + run_args.into_iter().map(|arg| arg.into()).collect(), chdir, vec![("BUCK_RUN_BUILD_ID".to_owned(), ctx.trace_id.to_string())], ) @@ -201,14 +205,18 @@ impl StreamingCommand for RunCommand { &self.common_opts.console_opts } - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { + fn event_log_opts(&self) -> &CommonEventLogOptions { &self.common_opts.event_log_opts } - fn common_opts(&self) -> &CommonBuildConfigurationOptions { + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions { &self.common_opts.config_opts } + fn starlark_opts(&self) -> &CommonStarlarkOptions { + &self.common_opts.starlark_opts + } + fn sanitize_argv(&self, argv: Argv) -> SanitizedArgv { let Argv { argv, @@ -239,7 +247,7 @@ struct CommandArgsFile { print_command: bool, } -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] pub enum RunCommandError { #[error("Target `{0}` is not a binary rule (only binary rules can be `run`)")] NonBinaryRule(String), diff --git a/app/buck2_client/src/commands/server.rs b/app/buck2_client/src/commands/server.rs index 5399ff7ef2987..77d098a516589 100644 --- a/app/buck2_client/src/commands/server.rs +++ b/app/buck2_client/src/commands/server.rs @@ -9,9 +9,10 @@ use async_trait::async_trait; use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::common::ui::CommonConsoleOptions; use buck2_client_ctx::common::CommonBuildConfigurationOptions; -use buck2_client_ctx::common::CommonConsoleOptions; -use buck2_client_ctx::common::CommonDaemonCommandOptions; +use buck2_client_ctx::common::CommonEventLogOptions; +use buck2_client_ctx::common::CommonStarlarkOptions; use buck2_client_ctx::daemon::client::BuckdClientConnector; use buck2_client_ctx::exit_result::ExitResult; use buck2_client_ctx::streaming::StreamingCommand; @@ -39,11 +40,15 @@ impl StreamingCommand for ServerCommand { CommonConsoleOptions::simple_ref() } - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { - CommonDaemonCommandOptions::default_ref() + fn event_log_opts(&self) -> &CommonEventLogOptions { + CommonEventLogOptions::default_ref() } - fn common_opts(&self) -> &CommonBuildConfigurationOptions { + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions { CommonBuildConfigurationOptions::default_ref() } + + fn starlark_opts(&self) -> &CommonStarlarkOptions { + CommonStarlarkOptions::default_ref() + } } diff --git a/app/buck2_client/src/commands/status.rs b/app/buck2_client/src/commands/status.rs index 5d44f5bd98ea4..af3f304e29f8b 100644 --- a/app/buck2_client/src/commands/status.rs +++ b/app/buck2_client/src/commands/status.rs @@ -11,18 +11,19 @@ use std::time::Duration; use anyhow::Context; use buck2_cli_proto::StatusResponse; -use buck2_client_ctx::argv::Argv; -use buck2_client_ctx::argv::SanitizedArgv; use buck2_client_ctx::client_ctx::ClientCommandContext; use buck2_client_ctx::daemon::client::connect::establish_connection_existing; use buck2_client_ctx::daemon::client::connect::BuckdConnectOptions; use buck2_client_ctx::subscribers::stdout_stderr_forwarder::StdoutStderrForwarder; +use buck2_client_ctx::subscribers::subscribers::EventSubscribers; +use buck2_common::argv::Argv; +use buck2_common::argv::SanitizedArgv; use 
buck2_common::daemon_dir::DaemonDir; -use chrono::NaiveDateTime; +use chrono::DateTime; use humantime::format_duration; use walkdir::WalkDir; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum StatusError { #[error("Incorrect seconds/nanos argument")] NativeDateTime, @@ -43,7 +44,7 @@ impl StatusCommand { _matches: &clap::ArgMatches, ctx: ClientCommandContext<'_>, ) -> anyhow::Result<()> { - ctx.with_runtime(async move |ctx| { + ctx.with_runtime(|ctx| async move { if self.all { let mut daemon_dirs = Vec::new(); let root = ctx.paths()?.roots.common_buckd_dir()?; @@ -66,7 +67,9 @@ impl StatusCommand { if let Ok(bootstrap_client) = establish_connection_existing(&dir).await { statuses.push(process_status( bootstrap_client - .with_subscribers(vec![Box::new(StdoutStderrForwarder)]) + .with_subscribers(EventSubscribers::new(vec![Box::new( + StdoutStderrForwarder, + )])) .with_flushing() .status(self.snapshot) .await?, @@ -105,7 +108,7 @@ impl StatusCommand { } fn timestamp_to_string(seconds: u64, nanos: u32) -> anyhow::Result { - Ok(NaiveDateTime::from_timestamp_opt(seconds as i64, nanos) + Ok(DateTime::from_timestamp(seconds as i64, nanos) .context(StatusError::NativeDateTime)? .format("%Y-%m-%dT%H:%M:%SZ") .to_string()) @@ -129,7 +132,7 @@ fn process_status(status: StatusResponse) -> anyhow::Result { } }; - Ok(serde_json::json!({ + let mut value = serde_json::json!({ "start_time": timestamp, "uptime": uptime, "process_info": serde_json::to_value(status.process_info)?, @@ -139,7 +142,19 @@ fn process_status(status: StatusResponse) -> anyhow::Result { "isolation_dir": status.isolation_dir, "forkserver_pid": serde_json::to_value(status.forkserver_pid)?, "supports_vpnless": status.supports_vpnless.unwrap_or_default(), - })) + "http2": status.http2, + "io_provider": status.io_provider, + }); + + if let Some(valid_working_directory) = status.valid_working_directory { + value["valid_working_directory"] = serde_json::to_value(valid_working_directory)?; + } + + if let Some(valid_buck_out_mount) = status.valid_buck_out_mount { + value["valid_buck_out_mount"] = serde_json::to_value(valid_buck_out_mount)?; + } + + Ok(value) } #[cfg(test)] diff --git a/app/buck2_client/src/commands/subscribe.rs b/app/buck2_client/src/commands/subscribe.rs index add3943a11fd3..9847688c4f405 100644 --- a/app/buck2_client/src/commands/subscribe.rs +++ b/app/buck2_client/src/commands/subscribe.rs @@ -11,13 +11,15 @@ use anyhow::Context as _; use async_trait::async_trait; use buck2_cli_proto::protobuf_util::ProtobufSplitter; use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::common::ui::CommonConsoleOptions; +use buck2_client_ctx::common::ui::ConsoleType; use buck2_client_ctx::common::CommonBuildConfigurationOptions; -use buck2_client_ctx::common::CommonConsoleOptions; -use buck2_client_ctx::common::CommonDaemonCommandOptions; -use buck2_client_ctx::common::ConsoleType; +use buck2_client_ctx::common::CommonEventLogOptions; +use buck2_client_ctx::common::CommonStarlarkOptions; use buck2_client_ctx::daemon::client::BuckdClientConnector; use buck2_client_ctx::events_ctx::PartialResultCtx; use buck2_client_ctx::events_ctx::PartialResultHandler; +use buck2_client_ctx::exit_result::ExitCode; use buck2_client_ctx::exit_result::ExitResult; use buck2_client_ctx::stream_util::reborrow_stream_for_static; use buck2_client_ctx::streaming::StreamingCommand; @@ -44,12 +46,6 @@ use tokio_util::codec::FramedRead; #[derive(Debug, clap::Parser)] #[clap(about = "Subscribe to updates from the 
Buck2 daemon")] pub struct SubscribeCommand { - #[clap(flatten)] - config_opts: CommonBuildConfigurationOptions, - - #[clap(flatten)] - event_log_opts: CommonDaemonCommandOptions, - /// Whether to request command snapshots. #[clap(long)] active_commands: bool, @@ -58,6 +54,15 @@ pub struct SubscribeCommand { /// used for debugging. #[clap(long)] unstable_json: bool, + + #[clap(flatten)] + config_opts: CommonBuildConfigurationOptions, + + #[clap(flatten)] + starlark_opts: CommonStarlarkOptions, + + #[clap(flatten)] + event_log_opts: CommonEventLogOptions, } #[async_trait] @@ -145,7 +150,9 @@ impl StreamingCommand for SubscribeCommand { if partial_result_handler.ok { ExitResult::success() } else { - ExitResult::failure() + // FIXME(JakobDegen): This command should propagate some error information back from the + // server so that we can do error handling here. + ExitResult::status(ExitCode::UnknownFailure) } } @@ -160,14 +167,18 @@ impl StreamingCommand for SubscribeCommand { &SIMPLE_CONSOLE } - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { + fn event_log_opts(&self) -> &CommonEventLogOptions { &self.event_log_opts } - fn common_opts(&self) -> &CommonBuildConfigurationOptions { + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions { &self.config_opts } + fn starlark_opts(&self) -> &CommonStarlarkOptions { + &self.starlark_opts + } + fn should_expect_spans(&self) -> bool { // It's normal to get no open spans for this command false diff --git a/app/buck2_client/src/commands/targets.rs b/app/buck2_client/src/commands/targets.rs index fdc5b47eb5596..cbd71f7676f19 100644 --- a/app/buck2_client/src/commands/targets.rs +++ b/app/buck2_client/src/commands/targets.rs @@ -12,10 +12,14 @@ use buck2_cli_proto::targets_request; use buck2_cli_proto::targets_request::OutputFormat; use buck2_cli_proto::TargetsRequest; use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::common::build::CommonOutputOptions; +use buck2_client_ctx::common::target_cfg::TargetCfgOptions; +use buck2_client_ctx::common::ui::CommonConsoleOptions; use buck2_client_ctx::common::CommonBuildConfigurationOptions; use buck2_client_ctx::common::CommonCommandOptions; -use buck2_client_ctx::common::CommonConsoleOptions; -use buck2_client_ctx::common::CommonDaemonCommandOptions; +use buck2_client_ctx::common::CommonEventLogOptions; +use buck2_client_ctx::common::CommonStarlarkOptions; +use buck2_client_ctx::common::PrintOutputsFormat; use buck2_client_ctx::daemon::client::BuckdClientConnector; use buck2_client_ctx::daemon::client::NoPartialResultHandler; use buck2_client_ctx::daemon::client::StdoutPartialResultHandler; @@ -28,7 +32,9 @@ use buck2_core::fs::paths::abs_norm_path::AbsNormPath; use dupe::Dupe; use gazebo::prelude::*; -#[derive(thiserror::Error, Debug)] +use crate::print::PrintOutputs; + +#[derive(buck2_error::Error, Debug)] enum TargetsError { /// Clap should report it, but if we missed something, this is a fallback. 
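
The status.rs hunk above also swaps chrono's deprecated `NaiveDateTime::from_timestamp_opt` for `DateTime::from_timestamp`, which returns an `Option<DateTime<Utc>>` directly. A sketch of the migrated helper:

    use chrono::DateTime;

    fn timestamp_to_string(seconds: i64, nanos: u32) -> Option<String> {
        // `from_timestamp` yields None for out-of-range timestamps, exactly
        // where the old NaiveDateTime constructor did.
        Some(
            DateTime::from_timestamp(seconds, nanos)?
                .format("%Y-%m-%dT%H:%M:%SZ")
                .to_string(),
        )
    }
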
#[error("Flags are mutually exclusive")] @@ -37,7 +43,7 @@ enum TargetsError { // Use non-camel case so the possible values match buck1's #[allow(non_camel_case_types)] -#[derive(Debug, Clone, Dupe, clap::ArgEnum)] +#[derive(Debug, Clone, Dupe, clap::ValueEnum)] #[clap(rename_all = "snake_case")] enum TargetHashFileMode { PathsOnly, @@ -45,7 +51,19 @@ enum TargetHashFileMode { None, } -#[derive(Debug, clap::ArgEnum, Clone, Dupe)] +impl TargetHashFileMode { + fn to_proto(&self) -> targets_request::TargetHashFileMode { + match self { + TargetHashFileMode::PathsOnly => targets_request::TargetHashFileMode::PathsOnly, + TargetHashFileMode::PathsAndContents => { + targets_request::TargetHashFileMode::PathsAndContents + } + TargetHashFileMode::None => targets_request::TargetHashFileMode::NoFiles, + } + } +} + +#[derive(Debug, clap::ValueEnum, Clone, Dupe)] enum TargetHashGraphType { None, Unconfigured, @@ -56,7 +74,7 @@ enum TargetHashGraphType { /// Possible values for the --target-hash-function arg. We don't actually /// honor the specific algorithms, we use them as a hint to pick "fast" or "strong". #[allow(non_camel_case_types)] -#[derive(Debug, clap::ArgEnum, Clone, Dupe)] +#[derive(Debug, clap::ValueEnum, Clone, Dupe)] enum TargetHashFunction { Sha1, Sha256, @@ -65,6 +83,23 @@ enum TargetHashFunction { Strong, } +#[derive(Debug, clap::ValueEnum, Clone, Dupe)] +enum Compression { + None, + Gzip, + Zstd, +} + +impl Compression { + fn to_proto(&self) -> buck2_cli_proto::targets_request::Compression { + match self { + Compression::None => buck2_cli_proto::targets_request::Compression::Uncompressed, + Compression::Gzip => buck2_cli_proto::targets_request::Compression::Gzip, + Compression::Zstd => buck2_cli_proto::targets_request::Compression::Zstd, + } + } +} + /// Show details about the specified targets. /// /// This command is meant to only handle unconfigured targets, @@ -72,9 +107,6 @@ enum TargetHashFunction { #[derive(Debug, clap::Parser)] #[clap(name = "utargets")] pub struct TargetsCommand { - #[clap(flatten)] - common_opts: CommonCommandOptions, - /// Print targets as JSON #[clap(long)] json: bool, @@ -96,7 +128,7 @@ pub struct TargetsCommand { show_target_hash: bool, /// Print a stable unconfigured hash of each target after the target name. - #[clap(long, conflicts_with = "show-target-hash")] + #[clap(long, conflicts_with = "show_target_hash")] show_unconfigured_target_hash: bool, /// Modifies computation of target hashes. If set to `PATHS_AND_CONTENTS` (the default), the contents @@ -105,7 +137,7 @@ pub struct TargetsCommand { /// See also --target-hash-modified-paths. #[clap( long, - arg_enum, + value_enum, ignore_case = true, default_value = "paths_and_contents", conflicts_with = "streaming" @@ -116,14 +148,14 @@ pub struct TargetsCommand { /// `PATHS_ONLY`. If a target or its dependencies reference a file from this set, the target's hash /// will be different than if this option was omitted. Otherwise, the target's hash will be the same /// as if this option was omitted. - #[clap(long, multiple_values = true, conflicts_with = "streaming")] + #[clap(long, num_args=1.., conflicts_with = "streaming")] target_hash_modified_paths: Vec, /// Selects either the "fast" or the "strong" target hash function to be used for computing target hashes. /// While we don't specify the exact algorithm, the "strong" algorithm should be a reasonable cryptographic /// hash (ex. blake3) while the "fast" function will likely be a non-crypto hash. 
Both functions are /// guaranteed to be deterministic and to have the same value across different platforms/architectures. - #[clap(long, ignore_case = true, default_value = "fast", arg_enum)] + #[clap(long, ignore_case = true, default_value = "fast", value_enum)] target_hash_function: TargetHashFunction, /// When true, emit the hash or target node and all dependencies recursively. @@ -139,13 +171,8 @@ pub struct TargetsCommand { #[clap(long)] include_defaults: bool, - /// Print the path to the output for each of the rules relative to the cell - #[clap(long)] - show_output: bool, - - /// Print the absolute path to the output for each of the rules relative to the cell - #[clap(long)] - show_full_output: bool, + #[clap(flatten)] + show_output: CommonOutputOptions, /// On loading errors, put buck.error in the output stream and continue #[clap(long)] @@ -184,6 +211,15 @@ pub struct TargetsCommand { #[clap(long, short = 'o', value_name = "PATH")] output: Option, + /// Compress the output. + #[clap( + long, + default_value = "none", + value_name = "SCHEME", + requires = "output" + )] + compression: Compression, + /// Patterns to interpret #[clap(name = "TARGET_PATTERNS")] patterns: Vec, @@ -191,6 +227,12 @@ pub struct TargetsCommand { /// Number of threads to use during execution (default is # cores) #[clap(short = 'j', long = "num-threads", value_name = "THREADS")] pub num_threads: Option, + + #[clap(flatten)] + target_cfg: TargetCfgOptions, + + #[clap(flatten)] + common_opts: CommonCommandOptions, } impl TargetsCommand { @@ -286,26 +328,14 @@ impl StreamingCommand for TargetsCommand { let target_request = TargetsRequest { context, - target_patterns: self.patterns.map(|pat| buck2_data::TargetPattern { - value: pat.to_owned(), - }), + target_patterns: self.patterns, output_format: output_format as i32, targets: Some(if self.resolve_alias { targets_request::Targets::ResolveAlias(targets_request::ResolveAlias {}) } else { targets_request::Targets::Other(targets_request::Other { output_attributes, - target_hash_file_mode: match self.target_hash_file_mode { - TargetHashFileMode::PathsOnly => { - targets_request::TargetHashFileMode::PathsOnly as i32 - } - TargetHashFileMode::PathsAndContents => { - targets_request::TargetHashFileMode::PathsAndContents as i32 - } - TargetHashFileMode::None => { - targets_request::TargetHashFileMode::NoFiles as i32 - } - }, + target_hash_file_mode: self.target_hash_file_mode.to_proto() as i32, target_hash_modified_paths, target_hash_use_fast_hash, target_hash_graph_type, @@ -318,30 +348,24 @@ impl StreamingCommand for TargetsCommand { package_values, }) }), + target_cfg: Some(self.target_cfg.target_cfg()), output: self .output .try_map(|x| x.resolve(&ctx.working_dir).into_string())?, concurrency: self .num_threads .map(|num| buck2_cli_proto::Concurrency { concurrency: num }), + compression: self.compression.to_proto() as i32, }; - if self.show_output { - targets_show_outputs( - ctx.stdin(), - buckd, - target_request, - None, - &self.common_opts.console_opts, - ) - .await - } else if self.show_full_output { + if let Some(format) = self.show_output.format() { let project_root = ctx.paths()?.roots.project_root.clone(); targets_show_outputs( ctx.stdin(), buckd, target_request, - Some(project_root.root()), + self.show_output.is_full().then(|| project_root.root()), + format, &self.common_opts.console_opts, ) .await @@ -360,13 +384,17 @@ impl StreamingCommand for TargetsCommand { &self.common_opts.console_opts } - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { + fn 
event_log_opts(&self) -> &CommonEventLogOptions { &self.common_opts.event_log_opts } - fn common_opts(&self) -> &CommonBuildConfigurationOptions { + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions { &self.common_opts.config_opts } + + fn starlark_opts(&self) -> &CommonStarlarkOptions { + &self.common_opts.starlark_opts + } } async fn targets_show_outputs( @@ -374,6 +402,7 @@ async fn targets_show_outputs( buckd: &mut BuckdClientConnector<'_>, target_request: TargetsRequest, root_path: Option<&AbsNormPath>, + format: PrintOutputsFormat, console_opts: &CommonConsoleOptions, ) -> ExitResult { let response = buckd @@ -384,25 +413,18 @@ async fn targets_show_outputs( &mut NoPartialResultHandler, ) .await??; - for target_paths in response.targets_paths { - for path in target_paths.paths { - let path = if cfg!(windows) { - path.replace('/', "\\") - } else { - path - }; - match root_path { - Some(root) => { - buck2_client_ctx::println!( - "{} {}", - target_paths.target, - root.as_path().join(path).display() - ) - } - None => buck2_client_ctx::println!("{} {}", target_paths.target, path), - }?; + + buck2_client_ctx::stdio::print_with_writer(|out| { + let root_path = root_path.map(|root| root.to_path_buf()); + let mut print = PrintOutputs::new(out, root_path, format)?; + for target_paths in response.targets_paths { + for path in target_paths.paths { + print.output(&target_paths.target, Some(&path))?; + } } - } + print.finish() + })?; + ExitResult::success() } diff --git a/app/buck2_client/src/commands/test.rs b/app/buck2_client/src/commands/test.rs index 737cc6b08c52f..bb9c0f2566043 100644 --- a/app/buck2_client/src/commands/test.rs +++ b/app/buck2_client/src/commands/test.rs @@ -13,11 +13,13 @@ use buck2_cli_proto::CounterWithExamples; use buck2_cli_proto::TestRequest; use buck2_cli_proto::TestSessionOptions; use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::common::build::CommonBuildOptions; +use buck2_client_ctx::common::target_cfg::TargetCfgOptions; +use buck2_client_ctx::common::ui::CommonConsoleOptions; use buck2_client_ctx::common::CommonBuildConfigurationOptions; -use buck2_client_ctx::common::CommonBuildOptions; use buck2_client_ctx::common::CommonCommandOptions; -use buck2_client_ctx::common::CommonConsoleOptions; -use buck2_client_ctx::common::CommonDaemonCommandOptions; +use buck2_client_ctx::common::CommonEventLogOptions; +use buck2_client_ctx::common::CommonStarlarkOptions; use buck2_client_ctx::daemon::client::BuckdClientConnector; use buck2_client_ctx::daemon::client::NoPartialResultHandler; use buck2_client_ctx::exit_result::ExitResult; @@ -30,7 +32,7 @@ use buck2_client_ctx::subscribers::superconsole::test::span_from_build_failure_c use buck2_client_ctx::subscribers::superconsole::test::TestCounterColumn; use buck2_core::fs::fs_util; use buck2_core::fs::working_dir::WorkingDir; -use gazebo::prelude::*; +use buck2_error::ErrorTag; use superconsole::Line; use superconsole::Span; @@ -68,15 +70,9 @@ fn print_error_counter( #[derive(Debug, clap::Parser)] #[clap(name = "test", about = "Build and test the specified targets")] pub struct TestCommand { - #[clap(flatten)] - common_opts: CommonCommandOptions, - - #[clap(flatten)] - build_opts: CommonBuildOptions, - #[clap( long = "exclude", - multiple_values = true, + num_args = 1.., help = "Labels on targets to exclude from tests" )] exclude: Vec, @@ -86,7 +82,7 @@ pub struct TestCommand { alias = "labels", help = "Labels on targets to include from tests. Prefixing with `!` means to exclude. 
First match wins unless overridden by `always-exclude` flag.\n\ If include patterns are present, regardless of whether exclude patterns are present, then all targets are by default excluded unless explicitly included.", - multiple_values = true + num_args=1.., )] include: Vec, @@ -103,16 +99,6 @@ If include patterns are present, regardless of whether exclude patterns are pres )] build_filtered_targets: bool, // TODO(bobyf) this flag should always override the buckconfig option when we use it - /// This option does nothing. It is here to keep compatibility with Buck1 and ci - #[allow(unused)] // for v1 compat - #[clap(long = "deep")] - deep: bool, - - // ignored. only for e2e tests. compatibility with v1. - #[clap(long = "xml")] - #[allow(unused)] // for v1 compat - xml: Option, - /// Will allow tests that are compatible with RE (setup to run from the repo root and /// use relative paths) to run from RE. #[clap(long, group = "re_options", alias = "unstable-allow-tests-on-re")] @@ -123,6 +109,25 @@ If include patterns are present, regardless of whether exclude patterns are pres #[clap(long, group = "re_options", alias = "unstable-force-tests-on-re")] unstable_allow_all_tests_on_re: bool, + // NOTE: the field below is given a different name from the test runner's `timeout` to avoid + // confusion between the two parameters. + /// How long to execute tests for. If the timeout is exceeded, Buck2 will exit + /// as quickly as possible and not run further tests. In-flight tests will be + /// cancelled. The test orchestrator will be allowed to shut down gracefully. + /// + /// The exit code is controlled by the test orchestrator (which normally should report zero for + /// this). + /// + /// The format is a concatenation of time spans (separated by spaces). Each time span is an + /// integer number and a suffix. + /// + /// Relevant supported suffixes: seconds, second, sec, s, minutes, minute, min, m, hours, hour, + /// hr, h + /// + /// For example: `5m 10s`, `500s`. + #[clap(long = "overall-timeout")] + timeout: Option, + #[clap(name = "TARGET_PATTERNS", help = "Patterns to test")] patterns: Vec, @@ -137,6 +142,11 @@ If include patterns are present, regardless of whether exclude patterns are pres #[clap(long)] test_executor_stdout: Option, + /// Normally testing will follow the `tests` attribute of all targets, to find their associated tests. + /// When passed, this flag will disable that, and only run the directly supplied targets. + #[clap(long)] + ignore_tests_attribute: bool, + /// Writes the test executor stderr to the provided path /// /// --test-executor-stderr=- will write to stderr @@ -156,6 +166,23 @@ If include patterns are present, regardless of whether exclude patterns are pres /// buck2 test //foo:bar -- --env PRIVATE_KEY=123 #[clap(name = "TEST_EXECUTOR_ARGS", raw = true)] test_executor_args: Vec, + + /// This option does nothing. It is here to keep compatibility with Buck1 and ci + #[clap(long = "deep", hide = true)] + _deep: bool, + + // ignored. only for e2e tests. compatibility with v1. 
+ #[clap(long = "xml", hide = true)] + _xml: Option, + + #[clap(flatten)] + build_opts: CommonBuildOptions, + + #[clap(flatten)] + target_cfg: TargetCfgOptions, + + #[clap(flatten)] + common_opts: CommonCommandOptions, } #[async_trait] @@ -174,9 +201,8 @@ impl StreamingCommand for TestCommand { .test( TestRequest { context: Some(context), - target_patterns: self - .patterns - .map(|pat| buck2_data::TargetPattern { value: pat.clone() }), + target_patterns: self.patterns.clone(), + target_cfg: Some(self.target_cfg.target_cfg()), test_executor_args: self.test_executor_args, excluded_labels: self.exclude, included_labels: self.include, @@ -191,6 +217,15 @@ impl StreamingCommand for TestCommand { force_use_project_relative_paths: self.unstable_allow_all_tests_on_re, force_run_from_project_root: self.unstable_allow_all_tests_on_re, }), + timeout: self + .timeout + .map(|t| { + let t: std::time::Duration = t.into(); + t.try_into() + }) + .transpose() + .context("Invalid `timeout`")?, + ignore_tests_attribute: self.ignore_tests_attribute, }, ctx.stdin() .console_interaction_stream(&self.common_opts.console_opts), @@ -213,15 +248,22 @@ impl StreamingCommand for TestCommand { let skipped = statuses.skipped.as_ref().context("Missing `skipped`")?; let console = self.common_opts.console_opts.final_console(); - print_build_result(&console, &response.error_messages)?; - if !response.error_messages.is_empty() { - console.print_error(&format!("{} BUILDS FAILED", response.error_messages.len()))?; + print_build_result(&console, &response.errors)?; + + // Filtering out individual types might not be best here. While we just have 1 non-build + // error that seems OK, but if we add more we should reconsider (we could add a type on all + // the build errors, but that seems potentially confusing if we only do that in the test + // command). + let build_errors = response + .errors + .into_iter() + .filter(|e| !e.tags().any(|t| t == ErrorTag::TestDeadlineExpired)) + .collect::>(); + + if !build_errors.is_empty() { + console.print_error(&format!("{} BUILDS FAILED", build_errors.len()))?; } - // TODO(nmj): Might make sense for us to expose the event ctx, and use its - // handle_stdout method, instead of raw buck2_client::println!s here. - // TODO: also remove the duplicate information when the above is done. - let mut line = Line::default(); line.push(Span::new_unstyled_lossy("Tests finished: ")); if listing_failed.count > 0 { @@ -238,9 +280,7 @@ impl StreamingCommand for TestCommand { line.push(column.to_span_from_test_statuses(statuses)?); line.push(Span::new_unstyled_lossy(". ")); } - line.push(span_from_build_failure_count( - response.error_messages.len(), - )?); + line.push(span_from_build_failure_count(build_errors.len())?); eprint_line(&line)?; print_error_counter(&console, listing_failed, "LISTINGS FAILED", "⚠")?; @@ -262,13 +302,23 @@ impl StreamingCommand for TestCommand { Some(OutputDestinationArg::Stream) => { console.print_error(&response.executor_stderr)?; } - _ => {} + None => {} } - let exit_result = if let Some(exit_code) = response.exit_code { + if let Some(build_report) = response.serialized_build_report { + buck2_client_ctx::println!("{}", build_report)?; + } + + let exit_result = if !build_errors.is_empty() { + // If we had build errors, those take precedence and we return their exit code. + ExitResult::from_errors(&build_errors) + } else if let Some(exit_code) = response.exit_code { + // Otherwise, use the exit code from Tpx. 
            ExitResult::status_extended(exit_code)
         } else {
-            ExitResult::failure()
+            // But if we had no build errors, and Tpx did not provide an exit code, then that's
+            // going to be an error.
+            ExitResult::bail("Test executor did not provide an exit code")
         };
 
         match self.test_executor_stdout {
@@ -287,11 +337,15 @@ impl StreamingCommand for TestCommand {
         &self.common_opts.console_opts
     }
 
-    fn event_log_opts(&self) -> &CommonDaemonCommandOptions {
+    fn event_log_opts(&self) -> &CommonEventLogOptions {
         &self.common_opts.event_log_opts
     }
 
-    fn common_opts(&self) -> &CommonBuildConfigurationOptions {
+    fn build_config_opts(&self) -> &CommonBuildConfigurationOptions {
         &self.common_opts.config_opts
     }
+
+    fn starlark_opts(&self) -> &CommonStarlarkOptions {
+        &self.common_opts.starlark_opts
+    }
 }
diff --git a/app/buck2_client/src/lib.rs b/app/buck2_client/src/lib.rs
index a19f9ca622a51..04772e9fe482a 100644
--- a/app/buck2_client/src/lib.rs
+++ b/app/buck2_client/src/lib.rs
@@ -7,10 +7,11 @@
  * of this source tree.
  */
 
-#![feature(async_closure)]
+#![feature(error_generic_member_access)]
 #![feature(try_blocks)]
 #![feature(try_trait_v2)]
 #![feature(exit_status_error)]
+#![feature(used_with_arg)]
 
-pub mod args;
 pub mod commands;
+pub mod print;
diff --git a/app/buck2_client/src/print.rs b/app/buck2_client/src/print.rs
new file mode 100644
index 0000000000000..06b788e8fa9d8
--- /dev/null
+++ b/app/buck2_client/src/print.rs
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::io::Write;
+use std::path::PathBuf;
+
+use buck2_client_ctx::common::PrintOutputsFormat;
+
+pub struct PrintOutputs<W> {
+    out: W,
+    root_path: Option<PathBuf>,
+    format: PrintOutputsFormat,
+    empty: bool,
+}
+
+impl<W: Write> PrintOutputs<W> {
+    pub fn new(
+        mut out: W,
+        root_path: Option<PathBuf>,
+        format: PrintOutputsFormat,
+    ) -> anyhow::Result<Self> {
+        if format == PrintOutputsFormat::Json {
+            write!(out, "{{")?;
+        }
+        Ok(PrintOutputs {
+            out,
+            root_path,
+            format,
+            empty: true,
+        })
+    }
+
+    pub fn output(&mut self, target: &str, path: Option<&str>) -> anyhow::Result<()> {
+        let windows_path;
+        let absolute_path;
+        let absolute_path_lossy;
+        let path = if let Some(mut path) = path {
+            if cfg!(windows) {
+                windows_path = path.replace('/', "\\");
+                path = &windows_path;
+            }
+            if let Some(root_path) = &self.root_path {
+                absolute_path = root_path.join(path);
+                absolute_path_lossy = absolute_path.to_string_lossy();
+                &absolute_path_lossy
+            } else {
+                path
+            }
+        } else {
+            ""
+        };
+
+        match self.format {
+            PrintOutputsFormat::Plain => {
+                writeln!(self.out, "{} {}", target, path)?;
+            }
+            PrintOutputsFormat::Simple => {
+                writeln!(self.out, "{}", path)?;
+            }
+            PrintOutputsFormat::Json => {
+                if !self.empty {
+                    write!(self.out, ",")?;
+                }
+                serde_json::to_writer(&mut self.out, target)?;
+                write!(self.out, ":")?;
+                serde_json::to_writer(&mut self.out, path)?;
+                self.empty = false;
+            }
+        }
+
+        Ok(())
+    }
+
+    pub fn finish(&mut self) -> anyhow::Result<()> {
+        if self.format == PrintOutputsFormat::Json {
+            writeln!(self.out, "}}")?;
+        }
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::path::PathBuf;
+    use std::str;
+
+    use super::PrintOutputs;
+    use super::PrintOutputsFormat;
+
+    #[test]
+    fn test() -> anyhow::Result<()> {
+        for (format, root_path, expected) in [
+            (
PrintOutputsFormat::Plain, + None, + #[cfg(not(windows))] + "\ + fb//third-party/rust:syn buck-out/third-party/rust/syn.rlib\n\ + fb//third-party/rust:serde_derive \n\ + ", + #[cfg(windows)] + "\ + fb//third-party/rust:syn buck-out\\third-party\\rust\\syn.rlib\n\ + fb//third-party/rust:serde_derive \n\ + ", + ), + ( + PrintOutputsFormat::Plain, + Some( + #[cfg(not(windows))] + "/home/metaguest", + #[cfg(windows)] + "C:\\metaguest", + ), + #[cfg(not(windows))] + "\ + fb//third-party/rust:syn /home/metaguest/buck-out/third-party/rust/syn.rlib\n\ + fb//third-party/rust:serde_derive \n\ + ", + #[cfg(windows)] + "\ + fb//third-party/rust:syn C:\\metaguest\\buck-out\\third-party\\rust\\syn.rlib\n\ + fb//third-party/rust:serde_derive \n\ + ", + ), + ( + PrintOutputsFormat::Simple, + None, + #[cfg(not(windows))] + "\ + buck-out/third-party/rust/syn.rlib\n\ + \n\ + ", + #[cfg(windows)] + "\ + buck-out\\third-party\\rust\\syn.rlib\n\ + \n\ + ", + ), + ( + PrintOutputsFormat::Json, + None, + #[cfg(not(windows))] + "\ + {\"fb//third-party/rust:syn\":\"buck-out/third-party/rust/syn.rlib\",\"fb//third-party/rust:serde_derive\":\"\"}\n\ + ", + #[cfg(windows)] + "\ + {\"fb//third-party/rust:syn\":\"buck-out\\\\third-party\\\\rust\\\\syn.rlib\",\"fb//third-party/rust:serde_derive\":\"\"}\n\ + ", + ), + ] { + let mut out = Vec::new(); + let root_path = root_path.map(PathBuf::from); + let mut print = PrintOutputs::new(&mut out, root_path, format)?; + print.output( + "fb//third-party/rust:syn", + Some("buck-out/third-party/rust/syn.rlib"), + )?; + print.output("fb//third-party/rust:serde_derive", None)?; + print.finish()?; + assert_eq!(str::from_utf8(&out).unwrap(), expected); + } + + Ok(()) + } + + #[test] + fn test_json_empty() -> anyhow::Result<()> { + let mut out = Vec::new(); + let mut print = PrintOutputs::new(&mut out, None, PrintOutputsFormat::Json)?; + print.finish()?; + assert_eq!(str::from_utf8(&out).unwrap(), "{}\n"); + Ok(()) + } +} diff --git a/app/buck2_client_ctx/BUCK b/app/buck2_client_ctx/BUCK index 46bb9d7e57e5f..e52abc196c291 100644 --- a/app/buck2_client_ctx/BUCK +++ b/app/buck2_client_ctx/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -30,26 +29,25 @@ rust_library( ], test_deps = [ "fbsource//third-party/rust:assert_matches", + "fbsource//third-party/rust:indoc", "fbsource//third-party/rust:lsp-server", - "fbsource//third-party/rust:maplit", "fbsource//third-party/rust:pretty_assertions", "fbsource//third-party/rust:tempfile", ], deps = [ - "fbsource//third-party/blake3:blake3-rust", "fbsource//third-party/rust:anyhow", - "fbsource//third-party/rust:async-compression", "fbsource//third-party/rust:async-trait", + "fbsource//third-party/rust:blake3", "fbsource//third-party/rust:bytes", "fbsource//third-party/rust:chrono", - "fbsource//third-party/rust:clap-3", + "fbsource//third-party/rust:clap", "fbsource//third-party/rust:crossterm", + "fbsource//third-party/rust:derivative", "fbsource//third-party/rust:derive_more", "fbsource//third-party/rust:fs4", "fbsource//third-party/rust:futures", "fbsource//third-party/rust:hex", "fbsource//third-party/rust:httparse", - "fbsource//third-party/rust:hyper", "fbsource//third-party/rust:itertools", "fbsource//third-party/rust:libc", "fbsource//third-party/rust:memmap2", @@ -62,21 +60,22 @@ rust_library( "fbsource//third-party/rust:serde", "fbsource//third-party/rust:serde_json", "fbsource//third-party/rust:shlex", + 
"fbsource//third-party/rust:strum", "fbsource//third-party/rust:sysinfo", "fbsource//third-party/rust:take_mut", "fbsource//third-party/rust:termwiz", - "fbsource//third-party/rust:thiserror", "fbsource//third-party/rust:tokio", "fbsource//third-party/rust:tokio-stream", "fbsource//third-party/rust:tokio-util", "fbsource//third-party/rust:tonic", "fbsource//third-party/rust:tracing", - "//buck2/allocative/allocative:allocative", "//buck2/app/buck2_build_info:buck2_build_info", "//buck2/app/buck2_cli_proto:buck2_cli_proto", "//buck2/app/buck2_common:buck2_common", "//buck2/app/buck2_core:buck2_core", "//buck2/app/buck2_data:buck2_data", + "//buck2/app/buck2_error:buck2_error", + "//buck2/app/buck2_event_log:buck2_event_log", "//buck2/app/buck2_event_observer:buck2_event_observer", "//buck2/app/buck2_events:buck2_events", "//buck2/app/buck2_util:buck2_util", @@ -86,6 +85,5 @@ rust_library( "//buck2/superconsole:superconsole", # @oss-disable: "//common/rust/shed/detect_eden:detect_eden", "//common/rust/shed/fbinit:fbinit", - # @oss-disable: "//common/rust/shed/hostcaps:hostcaps", ], ) diff --git a/app/buck2_client_ctx/Cargo.toml b/app/buck2_client_ctx/Cargo.toml index b377b380e98cb..32d3eb623c523 100644 --- a/app/buck2_client_ctx/Cargo.toml +++ b/app/buck2_client_ctx/Cargo.toml @@ -1,7 +1,9 @@ [package] description = "Code supporting buck2 client commands" edition = "2021" +license = { workspace = true } name = "buck2_client_ctx" +repository = { workspace = true } version = "0.1.0" [dependencies] @@ -11,56 +13,53 @@ async-compression = { workspace = true } async-trait = { workspace = true } blake3 = { workspace = true } bytes = { workspace = true } -clap = { workspace = true } chrono = { workspace = true } +clap = { workspace = true } crossterm = { workspace = true } derivative = { workspace = true } derive_more = { workspace = true } +dupe = { workspace = true } +fbinit = { workspace = true } fs4 = { workspace = true } futures = { workspace = true } +gazebo = { workspace = true } hex = { workspace = true } -regex = { workspace = true } httparse = { workspace = true } -hyper = { workspace = true } itertools = { workspace = true } libc = { workspace = true } -linked-hash-map = { workspace = true } memmap2 = { workspace = true } object = { workspace = true } once_cell = { workspace = true } pin-project = { workspace = true } prost = { workspace = true } prost-types = { workspace = true } +regex = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } shlex = { workspace = true } +strum = { workspace = true } sysinfo = { workspace = true } take_mut = { workspace = true } termwiz = { workspace = true } -thiserror = { workspace = true } tokio = { workspace = true } tokio-stream = { workspace = true } tokio-util = { workspace = true } tonic = { workspace = true } tracing = { workspace = true } -gazebo = { workspace = true } -dupe = { workspace = true } -fbinit = { workspace = true } -# @oss-disable: detect_eden = { path = "../../../common/rust/shed/detect_eden" } -# @oss-disable: hostcaps = { path = "../../../common/rust/shed/hostcaps" } superconsole = { version = "0.2.0", path = "../../superconsole" } # Please do not add dependency on `buck2_build_api`. 
buck2_build_info = { workspace = true } +buck2_cli_proto = { workspace = true } buck2_common = { workspace = true } buck2_core = { workspace = true } buck2_data = { workspace = true } +buck2_error = { workspace = true } +buck2_event_log = { workspace = true } +buck2_event_observer = { workspace = true } buck2_events = { workspace = true } -buck2_test_api = { workspace = true } -buck2_cli_proto = { workspace = true } buck2_util = { workspace = true } -buck2_event_observer = { workspace = true } buck2_wrapper_common = { workspace = true } [target.'cfg(unix)'.dependencies] @@ -71,8 +70,12 @@ termios = { workspace = true } winapi = { workspace = true } [dev-dependencies] -assert_matches= { workspace = true } +assert_matches = { workspace = true } +indoc = { workspace = true } lsp-server = { workspace = true } -maplit = {workspace = true} +maplit = { workspace = true } pretty_assertions = { workspace = true } tempfile = { workspace = true } + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ["cfg(fbcode_build)"] } diff --git a/app/buck2_client_ctx/src/argfiles.rs b/app/buck2_client_ctx/src/argfiles.rs new file mode 100644 index 0000000000000..14c113bed924c --- /dev/null +++ b/app/buck2_client_ctx/src/argfiles.rs @@ -0,0 +1,308 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::fs::File; +use std::io; +use std::io::BufRead; +use std::path::Path; +use std::process::Command; +use std::str; + +use anyhow::Context as _; +use buck2_core::fs::fs_util; +use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; +use buck2_core::is_open_source; +use buck2_util::process::background_command; +use termwiz::istty::IsTty; + +use crate::immediate_config::ImmediateConfigContext; + +#[derive(buck2_error::Error, Debug)] +enum ArgExpansionError { + #[error("Missing flag file path after --flagfile argument")] + MissingFlagFilePath, + #[error("Unable to read flag file at `{path}`")] + MissingFlagFileOnDisk { source: anyhow::Error, path: String }, + #[error("Unable to read line in flag file `{path}`")] + FlagFileReadError { source: anyhow::Error, path: String }, + #[error("Python mode file `{path}` output is not UTF-8")] + PythonOutputNotUtf8 { path: String }, + #[error("No flag file path after @ symbol in argfile argument")] + MissingFlagFilePathInArgfile, + #[error("Python argfile at `{path}` exited with non-zero status, stderr: {err:?}")] + PythonExecutableFailed { path: String, err: String }, + #[error("Python argfile command ({cmd:?}) execution failed")] + PythonExecutionFailed { source: io::Error, cmd: Command }, + #[error("Unable to read line from stdin")] + StdinReadError { source: anyhow::Error }, +} + +/// Log that a relative flag file was not found in CWD, but was found, and used, from the cell root +/// +/// This prints directly to stderr (sometimes in color). This should be safe, because flagfile +/// expansion runs *very* early in the CLI process lifetime. +pub fn log_relative_path_from_cell_root(requested_path: &str) -> anyhow::Result<()> { + let (prefix, reset) = if io::stderr().is_tty() { + ("\x1b[33m", "\x1b[0m") + } else { + ("WARNING: ", "") + }; + crate::eprintln!( + "{}`@{}` was specified, but not found. 
Using file at `//{}`.",
+        prefix,
+        requested_path,
+        requested_path
+    )?;
+    crate::eprintln!(
+        "This behavior is being deprecated. Please use `\"@//{}\"` instead{}",
+        requested_path,
+        reset
+    )?;
+    Ok(())
+}
+
+#[derive(Clone, Debug)]
+enum ArgFile {
+    PythonExecutable(AbsNormPathBuf, Option<String>),
+    Path(AbsNormPathBuf),
+    Stdin,
+}
+
+// Expands any argfiles passed as command line parameters. There are
+// two ways to do this: `@argfile` or `--flagfile PATH`.
+//
+// Caveats:
+//  - `--` and `--flagfile` cannot be values of other options
+//  - `--flagfile=X` is _not_ supported, you need to pass
+//    `--flagfile X` instead.
+//  - Abbreviated forms such as `--flagfil` are _not_ supported.
+//
+// TODO: This function should also return tracking information, so
+// that we know where args come from. This would be useful
+// in cases where the argfiles contain `--config` flags.
+pub fn expand_argfiles_with_context(
+    args: Vec<String>,
+    context: &mut ImmediateConfigContext,
+) -> anyhow::Result<Vec<String>> {
+    let mut expanded_args = Vec::new();
+    let mut arg_iterator = args.into_iter();
+
+    while let Some(next_arg) = arg_iterator.next() {
+        match next_arg.as_str() {
+            "--" => {
+                expanded_args.push(next_arg);
+                expanded_args.extend(arg_iterator);
+                break;
+            }
+            "--flagfile" => {
+                let flagfile = match arg_iterator.next() {
+                    Some(val) => val,
+                    None => return Err(anyhow::anyhow!(ArgExpansionError::MissingFlagFilePath)),
+                };
+                // TODO: We want to detect cyclic inclusion
+                let expanded_flagfile_args = resolve_and_expand_argfile(&flagfile, context)?;
+                expanded_args.extend(expanded_flagfile_args);
+            }
+            next_arg if next_arg.starts_with('@') => {
+                let flagfile = next_arg.strip_prefix('@').unwrap();
+                if flagfile.is_empty() {
+                    return Err(anyhow::anyhow!(
+                        ArgExpansionError::MissingFlagFilePathInArgfile
+                    ));
+                }
+                // TODO: We want to detect cyclic inclusion
+                let expanded_flagfile_args = resolve_and_expand_argfile(flagfile, context)?;
+                expanded_args.extend(expanded_flagfile_args);
+            }
+            _ => expanded_args.push(next_arg),
+        }
+    }
+
+    Ok(expanded_args)
+}
+
+// Resolves a path argument to an absolute path, reads the flag file and expands
+// it into a list of arguments.
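+//
+// Illustrative expansion (the file name and contents here are hypothetical,
+// not from this change): if `mode/opt` contains the two lines `--config` and
+// `buildmode.opt=true`, then `buck2 build @mode/opt //foo:bar` behaves like
+// `buck2 build --config buildmode.opt=true //foo:bar`. A `.py` argfile such
+// as `@gen.py#flavor` is executed (see `ArgFile::PythonExecutable` below) and
+// its stdout is expanded the same way.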
+fn resolve_and_expand_argfile( + path: &str, + context: &mut ImmediateConfigContext, +) -> anyhow::Result> { + let flagfile = resolve_flagfile(path, context) + .with_context(|| format!("Error resolving flagfile `{}`", path))?; + let flagfile_lines = expand_argfile_contents(&flagfile)?; + expand_argfiles_with_context(flagfile_lines, context) +} + +fn expand_argfile_contents(flagfile: &ArgFile) -> anyhow::Result> { + match flagfile { + ArgFile::Path(path) => { + let mut lines = Vec::new(); + let file = + File::open(path).map_err(|source| ArgExpansionError::MissingFlagFileOnDisk { + source: source.into(), + path: path.to_string_lossy().into_owned(), + })?; + let reader = io::BufReader::new(file); + for line_result in reader.lines() { + let line = line_result.map_err(|source| ArgExpansionError::FlagFileReadError { + source: source.into(), + path: path.to_string_lossy().into_owned(), + })?; + if line.is_empty() { + continue; + } + lines.push(line); + } + Ok(lines) + } + ArgFile::PythonExecutable(path, flag) => { + let mut cmd = background_command(if is_open_source() { + "python3" + } else { + "fbpython" + }); + cmd.env("BUCK2_ARG_FILE", "1"); + cmd.arg(path.as_os_str()); + if let Some(flag) = flag.as_deref() { + cmd.args(["--flavors", flag]); + } + let cmd_out = cmd + .output() + .map_err(|source| ArgExpansionError::PythonExecutionFailed { cmd, source })?; + if cmd_out.status.success() { + Ok(str::from_utf8(&cmd_out.stdout) + .map_err(|_| ArgExpansionError::PythonOutputNotUtf8 { + path: path.to_string_lossy().into_owned(), + })? + .lines() + .filter(|line| !line.is_empty()) + .map(|s| s.to_owned()) + .collect::>()) + } else { + Err(anyhow::anyhow!(ArgExpansionError::PythonExecutableFailed { + path: path.to_string_lossy().into_owned(), + err: String::from_utf8_lossy(&cmd_out.stderr).to_string(), + })) + } + } + ArgFile::Stdin => io::stdin() + .lock() + .lines() + .filter_map(|line| match line { + Ok(x) if x.is_empty() => None, + Ok(x) => Some(Ok(x)), + Err(err) => Some(Err(ArgExpansionError::StdinReadError { + source: err.into(), + } + .into())), + }) + .collect(), + } +} + +// Resolves a path argument to an absolute path, so that it can be read. +fn resolve_flagfile(path: &str, context: &mut ImmediateConfigContext) -> anyhow::Result { + if path == "-" { + return Ok(ArgFile::Stdin); + } + + let (path_part, flag) = match path.split_once('#') { + Some((pypath, pyflag)) => (pypath, Some(pyflag)), + None => (path, None), + }; + + let resolved_path = match path_part.split_once("//") { + Some((cell_alias, cell_relative_path)) => context + .resolve_cell_path(cell_alias, cell_relative_path) + .context("Error resolving cell path")?, + None => { + let p = Path::new(path_part); + if !p.is_absolute() { + match context.canonicalize(p) { + Ok(abs_path) => abs_path, + Err(original_error) => { + let cell_relative_path = context.resolve_cell_path("", path_part)?; + // If the relative path does not exist relative to the cwd, + // attempt to make it relative to the cell root. If *that* + // doesn't exist, just report the original error back, and + // don't tip users off that they can use relative-to-cell paths. + // We want to deprecate that. + match fs_util::try_exists(&cell_relative_path) { + Ok(true) => { + log_relative_path_from_cell_root(path_part)?; + cell_relative_path + } + _ => { + return Err(ArgExpansionError::MissingFlagFileOnDisk { + source: original_error, + path: p.to_string_lossy().into_owned(), + } + .into()); + } + } + } + } + } else { + AbsNormPathBuf::try_from(p.to_owned())? 
+ } + } + }; + + context.push_trace(&resolved_path); + if path_part.ends_with(".py") { + Ok(ArgFile::PythonExecutable( + resolved_path, + flag.map(ToOwned::to_owned), + )) + } else { + Ok(ArgFile::Path(resolved_path)) + } +} + +#[cfg(test)] +mod tests { + use buck2_core::fs::paths::abs_path::AbsPath; + use buck2_core::fs::working_dir::WorkingDir; + + use super::*; + + #[test] + fn test_expand_argfile_content() { + let tempdir = tempfile::tempdir().unwrap(); + let root = AbsPath::new(tempdir.path()).unwrap(); + let mode_file = root.join("mode-file"); + // Test skips empty lines. + fs_util::write(&mode_file, "a\n\nb\n").unwrap(); + let lines = expand_argfile_contents(&ArgFile::Path( + AbsNormPathBuf::from(mode_file.to_string_lossy().into_owned()).unwrap(), + )) + .unwrap(); + assert_eq!(vec!["a".to_owned(), "b".to_owned()], lines); + } + + #[test] + fn test_relative_inclusion() { + // Currently all @-files both on the command line and in files are relative to the current directory. + // This matches gcc/clang, so write a test we don't inadvertantly change it. + let tempdir = tempfile::tempdir().unwrap(); + let root = AbsPath::new(tempdir.path()).unwrap(); + fs_util::create_dir(root.join("foo")).unwrap(); + fs_util::create_dir(root.join("foo/bar")).unwrap(); + fs_util::write(root.join("foo/bar/arg1.txt"), "@bar/arg2.txt").unwrap(); + fs_util::write(root.join("foo/bar/arg2.txt"), "--magic").unwrap(); + fs_util::write(root.join(".buckconfig"), "[cells]\nroot = .").unwrap(); + let cwd = WorkingDir::unchecked_new( + AbsNormPathBuf::new(root.canonicalize().unwrap().join("foo")).unwrap(), + ); + let mut context = ImmediateConfigContext::new(&cwd); + let res = + expand_argfiles_with_context(vec!["@bar/arg1.txt".to_owned()], &mut context).unwrap(); + assert_eq!(res, vec!["--magic".to_owned()]); + } +} diff --git a/app/buck2_client_ctx/src/build_count.rs b/app/buck2_client_ctx/src/build_count.rs deleted file mode 100644 index a7eb363ddc214..0000000000000 --- a/app/buck2_client_ctx/src/build_count.rs +++ /dev/null @@ -1,358 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::collections::HashMap; -use std::io::ErrorKind; -use std::time::Duration; - -use anyhow::Context; -use buck2_common::client_utils; -use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; -use buck2_core::fs::paths::file_name::FileName; -use buck2_data::ParsedTargetPatterns; -use fs4::FileExt; -use serde::Deserialize; -use serde::Serialize; -use tokio::io::AsyncReadExt; -use tokio::io::AsyncWriteExt; - -#[derive(Serialize, Deserialize)] -struct BuildCount(HashMap); - -impl BuildCount { - pub fn increment(&mut self, patterns: &ParsedTargetPatterns) { - for target in patterns.target_patterns.iter() { - match self.0.get_mut(&target.value) { - Some(counter) => { - *counter += 1; - } - None => { - self.0.insert(target.value.clone(), 1); - } - } - } - } - - pub fn min_count(&self, patterns: &ParsedTargetPatterns) -> u64 { - if patterns.target_patterns.is_empty() { - return 0; - } - - // If the target has never been succesfully built it won't be in the map, in that case its count is 0. 
- return patterns - .target_patterns - .iter() - .map(|v| self.0.get(&v.value).copied().unwrap_or(0)) - .min() - .unwrap(); // target_patterns is non-empty, so min() should return Some - } -} - -/// BuildCountManager keeps track of how many times each target has been successfully built since rebase. -/// This helps understand how much the performance differs between first and incremental builds. -pub struct BuildCountManager { - base_dir: AbsNormPathBuf, -} - -impl BuildCountManager { - const LOCK_FILE_NAME: &'static str = "build_count.lock"; - const LOCK_TIMEOUT: Duration = Duration::from_millis(2000); - - pub fn new(base_dir: AbsNormPathBuf) -> Self { - Self { base_dir } - } - - async fn ensure_dir(&self) -> anyhow::Result<()> { - tokio::fs::create_dir_all(&self.base_dir) - .await - .with_context(|| { - format!("Error creating build count directory: `{}`", self.base_dir) - })?; - Ok(()) - } - - async fn read(&self, file_name: &FileName) -> anyhow::Result { - match tokio::fs::File::open(self.base_dir.join(file_name)).await { - Ok(mut file) => { - let mut buffer = String::new(); - file.read_to_string(&mut buffer).await?; - Ok(serde_json::from_str(&buffer)?) - } - Err(e) => match e.kind() { - ErrorKind::NotFound => { - // it is normal after rebase, clean, etc. - Ok(BuildCount(HashMap::new())) - } - _ => Err(e.into()), - }, - } - } - - async fn write(&self, build_count: &BuildCount, file_name: &FileName) -> anyhow::Result<()> { - self.ensure_dir().await?; - let mut file = tokio::fs::File::create(self.base_dir.join(file_name)).await?; - file.write_all(&serde_json::to_vec(build_count)?).await?; - file.sync_data().await?; - Ok(()) - } - - async fn lock_with_timeout(&mut self, timeout: Duration) -> anyhow::Result { - self.ensure_dir().await?; - let file = std::fs::File::create(self.base_dir.join(FileName::new(Self::LOCK_FILE_NAME)?))?; - client_utils::retrying( - Duration::from_millis(5), - Duration::from_millis(100), - timeout, - async || anyhow::Ok(file.try_lock_exclusive()?), - ) - .await?; - Ok(FileLockGuard { file }) - } - - /// Updates the build counts for set of targets (on success) and returns the min. 
- pub async fn min_build_count( - &mut self, - merge_base: &str, - target_patterns: &ParsedTargetPatterns, - is_success: bool, - ) -> anyhow::Result { - let file_name = FileName::new(merge_base)?; - let _guard = self.lock_with_timeout(Self::LOCK_TIMEOUT).await?; - let mut build_count = self.read(file_name).await?; - - if is_success { - build_count.increment(target_patterns); - } - self.write(&build_count, file_name).await?; - Ok(build_count.min_count(target_patterns)) - } -} - -#[must_use] -struct FileLockGuard { - file: std::fs::File, -} - -impl Drop for FileLockGuard { - fn drop(&mut self) { - self.file - .unlock() - .expect("Unexpected failure to release a lock file for build count"); - } -} - -#[cfg(test)] -mod tests { - use gazebo::prelude::VecExt; - - use super::*; - - fn make_patterns(targets: Vec<&'static str>) -> ParsedTargetPatterns { - ParsedTargetPatterns { - target_patterns: targets.into_map(|v| buck2_data::TargetPattern { - value: v.to_owned(), - }), - } - } - - #[test] - fn test_update_normal_input() -> anyhow::Result<()> { - let mut before = HashMap::new(); - before.insert("//some:target".to_owned(), 1); - before.insert("//some/other:target".to_owned(), 2); - let mut bc = BuildCount(before); - let target_patterns = make_patterns(vec!["//some/other:target", "//yet/another:target"]); - bc.increment(&target_patterns); - let mut expected = HashMap::new(); - expected.insert("//some:target".to_owned(), 1); - expected.insert("//some/other:target".to_owned(), 3); - expected.insert("//yet/another:target".to_owned(), 1); - assert_eq!(bc.0, expected); - - Ok(()) - } - - #[test] - fn test_update_empty_input() -> anyhow::Result<()> { - let mut before = HashMap::new(); - before.insert("//some:target".to_owned(), 1); - let expected = before.clone(); - let mut bc = BuildCount(before); - let target_patterns = make_patterns(vec![]); - bc.increment(&target_patterns); - assert_eq!(bc.0, expected); - - Ok(()) - } - - #[test] - fn test_min_count_some_value() -> anyhow::Result<()> { - let mut data = HashMap::new(); - data.insert("//some:target1".to_owned(), 3); - data.insert("//some:target2".to_owned(), 4); - data.insert("//some:target3".to_owned(), 5); - let bc = BuildCount(data); - let target_patterns = make_patterns(vec!["//some:target1", "//some:target2"]); - assert_eq!(bc.min_count(&target_patterns), 3); - - Ok(()) - } - - #[test] - fn test_min_count_ignores_others() -> anyhow::Result<()> { - let mut data = HashMap::new(); - data.insert("//some:target1".to_owned(), 3); - data.insert("//some:target2".to_owned(), 4); - data.insert("//some:target3".to_owned(), 5); - let bc = BuildCount(data); - let target_patterns = make_patterns(vec!["//some:target2"]); - assert_eq!(bc.min_count(&target_patterns), 4); - - Ok(()) - } - - #[test] - fn test_min_count_empty_data() -> anyhow::Result<()> { - let data = HashMap::new(); - let bc = BuildCount(data); - assert_eq!(bc.min_count(&make_patterns(vec![])), 0); - - Ok(()) - } - - #[tokio::test] - async fn test_read_no_such_file() -> anyhow::Result<()> { - let no_such_dir = if cfg!(windows) { - "C:\\no\\such\\dir" - } else { - "/no/such/dir" - }; - let bcm = BuildCountManager::new(AbsNormPathBuf::from(no_such_dir.to_owned())?); - let bc = bcm.read(FileName::new("no_such_file")?).await?; - assert_eq!(bc.0, HashMap::new()); - - Ok(()) - } - - #[tokio::test] - async fn test_read_normal_file() -> anyhow::Result<()> { - let temp_dir = tempfile::tempdir()?; - let file_name = "some_file"; - tokio::fs::write(temp_dir.path().join(file_name), 
"{\"//some:target\":1}").await?; - let mut expected = HashMap::new(); - expected.insert("//some:target".to_owned(), 1); - let bcm = BuildCountManager::new(temp_dir.path().to_path_buf().try_into()?); - let bc = bcm.read(FileName::new(file_name)?).await?; - assert_eq!(bc.0, expected); - - Ok(()) - } - - #[tokio::test] - async fn test_read_illegal_file_contents() -> anyhow::Result<()> { - let temp_dir = tempfile::tempdir()?; - let file_name = "some_file"; - tokio::fs::write(temp_dir.path().join(file_name), "aaa").await?; - let bcm = BuildCountManager::new(temp_dir.path().to_path_buf().try_into()?); - assert!(bcm.read(FileName::new(file_name)?).await.is_err()); - - Ok(()) - } - - #[tokio::test] - async fn test_write_normal_input() -> anyhow::Result<()> { - let temp_dir = tempfile::tempdir()?; - let file_name = "some_file"; - let bcm = BuildCountManager::new(temp_dir.path().to_path_buf().try_into()?); - let mut data = HashMap::new(); - data.insert("//some:target".to_owned(), 1); - bcm.write(&BuildCount(data), FileName::new(file_name)?) - .await?; - assert_eq!( - &tokio::fs::read(temp_dir.path().join(file_name)).await?, - b"{\"//some:target\":1}" - ); - - Ok(()) - } - - #[tokio::test] - async fn test_write_empty_input() -> anyhow::Result<()> { - let temp_dir = tempfile::tempdir()?; - let file_name = "some_file"; - let bcm = BuildCountManager::new(temp_dir.path().to_path_buf().try_into()?); - let data = HashMap::new(); - bcm.write(&BuildCount(data), FileName::new(file_name)?) - .await?; - assert_eq!( - &tokio::fs::read(temp_dir.path().join(file_name)).await?, - b"{}" - ); - - Ok(()) - } - - #[tokio::test] - async fn test_min_build_count_normal_input() -> anyhow::Result<()> { - let temp_dir = tempfile::tempdir()?; - let file_name = "some_file"; - tokio::fs::write(temp_dir.path().join(file_name), "{\"//some:target\":1}").await?; - let target_patterns = make_patterns(vec!["//some:target", "//some/other:target"]); - let mut bcm = BuildCountManager::new(temp_dir.path().to_path_buf().try_into()?); - assert_eq!( - bcm.min_build_count(file_name, &target_patterns, true) - .await?, - 1 - ); - assert_eq!( - bcm.min_build_count(file_name, &target_patterns, true) - .await?, - 2 - ); - - Ok(()) - } - - #[tokio::test] - async fn test_min_build_count_on_failure() -> anyhow::Result<()> { - let temp_dir = tempfile::tempdir()?; - let file_name = "some_file"; - tokio::fs::write(temp_dir.path().join(file_name), "{\"//some:target\":1}").await?; - let target_patterns = make_patterns(vec!["//some:target", "//some/other:target"]); - let mut bcm = BuildCountManager::new(temp_dir.path().to_path_buf().try_into()?); - assert_eq!( - bcm.min_build_count(file_name, &target_patterns, true) - .await?, - 1 - ); - assert_eq!( - bcm.min_build_count(file_name, &target_patterns, false) - .await?, - 1 - ); - - Ok(()) - } - - #[tokio::test] - async fn test_min_build_count_empty_input() -> anyhow::Result<()> { - let temp_dir = tempfile::tempdir()?; - let file_name = "some_file"; - tokio::fs::write(temp_dir.path().join(file_name), "{}").await?; - let target_patterns = make_patterns(vec![]); - let mut bcm = BuildCountManager::new(temp_dir.path().to_path_buf().try_into()?); - assert_eq!( - bcm.min_build_count(file_name, &target_patterns, true) - .await?, - 0 - ); - - Ok(()) - } -} diff --git a/app/buck2_client_ctx/src/client_ctx.rs b/app/buck2_client_ctx/src/client_ctx.rs index 425a88e87edeb..3e155e3dda11d 100644 --- a/app/buck2_client_ctx/src/client_ctx.rs +++ b/app/buck2_client_ctx/src/client_ctx.rs @@ -12,25 +12,28 @@ use 
std::future::Future; use anyhow::Context as _; use buck2_cli_proto::client_context::HostArchOverride as GrpcHostArchOverride; use buck2_cli_proto::client_context::HostPlatformOverride as GrpcHostPlatformOverride; +use buck2_cli_proto::client_context::PreemptibleWhen as GrpcPreemptibleWhen; use buck2_cli_proto::ClientContext; +use buck2_common::argv::Argv; use buck2_common::invocation_paths::InvocationPaths; -use buck2_common::result::SharedResult; -use buck2_core::error::BUCK2_HARD_ERROR_ENV_VAR; +use buck2_common::invocation_paths_result::InvocationPathsResult; +use buck2_core::error::buck2_hard_error_env; +use buck2_core::fs::paths::file_name::FileNameBuf; use buck2_core::fs::working_dir::WorkingDir; use buck2_event_observer::verbosity::Verbosity; +use buck2_util::cleanup_ctx::AsyncCleanupContext; use buck2_wrapper_common::invocation_id::TraceId; use dupe::Dupe; use tokio::runtime::Runtime; -use crate::argv::Argv; -use crate::cleanup_ctx::AsyncCleanupContext; use crate::client_metadata::ClientMetadata; -use crate::common::CommonDaemonCommandOptions; +use crate::common::CommonEventLogOptions; use crate::common::HostArchOverride; use crate::common::HostPlatformOverride; +use crate::common::PreemptibleWhen; use crate::daemon::client::connect::BuckdConnectOptions; use crate::daemon::client::BuckdClientConnector; -use crate::exit_result::ExitResult; +use crate::daemon_constraints::get_possibly_nested_invocation_daemon_uuid; use crate::immediate_config::ImmediateConfigContext; use crate::restarter::Restarter; use crate::stdin::Stdin; @@ -38,35 +41,85 @@ use crate::streaming::StreamingCommand; use crate::subscribers::recorder::try_get_invocation_recorder; pub struct ClientCommandContext<'a> { - pub init: fbinit::FacebookInit, - pub immediate_config: &'a ImmediateConfigContext<'a>, - pub paths: SharedResult, + init: fbinit::FacebookInit, + pub(crate) immediate_config: &'a ImmediateConfigContext<'a>, + paths: InvocationPathsResult, pub working_dir: WorkingDir, pub verbosity: Verbosity, /// When set, this function is called to launch in process daemon. /// The function returns `Ok` when daemon successfully started /// and ready to accept connections. 
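     ///
     /// A hedged sketch of the shape such a launcher takes (the body here is
     /// illustrative only; the real launcher is constructed by the invoker):
     ///
     /// ```ignore
     /// let start_in_process_daemon: Option<Box<dyn FnOnce() -> anyhow::Result<()> + Send + Sync>> =
     ///     Some(Box::new(|| {
     ///         // spawn the daemon, then block until it accepts connections
     ///         Ok(())
     ///     }));
     /// ```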
-    pub start_in_process_daemon: Option<Box<dyn FnOnce() -> anyhow::Result<()> + Send + Sync>>,
-    pub argv: Argv,
+    pub(crate) start_in_process_daemon:
+        Option<Box<dyn FnOnce() -> anyhow::Result<()> + Send + Sync>>,
+    pub(crate) argv: Argv,
     pub trace_id: TraceId,
-    pub async_cleanup: AsyncCleanupContext<'a>,
-    pub stdin: &'a mut Stdin,
-    pub restarter: &'a mut Restarter,
-    pub restarted_trace_id: Option<TraceId>,
-    pub runtime: &'a Runtime,
-    pub oncall: Option<String>,
-    pub client_metadata: Vec<ClientMetadata>,
+    async_cleanup: AsyncCleanupContext<'a>,
+    stdin: &'a mut Stdin,
+    pub(crate) restarter: &'a mut Restarter,
+    pub(crate) restarted_trace_id: Option<TraceId>,
+    runtime: &'a Runtime,
+    oncall: Option<String>,
+    pub(crate) client_metadata: Vec<ClientMetadata>,
+    pub(crate) isolation: FileNameBuf,
 }
 
 impl<'a> ClientCommandContext<'a> {
+    pub fn new(
+        init: fbinit::FacebookInit,
+        immediate_config: &'a ImmediateConfigContext<'a>,
+        paths: InvocationPathsResult,
+        working_dir: WorkingDir,
+        verbosity: Verbosity,
+        start_in_process_daemon: Option<Box<dyn FnOnce() -> anyhow::Result<()> + Send + Sync>>,
+        argv: Argv,
+        trace_id: TraceId,
+        async_cleanup: AsyncCleanupContext<'a>,
+        stdin: &'a mut Stdin,
+        restarter: &'a mut Restarter,
+        restarted_trace_id: Option<TraceId>,
+        runtime: &'a Runtime,
+        oncall: Option<String>,
+        client_metadata: Vec<ClientMetadata>,
+        isolation: FileNameBuf,
+    ) -> Self {
+        ClientCommandContext {
+            init,
+            immediate_config,
+            paths,
+            working_dir,
+            verbosity,
+            start_in_process_daemon,
+            argv,
+            trace_id,
+            async_cleanup,
+            stdin,
+            restarter,
+            restarted_trace_id,
+            runtime,
+            oncall,
+            client_metadata,
+            isolation,
+        }
+    }
+
     pub fn fbinit(&self) -> fbinit::FacebookInit {
         self.init
     }
 
     pub fn paths(&self) -> anyhow::Result<&InvocationPaths> {
         match &self.paths {
-            Ok(p) => Ok(p),
-            Err(e) => Err(e.dupe().into()),
+            InvocationPathsResult::Paths(p) => Ok(p),
+            InvocationPathsResult::OutsideOfRepo(e) | InvocationPathsResult::OtherError(e) => {
+                Err(e.dupe().into())
+            }
+        }
+    }
+
+    pub fn maybe_paths(&self) -> anyhow::Result<Option<&InvocationPaths>> {
+        match &self.paths {
+            InvocationPathsResult::Paths(p) => Ok(Some(p)),
+            InvocationPathsResult::OutsideOfRepo(_) => Ok(None), // commands like log don't need a root but still need to create an invocation record
+            InvocationPathsResult::OtherError(e) => Err(e.dupe().into()),
         }
     }
@@ -78,25 +131,53 @@ impl<'a> ClientCommandContext<'a> {
         self.runtime.block_on(func(self))
     }
 
-    pub fn instant_command<F, Fut>(self, command_name: &'static str, func: F) -> ExitResult
+    pub fn instant_command<F, Fut>(
+        self,
+        command_name: &'static str,
+        event_log_opts: &CommonEventLogOptions,
+        func: F,
+    ) -> anyhow::Result<()>
     where
         Fut: Future<Output = anyhow::Result<()>> + 'a,
         F: FnOnce(ClientCommandContext<'a>) -> Fut,
     {
         let mut recorder = try_get_invocation_recorder(
             &self,
-            CommonDaemonCommandOptions::default_ref(),
+            &event_log_opts,
             command_name,
             std::env::args().collect(),
            None,
         )?;
 
-        let result = self.runtime.block_on(func(self));
+        recorder.update_metadata_from_client_metadata(&self.client_metadata);
+
+        let result = self.with_runtime(func);
 
         recorder.instant_command_outcome(result.is_ok());
         result.into()
     }
 
+    /// Invoke a command without writing an event log.
+    /// (For example, we don't write logs in the `buck2 log` command.)
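+    ///
+    /// A minimal usage sketch (the command name and body are hypothetical):
+    ///
+    /// ```ignore
+    /// ctx.instant_command_no_log("my-log-cmd", |ctx| async move {
+    ///     // ... inspect state, print results ...
+    ///     Ok(())
+    /// })
+    /// ```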
+ pub fn instant_command_no_log( + self, + command_name: &'static str, + func: F, + ) -> anyhow::Result<()> + where + Fut: Future> + 'a, + F: FnOnce(ClientCommandContext<'a>) -> Fut, + { + self.instant_command( + command_name, + &CommonEventLogOptions { + no_event_log: true, + ..CommonEventLogOptions::default() + }, + func, + ) + } + pub fn stdin(&mut self) -> &mut Stdin { self.stdin } @@ -116,10 +197,11 @@ impl<'a> ClientCommandContext<'a> { cmd: &T, ) -> anyhow::Result { // TODO(cjhopman): Support non unicode paths? - let config_opts = cmd.common_opts(); + let config_opts = cmd.build_config_opts(); + let starlark_opts = cmd.starlark_opts(); + Ok(ClientContext { - config_overrides: config_opts.config_overrides(arg_matches)?, - target_platform: config_opts.target_platforms.clone().unwrap_or_default(), + config_overrides: config_opts.config_overrides(arg_matches, &self.immediate_config)?, host_platform: match config_opts.host_platform_override() { HostPlatformOverride::Default => GrpcHostPlatformOverride::DefaultPlatform, HostPlatformOverride::Linux => GrpcHostPlatformOverride::Linux, @@ -134,34 +216,36 @@ impl<'a> ClientCommandContext<'a> { } .into(), host_xcode_version: config_opts.host_xcode_version_override(), - disable_starlark_types: config_opts.disable_starlark_types, - unstable_typecheck: config_opts.unstable_typecheck, - skip_targets_with_duplicate_names: config_opts.skip_targets_with_duplicate_names, + disable_starlark_types: starlark_opts.disable_starlark_types, + unstable_typecheck: starlark_opts.unstable_typecheck, + skip_targets_with_duplicate_names: starlark_opts.skip_targets_with_duplicate_names, reuse_current_config: config_opts.reuse_current_config, sanitized_argv: cmd.sanitize_argv(self.argv.clone()).argv, exit_when_different_state: config_opts.exit_when_different_state, + preemptible: match config_opts.preemptible { + None => GrpcPreemptibleWhen::Never, + Some(PreemptibleWhen::Never) => GrpcPreemptibleWhen::Never, + Some(PreemptibleWhen::Always) => GrpcPreemptibleWhen::Always, + Some(PreemptibleWhen::OnDifferentState) => GrpcPreemptibleWhen::OnDifferentState, + } + .into(), argfiles: self .immediate_config .trace() .iter() .map(|path| path.to_string()) .collect(), - target_call_stacks: config_opts.target_call_stacks, + target_call_stacks: starlark_opts.target_call_stacks, ..self.empty_client_context(cmd.logging_name())? }) } /// A client context for commands where CommonConfigOptions are not provided. pub fn empty_client_context(&self, command_name: &str) -> anyhow::Result { - #[derive(Debug, thiserror::Error)] + #[derive(Debug, buck2_error::Error)] #[error("Current directory is not UTF-8")] struct CurrentDirIsNotUtf8; - let daemon_uuid = match std::env::var("BUCK2_DAEMON_UUID") { - Ok(daemon_uuid) => Some(daemon_uuid), - _ => None, - }; - Ok(ClientContext { working_dir: self .working_dir @@ -170,7 +254,6 @@ impl<'a> ClientCommandContext<'a> { .context(CurrentDirIsNotUtf8)? 
.to_owned(), config_overrides: Default::default(), - target_platform: Default::default(), host_platform: Default::default(), host_arch: Default::default(), host_xcode_version: Default::default(), @@ -181,10 +264,10 @@ impl<'a> ClientCommandContext<'a> { skip_targets_with_duplicate_names: false, trace_id: format!("{}", self.trace_id), reuse_current_config: false, - daemon_uuid, + daemon_uuid: get_possibly_nested_invocation_daemon_uuid(), sanitized_argv: Vec::new(), argfiles: Vec::new(), - buck2_hard_error: BUCK2_HARD_ERROR_ENV_VAR.get()?.cloned().unwrap_or_default(), + buck2_hard_error: buck2_hard_error_env()?.unwrap_or_default().to_owned(), command_name: command_name.to_owned(), exit_when_different_state: false, client_metadata: self @@ -192,17 +275,11 @@ impl<'a> ClientCommandContext<'a> { .iter() .map(ClientMetadata::to_proto) .collect(), + preemptible: Default::default(), }) } pub fn async_cleanup_context(&self) -> &AsyncCleanupContext<'a> { &self.async_cleanup } - - pub fn allow_vpnless_for_logging(&self) -> anyhow::Result { - Ok(self - .immediate_config - .daemon_startup_config()? - .allow_vpnless_for_logging) - } } diff --git a/app/buck2_client_ctx/src/client_metadata.rs b/app/buck2_client_ctx/src/client_metadata.rs index 139e16809836b..4eba08383d54d 100644 --- a/app/buck2_client_ctx/src/client_metadata.rs +++ b/app/buck2_client_ctx/src/client_metadata.rs @@ -12,7 +12,6 @@ use std::str::FromStr; use anyhow::Context as _; use once_cell::sync::Lazy; use regex::Regex; -use thiserror::Error; /// A key / value metadata pair provided by the client. This will be injected into Buck2's logging. #[derive(Clone, Debug, PartialEq, Eq)] @@ -52,7 +51,7 @@ impl FromStr for ClientMetadata { } } -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] pub enum ClientMetadataError { #[error( "Invalid client metadata format: `{0}`. Client metadata keys must be a `key=value` pair." diff --git a/app/buck2_client_ctx/src/command_outcome.rs b/app/buck2_client_ctx/src/command_outcome.rs index 4cc2312c8ba06..0bb2cf93022ea 100644 --- a/app/buck2_client_ctx/src/command_outcome.rs +++ b/app/buck2_client_ctx/src/command_outcome.rs @@ -7,6 +7,7 @@ * of this source tree. */ +use std::convert::Infallible; use std::ops::ControlFlow; use std::ops::FromResidual; use std::ops::Try; @@ -18,7 +19,6 @@ use crate::exit_result::ExitResult; /// Either "successful", in which case `R`, the response type, is available, or "failure", /// where a general `CommandError` was returned. Consider this a "failed successfully" indicator. /// At the point where this is returned, all event processing / logging should be handled. -#[derive(Debug)] #[must_use] pub enum CommandOutcome { /// The buckd client successfully returned the expected response. @@ -30,11 +30,11 @@ pub enum CommandOutcome { /// /// The user has already been presented an error message, and the CLI should exit with /// this status code. - Failure(Option), + Failure(ExitResult), } /// Small wrapper used in FromResidual -pub struct CommandFailure(Option); +pub struct CommandFailure(ExitResult); /// Allow the usage of '?' 
when going from a CommandOutcome -> ExitResult
 impl<R> Try for CommandOutcome<R> {
@@ -54,8 +54,14 @@ impl<R> Try for CommandOutcome<R> {
     }
 }
 
 impl FromResidual<CommandFailure> for ExitResult {
-    fn from_residual(_residual: CommandFailure) -> Self {
-        ExitResult::failure()
+    fn from_residual(residual: CommandFailure) -> Self {
+        residual.0
+    }
+}
+
+impl<T> FromResidual<CommandFailure> for Result<T, ExitResult> {
+    fn from_residual(residual: CommandFailure) -> Self {
+        Err(residual.0)
     }
 }
 
@@ -64,3 +70,21 @@ impl<R> FromResidual<CommandFailure> for CommandOutcome<R> {
         Self::Failure(residual.0)
     }
 }
+
+impl<R, E> FromResidual<Result<Infallible, E>> for CommandOutcome<R>
+where
+    E: Into<anyhow::Error>,
+{
+    fn from_residual(result: Result<Infallible, E>) -> Self {
+        match result {
+            Ok(infallible) => match infallible {},
+            Err(err) => Self::Failure(ExitResult::err(err.into())),
+        }
+    }
+}
+
+impl<T> FromResidual<CommandFailure> for anyhow::Result<T> {
+    fn from_residual(residual: CommandFailure) -> anyhow::Result<T> {
+        Err(residual.0.into())
+    }
+}
diff --git a/app/buck2_client_ctx/src/common.rs b/app/buck2_client_ctx/src/common.rs
index 8033554dad0df..b165c8ea2eb1f 100644
--- a/app/buck2_client_ctx/src/common.rs
+++ b/app/buck2_client_ctx/src/common.rs
@@ -21,41 +21,22 @@
 //! ...
 //! }
 //! ```
-use std::path::Path;
-use buck2_cli_proto::common_build_options::ExecutionStrategy;
+pub mod build;
+pub mod target_cfg;
+pub mod ui;
+
 use buck2_cli_proto::config_override::ConfigType;
 use buck2_cli_proto::ConfigOverride;
-use buck2_core::fs::fs_util;
 use dupe::Dupe;
 use gazebo::prelude::*;
-use termwiz::istty::IsTty;
 
-use crate::final_console::FinalConsole;
+use crate::common::ui::CommonConsoleOptions;
+use crate::immediate_config::ImmediateConfigContext;
 use crate::path_arg::PathArg;
-use crate::subscribers::superconsole::SuperConsoleConfig;
-
-pub const EVENT_LOG: &str = "--event-log";
-pub const NO_EVENT_LOG: &str = "--no-event-log";
-#[derive(
-    Debug,
-    serde::Serialize,
-    serde::Deserialize,
-    Clone,
-    Dupe,
-    Copy,
-    clap::ArgEnum
-)]
-#[clap(rename_all = "lower")]
-pub enum ConsoleType {
-    Simple,
-    SimpleNoTty,
-    SimpleTty,
-    Super,
-    Auto,
-    None,
-}
+pub const EVENT_LOG: &str = "event-log";
+pub const NO_EVENT_LOG: &str = "no-event-log";
 
 #[derive(
     Debug,
@@ -64,16 +45,14 @@ pub enum ConsoleType {
     Clone,
     Dupe,
     Copy,
-    clap::ArgEnum
+    clap::ValueEnum
 )]
 #[clap(rename_all = "lower")]
-pub enum UiOptions {
-    Dice,
-    DebugEvents,
-    /// I/O panel.
-    Io,
-    /// RE panel.
-    Re,
+pub enum HostPlatformOverride {
+    Default,
+    Linux,
+    MacOs,
+    Windows,
 }
 
 #[derive(
@@ -83,14 +62,21 @@ pub enum UiOptions {
     Clone,
     Dupe,
     Copy,
-    clap::ArgEnum
+    clap::ValueEnum,
+    Default
 )]
 #[clap(rename_all = "lower")]
-pub enum HostPlatformOverride {
-    Default,
-    Linux,
-    MacOs,
-    Windows,
+pub enum PreemptibleWhen {
+    /// (default) When another command starts that cannot run in parallel with this one, block that command.
+    #[default]
+    Never, // Read: "If I am Never, then never preempt me" (the default)
+    /// When another command starts, interrupt this command, *even if they could run in
+    /// parallel*. There is no good reason to use this other than that it provides slightly nicer
+    /// superconsole output.
+    Always,
+    /// When another command starts that cannot run in parallel with this one,
+    /// interrupt this command.
+ OnDifferentState, // Read; "if a command comes in, preempt me on different state" } #[derive( @@ -100,7 +86,7 @@ pub enum HostPlatformOverride { Clone, Dupe, Copy, - clap::ArgEnum + clap::ValueEnum )] #[clap(rename_all = "lower")] pub enum HostArchOverride { @@ -111,13 +97,14 @@ pub enum HostArchOverride { /// Defines options related to commands that involves a streaming daemon command. #[derive(Debug, clap::Parser, serde::Serialize, serde::Deserialize, Default)] -pub struct CommonDaemonCommandOptions { +#[clap(next_help_heading = "Event Log Options")] +pub struct CommonEventLogOptions { /// Write events to this log file #[clap(value_name = "PATH", long = EVENT_LOG)] pub event_log: Option, /// Do not write any event logs. Overrides --event-log. Used from `replay` to avoid recursive logging - #[clap(long = NO_EVENT_LOG, hidden = true)] + #[clap(long = NO_EVENT_LOG, hide = true)] pub no_event_log: bool, /// Write command invocation id into this file. @@ -128,22 +115,30 @@ pub struct CommonDaemonCommandOptions { /// regarding the stability of the format. #[clap(long, value_name = "PATH")] pub(crate) unstable_write_invocation_record: Option, + + /// Write the command report to this path. A command report is always + /// written to `buck-out/v2//command_report` even without this flag. + #[clap(long, value_name = "PATH")] + pub(crate) command_report_path: Option, } -impl CommonDaemonCommandOptions { +impl CommonEventLogOptions { pub fn default_ref() -> &'static Self { - static DEFAULT: CommonDaemonCommandOptions = CommonDaemonCommandOptions { + static DEFAULT: CommonEventLogOptions = CommonEventLogOptions { event_log: None, no_event_log: false, write_build_id: None, + command_report_path: None, unstable_write_invocation_record: None, }; &DEFAULT } } -/// Defines options for config and configuration related things. Any command that involves the build graph should include these options. +/// Defines options for config and configuration related things. Any command that involves the build +/// graph should include these options. #[derive(Debug, clap::Parser, serde::Serialize, serde::Deserialize, Default)] +#[clap(next_help_heading = "Buckconfig Options")] pub struct CommonBuildConfigurationOptions { #[clap( value_name = "SECTION.OPTION=VALUE", @@ -152,7 +147,7 @@ pub struct CommonBuildConfigurationOptions { help = "List of config options", // Needs to be explicitly set, otherwise will treat `-c a b c` -> [a, b, c] // rather than [a] and other positional arguments `b c`. 
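        // For example (illustrative invocation): `buck2 build -c foo.bar=1
        // -c foo.baz=2 //some:target` passes two separate overrides and one
        // target pattern, which is the behavior `num_args = 1` preserves.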
- number_of_values = 1 + num_args = 1 )] pub config_values: Vec, @@ -160,60 +155,44 @@ pub struct CommonBuildConfigurationOptions { value_name = "PATH", long = "config-file", help = "List of config file paths", - number_of_values = 1 + num_args = 1 )] pub config_files: Vec, - #[clap( - long = "target-platforms", - help = "Configuration target (one) to use to configure targets", - number_of_values = 1, - value_name = "PLATFORM" - )] - pub target_platforms: Option, - - #[clap(long, ignore_case = true, value_name = "HOST", arg_enum)] - fake_host: Option, + #[clap(long, ignore_case = true, value_name = "HOST", value_enum)] + pub fake_host: Option, - #[clap(long, ignore_case = true, value_name = "ARCH", arg_enum)] - fake_arch: Option, + #[clap(long, ignore_case = true, value_name = "ARCH", value_enum)] + pub fake_arch: Option, /// Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) #[clap(long, value_name = "VERSION-BUILD")] - fake_xcode_version: Option, + pub fake_xcode_version: Option, - /// Disable runtime type checking in Starlark interpreter. + /// Re-uses any `--config` values (inline or via modefiles) if there's + /// a previous command, otherwise the flag is ignored. /// - /// This option is not stable, and can be used only locally - /// to diagnose evaluation performance problems. - #[clap(long)] - pub disable_starlark_types: bool, - - /// Typecheck bzl and bxl files during evaluation. - #[clap(long, hidden(true))] - pub unstable_typecheck: bool, - - /// Record or show target call stacks. + /// If there is a previous command and `--reuse-current-config` is set, + /// then the old config is used, ignoring any overrides. /// - /// Starlark call stacks will be included in duplicate targets error. - /// - /// If a command outputs targets (like `targets` command), - /// starlark call stacks will be printed after the targets. - #[clap(long = "stack")] - pub target_call_stacks: bool, - - /// If there are targets with duplicate names in `BUCK` file, - /// skip all the duplicates but the first one. - /// This is a hack for TD. Do not use this option. - #[clap(long)] - pub(crate) skip_targets_with_duplicate_names: bool, - + /// If there is no previous command but the flag was set, then the flag is ignored, + /// the command behaves as if the flag was not set at all. #[clap(long)] pub reuse_current_config: bool, /// Used for exiting a concurrent command when a different state is detected. #[clap(long)] pub exit_when_different_state: bool, + + /// Used to configure when this command could be preempted by another command for the same isolation dir. + /// + /// Normally, when you run two commands - from different terminals, say - buck2 will attempt + /// to run them in parallel. However, if the two commands are based on different state, that + /// is they either have different configs or different filesystem states, buck2 cannot run them + /// in parallel. The default behavior in this case is to block the second command until the + /// first completes. 
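+    ///
+    /// A hedged example invocation (the target is made up; values follow the
+    /// lowercase `ValueEnum` spelling):
+    ///
+    /// ```text
+    /// buck2 build //foo:bar --preemptible ondifferentstate
+    /// ```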
+ #[clap(long, ignore_case = true, value_enum)] + pub preemptible: Option, } impl CommonBuildConfigurationOptions { @@ -224,6 +203,7 @@ impl CommonBuildConfigurationOptions { pub fn config_overrides( &self, matches: &clap::ArgMatches, + immediate_ctx: &ImmediateConfigContext<'_>, ) -> anyhow::Result> { fn with_indices<'a, T>( collection: &'a [T], @@ -241,42 +221,46 @@ impl CommonBuildConfigurationOptions { indices.into_iter().zip(collection) } - // Relative paths passed on the command line are relative to the cwd - // of the client, not the daemon, so perform path canonicalisation here. - fn resolve_config_file_argument(arg: &str) -> anyhow::Result { - if arg.contains("//") { - // Cell-relative path resolution would be performed by the daemon - return Ok(arg.to_owned()); - } - - let path = Path::new(arg); - if path.is_absolute() { - return Ok(arg.to_owned()); - } - - let abs_path = fs_util::canonicalize(path)?; - Ok(abs_path.to_string_lossy().into_owned()) - } - - let config_values_args = with_indices(&self.config_values, "config-values", matches).map( - |(index, config_value)| { - ( + let config_values_args = with_indices(&self.config_values, "config_values", matches) + .map(|(index, config_value)| { + let (cell, raw_arg) = match config_value.split_once("//") { + Some((cell, val)) if !cell.contains('=') => { + let cell = immediate_ctx + .resolve_alias_to_path_in_cwd(cell)? + .to_string(); + (Some(cell), val) + } + _ => (None, config_value.as_str()), + }; + + anyhow::Ok(( index, ConfigOverride { - config_override: config_value.clone(), + cell, + config_override: raw_arg.to_owned(), config_type: ConfigType::Value as i32, }, - ) - }, - ); + )) + }) + .collect::>>()?; - let config_file_args = with_indices(&self.config_files, "config-files", matches) - .map(|(index, unresolved_file)| { - let resolved_file = resolve_config_file_argument(unresolved_file)?; + let config_file_args = with_indices(&self.config_files, "config_files", matches) + .map(|(index, file)| { + let (cell, path) = match file.split_once("//") { + Some((cell, val)) => { + // This should also reject =? + let cell = immediate_ctx + .resolve_alias_to_path_in_cwd(cell)? + .to_string(); + (Some(cell), val) + } + _ => (None, file.as_str()), + }; Ok(( index, ConfigOverride { - config_override: resolved_file, + cell, + config_override: path.to_owned(), config_type: ConfigType::File as i32, }, )) @@ -310,251 +294,70 @@ impl CommonBuildConfigurationOptions { static DEFAULT: CommonBuildConfigurationOptions = CommonBuildConfigurationOptions { config_values: vec![], config_files: vec![], - target_platforms: None, fake_host: None, fake_arch: None, fake_xcode_version: None, - disable_starlark_types: false, - unstable_typecheck: false, - target_call_stacks: false, - skip_targets_with_duplicate_names: false, reuse_current_config: false, exit_when_different_state: false, + preemptible: Some(PreemptibleWhen::Never), }; &DEFAULT } + + pub fn reuse_current_config_ref() -> &'static Self { + static OPTS: CommonBuildConfigurationOptions = CommonBuildConfigurationOptions { + config_values: vec![], + config_files: vec![], + fake_host: None, + fake_arch: None, + fake_xcode_version: None, + reuse_current_config: true, + exit_when_different_state: false, + preemptible: Some(PreemptibleWhen::Never), + }; + &OPTS + } } -/// Defines common options for build-like commands (build, test, install). 
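The new `config_overrides` above splits each `--config` value on `//` to extract an optional cell alias, and falls back to treating the whole string as a bare `section.key=value` when the would-be cell contains `=`. A self-contained sketch of just that split, with illustrative values; the real code additionally resolves the alias against the client cwd via `ImmediateConfigContext`:

fn split_config_arg(arg: &str) -> (Option<&str>, &str) {
    match arg.split_once("//") {
        // `cell//section.key=value`: a cell alias, resolved by the real code
        // against the client cwd.
        Some((cell, rest)) if !cell.contains('=') => (Some(cell), rest),
        // No `//`, or the `//` sits inside the value (after `=`): no cell.
        _ => (None, arg),
    }
}

fn main() {
    assert_eq!(
        split_config_arg("fbcode//buck2.file_watcher=watchman"),
        (Some("fbcode"), "buck2.file_watcher=watchman")
    );
    assert_eq!(
        split_config_arg("buck2.file_watcher=watchman"),
        (None, "buck2.file_watcher=watchman")
    );
    // A `//` after `=` is part of the value, not a cell separator.
    assert_eq!(split_config_arg("a.b=http://x"), (None, "a.b=http://x"));
}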
-#[allow(rustdoc::invalid_html_tags)] -#[derive(Debug, clap::Parser, serde::Serialize, serde::Deserialize)] -pub struct CommonBuildOptions { - /// Print a build report +#[derive(Debug, clap::Parser, serde::Serialize, serde::Deserialize, Default)] +#[clap(next_help_heading = "Starlark Options")] +pub struct CommonStarlarkOptions { + /// Disable runtime type checking in Starlark interpreter. /// - /// --build-report=- will print the build report to stdout - /// --build-report= will write the build report to the file - #[clap(long = "build-report", value_name = "PATH")] - build_report: Option, - - /// Deprecated. Use --build-report=- - // TODO(cjhopman): this is probably only used by the e2e framework. remove it from there - #[clap(long = "print-build-report", hidden = true)] - print_build_report: bool, - - /// Number of threads to use during execution (default is # cores) - // TODO(cjhopman): This only limits the threads used for action execution and it doesn't work correctly with concurrent commands. - #[clap(short = 'j', long = "num-threads", value_name = "THREADS")] - pub num_threads: Option, - - /// Enable only local execution. Will reject actions that cannot execute locally. - #[clap(long, group = "build_strategy", env = "BUCK_OFFLINE_BUILD")] - local_only: bool, - - /// Enable only remote execution. Will reject actions that cannot execute remotely. - #[clap(long, group = "build_strategy")] - remote_only: bool, - - /// Enable hybrid execution. Will prefer executing actions that can execute locally on the - /// local host. - #[clap(long, group = "build_strategy")] - prefer_local: bool, - - /// Enable hybrid execution. Will prefer executing actions that can execute remotely on RE and will avoid racing local and remote execution. - #[clap(long, group = "build_strategy")] - prefer_remote: bool, - - /// Experimental: Disable all execution. - #[clap(long, group = "build_strategy")] - unstable_no_execution: bool, - - /// Do not perform remote cache queries or cache writes. If remote execution is enabled, the RE - /// service might still deduplicate actions, so for e.g. benchmarking, using a random isolation - /// dir is preferred. - #[clap(long, env = "BUCK_OFFLINE_BUILD")] - no_remote_cache: bool, - - /// Could be used to enable the action cache writes on the RE worker when no_remote_cache is specified - #[clap(long, requires("no-remote-cache"))] - write_to_cache_anyway: bool, - - /// Process dep files when they are generated (i.e. after running a command that produces dep - /// files), rather than when they are used (i.e. before re-running a command that previously - /// produced dep files). Use this when debugging commands that produce dep files. Note that - /// commands that previously produced dep files will not re-run: only dep files produced during - /// this command will be eagerly loaded. - #[clap(long)] - eager_dep_files: bool, - - #[clap(long)] - upload_all_actions: bool, - - /// If Buck hits an error, do as little work as possible before exiting. - #[clap(long, group = "fail-when")] - fail_fast: bool, - - /// If Buck hits an error, continue doing as much work as possible before exiting. - #[clap(long, group = "fail-when")] - keep_going: bool, - - /// If target is missing, then skip building instead of throwing error. - #[clap(long)] - skip_missing_targets: bool, - - /// If target is incompatible with the specified configuration, skip building instead of throwing error. 
- /// This does not apply to targets specified with glob patterns `/...` or `:` - /// which are skipped unconditionally. + /// This option is not stable, and can be used only locally + /// to diagnose evaluation performance problems. #[clap(long)] - skip_incompatible_targets: bool, -} - -impl CommonBuildOptions { - fn build_report(&self) -> (bool, String) { - match (self.print_build_report, &self.build_report) { - (false, None) => (false, "".to_owned()), - (_, Some(path)) if path != "-" => (true, path.to_owned()), - _ => (true, "".to_owned()), - } - } - - pub fn to_proto(&self) -> buck2_cli_proto::CommonBuildOptions { - let (unstable_print_build_report, unstable_build_report_filename) = self.build_report(); - let concurrency = self - .num_threads - .map(|num| buck2_cli_proto::Concurrency { concurrency: num }); - - buck2_cli_proto::CommonBuildOptions { - concurrency, - execution_strategy: if self.local_only { - ExecutionStrategy::LocalOnly as i32 - } else if self.remote_only { - ExecutionStrategy::RemoteOnly as i32 - } else if self.prefer_local { - ExecutionStrategy::HybridPreferLocal as i32 - } else if self.prefer_remote { - ExecutionStrategy::HybridPreferRemote as i32 - } else if self.unstable_no_execution { - ExecutionStrategy::NoExecution as i32 - } else { - ExecutionStrategy::Default as i32 - }, - unstable_print_build_report, - unstable_build_report_filename, - eager_dep_files: self.eager_dep_files, - upload_all_actions: self.upload_all_actions, - skip_cache_read: self.no_remote_cache, - skip_cache_write: self.no_remote_cache && !self.write_to_cache_anyway, - fail_fast: self.fail_fast, - keep_going: self.keep_going, - skip_missing_targets: self.skip_missing_targets, - skip_incompatible_targets: self.skip_incompatible_targets, - } - } -} + pub disable_starlark_types: bool, -/// Defines common console options for commands. -#[derive(Debug, clap::Parser, serde::Serialize, serde::Deserialize)] -pub struct CommonConsoleOptions { - #[clap( - long = "console", - help = "Which console to use for this command", - default_value = "auto", - ignore_case = true, - env = "BUCK_CONSOLE", - value_name = "super|simple|...", - arg_enum - )] - pub console_type: ConsoleType, + /// Typecheck bzl and bxl files during evaluation. + #[clap(long, hide = true)] + pub unstable_typecheck: bool, - /// Configure additional superconsole ui components. - /// - /// Accepts a comma-separated list of superconsole components to add. Possible values are: + /// Record or show target call stacks. /// - /// dice - shows information about evaluated dice nodes - /// debugevents - shows information about the flow of events from buckd + /// Starlark call stacks will be included in duplicate targets error. /// - /// These components can be turned on/off interactively. - /// Press 'h' for help when superconsole is active. - #[clap( - long = "ui", - ignore_case = true, - multiple = true, - number_of_values = 1, - arg_enum - )] - pub ui: Vec, - - #[clap( - long, - help = "Disable console interactions", - env = "BUCK_NO_INTERACTIVE_CONSOLE" - )] - pub no_interactive_console: bool, -} + /// If a command outputs targets (like `targets` command), + /// starlark call stacks will be printed after the targets. 
+ #[clap(long = "stack")] + pub target_call_stacks: bool, -impl Default for CommonConsoleOptions { - fn default() -> Self { - Self { - console_type: ConsoleType::Auto, - ui: Vec::new(), - no_interactive_console: false, - } - } + /// If there are targets with duplicate names in `BUCK` file, + /// skip all the duplicates but the first one. + /// This is a hack for TD. Do not use this option. + #[clap(long, hide = true)] + pub(crate) skip_targets_with_duplicate_names: bool, } -impl CommonConsoleOptions { +impl CommonStarlarkOptions { pub fn default_ref() -> &'static Self { - static OPTS: CommonConsoleOptions = CommonConsoleOptions { - console_type: ConsoleType::Auto, - ui: vec![], - no_interactive_console: false, - }; - &OPTS - } - - pub fn simple_ref() -> &'static Self { - static OPTS: CommonConsoleOptions = CommonConsoleOptions { - console_type: ConsoleType::Simple, - ui: vec![], - no_interactive_console: false, - }; - &OPTS - } - - pub fn none_ref() -> &'static Self { - static OPTS: CommonConsoleOptions = CommonConsoleOptions { - console_type: ConsoleType::None, - ui: vec![], - no_interactive_console: false, - }; - &OPTS - } - - pub fn final_console(&self) -> FinalConsole { - let is_tty = match self.console_type { - ConsoleType::Auto | ConsoleType::Simple => std::io::stderr().is_tty(), - ConsoleType::Super => true, - ConsoleType::SimpleNoTty => false, - ConsoleType::SimpleTty => true, - ConsoleType::None => false, + static DEFAULT: CommonStarlarkOptions = CommonStarlarkOptions { + disable_starlark_types: false, + unstable_typecheck: false, + target_call_stacks: false, + skip_targets_with_duplicate_names: false, }; - if is_tty { - FinalConsole::new_with_tty() - } else { - FinalConsole::new_without_tty() - } - } - - pub fn superconsole_config(&self) -> SuperConsoleConfig { - let mut config = SuperConsoleConfig::default(); - for option in &self.ui { - match option { - UiOptions::Dice => config.enable_dice = true, - UiOptions::DebugEvents => config.enable_debug_events = true, - UiOptions::Io => config.enable_io = true, - UiOptions::Re => config.enable_detailed_re = true, - } - } - config + &DEFAULT } } @@ -566,11 +369,22 @@ pub struct CommonCommandOptions { #[clap(flatten)] pub config_opts: CommonBuildConfigurationOptions, + /// Starlark options. + #[clap(flatten)] + pub starlark_opts: CommonStarlarkOptions, + /// UI options. #[clap(flatten)] pub console_opts: CommonConsoleOptions, /// Event-log options. #[clap(flatten)] - pub event_log_opts: CommonDaemonCommandOptions, + pub event_log_opts: CommonEventLogOptions, +} + +#[derive(Debug, PartialEq)] +pub enum PrintOutputsFormat { + Plain, + Simple, + Json, } diff --git a/app/buck2_client_ctx/src/common/build.rs b/app/buck2_client_ctx/src/common/build.rs new file mode 100644 index 0000000000000..37d72dfd5f876 --- /dev/null +++ b/app/buck2_client_ctx/src/common/build.rs @@ -0,0 +1,292 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use std::str::FromStr; + +use buck2_cli_proto::common_build_options::ExecutionStrategy; +use buck2_core::buck2_env_name; +use clap::builder::FalseyValueParser; +use clap::ArgGroup; +use tracing::warn; + +use crate::common::PrintOutputsFormat; + +#[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize)] +pub struct BuildReportOption { + /// Fill out the failures in build report as it was done by default in buck1. + fill_out_failures: bool, + + /// Include package relative paths in the output. + include_package_project_relative_paths: bool, +} + +impl FromStr for BuildReportOption { + type Err = anyhow::Error; + fn from_str(s: &str) -> Result { + let mut fill_out_failures = false; + let mut include_package_project_relative_paths = false; + + if s.to_lowercase() == "fill-out-failures" { + fill_out_failures = true; + } else if s.to_lowercase() == "package-project-relative-paths" { + include_package_project_relative_paths = true; + } else { + warn!( + "Incorrect syntax for build report option. Got: `{}` but expected one of `fill-out-failures, package-project-relative-paths`", + s.to_owned() + ) + } + Ok(BuildReportOption { + fill_out_failures, + include_package_project_relative_paths, + }) + } +} + +/// Defines common options for build-like commands (build, test, install). +#[derive(Debug, clap::Parser, serde::Serialize, serde::Deserialize)] +pub struct CommonBuildOptions { + /// Print a build report + /// + /// `--build-report=-` will print the build report to stdout + /// `--build-report=` will write the build report to the file + #[clap(long = "build-report", value_name = "PATH")] + build_report: Option, + + /// Comma separated list of validation names to run that are marked optional. + /// + /// By default, validations marked as optional are skipped. This option overrides the behaviour and executes those validations. + #[clap(long, value_name = "VALIDATION_NAMES", value_delimiter = ',')] + enable_optional_validations: Vec, + + /// Comma separated list of build report options. + /// + /// The following options are supported: + /// + /// `fill-out-failures`: + /// fill out failures the same way Buck1 would. + /// + /// `package-project-relative-paths`: + /// emit the project-relative path of packages for the targets that were built. + #[clap( + long = "build-report-options", + requires = "build_report", + value_delimiter = ',' + )] + build_report_options: Vec, + + /// Number of threads to use during execution (default is # cores) + // TODO(cjhopman): This only limits the threads used for action execution and it doesn't work correctly with concurrent commands. + #[clap(short = 'j', long = "num-threads", value_name = "THREADS")] + pub num_threads: Option, + + /// Enable only local execution. Will reject actions that cannot execute locally. + #[clap(long, group = "build_strategy", env = buck2_env_name!("BUCK_OFFLINE_BUILD"), value_parser = FalseyValueParser::new())] + local_only: bool, + + /// Enable only remote execution. Will reject actions that cannot execute remotely. + #[clap(long, group = "build_strategy")] + remote_only: bool, + + /// Enable hybrid execution. Will prefer executing actions that can execute locally on the + /// local host. + #[clap(long, group = "build_strategy")] + prefer_local: bool, + + /// Enable hybrid execution. Will prefer executing actions that can execute remotely on RE and will avoid racing local and remote execution. + #[clap(long, group = "build_strategy")] + prefer_remote: bool, + + /// Experimental: Disable all execution. 
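The execution-strategy flags in this hunk (`local_only`, `remote_only`, `prefer_local`, `prefer_remote`, `unstable_no_execution`) all carry the same clap group, `build_strategy`, which is what makes them mutually exclusive. A minimal sketch of that pattern, assuming clap 4 and reduced to three flags:

use clap::Parser;

#[derive(Debug, Parser)]
struct Strategy {
    #[clap(long, group = "build_strategy")]
    local_only: bool,

    #[clap(long, group = "build_strategy")]
    remote_only: bool,

    #[clap(long, group = "build_strategy")]
    prefer_local: bool,
}

fn main() {
    // One flag from the group parses fine.
    assert!(Strategy::try_parse_from(["prog", "--local-only"]).is_ok());
    // Two flags from the same group conflict, because a clap group defaults
    // to allowing at most one of its members per invocation.
    assert!(Strategy::try_parse_from(["prog", "--local-only", "--remote-only"]).is_err());
}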
+ #[clap(long, group = "build_strategy")] + unstable_no_execution: bool, + + /// Do not perform remote cache queries or cache writes. If remote execution is enabled, the RE + /// service might still deduplicate actions, so for e.g. benchmarking, using a random isolation + /// dir is preferred. + #[clap(long, env = buck2_env_name!("BUCK_OFFLINE_BUILD"), value_parser = FalseyValueParser::new())] + no_remote_cache: bool, + + /// Could be used to enable the action cache writes on the RE worker when no_remote_cache is specified + #[clap(long, requires = "no_remote_cache")] + write_to_cache_anyway: bool, + + /// Process dep files when they are generated (i.e. after running a command that produces dep + /// files), rather than when they are used (i.e. before re-running a command that previously + /// produced dep files). Use this when debugging commands that produce dep files. Note that + /// commands that previously produced dep files will not re-run: only dep files produced during + /// this command will be eagerly loaded. + #[clap(long)] + eager_dep_files: bool, + + /// Uploads every action to the RE service, regardless of whether the action needs to execute on RE. + /// + /// This is useful when debugging builds and trying to inspect actions which executed remotely. + /// It's possible that the action result is cached but the action itself has expired. In this case, + /// downloading the action itself would fail. Enabling this option would unconditionally upload + /// all actions, thus you will not hit any expiration issues. + #[clap(long)] + upload_all_actions: bool, + + /// If Buck hits an error, do as little work as possible before exiting. + /// + /// To illustrate the effect of this flag, consider an invocation of `build :foo :bar`. The + /// default behavior of buck is to do enough work to get a result for the builds of each of + /// `:foo` and `:bar`, and no more. This means that buck will continue to complete the build of + /// `:bar` after the build of `:foo` has failed; however, once one dependency of `:foo` has + /// failed, other dependencies will be cancelled unless they are needed by `:bar`. + /// + /// This flag changes the behavior of buck to not wait on `:bar` to complete once `:foo` has + /// failed. Generally, this flag only has an effect on builds that specify multiple targets. + /// + /// `--keep-going` changes the behavior of buck to not only wait on `:bar` once one dependency + /// of `:foo` has failed, but to additionally attempt to build other dependencies of `:foo` if + /// possible. + #[clap(long, group = "fail-when")] + fail_fast: bool, + + /// If Buck hits an error, continue doing as much work as possible before exiting. + /// + /// See `--fail-fast` for more details. + #[clap(long, group = "fail-when")] + keep_going: bool, + + /// If target is missing, then skip building instead of throwing error. + #[clap(long)] + skip_missing_targets: bool, + + /// If target is incompatible with the specified configuration, skip building instead of throwing error. + /// This does not apply to targets specified with glob patterns `/...` or `:` + /// which are skipped unconditionally. 
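`write_to_cache_anyway` above is tied to `no_remote_cache` through clap's `requires`, which in this migration now references the arg id (the field name) rather than the long flag name. A small sketch of the relationship, assuming clap 4:

use clap::Parser;

#[derive(Debug, Parser)]
struct Cache {
    #[clap(long)]
    no_remote_cache: bool,

    // `requires` points at the *arg id* (the field name), matching the
    // `requires = "no_remote_cache"` form in the hunk above.
    #[clap(long, requires = "no_remote_cache")]
    write_to_cache_anyway: bool,
}

fn main() {
    assert!(
        Cache::try_parse_from(["prog", "--no-remote-cache", "--write-to-cache-anyway"]).is_ok()
    );
    // The dependent flag is rejected on its own.
    assert!(Cache::try_parse_from(["prog", "--write-to-cache-anyway"]).is_err());
}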
+ #[clap(long)] + skip_incompatible_targets: bool, + + /// Materializes inputs for failed actions which ran on RE + #[clap(long)] + materialize_failed_inputs: bool, +} + +impl CommonBuildOptions { + fn build_report(&self) -> (bool, String) { + match &self.build_report { + None => (false, "".to_owned()), + Some(path) if path != "-" => (true, path.to_owned()), + _ => (true, "".to_owned()), + } + } + + pub fn to_proto(&self) -> buck2_cli_proto::CommonBuildOptions { + let (unstable_print_build_report, unstable_build_report_filename) = self.build_report(); + let unstable_include_failures_build_report = self + .build_report_options + .iter() + .any(|option| option.fill_out_failures); + let unstable_include_package_project_relative_paths = self + .build_report_options + .iter() + .any(|option| option.include_package_project_relative_paths); + let concurrency = self + .num_threads + .map(|num| buck2_cli_proto::Concurrency { concurrency: num }); + let enable_optional_validations = self + .enable_optional_validations + .iter() + .map(|s| s.to_owned()) + .collect(); + + buck2_cli_proto::CommonBuildOptions { + concurrency, + execution_strategy: if self.local_only { + ExecutionStrategy::LocalOnly as i32 + } else if self.remote_only { + ExecutionStrategy::RemoteOnly as i32 + } else if self.prefer_local { + ExecutionStrategy::HybridPreferLocal as i32 + } else if self.prefer_remote { + ExecutionStrategy::HybridPreferRemote as i32 + } else if self.unstable_no_execution { + ExecutionStrategy::NoExecution as i32 + } else { + ExecutionStrategy::Default as i32 + }, + unstable_print_build_report, + unstable_build_report_filename, + eager_dep_files: self.eager_dep_files, + upload_all_actions: self.upload_all_actions, + skip_cache_read: self.no_remote_cache, + skip_cache_write: self.no_remote_cache && !self.write_to_cache_anyway, + fail_fast: self.fail_fast, + keep_going: self.keep_going, + skip_missing_targets: self.skip_missing_targets, + skip_incompatible_targets: self.skip_incompatible_targets, + materialize_failed_inputs: self.materialize_failed_inputs, + enable_optional_validations, + unstable_include_failures_build_report, + unstable_include_package_project_relative_paths, + } + } +} + +/// Show-output options shared by `build` and `targets`. +#[derive(Debug, clap::Parser)] +#[clap(group( + // Make mutually exclusive. A command may have at most one of the flags in + // the following group. 
+ ArgGroup::new("output_args").args(&[ + "show_output", + "show_full_output", + "show_simple_output", + "show_full_simple_output", + "show_json_output", + "show_full_json_output", + ]) +))] +pub struct CommonOutputOptions { + /// Print the path to the output for each of the rules relative to the project root + #[clap(long)] + pub show_output: bool, + + /// Print the absolute path to the output for each of the rules + #[clap(long)] + pub show_full_output: bool, + + /// Print only the path to the output for each of the rules relative to the project root + #[clap(long)] + pub show_simple_output: bool, + + /// Print only the absolute path to the output for each of the rules + #[clap(long)] + pub show_full_simple_output: bool, + + /// Print the output paths relative to the project root, in JSON format + #[clap(long)] + pub show_json_output: bool, + + /// Print the output absolute paths, in JSON format + #[clap(long)] + pub show_full_json_output: bool, +} + +impl CommonOutputOptions { + pub fn format(&self) -> Option { + if self.show_output || self.show_full_output { + Some(PrintOutputsFormat::Plain) + } else if self.show_simple_output || self.show_full_simple_output { + Some(PrintOutputsFormat::Simple) + } else if self.show_json_output || self.show_full_json_output { + Some(PrintOutputsFormat::Json) + } else { + None + } + } + + pub fn is_full(&self) -> bool { + self.show_full_output || self.show_full_simple_output || self.show_full_json_output + } +} diff --git a/app/buck2_client_ctx/src/common/target_cfg.rs b/app/buck2_client_ctx/src/common/target_cfg.rs new file mode 100644 index 0000000000000..4bd3a3e9aa3c9 --- /dev/null +++ b/app/buck2_client_ctx/src/common/target_cfg.rs @@ -0,0 +1,141 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use buck2_cli_proto::TargetCfg; + +const HELP_HEADING: &str = "Target Configuration Options"; + +/// Defines options related to commands that involve a streaming daemon command. +#[derive(Debug, clap::Parser, serde::Serialize, serde::Deserialize, Default)] +#[clap(next_help_heading = HELP_HEADING)] +pub struct TargetCfgOptions { + #[clap( + long = "target-platforms", + help = "Configuration target (one) to use to configure targets", + num_args = 1, + value_name = "PLATFORM" + )] + pub target_platforms: Option, + + #[clap( + value_name = "VALUE", + long = "modifier", + short = 'm', + help = "A configuration modifier to configure all targets on the command line. This may be a constraint value target." + )] + pub cli_modifier: Vec, +} + +#[derive(Debug, clap::Parser, serde::Serialize, serde::Deserialize, Default)] +pub struct TargetCfgUnusedOptions { + /// This option is not used. + #[clap( + long = "target-platforms", + num_args = 1, + hide = true, + value_name = "PLATFORM" + )] + pub target_platforms: Option, + + /// This option is not used.
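A hypothetical consumer of `CommonOutputOptions::format()` and `is_full()` shown earlier in this hunk; `print_outputs` and its tuple input are illustrative only, not buck2 API:

#[derive(Debug, PartialEq)]
enum PrintOutputsFormat {
    Plain,
    Simple,
    Json,
}

// Hypothetical: print `(target, path)` pairs in the selected format.
fn print_outputs(outputs: &[(&str, &str)], format: PrintOutputsFormat) {
    match format {
        // `--show-output`: target followed by path.
        PrintOutputsFormat::Plain => {
            for (target, path) in outputs {
                println!("{target} {path}");
            }
        }
        // `--show-simple-output`: path only.
        PrintOutputsFormat::Simple => {
            for (_, path) in outputs {
                println!("{path}");
            }
        }
        // `--show-json-output`: one object mapping target to path.
        PrintOutputsFormat::Json => {
            let body: Vec<String> = outputs
                .iter()
                .map(|(t, p)| format!("\"{t}\": \"{p}\""))
                .collect();
            println!("{{{}}}", body.join(", "));
        }
    }
}

fn main() {
    print_outputs(
        &[("root//:foo", "buck-out/v2/gen/root/foo/foo")],
        PrintOutputsFormat::Json,
    );
}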
+ #[clap(value_name = "VALUE", long = "modifier")] + pub cli_modifier: Vec, +} + +impl TargetCfgOptions { + pub fn target_cfg(&self) -> TargetCfg { + TargetCfg { + target_platform: self.target_platforms.clone().unwrap_or_default(), + cli_modifiers: self.cli_modifiers(), + } + } + + fn cli_modifiers(&self) -> Vec { + self.cli_modifier.clone() + } +} + +#[derive(Debug, clap::Parser, serde::Serialize, serde::Deserialize, Default)] +#[clap(next_help_heading = HELP_HEADING)] +pub struct TargetCfgWithUniverseOptions { + /// Comma separated list of targets to construct a configured target universe. + /// + /// When the option is specified, command targets are resolved in this universe. + /// Additionally, `--target-platforms=` and `--modifier=` flags are used to configure the + /// universe targets, not the command targets. + /// + /// This argument is particularly recommended on most non-trivial cqueries. In the absence of + /// this argument, buck2 will use the target literals in your cquery expression as the value for + /// this argument, which may not be what you want. + #[clap(long, short = 'u', use_value_delimiter = true, verbatim_doc_comment)] + pub target_universe: Vec, + + #[clap(flatten)] + pub target_cfg: TargetCfgOptions, +} + +#[cfg(test)] +mod tests { + use assert_matches::assert_matches; + use clap::CommandFactory; + use clap::Parser; + + use super::*; + + fn parse(args: &[&str]) -> anyhow::Result { + Ok(TargetCfgOptions::try_parse_from( + std::iter::once("program").chain(args.iter().copied()), + )?) + } + + #[test] + fn opt_multiple() -> anyhow::Result<()> { + let opts = parse(&["--modifier", "value1", "--modifier", "value2"])?; + + assert_eq!(opts.cli_modifiers(), vec!["value1", "value2"]); + + Ok(()) + } + + #[test] + fn space_separated_fails() -> anyhow::Result<()> { + assert_matches!(parse(&["--modifier", "value1", "value2"]), Err(..)); + + Ok(()) + } + + #[test] + fn test_target_cfg_unused() { + #[derive(Debug, Eq, PartialEq)] + struct ReducedArg { + name: String, + long: Option, + value_delimiter: Option, + number_of_values: Option, + } + + fn args() -> Vec { + C::command() + .get_arguments() + .map(|a| ReducedArg { + name: a.get_id().as_str().to_owned(), + long: a.get_long().map(|s| s.to_owned()), + value_delimiter: a.get_value_delimiter(), + number_of_values: a.get_num_args(), + }) + .collect() + } + + let a = args::(); + let b = args::(); + + assert_eq!(a, b); + assert!(!a.is_empty()); + } +} diff --git a/app/buck2_client_ctx/src/common/ui.rs b/app/buck2_client_ctx/src/common/ui.rs new file mode 100644 index 0000000000000..25c3f1fd35f08 --- /dev/null +++ b/app/buck2_client_ctx/src/common/ui.rs @@ -0,0 +1,163 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree.
+ */ + +use buck2_core::buck2_env_name; +use clap::builder::FalseyValueParser; +use dupe::Dupe; +use termwiz::istty::IsTty; + +use crate::final_console::FinalConsole; +use crate::subscribers::superconsole::SuperConsoleConfig; +use crate::subscribers::superconsole::BUCK_NO_INTERACTIVE_CONSOLE; + +#[derive( + Debug, + serde::Serialize, + serde::Deserialize, + Clone, + Dupe, + Copy, + clap::ValueEnum +)] +#[clap(rename_all = "lower")] +pub enum ConsoleType { + Auto, + None, + Simple, + SimpleNoTty, + SimpleTty, + Super, +} + +#[derive( + Debug, + serde::Serialize, + serde::Deserialize, + Clone, + Dupe, + Copy, + clap::ValueEnum +)] +#[clap(rename_all = "lower")] +pub enum UiOptions { + Dice, + DebugEvents, + /// I/O panel. + Io, + /// RE panel. + Re, +} + +/// Defines common console options for commands. +#[derive(Debug, clap::Parser, serde::Serialize, serde::Deserialize)] +#[clap(next_help_heading = "Console Options")] +pub struct CommonConsoleOptions { + #[clap( + long = "console", + help = "Which console to use for this command", + default_value = "auto", + ignore_case = true, + env = buck2_env_name!("BUCK_CONSOLE"), + value_name = "super|simple|...", + value_enum + )] + pub console_type: ConsoleType, + + /// Configure additional superconsole ui components. + /// + /// Accepts a comma-separated list of superconsole components to add. Possible values are: + /// + /// dice - shows information about evaluated dice nodes + /// debugevents - shows information about the flow of events from buckd + /// + /// These components can be turned on/off interactively. + /// Press 'h' for help when superconsole is active. + #[clap( + long = "ui", + ignore_case = true, + num_args = 1.., + value_enum, + )] + pub ui: Vec, + + #[clap( + long, + help = "Disable console interactions", + env = buck2_env_name!(BUCK_NO_INTERACTIVE_CONSOLE), + value_parser = FalseyValueParser::new(), + )] + pub no_interactive_console: bool, +} + +impl Default for CommonConsoleOptions { + fn default() -> Self { + Self { + console_type: ConsoleType::Auto, + ui: Vec::new(), + no_interactive_console: false, + } + } +} + +impl CommonConsoleOptions { + pub fn default_ref() -> &'static Self { + static OPTS: CommonConsoleOptions = CommonConsoleOptions { + console_type: ConsoleType::Auto, + ui: vec![], + no_interactive_console: false, + }; + &OPTS + } + + pub fn simple_ref() -> &'static Self { + static OPTS: CommonConsoleOptions = CommonConsoleOptions { + console_type: ConsoleType::Simple, + ui: vec![], + no_interactive_console: false, + }; + &OPTS + } + + pub fn none_ref() -> &'static Self { + static OPTS: CommonConsoleOptions = CommonConsoleOptions { + console_type: ConsoleType::None, + ui: vec![], + no_interactive_console: false, + }; + &OPTS + } + + pub fn final_console(&self) -> FinalConsole { + let is_tty = match self.console_type { + ConsoleType::Auto | ConsoleType::Simple => std::io::stderr().is_tty(), + ConsoleType::Super => true, + ConsoleType::SimpleNoTty => false, + ConsoleType::SimpleTty => true, + ConsoleType::None => false, + }; + if is_tty { + FinalConsole::new_with_tty() + } else { + FinalConsole::new_without_tty() + } + } + + pub fn superconsole_config(&self) -> SuperConsoleConfig { + let mut config = SuperConsoleConfig::default(); + for option in &self.ui { + match option { + UiOptions::Dice => config.enable_dice = true, + UiOptions::DebugEvents => config.enable_debug_events = true, + UiOptions::Io => config.enable_io = true, + UiOptions::Re => config.enable_detailed_re = true, + } + } + config + } +} diff --git 
a/app/buck2_client_ctx/src/console_interaction_stream.rs b/app/buck2_client_ctx/src/console_interaction_stream.rs index 3fb2422db595f..bbdc4b1174aa0 100644 --- a/app/buck2_client_ctx/src/console_interaction_stream.rs +++ b/app/buck2_client_ctx/src/console_interaction_stream.rs @@ -7,6 +7,7 @@ * of this source tree. */ +use strum::EnumIter; use tokio::io::AsyncReadExt; use crate::stdin::Stdin; @@ -124,16 +125,82 @@ mod interactive_terminal { use interactive_terminal::InteractiveTerminal; +#[derive(Debug, EnumIter)] +pub enum SuperConsoleToggle { + Dice, + DebugEvents, + TwoLinesMode, + DetailedRE, + Io, + TargetConfigurations, + ExpandedProgress, + Commands, + IncrLines, + DecrLines, + Help, +} + +impl SuperConsoleToggle { + pub fn description(&self) -> &str { + match self { + SuperConsoleToggle::Dice => "DICE", + SuperConsoleToggle::DebugEvents => "debug events", + SuperConsoleToggle::TwoLinesMode => "two lines mode", + SuperConsoleToggle::DetailedRE => "detailed RE", + SuperConsoleToggle::Io => "I/O counters", + SuperConsoleToggle::TargetConfigurations => "target configurations", + SuperConsoleToggle::ExpandedProgress => "expanded progress", + SuperConsoleToggle::Commands => "commands", + SuperConsoleToggle::IncrLines => "more lines", + SuperConsoleToggle::DecrLines => "less lines", + SuperConsoleToggle::Help => "help", + } + } + + pub fn key(&self) -> char { + match self { + SuperConsoleToggle::Dice => 'd', + SuperConsoleToggle::DebugEvents => 'e', + SuperConsoleToggle::TwoLinesMode => '2', + SuperConsoleToggle::DetailedRE => 'r', + SuperConsoleToggle::Io => 'i', + SuperConsoleToggle::TargetConfigurations => 'p', + SuperConsoleToggle::ExpandedProgress => 'x', + SuperConsoleToggle::Commands => 'c', + SuperConsoleToggle::IncrLines => '+', + SuperConsoleToggle::DecrLines => '-', + SuperConsoleToggle::Help => '?', + } + } +} + #[async_trait::async_trait] -pub trait ConsoleInteraction: Send + Sync { - async fn char(&mut self) -> anyhow::Result; +pub trait SuperConsoleInteraction: Send + Sync { + async fn toggle(&mut self) -> anyhow::Result>; } #[async_trait::async_trait] -impl<'a> ConsoleInteraction for ConsoleInteractionStream<'a> { - async fn char(&mut self) -> anyhow::Result { +impl<'a> SuperConsoleInteraction for ConsoleInteractionStream<'a> { + async fn toggle(&mut self) -> anyhow::Result> { match self.stdin.read_u8().await { - Ok(c) => Ok(c.into()), + Ok(c) => { + let c: char = c.into(); + let console_toggle = match c { + 'd' => Some(SuperConsoleToggle::Dice), + 'e' => Some(SuperConsoleToggle::DebugEvents), + '2' => Some(SuperConsoleToggle::TwoLinesMode), + 'r' => Some(SuperConsoleToggle::DetailedRE), + 'i' => Some(SuperConsoleToggle::Io), + 'p' => Some(SuperConsoleToggle::TargetConfigurations), + 'x' => Some(SuperConsoleToggle::ExpandedProgress), + 'c' => Some(SuperConsoleToggle::Commands), + '+' => Some(SuperConsoleToggle::IncrLines), + '-' => Some(SuperConsoleToggle::DecrLines), + '?' | 'h' => Some(SuperConsoleToggle::Help), + _ => None, + }; + Ok(console_toggle) + } // NOTE: An EOF here would be reported as "unexpected" because we asked for a u8. 
Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof @@ -146,11 +213,11 @@ impl<'a> ConsoleInteraction for ConsoleInteractionStream<'a> { } } -pub struct NoopConsoleInteraction; +pub struct NoopSuperConsoleInteraction; #[async_trait::async_trait] -impl ConsoleInteraction for NoopConsoleInteraction { - async fn char(&mut self) -> anyhow::Result { +impl SuperConsoleInteraction for NoopSuperConsoleInteraction { + async fn toggle(&mut self) -> anyhow::Result> { futures::future::pending().await } } diff --git a/app/buck2_client_ctx/src/daemon/mod.rs b/app/buck2_client_ctx/src/daemon.rs similarity index 100% rename from app/buck2_client_ctx/src/daemon/mod.rs rename to app/buck2_client_ctx/src/daemon.rs diff --git a/app/buck2_client_ctx/src/daemon/client.rs b/app/buck2_client_ctx/src/daemon/client.rs new file mode 100644 index 0000000000000..89dc87e18da57 --- /dev/null +++ b/app/buck2_client_ctx/src/daemon/client.rs @@ -0,0 +1,625 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::fs::create_dir_all; +use std::fs::File; +use std::mem; +use std::time::Duration; + +use anyhow::Context; +use async_trait::async_trait; +use buck2_cli_proto::daemon_api_client::*; +use buck2_cli_proto::new_generic::NewGenericRequest; +use buck2_cli_proto::new_generic::NewGenericResponse; +use buck2_cli_proto::*; +use buck2_common::daemon_dir::DaemonDir; +use buck2_core::fs::fs_util; +use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; +use buck2_data::error::ErrorTag; +use buck2_event_log::stream_value::StreamValue; +use fs4::FileExt; +use futures::future::BoxFuture; +use futures::pin_mut; +use futures::stream; +use futures::Stream; +use futures::StreamExt; +use futures::TryStreamExt; +use tonic::codegen::InterceptedService; +use tonic::transport::Channel; +use tonic::Request; +use tonic::Status; + +use crate::command_outcome::CommandOutcome; +use crate::console_interaction_stream::ConsoleInteractionStream; +use crate::daemon::client::connect::BuckAddAuthTokenInterceptor; +use crate::events_ctx::EventsCtx; +use crate::events_ctx::PartialResultCtx; +use crate::events_ctx::PartialResultHandler; +use crate::file_tailers::tailers::FileTailers; +use crate::subscribers::observer::ErrorObserver; + +pub mod connect; +pub mod kill; + +use crate::startup_deadline::StartupDeadline; + +#[derive(Debug, buck2_error::Error)] +enum LifecycleError { + #[error("Missing `{}` file in `{}` directory", BuckdLifecycleLock::BUCKD_LIFECYCLE, _0.display())] + MissingLifecycle(AbsNormPathBuf), +} + +/// We need to make sure that all calls to the daemon in buckd flush the tailers after completion. +/// The connector wraps all buckd calls with flushing. 
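The wrapper described in the doc comment above is implemented later in this file by `FlushingBuckdClient`, whose macros bracket every call with `enter()`/`exit()`. A condensed sketch of the pattern with stand-in types; `Client` and `status_with_flushing` are not the buck2 names:

struct Client;

impl Client {
    fn open_tailers(&mut self) -> anyhow::Result<()> {
        // Stand-in for `FileTailers::new(&self.daemon_dir)`.
        Ok(())
    }

    async fn flush(&mut self) -> anyhow::Result<()> {
        // Stand-in for `events_ctx.flush(...)`.
        Ok(())
    }

    async fn status(&mut self) -> anyhow::Result<String> {
        Ok("ok".to_owned())
    }
}

async fn status_with_flushing(client: &mut Client) -> anyhow::Result<String> {
    client.open_tailers()?; // enter(): start tailing the daemon's log files
    let res = client.status().await; // the wrapped daemon call
    client.flush().await?; // exit(): flush tailers even if the call failed
    res
}

fn main() -> anyhow::Result<()> {
    let mut client = Client;
    let status = futures::executor::block_on(status_with_flushing(&mut client))?;
    assert_eq!(status, "ok");
    Ok(())
}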
+pub struct BuckdClientConnector<'a> { + client: BuckdClient<'a>, +} + +impl<'a> BuckdClientConnector<'a> { + pub fn with_flushing(&mut self) -> FlushingBuckdClient<'_, 'a> { + FlushingBuckdClient { + inner: &mut self.client, + } + } + + pub fn daemon_constraints(&self) -> &buck2_cli_proto::DaemonConstraints { + &self.client.constraints + } + + pub fn error_observers(&self) -> impl Iterator { + self.client.events_ctx.subscribers.error_observers() + } +} + +pub struct BuckdLifecycleLock { + daemon_dir: DaemonDir, + lock_file: File, +} + +impl BuckdLifecycleLock { + const BUCKD_LIFECYCLE: &'static str = "buckd.lifecycle"; + + pub async fn lock_with_timeout( + daemon_dir: DaemonDir, + deadline: StartupDeadline, + ) -> anyhow::Result { + create_dir_all(&daemon_dir.path)?; + let lifecycle_path = daemon_dir.path.as_path().join(Self::BUCKD_LIFECYCLE); + let file = File::create(lifecycle_path)?; + let fileref = &file; + deadline + .retrying( + "locking buckd lifecycle", + Duration::from_millis(5), + Duration::from_millis(100), + || async { Ok(fileref.try_lock_exclusive()?) }, + ) + .await?; + + Ok(BuckdLifecycleLock { + lock_file: file, + daemon_dir, + }) + } + + /// Remove everything except `buckd.lifecycle` file which is the lock file. + pub fn clean_daemon_dir(&self) -> anyhow::Result<()> { + let mut seen_lifecycle = false; + for p in fs_util::read_dir(&self.daemon_dir.path)? { + let p = p?; + if p.file_name() == Self::BUCKD_LIFECYCLE { + seen_lifecycle = true; + continue; + } + fs_util::remove_all(p.path())?; + } + if !seen_lifecycle { + // Self-check. + return Err(LifecycleError::MissingLifecycle(self.daemon_dir.path.clone()).into()); + } + Ok(()) + } + + pub fn daemon_dir(&self) -> &DaemonDir { + &self.daemon_dir + } +} + +impl Drop for BuckdLifecycleLock { + fn drop(&mut self) { + self.lock_file + .unlock() + .expect("Unexpected failure to unlock buckd.lifecycle file.") + } +} + +/// This provides a thin wrapper around the proto-generated DaemonApiClient and hides +/// some of the complexity/verbosity of making calls with that. For example, the user +/// doesn't need to deal with tonic::Response/Request and this may provide functions +/// that take more primitive types than the protobuf structure itself. +pub struct BuckdClient<'a> { + client: DaemonApiClient>, + constraints: buck2_cli_proto::DaemonConstraints, + daemon_dir: DaemonDir, + // TODO(brasselsprouts): events_ctx should own tailers + tailers: Option, + pub(crate) events_ctx: EventsCtx<'a>, +} + +#[derive(Debug, buck2_error::Error)] +enum GrpcToStreamError { + #[error("buck daemon returned an empty CommandProgress")] + EmptyCommandProgress, +} + +/// Convert tonic error to our error. +/// +/// This function **must** be used explicitly to convert the error, because we want a tag. +pub(crate) fn tonic_status_to_error(status: tonic::Status) -> anyhow::Error { + let mut tags = vec![ErrorTag::ClientGrpc]; + if status.code() == tonic::Code::ResourceExhausted { + // The error looks like this: + // ``` + // status: ResourceExhausted + // message: "Cannot return body with more than 4GB of data but got 4294992775 bytes" + // details: [], metadata: MetadataMap { headers: {} } + // ``` + if status + .message() + .contains("Cannot return body with more than") + { + tags.push(ErrorTag::GrpcResponseMessageTooLarge); + } + } + buck2_error::Error::from(status).tag(tags).into() +} + +/// Translates a tonic streaming response into a stream of StreamValues, the set of things that can flow across the gRPC +/// event stream. 
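`BuckdLifecycleLock` above serializes clients that mutate the daemon directory by holding an exclusive advisory lock on `buckd.lifecycle`, retrying until a deadline. A simplified sketch of the same idea, assuming an `fs4` version whose `FileExt::try_lock_exclusive` returns `io::Result<()>` as the code above implies, with the retry/backoff and error handling reduced to essentials:

use std::fs::File;
use std::path::Path;
use std::time::Duration;
use std::time::Instant;

use fs4::FileExt;

fn lock_lifecycle(path: &Path, deadline: Duration) -> anyhow::Result<File> {
    let file = File::create(path)?;
    let start = Instant::now();
    loop {
        match file.try_lock_exclusive() {
            // Lock is held until `file` is dropped (or unlocked explicitly).
            Ok(()) => return Ok(file),
            Err(_) if start.elapsed() < deadline => {
                std::thread::sleep(Duration::from_millis(5));
            }
            Err(e) => return Err(e.into()),
        }
    }
}

fn main() -> anyhow::Result<()> {
    let path = std::env::temp_dir().join("buckd.lifecycle");
    let _lock = lock_lifecycle(&path, Duration::from_millis(100))?;
    Ok(())
}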
+fn grpc_to_stream( + response: anyhow::Result>>, +) -> impl Stream> { + let stream = match response { + Ok(response) => response.into_inner(), + Err(e) => return futures::stream::once(futures::future::ready(Err(e))).left_stream(), + }; + + let stream = stream + .map_err(tonic_status_to_error) + .map_ok(|e| stream::iter(e.messages.into_iter().map(anyhow::Ok))) + .try_flatten(); + + stream::unfold(stream, |mut stream| async { + let msg = match stream.try_next().await { + Ok(Some(msg)) => msg, + Ok(None) => return None, + Err(e) => return Some((Err(e), stream)), + }; + let value = match msg.progress { + Some(command_progress::Progress::Event(e)) => Some(Ok(StreamValue::Event(e))), + Some(command_progress::Progress::Result(res)) => Some(Ok(StreamValue::Result(res))), + Some(command_progress::Progress::PartialResult(res)) => { + Some(Ok(StreamValue::PartialResult(res))) + } + None => Some(Err(GrpcToStreamError::EmptyCommandProgress.into())), + }; + + value.map(|v| (v, stream)) + }) + .right_stream() +} + +impl<'a> BuckdClient<'a> { + fn open_tailers(&mut self) -> anyhow::Result<()> { + let tailers = FileTailers::new(&self.daemon_dir)?; + self.tailers = Some(tailers); + + Ok(()) + } + + /// Some commands stream events back from the server. + /// For these commands, we want to be able to manipulate CLI state. + async fn stream<'i, T, Res, Handler, Command>( + &mut self, + command: Command, + request: T, + partial_result_handler: &mut Handler, + console_interaction: Option>, + ) -> anyhow::Result> + where + Command: for<'b> FnOnce( + &'b mut DaemonApiClient>, + Request, + ) -> BoxFuture< + 'b, + Result>, Status>, + >, + Res: TryFrom, + Handler: PartialResultHandler, + { + let Self { + client, events_ctx, .. + } = self; + + let response = command(client, Request::new(request)) + .await + .map_err(tonic_status_to_error) + .context("Error dispatching request"); + let stream = grpc_to_stream(response); + pin_mut!(stream); + events_ctx + .unpack_stream( + partial_result_handler, + stream, + self.tailers.take(), + console_interaction, + ) + .await + } + + pub async fn status(&mut self, snapshot: bool) -> anyhow::Result { + let outcome = self + .events_ctx + // Safe to unwrap tailers here because they are instantiated prior to a command being called. + .unpack_oneshot(mem::take(&mut self.tailers), { + self.client.status(Request::new(StatusRequest { snapshot })) + }) + .await; + // TODO(nmj): We have a number of things that wish to use status() and return an anyhow::Result, + // for now we'll just turn a "CommandMessage" into an error, but that's really not what we + // want long term. + match outcome?
{ + CommandOutcome::Success(r) => Ok(r), + CommandOutcome::Failure(_) => { + Err(anyhow::anyhow!("Unexpected failure message in status()")) + } + } + } + + pub async fn set_log_filter(&mut self, req: SetLogFilterRequest) -> anyhow::Result<()> { + self.client.set_log_filter(Request::new(req)).await?; + + Ok(()) + } +} + +pub struct FlushingBuckdClient<'a, 'b> { + inner: &'a mut BuckdClient<'b>, +} + +impl<'a, 'b> FlushingBuckdClient<'a, 'b> { + fn enter(&mut self) -> anyhow::Result<()> { + self.inner.open_tailers()?; + Ok(()) + } + + async fn exit(&mut self) -> anyhow::Result<()> { + self.inner + .events_ctx + .flush(mem::take(&mut self.inner.tailers)) + .await?; + + Ok(()) + } +} + +pub enum NoPartialResult {} + +impl TryFrom for NoPartialResult { + type Error = buck2_cli_proto::partial_result::PartialResult; + + fn try_from(v: buck2_cli_proto::partial_result::PartialResult) -> Result { + Err(v) + } +} + +pub struct NoPartialResultHandler; + +#[async_trait] +impl PartialResultHandler for NoPartialResultHandler { + type PartialResult = NoPartialResult; + + async fn handle_partial_result( + &mut self, + _ctx: PartialResultCtx<'_, '_>, + partial_res: Self::PartialResult, + ) -> anyhow::Result<()> { + match partial_res {} + } +} + +/// Receives StdoutBytes, writes them to stdout. +pub struct StdoutPartialResultHandler; + +#[async_trait] +impl PartialResultHandler for StdoutPartialResultHandler { + type PartialResult = buck2_cli_proto::StdoutBytes; + + async fn handle_partial_result( + &mut self, + mut ctx: PartialResultCtx<'_, '_>, + partial_res: Self::PartialResult, + ) -> anyhow::Result<()> { + ctx.stdout(&partial_res.data).await + } +} + +/// Implement a streaming method with full event reporting. +macro_rules! stream_method { + ($method: ident, $req: ty, $res: ty, $message: ty) => { + stream_method!($method, $method, $req, $res, $message); + }; + + ($method: ident, $grpc_method: ident, $req: ty, $res: ty, $message: ty) => { + pub async fn $method( + &mut self, + req: $req, + console_interaction: Option>, + handler: &mut impl PartialResultHandler, + ) -> anyhow::Result> { + self.enter()?; + let res = self + .inner + .stream( + |d, r| Box::pin(DaemonApiClient::$grpc_method(d, r)), + req, + // For now we only support handlers that can be constructed like so, and we + // don't let anything go out. Eventually if we wanted to stream structured + // data, that could change. + handler, + console_interaction, + ) + .await; + self.exit().await?; + res + } + }; +} + +/// Implement a bi-directional streaming method with full event reporting. +macro_rules! bidirectional_stream_method { + ($method: ident, $req: ty, $res: ty, $message: ty) => { + bidirectional_stream_method!($method, $method, $req, $res, $message); + }; + + ($method: ident, $grpc_method: ident, $req: ty, $res: ty, $message: ty) => { + pub async fn $method( + &mut self, + context: ClientContext, + requests: impl Stream + Send + Sync + 'static, + handler: &mut impl PartialResultHandler, + ) -> anyhow::Result> { + self.enter()?; + let req = create_client_stream(context, requests); + let res = self + .inner + .stream( + |d, r| Box::pin(DaemonApiClient::$method(d, r)), + req, + handler, + None, + ) + .await; + self.exit().await?; + res + } + }; +} + +/// Implement a oneshot method with full event reporting. +macro_rules! 
oneshot_method { + ($method: ident, $req: ty, $res: ty) => { + oneshot_method!($method, $method, $req, $res); + }; + + ($method: ident, $grpc_method: ident, $req: ty, $res: ty) => { + pub async fn $method(&mut self, req: $req) -> anyhow::Result> { + self.enter()?; + let res = self + .inner + .events_ctx + .unpack_oneshot(mem::take(&mut self.inner.tailers), { + self.inner.client.$method(Request::new(req)) + }) + .await; + self.exit().await?; + res + } + }; +} + +/// Implement a method that does not produce a CommandResult and does not produce any events. +macro_rules! debug_method { + ($method: ident, $req: ty, $res: ty) => { + debug_method!($method, $method, $req, $res); + }; + + ($method: ident, $grpc_method: ident, $req: ty, $res: ty) => { + pub async fn $method(&mut self, req: $req) -> anyhow::Result<$res> { + self.enter()?; + let out = self.inner.client.$method(Request::new(req)).await; + self.exit().await?; + Ok(out?.into_inner()) + } + }; +} + +/// Wrap a method that exists on the BuckdClient, with flushing. +macro_rules! wrap_method { + ($method: ident ($($param: ident : $param_type: ty),*), $res: ty) => { + pub async fn $method(&mut self, $($param: $param_type)*) -> anyhow::Result<$res> { + self.enter()?; + let out = self + .inner + .$method($($param)*) + .await; + self.exit().await?; + out + } + }; + } + +impl<'a, 'b> FlushingBuckdClient<'a, 'b> { + stream_method!( + aquery, + AqueryRequest, + AqueryResponse, + buck2_cli_proto::StdoutBytes + ); + stream_method!( + cquery, + CqueryRequest, + CqueryResponse, + buck2_cli_proto::StdoutBytes + ); + stream_method!( + uquery, + UqueryRequest, + UqueryResponse, + buck2_cli_proto::StdoutBytes + ); + stream_method!( + targets, + TargetsRequest, + TargetsResponse, + buck2_cli_proto::StdoutBytes + ); + stream_method!( + targets_show_outputs, + TargetsRequest, + TargetsShowOutputsResponse, + NoPartialResult + ); + stream_method!( + ctargets, + ConfiguredTargetsRequest, + ConfiguredTargetsResponse, + NoPartialResult + ); + stream_method!(build, BuildRequest, BuildResponse, NoPartialResult); + stream_method!(bxl, BxlRequest, BxlResponse, buck2_cli_proto::StdoutBytes); + stream_method!(test, TestRequest, TestResponse, NoPartialResult); + stream_method!(install, InstallRequest, InstallResponse, NoPartialResult); + stream_method!( + audit, + GenericRequest, + GenericResponse, + buck2_cli_proto::StdoutBytes + ); + stream_method!( + starlark, + GenericRequest, + GenericResponse, + buck2_cli_proto::StdoutBytes + ); + stream_method!( + new_generic_impl, + NewGenericRequestMessage, + NewGenericResponseMessage, + NoPartialResult + ); + stream_method!( + clean_stale, + CleanStaleRequest, + CleanStaleResponse, + NoPartialResult + ); + stream_method!( + file_status, + FileStatusRequest, + GenericResponse, + buck2_cli_proto::StdoutBytes + ); + stream_method!( + profile, + profile2, + ProfileRequest, + ProfileResponse, + NoPartialResult + ); + stream_method!( + allocative, + AllocativeRequest, + AllocativeResponse, + NoPartialResult + ); + + bidirectional_stream_method!(lsp, LspRequest, LspResponse, LspMessage); + bidirectional_stream_method!(dap, DapRequest, DapResponse, DapMessage); + bidirectional_stream_method!( + subscription, + SubscriptionRequestWrapper, + SubscriptionCommandResponse, + SubscriptionResponseWrapper + ); + + oneshot_method!(flush_dep_files, FlushDepFilesRequest, GenericResponse); + + oneshot_method!(unstable_crash, UnstableCrashRequest, GenericResponse); + debug_method!( + unstable_heap_dump, + UnstableHeapDumpRequest, + 
UnstableHeapDumpResponse + ); + debug_method!( + unstable_allocator_stats, + UnstableAllocatorStatsRequest, + UnstableAllocatorStatsResponse + ); + debug_method!( + unstable_dice_dump, + UnstableDiceDumpRequest, + UnstableDiceDumpResponse + ); + + wrap_method!(status(snapshot: bool), StatusResponse); + wrap_method!(set_log_filter(log_filter: SetLogFilterRequest), ()); + stream_method!(trace_io, TraceIoRequest, TraceIoResponse, NoPartialResult); + + pub async fn new_generic( + &mut self, + context: buck2_cli_proto::ClientContext, + req: NewGenericRequest, + stdin: Option>, + ) -> anyhow::Result> { + let req = serde_json::to_string(&req).context("Could not serialize `NewGenericRequest`")?; + let req = buck2_cli_proto::NewGenericRequestMessage { + context: Some(context), + new_generic_request: req, + }; + let command_outcome: CommandOutcome = self + .new_generic_impl(req, stdin, &mut NoPartialResultHandler) + .await?; + match command_outcome { + CommandOutcome::Success(resp) => { + let resp = serde_json::from_str(&resp.new_generic_response) + .context("Could not deserialize `NewGenericResponse`")?; + Ok(CommandOutcome::Success(resp)) + } + CommandOutcome::Failure(code) => Ok(CommandOutcome::Failure(code)), + } + } +} + +/// Create a stream that is sent over as a parameter via GRPC to the daemon. +/// +/// Ensures that we send a proper ClientContext message, and that the inner type is wrapped +/// properly into a [`StreamingRequest`] +fn create_client_stream< + T: Into, + InStream: Stream + Send + Sync + 'static, +>( + context: ClientContext, + requests: InStream, +) -> impl Stream + Send + Sync + 'static { + let init_req = StreamingRequest { + request: Some(streaming_request::Request::Context(context)), + }; + stream::once(async move { init_req }).chain(requests.map(|request| request.into())) +} diff --git a/app/buck2_client_ctx/src/daemon/client/connect.rs b/app/buck2_client_ctx/src/daemon/client/connect.rs index 73d6c007eaf56..2787362249a19 100644 --- a/app/buck2_client_ctx/src/daemon/client/connect.rs +++ b/app/buck2_client_ctx/src/daemon/client/connect.rs @@ -12,6 +12,7 @@ use std::ffi::OsStr; use std::fs::File; use std::io::BufReader; use std::net::Ipv4Addr; +use std::path::PathBuf; use std::time::Duration; use anyhow::Context; @@ -22,14 +23,20 @@ use buck2_common::buckd_connection::BUCK_AUTH_TOKEN_HEADER; use buck2_common::client_utils::get_channel_tcp; use buck2_common::client_utils::get_channel_uds; use buck2_common::daemon_dir::DaemonDir; +use buck2_common::init::DaemonStartupConfig; use buck2_common::invocation_paths::InvocationPaths; -use buck2_common::legacy_configs::init::DaemonStartupConfig; -use buck2_core::env_helper::EnvHelper; +use buck2_common::systemd::SystemdPropertySetType; +use buck2_common::systemd::SystemdRunner; +use buck2_core::buck2_env_anyhow; +use buck2_data::DaemonWasStartedReason; +use buck2_error::ErrorTag; use buck2_util::process::async_background_command; use buck2_util::truncate::truncate; +use buck2_wrapper_common::kill::process_exists; +use buck2_wrapper_common::pid::Pid; use dupe::Dupe; use futures::future::try_join3; -use thiserror::Error; +use futures::FutureExt; use tokio::io::AsyncReadExt; use tokio::time::timeout; use tonic::codegen::InterceptedService; @@ -41,28 +48,75 @@ use tonic::Status; use crate::command_outcome::CommandOutcome; use crate::daemon::client::kill; +use crate::daemon::client::kill::hard_kill_until; use crate::daemon::client::BuckdClient; use crate::daemon::client::BuckdClientConnector; use 
crate::daemon::client::BuckdLifecycleLock; use crate::daemon::daemon_windows::spawn_background_process_on_windows; use crate::daemon_constraints; +use crate::daemon_constraints::get_possibly_nested_invocation_daemon_uuid; use crate::events_ctx::EventsCtx; use crate::immediate_config::ImmediateConfigContext; use crate::startup_deadline::StartupDeadline; +use crate::subscribers::classify_server_stderr::classify_server_stderr; use crate::subscribers::stdout_stderr_forwarder::StdoutStderrForwarder; -use crate::subscribers::subscriber::EventSubscriber; +use crate::subscribers::subscribers::EventSubscribers; /// The client side matcher for DaemonConstraints. #[derive(Clone, Debug)] pub struct DaemonConstraintsRequest { + /// The version of buck2. version: String, + /// Sandcastle id. user_version: Option, desired_trace_io_state: DesiredTraceIoState, + nested_invocation_daemon_uuid: Option, pub reject_daemon: Option, pub reject_materializer_state: Option, pub daemon_startup_config: DaemonStartupConfig, } +#[derive(Debug, derive_more::Display)] +pub(crate) enum ConstraintUnsatisfiedReason { + #[display("Version mismatch")] + Version, + #[display("User version mismatch")] + UserVersion, + #[display("Startup config mismatch")] + StartupConfig, + #[display("Reject daemon id")] + RejectDaemonId, + #[display("Trace IO mismatch")] + TraceIo, + #[display("Materializer state identity mismatch")] + MaterializerStateIdentity, +} + +impl ConstraintUnsatisfiedReason { + pub(crate) fn to_daemon_was_started_reason(&self) -> buck2_data::DaemonWasStartedReason { + match self { + ConstraintUnsatisfiedReason::Version => { + buck2_data::DaemonWasStartedReason::ConstraintMismatchVersion + } + ConstraintUnsatisfiedReason::UserVersion => { + buck2_data::DaemonWasStartedReason::ConstraintMismatchUserVersion + } + ConstraintUnsatisfiedReason::StartupConfig => { + buck2_data::DaemonWasStartedReason::ConstraintMismatchStartupConfig + } + ConstraintUnsatisfiedReason::RejectDaemonId => { + buck2_data::DaemonWasStartedReason::ConstraintRejectDaemonId + } + ConstraintUnsatisfiedReason::TraceIo => { + buck2_data::DaemonWasStartedReason::ConstraintMismatchTraceIo + } + ConstraintUnsatisfiedReason::MaterializerStateIdentity => { + buck2_data::DaemonWasStartedReason::ConstraintMismatchMaterializerStateIdentity + } + } + } +} + impl DaemonConstraintsRequest { pub fn new( immediate_config: &ImmediateConfigContext<'_>, @@ -72,6 +126,7 @@ impl DaemonConstraintsRequest { version: daemon_constraints::version(), user_version: daemon_constraints::user_version()?, desired_trace_io_state, + nested_invocation_daemon_uuid: get_possibly_nested_invocation_daemon_uuid(), reject_daemon: None, reject_materializer_state: None, daemon_startup_config: immediate_config.daemon_startup_config()?.clone(), @@ -82,13 +137,18 @@ impl DaemonConstraintsRequest { matches!(self.desired_trace_io_state, DesiredTraceIoState::Enabled) } - fn satisfied(&self, daemon: &buck2_cli_proto::DaemonConstraints) -> bool { + fn satisfied( + &self, + daemon: &buck2_cli_proto::DaemonConstraints, + ) -> Result<(), ConstraintUnsatisfiedReason> { if self.version != daemon.version { - return false; + return Err(ConstraintUnsatisfiedReason::Version); } - if self.user_version != daemon.user_version { - return false; + if !is_nested_invocation(self.nested_invocation_daemon_uuid.as_ref(), daemon) + && self.user_version != daemon.user_version + { + return Err(ConstraintUnsatisfiedReason::UserVersion); } let server_daemon_startup_config = 
daemon.daemon_startup_config.as_ref().and_then(|c| { @@ -100,12 +160,12 @@ impl DaemonConstraintsRequest { }); if Some(&self.daemon_startup_config) != server_daemon_startup_config.as_ref() { - return false; + return Err(ConstraintUnsatisfiedReason::StartupConfig); } if let Some(r) = &self.reject_daemon { if *r == daemon.daemon_id { - return false; + return Err(ConstraintUnsatisfiedReason::RejectDaemonId); } } @@ -114,12 +174,16 @@ impl DaemonConstraintsRequest { let extra = match &daemon.extra { Some(e) => e, - None => return true, + None => return Ok(()), }; match (self.desired_trace_io_state, extra.trace_io_enabled) { - (DesiredTraceIoState::Enabled, false) => return false, - (DesiredTraceIoState::Disabled, true) => return false, + (DesiredTraceIoState::Enabled, false) => { + return Err(ConstraintUnsatisfiedReason::TraceIo); + } + (DesiredTraceIoState::Disabled, true) => { + return Err(ConstraintUnsatisfiedReason::TraceIo); + } _ => {} } @@ -129,11 +193,11 @@ impl DaemonConstraintsRequest { .as_ref() .map_or(false, |i| i == r) { - return false; + return Err(ConstraintUnsatisfiedReason::MaterializerStateIdentity); } } - true + Ok(()) } } @@ -151,7 +215,9 @@ pub enum BuckdConnectConstraints { Constraints(DaemonConstraintsRequest), } -static BUCKD_STARTUP_TIMEOUT: EnvHelper = EnvHelper::new("BUCKD_STARTUP_TIMEOUT"); +fn buckd_startup_timeout_var() -> anyhow::Result> { + buck2_env_anyhow!("BUCKD_STARTUP_TIMEOUT", type=u64) +} async fn get_channel( endpoint: ConnectionType, @@ -196,7 +262,7 @@ pub async fn new_daemon_api_client( pub fn buckd_startup_timeout() -> anyhow::Result { Ok(Duration::from_secs( - BUCKD_STARTUP_TIMEOUT.get_copied()?.unwrap_or(10), + buckd_startup_timeout_var()?.unwrap_or(10), )) } @@ -226,7 +292,7 @@ impl<'a> BuckdLifecycle<'a> { } async fn start_server(&self) -> anyhow::Result<()> { - let mut args = vec!["--isolation-dir", self.paths.isolation.as_str()]; + let mut args = vec!["--isolation-dir", self.paths.isolation.as_str(), "daemon"]; if self.constraints.is_trace_io_requested() { args.push("--enable-trace-io"); @@ -239,22 +305,22 @@ impl<'a> BuckdLifecycle<'a> { let mut daemon_env_vars = Vec::new(); - let has_backtrace_vars = - env::var_os("RUST_BACKTRACE").is_some() || env::var_os("RUST_LIB_BACKTRACE").is_some(); - - if !has_backtrace_vars { - daemon_env_vars.push((OsStr::new("RUST_BACKTRACE"), OsStr::new("1"))); - - // TODO(nga): somewhere we capture too many backtraces, probably - // we create too many `anyhow::Error` on non-error paths. - // Probably somewhere in Starlark, because of "evaluating build file" spans. - // Can be reproduced with this command: - // ``` - // buck2 --isolation-dir=xx audit providers fbcode//buck2:buck2 --quiet - // ``` - // Which regresses from 15s to 80s when `RUST_LIB_BACKTRACE` is set. - daemon_env_vars.push((OsStr::new("RUST_LIB_BACKTRACE"), OsStr::new("0"))); - }; + daemon_env_vars.push((OsStr::new("RUST_BACKTRACE"), OsStr::new("1"))); + + // TODO(nga): We create too many backtraces during `attrs.source()` coercion. Can be + // reproduced with this command: + // ``` + // buck2 --isolation-dir=xx audit providers fbcode//buck2:buck2 --quiet + // ``` + // Which regresses from 15s to 80s when `RUST_LIB_BACKTRACE` is set. So we disable + // backtraces in the daemon unless the user has explicitly asked for them. We + // intentionally avoid considering the `RUST_BACKTRACE` variables that buck was invoked + // with, because a lot of Rust tooling sets those without meaning to influence this + // behavior. 
+ daemon_env_vars.push(( + OsStr::new("RUST_LIB_BACKTRACE"), + OsStr::new(buck2_env_anyhow!("BUCK2_LIB_BACKTRACE")?.unwrap_or("0")), + )); if env::var_os("FORCE_WANT_RESTART").is_some() { // Disable restarter for the actual daemon command, even if it was forced, otherwise we @@ -287,10 +353,10 @@ impl<'a> BuckdLifecycle<'a> { daemon_startup_config: &DaemonStartupConfig, ) -> anyhow::Result<()> { let daemon_startup_config = daemon_startup_config.serialize()?; - args.extend(["daemon", "--dont-daemonize"]); + args.extend(["--dont-daemonize"]); spawn_background_process_on_windows( self.paths.project_root().root(), - &env::current_exe()?, + &get_daemon_exe()?, args.into_iter() .chain(std::iter::once(daemon_startup_config.as_str())), daemon_env_vars, @@ -309,18 +375,34 @@ impl<'a> BuckdLifecycle<'a> { .unwrap_or_else(|_| panic!("Cannot convert {} to int", t)) })); - let mut cmd = - async_background_command(std::env::current_exe().context("Failed to get current exe")?); + let daemon_exe = get_daemon_exe()?; + let mut cmd = if let Some(systemd_runner) = SystemdRunner::create_if_enabled( + SystemdPropertySetType::Daemon, + &daemon_startup_config.resource_control, + )? { + systemd_runner + .background_command_linux( + daemon_exe, + format!( + "buck2-daemon-{}-{}", + project_dir.name().unwrap_or("unknown_project"), + self.paths.isolation.as_str() + ), + project_dir.root().to_buf(), + ) + .into() + } else { + async_background_command(daemon_exe) + }; + cmd.current_dir(project_dir.root()) .stdout(std::process::Stdio::piped()) .stderr(std::process::Stdio::piped()) .args(args); - cmd.arg("daemon"); cmd.arg(daemon_startup_config.serialize()?); - static DAEMON_LOG_TO_FILE: EnvHelper = EnvHelper::::new("BUCK_DAEMON_LOG_TO_FILE"); - if DAEMON_LOG_TO_FILE.get_copied()? == Some(1) { + if buck2_env_anyhow!("BUCK_DAEMON_LOG_TO_FILE", type=u8)? == Some(1) { cmd.env("BUCK_LOG_TO_FILE_PATH", self.paths.log_dir().as_os_str()); } @@ -461,6 +543,7 @@ impl BootstrapBuckdClient { pub async fn connect( paths: &InvocationPaths, constraints: BuckdConnectConstraints, + event_subscribers: &mut EventSubscribers<'_>, ) -> anyhow::Result { let daemon_dir = paths.daemon_dir()?; @@ -484,16 +567,15 @@ impl BootstrapBuckdClient { establish_connection_existing(&daemon_dir).await } BuckdConnectConstraints::Constraints(constraints) => { - establish_connection(paths, constraints).await + establish_connection(paths, constraints, event_subscribers).await } } - .with_context(|| daemon_connect_error(paths)) - .context(error_message) + .map_err(|e| daemon_connect_error(e, paths).context(error_message).into()) } pub fn with_subscribers<'a>( self, - subscribers: Vec>, + subscribers: EventSubscribers<'a>, ) -> BuckdClientConnector<'a> { BuckdClientConnector { client: BuckdClient { @@ -506,11 +588,12 @@ impl BootstrapBuckdClient { } } - pub async fn kill(&mut self, reason: &str) -> anyhow::Result { - kill::kill(&mut self.client, &self.info, reason).await + pub(crate) async fn kill(&mut self, reason: &str) -> anyhow::Result { + kill::kill(&mut self.client, &self.info, reason).await?; + Pid::from_i64(self.info.pid) } - async fn kill_for_constraints_mismatch(&mut self) -> anyhow::Result { + async fn kill_for_constraints_mismatch(&mut self) -> anyhow::Result { self.kill("client expected different buckd constraints") .await } @@ -532,7 +615,7 @@ pub struct BuckdConnectOptions<'a> { /// Subscribers manage the way that incoming events from the server are handled. 
/// The client will forward events and stderr/stdout output from the server to each subscriber. /// By default, this list is set to a single subscriber that notifies the user of basic output from the server. - pub(crate) subscribers: Vec>, + pub(crate) subscribers: EventSubscribers<'a>, pub constraints: BuckdConnectConstraints, } @@ -540,21 +623,22 @@ impl<'a> BuckdConnectOptions<'a> { pub fn existing_only_no_console() -> Self { Self { constraints: BuckdConnectConstraints::ExistingOnly, - subscribers: vec![Box::new(StdoutStderrForwarder)], + subscribers: EventSubscribers::new(vec![Box::new(StdoutStderrForwarder)]), } } pub async fn connect( - self, + mut self, paths: &InvocationPaths, ) -> anyhow::Result> { - match BootstrapBuckdClient::connect(paths, self.constraints).await { + match BootstrapBuckdClient::connect(paths, self.constraints, &mut self.subscribers) + .await + .map_err(buck2_error::Error::from) + { Ok(client) => Ok(client.with_subscribers(self.subscribers)), Err(e) => { - self.subscribers - .into_iter() - .for_each(|mut s| s.handle_daemon_connection_failure()); - Err(e) + self.subscribers.handle_daemon_connection_failure(&e); + Err(e.into()) } } } @@ -562,7 +646,7 @@ impl<'a> BuckdConnectOptions<'a> { pub async fn establish_connection_existing( daemon_dir: &DaemonDir, -) -> anyhow::Result { +) -> buck2_error::Result { let deadline = StartupDeadline::duration_from_now(buckd_startup_timeout()?)?; deadline .run( @@ -576,12 +660,14 @@ pub async fn establish_connection_existing( }, ) .await + .map_err(buck2_error::Error::from) } async fn establish_connection( paths: &InvocationPaths, constraints: DaemonConstraintsRequest, -) -> anyhow::Result { + event_subscribers: &mut EventSubscribers<'_>, +) -> buck2_error::Result { // There are many places where `establish_connection_inner` may hang. // If it does, better print something to the user instead of hanging quietly forever. let timeout = buckd_startup_timeout()? * 9; @@ -589,27 +675,55 @@ async fn establish_connection( deadline .down( "establishing connection to Buck daemon or start a daemon", - |timeout| establish_connection_inner(paths, constraints, timeout), + |timeout| establish_connection_inner(paths, constraints, timeout, event_subscribers), ) .await + .map_err(buck2_error::Error::from) } +fn explain_failed_to_connect_reason(reason: buck2_data::DaemonWasStartedReason) -> &'static str { + match reason { + DaemonWasStartedReason::UnknownReason => "Unknown reason", + DaemonWasStartedReason::ConstraintMismatchVersion => "Version mismatch", + DaemonWasStartedReason::ConstraintMismatchUserVersion => "User version mismatch", + DaemonWasStartedReason::ConstraintMismatchStartupConfig => "Startup config mismatch", + DaemonWasStartedReason::ConstraintRejectDaemonId => "Reject daemon id", + DaemonWasStartedReason::ConstraintMismatchTraceIo => "Trace IO mismatch", + DaemonWasStartedReason::ConstraintMismatchMaterializerStateIdentity => { + "Materializer state identity mismatch" + } + DaemonWasStartedReason::CouldNotConnectToDaemon => { + // TODO(nga): get rid of this variant. 
+ "Could not connect to daemon" + } + DaemonWasStartedReason::TimedOutConnectingToDaemon => "Timed out connecting to daemon", + DaemonWasStartedReason::TimeoutCalculationError => "Timeout calculation error", + DaemonWasStartedReason::NoBuckdInfo => "No buckd.info", + DaemonWasStartedReason::CouldNotLoadBuckdInfo => "Could not load buckd.info", + DaemonWasStartedReason::NoDaemonProcess => "buck2 daemon is not running", + } +} + +#[allow(clippy::collapsible_match)] async fn establish_connection_inner( paths: &InvocationPaths, constraints: DaemonConstraintsRequest, deadline: StartupDeadline, + event_subscribers: &mut EventSubscribers<'_>, ) -> anyhow::Result { let daemon_dir = paths.daemon_dir()?; - let connect_before_restart = deadline + + let res = deadline .half()? .run("connecting to existing buck daemon", { - try_connect_existing_before_daemon_restart(&daemon_dir, &constraints) + try_connect_existing_before_acquiring_lifecycle_lock(&daemon_dir, &constraints).map(Ok) }) - .await?; - - if let ConnectBeforeRestart::Accepted(client) = connect_before_restart { - return Ok(client); - }; + .await; + if let Ok(connect_before_restart) = res { + if let ConnectBeforeRestart::Accepted(client) = connect_before_restart { + return Ok(client); + }; + } // At this point, we've either failed to connect to buckd or buckd had the wrong constraints. // Get the lifecycle lock to ensure we don't have races with other processes as we check and change things. @@ -621,26 +735,123 @@ async fn establish_connection_inner( // Even if we didn't connect before, it's possible that we just raced with another invocation // starting the server, so we try to connect again while holding the lock. - if let Ok(channel) = try_connect_existing(&daemon_dir, &deadline).await { - let mut client = channel.upgrade().await?; - if constraints.satisfied(&client.constraints) { - return Ok(client); + let daemon_was_started_reason = { + match BuckdProcessInfo::load_if_exists(&daemon_dir) { + Ok(Some(buckd_info)) => { + match try_connect_existing(&buckd_info, &deadline, &lifecycle_lock).await { + Ok(channel) => { + let mut client = channel.upgrade().await?; + + let reason = match constraints.satisfied(&client.constraints) { + Ok(()) => return Ok(client), + Err(reason) => reason, + }; + + if is_nested_invocation( + get_possibly_nested_invocation_daemon_uuid().as_ref(), + &client.constraints, + ) { + match reason { + ConstraintUnsatisfiedReason::TraceIo + | ConstraintUnsatisfiedReason::StartupConfig => { + return Err(BuckdConnectError::NestedConstraintMismatch { + reason, + } + .into()); + } + _ => (), + } + } + + event_subscribers + .eprintln(&format!( + "buck2 daemon constraint mismatch: {reason}; killing daemon..." + )) + .await?; + + deadline + .run( + "sending kill command to the Buck daemon", + client.kill_for_constraints_mismatch(), + ) + .await?; + + event_subscribers + .eprintln("Starting new buck2 daemon...") + .await?; + + reason.to_daemon_was_started_reason() + } + Err(reason) => { + // TODO(nga): should print some proper message here. 
+ hard_kill_until(&buckd_info.info, deadline.down_deadline()?.deadline()) + .await?; + + event_subscribers + .eprintln(&format!( + "Could not connect to buck2 daemon ({}), starting a new one...", + explain_failed_to_connect_reason(reason) + )) + .await?; + + reason + } + } + } + Ok(None) => { + event_subscribers + .eprintln("Starting new buck2 daemon...") + .await?; + + buck2_data::DaemonWasStartedReason::NoBuckdInfo + } + Err(e) => { + event_subscribers + .eprintln(&format!( + "Could not load buckd.info: {}, starting new buck2 daemon...", + e + )) + .await?; + + buck2_data::DaemonWasStartedReason::CouldNotLoadBuckdInfo + } } - deadline - .run( - "sending kill command to the Buck daemon", - client.kill_for_constraints_mismatch(), - ) - .await?; - } + }; + + deadline + .down( + &format!( + "starting new buck2 daemon for reason: {}", + explain_failed_to_connect_reason(daemon_was_started_reason) + ), + |deadline| { + start_new_buckd_and_connect( + deadline, + &lifecycle_lock, + paths, + &constraints, + event_subscribers, + daemon_was_started_reason, + ) + }, + ) + .await +} +async fn start_new_buckd_and_connect( + deadline: StartupDeadline, + lifecycle_lock: &BuckdLifecycle<'_>, + paths: &InvocationPaths, + constraints: &DaemonConstraintsRequest, + event_subscribers: &mut EventSubscribers<'_>, + daemon_was_started_reason: buck2_data::DaemonWasStartedReason, +) -> anyhow::Result { // Daemon dir may be corrupted. Safer to delete it. lifecycle_lock .clean_daemon_dir() .context("Cleaning daemon dir")?; // Now there's definitely no server that can be connected to - // TODO(cjhopman): a non-responsive buckd process may be somehow lingering around and we should probably kill it off here. lifecycle_lock.start_server().await?; // It might take a little bit for the daemon server to start up. We could wait for the buckd.info // file to appear, but it's just as easy to just retry the connection itself. 
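// Illustrative sketch (not part of the diff): the "just retry the connection itself"
// idiom described in the comment above. `try_connect` is a hypothetical stand-in for
// `BuckdProcessInfo::load_and_create_channel`; the real code drives its retries
// through `StartupDeadline` rather than a bare loop.
async fn connect_with_retries(
    deadline: std::time::Instant,
    mut try_connect: impl FnMut() -> anyhow::Result<()>,
) -> anyhow::Result<()> {
    loop {
        match try_connect() {
            Ok(()) => return Ok(()),
            // The daemon may still be initializing: back off briefly and try
            // again until the startup deadline expires.
            Err(e) if std::time::Instant::now() < deadline => {
                tracing::debug!("Connect attempt failed, retrying: {:#}", e);
                tokio::time::sleep(std::time::Duration::from_millis(100)).await;
            }
            Err(e) => return Err(e),
        }
    }
}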
@@ -656,14 +867,21 @@ let client = channel.upgrade().await?; - if !constraints.satisfied(&client.constraints) { + if let Err(reason) = constraints.satisfied(&client.constraints) { return Err(BuckdConnectError::BuckDaemonConstraintWrongAfterStart { + reason, expected: constraints.clone(), actual: client.constraints, } .into()); } + event_subscribers.handle_daemon_started(daemon_was_started_reason); + + event_subscribers + .eprintln("Connected to new buck2 daemon.") + .await?; + Ok(client) } @@ -679,41 +897,64 @@ enum ConnectBeforeRestart { /// * `Ok(Some(client))` if we connected to an existing buckd /// * `Ok(None)` if we failed to connect and should restart buckd /// * `Err` if we failed to connect and should abandon startup -async fn try_connect_existing_before_daemon_restart( +async fn try_connect_existing_before_acquiring_lifecycle_lock( daemon_dir: &DaemonDir, constraints: &DaemonConstraintsRequest, -) -> anyhow::Result<ConnectBeforeRestart> { +) -> ConnectBeforeRestart { match BuckdProcessInfo::load_and_create_channel(daemon_dir).await { Ok(channel) => { - let client = channel.upgrade().await?; - if constraints.satisfied(&client.constraints) { - Ok(ConnectBeforeRestart::Accepted(client)) + let Ok(client) = channel.upgrade().await else { + return ConnectBeforeRestart::Rejected; + }; + if constraints.satisfied(&client.constraints).is_ok() { + ConnectBeforeRestart::Accepted(client) } else { - Ok(ConnectBeforeRestart::Rejected) + ConnectBeforeRestart::Rejected } } Err(e) => { tracing::debug!("Connect failed: {:#}", e); - Ok(ConnectBeforeRestart::Rejected) + ConnectBeforeRestart::Rejected } } } async fn try_connect_existing( - daemon_dir: &DaemonDir, + buckd_info: &BuckdProcessInfo<'_>, timeout: &StartupDeadline, -) -> anyhow::Result<BuckdChannel> { - timeout - .min(buckd_startup_timeout()?)? - .run( - "connect existing buckd", - BuckdProcessInfo::load_and_create_channel(daemon_dir), - ) - .await + _lock: &BuckdLifecycle<'_>, +) -> Result<BuckdChannel, buck2_data::DaemonWasStartedReason> { + let timeout: anyhow::Result<_> = try { timeout.min(buckd_startup_timeout()?)? }; + let Ok(timeout) = timeout else { + return Err(buck2_data::DaemonWasStartedReason::TimeoutCalculationError); + }; + let Ok(rem_duration) = timeout.rem_duration("connect existing buckd") else { + return Err(buck2_data::DaemonWasStartedReason::TimedOutConnectingToDaemon); + }; + match tokio::time::timeout(rem_duration, buckd_info.create_channel()).await { + Ok(Ok(channel)) => Ok(channel), + Ok(Err(_)) => { + let Ok(pid) = buckd_info.pid() else { + return Err(buck2_data::DaemonWasStartedReason::CouldNotLoadBuckdInfo); + }; + let buckd_process_exists = process_exists(pid).unwrap_or(true); + if !buckd_process_exists { + // We don't delete the `buckd.info` file, and if we failed to connect, + // the most likely reason is that the daemon process doesn't exist.
+ Err(buck2_data::DaemonWasStartedReason::NoDaemonProcess) + } else { + Err(buck2_data::DaemonWasStartedReason::CouldNotConnectToDaemon) + } + } + Err(e) => { + let _assert_type: tokio::time::error::Elapsed = e; + Err(buck2_data::DaemonWasStartedReason::TimedOutConnectingToDaemon) + } + } } pub struct BuckdProcessInfo<'a> { - info: DaemonProcessInfo, + pub(crate) info: DaemonProcessInfo, daemon_dir: &'a DaemonDir, } @@ -724,9 +965,25 @@ impl<'a> BuckdProcessInfo<'a> { } pub fn load(daemon_dir: &'a DaemonDir) -> anyhow::Result<Self> { + Self::load_if_exists(daemon_dir)?.with_context(|| { + format!( + "buckd info {} does not exist", + daemon_dir.buckd_info().display() + ) + }) + } + + pub fn load_if_exists(daemon_dir: &'a DaemonDir) -> anyhow::Result<Option<Self>> { let location = daemon_dir.buckd_info(); - let file = File::open(&location) - .with_context(|| format!("Trying to open buckd info, `{}`", location.display()))?; + let file = match File::open(&location) { + Ok(file) => file, + Err(e) if e.kind() == std::io::ErrorKind::NotFound => return Ok(None), + Err(e) => { + return Err(e).with_context(|| { + format!("Trying to open buckd info, `{}`", location.display()) + }); + } + }; let reader = BufReader::new(file); let info = serde_json::from_reader(reader).with_context(|| { format!( @@ -736,7 +993,7 @@ impl<'a> BuckdProcessInfo<'a> { ) })?; - Ok(Self { info, daemon_dir }) + Ok(Some(BuckdProcessInfo { info, daemon_dir })) } pub async fn create_channel(&self) -> anyhow::Result<BuckdChannel> { @@ -754,12 +1011,12 @@ impl<'a> BuckdProcessInfo<'a> { }) } - pub async fn hard_kill(&self) -> anyhow::Result<KillResponse> { + pub async fn hard_kill(&self) -> anyhow::Result<()> { kill::hard_kill(&self.info).await } - pub fn pid(&self) -> i64 { - self.info.pid + pub fn pid(&self) -> anyhow::Result<Pid> { + Pid::from_i64(self.info.pid) } } @@ -768,8 +1025,8 @@ async fn get_constraints( ) -> anyhow::Result<buck2_cli_proto::DaemonConstraints> { // NOTE: No tailers in bootstrap client, we capture logs if we fail to connect, but // otherwise we leave them alone. - let status = EventsCtx::new(vec![Box::new(StdoutStderrForwarder)]) - .unpack_oneshot(&mut None, || { + let status = EventsCtx::new(EventSubscribers::new(vec![Box::new(StdoutStderrForwarder)])) + .unpack_oneshot(None, { client.status(tonic::Request::new(buck2_cli_proto::StatusRequest { snapshot: false, })) @@ -786,8 +1043,22 @@ async fn get_constraints( Ok(status.daemon_constraints.unwrap_or_default()) } -#[derive(Debug, Error)] +pub fn get_daemon_exe() -> anyhow::Result<PathBuf> { + let exe = env::current_exe().context("Failed to get current exe")?; + if buck2_core::client_only::is_client_only()? { + let ext = if cfg!(windows) { ".exe" } else { "" }; + Ok(exe + .parent() + .context("Expected current exe to be in a directory")?
+ .join(format!("buck2-daemon{}", ext))) + } else { + Ok(exe) + } +} + +#[derive(Debug, buck2_error::Error)] #[allow(clippy::large_enum_variant)] +#[buck2(tag = DaemonConnect)] enum BuckdConnectError { #[error( "buck daemon startup failed with exit code {code}\nstdout:\n{stdout}\nstderr:\n{stderr}" @@ -798,17 +1069,18 @@ enum BuckdConnectError { stderr: String, }, #[error( - "during buck daemon startup, the started process did not match constraints.\nexpected: {expected:?}\nactual: {actual:?}" + "during buck daemon startup, the started process did not match constraints ({reason}).\nexpected: {expected:?}\nactual: {actual:?}" )] BuckDaemonConstraintWrongAfterStart { + reason: ConstraintUnsatisfiedReason, expected: DaemonConstraintsRequest, actual: buck2_cli_proto::DaemonConstraints, }, - #[error("Error connecting to the daemon, daemon stderr follows:\n{stderr}")] - ConnectError { stderr: String }, + #[error("buck2 daemon constraint mismatch during nested invocation: {reason}")] + NestedConstraintMismatch { reason: ConstraintUnsatisfiedReason }, } -fn daemon_connect_error(paths: &InvocationPaths) -> BuckdConnectError { +fn daemon_connect_error(error: buck2_error::Error, paths: &InvocationPaths) -> buck2_error::Error { let stderr = paths .daemon_dir() .and_then(|dir| { @@ -817,9 +1089,21 @@ fn daemon_connect_error(paths: &InvocationPaths) -> BuckdConnectError { }) .unwrap_or_else(|_| "".to_owned()); - BuckdConnectError::ConnectError { - stderr: truncate(&stderr, 64000), - } + let stderr = truncate(&stderr, 64000); + let error = error + .context(format!( + "Error connecting to the daemon, daemon stderr follows:\n{}", + stderr + )) + .tag([ErrorTag::DaemonConnect]); + classify_server_stderr(error, &stderr) +} + +fn is_nested_invocation( + buck2_daemon_uuid: Option<&String>, + daemon: &buck2_cli_proto::DaemonConstraints, +) -> bool { + buck2_daemon_uuid.map_or(false, |uuid| uuid == &daemon.daemon_id) } #[cfg(test)] @@ -846,6 +1130,7 @@ mod tests { version: "version".to_owned(), user_version: Some("test".to_owned()), desired_trace_io_state, + nested_invocation_daemon_uuid: None, reject_daemon: None, reject_materializer_state: None, daemon_startup_config: DaemonStartupConfig::testing_empty(), @@ -856,21 +1141,21 @@ mod tests { fn test_constraints_equal_for_same_constraints() { let req = request(DesiredTraceIoState::Enabled); let daemon = constraints(true); - assert!(req.satisfied(&daemon)); + assert!(req.satisfied(&daemon).is_ok()); } #[test] fn test_constraints_equal_for_trace_io_existing() { let req = request(DesiredTraceIoState::Existing); let daemon = constraints(true); - assert!(req.satisfied(&daemon)); + assert!(req.satisfied(&daemon).is_ok()); } #[test] fn test_constraints_unequal_for_trace_io() { let req = request(DesiredTraceIoState::Disabled); let daemon = constraints(true); - assert!(!req.satisfied(&daemon)); + assert!(req.satisfied(&daemon).is_err()); } #[test] @@ -889,6 +1174,7 @@ mod tests { user_version: None, desired_trace_io_state: DesiredTraceIoState::Existing, reject_daemon: None, + nested_invocation_daemon_uuid: None, reject_materializer_state: None, daemon_startup_config: DaemonStartupConfig::testing_empty(), }; @@ -903,11 +1189,11 @@ mod tests { ), }; - assert!(req.satisfied(&daemon)); + assert!(req.satisfied(&daemon).is_ok()); req.reject_daemon = Some("zzz".to_owned()); - assert!(req.satisfied(&daemon)); + assert!(req.satisfied(&daemon).is_ok()); req.reject_daemon = Some("ddd".to_owned()); - assert!(!req.satisfied(&daemon)); + assert!(req.satisfied(&daemon).is_err()); } 
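// Illustrative sketch (not part of the diff): how the `is_nested_invocation` check
// introduced above behaves. A child buck2 process inherits `BUCK2_DAEMON_UUID` from
// the daemon that spawned it; the invocation counts as nested only when that UUID
// matches the daemon it is now talking to.
#[test]
fn test_is_nested_invocation_sketch() {
    let daemon = buck2_cli_proto::DaemonConstraints {
        daemon_id: "ddd".to_owned(),
        ..Default::default()
    };
    assert!(is_nested_invocation(Some(&"ddd".to_owned()), &daemon));
    assert!(!is_nested_invocation(Some(&"other".to_owned()), &daemon));
    assert!(!is_nested_invocation(None, &daemon));
}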
#[test] @@ -917,6 +1203,7 @@ mod tests { user_version: None, desired_trace_io_state: DesiredTraceIoState::Existing, reject_daemon: None, + nested_invocation_daemon_uuid: None, reject_materializer_state: None, daemon_startup_config: DaemonStartupConfig::testing_empty(), }; @@ -934,11 +1221,11 @@ mod tests { ), }; - assert!(req.satisfied(&daemon)); + assert!(req.satisfied(&daemon).is_ok()); req.reject_materializer_state = Some("zzz".to_owned()); - assert!(req.satisfied(&daemon)); + assert!(req.satisfied(&daemon).is_ok()); req.reject_materializer_state = Some("mmm".to_owned()); - assert!(!req.satisfied(&daemon)); + assert!(req.satisfied(&daemon).is_err()); } #[test] @@ -948,6 +1235,7 @@ mod tests { user_version: None, desired_trace_io_state: DesiredTraceIoState::Existing, reject_daemon: None, + nested_invocation_daemon_uuid: None, reject_materializer_state: None, daemon_startup_config: DaemonStartupConfig::testing_empty(), }; @@ -965,8 +1253,39 @@ mod tests { ), }; - assert!(req.satisfied(&daemon)); + assert!(req.satisfied(&daemon).is_ok()); req.daemon_startup_config.daemon_buster = Some("1".to_owned()); - assert!(!req.satisfied(&daemon)); + assert!(req.satisfied(&daemon).is_err()); + } + + #[test] + fn test_constraints_nested_invocation_diff_user_version() { + let mut req = DaemonConstraintsRequest { + version: "foo".to_owned(), + user_version: Some("fake_version_1".to_owned()), + desired_trace_io_state: DesiredTraceIoState::Existing, + nested_invocation_daemon_uuid: None, + reject_daemon: None, + reject_materializer_state: None, + daemon_startup_config: DaemonStartupConfig::testing_empty(), + }; + + let daemon = buck2_cli_proto::DaemonConstraints { + version: "foo".to_owned(), + user_version: Some("fake_version_2".to_owned()), + daemon_id: "ddd".to_owned(), + extra: Some(buck2_cli_proto::ExtraDaemonConstraints { + trace_io_enabled: false, + materializer_state_identity: Some("mmm".to_owned()), + }), + daemon_startup_config: Some( + serde_json::to_string(&DaemonStartupConfig::testing_empty()).unwrap(), + ), + }; + + assert!(req.satisfied(&daemon).is_err()); + + req.nested_invocation_daemon_uuid = Some("ddd".to_owned()); + assert!(req.satisfied(&daemon).is_ok()); } } diff --git a/app/buck2_client_ctx/src/daemon/client/kill.rs b/app/buck2_client_ctx/src/daemon/client/kill.rs index bd38e8305cd78..9cf227973ceb5 100644 --- a/app/buck2_client_ctx/src/daemon/client/kill.rs +++ b/app/buck2_client_ctx/src/daemon/client/kill.rs @@ -10,53 +10,99 @@ use std::time::Duration; use std::time::Instant; -use anyhow::Context; use buck2_cli_proto::daemon_api_client::*; use buck2_cli_proto::*; +use buck2_data::error::ErrorTag; +use buck2_error::buck2_error_anyhow; use buck2_wrapper_common::kill; -use sysinfo::Pid; -use sysinfo::PidExt; -use sysinfo::ProcessExt; +use buck2_wrapper_common::pid::Pid; use sysinfo::ProcessRefreshKind; use sysinfo::System; -use sysinfo::SystemExt; use tonic::codegen::InterceptedService; use tonic::transport::Channel; use tonic::Request; +use crate::daemon::client::connect::buckd_startup_timeout; use crate::daemon::client::connect::BuckAddAuthTokenInterceptor; - -#[derive(Debug, thiserror::Error)] -enum KillError { - #[error("Daemon pid {} did not die after kill within {:.1}s (status: {})", _0, _1.as_secs_f32(), _2)] - DidNotDie(u32, Duration, String), -} +use crate::daemon::client::connect::BuckdProcessInfo; +use crate::daemon::client::BuckdLifecycleLock; const GRACEFUL_SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(4); /// Kill request does not wait for the process to exit. 
const KILL_REQUEST_TIMEOUT: Duration = Duration::from_secs(3); const FORCE_SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(10); -pub struct KillResponse { - pid: u32, -} +pub async fn kill_command_impl( + lifecycle_lock: &BuckdLifecycleLock, + reason: &str, +) -> anyhow::Result<()> { + let process = match BuckdProcessInfo::load(lifecycle_lock.daemon_dir()) { + Ok(p) => p, + Err(e) => { + tracing::debug!("No BuckdProcessInfo: {:#}", e); + crate::eprintln!("no buckd server running")?; + return Ok(()); + } + }; + + let buckd = tokio::time::timeout(buckd_startup_timeout()?, async { + process.create_channel().await?.upgrade().await + }) + .await; + + let pid = match buckd { + Ok(Ok(mut buckd)) => { + crate::eprintln!("killing buckd server")?; + Some(buckd.kill(reason).await?) + } + Ok(Err(e)) => { + // No time out: we just errored out. This is likely indicative that there is no + // buckd (i.e. our connection got rejected), so let's check for this and then + // provide some information. + + if e.is::<tonic::transport::Error>() { + // OK, looks like the server is not running. + tracing::debug!("Connect failed with a Tonic error: {:#}", e); + crate::eprintln!("no buckd server running")?; + } else { + crate::eprintln!( + "unexpected error connecting to Buck2: {:#} \ + (no buckd server running?)", + e + )?; + } + + None + } + Err(e) => { + tracing::debug!("Connect timed out: {:#}", e); + + // If we time out, then considering the generous timeout we give ourselves, + // that must mean we're not getting a reply back from Buck, but that we did + // succeed in opening a connection to it (because if we didn't, we'd have + // errored out). + // + // This means the socket is probably open. We can reasonably go and kill this + // process if both the PID and the port exist. + crate::eprintln!("killing unresponsive buckd server")?; + process.hard_kill().await?; + Some(process.pid()?) + } + }; -impl KillResponse { - pub fn log(&self) -> anyhow::Result<()> { - crate::eprintln!("Buck2 daemon pid {} has exited", self.pid)?; - Ok(()) + if let Some(pid) = pid { + crate::eprintln!("Buck2 daemon pid {} has exited", pid)?; } + + Ok(()) } -pub async fn kill( +pub(crate) async fn kill( client: &mut DaemonApiClient<InterceptedService<Channel, BuckAddAuthTokenInterceptor>>, info: &DaemonProcessInfo, reason: &str, -) -> anyhow::Result<KillResponse> { - let pid = info.pid; - let pid: u32 = pid - .try_into() - .with_context(|| format!("Integer overflow converting pid {}", pid))?; +) -> anyhow::Result<()> { + let pid = Pid::from_i64(info.pid)?; let callers = get_callers_for_kill(); tracing::debug!("Killing daemon with PID {}", pid); @@ -74,7 +120,7 @@ pub async fn kill( match inner_result { Ok(_) => loop { if !kill::process_exists(pid)?
{ - return Ok(KillResponse { pid }); + return Ok(()); } if time_req_sent.elapsed() > GRACEFUL_SHUTDOWN_TIMEOUT { crate::eprintln!( @@ -108,90 +154,74 @@ pub async fn kill( hard_kill_impl(pid, time_req_sent, time_to_kill).await } -pub async fn hard_kill(info: &DaemonProcessInfo) -> anyhow::Result<KillResponse> { - let pid = info - .pid - .try_into() - .with_context(|| format!("Integer overflow converting pid {}", info.pid))?; +pub(crate) async fn hard_kill(info: &DaemonProcessInfo) -> anyhow::Result<()> { + let pid = Pid::from_i64(info.pid)?; hard_kill_impl(pid, Instant::now(), FORCE_SHUTDOWN_TIMEOUT).await } -async fn hard_kill_impl( - pid: u32, - start_at: Instant, - deadline: Duration, -) -> anyhow::Result<KillResponse> { +pub(crate) async fn hard_kill_until( + info: &DaemonProcessInfo, + deadline: Instant, +) -> anyhow::Result<()> { + let pid = Pid::from_i64(info.pid)?; + + let now = Instant::now(); + hard_kill_impl(pid, now, deadline.saturating_duration_since(now)).await +} + +async fn hard_kill_impl(pid: Pid, start_at: Instant, deadline: Duration) -> anyhow::Result<()> { tracing::info!( "Killing PID {} with status {}", pid, kill::get_sysinfo_status(pid) + .map(|s| s.to_string()) .as_deref() .unwrap_or("<unknown>") ); - let handle = kill::kill(pid)?; + let Some(handle) = kill::kill(pid)? else { + return Ok(()); + }; let timestamp_after_kill = Instant::now(); while start_at.elapsed() < deadline { if handle.has_exited()? { - return Ok(KillResponse { pid }); + return Ok(()); } tokio::time::sleep(Duration::from_millis(100)).await; } // Last chance: we do logging this time. - let status = kill::get_sysinfo_status(pid); + let status = kill::get_sysinfo_status(pid).map(|s| s.to_string()); let status = status.unwrap_or_else(|| "<unknown>".to_owned()); if handle.has_exited()? { - return Ok(KillResponse { pid }); + return Ok(()); } - Err(KillError::DidNotDie(pid, timestamp_after_kill.elapsed(), status).into()) -} - -#[cfg(unix)] -mod os_specific { - - use std::time::Duration; - - use sysinfo::Process; - use sysinfo::ProcessExt; - - pub(super) fn process_creation_time(process: &Process) -> Option<Duration> { - // Returns process creation time with 1 second precision. - Some(Duration::from_secs(process.start_time())) - } -} - -#[cfg(windows)] -mod os_specific { - use std::time::Duration; - - use sysinfo::PidExt; - use sysinfo::Process; - use sysinfo::ProcessExt; - - pub(super) fn process_creation_time(process: &Process) -> Option<Duration> { - buck2_wrapper_common::kill::os_specific::process_creation_time(process.pid().as_u32()) - } + let elapsed_s = timestamp_after_kill.elapsed().as_secs_f32(); + Err(buck2_error_anyhow!( + [ErrorTag::DaemonWontDieFromKill], + "Daemon pid {pid} did not die after kill within {elapsed_s:.1}s (status: {status})" + )) } fn get_callers_for_kill() -> Vec<String> { /// Add a process to our parts and return its parent PID. fn push_process( - pid: Pid, + pid: sysinfo::Pid, creation_time: Duration, system: &mut System, process_tree: &mut Vec<String>, - ) -> Option<(Pid, Duration)> { + ) -> Option<(sysinfo::Pid, Duration)> { // Specifics about this process need to be refreshed by this time.
let proc = system.process(pid)?; - let title = shlex::join(proc.cmd().iter().map(|s| s.as_str())); + let title = + shlex::try_join(proc.cmd().iter().map(|s| s.as_str())).expect("Null byte unexpected"); process_tree.push(title); let parent_pid = proc.parent()?; system.refresh_process_specifics(parent_pid, ProcessRefreshKind::new()); let parent_proc = system.process(parent_pid)?; - let parent_creation_time = os_specific::process_creation_time(parent_proc)?; + let parent_creation_time = kill::process_creation_time(parent_proc)?; if parent_creation_time <= creation_time { Some((parent_pid, parent_creation_time)) } else { @@ -202,11 +232,11 @@ fn get_callers_for_kill() -> Vec { let mut system = System::new(); let mut process_tree = Vec::new(); - let pid = Pid::from_u32(std::process::id()); + let pid = sysinfo::Pid::from_u32(std::process::id()); system.refresh_process_specifics(pid, ProcessRefreshKind::new()); let mut curr = system .process(pid) - .and_then(|proc| Some((pid, os_specific::process_creation_time(proc)?))); + .and_then(|proc| Some((pid, kill::process_creation_time(proc)?))); while let Some((pid, creation_time)) = curr { curr = push_process(pid, creation_time, &mut system, &mut process_tree); } diff --git a/app/buck2_client_ctx/src/daemon/client/mod.rs b/app/buck2_client_ctx/src/daemon/client/mod.rs deleted file mode 100644 index 91a0704cd466a..0000000000000 --- a/app/buck2_client_ctx/src/daemon/client/mod.rs +++ /dev/null @@ -1,615 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::fs::create_dir_all; -use std::fs::File; -use std::time::Duration; - -use anyhow::Context; -use async_trait::async_trait; -use buck2_cli_proto::daemon_api_client::*; -use buck2_cli_proto::new_generic::NewGenericRequest; -use buck2_cli_proto::new_generic::NewGenericResponse; -use buck2_cli_proto::*; -use buck2_common::daemon_dir::DaemonDir; -use buck2_core::fs::fs_util; -use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; -use fs4::FileExt; -use futures::future::BoxFuture; -use futures::pin_mut; -use futures::stream; -use futures::Stream; -use futures::StreamExt; -use futures::TryStreamExt; -use tonic::codegen::InterceptedService; -use tonic::transport::Channel; -use tonic::Request; -use tonic::Status; - -use crate::command_outcome::CommandOutcome; -use crate::console_interaction_stream::ConsoleInteractionStream; -use crate::daemon::client::connect::BuckAddAuthTokenInterceptor; -use crate::events_ctx::EventsCtx; -use crate::events_ctx::FileTailers; -use crate::events_ctx::PartialResultCtx; -use crate::events_ctx::PartialResultHandler; -use crate::stream_value::StreamValue; -use crate::subscribers::observer::ErrorCause; -use crate::subscribers::observer::ErrorObserver; - -pub mod connect; -pub mod kill; - -use crate::startup_deadline::StartupDeadline; - -#[derive(Debug, thiserror::Error)] -enum LifecycleError { - #[error("Missing `{}` file in `{}` directory", BuckdLifecycleLock::BUCKD_LIFECYCLE, _0.display())] - MissingLifecycle(AbsNormPathBuf), -} - -/// We need to make sure that all calls to the daemon in buckd flush the tailers after completion. -/// The connector wraps all buckd calls with flushing. 
-pub struct BuckdClientConnector<'a> { - client: BuckdClient<'a>, -} - -impl<'a> BuckdClientConnector<'a> { - pub fn with_flushing(&mut self) -> FlushingBuckdClient<'_, 'a> { - FlushingBuckdClient { - inner: &mut self.client, - } - } - - pub fn daemon_constraints(&self) -> &buck2_cli_proto::DaemonConstraints { - &self.client.constraints - } - - pub fn error_observers(&self) -> impl Iterator { - self.client - .events_ctx - .subscribers - .iter() - .filter_map(|s| s.as_error_observer()) - } - - pub fn collect_error_cause(&self) -> ErrorCause { - if let Some(obs) = self.error_observers().next() { - return obs.error_cause(); - } - - ErrorCause::Unknown - } -} - -pub struct BuckdLifecycleLock { - daemon_dir: DaemonDir, - lock_file: File, -} - -impl BuckdLifecycleLock { - const BUCKD_LIFECYCLE: &'static str = "buckd.lifecycle"; - - pub async fn lock_with_timeout( - daemon_dir: DaemonDir, - deadline: StartupDeadline, - ) -> anyhow::Result { - create_dir_all(&daemon_dir.path)?; - let lifecycle_path = daemon_dir.path.as_path().join(Self::BUCKD_LIFECYCLE); - let file = File::create(lifecycle_path)?; - deadline - .retrying( - "locking buckd lifecycle", - Duration::from_millis(5), - Duration::from_millis(100), - async || Ok(file.try_lock_exclusive()?), - ) - .await?; - - Ok(BuckdLifecycleLock { - lock_file: file, - daemon_dir, - }) - } - - /// Remove everything except `buckd.lifecycle` file which is the lock file. - pub fn clean_daemon_dir(&self) -> anyhow::Result<()> { - let mut seen_lifecycle = false; - for p in fs_util::read_dir(&self.daemon_dir.path)? { - let p = p?; - if p.file_name() == Self::BUCKD_LIFECYCLE { - seen_lifecycle = true; - continue; - } - fs_util::remove_all(p.path())?; - } - if !seen_lifecycle { - // Self-check. - return Err(LifecycleError::MissingLifecycle(self.daemon_dir.path.clone()).into()); - } - Ok(()) - } - - pub fn daemon_dir(&self) -> &DaemonDir { - &self.daemon_dir - } -} - -impl Drop for BuckdLifecycleLock { - fn drop(&mut self) { - self.lock_file - .unlock() - .expect("Unexpected failure to unlock buckd.lifecycle file.") - } -} - -/// This provides a thin wrapper around the proto-generated DaemonApiClient and hides -/// some of the complexity/verbosity of making calls with that. For example, the user -/// doesn't need to deal with tonic::Response/Request and this may provide functions -/// that take more primitive types than the protobuf structure itself. -pub struct BuckdClient<'a> { - client: DaemonApiClient>, - constraints: buck2_cli_proto::DaemonConstraints, - daemon_dir: DaemonDir, - // TODO(brasselsprouts): events_ctx should own tailers - tailers: Option, - pub(crate) events_ctx: EventsCtx<'a>, -} - -#[derive(Debug, thiserror::Error)] -enum GrpcToStreamError { - #[error("buck daemon returned an empty CommandProgress")] - EmptyCommandProgress, -} - -/// Translates a tonic streaming response into a stream of StreamValues, the set of things that can flow across the gRPC -/// event stream. 
-fn grpc_to_stream( - response: anyhow::Result>>, -) -> impl Stream> { - let stream = match response { - Ok(response) => response.into_inner(), - Err(e) => return futures::stream::once(futures::future::ready(Err(e))).left_stream(), - }; - - let stream = stream - .map_ok(|e| stream::iter(e.messages.into_iter().map(anyhow::Ok))) - .try_flatten(); - - stream::unfold(stream, |mut stream| async { - let msg = match stream.try_next().await { - Ok(Some(msg)) => msg, - Ok(None) => return None, - Err(e) => return Some((Err(e), stream)), - }; - let value = match msg.progress { - Some(command_progress::Progress::Event(e)) => Some(Ok(StreamValue::Event(e))), - Some(command_progress::Progress::Result(res)) => Some(Ok(StreamValue::Result(res))), - Some(command_progress::Progress::PartialResult(res)) => { - Some(Ok(StreamValue::PartialResult(res))) - } - None => Some(Err(GrpcToStreamError::EmptyCommandProgress.into())), - }; - - value.map(|v| (v, stream)) - }) - .right_stream() -} - -impl<'a> BuckdClient<'a> { - fn open_tailers(&mut self) -> anyhow::Result<()> { - let tailers = FileTailers::new(&self.daemon_dir)?; - self.tailers = Some(tailers); - - Ok(()) - } - - /// Some commands stream events back from the server. - /// For these commands, we want to be able to manipulate CLI state. - async fn stream<'i, T, Res, Handler, Command>( - &mut self, - command: Command, - request: T, - partial_result_handler: &mut Handler, - console_interaction: Option>, - ) -> anyhow::Result> - where - Command: for<'b> FnOnce( - &'b mut DaemonApiClient>, - Request, - ) -> BoxFuture< - 'b, - Result>, Status>, - >, - Res: TryFrom, - Handler: PartialResultHandler, - { - let Self { - client, events_ctx, .. - } = self; - - let response = command(client, Request::new(request)) - .await - .context("Error dispatching request"); - let stream = grpc_to_stream(response); - pin_mut!(stream); - events_ctx - .unpack_stream( - partial_result_handler, - stream, - self.tailers.take(), - console_interaction, - ) - .await - } - - pub async fn status(&mut self, snapshot: bool) -> anyhow::Result { - let outcome = self - .events_ctx - // Safe to unwrap tailers here because they are instantiated prior to a command being called. - .unpack_oneshot(&mut self.tailers, || { - self.client.status(Request::new(StatusRequest { snapshot })) - }) - .await; - // TODO(nmj): We have a number of things that wish to use status() and return an anyhow::Result, - // for now we'll just turn a "CommandMessage" into a error, but that's really not what we - // want long term. - match outcome? 
{ - CommandOutcome::Success(r) => Ok(r), - CommandOutcome::Failure(_) => { - Err(anyhow::anyhow!("Unexpected failure message in status()")) - } - } - } - - pub async fn set_log_filter(&mut self, req: SetLogFilterRequest) -> anyhow::Result<()> { - self.client.set_log_filter(Request::new(req)).await?; - - Ok(()) - } -} - -pub struct FlushingBuckdClient<'a, 'b> { - inner: &'a mut BuckdClient<'b>, -} - -impl<'a, 'b> FlushingBuckdClient<'a, 'b> { - fn enter(&mut self) -> anyhow::Result<()> { - self.inner.open_tailers()?; - Ok(()) - } - - async fn exit(&mut self) -> anyhow::Result<()> { - self.inner.events_ctx.flush(&mut self.inner.tailers).await?; - - Ok(()) - } -} - -pub enum NoPartialResult {} - -impl TryFrom for NoPartialResult { - type Error = buck2_cli_proto::partial_result::PartialResult; - - fn try_from(v: buck2_cli_proto::partial_result::PartialResult) -> Result { - Err(v) - } -} - -pub struct NoPartialResultHandler; - -#[async_trait] -impl PartialResultHandler for NoPartialResultHandler { - type PartialResult = NoPartialResult; - - async fn handle_partial_result( - &mut self, - _ctx: PartialResultCtx<'_, '_>, - partial_res: Self::PartialResult, - ) -> anyhow::Result<()> { - match partial_res {} - } -} - -/// Receives StdoutBytes, writes them to stdout. -pub struct StdoutPartialResultHandler; - -#[async_trait] -impl PartialResultHandler for StdoutPartialResultHandler { - type PartialResult = buck2_cli_proto::StdoutBytes; - - async fn handle_partial_result( - &mut self, - mut ctx: PartialResultCtx<'_, '_>, - partial_res: Self::PartialResult, - ) -> anyhow::Result<()> { - ctx.stdout(&partial_res.data).await - } -} - -/// Implement a streaming method with full event reporting. -macro_rules! stream_method { - ($method: ident, $req: ty, $res: ty, $message: ty) => { - stream_method!($method, $method, $req, $res, $message); - }; - - ($method: ident, $grpc_method: ident, $req: ty, $res: ty, $message: ty) => { - pub async fn $method( - &mut self, - req: $req, - console_interaction: Option>, - handler: &mut impl PartialResultHandler, - ) -> anyhow::Result> { - self.enter()?; - let res = self - .inner - .stream( - |d, r| Box::pin(DaemonApiClient::$grpc_method(d, r)), - req, - // For now we only support handlers that can be constructed like so, and we - // don't let anything go out. Eventually if we wanted to stream structured - // data, that could change. - handler, - console_interaction, - ) - .await; - self.exit().await?; - res - } - }; -} - -/// Implement a bi-directional streaming method with full event reporting. -macro_rules! bidirectional_stream_method { - ($method: ident, $req: ty, $res: ty, $message: ty) => { - bidirectional_stream_method!($method, $method, $req, $res, $message); - }; - - ($method: ident, $grpc_method: ident, $req: ty, $res: ty, $message: ty) => { - pub async fn $method( - &mut self, - context: ClientContext, - requests: impl Stream + Send + Sync + 'static, - handler: &mut impl PartialResultHandler, - ) -> anyhow::Result> { - self.enter()?; - let req = create_client_stream(context, requests); - let res = self - .inner - .stream( - |d, r| Box::pin(DaemonApiClient::$method(d, r)), - req, - handler, - None, - ) - .await; - self.exit().await?; - res - } - }; -} - -/// Implement a oneshot method with full event reporting. -macro_rules! 
oneshot_method { - ($method: ident, $req: ty, $res: ty) => { - oneshot_method!($method, $method, $req, $res); - }; - - ($method: ident, $grpc_method: ident, $req: ty, $res: ty) => { - pub async fn $method(&mut self, req: $req) -> anyhow::Result> { - self.enter()?; - let res = self - .inner - .events_ctx - .unpack_oneshot(&mut self.inner.tailers, || { - self.inner.client.$method(Request::new(req)) - }) - .await; - self.exit().await?; - res - } - }; -} - -/// Implement a method that does not produce a CommandResult and does not produce any events. -macro_rules! debug_method { - ($method: ident, $req: ty, $res: ty) => { - debug_method!($method, $method, $req, $res); - }; - - ($method: ident, $grpc_method: ident, $req: ty, $res: ty) => { - pub async fn $method(&mut self, req: $req) -> anyhow::Result<$res> { - self.enter()?; - let out = self.inner.client.$method(Request::new(req)).await; - self.exit().await?; - Ok(out?.into_inner()) - } - }; -} - -/// Wrap a method that exists on the BuckdClient, with flushing. -macro_rules! wrap_method { - ($method: ident ($($param: ident : $param_type: ty),*), $res: ty) => { - pub async fn $method(&mut self, $($param: $param_type)*) -> anyhow::Result<$res> { - self.enter()?; - let out = self - .inner - .$method($($param)*) - .await; - self.exit().await?; - out - } - }; - } - -impl<'a, 'b> FlushingBuckdClient<'a, 'b> { - stream_method!( - aquery, - AqueryRequest, - AqueryResponse, - buck2_cli_proto::StdoutBytes - ); - stream_method!( - cquery, - CqueryRequest, - CqueryResponse, - buck2_cli_proto::StdoutBytes - ); - stream_method!( - uquery, - UqueryRequest, - UqueryResponse, - buck2_cli_proto::StdoutBytes - ); - stream_method!( - targets, - TargetsRequest, - TargetsResponse, - buck2_cli_proto::StdoutBytes - ); - stream_method!( - targets_show_outputs, - TargetsRequest, - TargetsShowOutputsResponse, - NoPartialResult - ); - stream_method!( - ctargets, - ConfiguredTargetsRequest, - ConfiguredTargetsResponse, - NoPartialResult - ); - stream_method!(build, BuildRequest, BuildResponse, NoPartialResult); - stream_method!(bxl, BxlRequest, BxlResponse, buck2_cli_proto::StdoutBytes); - stream_method!(test, TestRequest, TestResponse, NoPartialResult); - stream_method!(install, InstallRequest, InstallResponse, NoPartialResult); - stream_method!( - audit, - GenericRequest, - GenericResponse, - buck2_cli_proto::StdoutBytes - ); - stream_method!( - starlark, - GenericRequest, - GenericResponse, - buck2_cli_proto::StdoutBytes - ); - stream_method!( - new_generic_impl, - NewGenericRequestMessage, - NewGenericResponseMessage, - NoPartialResult - ); - stream_method!( - clean_stale, - CleanStaleRequest, - CleanStaleResponse, - NoPartialResult - ); - stream_method!( - file_status, - FileStatusRequest, - GenericResponse, - buck2_cli_proto::StdoutBytes - ); - stream_method!( - unstable_docs, - UnstableDocsRequest, - UnstableDocsResponse, - NoPartialResult - ); - stream_method!( - profile, - profile2, - ProfileRequest, - ProfileResponse, - NoPartialResult - ); - stream_method!( - allocative, - AllocativeRequest, - AllocativeResponse, - NoPartialResult - ); - - bidirectional_stream_method!(lsp, LspRequest, LspResponse, LspMessage); - bidirectional_stream_method!(dap, DapRequest, DapResponse, DapMessage); - bidirectional_stream_method!( - subscription, - SubscriptionRequestWrapper, - SubscriptionCommandResponse, - SubscriptionResponseWrapper - ); - - oneshot_method!(flush_dep_files, FlushDepFilesRequest, GenericResponse); - - debug_method!(unstable_crash, UnstableCrashRequest, 
UnstableCrashResponse); - debug_method!(segfault, SegfaultRequest, SegfaultResponse); - debug_method!( - unstable_heap_dump, - UnstableHeapDumpRequest, - UnstableHeapDumpResponse - ); - debug_method!( - unstable_allocator_stats, - UnstableAllocatorStatsRequest, - UnstableAllocatorStatsResponse - ); - debug_method!( - unstable_dice_dump, - UnstableDiceDumpRequest, - UnstableDiceDumpResponse - ); - - wrap_method!(status(snapshot: bool), StatusResponse); - wrap_method!(set_log_filter(log_filter: SetLogFilterRequest), ()); - stream_method!(trace_io, TraceIoRequest, TraceIoResponse, NoPartialResult); - - pub async fn new_generic( - &mut self, - context: buck2_cli_proto::ClientContext, - req: NewGenericRequest, - stdin: Option>, - ) -> anyhow::Result> { - let req = serde_json::to_string(&req).context("Could not serialize `NewGenericRequest`")?; - let req = buck2_cli_proto::NewGenericRequestMessage { - context: Some(context), - new_generic_request: req, - }; - let command_outcome: CommandOutcome = self - .new_generic_impl(req, stdin, &mut NoPartialResultHandler) - .await?; - match command_outcome { - CommandOutcome::Success(resp) => { - let resp = serde_json::from_str(&resp.new_generic_response) - .context("Could not deserialize `NewGenericResponse`")?; - Ok(CommandOutcome::Success(resp)) - } - CommandOutcome::Failure(code) => Ok(CommandOutcome::Failure(code)), - } - } -} - -/// Create a stream that is sent over as a parameter via GRPC to the daemon. -/// -/// Ensures that we send a proper ClientContext message, and that the inner type is wrapped -/// properly into a [`StreamingRequest`] -fn create_client_stream< - T: Into, - InStream: Stream + Send + Sync + 'static, ->( - context: ClientContext, - requests: InStream, -) -> impl Stream + Send + Sync + 'static { - let init_req = StreamingRequest { - request: Some(streaming_request::Request::Context(context)), - }; - stream::once(async move { init_req }).chain(requests.map(|request| request.into())) -} diff --git a/app/buck2_client_ctx/src/daemon/daemon_windows.rs b/app/buck2_client_ctx/src/daemon/daemon_windows.rs index 0c3bbf4ecbfe9..25b44a990557e 100644 --- a/app/buck2_client_ctx/src/daemon/daemon_windows.rs +++ b/app/buck2_client_ctx/src/daemon/daemon_windows.rs @@ -17,7 +17,7 @@ pub(crate) fn spawn_background_process_on_windows<'a>( _args: impl IntoIterator, _daemon_env_vars: &[(&OsStr, &OsStr)], ) -> anyhow::Result<()> { - #[derive(Debug, thiserror::Error)] + #[derive(Debug, buck2_error::Error)] #[error("not Windows")] struct NotWindows; @@ -35,12 +35,12 @@ pub(crate) fn spawn_background_process_on_windows<'a>( use std::ffi::c_void; use std::ffi::OsString; use std::io; - use std::iter; use std::mem; use std::os::windows::ffi::OsStrExt; use std::ptr; use anyhow::Context; + use buck2_util::os::win::os_str::os_str_to_wide_null_term; use winapi::shared::minwindef::DWORD; use winapi::shared::minwindef::FALSE; use winapi::um::handleapi::CloseHandle; @@ -55,10 +55,6 @@ pub(crate) fn spawn_background_process_on_windows<'a>( // doesn't allow to set 'bInheritHandles' to false. Without this waiting on // parent process will also wait on inherited handles of daemon process. 
- fn to_nullterm(s: &OsStr) -> Vec { - s.encode_wide().chain(iter::once(0)).collect() - } - // Translated from ArgvQuote at http://tinyurl.com/zmgtnls fn append_quoted(arg: &OsStr, cmdline: &mut Vec) { if !arg.is_empty() @@ -162,7 +158,8 @@ pub(crate) fn spawn_background_process_on_windows<'a>( } blk.push(0); - Ok((blk.as_mut_ptr() as *mut c_void, blk.into_boxed_slice())) + let mut blk = blk.into_boxed_slice(); + Ok((blk.as_mut_ptr() as *mut c_void, blk)) } } @@ -180,14 +177,14 @@ pub(crate) fn spawn_background_process_on_windows<'a>( let status = unsafe { CreateProcessW( - to_nullterm(program).as_ptr(), // lpApplicationName - cmd.as_mut_ptr(), // lpCommandLine - ptr::null_mut(), // lpProcessAttributes - ptr::null_mut(), // lpThreadAttributes - FALSE, // bInheritHandles - creation_flags, // dwCreationFlags - envp, // lpEnvironment - to_nullterm(cwd).as_ptr(), // lpCurrentDirectory + os_str_to_wide_null_term(program).as_ptr(), // lpApplicationName + cmd.as_mut_ptr(), // lpCommandLine + ptr::null_mut(), // lpProcessAttributes + ptr::null_mut(), // lpThreadAttributes + FALSE, // bInheritHandles + creation_flags, // dwCreationFlags + envp, // lpEnvironment + os_str_to_wide_null_term(cwd).as_ptr(), // lpCurrentDirectory &mut sinfo, &mut pinfo, ) diff --git a/app/buck2_client_ctx/src/daemon_constraints.rs b/app/buck2_client_ctx/src/daemon_constraints.rs index 0f8031b858946..1a8c84195dc37 100644 --- a/app/buck2_client_ctx/src/daemon_constraints.rs +++ b/app/buck2_client_ctx/src/daemon_constraints.rs @@ -7,11 +7,26 @@ * of this source tree. */ -use buck2_common::legacy_configs::init::DaemonStartupConfig; -use buck2_core::env_helper::EnvHelper; +use buck2_common::init::DaemonStartupConfig; +use buck2_core::buck2_env_anyhow; +use buck2_core::ci::ci_identifiers; use crate::version::BuckVersion; +/// Checks an environment variable to see if we were spawned by a buck daemon and if so, returns the +/// UUID of that daemon. +/// +/// This is used to detect nested invocations, but returning `Some` does not guarantee that this is +/// a nested invocation. +pub fn get_possibly_nested_invocation_daemon_uuid() -> Option { + // Intentionally don't use `buck2_env_anyhow!` because we don't want this showing up in help output + std::env::var("BUCK2_DAEMON_UUID").ok() +} + +/// Generates the daemon constraints *for the currently running daemon.* +/// +/// Note that this function is called *from the daemon* and represents the daemon's constraints - +/// the constraints that the client would like the daemon to have are generated separately. pub fn gen_daemon_constraints( daemon_startup_config: &DaemonStartupConfig, ) -> anyhow::Result { @@ -28,7 +43,14 @@ pub fn version() -> String { BuckVersion::get_unique_id().to_owned() } +/// Used to make sure that daemons are restarted between CI jobs if they don't properly clean up +/// after themselves. pub fn user_version() -> anyhow::Result> { - static SANDCASTLE_ID: EnvHelper = EnvHelper::new("SANDCASTLE_ID"); - Ok(SANDCASTLE_ID.get()?.cloned()) + // This shouldn't really be necessary, but we used to check it so we'll keep it for now. + if let Some(id) = buck2_env_anyhow!("SANDCASTLE_ID", applicability = internal)? 
diff --git a/app/buck2_client_ctx/src/events_ctx.rs b/app/buck2_client_ctx/src/events_ctx.rs
index e23aabac8b963..f5e86c98fd694 100644
--- a/app/buck2_client_ctx/src/events_ctx.rs
+++ b/app/buck2_client_ctx/src/events_ctx.rs
@@ -8,6 +8,7 @@
  */
 
 use std::ops::ControlFlow;
+use std::pin::pin;
 use std::sync::Arc;
 use std::time::SystemTime;
 
@@ -15,27 +16,27 @@ use anyhow::Context;
 use async_trait::async_trait;
 use buck2_cli_proto::command_result;
 use buck2_cli_proto::CommandResult;
-use buck2_common::daemon_dir::DaemonDir;
+use buck2_event_log::stream_value::StreamValue;
 use buck2_events::BuckEvent;
-use futures::stream::FuturesUnordered;
+use futures::stream;
 use futures::Future;
+use futures::FutureExt;
 use futures::Stream;
 use futures::StreamExt;
 use gazebo::prelude::VecExt;
-use thiserror::Error;
-use tokio::sync::mpsc;
-use tokio::sync::mpsc::UnboundedReceiver;
 
 use crate::client_cpu_tracker::ClientCpuTracker;
 use crate::command_outcome::CommandOutcome;
-use crate::console_interaction_stream::ConsoleInteraction;
 use crate::console_interaction_stream::ConsoleInteractionStream;
-use crate::console_interaction_stream::NoopConsoleInteraction;
-use crate::file_tailer::FileTailer;
-use crate::file_tailer::StdoutOrStderr;
-use crate::stream_value::StreamValue;
-use crate::subscribers::subscriber::EventSubscriber;
+use crate::console_interaction_stream::NoopSuperConsoleInteraction;
+use crate::console_interaction_stream::SuperConsoleInteraction;
+use crate::console_interaction_stream::SuperConsoleToggle;
+use crate::daemon::client::tonic_status_to_error;
+use crate::daemon::client::NoPartialResultHandler;
+use crate::exit_result::ExitResult;
+use crate::file_tailers::tailers::FileTailers;
 use crate::subscribers::subscriber::Tick;
+use crate::subscribers::subscribers::EventSubscribers;
 use crate::ticker::Ticker;
 
 /// Target number of self.tick() calls per second. These can be used by implementations for regular updates, for example
@@ -43,7 +44,7 @@ use crate::ticker::Ticker;
 /// Other than tick() calls, implementations will only be notified when new events arrive.
 const TICKS_PER_SECOND: u32 = 10;
 
-#[derive(Debug, Error)]
+#[derive(Debug, buck2_error::Error)]
 #[allow(clippy::large_enum_variant)]
 enum BuckdCommunicationError {
     #[error("call to daemon returned an unexpected result type. got `{0:?}`")]
@@ -52,6 +53,11 @@ enum BuckdCommunicationError {
     EmptyCommandResult,
     #[error("buck daemon request finished without returning a CommandResult")]
     MissingCommandResult,
+    #[error(
+        "The Buck2 daemon was shut down while executing your command. This happened because: {0}"
+    )]
+    #[buck2(tag = InterruptedByDaemonShutdown)]
+    InterruptedByDaemonShutdown(buck2_data::DaemonShutdown),
     #[error("buckd communication encountered an unexpected error `{0:?}`")]
     TonicError(tonic::Status),
 }
@@ -90,7 +96,8 @@ pub struct PartialResultCtx<'a, 'b> {
 impl<'a, 'b> PartialResultCtx<'a, 'b> {
     pub async fn stdout(&mut self, bytes: &[u8]) -> anyhow::Result<()> {
         self.inner
-            .handle_subscribers(|subscriber| subscriber.handle_output(bytes))
+            .subscribers
+            .for_each_subscriber(|subscriber| subscriber.handle_output(bytes))
             .await
     }
 }
@@ -98,7 +105,7 @@ impl<'a, 'b> PartialResultCtx<'a, 'b> {
 /// Manages incoming event streams from the daemon for the buck2 client and
 /// forwards them to the appropriate subscribers registered on this struct
 pub struct EventsCtx<'a> {
-    pub(crate) subscribers: Vec<Box<dyn EventSubscriber + 'a>>,
+    pub(crate) subscribers: EventSubscribers<'a>,
     ticker: Ticker,
     client_cpu_tracker: ClientCpuTracker,
 }
@@ -109,14 +116,8 @@ pub enum FileTailerEvent {
     Stdout(Vec<u8>),
     Stderr(Vec<u8>),
 }
 
-pub struct FileTailers {
-    _stdout_tailer: Option<FileTailer>,
-    _stderr_tailer: Option<FileTailer>,
-    stream: UnboundedReceiver<FileTailerEvent>,
-}
-
 impl<'a> EventsCtx<'a> {
-    pub fn new(subscribers: Vec<Box<dyn EventSubscriber + 'a>>) -> Self {
+    pub fn new(subscribers: EventSubscribers<'a>) -> Self {
         Self {
             subscribers,
             ticker: Ticker::new(TICKS_PER_SECOND),
@@ -171,8 +172,12 @@ impl<'a> EventsCtx<'a> {
     async fn dispatch_tailer_event(&mut self, event: FileTailerEvent) -> anyhow::Result<()> {
         match event {
-            FileTailerEvent::Stdout(stdout) => self.handle_tailer_stdout(&stdout).await,
-            FileTailerEvent::Stderr(stderr) => self.handle_tailer_stderr(&stderr).await,
+            FileTailerEvent::Stdout(out) | FileTailerEvent::Stderr(out) => {
+                // Sending daemon stdout to stderr.
+                // Daemon is not supposed to write anything to stdout.
+                // But if daemon does, it should not be used as standard output of buck2.
+                self.handle_tailer_stderr(&out).await
+            }
         }
     }
 
@@ -184,11 +189,11 @@ impl<'a> EventsCtx<'a> {
         mut console_interaction: Option<ConsoleInteractionStream<'_>>,
     ) -> anyhow::Result<CommandResult>
     where
-        S: Stream<Item = anyhow::Result<StreamValue>> + Unpin,
+        S: Stream<Item = anyhow::Result<StreamValue>>,
         Handler: PartialResultHandler,
     {
-        let mut noop_console_interaction = NoopConsoleInteraction;
-        let console_interaction: &mut dyn ConsoleInteraction = match &mut console_interaction {
+        let mut noop_console_interaction = NoopSuperConsoleInteraction;
+        let console_interaction: &mut dyn SuperConsoleInteraction = match &mut console_interaction {
             Some(i) => i as _,
             None => &mut noop_console_interaction as _,
         };
@@ -203,7 +208,8 @@ impl<'a> EventsCtx<'a> {
 
         let mut tailers = tailers.unwrap_or_else(FileTailers::empty);
 
-        let mut stream = stream.ready_chunks(1000);
+        let stream = stream.ready_chunks(1000);
+        let mut stream = pin!(stream);
 
         // NOTE: When unpacking the stream we capture any shutdown event we encounter. If we fail
        // to unpack the stream to completion, we'll use that later.
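// Illustrative sketch, not part of the patch: the `+ Unpin` bound can be dropped above because
// `std::pin::pin!` pins the stream on the stack, and a `Pin<&mut S>` implements `Stream` (and is
// itself `Unpin`) even when `S` is not. The same pattern in a self-contained form:
use futures::stream::{self, StreamExt};

async fn drain() {
    // Combinator-wrapped streams such as `ready_chunks` are not guaranteed to be `Unpin`.
    let chunks = stream::iter(0..10).ready_chunks(4);
    // Pin on the stack instead of requiring `S: Unpin` or boxing with `Box::pin`.
    let mut chunks = std::pin::pin!(chunks);
    while let Some(batch) = chunks.next().await {
        println!("{batch:?}"); // e.g. [0, 1, 2, 3]
    }
}

fn main() {
    futures::executor::block_on(drain());
}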
@@ -226,21 +232,18 @@ impl<'a> EventsCtx<'a> {
                 Some(event) = tailers.stream.recv() => {
                     self.dispatch_tailer_event(event).await?;
                 }
-                c = console_interaction.char() => {
-                    self.handle_console_interaction(c?).await?;
+                c = console_interaction.toggle() => {
+                    self.handle_console_interaction(&c?).await?;
                 }
                 tick = self.ticker.tick() => {
                     self.tick(&tick).await?;
                 }
-                else => {
-                    unreachable!("The tick branch will always take precedence over an else case.");
-                }
             }
         }
     };
 
-    let flush_result = self.flush(&mut Some(tailers)).await;
-    let exit_result = self.handle_exit().await;
+    let flush_result = self.flush(Some(tailers)).await;
+    let exit_result = self.subscribers.handle_exit().await;
 
     let command_result = match (command_result, shutdown) {
         (Ok(r), _) => r,
@@ -253,11 +256,7 @@ impl<'a> EventsCtx<'a> {
             // certain) the daemon shutdown is the cause for us to simply claim it is.
             tracing::debug!("Original unpack_stream error was: {:#}", e);
 
-            return Err(anyhow::anyhow!(
-                "The Buck2 daemon was shut down while executing your command. \
-                This happened because: {}",
-                shutdown,
-            ));
+            return Err(BuckdCommunicationError::InterruptedByDaemonShutdown(shutdown).into());
         }
         (Err(e), None) => return Err(e),
     };
@@ -278,7 +277,7 @@ impl<'a> EventsCtx<'a> {
         console_interaction: Option<ConsoleInteractionStream<'_>>,
     ) -> anyhow::Result<CommandOutcome<Res>>
     where
-        S: Stream<Item = anyhow::Result<StreamValue>> + Unpin,
+        S: Stream<Item = anyhow::Result<StreamValue>>,
         Res: TryFrom<command_result::Result, Error = command_result::Result>,
         Handler: PartialResultHandler,
     {
@@ -292,16 +291,6 @@ impl<'a> EventsCtx<'a> {
         }
     }
 
-    pub async fn flushing_tailers<R, Fut: Future<Output = R>>(
-        &mut self,
-        tailers: &mut Option<FileTailers>,
-        f: impl FnOnce() -> Fut,
-    ) -> anyhow::Result<R> {
-        let res = f().await;
-        self.flush(tailers).await?;
-        Ok(res)
-    }
-
     /// Unpack a single `CommandResult`, log any failures if necessary, and convert it to a
     /// `CommandOutcome`
     pub async fn unpack_oneshot<
         Res: TryFrom<command_result::Result, Error = command_result::Result>,
         Fut: Future<Output = Result<tonic::Response<CommandResult>, tonic::Status>>,
     >(
         &mut self,
-        tailers: &mut Option<FileTailers>,
-        f: impl FnOnce() -> Fut,
+        tailers: Option<FileTailers>,
+        f: Fut,
     ) -> anyhow::Result<CommandOutcome<Res>> {
-        let res = self.flushing_tailers(tailers, f).await?;
-        // important - do not early return before flushing the buffers!
-        let inner = res?.into_inner();
-        self.handle_command_result(&inner).await?;
-
-        convert_result(inner)
-    }
-
-    /// Helper method to abstract the process of applying an `EventSubscriber` method to all of the subscribers.
-    /// Quits on the first error encountered.
-    async fn handle_subscribers<'b, Fut>(
-        &'b mut self,
-        f: impl FnMut(&'b mut Box<dyn EventSubscriber + 'a>) -> Fut,
-    ) -> anyhow::Result<()>
-    where
-        Fut: Future<Output = anyhow::Result<()>> + 'b,
-    {
-        let mut futures: FuturesUnordered<_> = self.subscribers.iter_mut().map(f).collect();
-        while let Some(res) = futures.next().await {
-            res?;
-        }
-        Ok(())
+        let stream = stream::once(f.map(|result| {
+            result
+                .map(|command_result| StreamValue::Result(Box::new(command_result.into_inner())))
+                .map_err(tonic_status_to_error)
+        }));
+        self.unpack_stream(&mut NoPartialResultHandler, stream, tailers, None)
+            .await
     }
 
     async fn handle_error_owned(&mut self, error: anyhow::Error) -> anyhow::Error {
+        let error: buck2_error::Error = error.into();
         let result = self
-            .handle_subscribers(|subscriber| subscriber.handle_error(&error))
+            .subscribers
+            .for_each_subscriber(|subscriber| subscriber.handle_error(&error))
             .await;
         match result {
-            Ok(()) => error,
+            Ok(()) => error.into(),
             Err(e) => EventsCtxError::WrappedStreamError {
-                source: error,
+                source: error.into(),
                 other: e,
             }
             .into(),
         }
     }
 
-    pub async fn flush(&mut self, tailers: &mut Option<FileTailers>) -> anyhow::Result<()> {
-        let tailers = match tailers.take() {
-            Some(tailers) => tailers,
-            None => return Ok(()),
+    pub async fn flush(&mut self, tailers: Option<FileTailers>) -> anyhow::Result<()> {
+        let Some(tailers) = tailers else {
+            return Ok(());
         };
         let mut streams = tailers.stop_reading();
 
@@ -385,7 +360,9 @@ fn convert_result
 ) -> anyhow::Result<CommandOutcome<R>> {
     match value.result {
-        Some(command_result::Result::Error(_)) => Ok(CommandOutcome::Failure(None)),
+        Some(command_result::Result::Error(buck2_cli_proto::CommandError { errors })) => {
+            Ok(CommandOutcome::Failure(ExitResult::from_errors(&errors)))
+        }
         Some(value) => match value.try_into() {
             Ok(v) => Ok(CommandOutcome::Success(v)),
             Err(res) => Err(BuckdCommunicationError::UnexpectedResultType(res).into()),
@@ -395,20 +372,20 @@ fn convert_result
 impl<'a> EventsCtx<'a> {
-    async fn handle_tailer_stdout(&mut self, raw_output: &[u8]) -> anyhow::Result<()> {
-        self.handle_subscribers(|subscriber| subscriber.handle_output(raw_output))
-            .await
-    }
-
     async fn handle_tailer_stderr(&mut self, stderr: &[u8]) -> anyhow::Result<()> {
         let stderr = String::from_utf8_lossy(stderr);
         let stderr = stderr.trim_end();
-        self.handle_subscribers(|subscriber| subscriber.handle_tailer_stderr(stderr))
+        self.subscribers
+            .for_each_subscriber(|subscriber| subscriber.handle_tailer_stderr(stderr))
             .await
     }
 
-    async fn handle_console_interaction(&mut self, c: char) -> anyhow::Result<()> {
-        self.handle_subscribers(|subscriber| subscriber.handle_console_interaction(c))
+    async fn handle_console_interaction(
+        &mut self,
+        toggle: &Option<SuperConsoleToggle>,
+    ) -> anyhow::Result<()> {
+        self.subscribers
+            .for_each_subscriber(|subscriber| subscriber.handle_console_interaction(toggle))
             .await
     }
 
@@ -443,7 +420,8 @@ impl<'a> EventsCtx<'a> {
             }
             Arc::new(event)
         });
-        self.handle_subscribers(|subscriber| subscriber.handle_events(&events))
+        self.subscribers
+            .for_each_subscriber(|subscriber| subscriber.handle_events(&events))
             .await
     }
 
@@ -451,7 +429,8 @@ impl<'a> EventsCtx<'a> {
         &mut self,
         result: &buck2_cli_proto::CommandResult,
     ) -> anyhow::Result<()> {
-        self.handle_subscribers(|subscriber| subscriber.handle_command_result(result))
+        self.subscribers
+            .for_each_subscriber(|subscriber| subscriber.handle_command_result(result))
             .await
     }
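// Illustrative sketch, not part of the patch: `unpack_oneshot` above reuses the streaming path
// by turning a single response future into a one-element stream via `stream::once` + `map`. The
// same adapter with toy types standing in for tonic's response and error types:
use futures::{stream, FutureExt, StreamExt};

fn main() {
    futures::executor::block_on(async {
        // Stand-in for the daemon RPC future; the real code maps into StreamValue::Result.
        let fut = async { Ok::<u32, String>(42) };
        // Future<Output = Result<T, E>> becomes Stream<Item = Result<U, E>> with one item.
        let s = stream::once(fut.map(|result| result.map(|v| v * 2)));
        let mut s = std::pin::pin!(s); // async blocks are !Unpin, so pin before .next()
        assert_eq!(s.next().await, Some(Ok(84)));
    });
}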
@@ -459,58 +438,13 @@ impl<'a> EventsCtx<'a> {
     /// A subscriber will have the opportunity to do an arbitrary process at a reliable interval.
     /// In particular, this is crucial for superconsole so that it can draw itself consistently.
     async fn tick(&mut self, tick: &Tick) -> anyhow::Result<()> {
-        self.handle_subscribers(|subscriber| subscriber.tick(tick))
+        self.subscribers
+            .for_each_subscriber(|subscriber| subscriber.tick(tick))
             .await
     }
-
-    async fn handle_exit(&mut self) -> anyhow::Result<()> {
-        let mut r = Ok(());
-        for subscriber in &mut self.subscribers {
-            // Exit all subscribers, do not stop on first one.
-            let subscriber_err = subscriber.exit().await;
-            if r.is_ok() {
-                // Keep first error.
-                r = subscriber_err;
-            }
-        }
-        r
-    }
-}
-
-impl FileTailers {
-    pub fn new(daemon_dir: &DaemonDir) -> anyhow::Result<Self> {
-        let (tx, rx) = mpsc::unbounded_channel();
-        let stdout_tailer = FileTailer::tail_file(
-            daemon_dir.buckd_stdout(),
-            tx.clone(),
-            StdoutOrStderr::Stdout,
-        )?;
-        let stderr_tailer =
-            FileTailer::tail_file(daemon_dir.buckd_stderr(), tx, StdoutOrStderr::Stderr)?;
-        let this = Self {
-            _stdout_tailer: Some(stdout_tailer),
-            _stderr_tailer: Some(stderr_tailer),
-            stream: rx,
-        };
-        Ok(this)
-    }
-
-    pub fn empty() -> FileTailers {
-        FileTailers {
-            _stdout_tailer: None,
-            _stderr_tailer: None,
-            // Empty stream.
-            stream: mpsc::unbounded_channel().1,
-        }
-    }
-
-    pub fn stop_reading(self) -> UnboundedReceiver<FileTailerEvent> {
-        // by dropping the tailers, they shut themselves down.
-        self.stream
-    }
-}
 
-#[derive(Error, Debug)]
+#[derive(buck2_error::Error, Debug)]
 pub enum EventsCtxError {
     #[error("While propagating error:\n{source:#?}, another error was detected:\n{other:#?}")]
     WrappedStreamError {
diff --git a/app/buck2_client_ctx/src/exit_result.rs b/app/buck2_client_ctx/src/exit_result.rs
index 4c4a635e6bfd4..c643c2c4d216d 100644
--- a/app/buck2_client_ctx/src/exit_result.rs
+++ b/app/buck2_client_ctx/src/exit_result.rs
@@ -8,20 +8,25 @@
  */
 
 use std::convert::Infallible;
-use std::fmt::Debug;
+use std::ffi::OsString;
+use std::fmt;
 use std::fmt::Display;
 use std::io;
 use std::io::Write;
 use std::ops::FromResidual;
 use std::process::Command;
 
+use buck2_core::fs::fs_util;
+use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf;
 use buck2_core::fs::paths::abs_path::AbsPathBuf;
+use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath;
+use buck2_error::ErrorTag;
+use buck2_wrapper_common::invocation_id::TraceId;
 
-use crate::subscribers::observer::ErrorCause;
-
+#[derive(Debug)]
 pub struct ExecArgs {
-    prog: String,
-    argv: Vec<String>,
+    prog: OsString,
+    argv: Vec<OsString>,
     chdir: Option<AbsPathBuf>,
     env: Vec<(String, String)>,
 }
@@ -40,73 +45,105 @@ pub struct ExecArgs {
 /// but the reverse is not possible: once created, the only useful thing we can with a
 /// ExitResult is propagate it.
 #[must_use]
+#[derive(Debug)]
 pub struct ExitResult {
     variant: ExitResultVariant,
 
     /// Some stdout output that should be emitted prior to exiting. This allows commands to buffer
     /// their final output and choose not to send it if we opt to restart the command.
     stdout: Vec<u8>,
+
+    // List of error messages that was observed during the command. Used for error reporting.
+    error_messages: Vec<String>,
 }
 
+#[derive(Debug)]
 enum ExitResultVariant {
     /// We finished successfully, return the specific exit code.
-    Status(u8),
-    /// The command failed and it doesn't have a specific exit code yet. This may be updated by
-    /// `ErrorObserver::error_cause` if more accurate categorization is available after the
-    /// command ends. If no categorization succeeded, it will return exit code 1.
-    UncategorizedError,
+    Status(ExitCode),
     /// Instead of terminating normally, `exec` (or spawn on Windows)
     /// a new process with the given name and argv.
     /// This is used to implement `buck2 run`.
-    Buck2RunExec(ExecArgs),
+    Exec(ExecArgs),
     /// We failed (i.e. due to a Buck internal error).
     /// At this time, when execution does fail, we print out the error message to stderr.
-    Err(anyhow::Error),
+    StatusWithErr(ExitCode, buck2_error::Error),
 }
 
-impl ExitResult {
-    pub fn success() -> Self {
-        Self::status(0)
+impl Display for ExitResult {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let _ignored = match &self.variant {
+            ExitResultVariant::Status(code) => write!(f, "ExitCode = {}", code.exit_code()),
+            ExitResultVariant::Exec(args) => {
+                write!(
+                    f,
+                    "Exec {} {}",
+                    args.prog.to_string_lossy(),
+                    args.argv
+                        .iter()
+                        .map(|s| s.to_string_lossy())
+                        .collect::<Vec<_>>()
+                        .join(" ")
+                )
+            }
+            ExitResultVariant::StatusWithErr(code, e) => {
+                write!(f, "ExitCode = {}, Err = {}", code.exit_code(), e)
+            }
+        };
+        if !self.stdout.is_empty() {
+            let _ignored = writeln!(f, "Stdout:");
+            let _ignored = write!(f, "{}", String::from_utf8_lossy(self.stdout.as_slice()));
+        };
+        Ok(())
     }
+}
 
-    pub fn failure() -> Self {
-        Self {
-            variant: ExitResultVariant::UncategorizedError,
-            stdout: Vec::new(),
-        }
+impl ExitResult {
+    pub fn success() -> Self {
+        Self::status(ExitCode::Success)
     }
 
-    pub fn status(status: u8) -> Self {
+    pub fn status(status: ExitCode) -> Self {
         Self {
             variant: ExitResultVariant::Status(status),
             stdout: Vec::new(),
+            error_messages: Vec::new(),
         }
     }
 
     /// Values out of the range of u8 will have their status information ignored
     pub fn status_extended(status: i32) -> Self {
         if let Ok(code) = status.try_into() {
-            Self::status(code)
+            Self::status(ExitCode::Explicit(code))
         } else {
             // The exit code isn't an allowable value, so just switch to generic failure
-            Self::failure()
+            Self::status(ExitCode::UnknownFailure)
+        }
+    }
+
+    fn status_with_error_report(status: ExitCode, errors: &[buck2_data::ErrorReport]) -> Self {
+        Self {
+            variant: ExitResultVariant::Status(status),
+            stdout: Vec::new(),
+            error_messages: errors.iter().map(|e| e.message.clone()).collect(),
         }
     }
 
     pub fn exec(
-        prog: String,
-        argv: Vec<String>,
+        prog: OsString,
+        argv: Vec<OsString>,
         chdir: Option<AbsPathBuf>,
         env: Vec<(String, String)>,
     ) -> Self {
         Self {
-            variant: ExitResultVariant::Buck2RunExec(ExecArgs {
+            variant: ExitResultVariant::Exec(ExecArgs {
                 prog,
                 argv,
                 chdir,
                 env,
             }),
             stdout: Vec::new(),
+            error_messages: Vec::new(),
         }
     }
 
@@ -115,30 +152,39 @@ impl ExitResult {
     }
 
     pub fn err(err: anyhow::Error) -> Self {
+        let err_msg = format!("{:#}", err);
+        let err: buck2_error::Error = err.into();
+        let exit_code = if err.has_tag(ErrorTag::IoClientBrokenPipe) {
+            ExitCode::BrokenPipe
+        } else {
+            ExitCode::UnknownFailure
+        };
+
+        Self {
+            variant: ExitResultVariant::StatusWithErr(exit_code, err.into()),
+            stdout: Vec::new(),
+            error_messages: vec![err_msg],
+        }
+    }
+
+    pub fn err_with_exit_code(err: anyhow::Error, exit_code: ExitCode) -> Self {
+        let err_msg = format!("{:#}", err);
         Self {
-            variant: ExitResultVariant::Err(err),
+            variant: ExitResultVariant::StatusWithErr(exit_code, err.into()),
             stdout: Vec::new(),
+            error_messages: vec![err_msg],
         }
     }
 
     /// Return this ExitStatus or call a function to produce a new one.
     pub fn or_else(self, f: impl FnOnce(Self) -> Self) -> Self {
-        if matches!(self.variant, ExitResultVariant::Status(0)) {
+        if matches!(self.variant, ExitResultVariant::Status(ExitCode::Success)) {
             return self;
         }
 
         f(self)
     }
 
-    /// Return this ExitStatus if it's not Uncategorized, or produce a new exit code.
-    pub fn categorized_or_else(mut self, f: impl FnOnce() -> u8) -> Self {
-        if matches!(self.variant, ExitResultVariant::UncategorizedError) {
-            self.variant = ExitResultVariant::Status(f());
-        }
-
-        self
-    }
-
     pub fn with_stdout(mut self, stdout: Vec<u8>) -> Self {
         self.stdout.extend(stdout);
         self
@@ -147,11 +193,106 @@ impl ExitResult {
     pub fn report(self) -> ! {
         match crate::stdio::print_bytes(&self.stdout) {
             Ok(()) => self.variant.report(),
-            Err(e) => ExitResultVariant::Err(e).report(),
+            Err(e) => Self::err(e).variant.report(),
         }
     }
+
+    pub fn from_errors<'a>(errors: &'a Vec<buck2_data::ErrorReport>) -> Self {
+        let mut has_infra = false;
+        let mut has_user = false;
+
+        for e in errors {
+            if e.tags
+                .contains(&(buck2_data::error::ErrorTag::DaemonIsBusy as i32))
+            {
+                return Self::status_with_error_report(ExitCode::DaemonIsBusy, errors);
+            }
+            if e.tags
+                .contains(&(buck2_data::error::ErrorTag::DaemonPreempted as i32))
+            {
+                return Self::status_with_error_report(ExitCode::DaemonPreempted, errors);
+            }
+            match e.tier.and_then(buck2_data::error::ErrorTier::from_i32) {
+                Some(buck2_data::error::ErrorTier::Tier0)
+                | Some(buck2_data::error::ErrorTier::Environment) => has_infra = true,
+                Some(buck2_data::error::ErrorTier::Input) => has_user = true,
+                Some(buck2_data::error::ErrorTier::UnusedDefaultCategory) | None => (),
+            }
+        }
+        if has_infra {
+            return Self::status_with_error_report(ExitCode::InfraError, errors);
+        }
+        if has_user {
+            return Self::status_with_error_report(ExitCode::UserError, errors);
+        }
+        // FIXME(JakobDegen): For compatibility with pre-existing behavior, we return infra failure
+        // here. However, it would be more honest to return the `1` status code that we use for
+        // "unknown"
+        Self::status_with_error_report(ExitCode::InfraError, errors)
+    }
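// Illustrative sketch, not part of the patch: in the prost-generated protobuf types used by
// `from_errors` above, enum-valued fields arrive as raw `i32`s, hence the
// `contains(&(Tag as i32))` comparisons and the `from_i32` decoding. The idiom in isolation,
// with a hypothetical `Tier` enum standing in for the generated one:
#[derive(Clone, Copy, PartialEq, Debug)]
enum Tier {
    Unused = 0,
    Tier0 = 1,
    Input = 2,
}

impl Tier {
    // prost generates an equivalent decoder for proto enums.
    fn from_i32(v: i32) -> Option<Tier> {
        match v {
            0 => Some(Tier::Unused),
            1 => Some(Tier::Tier0),
            2 => Some(Tier::Input),
            _ => None, // unknown values from newer schema versions
        }
    }
}

fn main() {
    let tags: Vec<i32> = vec![1, 2]; // as found on a decoded message
    assert!(tags.contains(&(Tier::Tier0 as i32)));
    assert_eq!(Tier::from_i32(2), Some(Tier::Input));
}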
+
+    /// Buck2 supports being built as both a "full" binary as well as a "client-only" binary.
+    ///
+    /// However, some commands (eg `--no-buckd`) are not supported in the client-only binary, and so
+    /// when these commands are run, we have to retry them with the full build.
+    ///
+    /// This function is called in those cases. It returns `Some` only for client-only builds.
+    pub fn retry_command_with_full_binary() -> anyhow::Result<Option<ExitResult>> {
+        if buck2_core::client_only::is_client_only()? {
+            let exe = crate::daemon::client::connect::get_daemon_exe()?;
+            Ok(Some(ExitResult::exec(
+                exe.into_os_string(),
+                std::env::args_os().collect(),
+                None,
+                Vec::new(),
+            )))
+        } else {
+            Ok(None)
+        }
+    }
+
+    pub fn write_command_report(
+        &self,
+        trace_id: TraceId,
+        buck_log_dir: &AbsNormPathBuf,
+        command_report_path: &Option<AbsPathBuf>,
+    ) -> anyhow::Result<()> {
+        let dir = buck_log_dir.join(ForwardRelativePath::new(&trace_id.to_string())?);
+        fs_util::create_dir_all(&dir)?;
+
+        let path = dir.join(ForwardRelativePath::new("command_report.json")?);
+        let file = fs_util::create_file(&path)?;
+        let mut file = std::io::BufWriter::new(file);
+
+        match &self.variant {
+            ExitResultVariant::Status(exit_code)
+            | ExitResultVariant::StatusWithErr(exit_code, _) => {
+                serde_json::to_writer_pretty(
+                    &mut file,
+                    &buck2_data::CommandReport {
+                        trace_id: trace_id.to_string(),
+                        exit_code: exit_code.exit_code(),
+                        error_messages: self.error_messages.clone(),
+                    },
+                )?;
+
+                if let Some(report_path) = command_report_path {
+                    if let Some(parent) = report_path.parent() {
+                        fs_util::create_dir_all(parent)?;
+                    }
+                    file.flush()?;
+                    fs_util::copy(path, report_path)?;
+                }
+            }
+            _ => {}
+        }
+
+        Ok(())
+    }
 }
 
+impl std::error::Error for ExitResult {}
+
 /// We can produce a ExitResult from a `anyhow::Result` for convenience.
 impl From<anyhow::Result<()>> for ExitResult {
     fn from(e: anyhow::Result<()>) -> Self {
@@ -162,8 +303,8 @@
     }
 }
 
-impl From<anyhow::Result<u8>> for ExitResult {
-    fn from(e: anyhow::Result<u8>) -> Self {
+impl From<anyhow::Result<ExitCode>> for ExitResult {
+    fn from(e: anyhow::Result<ExitCode>) -> Self {
         match e {
             Ok(code) => Self::status(code),
             Err(e) => Self::err(e),
@@ -171,9 +312,9 @@
     }
 }
 
-impl From<FailureExitCode> for ExitResult {
-    fn from(e: FailureExitCode) -> Self {
-        Self::err(e.into())
+impl From<anyhow::Error> for ExitResult {
+    fn from(e: anyhow::Error) -> Self {
+        Self::err(e)
     }
 }
 
@@ -198,48 +339,31 @@ impl<E: Into<anyhow::Error>> FromResidual<Result<Infallible, E>> for ExitResult
 /// Implementing Termination lets us set the exit code for the process.
 impl ExitResultVariant {
     pub fn report(self) -> ! {
+        // Log the exit timestamp
+        tracing::debug!("Client exiting");
         // NOTE: We use writeln instead of println so we don't panic if stderr is closed. This
         // ensures we get the desired exit code printed instead of potentially a panic.
         let mut exit_code = match self {
             Self::Status(v) => v,
-            Self::UncategorizedError => 1,
-            Self::Buck2RunExec(args) => {
+            Self::Exec(args) => {
                 // Terminate by exec-ing a new process - usually because of `buck2 run`.
                 //
                 // execv does not return.
                 execv(args)
             }
-            Self::Err(e) => {
-                match e.downcast_ref::<FailureExitCode>() {
-                    None => {
-                        let _ignored = writeln!(io::stderr().lock(), "Command failed: {:?}", e);
-                        1
-                    }
-                    Some(FailureExitCode::SignalInterrupt) => {
-                        tracing::debug!("Interrupted");
-                        130
-                    }
-                    Some(FailureExitCode::StdoutBrokenPipe) => {
-                        // Report a broken pipe, but don't print anything to stderr by default. If
-                        // the user wants to find out why we exited non-zero, they'll have to look
-                        // at the output or raise the log level.
-                        tracing::debug!("stdout pipe was broken");
-                        141
-                    }
-                    Some(FailureExitCode::StderrBrokenPipe) => {
-                        // Not much point in printing anything here, since we know stderr is
-                        // closed.
-                        141
-                    }
-                    Some(FailureExitCode::OutputFileBrokenPipe) => {
-                        tracing::debug!("--out pipe was broken");
-                        141
+            Self::StatusWithErr(exit_code, e) => {
+                tracing::debug!("Exiting with {:?} ({:?})", exit_code, e);
+
+                match exit_code {
+                    ExitCode::SignalInterrupt | ExitCode::BrokenPipe => {
+                        // No logging for those.
                     }
-                    Some(FailureExitCode::ConnectError(e)) => {
-                        let _ignored = writeln!(io::stderr().lock(), "{:?}", e);
-                        11
+                    _ => {
+                        let _ignored = writeln!(io::stderr().lock(), "Command failed: {:?}", e);
                     }
                 }
+
+                exit_code
             }
         };
 
@@ -251,46 +375,81 @@ impl ExitResultVariant {
         // Global destructors are hard (if even possible) to do safely anyway.
 
         if io::stdout().flush().is_err() {
-            exit_code = 141;
+            exit_code = ExitCode::SignalInterrupt;
         }
         // Stderr should be autoflushed, but just in case...
         if io::stderr().flush().is_err() {
-            exit_code = 141;
+            exit_code = ExitCode::SignalInterrupt;
         }
 
-        unsafe { libc::_exit(exit_code as libc::c_int) }
+        unsafe { libc::_exit(exit_code.exit_code() as libc::c_int) }
     }
 }
 
-pub fn gen_error_exit_code(cause: ErrorCause) -> u8 {
-    match cause {
-        ErrorCause::Unknown => 2, // We treat unknown as infra error.
-        ErrorCause::Infra => 2,
-        ErrorCause::User => 3,
-        ErrorCause::DaemonIsBusy => 4, // For exiting concurrent commands of a different state early
+/// A wrapper around an `io::Error` which indicates that the error came from "client IO".
+///
+/// We use this to inform the exit code generation
+#[derive(buck2_error::Error, derivative::Derivative)]
+#[derivative(Debug = "transparent")]
+#[error(transparent)]
+pub enum ClientIoError {
+    /// A broken pipe when writing to stdout is expected if stdout is closed before the command finishes.
+    /// An easy way to trigger this is `buck2 audit config | head`
+    #[buck2(tag = IoClientBrokenPipe)]
+    #[buck2(environment)]
+    BrokenPipe(io::Error),
+    #[buck2(tier0)]
+    Other(io::Error),
+}
+
+impl ClientIoError {
+    pub fn new(io_err: io::Error) -> Self {
+        if io_err.kind() == io::ErrorKind::BrokenPipe {
+            ClientIoError::BrokenPipe(io_err)
+        } else {
+            ClientIoError::Other(io_err)
+        }
     }
 }
 
 /// Common exit codes for buck with stronger semantic meanings
-#[derive(thiserror::Error, Debug)]
-pub enum FailureExitCode {
+#[derive(Clone, Copy, Debug)]
+pub enum ExitCode {
     // TODO: Fill in more exit codes from ExitCode.java here. Need to determine
     // how many make sense in v2 versus v1. Some are assuredly unnecessary in v2.
-    #[error("Ctrl-c was pressed")]
+    Success,
+    UnknownFailure,
+    InfraError,
+    UserError,
+    DaemonIsBusy,
+    DaemonPreempted,
+    Timeout,
+    ConnectError,
     SignalInterrupt,
+    BrokenPipe,
+    /// Something other than buck2 itself (usually a test runner) explicitly requested that this
+    /// exit code be returned
+    Explicit(u8),
+}
 
-    #[error("Broken pipe writing on stdout")]
-    StdoutBrokenPipe,
-
-    #[error("Broken pipe writing on stdout")]
-    StderrBrokenPipe,
-
-    #[error("Broken pipe writing build artifact to --out")]
-    OutputFileBrokenPipe,
-
-    #[error(transparent)]
-    ConnectError(anyhow::Error),
+impl ExitCode {
+    pub fn exit_code(self) -> u32 {
+        use ExitCode::*;
+        match self {
+            Success => 0,
+            UnknownFailure => 1,
+            InfraError => 2,
+            UserError => 3,
+            DaemonIsBusy => 4,
+            DaemonPreempted => 5,
+            Timeout => 6,
+            ConnectError => 11,
+            BrokenPipe => 130,
+            SignalInterrupt => 141,
+            Explicit(code) => code.into(),
+        }
    }
 }
 
 #[cfg(windows)]
@@ -313,6 +472,7 @@ fn do_exec(command: &mut Command) -> anyhow::Error {
 /// Invokes the given program with the given argv and replaces the program image with the new program.
 /// Does not return.
 fn execv(args: ExecArgs) -> ! {
+    // patternlint-disable-next-line buck2-no-command-new
     let mut command = Command::new(&args.prog);
     command.args(&args.argv[1..]);
     if let Some(dir) = args.chdir {
diff --git a/app/buck2_client_ctx/src/file_tailer.rs b/app/buck2_client_ctx/src/file_tailer.rs
deleted file mode 100644
index 2c0be8d87fb99..0000000000000
--- a/app/buck2_client_ctx/src/file_tailer.rs
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */
-
-use std::fs::File;
-use std::io::BufRead;
-use std::io::BufReader;
-use std::io::Seek;
-use std::io::SeekFrom;
-use std::time::Duration;
-
-use anyhow::Context;
-use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf;
-use futures::FutureExt;
-use tokio::sync::mpsc::UnboundedSender;
-use tokio::sync::oneshot;
-
-use crate::events_ctx::FileTailerEvent;
-use crate::tokio_runtime_setup::client_tokio_runtime;
-
-pub(crate) enum StdoutOrStderr {
-    Stdout,
-    Stderr,
-}
-
-/// When `tail_file()` is invoked, the FileTailer will open the file and seek
-/// to the end. It'll then watch for changes to the file and copy any newly written
-/// data to the writer.
-///
-/// When the tailer is dropped, it will do a final sync of the data to ensure that
-/// the tail is up-to-date at that point.
-pub(crate) struct FileTailer {
-    // This thread is periodically checking the file for new data. When a message is
-    // sent on the end_signaller, the thread will do one final sync of data and then exit.
-    thread: Option<std::thread::JoinHandle<anyhow::Result<()>>>,
-    end_signaller: Option<oneshot::Sender<()>>,
-}
-
-impl Drop for FileTailer {
-    fn drop(&mut self) {
-        // If the thread has exited then don't error here.
-        let _ignored = self.end_signaller.take().unwrap().send(());
-        match self.thread.take().unwrap().join() {
-            Ok(Ok(())) => {}
-            Ok(Err(e)) => {
-                tracing::warn!("Error tailing daemon logs: {:#}", e);
-            }
-            Err(..) => {
-                tracing::warn!("Error tailing daemon logs: panic");
-            }
-        }
-    }
-}
-
-impl FileTailer {
-    pub(crate) fn tail_file(
-        file: AbsNormPathBuf,
-        sender: UnboundedSender<FileTailerEvent>,
-        stdout_or_stderr: StdoutOrStderr,
-    ) -> anyhow::Result<FileTailer> {
-        let mut reader = BufReader::new(
-            File::open(&file)
-                .with_context(|| format!("Error setting up tailer for {}", file.display()))?,
-        );
-
-        reader.seek(SeekFrom::End(0))?;
-        let (tx, rx) = tokio::sync::oneshot::channel();
-        // Startup a thread that will repeatedly (with a 200ms interval between) copy from
-        // the current position to the end of the file.
-        // TODO(cjhopman): It would probably be nicer to implement this via inotify/fsevents/etc
-        // rather than just repeatedly reading the file, but I tried to use each of
-        // https://crates.io/crates/hotwatch and https://crates.io/crates/notify and neither worked.
-        let thread = std::thread::spawn(move || {
-            let runtime = client_tokio_runtime()?;
-            let res = runtime.block_on(async move {
-                let mut interval = tokio::time::interval(Duration::from_millis(200));
-                let mut rx = rx.fuse();
-
-                let mut completing = false;
-                while !completing {
-                    tokio::select! {
-                        _ = interval.tick() => {},
-                        _ = &mut rx => {
-                            // This indicates that the FileTailer is being dropped.
-                            // drain any remaining output and return.
-                            completing = true;
-                        }
-                    }
-
-                    let mut line = Vec::new();
-                    while reader.read_until(b'\n', &mut line)? != 0 {
-                        let event = match stdout_or_stderr {
-                            StdoutOrStderr::Stdout => FileTailerEvent::Stdout(line),
-                            StdoutOrStderr::Stderr => {
-                                if omit_stderr_line(&line) {
-                                    line.clear();
-                                    continue;
-                                }
-                                FileTailerEvent::Stderr(line)
-                            }
-                        };
-                        if sender.send(event).is_err() {
-                            break;
-                        }
-                        line = Vec::new();
-                    }
-                }
-
-                anyhow::Ok(())
-            });
-
-            res.with_context(|| format!("Failed to read from `{}`", file))
-        });
-
-        Ok(Self {
-            end_signaller: Some(tx),
-            thread: Some(thread),
-        })
-    }
-}
-
-fn omit_stderr_line(line: &[u8]) -> bool {
-    fn bytes_contains(haystack: &[u8], needle: &[u8]) -> bool {
-        haystack.windows(needle.len()).any(|w| w == needle)
-    }
-
-    bytes_contains(line, b"[warn] kq_dispatch: skipping fd=") && bytes_contains(line, b"errno=9:")
-}
-
-#[cfg(test)]
-mod tests {
-    use std::io::Write;
-
-    use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf;
-
-    use super::*;
-    use crate::events_ctx::FileTailerEvent;
-    use crate::file_tailer::FileTailer;
-    use crate::file_tailer::StdoutOrStderr;
-
-    #[tokio::test]
-    async fn test_tailer_stdout() -> anyhow::Result<()> {
-        let mut file = tempfile::NamedTempFile::new()?;
-        file.write_all(b"before\n")?;
-
-        // If we could control the interval for tailer polling, we could reliably
-        // test more of the behavior. For now, just test a simple case.
-        let (sender, mut receiver) = tokio::sync::mpsc::unbounded_channel();
-        let tailer = FileTailer::tail_file(
-            AbsNormPathBuf::new(file.path().to_owned())?,
-            sender,
-            StdoutOrStderr::Stdout,
-        )?;
-
-        let omitted_on_stderr =
-            b"[warn] kq_dispatch: skipping fd=421 errno=9: Socket is not connected\n";
-        let ok_line = b"after\n";
-        let invalid_utf8_line = b"\xc3\x28\n";
-
-        file.write_all(omitted_on_stderr.as_slice())?;
-        file.write_all(ok_line.as_slice())?;
-        file.write_all(invalid_utf8_line.as_slice())?;
-
-        // have to sleep long enough for a read or else this test is racy.
-        tokio::time::sleep(Duration::from_millis(250)).await;
-        std::mem::drop(tailer);
-        assert_eq!(
-            FileTailerEvent::Stdout((*omitted_on_stderr).into()),
-            receiver.recv().await.unwrap()
-        );
-        assert_eq!(
-            FileTailerEvent::Stdout((*ok_line).into()),
-            receiver.recv().await.unwrap()
-        );
-        assert_eq!(
-            FileTailerEvent::Stdout((*invalid_utf8_line).into()),
-            receiver.recv().await.unwrap()
-        );
-
-        Ok(())
-    }
-
-    #[tokio::test]
-    async fn test_tailer_stderr() -> anyhow::Result<()> {
-        let mut file = tempfile::NamedTempFile::new()?;
-        file.write_all(b"before\n")?;
-
-        // If we could control the interval for tailer polling, we could reliably
-        // test more of the behavior. For now, just test a simple case.
-        let (sender, mut receiver) = tokio::sync::mpsc::unbounded_channel();
-        let tailer = FileTailer::tail_file(
-            AbsNormPathBuf::new(file.path().to_owned())?,
-            sender,
-            StdoutOrStderr::Stderr,
-        )?;
-
-        let omitted_line =
-            b"[warn] kq_dispatch: skipping fd=421 errno=9: Socket is not connected\n";
-        let ok_line = b"after\n";
-        let invalid_utf8_line = b"\xc3\x28\n";
-
-        file.write_all(omitted_line.as_slice())?;
-        file.write_all(ok_line.as_slice())?;
-        file.write_all(invalid_utf8_line.as_slice())?;
-
-        // have to sleep long enough for a read or else this test is racy.
-        tokio::time::sleep(Duration::from_millis(250)).await;
-        std::mem::drop(tailer);
-        assert_eq!(
-            FileTailerEvent::Stderr((*ok_line).into()),
-            receiver.recv().await.unwrap()
-        );
-        assert_eq!(
-            FileTailerEvent::Stderr((*invalid_utf8_line).into()),
-            receiver.recv().await.unwrap()
-        );
-
-        Ok(())
-    }
-}
diff --git a/app/buck2_client_ctx/src/file_tailers.rs b/app/buck2_client_ctx/src/file_tailers.rs
new file mode 100644
index 0000000000000..f931ff7402a46
--- /dev/null
+++ b/app/buck2_client_ctx/src/file_tailers.rs
@@ -0,0 +1,11 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+pub(crate) mod tailer;
+pub(crate) mod tailers;
diff --git a/app/buck2_client_ctx/src/file_tailers/tailer.rs b/app/buck2_client_ctx/src/file_tailers/tailer.rs
new file mode 100644
index 0000000000000..286334de7d612
--- /dev/null
+++ b/app/buck2_client_ctx/src/file_tailers/tailer.rs
@@ -0,0 +1,216 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::convert::Infallible;
+use std::fs::File;
+use std::io::BufRead;
+use std::io::BufReader;
+use std::io::Seek;
+use std::io::SeekFrom;
+use std::time::Duration;
+
+use anyhow::Context;
+use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf;
+use dupe::Dupe;
+use futures::FutureExt;
+use tokio::sync::mpsc::UnboundedSender;
+use tokio::sync::oneshot;
+
+use crate::events_ctx::FileTailerEvent;
+
+#[derive(Copy, Clone, Dupe)]
+pub(crate) enum StdoutOrStderr {
+    Stdout,
+    Stderr,
+}
+
+/// When `tail_file()` is invoked, the FileTailer will open the file and seek
+/// to the end. It'll then watch for changes to the file and copy any newly written
+/// data to the writer.
+///
+/// When the tailer is dropped, it will do a final sync of the data to ensure that
+/// the tail is up-to-date at that point.
+pub(crate) struct FileTailer {
+    // This thread is periodically checking the file for new data. When a message is
+    // sent on the end_signaller, the thread will do one final sync of data and then exit.
+    _end_signaller: oneshot::Sender<Infallible>,
+}
+
+impl FileTailer {
+    pub(crate) fn tail_file(
+        file: AbsNormPathBuf,
+        sender: UnboundedSender<FileTailerEvent>,
+        stdout_or_stderr: StdoutOrStderr,
+    ) -> anyhow::Result<FileTailer> {
+        let mut reader = BufReader::new(
+            File::open(&file)
+                .with_context(|| format!("Error setting up tailer for {}", file.display()))?,
+        );
+
+        reader.seek(SeekFrom::End(0))?;
+        let (tx, rx) = tokio::sync::oneshot::channel();
+        // Startup a thread that will repeatedly (with a 200ms interval between) copy from
+        // the current position to the end of the file.
+        // TODO(cjhopman): It would probably be nicer to implement this via inotify/fsevents/etc
+        // rather than just repeatedly reading the file, but I tried to use each of
+        // https://crates.io/crates/hotwatch and https://crates.io/crates/notify and neither worked.
+        tokio::spawn(async move {
+            let res = Self::tailer_loop(rx, reader, stdout_or_stderr, sender).await;
+            match res {
+                Ok(()) => {}
+                Err(e) => {
+                    tracing::warn!("Failed to read from `{}`: {:?}", file.display(), e);
+                }
+            }
+        });
+
+        Ok(FileTailer { _end_signaller: tx })
+    }
+
+    async fn tailer_loop(
+        rx: oneshot::Receiver<Infallible>,
+        mut reader: BufReader<File>,
+        stdout_or_stderr: StdoutOrStderr,
+        mut sender: UnboundedSender<FileTailerEvent>,
+    ) -> anyhow::Result<()> {
+        let mut interval = tokio::time::interval(Duration::from_millis(200));
+        let mut rx = rx.fuse();
+
+        let mut completing = false;
+        while !completing {
+            tokio::select! {
+                _ = interval.tick() => {},
+                _ = &mut rx => {
+                    // This indicates that the FileTailer is being dropped.
+                    // drain any remaining output and return.
+                    completing = true;
+                }
+            }
+
+            (sender, reader) = tokio::task::spawn_blocking(move || {
+                let mut line = Vec::new();
+                while reader.read_until(b'\n', &mut line)? != 0 {
+                    let event = match stdout_or_stderr {
+                        StdoutOrStderr::Stdout => FileTailerEvent::Stdout(line),
+                        StdoutOrStderr::Stderr => {
+                            if omit_stderr_line(&line) {
+                                line.clear();
+                                continue;
+                            }
+                            FileTailerEvent::Stderr(line)
+                        }
+                    };
+                    if sender.send(event).is_err() {
+                        break;
+                    }
+                    line = Vec::new();
+                }
+                anyhow::Ok((sender, reader))
+            })
+            .await??;
+        }
+
+        anyhow::Ok(())
+    }
+}
+
+fn omit_stderr_line(line: &[u8]) -> bool {
+    fn bytes_contains(haystack: &[u8], needle: &[u8]) -> bool {
+        haystack.windows(needle.len()).any(|w| w == needle)
+    }
+
+    bytes_contains(line, b"[warn] kq_dispatch: skipping fd=") && bytes_contains(line, b"errno=9:")
+}
+
+#[cfg(test)]
+mod tests {
+    use std::io::Write;
+
+    use super::*;
+
+    #[tokio::test]
+    async fn test_tailer_stdout() -> anyhow::Result<()> {
+        let mut file = tempfile::NamedTempFile::new()?;
+        file.write_all(b"before\n")?;
+
+        // If we could control the interval for tailer polling, we could reliably
+        // test more of the behavior. For now, just test a simple case.
+        let (sender, mut receiver) = tokio::sync::mpsc::unbounded_channel();
+        let tailer = FileTailer::tail_file(
+            AbsNormPathBuf::new(file.path().to_owned())?,
+            sender,
+            StdoutOrStderr::Stdout,
+        )?;
+
+        let omitted_on_stderr =
+            b"[warn] kq_dispatch: skipping fd=421 errno=9: Socket is not connected\n";
+        let ok_line = b"after\n";
+        let invalid_utf8_line = b"\xc3\x28\n";
+
+        file.write_all(omitted_on_stderr.as_slice())?;
+        file.write_all(ok_line.as_slice())?;
+        file.write_all(invalid_utf8_line.as_slice())?;
+
+        // have to sleep long enough for a read or else this test is racy.
+        tokio::time::sleep(Duration::from_millis(250)).await;
+        std::mem::drop(tailer);
+        assert_eq!(
+            FileTailerEvent::Stdout((*omitted_on_stderr).into()),
+            receiver.recv().await.unwrap()
+        );
+        assert_eq!(
+            FileTailerEvent::Stdout((*ok_line).into()),
+            receiver.recv().await.unwrap()
+        );
+        assert_eq!(
+            FileTailerEvent::Stdout((*invalid_utf8_line).into()),
+            receiver.recv().await.unwrap()
+        );
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_tailer_stderr() -> anyhow::Result<()> {
+        let mut file = tempfile::NamedTempFile::new()?;
+        file.write_all(b"before\n")?;
+
+        // If we could control the interval for tailer polling, we could reliably
+        // test more of the behavior. For now, just test a simple case.
+        let (sender, mut receiver) = tokio::sync::mpsc::unbounded_channel();
+        let tailer = FileTailer::tail_file(
+            AbsNormPathBuf::new(file.path().to_owned())?,
+            sender,
+            StdoutOrStderr::Stderr,
+        )?;
+
+        let omitted_line =
+            b"[warn] kq_dispatch: skipping fd=421 errno=9: Socket is not connected\n";
+        let ok_line = b"after\n";
+        let invalid_utf8_line = b"\xc3\x28\n";
+
+        file.write_all(omitted_line.as_slice())?;
+        file.write_all(ok_line.as_slice())?;
+        file.write_all(invalid_utf8_line.as_slice())?;
+
+        // have to sleep long enough for a read or else this test is racy.
+        tokio::time::sleep(Duration::from_millis(250)).await;
+        std::mem::drop(tailer);
+        assert_eq!(
+            FileTailerEvent::Stderr((*ok_line).into()),
+            receiver.recv().await.unwrap()
+        );
+        assert_eq!(
+            FileTailerEvent::Stderr((*invalid_utf8_line).into()),
+            receiver.recv().await.unwrap()
+        );
+
+        Ok(())
+    }
+}
diff --git a/app/buck2_client_ctx/src/file_tailers/tailers.rs b/app/buck2_client_ctx/src/file_tailers/tailers.rs
new file mode 100644
index 0000000000000..1eef38838fdde
--- /dev/null
+++ b/app/buck2_client_ctx/src/file_tailers/tailers.rs
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use buck2_common::daemon_dir::DaemonDir;
+use tokio::sync::mpsc;
+use tokio::sync::mpsc::UnboundedReceiver;
+
+use crate::events_ctx::FileTailerEvent;
+use crate::file_tailers::tailer::FileTailer;
+use crate::file_tailers::tailer::StdoutOrStderr;
+
+pub struct FileTailers {
+    _stdout_tailer: Option<FileTailer>,
+    _stderr_tailer: Option<FileTailer>,
+    pub(crate) stream: UnboundedReceiver<FileTailerEvent>,
+}
+
+impl FileTailers {
+    pub fn new(daemon_dir: &DaemonDir) -> anyhow::Result<Self> {
+        let (tx, rx) = mpsc::unbounded_channel();
+        let stdout_tailer = FileTailer::tail_file(
+            daemon_dir.buckd_stdout(),
+            tx.clone(),
+            StdoutOrStderr::Stdout,
+        )?;
+        let stderr_tailer =
+            FileTailer::tail_file(daemon_dir.buckd_stderr(), tx, StdoutOrStderr::Stderr)?;
+        let this = Self {
+            _stdout_tailer: Some(stdout_tailer),
+            _stderr_tailer: Some(stderr_tailer),
+            stream: rx,
+        };
+        Ok(this)
+    }
+
+    pub fn empty() -> FileTailers {
+        FileTailers {
+            _stdout_tailer: None,
+            _stderr_tailer: None,
+            // Empty stream.
+            stream: mpsc::unbounded_channel().1,
+        }
+    }
+
+    pub fn stop_reading(self) -> UnboundedReceiver<FileTailerEvent> {
+        // by dropping the tailers, they shut themselves down.
+        self.stream
+    }
+}
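// Illustrative sketch, not part of the patch: both the deleted and the new tailer shut down the
// same way - the reader loop selects on a oneshot receiver, and dropping the sender (here,
// dropping FileTailer and its `_end_signaller`) completes that branch. The bare mechanism:
use tokio::sync::oneshot;

#[tokio::main]
async fn main() {
    let (tx, rx) = oneshot::channel::<()>();
    let worker = tokio::spawn(async move {
        // A real tailer would loop on an interval and drain the file here.
        // `rx` resolves with Err(RecvError) once `tx` is dropped; we treat that as "stop".
        let _ = rx.await;
        "drained and exited"
    });
    drop(tx); // the FileTailer drop in the diff does exactly this
    assert_eq!(worker.await.unwrap(), "drained and exited");
}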
diff --git a/app/buck2_client_ctx/src/ide_support.rs b/app/buck2_client_ctx/src/ide_support.rs
index 6aeb9c5fe1b78..28e45652348c7 100644
--- a/app/buck2_client_ctx/src/ide_support.rs
+++ b/app/buck2_client_ctx/src/ide_support.rs
@@ -76,7 +76,7 @@ impl<T: for<'a> Deserialize<'a>> Decoder for LspMessageLikeDecoder<T> {
 }
 
 #[cfg(test)]
-mod test {
+mod tests {
     use assert_matches::assert_matches;
     use lsp_server::Message;
     use lsp_server::Request;
diff --git a/app/buck2_client_ctx/src/immediate_config.rs b/app/buck2_client_ctx/src/immediate_config.rs
index fbdec5f07d6e2..1c0bd82f44ba1 100644
--- a/app/buck2_client_ctx/src/immediate_config.rs
+++ b/app/buck2_client_ctx/src/immediate_config.rs
@@ -12,23 +12,62 @@ use std::sync::OnceLock;
 use std::time::SystemTime;
 
 use anyhow::Context as _;
+use buck2_common::init::DaemonStartupConfig;
 use buck2_common::invocation_roots::find_invocation_roots;
 use buck2_common::legacy_configs::cells::BuckConfigBasedCells;
-use buck2_common::legacy_configs::init::DaemonStartupConfig;
+use buck2_core::buck2_env_anyhow;
+use buck2_core::cells::cell_root_path::CellRootPathBuf;
+use buck2_core::cells::CellAliasResolver;
 use buck2_core::cells::CellResolver;
-use buck2_core::env_helper::EnvHelper;
 use buck2_core::fs::fs_util;
 use buck2_core::fs::paths::abs_norm_path::AbsNormPath;
 use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf;
 use buck2_core::fs::paths::abs_path::AbsPath;
 use buck2_core::fs::project::ProjectRoot;
+use buck2_core::fs::project_rel_path::ProjectRelativePath;
 use buck2_core::fs::working_dir::WorkingDir;
 use prost::Message;
 
+/// Limited view of the root config. This does not follow includes.
+struct ImmediateConfig {
+    cell_resolver: CellResolver,
+    cwd_cell_alias_resolver: CellAliasResolver,
+    daemon_startup_config: DaemonStartupConfig,
+}
+
+impl ImmediateConfig {
+    /// Performs a parse of the root `.buckconfig` for the cell _only_ without following includes
+    /// and without parsing any configs for any referenced cells. This means this function might return
+    /// an empty mapping if the root `.buckconfig` does not contain the cell definitions.
+    fn parse(
+        project_fs: &ProjectRoot,
+        cwd: &ProjectRelativePath,
+    ) -> anyhow::Result<ImmediateConfig> {
+        // This function is non-reentrant, and blocking for a bit should be ok
+        let cells = futures::executor::block_on(BuckConfigBasedCells::parse_with_config_args(
+            project_fs,
+            &[],
+            cwd,
+        ))?;
+
+        let cwd_cell_alias_resolver = futures::executor::block_on(
+            cells.get_cell_alias_resolver_for_cwd_fast(project_fs, cwd),
+        )?;
+
+        Ok(ImmediateConfig {
+            cell_resolver: cells.cell_resolver,
+            cwd_cell_alias_resolver,
+            daemon_startup_config: DaemonStartupConfig::new(&cells.root_config)
+                .context("Error loading daemon startup config")?,
+        })
+    }
+}
+
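// Illustrative sketch, not part of the patch: `ImmediateConfig::parse` above uses
// `futures::executor::block_on` because it runs before the client's tokio runtime exists. That
// is only safe when the future is self-contained (no timers or IO reactors behind it); the diff's
// own comment flags the call site as non-reentrant. The shape of the pattern:
fn read_config_blocking() -> String {
    // Fine: a pure computation wrapped in a future, driven to completion on this thread.
    futures::executor::block_on(async { "parsed-config".to_owned() })
}

fn main() {
    assert_eq!(read_config_blocking(), "parsed-config");
}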
 /// Lazy-computed immediate config data. This is produced by reading the root buckconfig (but not
 /// processing any includes).
 struct ImmediateConfigContextData {
     cell_resolver: CellResolver,
+    cwd_cell_alias_resolver: CellAliasResolver,
     daemon_startup_config: DaemonStartupConfig,
     project_filesystem: ProjectRoot,
 }
@@ -53,11 +92,11 @@ impl<'a> ImmediateConfigContext<'a> {
         }
     }
 
-    pub fn push_trace(&mut self, path: &AbsNormPath) {
+    pub(crate) fn push_trace(&mut self, path: &AbsNormPath) {
         self.trace.push(path.to_buf());
     }
 
-    pub fn trace(&self) -> &[AbsNormPathBuf] {
+    pub(crate) fn trace(&self) -> &[AbsNormPathBuf] {
         &self.trace
     }
 
@@ -65,18 +104,8 @@ impl<'a> ImmediateConfigContext<'a> {
         Ok(&self.data()?.daemon_startup_config)
     }
 
-    /// Resolves an argument which can possibly be a cell-relative path.
-    /// If the argument is not a cell-relative path, it returns `None`.
-    /// Otherwise, it tries to resolve the cell and returns a `Result`.
-    pub fn resolve_cell_path_arg(&self, path: &str) -> Option<anyhow::Result<AbsNormPathBuf>> {
-        path.split_once("//")
-            .map(|(cell_alias, cell_relative_path)| {
-                self.resolve_cell_path(cell_alias, cell_relative_path)
-            })
-    }
-
-    pub fn canonicalize(&self, path: &Path) -> anyhow::Result<AbsNormPathBuf> {
-        fs_util::canonicalize(self.cwd.path().as_path().join(path))
+    pub(crate) fn canonicalize(&self, path: &Path) -> anyhow::Result<AbsNormPathBuf> {
+        fs_util::canonicalize(self.cwd.path().as_abs_path().join(path))
     }
 
     /// Resolves a cell path (i.e., contains `//`) into an absolute path. The cell path must have
@@ -84,19 +113,26 @@ impl<'a> ImmediateConfigContext<'a> {
     /// is `cell//path/to/file`, then:
     /// - `cell_alias` would be `cell`
     /// - `cell_relative_path` would be `path/to/file`
-    pub fn resolve_cell_path(
+    pub(crate) fn resolve_cell_path(
         &self,
         cell_alias: &str,
         cell_relative_path: &str,
     ) -> anyhow::Result<AbsNormPathBuf> {
         let data = self.data()?;
-        data.cell_resolver.resolve_cell_relative_path(
-            cell_alias,
-            cell_relative_path,
-            &data.project_filesystem,
-            self.cwd.path(),
-        )
+        let cell = data.cwd_cell_alias_resolver.resolve(cell_alias)?;
+        let cell = data.cell_resolver.get(cell)?;
+        let path = cell.path().join_normalized(cell_relative_path)?;
+        Ok(data.project_filesystem.resolve(&path))
+    }
+
+    pub(crate) fn resolve_alias_to_path_in_cwd(
+        &self,
+        alias: &str,
+    ) -> anyhow::Result<CellRootPathBuf> {
+        let data = self.data()?;
+        let cell = data.cwd_cell_alias_resolver.resolve(alias)?;
+        Ok(data.cell_resolver.get(cell)?.path().to_buf())
     }
 
     fn data(&self) -> anyhow::Result<&ImmediateConfigContextData> {
@@ -107,7 +143,10 @@ impl<'a> ImmediateConfigContext<'a> {
         // See comment in `ImmediateConfig` about why we use `OnceLock` rather than `Lazy`
         let project_filesystem = roots.project_root;
-        let cfg = BuckConfigBasedCells::parse_immediate_config(&project_filesystem)?;
+        let cfg = ImmediateConfig::parse(
+            &project_filesystem,
+            project_filesystem.relativize(self.cwd.path())?.as_ref(),
+        )?;
 
         // It'd be nice to deal with this a little differently by having this be a separate
         // type.
@@ -128,6 +167,7 @@ impl<'a> ImmediateConfigContext<'a> {
 
             anyhow::Ok(ImmediateConfigContextData {
                 cell_resolver: cfg.cell_resolver,
+                cwd_cell_alias_resolver: cfg.cwd_cell_alias_resolver,
                 daemon_startup_config,
                 project_filesystem,
             })
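// Illustrative sketch, not part of the patch: `resolve_cell_path` above expects a cell path
// already split at the first `//`, and the removed `resolve_cell_path_arg` shows how that split
// is done. The parsing half in isolation:
fn split_cell_path(path: &str) -> Option<(&str, &str)> {
    // "cell//path/to/file" -> ("cell", "path/to/file"); plain paths yield None.
    path.split_once("//")
}

fn main() {
    assert_eq!(split_cell_path("cell//path/to/file"), Some(("cell", "path/to/file")));
    assert_eq!(split_cell_path("relative/path"), None);
}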
@@ -137,8 +177,7 @@
 }
 
 fn is_paranoid_enabled(path: &AbsPath) -> anyhow::Result<bool> {
-    static PARANOID: EnvHelper<bool> = EnvHelper::new("BUCK_PARANOID");
-    if let Some(p) = PARANOID.get_copied()? {
+    if let Some(p) = buck2_env_anyhow!("BUCK_PARANOID", type=bool)? {
         return Ok(p);
     }
diff --git a/app/buck2_client_ctx/src/lib.rs b/app/buck2_client_ctx/src/lib.rs
index c66f1c54586dd..bb75040b9ee6d 100644
--- a/app/buck2_client_ctx/src/lib.rs
+++ b/app/buck2_client_ctx/src/lib.rs
@@ -8,14 +8,17 @@
  */
 
 #![feature(once_cell_try)]
-#![feature(async_closure)]
+#![feature(error_generic_member_access)]
+#![feature(option_get_or_insert_default)]
+#![feature(if_let_guard)]
+#![feature(let_chains)]
 #![feature(try_blocks)]
 #![feature(try_trait_v2)]
+#![feature(used_with_arg)]
+#![feature(round_char_boundary)]
+#![feature(extract_if)]
 
-pub mod argv;
-pub mod build_count;
-pub mod chunk_reader;
-pub mod cleanup_ctx;
+pub mod argfiles;
 pub mod client_cpu_tracker;
 pub mod client_ctx;
 pub mod client_metadata;
@@ -26,11 +29,10 @@ pub mod daemon;
 pub mod daemon_constraints;
 pub mod events_ctx;
 pub mod exit_result;
-pub mod file_tailer;
+pub mod file_tailers;
 pub mod final_console;
 pub mod ide_support;
 pub mod immediate_config;
-pub mod manifold;
 pub mod output_destination_arg;
 pub mod path_arg;
 pub mod query_args;
@@ -41,7 +43,6 @@ pub mod startup_deadline;
 pub mod stdin;
 pub mod stdio;
 pub mod stream_util;
-pub mod stream_value;
 pub mod streaming;
 pub mod subscribers;
 pub mod ticker;
diff --git a/app/buck2_client_ctx/src/output_destination_arg.rs b/app/buck2_client_ctx/src/output_destination_arg.rs
index a756d53758f24..5c85a78e671da 100644
--- a/app/buck2_client_ctx/src/output_destination_arg.rs
+++ b/app/buck2_client_ctx/src/output_destination_arg.rs
@@ -13,7 +13,7 @@ use crate::path_arg::PathArg;
 
 /// Destination argument for clap that allows the user to specify the intention to either forward
 /// data to a file with a provided path, or to an output stream
-#[derive(Debug, Eq, PartialEq)]
+#[derive(Debug, Eq, PartialEq, Clone)]
 pub enum OutputDestinationArg {
     Stream,
     Path(PathArg),
@@ -33,5 +33,5 @@ impl OutputDestinationArg {
 
     /// Token used to specify stream forwarding
-    const STREAM_TOKEN: &str = "-";
+    const STREAM_TOKEN: &'static str = "-";
 }
diff --git a/app/buck2_client_ctx/src/path_arg.rs b/app/buck2_client_ctx/src/path_arg.rs
index 049fccfb5edd1..a135566e8a10f 100644
--- a/app/buck2_client_ctx/src/path_arg.rs
+++ b/app/buck2_client_ctx/src/path_arg.rs
@@ -20,7 +20,7 @@ use serde::Serialize;
 /// Path arguments for clap which is either absolute or relative to current directory.
 ///
 /// Hides the path, but exposes the function which resolves the path against the current directory.
-#[derive(Debug, Eq, PartialEq, Serialize, Deserialize)]
+#[derive(Debug, Eq, PartialEq, Serialize, Deserialize, Clone)]
 pub struct PathArg {
     path: PathBuf,
 }
diff --git a/app/buck2_client_ctx/src/query_args.rs b/app/buck2_client_ctx/src/query_args.rs
index f835dc1679b5b..01266b0747566 100644
--- a/app/buck2_client_ctx/src/query_args.rs
+++ b/app/buck2_client_ctx/src/query_args.rs
@@ -8,7 +8,6 @@
  */
 
 use buck2_core::soft_error;
-use thiserror::Error;
 
 #[derive(Debug, clap::Parser, serde::Serialize, serde::Deserialize)]
 pub struct CommonAttributeArgs {
@@ -45,12 +44,12 @@ pub struct CommonAttributeArgs {
         long,
         group = "output_attribute_flags",
         value_name = "ATTRIBUTE",
         // without limiting num_args, clap will read all space-separated values
         // after the flag, we want to require that each value be preceded individually by the flag.
-        number_of_values = 1,
+        num_args = 1,
         // If the output_all_attributes flag (-A) is set, use "" to select all
-        default_value_if("output_all_attributes", None, Some("")),
-        default_value_if("output_basic_attributes", None, Some("^(buck\\.package|buck\\.type|[^\\.]*)$")),
+        default_value_if("output_all_attributes", "true", Some("")),
+        default_value_if("output_basic_attributes", "true", Some("^(buck\\.package|buck\\.type|[^\\.]*)$")),
     )]
     output_attribute: Vec<String>,
 
@@ -59,14 +58,14 @@ pub struct CommonAttributeArgs {
     /// List of space-separated attributes to output, --output-attributes attr1 attr2.
     #[clap(
         long,
-        multiple_values = true,
+        num_args = 1..,
         value_name = "ATTRIBUTE",
         group = "output_attribute_flags"
     )]
     output_attributes: Vec<String>,
 }
 
-#[derive(Error, Debug)]
+#[derive(buck2_error::Error, Debug)]
 enum ArgErrors {
     #[error("`--output-attributes` is deprecated, use `--output-attribute` instead")]
     OutputAttributesDeprecated,
@@ -81,7 +80,8 @@ impl CommonAttributeArgs {
         if !self.output_attributes.is_empty() {
             soft_error!(
                 "output_attributes",
-                ArgErrors::OutputAttributesDeprecated.into()
+                ArgErrors::OutputAttributesDeprecated.into(),
+                deprecation: true,
             )?;
         }
diff --git a/app/buck2_client_ctx/src/replayer.rs b/app/buck2_client_ctx/src/replayer.rs
index e33dd9ebfe1dd..06b76dba507d0 100644
--- a/app/buck2_client_ctx/src/replayer.rs
+++ b/app/buck2_client_ctx/src/replayer.rs
@@ -10,6 +10,9 @@
 use std::pin::Pin;
 use std::time::SystemTime;
 
+use buck2_event_log::read::EventLogPathBuf;
+use buck2_event_log::stream_value::StreamValue;
+use buck2_event_log::utils::Invocation;
 use futures::stream::BoxStream;
 use futures::task::Poll;
 use futures::Future;
@@ -20,10 +23,6 @@ use pin_project::pin_project;
 use tokio::time::Instant;
 use tokio::time::Sleep;
 
-use crate::stream_value::StreamValue;
-use crate::subscribers::event_log::read::EventLogPathBuf;
-use crate::subscribers::event_log::utils::Invocation;
-
 #[pin_project]
 struct Pending {
     #[pin]
diff --git a/app/buck2_client_ctx/src/startup_deadline.rs b/app/buck2_client_ctx/src/startup_deadline.rs
index 8556f54b33a48..027c5c8f0237f 100644
--- a/app/buck2_client_ctx/src/startup_deadline.rs
+++ b/app/buck2_client_ctx/src/startup_deadline.rs
@@ -37,10 +37,14 @@ impl StartupDeadline {
         })
     }
 
+    pub(crate) fn deadline(&self) -> Instant {
+        self.deadline
+    }
+
     /// Deadline for a nested operation.
     ///
     /// Must be lower than outer deadline to make sure inner operation times out before outer one.
-    fn down_deadline(&self) -> anyhow::Result<StartupDeadline> {
+    pub(crate) fn down_deadline(&self) -> anyhow::Result<StartupDeadline> {
         let new_deadline = self
             .deadline
             .checked_sub(Duration::from_millis(100))
@@ -57,6 +61,7 @@ impl StartupDeadline {
             .with_context(|| format!("timed out before {}", op))
     }
 
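// Illustrative sketch, not part of the patch: `down_deadline` above gives each nested operation
// a slightly smaller time budget so inner timeouts fire before outer ones. The core arithmetic,
// in a self-contained form:
use std::time::{Duration, Instant};

fn nested_deadline(outer: Instant) -> Option<Instant> {
    // Shave 100ms off the outer deadline; `checked_sub` yields None when the budget is
    // exhausted, which the real code turns into a "timed out before ..." error.
    outer.checked_sub(Duration::from_millis(100))
}

fn main() {
    let outer = Instant::now() + Duration::from_secs(10);
    let inner = nested_deadline(outer).expect("outer deadline too tight");
    assert!(inner < outer);
}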
+    /// Decrease the deadline by 100ms and invoke the given function with the new deadline.
     pub(crate) async fn down<R, F, Fut>(&self, op: &str, f: F) -> anyhow::Result<R>
     where
         F: FnOnce(StartupDeadline) -> Fut,
diff --git a/app/buck2_client_ctx/src/stdin.rs b/app/buck2_client_ctx/src/stdin.rs
index ec59b8d7ec24d..0e5f0a4efcab6 100644
--- a/app/buck2_client_ctx/src/stdin.rs
+++ b/app/buck2_client_ctx/src/stdin.rs
@@ -14,7 +14,8 @@ use std::task::Context;
 use std::task::Poll;
 use std::thread::JoinHandle;
 
-use buck2_core::env_helper::EnvHelper;
+use buck2_core::buck2_env_anyhow;
+use buck2_util::threads::thread_spawn;
 use bytes::Bytes;
 use futures::stream::Fuse;
 use futures::stream::StreamExt;
@@ -25,7 +26,7 @@ use tokio::sync::mpsc;
 use tokio_stream::wrappers::ReceiverStream;
 use tokio_util::io::StreamReader;
 
-use crate::common::CommonConsoleOptions;
+use crate::common::ui::CommonConsoleOptions;
 use crate::console_interaction_stream::ConsoleInteractionStream;
 
 #[pin_project]
@@ -51,8 +52,12 @@ impl AsyncRead for Stdin {
 
 impl Stdin {
     pub fn new() -> anyhow::Result<Self> {
-        static STDIN_BUFFER_SIZE: EnvHelper<usize> = EnvHelper::new("BUCK2_TEST_STDIN_BUFFER_SIZE");
-        let buffer_size = STDIN_BUFFER_SIZE.get()?.copied().unwrap_or(8192);
+        let buffer_size = buck2_env_anyhow!(
+            "BUCK2_TEST_STDIN_BUFFER_SIZE",
+            type=usize,
+            applicability=testing,
+        )?
+        .unwrap_or(8192);
 
         // Small buffer, this isn't bytes we're buffering, just buffers of bytes. That said, since
         // we're on separate threads, give ourselves a bit of buffering.
@@ -77,6 +82,7 @@ impl Stdin {
     }
 }
 
+#[allow(dead_code)] // field `0` is never read
 enum State {
     Pending {
         buffer_size: usize,
@@ -97,7 +103,7 @@ impl State {
                 buffer_size,
                 mut tx,
             } => {
-                let handle = std::thread::spawn({
+                let handle = thread_spawn("buck2-stdin", {
                     move || {
                         #[allow(clippy::let_and_return)]
                         let stdin = std::io::stdin().lock();
@@ -110,7 +116,8 @@ impl State {
                         // NOTE: We ignore send errors since there is no point in reading without a receiver.
                         let _ignored = read_and_forward(stdin, &mut tx, buffer_size);
                     }
-                });
+                })
+                .unwrap();
 
                 Self::Started(handle)
             }
diff --git a/app/buck2_client_ctx/src/stdio.rs b/app/buck2_client_ctx/src/stdio.rs
index 6bf3ca8e73936..d95f8db5747e2 100644
--- a/app/buck2_client_ctx/src/stdio.rs
+++ b/app/buck2_client_ctx/src/stdio.rs
@@ -12,14 +12,18 @@
 //! place, and should usually just be propagated in order to lead to a quick exit.
 
 use std::fmt::Arguments;
+use std::fs::File;
 use std::io;
+use std::io::LineWriter;
+use std::io::Stdout;
 use std::io::Write;
 use std::sync::atomic::AtomicBool;
 use std::sync::atomic::Ordering;
 
+use buck2_error::internal_error_anyhow;
 use superconsole::Line;
 
-use crate::exit_result::FailureExitCode;
+use crate::exit_result::ClientIoError;
 
 static HAS_WRITTEN_TO_STDOUT: AtomicBool = AtomicBool::new(false);
 
@@ -27,9 +31,14 @@ pub fn has_written_to_stdout() -> bool {
     HAS_WRITTEN_TO_STDOUT.load(Ordering::Relaxed)
 }
 
-fn stdout() -> io::Stdout {
+static STDOUT_LOCKED: AtomicBool = AtomicBool::new(false);
+
+fn stdout() -> anyhow::Result<Stdout> {
+    if STDOUT_LOCKED.load(Ordering::Relaxed) {
+        return Err(internal_error_anyhow!("stdout is already locked"));
+    }
     HAS_WRITTEN_TO_STDOUT.store(true, Ordering::Relaxed);
-    io::stdout()
+    Ok(io::stdout())
 }
 
 #[macro_export]
@@ -78,32 +87,25 @@ macro_rules! eprintln {
     };
 }
 
-fn map_stdout_error(err: io::Error) -> anyhow::Error {
-    if err.kind() == io::ErrorKind::BrokenPipe {
-        anyhow::Error::new(FailureExitCode::StdoutBrokenPipe)
-    } else {
-        anyhow::Error::new(err)
-    }
-}
-
-fn map_stderr_error(err: io::Error) -> anyhow::Error {
-    if err.kind() == io::ErrorKind::BrokenPipe {
-        anyhow::Error::new(FailureExitCode::StderrBrokenPipe)
-    } else {
-        anyhow::Error::new(err)
-    }
-}
-
 pub fn _print(fmt: Arguments) -> anyhow::Result<()> {
-    stdout().lock().write_fmt(fmt).map_err(map_stdout_error)
+    stdout()?
+        .lock()
+        .write_fmt(fmt)
+        .map_err(|e| ClientIoError::new(e).into())
 }
 
 pub fn _eprint(fmt: Arguments) -> anyhow::Result<()> {
-    io::stderr().lock().write_fmt(fmt).map_err(map_stderr_error)
+    io::stderr()
+        .lock()
+        .write_fmt(fmt)
+        .map_err(|e| ClientIoError::new(e).into())
 }
 
 pub fn print_bytes(bytes: &[u8]) -> anyhow::Result<()> {
-    stdout().lock().write_all(bytes).map_err(map_stdout_error)
+    stdout()?
+        .lock()
+        .write_all(bytes)
+        .map_err(|e| ClientIoError::new(e).into())
 }
 
 pub fn eprint_line(line: &Line) -> anyhow::Result<()> {
@@ -112,24 +114,61 @@ pub fn eprint_line(line: &Line) -> anyhow::Result<()> {
 }
 
 pub fn flush() -> anyhow::Result<()> {
-    stdout().flush().map_err(map_stdout_error)
+    stdout()?.flush().map_err(|e| ClientIoError::new(e).into())
+}
+
+fn stdout_to_file(stdout: &Stdout) -> anyhow::Result<File> {
+    #[cfg(not(windows))]
+    {
+        use std::os::fd::AsFd;
+        Ok(File::from(stdout.as_fd().try_clone_to_owned()?))
+    }
+    #[cfg(windows)]
+    {
+        use std::os::windows::io::AsHandle;
+        Ok(File::from(stdout.as_handle().try_clone_to_owned()?))
+    }
 }
 
 pub fn print_with_writer<E, F>(f: F) -> anyhow::Result<()>
 where
     E: Into<anyhow::Error>,
-    F: FnOnce(&mut dyn Write) -> Result<(), E>,
+    F: FnOnce(&mut (dyn Write + Send)) -> Result<(), E>,
 {
-    match f(&mut stdout().lock()) {
-        Ok(_) => Ok(()),
+    let stdout = stdout()?;
+
+    struct StdoutLockedGuard;
+
+    impl Drop for StdoutLockedGuard {
+        fn drop(&mut self) {
+            assert!(
+                STDOUT_LOCKED
+                    .compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed)
+                    .is_ok()
+            );
+        }
+    }
+
+    assert!(
+        STDOUT_LOCKED
+            .compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed)
+            .is_ok()
+    );
+    let _guard = StdoutLockedGuard;
+
+    let _guard = stdout.lock();
+    let file = stdout_to_file(&stdout)?;
+    let mut w = LineWriter::new(file);
+    match f(&mut w) {
+        Ok(()) => {}
         Err(e) => {
             let e: anyhow::Error = e.into();
-            match e.downcast_ref::<io::Error>() {
-                Some(io_error) if io_error.kind() == io::ErrorKind::BrokenPipe => {
-                    Err(anyhow::Error::new(FailureExitCode::StdoutBrokenPipe))
-                }
-                Some(_) | None => Err(e),
-            }
+            return match e.downcast::<io::Error>() {
+                Ok(io_error) => Err(ClientIoError::new(io_error).into()),
+                Err(e) => Err(e),
+            };
         }
     }
+    w.flush()?;
+    Ok(())
 }
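// Illustrative sketch, not part of the patch: the STDOUT_LOCKED flag above is a poor-man's
// reentrancy check - an RAII guard flips an AtomicBool and restores it on drop. The bare pattern:
use std::sync::atomic::{AtomicBool, Ordering};

static LOCKED: AtomicBool = AtomicBool::new(false);

struct Guard;

impl Drop for Guard {
    fn drop(&mut self) {
        // Release; failing here would indicate a logic bug (flag not actually held).
        assert!(
            LOCKED
                .compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed)
                .is_ok()
        );
    }
}

fn try_lock() -> Option<Guard> {
    // Acquire the flag only if it is currently free.
    LOCKED
        .compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed)
        .ok()
        .map(|_| Guard)
}

fn main() {
    let g = try_lock().expect("free on first call");
    assert!(try_lock().is_none()); // reentrant use is detected
    drop(g);
    assert!(try_lock().is_some());
}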
crate::daemon::client::connect::BuckdConnectConstraints; use crate::daemon::client::connect::BuckdConnectOptions; use crate::daemon::client::connect::DaemonConstraintsRequest; use crate::daemon::client::connect::DesiredTraceIoState; use crate::daemon::client::BuckdClientConnector; -use crate::exit_result::gen_error_exit_code; +use crate::exit_result::ExitCode; use crate::exit_result::ExitResult; -use crate::exit_result::FailureExitCode; use crate::path_arg::PathArg; use crate::signal_handler::with_simple_sigint_handler; use crate::subscribers::get::get_console_with_root; @@ -36,11 +36,12 @@ use crate::subscribers::get::try_get_event_log_subscriber; use crate::subscribers::get::try_get_re_log_subscriber; use crate::subscribers::recorder::try_get_invocation_recorder; use crate::subscribers::subscriber::EventSubscriber; +use crate::subscribers::subscribers::EventSubscribers; fn default_subscribers<'a, T: StreamingCommand>( cmd: &T, ctx: &ClientCommandContext<'a>, -) -> anyhow::Result>> { +) -> anyhow::Result> { let console_opts = cmd.console_opts(); let mut subscribers = vec![]; let expect_spans = cmd.should_expect_spans(); @@ -49,7 +50,11 @@ fn default_subscribers<'a, T: StreamingCommand>( // and log it in another (invocation_recorder) let log_size_counter_bytes = Some(Arc::new(AtomicU64::new(0))); - if let Some(v) = get_console_with_root( + let build_count_dir = match ctx.paths() { + Ok(paths) => Some(paths.build_count_dir()), + Err(_) => None, + }; + subscribers.push(get_console_with_root( ctx.trace_id.dupe(), console_opts.console_type, ctx.verbosity, @@ -57,9 +62,9 @@ fn default_subscribers<'a, T: StreamingCommand>( None, T::COMMAND_NAME, console_opts.superconsole_config(), - )? { - subscribers.push(v) - } + build_count_dir, + )?); + if let Some(event_log) = try_get_event_log_subscriber(cmd, ctx, log_size_counter_bytes.clone())? { subscribers.push(event_log) @@ -83,7 +88,7 @@ fn default_subscribers<'a, T: StreamingCommand>( subscribers.push(recorder); subscribers.extend(cmd.extra_subscribers()); - Ok(subscribers) + Ok(EventSubscribers::new(subscribers)) } /// Trait to generalize the behavior of executable buck2 commands that rely on a server. @@ -116,9 +121,11 @@ pub trait StreamingCommand: Sized + Send + Sync { fn console_opts(&self) -> &CommonConsoleOptions; - fn event_log_opts(&self) -> &CommonDaemonCommandOptions; + fn event_log_opts(&self) -> &CommonEventLogOptions; - fn common_opts(&self) -> &CommonBuildConfigurationOptions; + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions; + + fn starlark_opts(&self) -> &CommonStarlarkOptions; fn extra_subscribers(&self) -> Vec> { vec![] @@ -155,7 +162,14 @@ impl BuckSubcommand for T { /// Actual call that runs a `StreamingCommand`. /// Handles all of the business of setting up a runtime, server, and subscribers. 
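Above, `default_subscribers` now returns an `EventSubscribers` wrapper instead of a bare `Vec` of boxed subscribers, so the event loop in `exec` below has a single object to drive. A rough sketch of why such a newtype is convenient; the trait and method names here are simplified assumptions, not buck2's actual API:

// Minimal sketch of a subscriber fan-out wrapper; names are illustrative.
trait Subscriber {
    fn handle_event(&mut self, event: &str) -> Result<(), String>;
}

struct Subscribers(Vec<Box<dyn Subscriber>>);

impl Subscribers {
    fn new(subscribers: Vec<Box<dyn Subscriber>>) -> Self {
        Self(subscribers)
    }

    // Deliver one event to every subscriber, remembering the first error but
    // still letting the remaining subscribers observe the event.
    fn broadcast(&mut self, event: &str) -> Result<(), String> {
        let mut first_err = None;
        for s in &mut self.0 {
            if let Err(e) = s.handle_event(event) {
                first_err.get_or_insert(e);
            }
        }
        match first_err {
            Some(e) => Err(e),
            None => Ok(()),
        }
    }
}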
fn exec(self, matches: &clap::ArgMatches, ctx: ClientCommandContext<'_>) -> ExitResult { - ctx.with_runtime(async move |mut ctx| { + let buck_log_dir = &ctx.paths()?.log_dir(); + let command_report_path = &self + .event_log_opts() + .command_report_path + .as_ref() + .map(|path| path.resolve(&ctx.working_dir)); + + ctx.with_runtime(|mut ctx| async move { let work = async { let constraints = if T::existing_only() { BuckdConnectConstraints::ExistingOnly @@ -188,22 +202,23 @@ impl BuckSubcommand for T { let mut buckd = match buckd { Ok(buckd) => buckd, Err(e) => { - return ExitResult::from(FailureExitCode::ConnectError(e)); + return ExitResult::err_with_exit_code(e, ExitCode::ConnectError); } }; let command_result = self.exec_impl(&mut buckd, matches, &mut ctx).await; - let command_result = command_result - .categorized_or_else(|| gen_error_exit_code(buckd.collect_error_cause())); ctx.restarter.observe(&buckd); command_result }; - with_simple_sigint_handler(work) + let result = with_simple_sigint_handler(work) .await - .unwrap_or_else(|| ExitResult::from(FailureExitCode::SignalInterrupt)) + .unwrap_or_else(|| ExitResult::status(ExitCode::SignalInterrupt)); + + result.write_command_report(ctx.trace_id, buck_log_dir, command_report_path)?; + result }) } } diff --git a/app/buck2_client_ctx/src/subscribers.rs b/app/buck2_client_ctx/src/subscribers.rs new file mode 100644 index 0000000000000..ffc22c4594caf --- /dev/null +++ b/app/buck2_client_ctx/src/subscribers.rs @@ -0,0 +1,24 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +pub(crate) mod build_graph_stats; +pub(crate) mod build_id_writer; +pub(crate) mod classify_server_stderr; +pub(crate) mod errorconsole; +pub mod event_log; +pub mod get; +pub(crate) mod observer; +pub mod re_log; +pub mod recorder; +pub(crate) mod simpleconsole; +pub mod stdout_stderr_forwarder; +pub mod subscriber; +pub mod subscribers; +pub mod superconsole; +pub(crate) mod system_warning; diff --git a/app/buck2_client_ctx/src/subscribers/build_graph_stats.rs b/app/buck2_client_ctx/src/subscribers/build_graph_stats.rs index 5caf339540dd2..9726cf1448d9b 100644 --- a/app/buck2_client_ctx/src/subscribers/build_graph_stats.rs +++ b/app/buck2_client_ctx/src/subscribers/build_graph_stats.rs @@ -12,7 +12,7 @@ use std::time::SystemTime; use async_trait::async_trait; use buck2_cli_proto::command_result; -use buck2_events::sink::scribe::new_thrift_scribe_sink_if_enabled; +use buck2_events::sink::remote::new_remote_event_sink_if_enabled; use buck2_wrapper_common::invocation_id::TraceId; use dupe::Dupe; use fbinit::FacebookInit; @@ -33,8 +33,8 @@ impl BuildGraphStats { &self, res: &buck2_cli_proto::BuildResponse, ) -> anyhow::Result<()> { - let event = self.build_graph_stats_from_build_response(res); - self.send_event(event).await; + let events = self.build_graph_stats_from_build_response(res); + self.send_events(events).await; Ok(()) } @@ -42,37 +42,46 @@ impl BuildGraphStats { fn build_graph_stats_from_build_response( &self, res: &buck2_cli_proto::BuildResponse, - ) -> buck2_events::BuckEvent { - let build_targets = res - .build_targets - .iter() - .map(|t| buck2_data::BuildTarget { - target: t.target.clone(), - configuration: t.configuration.clone(), - configured_graph_size: t.configured_graph_size, + ) -> Vec { + const MAX_BUILD_TARGETS_LEN: usize = 3000; + + res.build_targets + .chunks(MAX_BUILD_TARGETS_LEN) + .map(|ts| { + buck2_events::BuckEvent::new( + SystemTime::now(), + self.trace_id.dupe(), + None, + None, + buck2_data::RecordEvent { + data: Some( + buck2_data::BuildGraphStats { + build_targets: ts + .iter() + .map(|t| buck2_data::BuildTarget { + target: t.target.clone(), + configuration: t.configuration.clone(), + configured_graph_size: t.configured_graph_size, + }) + .collect(), + } + .into(), + ), + } + .into(), + ) }) - .collect(); - let stats = buck2_data::BuildGraphStats { build_targets }; - buck2_events::BuckEvent::new( - SystemTime::now(), - self.trace_id.dupe(), - None, - None, - buck2_data::RecordEvent { - data: Some(stats.into()), - } - .into(), - ) + .collect() } - async fn send_event(&self, event: buck2_events::BuckEvent) { + async fn send_events(&self, events: Vec) { if let Ok(Some(sink)) = - new_thrift_scribe_sink_if_enabled(self.fb, 1, Duration::from_millis(100), 2, None) + new_remote_event_sink_if_enabled(self.fb, 1, Duration::from_millis(100), 2, None) { - tracing::info!("Sending an event to Scribe: {:?}", &event); - sink.send_now(event).await; + tracing::info!("Sending events to Scribe: {:?}", &events); + sink.send_messages_now(events).await; } else { - tracing::info!("An event was not sent to Scribe: {:?}", &event); + tracing::info!("Events were not sent to Scribe: {:?}", &events); } } } @@ -118,7 +127,7 @@ mod tests { let uuid = TraceId::new(); let handler = BuildGraphStats::new(fb, uuid.dupe()); - let event = handler.build_graph_stats_from_build_response(&res); + let events = handler.build_graph_stats_from_build_response(&res); let event_expected = buck2_data::BuckEvent { data: Some(buck2_data::buck_event::Data::Record( @@ -144,8 +153,9 @@ mod tests { 
..Default::default() }; - assert_eq!(event.trace_id().unwrap(), uuid); - assert_eq!(event.data(), &event_expected.data.unwrap()); + assert_eq!(events.len(), 1); + assert_eq!(events[0].trace_id().unwrap(), uuid); + assert_eq!(events[0].data(), event_expected.data.as_ref().unwrap()); } #[fbinit::test] @@ -157,14 +167,65 @@ mod tests { let uuid = TraceId::new(); let handler = BuildGraphStats::new(fb, uuid.dupe()); - let event = handler.build_graph_stats_from_build_response(&res); + let events = handler.build_graph_stats_from_build_response(&res); - let event_expected = buck2_data::BuckEvent { + assert_eq!(events.len(), 0); + } + + #[fbinit::test] + fn build_graph_stats_too_long_targets(fb: FacebookInit) { + let build_target = buck2_cli_proto::BuildTarget { + target: "T".to_owned(), + configuration: "C".to_owned(), + configured_graph_size: Some(1), + ..Default::default() + }; + + // Testing if [6002] becomes [[3000], [3000], [2]] + let mut input_build_targets = vec![]; + for _ in 0..6002 { + input_build_targets.push(build_target.clone()); + } + + let res = buck2_cli_proto::BuildResponse { + build_targets: input_build_targets, + ..Default::default() + }; + + let uuid = TraceId::new(); + let handler = BuildGraphStats::new(fb, uuid.dupe()); + let events = handler.build_graph_stats_from_build_response(&res); + + let build_target = buck2_data::BuildTarget { + target: "T".to_owned(), + configuration: "C".to_owned(), + configured_graph_size: Some(1), + }; + + let mut output_build_targets_3000 = vec![]; + for _ in 0..3000 { + output_build_targets_3000.push(build_target.clone()); + } + let output_build_targets_2 = vec![build_target.clone(), build_target.clone()]; + + let event_expected_3000 = buck2_data::BuckEvent { + data: Some(buck2_data::buck_event::Data::Record( + buck2_data::RecordEvent { + data: Some(buck2_data::record_event::Data::BuildGraphStats( + buck2_data::BuildGraphStats { + build_targets: output_build_targets_3000, + }, + )), + }, + )), + ..Default::default() + }; + let event_expected_2 = buck2_data::BuckEvent { data: Some(buck2_data::buck_event::Data::Record( buck2_data::RecordEvent { data: Some(buck2_data::record_event::Data::BuildGraphStats( buck2_data::BuildGraphStats { - build_targets: vec![], + build_targets: output_build_targets_2, }, )), }, @@ -172,7 +233,9 @@ mod tests { ..Default::default() }; - assert_eq!(event.trace_id().unwrap(), uuid); - assert_eq!(event.data(), &event_expected.data.unwrap()); + assert_eq!(events.len(), 3); + assert_eq!(events[0].data(), event_expected_3000.data.as_ref().unwrap()); + assert_eq!(events[1].data(), event_expected_3000.data.as_ref().unwrap()); + assert_eq!(events[2].data(), event_expected_2.data.as_ref().unwrap()); } } diff --git a/app/buck2_client_ctx/src/subscribers/classify_server_stderr.rs b/app/buck2_client_ctx/src/subscribers/classify_server_stderr.rs new file mode 100644 index 0000000000000..8f14a20b3b36c --- /dev/null +++ b/app/buck2_client_ctx/src/subscribers/classify_server_stderr.rs @@ -0,0 +1,329 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use std::ops::ControlFlow; + +use buck2_data::error::ErrorTag; +use once_cell::sync::Lazy; + +pub(crate) fn classify_server_stderr( + error: buck2_error::Error, + stderr: &str, +) -> buck2_error::Error { + let tag = if stderr.is_empty() { + ErrorTag::ServerStderrEmpty + } else if stderr.contains(": size mismatch detected") { + // P1181704561 + ErrorTag::ServerJemallocAssert + } else if stderr.contains("panicked at") { + // Sample output of `buck2 debug crash`: P1159041719 + ErrorTag::ServerPanicked + } else if stderr.contains("has overflowed its stack") { + // Stderr looks like this: + // ``` + // thread 'buck2-dm' has overflowed its stack + // ``` + ErrorTag::ServerStackOverflow + } else if stderr.contains("Signal 11 (SIGSEGV)") { + // P1180289404 + ErrorTag::ServerSegv + } else if stderr.contains("Signal 15 (SIGTERM)") { + ErrorTag::ServerSigterm + } else { + ErrorTag::ServerStderrUnknown + }; + + let error = if let Some(trace) = extract_trace(stderr) { + if tag != ErrorTag::ServerSigterm { + error.context_for_key(&format!("crash({})", trace.trace_key())) + } else if let Some(signal_line) = trace.signal_line { + // Keep this because the PID that (might have) sent it could be useful. + // *** Signal 15 (SIGTERM) (0x2b08100000ab5) received by PID 1762297 (pthread TID 0x7f6650339640) (linux TID 1762297) (maybe from PID 2741, UID 176257) (code: 0), stack trace: *** + error.context(signal_line) + } else { + error + } + } else { + error + }; + + error.tag([tag]) +} + +// 0: rust_begin_unwind +// at ./xplat/rust/toolchain/sysroot/1.80.1/library/std/src/panicking.rs:652:5 +// 1: ::unstable_crash::{closure#0} +// at ./fbcode/buck2/app/buck2_server/src/daemon/crash.rs:18:13 +static RUST_STACK_FRAME: Lazy = + Lazy::new(|| regex::Regex::new(r"^\s*\d*:\s*(.*)$").unwrap()); +static RUST_CONTEXT: Lazy = Lazy::new(|| regex::Regex::new(r"^\s*at \S*$").unwrap()); + +fn extract_rust_frame(line: &str) -> ControlFlow<(), Option> { + if let Some(capture) = RUST_STACK_FRAME + .captures(line) + .map(|captures| captures.get(1)) + .flatten() + { + ControlFlow::Continue(Some(capture.as_str().to_owned())) + } else if RUST_CONTEXT.is_match(line) { + ControlFlow::Continue(None) + } else { + ControlFlow::Break(()) + } +} + +// 0 buck2 0x0000000107f548c0 _ZN5folly10symbolizer17getStackTraceSafeEPmm + 12 +static FOLLY_MAC_STACK_FRAME: Lazy = + Lazy::new(|| regex::Regex::new(r"^\d*\s*\S*\s*0x\w*\s*(\S*) \+ \d*$").unwrap()); + +// @ 000000000004455f (unknown) +// /home/engshare/third-party2/glibc/2.34/src/glibc-2.34/signal/../sysdeps/unix/sysv/linux/libc_sigaction.c:8 +// -> /home/engshare/third-party2/glibc/2.34/src/glibc-2.34/signal/../sysdeps/unix/sysv/linux/x86_64/libc_sigaction.c +static FOLLY_LINUX_STACK_FRAME: Lazy = + Lazy::new(|| regex::Regex::new(r"^\s*@\s*\w*\s*(.*)$").unwrap()); +static FOLLY_LINUX_CONTEXT: Lazy = + Lazy::new(|| regex::Regex::new(r"^\s*(-> )?.*$").unwrap()); + +fn extract_folly_frame(line: &str) -> ControlFlow<(), Option> { + if let Some(capture) = FOLLY_MAC_STACK_FRAME + .captures(line) + .map(|captures| captures.get(1)) + .flatten() + { + ControlFlow::Continue(Some(capture.as_str().to_owned())) + } else if let Some(capture) = FOLLY_LINUX_STACK_FRAME + .captures(line) + .map(|captures| captures.get(1)) + .flatten() + { + ControlFlow::Continue(Some(capture.as_str().to_owned())) + } else if FOLLY_LINUX_CONTEXT.is_match(line) { + ControlFlow::Continue(None) + } else { + ControlFlow::Break(()) + } +} + +enum TraceType { + Rust, + Folly, +} + +const UNINTERESTING_SEGMENTS: [&str; 7] = [ + 
"rust_begin_unwind", + "core::panicking::panic_fmt", + "(unknown)", + "folly::symbolizer", + "folly10symbolizer", + "__GI_", + "_sigtramp", +]; + +struct StackTraceInfo { + signal_line: Option, + stack_trace_lines: Vec, +} + +impl StackTraceInfo { + fn sanitized_trace(self) -> Vec { + // Exclude some lines to reduce churn in trace keys. + // Changes higher up the stack are less likely to be related to the crash. + if self.stack_trace_lines.len() > 5 { + // 'uninteresting' lines shouldn't cause churn but exclude them if we are shortening the stack + // to get enough unique data. + + self.stack_trace_lines + .into_iter() + .filter(|line| !UNINTERESTING_SEGMENTS.iter().any(|s| line.contains(s))) + .take(5) + .collect() + } else { + self.stack_trace_lines.clone() + } + } + + fn trace_key(self) -> String { + // blake3 just because the default rust hasher isn't intended to be stable. + let mut hasher = blake3::Hasher::new(); + for s in self.sanitized_trace() { + hasher.update(s.as_bytes()); + } + let mut digest = hasher.finalize().to_string(); + // Truncate to keep category_key relatively short and readable. + // This should be enough to avoid collisions since there should be very few unique crashes. + digest.truncate(6); + digest + } +} + +fn extract_trace(stderr: &str) -> Option { + let mut stack_trace_lines: Vec = vec![]; + let mut signal_line = None; + let mut stack_trace_type = None; + for line in stderr.split('\n') { + if line.starts_with("*** Signal") { + signal_line = Some(line.to_owned()); + stack_trace_type = Some(TraceType::Folly); + } else if line.starts_with("stack backtrace:") { + stack_trace_type = Some(TraceType::Rust); + } else if let Some(ref trace_type) = stack_trace_type { + let res = match trace_type { + TraceType::Rust => extract_rust_frame(line), + TraceType::Folly => extract_folly_frame(line), + }; + match res { + ControlFlow::Continue(Some(line)) => stack_trace_lines.push(line), + ControlFlow::Continue(None) => (), + ControlFlow::Break(_) => break, + } + } + } + + stack_trace_type.map(|_| StackTraceInfo { + signal_line, + stack_trace_lines, + }) +} + +#[cfg(test)] +mod tests { + use super::*; + + fn test_trace_lines(trace: &str) -> Vec { + trace + .split('\n') + .map(|s| s.trim().to_owned()) + .filter(|s| !s.is_empty()) + .collect() + } + + #[test] + fn test_generated_stack_trace() { + let backtrace = std::backtrace::Backtrace::force_capture(); + let stderr = format!("stack backtrace:\n{}", backtrace); + assert!(extract_trace(&stderr).is_some()); + } + + #[test] + fn test_rust_stack_trace_hash() { + // from `buck2 debug crash panic` + let panic_trace = " +stack backtrace: + 0: rust_begin_unwind + at ./xplat/rust/toolchain/sysroot/1.80.1/library/std/src/panicking.rs:652:5 + 1: core::panicking::panic_fmt + at ./xplat/rust/toolchain/sysroot/1.80.1/library/core/src/panicking.rs:72:14 + 2: ::unstable_crash::{closure#0} + at ./fbcode/buck2/app/buck2_server/src/daemon/crash.rs:18:13 + 3: < as tower_service::Service>>::call::Unstable_CrashSvc as tonic::server::service::UnaryService>::call::{closure#0} + at ./xplat/rust/toolchain/sysroot/1.80.1/library/core/src/future/future.rs:123:9 + 4: as tower_service::Service>>::call::{closure#20} + at ./xplat/rust/toolchain/sysroot/1.80.1/library/core/src/future/future.rs:123:9 + 5: >, core::convert::Infallible>> + core::marker::Send>>>, futures_util::fns::MapOkFn<::add_service>>::{closure#0}>> as core::future::future::Future>::poll + at ./xplat/rust/toolchain/sysroot/1.80.1/library/core/src/future/future.rs:123:9 + 6: >, 
core::convert::Infallible>> + core::marker::Send>>, ::add_service>::{closure#0}>>, futures_util::fns::MapOkFn<> as axum_core::response::into_response::IntoResponse>::into_response>> as core::future::future::Future>::poll + at ./third-party/rust/vendor/futures-util-0.3.30/src/lib.rs:91:13 + 7: >, core::convert::Infallible>> + core::marker::Send>>, ::add_service>::{closure#0}>, > as axum_core::response::into_response::IntoResponse>::into_response> as core::future::future::Future>::poll + at ./third-party/rust/vendor/futures-util-0.3.30/src/lib.rs:91:13 + 8: , http::response::Response>, core::convert::Infallible>, http::request::Request> as core::future::future::Future>::poll + at ./xplat/rust/toolchain/sysroot/1.80.1/library/core/src/future/future.rs:123:9 + "; + + let panic_sanitized_trace = test_trace_lines(" + ::unstable_crash::{closure#0} + < as tower_service::Service>>::call::Unstable_CrashSvc as tonic::server::service::UnaryService>::call::{closure#0} + as tower_service::Service>>::call::{closure#20} + >, core::convert::Infallible>> + core::marker::Send>>>, futures_util::fns::MapOkFn<::add_service>>::{closure#0}>> as core::future::future::Future>::poll + >, core::convert::Infallible>> + core::marker::Send>>, ::add_service>::{closure#0}>>, futures_util::fns::MapOkFn<> as axum_core::response::into_response::IntoResponse>::into_response>> as core::future::future::Future>::poll + "); + assert_eq!( + extract_trace(panic_trace) + .expect("no trace found") + .sanitized_trace(), + panic_sanitized_trace + ); + } + + #[test] + fn test_folly_stack_trace() { + // from `buck2 debug crash abort` + let linux_folly_trace = " +*** Aborted at 1724968759 (Unix time, try 'date -d @1724968759') *** +*** Signal 6 (SIGABRT) (0x261c500150b2c) received by PID 1379116 (pthread TID 0x7f211b600640) (linux TID 1379195) (maybe from PID 1379116, UID 156101) (code: -6), stack trace: *** + @ 00000000081eca3f folly::symbolizer::(anonymous namespace)::signalHandler(int, siginfo_t*, void*) + ./fbcode/folly/debugging/symbolizer/SignalHandler.cpp:453 + @ 000000000004455f (unknown) + /home/engshare/third-party2/glibc/2.34/src/glibc-2.34/signal/../sysdeps/unix/sysv/linux/libc_sigaction.c:8 + -> /home/engshare/third-party2/glibc/2.34/src/glibc-2.34/signal/../sysdeps/unix/sysv/linux/x86_64/libc_sigaction.c + @ 000000000009c993 __GI___pthread_kill + /home/engshare/third-party2/glibc/2.34/src/glibc-2.34/nptl/pthread_kill.c:46 + @ 00000000000444ac __GI_raise + /home/engshare/third-party2/glibc/2.34/src/glibc-2.34/signal/../sysdeps/posix/raise.c:26 + @ 000000000002c432 __GI_abort + /home/engshare/third-party2/glibc/2.34/src/glibc-2.34/stdlib/abort.c:79 + @ 0000000007f217e8 std::sys::pal::unix::abort_internal + xplat/rust/toolchain/sysroot/1.80.1/library/std/src/sys/pal/unix/mod.rs:366 + @ 0000000007edad48 std::process::abort + xplat/rust/toolchain/sysroot/1.80.1/library/std/src/process.rs:2369 + @ 000000001875604b ::unstable_crash::{closure#0} + fbcode/buck2/app/buck2_server/src/daemon/crash.rs:27 + @ 0000000018e12834 < as tower_service::Service>>::call::Unstable_CrashSvc as tonic::server::service::UnaryService>::call::{closure#0} + xplat/rust/toolchain/sysroot/1.80.1/library/core/src/future/future.rs:123 + @ 0000000018dff24b as tower_service::Service>>::call::{closure#20} + xplat/rust/toolchain/sysroot/1.80.1/library/core/src/future/future.rs:123 + @ 0000000019a82766 >, core::convert::Infallible>> + core::marker::Send>>>, futures_util::fns::MapOkFn<::add_service>::{closure#0}>> as core::future::future::Future>::poll + 
xplat/rust/toolchain/sysroot/1.80.1/library/core/src/future/future.rs:123 + "; + + let linux_sanitized_trace = test_trace_lines(" + std::sys::pal::unix::abort_internal + std::process::abort + ::unstable_crash::{closure#0} + < as tower_service::Service>>::call::Unstable_CrashSvc as tonic::server::service::UnaryService>::call::{closure#0} + as tower_service::Service>>::call::{closure#20} + "); + assert_eq!( + extract_trace(linux_folly_trace) + .expect("no trace found") + .sanitized_trace(), + linux_sanitized_trace + ); + + let mac_folly_trace = " +*** Aborted at 1709157873 (Unix time, try 'date -d @1709157873') *** +*** Signal 11 (SIGSEGV) (0x0) received by PID 43312 (pthread TID 0x9026bf000) (code: invalid permissions for mapped object), stack trace: *** +0 buck2 0x0000000108c7b940 _ZN5folly10symbolizer17getStackTraceSafeEPmm + 12 +1 buck2 0x0000000108c6f8b8 _ZN5folly10symbolizer21SafeStackTracePrinter15printStackTraceEb + 72 +2 buck2 0x000000010a55f0f8 _ZN5folly10symbolizer12_GLOBAL__N_113signalHandlerEiP9__siginfoPv + 1536 +3 libsystem_platform.dylib 0x00000001810c5a24 _sigtramp + 56 +4 buck2 0x0000000109ff94c4 _ZN8facebook16remote_execution3cas18ManifoldHttpClient14co_materializeENSt3__110shared_ptrINS1_14CASCallContextEEENS4_INS1_20IDigestStatusTrackerEEENS3_6vectorINS3_4pairIN5build5bazel6remote9execution2v26DigestENS4_INS1_11FileWrapperEEEEENS3_9allocatorISJ_EEEENS0_6crypto8HashAlgoE.resume + 648 +5 buck2 0x0000000109ff54e4 _ZZN5folly4coro18collectAllTryRangeINS0_6detail9MoveRangeINSt3__16vectorINS0_16TaskWithExecutorIvEENS4_9allocatorIS7_EEEEEEEENS0_4TaskINS5_INS_3TryINS2_22decay_rvalue_referenceINS2_21lift_lvalue_referenceINS0_12await_resultIDTclL_ZNS0_11folly_cpo__13co_viaIfAsyncEEclL_ZNS4_7declvalINS_8Executor9KeepAliveISJ_EEEEDTcl9__declvalIT_ELi0EEEvEEclsr3stdE7declvalINS4_15iterator_traitsIDTclL_ZNS_6access5beginEEclsr3stdE7declvalIRSM_EEEEE9referenceEEEEEvE4typeEE4typeEE4typeEEENS8_IS11_EEEEEESM_ENKUlS7_mE_clES7_m.resume + 560 +6 buck2 0x0000000108c7b588 _ZN5folly36resumeCoroutineWithNewAsyncStackRootENSt12experimental13coroutines_v116coroutine_handleIvEERNS_15AsyncStackFrameE + 84 +7 buck2 0x0000000108c4edd4 _ZN5folly6detail8function14FunctionTraitsIFvvEE9callSmallIZNS_4coro6detail23ViaCoroutinePromiseBase20scheduleContinuationEvEUlvE_EEvRNS1_4DataE + 48 +8 buck2 0x0000000108caabdc _ZN5folly18ThreadPoolExecutor7runTaskERKNSt3__110shared_ptrINS0_6ThreadEEEONS0_4TaskE + 304 +9 buck2 0x0000000108e129f4 _ZN5folly21CPUThreadPoolExecutor9threadRunENSt3__110shared_ptrINS_18ThreadPoolExecutor6ThreadEEE + 484 +(safe mode, symbolizer not available) + "; + + let mac_sanitized_trace = test_trace_lines(" + _ZN8facebook16remote_execution3cas18ManifoldHttpClient14co_materializeENSt3__110shared_ptrINS1_14CASCallContextEEENS4_INS1_20IDigestStatusTrackerEEENS3_6vectorINS3_4pairIN5build5bazel6remote9execution2v26DigestENS4_INS1_11FileWrapperEEEEENS3_9allocatorISJ_EEEENS0_6crypto8HashAlgoE.resume + _ZZN5folly4coro18collectAllTryRangeINS0_6detail9MoveRangeINSt3__16vectorINS0_16TaskWithExecutorIvEENS4_9allocatorIS7_EEEEEEEENS0_4TaskINS5_INS_3TryINS2_22decay_rvalue_referenceINS2_21lift_lvalue_referenceINS0_12await_resultIDTclL_ZNS0_11folly_cpo__13co_viaIfAsyncEEclL_ZNS4_7declvalINS_8Executor9KeepAliveISJ_EEEEDTcl9__declvalIT_ELi0EEEvEEclsr3stdE7declvalINS4_15iterator_traitsIDTclL_ZNS_6access5beginEEclsr3stdE7declvalIRSM_EEEEE9referenceEEEEEvE4typeEE4typeEE4typeEEENS8_IS11_EEEEEESM_ENKUlS7_mE_clES7_m.resume + 
_ZN5folly36resumeCoroutineWithNewAsyncStackRootENSt12experimental13coroutines_v116coroutine_handleIvEERNS_15AsyncStackFrameE
+ _ZN5folly6detail8function14FunctionTraitsIFvvEE9callSmallIZNS_4coro6detail23ViaCoroutinePromiseBase20scheduleContinuationEvEUlvE_EEvRNS1_4DataE
+ _ZN5folly18ThreadPoolExecutor7runTaskERKNSt3__110shared_ptrINS0_6ThreadEEEONS0_4TaskE
+ ");
+
+        assert_eq!(
+            extract_trace(mac_folly_trace)
+                .expect("no trace found")
+                .sanitized_trace(),
+            mac_sanitized_trace
+        );
+    }
+}
diff --git a/app/buck2_client_ctx/src/subscribers/errorconsole.rs b/app/buck2_client_ctx/src/subscribers/errorconsole.rs
new file mode 100644
index 0000000000000..53252a26b8238
--- /dev/null
+++ b/app/buck2_client_ctx/src/subscribers/errorconsole.rs
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use async_trait::async_trait;
+
+use crate::subscribers::subscriber::EventSubscriber;
+
+/// This console is used for `--console none` and only prints errors.
+///
+/// It is also used as a part of simpleconsole's implementation.
+pub struct ErrorConsole;
+
+#[async_trait]
+impl EventSubscriber for ErrorConsole {
+    async fn handle_command_result(
+        &mut self,
+        result: &buck2_cli_proto::CommandResult,
+    ) -> anyhow::Result<()> {
+        if let buck2_cli_proto::CommandResult {
+            result: Some(buck2_cli_proto::command_result::Result::Error(e)),
+        } = result
+        {
+            crate::eprintln!("Command failed: ")?;
+            for e in &e.errors {
+                crate::eprintln!("{}", e.message)?;
+            }
+        }
+
+        Ok(())
+    }
+}
diff --git a/app/buck2_client_ctx/src/subscribers/event_log.rs b/app/buck2_client_ctx/src/subscribers/event_log.rs
new file mode 100644
index 0000000000000..caa0e9d528d87
--- /dev/null
+++ b/app/buck2_client_ctx/src/subscribers/event_log.rs
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::sync::atomic::AtomicU64;
+use std::sync::Arc;
+
+use async_trait::async_trait;
+use buck2_common::argv::SanitizedArgv;
+use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf;
+use buck2_core::fs::paths::abs_path::AbsPathBuf;
+use buck2_core::fs::working_dir::WorkingDir;
+use buck2_event_log::write::WriteEventLog;
+use buck2_events::BuckEvent;
+use buck2_util::cleanup_ctx::AsyncCleanupContext;
+use futures::FutureExt;
+
+use crate::subscribers::subscriber::EventSubscriber;
+use crate::subscribers::subscriber::Tick;
+
+/// This EventLog lets us subscribe to events emitted by Buck and log them to a file. The events are
+/// serialized as JSON and logged one per line.
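The JSON-lines format described in the doc comment above is simply one serialized event per line, which keeps the log appendable and streamable. A minimal sketch of the idea, assuming the serde and serde_json crates and a toy event type standing in for buck2's real `BuckEvent`:

use std::io::Write;

use serde::Serialize;

// Toy stand-in for the real event type.
#[derive(Serialize)]
struct ToyEvent<'a> {
    trace_id: &'a str,
    data: &'a str,
}

// Append one JSON object per line; readers can then replay the log with a
// plain line-by-line loop.
fn write_event(out: &mut impl Write, event: &ToyEvent) -> std::io::Result<()> {
    serde_json::to_writer(&mut *out, event)?; // serde_json::Error converts into io::Error
    out.write_all(b"\n")
}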
+pub(crate) struct EventLog<'a> {
+    async_cleanup_context: Option<AsyncCleanupContext<'a>>,
+    writer: WriteEventLog,
+}
+
+impl<'a> EventLog<'a> {
+    pub(crate) fn new(
+        logdir: AbsNormPathBuf,
+        working_dir: WorkingDir,
+        extra_path: Option<AbsPathBuf>,
+        extra_user_event_log_path: Option<AbsPathBuf>,
+        sanitized_argv: SanitizedArgv,
+        async_cleanup_context: AsyncCleanupContext<'a>,
+        command_name: String,
+        log_size_counter_bytes: Option<Arc<AtomicU64>>,
+    ) -> anyhow::Result<Self> {
+        Ok(Self {
+            async_cleanup_context: Some(async_cleanup_context),
+            writer: WriteEventLog::new(
+                logdir,
+                working_dir,
+                extra_path,
+                extra_user_event_log_path,
+                sanitized_argv,
+                command_name,
+                log_size_counter_bytes,
+            )?,
+        })
+    }
+}
+
+#[async_trait]
+impl<'a> EventSubscriber for EventLog<'a> {
+    async fn handle_events(&mut self, events: &[Arc<BuckEvent>]) -> anyhow::Result<()> {
+        self.writer.write_events(events).await
+    }
+
+    async fn handle_tailer_stderr(&mut self, _stderr: &str) -> anyhow::Result<()> {
+        // TODO(nga): currently we mostly ignore buckd stderr.
+        //   It is very important to investigate crashes of buckd.
+        //
+        //   We attach truncated log to Scuba since D53337966
+        //   (although we probably shouldn't do that).
+        //
+        //   Regardless of that we should do either or both of the following:
+        //   - write it to event log if it is interesting (e.g. crash)
+        //   - upload it to manifold unconditionally as a separate file
+        //     (but only relevant part, since command start)
+        Ok(())
+    }
+
+    async fn handle_command_result(
+        &mut self,
+        result: &buck2_cli_proto::CommandResult,
+    ) -> anyhow::Result<()> {
+        self.writer.write_result(result).await
+    }
+
+    /// Flush all log files on tick to avoid buffering data in memory which we might lose if
+    /// we hit an error.
+    async fn tick(&mut self, _tick: &Tick) -> anyhow::Result<()> {
+        self.writer.flush_files().await
+    }
+
+    async fn exit(&mut self) -> anyhow::Result<()> {
+        self.writer.exit().await;
+        Ok(())
+    }
+}
+
+impl<'a> Drop for EventLog<'a> {
+    fn drop(&mut self) {
+        let exit = self.writer.exit();
+        match self.async_cleanup_context.as_ref() {
+            Some(async_cleanup_context) => {
+                async_cleanup_context.register("event log upload", exit.boxed());
+            }
+            None => (),
+        }
+    }
+}
diff --git a/app/buck2_client_ctx/src/subscribers/event_log/mod.rs b/app/buck2_client_ctx/src/subscribers/event_log/mod.rs
deleted file mode 100644
index e41879c0b3a53..0000000000000
--- a/app/buck2_client_ctx/src/subscribers/event_log/mod.rs
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */
-
-pub mod file_names;
-pub mod read;
-pub mod subscriber;
-pub mod user_event_types;
-pub mod utils;
-pub mod write;
diff --git a/app/buck2_client_ctx/src/subscribers/event_log/subscriber.rs b/app/buck2_client_ctx/src/subscribers/event_log/subscriber.rs
deleted file mode 100644
index 30c05b7fbb3a9..0000000000000
--- a/app/buck2_client_ctx/src/subscribers/event_log/subscriber.rs
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */ - -use std::sync::atomic::AtomicU64; -use std::sync::Arc; - -use async_trait::async_trait; -use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; -use buck2_core::fs::paths::abs_path::AbsPathBuf; -use buck2_core::fs::working_dir::WorkingDir; -use buck2_events::BuckEvent; - -use crate::argv::SanitizedArgv; -use crate::cleanup_ctx::AsyncCleanupContext; -use crate::subscribers::event_log::write::WriteEventLog; -use crate::subscribers::subscriber::EventSubscriber; -use crate::subscribers::subscriber::Tick; - -/// This EventLog lets us to events emitted by Buck and log them to a file. The events are -/// serialized as JSON and logged one per line. -pub(crate) struct EventLog<'a> { - writer: WriteEventLog<'a>, -} - -impl<'a> EventLog<'a> { - pub(crate) fn new( - logdir: AbsNormPathBuf, - working_dir: WorkingDir, - extra_path: Option, - extra_user_event_log_path: Option, - sanitized_argv: SanitizedArgv, - async_cleanup_context: AsyncCleanupContext<'a>, - command_name: String, - log_size_counter_bytes: Option>, - allow_vpnless: bool, - ) -> anyhow::Result { - Ok(Self { - writer: WriteEventLog::new( - logdir, - working_dir, - extra_path, - extra_user_event_log_path, - sanitized_argv, - async_cleanup_context, - command_name, - log_size_counter_bytes, - allow_vpnless, - )?, - }) - } -} - -#[async_trait] -impl<'a> EventSubscriber for EventLog<'a> { - async fn handle_events(&mut self, events: &[Arc]) -> anyhow::Result<()> { - self.writer.write_events(events).await - } - - async fn handle_command_result( - &mut self, - result: &buck2_cli_proto::CommandResult, - ) -> anyhow::Result<()> { - self.writer.write_result(result).await - } - - /// Flush all log files during on tick to avoid buffering data in memory which we might lose if - /// we hit an error. - async fn tick(&mut self, _tick: &Tick) -> anyhow::Result<()> { - self.writer.flush_files().await - } - - async fn exit(&mut self) -> anyhow::Result<()> { - self.writer.exit().await; - Ok(()) - } -} diff --git a/app/buck2_client_ctx/src/subscribers/event_log/utils.rs b/app/buck2_client_ctx/src/subscribers/event_log/utils.rs deleted file mode 100644 index c096580f6d4df..0000000000000 --- a/app/buck2_client_ctx/src/subscribers/event_log/utils.rs +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use anyhow::Context; -use buck2_core::fs::paths::abs_path::AbsPathBuf; -use buck2_wrapper_common::invocation_id::TraceId; -use dupe::Dupe; -use itertools::Itertools; -use serde::Deserialize; -use serde::Serialize; -use thiserror::Error; - -#[derive(Error, Debug)] -pub(crate) enum EventLogErrors { - #[error( - "Trying to write to logfile that hasn't been opened yet - this is an internal error, please report. Unwritten event: {serialized_event}" - )] - LogNotOpen { serialized_event: String }, - - #[error("Reached End of File before reading BuckEvent in log `{0}`")] - EndOfFile(String), - #[error("No event log available for {idx}th last command (have latest {num_logfiles})")] - RecentIndexOutOfBounds { idx: usize, num_logfiles: usize }, -} - -#[derive(Copy, Clone, Dupe, Debug)] -pub struct Encoding { - pub(crate) mode: LogMode, - pub(crate) compression: Compression, - /// List of extensions used to detect file type. 
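The `Encoding` table that follows pairs a log mode and compression with the file extensions used to recognize existing logs. A self-contained sketch of extension-based inference in the same spirit (illustrative only, not the removed implementation; the extension list is taken from the constants below):

// Longest suffixes are checked first so ".json-lines.gz" is not misread as
// a plain ".json-lines" file.
#[derive(Debug, Clone, Copy, PartialEq)]
enum Compression {
    None,
    Gzip,
    Zstd,
}

fn infer_compression(file_name: &str) -> Option<Compression> {
    if file_name.ends_with(".json-lines.zst") || file_name.ends_with(".pb.zst") {
        Some(Compression::Zstd)
    } else if file_name.ends_with(".json-lines.gz") || file_name.ends_with(".pb.gz") {
        Some(Compression::Gzip)
    } else if file_name.ends_with(".json-lines") || file_name.ends_with(".pb") {
        Some(Compression::None)
    } else {
        None
    }
}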
- /// - /// The first extension is the default one, used when writing a file. - pub extensions: &'static [&'static str], -} - -impl Encoding { - pub(crate) const JSON: Encoding = Encoding { - mode: LogMode::Json, - compression: Compression::None, - extensions: &[".json-lines"], - }; - - pub(crate) const JSON_GZIP: Encoding = Encoding { - mode: LogMode::Json, - compression: Compression::Gzip, - extensions: &[".json-lines.gz"], - }; - - pub(crate) const JSON_ZSTD: Encoding = Encoding { - mode: LogMode::Json, - compression: Compression::Zstd, - extensions: &[".json-lines.zst"], - }; - - pub(crate) const PROTO: Encoding = Encoding { - mode: LogMode::Protobuf, - compression: Compression::None, - extensions: &[".pb", ".proto"], - }; - - pub(crate) const PROTO_GZIP: Encoding = Encoding { - mode: LogMode::Protobuf, - compression: Compression::Gzip, - extensions: &[".pb.gz", ".proto.gz"], - }; - - pub const PROTO_ZSTD: Encoding = Encoding { - mode: LogMode::Protobuf, - compression: Compression::Zstd, - extensions: &[".pb.zst"], - }; -} - -pub(crate) const KNOWN_ENCODINGS: &[Encoding] = &[ - // Don't forget to update these lists when this is updated: - // * https://fburl.com/code/zgdxtryb - // * https://fburl.com/code/antguytj - Encoding::JSON_GZIP, - Encoding::JSON, - Encoding::JSON_ZSTD, - Encoding::PROTO, - Encoding::PROTO_GZIP, - Encoding::PROTO_ZSTD, -]; - -#[derive(Error, Debug)] -pub(crate) enum EventLogInferenceError { - #[error("Event log at path {} has no filename", .0.display())] - NoFilename(AbsPathBuf), - - #[error("Event log at path {} has a non-utf-8 filename", .0.display())] - InvalidFilename(AbsPathBuf), - - #[error( - "Event log at path {} has an extension that was not recognized. Valid extensions are: {}.", - .0.display(), display_valid_extensions() - )] - InvalidExtension(AbsPathBuf), - - #[error("Event log at path {} has no uuid in its filename", .0.display())] - NoUuidInFilename(AbsPathBuf), -} - -fn display_valid_extensions() -> String { - let mut exts = KNOWN_ENCODINGS - .iter() - .flat_map(|encoding| encoding.extensions); - exts.join(", ") -} - -pub(crate) struct NoInference(pub(crate) AbsPathBuf); - -#[derive(Copy, Clone, Dupe, Debug, PartialEq, Eq)] -pub(crate) enum LogMode { - Json, - Protobuf, -} - -#[derive(Copy, Clone, Dupe, Debug)] -pub(crate) enum Compression { - None, - Gzip, - Zstd, -} - -#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)] -pub struct Invocation { - pub command_line_args: Vec, - /// Command line args with expanded `@` args. - #[serde(default)] // For backwards compatibility. Delete after 2023-08-01. - pub expanded_command_line_args: Vec, - /// This is `String` not `AbsPathBuf` because event log is cross-platform - /// and `AbsPathBuf` is not. - pub working_dir: String, - #[serde(default = "TraceId::null")] - pub trace_id: TraceId, -} - -impl Invocation { - pub fn display_command_line(&self) -> String { - shlex::join(self.command_line_args.iter().map(|e| e.as_str())) - } - - pub fn display_expanded_command_line(&self) -> String { - shlex::join(self.expanded_command_line_args.iter().map(|e| e.as_str())) - } - - pub(crate) fn parse_json_line(json: &str) -> anyhow::Result { - serde_json::from_str::(json) - .with_context(|| format!("Invalid header: {}", json.trim_end())) - } -} - -#[cfg(test)] -mod tests { - use std::str::FromStr; - - use buck2_wrapper_common::invocation_id::TraceId; - - use crate::subscribers::event_log::utils::Invocation; - - #[test] - fn test_parse_json_line() { - // Make sure serialization format is backwards compatible. 
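The `#[serde(default)]` on `expanded_command_line_args` above is what lets headers written by older binaries, which lack that field, still deserialize; the test continuing below checks exactly that. A standalone sketch of the pattern, assuming the serde and serde_json crates and hypothetical field names:

use serde::Deserialize;

// A field added after logs already exist must default when absent, or every
// old log header would fail to parse.
#[derive(Deserialize, Debug, PartialEq)]
struct Header {
    args: Vec<String>,
    #[serde(default)] // absent in logs written by older binaries
    expanded_args: Vec<String>,
}

fn main() {
    let old = r#"{"args":["buck2","build"]}"#;
    let parsed: Header = serde_json::from_str(old).unwrap();
    assert!(parsed.expanded_args.is_empty());
}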
- let line = r#"{"command_line_args":["/some/path/buck2","test","@//mode/mac","app/..."],"working_dir":"/Users/nga/dir45","trace_id":"281d1c16-8930-40cd-8fc1-7d71355c20f5"}"#; - let line = Invocation::parse_json_line(line).unwrap(); - let expected = Invocation { - command_line_args: vec![ - "/some/path/buck2".to_owned(), - "test".to_owned(), - "@//mode/mac".to_owned(), - "app/...".to_owned(), - ], - working_dir: "/Users/nga/dir45".to_owned(), - expanded_command_line_args: Vec::new(), - trace_id: TraceId::from_str("281d1c16-8930-40cd-8fc1-7d71355c20f5").unwrap(), - }; - assert_eq!(expected, line); - } -} diff --git a/app/buck2_client_ctx/src/subscribers/event_log/write.rs b/app/buck2_client_ctx/src/subscribers/event_log/write.rs deleted file mode 100644 index 7ab951f91ff05..0000000000000 --- a/app/buck2_client_ctx/src/subscribers/event_log/write.rs +++ /dev/null @@ -1,799 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::io; -use std::mem; -use std::pin::Pin; -use std::process::Stdio; -use std::sync::atomic::AtomicU64; -use std::sync::atomic::Ordering; -use std::sync::Arc; -use std::task::Context; -use std::task::Poll; - -use anyhow::Context as _; -use async_compression::tokio::write::GzipEncoder; -use async_compression::tokio::write::ZstdEncoder; -use buck2_cli_proto::*; -use buck2_core::env_helper::EnvHelper; -use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; -use buck2_core::fs::paths::abs_path::AbsPathBuf; -use buck2_core::fs::working_dir::WorkingDir; -use buck2_events::BuckEvent; -use buck2_wrapper_common::invocation_id::TraceId; -use futures::future::Future; -use futures::FutureExt; -use pin_project::pin_project; -use prost::Message; -use serde::Serialize; -use tokio::fs::OpenOptions; -use tokio::io::AsyncWrite; -use tokio::io::AsyncWriteExt; - -use crate::cleanup_ctx::AsyncCleanupContext; -use crate::subscribers::event_log::file_names::get_logfile_name; -use crate::subscribers::event_log::file_names::remove_old_logs; -use crate::subscribers::event_log::read::EventLogPathBuf; -use crate::subscribers::event_log::utils::Compression; -use crate::subscribers::event_log::utils::Encoding; -use crate::subscribers::event_log::utils::EventLogErrors; -use crate::subscribers::event_log::utils::Invocation; -use crate::subscribers::event_log::utils::LogMode; -use crate::subscribers::event_log::utils::NoInference; -use crate::subscribers::should_block_on_log_upload; -use crate::subscribers::should_upload_log; -use crate::subscribers::wait_for_child_and_log; -use crate::subscribers::FutureChildOutput; - -type EventLogWriter = Box; - -mod counting_reader { - use super::*; - - #[pin_project] - pub struct CountingReader { - #[pin] - pub(super) inner: T, - pub(super) stats: Option>, - } -} - -use counting_reader::CountingReader; - -use super::user_event_types::try_get_user_event; -use crate::argv::SanitizedArgv; - -impl CountingReader { - fn new(inner: T, stats: Option>) -> Self { - Self { inner, stats } - } -} - -impl AsyncWrite for CountingReader -where - T: AsyncWrite, -{ - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - let this = self.project(); - let bytes = futures::ready!(this.inner.poll_write(cx, buf))?; - if let Some(stats) = this.stats { - 
stats.fetch_add(bytes as u64, Ordering::Relaxed); - } - - Poll::Ready(Ok(bytes)) - } - - fn poll_flush( - self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - self.project().inner.poll_flush(cx) - } - - fn poll_shutdown( - self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - self.project().inner.poll_shutdown(cx) - } -} - -#[derive(Eq, PartialEq, Copy, Clone)] -pub(crate) enum EventLogType { - System, - User, -} - -pub(crate) struct NamedEventLogWriter { - path: EventLogPathBuf, - file: EventLogWriter, - event_log_type: EventLogType, - /// If this writing is done by a subprocess, that process's output, assuming we intend to wait - /// for it to exit. - process_to_wait_for: Option, -} - -pub(crate) enum LogWriterState { - Unopened { - logdir: AbsNormPathBuf, - extra_path: Option, - extra_user_event_log_path: Option, - }, - Opened { - writers: Vec, - }, - Closed, -} - -pub(crate) struct WriteEventLog<'a> { - state: LogWriterState, - async_cleanup_context: Option>, - sanitized_argv: SanitizedArgv, - command_name: String, - working_dir: WorkingDir, - /// Allocation cache. Must be cleaned before use. - buf: Vec, - log_size_counter_bytes: Option>, - allow_vpnless: bool, -} - -impl<'a> WriteEventLog<'a> { - pub(crate) fn new( - logdir: AbsNormPathBuf, - working_dir: WorkingDir, - extra_path: Option, - extra_user_event_log_path: Option, - sanitized_argv: SanitizedArgv, - async_cleanup_context: AsyncCleanupContext<'a>, - command_name: String, - log_size_counter_bytes: Option>, - allow_vpnless: bool, - ) -> anyhow::Result { - Ok(Self { - state: LogWriterState::Unopened { - logdir, - extra_path, - extra_user_event_log_path, - }, - async_cleanup_context: Some(async_cleanup_context), - sanitized_argv, - command_name, - working_dir, - buf: Vec::new(), - log_size_counter_bytes, - allow_vpnless, - }) - } - - /// Get the command line arguments and cwd and serialize them for replaying later. - async fn log_invocation(&mut self, trace_id: TraceId) -> anyhow::Result<()> { - let command_line_args = self.sanitized_argv.argv.clone(); - let expanded_command_line_args = self.sanitized_argv.expanded_argv.clone(); - let invocation = Invocation { - command_line_args, - expanded_command_line_args, - working_dir: self.working_dir.to_string(), - trace_id, - }; - self.write_ln(&[invocation]).await - } - - async fn write_ln<'b, T, I>(&'b mut self, events: I) -> anyhow::Result<()> - where - T: SerializeForLog + 'b, - I: IntoIterator + Clone + 'b, - { - match &mut self.state { - LogWriterState::Opened { writers, .. } => { - for writer in writers { - self.buf.clear(); - - for event in events.clone() { - match writer.event_log_type { - EventLogType::System => { - match writer.path.encoding.mode { - LogMode::Json => { - event.serialize_to_json(&mut self.buf)?; - self.buf.push(b'\n'); - } - LogMode::Protobuf => event - .serialize_to_protobuf_length_delimited(&mut self.buf)?, - }; - } - EventLogType::User => { - if event.maybe_serialize_user_event(&mut self.buf)? { - self.buf.push(b'\n'); - } - } - } - } - - writer - .file - .write_all(&self.buf) - .await - .context("Failed to write event")?; - - if self.buf.len() > 1_000_000 { - // Make sure we don't keep too much memory if encountered one large event. - self.buf = Vec::new(); - } - } - Ok(()) - } - LogWriterState::Unopened { .. 
} | LogWriterState::Closed => { - self.buf.clear(); - if let Some(event) = events.into_iter().next() { - event.serialize_to_json(&mut self.buf)?; - } else { - // Unreachable. - } - Err(EventLogErrors::LogNotOpen { - serialized_event: String::from_utf8(mem::take(&mut self.buf)) - .context("Failed to serialize event for debug")?, - } - .into()) - } - } - } - - async fn ensure_log_writers_opened(&mut self, event: &BuckEvent) -> anyhow::Result<()> { - let (logdir, maybe_extra_path, maybe_extra_user_event_log_path) = match &self.state { - LogWriterState::Unopened { - logdir, - extra_path, - extra_user_event_log_path, - } => (logdir, extra_path, extra_user_event_log_path), - LogWriterState::Opened { .. } => return Ok(()), - LogWriterState::Closed => { - return Err(anyhow::anyhow!("Received events after logs were closed")); - } - }; - tokio::fs::create_dir_all(logdir) - .await - .with_context(|| format!("Error creating event log directory: `{}`", logdir))?; - remove_old_logs(logdir).await; - - // The event-log is going to be written to file containing the build uuid. - // But we don't know the build uuid until we've gotten the CommandStart event. - // So we'll just create it when we know where to put it. - let mut log_mode = LogMode::Protobuf; - static JSON_LOG: EnvHelper = EnvHelper::new("BUCK2_JSON_LOG"); - if JSON_LOG.get_copied()?.unwrap_or(false) { - log_mode = LogMode::Json; - } - - // Open our log fie, gzip encoded. - let encoding = match log_mode { - LogMode::Json => Encoding::JSON_GZIP, - LogMode::Protobuf => Encoding::PROTO_ZSTD, - }; - - let file_name = &get_logfile_name(event, encoding, &self.command_name)?; - let path = EventLogPathBuf { - path: logdir.as_abs_path().join(file_name), - encoding, - }; - let writer = start_persist_subprocess( - path, - event.trace_id()?.clone(), - self.log_size_counter_bytes.clone(), - self.allow_vpnless, - ) - .await?; - let mut writers = vec![writer]; - - // Also open the user's log file, if any as provided, with no encoding. - if let Some(extra_path) = maybe_extra_path { - writers.push( - open_event_log_for_writing( - EventLogPathBuf::infer_opt(extra_path.clone())?.unwrap_or_else( - |NoInference(path)| EventLogPathBuf { - path, - encoding: Encoding::JSON_GZIP, - }, - ), - self.log_size_counter_bytes.clone(), - EventLogType::System, - ) - .await?, - ); - } - - // Also open the user's simple log file, if any as provided, json-line formatted with no compression if no extensions are detected. - if let Some(extra_user_event_log_path) = maybe_extra_user_event_log_path { - writers.push( - open_event_log_for_writing( - EventLogPathBuf::infer_opt(extra_user_event_log_path.clone())?.unwrap_or_else( - |NoInference(path)| EventLogPathBuf { - path, - encoding: Encoding::JSON, - }, - ), - self.log_size_counter_bytes.clone(), - EventLogType::User, - ) - .await?, - ); - } - - self.state = LogWriterState::Opened { writers }; - self.log_invocation(event.trace_id()?).await - } - - pub(crate) fn exit(&mut self) -> impl Future + 'static + Send + Sync { - // Shut down writers, flush all our files before exiting. - let state = std::mem::replace(&mut self.state, LogWriterState::Closed); - - async move { - let mut writers = match state { - LogWriterState::Opened { writers } => writers, - LogWriterState::Unopened { .. } | LogWriterState::Closed => { - // Nothing to do in this case, though this should be unreachable - // since we just did a write_ln. 
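The `Unopened`/`Opened`/`Closed` progression above is a small state machine: writers open lazily on the first event (once the trace id needed for the file name is known), and `exit` swaps in `Closed` via `mem::replace` so shutdown runs at most once. A simplified sketch of that lifecycle, with stand-in payload types:

// Hypothetical, trimmed-down version of the writer lifecycle.
enum WriterState {
    Unopened { logdir: String },
    Opened { sinks: Vec<String> },
    Closed,
}

impl WriterState {
    // Opening is deferred until the first event arrives.
    fn ensure_opened(&mut self, trace_id: &str) {
        if let WriterState::Unopened { logdir } = self {
            let sink = format!("{logdir}/{trace_id}.pb.zst");
            *self = WriterState::Opened { sinks: vec![sink] };
        }
    }

    // Consumes the open sinks exactly once; any later call sees Closed.
    fn close(&mut self) -> Vec<String> {
        match std::mem::replace(self, WriterState::Closed) {
            WriterState::Opened { sinks } => sinks,
            WriterState::Unopened { .. } | WriterState::Closed => Vec::new(),
        }
    }
}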
- return; - } - }; - - for writer in writers.iter_mut() { - if let Err(e) = writer.file.shutdown().await { - tracing::warn!( - "Failed to flush log file at `{}`: {:#}", - writer.path.path, - e - ); - } - } - - // NOTE: We call `into_iter()` here and that implicitly drops the `writer.file`, which - // is necessary for an actual `close` call to be send to the child FD (it is a bit of - // an odd behavior in Tokio that `shutdown` doesn't do that). - let futs = writers - .into_iter() - .filter_map(|mut w| w.process_to_wait_for.take()) - .map(|proc| wait_for_child_and_log(proc, "Event Log")); - - futures::future::join_all(futs).await; - } - } -} - -impl<'a> Drop for WriteEventLog<'a> { - fn drop(&mut self) { - let exit = self.exit(); - match self.async_cleanup_context.as_ref() { - Some(async_cleanup_context) => { - async_cleanup_context.register("event log upload", exit.boxed()); - } - None => (), - } - } -} - -async fn start_persist_subprocess( - path: EventLogPathBuf, - trace_id: TraceId, - bytes_written: Option>, - allow_vpnless: bool, -) -> anyhow::Result { - let current_exe = std::env::current_exe().context("No current_exe")?; - let mut command = buck2_util::process::async_background_command(current_exe); - // @oss-disable: #[cfg(unix)] - #[cfg(all(tokio_unstable, unix))] // @oss-enable - { - // Ensure that if we get CTRL-C, the persist-event-logs process does not get it. - command.process_group(0); - } - let manifold_name = &format!("{}{}", trace_id, path.extension()); - command - .args(["debug", "persist-event-logs"]) - .args(["--manifold-name", manifold_name]) - .args(["--local-path".as_ref(), path.path.as_os_str()]); - if !should_upload_log()? { - command.arg("--no-upload"); - }; - if allow_vpnless { - command.arg("--allow-vpnless"); - } - command.stdout(Stdio::null()).stdin(Stdio::piped()); - - let block = should_block_on_log_upload()?; - if block { - command.stderr(Stdio::piped()); - } else { - command.stderr(Stdio::null()); - } - - let mut child = command.spawn().with_context(|| { - format!( - "Failed to open event log subprocess for writing at `{}`", - path.path.display() - ) - })?; - let pipe = child.stdin.take().expect("stdin was piped"); - let mut writer = get_writer(path, pipe, bytes_written, EventLogType::System)?; - - // Only spawn this if we are going to wait. 
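`start_persist_subprocess` above hands the event stream to a `buck2 debug persist-event-logs` child over a stdin pipe, and only keeps a handle to the child's output when the client intends to block on the upload. A trimmed sketch of the spawn-and-feed pattern using tokio (error handling, the manifold naming, and the upload flags are reduced to the essentials):

use std::process::Stdio;

use tokio::io::AsyncWriteExt;
use tokio::process::Command;

async fn spawn_persist_child(local_path: &str) -> std::io::Result<()> {
    let mut child = Command::new("buck2")
        .args(["debug", "persist-event-logs", "--local-path", local_path])
        .stdin(Stdio::piped()) // the writer feeds serialized events through this pipe
        .stdout(Stdio::null())
        .spawn()?;

    let mut stdin = child.stdin.take().expect("stdin was piped");
    stdin.write_all(b"example event\n").await?;
    drop(stdin); // closing the pipe lets the child see EOF and finish persisting
    child.wait().await?;
    Ok(())
}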
- if block { - writer.process_to_wait_for = Some(FutureChildOutput::new(child)); - } - - Ok(writer) -} - -async fn open_event_log_for_writing( - path: EventLogPathBuf, - bytes_written: Option>, - event_log_type: EventLogType, -) -> anyhow::Result { - let file = OpenOptions::new() - .create(true) - .append(true) - .open(&path.path) - .await - .with_context(|| { - format!( - "Failed to open event log for writing at `{}`", - path.path.display() - ) - })?; - - get_writer(path, file, bytes_written, event_log_type) -} - -fn get_writer( - path: EventLogPathBuf, - file: impl AsyncWrite + std::marker::Send + std::marker::Unpin + std::marker::Sync + 'static, - bytes_written: Option>, - event_log_type: EventLogType, -) -> Result { - let file = match path.encoding.compression { - Compression::None => Box::new(CountingReader::new(file, bytes_written)) as EventLogWriter, - Compression::Gzip => Box::new(GzipEncoder::with_quality( - CountingReader::new(file, bytes_written), - async_compression::Level::Fastest, - )) as EventLogWriter, - Compression::Zstd => Box::new(ZstdEncoder::with_quality( - CountingReader::new(file, bytes_written), - async_compression::Level::Default, - )) as EventLogWriter, - }; - Ok(NamedEventLogWriter { - path, - file, - event_log_type, - process_to_wait_for: None, - }) -} - -impl<'a> WriteEventLog<'a> { - pub(crate) async fn write_events(&mut self, events: &[Arc]) -> anyhow::Result<()> { - let mut event_refs = Vec::new(); - let mut first = true; - for event in events { - if first { - self.ensure_log_writers_opened(event).await?; - first = false; - } - - event_refs.push(StreamValueForWrite::Event(event.event())); - } - - if event_refs.is_empty() { - return Ok(()); - } - - self.write_ln(&event_refs).await - } - - pub(crate) async fn write_result( - &mut self, - result: &buck2_cli_proto::CommandResult, - ) -> anyhow::Result<()> { - match &self.state { - LogWriterState::Opened { .. } | LogWriterState::Closed => {} - LogWriterState::Unopened { .. } => { - // This is a bit wonky. We can receive a CommandResult before we opened log files - // if the command crashed before it started. That can happen if the daemon - // initialization is what fails, since we need the daemon to initialize in order to - // access request metadata, which we need for the command start event. To keep - // things simple, just tolerate this happening. - return Ok(()); - } - } - - let event = StreamValueForWrite::Result(result); - - self.write_ln(&[event]).await - } - - pub(crate) async fn flush_files(&mut self) -> anyhow::Result<()> { - let writers = match &mut self.state { - LogWriterState::Opened { writers } => writers, - LogWriterState::Unopened { .. 
} | LogWriterState::Closed => return Ok(()), - }; - - for writer in writers { - writer.file.flush().await.with_context(|| { - format!("Error flushing log file at {}", writer.path.path.display()) - })?; - } - - Ok(()) - } -} - -pub(crate) trait SerializeForLog { - fn serialize_to_json(&self, buf: &mut Vec) -> anyhow::Result<()>; - fn serialize_to_protobuf_length_delimited(&self, buf: &mut Vec) -> anyhow::Result<()>; - fn maybe_serialize_user_event(&self, buf: &mut Vec) -> anyhow::Result; -} - -impl SerializeForLog for Invocation { - fn serialize_to_json(&self, buf: &mut Vec) -> anyhow::Result<()> { - serde_json::to_writer(buf, &self).context("Failed to serialize event") - } - - fn serialize_to_protobuf_length_delimited(&self, buf: &mut Vec) -> anyhow::Result<()> { - let invocation = buck2_data::Invocation { - command_line_args: self.command_line_args.clone(), - expanded_command_line_args: self.expanded_command_line_args.clone(), - working_dir: self.working_dir.clone(), - trace_id: Some(self.trace_id.to_string()), - }; - invocation.encode_length_delimited(buf)?; - Ok(()) - } - - // Always log invocation record to user event log for `buck2 log show` compatibility - fn maybe_serialize_user_event(&self, buf: &mut Vec) -> anyhow::Result { - serde_json::to_writer(buf, &self).context("Failed to serialize event")?; - Ok(true) - } -} - -#[derive(Serialize)] -pub enum StreamValueForWrite<'a> { - Result(&'a CommandResult), - Event(&'a buck2_data::BuckEvent), -} - -impl<'a> SerializeForLog for StreamValueForWrite<'a> { - fn serialize_to_json(&self, buf: &mut Vec) -> anyhow::Result<()> { - serde_json::to_writer(buf, &self).context("Failed to serialize event") - } - - fn serialize_to_protobuf_length_delimited(&self, buf: &mut Vec) -> anyhow::Result<()> { - // We use `CommandProgressForWrite` here to avoid cloning `BuckEvent`. - // `CommandProgressForWrite` serialization is bitwise identical to `CommandProgress`. - // See the protobuf spec - // https://developers.google.com/protocol-buffers/docs/encoding#length-types - // for the details about protobuf wire format. - let progress = match self { - Self::Event(e) => command_progress_for_write::Progress::Event(e.encode_to_vec()), - Self::Result(res) => command_progress_for_write::Progress::Result((*res).clone()), - }; - let stream_val = buck2_cli_proto::CommandProgressForWrite { - progress: Some(progress), - }; - stream_val.encode_length_delimited(buf)?; - Ok(()) - } - - fn maybe_serialize_user_event(&self, buf: &mut Vec) -> anyhow::Result { - if let StreamValueForWrite::Event(event) = self { - if let Some(user_event) = try_get_user_event(event)? 
-#[derive(Serialize)]
-pub enum StreamValueForWrite<'a> {
-    Result(&'a CommandResult),
-    Event(&'a buck2_data::BuckEvent),
-}
-
-impl<'a> SerializeForLog for StreamValueForWrite<'a> {
-    fn serialize_to_json(&self, buf: &mut Vec<u8>) -> anyhow::Result<()> {
-        serde_json::to_writer(buf, &self).context("Failed to serialize event")
-    }
-
-    fn serialize_to_protobuf_length_delimited(&self, buf: &mut Vec<u8>) -> anyhow::Result<()> {
-        // We use `CommandProgressForWrite` here to avoid cloning `BuckEvent`.
-        // `CommandProgressForWrite` serialization is bitwise identical to `CommandProgress`.
-        // See the protobuf spec
-        // https://developers.google.com/protocol-buffers/docs/encoding#length-types
-        // for the details about protobuf wire format.
-        let progress = match self {
-            Self::Event(e) => command_progress_for_write::Progress::Event(e.encode_to_vec()),
-            Self::Result(res) => command_progress_for_write::Progress::Result((*res).clone()),
-        };
-        let stream_val = buck2_cli_proto::CommandProgressForWrite {
-            progress: Some(progress),
-        };
-        stream_val.encode_length_delimited(buf)?;
-        Ok(())
-    }
-
-    fn maybe_serialize_user_event(&self, buf: &mut Vec<u8>) -> anyhow::Result<bool> {
-        if let StreamValueForWrite::Event(event) = self {
-            if let Some(user_event) = try_get_user_event(event)? {
-                serde_json::to_writer(buf, &user_event).context("Failed to serialize event")?;
-                return Ok(true);
-            }
-        }
-
-        Ok(false)
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use std::time::SystemTime;
-
-    use buck2_core::fs::paths::abs_path::AbsPathBuf;
-    use buck2_data::LoadBuildFileStart;
-    use buck2_data::SpanStartEvent;
-    use buck2_events::span::SpanId;
-    use buck2_events::BuckEvent;
-    use buck2_wrapper_common::invocation_id::TraceId;
-    use futures::TryStreamExt;
-    use tempfile::TempDir;
-
-    use super::*;
-    use crate::stream_value::StreamValue;
-
-    impl WriteEventLog<'static> {
-        async fn new_test(log: EventLogPathBuf) -> anyhow::Result<Self> {
-            Ok(Self {
-                state: LogWriterState::Opened {
-                    writers: vec![
-                        open_event_log_for_writing(log, None, EventLogType::System).await?,
-                    ],
-                },
-                sanitized_argv: SanitizedArgv {
-                    argv: vec!["buck2".to_owned()],
-                    expanded_argv: vec!["buck2".to_owned()],
-                },
-                async_cleanup_context: None,
-                command_name: "testtest".to_owned(),
-                working_dir: WorkingDir::current_dir()?,
-                buf: Vec::new(),
-                log_size_counter_bytes: None,
-                allow_vpnless: false,
-            })
-        }
-    }
-
-    fn make_event() -> BuckEvent {
-        BuckEvent::new(
-            SystemTime::now(),
-            TraceId::new(),
-            Some(SpanId::new()),
-            None,
-            buck2_data::buck_event::Data::SpanStart(SpanStartEvent {
-                data: Some(buck2_data::span_start_event::Data::Load(
-                    LoadBuildFileStart {
-                        module_id: "foo".to_owned(),
-                        cell: "bar".to_owned(),
-                    },
-                )),
-            }),
-        )
-    }
-
-    #[tokio::test]
-    async fn test_protobuf_decoding_gzip() -> anyhow::Result<()> {
-        test_protobuf_decoding(Encoding::PROTO_GZIP).await
-    }
-
-    #[tokio::test]
-    async fn test_protobuf_decoding_zstd() -> anyhow::Result<()> {
-        test_protobuf_decoding(Encoding::PROTO_ZSTD).await
-    }
-
-    async fn test_protobuf_decoding(encoding: Encoding) -> anyhow::Result<()> {
-        //Create log dir
-        let tmp_dir = TempDir::new()?;
-
-        //Create mock event
-        let event = make_event();
-
-        // Create event log
-        let log = EventLogPathBuf {
-            path: AbsPathBuf::try_from(tmp_dir.path().join("log")).unwrap(),
-            encoding,
-        };
-
-        let mut write_event_log = WriteEventLog::new_test(log.clone()).await?;
-
-        //Log event
-        let value = StreamValueForWrite::Event(event.event());
-        write_event_log.log_invocation(event.trace_id()?).await?;
-        write_event_log.write_ln(&[value]).await?;
-        write_event_log.exit().await;
-
-        //Get and decode log
-        let (_invocation, mut events) = log.unpack_stream().await?;
-
-        //Get event
-        let retrieved_event = match events.try_next().await?.expect("Failed getting log") {
-            StreamValue::Event(e) => BuckEvent::try_from(e),
-            _ => panic!("expected event"),
-        }?;
-
-        //Assert it's the same event created in the beginning
-        assert_eq!(retrieved_event.timestamp(), event.timestamp());
-        assert_eq!(
-            retrieved_event.trace_id().unwrap(),
-            event.trace_id().unwrap()
-        );
-        assert_eq!(retrieved_event.span_id().unwrap(), event.span_id().unwrap());
-        assert_eq!(retrieved_event.data(), event.data());
-
-        assert!(
-            events.try_next().await.unwrap().is_none(),
-            "expecting no more events"
-        );
-
-        Ok(())
-    }
-
-    #[tokio::test]
-    async fn test_tick_makes_valid_log_zstd() -> anyhow::Result<()> {
-        test_tick_makes_valid_log(Encoding::PROTO_ZSTD).await
-    }
-
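The tick test that follows hinges on encoder buffering: bytes written to a gzip or zstd encoder are not readable until the encoder is flushed, and gzip's trailing footer is only emitted on shutdown. A small sketch of that behavior, assuming async-compression's tokio write API and tokio's `AsyncWrite` impl for `Cursor<Vec<u8>>`:

use std::io::Cursor;

use async_compression::tokio::write::GzipEncoder;
use tokio::io::AsyncWriteExt;

async fn write_compressed() -> std::io::Result<Vec<u8>> {
    let mut enc = GzipEncoder::new(Cursor::new(Vec::new()));
    enc.write_all(b"some event bytes").await?;
    // Push buffered compressed data through to the sink so a concurrent
    // reader can decode what has been written so far.
    enc.flush().await?;
    // Finish the stream; for gzip this also writes the footer, which is
    // why a flushed-but-never-closed log is only partially valid.
    enc.shutdown().await?;
    Ok(enc.into_inner().into_inner())
}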
-    async fn test_tick_makes_valid_log(encoding: Encoding) -> anyhow::Result<()> {
-        if cfg!(windows) {
-            // Do not want to deal with exclusivity issues on Windows.
-            return Ok(());
-        }
-
-        let tmp_dir = TempDir::new()?;
-
-        let log = EventLogPathBuf {
-            path: AbsPathBuf::try_from(tmp_dir.path().join("test_tick_makes_valid_log.pb.gz"))
-                .unwrap(),
-            encoding,
-        };
-
-        let mut write_event_log = WriteEventLog::new_test(log.clone()).await?;
-
-        let event = make_event();
-        let value = StreamValueForWrite::Event(event.event());
-        write_event_log.log_invocation(event.trace_id()?).await?;
-        write_event_log.write_ln(&[value]).await?;
-
-        assert!(
-            log.unpack_stream().await.is_err(),
-            "Sanity check: gzip was not flushed, so the log is invalid"
-        );
-
-        // Now flush the gzip stream.
-        write_event_log.flush_files().await?;
-
-        // Do not close the log, and open it.
-        let (_invocation, mut events) = log.unpack_stream().await?;
-
-        let retrieved_event = match events.try_next().await?.expect("Failed getting log") {
-            StreamValue::Event(e) => BuckEvent::try_from(e).unwrap(),
-            _ => panic!("expecting event"),
-        };
-
-        assert_eq!(retrieved_event.timestamp(), event.timestamp());
-        assert_eq!(
-            retrieved_event.trace_id().unwrap(),
-            event.trace_id().unwrap()
-        );
-        assert_eq!(retrieved_event.span_id(), event.span_id());
-        assert_eq!(retrieved_event.data(), event.data());
-
-        match encoding.compression {
-            Compression::Gzip => {
-                // TODO(nga): `tick` does not write gzip footer, so even after `tick`
-                // generated file is not a valid gzip file.
-                // assert!(events.try_next().await.unwrap().is_none(), "expecting no more events");
-                assert!(events.try_next().await.is_err());
-            }
-            Compression::Zstd => {
-                assert!(
-                    events.try_next().await.unwrap().is_none(),
-                    "expecting no more events"
-                );
-            }
-            Compression::None => unreachable!(),
-        }
-
-        Ok(())
-    }
-
-    #[test]
-    fn test_stream_value_serialize_to_protobuf_length_delimited() {
-        let event = make_event();
-        let mut actual = Vec::new();
-        StreamValueForWrite::Event(event.event())
-            .serialize_to_protobuf_length_delimited(&mut actual)
-            .unwrap();
-        let expected = buck2_cli_proto::CommandProgress {
-            progress: Some(command_progress::Progress::Event(event.into())),
-        }
-        .encode_length_delimited_to_vec();
-        assert_eq!(expected, actual);
-    }
-}
diff --git a/app/buck2_client_ctx/src/subscribers/get.rs b/app/buck2_client_ctx/src/subscribers/get.rs
index a4671d7bbc7fd..f65afcec581dc 100644
--- a/app/buck2_client_ctx/src/subscribers/get.rs
+++ b/app/buck2_client_ctx/src/subscribers/get.rs
@@ -10,22 +10,23 @@
 use std::sync::atomic::AtomicU64;
 use std::sync::Arc;
 
+use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf;
 use buck2_event_observer::event_observer::NoopEventObserverExtra;
 use buck2_event_observer::verbosity::Verbosity;
 use buck2_wrapper_common::invocation_id::TraceId;
 use dupe::Dupe;
 
 use crate::client_ctx::ClientCommandContext;
-use crate::common::CommonDaemonCommandOptions;
-use crate::common::ConsoleType;
+use crate::common::ui::ConsoleType;
+use crate::common::CommonEventLogOptions;
 use crate::streaming::StreamingCommand;
 use crate::subscribers::build_graph_stats::BuildGraphStats;
 use crate::subscribers::build_id_writer::BuildIdWriter;
-use crate::subscribers::event_log::subscriber::EventLog;
+use crate::subscribers::errorconsole::ErrorConsole;
+use crate::subscribers::event_log::EventLog;
 use crate::subscribers::re_log::ReLog;
 use crate::subscribers::simpleconsole::SimpleConsole;
 use crate::subscribers::subscriber::EventSubscriber;
-use crate::subscribers::subscriber_unpack::UnpackingEventSubscriberAsEventSubscriber;
 use crate::subscribers::superconsole::StatefulSuperConsole;
 use crate::subscribers::superconsole::SuperConsoleConfig;
@@ -38,28 +39,41 @@ pub fn get_console_with_root(
     replay_speed: Option<f64>,
     command_name: &str,
     config: SuperConsoleConfig,
-) -> anyhow::Result<Option<Box<dyn EventSubscriber + 'a>>> {
+    build_count_dir: Option<AbsNormPathBuf>,
+) -> anyhow::Result<Box<dyn EventSubscriber + 'a>> {
     match console_type {
-        ConsoleType::Simple => Ok(Some(Box::new(UnpackingEventSubscriberAsEventSubscriber(
-            SimpleConsole::<NoopEventObserverExtra>::autodetect(trace_id, verbosity, expect_spans),
-        )))),
-        ConsoleType::SimpleNoTty => Ok(Some(Box::new(UnpackingEventSubscriberAsEventSubscriber(
-            SimpleConsole::<NoopEventObserverExtra>::without_tty(trace_id, verbosity, expect_spans),
-        )))),
-        ConsoleType::SimpleTty => Ok(Some(Box::new(UnpackingEventSubscriberAsEventSubscriber(
-            SimpleConsole::<NoopEventObserverExtra>::with_tty(trace_id, verbosity, expect_spans),
-        )))),
-        ConsoleType::Super => Ok(Some(Box::new(UnpackingEventSubscriberAsEventSubscriber(
-            StatefulSuperConsole::new_with_root_forced(
-                trace_id,
-                command_name,
-                verbosity,
-                expect_spans,
-                replay_speed,
-                None,
-                config,
-            )?,
-        )))),
+        ConsoleType::Simple => Ok(Box::new(
+            SimpleConsole::<NoopEventObserverExtra>::autodetect(
+                trace_id,
+                verbosity,
+                expect_spans,
+                build_count_dir,
+            ),
+        )),
+        ConsoleType::SimpleNoTty => Ok(Box::new(
+            SimpleConsole::<NoopEventObserverExtra>::without_tty(
+                trace_id,
+                verbosity,
+                expect_spans,
+                build_count_dir,
+            ),
+        )),
+        ConsoleType::SimpleTty => Ok(Box::new(SimpleConsole::<NoopEventObserverExtra>::with_tty(
+            trace_id,
+            verbosity,
+            expect_spans,
+            build_count_dir,
+        ))),
+        ConsoleType::Super => Ok(Box::new(StatefulSuperConsole::new_with_root_forced(
+            trace_id,
+            command_name,
+            verbosity,
+            expect_spans,
+            replay_speed,
+            None,
+            config,
+            build_count_dir,
+        )?)),
         ConsoleType::Auto => {
             match StatefulSuperConsole::new_with_root(
                 trace_id.dupe(),
@@ -68,20 +82,20 @@ pub fn get_console_with_root(
                 expect_spans,
                 replay_speed,
                 config,
+                build_count_dir.clone(),
             )? {
-                Some(super_console) => Ok(Some(Box::new(
-                    UnpackingEventSubscriberAsEventSubscriber(super_console),
-                ))),
-                None => Ok(Some(Box::new(UnpackingEventSubscriberAsEventSubscriber(
+                Some(super_console) => Ok(Box::new(super_console)),
+                None => Ok(Box::new(
                     SimpleConsole::<NoopEventObserverExtra>::autodetect(
                         trace_id,
                         verbosity,
                         expect_spans,
+                        build_count_dir,
                     ),
-                )))),
+                )),
             }
         }
-        ConsoleType::None => Ok(None),
+        ConsoleType::None => Ok(Box::new(ErrorConsole)),
     }
 }
 
@@ -111,7 +125,6 @@ pub(crate) fn try_get_event_log_subscriber<'a, T: StreamingCommand>(
         ctx.async_cleanup_context().dupe(),
         T::COMMAND_NAME.to_owned(),
         log_size_counter_bytes,
-        ctx.allow_vpnless_for_logging()?,
     )?;
     Ok(Some(Box::new(log)))
 }
@@ -122,13 +135,12 @@ pub(crate) fn try_get_re_log_subscriber<'a>(
     let log = ReLog::new(
         ctx.paths()?.isolation.clone(),
         ctx.async_cleanup_context().dupe(),
-        ctx.allow_vpnless_for_logging()?,
     );
     Ok(Some(Box::new(log)))
 }
 
 pub(crate) fn try_get_build_id_writer<'a>(
-    opts: &CommonDaemonCommandOptions,
+    opts: &CommonEventLogOptions,
     ctx: &ClientCommandContext<'a>,
 ) -> anyhow::Result<Option<Box<dyn EventSubscriber + 'a>>> {
     if let Some(file_loc) = opts.write_build_id.as_ref() {
@@ -156,7 +168,7 @@ pub(crate) fn try_get_build_graph_stats<'a, T: StreamingCommand>(
 fn should_handle_build_graph_stats<T: StreamingCommand>(cmd: &T) -> bool {
     // Currently, we only care about graph size info in BuildResponse which build command produces
-    cmd.common_opts()
+    cmd.build_config_opts()
         .config_values
         .contains(&"buck2.log_configured_graph_size=true".to_owned())
         && cmd.logging_name() == "build"
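The reshaped `get_console_with_root` above always yields a subscriber, trading the old `Option` for an `ErrorConsole` in the `None` case. The shape of that change in miniature, with illustrative stand-in types (none of these names are buck2's):

trait Subscriber {}

struct FancyConsole;
struct PlainConsole;
struct ErrorsOnlyConsole;

impl Subscriber for FancyConsole {}
impl Subscriber for PlainConsole {}
impl Subscriber for ErrorsOnlyConsole {}

enum Kind {
    Simple,
    Super,
    Auto,
    None,
}

fn pick(kind: Kind, tty_available: bool) -> Box<dyn Subscriber> {
    match kind {
        Kind::Simple => Box::new(PlainConsole),
        Kind::Super => Box::new(FancyConsole),
        // Auto tries the fancy console and falls back, mirroring
        // `ConsoleType::Auto` above.
        Kind::Auto if tty_available => Box::new(FancyConsole),
        Kind::Auto => Box::new(PlainConsole),
        // Even "no console" returns a subscriber, so callers no longer
        // have to handle an `Option`.
        Kind::None => Box::new(ErrorsOnlyConsole),
    }
}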
diff --git a/app/buck2_client_ctx/src/subscribers/mod.rs b/app/buck2_client_ctx/src/subscribers/mod.rs
deleted file mode 100644
index 051774c2291a8..0000000000000
--- a/app/buck2_client_ctx/src/subscribers/mod.rs
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */
-
-use std::io;
-use std::process;
-use std::time::Duration;
-
-use anyhow::Context as _;
-use buck2_core::env_helper::EnvHelper;
-use buck2_core::sandcastle::is_sandcastle;
-use tokio::process::Child;
-use tokio::task::JoinHandle;
-
-pub(crate) mod build_graph_stats;
-pub(crate) mod build_id_writer;
-pub mod event_log;
-pub mod get;
-pub(crate) mod observer;
-pub mod re_log;
-pub mod recorder;
-pub(crate) mod simpleconsole;
-pub mod stdout_stderr_forwarder;
-pub mod subscriber;
-pub mod subscriber_unpack;
-pub mod superconsole;
-
-pub fn should_upload_log() -> anyhow::Result<bool> {
-    if buck2_core::is_open_source() {
-        return Ok(false);
-    }
-    static DISABLE_LOG_UPLOAD: EnvHelper<bool> = EnvHelper::new("BUCK2_TEST_DISABLE_LOG_UPLOAD");
-    Ok(!DISABLE_LOG_UPLOAD.get()?.copied().unwrap_or_default())
-}
-
-pub fn should_block_on_log_upload() -> anyhow::Result<bool> {
-    // Used by our tests.
-    static TEST_BLOCK_ON_UPLOAD: EnvHelper<bool> = EnvHelper::new("BUCK2_TEST_BLOCK_ON_UPLOAD");
-
-    Ok(is_sandcastle()? || TEST_BLOCK_ON_UPLOAD.get_copied()?.unwrap_or_default())
-}
-
-/// Wait for the child to finish. Assume its stderr was piped.
-pub async fn wait_for_child_and_log(child: FutureChildOutput, reason: &str) {
-    async fn inner(child: FutureChildOutput) -> anyhow::Result<()> {
-        let res = tokio::time::timeout(Duration::from_secs(20), child.task)
-            .await
-            .context("Timed out")?
-            .context("Task failed")?
-            .context("Process failed")?;
-
-        if !res.status.success() {
-            let stderr = String::from_utf8_lossy(&res.stderr);
-            return Err(anyhow::anyhow!(
-                "Upload exited with status `{}`. Stderr: `{}`",
-                res.status,
-                stderr.trim(),
-            ));
-        };
-        Ok(())
-    }
-
-    match inner(child).await {
-        Ok(_) => {}
-        Err(e) => {
-            tracing::warn!("Error uploading {}: {:#}", reason, e);
-        }
-    }
-}
-
-/// Ensure that if we spawn children, we don't block their stderr.
-pub struct FutureChildOutput {
-    task: JoinHandle<io::Result<process::Output>>,
-}
-
-impl FutureChildOutput {
-    pub fn new(child: Child) -> Self {
-        Self {
-            task: tokio::task::spawn(async move { child.wait_with_output().await }),
-        }
-    }
-}
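`wait_for_child_and_log` above bounds the wait on a spawned uploader so a stuck upload cannot hang the client. The same pattern, condensed into one function; `some-uploader` is a stand-in command, not a real binary:

use std::process::Stdio;
use std::time::Duration;

use anyhow::Context as _;

async fn upload_with_timeout() -> anyhow::Result<()> {
    // Pipe stderr so a failure can be reported; null the rest.
    let child = tokio::process::Command::new("some-uploader")
        .stdin(Stdio::null())
        .stdout(Stdio::null())
        .stderr(Stdio::piped())
        .spawn()?;

    // Collect the output on a task so the child is never blocked on us.
    let task = tokio::task::spawn(async move { child.wait_with_output().await });

    let out = tokio::time::timeout(Duration::from_secs(20), task)
        .await
        .context("Timed out")? // tokio::time::error::Elapsed
        .context("Task failed")? // tokio::task::JoinError
        .context("Process failed")?; // std::io::Error

    anyhow::ensure!(
        out.status.success(),
        "Upload exited with status `{}`. Stderr: `{}`",
        out.status,
        String::from_utf8_lossy(&out.stderr).trim(),
    );
    Ok(())
}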
diff --git a/app/buck2_client_ctx/src/subscribers/observer.rs b/app/buck2_client_ctx/src/subscribers/observer.rs
index e0b9e49118faa..fee64ad109dac 100644
--- a/app/buck2_client_ctx/src/subscribers/observer.rs
+++ b/app/buck2_client_ctx/src/subscribers/observer.rs
@@ -10,10 +10,6 @@
 /// A trait for such event subscribers that are watching a specific set of
 /// errors and keeping the record of them for later use.
 pub trait ErrorObserver {
-    fn error_cause(&self) -> ErrorCause {
-        ErrorCause::Unknown
-    }
-
     /// Whether this observer thinks that the daemon needs killing to work again.
     fn daemon_in_memory_state_is_corrupted(&self) -> bool {
         false
@@ -29,10 +25,3 @@ pub trait ErrorObserver {
         false
     }
 }
-
-pub enum ErrorCause {
-    Unknown,
-    Infra,
-    User,
-    DaemonIsBusy,
-}
diff --git a/app/buck2_client_ctx/src/subscribers/re_log.rs b/app/buck2_client_ctx/src/subscribers/re_log.rs
index 6f8a51f934f9a..b4778b6b0f254 100644
--- a/app/buck2_client_ctx/src/subscribers/re_log.rs
+++ b/app/buck2_client_ctx/src/subscribers/re_log.rs
@@ -12,37 +12,34 @@ use std::sync::Arc;
 
 use async_trait::async_trait;
 use buck2_core::fs::paths::file_name::FileNameBuf;
+use buck2_event_log::should_block_on_log_upload;
+use buck2_event_log::should_upload_log;
+use buck2_event_log::wait_for_child_and_log;
+use buck2_event_log::FutureChildOutput;
 use buck2_event_observer::unpack_event::unpack_event;
 use buck2_event_observer::unpack_event::UnpackedBuckEvent;
 use buck2_events::BuckEvent;
+use buck2_util::cleanup_ctx::AsyncCleanupContext;
 use futures::Future;
 use futures::FutureExt;
 
-use crate::cleanup_ctx::AsyncCleanupContext;
-use crate::subscribers::should_block_on_log_upload;
-use crate::subscribers::should_upload_log;
 use crate::subscribers::subscriber::EventSubscriber;
-use crate::subscribers::wait_for_child_and_log;
-use crate::subscribers::FutureChildOutput;
 
 pub(crate) struct ReLog<'a> {
     re_session_id: Option<String>,
     isolation_dir: FileNameBuf,
     async_cleanup_context: AsyncCleanupContext<'a>,
-    allow_vpnless: bool,
 }
 
 impl<'a> ReLog<'a> {
     pub(crate) fn new(
         isolation_dir: FileNameBuf,
         async_cleanup_context: AsyncCleanupContext<'a>,
-        allow_vpnless: bool,
     ) -> Self {
         Self {
             re_session_id: None,
             isolation_dir,
            async_cleanup_context,
-            allow_vpnless,
         }
     }
 
@@ -51,10 +48,9 @@ impl<'a> ReLog<'a> {
         // the logs once no matter how many times this function is called
         let session_id = self.re_session_id.take();
         let isolation_dir = self.isolation_dir.clone();
-        let allow_vpnless = self.allow_vpnless;
         async move {
             if let Some(s_id) = session_id {
-                log_upload_impl(s_id, isolation_dir, allow_vpnless).await?;
+                log_upload_impl(s_id, isolation_dir).await?;
             }
             Ok(())
         }
@@ -99,11 +95,7 @@ impl<'a> Drop for ReLog<'a> {
     }
 }
 
-async fn log_upload_impl(
-    session_id: String,
-    isolation_dir: FileNameBuf,
-    allow_vpnless: bool,
-) -> anyhow::Result<()> {
+async fn log_upload_impl(session_id: String, isolation_dir: FileNameBuf) -> anyhow::Result<()> {
     if !should_upload_log()? {
         return Ok(());
     }
@@ -118,11 +110,6 @@ async fn log_upload_impl(
         .arg(session_id)
         .stdin(Stdio::null())
         .stdout(Stdio::null());
-
-    if allow_vpnless {
-        command.arg("--allow-vpnless");
-    }
-
     if should_block_on_log_upload()? {
         let child = command.stderr(Stdio::piped()).spawn()?;
         wait_for_child_and_log(FutureChildOutput::new(child), "RE Log").await;
diff --git a/app/buck2_client_ctx/src/subscribers/recorder.rs b/app/buck2_client_ctx/src/subscribers/recorder.rs
index 7bae4aef1bf3b..b686553781f3c 100644
--- a/app/buck2_client_ctx/src/subscribers/recorder.rs
+++ b/app/buck2_client_ctx/src/subscribers/recorder.rs
@@ -7,1137 +7,1746 @@
  * of this source tree.
*/ +use std::cmp::max; +use std::cmp::min; +use std::collections::HashMap; +use std::collections::HashSet; +use std::future::Future; +use std::io::Write; use std::sync::atomic::AtomicU64; +use std::sync::atomic::Ordering; use std::sync::Arc; - +use std::time::Duration; +use std::time::Instant; +use std::time::SystemTime; + +use anyhow::Context; +use async_trait::async_trait; +use buck2_cli_proto::command_result; +use buck2_common::build_count::BuildCount; +use buck2_common::build_count::BuildCountManager; +use buck2_common::convert::ProstDurationExt; +use buck2_core::fs::fs_util; +use buck2_core::fs::paths::abs_path::AbsPathBuf; +use buck2_core::soft_error; +use buck2_data::error::ErrorTag; +use buck2_data::ErrorReport; +use buck2_data::ProcessedErrorReport; +use buck2_data::SystemInfo; +use buck2_data::TargetCfg; +use buck2_error::classify::best_error; +use buck2_error::classify::best_tag; +use buck2_error::classify::ErrorLike; +use buck2_error::classify::ERROR_TAG_UNCLASSIFIED; +use buck2_event_log::ttl::manifold_event_log_ttl; +use buck2_event_observer::action_stats; +use buck2_event_observer::action_stats::ActionStats; +use buck2_event_observer::cache_hit_rate::total_cache_hit_rate; +use buck2_event_observer::last_command_execution_kind; +use buck2_event_observer::last_command_execution_kind::LastCommandExecutionKind; +use buck2_events::errors::create_error_report; +use buck2_events::sink::remote::new_remote_event_sink_if_enabled; +use buck2_events::BuckEvent; +use buck2_util::cleanup_ctx::AsyncCleanupContext; +use buck2_util::network_speed_average::NetworkSpeedAverage; +use buck2_util::sliding_window::SlidingWindow; +use buck2_wrapper_common::invocation_id::TraceId; use dupe::Dupe; - -use crate::build_count::BuildCountManager; +use fbinit::FacebookInit; +use futures::FutureExt; +use gazebo::prelude::VecExt; +use gazebo::variants::VariantName; +use itertools::Itertools; +use termwiz::istty::IsTty; + +use super::system_warning::check_download_speed; +use super::system_warning::check_memory_pressure; +use super::system_warning::check_remaining_disk_space; use crate::client_ctx::ClientCommandContext; use crate::client_metadata::ClientMetadata; -use crate::common::CommonDaemonCommandOptions; - -mod imp { - use std::cmp; - use std::collections::HashMap; - use std::collections::HashSet; - use std::future::Future; - use std::io::Write; - use std::sync::atomic::AtomicU64; - use std::sync::atomic::Ordering; - use std::sync::Arc; - use std::time::Duration; - use std::time::Instant; - use std::time::SystemTime; - - use anyhow::Context; - use async_trait::async_trait; - use buck2_common::convert::ProstDurationExt; - use buck2_core::fs::fs_util; - use buck2_core::fs::paths::abs_path::AbsPathBuf; - use buck2_event_observer::action_stats; - use buck2_event_observer::cache_hit_rate::total_cache_hit_rate; - use buck2_event_observer::last_command_execution_kind; - use buck2_event_observer::last_command_execution_kind::LastCommandExecutionKind; - use buck2_events::sink::scribe::new_thrift_scribe_sink_if_enabled; - use buck2_events::BuckEvent; - use buck2_wrapper_common::invocation_id::TraceId; - use dupe::Dupe; - use fbinit::FacebookInit; - use futures::FutureExt; - use gazebo::variants::VariantName; - use termwiz::istty::IsTty; - - use crate::build_count::BuildCountManager; - use crate::cleanup_ctx::AsyncCleanupContext; - use crate::subscribers::observer::ErrorCause; - use crate::subscribers::observer::ErrorObserver; - use crate::subscribers::recorder::system_memory_stats; - use 
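The `process_memory` helper above leans on a small trick: `Option::into_iter` yields zero or one items, so chaining two options and taking `max` returns `None` only when both gauges are missing. The same pattern in isolation:

fn max_of(a: Option<u64>, b: Option<u64>) -> Option<u64> {
    // `None` contributes nothing to the chain, so the result is the max
    // of whichever values are present.
    a.into_iter().chain(b).max()
}

#[test]
fn max_of_examples() {
    assert_eq!(max_of(Some(3), Some(5)), Some(5));
    assert_eq!(max_of(None, Some(5)), Some(5));
    assert_eq!(max_of(None, None), None);
}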
crate::subscribers::subscriber::EventSubscriber; - - pub struct InvocationRecorder<'a> { +use crate::common::CommonEventLogOptions; +use crate::console_interaction_stream::SuperConsoleToggle; +use crate::subscribers::classify_server_stderr::classify_server_stderr; +use crate::subscribers::observer::ErrorObserver; +use crate::subscribers::subscriber::EventSubscriber; +use crate::subscribers::system_warning::check_cache_misses; +use crate::subscribers::system_warning::is_vpn_enabled; + +pub fn process_memory(snapshot: &buck2_data::Snapshot) -> Option { + // buck2_rss is the resident set size observed by daemon (exluding subprocesses). + // On MacOS buck2_rss is not stored and also RSS in general is not a reliable indicator due to swapping which moves pages from resident set to disk. + // Hence, we take max of buck2_rss and malloc_bytes_active (coming from jemalloc and is available on Macs as well). + snapshot + .malloc_bytes_active + .into_iter() + .chain(snapshot.buck2_rss) + .max() +} + +const MEMORY_PRESSURE_TAG: &str = "memory_pressure_warning"; + +pub(crate) struct InvocationRecorder<'a> { + fb: FacebookInit, + write_to_path: Option, + command_name: &'static str, + cli_args: Vec, + isolation_dir: String, + start_time: Instant, + async_cleanup_context: AsyncCleanupContext<'a>, + build_count_manager: Option, + trace_id: TraceId, + command_end: Option, + command_duration: Option, + re_session_id: Option, + re_experiment_name: Option, + critical_path_duration: Option, + tags: Vec, + run_local_count: u64, + run_remote_count: u64, + run_action_cache_count: u64, + run_remote_dep_file_cache_count: u64, + run_skipped_count: u64, + run_fallback_count: u64, + local_actions_executed_via_worker: u64, + first_snapshot: Option, + last_snapshot: Option, + min_attempted_build_count_since_rebase: u64, + min_build_count_since_rebase: u64, + cache_upload_count: u64, + cache_upload_attempt_count: u64, + dep_file_upload_count: u64, + dep_file_upload_attempt_count: u64, + parsed_target_patterns: Option, + filesystem: String, + watchman_version: Option, + eden_version: Option, + test_info: Option, + eligible_for_full_hybrid: bool, + max_event_client_delay: Option, + max_malloc_bytes_active: Option, + max_malloc_bytes_allocated: Option, + run_command_failure_count: u64, + event_count: u64, + time_to_first_action_execution: Option, + materialization_output_size: u64, + initial_materializer_entries_from_sqlite: Option, + time_to_command_start: Option, + time_to_command_critical_section: Option, + time_to_first_analysis: Option, + time_to_load_first_build_file: Option, + time_to_first_command_execution_start: Option, + time_to_first_test_discovery: Option, + system_info: SystemInfo, + file_watcher_stats: Option, + file_watcher_duration: Option, + time_to_last_action_execution_end: Option, + initial_sink_success_count: Option, + initial_sink_failure_count: Option, + initial_sink_dropped_count: Option, + initial_sink_bytes_written: Option, + sink_max_buffer_depth: u64, + soft_error_categories: HashSet, + concurrent_command_blocking_duration: Option, + metadata: HashMap, + analysis_count: u64, + daemon_in_memory_state_is_corrupted: bool, + daemon_materializer_state_is_corrupted: bool, + enable_restarter: bool, + restarted_trace_id: Option, + has_command_result: bool, + has_end_of_stream: bool, + compressed_event_log_size_bytes: Option>, + critical_path_backend: Option, + instant_command_is_success: Option, + bxl_ensure_artifacts_duration: Option, + install_duration: Option, + install_device_metadata: Vec, + 
initial_re_upload_bytes: Option, + initial_re_download_bytes: Option, + initial_zdb_download_queries: Option, + initial_zdb_download_bytes: Option, + initial_zdb_upload_queries: Option, + initial_zdb_upload_bytes: Option, + initial_zgateway_download_queries: Option, + initial_zgateway_download_bytes: Option, + initial_zgateway_upload_queries: Option, + initial_zgateway_upload_bytes: Option, + initial_manifold_download_queries: Option, + initial_manifold_download_bytes: Option, + initial_manifold_upload_queries: Option, + initial_manifold_upload_bytes: Option, + initial_hedwig_download_queries: Option, + initial_hedwig_download_bytes: Option, + initial_hedwig_upload_queries: Option, + initial_hedwig_upload_bytes: Option, + concurrent_command_ids: HashSet, + daemon_connection_failure: bool, + /// Daemon started by this command. + daemon_was_started: Option, + client_metadata: Vec, + client_errors: Vec, + command_errors: Vec, + /// To append to gRPC errors. + server_stderr: String, + target_rule_type_names: Vec, + re_max_download_speeds: Vec, + re_max_upload_speeds: Vec, + re_avg_download_speed: NetworkSpeedAverage, + re_avg_upload_speed: NetworkSpeedAverage, + peak_process_memory_bytes: Option, + has_new_buckconfigs: bool, + buckconfig_diff_count: Option, + buckconfig_diff_size: Option, + peak_used_disk_space_bytes: Option, + active_networks_kinds: HashSet, + target_cfg: Option, + version_control_revision: Option, + concurrent_commands: bool, +} + +struct ErrorsReport { + errors: Vec, + best_error_tag: Option, + best_error_category_key: Option, + error_category: Option, +} + +impl<'a> InvocationRecorder<'a> { + pub fn new( fb: FacebookInit, + async_cleanup_context: AsyncCleanupContext<'a>, write_to_path: Option, command_name: &'static str, - cli_args: Vec, - isolation_dir: String, - start_time: Instant, - async_cleanup_context: AsyncCleanupContext<'a>, - build_count_manager: BuildCountManager, + sanitized_argv: Vec, trace_id: TraceId, - command_end: Option, - command_duration: Option, - re_session_id: Option, - re_experiment_name: Option, - critical_path_duration: Option, - tags: Vec, - run_local_count: u64, - run_remote_count: u64, - run_action_cache_count: u64, - run_remote_dep_file_cache_count: u64, - run_skipped_count: u64, - run_fallback_count: u64, - local_actions_executed_via_worker: u64, - first_snapshot: Option, - last_snapshot: Option, - min_build_count_since_rebase: u64, - cache_upload_count: u64, - cache_upload_attempt_count: u64, - parsed_target_patterns: Option, + isolation_dir: String, + build_count_manager: Option, filesystem: String, - watchman_version: Option, - eden_version: Option, - test_info: Option, - eligible_for_full_hybrid: bool, - max_event_client_delay: Option, - max_malloc_bytes_active: Option, - max_malloc_bytes_allocated: Option, - run_command_failure_count: u64, - event_count: u64, - time_to_first_action_execution: Option, - materialization_output_size: u64, - initial_materializer_entries_from_sqlite: Option, - time_to_command_start: Option, - time_to_command_critical_section: Option, - time_to_first_analysis: Option, - time_to_load_first_build_file: Option, - time_to_first_command_execution_start: Option, - system_total_memory_bytes: Option, - file_watcher_stats: Option, - time_to_last_action_execution_end: Option, - initial_sink_success_count: Option, - initial_sink_failure_count: Option, - initial_sink_dropped_count: Option, - sink_max_buffer_depth: u64, - soft_error_categories: HashSet, - concurrent_command_blocking_duration: Option, - metadata: HashMap, - 
analysis_count: u64, - exit_when_different_state: bool, - daemon_in_memory_state_is_corrupted: bool, - daemon_materializer_state_is_corrupted: bool, - enable_restarter: bool, restarted_trace_id: Option, - has_command_result: bool, - has_end_of_stream: bool, - compressed_event_log_size_bytes: Option>, - critical_path_backend: Option, - instant_command_is_success: Option, - bxl_ensure_artifacts_duration: Option, - initial_re_upload_bytes: Option, - initial_re_download_bytes: Option, - concurrent_command_ids: HashSet, - daemon_connection_failure: bool, + log_size_counter_bytes: Option>, client_metadata: Vec, - error_messages: Vec, + ) -> Self { + Self { + fb, + write_to_path, + command_name, + cli_args: sanitized_argv, + isolation_dir, + start_time: Instant::now(), + async_cleanup_context, + build_count_manager, + trace_id, + command_end: None, + command_duration: None, + re_session_id: None, + re_experiment_name: None, + critical_path_duration: None, + tags: vec![], + run_local_count: 0, + run_remote_count: 0, + run_action_cache_count: 0, + run_remote_dep_file_cache_count: 0, + run_skipped_count: 0, + run_fallback_count: 0, + local_actions_executed_via_worker: 0, + first_snapshot: None, + last_snapshot: None, + min_attempted_build_count_since_rebase: 0, + min_build_count_since_rebase: 0, + cache_upload_count: 0, + cache_upload_attempt_count: 0, + dep_file_upload_count: 0, + dep_file_upload_attempt_count: 0, + parsed_target_patterns: None, + filesystem, + watchman_version: None, + eden_version: None, + test_info: None, + eligible_for_full_hybrid: false, + max_event_client_delay: None, + max_malloc_bytes_active: None, + max_malloc_bytes_allocated: None, + run_command_failure_count: 0, + event_count: 0, + time_to_first_action_execution: None, + materialization_output_size: 0, + initial_materializer_entries_from_sqlite: None, + time_to_command_start: None, + time_to_command_critical_section: None, + time_to_first_analysis: None, + time_to_load_first_build_file: None, + time_to_first_command_execution_start: None, + time_to_first_test_discovery: None, + system_info: SystemInfo::default(), + file_watcher_stats: None, + file_watcher_duration: None, + time_to_last_action_execution_end: None, + initial_sink_success_count: None, + initial_sink_failure_count: None, + initial_sink_dropped_count: None, + initial_sink_bytes_written: None, + sink_max_buffer_depth: 0, + soft_error_categories: HashSet::new(), + concurrent_command_blocking_duration: None, + metadata: buck2_events::metadata::collect(), + analysis_count: 0, + daemon_in_memory_state_is_corrupted: false, + daemon_materializer_state_is_corrupted: false, + enable_restarter: false, + restarted_trace_id, + has_command_result: false, + has_end_of_stream: false, + compressed_event_log_size_bytes: log_size_counter_bytes, + critical_path_backend: None, + instant_command_is_success: None, + bxl_ensure_artifacts_duration: None, + install_duration: None, + install_device_metadata: Vec::new(), + initial_re_upload_bytes: None, + initial_re_download_bytes: None, + initial_zdb_download_queries: None, + initial_zdb_download_bytes: None, + initial_zdb_upload_queries: None, + initial_zdb_upload_bytes: None, + initial_zgateway_download_queries: None, + initial_zgateway_download_bytes: None, + initial_zgateway_upload_queries: None, + initial_zgateway_upload_bytes: None, + initial_manifold_download_queries: None, + initial_manifold_download_bytes: None, + initial_manifold_upload_queries: None, + initial_manifold_upload_bytes: None, + initial_hedwig_download_queries: 
None, + initial_hedwig_download_bytes: None, + initial_hedwig_upload_queries: None, + initial_hedwig_upload_bytes: None, + concurrent_command_ids: HashSet::new(), + daemon_connection_failure: false, + daemon_was_started: None, + client_metadata, + client_errors: Vec::new(), + command_errors: Vec::new(), + server_stderr: String::new(), + target_rule_type_names: Vec::new(), + re_max_download_speeds: vec![ + SlidingWindow::new(Duration::from_secs(1)), + SlidingWindow::new(Duration::from_secs(5)), + SlidingWindow::new(Duration::from_secs(10)), + ], + re_max_upload_speeds: vec![ + SlidingWindow::new(Duration::from_secs(1)), + SlidingWindow::new(Duration::from_secs(5)), + SlidingWindow::new(Duration::from_secs(10)), + ], + re_avg_download_speed: NetworkSpeedAverage::default(), + re_avg_upload_speed: NetworkSpeedAverage::default(), + peak_process_memory_bytes: None, + has_new_buckconfigs: false, + buckconfig_diff_count: None, + buckconfig_diff_size: None, + peak_used_disk_space_bytes: None, + active_networks_kinds: HashSet::new(), + target_cfg: None, + version_control_revision: None, + concurrent_commands: false, + } } - impl<'a> InvocationRecorder<'a> { - pub fn new( - fb: FacebookInit, - async_cleanup_context: AsyncCleanupContext<'a>, - write_to_path: Option, - command_name: &'static str, - sanitized_argv: Vec, - trace_id: TraceId, - isolation_dir: String, - build_count_manager: BuildCountManager, - filesystem: String, - restarted_trace_id: Option, - log_size_counter_bytes: Option>, - client_metadata: Vec, - ) -> Self { - Self { - fb, - write_to_path, - command_name, - cli_args: sanitized_argv, - isolation_dir, - start_time: Instant::now(), - async_cleanup_context, - build_count_manager, - trace_id, - command_end: None, - command_duration: None, - re_session_id: None, - re_experiment_name: None, - critical_path_duration: None, - tags: vec![], - run_local_count: 0, - run_remote_count: 0, - run_action_cache_count: 0, - run_remote_dep_file_cache_count: 0, - run_skipped_count: 0, - run_fallback_count: 0, - local_actions_executed_via_worker: 0, - first_snapshot: None, - last_snapshot: None, - min_build_count_since_rebase: 0, - cache_upload_count: 0, - cache_upload_attempt_count: 0, - parsed_target_patterns: None, - filesystem, - watchman_version: None, - eden_version: None, - test_info: None, - eligible_for_full_hybrid: false, - max_event_client_delay: None, - max_malloc_bytes_active: None, - max_malloc_bytes_allocated: None, - run_command_failure_count: 0, - event_count: 0, - time_to_first_action_execution: None, - materialization_output_size: 0, - initial_materializer_entries_from_sqlite: None, - time_to_command_start: None, - time_to_command_critical_section: None, - time_to_first_analysis: None, - time_to_load_first_build_file: None, - time_to_first_command_execution_start: None, - system_total_memory_bytes: Some(system_memory_stats()), - file_watcher_stats: None, - time_to_last_action_execution_end: None, - initial_sink_success_count: None, - initial_sink_failure_count: None, - initial_sink_dropped_count: None, - sink_max_buffer_depth: 0, - soft_error_categories: HashSet::new(), - concurrent_command_blocking_duration: None, - metadata: buck2_events::metadata::collect(), - analysis_count: 0, - exit_when_different_state: false, - daemon_in_memory_state_is_corrupted: false, - daemon_materializer_state_is_corrupted: false, - enable_restarter: false, - restarted_trace_id, - has_command_result: false, - has_end_of_stream: false, - compressed_event_log_size_bytes: log_size_counter_bytes, - 
critical_path_backend: None, - instant_command_is_success: None, - bxl_ensure_artifacts_duration: None, - initial_re_upload_bytes: None, - initial_re_download_bytes: None, - concurrent_command_ids: HashSet::new(), - daemon_connection_failure: false, - client_metadata, - error_messages: Vec::new(), + pub fn instant_command_outcome(&mut self, is_success: bool) { + self.instant_command_is_success = Some(is_success); + } + + async fn build_count( + &mut self, + is_success: bool, + command_name: &str, + ) -> anyhow::Result> { + if let Some(stats) = &self.file_watcher_stats { + if let Some(merge_base) = &stats.branched_from_revision { + match &self.parsed_target_patterns { + None => { + if is_success { + return Err(anyhow::anyhow!( + "successful {} commands should have resolved target patterns", + command_name + )); + } + // fallthrough to 0 below + } + Some(v) => { + return if let Some(build_count) = &self.build_count_manager { + Some( + build_count + .increment(merge_base, v, is_success) + .await + .context("Error recording build count"), + ) + .transpose() + } else { + Ok(None) + }; + } + }; } } - pub fn instant_command_outcome(&mut self, is_success: bool) { - self.instant_command_is_success = Some(is_success); - } + Ok(Default::default()) + } - async fn build_count( - &mut self, - is_success: bool, - command_name: &str, - ) -> anyhow::Result { - if let Some(stats) = &self.file_watcher_stats { - if let Some(merge_base) = &stats.branched_from_revision { - match &self.parsed_target_patterns { - None => { - if is_success { - return Err(anyhow::anyhow!( - "successful {} commands should have resolved target patterns", - command_name - )); - } - // fallthrough to 0 below - } - Some(v) => { - return self - .build_count_manager - .min_build_count(merge_base, v, is_success) - .await - .context("Error recording build count"); - } - }; + fn finalize_errors(&mut self) -> ErrorsReport { + // Add stderr to GRPC connection errors if available + let connection_errors: Vec = self + .client_errors + .extract_if(|e| e.has_tag(ErrorTag::ClientGrpc)) + .collect(); + + for error in connection_errors { + let error = classify_server_stderr(error, &self.server_stderr); + + let error = if self.server_stderr.is_empty() { + let error = error.context("buckd stderr is empty"); + // Likely buckd received SIGKILL, may be due to memory pressure + if self.tags.iter().any(|s| s == MEMORY_PRESSURE_TAG) { + error + .context("memory pressure detected") + .tag([ErrorTag::ServerMemoryPressure]) + } else { + error } - } + } else if error.has_tag(ErrorTag::ServerSigterm) { + error.context("buckd killed by SIGTERM") + } else { + // Scribe sink truncates messages, but here we can do it better: + // - truncate even if total message is not large enough + // - truncate stderr, but keep the error message + let server_stderr = truncate_stderr(&self.server_stderr); + error.context(format!("buckd stderr:\n{}", server_stderr)) + }; - Ok(0) + self.client_errors.push(error); } - fn send_it(&mut self) -> Option + 'static + Send> { - let mut sink_success_count = None; - let mut sink_failure_count = None; - let mut sink_dropped_count = None; - let mut re_upload_bytes = None; - let mut re_download_bytes = None; - if let Some(snapshot) = &self.last_snapshot { - sink_success_count = calculate_diff_if_some( - &snapshot.sink_successes, - &self.initial_sink_success_count, - ); - sink_failure_count = calculate_diff_if_some( - &snapshot.sink_failures, - &self.initial_sink_failure_count, - ); - sink_dropped_count = calculate_diff_if_some( - 
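The new `build_count` above delegates to `BuildCountManager::increment`, which (judging by the fields recorded later) tracks both attempted and successful builds since the last rebase, keyed by the merge-base revision and the target patterns. A simplified in-memory model of that bookkeeping, offered as an assumption about its shape rather than the real implementation:

use std::collections::HashMap;

#[derive(Clone, Copy, Default, Debug, PartialEq)]
struct Count {
    attempted: u64,
    successful: u64,
}

#[derive(Default)]
struct BuildCounts {
    // Keyed by merge-base revision plus target patterns.
    counts: HashMap<String, Count>,
}

impl BuildCounts {
    fn increment(&mut self, key: &str, is_success: bool) -> Count {
        let c = self.counts.entry(key.to_owned()).or_default();
        c.attempted += 1;
        if is_success {
            c.successful += 1;
        }
        *c
    }
}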
&snapshot.sink_dropped, - &self.initial_sink_dropped_count, - ); - re_upload_bytes = calculate_diff_if_some( - &Some(snapshot.re_upload_bytes), - &self.initial_re_upload_bytes, - ); - re_download_bytes = calculate_diff_if_some( - &Some(snapshot.re_download_bytes), - &self.initial_re_download_bytes, - ); - } + let mut errors = + std::mem::take(&mut self.client_errors).into_map(|e| create_error_report(&e)); + let command_errors = std::mem::take(&mut self.command_errors); + errors.extend(command_errors); - let mut metadata = Self::default_metadata(); - metadata.strings.extend(std::mem::take(&mut self.metadata)); - - let record = buck2_data::InvocationRecord { - command_name: Some(self.command_name.to_owned()), - command_end: self.command_end.take(), - command_duration: self.command_duration.take(), - client_walltime: self.start_time.elapsed().try_into().ok(), - re_session_id: self.re_session_id.take().unwrap_or_default(), - re_experiment_name: self.re_experiment_name.take().unwrap_or_default(), - cli_args: self.cli_args.clone(), - critical_path_duration: self.critical_path_duration.and_then(|x| x.try_into().ok()), - metadata: Some(metadata), - tags: self.tags.drain(..).collect(), - run_local_count: self.run_local_count, - run_remote_count: self.run_remote_count, - run_action_cache_count: self.run_action_cache_count, - run_remote_dep_file_cache_count: self.run_remote_dep_file_cache_count, - cache_hit_rate: total_cache_hit_rate( - self.run_local_count, - self.run_remote_count, - self.run_action_cache_count, - self.run_remote_dep_file_cache_count, - ) as f32, - run_skipped_count: self.run_skipped_count, - run_fallback_count: Some(self.run_fallback_count), - local_actions_executed_via_worker: Some(self.local_actions_executed_via_worker), - first_snapshot: self.first_snapshot.take(), - last_snapshot: self.last_snapshot.take(), - min_build_count_since_rebase: self.min_build_count_since_rebase, - cache_upload_count: self.cache_upload_count, - cache_upload_attempt_count: self.cache_upload_attempt_count, - parsed_target_patterns: self.parsed_target_patterns.take(), - filesystem: std::mem::take(&mut self.filesystem), - watchman_version: self.watchman_version.take(), - eden_version: self.eden_version.take(), - test_info: self.test_info.take(), - eligible_for_full_hybrid: Some(self.eligible_for_full_hybrid), - max_event_client_delay_ms: self - .max_event_client_delay - .and_then(|d| u64::try_from(d.as_millis()).ok()), - max_malloc_bytes_active: self.max_malloc_bytes_active.take(), - max_malloc_bytes_allocated: self.max_malloc_bytes_allocated.take(), - run_command_failure_count: Some(self.run_command_failure_count), - event_count: Some(self.event_count), - time_to_first_action_execution_ms: self - .time_to_first_action_execution - .and_then(|d| u64::try_from(d.as_millis()).ok()), - materialization_output_size: Some(self.materialization_output_size), - initial_materializer_entries_from_sqlite: self - .initial_materializer_entries_from_sqlite, - time_to_command_start_ms: self - .time_to_command_start - .and_then(|d| u64::try_from(d.as_millis()).ok()), - time_to_command_critical_section_ms: self - .time_to_command_critical_section - .and_then(|d| u64::try_from(d.as_millis()).ok()), - time_to_first_analysis_ms: self - .time_to_first_analysis - .and_then(|d| u64::try_from(d.as_millis()).ok()), - time_to_load_first_build_file_ms: self - .time_to_load_first_build_file - .and_then(|d| u64::try_from(d.as_millis()).ok()), - time_to_first_command_execution_start_ms: self - .time_to_first_command_execution_start - 
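`calculate_diff_if_some` is used throughout `send_it` but defined elsewhere in the file; a plausible shape, stated as an assumption: the final counter minus the counter captured at the first snapshot, defined only when both exist.

fn calculate_diff_if_some(last: &Option<u64>, initial: &Option<u64>) -> Option<u64> {
    match (last, initial) {
        // Saturating subtraction keeps a counter reset from underflowing.
        (Some(last), Some(initial)) => Some(last.saturating_sub(*initial)),
        _ => None,
    }
}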
.and_then(|d| u64::try_from(d.as_millis()).ok()), - system_total_memory_bytes: self.system_total_memory_bytes, - file_watcher_stats: self.file_watcher_stats.take(), - time_to_last_action_execution_end_ms: self - .time_to_last_action_execution_end - .and_then(|d| u64::try_from(d.as_millis()).ok()), - isolation_dir: Some(self.isolation_dir.clone()), - sink_success_count, - sink_failure_count, - sink_dropped_count, - sink_max_buffer_depth: Some(self.sink_max_buffer_depth), - soft_error_categories: std::mem::take(&mut self.soft_error_categories) - .into_iter() - .collect(), - concurrent_command_blocking_duration: self - .concurrent_command_blocking_duration - .and_then(|x| x.try_into().ok()), - analysis_count: Some(self.analysis_count), - exit_when_different_state: Some(self.exit_when_different_state), - restarted_trace_id: self.restarted_trace_id.as_ref().map(|t| t.to_string()), - has_command_result: Some(self.has_command_result), - has_end_of_stream: Some(self.has_end_of_stream), - // At this point we expect the event log writer to have finished - compressed_event_log_size_bytes: Some( - self.compressed_event_log_size_bytes - .as_ref() - .map(|x| x.load(Ordering::Relaxed)) - .unwrap_or_default(), - ), - critical_path_backend: self.critical_path_backend.take(), - instant_command_is_success: self.instant_command_is_success.take(), - bxl_ensure_artifacts_duration: self.bxl_ensure_artifacts_duration.take(), - re_upload_bytes, - re_download_bytes, - concurrent_command_ids: std::mem::take(&mut self.concurrent_command_ids) - .into_iter() - .collect(), - daemon_connection_failure: Some(self.daemon_connection_failure), - client_metadata: std::mem::take(&mut self.client_metadata), - error_messages: std::mem::take(&mut self.error_messages), - }; + let best_error = best_error(&errors); + let best_error_category_key = best_error.map(|e| e.category_key.clone()).flatten(); + let best_tag = best_error.map(|e| e.best_tag()).flatten(); + let error_category = best_error.map(|error| error.category()); - let event = BuckEvent::new( - SystemTime::now(), - self.trace_id.dupe(), - None, - None, - buck2_data::RecordEvent { - data: Some((Box::new(record)).into()), - } - .into(), + let errors = errors.into_map(process_error_report); + + // `None` if no errors, `Some("UNCLASSIFIED")` if no tags. + let best_error_tag = if errors.is_empty() { + None + } else { + Some( + best_tag + .map_or( + // If we don't have tags on the errors, + // we still want to add a tag to Scuba column. 
+ ERROR_TAG_UNCLASSIFIED, + |t| t.as_str_name(), + ) + .to_owned(), + ) + }; + + ErrorsReport { + errors, + best_error_tag, + best_error_category_key, + error_category, + } + } + + fn send_it(&mut self) -> Option + 'static + Send> { + let mut sink_success_count = None; + let mut sink_failure_count = None; + let mut sink_dropped_count = None; + let mut sink_bytes_written = None; + let mut re_upload_bytes = None; + let mut re_download_bytes = None; + + let mut zdb_download_queries = None; + let mut zdb_download_bytes = None; + let mut zdb_upload_queries = None; + let mut zdb_upload_bytes = None; + + let mut zgateway_download_queries = None; + let mut zgateway_download_bytes = None; + let mut zgateway_upload_queries = None; + let mut zgateway_upload_bytes = None; + + let mut manifold_download_queries = None; + let mut manifold_download_bytes = None; + let mut manifold_upload_queries = None; + let mut manifold_upload_bytes = None; + + let mut hedwig_download_queries = None; + let mut hedwig_download_bytes = None; + let mut hedwig_upload_queries = None; + let mut hedwig_upload_bytes = None; + + if let Some(snapshot) = &self.last_snapshot { + sink_success_count = + calculate_diff_if_some(&snapshot.sink_successes, &self.initial_sink_success_count); + sink_failure_count = + calculate_diff_if_some(&snapshot.sink_failures, &self.initial_sink_failure_count); + sink_dropped_count = + calculate_diff_if_some(&snapshot.sink_dropped, &self.initial_sink_dropped_count); + sink_bytes_written = calculate_diff_if_some( + &snapshot.sink_bytes_written, + &self.initial_sink_bytes_written, + ); + re_upload_bytes = calculate_diff_if_some( + &Some(snapshot.re_upload_bytes), + &self.initial_re_upload_bytes, + ); + re_download_bytes = calculate_diff_if_some( + &Some(snapshot.re_download_bytes), + &self.initial_re_download_bytes, + ); + zdb_download_queries = calculate_diff_if_some( + &Some(snapshot.zdb_download_queries), + &self.initial_zdb_download_queries, + ); + zdb_download_bytes = calculate_diff_if_some( + &Some(snapshot.zdb_download_bytes), + &self.initial_zdb_download_bytes, + ); + zdb_upload_queries = calculate_diff_if_some( + &Some(snapshot.zdb_upload_queries), + &self.initial_zdb_upload_queries, + ); + zdb_upload_bytes = calculate_diff_if_some( + &Some(snapshot.zdb_upload_bytes), + &self.initial_zdb_upload_bytes, + ); + zgateway_download_queries = calculate_diff_if_some( + &Some(snapshot.zgateway_download_queries), + &self.initial_zgateway_download_queries, + ); + zgateway_download_bytes = calculate_diff_if_some( + &Some(snapshot.zgateway_download_bytes), + &self.initial_zgateway_download_bytes, + ); + zgateway_upload_queries = calculate_diff_if_some( + &Some(snapshot.zgateway_upload_queries), + &self.initial_zgateway_upload_queries, + ); + zgateway_upload_bytes = calculate_diff_if_some( + &Some(snapshot.zgateway_upload_bytes), + &self.initial_zgateway_upload_bytes, + ); + manifold_download_queries = calculate_diff_if_some( + &Some(snapshot.manifold_download_queries), + &self.initial_manifold_download_queries, + ); + manifold_download_bytes = calculate_diff_if_some( + &Some(snapshot.manifold_download_bytes), + &self.initial_manifold_download_bytes, + ); + manifold_upload_queries = calculate_diff_if_some( + &Some(snapshot.manifold_upload_queries), + &self.initial_manifold_upload_queries, + ); + manifold_upload_bytes = calculate_diff_if_some( + &Some(snapshot.manifold_upload_bytes), + &self.initial_manifold_upload_bytes, + ); + hedwig_download_queries = calculate_diff_if_some( + 
&Some(snapshot.hedwig_download_queries), + &self.initial_hedwig_download_queries, + ); + hedwig_download_bytes = calculate_diff_if_some( + &Some(snapshot.hedwig_download_bytes), + &self.initial_hedwig_download_bytes, + ); + hedwig_upload_queries = calculate_diff_if_some( + &Some(snapshot.hedwig_upload_queries), + &self.initial_hedwig_upload_queries, + ); + hedwig_upload_bytes = calculate_diff_if_some( + &Some(snapshot.hedwig_upload_bytes), + &self.initial_hedwig_upload_bytes, ); - if let Some(path) = &self.write_to_path { - let res = (|| { - let out = fs_util::create_file(path).context("Error opening")?; - let mut out = std::io::BufWriter::new(out); - serde_json::to_writer(&mut out, event.event()).context("Error writing")?; - out.flush().context("Error flushing")?; - anyhow::Ok(()) - })(); - - if let Err(e) = &res { - tracing::warn!( - "Failed to write InvocationRecord to `{}`: {:#}", - path.as_path().display(), - e - ); - } + // We show memory/disk warnings in the console but we can't emit a tag event there due to having no access to dispatcher. + // Also, it suffices to only emit a single tag per invocation, not one tag each time memory pressure is exceeded. + // Each snapshot already keeps track of the peak memory/disk usage, so we can use that to check if we ever reported a warning. + if check_memory_pressure(Some(snapshot), &self.system_info).is_some() { + self.tags.push(MEMORY_PRESSURE_TAG.to_owned()); } - - if let Ok(Some(scribe_sink)) = - new_thrift_scribe_sink_if_enabled(self.fb, 1, Duration::from_millis(500), 5, None) - { - tracing::info!("Recording invocation to Scribe: {:?}", &event); - Some(async move { - scribe_sink.send_now(event).await; - }) - } else { - tracing::info!("Invocation record is not sent to Scribe: {:?}", &event); - None + if check_remaining_disk_space(Some(snapshot), &self.system_info).is_some() { + self.tags.push("low_disk_space".to_owned()); + } + if check_download_speed( + &self.first_snapshot, + self.last_snapshot.as_ref(), + &self.system_info, + self.re_avg_download_speed.avg_per_second(), + self.concurrent_commands, + ) { + self.tags.push("slow_network_speed".to_owned()); + } + if is_vpn_enabled() { + self.tags.push("vpn_enabled".to_owned()); + } + if check_cache_misses( + &ActionStats { + local_actions: self.run_local_count, + remote_actions: self.run_remote_count, + cached_actions: self.run_action_cache_count, + fallback_actions: self.run_fallback_count, + remote_dep_file_cached_actions: self.run_remote_dep_file_cache_count, + }, + &self.system_info, + self.min_build_count_since_rebase < 2, + None, + ) { + self.tags.push("low_cache_hits".to_owned()); } } - // Collects client-side state and data, suitable for telemetry. 
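Each warning check above contributes at most one tag per invocation, driven by the peak values the snapshots already track. A trimmed illustration of that accumulation (the thresholds and signatures here are illustrative, not the diff's real checks):

fn warning_tags(peak_rss: u64, memory_limit: u64, free_disk: u64, disk_floor: u64) -> Vec<&'static str> {
    let mut tags = Vec::new();
    if peak_rss >= memory_limit {
        tags.push("memory_pressure_warning");
    }
    if free_disk <= disk_floor {
        tags.push("low_disk_space");
    }
    tags
}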
- // NOTE: If data is visible from the daemon, put it in cli::metadata::collect() - fn default_metadata() -> buck2_data::TypedMetadata { - let mut ints = HashMap::new(); - ints.insert("is_tty".to_owned(), std::io::stderr().is_tty() as i64); - buck2_data::TypedMetadata { - ints, - strings: HashMap::new(), + let mut metadata = Self::default_metadata(); + metadata.strings.extend(std::mem::take(&mut self.metadata)); + + let errors_report = self.finalize_errors(); + + let record = buck2_data::InvocationRecord { + command_name: Some(self.command_name.to_owned()), + command_end: self.command_end.take(), + command_duration: self.command_duration.take(), + client_walltime: self.start_time.elapsed().try_into().ok(), + re_session_id: self.re_session_id.take().unwrap_or_default(), + re_experiment_name: self.re_experiment_name.take().unwrap_or_default(), + cli_args: self.cli_args.clone(), + critical_path_duration: self.critical_path_duration.and_then(|x| x.try_into().ok()), + metadata: Some(metadata), + tags: self.tags.drain(..).collect(), + run_local_count: self.run_local_count, + run_remote_count: self.run_remote_count, + run_action_cache_count: self.run_action_cache_count, + run_remote_dep_file_cache_count: self.run_remote_dep_file_cache_count, + cache_hit_rate: total_cache_hit_rate( + self.run_local_count, + self.run_remote_count, + self.run_action_cache_count, + self.run_remote_dep_file_cache_count, + ) as f32, + run_skipped_count: self.run_skipped_count, + run_fallback_count: Some(self.run_fallback_count), + local_actions_executed_via_worker: Some(self.local_actions_executed_via_worker), + first_snapshot: self.first_snapshot.take(), + last_snapshot: self.last_snapshot.take(), + min_attempted_build_count_since_rebase: self.min_attempted_build_count_since_rebase, + min_build_count_since_rebase: self.min_build_count_since_rebase, + cache_upload_count: self.cache_upload_count, + cache_upload_attempt_count: self.cache_upload_attempt_count, + dep_file_upload_count: self.dep_file_upload_count, + dep_file_upload_attempt_count: self.dep_file_upload_attempt_count, + parsed_target_patterns: self.parsed_target_patterns.take(), + filesystem: std::mem::take(&mut self.filesystem), + watchman_version: self.watchman_version.take(), + eden_version: self.eden_version.take(), + test_info: self.test_info.take(), + eligible_for_full_hybrid: Some(self.eligible_for_full_hybrid), + max_event_client_delay_ms: self + .max_event_client_delay + .and_then(|d| u64::try_from(d.as_millis()).ok()), + max_malloc_bytes_active: self.max_malloc_bytes_active.take(), + max_malloc_bytes_allocated: self.max_malloc_bytes_allocated.take(), + run_command_failure_count: Some(self.run_command_failure_count), + event_count: Some(self.event_count), + time_to_first_action_execution_ms: self + .time_to_first_action_execution + .and_then(|d| u64::try_from(d.as_millis()).ok()), + materialization_output_size: Some(self.materialization_output_size), + initial_materializer_entries_from_sqlite: self.initial_materializer_entries_from_sqlite, + time_to_command_start_ms: self + .time_to_command_start + .and_then(|d| u64::try_from(d.as_millis()).ok()), + time_to_command_critical_section_ms: self + .time_to_command_critical_section + .and_then(|d| u64::try_from(d.as_millis()).ok()), + time_to_first_analysis_ms: self + .time_to_first_analysis + .and_then(|d| u64::try_from(d.as_millis()).ok()), + time_to_load_first_build_file_ms: self + .time_to_load_first_build_file + .and_then(|d| u64::try_from(d.as_millis()).ok()), + 
time_to_first_command_execution_start_ms: self + .time_to_first_command_execution_start + .and_then(|d| u64::try_from(d.as_millis()).ok()), + time_to_first_test_discovery_ms: self + .time_to_first_test_discovery + .and_then(|d| u64::try_from(d.as_millis()).ok()), + system_total_memory_bytes: self.system_info.system_total_memory_bytes, + file_watcher_stats: self.file_watcher_stats.take(), + file_watcher_duration_ms: self + .file_watcher_duration + .and_then(|d| u64::try_from(d.as_millis()).ok()), + time_to_last_action_execution_end_ms: self + .time_to_last_action_execution_end + .and_then(|d| u64::try_from(d.as_millis()).ok()), + isolation_dir: Some(self.isolation_dir.clone()), + sink_success_count, + sink_failure_count, + sink_dropped_count, + sink_bytes_written, + sink_max_buffer_depth: Some(self.sink_max_buffer_depth), + soft_error_categories: std::mem::take(&mut self.soft_error_categories) + .into_iter() + .collect(), + concurrent_command_blocking_duration: self + .concurrent_command_blocking_duration + .and_then(|x| x.try_into().ok()), + analysis_count: Some(self.analysis_count), + restarted_trace_id: self.restarted_trace_id.as_ref().map(|t| t.to_string()), + has_command_result: Some(self.has_command_result), + has_end_of_stream: Some(self.has_end_of_stream), + // At this point we expect the event log writer to have finished + compressed_event_log_size_bytes: Some( + self.compressed_event_log_size_bytes + .as_ref() + .map(|x| x.load(Ordering::Relaxed)) + .unwrap_or_default(), + ), + critical_path_backend: self.critical_path_backend.take(), + instant_command_is_success: self.instant_command_is_success.take(), + bxl_ensure_artifacts_duration: self.bxl_ensure_artifacts_duration.take(), + re_upload_bytes, + re_download_bytes, + concurrent_command_ids: std::mem::take(&mut self.concurrent_command_ids) + .into_iter() + .collect(), + daemon_connection_failure: Some(self.daemon_connection_failure), + daemon_was_started: self.daemon_was_started.map(|t| t as i32), + client_metadata: std::mem::take(&mut self.client_metadata), + errors: errors_report.errors, + best_error_tag: errors_report.best_error_tag, + best_error_category_key: errors_report.best_error_category_key, + error_category: errors_report.error_category, + target_rule_type_names: std::mem::take(&mut self.target_rule_type_names), + new_configs_used: Some( + self.has_new_buckconfigs || self.buckconfig_diff_size.map_or(false, |s| s > 0), + ), + re_max_download_speed: self + .re_max_download_speeds + .iter() + .map(|w| w.max_per_second().unwrap_or_default()) + .max(), + re_max_upload_speed: self + .re_max_upload_speeds + .iter() + .map(|w| w.max_per_second().unwrap_or_default()) + .max(), + re_avg_download_speed: self.re_avg_download_speed.avg_per_second(), + re_avg_upload_speed: self.re_avg_upload_speed.avg_per_second(), + install_duration: self.install_duration.take(), + install_device_metadata: self.install_device_metadata.drain(..).collect(), + peak_process_memory_bytes: self.peak_process_memory_bytes.take(), + buckconfig_diff_count: self.buckconfig_diff_count.take(), + buckconfig_diff_size: self.buckconfig_diff_size.take(), + event_log_manifold_ttl_s: manifold_event_log_ttl().ok().map(|t| t.as_secs()), + total_disk_space_bytes: self.system_info.total_disk_space_bytes.take(), + peak_used_disk_space_bytes: self.peak_used_disk_space_bytes.take(), + zdb_download_queries, + zdb_download_bytes, + zdb_upload_queries, + zdb_upload_bytes, + zgateway_download_queries, + zgateway_download_bytes, + zgateway_upload_queries, + 
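The record above converts optional `Duration`s into millisecond columns over and over with the same two-step dance; as a tiny helper it reads as follows (illustrative only, the diff inlines it each time):

use std::time::Duration;

fn to_ms(d: Option<Duration>) -> Option<u64> {
    // `as_millis` returns u128; `try_from` guards against overflow
    // instead of silently truncating.
    d.and_then(|d| u64::try_from(d.as_millis()).ok())
}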
zgateway_upload_bytes, + manifold_download_queries, + manifold_download_bytes, + manifold_upload_queries, + manifold_upload_bytes, + hedwig_download_queries, + hedwig_download_bytes, + hedwig_upload_queries, + hedwig_upload_bytes, + active_networks_kinds: std::mem::take(&mut self.active_networks_kinds) + .into_iter() + .collect(), + target_cfg: self.target_cfg.take(), + version_control_revision: self.version_control_revision.take(), + }; + + let event = BuckEvent::new( + SystemTime::now(), + self.trace_id.dupe(), + None, + None, + buck2_data::RecordEvent { + data: Some((Box::new(record)).into()), + } + .into(), + ); + + if let Some(path) = &self.write_to_path { + let res = (|| { + let out = fs_util::create_file(path).context("Error opening")?; + let mut out = std::io::BufWriter::new(out); + serde_json::to_writer(&mut out, event.event()).context("Error writing")?; + out.flush().context("Error flushing")?; + anyhow::Ok(()) + })(); + + if let Err(e) = &res { + tracing::warn!( + "Failed to write InvocationRecord to `{}`: {:#}", + path.as_path().display(), + e + ); } } - fn handle_command_start( - &mut self, - command: &buck2_data::CommandStart, - _event: &BuckEvent, - ) -> anyhow::Result<()> { - self.metadata.extend(command.metadata.clone()); - self.time_to_command_start = Some(self.start_time.elapsed()); - Ok(()) + if let Ok(Some(scribe_sink)) = + new_remote_event_sink_if_enabled(self.fb, 1, Duration::from_millis(500), 5, None) + { + tracing::info!("Recording invocation to Scribe: {:?}", &event); + Some(async move { + scribe_sink.send_now(event).await; + }) + } else { + tracing::info!("Invocation record is not sent to Scribe: {:?}", &event); + None } + } - async fn handle_command_end( - &mut self, - command: &buck2_data::CommandEnd, - event: &BuckEvent, - ) -> anyhow::Result<()> { - let mut command = command.clone(); - self.error_messages - .extend(std::mem::take(&mut command.error_messages)); - - // Awkwardly unpacks the SpanEnd event so we can read its duration. - let command_end = match event.data() { - buck2_data::buck_event::Data::SpanEnd(ref end) => end.clone(), - _ => { - return Err(anyhow::anyhow!( - "handle_command_end was passed a CommandEnd not contained in a SpanEndEvent" - )); - } - }; - self.command_duration = command_end.duration; - let command_data = command.data.as_ref().context("Missing command data")?; - self.min_build_count_since_rebase = match command_data { - buck2_data::command_end::Data::Build(..) - | buck2_data::command_end::Data::Test(..) - | buck2_data::command_end::Data::Install(..) => { - self.build_count(command.is_success, command_data.variant_name()) - .await? - } - // other events don't count builds - _ => 0, - }; - self.command_end = Some(command); - Ok(()) - } - fn handle_command_critical_start( - &mut self, - command: &buck2_data::CommandCriticalStart, - _event: &BuckEvent, - ) -> anyhow::Result<()> { - self.metadata.extend(command.metadata.clone()); - self.time_to_command_critical_section = Some(self.start_time.elapsed()); - Ok(()) - } - fn handle_command_critical_end( - &mut self, - command: &buck2_data::CommandCriticalEnd, - _event: &BuckEvent, - ) -> anyhow::Result<()> { - self.metadata.extend(command.metadata.clone()); - Ok(()) + // Collects client-side state and data, suitable for telemetry. 
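// Editorial sketch: the `write_to_path` block above is deliberately best-effort — any
// open/serialize/flush failure is only logged, so writing the record can never fail
// the command. The same pattern in isolation (`RecordSketch` is a hypothetical
// stand-in for the real protobuf record):
use std::io::Write;

#[derive(serde::Serialize)]
struct RecordSketch {
    command_name: String,
}

fn write_record_best_effort(path: &std::path::Path, record: &RecordSketch) {
    let res = (|| -> anyhow::Result<()> {
        let out = std::fs::File::create(path)?;
        let mut out = std::io::BufWriter::new(out);
        serde_json::to_writer(&mut out, record)?;
        out.flush()?;
        Ok(())
    })();
    if let Err(e) = res {
        eprintln!("Failed to write record to `{}`: {:#}", path.display(), e);
    }
}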
+ // NOTE: If data is visible from the daemon, put it in cli::metadata::collect() + fn default_metadata() -> buck2_data::TypedMetadata { + let mut ints = HashMap::new(); + ints.insert("is_tty".to_owned(), std::io::stderr().is_tty() as i64); + buck2_data::TypedMetadata { + ints, + strings: HashMap::new(), } + } - fn handle_action_execution_start( - &mut self, - _action: &buck2_data::ActionExecutionStart, - _event: &BuckEvent, - ) -> anyhow::Result<()> { - if self.time_to_first_action_execution.is_none() { - self.time_to_first_action_execution = Some(self.start_time.elapsed()); - } - Ok(()) + // Store the "client" field in the metadata for telemetry + pub fn update_metadata_from_client_metadata(&mut self, client_metadata: &[ClientMetadata]) { + if let Some(client_id_from_client_metadata) = client_metadata + .iter() + .find(|m| m.key == "id") + .map(|m| m.value.clone()) + { + self.metadata.insert( + "client".to_owned(), + client_id_from_client_metadata.to_owned(), + ); } - fn handle_action_execution_end( - &mut self, - action: &buck2_data::ActionExecutionEnd, - _event: &BuckEvent, - ) -> anyhow::Result<()> { - if action.kind == buck2_data::ActionKind::Run as i32 { - if action_stats::was_fallback_action(action) { - self.run_fallback_count += 1; - } + } - match last_command_execution_kind::get_last_command_execution_kind(action) { - LastCommandExecutionKind::Local => { - self.run_local_count += 1; - } - LastCommandExecutionKind::LocalWorker => { - self.run_local_count += 1; - self.local_actions_executed_via_worker += 1; - } - LastCommandExecutionKind::Cached => { - self.run_action_cache_count += 1; - } - LastCommandExecutionKind::RemoteDepFileCached => { - self.run_remote_dep_file_cache_count += 1; - } - LastCommandExecutionKind::Remote => { - self.run_remote_count += 1; - } - LastCommandExecutionKind::NoCommand => { - self.run_skipped_count += 1; + fn handle_command_start( + &mut self, + command: &buck2_data::CommandStart, + _event: &BuckEvent, + ) -> anyhow::Result<()> { + self.metadata.extend(command.metadata.clone()); + self.time_to_command_start = Some(self.start_time.elapsed()); + Ok(()) + } + + async fn handle_command_end( + &mut self, + command: &buck2_data::CommandEnd, + event: &BuckEvent, + ) -> anyhow::Result<()> { + let mut command = command.clone(); + self.command_errors + .extend(std::mem::take(&mut command.errors)); + + // Awkwardly unpacks the SpanEnd event so we can read its duration. + let command_end = match event.data() { + buck2_data::buck_event::Data::SpanEnd(ref end) => end.clone(), + _ => { + return Err(anyhow::anyhow!( + "handle_command_end was passed a CommandEnd not contained in a SpanEndEvent" + )); + } + }; + self.command_duration = command_end.duration; + let command_data = command.data.as_ref().context("Missing command data")?; + let build_count = match command_data { + buck2_data::command_end::Data::Build(..) + | buck2_data::command_end::Data::Test(..) + | buck2_data::command_end::Data::Install(..) 
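// Editorial sketch: `update_metadata_from_client_metadata` above copies the value of
// the "id" entry into the "client" metadata key. The lookup on its own (struct
// simplified from the buck2_cli_proto message):
struct ClientMetadataSketch {
    key: String,
    value: String,
}

fn client_id(metadata: &[ClientMetadataSketch]) -> Option<&str> {
    metadata
        .iter()
        .find(|m| m.key == "id")
        .map(|m| m.value.as_str())
}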
=> { + match self + .build_count(command.is_success, command_data.variant_name()) + .await + { + Ok(Some(build_count)) => build_count, + Ok(None) => Default::default(), + Err(e) => { + let _ignored = soft_error!("build_count_error", e.into()); + Default::default() } } } + // other events don't count builds + _ => Default::default(), + }; + self.min_attempted_build_count_since_rebase = build_count.attempted_build_count; + self.min_build_count_since_rebase = build_count.successful_build_count; - if action.eligible_for_full_hybrid.unwrap_or_default() { - self.eligible_for_full_hybrid = true; - } + self.command_end = Some(command); + Ok(()) + } + fn handle_command_critical_start( + &mut self, + command: &buck2_data::CommandCriticalStart, + _event: &BuckEvent, + ) -> anyhow::Result<()> { + self.metadata.extend(command.metadata.clone()); + self.time_to_command_critical_section = Some(self.start_time.elapsed()); + Ok(()) + } + fn handle_command_critical_end( + &mut self, + command: &buck2_data::CommandCriticalEnd, + _event: &BuckEvent, + ) -> anyhow::Result<()> { + self.metadata.extend(command.metadata.clone()); + Ok(()) + } - if action.commands.iter().any(|c| { - matches!( - c.status, - Some(buck2_data::command_execution::Status::Failure(..)) - ) - }) { - self.run_command_failure_count += 1; + fn handle_action_execution_start( + &mut self, + _action: &buck2_data::ActionExecutionStart, + _event: &BuckEvent, + ) -> anyhow::Result<()> { + if self.time_to_first_action_execution.is_none() { + self.time_to_first_action_execution = Some(self.start_time.elapsed()); + } + Ok(()) + } + fn handle_action_execution_end( + &mut self, + action: &buck2_data::ActionExecutionEnd, + _event: &BuckEvent, + ) -> anyhow::Result<()> { + if action.kind == buck2_data::ActionKind::Run as i32 { + if action_stats::was_fallback_action(action) { + self.run_fallback_count += 1; } - self.time_to_last_action_execution_end = Some(self.start_time.elapsed()); - - Ok(()) + match last_command_execution_kind::get_last_command_execution_kind(action) { + LastCommandExecutionKind::Local => { + self.run_local_count += 1; + } + LastCommandExecutionKind::LocalWorker => { + self.run_local_count += 1; + self.local_actions_executed_via_worker += 1; + } + LastCommandExecutionKind::Cached => { + self.run_action_cache_count += 1; + } + LastCommandExecutionKind::RemoteDepFileCached => { + self.run_remote_dep_file_cache_count += 1; + } + LastCommandExecutionKind::Remote => { + self.run_remote_count += 1; + } + LastCommandExecutionKind::NoCommand => { + self.run_skipped_count += 1; + } + } } - fn handle_analysis_start( - &mut self, - _analysis: &buck2_data::AnalysisStart, - _event: &BuckEvent, - ) -> anyhow::Result<()> { - self.time_to_first_analysis - .get_or_insert_with(|| self.start_time.elapsed()); - Ok(()) + if action.eligible_for_full_hybrid.unwrap_or_default() { + self.eligible_for_full_hybrid = true; } - fn handle_load_start( - &mut self, - _eval: &buck2_data::LoadBuildFileStart, - _event: &BuckEvent, - ) -> anyhow::Result<()> { - self.time_to_load_first_build_file - .get_or_insert_with(|| self.start_time.elapsed()); - Ok(()) + if action.commands.iter().any(|c| { + matches!( + c.status, + Some(buck2_data::command_execution::Status::Failure(..)) + ) + }) { + self.run_command_failure_count += 1; } - fn handle_executor_stage_start( - &mut self, - executor_stage: &buck2_data::ExecutorStageStart, - _event: &BuckEvent, - ) -> anyhow::Result<()> { - match &executor_stage.stage { - Some(buck2_data::executor_stage_start::Stage::Re(re_stage)) => { - 
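// Editorial note: the match above deliberately degrades every failure mode of the
// build-count lookup to `Default::default()` so a missing or corrupt build-count file
// cannot fail the command; only the `Err` arm is additionally reported as a soft
// error. The same shape in isolation, with a plain log standing in for buck2's
// `soft_error!` macro:
#[derive(Default)]
struct BuildCountSketch {
    attempted_build_count: u64,
    successful_build_count: u64,
}

fn resolve_build_count(lookup: anyhow::Result<Option<BuildCountSketch>>) -> BuildCountSketch {
    match lookup {
        Ok(Some(count)) => count,
        Ok(None) => BuildCountSketch::default(),
        Err(e) => {
            eprintln!("build_count_error: {:#}", e); // stand-in for soft_error!(...)
            BuildCountSketch::default()
        }
    }
}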
match &re_stage.stage { - Some(buck2_data::re_stage::Stage::Execute(_)) => { - self.time_to_first_command_execution_start - .get_or_insert_with(|| self.start_time.elapsed()); - } - _ => {} - } + self.time_to_last_action_execution_end = Some(self.start_time.elapsed()); + + Ok(()) + } + + fn handle_analysis_start( + &mut self, + _analysis: &buck2_data::AnalysisStart, + _event: &BuckEvent, + ) -> anyhow::Result<()> { + self.time_to_first_analysis + .get_or_insert_with(|| self.start_time.elapsed()); + Ok(()) + } + + fn handle_load_start( + &mut self, + _eval: &buck2_data::LoadBuildFileStart, + _event: &BuckEvent, + ) -> anyhow::Result<()> { + self.time_to_load_first_build_file + .get_or_insert_with(|| self.start_time.elapsed()); + Ok(()) + } + + fn handle_executor_stage_start( + &mut self, + executor_stage: &buck2_data::ExecutorStageStart, + _event: &BuckEvent, + ) -> anyhow::Result<()> { + match &executor_stage.stage { + Some(buck2_data::executor_stage_start::Stage::Re(re_stage)) => match &re_stage.stage { + Some(buck2_data::re_stage::Stage::Execute(_)) => { + self.time_to_first_command_execution_start + .get_or_insert_with(|| self.start_time.elapsed()); } - Some(buck2_data::executor_stage_start::Stage::Local(local_stage)) => { - match &local_stage.stage { - Some(buck2_data::local_stage::Stage::Execute(_)) => { - self.time_to_first_command_execution_start - .get_or_insert_with(|| self.start_time.elapsed()); - } - _ => {} + _ => {} + }, + Some(buck2_data::executor_stage_start::Stage::Local(local_stage)) => { + match &local_stage.stage { + Some(buck2_data::local_stage::Stage::Execute(_)) => { + self.time_to_first_command_execution_start + .get_or_insert_with(|| self.start_time.elapsed()); } + _ => {} } - _ => {} } - Ok(()) - } - - fn handle_cache_upload_end( - &mut self, - cache_upload: &buck2_data::CacheUploadEnd, - _event: &BuckEvent, - ) -> anyhow::Result<()> { - if cache_upload.success { - self.cache_upload_count += 1; - } - self.cache_upload_attempt_count += 1; - Ok(()) + _ => {} } + Ok(()) + } - fn handle_re_session_created( - &mut self, - session: &buck2_data::RemoteExecutionSessionCreated, - _event: &BuckEvent, - ) -> anyhow::Result<()> { - self.re_session_id = Some(session.session_id.clone()); - self.re_experiment_name = Some(session.experiment_name.clone()); - Ok(()) + fn handle_cache_upload_end( + &mut self, + cache_upload: &buck2_data::CacheUploadEnd, + _event: &BuckEvent, + ) -> anyhow::Result<()> { + if cache_upload.success { + self.cache_upload_count += 1; } + self.cache_upload_attempt_count += 1; + Ok(()) + } - fn handle_materialization_end( - &mut self, - materialization: &buck2_data::MaterializationEnd, - _event: &BuckEvent, - ) -> anyhow::Result<()> { - self.materialization_output_size += materialization.total_bytes; - Ok(()) + fn handle_dep_file_upload_end( + &mut self, + upload: &buck2_data::DepFileUploadEnd, + _event: &BuckEvent, + ) -> anyhow::Result<()> { + if upload.success { + self.dep_file_upload_count += 1; } + self.dep_file_upload_attempt_count += 1; + Ok(()) + } - fn handle_materializer_state_info( - &mut self, - materializer_state_info: &buck2_data::MaterializerStateInfo, - ) -> anyhow::Result<()> { - self.initial_materializer_entries_from_sqlite = - Some(materializer_state_info.num_entries_from_sqlite); - Ok(()) - } + fn handle_re_session_created( + &mut self, + session: &buck2_data::RemoteExecutionSessionCreated, + _event: &BuckEvent, + ) -> anyhow::Result<()> { + self.re_session_id = Some(session.session_id.clone()); + self.re_experiment_name = 
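// Editorial note: every "time to first ..." field in this file uses the same idiom,
// visible in the handlers above: `Option::get_or_insert_with` stores the elapsed time
// on the first occurrence only, so all later events are no-ops. Minimal form:
use std::time::{Duration, Instant};

struct FirstSeenSketch {
    start: Instant,
    time_to_first_analysis: Option<Duration>,
}

impl FirstSeenSketch {
    fn on_analysis_start(&mut self) {
        self.time_to_first_analysis
            .get_or_insert_with(|| self.start.elapsed());
    }
}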
Some(session.experiment_name.clone()); + Ok(()) + } - fn handle_bxl_ensure_artifacts_end( - &mut self, - _bxl_ensure_artifacts_end: &buck2_data::BxlEnsureArtifactsEnd, - event: &BuckEvent, - ) -> anyhow::Result<()> { - let bxl_ensure_artifacts_end = match event.data() { - buck2_data::buck_event::Data::SpanEnd(ref end) => end.clone(), - _ => { - return Err(anyhow::anyhow!( - "handle_bxl_ensure_artifacts_end was passed a BxlEnsureArtifacts not contained in a SpanEndEvent" - )); - } - }; + fn handle_materialization_end( + &mut self, + materialization: &buck2_data::MaterializationEnd, + _event: &BuckEvent, + ) -> anyhow::Result<()> { + self.materialization_output_size += materialization.total_bytes; + Ok(()) + } - self.bxl_ensure_artifacts_duration = bxl_ensure_artifacts_end.duration; - Ok(()) - } + fn handle_materializer_state_info( + &mut self, + materializer_state_info: &buck2_data::MaterializerStateInfo, + ) -> anyhow::Result<()> { + self.initial_materializer_entries_from_sqlite = + Some(materializer_state_info.num_entries_from_sqlite); + Ok(()) + } - fn handle_test_discovery( - &mut self, - test_info: &buck2_data::TestDiscovery, - _event: &BuckEvent, - ) -> anyhow::Result<()> { - match &test_info.data { - Some(buck2_data::test_discovery::Data::Session(session_info)) => { - self.test_info = Some(session_info.info.clone()); - } - Some(buck2_data::test_discovery::Data::Tests(..)) | None => {} + fn handle_bxl_ensure_artifacts_end( + &mut self, + _bxl_ensure_artifacts_end: &buck2_data::BxlEnsureArtifactsEnd, + event: &BuckEvent, + ) -> anyhow::Result<()> { + let bxl_ensure_artifacts_end = match event.data() { + buck2_data::buck_event::Data::SpanEnd(ref end) => end.clone(), + _ => { + return Err(anyhow::anyhow!( + "handle_bxl_ensure_artifacts_end was passed a BxlEnsureArtifacts not contained in a SpanEndEvent" + )); } + }; + + self.bxl_ensure_artifacts_duration = bxl_ensure_artifacts_end.duration; + Ok(()) + } + + fn handle_install_finished( + &mut self, + install_finished: &buck2_data::InstallFinished, + ) -> anyhow::Result<()> { + self.install_duration = install_finished.duration.clone(); + self.install_device_metadata = install_finished.device_metadata.clone(); + Ok(()) + } + + fn handle_system_info(&mut self, system_info: &buck2_data::SystemInfo) -> anyhow::Result<()> { + self.system_info = system_info.clone(); + Ok(()) + } - Ok(()) + fn handle_test_discovery( + &mut self, + test_info: &buck2_data::TestDiscovery, + _event: &BuckEvent, + ) -> anyhow::Result<()> { + match &test_info.data { + Some(buck2_data::test_discovery::Data::Session(session_info)) => { + self.test_info = Some(session_info.info.clone()); + } + Some(buck2_data::test_discovery::Data::Tests(..)) | None => {} } - fn handle_build_graph_info( - &mut self, - info: &buck2_data::BuildGraphExecutionInfo, - _event: &BuckEvent, - ) -> anyhow::Result<()> { - let mut duration = Duration::default(); + Ok(()) + } - for node in &info.critical_path { - if let Some(d) = &node.duration { - duration += d.try_into_duration()?; - } + fn handle_test_discovery_start( + &mut self, + _test_discovery: &buck2_data::TestDiscoveryStart, + _event: &BuckEvent, + ) -> anyhow::Result<()> { + self.time_to_first_test_discovery + .get_or_insert_with(|| self.start_time.elapsed()); + Ok(()) + } + + fn handle_build_graph_info( + &mut self, + info: &buck2_data::BuildGraphExecutionInfo, + _event: &BuckEvent, + ) -> anyhow::Result<()> { + let mut duration = Duration::default(); + + for node in &info.critical_path { + if let Some(d) = &node.duration { + duration += 
d.try_into_duration()?; } + } - for node in &info.critical_path2 { - if let Some(d) = &node.duration { - duration += d.try_into_duration()?; - } + for node in &info.critical_path2 { + if let Some(d) = &node.duration { + duration += d.try_into_duration()?; } + } - self.critical_path_duration = Some(duration); - self.critical_path_backend = info.backend_name.clone(); - Ok(()) + self.critical_path_duration = Some(duration); + self.critical_path_backend = info.backend_name.clone(); + Ok(()) + } + + fn handle_io_provider_info( + &mut self, + io_provider_info: &buck2_data::IoProviderInfo, + ) -> anyhow::Result<()> { + self.eden_version = io_provider_info.eden_version.to_owned(); + Ok(()) + } + + fn handle_tag(&mut self, tag: &buck2_data::TagEvent) -> anyhow::Result<()> { + self.tags.extend(tag.tags.iter().cloned()); + Ok(()) + } + + fn handle_concurrent_commands( + &mut self, + concurrent_commands: &buck2_data::ConcurrentCommands, + ) -> anyhow::Result<()> { + concurrent_commands.trace_ids.iter().for_each(|c| { + self.concurrent_command_ids.insert(c.clone()); + }); + self.concurrent_commands = + self.concurrent_commands || concurrent_commands.trace_ids.len() > 1; + Ok(()) + } + + fn handle_snapshot( + &mut self, + update: &buck2_data::Snapshot, + event: &BuckEvent, + ) -> anyhow::Result<()> { + self.max_malloc_bytes_active = + max(self.max_malloc_bytes_active, update.malloc_bytes_active); + self.max_malloc_bytes_allocated = max( + self.max_malloc_bytes_allocated, + update.malloc_bytes_allocated, + ); + if self.first_snapshot.is_none() { + self.first_snapshot = Some(update.clone()); + } else { + self.last_snapshot = Some(update.clone()); + } + if self.initial_sink_success_count.is_none() { + self.initial_sink_success_count = update.sink_successes; } + if self.initial_sink_failure_count.is_none() { + self.initial_sink_failure_count = update.sink_failures; + } + if self.initial_sink_dropped_count.is_none() { + self.initial_sink_dropped_count = update.sink_dropped; + } + if self.initial_sink_bytes_written.is_none() { + self.initial_sink_bytes_written = update.sink_bytes_written; + } + self.sink_max_buffer_depth = max(self.sink_max_buffer_depth, update.sink_buffer_depth()); - fn handle_io_provider_info( - &mut self, - io_provider_info: &buck2_data::IoProviderInfo, - ) -> anyhow::Result<()> { - self.eden_version = io_provider_info.eden_version.to_owned(); - Ok(()) + if self.initial_re_upload_bytes.is_none() { + self.initial_re_upload_bytes = Some(update.re_upload_bytes); + } + if self.initial_re_download_bytes.is_none() { + self.initial_re_download_bytes = Some(update.re_download_bytes); } - fn handle_exit_when_different_state( - &mut self, - _exit_when_different_state: &buck2_data::ExitWhenDifferentState, - ) -> anyhow::Result<()> { - self.exit_when_different_state = true; - Ok(()) + if self.initial_zdb_download_queries.is_none() { + self.initial_zdb_download_queries = Some(update.zdb_download_queries); + } + if self.initial_zdb_download_bytes.is_none() { + self.initial_zdb_download_bytes = Some(update.zdb_download_bytes); + } + if self.initial_zdb_upload_queries.is_none() { + self.initial_zdb_upload_queries = Some(update.zdb_upload_queries); + } + if self.initial_zdb_upload_bytes.is_none() { + self.initial_zdb_upload_bytes = Some(update.zdb_upload_bytes); } - fn handle_tag(&mut self, tag: &buck2_data::TagEvent) -> anyhow::Result<()> { - self.tags.extend(tag.tags.iter().cloned()); - Ok(()) + if self.initial_zgateway_download_queries.is_none() { + self.initial_zgateway_download_queries = 
Some(update.zgateway_download_queries); + } + if self.initial_zgateway_download_bytes.is_none() { + self.initial_zgateway_download_bytes = Some(update.zgateway_download_bytes); + } + if self.initial_zgateway_upload_queries.is_none() { + self.initial_zgateway_upload_queries = Some(update.zgateway_upload_queries); + } + if self.initial_zgateway_upload_bytes.is_none() { + self.initial_zgateway_upload_bytes = Some(update.zgateway_upload_bytes); } - fn handle_concurrent_commands( - &mut self, - concurrent_commands: &buck2_data::ConcurrentCommands, - ) -> anyhow::Result<()> { - concurrent_commands.trace_ids.iter().for_each(|c| { - self.concurrent_command_ids.insert(c.clone()); - }); - Ok(()) + if self.initial_manifold_download_queries.is_none() { + self.initial_manifold_download_queries = Some(update.manifold_download_queries); + } + if self.initial_manifold_download_bytes.is_none() { + self.initial_manifold_download_bytes = Some(update.manifold_download_bytes); + } + if self.initial_manifold_upload_queries.is_none() { + self.initial_manifold_upload_queries = Some(update.manifold_upload_queries); + } + if self.initial_manifold_upload_bytes.is_none() { + self.initial_manifold_upload_bytes = Some(update.manifold_upload_bytes); } - fn handle_snapshot( - &mut self, - update: &buck2_data::Snapshot, - _event: &BuckEvent, - ) -> anyhow::Result<()> { - self.max_malloc_bytes_active = - cmp::max(self.max_malloc_bytes_active, update.malloc_bytes_active); - self.max_malloc_bytes_allocated = cmp::max( - self.max_malloc_bytes_allocated, - update.malloc_bytes_allocated, - ); - if self.first_snapshot.is_none() { - self.first_snapshot = Some(update.clone()); - } else { - self.last_snapshot = Some(update.clone()); - } - if self.initial_sink_success_count.is_none() { - self.initial_sink_success_count = update.sink_successes; - } - if self.initial_sink_failure_count.is_none() { - self.initial_sink_failure_count = update.sink_failures; - } - if self.initial_sink_dropped_count.is_none() { - self.initial_sink_dropped_count = update.sink_dropped; - } - self.sink_max_buffer_depth = - cmp::max(self.sink_max_buffer_depth, update.sink_buffer_depth()); + if self.initial_hedwig_download_queries.is_none() { + self.initial_hedwig_download_queries = Some(update.hedwig_download_queries); + } + if self.initial_hedwig_download_bytes.is_none() { + self.initial_hedwig_download_bytes = Some(update.hedwig_download_bytes); + } + if self.initial_hedwig_upload_queries.is_none() { + self.initial_hedwig_upload_queries = Some(update.hedwig_upload_queries); + } + if self.initial_hedwig_upload_bytes.is_none() { + self.initial_hedwig_upload_bytes = Some(update.hedwig_upload_bytes); + } - if self.initial_re_upload_bytes.is_none() { - self.initial_re_upload_bytes = Some(update.re_upload_bytes); - } - if self.initial_re_download_bytes.is_none() { - self.initial_re_download_bytes = Some(update.re_download_bytes); - } + for s in self.re_max_download_speeds.iter_mut() { + s.update(event.timestamp(), update.re_download_bytes); + } - Ok(()) + for s in self.re_max_upload_speeds.iter_mut() { + s.update(event.timestamp(), update.re_upload_bytes); } - fn handle_file_watcher_end( - &mut self, - file_watcher: &buck2_data::FileWatcherEnd, - _event: &BuckEvent, - ) -> anyhow::Result<()> { - // We might receive this event twice, so ... deal with it by merging the two. 
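// Editorial note: each `initial_*` field above is latched from the first snapshot that
// reports it; combined with the final snapshot, the record can then report a
// per-invocation delta (see `calculate_diff_if_some` further down). The latch alone:
fn latch_initial(initial: &mut Option<u64>, observed: u64) {
    if initial.is_none() {
        *initial = Some(observed);
    }
}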
- // See: https://fb.workplace.com/groups/buck2dev/permalink/3396726613948720/ - self.file_watcher_stats = merge_file_watcher_stats( - self.file_watcher_stats.take(), - file_watcher.stats.clone(), - ); + self.re_avg_download_speed + .update(event.timestamp(), update.re_download_bytes); + + self.re_avg_upload_speed + .update(event.timestamp(), update.re_upload_bytes); - if let Some(stats) = &file_watcher.stats { - self.watchman_version = stats.watchman_version.to_owned(); + self.peak_process_memory_bytes = + max(self.peak_process_memory_bytes, process_memory(update)); + self.peak_used_disk_space_bytes = + max(self.peak_used_disk_space_bytes, update.used_disk_space_bytes); + + for stat in update.network_interface_stats.values() { + if stat.rx_bytes > 0 || stat.tx_bytes > 0 { + self.active_networks_kinds.insert(stat.network_kind.into()); } - Ok(()) } - fn handle_parsed_target_patterns( - &mut self, - patterns: &buck2_data::ParsedTargetPatterns, - ) -> anyhow::Result<()> { - self.parsed_target_patterns = Some(patterns.clone()); - Ok(()) + Ok(()) + } + + fn handle_file_watcher_end( + &mut self, + file_watcher: &buck2_data::FileWatcherEnd, + duration: Option<&prost_types::Duration>, + _event: &BuckEvent, + ) -> anyhow::Result<()> { + // We might receive this event twice, so ... deal with it by merging the two. + // See: https://fb.workplace.com/groups/buck2dev/permalink/3396726613948720/ + self.file_watcher_stats = + merge_file_watcher_stats(self.file_watcher_stats.take(), file_watcher.stats.clone()); + if let Some(duration) = duration.cloned().and_then(|x| Duration::try_from(x).ok()) { + *self.file_watcher_duration.get_or_insert_default() += duration; + } + if let Some(stats) = &file_watcher.stats { + self.watchman_version = stats.watchman_version.to_owned(); } + Ok(()) + } - fn handle_structured_error( - &mut self, - err: &buck2_data::StructuredError, - ) -> anyhow::Result<()> { - if let Some(soft_error_category) = err.soft_error_category.as_ref() { - self.soft_error_categories - .insert(soft_error_category.to_owned()); + fn handle_parsed_target_patterns( + &mut self, + patterns: &buck2_data::ParsedTargetPatterns, + ) -> anyhow::Result<()> { + self.parsed_target_patterns = Some(patterns.clone()); + Ok(()) + } - if err.daemon_in_memory_state_is_corrupted { - self.daemon_in_memory_state_is_corrupted = true; - } + fn handle_structured_error(&mut self, err: &buck2_data::StructuredError) -> anyhow::Result<()> { + if let Some(soft_error_category) = err.soft_error_category.as_ref() { + self.soft_error_categories + .insert(soft_error_category.to_owned()); - if err.daemon_materializer_state_is_corrupted { - self.daemon_materializer_state_is_corrupted = true; - } + if err.daemon_in_memory_state_is_corrupted { + self.daemon_in_memory_state_is_corrupted = true; } - Ok(()) + if err.daemon_materializer_state_is_corrupted { + self.daemon_materializer_state_is_corrupted = true; + } } - fn handle_dice_block_concurrent_command_end( - &mut self, - _command: &buck2_data::DiceBlockConcurrentCommandEnd, - event: &BuckEvent, - ) -> anyhow::Result<()> { - let block_concurrent_command = match event.data() { - buck2_data::buck_event::Data::SpanEnd(ref end) => end.clone(), - _ => { - return Err(anyhow::anyhow!( - "handle_dice_block_concurrent_command_end was passed a DiceBlockConcurrentCommandEnd not contained in a SpanEndEvent" - )); - } - }; + Ok(()) + } - let mut duration = self - .concurrent_command_blocking_duration - .unwrap_or_default(); - if let Some(d) = &block_concurrent_command.duration { - duration +=
d.try_into_duration()?; + fn handle_dice_block_concurrent_command_end( + &mut self, + _command: &buck2_data::DiceBlockConcurrentCommandEnd, + event: &BuckEvent, + ) -> anyhow::Result<()> { + let block_concurrent_command = match event.data() { + buck2_data::buck_event::Data::SpanEnd(ref end) => end.clone(), + _ => { + return Err(anyhow::anyhow!( + "handle_dice_block_concurrent_command_end was passed a DiceBlockConcurrentCommandEnd not contained in a SpanEndEvent" + )); } + }; - self.concurrent_command_blocking_duration = Some(duration); - - Ok(()) + let mut duration = self + .concurrent_command_blocking_duration + .unwrap_or_default(); + if let Some(d) = &block_concurrent_command.duration { + duration += d.try_into_duration()?; } - fn handle_dice_cleanup_end( - &mut self, - _command: &buck2_data::DiceCleanupEnd, - event: &BuckEvent, - ) -> anyhow::Result<()> { - let dice_cleanup_end = match event.data() { - buck2_data::buck_event::Data::SpanEnd(ref end) => end.clone(), - _ => { - return Err(anyhow::anyhow!( - "handle_dice_cleanup_end was passed a DiceCleanupEnd not contained in a SpanEndEvent" - )); - } - }; + self.concurrent_command_blocking_duration = Some(duration); - let mut duration = self - .concurrent_command_blocking_duration - .unwrap_or_default(); - if let Some(d) = &dice_cleanup_end.duration { - duration += d.try_into_duration()?; - } + Ok(()) + } - self.concurrent_command_blocking_duration = Some(duration); + fn handle_dice_cleanup_end( + &mut self, + _command: &buck2_data::DiceCleanupEnd, + event: &BuckEvent, + ) -> anyhow::Result<()> { + let dice_cleanup_end = match event.data() { + buck2_data::buck_event::Data::SpanEnd(ref end) => end.clone(), + _ => { + return Err(anyhow::anyhow!( + "handle_dice_cleanup_end was passed a DiceCleanupEnd not contained in a SpanEndEvent" + )); + } + }; - Ok(()) + let mut duration = self + .concurrent_command_blocking_duration + .unwrap_or_default(); + if let Some(d) = &dice_cleanup_end.duration { + duration += d.try_into_duration()?; } - async fn handle_event(&mut self, event: &Arc<BuckEvent>) -> anyhow::Result<()> { - // TODO(nga): query now once in `EventsCtx`. - let now = SystemTime::now(); - if let Ok(delay) = now.duration_since(event.timestamp()) { - self.max_event_client_delay = Some(cmp::max( - self.max_event_client_delay.unwrap_or_default(), - delay, - )); - } - self.event_count += 1; + self.concurrent_command_blocking_duration = Some(duration); - match event.data() { - buck2_data::buck_event::Data::SpanStart(ref start) => { - match start.data.as_ref().context("Missing `start`")? { - buck2_data::span_start_event::Data::Command(command) => { - self.handle_command_start(command, event) - } - buck2_data::span_start_event::Data::CommandCritical(command) => { - self.handle_command_critical_start(command, event) - } - buck2_data::span_start_event::Data::ActionExecution(action) => { - self.handle_action_execution_start(action, event) - } - buck2_data::span_start_event::Data::Analysis(analysis) => { - self.handle_analysis_start(analysis, event) - } - buck2_data::span_start_event::Data::Load(eval) => { - self.handle_load_start(eval, event) - } - buck2_data::span_start_event::Data::ExecutorStage(stage) => { - self.handle_executor_stage_start(stage, event) - } - _ => Ok(()), + Ok(()) + } + + async fn handle_event(&mut self, event: &Arc<BuckEvent>) -> anyhow::Result<()> { + // TODO(nga): query now once in `EventsCtx`.
+ let now = SystemTime::now(); + if let Ok(delay) = now.duration_since(event.timestamp()) { + self.max_event_client_delay = + Some(max(self.max_event_client_delay.unwrap_or_default(), delay)); + } + self.event_count += 1; + + match event.data() { + buck2_data::buck_event::Data::SpanStart(ref start) => { + match start.data.as_ref().context("Missing `start`")? { + buck2_data::span_start_event::Data::Command(command) => { + self.handle_command_start(command, event) + } + buck2_data::span_start_event::Data::CommandCritical(command) => { + self.handle_command_critical_start(command, event) + } + buck2_data::span_start_event::Data::ActionExecution(action) => { + self.handle_action_execution_start(action, event) + } + buck2_data::span_start_event::Data::Analysis(analysis) => { + self.handle_analysis_start(analysis, event) } + buck2_data::span_start_event::Data::Load(eval) => { + self.handle_load_start(eval, event) + } + buck2_data::span_start_event::Data::ExecutorStage(stage) => { + self.handle_executor_stage_start(stage, event) + } + buck2_data::span_start_event::Data::TestDiscovery(test_discovery) => { + self.handle_test_discovery_start(test_discovery, event) + } + _ => Ok(()), } - buck2_data::buck_event::Data::SpanEnd(ref end) => { - match end.data.as_ref().context("Missing `end`")? { - buck2_data::span_end_event::Data::Command(command) => { - self.handle_command_end(command, event).await - } - buck2_data::span_end_event::Data::CommandCritical(command) => { - self.handle_command_critical_end(command, event) - } - buck2_data::span_end_event::Data::ActionExecution(action) => { - self.handle_action_execution_end(action, event) - } - buck2_data::span_end_event::Data::FileWatcher(file_watcher) => { - self.handle_file_watcher_end(file_watcher, event) - } - buck2_data::span_end_event::Data::CacheUpload(cache_upload) => { - self.handle_cache_upload_end(cache_upload, event) - } - buck2_data::span_end_event::Data::Materialization(materialization) => { - self.handle_materialization_end(materialization, event) - } - buck2_data::span_end_event::Data::Analysis(..) => { - self.analysis_count += 1; - Ok(()) - } - buck2_data::span_end_event::Data::DiceBlockConcurrentCommand( - block_concurrent_command, - ) => self.handle_dice_block_concurrent_command_end( - block_concurrent_command, - event, - ), - buck2_data::span_end_event::Data::DiceCleanup(dice_cleanup_end) => { - self.handle_dice_cleanup_end(dice_cleanup_end, event) - } - buck2_data::span_end_event::Data::BxlEnsureArtifacts( - _bxl_ensure_artifacts, - ) => self.handle_bxl_ensure_artifacts_end(_bxl_ensure_artifacts, event), - _ => Ok(()), + } + buck2_data::buck_event::Data::SpanEnd(ref end) => { + match end.data.as_ref().context("Missing `end`")? 
{ + buck2_data::span_end_event::Data::Command(command) => { + self.handle_command_end(command, event).await + } + buck2_data::span_end_event::Data::CommandCritical(command) => { + self.handle_command_critical_end(command, event) + } + buck2_data::span_end_event::Data::ActionExecution(action) => { + self.handle_action_execution_end(action, event) + } + buck2_data::span_end_event::Data::FileWatcher(file_watcher) => { + self.handle_file_watcher_end(file_watcher, end.duration.as_ref(), event) + } + buck2_data::span_end_event::Data::CacheUpload(cache_upload) => { + self.handle_cache_upload_end(cache_upload, event) + } + buck2_data::span_end_event::Data::DepFileUpload(dep_file_upload) => { + self.handle_dep_file_upload_end(dep_file_upload, event) + } + buck2_data::span_end_event::Data::Materialization(materialization) => { + self.handle_materialization_end(materialization, event) + } + buck2_data::span_end_event::Data::Analysis(..) => { + self.analysis_count += 1; + Ok(()) + } + buck2_data::span_end_event::Data::DiceBlockConcurrentCommand( + block_concurrent_command, + ) => self + .handle_dice_block_concurrent_command_end(block_concurrent_command, event), + buck2_data::span_end_event::Data::DiceCleanup(dice_cleanup_end) => { + self.handle_dice_cleanup_end(dice_cleanup_end, event) } + buck2_data::span_end_event::Data::BxlEnsureArtifacts(_bxl_ensure_artifacts) => { + self.handle_bxl_ensure_artifacts_end(_bxl_ensure_artifacts, event) + } + _ => Ok(()), } - buck2_data::buck_event::Data::Instant(ref instant) => { - match instant.data.as_ref().context("Missing `data`")? { - buck2_data::instant_event::Data::ReSession(session) => { - self.handle_re_session_created(session, event) - } - buck2_data::instant_event::Data::BuildGraphInfo(info) => { - self.handle_build_graph_info(info, event) - } - buck2_data::instant_event::Data::TestDiscovery(discovery) => { - self.handle_test_discovery(discovery, event) - } - buck2_data::instant_event::Data::Snapshot(result) => { - self.handle_snapshot(result, event) - } - buck2_data::instant_event::Data::TagEvent(tag) => self.handle_tag(tag), - buck2_data::instant_event::Data::IoProviderInfo(io_provider_info) => { - self.handle_io_provider_info(io_provider_info) - } - buck2_data::instant_event::Data::TargetPatterns(tag) => { - self.handle_parsed_target_patterns(tag) - } - buck2_data::instant_event::Data::MaterializerStateInfo( - materializer_state, - ) => self.handle_materializer_state_info(materializer_state), - buck2_data::instant_event::Data::StructuredError(err) => { - self.handle_structured_error(err) - } - buck2_data::instant_event::Data::ExitWhenDifferentState( - exit_when_different_state, - ) => self.handle_exit_when_different_state(exit_when_different_state), - buck2_data::instant_event::Data::RestartConfiguration(conf) => { - self.enable_restarter = conf.enable_restarter; - Ok(()) + } + buck2_data::buck_event::Data::Instant(ref instant) => { + match instant.data.as_ref().context("Missing `data`")? 
{ + buck2_data::instant_event::Data::ReSession(session) => { + self.handle_re_session_created(session, event) + } + buck2_data::instant_event::Data::BuildGraphInfo(info) => { + self.handle_build_graph_info(info, event) + } + buck2_data::instant_event::Data::TestDiscovery(discovery) => { + self.handle_test_discovery(discovery, event) + } + buck2_data::instant_event::Data::Snapshot(result) => { + self.handle_snapshot(result, event) + } + buck2_data::instant_event::Data::TagEvent(tag) => self.handle_tag(tag), + buck2_data::instant_event::Data::IoProviderInfo(io_provider_info) => { + self.handle_io_provider_info(io_provider_info) + } + buck2_data::instant_event::Data::TargetPatterns(tag) => { + self.handle_parsed_target_patterns(tag) + } + buck2_data::instant_event::Data::MaterializerStateInfo(materializer_state) => { + self.handle_materializer_state_info(materializer_state) + } + buck2_data::instant_event::Data::StructuredError(err) => { + self.handle_structured_error(err) + } + buck2_data::instant_event::Data::RestartConfiguration(conf) => { + self.enable_restarter = conf.enable_restarter; + Ok(()) + } + buck2_data::instant_event::Data::ConcurrentCommands(concurrent_commands) => { + self.handle_concurrent_commands(concurrent_commands) + } + buck2_data::instant_event::Data::CellConfigDiff(conf) => { + if conf.new_config_indicator_only { + self.has_new_buckconfigs = true; + return Ok(()); } - buck2_data::instant_event::Data::ConcurrentCommands( - concurrent_commands, - ) => self.handle_concurrent_commands(concurrent_commands), - _ => Ok(()), + self.buckconfig_diff_count = Some( + self.buckconfig_diff_count.unwrap_or_default() + conf.config_diff_count, + ); + self.buckconfig_diff_size = Some( + self.buckconfig_diff_size.unwrap_or_default() + conf.config_diff_size, + ); + Ok(()) + } + buck2_data::instant_event::Data::InstallFinished(install_finished) => { + self.handle_install_finished(install_finished) } + buck2_data::instant_event::Data::SystemInfo(system_info) => { + self.handle_system_info(system_info) + } + buck2_data::instant_event::Data::TargetCfg(target_cfg) => { + self.target_cfg = Some(target_cfg.clone()); + Ok(()) + } + buck2_data::instant_event::Data::VersionControlRevision(revision) => { + self.version_control_revision = Some(revision.clone()); + Ok(()) + } + _ => Ok(()), } - buck2_data::buck_event::Data::Record(_) => Ok(()), } + buck2_data::buck_event::Data::Record(_) => Ok(()), } } +} - impl<'a> Drop for InvocationRecorder<'a> { - fn drop(&mut self) { - if let Some(fut) = self.send_it() { - self.async_cleanup_context - .register("sending invocation to Scribe", fut.boxed()); - } - } +fn process_error_report(error: buck2_data::ErrorReport) -> buck2_data::ProcessedErrorReport { + let best_tag = best_tag(error.tags.iter().filter_map(|tag| + // This should never fail, but it is safer to just ignore incorrect integers. 
+ ErrorTag::from_i32(*tag))) + .map(|t| t.as_str_name()) + .unwrap_or(ERROR_TAG_UNCLASSIFIED); + buck2_data::ProcessedErrorReport { + tier: error.tier, + message: error.message, + telemetry_message: error.telemetry_message, + source_location: error.source_location, + tags: error + .tags + .iter() + .copied() + .filter_map(buck2_data::error::ErrorTag::from_i32) + .map(|t| t.as_str_name().to_owned()) + .collect(), + best_tag: Some(best_tag.to_owned()), + sub_error_categories: error.sub_error_categories, + category_key: error.category_key, } +} - #[async_trait] - impl<'a> EventSubscriber for InvocationRecorder<'a> { - async fn handle_events(&mut self, events: &[Arc<BuckEvent>]) -> anyhow::Result<()> { - for event in events { - self.handle_event(event).await?; - } - Ok(()) +impl<'a> Drop for InvocationRecorder<'a> { + fn drop(&mut self) { + if let Some(fut) = self.send_it() { + self.async_cleanup_context + .register("sending invocation to Scribe", fut.boxed()); } + } +} - async fn handle_console_interaction(&mut self, _c: char) -> anyhow::Result<()> { - self.tags.push("console-interaction".to_owned()); - Ok(()) +#[async_trait] +impl<'a> EventSubscriber for InvocationRecorder<'a> { + async fn handle_events(&mut self, events: &[Arc<BuckEvent>]) -> anyhow::Result<()> { + for event in events { + self.handle_event(event).await?; } + Ok(()) + } - async fn handle_command_result( - &mut self, - _result: &buck2_cli_proto::CommandResult, - ) -> anyhow::Result<()> { - self.has_command_result = true; - Ok(()) + async fn handle_console_interaction( + &mut self, + c: &Option<SuperConsoleToggle>, + ) -> anyhow::Result<()> { + match c { + Some(c) => self + .tags + .push(format!("superconsole-toggle:{}", c.key()).to_owned()), + None => {} } + Ok(()) + } - async fn exit(&mut self) -> anyhow::Result<()> { - self.has_end_of_stream = true; - Ok(()) + async fn handle_command_result( + &mut self, + result: &buck2_cli_proto::CommandResult, + ) -> anyhow::Result<()> { + self.has_command_result = true; + match &result.result { + Some(command_result::Result::BuildResponse(res)) => { + let mut built_rule_type_names: Vec<String> = res + .build_targets + .iter() + .map(|t| { + t.target_rule_type_name + .clone() + .unwrap_or_else(|| "NULL".to_owned()) + }) + .unique_by(|x| x.clone()) + .collect(); + built_rule_type_names.sort(); + self.target_rule_type_names = built_rule_type_names; + } + _ => {} } + Ok(()) + } + + async fn handle_error(&mut self, error: &buck2_error::Error) -> anyhow::Result<()> { + self.client_errors.push(error.clone()); + Ok(()) + } - fn as_error_observer(&self) -> Option<&dyn ErrorObserver> { - Some(self) + async fn handle_tailer_stderr(&mut self, stderr: &str) -> anyhow::Result<()> { + if self.server_stderr.len() > 100_000 { + // Proper truncation of the head is tricky, and for practical purposes + // discarding the whole thing is fine. + self.server_stderr.clear(); } - fn handle_daemon_connection_failure(&mut self) { - self.daemon_connection_failure = true; + if !stderr.is_empty() { + // We don't know yet whether we will need stderr or not, + // so we capture it unconditionally.
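// Editorial sketch: `best_tag` (used by `process_error_report` above) is defined
// elsewhere in buck2; the code here only relies on it choosing one representative tag
// from a list. Assuming the ranking is a total order over tags — an assumption, not
// the confirmed buck2 ranking — the selection reduces to a plain `max`:
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum TagSketch {
    Unclassified, // least informative, ranked lowest
    Environment,
    Infra,
    User, // most actionable, ranked highest
}

fn best_tag_sketch(tags: impl IntoIterator<Item = TagSketch>) -> Option<TagSketch> {
    tags.into_iter().max()
}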
+ self.server_stderr.push_str(stderr); + self.server_stderr.push('\n'); } + + Ok(()) } - impl<'a> ErrorObserver for InvocationRecorder<'a> { - fn error_cause(&self) -> ErrorCause { - if self.exit_when_different_state { - // User wants to immediately exit concurrent commands with different states - return ErrorCause::DaemonIsBusy; - } else if self.run_command_failure_count > 0 { - // Action fails likely because of user-defined commands in the action - return ErrorCause::User; - } + async fn exit(&mut self) -> anyhow::Result<()> { + self.has_end_of_stream = true; + Ok(()) + } - ErrorCause::Unknown - } + fn as_error_observer(&self) -> Option<&dyn ErrorObserver> { + Some(self) + } - fn daemon_in_memory_state_is_corrupted(&self) -> bool { - self.daemon_in_memory_state_is_corrupted - } + fn handle_daemon_connection_failure(&mut self, error: &buck2_error::Error) { + self.daemon_connection_failure = true; + self.client_errors.push(error.clone()); + } - fn daemon_materializer_state_is_corrupted(&self) -> bool { - self.daemon_materializer_state_is_corrupted - } + fn handle_daemon_started(&mut self, daemon_was_started: buck2_data::DaemonWasStartedReason) { + self.daemon_was_started = Some(daemon_was_started); + } +} - fn restarter_is_enabled(&self) -> bool { - self.enable_restarter - } +impl<'a> ErrorObserver for InvocationRecorder<'a> { + fn daemon_in_memory_state_is_corrupted(&self) -> bool { + self.daemon_in_memory_state_is_corrupted } - fn calculate_diff_if_some(a: &Option<u64>, b: &Option<u64>) -> Option<u64> { - match (a, b) { - (Some(av), Some(bv)) => Some(std::cmp::max(av, bv) - std::cmp::min(av, bv)), - _ => None, - } + fn daemon_materializer_state_is_corrupted(&self) -> bool { + self.daemon_materializer_state_is_corrupted } - fn merge_file_watcher_stats( - a: Option<buck2_data::FileWatcherStats>, - b: Option<buck2_data::FileWatcherStats>, - ) -> Option<buck2_data::FileWatcherStats> { - let (mut a, b) = match (a, b) { - (Some(a), Some(b)) => (a, b), - (a, None) => return a, - (None, b) => return b, - }; + fn restarter_is_enabled(&self) -> bool { + self.enable_restarter + } +} - a.fresh_instance = a.fresh_instance || b.fresh_instance; - a.events_total += b.events_total; - a.events_processed += b.events_processed; - a.branched_from_revision = a.branched_from_revision.or(b.branched_from_revision); - a.events.extend(b.events); - a.incomplete_events_reason = a.incomplete_events_reason.or(b.incomplete_events_reason); - a.watchman_version = a.watchman_version.or(b.watchman_version); - Some(a) +fn calculate_diff_if_some(a: &Option<u64>, b: &Option<u64>) -> Option<u64> { + match (a, b) { + (Some(av), Some(bv)) => Some(max(av, bv) - min(av, bv)), + _ => None, } } -pub fn try_get_invocation_recorder<'a>( +fn merge_file_watcher_stats( + a: Option<buck2_data::FileWatcherStats>, + b: Option<buck2_data::FileWatcherStats>, +) -> Option<buck2_data::FileWatcherStats> { + let (mut a, b) = match (a, b) { + (Some(a), Some(b)) => (a, b), + (a, None) => return a, + (None, b) => return b, + }; + + a.fresh_instance = a.fresh_instance || b.fresh_instance; + a.events_total += b.events_total; + a.events_processed += b.events_processed; + a.branched_from_revision = a.branched_from_revision.or(b.branched_from_revision); + a.branched_from_global_rev = a.branched_from_global_rev.or(b.branched_from_global_rev); + a.events.extend(b.events); + a.incomplete_events_reason = a.incomplete_events_reason.or(b.incomplete_events_reason); + a.watchman_version = a.watchman_version.or(b.watchman_version); + Some(a) +} + +pub(crate) fn try_get_invocation_recorder<'a>( ctx: &ClientCommandContext<'a>, - opts: &CommonDaemonCommandOptions, + opts: &CommonEventLogOptions, command_name: &'static str, sanitized_argv: Vec<String>, log_size_counter_bytes:
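// Editorial usage note: `calculate_diff_if_some` above yields the absolute difference
// of two counters only when both endpoints were observed; this is how the sink and
// network snapshot counters become per-invocation values. Self-contained restatement:
fn diff_if_some_sketch(a: &Option<u64>, b: &Option<u64>) -> Option<u64> {
    match (a, b) {
        (Some(av), Some(bv)) => Some(std::cmp::max(av, bv) - std::cmp::min(av, bv)),
        _ => None,
    }
}

#[test]
fn diff_if_some_demo() {
    assert_eq!(diff_if_some_sketch(&Some(10), &Some(3)), Some(7));
    assert_eq!(diff_if_some_sketch(&Some(10), &None), None);
}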
Option<Arc<AtomicU64>>, -) -> anyhow::Result<Box<imp::InvocationRecorder<'a>>> { +) -> anyhow::Result<Box<InvocationRecorder<'a>>> { let write_to_path = opts .unstable_write_invocation_record .as_ref() .map(|path| path.resolve(&ctx.working_dir)); + let paths = ctx.maybe_paths()?; + let filesystem; - #[cfg(any(fbcode_build, cargo_internal_build))] + #[cfg(fbcode_build)] { - let root = std::path::Path::to_owned(ctx.paths()?.project_root().root().to_buf().as_ref()); - if detect_eden::is_eden(root).unwrap_or(false) { + let is_eden = paths.map_or(false, |paths| { + let root = std::path::Path::to_owned(paths.project_root().root().to_buf().as_ref()); + detect_eden::is_eden(root).unwrap_or(false) + }); + if is_eden { filesystem = "eden".to_owned(); } else { filesystem = "default".to_owned(); } } - #[cfg(not(any(fbcode_build, cargo_internal_build)))] + #[cfg(not(fbcode_build))] { filesystem = "default".to_owned(); } - let recorder = imp::InvocationRecorder::new( + let build_count = paths.map(|p| BuildCountManager::new(p.build_count_dir())); + + let recorder = InvocationRecorder::new( ctx.fbinit(), ctx.async_cleanup_context().dupe(), write_to_path, command_name, sanitized_argv, ctx.trace_id.dupe(), - ctx.paths()?.isolation.as_str().to_owned(), - BuildCountManager::new(ctx.paths()?.build_count_dir()), + ctx.isolation.to_string(), + build_count, filesystem, ctx.restarted_trace_id.dupe(), log_size_counter_bytes, @@ -1149,23 +1758,29 @@ pub fn try_get_invocation_recorder<'a>( Ok(Box::new(recorder)) } -fn system_memory_stats() -> u64 { - use sysinfo::RefreshKind; - use sysinfo::System; - use sysinfo::SystemExt; - - let system = System::new_with_specifics(RefreshKind::new().with_memory()); - system.total_memory() +fn truncate_stderr(stderr: &str) -> &str { + // If server crashed, it means something is very broken, + // and we don't really need nicely formatted stderr. + // We only need to see it once, fix it, and never see it again. + let max_len = 20_000; + let truncate_at = stderr.len().saturating_sub(max_len); + let truncate_at = stderr.ceil_char_boundary(truncate_at); + &stderr[truncate_at..] } #[cfg(test)] mod tests { - use super::*; + use crate::subscribers::recorder::truncate_stderr; #[test] - fn get_system_memory_stats() { - let total_mem = system_memory_stats(); - // sysinfo returns zero when fails to retrieve data - assert!(total_mem > 0); + fn test_truncate_stderr() { + let mut stderr = String::new(); + stderr.push_str("prefix"); + stderr.push('Ъ'); // 2 bytes, so asking to truncate in the middle of the char.
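// Editorial sketch: `str::ceil_char_boundary`, used by `truncate_stderr` above, is a
// nightly-only API (buck2 builds with a pinned nightly toolchain). On stable Rust the
// same tail-truncation can be written with `is_char_boundary`:
fn truncate_tail_stable(stderr: &str, max_len: usize) -> &str {
    let mut cut = stderr.len().saturating_sub(max_len);
    while !stderr.is_char_boundary(cut) {
        cut += 1; // step forward to the next UTF-8 boundary, like ceil_char_boundary
    }
    &stderr[cut..]
}

#[test]
fn truncate_tail_stable_demo() {
    let s = format!("xЪ{}", "a".repeat(10)); // 13 bytes; the cut lands inside 'Ъ'
    assert_eq!(truncate_tail_stable(&s, 11).len(), 10);
}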
+ for _ in 0..19_999 { + stderr.push('a'); + } + let truncated = truncate_stderr(&stderr); + assert_eq!(truncated.len(), 19_999); } } diff --git a/app/buck2_client_ctx/src/subscribers/simpleconsole.rs b/app/buck2_client_ctx/src/subscribers/simpleconsole.rs index bb51dfaf3183a..3a0f8829011cf 100644 --- a/app/buck2_client_ctx/src/subscribers/simpleconsole.rs +++ b/app/buck2_client_ctx/src/subscribers/simpleconsole.rs @@ -8,63 +8,84 @@ */ use std::borrow::Cow; +use std::collections::HashMap; use std::fmt::Display; use std::fmt::Write as _; use std::sync::Arc; +use std::sync::Mutex; use std::time::Duration; use std::time::Instant; use std::time::SystemTime; -use anyhow::Context; use async_trait::async_trait; -use buck2_data::CommandExecutionDetails; -use buck2_data::TagEvent; +use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; use buck2_event_observer::display; use buck2_event_observer::display::display_file_watcher_end; use buck2_event_observer::display::TargetDisplayOptions; use buck2_event_observer::event_observer::EventObserver; use buck2_event_observer::event_observer::EventObserverExtra; use buck2_event_observer::humanized::HumanizedBytes; +use buck2_event_observer::pending_estimate::estimate_completion_percentage; +use buck2_event_observer::unpack_event::unpack_event; +use buck2_event_observer::unpack_event::VisitorError; use buck2_event_observer::verbosity::Verbosity; use buck2_event_observer::what_ran; -use buck2_event_observer::what_ran::command_to_string; -use buck2_event_observer::what_ran::worker_command_as_fallback_to_string; use buck2_event_observer::what_ran::WhatRanCommandConsoleFormat; use buck2_event_observer::what_ran::WhatRanOptions; +use buck2_event_observer::what_ran::WhatRanOptionsRegex; use buck2_event_observer::what_ran::WhatRanOutputCommand; use buck2_event_observer::what_ran::WhatRanOutputWriter; use buck2_events::BuckEvent; use buck2_wrapper_common::invocation_id::TraceId; use dupe::Dupe; +use once_cell::sync::Lazy; use superconsole::DrawMode; use superconsole::SuperConsole; -use termwiz::escape::Action; -use termwiz::escape::ControlCode; +use crate::subscribers::subscriber::EventSubscriber; use crate::subscribers::subscriber::Tick; -use crate::subscribers::subscriber_unpack::UnpackingEventSubscriber; use crate::subscribers::superconsole::io::io_in_flight_non_zero_counters; +use crate::subscribers::system_warning::cache_misses_msg; +use crate::subscribers::system_warning::check_cache_misses; +use crate::subscribers::system_warning::check_download_speed; +use crate::subscribers::system_warning::check_memory_pressure; +use crate::subscribers::system_warning::check_remaining_disk_space; +use crate::subscribers::system_warning::low_disk_space_msg; +use crate::subscribers::system_warning::slow_download_speed_msg; +use crate::subscribers::system_warning::system_memory_exceeded_msg; /// buck2 daemon info is printed to stderr if there are no other updates available /// within this duration. 
const KEEPALIVE_TIME_LIMIT: Duration = Duration::from_secs(7); +#[derive(Eq, PartialEq, Hash)] +enum SystemWarningTypes { + MemoryPressure, + LowDiskSpace, + SlowDownloadSpeed, + LowCacheHits, +} + +static ELAPSED_SYSTEM_WARNING_MAP: Lazy<Mutex<HashMap<SystemWarningTypes, (Instant, u64)>>> = + Lazy::new(|| Mutex::new(HashMap::new())); + fn now_display() -> impl Display { chrono::Local::now().to_rfc3339_opts(::chrono::SecondsFormat::Millis, false) } -fn echo_impl(message: &str) -> anyhow::Result<()> { +fn with_timestamps(message: &str) -> String { + let mut s = String::new(); let now = now_display(); for line in message.lines() { if line.is_empty() { - // patternlint-disable-next-line buck2-cli-simpleconsole-echo - crate::eprintln!("[{}]", now)?; + writeln!(s, "[{}]", now).unwrap(); } else { - // patternlint-disable-next-line buck2-cli-simpleconsole-echo - crate::eprintln!("[{}] {}", now, line)?; + writeln!(s, "[{}] {}", now, line).unwrap(); } } - Ok(()) + // Remove the trailing newline + s.pop(); + s } // Echoes a message to stderr, along with a timestamp. @@ -78,111 +99,63 @@ macro_rules! echo { ($fmt:expr $(, $args:expr)*) => { { let message = format!($fmt $(, $args)*); - echo_impl(&message) + let message = with_timestamps(&message); + // patternlint-disable-next-line buck2-cli-simpleconsole-echo + crate::eprintln!("{}", message) } }; } -#[derive(Copy, Clone, Dupe, Debug, PartialEq)] -enum TtyMode { - Enabled, - Disabled, -} - -struct ActionError { - display: display::ActionErrorDisplay<'static>, -} - -impl ActionError { - fn print(&self, tty_mode: TtyMode) -> anyhow::Result<()> { - echo!("Action failed: {}", self.display.action_id)?; - echo!("{}", self.display.reason)?; - if let Some(command) = &self.display.command { - eprint_command_details(command, tty_mode)?; +// Report only if at least double time has passed since reporting interval +fn echo_system_warning_exponential(warning: SystemWarningTypes, msg: &str) -> anyhow::Result<()> { + if let Some((last_reported, every_x)) = + ELAPSED_SYSTEM_WARNING_MAP.lock().unwrap().get_mut(&warning) + { + let now = Instant::now(); + let elapsed = now.duration_since(*last_reported); + let new_every_double: u64 = 2 * *every_x; + if elapsed > Duration::from_secs(new_every_double) { + echo!("{}", msg)?; + *every_x = new_every_double; + *last_reported = now; } - Ok(()) - } -} - -fn eprint_command_details( - command_failed: &CommandExecutionDetails, - tty_mode: TtyMode, -) -> anyhow::Result<()> { - if let Some(command_kind) = command_failed.command_kind.as_ref() { - use buck2_data::command_execution_kind::Command; - match command_kind.command.as_ref() { - Some(Command::LocalCommand(local_command)) => { - echo!("Local command: {}", command_to_string(local_command))?; - } - Some(Command::WorkerCommand(worker_command)) => { - echo!( - "Local worker command: {}", - worker_command_as_fallback_to_string(worker_command) - )?; - } - Some(Command::WorkerInitCommand(worker_init_command)) => { - echo!( - "Local worker initialization command: {}", - command_to_string(worker_init_command) - )?; - } - Some(Command::RemoteCommand(remote_command)) => { - echo!( - "Remote action{}, reproduce with: `frecli cas download-action {}`", - if remote_command.cache_hit { - " cache hit" - } else { - "" - }, - remote_command.action_digest - )?; - } - Some(Command::OmittedLocalCommand(..)) | None => { - // Nothing to show in this case.
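// Editorial sketch: `echo_system_warning_exponential` above implements exponential
// backoff for repeated warnings — each warning type is re-emitted only after twice the
// previous interval has elapsed (roughly 2s, 4s, 8s, ... after the 1-second seed set
// by `init_remaining_system_warning_count` below). The throttling rule in isolation:
use std::time::{Duration, Instant};

struct BackoffSketch {
    last_reported: Instant,
    every_secs: u64,
}

impl BackoffSketch {
    fn should_emit(&mut self, now: Instant) -> bool {
        let doubled = 2 * self.every_secs;
        if now.duration_since(self.last_reported) > Duration::from_secs(doubled) {
            self.every_secs = doubled;
            self.last_reported = now;
            true
        } else {
            false
        }
    }
}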
- } - }; } - - let (stdout, stderr) = if tty_mode == TtyMode::Disabled { - ( - sanitize_output_colors(command_failed.stdout.as_bytes()), - sanitize_output_colors(command_failed.stderr.as_bytes()), - ) - } else { - (command_failed.stdout.clone(), command_failed.stderr.clone()) - }; - print_stream("Stdout", &stdout)?; - print_stream("Stderr", &stderr)?; Ok(()) } -fn strip_trailing_newline(stream_contents: &str) -> &str { - match stream_contents.strip_suffix('\n') { - None => stream_contents, - Some(s) => s.strip_suffix('\r').unwrap_or(s), - } +#[derive(Copy, Clone, Dupe, Debug, PartialEq)] +enum TtyMode { + Enabled, + Disabled, } -fn print_stream(stream_name: &str, stream_contents: &str) -> anyhow::Result<()> { - if stream_contents.is_empty() { - echo!("{stream_name}: ")?; - return Ok(()); - } - echo!("{stream_name}:")?; - - let stream_contents = strip_trailing_newline(stream_contents); - crate::eprintln!("{}", &stream_contents)?; - Ok(()) +fn init_remaining_system_warning_count() { + ELAPSED_SYSTEM_WARNING_MAP + .lock() + .unwrap() + .insert(SystemWarningTypes::MemoryPressure, (Instant::now(), 1)); + ELAPSED_SYSTEM_WARNING_MAP + .lock() + .unwrap() + .insert(SystemWarningTypes::LowDiskSpace, (Instant::now(), 1)); + ELAPSED_SYSTEM_WARNING_MAP + .lock() + .unwrap() + .insert(SystemWarningTypes::SlowDownloadSpeed, (Instant::now(), 1)); + ELAPSED_SYSTEM_WARNING_MAP + .lock() + .unwrap() + .insert(SystemWarningTypes::LowCacheHits, (Instant::now(), 1)); } /// Just repeats stdout and stderr to client process. -pub(crate) struct SimpleConsole<E> { +pub struct SimpleConsole<E> { tty_mode: TtyMode, verbosity: Verbosity, // Whether to show "Waiting for daemon..." when no root spans are received expect_spans: bool, pub(crate) observer: EventObserver<E>, - action_errors: Vec<ActionError>, + action_errors: Vec<buck2_data::ActionError>, last_print_time: Instant, last_shown_snapshot_ts: Option<SystemTime>, } @@ -191,24 +164,36 @@ impl<E> SimpleConsole<E> where E: EventObserverExtra, { - pub(crate) fn with_tty(trace_id: TraceId, verbosity: Verbosity, expect_spans: bool) -> Self { + pub(crate) fn with_tty( + trace_id: TraceId, + verbosity: Verbosity, + expect_spans: bool, + build_count_dir: Option<AbsNormPathBuf>, + ) -> Self { + init_remaining_system_warning_count(); SimpleConsole { tty_mode: TtyMode::Enabled, verbosity, expect_spans, - observer: EventObserver::new(trace_id), + observer: EventObserver::new(trace_id, build_count_dir), action_errors: Vec::new(), last_print_time: Instant::now(), last_shown_snapshot_ts: None, } } - pub(crate) fn without_tty(trace_id: TraceId, verbosity: Verbosity, expect_spans: bool) -> Self { + pub(crate) fn without_tty( + trace_id: TraceId, + verbosity: Verbosity, + expect_spans: bool, + build_count_dir: Option<AbsNormPathBuf>, + ) -> Self { + init_remaining_system_warning_count(); SimpleConsole { tty_mode: TtyMode::Disabled, verbosity, expect_spans, - observer: EventObserver::new(trace_id), + observer: EventObserver::new(trace_id, build_count_dir.clone()), action_errors: Vec::new(), last_print_time: Instant::now(), last_shown_snapshot_ts: None, @@ -216,10 +201,15 @@ where } /// Create a SimpleConsole that auto detects whether it has a TTY or not.
-    pub(crate) fn autodetect(trace_id: TraceId, verbosity: Verbosity, expect_spans: bool) -> Self {
+    pub(crate) fn autodetect(
+        trace_id: TraceId,
+        verbosity: Verbosity,
+        expect_spans: bool,
+        build_count_dir: Option<AbsNormPathBuf>,
+    ) -> Self {
         match SuperConsole::compatible() {
-            true => Self::with_tty(trace_id, verbosity, expect_spans),
-            false => Self::without_tty(trace_id, verbosity, expect_spans),
+            true => Self::with_tty(trace_id, verbosity, expect_spans, build_count_dir),
+            false => Self::without_tty(trace_id, verbosity, expect_spans, build_count_dir),
         }
     }
 
@@ -227,10 +217,11 @@ where
         &self.observer
     }
 
-    pub(crate) fn update_event_observer(&mut self, event: &Arc<BuckEvent>) -> anyhow::Result<()> {
-        self.observer
-            .observe(Instant::now(), event)
-            .context("Error tracking event")
+    pub(crate) async fn update_event_observer(
+        &mut self,
+        event: &Arc<BuckEvent>,
+    ) -> anyhow::Result<()> {
+        self.observer.observe(Instant::now(), event).await
     }
 
     fn notify_printed(&mut self) {
@@ -290,43 +281,22 @@ where
         Ok(())
     }
-}
 
-#[async_trait]
-impl<E> UnpackingEventSubscriber for SimpleConsole<E>
-where
-    E: EventObserverExtra,
-{
-    async fn handle_output(&mut self, raw_output: &[u8]) -> anyhow::Result<()> {
-        // We expect output that gets here to already have been buffered if possible (because it
-        // primarily gets to us through a GRPC layer that already needs buffering), so we
-        // unconditionally flush it.
-        crate::stdio::print_bytes(raw_output)?;
-        crate::stdio::flush()?;
-        self.notify_printed();
-        Ok(())
-    }
-
-    async fn handle_stderr(&mut self, stderr: &str) -> anyhow::Result<()> {
-        echo!("{}", stderr)?;
-        self.notify_printed();
-        Ok(())
-    }
-
-    async fn handle_structured_error(
-        &mut self,
-        err: &buck2_data::StructuredError,
-        _event: &BuckEvent,
-    ) -> anyhow::Result<()> {
-        if err.quiet {
-            return Ok(());
+    fn print_action_error(&mut self, error: &buck2_data::ActionError) -> anyhow::Result<()> {
+        let display = display::display_action_error(error, TargetDisplayOptions::for_log())?;
+        let message = display.simple_format_with_timestamps(with_timestamps);
+        if self.tty_mode == TtyMode::Disabled {
+            // patternlint-disable-next-line buck2-cli-simpleconsole-echo
            crate::eprintln!("{}", display::sanitize_output_colors(message.as_bytes()))?;
+        } else {
+            // patternlint-disable-next-line buck2-cli-simpleconsole-echo
+            crate::eprintln!("{}", message)?;
         }
-        echo!("{}", err.payload)?;
         self.notify_printed();
         Ok(())
     }
 
-    async fn handle_file_watcher_end(
+    pub(crate) async fn handle_file_watcher_end(
         &mut self,
         file_watcher: &buck2_data::FileWatcherEnd,
         _event: &BuckEvent,
@@ -340,25 +310,106 @@ where
         Ok(())
     }
 
-    async fn handle_event(&mut self, event: &Arc<BuckEvent>) -> anyhow::Result<()> {
-        self.update_event_observer(event)?;
-        self.handle_inner_event(event)
-            .await
-            .with_context(|| display::InvalidBuckEvent(event.dupe()))?;
+    pub(crate) async fn handle_event(&mut self, event: &Arc<BuckEvent>) -> anyhow::Result<()> {
+        self.update_event_observer(event).await?;
+
+        self.handle_event_inner(event).await?;
 
         if self.verbosity.print_all_commands() {
+            let options = WhatRanOptions::default();
+            let options_regex = WhatRanOptionsRegex::from_options(&options)?;
             what_ran::emit_event_if_relevant(
                 event.parent_id().into(),
                 event.data(),
                 self.observer().spans(),
                 &mut PrintDebugCommandToStderr,
-                &WhatRanOptions::default(),
+                &options_regex,
             )?;
         }
 
         Ok(())
     }
 
+    async fn handle_event_inner(&mut self, event: &BuckEvent) -> anyhow::Result<()> {
+        match unpack_event(event)?
{ + buck2_event_observer::unpack_event::UnpackedBuckEvent::SpanStart(_, _, data) => { + match data { + buck2_data::span_start_event::Data::Command(command) => { + self.handle_command_start(command, event).await + } + _ => Ok(()), + } + } + buck2_event_observer::unpack_event::UnpackedBuckEvent::SpanEnd(_, _, data) => { + match data { + buck2_data::span_end_event::Data::Command(command) => { + self.handle_command_end(command, event).await + } + buck2_data::span_end_event::Data::ActionExecution(action) => { + self.handle_action_execution_end(action, event).await + } + buck2_data::span_end_event::Data::FileWatcher(file_watcher) => { + self.handle_file_watcher_end(file_watcher, event).await + } + _ => Ok(()), + } + } + buck2_event_observer::unpack_event::UnpackedBuckEvent::Instant(_, _, data) => { + match data { + buck2_data::instant_event::Data::ConsoleMessage(message) => { + self.handle_stderr(&message.message).await + } + buck2_data::instant_event::Data::ConsoleWarning(message) => { + self.handle_stderr(&message.message).await + } + buck2_data::instant_event::Data::ReSession(session) => { + let message = format!("RE Session: {}", session.session_id); + self.handle_stderr(&message).await + } + buck2_data::instant_event::Data::StructuredError(err) => { + self.handle_structured_error(err, event).await + } + buck2_data::instant_event::Data::TestDiscovery(discovery) => { + self.handle_test_discovery(discovery, event).await + } + buck2_data::instant_event::Data::TestResult(result) => { + self.handle_test_result(result, event).await + } + buck2_data::instant_event::Data::TagEvent(tags) => { + if tags.tags.contains(&"which-dice:Legacy".to_owned()) { + self.handle_stderr("Note: using deprecated legacy dice.") + .await?; + } + + Ok(()) + } + buck2_data::instant_event::Data::ActionError(error) => { + self.handle_action_error(error).await + } + _ => Ok(()), + } + } + buck2_event_observer::unpack_event::UnpackedBuckEvent::UnrecognizedSpanStart(_, _) + | buck2_event_observer::unpack_event::UnpackedBuckEvent::UnrecognizedSpanEnd(_, _) + | buck2_event_observer::unpack_event::UnpackedBuckEvent::UnrecognizedInstant(_, _) => { + Err(VisitorError::MissingField(event.clone()).into()) + } + } + } + + pub(crate) async fn handle_structured_error( + &mut self, + err: &buck2_data::StructuredError, + _event: &BuckEvent, + ) -> anyhow::Result<()> { + if err.quiet { + return Ok(()); + } + echo!("{}", err.payload)?; + self.notify_printed(); + Ok(()) + } + async fn handle_command_start( &mut self, _command: &buck2_data::CommandStart, @@ -376,37 +427,6 @@ where Ok(()) } - async fn handle_command_result( - &mut self, - result: &buck2_cli_proto::CommandResult, - ) -> anyhow::Result<()> { - if let buck2_cli_proto::CommandResult { - result: Some(buck2_cli_proto::command_result::Result::Error(e)), - } = result - { - echo!("Command failed: ")?; - for message in &e.messages { - echo!("{}", message)?; - } - self.notify_printed(); - } - - let errors = std::mem::take(&mut self.action_errors); - - if !errors.is_empty() { - echo!()?; - echo!("BUILD ERRORS ({})", errors.len())?; - echo!("The following actions failed during the execution of this command:")?; - for error in errors.iter() { - error.print(self.tty_mode)?; - } - echo!()?; - self.notify_printed(); - } - - Ok(()) - } - async fn handle_command_end( &mut self, _command: &buck2_data::CommandEnd, @@ -450,24 +470,7 @@ where Ok(()) } - async fn handle_console_message( - &mut self, - message: &buck2_data::ConsoleMessage, - _event: &BuckEvent, - ) -> anyhow::Result<()> { - 
self.handle_stderr(&message.message).await - } - - async fn handle_re_session_created( - &mut self, - session: &buck2_data::RemoteExecutionSessionCreated, - _event: &BuckEvent, - ) -> anyhow::Result<()> { - let message = format!("RE Session: {}", session.session_id); - self.handle_stderr(&message).await - } - - async fn handle_action_execution_end( + pub(crate) async fn handle_action_execution_end( &mut self, action: &buck2_data::ActionExecutionEnd, _event: &BuckEvent, @@ -493,28 +496,25 @@ where echo!("stderr:{}\x1b[0m", stderr)?; } TtyMode::Disabled => { - echo!("stderr:\n{}", sanitize_output_colors(stderr.as_bytes()))?; + echo!( + "stderr:\n{}", + display::sanitize_output_colors(stderr.as_bytes()) + )?; } } } self.notify_printed(); } - if let Some(error) = &action.error { - let action_error = ActionError { - display: display::display_action_error( - action, - error, - TargetDisplayOptions::for_log(), - )? - .to_static(), - }; - - action_error.print(self.tty_mode)?; - self.action_errors.push(action_error); - self.notify_printed(); - } + Ok(()) + } + pub(crate) async fn handle_action_error( + &mut self, + error: &buck2_data::ActionError, + ) -> anyhow::Result<()> { + self.print_action_error(error)?; + self.action_errors.push(error.clone()); Ok(()) } @@ -554,15 +554,61 @@ where Ok(()) } - async fn handle_tags(&mut self, tags: &TagEvent) -> anyhow::Result<()> { - if tags.tags.contains(&"which-dice:Modern".to_owned()) { - self.handle_stderr("Note: using experimental modern dice.") - .await?; - } + pub(crate) async fn handle_stderr(&mut self, stderr: &str) -> anyhow::Result<()> { + echo!("{}", stderr)?; + self.notify_printed(); + Ok(()) + } +} + +#[async_trait] +impl EventSubscriber for SimpleConsole +where + E: EventObserverExtra, +{ + async fn handle_output(&mut self, raw_output: &[u8]) -> anyhow::Result<()> { + // We expect output that gets here to already have been buffered if possible (because it + // primarily gets to us through a GRPC layer that already needs buffering), so we + // unconditionally flush it. + crate::stdio::print_bytes(raw_output)?; + crate::stdio::flush()?; + self.notify_printed(); + Ok(()) + } + async fn handle_events(&mut self, events: &[Arc]) -> anyhow::Result<()> { + for ev in events { + self.handle_event(ev).await?; + } Ok(()) } + async fn handle_tailer_stderr(&mut self, stderr: &str) -> anyhow::Result<()> { + self.handle_stderr(stderr).await + } + + async fn handle_command_result( + &mut self, + result: &buck2_cli_proto::CommandResult, + ) -> anyhow::Result<()> { + let errors = std::mem::take(&mut self.action_errors); + + if !errors.is_empty() { + echo!()?; + echo!("BUILD ERRORS ({})", errors.len())?; + echo!("The following actions failed during the execution of this command:")?; + for error in errors.iter() { + self.print_action_error(error)?; + } + echo!()?; + self.notify_printed(); + } + + crate::subscribers::errorconsole::ErrorConsole + .handle_command_result(result) + .await + } + async fn tick(&mut self, _: &Tick) -> anyhow::Result<()> { if self.verbosity.print_status() && self.last_print_time.elapsed() > KEEPALIVE_TIME_LIMIT { let mut show_stats = self.expect_spans; @@ -576,7 +622,7 @@ where " [{}]", display::display_event( &c.info().event, - TargetDisplayOptions::for_log() + TargetDisplayOptions::for_log(), )? 
)), None => Cow::Borrowed(""), @@ -596,6 +642,58 @@ where remaining )?; + let first_snapshot = self.observer().re_state().first_snapshot(); + let last_snapshot = self.observer().two_snapshots().last.as_ref().map(|s| &s.1); + let sysinfo = self.observer().system_info(); + let avg_re_download_speed = + self.observer().re_avg_download_speed().avg_per_second(); + if let Some(memory_pressure) = check_memory_pressure(last_snapshot, sysinfo) { + echo_system_warning_exponential( + SystemWarningTypes::MemoryPressure, + &system_memory_exceeded_msg(&memory_pressure), + )?; + } + if let Some(low_disk_space) = check_remaining_disk_space(last_snapshot, sysinfo) + { + echo_system_warning_exponential( + SystemWarningTypes::LowDiskSpace, + &low_disk_space_msg(&low_disk_space), + )?; + } + if check_download_speed( + first_snapshot, + last_snapshot, + sysinfo, + avg_re_download_speed, + self.observer().concurrent_commands, + ) { + echo_system_warning_exponential( + SystemWarningTypes::SlowDownloadSpeed, + &slow_download_speed_msg(avg_re_download_speed), + )?; + } + let first_build_since_rebase = self + .observer + .cold_build_detector + .as_ref() + .and_then(|cbd| cbd.first_build_since_rebase()) + .unwrap_or(false); + let estimated_completion_percent = estimate_completion_percentage( + self.observer().spans().roots(), + self.observer().dice_state(), + ); + + if check_cache_misses( + self.observer().action_stats(), + sysinfo, + first_build_since_rebase, + Some(estimated_completion_percent), + ) { + echo_system_warning_exponential( + SystemWarningTypes::LowCacheHits, + &cache_misses_msg(self.observer().action_stats()), + )?; + } show_stats = self.verbosity.always_print_stats_in_status(); } None => { @@ -621,19 +719,10 @@ where Ok(()) } - async fn handle_error(&mut self, _error: &anyhow::Error) -> anyhow::Result<()> { + async fn handle_error(&mut self, _error: &buck2_error::Error) -> anyhow::Result<()> { // We don't need to do any cleanup to exit. Ok(()) } - - async fn handle_console_preferences( - &mut self, - _prefs: &buck2_data::ConsolePreferences, - _event: &BuckEvent, - ) -> anyhow::Result<()> { - // Those are only used by the Superconsole at the moment. 
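All four checks above (memory pressure, disk space, download speed, cache hits) funnel through `echo_system_warning_exponential`, so a condition that persists is reported after roughly 2s, then 4s, 8s, and so on, rather than on every tick. The scheme in isolation, as a minimal sketch with toy types (not the buck2 ones):

use std::collections::HashMap;
use std::time::{Duration, Instant};

// Toy stand-in for SystemWarningTypes.
#[derive(Eq, PartialEq, Hash)]
enum Warning {
    MemoryPressure,
}

struct Backoff {
    // warning -> (last reported, current interval in seconds)
    state: HashMap<Warning, (Instant, u64)>,
}

impl Backoff {
    fn should_report(&mut self, warning: Warning) -> bool {
        let (last, every) = self
            .state
            .entry(warning)
            .or_insert_with(|| (Instant::now(), 1));
        let doubled = 2 * *every;
        if last.elapsed() > Duration::from_secs(doubled) {
            *every = doubled; // next report only after twice as long
            *last = Instant::now();
            true
        } else {
            false
        }
    }
}

The diff's version pre-seeds the map in `init_remaining_system_warning_count` instead of using `entry`, so the first report can only fire once the console has existed for at least two seconds.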
- Ok(()) - } } struct PrintDebugCommandToStderr; @@ -643,80 +732,11 @@ impl WhatRanOutputWriter for PrintDebugCommandToStderr { echo!( "{}", WhatRanCommandConsoleFormat { - reason: command.reason(), - identity: command.identity(), - repro: command.repro(), + reason: command.reason, + identity: command.identity, + repro: command.repro, } )?; Ok(()) } } - -fn sanitize_output_colors(stderr: &[u8]) -> String { - let mut sanitized = String::with_capacity(stderr.len()); - let mut parser = termwiz::escape::parser::Parser::new(); - parser.parse(stderr, |a| match a { - Action::Print(c) => sanitized.push(c), - Action::Control(cc) => match cc { - ControlCode::CarriageReturn => sanitized.push('\r'), - ControlCode::LineFeed => sanitized.push('\n'), - ControlCode::HorizontalTab => sanitized.push('\t'), - _ => {} - }, - _ => {} - }); - sanitized -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn removes_color_characters() { - let message = "\x1b[0mFoo\t\x1b[34mBar\n\x1b[DBaz\r\nQuz"; - - let sanitized = sanitize_output_colors(message.as_bytes()); - - assert_eq!("Foo\tBar\nBaz\r\nQuz", sanitized); - } - - mod strip_trailing_newline { - use super::*; - - #[test] - fn strips_trailing_newline_character() { - let stream_contents = "test\n"; - let res = strip_trailing_newline(stream_contents); - assert_eq!(res, "test"); - } - - #[test] - fn preserves_duplicate_newlines() { - let stream_contents = "test\n\n"; - let res = strip_trailing_newline(stream_contents); - assert_eq!(res, "test\n"); - } - - #[test] - fn preserves_other_trailing_whitespace() { - let stream_contents = "test \t"; - let res = strip_trailing_newline(stream_contents); - assert_eq!(res, stream_contents); - } - - #[test] - fn preserves_leading_whitespace() { - let stream_contents = "\n test"; - let res = strip_trailing_newline(stream_contents); - assert_eq!(res, stream_contents); - } - - #[test] - fn correctly_handles_carriage_return() { - let stream_contents = "test\r\n"; - let res = strip_trailing_newline(stream_contents); - assert_eq!(res, "test"); - } - } -} diff --git a/app/buck2_client_ctx/src/subscribers/subscriber.rs b/app/buck2_client_ctx/src/subscribers/subscriber.rs index 82a95c52846cc..5a3d9f5381dd9 100644 --- a/app/buck2_client_ctx/src/subscribers/subscriber.rs +++ b/app/buck2_client_ctx/src/subscribers/subscriber.rs @@ -15,6 +15,7 @@ use async_trait::async_trait; use buck2_events::BuckEvent; use dupe::Dupe; +use crate::console_interaction_stream::SuperConsoleToggle; use crate::subscribers::observer::ErrorObserver; /// Information about tick timing. @@ -48,7 +49,10 @@ pub trait EventSubscriber: Send { async fn handle_tailer_stderr(&mut self, _stderr: &str) -> anyhow::Result<()> { Ok(()) } - async fn handle_console_interaction(&mut self, _c: char) -> anyhow::Result<()> { + async fn handle_console_interaction( + &mut self, + _c: &Option, + ) -> anyhow::Result<()> { Ok(()) } async fn handle_events(&mut self, _event: &[Arc]) -> anyhow::Result<()> { @@ -63,7 +67,7 @@ pub trait EventSubscriber: Send { /// Give the subscriber a chance to react to errors as we start trying to clean up. /// They may return another error, which will be incorporated into the end result. 
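Note the signature change just above: `handle_console_interaction` now receives an `Option<SuperConsoleToggle>` rather than a raw `char`. The keypress is decoded once, upstream in `console_interaction_stream`, and unrecognized keys arrive as `None`. A hypothetical sketch of that decoding step (the real mapping lives in `SuperConsoleToggle` and is not shown in this diff; variant names follow the match in superconsole.rs below):

// Hypothetical decoder for illustration; the actual implementation
// in console_interaction_stream may differ.
fn decode(c: char) -> Option<SuperConsoleToggle> {
    match c {
        'd' => Some(SuperConsoleToggle::Dice),
        'e' => Some(SuperConsoleToggle::DebugEvents),
        '2' => Some(SuperConsoleToggle::TwoLinesMode),
        'r' => Some(SuperConsoleToggle::DetailedRE),
        'i' => Some(SuperConsoleToggle::Io),
        'p' => Some(SuperConsoleToggle::TargetConfigurations),
        'c' => Some(SuperConsoleToggle::Commands),
        _ => None,
    }
}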
- async fn handle_error(&mut self, _error: &anyhow::Error) -> anyhow::Result<()> { + async fn handle_error(&mut self, _error: &buck2_error::Error) -> anyhow::Result<()> { Ok(()) } @@ -81,5 +85,6 @@ pub trait EventSubscriber: Send { None } - fn handle_daemon_connection_failure(&mut self) {} + fn handle_daemon_connection_failure(&mut self, _error: &buck2_error::Error) {} + fn handle_daemon_started(&mut self, _reason: buck2_data::DaemonWasStartedReason) {} } diff --git a/app/buck2_client_ctx/src/subscribers/subscriber_unpack.rs b/app/buck2_client_ctx/src/subscribers/subscriber_unpack.rs deleted file mode 100644 index a74018e848240..0000000000000 --- a/app/buck2_client_ctx/src/subscribers/subscriber_unpack.rs +++ /dev/null @@ -1,265 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::sync::Arc; - -use async_trait::async_trait; -use buck2_cli_proto::CommandResult; -use buck2_data::buck_event; -use buck2_data::InstantEvent; -use buck2_data::SpanEndEvent; -use buck2_data::SpanStartEvent; -use buck2_event_observer::unpack_event::VisitorError; -use buck2_events::BuckEvent; - -use crate::subscribers::observer::ErrorObserver; -use crate::subscribers::subscriber::EventSubscriber; -use crate::subscribers::subscriber::Tick; - -/// Wrap an `UnpackingEventSubscriber` instance to provide an `EventSubscriber`. -pub(crate) struct UnpackingEventSubscriberAsEventSubscriber( - pub(crate) U, -); - -#[async_trait] -pub trait UnpackingEventSubscriber: Send { - async fn handle_output(&mut self, _raw_output: &[u8]) -> anyhow::Result<()> { - Ok(()) - } - async fn handle_stderr(&mut self, _stderr: &str) -> anyhow::Result<()> { - Ok(()) - } - async fn handle_console_interaction(&mut self, _c: char) -> anyhow::Result<()> { - Ok(()) - } - async fn handle_event(&mut self, event: &Arc) -> anyhow::Result<()> { - self.handle_inner_event(event).await - } - async fn handle_inner_event(&mut self, event: &Arc) -> anyhow::Result<()> { - match event.data() { - buck_event::Data::SpanStart(ref start) => self.handle_event_start(start, event), - buck_event::Data::SpanEnd(ref end) => self.handle_event_end(end, event), - buck_event::Data::Instant(ref instant) => self.handle_instant(instant, event), - // Not present in the event stream from the daemon to CLI. - buck_event::Data::Record(_) => Box::pin(async { Ok(()) }), - } - .await - } - async fn handle_command_result(&mut self, _result: &CommandResult) -> anyhow::Result<()> { - Ok(()) - } - - async fn exit(&mut self) -> anyhow::Result<()> { - Ok(()) - } - - async fn handle_event_start( - &mut self, - start: &SpanStartEvent, - event: &Arc, - ) -> anyhow::Result<()> { - match start - .data - .as_ref() - .ok_or_else(|| VisitorError::MissingField((**event).clone()))? - { - buck2_data::span_start_event::Data::Command(command) => { - self.handle_command_start(command, event).await - } - _ => Ok(()), - } - } - - async fn handle_event_end( - &mut self, - end: &SpanEndEvent, - event: &Arc, - ) -> anyhow::Result<()> { - match end - .data - .as_ref() - .ok_or_else(|| VisitorError::MissingField((**event).clone()))? 
- { - buck2_data::span_end_event::Data::Command(command) => { - self.handle_command_end(command, event).await - } - buck2_data::span_end_event::Data::ActionExecution(action) => { - self.handle_action_execution_end(action, event).await - } - buck2_data::span_end_event::Data::FileWatcher(file_watcher) => { - self.handle_file_watcher_end(file_watcher, event).await - } - _ => Ok(()), - } - } - - async fn handle_instant( - &mut self, - instant: &InstantEvent, - event: &Arc, - ) -> anyhow::Result<()> { - match instant - .data - .as_ref() - .ok_or_else(|| VisitorError::MissingField((**event).clone()))? - { - buck2_data::instant_event::Data::ConsoleMessage(message) => { - self.handle_console_message(message, event).await - } - buck2_data::instant_event::Data::ReSession(session) => { - self.handle_re_session_created(session, event).await - } - buck2_data::instant_event::Data::StructuredError(err) => { - self.handle_structured_error(err, event).await - } - buck2_data::instant_event::Data::TestDiscovery(discovery) => { - self.handle_test_discovery(discovery, event).await - } - buck2_data::instant_event::Data::TestResult(result) => { - self.handle_test_result(result, event).await - } - buck2_data::instant_event::Data::ConsolePreferences(preferences) => { - self.handle_console_preferences(preferences, event).await - } - buck2_data::instant_event::Data::DebugAdapterSnapshot(snapshot) => { - self.handle_debug_adapter_snapshot(snapshot).await - } - buck2_data::instant_event::Data::TagEvent(tags) => self.handle_tags(tags).await, - _ => Ok(()), - } - } - - async fn handle_command_start( - &mut self, - _command: &buck2_data::CommandStart, - _event: &BuckEvent, - ) -> anyhow::Result<()>; - - async fn handle_command_end( - &mut self, - _command: &buck2_data::CommandEnd, - _event: &BuckEvent, - ) -> anyhow::Result<()>; - - async fn handle_action_execution_end( - &mut self, - _action: &buck2_data::ActionExecutionEnd, - _event: &BuckEvent, - ) -> anyhow::Result<()>; - - async fn handle_file_watcher_end( - &mut self, - _watchman: &buck2_data::FileWatcherEnd, - _event: &BuckEvent, - ) -> anyhow::Result<()>; - - async fn handle_console_message( - &mut self, - _message: &buck2_data::ConsoleMessage, - _event: &BuckEvent, - ) -> anyhow::Result<()>; - - async fn handle_re_session_created( - &mut self, - _session: &buck2_data::RemoteExecutionSessionCreated, - _event: &BuckEvent, - ) -> anyhow::Result<()> { - Ok(()) - } - async fn handle_structured_error( - &mut self, - _err: &buck2_data::StructuredError, - _event: &BuckEvent, - ) -> anyhow::Result<()>; - - async fn handle_test_discovery( - &mut self, - _test_info: &buck2_data::TestDiscovery, - _event: &BuckEvent, - ) -> anyhow::Result<()>; - - async fn handle_test_result( - &mut self, - _result: &buck2_data::TestResult, - _event: &BuckEvent, - ) -> anyhow::Result<()>; - - async fn handle_console_preferences( - &mut self, - _prefs: &buck2_data::ConsolePreferences, - _event: &BuckEvent, - ) -> anyhow::Result<()>; - - async fn handle_debug_adapter_snapshot( - &mut self, - _msg: &buck2_data::DebugAdapterSnapshot, - ) -> anyhow::Result<()> { - Ok(()) - } - - async fn handle_tags(&mut self, _tags: &buck2_data::TagEvent) -> anyhow::Result<()> { - Ok(()) - } - - /// Give the subscriber a chance to react to errors as we start trying to clean up. - /// They may return another error, which will be incorporated into the end result. 
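The trait deleted in this file gave every subscriber a default no-op callback per event variant, with the framework deciding which callback to invoke; the replacement (the `match unpack_event(...)` blocks added to simpleconsole.rs and superconsole.rs earlier in this diff) dispatches inline. The two styles side by side, reduced to a toy with invented names:

enum Event {
    Start(u32),
    End(u32),
}

// Old style: a trait with one overridable callback per variant; the
// framework unpacks the event and picks the callback.
trait Visitor {
    fn on_start(&mut self, _n: u32) {}
    fn on_end(&mut self, _n: u32) {}
}

// New style: each consumer matches directly and ignores variants it
// does not care about, so unused callbacks simply disappear.
fn handle(ev: &Event) {
    match ev {
        Event::Start(_n) => { /* react to span start */ }
        Event::End(_n) => { /* react to span end */ }
    }
}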
-    async fn handle_error(&mut self, _error: &anyhow::Error) -> anyhow::Result<()>;
-
-    /// Allow the subscriber to do some sort of action once every render cycle.
-    async fn tick(&mut self, _tick: &Tick) -> anyhow::Result<()>;
-
-    fn as_error_observer(&self) -> Option<&dyn ErrorObserver> {
-        None
-    }
-}
-
-#[async_trait]
-impl<U: UnpackingEventSubscriber> EventSubscriber for UnpackingEventSubscriberAsEventSubscriber<U> {
-    async fn handle_output(&mut self, raw_output: &[u8]) -> anyhow::Result<()> {
-        self.0.handle_output(raw_output).await
-    }
-
-    async fn handle_tailer_stderr(&mut self, stderr: &str) -> anyhow::Result<()> {
-        self.0.handle_stderr(stderr).await
-    }
-
-    async fn handle_console_interaction(&mut self, c: char) -> anyhow::Result<()> {
-        self.0.handle_console_interaction(c).await
-    }
-
-    async fn handle_events(&mut self, events: &[Arc<BuckEvent>]) -> anyhow::Result<()> {
-        for event in events {
-            self.0.handle_event(event).await?;
-        }
-        Ok(())
-    }
-
-    async fn handle_error(&mut self, error: &anyhow::Error) -> anyhow::Result<()> {
-        self.0.handle_error(error).await
-    }
-
-    async fn tick(&mut self, tick: &Tick) -> anyhow::Result<()> {
-        self.0.tick(tick).await
-    }
-
-    async fn handle_command_result(
-        &mut self,
-        result: &buck2_cli_proto::CommandResult,
-    ) -> anyhow::Result<()> {
-        self.0.handle_command_result(result).await
-    }
-
-    async fn exit(&mut self) -> anyhow::Result<()> {
-        self.0.exit().await
-    }
-
-    fn as_error_observer(&self) -> Option<&dyn ErrorObserver> {
-        self.0.as_error_observer()
-    }
-}
diff --git a/app/buck2_client_ctx/src/subscribers/subscribers.rs b/app/buck2_client_ctx/src/subscribers/subscribers.rs
new file mode 100644
index 0000000000000..a59c04caddf5a
--- /dev/null
+++ b/app/buck2_client_ctx/src/subscribers/subscribers.rs
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::future::Future;
+
+use futures::stream::FuturesUnordered;
+use futures::StreamExt;
+
+use crate::subscribers::observer::ErrorObserver;
+use crate::subscribers::subscriber::EventSubscriber;
+
+#[derive(Default)]
+pub struct EventSubscribers<'a> {
+    subscribers: Vec<Box<dyn EventSubscriber + 'a>>,
+}
+
+impl<'a> EventSubscribers<'a> {
+    pub fn new(subscribers: Vec<Box<dyn EventSubscriber + 'a>>) -> EventSubscribers<'a> {
+        EventSubscribers { subscribers }
+    }
+
+    /// Helper method to abstract the process of applying an `EventSubscriber` method to all of the subscribers.
+    /// Quits on the first error encountered.
+    pub(crate) async fn for_each_subscriber<'b, Fut>(
+        &'b mut self,
+        f: impl FnMut(&'b mut Box<dyn EventSubscriber + 'a>) -> Fut,
+    ) -> anyhow::Result<()>
+    where
+        Fut: Future<Output = anyhow::Result<()>> + 'b,
+    {
+        let mut futures: FuturesUnordered<_> = self.subscribers.iter_mut().map(f).collect();
+        while let Some(res) = futures.next().await {
+            res?;
+        }
+        Ok(())
+    }
+
+    pub(crate) async fn handle_exit(&mut self) -> anyhow::Result<()> {
+        let mut r = Ok(());
+        for subscriber in &mut self.subscribers {
+            // Exit all subscribers, do not stop on first one.
+            let subscriber_err = subscriber.exit().await;
+            if r.is_ok() {
+                // Keep first error.
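`for_each_subscriber` above drives one future per subscriber concurrently through a `FuturesUnordered` and bails on the first error, which also cancels the remaining futures by dropping them. The same pattern in isolation, as a sketch with a toy trait (assuming the `futures` and `anyhow` crates):

use futures::future::BoxFuture;
use futures::stream::{FuturesUnordered, StreamExt};

trait Subscriber {
    fn notify(&mut self) -> BoxFuture<'_, anyhow::Result<()>>;
}

async fn notify_all(subs: &mut [Box<dyn Subscriber + Send>]) -> anyhow::Result<()> {
    // One in-flight future per subscriber, polled concurrently.
    let mut futures: FuturesUnordered<_> = subs.iter_mut().map(|s| s.notify()).collect();
    while let Some(res) = futures.next().await {
        res?; // first error wins; dropping the stream cancels the rest
    }
    Ok(())
}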
+ r = subscriber_err; + } + } + r + } + + pub(crate) fn handle_daemon_connection_failure(&mut self, error: &buck2_error::Error) { + for subscriber in &mut self.subscribers { + subscriber.handle_daemon_connection_failure(error); + } + } + + pub(crate) fn handle_daemon_started(&mut self, reason: buck2_data::DaemonWasStartedReason) { + for subscriber in &mut self.subscribers { + subscriber.handle_daemon_started(reason); + } + } + + pub(crate) fn error_observers(&self) -> impl Iterator { + self.subscribers + .iter() + .filter_map(|s| s.as_error_observer()) + } + + pub(crate) async fn eprintln(&mut self, message: &str) -> anyhow::Result<()> { + self.for_each_subscriber(|s| { + // TODO(nga): this is not a tailer. + s.handle_tailer_stderr(message) + }) + .await + } +} diff --git a/app/buck2_client_ctx/src/subscribers/superconsole.rs b/app/buck2_client_ctx/src/subscribers/superconsole.rs index 184e36b599835..fcffa5fdc9bf4 100644 --- a/app/buck2_client_ctx/src/subscribers/superconsole.rs +++ b/app/buck2_client_ctx/src/subscribers/superconsole.rs @@ -15,21 +15,27 @@ use std::time::Duration; use anyhow::Context as _; use async_trait::async_trait; +use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; use buck2_data::CommandExecutionDetails; use buck2_event_observer::display; use buck2_event_observer::display::display_file_watcher_end; use buck2_event_observer::display::TargetDisplayOptions; use buck2_event_observer::event_observer::DebugEventObserverExtra; +use buck2_event_observer::pending_estimate::estimate_completion_percentage; use buck2_event_observer::session_info::SessionInfo; +use buck2_event_observer::unpack_event::unpack_event; +use buck2_event_observer::unpack_event::VisitorError; use buck2_event_observer::verbosity::Verbosity; use buck2_event_observer::what_ran; use buck2_event_observer::what_ran::command_to_string; use buck2_event_observer::what_ran::worker_command_as_fallback_to_string; use buck2_event_observer::what_ran::WhatRanOptions; +use buck2_event_observer::what_ran::WhatRanOptionsRegex; use buck2_events::BuckEvent; use buck2_wrapper_common::invocation_id::TraceId; use dupe::Dupe; use gazebo::prelude::*; +use strum::IntoEnumIterator; use superconsole::components::DrawVertical; use superconsole::style::Attribute; use superconsole::style::Color; @@ -44,16 +50,19 @@ use superconsole::Lines; use superconsole::Span; pub(crate) use superconsole::SuperConsole; +use crate::console_interaction_stream::SuperConsoleToggle; use crate::subscribers::simpleconsole::SimpleConsole; +use crate::subscribers::subscriber::EventSubscriber; use crate::subscribers::subscriber::Tick; -use crate::subscribers::subscriber_unpack::UnpackingEventSubscriber; use crate::subscribers::superconsole::commands::CommandsComponent; use crate::subscribers::superconsole::debug_events::DebugEventsComponent; use crate::subscribers::superconsole::debugger::StarlarkDebuggerComponent; use crate::subscribers::superconsole::dice::DiceComponent; +use crate::subscribers::superconsole::header::TasksHeader; use crate::subscribers::superconsole::io::IoHeader; use crate::subscribers::superconsole::re::ReHeader; use crate::subscribers::superconsole::session_info::SessionInfoComponent; +use crate::subscribers::superconsole::system_warning::SystemWarningComponent; use crate::subscribers::superconsole::test::TestHeader; use crate::subscribers::superconsole::timed_list::Cutoffs; use crate::subscribers::superconsole::timed_list::TimedList; @@ -63,9 +72,12 @@ mod common; pub(crate) mod debug_events; mod debugger; pub(crate) mod dice; 
+mod header; pub(crate) mod io; mod re; pub mod session_info; +pub(crate) mod system_warning; + pub mod test; pub mod timed_list; @@ -77,10 +89,17 @@ pub const CUTOFFS: Cutoffs = Cutoffs { _notable: Duration::from_millis(200), }; -pub struct StatefulSuperConsole { +pub enum StatefulSuperConsole { + Running(StatefulSuperConsoleImpl), + /// After receiving the command output, any stdout, or an event stream error, the superconsole + /// will be "finalized" and any further events are handled by simpleconsole. + Finalized(SimpleConsole), +} + +pub struct StatefulSuperConsoleImpl { header: String, state: SuperConsoleState, - super_console: Option, + super_console: SuperConsole, verbosity: Verbosity, } @@ -114,6 +133,12 @@ pub struct SuperConsoleState { config: SuperConsoleConfig, } +impl SuperConsoleState { + pub fn extra(&self) -> &DebugEventObserverExtra { + self.simple_console.observer.extra() + } +} + #[derive(Clone)] pub struct SuperConsoleConfig { pub enable_dice: bool, @@ -122,6 +147,7 @@ pub struct SuperConsoleConfig { pub enable_io: bool, pub enable_commands: bool, pub display_platform: bool, + pub expanded_progress: bool, /// Two lines for root events with single child event. pub two_lines: bool, pub max_lines: usize, @@ -135,6 +161,7 @@ impl Default for SuperConsoleConfig { enable_detailed_re: false, enable_io: false, enable_commands: false, + expanded_progress: false, display_platform: false, two_lines: false, max_lines: 10, @@ -156,6 +183,58 @@ impl<'s> Component for BuckRootComponent<'s> { }); let mut draw = DrawVertical::new(dimensions); + + let last_snapshot = self + .state + .simple_console + .observer + .two_snapshots() + .last + .as_ref() + .map(|s| &s.1); + let first_snapshot = self + .state + .simple_console + .observer + .re_state() + .first_snapshot(); + let avg_re_download_speed = self + .state + .simple_console + .observer + .re_avg_download_speed() + .avg_per_second(); + let first_build_since_rebase = self + .state + .simple_console + .observer + .cold_build_detector + .as_ref() + .and_then(|cbd| cbd.first_build_since_rebase()) + .unwrap_or(false); + let estimated_completion_percent = estimate_completion_percentage( + self.state.simple_console.observer().spans().roots(), + self.state.simple_console.observer().dice_state(), + ); + let system_info = self.state.simple_console.observer.system_info(); + let action_stats = self.state.simple_console.observer.action_stats(); + let concurrent_commands = self.state.simple_console.observer.concurrent_commands; + { + draw.draw( + &SystemWarningComponent { + last_snapshot, + first_snapshot, + system_info, + avg_re_download_speed, + action_stats, + first_build_since_rebase, + estimated_completion_percent, + concurrent_commands, + }, + mode, + )?; + } + draw.draw( &SessionInfoComponent { session_info: self.state.session_info(), @@ -194,7 +273,7 @@ impl<'s> Component for BuckRootComponent<'s> { draw.draw( &DiceComponent { super_console_config: &self.state.config, - dice_state: self.state.simple_console.observer.extra().dice_state(), + dice_state: self.state.simple_console.observer.dice_state(), }, mode, )?; @@ -215,7 +294,8 @@ impl<'s> Component for BuckRootComponent<'s> { }, mode, )?; - draw.draw(&TimedList::new(&CUTOFFS, self.header, self.state), mode)?; + draw.draw(&TasksHeader::new(&self.header, self.state), mode)?; + draw.draw(&TimedList::new(&CUTOFFS, self.state), mode)?; Ok(draw.finish()) } @@ -235,6 +315,7 @@ impl StatefulSuperConsole { replay_speed: Option, stream: Option>, config: SuperConsoleConfig, + build_count_dir: 
Option, ) -> anyhow::Result { let mut builder = Self::console_builder(); if let Some(stream) = stream { @@ -248,6 +329,7 @@ impl StatefulSuperConsole { expect_spans, replay_speed, config, + build_count_dir, ) } @@ -258,6 +340,7 @@ impl StatefulSuperConsole { expect_spans: bool, replay_speed: Option, config: SuperConsoleConfig, + build_count_dir: Option, ) -> anyhow::Result> { match Self::console_builder().build()? { None => Ok(None), @@ -269,6 +352,7 @@ impl StatefulSuperConsole { expect_spans, replay_speed, config, + build_count_dir, )?)), } } @@ -281,14 +365,22 @@ impl StatefulSuperConsole { expect_spans: bool, replay_speed: Option, config: SuperConsoleConfig, + build_count_dir: Option, ) -> anyhow::Result { let header = format!("Command: {}.", command_name); - Ok(Self { + Ok(Self::Running(StatefulSuperConsoleImpl { header, - state: SuperConsoleState::new(replay_speed, trace_id, verbosity, expect_spans, config)?, - super_console: Some(super_console), + state: SuperConsoleState::new( + replay_speed, + trace_id, + verbosity, + expect_spans, + config, + build_count_dir, + )?, + super_console, verbosity, - }) + })) } /// Construct a console suitable for use by the Buck2 CLI. We use non-blocking output here @@ -316,14 +408,37 @@ impl StatefulSuperConsole { foreground_color: Some(Color::DarkRed), ..Default::default() }; - for message in &e.messages { + for e in &e.errors { lines .0 - .extend(Lines::from_multiline_string(message, style).0); + .extend(Lines::from_multiline_string(&e.message, style).0); } } lines } + + async fn handle_event(&mut self, ev: &Arc) -> anyhow::Result<()> { + match self { + Self::Running(c) => c.handle_event(ev).await, + Self::Finalized(c) => c.handle_event(ev).await, + } + } + + fn finalize(&mut self) -> anyhow::Result<()> { + let mut res = Ok(()); + take_mut::take(self, |this| match this { + Self::Running(super_console) => { + let (state, err) = super_console.finalize(); + if let Some(err) = err { + res = Err(err); + } + Self::Finalized(state.simple_console) + } + v => v, + }); + + res + } } impl SuperConsoleState { @@ -333,17 +448,23 @@ impl SuperConsoleState { verbosity: Verbosity, expect_spans: bool, config: SuperConsoleConfig, + build_count_dir: Option, ) -> anyhow::Result { Ok(SuperConsoleState { current_tick: Tick::now(), time_speed: TimeSpeed::new(replay_speed)?, - simple_console: SimpleConsole::with_tty(trace_id, verbosity, expect_spans), + simple_console: SimpleConsole::with_tty( + trace_id, + verbosity, + expect_spans, + build_count_dir, + ), config, }) } - pub fn update_event_observer(&mut self, event: &Arc) -> anyhow::Result<()> { - self.simple_console.update_event_observer(event) + pub async fn update_event_observer(&mut self, event: &Arc) -> anyhow::Result<()> { + self.simple_console.update_event_observer(event).await } pub fn session_info(&self) -> &SessionInfo { @@ -351,7 +472,9 @@ impl SuperConsoleState { } } -impl StatefulSuperConsole { +pub(crate) const BUCK_NO_INTERACTIVE_CONSOLE: &str = "BUCK_NO_INTERACTIVE_CONSOLE"; + +impl StatefulSuperConsoleImpl { async fn toggle( &mut self, what: &str, @@ -367,322 +490,215 @@ impl StatefulSuperConsole { self.handle_stderr(&format!("{what}: {on_off}, press `{key}` to revert")) .await } -} -// TODO(brasselsprouts): after deprecating filetailers, simplify these code paths -#[async_trait] -impl UnpackingEventSubscriber for StatefulSuperConsole { async fn handle_event(&mut self, event: &Arc) -> anyhow::Result<()> { - match &mut self.super_console { - Some(_) => { - self.handle_inner_event(event) - .await - 
.with_context(|| display::InvalidBuckEvent(event.clone()))?; - self.state.update_event_observer(event)?; - } - None => { - self.state.simple_console.handle_event(event).await?; - } - } + self.state.update_event_observer(event).await?; + + self.handle_inner_event(event) + .await + .with_context(|| display::InvalidBuckEvent(event.clone()))?; if self.verbosity.print_all_commands() { - // This is a bit messy. It would be better for this to go in the branch above, but we - // can't do that, because we call a method on `self` in a branch that takes a mutable - // borrow of the SuperConsole there. That works *only* if we don't use the console we - // borrowed. - if let Some(console) = &mut self.super_console { - what_ran::emit_event_if_relevant( - event.parent_id().into(), - event.data(), - self.state.simple_console.observer().spans(), - console, - &WhatRanOptions::default(), - )?; - } + let options = WhatRanOptions::default(); + let options_regex = WhatRanOptionsRegex::from_options(&options)?; + what_ran::emit_event_if_relevant( + event.parent_id().into(), + event.data(), + self.state.simple_console.observer().spans(), + &mut self.super_console, + &options_regex, + )?; } - Ok(()) } - async fn handle_stderr(&mut self, msg: &str) -> anyhow::Result<()> { - match &mut self.super_console { - Some(super_console) => { - super_console.emit(msg.lines().map(Line::sanitized).collect()); - Ok(()) + async fn handle_inner_event(&mut self, event: &BuckEvent) -> anyhow::Result<()> { + match unpack_event(event)? { + buck2_event_observer::unpack_event::UnpackedBuckEvent::SpanStart(_, _, _) => Ok(()), + buck2_event_observer::unpack_event::UnpackedBuckEvent::SpanEnd(_, _, data) => { + match data { + buck2_data::span_end_event::Data::ActionExecution(action) => { + self.handle_action_execution_end(action).await + } + buck2_data::span_end_event::Data::FileWatcher(file_watcher) => { + self.handle_file_watcher_end(file_watcher).await + } + _ => Ok(()), + } + } + buck2_event_observer::unpack_event::UnpackedBuckEvent::Instant(_, _, data) => { + match data { + buck2_data::instant_event::Data::ConsoleMessage(message) => { + self.handle_console_message(message).await + } + buck2_data::instant_event::Data::ConsoleWarning(message) => { + self.handle_console_warning(message).await + } + buck2_data::instant_event::Data::StructuredError(err) => { + self.handle_structured_error(err).await + } + buck2_data::instant_event::Data::TestResult(result) => { + self.handle_test_result(result).await + } + buck2_data::instant_event::Data::ConsolePreferences(preferences) => { + self.handle_console_preferences(preferences).await + } + buck2_data::instant_event::Data::ActionError(error) => { + self.handle_action_error(error).await + } + _ => Ok(()), + } + } + buck2_event_observer::unpack_event::UnpackedBuckEvent::UnrecognizedSpanStart(_, _) + | buck2_event_observer::unpack_event::UnpackedBuckEvent::UnrecognizedSpanEnd(_, _) + | buck2_event_observer::unpack_event::UnpackedBuckEvent::UnrecognizedInstant(_, _) => { + Err(VisitorError::MissingField(event.clone()).into()) } - None => self.state.simple_console.handle_stderr(msg).await, } } + async fn handle_stderr(&mut self, msg: &str) -> anyhow::Result<()> { + self.super_console + .emit(msg.lines().map(Line::sanitized).collect()); + Ok(()) + } + async fn handle_structured_error( &mut self, err: &buck2_data::StructuredError, - event: &BuckEvent, ) -> anyhow::Result<()> { if err.quiet { return Ok(()); } - match &mut self.super_console { - Some(super_console) => { - super_console.emit( - err.payload - 
.lines() - .map(|line| { - Line::from_iter([Span::new_colored_lossy(line, Color::DarkYellow)]) - }) - .collect(), - ); - Ok(()) - } - None => { - self.state - .simple_console - .handle_structured_error(err, event) - .await - } - } + + self.super_console.emit( + err.payload + .lines() + .map(|line| Line::from_iter([Span::new_colored_lossy(line, Color::DarkYellow)])) + .collect(), + ); + Ok(()) } async fn handle_file_watcher_end( &mut self, file_watcher: &buck2_data::FileWatcherEnd, - event: &BuckEvent, ) -> anyhow::Result<()> { - match &mut self.super_console { - Some(super_console) => { - if self.verbosity.print_status() { - super_console.emit(Lines( - display_file_watcher_end(file_watcher).into_map(|x| Line::sanitized(&x)), - )); - } - Ok(()) - } - None => { - self.state - .simple_console - .handle_file_watcher_end(file_watcher, event) - .await - } - } - } - - async fn handle_output(&mut self, raw_output: &[u8]) -> anyhow::Result<()> { - if let Some(super_console) = self.super_console.take() { - super_console.finalize(&BuckRootComponent { - header: &self.header, - state: &self.state, - })?; - } - - self.state.simple_console.handle_output(raw_output).await - } - - async fn handle_console_interaction(&mut self, c: char) -> anyhow::Result<()> { - if c == 'd' { - self.toggle("DICE component", 'd', |s| &mut s.state.config.enable_dice) - .await?; - } else if c == 'e' { - self.toggle("Debug events component", 'e', |s| { - &mut s.state.config.enable_debug_events - }) - .await?; - } else if c == '2' { - self.toggle("Two lines mode", '2', |s| &mut s.state.config.two_lines) - .await?; - } else if c == 'r' { - self.toggle("Detailed RE", 'r', |s| { - &mut s.state.config.enable_detailed_re - }) - .await?; - } else if c == 'i' { - self.toggle("I/O counters", 'i', |s| &mut s.state.config.enable_io) - .await?; - } else if c == 'p' { - self.toggle("Display target configurations", 'p', |s| { - &mut s.state.config.display_platform - }) - .await?; - } else if c == 'c' { - self.toggle("Commands", 'c', |s| &mut s.state.config.enable_commands) - .await?; - } else if c == '+' { - self.state.config.max_lines = self.state.config.max_lines.saturating_add(1); - } else if c == '-' { - self.state.config.max_lines = self.state.config.max_lines.saturating_sub(1); - } else if c == '?' 
|| c == 'h' { - self.handle_stderr( - "Help:\n\ - `d` = toggle DICE\n\ - `e` = toggle debug events\n\ - `2` = toggle two lines mode\n\ - `r` = toggle detailed RE\n\ - `i` = toggle I/O counters\n\ - `p` = display target configurations\n\ - `+` = show more lines\n\ - `-` = show fewer lines\n\ - `h` = show this help", - ) - .await?; + if self.verbosity.print_status() { + self.super_console.emit(Lines( + display_file_watcher_end(file_watcher).into_map(|x| Line::sanitized(&x)), + )); } Ok(()) } - async fn handle_command_result( - &mut self, - result: &buck2_cli_proto::CommandResult, - ) -> anyhow::Result<()> { - match self.super_console.take() { - Some(mut super_console) => { - let lines = Self::render_result_errors(result); - super_console.emit(lines); - super_console.finalize(&BuckRootComponent { - header: &self.header, - state: &self.state, - }) - } - None => { - self.state - .simple_console - .handle_command_result(result) - .await - } - } - } - - async fn tick(&mut self, tick: &Tick) -> anyhow::Result<()> { - match &mut self.super_console { - Some(super_console) => { - self.state.current_tick = tick.dupe(); - super_console.render(&BuckRootComponent { - header: &self.header, - state: &self.state, - }) - } - None => Ok(()), - } - } - - async fn handle_error(&mut self, _error: &anyhow::Error) -> anyhow::Result<()> { - match self.super_console.take() { - Some(super_console) => super_console.finalize(&BuckRootComponent { - header: &self.header, - state: &self.state, - }), - None => Ok(()), - } - } - async fn handle_console_message( &mut self, message: &buck2_data::ConsoleMessage, - event: &BuckEvent, ) -> anyhow::Result<()> { // TODO(nmj): Maybe better handling of messages that have color data in them. Right now // they're just stripped - match &mut self.super_console { - Some(super_console) => { - super_console.emit(Lines::from_multiline_string( - &message.message, - ContentStyle::default(), - )); - Ok(()) - } - None => { - self.state - .simple_console - .handle_console_message(message, event) - .await - } - } + self.super_console.emit(Lines::from_multiline_string( + &message.message, + ContentStyle::default(), + )); + Ok(()) + } + + async fn handle_console_warning( + &mut self, + message: &buck2_data::ConsoleWarning, + ) -> anyhow::Result<()> { + let style = ContentStyle { + foreground_color: Some(Color::Yellow), + ..Default::default() + }; + self.super_console + .emit(Lines::from_multiline_string(&message.message, style)); + Ok(()) } async fn handle_action_execution_end( &mut self, action: &buck2_data::ActionExecutionEnd, - event: &BuckEvent, ) -> anyhow::Result<()> { - let super_console = match &mut self.super_console { - Some(super_console) => super_console, - None => { - return self - .state - .simple_console - .handle_action_execution_end(action, event) - .await; - } - }; + if action.error.is_some() { + // Don't handle action errors here. We deal with them as a part of a separate + // `ActionError` event + return Ok(()); + } + + if let Some(stderr) = display::success_stderr(action, self.verbosity)? { + let mut lines = vec![]; + let display_platform = self.state.config.display_platform; + let action_id = StyledContent::new( + ContentStyle { + foreground_color: Some(Color::White), + attributes: Attribute::Bold.into(), + ..Default::default() + }, + format!( + "stderr for {}:", + display::display_action_identity( + action.key.as_ref(), + action.name.as_ref(), + TargetDisplayOptions::for_console(display_platform), + )? 
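From this point in the diff, `ActionExecutionEnd` is only used for the stderr of succeeded actions; failures now arrive separately as an `ActionError` instant event (routed through the new `handle_inner_event` matches earlier in this diff). The split, reduced to a toy shape with invented names:

enum Msg {
    Finished { failed: bool },
    Error(String),
}

fn on_finished(failed: bool) {
    if failed {
        // Nothing to do here: reporting happens in on_error when the
        // dedicated error message arrives.
        return;
    }
    // ... success-only output, e.g. stderr of a succeeded action ...
}

fn on_error(e: String) {
    eprintln!("Action failed: {e}");
}

fn handle(m: Msg) {
    match m {
        Msg::Finished { failed } => on_finished(failed),
        Msg::Error(e) => on_error(e),
    }
}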
+ ), + ); + lines.push(Line::from_iter([Span::new_styled_lossy(action_id)])); + lines.extend(Lines::from_colored_multiline_string(stderr)); + + self.super_console.emit(Lines(lines)); + } + Ok(()) + } + + async fn handle_action_error(&mut self, error: &buck2_data::ActionError) -> anyhow::Result<()> { let mut lines = vec![]; let display_platform = self.state.config.display_platform; - match action.error.as_ref() { - Some(error) => { - let display::ActionErrorDisplay { - action_id, - reason, - command, - } = display::display_action_error( - action, - error, - TargetDisplayOptions::for_console(display_platform), - )?; - lines.push(Line::from_iter([Span::new_styled_lossy( - StyledContent::new( - ContentStyle { - foreground_color: Some(Color::White), - attributes: Attribute::Bold.into(), - ..Default::default() - }, - format!("Action failed: {}", action_id,), - ), - )])); + let display::ActionErrorDisplay { + action_id, + reason, + command, + .. + } = display::display_action_error( + error, + TargetDisplayOptions::for_console(display_platform), + )?; - lines.push(Line::from_iter([Span::new_styled_lossy( - reason.with(Color::DarkRed), - )])); + lines.push(Line::from_iter([Span::new_styled_lossy( + StyledContent::new( + ContentStyle { + foreground_color: Some(Color::White), + attributes: Attribute::Bold.into(), + ..Default::default() + }, + format!("Action failed: {}", action_id,), + ), + )])); + + lines.extend( + reason.lines().map(|l| { + Line::from_iter([Span::new_styled_lossy(l.to_owned().with(Color::DarkRed))]) + }), + ); - if let Some(command) = command { - lines_for_command_details(&command, self.verbosity, &mut lines); - } - } - None => { - if let Some(stderr) = display::success_stderr(action, self.verbosity)? { - let action_id = StyledContent::new( - ContentStyle { - foreground_color: Some(Color::White), - attributes: Attribute::Bold.into(), - ..Default::default() - }, - format!( - "stderr for {}:", - display::display_action_identity( - action.key.as_ref(), - action.name.as_ref(), - TargetDisplayOptions::for_console(display_platform), - )? - ), - ); - lines.push(Line::from_iter([Span::new_styled_lossy(action_id)])); - lines.extend(Lines::from_colored_multiline_string(stderr)); - } - } + if let Some(command) = command { + lines_for_command_details(&command, self.verbosity, &mut lines); } - super_console.emit(Lines(lines)); + self.super_console.emit(Lines(lines)); Ok(()) } - async fn handle_test_result( - &mut self, - result: &buck2_data::TestResult, - _event: &BuckEvent, - ) -> anyhow::Result<()> { - if let Some(super_console) = &mut self.super_console { - if let Some(msg) = display::format_test_result(result)? { - super_console.emit(msg); - } + async fn handle_test_result(&mut self, result: &buck2_data::TestResult) -> anyhow::Result<()> { + if let Some(msg) = display::format_test_result(result)? { + self.super_console.emit(msg); } Ok(()) @@ -691,36 +707,171 @@ impl UnpackingEventSubscriber for StatefulSuperConsole { async fn handle_console_preferences( &mut self, prefs: &buck2_data::ConsolePreferences, - _event: &BuckEvent, ) -> anyhow::Result<()> { self.state.config.max_lines = prefs.max_lines.try_into()?; Ok(()) } - // Our state snapshot handles those for us. + async fn handle_console_interaction( + &mut self, + c: &Option, + ) -> anyhow::Result<()> { + match c { + Some(c) => match c { + SuperConsoleToggle::Dice => { + self.toggle(c.description(), c.key(), |s| { + &mut s.state.config.enable_dice + }) + .await? 
+ } + SuperConsoleToggle::DebugEvents => { + self.toggle(c.description(), c.key(), |s| { + &mut s.state.config.enable_debug_events + }) + .await? + } + SuperConsoleToggle::TwoLinesMode => { + self.toggle(c.description(), c.key(), |s| &mut s.state.config.two_lines) + .await? + } + SuperConsoleToggle::DetailedRE => { + self.toggle(c.description(), c.key(), |s| { + &mut s.state.config.enable_detailed_re + }) + .await? + } + SuperConsoleToggle::Io => { + self.toggle(c.description(), c.key(), |s| &mut s.state.config.enable_io) + .await? + } + SuperConsoleToggle::TargetConfigurations => { + self.toggle(c.description(), c.key(), |s| { + &mut s.state.config.display_platform + }) + .await? + } + SuperConsoleToggle::ExpandedProgress => { + self.toggle(c.description(), c.key(), |s| { + &mut s.state.config.expanded_progress + }) + .await? + } + SuperConsoleToggle::Commands => { + self.toggle(c.description(), c.key(), |s| { + &mut s.state.config.enable_commands + }) + .await? + } + SuperConsoleToggle::IncrLines { .. } => { + self.state.config.max_lines = self.state.config.max_lines.saturating_add(1) + } + SuperConsoleToggle::DecrLines { .. } => { + self.state.config.max_lines = self.state.config.max_lines.saturating_sub(1) + } + SuperConsoleToggle::Help { .. } => { + let help_message = SuperConsoleToggle::iter() + .map(|t| format!("`{}` = toggle {}", t.key(), t.description())) + .collect::>() + .join("\n"); + self.handle_stderr( + &format!("Help:\n{}\nenv var {BUCK_NO_INTERACTIVE_CONSOLE}=true disables interactive console", help_message), + ) + .await? + } + }, + None => {} + } + + Ok(()) + } + + async fn tick(&mut self, tick: &Tick) -> anyhow::Result<()> { + self.state.current_tick = tick.dupe(); + self.super_console.render(&BuckRootComponent { + header: &self.header, + state: &self.state, + })?; + Ok(()) + } - async fn handle_command_start( + async fn handle_command_result( &mut self, - _command: &buck2_data::CommandStart, - _event: &BuckEvent, + result: &buck2_cli_proto::CommandResult, ) -> anyhow::Result<()> { + let lines = StatefulSuperConsole::render_result_errors(result); + self.super_console.emit(lines); + Ok(()) + } + + fn finalize(self) -> (SuperConsoleState, Option) { + let err = self + .super_console + .finalize(&BuckRootComponent { + header: &self.header, + state: &self.state, + }) + .err(); + (self.state, err) + } +} + +#[async_trait] +impl EventSubscriber for StatefulSuperConsole { + async fn handle_events(&mut self, events: &[Arc]) -> anyhow::Result<()> { + for ev in events { + self.handle_event(ev).await?; + } Ok(()) } - async fn handle_command_end( + async fn handle_output(&mut self, raw_output: &[u8]) -> anyhow::Result<()> { + self.finalize()?; + match self { + Self::Running(_) => unreachable!(), + Self::Finalized(c) => c.handle_output(raw_output).await, + } + } + + async fn handle_tailer_stderr(&mut self, stderr: &str) -> anyhow::Result<()> { + match self { + StatefulSuperConsole::Running(c) => c.handle_stderr(stderr).await, + StatefulSuperConsole::Finalized(c) => c.handle_stderr(stderr).await, + } + } + + async fn handle_console_interaction( &mut self, - _command: &buck2_data::CommandEnd, - _event: &BuckEvent, + c: &Option, ) -> anyhow::Result<()> { + if let Self::Running(super_console) = self { + super_console.handle_console_interaction(c).await?; + } + Ok(()) } - async fn handle_test_discovery( + async fn handle_command_result( &mut self, - _test_info: &buck2_data::TestDiscovery, - _event: &BuckEvent, + result: &buck2_cli_proto::CommandResult, ) -> anyhow::Result<()> { + match 
self { + Self::Running(c) => c.handle_command_result(result).await?, + Self::Finalized(c) => c.handle_command_result(result).await?, + } + self.finalize()?; + Ok(()) + } + + async fn tick(&mut self, tick: &Tick) -> anyhow::Result<()> { + if let Self::Running(super_console) = self { + super_console.tick(tick).await?; + } + Ok(()) + } + + async fn handle_error(&mut self, _error: &buck2_error::Error) -> anyhow::Result<()> { + self.finalize()?; Ok(()) } } @@ -850,15 +1001,14 @@ mod tests { use buck2_data::SpanEndEvent; use buck2_data::SpanStartEvent; use buck2_events::span::SpanId; - use buck2_wrapper_common::invocation_id::TraceId; - use superconsole::testing::frame_contains; + use superconsole::testing::assert_frame_contains; use superconsole::testing::test_console; use superconsole::testing::SuperConsoleTestingExt; use super::*; #[tokio::test] - async fn test_transfer_state_to_simpleconsole() { + async fn test_transfer_state_to_simpleconsole() -> anyhow::Result<()> { let trace_id = TraceId::new(); let mut console = StatefulSuperConsole::new_with_root_forced( trace_id.dupe(), @@ -868,11 +1018,12 @@ mod tests { None, None, Default::default(), + None, ) .unwrap(); // start a new event. - let id = SpanId::new(); + let id = SpanId::next(); let event = Arc::new(BuckEvent::new( SystemTime::now(), trace_id, @@ -911,12 +1062,16 @@ mod tests { module_id: "foo".to_owned(), cell: "bar".to_owned(), error: None, + starlark_peak_allocated_bytes: Some(0), + cpu_instruction_count: None, + target_count: Some(10), })), stats: None, duration: None, }), )); assert!(console.handle_event(&event).await.is_ok()); + Ok(()) } #[tokio::test] @@ -933,13 +1088,14 @@ mod tests { true, Default::default(), Default::default(), + None, )?; console .handle_event(&Arc::new(BuckEvent::new( now, trace_id.dupe(), - Some(SpanId::new()), + Some(SpanId::next()), None, buck2_data::buck_event::Data::SpanStart(SpanStartEvent { data: Some( @@ -976,7 +1132,7 @@ mod tests { .handle_event(&Arc::new(BuckEvent::new( now, trace_id.dupe(), - Some(SpanId::new()), + Some(SpanId::next()), None, SpanStartEvent { data: Some( @@ -993,24 +1149,27 @@ mod tests { console.tick(&tick).await?; - let frame = console - .super_console - .as_mut() - .context("Console was downgraded")? - .test_output_mut()? - .frames - .pop() - .context("No frame was emitted")?; + let frame = match &mut console { + StatefulSuperConsole::Running(c) => c + .super_console + .test_output_mut()? 
+ .frames + .pop() + .context("No frame was emitted")?, + StatefulSuperConsole::Finalized(_) => { + panic!("Console was downgraded"); + } + }; // Verify we have the right output on intermediate frames if cfg!(fbcode_build) { - assert!(frame_contains(&frame, "Buck UI:")); + assert_frame_contains(&frame, "Buck UI:"); } else { - assert!(frame_contains(&frame, "Build ID:")); + assert_frame_contains(&frame, "Build ID:"); } - assert!(frame_contains(&frame, "Network:")); - assert!(frame_contains(&frame, "(reSessionID-123)")); - assert!(frame_contains(&frame, "Remaining")); + assert_frame_contains(&frame, "Network:"); + assert_frame_contains(&frame, "(reSessionID-123)"); + assert_frame_contains(&frame, "Remaining"); console .handle_command_result(&buck2_cli_proto::CommandResult { result: None }) @@ -1026,7 +1185,7 @@ mod tests { test_session: Some(buck2_data::TestSessionInfo { info: (0..100).map(|_| "a").collect(), }), - modern_dice: false, + legacy_dice: false, }; let full = SessionInfoComponent { @@ -1072,4 +1231,39 @@ mod tests { Ok(()) } + + #[tokio::test] + async fn test_tailer_stderr() -> anyhow::Result<()> { + let trace_id = TraceId::new(); + let tick = Tick::now(); + + let mut console = StatefulSuperConsole::new( + "build", + trace_id.dupe(), + test_console(), + Verbosity::default(), + true, + Default::default(), + Default::default(), + None, + )?; + + console.handle_tailer_stderr("some stderr output").await?; + console.tick(&tick).await?; + + let frame = match &mut console { + StatefulSuperConsole::Running(c) => c + .super_console + .test_output_mut()? + .frames + .pop() + .context("No frame was emitted")?, + StatefulSuperConsole::Finalized(_) => { + panic!("Console was downgraded"); + } + }; + + assert_frame_contains(&frame, "some stderr output"); + Ok(()) + } } diff --git a/app/buck2_client_ctx/src/subscribers/superconsole/dice.rs b/app/buck2_client_ctx/src/subscribers/superconsole/dice.rs index da71ceeb89b7c..3a524421c142f 100644 --- a/app/buck2_client_ctx/src/subscribers/superconsole/dice.rs +++ b/app/buck2_client_ctx/src/subscribers/superconsole/dice.rs @@ -33,8 +33,8 @@ impl<'s> Component for DiceComponent<'s> { let mut lines = vec!["Dice Key States".to_owned()]; let header = format!( - " {:<42} {:>6} {:>6} {:>6}", - " Key", "ChkDeps", "Pending", "Done" + " {:<42} {:>6} {:>6} {:>6} {:>6}", + " Key", "ChkDeps", "Compute", "Pending", "Done" ); let header_len = header.len(); lines.push(header); @@ -44,14 +44,16 @@ impl<'s> Component for DiceComponent<'s> { // silly with some keys claiming to be in progress, but this is a debug component so // that is probably OK. let check_deps = v.check_deps_started - v.check_deps_finished; + let computing = v.compute_started - v.compute_finished; let pending = v.started - v.finished; let finished = v.finished; lines.push(format!( - " {:<40} | {} | {} | {}", + " {:<40} | {} | {} | {} | {}", // Dice key states are all ascii if k.len() > 40 { &k[..40] } else { k }, HumanizedCount::fixed_width(check_deps.into()), + HumanizedCount::fixed_width(computing.into()), HumanizedCount::fixed_width(pending.into()), HumanizedCount::fixed_width(finished.into()) )); diff --git a/app/buck2_client_ctx/src/subscribers/superconsole/header.rs b/app/buck2_client_ctx/src/subscribers/superconsole/header.rs new file mode 100644 index 0000000000000..d45648c004022 --- /dev/null +++ b/app/buck2_client_ctx/src/subscribers/superconsole/header.rs @@ -0,0 +1,879 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::time::Duration;
+
+use buck2_event_observer::action_stats::ActionStats;
+use buck2_event_observer::fmt_duration;
+use buck2_event_observer::humanized::HumanizedCount;
+use buck2_event_observer::pending_estimate::pending_estimate;
+use buck2_event_observer::progress::BuildProgressPhaseStats;
+use buck2_event_observer::progress::BuildProgressStats;
+use superconsole::Component;
+use superconsole::Dimensions;
+use superconsole::DrawMode;
+use superconsole::Line;
+use superconsole::Lines;
+
+use crate::subscribers::superconsole::common::HeaderLineComponent;
+use crate::subscribers::superconsole::common::StaticStringComponent;
+use crate::subscribers::superconsole::SuperConsoleState;
+
+pub(crate) struct TasksHeader<'s> {
+    header: &'s str,
+    state: &'s SuperConsoleState,
+}
+
+impl<'s> TasksHeader<'s> {
+    pub fn new(header: &'s str, state: &'s SuperConsoleState) -> Self {
+        Self { header, state }
+    }
+}
+
+impl<'s> Component for TasksHeader<'s> {
+    fn draw_unchecked(&self, dimensions: Dimensions, mode: DrawMode) -> anyhow::Result<Lines> {
+        if self.state.config.expanded_progress {
+            let mut phase_stats = self.state.extra().progress_state().phase_stats();
+            if let DrawMode::Final = mode {
+                phase_stats.loads.mark_all_finished();
+                phase_stats.analyses.mark_all_finished();
+                phase_stats.actions.mark_all_finished();
+            }
+
+            ProgressHeader {
+                header: self.header,
+                phase_stats: &phase_stats,
+                progress_stats: self.state.extra().progress_state().progress_stats(),
+                action_stats: self.state.simple_console.observer.action_stats(),
+                time_elapsed: time_elapsed(self.state),
+            }
+            .draw(dimensions, mode)
+        } else {
+            SimpleHeader::new(self.header, self.state).draw(dimensions, mode)
+        }
+    }
+}
+
+struct HeaderData<'s> {
+    header: &'s str,
+    action_stats: &'s ActionStats,
+    elapsed_str: String,
+    finished: u64,
+    remaining: u64,
+}
+
+impl<'s> HeaderData<'s> {
+    fn from_state(header: &'s str, state: &'s SuperConsoleState) -> Self {
+        let observer = state.simple_console.observer();
+        let spans = observer.spans();
+        let pending = pending_estimate(spans.roots(), observer.dice_state());
+        let finished = spans.roots_completed() as u64;
+        let remaining = spans.iter_roots().len() as u64 + pending;
+
+        HeaderData {
+            header,
+            action_stats: state.simple_console.observer().action_stats(),
+            elapsed_str: time_elapsed(state),
+            finished,
+            remaining,
+        }
+    }
+
+    fn total(&self) -> u64 {
+        self.finished + self.remaining
+    }
+}
+
+struct SimpleHeader<'s> {
+    data: HeaderData<'s>,
+}
+
+impl<'s> SimpleHeader<'s> {
+    fn new(header: &'s str, state: &'s SuperConsoleState) -> Self {
+        Self::new_for_data(HeaderData::from_state(header, state))
+    }
+
+    fn new_for_data(data: HeaderData<'s>) -> Self {
+        Self { data }
+    }
+}
+
+impl<'s> Component for SimpleHeader<'s> {
+    fn draw_unchecked(&self, dimensions: Dimensions, mode: DrawMode) -> anyhow::Result<Lines> {
+        match mode {
+            DrawMode::Normal => HeaderLineComponent::new(
+                StaticStringComponent {
+                    header: self.data.header,
+                },
+                CountComponent { data: &self.data },
+            )
+            .draw(dimensions, mode),
+            DrawMode::Final => CountComponent { data: &self.data }.draw(dimensions, mode),
+        }
+    }
+}
+
+fn time_elapsed(state: &SuperConsoleState) -> String {
+    fmt_duration::fmt_duration(state.current_tick.elapsed_time, 
state.time_speed.speed())
+}
+
+/// This component is used to display summary counts about the number of jobs.
+struct CountComponent<'s> {
+    data: &'s HeaderData<'s>,
+}
+
+impl<'s> Component for CountComponent<'s> {
+    fn draw_unchecked(&self, _dimensions: Dimensions, mode: DrawMode) -> anyhow::Result<Lines> {
+        match mode {
+            DrawMode::Normal => {
+                let remaining = HumanizedCount::new(self.data.remaining);
+                let total = HumanizedCount::new(self.data.total());
+
+                let contents = if self.data.action_stats.log_stats() {
+                    let mut actions_summary = format!(
+                        "Remaining: {}/{}. Cache hits: {}%. ",
+                        remaining,
+                        total,
+                        self.data.action_stats.total_cache_hit_percentage()
+                    );
+                    if self.data.action_stats.fallback_actions > 0 {
+                        actions_summary += format!(
+                            "Fallback: {}/{}. ",
+                            HumanizedCount::new(self.data.action_stats.fallback_actions),
+                            HumanizedCount::new(self.data.action_stats.total_executed_actions())
+                        )
+                        .as_str();
+                    }
+                    actions_summary += format!("Time elapsed: {}", self.data.elapsed_str).as_str();
+                    actions_summary
+                } else {
+                    format!(
+                        "Remaining: {}/{}. Time elapsed: {}",
+                        self.data.remaining,
+                        self.data.total(),
+                        self.data.elapsed_str
+                    )
+                };
+                Ok(Lines(vec![Line::unstyled(&contents)?]))
+            }
+            DrawMode::Final => {
+                let mut lines = vec![Line::unstyled(&format!(
+                    "Jobs completed: {}. Time elapsed: {}.",
+                    self.data.finished, self.data.elapsed_str,
+                ))?];
+                if self.data.action_stats.log_stats() {
+                    lines.push(Line::unstyled(&self.data.action_stats.to_string())?);
+                }
+                Ok(Lines(lines))
+            }
+        }
+    }
+}
+
+pub(crate) struct ProgressHeader<'s> {
+    header: &'s str,
+    phase_stats: &'s BuildProgressPhaseStats,
+    progress_stats: &'s BuildProgressStats,
+    action_stats: &'s ActionStats,
+    time_elapsed: String,
+}
+
+#[derive(Clone, Copy)]
+enum Style {
+    Normal(usize),
+    Compact(usize),
+    ExtraCompact,
+}
+
+impl Style {
+    fn render(
+        &self,
+        mode: DrawMode,
+        header: &str,
+        mut pending: u64,
+        total: u64,
+        running_str: &str,
+        running_num: u64,
+    ) -> String {
+        if let DrawMode::Final = mode {
+            pending = 0;
+        }
+        let mut line = match self {
+            Style::Normal(num_width) | Style::Compact(num_width) => format!(
+                "{header} Remaining {pending:>num_width$}/{total:<num_width$}",
+                header = header,
+                pending = pending,
+                total = total,
+                num_width = num_width,
+            ),
+            Style::ExtraCompact => {
+                format!(
+                    "{header} Remaining {pending}/{total}",
+                    header = header,
+                    pending = pending,
+                    total = total,
+                )
+            }
+        };
+
+        if let DrawMode::Normal = mode {
+            line += &match self {
+                Style::Normal(_) | Style::Compact(_) => {
+                    format!(" (running: {running_str})", running_str = running_str,)
+                }
+                Style::ExtraCompact => {
+                    format!(" ({running_num})", running_num = running_num,)
+                }
+            };
+        }
+        line
+    }
+
+    fn display_num(&self, num: u64) -> String {
+        match self {
+            Style::Normal(num_width) | Style::Compact(num_width) => {
+                format!("{:num_width$}", num, num_width = num_width)
+            }
+            Style::ExtraCompact => format!("{}", num),
+        }
+    }
+}
+
+impl ProgressHeader<'_> {
+    fn render_loads(&self, style: Style, mode: DrawMode) -> String {
+        style.render(
+            mode,
+            "Loading targets. 
", + self.phase_stats.loads.pending(), + self.phase_stats.loads.started, + &style.display_num(self.phase_stats.loads.running), + self.phase_stats.loads.running, + ) + } + + fn render_loads_extra(&self) -> String { + let mut msgs = Vec::new(); + if self.progress_stats.dirs_read > 0 { + msgs.push(format!("{} dirs read", self.progress_stats.dirs_read)); + } + if self.progress_stats.targets > 0 { + msgs.push(format!("{} targets declared", self.progress_stats.targets)); + } + msgs.join(", ") + } + + fn render_analyses(&self, style: Style, mode: DrawMode) -> String { + style.render( + mode, + "Analyzing targets.", + self.phase_stats.analyses.pending(), + self.phase_stats.analyses.started, + &style.display_num(self.phase_stats.analyses.running), + self.phase_stats.analyses.running, + ) + } + + fn render_analyses_extra(&self) -> String { + let mut msgs = Vec::new(); + if self.progress_stats.actions_declared > 0 { + msgs.push(format!("{} actions", self.progress_stats.actions_declared)); + } + if self.progress_stats.artifacts_declared > 0 { + msgs.push(format!( + "{} artifacts declared", + self.progress_stats.artifacts_declared + )); + } + msgs.join(", ") + } + + fn render_actions(&self, style: Style, mode: DrawMode) -> String { + let phase_stats = &self.phase_stats.actions; + + let mut running = Vec::new(); + if self.progress_stats.running_local > 0 || self.action_stats.local_actions > 0 { + running.push(format!( + "{} local", + style.display_num(self.progress_stats.running_local), + )); + } + if self.progress_stats.running_remote > 0 || self.action_stats.remote_actions > 0 { + running.push(format!( + "{} remote", + style.display_num(self.progress_stats.running_remote), + )); + } + + let running_str = if running.is_empty() { + style.display_num(0) + } else { + running.join(", ") + }; + + style.render( + mode, + "Executing actions.", + phase_stats.pending(), + phase_stats.started, + &running_str, + phase_stats.running, + ) + } + + fn render_actions_extra(&self) -> String { + let exec_time_ms = self.progress_stats.exec_time_ms; + if exec_time_ms > 0 { + format!( + "{} exec time total", + fmt_duration::fmt_duration(Duration::from_millis(exec_time_ms), 1.0), + ) + } else { + String::new() + } + } + + fn render_actions_stats(&self, style: Style) -> String { + match style { + Style::Normal(_) | Style::Compact(_) => { + let compact = matches!(style, Style::Compact(_)); + + let mut res_types = Vec::new(); + if self.action_stats.local_actions > 0 { + res_types.push(format!("{} local", self.action_stats.local_actions)); + } + if self.action_stats.remote_actions > 0 { + res_types.push(format!("{} remote", self.action_stats.remote_actions)); + } + if self.action_stats.total_cached_actions() > 0 { + res_types.push(format!( + "{} cache ({}%{})", + self.action_stats.total_cached_actions(), + self.action_stats.total_cache_hit_percentage(), + if compact { "" } else { " hit" } + )); + } + + if res_types.is_empty() { + String::new() + } else { + format!( + "{}{}", + if compact { "" } else { "Finished " }, + res_types.join(", ") + ) + } + } + + Style::ExtraCompact => { + if self.action_stats.total_cached_actions() > 0 { + format!( + "Cache hits {}%", + self.action_stats.total_cache_hit_percentage() + ) + } else { + String::new() + } + } + } + } + + fn render_actions_stats_extra(&self) -> String { + let exec_time_ms = self.progress_stats.exec_time_ms; + let cached_exec_time_ms = self.progress_stats.cached_exec_time_ms; + + if cached_exec_time_ms > 0 { + format!( + "{} exec time cached ({}%)", + 
fmt_duration::fmt_duration(Duration::from_millis(cached_exec_time_ms), 1.0),
+                cached_exec_time_ms * 100 / std::cmp::max(exec_time_ms, 1)
+            )
+        } else {
+            String::new()
+        }
+    }
+}
+
+impl<'s> Component for ProgressHeader<'s> {
+    fn draw_unchecked(&self, dimensions: Dimensions, mode: DrawMode) -> anyhow::Result<Lines> {
+        fn digits_len(v: u64) -> usize {
+            (v.checked_ilog10().unwrap_or(0) + 1) as usize
+        }
+
+        let loads = &self.phase_stats.loads;
+        let analysis = &self.phase_stats.analyses;
+        let actions = &self.phase_stats.actions;
+
+        let max_total = std::cmp::max(
+            std::cmp::max(loads.started, analysis.started),
+            actions.started,
+        );
+
+        let num_width = std::cmp::max(5, digits_len(max_total));
+
+        let header_width = "Executing actions. Remaining _/_ (running: _ local, _ remote) ".len()
+            + 4 * (num_width - 1);
+
+        let elapsed = format!("Time elapsed: {}", &self.time_elapsed);
+
+        // During normal drawing, the elapsed time is in the last row at the end. In the final rendering it gets its own line and is on the left.
+        let inline_elapsed = match mode {
+            DrawMode::Normal => &elapsed,
+            DrawMode::Final => "",
+        };
+
+        let long_middle_len = "111222333 actions, 111222333 artifacts declared ".len();
+
+        let style = if header_width + long_middle_len < dimensions.width {
+            Style::Normal(num_width)
+        } else if header_width < dimensions.width {
+            Style::Compact(num_width)
+        } else {
+            Style::ExtraCompact
+        };
+
+        let mut main = Vec::new();
+        let mut extra = Vec::new();
+
+        if loads.started > 0 {
+            main.push(self.render_loads(style, mode));
+            if let Style::Normal(..) = style {
+                extra.push(self.render_loads_extra());
+            } else {
+                extra.push(String::new());
+            }
+        }
+
+        if analysis.started > 0 {
+            main.push(self.render_analyses(style, mode));
+            if let Style::Normal(..) = style {
+                extra.push(self.render_analyses_extra());
+            } else {
+                extra.push(String::new());
+            }
+        }
+
+        if actions.started == 0 {
+            main.push(self.header.to_owned());
+            extra.push(String::new());
+        } else {
+            main.push(self.render_actions(style, mode));
+            main.push(format!(
+                // typically aligns this with "Remaining:" in the line above, but a long header would push it over, which is okay
+                "{:<18} {}",
+                self.header,
+                self.render_actions_stats(if dimensions.width > 90 {
+                    Style::Normal(num_width)
+                } else {
+                    style
+                })
+            ));
+            if let Style::Normal(..) = style {
+                extra.push(self.render_actions_extra());
+                extra.push(self.render_actions_stats_extra());
+            } else {
+                extra.push(String::new());
+                extra.push(String::new());
+            }
+        }
+
+        assert!(!extra.is_empty());
+        assert_eq!(main.len(), extra.len());
+
+        // We now have the "main" column and the "extra" column and we want to lay them out. In addition, we're going to insert
+        // the "Time elapsed: 12s" string at the end of the final line.
+        //
+        // The main column is printed on the left and then padded to align the extra column.
+        // As long as there is less than `extra_preferred_width` space, the extra column will go immediately after the main column,
+        // once it's wider than that we'll right align it.
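+        //
+        // Concretely, `extra_final_width` below is clamped to at least
+        // `extra_min_width` (room for the widest extra row plus the trailing
+        // "Time elapsed" string) and at most `extra_max_width` (whatever is
+        // left once the main column and a two-space gutter are placed), and
+        // `pad_to` then pads the main column so the extra column lands at the
+        // right edge of the terminal.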
+
+        let main_width = main.iter().map(String::len).max().unwrap();
+
+        let extra_preferred_width = long_middle_len + 20;
+        let extra_width = extra.iter().map(String::len).max().unwrap();
+        // need to append elapsed time to the final line
+        let extra_min_width = 2 + std::cmp::max(
+            extra_width,
+            extra.last().unwrap().len() + inline_elapsed.len() + 2,
+        );
+        let extra_max_width = dimensions.width.saturating_sub(main_width + 2);
+
+        // If there's not actually enough space to draw them both, we'll prefer for the extra column to be truncated.
+        let extra_final_width = std::cmp::min(
+            std::cmp::max(extra_preferred_width, extra_min_width),
+            extra_max_width,
+        );
+
+        let pad_to = std::cmp::max(
+            main_width,
+            dimensions.width.saturating_sub(extra_final_width),
+        );
+
+        let mut lines = Vec::new();
+        for i in 0..main.len() {
+            let mut line = format!("{:<pad_to$}{}", main[i], extra[i], pad_to = pad_to);
+
+            if i + 1 == main.len() && !inline_elapsed.is_empty() {
+                let wanted_len = dimensions.width.saturating_sub(inline_elapsed.len() + 1);
+                if line.len() > wanted_len {
+                    // If we're going to have to truncate the extra column for the elapsed time, just drop it in this row.
+                    line = main[i].to_owned();
+                }
+
+                if line.len() < wanted_len {
+                    line += &" ".repeat(wanted_len - line.len());
+                } else {
+                    line.truncate(wanted_len);
+                }
+                line += " ";
+                line += inline_elapsed;
+            }
+
+            lines.push(Line::unstyled(&line)?);
+        }
+
+        if let DrawMode::Final = mode {
+            lines.push(Line::unstyled(&elapsed)?);
+        }
+
+        Ok(Lines(lines))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::fmt::Write;
+
+    use buck2_event_observer::progress::BuildProgressPhaseStatsItem;
+    use itertools::Itertools;
+
+    use super::*;
+
+    fn phase_stats() -> BuildProgressPhaseStats {
+        BuildProgressPhaseStats {
+            loads: BuildProgressPhaseStatsItem {
+                started: 11111,
+                finished: 111,
+                running: 11,
+            },
+            analyses: BuildProgressPhaseStatsItem {
+                started: 22222,
+                finished: 222,
+                running: 22,
+            },
+            actions: BuildProgressPhaseStatsItem {
+                started: 33333,
+                finished: 333,
+                running: 100,
+            },
+        }
+    }
+
+    fn progress_stats() -> BuildProgressStats {
+        BuildProgressStats {
+            dirs_read: 111,
+            targets: 22222,
+            actions_declared: 3333333,
+            artifacts_declared: 4444444,
+            running_local: 55,
+            running_remote: 66,
+            exec_time_ms: 7777000,
+            cached_exec_time_ms: 666000,
+        }
+    }
+
+    fn action_stats() -> ActionStats {
+        ActionStats {
+            local_actions: 100,
+            remote_actions: 122,
+            cached_actions: 133,
+            fallback_actions: 0,
+            remote_dep_file_cached_actions: 0,
+        }
+    }
+
+    #[test]
+    fn test_different_sizes_dont_fail() -> anyhow::Result<()> {
+        let phase_stats = &phase_stats();
+        let progress_stats = &progress_stats();
+        let action_stats = &action_stats();
+        for i in 0..120 {
+            let header = ProgressHeader {
+                header: "header",
+                phase_stats,
+                progress_stats,
+                action_stats,
+                time_elapsed: "1234s".to_owned(),
+            };
+
+            header.draw(
+                Dimensions {
+                    width: i,
+                    height: 10,
+                },
+                DrawMode::Normal,
+            )?;
+            header.draw(
+                Dimensions {
+                    width: i,
+                    height: 10,
+                },
+                DrawMode::Final,
+            )?;
+        }
+        Ok(())
+    }
+
+    #[test]
+    fn test_rendering_golden() -> anyhow::Result<()> {
+        let mut all_output = String::new();
+
+        fn draw(
+            width: usize,
+            normal: bool,
+            phase_stats: &BuildProgressPhaseStats,
+        ) -> anyhow::Result<Lines> {
+            ProgressHeader {
+                header: "header",
+                phase_stats,
+                progress_stats: &progress_stats(),
+                action_stats: &action_stats(),
+                time_elapsed: "1234s".to_owned(),
+            }
+            .draw(
+                Dimensions { width, height: 10 },
+                if normal {
+                    DrawMode::Normal
+                } else {
+                    DrawMode::Final
+                },
+            )
+        }
+
+        // 129 looks out of place here, but it tests the case where we have an extra column but Time elapsed won't quite fit.
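+        // The width sweep exercises all three `Style` tiers, from `ExtraCompact`
+        // at the narrowest terminals up to `Normal` with the right-hand extra
+        // column, plus the elapsed-time edge case called out above.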
+ for width in [30, 40, 60, 80, 100, 129, 130, 140, 160] { + writeln!( + &mut all_output, + "{}", + draw(width, true, &phase_stats())?.fmt_for_test() + )?; + } + + for width in [60, 140] { + writeln!( + &mut all_output, + "{}", + draw(width, false, &phase_stats())?.fmt_for_test() + )?; + } + + let expected = indoc::indoc!( + r#" + Loading targets. Remaining 1 + Analyzing targets. Remaining 2 + Executing actions. Remaining 3 + header Time elapsed: 1234s + + Loading targets. Remaining 11000/11111 + Analyzing targets. Remaining 22000/22222 + Executing actions. Remaining 33000/33333 + header Time elapsed: 1234s + + Loading targets. Remaining 11000/11111 (11) + Analyzing targets. Remaining 22000/22222 (22) + Executing actions. Remaining 33000/33333 (100) + header Cache hits 37% Time elapsed: 1234s + + Loading targets. Remaining 11000/11111 (running: 11) + Analyzing targets. Remaining 22000/22222 (running: 22) + Executing actions. Remaining 33000/33333 (running: 55 local, 66 remote) + header 100 local, 122 remote, 133 cache (37%) Time elapsed: 1234s + + Loading targets. Remaining 11000/11111 (running: 11) + Analyzing targets. Remaining 22000/22222 (running: 22) + Executing actions. Remaining 33000/33333 (running: 55 local, 66 remote) + header Finished 100 local, 122 remote, 133 cache (37% hit) Time elapsed: 1234s + + Loading targets. Remaining 11000/11111 (running: 11) 111 dirs read, 22222 targets declared + Analyzing targets. Remaining 22000/22222 (running: 22) 3333333 actions, 4444444 artifacts declared + Executing actions. Remaining 33000/33333 (running: 55 local, 66 remote) 2:09:37.0s exec time total + header Finished 100 local, 122 remote, 133 cache (37% hit) Time elapsed: 1234s + + Loading targets. Remaining 11000/11111 (running: 11) 111 dirs read, 22222 targets declared + Analyzing targets. Remaining 22000/22222 (running: 22) 3333333 actions, 4444444 artifacts declared + Executing actions. Remaining 33000/33333 (running: 55 local, 66 remote) 2:09:37.0s exec time total + header Finished 100 local, 122 remote, 133 cache (37% hit) 11:06.0s exec time cached (8%) Time elapsed: 1234s + + Loading targets. Remaining 11000/11111 (running: 11) 111 dirs read, 22222 targets declared + Analyzing targets. Remaining 22000/22222 (running: 22) 3333333 actions, 4444444 artifacts declared + Executing actions. Remaining 33000/33333 (running: 55 local, 66 remote) 2:09:37.0s exec time total + header Finished 100 local, 122 remote, 133 cache (37% hit) 11:06.0s exec time cached (8%) Time elapsed: 1234s + + Loading targets. Remaining 11000/11111 (running: 11) 111 dirs read, 22222 targets declared + Analyzing targets. Remaining 22000/22222 (running: 22) 3333333 actions, 4444444 artifacts declared + Executing actions. Remaining 33000/33333 (running: 55 local, 66 remote) 2:09:37.0s exec time total + header Finished 100 local, 122 remote, 133 cache (37% hit) 11:06.0s exec time cached (8%) Time elapsed: 1234s + + Loading targets. Remaining 0/11111 + Analyzing targets. Remaining 0/22222 + Executing actions. Remaining 0/33333 + header Cache hits 37% + Time elapsed: 1234s + + Loading targets. Remaining 0/11111 111 dirs read, 22222 targets declared + Analyzing targets. Remaining 0/22222 3333333 actions, 4444444 artifacts declared + Executing actions. 
Remaining 0/33333 2:09:37.0s exec time total + header Finished 100 local, 122 remote, 133 cache (37% hit) 11:06.0s exec time cached (8%) + Time elapsed: 1234s + + "# + ); + + // copy-paste is easier if we don't need to worry about getting trailing spaces right + let expected = expected.lines().map(str::trim_end).join("\n"); + let all_output = all_output.lines().map(str::trim_end).join("\n"); + + // don't use pretty_assertions here because we mostly just want to copy-paste the golden + assert!( + all_output == expected, + "GOLDEN:\n{}\nEND_GOLDEN\nEXPECTED:\n{}\nEND_EXPECTED", + all_output, + expected + ); + + Ok(()) + } + + #[test] + fn test_remaining() -> anyhow::Result<()> { + let action_stats = ActionStats { + local_actions: 0, + remote_actions: 0, + cached_actions: 1, + fallback_actions: 0, + remote_dep_file_cached_actions: 0, + }; + let output = SimpleHeader::new_for_data(HeaderData { + header: "test", + action_stats: &action_stats, + elapsed_str: "123s".to_owned(), + finished: 0, + remaining: 3, + }) + .draw( + Dimensions { + width: 40, + height: 10, + }, + DrawMode::Normal, + )?; + let expected = "testRemaining: 3/3. Cache hits: 100%. Ti\n".to_owned(); + + pretty_assertions::assert_eq!(output.fmt_for_test().to_string(), expected); + + Ok(()) + } + + #[test] + fn test_remaining_with_pending() -> anyhow::Result<()> { + let action_stats = ActionStats { + local_actions: 0, + remote_actions: 0, + cached_actions: 0, + fallback_actions: 0, + remote_dep_file_cached_actions: 0, + }; + let output = SimpleHeader::new_for_data(HeaderData { + header: "test", + action_stats: &action_stats, + elapsed_str: "0.0s".to_owned(), + finished: 0, + remaining: 2, + }) + .draw( + Dimensions { + width: 60, + height: 10, + }, + DrawMode::Normal, + )?; + + let expected = "test Remaining: 2/2. Time elapsed: 0.0s\n".to_owned(); + + pretty_assertions::assert_eq!(output.fmt_for_test().to_string(), expected); + + Ok(()) + } + + #[test] + fn test_children() -> anyhow::Result<()> { + let action_stats = ActionStats { + local_actions: 0, + remote_actions: 0, + cached_actions: 1, + fallback_actions: 0, + remote_dep_file_cached_actions: 0, + }; + let output = SimpleHeader::new_for_data(HeaderData { + header: "test", + action_stats: &action_stats, + elapsed_str: "0.0s".to_owned(), + finished: 0, + remaining: 1, + }) + .draw( + Dimensions { + width: 80, + height: 10, + }, + DrawMode::Normal, + )?; + let expected = + "test Remaining: 1/1. Cache hits: 100%. Time elapsed: 0.0s\n" + .to_owned(); + + pretty_assertions::assert_eq!(output.fmt_for_test().to_string(), expected); + + Ok(()) + } + + #[test] + fn test_simple_header_final() -> anyhow::Result<()> { + let action_stats = ActionStats { + local_actions: 0, + remote_actions: 0, + cached_actions: 1, + fallback_actions: 0, + remote_dep_file_cached_actions: 0, + }; + let output = SimpleHeader::new_for_data(HeaderData { + header: "test", + action_stats: &action_stats, + elapsed_str: "0.0s".to_owned(), + finished: 0, + remaining: 1, + }) + .draw( + Dimensions { + width: 80, + height: 10, + }, + DrawMode::Final, + )?; + let expected = indoc::indoc!( + r#" + Jobs completed: 0. Time elapsed: 0.0s. + Cache hits: 100%. 
Commands: 1 (cached: 1, remote: 0, local: 0)
+            "#
+        );
+
+        pretty_assertions::assert_eq!(output.fmt_for_test().to_string(), expected);
+
+        Ok(())
+    }
+}
diff --git a/app/buck2_client_ctx/src/subscribers/superconsole/io.rs b/app/buck2_client_ctx/src/subscribers/superconsole/io.rs
index b58c31531d2e7..636831e27263f 100644
--- a/app/buck2_client_ctx/src/subscribers/superconsole/io.rs
+++ b/app/buck2_client_ctx/src/subscribers/superconsole/io.rs
@@ -99,9 +99,34 @@ fn do_render(
     let mut parts = Vec::new();
     if let Some(buck2_rss) = snapshot.buck2_rss {
         parts.push(format!("RSS = {}", HumanizedBytes::new(buck2_rss)));
+    } else {
+        // buck2_rss is only available on Linux. On other platforms, buck2 keeps track of buck2_max_rss so show that instead.
+        parts.push(format!(
+            "Max RSS = {}",
+            HumanizedBytes::new(snapshot.buck2_max_rss)
+        ));
     }
-    if let Some(cpu) = two_snapshots.cpu_percents() {
-        parts.push(format!("CPU = {}%", cpu));
+
+    // We prefer to display malloc_bytes_active instead of malloc_bytes_allocated
+    // because it represents active pages which is more than allocated and better reflects actual memory use of buck2.
+    if let Some(malloc_bytes_active) = snapshot.malloc_bytes_active {
+        parts.push(format!(
+            "Malloc active = {}",
+            HumanizedBytes::new(malloc_bytes_active)
+        ));
+    }
+    let user_cpu_percents = two_snapshots.user_cpu_percents();
+    let system_cpu_percents = two_snapshots.system_cpu_percents();
+    if user_cpu_percents.is_some() || system_cpu_percents.is_some() {
+        let mut cpu_str_parts = vec!["buckd CPU".to_owned()];
+        if let Some(p) = user_cpu_percents {
+            cpu_str_parts.push(format!("user = {}%", p));
+        }
+        if let Some(p) = system_cpu_percents {
+            cpu_str_parts.push(format!("system = {}%", p));
+        }
+        let cpu_str = cpu_str_parts.join(" ");
+        parts.push(cpu_str);
     }
     if snapshot.deferred_materializer_queue_size > 0 {
         parts.push(format!(
@@ -155,7 +180,7 @@ mod tests {
 
     #[test]
     fn test_words_to_lines() {
-        assert_eq!(vec![String::new(); 0], words_to_lines(vec![], 5));
+        assert_eq!(Vec::<String>::new(), words_to_lines(vec![], 5));
         assert_eq!(
             vec!["ab".to_owned()],
             words_to_lines(vec!["ab".to_owned()], 5)
diff --git a/app/buck2_client_ctx/src/subscribers/superconsole/session_info.rs b/app/buck2_client_ctx/src/subscribers/superconsole/session_info.rs
index b3eef0bd2cc49..419de9ce89cae 100644
--- a/app/buck2_client_ctx/src/subscribers/superconsole/session_info.rs
+++ b/app/buck2_client_ctx/src/subscribers/superconsole/session_info.rs
@@ -40,10 +40,10 @@ impl<'s> Component for SessionInfoComponent<'s> {
             headers.push(Line::unstyled("Test UI:")?);
             ids.push(Span::new_unstyled(info)?);
         }
-        if self.session_info.modern_dice {
+        if self.session_info.legacy_dice {
             headers.push(Line::unstyled("Note:")?);
             ids.push(Span::new_unstyled(
-                "Using experimental modern dice".to_owned(),
+                "Using deprecated legacy dice".to_owned(),
             )?);
         }
         // pad all headers to the max width.
diff --git a/app/buck2_client_ctx/src/subscribers/superconsole/system_warning.rs b/app/buck2_client_ctx/src/subscribers/superconsole/system_warning.rs
new file mode 100644
index 0000000000000..973393acb419b
--- /dev/null
+++ b/app/buck2_client_ctx/src/subscribers/superconsole/system_warning.rs
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use buck2_event_observer::action_stats::ActionStats;
+use crossterm::style::Color;
+use crossterm::style::Stylize;
+use superconsole::Component;
+use superconsole::Dimensions;
+use superconsole::DrawMode;
+use superconsole::Line;
+use superconsole::Lines;
+use superconsole::Span;
+
+use crate::subscribers::system_warning::cache_misses_msg;
+use crate::subscribers::system_warning::check_cache_misses;
+use crate::subscribers::system_warning::check_download_speed;
+use crate::subscribers::system_warning::check_memory_pressure;
+use crate::subscribers::system_warning::check_remaining_disk_space;
+use crate::subscribers::system_warning::low_disk_space_msg;
+use crate::subscribers::system_warning::slow_download_speed_msg;
+use crate::subscribers::system_warning::system_memory_exceeded_msg;
+
+/// This component is used to display system warnings for a command e.g. memory pressure, low disk space etc.
+pub(crate) struct SystemWarningComponent<'a> {
+    pub(crate) first_snapshot: &'a Option<buck2_data::Snapshot>,
+    pub(crate) last_snapshot: Option<&'a buck2_data::Snapshot>,
+    pub(crate) system_info: &'a buck2_data::SystemInfo,
+    pub(crate) avg_re_download_speed: Option<f64>,
+    pub(crate) action_stats: &'a ActionStats,
+    pub(crate) estimated_completion_percent: u8,
+    pub(crate) first_build_since_rebase: bool,
+    pub(crate) concurrent_commands: bool,
+}
+
+fn warning_styled(text: &str) -> anyhow::Result<Line> {
+    // crossterm doesn't directly define orange as a color
+    let orange = Color::Rgb {
+        r: (244),
+        g: (140),
+        b: (40),
+    };
+    Ok(Line::from_iter([Span::new_styled(
+        text.to_owned().with(orange),
+    )?]))
+}
+
+impl<'a> Component for SystemWarningComponent<'a> {
+    fn draw_unchecked(&self, _dimensions: Dimensions, _mode: DrawMode) -> anyhow::Result<Lines> {
+        let mut lines = Vec::new();
+
+        if let Some(memory_pressure) = check_memory_pressure(self.last_snapshot, self.system_info) {
+            lines.push(warning_styled(&system_memory_exceeded_msg(
+                &memory_pressure,
+            ))?);
+        }
+        if let Some(low_disk_space) =
+            check_remaining_disk_space(self.last_snapshot, self.system_info)
+        {
+            lines.push(warning_styled(&low_disk_space_msg(&low_disk_space))?);
+        }
+        if check_download_speed(
+            self.first_snapshot,
+            self.last_snapshot,
+            self.system_info,
+            self.avg_re_download_speed,
+            self.concurrent_commands,
+        ) {
+            lines.push(warning_styled(&slow_download_speed_msg(
+                self.avg_re_download_speed,
+            ))?);
+        }
+        if check_cache_misses(
+            self.action_stats,
+            self.system_info,
+            self.first_build_since_rebase,
+            Some(self.estimated_completion_percent),
+        ) {
+            lines.push(warning_styled(&cache_misses_msg(self.action_stats))?);
+        }
+        Ok(Lines(lines))
+    }
+}
diff --git a/app/buck2_client_ctx/src/subscribers/superconsole/timed_list.rs b/app/buck2_client_ctx/src/subscribers/superconsole/timed_list.rs
new file mode 100644
index 0000000000000..2f3c30f2ab745
--- /dev/null
+++ b/app/buck2_client_ctx/src/subscribers/superconsole/timed_list.rs
@@ -0,0 +1,728 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::fmt::Write;
+use std::time::Duration;
+use std::time::Instant;
+
+use buck2_event_observer::display;
+use buck2_event_observer::display::TargetDisplayOptions;
+use buck2_event_observer::fmt_duration;
+use buck2_event_observer::span_tracker::BuckEventSpanHandle;
+use buck2_event_observer::span_tracker::BuckEventSpanTracker;
+use superconsole::components::DrawVertical;
+use superconsole::style::Stylize;
+use superconsole::Component;
+use superconsole::Dimensions;
+use superconsole::DrawMode;
+use superconsole::Line;
+use superconsole::Lines;
+use superconsole::Span;
+
+use self::table_builder::Table;
+use crate::subscribers::superconsole::timed_list::table_builder::Row;
+use crate::subscribers::superconsole::timed_list::table_builder::TimedRow;
+use crate::subscribers::superconsole::SuperConsoleState;
+
+mod table_builder;
+
+/// The minimum time disparity between a single subaction and a target elapsed time
+/// before the former will be displayed separately from the latter's time.
+/// Heuristic uses a percent difference to normalize for long running actions.
+const DISPLAY_SUBACTION_CUTOFF: f64 = 0.9;
+
+/// Information about notable event durations.
+#[derive(Debug)]
+pub struct Cutoffs {
+    /// Cutoff for normal execution time.
+    pub inform: Duration,
+    /// Cutoff for abnormal but still OK execution time.
+    pub warn: Duration,
+    /// Minimum time an event must be alive before it is worth displaying.
+    pub _notable: Duration,
+}
+
+/// This component renders each event and a timer indicating for how long the event has been ongoing.
+
+struct TimedListBody<'c> {
+    cutoffs: &'c Cutoffs,
+    state: &'c SuperConsoleState,
+}
+
+impl<'c> TimedListBody<'c> {
+    /// Render a root as `root [first child + remaining children]`
+    fn draw_root_first_child(
+        &self,
+        root: &BuckEventSpanHandle,
+        single_child: BuckEventSpanHandle,
+        remaining_children: usize,
+        display_platform: bool,
+    ) -> anyhow::Result<TimedRow> {
+        let time_speed = self.state.time_speed;
+        let info = root.info();
+        let child_info = single_child.info();
+
+        // always display the event and subaction
+        let mut event_string = format!(
+            "{} [{}",
+            display::display_event(
+                &info.event,
+                TargetDisplayOptions::for_console(display_platform)
+            )?,
+            display::display_event(
+                &child_info.event,
+                TargetDisplayOptions::for_console(display_platform)
+            )?
+        );
+
+        let now = Instant::now();
+        let child_info_elapsed = now - child_info.start;
+        let info_elapsed = now - info.start;
+        let subaction_ratio = child_info_elapsed.as_secs_f64() / info_elapsed.as_secs_f64();
+
+        // but only display the time of the subaction if it differs significantly.
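+        // For example, a 3s subaction under a 10s root has a ratio of 0.3 and
+        // gets its own elapsed time; a 9.5s subaction (ratio 0.95) would just
+        // repeat the root's timer, so its time is elided.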
+        if subaction_ratio < DISPLAY_SUBACTION_CUTOFF {
+            let subaction_time = fmt_duration::fmt_duration(child_info_elapsed, time_speed.speed());
+            event_string.push(' ');
+            event_string.push_str(&subaction_time);
+        }
+
+        if remaining_children > 0 {
+            write!(event_string, " + {}", remaining_children)
+                .expect("Write to String is not fallible");
+        }
+
+        event_string.push(']');
+
+        TimedRow::text(
+            0,
+            event_string,
+            fmt_duration::fmt_duration(info_elapsed, time_speed.speed()),
+            info_elapsed.mul_f64(time_speed.speed()),
+            self.cutoffs,
+        )
+    }
+
+    fn draw_root(&self, root: &BuckEventSpanHandle) -> anyhow::Result<Vec<TimedRow>> {
+        let time_speed = self.state.time_speed;
+        let config = &self.state.config;
+        let two_lines = config.two_lines;
+        let display_platform = config.display_platform;
+        let info = root.info();
+
+        let mut it = root.children();
+
+        match it.next() {
+            Some(first) if !two_lines => Ok(vec![self.draw_root_first_child(
+                root,
+                first,
+                it.len(),
+                display_platform,
+            )?]),
+            first => {
+                let mut rows = Vec::new();
+                rows.push(TimedRow::span(
+                    0,
+                    info,
+                    time_speed.speed(),
+                    self.cutoffs,
+                    display_platform,
+                )?);
+
+                for child in first.into_iter().chain(it) {
+                    rows.push(TimedRow::span(
+                        2,
+                        child.info(),
+                        time_speed.speed(),
+                        self.cutoffs,
+                        display_platform,
+                    )?);
+                }
+                Ok(rows)
+            }
+        }
+    }
+}
+
+impl<'c> Component for TimedListBody<'c> {
+    fn draw_unchecked(&self, dimensions: Dimensions, mode: DrawMode) -> anyhow::Result<Lines> {
+        let config = &self.state.config;
+        let max_lines = config.max_lines;
+
+        let observer = self.state.simple_console.observer();
+
+        let spans = observer.spans();
+
+        let mut roots = spans.iter_roots();
+
+        let mut builder = Table::new();
+
+        let mut first_not_rendered = None;
+
+        for root in &mut roots {
+            let rows = self.draw_root(&root)?;
+
+            if builder.len() + rows.len() >= max_lines {
+                first_not_rendered = Some(root);
+                break;
+            }
+
+            builder.rows.extend(rows.into_iter().map(Row::from));
+        }
+
+        // Add remaining unshown tasks, if any.
+        let more = roots.len() as u64 + first_not_rendered.map_or(0, |_| 1);
+
+        if more > 0 {
+            let remaining = format!("... and {} more currently executing", more);
+            builder.rows.push(
+                std::iter::once(Span::new_styled(remaining.italic())?)
+                    .collect::<Line>()
+                    .into(),
+            );
+        }
+
+        builder.draw(dimensions, mode)
+    }
+}
+
+/// Component for timed list header
+struct TimedListHeader;
+
+impl Component for TimedListHeader {
+    fn draw_unchecked(&self, dimensions: Dimensions, _mode: DrawMode) -> anyhow::Result<Lines> {
+        Ok(Lines(vec![Line::unstyled(&"-".repeat(dimensions.width))?]))
+    }
+}
+
+/// Component that displays ongoing events and their durations + summary stats.
+pub struct TimedList<'a> {
+    cutoffs: &'a Cutoffs,
+    state: &'a SuperConsoleState,
+}
+
+impl<'a> TimedList<'a> {
+    /// * `cutoffs` determines durations for warnings, time-outs, and baseline notability.
+    pub fn new(cutoffs: &'a Cutoffs, state: &'a SuperConsoleState) -> Self {
+        Self { cutoffs, state }
+    }
+}
+
+impl<'a> Component for TimedList<'a> {
+    fn draw_unchecked(&self, dimensions: Dimensions, mode: DrawMode) -> anyhow::Result<Lines> {
+        let span_tracker: &BuckEventSpanTracker = self.state.simple_console.observer().spans();
+
+        match mode {
+            DrawMode::Normal if !span_tracker.is_unused() => {
+                let header = TimedListHeader;
+                let body = TimedListBody {
+                    cutoffs: self.cutoffs,
+                    state: self.state,
+                };
+
+                let mut draw = DrawVertical::new(dimensions);
+                draw.draw(&header, mode)?;
+                draw.draw(&body, mode)?;
+                Ok(draw.finish())
+            }
+            _ => Ok(Lines::new()),
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::collections::HashMap;
+    use std::sync::Arc;
+    use std::time::UNIX_EPOCH;
+
+    use buck2_data::FakeStart;
+    use buck2_data::SpanStartEvent;
+    use buck2_event_observer::action_stats::ActionStats;
+    use buck2_event_observer::verbosity::Verbosity;
+    use buck2_events::span::SpanId;
+    use buck2_events::BuckEvent;
+    use buck2_wrapper_common::invocation_id::TraceId;
+    use dupe::Dupe;
+    use itertools::Itertools;
+
+    use super::*;
+    use crate::subscribers::subscriber::Tick;
+    use crate::subscribers::superconsole::SuperConsoleConfig;
+    use crate::subscribers::superconsole::TimeSpeed;
+
+    const CUTOFFS: Cutoffs = Cutoffs {
+        inform: Duration::from_secs(2),
+        warn: Duration::from_secs(4),
+        _notable: Duration::from_millis(200),
+    };
+
+    const TIME_DILATION: u64 = 10;
+
+    fn fake_time_speed() -> TimeSpeed {
+        // We run time 10x slower so that any time occurring due to the
+        // test running on an overloaded server is ignored.
+        //
+        // Note that going to 100x slower causes Windows CI to fail, because
+        // the `Instant` can't go below the time when the VM was booted, or you get an
+        // underflow of `Instant`.
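+        //
+        // Worked through: `fake_time(&tick, 3)` backdates a span by
+        // 3 * TIME_DILATION = 30 real seconds, and rendering at speed
+        // 1 / TIME_DILATION scales that back down, so the frame prints "3.0s".
+        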
+ TimeSpeed::new(Some(1.0 / (TIME_DILATION as f64))).unwrap() + } + + fn fake_time(tick: &Tick, secs: u64) -> Instant { + tick.start_time + .checked_sub(Duration::from_secs(secs * TIME_DILATION)) + .unwrap_or_else(|| { + panic!( + "Instant went too low: {:?} - ({secs} * {TIME_DILATION}", + tick.start_time + ) + }) + // We add 50ms to give us a 100ms window where we round down correctly + .checked_add(Duration::from_millis(50 * TIME_DILATION)) + .unwrap() + } + + fn super_console_state_for_test( + span_tracker: BuckEventSpanTracker, + action_stats: ActionStats, + tick: Tick, + time_speed: TimeSpeed, + timed_list_state: SuperConsoleConfig, + ) -> SuperConsoleState { + let mut state = SuperConsoleState::new( + None, + TraceId::null(), + Verbosity::default(), + false, + timed_list_state, + None, + ) + .unwrap(); + state.simple_console.observer.span_tracker = span_tracker; + state.simple_console.observer.action_stats = action_stats; + state.current_tick = tick; + state.time_speed = time_speed; + state + } + + #[test] + fn test_normal() -> anyhow::Result<()> { + let tick = Tick::now(); + + let label = Arc::new(BuckEvent::new( + UNIX_EPOCH, + TraceId::new(), + Some(SpanId::next()), + None, + buck2_data::buck_event::Data::SpanStart(SpanStartEvent { + data: Some(buck2_data::span_start_event::Data::Fake(FakeStart { + caramba: "test".to_owned(), + })), + }), + )); + + let module = Arc::new(BuckEvent::new( + UNIX_EPOCH, + TraceId::new(), + Some(SpanId::next()), + None, + buck2_data::buck_event::Data::SpanStart(SpanStartEvent { + data: Some(buck2_data::span_start_event::Data::Fake(FakeStart { + caramba: "foo".to_owned(), + })), + }), + )); + + let mut state = BuckEventSpanTracker::new(); + state.start_at(&label, fake_time(&tick, 3)).unwrap(); + state.start_at(&module, fake_time(&tick, 1)).unwrap(); + + let time_speed = fake_time_speed(); + let action_stats = ActionStats { + local_actions: 0, + remote_actions: 0, + cached_actions: 1, + fallback_actions: 0, + remote_dep_file_cached_actions: 0, + }; + + let timed_list_state = SuperConsoleConfig { + max_lines: 5, + ..Default::default() + }; + + let output = TimedList::new( + &CUTOFFS, + &super_console_state_for_test(state, action_stats, tick, time_speed, timed_list_state), + ) + .draw( + Dimensions { + width: 40, + height: 10, + }, + DrawMode::Normal, + )?; + let expected = [ + + "----------------------------------------", + "test -- speak of the devil 3.0s", + "foo -- speak of the devil 1.0s", + ].iter().map(|l| format!("{}\n", l)).join(""); + + pretty_assertions::assert_eq!(output.fmt_for_test().to_string(), expected); + + Ok(()) + } + + #[test] + fn test_remaining() -> anyhow::Result<()> { + let tick = Tick::now(); + + let e1 = BuckEvent::new( + UNIX_EPOCH, + TraceId::new(), + Some(SpanId::next()), + None, + buck2_data::buck_event::Data::SpanStart(SpanStartEvent { + data: Some(buck2_data::span_start_event::Data::Fake(FakeStart { + caramba: "e1".to_owned(), + })), + }), + ); + + let e2 = BuckEvent::new( + UNIX_EPOCH, + TraceId::new(), + Some(SpanId::next()), + None, + buck2_data::buck_event::Data::SpanStart(SpanStartEvent { + data: Some(buck2_data::span_start_event::Data::Fake(FakeStart { + caramba: "e2".to_owned(), + })), + }), + ); + + let e3 = BuckEvent::new( + UNIX_EPOCH, + TraceId::new(), + Some(SpanId::next()), + None, + buck2_data::buck_event::Data::SpanStart(SpanStartEvent { + data: Some(buck2_data::span_start_event::Data::Fake(FakeStart { + caramba: "e3".to_owned(), + })), + }), + ); + + let mut state = BuckEventSpanTracker::new(); + + for e in 
[e1, e2, e3] { + state + .start_at(&Arc::new(e.clone()), fake_time(&tick, 1)) + .unwrap(); + } + + let time_speed = fake_time_speed(); + let action_stats = ActionStats { + local_actions: 0, + remote_actions: 0, + cached_actions: 1, + fallback_actions: 0, + remote_dep_file_cached_actions: 0, + }; + + let timed_list_state = SuperConsoleConfig { + max_lines: 2, + ..Default::default() + }; + + let output = TimedList::new( + &CUTOFFS, + &super_console_state_for_test(state, action_stats, tick, time_speed, timed_list_state), + ) + .draw( + Dimensions { + width: 40, + height: 10, + }, + DrawMode::Normal, + )?; + let expected = [ + "----------------------------------------", + "e1 -- speak of the devil 1.0s", + "... and 2 more currently executing", + ] + .iter() + .map(|l| format!("{}\n", l)) + .join(""); + + pretty_assertions::assert_eq!(output.fmt_for_test().to_string(), expected); + + Ok(()) + } + + #[tokio::test] + async fn test_remaining_with_pending() -> anyhow::Result<()> { + let tick = Tick::now(); + + let mut state = SuperConsoleState::new( + None, + TraceId::null(), + Verbosity::default(), + false, + SuperConsoleConfig { + max_lines: 2, + ..Default::default() + }, + None, + )?; + + state.time_speed = fake_time_speed(); + state.current_tick = tick.clone(); + + state + .simple_console + .observer + .observe(fake_time(&tick, 10), &span_start_event(None)) + .await?; + + state + .simple_console + .observer + .observe(fake_time(&tick, 1), &dice_snapshot()) + .await?; + + { + let output = TimedList::new(&CUTOFFS, &state).draw( + Dimensions { + width: 60, + height: 10, + }, + DrawMode::Normal, + )?; + + let expected = [ + "------------------------------------------------------------", + "pkg:target -- action (category identifier) 10.0s", + ].iter().map(|l| format!("{}\n", l)).join(""); + + pretty_assertions::assert_eq!(output.fmt_for_test().to_string(), expected); + } + + { + state.config.max_lines = 1; // With fewer lines now + + let output = TimedList::new(&CUTOFFS, &state).draw( + Dimensions { + width: 60, + height: 10, + }, + DrawMode::Normal, + )?; + + let expected = [ + "------------------------------------------------------------", + "... 
and 1 more currently executing",
+            ]
+            .iter()
+            .map(|l| format!("{}\n", l))
+            .join("");
+
+            pretty_assertions::assert_eq!(output.fmt_for_test().to_string(), expected);
+        }
+
+        Ok(())
+    }
+
+    #[test]
+    fn test_children() -> anyhow::Result<()> {
+        let tick = Tick::now();
+
+        let parent = SpanId::next();
+
+        let prepare = Arc::new(BuckEvent::new(
+            UNIX_EPOCH,
+            TraceId::new(),
+            Some(SpanId::next()),
+            Some(parent),
+            SpanStartEvent {
+                data: Some(
+                    buck2_data::ExecutorStageStart {
+                        stage: Some(buck2_data::PrepareAction {}.into()),
+                    }
+                    .into(),
+                ),
+            }
+            .into(),
+        ));
+
+        let mut state = BuckEventSpanTracker::new();
+        state
+            .start_at(&span_start_event(Some(parent)), fake_time(&tick, 10))
+            .unwrap();
+        state.start_at(&prepare, fake_time(&tick, 5)).unwrap();
+
+        let time_speed = fake_time_speed();
+
+        let action_stats = ActionStats {
+            local_actions: 0,
+            remote_actions: 0,
+            cached_actions: 1,
+            fallback_actions: 0,
+            remote_dep_file_cached_actions: 0,
+        };
+
+        let timed_list_state = SuperConsoleConfig {
+            max_lines: 5,
+            ..Default::default()
+        };
+
+        let output = TimedList::new(
+            &CUTOFFS,
+            &super_console_state_for_test(
+                state.clone(),
+                action_stats.dupe(),
+                tick.dupe(),
+                time_speed,
+                timed_list_state.clone(),
+            ),
+        )
+        .draw(
+            Dimensions {
+                width: 80,
+                height: 10,
+            },
+            DrawMode::Normal,
+        )?;
+        let expected = [
+            "--------------------------------------------------------------------------------",
+            "pkg:target -- action (category identifier) [prepare 5.0s] 10.0s",
+        ].iter().map(|l| format!("{}\n", l)).join("");
+
+        pretty_assertions::assert_eq!(output.fmt_for_test().to_string(), expected);
+
+        // Now, add another action. Normally we don't have multiple stages actually running
+        // concurrently but this is a test!
+
+        let re_download = Arc::new(BuckEvent::new(
+            UNIX_EPOCH,
+            TraceId::new(),
+            Some(SpanId::next()),
+            Some(parent),
+            SpanStartEvent {
+                data: Some(
+                    buck2_data::ExecutorStageStart {
+                        stage: Some(
+                            buck2_data::ReStage {
+                                stage: Some(buck2_data::ReDownload {}.into()),
+                            }
+                            .into(),
+                        ),
+                    }
+                    .into(),
+                ),
+            }
+            .into(),
+        ));
+
+        state.start_at(&re_download, fake_time(&tick, 2)).unwrap();
+
+        let output = TimedList::new(
+            &CUTOFFS,
+            &super_console_state_for_test(state, action_stats, tick, time_speed, timed_list_state),
+        )
+        .draw(
+            Dimensions {
+                width: 80,
+                height: 10,
+            },
+            DrawMode::Normal,
+        )?;
+        let expected = [
+            "--------------------------------------------------------------------------------",
+            "pkg:target -- action (category identifier) [prepare 5.0s + 1] 10.0s",
+        ].iter().map(|l| format!("{}\n", l)).join("");
+
+        pretty_assertions::assert_eq!(output.fmt_for_test().to_string(), expected);
+
+        Ok(())
+    }
+
+    fn dice_snapshot() -> Arc<BuckEvent> {
+        Arc::new(BuckEvent::new(
+            UNIX_EPOCH,
+            TraceId::new(),
+            None,
+            None,
+            buck2_data::InstantEvent {
+                data: Some(
+                    buck2_data::DiceStateSnapshot {
+                        key_states: {
+                            let mut map = HashMap::new();
+                            map.insert(
+                                "BuildKey".to_owned(),
+                                buck2_data::DiceKeyState {
+                                    started: 5,
+                                    finished: 2,
+                                    check_deps_started: 2,
+                                    check_deps_finished: 1,
+                                    compute_started: 4,
+                                    compute_finished: 2,
+                                },
+                            );
+                            map
+                        },
+                    }
+                    .into(),
+                ),
+            }
+            .into(),
+        ))
+    }
+
+    fn span_start_event(parent_span: Option<SpanId>) -> Arc<BuckEvent> {
+        let span_id = Some(parent_span.unwrap_or(SpanId::next()));
+        Arc::new(BuckEvent::new(
+            UNIX_EPOCH,
+            TraceId::new(),
+            span_id,
+            None,
+            SpanStartEvent {
+                data: Some(
+                    buck2_data::ActionExecutionStart {
+                        key: Some(buck2_data::ActionKey {
+                            id: Default::default(),
+                            owner: 
Some(buck2_data::action_key::Owner::TargetLabel( + buck2_data::ConfiguredTargetLabel { + label: Some(buck2_data::TargetLabel { + package: "pkg".into(), + name: "target".into(), + }), + configuration: Some(buck2_data::Configuration { + full_name: "conf".into(), + }), + execution_configuration: None, + }, + )), + key: "".to_owned(), + }), + name: Some(buck2_data::ActionName { + category: "category".into(), + identifier: "identifier".into(), + }), + kind: buck2_data::ActionKind::NotSet as i32, + } + .into(), + ), + } + .into(), + )) + } +} diff --git a/app/buck2_client_ctx/src/subscribers/superconsole/timed_list/mod.rs b/app/buck2_client_ctx/src/subscribers/superconsole/timed_list/mod.rs deleted file mode 100644 index ad0598e51661e..0000000000000 --- a/app/buck2_client_ctx/src/subscribers/superconsole/timed_list/mod.rs +++ /dev/null @@ -1,858 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::fmt::Write; -use std::time::Duration; -use std::time::Instant; - -use buck2_event_observer::display; -use buck2_event_observer::display::TargetDisplayOptions; -use buck2_event_observer::fmt_duration; -use buck2_event_observer::humanized::HumanizedCount; -use buck2_event_observer::pending_estimate::pending_estimate; -use buck2_event_observer::span_tracker::BuckEventSpanHandle; -use buck2_event_observer::span_tracker::BuckEventSpanTracker; -use superconsole::components::bordering::BorderedSpec; -use superconsole::components::Bordered; -use superconsole::components::DrawVertical; -use superconsole::style::Stylize; -use superconsole::Component; -use superconsole::Dimensions; -use superconsole::DrawMode; -use superconsole::Line; -use superconsole::Lines; -use superconsole::Span; - -use self::table_builder::Table; -use crate::subscribers::superconsole::common::HeaderLineComponent; -use crate::subscribers::superconsole::common::StaticStringComponent; -use crate::subscribers::superconsole::timed_list::table_builder::Row; -use crate::subscribers::superconsole::timed_list::table_builder::TimedRow; -use crate::subscribers::superconsole::SuperConsoleState; - -mod table_builder; - -/// The minimum time disparity between a single subaction and a target elapsed time -/// before the former will be displayed separately from the latter's time. -/// Heuristic uses a percent difference to normalize for long running actions. -const DISPLAY_SUBACTION_CUTOFF: f64 = 0.9; - -/// Information about notable event durations. -#[derive(Debug)] -pub struct Cutoffs { - /// Cutoff for normal execution time. - pub inform: Duration, - /// Cutoff for abnormal but still OK execution time. - pub warn: Duration, - /// Minimum time an event must be alive before it is worth displaying. - pub _notable: Duration, -} - -/// This component renders each event and a timer indicating for how long the event has been ongoing. 
-
-struct TimedListBody<'c> {
-    cutoffs: &'c Cutoffs,
-    state: &'c SuperConsoleState,
-}
-
-impl<'c> TimedListBody<'c> {
-    /// Render a root as `root [first child + remaining children]`
-    fn draw_root_first_child(
-        &self,
-        root: &BuckEventSpanHandle,
-        single_child: BuckEventSpanHandle,
-        remaining_children: usize,
-        display_platform: bool,
-    ) -> anyhow::Result<TimedRow> {
-        let time_speed = self.state.time_speed;
-        let info = root.info();
-        let child_info = single_child.info();
-
-        // always display the event and subaction
-        let mut event_string = format!(
-            "{} [{}",
-            display::display_event(
-                &info.event,
-                TargetDisplayOptions::for_console(display_platform)
-            )?,
-            display::display_event(
-                &child_info.event,
-                TargetDisplayOptions::for_console(display_platform)
-            )?
-        );
-
-        let now = Instant::now();
-        let child_info_elapsed = now - child_info.start;
-        let info_elapsed = now - info.start;
-        let subaction_ratio = child_info_elapsed.as_secs_f64() / info_elapsed.as_secs_f64();
-
-        // but only display the time of the subaction if it differs significantly.
-        if subaction_ratio < DISPLAY_SUBACTION_CUTOFF {
-            let subaction_time = fmt_duration::fmt_duration(child_info_elapsed, time_speed.speed());
-            event_string.push(' ');
-            event_string.push_str(&subaction_time);
-        }
-
-        if remaining_children > 0 {
-            write!(event_string, " + {}", remaining_children)
-                .expect("Write to String is not fallible");
-        }
-
-        event_string.push(']');
-
-        TimedRow::text(
-            0,
-            event_string,
-            fmt_duration::fmt_duration(info_elapsed, time_speed.speed()),
-            info_elapsed.mul_f64(time_speed.speed()),
-            self.cutoffs,
-        )
-    }
-
-    fn draw_root(&self, root: &BuckEventSpanHandle) -> anyhow::Result<Vec<TimedRow>> {
-        let time_speed = self.state.time_speed;
-        let config = &self.state.config;
-        let two_lines = config.two_lines;
-        let display_platform = config.display_platform;
-        let info = root.info();
-
-        let mut it = root.children();
-
-        match it.next() {
-            Some(first) if !two_lines => Ok(vec![self.draw_root_first_child(
-                root,
-                first,
-                it.len(),
-                display_platform,
-            )?]),
-            first => {
-                let mut rows = Vec::new();
-                rows.push(TimedRow::span(
-                    0,
-                    info,
-                    time_speed.speed(),
-                    self.cutoffs,
-                    display_platform,
-                )?);
-
-                for child in first.into_iter().chain(it) {
-                    rows.push(TimedRow::span(
-                        2,
-                        child.info(),
-                        time_speed.speed(),
-                        self.cutoffs,
-                        display_platform,
-                    )?);
-                }
-                Ok(rows)
-            }
-        }
-    }
-}
-
-impl<'c> Component for TimedListBody<'c> {
-    fn draw_unchecked(&self, dimensions: Dimensions, mode: DrawMode) -> anyhow::Result<Lines> {
-        let config = &self.state.config;
-        let max_lines = config.max_lines;
-
-        let observer = self.state.simple_console.observer();
-
-        let spans = observer.spans();
-
-        let mut roots = spans.iter_roots();
-
-        let mut builder = Table::new();
-
-        let mut first_not_rendered = None;
-
-        for root in &mut roots {
-            let rows = self.draw_root(&root)?;
-
-            if builder.len() + rows.len() >= max_lines {
-                first_not_rendered = Some(root);
-                break;
-            }
-
-            builder.rows.extend(rows.into_iter().map(Row::from));
-        }
-
-        // Add remaining unshown tasks, if any.
-        let more = roots.len() as u64 + first_not_rendered.map_or(0, |_| 1);
-
-        if more > 0 {
-            let remaining = format!("... and {} more currently executing", more);
-            builder.rows.push(
-                std::iter::once(Span::new_styled(remaining.italic())?)
-                    .collect::<Line>()
-                    .into(),
-            );
-        }
-
-        builder.draw(dimensions, mode)
-    }
-}
-
-/// This component is used to display summary counts about the number of jobs.
-struct CountComponent<'s> {
-    state: &'s SuperConsoleState,
-}
-
-impl<'s> Component for CountComponent<'s> {
-    fn draw_unchecked(&self, _dimensions: Dimensions, mode: DrawMode) -> anyhow::Result<Lines> {
-        let observer = self.state.simple_console.observer();
-        let spans = observer.spans();
-        let action_stats = self.state.simple_console.observer().action_stats();
-        let time_speed = self.state.time_speed;
-
-        let finished = spans.roots_completed() as u64;
-
-        let elapsed =
-            fmt_duration::fmt_duration(self.state.current_tick.elapsed_time, time_speed.speed());
-
-        match mode {
-            DrawMode::Normal => {
-                let pending = pending_estimate(spans.roots(), observer.extra().dice_state());
-
-                let remaining = spans.iter_roots().len() as u64 + pending;
-                let total = remaining + finished;
-
-                let remaining = HumanizedCount::new(remaining);
-                let total = HumanizedCount::new(total);
-
-                let contents = if action_stats.log_stats() {
-                    let mut actions_summary = format!(
-                        "Remaining: {}/{}. Cache hits: {}%. ",
-                        remaining,
-                        total,
-                        action_stats.total_cache_hit_percentage()
-                    );
-                    if action_stats.fallback_actions > 0 {
-                        actions_summary += format!(
-                            "Fallback: {}/{}. ",
-                            HumanizedCount::new(action_stats.fallback_actions),
-                            HumanizedCount::new(action_stats.total_executed_actions())
-                        )
-                        .as_str();
-                    }
-                    actions_summary += format!("Time elapsed: {}", elapsed).as_str();
-                    actions_summary
-                } else {
-                    format!(
-                        "Remaining: {}/{}. Time elapsed: {}",
-                        remaining, total, elapsed
-                    )
-                };
-                Ok(Lines(vec![Line::unstyled(&contents)?]))
-            }
-            DrawMode::Final => {
-                let mut lines = vec![Line::unstyled(&format!(
-                    "Jobs completed: {}. Time elapsed: {}.",
-                    finished, elapsed,
-                ))?];
-                if action_stats.log_stats() {
-                    lines.push(Line::unstyled(&action_stats.to_string())?);
-                }
-                Ok(Lines(lines))
-            }
-        }
-    }
-}
-
-/// Wrapper component for Header + Count
-struct TimedListHeader<'s> {
-    state: &'s SuperConsoleState,
-    header: &'s str,
-}
-
-impl<'s> Component for TimedListHeader<'s> {
-    fn draw_unchecked(&self, dimensions: Dimensions, mode: DrawMode) -> anyhow::Result<Lines> {
-        let info = StaticStringComponent {
-            header: self.header,
-        };
-        let header_split = HeaderLineComponent::new(info, CountComponent { state: self.state });
-        let header_box = Bordered::new(
-            header_split,
-            BorderedSpec {
-                bottom: Some(Span::dash()),
-                top: None,
-                left: None,
-                right: None,
-            },
-        );
-
-        header_box.draw(dimensions, mode)
-    }
-}
-
-/// Component that displays ongoing events and their durations + summary stats.
-pub struct TimedList<'a> {
-    header: &'a str,
-    cutoffs: &'a Cutoffs,
-    state: &'a SuperConsoleState,
-}
-
-impl<'a> TimedList<'a> {
-    /// * `cutoffs` determines durations for warnings, time-outs, and baseline notability.
-    /// * `header` is the string displayed at the top of the list.
-    pub fn new(cutoffs: &'a Cutoffs, header: &'a str, state: &'a SuperConsoleState) -> Self {
-        Self {
-            header,
-            cutoffs,
-            state,
-        }
-    }
-}
-
-impl<'a> Component for TimedList<'a> {
-    fn draw_unchecked(&self, dimensions: Dimensions, mode: DrawMode) -> anyhow::Result<Lines> {
-        let span_tracker: &BuckEventSpanTracker = self.state.simple_console.observer().spans();
-
-        match mode {
-            DrawMode::Normal if !span_tracker.is_unused() => {
-                let header = TimedListHeader {
-                    header: self.header,
-                    state: self.state,
-                };
-                let body = TimedListBody {
-                    cutoffs: self.cutoffs,
-                    state: self.state,
-                };
-
-                let mut draw = DrawVertical::new(dimensions);
-                draw.draw(&header, mode)?;
-                draw.draw(&body, mode)?;
-                Ok(draw.finish())
-            }
-            // show a summary at the end
-            DrawMode::Final => {
-                CountComponent { state: self.state }.draw(dimensions, DrawMode::Final)
-            }
-            _ => Ok(Lines::new()),
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use std::collections::HashMap;
-    use std::sync::Arc;
-    use std::time::UNIX_EPOCH;
-
-    use buck2_data::FakeStart;
-    use buck2_data::SpanStartEvent;
-    use buck2_event_observer::action_stats::ActionStats;
-    use buck2_event_observer::verbosity::Verbosity;
-    use buck2_events::span::SpanId;
-    use buck2_events::BuckEvent;
-    use buck2_wrapper_common::invocation_id::TraceId;
-    use dupe::Dupe;
-    use itertools::Itertools;
-
-    use super::*;
-    use crate::subscribers::subscriber::Tick;
-    use crate::subscribers::superconsole::SuperConsoleConfig;
-    use crate::subscribers::superconsole::TimeSpeed;
-
-    const CUTOFFS: Cutoffs = Cutoffs {
-        inform: Duration::from_secs(2),
-        warn: Duration::from_secs(4),
-        _notable: Duration::from_millis(200),
-    };
-
-    const TIME_DILATION: u64 = 10;
-
-    fn fake_time_speed() -> TimeSpeed {
-        // We run time 10x slower so that any time occurring due to the
-        // test running on an overloaded server is ignored.
-        //
-        // Note that going to 100x slower causes Windows CI to fail, because
-        // the `Instant` can't go below the time when the VM was booted, or you get an
-        // underflow of `Instant`.
- TimeSpeed::new(Some(1.0 / (TIME_DILATION as f64))).unwrap() - } - - fn fake_time(tick: &Tick, secs: u64) -> Instant { - tick.start_time - .checked_sub(Duration::from_secs(secs * TIME_DILATION)) - .unwrap_or_else(|| { - panic!( - "Instant went too low: {:?} - ({secs} * {TIME_DILATION}", - tick.start_time - ) - }) - // We add 50ms to give us a 100ms window where we round down correctly - .checked_add(Duration::from_millis(50 * TIME_DILATION)) - .unwrap() - } - - fn super_console_state_for_test( - span_tracker: BuckEventSpanTracker, - action_stats: ActionStats, - tick: Tick, - time_speed: TimeSpeed, - timed_list_state: SuperConsoleConfig, - ) -> SuperConsoleState { - let mut state = SuperConsoleState::new( - None, - TraceId::null(), - Verbosity::default(), - false, - timed_list_state, - ) - .unwrap(); - state.simple_console.observer.span_tracker = span_tracker; - state.simple_console.observer.action_stats = action_stats; - state.current_tick = tick; - state.time_speed = time_speed; - state - } - - #[test] - fn test_normal() -> anyhow::Result<()> { - let tick = Tick::now(); - - let label = Arc::new(BuckEvent::new( - UNIX_EPOCH, - TraceId::new(), - Some(SpanId::new()), - None, - buck2_data::buck_event::Data::SpanStart(SpanStartEvent { - data: Some(buck2_data::span_start_event::Data::Fake(FakeStart { - caramba: "test".to_owned(), - })), - }), - )); - - let module = Arc::new(BuckEvent::new( - UNIX_EPOCH, - TraceId::new(), - Some(SpanId::new()), - None, - buck2_data::buck_event::Data::SpanStart(SpanStartEvent { - data: Some(buck2_data::span_start_event::Data::Fake(FakeStart { - caramba: "foo".to_owned(), - })), - }), - )); - - let mut state = BuckEventSpanTracker::new(); - state.start_at(&label, fake_time(&tick, 3)).unwrap(); - state.start_at(&module, fake_time(&tick, 1)).unwrap(); - - let time_speed = fake_time_speed(); - let action_stats = ActionStats { - local_actions: 0, - remote_actions: 0, - cached_actions: 1, - fallback_actions: 0, - remote_dep_file_cached_actions: 0, - }; - - let timed_list_state = SuperConsoleConfig { - max_lines: 5, - ..Default::default() - }; - - let output = TimedList::new( - &CUTOFFS, - "test", - &super_console_state_for_test(state, action_stats, tick, time_speed, timed_list_state), - ) - .draw( - Dimensions { - width: 40, - height: 10, - }, - DrawMode::Normal, - )?; - let expected = [ - "testRemaining: 2/2. Cache hits: 100%. 
Ti", - "----------------------------------------", - "test -- speak of the devil 3.0s", - "foo -- speak of the devil 1.0s", - ].iter().map(|l| format!("{}\n", l)).join(""); - - pretty_assertions::assert_eq!(output.fmt_for_test().to_string(), expected); - - Ok(()) - } - - #[test] - fn test_remaining() -> anyhow::Result<()> { - let tick = Tick::now(); - - let e1 = BuckEvent::new( - UNIX_EPOCH, - TraceId::new(), - Some(SpanId::new()), - None, - buck2_data::buck_event::Data::SpanStart(SpanStartEvent { - data: Some(buck2_data::span_start_event::Data::Fake(FakeStart { - caramba: "e1".to_owned(), - })), - }), - ); - - let e2 = BuckEvent::new( - UNIX_EPOCH, - TraceId::new(), - Some(SpanId::new()), - None, - buck2_data::buck_event::Data::SpanStart(SpanStartEvent { - data: Some(buck2_data::span_start_event::Data::Fake(FakeStart { - caramba: "e2".to_owned(), - })), - }), - ); - - let e3 = BuckEvent::new( - UNIX_EPOCH, - TraceId::new(), - Some(SpanId::new()), - None, - buck2_data::buck_event::Data::SpanStart(SpanStartEvent { - data: Some(buck2_data::span_start_event::Data::Fake(FakeStart { - caramba: "e3".to_owned(), - })), - }), - ); - - let mut state = BuckEventSpanTracker::new(); - - for e in [e1, e2, e3] { - state - .start_at(&Arc::new(e.clone()), fake_time(&tick, 1)) - .unwrap(); - } - - let time_speed = fake_time_speed(); - let action_stats = ActionStats { - local_actions: 0, - remote_actions: 0, - cached_actions: 1, - fallback_actions: 0, - remote_dep_file_cached_actions: 0, - }; - - let timed_list_state = SuperConsoleConfig { - max_lines: 2, - ..Default::default() - }; - - let output = TimedList::new( - &CUTOFFS, - "test", - &super_console_state_for_test(state, action_stats, tick, time_speed, timed_list_state), - ) - .draw( - Dimensions { - width: 40, - height: 10, - }, - DrawMode::Normal, - )?; - let expected = [ - "testRemaining: 3/3. Cache hits: 100%. Ti", - "----------------------------------------", - "e1 -- speak of the devil 1.0s", - "... 
and 2 more currently executing", - ] - .iter() - .map(|l| format!("{}\n", l)) - .join(""); - - pretty_assertions::assert_eq!(output.fmt_for_test().to_string(), expected); - - Ok(()) - } - - #[test] - fn test_remaining_with_pending() -> anyhow::Result<()> { - let tick = Tick::now(); - - let mut state = SuperConsoleState::new( - None, - TraceId::null(), - Verbosity::default(), - false, - SuperConsoleConfig { - max_lines: 2, - ..Default::default() - }, - )?; - - state.time_speed = fake_time_speed(); - state.current_tick = tick.clone(); - - state.simple_console.observer.observe( - fake_time(&tick, 10), - &Arc::new(BuckEvent::new( - UNIX_EPOCH, - TraceId::new(), - Some(SpanId::new()), - None, - SpanStartEvent { - data: Some( - buck2_data::ActionExecutionStart { - key: Some(buck2_data::ActionKey { - id: Default::default(), - owner: Some(buck2_data::action_key::Owner::TargetLabel( - buck2_data::ConfiguredTargetLabel { - label: Some(buck2_data::TargetLabel { - package: "pkg".into(), - name: "target".into(), - }), - configuration: Some(buck2_data::Configuration { - full_name: "conf".into(), - }), - execution_configuration: None, - }, - )), - key: "".to_owned(), - }), - name: Some(buck2_data::ActionName { - category: "category".into(), - identifier: "identifier".into(), - }), - kind: buck2_data::ActionKind::NotSet as i32, - } - .into(), - ), - } - .into(), - )), - )?; - - state.simple_console.observer.observe( - fake_time(&tick, 1), - &Arc::new(BuckEvent::new( - UNIX_EPOCH, - TraceId::new(), - None, - None, - buck2_data::InstantEvent { - data: Some( - buck2_data::DiceStateSnapshot { - key_states: { - let mut map = HashMap::new(); - map.insert( - "BuildKey".to_owned(), - buck2_data::DiceKeyState { - started: 4, - finished: 2, - check_deps_started: 2, - check_deps_finished: 1, - }, - ); - map - }, - } - .into(), - ), - } - .into(), - )), - )?; - - { - let output = TimedList::new(&CUTOFFS, "test", &state).draw( - Dimensions { - width: 60, - height: 10, - }, - DrawMode::Normal, - )?; - - let expected = [ - "test Remaining: 3/3. Time elapsed: 0.0s", - "------------------------------------------------------------", - "pkg:target -- action (category identifier) 10.0s", - ].iter().map(|l| format!("{}\n", l)).join(""); - - pretty_assertions::assert_eq!(output.fmt_for_test().to_string(), expected); - } - - { - state.config.max_lines = 1; // With fewer lines now - - let output = TimedList::new(&CUTOFFS, "test", &state).draw( - Dimensions { - width: 60, - height: 10, - }, - DrawMode::Normal, - )?; - - let expected = [ - "test Remaining: 3/3. Time elapsed: 0.0s", - "------------------------------------------------------------", - "... 
and 1 more currently executing", - ] - .iter() - .map(|l| format!("{}\n", l)) - .join(""); - - pretty_assertions::assert_eq!(output.fmt_for_test().to_string(), expected); - } - - Ok(()) - } - - #[test] - fn test_children() -> anyhow::Result<()> { - let tick = Tick::now(); - - let parent = SpanId::new(); - - let action = Arc::new(BuckEvent::new( - UNIX_EPOCH, - TraceId::new(), - Some(parent), - None, - SpanStartEvent { - data: Some( - buck2_data::ActionExecutionStart { - key: Some(buck2_data::ActionKey { - id: Default::default(), - owner: Some(buck2_data::action_key::Owner::TargetLabel( - buck2_data::ConfiguredTargetLabel { - label: Some(buck2_data::TargetLabel { - package: "pkg".into(), - name: "target".into(), - }), - configuration: Some(buck2_data::Configuration { - full_name: "conf".into(), - }), - execution_configuration: None, - }, - )), - key: "".to_owned(), - }), - name: Some(buck2_data::ActionName { - category: "category".into(), - identifier: "identifier".into(), - }), - kind: buck2_data::ActionKind::NotSet as i32, - } - .into(), - ), - } - .into(), - )); - - let prepare = Arc::new(BuckEvent::new( - UNIX_EPOCH, - TraceId::new(), - Some(SpanId::new()), - Some(parent), - SpanStartEvent { - data: Some( - buck2_data::ExecutorStageStart { - stage: Some(buck2_data::PrepareAction {}.into()), - } - .into(), - ), - } - .into(), - )); - - let mut state = BuckEventSpanTracker::new(); - state.start_at(&action, fake_time(&tick, 10)).unwrap(); - state.start_at(&prepare, fake_time(&tick, 5)).unwrap(); - - let time_speed = fake_time_speed(); - - let action_stats = ActionStats { - local_actions: 0, - remote_actions: 0, - cached_actions: 1, - fallback_actions: 0, - remote_dep_file_cached_actions: 0, - }; - - let timed_list_state = SuperConsoleConfig { - max_lines: 5, - ..Default::default() - }; - - let output = TimedList::new( - &CUTOFFS, - "test", - &super_console_state_for_test( - state.clone(), - action_stats.dupe(), - tick.dupe(), - time_speed, - timed_list_state.clone(), - ), - ) - .draw( - Dimensions { - width: 80, - height: 10, - }, - DrawMode::Normal, - )?; - let expected = [ - "test Remaining: 1/1. Cache hits: 100%. Time elapsed: 0.0s", - "--------------------------------------------------------------------------------", - "pkg:target -- action (category identifier) [prepare 5.0s] 10.0s", - ].iter().map(|l| format!("{}\n", l)).join(""); - - pretty_assertions::assert_eq!(output.fmt_for_test().to_string(), expected); - - // Now, add another action. Normally we don't have multiple stages actually running - // concurrently but this is a test! - - let re_download = Arc::new(BuckEvent::new( - UNIX_EPOCH, - TraceId::new(), - Some(SpanId::new()), - Some(parent), - SpanStartEvent { - data: Some( - buck2_data::ExecutorStageStart { - stage: Some( - buck2_data::ReStage { - stage: Some(buck2_data::ReDownload {}.into()), - } - .into(), - ), - } - .into(), - ), - } - .into(), - )); - - state.start_at(&re_download, fake_time(&tick, 2)).unwrap(); - - let output = TimedList::new( - &CUTOFFS, - "test", - &super_console_state_for_test(state, action_stats, tick, time_speed, timed_list_state), - ) - .draw( - Dimensions { - width: 80, - height: 10, - }, - DrawMode::Normal, - )?; - let expected = [ - "test Remaining: 1/1. Cache hits: 100%. 
Time elapsed: 0.0s", - "--------------------------------------------------------------------------------", - "pkg:target -- action (category identifier) [prepare 5.0s + 1] 10.0s", - ].iter().map(|l| format!("{}\n", l)).join(""); - - pretty_assertions::assert_eq!(output.fmt_for_test().to_string(), expected); - - Ok(()) - } -} diff --git a/app/buck2_client_ctx/src/subscribers/superconsole/timed_list/table_builder.rs b/app/buck2_client_ctx/src/subscribers/superconsole/timed_list/table_builder.rs index 49b6daac4ae5a..db82c7627eddc 100644 --- a/app/buck2_client_ctx/src/subscribers/superconsole/timed_list/table_builder.rs +++ b/app/buck2_client_ctx/src/subscribers/superconsole/timed_list/table_builder.rs @@ -148,6 +148,7 @@ impl TimedRow { /// This component echoes the `Lines` that have been stored in it. #[derive(Debug)] +#[allow(dead_code)] struct LinesComponent(Lines); impl Component for LinesComponent { diff --git a/app/buck2_client_ctx/src/subscribers/system_warning.rs b/app/buck2_client_ctx/src/subscribers/system_warning.rs new file mode 100644 index 0000000000000..8aaab3e392d40 --- /dev/null +++ b/app/buck2_client_ctx/src/subscribers/system_warning.rs @@ -0,0 +1,222 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use buck2_core::is_open_source; +use buck2_event_observer::action_stats::ActionStats; +use buck2_event_observer::humanized::HumanizedBytes; +use buck2_event_observer::humanized::HumanizedBytesPerSecond; + +use crate::subscribers::recorder::process_memory; + +const BYTES_PER_GIGABYTE: u64 = 1000000000; + +pub(crate) struct MemoryPressureHigh { + pub(crate) system_total_memory: u64, + pub(crate) process_memory: u64, +} + +pub(crate) struct LowDiskSpace { + pub(crate) total_disk_space: u64, + pub(crate) used_disk_space: u64, +} + +pub const SYSTEM_MEMORY_REMEDIATION_LINK: &str = ": https://fburl.com/buck2_mem_remediation"; +pub const DISK_REMEDIATION_LINK: &str = ": https://fburl.com/buck2_disk_remediation"; +pub const DOWNLOAD_SPEED_LOW_LINK: &str = "https://fburl.com/buck2_slow_download"; +pub const CACHE_MISS_LINK: &str = "https://fburl.com/buck2_cache_miss"; + +pub(crate) fn system_memory_exceeded_msg(memory_pressure: &MemoryPressureHigh) -> String { + format!( + "High memory pressure: buck2 is using {} out of {}{}", + HumanizedBytes::new(memory_pressure.process_memory), + HumanizedBytes::new(memory_pressure.system_total_memory), + if is_open_source() { + "" + } else { + SYSTEM_MEMORY_REMEDIATION_LINK + } + ) +} + +pub(crate) fn low_disk_space_msg(low_disk_space: &LowDiskSpace) -> String { + format!( + "Low disk space: only {} remaining out of {}{}", + HumanizedBytes::new(low_disk_space.total_disk_space - low_disk_space.used_disk_space), + HumanizedBytes::new(low_disk_space.total_disk_space), + if is_open_source() { + "" + } else { + DISK_REMEDIATION_LINK + } + ) +} + +pub(crate) fn slow_download_speed_msg(avg_re_download_speed: Option) -> String { + let avg_speed = if let Some(avg_re_download_speed) = avg_re_download_speed { + format!(": {}", HumanizedBytesPerSecond::new(avg_re_download_speed)) + } else { + String::new() + }; + let msg = format!( + "Slow download speed is detected{}. 
This may significantly impact build speed",
+ avg_speed
+ );
+ if !is_open_source() {
+ format!("{msg}: {DOWNLOAD_SPEED_LOW_LINK}.")
+ } else {
+ format!("{msg}.")
+ }
+}
+
+pub(crate) fn cache_misses_msg(action_stats: &ActionStats) -> String {
+ let cache_hit_percent = action_stats.total_cache_hit_percentage();
+ let msg = format!(
+ "Low cache hits detected: {}%. This may significantly impact build speed",
+ cache_hit_percent
+ );
+ if !is_open_source() {
+ format!("{msg}: {CACHE_MISS_LINK}.")
+ } else {
+ format!("{msg}. Try rebasing to a stable revision with warmed caches.")
+ }
+}
+
+pub(crate) fn check_memory_pressure(
+ last_snapshot: Option<&buck2_data::Snapshot>,
+ system_info: &buck2_data::SystemInfo,
+) -> Option<MemoryPressureHigh> {
+ let process_memory = process_memory(last_snapshot?)?;
+ let system_total_memory = system_info.system_total_memory_bytes?;
+ let memory_pressure_threshold_percent = system_info.memory_pressure_threshold_percent?;
+ // TODO (ezgi): one-shot commands don't record this. Prevent a panic (division by zero) until it is fixed.
+ if (process_memory * 100)
+ .checked_div(system_total_memory)
+ .is_some_and(|res| res >= memory_pressure_threshold_percent)
+ {
+ Some(MemoryPressureHigh {
+ system_total_memory,
+ process_memory,
+ })
+ } else {
+ None
+ }
+}
+
+pub(crate) fn check_remaining_disk_space(
+ last_snapshot: Option<&buck2_data::Snapshot>,
+ system_info: &buck2_data::SystemInfo,
+) -> Option<LowDiskSpace> {
+ let used_disk_space = last_snapshot?.used_disk_space_bytes?;
+ let total_disk_space = system_info.total_disk_space_bytes?;
+ let remaining_disk_space_threshold =
+ system_info.remaining_disk_space_threshold_gb? * BYTES_PER_GIGABYTE;
+
+ if total_disk_space - used_disk_space <= remaining_disk_space_threshold {
+ Some(LowDiskSpace {
+ total_disk_space,
+ used_disk_space,
+ })
+ } else {
+ None
+ }
+}
+
+// This check uses the average RE download speed, calculated as the number of bytes downloaded divided by the time between two snapshots.
+// This speed calculation is not precisely correct as we don't know how much time we've been downloading between two snapshots.
+// TODO(yurysamkevich): compute average download speed in RE/HTTP client
+pub(crate) fn check_download_speed(
+ first_snapshot: &Option<buck2_data::Snapshot>,
+ last_snapshot: Option<&buck2_data::Snapshot>,
+ system_info: &buck2_data::SystemInfo,
+ avg_re_download_speed: Option<u64>,
+ concurrent_commands: bool,
+) -> bool {
+ // RE download/upload stats are collected per daemon.
+ // If there are concurrent commands we get stats for both.
+ // It's incorrect to display the warning in this case.
+ if concurrent_commands {
+ return false;
+ }
+ inner_check_download_speed(
+ first_snapshot,
+ last_snapshot,
+ system_info,
+ avg_re_download_speed,
+ )
+ .is_some()
+}
+
+fn inner_check_download_speed(
+ first_snapshot: &Option<buck2_data::Snapshot>,
+ last_snapshot: Option<&buck2_data::Snapshot>,
+ system_info: &buck2_data::SystemInfo,
+ avg_re_download_speed: Option<u64>,
+) -> Option<()> {
+ let re_download_bytes =
+ last_snapshot?.re_download_bytes - first_snapshot.as_ref()?.re_download_bytes;
+ let avg_re_download_speed = avg_re_download_speed?;
+
+ if re_download_bytes >= system_info.min_re_download_bytes_threshold?
+ && avg_re_download_speed < system_info.avg_re_download_bytes_per_sec_threshold?
+ {
+ Some(())
+ } else {
+ None
+ }
+}
+
+pub(crate) fn is_vpn_enabled() -> bool {
+ if !cfg!(target_os = "macos") {
+ // TODO(rajneeshl): Add support for Windows
+ return false;
+ }
+
+ // Brittle check based on Cisco client's current behaviour.
+ // Small section copied from https://fburl.com/code/g7ttsdz3
+ std::path::Path::new("/opt/cisco/secureclient/vpn/ac_pf.token").exists()
+}
+
+pub(crate) fn check_cache_misses(
+ action_stats: &ActionStats,
+ system_info: &buck2_data::SystemInfo,
+ first_build_since_rebase: bool,
+ estimated_completion_percent: Option<u8>,
+) -> bool {
+ if !cache_warning_completion_threshold_crossed(
+ action_stats,
+ estimated_completion_percent,
+ system_info,
+ ) {
+ return false;
+ }
+ let cache_hit_percent = action_stats.total_cache_hit_percentage();
+ let threshold = system_info.min_cache_hit_threshold_percent.unwrap_or(0) as u8;
+ first_build_since_rebase && cache_hit_percent < threshold
+}
+
+fn cache_warning_completion_threshold_crossed(
+ action_stats: &ActionStats,
+ estimated_completion_percent: Option<u8>,
+ system_info: &buck2_data::SystemInfo,
+) -> bool {
+ if let Some(estimated_completion_percent) = estimated_completion_percent {
+ let percent_completion_threshold = system_info
+ .cache_warning_min_completion_threshold_percent
+ .unwrap_or(0) as u8;
+ if estimated_completion_percent > percent_completion_threshold {
+ return true;
+ }
+ }
+
+ // The completion threshold is typically set to 10%.
+ // For large builds, 10% completion may be too late to warn about cache misses.
+ // Additionally check if we have crossed an action count threshold.
+ action_stats.total_executed_and_cached_actions()
+ > system_info.cache_warning_min_actions_count.unwrap_or(0)
+}
diff --git a/app/buck2_client_ctx/src/tokio_runtime_setup.rs b/app/buck2_client_ctx/src/tokio_runtime_setup.rs
index ce555df8a3433..1fde70b64c85e 100644
--- a/app/buck2_client_ctx/src/tokio_runtime_setup.rs
+++ b/app/buck2_client_ctx/src/tokio_runtime_setup.rs
@@ -8,10 +8,15 @@
 */
 use anyhow::Context;
+use buck2_util::tokio_runtime::new_tokio_runtime;
 /// Tokio runtime used by the client commands.
 pub fn client_tokio_runtime() -> anyhow::Result<Runtime> {
- tokio::runtime::Builder::new_current_thread()
+ // Do not use the current thread: its stack may be too small.
+ new_tokio_runtime("buck2-cli")
+ // Tokio creates this number of threads,
+ // and creating too many threads for short commands is expensive.
+ .worker_threads(1)
 .enable_all()
 .build()
 .context("Building tokio runtime")
diff --git a/app/buck2_client_ctx/src/version.rs b/app/buck2_client_ctx/src/version.rs
index f608b34fa449c..e801a487e8176 100644
--- a/app/buck2_client_ctx/src/version.rs
+++ b/app/buck2_client_ctx/src/version.rs
@@ -52,10 +52,8 @@ impl BuckVersion {
 }
 fn compute() -> BuckVersion {
- // TODO(cjhopman): Currently, buck is just a single executable and we don't have really stringent
- // perf requirements so we hash the binary itself for the unique id. We will need to move this to
- // be part of the build/packaging process at some point.
- let exe = std::env::current_exe().unwrap(); + // Make sure to use the daemon exe's version, if there is one + let exe = crate::daemon::client::connect::get_daemon_exe().unwrap(); let mut file = File::open(exe).unwrap(); let file_m = match unsafe { memmap2::Mmap::map(&file) } { Ok(mmap) => mmap, diff --git a/app/buck2_cmd_completion_client/BUCK b/app/buck2_cmd_completion_client/BUCK new file mode 100644 index 0000000000000..3691fef8031b5 --- /dev/null +++ b/app/buck2_cmd_completion_client/BUCK @@ -0,0 +1,58 @@ +load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") +load("@fbcode_macros//build_defs:rust_linkable_symbol.bzl", "rust_linkable_symbol") + +oncall("build_infra") + +rust_library( + name = "buck2_cmd_completion_client", + srcs = glob(["src/**/*.rs"]), + test_deps = ["fbsource//third-party/rust:paste"], + deps = [ + "fbsource//third-party/rust:anyhow", + "fbsource//third-party/rust:async-trait", + "fbsource//third-party/rust:clap", + "fbsource//third-party/rust:clap_complete", + "fbsource//third-party/rust:futures", + "fbsource//third-party/rust:tokio", + ":completion_wrapper_bash", + ":completion_wrapper_fish", + ":completion_wrapper_zsh", + ":options_wrapper_bash", + ":options_wrapper_fish", + ":options_wrapper_zsh", + "//buck2/app/buck2_cli_proto:buck2_cli_proto", + "//buck2/app/buck2_client_ctx:buck2_client_ctx", + "//buck2/app/buck2_common:buck2_common", + "//buck2/app/buck2_core:buck2_core", + ], +) + +rust_linkable_symbol( + name = "completion_wrapper_bash", + content_str = "src/completion/completion-wrapper.bash", +) + +rust_linkable_symbol( + name = "completion_wrapper_fish", + content_str = "src/completion/completion-wrapper.fish", +) + +rust_linkable_symbol( + name = "completion_wrapper_zsh", + content_str = "src/completion/completion-wrapper.zsh", +) + +rust_linkable_symbol( + name = "options_wrapper_bash", + content_str = "src/completion/options-wrapper.bash", +) + +rust_linkable_symbol( + name = "options_wrapper_fish", + content_str = "src/completion/options-wrapper.fish", +) + +rust_linkable_symbol( + name = "options_wrapper_zsh", + content_str = "src/completion/options-wrapper.zsh", +) diff --git a/app/buck2_cmd_completion_client/Cargo.toml b/app/buck2_cmd_completion_client/Cargo.toml new file mode 100644 index 0000000000000..ee441d46e753a --- /dev/null +++ b/app/buck2_cmd_completion_client/Cargo.toml @@ -0,0 +1,26 @@ +[package] +description = "`buck2 completion` command client implementation" +edition = "2021" +license = { workspace = true } +name = "buck2_cmd_completion_client" +repository = { workspace = true } +version = "0.1.0" + +[dependencies] +anyhow = { workspace = true } +async-trait = { workspace = true } +clap = { workspace = true } +clap_complete = { workspace = true } +futures = { workspace = true } +tokio = { workspace = true } + +buck2_cli_proto = { workspace = true } +buck2_client_ctx = { workspace = true } +buck2_common = { workspace = true } +buck2_core = { workspace = true } + +[dev-dependencies] +paste = { workspace = true } + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ["cfg(buck_build)"] } diff --git a/app/buck2_cmd_completion_client/src/complete.rs b/app/buck2_cmd_completion_client/src/complete.rs new file mode 100644 index 0000000000000..3a8c60ca9294e --- /dev/null +++ b/app/buck2_cmd_completion_client/src/complete.rs @@ -0,0 +1,142 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+mod package;
+mod path_completer;
+mod path_sanitizer;
+mod results;
+mod target;
+
+use std::time::Duration;
+use std::time::Instant;
+
+use buck2_client_ctx::client_ctx::ClientCommandContext;
+use buck2_client_ctx::command_outcome::CommandOutcome;
+use buck2_client_ctx::exit_result::ExitResult;
+use buck2_client_ctx::streaming::BuckSubcommand;
+use buck2_core::buck2_env_anyhow;
+use buck2_core::fs::fs_util;
+use buck2_core::fs::paths::abs_path::AbsPath;
+use clap::ArgMatches;
+use package::PackageCompleter;
+use target::CompleteTargetCommand;
+
+#[derive(Debug, clap::Parser)]
+#[clap(name = "complete", hide = true)]
+pub struct CompleteCommand {
+ #[clap(long = "target", help = "Target to complete")]
+ partial_target: String,
+
+ #[clap(
+ hide = true,
+ long = "timeout",
+ help = "Timeout for completion in milliseconds",
+ env = "BUCK2_COMPLETION_TIMEOUT",
+ default_value_t = 500
+ )]
+ timeout_ms: u64,
+}
+
+/// Complete a given target string.
+///
+/// This command and the files in `complete/*.rs` use the following naming
+/// conventions when completing targets (aka labels):
+///
+/// ```text
+/// [[cell_name]//][path/to/package][:target_name]
+///
+/// |---- cell ---||----- path ----||-- target --|
+/// |---------- package -----------|
+/// |------------------ label -------------------|
+/// ```
+///
+/// Note that this code must, by design, take in a number of malformed strings
+/// and cannot rely on buck2's own parsing logic, which (generally) requires
+/// its targets to be well-formed.
+///
+/// Its goal is to create a label that buck2 commands will accept. These may,
+/// but are not required to, be fully-qualified targets. It does this
+/// following these principles:
+///
+/// 1. Provide reasonable "next step" completions to the shell. This set of
+/// completions may not be the same as what the shell decides to display
+/// but should be the options the user might choose from to reach the next
+/// decision branch.
+/// 2. Completions extend the input while retaining the specific partial label
+/// to as large a degree as possible.
+/// 3. When corrections are necessary to create a label that buck2 will accept,
+/// make the corrections first, then provide next-step completions in a
+/// second stage.
+
+impl CompleteCommand {
+ pub fn exec(self, matches: &ArgMatches, ctx: ClientCommandContext<'_>) -> ExitResult {
+ let lockfile = buck2_env_anyhow!("COMPLETION_VERIFY_LOCKFILE", applicability = testing)?
+ .map(AbsPath::new)
+ .transpose()?;
+
+ if let Some(lockfile) = lockfile {
+ drop(fs_util::write(lockfile, ""));
+ }
+
+ let res = self.exec_no_lockfile(matches, ctx);
+
+ if let Some(lockfile) = lockfile {
+ drop(fs_util::remove_file(lockfile));
+ }
+
+ res
+ }
+
+ fn exec_no_lockfile(self, matches: &ArgMatches, ctx: ClientCommandContext<'_>) -> ExitResult {
+ let start = Instant::now();
+ let time_limit = Duration::from_millis(self.timeout_ms);
+ let deadline = start + time_limit;
+
+ let cwd = ctx.working_dir.path();
+ let exit_result = match self.partial_target.split(':').collect::<Vec<_>>()[..]
{
+ // Package completion is performed locally and called here directly
+ [given_partial_package] => {
+ let roots = &ctx.paths()?.roots;
+ let completer = futures::executor::block_on(PackageCompleter::new(cwd, roots))?;
+ print_completions(futures::executor::block_on(
+ completer.complete(given_partial_package),
+ ))
+ }
+ // Target completion requires a round-trip to the daemon, so we spin up a new command
+ [given_package, given_partial_target] => {
+ let completer = CompleteTargetCommand::new(
+ cwd,
+ given_package.to_owned(),
+ given_partial_target.to_owned(),
+ deadline,
+ print_completions,
+ );
+ completer.exec(matches, ctx)
+ }
+ _ => ExitResult::bail(
+ "Malformed target string (expected [[cell]//][path/to/package][:target_name])",
+ ),
+ };
+ exit_result
+ }
+}
+
+fn print_completions(result: CommandOutcome<Vec<String>>) -> ExitResult {
+ match result {
+ CommandOutcome::Success(completions) => {
+ let stdout = completions
+ .into_iter()
+ .map(|s| s + "\n")
+ .collect::<Vec<String>>()
+ .join("");
+ ExitResult::success().with_stdout(stdout.into_bytes())
+ }
+ CommandOutcome::Failure(result) => result,
+ }
+}
diff --git a/app/buck2_cmd_completion_client/src/complete/package.rs b/app/buck2_cmd_completion_client/src/complete/package.rs
new file mode 100644
index 0000000000000..d791506797928
--- /dev/null
+++ b/app/buck2_cmd_completion_client/src/complete/package.rs
@@ -0,0 +1,623 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::sync::Arc;
+
+use buck2_client_ctx::command_outcome::CommandOutcome;
+use buck2_common::invocation_roots::InvocationRoots;
+use buck2_common::legacy_configs::cells::BuckConfigBasedCells;
+use buck2_core::fs::paths::abs_norm_path::AbsNormPath;
+use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf;
+use buck2_core::fs::project_rel_path::ProjectRelativePath;
+
+use super::path_completer::PathCompleter;
+use super::path_sanitizer::PathSanitizer;
+use super::results::CompletionResults;
+
+pub(crate) struct PackageCompleter<'a> {
+ cwd: AbsNormPathBuf,
+ roots: &'a InvocationRoots,
+ cell_configs: Arc<BuckConfigBasedCells>,
+ path_sanitizer: PathSanitizer,
+ results: CompletionResults<'a>,
+}
+
+impl<'a> PackageCompleter<'a> {
+ pub(crate) async fn new(cwd: &AbsNormPath, roots: &'a InvocationRoots) -> CommandOutcome<Self> {
+ let cell_configs = Arc::new(
+ BuckConfigBasedCells::parse_with_config_args(
+ &roots.project_root,
+ &[],
+ ProjectRelativePath::empty(),
+ )
+ .await?,
+ );
+
+ let path_sanitizer = PathSanitizer::new(&cell_configs, cwd).await?;
+ let results = CompletionResults::new(roots, cell_configs.clone());
+ CommandOutcome::Success(Self {
+ cwd: cwd.to_owned(),
+ roots,
+ cell_configs,
+ path_sanitizer,
+ results,
+ })
+ }
+
+ /// Complete the package portion of a partial target.
+ ///
+ /// Returns a collection of possible completions, each generally including the
+ /// partial target. The partial target might not be returned as-is when
+ /// completion logic is able to unambiguously normalize the partial target,
+ /// such as partials which cross cell boundaries. In this case, normalized
+ /// completion(s) are returned.
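+ ///
+ /// As an illustrative sketch (behavior drawn from the tests below; exact
+ /// output depends on the repository layout):
+ ///
+ /// ```text
+ /// "baredir0/buck"  =>  ["baredir0/buckdir0b/", "baredir0/buckdir0b:"]
+ /// "cell1/"         =>  ["cell1//", "cell1//:"]
+ /// ```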
+ pub(crate) async fn complete(mut self, given_path: &str) -> CommandOutcome<Vec<String>> {
+ match given_path {
+ "" => {
+ self.results.insert_dir(&self.cwd, "//").await;
+ self.results
+ .insert_package_colon_if_buildfile_exists(&self.roots.cell_root, given_path)
+ .await;
+ self.complete_partial_cells(given_path).await?;
+ self.complete_partial_path(given_path).await?;
+ }
+ "/" => {
+ self.results.insert_dir(&self.roots.cell_root, "//").await;
+ }
+ "//" => {
+ self.results
+ .insert_package_colon_if_buildfile_exists(&self.roots.cell_root, given_path)
+ .await;
+ self.complete_partial_path(given_path).await?;
+ }
+ _ => {
+ self.complete_partial_cells(given_path).await?;
+ self.complete_partial_path(given_path).await?;
+ }
+ }
+ CommandOutcome::Success(self.results.into())
+ }
+
+ async fn complete_partial_cells(&mut self, given_path: &str) -> anyhow::Result<()> {
+ let cell_resolver = &self.cell_configs.cell_resolver;
+ let alias_resolver = self
+ .cell_configs
+ .get_cell_alias_resolver_for_cwd_fast(
+ &self.roots.project_root,
+ &self.roots.project_root.relativize(&self.cwd)?,
+ )
+ .await?;
+ for (cell_alias, cell_name) in alias_resolver.mappings() {
+ let canonical_cell_root = format!("{}//", cell_alias);
+ if canonical_cell_root.starts_with(given_path) {
+ let cell = cell_resolver.get(cell_name)?;
+ let cell_abs_path = self
+ .roots
+ .project_root
+ .root()
+ .join_normalized(cell.path().as_project_relative_path())?;
+ if canonical_cell_root == given_path {
+ // "cell1//" -> "cell1//:"
+ self.results
+ .insert_package_colon_if_buildfile_exists(
+ &cell_abs_path,
+ &canonical_cell_root,
+ )
+ .await;
+ } else {
+ // "cell1" -> ["cell1//", "cell1//:"]
+ // "cell" -> ["cell1//", "cell1//:", "cell2//"]
+ self.results
+ .insert_dir(&cell_abs_path, &canonical_cell_root)
+ .await;
+ }
+ }
+ }
+ Ok(())
+ }
+
+ async fn complete_partial_path(&mut self, given_path: &str) -> CommandOutcome<()> {
+ let completer = PathCompleter::new(&self.cwd, &self.path_sanitizer, &mut self.results)?;
+ completer.complete(given_path).await
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use buck2_client_ctx::exit_result::ExitResult;
+ use buck2_common::invocation_roots::find_invocation_roots;
+
+ use super::*;
+
+ fn paths_to_test_data() -> &'static [&'static str] {
+ &[
+ "fbcode/buck2/app/buck2_cmd_completion_client/test_data",
+ "app/buck2_cmd_completion_client/test_data",
+ "test_data",
+ ]
+ }
+
+ fn in_dir(d: &str) -> CommandOutcome<(InvocationRoots, AbsNormPathBuf)> {
+ let cwd = AbsNormPathBuf::new(std::env::current_dir().unwrap())?;
+
+ for path in paths_to_test_data() {
+ let candidate = cwd.join_normalized(path)?.join_normalized(d)?;
+ if candidate.exists() {
+ return CommandOutcome::Success((find_invocation_roots(&candidate)?, candidate));
+ }
+ }
+
+ CommandOutcome::Failure(ExitResult::bail("test_data directory not found"))
+ }
+
+ fn in_root() -> CommandOutcome<(InvocationRoots, AbsNormPathBuf)> {
+ let cwd = AbsNormPathBuf::new(std::env::current_dir().unwrap())?;
+
+ for path in paths_to_test_data() {
+ let candidate = cwd.join_normalized(path)?;
+ if candidate.exists() {
+ return CommandOutcome::Success((find_invocation_roots(&candidate)?, candidate));
+ }
+ }
+
+ CommandOutcome::Failure(ExitResult::bail("test_data directory not found"))
+ }
+
+ fn is_err(outcome: CommandOutcome<Vec<String>>) -> bool {
+ match outcome {
+ CommandOutcome::Success(_) => false,
+ CommandOutcome::Failure(_) => true,
+ }
+ }
+
+ type TestResult = Result<(), ExitResult>;
+
+ #[tokio::test]
+ async fn test_expands_top_level_directory() ->
TestResult { + let (roots, cwd) = in_root()?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("bare").await?; + + assert_eq!(actual, vec!["baredir0/"]); + Ok(()) + } + + #[tokio::test] + async fn test_expands_subdirectory() -> TestResult { + let (roots, cwd) = in_root()?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("baredir0/bare").await?; + + assert_eq!(actual, vec!["baredir0/baredir0a/"]); + Ok(()) + } + + #[tokio::test] + async fn test_expands_subdirectory_with_buck_targets() -> TestResult { + let (roots, cwd) = in_root()?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("baredir0/buck").await?; + + assert_eq!(actual, vec!["baredir0/buckdir0b/", "baredir0/buckdir0b:",]); + Ok(()) + } + + #[tokio::test] + async fn test_provides_subdirectory_and_target_alternatives() -> TestResult { + let (roots, cwd) = in_root()?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("baredir0/buckdir0b").await?; + + assert_eq!(actual, vec!["baredir0/buckdir0b/", "baredir0/buckdir0b:",]); + Ok(()) + } + + #[tokio::test] + async fn test_completes_partial_paths_and_matched_target_dirs() -> TestResult { + let (roots, cwd) = in_dir("baredir0")?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("b").await?; + + assert_eq!(actual, vec!["baredir0a/", "buckdir0b/", "buckdir0b:",]); + Ok(()) + } + + #[tokio::test] + async fn test_completes_cell_name_when_given_cell_root() -> TestResult { + let (roots, cwd) = in_root()?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("cel").await?; + + assert_eq!( + actual, + vec![ + "cell1//", + "cell1//:", + "cell2//", + "cell3a//", + "cell3a//:", + "cell3b//" + ] + ); + Ok(()) + } + + #[tokio::test] + async fn test_completes_cell_name_when_given_cell_root_as_directory() -> TestResult { + let (roots, cwd) = in_root()?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("cell1/").await?; + + assert_eq!(actual, vec!["cell1//", "cell1//:"]); + Ok(()) + } + + #[tokio::test] + async fn test_identifies_non_local_cell_name() -> TestResult { + let (roots, cwd) = in_root()?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("cell3a").await?; + + assert_eq!(actual, vec!["cell3a//", "cell3a//:"]); + Ok(()) + } + + #[tokio::test] + async fn test_completes_non_local_cell_name_when_provided_with_a_single_slash() -> TestResult { + let (roots, cwd) = in_root()?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("cell3b/").await?; + + assert_eq!(actual, vec!["cell3b//"]); + Ok(()) + } + + #[tokio::test] + async fn test_completes_non_local_cell_name() -> TestResult { + let (roots, cwd) = in_root()?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("cell3").await?; + + assert_eq!(actual, vec!["cell3a//", "cell3a//:", "cell3b//"]); + Ok(()) + } + + #[tokio::test] + async fn test_completes_mix_of_non_local_cell_name_and_local_dirs() -> TestResult { + let (roots, cwd) = in_root()?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("cell").await?; + + assert_eq!( + actual, + vec![ + "cell1//", + "cell1//:", + "cell2//", + "cell3a//", + "cell3a//:", + "cell3b//" + ] + ); + Ok(()) + } + + #[tokio::test] + async fn test_completes_multiple_partial_dirs() -> TestResult { + let (roots, cwd) = in_root()?; + 
let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("baredir0/b").await?; + + assert_eq!( + actual, + vec![ + "baredir0/baredir0a/", + "baredir0/buckdir0b/", + "baredir0/buckdir0b:" + ] + ); + Ok(()) + } + + #[tokio::test] + async fn test_completes_as_both_directory_and_target_root() -> anyhow::Result<()> { + let (roots, cwd) = in_root()?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("baredir0/buckdir0b").await?; + + assert_eq!(actual, vec!["baredir0/buckdir0b/", "baredir0/buckdir0b:",]); + Ok(()) + } + + #[tokio::test] + async fn test_completes_subdirs_as_well_as_target_colon_for_fully_qualified_cell() -> TestResult + { + let (roots, cwd) = in_root()?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("cell1//").await?; + + assert_eq!(actual, vec!["cell1//:", "cell1//buck2/", "cell1//buck2:"]); + Ok(()) + } + + #[tokio::test] + async fn test_completes_cell_correctly_from_a_different_cell() -> anyhow::Result<()> { + let (roots, cwd) = in_dir("cell2")?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("cell1//").await?; + + assert_eq!(actual, vec!["cell1//:", "cell1//buck2/", "cell1//buck2:"]); + Ok(()) + } + + #[tokio::test] + async fn test_completes_directory_in_different_cell_to_canonical_name() -> TestResult { + let (roots, cwd) = in_root()?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("cell1/buck2").await?; + + assert_eq!(actual, vec!["cell1//buck2"]); + Ok(()) + } + + #[tokio::test] + async fn test_completes_partial_dir_in_different_cell_to_canonical_name() -> TestResult { + let (roots, cwd) = in_root()?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("cell1/b").await?; + + assert_eq!(actual, vec!["cell1//b"]); + Ok(()) + } + + #[tokio::test] + async fn test_completes_canonical_path_in_other_cell_to_with_slash_or_colon() -> TestResult { + let (roots, cwd) = in_root()?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("cell1//buck2").await?; + + assert_eq!(actual, vec!["cell1//buck2/", "cell1//buck2:"]); + Ok(()) + } + + #[tokio::test] + async fn test_completes_canonical_path_to_other_cell_from_subdirectory_in_this_cell() + -> TestResult { + let (roots, cwd) = in_dir("baredir0")?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("cell1//buck2").await?; + + assert_eq!(actual, vec!["cell1//buck2/", "cell1//buck2:"]); + Ok(()) + } + + #[tokio::test] + async fn test_in_subdirectory_can_complete_subdirs_of_project_root_with_canonical_cell() + -> TestResult { + let (roots, cwd) = in_dir("baredir0")?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("root//").await?; + + assert_eq!(actual, vec!["root//baredir0/", "root//dir3/",]); + Ok(()) + } + + #[tokio::test] + async fn test_backwards_path_to_different_cell_root_directory_completes_to_canonical_cell_name() + -> TestResult { + let (roots, cwd) = in_dir("baredir0")?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("../cell1").await?; + + assert_eq!(actual, vec!["cell1//"]); + Ok(()) + } + + #[tokio::test] + async fn test_partial_subdirectory_name_expands_to_all_matches() -> TestResult { + let (roots, cwd) = in_dir("baredir0")?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("b").await?; + + assert_eq!(actual, vec!["baredir0a/", 
"buckdir0b/", "buckdir0b:",]); + Ok(()) + } + + #[tokio::test] + async fn test_empty_string_completes_both_cells_and_dirs() -> anyhow::Result<()> { + let (roots, cwd) = in_root()?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("").await?; + + assert_eq!( + actual, + vec![ + "//", + "baredir0/", + "cell1//", + "cell1//:", + "cell2//", + "cell3a//", + "cell3a//:", + "cell3b//", + "dir3/", + "prelude//", + "root//" + ] + ); + Ok(()) + } + + #[tokio::test] + async fn test_empty_string_also_completes_colon_if_in_target_dir() -> anyhow::Result<()> { + let (roots, cwd) = in_dir("cell1")?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("").await?; + + assert_eq!( + actual, + vec![ + "//", + "//:", + ":", + "buck2/", + "buck2:", + "cell1//", + "cell1//:", + "cell2//", + "cell3a//", + "cell3a//:", + "cell3b//", + "prelude//", + "root//" + ] + ); + Ok(()) + } + + #[tokio::test] + async fn test_completes_single_slash_to_double_slash_in_root() -> anyhow::Result<()> { + let (roots, cwd) = in_root()?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("/").await?; + + assert_eq!(actual, vec!["//"]); + Ok(()) + } + + #[tokio::test] + async fn test_completes_root_children_of_double_slash() -> anyhow::Result<()> { + let (roots, cwd) = in_root()?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("//").await?; + + assert_eq!(actual, vec!["//baredir0/", "//dir3/"]); + Ok(()) + } + + #[tokio::test] + async fn test_completes_single_slash_to_double_slash_in_sub_cell() -> anyhow::Result<()> { + let (roots, cwd) = in_dir("cell1")?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("/").await?; + + assert_eq!(actual, vec!["//", "//:"]); + Ok(()) + } + + #[tokio::test] + async fn test_completes_cell_children_of_double_slash() -> anyhow::Result<()> { + let (roots, cwd) = in_dir("cell1")?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("//").await?; + + assert_eq!(actual, vec!["//:", "//buck2/", "//buck2:"]); + Ok(()) + } + + #[tokio::test] + async fn test_normalizes_absolute_paths() -> anyhow::Result<()> { + let (roots, cwd) = in_root()?; + let absolute_partial = cwd.as_path().join("bare"); + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete(absolute_partial.to_str().unwrap()).await?; + + assert_eq!(actual, vec!["root//bare"]); + Ok(()) + } + + #[tokio::test] + async fn test_bails_on_nonexistent_path() -> anyhow::Result<()> { + let (roots, cwd) = in_root()?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("nonexistent").await?; + + assert_eq!(actual.len(), 0); + Ok(()) + } + + #[tokio::test] + async fn test_bails_on_nonexistent_path_in_subcell() -> anyhow::Result<()> { + let (roots, cwd) = in_root()?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("cell1//nonexistent").await?; + + assert_eq!(actual.len(), 0); + Ok(()) + } + + #[tokio::test] + async fn test_package_completion_completes_packages_with_cell_aliases_as_aliases() + -> anyhow::Result<()> { + let (roots, cwd) = in_dir("cell1/buck2/prelude")?; + let uut = PackageCompleter::new(&cwd, &roots).await?; + + let actual = uut.complete("cell1_alias//b").await?; + + assert_eq!(actual, vec!["cell1_alias//buck2/", "cell1_alias//buck2:"]); + Ok(()) + } + + #[tokio::test] + async fn 
test_package_completion_only_uses_aliases_in_cells_defining_them()
+ -> anyhow::Result<()> {
+ let (roots, cwd) = in_dir("cell1/buck2")?;
+ let uut = PackageCompleter::new(&cwd, &roots).await?;
+
+ let actual_result = uut.complete("cell1_alias//b").await;
+
+ assert!(is_err(actual_result));
+ Ok(())
+ }
+
+ #[tokio::test]
+ async fn test_completes_cell_aliases_alongside_cells() -> anyhow::Result<()> {
+ let (roots, cwd) = in_dir("cell1/buck2/prelude")?;
+ let uut = PackageCompleter::new(&cwd, &roots).await?;
+
+ let actual = uut.complete("cell1").await?;
+
+ assert_eq!(
+ actual,
+ vec!["cell1//", "cell1//:", "cell1_alias//", "cell1_alias//:"]
+ );
+ Ok(())
+ }
+}
diff --git a/app/buck2_cmd_completion_client/src/complete/path_completer.rs b/app/buck2_cmd_completion_client/src/complete/path_completer.rs
new file mode 100644
index 0000000000000..d4d9e63e64a8a
--- /dev/null
+++ b/app/buck2_cmd_completion_client/src/complete/path_completer.rs
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use buck2_client_ctx::command_outcome::CommandOutcome;
+use buck2_core::fs::paths::abs_norm_path::AbsNormPath;
+use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf;
+
+use super::path_sanitizer::PathSanitizer;
+use super::path_sanitizer::SanitizedPath;
+use super::results::CompletionResults;
+
+pub(crate) struct PathCompleter<'a, 'b> {
+ cwd: AbsNormPathBuf,
+ sanitizer: &'b PathSanitizer,
+ results: &'b mut CompletionResults<'a>,
+}
+
+impl<'a, 'b> PathCompleter<'a, 'b> {
+ pub(crate) fn new(
+ cwd: &AbsNormPath,
+ sanitizer: &'b PathSanitizer,
+ results: &'b mut CompletionResults<'a>,
+ ) -> anyhow::Result<Self> {
+ Ok(Self {
+ cwd: cwd.to_owned(),
+ sanitizer,
+ results,
+ })
+ }
+
+ pub(crate) async fn complete(mut self, given_path: &str) -> CommandOutcome<()> {
+ let path = self.sanitizer.sanitize(given_path)?;
+ if path.given() != given_path && self.completes_to_dir(&path)? {
+ // There are potential completions to this string, but we're
+ // correcting it on the first tab to minimize surprise and help
+ // the user learn to type paths the right way.
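+ //
+ // For example (a sketch mirroring the package completer tests): the
+ // cross-cell partial "cell1/buck2" is corrected to "cell1//buck2" on
+ // the first tab, and subdirectory/target completions are offered on
+ // the next one.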
+ //
+ // The completes_to_dir() check guards against complete_dir_fragment()
+ // completing "cell//" -> "cell1//", "root//cell/"
+ self.results.insert(path.given());
+ } else if path.is_ready_for_next_dir() {
+ self.complete_subdirs(&path).await?;
+ } else {
+ self.complete_dir_fragment(&path).await?;
+ }
+ CommandOutcome::Success(())
+ }
+
+ async fn complete_subdirs(&mut self, partial: &SanitizedPath) -> CommandOutcome<()> {
+ let partial_dir = partial.abs_path();
+
+ let given_dir = partial.given();
+
+ for entry in partial_dir.read_dir()?.flatten() {
+ if entry.path().is_dir() {
+ let path = self.sanitize(&(given_dir.to_owned() + &file_name_string(&entry)))?;
+ if path.cell_name() == partial.cell_name() {
+ self.results.insert_path(&path).await;
+ }
+ }
+ }
+ CommandOutcome::Success(())
+ }
+
+ async fn complete_dir_fragment(&mut self, partial: &SanitizedPath) -> CommandOutcome<()> {
+ let partial_path = partial.abs_path();
+ let partial_base = partial_path.file_name().unwrap().to_str().unwrap();
+
+ let given_dir = &partial.given()[..partial.given().len() - partial_base.len()];
+
+ let mut scan_dir = self.cwd.to_path_buf();
+ if let Some(offset_dir) = partial_path.parent() {
+ scan_dir = scan_dir.join(offset_dir);
+ }
+
+ for entry_result in scan_dir.read_dir()? {
+ let entry = entry_result?;
+ if entry.path().is_dir() && file_name_string(&entry).starts_with(partial_base) {
+ let given_expanded =
+ self.sanitize(&(given_dir.to_owned() + &file_name_string(&entry)))?;
+ self.results.insert_path(&given_expanded).await;
+ }
+ }
+ CommandOutcome::Success(())
+ }
+
+ fn completes_to_dir(&self, partial: &SanitizedPath) -> anyhow::Result<bool> {
+ let partial_path = partial.abs_path();
+ let partial_base = partial_path.file_name().unwrap().to_str().unwrap();
+
+ let mut scan_dir = self.cwd.to_path_buf();
+ if let Some(offset_dir) = partial_path.parent() {
+ scan_dir = scan_dir.join(offset_dir);
+ }
+
+ for entry_result in scan_dir.read_dir()? {
+ let entry = entry_result?;
+ if entry.path().is_dir() && file_name_string(&entry).starts_with(partial_base) {
+ return Ok(true);
+ }
+ }
+ Ok(false)
+ }
+
+ fn sanitize(&self, given: &str) -> anyhow::Result<SanitizedPath> {
+ self.sanitizer.sanitize(given)
+ }
+}
+
+fn file_name_string(entry: &std::fs::DirEntry) -> String {
+ entry.file_name().into_string().unwrap()
+}
diff --git a/app/buck2_cmd_completion_client/src/complete/path_sanitizer.rs b/app/buck2_cmd_completion_client/src/complete/path_sanitizer.rs
new file mode 100644
index 0000000000000..352af89541a3a
--- /dev/null
+++ b/app/buck2_cmd_completion_client/src/complete/path_sanitizer.rs
@@ -0,0 +1,578 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::borrow::Cow;
+use std::path::Path;
+
+use buck2_common::invocation_roots::find_invocation_roots;
+use buck2_common::invocation_roots::InvocationRoots;
+use buck2_common::legacy_configs::cells::BuckConfigBasedCells;
+use buck2_core::cells::name::CellName;
+use buck2_core::cells::paths::CellRelativePath;
+use buck2_core::cells::paths::CellRelativePathBuf;
+use buck2_core::cells::CellAliasResolver;
+use buck2_core::cells::CellResolver;
+use buck2_core::fs::paths::abs_norm_path::AbsNormPath;
+use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf;
+use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath;
+use buck2_core::fs::project::ProjectRoot;
+use buck2_core::fs::project_rel_path::ProjectRelativePath;
+
+#[derive(Debug, Clone)]
+pub(crate) struct SanitizedPath {
+ given: String,
+ abs_path: AbsNormPathBuf,
+ cell_name: CellName,
+ // cell_path: CellRelativePathBuf,
+}
+
+impl SanitizedPath {
+ pub(crate) fn abs_path(&self) -> &AbsNormPath {
+ &self.abs_path
+ }
+
+ // pub(crate) fn canonical(&self) -> String {
+ // format!("{}//{}", self.cell_name, self.cell_path())
+ // }
+
+ pub(crate) fn cell_name(&self) -> &CellName {
+ &self.cell_name
+ }
+
+ // pub(crate) fn cell_path(&self) -> &CellRelativePath {
+ // &self.cell_path
+ // }
+
+ pub(crate) fn given(&self) -> &str {
+ &self.given
+ }
+
+ pub(crate) fn is_ready_for_next_dir(&self) -> bool {
+ let is_root_dir = self.given == "";
+ let is_slash_terminated_dir = self.abs_path.is_dir() && self.given.ends_with('/');
+ is_root_dir || is_slash_terminated_dir
+ }
+}
+
+impl std::fmt::Display for SanitizedPath {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::result::Result<(), std::fmt::Error> {
+ write!(f, "{}", self.given)
+ }
+}
+
+pub(crate) struct PathSanitizer {
+ cell_resolver: CellResolver,
+ alias_resolver: CellAliasResolver,
+ cwd: AbsNormPathBuf,
+ cwd_roots: InvocationRoots,
+}
+
+impl PathSanitizer {
+ pub(crate) async fn new(
+ cell_configs: &BuckConfigBasedCells,
+ cwd: &AbsNormPath,
+ ) -> anyhow::Result<Self> {
+ let cwd_roots = find_invocation_roots(&cwd)?;
+ let cell_resolver = cell_configs.cell_resolver.clone();
+ let alias_resolver = cell_configs
+ .get_cell_alias_resolver_for_cwd_fast(
+ &cwd_roots.project_root,
+ &cwd_roots.project_root.relativize(&cwd)?,
+ )
+ .await?;
+ Ok(Self {
+ cell_resolver,
+ alias_resolver,
+ cwd: cwd.to_owned(),
+ cwd_roots,
+ })
+ }
+
+ pub(crate) fn sanitize(&self, given: &str) -> anyhow::Result<SanitizedPath> {
+ match given.split("//").collect::<Vec<_>>()[..] {
+ [path] => self.sanitize_relative_path(given, path),
+ [cell, path] => self.sanitize_cell_based_path(given, cell, path),
+ _ => Err(anyhow::Error::msg("Poorly formatted BuckPath string")),
+ }
+ }
+
+ fn sanitize_relative_path(
+ &self,
+ given: &str,
+ path_str: &str,
+ ) -> Result<SanitizedPath, anyhow::Error> {
+ let path = Path::new(path_str);
+ let abs_path = if path.is_absolute() {
+ AbsNormPathBuf::new(path.to_owned())?
+ } else {
+ self.cwd.join_normalized(path_str)?
+ };
+
+ let cwd_cell_name = self.resolve_cell(&self.cwd_roots.cell_root)?;
+
+ let cell_name = self.resolve_cell(&abs_path)?;
+ let cell_path = self.relative_to_cell(&abs_path)?;
+
+ if cell_name != cwd_cell_name || !self.is_normalized_path_and_in_cell(given, &cell_path) {
+ let fixed_given = if given.ends_with('/') && cell_path.as_str() != "" {
+ format!("{}//{}/", cell_name, cell_path)
+ } else {
+ format!("{}//{}", cell_name, cell_path)
+ };
+ Ok(SanitizedPath {
+ given: fixed_given,
+ abs_path,
+ cell_name,
+ // cell_path,
+ })
+ } else {
+ Ok(SanitizedPath {
+ given: given.to_owned(),
+ abs_path,
+ cell_name,
+ // cell_path,
+ })
+ }
+ }
+
+ fn sanitize_cell_based_path(
+ &self,
+ given: &str,
+ given_cell_str: &str,
+ cell_path: &str,
+ ) -> Result<SanitizedPath, anyhow::Error> {
+ let given_cell = if given_cell_str == "" {
+ self.resolve_cell(&self.cwd_roots.cell_root)?
+ } else {
+ self.resolve_alias(given_cell_str)?
+ };
+ let abs_path = self.cell_abs_path(given_cell)?.join_normalized(cell_path)?;
+ let actual_cell = self.resolve_cell(&abs_path)?;
+ if given_cell == actual_cell {
+ Ok(SanitizedPath {
+ given: given.to_owned(),
+ abs_path,
+ cell_name: given_cell,
+ // cell_path: CellRelativePath::from_path(cell_path)?.to_owned(),
+ })
+ } else {
+ // This is a bit ugly because it breaks expectations --
+ // the "given" path is malformed, so it is corrected to
+ // the closest equivalent path that actually works.
+ let corrected_cell_root = self.cell_abs_path(actual_cell)?;
+ let corrected_cell_path = abs_path.strip_prefix(corrected_cell_root)?;
+ Ok(SanitizedPath {
+ given: format!("{}//{}", actual_cell, corrected_cell_path),
+ abs_path,
+ cell_name: actual_cell,
+ // cell_path: corrected_cell_path,
+ })
+ }
+ }
+
+ fn cell_abs_path(&self, cell: CellName) -> anyhow::Result<AbsNormPathBuf> {
+ let root_to_cell = self
+ .cell_resolver
+ .get(cell)?
+ .path()
+ .as_forward_relative_path();
+ self.project_root_dir().join_normalized(root_to_cell)
+ }
+
+ /// Checks whether a given path str is properly a member of the given cell
+ ///
+ /// Both `given_fragment` and `proper_cell_path` refer to the same path,
+ /// but `given_fragment` is user input and may be malformed in ways that
+ /// buck will not tolerate (absolute, cross-cell, ../, etc.).
+ ///
+ /// This function returns true if `given_fragment` is acceptable to buck
+ /// as a relative reference to a path in the cell that `proper_cell_path`
+ /// is based on.
+ fn is_normalized_path_and_in_cell(
+ &self,
+ given_fragment: &str,
+ proper_cell_path: &CellRelativePath,
+ ) -> bool {
+ let path = Path::new(given_fragment);
+ match ForwardRelativePath::new(path) {
+ Ok(forward_rel_path) => proper_cell_path.ends_with(forward_rel_path),
+ Err(_) => false,
+ }
+ }
+
+ fn project_root(&self) -> &ProjectRoot {
+ &self.cwd_roots.project_root
+ }
+
+ fn project_root_dir(&self) -> &AbsNormPath {
+ self.cwd_roots.project_root.root()
+ }
+
+ fn resolve_alias(&self, dir: &str) -> anyhow::Result<CellName> {
+ self.alias_resolver.resolve(dir)
+ }
+
+ fn resolve_cell(&self, path: &AbsNormPath) -> anyhow::Result<CellName> {
+ let project_relative = &self.relative_to_project(path)?;
+ self.cell_resolver
+ .find::<ProjectRelativePath>(project_relative)
+ }
+
+ fn relative_to_cell(&self, dir: &AbsNormPath) -> anyhow::Result<CellRelativePathBuf> {
+ Ok(self
+ .cell_resolver
+ .get_cell_path_from_abs_path(dir, &self.project_root())?
+ .path()
+ .to_owned())
+ }
+
+ fn relative_to_project<'a>(
+ &'a self,
+ dir: &'a AbsNormPath,
+ ) -> anyhow::Result<Cow<'a, ProjectRelativePath>> {
+ self.project_root().relativize(dir)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use buck2_common::legacy_configs::cells::BuckConfigBasedCells;
+ use paste::paste;
+
+ use super::*;
+
+ fn paths_to_test_data() -> &'static [&'static str] {
+ &[
+ "fbcode/buck2/app/buck2_cmd_completion_client/test_data",
+ "app/buck2_cmd_completion_client/test_data",
+ "test_data",
+ ]
+ }
+
+ fn in_dir(d: &str) -> anyhow::Result<AbsNormPathBuf> {
+ let cwd = AbsNormPathBuf::new(std::env::current_dir().unwrap())?;
+
+ for path in paths_to_test_data() {
+ let candidate = cwd.join_normalized(path)?.join_normalized(d)?;
+ if candidate.exists() {
+ return Ok(candidate);
+ }
+ }
+
+ Err(anyhow::anyhow!("test_data directory not found"))
+ }
+
+ fn in_root() -> anyhow::Result<AbsNormPathBuf> {
+ let cwd = AbsNormPathBuf::new(std::env::current_dir().unwrap())?;
+
+ for path in paths_to_test_data() {
+ let candidate = cwd.join_normalized(path)?;
+ if candidate.exists() {
+ return Ok(candidate);
+ }
+ }
+
+ Err(anyhow::anyhow!("test_data directory not found"))
+ }
+
+ fn abs_path_from_root(relative: &str) -> anyhow::Result<AbsNormPathBuf> {
+ let root = in_root()?;
+ root.join_normalized(relative)
+ }
+
+ fn abs_str_from_root(relative: &str) -> anyhow::Result<String> {
+ let path = abs_path_from_root(relative)?;
+ Ok(path.to_string())
+ }
+
+ fn cell_configs(cwd: &Path) -> anyhow::Result<BuckConfigBasedCells> {
+ let cwd_roots = find_invocation_roots(cwd)?;
+ futures::executor::block_on(BuckConfigBasedCells::parse_with_config_args(
+ &cwd_roots.project_root,
+ &[],
+ &cwd_roots.project_root.relativize(&AbsNormPath::new(cwd)?)?,
+ ))
+ }
+
+ macro_rules! testy {
+ ($test_name:ident($in_dir:expr, $partial:expr) -> {
+ abs_path: from_root($abs_dir:literal),
+ canonical: $canonical:literal,
+ cell_name: $cell_name:literal,
+ cell_path: $cell_path:literal,
+ given: $given:literal,
+ display: $display:literal,
+ to_string: $to_string:literal,
+ }) => {
+ paste!
+
+    #[tokio::test]
+    async fn test_can_create_from_a_canonical_path() -> anyhow::Result<()> {
+        let cwd = in_root()?;
+        let uut = PathSanitizer::new(&cell_configs(&cwd)?, &cwd).await?;
+
+        uut.sanitize("root//baredir0/buckdir0a")?;
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_can_create_from_a_str_relative_path() -> anyhow::Result<()> {
+        let cwd = in_root()?;
+        let uut = PathSanitizer::new(&cell_configs(&cwd)?, &cwd).await?;
+
+        uut.sanitize("baredir0/buckdir0a")?;
+
+        Ok(())
+    }
+
+    testy!(canonical_path_in_root_from_root(in_root()?, "root//baredir0/buckdir0a") -> {
+        abs_path: from_root("baredir0/buckdir0a"),
+        canonical: "root//baredir0/buckdir0a",
+        cell_name: "root",
+        cell_path: "baredir0/buckdir0a",
+        given: "root//baredir0/buckdir0a",
+        display: "root//baredir0/buckdir0a",
+        to_string: "root//baredir0/buckdir0a",
+    });
+
+    testy!(anonymous_cell_from_root(in_root()?, "//") -> {
+        abs_path: from_root(""),
+        canonical: "//",
+        cell_name: "root",
+        cell_path: "",
+        given: "//",
+        display: "//",
+        to_string: "//",
+    });
+
+    testy!(canonical_cell_path_from_root(in_root()?, "cell1//buck2") -> {
+        abs_path: from_root("cell1/buck2"),
+        canonical: "cell1//buck2",
+        cell_name: "cell1",
+        cell_path: "buck2",
+        given: "cell1//buck2",
+        display: "cell1//buck2",
+        to_string: "cell1//buck2",
+    });
+
+    testy!(relative_path_from_root(in_root()?, "baredir0/buckdir0a") -> {
+        abs_path: from_root("baredir0/buckdir0a"),
+        canonical: "root//baredir0/buckdir0a",
+        cell_name: "root",
+        cell_path: "baredir0/buckdir0a",
+        given: "baredir0/buckdir0a",
+        display: "baredir0/buckdir0a",
+        to_string: "baredir0/buckdir0a",
+    });
+
+    
testy!(cross_cell_forward_path_from_root(in_root()?, "cell1/buck2") -> { + abs_path: from_root("cell1/buck2"), + canonical: "cell1//buck2", + cell_name: "cell1", + cell_path: "buck2", + given: "cell1//buck2", // BuckPath is documented as correcting this to cell1//buck2 + display: "cell1//buck2", + to_string: "cell1//buck2", + }); + + testy!(corrects_malformed_cross_cell_forward_path_from_root(in_root()?, "root//cell1/buck2") -> { + abs_path: from_root("cell1/buck2"), + canonical: "cell1//buck2", + cell_name: "cell1", + cell_path: "buck2", + given: "cell1//buck2", + display: "cell1//buck2", + to_string: "cell1//buck2", + }); + + #[tokio::test] + async fn test_root_dir_as_empty_string_is_ready_for_subdirs() -> anyhow::Result<()> { + let cwd = in_root()?; + let uut = PathSanitizer::new(&cell_configs(&cwd)?, &cwd).await?; + + let actual = uut.sanitize("")?; + + assert!(actual.is_ready_for_next_dir()); + + Ok(()) + } + + #[tokio::test] + async fn test_slash_terminated_dir_is_ready_for_subdirs() -> anyhow::Result<()> { + let cwd = in_root()?; + let uut = PathSanitizer::new(&cell_configs(&cwd)?, &cwd).await?; + + let actual = uut.sanitize("baredir0/")?; + + assert!(actual.is_ready_for_next_dir()); + + Ok(()) + } + + #[tokio::test] + async fn test_partial_with_no_slash_is_not_ready_for_subdirs() -> anyhow::Result<()> { + let cwd = in_root()?; + let uut = PathSanitizer::new(&cell_configs(&cwd)?, &cwd).await?; + + let actual = uut.sanitize("baredir0")?; + + assert!(!actual.is_ready_for_next_dir()); + + Ok(()) + } + + #[tokio::test] + async fn test_fully_qualified_cell_is_ready_for_subdirs() -> anyhow::Result<()> { + let cwd = in_root()?; + let uut = PathSanitizer::new(&cell_configs(&cwd)?, &cwd).await?; + + let actual = uut.sanitize("cell1//")?; + + assert!(actual.is_ready_for_next_dir()); + + Ok(()) + } + + #[tokio::test] + async fn test_bails_on_nonexistent_cell() -> anyhow::Result<()> { + let cwd = in_root()?; + let uut = PathSanitizer::new(&cell_configs(&cwd)?, &cwd).await?; + + assert!(uut.sanitize("boguscell//").is_err()); + + Ok(()) + } + + testy!(absolute_path_from_root(in_root()?, &abs_str_from_root("baredir0")?) -> { + abs_path: from_root("baredir0"), + canonical: "root//baredir0", + cell_name: "root", + cell_path: "baredir0", + given: "root//baredir0", + display: "root//baredir0", + to_string: "root//baredir0", + }); + + testy!(absolute_path_in_subcell(in_dir("cell1")?, &abs_str_from_root("cell1/buck2")?) -> { + abs_path: from_root("cell1/buck2"), + canonical: "cell1//buck2", + cell_name: "cell1", + cell_path: "buck2", + given: "cell1//buck2", + display: "cell1//buck2", + to_string: "cell1//buck2", + }); + + testy!(aliased_cell(in_dir("cell1/buck2/prelude")?, "cell1_alias//buck2") -> { + abs_path: from_root("cell1/buck2"), + canonical: "cell1//buck2", + cell_name: "cell1", + cell_path: "buck2", + given: "cell1_alias//buck2", + display: "cell1_alias//buck2", + to_string: "cell1_alias//buck2", + }); + + #[tokio::test] + async fn test_creation_returns_error_on_non_local_alias() -> anyhow::Result<()> { + let cwd = in_dir("cell1/buck2")?; + let uut = PathSanitizer::new(&cell_configs(&cwd)?, &cwd).await?; + + assert!(uut.sanitize("cell1_alias//buck2").is_err()); + + Ok(()) + } +} diff --git a/app/buck2_cmd_completion_client/src/complete/results.rs b/app/buck2_cmd_completion_client/src/complete/results.rs new file mode 100644 index 0000000000000..d551347ea5687 --- /dev/null +++ b/app/buck2_cmd_completion_client/src/complete/results.rs @@ -0,0 +1,100 @@ +/* + * Copyright (c) Meta Platforms, Inc. 
and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::collections::hash_map::Entry;
+use std::collections::BTreeSet;
+use std::collections::HashMap;
+use std::sync::Arc;
+
+use buck2_common::buildfiles::parse_buildfile_name;
+use buck2_common::invocation_roots::InvocationRoots;
+use buck2_common::legacy_configs::cells::BuckConfigBasedCells;
+use buck2_core::cells::name::CellName;
+use buck2_core::fs::paths::abs_norm_path::AbsNormPath;
+use buck2_core::fs::paths::file_name::FileNameBuf;
+
+use super::path_sanitizer::SanitizedPath;
+
+pub(crate) struct CompletionResults<'a> {
+    roots: &'a InvocationRoots,
+    cell_configs: Arc<BuckConfigBasedCells>,
+    buildfiles: HashMap<CellName, Vec<FileNameBuf>>,
+    results: BTreeSet<String>,
+}
+
+impl<'a> CompletionResults<'a> {
+    pub(crate) fn new(roots: &'a InvocationRoots, cell_configs: Arc<BuckConfigBasedCells>) -> Self {
+        Self {
+            roots,
+            cell_configs,
+            buildfiles: HashMap::new(),
+            results: BTreeSet::<String>::new(),
+        }
+    }
+
+    pub(crate) fn insert(&mut self, target: &str) -> &mut Self {
+        self.results.insert(target.to_owned());
+        self
+    }
+
+    pub(crate) async fn insert_path(&mut self, path: &SanitizedPath) -> &mut Self {
+        self.insert_dir(&path.abs_path(), path.given()).await
+    }
+
+    pub(crate) async fn insert_dir(&mut self, abs_dir: &AbsNormPath, nickname: &str) -> &mut Self {
+        if nickname.ends_with("//") {
+            self.insert(nickname);
+            self.insert_package_colon_if_buildfile_exists(abs_dir, nickname)
+                .await;
+        } else if nickname.ends_with('/') {
+            self.insert(nickname);
+        } else {
+            self.insert(&format!("{}/", nickname));
+            self.insert_package_colon_if_buildfile_exists(abs_dir, nickname)
+                .await;
+        }
+        self
+    }
+
+    pub(crate) async fn insert_package_colon_if_buildfile_exists(
+        &mut self,
+        abs_dir: &AbsNormPath,
+        nickname: &str,
+    ) -> &mut Self {
+        for f in self.buildfile_names(abs_dir).await.unwrap() {
+            if abs_dir.join(f).exists() {
+                self.insert(&format!("{}:", nickname));
+                break;
+            }
+        }
+        self
+    }
+
+    async fn buildfile_names(
+        &mut self,
+        abs_dir: &AbsNormPath,
+    ) -> anyhow::Result<&Vec<FileNameBuf>> {
+        let relative_to_project = self.roots.project_root.relativize(abs_dir)?;
+        let cell_configs = &self.cell_configs;
+        let cell_name = cell_configs.cell_resolver.find(&relative_to_project)?;
+        if let Entry::Vacant(e) = self.buildfiles.entry(cell_name) {
+            let cell_config = cell_configs
+                .parse_single_cell(cell_name, &self.roots.project_root)
+                .await?;
+            e.insert(parse_buildfile_name(&cell_config)?);
+        }
+        Ok(self.buildfiles.get(&cell_name).unwrap())
+    }
+}
+
+impl<'a> From<CompletionResults<'a>> for Vec<String> {
+    fn from(pr: CompletionResults) -> Self {
+        pr.results.iter().map(String::from).collect()
+    }
+}
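+
+// For illustration (a sketch, assuming a `sanitized_path` produced by the
+// PathSanitizer and the invocation roots in hand): typical use builds up
+// results and then converts them into plain strings:
+//
+//     let mut results = CompletionResults::new(&roots, cell_configs.clone());
+//     results.insert_path(&sanitized_path).await;
+//     let completions: Vec<String> = results.into();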
diff --git a/app/buck2_cmd_completion_client/src/complete/target.rs b/app/buck2_cmd_completion_client/src/complete/target.rs
new file mode 100644
index 0000000000000..d9c7bf21cc95c
--- /dev/null
+++ b/app/buck2_cmd_completion_client/src/complete/target.rs
@@ -0,0 +1,513 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::sync::Arc;
+use std::time::Instant;
+
+use buck2_cli_proto::new_generic::CompleteRequest;
+use buck2_cli_proto::new_generic::NewGenericRequest;
+use buck2_cli_proto::new_generic::NewGenericResponse;
+use buck2_cli_proto::ClientContext;
+use buck2_client_ctx::client_ctx::ClientCommandContext;
+use buck2_client_ctx::command_outcome::CommandOutcome;
+use buck2_client_ctx::common::target_cfg::TargetCfgOptions;
+use buck2_client_ctx::common::ui::CommonConsoleOptions;
+use buck2_client_ctx::common::CommonBuildConfigurationOptions;
+use buck2_client_ctx::common::CommonEventLogOptions;
+use buck2_client_ctx::common::CommonStarlarkOptions;
+use buck2_client_ctx::daemon::client::BuckdClientConnector;
+use buck2_client_ctx::daemon::client::FlushingBuckdClient;
+use buck2_client_ctx::exit_result::ExitCode;
+use buck2_client_ctx::exit_result::ExitResult;
+use buck2_client_ctx::streaming::StreamingCommand;
+use buck2_common::invocation_roots::InvocationRoots;
+use buck2_common::legacy_configs::cells::BuckConfigBasedCells;
+use buck2_core::fs::paths::abs_norm_path::AbsNormPath;
+use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf;
+use buck2_core::fs::project_rel_path::ProjectRelativePath;
+use clap::ArgMatches;
+use futures::future::BoxFuture;
+use futures::FutureExt;
+use tokio::time;
+
+use super::path_sanitizer::PathSanitizer;
+use super::results::CompletionResults;
+
+type CompleteCallback = fn(CommandOutcome<Vec<String>>) -> ExitResult;
+
+pub(crate) trait TargetResolver: Send {
+    fn resolve(&mut self, partial_target: String) -> BoxFuture<CommandOutcome<Vec<String>>>;
+}
+
+pub(crate) struct CompleteTargetCommand {
+    target_cfg: TargetCfgOptions,
+
+    cwd: AbsNormPathBuf,
+    package: String,
+    partial_target: String,
+
+    deadline: Instant,
+    callback: CompleteCallback,
+}
+
+impl CompleteTargetCommand {
+    pub(crate) fn new(
+        cwd: &AbsNormPath,
+        package: String,
+        partial_target: String,
+        deadline: Instant,
+        callback: CompleteCallback,
+    ) -> Self {
+        let target_cfg = TargetCfgOptions::default();
+        Self {
+            target_cfg,
+            cwd: cwd.to_owned(),
+            package,
+            partial_target,
+            deadline,
+            callback,
+        }
+    }
+}
+
+#[async_trait::async_trait]
+impl StreamingCommand for CompleteTargetCommand {
+    const COMMAND_NAME: &'static str = "complete";
+
+    async fn exec_impl(
+        self,
+        buckd: &mut BuckdClientConnector,
+        matches: &ArgMatches,
+        ctx: &mut ClientCommandContext<'_>,
+    ) -> ExitResult {
+        let buckd_client = buckd.with_flushing();
+        let context = ctx.client_context(matches, &self)?;
+        let mut target_resolver = DaemonTargetResolver {
+            buckd_client,
+            context,
+            target_cfg: self.target_cfg,
+        };
+
+        let completer = TargetCompleter::new(&self.cwd, &ctx.paths()?.roots, &mut target_resolver)
+            .await
+            .expect("Failed to create target completer");
+        let task = completer.complete(&self.package, &self.partial_target);
+
+        let remaining_time = self.deadline.saturating_duration_since(Instant::now());
+        match time::timeout(remaining_time, task).await {
+            Ok(CommandOutcome::Success(completions)) => {
+                (self.callback)(CommandOutcome::Success(completions))
+            }
+            Ok(CommandOutcome::Failure(err)) => err,
+            Err(_) => ExitResult::status(ExitCode::Timeout),
+        }
+    }
+
+    fn console_opts(&self) -> &CommonConsoleOptions {
+        CommonConsoleOptions::none_ref()
+    }
+
+    fn event_log_opts(&self) -> &CommonEventLogOptions {
+        CommonEventLogOptions::default_ref()
+    }
+
+    fn build_config_opts(&self) -> &CommonBuildConfigurationOptions {
+        CommonBuildConfigurationOptions::reuse_current_config_ref()
+    }
+
+    fn starlark_opts(&self) -> &CommonStarlarkOptions {
+        CommonStarlarkOptions::default_ref()
+    }
+}
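+
+// For illustration, the client-side flow is roughly:
+//   CompleteTargetCommand::exec_impl
+//     -> TargetCompleter::complete(package, partial_target)
+//     -> TargetResolver::resolve("<package>:")  (one daemon round trip)
+//     -> prefix-filter the returned labels against partial_target
+// with the whole task bounded by the command's deadline.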
+pub(crate) struct TargetCompleter<'a> {
+    cwd: AbsNormPathBuf,
+    cell_configs: Arc<BuckConfigBasedCells>,
+    target_resolver: &'a mut dyn TargetResolver,
+    results: CompletionResults<'a>,
+}
+
+impl<'a> TargetCompleter<'a> {
+    pub(crate) async fn new(
+        cwd: &AbsNormPath,
+        roots: &'a InvocationRoots,
+        target_resolver: &'a mut dyn TargetResolver,
+    ) -> anyhow::Result<Self> {
+        let cell_configs = Arc::new(
+            BuckConfigBasedCells::parse_with_config_args(
+                &roots.project_root,
+                &[],
+                ProjectRelativePath::empty(),
+            )
+            .await?,
+        );
+        Ok(Self {
+            cwd: cwd.to_owned(),
+            cell_configs: cell_configs.clone(),
+            target_resolver,
+            results: CompletionResults::new(roots, cell_configs.clone()),
+        })
+    }
+
+    /// Complete the target in a partial label.
+    ///
+    /// Returns a collection of possible completions, each prefixed with the
+    /// cell/package name(s) from the partial label.
+    pub(crate) async fn complete(
+        mut self,
+        given_package: &str,
+        partial_target: &str,
+    ) -> CommandOutcome<Vec<String>> {
+        let sanitizer = PathSanitizer::new(&self.cell_configs, &self.cwd).await?;
+        let path = sanitizer.sanitize(given_package)?;
+        let completions = self
+            .target_resolver
+            .resolve(path.given().to_owned() + ":")
+            .await?;
+
+        for label in completions {
+            let target = label.split(':').next_back().unwrap();
+            if target.starts_with(partial_target) {
+                let completion = path.given().to_owned() + ":" + target;
+                self.results.insert(&completion);
+            }
+        }
+        CommandOutcome::Success(self.results.into())
+    }
+}
+
+struct DaemonTargetResolver<'a, 'b> {
+    buckd_client: FlushingBuckdClient<'a, 'b>,
+    context: ClientContext,
+    target_cfg: TargetCfgOptions,
+}
+
+impl<'a, 'b> TargetResolver for DaemonTargetResolver<'a, 'b> {
+    fn resolve(&mut self, partial_target: String) -> BoxFuture<CommandOutcome<Vec<String>>> {
+        let request = NewGenericRequest::Complete(CompleteRequest {
+            target_cfg: self.target_cfg.target_cfg(),
+            partial_target,
+        });
+        self.buckd_client
+            .new_generic(self.context.clone(), request, None)
+            .then(|res| async move {
+                match res {
+                    Ok(CommandOutcome::Success(NewGenericResponse::Complete(res))) => {
+                        CommandOutcome::Success(res.completions)
+                    }
+                    Ok(CommandOutcome::Success(_)) => CommandOutcome::Failure(ExitResult::bail(
+                        "Unexpected response type from generic command",
+                    )),
+                    Ok(CommandOutcome::Failure(result)) => CommandOutcome::Failure(result),
+                    Err(e) => CommandOutcome::Failure(ExitResult::err(e)),
+                }
+            })
+            .boxed()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::collections::HashMap;
+
+    use buck2_common::invocation_roots::find_invocation_roots;
+    use futures::future;
+
+    use super::*;
+
+    fn paths_to_test_data() -> &'static [&'static str] {
+        &[
+            "fbcode/buck2/app/buck2_cmd_completion_client/test_data",
+            "app/buck2_cmd_completion_client/test_data",
+            "test_data",
+        ]
+    }
+
+    fn in_dir(d: &str) -> anyhow::Result<(InvocationRoots, AbsNormPathBuf)> {
+        let cwd = AbsNormPathBuf::new(std::env::current_dir().unwrap())?;
+
+        for path in paths_to_test_data() {
+            let candidate = cwd.join_normalized(path)?.join_normalized(d)?;
+            if candidate.exists() {
+                return Ok((find_invocation_roots(&candidate)?, candidate));
+            }
+        }
+
+        Err(anyhow::anyhow!("test_data directory not found"))
+    }
+
+    fn in_root() -> anyhow::Result<(InvocationRoots, AbsNormPathBuf)> {
+        let cwd = AbsNormPathBuf::new(std::env::current_dir().unwrap())?;
+
+        for path in paths_to_test_data() {
+            let candidate = cwd.join_normalized(path)?;
+            if candidate.exists() {
+                return Ok((find_invocation_roots(&candidate)?, candidate));
+            }
+        }
+
+        Err(anyhow::anyhow!("test_data directory not found"))
+    }
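+
+    // For illustration: the tests below stub the daemon with FakeTargetResolver,
+    // seeding the response for a single package query, e.g.
+    //     resolver.add_response("buck2:", vec!["cell1//buck2:buck2"]);
+    // and then assert on the prefix-filtered completions.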
+
+    async fn target_complete_helper(
+        uut: TargetCompleter<'_>,
+        partial_target: &str,
+    ) -> CommandOutcome<Vec<String>> {
+        match partial_target.split(':').collect::<Vec<&str>>()[..] {
+            [package, partial_target] => uut.complete(package, partial_target).await,
+            _ => panic!("unexpected target {}", partial_target),
+        }
+    }
+
+    type TestResult = Result<(), ExitResult>;
+
+    struct FakeTargetResolver {
+        target_responses: HashMap<String, Vec<String>>,
+    }
+
+    impl FakeTargetResolver {
+        fn new() -> Self {
+            FakeTargetResolver {
+                target_responses: HashMap::new(),
+            }
+        }
+        fn add_response(&mut self, request: &str, response: Vec<&str>) {
+            self.target_responses.insert(
+                request.to_owned(),
+                response.into_iter().map(|s| s.to_owned()).collect(),
+            );
+        }
+    }
+
+    impl TargetResolver for FakeTargetResolver {
+        fn resolve(&mut self, partial_target: String) -> BoxFuture<CommandOutcome<Vec<String>>> {
+            let res = self.target_responses.get(&partial_target).unwrap();
+            Box::pin(future::ready(CommandOutcome::Success(res.clone())))
+        }
+    }
+
+    #[tokio::test]
+    async fn test_handles_degenerate_buck_directory_with_no_targets() -> TestResult {
+        let (roots, cwd) = in_root()?;
+        let mut resolver = FakeTargetResolver::new();
+        resolver.add_response("baredir0:", vec![]);
+        let uut = TargetCompleter::new(&cwd, &roots, &mut resolver).await?;
+
+        let actual = target_complete_helper(uut, "baredir0:").await?;
+
+        let expected: Vec<String> = vec![];
+        assert_eq!(actual, expected);
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_provides_targets_for_path_ending_with_a_colon() -> TestResult {
+        let (roots, cwd) = in_root()?;
+        let mut resolver = FakeTargetResolver::new();
+        resolver.add_response(
+            "baredir0/buckdir0b:",
+            vec![
+                "root//baredir0/buckdir0b:target1",
+                "root//baredir0/buckdir0b:target2",
+                "root//baredir0/buckdir0b:target3",
+            ],
+        );
+        let uut = TargetCompleter::new(&cwd, &roots, &mut resolver).await?;
+
+        let actual = target_complete_helper(uut, "baredir0/buckdir0b:").await?;
+
+        assert_eq!(
+            actual,
+            vec![
+                "baredir0/buckdir0b:target1",
+                "baredir0/buckdir0b:target2",
+                "baredir0/buckdir0b:target3",
+            ],
+        );
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_provides_targets_in_nested_cell() -> TestResult {
+        let (roots, cwd) = in_dir("cell1")?;
+        let mut resolver = FakeTargetResolver::new();
+        resolver.add_response(
+            "buck2:",
+            vec!["cell1//buck2:buck2", "cell1//buck2:symlinked_buck2_and_tpx"],
+        );
+        let uut = TargetCompleter::new(&cwd, &roots, &mut resolver).await?;
+
+        let actual = target_complete_helper(uut, "buck2:").await?;
+
+        assert_eq!(
+            actual,
+            vec!["buck2:buck2", "buck2:symlinked_buck2_and_tpx",]
+        );
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_completes_a_partial_target() -> TestResult {
+        let (roots, cwd) = in_dir("cell1")?;
+        let mut resolver = FakeTargetResolver::new();
+        resolver.add_response(
+            "buck2:",
+            vec!["cell1//buck2:buck2", "cell1//buck2:symlinked_buck2_and_tpx"],
+        );
+        let uut = TargetCompleter::new(&cwd, &roots, &mut resolver).await?;
+
+        let actual = target_complete_helper(uut, "buck2:bu").await?;
+
+        assert_eq!(actual, vec!["buck2:buck2",]);
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_completes_targets_for_fully_qualified_cell() -> TestResult {
+        let (roots, cwd) = in_root()?;
+        let mut resolver = FakeTargetResolver::new();
+        resolver.add_response("cell1//:", vec!["cell1//:target1"]);
+        let uut = TargetCompleter::new(&cwd, &roots, &mut resolver).await?;
+
+        let actual = target_complete_helper(uut, "cell1//:").await?;
+
+        assert_eq!(actual,
vec!["cell1//:target1"]); + Ok(()) + } + + #[tokio::test] + async fn test_completes_other_cell_canonical_path_targets_from_subdirectory_in_this_cell() + -> TestResult { + let (roots, cwd) = in_dir("baredir0")?; + let mut resolver = FakeTargetResolver::new(); + resolver.add_response( + "cell1//buck2:", + vec!["cell1//buck2:buck2", "cell1//buck2:symlinked_buck2_and_tpx"], + ); + let uut = TargetCompleter::new(&cwd, &roots, &mut resolver).await?; + + let actual = target_complete_helper(uut, "cell1//buck2:").await?; + + assert_eq!( + actual, + vec!["cell1//buck2:buck2", "cell1//buck2:symlinked_buck2_and_tpx"] + ); + Ok(()) + } + + #[tokio::test] + async fn test_expands_cell_to_canonical_in_middle_of_input_text_with_target_colon() -> TestResult + { + let (roots, cwd) = in_root()?; + let mut resolver = FakeTargetResolver::new(); + resolver.add_response( + "cell1//buck2:", + vec!["cell1//buck2:buck2", "cell1//buck2:symlinked_buck2_and_tpx"], + ); + let uut = TargetCompleter::new(&cwd, &roots, &mut resolver).await?; + + let actual = target_complete_helper(uut, "cell1/buck2:").await?; + + assert_eq!( + actual, + vec!["cell1//buck2:buck2", "cell1//buck2:symlinked_buck2_and_tpx",] + ); + Ok(()) + } + + #[tokio::test] + async fn test_expands_cell_to_canonical_in_middle_of_input_text_with_partial_target() + -> TestResult { + let (roots, cwd) = in_root()?; + let mut resolver = FakeTargetResolver::new(); + resolver.add_response( + "cell1//buck2:", + vec!["cell1//buck2:buck2", "cell1//buck2:symlinked_buck2_and_tpx"], + ); + let uut = TargetCompleter::new(&cwd, &roots, &mut resolver).await?; + + let actual = target_complete_helper(uut, "cell1/buck2:bu").await?; + + assert_eq!(actual, vec!["cell1//buck2:buck2"]); + Ok(()) + } + + #[tokio::test] + async fn test_expands_targets_for_a_bare_colon_in_a_buck_directory() -> TestResult { + let (roots, cwd) = in_dir("cell1/buck2")?; + let mut resolver = FakeTargetResolver::new(); + resolver.add_response( + ":", + vec!["cell1//buck2:buck2", "cell1//buck2:symlinked_buck2_and_tpx"], + ); + let uut = TargetCompleter::new(&cwd, &roots, &mut resolver).await?; + + let actual = target_complete_helper(uut, ":").await?; + + assert_eq!(actual, vec![":buck2", ":symlinked_buck2_and_tpx",]); + Ok(()) + } + + #[tokio::test] + async fn test_returns_nothing_for_a_bare_colon_in_a_non_buck_directory() -> TestResult { + let (roots, cwd) = in_dir("baredir0")?; + let mut resolver = FakeTargetResolver::new(); + // This is weird, but it reflects the behavior of the server API + // when given an invalid ":" as its argument + resolver.add_response(":", vec![]); + let uut = TargetCompleter::new(&cwd, &roots, &mut resolver).await?; + + let actual = target_complete_helper(uut, ":").await?; + + assert_eq!(actual.len(), 0); + Ok(()) + } + + #[tokio::test] + async fn test_target_completion_works_correctly_with_aliased_cells() -> anyhow::Result<()> { + let (roots, cwd) = in_dir("cell1/buck2/prelude")?; + let mut target_resolver = FakeTargetResolver::new(); + target_resolver.add_response( + "cell1_alias//buck2:", + vec!["cell1//buck2:buck2", "cell1//buck2:symlinked_buck2_and_tpx"], + ); + let uut = TargetCompleter::new(&cwd, &roots, &mut target_resolver).await?; + + let actual = target_complete_helper(uut, "cell1_alias//buck2:").await?; + + assert_eq!( + actual, + vec![ + "cell1_alias//buck2:buck2", + "cell1_alias//buck2:symlinked_buck2_and_tpx", + ] + ); + Ok(()) + } + + #[tokio::test] + async fn test_target_completion_fails_with_error_with_nonexistent_alias() -> anyhow::Result<()> + { + let 
(roots, cwd) = in_dir("cell1/buck2")?; + let mut target_resolver = FakeTargetResolver::new(); + target_resolver.add_response( + "cell1_alias//buck2:", + vec![ + "cell1//buck2:buck2", + "cell1//buck2:symlinked_buck2_and_tpx", + "", + ], + ); + let uut = TargetCompleter::new(&cwd, &roots, &mut target_resolver).await?; + + match target_complete_helper(uut, "cell1_alias//buck2:").await { + CommandOutcome::Success(_) => panic!("Expected error"), + CommandOutcome::Failure(_) => Ok(()), + } + } +} diff --git a/app/buck2_cmd_completion_client/src/completion.rs b/app/buck2_cmd_completion_client/src/completion.rs new file mode 100644 index 0000000000000..8acd12cfdff43 --- /dev/null +++ b/app/buck2_cmd_completion_client/src/completion.rs @@ -0,0 +1,161 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::exit_result::ExitResult; +use clap::Command; +use clap::ValueEnum; +use clap_complete::generate; + +// This file is the entry point for the target-completing delegate for buck2 +// command line completions. Its completion commands are called from shell +// scripts which perform the actual completion logic. These shell scripts +// ignore non-zero return values and allow stderr to pass through to the +// user. As such, caution should be taken to ensure error messages are +// understandable in the context of argument completion. + +#[derive(ValueEnum, Clone, Debug, Copy)] +#[clap(rename_all = "kebab-case")] +enum Shell { + Bash, + Fish, + Zsh, +} + +#[derive(Debug, clap::Parser)] +#[clap(name = "completion", verbatim_doc_comment)] +/// Print completion configuration for shell +/// +/// For a one-time setup, run one of the following commands: +/// source <(buck2 completion bash) +/// source <(buck2 completion zsh) +pub struct CompletionCommand { + #[clap( + value_enum, + help = "shell for which to generate completion script", + group = "operation" + )] + shell: Shell, + + // FIXME(JakobDegen): Remove after rollout + #[clap(help = "Only emit completions for option flags", long, hide = true)] + options_only: bool, +} + +impl CompletionCommand { + pub fn exec( + self, + command: Command, + _matches: &clap::ArgMatches, + _ctx: ClientCommandContext<'_>, + ) -> ExitResult { + let mut command = command; + print_completion_script(self.shell, self.options_only, &mut command)?; + ExitResult::success() + } +} + +const GENERATED_INSERTION_POINT: &str = "# %INSERT_GENERATED_LINE%"; +const GENERATED_TAG: &str = concat!("@", "generated"); +const COMPLETION_INSERTION_POINT: &str = "# %INSERT_OPTION_COMPLETION%"; + +fn completion_wrapper(shell: Shell) -> &'static str { + #[cfg(buck_build)] + { + match shell { + Shell::Bash => completion_wrapper_bash::get(), + Shell::Fish => completion_wrapper_fish::get(), + Shell::Zsh => completion_wrapper_zsh::get(), + } + } + #[cfg(not(buck_build))] + { + match shell { + Shell::Bash => include_str!("completion/completion-wrapper.bash"), + Shell::Fish => include_str!("completion/completion-wrapper.fish"), + Shell::Zsh => include_str!("completion/completion-wrapper.zsh"), + } + } +} + +fn options_wrapper(shell: Shell) -> &'static str { + #[cfg(buck_build)] + { + match shell { + Shell::Bash => options_wrapper_bash::get(), + Shell::Fish => 
options_wrapper_fish::get(),
+            Shell::Zsh => options_wrapper_zsh::get(),
+        }
+    }
+    #[cfg(not(buck_build))]
+    {
+        match shell {
+            Shell::Bash => include_str!("completion/options-wrapper.bash"),
+            Shell::Fish => include_str!("completion/options-wrapper.fish"),
+            Shell::Zsh => include_str!("completion/options-wrapper.zsh"),
+        }
+    }
+}
+
+fn print_completion_script(
+    shell_arg: Shell,
+    options_only: bool,
+    cmd: &mut Command,
+) -> anyhow::Result<()> {
+    let wrapper = if options_only {
+        options_wrapper(shell_arg)
+    } else {
+        completion_wrapper(shell_arg)
+    };
+    let shell = match shell_arg {
+        Shell::Bash => clap_complete::Shell::Bash,
+        Shell::Zsh => clap_complete::Shell::Zsh,
+        Shell::Fish => clap_complete::Shell::Fish,
+    };
+
+    let mut wrapper_iter = wrapper.lines();
+    let mut found_insertion_point = false;
+
+    for line in wrapper_iter.by_ref() {
+        match line {
+            GENERATED_INSERTION_POINT => {
+                buck2_client_ctx::println!(
+                    "# {} by `{}`",
+                    GENERATED_TAG,
+                    std::env::args().collect::<Vec<String>>().join(" ")
+                )?;
+            }
+            COMPLETION_INSERTION_POINT => {
+                found_insertion_point = true;
+
+                buck2_client_ctx::println!("{}", option_completions(shell, cmd)?)?;
+            }
+            s => {
+                buck2_client_ctx::println!("{}", s)?;
+            }
+        }
+    }
+
+    if !found_insertion_point {
+        Err(anyhow::anyhow!(
+            "Failed to find {} in {:?} completion template",
+            COMPLETION_INSERTION_POINT,
+            shell_arg
+        ))
+    } else {
+        Ok(())
+    }
+}
+
+fn option_completions(shell: clap_complete::Shell, cmd: &mut Command) -> anyhow::Result<String> {
+    let mut v = Vec::new();
+    // FIXME: it appears that this might silently swallow errors; would require a PR to fix
+    generate(shell, cmd, cmd.get_name().to_owned(), &mut v);
+    Ok(String::from_utf8(v)?)
+}
diff --git a/app/buck2_cmd_completion_client/src/completion/completion-wrapper.bash b/app/buck2_cmd_completion_client/src/completion/completion-wrapper.bash
new file mode 100644
index 0000000000000..7e8c4523fc392
--- /dev/null
+++ b/app/buck2_cmd_completion_client/src/completion/completion-wrapper.bash
@@ -0,0 +1,117 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# %INSERT_GENERATED_LINE%
+
+# clap_complete generated content BEGINS
+# %INSERT_OPTION_COMPLETION%
+# clap_complete generated content ENDS
+
+complete -r buck2
+
+_BUCK_COMPLETE_BIN="${_BUCK_COMPLETE_BIN:-buck2}"
+
+__buck2_takes_target()
+{
+    case "$1" in
+    build|ctargets|install|run|targets|test|utargets)
+        return 0
+        ;;
+    *)
+        return 1
+        ;;
+    esac
+}
+
+__buck2_subcommand()
+{
+    local subcommand=
+    for w in "${COMP_WORDS[@]:1:$COMP_CWORD - 1}"; do
+        case "$w" in
+        --)
+            # This marker should only occur after certain subcommands
+            exit 1
+            ;;
+        -*|@*)
+            ;;
+        *)
+            if [[ -z $subcommand ]]; then
+                subcommand="$w"
+            fi
+            ;;
+        esac
+    done
+    if [[ -n $subcommand ]]; then
+        echo "$subcommand"
+    fi
+}
+
+__buck2_add_target_completions()
+{
+    local completions=()
+    while read -r; do
+        if [[ $REPLY =~ [:]. 
]]; then + completions+=("${REPLY#*:}") + else + completions+=("$REPLY") + fi + done < <("${_BUCK_COMPLETE_BIN[@]}" complete --target="$1" 2>/dev/null) + COMPREPLY=("${completions[@]}") +} + +__buck2_completions_queued() +{ + if [[ ${#COMPREPLY[@]} -eq 0 ]]; then + return 255 + elif [[ ${#COMPREPLY[@]} -eq 1 && ${COMPREPLY[1]} = % ]]; then + return 255 + else + return 0 + fi +} + +__buck2_fix() +{ + local cur="${COMP_WORDS[COMP_CWORD]}" + local prev="${COMP_WORDS[COMP_CWORD-1]}" + local pprev="${COMP_WORDS[COMP_CWORD-2]}" + + # Bash treats `:` as a separate word, so we have to do some work to + # recover a partial target name + if [[ $cur = : ]]; then + if [[ "${COMP_LINE:0:$COMP_POINT}" =~ .*$prev: ]]; then + cur="$prev:" + fi + elif [[ $prev = : ]]; then + if [[ "${COMP_LINE:0:$COMP_POINT}" =~ .*$pprev:$cur ]]; then + cur="$pprev:$cur" + else + cur=":$cur" + fi + fi + + if __buck2_takes_target "$(__buck2_subcommand)"; then + if [[ $cur =~ ^- ]]; then + _buck2 "$@" + else + _buck2 "$@" + if ! __buck2_completions_queued; then + __buck2_add_target_completions "$cur" + fi + fi + else + _buck2 "$@" + fi +} + +if [[ "${BASH_VERSINFO[0]}" -eq 4 && "${BASH_VERSINFO[1]}" -ge 4 || "${BASH_VERSINFO[0]}" -gt 4 ]]; then + complete -F __buck2_fix -o nosort -o bashdefault -o default -o nospace buck + complete -F __buck2_fix -o nosort -o bashdefault -o default -o nospace buck2 +else + complete -F __buck2_fix -o bashdefault -o default -o nospace buck + complete -F __buck2_fix -o bashdefault -o default -o nospace buck2 +fi diff --git a/app/buck2_cmd_completion_client/src/completion/completion-wrapper.fish b/app/buck2_cmd_completion_client/src/completion/completion-wrapper.fish new file mode 100644 index 0000000000000..50aa7a5bfb351 --- /dev/null +++ b/app/buck2_cmd_completion_client/src/completion/completion-wrapper.fish @@ -0,0 +1,52 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# %INSERT_GENERATED_LINE% + +# clap_complete generated content BEGINS +# %INSERT_OPTION_COMPLETION% +# clap_complete generated content ENDS + +function __buck2_subcommand + for w in $argv[2..] + switch $w + case -- + return 1 + case '-*' + continue + case '*' + if test -n $w + echo $w + return + end + end + end + return 1 +end + +function __buck2_takes_target + set -l cmd (commandline --current-process --tokenize --cut-at-cursor) + if contains -- -- $cmd[..-1] + return 1 + end + set -l subcommand (__buck2_subcommand $cmd) + test -n $subcommand || return + + contains $subcommand build ctargets install run targets test utargets + return $status +end + +function __buck2_add_target_completions + set -l cur (commandline --current-token) + + string match --quiet -- '-*' $cur && return + + buck2 complete --target="$cur" 2>/dev/null +end + +complete -c buck2 -n '__buck2_takes_target' -f -a '(__buck2_add_target_completions)' +complete -c buck -w buck2 diff --git a/app/buck2_cmd_completion_client/src/completion/completion-wrapper.zsh b/app/buck2_cmd_completion_client/src/completion/completion-wrapper.zsh new file mode 100644 index 0000000000000..e85065836bfcb --- /dev/null +++ b/app/buck2_cmd_completion_client/src/completion/completion-wrapper.zsh @@ -0,0 +1,114 @@ +#compdef buck2 buck +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# %INSERT_GENERATED_LINE% + +# clap_complete generated content BEGINS +# %INSERT_OPTION_COMPLETION% +# clap_complete generated content ENDS + +compdef -d buck2 + +_BUCK_COMPLETE_BIN="${_BUCK_COMPLETE_BIN:-buck2}" + +__buck2_takes_target() +{ + case "$1" in + build|ctargets|install|run|targets|test|utargets) + return 0 + ;; + *) + return 1 + ;; + esac +} + +__buck2_subcommand() +{ + local subcommand= + for w in "${words[@]:1:$CURRENT - 1}"; do + case "$w" in + -*|@*) + ;; + *) + if [[ -z $subcommand ]]; then + subcommand="$w" + fi + ;; + esac + done + if [[ -n $subcommand ]]; then + echo "$subcommand" + fi +} + +__buck2_add_target_completions() +{ + local completions=() + while read -r; do + completions+="$REPLY" + done < <("${_BUCK_COMPLETE_BIN[@]}" complete --target="$1" 2>/dev/null) + + compadd -S '' -- "${completions[@]}" +} + +__buck2_completions_queued() +{ + if [[ ${compstate[nmatches]} -eq 0 ]]; then + return 255 + else + return 0 + fi +} + +__buck2_fix() +{ + for w in "${words[@]:1:$CURRENT - 1}"; do + if [[ "$w" = '--' ]]; then + # We're running completions after a `--` - just report file completions and otherwise + # exit out + _files + return + fi + done + + local cur="${words[CURRENT]}" + local prev="${words[CURRENT-1]}" + local pprev="${words[CURRENT-2]}" + + # Zsh treats `:` as a separate word, so we have to do some work to + # recover a partial target name + if [[ $cur = : ]]; then + if [[ "${BUFFER:0:$CURRENT}" =~ .*$prev: ]]; then + cur="$prev:" + fi + elif [[ $prev = : ]]; then + if [[ "${BUFFER:0:$CURRENT}" =~ .*$prev: ]]; then + cur="$pprev:$cur" + else + cur=":$cur" + fi + fi + + if __buck2_takes_target "$(__buck2_subcommand)"; then + if [[ $cur =~ ^- ]]; then + _buck2 "$@" + else + _buck2 "$@" + if ! __buck2_completions_queued; then + __buck2_add_target_completions "$cur" + fi + fi + else + _buck2 "$@" + fi + + compstate[insert]="automenu-unambiguous" +} + +compdef __buck2_fix buck buck2 diff --git a/app/buck2_cmd_completion_client/src/completion/options-wrapper.bash b/app/buck2_cmd_completion_client/src/completion/options-wrapper.bash new file mode 100644 index 0000000000000..0861317bb4a14 --- /dev/null +++ b/app/buck2_cmd_completion_client/src/completion/options-wrapper.bash @@ -0,0 +1,18 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# %INSERT_GENERATED_LINE% + +# clap_complete generated content BEGINS +# %INSERT_OPTION_COMPLETION% +# clap_complete generated content ENDS + +if [[ "${BASH_VERSINFO[0]}" -eq 4 && "${BASH_VERSINFO[1]}" -ge 4 || "${BASH_VERSINFO[0]}" -gt 4 ]]; then + complete -F _buck2 -o nosort -o bashdefault -o default -o nospace buck +else + complete -F _buck2 -o bashdefault -o default -o nospace buck +fi diff --git a/app/buck2_cmd_completion_client/src/completion/options-wrapper.fish b/app/buck2_cmd_completion_client/src/completion/options-wrapper.fish new file mode 100644 index 0000000000000..14e79abd954a7 --- /dev/null +++ b/app/buck2_cmd_completion_client/src/completion/options-wrapper.fish @@ -0,0 +1,14 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# %INSERT_GENERATED_LINE% + +# clap_complete generated content BEGINS +# %INSERT_OPTION_COMPLETION% +# clap_complete generated content ENDS + +complete -c buck -w buck2 diff --git a/app/buck2_cmd_completion_client/src/completion/options-wrapper.zsh b/app/buck2_cmd_completion_client/src/completion/options-wrapper.zsh new file mode 100644 index 0000000000000..ffa43b4b3453c --- /dev/null +++ b/app/buck2_cmd_completion_client/src/completion/options-wrapper.zsh @@ -0,0 +1,15 @@ +#compdef buck2 buck +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# %INSERT_GENERATED_LINE% + +# clap_complete generated content BEGINS +# %INSERT_OPTION_COMPLETION% +# clap_complete generated content ENDS + +compdef _buck2 buck diff --git a/app/buck2_cmd_completion_client/src/lib.rs b/app/buck2_cmd_completion_client/src/lib.rs new file mode 100644 index 0000000000000..198436edd6c64 --- /dev/null +++ b/app/buck2_cmd_completion_client/src/lib.rs @@ -0,0 +1,13 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +#![feature(used_with_arg)] + +pub mod complete; +pub mod completion; diff --git a/app/buck2_cmd_completion_client/test_data/.buckconfig b/app/buck2_cmd_completion_client/test_data/.buckconfig new file mode 100644 index 0000000000000..1816323661d01 --- /dev/null +++ b/app/buck2_cmd_completion_client/test_data/.buckconfig @@ -0,0 +1,10 @@ +[repositories] + root = . 
+ prelude = cell1/buck2/prelude + cell1 = cell1 + cell2 = cell2 + cell3a = dir3/cell3a + cell3b = dir3/cell3b + +[buildfile] + name = BUCK.fixture diff --git a/examples/toolchains/.buckroot b/app/buck2_cmd_completion_client/test_data/.buckroot similarity index 100% rename from examples/toolchains/.buckroot rename to app/buck2_cmd_completion_client/test_data/.buckroot diff --git a/app/buck2_cmd_completion_client/test_data/.gitignore b/app/buck2_cmd_completion_client/test_data/.gitignore new file mode 100644 index 0000000000000..1e4d4c40e39d8 --- /dev/null +++ b/app/buck2_cmd_completion_client/test_data/.gitignore @@ -0,0 +1 @@ +/buck-out/ diff --git a/examples/remote_execution/buildbarn/prelude/prelude.bzl b/app/buck2_cmd_completion_client/test_data/baredir0/baredir0a/unused similarity index 100% rename from examples/remote_execution/buildbarn/prelude/prelude.bzl rename to app/buck2_cmd_completion_client/test_data/baredir0/baredir0a/unused diff --git a/examples/remote_execution/buildbuddy/prelude/prelude.bzl b/app/buck2_cmd_completion_client/test_data/baredir0/buckdir0b/BUCK.fixture similarity index 100% rename from examples/remote_execution/buildbuddy/prelude/prelude.bzl rename to app/buck2_cmd_completion_client/test_data/baredir0/buckdir0b/BUCK.fixture diff --git a/app/buck2_cmd_completion_client/test_data/cell1/.buckconfig b/app/buck2_cmd_completion_client/test_data/cell1/.buckconfig new file mode 100644 index 0000000000000..39b71899d32b8 --- /dev/null +++ b/app/buck2_cmd_completion_client/test_data/cell1/.buckconfig @@ -0,0 +1,10 @@ +[repositories] + cell1 = . + root = .. + prelude = buck2/prelude + cell2 = ../cell2 + cell3a = ../dir3/cell3a + cell3b = ../dir3/cell3b + +[buildfile] + name = TARGETS.fixture diff --git a/examples/remote_execution/engflow/prelude/prelude.bzl b/app/buck2_cmd_completion_client/test_data/cell1/TARGETS.fixture similarity index 100% rename from examples/remote_execution/engflow/prelude/prelude.bzl rename to app/buck2_cmd_completion_client/test_data/cell1/TARGETS.fixture diff --git a/examples/remote_execution/internal/prelude/prelude.bzl b/app/buck2_cmd_completion_client/test_data/cell1/buck2/TARGETS.fixture similarity index 100% rename from examples/remote_execution/internal/prelude/prelude.bzl rename to app/buck2_cmd_completion_client/test_data/cell1/buck2/TARGETS.fixture diff --git a/app/buck2_cmd_completion_client/test_data/cell1/buck2/prelude/.buckconfig b/app/buck2_cmd_completion_client/test_data/cell1/buck2/prelude/.buckconfig new file mode 100644 index 0000000000000..be8d6aacabf7d --- /dev/null +++ b/app/buck2_cmd_completion_client/test_data/cell1/buck2/prelude/.buckconfig @@ -0,0 +1,10 @@ +[repositories] + prelude = . + root = ../../.. + cell1 = ../.. + cell2 = ../../../cell2 + cell3a = ../../../dir3/cell3a + cell3b = ../../../dir3/cell3b + +[repository_aliases] + cell1_alias = cell1 diff --git a/app/buck2_cmd_completion_client/test_data/cell1/buck2/prelude/prelude.bzl b/app/buck2_cmd_completion_client/test_data/cell1/buck2/prelude/prelude.bzl new file mode 100644 index 0000000000000..a869e838b4c7c --- /dev/null +++ b/app/buck2_cmd_completion_client/test_data/cell1/buck2/prelude/prelude.bzl @@ -0,0 +1,6 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
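+#
+# For illustration, the fixture cells configured across these test_data
+# .buckconfig files (assuming this directory layout) are:
+#
+#   root     -> test_data/
+#   cell1    -> test_data/cell1/
+#   cell2    -> test_data/cell2/
+#   cell3a   -> test_data/dir3/cell3a/
+#   cell3b   -> test_data/dir3/cell3b/
+#   prelude  -> test_data/cell1/buck2/prelude/  (this file; its .buckconfig
+#               also aliases cell1_alias -> cell1 via [repository_aliases])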
diff --git a/app/buck2_cmd_completion_client/test_data/cell2/.buckconfig b/app/buck2_cmd_completion_client/test_data/cell2/.buckconfig new file mode 100644 index 0000000000000..5625177051bf8 --- /dev/null +++ b/app/buck2_cmd_completion_client/test_data/cell2/.buckconfig @@ -0,0 +1,7 @@ +[repositories] + cell2 = . + root = .. + prelude = ../cell1/buck2/prelude + cell1 = ../cell1 + cell3a = ../dir3/cell3a + cell3b = ../dir3/cell3b diff --git a/app/buck2_cmd_completion_client/test_data/dir3/cell3a/.buckconfig b/app/buck2_cmd_completion_client/test_data/dir3/cell3a/.buckconfig new file mode 100644 index 0000000000000..2e7be3f544b6d --- /dev/null +++ b/app/buck2_cmd_completion_client/test_data/dir3/cell3a/.buckconfig @@ -0,0 +1,10 @@ +[repositories] + cell3a = . + root = ../.. + prelude = ../../cell1/buck2/prelude + cell1 = ../../cell1 + cell2 = ../../cell2 + cell3b = ../cell3b + +[buildfile] + name = BUCK.fixture diff --git a/app/buck2_cmd_completion_client/test_data/dir3/cell3a/BUCK.fixture b/app/buck2_cmd_completion_client/test_data/dir3/cell3a/BUCK.fixture new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/app/buck2_cmd_completion_client/test_data/dir3/cell3b/.buckconfig b/app/buck2_cmd_completion_client/test_data/dir3/cell3b/.buckconfig new file mode 100644 index 0000000000000..b547cc000a93b --- /dev/null +++ b/app/buck2_cmd_completion_client/test_data/dir3/cell3b/.buckconfig @@ -0,0 +1,7 @@ +[repositories] + cell3b = . + root = ../.. + prelude = ../../cell1/buck2/prelude + cell1 = ../../cell1 + cell2 = ../../cell2 + cell3a = ../cell3a diff --git a/app/buck2_cmd_docs/BUCK b/app/buck2_cmd_docs/BUCK new file mode 100644 index 0000000000000..291c09333ec89 --- /dev/null +++ b/app/buck2_cmd_docs/BUCK @@ -0,0 +1,19 @@ +load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") + +oncall("build_infra") + +rust_library( + name = "buck2_cmd_docs", + srcs = glob(["src/**/*.rs"]), + deps = [ + "fbsource//third-party/rust:anyhow", + "fbsource//third-party/rust:async-trait", + "fbsource//third-party/rust:clap", + "fbsource//third-party/rust:termimad", + "//buck2/app/buck2_cli_proto:buck2_cli_proto", + "//buck2/app/buck2_client_ctx:buck2_client_ctx", + "//buck2/app/buck2_error:buck2_error", + "//buck2/app/buck2_query:buck2_query", + "//buck2/gazebo/dupe:dupe", + ], +) diff --git a/app/buck2_cmd_docs/Cargo.toml b/app/buck2_cmd_docs/Cargo.toml new file mode 100644 index 0000000000000..b39572e976170 --- /dev/null +++ b/app/buck2_cmd_docs/Cargo.toml @@ -0,0 +1,19 @@ +[package] +description = "`buck2 docs` command client implementation" +edition = "2021" +license = { workspace = true } +name = "buck2_cmd_docs" +repository = { workspace = true } +version = "0.1.0" + +[dependencies] +anyhow = { workspace = true } +async-trait = { workspace = true } +clap = { workspace = true } +dupe = { workspace = true } +termimad = { workspace = true } + +buck2_cli_proto = { workspace = true } +buck2_client_ctx = { workspace = true } +buck2_error = { workspace = true } +buck2_query = { workspace = true } diff --git a/app/buck2_cmd_docs/src/lib.rs b/app/buck2_cmd_docs/src/lib.rs new file mode 100644 index 0000000000000..10cfb856f0785 --- /dev/null +++ b/app/buck2_cmd_docs/src/lib.rs @@ -0,0 +1,63 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::exit_result::ExitResult; +use buck2_client_ctx::streaming::BuckSubcommand; + +use crate::query::DocsAqueryCommand; +use crate::query::DocsCqueryCommand; +use crate::query::DocsUqueryCommand; +use crate::starlark::DocsStarlarkCommand; +use crate::starlark_builtins::StarlarkBuiltinsCommand; + +mod query; +mod starlark; +mod starlark_builtins; + +#[allow(clippy::large_enum_variant)] +#[derive(Debug, clap::Parser)] +enum DocsKind { + Starlark(DocsStarlarkCommand), + StarlarkBuiltins(StarlarkBuiltinsCommand), + Uquery(DocsUqueryCommand), + Cquery(DocsCqueryCommand), + Aquery(DocsAqueryCommand), +} + +#[derive(Debug, clap::Parser)] +#[clap(name = "docs", about = "Print documentation of specified symbols")] +pub struct DocsCommand { + #[clap(subcommand)] + docs_kind: DocsKind, +} + +impl DocsCommand { + pub fn exec(self, matches: &clap::ArgMatches, ctx: ClientCommandContext<'_>) -> ExitResult { + if let DocsKind::Uquery(_) | DocsKind::Cquery(_) | DocsKind::Aquery(_) = &self.docs_kind { + // The docs for these are late-bound from the query impls, which is kind of hard to + // separate from the rest of the query graph + if let Some(res) = ExitResult::retry_command_with_full_binary()? { + return res; + } + } + + let submatches = match matches.subcommand().map(|s| s.1) { + Some(submatches) => submatches, + None => panic!("Parsed a subcommand but couldn't extract subcommand argument matches"), + }; + match self.docs_kind { + DocsKind::Starlark(cmd) => cmd.exec(submatches, ctx), + DocsKind::StarlarkBuiltins(cmd) => cmd.exec(submatches, ctx), + DocsKind::Uquery(cmd) => cmd.exec(submatches, ctx), + DocsKind::Cquery(cmd) => cmd.exec(submatches, ctx), + DocsKind::Aquery(cmd) => cmd.exec(submatches, ctx), + } + } +} diff --git a/app/buck2_cmd_docs/src/query.rs b/app/buck2_cmd_docs/src/query.rs new file mode 100644 index 0000000000000..30c7f0bebe9e6 --- /dev/null +++ b/app/buck2_cmd_docs/src/query.rs @@ -0,0 +1,116 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::exit_result::ExitResult; +use buck2_query::query::syntax::simple::functions::description::QueryType; +use buck2_query::query::syntax::simple::functions::description::QUERY_ENVIRONMENT_DESCRIPTION_BY_TYPE; +use buck2_query::query::syntax::simple::functions::docs::MarkdownOptions; +use buck2_query::query::syntax::simple::functions::docs::QueryEnvironmentDescription; +use dupe::Dupe; + +#[derive(Debug, Clone, Dupe, clap::ValueEnum)] +#[clap(rename_all = "snake_case")] +enum OutputFormatArg { + Markdown, + Rendered, +} + +#[derive(Debug, clap::Parser)] +struct OutputFormatOptions { + /// How to format the documentation + #[clap( + long = "format", + default_value = "rendered", + value_enum, + ignore_case = true + )] + format: OutputFormatArg, +} + +impl OutputFormatOptions { + fn emit_markdown(&self, markdown: &str) -> anyhow::Result<()> { + match self.format { + OutputFormatArg::Markdown => { + buck2_client_ctx::println!("{}", markdown)?; + } + OutputFormatArg::Rendered => { + let skin = termimad::MadSkin::default(); + let area = termimad::Area::full_screen(); + let width = std::cmp::min(100, area.width) as usize; + let rendered = skin.text(markdown, Some(width)); + buck2_client_ctx::println!("{}", rendered)?; + } + } + + Ok(()) + } +} + +#[derive(Debug, clap::Parser)] +#[clap(name = "docs-uquery", about = "Print documentation for query/uquery")] +pub(crate) struct DocsUqueryCommand { + #[clap(flatten)] + docs_options: OutputFormatOptions, +} + +#[derive(Debug, clap::Parser)] +#[clap(name = "docs-cquery", about = "Print documentation for cquery")] +pub(crate) struct DocsCqueryCommand { + #[clap(flatten)] + docs_options: OutputFormatOptions, +} + +#[derive(Debug, clap::Parser)] +#[clap(name = "docs-aquery", about = "Print documentation for aquery")] +pub(crate) struct DocsAqueryCommand { + #[clap(flatten)] + docs_options: OutputFormatOptions, +} + +fn output(options: OutputFormatOptions, description: QueryEnvironmentDescription) -> ExitResult { + let markdown = description.render_markdown(&MarkdownOptions { + include_alt_text: true, + }); + options.emit_markdown(&markdown)?; + ExitResult::success() +} + +impl DocsUqueryCommand { + pub(crate) fn exec( + self, + _matches: &clap::ArgMatches, + _ctx: ClientCommandContext<'_>, + ) -> ExitResult { + let description = (QUERY_ENVIRONMENT_DESCRIPTION_BY_TYPE.get()?)(QueryType::Uquery); + output(self.docs_options, description) + } +} + +impl DocsCqueryCommand { + pub(crate) fn exec( + self, + _matches: &clap::ArgMatches, + _ctx: ClientCommandContext<'_>, + ) -> ExitResult { + let description = (QUERY_ENVIRONMENT_DESCRIPTION_BY_TYPE.get()?)(QueryType::Cquery); + output(self.docs_options, description) + } +} + +impl DocsAqueryCommand { + pub(crate) fn exec( + self, + _matches: &clap::ArgMatches, + _ctx: ClientCommandContext<'_>, + ) -> ExitResult { + let description = (QUERY_ENVIRONMENT_DESCRIPTION_BY_TYPE.get()?)(QueryType::Aquery); + output(self.docs_options, description) + } +} diff --git a/app/buck2_cmd_docs/src/starlark.rs b/app/buck2_cmd_docs/src/starlark.rs new file mode 100644 index 0000000000000..a9e5727e2670a --- /dev/null +++ b/app/buck2_cmd_docs/src/starlark.rs @@ -0,0 +1,131 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use async_trait::async_trait; +use buck2_cli_proto::new_generic::DocsOutputFormat; +use buck2_cli_proto::new_generic::DocsRequest; +use buck2_cli_proto::new_generic::DocsStarlarkRequest; +use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::common::ui::CommonConsoleOptions; +use buck2_client_ctx::common::CommonBuildConfigurationOptions; +use buck2_client_ctx::common::CommonCommandOptions; +use buck2_client_ctx::common::CommonEventLogOptions; +use buck2_client_ctx::common::CommonStarlarkOptions; +use buck2_client_ctx::daemon::client::BuckdClientConnector; +use buck2_client_ctx::exit_result::ExitResult; +use buck2_client_ctx::path_arg::PathArg; +use buck2_client_ctx::streaming::StreamingCommand; +use buck2_error::BuckErrorContext; +use dupe::Dupe; + +#[derive(Debug, Clone, Dupe, clap::ValueEnum)] +#[clap(rename_all = "snake_case")] +enum DocsOutputFormatArg { + Json, + MarkdownFiles, +} + +#[derive(Debug, clap::Parser)] +#[clap( + name = "docs-starlark", + about = "Print documentation of user-defined starlark symbols" +)] +pub(crate) struct DocsStarlarkCommand { + #[clap( + name = "SYMBOL_PATTERNS", + help = "Patterns to interpret. //foo:bar.bzl is 'every symbol in //foo:bar.bzl', //foo:bar.bzl:baz only returns the documentation for the symbol 'baz' in //foo:bar.bzl" + )] + patterns: Vec, + + /// Directory to write markdown files to. Required if format is markdown_files. + #[clap( + long = "output-dir", + required_if_eq("format", "markdown_files"), + help = "Directory to write markdown files to. Required if format is markdown_files." + )] + output_dir: Option, + + #[clap( + long = "format", + help = "how to format the returned documentation", + default_value = "json", + value_enum, + ignore_case = true + )] + format: DocsOutputFormatArg, + + #[clap(flatten)] + common_opts: CommonCommandOptions, +} + +#[async_trait] +impl StreamingCommand for DocsStarlarkCommand { + const COMMAND_NAME: &'static str = "docs starlark"; + async fn exec_impl( + self, + buckd: &mut BuckdClientConnector, + matches: &clap::ArgMatches, + ctx: &mut ClientCommandContext<'_>, + ) -> ExitResult { + let client_context = ctx.client_context(matches, &self)?; + + let format = match self.format { + DocsOutputFormatArg::Json => DocsOutputFormat::Json, + DocsOutputFormatArg::MarkdownFiles => { + let p = self + .output_dir + .as_ref() + .internal_error_anyhow("Checked by clap")? 
+ .resolve(&ctx.working_dir); + DocsOutputFormat::Markdown(p) + } + }; + + let response = buckd + .with_flushing() + .new_generic( + client_context, + buck2_cli_proto::new_generic::NewGenericRequest::Docs(DocsRequest::Starlark( + DocsStarlarkRequest { + symbol_patterns: self.patterns.clone(), + format, + }, + )), + ctx.stdin() + .console_interaction_stream(&self.common_opts.console_opts), + ) + .await??; + + let buck2_cli_proto::new_generic::NewGenericResponse::Docs(response) = response else { + return ExitResult::bail("Unexpected response type from generic command"); + }; + + if let Some(json_output) = response.json_output { + buck2_client_ctx::println!("{}", json_output.trim_end())?; + } + + ExitResult::success() + } + + fn console_opts(&self) -> &CommonConsoleOptions { + &self.common_opts.console_opts + } + + fn event_log_opts(&self) -> &CommonEventLogOptions { + &self.common_opts.event_log_opts + } + + fn build_config_opts(&self) -> &CommonBuildConfigurationOptions { + &self.common_opts.config_opts + } + + fn starlark_opts(&self) -> &CommonStarlarkOptions { + &self.common_opts.starlark_opts + } +} diff --git a/app/buck2_cmd_docs/src/starlark_builtins.rs b/app/buck2_cmd_docs/src/starlark_builtins.rs new file mode 100644 index 0000000000000..03bc32d28d0a3 --- /dev/null +++ b/app/buck2_cmd_docs/src/starlark_builtins.rs @@ -0,0 +1,84 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use buck2_cli_proto::new_generic::DocsRequest; +use buck2_cli_proto::new_generic::DocsStarlarkBuiltinsRequest; +use buck2_client_ctx::client_ctx::ClientCommandContext; +use buck2_client_ctx::common::ui::CommonConsoleOptions; +use buck2_client_ctx::common::CommonBuildConfigurationOptions; +use buck2_client_ctx::common::CommonCommandOptions; +use buck2_client_ctx::common::CommonEventLogOptions; +use buck2_client_ctx::common::CommonStarlarkOptions; +use buck2_client_ctx::daemon::client::BuckdClientConnector; +use buck2_client_ctx::exit_result::ExitResult; +use buck2_client_ctx::path_arg::PathArg; +use buck2_client_ctx::streaming::StreamingCommand; + +/// Generate documentation for starlark builtins. +/// +/// This command is designed to support buck2's doc generation and does not have stable output. 
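+///
+/// An illustrative invocation (the output directory here is hypothetical):
+///
+///     buck2 docs starlark-builtins --output-dir /tmp/starlark-docs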
+#[derive(Debug, clap::Parser)]
+#[clap(name = "docs starlark-builtins")]
+pub(crate) struct StarlarkBuiltinsCommand {
+    /// The directory to output files to
+    #[clap(long, required = true)]
+    output_dir: PathArg,
+
+    #[clap(flatten)]
+    common_opts: CommonCommandOptions,
+}
+
+#[async_trait::async_trait]
+impl StreamingCommand for StarlarkBuiltinsCommand {
+    const COMMAND_NAME: &'static str = "docs starlark-builtins";
+    async fn exec_impl(
+        self,
+        buckd: &mut BuckdClientConnector,
+        matches: &clap::ArgMatches,
+        ctx: &mut ClientCommandContext<'_>,
+    ) -> ExitResult {
+        let client_context = ctx.client_context(matches, &self)?;
+
+        let p = self.output_dir.resolve(&ctx.working_dir).to_string();
+
+        let response = buckd
+            .with_flushing()
+            .new_generic(
+                client_context,
+                buck2_cli_proto::new_generic::NewGenericRequest::Docs(
+                    DocsRequest::StarlarkBuiltins(DocsStarlarkBuiltinsRequest { path: p }),
+                ),
+                ctx.stdin()
+                    .console_interaction_stream(&self.common_opts.console_opts),
+            )
+            .await??;
+
+        let buck2_cli_proto::new_generic::NewGenericResponse::Docs(_) = response else {
+            return ExitResult::bail("Unexpected response type from generic command");
+        };
+
+        ExitResult::success()
+    }
+
+    fn console_opts(&self) -> &CommonConsoleOptions {
+        &self.common_opts.console_opts
+    }
+
+    fn event_log_opts(&self) -> &CommonEventLogOptions {
+        &self.common_opts.event_log_opts
+    }
+
+    fn build_config_opts(&self) -> &CommonBuildConfigurationOptions {
+        &self.common_opts.config_opts
+    }
+
+    fn starlark_opts(&self) -> &CommonStarlarkOptions {
+        &self.common_opts.starlark_opts
+    }
+}
diff --git a/app/buck2_cmd_docs_server/BUCK b/app/buck2_cmd_docs_server/BUCK
new file mode 100644
index 0000000000000..747cf3906e174
--- /dev/null
+++ b/app/buck2_cmd_docs_server/BUCK
@@ -0,0 +1,27 @@
+load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library")
+
+oncall("build_infra")
+
+rust_library(
+    name = "buck2_cmd_docs_server",
+    srcs = glob(["src/**/*.rs"]),
+    deps = [
+        "fbsource//third-party/rust:anyhow",
+        "fbsource//third-party/rust:async-trait",
+        "fbsource//third-party/rust:futures",
+        "fbsource//third-party/rust:serde",
+        "fbsource//third-party/rust:serde_json",
+        "//buck2/app/buck2_cli_proto:buck2_cli_proto",
+        "//buck2/app/buck2_common:buck2_common",
+        "//buck2/app/buck2_core:buck2_core",
+        "//buck2/app/buck2_data:buck2_data",
+        "//buck2/app/buck2_error:buck2_error",
+        "//buck2/app/buck2_events:buck2_events",
+        "//buck2/app/buck2_interpreter:buck2_interpreter",
+        "//buck2/app/buck2_interpreter_for_build:buck2_interpreter_for_build",
+        "//buck2/app/buck2_server_ctx:buck2_server_ctx",
+        "//buck2/dice/dice:dice",
+        "//buck2/gazebo/dupe:dupe",
+        "//buck2/starlark-rust/starlark:starlark",
+    ],
+)
diff --git a/app/buck2_cmd_docs_server/Cargo.toml b/app/buck2_cmd_docs_server/Cargo.toml
new file mode 100644
index 0000000000000..4fcf298ea5936
--- /dev/null
+++ b/app/buck2_cmd_docs_server/Cargo.toml
@@ -0,0 +1,27 @@
+[package]
+description = "`buck2 docs` command server implementation"
+edition = "2021"
+license = { workspace = true }
+name = "buck2_cmd_docs_server"
+repository = { workspace = true }
+version = "0.1.0"
+
+[dependencies]
+anyhow = { workspace = true }
+async-trait = { workspace = true }
+dice = { workspace = true }
+dupe = { workspace = true }
+futures = { workspace = true }
+serde = { workspace = true }
+serde_json = { workspace = true }
+starlark = { workspace = true }
+
+buck2_cli_proto = { workspace = true }
+buck2_common = { workspace = true }
+buck2_core = { workspace = true }
+buck2_data = { workspace = true }
+buck2_error = { workspace = true }
+buck2_events = { workspace = true }
+buck2_interpreter = { workspace = true }
+buck2_interpreter_for_build = { workspace = true }
+buck2_server_ctx = { workspace = true }
diff --git a/app/buck2_cmd_docs_server/src/builtins.rs b/app/buck2_cmd_docs_server/src/builtins.rs
new file mode 100644
index 0000000000000..5ed5a6ff9b16a
--- /dev/null
+++ b/app/buck2_cmd_docs_server/src/builtins.rs
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::collections::BTreeMap;
+
+use buck2_cli_proto::new_generic::DocsResponse;
+use buck2_cli_proto::new_generic::DocsStarlarkBuiltinsRequest;
+use buck2_core::fs::fs_util;
+use buck2_core::fs::paths::abs_path::AbsPathBuf;
+use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath;
+use buck2_error::internal_error_anyhow;
+use buck2_error::BuckErrorContext;
+use buck2_interpreter_for_build::interpreter::globals::register_analysis_natives;
+use buck2_interpreter_for_build::interpreter::globals::register_bxl_natives;
+use buck2_interpreter_for_build::interpreter::globals::register_load_natives;
+use buck2_interpreter_for_build::interpreter::globals::starlark_library_extensions_for_buck2;
+use buck2_server_ctx::ctx::ServerCommandContextTrait;
+use dice::DiceTransaction;
+use starlark::docs::multipage::render_markdown_multipage;
+use starlark::docs::multipage::DocModuleInfo;
+use starlark::docs::DocItem;
+use starlark::environment::Globals;
+use starlark::environment::GlobalsBuilder;
+
+fn write_docs_to_subdir(
+    modules_infos: Vec<DocModuleInfo<'_>>,
+    base_path: &str,
+) -> anyhow::Result<()> {
+    let base_path = AbsPathBuf::new(base_path)?;
+    let path_mapper = |p: &str| format!("/docs/api/{}", p);
+    let mut docs: BTreeMap<_, _> = render_markdown_multipage(modules_infos, Some(&path_mapper))
+        .into_iter()
+        .collect();
+    while let Some((mut doc_path, rendered)) = docs.pop_first() {
+        let mut path = base_path.clone();
+        // Map:
+        // - "" -> "index.md"
+        // - "bxl" -> "bxl/index.md"
+        // - "bxl/typename" -> "bxl/typename.md"
+        if doc_path.is_empty()
+            || docs
+                .first_key_value()
+                .is_some_and(|(k, _)| k.starts_with(&format!("{}/", doc_path)))
+        {
+            path.push(
+                ForwardRelativePath::new(&doc_path)
+                    .internal_error("Doc paths should be forward relative")?
+                    .as_path(),
+            );
+            path.push(ForwardRelativePath::new("index.md").unwrap().as_path());
+        } else {
+            doc_path.push_str(".md");
+            path.push(
+                ForwardRelativePath::new(&doc_path)
+                    .internal_error("Doc paths should be forward relative")?
+                    .as_path(),
+            );
+        }
+
+        if let Some(parent) = path.parent() {
+            fs_util::create_dir_all(parent)?;
+        }
+        fs_util::write(path, &rendered)?;
+    }
+
+    Ok(())
+}
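The branch above decides between directory-style and leaf-style output purely from key order in the sorted `BTreeMap`: a page key with children becomes an index file, everything else a leaf. A minimal sketch of the resulting layout, using a hypothetical `expected_output_path` helper that is not part of this diff:

```rust
/// Hypothetical restatement of write_docs_to_subdir's path mapping: a page
/// key becomes a directory index when other keys nest under it, otherwise a
/// leaf `.md` file.
fn expected_output_path(doc_path: &str, has_children: bool) -> String {
    match (doc_path, has_children) {
        ("", _) => "index.md".to_owned(),
        (p, true) => format!("{}/index.md", p), // e.g. "bxl" -> "bxl/index.md"
        (p, false) => format!("{}.md", p),      // e.g. "bxl/typename" -> "bxl/typename.md"
    }
}
```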
+
+pub(crate) async fn docs_starlark_builtins(
+    _server_ctx: &dyn ServerCommandContextTrait,
+    _dice_ctx: DiceTransaction,
+    request: &DocsStarlarkBuiltinsRequest,
+) -> anyhow::Result<DocsResponse> {
+    let starlark = Globals::extended_by(starlark_library_extensions_for_buck2()).documentation();
+
+    let build = GlobalsBuilder::new()
+        .with(register_load_natives)
+        .with(register_analysis_natives)
+        .build()
+        .documentation();
+
+    let mut bxl = GlobalsBuilder::new()
+        .with(register_bxl_natives)
+        .build()
+        .documentation();
+
+    let Some(DocItem::Module(bxl)) = bxl.members.shift_remove("bxl") else {
+        return Err(internal_error_anyhow!("bxl namespace should exist"));
+    };
+
+    let modules_infos = vec![
+        DocModuleInfo {
+            module: &starlark,
+            name: "Starlark APIs".to_owned(),
+            page_path: "starlark".to_owned(),
+        },
+        DocModuleInfo {
+            module: &build,
+            name: "Build APIs".to_owned(),
+            page_path: "build".to_owned(),
+        },
+        DocModuleInfo {
+            module: &bxl,
+            name: "Bxl APIs".to_owned(),
+            page_path: "bxl".to_owned(),
+        },
+    ];
+
+    write_docs_to_subdir(modules_infos, &request.path)?;
+
+    Ok(DocsResponse { json_output: None })
+}
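The three `DocModuleInfo` entries above, combined with the `/docs/api/{}` path mapper, place the rendered pages under `/docs/api/starlark`, `/docs/api/build`, and `/docs/api/bxl`. For readers unfamiliar with the registration pattern, here is a minimal sketch of the same compose-then-document flow, assuming only the `starlark` crate's public API; `example_natives` and `greet` are invented names, not buck2 registrations:

```rust
use starlark::docs::DocModule;
use starlark::environment::GlobalsBuilder;
use starlark::starlark_module;

// An invented native module, registered the same way register_load_natives
// and friends are above.
#[starlark_module]
fn example_natives(builder: &mut GlobalsBuilder) {
    fn greet(name: &str) -> anyhow::Result<String> {
        Ok(format!("Hello, {name}"))
    }
}

fn example_docs() -> DocModule {
    // `with` composes registration functions; `documentation` walks the
    // resulting globals and produces the doc tree that gets rendered above.
    GlobalsBuilder::new().with(example_natives).build().documentation()
}
```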
diff --git a/app/buck2_cmd_docs_server/src/json.rs b/app/buck2_cmd_docs_server/src/json.rs
new file mode 100644
index 0000000000000..09e2f61342fe5
--- /dev/null
+++ b/app/buck2_cmd_docs_server/src/json.rs
@@ -0,0 +1,339 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::collections::HashMap;
+
+use buck2_core::bzl::ImportPath;
+use dupe::Dupe;
+use serde::Serialize;
+use starlark::collections::SmallMap;
+use starlark::docs::DocItem;
+use starlark::docs::DocMember;
+use starlark::docs::DocModule;
+use starlark::typing::Ty;
+
+fn serialize_ty<S: serde::Serializer>(ty: &Ty, s: S) -> Result<S::Ok, S::Error> {
+    s.serialize_str(&ty.to_string())
+}
+
+fn serialize_opt_ty<S: serde::Serializer>(ty: &Option<Ty>, s: S) -> Result<S::Ok, S::Error> {
+    match ty {
+        Some(ty) => serialize_ty(ty, s),
+        None => s.serialize_none(),
+    }
+}
+
+#[derive(Serialize)]
+struct JsonDoc {
+    id: JsonIdentifier,
+    item: JsonDocItem,
+    custom_attrs: HashMap<String, String>,
+}
+
+#[derive(Serialize)]
+struct JsonIdentifier {
+    name: String,
+    location: Option<JsonLocation>,
+}
+
+#[derive(Serialize)]
+struct JsonLocation {
+    path: String,
+}
+
+impl JsonDoc {
+    fn from_starlark(doc: Doc) -> Self {
+        Self {
+            id: JsonIdentifier {
+                name: doc.name,
+                location: Some(JsonLocation { path: doc.location }),
+            },
+            item: JsonDocItem::from_starlark(doc.item),
+            custom_attrs: HashMap::new(),
+        }
+    }
+}
+
+#[derive(Serialize)]
+#[serde(tag = "kind", rename_all = "snake_case")]
+enum JsonDocItem {
+    Module(JsonDocModule),
+    Object(JsonDocObject),
+    Function(JsonDocFunction),
+    Property(JsonDocProperty),
+}
+
+impl JsonDocItem {
+    fn from_starlark(item: starlark::docs::DocItem) -> Self {
+        match item {
+            starlark::docs::DocItem::Module(m) => Self::Module(JsonDocModule::from_starlark(m)),
+            starlark::docs::DocItem::Type(o) => Self::Object(JsonDocObject::from_starlark(o)),
+            starlark::docs::DocItem::Member(starlark::docs::DocMember::Function(f)) => {
+                Self::Function(JsonDocFunction::from_starlark(f))
+            }
+            starlark::docs::DocItem::Member(starlark::docs::DocMember::Property(p)) => {
+                Self::Property(JsonDocProperty::from_starlark(p))
+            }
+        }
+    }
+}
+
+#[derive(Serialize)]
+struct JsonDocModule {
+    docs: Option<JsonDocString>,
+    members: SmallMap<String, JsonDocMember>,
+}
+
+impl JsonDocModule {
+    fn from_starlark(m: starlark::docs::DocModule) -> Self {
+        Self {
+            docs: m.docs.map(JsonDocString::from_starlark),
+            members: m
+                .members
+                .into_iter()
+                .filter_map(|(k, v)| match v {
+                    starlark::docs::DocItem::Member(v) => {
+                        Some((k, JsonDocMember::from_starlark(v)))
+                    }
+                    _ => None,
+                })
+                .collect(),
+        }
+    }
+}
+
+#[derive(Serialize)]
+struct JsonDocObject {
+    docs: Option<JsonDocString>,
+    members: SmallMap<String, JsonDocMember>,
+}
+
+impl JsonDocObject {
+    fn from_starlark(o: starlark::docs::DocType) -> Self {
+        Self {
+            docs: o.docs.map(JsonDocString::from_starlark),
+            members: o
+                .members
+                .into_iter()
+                .map(|(k, v)| (k, JsonDocMember::from_starlark(v)))
+                .collect(),
+        }
+    }
+}
+
+#[derive(Serialize)]
+#[serde(tag = "kind", rename_all = "snake_case")]
+enum JsonDocMember {
+    Property(JsonDocProperty),
+    Function(JsonDocFunction),
+}
+
+impl JsonDocMember {
+    fn from_starlark(m: starlark::docs::DocMember) -> Self {
+        match m {
+            starlark::docs::DocMember::Property(p) => {
+                Self::Property(JsonDocProperty::from_starlark(p))
+            }
+            starlark::docs::DocMember::Function(f) => {
+                Self::Function(JsonDocFunction::from_starlark(f))
+            }
+        }
+    }
+}
+
+/// A single property of an object. These are explicitly not functions (see [`DocMember`]).
+#[derive(Serialize)]
+struct JsonDocProperty {
+    docs: Option<JsonDocString>,
+    #[serde(rename = "type", serialize_with = "serialize_ty")]
+    typ: Ty,
+}
+
+impl JsonDocProperty {
+    fn from_starlark(p: starlark::docs::DocProperty) -> Self {
+        Self {
+            docs: p.docs.map(JsonDocString::from_starlark),
+            typ: p.typ,
+        }
+    }
+}
+
+#[derive(Serialize)]
+struct JsonDocFunction {
+    docs: Option<JsonDocString>,
+    params: Vec<JsonDocParam>,
+    ret: JsonDocReturn,
+    #[serde(serialize_with = "serialize_opt_ty")]
+    as_type: Option<Ty>,
+}
+
+impl JsonDocFunction {
+    fn from_starlark(f: starlark::docs::DocFunction) -> Self {
+        Self {
+            docs: f.docs.map(JsonDocString::from_starlark),
+            params: f
+                .params
+                .fmt_params()
+                .map(JsonDocParam::from_starlark)
+                .collect(),
+            ret: JsonDocReturn::from_starlark(f.ret),
+            as_type: None,
+        }
+    }
+}
+
+#[derive(Serialize)]
+struct JsonDocReturn {
+    docs: Option<JsonDocString>,
+    #[serde(rename = "type", serialize_with = "serialize_ty")]
+    typ: Ty,
+}
+
+impl JsonDocReturn {
+    fn from_starlark(ret: starlark::docs::DocReturn) -> Self {
+        Self {
+            docs: ret.docs.map(JsonDocString::from_starlark),
+            typ: ret.typ,
+        }
+    }
+}
+
+/// A single parameter of a function.
+#[derive(Serialize)]
+#[serde(tag = "kind", rename_all = "snake_case")]
+enum JsonDocParam {
+    Arg {
+        name: String,
+        docs: Option<JsonDocString>,
+        #[serde(rename = "type", serialize_with = "serialize_ty")]
+        typ: Ty,
+        default_value: Option<String>,
+    },
+    OnlyNamedAfter,
+    OnlyPosBefore,
+    Args {
+        name: String,
+        docs: Option<JsonDocString>,
+        #[serde(rename = "type", serialize_with = "serialize_ty")]
+        tuple_elem_ty: Ty,
+    },
+    Kwargs {
+        name: String,
+        docs: Option<JsonDocString>,
+        #[serde(rename = "type", serialize_with = "serialize_ty")]
+        dict_value_ty: Ty,
+    },
+}
+
+impl JsonDocParam {
+    fn from_starlark(param: starlark::docs::FmtParam<&'_ starlark::docs::DocParam>) -> Self {
+        match param {
+            starlark::docs::FmtParam::Regular(starlark::docs::DocParam {
+                name,
+                docs,
+                typ,
+                default_value,
+            }) => Self::Arg {
+                name: name.clone(),
+                docs: docs.clone().map(JsonDocString::from_starlark),
+                typ: typ.dupe(),
+                default_value: default_value.clone(),
+            },
+            starlark::docs::FmtParam::Slash => Self::OnlyPosBefore,
+            starlark::docs::FmtParam::Star => Self::OnlyNamedAfter,
+            starlark::docs::FmtParam::Args(starlark::docs::DocParam {
+                name,
+                docs,
+                typ,
+                default_value: _,
+            }) => Self::Args {
+                name: name.clone(),
+                docs: docs.clone().map(JsonDocString::from_starlark),
+                tuple_elem_ty: typ.dupe(),
+            },
+            starlark::docs::FmtParam::Kwargs(starlark::docs::DocParam {
+                name,
+                docs,
+                typ,
+                default_value: _,
+            }) => Self::Kwargs {
+                name: name.clone(),
+                docs: docs.clone().map(JsonDocString::from_starlark),
+                dict_value_ty: typ.dupe(),
+            },
+        }
+    }
+}
+
+#[derive(Serialize)]
+struct JsonDocString {
+    summary: String,
+    details: Option<String>,
+}
+
+impl JsonDocString {
+    fn from_starlark(s: starlark::docs::DocString) -> Self {
+        Self {
+            summary: s.summary,
+            details: s.details,
+        }
+    }
+}
+
+// Note(JakobDegen): The particular format of the output is not really by design, but mostly a
+// historical accident.
+pub(crate) fn to_json(docs: Vec<(ImportPath, DocModule)>) -> anyhow::Result<String> {
+    let docs: Vec<_> = docs
+        .into_iter()
+        .flat_map(|(p, d)| to_docs_list(&p, d))
+        .map(JsonDoc::from_starlark)
+        .collect();
+    Ok(serde_json::to_string(&docs)?)
+}
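For orientation, `to_json` emits a flat array of `JsonDoc` values. Given the derives above (internal `kind` tags, snake_case variants, `Ty` rendered as a string), a single documented function comes out roughly like this; the sample is hand-written to illustrate the shape, not captured from real output:

```json
[
  {
    "id": {
      "name": "baz",
      "location": { "path": "//foo:bar.bzl" }
    },
    "item": {
      "kind": "function",
      "docs": { "summary": "Does baz things.", "details": null },
      "params": [
        { "kind": "arg", "name": "x", "docs": null, "type": "str", "default_value": null }
      ],
      "ret": { "docs": null, "type": "None" },
      "as_type": null
    },
    "custom_attrs": {}
  }
]
```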
+
+struct Doc {
+    name: String,
+    location: String,
+    item: DocItem,
+}
+
+fn to_docs_list(import_path: &ImportPath, module_docs: DocModule) -> Vec<Doc> {
+    // Do this so that we don't get the '@' in the display if we're printing targets from a
+    // different cell root. i.e. `//foo:bar.bzl`, rather than `//foo:bar.bzl @ cell`
+    let import_path_string = format!(
+        "{}:{}",
+        import_path.path().parent().unwrap(),
+        import_path.path().path().file_name().unwrap()
+    );
+
+    let mut docs = vec![];
+
+    if let Some(module_doc) = module_docs.docs {
+        docs.push(Doc {
+            name: import_path_string.clone(),
+            location: import_path_string.clone(),
+            item: DocItem::Module(DocModule {
+                docs: Some(module_doc),
+                members: SmallMap::new(),
+            }),
+        });
+    }
+    docs.extend(module_docs.members.into_iter().filter_map(|(symbol, d)| {
+        Some(Doc {
+            name: symbol,
+            location: import_path_string.clone(),
+            item: match d.try_as_member_with_collapsed_object().ok()? {
+                DocMember::Function(f) => DocItem::Member(DocMember::Function(f)),
+                DocMember::Property(p) => DocItem::Member(DocMember::Property(p)),
+            },
+        })
+    }));
+
+    docs
+}
diff --git a/app/buck2_cmd_docs_server/src/lib.rs b/app/buck2_cmd_docs_server/src/lib.rs
new file mode 100644
index 0000000000000..c15d25bab473b
--- /dev/null
+++ b/app/buck2_cmd_docs_server/src/lib.rs
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+#![feature(error_generic_member_access)]
+
+use async_trait::async_trait;
+use buck2_cli_proto::new_generic::DocsRequest;
+use buck2_cli_proto::new_generic::DocsResponse;
+use buck2_server_ctx::ctx::ServerCommandContextTrait;
+use buck2_server_ctx::late_bindings::DocsServerComamnd;
+use buck2_server_ctx::late_bindings::DOCS_SERVER_COMMAND;
+use buck2_server_ctx::partial_result_dispatcher::NoPartialResult;
+use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher;
+use buck2_server_ctx::template::run_server_command;
+use buck2_server_ctx::template::ServerCommandTemplate;
+use dice::DiceTransaction;
+
+use crate::builtins::docs_starlark_builtins;
+use crate::starlark_::docs_starlark;
+
+mod builtins;
+mod json;
+mod markdown;
+mod starlark_;
+
+struct DocsServerCommandImpl;
+
+#[async_trait::async_trait]
+impl DocsServerComamnd for DocsServerCommandImpl {
+    async fn docs(
+        &self,
+        context: &dyn ServerCommandContextTrait,
+        partial_result_dispatcher: PartialResultDispatcher<NoPartialResult>,
+        req: DocsRequest,
+    ) -> anyhow::Result<DocsResponse> {
+        run_server_command(
+            DocsServerCommand { req },
+            context,
+            partial_result_dispatcher,
+        )
+        .await
+    }
+}
+
+pub fn init_late_bindings() {
+    DOCS_SERVER_COMMAND.init(&DocsServerCommandImpl);
+}
+
+struct DocsServerCommand {
+    req: DocsRequest,
+}
+
+#[async_trait]
+impl ServerCommandTemplate for DocsServerCommand {
+    type StartEvent = buck2_data::DocsCommandStart;
+    type EndEvent = buck2_data::DocsCommandEnd;
+    type Response = DocsResponse;
+    type PartialResult = NoPartialResult;
+
+    async fn command(
+        &self,
+        server_ctx: &dyn ServerCommandContextTrait,
+        _partial_result_dispatcher: PartialResultDispatcher<Self::PartialResult>,
+        ctx: DiceTransaction,
+    ) -> anyhow::Result<Self::Response> {
+        docs(server_ctx, ctx, &self.req).await
+    }
+
+    fn is_success(&self, _response: &Self::Response) -> bool {
+        // No response if we failed.
+        true
+    }
+}
+
+async fn docs(
+    server_ctx: &dyn ServerCommandContextTrait,
+    dice_ctx: DiceTransaction,
+    request: &DocsRequest,
+) -> anyhow::Result<DocsResponse> {
+    match request {
+        DocsRequest::Starlark(request) => docs_starlark(server_ctx, dice_ctx, request).await,
+        DocsRequest::StarlarkBuiltins(request) => {
+            docs_starlark_builtins(server_ctx, dice_ctx, request).await
+        }
+    }
+}
diff --git a/app/buck2_cmd_docs_server/src/markdown.rs b/app/buck2_cmd_docs_server/src/markdown.rs
new file mode 100644
index 0000000000000..54269a4c141b4
--- /dev/null
+++ b/app/buck2_cmd_docs_server/src/markdown.rs
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::collections::HashMap;
+
+use buck2_core::bzl::ImportPath;
+use buck2_core::fs::fs_util;
+use buck2_core::fs::paths::abs_path::AbsPath;
+use buck2_core::fs::paths::abs_path::AbsPathBuf;
+use buck2_core::fs::paths::file_name::FileName;
+use buck2_events::dispatch::console_message;
+use starlark::docs::markdown::render_doc_item;
+use starlark::docs::DocItem;
+use starlark::docs::DocModule;
+use starlark::typing::TypeRenderConfig;
+
+fn add_md(mut p: AbsPathBuf) -> AbsPathBuf {
+    let mut file = p.file_name().unwrap().to_owned();
+    file.push(".md");
+    p.pop();
+    p.push(file);
+    p
+}
+
+pub(crate) fn generate_markdown_files(
+    output_dir: &AbsPath,
+    docs: Vec<(ImportPath, DocModule)>,
+) -> anyhow::Result<()> {
+    let mut outputs = HashMap::new();
+
+    for (path, docs) in docs {
+        let rendered = render_doc_item(
+            &path.to_string(),
+            &DocItem::Module(docs),
+            &TypeRenderConfig::Default,
+        );
+        let cell = FileName::new(path.cell().as_str())?;
+        let path = output_dir
+            .join(cell)
+            .join(path.path().path().as_forward_relative_path().as_path());
+        outputs.insert(add_md(path), rendered);
+    }
+
+    for (path, contents) in outputs.iter() {
+        console_message(format!("Writing to {}", path.display()));
+
+        if let Some(dir) = path.parent() {
+            fs_util::create_dir_all(dir)?;
+        }
+        fs_util::write(path, contents)?;
+    }
+
+    Ok(())
+}
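Concretely, `add_md` rewrites only the final path component, so docs for a module like `root//foo:bar.bzl` land at `<output-dir>/root/foo/bar.bzl.md`. A small sketch of that transformation under the same `buck2_core` path types; the test name and inputs are invented:

```rust
#[cfg(test)]
mod tests {
    use buck2_core::fs::paths::abs_path::AbsPathBuf;

    use super::add_md;

    #[test]
    fn add_md_appends_md_extension() {
        let input = if cfg!(windows) {
            "C:\\out\\root\\foo\\bar.bzl"
        } else {
            "/out/root/foo/bar.bzl"
        };
        let out = add_md(AbsPathBuf::new(input).unwrap());
        // Only the file name changes; parent directories are preserved.
        assert!(out.display().to_string().ends_with("bar.bzl.md"));
    }
}
```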
diff --git a/app/buck2_cmd_docs_server/src/starlark_.rs b/app/buck2_cmd_docs_server/src/starlark_.rs
new file mode 100644
index 0000000000000..a6c3050ca18cb
--- /dev/null
+++ b/app/buck2_cmd_docs_server/src/starlark_.rs
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::collections::HashSet;
+
+use buck2_cli_proto::new_generic::DocsOutputFormat;
+use buck2_cli_proto::new_generic::DocsResponse;
+use buck2_cli_proto::new_generic::DocsStarlarkRequest;
+use buck2_common::dice::cells::HasCellResolver;
+use buck2_core::bzl::ImportPath;
+use buck2_core::cells::build_file_cell::BuildFileCell;
+use buck2_core::cells::cell_path::CellPath;
+use buck2_core::cells::CellAliasResolver;
+use buck2_interpreter::load_module::InterpreterCalculation;
+use buck2_interpreter::parse_import::parse_bzl_path_with_config;
+use buck2_interpreter::parse_import::ParseImportOptions;
+use buck2_interpreter::parse_import::RelativeImports;
+use buck2_server_ctx::ctx::ServerCommandContextTrait;
+use dice::DiceTransaction;
+use futures::FutureExt;
+
+use crate::json;
+use crate::markdown::generate_markdown_files;
+
+fn parse_import_paths(
+    cell_resolver: &CellAliasResolver,
+    current_dir: &CellPath,
+    current_cell: BuildFileCell,
+    symbol_patterns: &[String],
+) -> anyhow::Result<HashSet<ImportPath>> {
+    let parse_options = ParseImportOptions {
+        allow_missing_at_symbol: true,
+        relative_import_option: RelativeImports::Allow { current_dir },
+    };
+
+    symbol_patterns
+        .iter()
+        .map(|symbol_pattern| {
+            parse_bzl_path_with_config(cell_resolver, symbol_pattern, &parse_options, current_cell)
+        })
+        .collect()
+}
+
+pub(crate) async fn docs_starlark(
+    server_ctx: &dyn ServerCommandContextTrait,
+    mut dice_ctx: DiceTransaction,
+    request: &DocsStarlarkRequest,
+) -> anyhow::Result<DocsResponse> {
+    let cell_resolver = dice_ctx.get_cell_resolver().await?;
+    let cwd = server_ctx.working_dir();
+    let current_cell_path = cell_resolver.get_cell_path(cwd)?;
+    let current_cell = BuildFileCell::new(current_cell_path.cell());
+    let cell_alias_resolver = dice_ctx
+        .get_cell_alias_resolver(current_cell_path.cell())
+        .await?;
+
+    let lookups = parse_import_paths(
+        &cell_alias_resolver,
+        &current_cell_path,
+        current_cell,
+        &request.symbol_patterns,
+    )?;
+
+    let docs: Vec<_> = dice_ctx
+        .try_compute_join(lookups, |ctx, import_path| {
+            async move {
+                let doc = ctx
+                    .get_loaded_module_from_import_path(&import_path)
+                    .await?
+                    .env()
+                    .documentation();
+                anyhow::Ok((import_path, doc))
+            }
+            .boxed()
+        })
+        .await?;
+
+    let json_output = match &request.format {
+        DocsOutputFormat::Json => Some(json::to_json(docs)?),
+        DocsOutputFormat::Markdown(path) => {
+            generate_markdown_files(&path, docs)?;
+            None
+        }
+    };
+
+    Ok(DocsResponse { json_output })
+}
diff --git a/app/buck2_cmd_starlark_client/BUCK b/app/buck2_cmd_starlark_client/BUCK
new file mode 100644
index 0000000000000..507a1bc166bb6
--- /dev/null
+++ b/app/buck2_cmd_starlark_client/BUCK
@@ -0,0 +1,25 @@
+load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library")
+
+oncall("build_infra")
+
+rust_library(
+    name = "buck2_cmd_starlark_client",
+    srcs = glob(["src/**/*.rs"]),
+    deps = [
+        "fbsource//third-party/rust:anyhow",
+        "fbsource//third-party/rust:async-trait",
+        "fbsource//third-party/rust:clap",
+        "fbsource//third-party/rust:debugserver-types",
+        "fbsource//third-party/rust:futures",
+        "fbsource//third-party/rust:once_cell",
+        "fbsource//third-party/rust:serde",
+        "fbsource//third-party/rust:serde_json",
+        "//buck2/app/buck2_cli_proto:buck2_cli_proto",
+        "//buck2/app/buck2_client_ctx:buck2_client_ctx",
+        "//buck2/app/buck2_common:buck2_common",
+        "//buck2/app/buck2_data:buck2_data",
+        "//buck2/app/buck2_error:buck2_error",
+        "//buck2/app/buck2_event_observer:buck2_event_observer",
+        "//buck2/app/buck2_events:buck2_events",
+    ],
+)
diff --git a/app/buck2_cmd_starlark_client/Cargo.toml b/app/buck2_cmd_starlark_client/Cargo.toml
new file mode 100644
index 0000000000000..db3d5d3723d02
--- /dev/null
+++ b/app/buck2_cmd_starlark_client/Cargo.toml
@@ -0,0 +1,25 @@
+[package]
+description = "`buck2 starlark` command client implementation"
+edition = "2021"
+license = { workspace = true }
+name = "buck2_cmd_starlark_client"
+repository = { workspace = true }
+version = "0.1.0"
+
+[dependencies]
+anyhow = { workspace = true }
+async-trait = { workspace = true }
+clap = { workspace = true }
+debugserver-types = { workspace = true }
+futures = { workspace = true }
+once_cell = { workspace = true }
+serde = { workspace = true }
+serde_json = { workspace = true }
+
+buck2_cli_proto = { workspace = true }
+buck2_client_ctx = { workspace = true }
+buck2_common = { workspace = true }
+buck2_data = { workspace = true }
+buck2_error = { workspace = true }
+buck2_event_observer = { workspace = true }
+buck2_events = { workspace = true }
diff --git a/app/buck2_cmd_starlark_client/src/debug.rs b/app/buck2_cmd_starlark_client/src/debug.rs
new file mode 100644
index 0000000000000..f2d5864f7a9a3
--- /dev/null
+++ b/app/buck2_cmd_starlark_client/src/debug.rs
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::io::Write;
+
+use async_trait::async_trait;
+use buck2_cli_proto::DapRequest;
+use buck2_client_ctx::client_ctx::ClientCommandContext;
+use buck2_client_ctx::common::ui::CommonConsoleOptions;
+use buck2_client_ctx::common::ui::ConsoleType;
+use buck2_client_ctx::common::CommonBuildConfigurationOptions;
+use buck2_client_ctx::common::CommonEventLogOptions;
+use buck2_client_ctx::common::CommonStarlarkOptions;
+use buck2_client_ctx::daemon::client::BuckdClientConnector;
+use buck2_client_ctx::events_ctx::PartialResultCtx;
+use buck2_client_ctx::events_ctx::PartialResultHandler;
+use buck2_client_ctx::exit_result::ExitResult;
+use buck2_client_ctx::ide_support::ide_message_stream;
+use buck2_client_ctx::stream_util::reborrow_stream_for_static;
+use buck2_client_ctx::streaming::StreamingCommand;
+use buck2_client_ctx::subscribers::subscriber::EventSubscriber;
+use buck2_event_observer::unpack_event::unpack_event;
+use buck2_event_observer::unpack_event::UnpackedBuckEvent;
+use buck2_events::BuckEvent;
+use futures::StreamExt;
+use once_cell::sync::Lazy;
+
+/// Run the starlark debug adapter protocol server
+///
+/// This forwards requests received on stdin to a debug server running in the
+/// buck daemon. DAP events and responses are returned from the daemon and sent
+/// to this command's stdout.
+#[derive(Debug, clap::Parser)]
+#[clap(name = "starlark-debug-attach")]
+pub struct StarlarkDebugAttachCommand {
+    #[clap(flatten)]
+    config_opts: CommonBuildConfigurationOptions,
+
+    #[clap(flatten)]
+    starlark_opts: CommonStarlarkOptions,
+
+    #[clap(flatten)]
+    event_log_opts: CommonEventLogOptions,
+}
+
+pub fn write_dap_message(out: &mut impl Write, msg: &[u8]) -> anyhow::Result<()> {
+    write!(out, "Content-Length: {}\r\n\r\n", msg.len())?;
+    out.write_all(msg)?;
+    out.flush()?;
+    Ok(())
+}
+
+/// All DAP messages are written to stdout.
+fn send_message_to_dap_client(msg: &[u8]) -> anyhow::Result<()> {
+    let stdout = std::io::stdout();
+    let mut stdout = stdout.lock();
+    write_dap_message(&mut stdout, msg)?;
+    Ok(())
+}
+
+#[async_trait]
+impl StreamingCommand for StarlarkDebugAttachCommand {
+    const COMMAND_NAME: &'static str = "starlark-debug-attach";
+
+    async fn exec_impl(
+        self,
+        buckd: &mut BuckdClientConnector,
+        matches: &clap::ArgMatches,
+        ctx: &mut ClientCommandContext<'_>,
+    ) -> ExitResult {
+        let client_context = ctx.client_context(matches, &self)?;
+
+        let stream = ide_message_stream::<_, debugserver_types::Request>(ctx.stdin()).filter_map(
+            |m| async move {
+                match m {
+                    Ok(dap_json) => Some(DapRequest { dap_json }),
+                    Err(e) => {
+                        let _ignored = buck2_client_ctx::eprintln!(
+                            "Could not read message from stdin: `{}`",
+                            e
+                        );
+                        // TODO(cjhopman): the client just hangs at this point. We should probably error out (or
+                        // distinguish between FramedRead errors and errors of us converting to a Request).
+                        None
+                    }
+                }
+            },
+        );
+
+        let mut partial_result_handler = DapPartialResultHandler;
+
+        reborrow_stream_for_static(
+            stream,
+            |stream| async move {
+                buckd
+                    .with_flushing()
+                    .dap(client_context, stream, &mut partial_result_handler)
+                    .await
+            },
+            // The DAP server side does not handle hangups. So, until it does... we never hang up:
+            || None,
+        )
+        .await??;
+
+        ExitResult::success()
+    }
+
+    fn console_opts(&self) -> &CommonConsoleOptions {
+        // This should only be communicated with by an IDE, so disable anything other
+        // than the simple console
+        static SIMPLE_CONSOLE: Lazy<CommonConsoleOptions> = Lazy::new(|| CommonConsoleOptions {
+            console_type: ConsoleType::Simple,
+            ui: vec![],
+            no_interactive_console: true,
+        });
+        &SIMPLE_CONSOLE
+    }
+
+    fn event_log_opts(&self) -> &CommonEventLogOptions {
+        &self.event_log_opts
+    }
+
+    fn build_config_opts(&self) -> &CommonBuildConfigurationOptions {
+        &self.config_opts
+    }
+
+    fn starlark_opts(&self) -> &CommonStarlarkOptions {
+        &self.starlark_opts
+    }
+
+    fn should_expect_spans(&self) -> bool {
+        // If we're running the debugger, do not show "Waiting for daemon..." if we do not get any spans.
+        false
+    }
+
+    fn extra_subscribers(&self) -> Vec<Box<dyn EventSubscriber>> {
+        /// We add an additional subscriber that converts a handful of informative events
+        /// to DAP "output" events. Without this, at best these would go to stderr, but vscode's
+        /// executable DAP client ignores stderr, so this subscriber allows us to get that information
+        /// into somewhere visible to the user.
+        struct ConvertToDap;
+
+        impl ConvertToDap {
+            fn write_console(&self, msg: &str) -> anyhow::Result<()> {
+                let ev = debugserver_types::OutputEvent {
+                    type_: "event".to_owned(),
+                    event: "output".to_owned(),
+                    // All other events are being sent by the debug support in the server and that's
+                    // maintaining the sequence numbers. For us to get the correct sequence number
+                    // here would be tricky. Instead, we just set it to 0 and hope that nobody notices/cares
+                    // that it's out of order/invalid. The alternative would probably be to
+                    // deserialize all events from the server and rewrite their sequence numbers (and
+                    // potentially references to those sequence numbers coming back from the dap client).
+                    seq: 0,
+                    body: debugserver_types::OutputEventBody {
+                        category: None,
+                        column: None,
+                        data: None,
+                        line: None,
+                        output: format!("{}\n", msg),
+                        source: None,
+                        variables_reference: None,
+                    },
+                };
+                send_message_to_dap_client(&serde_json::to_vec(&ev)?)
+            }
+        }
+
+        #[async_trait]
+        impl EventSubscriber for ConvertToDap {
+            async fn handle_output(&mut self, raw_output: &[u8]) -> anyhow::Result<()> {
+                self.write_console(&String::from_utf8_lossy(raw_output))
+            }
+
+            async fn handle_tailer_stderr(&mut self, stderr: &str) -> anyhow::Result<()> {
+                self.write_console(stderr)
+            }
+
+            async fn handle_events(
+                &mut self,
+                events: &[std::sync::Arc<BuckEvent>],
+            ) -> anyhow::Result<()> {
+                for ev in events {
+                    match unpack_event(ev)? {
+                        UnpackedBuckEvent::Instant(_, _, data) => match data {
+                            buck2_data::instant_event::Data::StructuredError(soft_error) => {
+                                if !soft_error.quiet {
+                                    self.write_console(&format!(
+                                        "soft error: {}",
+                                        &soft_error.payload
+                                    ))?;
+                                }
+                            }
+                            buck2_data::instant_event::Data::ConsoleMessage(message) => {
+                                self.write_console(&message.message)?;
+                            }
+                            _ => {}
+                        },
+                        _ => {}
+                    }
+                }
+                Ok(())
+            }
+
+            async fn handle_error(&mut self, error: &buck2_error::Error) -> anyhow::Result<()> {
+                self.write_console(&format!(
+                    "buck2 starlark-attach debugserver error: {}",
+                    error
+                ))
+            }
+        }
+
+        vec![Box::new(ConvertToDap)]
+    }
+}
+
+struct DapPartialResultHandler;
+
+#[async_trait]
+impl PartialResultHandler for DapPartialResultHandler {
+    type PartialResult = buck2_cli_proto::DapMessage;
+
+    async fn handle_partial_result(
+        &mut self,
+        mut _ctx: PartialResultCtx<'_, '_>,
+        partial_res: buck2_cli_proto::DapMessage,
+    ) -> anyhow::Result<()> {
+        send_message_to_dap_client(&partial_res.dap_json)
+    }
+}
diff --git a/app/buck2_cmd_starlark_client/src/lib.rs b/app/buck2_cmd_starlark_client/src/lib.rs
new file mode 100644
index 0000000000000..20d49ae2dec3b
--- /dev/null
+++ b/app/buck2_cmd_starlark_client/src/lib.rs
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+#![feature(error_generic_member_access)]
+#![feature(try_blocks)]
+
+use async_trait::async_trait;
+use buck2_cli_proto::GenericRequest;
+use buck2_client_ctx::client_ctx::ClientCommandContext;
+use buck2_client_ctx::common::ui::CommonConsoleOptions;
+use buck2_client_ctx::common::CommonBuildConfigurationOptions;
+use buck2_client_ctx::common::CommonEventLogOptions;
+use buck2_client_ctx::common::CommonStarlarkOptions;
+use buck2_client_ctx::daemon::client::BuckdClientConnector;
+use buck2_client_ctx::daemon::client::StdoutPartialResultHandler;
+use buck2_client_ctx::exit_result::ExitResult;
+use buck2_client_ctx::streaming::BuckSubcommand;
+use buck2_client_ctx::streaming::StreamingCommand;
+use buck2_common::argv::Argv;
+use buck2_common::argv::SanitizedArgv;
+
+use crate::debug::StarlarkDebugAttachCommand;
+use crate::lint::StarlarkLintCommand;
+use crate::typecheck::StarlarkTypecheckCommand;
+
+mod debug;
+pub mod lint;
+pub mod typecheck;
+
+#[derive(Debug, clap::Subcommand)]
+#[clap(name = "starlark", about = "Run Starlark operations")]
+pub enum StarlarkCommand {
+    #[clap(flatten)]
+    Opaque(StarlarkSubcommand),
+    DebugAttach(StarlarkDebugAttachCommand),
+}
+
+// Used for subcommands that follow `buck2 audit`'s "opaque" pattern where the command object is serialized
+// to the daemon and deserialized there and has a `server_execute()` on the Command object itself (as opposed
+// to using structured endpoints in the daemon protocol).
+#[derive(Debug, clap::Subcommand, serde::Serialize, serde::Deserialize)]
+pub enum StarlarkSubcommand {
+    Lint(StarlarkLintCommand),
+    Typecheck(StarlarkTypecheckCommand),
+}
+
+impl StarlarkSubcommand {
+    fn as_client_subcommand(&self) -> &dyn StarlarkClientSubcommand {
+        match self {
+            StarlarkSubcommand::Lint(cmd) => cmd,
+            StarlarkSubcommand::Typecheck(cmd) => cmd,
+        }
+    }
+}
+
+trait StarlarkClientSubcommand {
+    fn common_opts(&self) -> &StarlarkCommandCommonOptions;
+}
+
+#[derive(Debug, clap::Parser, serde::Serialize, serde::Deserialize, Default)]
+pub struct StarlarkCommandCommonOptions {
+    #[clap(flatten)]
+    config_opts: CommonBuildConfigurationOptions,
+
+    #[clap(flatten)]
+    starlark_opts: CommonStarlarkOptions,
+
+    #[clap(flatten)]
+    console_opts: CommonConsoleOptions,
+
+    #[clap(flatten)]
+    event_log_opts: CommonEventLogOptions,
+}
+
+#[async_trait]
+impl StreamingCommand for StarlarkSubcommand {
+    const COMMAND_NAME: &'static str = "starlark";
+
+    /// Starlark subcommands are all implemented as a generic request to the buckd server that will deserialize the command object.
+    async fn exec_impl(
+        self,
+        buckd: &mut BuckdClientConnector,
+        matches: &clap::ArgMatches,
+        ctx: &mut ClientCommandContext<'_>,
+    ) -> ExitResult {
+        let serialized = serde_json::to_string(&self)?;
+
+        let context = ctx.client_context(matches, &self)?;
+
+        buckd
+            .with_flushing()
+            .starlark(
+                GenericRequest {
+                    context: Some(context),
+                    serialized_opts: serialized,
+                },
+                ctx.stdin().console_interaction_stream(self.console_opts()),
+                &mut StdoutPartialResultHandler,
+            )
+            .await??;
+        ExitResult::success()
+    }
+
+    fn console_opts(&self) -> &CommonConsoleOptions {
+        &self.as_client_subcommand().common_opts().console_opts
+    }
+
+    fn event_log_opts(&self) -> &CommonEventLogOptions {
+        &self.as_client_subcommand().common_opts().event_log_opts
+    }
+
+    fn build_config_opts(&self) -> &CommonBuildConfigurationOptions {
+        &self.as_client_subcommand().common_opts().config_opts
+    }
+
+    fn starlark_opts(&self) -> &CommonStarlarkOptions {
+        &self.as_client_subcommand().common_opts().starlark_opts
+    }
+}
+
+impl StarlarkCommand {
+    pub fn exec(self, matches: &clap::ArgMatches, ctx: ClientCommandContext<'_>) -> ExitResult {
+        let matches = matches.subcommand().expect("subcommand not found").1;
+        match self {
+            StarlarkCommand::Opaque(cmd) => cmd.exec(matches, ctx),
+            StarlarkCommand::DebugAttach(cmd) => cmd.exec(matches, ctx),
+        }
+    }
+
+    pub fn sanitize_argv(&self, argv: Argv) -> SanitizedArgv {
+        argv.no_need_to_sanitize()
+    }
+}
diff --git a/app/buck2_cmd_starlark_client/src/lint.rs b/app/buck2_cmd_starlark_client/src/lint.rs
new file mode 100644
index 0000000000000..fbcd3872ca9e5
--- /dev/null
+++ b/app/buck2_cmd_starlark_client/src/lint.rs
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use buck2_client_ctx::path_arg::PathArg;
+
+use crate::StarlarkClientSubcommand;
+use crate::StarlarkCommandCommonOptions;
+
+#[derive(Debug, clap::Parser, serde::Serialize, serde::Deserialize)]
+#[clap(name = "starlark-lint", about = "Run the Starlark linter.")]
+pub struct StarlarkLintCommand {
+    #[clap(flatten)]
+    pub common_opts: StarlarkCommandCommonOptions,
+
+    #[clap(value_name = "PATH", required = true)]
+    pub paths: Vec<PathArg>,
+}
+
+impl StarlarkClientSubcommand for StarlarkLintCommand {
+    fn common_opts(&self) -> &StarlarkCommandCommonOptions {
+        &self.common_opts
+    }
+}
diff --git a/app/buck2_cmd_starlark_client/src/typecheck.rs b/app/buck2_cmd_starlark_client/src/typecheck.rs
new file mode 100644
index 0000000000000..72ddd869ad981
--- /dev/null
+++ b/app/buck2_cmd_starlark_client/src/typecheck.rs
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use buck2_client_ctx::path_arg::PathArg;
+
+use crate::StarlarkClientSubcommand;
+use crate::StarlarkCommandCommonOptions;
+
+#[derive(Debug, clap::Parser, serde::Serialize, serde::Deserialize)]
+#[clap(name = "starlark-typecheck", about = "Run the Starlark typechecker.")]
+pub struct StarlarkTypecheckCommand {
+    #[clap(flatten)]
+    pub common_opts: StarlarkCommandCommonOptions,
+
+    #[clap(value_name = "PATH", required = true)]
+    pub paths: Vec<PathArg>,
+}
+
+impl StarlarkClientSubcommand for StarlarkTypecheckCommand {
+    fn common_opts(&self) -> &StarlarkCommandCommonOptions {
+        &self.common_opts
+    }
+}
diff --git a/app/buck2_cmd_starlark_server/BUCK b/app/buck2_cmd_starlark_server/BUCK
new file mode 100644
index 0000000000000..48c1d45cc1fed
--- /dev/null
+++ b/app/buck2_cmd_starlark_server/BUCK
@@ -0,0 +1,28 @@
+load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library")
+
+oncall("build_infra")
+
+rust_library(
+    name = "buck2_cmd_starlark_server",
+    srcs = glob(["src/**/*.rs"]),
+    deps = [
+        "fbsource//third-party/rust:anyhow",
+        "fbsource//third-party/rust:async-recursion",
+        "fbsource//third-party/rust:async-trait",
+        "fbsource//third-party/rust:serde_json",
+        "//buck2/app/buck2_cli_proto:buck2_cli_proto",
+        "//buck2/app/buck2_client_ctx:buck2_client_ctx",
+        "//buck2/app/buck2_cmd_starlark_client:buck2_cmd_starlark_client",
+        "//buck2/app/buck2_common:buck2_common",
+        "//buck2/app/buck2_core:buck2_core",
+        "//buck2/app/buck2_data:buck2_data",
+        "//buck2/app/buck2_error:buck2_error",
+        "//buck2/app/buck2_events:buck2_events",
+        "//buck2/app/buck2_interpreter:buck2_interpreter",
+        "//buck2/app/buck2_interpreter_for_build:buck2_interpreter_for_build",
+        "//buck2/app/buck2_server_ctx:buck2_server_ctx",
+        "//buck2/dice/dice:dice",
+        "//buck2/gazebo/dupe:dupe",
+        "//buck2/starlark-rust/starlark:starlark",
+    ],
+)
diff --git a/app/buck2_cmd_starlark_server/Cargo.toml b/app/buck2_cmd_starlark_server/Cargo.toml
new file mode 100644
index 0000000000000..e5d8ce75f0e9e
--- /dev/null
+++ b/app/buck2_cmd_starlark_server/Cargo.toml
@@ -0,0 +1,28 @@
+[package]
+description = "`buck2 starlark` command server implementation"
+edition = "2021"
+license = { workspace = true }
+name = "buck2_cmd_starlark_server"
+repository = { workspace = true }
+version = "0.1.0"
+
+[dependencies]
+anyhow = { workspace = true }
+async-recursion = { workspace = true }
+async-trait = { workspace = true }
+dice = { workspace = true }
+dupe = { workspace = true }
+serde_json = { workspace = true }
+starlark = { workspace = true }
+
+buck2_cli_proto = { workspace = true }
+buck2_client_ctx = { workspace = true }
+buck2_cmd_starlark_client = { workspace = true }
+buck2_common = { workspace = true }
+buck2_core = { workspace = true }
+buck2_data = { workspace = true }
+buck2_error = { workspace = true }
+buck2_events = { workspace = true }
+buck2_interpreter = { workspace = true }
+buck2_interpreter_for_build = { workspace = true }
+buck2_server_ctx = { workspace = true }
diff --git a/app/buck2_cmd_starlark_server/src/lib.rs b/app/buck2_cmd_starlark_server/src/lib.rs
new file mode 100644
index 0000000000000..8301e159cd043
--- /dev/null
+++ b/app/buck2_cmd_starlark_server/src/lib.rs
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+#![feature(error_generic_member_access)]
+
+mod lint;
+mod typecheck;
+mod util;
+
+use async_trait::async_trait;
+use buck2_cli_proto::ClientContext;
+use buck2_cmd_starlark_client::StarlarkSubcommand;
+use buck2_events::dispatch::span_async;
+use buck2_server_ctx::commands::command_end;
+use buck2_server_ctx::ctx::ServerCommandContextTrait;
+use buck2_server_ctx::late_bindings::StarlarkServerCommand;
+use buck2_server_ctx::late_bindings::STARLARK_SERVER_COMMAND;
+use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher;
+
+pub fn init_late_bindings() {
+    STARLARK_SERVER_COMMAND.init(&StarlarkServerCommandImpl);
+}
+
+struct StarlarkServerCommandImpl;
+
+#[async_trait]
+impl StarlarkServerCommand for StarlarkServerCommandImpl {
+    async fn starlark(
+        &self,
+        ctx: &dyn ServerCommandContextTrait,
+        partial_result_dispatcher: PartialResultDispatcher<buck2_cli_proto::StdoutBytes>,
+        req: buck2_cli_proto::GenericRequest,
+    ) -> anyhow::Result<buck2_cli_proto::GenericResponse> {
+        let start_event = buck2_data::CommandStart {
+            metadata: ctx.request_metadata().await?,
+            data: Some(buck2_data::StarlarkCommandStart {}.into()),
+        };
+
+        span_async(
+            start_event,
+            server_starlark_command_inner(ctx, partial_result_dispatcher, req),
+        )
+        .await
+    }
+}
+
+#[async_trait]
+pub(crate) trait StarlarkServerSubcommand: Send + Sync + 'static {
+    async fn server_execute(
+        &self,
+        server_ctx: &dyn ServerCommandContextTrait,
+        stdout: PartialResultDispatcher<buck2_cli_proto::StdoutBytes>,
+        client_server_ctx: ClientContext,
+    ) -> anyhow::Result<()>;
+}
+
+async fn server_starlark_command_inner(
+    context: &dyn ServerCommandContextTrait,
+    partial_result_dispatcher: PartialResultDispatcher<buck2_cli_proto::StdoutBytes>,
+    req: buck2_cli_proto::GenericRequest,
+) -> (
+    anyhow::Result<buck2_cli_proto::GenericResponse>,
+    buck2_data::CommandEnd,
+) {
+    let result = parse_command_and_execute(context, partial_result_dispatcher, req)
+        .await
+        .map_err(Into::into);
+    let end_event = command_end(&result, buck2_data::StarlarkCommandEnd {});
+
+    let result = result
+        .map(|()| buck2_cli_proto::GenericResponse {})
+        .map_err(Into::into);
+
+    (result, end_event)
+}
+
+async fn parse_command_and_execute(
+    context: &dyn ServerCommandContextTrait,
+    partial_result_dispatcher: PartialResultDispatcher<buck2_cli_proto::StdoutBytes>,
+    req: buck2_cli_proto::GenericRequest,
+) -> anyhow::Result<()> {
+    let command: StarlarkSubcommand = serde_json::from_str(&req.serialized_opts)?;
+    as_server_subcommand(&command)
+        .server_execute(
+            context,
+            partial_result_dispatcher,
+            req.context.expect("buck cli always sets a client context"),
+        )
+        .await
+}
+
+fn as_server_subcommand(cmd: &StarlarkSubcommand) -> &dyn StarlarkServerSubcommand {
+    match cmd {
+        StarlarkSubcommand::Lint(cmd) => cmd,
+        StarlarkSubcommand::Typecheck(cmd) => cmd,
+    }
+}
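Since the server simply re-parses `serialized_opts`, the wire format for these "opaque" subcommands is plain serde JSON of the `StarlarkSubcommand` value that clap built on the client. A self-contained sketch of that round trip with stand-in types (the real exchange travels inside `GenericRequest`, and the real commands carry more fields):

```rust
use serde::Deserialize;
use serde::Serialize;

// Stand-ins for StarlarkLintCommand / StarlarkTypecheckCommand.
#[derive(Serialize, Deserialize, Debug)]
enum Subcommand {
    Lint { paths: Vec<String> },
    Typecheck { paths: Vec<String> },
}

fn main() -> anyhow::Result<()> {
    // Client side: serialize the parsed command into the request's serialized_opts.
    let serialized = serde_json::to_string(&Subcommand::Lint {
        paths: vec!["foo.bzl".to_owned()],
    })?;
    // Server side: recover the command object and dispatch on it.
    match serde_json::from_str::<Subcommand>(&serialized)? {
        Subcommand::Lint { paths } => println!("lint {paths:?}"),
        Subcommand::Typecheck { paths } => println!("typecheck {paths:?}"),
    }
    Ok(())
}
```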
diff --git a/app/buck2_cmd_starlark_server/src/lint.rs b/app/buck2_cmd_starlark_server/src/lint.rs
new file mode 100644
index 0000000000000..bceda4465371f
--- /dev/null
+++ b/app/buck2_cmd_starlark_server/src/lint.rs
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::collections::HashMap;
+use std::collections::HashSet;
+use std::io::Write;
+use std::sync::Arc;
+
+use anyhow::Context;
+use async_trait::async_trait;
+use buck2_cli_proto::ClientContext;
+use buck2_cmd_starlark_client::lint::StarlarkLintCommand;
+use buck2_common::dice::cells::HasCellResolver;
+use buck2_common::dice::data::HasIoProvider;
+use buck2_common::io::IoProvider;
+use buck2_core::cells::name::CellName;
+use buck2_core::cells::CellResolver;
+use buck2_interpreter::file_type::StarlarkFileType;
+use buck2_interpreter::paths::path::StarlarkPath;
+use buck2_server_ctx::ctx::ServerCommandContextTrait;
+use buck2_server_ctx::ctx::ServerCommandDiceContext;
+use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher;
+use dice::DiceTransaction;
+use dupe::Dupe;
+use dupe::OptionDupedExt;
+use starlark::analysis::AstModuleLint;
+use starlark::codemap::FileSpan;
+use starlark::errors::EvalSeverity;
+use starlark::errors::Lint;
+use starlark::syntax::AstModule;
+
+use crate::util::environment::Environment;
+use crate::util::paths::starlark_files;
+use crate::StarlarkServerSubcommand;
+
+/// The cache of names for a path, keyed by its CellName and its path type.
+struct Cache<'a> {
+    dice: &'a DiceTransaction,
+    cached: HashMap<(CellName, StarlarkFileType), Arc<HashSet<String>>>,
+}
+
+impl<'a> Cache<'a> {
+    pub(crate) fn new(dice: &'a DiceTransaction) -> Cache<'a> {
+        Self {
+            dice,
+            cached: HashMap::new(),
+        }
+    }
+
+    pub(crate) async fn get_names(
+        &mut self,
+        path: &StarlarkPath<'_>,
+    ) -> anyhow::Result<Arc<HashSet<String>>> {
+        let path_type = path.file_type();
+        let cell = path.cell();
+        if let Some(res) = self.cached.get(&(cell, path_type)) {
+            return Ok(res.dupe());
+        }
+        let env: Environment = Environment::new(cell, path_type, &mut self.dice.clone()).await?;
+        let res = Arc::new(env.get_names(path_type, self.dice).await?);
+        self.cached.insert((cell, path_type), res.dupe());
+        Ok(res)
+    }
+}
+
+async fn lint_file(
+    path: &StarlarkPath<'_>,
+    cell_resolver: &CellResolver,
+    io: &dyn IoProvider,
+    cache: &mut Cache<'_>,
+) -> anyhow::Result<Vec<Lint>> {
+    let dialect = path.file_type().dialect(false);
+    let proj_path = cell_resolver.resolve_path(path.path().as_ref().as_ref())?;
+    let path_str = proj_path.to_string();
+    let content = io
+        .read_file_if_exists(proj_path)
+        .await?
+        .with_context(|| format!("File not found: `{}`", path_str))?;
+    match AstModule::parse(&path_str, content.clone(), &dialect) {
+        Ok(ast) => Ok(ast.lint(Some(&*cache.get_names(path).await?))),
+        Err(err) => {
+            // There was a parse error, so we don't want to fail, we want to give a nice error message
+            // Do the best we can - it is probably a `Diagnostic`, which gives us more precise info.
+            Ok(vec![Lint {
+                location: err
+                    .span()
+                    .duped()
+                    .unwrap_or_else(|| FileSpan::new(path_str, content)),
+                short_name: "parse_error".to_owned(),
+                severity: EvalSeverity::Error,
+                problem: format!("{:#}", err.without_diagnostic()),
+                original: "".to_owned(),
+            }])
+        }
+    }
+}
+
+#[async_trait]
+impl StarlarkServerSubcommand for StarlarkLintCommand {
+    async fn server_execute(
+        &self,
+        server_ctx: &dyn ServerCommandContextTrait,
+        mut stdout: PartialResultDispatcher<buck2_cli_proto::StdoutBytes>,
+        _client_ctx: ClientContext,
+    ) -> anyhow::Result<()> {
+        server_ctx
+            .with_dice_ctx(|server_ctx, mut ctx| async move {
+                let cell_resolver = &ctx.get_cell_resolver().await?;
+                let io = &ctx.global_data().get_io_provider();
+
+                let mut stdout = stdout.as_writer();
+                let mut lint_count = 0;
+                let files =
+                    starlark_files(&mut ctx, &self.paths, server_ctx, &cell_resolver, &**io)
+                        .await?;
+                let mut cache = Cache::new(&ctx);
+
+                for file in &files {
+                    let lints = lint_file(&file.borrow(), cell_resolver, &**io, &mut cache).await?;
+                    lint_count += lints.len();
+                    for lint in lints {
+                        writeln!(stdout, "{}", lint)?;
+                    }
+                }
+                if lint_count > 0 {
+                    Err(anyhow::anyhow!("Found {} lints", lint_count))
+                } else {
+                    writeln!(
+                        server_ctx.stderr()?,
+                        "Found no lints in {} files",
+                        files.len()
+                    )?;
+                    Ok(())
+                }
+            })
+            .await
+    }
+}
diff --git a/app/buck2_cmd_starlark_server/src/typecheck.rs b/app/buck2_cmd_starlark_server/src/typecheck.rs
new file mode 100644
index 0000000000000..8027117741e2e
--- /dev/null
+++ b/app/buck2_cmd_starlark_server/src/typecheck.rs
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::collections::HashMap;
+use std::io::Write;
+
+use anyhow::Context;
+use async_recursion::async_recursion;
+use async_trait::async_trait;
+use buck2_cli_proto::ClientContext;
+use buck2_cmd_starlark_client::typecheck::StarlarkTypecheckCommand;
+use buck2_common::dice::cells::HasCellResolver;
+use buck2_common::dice::data::HasIoProvider;
+use buck2_common::io::IoProvider;
+use buck2_core::cells::name::CellName;
+use buck2_core::cells::CellResolver;
+use buck2_interpreter::file_type::StarlarkFileType;
+use buck2_interpreter::paths::module::OwnedStarlarkModulePath;
+use buck2_interpreter::paths::path::OwnedStarlarkPath;
+use buck2_interpreter_for_build::interpreter::dice_calculation_delegate::HasCalculationDelegate;
+use buck2_interpreter_for_build::interpreter::interpreter_for_cell::ParseData;
+use buck2_server_ctx::ctx::ServerCommandContextTrait;
+use buck2_server_ctx::ctx::ServerCommandDiceContext;
+use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher;
+use dice::DiceTransaction;
+use dupe::Dupe;
+use starlark::environment::Globals;
+use starlark::typing::AstModuleTypecheck;
+use starlark::typing::Interface;
+
+use crate::util::environment::Environment;
+use crate::util::paths::starlark_files;
+use crate::StarlarkServerSubcommand;
+
+struct Cache<'a> {
+    // Things we have access to get information
+    dice: &'a DiceTransaction,
+    io: &'a dyn IoProvider,
+    cell_resolver: &'a CellResolver,
+    // Things we have access to write information
+    stdout: &'a mut (dyn Write + Send + Sync),
+    stderr: &'a mut (dyn Write + Send + Sync),
+    // Our accumulated state
+    oracle: HashMap<(CellName, StarlarkFileType), Globals>,
+    cache: HashMap<OwnedStarlarkModulePath, Interface>,
+}
+
+impl<'a> Cache<'a> {
+    async fn typecheck(&mut self, path: OwnedStarlarkPath) -> anyhow::Result<()> {
+        self.run(path).await?;
+        Ok(())
+    }
+
+    async fn get_oracle(
+        &mut self,
+        cell: CellName,
+        path_type: StarlarkFileType,
+    ) -> anyhow::Result<Globals> {
+        match self.oracle.get(&(cell, path_type)) {
+            Some(g) => Ok(g.dupe()),
+            None => {
+                let globals = Environment::new(cell, path_type, &mut self.dice.clone())
+                    .await?
+                    .globals;
+                self.oracle.insert((cell, path_type), globals.dupe());
+                Ok(globals)
+            }
+        }
+    }
+
+    async fn get(&mut self, path: OwnedStarlarkModulePath) -> anyhow::Result<Interface> {
+        match self.cache.get(&path) {
+            Some(x) => Ok(x.dupe()),
+            None => {
+                let res = self.run(path.clone().into_starlark_path()).await?;
+                self.cache.insert(path, res.dupe());
+                Ok(res)
+            }
+        }
+    }
+
+    #[async_recursion]
+    async fn run(&mut self, path: OwnedStarlarkPath) -> anyhow::Result<Interface> {
+        let path_ref = path.borrow();
+        writeln!(self.stderr, "Type checking: {path_ref}")?;
+        let proj_path = self
+            .cell_resolver
+            .resolve_path(path_ref.path().as_ref().as_ref())?;
+        let path_str = proj_path.to_string();
+        let src = self
+            .io
+            .read_file_if_exists(proj_path)
+            .await?
+            .with_context(|| format!("File not found: `{path_str}`"))?;
+
+        let mut dice = self.dice.clone();
+        let interp = dice
+            .get_interpreter_calculator(path_ref.cell(), path_ref.build_file_cell())
+            .await?;
+
+        let ParseData(ast, _) = interp.prepare_eval_with_content(path_ref, src)??;
+        let mut loads = HashMap::new();
+        for x in ast.loads() {
+            let y = interp.resolve_load(path_ref, x.module_id).await?;
+            let interface = self.get(y).await?;
+            loads.insert(x.module_id.to_owned(), interface);
+        }
+        let globals = self
+            .get_oracle(path_ref.cell(), path_ref.file_type())
+            .await?;
+        let (errors, bindings, interface, approximations) = ast.typecheck(&globals, &loads);
+
+        if !approximations.is_empty() {
+            writeln!(self.stderr, "\n\nAPPROXIMATIONS:")?;
+            for x in approximations {
+                writeln!(self.stderr, "{x}")?;
+            }
+        }
+
+        writeln!(self.stderr, "\n\nBINDINGS:\n{bindings}")?;
+
+        let errors_count = errors.len();
+        if errors_count == 0 {
+            Ok(interface)
+        } else {
+            writeln!(self.stdout, "\n\nERRORS:")?;
+            for x in errors {
+                writeln!(self.stdout, "{x}")?;
+            }
+            Err(anyhow::anyhow!("Detected {errors_count} errors"))
+        }
+    }
+}
+
+#[async_trait]
+impl StarlarkServerSubcommand for StarlarkTypecheckCommand {
+    async fn server_execute(
+        &self,
+        server_ctx: &dyn ServerCommandContextTrait,
+        mut stdout: PartialResultDispatcher<buck2_cli_proto::StdoutBytes>,
+        _client_ctx: ClientContext,
+    ) -> anyhow::Result<()> {
+        server_ctx
+            .with_dice_ctx(|server_ctx, mut dice| async move {
+                let cell_resolver = &dice.get_cell_resolver().await?;
+                let io = &dice.global_data().get_io_provider();
+
+                let files =
+                    starlark_files(&mut dice, &self.paths, server_ctx, cell_resolver, &**io)
+                        .await?;
+                let mut stdout = stdout.as_writer();
+                let mut stderr = server_ctx.stderr()?;
+                let mut cache = Cache {
+                    dice: &dice,
+                    io: &**io,
+                    cell_resolver,
+                    stdout: &mut stdout,
+                    stderr: &mut stderr,
+                    oracle: HashMap::new(),
+                    cache: HashMap::new(),
+                };
+                for file in files {
+                    cache.typecheck(file).await?;
+                }
+                let file_count = cache.cache.len();
+                writeln!(stderr, "Found no type errors in {file_count} files")?;
+                Ok(())
+            })
+            .await
+    }
+}
diff --git a/app/buck2_starlark/src/util/mod.rs b/app/buck2_cmd_starlark_server/src/util.rs
similarity index 100%
rename from app/buck2_starlark/src/util/mod.rs
rename to app/buck2_cmd_starlark_server/src/util.rs
diff --git a/app/buck2_cmd_starlark_server/src/util/environment.rs b/app/buck2_cmd_starlark_server/src/util/environment.rs
new file mode 100644
index 0000000000000..1656ff81bf221
--- /dev/null
+++ b/app/buck2_cmd_starlark_server/src/util/environment.rs
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::collections::HashSet;
+
+use buck2_core::bzl::ImportPath;
+use buck2_core::cells::build_file_cell::BuildFileCell;
+use buck2_core::cells::name::CellName;
+use buck2_interpreter::file_type::StarlarkFileType;
+use buck2_interpreter::import_paths::HasImportPaths;
+use buck2_interpreter::load_module::InterpreterCalculation;
+use buck2_interpreter::load_module::INTERPRETER_CALCULATION_IMPL;
+use buck2_interpreter::prelude_path::PreludePath;
+use dice::DiceTransaction;
+use starlark::environment::Globals;
+
+/// The environment in which a Starlark file is evaluated.
diff --git a/app/buck2_starlark/src/util/mod.rs b/app/buck2_cmd_starlark_server/src/util.rs
similarity index 100%
rename from app/buck2_starlark/src/util/mod.rs
rename to app/buck2_cmd_starlark_server/src/util.rs
diff --git a/app/buck2_cmd_starlark_server/src/util/environment.rs b/app/buck2_cmd_starlark_server/src/util/environment.rs
new file mode 100644
index 0000000000000..1656ff81bf221
--- /dev/null
+++ b/app/buck2_cmd_starlark_server/src/util/environment.rs
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::collections::HashSet;
+
+use buck2_core::bzl::ImportPath;
+use buck2_core::cells::build_file_cell::BuildFileCell;
+use buck2_core::cells::name::CellName;
+use buck2_interpreter::file_type::StarlarkFileType;
+use buck2_interpreter::import_paths::HasImportPaths;
+use buck2_interpreter::load_module::InterpreterCalculation;
+use buck2_interpreter::load_module::INTERPRETER_CALCULATION_IMPL;
+use buck2_interpreter::prelude_path::PreludePath;
+use dice::DiceTransaction;
+use starlark::environment::Globals;
+
+/// The environment in which a Starlark file is evaluated.
+pub(crate) struct Environment {
+    /// The globals that are driven from Rust.
+    pub(crate) globals: Globals,
+    /// The path to the prelude, if the prelude is loaded in this file.
+    /// Note that in a BUCK file the `native` value is also exploded into the top-level.
+    prelude: Option<PreludePath>,
+    /// A path that is implicitly loaded as additional globals.
+    preload: Option<ImportPath>,
+}
+
+impl Environment {
+    pub(crate) async fn new(
+        cell: CellName,
+        path_type: StarlarkFileType,
+        dice: &mut DiceTransaction,
+    ) -> anyhow::Result<Environment> {
+        // Find the information from the globals
+        let globals = INTERPRETER_CALCULATION_IMPL.get()?.global_env(dice).await?;
+
+        // Next grab the prelude, unless we are in the prelude cell and not a build file
+        let prelude = match INTERPRETER_CALCULATION_IMPL
+            .get()?
+            .prelude_import(dice)
+            .await?
+        {
+            Some(prelude)
+                if path_type == StarlarkFileType::Buck || prelude.import_path().cell() != cell =>
+            {
+                Some(prelude)
+            }
+            _ => None,
+        };
+
+        // Now grab the pre-load things
+        let preload = dice
+            .import_paths_for_cell(BuildFileCell::new(cell))
+            .await?
+            .root_import()
+            .cloned();
+
+        Ok(Environment {
+            globals,
+            prelude,
+            preload,
+        })
+    }
+
+    pub(crate) async fn get_names(
+        &self,
+        path_type: StarlarkFileType,
+        dice: &DiceTransaction,
+    ) -> anyhow::Result<HashSet<String>> {
+        let mut dice = dice.clone();
+        let mut names = HashSet::new();
+
+        for x in self.globals.names() {
+            names.insert(x.as_str().to_owned());
+        }
+
+        if let Some(prelude) = &self.prelude {
+            let m = dice
+                .get_loaded_module_from_import_path(prelude.import_path())
+                .await?;
+            for x in m.env().names() {
+                names.insert(x.as_str().to_owned());
+            }
+            if path_type == StarlarkFileType::Buck {
+                for (name, _value) in m.extra_globals_from_prelude_for_buck_files()? {
+                    names.insert(name.to_owned());
+                }
+            }
+        }
+
+        if let Some(preload) = &self.preload {
+            let m = dice.get_loaded_module_from_import_path(preload).await?;
+            for x in m.env().names() {
+                names.insert(x.as_str().to_owned());
+            }
+        }
+
+        Ok(names)
+    }
+}
diff --git a/app/buck2_cmd_starlark_server/src/util/paths.rs b/app/buck2_cmd_starlark_server/src/util/paths.rs
new file mode 100644
index 0000000000000..b583c20248358
--- /dev/null
+++ b/app/buck2_cmd_starlark_server/src/util/paths.rs
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::ops::Deref;
+
+use async_recursion::async_recursion;
+use buck2_client_ctx::path_arg::PathArg;
+use buck2_common::dice::file_ops::DiceFileComputations;
+use buck2_common::file_ops::FileType;
+use buck2_common::file_ops::RawPathMetadata;
+use buck2_common::io::IoProvider;
+use buck2_core::build_file_path::BuildFilePath;
+use buck2_core::bzl::ImportPath;
+use buck2_core::cells::CellResolver;
+use buck2_core::fs::paths::file_name::FileName;
+use buck2_core::fs::project_rel_path::ProjectRelativePathBuf;
+use buck2_core::package::PackageLabel;
+use buck2_interpreter::paths::bxl::BxlFilePath;
+use buck2_interpreter::paths::package::PackageFilePath;
+use buck2_interpreter::paths::path::OwnedStarlarkPath;
+use buck2_server_ctx::ctx::ServerCommandContextTrait;
+use dice::DiceComputations;
+use dupe::Dupe;
+
+#[derive(Debug, buck2_error::Error)]
+enum StarlarkFilesError {
+    #[error("File not found, `{0}`")]
+    FileNotFound(ProjectRelativePathBuf),
+    #[error("Symlinks and other esoteric files are not supported, `{0}`")]
+    UnsupportedFileType(ProjectRelativePathBuf),
+}
+
+#[async_recursion]
+async fn starlark_file(
+    ctx: &mut DiceComputations<'_>,
+    proj_path: ProjectRelativePathBuf,
+    // None = this file was given explicitly
+    // Some = it was a directory traversal (and we know its type)
+    recursive: Option<FileType>,
+    cell_resolver: &CellResolver,
+    io: &dyn IoProvider,
+    files: &mut Vec<OwnedStarlarkPath>,
+) -> anyhow::Result<()> {
+    let cell_path = cell_resolver.get_cell_path(&proj_path)?;
+    if recursive.is_some()
+        && DiceFileComputations::is_ignored(ctx, cell_path.as_ref())
+            .await?
+            .is_ignored()
+    {
+        // File is ignored by Buck, give up on it
+        return Ok(());
+    }
+
+    let typ = match &recursive {
+        Some(typ) => typ.dupe(),
+        None => match io.read_path_metadata_if_exists(proj_path.clone()).await? {
+            None => {
+                return Err(StarlarkFilesError::FileNotFound(proj_path).into());
+            }
+            Some(RawPathMetadata::Directory) => FileType::Directory,
+            Some(RawPathMetadata::File(_)) => {
+                // It's a shame we throw away the digest we calculated, but not a huge deal (it's cheap compared to parsing)
+                FileType::File
+            }
+            Some(RawPathMetadata::Symlink { .. }) => FileType::Symlink,
+        },
+    };
+
+    match typ {
+        FileType::Directory => {
+            for x in io.read_dir(proj_path.clone()).await? {
+                let Ok(file_name) = FileName::new(&x.file_name) else {
+                    // Skip files which buck does not like:
+                    // this function works with `CellPath` values,
+                    // which cannot be constructed from paths not acceptable by buck.
+                    continue;
+                };
+                let mut child_path = proj_path.clone();
+                child_path.push(file_name);
+                starlark_file(ctx, child_path, Some(x.file_type), cell_resolver, io, files).await?;
+            }
+        }
+        FileType::File => {
+            // It's a shame we throw away the digest we calculated, but not a huge deal (it's cheap compared to parsing)
+            let is_buildfile = match proj_path.file_name() {
+                None => false,
+                Some(file_name) => DiceFileComputations::buildfiles(ctx, cell_path.cell())
+                    .await?
+                    .iter()
+                    .any(|x| (*x).deref() == file_name),
+            };
+
+            if is_buildfile {
+                files.push(OwnedStarlarkPath::BuildFile(BuildFilePath::new(
+                    PackageLabel::from_cell_path(cell_path.parent().unwrap()),
+                    proj_path.file_name().unwrap().to_owned(),
+                )));
+            } else if proj_path.as_str().ends_with(".bxl") {
+                files.push(OwnedStarlarkPath::BxlFile(BxlFilePath::new(cell_path)?));
+            } else if let Some(path) = PackageFilePath::from_file_path(cell_path.as_ref()) {
+                files.push(OwnedStarlarkPath::PackageFile(path));
+            } else if recursive.is_none() || proj_path.as_str().ends_with(".bzl") {
+                // If a file was asked for explicitly, and is nothing else, treat it as a .bzl file.
+                // If it's not explicit, just ignore it (probably a source file).
+                files.push(OwnedStarlarkPath::LoadFile(ImportPath::new_same_cell(
+                    cell_path,
+                )?));
+            }
+        }
+        FileType::Symlink | FileType::Unknown => {
+            if recursive.is_none() {
+                return Err(StarlarkFilesError::UnsupportedFileType(proj_path).into());
+            }
+        }
+    }
+    Ok(())
+}
+
+/// Find the paths to apply Starlark to (e.g. linter, typecheck)
+pub(crate) async fn starlark_files(
+    ctx: &mut DiceComputations<'_>,
+    paths: &[PathArg],
+    context: &dyn ServerCommandContextTrait,
+    cell_resolver: &CellResolver,
+    io: &dyn IoProvider,
+) -> anyhow::Result<Vec<OwnedStarlarkPath>> {
+    let mut files = Vec::new();
+
+    for path in paths {
+        let path = path.resolve(context.working_dir_abs());
+        let cell_path = cell_resolver.get_cell_path_from_abs_path(&path, context.project_root())?;
+        let proj_path = cell_resolver.resolve_path(cell_path.as_ref())?;
+        starlark_file(ctx, proj_path, None, cell_resolver, io, &mut files).await?;
+    }
+    Ok(files)
+}
diff --git a/app/buck2_common/BUCK b/app/buck2_common/BUCK
index cdf334a777abd..8ba26c55cef9e 100644
--- a/app/buck2_common/BUCK
+++ b/app/buck2_common/BUCK
@@ -1,5 +1,4 @@
 load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library")
-load("@fbsource//tools/build_defs:glob_defs.bzl", "glob")
 
 oncall("build_infra")
 
@@ -8,12 +7,10 @@ rust_library(
     srcs = glob(
         ["src/**/*.rs"],
    ),
-    doctests = False,  # FIXME
    os_deps = [
        (
            "linux",
            [
-                "fbsource//third-party/rust:hyper-unix-connector",
                "fbsource//third-party/rust:nix",
                "fbsource//third-party/rust:tower",
                "fbsource//third-party/rust:xattr",
@@ -22,7 +19,6 @@ rust_library(
        (
            "macos",
            [
-                "fbsource//third-party/rust:hyper-unix-connector",
                "fbsource//third-party/rust:nix",
                "fbsource//third-party/rust:tower",
                "fbsource//third-party/rust:xattr",
@@ -31,16 +27,17 @@ rust_library(
    ],
    test_deps = [
        "fbsource//third-party/rust:assert_matches",
-        "fbsource//third-party/rust:httptest",
        "fbsource//third-party/rust:indoc",
        "fbsource//third-party/rust:maplit",
        "fbsource//third-party/rust:tempfile",
        "fbsource//third-party/rust:test-case",
+        "fbsource//third-party/rust:triomphe",
    ],
    deps = [
-        "fbsource//third-party/blake3:blake3-rust",
        "fbsource//third-party/rust:anyhow",
+        "fbsource//third-party/rust:async-scoped",
        "fbsource//third-party/rust:async-trait",
+        "fbsource//third-party/rust:blake3",
        "fbsource//third-party/rust:bytes",
        "fbsource//third-party/rust:chrono",
        "fbsource//third-party/rust:compact_str",
@@ -49,54 +46,48 @@ rust_library(
        "fbsource//third-party/rust:derive_more",
        "fbsource//third-party/rust:digest",
        "fbsource//third-party/rust:dirs",
+        "fbsource//third-party/rust:fs4",
        "fbsource//third-party/rust:futures",
        "fbsource//third-party/rust:globset",
        "fbsource//third-party/rust:hex",
-        "fbsource//third-party/rust:http",
        "fbsource//third-party/rust:hyper",
-        "fbsource//third-party/rust:hyper-proxy",
"fbsource//third-party/rust:hyper-rustls", - "fbsource//third-party/rust:hyper-timeout", "fbsource//third-party/rust:indexmap", - "fbsource//third-party/rust:ipnetwork", "fbsource//third-party/rust:itertools", "fbsource//third-party/rust:num_enum", "fbsource//third-party/rust:once_cell", "fbsource//third-party/rust:parking_lot", - "fbsource//third-party/rust:pin-project", "fbsource//third-party/rust:prost-types", "fbsource//third-party/rust:rand", "fbsource//third-party/rust:ref-cast", "fbsource//third-party/rust:regex", "fbsource//third-party/rust:rusqlite", - "fbsource//third-party/rust:rustls", - "fbsource//third-party/rust:rustls-native-certs", - "fbsource//third-party/rust:rustls-pemfile", "fbsource//third-party/rust:serde", "fbsource//third-party/rust:serde_json", "fbsource//third-party/rust:sha1", "fbsource//third-party/rust:sha2", "fbsource//third-party/rust:smallvec", - "fbsource//third-party/rust:thiserror", "fbsource//third-party/rust:tokio", - "fbsource//third-party/rust:tokio-rustls", - "fbsource//third-party/rust:tokio-util", "fbsource//third-party/rust:tonic", "fbsource//third-party/rust:tracing", "//buck2/allocative/allocative:allocative", + "//buck2/app/buck2_cli_proto:buck2_cli_proto", "//buck2/app/buck2_core:buck2_core", "//buck2/app/buck2_data:buck2_data", + "//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_events:buck2_events", + "//buck2/app/buck2_futures:buck2_futures", + "//buck2/app/buck2_http:buck2_http", "//buck2/app/buck2_util:buck2_util", "//buck2/dice/dice:dice", - "//buck2/facebook/allocator-stats:allocator-stats", - "//buck2/facebook/find_certs:find_certs", + # @oss-disable: "//buck2/facebook/allocator-stats:allocator-stats", "//buck2/gazebo/cmp_any:cmp_any", "//buck2/gazebo/dupe:dupe", "//buck2/gazebo/gazebo:gazebo", - "//buck2/shed/more_futures:more_futures", "//buck2/starlark-rust/starlark_map:starlark_map", - # @oss-disable: "//common/rust/cpe:cpe", - "//common/rust/folly/memory:memory", + # @oss-disable: "//common/rust/folly/logging:logging", + # @oss-disable: "//common/rust/folly/memory:memory", + # @oss-disable: "//common/rust/gflags:gflags", + "//common/rust/shed/fbinit:fbinit", + # @oss-disable: "//common/rust/shed/hostcaps:hostcaps", ], ) diff --git a/app/buck2_common/Cargo.toml b/app/buck2_common/Cargo.toml index 615b682026b21..1dc371b98e3a9 100644 --- a/app/buck2_common/Cargo.toml +++ b/app/buck2_common/Cargo.toml @@ -1,86 +1,76 @@ [package] +edition = "2021" +license = { workspace = true } name = "buck2_common" +repository = { workspace = true } version = "0.1.0" -edition = "2021" [dependencies] anyhow = { workspace = true } +async-scoped = { workspace = true } async-trait = { workspace = true } blake3 = { workspace = true } bytes = { workspace = true } chrono = { workspace = true } compact_str = { workspace = true } dashmap = { workspace = true } +derivative = { workspace = true } derive_more = { workspace = true } digest = { workspace = true } dirs = { workspace = true } -faccess = { workspace = true } +fbinit = { workspace = true } +fs4 = { workspace = true } futures = { workspace = true } globset = { workspace = true } hex = { workspace = true } -http = { workspace = true } hyper = { workspace = true } -hyper-proxy = { workspace = true } -hyper-rustls = { workspace = true } -hyper-timeout = { workspace = true } indexmap = { workspace = true } -ipnetwork = { workspace = true } itertools = { workspace = true } +num_enum = { workspace = true } once_cell = { workspace = true } -pin-project = { workspace = true } +parking_lot = { 
 prost-types = { workspace = true }
 rand = { workspace = true }
 ref-cast = { workspace = true }
 regex = { workspace = true }
 rusqlite = { workspace = true }
-rustls = { workspace = true }
-rustls-native-certs = { workspace = true }
-rustls-pemfile = { workspace = true }
+serde = { workspace = true }
+serde_json = { workspace = true }
 sha1 = { workspace = true }
 sha2 = { workspace = true }
 smallvec = { workspace = true }
-thiserror = { workspace = true }
 tokio = { workspace = true }
-tokio-rustls = { workspace = true }
 tonic = { workspace = true }
 tracing = { workspace = true }
-num_enum = { workspace = true }
-derivative = { workspace = true }
-parking_lot = { workspace = true }
-tokio-util = { workspace = true }
-serde = { workspace = true }
-serde_json = { workspace = true }
-
+triomphe = { workspace = true }
 allocative = { workspace = true }
+cmp_any = { workspace = true }
 dice = { workspace = true }
-gazebo = { workspace = true }
 dupe = { workspace = true }
-gazebo_lint.version = "0.1"
-gazebo_lint.optional = true
-# @oss-disable: gazebo_lint.path = "../../gazebo_lint/gazebo_lint"
-cmp_any = { workspace = true }
-more_futures = { workspace = true }
+gazebo = { workspace = true }
 starlark_map = { workspace = true }
 
+buck2_cli_proto = { workspace = true }
 buck2_core = { workspace = true }
 buck2_data = { workspace = true }
+buck2_error = { workspace = true }
 buck2_events = { workspace = true }
+buck2_futures = { workspace = true }
+buck2_http = { workspace = true }
 buck2_util = { workspace = true }
 
 [target.'cfg(unix)'.dependencies]
-hyper-unix-connector = { workspace = true }
 nix = { workspace = true }
 tower = { workspace = true }
 xattr = { workspace = true }
 
-[features]
-# @oss-disable: default = ["gazebo_lint"]
-
 [dev-dependencies]
+assert_matches = { workspace = true }
 indoc = { workspace = true }
 maplit = { workspace = true }
-assert_matches = { workspace = true }
 tempfile = { workspace = true }
 test-case = { workspace = true }
-httptest = { workspace = true }
+
+[lints.rust]
+unexpected_cfgs = { level = "warn", check-cfg = ["cfg(fbcode_build)"] }
diff --git a/app/buck2_client_ctx/src/argv.rs b/app/buck2_common/src/argv.rs
similarity index 100%
rename from app/buck2_client_ctx/src/argv.rs
rename to app/buck2_common/src/argv.rs
diff --git a/app/buck2_common/src/buckd_connection.rs b/app/buck2_common/src/buckd_connection.rs
index c97b2149723dc..f7788f8cf6bd2 100644
--- a/app/buck2_common/src/buckd_connection.rs
+++ b/app/buck2_common/src/buckd_connection.rs
@@ -16,7 +16,7 @@ use gazebo::prelude::StrExt;
 
 pub const BUCK_AUTH_TOKEN_HEADER: &str = "x-buck-auth-token";
 
-#[derive(Debug, thiserror::Error)]
+#[derive(Debug, buck2_error::Error)]
 enum ConnectionTypeError {
     #[error("Failed to parse correct endpoint information {0}")]
     ParseError(String),
diff --git a/app/buck2_common/src/build_count.rs b/app/buck2_common/src/build_count.rs
new file mode 100644
index 0000000000000..6983195a199a5
--- /dev/null
+++ b/app/buck2_common/src/build_count.rs
@@ -0,0 +1,421 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::collections::HashMap;
+use std::time::Duration;
+
+use anyhow::Context;
+use buck2_core::fs::async_fs_util;
+use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf;
+use buck2_core::fs::paths::file_name::FileName;
+use buck2_data::ParsedTargetPatterns;
+use fs4::FileExt;
+use serde::Deserialize;
+use serde::Serialize;
+
+use crate::client_utils;
+
+// Version for serialized BuildCount on disk.
+// Update if changing BuildCount to allow building with deployed and compiled buck on the same rev.
+pub const BUILD_COUNT_VERSION: u64 = 1;
+
+#[derive(
+    Default,
+    Clone,
+    Copy,
+    Serialize,
+    Deserialize,
+    PartialEq,
+    Eq,
+    PartialOrd,
+    Ord,
+    Debug
+)]
+pub struct BuildCount {
+    pub successful_build_count: u64,
+    pub attempted_build_count: u64,
+}
+
+impl BuildCount {
+    pub fn new(successful_build_count: u64, attempted_build_count: u64) -> Self {
+        Self {
+            successful_build_count,
+            attempted_build_count,
+        }
+    }
+}
+
+#[derive(Serialize, Deserialize)]
+pub struct BuildCountMap(HashMap<String, BuildCount>);
+
+impl BuildCountMap {
+    pub fn increment(&mut self, patterns: &ParsedTargetPatterns, is_success: bool) {
+        for target in patterns.target_patterns.iter() {
+            match self.0.get_mut(&target.value) {
+                Some(count) => {
+                    count.attempted_build_count += 1;
+                    if is_success {
+                        count.successful_build_count += 1;
+                    }
+                }
+                None => {
+                    let count = BuildCount::new(if is_success { 1 } else { 0 }, 1);
+                    self.0.insert(target.value.clone(), count);
+                }
+            }
+        }
+    }
+
+    pub fn min_count(&self, patterns: &ParsedTargetPatterns) -> BuildCount {
+        if patterns.target_patterns.is_empty() {
+            return Default::default();
+        }
+
+        // If the target has never been successfully built it won't be in the map, in that case its count is 0.
+        return patterns
+            .target_patterns
+            .iter()
+            .map(|v| self.0.get(&v.value).copied().unwrap_or(Default::default()))
+            .min()
+            .unwrap(); // target_patterns is non-empty, so min() should return Some
+    }
+}
+
+/// BuildCountManager keeps track of how many times each target has been successfully built since rebase.
+/// This helps understand how much the performance differs between first and incremental builds.
+pub struct BuildCountManager {
+    base_dir: AbsNormPathBuf,
+}
+
+impl BuildCountManager {
+    const LOCK_FILE_NAME: &'static str = "build_count.lock";
+    const LOCK_TIMEOUT: Duration = Duration::from_millis(2000);
+
+    pub fn new(base_dir: AbsNormPathBuf) -> Self {
+        Self { base_dir }
+    }
+
+    async fn ensure_dir(&self) -> anyhow::Result<()> {
+        async_fs_util::create_dir_all(&self.base_dir).await
+    }
+
+    async fn read(&self, file_name: &FileName) -> anyhow::Result<BuildCountMap> {
+        let path = self.base_dir.join(file_name);
+        match async_fs_util::read_to_string_if_exists(&path).await? {
+            Some(buffer) => Ok(serde_json::from_str(&buffer)
+                .with_context(|| format!("Parsing JSON from {}", path.display()))?),
+            None => {
+                // it is normal after rebase, clean, etc.
+                Ok(BuildCountMap(HashMap::new()))
+            }
+        }
+    }
+
+    async fn write(&self, build_count: &BuildCountMap, file_name: &FileName) -> anyhow::Result<()> {
+        self.ensure_dir().await?;
+        let path = self.base_dir.join(file_name);
+        async_fs_util::write(path, &serde_json::to_vec(build_count)?).await
+    }
+
+    async fn lock_with_timeout(&self, timeout: Duration) -> anyhow::Result<FileLockGuard> {
+        self.ensure_dir().await?;
+        let file = std::fs::File::create(self.base_dir.join(FileName::new(Self::LOCK_FILE_NAME)?))?;
+        let fileref = &file;
+        client_utils::retrying(
+            Duration::from_millis(5),
+            Duration::from_millis(100),
+            timeout,
+            || async { anyhow::Ok(fileref.try_lock_exclusive()?) },
+        )
+        .await?;
+        Ok(FileLockGuard { file })
+    }
+
+    /// Updates the build counts for set of targets (on success) and returns the min.
+    pub async fn increment(
+        &self,
+        merge_base: &str,
+        target_patterns: &ParsedTargetPatterns,
+        is_success: bool,
+    ) -> anyhow::Result<BuildCount> {
+        self.mutate(
+            merge_base,
+            target_patterns,
+            Some(|build_count_map: &mut BuildCountMap| {
+                build_count_map.increment(target_patterns, is_success);
+            }),
+        )
+        .await
+    }
+
+    /// Returns the existing min build count for the set of targets.
+    pub async fn min_count(
+        &self,
+        merge_base: &str,
+        target_patterns: &ParsedTargetPatterns,
+    ) -> anyhow::Result<BuildCount> {
+        self.mutate(merge_base, target_patterns, None::<fn(&mut BuildCountMap)>)
+            .await
+    }
+
+    async fn mutate(
+        &self,
+        merge_base: &str,
+        target_patterns: &ParsedTargetPatterns,
+        mutation: Option<impl FnOnce(&mut BuildCountMap)>,
+    ) -> anyhow::Result<BuildCount> {
+        let file_name_str = format!("{}-{}", merge_base, BUILD_COUNT_VERSION);
+        let file_name = FileName::new(&file_name_str)?;
+        let _guard = self.lock_with_timeout(Self::LOCK_TIMEOUT).await?;
+        let mut build_count_map = self.read(file_name).await?;
+        if let Some(mutation) = mutation {
+            mutation(&mut build_count_map);
+            self.write(&build_count_map, file_name).await?;
+        }
+        Ok(build_count_map.min_count(target_patterns))
+    }
+}
+
+#[must_use]
+struct FileLockGuard {
+    file: std::fs::File,
+}
+
+impl Drop for FileLockGuard {
+    fn drop(&mut self) {
+        self.file
+            .unlock()
+            .expect("Unexpected failure to release a lock file for build count");
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use gazebo::prelude::VecExt;
+
+    use super::*;
+
+    fn make_patterns(targets: Vec<&'static str>) -> ParsedTargetPatterns {
+        ParsedTargetPatterns {
+            target_patterns: targets.into_map(|v| buck2_data::TargetPattern {
+                value: v.to_owned(),
+            }),
+        }
+    }
+
+    #[test]
+    fn test_update_normal_input() -> anyhow::Result<()> {
+        let mut before = HashMap::new();
+        before.insert("//some:target".to_owned(), BuildCount::new(1, 1));
+        before.insert("//some/other:target".to_owned(), BuildCount::new(2, 2));
+        let mut bc = BuildCountMap(before);
+        let target_patterns = make_patterns(vec!["//some/other:target", "//yet/another:target"]);
+        bc.increment(&target_patterns, true);
+        let mut expected = HashMap::new();
+        expected.insert("//some:target".to_owned(), BuildCount::new(1, 1));
+        expected.insert("//some/other:target".to_owned(), BuildCount::new(3, 3));
+        expected.insert("//yet/another:target".to_owned(), BuildCount::new(1, 1));
+        assert_eq!(bc.0, expected);
+
+        Ok(())
+    }
+
+    #[test]
+    fn test_update_empty_input() -> anyhow::Result<()> {
+        let mut before = HashMap::new();
+        before.insert("//some:target".to_owned(), BuildCount::new(1, 1));
+        let expected = before.clone();
+        let mut bc = BuildCountMap(before);
+        let target_patterns = make_patterns(vec![]);
+        bc.increment(&target_patterns, true);
+        assert_eq!(bc.0, expected);
+
+        Ok(())
+    }
+
+    #[test]
+    fn test_min_count_some_value() -> anyhow::Result<()> {
+        let mut data = HashMap::new();
+        data.insert("//some:target1".to_owned(), BuildCount::new(3, 3));
+        data.insert("//some:target2".to_owned(), BuildCount::new(4, 4));
+        data.insert("//some:target3".to_owned(), BuildCount::new(5, 5));
+        let bc = BuildCountMap(data);
+        let target_patterns = make_patterns(vec!["//some:target1", "//some:target2"]);
+        assert_eq!(bc.min_count(&target_patterns), BuildCount::new(3, 3));
+
+        Ok(())
+    }
+
+    #[test]
+    fn test_min_count_ignores_others() -> anyhow::Result<()> {
+        let mut data = HashMap::new();
+        data.insert("//some:target1".to_owned(), BuildCount::new(3, 3));
+        data.insert("//some:target2".to_owned(), BuildCount::new(4, 4));
+        data.insert("//some:target3".to_owned(), BuildCount::new(5, 5));
+        let bc = BuildCountMap(data);
+        let target_patterns = make_patterns(vec!["//some:target2"]);
+        assert_eq!(bc.min_count(&target_patterns), BuildCount::new(4, 4));
+
+        Ok(())
+    }
+
+    #[test]
+    fn test_min_count_empty_data() -> anyhow::Result<()> {
+        let data = HashMap::new();
+        let bc = BuildCountMap(data);
+        assert_eq!(bc.min_count(&make_patterns(vec![])), BuildCount::new(0, 0));
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_read_no_such_file() -> anyhow::Result<()> {
+        let no_such_dir = if cfg!(windows) {
+            "C:\\no\\such\\dir"
+        } else {
+            "/no/such/dir"
+        };
+        let bcm = BuildCountManager::new(AbsNormPathBuf::from(no_such_dir.to_owned())?);
+        let bc = bcm.read(FileName::new("no_such_file")?).await?;
+        assert_eq!(bc.0, HashMap::new());
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_read_normal_file() -> anyhow::Result<()> {
+        let temp_dir = tempfile::tempdir()?;
+        let file_name = "some_file";
+        tokio::fs::write(temp_dir.path().join(file_name), "{\"//some:target\":[1,1]}").await?;
+        let mut expected = HashMap::new();
+        expected.insert("//some:target".to_owned(), BuildCount::new(1, 1));
+        let bcm = BuildCountManager::new(temp_dir.path().to_path_buf().try_into()?);
+        let bc = bcm.read(FileName::new(file_name)?).await?;
+        assert_eq!(bc.0, expected);
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_read_illegal_file_contents() -> anyhow::Result<()> {
+        let temp_dir = tempfile::tempdir()?;
+        let file_name = "some_file";
+        tokio::fs::write(temp_dir.path().join(file_name), "aaa").await?;
+        let bcm = BuildCountManager::new(temp_dir.path().to_path_buf().try_into()?);
+        assert!(bcm.read(FileName::new(file_name)?).await.is_err());
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_write_normal_input() -> anyhow::Result<()> {
+        let temp_dir = tempfile::tempdir()?;
+        let file_name = "some_file";
+        let bcm = BuildCountManager::new(temp_dir.path().to_path_buf().try_into()?);
+        let mut data = HashMap::new();
+        data.insert("//some:target".to_owned(), BuildCount::new(1, 1));
+        bcm.write(&BuildCountMap(data), FileName::new(file_name)?)
+            .await?;
+        assert_eq!(
+            std::str::from_utf8(&tokio::fs::read(temp_dir.path().join(file_name)).await?)?,
+            "{\"//some:target\":{\"successful_build_count\":1,\"attempted_build_count\":1}}"
+        );
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_write_empty_input() -> anyhow::Result<()> {
+        let temp_dir = tempfile::tempdir()?;
+        let file_name = "some_file";
+        let bcm = BuildCountManager::new(temp_dir.path().to_path_buf().try_into()?);
+        let data = HashMap::new();
+        bcm.write(&BuildCountMap(data), FileName::new(file_name)?)
+            .await?;
+        assert_eq!(
+            &tokio::fs::read(temp_dir.path().join(file_name)).await?,
+            b"{}"
+        );
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_increment_normal_input() -> anyhow::Result<()> {
+        let temp_dir = tempfile::tempdir()?;
+        let file_name = "some_file";
+        tokio::fs::write(temp_dir.path().join(file_name), "{\"//some:target\":[1,1]}").await?;
+        let target_patterns = make_patterns(vec!["//some:target", "//some/other:target"]);
+        let bcm = BuildCountManager::new(temp_dir.path().to_path_buf().try_into()?);
+        assert_eq!(
+            bcm.increment(file_name, &target_patterns, true).await?,
+            BuildCount::new(1, 1),
+        );
+        assert_eq!(
+            bcm.increment(file_name, &target_patterns, true).await?,
+            BuildCount::new(2, 2),
+        );
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_increment_on_failure() -> anyhow::Result<()> {
+        let temp_dir = tempfile::tempdir()?;
+        let file_name = "some_file";
+        tokio::fs::write(temp_dir.path().join(file_name), "{\"//some:target\":[1,1]}").await?;
+        let target_patterns = make_patterns(vec!["//some:target", "//some/other:target"]);
+        let bcm = BuildCountManager::new(temp_dir.path().to_path_buf().try_into()?);
+        assert_eq!(
+            bcm.increment(file_name, &target_patterns, true).await?,
+            BuildCount::new(1, 1),
+        );
+        assert_eq!(
+            bcm.increment(file_name, &target_patterns, false).await?,
+            BuildCount::new(1, 2),
+        );
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_increment_empty_input() -> anyhow::Result<()> {
+        let temp_dir = tempfile::tempdir()?;
+        let file_name = "some_file";
+        tokio::fs::write(temp_dir.path().join(file_name), "{}").await?;
+        let target_patterns = make_patterns(vec![]);
+        let bcm = BuildCountManager::new(temp_dir.path().to_path_buf().try_into()?);
+        assert_eq!(
+            bcm.increment(file_name, &target_patterns, true).await?,
+            BuildCount::new(0, 0),
+        );
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_min_count_no_increment() -> anyhow::Result<()> {
+        let temp_dir = tempfile::tempdir()?;
+        let file_name = "some_file";
+        tokio::fs::write(temp_dir.path().join(file_name), "{\"//some:target\":[1,1]}").await?;
+        let target_patterns = make_patterns(vec!["//some:target", "//some/other:target"]);
+        let bcm = BuildCountManager::new(temp_dir.path().to_path_buf().try_into()?);
+        let _ = bcm.increment(file_name, &target_patterns, true).await?;
+        assert_eq!(
+            bcm.min_count(file_name, &target_patterns).await?,
+            BuildCount::new(1, 1),
+        );
+        assert_eq!(
+            bcm.min_count(file_name, &target_patterns).await?,
+            BuildCount::new(1, 1),
+        );
+
+        Ok(())
+    }
+}
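BuildCountManager stores one JSON file per merge base, named `{merge_base}-{BUILD_COUNT_VERSION}`, mapping each target pattern to its counts. The serialized shape asserted by test_write_normal_input can be reproduced with serde_json alone (a sketch; only the field names come from the code above):

    use std::collections::HashMap;

    use serde::Serialize;

    // Mirrors the field names of BuildCount above; everything else is a sketch.
    #[derive(Serialize)]
    struct BuildCount {
        successful_build_count: u64,
        attempted_build_count: u64,
    }

    fn main() -> serde_json::Result<()> {
        let mut m: HashMap<String, BuildCount> = HashMap::new();
        m.insert(
            "//some:target".to_owned(),
            BuildCount { successful_build_count: 1, attempted_build_count: 1 },
        );
        // The same bytes test_write_normal_input expects on disk.
        assert_eq!(
            serde_json::to_string(&m)?,
            r#"{"//some:target":{"successful_build_count":1,"attempted_build_count":1}}"#
        );
        Ok(())
    }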
diff --git a/app/buck2_common/src/buildfiles.rs b/app/buck2_common/src/buildfiles.rs
new file mode 100644
index 0000000000000..bd355d7da3abb
--- /dev/null
+++ b/app/buck2_common/src/buildfiles.rs
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::future::Future;
+use std::sync::Arc;
+
+use buck2_core::cells::name::CellName;
+use buck2_core::fs::paths::file_name::FileNameBuf;
+use dice::CancellationContext;
+use dice::DiceComputations;
+use dice::Key;
+use gazebo::prelude::SliceExt as _;
+use gazebo::prelude::VecExt as _;
+
+use crate::legacy_configs::dice::HasLegacyConfigs;
+use crate::legacy_configs::key::BuckconfigKeyRef;
+use crate::legacy_configs::view::LegacyBuckConfigView;
+
+const DEFAULT_BUILDFILES: &[&str] = &["BUCK.v2", "BUCK"];
+
+/// Deal with the `buildfile.name` key (and `name_v2`)
+pub fn parse_buildfile_name(
+    mut config: impl LegacyBuckConfigView,
+) -> anyhow::Result<Vec<FileNameBuf>> {
+    // For buck2, we support a slightly different mechanism for setting the buildfile to
+    // assist with easier migration from v1 to v2.
+    // First, we check the key `buildfile.name_v2`, if this is provided, we use it.
+    // Second, if that wasn't provided, we will use `buildfile.name` like buck1 does,
+    // but for every entry `FOO` we will insert a preceding `FOO.v2`.
+    // If neither of those is provided, we will use the default of `["BUCK.v2", "BUCK"]`.
+    // This scheme provides a natural progression to buckv2, with the ability to use separate
+    // buildfiles for the two where necessary.
+    let mut base = if let Some(buildfiles_value) =
+        config.parse_list::<String>(BuckconfigKeyRef {
+            section: "buildfile",
+            property: "name_v2",
+        })? {
+        buildfiles_value.into_try_map(FileNameBuf::try_from)?
+    } else if let Some(buildfiles_value) = config.parse_list::<String>(BuckconfigKeyRef {
+        section: "buildfile",
+        property: "name",
+    })? {
+        let mut buildfiles = Vec::new();
+        for buildfile in buildfiles_value {
+            buildfiles.push(FileNameBuf::try_from(format!("{}.v2", buildfile))?);
+            buildfiles.push(FileNameBuf::try_from(buildfile)?);
+        }
+        buildfiles
+    } else {
+        DEFAULT_BUILDFILES.map(|&n| FileNameBuf::try_from(n.to_owned()).unwrap())
+    };
+
+    if let Some(buildfile) = config.parse::<String>(BuckconfigKeyRef {
+        section: "buildfile",
+        property: "extra_for_test",
+    })? {
+        base.push(FileNameBuf::try_from(buildfile)?);
+    }
+
+    Ok(base)
+}
+
+pub trait HasBuildfiles {
+    fn get_buildfiles(
+        &mut self,
+        cell: CellName,
+    ) -> impl Future<Output = anyhow::Result<Arc<[FileNameBuf]>>>;
+}
+
+#[derive(
+    Clone,
+    derive_more::Display,
+    Debug,
+    Hash,
+    Eq,
+    PartialEq,
+    allocative::Allocative
+)]
+#[display("BuildfilesKey({})", self.0)]
+struct BuildfilesKey(CellName);
+
+#[async_trait::async_trait]
+impl Key for BuildfilesKey {
+    type Value = buck2_error::Result<Arc<[FileNameBuf]>>;
+
+    async fn compute(
+        &self,
+        ctx: &mut DiceComputations,
+        _cancellations: &CancellationContext,
+    ) -> Self::Value {
+        let config = ctx.get_legacy_config_on_dice(self.0).await?;
+        Ok(parse_buildfile_name(config.view(ctx))?.into())
+    }
+
+    fn equality(x: &Self::Value, y: &Self::Value) -> bool {
+        match (x, y) {
+            (Ok(x), Ok(y)) => x == y,
+            _ => false,
+        }
+    }
+}
+
+impl HasBuildfiles for DiceComputations<'_> {
+    async fn get_buildfiles(&mut self, cell: CellName) -> anyhow::Result<Arc<[FileNameBuf]>> {
+        Ok(self.compute(&BuildfilesKey(cell)).await??)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use buck2_core::cells::name::CellName;
+    use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf;
+    use buck2_core::fs::project::ProjectRoot;
+    use buck2_core::fs::project_rel_path::ProjectRelativePath;
+    use gazebo::prelude::SliceExt;
+    use indoc::indoc;
+
+    use crate::buildfiles::parse_buildfile_name;
+    use crate::legacy_configs::cells::BuckConfigBasedCells;
+    use crate::legacy_configs::configs::testing::TestConfigParserFileOps;
+
+    fn create_project_filesystem() -> ProjectRoot {
+        #[cfg(not(windows))]
+        let root_path = "/".to_owned();
+        #[cfg(windows)]
+        let root_path = "C:/".to_owned();
+        ProjectRoot::new_unchecked(AbsNormPathBuf::try_from(root_path).unwrap())
+    }
+
+    #[tokio::test]
+    async fn test_buildfiles() -> anyhow::Result<()> {
+        let mut file_ops = TestConfigParserFileOps::new(&[
+            (
+                ".buckconfig",
+                indoc!(
+                    r#"
+                    [cells]
+                    root = .
+                    other = other/
+                    third_party = third_party/
+                    "#
+                ),
+            ),
+            (
+                "other/.buckconfig",
+                indoc!(
+                    r#"
+                    [cells]
+                    other = .
+                    [buildfile]
+                    name = TARGETS
+                    extra_for_test = TARGETS.test
+                    "#
+                ),
+            ),
+            (
+                "third_party/.buckconfig",
+                indoc!(
+                    r#"
+                    [cells]
+                    third_party = .
+                    [buildfile]
+                    name_v2 = OKAY
+                    name = OKAY_v1
+                    "#
+                ),
+            ),
+        ])?;
+
+        let project_fs = create_project_filesystem();
+        let cells = BuckConfigBasedCells::testing_parse_with_file_ops(
+            &project_fs,
+            &mut file_ops,
+            &[],
+            ProjectRelativePath::empty(),
+        )
+        .await?;
+
+        let config = cells
+            .parse_single_cell_with_file_ops(CellName::testing_new("root"), &mut file_ops)
+            .await?;
+        assert_eq!(
+            vec!["BUCK.v2", "BUCK"],
+            parse_buildfile_name(&config)?.map(|f| f.as_str()),
+        );
+
+        let config = cells
+            .parse_single_cell_with_file_ops(CellName::testing_new("other"), &mut file_ops)
+            .await?;
+        assert_eq!(
+            vec!["TARGETS.v2", "TARGETS", "TARGETS.test"],
+            parse_buildfile_name(&config)?.map(|f| f.as_str()),
+        );
+
+        let config = cells
+            .parse_single_cell_with_file_ops(CellName::testing_new("third_party"), &mut file_ops)
+            .await?;
+        assert_eq!(
+            vec!["OKAY"],
+            parse_buildfile_name(&config)?.map(|f| f.as_str()),
+        );
+
+        Ok(())
+    }
+}
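The `buildfile.name_v2` / `buildfile.name` fallback that parse_buildfile_name implements can be stated in isolation (a hypothetical standalone helper; the real function returns FileNameBuf values and also honors `buildfile.extra_for_test`):

    // Standalone sketch of the fallback rule documented in parse_buildfile_name.
    fn buildfile_names(name_v2: Option<Vec<&str>>, name: Option<Vec<&str>>) -> Vec<String> {
        if let Some(v2) = name_v2 {
            // buildfile.name_v2 wins outright.
            v2.into_iter().map(str::to_owned).collect()
        } else if let Some(v1) = name {
            // For every FOO in buildfile.name, insert a preceding FOO.v2.
            v1.into_iter()
                .flat_map(|f| [format!("{f}.v2"), f.to_owned()])
                .collect()
        } else {
            vec!["BUCK.v2".to_owned(), "BUCK".to_owned()]
        }
    }

    fn main() {
        // Matches the "other" cell in the test above (before extra_for_test is appended).
        assert_eq!(buildfile_names(None, Some(vec!["TARGETS"])), ["TARGETS.v2", "TARGETS"]);
        assert_eq!(buildfile_names(None, None), ["BUCK.v2", "BUCK"]);
    }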
diff --git a/app/buck2_common/src/cas_digest.rs b/app/buck2_common/src/cas_digest.rs
index 92b276eeb50b6..acc3d88ef8769 100644
--- a/app/buck2_common/src/cas_digest.rs
+++ b/app/buck2_common/src/cas_digest.rs
@@ -25,13 +25,13 @@ use derivative::Derivative;
 use derive_more::Display;
 use digest::Digest;
 use dupe::Clone_;
+use dupe::Copy_;
 use dupe::Dupe;
 use dupe::Dupe_;
 use num_enum::TryFromPrimitive;
 use once_cell::sync::Lazy;
 use sha1::Sha1;
 use sha2::Sha256;
-use thiserror::Error;
 
 /// The number of bytes required by a SHA-1 hash
 pub const SHA1_SIZE: usize = 20;
@@ -43,7 +43,7 @@ pub const SHA256_SIZE: usize = 32;
 pub const BLAKE3_SIZE: usize = 32;
 
 /// The bytes that make up a file digest.
-#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Allocative, Clone)]
+#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Allocative, Clone, Copy)]
 pub enum RawDigest {
     // TODO: Perhaps this should be represented as a (DigestAlgorithmKind, [0;32])
     Sha1([u8; SHA1_SIZE]),
@@ -65,12 +65,12 @@ impl RawDigest {
         }
     }
 
-    pub fn algorithm(&self) -> DigestAlgorithmKind {
+    pub fn algorithm(&self) -> DigestAlgorithmFamily {
         match self {
-            Self::Sha1(..) => DigestAlgorithmKind::Sha1,
-            Self::Sha256(..) => DigestAlgorithmKind::Sha256,
-            Self::Blake3(..) => DigestAlgorithmKind::Blake3,
-            Self::Blake3Keyed(..) => DigestAlgorithmKind::Blake3Keyed,
+            Self::Sha1(..) => DigestAlgorithmFamily::Sha1,
+            Self::Sha256(..) => DigestAlgorithmFamily::Sha256,
+            Self::Blake3(..) => DigestAlgorithmFamily::Blake3,
+            Self::Blake3Keyed(..) => DigestAlgorithmFamily::Blake3Keyed,
         }
     }
 
@@ -105,7 +105,7 @@ impl fmt::Display for RawDigest {
     }
 }
 
-/// The kind of digest algorithm associated with a digest. This tells you what kind of digest it
+/// The family of digest algorithm associated with a digest. This tells you what kind of digest it
 /// is, but it might not be sufficient in order to actually recreate the digest. For example, this
 /// could contain keyed digests, but then you wouldn't have the key. We use this to store our
 /// digest kind when it's informative-only, like in our materializer state on disk.
@@ -122,23 +122,23 @@ impl fmt::Display for RawDigest {
     Allocative
 )]
 #[repr(u8)]
-pub enum DigestAlgorithmKind {
-    #[display(fmt = "SHA1")]
+pub enum DigestAlgorithmFamily {
+    #[display("SHA1")]
     Sha1,
-    #[display(fmt = "SHA256")]
+    #[display("SHA256")]
     Sha256,
-    #[display(fmt = "BLAKE3")]
+    #[display("BLAKE3")]
     Blake3,
-    #[display(fmt = "BLAKE3-KEYED")]
+    #[display("BLAKE3-KEYED")]
     Blake3Keyed,
 }
 
-#[derive(Error, Debug)]
+#[derive(buck2_error::Error, Debug)]
 #[error("Invalid Digest algorithm: `{0}`")]
-pub struct InvalidDigestAlgorithmKind(String);
+pub struct InvalidDigestAlgorithmFamily(String);
 
-impl std::str::FromStr for DigestAlgorithmKind {
-    type Err = InvalidDigestAlgorithmKind;
+impl std::str::FromStr for DigestAlgorithmFamily {
+    type Err = InvalidDigestAlgorithmFamily;
 
     fn from_str(s: &str) -> Result<Self, Self::Err> {
         if s == "SHA1" {
@@ -157,7 +157,7 @@ impl std::str::FromStr for DigestAlgorithmKind {
             return Ok(Self::Blake3Keyed);
         }
 
-        Err(InvalidDigestAlgorithmKind(s.to_owned()))
+        Err(InvalidDigestAlgorithmFamily(s.to_owned()))
     }
 }
 
@@ -171,12 +171,12 @@ pub enum DigestAlgorithm {
 }
 
 impl DigestAlgorithm {
-    fn kind(self) -> DigestAlgorithmKind {
+    fn family(self) -> DigestAlgorithmFamily {
         match self {
-            Self::Sha1 => DigestAlgorithmKind::Sha1,
-            Self::Sha256 => DigestAlgorithmKind::Sha256,
-            Self::Blake3 => DigestAlgorithmKind::Blake3,
-            Self::Blake3Keyed { .. } => DigestAlgorithmKind::Blake3Keyed,
+            Self::Sha1 => DigestAlgorithmFamily::Sha1,
+            Self::Sha256 => DigestAlgorithmFamily::Sha256,
+            Self::Blake3 => DigestAlgorithmFamily::Blake3,
+            Self::Blake3Keyed { .. } => DigestAlgorithmFamily::Blake3Keyed,
         }
     }
 }
 
@@ -259,8 +259,8 @@ impl fmt::Display for CasDigestConfig {
         write!(
             f,
             "CasDigestConfig(preferred = {}, source preferred = {})",
-            self.preferred_algorithm().kind(),
-            self.source_files_config().preferred_algorithm().kind()
+            self.preferred_algorithm().family(),
+            self.source_files_config().preferred_algorithm().family()
         )
     }
 }
@@ -343,19 +343,23 @@ impl CasDigestConfigInner {
     }
 }
 
-#[derive(Error, Debug)]
+#[derive(buck2_error::Error, Debug)]
 pub enum CasDigestConfigError {
     #[error("At least one algorithm must be enabled")]
     NotConfigured,
     #[error("The preferred source algorithm must be in the algorithms list")]
     InvalidPreferredSourceAlgorithm,
-    #[error("Two algorithms were enabled for the same size: `{}` and `{}`", .0.kind(), .1.kind())]
+    #[error("Two algorithms were enabled for the same size: `{}` and `{}`", .0.family(), .1.family())]
     Conflict(DigestAlgorithm, DigestAlgorithm),
 }
 
-pub struct Digester<Kind> {
+pub struct DataDigester {
     variant: DigesterVariant,
     size: u64,
+}
+
+pub struct Digester<Kind: CasDigestKind> {
+    data: DataDigester,
     kind: PhantomData<Kind>,
 }
 
@@ -366,7 +370,7 @@ enum DigesterVariant {
     Blake3Keyed(Box<blake3::Hasher>), // Same as above
 }
 
-impl<Kind> Digester<Kind> {
+impl DataDigester {
     pub fn update(&mut self, data: &[u8]) {
         // Explicit dynamic dispatch because we need to match on which variant it was.
         match &mut self.variant {
@@ -387,23 +391,23 @@ impl<Kind> Digester<Kind> {
         self.size += data.len() as u64;
     }
 
-    pub fn finalize(self) -> CasDigest<Kind> {
+    pub fn finalize(self) -> CasDigestData {
         match self.variant {
-            DigesterVariant::Sha1(h) => CasDigest::new_sha1(h.finalize().into(), self.size),
-            DigesterVariant::Sha256(h) => CasDigest::new_sha256(h.finalize().into(), self.size),
-            DigesterVariant::Blake3(h) => CasDigest::new_blake3(h.finalize().into(), self.size),
+            DigesterVariant::Sha1(h) => CasDigestData::new_sha1(h.finalize().into(), self.size),
+            DigesterVariant::Sha256(h) => CasDigestData::new_sha256(h.finalize().into(), self.size),
+            DigesterVariant::Blake3(h) => CasDigestData::new_blake3(h.finalize().into(), self.size),
             DigesterVariant::Blake3Keyed(h) => {
-                CasDigest::new_blake3_keyed(h.finalize().into(), self.size)
+                CasDigestData::new_blake3_keyed(h.finalize().into(), self.size)
             }
         }
     }
 
-    pub fn algorithm(&self) -> DigestAlgorithmKind {
+    pub fn algorithm(&self) -> DigestAlgorithmFamily {
         match &self.variant {
-            DigesterVariant::Sha1(..) => DigestAlgorithmKind::Sha1,
-            DigesterVariant::Sha256(..) => DigestAlgorithmKind::Sha256,
-            DigesterVariant::Blake3(..) => DigestAlgorithmKind::Blake3,
-            DigesterVariant::Blake3Keyed(..) => DigestAlgorithmKind::Blake3Keyed,
+            DigesterVariant::Sha1(..) => DigestAlgorithmFamily::Sha1,
+            DigesterVariant::Sha256(..) => DigestAlgorithmFamily::Sha256,
+            DigesterVariant::Blake3(..) => DigestAlgorithmFamily::Blake3,
+            DigesterVariant::Blake3Keyed(..) => DigestAlgorithmFamily::Blake3Keyed,
         }
     }
 
@@ -412,21 +416,87 @@ impl<Kind> Digester<Kind> {
     }
 }
 
+impl<Kind: CasDigestKind> Digester<Kind> {
+    pub fn update(&mut self, data: &[u8]) {
+        self.data.update(data);
+    }
+
+    pub fn finalize(self) -> CasDigest<Kind> {
+        CasDigest {
+            data: self.data.finalize(),
+            kind: PhantomData,
+        }
+    }
+
+    pub fn algorithm(&self) -> DigestAlgorithmFamily {
+        self.data.algorithm()
+    }
+
+    pub fn bytes_read(&self) -> u64 {
+        self.data.bytes_read()
+    }
+}
+
 /// Separate struct to allow us to use `repr(transparent)` below and guarantee an identical
 /// layout.
-#[derive(Display, PartialEq, Eq, PartialOrd, Ord, Hash, Allocative, Clone, Dupe)]
-#[display(fmt = "{}:{}", digest, size)]
-struct CasDigestData {
+#[derive(
+    Display, PartialEq, Eq, PartialOrd, Ord, Hash, Allocative, Clone, Dupe, Copy
+)]
+#[display("{}:{}", digest, size)]
+pub struct CasDigestData {
     size: u64,
     digest: RawDigest,
 }
 
-#[derive(Display, Derivative, Allocative, Clone_, Dupe_)]
+impl CasDigestData {
+    fn new(digest: RawDigest, size: u64) -> Self {
+        CasDigestData { size, digest }
+    }
+
+    pub fn new_sha1(sha1: [u8; SHA1_SIZE], size: u64) -> Self {
+        Self::new(RawDigest::Sha1(sha1), size)
+    }
+
+    pub fn new_sha256(sha256: [u8; SHA256_SIZE], size: u64) -> Self {
+        Self::new(RawDigest::Sha256(sha256), size)
+    }
+
+    pub fn new_blake3(blake3: [u8; BLAKE3_SIZE], size: u64) -> Self {
+        Self::new(RawDigest::Blake3(blake3), size)
+    }
+
+    pub fn new_blake3_keyed(blake3: [u8; BLAKE3_SIZE], size: u64) -> Self {
+        Self::new(RawDigest::Blake3Keyed(blake3), size)
+    }
+
+    pub fn digester(config: CasDigestConfig) -> DataDigester {
+        Self::digester_for_algorithm(config.preferred_algorithm())
+    }
+
+    pub fn digester_for_algorithm(algorithm: DigestAlgorithm) -> DataDigester {
+        let variant = match algorithm {
+            DigestAlgorithm::Sha1 => DigesterVariant::Sha1(Sha1::new()),
+            DigestAlgorithm::Sha256 => DigesterVariant::Sha256(Sha256::new()),
+            DigestAlgorithm::Blake3 => DigesterVariant::Blake3(Box::new(blake3::Hasher::new())),
+            DigestAlgorithm::Blake3Keyed { key } => {
+                DigesterVariant::Blake3Keyed(Box::new(blake3::Hasher::new_keyed(key)))
+            }
+        };
+
+        DataDigester { variant, size: 0 }
+    }
+
+    pub fn raw_digest(&self) -> &RawDigest {
+        &self.digest
+    }
+}
+
+#[derive(Display, Derivative, Allocative, Clone_, Dupe_, Copy_)]
 #[allocative(bound = "")]
 #[derivative(PartialEq, Eq, PartialOrd, Ord, Hash)]
-#[display(fmt = "{}", data)]
+#[display("{}", data)]
 #[repr(transparent)]
-pub struct CasDigest<Kind> {
+pub struct CasDigest<Kind: CasDigestKind> {
     data: CasDigestData,
     #[derivative(
         Hash = "ignore",
@@ -437,23 +507,23 @@ pub struct CasDigest<Kind> {
     kind: PhantomData<Kind>,
 }
 
-impl<Kind> fmt::Debug for CasDigest<Kind> {
+impl<Kind: CasDigestKind> fmt::Debug for CasDigest<Kind> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(f, "{}", self)
     }
 }
 
-impl<Kind> CasDigest<Kind> {
+impl<Kind: CasDigestKind> CasDigest<Kind> {
     pub fn from_digest_bytes(
-        kind: DigestAlgorithmKind,
+        kind: DigestAlgorithmFamily,
         digest: &[u8],
         size: u64,
     ) -> anyhow::Result<Self> {
         Ok(match kind {
-            DigestAlgorithmKind::Sha1 => Self::new_sha1(digest.try_into()?, size),
-            DigestAlgorithmKind::Sha256 => Self::new_sha256(digest.try_into()?, size),
-            DigestAlgorithmKind::Blake3 => Self::new_blake3(digest.try_into()?, size),
-            DigestAlgorithmKind::Blake3Keyed => Self::new_blake3_keyed(digest.try_into()?, size),
+            DigestAlgorithmFamily::Sha1 => Self::new_sha1(digest.try_into()?, size),
+            DigestAlgorithmFamily::Sha256 => Self::new_sha256(digest.try_into()?, size),
+            DigestAlgorithmFamily::Blake3 => Self::new_blake3(digest.try_into()?, size),
+            DigestAlgorithmFamily::Blake3Keyed => Self::new_blake3_keyed(digest.try_into()?, size),
         })
     }
 
@@ -465,47 +535,23 @@ impl<Kind> CasDigest<Kind> {
     }
 
     pub fn new_sha1(sha1: [u8; SHA1_SIZE], size: u64) -> Self {
-        Self {
-            data: CasDigestData {
-                size,
-                digest: RawDigest::Sha1(sha1),
-            },
-            kind: PhantomData,
-        }
+        Self::new(RawDigest::Sha1(sha1), size)
     }
 
     pub fn new_sha256(sha256: [u8; SHA256_SIZE], size: u64) -> Self {
-        Self {
-            data: CasDigestData {
-                size,
-                digest: RawDigest::Sha256(sha256),
-            },
-            kind: PhantomData,
-        }
+        Self::new(RawDigest::Sha256(sha256), size)
     }
 
     pub fn new_blake3(blake3: [u8; BLAKE3_SIZE], size: u64) -> Self {
-        Self {
-            data: CasDigestData {
-                size,
-                digest: RawDigest::Blake3(blake3),
-            },
-            kind: PhantomData,
-        }
+        Self::new(RawDigest::Blake3(blake3), size)
     }
 
     pub fn new_blake3_keyed(blake3: [u8; BLAKE3_SIZE], size: u64) -> Self {
-        Self {
-            data: CasDigestData {
-                size,
-                digest: RawDigest::Blake3Keyed(blake3),
-            },
-            kind: PhantomData,
-        }
+        Self::new(RawDigest::Blake3Keyed(blake3), size)
     }
 
     pub fn raw_digest(&self) -> &RawDigest {
-        &self.data.digest
+        self.data.raw_digest()
     }
 
     pub fn size(&self) -> u64 {
@@ -566,18 +612,8 @@ impl<Kind> CasDigest<Kind> {
     }
 
     pub fn digester_for_algorithm(algorithm: DigestAlgorithm) -> Digester<Kind> {
-        let variant = match algorithm {
-            DigestAlgorithm::Sha1 => DigesterVariant::Sha1(Sha1::new()),
-            DigestAlgorithm::Sha256 => DigesterVariant::Sha256(Sha256::new()),
-            DigestAlgorithm::Blake3 => DigesterVariant::Blake3(Box::new(blake3::Hasher::new())),
-            DigestAlgorithm::Blake3Keyed { key } => {
-                DigesterVariant::Blake3Keyed(Box::new(blake3::Hasher::new_keyed(key)))
-            }
-        };
-
         Digester {
-            variant,
-            size: 0,
+            data: CasDigestData::digester_for_algorithm(algorithm),
             kind: PhantomData,
         }
     }
@@ -616,7 +652,7 @@ impl<Kind> CasDigest<Kind> {
         Ok(digester.finalize())
     }
 
-    pub fn coerce<NewKind>(self) -> CasDigest<NewKind> {
+    pub fn coerce<NewKind: CasDigestKind>(self) -> CasDigest<NewKind> {
         CasDigest {
             data: self.data,
             kind: PhantomData,
@@ -624,19 +660,19 @@ impl<Kind> CasDigest<Kind> {
     }
 }
 
-pub trait TrackedCasDigestKind: Sized + 'static {
+pub trait CasDigestKind: Sized + 'static {
     /// This needs to be a concrete implementation since we share the empty instance in a static
     /// but we can't have static generics.
     fn empty_digest(config: CasDigestConfig) -> Option<TrackedCasDigest<Self>>;
 }
 
 #[derive(Display)]
-#[display(fmt = "{}", "hex::encode(&of.raw_digest().as_bytes()[0..4])")]
-pub struct TinyDigest<'a, Kind> {
+#[display("{}", hex::encode(&of.raw_digest().as_bytes()[0..4]))]
+pub struct TinyDigest<'a, Kind: CasDigestKind> {
     of: &'a CasDigest<Kind>,
 }
 
-#[derive(Error, Debug)]
+#[derive(buck2_error::Error, Debug)]
 pub enum CasDigestParseError {
     #[error("The digest is missing a size separator, it should look like `HASH:SIZE`")]
     MissingSizeSeparator,
@@ -663,19 +699,19 @@ pub enum CasDigestParseError {
 /// contents.
 #[derive(Allocative)]
 #[allocative(bound = "")]
-struct TrackedCasDigestInner<Kind> {
+struct TrackedCasDigestInner<Kind: CasDigestKind> {
     data: CasDigest<Kind>,
     expires: AtomicI64,
 }
 
 #[derive(Display, Dupe_, Allocative)]
 #[allocative(bound = "")]
-#[display(fmt = "{}", "self.data()")]
-pub struct TrackedCasDigest<Kind> {
+#[display("{}", self.data())]
+pub struct TrackedCasDigest<Kind: CasDigestKind> {
     inner: Arc<TrackedCasDigestInner<Kind>>,
 }
 
-impl<Kind> Clone for TrackedCasDigest<Kind> {
+impl<Kind: CasDigestKind> Clone for TrackedCasDigest<Kind> {
     fn clone(&self) -> Self {
         Self {
             inner: self.inner.dupe(),
@@ -683,45 +719,45 @@ impl<Kind> Clone for TrackedCasDigest<Kind> {
     }
 }
 
-impl<Kind> Borrow<CasDigest<Kind>> for TrackedCasDigest<Kind> {
+impl<Kind: CasDigestKind> Borrow<CasDigest<Kind>> for TrackedCasDigest<Kind> {
     fn borrow(&self) -> &CasDigest<Kind> {
         self.data()
     }
 }
 
-impl<'a, Kind> Borrow<CasDigest<Kind>> for &'a TrackedCasDigest<Kind> {
+impl<'a, Kind: CasDigestKind> Borrow<CasDigest<Kind>> for &'a TrackedCasDigest<Kind> {
     fn borrow(&self) -> &CasDigest<Kind> {
         self.data()
     }
 }
 
-impl<Kind> PartialOrd for TrackedCasDigest<Kind> {
+impl<Kind: CasDigestKind> PartialOrd for TrackedCasDigest<Kind> {
     fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
         self.data().partial_cmp(other.data())
     }
 }
 
-impl<Kind> Ord for TrackedCasDigest<Kind> {
+impl<Kind: CasDigestKind> Ord for TrackedCasDigest<Kind> {
     fn cmp(&self, other: &Self) -> std::cmp::Ordering {
         self.data().cmp(other.data())
     }
 }
 
-impl<Kind> PartialEq for TrackedCasDigest<Kind> {
+impl<Kind: CasDigestKind> PartialEq for TrackedCasDigest<Kind> {
     fn eq(&self, other: &Self) -> bool {
         self.data().eq(other.data())
     }
 }
 
-impl<Kind> Eq for TrackedCasDigest<Kind> {}
+impl<Kind: CasDigestKind> Eq for TrackedCasDigest<Kind> {}
 
-impl<Kind> Hash for TrackedCasDigest<Kind> {
+impl<Kind: CasDigestKind> Hash for TrackedCasDigest<Kind> {
     fn hash<H: Hasher>(&self, state: &mut H) {
         self.data().hash(state)
     }
 }
 
-impl<Kind> fmt::Debug for TrackedCasDigest<Kind> {
+impl<Kind: CasDigestKind> fmt::Debug for TrackedCasDigest<Kind> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(
             f,
@@ -732,12 +768,17 @@ impl<Kind> fmt::Debug for TrackedCasDigest<Kind> {
     }
 }
 
-impl<Kind> buck2_core::directory::DirectoryDigest for TrackedCasDigest<Kind> {}
+impl<Kind: CasDigestKind> buck2_core::directory_digest::DirectoryDigest for TrackedCasDigest<Kind> {}
+
+impl<Kind: CasDigestKind> buck2_core::directory_digest::InternableDirectoryDigest
+    for TrackedCasDigest<Kind>
+{
+}
 
-impl<Kind> TrackedCasDigest<Kind> {
+impl<Kind: CasDigestKind> TrackedCasDigest<Kind> {
     pub fn new(data: CasDigest<Kind>, config: CasDigestConfig) -> Self
     where
-        Kind: TrackedCasDigestKind,
+        Kind: CasDigestKind,
     {
         if data.size() == 0 {
             return Self::empty(config);
@@ -757,7 +798,7 @@ impl<Kind> TrackedCasDigest<Kind> {
         config: CasDigestConfig,
     ) -> Self
     where
-        Kind: TrackedCasDigestKind,
+        Kind: CasDigestKind,
     {
         let res = Self::new(data, config);
         res.update_expires(expiry);
@@ -766,7 +807,7 @@ impl<Kind> TrackedCasDigest<Kind> {
 
     pub fn empty(config: CasDigestConfig) -> Self
     where
-        Kind: TrackedCasDigestKind,
+        Kind: CasDigestKind,
     {
         match Kind::empty_digest(config) {
             Some(o) => o,
@@ -781,7 +822,7 @@ impl<Kind> TrackedCasDigest<Kind> {
 
     pub fn from_content(bytes: &[u8], config: CasDigestConfig) -> Self
     where
-        Kind: TrackedCasDigestKind,
+        Kind: CasDigestKind,
     {
         if bytes.is_empty() {
             return Self::empty(config);
@@ -876,13 +917,14 @@ pub mod testing {
 
 #[cfg(test)]
 mod tests {
     use super::*;
+    use crate::file_ops::FileDigestKind;
 
     #[test]
     fn test_digest_from_str() {
         let s = "0000000000000000000000000000000000000000:123";
         let config = CasDigestConfig::testing_default();
         assert_eq!(
-            CasDigest::<()>::parse_digest(s, config)
+            CasDigest::<FileDigestKind>::parse_digest(s, config)
                 .unwrap()
                 .0
                 .to_string(),
@@ -895,21 +937,27 @@ mod tests {
         let content = &b"foo"[..];
 
         assert_eq!(
-            CasDigest::<()>::from_reader_for_algorithm(content, DigestAlgorithm::Sha1)
+            CasDigest::<FileDigestKind>::from_reader_for_algorithm(content, DigestAlgorithm::Sha1)
                 .unwrap()
                 .to_string(),
             "0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33:3"
         );
         assert_eq!(
-            CasDigest::<()>::from_reader_for_algorithm(content, DigestAlgorithm::Sha256)
-                .unwrap()
-                .to_string(),
+            CasDigest::<FileDigestKind>::from_reader_for_algorithm(
+                content,
+                DigestAlgorithm::Sha256
+            )
+            .unwrap()
+            .to_string(),
             "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae:3"
         );
         assert_eq!(
-            CasDigest::<()>::from_reader_for_algorithm(content, DigestAlgorithm::Blake3)
-                .unwrap()
-                .to_string(),
+            CasDigest::<FileDigestKind>::from_reader_for_algorithm(
+                content,
+                DigestAlgorithm::Blake3
+            )
+            .unwrap()
+            .to_string(),
             "04e0bb39f30b1a3feb89f536c93be15055482df748674b00d26e5a75777702e9:3"
         );
     }
 
@@ -919,17 +967,24 @@ mod tests {
         let content = &b"foo"[..];
 
         assert_eq!(
-            CasDigest::<()>::from_content_for_algorithm(content, DigestAlgorithm::Sha1).to_string(),
+            CasDigest::<FileDigestKind>::from_content_for_algorithm(content, DigestAlgorithm::Sha1)
+                .to_string(),
             "0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33:3"
         );
         assert_eq!(
-            CasDigest::<()>::from_content_for_algorithm(content, DigestAlgorithm::Sha256)
-                .to_string(),
+            CasDigest::<FileDigestKind>::from_content_for_algorithm(
+                content,
+                DigestAlgorithm::Sha256
+            )
+            .to_string(),
             "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae:3"
        );
         assert_eq!(
-            CasDigest::<()>::from_content_for_algorithm(content, DigestAlgorithm::Blake3)
-                .to_string(),
+            CasDigest::<FileDigestKind>::from_content_for_algorithm(
+                content,
+                DigestAlgorithm::Blake3
+            )
+            .to_string(),
             "04e0bb39f30b1a3feb89f536c93be15055482df748674b00d26e5a75777702e9:3"
         );
     }
 
@@ -939,58 +994,64 @@ mod tests {
         let content = &b"foo"[..];
 
         assert_eq!(
-            CasDigest::<()>::from_content(content, testing::sha1(),).to_string(),
+            CasDigest::<FileDigestKind>::from_content(content, testing::sha1(),).to_string(),
             "0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33:3"
         );
         assert_eq!(
-            CasDigest::<()>::from_content(content, testing::sha256(),).to_string(),
+            CasDigest::<FileDigestKind>::from_content(content, testing::sha256(),).to_string(),
             "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae:3"
         );
         assert_eq!(
-            CasDigest::<()>::from_content(content, testing::blake3(),).to_string(),
+            CasDigest::<FileDigestKind>::from_content(content, testing::blake3(),).to_string(),
             "04e0bb39f30b1a3feb89f536c93be15055482df748674b00d26e5a75777702e9:3"
         );
     }
 
     #[test]
     fn test_parse_digest() {
-        let sha1 = CasDigest::<()>::parse_digest(
+        let sha1 = CasDigest::<FileDigestKind>::parse_digest(
             "0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33:3",
             testing::sha256_sha1(),
         )
         .unwrap()
         .0;
-        assert_eq!(sha1.raw_digest().algorithm(), DigestAlgorithmKind::Sha1);
+        assert_eq!(sha1.raw_digest().algorithm(), DigestAlgorithmFamily::Sha1);
         assert_eq!(
             sha1.to_string(),
             "0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33:3"
         );
 
-        let sha256 = CasDigest::<()>::parse_digest(
+        let sha256 = CasDigest::<FileDigestKind>::parse_digest(
             "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae:3",
             testing::sha1_sha256(),
         )
         .unwrap()
         .0;
-        assert_eq!(sha256.raw_digest().algorithm(), DigestAlgorithmKind::Sha256);
+        assert_eq!(
+            sha256.raw_digest().algorithm(),
+            DigestAlgorithmFamily::Sha256
+        );
         assert_eq!(
             sha256.to_string(),
             "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae:3"
         );
 
-        let blake3 = CasDigest::<()>::parse_digest(
+        let blake3 = CasDigest::<FileDigestKind>::parse_digest(
             "04e0bb39f30b1a3feb89f536c93be15055482df748674b00d26e5a75777702e9:3",
             testing::sha1_blake3(),
         )
         .unwrap()
         .0;
-        assert_eq!(blake3.raw_digest().algorithm(), DigestAlgorithmKind::Blake3);
+        assert_eq!(
+            blake3.raw_digest().algorithm(),
+            DigestAlgorithmFamily::Blake3
+        );
         assert_eq!(
             blake3.to_string(),
             "04e0bb39f30b1a3feb89f536c93be15055482df748674b00d26e5a75777702e9:3"
         );
 
-        let blake3_keyed = CasDigest::<()>::parse_digest(
+        let blake3_keyed = CasDigest::<FileDigestKind>::parse_digest(
             "04e0bb39f30b1a3feb89f536c93be15055482df748674b00d26e5a75777702e9:3",
             testing::blake3_keyed(),
         )
@@ -998,7 +1059,7 @@ mod tests {
         .0;
         assert_eq!(
             blake3_keyed.raw_digest().algorithm(),
-            DigestAlgorithmKind::Blake3Keyed
+            DigestAlgorithmFamily::Blake3Keyed
         );
         assert_eq!(
             blake3_keyed.to_string(),
@@ -1009,10 +1070,10 @@ mod tests {
     #[test]
     fn test_digest_algorithm_kind_roundtrip() {
         for v in [
-            DigestAlgorithmKind::Sha1,
-            DigestAlgorithmKind::Sha256,
-            DigestAlgorithmKind::Blake3,
-            DigestAlgorithmKind::Blake3Keyed,
+            DigestAlgorithmFamily::Sha1,
+            DigestAlgorithmFamily::Sha256,
+            DigestAlgorithmFamily::Blake3,
+            DigestAlgorithmFamily::Blake3Keyed,
         ] {
             assert_eq!(v, v.to_string().parse().unwrap());
         }
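All of the digests in these tests render as `HASH:SIZE`. That rendering is easy to check independently with the `sha1` and `hex` crates, which this crate already depends on (a sketch; the expected string is taken from the tests above):

    // Sketch: recomputing the SHA-1 line from the tests above.
    use sha1::{Digest, Sha1};

    fn main() {
        let content = b"foo";
        let mut hasher = Sha1::new();
        hasher.update(content);
        // CasDigest displays as "<hex hash>:<byte size>".
        let rendered = format!("{}:{}", hex::encode(hasher.finalize()), content.len());
        assert_eq!(rendered, "0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33:3");
    }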
diff --git a/app/buck2_client_ctx/src/chunk_reader.rs b/app/buck2_common/src/chunk_reader.rs
similarity index 79%
rename from app/buck2_client_ctx/src/chunk_reader.rs
rename to app/buck2_common/src/chunk_reader.rs
index 1ac37b814b269..1b5586c874289 100644
--- a/app/buck2_client_ctx/src/chunk_reader.rs
+++ b/app/buck2_common/src/chunk_reader.rs
@@ -8,7 +8,7 @@
  */
 
 use anyhow::Context as _;
-use buck2_core::env_helper::EnvHelper;
+use buck2_core::buck2_env_anyhow;
 use tokio::io::AsyncRead;
 use tokio::io::AsyncReadExt;
 
@@ -19,11 +19,13 @@ pub struct ChunkReader {
 
 impl ChunkReader {
     pub fn new() -> anyhow::Result<Self> {
-        static CHUNK_SIZE: EnvHelper<u64> = EnvHelper::new("BUCK2_TEST_MANIFOLD_CHUNK_BYTES");
-
-        Ok(Self {
-            chunk_size: CHUNK_SIZE.get_copied()?.unwrap_or(8 * 1024 * 1024),
-        })
+        let chunk_size = buck2_env_anyhow!(
+            "BUCK2_TEST_MANIFOLD_CHUNK_BYTES",
+            type=u64,
+            applicability=testing,
+        )?
+        .unwrap_or(8 * 1024 * 1024);
+        Ok(ChunkReader { chunk_size })
     }
 
     pub async fn read<R>(&self, reader: &mut R) -> Result<Vec<u8>, anyhow::Error>
diff --git a/app/buck2_common/src/client_utils.rs b/app/buck2_common/src/client_utils.rs
index 32315b1bcdb3e..b041dfc5cd173 100644
--- a/app/buck2_common/src/client_utils.rs
+++ b/app/buck2_common/src/client_utils.rs
@@ -97,7 +97,7 @@ pub async fn get_channel_tcp(socket_addr: Ipv4Addr, port: u16) -> anyhow::Result
         .with_context(|| format!("failed to connect to port {}", port))
 }
 
-#[derive(thiserror::Error, Debug)]
+#[derive(buck2_error::Error, Debug)]
 pub enum RetryError {
     #[error("Timed out after {0:.2}s")]
     Timeout(f64),
diff --git a/app/buck2_common/src/daemon_dir.rs b/app/buck2_common/src/daemon_dir.rs
index 6bc0281a158a3..518f2a8571e78 100644
--- a/app/buck2_common/src/daemon_dir.rs
+++ b/app/buck2_common/src/daemon_dir.rs
@@ -12,7 +12,7 @@ use buck2_core::fs::paths::file_name::FileName;
 
 /// `~/.buck/buckd/repo-path` directory.
 #[derive(Debug, Clone, derive_more::Display)]
-#[display(fmt = "{}", path.display())]
+#[display("{}", path.display())]
 pub struct DaemonDir {
     pub path: AbsNormPathBuf,
 }
diff --git a/app/buck2_common/src/dice/mod.rs b/app/buck2_common/src/dice.rs
similarity index 100%
rename from app/buck2_common/src/dice/mod.rs
rename to app/buck2_common/src/dice.rs
diff --git a/app/buck2_common/src/dice/cells.rs b/app/buck2_common/src/dice/cells.rs
index c97a1666f3142..874e9c22beaaf 100644
--- a/app/buck2_common/src/dice/cells.rs
+++ b/app/buck2_common/src/dice/cells.rs
@@ -11,18 +11,37 @@
 
 use allocative::Allocative;
 use async_trait::async_trait;
+use buck2_core::cells::name::CellName;
+use buck2_core::cells::CellAliasResolver;
 use buck2_core::cells::CellResolver;
+use buck2_core::fs::project_rel_path::ProjectRelativePath;
 use derive_more::Display;
+use dice::CancellationContext;
 use dice::DiceComputations;
 use dice::DiceTransactionUpdater;
 use dice::InjectedKey;
+use dice::InvalidationSourcePriority;
+use dice::Key;
 use dupe::Dupe;
 
+use crate::legacy_configs::cells::BuckConfigBasedCells;
+use crate::legacy_configs::dice::HasLegacyConfigs;
+
 #[async_trait]
 pub trait HasCellResolver {
-    async fn get_cell_resolver(&self) -> anyhow::Result<CellResolver>;
+    async fn get_cell_resolver(&mut self) -> anyhow::Result<CellResolver>;
+
+    async fn is_cell_resolver_key_set(&mut self) -> anyhow::Result<bool>;
+
+    async fn get_cell_alias_resolver(
+        &mut self,
+        cell: CellName,
+    ) -> anyhow::Result<CellAliasResolver>;
 
-    async fn is_cell_resolver_key_set(&self) -> anyhow::Result<bool>;
+    async fn get_cell_alias_resolver_for_dir(
+        &mut self,
+        dir: &ProjectRelativePath,
+    ) -> anyhow::Result<CellAliasResolver>;
 }
 
 pub trait SetCellResolver {
@@ -32,7 +51,7 @@ pub trait SetCellResolver {
 }
 
 #[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)]
-#[display(fmt = "{:?}", self)]
+#[display("{:?}", self)]
 struct CellResolverKey;
 
 impl InjectedKey for CellResolverKey {
@@ -45,19 +64,74 @@ impl InjectedKey for CellResolverKey {
             (_, _) => false,
         }
     }
+
+    fn invalidation_source_priority() -> InvalidationSourcePriority {
+        InvalidationSourcePriority::Ignored
+    }
 }
 
 #[async_trait]
-impl HasCellResolver for DiceComputations {
-    async fn get_cell_resolver(&self) -> anyhow::Result<CellResolver> {
+impl HasCellResolver for DiceComputations<'_> {
+    async fn get_cell_resolver(&mut self) -> anyhow::Result<CellResolver> {
         self.compute(&CellResolverKey).await?.ok_or_else(|| {
             panic!("Tried to retrieve CellResolverKey from the graph, but key has None value")
         })
     }
 
-    async fn is_cell_resolver_key_set(&self) -> anyhow::Result<bool> {
+    async fn is_cell_resolver_key_set(&mut self) -> anyhow::Result<bool> {
         Ok(self.compute(&CellResolverKey).await?.is_some())
     }
+
+    async fn get_cell_alias_resolver(
+        &mut self,
+        cell: CellName,
+    ) -> anyhow::Result<CellAliasResolver> {
+        Ok(self.compute(&CellAliasResolverKey(cell)).await??)
+    }
+
+    async fn get_cell_alias_resolver_for_dir(
+        &mut self,
+        dir: &ProjectRelativePath,
+    ) -> anyhow::Result<CellAliasResolver> {
+        let cell = self.get_cell_resolver().await?.find(dir)?;
+        self.get_cell_alias_resolver(cell).await
+    }
+}
+
+/// Only used for cell alias resolvers parsed within dice, currently those for external cells
+#[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)]
+struct CellAliasResolverKey(CellName);
+
+#[async_trait]
+impl Key for CellAliasResolverKey {
+    type Value = buck2_error::Result<CellAliasResolver>;
+
+    async fn compute(
+        &self,
+        ctx: &mut DiceComputations,
+        _cancellations: &CancellationContext,
+    ) -> Self::Value {
+        let resolver = ctx.get_cell_resolver().await?;
+        let root_aliases = resolver.root_cell_cell_alias_resolver();
+        let config = ctx.get_legacy_config_for_cell(self.0).await?;
+        // Cell alias resolvers that are parsed within dice differ from those outside of dice in
+        // that they cannot create new cells, and so respect only their `cell_aliases` section, not
+        // their `cells` section. This is the expected behavior for external cells, moving other
+        // cell resolver parsing into dice would require this code to be adjusted.
+        CellAliasResolver::new_for_non_root_cell(
+            self.0,
+            root_aliases,
+            BuckConfigBasedCells::get_cell_aliases_from_config(&config)?,
+        )
+        .map_err(Into::into)
+    }
+
+    fn equality(x: &Self::Value, y: &Self::Value) -> bool {
+        match (x, y) {
+            (Ok(x), Ok(y)) => x == y,
+            (_, _) => false,
+        }
+    }
 }
 
 impl SetCellResolver for DiceTransactionUpdater {
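The CycleGuard rework below surfaces cycles between dice keys as errors rather than hangs. Independent of dice, the underlying idea is plain depth-first cycle detection over a dependency graph, sketched here with the FooKey(1) -> BarKey(a) -> FooKey(1) example from the doc comment (standalone code, not the dice API):

    use std::collections::{HashMap, HashSet};

    // DFS with an "on stack" set: reaching a node that is already on the
    // stack closes a cycle.
    fn has_cycle(deps: &HashMap<&str, Vec<&str>>) -> bool {
        fn visit<'a>(
            n: &'a str,
            deps: &HashMap<&'a str, Vec<&'a str>>,
            on_stack: &mut HashSet<&'a str>,
            done: &mut HashSet<&'a str>,
        ) -> bool {
            if done.contains(n) {
                return false;
            }
            if !on_stack.insert(n) {
                return true; // already on the stack: cycle
            }
            let cyclic = deps
                .get(n)
                .map_or(false, |ds| ds.iter().any(|&d| visit(d, deps, on_stack, done)));
            on_stack.remove(n);
            done.insert(n);
            cyclic
        }
        let mut on_stack = HashSet::new();
        let mut done = HashSet::new();
        deps.keys().any(|&k| visit(k, deps, &mut on_stack, &mut done))
    }

    fn main() {
        let mut deps: HashMap<&str, Vec<&str>> = HashMap::new();
        deps.insert("FooKey(1)", vec!["BarKey(a)"]);
        deps.insert("BarKey(a)", vec!["FooKey(1)"]);
        assert!(has_cycle(&deps));
    }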
+ } + + async fn get_cell_alias_resolver_for_dir( + &mut self, + dir: &ProjectRelativePath, + ) -> anyhow::Result { + let cell = self.get_cell_resolver().await?.find(dir)?; + self.get_cell_alias_resolver(cell).await + } +} + +/// Only used for cell alias resolvers parsed within dice, currently those for external cells +#[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)] +struct CellAliasResolverKey(CellName); + +#[async_trait] +impl Key for CellAliasResolverKey { + type Value = buck2_error::Result; + + async fn compute( + &self, + ctx: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> Self::Value { + let resolver = ctx.get_cell_resolver().await?; + let root_aliases = resolver.root_cell_cell_alias_resolver(); + let config = ctx.get_legacy_config_for_cell(self.0).await?; + // Cell alias resolvers that are parsed within dice differ from those outside of dice in + // that they cannot create new cells, and so respect only their `cell_aliases` section, not + // their `cells` section. This is the expected behavior for external cells, moving other + // cell resolver parsing into dice would require this code to be adjusted. + CellAliasResolver::new_for_non_root_cell( + self.0, + root_aliases, + BuckConfigBasedCells::get_cell_aliases_from_config(&config)?, + ) + .map_err(Into::into) + } + + fn equality(x: &Self::Value, y: &Self::Value) -> bool { + match (x, y) { + (Ok(x), Ok(y)) => x == y, + (_, _) => false, + } + } } impl SetCellResolver for DiceTransactionUpdater { diff --git a/app/buck2_common/src/dice/cycles.rs b/app/buck2_common/src/dice/cycles.rs index e23818517b76a..a777247cb44e5 100644 --- a/app/buck2_common/src/dice/cycles.rs +++ b/app/buck2_common/src/dice/cycles.rs @@ -7,28 +7,29 @@ * of this source tree. */ -use std::any::Any; use std::fmt::Debug; +use std::sync::Arc; use allocative::Allocative; use async_trait::async_trait; +use buck2_futures::cancellation::CancellationContext; use buck2_util::cycle_detector::CycleDescriptor; use buck2_util::cycle_detector::LazyCycleDetector; use buck2_util::cycle_detector::LazyCycleDetectorGuard; use derive_more::Display; use dice::DiceComputations; +use dice::DynKey; use dice::Key; use dice::UserCycleDetector; use dice::UserCycleDetectorGuard; use futures::Future; -use more_futures::cancellation::CancellationContext; use tracing::debug; /// Additional requirement for a CycleDescriptor to be used for defining a Dice UserCycleDetector through /// the CycleDetectorAdapter. Simply requires converting the Dice Key to the CycleDescriptor::Key type. pub trait CycleAdapterDescriptor: CycleDescriptor { - /// Will be provided a &dyn Any for a Dice Key implementation. - fn to_key(key: &dyn Any) -> Option; + /// Will be provided a &DynKey for a Dice Key implementation. + fn to_key(key: &DynKey) -> Option; } /// This allows using the LazyCycleDetector as a Dice UserCycleDetector. All it needs is an implementation of the normal @@ -38,17 +39,36 @@ pub struct CycleDetectorAdapter { inner: LazyCycleDetector, } -#[async_trait] -pub trait CycleGuard { - async fn guard_this + Send>( - ctx: &DiceComputations, - fut: Fut, - ) -> anyhow::Result>; +pub struct CycleGuardResult(anyhow::Result>); + +impl CycleGuardResult { + /// Converts the result from GuardThis into an anyhow::Result. + /// + /// This is a separate function to get the borrowing of the &mut DiceComputations, (which in + /// guard_this will have been borrowed by the passed in future). 
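The reason `guard_this` no longer returns a plain result is borrow management: the guarded future holds the `&mut DiceComputations`, so the poison-key dep in `into_result` has to happen in a second step once that borrow is released. A hedged usage sketch (`MyDescriptor` and `compute_deps` are placeholders, and the generic parameters are reconstructed from context):

```rust
let guard: CycleGuard<MyDescriptor> = CycleGuard::new(ctx)?;
let pending = guard.guard_this(compute_deps(ctx)).await; // future borrows ctx
let value = pending.into_result(ctx).await??;            // ctx is free again here
```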
+ pub async fn into_result(self, ctx: &mut DiceComputations<'_>) -> anyhow::Result> { + match &self.0 { + Ok(Ok(_)) => {} + _ => { + // The cycle detector either hit an error or it detected a cycle. In either case, we + // want to make sure that dice doesn't cache this node. To do that, we add a dep on our + // PoisonedDueToDetectedCycle key. We shouldn't hit a dice error, but we know we're already + // returning an error so just ignore it. + let _unused = ctx.compute(&PoisonedDueToDetectedCycleKey).await; + } + } + self.0 + } } -#[async_trait] -impl CycleGuard for D { - /// Use this to wrap futures waiting on dependencies where you want to detect cycles. All dice keys involved +pub struct CycleGuard(Option>>); + +impl CycleGuard { + pub fn new(ctx: &DiceComputations<'_>) -> anyhow::Result { + Ok(Self(ctx.cycle_guard()?)) + } + + /// Use this to wrap a computation waiting on dependencies where you want to detect cycles. All dice keys involved /// in the cycle must be supported by the CycleAdapterDescriptor implementation. If the cycle is FooKey(1) -> /// BarKey(a) -> FooKey(1) and the descriptor only support FooKey, the cycle won't be detected. /// @@ -59,24 +79,19 @@ impl CycleGuard for D { /// /// It's probably the case that the keys in the cycle should treat the cycle error case as invalid (in the sense /// of Dice Key::validity()). - async fn guard_this + Send>( - ctx: &DiceComputations, + pub async fn guard_this + Send>( + &self, fut: Fut, - ) -> anyhow::Result> { - match ctx.cycle_guard::>()? { - Some(v) => match v.guard.guard_this(fut).await { - Ok(Ok(v)) => Ok(Ok(v)), - v => { - // The cycle detector either hit an error or it detected a cycle. In either case, we - // want to make sure that dice doesn't cache this node. To do that, we add a dep on our - // PoisonedDueToDetectedCycle key. We shouldn't hit a dice error, but we know we're already - // returning an error so just ignore it. - let _unused = ctx.compute(&PoisonedDueToDetectedCycleKey).await; - v - } - }, - None => Ok(Ok(fut.await)), - } + ) -> CycleGuardResult { + #[allow(clippy::redundant_closure_call)] + let res: anyhow::Result> = (|| async move { + match &self.0 { + Some(v) => v.guard.guard_this(fut).await, + None => Ok(Ok(fut.await)), + } + })() + .await; + CycleGuardResult(res) } } @@ -85,7 +100,7 @@ impl CycleGuard for D { /// flow of data that is potentially not tracked by dice, and while we may be able to identify those /// and fix them it'll still be fragile and its best to just make sure they aren't cached). 
#[derive(Allocative, Debug, Display, Clone, PartialEq, Eq, Hash)] -#[display(fmt = "poisoned_due_to_detected_cycle")] +#[display("poisoned_due_to_detected_cycle")] struct PoisonedDueToDetectedCycleKey; #[async_trait] @@ -126,16 +141,16 @@ impl CycleDescriptor for CycleDetectorAdapter { } impl UserCycleDetector for CycleDetectorAdapter { - fn start_computing_key(&self, key: &dyn Any) -> Option> { + fn start_computing_key(&self, key: &DynKey) -> Option> { match D::to_key(key) { None => None, - Some(v) => Some(Box::new(CycleAdapterGuard { + Some(v) => Some(Arc::new(CycleAdapterGuard { guard: self.inner.start(v), })), } } - fn finished_computing_key(&self, key: &dyn Any) { + fn finished_computing_key(&self, key: &DynKey) { if let Some(v) = D::to_key(key) { debug!("finish computing key {}", v); self.inner.finish(v); @@ -148,16 +163,12 @@ pub struct CycleAdapterGuard { } impl UserCycleDetectorGuard for CycleAdapterGuard { - fn add_edge(&self, key: &dyn Any) { + fn add_edge(&self, key: &DynKey) { if let Some(k) = D::to_key(key) { self.guard.add_edge(k); } } - fn as_any(&self) -> &dyn Any { - self - } - fn type_name(&self) -> &'static str { std::any::type_name::() } @@ -167,7 +178,7 @@ impl UserCycleDetectorGuard for CycleAdapterGuard pub struct PairDiceCycleDetector(pub A, pub B); impl UserCycleDetector for PairDiceCycleDetector { - fn start_computing_key(&self, key: &dyn Any) -> Option> { + fn start_computing_key(&self, key: &DynKey) -> Option> { // Right now, only one of the inner detectors is allowed to claim a key. We could feasibly change that, but it's a bit trickier. if let Some(v) = self.0.start_computing_key(key) { return Some(v); @@ -178,7 +189,7 @@ impl UserCycleDetector for PairDiceC None } - fn finished_computing_key(&self, key: &dyn Any) { + fn finished_computing_key(&self, key: &DynKey) { self.0.finished_computing_key(key); self.1.finished_computing_key(key); } @@ -186,11 +197,17 @@ impl UserCycleDetector for PairDiceC #[cfg(test)] mod tests { - use std::any::Any; use std::fmt::Debug; use std::sync::atomic::AtomicBool; use std::sync::atomic::Ordering; - + use std::sync::Arc; + + use allocative::Allocative; + use async_trait::async_trait; + use dice::CancellationContext; + use dice::DiceComputations; + use dice::DynKey; + use dice::Key; use dice::UserCycleDetector; use dice::UserCycleDetectorGuard; @@ -198,14 +215,29 @@ mod tests { #[test] fn pair_cycle_detector() { - struct TestingGuard; + #[derive(Allocative, Debug, derive_more::Display, Clone, PartialEq, Eq, Hash)] + struct K; + #[async_trait] + impl Key for K { + type Value = (); - impl UserCycleDetectorGuard for TestingGuard { - fn add_edge(&self, _key: &dyn Any) { - unreachable!("testing") + async fn compute( + &self, + _ctx: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> Self::Value { + unreachable!() } - fn as_any(&self) -> &dyn Any { + fn equality(_x: &Self::Value, _y: &Self::Value) -> bool { + false + } + } + + struct TestingGuard; + + impl UserCycleDetectorGuard for TestingGuard { + fn add_edge(&self, _key: &DynKey) { unreachable!("testing") } @@ -223,13 +255,13 @@ mod tests { impl UserCycleDetector for ReceivesStartAndFinish { fn start_computing_key( &self, - _key: &dyn Any, - ) -> Option> { + _key: &DynKey, + ) -> Option> { self.got_start.store(true, Ordering::SeqCst); - Some(Box::new(TestingGuard)) + Some(Arc::new(TestingGuard)) } - fn finished_computing_key(&self, _key: &dyn Any) { + fn finished_computing_key(&self, _key: &DynKey) { assert!(self.got_start.load(Ordering::SeqCst)); 
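Mechanically, this migration swaps `&dyn Any` for dice's `DynKey` wrapper and moves guards from `Box` to `Arc` so they can be shared. The smallest detector conforming to the new signatures is a no-op, shown only to make the new surface concrete:

```rust
use std::sync::Arc;

use dice::DynKey;
use dice::UserCycleDetector;
use dice::UserCycleDetectorGuard;

struct NoopDetector;

impl UserCycleDetector for NoopDetector {
    fn start_computing_key(&self, _key: &DynKey) -> Option<Arc<dyn UserCycleDetectorGuard>> {
        None // claim no keys, so dice reports no edges to us
    }

    fn finished_computing_key(&self, _key: &DynKey) {}
}
```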
self.got_finish.store(true, Ordering::SeqCst); } @@ -243,12 +275,12 @@ mod tests { impl UserCycleDetector for ReceivesOnlyFinish { fn start_computing_key( &self, - _key: &dyn Any, - ) -> Option> { + _key: &DynKey, + ) -> Option> { panic!("shouldn't be called") } - fn finished_computing_key(&self, _key: &dyn Any) { + fn finished_computing_key(&self, _key: &DynKey) { self.got_finish.store(true, Ordering::SeqCst); } } @@ -258,9 +290,9 @@ mod tests { ReceivesOnlyFinish::default(), ); - assert!(detector.start_computing_key(&()).is_some()); + assert!(detector.start_computing_key(&DynKey::from_key(K)).is_some()); - detector.finished_computing_key(&()); + detector.finished_computing_key(&DynKey::from_key(K)); assert!(detector.0.got_start.load(Ordering::SeqCst)); assert!(detector.0.got_finish.load(Ordering::SeqCst)); diff --git a/app/buck2_common/src/dice/file_ops.rs b/app/buck2_common/src/dice/file_ops.rs index 33c778e346029..0b2ca7b38d9b2 100644 --- a/app/buck2_common/src/dice/file_ops.rs +++ b/app/buck2_common/src/dice/file_ops.rs @@ -13,259 +13,143 @@ use std::hash::Hash; use std::sync::Arc; use allocative::Allocative; -use anyhow::Context; use async_trait::async_trait; use buck2_core::cells::cell_path::CellPath; use buck2_core::cells::cell_path::CellPathRef; -use buck2_core::cells::cell_root_path::CellRootPathBuf; use buck2_core::cells::name::CellName; -use buck2_core::cells::unchecked_cell_rel_path::UncheckedCellRelativePath; -use buck2_core::cells::CellResolver; use buck2_core::fs::paths::file_name::FileNameBuf; -use buck2_core::fs::project_rel_path::ProjectRelativePath; -use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; -use buck2_events::dispatch::console_message; +use buck2_futures::cancellation::CancellationContext; use cmp_any::PartialEqAny; -use derivative::Derivative; use derive_more::Display; use dice::DiceComputations; use dice::DiceTransactionUpdater; +use dice::InvalidationSourcePriority; use dice::Key; +use dice::LinearRecomputeDiceComputations; use dupe::Dupe; -use more_futures::cancellation::CancellationContext; +use futures::future::BoxFuture; +use futures::FutureExt; -use crate::dice::cells::HasCellResolver; -use crate::dice::data::HasIoProvider; -use crate::dice::file_ops::keys::FileOpsKey; -use crate::dice::file_ops::keys::FileOpsValue; +use crate::buildfiles::HasBuildfiles; +use crate::dice::file_ops::delegate::get_delegated_file_ops; use crate::file_ops::FileOps; -use crate::file_ops::RawDirEntry; +use crate::file_ops::FileOpsError; use crate::file_ops::RawPathMetadata; use crate::file_ops::ReadDirOutput; -use crate::file_ops::SimpleDirEntry; -use crate::ignores::all_cells::AllCellIgnores; -use crate::ignores::all_cells::HasAllCellIgnores; -use crate::io::IoProvider; -use crate::result::SharedResult; -use crate::result::ToSharedResultExt; -use crate::result::ToUnsharedResultExt; - -pub trait HasFileOps<'c> { - type T: FileOps; - fn file_ops(&'c self) -> Self::T; -} - -impl<'c> HasFileOps<'c> for DiceComputations { - type T = DiceFileOps<'c>; - fn file_ops(&'c self) -> DiceFileOps<'c> { - DiceFileOps(self) +use crate::ignores::file_ignores::FileIgnoreResult; +use crate::io::ReadDirError; + +pub mod delegate; + +/// A wrapper around DiceComputations for places that want to interact with a dyn FileOps. +/// +/// In general, it's better to use DiceFileComputations directly. +pub struct DiceFileOps<'c, 'd>(pub &'c LinearRecomputeDiceComputations<'d>); + +pub struct DiceFileComputations; + +/// Functions for accessing files with keys on the dice graph. 
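The practical split after this change: new code calls the static methods on `DiceFileComputations` with an explicit `&mut DiceComputations`, while `DiceFileOps` survives only for callers that need a `dyn FileOps`. A hedged caller sketch (the function itself is invented):

```rust
async fn count_entries(
    ctx: &mut DiceComputations<'_>,
    dir: CellPathRef<'_>,
) -> anyhow::Result<usize> {
    // Cached, ignore-filtered listing keyed by ReadDirKey { dir, Yes }.
    let listing = DiceFileComputations::read_dir(ctx, dir).await?;
    Ok(listing.included.len())
}
```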
+impl DiceFileComputations { + /// Filters out ignored paths + pub async fn read_dir( + ctx: &mut DiceComputations<'_>, + path: CellPathRef<'_>, + ) -> anyhow::Result { + ctx.compute(&ReadDirKey { + path: path.to_owned(), + check_ignores: CheckIgnores::Yes, + }) + .await? + .map_err(anyhow::Error::from) } -} -// TODO(cjhopman, bobyf): This FileToken can go away once Dice has support for -// transient values. -/// This is used as the "result" of a read_file computation so that we don't -/// need to store the file content's in dice's cache. -#[derive(Clone, Dupe, Allocative)] -struct FileToken(Arc); - -impl FileToken { - async fn read_if_exists(&self, fs: &dyn FileOps) -> anyhow::Result> { - fs.read_file_if_exists((*self.0).as_ref()).await + pub async fn read_dir_include_ignores( + ctx: &mut DiceComputations<'_>, + path: CellPathRef<'_>, + ) -> anyhow::Result { + ctx.compute(&ReadDirKey { + path: path.to_owned(), + check_ignores: CheckIgnores::No, + }) + .await? + .map_err(anyhow::Error::from) } -} - -#[derive(Clone, Dupe, Allocative)] -pub struct DiceFileOps<'c>(#[allocative(skip)] pub &'c DiceComputations); - -pub mod keys { - use std::sync::Arc; - - use allocative::Allocative; - use derive_more::Display; - use dupe::Dupe; - - use crate::file_ops::FileOps; - - #[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)] - #[display(fmt = "{:?}", self)] - pub struct FileOpsKey(); - #[derive(Dupe, Clone, Allocative)] - pub struct FileOpsValue(#[allocative(skip)] pub Arc); -} - -async fn get_default_file_ops(dice: &DiceComputations) -> SharedResult> { - #[derive(Clone, Dupe, Derivative, Allocative)] - #[derivative(PartialEq)] - struct DiceFileOpsDelegate { - // Safe to ignore because `io` does not change during the lifetime of the daemon. - #[derivative(PartialEq = "ignore")] - io: Arc, - cells: CellResolver, - ignores: Arc, - } - - impl DiceFileOpsDelegate { - fn resolve(&self, path: CellPathRef) -> anyhow::Result { - let cell_root = self.resolve_cell_root(path.cell())?; - Ok(cell_root.project_relative_path().join(path.path())) - } - - fn resolve_cell_root(&self, cell: CellName) -> anyhow::Result { - Ok(self.cells.get(cell).unwrap().path().to_buf()) - } - - fn get_cell_path(&self, path: &ProjectRelativePath) -> anyhow::Result { - self.cells.get_cell_path(path) - } - - fn io_provider(&self) -> &dyn IoProvider { - self.io.as_ref() - } + /// Like read_dir, but with extended error information. This may add additional dice dependencies. + pub async fn read_dir_ext( + ctx: &mut DiceComputations<'_>, + path: CellPathRef<'_>, + ) -> Result { + read_dir_ext(ctx, path).await } - #[async_trait] - impl FileOps for DiceFileOpsDelegate { - async fn read_file_if_exists( - &self, - path: CellPathRef<'async_trait>, - ) -> anyhow::Result> { - // TODO(cjhopman): error on ignored paths, maybe. - let project_path = self.resolve(path)?; - self.io_provider().read_file_if_exists(project_path).await - } - - async fn read_dir(&self, path: CellPathRef<'async_trait>) -> anyhow::Result { - // TODO(cjhopman): This should also probably verify that the parent chain is not ignored. - self.ignores - .check_ignored(path.cell(), UncheckedCellRelativePath::new(path.path()))? - .into_result() - .with_context(|| format!("Error checking whether dir `{}` is ignored", path))?; - - let project_path = self.resolve(path)?; - let mut entries = self - .io_provider() - .read_dir(project_path) - .await - .with_context(|| format!("Error listing dir `{}`", path))?; - - // Make sure entries are deterministic, since read_dir isn't. 
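Because `check_ignores` is part of `ReadDirKey`, the filtered and unfiltered listings of the same directory are two independent dice nodes, which is also why the invalidation side (`insert_dir_keys`, further down) must dirty both:

```rust
// Same path, two cache entries:
let filtered = DiceFileComputations::read_dir(ctx, path).await?; // ignores applied
let raw = DiceFileComputations::read_dir_include_ignores(ctx, path).await?;
```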
- entries.sort_by(|a, b| a.file_name.cmp(&b.file_name)); - - let is_ignored = |file_name: &str| { - let mut cell_relative_path_buf; - let cell_relative_path: &str = if path.path().is_empty() { - file_name - } else { - cell_relative_path_buf = - String::with_capacity(path.path().as_str().len() + 1 + file_name.len()); - cell_relative_path_buf.push_str(path.path().as_str()); - cell_relative_path_buf.push('/'); - cell_relative_path_buf.push_str(file_name); - &cell_relative_path_buf - }; - - let cell_relative_path = - UncheckedCellRelativePath::unchecked_new(cell_relative_path); - let is_ignored = self - .ignores - .check_ignored(path.cell(), cell_relative_path)? - .is_ignored(); - anyhow::Ok(is_ignored) - }; - - // Filter out any entries that are ignored. - let mut included_entries = Vec::new(); - for e in entries { - let RawDirEntry { - file_type, - file_name, - } = e; - - if !is_ignored(&file_name)? { - let file_name = match FileNameBuf::try_from_or_get_back(file_name) { - Ok(file_name) => file_name, - Err(file_name) => { - console_message(format!( - "File name `{file_name}` is not valid. \ - Add the path to `project.ignore` to mute this message", - )); - continue; - } - }; - included_entries.push(SimpleDirEntry { - file_name, - file_type, - }); - } - } - - Ok(ReadDirOutput { - included: included_entries.into(), - }) - } - - async fn read_path_metadata_if_exists( - &self, - path: CellPathRef<'async_trait>, - ) -> anyhow::Result> { - let project_path = self.resolve(path)?; - - let res = self - .io_provider() - .read_path_metadata_if_exists(project_path) - .await - .with_context(|| format!("Error accessing metadata for path `{}`", path))?; - res.map(|meta| meta.try_map(|path| Ok(Arc::new(self.get_cell_path(&path)?)))) - .transpose() - } + /// Does not check if the path is ignored + /// + /// TODO(cjhopman): error on ignored paths, maybe. + pub async fn read_file_if_exists( + ctx: &mut DiceComputations<'_>, + path: CellPathRef<'_>, + ) -> anyhow::Result> { + let file_ops = get_delegated_file_ops(ctx, path.cell(), CheckIgnores::No).await?; + let () = ctx.compute(&ReadFileKey(Arc::new(path.to_owned()))).await?; + // FIXME(JakobDegen): We intentionally avoid storing the result of this function in dice. + // However, that also means that the `ReadFileKey` is not marked as transient if this + // returns an error, which is unfortunate. + file_ops.read_file_if_exists(path.path()).await + } - async fn is_ignored(&self, path: CellPathRef<'async_trait>) -> anyhow::Result { - Ok(self - .ignores - .check_ignored(path.cell(), UncheckedCellRelativePath::new(path.path()))? - .is_ignored()) - } + /// Does not check if the path is ignored + pub async fn read_file( + ctx: &mut DiceComputations<'_>, + path: CellPathRef<'_>, + ) -> anyhow::Result { + Self::read_file_if_exists(ctx, path) + .await? + .ok_or_else(|| FileOpsError::FileNotFound(path.to_string()).into()) + } - fn eq_token(&self) -> PartialEqAny { - PartialEqAny::new(self) - } + /// Does not check if the path is ignored + pub async fn read_path_metadata_if_exists( + ctx: &mut DiceComputations<'_>, + path: CellPathRef<'_>, + ) -> anyhow::Result> { + ctx.compute(&PathMetadataKey(path.to_owned())) + .await? 
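`read_file_if_exists` above is the clearest instance of a deliberate pattern: take a dice dependency on `ReadFileKey` purely for invalidation, then read the bytes outside the cache so file contents are never stored in the graph. Reduced to its core:

```rust
// Dep-then-read: the compute result is (), only the edge matters.
let () = ctx.compute(&ReadFileKey(Arc::new(path.to_owned()))).await?; // invalidation edge
let contents = file_ops.read_file_if_exists(path.path()).await?;      // uncached read
```

The trade-off, called out in the FIXME, is that a read error cannot mark `ReadFileKey` transient, since the key itself never sees the result.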
+ .map_err(anyhow::Error::from) } - #[async_trait] - impl Key for FileOpsKey { - type Value = SharedResult; - async fn compute( - &self, - ctx: &mut DiceComputations, - _cancellations: &CancellationContext, - ) -> Self::Value { - let cells = ctx.get_cell_resolver().await?; - let io = ctx.global_data().get_io_provider(); - - let ignores = ctx.new_all_cell_ignores().await?; - - Ok(FileOpsValue(Arc::new(DiceFileOpsDelegate { - io, - cells, - ignores, - }))) - } + /// Does not check if the path is ignored + pub async fn read_path_metadata( + ctx: &mut DiceComputations<'_>, + path: CellPathRef<'_>, + ) -> anyhow::Result { + Self::read_path_metadata_if_exists(ctx, path) + .await? + .ok_or_else(|| FileOpsError::FileNotFound(path.to_string()).into()) + } - fn equality(x: &Self::Value, y: &Self::Value) -> bool { - match (x, y) { - (Ok(x), Ok(y)) => *x.0 == *y.0, - _ => false, - } - } + pub async fn is_ignored( + ctx: &mut DiceComputations<'_>, + path: CellPathRef<'_>, + ) -> anyhow::Result { + get_delegated_file_ops(ctx, path.cell(), CheckIgnores::Yes) + .await? + .is_ignored(path.path()) + .await + } - fn validity(x: &Self::Value) -> bool { - x.is_ok() - } + pub async fn buildfiles<'a>( + ctx: &mut DiceComputations<'_>, + cell: CellName, + ) -> anyhow::Result> { + ctx.get_buildfiles(cell).await } +} - Ok(dice.compute(&FileOpsKey()).await??.0) +#[derive(Debug, Display, Clone, Dupe, Copy, PartialEq, Eq, Hash, Allocative)] +pub(crate) enum CheckIgnores { + Yes, + No, } #[derive(Allocative)] @@ -298,6 +182,17 @@ impl FileChangeTracker { self.paths_to_dirty.insert(PathMetadataKey(path)); } + fn insert_dir_keys(&mut self, path: CellPath) { + self.dirs_to_dirty.insert(ReadDirKey { + path: path.clone(), + check_ignores: CheckIgnores::No, + }); + self.dirs_to_dirty.insert(ReadDirKey { + path, + check_ignores: CheckIgnores::Yes, + }); + } + pub fn file_added_or_removed(&mut self, path: CellPath) { let parent = path.parent(); @@ -307,7 +202,7 @@ impl FileChangeTracker { // That never happens in established repos, but if you are setting one up, it's not uncommon. // Since we don't include paths in different cells, the fact we don't dirty the parent // (which is in an enclosing cell) doesn't matter. - self.dirs_to_dirty.insert(ReadDirKey(parent.to_owned())); + self.insert_dir_keys(parent.to_owned()); } } @@ -319,8 +214,7 @@ impl FileChangeTracker { // That never happens in established repos, but if you are setting one up, it's not uncommon. // Since we don't include paths in different cells, the fact we don't dirty the parent // (which is in an enclosing cell) doesn't matter. 
- self.dirs_to_dirty - .extend([ReadDirKey(path), ReadDirKey(parent)]); + self.insert_dir_keys(parent); } } @@ -338,7 +232,7 @@ impl FileChangeTracker { pub fn dir_changed(&mut self, path: CellPath) { self.paths_to_dirty.insert(PathMetadataKey(path.clone())); - self.dirs_to_dirty.insert(ReadDirKey(path)); + self.insert_dir_keys(path); } pub fn dir_added(&mut self, path: CellPath) { @@ -355,36 +249,43 @@ struct ReadFileKey(Arc); #[async_trait] impl Key for ReadFileKey { - type Value = FileToken; + type Value = (); async fn compute( &self, _ctx: &mut DiceComputations, _cancellations: &CancellationContext, ) -> Self::Value { - FileToken(self.0.dupe()) } fn equality(_: &Self::Value, _: &Self::Value) -> bool { false } + + fn invalidation_source_priority() -> InvalidationSourcePriority { + InvalidationSourcePriority::High + } } #[derive(Clone, Display, Debug, Eq, Hash, PartialEq, Allocative)] -struct ReadDirKey(CellPath); +#[display("{}", path)] +struct ReadDirKey { + path: CellPath, + check_ignores: CheckIgnores, +} #[async_trait] impl Key for ReadDirKey { - type Value = SharedResult; + type Value = buck2_error::Result; async fn compute( &self, ctx: &mut DiceComputations, _cancellations: &CancellationContext, ) -> Self::Value { - get_default_file_ops(ctx) + get_delegated_file_ops(ctx, self.path.cell(), self.check_ignores) .await? - .read_dir(self.0.as_ref()) + .read_dir(self.path.as_ref().path()) .await - .shared_error() + .map_err(buck2_error::Error::from) } fn equality(x: &Self::Value, y: &Self::Value) -> bool { @@ -404,15 +305,15 @@ struct PathMetadataKey(CellPath); #[async_trait] impl Key for PathMetadataKey { - type Value = SharedResult>; + type Value = buck2_error::Result>; async fn compute( &self, ctx: &mut DiceComputations, _cancellations: &CancellationContext, ) -> Self::Value { - let res = get_default_file_ops(ctx) + let res = get_delegated_file_ops(ctx, self.0.cell(), CheckIgnores::No) .await? - .read_path_metadata_if_exists(self.0.as_ref()) + .read_path_metadata_if_exists(self.0.as_ref().path()) .await?; match res { @@ -438,44 +339,37 @@ impl Key for PathMetadataKey { fn validity(x: &Self::Value) -> bool { x.is_ok() } + + fn invalidation_source_priority() -> InvalidationSourcePriority { + InvalidationSourcePriority::High + } } #[async_trait] -impl<'c> FileOps for DiceFileOps<'c> { +impl FileOps for DiceFileOps<'_, '_> { async fn read_file_if_exists( &self, path: CellPathRef<'async_trait>, ) -> anyhow::Result> { - let path = path.to_owned(); - let file_ops = get_default_file_ops(self.0).await?; - - self.0 - .compute(&ReadFileKey(Arc::new(path))) - .await? - .read_if_exists(&*file_ops) - .await + DiceFileComputations::read_file_if_exists(&mut self.0.get(), path).await } async fn read_dir(&self, path: CellPathRef<'async_trait>) -> anyhow::Result { - self.0 - .compute(&ReadDirKey(path.to_owned())) - .await? - .unshared_error() + DiceFileComputations::read_dir(&mut self.0.get(), path).await } async fn read_path_metadata_if_exists( &self, path: CellPathRef<'async_trait>, ) -> anyhow::Result> { - self.0 - .compute(&PathMetadataKey(path.to_owned())) - .await? 
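The `read_dir_ext` flavor referenced earlier only spends extra work when the plain read fails; its fallback (`extended_ignore_error`, defined further down) classifies the failure roughly like this:

```rust
// 1. parent readable + path ignored       -> DirectoryIsIgnored
// 2. parent readable + name not in parent -> DirectoryDoesNotExist
// 3. metadata says the path is a file     -> NotADirectory
// 4. anything else                        -> fall back to the original error
```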
- .unshared_error() + DiceFileComputations::read_path_metadata_if_exists(&mut self.0.get(), path).await } - async fn is_ignored(&self, path: CellPathRef<'async_trait>) -> anyhow::Result { - let file_ops = get_default_file_ops(self.0).await?; - file_ops.is_ignored(path).await + async fn is_ignored( + &self, + path: CellPathRef<'async_trait>, + ) -> anyhow::Result { + DiceFileComputations::is_ignored(&mut self.0.get(), path).await } fn eq_token(&self) -> PartialEqAny { @@ -483,8 +377,71 @@ impl<'c> FileOps for DiceFileOps<'c> { // Also we cannot do `PartialEqAny` here because `Self` is not `'static`. PartialEqAny::always_false() } + + async fn buildfiles<'a>(&self, cell: CellName) -> anyhow::Result> { + DiceFileComputations::buildfiles(&mut self.0.get(), cell).await + } } -pub mod testing { - pub use super::keys::FileOpsKey; +fn extended_ignore_error<'a>( + ctx: &'a mut DiceComputations<'_>, + path: CellPathRef<'a>, +) -> BoxFuture<'a, Option> { + async move { + match path.parent() { + Some(parent) => match DiceFileComputations::read_dir_ext(ctx, parent).await { + Ok(v) => { + // the parent can be read fine, check if this path is ignored first (if it's ignored it won't appear in the read_dir results). + if let Ok(FileIgnoreResult::Ignored(reason)) = + DiceFileComputations::is_ignored(ctx, path).await + { + return Some(ReadDirError::DirectoryIsIgnored(path.to_owned(), reason)); + } + + match path.path().file_name() { + Some(file_name) if !v.contains(file_name) => { + return Some(ReadDirError::DirectoryDoesNotExist(path.to_owned())); + } + _ => {} + } + + match DiceFileComputations::read_path_metadata(ctx, path).await { + Ok(RawPathMetadata::Directory) => {} + Ok(RawPathMetadata::Symlink { .. }) => { + // not sure how we should handle symlink here, if it's pointing to a dir is it potentially correct? + } + Err(_) => { + // we ignore this, we don't know what the error is and so we can't be sure that + // it's not missing important data that the original error would have. + } + Ok(RawPathMetadata::File(..)) => { + return Some(ReadDirError::NotADirectory( + path.to_owned(), + "file".to_owned(), + )); + } + } + + None + } + Err(e) => Some(e), + }, + None => None, + } + } + .boxed() +} + +/// out-of-line impl for DiceComputations::read_dir_ext so it doesn't add noise to the api +async fn read_dir_ext( + ctx: &mut DiceComputations<'_>, + path: CellPathRef<'_>, +) -> Result { + match DiceFileComputations::read_dir(ctx, path).await { + Ok(v) => Ok(v), + Err(e) => match extended_ignore_error(ctx, path).await { + Some(e) => Err(e), + None => Err(e.into()), + }, + } } diff --git a/app/buck2_common/src/dice/file_ops/delegate.rs b/app/buck2_common/src/dice/file_ops/delegate.rs new file mode 100644 index 0000000000000..fc48ab0888402 --- /dev/null +++ b/app/buck2_common/src/dice/file_ops/delegate.rs @@ -0,0 +1,332 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use std::sync::Arc; + +use allocative::Allocative; +use anyhow::Context; +use async_trait::async_trait; +use buck2_core::cells::cell_path::CellPath; +use buck2_core::cells::name::CellName; +use buck2_core::cells::paths::CellRelativePath; +use buck2_core::cells::unchecked_cell_rel_path::UncheckedCellRelativePath; +use buck2_core::cells::CellResolver; +use buck2_core::fs::paths::file_name::FileNameBuf; +use buck2_core::fs::project_rel_path::ProjectRelativePath; +use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; +use buck2_events::dispatch::console_message; +use buck2_futures::cancellation::CancellationContext; +use cmp_any::PartialEqAny; +use derivative::Derivative; +use dice::DiceComputations; +use dice::Key; +use dupe::Dupe; + +use crate::dice::cells::HasCellResolver; +use crate::dice::data::HasIoProvider; +use crate::dice::file_ops::delegate::keys::FileOpsKey; +use crate::dice::file_ops::delegate::keys::FileOpsValue; +use crate::dice::file_ops::CheckIgnores; +use crate::external_cells::EXTERNAL_CELLS_IMPL; +use crate::file_ops::RawDirEntry; +use crate::file_ops::RawPathMetadata; +use crate::file_ops::ReadDirOutput; +use crate::file_ops::SimpleDirEntry; +use crate::ignores::all_cells::HasCellFileIgnores; +use crate::ignores::file_ignores::CellFileIgnores; +use crate::ignores::file_ignores::FileIgnoreResult; +use crate::io::IoProvider; + +/// Note: Everything in this mini-module exists only so that it can be replaced by a `TestFileOps` +/// in unittests +mod keys { + use allocative::Allocative; + use buck2_core::cells::name::CellName; + use derive_more::Display; + use dupe::Dupe; + + use crate::dice::file_ops::delegate::FileOpsDelegateWithIgnores; + use crate::dice::file_ops::CheckIgnores; + + #[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)] + #[display("{:?}", self)] + pub(crate) struct FileOpsKey { + pub cell: CellName, + pub check_ignores: CheckIgnores, + } + + #[derive(Dupe, Clone, Allocative)] + pub(crate) struct FileOpsValue(#[allocative(skip)] pub FileOpsDelegateWithIgnores); +} + +#[async_trait] +pub trait FileOpsDelegate: Send + Sync { + async fn read_file_if_exists( + &self, + path: &'async_trait CellRelativePath, + ) -> anyhow::Result>; + + /// Return the list of file outputs, sorted. + async fn read_dir( + &self, + path: &'async_trait CellRelativePath, + ) -> anyhow::Result>; + + async fn read_path_metadata_if_exists( + &self, + path: &'async_trait CellRelativePath, + ) -> anyhow::Result>; + + fn eq_token(&self) -> PartialEqAny; +} + +/// A `FileOpsDelegate` implementation that calls out to the `IoProvider` to read files. +/// +/// This is used for everything except 1) tests, and 2) external cells. +#[derive(Clone, Dupe, Derivative, Allocative)] +#[derivative(PartialEq)] +struct IoFileOpsDelegate { + // Safe to ignore because `io` does not change during the lifetime of the daemon. 
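The key change relative to the old `FileOpsKey()` singleton is that delegates are now keyed per cell (and per ignore setting), which is what lets an external cell substitute a non-IO-backed implementation. The resulting cache topology, schematically (the external branch is inferred from the `external()` check in `FileOpsKey::compute` below):

```rust
// FileOpsKey { cell: root, check_ignores: Yes } -> IoFileOpsDelegate + ignores
// FileOpsKey { cell: root, check_ignores: No  } -> IoFileOpsDelegate
// FileOpsKey { cell: ext,  check_ignores: _   } -> EXTERNAL_CELLS_IMPL delegate
```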
+ #[derivative(PartialEq = "ignore")] + io: Arc, + cells: CellResolver, + cell: CellName, +} + +impl IoFileOpsDelegate { + fn resolve(&self, path: &CellRelativePath) -> ProjectRelativePathBuf { + let cell_root = self.cells.get(self.cell).unwrap().path(); + cell_root.as_project_relative_path().join(path) + } + + fn get_cell_path(&self, path: &ProjectRelativePath) -> anyhow::Result { + self.cells.get_cell_path(path) + } + + fn io_provider(&self) -> &dyn IoProvider { + self.io.as_ref() + } +} + +#[async_trait] +impl FileOpsDelegate for IoFileOpsDelegate { + async fn read_file_if_exists( + &self, + path: &'async_trait CellRelativePath, + ) -> anyhow::Result> { + let project_path = self.resolve(path); + self.io_provider().read_file_if_exists(project_path).await + } + + async fn read_dir( + &self, + path: &'async_trait CellRelativePath, + ) -> anyhow::Result> { + let project_path = self.resolve(path); + let mut entries = self + .io_provider() + .read_dir(project_path) + .await + .with_context(|| format!("Error listing dir `{}`", path))?; + + // Make sure entries are deterministic, since read_dir isn't. + entries.sort_by(|a, b| a.file_name.cmp(&b.file_name)); + + Ok(entries) + } + + async fn read_path_metadata_if_exists( + &self, + path: &'async_trait CellRelativePath, + ) -> anyhow::Result> { + let project_path = self.resolve(path); + + let res = self + .io_provider() + .read_path_metadata_if_exists(project_path) + .await + .with_context(|| format!("Error accessing metadata for path `{}`", path))?; + res.map(|meta| meta.try_map(|path| Ok(Arc::new(self.get_cell_path(&path)?)))) + .transpose() + } + + fn eq_token(&self) -> PartialEqAny { + PartialEqAny::new(self) + } +} + +#[async_trait] +impl Key for FileOpsKey { + type Value = buck2_error::Result; + async fn compute( + &self, + ctx: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> Self::Value { + let cells = ctx.get_cell_resolver().await?; + let ignores = if self.check_ignores == CheckIgnores::Yes { + Some(ctx.new_cell_ignores(self.cell).await?) + } else { + None + }; + + let out = if let Some(origin) = cells.get(self.cell)?.external() { + let delegate = EXTERNAL_CELLS_IMPL + .get()? + .get_file_ops_delegate(ctx, self.cell, origin.dupe()) + .await?; + FileOpsDelegateWithIgnores::new(ignores, delegate) + } else { + let io = ctx.global_data().get_io_provider(); + let delegate = IoFileOpsDelegate { + io, + cells, + cell: self.cell, + }; + FileOpsDelegateWithIgnores::new(ignores, Arc::new(delegate)) + }; + + Ok(FileOpsValue(out)) + } + + fn equality(x: &Self::Value, y: &Self::Value) -> bool { + match (x, y) { + (Ok(x), Ok(y)) => x.0 == y.0, + _ => false, + } + } + + fn validity(x: &Self::Value) -> bool { + x.is_ok() + } +} + +pub(crate) async fn get_delegated_file_ops( + dice: &mut DiceComputations<'_>, + cell: CellName, + check_ignores: CheckIgnores, +) -> buck2_error::Result { + Ok(dice + .compute(&FileOpsKey { + cell, + check_ignores, + }) + .await?? 
+ .0) +} + +#[derive(Clone, Dupe)] +pub struct FileOpsDelegateWithIgnores { + ignores: Option>, + delegate: Arc, +} + +impl PartialEq for FileOpsDelegateWithIgnores { + fn eq(&self, other: &Self) -> bool { + self.ignores == other.ignores && self.delegate.eq_token() == other.delegate.eq_token() + } +} + +impl FileOpsDelegateWithIgnores { + pub(crate) fn new( + ignores: Option>, + delegate: Arc, + ) -> Self { + Self { ignores, delegate } + } +} + +impl FileOpsDelegateWithIgnores { + fn check_ignores(&self, path: &UncheckedCellRelativePath) -> FileIgnoreResult { + match self.ignores.as_ref() { + Some(ignores) => ignores.check(path), + None => FileIgnoreResult::Ok, + } + } + + pub async fn read_file_if_exists( + &self, + path: &CellRelativePath, + ) -> anyhow::Result> { + self.delegate.read_file_if_exists(path).await + } + + /// Return the list of file outputs, sorted. + pub async fn read_dir(&self, path: &CellRelativePath) -> anyhow::Result { + // TODO(cjhopman): This should also probably verify that the parent chain is not ignored. + self.check_ignores(UncheckedCellRelativePath::new(path)) + .into_result()?; + + let entries = self.delegate.read_dir(path).await?; + + let is_ignored = |file_name: &str| { + let mut cell_relative_path_buf; + let cell_relative_path: &str = if path.is_empty() { + file_name + } else { + cell_relative_path_buf = + String::with_capacity(path.as_str().len() + 1 + file_name.len()); + cell_relative_path_buf.push_str(path.as_str()); + cell_relative_path_buf.push('/'); + cell_relative_path_buf.push_str(file_name); + &cell_relative_path_buf + }; + + let cell_relative_path = UncheckedCellRelativePath::unchecked_new(cell_relative_path); + let is_ignored = self.check_ignores(cell_relative_path).is_ignored(); + anyhow::Ok(is_ignored) + }; + + // Filter out any entries that are ignored. + let mut included_entries = Vec::new(); + for e in entries { + let RawDirEntry { + file_type, + file_name, + } = e; + + if !is_ignored(&file_name)? { + let file_name = match FileNameBuf::try_from_or_get_back(file_name) { + Ok(file_name) => file_name, + Err(file_name) => { + console_message(format!( + "File name `{file_name}` is not valid. \ + Add the path to `project.ignore` to mute this message", + )); + continue; + } + }; + included_entries.push(SimpleDirEntry { + file_name, + file_type, + }); + } + } + + Ok(ReadDirOutput { + included: included_entries.into(), + }) + } + + pub async fn read_path_metadata_if_exists( + &self, + path: &CellRelativePath, + ) -> anyhow::Result> { + self.delegate.read_path_metadata_if_exists(path).await + } + + pub async fn is_ignored(&self, path: &CellRelativePath) -> anyhow::Result { + Ok(self.check_ignores(UncheckedCellRelativePath::new(path))) + } +} + +pub(crate) mod testing { + pub(crate) use super::keys::FileOpsKey; + pub(crate) use super::keys::FileOpsValue; +} diff --git a/app/buck2_common/src/error_report.rs b/app/buck2_common/src/error_report.rs deleted file mode 100644 index 4d5f28cebd734..0000000000000 --- a/app/buck2_common/src/error_report.rs +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -use crate::result::recursive_shared_downcast_ref; -use crate::result::MayProvideAnyhowError; - -pub trait CreateErrorReport { - fn create_error_report(&self) -> Option; -} - -impl CreateErrorReport for T -where - T: MayProvideAnyhowError, -{ - fn create_error_report(&self) -> Option { - let err = self.as_anyhow()?; - - // Infra error by default if no category tag is set - let category = Some( - recursive_shared_downcast_ref::(err) - .map_or(buck2_data::ErrorCategory::Infra as i32, |c| *c as i32), - ); - let cause = recursive_shared_downcast_ref::(err).map(|c| *c as i32); - let error_message = format!("{:#}", err); - - Some(buck2_data::ErrorReport { - category, - cause, - error_message, - }) - } -} diff --git a/app/buck2_common/src/external_cells.rs b/app/buck2_common/src/external_cells.rs new file mode 100644 index 0000000000000..3a8da587d901c --- /dev/null +++ b/app/buck2_common/src/external_cells.rs @@ -0,0 +1,42 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::sync::Arc; + +use async_trait::async_trait; +use buck2_core::cells::cell_root_path::CellRootPath; +use buck2_core::cells::external::ExternalCellOrigin; +use buck2_core::cells::name::CellName; +use buck2_util::late_binding::LateBinding; +use dice::DiceComputations; + +use crate::dice::file_ops::delegate::FileOpsDelegate; + +#[async_trait] +pub trait ExternalCellsImpl: Send + Sync + 'static { + async fn get_file_ops_delegate( + &self, + ctx: &mut DiceComputations<'_>, + cell_name: CellName, + origin: ExternalCellOrigin, + ) -> anyhow::Result>; + + fn check_bundled_cell_exists(&self, cell_name: CellName) -> anyhow::Result<()>; + + async fn expand( + &self, + ctx: &mut DiceComputations<'_>, + cell_name: CellName, + origin: ExternalCellOrigin, + path: &CellRootPath, + ) -> anyhow::Result<()>; +} + +pub static EXTERNAL_CELLS_IMPL: LateBinding<&'static dyn ExternalCellsImpl> = + LateBinding::new("EXTERNAL_CELLS_IMPL"); diff --git a/app/buck2_common/src/external_symlink.rs b/app/buck2_common/src/external_symlink.rs index 54dec1b00d00a..83772dc49b6b5 100644 --- a/app/buck2_common/src/external_symlink.rs +++ b/app/buck2_common/src/external_symlink.rs @@ -31,7 +31,7 @@ pub struct ExternalSymlink { // We can't use AbsPathBuf because there might be "." or ".." in the path abs_target: Utf8Path, /// What goes after the external target path. 
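`EXTERNAL_CELLS_IMPL` uses buck2's usual late-binding idiom to break a crate dependency cycle: the low-level crate owns the slot, and a higher-level crate fills it at startup. A sketch of the registration side (the registering function and instance names are invented; `init` is assumed to be `LateBinding`'s registration call, matching the `get()?` read side shown above):

```rust
// In the crate that actually implements external cells:
pub(crate) fn init_external_cells_impl() {
    // `ExternalCellsImplInstance` is a stand-in name for the real singleton.
    EXTERNAL_CELLS_IMPL.init(&ExternalCellsImplInstance);
}
```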
- remaining_path: Option, + remaining_path: ForwardRelativePathBuf, } impl fmt::Display for ExternalSymlink { @@ -43,7 +43,7 @@ impl fmt::Display for ExternalSymlink { impl ExternalSymlink { pub fn new( abs_target: PathBuf, - remaining_path: Option, + remaining_path: ForwardRelativePathBuf, ) -> anyhow::Result { let abs_target = match abs_target.into_os_string().into_string() { Ok(string) => string, @@ -64,23 +64,27 @@ impl ExternalSymlink { Path::new(&self.abs_target) } - pub fn remaining_path(&self) -> Option<&ForwardRelativePath> { - self.remaining_path.as_ref().map(|p| p.as_ref()) + pub fn remaining_path(&self) -> &ForwardRelativePath { + &self.remaining_path } /// Returns the complete path as a [`PathBuf`] pub fn to_path_buf(&self) -> PathBuf { - match &self.remaining_path { - Some(p) => Path::new(&self.abs_target).join(p.as_str()), - None => Path::new(&self.abs_target).to_owned(), + if !self.remaining_path.is_empty() { + Path::new(&self.abs_target).join(self.remaining_path.as_str()) + } else { + Path::new(&self.abs_target).to_owned() } } /// Returns a new `ExternalSymlink` with its target being the full target /// of `self` (i.e. `{self.target}/{self.remaining_path}`). pub fn with_full_target(self: &Arc) -> anyhow::Result> { - if self.remaining_path.is_some() { - Ok(Arc::new(Self::new(self.to_path_buf(), None)?)) + if !self.remaining_path.is_empty() { + Ok(Arc::new(Self::new( + self.to_path_buf(), + ForwardRelativePathBuf::default(), + )?)) } else { Ok(self.dupe()) } @@ -88,10 +92,10 @@ impl ExternalSymlink { /// Returns a new `ExternalSymlink` with `remaining_path` discarded. pub fn without_remaining_path(self: &Arc) -> Arc { - if self.remaining_path.is_some() { + if !self.remaining_path.is_empty() { Arc::new(Self { abs_target: self.abs_target.clone(), - remaining_path: None, + remaining_path: ForwardRelativePathBuf::default(), }) } else { self.dupe() @@ -109,13 +113,6 @@ impl ExternalSymlink { &self, path: &'a ForwardRelativePath, ) -> Option<&'a ForwardRelativePath> { - if let Some(remaining) = self.remaining_path() { - path.as_str() - .strip_suffix(remaining.as_str()) - .map(|p| p.strip_suffix('/').unwrap_or(p)) - .map(ForwardRelativePath::unchecked_new) - } else { - Some(path) - } + path.strip_suffix_opt(&self.remaining_path) } } diff --git a/app/buck2_common/src/fbinit.rs b/app/buck2_common/src/fbinit.rs new file mode 100644 index 0000000000000..8e1c5ed010df2 --- /dev/null +++ b/app/buck2_common/src/fbinit.rs @@ -0,0 +1,55 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::sync::OnceLock; + +use fbinit::FacebookInit; + +fn run_init() -> FacebookInit { + // SAFETY: Only called within a oncelock + let fb = unsafe { fbinit::perform_init() }; + + #[cfg(fbcode_build)] + { + use gflags::GflagValue; + + // There are two sources of log spew when building buck2 with Buck and linking against fbcode: + // 1. folly/logging/xlog, which can be configured via a special configuration string, which we use to + // log only critical-level logs. https://github.com/facebook/folly/blob/master/folly/logging/docs/Config.md + // 2. google log (glog), which is older but still used, which can configured using a flag at runtime. + // + // This first call handles the folly config. 
+ logging::update_logging_config(fb, "CRITICAL"); + drop(gflags::set_gflag_value( + fb, + "minloglevel", + GflagValue::U32(5), + )); + drop(gflags::set_gflag_value( + fb, + "stderrthreshold", + GflagValue::U32(5), + )); + } + + fb +} + +/// Gets an fbinit token. +/// +/// This function is lazy and safe to call from multiple threads, however: +/// 1. You should still prefer to explicit pass `FacebookInit` around where possible. Use of this +/// function is primarily intended for a very early point in buck2's lifecycle where fbinit +/// initialization is lazy. +/// 2. This function may not be called before any forks, as it may spawn threads. +pub fn get_or_init_fbcode_globals() -> FacebookInit { + static FB: OnceLock = OnceLock::new(); + + *FB.get_or_init(run_init) +} diff --git a/app/buck2_common/src/file_ops.rs b/app/buck2_common/src/file_ops.rs index 88eef433d854e..1df0362c4f239 100644 --- a/app/buck2_common/src/file_ops.rs +++ b/app/buck2_common/src/file_ops.rs @@ -13,9 +13,10 @@ use std::sync::Arc; use allocative::Allocative; use async_trait::async_trait; +use buck2_core::buck2_env_anyhow; use buck2_core::cells::cell_path::CellPath; use buck2_core::cells::cell_path::CellPathRef; -use buck2_core::env_helper::EnvHelper; +use buck2_core::cells::name::CellName; use buck2_core::fs::paths::abs_path::AbsPath; use buck2_core::fs::paths::file_name::FileName; use buck2_core::fs::paths::file_name::FileNameBuf; @@ -27,13 +28,18 @@ use gazebo::variants::VariantName; use crate::cas_digest::CasDigest; use crate::cas_digest::CasDigestConfig; +use crate::cas_digest::CasDigestKind; use crate::cas_digest::TrackedCasDigest; -use crate::cas_digest::TrackedCasDigestKind; use crate::external_symlink::ExternalSymlink; +use crate::ignores::file_ignores::FileIgnoreResult; -#[derive(Debug, thiserror::Error)] -enum FileOpsError { +#[derive(Debug, buck2_error::Error)] +pub(crate) enum FileOpsError { #[error("File not found: `{0}`")] + // File not found errors are not inherently always user errors; however, we only use these + // methods with source files, and in that case this is correct + #[buck2(input)] + #[buck2(tag = IoNotFound)] FileNotFound(String), } @@ -111,7 +117,7 @@ pub struct FileDigestKind { _private: (), } -impl TrackedCasDigestKind for FileDigestKind { +impl CasDigestKind for FileDigestKind { fn empty_digest(config: CasDigestConfig) -> Option> { Some(config.empty_file_digest()) } @@ -148,9 +154,7 @@ impl FileDigestConfig { impl FileDigest { /// Obtain the digest of the file if you can. pub fn from_file(file: &AbsPath, config: FileDigestConfig) -> anyhow::Result { - static DISABLE_FILE_ATTR: EnvHelper = EnvHelper::new("BUCK2_DISABLE_FILE_ATTR"); - - if !DISABLE_FILE_ATTR.get_copied()?.unwrap_or_default() { + if !buck2_env_anyhow!("BUCK2_DISABLE_FILE_ATTR", bool)? { if let Some(digest) = Self::from_file_attr(file, config) { return Ok(digest); } @@ -219,7 +223,7 @@ impl FileDigest { /// Stores the relevant metadata for a file. // New fields should be added as needed, and unused fields removed. 
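`get_or_init_fbcode_globals` is the standard `OnceLock` memoization pattern; in miniature:

```rust
use std::sync::OnceLock;

// Any number of threads may race here; exactly one runs the initializer,
// the rest block until it finishes and then all observe the same value.
fn global_token() -> u64 {
    static T: OnceLock<u64> = OnceLock::new();
    *T.get_or_init(|| 42) // stand-in for the real (fork-unsafe) initialization
}
```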
#[derive(Debug, Dupe, Hash, PartialEq, Eq, Clone, Display, Allocative)] -#[display(fmt = "File({})", digest)] +#[display("File(digest={}, is_executable={})", digest, is_executable)] pub struct FileMetadata { pub digest: TrackedFileDigest, pub is_executable: bool, @@ -330,7 +334,7 @@ impl From for PathMetadataOrRedirection { } #[async_trait] -pub trait FileOps: Allocative + Send + Sync { +pub trait FileOps: Send + Sync { async fn read_file_if_exists( &self, path: CellPathRef<'async_trait>, @@ -339,7 +343,8 @@ pub trait FileOps: Allocative + Send + Sync { /// Return the list of file outputs, sorted. async fn read_dir(&self, path: CellPathRef<'async_trait>) -> anyhow::Result; - async fn is_ignored(&self, path: CellPathRef<'async_trait>) -> anyhow::Result; + async fn is_ignored(&self, path: CellPathRef<'async_trait>) + -> anyhow::Result; async fn read_path_metadata_if_exists( &self, @@ -347,6 +352,8 @@ pub trait FileOps: Allocative + Send + Sync { ) -> anyhow::Result>; fn eq_token(&self) -> PartialEqAny; + + async fn buildfiles<'a>(&self, cell: CellName) -> anyhow::Result>; } impl dyn FileOps + '_ { @@ -381,20 +388,31 @@ pub mod testing { use async_trait::async_trait; use buck2_core::cells::cell_path::CellPath; use buck2_core::cells::cell_path::CellPathRef; + use buck2_core::cells::name::CellName; + use buck2_core::cells::paths::CellRelativePath; + use buck2_core::fs::paths::file_name::FileNameBuf; use cmp_any::PartialEqAny; + use dice::testing::DiceBuilder; use dupe::Dupe; use itertools::Itertools; use crate::cas_digest::CasDigestConfig; + use crate::dice::file_ops::delegate::testing::FileOpsKey; + use crate::dice::file_ops::delegate::testing::FileOpsValue; + use crate::dice::file_ops::delegate::FileOpsDelegate; + use crate::dice::file_ops::delegate::FileOpsDelegateWithIgnores; + use crate::dice::file_ops::CheckIgnores; use crate::external_symlink::ExternalSymlink; use crate::file_ops::FileMetadata; use crate::file_ops::FileOps; use crate::file_ops::FileType; + use crate::file_ops::RawDirEntry; use crate::file_ops::RawPathMetadata; use crate::file_ops::RawSymlink; use crate::file_ops::ReadDirOutput; use crate::file_ops::SimpleDirEntry; use crate::file_ops::TrackedFileDigest; + use crate::ignores::file_ignores::FileIgnoreResult; enum TestFileOpsEntry { File(String /*data*/, FileMetadata), @@ -405,18 +423,48 @@ pub mod testing { #[derive(Allocative)] pub struct TestFileOps { #[allocative(skip)] - entries: BTreeMap, + entries: Arc>, } impl TestFileOps { - fn new(entries: BTreeMap) -> Self { - let mut file_ops = Self { - entries: BTreeMap::new(), - }; - for (path, entry) in entries { - file_ops.add_entry(path, entry); + fn new(inputs: BTreeMap) -> Self { + let mut entries = BTreeMap::new(); + for (path, entry) in inputs { + let mut file_type = match entry { + TestFileOpsEntry::Directory(..) => FileType::Directory, + TestFileOpsEntry::ExternalSymlink(..) => FileType::Symlink, + TestFileOpsEntry::File(..) 
=> FileType::File, + }; + // make sure the test setup is correct and concise + assert!( + entries.insert(path.to_owned(), entry).is_none(), + "Adding `{}`, it already exists.", + path + ); + + let mut path = path.as_ref(); + + // now add to / create the parent directories + while let (Some(dir), Some(name)) = (path.parent(), path.path().file_name()) { + let dir_entry = entries + .entry(dir.to_owned()) + .or_insert_with(|| TestFileOpsEntry::Directory(BTreeSet::new())); + match dir_entry { + TestFileOpsEntry::Directory(listing) => { + listing.insert(SimpleDirEntry { + file_type, + file_name: name.to_owned(), + }); + file_type = FileType::Directory; + path = dir; + } + _ => panic!("Adding `{}`, but `{}` exists and is not a dir", path, dir), + }; + } + } + TestFileOps { + entries: Arc::new(entries), } - file_ops } pub fn new_with_files(files: BTreeMap) -> Self { @@ -462,39 +510,31 @@ pub mod testing { ) } - fn add_entry(&mut self, path: CellPath, entry: TestFileOpsEntry) { - let mut file_type = match entry { - TestFileOpsEntry::Directory(..) => FileType::Directory, - TestFileOpsEntry::ExternalSymlink(..) => FileType::Symlink, - TestFileOpsEntry::File(..) => FileType::File, - }; - // make sure the test setup is correct and concise - assert!( - self.entries.insert(path.to_owned(), entry).is_none(), - "Adding `{}`, it already exists.", - path - ); - - let mut path = path.as_ref(); - - // now add to / create the parent directories - while let (Some(dir), Some(name)) = (path.parent(), path.path().file_name()) { - let dir_entry = self - .entries - .entry(dir.to_owned()) - .or_insert_with(|| TestFileOpsEntry::Directory(BTreeSet::new())); - match dir_entry { - TestFileOpsEntry::Directory(listing) => { - listing.insert(SimpleDirEntry { - file_type, - file_name: name.to_owned(), - }); - file_type = FileType::Directory; - path = dir; - } - _ => panic!("Adding `{}`, but `{}` exists and is not a dir", path, dir), - }; - } + pub fn mock_in_cell(&self, cell: CellName, builder: DiceBuilder) -> DiceBuilder { + let data = Ok(FileOpsValue(FileOpsDelegateWithIgnores::new( + None, + Arc::new(TestCellFileOps( + cell, + Self { + entries: Arc::clone(&self.entries), + }, + )), + ))); + builder + .mock_and_return( + FileOpsKey { + cell, + check_ignores: CheckIgnores::Yes, + }, + data.dupe(), + ) + .mock_and_return( + FileOpsKey { + cell, + check_ignores: CheckIgnores::No, + }, + data, + ) } } @@ -543,8 +583,56 @@ pub mod testing { }) } - async fn is_ignored(&self, _path: CellPathRef<'async_trait>) -> anyhow::Result { - Ok(false) + async fn is_ignored( + &self, + _path: CellPathRef<'async_trait>, + ) -> anyhow::Result { + Ok(FileIgnoreResult::Ok) + } + + fn eq_token(&self) -> PartialEqAny { + PartialEqAny::always_false() + } + + async fn buildfiles<'a>(&self, _cell: CellName) -> anyhow::Result> { + Ok(Arc::from_iter([FileNameBuf::unchecked_new("BUCK")])) + } + } + + pub struct TestCellFileOps(CellName, TestFileOps); + + #[async_trait] + impl FileOpsDelegate for TestCellFileOps { + async fn read_file_if_exists( + &self, + path: &'async_trait CellRelativePath, + ) -> anyhow::Result> { + let path = CellPath::new(self.0, path.to_owned()); + FileOps::read_file_if_exists(&self.1, path.as_ref()).await + } + + /// Return the list of file outputs, sorted. 
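With directory synthesis folded into `TestFileOps::new` and the per-cell `FileOpsKey` mocks installed by `mock_in_cell`, a test needs only a flat file map. A hedged setup sketch (the `testing_new` constructors and `DiceBuilder::new` are the usual buck2/dice test helpers, assumed here rather than taken from this diff):

```rust
let files = BTreeMap::from([(
    CellPath::testing_new("cell//dir/file.txt"),
    "contents".to_owned(),
)]);
let ops = TestFileOps::new_with_files(files); // parent dirs are synthesized
let builder = ops.mock_in_cell(CellName::testing_new("cell"), DiceBuilder::new());
```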
+ async fn read_dir( + &self, + path: &'async_trait CellRelativePath, + ) -> anyhow::Result> { + let path = CellPath::new(self.0, path.to_owned()); + let simple_entries = FileOps::read_dir(&self.1, path.as_ref()).await?.included; + Ok(simple_entries + .iter() + .map(|e| RawDirEntry { + file_name: e.file_name.clone().into_inner(), + file_type: e.file_type.clone(), + }) + .collect()) + } + + async fn read_path_metadata_if_exists( + &self, + path: &'async_trait CellRelativePath, + ) -> anyhow::Result> { + let path = CellPath::new(self.0, path.to_owned()); + FileOps::read_path_metadata_if_exists(&self.1, path.as_ref()).await } fn eq_token(&self) -> PartialEqAny { diff --git a/app/buck2_common/src/global_cfg_options.rs b/app/buck2_common/src/global_cfg_options.rs new file mode 100644 index 0000000000000..0ad9f1b0ab976 --- /dev/null +++ b/app/buck2_common/src/global_cfg_options.rs @@ -0,0 +1,22 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::sync::Arc; + +use allocative::Allocative; +use buck2_core::target::label::label::TargetLabel; +use dupe::Dupe; + +#[derive( + Default, Debug, Dupe, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Allocative +)] +pub struct GlobalCfgOptions { + pub target_platform: Option, + pub cli_modifiers: Arc>, +} diff --git a/app/buck2_common/src/home_buck_tmp.rs b/app/buck2_common/src/home_buck_tmp.rs index 4d32266dd995f..a59f3ecdc0677 100644 --- a/app/buck2_common/src/home_buck_tmp.rs +++ b/app/buck2_common/src/home_buck_tmp.rs @@ -16,13 +16,10 @@ use buck2_core::fs::paths::file_name::FileName; use once_cell::sync::Lazy; use crate::invocation_roots::home_buck_dir; -use crate::result::SharedResult; -use crate::result::ToSharedResultExt; /// `~/.buck/tmp` after old files removed. /// /// We use this directory when we need tmp dir with short file names (to connect to unix socket). -#[allow(clippy::needless_borrow)] // False positive. pub fn home_buck_tmp_dir() -> anyhow::Result<&'static AbsNormPath> { fn remove_old_files(tmp_dir: &AbsNormPath) -> anyhow::Result<()> { let mut now = None; @@ -55,7 +52,8 @@ pub fn home_buck_tmp_dir() -> anyhow::Result<&'static AbsNormPath> { Ok(tmp_dir) } - static DIR: Lazy> = Lazy::new(|| find_dir().shared_error()); + static DIR: Lazy> = + Lazy::new(|| find_dir().map_err(buck2_error::Error::from)); - Ok(&Lazy::force(&DIR).as_ref()?) + Ok(&Lazy::force(&DIR).as_ref().map_err(dupe::Dupe::dupe)?) } diff --git a/app/buck2_common/src/http.rs b/app/buck2_common/src/http.rs new file mode 100644 index 0000000000000..4f1285eec7c36 --- /dev/null +++ b/app/buck2_common/src/http.rs @@ -0,0 +1,37 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use buck2_http::HttpClient; +use dice::UserComputationData; +use dupe::Dupe; + +/// Dice implementations so we can pass along the HttpClient to various subsystems +/// that need to use it (Materializer, RunActions, etc). 
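The `home_buck_tmp` change above shows the cached-fallible-result idiom that replaces `SharedResult`: compute once in a static, and on failure hand every caller a cheap duplicate of the error. In miniature (with `Clone` standing in for `Dupe`):

```rust
use once_cell::sync::Lazy;

#[derive(Clone, Debug)]
struct InitError(String); // stand-in for a dupe-able error type

fn find_dir() -> Result<String, InitError> {
    Ok("/tmp/example".to_owned()) // stand-in for the real lookup
}

fn cached_dir() -> Result<&'static str, InitError> {
    static DIR: Lazy<Result<String, InitError>> = Lazy::new(find_dir);
    Lazy::force(&DIR)
        .as_ref()
        .map(|s| s.as_str())
        .map_err(|e| e.clone())
}
```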
+pub trait HasHttpClient { + fn get_http_client(&self) -> HttpClient; +} + +pub trait SetHttpClient { + fn set_http_client(&mut self, client: HttpClient); +} + +impl HasHttpClient for UserComputationData { + fn get_http_client(&self) -> HttpClient { + self.data + .get::() + .expect("HttpClient should be set") + .dupe() + } +} + +impl SetHttpClient for UserComputationData { + fn set_http_client(&mut self, client: HttpClient) { + self.data.set(client); + } +} diff --git a/app/buck2_common/src/http/client/builder.rs b/app/buck2_common/src/http/client/builder.rs deleted file mode 100644 index c790c8e85b502..0000000000000 --- a/app/buck2_common/src/http/client/builder.rs +++ /dev/null @@ -1,500 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::path::Path; -use std::sync::Arc; -use std::time::Duration; - -use anyhow::Context; -use buck2_core::is_open_source; -use hyper::client::HttpConnector; -use hyper::service::Service; -use hyper::Body; -use hyper::Uri; -use hyper_proxy::Proxy; -use hyper_proxy::ProxyConnector; -use hyper_rustls::HttpsConnector; -use hyper_rustls::HttpsConnectorBuilder; -use hyper_timeout::TimeoutConnector; -use rustls::ClientConfig; -use tokio::io::AsyncRead; -use tokio::io::AsyncWrite; -use tokio_rustls::TlsConnector; - -use super::HttpClient; -use super::RequestClient; -use crate::http::proxy; -use crate::http::stats::HttpNetworkStats; -use crate::http::tls; -use crate::http::x2p; -use crate::legacy_configs::init::DaemonStartupConfig; -use crate::legacy_configs::init::Timeout; - -/// Support following up to 10 redirects, after which a redirected request will -/// error out. -const DEFAULT_MAX_REDIRECTS: usize = 10; -const DEFAULT_CONNECT_TIMEOUT_MS: u64 = 5000; -const DEFAULT_READ_TIMEOUT_MS: u64 = 10000; - -#[derive(Clone, Debug, Default, PartialEq)] -struct TimeoutConfig { - connect_timeout: Option, - read_timeout: Option, - write_timeout: Option, -} - -impl TimeoutConfig { - fn to_connector(&self, connector: C) -> TimeoutConnector - where - C: Service + Send, - C::Response: AsyncRead + AsyncWrite + Send + Unpin, - C::Future: Send + 'static, - C::Error: Into>, - { - let mut timeout_connector = TimeoutConnector::new(connector); - timeout_connector.set_connect_timeout(self.connect_timeout); - timeout_connector.set_read_timeout(self.read_timeout); - timeout_connector.set_write_timeout(self.write_timeout); - timeout_connector - } -} - -pub struct HttpClientBuilder { - tls_config: ClientConfig, - proxies: Vec, - max_redirects: Option, - supports_vpnless: bool, - timeout_config: Option, -} - -impl HttpClientBuilder { - /// General-purpose builder to get a regular HTTP client for use throughout the - /// buck2 codebase. - /// - /// This should work for internal and OSS use cases. - /// TODO(skarlage): Remove `allow_vpnless` when vpnless becomes default. 
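These accessors follow the `UserComputationData` convention used throughout buck2: set once at command start, read (and panic if absent) from any subsystem. Usage shape (client construction elided; `UserComputationData::new` is assumed to be dice's plain constructor):

```rust
let mut data = UserComputationData::new();
data.set_http_client(client);
// ...later, in a subsystem handed this UserComputationData:
let client = data.get_http_client(); // panics if set_http_client was never called
```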
- pub fn with_sensible_defaults(allow_vpnless: bool) -> anyhow::Result { - let mut builder = Self::https_with_system_roots()?; - if is_open_source() { - tracing::debug!("Using OSS client"); - builder.with_proxy_from_env()?; - } else if allow_vpnless && x2p::supports_vpnless() { - tracing::debug!("Using vpnless client"); - let proxy = x2p::find_proxy()?.context("Expected unix domain socket or http proxy port for x2p client but did not find either")?; - builder.with_x2p_proxy(proxy); - } else if let Ok(Some(cert_path)) = tls::find_internal_cert() { - tracing::debug!("Using internal https client"); - builder.with_client_auth_cert(cert_path)?; - } else { - tracing::debug!("Using default https client"); - } - - Ok(builder) - } - - /// Creates a barebones https client using system roots for TLS authentication. - pub fn https_with_system_roots() -> anyhow::Result { - let tls_config = tls::tls_config_with_system_roots()?; - Ok(Self { - tls_config, - proxies: Vec::new(), - max_redirects: None, - supports_vpnless: false, - timeout_config: None, - }) - } - - /// Customize an http client based on http.* legacy buckconfigs. - pub fn from_startup_config(config: &DaemonStartupConfig) -> anyhow::Result { - let mut builder = Self::with_sensible_defaults(config.allow_vpnless)?; - builder.with_max_redirects(config.http.max_redirects.unwrap_or(DEFAULT_MAX_REDIRECTS)); - match config.http.connect_timeout() { - Timeout::Value(d) => { - builder.with_connect_timeout(Some(d)); - } - Timeout::Default => { - builder - .with_connect_timeout(Some(Duration::from_millis(DEFAULT_CONNECT_TIMEOUT_MS))); - } - _ => {} - } - match config.http.read_timeout() { - Timeout::Value(d) => { - builder.with_read_timeout(Some(d)); - } - Timeout::Default => { - builder.with_read_timeout(Some(Duration::from_millis(DEFAULT_READ_TIMEOUT_MS))); - } - _ => {} - } - match config.http.write_timeout() { - Timeout::Value(d) => { - builder.with_write_timeout(Some(d)); - } - _ => {} - } - - Ok(builder) - } - - pub fn with_tls_config(&mut self, tls_config: ClientConfig) -> &mut Self { - self.tls_config = tls_config; - self - } - - pub fn with_client_auth_cert>(&mut self, path: P) -> anyhow::Result<&mut Self> { - let tls_config = tls::tls_config_with_single_cert(path.as_ref(), path.as_ref())?; - Ok(self.with_tls_config(tls_config)) - } - - pub fn with_proxy(&mut self, proxy: Proxy) -> &mut Self { - self.proxies.push(proxy); - self - } - - pub fn with_x2p_proxy(&mut self, proxy: Proxy) -> &mut Self { - self.with_proxy(proxy).supports_vpnless(); - self - } - - pub fn with_proxy_from_env(&mut self) -> anyhow::Result<&mut Self> { - if let Some(proxy) = proxy::https_proxy_from_env()? { - self.with_proxy(proxy); - } - if let Some(proxy) = proxy::http_proxy_from_env()? 
{ - self.with_proxy(proxy); - } - Ok(self) - } - - pub fn with_connect_timeout(&mut self, connect_timeout: Option) -> &mut Self { - if let Some(timeout_config) = &mut self.timeout_config { - timeout_config.connect_timeout = connect_timeout; - } else { - self.timeout_config = Some(TimeoutConfig { - connect_timeout, - read_timeout: None, - write_timeout: None, - }); - } - self - } - - pub fn with_read_timeout(&mut self, read_timeout: Option) -> &mut Self { - if let Some(timeout_config) = &mut self.timeout_config { - timeout_config.read_timeout = read_timeout; - } else { - self.timeout_config = Some(TimeoutConfig { - read_timeout, - connect_timeout: None, - write_timeout: None, - }); - } - self - } - - pub fn with_write_timeout(&mut self, write_timeout: Option) -> &mut Self { - if let Some(timeout_config) = &mut self.timeout_config { - timeout_config.write_timeout = write_timeout; - } else { - self.timeout_config = Some(TimeoutConfig { - write_timeout, - connect_timeout: None, - read_timeout: None, - }); - } - self - } - - pub fn with_max_redirects(&mut self, max_redirects: usize) -> &mut Self { - self.max_redirects = Some(max_redirects); - self - } - - fn supports_vpnless(&mut self) -> &mut Self { - self.supports_vpnless = true; - self - } - - fn build_inner(&self) -> Arc { - match (self.proxies.as_slice(), &self.timeout_config) { - // Construct x2p unix socket client. - // Note: This ignores (and does not require) the TLS config. - #[cfg(unix)] - (proxies @ [_, ..], Some(timeout_config)) if let Some(unix_socket) = find_unix_proxy(proxies) => { - let timeout_connector = timeout_config.to_connector(hyper_unix_connector::UnixClient); - let proxy_connector = build_proxy_connector(&[unix_socket.clone()], timeout_connector, None); - Arc::new(hyper::Client::builder().build::<_, Body>(proxy_connector)) - } - #[cfg(unix)] - (proxies @ [_, ..], None) if let Some(unix_socket) = find_unix_proxy(proxies) => { - let proxy_connector = build_proxy_connector(&[unix_socket.clone()], hyper_unix_connector::UnixClient, None); - Arc::new(hyper::Client::builder().build::<_, Body>(proxy_connector)) - }, - - // Construct x2p http proxy client. - (proxies @ [_, ..], Some(timeout_config)) if self.supports_vpnless => { - let mut http_connector = HttpConnector::new(); - // When talking to local x2pagent proxy, only http is supported. - http_connector.enforce_http(true); - let timeout_connector = timeout_config.to_connector(http_connector); - let proxy_connector = build_proxy_connector(proxies, timeout_connector, None); - Arc::new(hyper::Client::builder().build::<_, Body>(proxy_connector)) - } - (proxies @ [_, ..], None) if self.supports_vpnless => { - let mut http_connector = HttpConnector::new(); - // When talking to local x2pagent proxy, only http is supported. - http_connector.enforce_http(true); - let proxy_connector = build_proxy_connector(proxies, http_connector, None); - Arc::new(hyper::Client::builder().build::<_, Body>(proxy_connector)) - } - - // Proxied http client with TLS. - (proxies @ [_, ..], Some(timeout_config)) => { - let https_connector = build_https_connector(self.tls_config.clone()); - let timeout_connector = timeout_config.to_connector(https_connector); - // Re-use TLS config from https connection for communication with proxies. 
- let proxy_connector = build_proxy_connector(proxies, timeout_connector, Some(self.tls_config.clone())); - Arc::new(hyper::Client::builder().build::<_, Body>(proxy_connector)) - }, - (proxies @ [_, ..], None) => { - let https_connector = build_https_connector(self.tls_config.clone()); - let proxy_connector = build_proxy_connector(proxies, https_connector, Some(self.tls_config.clone())); - Arc::new(hyper::Client::builder().build::<_, Body>(proxy_connector)) - }, - - // Client with TLS only. - ([], Some(timeout_config)) => { - let https_connector = build_https_connector(self.tls_config.clone()); - let timeout_connector = timeout_config.to_connector(https_connector); - Arc::new(hyper::Client::builder().build::<_, Body>(timeout_connector)) - }, - ([], None) => { - let https_connector = build_https_connector(self.tls_config.clone()); - Arc::new(hyper::Client::builder().build::<_, Body>(https_connector)) - }, - } - } - - pub fn build(&self) -> HttpClient { - HttpClient { - inner: self.build_inner(), - max_redirects: self.max_redirects, - supports_vpnless: self.supports_vpnless, - stats: HttpNetworkStats::new(), - } - } -} - -fn build_https_connector(tls_config: ClientConfig) -> HttpsConnector { - HttpsConnectorBuilder::new() - .with_tls_config(tls_config) - .https_or_http() - .enable_http1() - .enable_http2() - .build() -} - -/// Build a proxy connector using `proxies`, wrapping underlying `connector`, -/// and optionally using `tls_config` to secure communications with the proxy. -/// -/// Note: Not all proxy connectors built by this client need TLS communication -/// with the proxy, e.g. if the proxy is on localhost. -fn build_proxy_connector( - proxies: &[Proxy], - connector: C, - tls_config: Option, -) -> ProxyConnector -where - C: Service + Send, - C::Response: AsyncRead + AsyncWrite + Send + Unpin, - C::Future: Send + 'static, - C::Error: Into>, -{ - // Note: we use the `unsecured()` constructor here, but all that does is - // not load the default TLS config. You can optionally pass your own tls - // config if needed. - let mut proxy_connector = ProxyConnector::unsecured(connector); - proxy_connector.extend_proxies(proxies.iter().cloned()); - if let Some(tls_config) = tls_config { - proxy_connector.set_tls(Some(TlsConnector::from(Arc::new(tls_config)))); - } - proxy_connector -} - -/// Helper function to find any proxies with unix:// as the scheme (which -/// indicates we want to proxy through a unix domain socket). -/// -/// Note: This _does_ compile on non-unix, but is only used at runtime in unix; -/// adding this to silence dead code warnings. 
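// Hedged sketch of the connector-stacking idea in `build_proxy_connector`
// above: an outer `ProxyConnector` decides per-URI whether to route via a
// proxy, wrapping whatever inner connector (plain TCP, TLS-enabled, or
// timeout-wrapped) actually opens the connection. `Intercept::Http` is one
// of hyper-proxy's stock predicates; the proxy URI is illustrative.
use hyper::client::HttpConnector;
use hyper_proxy::{Intercept, Proxy, ProxyConnector};

fn proxied_connector() -> anyhow::Result<ProxyConnector<HttpConnector>> {
    let proxy = Proxy::new(Intercept::Http, "http://localhost:8080".parse()?);
    // `unsecured` skips loading a default TLS config, as the deleted helper
    // notes; TLS to the proxy can still be added afterwards via `set_tls`.
    let mut connector = ProxyConnector::unsecured(HttpConnector::new());
    connector.add_proxy(proxy);
    Ok(connector)
}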
-#[cfg(unix)] -fn find_unix_proxy(proxies: &[Proxy]) -> Option<&Proxy> { - proxies - .iter() - .find(|proxy| proxy.uri().scheme_str() == Some("unix")) -} - -#[cfg(test)] -mod tests { - use hyper_proxy::Intercept; - use indoc::indoc; - - use super::*; - use crate::legacy_configs::testing::parse; - - #[test] - fn test_default_builder() -> anyhow::Result<()> { - let builder = HttpClientBuilder::https_with_system_roots()?; - - assert_eq!(None, builder.max_redirects); - assert!(builder.proxies.is_empty()); - assert!(!builder.supports_vpnless); - Ok(()) - } - - #[test] - fn test_supports_vpnless_set_true() -> anyhow::Result<()> { - let mut builder = HttpClientBuilder::https_with_system_roots()?; - builder.supports_vpnless(); - - assert!(builder.supports_vpnless); - Ok(()) - } - - #[test] - fn test_with_max_redirects_overrides_default() -> anyhow::Result<()> { - let mut builder = HttpClientBuilder::https_with_system_roots()?; - builder.with_max_redirects(5); - - assert_eq!(5, builder.max_redirects.unwrap()); - Ok(()) - } - - #[test] - fn test_builder_with_proxy_adds_proxy() -> anyhow::Result<()> { - let proxy = Proxy::new(Intercept::All, "http://localhost:12345".try_into()?); - let mut builder = HttpClientBuilder::https_with_system_roots()?; - builder.with_proxy(proxy); - - assert_eq!(1, builder.proxies.len()); - Ok(()) - } - - #[test] - fn test_set_connect_timeout() -> anyhow::Result<()> { - let mut builder = HttpClientBuilder::https_with_system_roots()?; - builder.with_connect_timeout(Some(Duration::from_millis(1000))); - - assert_eq!( - Some(TimeoutConfig { - connect_timeout: Some(Duration::from_millis(1000)), - read_timeout: None, - write_timeout: None, - }), - builder.timeout_config, - ); - - Ok(()) - } - - #[test] - fn test_set_connect_and_read_timeouts() -> anyhow::Result<()> { - let mut builder = HttpClientBuilder::https_with_system_roots()?; - builder - .with_connect_timeout(Some(Duration::from_millis(1000))) - .with_read_timeout(Some(Duration::from_millis(2000))); - - assert_eq!( - Some(TimeoutConfig { - connect_timeout: Some(Duration::from_millis(1000)), - read_timeout: Some(Duration::from_millis(2000)), - write_timeout: None, - }), - builder.timeout_config, - ); - Ok(()) - } - - #[test] - fn test_from_startup_config_defaults_internal() -> anyhow::Result<()> { - let builder = - HttpClientBuilder::from_startup_config(&DaemonStartupConfig::testing_empty())?; - assert_eq!(DEFAULT_MAX_REDIRECTS, builder.max_redirects.unwrap()); - assert!(!builder.supports_vpnless); - assert_eq!( - Some(TimeoutConfig { - connect_timeout: Some(Duration::from_millis(DEFAULT_CONNECT_TIMEOUT_MS)), - read_timeout: Some(Duration::from_millis(DEFAULT_READ_TIMEOUT_MS)), - write_timeout: None, - }), - builder.timeout_config - ); - - Ok(()) - } - - #[test] - fn test_from_startup_config_overrides() -> anyhow::Result<()> { - let config = parse( - &[( - "/config", - indoc!( - r#" - [http] - max_redirects = 5 - connect_timeout_ms = 10 - write_timeout_ms = 5 - "# - ), - )], - "/config", - )?; - let startup_config = DaemonStartupConfig::new(&config)?; - let builder = HttpClientBuilder::from_startup_config(&startup_config)?; - assert_eq!(5, builder.max_redirects.unwrap()); - assert_eq!( - Some(TimeoutConfig { - connect_timeout: Some(Duration::from_millis(10)), - read_timeout: Some(Duration::from_millis(DEFAULT_READ_TIMEOUT_MS)), - write_timeout: Some(Duration::from_millis(5)), - }), - builder.timeout_config - ); - - Ok(()) - } - - #[test] - fn test_from_startup_config_zero_for_unset() -> anyhow::Result<()> { - let config = 
parse( - &[( - "/config", - indoc!( - r#" - [http] - connect_timeout_ms = 0 - "#, - ), - )], - "/config", - )?; - let startup_config = DaemonStartupConfig::new(&config)?; - let builder = HttpClientBuilder::from_startup_config(&startup_config)?; - assert_eq!( - Some(TimeoutConfig { - connect_timeout: None, - read_timeout: Some(Duration::from_millis(DEFAULT_READ_TIMEOUT_MS)), - write_timeout: None, - }), - builder.timeout_config - ); - - Ok(()) - } -} diff --git a/app/buck2_common/src/http/client/mod.rs b/app/buck2_common/src/http/client/mod.rs deleted file mode 100644 index 7bf82281eaef6..0000000000000 --- a/app/buck2_common/src/http/client/mod.rs +++ /dev/null @@ -1,968 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::sync::Arc; - -use allocative::Allocative; -use bytes::Bytes; -use dupe::Dupe; -use futures::stream::BoxStream; -use futures::StreamExt; -use futures::TryStreamExt; -use http::request::Builder; -use http::uri::Scheme; -use http::Method; -use http::Uri; -use hyper::client::connect::Connect; -use hyper::client::ResponseFuture; -use hyper::Body; -use hyper::Request; -use hyper::Response; -use tokio::io::AsyncReadExt; -use tokio_util::io::StreamReader; - -use crate::http::redirect::PendingRequest; -use crate::http::redirect::RedirectEngine; -use crate::http::stats::CountingStream; -use crate::http::stats::HttpNetworkStats; -use crate::http::x2p::X2PAgentError; -use crate::http::HttpError; - -mod builder; -pub use builder::HttpClientBuilder; - -const DEFAULT_USER_AGENT: &str = "Buck2"; - -#[derive(Allocative, Clone, Dupe)] -pub struct HttpClient { - // hyper::Client doesn't impl Allocative. - #[allocative(skip)] - inner: Arc, - max_redirects: Option, - supports_vpnless: bool, - stats: HttpNetworkStats, -} - -impl HttpClient { - fn request_builder(&self, uri: &str) -> Builder { - Request::builder() - .uri(uri) - .header(http::header::USER_AGENT, DEFAULT_USER_AGENT) - } - - /// Send a HEAD request. Assumes no body will be returned. If one is returned, it will be ignored. - pub async fn head(&self, uri: &str) -> Result, HttpError> { - let req = self - .request_builder(uri) - .method(Method::HEAD) - .body(Bytes::new()) - .map_err(HttpError::BuildRequest)?; - self.request(req).await.map(|resp| resp.map(|_| ())) - } - - /// Send a GET request. 
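// Minimal usage sketch for the client being deleted in this hunk (per this
// diff it now lives in the buck2_http crate): build with system TLS roots,
// issue a GET, and drain the streaming body. The URL and byte counting are
// illustrative; `HttpClientBuilder` is the type defined in this module.
async fn fetch(url: &str) -> anyhow::Result<()> {
    use futures::StreamExt;

    let client = HttpClientBuilder::https_with_system_roots()?.build();
    let mut resp = client.get(url).await?;
    let mut total = 0usize;
    // The response body is a BoxStream of byte chunks.
    while let Some(chunk) = resp.body_mut().next().await {
        total += chunk?.len();
    }
    println!("read {} bytes, status {}", total, resp.status());
    Ok(())
}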
- pub async fn get( - &self, - uri: &str, - ) -> Result>>, HttpError> { - let req = self - .request_builder(uri) - .method(Method::GET) - .body(Bytes::new()) - .map_err(HttpError::BuildRequest)?; - self.request(req).await - } - - pub async fn post( - &self, - uri: &str, - body: Bytes, - headers: Vec<(String, String)>, - ) -> Result>>, HttpError> { - let mut builder = self.request_builder(uri).method(Method::POST); - for (name, value) in headers { - builder = builder.header(name, value); - } - let req = builder.body(body).map_err(HttpError::BuildRequest)?; - self.request(req).await - } - - pub async fn put( - &self, - uri: &str, - body: Bytes, - headers: Vec<(String, String)>, - ) -> Result>>, HttpError> { - let mut builder = self.request_builder(uri).method(Method::PUT); - for (name, value) in headers { - builder = builder.header(name, value); - } - let req = builder.body(body).map_err(HttpError::BuildRequest)?; - self.request(req).await - } - - async fn send_request_impl( - &self, - mut request: Request, - ) -> Result>>, HttpError> { - let uri = request.uri().to_string(); - let now = tokio::time::Instant::now(); - - // x2p requires scheme to be http since it handles all TLS. - if self.supports_vpnless() { - tracing::debug!( - "http: request: changing scheme for '{}' to http for vpnless", - request.uri() - ); - change_scheme_to_http(&mut request); - } - let resp = self.inner.request(request).await.map_err(|e| { - if is_hyper_error_due_to_timeout(&e) { - HttpError::Timeout { - uri, - duration: now.elapsed().as_secs(), - } - } else { - HttpError::SendRequest { uri, source: e } - } - })?; - Ok( - resp.map(|body| { - CountingStream::new(body, self.stats.downloaded_bytes().dupe()).boxed() - }), - ) - } - - /// Send a generic request. - async fn request( - &self, - request: Request, - ) -> Result>>, HttpError> { - let pending_request = PendingRequest::from_request(&request); - let uri = request.uri().clone(); - tracing::debug!("http: request: {:?}", request); - let resp = self.send_request_impl(request).await?; - tracing::debug!("http: response: {:?}", resp.status()); - - // Handle redirects up to self.max_redirects times. - let resp = if let Some(max_redirects) = self.max_redirects { - let redirect_engine = RedirectEngine::new(max_redirects, pending_request, resp); - redirect_engine - .handle_redirects(|req| self.send_request_impl(req)) - .await? - } else { - resp - }; - - if !resp.status().is_success() { - // Handle x2p errors as indicated by headers. - if let Some(x2p_err) = X2PAgentError::from_headers(&uri, resp.headers()) { - return Err(HttpError::X2P { - uri: uri.to_string(), - source: x2p_err, - }); - } - - let status = resp.status(); - let text = read_truncated_error_response(resp).await; - return Err(HttpError::Status { - status, - uri: uri.to_string(), - text, - }); - } - - Ok(resp) - } - - pub fn stats(&self) -> &HttpNetworkStats { - &self.stats - } - - /// Whether this client supports vpnless operation. When set, will make requests - /// to the `vpnless_url` attribute in the `download_file` action rather than the - /// normal `url` attribute. - pub fn supports_vpnless(&self) -> bool { - self.supports_vpnless - } -} - -/// Trait wrapper around a hyper::Client because hyper::Client is parameterized by -/// the connector. At runtime, we want to pick different connectors (e.g. HttpsConnector, -/// ProxyConnector>, etc); thus wrap the client so we can switch -/// out the concrete type without exposing implementation details to callers. 
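// The comment above describes a type-erasure pattern; here it is spelled out
// as a standalone sketch. hyper's `Client<C>` is generic over its connector,
// so the concrete client is hidden behind a small object-safe trait and
// stored as `Arc<dyn ...>`. Names mirror the deleted code but this is an
// illustration, not the exact crate API (the real trait uses `Request<Bytes>`
// and maps into `Body`).
use std::sync::Arc;

use hyper::client::connect::Connect;
use hyper::client::ResponseFuture;
use hyper::Body;
use hyper::Request;

trait RequestClient: Send + Sync {
    fn request(&self, request: Request<Body>) -> ResponseFuture;
}

impl<C> RequestClient for hyper::Client<C>
where
    C: Connect + Clone + Send + Sync + 'static,
{
    fn request(&self, request: Request<Body>) -> ResponseFuture {
        // Call the inherent method explicitly to avoid recursing into the
        // trait method.
        hyper::Client::request(self, request)
    }
}

fn erase<C: Connect + Clone + Send + Sync + 'static>(
    client: hyper::Client<C>,
) -> Arc<dyn RequestClient> {
    Arc::new(client)
}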
-pub(super) trait RequestClient: Send + Sync { - fn request(&self, request: Request) -> ResponseFuture; -} - -impl RequestClient for hyper::Client -where - C: Connect + Clone + Send + Sync + 'static, -{ - fn request(&self, request: Request) -> ResponseFuture { - self.request(request.map(Body::from)) - } -} - -async fn read_truncated_error_response( - mut resp: Response>>, -) -> String { - let read = StreamReader::new( - resp.body_mut() - .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)), - ); - let mut buf = Vec::with_capacity(1024); - read.take(1024).read_to_end(&mut buf).await.map_or_else( - |e| format!("Error decoding response: {:?}", e), - |_| String::from_utf8_lossy(buf.as_ref()).into_owned(), - ) -} - -/// x2pagent proxies only speak plain HTTP, so we need to mutate requests prior -/// to sending them off. -fn change_scheme_to_http(request: &mut Request) { - let uri = request.uri().clone(); - let mut parts = uri.into_parts(); - parts.scheme = Some(Scheme::HTTP); - *request.uri_mut() = Uri::from_parts(parts).expect("Unexpected invalid URI from request"); -} - -/// Helper function to check if any error in the chain of errors produced by -/// hyper is due to a timeout. -fn is_hyper_error_due_to_timeout(e: &hyper::Error) -> bool { - use std::error::Error; - - let mut cause = e.source(); - while let Some(err) = cause { - if let Some(io_err) = err.downcast_ref::() { - if let std::io::ErrorKind::TimedOut = io_err.kind() { - return true; - } - } - cause = err.source(); - } - - false -} - -#[cfg(test)] -mod tests { - use http::StatusCode; - use httptest::matchers::*; - use httptest::responders; - use httptest::Expectation; - - use super::*; - - #[test] - fn test_change_scheme_to_http_succeeds() -> anyhow::Result<()> { - let mut request = Request::builder() - .method(Method::GET) - .uri("https://some.site/foo") - .body(Bytes::new())?; - change_scheme_to_http(&mut request); - - assert_eq!( - Scheme::HTTP, - *request - .uri() - .scheme() - .expect("should have scheme after mutating request") - ); - Ok(()) - } - - #[test] - fn test_change_scheme_to_http_no_effect() -> anyhow::Result<()> { - let uri: Uri = "http://some.site/foo".try_into()?; - let mut request = Request::builder() - .method(Method::GET) - .uri(uri.clone()) - .body(Bytes::new())?; - change_scheme_to_http(&mut request); - - assert_eq!(&uri, request.uri()); - Ok(()) - } - - #[tokio::test] - async fn test_simple_get_success() -> anyhow::Result<()> { - let test_server = httptest::Server::run(); - test_server.expect( - Expectation::matching(request::method_path("GET", "/foo")) - .respond_with(responders::status_code(200)), - ); - - let client = HttpClientBuilder::https_with_system_roots()?.build(); - let resp = client.get(&test_server.url_str("/foo")).await?; - assert_eq!(200, resp.status().as_u16()); - - Ok(()) - } - - #[tokio::test] - async fn test_simple_put_success() -> anyhow::Result<()> { - let test_server = httptest::Server::run(); - test_server.expect( - Expectation::matching(all_of![ - request::method_path("PUT", "/foo"), - request::body("Hello, world!") - ]) - .respond_with(responders::status_code(200)), - ); - - let client = HttpClientBuilder::https_with_system_roots()?.build(); - let bytes = Bytes::from_static(b"Hello, world!"); - let resp = client - .put( - &test_server.url_str("/foo"), - bytes, - vec![("key".to_owned(), "value".to_owned())], - ) - .await?; - assert_eq!(200, resp.status().as_u16()); - - Ok(()) - } - - #[tokio::test] - async fn test_simple_post_success() -> anyhow::Result<()> { - let test_server 
= httptest::Server::run(); - test_server.expect( - Expectation::matching(all_of![ - request::method_path("POST", "/foo"), - request::body("Hello, world!") - ]) - .respond_with(responders::status_code(200)), - ); - - let client = HttpClientBuilder::https_with_system_roots()?.build(); - let bytes = Bytes::from_static(b"Hello, world!"); - let resp = client - .post( - &test_server.url_str("/foo"), - bytes, - vec![("key".to_owned(), "value".to_owned())], - ) - .await?; - assert_eq!(200, resp.status().as_u16()); - - Ok(()) - } - - #[tokio::test] - async fn test_404_not_found_is_error() -> anyhow::Result<()> { - let test_server = httptest::Server::run(); - test_server.expect( - Expectation::matching(request::method_path("GET", "/foo")) - .respond_with(responders::status_code(404)), - ); - - let client = HttpClientBuilder::https_with_system_roots()?.build(); - let url = test_server.url_str("/foo"); - let result = client.get(&url).await; - assert!(result.is_err()); - if let HttpError::Status { status, uri, text } = result.as_ref().err().unwrap() { - assert_eq!(StatusCode::NOT_FOUND, *status); - assert_eq!(url.to_owned(), *uri); - assert!(text.is_empty()); - } else { - unreachable!( - "Expected HttpError::Status, got {:?}", - result.err().unwrap() - ); - } - - Ok(()) - } - - #[tokio::test] - async fn test_count_response_size() -> anyhow::Result<()> { - let test_server = httptest::Server::run(); - test_server.expect( - Expectation::matching(request::method_path("GET", "/foo")) - .times(2) - // Response body is 100 bytes in size. - .respond_with(responders::status_code(200).body(vec![0; 100])), - ); - - let client = HttpClientBuilder::https_with_system_roots()?.build(); - let mut resp = client.get(&test_server.url_str("/foo")).await?; - - // Consume the stream so we trigger a count. - while (resp.body_mut().next().await).is_some() {} - assert_eq!(100, client.stats().get_downloaded_bytes()); - - let mut resp = client.get(&test_server.url_str("/foo")).await?; - - // Consume the stream so we trigger a count. - while (resp.body_mut().next().await).is_some() {} - assert_eq!(200, client.stats().get_downloaded_bytes()); - - Ok(()) - } - - #[tokio::test] - async fn test_follows_redirects() -> anyhow::Result<()> { - let test_server = httptest::Server::run(); - // Chain of two redirects /foo -> /bar -> /baz. - test_server.expect( - Expectation::matching(request::method_path("GET", "/foo")) - .times(1) - .respond_with( - responders::status_code(302).append_header(http::header::LOCATION, "/bar"), - ), - ); - test_server.expect( - Expectation::matching(request::method_path("GET", "/bar")) - .times(1) - .respond_with( - responders::status_code(302).append_header(http::header::LOCATION, "/baz"), - ), - ); - test_server.expect( - Expectation::matching(request::method_path("GET", "/baz")) - .times(1) - .respond_with(responders::status_code(200)), - ); - - let client = HttpClientBuilder::https_with_system_roots()? - .with_max_redirects(10) - .build(); - let resp = client.get(&test_server.url_str("/foo")).await?; - assert_eq!(200, resp.status().as_u16()); - - Ok(()) - } - - #[tokio::test] - async fn test_head_changes_to_get_on_redirect() -> anyhow::Result<()> { - let test_server = httptest::Server::run(); - // Chain of two redirects /foo -> /bar -> /baz. 
- test_server.expect( - Expectation::matching(request::method_path("HEAD", "/foo")) - .times(1) - .respond_with( - responders::status_code(302).append_header(http::header::LOCATION, "/bar"), - ), - ); - test_server.expect( - Expectation::matching(request::method_path("GET", "/bar")) - .times(1) - .respond_with(responders::status_code(200)), - ); - - let client = HttpClientBuilder::https_with_system_roots()? - .with_max_redirects(10) - .build(); - let resp = client.head(&test_server.url_str("/foo")).await?; - assert_eq!(200, resp.status().as_u16()); - - Ok(()) - } - - #[tokio::test] - async fn test_post_gets_redirected() -> anyhow::Result<()> { - let test_server = httptest::Server::run(); - // Redirect /foo -> /bar - test_server.expect( - Expectation::matching(all_of![ - request::method_path("POST", "/foo"), - request::body("Hello, world!"), - ]) - .times(1) - .respond_with( - responders::status_code(307).append_header(http::header::LOCATION, "/bar"), - ), - ); - test_server.expect( - Expectation::matching(all_of![ - request::method_path("POST", "/bar"), - request::body("Hello, world!"), - request::headers(not(contains(key(hyper::header::ORIGIN.as_str())))), - request::headers(not(contains(key(hyper::header::AUTHORIZATION.as_str())))), - request::headers(not(contains(key(hyper::header::WWW_AUTHENTICATE.as_str())))), - request::headers(not(contains(key(hyper::header::COOKIE.as_str())))), - request::headers(not(contains(key( - hyper::header::PROXY_AUTHORIZATION.as_str() - )))), - ]) - .times(1) - .respond_with(responders::status_code(200)), - ); - - let client = HttpClientBuilder::https_with_system_roots()? - .with_max_redirects(10) - .build(); - let bytes = Bytes::from_static(b"Hello, world!"); - let resp = client - .post( - &test_server.url_str("/foo"), - bytes, - vec![("key".to_owned(), "value".to_owned())], - ) - .await?; - assert_eq!(200, resp.status().as_u16()); - - Ok(()) - } - - #[tokio::test] - async fn test_too_many_redirects_fails() -> anyhow::Result<()> { - let test_server = httptest::Server::run(); - // Chain of three redirects /foo -> /bar -> /baz -> /boo. - test_server.expect( - Expectation::matching(request::method_path("GET", "/foo")) - .times(1) - .respond_with( - responders::status_code(302).append_header(http::header::LOCATION, "/bar"), - ), - ); - test_server.expect( - Expectation::matching(request::method_path("GET", "/bar")) - .times(1) - .respond_with( - responders::status_code(302).append_header(http::header::LOCATION, "/baz"), - ), - ); - test_server.expect( - Expectation::matching(request::method_path("GET", "/baz")) - .times(1) - .respond_with( - responders::status_code(302).append_header(http::header::LOCATION, "/boo"), - ), - ); - test_server.expect( - Expectation::matching(request::method_path("GET", "/boo")) - .times(0) - .respond_with(responders::status_code(200)), - ); - - let client = HttpClientBuilder::https_with_system_roots()? 
- .with_max_redirects(1) - .build(); - let url = test_server.url_str("/foo"); - let result = client.get(&url).await; - if let HttpError::TooManyRedirects { uri, max_redirects } = result.as_ref().err().unwrap() { - assert_eq!(url.to_owned(), *uri); - assert_eq!(1, *max_redirects); - } else { - unreachable!( - "Expected HttpError::TooManyRedirects, got {:?}", - result.err().unwrap() - ); - } - - Ok(()) - } - - #[cfg(unix)] - mod unix { - use std::convert::Infallible; - use std::path::PathBuf; - - use anyhow::Context; - use hyper::service::make_service_fn; - use hyper::service::service_fn; - use hyper::Server; - use hyper_unix_connector::UnixConnector; - - use super::*; - - /// Conceptually similar to crate::http::tests::ProxyServer, but sets up a - /// local unix domain socket instead. - pub struct UnixSocketProxyServer { - pub socket: PathBuf, - // Need to hold a ref so when Drop runs on Self we cancel the task. - #[allow(dead_code)] - handle: tokio::task::JoinHandle<()>, - // Need to hold ref so socket doesn't get removed. - #[allow(dead_code)] - tempdir: tempfile::TempDir, - } - - impl UnixSocketProxyServer { - pub async fn new() -> anyhow::Result { - let tempdir = tempfile::tempdir()?; - let socket = tempdir.path().join("test-uds.sock"); - - let listener: UnixConnector = tokio::net::UnixListener::bind(&socket) - .context("binding to unix socket")? - .into(); - let handler_func = make_service_fn(|_conn| async move { - Ok::<_, Infallible>(service_fn(|mut req: Request| async move { - let client = hyper::Client::new(); - req.headers_mut().insert( - http::header::VIA, - http::HeaderValue::from_static("testing-proxy-server"), - ); - println!("Proxying request: {:?}", req); - client - .request(req.map(Body::from)) - .await - .context("Failed sending requeest to destination") - })) - }); - - let handle = tokio::task::spawn(async move { - println!("started proxy server"); - Server::builder(listener) - .serve(handler_func) - .await - .expect("Proxy server exited unexpectedly"); - }); - - Ok(Self { - socket, - handle, - tempdir, - }) - } - } - } - - #[cfg(unix)] - #[tokio::test] - async fn test_proxies_through_unix_socket_when_set() -> anyhow::Result<()> { - let proxy_server = unix::UnixSocketProxyServer::new().await?; - - let test_server = httptest::Server::run(); - let url = test_server.url("/foo"); - let host = url.authority().unwrap().to_string(); - test_server.expect( - Expectation::matching(all_of![ - request::method_path("GET", "/foo"), - request::headers(contains(("via", "testing-proxy-server"))), - request::headers(contains(("host", host))), - ]) - .times(1) - .respond_with(responders::status_code(200)), - ); - - let client = HttpClientBuilder::https_with_system_roots()? 
- .with_x2p_proxy(hyper_proxy::Proxy::new( - hyper_proxy::Intercept::Http, - hyper_unix_connector::Uri::new(proxy_server.socket, "/").into(), - )) - .build(); - let resp = client.get(&url.to_string()).await?; - assert_eq!(200, resp.status().as_u16()); - - Ok(()) - } - - #[tokio::test] - async fn test_x2p_error_response_is_forbidden_host() -> anyhow::Result<()> { - let test_server = httptest::Server::run(); - let url = test_server.url("/foo"); - test_server.expect( - Expectation::matching(all_of![request::method_path("GET", "/foo")]) - .times(1) - .respond_with( - responders::status_code(400) - .append_header("x-x2pagentd-error-type", "FORBIDDEN_HOST") - .append_header("x-x2pagentd-error-msg", "Nope"), - ), - ); - - let client = HttpClientBuilder::https_with_system_roots()?.build(); - let result = client.get(&url.to_string()).await; - assert!(result.is_err()); - assert!(matches!( - result, - Err(HttpError::X2P { - source: X2PAgentError::ForbiddenHost { .. }, - .. - }) - )); - - Ok(()) - } - - #[tokio::test] - async fn test_x2p_error_response_is_access_denied() -> anyhow::Result<()> { - let test_server = httptest::Server::run(); - let url = test_server.url("/foo"); - test_server.expect( - Expectation::matching(all_of![request::method_path("GET", "/foo")]) - .times(1) - .respond_with( - responders::status_code(400) - .append_header("x-fb-validated-x2pauth-decision", "deny") - .append_header("x-x2pagentd-error-msg", "Nope"), - ), - ); - - let client = HttpClientBuilder::https_with_system_roots()?.build(); - let result = client.get(&url.to_string()).await; - assert!(result.is_err()); - assert!(matches!( - result, - Err(HttpError::X2P { - source: X2PAgentError::AccessDenied { .. }, - .. - }), - )); - - Ok(()) - } - - #[tokio::test] - async fn test_x2p_error_response_is_generic_error() -> anyhow::Result<()> { - let test_server = httptest::Server::run(); - let url = test_server.url("/foo"); - test_server.expect( - Expectation::matching(all_of![request::method_path("GET", "/foo")]) - .times(1) - .respond_with( - responders::status_code(400) - .append_header("x-x2pagentd-error-msg", "Something else happened"), - ), - ); - - let client = HttpClientBuilder::https_with_system_roots()?.build(); - let result = client.get(&url.to_string()).await; - assert!(result.is_err()); - assert!(matches!( - result, - Err(HttpError::X2P { - source: X2PAgentError::Error(..), - .. - }), - )); - - Ok(()) - } -} - -// TODO(skarlage, T160529958): Debug why these tests fail on CircleCI -#[cfg(all(test, any(fbcode_build, cargo_internal_build)))] -mod proxy_tests { - use std::convert::Infallible; - use std::net::TcpListener; - use std::net::ToSocketAddrs; - use std::time::Duration; - - use anyhow::Context; - use httptest::matchers::*; - use httptest::responders; - use httptest::Expectation; - use hyper::service::make_service_fn; - use hyper::service::service_fn; - use hyper::Server; - use hyper_proxy::Intercept; - use hyper_proxy::Proxy; - - use super::*; - use crate::http::proxy::DefaultSchemeUri; - - const HEADER_SLEEP_DURATION_MS: &str = "x-buck2-test-proxy-sleep-duration-ms"; - - /// Barebones proxy server implementation that simply forwards requests onto - /// the destination server. - struct ProxyServer { - addr: std::net::SocketAddr, - // Need to hold a ref to the task so when Drop runs on Self we cancel - // the task. 
- #[allow(dead_code)] - handle: tokio::task::JoinHandle<()>, - } - - impl ProxyServer { - async fn new() -> anyhow::Result { - let proxy_server_addr = "[::1]:0".to_socket_addrs().unwrap().next().unwrap(); - let listener = - TcpListener::bind(proxy_server_addr).context("failed to bind to local address")?; - let proxy_server_addr = listener.local_addr()?; - - let make_proxy_service = make_service_fn(|_conn| async move { - Ok::<_, Infallible>(service_fn(|mut req: Request| async move { - // Sleep if requested to simulate slow reads. - if let Some(s) = req.headers().get(HEADER_SLEEP_DURATION_MS) { - let sleep_duration = - Duration::from_millis(s.to_str().unwrap().parse().unwrap()); - tokio::time::sleep(sleep_duration).await; - } - - let client = hyper::Client::new(); - req.headers_mut().insert( - http::header::VIA, - http::HeaderValue::from_static("testing-proxy-server"), - ); - println!("Proxying request: {:?}", req); - client - .request(req) - .await - .context("Failed sending requeest to destination") - })) - }); - - let handle = tokio::task::spawn(async move { - println!("started proxy server"); - Server::from_tcp(listener) - .unwrap() - .serve(make_proxy_service) - .await - .expect("Proxy server exited unexpectedly"); - }); - - Ok(Self { - addr: proxy_server_addr, - handle, - }) - } - - fn uri(&self) -> anyhow::Result { - http::Uri::builder() - .scheme("http") - .authority(self.addr.to_string().as_str()) - .path_and_query("/") - .build() - .context("failed to build proxy server URI") - } - } - - #[tokio::test] - async fn test_uses_http_proxy() -> anyhow::Result<()> { - let test_server = httptest::Server::run(); - test_server.expect( - Expectation::matching(all_of![ - request::method_path("GET", "/foo"), - request::headers(contains(("via", "testing-proxy-server"))) - ]) - .times(1) - .respond_with(responders::status_code(200)), - ); - - let proxy_server = ProxyServer::new().await?; - println!("proxy_server uri: {}", proxy_server.uri()?); - - let client = HttpClientBuilder::https_with_system_roots()? - .with_proxy(Proxy::new(Intercept::Http, proxy_server.uri()?)) - .build(); - let resp = client.get(&test_server.url_str("/foo")).await?; - assert_eq!(200, resp.status().as_u16()); - - Ok(()) - } - - #[tokio::test] - async fn test_uses_http_proxy_with_no_scheme_in_proxy_uri() -> anyhow::Result<()> { - let test_server = httptest::Server::run(); - test_server.expect( - Expectation::matching(all_of![ - request::method_path("GET", "/foo"), - request::headers(contains(("via", "testing-proxy-server"))) - ]) - .times(1) - .respond_with(responders::status_code(200)), - ); - - let proxy_server = ProxyServer::new().await?; - - let authority = proxy_server.uri()?.authority().unwrap().clone(); - let proxy_uri = format!("{}:{}", authority.host(), authority.port().unwrap()); - println!("proxy_uri: {}", proxy_uri); - let client = HttpClientBuilder::https_with_system_roots()? 
- .with_proxy(Proxy::new( - Intercept::Http, - DefaultSchemeUri(proxy_uri.try_into()?).into(), - )) - .build(); - let resp = client.get(&test_server.url_str("/foo")).await?; - assert_eq!(200, resp.status().as_u16()); - - Ok(()) - } - - #[tokio::test] - async fn test_does_not_proxy_when_no_proxy_matches() -> anyhow::Result<()> { - let test_server = httptest::Server::run(); - test_server.expect( - Expectation::matching(all_of![request::method_path("GET", "/foo")]) - .times(1) - .respond_with(responders::status_code(200)), - ); - - let proxy_server = ProxyServer::new().await?; - println!("proxy_server uri: {}", proxy_server.uri()?); - - let test_server_host = test_server - .url("/") - .authority() - .unwrap() - .clone() - .host() - .to_owned(); - let no_proxy = crate::http::proxy::NoProxy::new(http::uri::Scheme::HTTP, test_server_host); - - // Don't proxy connections to test_server. - let client = HttpClientBuilder::https_with_system_roots()? - .with_proxy(Proxy::new( - no_proxy.into_proxy_intercept(), - proxy_server.uri()?, - )) - .build(); - let resp = client.get(&test_server.url_str("/foo")).await?; - assert_eq!(200, resp.status().as_u16()); - - Ok(()) - } - - #[tokio::test] - async fn test_proxies_when_no_proxy_does_not_match() -> anyhow::Result<()> { - let test_server = httptest::Server::run(); - test_server.expect( - Expectation::matching(all_of![ - request::method_path("GET", "/foo"), - request::headers(contains(("via", "testing-proxy-server"))) - ]) - .times(1) - .respond_with(responders::status_code(200)), - ); - - let proxy_server = ProxyServer::new().await?; - println!("proxy_server uri: {}", proxy_server.uri()?); - - // Don't proxy HTTPS connections to *.foobar.com - let no_proxy = crate::http::proxy::NoProxy::new(http::uri::Scheme::HTTP, ".foobar.com"); - - let client = HttpClientBuilder::https_with_system_roots()? - .with_proxy(Proxy::new( - no_proxy.into_proxy_intercept(), - proxy_server.uri()?, - )) - .build(); - let resp = client.get(&test_server.url_str("/foo")).await?; - assert_eq!(200, resp.status().as_u16()); - - Ok(()) - } - - // Use proxy server harness to test slow connections. - #[tokio::test] - async fn test_timeout() -> anyhow::Result<()> { - let test_server = httptest::Server::run(); - let proxy_server = ProxyServer::new().await?; - - let client = HttpClientBuilder::https_with_system_roots()? - .with_proxy(Proxy::new(Intercept::Http, proxy_server.uri()?)) - .with_read_timeout(Some(Duration::from_millis(10))) - .build(); - - let req = Request::builder() - .uri(test_server.url_str("/foo")) - .header(HEADER_SLEEP_DURATION_MS, "200") - .method(Method::GET) - .body(Bytes::new())?; - let res = client.request(req).await; - assert!(matches!(res, Err(HttpError::Timeout { .. }))); - Ok(()) - } -} diff --git a/app/buck2_common/src/http/mod.rs b/app/buck2_common/src/http/mod.rs deleted file mode 100644 index b7fc97be3a98f..0000000000000 --- a/app/buck2_common/src/http/mod.rs +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -use dice::UserComputationData; -use dupe::Dupe; -use hyper::StatusCode; -use thiserror::Error; - -mod client; -mod proxy; -mod redirect; -pub mod retries; -mod stats; -pub mod tls; -mod x2p; - -pub use client::HttpClient; -pub use client::HttpClientBuilder; - -/// Dice implementations so we can pass along the HttpClient to various subsystems -/// that need to use it (Materializer, RunActions, etc). -pub trait HasHttpClient { - fn get_http_client(&self) -> HttpClient; -} - -pub trait SetHttpClient { - fn set_http_client(&mut self, client: HttpClient); -} - -impl HasHttpClient for UserComputationData { - fn get_http_client(&self) -> HttpClient { - self.data - .get::() - .expect("HttpClient should be set") - .dupe() - } -} - -impl SetHttpClient for UserComputationData { - fn set_http_client(&mut self, client: HttpClient) { - self.data.set(client); - } -} - -fn http_error_label(status: StatusCode) -> &'static str { - if status.is_server_error() { - "Server" - } else if status.is_client_error() { - "Client" - } else { - "Unknown" - } -} - -#[derive(Debug, Error)] -pub enum HttpError { - #[error("HTTP URI Error: URI {uri} is malformed: {source:?}")] - InvalidUri { - uri: String, - #[source] - source: http::uri::InvalidUri, - }, - #[error("HTTP: Error building request")] - BuildRequest(#[from] http::Error), - #[error("HTTP: Error sending request to {uri}")] - SendRequest { - uri: String, - #[source] - source: hyper::Error, - }, - #[error("HTTP {} Error ({status}) when querying URI: {uri}. Response text: {text}", http_error_label(*.status))] - Status { - status: StatusCode, - uri: String, - text: String, - }, - #[error("HTTP Error: Exceeded max redirects ({max_redirects}) while fetching URI: {uri}. ")] - TooManyRedirects { uri: String, max_redirects: usize }, - #[error("HTTP: Error mutating request")] - MutateRequest(#[from] anyhow::Error), - #[error("HTTP: Timed out while making request to URI: {uri} after {duration} seconds.")] - Timeout { uri: String, duration: u64 }, - #[error("While making request to {uri} via x2p")] - X2P { - uri: String, - #[source] - source: x2p::X2PAgentError, - }, -} diff --git a/app/buck2_common/src/http/tls.rs b/app/buck2_common/src/http/tls.rs deleted file mode 100644 index 4c0d047c63525..0000000000000 --- a/app/buck2_common/src/http/tls.rs +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::ffi::OsString; -use std::fs::File; -use std::io::BufReader; -use std::path::Path; - -use anyhow::Context; -use gazebo::prelude::VecExt; -use rustls::Certificate; -use rustls::ClientConfig; -use rustls::PrivateKey; -use rustls::RootCertStore; - -const MACOS_CORP_CERTS: &str = "/opt/facebook/certs/rc_digicert_ca.pem"; - -/// Load the system root certificates using native frameworks. -fn load_system_root_certs_native() -> anyhow::Result>> { - let native_certs: Vec<_> = rustls_native_certs::load_native_certs() - .context("Error loading system root certificates")? - .into_map(|cert| cert.0); - Ok(native_certs) -} - -/// Fallback path: load from disk (only implemented for specific platforms). 
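// Quick illustration of consuming the `HttpError` enum deleted above:
// callers can branch on the failure class (transport error, HTTP status,
// timeout) instead of string-matching messages. `is_retryable` is a
// hypothetical helper, not part of the crate.
fn is_retryable(err: &HttpError) -> bool {
    match err {
        // Transport-level failures and timeouts are usually worth retrying.
        HttpError::SendRequest { .. } | HttpError::Timeout { .. } => true,
        // Server-side (5xx) statuses may be transient; client errors are not.
        HttpError::Status { status, .. } => status.is_server_error(),
        _ => false,
    }
}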
-fn load_system_root_certs_disk(path: &str) -> anyhow::Result>> { - let file = File::open(path).with_context(|| format!("Opening root certs at: {}", path))?; - let mut reader = BufReader::new(file); - let certs = rustls_pemfile::certs(&mut reader) - .with_context(|| format!("Loading root certs at: {}", path))? - .into_iter() - .collect(); - - Ok(certs) -} - -fn load_system_root_certs() -> anyhow::Result { - let mut roots = RootCertStore::empty(); - let root_certs = match load_system_root_certs_native() { - Ok(certs) => certs, - Err(_) if cfg!(target_os = "macos") && Path::new(MACOS_CORP_CERTS).exists() => { - load_system_root_certs_disk(MACOS_CORP_CERTS) - .context("Loading corp system certs from disk")? - } - Err(e) => { - anyhow::bail!(e.context("Error loading system root certificates")); - } - }; - - // According to [`rustls` documentation](https://docs.rs/rustls/latest/rustls/struct.RootCertStore.html#method.add_parsable_certificates), - // it's better to only add parseable certs when loading system certs because - // there are typically many system certs and not all of them can be valid. This - // is pertinent for e.g. macOS which may have a lot of old certificates that may - // not parse correctly. - let (valid, invalid) = roots.add_parsable_certificates(root_certs.as_slice()); - - // But make sure we get at least _one_ valid cert, otherwise we legitimately won't be - // able to make any connections via https. - anyhow::ensure!( - valid > 0, - "Error loading system certs: unable to find any valid system certs" - ); - tracing::debug!("Loaded {} valid system root certs", valid); - tracing::debug!("Loaded {} invalid system root certs", invalid); - Ok(roots) -} - -/// Deserialize certificate pair at `cert` and `key` into structures that can -/// be inserted into rustls CertStore. -fn load_cert_pair>( - cert: P, - key: P, -) -> anyhow::Result<(Vec, PrivateKey)> { - let cert = cert.as_ref(); - let key = key.as_ref(); - let cert_file = File::open(cert) - .with_context(|| format!("Error opening certificate file `{}`", cert.display()))?; - let key_file = - File::open(key).with_context(|| format!("Error opening key file `{}`", key.display()))?; - let mut cert_reader = BufReader::new(&cert_file); - let mut key_reader = BufReader::new(&key_file); - - let certs = rustls_pemfile::certs(&mut cert_reader) - .with_context(|| format!("Error parsing certificate file `{}`", cert.display()))? - .into_map(Certificate); - - let private_key = rustls_pemfile::pkcs8_private_keys(&mut key_reader) - .with_context(|| format!("Error parsing key file `{}`", key.display()))? 
- .pop() - .with_context(|| format!("Found no private key in key file `{}`", key.display()))?; - let key = PrivateKey(private_key); - - Ok((certs, key)) -} - -pub fn tls_config_with_system_roots() -> anyhow::Result { - let system_roots = load_system_root_certs()?; - Ok(ClientConfig::builder() - .with_safe_defaults() - .with_root_certificates(system_roots) - .with_no_client_auth()) -} - -pub fn tls_config_with_single_cert>( - cert_path: P, - key_path: P, -) -> anyhow::Result { - let system_roots = load_system_root_certs()?; - let (cert, key) = - load_cert_pair(cert_path, key_path).context("Error loading certificate pair")?; - // TODO: replace with_single_cert with with_client_auth_cert - // once rustls get upgraded to >0.21.4 - #[allow(deprecated)] - ClientConfig::builder() - .with_safe_defaults() - .with_root_certificates(system_roots) - .with_single_cert(cert, key) - .context("Error creating TLS config with cert and key path") -} - -/// Find TLS certs. -/// -/// Return `None` in Cargo or open source builds. -/// Return `Err` if certificates cannot be found in internal buck2 builds. -pub fn find_internal_cert() -> anyhow::Result> { - #[cfg(fbcode_build)] - return find_certs::find_tls_cert().map(Some); - - #[cfg(not(fbcode_build))] - return Ok(None); -} diff --git a/app/buck2_common/src/ignores/mod.rs b/app/buck2_common/src/ignores.rs similarity index 100% rename from app/buck2_common/src/ignores/mod.rs rename to app/buck2_common/src/ignores.rs diff --git a/app/buck2_common/src/ignores/all_cells.rs b/app/buck2_common/src/ignores/all_cells.rs index 1690de321b8f1..96bb85bd18b03 100644 --- a/app/buck2_common/src/ignores/all_cells.rs +++ b/app/buck2_common/src/ignores/all_cells.rs @@ -7,73 +7,50 @@ * of this source tree. */ -use std::collections::HashMap; use std::sync::Arc; -use allocative::Allocative; use async_trait::async_trait; use buck2_core::cells::name::CellName; -use buck2_core::cells::unchecked_cell_rel_path::UncheckedCellRelativePath; use dice::DiceComputations; -use itertools::Itertools; use crate::dice::cells::HasCellResolver; -use crate::ignores::file_ignores::FileIgnoreResult; -use crate::ignores::file_ignores::FileIgnores; +use crate::ignores::file_ignores::CellFileIgnores; use crate::legacy_configs::dice::HasLegacyConfigs; - -/// Ignored path configurations for all cells. -#[derive(Allocative, Debug, Eq, PartialEq)] -pub(crate) struct AllCellIgnores { - ignores: HashMap, -} - -impl AllCellIgnores { - pub(crate) fn check_ignored( - &self, - cell: CellName, - path: &UncheckedCellRelativePath, - ) -> anyhow::Result { - Ok(self - .ignores - .get(&cell) - .ok_or_else(|| { - anyhow::anyhow!( - "Internal error: Should've had an ignore spec for `{}`. Had `{}`", - cell, - self.ignores.keys().join(", ") - ) - })? 
- .check(path)) - } -} +use crate::legacy_configs::key::BuckconfigKeyRef; #[async_trait] -pub(crate) trait HasAllCellIgnores { - async fn new_all_cell_ignores(&self) -> anyhow::Result>; +pub(crate) trait HasCellFileIgnores { + async fn new_cell_ignores( + &mut self, + cell_name: CellName, + ) -> anyhow::Result>; } #[async_trait] -impl HasAllCellIgnores for DiceComputations { - async fn new_all_cell_ignores(&self) -> anyhow::Result> { +impl HasCellFileIgnores for DiceComputations<'_> { + async fn new_cell_ignores( + &mut self, + cell_name: CellName, + ) -> anyhow::Result> { let cells = self.get_cell_resolver().await?; - let configs = self.get_legacy_configs_on_dice().await?; - - let mut ignores = HashMap::new(); - - for (cell_name, instance) in cells.cells() { - let config = configs.get(cell_name).unwrap(); - let ignore_spec = config.get("project", "ignore")?; - let ignore_spec = ignore_spec.as_ref().map_or("", |s| &**s); - - let cell_ignores = FileIgnores::new_for_interpreter( - ignore_spec, - instance.nested_cells().clone(), - cells.is_root_cell(cell_name), - )?; - ignores.insert(cell_name, cell_ignores); - } - - Ok(Arc::new(AllCellIgnores { ignores })) + let instance = cells.get(cell_name)?; + let config = self.get_legacy_config_on_dice(cell_name).await?; + + let ignore_spec = config.lookup( + self, + BuckconfigKeyRef { + section: "project", + property: "ignore", + }, + )?; + let ignore_spec = ignore_spec.as_ref().map_or("", |s| &**s); + + let cell_ignores = CellFileIgnores::new_for_interpreter( + ignore_spec, + instance.nested_cells().clone(), + cells.is_root_cell(cell_name), + )?; + + Ok(Arc::new(cell_ignores)) } } diff --git a/app/buck2_common/src/ignores/file_ignores.rs b/app/buck2_common/src/ignores/file_ignores.rs index 3192ffcb46966..eee5aab18beb1 100644 --- a/app/buck2_common/src/ignores/file_ignores.rs +++ b/app/buck2_common/src/ignores/file_ignores.rs @@ -14,39 +14,61 @@ use buck2_core::cells::unchecked_cell_rel_path::UncheckedCellRelativePath; use crate::ignores::ignore_set::IgnoreSet; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum FileOpsError { #[error("Tried to read ignored dir `{0}` (reason: {1}).")] ReadIgnoredDir(String, String), } -pub(crate) enum FileIgnoreResult { +#[derive(Debug, Allocative)] +pub enum FileIgnoreReason { + IgnoredByPattern { path: String, pattern: String }, + IgnoredByCell { path: String, cell_name: CellName }, +} + +impl FileIgnoreReason { + pub fn describe(&self) -> String { + match self { + FileIgnoreReason::IgnoredByPattern { pattern, .. } => { + format!("config project.ignore contains `{}`", pattern) + } + FileIgnoreReason::IgnoredByCell { cell_name, .. } => { + format!("path is contained in cell `{}`", cell_name) + } + } + } +} + +#[derive(Debug, Allocative)] +pub enum FileIgnoreResult { Ok, - IgnoredByPattern(String, String), - IgnoredByCell(String, CellName), + Ignored(FileIgnoreReason), } impl FileIgnoreResult { /// Converts the FileIgnoreResult to a Result<()> where any ignored case is converted to an Err /// with appropriate message. This should be used when it would be an error to interact with an /// ignored file. 
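// Sketch of the intended call pattern for the ignore API above (names taken
// from this diff; `ensure_readable` itself is hypothetical): check a path
// against the cell's ignore rules, then either branch on the verdict or let
// `into_result` turn an ignored path into a descriptive error.
fn ensure_readable(
    ignores: &CellFileIgnores,
    path: &UncheckedCellRelativePath,
) -> anyhow::Result<()> {
    let result = ignores.check(path);
    if result.is_ignored() {
        // into_result() produces a ReadIgnoredDir error explaining why.
        return result.into_result();
    }
    Ok(())
}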
- pub(crate) fn into_result(self) -> anyhow::Result<()> { + pub fn into_result(self) -> anyhow::Result<()> { match self { FileIgnoreResult::Ok => Ok(()), - FileIgnoreResult::IgnoredByPattern(path, pattern) => { + FileIgnoreResult::Ignored(FileIgnoreReason::IgnoredByPattern { path, pattern }) => { Err(anyhow::anyhow!(FileOpsError::ReadIgnoredDir( path, format!("file is matched by pattern `{}`", pattern) ))) } - FileIgnoreResult::IgnoredByCell(path, cell_name) => Err(anyhow::anyhow!( - FileOpsError::ReadIgnoredDir(path, format!("file is part of cell `{}`", cell_name)) - )), + FileIgnoreResult::Ignored(FileIgnoreReason::IgnoredByCell { path, cell_name }) => { + Err(anyhow::anyhow!(FileOpsError::ReadIgnoredDir( + path, + format!("file is part of cell `{}`", cell_name) + ))) + } } } /// Returns true if the file is ignored, false otherwise. - pub(crate) fn is_ignored(&self) -> bool { + pub fn is_ignored(&self) -> bool { match self { FileIgnoreResult::Ok => false, _ => true, @@ -56,12 +78,12 @@ impl FileIgnoreResult { /// Ignores files based on configured ignore patterns and cell paths. #[derive(PartialEq, Eq, Allocative, Debug)] -pub struct FileIgnores { +pub struct CellFileIgnores { ignores: IgnoreSet, cell_ignores: NestedCells, } -impl FileIgnores { +impl CellFileIgnores { /// Creates a new FileIgnores intended for use by the interpreter. /// /// This will ignore files/dirs in the ignore spec and those in other cells. @@ -69,8 +91,8 @@ impl FileIgnores { ignore_spec: &str, nested_cells: NestedCells, root_cell: bool, - ) -> anyhow::Result { - Ok(FileIgnores { + ) -> anyhow::Result { + Ok(CellFileIgnores { ignores: IgnoreSet::from_ignore_spec(ignore_spec, root_cell)?, cell_ignores: nested_cells, }) @@ -80,14 +102,17 @@ impl FileIgnores { let candidate = globset::Candidate::new(path.as_str()); if let Some(pattern) = self.ignores.matches_candidate(&candidate) { - return FileIgnoreResult::IgnoredByPattern( - path.as_str().to_owned(), - pattern.to_owned(), - ); + return FileIgnoreResult::Ignored(FileIgnoreReason::IgnoredByPattern { + path: path.as_str().to_owned(), + pattern: pattern.to_owned(), + }); } if let Some((_, cell_name, _)) = self.cell_ignores.matches(path) { - return FileIgnoreResult::IgnoredByCell(path.as_str().to_owned(), cell_name); + return FileIgnoreResult::Ignored(FileIgnoreReason::IgnoredByCell { + path: path.as_str().to_owned(), + cell_name, + }); } FileIgnoreResult::Ok @@ -102,7 +127,7 @@ mod tests { use buck2_core::cells::unchecked_cell_rel_path::UncheckedCellRelativePath; use buck2_core::fs::project_rel_path::ProjectRelativePath; - use crate::ignores::file_ignores::FileIgnores; + use crate::ignores::file_ignores::CellFileIgnores; #[test] fn file_ignores() -> anyhow::Result<()> { @@ -121,7 +146,7 @@ mod tests { ), ]; let nested_cells = NestedCells::from_cell_roots(cells, CellRootPath::testing_new("root")); - let ignores = FileIgnores::new_for_interpreter( + let ignores = CellFileIgnores::new_for_interpreter( "**/*.java , some/dir/**, one/*, \n recursive, trailing_slash/", nested_cells, true, diff --git a/app/buck2_common/src/init.rs b/app/buck2_common/src/init.rs new file mode 100644 index 0000000000000..239afa59e6ca9 --- /dev/null +++ b/app/buck2_common/src/init.rs @@ -0,0 +1,373 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::str::FromStr;
+use std::time::Duration;
+
+use allocative::Allocative;
+use anyhow::Context;
+use buck2_core::buck2_env_anyhow;
+use serde::Deserialize;
+use serde::Serialize;
+
+use crate::legacy_configs::configs::LegacyBuckConfig;
+use crate::legacy_configs::key::BuckconfigKeyRef;
+
+/// Helper enum to categorize the kind of timeout we get from the startup config.
+#[derive(Clone, Debug)]
+pub enum Timeout {
+    /// Timeout value is set in the config, use that.
+    Value(Duration),
+    /// Timeout value was not set in config, apply the default.
+    Default,
+    /// Timeout value was explicitly set to 0, meaning we shouldn't use a timeout.
+    NoTimeout,
+}
+
+impl Timeout {
+    pub fn new(value: Option<Duration>) -> Self {
+        match value {
+            Some(Duration::ZERO) => Self::NoTimeout,
+            Some(value) => Self::Value(value),
+            None => Self::Default,
+        }
+    }
+}
+
+#[derive(
+    Allocative,
+    Clone,
+    Debug,
+    Default,
+    Serialize,
+    Deserialize,
+    PartialEq,
+    Eq
+)]
+pub struct HttpConfig {
+    connect_timeout_ms: Option<u64>,
+    read_timeout_ms: Option<u64>,
+    write_timeout_ms: Option<u64>,
+    pub http2: bool,
+    pub max_redirects: Option<usize>,
+}
+
+impl HttpConfig {
+    pub fn from_config(config: &LegacyBuckConfig) -> anyhow::Result<Self> {
+        let connect_timeout_ms = config.parse(BuckconfigKeyRef {
+            section: "http",
+            property: "connect_timeout_ms",
+        })?;
+        let read_timeout_ms = config.parse(BuckconfigKeyRef {
+            section: "http",
+            property: "read_timeout_ms",
+        })?;
+        let write_timeout_ms = config.parse(BuckconfigKeyRef {
+            section: "http",
+            property: "write_timeout_ms",
+        })?;
+        let max_redirects = config.parse(BuckconfigKeyRef {
+            section: "http",
+            property: "max_redirects",
+        })?;
+        let http2 = config
+            .parse(BuckconfigKeyRef {
+                section: "http",
+                property: "http2",
+            })?
+            .unwrap_or(true);
+
+        Ok(Self {
+            connect_timeout_ms,
+            read_timeout_ms,
+            write_timeout_ms,
+            max_redirects,
+            http2,
+        })
+    }
+
+    pub fn connect_timeout(&self) -> Timeout {
+        match self.connect_timeout_ms.map(Duration::from_millis) {
+            Some(Duration::ZERO) => Timeout::NoTimeout,
+            Some(value) => Timeout::Value(value),
+            None => Timeout::Default,
+        }
+    }
+
+    pub fn read_timeout(&self) -> Timeout {
+        match self.read_timeout_ms.map(Duration::from_millis) {
+            Some(Duration::ZERO) => Timeout::NoTimeout,
+            Some(value) => Timeout::Value(value),
+            None => Timeout::Default,
+        }
+    }
+
+    pub fn write_timeout(&self) -> Timeout {
+        match self.write_timeout_ms.map(Duration::from_millis) {
+            Some(Duration::ZERO) => Timeout::NoTimeout,
+            Some(value) => Timeout::Value(value),
+            None => Timeout::Default,
+        }
+    }
+}
+
+#[derive(
+    Allocative,
+    Clone,
+    Debug,
+    Default,
+    Serialize,
+    Deserialize,
+    PartialEq,
+    Eq
+)]
+pub struct SystemWarningConfig {
+    /// A threshold that is used to determine the percent of memory buck2 uses to display memory pressure warnings.
+    /// If None, we don't warn the user.
+    /// The corresponding buckconfig is `buck2_system_warning.memory_pressure_threshold_percent`.
+    pub memory_pressure_threshold_percent: Option<u64>,
+    /// A threshold that is used to determine the remaining disk space below which buck2 displays disk space warnings.
+    /// If None, we don't warn the user.
+    /// The corresponding buckconfig is `buck2_system_warning.remaining_disk_space_threshold`.
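// The zero-means-disabled convention above, restated as a tiny test-style
// sketch (names from this file; `classify` and `demo` are illustrative):
// a configured 0ms timeout maps to NoTimeout, a missing key to Default, and
// anything else to a concrete Value.
fn classify(ms: Option<u64>) -> Timeout {
    Timeout::new(ms.map(std::time::Duration::from_millis))
}

fn demo() {
    use std::time::Duration;
    assert!(matches!(classify(Some(0)), Timeout::NoTimeout));
    assert!(matches!(classify(None), Timeout::Default));
    assert!(matches!(classify(Some(500)), Timeout::Value(d) if d == Duration::from_millis(500)));
}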
+
+#[derive(
+    Allocative,
+    Clone,
+    Debug,
+    Default,
+    Serialize,
+    Deserialize,
+    PartialEq,
+    Eq
+)]
+pub struct SystemWarningConfig {
+    /// A threshold that is used to determine the percent of memory buck2 uses to display memory pressure warnings.
+    /// If None, we don't warn the user.
+    /// The corresponding buckconfig is `buck2_system_warning.memory_pressure_threshold_percent`.
+    pub memory_pressure_threshold_percent: Option<u64>,
+    /// A threshold that is used to determine remaining disk space buck2 uses to display disk space warnings.
+    /// If None, we don't warn the user.
+    /// The corresponding buckconfig is `buck2_system_warning.remaining_disk_space_threshold_gb`.
+    pub remaining_disk_space_threshold_gb: Option<u64>,
+    /// Minimum number of bytes downloaded to measure average download speed.
+    /// If None, we don't warn the user.
+    /// The corresponding buckconfig is `buck2_system_warning.min_re_download_bytes_threshold`.
+    pub min_re_download_bytes_threshold: Option<u64>,
+    /// A threshold that is used to determine if download speed is too low and display a warning.
+    /// If None, we don't warn the user.
+    /// The corresponding buckconfig is `buck2_system_warning.avg_re_download_bytes_per_sec_threshold`.
+    pub avg_re_download_bytes_per_sec_threshold: Option<u64>,
+    /// A threshold that is used to determine if cache hit rate is too low and display a warning.
+    /// If None, we don't warn the user.
+    /// The corresponding buckconfig is `buck2_system_warning.min_cache_hit_threshold_percent`.
+    /// The value is in the range of [0, 100].
+    pub min_cache_hit_threshold_percent: Option<u64>,
+    /// Minimum % completion of the command before we start to warn the user about low cache hit rate.
+    /// If None, we warn the user immediately after the command starts based on cache misses.
+    /// The corresponding buckconfig is `buck2_system_warning.cache_warning_min_completion_threshold_percent`.
+    pub cache_warning_min_completion_threshold_percent: Option<u64>,
+    /// Minimum number of actions to run before we start to warn the user about low cache hit rate.
+    /// If None, we warn the user immediately after the command starts based on cache misses.
+    /// The corresponding buckconfig is `buck2_system_warning.cache_warning_min_actions_count`.
+    pub cache_warning_min_actions_count: Option<u64>,
+}
+
+impl SystemWarningConfig {
+    pub fn from_config(config: &LegacyBuckConfig) -> anyhow::Result<Self> {
+        let memory_pressure_threshold_percent = config.parse(BuckconfigKeyRef {
+            section: "buck2_system_warning",
+            property: "memory_pressure_threshold_percent",
+        })?;
+        let remaining_disk_space_threshold_gb = config.parse(BuckconfigKeyRef {
+            section: "buck2_system_warning",
+            property: "remaining_disk_space_threshold_gb",
+        })?;
+        let min_re_download_bytes_threshold = config.parse(BuckconfigKeyRef {
+            section: "buck2_system_warning",
+            property: "min_re_download_bytes_threshold",
+        })?;
+        let avg_re_download_bytes_per_sec_threshold = config.parse(BuckconfigKeyRef {
+            section: "buck2_system_warning",
+            property: "avg_re_download_bytes_per_sec_threshold",
+        })?;
+        let min_cache_hit_threshold_percent = config.parse(BuckconfigKeyRef {
+            section: "buck2_system_warning",
+            property: "min_cache_hit_threshold_percent",
+        })?;
+        let cache_warning_min_completion_threshold_percent = config.parse(BuckconfigKeyRef {
+            section: "buck2_system_warning",
+            property: "cache_warning_min_completion_threshold_percent",
+        })?;
+        let cache_warning_min_actions_count = config.parse(BuckconfigKeyRef {
+            section: "buck2_system_warning",
+            property: "cache_warning_min_actions_count",
+        })?;
+        Ok(Self {
+            memory_pressure_threshold_percent,
+            remaining_disk_space_threshold_gb,
+            min_re_download_bytes_threshold,
+            avg_re_download_bytes_per_sec_threshold,
+            min_cache_hit_threshold_percent,
+            cache_warning_min_completion_threshold_percent,
+            cache_warning_min_actions_count,
+        })
+    }
+
+    pub fn serialize(&self) -> anyhow::Result<String> {
+        serde_json::to_string(&self).context("Error serializing SystemWarningConfig")
+    }
+
+    pub fn deserialize(s: &str) -> anyhow::Result<Self> {
+        serde_json::from_str::<Self>(s).context("Error deserializing SystemWarningConfig")
+    }
+}
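
// Hedged illustration of the "If None, we don't warn" convention used by every field
// above; `used_bytes`/`total_bytes` are assumed inputs, not part of this diff:
fn should_warn_on_memory_pressure(cfg: &SystemWarningConfig, used_bytes: u64, total_bytes: u64) -> bool {
    match cfg.memory_pressure_threshold_percent {
        // Warn once usage crosses the configured percentage of total memory.
        Some(threshold) => used_bytes.saturating_mul(100) / total_bytes.max(1) >= threshold,
        // An unset threshold disables the warning entirely.
        None => false,
    }
}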
+
+#[derive(
+    Allocative,
+    Clone,
+    Debug,
+    Default,
+    Serialize,
+    Deserialize,
+    PartialEq,
+    Eq
+)]
+pub struct ResourceControlConfig {
+    /// A config to determine if the resource control should be activated or not.
+    /// The corresponding buckconfig is `buck2_resource_control.status` that can take
+    /// one of `{off | if_available | required}`.
+    pub status: ResourceControlStatus,
+    /// A memory threshold that buck2 daemon and workers are allowed to allocate. Units
+    /// like `M` and `G` may be used (e.g. 64G), or `%` is also accepted (e.g. 90%).
+    /// When the combined memory usage of the daemon and workers exceeds this threshold,
+    /// all the processes are killed by the OOMKiller.
+    /// The corresponding buckconfig is `buck2_resource_control.memory_max`.
+    pub memory_max: Option<String>,
+}
+
+#[derive(
+    Allocative,
+    Clone,
+    Debug,
+    Default,
+    Serialize,
+    Deserialize,
+    PartialEq,
+    Eq
+)]
+pub enum ResourceControlStatus {
+    #[default]
+    /// The resource is not controlled or limited.
+    Off,
+    /// The resource is controlled by `systemd` if it's available on the system, otherwise off.
+    IfAvailable,
+    /// The resource is controlled by `systemd`. If it is not available on the system,
+    /// buck2 errors out and the command returns with an error exit code.
+    Required,
+}
+
+impl FromStr for ResourceControlStatus {
+    type Err = anyhow::Error;
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            "off" => Ok(Self::Off),
+            "if_available" => Ok(Self::IfAvailable),
+            "required" => Ok(Self::Required),
+            _ => Err(anyhow::anyhow!("Invalid resource control status: `{}`", s)),
+        }
+    }
+}
+
+impl ResourceControlConfig {
+    pub fn from_config(config: &LegacyBuckConfig) -> anyhow::Result<Self> {
+        if let Some(env_conf) = buck2_env_anyhow!(
+            "BUCK2_TEST_RESOURCE_CONTROL_CONFIG",
+            applicability = testing,
+        )? {
+            Ok(Self::deserialize(env_conf)?)
+        } else {
+            let status = config
+                .parse(BuckconfigKeyRef {
+                    section: "buck2_resource_control",
+                    property: "status",
+                })?
+                .unwrap_or(ResourceControlStatus::Off);
+            let memory_max = config.parse(BuckconfigKeyRef {
+                section: "buck2_resource_control",
+                property: "memory_max",
+            })?;
+            Ok(Self { status, memory_max })
+        }
+    }
+
+    pub fn serialize(&self) -> anyhow::Result<String> {
+        serde_json::to_string(&self).context("Error serializing ResourceControlConfig")
+    }
+
+    pub fn deserialize(s: &str) -> anyhow::Result<Self> {
+        serde_json::from_str::<Self>(s).context("Error deserializing ResourceControlConfig")
+    }
+}
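
// A quick, self-contained check of the FromStr mapping above; the three accepted
// spellings mirror the `buck2_resource_control.status` buckconfig values:
fn resource_control_status_examples() -> anyhow::Result<()> {
    assert_eq!("off".parse::<ResourceControlStatus>()?, ResourceControlStatus::Off);
    assert_eq!(
        "if_available".parse::<ResourceControlStatus>()?,
        ResourceControlStatus::IfAvailable
    );
    assert!("always".parse::<ResourceControlStatus>().is_err()); // unknown spellings are rejected
    Ok(())
}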
+
+/// Configurations that are used at startup by the daemon. Those are actually read by the client,
+/// and passed on to the daemon.
+///
+/// The fields here are often the raw String we get from the buckconfig; the daemon will do
+/// deserialization once it receives them. That said, this is not a requirement.
+///
+/// Backwards compatibility on Serialize / Deserialize is not required: if the client cannot read
+/// the DaemonStartupConfig provided by the daemon when it tries to connect, it will reject that
+/// daemon and restart (and in fact it will probably not get that far since a version check is done
+/// before parsing DaemonStartupConfig).
+#[derive(Allocative, Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
+pub struct DaemonStartupConfig {
+    pub daemon_buster: Option<String>,
+    pub digest_algorithms: Option<String>,
+    pub source_digest_algorithm: Option<String>,
+    pub paranoid: bool,
+    pub materializations: Option<String>,
+    pub http: HttpConfig,
+    pub resource_control: ResourceControlConfig,
+}
+
+impl DaemonStartupConfig {
+    pub fn new(config: &LegacyBuckConfig) -> anyhow::Result<Self> {
+        // Interpreted client side because we need the value here.
+        Ok(Self {
+            daemon_buster: config
+                .get(BuckconfigKeyRef {
+                    section: "buck2",
+                    property: "daemon_buster",
+                })
+                .map(ToOwned::to_owned),
+            digest_algorithms: config
+                .get(BuckconfigKeyRef {
+                    section: "buck2",
+                    property: "digest_algorithms",
+                })
+                .map(ToOwned::to_owned),
+            source_digest_algorithm: config
+                .get(BuckconfigKeyRef {
+                    section: "buck2",
+                    property: "source_digest_algorithm",
+                })
+                .map(ToOwned::to_owned),
+            paranoid: false, // Set up later in ImmediateConfig
+            materializations: config
+                .get(BuckconfigKeyRef {
+                    section: "buck2",
+                    property: "materializations",
+                })
+                .map(ToOwned::to_owned),
+            http: HttpConfig::from_config(config)?,
+            resource_control: ResourceControlConfig::from_config(config)?,
+        })
+    }
+
+    pub fn serialize(&self) -> anyhow::Result<String> {
+        serde_json::to_string(&self).context("Error serializing DaemonStartupConfig")
+    }
+
+    pub fn deserialize(s: &str) -> anyhow::Result<Self> {
+        serde_json::from_str::<Self>(s).context("Error deserializing DaemonStartupConfig")
+    }
+
+    pub fn testing_empty() -> Self {
+        Self {
+            daemon_buster: None,
+            digest_algorithms: None,
+            source_digest_algorithm: None,
+            paranoid: false,
+            materializations: None,
+            http: HttpConfig::default(),
+            resource_control: ResourceControlConfig::default(),
+        }
+    }
+}
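Because `DaemonStartupConfig` derives `PartialEq` and round-trips through `serde_json`, a client can compare its freshly parsed startup config against whatever a running daemon reports, treating any parse failure as a mismatch. A minimal sketch of that comparison (the function name and the restart decision are illustrative, not the actual buck2 handshake):

fn daemon_startup_config_matches(expected: &DaemonStartupConfig, reported: &str) -> bool {
    match DaemonStartupConfig::deserialize(reported) {
        Ok(running) => running == *expected,
        // An unreadable config counts as a mismatch, prompting a daemon restart.
        Err(_) => false,
    }
}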
diff --git a/app/buck2_common/src/invocation_paths.rs b/app/buck2_common/src/invocation_paths.rs
index 5f743ac8c8b70..054b54508e778 100644
--- a/app/buck2_common/src/invocation_paths.rs
+++ b/app/buck2_common/src/invocation_paths.rs
@@ -9,7 +9,6 @@
 //!
 //! Defines utilities to obtain the basic paths for buck2 client and the daemon.
-//!
 
 use std::borrow::Cow;
 
@@ -42,13 +41,15 @@ pub struct InvocationPaths {
     /// be written or read from directories that include this component.
     ///
     /// This form of isolation is currently supported primarily for two uses:
+    ///
     /// 1. testing - it allows us to run isolated daemons on a project for tests. This is
-    /// particularly useful to allow a test in a project to recursively invoke buck, but also
-    /// useful to write tests against a project's macros and rules and using a project's real
-    /// configuration.
+    ///    particularly useful to allow a test in a project to recursively invoke buck, but also
+    ///    useful to write tests against a project's macros and rules and using a project's real
+    ///    configuration.
+    ///
     /// 2. generally to support recursive buck invocations. while our ideal may be that these
-    /// eventually are not allowed, the most pragmatic approach currently is to support them
-    /// but push them into isolated, temporary daemons.
+    ///    eventually are not allowed, the most pragmatic approach currently is to support them
+    ///    but push them into isolated, temporary daemons.
     pub isolation: FileNameBuf,
 }
 
diff --git a/app/buck2_common/src/invocation_paths_result.rs b/app/buck2_common/src/invocation_paths_result.rs
new file mode 100644
index 0000000000000..63ebe8a123970
--- /dev/null
+++ b/app/buck2_common/src/invocation_paths_result.rs
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use crate::invocation_paths::InvocationPaths;
+
+#[derive(Clone)]
+pub enum InvocationPathsResult {
+    OtherError(buck2_error::Error),
+    Paths(InvocationPaths),
+    OutsideOfRepo(buck2_error::Error), // this error is ignored when creating the invocation record for log commands
+}
+
+impl InvocationPathsResult {
+    pub fn get_result(self) -> anyhow::Result<InvocationPaths> {
+        match self {
+            InvocationPathsResult::OtherError(e) => Err(e.into()),
+            InvocationPathsResult::Paths(paths) => Ok(paths),
+            InvocationPathsResult::OutsideOfRepo(e) => Err(e.into()),
+        }
+    }
+}
diff --git a/app/buck2_common/src/invocation_roots.rs b/app/buck2_common/src/invocation_roots.rs
index a97852975b519..5aa76a88a748f 100644
--- a/app/buck2_common/src/invocation_roots.rs
+++ b/app/buck2_common/src/invocation_roots.rs
@@ -12,22 +12,18 @@ use std::path::PathBuf;
 
 use allocative::Allocative;
 use anyhow::Context as _;
-use buck2_core::env_helper::EnvHelper;
+use buck2_core::buck2_env_anyhow;
 use buck2_core::fs::paths::abs_norm_path::AbsNormPath;
 use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf;
 use buck2_core::fs::paths::abs_path::AbsPathBuf;
 use buck2_core::fs::paths::file_name::FileName;
 use buck2_core::fs::project::ProjectRoot;
 use once_cell::sync::Lazy;
-use thiserror::Error;
 
-use crate::result::SharedResult;
-use crate::result::ToSharedResultExt;
-
-#[derive(Debug, Error)]
-enum BuckCliError {
+#[derive(Debug, buck2_error::Error)]
+pub enum BuckCliError {
     #[error(
-        "Couldn't find a buck project root for directory `{0}`. Expected to find a .buckconfig file."
+        "Couldn't find a buck project root for directory `{}`. Expected to find a .buckconfig file.",
+        _0.display()
     )]
     NoBuckRoot(PathBuf),
 }
@@ -45,10 +41,8 @@ impl InvocationRoots {
     pub fn paranoid_info_path(&self) -> anyhow::Result<AbsPathBuf> {
         // Used in tests
-        static PARANOID_PATH: EnvHelper<AbsPathBuf> = EnvHelper::new("BUCK2_PARANOID_PATH");
-
-        if let Some(p) = PARANOID_PATH.get()? {
-            return Ok(p.clone());
+        if let Some(p) = buck2_env_anyhow!("BUCK2_PARANOID_PATH")? {
+            return AbsPathBuf::try_from(p.to_owned());
         }
 
         Ok(self
@@ -121,12 +115,13 @@ pub fn find_invocation_roots(from: &Path) -> anyhow::Result<InvocationRoots> {
 /// vulnerability.
 ///
 /// There's a couple ways we could resolve this:
+///
 /// 1. Use a shared .buckd information directory and have the client verify the identity of
-/// the server before doing anything with it. If the identity is different, kill it and
-/// start a new one.
+///    the server before doing anything with it. If the identity is different, kill it and
+///    start a new one.
+///
 /// 2. Keep user-owned .buckd directory, use some other mechanism to move ownership of
-/// output directories between different buckd instances.
-#[allow(clippy::needless_borrow)] // False positive.
+///    output directories between different buckd instances.
 pub(crate) fn home_buck_dir() -> anyhow::Result<&'static AbsNormPath> {
     fn find_dir() -> anyhow::Result<AbsNormPathBuf> {
         let home = dirs::home_dir().context("Expected a HOME directory to be available")?;
@@ -134,7 +129,8 @@ pub(crate) fn home_buck_dir() -> anyhow::Result<&'static AbsNormPath> {
         Ok(home.join(FileName::new(".buck")?))
     }
 
-    static DIR: Lazy<SharedResult<AbsNormPathBuf>> = Lazy::new(|| find_dir().shared_error());
+    static DIR: Lazy<buck2_error::Result<AbsNormPathBuf>> =
+        Lazy::new(|| find_dir().map_err(buck2_error::Error::from));
 
-    Ok(&Lazy::force(&DIR).as_ref()?)
+    Ok(&Lazy::force(&DIR).as_ref().map_err(dupe::Dupe::dupe)?)
 }
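The `home_buck_dir` change above caches a fallible computation in a `Lazy` and hands out `&'static` data, which forces the error arm to be duplicated (`dupe`d) rather than moved out of the static. The same pattern in miniature, assuming only `once_cell` and a cheaply cloneable error type:

use once_cell::sync::Lazy;

// The Result is computed once and stored for the life of the process; success is
// borrowed, while the error must be cloned out because the static retains ownership.
static CACHED_HOME: Lazy<Result<String, String>> =
    Lazy::new(|| std::env::var("HOME").map_err(|e| e.to_string()));

fn cached_home() -> Result<&'static str, String> {
    Lazy::force(&CACHED_HOME)
        .as_ref()
        .map(|s| s.as_str())
        .map_err(Clone::clone)
}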
diff --git a/app/buck2_common/src/io.rs b/app/buck2_common/src/io.rs
new file mode 100644
index 0000000000000..be79432fcd6c5
--- /dev/null
+++ b/app/buck2_common/src/io.rs
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+pub mod fs;
+pub mod trace;
+
+use allocative::Allocative;
+use async_trait::async_trait;
+use buck2_core::cells::cell_path::CellPath;
+use buck2_core::fs::project::ProjectRoot;
+use buck2_core::fs::project_rel_path::ProjectRelativePathBuf;
+use buck2_error::BuckErrorContext;
+use buck2_error::ErrorTag;
+
+use crate::file_ops::RawDirEntry;
+use crate::file_ops::RawPathMetadata;
+use crate::ignores::file_ignores::FileIgnoreReason;
+
+#[derive(Debug, Allocative, buck2_error::Error)]
+pub enum ReadDirError {
+    #[error("Directory `{0}` does not exist")]
+    DirectoryDoesNotExist(CellPath),
+    #[error("Directory `{0}` is ignored ({})", .1.describe())]
+    DirectoryIsIgnored(CellPath, FileIgnoreReason),
+    #[error("Path `{0}` is `{1}`, not a directory")]
+    NotADirectory(CellPath, String),
+    #[error(transparent)]
+    Anyhow(anyhow::Error),
+}
+
+impl From<anyhow::Error> for ReadDirError {
+    fn from(value: anyhow::Error) -> Self {
+        Self::Anyhow(value)
+    }
+}
+
+impl From<buck2_error::Error> for ReadDirError {
+    fn from(value: buck2_error::Error) -> Self {
+        Self::Anyhow(value.into())
+    }
+}
+
+#[async_trait]
+pub trait IoProvider: Allocative + Send + Sync {
+    async fn read_file_if_exists_impl(
+        &self,
+        path: ProjectRelativePathBuf,
+    ) -> anyhow::Result<Option<String>>;
+
+    async fn read_dir_impl(&self, path: ProjectRelativePathBuf)
+        -> anyhow::Result<Vec<RawDirEntry>>;
+
+    async fn read_path_metadata_if_exists_impl(
+        &self,
+        path: ProjectRelativePathBuf,
+    ) -> anyhow::Result<Option<RawPathMetadata<ProjectRelativePathBuf>>>;
+
+    /// Request that this I/O provider be up to date with whatever I/O operations the user might
+    /// have done until this point.
+    async fn settle(&self) -> anyhow::Result<()>;
+
+    fn name(&self) -> &'static str;
+
+    /// Returns the Eden version of the underlying system of the IoProvider, if available.
+    async fn eden_version(&self) -> anyhow::Result<Option<String>>;
+
+    fn project_root(&self) -> &ProjectRoot;
+
+    fn as_any(&self) -> &dyn std::any::Any;
+}
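
// The trait above only exposes raw `*_impl` operations; the public wrappers that
// follow attach an error tag in one place so call sites cannot forget it. The same
// shape in miniature, with illustrative names that are not buck2 API:
trait Store: Send + Sync {
    fn get_impl(&self, key: &str) -> Result<Option<String>, String>;
}

impl<'a> dyn Store + 'a {
    // Callers go through `get`; implementors only write `get_impl`, so the error
    // decoration is applied uniformly.
    fn get(&self, key: &str) -> Result<Option<String>, String> {
        self.get_impl(key).map_err(|e| format!("store: {e}"))
    }
}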
+
+impl<'a> dyn IoProvider + 'a {
+    pub async fn read_file_if_exists(
+        &self,
+        path: ProjectRelativePathBuf,
+    ) -> anyhow::Result<Option<String>> {
+        self.read_file_if_exists_impl(path)
+            .await
+            .tag_anyhow(ErrorTag::IoSource)
+    }
+
+    pub async fn read_dir(&self, path: ProjectRelativePathBuf) -> anyhow::Result<Vec<RawDirEntry>> {
+        self.read_dir_impl(path)
+            .await
+            .tag_anyhow(ErrorTag::IoSource)
+    }
+
+    pub async fn read_path_metadata_if_exists(
+        &self,
+        path: ProjectRelativePathBuf,
+    ) -> anyhow::Result<Option<RawPathMetadata<ProjectRelativePathBuf>>> {
+        self.read_path_metadata_if_exists_impl(path)
+            .await
+            .tag_anyhow(ErrorTag::IoSource)
+    }
+}
diff --git a/app/buck2_common/src/io/fs.rs b/app/buck2_common/src/io/fs.rs
index 362383727f006..58aa595155936 100644
--- a/app/buck2_common/src/io/fs.rs
+++ b/app/buck2_common/src/io/fs.rs
@@ -14,8 +14,8 @@ use std::sync::Arc;
 use allocative::Allocative;
 use anyhow::Context as _;
 use async_trait::async_trait;
-use buck2_core;
 use buck2_core::fs::fs_util;
+use buck2_core::fs::fs_util::IoError;
 use buck2_core::fs::paths::abs_path::AbsPath;
 use buck2_core::fs::paths::abs_path::AbsPathBuf;
 use buck2_core::fs::paths::file_name::FileName;
@@ -26,7 +26,6 @@ use buck2_core::fs::project_rel_path::ProjectRelativePathBuf;
 use compact_str::CompactString;
 use dupe::Dupe;
 use once_cell::sync::Lazy;
-use thiserror::Error;
 use tokio::sync::Semaphore;
 
 use crate::cas_digest::CasDigestConfig;
@@ -59,7 +58,7 @@ impl FsIoProvider {
     }
 }
 
-#[derive(Error, Debug)]
+#[derive(buck2_error::Error, Debug)]
 enum ReadSymlinkAtExactPathError {
     #[error("The path does not exist")]
     DoesNotExist,
@@ -89,8 +88,8 @@ impl FsIoProvider {
     }
 }
 
-#[derive(Debug, Error)]
-enum ReadDirError {
+#[derive(Debug, buck2_error::Error)]
+enum FsIoError {
     #[error("File name `{0:?}` is not UTF-8")]
     NotUtf8(OsString),
 }
@@ -100,7 +99,7 @@ enum ReadDirError {
 /// edenfs, for example).
 #[async_trait]
 impl IoProvider for FsIoProvider {
-    async fn read_file_if_exists(
+    async fn read_file_if_exists_impl(
         &self,
         path: ProjectRelativePathBuf,
     ) -> anyhow::Result<Option<String>> {
@@ -112,10 +111,15 @@ impl IoProvider for FsIoProvider {
         static SEMAPHORE: Lazy<Semaphore> = Lazy::new(|| Semaphore::new(100));
         let _permit = SEMAPHORE.acquire().await.unwrap();
 
-        tokio::task::spawn_blocking(move || fs_util::read_to_string_if_exists(path)).await?
+        tokio::task::spawn_blocking(move || fs_util::read_to_string_if_exists(path))
+            .await?
+            .map_err(IoError::categorize_for_source_file)
     }
 
-    async fn read_dir(&self, path: ProjectRelativePathBuf) -> anyhow::Result<Vec<RawDirEntry>> {
+    async fn read_dir_impl(
+        &self,
+        path: ProjectRelativePathBuf,
+    ) -> anyhow::Result<Vec<RawDirEntry>> {
         // Don't want to totally saturate the executor with these so that some other work can progress.
         // For normal fs (or warm eden), something smaller would probably be fine, for eden couple hundred is probably
         // good (current plan in that impl is to allow multiple batches of 128 dirs at a time).
@@ -125,7 +129,8 @@ impl IoProvider for FsIoProvider { let path = self.fs.resolve(&path); tokio::task::spawn_blocking(move || { - let dir_entries = fs_util::read_dir(path)?; + let dir_entries = + fs_util::read_dir(path).map_err(IoError::categorize_for_source_file)?; let mut entries = Vec::new(); @@ -134,7 +139,7 @@ impl IoProvider for FsIoProvider { let file_name = e.file_name(); let file_name = file_name .to_str() - .ok_or_else(|| ReadDirError::NotUtf8(file_name.clone()))?; + .ok_or_else(|| FsIoError::NotUtf8(file_name.clone()))?; entries.push(RawDirEntry { file_type: e.file_type()?.into(), file_name: CompactString::from(file_name), @@ -147,7 +152,7 @@ impl IoProvider for FsIoProvider { .context("Error listing directory") } - async fn read_path_metadata_if_exists( + async fn read_path_metadata_if_exists_impl( &self, path: ProjectRelativePathBuf, ) -> anyhow::Result>> { @@ -231,9 +236,8 @@ fn read_path_metadata>( match ExactPathMetadata::from_exact_path(&curr)? { ExactPathMetadata::DoesNotExist => return Ok(None), ExactPathMetadata::Symlink(symlink) => { - return Ok(Some( - symlink.to_raw_path_metadata(curr, relpath_components.collect())?, - )); + let rest: ForwardRelativePathBuf = relpath_components.collect(); + return Ok(Some(symlink.to_raw_path_metadata(curr, rest)?)); } ExactPathMetadata::FileOrDirectory(path_meta) => { meta = Some(path_meta); @@ -281,31 +285,36 @@ enum ExactPathMetadata { impl ExactPathMetadata { fn from_exact_path(curr: &PathAndAbsPath) -> anyhow::Result { - Ok(match fs_util::symlink_metadata_if_exists(&curr.abspath)? { - Some(meta) if meta.file_type().is_symlink() => { - let dest = fs_util::read_link(&curr.abspath)?; - - let out = if dest.is_absolute() { - ExactPathSymlinkMetadata::ExternalSymlink(dest) - } else { - // Remove the symlink name. - let link_path = curr - .path - .parent() - .expect("We pushed a component to this so it cannot be empty") - .join_system(&dest) - .with_context(|| { - format!("Invalid symlink at `{}`: `{}`", curr.path, dest.display()) - })?; - - ExactPathSymlinkMetadata::InternalSymlink(link_path) - }; - - ExactPathMetadata::Symlink(out) - } - Some(meta) => ExactPathMetadata::FileOrDirectory(meta), - None => ExactPathMetadata::DoesNotExist, - }) + Ok( + match fs_util::symlink_metadata_if_exists(&curr.abspath) + .map_err(IoError::categorize_for_source_file)? + { + Some(meta) if meta.file_type().is_symlink() => { + let dest = fs_util::read_link(&curr.abspath) + .map_err(IoError::categorize_for_source_file)?; + + let out = if dest.is_absolute() { + ExactPathSymlinkMetadata::ExternalSymlink(dest) + } else { + // Remove the symlink name. 
+ let link_path = curr + .path + .parent() + .expect("We pushed a component to this so it cannot be empty") + .join_system(&dest) + .with_context(|| { + format!("Invalid symlink at `{}`: `{}`", curr.path, dest.display()) + })?; + + ExactPathSymlinkMetadata::InternalSymlink(link_path) + }; + + ExactPathMetadata::Symlink(out) + } + Some(meta) => ExactPathMetadata::FileOrDirectory(meta), + None => ExactPathMetadata::DoesNotExist, + }, + ) } } @@ -318,7 +327,7 @@ impl ExactPathSymlinkMetadata { fn to_raw_path_metadata( self, curr: PathAndAbsPath, - rest: Option, + rest: ForwardRelativePathBuf, ) -> anyhow::Result> { Ok(match self { Self::ExternalSymlink(link_path) => RawPathMetadata::Symlink { @@ -326,9 +335,7 @@ impl ExactPathSymlinkMetadata { to: RawSymlink::External(Arc::new(ExternalSymlink::new(link_path, rest)?)), }, Self::InternalSymlink(mut link_path) => { - if let Some(rest) = rest { - link_path.push(&rest); - } + link_path.push(&rest); RawPathMetadata::Symlink { at: curr.path, to: RawSymlink::Relative(link_path), @@ -365,12 +372,14 @@ fn read_unchecked>( ReadUncheckedOptions::Symlink => Err(ReadSymlinkAtExactPathError::NotASymlink.into()), ReadUncheckedOptions::Anything => convert_metadata(&curr, meta, file_digest_config), }, - ExactPathMetadata::Symlink(link) => link.to_raw_path_metadata(curr, None), + ExactPathMetadata::Symlink(link) => { + link.to_raw_path_metadata(curr, ForwardRelativePathBuf::default()) + } } } #[cfg(unix)] -fn is_executable(meta: &std::fs::Metadata) -> bool { +pub fn is_executable(meta: &std::fs::Metadata) -> bool { use std::os::unix::fs::PermissionsExt; // We check 0o111 (user,group,other) instead of 0o100 (user) because even if the user // doesn't have permission, if ANYONE does we assume the file is an executable @@ -378,7 +387,7 @@ fn is_executable(meta: &std::fs::Metadata) -> bool { } #[cfg(not(unix))] -fn is_executable(_meta: &std::fs::Metadata) -> bool { +pub fn is_executable(_meta: &std::fs::Metadata) -> bool { false } @@ -387,7 +396,6 @@ mod tests { use std::os::unix; use assert_matches::assert_matches; - use buck2_core::fs::paths::abs_path::AbsPath; use tempfile::TempDir; use super::*; diff --git a/app/buck2_common/src/io/mod.rs b/app/buck2_common/src/io/mod.rs deleted file mode 100644 index c5ed6b424ee3c..0000000000000 --- a/app/buck2_common/src/io/mod.rs +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -pub mod fs; -pub mod trace; - -use allocative::Allocative; -use async_trait::async_trait; -use buck2_core::fs::project::ProjectRoot; -use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; - -use crate::file_ops::RawDirEntry; -use crate::file_ops::RawPathMetadata; - -#[async_trait] -pub trait IoProvider: Allocative + Send + Sync { - async fn read_file_if_exists( - &self, - path: ProjectRelativePathBuf, - ) -> anyhow::Result>; - - async fn read_dir(&self, path: ProjectRelativePathBuf) -> anyhow::Result>; - - async fn read_path_metadata_if_exists( - &self, - path: ProjectRelativePathBuf, - ) -> anyhow::Result>>; - - /// Request that this I/O provider be up to date with whatever I/O operations the user might - /// have done until this point. 
- async fn settle(&self) -> anyhow::Result<()>; - - fn name(&self) -> &'static str; - - /// Returns the Eden version of the underlying system of the IoProvider, if available. - async fn eden_version(&self) -> anyhow::Result>; - - fn project_root(&self) -> &ProjectRoot; - - fn as_any(&self) -> &dyn std::any::Any; -} diff --git a/app/buck2_common/src/io/trace.rs b/app/buck2_common/src/io/trace.rs index b823056a56626..3af2832725bfc 100644 --- a/app/buck2_common/src/io/trace.rs +++ b/app/buck2_common/src/io/trace.rs @@ -7,8 +7,6 @@ * of this source tree. */ -use std::borrow::Cow; - use allocative::Allocative; use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; @@ -82,6 +80,10 @@ impl TracingIoProvider { } } + pub fn from_io(io: &dyn IoProvider) -> Option<&Self> { + io.as_any().downcast_ref::() + } + pub fn add_project_path(&self, path: ProjectRelativePathBuf) { self.trace.project_entries.insert(path); } @@ -98,19 +100,6 @@ impl TracingIoProvider { self.trace.symlinks.insert(link); } - pub fn add_config_paths(&self, project_root: &ProjectRoot, paths: I) - where - I: IntoIterator, - { - for abspath in paths.into_iter() { - if let Ok(project_path) = project_root.relativize(&abspath).map(Cow::into_owned) { - self.add_project_path(project_path); - } else { - self.add_external_path(abspath); - } - } - } - pub fn trace(&self) -> &Trace { &self.trace } @@ -124,12 +113,15 @@ impl IoProvider for TracingIoProvider { /// /// This makes code working with the exported I/O manifest much easier to /// work with at the expense of some additional I/O during tracing builds. - async fn read_file_if_exists( + async fn read_file_if_exists_impl( &self, path: ProjectRelativePathBuf, ) -> anyhow::Result> { - self.add_project_path(path.clone()); - self.io.read_file_if_exists(path).await + let res = self.io.read_file_if_exists_impl(path.clone()).await?; + if res.is_some() { + self.add_project_path(path); + } + Ok(res) } /// Combination of read_file_if_exists from underlying fs struct and reading @@ -138,8 +130,11 @@ impl IoProvider for TracingIoProvider { /// /// This makes code working with the exported I/O manifest much easier to /// work with at the expense of some additional I/O during tracing builds. 
- async fn read_dir(&self, path: ProjectRelativePathBuf) -> anyhow::Result> { - let entries = self.io.read_dir(path.clone()).await?; + async fn read_dir_impl( + &self, + path: ProjectRelativePathBuf, + ) -> anyhow::Result> { + let entries = self.io.read_dir_impl(path.clone()).await?; self.add_project_path(path.clone()); for entry in entries.iter() { self.add_project_path(path.join(ForwardRelativePath::unchecked_new(&entry.file_name))); @@ -148,11 +143,14 @@ impl IoProvider for TracingIoProvider { Ok(entries) } - async fn read_path_metadata_if_exists( + async fn read_path_metadata_if_exists_impl( &self, path: ProjectRelativePathBuf, ) -> anyhow::Result>> { - let res = self.io.read_path_metadata_if_exists(path.clone()).await?; + let res = self + .io + .read_path_metadata_if_exists_impl(path.clone()) + .await?; match &res { Some(RawPathMetadata::File(_)) | Some(RawPathMetadata::Directory) => { self.add_project_path(path); diff --git a/app/buck2_common/src/kill_util.rs b/app/buck2_common/src/kill_util.rs index 0d6d1b7b6ac29..1d3db7921c50e 100644 --- a/app/buck2_common/src/kill_util.rs +++ b/app/buck2_common/src/kill_util.rs @@ -108,7 +108,6 @@ mod unix { mod tests { use std::process::Stdio; - use tempfile; use tokio::fs::File; use tokio::io::AsyncBufReadExt; use tokio::io::AsyncWriteExt; diff --git a/app/buck2_common/src/legacy_configs.rs b/app/buck2_common/src/legacy_configs.rs new file mode 100644 index 0000000000000..1caa7ebc4d6a7 --- /dev/null +++ b/app/buck2_common/src/legacy_configs.rs @@ -0,0 +1,24 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! Contains utilities for dealing with buckv1 concepts (ex. buckv1's +//! .buckconfig files as configuration) + +mod access; +mod aggregator; +pub mod args; +pub mod cells; +pub mod configs; +pub mod dice; +pub mod diffs; +pub mod file_ops; +pub mod key; +mod parser; +pub(crate) mod path; +pub mod view; diff --git a/app/buck2_common/src/legacy_configs/access.rs b/app/buck2_common/src/legacy_configs/access.rs new file mode 100644 index 0000000000000..ceb72c23366bf --- /dev/null +++ b/app/buck2_common/src/legacy_configs/access.rs @@ -0,0 +1,194 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */
+
+use std::str::FromStr;
+use std::sync::Arc;
+
+use anyhow::Context;
+use gazebo::eq_chain;
+
+use crate::legacy_configs::configs::ConfigValue;
+use crate::legacy_configs::configs::LegacyBuckConfig;
+use crate::legacy_configs::configs::LegacyBuckConfigSection;
+use crate::legacy_configs::configs::LegacyBuckConfigValue;
+use crate::legacy_configs::key::BuckconfigKeyRef;
+use crate::legacy_configs::view::LegacyBuckConfigView;
+
+#[derive(buck2_error::Error, Debug)]
+enum ConfigValueError {
+    #[error(
+        "Invalid value for buckconfig `{section}.{key}`: conversion to {ty} failed, value as `{value}`"
+    )]
+    ParseFailed {
+        section: String,
+        key: String,
+        value: String,
+        ty: &'static str,
+    },
+}
+
+impl LegacyBuckConfigView for &LegacyBuckConfig {
+    fn get(&mut self, key: BuckconfigKeyRef) -> anyhow::Result<Option<Arc<str>>> {
+        Ok(LegacyBuckConfig::get(self, key).map(|v| v.to_owned().into()))
+    }
+}
+
+impl LegacyBuckConfigSection {
+    /// configs are equal if the data they resolve to is equal, regardless of the origin of the config
+    pub(crate) fn compare(&self, other: &Self) -> bool {
+        eq_chain!(
+            self.values.len() == other.values.len(),
+            self.values.iter().all(|(name, value)| other
+                .values
+                .get(name)
+                .map_or(false, |other_val| other_val.as_str() == value.as_str()))
+        )
+    }
+
+    pub fn iter(&self) -> impl Iterator<Item = (&str, LegacyBuckConfigValue)> {
+        self.values
+            .iter()
+            .map(move |(key, value)| (key.as_str(), LegacyBuckConfigValue { value }))
+    }
+
+    pub fn keys(&self) -> impl Iterator<Item = &String> {
+        self.values.keys()
+    }
+
+    pub fn get(&self, key: &str) -> Option<LegacyBuckConfigValue> {
+        self.values
+            .get(key)
+            .map(move |value| LegacyBuckConfigValue { value })
+    }
+}
+
+impl LegacyBuckConfig {
+    fn get_config_value(&self, key: BuckconfigKeyRef) -> Option<&ConfigValue> {
+        let BuckconfigKeyRef { section, property } = key;
+        self.0
+            .values
+            .get(section)
+            .and_then(|s| s.values.get(property))
+    }
+
+    pub fn get(&self, key: BuckconfigKeyRef) -> Option<&str> {
+        self.get_config_value(key).map(|s| s.as_str())
+    }
+
+    /// Iterate all entries.
+    pub fn iter(&self) -> impl Iterator<Item = (&str, impl IntoIterator<Item = (&str, &str)>)> {
+        self.0.values.iter().map(|(section, section_values)| {
+            (
+                section.as_str(),
+                section_values
+                    .values
+                    .iter()
+                    .map(|(key, value)| (key.as_str(), value.as_str())),
+            )
+        })
+    }
+
+    fn parse_impl<T: FromStr>(key: BuckconfigKeyRef, value: &str) -> anyhow::Result<T>
+    where
+        anyhow::Error: From<<T as FromStr>::Err>,
+    {
+        let BuckconfigKeyRef { section, property } = key;
+        value
+            .parse()
+            .map_err(anyhow::Error::from)
+            .with_context(|| ConfigValueError::ParseFailed {
+                section: section.to_owned(),
+                key: property.to_owned(),
+                value: value.to_owned(),
+                ty: std::any::type_name::<T>(),
+            })
+    }
+
+    pub fn parse<T: FromStr>(&self, key: BuckconfigKeyRef) -> anyhow::Result<Option<T>>
+    where
+        anyhow::Error: From<<T as FromStr>::Err>,
+    {
+        self.get_config_value(key)
+            .map(|s| {
+                Self::parse_impl(key, s.as_str()).with_context(|| {
+                    format!("Defined {}", s.source.as_legacy_buck_config_location())
+                })
+            })
+            .transpose()
+    }
+
+    pub fn parse_value<T: FromStr>(
+        key: BuckconfigKeyRef,
+        value: Option<&str>,
+    ) -> anyhow::Result<Option<T>>
+    where
+        anyhow::Error: From<<T as FromStr>::Err>,
+    {
+        value.map(|s| Self::parse_impl(key, s)).transpose()
+    }
+
+    pub fn parse_list<T: FromStr>(&self, key: BuckconfigKeyRef) -> anyhow::Result<Option<Vec<T>>>
+    where
+        anyhow::Error: From<<T as FromStr>::Err>,
+    {
+        Self::parse_list_value(key, self.get(key))
+    }
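
    // A hedged usage sketch (the `build.threads` / `build.args` keys are hypothetical):
    // given `config: &LegacyBuckConfig`,
    //
    //     let threads: Option<u64> =
    //         config.parse(BuckconfigKeyRef { section: "build", property: "threads" })?;
    //     let args: Option<Vec<String>> =
    //         config.parse_list(BuckconfigKeyRef { section: "build", property: "args" })?;
    //
    // a missing key yields Ok(None), while a present-but-malformed value surfaces
    // ConfigValueError::ParseFailed with the offending section, key, and value.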
+
+    pub fn parse_list_value<T: FromStr>(
+        key: BuckconfigKeyRef,
+        value: Option<&str>,
+    ) -> anyhow::Result<Option<Vec<T>>>
+    where
+        anyhow::Error: From<<T as FromStr>::Err>,
+    {
+        /// A wrapper type so we can use .parse() on this.
+        struct ParseList<T>(Vec<T>);
+
+        impl<T> FromStr for ParseList<T>
+        where
+            T: FromStr,
+        {
+            type Err = <T as FromStr>::Err;
+
+            fn from_str(s: &str) -> Result<Self, Self::Err> {
+                Ok(Self(
+                    s.split(',').map(T::from_str).collect::<Result<Vec<T>, _>>()?,
+                ))
+            }
+        }
+
+        Ok(Self::parse_value::<ParseList<T>>(key, value)?.map(|l| l.0))
+    }
+
+    pub fn sections(&self) -> impl Iterator<Item = &String> {
+        self.0.values.keys()
+    }
+
+    pub fn all_sections(&self) -> impl Iterator<Item = (&String, &LegacyBuckConfigSection)> + '_ {
+        self.0.values.iter()
+    }
+
+    pub fn get_section(&self, section: &str) -> Option<&LegacyBuckConfigSection> {
+        self.0.values.get(section)
+    }
+
+    /// configs are equal if the data they resolve to is equal, regardless of the origin of the config
+    pub(crate) fn compare(&self, other: &Self) -> bool {
+        eq_chain!(
+            self.0.values.len() == other.0.values.len(),
+            self.0.values.iter().all(|(section_name, section)| {
+                other
+                    .0
+                    .values
+                    .get(section_name)
+                    .map_or(false, |other_sec| other_sec.compare(section))
+            })
+        )
+    }
+}
diff --git a/app/buck2_common/src/legacy_configs/aggregator.rs b/app/buck2_common/src/legacy_configs/aggregator.rs
new file mode 100644
index 0000000000000..727b419aa8745
--- /dev/null
+++ b/app/buck2_common/src/legacy_configs/aggregator.rs
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::collections::HashMap;
+use std::fmt::Debug;
+
+use buck2_core::cells::alias::NonEmptyCellAlias;
+use buck2_core::cells::cell_root_path::CellRootPath;
+use buck2_core::cells::cell_root_path::CellRootPathBuf;
+use buck2_core::cells::external::ExternalCellOrigin;
+use buck2_core::cells::instance;
+use buck2_core::cells::name::CellName;
+use buck2_core::cells::nested::NestedCells;
+use buck2_core::cells::CellAliasResolver;
+use buck2_core::cells::CellResolver;
+use buck2_core::fs::project_rel_path::ProjectRelativePath;
+use buck2_error::BuckErrorContext;
+use instance::CellInstance;
+
+/// Errors from cell creation
+#[derive(buck2_error::Error, Debug)]
+#[buck2(input)]
+enum CellError {
+    #[error(
+        "Cell name `{0}` should be an alias for an existing cell, but `{1}` isn't a known alias"
+    )]
+    AliasOnlyCell(NonEmptyCellAlias, NonEmptyCellAlias),
+    #[error("No cell name for the root path, add an entry for `.`")]
+    NoRootCell,
+    #[error("`{0}` is not a known cell alias")]
+    UnknownCellAlias(NonEmptyCellAlias),
+    #[error("`{0}` was provided both as a cell name and as an alias")]
+    AliasAndName(NonEmptyCellAlias),
+    #[error("Cell `{0}` was marked as external twice")]
+    DuplicateExternalCell(CellName),
+}
+
+/// Aggregates cell information as we parse cell configs and keeps state to
+/// generate a final 'CellResolver'
+#[derive(Debug)]
+pub(crate) struct CellsAggregator {
+    cell_infos: HashMap<CellName, CellAggregatorInfo>,
+    root_aliases: HashMap<NonEmptyCellAlias, CellName>,
+    root_cell: CellName,
+}
+
+#[derive(Debug)]
+struct CellAggregatorInfo {
+    path: CellRootPathBuf,
+    external: Option<ExternalCellOrigin>,
+}
+
+impl CellsAggregator {
+    pub(crate) fn new(
+        // This is order sensitive
+        cells: Vec<(CellName, CellRootPathBuf)>,
+        root_aliases: HashMap<NonEmptyCellAlias, NonEmptyCellAlias>,
+    ) -> anyhow::Result<Self> {
+        let mut path_rmap = HashMap::new();
+        let mut infos = HashMap::new();
+        let mut combined_aliases = HashMap::new();
+        for (cell, path) in cells {
+            let real_cell = match path_rmap.try_insert(path.clone(), cell) {
+                Ok(_) => {
+                    infos.insert(
+                        cell,
+                        CellAggregatorInfo {
+                            path,
+                            external: None,
}, + ); + cell + } + Err(occupied) => *occupied.entry.get(), + }; + combined_aliases.insert(NonEmptyCellAlias::new(cell.as_str().to_owned())?, real_cell); + } + + let Some(&root_cell) = path_rmap.get(CellRootPath::new(ProjectRelativePath::empty())) + else { + return Err(CellError::NoRootCell.into()); + }; + + for (from, to) in root_aliases { + let Some(cell) = combined_aliases.get(&to) else { + return Err(CellError::AliasOnlyCell(from, to).into()); + }; + if combined_aliases.insert(from.clone(), *cell).is_some() { + return Err(CellError::AliasAndName(from).into()); + } + } + + Ok(Self { + cell_infos: infos, + root_aliases: combined_aliases, + root_cell, + }) + } + + pub(crate) fn resolve_root_alias(&self, alias: NonEmptyCellAlias) -> anyhow::Result { + self.root_aliases + .get(&alias) + .copied() + .ok_or_else(|| CellError::UnknownCellAlias(alias).into()) + } + + pub(crate) fn mark_external_cell( + &mut self, + cell: CellName, + origin: ExternalCellOrigin, + ) -> anyhow::Result<()> { + let info = self + .cell_infos + .get_mut(&cell) + .internal_error_anyhow("cell name is not a cell")?; + if info.external.is_some() { + return Err(CellError::DuplicateExternalCell(cell).into()); + } + info.external = Some(origin); + Ok(()) + } + + pub(crate) fn make_cell_resolver(self) -> anyhow::Result { + let all_cell_roots_for_nested_cells: Vec<_> = self + .cell_infos + .iter() + .map(|(name, info)| (*name, info.path.as_path())) + .collect(); + + let instances = self + .cell_infos + .iter() + .map(|(name, info)| { + let nested_cells = + NestedCells::from_cell_roots(&all_cell_roots_for_nested_cells, &info.path); + CellInstance::new( + *name, + info.path.clone(), + info.external.clone(), + nested_cells, + ) + }) + .collect::>>()?; + + let root_cell_alias_resolver = CellAliasResolver::new(self.root_cell, self.root_aliases)?; + + CellResolver::new(instances, root_cell_alias_resolver) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_duplicate_paths() -> anyhow::Result<()> { + let root = CellName::testing_new("root"); + let root_path = CellRootPathBuf::new(ProjectRelativePath::empty().to_owned()); + let other1 = CellName::testing_new("other1"); + let other2 = CellName::testing_new("other2"); + let other_path = CellRootPathBuf::new(ProjectRelativePath::new("random/path")?.to_owned()); + + let cell_resolver = CellsAggregator::new( + vec![ + (root, root_path.clone()), + (other1, other_path.clone()), + (other2, other_path.clone()), + ], + HashMap::new(), + ) + .unwrap() + .make_cell_resolver() + .unwrap(); + assert!( + cell_resolver + .get(CellName::testing_new("root")) + .unwrap() + .path() + == root_path.as_path() + ); + assert!( + cell_resolver + .get(CellName::testing_new("other1")) + .unwrap() + .path() + == other_path.as_path() + ); + Ok(()) + } + + #[test] + fn test_alias_only_error() { + assert!( + CellsAggregator::new( + Vec::new(), + HashMap::from_iter([( + NonEmptyCellAlias::testing_new("root"), + NonEmptyCellAlias::testing_new("does_not_exist") + )]) + ) + .is_err() + ); + } +} diff --git a/app/buck2_common/src/legacy_configs/args.rs b/app/buck2_common/src/legacy_configs/args.rs new file mode 100644 index 0000000000000..8810767909a6c --- /dev/null +++ b/app/buck2_common/src/legacy_configs/args.rs @@ -0,0 +1,198 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::path::Path; + +use anyhow::Context; +use buck2_cli_proto::config_override::ConfigType; +use buck2_cli_proto::ConfigOverride; +use buck2_core::cells::cell_root_path::CellRootPathBuf; +use buck2_core::fs::paths::abs_path::AbsPath; +use buck2_core::fs::project::ProjectRoot; +use buck2_core::fs::project_rel_path::ProjectRelativePath; +use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; + +use crate::legacy_configs::configs::parse_config_section_and_key; +use crate::legacy_configs::configs::ConfigArgumentParseError; +use crate::legacy_configs::configs::ConfigSectionAndKey; +use crate::legacy_configs::configs::LegacyBuckConfig; +use crate::legacy_configs::file_ops::ConfigParserFileOps; +use crate::legacy_configs::file_ops::ConfigPath; +use crate::legacy_configs::parser::LegacyConfigParser; + +/// Representation of a processed config arg, namely after file path resolution has been performed. +#[derive(Debug, Clone, PartialEq, Eq, allocative::Allocative)] +pub(crate) enum ResolvedLegacyConfigArg { + /// A single config key-value pair (in `a.b=c` format). + Flag(ResolvedConfigFlag), + /// A file containing additional config values (in `.buckconfig` format). + File(ResolvedConfigFile), +} + +#[derive(Clone, Debug, PartialEq, Eq, allocative::Allocative)] +pub(crate) enum ResolvedConfigFile { + /// If the config file is project relative, the path of the file + Project(ProjectRelativePathBuf), + /// If the config file is external, we pre-parse it to be able to insert the results into dice + Global(LegacyConfigParser), +} + +#[derive(Clone, Debug, PartialEq, Eq, allocative::Allocative)] +pub(crate) struct ResolvedConfigFlag { + pub(crate) section: String, + pub(crate) key: String, + // None value means this config is unset. + pub(crate) value: Option, + // If this arg only applies to one cell, the root of that cell. + pub(crate) cell: Option, +} + +fn resolve_config_flag_arg( + cell: Option, + raw_arg: &str, +) -> anyhow::Result { + let (raw_section_and_key, raw_value) = raw_arg + .split_once('=') + .ok_or_else(|| ConfigArgumentParseError::NoEqualsSeparator(raw_arg.to_owned()))?; + let ConfigSectionAndKey { section, key } = + parse_config_section_and_key(raw_section_and_key, Some(raw_arg))?; + + let value = match raw_value { + "" => None, // An empty string unsets this config. 
+ v => Some(v.to_owned()), + }; + + Ok(ResolvedConfigFlag { + cell, + section, + key, + value, + }) +} + +async fn resolve_config_file_arg( + cell: Option, + arg: &str, + project_filesystem: &ProjectRoot, + cwd: &ProjectRelativePath, + file_ops: &mut dyn ConfigParserFileOps, +) -> anyhow::Result { + if let Some(cell_path) = cell { + let proj_path = cell_path.as_project_relative_path().join_normalized(arg)?; + return Ok(ResolvedConfigFile::Project(proj_path)); + } + + let path = Path::new(arg); + let path = if path.is_absolute() { + AbsPath::new(path)?.to_owned() + } else { + let cwd = project_filesystem.resolve(cwd); + cwd.into_abs_path_buf().join(path) + }; + + Ok(ResolvedConfigFile::Global( + LegacyBuckConfig::start_parse_for_external_files( + &[ConfigPath::Global(path)], + file_ops, + // Note that when reading immediate configs that don't follow includes, we don't apply + // config args either + true, // follow includes + ) + .await?, + )) +} + +pub(crate) async fn resolve_config_args( + args: &[ConfigOverride], + project_fs: &ProjectRoot, + cwd: &ProjectRelativePath, + file_ops: &mut dyn ConfigParserFileOps, +) -> anyhow::Result> { + let mut resolved_args = Vec::new(); + + for u in args { + let config_type = ConfigType::from_i32(u.config_type).with_context(|| { + format!( + "Unknown ConfigType enum value `{}` when trying to deserialize", + u.config_type + ) + })?; + let resolved = match config_type { + ConfigType::Value => { + let cell = u.get_cell()?.map(|p| p.to_buf()); + let resolved_flag = resolve_config_flag_arg(cell, &u.config_override)?; + ResolvedLegacyConfigArg::Flag(resolved_flag) + } + ConfigType::File => { + let cell = u.get_cell()?.map(|p| p.to_buf()); + let resolved_path = + resolve_config_file_arg(cell, &u.config_override, project_fs, cwd, file_ops) + .await?; + ResolvedLegacyConfigArg::File(resolved_path) + } + }; + resolved_args.push(resolved); + } + + Ok(resolved_args) +} + +#[cfg(test)] +mod tests { + use super::resolve_config_flag_arg; + + #[test] + fn test_argument_pair() -> anyhow::Result<()> { + // Valid Formats + + let normal_pair = resolve_config_flag_arg(None, "apple.key=value")?; + + assert_eq!("apple", normal_pair.section); + assert_eq!("key", normal_pair.key); + assert_eq!(Some("value".to_owned()), normal_pair.value); + + let unset_pair = resolve_config_flag_arg(None, "apple.key=")?; + + assert_eq!("apple", unset_pair.section); + assert_eq!("key", unset_pair.key); + assert_eq!(None, unset_pair.value); + + // Whitespace + + let section_leading_whitespace = resolve_config_flag_arg(None, " apple.key=value")?; + assert_eq!("apple", section_leading_whitespace.section); + assert_eq!("key", section_leading_whitespace.key); + assert_eq!(Some("value".to_owned()), section_leading_whitespace.value); + + let pair_with_whitespace_in_key = resolve_config_flag_arg(None, "apple. 
key=value"); + assert!(pair_with_whitespace_in_key.is_err()); + + let pair_with_whitespace_in_value = + resolve_config_flag_arg(None, "apple.key= value with whitespace ")?; + assert_eq!("apple", pair_with_whitespace_in_value.section); + assert_eq!("key", pair_with_whitespace_in_value.key); + assert_eq!( + Some(" value with whitespace ".to_owned()), + pair_with_whitespace_in_value.value + ); + + // Invalid Formats + + let pair_without_section = resolve_config_flag_arg(None, "key=value"); + assert!(pair_without_section.is_err()); + + let pair_without_equals = resolve_config_flag_arg(None, "apple.keyvalue"); + assert!(pair_without_equals.is_err()); + + let pair_without_section_or_equals = resolve_config_flag_arg(None, "applekeyvalue"); + assert!(pair_without_section_or_equals.is_err()); + + Ok(()) + } +} diff --git a/app/buck2_common/src/legacy_configs/cells.rs b/app/buck2_common/src/legacy_configs/cells.rs index 33a33418ed70b..8da4d6467ec7e 100644 --- a/app/buck2_common/src/legacy_configs/cells.rs +++ b/app/buck2_common/src/legacy_configs/cells.rs @@ -7,162 +7,240 @@ * of this source tree. */ -use std::cell::OnceCell; -use std::collections::HashMap; use std::collections::HashSet; +use std::sync::Arc; +use allocative::Allocative; use anyhow::Context; +use buck2_core::buck2_env_anyhow; use buck2_core::cells::alias::NonEmptyCellAlias; +use buck2_core::cells::cell_root_path::CellRootPath; use buck2_core::cells::cell_root_path::CellRootPathBuf; +use buck2_core::cells::external::ExternalCellOrigin; +use buck2_core::cells::external::GitCellSetup; +use buck2_core::cells::name::CellName; +use buck2_core::cells::CellAliasResolver; use buck2_core::cells::CellResolver; -use buck2_core::cells::CellsAggregator; -use buck2_core::env_helper::EnvHelper; -use buck2_core::fs::paths::abs_norm_path::AbsNormPath; use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; -use buck2_core::fs::paths::file_name::FileNameBuf; +use buck2_core::fs::paths::abs_path::AbsPath; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; use buck2_core::fs::paths::RelativePath; use buck2_core::fs::project::ProjectRoot; use buck2_core::fs::project_rel_path::ProjectRelativePath; -use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; -use gazebo::prelude::*; - -use crate::legacy_configs::init::DaemonStartupConfig; -use crate::legacy_configs::path::BuckConfigFile; -use crate::legacy_configs::path::DEFAULT_BUCK_CONFIG_FILES; -use crate::legacy_configs::push_all_files_from_a_directory; -use crate::legacy_configs::BuckConfigParseOptions; -use crate::legacy_configs::CellResolutionState; -use crate::legacy_configs::ConfigParserFileOps; -use crate::legacy_configs::DefaultConfigParserFileOps; -use crate::legacy_configs::LegacyBuckConfig; -use crate::legacy_configs::LegacyBuckConfigs; -use crate::legacy_configs::LegacyConfigCmdArg; -use crate::legacy_configs::MainConfigFile; - -#[derive(Debug, thiserror::Error)] -enum CellsError { - #[error( - "Repository root buckconfig must have `[repositories]` section with a pointer to itself \ - like `root = .` which defines the root cell name" - )] - MissingRootCellName, +use dice::DiceComputations; +use dupe::Dupe; + +use crate::cas_digest::RawDigest; +use crate::dice::cells::HasCellResolver; +use crate::dice::data::HasIoProvider; +use crate::external_cells::EXTERNAL_CELLS_IMPL; +use crate::legacy_configs::aggregator::CellsAggregator; +use crate::legacy_configs::args::resolve_config_args; +use crate::legacy_configs::args::ResolvedLegacyConfigArg; +use 
crate::legacy_configs::configs::LegacyBuckConfig; +use crate::legacy_configs::dice::HasInjectedLegacyConfigs; +use crate::legacy_configs::file_ops::push_all_files_from_a_directory; +use crate::legacy_configs::file_ops::ConfigDirEntry; +use crate::legacy_configs::file_ops::ConfigParserFileOps; +use crate::legacy_configs::file_ops::ConfigPath; +use crate::legacy_configs::file_ops::DefaultConfigParserFileOps; +use crate::legacy_configs::file_ops::DiceConfigFileOps; +use crate::legacy_configs::key::BuckconfigKeyRef; +use crate::legacy_configs::parser::LegacyConfigParser; +use crate::legacy_configs::path::ExternalConfigSource; +use crate::legacy_configs::path::ProjectConfigSource; +use crate::legacy_configs::path::DEFAULT_EXTERNAL_CONFIG_SOURCES; +use crate::legacy_configs::path::DEFAULT_PROJECT_CONFIG_SOURCES; + +/// Buckconfigs can partially be loaded from within dice. However, some parts of what makes up the +/// buckconfig comes from outside the buildgraph, and this type represents those parts. +#[derive(PartialEq, Eq, Allocative)] +pub struct ExternalBuckconfigData { + parse_state: LegacyConfigParser, + args: Vec, +} + +impl ExternalBuckconfigData { + pub fn testing_default() -> Self { + Self { + parse_state: LegacyConfigParser::new(), + args: Vec::new(), + } + } + + pub fn filter_values(&self, filter: F) -> Self + where + F: Fn(&BuckconfigKeyRef) -> bool, + { + Self { + parse_state: self.parse_state.clone(), + args: self + .args + .iter() + .filter(|arg| match arg { + ResolvedLegacyConfigArg::Flag(flag) => { + flag.cell.is_some() + || filter(&BuckconfigKeyRef { + section: &flag.section, + property: &flag.key, + }) + } + _ => true, + }) + .cloned() + .collect(), + } + } } /// Used for creating a CellResolver in a buckv1-compatible way based on values /// in .buckconfig in each cell. /// -/// We'll traverse the structure of the `[repositories]` sections starting from +/// We'll traverse the structure of the `[cells]` sections starting from /// the root .buckconfig. All aliases found in the root config will also be /// available in all other cells (v1 provides that same behavior). /// /// We don't (currently) enforce that all aliases appear in the root config, but /// unlike v1, our cells implementation works just fine if that isn't the case. +#[derive(Clone)] pub struct BuckConfigBasedCells { - pub configs_by_name: LegacyBuckConfigs, pub cell_resolver: CellResolver, - pub config_paths: HashSet, + pub root_config: LegacyBuckConfig, + pub config_paths: HashSet, + pub external_data: Arc, } impl BuckConfigBasedCells { - /// Performs a parse of the root `.buckconfig` for the cell _only_ without following includes - /// and without parsing any configs for any referenced cells. This means this function might return - /// an empty mapping if the root `.buckconfig` does not contain the cell definitions. - pub fn parse_immediate_config(project_fs: &ProjectRoot) -> anyhow::Result { - Self::parse_immediate_config_with_file_ops(project_fs, &mut DefaultConfigParserFileOps {}) + /// In the client and one place in the daemon, we need access to the alias resolver for the cwd + /// in some places where we don't have normal dice access + /// + /// This function reads buckconfigs to compute an appropriate cell alias resolver to make that + /// possible. 
+ pub async fn get_cell_alias_resolver_for_cwd_fast( + &self, + project_fs: &ProjectRoot, + cwd: &ProjectRelativePath, + ) -> anyhow::Result { + self.get_cell_alias_resolver_for_cwd_fast_with_file_ops( + &mut DefaultConfigParserFileOps { + project_fs: project_fs.dupe(), + }, + cwd, + ) + .await } - /// Private function with semantics of `parse_immediate_config` but usable for testing. - pub(crate) fn parse_immediate_config_with_file_ops( - project_fs: &ProjectRoot, + pub(crate) async fn get_cell_alias_resolver_for_cwd_fast_with_file_ops( + &self, file_ops: &mut dyn ConfigParserFileOps, - ) -> anyhow::Result { - let opts = BuckConfigParseOptions { - follow_includes: false, - }; - let cells = Self::parse_with_file_ops_and_options( - project_fs, + cwd: &ProjectRelativePath, + ) -> anyhow::Result { + let cell_name = self.cell_resolver.find(cwd)?; + let cell_path = self.cell_resolver.get(cell_name)?.path(); + + let follow_includes = false; + + let config_paths = get_project_buckconfig_paths(cell_path, file_ops).await?; + let config = LegacyBuckConfig::finish_parse( + self.external_data.parse_state.clone(), + &config_paths, + cell_path, file_ops, &[], - ProjectRelativePath::empty(), - opts, - )?; + follow_includes, + ) + .await?; - let root_config = cells - .configs_by_name - .get(cells.cell_resolver.root_cell()) - .context("No config for root cell")?; - - Ok(ImmediateConfig { - cell_resolver: cells.cell_resolver, - daemon_startup_config: DaemonStartupConfig::new(root_config) - .context("Error loading daemon startup config")?, - }) + CellAliasResolver::new_for_non_root_cell( + cell_name, + self.cell_resolver.root_cell_cell_alias_resolver(), + BuckConfigBasedCells::get_cell_aliases_from_config(&config)?, + ) } - pub fn parse(project_fs: &ProjectRoot) -> anyhow::Result { - Self::parse_with_file_ops( + pub async fn parse_with_config_args( + project_fs: &ProjectRoot, + config_args: &[buck2_cli_proto::ConfigOverride], + cwd: &ProjectRelativePath, + ) -> anyhow::Result { + Self::parse_with_file_ops_and_options( project_fs, - &mut DefaultConfigParserFileOps {}, - &[], - ProjectRelativePath::empty(), + &mut DefaultConfigParserFileOps { + project_fs: project_fs.dupe(), + }, + config_args, + cwd, + false, /* follow includes */ ) + .await } - pub fn parse_with_config_args( + pub async fn testing_parse_with_file_ops( project_fs: &ProjectRoot, - config_args: &[LegacyConfigCmdArg], + file_ops: &mut dyn ConfigParserFileOps, + config_args: &[buck2_cli_proto::ConfigOverride], cwd: &ProjectRelativePath, ) -> anyhow::Result { - Self::parse_with_file_ops( + Self::parse_with_file_ops_and_options( project_fs, - &mut DefaultConfigParserFileOps {}, + file_ops, config_args, cwd, + true, /* follow includes */ ) + .await } - pub fn parse_with_file_ops( - project_fs: &ProjectRoot, + async fn parse_with_file_ops_and_options( + project_root: &ProjectRoot, file_ops: &mut dyn ConfigParserFileOps, - config_args: &[LegacyConfigCmdArg], + config_args: &[buck2_cli_proto::ConfigOverride], cwd: &ProjectRelativePath, + follow_includes: bool, ) -> anyhow::Result { - let opts = BuckConfigParseOptions { - follow_includes: true, - }; - Self::parse_with_file_ops_and_options(project_fs, file_ops, config_args, cwd, opts) + Self::parse_with_file_ops_and_options_inner( + project_root, + file_ops, + config_args, + cwd, + follow_includes, + ) + .await + .with_context(|| format!("Parsing cells with project root `{project_root}`, cwd `{cwd}`",)) } - fn parse_with_file_ops_and_options( + async fn parse_with_file_ops_and_options_inner( project_fs: 
&ProjectRoot, file_ops: &mut dyn ConfigParserFileOps, - config_args: &[LegacyConfigCmdArg], + config_args: &[buck2_cli_proto::ConfigOverride], cwd: &ProjectRelativePath, - options: BuckConfigParseOptions, + follow_includes: bool, ) -> anyhow::Result { // Tracing file ops to record config file accesses on command invocation. struct TracingFileOps<'a> { inner: &'a mut dyn ConfigParserFileOps, - trace: HashSet, + trace: HashSet, } + #[async_trait::async_trait] impl ConfigParserFileOps for TracingFileOps<'_> { - fn read_file_lines( + async fn read_file_lines_if_exists( &mut self, - path: &AbsNormPath, - ) -> anyhow::Result>>> - { - self.trace.insert(path.to_buf()); - self.inner.read_file_lines(path) - } + path: &ConfigPath, + ) -> anyhow::Result< + Option> + Send>>, + > { + let res = self.inner.read_file_lines_if_exists(path).await?; + + if res.is_some() { + self.trace.insert(path.clone()); + } - fn file_exists(&self, path: &AbsNormPath) -> bool { - self.inner.file_exists(path) + Ok(res) } - fn file_id(&self, path: &AbsNormPath) -> String { - self.inner.file_id(path) + async fn read_dir(&mut self, path: &ConfigPath) -> anyhow::Result> { + self.inner.read_dir(path).await } } @@ -171,287 +249,344 @@ impl BuckConfigBasedCells { trace: Default::default(), }; - let mut buckconfigs = HashMap::new(); - let mut work = vec![CellRootPathBuf::new(ProjectRelativePathBuf::try_from( - "".to_owned(), - )?)]; - let mut cells_aggregator = CellsAggregator::new(); - let mut root_aliases = HashMap::new(); - - // By definition, cell resolution should be happening against the cell mapping defined - // by the .buckconfig of the project root. - let cell_resolution = CellResolutionState { - project_filesystem: project_fs, - cell_resolver: OnceCell::new(), - cwd: &project_fs.resolve(cwd), - }; // NOTE: This will _not_ perform IO unless it needs to. - let processed_config_args = LegacyBuckConfig::process_config_args( - config_args, - Some(&cell_resolution), - &mut file_ops, - )?; + let processed_config_args = + resolve_config_args(&config_args, project_fs, cwd, &mut file_ops).await?; - static SKIP_DEFAULT_EXTERNAL_CONFIG: EnvHelper = - EnvHelper::::new("BUCK2_TEST_SKIP_DEFAULT_EXTERNAL_CONFIG"); + let external_paths = get_external_buckconfig_paths(&mut file_ops).await?; + let started_parse = LegacyBuckConfig::start_parse_for_external_files( + &external_paths, + &mut file_ops, + follow_includes, + ) + .await?; - static EXTRA_EXTERNAL_CONFIG: EnvHelper = - EnvHelper::::new("BUCK2_TEST_EXTRA_EXTERNAL_CONFIG"); + let root_path = CellRootPathBuf::new(ProjectRelativePath::empty().to_owned()); - let skip_default_external_config = SKIP_DEFAULT_EXTERNAL_CONFIG - .get()? - .copied() - .unwrap_or_default(); + let buckconfig_paths = get_project_buckconfig_paths(&root_path, &mut file_ops).await?; - while let Some(path) = work.pop() { - if buckconfigs.contains_key(&path) { - continue; + let root_config = LegacyBuckConfig::finish_parse( + started_parse.clone(), + buckconfig_paths.as_slice(), + &root_path, + &mut file_ops, + &processed_config_args, + follow_includes, + ) + .await?; + + let mut cell_definitions = Vec::new(); + + // `cells` is preferred over `repositories` since it's more clear, however it's unlikely + // that we'll ever remove `repositories` since that's probably unnecessary breakage in OSS. 
+ // + // Note that `cells` is buck2-only + let repositories = root_config + .get_section("cells") + .or_else(|| root_config.get_section("repositories")); + if let Some(repositories) = repositories { + for (alias, alias_path) in repositories.iter() { + let alias_path = CellRootPathBuf::new( + root_path.as_project_relative_path() + .join_normalized(RelativePath::new(alias_path.as_str())) + .with_context(|| { + format!( + "expected alias path to be a relative path, but found `{}` for `{}`", + alias_path.as_str(), + alias, + ) + })? + ); + let name = CellName::unchecked_new(alias)?; + cell_definitions.push((name, alias_path)); } + } - let mut buckconfig_paths: Vec = Vec::new(); - - for buckconfig in DEFAULT_BUCK_CONFIG_FILES { - if skip_default_external_config && buckconfig.is_external() { - continue; - } - - match buckconfig { - BuckConfigFile::ProjectRelativeFile(file) => { - let buckconfig_path = ForwardRelativePath::new(file)?; - buckconfig_paths.push(MainConfigFile { - path: project_fs - .resolve(&path.project_relative_path().join(buckconfig_path)), - owned_by_project: true, - }); - } - - BuckConfigFile::ProjectRelativeFolder(folder) => { - let buckconfig_folder_path = ForwardRelativePath::new(folder)?; - let buckconfig_folder_abs_path = project_fs - .resolve(&path.project_relative_path().join(buckconfig_folder_path)); - push_all_files_from_a_directory( - &mut buckconfig_paths, - &buckconfig_folder_abs_path, - true, - )?; - } - BuckConfigFile::UserFile(file) => { - let home_dir = dirs::home_dir(); - if let Some(home_dir_path) = home_dir { - let buckconfig_path = ForwardRelativePath::new(file)?; - buckconfig_paths.push(MainConfigFile { - path: AbsNormPath::new(&home_dir_path)? - .join_normalized(buckconfig_path)?, - owned_by_project: false, - }); - } - } - BuckConfigFile::UserFolder(folder) => { - let home_dir = dirs::home_dir(); - if let Some(home_dir_path) = home_dir { - let buckconfig_path = ForwardRelativePath::new(folder)?; - let buckconfig_folder_abs_path = AbsNormPath::new(&home_dir_path)? - .join_normalized(buckconfig_path)?; - push_all_files_from_a_directory( - &mut buckconfig_paths, - &buckconfig_folder_abs_path, - false, - )?; - } - } - BuckConfigFile::GlobalFile(file) => { - buckconfig_paths.push(MainConfigFile { - path: AbsNormPathBuf::from(String::from(*file))?, - owned_by_project: false, - }); - } - BuckConfigFile::GlobalFolder(folder) => { - let buckconfig_folder_abs_path = - AbsNormPathBuf::from(String::from(*folder))?; - push_all_files_from_a_directory( - &mut buckconfig_paths, - &buckconfig_folder_abs_path, - false, - )?; + let root_aliases = Self::get_cell_aliases_from_config(&root_config)?.collect(); + + let mut aggregator = CellsAggregator::new(cell_definitions, root_aliases)?; + + if let Some(external_cells) = root_config.get_section("external_cells") { + for (alias, origin) in external_cells.iter() { + let alias = NonEmptyCellAlias::new(alias.to_owned())?; + let name = aggregator.resolve_root_alias(alias)?; + let origin = Self::parse_external_cell_origin(name, origin.as_str(), &root_config)?; + if let ExternalCellOrigin::Bundled(name) = origin { + // This code is executed both in the client and in the daemon. When in the + // client and using a client-only build, this late binding might not be bound, + // and so we can't check this. 
That doesn't matter though, as we'll get an error + // when this fails in the daemon anyway + if let Ok(imp) = EXTERNAL_CELLS_IMPL.get() { + imp.check_bundled_cell_exists(name)?; } } + aggregator.mark_external_cell(name, origin)?; } + } - if let Some(f) = EXTRA_EXTERNAL_CONFIG.get()? { - buckconfig_paths.push(MainConfigFile { - path: AbsNormPathBuf::from(f.to_owned())?, - owned_by_project: false, - }); + let cell_resolver = aggregator.make_cell_resolver()?; + + Ok(Self { + cell_resolver, + root_config, + config_paths: file_ops.trace, + external_data: Arc::new(ExternalBuckconfigData { + parse_state: started_parse, + args: processed_config_args, + }), + }) + } + + pub(crate) fn get_cell_aliases_from_config( + config: &LegacyBuckConfig, + ) -> anyhow::Result> { + let mut aliases = Vec::new(); + if let Some(section) = config + .get_section("cell_aliases") + .or_else(|| config.get_section("repository_aliases")) + { + for (alias, destination) in section.iter() { + let alias = NonEmptyCellAlias::new(alias.to_owned())?; + let destination = NonEmptyCellAlias::new(destination.as_str().to_owned())?; + aliases.push((alias, destination)); } + } + Ok(aliases.into_iter()) + } - let existing_configs: Vec = buckconfig_paths - .into_iter() - .filter(|main_config_file| file_ops.file_exists(&main_config_file.path)) - .collect(); + pub(crate) async fn parse_single_cell_with_dice( + ctx: &mut DiceComputations<'_>, + cell_path: &CellRootPath, + ) -> anyhow::Result { + let resolver = ctx.get_cell_resolver().await?; + let io_provider = ctx.global_data().get_io_provider(); + let project_fs = io_provider.project_root(); + let external_data = ctx.get_injected_external_buckconfig_data().await?; - // Must contains a buckconfig owned by project, otherwise no cell can be found. - // This also check if existing_configs is empty - let has_project_owned_config = existing_configs - .iter() - .any(|main_config_file| main_config_file.owned_by_project); + let mut file_ops = DiceConfigFileOps::new(ctx, project_fs, &resolver); - if !has_project_owned_config { - buckconfigs.insert(path, LegacyBuckConfig::empty()); - continue; - }; + Self::parse_single_cell_with_file_ops_inner(&external_data, &mut file_ops, cell_path).await + } - let config = LegacyBuckConfig::parse_with_file_ops_with_includes( - existing_configs.as_slice(), - &mut file_ops, - &processed_config_args, - options.follow_includes, - )?; - - let is_root = path.is_repo_root(); - - let repositories = config.get_section("repositories"); - if let Some(repositories) = repositories { - let mut seen_dot = false; - for (alias, alias_path) in repositories.iter() { - if alias_path.as_str() == "." 
{ - seen_dot = true; - } + pub async fn parse_single_cell( + &self, + cell: CellName, + project_fs: &ProjectRoot, + ) -> anyhow::Result { + self.parse_single_cell_with_file_ops( + cell, + &mut DefaultConfigParserFileOps { + project_fs: project_fs.dupe(), + }, + ) + .await + } - let alias_path = CellRootPathBuf::new(path - .join_normalized(RelativePath::new(alias_path.as_str())) - .with_context(|| { - format!( - "expected alias path to be a relative path, but found `{}` for `{}` in buckconfig `{}`", - alias_path.as_str(), - alias, - path - ) - })?); - let alias = NonEmptyCellAlias::new(alias.to_owned())?; - if is_root { - root_aliases.insert(alias.clone(), alias_path.clone()); - } - cells_aggregator.add_cell_entry(path.clone(), alias, alias_path.clone())?; - work.push(alias_path); - } + pub(crate) async fn parse_single_cell_with_file_ops( + &self, + cell: CellName, + file_ops: &mut dyn ConfigParserFileOps, + ) -> anyhow::Result { + Self::parse_single_cell_with_file_ops_inner( + &self.external_data, + file_ops, + self.cell_resolver.get(cell)?.path(), + ) + .await + } - if is_root && !seen_dot { - return Err(CellsError::MissingRootCellName.into()); - } - } else if is_root { - return Err(CellsError::MissingRootCellName.into()); - } + async fn parse_single_cell_with_file_ops_inner( + external_data: &ExternalBuckconfigData, + file_ops: &mut dyn ConfigParserFileOps, + cell_path: &CellRootPath, + ) -> anyhow::Result { + let config_paths = get_project_buckconfig_paths(cell_path, file_ops).await?; + LegacyBuckConfig::finish_parse( + external_data.parse_state.clone(), + &config_paths, + cell_path, + file_ops, + external_data.args.as_ref(), + /* follow includes */ true, + ) + .await + } - if let Some(aliases) = config.get_section("repository_aliases") { - for (alias, destination) in aliases.iter() { - let alias = NonEmptyCellAlias::new(alias.to_owned())?; - let destination = NonEmptyCellAlias::new(destination.as_str().to_owned())?; - let alias_path = cells_aggregator.add_cell_alias( - path.clone(), - alias.clone(), - destination, - )?; - if path.as_str() == "" { - root_aliases.insert(alias, alias_path.clone()); - } - } - } + fn parse_external_cell_origin( + cell: CellName, + value: &str, + config: &LegacyBuckConfig, + ) -> anyhow::Result { + #[derive(buck2_error::Error, Debug)] + enum ExternalCellOriginParseError { + #[error("Unknown external cell origin `{0}`")] + Unknown(String), + #[error("Missing buckconfig `{0}.{1}` for external cell configuration")] + MissingConfiguration(String, String), + } - if let Some(buildfiles) = Self::parse_buildfile_name(&config)? { - cells_aggregator.set_buildfiles(path.clone(), buildfiles); - } - if let Some(buildfile) = config.parse::("buildfile", "extra_for_test")? 
{ - cells_aggregator.add_buildfile(path.clone(), FileNameBuf::try_from(buildfile)?); - } + let get_config = |section: &str, property: &str| { + config + .get(crate::legacy_configs::key::BuckconfigKeyRef { section, property }) + .ok_or_else(|| { + ExternalCellOriginParseError::MissingConfiguration( + section.to_owned(), + property.to_owned(), + ) + }) + }; - buckconfigs.insert(path, config); + if value == "bundled" { + Ok(ExternalCellOrigin::Bundled(cell)) + } else if value == "git" { + let section = &format!("external_cell_{}", cell.as_str()); + let commit: Arc = get_config(section, "commit_hash")?.into(); + // No use in storing the commit hash as a byte array, but let's reuse existing code to + // check for validity + let _ = RawDigest::parse_sha1(commit.as_bytes())?; + Ok(ExternalCellOrigin::Git(GitCellSetup { + git_origin: get_config(section, "git_origin")?.into(), + commit, + })) + } else { + Err(ExternalCellOriginParseError::Unknown(value.to_owned()).into()) } + } +} - for cell_path in buckconfigs.keys() { - for (alias, alias_path) in &root_aliases { - cells_aggregator.add_cell_entry( - cell_path.clone(), - alias.clone(), - alias_path.clone(), - )?; +async fn get_external_buckconfig_paths( + file_ops: &mut dyn ConfigParserFileOps, +) -> anyhow::Result> { + let skip_default_external_config = buck2_env_anyhow!( + "BUCK2_TEST_SKIP_DEFAULT_EXTERNAL_CONFIG", + bool, + applicability = testing + )?; + + let mut buckconfig_paths: Vec = Vec::new(); + + if !skip_default_external_config { + for buckconfig in DEFAULT_EXTERNAL_CONFIG_SOURCES { + match buckconfig { + ExternalConfigSource::UserFile(file) => { + let home_dir = dirs::home_dir(); + if let Some(home_dir_path) = home_dir { + let buckconfig_path = ForwardRelativePath::new(file)?; + buckconfig_paths.push(ConfigPath::Global( + AbsPath::new(&home_dir_path)?.join(buckconfig_path.as_str()), + )); + } + } + ExternalConfigSource::UserFolder(folder) => { + let home_dir = dirs::home_dir(); + if let Some(home_dir_path) = home_dir { + let buckconfig_path = ForwardRelativePath::new(folder)?; + let buckconfig_folder_abs_path = + AbsPath::new(&home_dir_path)?.join(buckconfig_path.as_str()); + push_all_files_from_a_directory( + &mut buckconfig_paths, + &ConfigPath::Global(buckconfig_folder_abs_path), + file_ops, + ) + .await?; + } + } + ExternalConfigSource::GlobalFile(file) => { + buckconfig_paths.push(ConfigPath::Global(AbsPath::new(*file)?.to_owned())); + } + ExternalConfigSource::GlobalFolder(folder) => { + let buckconfig_folder_abs_path = AbsPath::new(*folder)?.to_owned(); + push_all_files_from_a_directory( + &mut buckconfig_paths, + &ConfigPath::Global(buckconfig_folder_abs_path), + file_ops, + ) + .await?; + } } } + } - let cell_resolver = cells_aggregator.make_cell_resolver()?; - let configs_by_name = buckconfigs - .into_iter() - .map(|(path, config)| Ok((cell_resolver.find(path.project_relative_path())?, config))) - .collect::>()?; + let extra_external_config = + buck2_env_anyhow!("BUCK2_TEST_EXTRA_EXTERNAL_CONFIG", applicability = testing)?; - Ok(Self { - configs_by_name: LegacyBuckConfigs::new(configs_by_name), - cell_resolver, - config_paths: file_ops.trace, - }) + if let Some(f) = extra_external_config { + buckconfig_paths.push(ConfigPath::Global(AbsPath::new(f)?.to_owned())); } - /// Deal with the `buildfile.name` key (and `name_v2`) - fn parse_buildfile_name(config: &LegacyBuckConfig) -> anyhow::Result>> { - // For buck2, we support a slightly different mechanism for setting the buildfile to - // assist with easier migration from v1 to 
v2. - // First, we check the key `buildfile.name_v2`, if this is provided, we use it. - // Second, if that wasn't provided, we will use `buildfile.name` like buck1 does, - // but for every entry `FOO` we will insert a preceding `FOO.v2`. - // If neither of those is provided, we will use the default of `["BUCK.v2", "BUCK"]`. - // This scheme provides a natural progression to buckv2, with the ability to use separate - // buildfiles for the two where necessary. - if let Some(buildfiles_value) = config.parse_list::("buildfile", "name_v2")? { - Ok(Some(buildfiles_value.into_try_map(FileNameBuf::try_from)?)) - } else if let Some(buildfiles_value) = config.parse_list::("buildfile", "name")? { - let mut buildfiles = Vec::new(); - for buildfile in buildfiles_value { - buildfiles.push(FileNameBuf::try_from(format!("{}.v2", buildfile))?); - buildfiles.push(FileNameBuf::try_from(buildfile)?); + Ok(buckconfig_paths) +} + +async fn get_project_buckconfig_paths( + path: &CellRootPath, + file_ops: &mut dyn ConfigParserFileOps, +) -> anyhow::Result> { + let mut buckconfig_paths: Vec = Vec::new(); + + for buckconfig in DEFAULT_PROJECT_CONFIG_SOURCES { + match buckconfig { + ProjectConfigSource::CellRelativeFile(file) => { + let buckconfig_path = ForwardRelativePath::new(file)?; + buckconfig_paths.push(ConfigPath::Project( + path.as_project_relative_path().join(buckconfig_path), + )); + } + ProjectConfigSource::CellRelativeFolder(folder) => { + let buckconfig_folder_path = ForwardRelativePath::new(folder)?; + let buckconfig_folder_path = + path.as_project_relative_path().join(buckconfig_folder_path); + push_all_files_from_a_directory( + &mut buckconfig_paths, + &ConfigPath::Project(buckconfig_folder_path), + file_ops, + ) + .await?; } - Ok(Some(buildfiles)) - } else { - Ok(None) } } + + Ok(buckconfig_paths) } -/// Limited view of the root config. This does not follow includes. 
-pub struct ImmediateConfig { - pub cell_resolver: CellResolver, - pub daemon_startup_config: DaemonStartupConfig, +pub(crate) fn create_project_filesystem() -> ProjectRoot { + #[cfg(not(windows))] + let root_path = "/".to_owned(); + #[cfg(windows)] + let root_path = "C:/".to_owned(); + ProjectRoot::new_unchecked(AbsNormPathBuf::try_from(root_path).unwrap()) } #[cfg(test)] mod tests { + use std::sync::Arc; + use buck2_cli_proto::ConfigOverride; + use buck2_core::cells::cell_root_path::CellRootPath; + use buck2_core::cells::cell_root_path::CellRootPathBuf; + use buck2_core::cells::external::ExternalCellOrigin; + use buck2_core::cells::external::GitCellSetup; use buck2_core::cells::name::CellName; - use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; - use buck2_core::fs::project::ProjectRoot; use buck2_core::fs::project_rel_path::ProjectRelativePath; - use gazebo::prelude::*; + use dice::DiceComputations; use indoc::indoc; + use crate::dice::file_ops::delegate::FileOpsDelegate; + use crate::external_cells::ExternalCellsImpl; + use crate::external_cells::EXTERNAL_CELLS_IMPL; + use crate::legacy_configs::cells::create_project_filesystem; use crate::legacy_configs::cells::BuckConfigBasedCells; - use crate::legacy_configs::testing::TestConfigParserFileOps; - use crate::legacy_configs::tests::assert_config_value; - use crate::legacy_configs::LegacyConfigCmdArg; - - fn create_project_filesystem() -> ProjectRoot { - #[cfg(not(windows))] - let root_path = "/".to_owned(); - #[cfg(windows)] - let root_path = "C:/".to_owned(); - ProjectRoot::new_unchecked(AbsNormPathBuf::try_from(root_path).unwrap()) - } + use crate::legacy_configs::configs::testing::TestConfigParserFileOps; + use crate::legacy_configs::configs::tests::assert_config_value; + use crate::legacy_configs::key::BuckconfigKeyRef; - #[test] - fn test_cells() -> anyhow::Result<()> { + #[tokio::test] + async fn test_cells() -> anyhow::Result<()> { let mut file_ops = TestConfigParserFileOps::new(&[ ( - "/.buckconfig", + ".buckconfig", indoc!( r#" - [repositories] + [cells] root = . other = other/ other_alias = other/ @@ -460,40 +595,35 @@ mod tests { ), ), ( - "/other/.buckconfig", + "other/.buckconfig", indoc!( r#" - [repositories] + [cells] root = .. other = . third_party = ../third_party/ - [buildfile] - name = TARGETS - extra_for_test = TARGETS.test "# ), ), ( - "/third_party/.buckconfig", + "third_party/.buckconfig", indoc!( r#" - [repositories] + [cells] third_party = . 
- [buildfile] - name_v2 = OKAY - name = OKAY_v1 "# ), ), ])?; let project_fs = create_project_filesystem(); - let cells = BuckConfigBasedCells::parse_with_file_ops( + let cells = BuckConfigBasedCells::testing_parse_with_file_ops( &project_fs, &mut file_ops, &[], ProjectRelativePath::empty(), - )?; + ) + .await?; let resolver = &cells.cell_resolver; @@ -501,47 +631,38 @@ mod tests { let other_instance = resolver.get(CellName::testing_new("other"))?; let tp_instance = resolver.get(CellName::testing_new("third_party"))?; - assert_eq!( - vec!["BUCK.v2", "BUCK"], - root_instance.buildfiles().map(|n| n.as_str()) - ); - assert_eq!( - vec!["TARGETS.v2", "TARGETS", "TARGETS.test"], - other_instance.buildfiles().map(|n| n.as_str()) - ); - assert_eq!(vec!["OKAY"], tp_instance.buildfiles().map(|n| n.as_str())); + assert_eq!("", root_instance.path().as_str()); + assert_eq!("other", other_instance.path().as_str()); + assert_eq!("third_party", tp_instance.path().as_str()); assert_eq!( "other", - root_instance - .cell_alias_resolver() + resolver + .root_cell_cell_alias_resolver() .resolve("other_alias")? .as_str() ); - assert_eq!( - "other", - tp_instance - .cell_alias_resolver() - .resolve("other_alias")? - .as_str() - ); + let tp_resolver = cells + .get_cell_alias_resolver_for_cwd_fast_with_file_ops( + &mut file_ops, + tp_instance.path().as_project_relative_path(), + ) + .await?; - assert_eq!("", root_instance.path().as_str()); - assert_eq!("other", other_instance.path().as_str()); - assert_eq!("third_party", tp_instance.path().as_str()); + assert_eq!("other", tp_resolver.resolve("other_alias")?.as_str()); Ok(()) } - #[test] - fn test_multi_cell_with_config_file() -> anyhow::Result<()> { + #[tokio::test] + async fn test_multi_cell_with_config_file() -> anyhow::Result<()> { let mut file_ops = TestConfigParserFileOps::new(&[ ( - "/.buckconfig", + ".buckconfig", indoc!( r#" - [repositories] + [cells] root = . other = other/ other_alias = other/ @@ -550,10 +671,10 @@ mod tests { ), ), ( - "/other/.buckconfig", + "other/.buckconfig", indoc!( r#" - [repositories] + [cells] root = .. other = . third_party = ../third_party/ @@ -563,10 +684,10 @@ mod tests { ), ), ( - "/third_party/.buckconfig", + "third_party/.buckconfig", indoc!( r#" - [repositories] + [cells] third_party = . 
[buildfile] name_v2 = OKAY @@ -575,7 +696,7 @@ mod tests { ), ), ( - "/other/cli-conf", + "other/cli-conf", indoc!( r#" [foo] @@ -586,44 +707,67 @@ mod tests { ])?; let project_fs = create_project_filesystem(); - #[cfg(not(windows))] - let file_arg = "/other/cli-conf"; - #[cfg(windows)] - let file_arg = "C:/other/cli-conf"; - let cells = BuckConfigBasedCells::parse_with_file_ops( + let cells = BuckConfigBasedCells::testing_parse_with_file_ops( &project_fs, &mut file_ops, - &[LegacyConfigCmdArg::file(file_arg)?], + &[ConfigOverride::file( + "cli-conf", + Some(CellRootPathBuf::testing_new("other")), + )], ProjectRelativePath::empty(), - )?; + ) + .await?; - let configs = &cells.configs_by_name; - let root_config = configs.get(CellName::testing_new("root")).unwrap(); - let other_config = configs.get(CellName::testing_new("other")).unwrap(); - let tp_config = configs.get(CellName::testing_new("third_party")).unwrap(); + let root_config = cells + .parse_single_cell_with_file_ops(CellName::testing_new("root"), &mut file_ops) + .await?; + let other_config = cells + .parse_single_cell_with_file_ops(CellName::testing_new("other"), &mut file_ops) + .await?; + let tp_config = cells + .parse_single_cell_with_file_ops(CellName::testing_new("third_party"), &mut file_ops) + .await?; - assert_eq!(root_config.get("foo", "bar"), Some("blah")); - assert_eq!(other_config.get("foo", "bar"), Some("blah")); - assert_eq!(tp_config.get("foo", "bar"), Some("blah")); + assert_eq!( + root_config.get(BuckconfigKeyRef { + section: "foo", + property: "bar" + }), + Some("blah") + ); + assert_eq!( + other_config.get(BuckconfigKeyRef { + section: "foo", + property: "bar" + }), + Some("blah") + ); + assert_eq!( + tp_config.get(BuckconfigKeyRef { + section: "foo", + property: "bar" + }), + Some("blah") + ); Ok(()) } - #[test] - fn test_multi_cell_no_repositories_in_non_root_cell() -> anyhow::Result<()> { + #[tokio::test] + async fn test_multi_cell_no_repositories_in_non_root_cell() -> anyhow::Result<()> { let mut file_ops = TestConfigParserFileOps::new(&[ ( - "/.buckconfig", + ".buckconfig", indoc!( r#" - [repositories] + [cells] root = . other = other/ "# ), ), ( - "/other/.buckconfig", + "other/.buckconfig", indoc!( r#" [foo] @@ -634,37 +778,44 @@ mod tests { ])?; let project_fs = create_project_filesystem(); - let cells = BuckConfigBasedCells::parse_with_file_ops( + let cells = BuckConfigBasedCells::testing_parse_with_file_ops( &project_fs, &mut file_ops, &[], ProjectRelativePath::empty(), - )?; - - let configs = &cells.configs_by_name; + ) + .await?; - let other_config = configs.get(CellName::testing_new("other")).unwrap(); + let other_config = cells + .parse_single_cell_with_file_ops(CellName::testing_new("other"), &mut file_ops) + .await?; - assert_eq!(other_config.get("foo", "bar"), Some("baz")); + assert_eq!( + other_config.get(BuckconfigKeyRef { + section: "foo", + property: "bar" + }), + Some("baz") + ); Ok(()) } - #[test] - fn test_multi_cell_with_cell_relative() -> anyhow::Result<()> { + #[tokio::test] + async fn test_multi_cell_with_cell_relative() -> anyhow::Result<()> { let mut file_ops = TestConfigParserFileOps::new(&[ ( - "/.buckconfig", + ".buckconfig", indoc!( r#" - [repositories] + [cells] root = . other = other/ "# ), ), ( - "/global-conf", + "global-conf", indoc!( r#" [apple] @@ -673,10 +824,10 @@ mod tests { ), ), ( - "/other/.buckconfig", + "other/.buckconfig", indoc!( r#" - [repositories] + [cells] root = .. other = . 
[buildfile] @@ -685,7 +836,7 @@ mod tests { ), ), ( - "/other/app-conf", + "other/app-conf", indoc!( r#" [apple] @@ -696,33 +847,47 @@ mod tests { ])?; let project_fs = create_project_filesystem(); - let cells = BuckConfigBasedCells::parse_with_file_ops( + let cells = BuckConfigBasedCells::testing_parse_with_file_ops( &project_fs, &mut file_ops, &[ - LegacyConfigCmdArg::file("other//app-conf")?, - LegacyConfigCmdArg::file("//global-conf")?, + ConfigOverride::file("app-conf", Some(CellRootPathBuf::testing_new("other"))), + ConfigOverride::file("global-conf", Some(CellRootPathBuf::testing_new(""))), ], ProjectRelativePath::empty(), - )?; + ) + .await?; - let configs = &cells.configs_by_name; - let other_config = configs.get(CellName::testing_new("other")).unwrap(); + let other_config = cells + .parse_single_cell_with_file_ops(CellName::testing_new("other"), &mut file_ops) + .await?; - assert_eq!(other_config.get("apple", "ide"), Some("Xcode")); - assert_eq!(other_config.get("apple", "test_tool"), Some("xctool")); + assert_eq!( + other_config.get(BuckconfigKeyRef { + section: "apple", + property: "ide" + }), + Some("Xcode") + ); + assert_eq!( + other_config.get(BuckconfigKeyRef { + section: "apple", + property: "test_tool" + }), + Some("xctool") + ); Ok(()) } - #[test] - fn test_local_config_file_overwrite_config_file() -> anyhow::Result<()> { + #[tokio::test] + async fn test_local_config_file_overwrite_config_file() -> anyhow::Result<()> { let mut file_ops = TestConfigParserFileOps::new(&[ ( - "/.buckconfig", + ".buckconfig", indoc!( r#" - [repositories] + [cells] root = . [apple] key = value1 @@ -731,7 +896,7 @@ mod tests { ), ), ( - "/.buckconfig.local", + ".buckconfig.local", indoc!( r#" [orange] @@ -745,35 +910,37 @@ mod tests { ])?; let project_fs = create_project_filesystem(); - let cells = BuckConfigBasedCells::parse_with_file_ops( + let cells = BuckConfigBasedCells::testing_parse_with_file_ops( &project_fs, &mut file_ops, &[], ProjectRelativePath::empty(), - )?; + ) + .await?; - let configs = &cells.configs_by_name; - let config = configs.get(CellName::testing_new("root")).unwrap(); + let config = cells + .parse_single_cell_with_file_ops(CellName::testing_new("root"), &mut file_ops) + .await?; // No local override - assert_config_value(config, "apple", "key", "value1"); + assert_config_value(&config, "apple", "key", "value1"); // local override to new value - assert_config_value(config, "apple", "key2", "value5"); + assert_config_value(&config, "apple", "key2", "value5"); // local override new field - assert_config_value(config, "apple", "key3", "value4"); + assert_config_value(&config, "apple", "key3", "value4"); // local override new section - assert_config_value(config, "orange", "key", "value3"); + assert_config_value(&config, "orange", "key", "value3"); Ok(()) } - #[test] - fn test_multi_cell_local_config_file_overwrite_config_file() -> anyhow::Result<()> { + #[tokio::test] + async fn test_multi_cell_local_config_file_overwrite_config_file() -> anyhow::Result<()> { let mut file_ops = TestConfigParserFileOps::new(&[ ( - "/.buckconfig", + ".buckconfig", indoc!( r#" - [repositories] + [cells] root = . other = other/ [apple] @@ -783,7 +950,7 @@ mod tests { ), ), ( - "/.buckconfig.local", + ".buckconfig.local", indoc!( r#" [orange] @@ -795,10 +962,10 @@ mod tests { ), ), ( - "/other/.buckconfig", + "other/.buckconfig", indoc!( r#" - [repositories] + [cells] root = .. other = . 
[apple] @@ -808,7 +975,7 @@ mod tests { ), ), ( - "/other/.buckconfig.local", + "other/.buckconfig.local", indoc!( r#" [orange] @@ -822,34 +989,352 @@ mod tests { ])?; let project_fs = create_project_filesystem(); - let cells = BuckConfigBasedCells::parse_with_file_ops( + let cells = BuckConfigBasedCells::testing_parse_with_file_ops( &project_fs, &mut file_ops, &[], ProjectRelativePath::empty(), - )?; + ) + .await?; - let configs = &cells.configs_by_name; - let root_config = configs.get(CellName::testing_new("root")).unwrap(); - let other_config = configs.get(CellName::testing_new("other")).unwrap(); + let root_config = cells + .parse_single_cell_with_file_ops(CellName::testing_new("root"), &mut file_ops) + .await?; + let other_config = cells + .parse_single_cell_with_file_ops(CellName::testing_new("other"), &mut file_ops) + .await?; // No local override - assert_config_value(root_config, "apple", "key", "value1"); + assert_config_value(&root_config, "apple", "key", "value1"); // local override to new value - assert_config_value(root_config, "apple", "key2", "value5"); + assert_config_value(&root_config, "apple", "key2", "value5"); // local override new field - assert_config_value(root_config, "apple", "key3", "value4"); + assert_config_value(&root_config, "apple", "key3", "value4"); // local override new section - assert_config_value(root_config, "orange", "key", "value3"); + assert_config_value(&root_config, "orange", "key", "value3"); // No local override - assert_config_value(other_config, "apple", "key", "othervalue1"); + assert_config_value(&other_config, "apple", "key", "othervalue1"); // local override to new value - assert_config_value(other_config, "apple", "key2", "othervalue5"); + assert_config_value(&other_config, "apple", "key2", "othervalue5"); // local override new field - assert_config_value(other_config, "apple", "key3", "othervalue4"); + assert_config_value(&other_config, "apple", "key3", "othervalue4"); // local override new section - assert_config_value(other_config, "orange", "key", "othervalue3"); + assert_config_value(&other_config, "orange", "key", "othervalue3"); + + Ok(()) + } + + #[tokio::test] + async fn test_config_arg_with_no_buckconfig() -> anyhow::Result<()> { + let mut file_ops = TestConfigParserFileOps::new(&[( + ".buckconfig", + indoc!( + r#" + [repositories] + root = . + other = other + "# + ), + )])?; + let project_fs = create_project_filesystem(); + + let cells = BuckConfigBasedCells::testing_parse_with_file_ops( + &project_fs, + &mut file_ops, + &[ConfigOverride::flag_no_cell("some_section.key=value1")], + ProjectRelativePath::empty(), + ) + .await?; + let config = cells + .parse_single_cell_with_file_ops(CellName::testing_new("other"), &mut file_ops) + .await?; + + assert_config_value(&config, "some_section", "key", "value1"); + + Ok(()) + } + + #[tokio::test] + async fn test_cell_config_section_name() -> anyhow::Result<()> { + let mut file_ops = TestConfigParserFileOps::new(&[( + ".buckconfig", + indoc!( + r#" + [repositories] + root = . + other = other/ + [repository_aliases] + other_alias = other + "# + ), + )])?; + + let project_fs = create_project_filesystem(); + let resolver = BuckConfigBasedCells::testing_parse_with_file_ops( + &project_fs, + &mut file_ops, + &[], + ProjectRelativePath::empty(), + ) + .await? + .cell_resolver; + + assert_eq!( + "other", + resolver + .root_cell_cell_alias_resolver() + .resolve("other_alias")? 
+ .as_str(), + ); + + Ok(()) + } + + fn initialize_external_cells_impl() { + struct TestExternalCellsImpl; + + #[async_trait::async_trait] + impl ExternalCellsImpl for TestExternalCellsImpl { + async fn get_file_ops_delegate( + &self, + _ctx: &mut DiceComputations<'_>, + _cell_name: CellName, + _origin: ExternalCellOrigin, + ) -> anyhow::Result> { + // Not used in these tests + unreachable!() + } + + fn check_bundled_cell_exists(&self, cell_name: CellName) -> anyhow::Result<()> { + if cell_name.as_str() == "test_bundled_cell" { + Ok(()) + } else { + Err(anyhow::anyhow!("No bundled cell with name `{}`", cell_name)) + } + } + + async fn expand( + &self, + _ctx: &mut DiceComputations<'_>, + _cell_name: CellName, + _origin: ExternalCellOrigin, + _path: &CellRootPath, + ) -> anyhow::Result<()> { + // Not used in these tests + unreachable!() + } + } + + static INIT: std::sync::Once = std::sync::Once::new(); + + // Sometimes multiple unittests are run in the same process + INIT.call_once(|| { + EXTERNAL_CELLS_IMPL.init(&TestExternalCellsImpl); + }); + } + + #[tokio::test] + async fn test_external_cell_configs() -> anyhow::Result<()> { + initialize_external_cells_impl(); + + let mut file_ops = TestConfigParserFileOps::new(&[( + ".buckconfig", + indoc!( + r#" + [cells] + root = . + test_bundled_cell = other1/ + other2 = other2/ + [cell_aliases] + other_alias = test_bundled_cell + [external_cells] + other_alias = bundled + "# + ), + )])?; + + let project_fs = create_project_filesystem(); + let resolver = BuckConfigBasedCells::testing_parse_with_file_ops( + &project_fs, + &mut file_ops, + &[], + ProjectRelativePath::empty(), + ) + .await? + .cell_resolver; + + let other1 = resolver + .root_cell_cell_alias_resolver() + .resolve("other_alias") + .unwrap(); + let other2 = resolver + .root_cell_cell_alias_resolver() + .resolve("other2") + .unwrap(); + + assert_eq!( + resolver.get(other1).unwrap().external(), + Some(&ExternalCellOrigin::Bundled(CellName::testing_new( + "test_bundled_cell" + ))), + ); + assert_eq!(resolver.get(other2).unwrap().external(), None,); + assert_eq!( + resolver + .root_cell_cell_alias_resolver() + .resolve("other_alias") + .unwrap() + .as_str(), + "test_bundled_cell", + ); + + Ok(()) + } + + #[tokio::test] + async fn test_nested_external_cell_configs() -> anyhow::Result<()> { + initialize_external_cells_impl(); + + let mut file_ops = TestConfigParserFileOps::new(&[( + ".buckconfig", + indoc!( + r#" + [cells] + root = . + test_bundled_cell = foo/ + bar = foo/bar/ + [external_cells] + test_bundled_cell = bundled + "# + ), + )])?; + + let project_fs = create_project_filesystem(); + BuckConfigBasedCells::testing_parse_with_file_ops( + &project_fs, + &mut file_ops, + &[], + ProjectRelativePath::empty(), + ) + .await + .err() + .unwrap(); + + Ok(()) + } + + #[tokio::test] + async fn test_missing_bundled_cell() -> anyhow::Result<()> { + initialize_external_cells_impl(); + + let mut file_ops = TestConfigParserFileOps::new(&[( + ".buckconfig", + indoc!( + r#" + [cells] + root = . 
+ foo = foo/ + bar = foo/bar/ + [external_cells] + foo = bundled + "# + ), + )])?; + + let project_fs = create_project_filesystem(); + let e = BuckConfigBasedCells::testing_parse_with_file_ops( + &project_fs, + &mut file_ops, + &[], + ProjectRelativePath::empty(), + ) + .await + .err() + .unwrap(); + + let e = format!("{:?}", e); + assert!(e.contains("No bundled cell"), "error: {}", e); + + Ok(()) + } + + #[tokio::test] + async fn test_git_external_cell() -> anyhow::Result<()> { + initialize_external_cells_impl(); + + let mut file_ops = TestConfigParserFileOps::new(&[( + ".buckconfig", + indoc!( + r#" + [cells] + root = . + libfoo = foo/ + [external_cells] + libfoo = git + [external_cell_libfoo] + git_origin = https://github.com/jeff/libfoo.git + commit_hash = aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee + "# + ), + )])?; + + let project_fs = create_project_filesystem(); + let resolver = BuckConfigBasedCells::testing_parse_with_file_ops( + &project_fs, + &mut file_ops, + &[], + ProjectRelativePath::empty(), + ) + .await? + .cell_resolver; + + let instance = resolver.get(CellName::testing_new("libfoo")).unwrap(); + + assert_eq!( + instance.external(), + Some(&ExternalCellOrigin::Git(GitCellSetup { + git_origin: "https://github.com/jeff/libfoo.git".into(), + commit: "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee".into(), + })), + ); + + Ok(()) + } + + #[tokio::test] + async fn test_git_external_cell_invalid_sha1() -> anyhow::Result<()> { + initialize_external_cells_impl(); + + let mut file_ops = TestConfigParserFileOps::new(&[( + ".buckconfig", + indoc!( + r#" + [cells] + root = . + libfoo = foo/ + [external_cells] + libfoo = git + [external_cell_libfoo] + git_origin = https://github.com/jeff/libfoo.git + commit_hash = abcde + "# + ), + )])?; + + let project_fs = create_project_filesystem(); + let e = BuckConfigBasedCells::testing_parse_with_file_ops( + &project_fs, + &mut file_ops, + &[], + ProjectRelativePath::empty(), + ) + .await + .err() + .unwrap(); + + let e = format!("{:?}", e); + assert!(e.contains("not a valid SHA1 digest"), "error: {}", e); Ok(()) } diff --git a/app/buck2_common/src/legacy_configs/configs.rs b/app/buck2_common/src/legacy_configs/configs.rs new file mode 100644 index 0000000000000..c14c54e33f9b9 --- /dev/null +++ b/app/buck2_common/src/legacy_configs/configs.rs @@ -0,0 +1,864 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use std::collections::HashMap; +use std::fmt; +use std::fmt::Display; +use std::io::BufRead; +use std::sync::Arc; + +use allocative::Allocative; +use buck2_cli_proto::ConfigOverride; +use buck2_core::cells::cell_root_path::CellRootPath; +use buck2_core::fs::project_rel_path::ProjectRelativePath; +use dupe::Dupe; +use starlark_map::sorted_map::SortedMap; + +use crate::legacy_configs::args::ResolvedConfigFile; +use crate::legacy_configs::args::ResolvedLegacyConfigArg; +use crate::legacy_configs::file_ops::ConfigParserFileOps; +use crate::legacy_configs::file_ops::ConfigPath; +use crate::legacy_configs::key::BuckconfigKeyRef; +use crate::legacy_configs::parser::LegacyConfigParser; + +#[derive(Clone, Dupe, Debug, Allocative)] +pub struct LegacyBuckConfig(pub(crate) Arc); + +#[derive(Debug, Allocative)] +pub(crate) struct ConfigData { + pub(crate) values: SortedMap, +} + +#[derive(Clone, Debug, PartialEq, Eq, Allocative)] +pub(crate) enum ResolvedValue { + // A placeholder used before we do resolution. + Unknown, + // Indicates that there's no resolution required, the resolved value and raw value are the same. + Literal, + // The resolved value for non-literals. + Resolved(String), +} + +#[derive(Debug, PartialEq, Eq, Allocative)] +pub(crate) struct ConfigFileLocation { + pub(crate) path: String, + pub(crate) include_source: Option, +} + +#[derive(Clone, Debug, PartialEq, Eq, Allocative)] +pub(crate) struct ConfigFileLocationWithLine { + pub(crate) source_file: Arc, + pub(crate) line: usize, +} + +#[derive(Clone, Debug, PartialEq, Eq, Allocative)] +pub(crate) enum Location { + File(ConfigFileLocationWithLine), + CommandLineArgument, +} + +impl Location { + pub(crate) fn as_legacy_buck_config_location(&self) -> LegacyBuckConfigLocation { + match self { + Self::File(x) => LegacyBuckConfigLocation::File(&x.source_file.path, x.line), + Self::CommandLineArgument => LegacyBuckConfigLocation::CommandLineArgument, + } + } +} + +// Represents a config section and key only, for example, `cxx.compiler`. +#[derive(Clone, Debug)] +pub struct ConfigSectionAndKey { + // TODO(scottcao): Add cell_path + pub section: String, + pub key: String, +} + +#[derive(buck2_error::Error, Debug)] +#[buck2(input)] +pub(crate) enum ConfigArgumentParseError { + #[error("Could not find section separator (`.`) in pair `{0}`")] + NoSectionDotSeparator(String), + #[error("Could not find equals sign (`=`) in pair `{0}`")] + NoEqualsSeparator(String), + + #[error("Expected key-value in format of `section.key=value` but only got `{0}`")] + MissingData(String), + + #[error("Contains whitespace in key-value pair `{0}`")] + WhitespaceInKeyOrValue(String), + + #[error("Specifying cells via cli config overrides is banned (`{0}.key=value`)")] + CellOverrideViaCliConfig(&'static str), +} + +// Parses config key in the format `section.key` +pub fn parse_config_section_and_key( + raw_section_and_key: &str, + raw_arg_in_err: Option<&str>, // Used in error strings to preserve the original config argument, not just section and key +) -> anyhow::Result { + let raw_arg = raw_arg_in_err.unwrap_or(raw_section_and_key); + let (raw_section, raw_key) = raw_section_and_key + .split_once('.') + .ok_or_else(|| ConfigArgumentParseError::NoSectionDotSeparator(raw_arg.to_owned()))?; + + // We only trim the section + key, whitespace in values needs to be preserved. For example, + // Buck can be invoked with --config section.key="Some Value" that contains important whitespace. 
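+    // For illustration, a few example inputs and their outcomes (hypothetical
+    // values, following the rules implemented below):
+    //   "cxx.compiler"   -> section "cxx", key "compiler"
+    //   " cxx.compiler"  -> section "cxx", key "compiler" (leading whitespace trimmed)
+    //   "cxxcompiler"    -> error: no `.` separator
+    //   "cxx.com piler"  -> error: whitespace in key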
+ let trimmed_section = raw_section.trim_start(); + if trimmed_section.find(char::is_whitespace).is_some() + || raw_key.find(char::is_whitespace).is_some() + { + return Err(anyhow::anyhow!( + ConfigArgumentParseError::WhitespaceInKeyOrValue(raw_arg.to_owned()) + )); + } + + if trimmed_section.is_empty() || raw_key.is_empty() { + return Err(anyhow::anyhow!(ConfigArgumentParseError::MissingData( + raw_arg.to_owned() + ))); + } + + Ok(ConfigSectionAndKey { + section: trimmed_section.to_owned(), + key: raw_key.to_owned(), + }) +} + +#[derive(Debug, Clone, PartialEq, Eq, Allocative)] +pub(crate) struct ConfigValue { + raw_value: String, + pub(crate) resolved_value: ResolvedValue, + pub(crate) source: Location, +} + +#[derive(Debug, Default, Allocative)] +pub struct LegacyBuckConfigSection { + pub(crate) values: SortedMap, +} + +impl ConfigValue { + pub(crate) fn new_raw(source: ConfigFileLocationWithLine, value: String) -> Self { + Self { + raw_value: value, + resolved_value: ResolvedValue::Unknown, + source: Location::File(source), + } + } + + pub(crate) fn new_raw_arg(raw_value: String) -> Self { + Self { + raw_value, + resolved_value: ResolvedValue::Unknown, + source: Location::CommandLineArgument, + } + } + + pub(crate) fn raw_value(&self) -> &str { + &self.raw_value + } + + pub(crate) fn as_str(&self) -> &str { + match &self.resolved_value { + ResolvedValue::Literal => &self.raw_value, + ResolvedValue::Resolved(v) => v, + ResolvedValue::Unknown => { + unreachable!("cannot call as_str() until all values are resolved") + } + } + } +} + +pub struct LegacyBuckConfigValue<'a> { + pub(crate) value: &'a ConfigValue, +} + +#[derive(PartialEq, Debug)] +pub enum LegacyBuckConfigLocation<'a> { + File(&'a str, usize), + CommandLineArgument, +} + +impl<'a> Display for LegacyBuckConfigLocation<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::File(file, line) => { + write!(f, "at {}:{}", file, line) + } + Self::CommandLineArgument => { + write!(f, "on the command line") + } + } + } +} + +impl<'a> LegacyBuckConfigValue<'a> { + pub fn as_str(&self) -> &'a str { + self.value.as_str() + } + + pub fn raw_value(&self) -> &str { + self.value.raw_value() + } + + pub fn location(&self) -> LegacyBuckConfigLocation { + match &self.value.source { + Location::File(file) => { + LegacyBuckConfigLocation::File(&file.source_file.path, file.line) + } + Location::CommandLineArgument => LegacyBuckConfigLocation::CommandLineArgument, + } + } + + pub fn location_stack(&self) -> Vec { + let mut res = Vec::new(); + let mut location = Some(&self.value.source); + + while let Some(loc) = location.take() { + match &loc { + Location::File(loc) => { + res.push(LegacyBuckConfigLocation::File( + &loc.source_file.path, + loc.line, + )); + location = loc.source_file.include_source.as_ref(); + } + Location::CommandLineArgument => { + // No stack + } + } + } + res + } +} + +impl LegacyBuckConfig { + pub fn empty() -> Self { + Self(Arc::new(ConfigData { + values: SortedMap::new(), + })) + } + + pub fn filter_values(&self, filter: F) -> Self + where + F: Fn(&BuckconfigKeyRef) -> bool, + { + let values = self + .0 + .values + .iter() + .filter_map(|(section, section_data)| { + let values: SortedMap<_, _> = section_data + .values + .iter() + .filter(|(property, _)| filter(&BuckconfigKeyRef { section, property })) + .map(|(property, value)| (property.clone(), value.clone())) + .collect(); + if values.is_empty() { + None + } else { + Some((section.clone(), LegacyBuckConfigSection { values })) + } + }) + 
.collect(); + Self(Arc::new(ConfigData { values })) + } + + pub(crate) async fn start_parse_for_external_files( + config_paths: &[ConfigPath], + file_ops: &mut dyn ConfigParserFileOps, + follow_includes: bool, + ) -> anyhow::Result { + let mut parser = LegacyConfigParser::new(); + for main_config_file in config_paths { + parser + .parse_file(&main_config_file, None, follow_includes, file_ops) + .await?; + } + Ok(parser) + } + + pub(crate) async fn finish_parse( + mut parser: LegacyConfigParser, + main_config_files: &[ConfigPath], + current_cell: &CellRootPath, + file_ops: &mut dyn ConfigParserFileOps, + config_args: &[ResolvedLegacyConfigArg], + follow_includes: bool, + ) -> anyhow::Result { + for main_config_file in main_config_files { + parser + .parse_file(&main_config_file, None, follow_includes, file_ops) + .await?; + } + + for config_arg in config_args { + match config_arg { + ResolvedLegacyConfigArg::Flag(config_value) => { + parser.apply_config_arg(config_value, current_cell)? + } + ResolvedLegacyConfigArg::File(ResolvedConfigFile::Project(path)) => { + parser + .parse_file( + &ConfigPath::Project(path.to_owned()), + Some(Location::CommandLineArgument), + follow_includes, + file_ops, + ) + .await? + } + ResolvedLegacyConfigArg::File(ResolvedConfigFile::Global(other)) => { + parser.join(other); + } + }; + } + + parser.finish() + } +} + +pub mod testing { + use std::cmp::min; + + use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; + + use super::*; + use crate::legacy_configs::args::resolve_config_args; + use crate::legacy_configs::cells::create_project_filesystem; + use crate::legacy_configs::file_ops::ConfigDirEntry; + + pub fn parse(data: &[(&str, &str)], path: &str) -> anyhow::Result { + parse_with_config_args(data, path, &[]) + } + + pub fn parse_with_config_args( + data: &[(&str, &str)], + cell_path: &str, + config_args: &[ConfigOverride], + ) -> anyhow::Result { + let mut file_ops = TestConfigParserFileOps::new(data)?; + let path = ProjectRelativePath::new(cell_path)?; + futures::executor::block_on(async { + // As long as people don't pass config files, making up values here is ok + let processed_config_args = resolve_config_args( + config_args, + &create_project_filesystem(), + &ProjectRelativePath::empty(), + &mut file_ops, + ) + .await?; + LegacyBuckConfig::finish_parse( + LegacyConfigParser::new(), + &[ConfigPath::Project(path.to_owned())], + CellRootPath::new(ProjectRelativePath::empty()), + &mut file_ops, + &processed_config_args, + true, + ) + .await + }) + } + + pub struct TestConfigParserFileOps { + data: HashMap, + } + + impl TestConfigParserFileOps { + pub fn new(data: &[(&str, &str)]) -> anyhow::Result { + let mut holder_data = HashMap::new(); + for (file, content) in data { + holder_data.insert( + ProjectRelativePath::new(*file)?.to_owned(), + (*content).to_owned(), + ); + } + Ok(TestConfigParserFileOps { data: holder_data }) + } + } + + #[async_trait::async_trait] + #[allow(private_interfaces)] + impl ConfigParserFileOps for TestConfigParserFileOps { + async fn read_file_lines_if_exists( + &mut self, + path: &ConfigPath, + ) -> anyhow::Result< + Option< + Box< + ( + dyn std::iter::Iterator> + + Send + + 'static + ), + >, + >, + > { + let ConfigPath::Project(path) = path else { + return Ok(None); + }; + let Some(content) = self.data.get(path) else { + return Ok(None); + }; + // Need a Read implementation that owns the bytes. 
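+            // (For what it's worth, `std::io::Cursor<Vec<u8>>` also implements
+            // `BufRead` over owned bytes, so `std::io::Cursor::new(bytes).lines()`
+            // would likely work here as well; the hand-rolled reader below just
+            // spells the ownership out explicitly.)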
+ struct StringReader(Vec, usize); + impl std::io::Read for StringReader { + fn read(&mut self, buf: &mut [u8]) -> Result { + let remaining = self.0.len() - self.1; + let to_return = min(remaining, buf.len()); + buf[..to_return].clone_from_slice(&self.0[self.1..self.1 + to_return]); + self.1 += to_return; + Ok(to_return) + } + } + let file = std::io::BufReader::new(StringReader(content.to_owned().into_bytes(), 0)); + Ok(Some(Box::new(file.lines()))) + } + + async fn read_dir(&mut self, _path: &ConfigPath) -> anyhow::Result> { + // This is only used for listing files in `buckconfig.d` directories, which we can just + // say are always empty in tests + Ok(Vec::new()) + } + } +} + +#[cfg(test)] +pub(crate) mod tests { + use buck2_core::cells::cell_root_path::CellRootPathBuf; + use indoc::indoc; + use itertools::Itertools; + + use super::testing::*; + use super::*; + use crate::legacy_configs::key::BuckconfigKeyRef; + + pub(crate) fn assert_config_value( + config: &LegacyBuckConfig, + section: &str, + key: &str, + expected: &str, + ) { + match config.get_section(section) { + None => { + panic!( + "Expected config to have section `{}`, but had sections `<{}>`", + section, + config.sections().join(", ") + ); + } + Some(values) => match values.get(key) { + None => panic!( + "Expected section `{}` to have key `{}`, but had keys `<{}>`", + section, + key, + values.keys().join(", ") + ), + Some(v) if v.as_str() != expected => { + panic!( + "Expected `{}.{}` to have value `{}`. Got `{}`.", + section, + key, + expected, + v.as_str() + ); + } + _ => {} + }, + } + } + + fn assert_config_value_is_empty(config: &LegacyBuckConfig, section: &str, key: &str) { + match config.get_section(section) { + Some(values) => match values.get(key) { + Some(v) => { + panic!( + "Expected `{}.{}` to not exist. Got `{}` for value.", + section, + key, + v.as_str() + ); + } + _ => {} + }, + _ => {} + }; + } + + #[test] + fn test_simple() -> anyhow::Result<()> { + let config = parse( + &[( + "config", + indoc!( + r#" + [section] + int = 1 + string = hello + multiline = hello \ + world\ + ! + + # this is a comment + commented = okay + + [new_section] + overridden = 1 + + [another_section] + some_val = 2 + + [new_section] + reopened = ok + # override overridden + overridden = 3 + + # note trailing whitespace + [bad_formatting] + + value = 1 + "# + ), + )], + "config", + )?; + + assert_eq!( + None, + config.get(BuckconfigKeyRef { + section: "section", + property: "missing" + }) + ); + assert_eq!( + None, + config.get(BuckconfigKeyRef { + section: "missing", + property: "int" + }) + ); + assert_config_value(&config, "section", "int", "1"); + assert_config_value(&config, "section", "string", "hello"); + // Note that lines are all trimmed, so leading whitespace after a newline is + // dropped. 
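+        // For instance, `multiline` above is spread over three physical lines;
+        // the leading whitespace of each continuation line is dropped, so the
+        // value comes out as "hello world!" rather than keeping the indentation.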
+ assert_config_value(&config, "section", "multiline", "hello world!"); + assert_config_value(&config, "section", "commented", "okay"); + assert_config_value(&config, "another_section", "some_val", "2"); + assert_config_value(&config, "new_section", "reopened", "ok"); + assert_config_value(&config, "new_section", "overridden", "3"); + assert_config_value(&config, "bad_formatting", "value", "1"); + Ok(()) + } + + #[test] + fn test_comments() -> anyhow::Result<()> { + let config = parse( + &[( + "config", + indoc!( + r#" + [section1] # stuff + key1 = value1 + [section2#name] + key2 = value2 + "# + ), + )], + "config", + )?; + assert_config_value(&config, "section1", "key1", "value1"); + assert_config_value(&config, "section2#name", "key2", "value2"); + Ok(()) + } + + #[test] + fn test_references() -> anyhow::Result<()> { + let config = parse( + &[( + "config", + indoc!( + r#" + + [section1] + ref1_1 = ref1_1<$(config section3.ref3_2)> + + [section2] + ref2_1 = ref2_1<$(config section3.ref3_1)> + ref2_2 = ref2_2<$(config section2.ref2_1)> + [section3] + ref3_1 = ref3_1<$(config section1.ref1_1), $(config section3.ref3_2)> + ref3_2 = ref3_2 + + [simple] + s1 = $(config simple.s2)$(config simple.s2)$(config simple.s2) + s2 = $(config simple.s3)$(config simple.s3)$(config simple.s3) + s3 = x + "# + ), + )], + "config", + )?; + + assert_config_value( + &config, + "section2", + "ref2_2", + "ref2_2, ref3_2>>>", + ); + + assert_config_value(&config, "simple", "s1", "xxxxxxxxx"); + Ok(()) + } + + #[test] + fn test_reference_cycle() -> anyhow::Result<()> { + let res = parse( + &[( + "config", + indoc!( + r#" + + [x] + a = $(config x.b) + b = $(config x.c) + c = $(config x.d) + d = $(config x.e) + e = $(config x.f) + f = $(config x.g) + g = $(config x.d) + "# + ), + )], + "config", + ); + + match res { + Ok(_) => panic!("Expected failure."), + Err(e) => { + let message = e.to_string(); + let cycle = "`x.d` -> `x.e` -> `x.f` -> `x.g` -> `x.d`"; + assert!( + message.contains(cycle), + "Expected error to contain \"{}\", but was `{}`", + cycle, + message + ); + } + } + + Ok(()) + } + + #[test] + fn test_includes() -> anyhow::Result<()> { + let config = parse( + &[ + ( + "base", + indoc!( + r#" + base = okay! + "# + ), + ), + ( + "section", + indoc!( + r#" + [section] + "# + ), + ), + ( + "some/deep/dir/includes_base", + indoc!( + r#" + + "# + ), + ), + ( + "includes_section", + indoc!( + r#" + + "# + ), + ), + ( + "config", + indoc!( + r#" + # use a couple optional includes in here to ensure those work when the file exists. + [opened_section] + # include into an already open section + + # start a section with an include + + key = wild + + [other_section] + # ensure can reopen section with an include + + other_key=wildtoo + + # Check that an optional include for a file that doesn't exist is okay. + + "# + ), + ), + ( + "test_bad_include", + indoc!( + r#" + + "# + ), + ), + ], + "config", + )?; + + assert_config_value(&config, "opened_section", "base", "okay!"); + assert_config_value(&config, "section", "base", "okay!"); + // Note that lines are all trimmed, so leading whitespace after a newline is + // dropped. 
+ assert_config_value(&config, "section", "key", "wild"); + assert_config_value(&config, "section", "other_key", "wildtoo"); + Ok(()) + } + + #[test] + fn test_config_args_ordering() -> anyhow::Result<()> { + let config_args = vec![ + ConfigOverride::flag_no_cell("apple.key=value1"), + ConfigOverride::flag_no_cell("apple.key=value2"), + ]; + let config = parse_with_config_args(&[("config", indoc!(r#""#))], "config", &config_args)?; + assert_config_value(&config, "apple", "key", "value2"); + + Ok(()) + } + + #[test] + fn test_config_args_empty() -> anyhow::Result<()> { + let config_args = vec![ConfigOverride::flag_no_cell("apple.key=")]; + let config = parse_with_config_args(&[("config", indoc!(r#""#))], "config", &config_args)?; + assert_config_value_is_empty(&config, "apple", "key"); + + Ok(()) + } + + #[test] + fn test_config_args_overwrite_config_file() -> anyhow::Result<()> { + let config_args = vec![ConfigOverride::flag_no_cell("apple.key=value2")]; + let config = parse_with_config_args( + &[( + "config", + indoc!( + r#" + [apple] + key = value1 + "# + ), + )], + "config", + &config_args, + )?; + + assert_config_value(&config, "apple", "key", "value2"); + + let apple_section = config.get_section("apple").unwrap(); + let key_value = apple_section.get("key").unwrap(); + assert_eq!( + key_value.location(), + LegacyBuckConfigLocation::CommandLineArgument + ); + + Ok(()) + } + + #[test] + fn test_section_and_key() -> anyhow::Result<()> { + // Valid Formats + + let normal_section_and_key = parse_config_section_and_key("apple.key", None)?; + + assert_eq!("apple", normal_section_and_key.section); + assert_eq!("key", normal_section_and_key.key); + + // Whitespace + + let section_leading_whitespace = parse_config_section_and_key(" apple.key", None)?; + assert_eq!("apple", section_leading_whitespace.section); + assert_eq!("key", section_leading_whitespace.key); + + let pair_with_whitespace_in_key = parse_config_section_and_key("apple. key", None); + assert!(pair_with_whitespace_in_key.is_err()); + + // Invalid Formats + + let pair_without_dot = parse_config_section_and_key("applekey", None); + assert!(pair_without_dot.is_err()); + + Ok(()) + } + + #[test] + fn test_config_file_args_overwrite_config_file() -> anyhow::Result<()> { + let config_args = vec![ + ConfigOverride::flag_no_cell("apple.key=value3"), + ConfigOverride::file("cli-config", Some(CellRootPathBuf::testing_new(""))), + ]; + let config = parse_with_config_args( + &[ + ( + ".buckconfig", + indoc!( + r#" + [cells] + root = . 
+ + [apple] + key = value1 + "# + ), + ), + ( + "cli-config", + indoc!( + r#" + [apple] + key = value2 + "# + ), + ), + ], + ".buckconfig", + &config_args, + )?; + + assert_config_value(&config, "apple", "key", "value2"); + + let apple_section = config.get_section("apple").unwrap(); + let key_value = apple_section.get("key").unwrap(); + let expected_path = LegacyBuckConfigLocation::File("cli-config", 2); + assert_eq!(key_value.location(), expected_path); + + Ok(()) + } + + #[test] + fn test_config_args_cell_in_value() -> anyhow::Result<()> { + let config_args = vec![ConfigOverride::flag_no_cell("apple.key=foo//value1")]; + let config = parse_with_config_args(&[("config", indoc!(r#""#))], "config", &config_args)?; + assert_config_value(&config, "apple", "key", "foo//value1"); + + Ok(()) + } +} diff --git a/app/buck2_common/src/legacy_configs/dice.rs b/app/buck2_common/src/legacy_configs/dice.rs index ba16dbd47d782..85b39f77da603 100644 --- a/app/buck2_common/src/legacy_configs/dice.rs +++ b/app/buck2_common/src/legacy_configs/dice.rs @@ -9,12 +9,14 @@ //! Dice operations for legacy configuration +use std::future::Future; use std::str::FromStr; use std::sync::Arc; use allocative::Allocative; use async_trait::async_trait; use buck2_core::cells::name::CellName; +use buck2_futures::cancellation::CancellationContext; use derive_more::Display; use dice::DiceComputations; use dice::DiceProjectionComputations; @@ -24,121 +26,125 @@ use dice::Key; use dice::OpaqueValue; use dice::ProjectionKey; use dupe::Dupe; -use dupe::OptionDupedExt; -use more_futures::cancellation::CancellationContext; -use starlark_map::sorted_map::SortedMap; use crate::dice::cells::HasCellResolver; +use crate::legacy_configs::cells::BuckConfigBasedCells; +use crate::legacy_configs::cells::ExternalBuckconfigData; +use crate::legacy_configs::configs::LegacyBuckConfig; +use crate::legacy_configs::diffs::ConfigDiffTracker; +use crate::legacy_configs::key::BuckconfigKeyRef; use crate::legacy_configs::view::LegacyBuckConfigView; -use crate::legacy_configs::view::LegacyBuckConfigsView; -use crate::legacy_configs::ConfigError; -use crate::legacy_configs::LegacyBuckConfig; -use crate::legacy_configs::LegacyBuckConfigs; -use crate::result::SharedResult; -use crate::result::ToSharedResultExt; /// Buckconfig view which queries buckconfig entry from DICE. -#[derive(Clone, Dupe, Debug)] -pub struct LegacyBuckConfigOnDice<'a> { - config: Arc>, +#[derive(Clone, Dupe)] +pub struct OpaqueLegacyBuckConfigOnDice { + config: Arc>, } -impl<'a> LegacyBuckConfigView for LegacyBuckConfigOnDice<'a> { - fn get(&self, section: &str, key: &str) -> anyhow::Result>> { - self.get(section, key) +impl std::fmt::Debug for OpaqueLegacyBuckConfigOnDice { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("LegacyBuckConfigOnDice") + .field("config", &self.config) + .finish() } } -impl<'a> LegacyBuckConfigOnDice<'a> { - pub fn get(&self, section: &str, property: &str) -> anyhow::Result>> { - Ok(self - .config - .projection(&LegacyBuckConfigPropertyProjectionKey { +impl OpaqueLegacyBuckConfigOnDice { + pub fn lookup( + &self, + ctx: &mut DiceComputations, + key: BuckconfigKeyRef, + ) -> anyhow::Result>> { + let BuckconfigKeyRef { section, property } = key; + Ok(ctx.projection( + &*self.config, + &LegacyBuckConfigPropertyProjectionKey { section: section.to_owned(), property: property.to_owned(), - })?) + }, + )?) 
+    }
+
+    pub fn view<'a, 'd>(
+        &'a self,
+        ctx: &'a mut DiceComputations<'d>,
+    ) -> LegacyBuckConfigOnDice<'a, 'd> {
+        LegacyBuckConfigOnDice { ctx, config: self }
+    }
 }
 
-#[derive(Debug)]
-pub struct LegacyBuckConfigsOnDice<'a> {
-    configs: SortedMap<CellName, LegacyBuckConfigOnDice<'a>>,
+pub struct LegacyBuckConfigOnDice<'a, 'd> {
+    ctx: &'a mut DiceComputations<'d>,
+    config: &'a OpaqueLegacyBuckConfigOnDice,
 }
 
-impl<'a> LegacyBuckConfigsOnDice<'a> {
-    pub fn get(&self, cell_name: CellName) -> anyhow::Result<LegacyBuckConfigOnDice<'a>> {
-        self.configs
-            .get(&cell_name)
-            .duped()
-            .ok_or_else(|| ConfigError::UnknownCell(cell_name.to_owned()).into())
+impl LegacyBuckConfigOnDice<'_, '_> {
+    pub fn parse<T: FromStr>(&mut self, key: BuckconfigKeyRef) -> anyhow::Result<Option<T>>
+    where
+        anyhow::Error: From<<T as FromStr>::Err>,
+    {
+        LegacyBuckConfig::parse_value(key, self.get(key)?.as_deref())
+    }
 }
 
-impl<'a> LegacyBuckConfigsView for LegacyBuckConfigsOnDice<'a> {
-    fn get<'x>(&'x self, cell_name: CellName) -> anyhow::Result<&'x dyn LegacyBuckConfigView> {
-        let config = self
-            .configs
-            .get(&cell_name)
-            .ok_or_else(|| anyhow::Error::new(ConfigError::UnknownCell(cell_name.to_owned())))?;
-        Ok(config)
+impl std::fmt::Debug for LegacyBuckConfigOnDice<'_, '_> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("LegacyBuckConfigOnDice")
+            .field("config", &self.config)
+            .finish()
     }
+}
 
-    fn iter<'x>(
-        &'x self,
-    ) -> Box<dyn Iterator<Item = (CellName, &'x dyn LegacyBuckConfigView)> + 'x> {
-        Box::new(
-            self.configs
-                .iter()
-                .map(|(cell_name, config)| (*cell_name, config as &dyn LegacyBuckConfigView)),
-        )
+impl<'a, 'd> LegacyBuckConfigView for LegacyBuckConfigOnDice<'a, 'd> {
+    fn get(&mut self, key: BuckconfigKeyRef) -> anyhow::Result<Option<Arc<str>>> {
+        self.config.lookup(self.ctx, key)
     }
 }
 
+pub trait HasInjectedLegacyConfigs {
+    fn get_injected_external_buckconfig_data(
+        &mut self,
+    ) -> impl Future<Output = anyhow::Result<Arc<ExternalBuckconfigData>>>;
+
+    fn is_injected_external_buckconfig_data_key_set(
+        &mut self,
+    ) -> impl Future<Output = anyhow::Result<bool>>;
+}
+
 #[async_trait]
 pub trait HasLegacyConfigs {
     /// Get buckconfigs.
     ///
     /// This operation does not record buckconfig as a dependency of current computation.
     /// Accessing specific buckconfig property, records that key as dependency.
-    async fn get_legacy_configs_on_dice(&self) -> anyhow::Result<LegacyBuckConfigsOnDice<'_>>;
-
     async fn get_legacy_config_on_dice(
-        &self,
+        &mut self,
         cell_name: CellName,
-    ) -> anyhow::Result<LegacyBuckConfigOnDice<'_>>;
-
-    async fn get_legacy_root_config_on_dice(&self) -> anyhow::Result<LegacyBuckConfigOnDice<'_>>;
-
-    /// Use this function carefully: a computation which fetches this key will be recomputed
-    /// if any buckconfig property changes.
-    ///
-    /// Consider using `get_legacy_config_property` instead.
-    async fn get_legacy_configs(&self) -> anyhow::Result<LegacyBuckConfigs>;
+    ) -> anyhow::Result<OpaqueLegacyBuckConfigOnDice>;
 
-    /// Checks if LegacyBuckConfigsKey has been set in the DICE graph.
-    async fn is_legacy_configs_key_set(&self) -> anyhow::Result<bool>;
+    async fn get_legacy_root_config_on_dice(
+        &mut self,
+    ) -> anyhow::Result<OpaqueLegacyBuckConfigOnDice>;
 
     /// Use this function carefully: a computation which fetches this key will be recomputed
     /// if any buckconfig property changes.
     ///
     /// Consider using `get_legacy_config_property` instead.
    async fn get_legacy_config_for_cell(
-        &self,
+        &mut self,
         cell_name: CellName,
-    ) -> SharedResult<LegacyBuckConfig>;
+    ) -> buck2_error::Result<LegacyBuckConfig>;
 
     async fn get_legacy_config_property(
-        &self,
+        &mut self,
         cell_name: CellName,
-        section: &str,
-        property: &str,
+        key: BuckconfigKeyRef<'_>,
     ) -> anyhow::Result<Option<Arc<str>>>;
 
     async fn parse_legacy_config_property<T: FromStr>(
-        &self,
+        &mut self,
         cell_name: CellName,
-        section: &str,
-        key: &str,
+        key: BuckconfigKeyRef<'_>,
    ) -> anyhow::Result<Option<T>>
    where
        anyhow::Error: From<<T as FromStr>::Err>,
@@ -146,47 +152,50 @@ pub trait HasLegacyConfigs {
 }
 
 pub trait SetLegacyConfigs {
-    fn set_legacy_configs(&mut self, legacy_configs: LegacyBuckConfigs) -> anyhow::Result<()>;
+    fn set_legacy_config_external_data(
+        &mut self,
+        overrides: Arc<ExternalBuckconfigData>,
+    ) -> anyhow::Result<()>;
 
-    fn set_none_legacy_configs(&mut self) -> anyhow::Result<()>;
+    fn set_none_legacy_config_external_data(&mut self) -> anyhow::Result<()>;
 }
 
 #[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)]
-#[display(fmt = "{:?}", self)]
-struct LegacyBuckConfigKey;
+#[display("{:?}", self)]
+struct LegacyExternalBuckConfigDataKey;
 
-impl InjectedKey for LegacyBuckConfigKey {
-    type Value = Option<LegacyBuckConfigs>;
+impl InjectedKey for LegacyExternalBuckConfigDataKey {
+    type Value = Option<Arc<ExternalBuckconfigData>>;
 
     fn equality(x: &Self::Value, y: &Self::Value) -> bool {
-        match (x, y) {
-            (Some(x), Some(y)) => x.compare(y),
-            (None, None) => true,
-            (_, _) => false,
-        }
+        x == y
     }
 }
 
 #[derive(Clone, Display, Debug, Hash, Eq, PartialEq, Allocative)]
-#[display(fmt = "LegacyBuckConfigForCellKey({})", "self.cell_name")]
+#[display("LegacyBuckConfigForCellKey({})", self.cell_name)]
 struct LegacyBuckConfigForCellKey {
     cell_name: CellName,
 }
 
 #[async_trait]
 impl Key for LegacyBuckConfigForCellKey {
-    type Value = SharedResult<LegacyBuckConfig>;
+    type Value = buck2_error::Result<LegacyBuckConfig>;
 
     async fn compute(
         &self,
         ctx: &mut DiceComputations,
         _cancellations: &CancellationContext,
-    ) -> SharedResult<LegacyBuckConfig> {
-        let legacy_configs = ctx.get_legacy_configs().await?;
-        legacy_configs
-            .get(self.cell_name)
-            .map(|x| x.dupe())
-            .shared_error()
+    ) -> buck2_error::Result<LegacyBuckConfig> {
+        let cells = ctx.get_cell_resolver().await?;
+        let this_cell = cells.get(self.cell_name)?;
+        let config =
+            BuckConfigBasedCells::parse_single_cell_with_dice(ctx, this_cell.path()).await?;
+        let config = config.filter_values(should_ignore_config_change);
+
+        ConfigDiffTracker::report_computed_config(ctx, self.cell_name, &config);
+
+        Ok(config)
    }
 
     fn equality(x: &Self::Value, y: &Self::Value) -> bool {
@@ -197,39 +206,35 @@ impl Key for LegacyBuckConfigForCellKey {
     }
 }
 
-#[derive(Debug, Display, Clone, Eq, PartialEq, Hash, Allocative)]
-#[display(fmt = "{}//{}.{}", cell_name, section, property)]
-struct LegacyBuckConfigPropertyKey {
-    cell_name: CellName,
-    section: String,
-    property: String,
-}
+/// The `LegacyBuckConfigForCellKey` computation might encounter an error.
+///
+/// We can't return that error immediately, because we only compute the opaque value. We could
+/// return the error when doing the projection to the buckconfig values, but that would result in us
+/// increasing the size of the value returned from that computation. Instead, we'll use a different
+/// projection key to extract just the error from the cell computation, and compute that when
+/// constructing the `OpaqueLegacyBuckConfigOnDice`.
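Taken together, the pieces above are meant to be used roughly as follows. This is a hedged usage sketch, not code from this diff: it assumes a `ctx: &mut DiceComputations` and a `cell: CellName` are in scope, and `some_section`/`some_key` are made-up names.

```rust
// Fetch the cell's config as an opaque DICE value; this alone does not make
// the whole config a dependency of the calling computation.
let opaque = ctx.get_legacy_config_on_dice(cell).await?;

// Project out a single property; only `some_section.some_key` is recorded as
// a dependency, so unrelated buckconfig changes won't invalidate the caller.
let value: Option<Arc<str>> = opaque.lookup(
    ctx,
    BuckconfigKeyRef {
        section: "some_section",
        property: "some_key",
    },
)?;
```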
+#[derive(Debug, Display, Hash, Eq, PartialEq, Clone, Allocative)] +struct LegacyBuckConfigErrorKey(); -#[async_trait] -impl Key for LegacyBuckConfigPropertyKey { - type Value = SharedResult>>; +impl ProjectionKey for LegacyBuckConfigErrorKey { + type DeriveFromKey = LegacyBuckConfigForCellKey; + type Value = Option; - async fn compute( + fn compute( &self, - ctx: &mut DiceComputations, - _cancellations: &CancellationContext, - ) -> SharedResult>> { - let legacy_config = ctx.get_legacy_config_for_cell(self.cell_name).await?; - Ok(legacy_config - .get(&self.section, &self.property) - .map(|s| s.to_owned().into())) + config: &buck2_error::Result, + _ctx: &DiceProjectionComputations, + ) -> Option { + config.as_ref().err().cloned() } fn equality(x: &Self::Value, y: &Self::Value) -> bool { - match (x, y) { - (Ok(x), Ok(y)) => x == y, - _ => false, - } + x.is_none() && y.is_none() } } #[derive(Debug, Display, Hash, Eq, PartialEq, Clone, Allocative)] -#[display(fmt = "{}.{}", section, property)] +#[display("{}.{}", section, property)] struct LegacyBuckConfigPropertyProjectionKey { section: String, property: String, @@ -241,14 +246,16 @@ impl ProjectionKey for LegacyBuckConfigPropertyProjectionKey { fn compute( &self, - config: &SharedResult, + config: &buck2_error::Result, _ctx: &DiceProjectionComputations, ) -> Option> { - // This is safe, because this code is only called from `DiceLegacyBuckConfig` - // which is known to be constructed from a valid cell. + // See the comment in `LegacyBuckConfigErrorKey` for why this is safe let config = config.as_ref().unwrap(); config - .get(&self.section, &self.property) + .get(BuckconfigKeyRef { + section: &self.section, + property: &self.property, + }) .map(|s| s.to_owned().into()) } @@ -257,231 +264,135 @@ impl ProjectionKey for LegacyBuckConfigPropertyProjectionKey { } } -#[derive(Debug, Display, Hash, PartialEq, Eq, Clone, Dupe, Allocative)] -#[display(fmt = "{:?}", self)] -struct LegacyBuckConfigCellNamesKey; - -impl ProjectionKey for LegacyBuckConfigCellNamesKey { - type DeriveFromKey = LegacyBuckConfigKey; - type Value = Arc>; - - fn compute( - &self, - configs: &Option, - _ctx: &DiceProjectionComputations, - ) -> Arc> { - let cell_names: Vec<_> = configs - .as_ref() - .unwrap_or_else(|| { - panic!( - "Tried to retrieve LegacyBuckConfigKey from the graph, but key has None value" - ) - }) - .iter() - .map(|(k, _)| k) - .collect(); - assert!( - cell_names.is_sorted(), - "configs.iter() must return a sorted iterator" - ); - Arc::new(cell_names) +impl HasInjectedLegacyConfigs for DiceComputations<'_> { + async fn get_injected_external_buckconfig_data( + &mut self, + ) -> anyhow::Result> { + self.compute(&LegacyExternalBuckConfigDataKey).await?.ok_or_else(|| { + panic!("Tried to retrieve LegacyBuckConfigOverridesKey from the graph, but key has None value") + }) } - fn equality(x: &Self::Value, y: &Self::Value) -> bool { - x == y + async fn is_injected_external_buckconfig_data_key_set(&mut self) -> anyhow::Result { + Ok(self + .compute(&LegacyExternalBuckConfigDataKey) + .await? 
+ .is_some()) } } #[async_trait] -impl HasLegacyConfigs for DiceComputations { - async fn get_legacy_configs_on_dice(&self) -> anyhow::Result { - let configs = self.compute_opaque(&LegacyBuckConfigKey).await?; - let cell_names = configs.projection(&LegacyBuckConfigCellNamesKey)?; - let mut configs_on_dice = Vec::with_capacity(cell_names.len()); - for cell_name in &*cell_names { - let config = self - .compute_opaque(&LegacyBuckConfigForCellKey { - cell_name: *cell_name, - }) - .await?; - configs_on_dice.push(( - *cell_name, - LegacyBuckConfigOnDice { - config: Arc::new(config), - }, - )); - } - Ok(LegacyBuckConfigsOnDice { - configs: SortedMap::from_iter(configs_on_dice), - }) - } - +impl HasLegacyConfigs for DiceComputations<'_> { async fn get_legacy_config_on_dice( - &self, + &mut self, cell_name: CellName, - ) -> anyhow::Result { - self.get_legacy_configs_on_dice().await?.get(cell_name) + ) -> anyhow::Result { + let config = self + .compute_opaque(&LegacyBuckConfigForCellKey { cell_name }) + .await?; + if let Some(error) = self.projection(&config, &LegacyBuckConfigErrorKey())? { + return Err(error.into()); + } + Ok(OpaqueLegacyBuckConfigOnDice { + config: Arc::new(config), + }) } - async fn get_legacy_root_config_on_dice(&self) -> anyhow::Result { + async fn get_legacy_root_config_on_dice( + &mut self, + ) -> anyhow::Result { let cell_resolver = self.get_cell_resolver().await?; self.get_legacy_config_on_dice(cell_resolver.root_cell()) .await } - async fn get_legacy_configs(&self) -> anyhow::Result { - self.compute(&LegacyBuckConfigKey).await?.ok_or_else(|| { - panic!("Tried to retrieve LegacyBuckConfigKey from the graph, but key has None value") - }) - } - - async fn is_legacy_configs_key_set(&self) -> anyhow::Result { - Ok(self.compute(&LegacyBuckConfigKey).await?.is_some()) - } - async fn get_legacy_config_for_cell( - &self, + &mut self, cell_name: CellName, - ) -> SharedResult { + ) -> buck2_error::Result { self.compute(&LegacyBuckConfigForCellKey { cell_name }) .await? } async fn get_legacy_config_property( - &self, + &mut self, cell_name: CellName, - section: &str, - property: &str, + key: BuckconfigKeyRef<'_>, ) -> anyhow::Result>> { - Ok(self - .compute(&LegacyBuckConfigPropertyKey { - cell_name, - section: section.to_owned(), - property: property.to_owned(), - }) - .await??) + self.get_legacy_config_on_dice(cell_name) + .await? + .lookup(self, key) } async fn parse_legacy_config_property( - &self, + &mut self, cell_name: CellName, - section: &str, - key: &str, + key: BuckconfigKeyRef<'_>, ) -> anyhow::Result> where anyhow::Error: From<::Err>, T: Send + Sync + 'static, { - let v = self - .get_legacy_config_property(cell_name, section, key) - .await?; - match v { - None => Ok(None), - Some(v) => Ok(Some(LegacyBuckConfig::parse_impl(section, key, &v)?)), - } + LegacyBuckConfig::parse_value( + key, + self.get_legacy_config_property(cell_name, key) + .await? + .as_deref(), + ) } } impl SetLegacyConfigs for DiceTransactionUpdater { - fn set_legacy_configs(&mut self, legacy_configs: LegacyBuckConfigs) -> anyhow::Result<()> { - Ok(self.changed_to(vec![(LegacyBuckConfigKey, Some(legacy_configs))])?) + fn set_legacy_config_external_data( + &mut self, + data: Arc, + ) -> anyhow::Result<()> { + // Don't invalidate state if RE use case is overridden. + let data = data.filter_values(should_ignore_config_change); + Ok(self.changed_to(vec![( + LegacyExternalBuckConfigDataKey, + Some(Arc::new(data)), + )])?) 
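+        // NB: the same `should_ignore_config_change` filter is applied in
+        // `LegacyBuckConfigForCellKey::compute` above, so an `override_use_case`
+        // flip is invisible to DICE on both the injected and the computed side.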
} - fn set_none_legacy_configs(&mut self) -> anyhow::Result<()> { - Ok(self.changed_to(vec![(LegacyBuckConfigKey, None)])?) + fn set_none_legacy_config_external_data(&mut self) -> anyhow::Result<()> { + Ok(self.changed_to(vec![(LegacyExternalBuckConfigDataKey, None)])?) } } +fn should_ignore_config_change(config_key: &BuckconfigKeyRef) -> bool { + !(config_key.section == "buck2_re_client" && config_key.property == "override_use_case") +} + #[cfg(test)] mod tests { - use buck2_core::cells::name::CellName; - use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; - use dice::InjectedKey; + use buck2_cli_proto::ConfigOverride; - use crate::legacy_configs::dice::LegacyBuckConfigKey; - use crate::legacy_configs::testing::TestConfigParserFileOps; - use crate::legacy_configs::LegacyBuckConfig; - use crate::legacy_configs::LegacyBuckConfigs; - use crate::legacy_configs::LegacyConfigCmdArg; + use crate::legacy_configs::configs::testing::parse_with_config_args; #[test] fn config_equals() -> anyhow::Result<()> { - #[cfg(not(windows))] - let path = &AbsNormPathBuf::from("/test".to_owned())?; - #[cfg(windows)] - let path = &AbsNormPathBuf::from("C:/test".to_owned())?; - let config1 = Some(LegacyBuckConfigs::new(hashmap![ - CellName::testing_new("cell1") - => { - let mut file_ops = TestConfigParserFileOps::new(&[("/test", "[sec1]\na=b\n[sec2]\nx=y")])?; - LegacyBuckConfig::parse_with_file_ops( - path, - &mut file_ops, - &[LegacyConfigCmdArg::flag("sec1.a=c")?], - )? - }, - CellName::testing_new("cell2") - => { - let mut file_ops = TestConfigParserFileOps::new(&[("/test", "[sec1]\nx=y\n[sec2]\na=b")])?; - LegacyBuckConfig::parse_with_file_ops( - path, - &mut file_ops, - &[], - )? - } - ])); - - let config2 = Some(LegacyBuckConfigs::new(hashmap![ - CellName::testing_new("cell1") - => { - let mut file_ops = TestConfigParserFileOps::new(&[("/test", "[sec1]\na=b\n[sec2]\nx=y")])?; - LegacyBuckConfig::parse_with_file_ops( - path, - &mut file_ops, - &[LegacyConfigCmdArg::flag("sec1.a=c")?], - )? - }, - ])); - - let config3 = Some(LegacyBuckConfigs::new(hashmap![ - CellName::testing_new("cell1") - => { - let mut file_ops = TestConfigParserFileOps::new(&[("/test", "[sec1]\na=c\n[sec2]\nx=y")])?; - LegacyBuckConfig::parse_with_file_ops( - path, - &mut file_ops, - &[], - )? - }, - ])); - - let config4 = Some(LegacyBuckConfigs::new(hashmap![ - CellName::testing_new("cell1") - => { - let mut file_ops = TestConfigParserFileOps::new(&[("/test", "[sec1]\na=b\n[sec2]\nx=y")])?; - LegacyBuckConfig::parse_with_file_ops( - path, - &mut file_ops, - &[LegacyConfigCmdArg::flag("sec1.d=e")?], - )? 
- }, - ])); - - let config5: Option = None; - let config6: Option = None; - - assert_eq!(LegacyBuckConfigKey::equality(&config1, &config1), true); - assert_eq!(LegacyBuckConfigKey::equality(&config2, &config2), true); - assert_eq!(LegacyBuckConfigKey::equality(&config3, &config3), true); - assert_eq!(LegacyBuckConfigKey::equality(&config4, &config4), true); - assert_eq!(LegacyBuckConfigKey::equality(&config1, &config2), false); - assert_eq!(LegacyBuckConfigKey::equality(&config1, &config3), false); - assert_eq!(LegacyBuckConfigKey::equality(&config1, &config4), false); - assert_eq!(LegacyBuckConfigKey::equality(&config2, &config3), true); - assert_eq!(LegacyBuckConfigKey::equality(&config2, &config4), false); - assert_eq!(LegacyBuckConfigKey::equality(&config3, &config4), false); - assert_eq!(LegacyBuckConfigKey::equality(&config5, &config1), false); - assert_eq!(LegacyBuckConfigKey::equality(&config5, &config6), true); + let path = "test"; + let config1 = parse_with_config_args( + &[("test", "[sec1]\na=b\n[sec2]\nx=y")], + path, + &[ConfigOverride::flag_no_cell("sec1.a=c")], + )?; + + let config2 = parse_with_config_args(&[("test", "[sec1]\na=c\n[sec2]\nx=y")], path, &[])?; + + let config3 = parse_with_config_args( + &[("test", "[sec1]\na=b\n[sec2]\nx=y")], + path, + &[ConfigOverride::flag_no_cell("sec1.d=e")], + )?; + + assert_eq!(config1.compare(&config1), true); + assert_eq!(config2.compare(&config2), true); + assert_eq!(config3.compare(&config3), true); + assert_eq!(config1.compare(&config2), true); + assert_eq!(config1.compare(&config3), false); + assert_eq!(config2.compare(&config3), false); Ok(()) } diff --git a/app/buck2_common/src/legacy_configs/diffs.rs b/app/buck2_common/src/legacy_configs/diffs.rs new file mode 100644 index 0000000000000..3d5575ab2ec55 --- /dev/null +++ b/app/buck2_common/src/legacy_configs/diffs.rs @@ -0,0 +1,449 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::cmp::Ordering; +use std::collections::HashMap; +use std::hash::Hash; +use std::sync::Mutex; + +use buck2_core::cells::name::CellName; +use buck2_events::dispatch::get_dispatcher; +use dice::DiceComputations; +use dice::UserComputationData; +use dupe::Dupe; +use itertools::Itertools; +use starlark_map::sorted_map::SortedMap; + +use crate::legacy_configs::configs::LegacyBuckConfig; +use crate::legacy_configs::configs::LegacyBuckConfigSection; +use crate::legacy_configs::key::BuckconfigKeyRef; + +/// This is a helper struct to track the config diffs between two commands. +/// +/// This type is stored in the `UserComputationData` - at the beginning of each command, a new one +/// is created by promoting the one from the previous command. Whenever a cell's buckconfigs are +/// computed, this type is informed and it sends an event to the client. 
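Before the definition itself, a sketch of the intended per-command lifecycle. This is hypothetical wiring for illustration only; the daemon-side setup, `prev_ctx`, and `root_config` are assumed rather than shown in this diff:

```rust
// At the start of a command: carry forward the configs the previous command
// saw, so the next diff is computed against the freshest known values.
let mut next = UserComputationData::new();
ConfigDiffTracker::promote_into(&mut prev_ctx, &mut next, &root_config);
// `next` then seeds the new command's DICE transaction; as each cell's
// buckconfig is computed, `report_computed_config` emits a diff event.
```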
+pub struct ConfigDiffTracker {
+    previous: HashMap<CellName, LegacyBuckConfig>,
+    current: Mutex<HashMap<CellName, LegacyBuckConfig>>,
+    size_limit: Option<usize>,
+}
+
+impl ConfigDiffTracker {
+    pub fn promote_into(
+        previous: &mut DiceComputations<'_>,
+        next: &mut UserComputationData,
+        root_config: &LegacyBuckConfig,
+    ) {
+        // `None` indicates that this is the first command
+        let previous = match previous.per_transaction_data().data.get::<ConfigDiffTracker>() {
+            Ok(previous) => {
+                // It's not enough to just take the current set from the previous command, because
+                // the previous command might not have computed some of the buckconfigs - maybe
+                // because it didn't need them, or maybe because they didn't change. If we didn't
+                // compute them previously, the right thing to do is to assume that the most recent
+                // ones that were computed are still good, and diff against those in the next
+                // command.
+                let mut new = previous.previous.clone();
+                new.extend(
+                    previous
+                        .current
+                        .lock()
+                        .unwrap()
+                        .iter()
+                        .map(|(x, y)| (*x, y.dupe())),
+                );
+                new
+            }
+            Err(_) => HashMap::new(),
+        };
+
+        // We parse this here, instead of doing it dynamically, to ensure that we don't take a
+        // dependency on the root config from every other config
+        let size_limit = root_config
+            .parse(BuckconfigKeyRef {
+                section: "buck2",
+                property: "config_diff_size_limit",
+            })
+            // FIXME(JakobDegen): Don't ignore errors
+            .unwrap_or_default();
+
+        let val = ConfigDiffTracker {
+            previous,
+            current: Mutex::new(HashMap::new()),
+            size_limit,
+        };
+
+        next.data.set(val);
+    }
+
+    pub(crate) fn report_computed_config(
+        ctx: &mut DiceComputations<'_>,
+        cell: CellName,
+        config: &LegacyBuckConfig,
+    ) {
+        let Ok(this) = ctx.per_transaction_data().data.get::<ConfigDiffTracker>() else {
+            // This can happen in tests
+            return;
+        };
+
+        if this
+            .current
+            .lock()
+            .unwrap()
+            .try_insert(cell, config.dupe())
+            .is_err()
+        {
+            // This is a bit suspicious, we normally should not compute the same key twice. It does
+            // mean that the diff was already reported though, so doing nothing seems safe
+            return;
+        }
+
+        let event = if let Some(previous) = this.previous.get(&cell) {
+            CellConfigDiff::new(Some(previous), Some(config), &this.size_limit).inner
+        } else {
+            // If there is no previous set, that usually means that this is a new daemon, or maybe
+            // that this particular cell was not loaded in the previous command. To avoid
+            // generating a very large diff, we do not report this to the client - but we still
+            // need to tell the client that there were some new configs
+            buck2_data::CellConfigDiff {
+                new_config_indicator_only: true,
+                ..Default::default()
+            }
+        };
+
+        get_dispatcher().instant_event(event);
+    }
+}
+
+// section name to config diffs
+#[derive(Debug, Clone, Default, PartialEq)]
+struct CellConfigDiff {
+    inner: buck2_data::CellConfigDiff,
+    diff_size_exceeded: bool,
+}
+
+impl CellConfigDiff {
+    fn new(
+        new: Option<&LegacyBuckConfig>,
+        old: Option<&LegacyBuckConfig>,
+        diff_size_limit: &Option<usize>,
+    ) -> CellConfigDiff {
+        let mut this = Self::default();
+        let empty = SortedMap::new();
+        let new_conf = new.map(|n| &n.0.values).unwrap_or(&empty);
+        let old_conf = old.map(|o| &o.0.values).unwrap_or(&empty);
+
+        for (section, new_conf, old_conf) in merge(&new_conf, &old_conf) {
+            if let Some(diff) = this.section_diff(new_conf, old_conf, diff_size_limit) {
+                this.inner.section_diff.insert(section.to_owned(), diff);
+            }
+        }
+
+        this
+    }
+
+    fn section_diff(
+        &mut self,
+        new: Option<&LegacyBuckConfigSection>,
+        old: Option<&LegacyBuckConfigSection>,
+        diff_size_limit: &Option<usize>,
+    ) -> Option<buck2_data::SectionConfigDiff> {
+        let mut result = HashMap::new();
+        let empty = SortedMap::new();
+        let new_section = new.map(|n| &n.values).unwrap_or(&empty);
+        let old_section = old.map(|o| &o.values).unwrap_or(&empty);
+
+        for (name, new_conf, old_conf) in merge(&new_section, &old_section) {
+            let new_conf = new_conf.map(|x| x.as_str());
+            let old_conf = old_conf.map(|x| x.as_str());
+            if new_conf == old_conf {
+                continue;
+            }
+            self.inner.config_diff_count += 1;
+            self.inner.config_diff_size +=
+                (name.len() + old_conf.map_or(0, |x| x.len()) + new_conf.map_or(0, |x| x.len()))
+                    as u64;
+            self.insert_if_fits(
+                &mut result,
+                name,
+                buck2_data::ConfigDiff {
+                    old_value: old_conf.map(|x| x.to_owned()),
+                    new_value: new_conf.map(|x| x.to_owned()),
+                },
+                diff_size_limit,
+            );
+        }
+
+        if result.is_empty() {
+            None
+        } else {
+            Some(buck2_data::SectionConfigDiff {
+                config_diff: result,
+            })
+        }
+    }
+
+    fn insert_if_fits(
+        &mut self,
+        map: &mut HashMap<String, buck2_data::ConfigDiff>,
+        name: &String,
+        entry: buck2_data::ConfigDiff,
+        diff_size_limit: &Option<usize>,
+    ) {
+        if let Some(limit) = diff_size_limit {
+            if self.inner.config_diff_size < (*limit) as u64 {
+                map.insert(name.to_owned(), entry);
+            } else {
+                self.diff_size_exceeded = true;
+            }
+        }
+    }
+}
+
+/// Produces the ordered elements of two maps merged together by key.
+fn merge<'a, K, V>(
+    map0: &'a SortedMap<K, V>,
+    map1: &'a SortedMap<K, V>,
+) -> impl Iterator<Item = (&'a K, Option<&'a V>, Option<&'a V>)>
+where
+    K: Ord + Hash,
+{
+    map0.iter()
+        .map(|(k, v)| (k, Some(v), None))
+        .merge_by(map1.iter().map(|(k, v)| (k, None, Some(v))), |k0, k1| {
+            k0.0 <= k1.0
+        })
+        .coalesce(|x, y| match x.0.cmp(y.0) {
+            Ordering::Less => Err((x, y)),
+            Ordering::Equal => Ok((x.0, x.1, y.2)),
+            Ordering::Greater => unreachable!("should be sorted"),
+        })
+}
+
+#[cfg(test)]
+mod tests {
+    use buck2_cli_proto::ConfigOverride;
+    use indoc::indoc;
+    use maplit::hashmap;
+    use starlark_map::sorted_map::SortedMap;
+
+    use super::merge;
+    use super::*;
+    use crate::legacy_configs::configs::testing::*;
+
+    #[test]
+    fn test_merge_empty() {
+        let empty1: SortedMap<u8, u8> = SortedMap::new();
+        let empty2: SortedMap<u8, u8> = SortedMap::new();
+
+        let expected: Vec<(&u8, Option<&u8>, Option<&u8>)> = vec![];
+        let actual: Vec<(&u8, Option<&u8>, Option<&u8>)> = merge(&empty1, &empty2).collect();
+
+        assert_eq!(expected, actual);
+    }
+
+    #[test]
+    fn test_merge_map_vs_empty() {
let key = "str"; + let value = 2; + let map: SortedMap<&str, u8> = SortedMap::from_iter([(key, value)]); + let empty: SortedMap<&str, u8> = SortedMap::new(); + + let expected = vec![(&key, Some(&value), None)]; + let actual: Vec<_> = merge(&map, &empty).collect(); + + assert_eq!(expected, actual); + } + + #[test] + fn test_merge_empty_vs_map() { + let key = "str"; + let value = 2; + let map: SortedMap<&str, u8> = SortedMap::from_iter([(key, value)]); + let empty: SortedMap<&str, u8> = SortedMap::new(); + + let expected = vec![(&key, None, Some(&value))]; + let actual: Vec<_> = merge(&empty, &map).collect(); + + assert_eq!(expected, actual); + } + + #[test] + fn test_merge_map_vs_map_in_order() { + let key1 = "str"; + let value1_1 = 1; + let value1_2 = 2; + let key2 = "str2"; + let value2 = 7; + let key3 = "str3"; + let value3 = 3; + + let map1: SortedMap<&str, u8> = SortedMap::from_iter([(key1, value1_1), (key3, value3)]); + let map2: SortedMap<&str, u8> = SortedMap::from_iter([(key1, value1_2), (key2, value2)]); + + let expected = vec![ + (&key1, Some(&value1_1), Some(&value1_2)), + (&key2, None, Some(&value2)), + (&key3, Some(&value3), None), + ]; + let actual: Vec<_> = merge(&map1, &map2).collect(); + + assert_eq!(expected, actual); + } + + #[test] + fn test_diff_metrics_equal_configs() -> anyhow::Result<()> { + let config_args = vec![ConfigOverride::flag_no_cell("apple.key=value1")]; + let config = parse_with_config_args(&[("config", indoc!(r#""#))], "config", &config_args)?; + + let metrics = CellConfigDiff::new(Some(&config), Some(&config), &Some(10000)); + + assert_eq!(metrics.inner.config_diff_count, 0); + assert_eq!(metrics.inner.config_diff_size, 0); + assert_eq!(metrics.inner.section_diff, HashMap::new()); + assert_eq!(metrics.diff_size_exceeded, false); + Ok(()) + } + + #[test] + fn test_diff_metrics_with_empty() -> anyhow::Result<()> { + let key = "key"; + let value = "value1"; + let limit_key = "config_diff_size_limit"; + let limit_value = "10000"; + let config_args = vec![ + ConfigOverride::flag_no_cell(&format!("buck2.{limit_key}={limit_value}")), + ConfigOverride::flag_no_cell(&format!("apple.{key}={value}")), + ]; + let config = parse_with_config_args(&[("config", indoc!(r#""#))], "config", &config_args)?; + + let metrics = CellConfigDiff::new(Some(&config), None, &Some(10000)); + + assert_eq!(metrics.inner.config_diff_count, 2); + assert_eq!( + metrics.inner.config_diff_size as usize, + key.len() + value.len() + limit_key.len() + limit_value.len() + ); + let expected = hashmap![ + "apple".to_owned() => buck2_data::SectionConfigDiff { + config_diff: hashmap![ + key.to_owned() => buck2_data::ConfigDiff { + old_value: None, + new_value: Some(value.to_owned()), + }, + ], + }, + "buck2".to_owned() => buck2_data::SectionConfigDiff { + config_diff: hashmap![ + limit_key.to_owned() => buck2_data::ConfigDiff { + old_value: None, + new_value: Some(limit_value.to_owned()), + }, + ], + }, + ]; + assert_eq!(metrics.inner.section_diff, expected); + assert_eq!(metrics.diff_size_exceeded, false); + Ok(()) + } + + #[test] + fn test_diff_metrics_only_changed() -> anyhow::Result<()> { + let key1 = "key1"; + let value1 = "value1"; + let key2 = "key2"; + let value2_1 = "value2"; + let value2_2 = "value3"; + let key3 = "key3"; + let value3 = "value3"; + + let config_args1 = vec![ + ConfigOverride::flag_no_cell(&format!("apple.{key1}={value1}")), + ConfigOverride::flag_no_cell(&format!("apple.{key2}={value2_1}")), + ]; + let config1 = + parse_with_config_args(&[("config", indoc!(r#""#))], 
"config", &config_args1)?; + + let config_args2 = vec![ + ConfigOverride::flag_no_cell(&format!("apple.{key1}={value1}")), + ConfigOverride::flag_no_cell(&format!("apple.{key2}={value2_2}")), + ConfigOverride::flag_no_cell(&format!("apple.{key3}={value3}")), + ]; + let config2 = + parse_with_config_args(&[("config", indoc!(r#""#))], "config", &config_args2)?; + + let metrics = CellConfigDiff::new(Some(&config1), Some(&config2), &Some(1000)); + + assert_eq!(metrics.inner.config_diff_count, 2); + assert_eq!( + metrics.inner.config_diff_size as usize, + key2.len() + value2_1.len() + value2_2.len() + key3.len() + value3.len() + ); + + let expected = hashmap![ + "apple".to_owned() => buck2_data::SectionConfigDiff { + config_diff: hashmap![ + key2.to_owned() => buck2_data::ConfigDiff { + old_value: Some(value2_2.to_owned()), + new_value: Some(value2_1.to_owned()), + }, + key3.to_owned() => buck2_data::ConfigDiff { + old_value: Some(value3.to_owned()), + new_value: None + }, + ] + } + ]; + assert_eq!(metrics.inner.section_diff, expected); + assert_eq!(metrics.diff_size_exceeded, false); + Ok(()) + } + + #[test] + fn test_diff_metrics_size_exceeded() -> anyhow::Result<()> { + let key1 = "key1"; + let value1 = "value1"; + let key2 = "key2"; + let value2 = "value2"; + + let config_args1 = vec![ConfigOverride::flag_no_cell(&format!( + "apple.{key1}={value1}" + ))]; + let config1 = + parse_with_config_args(&[("config", indoc!(r#""#))], "config", &config_args1)?; + + let config_args2 = vec![ConfigOverride::flag_no_cell(&format!( + "apple.{key2}={value2}" + ))]; + let config2 = + parse_with_config_args(&[("config", indoc!(r#""#))], "config", &config_args2)?; + + let metrics = CellConfigDiff::new(Some(&config1), Some(&config2), &Some(12)); + + assert_eq!(metrics.inner.config_diff_count, 2); + assert_eq!( + metrics.inner.config_diff_size as usize, + key1.len() + value1.len() + key2.len() + value2.len() + ); + + let expected = hashmap![ + "apple".to_owned() => buck2_data::SectionConfigDiff { + config_diff: hashmap![ + key1.to_owned() => buck2_data::ConfigDiff { + old_value: None, + new_value: Some(value1.to_owned()), + }, + ], + }, + ]; + assert_eq!(metrics.inner.section_diff, expected); + assert_eq!(metrics.diff_size_exceeded, true); + + Ok(()) + } +} diff --git a/app/buck2_common/src/legacy_configs/file_ops.rs b/app/buck2_common/src/legacy_configs/file_ops.rs new file mode 100644 index 0000000000000..0e2bd1d82d93d --- /dev/null +++ b/app/buck2_common/src/legacy_configs/file_ops.rs @@ -0,0 +1,380 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */
+
+use std::io;
+use std::io::BufRead;
+
+use allocative::Allocative;
+use anyhow::Context;
+use buck2_core::cells::CellResolver;
+use buck2_core::fs::fs_util;
+use buck2_core::fs::paths::abs_path::AbsPathBuf;
+use buck2_core::fs::paths::file_name::FileNameBuf;
+use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath;
+use buck2_core::fs::paths::RelativePath;
+use buck2_core::fs::project::ProjectRoot;
+use buck2_core::fs::project_rel_path::ProjectRelativePathBuf;
+use dice::DiceComputations;
+use dupe::Dupe;
+use futures::future::BoxFuture;
+use futures::FutureExt;
+
+use crate::dice::file_ops::DiceFileComputations;
+use crate::file_ops::FileType;
+use crate::file_ops::RawPathMetadata;
+
+#[derive(Debug, Clone, PartialEq, Eq, Hash, Allocative, derive_more::Display)]
+pub enum ConfigPath {
+    #[display("{}", _0)]
+    Project(ProjectRelativePathBuf),
+    #[display("{}", _0)]
+    Global(AbsPathBuf),
+}
+
+impl ConfigPath {
+    pub(crate) fn resolve_absolute(&self, project_fs: &ProjectRoot) -> AbsPathBuf {
+        match self {
+            ConfigPath::Project(path) => project_fs.resolve(path).into_abs_path_buf(),
+            ConfigPath::Global(path) => path.clone(),
+        }
+    }
+
+    pub(crate) fn join_to_parent_normalized(&self, rel: &RelativePath) -> anyhow::Result<ConfigPath> {
+        match self {
+            ConfigPath::Project(path) => path
+                .parent()
+                .context("file has no parent")?
+                .join_normalized(rel)
+                .map(ConfigPath::Project),
+            ConfigPath::Global(path) => Ok(ConfigPath::Global(
+                path.parent()
+                    .context("file has no parent")?
+                    .join(rel.as_str()),
+            )),
+        }
+    }
+
+    pub(crate) fn join(&self, p: impl AsRef<ForwardRelativePath>) -> Self {
+        match self {
+            ConfigPath::Project(path) => ConfigPath::Project(path.join(p)),
+            ConfigPath::Global(path) => ConfigPath::Global(path.join(p.as_ref().as_path())),
+        }
+    }
+}
+
+pub struct ConfigDirEntry {
+    pub(crate) name: FileNameBuf,
+    pub(crate) is_dir: bool,
+}
+
+#[async_trait::async_trait]
+#[allow(private_interfaces)]
+pub trait ConfigParserFileOps: Send + Sync {
+    async fn read_file_lines_if_exists(
+        &mut self,
+        path: &ConfigPath,
+    ) -> anyhow::Result<Option<Box<dyn Iterator<Item = io::Result<String>> + Send>>>;
+
+    async fn read_dir(&mut self, path: &ConfigPath) -> anyhow::Result<Vec<ConfigDirEntry>>;
+}
+
+#[derive(buck2_error::Error, Debug)]
+enum ReadDirError {
+    #[error("Non-utf8 entry `{0}` in directory `{1}`")]
+    NotUtf8(String, String),
+}
+
+pub(crate) struct DefaultConfigParserFileOps {
+    pub(crate) project_fs: ProjectRoot,
+}
+
+#[async_trait::async_trait]
+impl ConfigParserFileOps for DefaultConfigParserFileOps {
+    async fn read_file_lines_if_exists(
+        &mut self,
+        path: &ConfigPath,
+    ) -> anyhow::Result<Option<Box<dyn Iterator<Item = io::Result<String>> + Send>>>
+    {
+        let path = path.resolve_absolute(&self.project_fs);
+        let Some(f) = fs_util::open_file_if_exists(&path)
+            .with_context(|| format!("Reading file `{:?}`", path))?
+        else {
+            return Ok(None);
+        };
+        let file = std::io::BufReader::new(f);
+        Ok(Some(Box::new(file.lines())))
+    }
+
+    async fn read_dir(&mut self, path: &ConfigPath) -> anyhow::Result<Vec<ConfigDirEntry>> {
+        let path = path.resolve_absolute(&self.project_fs);
+        let read_dir = match std::fs::read_dir(path.as_path()) {
+            Ok(read_dir) => read_dir,
+            Err(e) if e.kind() == std::io::ErrorKind::NotFound => return Ok(Vec::new()),
+            Err(e) if e.kind() == std::io::ErrorKind::NotADirectory => {
+                tracing::warn!("Expected a directory of buckconfig files at: `{}`", path);
+                return Ok(Vec::new());
+            }
+            Err(e) => return Err(e.into()),
+        };
+        let mut entries = Vec::new();
+        for entry in read_dir {
+            let entry = entry?;
+            let name = entry.file_name().into_string().map_err(|s| {
+                ReadDirError::NotUtf8(
+                    std::path::Path::display(s.as_ref()).to_string(),
+                    path.to_string(),
+                )
+            })?;
+            let name = FileNameBuf::try_from(name)?;
+            let file_type = entry.file_type()?;
+            if file_type.is_file() {
+                entries.push(ConfigDirEntry {
+                    name,
+                    is_dir: false,
+                });
+            } else if file_type.is_dir() {
+                entries.push(ConfigDirEntry { name, is_dir: true });
+            } else {
+                tracing::warn!(
+                    "Expected a directory of buckconfig files at `{}`, but this entry was not a file or directory: `{}`",
+                    path,
+                    name,
+                );
+            }
+        }
+        Ok(entries)
+    }
+}
+
+pub(crate) struct DiceConfigFileOps<'a, 'b> {
+    ctx: &'a mut DiceComputations<'b>,
+    cell_resolver: &'a CellResolver,
+    io_ops: DefaultConfigParserFileOps,
+}
+
+impl<'a, 'b> DiceConfigFileOps<'a, 'b> {
+    pub(crate) fn new(
+        ctx: &'a mut DiceComputations<'b>,
+        project_fs: &'a ProjectRoot,
+        cell_resolver: &'a CellResolver,
+    ) -> Self {
+        let io_ops = DefaultConfigParserFileOps {
+            project_fs: project_fs.dupe(),
+        };
+        Self {
+            ctx,
+            cell_resolver,
+            io_ops,
+        }
+    }
+}
+
+#[async_trait::async_trait]
+impl ConfigParserFileOps for DiceConfigFileOps<'_, '_> {
+    async fn read_file_lines_if_exists(
+        &mut self,
+        path: &ConfigPath,
+    ) -> anyhow::Result<
+        Option<Box<(dyn Iterator<Item = io::Result<String>> + Send + 'static)>>,
+    > {
+        let ConfigPath::Project(path) = path else {
+            return self.io_ops.read_file_lines_if_exists(path).await;
+        };
+        let path = self.cell_resolver.get_cell_path(path)?;
+        let Some(data) = DiceFileComputations::read_file_if_exists(self.ctx, path.as_ref()).await?
+        else {
+            return Ok(None);
+        };
+        let lines = data.lines().map(ToOwned::to_owned).collect::<Vec<_>>();
+        Ok(Some(Box::new(lines.into_iter().map(Ok))))
+    }
+
+    async fn read_dir(&mut self, path: &ConfigPath) -> anyhow::Result<Vec<ConfigDirEntry>> {
+        let ConfigPath::Project(path) = path else {
+            return self.io_ops.read_dir(path).await;
+        };
+        let path = self.cell_resolver.get_cell_path(path)?;
+
+        // This trait expects some slightly non-standard behavior wrt errors, so make sure
+        // to match what the `DefaultConfigParserFileOps` do
+        match DiceFileComputations::read_path_metadata_if_exists(self.ctx, path.as_ref()).await? {
+            Some(RawPathMetadata::Directory) => {}
+            Some(_) | None => return Ok(Vec::new()),
+        }
+
+        let out = DiceFileComputations::read_dir_include_ignores(self.ctx, path.as_ref())
+            .await?
+ .included + .iter() + .filter_map(|e| match e.file_type { + FileType::Directory => Some(ConfigDirEntry { + name: e.file_name.clone(), + is_dir: true, + }), + FileType::File => Some(ConfigDirEntry { + name: e.file_name.clone(), + is_dir: false, + }), + FileType::Symlink | FileType::Unknown => None, + }) + .collect(); + Ok(out) + } +} + +pub(crate) fn push_all_files_from_a_directory<'a>( + buckconfig_paths: &'a mut Vec, + folder_path: &'a ConfigPath, + file_ops: &'a mut dyn ConfigParserFileOps, +) -> BoxFuture<'a, anyhow::Result<()>> { + async move { + for entry in file_ops.read_dir(folder_path).await? { + let entry_path = folder_path.join(&entry.name); + if entry.is_dir { + push_all_files_from_a_directory(buckconfig_paths, &entry_path, file_ops).await?; + } else { + buckconfig_paths.push(entry_path); + } + } + + Ok(()) + } + .boxed() +} + +#[cfg(test)] +mod tests { + use buck2_core::fs::fs_util; + use buck2_core::fs::paths::abs_path::AbsPath; + + use super::*; + use crate::legacy_configs::cells::create_project_filesystem; + + #[test] + fn dir_with_file() -> anyhow::Result<()> { + let mut v = vec![]; + let dir = tempfile::tempdir()?; + let root = AbsPath::new(dir.path())?; + let file = root.join("foo"); + fs_util::write(&file, "")?; + + let file = AbsPath::new(&file)?; + let dir = AbsPath::new(dir.path())?; + + futures::executor::block_on(push_all_files_from_a_directory( + &mut v, + &ConfigPath::Global(dir.to_owned()), + &mut DefaultConfigParserFileOps { + project_fs: create_project_filesystem(), + }, + ))?; + assert_eq!(v, vec![ConfigPath::Global(file.to_owned())]); + + Ok(()) + } + + #[test] + fn empty_dir() -> anyhow::Result<()> { + let mut v = vec![]; + let dir = tempfile::tempdir()?; + let dir = AbsPath::new(dir.path())?; + + futures::executor::block_on(push_all_files_from_a_directory( + &mut v, + &ConfigPath::Global(dir.to_owned()), + &mut DefaultConfigParserFileOps { + project_fs: create_project_filesystem(), + }, + ))?; + assert_eq!(v, vec![]); + + Ok(()) + } + + #[test] + fn non_existent_dir() -> anyhow::Result<()> { + let mut v = vec![]; + let dir = tempfile::tempdir()?; + let dir = dir.path().join("bad"); + let dir = AbsPath::new(&dir)?; + + futures::executor::block_on(push_all_files_from_a_directory( + &mut v, + &ConfigPath::Global(dir.to_owned()), + &mut DefaultConfigParserFileOps { + project_fs: create_project_filesystem(), + }, + ))?; + assert_eq!(v, vec![]); + + Ok(()) + } + + #[test] + fn dir_in_dir() -> anyhow::Result<()> { + let mut v = vec![]; + let dir = tempfile::tempdir()?; + let dir = AbsPath::new(dir.path())?; + fs_util::create_dir_all(dir.join("bad"))?; + + futures::executor::block_on(push_all_files_from_a_directory( + &mut v, + &ConfigPath::Global(AbsPath::new(dir)?.to_owned()), + &mut DefaultConfigParserFileOps { + project_fs: create_project_filesystem(), + }, + ))?; + assert_eq!(v, vec![]); + + Ok(()) + } + + #[test] + fn file() -> anyhow::Result<()> { + let mut v = vec![]; + let file = tempfile::NamedTempFile::new()?; + let file = AbsPath::new(file.path())?; + + futures::executor::block_on(push_all_files_from_a_directory( + &mut v, + &ConfigPath::Global(file.to_owned()), + &mut DefaultConfigParserFileOps { + project_fs: create_project_filesystem(), + }, + ))?; + assert_eq!(v, vec![]); + + Ok(()) + } + + #[test] + fn dir_with_file_in_dir() -> anyhow::Result<()> { + let mut v = vec![]; + let dir = tempfile::tempdir()?; + let dir = AbsPath::new(dir.path())?; + let nested_dir = dir.join("nested"); + fs_util::create_dir_all(&nested_dir)?; + let file = 
nested_dir.join("foo"); + fs_util::write(&file, "")?; + + let file = AbsPath::new(&file)?; + let dir = AbsPath::new(&dir)?; + + futures::executor::block_on(push_all_files_from_a_directory( + &mut v, + &ConfigPath::Global(dir.to_owned()), + &mut DefaultConfigParserFileOps { + project_fs: create_project_filesystem(), + }, + ))?; + assert_eq!(v, vec![ConfigPath::Global(file.to_owned())]); + + Ok(()) + } +} diff --git a/app/buck2_common/src/legacy_configs/init.rs b/app/buck2_common/src/legacy_configs/init.rs deleted file mode 100644 index 3029ee6d14556..0000000000000 --- a/app/buck2_common/src/legacy_configs/init.rs +++ /dev/null @@ -1,165 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::time::Duration; - -use allocative::Allocative; -use anyhow::Context; -use serde::Deserialize; -use serde::Serialize; - -use crate::legacy_configs::LegacyBuckConfig; - -/// Helper enum to categorize the kind of timeout we get from the startup config. -#[derive(Clone, Debug)] -pub enum Timeout { - /// Timeout value is set in the config, use that. - Value(Duration), - /// Timeout value was not set in config, apply the default. - Default, - /// Timeout value was explicitly set to 0, meaning we shouldn't use a timeout. - NoTimeout, -} - -impl Timeout { - pub fn new(value: Option) -> Self { - match value { - Some(Duration::ZERO) => Self::NoTimeout, - Some(value) => Self::Value(value), - None => Self::Default, - } - } -} - -#[derive( - Allocative, - Clone, - Debug, - Default, - Serialize, - Deserialize, - PartialEq, - Eq -)] -pub struct HttpConfig { - connect_timeout_ms: Option, - read_timeout_ms: Option, - write_timeout_ms: Option, - pub max_redirects: Option, -} - -impl HttpConfig { - pub fn from_config(config: &LegacyBuckConfig) -> anyhow::Result { - let connect_timeout_ms = config.parse("http", "connect_timeout_ms")?; - let read_timeout_ms = config.parse("http", "read_timeout_ms")?; - let write_timeout_ms = config.parse("http", "write_timeout_ms")?; - let max_redirects = config.parse("http", "max_redirects")?; - - Ok(Self { - connect_timeout_ms, - read_timeout_ms, - write_timeout_ms, - max_redirects, - }) - } - - pub fn connect_timeout(&self) -> Timeout { - match self.connect_timeout_ms.map(Duration::from_millis) { - Some(Duration::ZERO) => Timeout::NoTimeout, - Some(value) => Timeout::Value(value), - None => Timeout::Default, - } - } - - pub fn read_timeout(&self) -> Timeout { - match self.read_timeout_ms.map(Duration::from_millis) { - Some(Duration::ZERO) => Timeout::NoTimeout, - Some(value) => Timeout::Value(value), - None => Timeout::Default, - } - } - - pub fn write_timeout(&self) -> Timeout { - match self.write_timeout_ms.map(Duration::from_millis) { - Some(Duration::ZERO) => Timeout::NoTimeout, - Some(value) => Timeout::Value(value), - None => Timeout::Default, - } - } -} - -/// Configurations that are used at startup by the daemon. Those are actually read by the client, -/// and passed on to the daemon. -/// -/// The fields here are often raw String we get from the buckconfig, the daemon will do -/// deserialization once it receives them. That said, this is not a requirement. 
-/// -/// Backwards compatibility on Serialize / Deserialize is not required: if the client cannot read -/// the DaemonStartupConfig provided by the daemon when it tries to connect, it will reject that -/// daemon and restart (and in fact it will probably not get that far since a version check is done -/// before parsing DaemonStartupConfig). -#[derive(Allocative, Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] -pub struct DaemonStartupConfig { - pub daemon_buster: Option, - pub digest_algorithms: Option, - pub source_digest_algorithm: Option, - pub allow_vpnless: bool, - pub allow_vpnless_for_logging: bool, - pub paranoid: bool, - pub materializations: Option, - pub http: HttpConfig, -} - -impl DaemonStartupConfig { - pub fn new(config: &LegacyBuckConfig) -> anyhow::Result { - // Intepreted client side because we need the value here. - let allow_vpnless = config.parse("buck2", "allow_vpnless")?.unwrap_or_default(); - let allow_vpnless_for_logging = config - .parse("buck2", "allow_vpnless_for_logging")? - .unwrap_or(allow_vpnless); - - Ok(Self { - daemon_buster: config.get("buck2", "daemon_buster").map(ToOwned::to_owned), - digest_algorithms: config - .get("buck2", "digest_algorithms") - .map(ToOwned::to_owned), - source_digest_algorithm: config - .get("buck2", "source_digest_algorithm") - .map(ToOwned::to_owned), - allow_vpnless, - allow_vpnless_for_logging, - paranoid: false, // Setup later in ImmediateConfig - materializations: config - .get("buck2", "materializations") - .map(ToOwned::to_owned), - http: HttpConfig::from_config(config)?, - }) - } - - pub fn serialize(&self) -> anyhow::Result { - serde_json::to_string(&self).context("Error serializing DaemonStartupConfig") - } - - pub fn deserialize(s: &str) -> anyhow::Result { - serde_json::from_str::(s).context("Error deserializing DaemonStartupConfig") - } - - pub fn testing_empty() -> Self { - Self { - daemon_buster: None, - digest_algorithms: None, - source_digest_algorithm: None, - allow_vpnless: false, - allow_vpnless_for_logging: false, - paranoid: false, - materializations: None, - http: HttpConfig::default(), - } - } -} diff --git a/app/buck2_common/src/legacy_configs/key.rs b/app/buck2_common/src/legacy_configs/key.rs new file mode 100644 index 0000000000000..5a9bb6072fb87 --- /dev/null +++ b/app/buck2_common/src/legacy_configs/key.rs @@ -0,0 +1,19 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use dupe::Dupe; + +// TODO(nga): implement `buck2 help-buckconfig` +// https://www.internalfb.com/tasks/?t=183528129 +#[derive(derive_more::Display, Debug, Copy, Clone, Dupe)] +#[display("{}.{}", section, property)] +pub struct BuckconfigKeyRef<'a> { + pub section: &'a str, + pub property: &'a str, +} diff --git a/app/buck2_common/src/legacy_configs/mod.rs b/app/buck2_common/src/legacy_configs/mod.rs deleted file mode 100644 index 207213627eee7..0000000000000 --- a/app/buck2_common/src/legacy_configs/mod.rs +++ /dev/null @@ -1,2051 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -//! Contains utilities for dealing with buckv1 concepts (ex. buckv1's -//! .buckconfig files as configuration) - -pub mod cells; -pub mod dice; -pub mod init; -pub(crate) mod path; -pub mod view; - -use std::cell::OnceCell; -use std::collections::BTreeMap; -use std::collections::HashMap; -use std::fmt; -use std::fmt::Display; -use std::fs; -use std::io::prelude::*; -use std::path::PathBuf; -use std::str::FromStr; -use std::sync::Arc; - -use allocative::Allocative; -use anyhow::Context; -use buck2_core::cells::name::CellName; -use buck2_core::cells::CellResolver; -use buck2_core::fs::paths::abs_norm_path::AbsNormPath; -use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; -use buck2_core::fs::paths::*; -use buck2_core::fs::project::*; -use derive_more::Display; -use dupe::Dupe; -use gazebo::eq_chain; -use gazebo::prelude::*; -use itertools::Itertools; -use once_cell::sync::Lazy; -use regex::Regex; -use starlark_map::sorted_map::SortedMap; -use thiserror::Error; - -use crate::legacy_configs::cells::BuckConfigBasedCells; -use crate::legacy_configs::view::LegacyBuckConfigView; -use crate::legacy_configs::view::LegacyBuckConfigsView; -use crate::target_aliases::BuckConfigTargetAliasResolver; - -#[derive(Error, Debug)] -pub(crate) enum ConfigError { - #[error("Expected line of the form `key = value` but key was empty. Line was `{0}`")] - EmptyKey(String), - #[error("Included file doesn't exist `{0}`")] - MissingInclude(String), - #[error("Improperly formatted section. Expected something of the form `[section]`, got {0}")] - SectionMissingTrailingBracket(String), - #[error("Improperly include directive path. Got {0}")] - BadIncludePath(String), - #[error( - "Couldn't parse line. Expected include directive (``), section(`[some_section]`), or key assignment (`some_key = some_value`). Got `{0}`" - )] - InvalidLine(String), - #[error("Detected cycles in buckconfig $(config) references: {}", format_cycle(.0))] - ReferenceCycle(Vec<(String, String)>), - #[error("Unable to resolve cell-relative path `{0}`")] - UnableToResolveCellRelativePath(String), - #[error( - "Invalid value for buckconfig `{section}.{key}`: conversion to {ty} failed, value as `{value}`" - )] - ParseFailed { - section: String, - key: String, - value: String, - ty: &'static str, - }, - #[error("Unknown cell: `{0}`")] - UnknownCell(CellName), -} - -/// A collection of configs, keyed by cell. -#[derive(Clone, Dupe, Debug, Allocative)] -pub struct LegacyBuckConfigs { - data: Arc>, -} - -impl LegacyBuckConfigs { - pub fn new(data: HashMap) -> Self { - let data = SortedMap::from_iter(data); - Self { - data: Arc::new(data), - } - } - - pub fn get<'a>(&'a self, cell_name: CellName) -> anyhow::Result<&'a LegacyBuckConfig> { - self.data - .get(&cell_name) - .ok_or_else(|| ConfigError::UnknownCell(cell_name).into()) - } - - pub fn iter(&self) -> impl Iterator { - self.data.iter().map(|(name, config)| (*name, config)) - } - - pub(crate) fn compare(&self, other: &Self) -> bool { - let x = &self.data; - let y = &other.data; - - eq_chain! 
{ - x.len() == y.len(), - x.iter().all(|(cell, config)| { - y.get(cell).map_or(false, |y_config| y_config.compare(config)) - }) - } - } -} - -fn format_cycle(cycle: &[(String, String)]) -> String { - cycle - .iter() - .map(|(section, key)| format!("`{}.{}`", section, key)) - .join(" -> ") -} - -#[derive(Clone, Debug, Allocative)] -struct ConfigFileLocation { - source_file: Arc, - line: usize, -} - -#[derive(Debug, PartialEq, Eq)] -struct MainConfigFile { - path: AbsNormPathBuf, - - /// if a main config file is in project or global - owned_by_project: bool, -} - -#[derive(Debug, Allocative)] -struct ConfigFile { - id: String, - include_source: Option, -} - -#[derive(Clone, Dupe, Debug, Allocative)] -pub struct LegacyBuckConfig(Arc); - -impl LegacyBuckConfig { - /// configs are equal if the data they resolve in is equal, regardless of the origin of the config - pub(crate) fn compare(&self, other: &Self) -> bool { - eq_chain!( - self.0.values.len() == other.0.values.len(), - self.0.values.iter().all(|(section_name, section)| { - other - .0 - .values - .get(section_name) - .map_or(false, |other_sec| other_sec.compare(section)) - }) - ) - } -} - -impl LegacyBuckConfigView for LegacyBuckConfig { - fn get(&self, section: &str, key: &str) -> anyhow::Result>> { - Ok(self.get(section, key).map(|v| v.to_owned().into())) - } -} - -impl LegacyBuckConfigsView for LegacyBuckConfigs { - fn get(&self, cell_name: CellName) -> anyhow::Result<&dyn LegacyBuckConfigView> { - Ok(self.get(cell_name)?) - } - - fn iter<'a>( - &'a self, - ) -> Box + 'a> { - Box::new( - self.iter() - .map(|(cell, config)| (cell, config as &dyn LegacyBuckConfigView)), - ) - } -} - -#[derive(Debug, Default)] -struct SectionBuilder { - values: BTreeMap, -} - -impl SectionBuilder { - fn finish(self) -> LegacyBuckConfigSection { - LegacyBuckConfigSection { - values: SortedMap::from_iter(self.values), - } - } -} - -#[derive(Debug, Allocative)] -struct ConfigData { - values: SortedMap, -} - -#[derive(Clone, Debug, Allocative)] -enum ResolvedValue { - // A placeholder used before we do resolution. - Unknown, - // Indicates that there's no resolution required, the resolved value and raw value are the same. - Literal, - // The resolved value for non-literals. - Resolved(String), -} - -#[derive(Clone, Debug, Allocative)] -enum Location { - File(ConfigFileLocation), - CommandLineArgument, -} - -impl Location { - fn as_legacy_buck_config_location(&self) -> LegacyBuckConfigLocation { - match self { - Self::File(x) => LegacyBuckConfigLocation::File(&x.source_file.id, x.line), - Self::CommandLineArgument => LegacyBuckConfigLocation::CommandLineArgument, - } - } -} - -#[derive(Clone, Debug)] -struct ConfigArgumentPair { - section: String, - key: String, - // None value means this config is unset. - value: Option, - // Stores config's cell dir for resolving cell, when applicable. - // cell name not used due to the many-to-one mapping of cell aliases to - // actual cells, which complicates parsing. - cell_path: Option, -} - -// Represents a config section and key only, for example, `cxx.compiler`. -#[derive(Clone, Debug)] -pub struct ConfigSectionAndKey { - // TODO(scottcao): Add cell_path - pub section: String, - pub key: String, -} - -/// Represents a configuration argument that can be passed -/// on the command line. For example, `--config foo.bar=val` -/// or `--config-file foo.bcfg`. -#[derive(Debug, Display)] -pub enum LegacyConfigCmdArg { - /// A single config key-value pair (in `a.b=c` format). 
- Flag(LegacyConfigCmdArgFlag), - /// A file containing additional config values (in `.buckconfig` format). - File(LegacyConfigCmdArgFile), -} - -impl LegacyConfigCmdArg { - pub fn flag(val: &str) -> anyhow::Result { - let (cell, val) = match val.split_once("//") { - Some((cell, val)) if !cell.contains('=') => (Some(cell.to_owned()), val), - _ => (None, val), - }; - - let ParsedConfigArg { - section, - key, - value, - } = parse_config_arg(val)?; - - Ok(Self::Flag(LegacyConfigCmdArgFlag { - cell, - section, - key, - value, - })) - } - - pub fn file(val: &str) -> anyhow::Result { - let (cell, val) = match val.split_once("//") { - Some((cell, val)) => (Some(cell.to_owned()), val), // This should also reject =? - _ => (None, val), - }; - - Ok(LegacyConfigCmdArg::File(LegacyConfigCmdArgFile { - cell, - path: val.to_owned(), - })) - } -} - -#[derive(Debug)] -pub struct LegacyConfigCmdArgFlag { - cell: Option, - section: String, - key: String, - value: Option, -} - -impl fmt::Display for LegacyConfigCmdArgFlag { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if let Some(cell) = &self.cell { - write!(f, "{}//", cell)?; - } - write!(f, "{}.{}=", self.section, self.key)?; - if let Some(value) = &self.value { - write!(f, "{}", value)?; - } - Ok(()) - } -} - -#[derive(Debug)] -pub struct LegacyConfigCmdArgFile { - cell: Option, - path: String, -} - -impl fmt::Display for LegacyConfigCmdArgFile { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if let Some(cell) = &self.cell { - write!(f, "{}//", cell)?; - } - write!(f, "{}", self.path) - } -} - -/// Private representation of a processed config arg, namely after file -/// path resolution has been performed. -#[derive(Debug)] -enum ResolvedLegacyConfigArg { - /// A single config key-value pair (in `a.b=c` format). - Flag(ConfigArgumentPair), - /// A file containing additional config values (in `.buckconfig` format). - File(AbsNormPathBuf), -} - -/// State required to perform resolution of cell-relative paths. -struct CellResolutionState<'a> { - project_filesystem: &'a ProjectRoot, - cwd: &'a AbsNormPath, - cell_resolver: OnceCell, -} - -#[derive(Error, Debug)] -enum ConfigArgumentParseError { - #[error("Could not find section separator (`.`) in pair `{0}`")] - NoSectionDotSeparator(String), - #[error("Could not find equals sign (`=`) in pair `{0}`")] - NoEqualsSeparator(String), - - #[error("Expected key-value in format of `section.key=value` but only got `{0}`")] - MissingData(String), - - #[error("Contains whitespace in key-value pair `{0}`")] - WhitespaceInKeyOrValue(String), - - #[error("Specifying cells via cli config overrides is banned (`repositories.key=value`)")] - CellOverrideViaCliConfig, -} - -// Parses config key in the format `section.key` -pub fn parse_config_section_and_key( - raw_section_and_key: &str, - raw_arg_in_err: Option<&str>, // Used in error strings to preserve the original config argument, not just section and key -) -> anyhow::Result { - let raw_arg = raw_arg_in_err.unwrap_or(raw_section_and_key); - let (raw_section, raw_key) = raw_section_and_key - .split_once('.') - .ok_or_else(|| ConfigArgumentParseError::NoSectionDotSeparator(raw_arg.to_owned()))?; - - // We only trim the section + key, whitespace in values needs to be preserved. For example, - // Buck can be invoked with --config section.key="Some Value" that contains important whitespace. 
- let trimmed_section = raw_section.trim_start(); - if trimmed_section.find(char::is_whitespace).is_some() - || raw_key.find(char::is_whitespace).is_some() - { - return Err(anyhow::anyhow!( - ConfigArgumentParseError::WhitespaceInKeyOrValue(raw_arg.to_owned()) - )); - } - - if trimmed_section.is_empty() || raw_key.is_empty() { - return Err(anyhow::anyhow!(ConfigArgumentParseError::MissingData( - raw_arg.to_owned() - ))); - } - - Ok(ConfigSectionAndKey { - section: trimmed_section.to_owned(), - key: raw_key.to_owned(), - }) -} - -struct ParsedConfigArg { - section: String, - key: String, - value: Option, -} - -/// Parses key-value pairs in the format `section.key=value` or `section.key=`. -fn parse_config_arg(raw_arg: &str) -> anyhow::Result { - let (raw_section_and_key, raw_value) = raw_arg - .split_once('=') - .ok_or_else(|| ConfigArgumentParseError::NoEqualsSeparator(raw_arg.to_owned()))?; - let config_section_and_key = parse_config_section_and_key(raw_section_and_key, Some(raw_arg))?; - - let value = match raw_value { - "" => None, // An empty string unsets this config. - v => Some(v.to_owned()), - }; - - Ok(ParsedConfigArg { - section: config_section_and_key.section, - key: config_section_and_key.key, - value, - }) -} - -#[derive(Debug, Allocative)] -struct ConfigValue { - raw_value: String, - resolved_value: ResolvedValue, - source: Location, -} - -#[derive(Debug, Default, Allocative)] -pub struct LegacyBuckConfigSection { - values: SortedMap, -} - -impl LegacyBuckConfigSection { - /// configs are equal if the data they resolve in is equal, regardless of the origin of the config - pub(crate) fn compare(&self, other: &Self) -> bool { - eq_chain!( - self.values.len() == other.values.len(), - self.values.iter().all(|(name, value)| other - .values - .get(name) - .map_or(false, |other_val| other_val.as_str() == value.as_str())) - ) - } - - pub fn iter(&self) -> impl Iterator { - self.values - .iter() - .map(move |(key, value)| (key.as_str(), LegacyBuckConfigValue { value })) - } - - pub fn keys(&self) -> impl Iterator { - self.values.keys() - } - - pub fn get(&self, key: &str) -> Option { - self.values - .get(key) - .map(move |value| LegacyBuckConfigValue { value }) - } -} - -impl ConfigValue { - fn new_raw(source: ConfigFileLocation, value: String) -> Self { - Self { - raw_value: value, - resolved_value: ResolvedValue::Unknown, - source: Location::File(source), - } - } - - fn new_raw_arg(raw_value: String) -> Self { - Self { - raw_value, - resolved_value: ResolvedValue::Unknown, - source: Location::CommandLineArgument, - } - } - - fn raw_value(&self) -> &str { - &self.raw_value - } - - fn as_str(&self) -> &str { - match &self.resolved_value { - ResolvedValue::Literal => &self.raw_value, - ResolvedValue::Resolved(v) => v, - ResolvedValue::Unknown => { - unreachable!("cannot call as_str() until all values are resolved") - } - } - } -} - -struct LegacyConfigParser<'a> { - file_ops: &'a mut dyn ConfigParserFileOps, - include_stack: Vec, - current_file: Option>, - values: BTreeMap, - current_section: (String, BTreeMap), -} - -/// Matches file include directives. `optional` indicates whether it's an -/// optional include, `include` is the path. 
Examples: -/// -/// -/// -/// -static FILE_INCLUDE: Lazy = - Lazy::new(|| Regex::new("<(?P\\?)?file:(?P..*)>").unwrap()); - -pub trait ConfigParserFileOps { - fn read_file_lines( - &mut self, - path: &AbsNormPath, - ) -> anyhow::Result>>>; - - fn file_exists(&self, path: &AbsNormPath) -> bool; - - fn file_id(&self, path: &AbsNormPath) -> String { - path.to_string() - } -} - -struct DefaultConfigParserFileOps {} - -impl ConfigParserFileOps for DefaultConfigParserFileOps { - fn read_file_lines( - &mut self, - path: &AbsNormPath, - ) -> anyhow::Result>>> { - let f = std::fs::File::open(path).with_context(|| format!("Reading file `{:?}`", path))?; - let file = std::io::BufReader::new(f); - Ok(Box::new(file.lines())) - } - - fn file_exists(&self, path: &AbsNormPath) -> bool { - PathBuf::from(path.as_os_str()).exists() - } -} - -impl<'a> LegacyConfigParser<'a> { - fn new(file_ops: &'a mut dyn ConfigParserFileOps) -> Self { - LegacyConfigParser { - values: BTreeMap::new(), - include_stack: Vec::new(), - current_file: None, - current_section: Self::unspecified_section(), - file_ops, - } - } - - fn unspecified_section() -> (String, BTreeMap) { - ("__unspecified__".to_owned(), BTreeMap::new()) - } - - fn parse_file( - &mut self, - path: &AbsNormPath, - source: Option, - follow_includes: bool, - ) -> anyhow::Result<()> { - self.start_file(path, source)?; - self.parse_file_on_stack(path, follow_includes) - .with_context(|| format!("Error parsing buckconfig `{}`", path))?; - self.finish_file(); - Ok(()) - } - - fn push_file(&mut self, line: usize, path: &AbsNormPath) -> anyhow::Result<()> { - let include_source = ConfigFileLocation { - source_file: self.current_file.dupe().unwrap_or_else(|| panic!("push_file() called without any files on the include stack. top-level files should use start_file()")), - line, - }; - - self.include_stack.push(include_source.clone()); - - let source_file = Arc::new(ConfigFile { - id: self.file_ops.file_id(path), - include_source: Some(Location::File(include_source)), - }); - self.current_file = Some(source_file); - Ok(()) - } - - fn start_file(&mut self, path: &AbsNormPath, source: Option) -> anyhow::Result<()> { - let source_file = Arc::new(ConfigFile { - id: self.file_ops.file_id(path), - include_source: source, - }); - self.current_file = Some(source_file); - Ok(()) - } - - fn pop_file(&mut self) { - match self.include_stack.pop() { - Some(loc) => { - self.current_file = Some(loc.source_file); - } - None => { - self.current_file = None; - } - } - } - - fn location(&self, line_number: usize) -> ConfigFileLocation { - ConfigFileLocation { - source_file: self - .current_file - .dupe() - .unwrap_or_else(|| panic!("tried to get location without any current file.")), - // Our line numbers at this point are 0-based, but most people expect file line numbers to be 1-based. 
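The angle-bracket examples in the `FILE_INCLUDE` doc comment above were lost in extraction; the directive forms are `<file:path>` and `<?file:path>`. The sketch below spells the regex out with the named capture groups the parser reads back via `m.name("optional")` and `m.name("include")` (requires the `regex` and `once_cell` crates, both already used in this file):

```rust
use once_cell::sync::Lazy;
use regex::Regex;

// Same pattern as `FILE_INCLUDE`, with the capture group names restored.
static FILE_INCLUDE_SKETCH: Lazy<Regex> =
    Lazy::new(|| Regex::new("<(?P<optional>\\?)?file:(?P<include>..*)>").unwrap());

fn main() {
    // Required include: `<file:path>`.
    let m = FILE_INCLUDE_SKETCH.captures("<file:other/.buckconfig>").unwrap();
    assert!(m.name("optional").is_none());
    assert_eq!(m.name("include").unwrap().as_str(), "other/.buckconfig");

    // Optional include: `<?file:path>` -- a missing file is tolerated.
    let m = FILE_INCLUDE_SKETCH.captures("<?file:maybe/.buckconfig>").unwrap();
    assert!(m.name("optional").is_some());
}
```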
- line: line_number + 1, - } - } - - fn apply_config_arg( - &mut self, - config_pair: &ConfigArgumentPair, - current_cell_path: AbsNormPathBuf, - ) -> anyhow::Result<()> { - if config_pair.section == "repositories" { - return Err(anyhow::anyhow!( - ConfigArgumentParseError::CellOverrideViaCliConfig - )); - }; - let pair = config_pair.to_owned(); - let cell_matches = pair.cell_path == Some(current_cell_path) || pair.cell_path.is_none(); - if cell_matches { - let config_section = self - .values - .entry(pair.section) - .or_insert_with(SectionBuilder::default); - - match pair.value { - Some(raw_value) => { - let config_value = ConfigValue::new_raw_arg(raw_value); - config_section.values.insert(pair.key, config_value) - } - None => config_section.values.remove(&pair.key), - }; - } - Ok(()) - } - - fn parse_file_on_stack( - &mut self, - path: &AbsNormPath, - parse_includes: bool, - ) -> anyhow::Result<()> { - let parent = path - .parent() - .context("parent should give directory containing the config file")?; - let file_lines = self.file_ops.read_file_lines(path)?; - self.parse_lines(parent, file_lines, parse_includes) - } - - fn strip_line_comment(line: &str) -> &str { - match line.split_once(" #") { - Some((before, _)) => before, - None => line, - } - } - - fn parse_section_marker(line: &str) -> anyhow::Result> { - // We allow trailing comment markers at the end of sections, since otherwise - // using oss-enable/oss-disable is super tricky - match line.strip_prefix('[') { - Some(remaining) => match Self::strip_line_comment(remaining).strip_suffix(']') { - None => Err(ConfigError::SectionMissingTrailingBracket(line.to_owned()).into()), - Some(section) => Ok(Some(section)), - }, - None => Ok(None), - } - } - - fn parse_lines( - &mut self, - dir: &AbsNormPath, - lines: T, - parse_includes: bool, - ) -> anyhow::Result<()> - where - T: IntoIterator>, - E: std::error::Error + Send + Sync + 'static, - { - let lines: Vec = lines.into_iter().collect::, _>>()?; - - let lines = lines - .into_iter() - // Trim leading/trailing whitespace. - .map(|line| line.trim().to_owned()) - // add line numbers - .enumerate() - // Coalesce escaped newlines. - .coalesce(|(i, mut prev), (j, next)| { - if prev.ends_with('\\') { - prev.truncate(prev.len() - 1); - prev.push_str(&next); - Ok((i, prev)) - } else { - Err(((i, prev), (j, next))) - } - }) - // Remove commented lines. - // This needs to come after the coalesce in case someone has an empty line after an escaped newline - // Remove empty lines and comment lines (support both '#' and ';' for comment lines) - .filter(|(_, l)| !l.is_empty() && !l.starts_with('#') && !l.starts_with(';')); - - for (i, line) in lines { - if let Some(section) = Self::parse_section_marker(&line)? { - // Start the new section, grabbing the recorded values for the previous - // section. - let section = std::mem::replace( - &mut self.current_section, - (section.to_owned(), BTreeMap::new()), - ); - self.commit_section(section) - } else if let Some((key, val)) = line.split_once('=') { - let key = key.trim(); - let val = val.trim(); - if key.is_empty() { - return Err(anyhow::anyhow!(ConfigError::EmptyKey(line.to_owned()))); - } - self.current_section.1.insert( - key.to_owned(), - ConfigValue::new_raw(self.location(i), val.to_owned()), - ); - } else if let Some(m) = FILE_INCLUDE.captures(&line) { - if parse_includes { - let include = m.name("include").unwrap().as_str(); - let include = if cfg!(windows) && include.contains(':') { - // On Windows absolute includes look like /C:/foo/bar. 
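The line pre-processing in `parse_lines` is worth seeing in isolation: lines are trimmed, numbered, backslash continuations are merged with `Itertools::coalesce` (keeping the first physical line's number for diagnostics), and only then are comments and blanks dropped. A minimal sketch of the continuation step (requires the `itertools` crate, as used above):

```rust
use itertools::Itertools;

fn join_continuations(lines: &[&str]) -> Vec<(usize, String)> {
    lines
        .iter()
        .map(|l| l.trim().to_owned())
        .enumerate()
        .coalesce(|(i, mut prev), (j, next)| {
            // A trailing `\` merges this line with the next, keeping index `i`.
            if prev.ends_with('\\') {
                prev.truncate(prev.len() - 1);
                prev.push_str(&next);
                Ok((i, prev))
            } else {
                Err(((i, prev), (j, next)))
            }
        })
        .collect()
}

fn main() {
    let joined = join_continuations(&["multiline = hello \\", "world\\", "!"]);
    // All three physical lines collapse into one logical line, numbered 0,
    // matching the `multiline` case in `test_simple` below.
    assert_eq!(joined, vec![(0, "multiline = hello world!".to_owned())]);
}
```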
- // For compatibility with Python parser we need to support this. - include.trim_start_matches('/') - } else { - include - }; - let optional = m.name("optional").is_some(); - let include_file = if let Ok(absolute) = AbsNormPath::new(include) { - absolute.to_owned() - } else { - let relative = RelativePath::new(include); - match dir.join_normalized(relative) { - Ok(d) => d, - Err(_) => { - return Err(anyhow::anyhow!(ConfigError::BadIncludePath( - include.to_owned() - ))); - } - } - }; - - match (optional, self.file_ops.file_exists(&include_file)) { - (_, true) => { - self.push_file(i, &include_file)?; - self.parse_file_on_stack(&include_file, parse_includes)?; - self.pop_file(); - } - (false, false) => { - return Err(anyhow::anyhow!(ConfigError::MissingInclude( - include.to_owned() - ))); - } - (true, _) => { - // optional case, missing is okay. - } - } - } - } else { - return Err(anyhow::anyhow!(ConfigError::InvalidLine(line.to_owned()))); - } - } - Ok(()) - } - - fn commit_section(&mut self, section: (String, BTreeMap)) { - let (section, values) = section; - // Commit the previous section. - let committed = self - .values - .entry(section) - .or_insert_with(SectionBuilder::default); - values.into_iter().for_each(|(k, v)| { - committed.values.insert(k, v); - }); - } - - fn finish_file(&mut self) { - self.pop_file(); - - let section = std::mem::replace(&mut self.current_section, Self::unspecified_section()); - self.commit_section(section); - } - - fn finish(self) -> anyhow::Result { - let LegacyConfigParser { values, .. } = self; - - let values = ConfigResolver::resolve(values)?; - - Ok(LegacyBuckConfig(Arc::new(ConfigData { values }))) - } -} - -// Since we can't change other entries in values while we iterate over the configuration, we use -// ResolvedItems to store information about recursive resolutions and the current resolution stack. -struct ResolvedItems( - // Maintains map of items that are resolved in the process of resolving requested items. - BTreeMap>, - // Maintains the resolution stack to provide error messages when a cycle is detected. 
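Before the resolver internals, here is the end-to-end behavior they implement: a raw value may embed `$(config section.key)` references, which are expanded after parsing so that `get` only ever returns fully resolved strings. A hypothetical test using this file's own `testing::parse` helper (defined further below):

```rust
#[cfg(test)]
mod reference_expansion_sketch {
    use super::testing::parse;

    #[test]
    fn expands_references() -> anyhow::Result<()> {
        let config = parse(
            &[(
                "/config",
                "[paths]\nroot = /repo\nout = $(config paths.root)/buck-out\n",
            )],
            "/config",
        )?;
        // `out` is stored raw during parsing and resolved afterwards.
        assert_eq!(Some("/repo/buck-out"), config.get("paths", "out"));
        Ok(())
    }
}
```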
- Vec<(String, String)>, -); - -enum ResolveState { - Resolving, - Done(String), -} - -impl ResolvedItems { - fn start_resolving(&mut self, section: &str, key: &str) -> anyhow::Result<()> { - let section_values = match self.0.get_mut(section) { - Some(v) => v, - None => { - self.0.insert(section.to_owned(), BTreeMap::new()); - self.0.get_mut(section).unwrap() - } - }; - - if section_values - .insert(key.to_owned(), ResolveState::Resolving) - .is_some() - { - return Err(anyhow::anyhow!(self.cycle_error(section, key))); - } - - self.1.push((section.to_owned(), key.to_owned())); - - Ok(()) - } - - fn finish_resolving(&mut self, section: &str, key: &str, value: String) { - let entry = self.0.get_mut(section).unwrap().get_mut(key).unwrap(); - *entry = ResolveState::Done(value); - self.1.pop(); - } - - fn cycle_error(&self, section: &str, key: &str) -> ConfigError { - let mut iter = self.1.iter(); - for v in &mut iter { - if v.0 == section && v.1 == key { - break; - } - } - - let mut cycle = vec![(section.to_owned(), key.to_owned())]; - cycle.extend(iter.cloned()); - cycle.push((section.to_owned(), key.to_owned())); - - ConfigError::ReferenceCycle(cycle) - } - - fn get(&self, section: &str, key: &str) -> Option<&String> { - self.0 - .get(section) - .and_then(|e| e.get(key)) - .and_then(|e| match e { - ResolveState::Resolving => None, - ResolveState::Done(v) => Some(v), - }) - } - - fn drain_to(self, value: &mut BTreeMap) -> anyhow::Result<()> { - assert!(self.1.is_empty(), "All values should have been resolved."); - for (section, items) in self.0.into_iter() { - let result_section = value.get_mut(§ion).unwrap_or_else( - || panic!("Shouldn't have a resolved value for something that doesn't appear in the base config")); - for (key, value) in items.into_iter() { - match value { - ResolveState::Resolving => { - unreachable!("All values should have been resolved."); - } - ResolveState::Done(v) => { - result_section.values.get_mut(&key).unwrap().resolved_value = - ResolvedValue::Resolved(v); - } - } - } - } - - Ok(()) - } -} - -struct ConfigResolver { - values: BTreeMap, -} - -impl ConfigResolver { - #[allow(clippy::from_iter_instead_of_collect)] - fn resolve( - values: BTreeMap, - ) -> anyhow::Result> { - let mut resolver = Self { values }; - resolver.resolve_all()?; - Ok(SortedMap::from_iter( - resolver.values.into_iter().map(|(k, v)| (k, v.finish())), - )) - } - - fn resolve_all(&mut self) -> anyhow::Result<()> { - // First, identify all the values that need to be resolved and mark all the others as literals. - let mut to_resolve = Vec::new(); - for (section_name, section) in &mut self.values { - for (key, value) in &mut section.values { - // if it's been resolved already, move the resolved value into values. - if Self::regex().is_match(value.raw_value()) { - to_resolve.push((section_name.to_owned(), key.to_owned())); - } else { - value.resolved_value = ResolvedValue::Literal; - } - } - } - - // Now, resolve all the items. 
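The `Resolving`/`Done` pair above is the classic two-state depth-first search for cycle detection: a key is marked `Resolving` on entry and `Done` on exit, and encountering a `Resolving` key again means a reference cycle. The same idea in miniature, over a plain dependency map (a sketch, not the resolver's actual types):

```rust
use std::collections::{BTreeMap, HashMap};

enum State {
    Resolving,
    Done,
}

fn visit(
    graph: &BTreeMap<&str, Vec<&str>>,
    node: &str,
    states: &mut HashMap<String, State>,
) -> Result<(), String> {
    match states.get(node) {
        Some(State::Done) => return Ok(()),
        // Re-entering a node that is still being resolved means a cycle.
        Some(State::Resolving) => return Err(format!("cycle through `{node}`")),
        None => {}
    }
    states.insert(node.to_owned(), State::Resolving);
    for dep in graph.get(node).into_iter().flatten() {
        visit(graph, dep, states)?;
    }
    states.insert(node.to_owned(), State::Done);
    Ok(())
}

fn main() {
    let mut graph = BTreeMap::new();
    graph.insert("a", vec!["b"]);
    graph.insert("b", vec!["c"]);
    graph.insert("c", vec!["a"]); // a -> b -> c -> a
    let err = visit(&graph, "a", &mut HashMap::new()).unwrap_err();
    assert!(err.contains("cycle"));
}
```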
- for (section, key) in to_resolve { - let mut resolved_items = ResolvedItems(BTreeMap::new(), Vec::new()); - self.resolve_item(&mut resolved_items, §ion, &key)?; - resolved_items.drain_to(&mut self.values)?; - } - Ok(()) - } - - fn regex() -> &'static Regex { - static RE: Lazy = Lazy::new(|| Regex::new(r"\$\(config ([^)]*)\)").unwrap()); - &RE - } - - fn resolve_item<'a>( - &'a self, - resolved_items: &'a mut ResolvedItems, - section: &str, - key: &str, - ) -> anyhow::Result<&'a str> { - let raw_value = match self.values.get(section).and_then(|e| e.values.get(key)) { - None => return Ok(""), - Some(v) => match &v.resolved_value { - ResolvedValue::Unknown => v.raw_value(), - ResolvedValue::Literal => { - return Ok(v.raw_value()); - } - ResolvedValue::Resolved(v) => { - return Ok(v); - } - }, - }; - - if resolved_items.get(section, key).is_none() { - resolved_items.start_resolving(section, key)?; - let v = self.do_resolve(resolved_items, raw_value)?; - resolved_items.finish_resolving(section, key, v); - } - - Ok(resolved_items.get(section, key).unwrap()) - } - - fn do_resolve( - &self, - resolved_items: &mut ResolvedItems, - raw_value: &str, - ) -> anyhow::Result { - let mut resolved = String::new(); - let mut last = 0; - - let re = Self::regex(); - - // TODO(cjhopman): Should add support for escaping the call, I guess. - for capture in re.captures_iter(raw_value) { - let m = capture.get(0).unwrap(); - - resolved.push_str(&raw_value[last..m.start()]); - last = m.end(); - - let captures = re.captures(m.as_str()).unwrap(); - - let config_key = captures.get(1).unwrap().as_str(); - - let config_section_and_key = parse_config_section_and_key(config_key, None)?; - - resolved.push_str(self.resolve_item( - resolved_items, - &config_section_and_key.section, - &config_section_and_key.key, - )?); - } - - resolved.push_str(&raw_value[last..]); - Ok(resolved) - } -} - -pub struct LegacyBuckConfigValue<'a> { - value: &'a ConfigValue, -} - -#[derive(PartialEq, Debug)] -pub enum LegacyBuckConfigLocation<'a> { - File(&'a str, usize), - CommandLineArgument, -} - -impl<'a> Display for LegacyBuckConfigLocation<'a> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::File(file, line) => { - write!(f, "at {}:{}", file, line) - } - Self::CommandLineArgument => { - write!(f, "on the command line") - } - } - } -} - -impl<'a> LegacyBuckConfigValue<'a> { - pub fn as_str(&self) -> &'a str { - self.value.as_str() - } - - pub fn raw_value(&self) -> &str { - self.value.raw_value() - } - - pub fn location(&self) -> LegacyBuckConfigLocation { - match &self.value.source { - Location::File(file) => LegacyBuckConfigLocation::File(&file.source_file.id, file.line), - Location::CommandLineArgument => LegacyBuckConfigLocation::CommandLineArgument, - } - } - - pub fn location_stack(&self) -> Vec { - let mut res = Vec::new(); - let mut location = Some(&self.value.source); - - while let Some(loc) = location.take() { - match &loc { - Location::File(loc) => { - res.push(LegacyBuckConfigLocation::File( - &loc.source_file.id, - loc.line, - )); - location = loc.source_file.include_source.as_ref(); - } - Location::CommandLineArgument => { - // No stack - } - } - } - res - } -} - -impl LegacyBuckConfig { - pub fn empty() -> Self { - Self(Arc::new(ConfigData { - values: SortedMap::new(), - })) - } - - pub fn target_alias_resolver(&self) -> BuckConfigTargetAliasResolver { - BuckConfigTargetAliasResolver::new(self.dupe()) - } - - pub fn parse_with_file_ops( - path: &AbsNormPath, - file_ops: &mut dyn 
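`do_resolve` above is a standard regex splice: iterate the matches in order, copy the literal text between them verbatim, and substitute each `$(config ...)` occurrence while tracking the end of the previous match. The same shape in isolation (the real code re-runs `captures` on the match text; this sketch reads the capture directly):

```rust
use regex::Regex;

fn splice(raw: &str, lookup: impl Fn(&str) -> String) -> String {
    let re = Regex::new(r"\$\(config ([^)]*)\)").unwrap();
    let mut resolved = String::new();
    let mut last = 0;
    for caps in re.captures_iter(raw) {
        let m = caps.get(0).unwrap();
        resolved.push_str(&raw[last..m.start()]); // literal text before the match
        resolved.push_str(&lookup(caps.get(1).unwrap().as_str()));
        last = m.end();
    }
    resolved.push_str(&raw[last..]); // trailing literal text
    resolved
}

fn main() {
    let out = splice("-I$(config paths.include) -O2", |key| {
        assert_eq!(key, "paths.include");
        "/usr/include".to_owned()
    });
    assert_eq!(out, "-I/usr/include -O2");
}
```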
ConfigParserFileOps,
-        config_args: &[LegacyConfigCmdArg],
-    ) -> anyhow::Result<Self> {
-        // This function is only used internally for tests, so it's fine to skip cell
-        // resolution, as we do not have a `ProjectFilesystem`. Either way, this will fail
-        // gracefully if there's a cell-relative config arg, so this can be updated
-        // appropriately.
-        let processed_config_args =
-            LegacyBuckConfig::process_config_args(config_args, None, file_ops)?;
-        Self::parse_with_file_ops_with_includes(
-            &[MainConfigFile {
-                path: path.to_buf(),
-                owned_by_project: true,
-            }],
-            file_ops,
-            &processed_config_args,
-            true,
-        )
-    }
-
-    fn resolve_config_flag_arg(
-        flag_arg: &LegacyConfigCmdArgFlag,
-        cell_resolution: Option<&CellResolutionState>,
-        file_ops: &mut dyn ConfigParserFileOps,
-    ) -> anyhow::Result<ConfigArgumentPair> {
-        let cell_path = flag_arg
-            .cell
-            .as_ref()
-            .map(|cell| {
-                Self::resolve_config_file_arg(
-                    &LegacyConfigCmdArgFile {
-                        cell: Some(cell.clone()),
-                        path: "".to_owned(),
-                    },
-                    cell_resolution,
-                    file_ops,
-                )
-            })
-            .transpose()?;
-
-        Ok(ConfigArgumentPair {
-            section: flag_arg.section.clone(),
-            key: flag_arg.key.clone(),
-            value: flag_arg.value.clone(),
-            cell_path,
-        })
-    }
-
-    fn resolve_config_file_arg(
-        file_arg: &LegacyConfigCmdArgFile,
-        cell_resolution: Option<&CellResolutionState>,
-        file_ops: &mut dyn ConfigParserFileOps,
-    ) -> anyhow::Result<AbsNormPathBuf> {
-        if let Some(cell_alias) = &file_arg.cell {
-            let cell_resolution_state = cell_resolution.ok_or_else(|| {
-                anyhow::anyhow!(ConfigError::UnableToResolveCellRelativePath(format!(
-                    "{}//{}",
-                    cell_alias, file_arg.path
-                )))
-            })?;
-            if let Some(cell_resolver) = cell_resolution_state.cell_resolver.get() {
-                return cell_resolver.resolve_cell_relative_path(
-                    cell_alias,
-                    &file_arg.path,
-                    cell_resolution_state.project_filesystem,
-                    cell_resolution_state.cwd,
-                );
-            } else {
-                // Reading an immediate cell mapping is extremely fast, as we just read a single
-                // config file (which would already be in memory). There is an alternative: we
-                // could take advantage of the fact that config-file argument resolution happens
-                // _after_ the initial parsing of the root, but that requires quite a bit more
-                // work to access the unresolved parts and makes further assumptions. The saving
-                // would be < 1ms, so we take this approach here. It can easily be changed later.
-                let cell_resolver = BuckConfigBasedCells::parse_immediate_config_with_file_ops(
-                    cell_resolution_state.project_filesystem,
-                    file_ops,
-                )?
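The `OnceCell` in `CellResolutionState` makes the comment above concrete: the first cell-relative argument pays for parsing the immediate cell mapping, and every later argument reuses the stored resolver. A minimal sketch of that compute-once pattern with `once_cell::sync::OnceCell` (the sketch uses `get_or_init`; the code above does the `get`/`set` dance by hand because the computation is fallible):

```rust
use once_cell::sync::OnceCell;

struct Cache {
    resolver: OnceCell<String>, // stand-in for the real cell resolver
}

impl Cache {
    fn resolver(&self, expensive_parse: impl FnOnce() -> String) -> &str {
        // `get_or_init` runs the closure only if the cell is still empty.
        self.resolver.get_or_init(expensive_parse)
    }
}

fn main() {
    let cache = Cache { resolver: OnceCell::new() };
    let first = cache.resolver(|| "parsed".to_owned());
    // The second call returns the cached value; the closure never runs.
    let second = cache.resolver(|| unreachable!());
    assert_eq!(first, second);
}
```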
- .cell_resolver; - let resolved_path = cell_resolver.resolve_cell_relative_path( - cell_alias, - &file_arg.path, - cell_resolution_state.project_filesystem, - cell_resolution_state.cwd, - ); - let set_result = cell_resolution_state.cell_resolver.set(cell_resolver); - assert!(set_result.is_ok()); - return resolved_path; - } - } - - // Cargo relative file paths are expanded before they make it into the daemon - AbsNormPathBuf::try_from(file_arg.path.to_owned()) - } - - fn process_config_args( - args: &[LegacyConfigCmdArg], - cell_resolution: Option<&CellResolutionState>, - file_ops: &mut dyn ConfigParserFileOps, - ) -> anyhow::Result> { - let resolved_args = args.map(|unprocessed_arg| match unprocessed_arg { - LegacyConfigCmdArg::Flag(value) => { - let resolved_flag = - Self::resolve_config_flag_arg(value, cell_resolution, file_ops)?; - Ok(ResolvedLegacyConfigArg::Flag(resolved_flag)) - } - LegacyConfigCmdArg::File(file) => { - let resolved_path = Self::resolve_config_file_arg(file, cell_resolution, file_ops)?; - Ok(ResolvedLegacyConfigArg::File(resolved_path)) - } - }); - - resolved_args.into_try_map(|x| x) - } - - fn parse_with_file_ops_with_includes( - main_config_files: &[MainConfigFile], - file_ops: &mut dyn ConfigParserFileOps, - config_args: &[ResolvedLegacyConfigArg], - follow_includes: bool, - ) -> anyhow::Result { - let mut parser = LegacyConfigParser::new(file_ops); - let mut cell_path = None; - for main_config_file in main_config_files { - parser.parse_file(&main_config_file.path, None, follow_includes)?; - if main_config_file.owned_by_project { - cell_path = match main_config_file.path.parent() { - Some(cell) => Some(cell), - None => panic!("Encountered invalid .buckconfig directory (no parent)"), - }; - } - } - if cell_path.is_none() { - panic!("Could not find cell path"); - } - - for config_arg in config_args { - match config_arg { - ResolvedLegacyConfigArg::Flag(config_value) => { - parser.apply_config_arg(config_value, cell_path.unwrap().to_buf())? - } - ResolvedLegacyConfigArg::File(file_path) => parser.parse_file( - file_path, - Some(Location::CommandLineArgument), - follow_includes, - )?, - }; - } - - parser.finish() - } - - fn get_config_value(&self, section: &str, key: &str) -> Option<&ConfigValue> { - self.0.values.get(section).and_then(|s| s.values.get(key)) - } - - pub fn get(&self, section: &str, key: &str) -> Option<&str> { - self.get_config_value(section, key).map(|s| s.as_str()) - } - - /// Iterate all entries. - pub fn iter(&self) -> impl Iterator)> { - self.0.values.iter().map(|(section, section_values)| { - ( - section.as_str(), - section_values - .values - .iter() - .map(|(key, value)| (key.as_str(), value.as_str())), - ) - }) - } - - fn parse_impl(section: &str, key: &str, value: &str) -> anyhow::Result - where - anyhow::Error: From<::Err>, - { - value - .parse() - .map_err(anyhow::Error::from) - .with_context(|| ConfigError::ParseFailed { - section: section.to_owned(), - key: key.to_owned(), - value: value.to_owned(), - ty: std::any::type_name::(), - }) - } - - pub fn parse(&self, section: &str, key: &str) -> anyhow::Result> - where - anyhow::Error: From<::Err>, - { - self.get_config_value(section, key) - .map(|s| { - Self::parse_impl(section, key, s.as_str()).with_context(|| { - format!("Defined {}", s.source.as_legacy_buck_config_location()) - }) - }) - .transpose() - } - - pub fn parse_list(&self, section: &str, key: &str) -> anyhow::Result>> - where - anyhow::Error: From<::Err>, - { - /// A wrapper type so we can use .parse() on this. 
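`parse` (and `parse_list`, via the `ParseList` wrapper defined just below) turns any `FromStr` type into a typed accessor whose errors carry the section, key, raw value, target type, and definition site. A hypothetical test of the resulting API, written against this file's `testing::parse` helper:

```rust
#[cfg(test)]
mod typed_access_sketch {
    use super::testing::parse;

    #[test]
    fn typed_reads() -> anyhow::Result<()> {
        let config = parse(
            &[("/config", "[build]\njobs = 6\nwidths = 8,16,32\n")],
            "/config",
        )?;
        // Absent keys are `None`; present keys parse via `FromStr`.
        assert_eq!(Some(6u32), config.parse("build", "jobs")?);
        assert_eq!(None, config.parse::<u32>("build", "missing")?);
        // `parse_list` splits on commas before parsing each element.
        assert_eq!(
            Some(vec![8u32, 16, 32]),
            config.parse_list("build", "widths")?
        );

        // A malformed value fails with context instead of defaulting.
        let bad = parse(&[("/config", "[build]\njobs = six\n")], "/config")?;
        assert!(bad.parse::<u32>("build", "jobs").is_err());
        Ok(())
    }
}
```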
- struct ParseList(Vec); - - impl FromStr for ParseList - where - T: FromStr, - { - type Err = ::Err; - - fn from_str(s: &str) -> Result { - Ok(Self( - s.split(',').map(T::from_str).collect::>()?, - )) - } - } - - Ok(self.parse::>(section, key)?.map(|l| l.0)) - } - pub fn sections(&self) -> impl Iterator { - self.0.values.keys() - } - - pub fn all_sections(&self) -> impl Iterator + '_ { - self.0.values.iter() - } - - pub fn get_section(&self, section: &str) -> Option<&LegacyBuckConfigSection> { - self.0.values.get(section) - } -} - -// Options on how to exactly parse config files -struct BuckConfigParseOptions { - // Defines whether includes are followed, this can significantly reduce parse time. - follow_includes: bool, -} - -fn push_all_files_from_a_directory( - buckconfig_paths: &mut Vec, - folder_path: &AbsNormPath, - owned_by_project: bool, -) -> anyhow::Result<()> { - let readdir = match fs::read_dir(folder_path) { - Ok(p) => p, - Err(e) if e.kind() == std::io::ErrorKind::NotFound => return Ok(()), - Err(e) if e.kind() == std::io::ErrorKind::NotADirectory => { - tracing::warn!( - "Expected a directory of buckconfig files at: `{}`", - folder_path - ); - return Ok(()); - } - Err(e) => { - return Err(anyhow::Error::from(e) - .context(format!("Error reading configs in `{}`", folder_path))); - } - }; - - for entry in readdir { - let entry_path = entry?.path(); - if entry_path.is_file() { - buckconfig_paths.push(MainConfigFile { - path: AbsNormPath::new(&entry_path)?.to_buf(), - owned_by_project, - }); - } else if entry_path.is_dir() { - push_all_files_from_a_directory( - buckconfig_paths, - &AbsNormPathBuf::try_from(entry_path)?, - owned_by_project, - )?; - } else { - tracing::warn!( - "Expected a directory of buckconfig files at `{}`, but this entry was not a file or directory: `{}`", - folder_path, - entry_path.display() - ); - } - } - - Ok(()) -} - -pub mod testing { - use std::cmp::min; - - use super::*; - - pub fn legacy_buck_config_from_entries<'a>( - entries: impl IntoIterator, - ) -> anyhow::Result { - let mut values: BTreeMap = BTreeMap::new(); - for (section, key, value) in entries { - values - .entry(section.to_owned()) - .or_default() - .values - .insert(key.to_owned(), ConfigValue::new_raw_arg(value.to_owned())); - } - let values = ConfigResolver::resolve(values)?; - Ok(LegacyBuckConfig(Arc::new(ConfigData { values }))) - } - - pub fn parse(data: &[(&str, &str)], path: &str) -> anyhow::Result { - parse_with_config_args(data, path, &[]) - } - - pub fn parse_with_config_args( - data: &[(&str, &str)], - path: &str, - config_args: &[LegacyConfigCmdArg], - ) -> anyhow::Result { - let mut file_ops = TestConfigParserFileOps::new(data)?; - #[cfg(not(windows))] - let path = &AbsNormPathBuf::from(path.into())?; - // Need to add some disk drive on Windows to make path absolute. - #[cfg(windows)] - let path = &AbsNormPathBuf::from(format!("C:{}", path))?; - LegacyBuckConfig::parse_with_file_ops(path, &mut file_ops, config_args) - } - - pub struct TestConfigParserFileOps { - data: HashMap, - } - - impl TestConfigParserFileOps { - pub fn new(data: &[(&str, &str)]) -> anyhow::Result { - let mut holder_data = HashMap::new(); - for (file, content) in data { - #[cfg(not(windows))] - let file_path = (*file).to_owned(); - // Need to add some disk drive on Windows to make path absolute. 
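`push_all_files_from_a_directory` above encodes a deliberately tolerant policy: a missing `.buckconfig.d` directory contributes nothing, a file where a directory was expected only warns, and nested directories are flattened into one ordered file list. A dependency-free sketch of the same walk:

```rust
use std::fs;
use std::io;
use std::path::{Path, PathBuf};

fn collect_files(dir: &Path, out: &mut Vec<PathBuf>) -> io::Result<()> {
    let entries = match fs::read_dir(dir) {
        Ok(entries) => entries,
        // Missing directory: nothing to load, not an error.
        Err(e) if e.kind() == io::ErrorKind::NotFound => return Ok(()),
        Err(e) => return Err(e),
    };
    for entry in entries {
        let path = entry?.path();
        if path.is_file() {
            out.push(path);
        } else if path.is_dir() {
            collect_files(&path, out)?; // recurse into nested config dirs
        }
    }
    Ok(())
}

fn main() -> io::Result<()> {
    let mut found = Vec::new();
    collect_files(Path::new("/etc/buckconfig.d"), &mut found)?;
    println!("{} config file(s)", found.len());
    Ok(())
}
```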
- #[cfg(windows)] - let file_path = format!("C:{}", file); - holder_data.insert(AbsNormPathBuf::from(file_path)?, (*content).to_owned()); - } - Ok(TestConfigParserFileOps { data: holder_data }) - } - } - - impl ConfigParserFileOps for TestConfigParserFileOps { - fn file_exists(&self, path: &AbsNormPath) -> bool { - self.data.contains_key(path) - } - - fn read_file_lines( - &mut self, - path: &AbsNormPath, - ) -> anyhow::Result< - Box<(dyn std::iter::Iterator> + 'static)>, - > { - let content = self - .data - .get(path) - .ok_or_else(|| anyhow::anyhow!("didn't have data for {:?}", path))? - .to_owned(); - // Need a Read implementation that owns the bytes. - struct StringReader(Vec, usize); - impl Read for StringReader { - fn read(&mut self, buf: &mut [u8]) -> Result { - let remaining = self.0.len() - self.1; - let to_return = min(remaining, buf.len()); - buf[..to_return].clone_from_slice(&self.0[self.1..self.1 + to_return]); - self.1 += to_return; - Ok(to_return) - } - } - let file = std::io::BufReader::new(StringReader(content.into_bytes(), 0)); - Ok(Box::new(file.lines())) - } - } -} - -#[cfg(test)] -mod tests { - use buck2_core::fs::paths::abs_path::AbsPath; - use indoc::indoc; - use itertools::Itertools; - - use super::testing::*; - use super::*; - - pub(crate) fn assert_config_value( - config: &LegacyBuckConfig, - section: &str, - key: &str, - expected: &str, - ) { - match config.get_section(section) { - None => { - panic!( - "Expected config to have section `{}`, but had sections `<{}>`", - section, - config.sections().join(", ") - ); - } - Some(values) => match values.get(key) { - None => panic!( - "Expected section `{}` to have key `{}`, but had keys `<{}>`", - section, - key, - values.keys().join(", ") - ), - Some(v) if v.as_str() != expected => { - panic!( - "Expected `{}.{}` to have value `{}`. Got `{}`.", - section, - key, - expected, - v.as_str() - ); - } - _ => {} - }, - } - } - - fn assert_config_value_is_empty(config: &LegacyBuckConfig, section: &str, key: &str) { - match config.get_section(section) { - Some(values) => match values.get(key) { - Some(v) => { - panic!( - "Expected `{}.{}` to not exist. Got `{}` for value.", - section, - key, - v.as_str() - ); - } - _ => {} - }, - _ => {} - }; - } - - #[test] - fn test_simple() -> anyhow::Result<()> { - let config = parse( - &[( - "/config", - indoc!( - r#" - [section] - int = 1 - string = hello - multiline = hello \ - world\ - ! - - # this is a comment - commented = okay - - [new_section] - overridden = 1 - - [another_section] - some_val = 2 - - [new_section] - reopened = ok - # override overridden - overridden = 3 - - # note trailing whitespace - [bad_formatting] - - value = 1 - "# - ), - )], - "/config", - )?; - - assert_eq!(None, config.get("section", "missing")); - assert_eq!(None, config.get("missing", "int")); - assert_config_value(&config, "section", "int", "1"); - assert_config_value(&config, "section", "string", "hello"); - // Note that lines are all trimmed, so leading whitespace after a newline is - // dropped. 
- assert_config_value(&config, "section", "multiline", "hello world!"); - assert_config_value(&config, "section", "commented", "okay"); - assert_config_value(&config, "another_section", "some_val", "2"); - assert_config_value(&config, "new_section", "reopened", "ok"); - assert_config_value(&config, "new_section", "overridden", "3"); - assert_config_value(&config, "bad_formatting", "value", "1"); - Ok(()) - } - - #[test] - fn test_comments() -> anyhow::Result<()> { - let config = parse( - &[( - "/config", - indoc!( - r#" - [section1] # stuff - key1 = value1 - [section2#name] - key2 = value2 - "# - ), - )], - "/config", - )?; - assert_config_value(&config, "section1", "key1", "value1"); - assert_config_value(&config, "section2#name", "key2", "value2"); - Ok(()) - } - - #[test] - fn test_references() -> anyhow::Result<()> { - let config = parse( - &[( - "/config", - indoc!( - r#" - - [section1] - ref1_1 = ref1_1<$(config section3.ref3_2)> - - [section2] - ref2_1 = ref2_1<$(config section3.ref3_1)> - ref2_2 = ref2_2<$(config section2.ref2_1)> - [section3] - ref3_1 = ref3_1<$(config section1.ref1_1), $(config section3.ref3_2)> - ref3_2 = ref3_2 - - [simple] - s1 = $(config simple.s2)$(config simple.s2)$(config simple.s2) - s2 = $(config simple.s3)$(config simple.s3)$(config simple.s3) - s3 = x - "# - ), - )], - "/config", - )?; - - assert_config_value( - &config, - "section2", - "ref2_2", - "ref2_2, ref3_2>>>", - ); - - assert_config_value(&config, "simple", "s1", "xxxxxxxxx"); - Ok(()) - } - - #[test] - fn test_reference_cycle() -> anyhow::Result<()> { - let res = parse( - &[( - "/config", - indoc!( - r#" - - [x] - a = $(config x.b) - b = $(config x.c) - c = $(config x.d) - d = $(config x.e) - e = $(config x.f) - f = $(config x.g) - g = $(config x.d) - "# - ), - )], - "/config", - ); - - match res { - Ok(_) => panic!("Expected failure."), - Err(e) => { - let message = e.to_string(); - let cycle = "`x.d` -> `x.e` -> `x.f` -> `x.g` -> `x.d`"; - assert!( - message.contains(cycle), - "Expected error to contain \"{}\", but was `{}`", - cycle, - message - ); - } - } - - Ok(()) - } - - #[test] - fn test_includes() -> anyhow::Result<()> { - let config = parse( - &[ - ( - "/base", - indoc!( - r#" - base = okay! - "# - ), - ), - ( - "/section", - indoc!( - r#" - [section] - "# - ), - ), - ( - "/some/deep/dir/includes_base", - indoc!( - r#" - - "# - ), - ), - ( - "/includes_section", - indoc!( - r#" - - "# - ), - ), - ( - "/config", - indoc!( - r#" - # use a couple optional includes in here to ensure those work when the file exists. - [opened_section] - # include into an already open section - - # start a section with an include - - key = wild - - [other_section] - # ensure can reopen section with an include - - other_key=wildtoo - - # Check that an optional include for a file that doesn't exist is okay. - - "# - ), - ), - ( - "/test_bad_include", - indoc!( - r#" - - "# - ), - ), - ], - "/config", - )?; - - assert_config_value(&config, "opened_section", "base", "okay!"); - assert_config_value(&config, "section", "base", "okay!"); - // Note that lines are all trimmed, so leading whitespace after a newline is - // dropped. 
- assert_config_value(&config, "section", "key", "wild"); - assert_config_value(&config, "section", "other_key", "wildtoo"); - Ok(()) - } - - #[test] - fn test_config_args_ordering() -> anyhow::Result<()> { - let config_args = vec![ - LegacyConfigCmdArg::flag("apple.key=value1")?, - LegacyConfigCmdArg::flag("apple.key=value2")?, - ]; - let config = - parse_with_config_args(&[("/config", indoc!(r#""#))], "/config", &config_args)?; - assert_config_value(&config, "apple", "key", "value2"); - - Ok(()) - } - - #[test] - fn test_config_args_empty() -> anyhow::Result<()> { - let config_args = vec![LegacyConfigCmdArg::flag("apple.key=")?]; - let config = - parse_with_config_args(&[("/config", indoc!(r#""#))], "/config", &config_args)?; - assert_config_value_is_empty(&config, "apple", "key"); - - Ok(()) - } - - #[test] - fn test_config_args_overwrite_config_file() -> anyhow::Result<()> { - let config_args = vec![LegacyConfigCmdArg::flag("apple.key=value2")?]; - let config = parse_with_config_args( - &[( - "/config", - indoc!( - r#" - [apple] - key = value1 - "# - ), - )], - "/config", - &config_args, - )?; - - assert_config_value(&config, "apple", "key", "value2"); - - let apple_section = config.get_section("apple").unwrap(); - let key_value = apple_section.get("key").unwrap(); - assert_eq!( - key_value.location(), - LegacyBuckConfigLocation::CommandLineArgument - ); - - Ok(()) - } - - #[test] - fn test_argument_pair() -> anyhow::Result<()> { - // Valid Formats - - let normal_pair = parse_config_arg("apple.key=value")?; - - assert_eq!("apple", normal_pair.section); - assert_eq!("key", normal_pair.key); - assert_eq!(Some("value".to_owned()), normal_pair.value); - - let unset_pair = parse_config_arg("apple.key=")?; - - assert_eq!("apple", unset_pair.section); - assert_eq!("key", unset_pair.key); - assert_eq!(None, unset_pair.value); - - // Whitespace - - let section_leading_whitespace = parse_config_arg(" apple.key=value")?; - assert_eq!("apple", section_leading_whitespace.section); - assert_eq!("key", section_leading_whitespace.key); - assert_eq!(Some("value".to_owned()), section_leading_whitespace.value); - - let pair_with_whitespace_in_key = parse_config_arg("apple. key=value"); - assert!(pair_with_whitespace_in_key.is_err()); - - let pair_with_whitespace_in_value = parse_config_arg("apple.key= value with whitespace ")?; - assert_eq!("apple", pair_with_whitespace_in_value.section); - assert_eq!("key", pair_with_whitespace_in_value.key); - assert_eq!( - Some(" value with whitespace ".to_owned()), - pair_with_whitespace_in_value.value - ); - - // Invalid Formats - - let pair_without_section = parse_config_arg("key=value"); - assert!(pair_without_section.is_err()); - - let pair_without_equals = parse_config_arg("apple.keyvalue"); - assert!(pair_without_equals.is_err()); - - let pair_without_section_or_equals = parse_config_arg("applekeyvalue"); - assert!(pair_without_section_or_equals.is_err()); - - Ok(()) - } - - #[test] - fn test_section_and_key() -> anyhow::Result<()> { - // Valid Formats - - let normal_section_and_key = parse_config_section_and_key("apple.key", None)?; - - assert_eq!("apple", normal_section_and_key.section); - assert_eq!("key", normal_section_and_key.key); - - // Whitespace - - let section_leading_whitespace = parse_config_section_and_key(" apple.key", None)?; - assert_eq!("apple", section_leading_whitespace.section); - assert_eq!("key", section_leading_whitespace.key); - - let pair_with_whitespace_in_key = parse_config_section_and_key("apple. 
key", None); - assert!(pair_with_whitespace_in_key.is_err()); - - // Invalid Formats - - let pair_without_dot = parse_config_section_and_key("applekey", None); - assert!(pair_without_dot.is_err()); - - Ok(()) - } - - #[test] - fn test_config_file_args_overwrite_config_file() -> anyhow::Result<()> { - #[cfg(not(windows))] - let file_arg = "/cli-config"; - #[cfg(windows)] - let file_arg = "C:/cli-config"; - let config_args = vec![ - LegacyConfigCmdArg::flag("apple.key=value3")?, - LegacyConfigCmdArg::file(file_arg)?, - ]; - let config = parse_with_config_args( - &[ - ( - "/config", - indoc!( - r#" - [apple] - key = value1 - "# - ), - ), - ( - "/cli-config", - indoc!( - r#" - [apple] - key = value2 - "# - ), - ), - ], - "/config", - &config_args, - )?; - - assert_config_value(&config, "apple", "key", "value2"); - - let apple_section = config.get_section("apple").unwrap(); - let key_value = apple_section.get("key").unwrap(); - #[cfg(not(windows))] - let expected_path = LegacyBuckConfigLocation::File("/cli-config", 2); - #[cfg(windows)] - let expected_path = LegacyBuckConfigLocation::File("C:/cli-config", 2); - assert_eq!(key_value.location(), expected_path); - - Ok(()) - } - - #[test] - fn test_config_args_cell_in_value() -> anyhow::Result<()> { - let config_args = vec![LegacyConfigCmdArg::flag("apple.key=foo//value1")?]; - let config = - parse_with_config_args(&[("/config", indoc!(r#""#))], "/config", &config_args)?; - assert_config_value(&config, "apple", "key", "foo//value1"); - - Ok(()) - } - - mod test_push_all_files_from_a_directory { - use buck2_core::fs::fs_util; - - use super::*; - - #[test] - fn dir_with_file() -> anyhow::Result<()> { - let mut v = vec![]; - let dir = tempfile::tempdir()?; - let root = AbsPath::new(dir.path())?; - let file = root.join("foo"); - fs_util::write(&file, "")?; - - let file = AbsNormPath::new(&file)?; - let dir = AbsNormPath::new(&dir)?; - - push_all_files_from_a_directory(&mut v, dir, false)?; - assert_eq!( - v, - vec![MainConfigFile { - path: file.to_owned(), - owned_by_project: false, - }] - ); - - Ok(()) - } - - #[test] - fn empty_dir() -> anyhow::Result<()> { - let mut v = vec![]; - let dir = tempfile::tempdir()?; - let dir = AbsNormPath::new(&dir)?; - - push_all_files_from_a_directory(&mut v, dir, false)?; - assert_eq!(v, vec![]); - - Ok(()) - } - - #[test] - fn non_existent_dir() -> anyhow::Result<()> { - let mut v = vec![]; - let dir = tempfile::tempdir()?; - let dir = dir.path().join("bad"); - let dir = AbsNormPath::new(&dir)?; - - push_all_files_from_a_directory(&mut v, dir, false)?; - assert_eq!(v, vec![]); - - Ok(()) - } - - #[test] - fn dir_in_dir() -> anyhow::Result<()> { - let mut v = vec![]; - let dir = tempfile::tempdir()?; - let dir = AbsPath::new(dir.path())?; - fs_util::create_dir_all(dir.join("bad"))?; - - push_all_files_from_a_directory(&mut v, AbsNormPath::new(dir)?, false)?; - assert_eq!(v, vec![]); - - Ok(()) - } - - #[test] - fn file() -> anyhow::Result<()> { - let mut v = vec![]; - let file = tempfile::NamedTempFile::new()?; - let file = AbsNormPath::new(file.path())?; - - push_all_files_from_a_directory(&mut v, file, false)?; - assert_eq!(v, vec![]); - - Ok(()) - } - - #[test] - fn dir_with_file_in_dir() -> anyhow::Result<()> { - let mut v = vec![]; - let dir = tempfile::tempdir()?; - let dir = AbsPath::new(dir.path())?; - let nested_dir = dir.join("nested"); - fs_util::create_dir_all(&nested_dir)?; - let file = nested_dir.join("foo"); - fs_util::write(&file, "")?; - - let file = AbsNormPath::new(&file)?; - let dir = 
AbsNormPath::new(&dir)?; - - push_all_files_from_a_directory(&mut v, dir, false)?; - assert_eq!( - v, - vec![MainConfigFile { - path: file.to_owned(), - owned_by_project: false, - }] - ); - - Ok(()) - } - } - - #[test] - fn test_arg_display() -> anyhow::Result<()> { - assert_eq!( - LegacyConfigCmdArg::flag("foo.bar=baz")?.to_string(), - "foo.bar=baz" - ); - assert_eq!( - LegacyConfigCmdArg::flag("foo//bar.baz=")?.to_string(), - "foo//bar.baz=" - ); - assert_eq!(LegacyConfigCmdArg::file("foo")?.to_string(), "foo"); - assert_eq!( - LegacyConfigCmdArg::file("foo//bar")?.to_string(), - "foo//bar" - ); - Ok(()) - } -} diff --git a/app/buck2_common/src/legacy_configs/parser.rs b/app/buck2_common/src/legacy_configs/parser.rs new file mode 100644 index 0000000000000..fe4e4723f6bd8 --- /dev/null +++ b/app/buck2_common/src/legacy_configs/parser.rs @@ -0,0 +1,397 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::collections::BTreeMap; +use std::sync::Arc; + +use allocative::Allocative; +use anyhow::Context; +use buck2_core::cells::cell_root_path::CellRootPath; +use buck2_core::fs::paths::abs_norm_path::AbsNormPath; +use buck2_core::fs::paths::RelativePath; +use dupe::Dupe; +use futures::future::BoxFuture; +use futures::FutureExt; +use itertools::Itertools; +use once_cell::sync::Lazy; +use regex::Regex; +use starlark_map::sorted_map::SortedMap; + +use crate::legacy_configs::args::ResolvedConfigFlag; +use crate::legacy_configs::configs::ConfigArgumentParseError; +use crate::legacy_configs::configs::ConfigData; +use crate::legacy_configs::configs::ConfigFileLocation; +use crate::legacy_configs::configs::ConfigFileLocationWithLine; +use crate::legacy_configs::configs::ConfigValue; +use crate::legacy_configs::configs::LegacyBuckConfig; +use crate::legacy_configs::configs::LegacyBuckConfigSection; +use crate::legacy_configs::configs::Location; +use crate::legacy_configs::file_ops::ConfigParserFileOps; +use crate::legacy_configs::file_ops::ConfigPath; +use crate::legacy_configs::parser::resolver::ConfigResolver; + +mod resolver; + +#[derive(buck2_error::Error, Debug)] +#[buck2(input)] +enum ConfigError { + #[error("Expected line of the form `key = value` but key was empty. Line was `{0}`")] + EmptyKey(String), + #[error("Included file doesn't exist `{0}`")] + MissingInclude(String), + #[error("Improperly formatted section. Expected something of the form `[section]`, got {0}")] + SectionMissingTrailingBracket(String), + #[error("Improperly include directive path. Got {0}")] + BadIncludePath(String), + #[error( + "Couldn't parse line. Expected include directive (``), section(`[some_section]`), or key assignment (`some_key = some_value`). 
Got `{0}`" + )] + InvalidLine(String), + #[error("Detected cycles in buckconfig $(config) references: {}", format_cycle(.0))] + ReferenceCycle(Vec<(String, String)>), +} + +fn format_cycle(cycle: &[(String, String)]) -> String { + cycle + .iter() + .map(|(section, key)| format!("`{}.{}`", section, key)) + .join(" -> ") +} + +#[derive(Debug, Default, Clone, PartialEq, Eq, Allocative)] +struct SectionBuilder { + values: BTreeMap, +} + +impl SectionBuilder { + fn finish(self) -> LegacyBuckConfigSection { + LegacyBuckConfigSection { + values: SortedMap::from_iter(self.values), + } + } +} + +/// Represents the state associated with a buckconfig that is being parsed right now. +/// +/// A buckconfig will generally be parsed by combining multiple command args and files +#[derive(Debug, Clone, PartialEq, Eq, Allocative)] +pub(crate) struct LegacyConfigParser { + values: BTreeMap, +} + +/// Represents the state associated with parsing a single file into a buckconfig. +struct LegacyConfigFileParser<'p> { + include_stack: Vec, + current_file: Option>, + current_section: (String, BTreeMap), + values: &'p mut LegacyConfigParser, +} + +/// Matches file include directives. `optional` indicates whether it's an +/// optional include, `include` is the path. Examples: +/// +/// +/// +/// +static FILE_INCLUDE: Lazy = + Lazy::new(|| Regex::new("<(?P\\?)?file:(?P..*)>").unwrap()); + +impl LegacyConfigParser { + pub(crate) fn new() -> Self { + LegacyConfigParser { + values: BTreeMap::new(), + } + } + + pub(crate) async fn parse_file( + &mut self, + path: &ConfigPath, + source: Option, + follow_includes: bool, + file_ops: &mut dyn ConfigParserFileOps, + ) -> anyhow::Result<()> { + let mut file_parser = LegacyConfigFileParser::new(self); + file_parser.start_file(path, source)?; + file_parser + .parse_file_on_stack(path, follow_includes, file_ops) + .await + .with_context(|| format!("Error parsing buckconfig `{}`", path))?; + file_parser.finish_file(); + + Ok(()) + } + + pub(crate) fn apply_config_arg( + &mut self, + config_pair: &ResolvedConfigFlag, + current_cell: &CellRootPath, + ) -> anyhow::Result<()> { + for banned_section in ["repositories", "cells"] { + if config_pair.section == banned_section { + return Err( + ConfigArgumentParseError::CellOverrideViaCliConfig(banned_section).into(), + ); + }; + } + let pair = config_pair.to_owned(); + let cell_matches = pair.cell.as_deref() == Some(current_cell) || pair.cell.is_none(); + if cell_matches { + let config_section = self + .values + .entry(pair.section) + .or_insert_with(SectionBuilder::default); + + match pair.value { + Some(raw_value) => { + let config_value = ConfigValue::new_raw_arg(raw_value); + config_section.values.insert(pair.key, config_value) + } + None => config_section.values.remove(&pair.key), + }; + } + Ok(()) + } + + pub(crate) fn finish(self) -> anyhow::Result { + let LegacyConfigParser { values } = self; + + let values = ConfigResolver::resolve(values)?; + + Ok(LegacyBuckConfig(Arc::new(ConfigData { values }))) + } + + pub(crate) fn join(&mut self, other: &LegacyConfigParser) { + for (section, section_builder) in other.values.iter() { + for (key, value) in section_builder.values.iter() { + self.values + .entry(section.to_owned()) + .or_insert_with(SectionBuilder::default) + .values + .insert(key.to_owned(), value.clone()); + } + } + } +} + +impl<'p> LegacyConfigFileParser<'p> { + fn new(values: &'p mut LegacyConfigParser) -> Self { + LegacyConfigFileParser { + include_stack: Vec::new(), + current_file: None, + current_section: 
Self::unspecified_section(), + values, + } + } + + fn unspecified_section() -> (String, BTreeMap) { + ("__unspecified__".to_owned(), BTreeMap::new()) + } + + fn push_file(&mut self, line: usize, path: &ConfigPath) -> anyhow::Result<()> { + let include_source = ConfigFileLocationWithLine { + source_file: self.current_file.dupe().unwrap_or_else(|| panic!("push_file() called without any files on the include stack. top-level files should use start_file()")), + line, + }; + + self.include_stack.push(include_source.clone()); + + let source_file = Arc::new(ConfigFileLocation { + path: path.to_string(), + include_source: Some(Location::File(include_source)), + }); + self.current_file = Some(source_file); + Ok(()) + } + + fn start_file(&mut self, path: &ConfigPath, source: Option) -> anyhow::Result<()> { + let source_file = Arc::new(ConfigFileLocation { + path: path.to_string(), + include_source: source, + }); + self.current_file = Some(source_file); + Ok(()) + } + + fn pop_file(&mut self) { + match self.include_stack.pop() { + Some(loc) => { + self.current_file = Some(loc.source_file); + } + None => { + self.current_file = None; + } + } + } + + fn location(&self, line_number: usize) -> ConfigFileLocationWithLine { + ConfigFileLocationWithLine { + source_file: self + .current_file + .dupe() + .unwrap_or_else(|| panic!("tried to get location without any current file.")), + // Our line numbers at this point are 0-based, but most people expect file line numbers to be 1-based. + line: line_number + 1, + } + } + + /// Return value indicates whether the file existed or not + fn parse_file_on_stack<'a>( + &'a mut self, + config_path: &'a ConfigPath, + parse_includes: bool, + file_ops: &'a mut dyn ConfigParserFileOps, + ) -> BoxFuture<'a, anyhow::Result> { + async move { + let Some(file_lines) = file_ops.read_file_lines_if_exists(config_path).await? else { + return Ok(false); + }; + self.parse_lines(config_path, file_lines, parse_includes, file_ops) + .await?; + Ok(true) + } + .boxed() + } + + fn strip_line_comment(line: &str) -> &str { + match line.split_once(" #") { + Some((before, _)) => before, + None => line, + } + } + + fn parse_section_marker(line: &str) -> anyhow::Result> { + // We allow trailing comment markers at the end of sections, since otherwise + // using oss-enable/oss-disable is super tricky + match line.strip_prefix('[') { + Some(remaining) => match Self::strip_line_comment(remaining).strip_suffix(']') { + None => Err(ConfigError::SectionMissingTrailingBracket(line.to_owned()).into()), + Some(section) => Ok(Some(section)), + }, + None => Ok(None), + } + } + + async fn parse_lines( + &mut self, + config_path: &ConfigPath, + lines: T, + parse_includes: bool, + file_ops: &mut dyn ConfigParserFileOps, + ) -> anyhow::Result<()> + where + T: IntoIterator>, + E: std::error::Error + Send + Sync + 'static, + { + let lines: Vec = lines.into_iter().collect::, _>>()?; + + let lines = lines + .into_iter() + // Trim leading/trailing whitespace. + .map(|line| line.trim().to_owned()) + // add line numbers + .enumerate() + // Coalesce escaped newlines. + .coalesce(|(i, mut prev), (j, next)| { + if prev.ends_with('\\') { + prev.truncate(prev.len() - 1); + prev.push_str(&next); + Ok((i, prev)) + } else { + Err(((i, prev), (j, next))) + } + }) + // Remove commented lines. 
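In the new parser, `parse_file_on_stack` returns a `BoxFuture` rather than being a plain `async fn` because includes make it recursive, and a directly recursive `async fn` would have an infinitely sized state machine. Boxing the future breaks that cycle. The pattern in isolation (requires the `futures` crate, as used above):

```rust
use futures::future::BoxFuture;
use futures::FutureExt;

fn countdown(n: u32) -> BoxFuture<'static, u32> {
    async move {
        if n == 0 {
            0
        } else {
            // Recursion goes through a heap-allocated, dynamically sized
            // future, so the compiler can size this state machine.
            countdown(n - 1).await + 1
        }
    }
    .boxed()
}

fn main() {
    let depth = futures::executor::block_on(countdown(5));
    assert_eq!(depth, 5);
}
```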
+ // This needs to come after the coalesce in case someone has an empty line after an escaped newline + // Remove empty lines and comment lines (support both '#' and ';' for comment lines) + .filter(|(_, l)| !l.is_empty() && !l.starts_with('#') && !l.starts_with(';')); + + for (i, line) in lines { + if let Some(section) = Self::parse_section_marker(&line)? { + // Start the new section, grabbing the recorded values for the previous + // section. + let section = std::mem::replace( + &mut self.current_section, + (section.to_owned(), BTreeMap::new()), + ); + self.commit_section(section) + } else if let Some((key, val)) = line.split_once('=') { + let key = key.trim(); + let val = val.trim(); + if key.is_empty() { + return Err(anyhow::anyhow!(ConfigError::EmptyKey(line.to_owned()))); + } + self.current_section.1.insert( + key.to_owned(), + ConfigValue::new_raw(self.location(i), val.to_owned()), + ); + } else if let Some(m) = FILE_INCLUDE.captures(&line) { + if parse_includes { + let include = m.name("include").unwrap().as_str(); + let include = if cfg!(windows) && include.contains(':') { + // On Windows absolute includes look like /C:/foo/bar. + // For compatibility with Python parser we need to support this. + include.trim_start_matches('/') + } else { + include + }; + let optional = m.name("optional").is_some(); + // Note: Using `AbsNormPath` to preserve existing behavior of requiring normalized paths + let include_file = if let Ok(absolute) = AbsNormPath::new(include) { + ConfigPath::Global(absolute.to_owned().into_abs_path_buf()) + } else { + let relative = RelativePath::new(include); + match config_path.join_to_parent_normalized(relative) { + Ok(d) => d, + Err(_) => { + return Err(anyhow::anyhow!(ConfigError::BadIncludePath( + include.to_owned() + ))); + } + } + }; + + self.push_file(i, &include_file)?; + let exists = self + .parse_file_on_stack(&include_file, parse_includes, file_ops) + .await?; + self.pop_file(); + + if !exists && !optional { + return Err(anyhow::anyhow!(ConfigError::MissingInclude( + include.to_owned() + ))); + } + } + } else { + return Err(anyhow::anyhow!(ConfigError::InvalidLine(line.to_owned()))); + } + } + Ok(()) + } + + fn commit_section(&mut self, section: (String, BTreeMap)) { + let (section, values) = section; + // Commit the previous section. + let committed = self + .values + .values + .entry(section) + .or_insert_with(SectionBuilder::default); + values.into_iter().for_each(|(k, v)| { + committed.values.insert(k, v); + }); + } + + fn finish_file(&mut self) { + self.pop_file(); + + let section = std::mem::replace(&mut self.current_section, Self::unspecified_section()); + self.commit_section(section); + } +} diff --git a/app/buck2_common/src/legacy_configs/parser/resolver.rs b/app/buck2_common/src/legacy_configs/parser/resolver.rs new file mode 100644 index 0000000000000..3daf50f9849f4 --- /dev/null +++ b/app/buck2_common/src/legacy_configs/parser/resolver.rs @@ -0,0 +1,215 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use std::collections::BTreeMap; + +use once_cell::sync::Lazy; +use regex::Regex; +use starlark_map::sorted_map::SortedMap; + +use crate::legacy_configs::configs::parse_config_section_and_key; +use crate::legacy_configs::configs::LegacyBuckConfigSection; +use crate::legacy_configs::configs::ResolvedValue; +use crate::legacy_configs::parser::ConfigError; +use crate::legacy_configs::parser::SectionBuilder; + +// Since we can't change other entries in values while we iterate over the configuration, we use +// ResolvedItems to store information about recursive resolutions and the current resolution stack. +struct ResolvedItems( + // Maintains map of items that are resolved in the process of resolving requested items. + BTreeMap>, + // Maintains the resolution stack to provide error messages when a cycle is detected. + Vec<(String, String)>, +); + +enum ResolveState { + Resolving, + Done(String), +} + +impl ResolvedItems { + fn start_resolving(&mut self, section: &str, key: &str) -> anyhow::Result<()> { + let section_values = match self.0.get_mut(section) { + Some(v) => v, + None => { + self.0.insert(section.to_owned(), BTreeMap::new()); + self.0.get_mut(section).unwrap() + } + }; + + if section_values + .insert(key.to_owned(), ResolveState::Resolving) + .is_some() + { + return Err(anyhow::anyhow!(self.cycle_error(section, key))); + } + + self.1.push((section.to_owned(), key.to_owned())); + + Ok(()) + } + + fn finish_resolving(&mut self, section: &str, key: &str, value: String) { + let entry = self.0.get_mut(section).unwrap().get_mut(key).unwrap(); + *entry = ResolveState::Done(value); + self.1.pop(); + } + + fn cycle_error(&self, section: &str, key: &str) -> ConfigError { + let mut iter = self.1.iter(); + for v in &mut iter { + if v.0 == section && v.1 == key { + break; + } + } + + let mut cycle = vec![(section.to_owned(), key.to_owned())]; + cycle.extend(iter.cloned()); + cycle.push((section.to_owned(), key.to_owned())); + + ConfigError::ReferenceCycle(cycle) + } + + fn get(&self, section: &str, key: &str) -> Option<&String> { + self.0 + .get(section) + .and_then(|e| e.get(key)) + .and_then(|e| match e { + ResolveState::Resolving => None, + ResolveState::Done(v) => Some(v), + }) + } + + fn drain_to(self, value: &mut BTreeMap) -> anyhow::Result<()> { + assert!(self.1.is_empty(), "All values should have been resolved."); + for (section, items) in self.0.into_iter() { + let result_section = value.get_mut(§ion).unwrap_or_else( + || panic!("Shouldn't have a resolved value for something that doesn't appear in the base config")); + for (key, value) in items.into_iter() { + match value { + ResolveState::Resolving => { + unreachable!("All values should have been resolved."); + } + ResolveState::Done(v) => { + result_section.values.get_mut(&key).unwrap().resolved_value = + ResolvedValue::Resolved(v); + } + } + } + } + + Ok(()) + } +} + +pub struct ConfigResolver { + values: BTreeMap, +} + +impl ConfigResolver { + pub fn resolve( + values: BTreeMap, + ) -> anyhow::Result> { + let mut resolver = Self { values }; + resolver.resolve_all()?; + Ok(SortedMap::from_iter( + resolver.values.into_iter().map(|(k, v)| (k, v.finish())), + )) + } + + fn resolve_all(&mut self) -> anyhow::Result<()> { + // First, identify all the values that need to be resolved and mark all the others as literals. + let mut to_resolve = Vec::new(); + for (section_name, section) in &mut self.values { + for (key, value) in &mut section.values { + // if it's been resolved already, move the resolved value into values. 
+ if Self::regex().is_match(value.raw_value()) { + to_resolve.push((section_name.to_owned(), key.to_owned())); + } else { + value.resolved_value = ResolvedValue::Literal; + } + } + } + + // Now, resolve all the items. + for (section, key) in to_resolve { + let mut resolved_items = ResolvedItems(BTreeMap::new(), Vec::new()); + self.resolve_item(&mut resolved_items, §ion, &key)?; + resolved_items.drain_to(&mut self.values)?; + } + Ok(()) + } + + fn regex() -> &'static Regex { + static RE: Lazy = Lazy::new(|| Regex::new(r"\$\(config ([^)]*)\)").unwrap()); + &RE + } + + fn resolve_item<'a>( + &'a self, + resolved_items: &'a mut ResolvedItems, + section: &str, + key: &str, + ) -> anyhow::Result<&'a str> { + let raw_value = match self.values.get(section).and_then(|e| e.values.get(key)) { + None => return Ok(""), + Some(v) => match &v.resolved_value { + ResolvedValue::Unknown => v.raw_value(), + ResolvedValue::Literal => { + return Ok(v.raw_value()); + } + ResolvedValue::Resolved(v) => { + return Ok(v); + } + }, + }; + + if resolved_items.get(section, key).is_none() { + resolved_items.start_resolving(section, key)?; + let v = self.do_resolve(resolved_items, raw_value)?; + resolved_items.finish_resolving(section, key, v); + } + + Ok(resolved_items.get(section, key).unwrap()) + } + + fn do_resolve( + &self, + resolved_items: &mut ResolvedItems, + raw_value: &str, + ) -> anyhow::Result { + let mut resolved = String::new(); + let mut last = 0; + + let re = Self::regex(); + + // TODO(cjhopman): Should add support for escaping the call, I guess. + for capture in re.captures_iter(raw_value) { + let m = capture.get(0).unwrap(); + + resolved.push_str(&raw_value[last..m.start()]); + last = m.end(); + + let captures = re.captures(m.as_str()).unwrap(); + + let config_key = captures.get(1).unwrap().as_str(); + + let config_section_and_key = parse_config_section_and_key(config_key, None)?; + + resolved.push_str(self.resolve_item( + resolved_items, + &config_section_and_key.section, + &config_section_and_key.key, + )?); + } + + resolved.push_str(&raw_value[last..]); + Ok(resolved) + } +} diff --git a/app/buck2_common/src/legacy_configs/path.rs b/app/buck2_common/src/legacy_configs/path.rs index cca9088f68eaf..8bad51b16fc76 100644 --- a/app/buck2_common/src/legacy_configs/path.rs +++ b/app/buck2_common/src/legacy_configs/path.rs @@ -7,13 +7,7 @@ * of this source tree. */ -pub(crate) enum BuckConfigFile { - // Buckconfig file in the cell relative to project root, such as .buckconfig or .buckconfig.local - ProjectRelativeFile(&'static str), - - // Buckconfig folder in the cell, assuming all files in this folder are buckconfig - ProjectRelativeFolder(&'static str), - +pub(crate) enum ExternalConfigSource { // Buckconfig file in the user's home directory UserFile(&'static str), @@ -27,40 +21,34 @@ pub(crate) enum BuckConfigFile { GlobalFolder(&'static str), } -impl BuckConfigFile { - /// Returns whether this specific BuckConfigFile is external to the current project. - pub fn is_external(&self) -> bool { - match self { - Self::ProjectRelativeFile(..) | Self::ProjectRelativeFolder(..) => false, - Self::UserFile(..) - | Self::UserFolder(..) - | Self::GlobalFile(..) - | Self::GlobalFolder(..) 
=> true, - } - } +pub(crate) enum ProjectConfigSource { + // Buckconfig file in the cell relative to project root, such as .buckconfig or .buckconfig.local + CellRelativeFile(&'static str), + + // Buckconfig folder in the cell, assuming all files in this folder are buckconfig + CellRelativeFolder(&'static str), } -/// The override order of buck config, from highest priority to lowest -/// 1. .buckconfig.local in repo -/// 2. .buckconfig in repo -/// 3. files in .buckconfig.d folder in repo -/// 4. .buckconfig.local in user's home directory -/// 5. files in .buckconfig.d folder in user's home directory -/// 6. global file /etc/buckconfig -/// 7. files in global directory /etc/buckconfig.d -pub(crate) static DEFAULT_BUCK_CONFIG_FILES: &[BuckConfigFile] = &[ +/// The default places from which buckconfigs are sourced. +/// +/// Later entries take precedence over earlier ones, and project configs take precedence over +/// external configs. +pub(crate) static DEFAULT_EXTERNAL_CONFIG_SOURCES: &[ExternalConfigSource] = &[ #[cfg(not(windows))] - BuckConfigFile::GlobalFolder("/etc/buckconfig.d"), + ExternalConfigSource::GlobalFolder("/etc/buckconfig.d"), #[cfg(not(windows))] - BuckConfigFile::GlobalFile("/etc/buckconfig"), + ExternalConfigSource::GlobalFile("/etc/buckconfig"), // TODO: use %PROGRAMDATA% on Windows #[cfg(windows)] - BuckConfigFile::GlobalFolder("C:\\ProgramData\\buckconfig.d"), + ExternalConfigSource::GlobalFolder("C:\\ProgramData\\buckconfig.d"), #[cfg(windows)] - BuckConfigFile::GlobalFile("C:\\ProgramData\\buckconfig"), - BuckConfigFile::UserFolder(".buckconfig.d"), - BuckConfigFile::UserFile(".buckconfig.local"), - BuckConfigFile::ProjectRelativeFolder(".buckconfig.d"), - BuckConfigFile::ProjectRelativeFile(".buckconfig"), - BuckConfigFile::ProjectRelativeFile(".buckconfig.local"), + ExternalConfigSource::GlobalFile("C:\\ProgramData\\buckconfig"), + ExternalConfigSource::UserFolder(".buckconfig.d"), + ExternalConfigSource::UserFile(".buckconfig.local"), +]; + +pub(crate) static DEFAULT_PROJECT_CONFIG_SOURCES: &[ProjectConfigSource] = &[ + ProjectConfigSource::CellRelativeFolder(".buckconfig.d"), + ProjectConfigSource::CellRelativeFile(".buckconfig"), + ProjectConfigSource::CellRelativeFile(".buckconfig.local"), ]; diff --git a/app/buck2_common/src/legacy_configs/view.rs b/app/buck2_common/src/legacy_configs/view.rs index 0c7309164f2de..51a2be67d86ad 100644 --- a/app/buck2_common/src/legacy_configs/view.rs +++ b/app/buck2_common/src/legacy_configs/view.rs @@ -8,9 +8,11 @@ */ use std::fmt::Debug; +use std::str::FromStr; use std::sync::Arc; -use buck2_core::cells::name::CellName; +use crate::legacy_configs::configs::LegacyBuckConfig; +use crate::legacy_configs::key::BuckconfigKeyRef; /// Buckconfig trait. /// @@ -18,13 +20,19 @@ use buck2_core::cells::name::CellName; /// * simple implementation which is backed by a buckconfig object, used in tests /// * DICE-backed implementation which records a dependency on buckconfig property in DICE pub trait LegacyBuckConfigView: Debug { - fn get(&self, section: &str, key: &str) -> anyhow::Result>>; -} + fn get(&mut self, key: BuckconfigKeyRef) -> anyhow::Result>>; + + fn parse(&mut self, key: BuckconfigKeyRef) -> anyhow::Result> + where + anyhow::Error: From<::Err>, + { + LegacyBuckConfig::parse_value(key, self.get(key)?.as_deref()) + } -/// All cell buckconfigs traits. 
-pub trait LegacyBuckConfigsView { - fn get<'a>(&'a self, cell_name: CellName) -> anyhow::Result<&'a dyn LegacyBuckConfigView>; - fn iter<'a>( - &'a self, - ) -> Box + 'a>; + fn parse_list(&mut self, key: BuckconfigKeyRef) -> anyhow::Result>> + where + anyhow::Error: From<::Err>, + { + LegacyBuckConfig::parse_list_value(key, self.get(key)?.as_deref()) + } } diff --git a/app/buck2_common/src/lib.rs b/app/buck2_common/src/lib.rs index 17bf394d75f6a..9accfd5f555d9 100644 --- a/app/buck2_common/src/lib.rs +++ b/app/buck2_common/src/lib.rs @@ -7,50 +7,53 @@ * of this source tree. */ +#![feature(error_generic_member_access)] + //! Common core components of buck2 -// Plugins -#![cfg_attr(feature = "gazebo_lint", feature(plugin))] -#![cfg_attr(feature = "gazebo_lint", allow(deprecated))] // :( -#![cfg_attr(feature = "gazebo_lint", plugin(gazebo_lint))] -#![feature(fs_try_exists)] #![feature(io_error_more)] -#![feature(if_let_guard)] #![feature(is_sorted)] -#![feature(trait_alias)] +#![feature(map_try_insert)] #![feature(never_type)] -#![feature(pattern)] - -#[cfg(test)] -#[macro_use] -extern crate maplit; +#![feature(used_with_arg)] +pub mod argv; pub mod buckd_connection; +pub mod build_count; +pub mod buildfiles; pub mod cas_digest; +pub mod chunk_reader; pub mod client_utils; pub mod convert; pub mod daemon_dir; pub mod dice; -pub mod error_report; pub mod events; +pub mod external_cells; pub mod external_symlink; +pub mod fbinit; pub mod file_ops; pub mod find_buildfile; +pub mod global_cfg_options; pub mod home_buck_tmp; pub mod http; pub mod ignores; +pub mod init; pub mod invocation_paths; +pub mod invocation_paths_result; pub mod invocation_roots; pub mod io; pub mod kill_util; pub mod legacy_configs; pub mod liveliness_observer; pub mod local_resource_state; +pub mod manifold; pub mod memory; pub mod package_boundary; pub mod package_listing; pub mod pattern; -pub mod result; +pub mod scope; pub mod sqlite; +pub mod starlark_profiler; +pub mod systemd; pub mod target_aliases; pub mod temp_path; diff --git a/app/buck2_common/src/liveliness_observer.rs b/app/buck2_common/src/liveliness_observer.rs index 499ece52fd468..7c8f0a4374171 100644 --- a/app/buck2_common/src/liveliness_observer.rs +++ b/app/buck2_common/src/liveliness_observer.rs @@ -8,15 +8,19 @@ */ use std::sync::Arc; +use std::time::Duration; use async_trait::async_trait; use dupe::Dupe; -use thiserror::Error; +use futures::future::FutureExt; +use futures::future::Shared; use tokio::sync::OwnedRwLockWriteGuard; use tokio::sync::RwLock; +use tokio::time::Sleep; -#[derive(Debug, Error, Copy, Clone, Dupe)] +#[derive(Debug, buck2_error::Error, Copy, Clone, Dupe)] #[error("LivelinessObserver reports this session is shutting down")] +#[allow(dead_code)] struct NotAlive; /// A LivelinessObserver can be passed to notify callees that they should stop work and return @@ -37,12 +41,22 @@ pub trait LivelinessObserver: Send + Sync { async fn while_alive(&self); } +pub trait LivelinessObserverSync: LivelinessObserver { + fn is_alive_sync(&self) -> bool; +} + impl dyn LivelinessObserver { pub async fn is_alive(&self) -> bool { futures::poll!(self.while_alive()).is_pending() } } +impl dyn LivelinessObserverSync { + pub async fn is_alive(&self) -> bool { + futures::poll!(self.while_alive()).is_pending() + } +} + /// A LivelinessObserver with an implementation backed by an RW Lock. While the lock is held with /// write access, this LivelinessObserver is alive. 
/// @@ -67,7 +81,7 @@ pub struct LivelinessGuard { } impl LivelinessGuard { - pub fn create() -> (Arc, LivelinessGuard) { + fn create_impl() -> (Arc, LivelinessGuard) { let manager = Arc::new(LivelinessObserverForGuard::new( LivelinessObserverState::AliveWhenLocked, )); @@ -80,6 +94,16 @@ impl LivelinessGuard { (manager.dupe() as _, LivelinessGuard { guard, manager }) } + pub fn create() -> (Arc, LivelinessGuard) { + let (manager, guard) = LivelinessGuard::create_impl(); + (manager.dupe() as _, guard) + } + + pub fn create_sync() -> (Arc, LivelinessGuard) { + let (manager, guard) = LivelinessGuard::create_impl(); + (manager.dupe() as _, guard) + } + /// Declare that this liveliness manager is no longer alive. Dropping the guard does the same, /// but this allows potentially restoring it later. pub fn cancel(self) -> CancelledLivelinessGuard { @@ -134,6 +158,12 @@ impl LivelinessObserver for LivelinessObserverForGuard { } } +impl LivelinessObserverSync for LivelinessObserverForGuard { + fn is_alive_sync(&self) -> bool { + self.try_read().is_err() + } +} + /// Always alive. pub struct NoopLivelinessObserver; @@ -191,12 +221,31 @@ where } #[async_trait] -impl LivelinessObserver for more_futures::cancellable_future::CancellationObserver { +impl LivelinessObserver for buck2_futures::cancellable_future::CancellationObserver { async fn while_alive(&self) { self.dupe().await } } +pub struct TimeoutLivelinessObserver { + inner: Shared, +} + +impl TimeoutLivelinessObserver { + pub fn new(duration: Duration) -> Self { + Self { + inner: tokio::time::sleep(duration).shared(), + } + } +} + +#[async_trait] +impl LivelinessObserver for TimeoutLivelinessObserver { + async fn while_alive(&self) { + self.inner.clone().await + } +} + #[cfg(test)] mod tests { use super::*; @@ -209,6 +258,14 @@ mod tests { assert!(!manager.is_alive().await); } + #[tokio::test] + async fn test_guard_is_alive_sync() { + let (manager, guard) = LivelinessGuard::create_sync(); + assert!(manager.is_alive_sync()); + drop(guard); + assert!(!manager.is_alive_sync()); + } + #[tokio::test] async fn test_and() { let (manager_a, guard) = LivelinessGuard::create(); @@ -236,4 +293,19 @@ mod tests { restored.forget(); assert!(manager.is_alive().await); } + + #[tokio::test] + async fn test_timeout() { + let obs = TimeoutLivelinessObserver::new(Duration::from_secs(1)); + + // It is alive for a little while. + tokio::time::timeout(Duration::from_millis(100), obs.while_alive()) + .await + .unwrap_err(); + + // It eventually becomes not-alive. 
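+        // while_alive() completes once the observer's 1s sleep elapses, so this
+        // generous 10s bound succeeds where the 100ms one above timed out.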
+ tokio::time::timeout(Duration::from_secs(10), obs.while_alive()) + .await + .unwrap(); + } } diff --git a/app/buck2_client_ctx/src/manifold.rs b/app/buck2_common/src/manifold.rs similarity index 87% rename from app/buck2_client_ctx/src/manifold.rs rename to app/buck2_common/src/manifold.rs index a7020f5ec4ac3..6342b052237ae 100644 --- a/app/buck2_client_ctx/src/manifold.rs +++ b/app/buck2_common/src/manifold.rs @@ -12,17 +12,16 @@ use std::time::Duration; use std::time::SystemTime; use std::time::UNIX_EPOCH; -use buck2_common::http::retries::http_retry; -use buck2_common::http::retries::AsHttpError; -use buck2_common::http::retries::HttpError; -use buck2_common::http::HttpClient; -use buck2_common::http::HttpClientBuilder; +use buck2_http::retries::http_retry; +use buck2_http::retries::AsHttpError; +use buck2_http::retries::HttpError; +use buck2_http::HttpClient; +use buck2_http::HttpClientBuilder; use bytes::Bytes; use dupe::Dupe; use futures::stream::BoxStream; use futures::stream::StreamExt; use hyper::Response; -use thiserror::Error; use tokio::io::AsyncRead; use crate::chunk_reader::ChunkReader; @@ -38,6 +37,17 @@ impl Ttl { duration: Duration::from_secs(ttl), } } + + pub fn from_days(days: u64) -> Self { + let secs = days * 24 * 60 * 60; + Self { + duration: Duration::from_secs(secs), + } + } + + pub fn as_secs(&self) -> u64 { + self.duration.as_secs() + } } impl Default for Ttl { @@ -46,16 +56,16 @@ impl Default for Ttl { } } -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum HttpWriteError { - #[error("Error performing write request")] - Client(#[from] HttpError), + #[error(transparent)] + Client(HttpError), } -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum HttpAppendError { - #[error("Error performing append request")] - Client(#[from] HttpError), + #[error(transparent)] + Client(HttpError), } impl AsHttpError for HttpWriteError { @@ -74,7 +84,7 @@ impl AsHttpError for HttpAppendError { } } -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] pub enum UploadError { #[error( "No result code from uploading path `{0}` to Manifold, probably due to signal interrupt" @@ -95,7 +105,7 @@ pub enum UploadError { #[error("File not found")] FileNotFound, #[error(transparent)] - Other(#[from] anyhow::Error), + Other(anyhow::Error), } impl From for UploadError { @@ -129,7 +139,7 @@ impl Bucket { /// Return the place to upload logs, or None to not upload logs at all fn log_upload_url(use_vpnless: bool) -> Option<&'static str> { - #[cfg(any(fbcode_build, cargo_internal_build))] + #[cfg(fbcode_build)] if hostcaps::is_prod() { Some("https://manifold.facebook.net") } else if use_vpnless { @@ -137,10 +147,8 @@ fn log_upload_url(use_vpnless: bool) -> Option<&'static str> { } else { Some("https://manifold.c2p.facebook.net") } - #[cfg(not(any(fbcode_build, cargo_internal_build)))] + #[cfg(not(fbcode_build))] { - #[cfg(fbcode_build)] - compile_error!("this code is not meant to be compiled in fbcode"); let _unused = use_vpnless; None } @@ -152,8 +160,8 @@ pub struct ManifoldClient { } impl ManifoldClient { - pub fn new(allow_vpnless: bool) -> anyhow::Result { - let client = HttpClientBuilder::with_sensible_defaults(allow_vpnless)?.build(); + pub async fn new() -> anyhow::Result { + let client = HttpClientBuilder::internal().await?.build(); let manifold_url = log_upload_url(client.supports_vpnless()).map(|s| s.to_owned()); Ok(Self { @@ -316,3 +324,14 @@ impl<'a> ManifoldChunkedUploader<'a> { self.position } } + +#[cfg(test)] +mod tests { + use 
super::*; + + #[test] + fn test_days_to_secs() { + assert_eq!(Ttl::from_days(1).duration.as_secs(), 86400); + assert_eq!(Ttl::from_days(3).duration.as_secs(), 86400 * 3); + } +} diff --git a/app/buck2_common/src/package_boundary.rs b/app/buck2_common/src/package_boundary.rs index 1f6ab54e29b88..97d7f1424bb6d 100644 --- a/app/buck2_common/src/package_boundary.rs +++ b/app/buck2_common/src/package_boundary.rs @@ -11,7 +11,6 @@ use std::collections::HashMap; use std::sync::Arc; use allocative::Allocative; -use anyhow::Context as _; use async_trait::async_trait; use buck2_core::cells::cell_path::CellPath; use buck2_core::cells::cell_path::CellPathRef; @@ -21,16 +20,15 @@ use buck2_core::cells::paths::CellRelativePathBuf; use buck2_core::fs::paths::file_name::FileNameBuf; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; +use buck2_futures::cancellation::CancellationContext; use derive_more::Display; use dice::DiceComputations; use dice::Key; use dupe::Dupe; -use more_futures::cancellation::CancellationContext; use ref_cast::RefCast; use crate::legacy_configs::dice::HasLegacyConfigs; -use crate::legacy_configs::LegacyBuckConfigs; -use crate::result::SharedResult; +use crate::legacy_configs::key::BuckconfigKeyRef; #[derive(PartialEq, Allocative)] pub struct PackageBoundaryExceptions(HashMap); @@ -96,111 +94,83 @@ impl CellPackageBoundaryExceptions { } } -impl PackageBoundaryExceptions { - fn new(configs: &LegacyBuckConfigs) -> anyhow::Result { - Ok(Self( - configs - .iter() - .filter_map(|(name, cell_configs)| { - cell_configs - .get("project", "package_boundary_exceptions") - .map(|v| { - let e = CellPackageBoundaryExceptions::new(v).with_context( - || format!("Error parsing `project.package_boundary_exceptions` key from cell `{}`", name) - )?; - Ok((name, e)) - }) - }) - .collect::>()?, - )) - } +#[derive(Hash, Eq, PartialEq, Clone, Dupe, Display, Debug, Allocative)] +#[display("{:?}", self)] +struct CellPackageBoundaryExceptionsKey(CellName); - /// Returns the package boundary exception path that covers this path, if it exists - pub fn get_package_boundary_exception_path(&self, path: &CellPath) -> Option { - if let Some(exceptions) = self.0.get(&path.cell()) { - exceptions - .get_package_boundary_exception_path(path.path()) - .map(|p| CellPath::new(path.cell(), p)) +#[async_trait] +impl Key for CellPackageBoundaryExceptionsKey { + type Value = buck2_error::Result>>; + + async fn compute( + &self, + ctx: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> Self::Value { + let s = ctx + .get_legacy_config_property( + self.0, + BuckconfigKeyRef { + section: "project", + property: "package_boundary_exceptions", + }, + ) + .await?; + if let Some(s) = s { + Ok(Some(Arc::new(CellPackageBoundaryExceptions::new(&s)?))) } else { - None + Ok(None) } } - pub fn contains(&self, path: &CellPath) -> bool { - self.get_package_boundary_exception_path(path).is_some() + fn validity(x: &Self::Value) -> bool { + x.is_ok() + } + + fn equality(x: &Self::Value, y: &Self::Value) -> bool { + match (x, y) { + (Ok(x), Ok(y)) => x == y, + _ => false, + } } } #[async_trait] pub trait HasPackageBoundaryExceptions { - async fn get_package_boundary_exceptions(&self) - -> SharedResult>; - async fn get_package_boundary_exception( - &self, + &mut self, path: CellPathRef<'async_trait>, - ) -> SharedResult; + ) -> buck2_error::Result>>; } #[async_trait] -impl HasPackageBoundaryExceptions for DiceComputations { - async fn 
get_package_boundary_exceptions( - &self, - ) -> SharedResult> { - #[derive(Hash, Eq, PartialEq, Clone, Dupe, Display, Debug, Allocative)] - #[display(fmt = "{:?}", self)] - struct PackageBoundaryExceptionsKey; - - #[async_trait] - impl Key for PackageBoundaryExceptionsKey { - type Value = SharedResult>; - - async fn compute( - &self, - ctx: &mut DiceComputations, - _cancellations: &CancellationContext, - ) -> Self::Value { - Ok(Arc::new(PackageBoundaryExceptions::new( - &ctx.get_legacy_configs().await?, - )?)) - } - - fn validity(x: &Self::Value) -> bool { - x.is_ok() - } - - fn equality(x: &Self::Value, y: &Self::Value) -> bool { - match (x, y) { - (Ok(x), Ok(y)) => x == y, - _ => false, - } - } - } - - self.compute(&PackageBoundaryExceptionsKey).await? - } - +impl HasPackageBoundaryExceptions for DiceComputations<'_> { async fn get_package_boundary_exception( - &self, + &mut self, path: CellPathRef<'async_trait>, - ) -> SharedResult { + ) -> buck2_error::Result>> { #[derive(Hash, Eq, PartialEq, Clone, Display, Debug, RefCast, Allocative)] #[repr(transparent)] struct PackageBoundaryExceptionKey(CellPath); #[async_trait] impl Key for PackageBoundaryExceptionKey { - type Value = SharedResult; + type Value = buck2_error::Result>>; async fn compute( &self, ctx: &mut DiceComputations, _cancellations: &CancellationContext, ) -> Self::Value { - Ok(ctx - .get_package_boundary_exceptions() - .await? - .contains(&self.0)) + let Some(exceptions) = ctx + .compute(&CellPackageBoundaryExceptionsKey(self.0.cell())) + .await?? + else { + return Ok(None); + }; + Ok(exceptions + .get_package_boundary_exception_path(self.0.path()) + .map(|p| Arc::new(CellPath::new(self.0.cell(), p)))) } fn validity(x: &Self::Value) -> bool { @@ -222,7 +192,6 @@ impl HasPackageBoundaryExceptions for DiceComputations { #[cfg(test)] mod tests { - use buck2_core::cells::paths::CellRelativePath; use super::*; diff --git a/app/buck2_common/src/package_listing/mod.rs b/app/buck2_common/src/package_listing.rs similarity index 100% rename from app/buck2_common/src/package_listing/mod.rs rename to app/buck2_common/src/package_listing.rs diff --git a/app/buck2_common/src/package_listing/dice.rs b/app/buck2_common/src/package_listing/dice.rs index b0c3cd6b6bde8..45475672a1e35 100644 --- a/app/buck2_common/src/package_listing/dice.rs +++ b/app/buck2_common/src/package_listing/dice.rs @@ -7,7 +7,6 @@ * of this source tree. 
*/ -use std::sync::Arc; use std::time::Duration; use std::time::Instant; @@ -17,47 +16,15 @@ use buck2_core::cells::cell_path::CellPathRef; use buck2_core::package::PackageLabel; use buck2_events::dispatch::async_record_root_spans; use buck2_events::span::SpanId; +use buck2_futures::cancellation::CancellationContext; use dice::DiceComputations; use dice::Key; use dupe::Dupe; -use more_futures::cancellation::CancellationContext; use smallvec::SmallVec; -use crate::dice::cells::HasCellResolver; -use crate::dice::file_ops::HasFileOps; use crate::package_listing::interpreter::InterpreterPackageListingResolver; use crate::package_listing::listing::PackageListing; use crate::package_listing::resolver::PackageListingResolver; -use crate::result::SharedResult; -use crate::result::ToUnsharedResultExt; - -#[async_trait] -pub trait HasPackageListingResolver<'c> { - type PL: PackageListingResolver + 'c; - fn get_package_listing_resolver(&'c self) -> Self::PL; - async fn resolve_package_listing( - &self, - package: PackageLabel, - ) -> anyhow::Result; -} - -#[async_trait] -impl<'c> HasPackageListingResolver<'c> for DiceComputations { - type PL = DicePackageListingResolver<'c>; - fn get_package_listing_resolver(&'c self) -> Self::PL { - DicePackageListingResolver(self) - } - - async fn resolve_package_listing( - &self, - package: PackageLabel, - ) -> anyhow::Result { - self.get_package_listing_resolver() - .resolve(package) - .await - .unshared_error() - } -} #[derive( Clone, @@ -78,7 +45,7 @@ pub struct PackageListingKeyActivationData { #[async_trait] impl Key for PackageListingKey { - type Value = SharedResult; + type Value = buck2_error::Result; async fn compute( &self, ctx: &mut DiceComputations, @@ -86,11 +53,8 @@ impl Key for PackageListingKey { ) -> Self::Value { let now = Instant::now(); - let cell_resolver = ctx.get_cell_resolver().await?; - let file_ops = ctx.file_ops(); let (result, spans) = async_record_root_spans( - InterpreterPackageListingResolver::new(cell_resolver, Arc::new(file_ops)) - .resolve(self.0.dupe()), + InterpreterPackageListingResolver::new(ctx).resolve(self.0.dupe()), ) .await; @@ -110,35 +74,39 @@ impl Key for PackageListingKey { } } -#[derive(Clone, Dupe)] -pub struct DicePackageListingResolver<'compute>(&'compute DiceComputations); +pub struct DicePackageListingResolver<'compute, 'dice>(pub &'compute mut DiceComputations<'dice>); #[async_trait] -impl<'c> PackageListingResolver for DicePackageListingResolver<'c> { - async fn resolve(&self, package: PackageLabel) -> SharedResult { - self.0.compute(&PackageListingKey(package.dupe())).await? +impl<'c, 'd> PackageListingResolver for DicePackageListingResolver<'c, 'd> { + async fn resolve(&mut self, package: PackageLabel) -> buck2_error::Result { + self.0.compute(&PackageListingKey(package)).await? 
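+        // Note: `compute` returns a DICE-level Result wrapping the key's own
+        // buck2_error::Result; the `?` above peels off the DICE layer and the
+        // inner result is what this method returns.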
} async fn get_enclosing_package( - &self, + &mut self, path: CellPathRef<'async_trait>, ) -> anyhow::Result { - let cell_resolver = self.0.get_cell_resolver().await?; - let file_ops = self.0.file_ops(); - InterpreterPackageListingResolver::new(cell_resolver, Arc::new(file_ops)) + InterpreterPackageListingResolver::new(self.0) .get_enclosing_package(path) .await } async fn get_enclosing_packages( - &self, + &mut self, path: CellPathRef<'async_trait>, enclosing_violation_path: CellPathRef<'async_trait>, ) -> anyhow::Result> { - let cell_resolver = self.0.get_cell_resolver().await?; - let file_ops = self.0.file_ops(); - InterpreterPackageListingResolver::new(cell_resolver, Arc::new(file_ops)) + InterpreterPackageListingResolver::new(self.0) .get_enclosing_packages(path, enclosing_violation_path) .await } } + +impl DicePackageListingResolver<'_, '_> { + pub async fn resolve_package_listing( + &mut self, + package: PackageLabel, + ) -> anyhow::Result { + self.resolve(package).await.map_err(anyhow::Error::from) + } +} diff --git a/app/buck2_common/src/package_listing/file_listing.rs b/app/buck2_common/src/package_listing/file_listing.rs index 3d6218fae3131..f7dbdc5f8fc52 100644 --- a/app/buck2_common/src/package_listing/file_listing.rs +++ b/app/buck2_common/src/package_listing/file_listing.rs @@ -91,7 +91,6 @@ pub mod testing { use crate::package_listing::file_listing::PackageFileListing; - #[allow(clippy::from_iter_instead_of_collect)] impl PackageFileListing { pub fn testing_new(files: &[&str]) -> PackageFileListing { let files = files @@ -105,7 +104,7 @@ pub mod testing { } #[cfg(test)] -mod test { +mod tests { use super::*; #[test] diff --git a/app/buck2_common/src/package_listing/interpreter.rs b/app/buck2_common/src/package_listing/interpreter.rs index eacb582a38514..1fb50022ba4d7 100644 --- a/app/buck2_common/src/package_listing/interpreter.rs +++ b/app/buck2_common/src/package_listing/interpreter.rs @@ -7,59 +7,54 @@ * of this source tree. */ -use std::sync::Arc; - -use anyhow::Context; use async_trait::async_trait; use buck2_core::cells::cell_path::CellPath; use buck2_core::cells::cell_path::CellPathRef; -use buck2_core::cells::CellResolver; +use buck2_core::cells::paths::CellRelativePath; use buck2_core::fs::paths::file_name::FileNameBuf; use buck2_core::package::package_relative_path::PackageRelativePath; +use buck2_core::package::package_relative_path::PackageRelativePathBuf; use buck2_core::package::PackageLabel; use buck2_util::arc_str::ArcS; +use dice::DiceComputations; use dupe::Dupe; -use futures::stream::FuturesUnordered; -use futures::StreamExt; +use futures::future::BoxFuture; +use futures::FutureExt; +use itertools::Itertools; use starlark_map::sorted_set::SortedSet; use starlark_map::sorted_vec::SortedVec; -use thiserror::Error; -use crate::file_ops::FileOps; -use crate::file_ops::SimpleDirEntry; +use crate::dice::file_ops::DiceFileComputations; use crate::find_buildfile::find_buildfile; +use crate::ignores::file_ignores::FileIgnoreReason; +use crate::io::ReadDirError; use crate::package_listing::listing::PackageListing; use crate::package_listing::resolver::PackageListingResolver; -use crate::result::SharedResult; -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum PackageListingError { - #[error("Expected `{0}` to be a package directory, but there was no buildfile there, expected one of `{}`", .1.join("`, `"))] - NoBuildFile(CellPath, Vec), #[error("Expected `{0}` to be within a package directory, but there was no buildfile in any parent directories. 
Expected one of `{}`", .1.join("`, `"))] NoContainingPackage(CellPath, Vec), } #[async_trait] -impl<'c> PackageListingResolver for InterpreterPackageListingResolver<'c> { - async fn resolve(&self, package: PackageLabel) -> SharedResult { - Ok(self - .gather_package_listing(package.dupe()) - .await - .context(buck2_data::ErrorCause::InvalidPackage) - .with_context(|| format!("Error gathering package listing for `{}`", package))?) +impl PackageListingResolver for InterpreterPackageListingResolver<'_, '_> { + async fn resolve(&mut self, package: PackageLabel) -> buck2_error::Result { + Ok(self.gather_package_listing(package.dupe()).await?) } async fn get_enclosing_package( - &self, + &mut self, path: CellPathRef<'async_trait>, ) -> anyhow::Result { - let cell_instance = self.cell_resolver.get(path.cell())?; - let buildfile_candidates = cell_instance.buildfiles(); + let buildfile_candidates = + DiceFileComputations::buildfiles(&mut self.ctx, path.cell()).await?; if let Some(path) = path.parent() { for path in path.ancestors() { - let listing = self.fs.read_dir(path.dupe()).await?.included; - if find_buildfile(buildfile_candidates, &listing).is_some() { + let listing = DiceFileComputations::read_dir(self.ctx, path) + .await? + .included; + if find_buildfile(&buildfile_candidates, &listing).is_some() { return Ok(PackageLabel::from_cell_path(path)); } } @@ -72,12 +67,12 @@ impl<'c> PackageListingResolver for InterpreterPackageListingResolver<'c> { } async fn get_enclosing_packages( - &self, + &mut self, path: CellPathRef<'async_trait>, enclosing_path: CellPathRef<'async_trait>, ) -> anyhow::Result> { - let cell_instance = self.cell_resolver.get(path.cell())?; - let buildfile_candidates = cell_instance.buildfiles(); + let buildfile_candidates = + DiceFileComputations::buildfiles(&mut self.ctx, path.cell()).await?; if let Some(path) = path.parent() { let mut packages = Vec::new(); for path in path.ancestors() { @@ -85,8 +80,10 @@ impl<'c> PackageListingResolver for InterpreterPackageListingResolver<'c> { // stop when we are no longer within the enclosing path break; } - let listing = self.fs.read_dir(path.dupe()).await?.included; - if find_buildfile(buildfile_candidates, &listing).is_some() { + let listing = DiceFileComputations::read_dir(self.ctx, path.dupe()) + .await? 
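+            // read_dir splits entries into included and ignored; only the
+            // included ones are consulted when looking for a buildfile.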
+ .included; + if find_buildfile(&buildfile_candidates, &listing).is_some() { packages.push(PackageLabel::from_cell_path(path)); } } @@ -101,99 +98,415 @@ impl<'c> PackageListingResolver for InterpreterPackageListingResolver<'c> { } } -pub struct InterpreterPackageListingResolver<'c> { - cell_resolver: CellResolver, - fs: Arc, +pub struct InterpreterPackageListingResolver<'c, 'd> { + ctx: &'c mut DiceComputations<'d>, +} + +#[derive(Debug, buck2_error::Error)] +pub enum GatherPackageListingError { + #[buck2(input)] + NoBuildFile { + package: CellPath, + candidates: Vec, + }, + #[buck2(input)] + DirectoryDoesNotExist { + package: CellPath, + expected_path: CellPath, + // TODO(cjhopman): would be nice to get the absolute path here + }, + #[buck2(input)] + DirectoryIsIgnored { + package: CellPath, + path: CellPath, + ignore_reason: FileIgnoreReason, + }, + #[buck2(input)] + NotADirectory { + package: CellPath, + path: CellPath, + node_type: String, + }, + Anyhow { + package: CellPath, + #[source] + error: anyhow::Error, + }, +} + +impl GatherPackageListingError { + fn anyhow>( + package_path: CellPathRef<'_>, + err: E, + ) -> GatherPackageListingError { + GatherPackageListingError::Anyhow { + package: package_path.to_owned(), + error: err.into(), + } + } + + fn from_read_dir( + package_path: CellPathRef<'_>, + err: ReadDirError, + ) -> GatherPackageListingError { + match err { + ReadDirError::DirectoryDoesNotExist(expected_path) => { + GatherPackageListingError::DirectoryDoesNotExist { + package: package_path.to_owned(), + expected_path, + } + } + ReadDirError::DirectoryIsIgnored(path, ignore_reason) => { + GatherPackageListingError::DirectoryIsIgnored { + package: package_path.to_owned(), + path, + ignore_reason, + } + } + ReadDirError::NotADirectory(path, node_type) => { + GatherPackageListingError::NotADirectory { + package: package_path.to_owned(), + path, + node_type, + } + } + ReadDirError::Anyhow(e) => GatherPackageListingError::Anyhow { + package: package_path.to_owned(), + error: e.into(), + }, + } + } + + fn no_build_file( + package_path: CellPathRef<'_>, + candidates: Vec, + ) -> GatherPackageListingError { + GatherPackageListingError::NoBuildFile { + package: package_path.to_owned(), + candidates, + } + } +} + +impl std::fmt::Display for GatherPackageListingError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + /* + package `fbsource//foo/target/x/y/lmnop:` does not exist + ^--------------------^ + dir `fbsource//foo/target/x` does not exist + + package `fbsource//foo/target/x/y/lmnop:` does not exist + ^--------------------^ + dir `fbsource//foo/target/x` is ignored (config project.ignore contains `foo/target/ **`) + + package `fbsource//fbcode/target/x/y/lmnop:` does not exist + ^--------------^ + this package is using the wrong cell, use `fbcode//target/x/y/lmnop:` instead + + + package `fbsource//foo/target/x/y/lmnop:` does not exist + ^--------------------^ + path `fbsource//foo/target/x` is a file, not a directory + + package `fbsource//foo/target/x/y/lmnop:` does not exist + missing `TARGETS` file (also missing alternatives `TARGETS.v2`, `BUCK`, `BUCK.v2`) + + error loading package `fbsource//foo/target/x/y/lmnop:` + ... # just display the anyhow error for now + */ + + let prefix = "package `"; + let underlined = |path_as_string: &str| { + format!( + "{}^{}^", + " ".repeat(prefix.len()), + "-".repeat(path_as_string.len().saturating_sub(2)) + ) + }; + + let (package, submessage) = match self { + GatherPackageListingError::Anyhow { package, .. 
} => { + // in this case we return the anyhow as our source and we're just displayed as context + write!(f, "gathering package listing for `{}`", &package)?; + return Ok(()); + } + GatherPackageListingError::NoBuildFile { + candidates, + package, + } => { + if let Some(primary_candidate) = + candidates.iter().find(|v| v.extension() != Some("v2")) + { + ( + package, + format!( + " missing `{}` file (also missing alternatives {})", + primary_candidate, + candidates.iter().map(|v| format!("`{}`", v)).join(", ") + ), + ) + } else { + unreachable!() + } + } + GatherPackageListingError::DirectoryDoesNotExist { + package, + expected_path, + } => { + let path_as_str = expected_path.to_string(); + ( + package, + format!( + "{}\n dir `{}` does not exist", + underlined(&path_as_str), + path_as_str, + ), + ) + } + GatherPackageListingError::NotADirectory { + package, + path, + node_type, + } => { + let path_as_str = path.to_string(); + ( + package, + format!( + "{}\n path `{}` is a {}, not a directory", + underlined(&path_as_str), + path_as_str, + node_type + ), + ) + } + GatherPackageListingError::DirectoryIsIgnored { + package, + path, + ignore_reason: FileIgnoreReason::IgnoredByPattern { pattern, .. }, + } => { + let path_as_str = path.to_string(); + ( + package, + format!( + "{}\n dir `{}` does not exist (project.ignore contains `{}`)", + underlined(&path_as_str), + path_as_str, + &pattern + ), + ) + } + GatherPackageListingError::DirectoryIsIgnored { + package, + path, + ignore_reason: FileIgnoreReason::IgnoredByCell { cell_name, .. }, + } => { + let path_as_str = path.to_string(); + let corrected = { + match package.strip_prefix(path.as_ref()) { + Ok(fixed) => { + CellPath::new(*cell_name, CellRelativePath::new(fixed).to_owned()) + .to_string() + } + _ => format!("{}//", cell_name), + } + }; + ( + package, + format!( + "{}\n this package is using the wrong cell, use `{}` instead", + underlined(&path_as_str), + corrected, + ), + ) + } + }; + + writeln!(f, "{}{}:` does not exist", prefix, package)?; + f.write_str(&submessage)?; + Ok(()) + } } -impl<'c> InterpreterPackageListingResolver<'c> { - pub fn new(cell_resolver: CellResolver, fs: Arc) -> Self { - Self { cell_resolver, fs } +impl<'c, 'd> InterpreterPackageListingResolver<'c, 'd> { + pub fn new(ctx: &'c mut DiceComputations<'d>) -> Self { + Self { ctx } } pub async fn gather_package_listing<'a>( - &'a self, + &mut self, root: PackageLabel, - ) -> anyhow::Result { - let cell_instance = self.cell_resolver.get(root.cell_name())?; - let buildfile_candidates = cell_instance.buildfiles(); + ) -> Result { + gather_package_listing_impl(self.ctx, root).await + } +} + +struct Directory { + path: ArcS, + files: Vec>, + subdirs: Vec, + subpackages: Vec>, + buildfile: Option, - let mut files: Vec> = Vec::new(); - let mut dirs: Vec> = Vec::new(); - let mut subpackages: Vec> = Vec::new(); + recursive_files_count: usize, + recursive_dirs_count: usize, + recursive_subpackages_count: usize, +} - let root_entries = self - .fs - .read_dir(root.as_cell_path()) +impl Directory { + // Ok(None) indicates that the path is a subpackage + async fn gather( + ctx: &mut DiceComputations<'_>, + buildfile_candidates: &[FileNameBuf], + root: CellPathRef<'_>, + path: &PackageRelativePath, + is_root: bool, + ) -> Result, GatherPackageListingError> { + let cell_path = root.join(path.as_forward_rel_path()); + let entries = DiceFileComputations::read_dir_ext(ctx, cell_path.as_ref()) .await - .context(buck2_data::ErrorCategory::User)? 
+ .map_err(|e| GatherPackageListingError::from_read_dir(cell_path.as_ref(), e))? .included; - let buildfile = find_buildfile(buildfile_candidates, &root_entries) - .ok_or_else(|| { - PackageListingError::NoBuildFile( - root.as_cell_path().to_owned(), + let buildfile = find_buildfile(buildfile_candidates, &entries); + + match (is_root, buildfile) { + (true, None) => { + return Err(GatherPackageListingError::no_build_file( + cell_path.as_ref(), buildfile_candidates.to_vec(), - ) - }) - .context(buck2_data::ErrorCategory::User)?; - - let mut work = FuturesUnordered::new(); - - let root = &root; - let process_entries = |work: &mut FuturesUnordered<_>, - files: &mut Vec>, - path: &PackageRelativePath, - entries: &[SimpleDirEntry]| - -> anyhow::Result<()> { - for d in entries { - let child_path = path.join(&d.file_name).to_arc(); - if d.file_type.is_dir() { - work.push(async move { - let entries = self - .fs - .read_dir( - root.as_cell_path() - .join(child_path.as_forward_rel_path()) - .as_ref(), - ) - .await; - (child_path, entries) - }); - } else { - files.push(child_path); - } + )); } - Ok(()) - }; + (false, Some(_)) => { + return Ok(None); + } + _ => {} + } - process_entries( - &mut work, - &mut files, - PackageRelativePath::empty(), - &root_entries, - )?; - - while let Some((path, entries_result)) = work.next().await { - let entries = entries_result?.included; - if find_buildfile(buildfile_candidates, &entries).is_none() { - process_entries(&mut work, &mut files, path.as_ref(), &entries)?; - dirs.push(path); + let mut subdirs = Vec::new(); + let mut files = Vec::new(); + + for d in &*entries { + let child_path = path.join(&d.file_name); + if d.file_type.is_dir() { + subdirs.push(child_path); } else { - subpackages.push(path); + files.push(child_path.to_arc()); + } + } + + let (subdirs, subpackages) = + Self::gather_subdirs(ctx, buildfile_candidates, root, subdirs).await?; + + let mut recursive_files_count = files.len(); + let mut recursive_dirs_count = subdirs.len(); + let mut recursive_subpackages_count = subpackages.len(); + for d in &subdirs { + recursive_files_count += d.recursive_files_count; + recursive_dirs_count += d.recursive_dirs_count; + recursive_subpackages_count += d.recursive_subpackages_count; + } + + Ok(Some(Directory { + path: path.to_arc(), + files, + subdirs, + subpackages, + buildfile: buildfile.map(|v| v.to_owned()), + recursive_files_count, + recursive_dirs_count, + recursive_subpackages_count, + })) + } + + fn gather_subdirs<'a, 'd>( + ctx: &'a mut DiceComputations<'d>, + buildfile_candidates: &'a [FileNameBuf], + root: CellPathRef<'a>, + subdirs: Vec, + ) -> BoxFuture< + 'a, + Result<(Vec, Vec>), GatherPackageListingError>, + > { + async move { + let mut new_subdirs = Vec::new(); + let mut subpackages = Vec::new(); + + for res in ctx + .compute_join(subdirs, |ctx: &mut DiceComputations, path| { + async move { + let res = Directory::gather(ctx, buildfile_candidates, root, &path, false) + .await?; + Ok((path, res)) + } + .boxed() + }) + .await + { + let (path, res) = res?; + match res { + Some(v) => new_subdirs.push(v), + None => subpackages.push(path.to_arc()), + } } + Ok((new_subdirs, subpackages)) } + .boxed() + } + + fn collect_into( + self, + files: &mut Vec>, + dirs: &mut Vec>, + pkgs: &mut Vec>, + ) { + files.extend(self.files); + pkgs.extend(self.subpackages); + if !self.path.is_empty() { + dirs.push(self.path); + } + for d in self.subdirs { + d.collect_into(files, dirs, pkgs) + } + } - // The files are discovered in a non-deterministic order so we need to fix 
- // that here. + fn flatten(mut self) -> PackageListing { + let buildfile = self.buildfile.take().unwrap(); + let mut files = Vec::with_capacity(self.recursive_files_count); + let mut dirs = Vec::with_capacity(self.recursive_dirs_count); + let mut subpackages = Vec::with_capacity(self.recursive_subpackages_count); + + self.collect_into(&mut files, &mut dirs, &mut subpackages); + + // The files are discovered in a deterministic order but not necessarily sorted. + // TODO(cjhopman): Do we require that they be sorted for anything? let files = SortedVec::from(files); let dirs = SortedVec::from(dirs); let subpackages = SortedVec::from(subpackages); - Ok(PackageListing::new( + PackageListing::new( SortedSet::from(files), SortedSet::from(dirs), subpackages, - buildfile.to_owned(), - )) + buildfile, + ) } } + +async fn gather_package_listing_impl( + ctx: &mut DiceComputations<'_>, + root: PackageLabel, +) -> Result { + let cell_path = root.as_cell_path(); + let buildfile_candidates = DiceFileComputations::buildfiles(ctx, root.cell_name()) + .await + .map_err(|e| GatherPackageListingError::anyhow(cell_path, e))?; + Ok(Directory::gather( + ctx, + &buildfile_candidates, + cell_path, + PackageRelativePath::empty(), + true, + ) + .await? + .unwrap() + .flatten()) +} diff --git a/app/buck2_common/src/package_listing/listing.rs b/app/buck2_common/src/package_listing/listing.rs index fce73cd6a26b3..29bc1a7e4298c 100644 --- a/app/buck2_common/src/package_listing/listing.rs +++ b/app/buck2_common/src/package_listing/listing.rs @@ -124,7 +124,6 @@ pub mod testing { Self::testing_new(files, "BUCK") } - #[allow(clippy::from_iter_instead_of_collect)] fn testing_new(files: &[&str], buildfile: &str) -> Self { let files = files.iter().map(|f| { PackageRelativePathBuf::try_from((*f).to_owned()) diff --git a/app/buck2_common/src/package_listing/resolver.rs b/app/buck2_common/src/package_listing/resolver.rs index edd152e948c91..42562c9256774 100644 --- a/app/buck2_common/src/package_listing/resolver.rs +++ b/app/buck2_common/src/package_listing/resolver.rs @@ -12,19 +12,18 @@ use buck2_core::cells::cell_path::CellPathRef; use buck2_core::package::PackageLabel; use crate::package_listing::listing::PackageListing; -use crate::result::SharedResult; #[async_trait] pub trait PackageListingResolver: Send + Sync { - async fn resolve(&self, package: PackageLabel) -> SharedResult; + async fn resolve(&mut self, package: PackageLabel) -> buck2_error::Result; async fn get_enclosing_package( - &self, + &mut self, path: CellPathRef<'async_trait>, ) -> anyhow::Result; async fn get_enclosing_packages( - &self, + &mut self, path: CellPathRef<'async_trait>, enclosing_path: CellPathRef<'async_trait>, ) -> anyhow::Result>; diff --git a/app/buck2_common/src/pattern.rs b/app/buck2_common/src/pattern.rs new file mode 100644 index 0000000000000..12282a6840064 --- /dev/null +++ b/app/buck2_common/src/pattern.rs @@ -0,0 +1,12 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +pub mod package_roots; +pub mod parse_from_cli; +pub mod resolve; diff --git a/app/buck2_common/src/pattern/mod.rs b/app/buck2_common/src/pattern/mod.rs deleted file mode 100644 index 1218c3a186f34..0000000000000 --- a/app/buck2_common/src/pattern/mod.rs +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -pub mod package_roots; -pub mod resolve; diff --git a/app/buck2_common/src/pattern/package_roots.rs b/app/buck2_common/src/pattern/package_roots.rs index d049b4a7f81cb..9df4710d2d0df 100644 --- a/app/buck2_common/src/pattern/package_roots.rs +++ b/app/buck2_common/src/pattern/package_roots.rs @@ -10,9 +10,9 @@ use std::collections::HashSet; use buck2_core::cells::cell_path::CellPath; -use buck2_core::cells::CellResolver; -use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; use buck2_core::package::PackageLabel; +use buck2_futures::drop::DropTogether; +use buck2_futures::spawn::spawn_cancellable; use dice::DiceTransaction; use dupe::Dupe; use futures::channel::mpsc; @@ -21,13 +21,10 @@ use futures::stream::FuturesUnordered; use futures::Stream; use futures::StreamExt; use gazebo::prelude::*; -use more_futures::drop::DropTogether; -use more_futures::spawn::spawn_cancellable; use once_cell::sync::Lazy; use tokio::sync::Semaphore; -use crate::dice::cells::HasCellResolver; -use crate::dice::file_ops::HasFileOps; +use crate::dice::file_ops::DiceFileOps; use crate::file_ops::FileOps; use crate::find_buildfile::find_buildfile; @@ -35,10 +32,10 @@ use crate::find_buildfile::find_buildfile; /// packages recursively contained in the paths (used for resolving patterns /// like `//module/...`). There's no guarantees about the order that results /// are returned, if ordering is important the caller needs to handle it. -pub fn find_package_roots_stream( - ctx: &DiceTransaction, +pub fn find_package_roots_stream<'a>( + ctx: &'a DiceTransaction, paths: Vec, -) -> impl Stream> { +) -> impl Stream> + 'a { // Ideally we wouldn't take a Transaction here, but if we pull things like the package_listing_resolver // out of the ctx, that resolver would have a lifetime bound to the ctx and then we couldn't // do a tokio::spawn. So, we need to only pull those things out within the spawned task. @@ -49,17 +46,19 @@ pub fn find_package_roots_stream( // We don't wait on the task finishing. The packages_rx we return will naturally end when the tx side is dropped. 
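+    // The returned stream is paired with the spawned walk (DropTogether is
+    // imported above for this), so dropping the stream should also cancel the
+    // walk rather than leaking it.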
let ctx_data = ctx.per_transaction_data(); - let ctx = ctx.dupe(); + let mut ctx = ctx.dupe(); let spawned = spawn_cancellable( |_cancellations| { async move { - let file_ops = ctx.file_ops(); - let cell_resolver = ctx.get_cell_resolver().await?; // ignore because the errors will be sent back via the stream - let _ignored = collect_package_roots(&file_ops, &cell_resolver, paths, |res| { - packages_tx.unbounded_send(res) - }) - .await; + let _ignored = ctx + .with_linear_recompute(|ctx| async move { + collect_package_roots(&DiceFileOps(&ctx), paths, |res| { + packages_tx.unbounded_send(res) + }) + .await + }) + .await; anyhow::Ok(()) } @@ -74,7 +73,6 @@ pub fn find_package_roots_stream( pub async fn collect_package_roots( file_ops: &dyn FileOps, - cell_resolver: &CellResolver, paths: Vec, mut collector: impl FnMut(anyhow::Result) -> Result<(), E>, ) -> Result<(), E> { @@ -96,7 +94,11 @@ pub async fn collect_package_roots( }; for path in paths { - match file_ops.is_ignored(path.as_ref()).await { + match file_ops + .is_ignored(path.as_ref()) + .await + .map(|v| v.is_ignored()) + { Ok(true) => { // TODO(cjhopman): Ignoring this matches buck1 behavior, but we'd like this to be an error. } @@ -112,21 +114,26 @@ pub async fn collect_package_roots( } while let Some((path, listing)) = queue.next().await { - let (buildfile_candidates, listing) = match cell_resolver - .get(path.cell()) - .and_then(|cell_instance| anyhow::Ok((cell_instance.buildfiles(), listing?.included))) - { - Ok(r) => r, - Err(e) => { - collector(Err(e.context(format!( - "Error resolving recursive spec `{}/...`", - path - ))))?; - continue; + let (buildfile_candidates, listing) = { + let r = async { + let buildfiles = file_ops.buildfiles(path.cell()).await?; + anyhow::Ok((buildfiles, listing?.included)) + } + .await; + + match r { + Ok(r) => r, + Err(e) => { + collector(Err(e.context(format!( + "Error resolving recursive spec `{}/...`", + path + ))))?; + continue; + } } }; - if find_buildfile(buildfile_candidates, &listing).is_some() { + if find_buildfile(&buildfile_candidates, &listing).is_some() { collector(Ok(PackageLabel::from_cell_path(path.as_ref())))?; } @@ -135,7 +142,7 @@ pub async fn collect_package_roots( // (due to having some huge things in an `apps/` dir). for entry in listing.iter().rev() { if entry.file_type.is_dir() { - let child = path.join(ForwardRelativePath::unchecked_new(&entry.file_name)); + let child = path.join(&entry.file_name); if seen.insert(child.clone()) { queue.push(list_dir(child)); } @@ -152,10 +159,9 @@ pub async fn collect_package_roots( pub(crate) async fn find_package_roots( cell_path: CellPath, fs: &dyn FileOps, - cells: &CellResolver, ) -> anyhow::Result> { let mut results = Vec::new(); - collect_package_roots(fs, cells, vec![cell_path], |res| { + collect_package_roots(fs, vec![cell_path], |res| { results.push(res); Result::<_, !>::Ok(()) }) diff --git a/app/buck2_common/src/pattern/parse_from_cli.rs b/app/buck2_common/src/pattern/parse_from_cli.rs new file mode 100644 index 0000000000000..9a03ba1a1cedc --- /dev/null +++ b/app/buck2_common/src/pattern/parse_from_cli.rs @@ -0,0 +1,94 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use buck2_core::cells::cell_path::CellPath; +use buck2_core::cells::CellAliasResolver; +use buck2_core::cells::CellResolver; +use buck2_core::fs::project_rel_path::ProjectRelativePath; +use buck2_core::pattern::pattern::ParsedPattern; +use buck2_core::pattern::pattern_type::PatternType; +use buck2_core::pattern::unparsed::UnparsedPatterns; +use dice::DiceComputations; +use gazebo::prelude::*; + +use crate::dice::cells::HasCellResolver; +use crate::pattern::resolve::ResolveTargetPatterns; +use crate::pattern::resolve::ResolvedPattern; +use crate::target_aliases::BuckConfigTargetAliasResolver; +use crate::target_aliases::HasTargetAliasResolver; + +struct PatternParser { + cell_resolver: CellResolver, + cell_alias_resolver: CellAliasResolver, + cwd: CellPath, + target_alias_resolver: BuckConfigTargetAliasResolver, +} + +impl PatternParser { + async fn new( + ctx: &mut DiceComputations<'_>, + cwd: &ProjectRelativePath, + ) -> anyhow::Result { + let cell_resolver = ctx.get_cell_resolver().await?; + + let cwd = cell_resolver.get_cell_path(&cwd)?; + let cell_name = cwd.cell(); + + let target_alias_resolver = ctx.target_alias_resolver().await?; + let cell_alias_resolver = ctx.get_cell_alias_resolver(cell_name).await?; + + Ok(Self { + cell_resolver, + cell_alias_resolver, + cwd, + target_alias_resolver, + }) + } + + fn parse_pattern(&self, pattern: &str) -> anyhow::Result> { + ParsedPattern::parse_relaxed( + &self.target_alias_resolver, + self.cwd.as_ref(), + pattern, + &self.cell_resolver, + &self.cell_alias_resolver, + ) + } +} + +/// Parse target patterns out of command line arguments. +/// +/// The format allowed here is more relaxed than in build files and elsewhere, so only use this +/// with strings passed by the user on the CLI. +/// See `ParsedPattern::parse_relaxed` for details. 
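+/// An illustrative example (paths are hypothetical): with `cwd = app/cli`, the
+/// arguments `[":bin", "//app/..."]` resolve to the target `bin` in the package
+/// at `app/cli` plus the recursive pattern `//app/...`, after any `[alias]`
+/// buckconfig entries have been expanded.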
+pub async fn parse_patterns_from_cli_args( + ctx: &mut DiceComputations<'_>, + target_patterns: &[String], + cwd: &ProjectRelativePath, +) -> anyhow::Result>> { + let parser = PatternParser::new(ctx, cwd).await?; + + target_patterns.try_map(|value| parser.parse_pattern(&value)) +} + +pub async fn parse_patterns_from_cli_args_typed( + ctx: &mut DiceComputations<'_>, + patterns: &UnparsedPatterns, +) -> anyhow::Result>> { + parse_patterns_from_cli_args(ctx, patterns.patterns(), patterns.working_dir()).await +} + +pub async fn parse_and_resolve_patterns_from_cli_args( + ctx: &mut DiceComputations<'_>, + target_patterns: &[String], + cwd: &ProjectRelativePath, +) -> anyhow::Result> { + let patterns = parse_patterns_from_cli_args(ctx, target_patterns, cwd).await?; + ResolveTargetPatterns::resolve(ctx, &patterns).await +} diff --git a/app/buck2_common/src/pattern/resolve.rs b/app/buck2_common/src/pattern/resolve.rs index c658daa1298fe..88154c2906f5f 100644 --- a/app/buck2_common/src/pattern/resolve.rs +++ b/app/buck2_common/src/pattern/resolve.rs @@ -8,22 +8,23 @@ */ use anyhow::Context; -use buck2_core::cells::CellResolver; use buck2_core::package::PackageLabel; -use buck2_core::pattern::display_precise_pattern; +use buck2_core::pattern::pattern::display_precise_pattern; +use buck2_core::pattern::pattern::PackageSpec; +use buck2_core::pattern::pattern::ParsedPattern; use buck2_core::pattern::pattern_type::ConfiguredProvidersPatternExtra; use buck2_core::pattern::pattern_type::PatternType; -use buck2_core::pattern::PackageSpec; -use buck2_core::pattern::ParsedPattern; use buck2_core::target::name::TargetName; +use dice::DiceComputations; use dupe::Dupe; use gazebo::prelude::VecExt; use indexmap::IndexMap; +use crate::dice::file_ops::DiceFileOps; use crate::file_ops::FileOps; use crate::pattern::package_roots::find_package_roots; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum ResolvedPatternError { #[error("Expecting {0} pattern, got `{1}`")] InvalidPattern(&'static str, String), @@ -88,9 +89,22 @@ impl ResolvedPattern { } } -/// Resolves a list of [ParsedPattern] to a [ResolvedPattern]. -pub async fn resolve_target_patterns( - cell_resolver: &CellResolver, +pub struct ResolveTargetPatterns; + +impl ResolveTargetPatterns { + /// Resolves a list of [ParsedPattern] to a [ResolvedPattern]. + pub async fn resolve( + ctx: &mut DiceComputations<'_>, + patterns: &[ParsedPattern
<T>
], + ) -> anyhow::Result> { + ctx.with_linear_recompute(|ctx| async move { + resolve_target_patterns_impl(patterns, &DiceFileOps(&ctx)).await + }) + .await + } +} + +async fn resolve_target_patterns_impl( patterns: &[ParsedPattern
<T>
], file_ops: &dyn FileOps, ) -> anyhow::Result> { @@ -104,7 +118,7 @@ pub async fn resolve_target_patterns( resolved.add_package(package.dupe()); } ParsedPattern::Recursive(cell_path) => { - let roots = find_package_roots(cell_path.clone(), file_ops, cell_resolver) + let roots = find_package_roots(cell_path.clone(), file_ops) .await .context("Error resolving recursive target pattern.")?; for package in roots { @@ -120,22 +134,19 @@ pub async fn resolve_target_patterns( mod tests { use std::collections::BTreeMap; use std::collections::BTreeSet; - use std::collections::HashMap; use std::marker::PhantomData; use std::sync::Arc; - use buck2_core::cells::alias::NonEmptyCellAlias; use buck2_core::cells::cell_root_path::CellRootPathBuf; use buck2_core::cells::name::CellName; use buck2_core::cells::CellResolver; - use buck2_core::cells::CellsAggregator; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; use buck2_core::package::PackageLabel; + use buck2_core::pattern::pattern::PackageSpec; + use buck2_core::pattern::pattern::ParsedPattern; use buck2_core::pattern::pattern_type::PatternType; use buck2_core::pattern::pattern_type::ProvidersPatternExtra; use buck2_core::pattern::pattern_type::TargetPatternExtra; - use buck2_core::pattern::PackageSpec; - use buck2_core::pattern::ParsedPattern; use buck2_core::provider::label::NonDefaultProvidersName; use buck2_core::provider::label::ProviderName; use buck2_core::provider::label::ProvidersName; @@ -146,7 +157,7 @@ mod tests { use crate::file_ops::testing::TestFileOps; use crate::file_ops::FileOps; - use crate::pattern::resolve::resolve_target_patterns; + use crate::pattern::resolve::resolve_target_patterns_impl; use crate::pattern::resolve::ResolvedPattern; #[derive(Clone)] @@ -158,26 +169,17 @@ mod tests { impl TestPatternResolver { fn new(cells: &[(&str, &str)], files: &[&str]) -> anyhow::Result { let resolver = { - let mut agg = CellsAggregator::new(); - let mut cell_paths = HashMap::new(); - for (name, path) in cells { - cell_paths.insert(*name, *path); - } + let cells: Vec<_> = cells + .iter() + .map(|(name, path)| { + ( + CellName::testing_new(name), + CellRootPathBuf::testing_new(path), + ) + }) + .collect(); - for (_, path) in cells { - for (alias, alias_path) in &cell_paths { - agg.add_cell_entry( - CellRootPathBuf::new(ProjectRelativePathBuf::try_from( - (*path).to_owned(), - )?), - NonEmptyCellAlias::new((*alias).to_owned())?, - CellRootPathBuf::new(ProjectRelativePathBuf::try_from( - (*alias_path).to_owned(), - )?), - )?; - } - } - agg.make_cell_resolver()? 
+                CellResolver::testing_with_names_and_paths(&cells)
             };
 
             let resolved_files = files
@@ -201,11 +203,16 @@ mod tests {
             T: PatternType,
         {
             let patterns: Vec<_> = patterns.map(|p| {
-                ParsedPattern::<T>::parse_precise(p, CellName::testing_new("root"), &self.resolver)
-                    .unwrap()
+                ParsedPattern::<T>::parse_precise(
+                    p,
+                    CellName::testing_new("root"),
+                    &self.resolver,
+                    &self.resolver.root_cell_cell_alias_resolver(),
+                )
+                .unwrap()
             });
 
-            resolve_target_patterns(&self.resolver, &patterns, &*self.file_ops).await
+            resolve_target_patterns_impl(&patterns, &*self.file_ops).await
         }
     }
@@ -262,11 +269,8 @@ mod tests {
             (
                 PackageLabel::testing_parse("root//some"),
                 PackageSpec::Targets(vec![
-                    (TargetName::unchecked_new("target"), TargetPatternExtra),
-                    (
-                        TargetName::unchecked_new("other_target"),
-                        TargetPatternExtra,
-                    ),
+                    (TargetName::testing_new("target"), TargetPatternExtra),
+                    (TargetName::testing_new("other_target"), TargetPatternExtra),
                 ]),
             ),
             (
@@ -296,19 +300,21 @@ mod tests {
                 PackageLabel::testing_parse("root//some"),
                 PackageSpec::Targets(vec![
                     (
-                        TargetName::unchecked_new("target"),
+                        TargetName::testing_new("target"),
                         ProvidersPatternExtra {
                             providers: ProvidersName::Default,
                         },
                     ),
                     (
-                        TargetName::unchecked_new("other_target"),
+                        TargetName::testing_new("other_target"),
                         ProvidersPatternExtra {
-                            providers: ProvidersName::NonDefault(Box::new(
-                                NonDefaultProvidersName::Named(Box::new([ProviderName::new(
-                                    "my-label".to_owned(),
-                                )
-                                .unwrap()])),
+                            providers: ProvidersName::NonDefault(triomphe::Arc::new(
+                                NonDefaultProvidersName::Named(
+                                    buck2_util::arc_str::ArcSlice::new([ProviderName::new(
+                                        "my-label".to_owned(),
+                                    )
+                                    .unwrap()]),
+                                ),
                             )),
                         },
                     ),
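The new entry points compose: strings from the CLI are parsed into `ParsedPattern`s and then resolved to concrete packages through DICE. A minimal sketch of a hypothetical caller follows; the `DiceComputations` handle, the demo patterns, the use of `ProjectRelativePath::empty()`, and the printing are assumptions for illustration, not part of this diff:

```rust
use buck2_core::fs::project_rel_path::ProjectRelativePath;
use buck2_core::pattern::pattern_type::TargetPatternExtra;
use dice::DiceComputations;

// Hypothetical demo; assumes a DiceComputations handle obtained from a command context.
async fn demo(ctx: &mut DiceComputations<'_>) -> anyhow::Result<()> {
    let cwd = ProjectRelativePath::empty();
    // Parse `//pkg:target`-style strings, then resolve package and recursive
    // patterns to concrete packages in one step.
    let resolved = parse_and_resolve_patterns_from_cli_args::<TargetPatternExtra>(
        ctx,
        &["root//some:target".to_owned(), "root//dir/...".to_owned()],
        cwd,
    )
    .await?;
    // `specs` maps each package to either an explicit target list or `All` (`pkg:`).
    for (package, spec) in resolved.specs {
        eprintln!("{}: {:?}", package, spec);
    }
    Ok(())
}
```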
diff --git a/app/buck2_common/src/result.rs b/app/buck2_common/src/result.rs
deleted file mode 100644
index 666e132cebd3e..0000000000000
--- a/app/buck2_common/src/result.rs
+++ /dev/null
@@ -1,334 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */
-
-use std::fmt::Debug;
-use std::fmt::Display;
-use std::fmt::Formatter;
-use std::sync::Arc;
-
-use allocative::Allocative;
-use dice::DiceError;
-use dupe::Dupe;
-
-/// SharedError is a simple, cloneable Error wrapper. It holds the inner error in an Arc to support Clone.
-///
-/// Propagation of errors via `?` converts automatically between error types, but propagating results directly
-/// as different error types requires use of `shared_error()` or `unshared_error()`.
-///
-/// ```
-/// use buck2_common::result::*;
-///
-/// fn foo() -> Result<(), anyhow::Error> {
-///     bar()?;
-///     bar().unshared_error()
-/// }
-///
-/// fn bar() -> Result<(), SharedError> {
-///     foo()?;
-///     io()?;
-///     foo().shared_error()
-/// }
-///
-/// fn io() -> Result<(), std::io::Error> {
-///     Ok(())
-/// }
-/// ```
-///
-/// Caveat: Each std::error::Error implementation requires its own explicit `impl From<E> for SharedError`, so we add those as needed.
-#[derive(Clone, Dupe, Allocative)]
-pub struct SharedError(Arc<anyhow::Error>);
-
-impl SharedError {
-    pub fn new(e: impl Into<anyhow::Error>) -> SharedError {
-        SharedError(Arc::new(e.into()))
-    }
-
-    pub fn inner(&self) -> &anyhow::Error {
-        &self.0
-    }
-}
-
-impl Display for SharedError {
-    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        // Normally when displaying an anyhow, we format with {:#}.
-        // However, because we also give up source(), that can lead
-        // to O(n^2) output. See the test at the bottom of this file.
-        Display::fmt(&self.0, f)
-    }
-}
-
-/// Because `anyhow::Error` overrides `Debug` to display, we override `Debug` too.
-impl Debug for SharedError {
-    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        Debug::fmt(&self.0, f)
-    }
-}
-
-impl std::error::Error for SharedError {
-    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
-        self.0.source()
-    }
-
-    fn cause(&self) -> Option<&dyn std::error::Error> {
-        #[allow(deprecated)]
-        self.0.cause()
-    }
-}
-
-impl From<&SharedError> for SharedError {
-    fn from(v: &SharedError) -> Self {
-        v.dupe()
-    }
-}
-
-impl From<anyhow::Error> for SharedError {
-    fn from(err: anyhow::Error) -> Self {
-        SharedError(Arc::new(err))
-    }
-}
-
-impl From<Arc<anyhow::Error>> for SharedError {
-    fn from(err: Arc<anyhow::Error>) -> Self {
-        SharedError(err)
-    }
-}
-
-impl From<std::io::Error> for SharedError {
-    fn from(err: std::io::Error) -> Self {
-        SharedError(Arc::new(err.into()))
-    }
-}
-
-impl From<tokio::task::JoinError> for SharedError {
-    fn from(err: tokio::task::JoinError) -> Self {
-        SharedError(Arc::new(err.into()))
-    }
-}
-
-impl From<DiceError> for SharedError {
-    fn from(err: DiceError) -> Self {
-        SharedError(Arc::new(err.into()))
-    }
-}
-
-pub type SharedResult<T> = Result<T, SharedError>;
-
-/// shared_error() can convert some non-shared Result to Result<T, SharedError>.
-///
-/// The `?` operator will automatically convert non-shared Error to SharedError, so shared_error()
-/// is usually unnecessary except when propagating a Result directly as a return value.
-pub trait ToSharedResultExt<T> {
-    fn shared_error(self) -> SharedResult<T>;
-}
-
-impl<T, E> ToSharedResultExt<T> for Result<T, E>
-where
-    SharedError: From<E>,
-{
-    fn shared_error(self) -> SharedResult<T> {
-        Ok(self?)
-    }
-}
-
-/// unshared_error() can be used to convert a SharedResult to a normal anyhow::Result. The inner error will still be shared.
-///
-/// The `?` operator will automatically convert SharedError to non-shared Error in the same way,
-/// so unshared_error() is only necessary when propagating a SharedResult directly.
-pub trait ToUnsharedResultExt<T, E> {
-    fn unshared_error(self) -> Result<T, E>;
-}
-
-impl<T> ToUnsharedResultExt<T, anyhow::Error> for SharedResult<T> {
-    fn unshared_error(self) -> Result<T, anyhow::Error> {
-        self.map_err(|e| e.into())
-    }
-}
-
-/// Like `downcast_ref()`, but if the error is a [`SharedError`], attempt to downcast
-/// its `inner()` error instead.
-pub fn shared_downcast_ref<'a, E: std::error::Error + Display + Debug + Send + Sync + 'static>(
-    error: &'a (dyn std::error::Error + 'static),
-) -> Option<&'a E> {
-    match error.downcast_ref::<E>() {
-        Some(e) => Some(e),
-        None => match error.downcast_ref::<SharedError>() {
-            Some(shared_err) => shared_err.inner().downcast_ref::<E>(),
-            None => None,
-        },
-    }
-}
-
-/// Traverse `anyhow::Error` in `SharedError` recursively until the context `E` is found.
-pub fn recursive_shared_downcast_ref<E>(error: &anyhow::Error) -> Option<&E>
-where
-    E: Display + Debug + Send + Sync + 'static,
-{
-    let mut err = error;
-    loop {
-        match err.downcast_ref::<E>() {
-            Some(e) => return Some(e),
-            None => match err.downcast_ref::<SharedError>() {
-                Some(shared_err) => {
-                    err = shared_err.inner();
-                }
-                None => return None,
-            },
-        }
-    }
-}
-
-pub trait MayProvideAnyhowError {
-    fn as_anyhow(&self) -> Option<&anyhow::Error>;
-}
-
-impl<T> MayProvideAnyhowError for Result<T, SharedError> {
-    fn as_anyhow(&self) -> Option<&anyhow::Error> {
-        self.as_ref().err().map(|e| e.inner())
-    }
-}
-
-impl<T> MayProvideAnyhowError for anyhow::Result<T> {
-    fn as_anyhow(&self) -> Option<&anyhow::Error> {
-        self.as_ref().err()
-    }
-}
-
-impl MayProvideAnyhowError for SharedError {
-    fn as_anyhow(&self) -> Option<&anyhow::Error> {
-        Some(self.inner())
-    }
-}
-
-impl MayProvideAnyhowError for anyhow::Error {
-    fn as_anyhow(&self) -> Option<&anyhow::Error> {
-        Some(self)
-    }
-}
-
-#[cfg(test)]
-mod tests {
-
-    use super::*;
-
-    // Which ~VALUE words are in the string
-    // Use {||} to make sure we only get real signal
-    fn parts(x: &str) -> Vec<&str> {
-        let mut res: Vec<&str> = x
-            .split_whitespace()
-            .filter_map(|x| x.strip_prefix('~'))
-            .collect();
-        res.sort_unstable();
-        res
-    }
-
-    #[test]
-    fn test_shared_error_display() {
-        // With anyhow/context its easy to either lose the original error (if you forget {:#}),
-        // and also easy to get O(n^2) instances if you do alternative formatting, AND
-        // anyhow looks at Error::source (which it does with alternative formatting).
-
-        let x = anyhow::anyhow!(" ~A ");
-        assert_eq!(parts(&format!("{}", x)), &["A"]);
-        assert_eq!(parts(&format!("{:#}", x)), &["A"]);
-
-        let x = x.context(" ~B ");
-        assert_eq!(parts(&format!("{}", x)), &["B"]);
-        assert_eq!(parts(&format!("{:#}", x)), &["A", "B"]);
-
-        let x: SharedError = x.into();
-        assert_eq!(parts(&format!("{}", x)), &["B"]);
-        assert_eq!(parts(&format!("{:#}", x)), &["A", "B"]);
-
-        let x: anyhow::Error = x.into();
-        assert_eq!(parts(&format!("{}", x)), &["B"]);
-        assert_eq!(parts(&format!("{:#}", x)), &["A", "B"]);
-
-        let x = x.context(" ~C ");
-        assert_eq!(parts(&format!("{}", x)), &["C"]);
-        assert_eq!(parts(&format!("{:#}", x)), &["A", "B", "C"]);
-    }
-
-    #[derive(thiserror::Error, Debug)]
-    #[error("Test error")]
-    struct TestError {
-        source: Option<anyhow::Error>,
-    }
-
-    #[derive(thiserror::Error, Debug)]
-    #[error("Inner error")]
-    struct TestInnerError;
-
-    #[test]
-    fn test_anyhow_chain_works() {
-        let err: anyhow::Error = TestError { source: None }.into();
-        let err_with_source: anyhow::Error = TestError {
-            source: Some(TestInnerError {}.into()),
-        }
-        .into();
-
-        let shared_err: anyhow::Error = SharedError::from(err).into();
-        let shared_err_with_source: anyhow::Error = SharedError::from(err_with_source).into();
-
-        let found_err = shared_err
-            .chain()
-            .find_map(|e| shared_downcast_ref::<TestError>(e));
-        assert!(found_err.is_some());
-
-        let found_err = shared_err_with_source
-            .chain()
-            .find_map(|e| shared_downcast_ref::<TestError>(e));
-        assert!(found_err.is_some());
-    }
-
-    #[test]
-    fn test_anyhow_recursive_downcast() {
-        #[derive(thiserror::Error, Debug)]
-        #[error("Bottom")]
-        struct ContextBottom;
-
-        #[derive(thiserror::Error, Debug)]
-        #[error("Middle")]
-        struct ContextMiddle;
-
-        #[derive(thiserror::Error, Debug)]
-        #[error("Top")]
-        struct ContextTop;
-
-        #[derive(thiserror::Error, Debug)]
-        #[error("None")]
-        struct ContextNone;
-
-        // Construct an error stack such that
-        //
-        // anyhow::Error - ContextTop
-        //   v
-        // SharedError
-        //   v
-        // anyhow::Error - ContextMiddle
-        //   v
-        // SharedError
-        //   v
-        // SharedError
-        //   v
-        // anyhow::Error - ContextBottom
-        let error_stack = anyhow::Error::from(SharedError::new(
-            anyhow::Error::from(SharedError::new(SharedError::from(
-                anyhow::anyhow!("bottom").context(ContextBottom {}),
-            )))
-            .context(ContextMiddle {}),
-        ))
-        .context(ContextTop {});
-
-        assert!(recursive_shared_downcast_ref::<ContextTop>(&error_stack).is_some());
-        assert!(recursive_shared_downcast_ref::<ContextMiddle>(&error_stack).is_some());
-        assert!(recursive_shared_downcast_ref::<ContextBottom>(&error_stack).is_some());
-        assert!(recursive_shared_downcast_ref::<ContextNone>(&error_stack).is_none());
-    }
-}
diff --git a/app/buck2_common/src/scope.rs b/app/buck2_common/src/scope.rs
new file mode 100644
index 0000000000000..bfbcaada9466e
--- /dev/null
+++ b/app/buck2_common/src/scope.rs
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::future::Future;
+
+use buck2_events::dispatch::with_dispatcher_async;
+use buck2_events::dispatch::EventDispatcher;
+use dice::DiceComputations;
+use dupe::Dupe;
+
+use crate::events::HasEvents;
+
+pub struct Scope<'a, 'x, T>
+where
+    T: Send + 'static,
+    'a: 'x,
+{
+    scope: &'x mut async_scoped::TokioScope<'a, T>,
+    dispatcher: EventDispatcher,
+}
+
+impl<'a, 'x, T> Scope<'a, 'x, T>
+where
+    T: Send + 'static,
+    'a: 'x,
+{
+    pub fn spawn_cancellable<F: Future<Output = T> + Send + 'a, Fu: FnOnce() -> T + Send + 'a>(
+        &mut self,
+        f: F,
+        default: Fu,
+    ) {
+        self.scope
+            .spawn_cancellable(with_dispatcher_async(self.dispatcher.dupe(), f), default)
+    }
+}
+
+/// Wrap `async_scoped::TokioScope::scope_and_collect` propagating the event dispatcher.
+pub async unsafe fn scope_and_collect_with_dispatcher<'d, 'a, T, R, F>(
+    dispatcher: EventDispatcher,
+    f: F,
+) -> (
+    R,
+    Vec<<async_scoped::spawner::use_tokio::Tokio as async_scoped::spawner::Spawner<T>>::FutureOutput>,
+)
+where
+    T: Send + 'static,
+    F: for<'x> FnOnce(&mut Scope<'a, 'x, T>) -> R,
+{
+    async_scoped::TokioScope::scope_and_collect(|scope| {
+        let mut scope = Scope { scope, dispatcher };
+        f(&mut scope)
+    })
+    .await
+}
+
+/// Wrap `async_scoped::TokioScope::scope_and_collect` propagating the event dispatcher.
+pub async unsafe fn scope_and_collect_with_dice<'c, 'd, 'a, T, R, F>(
+    ctx: &'c mut DiceComputations<'d>,
+    f: F,
+) -> (
+    R,
+    Vec<<async_scoped::spawner::use_tokio::Tokio as async_scoped::spawner::Spawner<T>>::FutureOutput>,
+)
+where
+    T: Send + 'static,
+    F: for<'x> FnOnce(&'c mut DiceComputations<'d>, &mut Scope<'a, 'x, T>) -> R,
+{
+    let dispatcher = ctx.per_transaction_data().get_dispatcher().dupe();
+    scope_and_collect_with_dispatcher(dispatcher, |scope| f(ctx, scope)).await
+}
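A sketch of how these scope helpers might be used; the task bodies and counts are invented for illustration. The `unsafe` contract is inherited from `async_scoped`: the returned future must be driven to completion rather than dropped mid-poll.

```rust
use dice::DiceComputations;

// Hypothetical usage; assumes the caller holds a DiceComputations whose
// per-transaction data carries an event dispatcher.
async fn demo(ctx: &mut DiceComputations<'_>) -> anyhow::Result<()> {
    let (_unit, outputs) = unsafe {
        scope_and_collect_with_dice(ctx, |_ctx, scope| {
            for i in 0..4u32 {
                // Each task runs with the dispatcher propagated by `Scope`;
                // the closure supplies the value used if the task is cancelled.
                scope.spawn_cancellable(async move { i * 2 }, || 0);
            }
        })
        .await
    };
    // On the Tokio spawner, each output is a join result for one spawned task.
    assert_eq!(outputs.len(), 4);
    Ok(())
}
```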
diff --git a/app/buck2_common/src/sqlite.rs b/app/buck2_common/src/sqlite.rs
index 532c8f8634c66..ce47d04d0884c 100644
--- a/app/buck2_common/src/sqlite.rs
+++ b/app/buck2_common/src/sqlite.rs
@@ -93,7 +93,6 @@ impl KeyValueSqliteTable {
 
 #[cfg(test)]
 mod tests {
-    use std::collections::HashMap;
 
     use buck2_core::fs::project::ProjectRootTemp;
     use buck2_core::fs::project_rel_path::ProjectRelativePath;
diff --git a/app/buck2_common/src/starlark_profiler.rs b/app/buck2_common/src/starlark_profiler.rs
new file mode 100644
index 0000000000000..37ea40785d083
--- /dev/null
+++ b/app/buck2_common/src/starlark_profiler.rs
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::any::Any;
+use std::fmt::Debug;
+
+use allocative::Allocative;
+
+/// `StarlarkProfileDataAndStats`, but without dependency on starlark.
+pub trait StarlarkProfileDataAndStatsDyn: Debug + Allocative + Any + Send + Sync + 'static {
+    fn as_any(&self) -> &dyn Any;
+}
diff --git a/app/buck2_common/src/systemd.rs b/app/buck2_common/src/systemd.rs
new file mode 100644
index 0000000000000..a64241a9aa0f2
--- /dev/null
+++ b/app/buck2_common/src/systemd.rs
@@ -0,0 +1,242 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::ffi::OsStr;
+use std::io::ErrorKind;
+use std::num::ParseIntError;
+use std::sync::OnceLock;
+
+use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf;
+use buck2_util::process;
+
+use crate::init::ResourceControlConfig;
+use crate::init::ResourceControlStatus;
+
+const SYSTEMD_MIN_VERSION: u32 = 253;
+static AVAILABILITY: OnceLock<Option<SystemdNotAvailableReason>> = OnceLock::new();
+
+#[derive(Debug, buck2_error::Error)]
+enum SystemdNotAvailableReason {
+    #[error("Unexpected `systemctl --version` output format: {0}")]
+    UnexpectedVersionOutputFormat(String),
+    #[error("Failed to parse systemd version number into u32: {:#}", .0)]
+    VersionNumberParseError(ParseIntError),
+    #[error("Detected systemd version {detected}. Minimum requirement is {min_required}.")]
+    TooOldSystemdVersion { detected: u32, min_required: u32 },
+    #[error("Systemctl command returned non-zero: {0}")]
+    SystemctlCommandReturnedNonZero(String),
+    #[error("Systemctl command failed to launch: {:#}", .0)]
+    SystemctlCommandLaunchFailed(std::io::Error),
+    #[error("Systemctl command not found in PATH.")]
+    SystemctlCommandNotFound,
+    #[error("Resource control with systemd is only supported on Linux.")]
+    UnsupportedPlatform,
+}
+
+pub enum SystemdPropertySetType {
+    Daemon,
+    Worker,
+}
+
+pub struct SystemdRunner {
+    fixed_systemd_args: Vec<String>,
+}
+
+impl SystemdRunner {
+    fn create(property_set_type: SystemdPropertySetType, config: &ResourceControlConfig) -> Self {
+        // Common settings
+        let mut args = vec![
+            "--user".to_owned(),
+            "--scope".to_owned(),
+            "--quiet".to_owned(),
+        ];
+        if let Some(memory_max) = &config.memory_max {
+            args.push(format!("--property=MemoryMax={}", memory_max.to_owned()));
+            // Without `MemorySwapMax`, the process starts using swap once its total
+            // memory usage reaches `MemoryMax`, until the swap is exhausted. That is
+            // counterintuitive for most expected use cases. Setting `MemorySwapMax`
+            // to zero makes `MemoryMax` a hard limit at which the process is
+            // stopped by the OOM killer.
+            args.push("--property=MemorySwapMax=0".to_owned());
+            // Set `OOMPolicy=kill` explicitly since otherwise (`OOMPolicy=continue`)
+            // some workers can stay alive even after the buck2 daemon has been
+            // OOM-killed.
+            args.push("--property=OOMPolicy=kill".to_owned());
+        }
+
+        // Type-specific settings
+        match property_set_type {
+            SystemdPropertySetType::Daemon => {
+                // Set `--collect` because this is the outermost scope in buck2's context
+                // and we don't assume the upper-layer unit collects the garbage of this
+                // scope after being killed.
+                args.push("--collect".to_owned());
+            }
+            SystemdPropertySetType::Worker => {
+                // TODO
+            }
+        }
+
+        Self {
+            fixed_systemd_args: args,
+        }
+    }
+
+    pub fn create_if_enabled(
+        property_set_type: SystemdPropertySetType,
+        config: &ResourceControlConfig,
+    ) -> anyhow::Result<Option<Self>> {
+        match config.status {
+            ResourceControlStatus::Off => Ok(None),
+            ResourceControlStatus::IfAvailable | ResourceControlStatus::Required => {
+                if let Err(e) = is_available() {
+                    if config.status == ResourceControlStatus::Required {
+                        return Err(e.context("Systemd is unavailable but required by buckconfig"));
+                    }
+                    tracing::warn!(
+                        "Systemd is not available on this system. Continuing without resource control: {:#}",
+                        e
+                    );
+                    Ok(None)
+                } else {
+                    Ok(Some(Self::create(property_set_type, config)))
+                }
+            }
+        }
+    }
+
+    /// Creates a `std::process::Command` to run `program` under a systemd scope unit. `unit_name`
+    /// is an arbitrary string naming the unit so it can be identified later.
+    pub fn background_command_linux<S: AsRef<OsStr>>(
+        &self,
+        program: S,
+        unit_name: String,
+        working_directory: AbsNormPathBuf,
+    ) -> std::process::Command {
+        let mut cmd = process::background_command("systemd-run");
+        cmd.args(&self.fixed_systemd_args);
+        cmd.arg(format!("--working-directory={}", working_directory))
+            .arg(format!("--unit={}", unit_name));
+        cmd.arg(program);
+        cmd
+    }
+}
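To make the control flow concrete, here is a hypothetical wiring of `SystemdRunner` into a daemon launch; the program name, unit name, and fallback branch are invented for the sketch:

```rust
use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf;

// Hypothetical caller: wrap the daemon in a systemd scope when resource
// control is enabled, otherwise fall back to a plain background command.
fn spawn_daemon(
    config: &ResourceControlConfig,
    working_dir: AbsNormPathBuf,
) -> anyhow::Result<std::process::Child> {
    let mut cmd = match SystemdRunner::create_if_enabled(SystemdPropertySetType::Daemon, config)? {
        // Builds roughly: systemd-run --user --scope --quiet [--property=...]
        //   --working-directory=... --unit=buck2-daemon-demo buck2
        Some(runner) => {
            runner.background_command_linux("buck2", "buck2-daemon-demo".to_owned(), working_dir)
        }
        None => process::background_command("buck2"),
    };
    cmd.arg("daemon");
    Ok(cmd.spawn()?)
}
```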
+
+fn validate_systemd_version(raw_stdout: &[u8]) -> Result<(), SystemdNotAvailableReason> {
+    let stdout = String::from_utf8_lossy(raw_stdout);
+    let version = stdout
+        .split(' ')
+        .nth(1)
+        .ok_or_else(|| {
+            SystemdNotAvailableReason::UnexpectedVersionOutputFormat(stdout.to_string())
+        })?
+        .parse::<u32>()
+        .map_err(SystemdNotAvailableReason::VersionNumberParseError)?;
+
+    if version < SYSTEMD_MIN_VERSION {
+        Err(SystemdNotAvailableReason::TooOldSystemdVersion {
+            detected: version,
+            min_required: SYSTEMD_MIN_VERSION,
+        })
+    } else {
+        Ok(())
+    }
+}
+
+fn is_available() -> anyhow::Result<()> {
+    if !cfg!(target_os = "linux") {
+        return Err(SystemdNotAvailableReason::UnsupportedPlatform.into());
+    }
+
+    let unavailable_reason = AVAILABILITY.get_or_init(|| -> Option<SystemdNotAvailableReason> {
+        match process::background_command("systemctl")
+            .arg("--version")
+            .output()
+        {
+            Ok(output) => {
+                if output.status.success() {
+                    match validate_systemd_version(&output.stdout) {
+                        Ok(_) => None,
+                        Err(e) => Some(e),
+                    }
+                } else {
+                    Some(SystemdNotAvailableReason::SystemctlCommandReturnedNonZero(
+                        String::from_utf8_lossy(&output.stderr).to_string(),
+                    ))
+                }
+            }
+            Err(e) => match e.kind() {
+                ErrorKind::NotFound => Some(SystemdNotAvailableReason::SystemctlCommandNotFound),
+                _ => Some(SystemdNotAvailableReason::SystemctlCommandLaunchFailed(e)),
+            },
+        }
+    });
+
+    match unavailable_reason {
+        None => Ok(()),
+        Some(r) => Err(r.into()),
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_validate_systemd_version_normal() {
+        let raw_output = "systemd 253 (v253.7-1.9.hs+fb.el9)".as_bytes();
+        assert!(validate_systemd_version(raw_output).is_ok());
+    }
+
+    #[test]
+    fn test_validate_systemd_version_unexpected_format() {
+        let raw_output = "abc".as_bytes();
+        assert!(matches!(
+            validate_systemd_version(raw_output).unwrap_err(),
+            SystemdNotAvailableReason::UnexpectedVersionOutputFormat(..)
+        ));
+    }
+
+    #[test]
+    fn test_validate_systemd_version_empty() {
+        let raw_output = "".as_bytes();
+        assert!(matches!(
+            validate_systemd_version(raw_output).unwrap_err(),
+            SystemdNotAvailableReason::UnexpectedVersionOutputFormat(..)
+        ));
+    }
+
+    #[test]
+    fn test_validate_systemd_version_unexpected_version() {
+        let raw_output = "systemd v253.7-1.9.hs+fb.el9".as_bytes();
+        assert!(matches!(
+            validate_systemd_version(raw_output).unwrap_err(),
+            SystemdNotAvailableReason::VersionNumberParseError(..)
+        ));
+    }
+
+    #[test]
+    fn test_validate_systemd_version_old_version() {
+        let raw_output = "systemd 111 (v253.7-1.9.hs+fb.el9)".as_bytes();
+        assert!(matches!(
+            validate_systemd_version(raw_output).unwrap_err(),
+            SystemdNotAvailableReason::TooOldSystemdVersion { .. }
+        ));
+    }
+
+    #[cfg(not(target_os = "linux"))]
+    #[test]
+    fn test_always_unavailable_on_nonlinux() {
+        assert!(matches!(
+            is_available()
+                .unwrap_err()
+                .downcast::<SystemdNotAvailableReason>()
+                .unwrap(),
+            SystemdNotAvailableReason::UnsupportedPlatform
+        ));
+    }
+}
diff --git a/app/buck2_common/src/target_aliases.rs b/app/buck2_common/src/target_aliases.rs
index ae64eda9185f5..794ef84404a9d 100644
--- a/app/buck2_common/src/target_aliases.rs
+++ b/app/buck2_common/src/target_aliases.rs
@@ -9,24 +9,20 @@
 
 use allocative::Allocative;
 use async_trait::async_trait;
-use buck2_core::cells::name::CellName;
-use buck2_core::fs::project_rel_path::ProjectRelativePath;
-use buck2_core::package::PackageLabel;
 use buck2_core::target_aliases::TargetAliasResolver;
+use buck2_futures::cancellation::CancellationContext;
 use derive_more::Display;
 use dice::DiceComputations;
 use dice::Key;
 use dupe::Dupe;
 use indexmap::IndexSet;
 use itertools::Itertools;
-use more_futures::cancellation::CancellationContext;
 
 use crate::dice::cells::HasCellResolver;
+use crate::legacy_configs::configs::LegacyBuckConfig;
 use crate::legacy_configs::dice::HasLegacyConfigs;
-use crate::legacy_configs::LegacyBuckConfig;
-use crate::result::SharedResult;
 
-#[derive(thiserror::Error, Debug)]
+#[derive(buck2_error::Error, Debug)]
 enum AliasResolutionError {
     #[error("No [alias] section in buckconfig")]
     MissingAliasSection,
@@ -74,7 +70,7 @@ impl TargetAliasResolver for BuckConfigTargetAliasResolver {
 }
 
 impl BuckConfigTargetAliasResolver {
-    pub fn new(config: LegacyBuckConfig) -> Self {
+    fn new(config: LegacyBuckConfig) -> Self {
         Self { config }
     }
 
@@ -129,33 +125,24 @@ impl BuckConfigTargetAliasResolver {
 
 #[async_trait]
 pub trait HasTargetAliasResolver {
-    async fn target_alias_resolver_for_cell(
-        &self,
-        cell_name: CellName,
-    ) -> anyhow::Result<BuckConfigTargetAliasResolver>;
-
-    async fn target_alias_resolver_for_working_dir(
-        &self,
-        working_dir: &ProjectRelativePath,
-    ) -> anyhow::Result<BuckConfigTargetAliasResolver>;
+    async fn target_alias_resolver(&mut self) -> anyhow::Result<BuckConfigTargetAliasResolver>;
 }
 
 #[derive(Debug, Display, Hash, PartialEq, Eq, Clone, Allocative)]
-struct TargetAliasResolverKey {
-    cell_name: CellName,
-}
+struct TargetAliasResolverKey();
 
 #[async_trait]
 impl Key for TargetAliasResolverKey {
-    type Value = SharedResult<BuckConfigTargetAliasResolver>;
+    type Value = buck2_error::Result<BuckConfigTargetAliasResolver>;
 
     async fn compute(
         &self,
         ctx: &mut DiceComputations,
         _cancellations: &CancellationContext,
-    ) -> SharedResult<BuckConfigTargetAliasResolver> {
-        let legacy_configs = ctx.get_legacy_config_for_cell(self.cell_name).await?;
-        Ok(legacy_configs.target_alias_resolver())
+    ) -> buck2_error::Result<BuckConfigTargetAliasResolver> {
+        let root_cell = ctx.get_cell_resolver().await?.root_cell();
+        let legacy_configs = ctx.get_legacy_config_for_cell(root_cell).await?;
+        Ok(BuckConfigTargetAliasResolver::new(legacy_configs.dupe()))
     }
 
     fn equality(x: &Self::Value, y: &Self::Value) -> bool {
@@ -167,25 +154,9 @@ impl Key for TargetAliasResolverKey {
 }
 
 #[async_trait]
-impl HasTargetAliasResolver for DiceComputations {
-    async fn target_alias_resolver_for_cell(
-        &self,
-        cell_name: CellName,
-    ) -> anyhow::Result<BuckConfigTargetAliasResolver> {
-        Ok(self
-            .compute(&TargetAliasResolverKey { cell_name })
-            .await??)
- } - - async fn target_alias_resolver_for_working_dir( - &self, - working_dir: &ProjectRelativePath, - ) -> anyhow::Result { - let cell_resolver = self.get_cell_resolver().await?; - let working_dir = - PackageLabel::from_cell_path(cell_resolver.get_cell_path(&working_dir)?.as_ref()); - let cell_name = working_dir.as_cell_path().cell(); - self.target_alias_resolver_for_cell(cell_name).await +impl HasTargetAliasResolver for DiceComputations<'_> { + async fn target_alias_resolver(&mut self) -> anyhow::Result { + Ok(self.compute(&TargetAliasResolverKey()).await??) } } @@ -200,9 +171,9 @@ mod tests { #[test] fn test_aliases() -> anyhow::Result<()> { - let config = legacy_configs::testing::parse( + let config = legacy_configs::configs::testing::parse( &[( - "/config", + "config", indoc!( r#" [alias] @@ -219,7 +190,7 @@ mod tests { "# ), )], - "/config", + "config", )?; let target_alias_resolver = BuckConfigTargetAliasResolver::new(config); diff --git a/app/buck2_common/src/temp_path.rs b/app/buck2_common/src/temp_path.rs index b2e41111d92b9..a51d89b4b3993 100644 --- a/app/buck2_common/src/temp_path.rs +++ b/app/buck2_common/src/temp_path.rs @@ -75,14 +75,14 @@ mod tests { let temp_path = TempPath::new().unwrap(); let path = temp_path.path().to_path_buf(); - assert!(!fs::try_exists(&path).unwrap()); + assert!(!path.try_exists().unwrap()); fs::write(&path, "hello").unwrap(); - assert!(fs::try_exists(&path).unwrap(), "Sanity check"); + assert!(path.try_exists().unwrap(), "Sanity check"); temp_path.close().unwrap(); - assert!(!fs::try_exists(&path).unwrap()); + assert!(!path.try_exists().unwrap()); } } diff --git a/app/buck2_configured/BUCK b/app/buck2_configured/BUCK index cda09eaac5583..62be0c0f10141 100644 --- a/app/buck2_configured/BUCK +++ b/app/buck2_configured/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -13,19 +12,19 @@ rust_library( "fbsource//third-party/rust:async-trait", "fbsource//third-party/rust:derive_more", "fbsource//third-party/rust:futures", - "fbsource//third-party/rust:indexmap", - "fbsource//third-party/rust:thiserror", + "fbsource//third-party/rust:itertools", "//buck2/allocative/allocative:allocative", "//buck2/app/buck2_build_api:buck2_build_api", + "//buck2/app/buck2_build_signals:buck2_build_signals", "//buck2/app/buck2_common:buck2_common", "//buck2/app/buck2_core:buck2_core", - "//buck2/app/buck2_execute:buck2_execute", + "//buck2/app/buck2_error:buck2_error", + "//buck2/app/buck2_futures:buck2_futures", "//buck2/app/buck2_node:buck2_node", "//buck2/app/buck2_util:buck2_util", "//buck2/dice/dice:dice", "//buck2/gazebo/dupe:dupe", "//buck2/gazebo/gazebo:gazebo", - "//buck2/shed/more_futures:more_futures", "//buck2/starlark-rust/starlark_map:starlark_map", ], ) diff --git a/app/buck2_configured/Cargo.toml b/app/buck2_configured/Cargo.toml index 9ca17e9e5828d..09f90cb6945e5 100644 --- a/app/buck2_configured/Cargo.toml +++ b/app/buck2_configured/Cargo.toml @@ -1,26 +1,28 @@ [package] +edition = "2021" +license = { workspace = true } name = "buck2_configured" +repository = { workspace = true } version = "0.1.0" -edition = "2021" [dependencies] anyhow = { workspace = true } async-trait = { workspace = true } derive_more = { workspace = true } futures = { workspace = true } -indexmap = { workspace = true } -thiserror = { workspace = true } +itertools = { workspace = true } allocative = { workspace = true } dice = { workspace = true } dupe = { 
workspace = true }
gazebo = { workspace = true }
-more_futures = { workspace = true }
starlark_map = { workspace = true }

buck2_build_api = { workspace = true }
+buck2_build_signals = { workspace = true }
buck2_common = { workspace = true }
buck2_core = { workspace = true }
-buck2_execute = { workspace = true }
+buck2_error = { workspace = true }
+buck2_futures = { workspace = true }
buck2_node = { workspace = true }
buck2_util = { workspace = true }
diff --git a/app/buck2_configured/src/calculation.rs b/app/buck2_configured/src/calculation.rs
index fdad3757fdba8..2bb322cc5e881 100644
--- a/app/buck2_configured/src/calculation.rs
+++ b/app/buck2_configured/src/calculation.rs
@@ -11,23 +11,27 @@ use std::sync::Arc;
 
 use async_trait::async_trait;
 use buck2_common::dice::cycles::CycleAdapterDescriptor;
-use buck2_common::result::SharedResult;
+use buck2_common::global_cfg_options::GlobalCfgOptions;
 use buck2_core::configuration::data::ConfigurationData;
 use buck2_core::target::configured_target_label::ConfiguredTargetLabel;
-use buck2_core::target::label::TargetLabel;
+use buck2_core::target::label::label::TargetLabel;
+use buck2_core::target::target_configured_target_label::TargetConfiguredTargetLabel;
 use buck2_node::cfg_constructor::CFG_CONSTRUCTOR_CALCULATION_IMPL;
 use buck2_node::nodes::frontend::TargetGraphCalculation;
 use buck2_node::nodes::unconfigured::RuleKind;
+use buck2_node::nodes::unconfigured::TargetNode;
+use buck2_node::super_package::SuperPackage;
 use buck2_node::target_calculation::ConfiguredTargetCalculationImpl;
 use buck2_node::target_calculation::CONFIGURED_TARGET_CALCULATION;
 use buck2_util::cycle_detector::CycleDescriptor;
 use derive_more::Display;
 use dice::DiceComputations;
+use dice::DynKey;
 use dupe::Dupe;
 use gazebo::prelude::*;
-use thiserror::Error;
 
 use crate::configuration::calculation::ConfigurationCalculation;
+use crate::configuration::calculation::ExecutionPlatformResolutionKey;
 use crate::nodes::calculation::get_execution_platform_toolchain_dep;
 use crate::nodes::calculation::ConfiguredTargetNodeKey;
@@ -41,14 +45,20 @@ pub(crate) fn init_configured_target_calculation() {
 impl ConfiguredTargetCalculationImpl for ConfiguredTargetCalculationInstance {
     async fn get_configured_target(
         &self,
-        ctx: &DiceComputations,
+        ctx: &mut DiceComputations<'_>,
         target: &TargetLabel,
-        global_target_platform: Option<&TargetLabel>,
+        global_cfg_options: &GlobalCfgOptions,
     ) -> anyhow::Result<ConfiguredTargetLabel> {
-        let node = ctx.get_target_node(target).await?;
-
-        let get_platform_configuration = async || -> SharedResult<ConfigurationData> {
-            let current_cfg = match global_target_platform {
+        let (node, super_package) = ctx.get_target_node_with_super_package(target).await?;
+
+        async fn get_platform_configuration(
+            ctx: &mut DiceComputations<'_>,
+            global_cfg_options: &GlobalCfgOptions,
+            target: &TargetLabel,
+            node: &TargetNode,
+            super_package: &SuperPackage,
+        ) -> buck2_error::Result<ConfigurationData> {
+            let current_cfg = match global_cfg_options.target_platform.as_ref() {
                 Some(global_target_platform) => {
                     ctx.get_platform_configuration(global_target_platform)
                         .await?
@@ -61,44 +71,68 @@ impl ConfiguredTargetCalculationImpl for ConfiguredTargetCalculationInstance {
 
             Ok(CFG_CONSTRUCTOR_CALCULATION_IMPL
                 .get()?
-                .eval_cfg_constructor(ctx, current_cfg)
+                .eval_cfg_constructor(
+                    ctx,
+                    node.as_ref(),
+                    super_package,
+                    current_cfg,
+                    &global_cfg_options.cli_modifiers,
+                    node.rule_type(),
+                )
                 .await?)
-        };
+        }
 
         match node.rule_kind() {
             RuleKind::Configuration => Ok(target.configure(ConfigurationData::unbound())),
-            RuleKind::Normal => Ok(target.configure(get_platform_configuration().await?)),
+            RuleKind::Normal => Ok(target.configure(
+                get_platform_configuration(ctx, global_cfg_options, target, &node, &super_package)
+                    .await?,
+            )),
             RuleKind::Toolchain => {
-                let cfg = get_platform_configuration().await?;
-                let exec_cfg =
-                    get_execution_platform_toolchain_dep(ctx, &target.configure(cfg.dupe()), &node)
-                        .await?
-                        .require_compatible()?
-                        .cfg();
+                let cfg = get_platform_configuration(
+                    ctx,
+                    global_cfg_options,
+                    target,
+                    &node,
+                    &super_package,
+                )
+                .await?;
+                let exec_cfg = get_execution_platform_toolchain_dep(
+                    ctx,
+                    &TargetConfiguredTargetLabel::new_configure(target, cfg.dupe()),
+                    node.as_ref(),
+                )
+                .await?
+                .require_compatible()?
+                .cfg();
                 Ok(target.configure_with_exec(cfg, exec_cfg.cfg().dupe()))
             }
         }
    }
}

-#[derive(Error, Debug, Clone, Dupe)]
+#[derive(Debug, buck2_error::Error, Clone, Dupe)]
+#[buck2(input)]
+#[error("{}", display_configured_graph_cycle_error(&.cycle[..]))]
 pub struct ConfiguredGraphCycleError {
     cycle: Arc<Vec<ConfiguredGraphCycleKeys>>,
 }
 
-impl std::fmt::Display for ConfiguredGraphCycleError {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        writeln!(
-            f,
-            "Configured target cycle detected (`->` means \"depends on\"):"
-        )?;
-        for p in self.cycle.iter() {
-            writeln!(f, "  {} ->", p)?;
-        }
-        // point back at the first item in the cycle.
-        writeln!(f, "  {}", self.cycle.first().unwrap())?;
-        Ok(())
+fn display_configured_graph_cycle_error(cycle: &[ConfiguredGraphCycleKeys]) -> String {
+    use std::fmt::Write;
+
+    let mut s = String::new();
+    writeln!(
+        s,
+        "Configured target cycle detected (`->` means \"depends on\"):"
+    )
+    .unwrap();
+    for p in cycle.iter() {
+        writeln!(s, "  {} ->", p).unwrap();
     }
+    // point back at the first item in the cycle.
+    writeln!(s, "  {}", cycle.first().unwrap()).unwrap();
+    s
 }
 
 // TODO(cjhopman): There's other keys that could be involved in a cycle in the configured graph and they should probably also be tracked
@@ -106,8 +140,10 @@ impl std::fmt::Display for ConfiguredGraphCycleError {
 // configured graph cycles.
#[derive(Debug, Display, Clone, Eq, PartialEq, Hash)] pub enum ConfiguredGraphCycleKeys { - #[display(fmt = "{}", _0)] + #[display("{}", _0)] ConfiguredTargetNode(ConfiguredTargetNodeKey), + #[display("{}", _0)] + ExecutionPlatformResolution(ExecutionPlatformResolutionKey), } #[derive(Debug)] @@ -126,10 +162,16 @@ impl CycleDescriptor for ConfiguredGraphCycleDescriptor { } impl CycleAdapterDescriptor for ConfiguredGraphCycleDescriptor { - fn to_key(key: &dyn std::any::Any) -> Option { + fn to_key(key: &DynKey) -> Option { if let Some(v) = key.downcast_ref::() { return Some(ConfiguredGraphCycleKeys::ConfiguredTargetNode(v.dupe())); } + if let Some(v) = key.downcast_ref::() { + return Some(ConfiguredGraphCycleKeys::ExecutionPlatformResolution( + v.dupe(), + )); + } + None } } diff --git a/app/buck2_configured/src/nodes/mod.rs b/app/buck2_configured/src/configuration.rs similarity index 100% rename from app/buck2_configured/src/nodes/mod.rs rename to app/buck2_configured/src/configuration.rs diff --git a/app/buck2_configured/src/configuration/calculation.rs b/app/buck2_configured/src/configuration/calculation.rs index fe08be3dc92a4..3e691fb718aa2 100644 --- a/app/buck2_configured/src/configuration/calculation.rs +++ b/app/buck2_configured/src/configuration/calculation.rs @@ -8,24 +8,24 @@ */ use std::sync::Arc; - -use allocative::Allocative;use starlark_map::small_map::SmallMap; use anyhow::Context; + +use itertools::Itertools; +use allocative::Allocative; use async_trait::async_trait;use buck2_build_api::interpreter::rule_defs::provider::builtin::platform_info::FrozenPlatformInfo; use buck2_common::dice::cells::HasCellResolver; use buck2_common::legacy_configs::dice::HasLegacyConfigs; -use buck2_common::legacy_configs::parse_config_section_and_key; -use buck2_common::result::SharedResult; -use buck2_common::result::ToSharedResultExt; -use buck2_common::result::ToUnsharedResultExt; +use buck2_common::legacy_configs::configs::parse_config_section_and_key; +use buck2_error::AnyhowContextForError; use buck2_core::cells::name::CellName; +use futures::FutureExt; use starlark_map::unordered_map::UnorderedMap; use buck2_core::configuration::compatibility::MaybeCompatible; use buck2_core::configuration::config_setting::ConfigSettingData; use buck2_core::configuration::data::ConfigurationData; use buck2_core::configuration::pair::ConfigurationNoExec; -use buck2_core::target::configured_target_label::ConfiguredTargetLabel; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; +use buck2_core::provider::label::ProvidersLabel; use buck2_core::execution_types::execution::ExecutionPlatform; use buck2_core::execution_types::execution::ExecutionPlatformError; use buck2_core::execution_types::execution::ExecutionPlatformIncompatibleReason; @@ -34,6 +34,7 @@ use buck2_core::execution_types::execution_platforms::ExecutionPlatformFallback; use buck2_core::execution_types::execution_platforms::ExecutionPlatforms; use buck2_core::execution_types::execution_platforms::ExecutionPlatformsData; use buck2_node::configuration::resolved::ConfigurationNode; +use buck2_node::configuration::resolved::ResolvedConfigurationSettings; use buck2_node::configuration::resolved::ConfigurationSettingKey; use buck2_node::configuration::resolved::ResolvedConfiguration; use buck2_node::configuration::target_platform_detector::TargetPlatformDetector; @@ -43,15 +44,18 @@ use derive_more::Display; use dice::DiceComputations; use dice::Key; use dupe::Dupe; -use gazebo::prelude::*; -use 
indexmap::IndexSet; -use more_futures::cancellation::CancellationContext; -use thiserror::Error; +use buck2_futures::cancellation::CancellationContext; use buck2_build_api::analysis::calculation::RuleAnalysisCalculation; use buck2_build_api::interpreter::rule_defs::provider::builtin::configuration_info::FrozenConfigurationInfo; use buck2_build_api::interpreter::rule_defs::provider::builtin::execution_platform_registration_info::FrozenExecutionPlatformRegistrationInfo; - -#[derive(Debug, Error)] +use buck2_common::legacy_configs::key::BuckconfigKeyRef; +use buck2_core::target::target_configured_target_label::TargetConfiguredTargetLabel; +use buck2_node::configuration::calculation::{ConfigurationCalculationDyn, CONFIGURATION_CALCULATION}; +use buck2_node::execution::{GetExecutionPlatformsImpl, GET_EXECUTION_PLATFORMS, GetExecutionPlatforms, EXECUTION_PLATFORMS_BUCKCONFIG}; +use crate::nodes::calculation::ExecutionPlatformConstraints; + +#[derive(Debug, buck2_error::Error)] +#[buck2(input)] pub enum ConfigurationError { #[error("Expected a ConfigurationInfo provider from `{0}`.")] MissingConfigurationInfoProvider(TargetLabel), @@ -68,17 +72,17 @@ pub enum ConfigurationError { } async fn get_target_platform_detector( - ctx: &DiceComputations, -) -> SharedResult> { + ctx: &mut DiceComputations<'_>, +) -> buck2_error::Result> { // This requires a bit of computation so cache it on the graph. // TODO(cjhopman): Should we construct this (and similar buckconfig-derived objects) as part of the buck config itself? #[derive(Clone, Display, Debug, Dupe, Eq, Hash, PartialEq, Allocative)] - #[display(fmt = "TargetPlatformDetectorKey")] + #[display("TargetPlatformDetectorKey")] struct TargetPlatformDetectorKey; #[async_trait] impl Key for TargetPlatformDetectorKey { - type Value = SharedResult>; + type Value = buck2_error::Result>; async fn compute( &self, ctx: &mut DiceComputations, @@ -88,18 +92,26 @@ async fn get_target_platform_detector( // TODO(cjhopman): Consider revisiting that approach. let resolver = ctx.get_cell_resolver().await?; let root_cell = resolver.root_cell(); + let cell_alias_resolver = ctx.get_cell_alias_resolver(root_cell).await?; Ok(Arc::new( match ctx .get_legacy_config_property( root_cell, - "parser", - "target_platform_detector_spec", + BuckconfigKeyRef { + section: "parser", + property: "target_platform_detector_spec", + }, ) .await? { None => TargetPlatformDetector::empty(), - Some(spec) => TargetPlatformDetector::parse_spec(&spec, root_cell, &resolver)?, + Some(spec) => TargetPlatformDetector::parse_spec( + &spec, + root_cell, + &resolver, + &cell_alias_resolver, + )?, }, )) } @@ -116,26 +128,29 @@ async fn get_target_platform_detector( } /// Returns the configured [ExecutionPlatforms] or None if `build.execution_platforms` is not configured. 
-async fn get_execution_platforms( - ctx: &DiceComputations, -) -> SharedResult> { +async fn compute_execution_platforms( + ctx: &mut DiceComputations<'_>, +) -> buck2_error::Result> { let cells = ctx.get_cell_resolver().await?; + let cell_alias_resolver = ctx.get_cell_alias_resolver(cells.root_cell()).await?; let execution_platforms_target = ctx - .get_legacy_config_property(cells.root_cell(), "build", "execution_platforms") + .get_legacy_config_property(cells.root_cell(), EXECUTION_PLATFORMS_BUCKCONFIG) .await?; let execution_platforms_target = match execution_platforms_target { - Some(v) => TargetLabel::parse(&v, cells.root_cell(), &cells)?, + Some(v) => TargetLabel::parse(&v, cells.root_cell(), &cells, &cell_alias_resolver)?, None => { return Ok(None); } }; - let analysis_result = ctx - .get_configuration_analysis_result(&execution_platforms_target) + let providers = &ctx + // Execution platform won't be supplied as a subtarget + .get_configuration_analysis_result(&ProvidersLabel::default_for( + execution_platforms_target.dupe(), + )) .await?; - let providers = analysis_result.providers(); let result = providers .provider_collection() @@ -143,7 +158,7 @@ async fn get_execution_platforms( .ok_or_else(|| { anyhow::anyhow!( ConfigurationError::MissingExecutionPlatformRegistrationInfo( - execution_platforms_target + execution_platforms_target.dupe() ) ) })?; @@ -153,6 +168,7 @@ async fn get_execution_platforms( platforms.push(platform.to_execution_platform()?); } Ok(Some(Arc::new(ExecutionPlatformsData::new( + execution_platforms_target, platforms, result.fallback()?, )))) @@ -161,38 +177,37 @@ async fn get_execution_platforms( /// Check if a particular execution platform is compatible with the constraints or not. /// Return either Ok/Ok if it is, or a reason if not. async fn check_execution_platform( - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, target_node_cell: CellName, - exec_compatible_with: &[TargetLabel], - exec_deps: &IndexSet, + exec_compatible_with: &[ConfigurationSettingKey], + exec_deps: &[TargetLabel], exec_platform: &ExecutionPlatform, toolchain_allows: &[ToolchainConstraints], ) -> anyhow::Result> { - // First check if the platform satisfies the toolchain requirements - for allowed in toolchain_allows { - if let Err(e) = allowed.allows(exec_platform) { - return Ok(Err( - ExecutionPlatformIncompatibleReason::ExecutionDependencyIncompatible(e), - )); - } - } - let resolved_platform_configuration = ctx .get_resolved_configuration( exec_platform.cfg(), target_node_cell, - exec_compatible_with.iter(), + toolchain_allows + .iter() + .flat_map(ToolchainConstraints::exec_compatible_with) + .chain(exec_compatible_with), ) .await?; // Then check if the platform satisfies compatible_with - for constraint in exec_compatible_with { + for constraint in toolchain_allows + .iter() + .flat_map(ToolchainConstraints::exec_compatible_with) + .chain(exec_compatible_with) + { if resolved_platform_configuration - .matches(constraint) + .settings() + .setting_matches(constraint) .is_none() { return Ok(Err( - ExecutionPlatformIncompatibleReason::ConstraintNotSatisfied(constraint.dupe()), + ExecutionPlatformIncompatibleReason::ConstraintNotSatisfied(constraint.dupe().0), )); } } @@ -200,9 +215,13 @@ async fn check_execution_platform( // Then check that all exec_deps are compatible with the platform. 
We collect errors separately, // so that we do not report an error if we would later find an incompatibility let mut errs = Vec::new(); - for dep in exec_deps { + for dep in toolchain_allows + .iter() + .flat_map(ToolchainConstraints::exec_deps) + .chain(exec_deps) + { match ctx - .get_configured_target_node( + .get_internal_configured_target_node( &dep.configure_pair_no_exec(exec_platform.cfg_pair_no_exec().dupe()), ) .await @@ -230,48 +249,20 @@ async fn check_execution_platform( } async fn get_execution_platforms_enabled( - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, ) -> anyhow::Result { ctx.get_execution_platforms() .await? .context("Execution platforms are not enabled") } -async fn resolve_toolchain_constraints_from_constraints( - ctx: &DiceComputations, - target: &ConfiguredTargetLabel, - exec_compatible_with: &[TargetLabel], - exec_deps: &IndexSet, - toolchain_allows: &[ToolchainConstraints], -) -> SharedResult { - let mut incompatible = SmallMap::new(); - for exec_platform in get_execution_platforms_enabled(ctx).await?.candidates() { - if let Err(e) = check_execution_platform( - ctx, - target.pkg().cell_name(), - exec_compatible_with, - exec_deps, - exec_platform, - toolchain_allows, - ) - .await? - { - incompatible.insert( - exec_platform.dupe(), - Arc::new(e.into_incompatible_platform_reason(target.dupe())), - ); - } - } - Ok(ToolchainConstraints::new(incompatible)) -} - async fn resolve_execution_platform_from_constraints( - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, target_node_cell: CellName, - exec_compatible_with: &[TargetLabel], - exec_deps: &IndexSet, + exec_compatible_with: &[ConfigurationSettingKey], + exec_deps: &[TargetLabel], toolchain_allows: &[ToolchainConstraints], -) -> SharedResult { +) -> buck2_error::Result { let mut skipped = Vec::new(); let execution_platforms = get_execution_platforms_enabled(ctx).await?; for exec_platform in execution_platforms.candidates() { @@ -313,11 +304,11 @@ async fn resolve_execution_platform_from_constraints( } async fn configuration_matches( - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, cfg: &ConfigurationData, target_node_cell: CellName, constraints_and_configs: &ConfigSettingData, -) -> SharedResult { +) -> buck2_error::Result { for (key, value) in &constraints_and_configs.constraints { match cfg.get_constraint_value(key)? 
{ Some(v) if v == value => {} @@ -332,8 +323,10 @@ async fn configuration_matches( let v = ctx .get_legacy_config_property( target_node_cell, - &config_section_and_key.section, - &config_section_and_key.key, + BuckconfigKeyRef { + section: &config_section_and_key.section, + property: &config_section_and_key.key, + }, ) .await?; match v { @@ -346,58 +339,58 @@ async fn configuration_matches( } #[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)] -#[display(fmt = "ExecutionPlatforms")] +#[display("ExecutionPlatforms")] pub struct ExecutionPlatformsKey; #[derive(Clone, Display, Debug, Eq, Hash, PartialEq, Allocative)] -#[display(fmt = "ConfigurationNode({}, {})", cfg_target, target_cfg)] +#[display("ConfigurationNode({}, {})", cfg_target, target_cfg)] struct ConfigurationNodeKey { target_cfg: ConfigurationData, target_cell: CellName, - cfg_target: TargetLabel, + cfg_target: ConfigurationSettingKey, } #[derive(Clone, Display, Debug, Eq, Hash, PartialEq, Allocative)] #[display( - fmt = "ResolvedConfigurationKey(target_cfg: {}, cell: {}, configuration_deps size {})", + "ResolvedConfigurationKey(target_cfg: {}, cell: {}, configuration_deps size {})", target_cfg, target_cell, - "configuration_deps.len()" + configuration_deps.len() )] struct ResolvedConfigurationKey { target_cfg: ConfigurationData, target_cell: CellName, - configuration_deps: Vec, + configuration_deps: Vec, } #[async_trait] -pub trait ConfigurationCalculation { - async fn get_default_platform(&self, target: &TargetLabel) -> SharedResult; +pub(crate) trait ConfigurationCalculation { + async fn get_default_platform( + &mut self, + target: &TargetLabel, + ) -> buck2_error::Result; async fn get_platform_configuration( - &self, + &mut self, target: &TargetLabel, ) -> anyhow::Result; - async fn get_resolved_configuration<'a, T: IntoIterator + Send>( - &self, + async fn get_resolved_configuration< + 'a, + T: IntoIterator + Send, + >( + &mut self, target_cfg: &ConfigurationData, target_node_cell: CellName, configuration_deps: T, - ) -> SharedResult; + ) -> buck2_error::Result; async fn get_configuration_node( - &self, + &mut self, target_cfg: &ConfigurationData, target_cell: CellName, - cfg_target: &TargetLabel, - ) -> SharedResult; - - /// Returns a list of the configured execution platforms. This looks up the providers on the target - /// configured **in the root cell's buckconfig** with key `build.execution_platforms`. If there's no - /// value configured, it will return `None` which indicates we should fallback to the legacy execution - /// platform behavior. - async fn get_execution_platforms(&self) -> SharedResult>; + cfg_target: &ConfigurationSettingKey, + ) -> buck2_error::Result; /// Gets the compatible execution platforms for a give list of compatible_with constraints and execution deps. /// @@ -408,29 +401,50 @@ pub trait ConfigurationCalculation { /// those nodes to just have a single dice dep. This approach has the downside that it is less incremental, but /// we expect these things to change rarely. 
async fn resolve_execution_platform_from_constraints( - &self, + &mut self, target_node_cell: CellName, - exec_compatible_with: &[TargetLabel], - exec_deps: &IndexSet, - toolchain_allows: &[ToolchainConstraints], - ) -> SharedResult; + exec_compatible_with: Arc<[ConfigurationSettingKey]>, + exec_deps: Arc<[TargetLabel]>, + toolchain_allows: Arc<[ToolchainConstraints]>, + ) -> buck2_error::Result; +} + +struct ConfigurationCalculationDynImpl; - async fn resolve_toolchain_constraints_from_constraints( +#[async_trait] +impl ConfigurationCalculationDyn for ConfigurationCalculationDynImpl { + async fn get_platform_configuration( &self, - target: &ConfiguredTargetLabel, - exec_compatible_with: &[TargetLabel], - exec_deps: &IndexSet, - toolchain_allows: &[ToolchainConstraints], - ) -> SharedResult; + ctx: &mut DiceComputations<'_>, + target: &TargetLabel, + ) -> anyhow::Result { + ctx.get_platform_configuration(target).await + } + + async fn get_resolved_configuration( + &self, + ctx: &mut DiceComputations<'_>, + target_cfg: &ConfigurationData, + target_node_cell: CellName, + configuration_deps: &[ConfigurationSettingKey], + ) -> buck2_error::Result { + ctx.get_resolved_configuration(target_cfg, target_node_cell, configuration_deps) + .await + } +} + +pub(crate) fn init_configuration_calculation() { + CONFIGURATION_CALCULATION.init(&ConfigurationCalculationDynImpl); } async fn compute_platform_configuration_no_label_check( - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, target: &TargetLabel, ) -> anyhow::Result { - let result = ctx.get_configuration_analysis_result(target).await?; - let platform_info = result - .providers() + let platform_info = (&ctx + // TODO(T198223238): Not supporting platforms being supplied via subtargets for now + .get_configuration_analysis_result(&ProvidersLabel::default_for(target.dupe())) + .await?) .provider_collection() .builtin_provider::() .ok_or_else(|| ConfigurationError::MissingPlatformInfo(target.dupe()))?; @@ -439,16 +453,20 @@ async fn compute_platform_configuration_no_label_check( /// Basically, evaluate `platform()` rule. async fn compute_platform_configuration( - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, target: &TargetLabel, ) -> anyhow::Result { let configuration_data = compute_platform_configuration_no_label_check(ctx, target).await?; let cell_resolver = ctx.get_cell_resolver().await?; + let cell_alias_resolver = ctx + .get_cell_alias_resolver(cell_resolver.root_cell()) + .await?; let parsed_target = TargetLabel::parse( configuration_data.label()?, cell_resolver.root_cell(), &cell_resolver, + &cell_alias_resolver, ) .context("`PlatformInfo` label for `platform()` rule should be a valid target label")?; @@ -456,8 +474,14 @@ async fn compute_platform_configuration( // `target` may be an `alias` target. In this case we evaluate the label // from the configuration and check it resolves to the same configuration. 
- let cfg_again = compute_platform_configuration_no_label_check(ctx, &parsed_target).await - .context("Checking whether label of returned `PlatformInfo` resolves to the same configuration")?; + let cfg_again = compute_platform_configuration_no_label_check( + ctx, + &parsed_target, + ) + .await + .context( + "Checking whether label of returned `PlatformInfo` resolves to the same configuration", + )?; if cfg_again != configuration_data { return Err(ConfigurationError::PlatformEvalUnequalConfiguration( target.dupe(), @@ -471,9 +495,93 @@ async fn compute_platform_configuration( } #[async_trait] -impl ConfigurationCalculation for DiceComputations { - async fn get_platform_configuration( +impl Key for ResolvedConfigurationKey { + type Value = buck2_error::Result; + + async fn compute( + &self, + ctx: &mut DiceComputations, + _cancellation: &CancellationContext, + ) -> Self::Value { + let config_nodes = ctx + .compute_join(self.configuration_deps.iter(), |ctx, d| { + async move { + ( + d.dupe(), + ctx.get_configuration_node(&self.target_cfg, self.target_cell, d) + .await, + ) + } + .boxed() + }) + .await; + + let mut resolved_settings = UnorderedMap::with_capacity(config_nodes.len()); + for (label, node) in config_nodes { + let node = node?; + resolved_settings.insert(label, node); + } + let resolved_settings = ResolvedConfigurationSettings::new(resolved_settings); + Ok(ResolvedConfiguration::new( + ConfigurationNoExec::new(self.target_cfg.dupe()), + resolved_settings, + )) + } + + fn equality(_: &Self::Value, _: &Self::Value) -> bool { + false + } +} + +#[async_trait] +impl Key for ConfigurationNodeKey { + type Value = buck2_error::Result; + + async fn compute( &self, + ctx: &mut DiceComputations, + _cancellation: &CancellationContext, + ) -> Self::Value { + let providers = ctx + // TODO(T198210718) + .get_configuration_analysis_result(&ProvidersLabel::default_for( + self.cfg_target.0.dupe(), + )) + .await?; + + // capture the result so the temporaries get dropped before providers + let result = match providers + .provider_collection() + .builtin_provider::() + { + Some(configuration_info) => configuration_info, + None => { + return Err::<_, buck2_error::Error>( + ConfigurationError::MissingConfigurationInfoProvider(self.cfg_target.dupe().0) + .into(), + ); + } + } + .to_config_setting_data(); + + let matches = + configuration_matches(ctx, &self.target_cfg, self.target_cell, &result).await?; + + Ok(ConfigurationNode::new(Some(result).filter(|_| matches))) + } + + fn equality(x: &Self::Value, y: &Self::Value) -> bool { + match (x, y) { + (Ok(x), Ok(y)) => x == y, + _ => false, + } + } +} + +#[async_trait] +impl ConfigurationCalculation for DiceComputations<'_> { + async fn get_platform_configuration( + &mut self, target: &TargetLabel, ) -> anyhow::Result { #[derive(derive_more::Display, Debug, Eq, Hash, PartialEq, Clone, Allocative)] @@ -481,7 +589,7 @@ impl ConfigurationCalculation for DiceComputations { #[async_trait] impl Key for PlatformConfigurationKey { - type Value = SharedResult; + type Value = buck2_error::Result; async fn compute( &self, @@ -490,7 +598,7 @@ impl ConfigurationCalculation for DiceComputations { ) -> Self::Value { compute_platform_configuration(ctx, &self.0) .await - .shared_error() + .map_err(buck2_error::Error::from) } fn equality(x: &Self::Value, y: &Self::Value) -> bool { @@ -503,55 +611,34 @@ impl ConfigurationCalculation for DiceComputations { self.compute(&PlatformConfigurationKey(target.dupe())) .await? 
- .unshared_error() + .map_err(anyhow::Error::from) } - async fn get_default_platform(&self, target: &TargetLabel) -> SharedResult { + async fn get_default_platform( + &mut self, + target: &TargetLabel, + ) -> buck2_error::Result { let detector = get_target_platform_detector(self).await?; if let Some(target) = detector.detect(target) { - return self.get_platform_configuration(target).await.shared_error(); + return self + .get_platform_configuration(target) + .await + .map_err(buck2_error::Error::from); } // TODO(cjhopman): This needs to implement buck1's approach to determining target platform, it's currently missing the fallback to buckconfig parser.target_platform. Ok(ConfigurationData::unspecified()) } - async fn get_resolved_configuration<'a, T: IntoIterator + Send>( - &self, + async fn get_resolved_configuration< + 'a, + T: IntoIterator + Send, + >( + &mut self, target_cfg: &ConfigurationData, target_cell: CellName, configuration_deps: T, - ) -> SharedResult { - #[async_trait] - impl Key for ResolvedConfigurationKey { - type Value = SharedResult; - - async fn compute( - &self, - ctx: &mut DiceComputations, - _cancellation: &CancellationContext, - ) -> Self::Value { - let config_futures: Vec<_> = self - .configuration_deps - .map(|d| ctx.get_configuration_node(&self.target_cfg, self.target_cell, d)); - let config_nodes = futures::future::join_all(config_futures).await; - - let mut resolved_settings = UnorderedMap::with_capacity(config_nodes.len()); - for node in config_nodes { - let node = node?; - resolved_settings.insert(ConfigurationSettingKey(node.label().dupe()), node); - } - Ok(ResolvedConfiguration::new( - ConfigurationNoExec::new(self.target_cfg.dupe()), - resolved_settings, - )) - } - - fn equality(_: &Self::Value, _: &Self::Value) -> bool { - false - } - } - - let configuration_deps: Vec = + ) -> buck2_error::Result { + let configuration_deps: Vec = configuration_deps.into_iter().map(|t| t.dupe()).collect(); self.compute(&ResolvedConfigurationKey { target_cfg: target_cfg.dupe(), @@ -562,62 +649,11 @@ impl ConfigurationCalculation for DiceComputations { } async fn get_configuration_node( - &self, + &mut self, target_cfg: &ConfigurationData, target_cell: CellName, - cfg_target: &TargetLabel, - ) -> SharedResult { - #[async_trait] - impl Key for ConfigurationNodeKey { - type Value = SharedResult; - - async fn compute( - &self, - ctx: &mut DiceComputations, - _cancellation: &CancellationContext, - ) -> Self::Value { - let analysis_result = ctx - .get_configuration_analysis_result(&self.cfg_target) - .await?; - let providers = analysis_result.providers(); - - // capture the result so the temporaries get dropped before analysis_result - let result = match providers - .provider_collection() - .builtin_provider::() - { - Some(configuration_info) => configuration_info, - None => { - return Err::<_, anyhow::Error>( - ConfigurationError::MissingConfigurationInfoProvider( - self.cfg_target.dupe(), - ) - .into(), - ) - .shared_error(); - } - } - .to_config_setting_data(); - - let matches = - configuration_matches(ctx, &self.target_cfg, self.target_cell, &result).await?; - - Ok(ConfigurationNode::new( - self.target_cfg.dupe(), - self.cfg_target.dupe(), - result, - matches, - )) - } - - fn equality(x: &Self::Value, y: &Self::Value) -> bool { - match (x, y) { - (Ok(x), Ok(y)) => x == y, - _ => false, - } - } - } - + cfg_target: &ConfigurationSettingKey, + ) -> buck2_error::Result { self.compute(&ConfigurationNodeKey { target_cfg: target_cfg.dupe(), target_cell, @@ -630,61 +666,149 @@ impl 
ConfigurationCalculation for DiceComputations { cfg_target, target_cfg, ) }) - .shared_error() + .map_err(buck2_error::Error::from) } - async fn get_execution_platforms(&self) -> SharedResult> { - #[async_trait] - impl Key for ExecutionPlatformsKey { - type Value = SharedResult>; - async fn compute( - &self, - ctx: &mut DiceComputations, - _cancellation: &CancellationContext, - ) -> Self::Value { - get_execution_platforms(ctx).await - } + async fn resolve_execution_platform_from_constraints( + &mut self, + target_node_cell: CellName, + exec_compatible_with: Arc<[ConfigurationSettingKey]>, + exec_deps: Arc<[TargetLabel]>, + toolchain_allows: Arc<[ToolchainConstraints]>, + ) -> buck2_error::Result { + self.compute(&ExecutionPlatformResolutionKey { + target_node_cell, + exec_compatible_with, + exec_deps, + toolchain_allows, + }) + .await? + } +} - fn equality(_: &Self::Value, _: &Self::Value) -> bool { - // TODO(cjhopman) should these be comparable for caching - false - } +#[derive(Clone, Dupe, Debug, Eq, Hash, PartialEq, Allocative)] +pub struct ExecutionPlatformResolutionKey { + target_node_cell: CellName, + exec_compatible_with: Arc<[ConfigurationSettingKey]>, + exec_deps: Arc<[TargetLabel]>, + toolchain_allows: Arc<[ToolchainConstraints]>, +} + +impl Display for ExecutionPlatformResolutionKey { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Resolving execution platform: cell:{}", + self.target_node_cell + )?; + + if !self.exec_compatible_with.is_empty() { + write!( + f, + ", exec_compatible_with=[{}]", + self.exec_compatible_with.iter().join(", ") + )? + } + + if !self.exec_deps.is_empty() { + write!(f, ", exec_deps=[{}]", self.exec_deps.iter().join(", "))? + } + + let mut iter = self + .toolchain_allows + .iter() + .flat_map(|v| v.exec_compatible_with()) + .peekable(); + if iter.peek().is_some() { + write!(f, ", toolchain_exec_compatible_with=[{}]", iter.join(", "))? + } + + let mut iter = self + .toolchain_allows + .iter() + .flat_map(|v| v.exec_deps()) + .peekable(); + if iter.peek().is_some() { + write!(f, ", toolchain_exec_deps=[{}]", iter.join(", "))? } - self.compute(&ExecutionPlatformsKey).await? 
+ Ok(()) } +} - async fn resolve_execution_platform_from_constraints( +#[async_trait] +impl Key for ExecutionPlatformResolutionKey { + type Value = buck2_error::Result; + + async fn compute( &self, - target_node_cell: CellName, - exec_compatible_with: &[TargetLabel], - exec_deps: &IndexSet, - toolchain_allows: &[ToolchainConstraints], - ) -> SharedResult { + ctx: &mut DiceComputations, + _cancellation: &CancellationContext, + ) -> Self::Value { resolve_execution_platform_from_constraints( - self, - target_node_cell, - exec_compatible_with, - exec_deps, - toolchain_allows, + ctx, + self.target_node_cell, + &self.exec_compatible_with, + &self.exec_deps, + &self.toolchain_allows, ) .await } - async fn resolve_toolchain_constraints_from_constraints( + fn equality(x: &Self::Value, y: &Self::Value) -> bool { + match (x, y) { + (Ok(x), Ok(y)) => x == y, + _ => false, + } + } +} + +struct GetExecutionPlatformsInstance; + +#[async_trait] +impl Key for ExecutionPlatformsKey { + type Value = buck2_error::Result>; + async fn compute( &self, - target: &ConfiguredTargetLabel, - exec_compatible_with: &[TargetLabel], - exec_deps: &IndexSet, - toolchain_allows: &[ToolchainConstraints], - ) -> SharedResult { - resolve_toolchain_constraints_from_constraints( - self, - target, - exec_compatible_with, + ctx: &mut DiceComputations, + _cancellation: &CancellationContext, + ) -> Self::Value { + compute_execution_platforms(ctx).await + } + + fn equality(_: &Self::Value, _: &Self::Value) -> bool { + // TODO(cjhopman) should these be comparable for caching + false + } +} + +#[async_trait] +impl GetExecutionPlatformsImpl for GetExecutionPlatformsInstance { + async fn get_execution_platforms_impl( + &self, + ctx: &mut DiceComputations<'_>, + ) -> buck2_error::Result> { + ctx.compute(&ExecutionPlatformsKey).await? + } + + async fn execution_platform_resolution_one_for_cell( + &self, + dice: &mut DiceComputations<'_>, + exec_deps: Arc<[TargetLabel]>, + toolchain_deps: Arc<[TargetConfiguredTargetLabel]>, + exec_compatible_with: Arc<[ConfigurationSettingKey]>, + cell: CellName, + ) -> buck2_error::Result { + ExecutionPlatformConstraints::new_constraints( exec_deps, - toolchain_allows, + toolchain_deps, + exec_compatible_with, ) + .one_for_cell(dice, cell) .await } } + +pub(crate) fn init_get_execution_platforms() { + GET_EXECUTION_PLATFORMS.init(&GetExecutionPlatformsInstance); +} diff --git a/app/buck2_configured/src/lib.rs b/app/buck2_configured/src/lib.rs index 99a49ac78b633..68c529e25df27 100644 --- a/app/buck2_configured/src/lib.rs +++ b/app/buck2_configured/src/lib.rs @@ -7,7 +7,8 @@ * of this source tree. */ -#![feature(async_closure)] +#![feature(error_generic_member_access)] +#![feature(trait_upcasting)] pub mod calculation; pub mod configuration; @@ -15,5 +16,7 @@ pub mod nodes; pub fn init_late_bindings() { calculation::init_configured_target_calculation(); + configuration::calculation::init_get_execution_platforms(); + configuration::calculation::init_configuration_calculation(); nodes::calculation::init_configured_target_node_calculation(); } diff --git a/app/buck2_configured/src/nodes.rs b/app/buck2_configured/src/nodes.rs new file mode 100644 index 0000000000000..de345a0e1fd6a --- /dev/null +++ b/app/buck2_configured/src/nodes.rs @@ -0,0 +1,10 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub mod calculation; diff --git a/app/buck2_configured/src/nodes/calculation.rs b/app/buck2_configured/src/nodes/calculation.rs index b4c07e4bad0bf..b6a9c2b1de0db 100644 --- a/app/buck2_configured/src/nodes/calculation.rs +++ b/app/buck2_configured/src/nodes/calculation.rs @@ -9,22 +9,26 @@ //! Calculations relating to 'TargetNode's that runs on Dice +use std::iter; use std::sync::Arc; use allocative::Allocative; use anyhow::Context; use async_trait::async_trait; +use buck2_build_api::actions::execute::dice_data::HasFallbackExecutorConfig; +use buck2_build_api::transition::TRANSITION_ATTRS_PROVIDER; use buck2_build_api::transition::TRANSITION_CALCULATION; +use buck2_build_signals::node_key::BuildSignalsNodeKey; +use buck2_build_signals::node_key::BuildSignalsNodeKeyImpl; +use buck2_common::dice::cells::HasCellResolver; use buck2_common::dice::cycles::CycleGuard; -use buck2_common::result::SharedError; -use buck2_common::result::SharedResult; -use buck2_common::result::ToSharedResultExt; -use buck2_common::result::ToUnsharedResultExt; +use buck2_common::legacy_configs::dice::HasLegacyConfigs; +use buck2_common::legacy_configs::key::BuckconfigKeyRef; +use buck2_common::legacy_configs::view::LegacyBuckConfigView; use buck2_core::cells::name::CellName; use buck2_core::configuration::compatibility::IncompatiblePlatformReason; use buck2_core::configuration::compatibility::IncompatiblePlatformReasonCause; use buck2_core::configuration::compatibility::MaybeCompatible; -use buck2_core::configuration::config_setting::ConfigSettingData; use buck2_core::configuration::data::ConfigurationData; use buck2_core::configuration::pair::ConfigurationNoExec; use buck2_core::configuration::pair::ConfigurationWithExec; @@ -32,17 +36,26 @@ use buck2_core::configuration::transition::applied::TransitionApplied; use buck2_core::configuration::transition::id::TransitionId; use buck2_core::execution_types::execution::ExecutionPlatform; use buck2_core::execution_types::execution::ExecutionPlatformResolution; +use buck2_core::pattern::pattern::ParsedPattern; +use buck2_core::pattern::pattern_type::TargetPatternExtra; use buck2_core::plugins::PluginKind; use buck2_core::plugins::PluginKindSet; use buck2_core::plugins::PluginListElemKind; use buck2_core::plugins::PluginLists; use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_core::provider::label::ProvidersLabel; +use buck2_core::soft_error; +use buck2_core::target::configured_or_unconfigured::ConfiguredOrUnconfiguredTargetLabel; use buck2_core::target::configured_target_label::ConfiguredTargetLabel; -use buck2_core::target::label::TargetLabel; -use buck2_execute::execute::dice_data::HasFallbackExecutorConfig; +use buck2_core::target::label::label::TargetLabel; +use buck2_core::target::target_configured_target_label::TargetConfiguredTargetLabel; +use buck2_error::internal_error_anyhow; +use buck2_error::AnyhowContextForError; +use buck2_error::BuckErrorContext; +use buck2_futures::cancellation::CancellationContext; use buck2_node::attrs::configuration_context::AttrConfigurationContext; use buck2_node::attrs::configuration_context::AttrConfigurationContextImpl; +use buck2_node::attrs::configuration_context::PlatformConfigurationError; use buck2_node::attrs::configured_attr::ConfiguredAttr; 
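The import churn in this hunk tracks the `SharedResult` -> `buck2_error::Result` migration that runs through the rest of this file: internal computations now return the crate error type and callers convert at the boundary with `map_err`. A minimal, self-contained sketch of that boundary pattern, using hypothetical `CoreError`/`FacadeError` stand-ins rather than the real `buck2_error` and `anyhow` types:

// CoreError stands in for buck2_error::Error; FacadeError for the
// anyhow::Error still exposed at some API surfaces in this patch.
#[derive(Debug)]
struct CoreError(String);

#[derive(Debug)]
struct FacadeError(String);

impl From<CoreError> for FacadeError {
    fn from(e: CoreError) -> Self {
        FacadeError(e.0)
    }
}

// Internal computation: returns the crate error type, like the new
// `buck2_error::Result` signatures in this diff.
fn compute_internal(ok: bool) -> Result<u32, CoreError> {
    if ok { Ok(42) } else { Err(CoreError("boom".to_owned())) }
}

// Public entry point: converts at the boundary, mirroring the
// `.map_err(anyhow::Error::from)` calls in the patch.
fn compute_public(ok: bool) -> Result<u32, FacadeError> {
    compute_internal(ok).map_err(FacadeError::from)
}

fn main() {
    assert_eq!(compute_public(true).unwrap(), 42);
    assert!(compute_public(false).is_err());
}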
use buck2_node::attrs::configured_traversal::ConfiguredAttrTraversal; use buck2_node::attrs::display::AttrDisplayWithContextExt; @@ -50,31 +63,34 @@ use buck2_node::attrs::inspect_options::AttrInspectOptions; use buck2_node::attrs::internal::EXEC_COMPATIBLE_WITH_ATTRIBUTE_FIELD; use buck2_node::attrs::internal::LEGACY_TARGET_COMPATIBLE_WITH_ATTRIBUTE_FIELD; use buck2_node::attrs::internal::TARGET_COMPATIBLE_WITH_ATTRIBUTE_FIELD; -use buck2_node::configuration::resolved::ConfigurationSettingKeyRef; +use buck2_node::configuration::resolved::ConfigurationSettingKey; use buck2_node::configuration::resolved::ResolvedConfiguration; +use buck2_node::configuration::resolved::ResolvedConfigurationSettings; use buck2_node::configuration::toolchain_constraints::ToolchainConstraints; +use buck2_node::execution::GetExecutionPlatforms; use buck2_node::nodes::configured::ConfiguredTargetNode; use buck2_node::nodes::configured_frontend::ConfiguredTargetNodeCalculation; use buck2_node::nodes::configured_frontend::ConfiguredTargetNodeCalculationImpl; use buck2_node::nodes::configured_frontend::CONFIGURED_TARGET_NODE_CALCULATION; use buck2_node::nodes::frontend::TargetGraphCalculation; use buck2_node::nodes::unconfigured::TargetNode; +use buck2_node::nodes::unconfigured::TargetNodeRef; use buck2_node::visibility::VisibilityError; use derive_more::Display; +use dice::Demand; use dice::DiceComputations; use dice::Key; use dupe::Dupe; -use indexmap::IndexSet; -use more_futures::cancellation::CancellationContext; +use futures::FutureExt; +use itertools::Itertools; use starlark_map::ordered_map::OrderedMap; use starlark_map::small_map::SmallMap; use starlark_map::small_set::SmallSet; -use thiserror::Error; use crate::calculation::ConfiguredGraphCycleDescriptor; use crate::configuration::calculation::ConfigurationCalculation; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum NodeCalculationError { #[error("expected `{0}` attribute to be a list but got `{1}`")] TargetCompatibleNotList(String, String), @@ -84,6 +100,36 @@ enum NodeCalculationError { LEGACY_TARGET_COMPATIBLE_WITH_ATTRIBUTE_FIELD )] BothTargetCompatibleWith(String), + #[error( + "Target {0} configuration transitioned\n\ + old: {1}\n\ + new: {2}\n\ + but attribute: {3}\n\ + resolved with old configuration to: {4}\n\ + resolved with new configuration to: {5}" + )] + TransitionAttrIncompatibleChange( + TargetLabel, + ConfigurationData, + ConfigurationData, + String, + String, + String, + ), + + #[error( + "Target {0} configuration transition is not idempotent + in initial configuration `{1}` + first transitioned to cfg `{2}` + then transitions to cfg `{3}` + Use `buck2 audit configurations {1} {2} {3}` to see the configurations." 
+ )] + TransitionNotIdempotent( + TargetLabel, + ConfigurationData, + ConfigurationData, + ConfigurationData, + ), } enum CompatibilityConstraints { @@ -92,8 +138,8 @@ enum CompatibilityConstraints { } async fn compute_platform_cfgs( - ctx: &DiceComputations, - node: &TargetNode, + ctx: &mut DiceComputations<'_>, + node: TargetNodeRef<'_>, ) -> anyhow::Result> { let mut platform_map = OrderedMap::new(); for platform_target in node.platform_deps() { @@ -105,7 +151,7 @@ async fn compute_platform_cfgs( } async fn legacy_execution_platform( - ctx: &DiceComputations, + ctx: &DiceComputations<'_>, cfg: &ConfigurationNoExec, ) -> ExecutionPlatform { ExecutionPlatform::legacy_execution_platform( @@ -114,7 +160,8 @@ async fn legacy_execution_platform( ) } -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] +#[buck2(input)] enum ToolchainDepError { #[error("Can't find toolchain_dep execution platform using configuration `{0}`")] ToolchainDepMissingPlatform(ConfigurationData), @@ -126,17 +173,17 @@ enum ToolchainDepError { ToolchainTransitionDep(TargetLabel), } -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum PluginDepError { #[error("Plugin dep `{0}` is a toolchain rule")] PluginDepIsToolchainRule(TargetLabel), } pub async fn find_execution_platform_by_configuration( - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, exec_cfg: &ConfigurationData, cfg: &ConfigurationData, -) -> SharedResult { +) -> buck2_error::Result { match ctx.get_execution_platforms().await? { Some(platforms) if exec_cfg != &ConfigurationData::unbound_exec() => { for c in platforms.candidates() { @@ -144,7 +191,7 @@ pub async fn find_execution_platform_by_configuration( return Ok(c.dupe()); } } - Err(SharedError::new( + Err(buck2_error::Error::new( ToolchainDepError::ToolchainDepMissingPlatform(exec_cfg.dupe()), )) } @@ -152,18 +199,17 @@ pub async fn find_execution_platform_by_configuration( } } -#[derive(Default)] -pub struct ExecutionPlatformConstraints { - exec_deps: IndexSet, - toolchain_deps: IndexSet, - exec_compatible_with: Vec, +pub(crate) struct ExecutionPlatformConstraints { + exec_deps: Arc<[TargetLabel]>, + toolchain_deps: Arc<[TargetConfiguredTargetLabel]>, + exec_compatible_with: Arc<[ConfigurationSettingKey]>, } impl ExecutionPlatformConstraints { - pub fn new_constraints( - exec_deps: IndexSet, - toolchain_deps: IndexSet, - exec_compatible_with: Vec, + pub(crate) fn new_constraints( + exec_deps: Arc<[TargetLabel]>, + toolchain_deps: Arc<[TargetConfiguredTargetLabel]>, + exec_compatible_with: Arc<[ConfigurationSettingKey]>, ) -> Self { Self { exec_deps, @@ -173,12 +219,11 @@ impl ExecutionPlatformConstraints { } fn new( - node: &TargetNode, + node: TargetNodeRef, gathered_deps: &GatheredDeps, cfg_ctx: &(dyn AttrConfigurationContext + Sync), - ) -> SharedResult { - let mut exec_compatible_with = Vec::new(); - if let Some(a) = node.attr_or_none( + ) -> buck2_error::Result { + let exec_compatible_with: Arc<[_]> = if let Some(a) = node.attr_or_none( EXEC_COMPATIBLE_WITH_ATTRIBUTE_FIELD, AttrInspectOptions::All, ) { @@ -188,13 +233,16 @@ impl ExecutionPlatformConstraints { a.name ) })?; - for label in ConfiguredTargetNode::attr_as_target_compatible_with(configured_attr.value) - { - exec_compatible_with.push(label.with_context(|| { - format!("attribute `{}`", EXEC_COMPATIBLE_WITH_ATTRIBUTE_FIELD) - })?); - } - } + ConfiguredTargetNode::attr_as_target_compatible_with(configured_attr.value) + .map(|label| { + label.with_context(|| { + format!("attribute `{}`", 
EXEC_COMPATIBLE_WITH_ATTRIBUTE_FIELD) + }) + }) + .collect::>()? + } else { + Arc::new([]) + }; Ok(Self::new_constraints( gathered_deps @@ -205,7 +253,7 @@ impl ExecutionPlatformConstraints { gathered_deps .toolchain_deps .iter() - .map(|c| c.target().dupe()) + .map(|c| c.dupe()) .collect(), exec_compatible_with, )) @@ -213,71 +261,59 @@ impl ExecutionPlatformConstraints { async fn toolchain_allows( &self, - ctx: &DiceComputations, - ) -> SharedResult> { + ctx: &mut DiceComputations<'_>, + ) -> buck2_error::Result> { // We could merge these constraints together, but the time to do that // probably outweighs the benefits given there are likely to only be a few // execution platforms to test. let mut result = Vec::with_capacity(self.toolchain_deps.len()); - for x in &self.toolchain_deps { + for x in self.toolchain_deps.iter() { result.push(execution_platforms_for_toolchain(ctx, x.dupe()).await?) } - Ok(result) + Ok(result.into()) } async fn one( - &self, - ctx: &DiceComputations, - node: &TargetNode, - ) -> SharedResult { + self, + ctx: &mut DiceComputations<'_>, + node: TargetNodeRef<'_>, + ) -> buck2_error::Result { + let toolchain_allows = self.toolchain_allows(ctx).await?; ctx.resolve_execution_platform_from_constraints( node.label().pkg().cell_name(), - &self.exec_compatible_with, - &self.exec_deps, - &self.toolchain_allows(ctx).await?, + self.exec_compatible_with, + self.exec_deps, + toolchain_allows, ) .await } - pub async fn one_for_cell( - &self, - ctx: &DiceComputations, + pub(crate) async fn one_for_cell( + self, + ctx: &mut DiceComputations<'_>, cell: CellName, - ) -> SharedResult { + ) -> buck2_error::Result { + let toolchain_allows = self.toolchain_allows(ctx).await?; ctx.resolve_execution_platform_from_constraints( cell, - &self.exec_compatible_with, - &self.exec_deps, - &self.toolchain_allows(ctx).await?, - ) - .await - } - - async fn many( - &self, - ctx: &DiceComputations, - target: &ConfiguredTargetLabel, - ) -> SharedResult { - ctx.resolve_toolchain_constraints_from_constraints( - target, - &self.exec_compatible_with, - &self.exec_deps, - &self.toolchain_allows(ctx).await?, + self.exec_compatible_with, + self.exec_deps, + toolchain_allows, ) .await } } async fn execution_platforms_for_toolchain( - ctx: &DiceComputations, - target: ConfiguredTargetLabel, -) -> SharedResult { + ctx: &mut DiceComputations<'_>, + target: TargetConfiguredTargetLabel, +) -> buck2_error::Result { #[derive(Clone, Display, Debug, Dupe, Eq, Hash, PartialEq, Allocative)] - struct ExecutionPlatformsForToolchainKey(ConfiguredTargetLabel); + struct ExecutionPlatformsForToolchainKey(TargetConfiguredTargetLabel); #[async_trait] impl Key for ExecutionPlatformsForToolchainKey { - type Value = SharedResult; + type Value = buck2_error::Result; async fn compute( &self, ctx: &mut DiceComputations, @@ -287,9 +323,9 @@ async fn execution_platforms_for_toolchain( if node.transition_deps().next().is_some() { // We could actually check this when defining the rule, but a bit of a corner // case, and much simpler to do so here. 
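`ExecutionPlatformsForToolchainKey` here follows the same DICE `Key` recipe as the other keys in this patch: a hashable key type, an async `compute`, and an `equality` hook that gates cache reuse, with error values never comparing equal so failures are retried. A rough stand-alone model of that recipe (synchronous, with toy `Memo`/`ToyKey` types; the real DICE API differs):

use std::collections::HashMap;

trait CacheKey: Eq + std::hash::Hash + Clone {
    type Value: Clone;
    fn compute(&self) -> Self::Value;
    fn equality(x: &Self::Value, y: &Self::Value) -> bool;
}

#[derive(Clone, PartialEq, Eq, Hash)]
struct ToyKey(u32);

impl CacheKey for ToyKey {
    type Value = Result<u32, String>;
    fn compute(&self) -> Self::Value {
        if self.0 == 0 { Err("zero".into()) } else { Ok(self.0 * 2) }
    }
    // Mirror the diff: two Ok values compare by ==, anything holding an
    // Err counts as changed, so errors are never served from cache.
    fn equality(x: &Self::Value, y: &Self::Value) -> bool {
        matches!((x, y), (Ok(a), Ok(b)) if a == b)
    }
}

struct Memo<K: CacheKey> {
    cache: HashMap<K, K::Value>,
}

impl<K: CacheKey> Memo<K> {
    // Returns the value plus whether the cached copy was reusable.
    fn get(&mut self, key: &K) -> (K::Value, bool) {
        let fresh = key.compute();
        match self.cache.get(key) {
            Some(old) if K::equality(old, &fresh) => (old.clone(), true),
            _ => {
                self.cache.insert(key.clone(), fresh.clone());
                (fresh, false)
            }
        }
    }
}

fn main() {
    let mut memo = Memo { cache: HashMap::new() };
    assert!(!memo.get(&ToyKey(2)).1); // first computation
    assert!(memo.get(&ToyKey(2)).1); // equal value, reused
    assert!(!memo.get(&ToyKey(0)).1); // errors never compare equal
    assert!(!memo.get(&ToyKey(0)).1);
}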
- return Err(SharedError::new(ToolchainDepError::ToolchainTransitionDep( - self.0.unconfigured().dupe(), - ))); + return Err(buck2_error::Error::new( + ToolchainDepError::ToolchainTransitionDep(self.0.unconfigured().dupe()), + )); } let resolved_configuration = &ctx .get_resolved_configuration( @@ -298,7 +334,7 @@ async fn execution_platforms_for_toolchain( node.get_configuration_deps(), ) .await?; - let platform_cfgs = compute_platform_cfgs(ctx, &node).await?; + let platform_cfgs = compute_platform_cfgs(ctx, node.as_ref()).await?; // We don't really need `resolved_transitions` here: // `Traversal` declared above ignores transitioned dependencies. // But we pass `resolved_transitions` here to prevent breakages in the future @@ -311,14 +347,20 @@ async fn execution_platforms_for_toolchain( &platform_cfgs, ); let (gathered_deps, errors_and_incompats) = - gather_deps(&self.0, &node, &cfg_ctx, ctx).await?; + gather_deps(&self.0, node.as_ref(), &cfg_ctx, ctx).await?; if let Some(ret) = errors_and_incompats.finalize() { // Statically assert that we hit one of the `?`s enum Void {} let _: Void = ret?.require_compatible()?; } - let constraints = ExecutionPlatformConstraints::new(&node, &gathered_deps, &cfg_ctx)?; - constraints.many(ctx, &self.0).await + let constraints = + ExecutionPlatformConstraints::new(node.as_ref(), &gathered_deps, &cfg_ctx)?; + let toolchain_allows = constraints.toolchain_allows(ctx).await?; + Ok(ToolchainConstraints::new( + &constraints.exec_deps, + &constraints.exec_compatible_with, + &toolchain_allows, + )) } fn equality(x: &Self::Value, y: &Self::Value) -> bool { @@ -334,10 +376,10 @@ async fn execution_platforms_for_toolchain( } pub async fn get_execution_platform_toolchain_dep( - ctx: &DiceComputations, - target_label: &ConfiguredTargetLabel, - target_node: &TargetNode, -) -> SharedResult> { + ctx: &mut DiceComputations<'_>, + target_label: &TargetConfiguredTargetLabel, + target_node: TargetNodeRef<'_>, +) -> buck2_error::Result> { assert!(target_node.is_toolchain_rule()); let target_cfg = target_label.cfg(); let target_cell = target_node.label().pkg().cell_name(); @@ -349,9 +391,9 @@ pub async fn get_execution_platform_toolchain_dep( ) .await?; if target_node.transition_deps().next().is_some() { - Err(SharedError::new(ToolchainDepError::ToolchainTransitionDep( - target_label.unconfigured().dupe(), - ))) + Err(buck2_error::Error::new( + ToolchainDepError::ToolchainTransitionDep(target_label.unconfigured().dupe()), + )) } else { let platform_cfgs = compute_platform_cfgs(ctx, target_node).await?; let resolved_transitions = OrderedMap::new(); @@ -380,12 +422,12 @@ pub async fn get_execution_platform_toolchain_dep( } async fn resolve_execution_platform( - ctx: &DiceComputations, - node: &TargetNode, + ctx: &mut DiceComputations<'_>, + node: TargetNodeRef<'_>, resolved_configuration: &ResolvedConfiguration, gathered_deps: &GatheredDeps, cfg_ctx: &(dyn AttrConfigurationContext + Sync), -) -> SharedResult { +) -> buck2_error::Result { // If no execution platforms are configured, we fall back to the legacy execution // platform behavior. We currently only support legacy execution platforms. 
That behavior is that there is a // single executor config (the fallback config) and the execution platform is in the same @@ -404,7 +446,7 @@ async fn resolve_execution_platform( } fn unpack_target_compatible_with_attr( - target_node: &TargetNode, + target_node: TargetNodeRef, resolved_cfg: &ResolvedConfiguration, attr_name: &str, ) -> anyhow::Result> { @@ -419,21 +461,20 @@ fn unpack_target_compatible_with_attr( } impl<'c> AttrConfigurationContext for AttrConfigurationContextToResolveCompatibleWith<'c> { - fn matches<'a>(&'a self, label: &TargetLabel) -> Option<&'a ConfigSettingData> { - self.resolved_cfg - .setting_matches(ConfigurationSettingKeyRef(label)) + fn resolved_cfg_settings(&self) -> &ResolvedConfigurationSettings { + self.resolved_cfg.settings() } fn cfg(&self) -> ConfigurationNoExec { self.resolved_cfg.cfg().dupe() } - fn exec_cfg(&self) -> ConfigurationNoExec { - unreachable!( + fn exec_cfg(&self) -> anyhow::Result { + Err(internal_error_anyhow!( "exec_cfg() is not needed to resolve `{}` or `{}`", TARGET_COMPATIBLE_WITH_ATTRIBUTE_FIELD, LEGACY_TARGET_COMPATIBLE_WITH_ATTRIBUTE_FIELD - ) + )) } fn toolchain_cfg(&self) -> ConfigurationWithExec { @@ -448,12 +489,14 @@ fn unpack_target_compatible_with_attr( ) } - fn resolved_transitions(&self) -> &OrderedMap, Arc> { - unreachable!( + fn resolved_transitions( + &self, + ) -> anyhow::Result<&OrderedMap, Arc>> { + Err(internal_error_anyhow!( "resolved_transitions() is not needed to resolve `{}` or `{}`", TARGET_COMPATIBLE_WITH_ATTRIBUTE_FIELD, LEGACY_TARGET_COMPATIBLE_WITH_ATTRIBUTE_FIELD - ) + )) } } @@ -479,7 +522,7 @@ fn unpack_target_compatible_with_attr( fn check_compatible( target_label: &ConfiguredTargetLabel, - target_node: &TargetNode, + target_node: TargetNodeRef, resolved_cfg: &ResolvedConfiguration, ) -> anyhow::Result> { let target_compatible_with = unpack_target_compatible_with_attr( @@ -515,7 +558,7 @@ fn check_compatible( let mut right = Vec::new(); for label in ConfiguredTargetNode::attr_as_target_compatible_with(attr) { let label = label?; - match resolved_cfg.matches(&label) { + match resolved_cfg.settings().setting_matches(&label) { Some(_) => left.push(label), None => right.push(label), } @@ -557,7 +600,7 @@ fn check_compatible( Ok(MaybeCompatible::Incompatible(Arc::new( IncompatiblePlatformReason { target: target_label.dupe(), - cause: IncompatiblePlatformReasonCause::UnsatisfiedConfig(incompatible_target), + cause: IncompatiblePlatformReasonCause::UnsatisfiedConfig(incompatible_target.0), }, ))) } @@ -566,7 +609,7 @@ fn check_compatible( /// implement. Naively implementing this check on unconfigured nodes doesn't work because it results /// in dice cycles when there are cycles in the unconfigured graph. 
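`check_compatible` above walks each `target_compatible_with` entry, splitting settings that match the resolved configuration from those that do not, and turns an unmatched entry into an incompatibility cause. A simplified sketch of that partitioning, with toy `Setting`/`Compat` types standing in for the buck2 ones:

#[derive(Debug, Clone, PartialEq)]
struct Setting(&'static str);

enum Compat {
    Compatible,
    // First unsatisfied config setting, as in UnsatisfiedConfig above.
    Incompatible(Setting),
}

fn check_compatible(
    required: &[Setting],
    satisfied: impl Fn(&Setting) -> bool,
) -> Compat {
    // Split into matched ("left") and unmatched ("right"), as the real
    // code does; the matched side is kept only for diagnostics here.
    let (matched, unmatched): (Vec<_>, Vec<_>) =
        required.iter().cloned().partition(|s| satisfied(s));
    let _ = matched;
    match unmatched.into_iter().next() {
        None => Compat::Compatible,
        Some(s) => Compat::Incompatible(s),
    }
}

fn main() {
    let req = [Setting("os:linux"), Setting("cpu:arm64")];
    match check_compatible(&req, |s| s.0 == "os:linux") {
        Compat::Incompatible(s) => assert_eq!(s, Setting("cpu:arm64")),
        Compat::Compatible => unreachable!(),
    }
}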
async fn check_plugin_deps( - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, target_label: &ConfiguredTargetLabel, plugin_deps: &PluginLists, ) -> anyhow::Result<()> { @@ -606,7 +649,7 @@ struct ErrorsAndIncompatibilities { impl ErrorsAndIncompatibilities { pub fn unpack_dep_into( &mut self, - target_label: &ConfiguredTargetLabel, + target_label: &TargetConfiguredTargetLabel, result: anyhow::Result>, check_visibility: CheckVisibility, list: &mut Vec, @@ -616,7 +659,7 @@ impl ErrorsAndIncompatibilities { fn unpack_dep( &mut self, - target_label: &ConfiguredTargetLabel, + target_label: &TargetConfiguredTargetLabel, result: anyhow::Result>, check_visibility: CheckVisibility, ) -> Option { @@ -626,7 +669,7 @@ impl ErrorsAndIncompatibilities { } Ok(MaybeCompatible::Incompatible(reason)) => { self.incompats.push(Arc::new(IncompatiblePlatformReason { - target: target_label.dupe(), + target: target_label.inner().dupe(), cause: IncompatiblePlatformReasonCause::Dependency(reason.dupe()), })); } @@ -639,11 +682,13 @@ impl ErrorsAndIncompatibilities { return Some(dep); } Ok(false) => { - self.errs - .push(anyhow::anyhow!(VisibilityError::NotVisibleTo( + self.errs.push( + VisibilityError::NotVisibleTo( dep.label().unconfigured().dupe(), target_label.unconfigured().dupe(), - ))); + ) + .into(), + ); } Err(e) => { self.errs.push(e); @@ -671,27 +716,27 @@ impl ErrorsAndIncompatibilities { struct GatheredDeps { deps: Vec, exec_deps: SmallMap, - toolchain_deps: SmallSet, + toolchain_deps: SmallSet, plugin_lists: PluginLists, } async fn gather_deps( - target_label: &ConfiguredTargetLabel, - target_node: &TargetNode, + target_label: &TargetConfiguredTargetLabel, + target_node: TargetNodeRef<'_>, attr_cfg_ctx: &(dyn AttrConfigurationContext + Sync), - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, ) -> anyhow::Result<(GatheredDeps, ErrorsAndIncompatibilities)> { #[derive(Default)] struct Traversal { deps: OrderedMap>, exec_deps: SmallMap, - toolchain_deps: SmallSet, + toolchain_deps: SmallSet, plugin_lists: PluginLists, } impl ConfiguredAttrTraversal for Traversal { fn dep(&mut self, dep: &ConfiguredProvidersLabel) -> anyhow::Result<()> { - self.deps.entry(dep.clone()).or_insert_with(SmallSet::new); + self.deps.entry(dep.dupe()).or_insert_with(SmallSet::new); Ok(()) } @@ -701,19 +746,22 @@ async fn gather_deps( plugin_kinds: &PluginKindSet, ) -> anyhow::Result<()> { self.deps - .entry(dep.clone()) + .entry(dep.dupe()) .or_insert_with(SmallSet::new) .insert(plugin_kinds.dupe()); Ok(()) } fn exec_dep(&mut self, dep: &ConfiguredProvidersLabel) -> anyhow::Result<()> { - self.exec_deps.insert(dep.clone(), CheckVisibility::Yes); + self.exec_deps.insert(dep.dupe(), CheckVisibility::Yes); Ok(()) } fn toolchain_dep(&mut self, dep: &ConfiguredProvidersLabel) -> anyhow::Result<()> { - self.toolchain_deps.insert(dep.clone()); + self.toolchain_deps + .insert(TargetConfiguredTargetLabel::new_without_exec_cfg( + dep.target().dupe(), + )); Ok(()) } @@ -730,13 +778,11 @@ async fn gather_deps( configured_attr.traverse(target_node.label().pkg(), &mut traversal)?; } - let dep_futures = traversal - .deps - .iter() - .map(|v| ctx.get_configured_target_node(v.0.target())); - let dep_results = - ConfiguredGraphCycleDescriptor::guard_this(ctx, futures::future::join_all(dep_futures)) - .await??; + let dep_results = ctx + .compute_join(traversal.deps.iter(), |ctx, v| { + async move { ctx.get_internal_configured_target_node(v.0.target()).await }.boxed() + }) + .await; let mut plugin_lists = traversal.plugin_lists; 
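The `gather_deps` hunk above replaces a hand-rolled `join_all` over dep futures with `ctx.compute_join`, which keeps each input paired with its fallible result. An approximation of that shape using the `futures` crate directly; `lookup_node` is a hypothetical async lookup, not a buck2 function:

use futures::executor::block_on;
use futures::future::join_all;

async fn lookup_node(label: &str) -> Result<String, String> {
    if label.is_empty() {
        Err("empty label".into())
    } else {
        Ok(format!("node({label})"))
    }
}

// Like `ctx.compute_join(deps, ...)`: run all lookups concurrently and
// keep each label next to its result so errors stay attributable.
async fn gather(labels: &[&str]) -> Vec<(String, Result<String, String>)> {
    join_all(labels.iter().copied().map(|l| async move {
        (l.to_string(), lookup_node(l).await)
    }))
    .await
}

fn main() {
    let results = block_on(gather(&["//a:b", ""]));
    assert!(results[0].1.is_ok());
    assert!(results[1].1.is_err());
}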
let mut deps = Vec::new(); @@ -777,7 +823,9 @@ async fn gather_deps( for plugin_label in plugin_lists.iter_for_kind(kind).map(|(target, _)| { attr_cfg_ctx.configure_exec_target(&ProvidersLabel::default_for(target.dupe())) }) { - exec_deps.entry(plugin_label).or_insert(CheckVisibility::No); + exec_deps + .entry(plugin_label?) + .or_insert(CheckVisibility::No); } } @@ -792,12 +840,140 @@ async fn gather_deps( )) } +/// Resolves configured attributes of target node needed to compute transitions +async fn resolve_transition_attrs<'a>( + transitions: impl Iterator, + target_node: &'a TargetNode, + resolved_cfg: &ResolvedConfiguration, + platform_cfgs: &OrderedMap, + ctx: &mut DiceComputations<'_>, +) -> anyhow::Result>> { + struct AttrConfigurationContextToResolveTransitionAttrs<'c> { + resolved_cfg: &'c ResolvedConfiguration, + toolchain_cfg: ConfigurationWithExec, + platform_cfgs: &'c OrderedMap, + } + + impl<'c> AttrConfigurationContext for AttrConfigurationContextToResolveTransitionAttrs<'c> { + fn resolved_cfg_settings(&self) -> &ResolvedConfigurationSettings { + self.resolved_cfg.settings() + } + + fn cfg(&self) -> ConfigurationNoExec { + self.resolved_cfg.cfg().dupe() + } + + fn exec_cfg(&self) -> anyhow::Result { + Err(internal_error_anyhow!( + "exec_cfg() is not needed in pre transition attribute resolution." + )) + } + + fn toolchain_cfg(&self) -> ConfigurationWithExec { + self.toolchain_cfg.dupe() + } + + fn platform_cfg(&self, label: &TargetLabel) -> anyhow::Result { + match self.platform_cfgs.get(label) { + Some(configuration) => Ok(configuration.dupe()), + None => Err(PlatformConfigurationError::UnknownPlatformTarget(label.dupe()).into()), + } + } + + fn resolved_transitions( + &self, + ) -> anyhow::Result<&OrderedMap, Arc>> { + Err(internal_error_anyhow!( + "resolved_transitions() can't be used before transition execution." + )) + } + } + + let cfg_ctx = AttrConfigurationContextToResolveTransitionAttrs { + resolved_cfg, + platform_cfgs, + toolchain_cfg: resolved_cfg + .cfg() + .make_toolchain(&ConfigurationNoExec::unbound_exec()), + }; + let mut result = OrderedMap::default(); + for tr in transitions { + let attrs = TRANSITION_ATTRS_PROVIDER + .get()? + .transition_attrs(ctx, &tr) + .await?; + if let Some(attrs) = attrs { + for attr in attrs.as_ref() { + // Multiple outgoing transitions may refer the same attribute. + if result.contains_key(attr.as_str()) { + continue; + } + + if let Some(coerced_attr) = target_node.attr(&attr, AttrInspectOptions::All)? { + let configured_attr = coerced_attr.configure(&cfg_ctx)?; + if let Some(old_val) = + result.insert(configured_attr.name, Arc::new(configured_attr.value)) + { + return Err(internal_error_anyhow!( + "Found duplicated value `{}` for attr `{}` on target `{}`", + &old_val.as_display_no_ctx(), + attr, + target_node.label() + )); + } + } + } + } + } + Ok(result) +} + +/// Verifies if configured node's attributes are equal to the same attributes configured with pre-transition configuration. +/// Only check attributes used in transition. 
+fn verify_transitioned_attrs<'a>( + // Attributes resolved with pre-transition configuration + pre_transition_attrs: &OrderedMap<&'a str, Arc>, + pre_transition_config: &ConfigurationData, + node: &ConfiguredTargetNode, +) -> anyhow::Result<()> { + for (attr, attr_value) in pre_transition_attrs { + let transition_configured_attr = node + .get(attr, AttrInspectOptions::All) + .with_internal_error_anyhow(|| { + format!( + "Attr {} was not found in transition for target {} ({})", + attr, + node.label(), + node.attrs(AttrInspectOptions::All) + .format_with(", ", |v, f| f(&format_args!("{:?}", v))) + ) + })?; + if &transition_configured_attr.value != attr_value.as_ref() { + return Err(NodeCalculationError::TransitionAttrIncompatibleChange( + node.label().unconfigured().dupe(), + pre_transition_config.dupe(), + node.label().cfg().dupe(), + attr.to_string(), + attr_value.as_display_no_ctx().to_string(), + transition_configured_attr + .value + .as_display_no_ctx() + .to_string(), + ) + .into()); + } + } + Ok(()) +} + /// Compute configured target node ignoring transition for this node. async fn compute_configured_target_node_no_transition( target_label: &ConfiguredTargetLabel, target_node: TargetNode, - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, ) -> anyhow::Result> { + let partial_target_label = + &TargetConfiguredTargetLabel::new_without_exec_cfg(target_label.dupe()); let target_cfg = target_label.cfg(); let target_cell = target_node.label().pkg().cell_name(); let resolved_configuration = ctx @@ -811,22 +987,31 @@ async fn compute_configured_target_node_no_transition( // Must check for compatibility before evaluating non-compatibility attributes. if let MaybeCompatible::Incompatible(reason) = - check_compatible(target_label, &target_node, &resolved_configuration)? + check_compatible(target_label, target_node.as_ref(), &resolved_configuration)? { return Ok(MaybeCompatible::Incompatible(reason)); } + let platform_cfgs = compute_platform_cfgs(ctx, target_node.as_ref()).await?; + let mut resolved_transitions = OrderedMap::new(); + let attrs = resolve_transition_attrs( + target_node.transition_deps().map(|(_, tr)| tr.as_ref()), + &target_node, + &resolved_configuration, + &platform_cfgs, + ctx, + ) + .boxed() + .await?; for (_dep, tr) in target_node.transition_deps() { let resolved_cfg = TRANSITION_CALCULATION .get()? - .apply_transition(ctx, &target_node, target_cfg, tr) + .apply_transition(ctx, &attrs, target_cfg, tr) .await?; resolved_transitions.insert(tr.dupe(), resolved_cfg); } - let platform_cfgs = compute_platform_cfgs(ctx, &target_node).await?; - // We need to collect deps and to ensure that all attrs can be successfully // configured so that we don't need to support propagate configuration errors on attr access. let attr_cfg_ctx = AttrConfigurationContextImpl::new( @@ -838,10 +1023,18 @@ async fn compute_configured_target_node_no_transition( &resolved_transitions, &platform_cfgs, ); - let (gathered_deps, mut errors_and_incompats) = - gather_deps(target_label, &target_node, &attr_cfg_ctx, ctx).await?; + let (gathered_deps, mut errors_and_incompats) = gather_deps( + partial_target_label, + target_node.as_ref(), + &attr_cfg_ctx, + ctx, + ) + .boxed() + .await?; - check_plugin_deps(ctx, target_label, &gathered_deps.plugin_lists).await?; + check_plugin_deps(ctx, target_label, &gathered_deps.plugin_lists) + .boxed() + .await?; let execution_platform_resolution = if target_cfg.is_unbound() { // The unbound configuration is used when evaluation configuration nodes. 
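`verify_transitioned_attrs`, introduced above, re-reads every transition-relevant attribute under the post-transition configuration and requires it to be unchanged; any mismatch is surfaced as `TransitionAttrIncompatibleChange`. The comparison logic in miniature, over plain string maps instead of `ConfiguredAttr` values:

use std::collections::BTreeMap;

fn verify_transitioned_attrs(
    before: &BTreeMap<&str, &str>, // attrs resolved with the pre-transition cfg
    after: &BTreeMap<&str, &str>,  // same attrs resolved with the new cfg
) -> Result<(), String> {
    for (name, old) in before {
        match after.get(name) {
            None => return Err(format!("attr `{name}` missing after transition")),
            Some(new) if new != old => {
                return Err(format!(
                    "attr `{name}` changed by transition: `{old}` -> `{new}`"
                ));
            }
            Some(_) => {}
        }
    }
    Ok(())
}

fn main() {
    let before = BTreeMap::from([("cpu", "arm64")]);
    let same = before.clone();
    let changed = BTreeMap::from([("cpu", "x86_64")]);
    assert!(verify_transitioned_attrs(&before, &same).is_ok());
    assert!(verify_transitioned_attrs(&before, &changed).is_err());
}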
@@ -867,52 +1060,84 @@ async fn compute_configured_target_node_no_transition( } else { resolve_execution_platform( ctx, - &target_node, + target_node.as_ref(), &resolved_configuration, &gathered_deps, &attr_cfg_ctx, ) + .boxed() .await? }; let execution_platform = execution_platform_resolution.cfg(); // We now need to replace the dummy exec config we used above with the real one - let toolchain_dep_futures = gathered_deps - .toolchain_deps - .iter() - .map(|v| v.target().map_exec_cfg(execution_platform.cfg())) - .map(|v| async move { ctx.get_configured_target_node(&v).await }); - - let exec_dep_futures = gathered_deps - .exec_deps - .iter() - .map(|(v, check_visibility)| { - ( - v.target() - .unconfigured() - .configure_pair(execution_platform.cfg_pair().dupe()), - check_visibility, + + let execution_platform = &execution_platform; + let toolchain_deps = &gathered_deps.toolchain_deps; + let exec_deps = &gathered_deps.exec_deps; + + let get_toolchain_deps = DiceComputations::declare_closure(move |ctx| { + async move { + ctx.compute_join( + toolchain_deps, + |ctx, target: &TargetConfiguredTargetLabel| { + async move { + ctx.get_internal_configured_target_node( + &target.with_exec_cfg(execution_platform.cfg().dupe()), + ) + .await + } + .boxed() + }, ) - }) - .map(|(v, check_visibility)| async move { - (ctx.get_configured_target_node(&v).await, check_visibility) - }); + .await + } + .boxed() + }); + + let get_exec_deps = DiceComputations::declare_closure(|ctx| { + async move { + ctx.compute_join(exec_deps, |ctx, (target, check_visibility)| { + async move { + ( + ctx.get_internal_configured_target_node( + &target + .target() + .unconfigured() + .configure_pair(execution_platform.cfg_pair().dupe()), + ) + .await, + *check_visibility, + ) + } + .boxed() + }) + .await + } + .boxed() + }); - let fut = futures::future::join( - futures::future::join_all(toolchain_dep_futures), - futures::future::join_all(exec_dep_futures), - ); let (toolchain_dep_results, exec_dep_results): (Vec<_>, Vec<_>) = - ConfiguredGraphCycleDescriptor::guard_this(ctx, fut).await??; + ctx.compute2(get_toolchain_deps, get_exec_deps).await; let mut deps = gathered_deps.deps; let mut exec_deps = Vec::with_capacity(gathered_deps.exec_deps.len()); for dep in toolchain_dep_results { - errors_and_incompats.unpack_dep_into(target_label, dep, CheckVisibility::Yes, &mut deps); + errors_and_incompats.unpack_dep_into( + partial_target_label, + dep, + CheckVisibility::Yes, + &mut deps, + ); } for (dep, check_visibility) in exec_dep_results { - errors_and_incompats.unpack_dep_into(target_label, dep, *check_visibility, &mut exec_deps); + errors_and_incompats.unpack_dep_into( + partial_target_label, + dep, + check_visibility, + &mut exec_deps, + ); } if let Some(ret) = errors_and_incompats.finalize() { @@ -932,37 +1157,9 @@ async fn compute_configured_target_node_no_transition( ))) } -/// Compute configured target node after transition is applied to the target. -/// -/// This function creates two node: transitioned node and a forward node. -/// Forward node is returned. 
-async fn compute_configured_target_node_with_transition( - key: &ConfiguredTransitionedNodeKey, - ctx: &DiceComputations, -) -> anyhow::Result> { - assert_eq!( - key.forward.unconfigured(), - key.transitioned.unconfigured(), - "Transition can be done only to the nodes with different configuration; \ - this valid case was ruled out earlier" - ); - assert_ne!( - key.forward, key.transitioned, - "Transition can only happen to a node with the same unconfigured target" - ); - - let target_node = ctx.get_target_node(key.transitioned.unconfigured()).await?; - let transitioned_node = - compute_configured_target_node_no_transition(&key.transitioned, target_node.dupe(), ctx) - .await?; - transitioned_node.try_map(|transitioned_node| { - ConfiguredTargetNode::new_forward(key.forward.dupe(), transitioned_node) - }) -} - async fn compute_configured_target_node( key: &ConfiguredTargetNodeKey, - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, ) -> anyhow::Result> { let target_node = ctx .get_target_node(key.0.unconfigured()) @@ -990,103 +1187,348 @@ async fn compute_configured_target_node( _ => {} } - if let Some(transition_id) = &target_node.0.rule.cfg { - #[async_trait] - impl Key for ConfiguredTransitionedNodeKey { - type Value = SharedResult>; - - async fn compute( - &self, - ctx: &mut DiceComputations, - _cancellation: &CancellationContext, - ) -> SharedResult> { - compute_configured_target_node_with_transition(self, ctx) - .await - .shared_error() - } + if let Some(transition_id) = &target_node.rule.cfg { + compute_configured_forward_target_node(key, &target_node, transition_id, ctx).await + } else { + // We are not caching `ConfiguredTransitionedNodeKey` because this is cheap, + // and no need to fetch `target_node` again. + compute_configured_target_node_no_transition(&key.0.dupe(), target_node, ctx).await + } +} - fn equality(x: &Self::Value, y: &Self::Value) -> bool { - if let (Ok(x), Ok(y)) = (x, y) { - x == y - } else { - false - } - } - } +async fn compute_configured_forward_target_node( + key: &ConfiguredTargetNodeKey, + target_node: &TargetNode, + transition_id: &TransitionId, + ctx: &mut DiceComputations<'_>, +) -> anyhow::Result> { + let target_label_before_transition = &key.0; + let platform_cfgs = compute_platform_cfgs(ctx, target_node.as_ref()) + .boxed() + .await?; + let resolved_configuration = ctx + .get_resolved_configuration( + target_label_before_transition.cfg(), + target_node.label().pkg().cell_name(), + target_node.get_configuration_deps(), + ) + .await + .with_context(|| { + format!( + "Error resolving configuration deps of `{}`", + target_label_before_transition + ) + })?; - let cfg = TRANSITION_CALCULATION - .get()? - .apply_transition(ctx, &target_node, key.0.cfg(), transition_id) + let attrs = resolve_transition_attrs( + iter::once(transition_id), + target_node, + &resolved_configuration, + &platform_cfgs, + ctx, + ) + .boxed() + .await?; + + let cfg = TRANSITION_CALCULATION + .get()? + .apply_transition( + ctx, + &attrs, + target_label_before_transition.cfg(), + transition_id, + ) + .await?; + let target_label_after_transition = target_label_before_transition + .unconfigured() + .configure(cfg.single()?.dupe()); + + if &target_label_after_transition == target_label_before_transition { + // Transitioned to identical configured target, no need to create a forward node. 
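The logic that replaces the deleted function above boils down to: apply the transition; if the configuration is unchanged, build the node directly; otherwise create a single forward hop and treat any further configuration change as non-idempotence. A toy rendition with illustrative `Cfg`/`Node` types (the real code detects the second hop via the cached node's `forward_target`, not by re-running the transition inline):

#[derive(Clone, PartialEq, Debug)]
struct Cfg(&'static str);

#[derive(Debug)]
enum Node {
    Real(Cfg),
    Forward { from: Cfg, to: Box<Node> },
}

fn configure(cfg: Cfg, transition: impl Fn(&Cfg) -> Cfg) -> Result<Node, String> {
    let new_cfg = transition(&cfg);
    if new_cfg == cfg {
        // Transitioned to an identical configuration: no forward node needed.
        return Ok(Node::Real(cfg));
    }
    // The transition must settle after one hop; a second change is the
    // situation TransitionNotIdempotent reports above.
    let again = transition(&new_cfg);
    if again != new_cfg {
        return Err(format!(
            "not idempotent: {:?} -> {:?} -> {:?}",
            cfg, new_cfg, again
        ));
    }
    Ok(Node::Forward { from: cfg, to: Box::new(Node::Real(new_cfg)) })
}

fn main() {
    let to_arm = |c: &Cfg| if c.0 == "arm64" { c.clone() } else { Cfg("arm64") };
    assert!(matches!(configure(Cfg("arm64"), to_arm), Ok(Node::Real(_))));
    assert!(matches!(configure(Cfg("x86"), to_arm), Ok(Node::Forward { .. })));
}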
+ compute_configured_target_node_no_transition( + target_label_before_transition, + target_node.dupe(), + ctx, + ) + .boxed() + .await + } else { + // This must call through dice to get the configured target node so that it is the correct + // instance (because ConfiguredTargetNode uses reference equality on its deps). + // This also helps further verify idempotence (as we will get the real result with the any transition applied again). + let transitioned_node = ctx + .get_internal_configured_target_node(&target_label_after_transition) .await?; - let configured_target_label = key.0.unconfigured().configure(cfg.single()?.dupe()); - if configured_target_label == key.0 { - // Transitioned to identical configured target, no need to create a forward node. - compute_configured_target_node_no_transition(&key.0, target_node.dupe(), ctx).await - } else { - Ok(ctx - .compute(&ConfiguredTransitionedNodeKey { - forward: key.0.dupe(), - transitioned: configured_target_label, - }) - .await??) + // In apply_transition() above we've checked that the transition is idempotent when applied again with the same attrs (but the + // transitioned cfg) we don't know if it causes an attr change (and then a subsequent change in the transition + // result). We verify that here. If we're in a case where it is changing the attr in a way that causes the transition + // to introduce a cycle, we depend on the dice cycle detection to identify it. Alternatively we could directly recompute + // the node and check the attrs, but we'd still need to request the real node from dice and it doesn't seem worth + // that extra cost just for a slightly improved error message. + if let MaybeCompatible::Compatible(node) = &transitioned_node { + // check that the attrs weren't changed first. This should be the only way that we can hit non-idempotence + // here and gives a better error than if we just give the general idempotence error. + verify_transitioned_attrs(&attrs, resolved_configuration.cfg().cfg(), node)?; + + if let Some(forward) = node.forward_target() { + return Err(NodeCalculationError::TransitionNotIdempotent( + target_label_before_transition.unconfigured().dupe(), + target_label_before_transition.cfg().dupe(), + target_label_after_transition.cfg().dupe(), + forward.label().cfg().dupe(), + )) + .internal_error_anyhow("idempotence should have been enforced by transition idempotence and attr change checks"); + } } - } else { - // We are not caching `ConfiguredTransitionedNodeKey` because this is cheap, - // and no need to fetch `target_node` again. - compute_configured_target_node_no_transition(&key.0.dupe(), target_node, ctx).await + + let configured_target_node = transitioned_node.try_map(|transitioned_node| { + ConfiguredTargetNode::new_forward( + target_label_before_transition.dupe(), + transitioned_node, + ) + })?; + + Ok(configured_target_node) } } #[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)] pub struct ConfiguredTargetNodeKey(pub ConfiguredTargetLabel); -/// Similar to [`ConfiguredTargetNodeKey`], but used when the target -/// is transitioned to different configuration because rule definition requires it. -#[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)] -#[display(fmt = "ConfiguredTransitionedNodeKey({}, {})", forward, transitioned)] -pub struct ConfiguredTransitionedNodeKey { - /// Forward node label. - forward: ConfiguredTargetLabel, - /// Transitional node label. 
- transitioned: ConfiguredTargetLabel, -} - struct ConfiguredTargetNodeCalculationInstance; pub(crate) fn init_configured_target_node_calculation() { CONFIGURED_TARGET_NODE_CALCULATION.init(&ConfiguredTargetNodeCalculationInstance); } +#[derive(Debug, Allocative, Eq, PartialEq)] +struct LookingUpConfiguredNodeContext { + target: ConfiguredTargetLabel, + len: usize, + rest: Option>, +} + +impl buck2_error::TypedContext for LookingUpConfiguredNodeContext { + fn eq(&self, other: &dyn buck2_error::TypedContext) -> bool { + match (other as &dyn std::any::Any).downcast_ref::() { + Some(v) => self == v, + None => false, + } + } +} + +impl LookingUpConfiguredNodeContext { + fn new(target: ConfiguredTargetLabel, parent: Option>) -> Self { + let (len, rest) = match parent { + Some(v) => (v.len + 1, Some(v.clone())), + None => (1, None), + }; + Self { target, len, rest } + } + + fn add_context(res: anyhow::Result, target: ConfiguredTargetLabel) -> anyhow::Result { + res.compute_context( + |parent_ctx: Arc| Self::new(target.dupe(), Some(parent_ctx)), + || Self::new(target.dupe(), None), + ) + } +} + +impl std::fmt::Display for LookingUpConfiguredNodeContext { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if self.len == 1 { + write!(f, "Error looking up configured node {}", &self.target)?; + } else { + writeln!( + f, + "Error in configured node dependency, dependency chain follows (-> indicates depends on, ^ indicates same configuration as previous):" + )?; + + let mut curr = self; + let mut prev_cfg = None; + let mut is_first = true; + + loop { + f.write_str(" ")?; + if is_first { + f.write_str(" ")?; + } else { + f.write_str("-> ")?; + } + + write!(f, "{}", curr.target.unconfigured())?; + let cfg = Some(curr.target.cfg()); + f.write_str(" (")?; + if cfg == prev_cfg { + f.write_str("^")?; + } else { + std::fmt::Display::fmt(curr.target.cfg(), f)?; + } + f.write_str(")\n")?; + is_first = false; + prev_cfg = Some(curr.target.cfg()); + match &curr.rest { + Some(v) => curr = &**v, + None => break, + } + } + } + Ok(()) + } +} + +#[async_trait] +impl Key for ConfiguredTargetNodeKey { + type Value = buck2_error::Result>; + async fn compute( + &self, + ctx: &mut DiceComputations, + _cancellation: &CancellationContext, + ) -> Self::Value { + let res = CycleGuard::::new(ctx)? + .guard_this(compute_configured_target_node(self, ctx)) + .await + .into_result(ctx) + .await??; + Ok(LookingUpConfiguredNodeContext::add_context( + res, + self.0.dupe(), + )?) + } + + fn equality(x: &Self::Value, y: &Self::Value) -> bool { + match (x, y) { + (Ok(x), Ok(y)) => x == y, + _ => false, + } + } + + fn provide<'a>(&'a self, demand: &mut Demand<'a>) { + demand.provide_value_with(|| BuildSignalsNodeKey::new(self.dupe())) + } +} + +impl BuildSignalsNodeKeyImpl for ConfiguredTargetNodeKey {} + #[async_trait] impl ConfiguredTargetNodeCalculationImpl for ConfiguredTargetNodeCalculationInstance { async fn get_configured_target_node( &self, - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, target: &ConfiguredTargetLabel, + check_dependency_incompatibility: bool, ) -> anyhow::Result> { - #[async_trait] - impl Key for ConfiguredTargetNodeKey { - type Value = SharedResult>; - async fn compute( - &self, - ctx: &mut DiceComputations, - _cancellation: &CancellationContext, - ) -> Self::Value { - let res = compute_configured_target_node(self, ctx).await; - Ok(res.with_context(|| format!("Error looking up configured node {}", self.0))?) 
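`LookingUpConfiguredNodeContext` replaces the flat "Error looking up configured node" context removed below with a linked chain of (target, configuration) frames, rendering `^` whenever a frame repeats the previous frame's configuration. A flattened sketch of that rendering over a slice of frames (the real type is a reference-counted linked list threaded through error context):

fn render_chain(frames: &[(&str, &str)]) -> String {
    if frames.len() == 1 {
        return format!(
            "Error looking up configured node {} ({})",
            frames[0].0, frames[0].1
        );
    }
    let mut out = String::from(
        "Error in configured node dependency, dependency chain follows \
         (-> indicates depends on, ^ indicates same configuration as previous):\n",
    );
    let mut prev_cfg: Option<&str> = None;
    for (i, (target, cfg)) in frames.iter().enumerate() {
        let arrow = if i == 0 { "   " } else { "-> " };
        // Elide a configuration identical to the previous frame's.
        let shown = if prev_cfg == Some(*cfg) { "^" } else { *cfg };
        out.push_str(&format!("  {arrow}{target} ({shown})\n"));
        prev_cfg = Some(*cfg);
    }
    out
}

fn main() {
    let chain = [("//a:lib", "cfg1"), ("//b:bin", "cfg1"), ("//c:tool", "cfg2")];
    let rendered = render_chain(&chain);
    assert!(rendered.contains("-> //b:bin (^)"));
    assert!(rendered.contains("-> //c:tool (cfg2)"));
}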
+ let maybe_compatible_node = ctx + .compute(&ConfiguredTargetNodeKey(target.dupe())) + .await??; + if check_dependency_incompatibility { + if let MaybeCompatible::Incompatible(reason) = &maybe_compatible_node { + if matches!( + &reason.cause, + &IncompatiblePlatformReasonCause::Dependency(_) + ) { + if check_error_on_incompatible_dep(ctx, target.unconfigured_label()).await? { + return Err(reason.to_err()); + } else { + soft_error!( + "dep_only_incompatible_version_two", reason.to_err().into(), + quiet: false, + // Log at least one sample per unique package. + low_cardinality_key_for_additional_logview_samples: Some(Box::new(target.unconfigured().pkg())) + )?; + } + } } + } + Ok(maybe_compatible_node) + } +} - fn equality(x: &Self::Value, y: &Self::Value) -> bool { - match (x, y) { - (Ok(x), Ok(y)) => x == y, - _ => false, - } +async fn check_error_on_incompatible_dep( + ctx: &mut DiceComputations<'_>, + target_label: &TargetLabel, +) -> anyhow::Result { + #[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)] + struct ErrorOnIncompatibleDepKey; + + #[async_trait] + impl Key for ErrorOnIncompatibleDepKey { + type Value = buck2_error::Result>>>; + + async fn compute( + &self, + mut ctx: &mut DiceComputations, + _cancellation: &CancellationContext, + ) -> Self::Value { + let cell_resolver = ctx.get_cell_resolver().await?; + let root_cell = cell_resolver.root_cell(); + let alias_resolver = ctx.get_cell_alias_resolver(root_cell).await?; + let root_conf = ctx.get_legacy_root_config_on_dice().await?; + let patterns: Vec = root_conf + .view(&mut ctx) + .parse_list(BuckconfigKeyRef { + section: "buck2", + property: "error_on_dep_only_incompatible", + })? + .unwrap_or_default(); + + let mut result = Vec::new(); + for pattern in patterns { + result.push(ParsedPattern::parse_precise( + pattern.trim(), + root_cell, + &cell_resolver, + &alias_resolver, + )?); } + Ok(result.into()) } - ctx.compute(&ConfiguredTargetNodeKey(target.dupe())) - .await? 
- .unshared_error() + fn equality(x: &Self::Value, y: &Self::Value) -> bool { + match (x, y) { + (Ok(x), Ok(y)) => x == y, + _ => false, + } + } } + + let patterns = ctx.compute(&ErrorOnIncompatibleDepKey).await??; + for pattern in patterns.iter() { + if pattern.matches(target_label) { + return Ok(true); + } + } + + Ok(false) +} + +#[allow(unused)] +fn _assert_compute_configured_target_node_no_transition_size() { + const fn sz(_: &F) -> usize + where + F: FnOnce(T1, T2, T3) -> R, + { + std::mem::size_of::() + } + + const _: () = assert!( + sz(&compute_configured_target_node_no_transition) <= 700, + "compute_configured_target_node_no_transition size is larger than 700 bytes", + ); +} + +#[allow(unused)] +fn _assert_compute_configured_forward_target_node_size() { + const fn sz(_: &F) -> usize + where + F: FnOnce(T1, T2, T3, T4) -> R, + { + std::mem::size_of::() + } + + const _: () = assert!( + sz(&compute_configured_forward_target_node) <= 700, + "compute_configured_forward_target_node size is larger than 700 bytes", + ); } diff --git a/app/buck2_core/BUCK b/app/buck2_core/BUCK index fb91629a4f1fc..ed451c53c88cd 100644 --- a/app/buck2_core/BUCK +++ b/app/buck2_core/BUCK @@ -1,6 +1,5 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") load("@fbcode_macros//build_defs:rust_unittest.bzl", "rust_unittest") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -9,7 +8,17 @@ rust_library( srcs = glob( ["src/**/*.rs"], ) + ["src/pattern/target_pattern.md"], - doctests = False, # FIXME + doc_deps = [ + "fbsource//third-party/rust:maplit", + ], + doctests = select({ + "DEFAULT": True, + # ``` + # The command line is too long. + # Couldn't compile the test. + # ``` + "ovr_config//os:windows": False, + }), os_deps = [ ( "linux", @@ -27,31 +36,30 @@ rust_library( "windows", [ "fbsource//third-party/rust:common-path", + "fbsource//third-party/rust:winapi", ], ), ], test_deps = [ "fbsource//third-party/rust:assert_matches", - "fbsource//third-party/rust:maplit", "fbsource//third-party/rust:serde_json", "fbsource//third-party/rust:test-case", ], deps = [ - "fbsource//third-party/blake3:blake3-rust", "fbsource//third-party/rust:anyhow", "fbsource//third-party/rust:arc-swap", + "fbsource//third-party/rust:blake3", "fbsource//third-party/rust:compact_str", - "fbsource//third-party/rust:dashmap", "fbsource//third-party/rust:derivative", "fbsource//third-party/rust:derive_more", "fbsource//third-party/rust:dunce", - "fbsource//third-party/rust:either", "fbsource//third-party/rust:equivalent", - "fbsource//third-party/rust:fnv", "fbsource//third-party/rust:futures", "fbsource//third-party/rust:hostname", "fbsource//third-party/rust:indent_write", "fbsource//third-party/rust:itertools", + "fbsource//third-party/rust:libc", + "fbsource//third-party/rust:linkme", "fbsource//third-party/rust:memchr", "fbsource//third-party/rust:once_cell", "fbsource//third-party/rust:os_str_bytes", @@ -65,20 +73,20 @@ rust_library( "fbsource//third-party/rust:smallvec", "fbsource//third-party/rust:static_assertions", "fbsource//third-party/rust:tempfile", - "fbsource//third-party/rust:thiserror", "fbsource//third-party/rust:tokio", "fbsource//third-party/rust:tracing", "fbsource//third-party/rust:tracing-subscriber", "fbsource//third-party/rust:triomphe", "//buck2/allocative/allocative:allocative", "//buck2/app/buck2_data:buck2_data", + "//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_util:buck2_util", "//buck2/gazebo/cmp_any:cmp_any", "//buck2/gazebo/dupe:dupe", 
"//buck2/gazebo/gazebo:gazebo", - "//buck2/shed/internment_tweaks:internment_tweaks", + "//buck2/shed/lock_free_hashtable:lock_free_hashtable", + "//buck2/shed/static_interner:static_interner", "//buck2/starlark-rust/starlark_map:starlark_map", - "//common/rust/shed/sorted_vector_map:sorted_vector_map", ], ) @@ -90,5 +98,6 @@ rust_unittest( deps = [ "fbsource//third-party/rust:anyhow", ":buck2_core", + "//buck2/app/buck2_error:buck2_error", ], ) diff --git a/app/buck2_core/Cargo.toml b/app/buck2_core/Cargo.toml index dce72050411a1..ae02d51f69eb3 100644 --- a/app/buck2_core/Cargo.toml +++ b/app/buck2_core/Cargo.toml @@ -1,70 +1,66 @@ [package] +edition = "2021" +license = { workspace = true } name = "buck2_core" +repository = { workspace = true } version = "0.1.0" -edition = "2021" [dependencies] anyhow = { workspace = true } -async-trait = { workspace = true } arc-swap = { workspace = true } blake3 = { workspace = true } compact_str = { workspace = true } -tempfile = { workspace = true } -derive_more = { workspace = true } derivative = { workspace = true } +derive_more = { workspace = true } dunce = { workspace = true } equivalent = { workspace = true } -fnv = { workspace = true } futures = { workspace = true } hostname = { workspace = true } indent_write = { workspace = true } itertools = { workspace = true } libc = { workspace = true } +linkme = { workspace = true } memchr = { workspace = true } once_cell = { workspace = true } os_str_bytes = { workspace = true } pin-project = { workspace = true } -relative-path = { workspace = true } +rand = { workspace = true } ref-cast = { workspace = true } +regex = { workspace = true } +relative-path = { workspace = true } sequence_trie = { workspace = true } serde = { workspace = true } +smallvec = { workspace = true } +starlark_map = { workspace = true } static_assertions = { workspace = true } -thiserror = { workspace = true } +tempfile = { workspace = true } tokio = { workspace = true } -regex = { workspace = true } -dashmap = { workspace = true } -either = { workspace = true } -assert_matches = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } triomphe = { workspace = true } -rand = { workspace = true } -smallvec = { workspace = true } -starlark_map = { workspace = true } -gazebo = { workspace = true } -dupe = { workspace = true } -cmp_any = { workspace = true } -gazebo_lint.version = "0.1" -gazebo_lint.optional = true -# @oss-disable: gazebo_lint.path = "../../gazebo_lint/gazebo_lint" -internment_tweaks = { path = "../../shed/internment_tweaks" } allocative = { workspace = true } -sorted_vector_map = { workspace = true } +cmp_any = { workspace = true } +dupe = { workspace = true } +gazebo = { workspace = true } +lock_free_hashtable = { workspace = true } +static_interner = { workspace = true } buck2_data = { workspace = true } +buck2_error = { workspace = true } buck2_util = { workspace = true } [target.'cfg(unix)'.dependencies] nix = { workspace = true } [target.'cfg(windows)'.dependencies] -common-path = { workspace = true} +common-path = { workspace = true } +winapi = { workspace = true } [dev-dependencies] -maplit = { workspace = true } -test-case = { workspace = true } +assert_matches = { workspace = true } serde_json = { workspace = true } +test-case = { workspace = true } -[features] -# @oss-disable: default = ["gazebo_lint"] +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ["cfg(fbcode_build)"] } diff --git a/app/buck2_core/src/async_once_cell.rs 
b/app/buck2_core/src/async_once_cell.rs index 0fdba18875119..baa4e28674b6c 100644 --- a/app/buck2_core/src/async_once_cell.rs +++ b/app/buck2_core/src/async_once_cell.rs @@ -7,6 +7,7 @@ * of this source tree. */ +use std::convert::Infallible; use std::sync::OnceLock; use allocative::Allocative; @@ -32,24 +33,37 @@ impl AsyncOnceCell { self.cell.get() } - pub async fn get_or_init>(&self, fut: F) -> &T { + pub async fn get_or_try_init>>( + &self, + fut: F, + ) -> Result<&T, E> { if let Some(val) = self.cell.get() { - return val; + return Ok(val); } let _guard = self.initialized.lock().await; if let Some(val) = self.cell.get() { - return val; + return Ok(val); } - let val = fut.await; + let val = fut.await?; match self.cell.set(val) { - Ok(()) => self.cell.get().unwrap(), + Ok(()) => Ok(self.cell.get().unwrap()), Err(_) => unreachable!(), } } + + pub async fn get_or_init>(&self, fut: F) -> &T { + match self + .get_or_try_init(async { Ok::<_, Infallible>(fut.await) }) + .await + { + Ok(val) => val, + Err(infallible) => match infallible {}, + } + } } #[cfg(test)] @@ -78,4 +92,46 @@ mod tests { Ok(()) } + + #[tokio::test] + async fn test_get_or_try_init() -> anyhow::Result<()> { + let cell1 = AsyncOnceCell::new(); + + assert_eq!( + &43, + cell1 + .get_or_try_init(async { anyhow::Ok(43) }) + .await + .unwrap() + ); + assert_eq!( + &43, + cell1 + .get_or_try_init(async { anyhow::Ok(55) }) + .await + .unwrap() + ); + + let cell2 = AsyncOnceCell::new(); + cell2 + .get_or_try_init(async { Err(anyhow::anyhow!("foo")) }) + .await + .unwrap_err(); + assert_eq!( + &44, + cell2 + .get_or_try_init(async { anyhow::Ok(44) }) + .await + .unwrap() + ); + assert_eq!( + &44, + cell2 + .get_or_try_init(async { anyhow::Ok(56) }) + .await + .unwrap() + ); + + Ok(()) + } } diff --git a/app/buck2_core/src/base_deferred_key.rs b/app/buck2_core/src/base_deferred_key.rs index 9a7027f72fc85..177173213153b 100644 --- a/app/buck2_core/src/base_deferred_key.rs +++ b/app/buck2_core/src/base_deferred_key.rs @@ -9,6 +9,7 @@ use std::any::Any; use std::borrow::Cow; +use std::collections::hash_map::DefaultHasher; use std::fmt::Debug; use std::fmt::Display; use std::hash::Hash; @@ -46,11 +47,20 @@ pub trait BaseDeferredKeyDyn: Debug + Display + Any + Allocative + Send + Sync + fn execution_platform_resolution(&self) -> &ExecutionPlatformResolution; } +#[derive(Debug, derive_more::Display, Dupe, Clone, Allocative)] +pub struct BaseDeferredKeyBxl(pub Arc); + +impl PartialEq for BaseDeferredKeyBxl { + fn eq(&self, other: &Self) -> bool { + self.0.eq_token() == other.0.eq_token() + } +} + #[derive(Debug, derive_more::Display, Dupe, Clone, Allocative)] pub enum BaseDeferredKey { TargetLabel(ConfiguredTargetLabel), AnonTarget(Arc), - BxlLabel(Arc), + BxlLabel(BaseDeferredKeyBxl), } impl PartialEq for BaseDeferredKey { @@ -62,9 +72,7 @@ impl PartialEq for BaseDeferredKey { a.eq_token() == b.eq_token() } (BaseDeferredKey::AnonTarget(_), _) => false, - (BaseDeferredKey::BxlLabel(a), BaseDeferredKey::BxlLabel(b)) => { - a.eq_token() == b.eq_token() - } + (BaseDeferredKey::BxlLabel(a), BaseDeferredKey::BxlLabel(b)) => a == b, (BaseDeferredKey::BxlLabel(_), _) => false, } } @@ -76,7 +84,9 @@ impl Hash for BaseDeferredKey { fn hash(&self, state: &mut H) { match self { BaseDeferredKey::TargetLabel(a) => a.hash(state), - BaseDeferredKey::AnonTarget(d) | BaseDeferredKey::BxlLabel(d) => d.hash().hash(state), + BaseDeferredKey::AnonTarget(d) | BaseDeferredKey::BxlLabel(BaseDeferredKeyBxl(d)) => { + d.hash().hash(state) + } } } } @@ -89,12 +99,22 
         }
     }
 
+    pub fn configured_label(&self) -> Option<ConfiguredTargetLabel> {
+        match self {
+            BaseDeferredKey::TargetLabel(label) => Some(label.dupe()),
+            BaseDeferredKey::AnonTarget(t) | BaseDeferredKey::BxlLabel(BaseDeferredKeyBxl(t)) => {
+                t.configured_label()
+            }
+        }
+    }
+
     pub fn make_hashed_path(
         &self,
         base: &ProjectRelativePath,
         prefix: &ForwardRelativePath,
         action_key: Option<&str>,
         path: &ForwardRelativePath,
+        fully_hash_path: bool,
     ) -> ProjectRelativePathBuf {
         match self {
             BaseDeferredKey::TargetLabel(target) => {
@@ -104,13 +124,7 @@ impl BaseDeferredKey {
                 // It is performance critical that we use slices and allocate via `join` instead of
                 // repeated calls to `join` on the path object because `join` allocates on each call,
                 // which has a significant impact.
-                let parts = [
-                    base.as_str(),
-                    "/",
-                    prefix.as_str(),
-                    "/",
-                    target.pkg().cell_name().as_str(),
-                    "/",
+                let path_identifier = [
                     target.cfg().output_hash().as_str(),
                     if target.exec_cfg().is_some() { "-" } else { "" },
                     target
@@ -135,12 +149,31 @@ impl BaseDeferredKey {
                     },
                     action_key.unwrap_or_default(),
                     if action_key.is_none() { "" } else { "__/" },
+                ];
+
+                let path_or_hash = if fully_hash_path {
+                    let mut hasher = DefaultHasher::new();
+                    path_identifier.hash(&mut hasher);
+
+                    format!("{:x}/", hasher.finish())
+                } else {
+                    path_identifier.concat()
+                };
+
+                let hashed_path = [
+                    base.as_str(),
+                    "/",
+                    prefix.as_str(),
+                    "/",
+                    target.pkg().cell_name().as_str(),
+                    "/",
+                    path_or_hash.as_str(),
                     path.as_str(),
                 ];
 
-                ProjectRelativePathBuf::unchecked_new(parts.concat())
+                ProjectRelativePathBuf::unchecked_new(hashed_path.concat())
             }
-            BaseDeferredKey::AnonTarget(d) | BaseDeferredKey::BxlLabel(d) => {
+            BaseDeferredKey::AnonTarget(d) | BaseDeferredKey::BxlLabel(BaseDeferredKeyBxl(d)) => {
                 d.make_hashed_path(base, prefix, action_key, path)
             }
         }
@@ -170,7 +203,9 @@ impl BaseDeferredKey {
     pub fn to_proto(&self) -> BaseDeferredKeyProto {
         match self {
             BaseDeferredKey::TargetLabel(t) => BaseDeferredKeyProto::TargetLabel(t.as_proto()),
-            BaseDeferredKey::AnonTarget(d) | BaseDeferredKey::BxlLabel(d) => d.to_proto(),
+            BaseDeferredKey::AnonTarget(d) | BaseDeferredKey::BxlLabel(BaseDeferredKeyBxl(d)) => {
+                d.to_proto()
+            }
         }
     }
 }
diff --git a/app/buck2_core/src/buck_path/mod.rs b/app/buck2_core/src/buck_path.rs
similarity index 100%
rename from app/buck2_core/src/buck_path/mod.rs
rename to app/buck2_core/src/buck_path.rs
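The `make_hashed_path` change above collects the configuration-dependent middle of an output path into `path_identifier`, and the new `fully_hash_path` flag decides whether those segments are emitted verbatim or collapsed into a single hash segment. A minimal standalone sketch of the two layouts (the `output_path` helper and its flattened parameters are illustrative, not the real buck2 API):

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Illustrative sketch of the branch added to `make_hashed_path`:
// `path_identifier` stands in for the configuration hash, target path,
// and action key segments built by the real code.
fn output_path(
    base: &str,
    prefix: &str,
    cell: &str,
    path_identifier: &[&str],
    path: &str,
    fully_hash_path: bool,
) -> String {
    let path_or_hash = if fully_hash_path {
        // Collapse the whole identifier into one hex segment, keeping the
        // overall path short no matter how long the identifier grows.
        let mut hasher = DefaultHasher::new();
        path_identifier.hash(&mut hasher);
        format!("{:x}/", hasher.finish())
    } else {
        // Emit the identifier segments verbatim.
        path_identifier.concat()
    };
    [base, "/", prefix, "/", cell, "/", path_or_hash.as_str(), path].concat()
}

fn main() {
    let id = ["a1b2c3d4", "/", "foo/__bar__/"];
    // Verbatim layout: buck-out/v2/gen/cell/a1b2c3d4/foo/__bar__/out.txt
    println!("{}", output_path("buck-out/v2", "gen", "cell", &id, "out.txt", false));
    // Hashed layout: buck-out/v2/gen/cell/<hex hash>/out.txt
    println!("{}", output_path("buck-out/v2", "gen", "cell", &id, "out.txt", true));
}
```

One caveat worth noting: `DefaultHasher` is not guaranteed to produce the same hash across Rust releases, so the hashed layout is only reproducible for binaries built with the same toolchain.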
diff --git a/app/buck2_core/src/buck_path/path.rs b/app/buck2_core/src/buck_path/path.rs
deleted file mode 100644
index 9a5ad83ce5132..0000000000000
--- a/app/buck2_core/src/buck_path/path.rs
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */
-
-use allocative::Allocative;
-use buck2_util::arc_str::ArcS;
-use derive_more::Display;
-use dupe::Dupe;
-
-use crate::cells::cell_path::CellPath;
-use crate::package::package_relative_path::PackageRelativePath;
-use crate::package::PackageLabel;
-
-/// Represents a resolvable path corresponding to some path that is part of a
-/// 'Package'. The 'BuckPath' refers to only paths in the repo source, not
-/// outputs of a 'Package'.
-#[derive(
-    Clone,
-    Debug,
-    derive_more::Display,
-    Hash,
-    Eq,
-    PartialEq,
-    Ord,
-    PartialOrd,
-    Allocative
-)]
-#[display(fmt = "{}", "self.as_ref()")]
-pub struct BuckPath {
-    pkg: PackageLabel,
-    path: ArcS<PackageRelativePath>,
-}
-
-impl BuckPath {
-    #[inline]
-    pub fn new(pkg: PackageLabel, path: ArcS<PackageRelativePath>) -> Self {
-        BuckPath { pkg, path }
-    }
-
-    /// This is slow, but OK to use in tests.
-    pub fn testing_new(pkg: PackageLabel, path: impl AsRef<PackageRelativePath>) -> Self {
-        BuckPath::new(pkg, ArcS::from(path.as_ref()))
-    }
-
-    #[inline]
-    pub fn package(&self) -> PackageLabel {
-        self.pkg.dupe()
-    }
-
-    #[inline]
-    pub fn path(&self) -> &PackageRelativePath {
-        &self.path
-    }
-
-    #[inline]
-    pub fn to_cell_path(&self) -> CellPath {
-        self.as_ref().to_cell_path()
-    }
-
-    #[inline]
-    pub fn as_ref(&self) -> BuckPathRef {
-        BuckPathRef {
-            pkg: self.pkg.dupe(),
-            path: &self.path,
-        }
-    }
-}
-
-#[derive(Display, Debug, Eq, Hash, PartialEq, Clone, Dupe)]
-#[display(fmt = "{}/{}", pkg, "path.as_str()")]
-pub struct BuckPathRef<'a> {
-    pkg: PackageLabel,
-    path: &'a ArcS<PackageRelativePath>,
-}
-
-impl<'a> BuckPathRef<'a> {
-    #[inline]
-    pub fn new(pkg: PackageLabel, path: &'a ArcS<PackageRelativePath>) -> BuckPathRef<'a> {
-        BuckPathRef { pkg, path }
-    }
-
-    #[inline]
-    pub fn package(&self) -> PackageLabel {
-        self.pkg.dupe()
-    }
-
-    #[inline]
-    pub fn path(&self) -> &PackageRelativePath {
-        self.path
-    }
-
-    #[inline]
-    pub fn to_cell_path(&self) -> CellPath {
-        self.pkg
-            .as_cell_path()
-            .join(self.path.as_forward_rel_path())
-    }
-
-    #[inline]
-    pub fn to_buck_path(&self) -> BuckPath {
-        BuckPath {
-            pkg: self.pkg.dupe(),
-            path: self.path.dupe(),
-        }
-    }
-}
diff --git a/app/buck2_core/src/build_file_path.rs b/app/buck2_core/src/build_file_path.rs
index d77b01bb6029c..96db0424ab810 100644
--- a/app/buck2_core/src/build_file_path.rs
+++ b/app/buck2_core/src/build_file_path.rs
@@ -19,7 +19,7 @@ use crate::package::PackageLabel;
 
 /// Path of a build file (e.g. `BUCK`) only. (`bzl` files are not included).
 #[derive(Clone, Hash, Eq, PartialEq, Debug, derive_more::Display, Allocative)]
-#[display(fmt = "{}:{}", package, filename)]
+#[display("{}:{}", package, filename)]
 pub struct BuildFilePath {
     /// The package of this build file
     package: PackageLabel,
diff --git a/app/buck2_core/src/bzl.rs b/app/buck2_core/src/bzl.rs
index 23b8e601de745..3bd2918922052 100644
--- a/app/buck2_core/src/bzl.rs
+++ b/app/buck2_core/src/bzl.rs
@@ -19,7 +19,8 @@ use crate::cells::name::CellName;
 use crate::cells::paths::CellRelativePath;
 use crate::fs::paths::file_name::FileName;
 
-#[derive(Debug, thiserror::Error)]
+#[derive(Debug, buck2_error::Error)]
+#[buck2(input)]
 enum ImportPathError {
     #[error("Invalid import path `{0}`")]
     Invalid(CellPath),
diff --git a/app/buck2_core/src/category.rs b/app/buck2_core/src/category.rs
index 7629d2199e158..e720cbbc897fe 100644
--- a/app/buck2_core/src/category.rs
+++ b/app/buck2_core/src/category.rs
@@ -14,55 +14,61 @@
 //! category of all actions that invoke a C++ compiler, of which there are potentially many in a single C++ rule
 //! implementation.
 
-use std::fmt;
-
 use allocative::Allocative;
+use dupe::Dupe;
 use once_cell::sync::Lazy;
 use regex::Regex;
-use thiserror::Error;
 
 /// A category, representing a family of actions.
-#[derive(Clone, Debug, PartialEq, Eq, Hash, Allocative)]
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Allocative, derive_more::Display)]
 pub struct Category(String);
 
+#[derive(Debug, Clone, Copy, Dupe, PartialEq, Eq, Hash, derive_more::Display)]
+pub struct CategoryRef<'a>(&'a str);
+
 impl Category {
+    pub fn new(s: String) -> anyhow::Result<Category> {
+        CategoryRef::new(&s)?;
+        Ok(Category(s))
+    }
+
     /// Returns a string representation of this category.
     pub fn as_str(&self) -> &str {
         self.0.as_str()
     }
+
+    pub fn as_ref(&self) -> CategoryRef<'_> {
+        CategoryRef(self.0.as_str())
+    }
 }
 
-impl TryFrom<String> for Category {
-    type Error = CategoryParseError;
+impl<'a> CategoryRef<'a> {
+    pub fn unchecked_new(s: &'static str) -> Self {
+        CategoryRef(s)
+    }
+
+    pub fn as_str(self) -> &'a str {
+        self.0
+    }
 
-    fn try_from(value: String) -> Result<Self, Self::Error> {
+    pub fn new(s: &'a str) -> anyhow::Result<CategoryRef<'a>> {
         static CATEGORY_REGEX: Lazy<Regex> =
             Lazy::new(|| Regex::new("^[a-z][a-z0-9]*(_[a-z][a-z0-9]*)*$").unwrap());
 
-        if !CATEGORY_REGEX.is_match(&value) {
-            Err(CategoryParseError::NotSnakeCase(value))
+        if !CATEGORY_REGEX.is_match(s) {
+            Err(CategoryParseError::NotSnakeCase(s.to_owned()).into())
         } else {
-            Ok(Category(value))
+            Ok(CategoryRef(s))
         }
     }
-}
-
-impl TryFrom<&str> for Category {
-    type Error = CategoryParseError;
-
-    fn try_from(value: &str) -> Result<Self, Self::Error> {
-        Category::try_from(value.to_owned())
-    }
-}
 
-impl fmt::Display for Category {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        self.0.fmt(f)
+    pub fn to_owned(self) -> Category {
+        Category(self.0.to_owned())
     }
 }
 
-#[derive(Debug, Error)]
-pub enum CategoryParseError {
+#[derive(Debug, buck2_error::Error)]
+enum CategoryParseError {
     #[error(
         "Invalid category `{0}`. Must be a snake_cased identifier consisting of lowercase alphanumeric characters, e.g. `cxx_compile`. Each section of the snake_cased identifier must begin with a lowercase letter (not a number)."
     )]
@@ -71,21 +77,21 @@ pub enum CategoryParseError {
 
 #[cfg(test)]
 mod tests {
-    use super::Category;
+    use super::CategoryRef;
 
     #[test]
     fn valid_categories() {
-        Category::try_from("valid_category").unwrap();
-        Category::try_from("valid_category_with_numbers10").unwrap();
-        Category::try_from("singleword").unwrap();
+        CategoryRef::new("valid_category").unwrap();
+        CategoryRef::new("valid_category_with_numbers10").unwrap();
+        CategoryRef::new("singleword").unwrap();
     }
 
     #[test]
     fn invalid_categories() {
-        Category::try_from("_leading_underscore").unwrap_err();
-        Category::try_from("NotSnakeCase").unwrap_err();
-        Category::try_from("Not_Snake_Case").unwrap_err();
-        Category::try_from("contains_4_number").unwrap_err();
-        Category::try_from("trailing_underscore_").unwrap_err();
+        CategoryRef::new("_leading_underscore").unwrap_err();
+        CategoryRef::new("NotSnakeCase").unwrap_err();
+        CategoryRef::new("Not_Snake_Case").unwrap_err();
+        CategoryRef::new("contains_4_number").unwrap_err();
+        CategoryRef::new("trailing_underscore_").unwrap_err();
     }
 }
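The reworked API separates validation from ownership: `CategoryRef::new` checks the snake_case rule against `CATEGORY_REGEX`, while `Category::new` validates once and keeps the owned `String`. A doctest-style sketch of the intended flow, assuming the definitions above (hypothetical usage, not taken from the patch):

```rust
use buck2_core::category::Category;

// Validate once at construction; invalid names fail with CategoryParseError.
let owned = Category::new("cxx_compile".to_owned())?;

// `as_ref` hands out a cheap `Copy` view for hot paths.
let view = owned.as_ref();
assert_eq!("cxx_compile", view.as_str());

// A view can be promoted back to an owned value when it must outlive the borrow.
let _owned_again = view.to_owned();

// Rejected: each snake_case section must begin with a lowercase letter.
assert!(Category::new("contains_4_number".to_owned()).is_err());

# anyhow::Ok(())
```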
diff --git a/app/buck2_core/src/cells.rs b/app/buck2_core/src/cells.rs
new file mode 100644
index 0000000000000..ac3a7701d37f5
--- /dev/null
+++ b/app/buck2_core/src/cells.rs
@@ -0,0 +1,667 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+//!
+//! # Cell
+//! A 'Cell' is a sub-project within the main project for Buck. All files
+//! reachable by Buck belong to a single Cell.
+//! Cells can be sub-directories of other cells, but that makes that
+//! sub-directory part of the sub-cell and no longer part of the parent cell.
+//! For example, let's say there are cells 'parent-cell' and 'sub-cell' declared
+//! in folders of the same names.
+//! ```text
+//! parent-cell
+//! +-- folder1
+//! +-- folder2
+//! +-- sub-cell
+//! |   +-- folder3
+//! ```
+//! All files part of `folder1` and `folder2` will be part of 'parent-cell'.
+//! Anything part of `sub-cell`, including `folder3`, is only part of the
+//! 'sub-cell'.
+//!
+//! For users, each Cell is identified by 'CellAlias's. A 'CellAlias' is a
+//! human-readable name that contains alphanumeric characters and underscores
+//! (i.e. it shouldn't contain any special characters like `/`). Something like `1`
+//! is a valid identifier, though we do not suggest such naming as it's not
+//! very descriptive.
+//!
+//! It's possible that in certain cell contexts, some Cells are not reachable by
+//! any 'CellAlias'. However, in the global context, every Cell will be
+//! reachable by at least one 'CellAlias'.
+//!
+//! ## Cell Alias
+//! The cell alias appears within a fully qualified target with the syntax
+//! `<cell alias>//<target or path>`. For example, in `foo//some:target`, `foo` is
+//! the cell alias. An example like `foo/bar//some:target` has an invalid cell
+//! alias of `foo/bar` since special characters are forbidden.
+//!
+//! The 'CellAlias' is specified via configuration files per cell. A
+//! configuration specifies these with the syntax `<alias>=<path to cell>`. We
+//! allow a many-to-one mapping from 'CellAlias' to Cell.
+//!
+//! Each Cell may give different aliases to the same cell. The 'CellAlias' will
+//! be resolved based on the contextual cell that the alias appears in.
+//! e.g. the `mycell//foo:bar` build file will have any aliases that appear within
+//! it resolved using the aliases defined in the `mycell` cell.
+//!
+//! Cells may omit declaring aliases for cells that exist globally. This means
+//! that there will be no alias for those cells, which renders those cells
+//! inaccessible from the cell context that doesn't declare them.
+//!
+//! ### The Empty Cell Alias
+//! The empty cell alias is a special alias injected by Buck to represent the
+//! current contextual cell. That means, inside the `mycell` cell, references to the
+//! 'CellAlias' `""` will resolve to the `mycell` cell.
+//!
+//! ## Cell Name
+//! Each Cell is uniquely identified globally via a one-to-one mapping to a
+//! 'CellName'. A 'CellName' is a canonicalized, human-readable name that
+//! corresponds to a 'CellInstance'. The cell name is inferred from the global
+//! list of 'CellAlias's available, by picking the first alias for each cell
+//! path based on lexicographic ordering of the aliases. The 'CellName' is
+//! subject to the same character restrictions as 'CellAlias'.
+//!
+//! # Resolving Cells
+//! Cells are represented by 'CellInstance'. The 'CellResolver' is able to
+//! resolve 'CellName's to 'CellInstance's. It is also able to find the
+//! containing Cell given a path. A 'CellAlias' can be resolved with a
+//! 'CellAliasResolver'. Each 'CellInstance' contains a 'CellAliasResolver' for
+//! the cell alias mapping for that particular cell.
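The deleted `cells/mod.rs` further below carried a worked doctest for this flow that the moved file drops, so a compact sketch of the path-to-cell resolution described above may be useful (doctest-style, built on the `testing_*` constructors defined later in this file):

```rust
use buck2_core::cells::cell_root_path::CellRootPathBuf;
use buck2_core::cells::name::CellName;
use buck2_core::cells::CellResolver;
use buck2_core::fs::project_rel_path::ProjectRelativePath;

// A root cell at the project root, plus one cell rooted at `fbcode`.
let cells = CellResolver::testing_with_names_and_paths(&[
    (CellName::testing_new("root"), CellRootPathBuf::testing_new("")),
    (CellName::testing_new("fbcode"), CellRootPathBuf::testing_new("fbcode")),
]);

// `find` picks the longest cell root that is a prefix of the path.
let cell = cells.find(ProjectRelativePath::new("fbcode/some/pkg")?)?;
assert_eq!(CellName::testing_new("fbcode"), cell);

// `get` maps the canonical name back to its `CellInstance`.
assert_eq!("fbcode", cells.get(cell)?.path().as_str());

# anyhow::Ok(())
```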
+
+pub mod alias;
+pub mod build_file_cell;
+pub mod cell_path;
+pub mod cell_root_path;
+pub mod external;
+pub mod instance;
+pub mod name;
+pub mod nested;
+pub mod paths;
+pub(crate) mod sequence_trie_allocative;
+pub mod unchecked_cell_rel_path;
+
+use std::collections::hash_map;
+use std::collections::HashMap;
+use std::fmt::Debug;
+use std::sync::Arc;
+
+use allocative::Allocative;
+use anyhow::Context;
+use dupe::Dupe;
+use dupe::OptionDupedExt;
+use gazebo::prelude::*;
+use instance::CellInstance;
+use itertools::Itertools;
+use sequence_trie::SequenceTrie;
+
+use crate::cells::alias::CellAlias;
+use crate::cells::alias::NonEmptyCellAlias;
+use crate::cells::cell_path::CellPath;
+use crate::cells::cell_path::CellPathRef;
+use crate::cells::cell_root_path::CellRootPathBuf;
+use crate::cells::name::CellName;
+use crate::cells::nested::NestedCells;
+use crate::fs::paths::abs_path::AbsPath;
+use crate::fs::paths::file_name::FileNameBuf;
+use crate::fs::project::ProjectRoot;
+use crate::fs::project_rel_path::ProjectRelativePath;
+use crate::fs::project_rel_path::ProjectRelativePathBuf;
+
+/// Errors from cell creation
+#[derive(buck2_error::Error, Debug)]
+#[buck2(input)]
+enum CellError {
+    #[error("Cell paths `{1}` and `{2}` had the same cell name `{0}`.")]
+    DuplicateNames(CellName, CellRootPathBuf, CellRootPathBuf),
+    #[error("Two cells, `{0}` and `{1}`, share the same path `{2}`")]
+    DuplicatePaths(CellName, CellName, CellRootPathBuf),
+    #[error("cannot find the cell at current path `{0}`. Known roots are `<{}>`", .1.join(", "))]
+    UnknownCellPath(ProjectRelativePathBuf, Vec<String>),
+    #[error("unknown cell alias: `{0}`. In cell `{1}`, known aliases are: `{}`", .2.iter().join(", "))]
+    UnknownCellAlias(CellAlias, CellName, Vec<NonEmptyCellAlias>),
+    #[error("unknown cell name: `{0}`. known cell names are `{}`", .1.iter().join(", "))]
+    UnknownCellName(CellName, Vec<CellName>),
+    #[error(
+        "Cell name `{0}` should be an alias for an existing cell, but `{1}` isn't a known alias"
+    )]
+    AliasOnlyCell(NonEmptyCellAlias, NonEmptyCellAlias),
+    #[error("Cell `{0}` alias `{0}` should point to itself, but it points to `{1}`")]
+    WrongSelfAlias(CellName, CellName),
+    #[error("No cell name for the root path, add an entry for `.`")]
+    NoRootCell,
+}
+
+/// A 'CellAliasResolver' is unique to a 'CellInstance'.
+/// It is responsible for resolving all 'CellAlias' encountered within the
+/// 'CellInstance' into the global canonical 'CellName's
+#[derive(Clone, Dupe, Debug, PartialEq, Eq, Allocative)]
+pub struct CellAliasResolver {
+    /// Current cell name.
+    current: CellName,
+    aliases: Arc<HashMap<NonEmptyCellAlias, CellName>>,
+}
+
+impl CellAliasResolver {
+    /// Create an instance of `CellAliasResolver`.
The special alias `""` must be present, or + /// this will fail + pub fn new( + current: CellName, + mut aliases: HashMap, + ) -> anyhow::Result { + let current_as_alias = NonEmptyCellAlias::new(current.as_str().to_owned())?; + if let Some(alias_target) = aliases.insert(current_as_alias, current) { + if alias_target != current { + return Err(CellError::WrongSelfAlias(current, alias_target).into()); + } + } + + let aliases = Arc::new(aliases); + + Ok(CellAliasResolver { current, aliases }) + } + + pub fn new_for_non_root_cell( + current: CellName, + root_aliases: &CellAliasResolver, + alias_list: impl IntoIterator, + ) -> anyhow::Result { + let mut aliases: HashMap<_, _> = root_aliases + .mappings() + .map(|(x, y)| (x.to_owned(), y)) + .collect(); + for (alias, destination) in alias_list { + let Some(name) = aliases.get(&destination) else { + return Err(CellError::AliasOnlyCell(alias, destination).into()); + }; + aliases.insert(alias, *name); + } + CellAliasResolver::new(current, aliases) + } + + /// resolves a 'CellAlias' into its corresponding 'CellName' + pub fn resolve(&self, alias: &str) -> anyhow::Result { + if alias.is_empty() { + return Ok(self.current); + } + self.aliases.get(alias).duped().ok_or_else(|| { + anyhow::Error::from(CellError::UnknownCellAlias( + CellAlias::new(alias.to_owned()), + self.current, + self.aliases.keys().cloned().collect(), + )) + }) + } + + /// finds the 'CellName' for the current cell (with the alias `""`. See module docs) + pub fn resolve_self(&self) -> CellName { + self.current + } + + pub fn mappings(&self) -> impl Iterator { + self.aliases.iter().map(|(alias, name)| (alias, *name)) + } +} + +/// Resolves 'CellName's into 'CellInstance's. +// TODO(bobyf) we need to check if cells changed +#[derive(Clone, Dupe, PartialEq, Eq, Debug, Allocative)] +pub struct CellResolver(Arc); + +#[derive(PartialEq, Eq, Debug, Allocative)] +struct CellResolverInternals { + cells: HashMap, + #[allocative(visit = crate::cells::sequence_trie_allocative::visit_sequence_trie)] + path_mappings: SequenceTrie, + root_cell: CellName, + root_cell_alias_resolver: CellAliasResolver, +} + +impl CellResolver { + pub fn new( + cells: Vec, + root_cell_alias_resolver: CellAliasResolver, + ) -> anyhow::Result { + let mut path_mappings: SequenceTrie = SequenceTrie::new(); + let mut root_cell = None; + for cell in &cells { + if cell.path().is_empty() { + root_cell = Some(cell.name()); + } + let prev = path_mappings.insert(cell.path().iter(), cell.name()); + if let Some(prev) = prev { + return Err( + CellError::DuplicatePaths(cell.name(), prev, cell.path().to_buf()).into(), + ); + } + } + + let mut cells_map: HashMap = HashMap::with_capacity(cells.len()); + for cell in cells { + match cells_map.entry(cell.name()) { + hash_map::Entry::Occupied(entry) => { + return Err(CellError::DuplicateNames( + cell.name(), + entry.get().path().to_buf(), + cell.path().to_buf(), + ) + .into()); + } + hash_map::Entry::Vacant(entry) => { + entry.insert(cell); + } + } + } + + let root_cell = root_cell.ok_or(CellError::NoRootCell)?; + Ok(CellResolver(Arc::new(CellResolverInternals { + cells: cells_map, + root_cell, + path_mappings, + root_cell_alias_resolver, + }))) + } + + /// Get a `Cell` from the `CellMap` + pub fn get(&self, cell: CellName) -> anyhow::Result<&CellInstance> { + self.0.cells.get(&cell).ok_or_else(|| { + anyhow::Error::from(CellError::UnknownCellName( + cell, + self.0.cells.keys().copied().collect(), + )) + }) + } + + pub fn is_root_cell(&self, name: CellName) -> bool { + name == 
+        name == self.0.root_cell
+    }
+
+    pub fn root_cell(&self) -> CellName {
+        self.0.root_cell
+    }
+
+    pub fn root_cell_instance(&self) -> &CellInstance {
+        self.get(self.root_cell())
+            .expect("Should have had a root cell")
+    }
+
+    pub fn root_cell_cell_alias_resolver(&self) -> &CellAliasResolver {
+        &self.0.root_cell_alias_resolver
+    }
+
+    /// Get a `CellName` from a path by finding the best matching cell path that
+    /// is a prefix of the current path relative to the project root. e.g. `fbcode/foo/bar` matches
+    /// cell path `fbcode`.
+    pub fn find<P: AsRef<ProjectRelativePath> + ?Sized>(
+        &self,
+        path: &P,
+    ) -> anyhow::Result<CellName> {
+        self.0
+            .path_mappings
+            .get_ancestor(path.as_ref().iter())
+            .copied()
+            .ok_or_else(|| {
+                anyhow::Error::from(CellError::UnknownCellPath(
+                    path.as_ref().to_buf(),
+                    self.0
+                        .path_mappings
+                        .keys()
+                        .map(|p| p.iter().join("/"))
+                        .collect(),
+                ))
+            })
+    }
+
+    pub fn get_cell_path<P: AsRef<ProjectRelativePath> + ?Sized>(
+        &self,
+        path: &P,
+    ) -> anyhow::Result<CellPath> {
+        let path = path.as_ref();
+        let cell = self.find(path)?;
+        let instance = self.get(cell)?;
+        let relative = path.strip_prefix(instance.path().as_project_relative_path())?;
+        Ok(CellPath::new(cell, relative.to_owned().into()))
+    }
+
+    pub fn get_cell_path_from_abs_path(
+        &self,
+        path: &AbsPath,
+        fs: &ProjectRoot,
+    ) -> anyhow::Result<CellPath> {
+        let abs_path = AbsPath::new(path)?;
+        self.get_cell_path(&fs.relativize_any(abs_path)?)
+    }
+
+    pub fn cells(&self) -> impl Iterator<Item = (CellName, &CellInstance)> {
+        self.0
+            .cells
+            .iter()
+            .map(|(name, instance)| (*name, instance))
+    }
+
+    /// Resolves a given 'Package' to the 'ProjectRelativePath' that points to
+    /// the 'Package'
+    ///
+    /// ```
+    /// use std::convert::TryFrom;
+    ///
+    /// use buck2_core::cells::cell_path::CellPath;
+    /// use buck2_core::cells::cell_root_path::CellRootPathBuf;
+    /// use buck2_core::cells::name::CellName;
+    /// use buck2_core::cells::paths::CellRelativePathBuf;
+    /// use buck2_core::cells::CellResolver;
+    /// use buck2_core::fs::project_rel_path::ProjectRelativePath;
+    /// use buck2_core::fs::project_rel_path::ProjectRelativePathBuf;
+    ///
+    /// let cell_path = ProjectRelativePath::new("my/cell")?;
+    /// let cells = CellResolver::testing_with_name_and_path(
+    ///     CellName::testing_new("mycell"),
+    ///     CellRootPathBuf::new(cell_path.to_buf()),
+    /// );
+    ///
+    /// let cell_path = CellPath::new(
+    ///     CellName::testing_new("mycell"),
+    ///     CellRelativePathBuf::unchecked_new("some/path".to_owned()),
+    /// );
+    ///
+    /// assert_eq!(
+    ///     cells.resolve_path(cell_path.as_ref())?,
+    ///     ProjectRelativePathBuf::unchecked_new("my/cell/some/path".into()),
+    /// );
+    ///
+    /// # anyhow::Ok(())
+    /// ```
+    pub fn resolve_path(&self, cell_path: CellPathRef) -> anyhow::Result<ProjectRelativePathBuf> {
+        Ok(self.get(cell_path.cell())?.path().join(cell_path.path()))
+    }
+
+    // These are constructors for tests.
+
+    pub fn testing_with_name_and_path(
+        other_name: CellName,
+        other_path: CellRootPathBuf,
+    ) -> CellResolver {
+        // It is an error to build a CellResolver that doesn't cover the root.
+        // Therefore, if it isn't needed for the test, just make one up.
+        if other_path.is_empty() {
+            Self::testing_with_names_and_paths_with_alias(
+                &[(other_name, other_path)],
+                HashMap::new(),
+            )
+        } else {
+            Self::testing_with_names_and_paths_with_alias(
+                &[
+                    (other_name, other_path),
+                    (
+                        CellName::testing_new("root"),
+                        CellRootPathBuf::testing_new(""),
+                    ),
+                ],
+                HashMap::new(),
+            )
+        }
+    }
+
+    pub fn testing_with_names_and_paths(cells: &[(CellName, CellRootPathBuf)]) -> CellResolver {
+        Self::testing_with_names_and_paths_with_alias(
+            &cells.map(|(name, path)| (*name, path.clone())),
+            HashMap::new(),
+        )
+    }
+
+    pub fn testing_with_names_and_paths_with_alias(
+        cells: &[(CellName, CellRootPathBuf)],
+        mut root_cell_aliases: HashMap<NonEmptyCellAlias, CellName>,
+    ) -> CellResolver {
+        assert_eq!(
+            cells.len(),
+            cells.iter().map(|(cell, _)| *cell).unique().count(),
+            "duplicate cell name"
+        );
+        assert_eq!(
+            cells.len(),
+            cells
+                .iter()
+                .map(|(_, path)| path.as_path())
+                .unique()
+                .count(),
+            "duplicate cell paths"
+        );
+
+        let all_roots = cells
+            .iter()
+            .map(|(cell, path)| (*cell, path.as_path()))
+            .collect::<Vec<_>>();
+        let instances: Vec<CellInstance> = cells
+            .iter()
+            .map(|(name, path)| {
+                CellInstance::new(
+                    *name,
+                    path.clone(),
+                    None,
+                    NestedCells::from_cell_roots(&all_roots, path),
+                )
+                .unwrap()
+            })
+            .collect();
+
+        let mut root = None;
+        for (cell, p) in cells {
+            root_cell_aliases.insert(
+                NonEmptyCellAlias::new(cell.as_str().to_owned()).unwrap(),
+                *cell,
+            );
+            if p.is_repo_root() {
+                root = Some(*cell);
+            }
+        }
+
+        let root_aliases = CellAliasResolver::new(root.unwrap(), root_cell_aliases).unwrap();
+
+        CellResolver::new(instances, root_aliases).unwrap()
+    }
+
+    pub(crate) fn resolve_path_crossing_cell_boundaries<'a>(
+        &self,
+        mut path: CellPathRef<'a>,
+    ) -> anyhow::Result<CellPathRef<'a>> {
+        let mut rem: u32 = 1000;
+        loop {
+            // Sanity check. Should never happen.
+ rem = rem + .checked_sub(1) + .context("Overflow computing cell boundaries")?; + + let nested_cells = self.get(path.cell())?.nested_cells(); + match nested_cells.matches_checked(path.path()) { + None => return Ok(path), + Some((_, new_cell_path)) => { + path = new_cell_path; + } + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::cells::cell_root_path::CellRootPath; + use crate::fs::paths::forward_rel_path::ForwardRelativePath; + use crate::fs::paths::forward_rel_path::ForwardRelativePathBuf; + + #[test] + fn test_of_names_and_paths() -> anyhow::Result<()> { + use crate::fs::project_rel_path::ProjectRelativePathBuf; + + let cell_resolver = CellResolver::testing_with_name_and_path( + CellName::testing_new("foo"), + CellRootPathBuf::new(ProjectRelativePathBuf::unchecked_new("bar".into())), + ); + + let cell = cell_resolver.get(CellName::testing_new("foo"))?; + assert_eq!(CellName::testing_new("foo"), cell.name()); + assert_eq!("bar", cell.path().as_str()); + + Ok(()) + } + + #[test] + fn test_cells() -> anyhow::Result<()> { + let cell1_path = CellRootPath::new(ProjectRelativePath::new("my/cell1")?); + let cell2_path = CellRootPath::new(ProjectRelativePath::new("cell2")?); + let cell3_path = CellRootPath::new(ProjectRelativePath::new("my/cell3")?); + + let cells = CellResolver::testing_with_names_and_paths(&[ + ( + CellName::testing_new("root"), + CellRootPathBuf::testing_new(""), + ), + (CellName::testing_new("cell1"), cell1_path.to_buf()), + (CellName::testing_new("cell2"), cell2_path.to_buf()), + (CellName::testing_new("cell3"), cell3_path.to_buf()), + ]); + + assert_eq!(cells.find(cell1_path)?, CellName::testing_new("cell1")); + assert_eq!(cells.find(cell2_path)?, CellName::testing_new("cell2")); + assert_eq!(cells.find(cell3_path)?, CellName::testing_new("cell3")); + assert_eq!( + cells.find( + &cell2_path + .as_project_relative_path() + .join(ForwardRelativePath::new("fake/cell3")?) + )?, + CellName::testing_new("cell2") + ); + assert_eq!( + cells.find( + &cell3_path + .as_project_relative_path() + .join(ForwardRelativePath::new("more/foo")?) + )?, + CellName::testing_new("cell3") + ); + + assert_eq!( + cells.get_cell_path(cell1_path)?, + CellPath::new( + CellName::testing_new("cell1"), + ForwardRelativePathBuf::unchecked_new("".to_owned()).into() + ) + ); + + assert_eq!( + cells.get_cell_path(cell2_path)?, + CellPath::new( + CellName::testing_new("cell2"), + ForwardRelativePathBuf::unchecked_new("".to_owned()).into() + ) + ); + + assert_eq!( + cells.get_cell_path( + &cell2_path + .as_project_relative_path() + .join(ForwardRelativePath::new("fake/cell3")?) + )?, + CellPath::new( + CellName::testing_new("cell2"), + ForwardRelativePathBuf::unchecked_new("fake/cell3".to_owned()).into() + ) + ); + + Ok(()) + } + + #[test] + fn test_resolve_path_crossing_cell_boundaries() { + let cell_resolver = CellResolver::testing_with_names_and_paths(&[ + ( + CellName::testing_new("fbsource"), + CellRootPathBuf::testing_new(""), + ), + ( + CellName::testing_new("fbcode"), + CellRootPathBuf::testing_new("fbcode"), + ), + ( + CellName::testing_new("fbcode_macros"), + CellRootPathBuf::testing_new("fbcode/something/macros"), + ), + ]); + // Test starting with `fbsource//`. 
+ assert_eq!( + CellPathRef::testing_new("fbsource//"), + cell_resolver + .resolve_path_crossing_cell_boundaries(CellPathRef::testing_new("fbsource//")) + .unwrap() + ); + assert_eq!( + CellPathRef::testing_new("fbcode//"), + cell_resolver + .resolve_path_crossing_cell_boundaries(CellPathRef::testing_new("fbsource//fbcode")) + .unwrap() + ); + assert_eq!( + CellPathRef::testing_new("fbcode//something"), + cell_resolver + .resolve_path_crossing_cell_boundaries(CellPathRef::testing_new( + "fbsource//fbcode/something" + )) + .unwrap() + ); + assert_eq!( + CellPathRef::testing_new("fbcode_macros//"), + cell_resolver + .resolve_path_crossing_cell_boundaries(CellPathRef::testing_new( + "fbsource//fbcode/something/macros" + )) + .unwrap() + ); + assert_eq!( + CellPathRef::testing_new("fbcode_macros//xx"), + cell_resolver + .resolve_path_crossing_cell_boundaries(CellPathRef::testing_new( + "fbsource//fbcode/something/macros/xx" + )) + .unwrap() + ); + // Now test starting with `fbcode//`. + assert_eq!( + CellPathRef::testing_new("fbcode//"), + cell_resolver + .resolve_path_crossing_cell_boundaries(CellPathRef::testing_new("fbcode//")) + .unwrap() + ); + assert_eq!( + CellPathRef::testing_new("fbcode//something"), + cell_resolver + .resolve_path_crossing_cell_boundaries(CellPathRef::testing_new( + "fbcode//something" + )) + .unwrap() + ); + assert_eq!( + CellPathRef::testing_new("fbcode_macros//"), + cell_resolver + .resolve_path_crossing_cell_boundaries(CellPathRef::testing_new( + "fbcode//something/macros" + )) + .unwrap() + ); + assert_eq!( + CellPathRef::testing_new("fbcode_macros//xx"), + cell_resolver + .resolve_path_crossing_cell_boundaries(CellPathRef::testing_new( + "fbcode//something/macros/xx" + )) + .unwrap() + ); + } +} diff --git a/app/buck2_core/src/cells/alias.rs b/app/buck2_core/src/cells/alias.rs index 4b1e0c3c1d21d..697202d3eb12b 100644 --- a/app/buck2_core/src/cells/alias.rs +++ b/app/buck2_core/src/cells/alias.rs @@ -11,7 +11,7 @@ use std::borrow::Borrow; use allocative::Allocative; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum CellAliasError { #[error("Empty alias where non-empty is required")] EmptyAlias, diff --git a/app/buck2_core/src/cells/cell_path.rs b/app/buck2_core/src/cells/cell_path.rs index ad23a238f0f2b..37690865c800d 100644 --- a/app/buck2_core/src/cells/cell_path.rs +++ b/app/buck2_core/src/cells/cell_path.rs @@ -17,7 +17,7 @@ use crate::cells::paths::CellRelativePath; use crate::cells::paths::CellRelativePathBuf; use crate::fs::paths::forward_rel_path::ForwardRelativePath; -#[derive(thiserror::Error, Debug)] +#[derive(buck2_error::Error, Debug)] #[error("attempted to strip prefix of two CellPath with different cell names `{0}` and `{1}`")] struct StripPrefixError(CellName, CellName); @@ -34,7 +34,7 @@ struct StripPrefixError(CellName, CellName); PartialOrd, Allocative )] -#[display(fmt = "{}", "self.as_ref()")] +#[display("{}", self.as_ref())] pub struct CellPath { cell: CellName, path: Box, @@ -63,18 +63,21 @@ impl CellPath { /// /// ``` /// use buck2_core::cells::cell_path::CellPath; - /// use buck2_core::cells::paths::{CellRelativePathBuf}; /// use buck2_core::cells::name::CellName; + /// use buck2_core::cells::paths::CellRelativePathBuf; /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; /// /// let path = CellPath::new( /// CellName::testing_new("cell"), - /// CellRelativePathBuf::unchecked_new("foo/bar".into()) + /// CellRelativePathBuf::unchecked_new("foo/bar".into()), /// ); /// let other = 
ForwardRelativePath::new("baz")?; /// assert_eq!( - /// CellPath::new(CellName::testing_new("cell"), - /// CellRelativePathBuf::unchecked_new("foo/bar/baz".into())), path.join(other) + /// CellPath::new( + /// CellName::testing_new("cell"), + /// CellRelativePathBuf::unchecked_new("foo/bar/baz".into()) + /// ), + /// path.join(other) /// ); /// /// # anyhow::Ok(()) @@ -88,18 +91,20 @@ impl CellPath { /// /// ``` /// use buck2_core::cells::cell_path::CellPath; - /// use buck2_core::cells::paths::{CellRelativePathBuf}; /// use buck2_core::cells::name::CellName; + /// use buck2_core::cells::paths::CellRelativePathBuf; /// /// assert_eq!( - /// Some( - /// CellPath::new(CellName::testing_new("cell"), - /// CellRelativePathBuf::unchecked_new("foo".into())) - /// ), + /// Some(CellPath::new( + /// CellName::testing_new("cell"), + /// CellRelativePathBuf::unchecked_new("foo".into()) + /// )), /// CellPath::new( /// CellName::testing_new("cell"), /// CellRelativePathBuf::unchecked_new("foo/bar".into()) - /// ).parent().map(|p| p.to_owned()), + /// ) + /// .parent() + /// .map(|p| p.to_owned()), /// ); /// /// # anyhow::Ok(()) @@ -116,15 +121,24 @@ impl CellPath { /// /// ``` /// use buck2_core::cells::cell_path::CellPath; - /// use buck2_core::cells::paths::{CellRelativePathBuf}; /// use buck2_core::cells::name::CellName; + /// use buck2_core::cells::paths::CellRelativePathBuf; /// /// let path = CellPath::testing_new("cell//foo/bar"); /// let mut ancestors = path.ancestors(); /// - /// assert_eq!(ancestors.next(), Some(CellPath::testing_new("cell//foo/bar").as_ref())); - /// assert_eq!(ancestors.next(), Some(CellPath::testing_new("cell//foo").as_ref())); - /// assert_eq!(ancestors.next(), Some(CellPath::testing_new("cell//").as_ref())); + /// assert_eq!( + /// ancestors.next(), + /// Some(CellPath::testing_new("cell//foo/bar").as_ref()) + /// ); + /// assert_eq!( + /// ancestors.next(), + /// Some(CellPath::testing_new("cell//foo").as_ref()) + /// ); + /// assert_eq!( + /// ancestors.next(), + /// Some(CellPath::testing_new("cell//").as_ref()) + /// ); /// assert_eq!(ancestors.next(), None); /// /// # anyhow::Ok(()) @@ -142,13 +156,13 @@ impl CellPath { /// /// ``` /// use buck2_core::cells::cell_path::CellPath; - /// use buck2_core::cells::paths::{CellRelativePathBuf}; /// use buck2_core::cells::name::CellName; + /// use buck2_core::cells::paths::CellRelativePathBuf; /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; /// /// let path = CellPath::new( /// CellName::testing_new("cell"), - /// CellRelativePathBuf::unchecked_new("test/haha/foo.txt".into()) + /// CellRelativePathBuf::unchecked_new("test/haha/foo.txt".into()), /// ); /// /// assert_eq!( @@ -156,7 +170,8 @@ impl CellPath { /// CellPath::new( /// CellName::testing_new("cell"), /// CellRelativePathBuf::unchecked_new("test".into()), - /// ).as_ref() + /// ) + /// .as_ref() /// )?, /// ForwardRelativePathBuf::unchecked_new("haha/foo.txt".into()) /// ); @@ -165,8 +180,10 @@ impl CellPath { /// CellPath::new( /// CellName::testing_new("cell"), /// CellRelativePathBuf::unchecked_new("asdf".into()), - /// ).as_ref() - /// ).is_err(), + /// ) + /// .as_ref() + /// ) + /// .is_err(), /// true /// ); /// assert_eq!( @@ -174,8 +191,10 @@ impl CellPath { /// CellPath::new( /// CellName::testing_new("another"), /// CellRelativePathBuf::unchecked_new("test".into()), - /// ).as_ref() - /// ).is_err(), + /// ) + /// .as_ref() + /// ) + /// .is_err(), /// true /// ); /// @@ -193,17 +212,18 @@ impl CellPath { /// normalized. 
/// /// ``` - /// - /// use buck2_core::cells::paths::CellRelativePathBuf; - /// use buck2_core::cells::name::CellName; /// use std::convert::TryFrom; + /// /// use buck2_core::cells::cell_path::CellPath; + /// use buck2_core::cells::name::CellName; + /// use buck2_core::cells::paths::CellRelativePathBuf; /// /// assert_eq!( /// CellPath::new( /// CellName::testing_new("cell"), /// CellRelativePathBuf::unchecked_new("foo/bar".into()) - /// ).join_normalized("../baz.txt")?, + /// ) + /// .join_normalized("../baz.txt")?, /// CellPath::new( /// CellName::testing_new("cell"), /// CellRelativePathBuf::unchecked_new("foo/baz.txt".into()) @@ -214,7 +234,9 @@ impl CellPath { /// CellPath::new( /// CellName::testing_new("cell"), /// CellRelativePathBuf::unchecked_new("foo".into()) - /// ).join_normalized("../../baz.txt").is_err(), + /// ) + /// .join_normalized("../../baz.txt") + /// .is_err(), /// true /// ); /// @@ -227,20 +249,24 @@ impl CellPath { /// Checks that cell matches and `self` path starts with `base` path /// /// ``` + /// use std::convert::TryFrom; /// - /// use buck2_core::cells::paths::CellRelativePathBuf; /// use buck2_core::cells::cell_path::CellPath; /// use buck2_core::cells::name::CellName; - /// use std::convert::TryFrom; + /// use buck2_core::cells::paths::CellRelativePathBuf; /// /// assert!( /// CellPath::new( /// CellName::testing_new("cell"), /// CellRelativePathBuf::unchecked_new("foo/bar".into()) - /// ).starts_with(CellPath::new( - /// CellName::testing_new("cell"), - /// CellRelativePathBuf::unchecked_new("foo".into()) - /// ).as_ref()), + /// ) + /// .starts_with( + /// CellPath::new( + /// CellName::testing_new("cell"), + /// CellRelativePathBuf::unchecked_new("foo".into()) + /// ) + /// .as_ref() + /// ), /// ); /// /// # anyhow::Ok(()) @@ -250,6 +276,11 @@ impl CellPath { self.as_ref().starts_with(base) } + #[inline] + pub fn ends_with(&self, suffix: &ForwardRelativePath) -> bool { + self.as_ref().ends_with(suffix) + } + #[inline] pub fn into_parts(self) -> (CellName, Box) { (self.cell, self.path) @@ -273,7 +304,7 @@ impl CellPath { } #[derive(Debug, Clone, Dupe, Copy, Eq, PartialEq, Hash, derive_more::Display)] -#[display(fmt = "{}//{}", cell, path)] +#[display("{}//{}", cell, path)] pub struct CellPathRef<'a> { cell: CellName, path: &'a CellRelativePath, @@ -355,6 +386,11 @@ impl<'a> CellPathRef<'a> { self.cell() == base.cell() && self.path().starts_with(base.path()) } + #[inline] + pub fn ends_with(&self, suffix: &ForwardRelativePath) -> bool { + self.path().ends_with(suffix) + } + #[inline] pub fn strip_prefix(&self, base: CellPathRef) -> anyhow::Result<&'a ForwardRelativePath> { if self.cell != base.cell { diff --git a/app/buck2_core/src/cells/cell_root_path.rs b/app/buck2_core/src/cells/cell_root_path.rs index b5cb431c3829a..eecb926721dff 100644 --- a/app/buck2_core/src/cells/cell_root_path.rs +++ b/app/buck2_core/src/cells/cell_root_path.rs @@ -81,11 +81,6 @@ impl CellRootPathBuf { CellRootPathBuf::new(ProjectRelativePathBuf::testing_new(path)) } - /// Project relative path to the cell root. - pub fn project_relative_path(&self) -> &ProjectRelativePath { - &self.0 - } - pub fn as_path(&self) -> &CellRootPath { CellRootPath::new(&self.0) } diff --git a/app/buck2_core/src/cells/external.rs b/app/buck2_core/src/cells/external.rs new file mode 100644 index 0000000000000..cb2defa78264a --- /dev/null +++ b/app/buck2_core/src/cells/external.rs @@ -0,0 +1,47 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::fmt;
+use std::sync::Arc;
+
+use dupe::Dupe;
+
+use crate::cells::name::CellName;
+
+#[derive(Debug, Clone, Dupe, allocative::Allocative, PartialEq, Eq)]
+pub enum ExternalCellOrigin {
+    Bundled(CellName),
+    Git(GitCellSetup),
+}
+
+#[derive(
+    Debug,
+    derive_more::Display,
+    Clone,
+    Dupe,
+    allocative::Allocative,
+    PartialEq,
+    Eq,
+    Hash
+)]
+#[display("git({}, {})", git_origin, commit)]
+pub struct GitCellSetup {
+    pub git_origin: Arc<str>,
+    // Guaranteed to be a valid sha1 commit hash
+    pub commit: Arc<str>,
+}
+
+impl fmt::Display for ExternalCellOrigin {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::Bundled(cell) => write!(f, "bundled({})", cell),
+            Self::Git(git) => write!(f, "{}", git),
+        }
+    }
+}
diff --git a/app/buck2_core/src/cells/instance.rs b/app/buck2_core/src/cells/instance.rs
index 1fbab42cf46cd..775bb76df0db4 100644
--- a/app/buck2_core/src/cells/instance.rs
+++ b/app/buck2_core/src/cells/instance.rs
@@ -15,20 +15,23 @@ use dupe::Dupe;
 
 use crate::cells::cell_root_path::CellRootPath;
 use crate::cells::cell_root_path::CellRootPathBuf;
+use crate::cells::external::ExternalCellOrigin;
 use crate::cells::name::CellName;
 use crate::cells::nested::NestedCells;
-use crate::cells::CellAliasResolver;
-use crate::fs::paths::file_name::FileNameBuf;
 
-#[derive(Debug, thiserror::Error)]
+#[derive(Debug, buck2_error::Error)]
 enum CellInstanceError {
-    #[error("Inconsistent cell name: `{0}` in instance, but `{1}` in alias resolver")]
-    InconsistentCellName(CellName, CellName),
+    #[error(
+        "Attempted to refer to cell `{0}`; however, this is an external cell which cannot be used from `{1}`"
+    )]
+    ExpectedNonExternalCell(CellName, &'static str),
+    #[error("External cell `{0}` cannot have a nested cell `{1}`")]
+    NestedInExternalCell(CellName, CellName),
 }
 
 /// A 'CellInstance', contains a 'CellName' and a path for that cell.
 #[derive(Clone, Debug, derive_more::Display, Dupe, PartialEq, Eq, Allocative)]
-#[display(fmt = "{}", "_0.name")]
+#[display("{}", _0.name)]
 pub struct CellInstance(Arc<CellData>);
 
 #[derive(Derivative, PartialEq, Eq, Allocative)]
@@ -38,32 +41,26 @@ struct CellData {
     name: CellName,
     /// the project relative path to this 'CellInstance'
     path: CellRootPathBuf,
-    /// a list of potential buildfile names for this cell (e.g. 'BUCK', 'TARGETS',
-    /// 'TARGET.v2'). The candidates are listed in priority order, buck will use
-    /// the first one it encounters in a directory.
- buildfiles: Vec, - #[derivative(Debug = "ignore")] - /// the aliases of this specific cell - aliases: CellAliasResolver, + external: Option, nested_cells: NestedCells, } impl CellInstance { - pub(crate) fn new( + pub fn new( name: CellName, path: CellRootPathBuf, - buildfiles: Vec, - aliases: CellAliasResolver, + external: Option, nested_cells: NestedCells, ) -> anyhow::Result { - if name != aliases.current { - return Err(CellInstanceError::InconsistentCellName(name, aliases.current).into()); + if external.is_some() + && let Some(nested) = nested_cells.check_empty() + { + return Err(CellInstanceError::NestedInExternalCell(name, nested).into()); } Ok(CellInstance(Arc::new(CellData { name, path, - buildfiles, - aliases, + external, nested_cells, }))) } @@ -80,19 +77,21 @@ impl CellInstance { &self.0.path } - // Get the name of build files for the cell. #[inline] - pub fn buildfiles(&self) -> &[FileNameBuf] { - &self.0.buildfiles + pub fn nested_cells(&self) -> &NestedCells { + &self.0.nested_cells } #[inline] - pub fn cell_alias_resolver(&self) -> &CellAliasResolver { - &self.0.aliases + pub fn external(&self) -> Option<&ExternalCellOrigin> { + self.0.external.as_ref() } #[inline] - pub fn nested_cells(&self) -> &NestedCells { - &self.0.nested_cells + pub fn expect_non_external(&self, context: &'static str) -> anyhow::Result<()> { + match self.0.external { + Some(_) => Err(CellInstanceError::ExpectedNonExternalCell(self.name(), context).into()), + None => Ok(()), + } } } diff --git a/app/buck2_core/src/cells/mod.rs b/app/buck2_core/src/cells/mod.rs deleted file mode 100644 index 5813f9af7cee7..0000000000000 --- a/app/buck2_core/src/cells/mod.rs +++ /dev/null @@ -1,1045 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! -//! # Cell -//! A 'Cell' is sub-project within the main project for Buck. All files -//! reachable by Buck is belongs to a single Cell. -//! Cells can be sub-directories of other cells, but that makes that -//! sub-directory part of the sub-cell and no longer part of the parent cell. -//! For example, let's say there's cells 'parent-cell' and 'sub-cell' declared -//! in folders of the same names. -//! ```text -//! parent-cell -//! +-- folder1 -//! +-- folder2 -//! +-- sub-cell -//! | +-- folder3 -//! ``` -//! All files part of `folder1` and `folder2` will be part of 'parent-cell'. -//! Anything part of `sub-cell`, including `folder3`, are only part of the -//! 'sub-cell'. -//! -//! For users, each Cell is identified by 'CellAlias's. A 'CellAlias' is a -//! human-readable name that contains alphanumeric characters and underscores. -//! (i.e. shouldn't contain any special characters like `/`). Something like `1` -//! is a valid identifier, though not we do not suggest such naming as it's not -//! very descriptive. -//! -//! It's possible that in certain cell contexts, some Cells are not reachable by -//! any 'CellAlias'. However, in the global context, every Cell will be -//! reachable by at least one 'CellAlias'. -//! -//! ## Cell Alias -//! The cell alias appears within a fully qualified target with the syntax -//! `//`. For example, in `foo//some:target`, `foo` is -//! the cell alias. Examples like `foo/bar//some:target` has an invalid cell -//! 
alias of `foo/bar` since special characters are forbidden. -//! -//! The 'CellAlias' is specified via configuration files per cell. A -//! configuration specifies these with the syntax `=`. We allow a many to one mapping from 'CellAlias' to Cell. -//! -//! Each Cell may give different aliases to the same cell. The 'CellAlias' will -//! be resolved based on the contextual cell that the alias appears in. -//! e.g. `mycell//foo:bar` build file will have any aliases that appears within -//! it be resolved using the aliases defined in `mycell` cell. -//! -//! Cells may omit declaring aliases for cells that exists globally. This means -//! that there will be no alias for those cells, and hence render those cells -//! inaccessible from the cell context that doesn't declare them. -//! -//! ### The Empty Cell Alias -//! The empty cell alias is a special alias injected by Buck to represent the -//! current contextual cell. That means, inside `mycell` cell, references to the -//! 'CellAlias' `""` will resolve to the `mycell` cell. -//! -//! ## Cell Name -//! Each Cell is uniquely identifier globally via a one to one mapping to a -//! 'CellName'. A 'CellName' is a canonicalized, human-readable name that -//! corresponds to a 'CellInstance'. The cell name is inferred from the global -//! list of 'CellAlias's available, by picking the first alias for each cell -//! path based on lexicogrpahic ordering of the aliases. The 'CellName' is -//! subject to the same character restrictions as 'CellAlias'. -//! -//! # Resolving Cells -//! Cells are represented by 'CellInstance'. The 'CellResolver' is able to -//! resolve 'CellNames' to 'CellInstance's. It is also able to find the -//! containing Cell given a path. 'CellAlias' can be resolved with an -//! 'CellAliasResolver'. Each 'CellInstance' contains a 'CellAliasResolver' for -//! the cell alias mapping for that particular cell. -//! -//! e.g. -//! ``` -//! use buck2_core::fs::project_rel_path::{ProjectRelativePath, ProjectRelativePathBuf}; -//! use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; -//! use buck2_core::cells::CellResolver; -//! use std::convert::TryFrom; -//! use maplit::hashmap; -//! use buck2_core::cells::cell_root_path::CellRootPathBuf; -//! use buck2_core::cells::name::CellName; -//! use buck2_core::cells::alias::CellAlias; -//! use dupe::Dupe; -//! use buck2_core::cells::alias::NonEmptyCellAlias; -//! -//! let cell_config = ForwardRelativePathBuf::try_from(".buckconfig".to_owned())?; -//! let fbsource = ProjectRelativePath::new("")?; -//! let fbcode = ProjectRelativePath::new("fbcode")?; -//! -//! let cells = CellResolver::testing_with_names_and_paths_with_alias(&[ -//! (CellName::testing_new("fbsource"), CellRootPathBuf::new(fbsource.to_buf()), hashmap![ -//! NonEmptyCellAlias::new("fbsource".to_owned()).unwrap() => CellName::testing_new("fbsource"), -//! NonEmptyCellAlias::new("fbcode".to_owned()).unwrap() => CellName::testing_new("fbcode"), -//! ]), -//! (CellName::testing_new("fbcode"), CellRootPathBuf::new(fbcode.to_buf()), hashmap![ -//! NonEmptyCellAlias::new("fbcode".to_owned()).unwrap() => CellName::testing_new("fbcode"), -//! NonEmptyCellAlias::new("fbsource".to_owned()).unwrap() => CellName::testing_new("fbsource"), -//! ]) -//! ]); -//! -//! let fbsource_cell_name = cells.find(ProjectRelativePath::new("something/in/fbsource")?)?.dupe(); -//! assert_eq!(fbsource_cell_name, CellName::testing_new("fbsource")); -//! -//! 
let fbcode_cell_name = cells.find(ProjectRelativePath::new("fbcode/something/in/fbcode")?)?.dupe(); -//! assert_eq!(fbcode_cell_name, CellName::testing_new("fbcode")); -//! -//! let fbsource_cell = cells.get(fbsource_cell_name)?; -//! assert_eq!(fbsource_cell.name(), CellName::testing_new("fbsource")); -//! let fbcode_cell = cells.get(fbcode_cell_name)?; -//! assert_eq!(fbcode_cell.name(), CellName::testing_new("fbcode")); -//! -//! let fbsource_aliases = fbsource_cell.cell_alias_resolver(); -//! assert_eq!(fbsource_aliases.resolve("")?, CellName::testing_new("fbsource")); -//! assert_eq!(fbsource_aliases.resolve("fbsource")?, CellName::testing_new("fbsource")); -//! assert_eq!(fbsource_aliases.resolve("fbcode")?, CellName::testing_new("fbcode")); -//! -//! let fbcode_aliases = fbcode_cell.cell_alias_resolver(); -//! assert_eq!(fbcode_aliases.resolve("")?, CellName::testing_new("fbcode")); -//! assert_eq!(fbcode_aliases.resolve("fbsource")?, CellName::testing_new("fbsource")); -//! assert_eq!(fbcode_aliases.resolve("fbcode")?, CellName::testing_new("fbcode")); -//! -//! # anyhow::Ok(()) -//! ``` -//! - -pub mod alias; -pub mod build_file_cell; -pub mod cell_path; -pub mod cell_root_path; -pub mod instance; -pub mod name; -pub mod nested; -pub mod paths; -pub(crate) mod sequence_trie_allocative; -pub mod unchecked_cell_rel_path; - -use std::collections::hash_map; -use std::collections::HashMap; -use std::fmt::Debug; -use std::sync::Arc; - -use allocative::Allocative; -use anyhow::Context; -use dupe::Dupe; -use dupe::OptionDupedExt; -use gazebo::prelude::*; -use instance::CellInstance; -use itertools::Itertools; -use sequence_trie::SequenceTrie; -use thiserror::Error; - -use crate::buck_path::path::BuckPathRef; -use crate::cells::alias::CellAlias; -use crate::cells::alias::NonEmptyCellAlias; -use crate::cells::cell_path::CellPath; -use crate::cells::cell_path::CellPathRef; -use crate::cells::cell_root_path::CellRootPath; -use crate::cells::cell_root_path::CellRootPathBuf; -use crate::cells::name::CellName; -use crate::cells::nested::NestedCells; -use crate::fs::paths::abs_norm_path::AbsNormPath; -use crate::fs::paths::abs_norm_path::AbsNormPathBuf; -use crate::fs::paths::abs_path::AbsPath; -use crate::fs::paths::file_name::FileNameBuf; -use crate::fs::project::ProjectRoot; -use crate::fs::project_rel_path::ProjectRelativePath; -use crate::fs::project_rel_path::ProjectRelativePathBuf; -use crate::package::PackageLabel; - -/// Errors from cell creation -#[derive(Error, Debug)] -enum CellError { - #[error("Cell paths `{1}` and `{2}` had the same alias `{0}`.")] - DuplicateAliases(NonEmptyCellAlias, CellRootPathBuf, CellRootPathBuf), - #[error("Cell paths `{1}` and `{2}` had the same cell name `{0}`.")] - DuplicateNames(CellName, CellRootPathBuf, CellRootPathBuf), - #[error("Two cells, `{0}` and `{1}`, share the same path `{2}`")] - DuplicatePaths(CellName, CellName, CellRootPathBuf), - #[error("cannot find the cell at current path `{0}`. Known roots are `<{}>`", .1.join(", "))] - UnknownCellPath(ProjectRelativePathBuf, Vec), - #[error("unknown cell alias: `{0}`. In cell `{1}`, known aliases are: `{}`", .2.iter().join(", "))] - UnknownCellAlias(CellAlias, CellName, Vec), - #[error("unknown cell name: `{0}`. 
known cell names are `{}`", .1.iter().join(", "))] - UnknownCellName(CellName, Vec), - #[error( - "Cell name `{0}` should be an alias for an existing cell, but `{1}` isn't a known alias" - )] - AliasOnlyCell(NonEmptyCellAlias, NonEmptyCellAlias), - #[error("Cell `{0}` alias `{0}` should point to itself, but it points to `{1}`")] - WrongSelfAlias(CellName, CellName), - #[error("No cell name for the root path, add an entry for `.`")] - NoRootCell, -} - -/// A 'CellAliasResolver' is unique to a 'CellInstance'. -/// It is responsible for resolving all 'CellAlias' encountered within the -/// 'CellInstance' into the global canonical 'CellName's -#[derive(Clone, Dupe, Debug, PartialEq, Eq, Allocative)] -pub struct CellAliasResolver { - /// Current cell name. - current: CellName, - aliases: Arc>, -} - -impl CellAliasResolver { - /// Create an instance of `CellAliasResolver`. The special alias `""` must be present, or - /// this will fail - pub fn new( - current: CellName, - mut aliases: HashMap, - ) -> anyhow::Result { - let current_as_alias = NonEmptyCellAlias::new(current.as_str().to_owned())?; - if let Some(alias_target) = aliases.insert(current_as_alias, current) { - if alias_target != current { - return Err(CellError::WrongSelfAlias(current, alias_target).into()); - } - } - - let aliases = Arc::new(aliases); - - Ok(CellAliasResolver { current, aliases }) - } - - /// resolves a 'CellAlias' into its corresponding 'CellName' - pub fn resolve(&self, alias: &str) -> anyhow::Result { - if alias.is_empty() { - return Ok(self.current); - } - self.aliases.get(alias).duped().ok_or_else(|| { - anyhow::Error::new(CellError::UnknownCellAlias( - CellAlias::new(alias.to_owned()), - self.current, - self.aliases.keys().cloned().collect(), - )) - }) - } - - /// finds the 'CellName' for the current cell (with the alias `""`. See module docs) - pub fn resolve_self(&self) -> CellName { - self.current - } - - pub fn mappings(&self) -> impl Iterator { - self.aliases.iter().map(|(alias, name)| (alias, *name)) - } -} - -/// Resolves 'CellName's into 'CellInstance's. 
-// TODO(bobyf) we need to check if cells changed -#[derive(Clone, Dupe, PartialEq, Eq, Debug, Allocative)] -pub struct CellResolver(Arc); - -#[derive(PartialEq, Eq, Debug, Allocative)] -struct CellResolverInternals { - cells: HashMap, - #[allocative(visit = crate::cells::sequence_trie_allocative::visit_sequence_trie)] - path_mappings: SequenceTrie, - root_cell: CellName, -} - -impl CellResolver { - // Make this public till we start parsing config files from cells - pub fn new(cells: Vec) -> anyhow::Result { - let mut path_mappings: SequenceTrie = SequenceTrie::new(); - let mut root_cell = None; - for cell in &cells { - if cell.path().is_empty() { - root_cell = Some(cell.name()); - } - let prev = path_mappings.insert(cell.path().iter(), cell.name()); - if let Some(prev) = prev { - return Err( - CellError::DuplicatePaths(cell.name(), prev, cell.path().to_buf()).into(), - ); - } - } - - let mut cells_map: HashMap = HashMap::with_capacity(cells.len()); - for cell in cells { - match cells_map.entry(cell.name()) { - hash_map::Entry::Occupied(entry) => { - return Err(CellError::DuplicateNames( - cell.name(), - entry.get().path().to_buf(), - cell.path().to_buf(), - ) - .into()); - } - hash_map::Entry::Vacant(entry) => { - entry.insert(cell); - } - } - } - - let root_cell = root_cell.ok_or(CellError::NoRootCell)?; - Ok(CellResolver(Arc::new(CellResolverInternals { - cells: cells_map, - root_cell, - path_mappings, - }))) - } - - /// Get a `Cell` from the `CellMap` - pub fn get(&self, cell: CellName) -> anyhow::Result<&CellInstance> { - self.0.cells.get(&cell).ok_or_else(|| { - anyhow::Error::new(CellError::UnknownCellName( - cell, - self.0.cells.keys().copied().collect(), - )) - }) - } - - pub fn is_root_cell(&self, name: CellName) -> bool { - name == self.0.root_cell - } - - pub fn root_cell(&self) -> CellName { - self.0.root_cell - } - - pub fn root_cell_instance(&self) -> &CellInstance { - self.get(self.root_cell()) - .expect("Should have had a root cell") - } - - pub fn root_cell_cell_alias_resolver(&self) -> &CellAliasResolver { - self.root_cell_instance().cell_alias_resolver() - } - - /// Get a `CellName` from a path by finding the best matching cell path that - /// is a prefix of the current path relative to the project root. e.g. `fbcode/foo/bar` matches - /// cell path `fbcode`. - pub fn find + ?Sized>( - &self, - path: &P, - ) -> anyhow::Result { - self.0 - .path_mappings - .get_ancestor(path.as_ref().iter()) - .copied() - .ok_or_else(|| { - anyhow::Error::new(CellError::UnknownCellPath( - path.as_ref().to_buf(), - self.0 - .path_mappings - .keys() - .map(|p| p.iter().join("/")) - .collect(), - )) - }) - } - - pub fn get_cell_path + ?Sized>( - &self, - path: &P, - ) -> anyhow::Result { - let path = path.as_ref(); - let cell = self.find(path)?; - let instance = self.get(cell)?; - let relative = path.strip_prefix(instance.path().as_project_relative_path())?; - Ok(CellPath::new(cell, relative.to_owned().into())) - } - - pub fn get_cell_path_from_abs_path( - &self, - path: &AbsPath, - fs: &ProjectRoot, - ) -> anyhow::Result { - let abs_path = AbsPath::new(path)?; - self.get_cell_path(&fs.relativize_any(abs_path)?) - } - - pub fn cells(&self) -> impl Iterator { - self.0 - .cells - .iter() - .map(|(name, instance)| (*name, instance)) - } - - /// Resolves a cell alias and a cell relative path into an absolute path. - /// `cwd` is used to perform contextual resolution and figure out which - /// cell mapping to use (i.e., map from alias to cell name). 
- pub fn resolve_cell_relative_path( - &self, - cell_alias: &str, - cell_relative_path: &str, - project_filesystem: &ProjectRoot, - cwd: &AbsNormPath, - ) -> anyhow::Result { - // We expect this to always succeed as long as the client connects to the - // appropriate daemon. - let proj_relative_path = project_filesystem - .relativize(cwd) - .with_context(|| format!("Error relativizing cwd (`{}`)", cwd))?; - let context_cell_name = self.find(&proj_relative_path)?; - let context_cell = self.get(context_cell_name)?; - - let resolved_cell_name = context_cell.cell_alias_resolver().resolve(cell_alias)?; - let cell = self.get(resolved_cell_name)?; - let cell_absolute_path = project_filesystem.resolve(cell.path().as_project_relative_path()); - cell_absolute_path.join_normalized(cell_relative_path) - } - - /// Resolves a given 'Package' to the 'ProjectRelativePath' that points to - /// the 'Package' - /// - /// ``` - /// use buck2_core::cells::CellResolver; - /// use buck2_core::fs::project_rel_path::{ProjectRelativePath, ProjectRelativePathBuf}; - /// use std::convert::TryFrom; - /// use buck2_core::cells::cell_path::CellPath; - /// use buck2_core::cells::cell_root_path::CellRootPathBuf; - /// use buck2_core::cells::name::CellName; - /// use buck2_core::cells::paths::CellRelativePathBuf; - /// - /// let cell_path = ProjectRelativePath::new("my/cell")?; - /// let cells = CellResolver::testing_with_name_and_path( - /// CellName::testing_new("mycell"), - /// CellRootPathBuf::new(cell_path.to_buf()), - /// ); - /// - /// let cell_path = CellPath::new( - /// CellName::testing_new("mycell"), - /// CellRelativePathBuf::unchecked_new("some/path".to_owned())); - /// - /// assert_eq!( - /// cells.resolve_path(cell_path.as_ref())?, - /// ProjectRelativePathBuf::unchecked_new("my/cell/some/path".into()), - /// ); - /// - /// # anyhow::Ok(()) - /// ``` - pub fn resolve_path(&self, cell_path: CellPathRef) -> anyhow::Result { - Ok(self.get(cell_path.cell())?.path().join(cell_path.path())) - } - - /// resolves a given 'Package' to the 'ProjectRelativePath' that points to - /// the 'Package' - /// - /// ``` - /// use buck2_core::cells::CellResolver; - /// use buck2_core::fs::project_rel_path::{ProjectRelativePath, ProjectRelativePathBuf}; - /// use buck2_core::fs::paths::forward_rel_path::{ForwardRelativePathBuf, ForwardRelativePath}; - /// use buck2_core::package::PackageLabel; - /// use std::convert::TryFrom; - /// use buck2_core::cells::cell_root_path::CellRootPathBuf; - /// use buck2_core::cells::name::CellName; - /// use buck2_core::cells::paths::CellRelativePath; - /// - /// let cell_path = ProjectRelativePath::new("my/cell")?; - /// - /// let cells = CellResolver::testing_with_name_and_path( - /// CellName::testing_new("mycell"), - /// CellRootPathBuf::new(cell_path.to_buf()), - /// ); - /// - /// let pkg = PackageLabel::new( - /// CellName::testing_new("mycell"), - /// CellRelativePath::unchecked_new("somepkg"), - /// ); - /// - /// assert_eq!( - /// cells.resolve_package(pkg)?, - /// ProjectRelativePathBuf::unchecked_new("my/cell/somepkg".into()), - /// ); - /// - /// # anyhow::Ok(()) - /// ``` - pub fn resolve_package(&self, pkg: PackageLabel) -> anyhow::Result { - self.resolve_path(pkg.as_cell_path()) - } - - /// Resolves a 'BuckPath' into a 'ProjectRelativePath' based on the package - /// and cell. - pub fn resolve_buck_path(&self, path: BuckPathRef) -> anyhow::Result { - Ok(self.resolve_package(path.package())?.join(path.path())) - } - - // These are constructors for tests. 
- - pub fn testing_with_name_and_path( - other_name: CellName, - other_path: CellRootPathBuf, - ) -> CellResolver { - // It is an error to build a CellResolver that doesn't cover the root. - // Therefore, if it isn't needed for the test, just make one up. - if other_path.is_empty() { - Self::testing_with_names_and_paths_with_alias(&[( - other_name, - other_path, - HashMap::new(), - )]) - } else { - Self::testing_with_names_and_paths_with_alias(&[ - (other_name, other_path, HashMap::new()), - ( - CellName::testing_new("root"), - CellRootPathBuf::testing_new(""), - HashMap::new(), - ), - ]) - } - } - - pub fn testing_with_names_and_paths(cells: &[(CellName, CellRootPathBuf)]) -> CellResolver { - Self::testing_with_names_and_paths_with_alias( - &cells.map(|(name, path)| (*name, path.clone(), HashMap::new())), - ) - } - - pub fn testing_with_names_and_paths_with_alias( - cells: &[( - CellName, - CellRootPathBuf, - HashMap, - )], - ) -> CellResolver { - let cell_path_by_name: HashMap = cells - .iter() - .map(|(name, path, _)| (*name, path.clone())) - .collect(); - - assert_eq!(cell_path_by_name.len(), cells.len(), "duplicate cell names"); - assert_eq!( - cells.len(), - cells - .iter() - .map(|(_, path, _)| path.as_path()) - .unique() - .count(), - "duplicate cell paths" - ); - - let mut cell_aggregator = CellsAggregator::new(); - - for (name, path, alias) in cells { - cell_aggregator.cell_info(path.clone()).name = Some(*name); - - for (alias, name) in alias { - cell_aggregator - .add_cell_entry( - path.clone(), - alias.clone(), - cell_path_by_name.get(name).unwrap().clone(), - ) - .unwrap(); - } - } - - cell_aggregator.make_cell_resolver().unwrap() - } - - pub(crate) fn resolve_path_crossing_cell_boundaries<'a>( - &self, - mut path: CellPathRef<'a>, - ) -> anyhow::Result> { - let mut rem: u32 = 1000; - loop { - // Sanity check. Should never happen. - rem = rem - .checked_sub(1) - .context("Overflow computing cell boundaries")?; - - let nested_cells = self.get(path.cell())?.nested_cells(); - match nested_cells.matches_checked(path.path()) { - None => return Ok(path), - Some((_, new_cell_path)) => { - path = new_cell_path; - } - } - } - } -} - -/// Aggregates cell information as we parse cell configs and keeps state to -/// generate a final 'CellResolver' -#[derive(Debug)] -pub struct CellsAggregator { - cell_infos: HashMap, -} - -fn default_buildfiles() -> Vec { - (["BUCK.v2", "BUCK"][..]).map(|&n| FileNameBuf::try_from(n.to_owned()).unwrap()) -} - -#[derive(Debug)] -struct CellAggregatorInfo { - /// The name to use for this alias. - /// So that it is predictable, we always use the first name we encounter, - /// so the root file can choose what the alias is called. - name: Option, - /// All the aliases known by this cell. - alias_mapping: HashMap, - /// The build file name in this if it's been set. If it hasn't we'll use the - /// default `["BUCK.v2", "BUCK"]` when building the resolver. 
- buildfiles: Vec, -} - -impl Default for CellAggregatorInfo { - fn default() -> Self { - Self { - name: None, - alias_mapping: HashMap::new(), - buildfiles: default_buildfiles(), - } - } -} - -impl CellAggregatorInfo { - fn add_alias_mapping( - &mut self, - from: NonEmptyCellAlias, - to: CellRootPathBuf, - ) -> anyhow::Result<()> { - let old = self.alias_mapping.insert(from.clone(), to.clone()); - if let Some(old) = old { - if old != to { - return Err(CellError::DuplicateAliases(from, old, to).into()); - } - } - Ok(()) - } -} - -impl CellsAggregator { - pub fn new() -> Self { - Self { - cell_infos: HashMap::new(), - } - } - - fn cell_info(&mut self, cell_path: CellRootPathBuf) -> &mut CellAggregatorInfo { - self.cell_infos - .entry(cell_path) - .or_insert_with(CellAggregatorInfo::default) - } - - /// Adds a cell configuration entry - pub fn add_cell_entry( - &mut self, - cell_root: CellRootPathBuf, - parsed_alias: NonEmptyCellAlias, - alias_path: CellRootPathBuf, - ) -> anyhow::Result<()> { - let name = &mut self.cell_info(alias_path.clone()).name; - if name.is_none() { - *name = Some(CellName::unchecked_new(parsed_alias.as_str())?); - } - self.cell_info(cell_root) - .add_alias_mapping(parsed_alias, alias_path) - } - - /// Adds a cell alias configuration entry - pub fn add_cell_alias( - &mut self, - cell_root: CellRootPathBuf, - parsed_alias: NonEmptyCellAlias, - alias_destination: NonEmptyCellAlias, - ) -> anyhow::Result { - let cell_info = self.cell_info(cell_root); - let alias_path = match cell_info.alias_mapping.get(&alias_destination) { - None => return Err(CellError::AliasOnlyCell(parsed_alias, alias_destination).into()), - Some(alias_path) => alias_path.clone(), - }; - cell_info.add_alias_mapping(parsed_alias, alias_path.clone())?; - Ok(alias_path) - } - - pub fn set_buildfiles(&mut self, cell_root: CellRootPathBuf, buildfiles: Vec) { - let cell_info = self.cell_info(cell_root); - cell_info.buildfiles = buildfiles; - } - - pub fn add_buildfile(&mut self, cell_root: CellRootPathBuf, buildfile: FileNameBuf) { - self.cell_info(cell_root).buildfiles.push(buildfile); - } - - fn get_cell_name_from_path(&self, path: &CellRootPath) -> anyhow::Result { - self.cell_infos - .get(path) - .and_then(|info| info.name) - .ok_or_else(|| { - anyhow::anyhow!(CellError::UnknownCellPath( - path.as_project_relative_path().to_buf(), - self.cell_infos - .keys() - .map(|p| p.as_str().to_owned()) - .collect() - )) - }) - } - - /// Creates the 'CellResolver' from all the entries that were aggregated - pub fn make_cell_resolver(self) -> anyhow::Result { - let mut cell_mappings = Vec::new(); - - let all_cell_roots_for_nested_cells: Vec<_> = self - .cell_infos - .keys() - .map(|path| Ok((self.get_cell_name_from_path(path)?, path.as_path()))) - .collect::>()?; - - for (cell_path, cell_info) in &self.cell_infos { - let nested_cells = - NestedCells::from_cell_roots(&all_cell_roots_for_nested_cells, cell_path); - - let mut aliases_for_cell = HashMap::new(); - let cell_name = self.get_cell_name_from_path(cell_path)?; - - for (alias, path_for_alias) in &cell_info.alias_mapping { - aliases_for_cell - .insert(alias.clone(), self.get_cell_name_from_path(path_for_alias)?); - } - - cell_mappings.push(CellInstance::new( - cell_name, - cell_path.clone(), - cell_info.buildfiles.clone(), - CellAliasResolver::new(cell_name, aliases_for_cell)?, - nested_cells, - )?); - } - - CellResolver::new(cell_mappings) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::cells::CellResolver; - use 
crate::fs::paths::forward_rel_path::ForwardRelativePath; - use crate::fs::paths::forward_rel_path::ForwardRelativePathBuf; - - #[test] - fn test_of_names_and_paths() -> anyhow::Result<()> { - use crate::fs::project_rel_path::ProjectRelativePathBuf; - - let cell_resolver = CellResolver::testing_with_name_and_path( - CellName::testing_new("foo"), - CellRootPathBuf::new(ProjectRelativePathBuf::unchecked_new("bar".into())), - ); - - let cell = cell_resolver.get(CellName::testing_new("foo"))?; - assert_eq!(CellName::testing_new("foo"), cell.name()); - assert_eq!("bar", cell.path().as_str()); - - Ok(()) - } - - #[test] - fn test_cells() -> anyhow::Result<()> { - let cell1_path = CellRootPath::new(ProjectRelativePath::new("my/cell1")?); - let cell2_path = CellRootPath::new(ProjectRelativePath::new("cell2")?); - let cell3_path = CellRootPath::new(ProjectRelativePath::new("my/cell3")?); - - let cells = CellResolver::testing_with_names_and_paths_with_alias(&[ - ( - CellName::testing_new("root"), - CellRootPathBuf::testing_new(""), - HashMap::new(), - ), - ( - CellName::testing_new("cell1"), - cell1_path.to_buf(), - hashmap![ - NonEmptyCellAlias::new("cell1".to_owned()).unwrap() => CellName::testing_new("cell1"), - NonEmptyCellAlias::new("cell2".to_owned()).unwrap() => CellName::testing_new("cell2"), - NonEmptyCellAlias::new("cell3".to_owned()).unwrap() => CellName::testing_new("cell3"), - ], - ), - ( - CellName::testing_new("cell2"), - cell2_path.to_buf(), - hashmap![ - NonEmptyCellAlias::new("cell2".to_owned()).unwrap() => CellName::testing_new("cell2"), - NonEmptyCellAlias::new("cell1".to_owned()).unwrap() => CellName::testing_new("cell1"), - NonEmptyCellAlias::new("cell3".to_owned()).unwrap() => CellName::testing_new("cell3"), - ], - ), - ( - CellName::testing_new("cell3"), - cell3_path.to_buf(), - hashmap![ - NonEmptyCellAlias::new("z_cell3".to_owned()).unwrap() => CellName::testing_new("cell3"), - NonEmptyCellAlias::new("z_cell1".to_owned()).unwrap() => CellName::testing_new("cell1"), - NonEmptyCellAlias::new("z_cell2".to_owned()).unwrap() => CellName::testing_new("cell2"), - ], - ), - ]); - - { - let cell1 = cells.get(CellName::testing_new("cell1")).unwrap(); - assert_eq!(cell1.path(), cell1_path); - - let aliases = cell1.cell_alias_resolver(); - assert_eq!(aliases.resolve("").unwrap(), CellName::testing_new("cell1")); - assert_eq!( - aliases.resolve("cell1").unwrap(), - CellName::testing_new("cell1") - ); - assert_eq!( - aliases.resolve("cell2").unwrap(), - CellName::testing_new("cell2") - ); - assert_eq!( - aliases.resolve("cell3").unwrap(), - CellName::testing_new("cell3") - ); - } - - { - let cell2 = cells.get(CellName::testing_new("cell2")).unwrap(); - assert_eq!(cell2.path(), cell2_path); - - let aliases = cell2.cell_alias_resolver(); - assert_eq!(aliases.resolve("").unwrap(), CellName::testing_new("cell2")); - assert_eq!( - aliases.resolve("cell1").unwrap(), - CellName::testing_new("cell1") - ); - assert_eq!( - aliases.resolve("cell2").unwrap(), - CellName::testing_new("cell2") - ); - assert_eq!( - aliases.resolve("cell3").unwrap(), - CellName::testing_new("cell3") - ); - } - - { - let cell3 = cells.get(CellName::testing_new("cell3")).unwrap(); - assert_eq!(cell3.path(), cell3_path); - - let aliases = cell3.cell_alias_resolver(); - assert_eq!(aliases.resolve("").unwrap(), CellName::testing_new("cell3")); - assert_eq!( - aliases.resolve("z_cell1").unwrap(), - CellName::testing_new("cell1") - ); - assert_eq!( - aliases.resolve("z_cell2").unwrap(), - CellName::testing_new("cell2") - ); - 
assert_eq!(
-                aliases.resolve("z_cell3").unwrap(),
-                CellName::testing_new("cell3")
-            );
-        }
-
-        assert_eq!(cells.find(cell1_path)?, CellName::testing_new("cell1"));
-        assert_eq!(cells.find(cell2_path)?, CellName::testing_new("cell2"));
-        assert_eq!(cells.find(cell3_path)?, CellName::testing_new("cell3"));
-        assert_eq!(
-            cells.find(
-                &cell2_path
-                    .as_project_relative_path()
-                    .join(ForwardRelativePath::new("fake/cell3")?)
-            )?,
-            CellName::testing_new("cell2")
-        );
-        assert_eq!(
-            cells.find(
-                &cell3_path
-                    .as_project_relative_path()
-                    .join(ForwardRelativePath::new("more/foo")?)
-            )?,
-            CellName::testing_new("cell3")
-        );
-
-        assert_eq!(
-            cells.get_cell_path(cell1_path)?,
-            CellPath::new(
-                CellName::testing_new("cell1"),
-                ForwardRelativePathBuf::unchecked_new("".to_owned()).into()
-            )
-        );
-
-        assert_eq!(
-            cells.get_cell_path(cell2_path)?,
-            CellPath::new(
-                CellName::testing_new("cell2"),
-                ForwardRelativePathBuf::unchecked_new("".to_owned()).into()
-            )
-        );
-
-        assert_eq!(
-            cells.get_cell_path(
-                &cell2_path
-                    .as_project_relative_path()
-                    .join(ForwardRelativePath::new("fake/cell3")?)
-            )?,
-            CellPath::new(
-                CellName::testing_new("cell2"),
-                ForwardRelativePathBuf::unchecked_new("fake/cell3".to_owned()).into()
-            )
-        );
-
-        Ok(())
-    }
-
-    #[test]
-    fn test_duplicate_aliases() -> anyhow::Result<()> {
-        let mut agg = CellsAggregator::new();
-
-        let cell_root = CellRootPathBuf::new(ProjectRelativePathBuf::try_from("".to_owned())?);
-        let alias_path =
-            CellRootPathBuf::new(ProjectRelativePathBuf::try_from("random/path".to_owned())?);
-
-        agg.add_cell_entry(
-            cell_root.clone(),
-            NonEmptyCellAlias::new("root".to_owned()).unwrap(),
-            cell_root.clone(),
-        )?;
-        agg.add_cell_entry(
-            cell_root.clone(),
-            NonEmptyCellAlias::new("hello".to_owned()).unwrap(),
-            alias_path.clone(),
-        )?;
-        agg.add_cell_entry(
-            cell_root.clone(),
-            NonEmptyCellAlias::new("cruel".to_owned()).unwrap(),
-            alias_path.clone(),
-        )?;
-        agg.add_cell_entry(
-            cell_root,
-            NonEmptyCellAlias::new("world".to_owned()).unwrap(),
-            alias_path,
-        )?;
-
-        // We want the first alias to win (hello), rather than the lexicographically first (cruel)
-        let cell_resolver = agg.make_cell_resolver()?;
-        assert!(cell_resolver.get(CellName::testing_new("hello")).is_ok());
-        assert!(cell_resolver.get(CellName::testing_new("cruel")).is_err());
-        Ok(())
-    }
-
-    #[test]
-    fn test_alias_only_error() -> anyhow::Result<()> {
-        let mut agg = CellsAggregator::new();
-
-        let cell_root = CellRootPathBuf::new(ProjectRelativePathBuf::try_from("".to_owned())?);
-        assert!(
-            agg.add_cell_alias(
-                cell_root,
-                NonEmptyCellAlias::new("root".to_owned()).unwrap(),
-                NonEmptyCellAlias::new("does_not_exist".to_owned()).unwrap(),
-            )
-            .is_err()
-        );
-        Ok(())
-    }
-
-    #[test]
-    fn test_resolve_path_crossing_cell_boundaries() {
-        let cell_resolver = CellResolver::testing_with_names_and_paths(&[
-            (
-                CellName::testing_new("fbsource"),
-                CellRootPathBuf::testing_new(""),
-            ),
-            (
-                CellName::testing_new("fbcode"),
-                CellRootPathBuf::testing_new("fbcode"),
-            ),
-            (
-                CellName::testing_new("fbcode_macros"),
-                CellRootPathBuf::testing_new("fbcode/something/macros"),
-            ),
-        ]);
-        // Test starting with `fbsource//`.
- assert_eq!( - CellPathRef::testing_new("fbsource//"), - cell_resolver - .resolve_path_crossing_cell_boundaries(CellPathRef::testing_new("fbsource//")) - .unwrap() - ); - assert_eq!( - CellPathRef::testing_new("fbcode//"), - cell_resolver - .resolve_path_crossing_cell_boundaries(CellPathRef::testing_new("fbsource//fbcode")) - .unwrap() - ); - assert_eq!( - CellPathRef::testing_new("fbcode//something"), - cell_resolver - .resolve_path_crossing_cell_boundaries(CellPathRef::testing_new( - "fbsource//fbcode/something" - )) - .unwrap() - ); - assert_eq!( - CellPathRef::testing_new("fbcode_macros//"), - cell_resolver - .resolve_path_crossing_cell_boundaries(CellPathRef::testing_new( - "fbsource//fbcode/something/macros" - )) - .unwrap() - ); - assert_eq!( - CellPathRef::testing_new("fbcode_macros//xx"), - cell_resolver - .resolve_path_crossing_cell_boundaries(CellPathRef::testing_new( - "fbsource//fbcode/something/macros/xx" - )) - .unwrap() - ); - // Now test starting with `fbcode//`. - assert_eq!( - CellPathRef::testing_new("fbcode//"), - cell_resolver - .resolve_path_crossing_cell_boundaries(CellPathRef::testing_new("fbcode//")) - .unwrap() - ); - assert_eq!( - CellPathRef::testing_new("fbcode//something"), - cell_resolver - .resolve_path_crossing_cell_boundaries(CellPathRef::testing_new( - "fbcode//something" - )) - .unwrap() - ); - assert_eq!( - CellPathRef::testing_new("fbcode_macros//"), - cell_resolver - .resolve_path_crossing_cell_boundaries(CellPathRef::testing_new( - "fbcode//something/macros" - )) - .unwrap() - ); - assert_eq!( - CellPathRef::testing_new("fbcode_macros//xx"), - cell_resolver - .resolve_path_crossing_cell_boundaries(CellPathRef::testing_new( - "fbcode//something/macros/xx" - )) - .unwrap() - ); - } -} diff --git a/app/buck2_core/src/cells/name.rs b/app/buck2_core/src/cells/name.rs index ec785ce9e7079..82004e65a5e80 100644 --- a/app/buck2_core/src/cells/name.rs +++ b/app/buck2_core/src/cells/name.rs @@ -11,14 +11,14 @@ use std::hash::Hash; use std::hash::Hasher; use allocative::Allocative; +use buck2_util::hash::BuckHasher; use derive_more::Display; use dupe::Dupe; use equivalent::Equivalent; -use fnv::FnvHasher; -use internment_tweaks::Intern; -use internment_tweaks::StaticInterner; +use static_interner::Intern; +use static_interner::Interner; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum CellNameError { #[error("Cell name must be non-empty")] Empty, @@ -49,7 +49,7 @@ impl<'a> From> for CellNameData { } } -static INTERNER: StaticInterner = StaticInterner::new(); +static INTERNER: Interner = Interner::new(); /// A 'CellName' is a canonicalized, human-readable name that corresponds to a /// 'CellInstance'. 
There should be a one to one mapping between a 'CellName' diff --git a/app/buck2_core/src/cells/nested.rs b/app/buck2_core/src/cells/nested.rs index 45ec91ab502ab..21d808e6f812a 100644 --- a/app/buck2_core/src/cells/nested.rs +++ b/app/buck2_core/src/cells/nested.rs @@ -95,6 +95,10 @@ impl NestedCells { ) }) } + + pub(crate) fn check_empty(&self) -> Option { + self.paths.first().map(|(_, cell_name)| *cell_name) + } } #[cfg(test)] diff --git a/app/buck2_core/src/cells/paths.rs b/app/buck2_core/src/cells/paths.rs index 2263889c908de..09876cd050d36 100644 --- a/app/buck2_core/src/cells/paths.rs +++ b/app/buck2_core/src/cells/paths.rs @@ -15,7 +15,6 @@ use std::borrow::Borrow; use std::ops::Deref; -use std::path::Path; use std::path::PathBuf; use allocative::Allocative; @@ -112,23 +111,20 @@ impl CellRelativePath { /// forward, normalized relative path, otherwise error. /// /// ``` - /// use buck2_core::cells::paths::CellRelativePath; /// use std::path::Path; /// + /// use buck2_core::cells::paths::CellRelativePath; + /// /// assert!(CellRelativePath::from_path("foo/bar").is_ok()); /// assert!(CellRelativePath::from_path("").is_ok()); /// assert!(CellRelativePath::from_path("/abs/bar").is_err()); /// assert!(CellRelativePath::from_path("normalize/./bar").is_err()); /// assert!(CellRelativePath::from_path("normalize/../bar").is_err()); - /// - /// assert!(CellRelativePath::from_path(Path::new("foo/bar")).is_ok()); - /// assert!(CellRelativePath::from_path(Path::new("")).is_ok()); - /// assert!(CellRelativePath::from_path(Path::new("/abs/bar")).is_err()); - /// assert!(CellRelativePath::from_path(Path::new("normalize/./bar")).is_err()); - /// assert!(CellRelativePath::from_path(Path::new("normalize/../bar")).is_err()); /// ``` - pub fn from_path>(p: &P) -> anyhow::Result<&CellRelativePath> { - Ok(CellRelativePath::ref_cast(ForwardRelativePath::new(p)?)) + pub fn from_path>(p: &P) -> anyhow::Result<&CellRelativePath> { + Ok(CellRelativePath::ref_cast(ForwardRelativePath::new( + p.as_ref(), + )?)) } pub fn new(path: &ForwardRelativePath) -> &CellRelativePath { @@ -151,12 +147,17 @@ impl CellRelativePath { /// /// ``` /// use std::path::Path; + /// + /// use buck2_core::cells::paths::CellRelativePath; + /// use buck2_core::cells::paths::CellRelativePathBuf; /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; - /// use buck2_core::cells::paths::{CellRelativePathBuf, CellRelativePath}; /// /// let path = CellRelativePath::from_path("foo/bar")?; /// let other = ForwardRelativePath::new("baz")?; - /// assert_eq!(CellRelativePathBuf::unchecked_new("foo/bar/baz".to_owned()), path.join(other)); + /// assert_eq!( + /// CellRelativePathBuf::unchecked_new("foo/bar/baz".to_owned()), + /// path.join(other) + /// ); /// /// # anyhow::Ok(()) /// ``` @@ -190,7 +191,10 @@ impl CellRelativePath { /// use buck2_core::cells::paths::CellRelativePath; /// use buck2_core::fs::paths::file_name::FileName; /// - /// assert_eq!(Some(FileName::unchecked_new("bin")), CellRelativePath::from_path("usr/bin")?.file_name()); + /// assert_eq!( + /// Some(FileName::unchecked_new("bin")), + /// CellRelativePath::from_path("usr/bin")?.file_name() + /// ); /// /// # anyhow::Ok(()) /// ``` @@ -205,8 +209,8 @@ impl CellRelativePath { /// path is not a 'ForwardRelativePath' /// /// ``` - /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; /// use buck2_core::cells::paths::CellRelativePath; + /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; /// /// let path = 
CellRelativePath::from_path("test/haha/foo.txt")?; /// @@ -214,7 +218,11 @@ impl CellRelativePath { /// path.strip_prefix(CellRelativePath::from_path("test")?)?, /// ForwardRelativePath::new("haha/foo.txt")? /// ); - /// assert_eq!(path.strip_prefix(CellRelativePath::from_path("asdf")?).is_err(), true); + /// assert_eq!( + /// path.strip_prefix(CellRelativePath::from_path("asdf")?) + /// .is_err(), + /// true + /// ); /// /// # anyhow::Ok(()) /// ``` @@ -228,7 +236,6 @@ impl CellRelativePath { /// Determines whether `base` is a prefix of `self`. /// /// ``` - /// /// use buck2_core::cells::paths::CellRelativePath; /// /// let path = CellRelativePath::from_path("some/foo")?; @@ -246,6 +253,7 @@ impl CellRelativePath { /// /// ``` /// use std::path::Path; + /// /// use buck2_core::cells::paths::CellRelativePath; /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; /// @@ -285,10 +293,12 @@ impl CellRelativePath { /// Extracts the extension of [`self.file_name`], if possible. /// /// ``` - /// /// use buck2_core::cells::paths::CellRelativePath; /// - /// assert_eq!(Some("rs"), CellRelativePath::from_path("hi/foo.rs")?.extension()); + /// assert_eq!( + /// Some("rs"), + /// CellRelativePath::from_path("hi/foo.rs")?.extension() + /// ); /// /// # anyhow::Ok(()) /// ``` @@ -300,17 +310,20 @@ impl CellRelativePath { /// normalized. /// /// ``` - /// - /// use buck2_core::cells::paths::{CellRelativePath, CellRelativePathBuf}; /// use std::convert::TryFrom; /// + /// use buck2_core::cells::paths::CellRelativePath; + /// use buck2_core::cells::paths::CellRelativePathBuf; + /// /// assert_eq!( /// CellRelativePath::from_path("foo/bar")?.join_normalized("../baz.txt")?, /// CellRelativePathBuf::unchecked_new("foo/baz.txt".into()), /// ); /// /// assert_eq!( - /// CellRelativePath::from_path("foo")?.join_normalized("../../baz.txt").is_err(), + /// CellRelativePath::from_path("foo")? + /// .join_normalized("../../baz.txt") + /// .is_err(), /// true /// ); /// @@ -334,22 +347,10 @@ impl CellRelativePath { /// let p = CellRelativePath::from_path("foo/bar/baz")?; /// let mut it = p.iter(); /// - /// assert_eq!( - /// it.next(), - /// Some(FileName::unchecked_new("foo")) - /// ); - /// assert_eq!( - /// it.next(), - /// Some(FileName::unchecked_new("bar")) - /// ); - /// assert_eq!( - /// it.next(), - /// Some(FileName::unchecked_new("baz")) - /// ); - /// assert_eq!( - /// it.next(), - /// None - /// ); + /// assert_eq!(it.next(), Some(FileName::unchecked_new("foo"))); + /// assert_eq!(it.next(), Some(FileName::unchecked_new("bar"))); + /// assert_eq!(it.next(), Some(FileName::unchecked_new("baz"))); + /// assert_eq!(it.next(), None); /// /// # anyhow::Ok(()) /// ``` @@ -369,14 +370,17 @@ impl CellRelativePath { impl<'a> From<&'a ForwardRelativePath> for &'a CellRelativePath { /// /// ``` + /// use std::convert::From; /// /// use buck2_core::cells::paths::CellRelativePath; /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; - /// use std::convert::From; /// /// let f = ForwardRelativePath::new("foo")?; /// - /// assert_eq!(<&CellRelativePath>::from(f), CellRelativePath::from_path("foo")?); + /// assert_eq!( + /// <&CellRelativePath>::from(f), + /// CellRelativePath::from_path("foo")? 
+ /// ); /// /// # anyhow::Ok(()) /// ``` @@ -453,9 +457,9 @@ impl<'a> TryFrom<&'a str> for &'a CellRelativePath { /// no allocation conversion /// /// ``` + /// use std::convert::TryFrom; /// /// use buck2_core::cells::paths::CellRelativePath; - /// use std::convert::TryFrom; /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; /// /// assert!(<&CellRelativePath>::try_from("foo/bar").is_ok()); @@ -475,9 +479,9 @@ impl<'a> TryFrom<&'a RelativePath> for &'a CellRelativePath { /// no allocation conversion /// /// ``` + /// use std::convert::TryFrom; /// /// use buck2_core::cells::paths::CellRelativePath; - /// use std::convert::TryFrom; /// use buck2_core::fs::paths::RelativePath; /// /// assert!(<&CellRelativePath>::try_from(RelativePath::new("foo/bar")).is_ok()); @@ -498,9 +502,9 @@ impl TryFrom for CellRelativePathBuf { /// no allocation conversion /// /// ``` + /// use std::convert::TryFrom; /// /// use buck2_core::cells::paths::CellRelativePathBuf; - /// use std::convert::TryFrom; /// /// assert!(CellRelativePathBuf::try_from("foo/bar".to_owned()).is_ok()); /// assert!(CellRelativePathBuf::try_from("".to_owned()).is_ok()); @@ -522,9 +526,10 @@ impl TryFrom for CellRelativePathBuf { /// conversion) /// /// ``` + /// use std::convert::TryFrom; + /// /// use buck2_core::cells::paths::CellRelativePathBuf; /// use buck2_core::fs::paths::RelativePathBuf; - /// use std::convert::TryFrom; /// /// assert!(CellRelativePathBuf::try_from(RelativePathBuf::from("foo/bar")).is_ok()); /// assert!(CellRelativePathBuf::try_from(RelativePathBuf::from("")).is_ok()); @@ -544,11 +549,11 @@ impl TryFrom for CellRelativePathBuf { /// no allocation conversion /// /// ``` - /// - /// use buck2_core::cells::paths::CellRelativePathBuf; /// use std::convert::TryFrom; /// use std::path::PathBuf; /// + /// use buck2_core::cells::paths::CellRelativePathBuf; + /// /// assert!(CellRelativePathBuf::try_from(PathBuf::from("foo/bar")).is_ok()); /// assert!(CellRelativePathBuf::try_from(PathBuf::from("")).is_ok()); /// assert!(CellRelativePathBuf::try_from(PathBuf::from("/abs/bar")).is_err()); diff --git a/app/buck2_core/src/ci.rs b/app/buck2_core/src/ci.rs new file mode 100644 index 0000000000000..0bf2da3448f91 --- /dev/null +++ b/app/buck2_core/src/ci.rs @@ -0,0 +1,52 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use crate::buck2_env_anyhow; + +/// Are we running in CI? +pub fn is_ci() -> anyhow::Result { + // The CI environment variable is consistently set by CI providers. + // + // - GitHub Actions: https://docs.github.com/en/actions/learn-github-actions/variables#default-environment-variables + // - GitLab CI/CD: https://docs.gitlab.com/ee/ci/variables/predefined_variables.html + // - CircleCI: https://circleci.com/docs/variables/#built-in-environment-variables + // - many others + // + // Internally, CI should be setting SANDCASTLE env var. 
+    Ok(
+        buck2_env_anyhow!("SANDCASTLE", applicability = internal)?.is_some()
+            || buck2_env_anyhow!("CI", bool)?,
+    )
+}
+
+/// Returns a list of possible identifiers for the currently running CI job, in `(name, value)` form
+///
+/// Earlier items in the list are better identifiers
+pub fn ci_identifiers() -> anyhow::Result<impl Iterator<Item = (&'static str, Option<&'static str>)>>
+{
+    Ok([
+        (
+            "sandcastle_job_info",
+            buck2_env_anyhow!("SANDCASTLE_JOB_INFO", applicability = internal)?,
+        ),
+        (
+            "skycastle_workflow_run_id",
+            buck2_env_anyhow!("SKYCASTLE_WORKFLOW_RUN_ID", applicability = internal)?,
+        ),
+        (
+            "sandcastle_alias",
+            buck2_env_anyhow!("SANDCASTLE_ALIAS", applicability = internal)?,
+        ),
+        (
+            "skycastle_workflow_alias",
+            buck2_env_anyhow!("SKYCASTLE_WORKFLOW_ALIAS", applicability = internal)?,
+        ),
+    ]
+    .into_iter())
+}
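Editor's aside on `ci.rs` above: a hypothetical consumer of `ci_identifiers`, relying only on the documented ordering (earlier entries are better identifiers). The function name is invented for the sketch.

fn best_ci_identifier() -> anyhow::Result<Option<(&'static str, &'static str)>> {
    // Take the first identifier whose environment variable is actually set.
    Ok(ci_identifiers()?.find_map(|(name, value)| value.map(|v| (name, v))))
}

diff --git a/app/buck2_core/src/client_only.rs b/app/buck2_core/src/client_only.rs
new file mode 100644
index 0000000000000..98abaaf791cfd
--- /dev/null
+++ b/app/buck2_core/src/client_only.rs
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use buck2_util::late_binding::LateBinding;
+
+pub static CLIENT_ONLY_VAL: LateBinding<bool> = LateBinding::new("client_only_val");
+
+pub fn is_client_only() -> anyhow::Result<bool> {
+    CLIENT_ONLY_VAL.get().copied()
+}
diff --git a/app/buck2_core/src/configuration.rs b/app/buck2_core/src/configuration.rs
new file mode 100644
index 0000000000000..37698d39d1548
--- /dev/null
+++ b/app/buck2_core/src/configuration.rs
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+//! A 'Configuration' is a set of attributes that are attached to each node in
+//! the 'static graph' that affects the behaviour of the build. Examples of
+//! these attributes are the target platform, and compiler settings.
+//!
+//! 'Configuration's are propagated from the top level request node to each of
+//! the transitive child nodes. During propagation, the configuration may change
+//! under a "transition". Multiple distinct configurations may be applied to the
+//! transitive graph, effectively duplicating the graph to create two distinct
+//! graphs with different build behaviours (split-transitions).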
+ +pub mod bound_id; +pub mod bound_label; +pub(crate) mod builtin; +pub mod cfg_diff; +pub mod compatibility; +pub mod config_setting; +pub mod constraints; +pub mod data; +pub mod hash; +pub mod pair; +pub mod transition; diff --git a/app/buck2_core/src/configuration/bound_id.rs b/app/buck2_core/src/configuration/bound_id.rs index aebb266cf0b4d..e9be47705b8bf 100644 --- a/app/buck2_core/src/configuration/bound_id.rs +++ b/app/buck2_core/src/configuration/bound_id.rs @@ -12,7 +12,7 @@ use anyhow::Context; use crate::configuration::bound_label::BoundConfigurationLabel; use crate::configuration::hash::ConfigurationHash; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum BoundConfigurationIdError { #[error("Bound configuration id must contain a hash, got: `{0}`")] MissingHash(String), @@ -21,7 +21,7 @@ enum BoundConfigurationIdError { } #[derive(derive_more::Display, Eq, PartialEq, Clone, Debug)] -#[display(fmt = "{}#{}", label, hash)] +#[display("{}#{}", label, hash)] pub struct BoundConfigurationId { pub label: BoundConfigurationLabel, pub hash: ConfigurationHash, diff --git a/app/buck2_core/src/configuration/bound_label.rs b/app/buck2_core/src/configuration/bound_label.rs index 104b7b47793c4..cc196d6dbbf44 100644 --- a/app/buck2_core/src/configuration/bound_label.rs +++ b/app/buck2_core/src/configuration/bound_label.rs @@ -11,7 +11,7 @@ use allocative::Allocative; use crate::configuration::builtin::BuiltinPlatform; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum BoundConfigurationLabelError { #[error("Configuration label is empty")] LabelIsEmpty, diff --git a/app/buck2_core/src/configuration/cfg_diff.rs b/app/buck2_core/src/configuration/cfg_diff.rs index 519f65622075a..125ee62c9a5c0 100644 --- a/app/buck2_core/src/configuration/cfg_diff.rs +++ b/app/buck2_core/src/configuration/cfg_diff.rs @@ -154,7 +154,7 @@ mod tests { use crate::configuration::constraints::ConstraintValue; use crate::configuration::data::ConfigurationData; use crate::configuration::data::ConfigurationDataData; - use crate::target::label::TargetLabel; + use crate::target::label::label::TargetLabel; #[test] fn test_diff() { diff --git a/app/buck2_core/src/configuration/compatibility.rs b/app/buck2_core/src/configuration/compatibility.rs index 96d3fc338e270..a2171fbfa7296 100644 --- a/app/buck2_core/src/configuration/compatibility.rs +++ b/app/buck2_core/src/configuration/compatibility.rs @@ -16,20 +16,20 @@ use std::sync::Arc; use allocative::Allocative; use dupe::Dupe; -use thiserror::Error; use crate::target::configured_target_label::ConfiguredTargetLabel; -use crate::target::label::TargetLabel; +use crate::target::label::label::TargetLabel; -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum CompatibilityErrors { #[error("{0}")] + #[buck2(input)] TargetIncompatible(IncompatiblePlatformReason), } /// MaybeCompatible is used to gracefully deal with things that are incompatible /// with the target platform. The main place this comes up is that targets provided on the -/// cli may be incompatible with the default or requested platform and we want to skip +/// cli may be incompatible with the default or requested platform, and we want to skip /// building those rather than have it be an error. #[derive(Clone, Dupe, Debug, Eq, PartialEq, Hash, Allocative)] pub enum MaybeCompatible { @@ -40,7 +40,7 @@ pub enum MaybeCompatible { impl MaybeCompatible { /// Converts to a result. Incompatible values get converted to an error. 
/// - /// This is just a convencience for treating incompatibility as an error. + /// This is just a convenience for treating incompatibility as an error. pub fn require_compatible(self) -> anyhow::Result { match self { MaybeCompatible::Incompatible(reason) => Err(reason.to_err()), @@ -128,6 +128,8 @@ impl Display for IncompatiblePlatformReason { match &self.cause { IncompatiblePlatformReasonCause::UnsatisfiedConfig(unsatisfied_config) => write!( f, + // WARN: CI uses this message to filter targets + // If you change this message, please also update https://fburl.com/code/f00ezpfn "{} is incompatible with {} ({} unsatisfied), check the target's compatibility attributes", self.target.unconfigured(), self.target.cfg(), @@ -148,7 +150,7 @@ impl Display for IncompatiblePlatformReason { mod tests { use crate::configuration::compatibility::IncompatiblePlatformReason; use crate::configuration::data::ConfigurationData; - use crate::target::label::TargetLabel; + use crate::target::label::label::TargetLabel; #[test] fn test_skipping_message_for_multiple() { diff --git a/app/buck2_core/src/configuration/config_setting.rs b/app/buck2_core/src/configuration/config_setting.rs index 46ed4d88cf5bb..a0451f87dfa05 100644 --- a/app/buck2_core/src/configuration/config_setting.rs +++ b/app/buck2_core/src/configuration/config_setting.rs @@ -41,6 +41,15 @@ impl ConfigSettingData { && Self::is_subset(&that.constraints, &self.constraints) && Self::is_subset(&that.buckconfigs, &self.buckconfigs) } + + pub fn testing_new( + constraint_values: BTreeMap, + ) -> ConfigSettingData { + ConfigSettingData { + constraints: constraint_values, + buckconfigs: BTreeMap::new(), + } + } } #[cfg(test)] @@ -52,7 +61,7 @@ mod tests { use crate::configuration::config_setting::ConfigSettingData; use crate::configuration::constraints::ConstraintKey; use crate::configuration::constraints::ConstraintValue; - use crate::target::label::TargetLabel; + use crate::target::label::label::TargetLabel; #[test] fn is_subset() { diff --git a/app/buck2_core/src/configuration/constraints.rs b/app/buck2_core/src/configuration/constraints.rs index f6522d80f6791..fc20d85743718 100644 --- a/app/buck2_core/src/configuration/constraints.rs +++ b/app/buck2_core/src/configuration/constraints.rs @@ -19,7 +19,7 @@ use allocative::Allocative; use derive_more::Display; use dupe::Dupe; -use crate::target::label::TargetLabel; +use crate::target::label::label::TargetLabel; /// A ConstraintKey is a label for a `constraint_setting()` target. 
#[derive( @@ -27,7 +27,19 @@ use crate::target::label::TargetLabel; )] pub struct ConstraintKey(pub TargetLabel); +impl ConstraintKey { + pub fn testing_new(label: &str) -> ConstraintKey { + ConstraintKey(TargetLabel::testing_parse(label)) + } +} + #[derive( Clone, Dupe, Debug, Display, Hash, Eq, PartialEq, Ord, PartialOrd, Allocative )] pub struct ConstraintValue(pub TargetLabel); + +impl ConstraintValue { + pub fn testing_new(label: &str) -> ConstraintValue { + ConstraintValue(TargetLabel::testing_parse(label)) + } +} diff --git a/app/buck2_core/src/configuration/data.rs b/app/buck2_core/src/configuration/data.rs index 2418b0ccce97e..7e8c04eb86ffb 100644 --- a/app/buck2_core/src/configuration/data.rs +++ b/app/buck2_core/src/configuration/data.rs @@ -14,13 +14,14 @@ use std::hash::Hasher; use allocative::Allocative; use buck2_data::ToProtoMessage; +use buck2_util::hash::BuckHasher; use dupe::Dupe; use equivalent::Equivalent; -use internment_tweaks::Intern; -use internment_tweaks::StaticInterner; use once_cell::sync::Lazy; use serde::Serialize; use serde::Serializer; +use static_interner::Intern; +use static_interner::Interner; use crate::configuration::bound_id::BoundConfigurationId; use crate::configuration::bound_label::BoundConfigurationLabel; @@ -29,7 +30,8 @@ use crate::configuration::constraints::ConstraintKey; use crate::configuration::constraints::ConstraintValue; use crate::configuration::hash::ConfigurationHash; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] +#[buck2(input)] enum ConfigurationError { #[error( "Attempted to access the configuration data for the {0} platform. \ @@ -45,7 +47,7 @@ enum ConfigurationError { UnspecifiedExec, } -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum ConfigurationLookupError { #[error(" Could not find configuration `{0}`. Configuration lookup by string requires @@ -66,6 +68,8 @@ enum ConfigurationLookupError { Debug, Eq, PartialEq, + Hash, + Dupe, Ord, PartialOrd, Allocative, @@ -73,16 +77,6 @@ enum ConfigurationLookupError { )] pub struct ConfigurationData(Intern); -/// Intern doesn't implement Hash. -#[allow(clippy::derived_hash_with_manual_eq)] // The derived PartialEq (that uses pointer equality) is still correct. -impl Hash for ConfigurationData { - fn hash(&self, state: &mut H) { - self.0.hash(state); - } -} - -impl Dupe for ConfigurationData {} - #[derive(Hash)] struct ConfigurationHashRef<'a>(&'a str); @@ -92,7 +86,7 @@ impl<'a> Equivalent for ConfigurationHashRef<'a> { } } -static INTERNER: StaticInterner = StaticInterner::new(); +static INTERNER: Interner = Interner::new(); impl ConfigurationData { /// Produces a "bound" configuration for a platform. The label should be a unique identifier for the data. @@ -357,7 +351,7 @@ impl ConfigurationDataData { Allocative, derive_more::Display )] -#[display(fmt = "{}", full_name)] +#[display("{}", full_name)] pub(crate) struct HashedConfigurationPlatform { configuration_platform: ConfigurationPlatform, // The remaining fields are computed from `platform_configuration_data`. @@ -406,7 +400,7 @@ mod tests { use crate::configuration::constraints::ConstraintValue; use crate::configuration::data::ConfigurationData; use crate::configuration::data::ConfigurationDataData; - use crate::target::label::TargetLabel; + use crate::target::label::label::TargetLabel; /// We don't want the output hash to change by accident. This test is here to assert that it /// doesn't. 
If we have a legit reason to update the config hash, we can update the hash here, @@ -430,10 +424,10 @@ mod tests { ) .unwrap(); - assert_eq!(configuration.output_hash().as_str(), "fd698fb05d52efbc"); + assert_eq!(configuration.output_hash().as_str(), "7978e19328f9f229"); assert_eq!( configuration.to_string(), - "cfg_for//:testing_exec#fd698fb05d52efbc" + "cfg_for//:testing_exec#7978e19328f9f229" ); Ok(()) @@ -459,12 +453,12 @@ mod tests { .unwrap(); assert_eq!( - "cfg_for//:testing_exec#fd698fb05d52efbc", + "cfg_for//:testing_exec#7978e19328f9f229", configuration.to_string() ); let looked_up = ConfigurationData::lookup_bound( - BoundConfigurationId::parse("cfg_for//:testing_exec#fd698fb05d52efbc").unwrap(), + BoundConfigurationId::parse("cfg_for//:testing_exec#7978e19328f9f229").unwrap(), ) .unwrap(); assert_eq!(configuration, looked_up); diff --git a/app/buck2_core/src/configuration/hash.rs b/app/buck2_core/src/configuration/hash.rs index 25f658ce3c6de..6b1cabac522ec 100644 --- a/app/buck2_core/src/configuration/hash.rs +++ b/app/buck2_core/src/configuration/hash.rs @@ -9,7 +9,7 @@ use allocative::Allocative; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum ConfigurationHashError { #[error("Configuration hash must be 16 hex digits, got: `{0}`")] Invalid(String), diff --git a/app/buck2_core/src/configuration/mod.rs b/app/buck2_core/src/configuration/mod.rs deleted file mode 100644 index 32cd85597be21..0000000000000 --- a/app/buck2_core/src/configuration/mod.rs +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! A 'Configuration' is a set of attributes that are attached to each node in -//! the 'static graph' that affects the behaviour of the build. Examples of -//! these attributes are the target platform, and compiler settings. -//! -//! 'Configuration's are propagated from the top level request node to each of -//! the transitive child nodes. During propagation, the configuration may change -//! under a "transition". Multiple distinct configurations may be applied to the -//! transitive graph, effectively duplicating the graph to create two distinct -//! graphs with different build behaviours (split-transitions). -//! 
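Editor's aside: the testing constructors added above (`ConstraintKey::testing_new`, `ConstraintValue::testing_new`, and `ConfigSettingData::testing_new`) compose as in this sketch; the constraint labels are invented.

fn config_setting_sketch() -> ConfigSettingData {
    // One constraint: the OS setting must be Linux.
    let mut constraints = BTreeMap::new();
    constraints.insert(
        ConstraintKey::testing_new("config//os:constraint_os"),
        ConstraintValue::testing_new("config//os:linux"),
    );
    ConfigSettingData::testing_new(constraints)
}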
- -pub mod bound_id; -pub mod bound_label; -pub(crate) mod builtin; -pub mod cfg_diff; -pub mod compatibility; -pub mod config_setting; -pub mod constraints; -pub mod data; -pub mod hash; -pub mod pair; -pub mod transition; diff --git a/app/buck2_core/src/configuration/pair.rs b/app/buck2_core/src/configuration/pair.rs index 7393e838a6c02..e16651daad20d 100644 --- a/app/buck2_core/src/configuration/pair.rs +++ b/app/buck2_core/src/configuration/pair.rs @@ -8,15 +8,15 @@ */ use allocative::Allocative; +use buck2_util::hash::BuckHasher; use dupe::Dupe; -use fnv::FnvHasher; -use internment_tweaks::Intern; -use internment_tweaks::StaticInterner; use once_cell::sync::Lazy; +use static_interner::Intern; +use static_interner::Interner; use crate::configuration::data::ConfigurationData; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum ConfigurationError { #[error("`ConfigurationPair` has unexpected `exec_cfg`")] HasExecCfg, @@ -34,7 +34,7 @@ struct ConfigurationPairData { #[derive(Debug, Clone, Dupe, Hash, Eq, PartialEq, Ord, PartialOrd, Allocative)] pub struct Configuration(Intern); -static INTERNER: StaticInterner = StaticInterner::new(); +static INTERNER: Interner = Interner::new(); impl Configuration { #[inline] @@ -74,7 +74,7 @@ impl Configuration { Allocative, derive_more::Display )] -#[display(fmt = "{}", "self.cfg()")] +#[display("{}", self.cfg())] pub struct ConfigurationNoExec(Configuration); impl ConfigurationNoExec { diff --git a/app/buck2_core/src/configuration/transition/mod.rs b/app/buck2_core/src/configuration/transition.rs similarity index 100% rename from app/buck2_core/src/configuration/transition/mod.rs rename to app/buck2_core/src/configuration/transition.rs diff --git a/app/buck2_core/src/configuration/transition/applied.rs b/app/buck2_core/src/configuration/transition/applied.rs index d6948d6872391..7491655a8c0bb 100644 --- a/app/buck2_core/src/configuration/transition/applied.rs +++ b/app/buck2_core/src/configuration/transition/applied.rs @@ -12,11 +12,9 @@ use starlark_map::sorted_map::SortedMap; use crate::configuration::data::ConfigurationData; -#[derive(thiserror::Error, Debug)] +#[derive(buck2_error::Error, Debug)] enum TransitionAppliedError { - #[error( - "Transition object is declared split, but transition to one is needed in this context" - )] + #[error("Transition object is declared split, but transition to one is needed in this context")] SplitWhereSingleExpected, #[error( "Transition object is declared non-split, but split transition is needed in this context" diff --git a/app/buck2_core/src/configuration/transition/id.rs b/app/buck2_core/src/configuration/transition/id.rs index 3270c96927320..51237509b3fae 100644 --- a/app/buck2_core/src/configuration/transition/id.rs +++ b/app/buck2_core/src/configuration/transition/id.rs @@ -14,7 +14,7 @@ use crate::bzl::ImportPath; /// Identifier of transition function. #[derive(Debug, Clone, Hash, Eq, PartialEq, Display, Allocative)] -#[display(fmt = "{}#{}", path, name)] +#[display("{}#{}", path, name)] pub struct TransitionId { pub path: ImportPath, pub name: String, diff --git a/app/buck2_core/src/directory/builder.rs b/app/buck2_core/src/directory/builder.rs deleted file mode 100644 index 8d4496850b9c4..0000000000000 --- a/app/buck2_core/src/directory/builder.rs +++ /dev/null @@ -1,310 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. 
- * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use allocative::Allocative; -use derivative::Derivative; -use either::Either; -use starlark_map::small_map::Entry; -use starlark_map::small_map::SmallMap; -use thiserror::Error; - -use super::Directory; -use super::DirectoryData; -use super::DirectoryDigest; -use super::DirectoryEntries; -use super::DirectoryEntry; -use super::DirectoryHasher; -use super::DirectoryMut; -use super::ExclusiveDirectory; -use super::FingerprintedDirectory; -use super::ImmutableDirectory; -use super::PathAccumulator; -use crate::fs::paths::file_name::FileName; -use crate::fs::paths::file_name::FileNameBuf; -use crate::fs::paths::IntoFileNameBufIterator; - -#[derive(Debug, Error)] -pub enum DirectoryInsertError { - #[error("Path is empty")] - EmptyPath, - - #[error("Insert conflicts with an existing leaf at path: `{}`", .path)] - CannotTraverseLeaf { path: PathAccumulator }, -} - -#[derive(Debug, Error)] -pub enum DirectoryMkdirError { - #[error("Mkdir conflicts with an existing leaf at path: `{}`", .path)] - CannotTraverseLeaf { path: PathAccumulator }, -} - -#[derive(Debug, Error)] -pub enum DirectoryMergeError { - #[error("Merge conflicts with an existing leaf at path: `{}`", .path)] - CannotTraverseLeaf { path: PathAccumulator }, -} - -/// A copy-on-write DirectoryBuilder. -#[derive(Derivative, Allocative)] -#[derivative(Debug(bound = "L: ::std::fmt::Debug"))] -#[derivative(Clone(bound = "L: ::std::clone::Clone"))] -pub enum DirectoryBuilder -where - H: DirectoryDigest, -{ - /// This has a dedicated copy and we can mutate it. - Mutable(SmallMap, L>>), - Immutable(ImmutableDirectory), -} - -impl DirectoryBuilder -where - H: DirectoryDigest, -{ - pub fn empty() -> Self { - Self::Mutable(Default::default()) - } - - fn entries( - &self, - ) -> impl Iterator, &'_ L>)> { - match self { - Self::Mutable(e) => { - let it = e.iter().map(|(k, v)| { - let k = k.as_ref(); - let v = v.as_ref().map_dir(|v| v as &dyn Directory); - (k, v) - }); - Either::Left(it) - } - Self::Immutable(e) => Either::Right(Directory::entries(e)), - } - } -} - -impl DirectoryBuilder -where - L: Clone, - H: DirectoryDigest, -{ - /// Insert the entry `val` at `path`. - /// - /// If this replaces a portion of the tree, Ok(Some) is returned. For example inserting a file - /// at `a/b` when the tree contains `a/b/c` would return a directory containing `c`, which is - /// the node that was replaced at `a/b`. No path is returned under those circumstances since - /// this can only happen at the input path. - /// - /// If this would conflict with an existing portion of the tree, Err is returned. This happens - /// when inserting at a path that traverses through an existing file. For example, inserting at - /// `a/b/c` when the current directory contains a file at `a/b` will return an error. The error - /// indicates the path where the conflict occurred. 
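Editor's sketch of the insert contract documented above, placed just before `insert` itself. It assumes, as the signature below suggests, that `&ForwardRelativePath` implements `IntoFileNameBufIterator`; the leaf type is a unit placeholder.

fn insert_contract_sketch() -> anyhow::Result<()> {
    let mut builder: DirectoryBuilder<(), NoDigest> = DirectoryBuilder::empty();

    // Inserting into an empty builder replaces nothing, so Ok(None) comes back.
    let replaced = builder.insert(
        ForwardRelativePath::new("a/b")?,
        DirectoryEntry::Leaf(()),
    )?;
    assert!(replaced.is_none());

    // Inserting at `a/b/c` must traverse the leaf at `a/b`, so it errors,
    // reporting the path where the conflict occurred.
    assert!(
        builder
            .insert(ForwardRelativePath::new("a/b/c")?, DirectoryEntry::Leaf(()))
            .is_err()
    );
    Ok(())
}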
- pub fn insert( - &mut self, - path: impl IntoFileNameBufIterator, - val: DirectoryEntry, L>, - ) -> Result, L>>, DirectoryInsertError> { - let mut path = path.into_iter(); - - let path_needle = match path.next() { - Some(path_needle) => path_needle, - None => return Err(DirectoryInsertError::EmptyPath), - }; - - self.insert_inner(path_needle, path, val) - .map_err(|path| DirectoryInsertError::CannotTraverseLeaf { path }) - } - - fn insert_inner( - &mut self, - path_needle: FileNameBuf, - mut path_rest: impl Iterator, - val: DirectoryEntry, L>, - ) -> Result, L>>, PathAccumulator> { - let entries = self.as_mut(); - - let next_path_needle = path_rest.next(); - - match next_path_needle { - Some(next_path_needle) => match entries.entry(path_needle) { - Entry::Occupied(mut entry) => match entry.get_mut() { - DirectoryEntry::Dir(d) => d - .insert_inner(next_path_needle, path_rest, val) - .map_err(|acc| acc.with(entry.key())), - _ => Err(PathAccumulator::new(entry.key())), - }, - Entry::Vacant(entry) => { - let mut dir = DirectoryBuilder::empty(); - dir.insert_inner(next_path_needle, path_rest, val) - .map_err(|acc| acc.with(entry.key()))?; - entry.insert(DirectoryEntry::Dir(dir)); - Ok(None) - } - }, - None => Ok(entries.insert(path_needle, val)), - } - } - - /// Create a directory at path. If the directory already exists, this does nothing. If this - /// would overwrite a leaf, it fails. - pub fn mkdir(&mut self, path: impl IntoFileNameBufIterator) -> Result<(), DirectoryMkdirError> { - let path = path.into_iter(); - - self.mkdir_inner(path) - .map_err(|path| DirectoryMkdirError::CannotTraverseLeaf { path }) - } - - fn mkdir_inner( - &mut self, - mut path: impl Iterator, - ) -> Result<(), PathAccumulator> { - let entries = self.as_mut(); - - let path_needle = match path.next() { - Some(p) => p, - None => return Ok(()), - }; - - match entries.entry(path_needle) { - Entry::Occupied(mut entry) => match entry.get_mut() { - DirectoryEntry::Dir(d) => { - d.mkdir_inner(path).map_err(|acc| acc.with(entry.key()))? - } - _ => return Err(PathAccumulator::new(entry.key())), - }, - Entry::Vacant(entry) => { - let mut dir = DirectoryBuilder::empty(); - dir.mkdir_inner(path).map_err(|acc| acc.with(entry.key()))?; - entry.insert(DirectoryEntry::Dir(dir)); - } - }; - - Ok(()) - } - - pub fn merge(&mut self, other: Self) -> Result<(), DirectoryMergeError> { - self.merge_inner(other) - .map_err(|path| DirectoryMergeError::CannotTraverseLeaf { path }) - } - - fn merge_inner(&mut self, mut other: Self) -> Result<(), PathAccumulator> { - match (&self, &other) { - (Self::Immutable(d1), Self::Immutable(d2)) if d1.fingerprint() == d2.fingerprint() => { - return Ok(()); - } - _ => {} - } - - let other = std::mem::take(other.as_mut()); - - let entries = self.as_mut(); - - for (k, v) in other.into_iter() { - match entries.entry(k) { - Entry::Occupied(mut entry) => match (entry.get_mut(), v) { - (DirectoryEntry::Dir(d), DirectoryEntry::Dir(o)) => { - d.merge_inner(o).map_err(|e| e.with(entry.key()))?; - } - (entry, DirectoryEntry::Leaf(o)) => { - *entry = DirectoryEntry::Leaf(o); - } - _ => return Err(PathAccumulator::new(entry.key())), - }, - Entry::Vacant(entry) => { - entry.insert(v); - } - } - } - - Ok(()) - } - - pub(super) fn as_mut( - &mut self, - ) -> &mut SmallMap, L>> { - if let Self::Mutable(ref mut dir) = self { - return dir; - }; - - let entries = match std::mem::replace(self, DirectoryBuilder::Mutable(Default::default())) { - Self::Immutable(d) => d.into_entries::>(), - Self::Mutable(..) 
=> unreachable!(), - }; - - match self { - Self::Mutable(ref mut e) => { - *e = entries; - e - } - Self::Immutable(..) => unreachable!(), - } - } -} - -impl Directory for DirectoryBuilder -where - H: DirectoryDigest, -{ - fn entries(&self) -> DirectoryEntries<'_, L, H> { - Box::new(self.entries()) - } - - fn get<'a>( - &'a self, - needle: &'_ FileName, - ) -> Option, &'a L>> { - match self { - Self::Mutable(ref dir) => dir - .get(needle) - .map(|v| v.as_ref().map_dir(|d| d as &dyn Directory)), - Self::Immutable(ref dir) => Directory::get(dir, needle), - } - } - - fn to_builder(&self) -> DirectoryBuilder - where - L: Clone, - { - self.clone() - } -} - -impl DirectoryMut for DirectoryBuilder -where - H: DirectoryDigest, - L: Clone, -{ - fn get_mut<'a>( - &'a mut self, - needle: &'_ FileName, - ) -> Option, &'a mut L>> { - self.as_mut() - .get_mut(needle) - .map(|v| v.as_mut().map_dir(|d| d as &mut dyn DirectoryMut)) - } -} - -impl DirectoryBuilder -where - H: DirectoryDigest, -{ - pub fn fingerprint(self, hasher: &impl DirectoryHasher) -> ImmutableDirectory { - match self { - Self::Mutable(entries) => { - let entries = entries - .into_iter() - .map(|(k, v)| (k, v.map_dir(|v| v.fingerprint(hasher)))) - .collect(); - ImmutableDirectory::Exclusive(ExclusiveDirectory { - data: DirectoryData::new(entries, hasher), - }) - } - Self::Immutable(c) => c, - } - } -} diff --git a/app/buck2_core/src/directory/directory.rs b/app/buck2_core/src/directory/directory.rs deleted file mode 100644 index 95647eb9a551a..0000000000000 --- a/app/buck2_core/src/directory/directory.rs +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::fmt; - -use super::DirectoryBuilder; -use super::DirectoryDigest; -use super::DirectoryEntry; -use super::OrderedDirectoryWalk; -use super::UnorderedDirectoryWalk; -use crate::fs::paths::file_name::FileName; - -pub type DirectoryEntries<'a, L, H> = - Box, &'a L>)> + 'a>; - -/// A Directory that may or may not be fingerprinted. This means it only exposes the common -/// denominator of operations available on such Directories, which is to access entries in them. -pub trait Directory { - fn entries(&self) -> DirectoryEntries<'_, L, H>; - - fn get<'a>( - &'a self, - needle: &'_ FileName, - ) -> Option, &'a L>>; - - fn unordered_walk<'a>(&'a self) -> UnorderedDirectoryWalk<'a, L, H> { - UnorderedDirectoryWalk::new(self) - } - - fn ordered_walk<'a>(&'a self) -> OrderedDirectoryWalk<'a, L, H> { - OrderedDirectoryWalk::new(self) - } - - fn to_builder(&self) -> DirectoryBuilder - where - L: Clone, - H: DirectoryDigest; -} - -impl<'a, L, H> fmt::Debug for &'a dyn Directory { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Directory") - } -} diff --git a/app/buck2_core/src/directory/directory_hasher.rs b/app/buck2_core/src/directory/directory_hasher.rs deleted file mode 100644 index f98aad4a08d35..0000000000000 --- a/app/buck2_core/src/directory/directory_hasher.rs +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. 
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */
-
-use std::fmt::Debug;
-use std::fmt::Display;
-use std::hash::Hash;
-
-use allocative::Allocative;
-use derive_more::Display;
-use dupe::Dupe;
-
-use super::DirectoryEntry;
-use super::FingerprintedDirectory;
-use crate::fs::paths::file_name::FileName;
-
-pub trait DirectoryDigest:
-    Allocative + PartialEq + Eq + Hash + Clone + Dupe + Debug + Display
-{
-}
-
-// TODO: Rename to DirectoryDigester
-pub trait DirectoryHasher<L, H> {
-    fn hash_entries<'a, D, I>(&self, entries: I) -> H
-    where
-        I: IntoIterator<Item = (&'a FileName, DirectoryEntry<&'a D, &'a L>)>,
-        D: FingerprintedDirectory<L, H> + 'a,
-        L: 'a,
-        Self: Sized;
-}
-
-#[allow(unused)]
-#[derive(Clone, Debug, Eq, PartialEq, Hash, Allocative, Display)]
-pub struct NoDigest(!);
-
-impl Dupe for NoDigest {}
-
-impl DirectoryDigest for NoDigest {}
diff --git a/app/buck2_core/src/directory/directory_iterator.rs b/app/buck2_core/src/directory/directory_iterator.rs
deleted file mode 100644
index 43bb0bd93b22f..0000000000000
--- a/app/buck2_core/src/directory/directory_iterator.rs
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */
-
-use std::fmt;
-
-use crate::fs::paths::file_name::FileName;
-use crate::fs::paths::forward_rel_path::ForwardRelativePathBuf;
-
-/// A trait shared by iterators on Directories. Unlike a regular Iterator, this returns an accessor
-/// to give us the current path in addition to the current item (which borrows from the iterator
-/// itself, which is why this cannot be an iterator).
-pub trait DirectoryIterator {
-    /// The way this iterator will report its current path.
-    type PathStack: DirectoryIteratorPathStack;
-
-    /// The items this iterator will yield.
-    type Item;
-
-    /// Provide the next item.
-    fn next<'a>(
-        &'a mut self,
-    ) -> Option<(
-        DirectoryIteratorPathAccessor<'a, Self::PathStack>,
-        Self::Item,
-    )>;
-
-    /// Compute all paths in this iterator. This returns a regular Iterator since we no longer
-    /// need to borrow from self in next.
-    fn with_paths(self) -> DirectoryIteratorWithPaths<Self>
-    where
-        Self: Sized,
-    {
-        DirectoryIteratorWithPaths { inner: self }
-    }
-
-    /// Compute none of the paths in this iterator. Here again, this is a regular Iterator.
-    fn without_paths(self) -> DirectoryIteratorWithoutPaths<Self>
-    where
-        Self: Sized,
-    {
-        DirectoryIteratorWithoutPaths { inner: self }
-    }
-}
-
-/// The stack of paths for this DirectoryIterator. This must allow iterating over the path
-/// components that make up the DirectoryIterator's current location.
-pub trait DirectoryIteratorPathStack {
-    // NOTE: Ideally we'd want this to return an iterator defined as an associated type, but that's
-    // annoying to spell out without using type Foo = impl ... This is available behind
-    // `type_alias_impl_trait`, but that causes lots of compiler crashes at this time.
-    fn for_each_path<'a, F>(&'a self, f: F)
-    where
-        F: FnMut(&'a FileName);
-}
-
-/// A thin struct that can be used to produce a path on demand.
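Before the accessor struct itself, a concrete illustration of the trait deleted above may help. The following is a minimal, std-only sketch of the walk-with-path-stack idea; `Entry` and `Walk` are invented names for illustration, not buck2 types, and the sketch materializes each path eagerly (the way `with_paths` does) rather than lending an accessor.

```rust
use std::collections::btree_map;
use std::collections::BTreeMap;

enum Entry {
    Dir(BTreeMap<String, Entry>),
    Leaf(u32),
}

struct Walk<'a> {
    // Each frame holds the name we entered under (None for the root) and the
    // remaining entries at that level; the stacked names form the current path.
    stack: Vec<(Option<&'a str>, btree_map::Iter<'a, String, Entry>)>,
}

impl<'a> Walk<'a> {
    fn new(root: &'a BTreeMap<String, Entry>) -> Self {
        Walk {
            stack: vec![(None, root.iter())],
        }
    }

    // Unlike `Iterator::next`, the path comes from `self.stack`, which is why
    // the real trait lends a path accessor instead of implementing `Iterator`.
    fn next(&mut self) -> Option<(String, &'a Entry)> {
        loop {
            let frame = self.stack.last_mut()?;
            if let Some((name, entry)) = frame.1.next() {
                let mut parts: Vec<&str> =
                    self.stack.iter().filter_map(|(n, _)| *n).collect();
                parts.push(name.as_str());
                if let Entry::Dir(d) = entry {
                    self.stack.push((Some(name.as_str()), d.iter()));
                }
                return Some((parts.join("/"), entry));
            }
            self.stack.pop();
        }
    }
}

fn main() {
    let root = BTreeMap::from([(
        "a".to_owned(),
        Entry::Dir(BTreeMap::from([("b".to_owned(), Entry::Leaf(1))])),
    )]);
    let mut walk = Walk::new(&root);
    while let Some((path, _entry)) = walk.next() {
        println!("{path}"); // prints "a", then "a/b"
    }
}
```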
-pub struct DirectoryIteratorPathAccessor<'a, T> { - pub(super) stack: &'a T, - pub(super) leaf: Option<&'a FileName>, -} - -impl<'a, T> DirectoryIteratorPathAccessor<'a, T> -where - T: DirectoryIteratorPathStack, -{ - fn for_each_path<'this, F>(&'this self, mut f: F) - where - F: FnMut(&'this FileName), - { - self.stack.for_each_path(&mut f); - if let Some(leaf) = self.leaf { - f(leaf); - } - } - - pub fn name(&self) -> Option<&'a FileName> { - self.leaf - } - - pub fn get(&self) -> ForwardRelativePathBuf { - // Evaluate the size of our path. - let mut size = 0; - self.for_each_path(|name| size += name.as_str().len() + 1); - - // Remove extra "/" we accounted for. - size = size.saturating_sub(1); - - // Produce it. - let mut first = true; - let mut path = String::with_capacity(size); - self.for_each_path(|name| { - if !first { - path.push('/'); - } - first = false; - path.push_str(name.as_str()); - }); - - #[cfg(test)] - { - assert_eq!( - size, - path.len(), - "reserved the wrong capacity ({}) for {:?}", - size, - path - ); - } - - // Concatenating FileName guarantees we get a ForwardRelativePathBuf. - ForwardRelativePathBuf::unchecked_new(path) - } -} - -impl<'a, T> fmt::Display for DirectoryIteratorPathAccessor<'a, T> -where - T: DirectoryIteratorPathStack, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.get()) - } -} - -impl<'a, T> fmt::Debug for DirectoryIteratorPathAccessor<'a, T> -where - T: DirectoryIteratorPathStack, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "DirectoryIteratorPathAccessor({})", self.get()) - } -} - -/// Iterate over a DirectoryIterator with the paths. -pub struct DirectoryIteratorWithPaths { - inner: T, -} - -impl Iterator for DirectoryIteratorWithPaths -where - T: DirectoryIterator, -{ - type Item = (ForwardRelativePathBuf, ::Item); - - fn next(&mut self) -> Option { - let (path, item) = self.inner.next()?; - let path = path.get(); - Some((path, item)) - } -} - -/// Iterate over a DirectoryIterator without the paths. -pub struct DirectoryIteratorWithoutPaths { - inner: T, -} - -impl Iterator for DirectoryIteratorWithoutPaths -where - T: DirectoryIterator, -{ - type Item = ::Item; - - fn next(&mut self) -> Option { - let (_, item) = self.inner.next()?; - Some(item) - } -} diff --git a/app/buck2_core/src/directory/directory_selector.rs b/app/buck2_core/src/directory/directory_selector.rs deleted file mode 100644 index d59644eb905c5..0000000000000 --- a/app/buck2_core/src/directory/directory_selector.rs +++ /dev/null @@ -1,337 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */
-
-use starlark_map::small_map::SmallMap;
-use thiserror::Error;
-
-use super::Directory;
-use super::DirectoryBuilder;
-use super::DirectoryDigest;
-use super::DirectoryEntries;
-use super::DirectoryEntry;
-use super::DirectoryIterator;
-use super::DirectoryIteratorPathAccessor;
-use super::DirectoryIteratorPathStack;
-use super::FingerprintedDirectory;
-use super::FingerprintedDirectoryEntries;
-use super::FingerprintedOrderedDirectoryEntries;
-use super::OrderedDirectoryEntries;
-use crate::fs::paths::file_name::FileName;
-use crate::fs::paths::file_name::FileNameBuf;
-use crate::fs::paths::IntoFileNameBufIterator;
-
-#[derive(Debug, Error)]
-pub enum DirectorySearchError<L> {
-    #[error("Search traverses a leaf")]
-    CannotTraverseLeaf { leaf: L },
-}
-
-impl<L> DirectorySearchError<L> {
-    pub fn into_leaf(self) -> L {
-        let Self::CannotTraverseLeaf { leaf } = self;
-        leaf
-    }
-}
-
-#[derive(Debug, Error)]
-pub enum DirectoryFilterError {
-    #[error("Filter traverses a leaf")]
-    CannotTraverseLeaf,
-}
-
-/// A query builder for filtering or search operations on directories. It's a tree of paths and
-/// what action we want to take on them.
-#[derive(Debug, PartialEq, Eq, Clone)]
-pub enum DirectorySelector {
-    /// Traverse only the entries that match this filename.
-    Traverse(SmallMap<FileNameBuf, DirectorySelector>),
-    /// Take this entire tree.
-    Take,
-}
-
-impl DirectorySelector {
-    pub fn empty() -> Self {
-        Self::Traverse(Default::default())
-    }
-
-    pub fn is_empty(&self) -> bool {
-        match self {
-            Self::Traverse(d) => d.is_empty(),
-            Self::Take => false,
-        }
-    }
-
-    /// Add a path to this DirectorySelector.
-    pub fn select(&mut self, path: impl IntoFileNameBufIterator) {
-        let path = path.into_iter();
-        self.select_inner(path)
-    }
-
-    fn select_inner(&mut self, mut path: impl Iterator<Item = FileNameBuf>) {
-        let entry = match path.next() {
-            Some(e) => e,
-            None => {
-                *self = Self::Take;
-                return;
-            }
-        };
-
-        match self {
-            Self::Traverse(s) => s
-                .entry(entry)
-                .or_insert_with(DirectorySelector::empty)
-                .select_inner(path),
-            Self::Take => {}
-        }
-    }
-
-    /// Filter a DirectoryBuilder by only retaining matching entries.
-    pub fn filter<L, H>(&self, dir: &mut DirectoryBuilder<L, H>) -> Result<(), DirectoryFilterError>
-    where
-        L: Clone,
-        H: DirectoryDigest,
-    {
-        let mut res = Ok(());
-
-        match self {
-            Self::Traverse(filter) => filter_inner(dir, filter, &mut res),
-            Self::Take => {}
-        };
-
-        res
-    }
-}
-
-fn filter_inner<L, H>(
-    dir: &mut DirectoryBuilder<L, H>,
-    filter: &SmallMap<FileNameBuf, DirectorySelector>,
-    res: &mut Result<(), DirectoryFilterError>,
-) where
-    L: Clone,
-    H: DirectoryDigest,
-{
-    let dir = dir.as_mut();
-
-    let entries = std::mem::take(dir);
-
-    for (k, mut v) in entries.into_iter() {
-        let selector = match filter.get(&k) {
-            Some(s) => s,
-            None => continue,
-        };
-
-        match selector {
-            DirectorySelector::Traverse(next_map) => match v {
-                DirectoryEntry::Dir(ref mut d) => filter_inner(d, next_map, res),
-                DirectoryEntry::Leaf(..) => {
-                    *res = Err(DirectoryFilterError::CannotTraverseLeaf);
-                }
-            },
-            DirectorySelector::Take => {}
-        };
-
-        dir.insert(k, v);
-    }
-}
-
-macro_rules! impl_directory_search {
-    (
-        $search_ty: ident,
-        $dir_ty: ident,
-        $entries_ty: ident,
-        $entries_method: ident,
-        $mod: ident,
-        $selector_search_method: ident,
-    ) => {
-        mod $mod {
-            use super::*;
-
-            enum SearchFrame<'a, 'b, L, H> {
-                /// The search consists of just returning the root.
-                ReturnRoot {
-                    root: Option<DirectoryEntry<&'b dyn $dir_ty<L, H>, &'b L>>,
-                },
-
-                /// Continue the search.
- Search { - search: &'a SmallMap, - name: Option<&'b FileName>, - entries: $entries_ty<'b, L, H>, - }, - } - - pub struct $search_ty<'a, 'b, L, H> { - stack: Vec>, - } - - impl<'a, 'b, L, H> $search_ty<'a, 'b, L, H> { - pub fn new(selector: &'a DirectorySelector, root: &'b D) -> Self - where - D: $dir_ty, - { - match selector { - DirectorySelector::Traverse(ref search) => Self { - stack: vec![SearchFrame::Search { - search, - name: None, - entries: $entries_ty::from(root.$entries_method()), - }], - }, - DirectorySelector::Take => Self { - stack: vec![SearchFrame::ReturnRoot { - root: Some(DirectoryEntry::Dir(root as &dyn $dir_ty)), - }], - }, - } - } - } - - impl<'a, 'b, L, H> DirectoryIterator for $search_ty<'a, 'b, L, H> { - type PathStack = Self; - type Item = Result< - DirectoryEntry<&'b dyn $dir_ty, &'b L>, - DirectorySearchError<&'b L>, - >; - - fn next<'c>( - &'c mut self, - ) -> Option<(DirectoryIteratorPathAccessor<'c, Self>, Self::Item)> { - loop { - let frame = self.stack.last_mut()?; - - match frame { - SearchFrame::ReturnRoot { root } => { - let root = root.take()?; - return Some(( - DirectoryIteratorPathAccessor { - leaf: None, - stack: self, - }, - Ok(root), - )); - } - SearchFrame::Search { - search, entries, .. - } => { - if let Some((name, entry)) = entries.next() { - let search = search.get(name); - - match search { - Some(DirectorySelector::Traverse(t)) => match entry { - // Traverse into this directory ... assuming it's a directory :) - DirectoryEntry::Dir(d) => { - self.stack.push(SearchFrame::Search { - name: Some(name), - search: t, - entries: $entries_ty::from(d.$entries_method()), - }); - continue; - } - DirectoryEntry::Leaf(leaf) => { - return Some(( - DirectoryIteratorPathAccessor { - leaf: Some(name), - stack: self, - }, - Err(DirectorySearchError::CannotTraverseLeaf { - leaf, - }), - )); - } - }, - Some(DirectorySelector::Take) => { - // Return the entry. Do not traverse further. - return Some(( - DirectoryIteratorPathAccessor { - leaf: Some(name), - stack: self, - }, - Ok(entry), - )); - } - None => { - // Ignore this node entirely. - continue; - } - } - } - } - }; - - // We've exhausted this iterator. Go back to the previous stack frame. - self.stack.pop(); - } - } - } - - impl<'a, 'b, L, H> DirectoryIteratorPathStack for $search_ty<'a, 'b, L, H> { - fn for_each_path<'this, F>(&'this self, mut f: F) - where - F: FnMut(&'this FileName), - { - let it = self.stack.iter().filter_map(|frame| match frame { - SearchFrame::ReturnRoot { .. } => None, - SearchFrame::Search { name, .. 
} => name.as_deref(), - }); - - for path in it { - f(path); - } - } - } - } - - impl DirectorySelector { - pub fn $selector_search_method<'a, 'b, L, H, D: $dir_ty>( - &'a self, - dir: &'b D, - ) -> $search_ty<'a, 'b, L, H> { - $search_ty::new(self, dir) - } - } - - pub use $mod::$search_ty; - }; -} - -impl_directory_search!( - UnorderedDirectorySearch, - Directory, - DirectoryEntries, - entries, - unordered_directory_search, - unordered_search, -); - -impl_directory_search!( - OrderedDirectorySearch, - Directory, - OrderedDirectoryEntries, - entries, - ordered_directory_search, - ordered_search, -); - -impl_directory_search!( - FingerprintedUnorderedDirectorySearch, - FingerprintedDirectory, - FingerprintedDirectoryEntries, - fingerprinted_entries, - fingerprinted_unordered_directory_search, - fingerprinted_unordered_search, -); - -impl_directory_search!( - FingerprintedOrderedDirectorySearch, - FingerprintedDirectory, - FingerprintedOrderedDirectoryEntries, - fingerprinted_entries, - fingerprinted_ordered_directory_search, - fingerprinted_ordered_search, -); diff --git a/app/buck2_core/src/directory/entries.rs b/app/buck2_core/src/directory/entries.rs deleted file mode 100644 index 2098ad77c2351..0000000000000 --- a/app/buck2_core/src/directory/entries.rs +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use super::Directory; -use super::DirectoryEntries; -use super::DirectoryEntry; -use super::FingerprintedDirectory; -use super::FingerprintedDirectoryEntries; -use crate::fs::paths::file_name::FileName; - -macro_rules! impl_ordered_entries { - ($ordered_entries_ty: ident, $dir_ty: ident, $from_ty: ident,) => { - /// A wrapper struct that allows an ordered traversal of directory entries. - pub struct $ordered_entries_ty<'a, L, H> { - entries: Vec<(&'a FileName, DirectoryEntry<&'a dyn $dir_ty, &'a L>)>, - } - - impl<'a, L, H> From<$from_ty<'a, L, H>> for $ordered_entries_ty<'a, L, H> { - fn from(entries: $from_ty<'a, L, H>) -> Self { - let mut entries = entries.collect::>(); - entries.sort_by(|(name1, _), (name2, _)| name2.cmp(name1)); - Self { entries } - } - } - - impl<'a, L, H> Iterator for $ordered_entries_ty<'a, L, H> { - type Item = (&'a FileName, DirectoryEntry<&'a dyn $dir_ty, &'a L>); - - fn next(&mut self) -> Option { - self.entries.pop() - } - } - }; -} - -impl_ordered_entries!( - FingerprintedOrderedDirectoryEntries, - FingerprintedDirectory, - FingerprintedDirectoryEntries, -); - -impl_ordered_entries!(OrderedDirectoryEntries, Directory, DirectoryEntries,); diff --git a/app/buck2_core/src/directory/exclusive_directory.rs b/app/buck2_core/src/directory/exclusive_directory.rs deleted file mode 100644 index 08d0ba04a08b9..0000000000000 --- a/app/buck2_core/src/directory/exclusive_directory.rs +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -use allocative::Allocative; -use derivative::Derivative; -use derive_more::Display; - -use super::impl_fingerprinted_directory; -use super::DashMapDirectoryInterner; -use super::Directory; -use super::DirectoryBuilder; -use super::DirectoryData; -use super::DirectoryDigest; -use super::DirectoryEntries; -use super::DirectoryEntry; -use super::FingerprintedDirectory; -use super::FingerprintedDirectoryEntries; -use super::ImmutableDirectory; -use super::SharedDirectory; -use crate::fs::paths::file_name::FileName; -use crate::fs::paths::file_name::FileNameBuf; - -#[derive(Derivative, Display, Allocative)] -#[derivative(Debug(bound = "L: ::std::fmt::Debug"))] -#[derivative(Clone(bound = "L: ::std::clone::Clone"))] -#[display(fmt = "{}", "self.data")] -pub struct ExclusiveDirectory -where - H: DirectoryDigest, -{ - pub(super) data: DirectoryData, L, H>, -} - -impl ExclusiveDirectory -where - H: DirectoryDigest, -{ - pub fn shared(self, interner: &DashMapDirectoryInterner) -> SharedDirectory { - if let Some(shared) = interner.get(self.fingerprint()) { - return shared; - } - - let DirectoryData { - entries, - fingerprint, - _hash, - } = self.data; - - let entries = entries - .into_iter() - .map(|(k, v)| (k, v.map_dir(|d| d.shared(interner)))) - .collect(); - - let new_data = DirectoryData { - entries, - fingerprint, - _hash, - }; - - interner.intern(new_data) - } - - pub fn into_entries(self) -> C - where - C: FromIterator<(FileNameBuf, DirectoryEntry, L>)>, - { - self.data - .entries - .into_iter() - .map(|(k, v)| (k, v.map_dir(|v| v.into_builder()))) - .collect() - } - - pub fn entries( - &self, - ) -> impl IntoIterator, L>)> + '_ - { - &self.data.entries - } - - pub fn get<'a>( - &'a self, - needle: &'_ FileName, - ) -> Option, &'a L>> { - self.data.entries.get(needle).as_ref().map(|v| v.as_ref()) - } - - pub fn fingerprint(&self) -> &H { - self.data.fingerprint() - } - - pub fn into_builder(self) -> DirectoryBuilder { - DirectoryBuilder::Immutable(ImmutableDirectory::Exclusive(self)) - } -} - -impl_fingerprinted_directory!(ExclusiveDirectory); diff --git a/app/buck2_core/src/directory/find.rs b/app/buck2_core/src/directory/find.rs deleted file mode 100644 index 90eb49599d2e8..0000000000000 --- a/app/buck2_core/src/directory/find.rs +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -use thiserror::Error; - -use super::Directory; -use super::DirectoryEntry; -use super::DirectoryMut; -use super::FingerprintedDirectory; -use super::PathAccumulator; -use crate::fs::paths::file_name::FileName; -use crate::fs::paths::forward_rel_path::ForwardRelativePathBuf; - -#[derive(Debug, Error)] -pub enum DirectoryFindError { - #[error("Path is empty")] - EmptyPath, - - #[error("Find would traverse a leaf at path: `{}`", .path)] - CannotTraverseLeaf { path: PathAccumulator }, -} - -trait FindConflict { - fn new<'b>(path: &'b FileName, remaining: impl Iterator, leaf: T) -> Self; - - fn with<'b>(self, path: &'b FileName) -> Self; -} - -impl FindConflict for PathAccumulator { - fn new<'b>( - path: &'b FileName, - _remaining: impl Iterator, - _leaf: T, - ) -> Self { - PathAccumulator::new(path) - } - - fn with<'b>(self, path: &'b FileName) -> Self { - PathAccumulator::with(self, path) - } -} - -struct PrefixLookupContainer { - leaf: T, - path: ForwardRelativePathBuf, -} - -impl FindConflict for PrefixLookupContainer { - fn new<'b>(path: &'b FileName, remaining: impl Iterator, leaf: T) -> Self { - Self { - leaf, - path: std::iter::once(path) - .chain(remaining) - .collect::>() - .expect("We know there is at least one path component"), - } - } - - fn with<'b>(self, _path: &'b FileName) -> Self { - self - } -} - -macro_rules! impl_find { - ( - $dir_ty: ident, - $getter: ident, - $find_name: ident, - $find_prefix_name: ident, - $mod: ident, - $( $mutability:tt, )* - ) => { - mod $mod { - use super::*; - - pub fn $find_name<'a, 'b, L, H, D: $dir_ty>( - dir: &'a $($mutability)* D, - path: impl IntoIterator, - ) -> Result, &'a $($mutability)* L>>, DirectoryFindError> - { - let mut path = path.into_iter(); - - let path_needle = match path.next() { - Some(path_needle) => path_needle, - None => return Err(DirectoryFindError::EmptyPath), - }; - - find_inner::<_, _, PathAccumulator>(dir, path_needle, path) - .map_err(|path| DirectoryFindError::CannotTraverseLeaf { path }) - } - - pub fn $find_prefix_name<'a, 'b, L, H, D: $dir_ty>( - dir: &'a $($mutability)* D, - path: impl IntoIterator, - ) -> Result< - Option<( - DirectoryEntry<&'a $($mutability)* dyn $dir_ty, &'a $($mutability)* L>, - Option, - )>, - DirectoryFindError, - > { - let mut path = path.into_iter(); - - let path_needle = match path.next() { - Some(path_needle) => path_needle, - None => return Err(DirectoryFindError::EmptyPath), - }; - - match find_inner::<_, _, PrefixLookupContainer<&'a $($mutability)* L>>(dir, path_needle, path) { - Ok(maybe_leaf) => Ok((maybe_leaf.map(|l| (l, None)))), - Err(PrefixLookupContainer { leaf, path }) => { - Ok(Some((DirectoryEntry::Leaf(leaf), Some(path)))) - } - } - } - - fn find_inner<'a, 'b, L, H, A>( - dir: &'a $($mutability)* dyn $dir_ty, - path_needle: &'b FileName, - mut path_rest: impl Iterator, - ) -> Result, &'a $($mutability)* L>>, A> - where - A: FindConflict<&'a $($mutability)* L>, - { - let entry = match dir.$getter(path_needle) { - Some(entry) => entry, - None => return Ok(None), - }; - - let next_path_needle = match path_rest.next() { - Some(next_path_needle) => next_path_needle, - None => return Ok(Some(entry)), - }; - - match entry { - DirectoryEntry::Dir(dir) => { - find_inner::<_, _, A>(dir, next_path_needle, path_rest) - .map_err(|acc| acc.with(path_needle)) - } - DirectoryEntry::Leaf(leaf) => Err(A::new(next_path_needle, path_rest, leaf)), - } - } - } - - pub use $mod::$find_name; - pub use $mod::$find_prefix_name; - }; -} - -impl_find!( - FingerprintedDirectory, - get, - 
find_fingerprinted, - find_prefix_fingerprinted, - impl_find_fingerprinted, -); -impl_find!(Directory, get, find, find_prefix, impl_find,); -impl_find!( - DirectoryMut, - get_mut, - find_mut, - find_prefix_mut, - impl_find_mut, - mut, -); diff --git a/app/buck2_core/src/directory/fingerprinted_directory.rs b/app/buck2_core/src/directory/fingerprinted_directory.rs deleted file mode 100644 index 9f532b53b3681..0000000000000 --- a/app/buck2_core/src/directory/fingerprinted_directory.rs +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::fmt; - -use super::DirectoryDigest; -use super::DirectoryEntry; -use super::FingerprintedOrderedDirectoryWalk; -use super::FingerprintedUnorderedDirectoryWalk; -use crate::fs::paths::file_name::FileName; - -pub type FingerprintedDirectoryEntries<'a, L, H> = Box< - dyn Iterator< - Item = ( - &'a FileName, - DirectoryEntry<&'a dyn FingerprintedDirectory, &'a L>, - ), - > + 'a, ->; - -pub trait FingerprintedDirectory { - fn fingerprinted_entries(&self) -> FingerprintedDirectoryEntries<'_, L, H>; - - fn get<'a>( - &'a self, - needle: &'_ FileName, - ) -> Option, &'a L>>; - - fn fingerprinted_unordered_walk(&self) -> FingerprintedUnorderedDirectoryWalk<'_, L, H> { - FingerprintedUnorderedDirectoryWalk::new(self) - } - - fn fingerprinted_ordered_walk(&self) -> FingerprintedOrderedDirectoryWalk<'_, L, H> { - FingerprintedOrderedDirectoryWalk::new(self) - } - - fn fingerprint(&self) -> &H - where - H: DirectoryDigest; -} - -impl<'a, L, H> fmt::Debug for &'a dyn FingerprintedDirectory { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "FingerprintedDirectory") - } -} diff --git a/app/buck2_core/src/directory/immutable_directory.rs b/app/buck2_core/src/directory/immutable_directory.rs deleted file mode 100644 index 52e88016a318c..0000000000000 --- a/app/buck2_core/src/directory/immutable_directory.rs +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -#![cfg_attr(feature = "gazebo_lint", allow(gazebo_lint_use_dupe))] - -use allocative::Allocative; -use derivative::Derivative; -use derive_more::Display; - -use super::DashMapDirectoryInterner; -use super::Directory; -use super::DirectoryBuilder; -use super::DirectoryDigest; -use super::DirectoryEntries; -use super::DirectoryEntry; -use super::ExclusiveDirectory; -use super::FingerprintedDirectory; -use super::FingerprintedDirectoryEntries; -use super::SharedDirectory; -use crate::fs::paths::file_name::FileName; -use crate::fs::paths::file_name::FileNameBuf; - -#[derive(Derivative, Display, Allocative)] -#[derivative(Debug(bound = "L: ::std::fmt::Debug"))] -#[derivative(Clone(bound = "L: ::std::clone::Clone"))] -pub enum ImmutableDirectory -where - H: DirectoryDigest, -{ - Exclusive(ExclusiveDirectory), - Shared(SharedDirectory), -} - -impl ImmutableDirectory -where - H: DirectoryDigest, -{ - pub fn shared(self, interner: &DashMapDirectoryInterner) -> SharedDirectory { - match self { - Self::Exclusive(dir) => dir.shared(interner), - Self::Shared(dir) => dir, - } - } - - pub fn into_builder(self) -> DirectoryBuilder { - match self { - Self::Exclusive(d) => d.into_builder(), - Self::Shared(s) => s.into_builder(), - } - } -} - -impl ImmutableDirectory -where - L: Clone, - H: DirectoryDigest, -{ - pub fn into_entries(self) -> C - where - C: FromIterator<(FileNameBuf, DirectoryEntry, L>)>, - { - match self { - Self::Exclusive(dir) => dir.into_entries(), - Self::Shared(dir) => dir.into_entries(), - } - } -} - -impl Directory for ImmutableDirectory -where - H: DirectoryDigest, -{ - fn entries(&self) -> DirectoryEntries<'_, L, H> { - match self { - Self::Exclusive(dir) => Directory::entries(dir), - Self::Shared(dir) => Directory::entries(dir), - } - } - - fn get<'a>( - &'a self, - needle: &'_ FileName, - ) -> Option, &'a L>> { - match self { - Self::Exclusive(dir) => Directory::get(dir, needle), - Self::Shared(dir) => Directory::get(dir, needle), - } - } - - fn to_builder(&self) -> DirectoryBuilder - where - L: Clone, - { - self.clone().into_builder() - } -} - -impl FingerprintedDirectory for ImmutableDirectory -where - H: DirectoryDigest, -{ - fn fingerprinted_entries<'a>(&'a self) -> FingerprintedDirectoryEntries<'a, L, H> { - match self { - Self::Exclusive(dir) => FingerprintedDirectory::fingerprinted_entries(dir), - Self::Shared(dir) => FingerprintedDirectory::fingerprinted_entries(dir), - } - } - - fn get<'a>( - &'a self, - needle: &'_ FileName, - ) -> Option, &'a L>> { - match self { - Self::Exclusive(dir) => FingerprintedDirectory::get(dir, needle), - Self::Shared(dir) => FingerprintedDirectory::get(dir, needle), - } - } - - fn fingerprint(&self) -> &H { - match self { - Self::Exclusive(dir) => FingerprintedDirectory::fingerprint(dir), - Self::Shared(dir) => FingerprintedDirectory::fingerprint(dir), - } - } -} - -impl PartialEq for ImmutableDirectory -where - H: DirectoryDigest, -{ - fn eq(&self, other: &Self) -> bool { - self.fingerprint() == other.fingerprint() - } -} - -impl Eq for ImmutableDirectory where H: DirectoryDigest {} diff --git a/app/buck2_core/src/directory/macros.rs b/app/buck2_core/src/directory/macros.rs deleted file mode 100644 index c209e46dd86da..0000000000000 --- a/app/buck2_core/src/directory/macros.rs +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. 
- * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -macro_rules! impl_fingerprinted_directory { - ( - $this: ident - ) => { - impl Directory for $this - where - H: DirectoryDigest, - { - fn entries<'a>(&'a self) -> DirectoryEntries<'a, L, H> { - let it = self.entries().into_iter().map(|(k, v)| { - let k = k.as_ref(); - let v = v.as_ref().map_dir(|v| v as &dyn Directory); - (k, v) - }); - Box::new(it) - } - - fn get<'a>( - &'a self, - needle: &'_ FileName, - ) -> Option, &'a L>> { - $this::get(self, needle).map(|v| v.map_dir(|d| d as &dyn Directory)) - } - - fn to_builder(&self) -> DirectoryBuilder - where - L: Clone, - { - self.clone().into_builder() - } - } - - impl FingerprintedDirectory for $this - where - H: DirectoryDigest, - { - fn fingerprinted_entries<'a>(&'a self) -> FingerprintedDirectoryEntries<'a, L, H> { - let it = self.entries().into_iter().map(|(k, v)| { - let k = k.as_ref(); - let v = v - .as_ref() - .map_dir(|v| v as &dyn FingerprintedDirectory); - (k, v) - }); - Box::new(it) - } - - fn get<'a>( - &'a self, - needle: &'_ FileName, - ) -> Option, &'a L>> { - $this::get(self, needle) - .map(|v| v.map_dir(|d| d as &dyn FingerprintedDirectory)) - } - - fn fingerprint(&self) -> &H { - $this::fingerprint(self) - } - } - - impl PartialEq for $this - where - H: DirectoryDigest, - { - fn eq(&self, other: &Self) -> bool { - self.fingerprint() == other.fingerprint() - } - } - - impl Eq for $this where H: DirectoryDigest {} - }; -} - -pub(super) use impl_fingerprinted_directory; diff --git a/app/buck2_core/src/directory/mod.rs b/app/buck2_core/src/directory/mod.rs deleted file mode 100644 index 2d3f6ff3f64b7..0000000000000 --- a/app/buck2_core/src/directory/mod.rs +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -#![allow(clippy::module_inception)] - -mod builder; -mod dashmap_directory_interner; -mod directory; -mod directory_data; -mod directory_hasher; -mod directory_iterator; -mod directory_mut; -mod directory_selector; -mod entries; -mod entry; -mod exclusive_directory; -mod find; -mod fingerprinted_directory; -mod immutable_directory; -mod macros; -mod no_hasher; -mod path_accumulator; -mod shared_directory; -mod test; -mod walk; - -pub use builder::DirectoryBuilder; -pub use builder::DirectoryInsertError; -pub use builder::DirectoryMergeError; -pub use builder::DirectoryMkdirError; -pub use dashmap_directory_interner::DashMapDirectoryInterner; -pub use directory::Directory; -pub use directory::DirectoryEntries; -pub use directory_data::DirectoryData; -pub use directory_hasher::DirectoryDigest; -pub use directory_hasher::DirectoryHasher; -pub use directory_hasher::NoDigest; -pub use directory_iterator::DirectoryIterator; -pub use directory_iterator::DirectoryIteratorPathAccessor; -pub use directory_iterator::DirectoryIteratorPathStack; -pub use directory_iterator::DirectoryIteratorWithPaths; -pub use directory_iterator::DirectoryIteratorWithoutPaths; -pub use directory_mut::DirectoryMut; -pub use directory_selector::DirectorySearchError; -pub use directory_selector::DirectorySelector; -pub use directory_selector::FingerprintedOrderedDirectorySearch; -pub use directory_selector::FingerprintedUnorderedDirectorySearch; -pub use directory_selector::OrderedDirectorySearch; -pub use directory_selector::UnorderedDirectorySearch; -use entries::FingerprintedOrderedDirectoryEntries; -use entries::OrderedDirectoryEntries; -pub use entry::DirectoryEntry; -pub use exclusive_directory::ExclusiveDirectory; -pub use find::find; -pub use find::find_fingerprinted; -pub use find::find_mut; -pub use find::find_prefix; -pub use find::find_prefix_fingerprinted; -pub use find::find_prefix_mut; -pub use find::DirectoryFindError; -pub use fingerprinted_directory::FingerprintedDirectory; -pub use fingerprinted_directory::FingerprintedDirectoryEntries; -pub use immutable_directory::ImmutableDirectory; -pub use path_accumulator::PathAccumulator; -pub use shared_directory::SharedDirectory; -pub use shared_directory::SharedDirectoryData; -pub use shared_directory::SharedDirectoryInner; -pub use walk::fingerprinted_ordered_entry_walk; -pub use walk::fingerprinted_unordered_entry_walk; -pub use walk::ordered_entry_walk; -pub use walk::unordered_entry_walk; -pub use walk::DirectoryEntryWalk; -pub use walk::FingerprintedOrderedDirectoryWalk; -pub use walk::FingerprintedUnorderedDirectoryWalk; -pub use walk::OrderedDirectoryWalk; -pub use walk::UnorderedDirectoryWalk; - -use self::macros::impl_fingerprinted_directory; diff --git a/app/buck2_core/src/directory/no_hasher.rs b/app/buck2_core/src/directory/no_hasher.rs deleted file mode 100644 index 775b095277634..0000000000000 --- a/app/buck2_core/src/directory/no_hasher.rs +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -use allocative::Allocative; -use derive_more::Display; -use dupe::Dupe; - -#[allow(unused)] -#[derive(Eq, PartialEq, Copy, Clone, Display, Debug, Hash, Allocative)] -pub struct NoHash(!); - -impl Dupe for NoHash {} diff --git a/app/buck2_core/src/directory/shared_directory.rs b/app/buck2_core/src/directory/shared_directory.rs deleted file mode 100644 index 9980874cc0ff3..0000000000000 --- a/app/buck2_core/src/directory/shared_directory.rs +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -#![cfg_attr(feature = "gazebo_lint", allow(gazebo_lint_use_dupe))] - -use std::sync::Arc; - -use allocative::Allocative; -use derivative::Derivative; -use derive_more::Display; -use dupe::Clone_; -use dupe::Dupe_; - -use super::impl_fingerprinted_directory; -use super::DashMapDirectoryInterner; -use super::Directory; -use super::DirectoryBuilder; -use super::DirectoryData; -use super::DirectoryDigest; -use super::DirectoryEntries; -use super::DirectoryEntry; -use super::FingerprintedDirectory; -use super::FingerprintedDirectoryEntries; -use super::ImmutableDirectory; -use crate::fs::paths::file_name::FileName; -use crate::fs::paths::file_name::FileNameBuf; - -pub type SharedDirectoryData = DirectoryData, L, H>; - -#[derive(Derivative, Display, Allocative)] -#[derivative(Debug(bound = "L: ::std::fmt::Debug"))] -#[display(fmt = "{}", "self.data")] -pub struct SharedDirectoryInner -where - H: DirectoryDigest, -{ - pub(super) data: SharedDirectoryData, - - #[derivative(Debug = "ignore")] - pub(super) interner: DashMapDirectoryInterner, -} - -impl Drop for SharedDirectoryInner -where - H: DirectoryDigest, -{ - fn drop(&mut self) { - self.interner.dropped(&self.data) - } -} - -#[derive(Derivative, Clone_, Dupe_, Display, Allocative)] -#[derivative(Debug(bound = "L: ::std::fmt::Debug"))] -#[display(fmt = "{}", "self.inner")] -pub struct SharedDirectory -where - H: DirectoryDigest, -{ - pub(super) inner: Arc>, -} - -impl SharedDirectory -where - H: DirectoryDigest, -{ - pub fn as_immutable(self) -> ImmutableDirectory { - ImmutableDirectory::Shared(self) - } - - pub fn entries( - &self, - ) -> impl IntoIterator, L>)> + '_ - { - &self.inner.data.entries - } - - pub fn get<'a>( - &'a self, - needle: &'_ FileName, - ) -> Option, &'a L>> { - self.inner - .data - .entries - .get(needle) - .as_ref() - .map(|v| v.as_ref()) - } - - pub fn fingerprint(&self) -> &H { - self.inner.data.fingerprint() - } - - pub fn into_builder(self) -> DirectoryBuilder { - DirectoryBuilder::Immutable(self.as_immutable()) - } - - pub fn ptr_eq(&self, other: &SharedDirectory) -> bool { - Arc::ptr_eq(&self.inner, &other.inner) - } -} - -impl SharedDirectory -where - L: Clone, - H: DirectoryDigest, -{ - pub fn into_entries(self) -> C - where - C: FromIterator<(FileNameBuf, DirectoryEntry, L>)>, - { - self.entries() - .into_iter() - .map(|(k, v)| (k.clone(), v.clone().map_dir(|v| v.into_builder()))) - .collect() - } -} - -impl_fingerprinted_directory!(SharedDirectory); diff --git a/app/buck2_core/src/directory/test.rs b/app/buck2_core/src/directory/test.rs deleted file mode 100644 index 0944d5d93e515..0000000000000 --- a/app/buck2_core/src/directory/test.rs +++ /dev/null @@ -1,636 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. 
and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -#![cfg(test)] - -use std::collections::hash_map::DefaultHasher; -use std::hash::Hash; -use std::hash::Hasher; - -use allocative::Allocative; -use assert_matches::assert_matches; -use derive_more::Display; -use dupe::Dupe; - -use super::*; -use crate::fs::paths::file_name::FileName; -use crate::fs::paths::forward_rel_path::ForwardRelativePath; - -#[derive(Clone, Dupe, Debug, Eq, PartialEq, Hash)] -pub struct NopEntry; - -pub struct TestHasher; - -#[derive(Clone, Dupe, Debug, Eq, PartialEq, Hash, Allocative, Display)] -struct TestDigest(u64); - -impl DirectoryDigest for TestDigest {} - -impl DirectoryHasher for TestHasher { - fn hash_entries<'a, D, I>(&self, entries: I) -> TestDigest - where - I: IntoIterator)>, - D: FingerprintedDirectory + 'a, - { - let mut hasher = DefaultHasher::new(); - - let mut entries = entries - .into_iter() - .map(|(name, entry)| { - let entry = entry.map_dir(|d| d.fingerprint()); - (name, entry) - }) - .collect::>(); - entries.sort_by_key(|(name, _)| *name); - - entries.hash(&mut hasher); - TestDigest(hasher.finish()) - } -} - -type TestDirectoryBuilder = DirectoryBuilder; -type NoHasherDirectoryBuilder = DirectoryBuilder; - -fn path<'a>(s: &'a str) -> &'a ForwardRelativePath { - ForwardRelativePath::unchecked_new(s) -} - -#[test] -fn test_insert() -> anyhow::Result<()> { - let mut b = NoHasherDirectoryBuilder::empty(); - - assert_matches!( - b.insert(path("a/b"), DirectoryEntry::Leaf(NopEntry)), - Ok(None) - ); - - assert_matches!( - b.insert(path("a/b/c"), DirectoryEntry::Leaf(NopEntry)), - Err(DirectoryInsertError::CannotTraverseLeaf { path }) => { - assert_eq!(path.to_string(), "a/b"); - } - ); - - assert_matches!( - b.insert(path("a"), DirectoryEntry::Leaf(NopEntry)), - Ok(Some(DirectoryEntry::Dir(..))) - ); - - Ok(()) -} - -#[test] -fn test_walk() -> anyhow::Result<()> { - let mut b = TestDirectoryBuilder::empty(); - b.insert(path("a/b"), DirectoryEntry::Leaf(NopEntry))?; - b.insert( - path("b"), - DirectoryEntry::Dir(TestDirectoryBuilder::empty()), - )?; - - { - let mut it = b.ordered_walk().with_paths(); - - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("a")) - ); - - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("a/b")) - ); - - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("b")) - ); - - assert_matches!(it.next(), None); - } - - { - let it = b.unordered_walk().with_paths(); - let mut collected = it.collect::>(); - collected.sort_by_key(|(name, _)| name.clone()); - let mut it = collected.into_iter(); - - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("a")) - ); - - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("a/b")) - ); - - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("b")) - ); - - assert_matches!(it.next(), None); - } - - let b = b.fingerprint(&TestHasher); - - { - let mut it = b.fingerprinted_ordered_walk().with_paths(); - - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("a")) - ); - - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("a/b")) - ); - - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("b")) - ); - - assert_matches!(it.next(), None); - } - - { - let it = 
b.fingerprinted_unordered_walk().with_paths(); - let mut collected = it.collect::>(); - collected.sort_by_key(|(name, _)| name.clone()); - let mut it = collected.into_iter(); - - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("a")) - ); - - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("a/b")) - ); - - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("b")) - ); - - assert_matches!(it.next(), None); - } - - { - let mut walk = b.fingerprinted_ordered_walk(); - - assert_matches!( - walk.next(), - Some((p, _)) => assert_eq!(p.get(), path("a")) - ); - - assert_matches!( - walk.next(), - Some((p, _)) => assert_eq!(p.get(), path("a/b")) - ); - - assert_matches!( - walk.next(), - Some((p, _)) => assert_eq!(p.get(), path("b")) - ); - - assert_matches!(walk.next(), None); - } - - { - // Test that the API we envision does compile. - let mut expected_paths = vec![path("a"), path("a/b"), path("b")].into_iter(); - let mut walk = b.fingerprinted_ordered_walk(); - while let Some((p, _)) = walk.next() { - assert_eq!(p.get(), expected_paths.next().unwrap()); - } - } - - Ok(()) -} - -#[test] -fn test_merge() -> anyhow::Result<()> { - let mut a = TestDirectoryBuilder::empty(); - a.insert(path("a/b"), DirectoryEntry::Leaf(NopEntry))?; - - let mut b = TestDirectoryBuilder::empty(); - b.insert(path("a/c"), DirectoryEntry::Leaf(NopEntry))?; - - a.merge(b)?; - - let mut it = a.ordered_walk().with_paths(); - - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("a")) - ); - - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("a/b")) - ); - - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("a/c")) - ); - - assert_matches!(it.next(), None); - - Ok(()) -} - -#[test] -fn test_merge_overwrite() -> anyhow::Result<()> { - let mut a = TestDirectoryBuilder::empty(); - a.insert(path("a/b"), DirectoryEntry::Leaf(NopEntry))?; - - let mut b = TestDirectoryBuilder::empty(); - b.insert(path("a"), DirectoryEntry::Leaf(NopEntry))?; - - a.merge(b)?; - - Ok(()) -} - -#[test] -fn test_merge_conflict() -> anyhow::Result<()> { - let mut a = TestDirectoryBuilder::empty(); - a.insert(path("a"), DirectoryEntry::Leaf(NopEntry))?; - - let mut b = TestDirectoryBuilder::empty(); - b.insert(path("a/b"), DirectoryEntry::Leaf(NopEntry))?; - - assert_matches!( - a.merge(b), - Err(DirectoryMergeError::CannotTraverseLeaf { path }) => { - assert_eq!(path.to_string(), "a"); - } - ); - - Ok(()) -} - -#[test] -fn test_copy_on_write() -> anyhow::Result<()> { - let empty = TestDirectoryBuilder::empty().fingerprint(&TestHasher); - - let mut a = TestDirectoryBuilder::empty(); - a.insert(path("a"), DirectoryEntry::Dir(empty.into_builder()))?; - - a.insert(path("a/b"), DirectoryEntry::Leaf(NopEntry))?; - - let mut it = a.ordered_walk().with_paths(); - - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("a")) - ); - - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("a/b")) - ); - - Ok(()) -} - -#[test] -fn test_find() -> anyhow::Result<()> { - let mut a = TestDirectoryBuilder::empty(); - a.insert(path("a/b/c"), DirectoryEntry::Leaf(NopEntry))?; - - assert_matches!(find(&a, path("a/b/c")), Ok(Some(DirectoryEntry::Leaf(..)))); - - assert_matches!(find(&a, path("a/b")), Ok(Some(DirectoryEntry::Dir(..)))); - - Ok(()) -} - -#[test] -fn test_find_prefix() -> anyhow::Result<()> { - let mut a = TestDirectoryBuilder::empty(); - a.insert(path("a/b/c"), DirectoryEntry::Leaf(NopEntry))?; - - assert_matches!( - find_prefix(&a, 
path("a/b/c")), - Ok(Some((DirectoryEntry::Leaf(..), None))) - ); - assert_matches!( - find_prefix(&a, path("a/b")), - Ok(Some((DirectoryEntry::Dir(..), None))) - ); - - assert_matches!( - find_prefix(&a, path("a/b/c/d")), - Ok(Some((DirectoryEntry::Leaf(..), Some(rest)))) => { - assert_eq!(rest, path("d")); - } - ); - assert_matches!( - find_prefix(&a, path("a/b/c/d/e")), - Ok(Some((DirectoryEntry::Leaf(..), Some(rest)))) => { - assert_eq!(rest, path("d/e")); - } - ); - - Ok(()) -} - -#[test] -fn test_find_mut() -> anyhow::Result<()> { - // Fewer tests than test_find since under the hood it's the exact same implementation. - let mut a = TestDirectoryBuilder::empty(); - a.insert(path("a/b/c"), DirectoryEntry::Leaf(NopEntry))?; - - assert_matches!( - find_mut(&mut a, path("a/b/c")), - Ok(Some(DirectoryEntry::Leaf(..))) - ); - - assert_matches!( - find_prefix_mut(&mut a, path("a/b/c/d")), - Ok(Some((DirectoryEntry::Leaf(..), Some(rest)))) => { - assert_eq!(rest, path("d")); - } - ); - - Ok(()) -} - -#[test] -fn test_search() -> anyhow::Result<()> { - let mut b = TestDirectoryBuilder::empty(); - b.insert(path("a/b"), DirectoryEntry::Leaf(NopEntry))?; - b.insert(path("b/c"), DirectoryEntry::Leaf(NopEntry))?; - let d = b.fingerprint(&TestHasher); - - { - let mut selector = DirectorySelector::empty(); - selector.select(path("a/b")); - - let mut it = selector.fingerprinted_ordered_search(&d).with_paths(); - - assert_matches!( - it.next(), - Some((p, Ok(DirectoryEntry::Leaf(..)))) => assert_eq!(p, path("a/b")) - ); - assert_matches!(it.next(), None) - } - - { - let mut selector = DirectorySelector::empty(); - selector.select(path("a/b/c")); - selector.select(path("b/c")); - - let mut it = selector.fingerprinted_ordered_search(&d).with_paths(); - - assert_matches!( - it.next(), - Some((p, Err(DirectorySearchError::CannotTraverseLeaf { .. 
}))) => assert_eq!(p, path("a/b")) - ); - assert_matches!( - it.next(), - Some((p, Ok(DirectoryEntry::Leaf(..)))) => assert_eq!(p, path("b/c")) - ); - assert_matches!(it.next(), None) - } - - { - let mut selector = DirectorySelector::empty(); - selector.select(path("a")); - - let mut it = selector.fingerprinted_ordered_search(&d).with_paths(); - assert_matches!( - it.next(), - Some((p, Ok(DirectoryEntry::Dir(..)))) => assert_eq!(p, path("a")) - ); - } - - Ok(()) -} - -#[test] -fn test_filter() -> anyhow::Result<()> { - let mut b = TestDirectoryBuilder::empty(); - b.insert(path("a/aa"), DirectoryEntry::Leaf(NopEntry))?; - b.insert(path("a/a"), DirectoryEntry::Leaf(NopEntry))?; - b.insert(path("b/b"), DirectoryEntry::Leaf(NopEntry))?; - b.insert(path("b/bb"), DirectoryEntry::Leaf(NopEntry))?; - b.insert(path("c/c"), DirectoryEntry::Leaf(NopEntry))?; - b.insert(path("c/cc"), DirectoryEntry::Leaf(NopEntry))?; - - let mut selector = DirectorySelector::empty(); - selector.select(path("a")); - selector.select(path("b/b")); - - selector.filter(&mut b)?; - - let mut it = b.ordered_walk().with_paths(); - - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("a")) - ); - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("a/a")) - ); - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("a/aa")) - ); - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("b")) - ); - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("b/b")) - ); - - assert_matches!(it.next(), None); - - Ok(()) -} - -#[test] -fn test_entry_walk() { - { - let e = DirectoryEntry::::Leaf(NopEntry); - let mut it = ordered_entry_walk(e.as_ref()); - - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p.get(), path("")) - ); - - assert_matches!(it.next(), None); - } - - { - let e = DirectoryEntry::<_, NopEntry>::Dir(TestDirectoryBuilder::empty()); - let mut it = ordered_entry_walk(e.as_ref()); - - assert_matches!(it.next(), None); - } -} - -#[test] -fn test_bounds() { - fn assert_impls_debug() {} - fn assert_impls_clone() {} - fn assert_impls_eq() {} - - assert_impls_debug::(); - assert_impls_clone::(); - assert_impls_eq::, NopEntry>>(); - assert_impls_eq::, NopEntry>>(); - assert_impls_eq::, NopEntry>>(); -} - -#[test] -fn test_mkdir() -> anyhow::Result<()> { - let mut b = TestDirectoryBuilder::empty(); - b.mkdir(path("foo/bar"))?; - b.mkdir(path("foo"))?; - - let mut it = b.ordered_walk().with_paths(); - - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("foo")) - ); - - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("foo/bar")) - ); - - assert_matches!(it.next(), None); - - Ok(()) -} - -#[test] -fn test_mkdir_overwrite() -> anyhow::Result<()> { - let mut b = TestDirectoryBuilder::empty(); - b.insert(path("a/b"), DirectoryEntry::Leaf(NopEntry))?; - - assert_matches!( - b.mkdir(path("a/b/c")), - Err(DirectoryMkdirError::CannotTraverseLeaf { path }) => { - assert_eq!(path.to_string(), "a/b"); - } - ); - - Ok(()) -} - -#[test] -fn test_directory_interner() -> anyhow::Result<()> { - let interner = DashMapDirectoryInterner::new(); - - let d1 = { - let mut b = TestDirectoryBuilder::empty(); - b.insert(path("a/b"), DirectoryEntry::Leaf(NopEntry))?; - b.fingerprint(&TestHasher).shared(&interner) - }; - - let d2 = { - let mut b = TestDirectoryBuilder::empty(); - b.insert(path("a/b"), DirectoryEntry::Leaf(NopEntry))?; - b.fingerprint(&TestHasher).shared(&interner) - }; - - assert!(d1.ptr_eq(&d2)); - - assert_eq!(interner.len(), 
2); - - drop(d1); - assert_eq!(interner.len(), 2); - - drop(d2); - assert_eq!(interner.len(), 0); - - Ok(()) -} - -#[test] -fn test_directory_interner_deep() -> anyhow::Result<()> { - let interner = DashMapDirectoryInterner::new(); - - let d1 = { - let mut b = TestDirectoryBuilder::empty(); - b.insert(path("a/b"), DirectoryEntry::Leaf(NopEntry))?; - b.fingerprint(&TestHasher).shared(&interner) - }; - - let _d2 = { - let mut b = TestDirectoryBuilder::empty(); - b.insert(path("b"), DirectoryEntry::Leaf(NopEntry))?; - b.fingerprint(&TestHasher).shared(&interner) - }; - - assert_eq!(interner.len(), 2); - - drop(d1); - - // Now we only have d2. - assert_eq!(interner.len(), 1); - - Ok(()) -} - -#[test] -fn test_filter_continues_on_error() -> anyhow::Result<()> { - let mut b = TestDirectoryBuilder::empty(); - b.insert(path("a/aa/aaa"), DirectoryEntry::Leaf(NopEntry))?; - b.insert(path("a/aa/bbb"), DirectoryEntry::Leaf(NopEntry))?; - b.insert(path("a/bb"), DirectoryEntry::Leaf(NopEntry))?; - b.insert(path("c"), DirectoryEntry::Leaf(NopEntry))?; - - let mut selector = DirectorySelector::empty(); - selector.select(path("a/aa")); - selector.select(path("c/d")); - - assert_matches!(selector.filter(&mut b), Err(..)); - - let mut it = b.ordered_walk().with_paths(); - - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("a")) - ); - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("a/aa")) - ); - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("a/aa/aaa")) - ); - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("a/aa/bbb")) - ); - assert_matches!( - it.next(), - Some((p, _)) => assert_eq!(p, path("c")) - ); - - assert_matches!(it.next(), None); - - Ok(()) -} diff --git a/app/buck2_core/src/directory/walk.rs b/app/buck2_core/src/directory/walk.rs deleted file mode 100644 index da0a4831bdefd..0000000000000 --- a/app/buck2_core/src/directory/walk.rs +++ /dev/null @@ -1,227 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use derivative::Derivative; - -use super::Directory; -use super::DirectoryEntries; -use super::DirectoryEntry; -use super::DirectoryIterator; -use super::DirectoryIteratorPathAccessor; -use super::DirectoryIteratorPathStack; -use super::FingerprintedDirectory; -use super::FingerprintedDirectoryEntries; -use super::FingerprintedOrderedDirectoryEntries; -use super::OrderedDirectoryEntries; -use crate::fs::paths::forward_rel_path::ForwardRelativePathBuf; - -macro_rules! 
impl_directory_walk { - ( - $walk_ty: ident, - $entry_walk_fn: ident, - $dir_ty: ident, - $entries_ty: ident, - $entries_method: ident, - $mod: ident, - ) => { - mod $mod { - use $crate::directory::DirectoryEntry; - use $crate::directory::DirectoryIterator; - use $crate::directory::DirectoryIteratorPathAccessor; - use $crate::directory::DirectoryIteratorPathStack; - - use super::$dir_ty; - use super::$entries_ty; - use super::DirectoryEntryWalk; - use crate::fs::paths::file_name::FileName; - - struct WalkFrame<'a, L, H> { - name: Option<&'a FileName>, - entries: $entries_ty<'a, L, H>, - } - - pub struct $walk_ty<'a, L, H> { - stack: Vec>, - } - - impl<'a, L, H> $walk_ty<'a, L, H> { - pub fn new(root: &'a D) -> Self - where - D: $dir_ty + ?Sized, - { - Self { - stack: vec![WalkFrame { - name: None, - entries: $entries_ty::from($dir_ty::$entries_method(root)), - }], - } - } - } - - impl<'a, L, H> DirectoryIterator for $walk_ty<'a, L, H> { - type PathStack = Self; - type Item = DirectoryEntry<&'a dyn $dir_ty, &'a L>; - - fn next<'b>( - &'b mut self, - ) -> Option<(DirectoryIteratorPathAccessor<'b, Self>, Self::Item)> { - loop { - let frame = self.stack.last_mut()?; - - if let Some((name, entry)) = frame.entries.next() { - let leaf_name = match entry { - DirectoryEntry::Dir(dir) => { - self.stack.push(WalkFrame { - name: Some(name), - entries: $entries_ty::from($dir_ty::$entries_method(dir)), - }); - None - } - DirectoryEntry::Leaf(..) => Some(name), - }; - - return Some(( - DirectoryIteratorPathAccessor { - leaf: leaf_name, - stack: self, - }, - entry, - )); - } - - self.stack.pop(); - } - } - } - - impl<'a, L, H> DirectoryIteratorPathStack for $walk_ty<'a, L, H> { - fn for_each_path<'this, F>(&'this self, mut f: F) - where - F: FnMut(&'this FileName), - { - let it = self.stack.iter().filter_map(|frame| match frame { - WalkFrame { name, .. 
} => name.as_deref(), - }); - - for path in it { - f(path); - } - } - } - - pub fn $entry_walk_fn<'a, D, L, H>( - entry: DirectoryEntry<&'a D, &'a L>, - ) -> DirectoryEntryWalk<'a, L, $walk_ty<'a, L, H>> - where - D: $dir_ty + ?Sized, - { - match entry { - DirectoryEntry::Dir(d) => DirectoryEntryWalk::Dir { - inner: $walk_ty::new(d), - }, - DirectoryEntry::Leaf(d) => DirectoryEntryWalk::Leaf { entry: Some(d) }, - } - } - } - - pub use $mod::$entry_walk_fn; - pub use $mod::$walk_ty; - }; -} - -pub enum DirectoryEntryWalk<'a, L, I> { - Dir { inner: I }, - Leaf { entry: Option<&'a L> }, -} - -impl<'a, D, L, I> DirectoryEntryWalk<'a, L, I> -where - I: DirectoryIterator>, - D: 'a, -{ - pub fn next<'this>( - &'this mut self, - ) -> Option<( - DirectoryEntryWalkPathAccessor<'this, ::PathStack>, - DirectoryEntry, - )> { - match self { - Self::Dir { inner } => { - let (accessor, item) = inner.next()?; - Some(( - DirectoryEntryWalkPathAccessor { - inner: Some(accessor), - }, - item, - )) - } - Self::Leaf { entry } => { - let entry = entry.take()?; - Some(( - DirectoryEntryWalkPathAccessor { inner: None }, - DirectoryEntry::Leaf(entry), - )) - } - } - } -} - -#[derive(Derivative)] -#[derivative(Debug(bound = "T: DirectoryIteratorPathStack"))] -pub struct DirectoryEntryWalkPathAccessor<'a, T> { - inner: Option>, -} - -impl<'a, T> DirectoryEntryWalkPathAccessor<'a, T> -where - T: DirectoryIteratorPathStack, -{ - pub fn get(&self) -> ForwardRelativePathBuf { - match self.inner.as_ref() { - Some(i) => i.get(), - None => ForwardRelativePathBuf::unchecked_new("".to_owned()), - } - } -} - -impl_directory_walk!( - FingerprintedUnorderedDirectoryWalk, - fingerprinted_unordered_entry_walk, - FingerprintedDirectory, - FingerprintedDirectoryEntries, - fingerprinted_entries, - fingerprinted_unordered_directory_walk_impl, -); - -impl_directory_walk!( - UnorderedDirectoryWalk, - unordered_entry_walk, - Directory, - DirectoryEntries, - entries, - unordered_directory_walk_impl, -); - -impl_directory_walk!( - FingerprintedOrderedDirectoryWalk, - fingerprinted_ordered_entry_walk, - FingerprintedDirectory, - FingerprintedOrderedDirectoryEntries, - fingerprinted_entries, - fingerprinted_ordered_directory_walk_impl, -); - -impl_directory_walk!( - OrderedDirectoryWalk, - ordered_entry_walk, - Directory, - OrderedDirectoryEntries, - entries, - ordered_directory_walk_impl, -); diff --git a/app/buck2_core/src/directory_digest.rs b/app/buck2_core/src/directory_digest.rs new file mode 100644 index 0000000000000..7ec522aedc212 --- /dev/null +++ b/app/buck2_core/src/directory_digest.rs @@ -0,0 +1,26 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::fmt::Debug; +use std::fmt::Display; +use std::hash::Hash; + +use allocative::Allocative; +use dupe::Dupe; + +pub trait DirectoryDigest: + Allocative + PartialEq + Eq + Hash + Clone + Dupe + Debug + Display +{ +} + +/// Indicates that this type of digest is suitable for use for interning. +/// +/// Specifically, this is not implemented for `NoDigest`, as that returns the same `()` digest for +/// all directories. 
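To illustrate why interning is gated on a separate marker trait rather than on `DirectoryDigest` itself, here is a rough, self-contained sketch of a digest-keyed interner. All names in it (`Digest`, `InternableDigest`, `Sha1`, `Interner`) are hypothetical, and it ignores the eviction-on-drop bookkeeping that the real `DashMapDirectoryInterner` performs; it only shows that an interner is sound when equal digests imply equal directories, which a unit digest like `NoDigest` cannot guarantee.

```rust
use std::collections::HashMap;
use std::hash::Hash;
use std::sync::{Arc, Mutex};

trait Digest: Eq + Hash + Clone {}

// Marker: only digests that uniquely identify directory contents qualify as
// interning keys. A unit digest would collapse every directory to one entry.
trait InternableDigest: Digest {}

#[derive(Clone, PartialEq, Eq, Hash)]
struct Sha1([u8; 20]);
impl Digest for Sha1 {}
impl InternableDigest for Sha1 {}

struct Interner<D, T> {
    map: Mutex<HashMap<D, Arc<T>>>,
}

impl<D: InternableDigest, T> Interner<D, T> {
    fn new() -> Self {
        Interner {
            map: Mutex::new(HashMap::new()),
        }
    }

    // Return the canonical shared value for this digest, inserting on first use.
    fn intern(&self, digest: D, value: T) -> Arc<T> {
        let mut map = self.map.lock().unwrap();
        map.entry(digest).or_insert_with(|| Arc::new(value)).clone()
    }
}

fn main() {
    let interner: Interner<Sha1, &str> = Interner::new();
    let a = interner.intern(Sha1([0; 20]), "dir");
    let b = interner.intern(Sha1([0; 20]), "dir");
    assert!(Arc::ptr_eq(&a, &b)); // equal digests share one node
}
```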
diff --git a/app/buck2_core/src/directory_digest.rs b/app/buck2_core/src/directory_digest.rs
new file mode 100644
index 0000000000000..7ec522aedc212
--- /dev/null
+++ b/app/buck2_core/src/directory_digest.rs
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::fmt::Debug;
+use std::fmt::Display;
+use std::hash::Hash;
+
+use allocative::Allocative;
+use dupe::Dupe;
+
+pub trait DirectoryDigest:
+    Allocative + PartialEq + Eq + Hash + Clone + Dupe + Debug + Display
+{
+}
+
+/// Indicates that this type of digest is suitable for use for interning.
+///
+/// Specifically, this is not implemented for `NoDigest`, as that returns the same `()` digest for
+/// all directories.
+pub trait InternableDirectoryDigest: DirectoryDigest {}
diff --git a/app/buck2_core/src/env.rs b/app/buck2_core/src/env.rs
new file mode 100644
index 0000000000000..93f6d94fe8891
--- /dev/null
+++ b/app/buck2_core/src/env.rs
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+pub mod __macro_refs;
+pub mod helper;
+pub mod registry;
diff --git a/app/buck2_core/src/env/__macro_refs.rs b/app/buck2_core/src/env/__macro_refs.rs
new file mode 100644
index 0000000000000..40b4672eead94
--- /dev/null
+++ b/app/buck2_core/src/env/__macro_refs.rs
@@ -0,0 +1,337 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+#![doc(hidden)]
+
+use std::str::FromStr;
+
+pub use linkme;
+
+pub fn convert_from_str<T>(v: &str) -> anyhow::Result<T>
+where
+    T: FromStr,
+    anyhow::Error: From<<T as FromStr>::Err>,
+{
+    Ok(T::from_str(v)?)
+}
+
+/// This macro is used to register environment variables that are used by Buck2.
+///
+/// The first argument to the macro must always be a string literal with the name of the environment
+/// variable.
+///
+/// Additionally, you can specify the following, comma separated:
+///
+/// - `type=<type>` - the Rust type that the environment variable should be converted to, using
+///   `FromStr::from_str`. Defaults to `&'static str` if not specified.
+/// - `default=<value>` - an expression for the default value to use if the environment variable is
+///   not set.
+/// - `converter=<converter>` - a function to use as an alternative to the `FromStr::from_str`
+///   conversion. Must have signature `fn(&str) -> Result<type>`
+/// - `applicability=<internal|testing>` - to indicate that the variable is not used in OSS or only
+///   for self-testing of buck2
+///
+/// The macro expands to an expression of type `buck2_error::Result<type>` if a default is set, and
+/// `buck2_error::Result<Option<type>>` otherwise.
+pub macro buck2_env {
+    ($var:expr, bool $(, $($rest:tt)*)?) => {{
+        let v: buck2_error::Result<bool> = $crate::env::__macro_refs::buck2_env!($var, type=bool, default=false, converter = |s| {
+            match s.to_lowercase().as_str() {
+                "1" | "true" => Ok(true),
+                "0" | "false" => Ok(false),
+                _ => Err(buck2_error::buck2_error!("Invalid bool value: {}", s)),
+            }
+        }, $($($rest)*)?);
+        v
+    }},
+    ($var:expr, type=$ty:ty, default=$default:expr, converter=$converter:expr $(, $($rest:tt)*)?) => {{
+        $crate::env::__macro_refs::parse2!(
+            (
+                var=$var,
+                parser=$converter,
+                stored_type=$ty,
+                processor=|x| x.copied().unwrap_or_else(|| $default),
+                output_type=$ty,
+                default_repr=std::option::Option::Some(stringify!($default)),
+            ),
+            $($($rest)*)?
+        )
+    }},
+    ($var:expr, type=$ty:ty, default=$default:expr $(, $($rest:tt)*)?) => {{
+        $crate::env::__macro_refs::parse2!(
+            (
+                var=$var,
+                parser=$crate::env::__macro_refs::convert_from_str,
+                stored_type=$ty,
+                processor=|x| x.copied().unwrap_or_else(|| $default),
+                output_type=$ty,
+                default_repr=std::option::Option::Some(stringify!($default)),
+            ),
+            $($($rest)*)?
+        )
+    }},
+    ($var:expr, type=$ty:ty, converter=$converter:expr $(, $($rest:tt)*)?) => {{
+        $crate::env::__macro_refs::parse2!(
+            (
+                var=$var,
+                parser=$converter,
+                stored_type=$ty,
+                processor=|x| x,
+                output_type=std::option::Option<&$ty>,
+                default_repr=std::option::Option::None,
+            ),
+            $($($rest)*)?
+        )
+    }},
+    ($var:expr, type=$ty:ty $(, $($rest:tt)*)?) => {{
+        $crate::env::__macro_refs::parse2!(
+            (
+                var=$var,
+                parser=$crate::env::__macro_refs::convert_from_str,
+                stored_type=$ty,
+                processor=|x| x.copied(),
+                output_type=std::option::Option<$ty>,
+                default_repr=std::option::Option::None,
+            ),
+            $($($rest)*)?
+        )
+    }},
+    ($var:expr $(, $($rest:tt)*)?) => {{
+        $crate::env::__macro_refs::parse2!(
+            (
+                var=$var,
+                parser=$crate::env::__macro_refs::convert_from_str,
+                stored_type=std::string::String,
+                processor=|x| x.map(|x| x.as_str()),
+                output_type=std::option::Option<&'static str>,
+                default_repr=std::option::Option::None,
+            ),
+            $($($rest)*)?
+        )
+    }},
+}
+
+/// Register env name to be shown in `buck2 help-env`.
+pub macro buck2_env_name($var:expr) {{
+    $crate::env::__macro_refs::register!(
+        $var,
+        ty = std::string::String,
+        default = std::option::Option::None,
+        applicability = $crate::env::registry::Applicability::All
+    );
+
+    $var
+}}
+
+#[allow(unused_macros)]
+macro parse2 {
+    (
+        $already_parsed:tt,
+        applicability=internal$(,)?
+    ) => {
+        $crate::env::__macro_refs::expand!($already_parsed, applicability=$crate::env::registry::Applicability::Internal,)
+    },
+    (
+        $already_parsed:tt,
+        applicability=testing$(,)?
+    ) => {
+        $crate::env::__macro_refs::expand!($already_parsed, applicability=$crate::env::registry::Applicability::Testing,)
+    },
+    (
+        $already_parsed:tt,
+        $(,)?
+    ) => {
+        $crate::env::__macro_refs::expand!($already_parsed, applicability=$crate::env::registry::Applicability::All,)
+    },
+}
+
+#[allow(unused_macros)]
+/// `parser` is `&str -> buck2_error::Result<$stored_type>`, `processor` is `Option<& $stored_type> -> $output_type`
+///
+/// The extra set of parentheses is a trick to let us pass things through `parse2` transparently
+macro expand(
+    (
+        var=$var:expr,
+        parser=$parser:expr,
+        stored_type=$stored_ty:ty,
+        processor=$processor:expr,
+        output_type=$output_ty:ty,
+        default_repr=$default_repr:expr,
+    ),
+    applicability=$applicability:expr,
+) {{
+    $crate::env::__macro_refs::register!(
+        $var,
+        ty = $stored_ty,
+        default = $default_repr,
+        applicability = $applicability
+    );
+    static ENV_HELPER: $crate::env::helper::EnvHelper<$stored_ty> =
+        $crate::env::helper::EnvHelper::with_converter_from_macro($var, $parser);
+    let v: buck2_error::Result<$output_ty> = ENV_HELPER.get().map($processor);
+    v
+}}
+
+macro register($var:expr, ty=$ty:ty, default=$default:expr, applicability=$applicability:expr) {{
+    use $crate::env::__macro_refs::linkme;
+    #[linkme::distributed_slice($crate::env::registry::ENV_INFO)]
+    #[linkme(crate = $crate::env::__macro_refs::linkme)]
+    static ENV_INFO: $crate::env::registry::EnvInfoEntry = $crate::env::registry::EnvInfoEntry {
+        name: $var,
+        ty: stringify!($ty),
+        default: $default,
+        applicability: $applicability,
+    };
+}}
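To make the documented forms concrete, here is a hypothetical call site for each shape the doc comment above describes. The variable names are invented for illustration; the expression types follow directly from the macro arms:

```rust
fn read_env_sketch() -> buck2_error::Result<()> {
    // bool shorthand: a missing variable defaults to false; "1"/"true"/"0"/"false" accepted.
    let enabled: bool = buck2_env!("BUCK2_EXAMPLE_ENABLED", bool)?;
    // Typed with a default: yields the type directly.
    let jobs: u32 = buck2_env!("BUCK2_EXAMPLE_JOBS", type=u32, default=8)?;
    // Untyped without a default: yields an Option of &'static str.
    let cache_dir: Option<&'static str> = buck2_env!("BUCK2_EXAMPLE_CACHE_DIR")?;
    let _ = (enabled, jobs, cache_dir);
    Ok(())
}
```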
+
+/// Code below returns anyhow::Error, it is used while we transition from anyhow to buck2_error in buck2/app
+/// TODO(minglunli): Delete the code below once we have fully transitioned to buck2_error
+
+/// This macro is used to register environment variables that are used by Buck2.
+///
+/// The first argument to the macro must always be a string literal with the name of the environment
+/// variable.
+///
+/// Additionally, you can specify the following, comma separated:
+///
+/// - `type=<type>` - the Rust type that the environment variable should be converted to, using
+///   `FromStr::from_str`. Defaults to `&'static str` if not specified.
+/// - `default=<value>` - an expression for the default value to use if the environment variable is
+///   not set.
+/// - `converter=<converter>` - a function to use as an alternative to the `FromStr::from_str`
+///   conversion. Must have signature `fn(&str) -> Result<type>`
+/// - `applicability=<internal|testing>` - to indicate that the variable is not used in OSS or only
+///   for self-testing of buck2
+///
+/// The macro expands to an expression of type `anyhow::Result<type>` if a default is set, and
+/// `anyhow::Result<Option<type>>` otherwise.
+pub macro buck2_env_anyhow {
+    ($var:expr, bool $(, $($rest:tt)*)?) => {{
+        let v: anyhow::Result<bool> = $crate::env::__macro_refs::buck2_env_anyhow!($var, type=bool, default=false, converter = |s| {
+            match s.to_lowercase().as_str() {
+                "1" | "true" => Ok(true),
+                "0" | "false" => Ok(false),
+                _ => Err(anyhow::anyhow!("Invalid bool value: {}", s)),
+            }
+        }, $($($rest)*)?);
+        v
+    }},
+    ($var:expr, type=$ty:ty, default=$default:expr, converter=$converter:expr $(, $($rest:tt)*)?) => {{
+        $crate::env::__macro_refs::parse_anyhow!(
+            (
+                var=$var,
+                parser=$converter,
+                stored_type=$ty,
+                processor=|x| x.copied().unwrap_or_else(|| $default),
+                output_type=$ty,
+                default_repr=std::option::Option::Some(stringify!($default)),
+            ),
+            $($($rest)*)?
+        )
+    }},
+    ($var:expr, type=$ty:ty, default=$default:expr $(, $($rest:tt)*)?) => {{
+        $crate::env::__macro_refs::parse_anyhow!(
+            (
+                var=$var,
+                parser=$crate::env::__macro_refs::convert_from_str,
+                stored_type=$ty,
+                processor=|x| x.copied().unwrap_or_else(|| $default),
+                output_type=$ty,
+                default_repr=std::option::Option::Some(stringify!($default)),
+            ),
+            $($($rest)*)?
+        )
+    }},
+    ($var:expr, type=$ty:ty, converter=$converter:expr $(, $($rest:tt)*)?) => {{
+        $crate::env::__macro_refs::parse_anyhow!(
+            (
+                var=$var,
+                parser=$converter,
+                stored_type=$ty,
+                processor=|x| x,
+                output_type=std::option::Option<&$ty>,
+                default_repr=std::option::Option::None,
+            ),
+            $($($rest)*)?
+        )
+    }},
+    ($var:expr, type=$ty:ty $(, $($rest:tt)*)?) => {{
+        $crate::env::__macro_refs::parse_anyhow!(
+            (
+                var=$var,
+                parser=$crate::env::__macro_refs::convert_from_str,
+                stored_type=$ty,
+                processor=|x| x.copied(),
+                output_type=std::option::Option<$ty>,
+                default_repr=std::option::Option::None,
+            ),
+            $($($rest)*)?
+        )
+    }},
+    ($var:expr $(, $($rest:tt)*)?) => {{
+        $crate::env::__macro_refs::parse_anyhow!(
+            (
+                var=$var,
+                parser=$crate::env::__macro_refs::convert_from_str,
+                stored_type=std::string::String,
+                processor=|x| x.map(|x| x.as_str()),
+                output_type=std::option::Option<&'static str>,
+                default_repr=std::option::Option::None,
+            ),
+            $($($rest)*)?
+        )
+    }},
+}
+
+macro parse_anyhow {
+    (
+        $already_parsed:tt,
+        applicability=internal$(,)?
+    ) => {
+        $crate::env::__macro_refs::expand_anyhow!($already_parsed, applicability=$crate::env::registry::Applicability::Internal,)
+    },
+    (
+        $already_parsed:tt,
+        applicability=testing$(,)?
+    ) => {
+        $crate::env::__macro_refs::expand_anyhow!($already_parsed, applicability=$crate::env::registry::Applicability::Testing,)
+    },
+    (
+        $already_parsed:tt,
+        $(,)?
+    ) => {
+        $crate::env::__macro_refs::expand_anyhow!($already_parsed, applicability=$crate::env::registry::Applicability::All,)
+    },
+}
+
+/// `parser` is `&str -> anyhow::Result<$stored_type>`, `processor` is `Option<& $stored_type> -> $output_type`
+///
+/// The extra set of parentheses is a trick to let us pass things through `parse2` transparently
+macro expand_anyhow(
+    (
+        var=$var:expr,
+        parser=$parser:expr,
+        stored_type=$stored_ty:ty,
+        processor=$processor:expr,
+        output_type=$output_ty:ty,
+        default_repr=$default_repr:expr,
+    ),
+    applicability=$applicability:expr,
+) {{
+    $crate::env::__macro_refs::register!(
+        $var,
+        ty = $stored_ty,
+        default = $default_repr,
+        applicability = $applicability
+    );
+    static ENV_HELPER: $crate::env::helper::EnvHelper<$stored_ty> =
+        $crate::env::helper::EnvHelper::with_converter_from_macro($var, $parser);
+    let v: anyhow::Result<$output_ty> = ENV_HELPER.get_anyhow().map($processor);
+    v
+}}
diff --git a/app/buck2_core/src/env/helper.rs b/app/buck2_core/src/env/helper.rs
new file mode 100644
index 0000000000000..3f625782246aa
--- /dev/null
+++ b/app/buck2_core/src/env/helper.rs
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::env;
+use std::env::VarError;
+use std::sync::OnceLock;
+
+use buck2_error::buck2_error;
+use buck2_error::BuckErrorContext;
+
+pub struct EnvHelper<T: 'static> {
+    convert: fn(&str) -> anyhow::Result<T>,
+    var: &'static str,
+    cell: OnceLock<Option<T>>,
+}
+
+impl<T: 'static> EnvHelper<T> {
+    pub const fn with_converter_from_macro(
+        var: &'static str,
+        convert: fn(&str) -> anyhow::Result<T>,
+    ) -> Self {
+        Self {
+            convert,
+            var,
+            cell: OnceLock::new(),
+        }
+    }
+
+    // This code does not really require `'static` lifetime.
+    // `EnvHelper` caches computed value. When it is used like
+    // `EnvHelper::new(...).get(...)`, it performs unnecessary work.
+    // To avoid it, we require `'static` lifetime, to force placing `EnvHelper` in static variable.
+    pub fn get(&'static self) -> buck2_error::Result<Option<&T>> {
+        let var = self.var;
+        let convert = self.convert;
+
+        self.cell
+            .get_or_try_init(move || match env::var(var) {
+                Ok(v) => {
+                    tracing::info!("Env override found: ${} = {}", var, v);
+                    Ok(Some((convert)(&v).map_err(anyhow::Error::from)?))
+                }
+                Err(VarError::NotPresent) => Ok(None),
+                Err(VarError::NotUnicode(..)) => {
+                    Err(buck2_error::buck2_error!([], "Variable is not unicode"))
+                }
+            })
+            .map(Option::as_ref)
+            .with_buck_error_context(|| format!("Invalid value for ${}", var))
+    }
+
+    pub fn get_anyhow(&'static self) -> anyhow::Result<Option<&T>> {
+        self.get().map_err(anyhow::Error::from)
+    }
+}
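As the comment on `get` notes, the `'static` receiver exists to force callers into the pattern the macros generate: a `static` helper that parses the variable once and caches the result. A hand-written equivalent might look like this (the variable name is invented; `EnvHelper` and `convert_from_str` are the items added in this diff):

```rust
static EXAMPLE_TIMEOUT: EnvHelper<u64> =
    EnvHelper::with_converter_from_macro("BUCK2_EXAMPLE_TIMEOUT", convert_from_str::<u64>);

fn example_timeout_ms() -> buck2_error::Result<Option<u64>> {
    // The first call reads and parses the variable; later calls return the cached value.
    Ok(EXAMPLE_TIMEOUT.get()?.copied())
}
```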
diff --git a/app/buck2_core/src/env/registry.rs b/app/buck2_core/src/env/registry.rs
new file mode 100644
index 0000000000000..30af1aaf5dd5a
--- /dev/null
+++ b/app/buck2_core/src/env/registry.rs
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use dupe::Dupe;
+
+#[derive(Debug, PartialEq, Eq, Ord, PartialOrd, Copy, Clone, Dupe)]
+pub enum Applicability {
+    All,
+    /// Not meaningful in open source
+    Internal,
+    /// Only used in self-tests of buck2
+    Testing,
+}
+
+/// Environment variable description.
+#[derive(Debug, PartialEq, Eq, Ord, PartialOrd, Copy, Clone, Dupe)]
+pub struct EnvInfoEntry {
+    pub name: &'static str,
+    pub ty: &'static str,
+    pub default: Option<&'static str>,
+    #[allow(dead_code)] // TODO(JakobDegen): Use next diff
+    pub applicability: Applicability,
+}
+
+impl EnvInfoEntry {
+    pub fn ty_short(&self) -> &'static str {
+        self.ty.rfind(':').map_or(self.ty, |i| &self.ty[i + 2..])
+    }
+}
+
+#[linkme::distributed_slice]
+pub static ENV_INFO: [EnvInfoEntry];
+
+#[cfg(test)]
+mod tests {
+    use crate::buck2_env_anyhow;
+    use crate::env::registry::Applicability;
+    use crate::env::registry::EnvInfoEntry;
+    use crate::env::registry::ENV_INFO;
+
+    #[test]
+    fn test_env_info() {
+        let _ignore = buck2_env_anyhow!("TEST_VAR_1", applicability = internal);
+        let _ignore = buck2_env_anyhow!("TEST_VAR_2", type = u32, default=20);
+        let var_1 = ENV_INFO.iter().find(|e| e.name == "TEST_VAR_1").unwrap();
+        let var_2 = ENV_INFO.iter().find(|e| e.name == "TEST_VAR_2").unwrap();
+        assert_eq!(
+            &EnvInfoEntry {
+                name: "TEST_VAR_1",
+                ty: "std :: string :: String",
+                default: None,
+                applicability: Applicability::Internal,
+            },
+            var_1
+        );
+        assert_eq!(
+            &EnvInfoEntry {
+                name: "TEST_VAR_2",
+                ty: "u32",
+                default: Some("20"),
+                applicability: Applicability::All,
+            },
+            var_2
+        );
+    }
+
+    #[test]
+    fn test_ty_short() {
+        let _ignore = buck2_env_anyhow!("TEST_VAR_TY_SHORT");
+        let var = ENV_INFO
+            .iter()
+            .find(|e| e.name == "TEST_VAR_TY_SHORT")
+            .unwrap();
+        assert_eq!("String", var.ty_short());
+    }
+}
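Because `ENV_INFO` is a linkme distributed slice, every crate that expands one of the macros above contributes an `EnvInfoEntry` at link time, so a `buck2 help-env`-style listing only has to iterate the slice. A rough sketch of such a listing (the function is hypothetical, not part of this diff):

```rust
fn print_env_info() {
    // `EnvInfoEntry` derives Ord, so the registry can be listed deterministically.
    let mut entries: Vec<&EnvInfoEntry> = ENV_INFO.iter().collect();
    entries.sort();
    for e in entries {
        // e.g. "BUCK2_HARD_ERROR: String (default: None)"
        println!("{}: {} (default: {:?})", e.name, e.ty_short(), e.default);
    }
}
```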
diff --git a/app/buck2_core/src/env_helper.rs b/app/buck2_core/src/env_helper.rs
deleted file mode 100644
index 68c88944332bb..0000000000000
--- a/app/buck2_core/src/env_helper.rs
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */
-
-use std::env;
-use std::env::VarError;
-use std::str::FromStr;
-use std::sync::OnceLock;
-
-use anyhow::Context;
-
-pub struct EnvHelper<T: 'static> {
-    convert: fn(&str) -> anyhow::Result<T>,
-    var: &'static str,
-    cell: OnceLock<Option<T>>,
-}
-
-impl<T: 'static> EnvHelper<T> {
-    pub const fn with_converter(var: &'static str, convert: fn(&str) -> anyhow::Result<T>) -> Self {
-        Self {
-            convert,
-            var,
-            cell: OnceLock::new(),
-        }
-    }
-
-    pub const fn new(var: &'static str) -> Self
-    where
-        T: FromStr,
-        anyhow::Error: From<<T as FromStr>::Err>,
-    {
-        fn convert_from_str<T>(v: &str) -> anyhow::Result<T>
-        where
-            T: FromStr,
-            anyhow::Error: From<<T as FromStr>::Err>,
-        {
-            Ok(T::from_str(v)?)
-        }
-
-        Self::with_converter(var, convert_from_str::<T>)
-    }
-
-    // This code does not really require `'static` lifetime.
-    // `EnvHelper` caches computed value. When it is used like
-    // `EnvHelper::new(...).get(...)`, it performs unnecessary work.
-    // To avoid it, we require `'static` lifetime, to force placing `EnvHelper` in static variable.
-    pub fn get(&'static self) -> anyhow::Result<Option<&T>> {
-        let var = self.var;
-        let convert = self.convert;
-
-        self.cell
-            .get_or_try_init(move || match env::var(var) {
-                Ok(v) => {
-                    tracing::info!("Env override found: ${} = {}", var, v);
-                    Ok(Some((convert)(&v).map_err(anyhow::Error::from)?))
-                }
-                Err(VarError::NotPresent) => Ok(None),
-                Err(VarError::NotUnicode(..)) => Err(anyhow::anyhow!("Variable is not unicode")),
-            })
-            .map(Option::as_ref)
-            .with_context(|| format!("Invalid value for ${}", var))
-    }
-
-    pub fn get_copied(&'static self) -> anyhow::Result<Option<T>>
-    where
-        T: Copy,
-    {
-        Ok(self.get()?.copied())
-    }
-}
diff --git a/app/buck2_core/src/error.rs b/app/buck2_core/src/error.rs
index 07bca6c3e1c1e..bd953b3600288 100644
--- a/app/buck2_core/src/error.rs
+++ b/app/buck2_core/src/error.rs
@@ -14,16 +14,15 @@ use std::sync::Arc;
 use std::sync::Mutex;
 use std::sync::OnceLock;
 
-use anyhow::Context;
 use arc_swap::ArcSwapOption;
+use buck2_error::BuckErrorContext;
 use starlark_map::small_set::SmallSet;
-use thiserror::Error;
 
-use crate::env_helper::EnvHelper;
+use crate::env::__macro_refs::buck2_env_anyhow;
 use crate::is_open_source;
 
 type StructuredErrorHandler = Box<
-    dyn for<'a> Fn(&'a str, &anyhow::Error, (&'a str, u32, u32), StructuredErrorOptions)
+    dyn for<'a> Fn(&'a str, &buck2_error::Error, (&'a str, u32, u32), StructuredErrorOptions)
         + Send
         + Sync
        + 'static,
@@ -31,7 +30,9 @@ type StructuredErrorHandler = Box<
 
 static HANDLER: OnceLock<StructuredErrorHandler> = OnceLock::new();
 
-pub static BUCK2_HARD_ERROR_ENV_VAR: EnvHelper<String> = EnvHelper::new("BUCK2_HARD_ERROR");
+pub fn buck2_hard_error_env() -> anyhow::Result<Option<&'static str>> {
+    buck2_env_anyhow!("BUCK2_HARD_ERROR")
+}
 
 static HARD_ERROR_CONFIG: HardErrorConfigHolder = HardErrorConfigHolder {
     config: ArcSwapOption::const_empty(),
@@ -39,16 +40,20 @@ static HARD_ERROR_CONFIG: HardErrorConfigHolder = HardErrorConfigHolder {
 
 static ALL_SOFT_ERROR_COUNTERS: Mutex<Vec<&'static AtomicUsize>> = Mutex::new(Vec::new());
 
-/// Throw a "soft_error" i.e. one that is destined to become a hard error
-/// in the near future. The macro lives in this crate to allow it be
-/// made available everywhere. Calling programs are responsible for
-/// calling initialize() to provide a handler for logging these soft_errors.
+/// Throw a "soft_error", i.e. a non-fatal error logged to logview.
+/// Errors will also be logged to stderr as warnings to the user, unless `quiet=true` is passed.
+/// Logview will generate tasks for each error category, unless `task=false` is passed.
+/// If `deprecation=true` this error should ideally become a hard error in the future.
+///
+/// The macro lives in this crate to allow it to be made available everywhere.
+/// Calling programs are responsible for calling initialize() to provide a handler for
+/// logging these soft_errors.
 ///
 /// You should pass two arguments:
 ///
 /// * The category string that will remain constant and identifies this specific soft error
 ///   (used to report as a key).
-/// * The error is an `anyhow::Error` will in the future will be propagated as the error.
+/// * The error is a `buck2_error::Error`.
 ///
 /// Soft errors from Meta internal runs can be viewed
 /// [in logview](https://www.internalfb.com/logview/overview/buck2).
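For orientation, a typical call site of the `soft_error!` macro that fronts `handle_soft_error` (the macro is defined alongside this code but does not appear in this diff, so its exact options syntax is assumed here) might look roughly like:

```rust
// Sketch only; `soft_error!` and the two-argument form follow the doc comment above.
fn warn_on_legacy_layout(uses_legacy: bool) -> buck2_error::Result<()> {
    if uses_legacy {
        // Logged as a soft error; upgraded to a hard failure under $BUCK2_HARD_ERROR.
        soft_error!(
            "legacy_layout",
            buck2_error::buck2_error!("legacy layout is deprecated")
        )?;
    }
    Ok(())
}
```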
@@ -119,13 +124,13 @@ fn hard_error_config() -> anyhow::Result<Arc<HardErrorConfig>> {
         return Ok(config);
     }
 
-    let config = BUCK2_HARD_ERROR_ENV_VAR.get()?.map_or("", |s| s.as_str());
+    let config = buck2_hard_error_env()?.unwrap_or_default();
     let config = HardErrorConfig::from_str(config)?;
     HARD_ERROR_CONFIG.config.store(Some(Arc::new(config)));
     HARD_ERROR_CONFIG
         .config
         .load_full()
-        .context("Just stored a value (internal error)")
+        .internal_error_anyhow("Just stored a value")
 }
 
 pub fn reload_hard_error_config(var_value: &str) -> anyhow::Result<()> {
@@ -137,9 +142,16 @@ pub struct StructuredErrorOptions {
     pub quiet: bool,
     /// Create a task for this error.
     pub task: bool,
+    pub deprecation: bool,
     pub daemon_in_memory_state_is_corrupted: bool,
     pub daemon_materializer_state_is_corrupted: bool,
     pub action_cache_is_corrupted: bool,
+    // By default, we only get a handful of traces per error category in Logview.
+    // This key, if specified, enables logging one trace per unique key using
+    // the "trace cut" feature of Logview. Note that the dimensionality of this
+    // key must not be too large otherwise it can bring significant capacity cost
+    // and may even bring down Logview.
+    pub low_cardinality_key_for_additional_logview_samples: Option<Cow<'static, str>>,
 }
 
 impl Default for StructuredErrorOptions {
@@ -147,9 +159,11 @@ impl Default for StructuredErrorOptions {
         Self {
             quiet: false,
             task: true,
+            deprecation: false,
             daemon_in_memory_state_is_corrupted: false,
             daemon_materializer_state_is_corrupted: false,
             action_cache_is_corrupted: false,
+            low_cardinality_key_for_additional_logview_samples: None,
         }
     }
 }
@@ -158,12 +172,12 @@ impl Default for StructuredErrorOptions {
 #[doc(hidden)]
 pub fn handle_soft_error(
     category: &str,
-    err: anyhow::Error,
+    err: buck2_error::Error,
     count: &'static AtomicUsize,
     once: &std::sync::Once,
     loc: (&'static str, u32, u32),
     options: StructuredErrorOptions,
-) -> anyhow::Result<anyhow::Error> {
+) -> Result<buck2_error::Error, buck2_error::Error> {
     validate_category(category)?;
 
     if cfg!(test) {
@@ -183,7 +197,9 @@ pub fn handle_soft_error(
     }
 
     if hard_error_config()?.should_hard_error(category) {
-        return Err(err.context("Upgraded warning to failure via $BUCK2_HARD_ERROR"));
+        return Err(err
+            .context("Upgraded warning to failure via $BUCK2_HARD_ERROR")
+            .into());
     }
 
     if is_open_source() {
@@ -273,11 +289,14 @@ impl HardErrorConfigHolder {
     }
 }
 
-#[derive(Error, Debug)]
-#[error("Invalid hard error config: `{0}`")]
+#[derive(buck2_error::Error, Debug)]
+#[error(
    "Invalid hard error config: `{0}`\n\
    Valid examples: empty, `true`, `false`, `only=category1,category2`"
+)]
 struct InvalidHardErrorConfig(String);
 
-#[derive(Error, Debug)]
+#[derive(buck2_error::Error, Debug)]
 enum InvalidSoftError {
     #[error("Invalid category, must be lower_snake_case, got `{0}`")]
     InvalidCategory(String),
@@ -310,7 +329,6 @@ pub(crate) mod tests {
     use assert_matches::assert_matches;
 
     use super::*;
-    use crate::error::HardErrorConfig;
 
     #[test]
     fn test_hard_error() -> anyhow::Result<()> {
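The expanded `InvalidHardErrorConfig` message enumerates the grammar `HardErrorConfig::from_str` accepts from `$BUCK2_HARD_ERROR`. A hypothetical illustration of what each documented form means (meanings inferred from the message and `should_hard_error` above):

```rust
// Sketch only: the accepted values, per the new error message.
for (value, meaning) in [
    ("", "default behavior: soft errors stay soft"),
    ("true", "upgrade every soft error to a hard error"),
    ("false", "never upgrade soft errors"),
    ("only=category1,category2", "upgrade only the listed categories"),
] {
    println!("BUCK2_HARD_ERROR={value:?} -> {meaning}");
}
```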
diff --git a/app/buck2_core/src/execution_types/mod.rs b/app/buck2_core/src/execution_types.rs
similarity index 100%
rename from app/buck2_core/src/execution_types/mod.rs
rename to app/buck2_core/src/execution_types.rs
diff --git a/app/buck2_core/src/execution_types/execution.rs b/app/buck2_core/src/execution_types/execution.rs
index b4dd14405374d..77f6e1a2ee085 100644
--- a/app/buck2_core/src/execution_types/execution.rs
+++ b/app/buck2_core/src/execution_types/execution.rs
@@ -13,7 +13,6 @@ use allocative::Allocative;
 use dupe::Dupe;
 use indent_write::indentable::Indentable;
 use itertools::Itertools;
-use thiserror::Error;
 
 use crate::configuration::compatibility::IncompatiblePlatformReason;
 use crate::configuration::compatibility::IncompatiblePlatformReasonCause;
@@ -21,7 +20,7 @@ use crate::configuration::data::ConfigurationData;
 use crate::configuration::pair::ConfigurationNoExec;
 use crate::execution_types::executor_config::CommandExecutorConfig;
 use crate::target::configured_target_label::ConfiguredTargetLabel;
-use crate::target::label::TargetLabel;
+use crate::target::label::label::TargetLabel;
 
 /// An execution platform is used for the execution deps of a target, those dependencies that
 /// need to be invoked as part of a build action or otherwise need to be configured against the
@@ -143,7 +142,8 @@ impl std::fmt::Display for ExecutionPlatformIncompatibleReason {
     }
 }
 
-#[derive(Debug, Error)]
+#[derive(Debug, buck2_error::Error)]
+#[buck2(input)]
 pub enum ExecutionPlatformError {
     // .indented() losing the alternate flag that we want to use to format the reason so we need to explicitly do that.
     #[error("No compatible execution platform.\n{}", .0.iter().map(|(id, reason)| format!("  `{}` skipped because:\n{}", id, format!("{:#}", reason).indented("    "))).join("\n"))]
diff --git a/app/buck2_core/src/execution_types/execution_platforms.rs b/app/buck2_core/src/execution_types/execution_platforms.rs
index e3556d7f739cc..1df5b17939083 100644
--- a/app/buck2_core/src/execution_types/execution_platforms.rs
+++ b/app/buck2_core/src/execution_types/execution_platforms.rs
@@ -12,6 +12,7 @@ use std::sync::Arc;
 use allocative::Allocative;
 
 use crate::execution_types::execution::ExecutionPlatform;
+use crate::target::label::label::TargetLabel;
 
 pub type ExecutionPlatforms = Arc<ExecutionPlatformsData>;
 
@@ -24,18 +25,28 @@ pub enum ExecutionPlatformFallback {
 
 #[derive(Debug, Allocative)]
 pub struct ExecutionPlatformsData {
+    execution_platforms_target: TargetLabel,
     platforms: Vec<ExecutionPlatform>,
     fallback: ExecutionPlatformFallback,
 }
 
 impl ExecutionPlatformsData {
-    pub fn new(platforms: Vec<ExecutionPlatform>, fallback: ExecutionPlatformFallback) -> Self {
+    pub fn new(
+        execution_platforms_target: TargetLabel,
+        platforms: Vec<ExecutionPlatform>,
+        fallback: ExecutionPlatformFallback,
+    ) -> Self {
         Self {
+            execution_platforms_target,
            platforms,
            fallback,
        }
    }
 
+    pub fn execution_platforms_target(&self) -> &TargetLabel {
+        &self.execution_platforms_target
+    }
+
     pub fn candidates(&self) -> impl Iterator<Item = &ExecutionPlatform> {
         self.platforms.iter()
     }
diff --git a/app/buck2_core/src/execution_types/executor_config.rs b/app/buck2_core/src/execution_types/executor_config.rs
index 7406c09ab209a..4fe72e4932d8f 100644
--- a/app/buck2_core/src/execution_types/executor_config.rs
+++ b/app/buck2_core/src/execution_types/executor_config.rs
@@ -7,7 +7,6 @@
  * of this source tree.
  */
 
-use std::fmt::Display;
 use std::fmt::Formatter;
 use std::hash::Hash;
 use std::hash::Hasher;
@@ -15,24 +14,85 @@ use std::str::FromStr;
 use std::sync::Arc;
 
 use allocative::Allocative;
+use anyhow::Context;
+use buck2_util::hash::BuckHasher;
 use derive_more::Display;
 use dupe::Dupe;
-use internment_tweaks::Intern;
-use internment_tweaks::StaticInterner;
+use itertools::Itertools;
 use once_cell::sync::Lazy;
+use starlark_map::small_map::SmallMap;
 use starlark_map::sorted_map::SortedMap;
+use static_interner::Intern;
+use static_interner::Interner;
 
-#[derive(Debug, Default, Eq, Hash, PartialEq, Clone, Dupe, Allocative)]
+#[derive(Debug, Eq, Hash, PartialEq, Clone, Dupe, Allocative)]
 pub struct LocalExecutorOptions {
     pub use_persistent_workers: bool,
 }
 
+impl Default for LocalExecutorOptions {
+    fn default() -> Self {
+        Self {
+            use_persistent_workers: true,
+        }
+    }
+}
+
+#[derive(Debug, Eq, Hash, PartialEq, Clone, Allocative)]
+pub struct RemoteEnabledExecutorOptions {
+    pub executor: RemoteEnabledExecutor,
+    pub re_properties: RePlatformFields,
+    pub re_use_case: RemoteExecutorUseCase,
+    pub re_action_key: Option<String>,
+    pub cache_upload_behavior: CacheUploadBehavior,
+    pub remote_cache_enabled: bool,
+    pub remote_dep_file_cache_enabled: bool,
+    pub dependencies: Vec<RemoteExecutorDependency>,
+}
+
+#[derive(Debug, buck2_error::Error)]
+enum RemoteExecutorDependencyErrors {
+    #[error("RE dependency requires `{0}` to be set")]
+    MissingField(&'static str),
+    #[error("too many fields set for RE dependency: `{0}`")]
+    UnsupportedFields(String),
+}
+
+/// A Remote Action can specify a list of dependencies that are required before starting the execution `https://fburl.com/wiki/offzl3ox`
+#[derive(Debug, Eq, PartialEq, Clone, Hash, Allocative)]
+pub struct RemoteExecutorDependency {
+    /// The SMC tier that the Remote Executor will query to try to acquire the dependency
+    pub smc_tier: String,
+    /// The id of the dependency to acquire
+    pub id: String,
+}
+
+impl RemoteExecutorDependency {
+    pub fn parse(dep_map: SmallMap<&str, &str>) -> anyhow::Result<RemoteExecutorDependency> {
+        let smc_tier = dep_map
+            .get("smc_tier")
+            .context(RemoteExecutorDependencyErrors::MissingField("smc_tier"))?;
+        let id = dep_map
+            .get("id")
+            .context(RemoteExecutorDependencyErrors::MissingField("id"))?;
+        if dep_map.len() > 2 {
+            return Err(RemoteExecutorDependencyErrors::UnsupportedFields(
+                dep_map.keys().join(", "),
+            )
+            .into());
+        }
+        Ok(RemoteExecutorDependency {
+            smc_tier: smc_tier.to_string(),
+            id: id.to_string(),
+        })
+    }
+}
+
 #[derive(Debug, Eq, PartialEq, Copy, Clone, Dupe, Display, Allocative)]
 pub struct RemoteExecutorUseCase(Intern<String>);
 
 impl RemoteExecutorUseCase {
     pub fn new(use_case: String) -> Self {
-        static USE_CASE_INTERNER: StaticInterner<String> = StaticInterner::new();
+        static USE_CASE_INTERNER: Interner<String, BuckHasher> = Interner::new();
         Self(USE_CASE_INTERNER.intern(use_case))
     }
 
@@ -57,10 +117,19 @@ impl Hash for RemoteExecutorUseCase {
     }
 }
 
+impl FromStr for RemoteExecutorUseCase {
+    type Err = anyhow::Error;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        Ok(RemoteExecutorUseCase::new(s.to_owned()))
+    }
+}
+
 #[derive(Debug, Default, Eq, PartialEq, Clone, Hash, Allocative)]
 pub struct RemoteExecutorOptions {
     pub re_max_input_files_bytes: Option<u64>,
     pub re_max_queue_time_ms: Option<u64>,
+    pub re_resource_units: Option<i64>,
 }
 
 /// The actual executor portion of a RemoteEnabled executor. It's possible for a RemoteEnabled
@@ -68,11 +137,11 @@ pub struct RemoteExecutorOptions {
 /// with a RE backend for caching".
 #[derive(Display, Debug, Eq, PartialEq, Clone, Hash, Allocative)]
 pub enum RemoteEnabledExecutor {
-    #[display(fmt = "local")]
+    #[display("local")]
     Local(LocalExecutorOptions),
-    #[display(fmt = "remote")]
+    #[display("remote")]
     Remote(RemoteExecutorOptions),
-    #[display(fmt = "hybrid")]
+    #[display("hybrid")]
     Hybrid {
         local: LocalExecutorOptions,
         remote: RemoteExecutorOptions,
@@ -80,6 +149,12 @@ pub enum RemoteEnabledExecutor {
     },
 }
 
+/// Normalized `remote_execution::Platform`. Also implements `Eq`, `Hash`.
+#[derive(Default, Debug, Clone, PartialEq, Eq, Hash, Allocative)]
+pub struct RePlatformFields {
+    pub properties: Arc<SortedMap<String, String>>,
+}
+
 #[derive(Debug, Eq, PartialEq, Clone, Hash, Allocative)]
 pub enum Executor {
     /// This executor only runs local commands.
@@ -87,15 +162,7 @@ pub enum Executor {
 
     /// This executor interacts with a RE backend. It may use that to read or write to caches, or
     /// to execute commands.
-    RemoteEnabled {
-        executor: RemoteEnabledExecutor,
-        re_properties: SortedMap<String, String>,
-        re_use_case: RemoteExecutorUseCase,
-        re_action_key: Option<String>,
-        cache_upload_behavior: CacheUploadBehavior,
-        remote_cache_enabled: bool,
-        remote_dep_file_cache_enabled: bool,
-    },
+    RemoteEnabled(RemoteEnabledExecutorOptions),
 }
 
 impl Display for Executor {
@@ -108,27 +175,19 @@ impl Display for Executor {
                     options.use_persistent_workers
                 )
             }
-            Self::RemoteEnabled {
-                executor,
-                re_properties: _,
-                re_use_case: _,
-                re_action_key: _,
-                cache_upload_behavior,
-                remote_cache_enabled,
-                remote_dep_file_cache_enabled,
-            } => {
-                let cache = match remote_cache_enabled {
+            Self::RemoteEnabled(options) => {
+                let cache = match options.remote_cache_enabled {
                     true => "enabled",
                     false => "disabled",
                 };
-                let dep_file_cache = match remote_dep_file_cache_enabled {
+                let dep_file_cache = match options.remote_dep_file_cache_enabled {
                     true => "enabled",
                     false => "disabled",
                 };
                 write!(
                     f,
                     "RemoteEnabled + executor {} + remote cache {} + cache upload {} + remote dep file cache {}",
-                    executor, cache, cache_upload_behavior, dep_file_cache
+                    options.executor, cache, options.cache_upload_behavior, dep_file_cache
                 )
             }
         }
@@ -188,9 +247,9 @@ impl Default for OutputPathsBehavior {
 
 #[derive(Display, Debug, Eq, PartialEq, Clone, Copy, Dupe, Hash, Allocative)]
 pub enum CacheUploadBehavior {
-    #[display(fmt = "enabled")]
+    #[display("enabled")]
     Enabled { max_bytes: Option<u64> },
-    #[display(fmt = "disabled")]
+    #[display("disabled")]
     Disabled,
 }
 
@@ -206,7 +265,7 @@ pub struct CommandGenerationOptions {
     pub output_paths_behavior: OutputPathsBehavior,
 }
 
-#[derive(Debug, Eq, PartialEq, Hash, Allocative)]
+#[derive(Debug, Eq, PartialEq, Hash, Allocative, Clone)]
 pub struct CommandExecutorConfig {
     pub executor: Executor,
     pub options: CommandGenerationOptions,
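`RemoteExecutorDependency::parse` accepts exactly the two keys `smc_tier` and `id` and rejects anything extra. A hypothetical call, with invented tier and id values:

```rust
use starlark_map::small_map::SmallMap;

fn parse_dep_sketch() -> anyhow::Result<()> {
    let mut dep_map: SmallMap<&str, &str> = SmallMap::new();
    dep_map.insert("smc_tier", "example_tier");
    dep_map.insert("id", "example_id");
    let dep = RemoteExecutorDependency::parse(dep_map)?;
    assert_eq!(dep.smc_tier, "example_tier");
    // A third key such as "ttl" would instead fail with UnsupportedFields("smc_tier, id, ttl").
    Ok(())
}
```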
diff --git a/app/buck2_core/src/fs/mod.rs b/app/buck2_core/src/fs.rs
similarity index 100%
rename from app/buck2_core/src/fs/mod.rs
rename to app/buck2_core/src/fs.rs
diff --git a/app/buck2_core/src/fs/artifact_path_resolver.rs b/app/buck2_core/src/fs/artifact_path_resolver.rs
index 4c5a1e6e40315..a8b7987252b87 100644
--- a/app/buck2_core/src/fs/artifact_path_resolver.rs
+++ b/app/buck2_core/src/fs/artifact_path_resolver.rs
@@ -8,18 +8,19 @@
  */
 
 use allocative::Allocative;
+use dupe::Dupe;
 
-use crate::buck_path::path::BuckPathRef;
 use crate::cells::cell_path::CellPathRef;
 use crate::cells::CellResolver;
 use crate::fs::buck_out_path::BuckOutPath;
 use crate::fs::buck_out_path::BuckOutPathResolver;
 use crate::fs::project::ProjectRoot;
 use crate::fs::project_rel_path::ProjectRelativePathBuf;
+use crate::package::source_path::SourcePathRef;
 
 #[derive(Clone, Allocative)]
 pub struct ArtifactFs {
-    buck_path_resolver: CellResolver,
+    cell_resolver: CellResolver,
     buck_out_path_resolver: BuckOutPathResolver,
     project_filesystem: ProjectRoot,
 }
@@ -31,7 +32,7 @@ impl ArtifactFs {
         project_filesystem: ProjectRoot,
     ) -> Self {
         Self {
-            buck_path_resolver,
+            cell_resolver: buck_path_resolver,
             buck_out_path_resolver,
             project_filesystem,
         }
@@ -46,15 +47,27 @@ impl ArtifactFs {
     }
 
     pub fn resolve_cell_path(&self, path: CellPathRef) -> anyhow::Result<ProjectRelativePathBuf> {
-        self.buck_path_resolver.resolve_path(path)
+        self.cell_resolver.resolve_path(path)
     }
 
     pub fn resolve_source(
         &self,
-        source_artifact_path: BuckPathRef,
+        source_artifact_path: SourcePathRef,
     ) -> anyhow::Result<ProjectRelativePathBuf> {
-        self.buck_path_resolver
-            .resolve_buck_path(source_artifact_path)
+        let cell_resolver = self.cell_resolver();
+        if let Some(origin) = cell_resolver
+            .get(source_artifact_path.package().cell_name())?
+            .external()
+        {
+            Ok(self.buck_out_path_resolver.resolve_external_cell_source(
+                source_artifact_path.to_cell_path().path(),
+                origin.dupe(),
+            ))
+        } else {
+            Ok(cell_resolver
+                .resolve_path(source_artifact_path.package().as_cell_path())?
+                .join(source_artifact_path.path()))
+        }
     }
 
     pub fn resolve_offline_output_cache_path(&self, path: &BuckOutPath) -> ProjectRelativePathBuf {
@@ -69,7 +82,7 @@ impl ArtifactFs {
         &self.buck_out_path_resolver
     }
 
-    pub fn buck_path_resolver(&self) -> &CellResolver {
-        &self.buck_path_resolver
+    pub fn cell_resolver(&self) -> &CellResolver {
+        &self.cell_resolver
     }
 }
diff --git a/app/buck2_core/src/fs/async_fs_util.rs b/app/buck2_core/src/fs/async_fs_util.rs
index efdfbcf99aab6..c356a3a380cd1 100644
--- a/app/buck2_core/src/fs/async_fs_util.rs
+++ b/app/buck2_core/src/fs/async_fs_util.rs
@@ -8,12 +8,12 @@
  */
 
 use anyhow::Context;
-use tokio::fs::File;
 
+use crate::fs::fs_util;
 use crate::fs::paths::abs_path::AbsPath;
 use crate::io_counters::IoCounterKey;
 
-pub async fn open<P: AsRef<AbsPath>>(path: P) -> anyhow::Result<File> {
+pub async fn open<P: AsRef<AbsPath>>(path: P) -> anyhow::Result<tokio::fs::File> {
     let _guard = IoCounterKey::Read.guard();
     tokio::fs::File::open(path.as_ref().as_maybe_relativized())
         .await
@@ -21,8 +21,24 @@ pub async fn open<P: AsRef<AbsPath>>(path: P) -> anyhow::Result<tokio::fs::File> {
 }
 
 pub async fn write<P: AsRef<AbsPath>>(path: P, content: impl AsRef<[u8]>) -> anyhow::Result<()> {
-    let _guard = IoCounterKey::Write.guard();
-    tokio::fs::write(path.as_ref().as_maybe_relativized(), content.as_ref())
-        .await
-        .with_context(|| format!("write({})", path.as_ref().display()))
+    let path = path.as_ref().to_owned();
+    let content = content.as_ref().to_owned();
+    Ok(tokio::task::spawn_blocking(move || fs_util::write(path, content)).await??)
+}
+
+pub async fn read_to_string<P: AsRef<AbsPath>>(path: P) -> anyhow::Result<String> {
+    let path = path.as_ref().to_owned();
+    Ok(tokio::task::spawn_blocking(move || fs_util::read_to_string(path)).await??)
+}
+
+pub async fn read_to_string_if_exists<P: AsRef<AbsPath>>(
+    path: P,
+) -> anyhow::Result<Option<String>> {
+    let path = path.as_ref().to_owned();
+    Ok(tokio::task::spawn_blocking(move || fs_util::read_to_string_if_exists(path)).await??)
+}
+
+pub async fn create_dir_all<P: AsRef<AbsPath>>(dir: P) -> anyhow::Result<()> {
+    let dir = dir.as_ref().to_owned();
+    Ok(tokio::task::spawn_blocking(move || fs_util::create_dir_all(dir)).await??)
 }
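The rewritten `async_fs_util` helpers all follow one pattern: delegate to the synchronous `fs_util` function on tokio's blocking pool, so the sync and async paths share the same error context and IO counters. The pattern generalizes; a sketch of wrapping one more `fs_util` operation from this diff the same way (this wrapper itself is hypothetical):

```rust
// Sketch: `fs_util::remove_file` appears later in this diff; an async wrapper
// would mirror the helpers above.
pub async fn remove_file<P: AsRef<AbsPath>>(path: P) -> anyhow::Result<()> {
    let path = path.as_ref().to_owned();
    // The first `?` unwraps the JoinError from spawn_blocking, the second the IO error.
    Ok(tokio::task::spawn_blocking(move || fs_util::remove_file(path)).await??)
}
```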
diff --git a/app/buck2_core/src/fs/buck_out_path.rs b/app/buck2_core/src/fs/buck_out_path.rs
index 4e8f8f3b002c4..36c0f4d573bd9 100644
--- a/app/buck2_core/src/fs/buck_out_path.rs
+++ b/app/buck2_core/src/fs/buck_out_path.rs
@@ -15,23 +15,29 @@ use std::sync::Arc;
 
 use allocative::Allocative;
 use derive_more::Display;
 use dupe::Dupe;
+use itertools::Itertools;
 
 use crate::base_deferred_key::BaseDeferredKey;
-use crate::category::Category;
+use crate::category::CategoryRef;
+use crate::cells::external::ExternalCellOrigin;
+use crate::cells::paths::CellRelativePath;
 use crate::fs::paths::forward_rel_path::ForwardRelativePath;
 use crate::fs::paths::forward_rel_path::ForwardRelativePathBuf;
 use crate::fs::project_rel_path::ProjectRelativePath;
 use crate::fs::project_rel_path::ProjectRelativePathBuf;
+use crate::provider::label::ConfiguredProvidersLabel;
+use crate::provider::label::NonDefaultProvidersName;
+use crate::provider::label::ProvidersName;
 
 #[derive(Clone, Debug, Display, Allocative, Hash, Eq, PartialEq)]
-#[display(fmt = "({})/{}", owner, "path.as_str()")]
+#[display("({})/{}", owner, path.as_str())]
 struct BuckOutPathData {
     /// The owner responsible for creating this path.
     owner: BaseDeferredKey,
     /// The unique identifier for this action (only set for outputs inside dynamic actions)
-    action_key: Option<Arc<str>>,
+    dynamic_actions_action_key: Option<Arc<str>>,
     /// The path relative to that target.
-    path: ForwardRelativePathBuf,
+    path: Box<ForwardRelativePath>,
 }
 
 /// Represents a resolvable path corresponding to outputs of rules that are part
@@ -49,18 +55,18 @@ pub struct BuckOutPath(Arc<BuckOutPathData>);
 
 impl BuckOutPath {
     pub fn new(owner: BaseDeferredKey, path: ForwardRelativePathBuf) -> Self {
-        Self::with_action_key(owner, path, None)
+        Self::with_dynamic_actions_action_key(owner, path, None)
     }
 
-    pub fn with_action_key(
+    pub fn with_dynamic_actions_action_key(
         owner: BaseDeferredKey,
         path: ForwardRelativePathBuf,
-        action_key: Option<Arc<str>>,
+        dynamic_actions_action_key: Option<Arc<str>>,
     ) -> Self {
         BuckOutPath(Arc::new(BuckOutPathData {
             owner,
-            action_key,
-            path,
+            dynamic_actions_action_key,
+            path: path.into_box(),
         }))
     }
 
@@ -68,22 +74,28 @@ impl BuckOutPath {
         &self.0.owner
     }
 
-    pub fn action_key(&self) -> Option<&str> {
-        self.0.action_key.as_deref()
+    pub fn dynamic_actions_action_key(&self) -> Option<&str> {
+        self.0.dynamic_actions_action_key.as_deref()
     }
 
     pub fn path(&self) -> &ForwardRelativePath {
         &self.0.path
     }
+
+    pub fn len(&self) -> usize {
+        self.0.path.as_str().len()
+    }
 }
 
 #[derive(Clone, Debug, Display, Eq, PartialEq)]
-#[display(fmt = "tmp/({})/{}", owner, "path.as_str()")]
+#[display("tmp/({})/{}", owner, path.as_str())]
 pub struct BuckOutScratchPath {
     /// The deferred responsible for creating this path.
     owner: BaseDeferredKey,
     /// The path relative to that target.
     path: ForwardRelativePathBuf,
+    /// The unique identifier for this action
+    action_key: String,
 }
 
 impl BuckOutScratchPath {
@@ -91,8 +103,9 @@ impl BuckOutScratchPath {
     /// really hard to normalise anything the user supplies.
     pub fn new(
         owner: BaseDeferredKey,
-        category: &Category,
+        category: CategoryRef,
         identifier: Option<&str>,
+        action_key: String,
     ) -> anyhow::Result<Self> {
         const MAKE_SENSIBLE_PREFIX: &str = "_buck_";
         // Windows has MAX_PATH limit (260 chars).
@@ -139,11 +152,15 @@ impl BuckOutScratchPath {
             _ => path.to_buf(),
         };
 
-        Ok(Self { owner, path })
+        Ok(Self {
+            owner,
+            path,
+            action_key,
+        })
     }
 }
 
-#[derive(Debug, PartialEq, Eq, Hash, Clone)]
+#[derive(Debug, PartialEq, Eq, Hash, Clone, Allocative)]
 pub struct BuckOutTestPath {
     /// A base path. This is primarily useful when e.g. set of tests should all be in the same
     /// path.
@@ -184,8 +201,9 @@ impl BuckOutPathResolver {
         self.prefixed_path_for_owner(
             ForwardRelativePath::unchecked_new("gen"),
             path.owner(),
-            path.action_key(),
+            path.dynamic_actions_action_key(),
             path.path(),
+            false,
         )
     }
 
@@ -193,17 +211,44 @@ impl BuckOutPathResolver {
         self.prefixed_path_for_owner(
             ForwardRelativePath::unchecked_new("offline-cache"),
             path.owner(),
-            path.action_key(),
+            path.dynamic_actions_action_key(),
             path.path(),
+            false,
         )
     }
 
+    pub fn resolve_external_cell_source(
+        &self,
+        path: &CellRelativePath,
+        origin: ExternalCellOrigin,
+    ) -> ProjectRelativePathBuf {
+        ProjectRelativePathBuf::from(ForwardRelativePathBuf::concat([
+            self.0.as_forward_relative_path(),
+            ForwardRelativePath::new("external_cells").unwrap(),
+            match origin {
+                ExternalCellOrigin::Bundled(_) => ForwardRelativePath::new("bundled").unwrap(),
+                ExternalCellOrigin::Git(_) => ForwardRelativePath::new("git").unwrap(),
+            },
+            match &origin {
+                ExternalCellOrigin::Bundled(cell) => {
+                    ForwardRelativePath::new(cell.as_str()).unwrap()
+                }
+                ExternalCellOrigin::Git(setup) => {
+                    ForwardRelativePath::new(setup.commit.as_ref()).unwrap()
+                }
+            },
+            path.as_ref(),
+        ]))
+    }
+
     pub fn resolve_scratch(&self, path: &BuckOutScratchPath) -> ProjectRelativePathBuf {
         self.prefixed_path_for_owner(
             ForwardRelativePath::unchecked_new("tmp"),
             &path.owner,
-            None,
+            Some(&path.action_key),
             &path.path,
+            // Fully hash scratch path as it can be very long and cause path too long issue on Windows.
+            true,
         )
     }
 
@@ -217,14 +262,42 @@ impl BuckOutPathResolver {
         ]))
     }
 
+    /// Resolve a test path for test discovery
+    pub fn resolve_test_discovery(
+        &self,
+        label: &ConfiguredProvidersLabel,
+    ) -> ProjectRelativePathBuf {
+        let path = match label.name() {
+            ProvidersName::Default => "default".into(),
+            ProvidersName::NonDefault(nd) => match nd.as_ref() {
+                NonDefaultProvidersName::Named(names) => names
+                    .iter()
+                    // Replacing / with + to avoid the path clash for ["foo/bar"] and ["foo", "bar"]
+                    .map(|name| name.as_str().replace("/", "+"))
+                    .join("/")
+                    .into(),
+                NonDefaultProvidersName::UnrecognizedFlavor(s) => s.dupe(),
+            },
+        };
+        let path = ForwardRelativePath::unchecked_new(&path);
+        self.prefixed_path_for_owner(
+            ForwardRelativePath::unchecked_new("test_discovery"),
+            &BaseDeferredKey::TargetLabel(label.target().dupe()),
+            None,
+            &path,
+            true,
+        )
+    }
+
     fn prefixed_path_for_owner(
         &self,
         prefix: &ForwardRelativePath,
         owner: &BaseDeferredKey,
         action_key: Option<&str>,
         path: &ForwardRelativePath,
+        fully_hash_path: bool,
     ) -> ProjectRelativePathBuf {
-        owner.make_hashed_path(&self.0, prefix, action_key, path)
+        owner.make_hashed_path(&self.0, prefix, action_key, path, fully_hash_path)
     }
 
     /// This function returns the exact location of the symlink of a given target.
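The `/` to `+` substitution in `resolve_test_discovery` is what keeps multi-part provider names unambiguous: the name list `["foo", "bar"]` joins to `foo/bar` (two path components), while the single name `["foo/bar"]` becomes `foo+bar` (one component). A toy illustration of just that string step, with an invented helper name:

```rust
// Invented helper mirroring the mapping used in resolve_test_discovery.
fn flatten_provider_names(names: &[&str]) -> String {
    names
        .iter()
        .map(|name| name.replace('/', "+"))
        .collect::<Vec<_>>()
        .join("/")
}

#[test]
fn flatten_disambiguates() {
    assert_eq!(flatten_provider_names(&["foo", "bar"]), "foo/bar");
    assert_eq!(flatten_provider_names(&["foo/bar"]), "foo+bar");
}
```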
@@ -245,27 +318,33 @@ impl BuckOutPathResolver { #[cfg(test)] mod tests { + use std::path::Path; use std::sync::Arc; use dupe::Dupe; use regex::Regex; use crate::base_deferred_key::BaseDeferredKey; - use crate::buck_path::path::BuckPath; - use crate::category::Category; + use crate::category::CategoryRef; use crate::cells::cell_root_path::CellRootPathBuf; use crate::cells::name::CellName; use crate::cells::paths::CellRelativePath; use crate::cells::CellResolver; use crate::configuration::data::ConfigurationData; + use crate::fs::artifact_path_resolver::ArtifactFs; use crate::fs::buck_out_path::BuckOutPath; use crate::fs::buck_out_path::BuckOutPathResolver; use crate::fs::buck_out_path::BuckOutScratchPath; + use crate::fs::paths::abs_norm_path::AbsNormPathBuf; use crate::fs::paths::forward_rel_path::ForwardRelativePathBuf; + use crate::fs::project::ProjectRoot; use crate::fs::project_rel_path::ProjectRelativePathBuf; - use crate::package::package_relative_path::PackageRelativePathBuf; + use crate::package::source_path::SourcePath; use crate::package::PackageLabel; - use crate::target::label::TargetLabel; + use crate::provider::label::ConfiguredProvidersLabel; + use crate::provider::label::ProviderName; + use crate::provider::label::ProvidersName; + use crate::target::label::label::TargetLabel; use crate::target::name::TargetNameRef; #[test] @@ -274,17 +353,27 @@ mod tests { CellName::testing_new("foo"), CellRootPathBuf::new(ProjectRelativePathBuf::unchecked_new("bar-cell".into())), ); + let buck_out_path_resolver = BuckOutPathResolver::new( + ProjectRelativePathBuf::unchecked_new("base/buck-out/v2".into()), + ); + let artifact_fs = ArtifactFs::new( + cell_resolver, + buck_out_path_resolver, + ProjectRoot::new_unchecked( + AbsNormPathBuf::new( + Path::new(if cfg!(windows) { + "C:\\project" + } else { + "/project" + }) + .to_owned(), + ) + .unwrap(), + ), + ); - let resolved = cell_resolver.resolve_buck_path( - BuckPath::testing_new( - PackageLabel::new( - CellName::testing_new("foo"), - CellRelativePath::unchecked_new("baz-package"), - ), - PackageRelativePathBuf::unchecked_new("faz.file".into()), - ) - .as_ref(), - )?; + let resolved = artifact_fs + .resolve_source(SourcePath::testing_new("foo//baz-package", "faz.file").as_ref())?; assert_eq!( ProjectRelativePathBuf::unchecked_new("bar-cell/baz-package/faz.file".into()), @@ -292,17 +381,8 @@ mod tests { ); assert_eq!( - cell_resolver - .resolve_buck_path( - BuckPath::testing_new( - PackageLabel::new( - CellName::testing_new("none_existent"), - CellRelativePath::unchecked_new("baz") - ), - PackageRelativePathBuf::unchecked_new("fazx".into()) - ) - .as_ref() - ) + artifact_fs + .resolve_source(SourcePath::testing_new("none_existent//baz", "fazx").as_ref()) .is_err(), true ); @@ -322,19 +402,39 @@ mod tests { ); let target = TargetLabel::new(pkg, TargetNameRef::unchecked_new("target-name")); let cfg_target = target.configure(ConfigurationData::testing_new()); + let owner = BaseDeferredKey::TargetLabel(cfg_target); - let resolved = path_resolver.resolve_gen(&BuckOutPath::new( - BaseDeferredKey::TargetLabel(cfg_target), + let resolved_gen_path = path_resolver.resolve_gen(&BuckOutPath::new( + owner.dupe(), ForwardRelativePathBuf::unchecked_new("faz.file".into()), )); - let re = + let expected_gen_path = Regex::new("base/buck-out/v2/gen/foo/[0-9a-z]+/baz-package/__target-name__/faz.file")?; assert!( - re.is_match(resolved.as_str()), + expected_gen_path.is_match(resolved_gen_path.as_str()), "{}.is_match({})", - re, - resolved + expected_gen_path, + 
resolved_gen_path + ); + + let resolved_scratch_path = path_resolver.resolve_scratch( + &BuckOutScratchPath::new( + owner, + CategoryRef::new("category").unwrap(), + Some(&String::from("blah.file")), + "1_2".to_owned(), + ) + .unwrap(), + ); + + let expected_scratch_path = + Regex::new("base/buck-out/v2/tmp/foo/[0-9a-z]+/category/blah.file")?; + assert!( + expected_scratch_path.is_match(resolved_scratch_path.as_str()), + "{}.is_match({})", + expected_scratch_path, + resolved_scratch_path ); Ok(()) } @@ -350,35 +450,58 @@ mod tests { ); let target = TargetLabel::new(pkg, TargetNameRef::unchecked_new("target-name")); let cfg_target = target.configure(ConfigurationData::testing_new()); + let owner = BaseDeferredKey::TargetLabel(cfg_target); - let resolved = path_resolver.resolve_gen(&BuckOutPath::new( - BaseDeferredKey::TargetLabel(cfg_target.dupe()), + let resolved_gen_path = path_resolver.resolve_gen(&BuckOutPath::new( + owner.dupe(), ForwardRelativePathBuf::unchecked_new("quux".to_owned()), )); - let re = Regex::new("buck-out/gen/foo/[0-9a-z]+/baz-package/__target-name__/quux")?; + let expected_gen_path: Regex = + Regex::new("buck-out/gen/foo/[0-9a-z]+/baz-package/__target-name__/quux")?; assert!( - re.is_match(resolved.as_str()), + expected_gen_path.is_match(resolved_gen_path.as_str()), "{}.is_match({})", - re, - resolved + expected_gen_path, + resolved_gen_path ); - let path = BuckOutPath::with_action_key( - BaseDeferredKey::TargetLabel(cfg_target), + let path = BuckOutPath::with_dynamic_actions_action_key( + owner.dupe(), ForwardRelativePathBuf::unchecked_new("quux".to_owned()), Some(Arc::from("xxx")), ); - let resolved = path_resolver.resolve_gen(&path); + let resolved_gen_path = path_resolver.resolve_gen(&path); - let re = Regex::new( + let expected_gen_path = Regex::new( "buck-out/gen/foo/[0-9a-z]+/baz-package/__target-name__/__action__xxx__/quux", )?; assert!( - re.is_match(resolved.as_str()), + expected_gen_path.is_match(resolved_gen_path.as_str()), "{}.is_match({})", - re, - resolved + expected_gen_path, + resolved_gen_path + ); + + let resolved_scratch_path = path_resolver.resolve_scratch( + &BuckOutScratchPath::new( + owner, + CategoryRef::new("category").unwrap(), + Some(&String::from( + "xxx_some_crazy_long_file_name_that_causes_it_to_be_hashed_xxx.txt", + )), + "xxx_some_long_action_key_but_it_doesnt_matter_xxx".to_owned(), + ) + .unwrap(), + ); + + let expected_scratch_path = + Regex::new("buck-out/tmp/foo/[0-9a-z]+/category/_buck_[0-9a-z]+")?; + assert!( + expected_scratch_path.is_match(resolved_scratch_path.as_str()), + "{}.is_match({})", + expected_scratch_path, + resolved_scratch_path ); Ok(()) @@ -392,21 +515,23 @@ mod tests { ); let target = TargetLabel::new(pkg, TargetNameRef::unchecked_new("target-name")); let cfg_target = target.configure(ConfigurationData::testing_new()); - let category = Category::try_from("category").unwrap(); + let category = CategoryRef::new("category").unwrap(); // We expect these all to be valid paths, avoiding weird things we throw in BuckOutScratchPath::new( BaseDeferredKey::TargetLabel(cfg_target.dupe()), - &category, + category, None, + "1_2".to_owned(), ) .unwrap(); let mk = move |s| { BuckOutScratchPath::new( BaseDeferredKey::TargetLabel(cfg_target.dupe()), - &category, + category, Some(s), + "3_4".to_owned(), ) .unwrap() .path @@ -429,4 +554,67 @@ mod tests { assert_eq!(mk("weird <>"), mk("weird <>")); assert_ne!(mk("weird <>"), mk("weird ><")) } + + #[test] + fn test_scratch_path_is_unique() { + let path_resolver = 
BuckOutPathResolver::new(ProjectRelativePathBuf::unchecked_new( + "base/buck-out/v2".into(), + )); + let pkg = PackageLabel::new( + CellName::testing_new("foo"), + CellRelativePath::unchecked_new("baz-package"), + ); + let target = TargetLabel::new(pkg, TargetNameRef::unchecked_new("target-name")); + let cfg_target = target.configure(ConfigurationData::testing_new()); + + let mk = move |s: &str, id: &str| { + path_resolver + .resolve_scratch( + &BuckOutScratchPath::new( + BaseDeferredKey::TargetLabel(cfg_target.dupe()), + CategoryRef::new("category").unwrap(), + Some(id), + s.to_owned(), + ) + .unwrap(), + ) + .as_str() + .to_owned() + }; + + // Same action_key, same identifier are equal + assert_eq!(mk("same_key", "same_id"), mk("same_key", "same_id")); + assert_eq!(mk("same_key", "_buck_same"), mk("same_key", "_buck_same")); + + // Same action_key, different identifier are not equal + assert_ne!(mk("same_key", "diff_id1"), mk("same_key", "diff_id2")); + assert_ne!(mk("same_key", "_buck_1"), mk("same_key", "_buck_2")); + + // Different action_key, same identifier are not equal + assert_ne!(mk("diff_key1", "same_id"), mk("diff_key2", "same_id")); + assert_ne!(mk("diff_key1", "_buck_same"), mk("diff_key2", "_buck_same")); + + // Different action_key, different identifier are not equal + assert_ne!(mk("diff_key1", "diff_id1"), mk("diff_key2", "diff_id2")); + assert_ne!(mk("diff_key1", "_buck_1"), mk("diff_key2", "_buck_2")); + } + + #[test] + fn test_resolve_test_discovery() -> anyhow::Result<()> { + let path_resolver = + BuckOutPathResolver::new(ProjectRelativePathBuf::unchecked_new("buck-out".into())); + + let pkg = PackageLabel::new( + CellName::testing_new("foo"), + CellRelativePath::unchecked_new("baz-package"), + ); + let target = TargetLabel::new(pkg, TargetNameRef::unchecked_new("target-name")); + let cfg_target = target.configure(ConfigurationData::testing_new()); + let providers = ProvidersName::Default.push(ProviderName::new_unchecked("bar/baz".into())); + let providers_label = ConfiguredProvidersLabel::new(cfg_target, providers); + let result = path_resolver.resolve_test_discovery(&providers_label); + let expected_result = Regex::new("buck-out/test_discovery/foo/[0-9a-z]+/bar\\+baz")?; + assert!(expected_result.is_match(result.as_str())); + Ok(()) + } } diff --git a/app/buck2_core/src/fs/cwd.rs b/app/buck2_core/src/fs/cwd.rs index baccba737d8a0..585a9d4365bb6 100644 --- a/app/buck2_core/src/fs/cwd.rs +++ b/app/buck2_core/src/fs/cwd.rs @@ -115,7 +115,7 @@ pub type WorkingDirectory = WorkingDirectoryGen; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum CwdError { #[error("cwd is already set to `{}`", _0.display())] CwdAlreadySet(AbsPathBuf), diff --git a/app/buck2_core/src/fs/fs_util.rs b/app/buck2_core/src/fs/fs_util.rs index 4e11728cfce65..2ad05a87db999 100644 --- a/app/buck2_core/src/fs/fs_util.rs +++ b/app/buck2_core/src/fs/fs_util.rs @@ -21,6 +21,7 @@ use std::path::Path; use std::path::PathBuf; use anyhow::Context as _; +use buck2_error::ErrorTag; use relative_path::RelativePath; use relative_path::RelativePathBuf; @@ -31,19 +32,100 @@ use crate::fs::paths::abs_path::AbsPath; use crate::io_counters::IoCounterGuard; use crate::io_counters::IoCounterKey; +// https://learn.microsoft.com/en-us/windows/win32/debug/system-error-codes--0-499- +// "The process cannot access the file because it is being used by another process." 
+pub const ERROR_SHARING_VIOLATION: i32 = 32; + +fn io_error_kind_tag(e: &io::Error) -> Option { + 'from_kind: { + let from_kind = match e.kind() { + io::ErrorKind::NotFound => ErrorTag::IoNotFound, + io::ErrorKind::PermissionDenied => ErrorTag::IoPermissionDenied, + io::ErrorKind::TimedOut => ErrorTag::IoTimeout, + io::ErrorKind::ExecutableFileBusy => ErrorTag::IoExecutableFileBusy, + io::ErrorKind::BrokenPipe => ErrorTag::IoBrokenPipe, + io::ErrorKind::StorageFull => ErrorTag::IoStorageFull, + io::ErrorKind::ConnectionAborted => ErrorTag::IoConnectionAborted, + _ => break 'from_kind, + }; + return Some(from_kind); + } + + if let Some(os_error_code) = e.raw_os_error() { + 'from_os: { + let from_os = match os_error_code { + libc::ENOTCONN => ErrorTag::IoNotConnected, + libc::ECONNABORTED => ErrorTag::IoConnectionAborted, + _ => break 'from_os, + }; + return Some(from_os); + } + + if cfg!(windows) && os_error_code == ERROR_SHARING_VIOLATION { + return Some(ErrorTag::IoWindowsSharingViolation); + } + } + + None +} + +impl IoError { + pub fn categorize_for_source_file(self) -> anyhow::Error { + if self.e.kind() == io::ErrorKind::NotFound { + buck2_error::Error::from(self) + .context(buck2_error::Tier::Input) + .into() + } else { + self.into() + } + } +} + +#[derive(buck2_error::Error, Debug)] +#[buck2(tag = IoSystem)] +#[buck2(tag = io_error_kind_tag(&self.e))] +#[error("{}", .op)] +pub struct IoError { + op: String, + #[source] + e: io::Error, +} + +macro_rules! make_error { + ($val:expr, $context:expr $(,)?) => {{ + match ($val) { + Ok(v) => Ok(v), + Err(e) => Err(IoError { op: $context, e }), + } + }}; +} + +macro_rules! make_anyhow_error { + ($val:expr, $context:expr $(,)?) => {{ ($val).with_context(|| $context) }}; +} + +fn if_exists(r: io::Result) -> io::Result> { + match r { + Ok(v) => Ok(Some(v)), + Err(e) if e.kind() == io::ErrorKind::NotFound => Ok(None), + Err(e) => Err(e), + } +} + pub fn symlink(original: P, link: Q) -> anyhow::Result<()> where P: AsRef, Q: AsRef, { let _guard = IoCounterKey::Symlink.guard(); - symlink_impl(original.as_ref(), link.as_ref()).with_context(|| { + make_anyhow_error!( + symlink_impl(original.as_ref(), link.as_ref()), format!( "symlink(original={}, link={})", original.as_ref().display(), link.as_ref().display() - ) - }) + ), + ) } #[cfg(unix)] @@ -125,51 +207,58 @@ fn symlink_impl(original: &Path, link: &AbsPath) -> anyhow::Result<()> { pub fn set_current_dir>(path: P) -> anyhow::Result<()> { assert_cwd_is_not_set()?; - env::set_current_dir(path.as_ref()) - .with_context(|| format!("set_current_dir({})", P::as_ref(&path).display())) + make_anyhow_error!( + env::set_current_dir(path.as_ref()), + format!("set_current_dir({})", P::as_ref(&path).display()), + ) } -pub fn create_dir_all>(path: P) -> anyhow::Result<()> { +pub fn create_dir_all>(path: P) -> Result<(), IoError> { let _guard = IoCounterKey::MkDir.guard(); - fs::create_dir_all(path.as_ref().as_maybe_relativized()) - .with_context(|| format!("create_dir_all({})", P::as_ref(&path).display()))?; - Ok(()) + make_error!( + fs::create_dir_all(path.as_ref().as_maybe_relativized()), + format!("create_dir_all({})", P::as_ref(&path).display()), + ) } -pub fn create_dir>(path: P) -> anyhow::Result<()> { +pub fn create_dir>(path: P) -> Result<(), IoError> { let _guard = IoCounterKey::MkDir.guard(); - fs::create_dir(path.as_ref().as_maybe_relativized()) - .with_context(|| format!("create_dir({})", P::as_ref(&path).display()))?; - Ok(()) + make_error!( + fs::create_dir(path.as_ref().as_maybe_relativized()), + 
format!("create_dir({})", P::as_ref(&path).display()) + ) } /// Create directory if not exists. /// /// Fail if exists but is not a directory or creation failed. -pub fn create_dir_if_not_exists>(path: P) -> anyhow::Result<()> { +pub fn create_dir_if_not_exists>(path: P) -> Result<(), IoError> { let path = path.as_ref(); let _guard = IoCounterKey::MkDir.guard(); - let e = match fs::create_dir(path.as_maybe_relativized()) - .with_context(|| format!("create_dir({})", path.display())) - { - Ok(()) => return Ok(()), - Err(e) => e, - }; - - match symlink_metadata(path) { - Ok(metadata) => { - if metadata.is_dir() { - Ok(()) - } else { - // File exists but not a directory, return original error. - Err(e) + make_error!( + { + let e = match fs::create_dir(path.as_maybe_relativized()) { + Ok(()) => return Ok(()), + Err(e) => e, + }; + + match symlink_metadata(path) { + Ok(metadata) => { + if metadata.is_dir() { + Ok(()) + } else { + // File exists but not a directory, return original error. + Err(e) + } + } + Err(_) => { + // `lstat` failed, means something like permission denied, return original error. + Err(e) + } } - } - Err(_) => { - // `lstat` failed, means something like permission denied, return original error. - Err(e) - } - } + }, + format!("create_dir({})", path.display()) + ) } /// `DirEntry` which is known to contain absolute path. @@ -210,47 +299,46 @@ impl Iterator for ReadDir { } } -pub fn read_dir>(path: P) -> anyhow::Result { +pub fn read_dir>(path: P) -> Result { let _guard = IoCounterKey::ReadDir.guard(); - fs::read_dir(path.as_ref()) - .with_context(|| format!("read_dir({})", P::as_ref(&path).display())) - .map(|read_dir| ReadDir { read_dir, _guard }) + make_error!( + fs::read_dir(path.as_ref()).map(|read_dir| ReadDir { read_dir, _guard }), + format!("read_dir({})", P::as_ref(&path).display()), + ) } -pub fn read_dir_if_exists>(path: P) -> anyhow::Result> { +pub fn read_dir_if_exists>(path: P) -> Result, IoError> { let _guard = IoCounterKey::ReadDir.guard(); - let read_dir = fs::read_dir(path.as_ref()); - let read_dir = match read_dir { - Err(e) if e.kind() == std::io::ErrorKind::NotFound => return Ok(None), - Err(e) => { - return Err(e) - .with_context(|| format!("read_dir_if_exists({})", P::as_ref(&path).display())); - } - Ok(x) => x, - }; - Ok(Some(ReadDir { read_dir, _guard })) + make_error!( + if_exists(fs::read_dir(path.as_ref()).map(|read_dir| ReadDir { read_dir, _guard })), + format!("read_dir_if_exists({})", P::as_ref(&path).display()), + ) } -pub fn try_exists>(path: P) -> anyhow::Result { +pub fn try_exists>(path: P) -> Result { let _guard = IoCounterKey::Stat.guard(); - fs::try_exists(path.as_ref().as_maybe_relativized()) - .with_context(|| format!("try_exists({})", P::as_ref(&path).display())) + make_error!( + path.as_ref().as_maybe_relativized().try_exists(), + format!("try_exists({})", P::as_ref(&path).display()) + ) } -pub fn remove_file>(path: P) -> anyhow::Result<()> { +pub fn remove_file>(path: P) -> Result<(), IoError> { let _guard = IoCounterKey::Remove.guard(); - remove_file_impl(path.as_ref().as_maybe_relativized()) - .with_context(|| format!("remove_file({})", P::as_ref(&path).display())) + make_error!( + remove_file_impl(path.as_ref().as_maybe_relativized()), + format!("remove_file({})", P::as_ref(&path).display()), + ) } #[cfg(unix)] -fn remove_file_impl(path: &Path) -> anyhow::Result<()> { +fn remove_file_impl(path: &Path) -> io::Result<()> { fs::remove_file(path)?; Ok(()) } #[cfg(windows)] -fn remove_file_impl(path: &Path) -> anyhow::Result<()> { +fn 
remove_file_impl(path: &Path) -> io::Result<()> { use std::os::windows::fs::FileTypeExt; let file_type = path.symlink_metadata()?.file_type(); @@ -262,67 +350,74 @@ fn remove_file_impl(path: &Path) -> anyhow::Result<()> { Ok(()) } -pub fn copy, Q: AsRef>(from: P, to: Q) -> anyhow::Result { +pub fn copy, Q: AsRef>(from: P, to: Q) -> Result { let _guard = IoCounterKey::Copy.guard(); - fs::copy( - from.as_ref().as_maybe_relativized(), - to.as_ref().as_maybe_relativized(), - ) - .with_context(|| { + make_error!( + fs::copy( + from.as_ref().as_maybe_relativized(), + to.as_ref().as_maybe_relativized(), + ), format!( "copy(from={}, to={})", P::as_ref(&from).display(), Q::as_ref(&to).display() - ) - }) + ), + ) } -pub fn read_link>(path: P) -> anyhow::Result { +pub fn read_link>(path: P) -> Result { let _guard = IoCounterKey::ReadLink.guard(); - fs::read_link(path.as_ref().as_maybe_relativized()) - .with_context(|| format!("read_link({})", P::as_ref(&path).display())) + make_error!( + fs::read_link(path.as_ref().as_maybe_relativized()), + format!("read_link({})", P::as_ref(&path).display()), + ) } -pub fn rename, Q: AsRef>(from: P, to: Q) -> anyhow::Result<()> { +pub fn rename, Q: AsRef>(from: P, to: Q) -> Result<(), IoError> { let _guard = IoCounterKey::Rename.guard(); - fs::rename( - from.as_ref().as_maybe_relativized(), - to.as_ref().as_maybe_relativized(), - ) - .with_context(|| { + make_error!( + fs::rename( + from.as_ref().as_maybe_relativized(), + to.as_ref().as_maybe_relativized(), + ), format!( "rename(from={}, to={})", P::as_ref(&from).display(), Q::as_ref(&to).display() - ) - })?; - Ok(()) + ), + ) } -pub fn write, C: AsRef<[u8]>>(path: P, contents: C) -> anyhow::Result<()> { +pub fn write, C: AsRef<[u8]>>(path: P, contents: C) -> Result<(), IoError> { let _guard = IoCounterKey::Write.guard(); - fs::write(path.as_ref().as_maybe_relativized(), &contents) - .with_context(|| format!("write({}, _)", P::as_ref(&path).display()))?; - Ok(()) + make_error!( + fs::write(path.as_ref().as_maybe_relativized(), &contents), + format!("write({}, _)", P::as_ref(&path).display()), + ) } -pub fn metadata>(path: P) -> anyhow::Result { +pub fn metadata>(path: P) -> Result { let _guard = IoCounterKey::Stat.guard(); - fs::metadata(path.as_ref().as_maybe_relativized()) - .with_context(|| format!("metadata({})", P::as_ref(&path).display())) + make_error!( + fs::metadata(path.as_ref().as_maybe_relativized()), + format!("metadata({})", P::as_ref(&path).display()), + ) } -pub fn symlink_metadata>(path: P) -> anyhow::Result { +pub fn symlink_metadata>(path: P) -> Result { let _guard = IoCounterKey::Stat.guard(); - fs::symlink_metadata(path.as_ref().as_maybe_relativized()) - .with_context(|| format!("symlink_metadata({})", P::as_ref(&path).display())) + make_error!( + fs::symlink_metadata(path.as_ref().as_maybe_relativized()), + format!("symlink_metadata({})", P::as_ref(&path).display()), + ) } -pub fn set_permissions>(path: P, perm: fs::Permissions) -> anyhow::Result<()> { +pub fn set_permissions>(path: P, perm: fs::Permissions) -> Result<(), IoError> { let _guard = IoCounterKey::Chmod.guard(); - fs::set_permissions(path.as_ref().as_maybe_relativized(), perm) - .with_context(|| format!("set_permissions({}, _)", P::as_ref(&path).display()))?; - Ok(()) + make_error!( + fs::set_permissions(path.as_ref().as_maybe_relativized(), perm), + format!("set_permissions({}, _)", P::as_ref(&path).display()), + ) } pub fn set_executable>(path: P) -> anyhow::Result<()> { @@ -347,43 +442,38 @@ pub fn set_executable>(path: P) -> 
anyhow::Result<()> { Ok(()) } -pub fn remove_dir_all>(path: P) -> anyhow::Result<()> { +pub fn remove_dir_all>(path: P) -> Result<(), IoError> { let _guard = IoCounterKey::RmDirAll.guard(); - fs::remove_dir_all(path.as_ref().as_maybe_relativized()) - .with_context(|| format!("remove_dir_all({})", P::as_ref(&path).display()))?; - Ok(()) + make_error!( + fs::remove_dir_all(path.as_ref().as_maybe_relativized()), + format!("remove_dir_all({})", P::as_ref(&path).display()), + ) } /// `None` if file does not exist. pub fn symlink_metadata_if_exists>( path: P, -) -> anyhow::Result> { - let _guard = IoCounterKey::Stat.guard(); - match fs::symlink_metadata(path.as_ref().as_maybe_relativized()) { - Ok(metadata) => Ok(Some(metadata)), - Err(e) if e.kind() == io::ErrorKind::NotFound => Ok(None), - Err(err) => { - Err(err).with_context(|| format!("symlink_metadata({})", path.as_ref().display())) - } - } -} - -/// Like fs::exists but gives you the metadata. More efficient than calling `symlink_metadata().ok()` (no anyhow, no backtrace) or `exists()` (one stat call). -pub fn symlink_metadata_if_available>(path: P) -> Option { +) -> Result, IoError> { let _guard = IoCounterKey::Stat.guard(); - fs::symlink_metadata(path.as_ref().as_maybe_relativized()).ok() + make_error!( + if_exists(fs::symlink_metadata(path.as_ref().as_maybe_relativized())), + format!("symlink_metadata({})", path.as_ref().display()) + ) } /// Remove whatever exists at `path`, be it a file, directory, pipe, broken symlink, etc. /// Do nothing if `path` does not exist. -pub fn remove_all>(path: P) -> anyhow::Result<()> { - let guard = IoCounterKey::RmDirAll.guard(); - let metadata = match symlink_metadata_if_exists(&path)? { - Some(s) => s, - None => return Ok(()), - }; - - drop(guard); +pub fn remove_all>(path: P) -> Result<(), IoError> { + // There are no counters because every function called here has its own counter. + let metadata = match symlink_metadata_if_exists(&path) { + Ok(None) => return Ok(()), + Ok(Some(s)) => Ok(s), + // `NotADirectory` means we are trying to delete a path (e.g. "/foo/bar") that has a subpath + // pointing to a regular file (e.g. "/foo"). In this case do not fail and behave similarly to as + // when path we are trying to delete does not exist. + Err(e) if e.e.kind() == io::ErrorKind::NotADirectory => return Ok(()), + Err(e) => Err(e), + }?; let r = if metadata.is_dir() { remove_dir_all(&path) @@ -397,59 +487,58 @@ pub fn remove_all>(path: P) -> anyhow::Result<()> { r } -pub fn read>(path: P) -> anyhow::Result> { +pub fn read>(path: P) -> Result, IoError> { let _guard = IoCounterKey::Read.guard(); - fs::read(path.as_ref().as_maybe_relativized()) - .with_context(|| format!("read({})", P::as_ref(&path).display())) + make_error!( + fs::read(path.as_ref().as_maybe_relativized()), + format!("read({})", P::as_ref(&path).display()), + ) } -pub fn read_to_string>(path: P) -> anyhow::Result { +pub fn read_to_string>(path: P) -> Result { let _guard = IoCounterKey::Read.guard(); - fs::read_to_string(path.as_ref().as_maybe_relativized()) - .with_context(|| format!("read_to_string({})", P::as_ref(&path).display())) + make_error!( + fs::read_to_string(path.as_ref().as_maybe_relativized()), + format!("read_to_string({})", P::as_ref(&path).display()), + ) } /// Read a file, if it exists. Returns `None` when the file does not exist. 
 /// Read a file, if it exists. Returns `None` when the file does not exist. -pub fn read_to_string_if_exists<P: AsRef<AbsPath>>(path: P) -> anyhow::Result<Option<String>> { +pub fn read_to_string_if_exists<P: AsRef<AbsPath>>(path: P) -> Result<Option<String>, IoError> { let _guard = IoCounterKey::Read.guard(); - match fs::read_to_string(path.as_ref().as_maybe_relativized()) { - Ok(d) => Ok(Some(d)), - Err(e) if e.kind() == io::ErrorKind::NotFound => Ok(None), - Err(e) => Err(anyhow::Error::from(e).context(format!( - "read_to_string_if_exists({})", - P::as_ref(&path).display() - ))), - } + make_error!( + if_exists(fs::read_to_string(path.as_ref().as_maybe_relativized())), + format!("read_to_string_if_exists({})", P::as_ref(&path).display()), + ) } /// Read a file, if it exists. Returns `None` when the file does not exist. -pub fn read_if_exists<P: AsRef<AbsPath>>(path: P) -> anyhow::Result<Option<Vec<u8>>> { +pub fn read_if_exists<P: AsRef<AbsPath>>(path: P) -> Result<Option<Vec<u8>>, IoError> { let _guard = IoCounterKey::Read.guard(); - match fs::read(path.as_ref().as_maybe_relativized()) { - Ok(d) => Ok(Some(d)), - Err(e) if e.kind() == io::ErrorKind::NotFound => Ok(None), - Err(e) => Err(anyhow::Error::from(e) - .context(format!("read_if_exists({})", P::as_ref(&path).display()))), - } + make_error!( + if_exists(fs::read(path.as_ref().as_maybe_relativized())), + format!("read_if_exists({})", P::as_ref(&path).display()), + ) } -pub fn canonicalize<P: AsRef<Path>>(path: P) -> anyhow::Result<AbsNormPathBuf> { +pub fn canonicalize<P: AsRef<AbsPath>>(path: P) -> anyhow::Result<AbsNormPathBuf> { let _guard = IoCounterKey::Canonicalize.guard(); - let path = dunce::canonicalize(&path) - .with_context(|| format!("canonicalize({})", P::as_ref(&path).display()))?; + let path = make_error!( + dunce::canonicalize(path.as_ref()), + format!("canonicalize({})", P::as_ref(&path).display()), + )?; AbsNormPathBuf::new(path) } -pub fn canonicalize_if_exists<P: AsRef<Path>>(path: P) -> anyhow::Result<Option<AbsNormPathBuf>> { +pub fn canonicalize_if_exists<P: AsRef<AbsPath>>( + path: P, +) -> anyhow::Result<Option<AbsNormPathBuf>> { let _guard = IoCounterKey::Canonicalize.guard(); - match dunce::canonicalize(&path) { - Ok(path) => Some(AbsNormPathBuf::new(path)).transpose(), - Err(e) if e.kind() == io::ErrorKind::NotFound => Ok(None), - Err(e) => Err(anyhow::Error::new(e).context(format!( - "canonicalize_if_exists({})", - P::as_ref(&path).display() - ))), - } + let path = make_error!( + if_exists(dunce::canonicalize(path.as_ref())), + format!("canonicalize_if_exists({})", P::as_ref(&path).display()), + )?; + path.map(AbsNormPathBuf::new).transpose() } /// Convert Windows UNC path to regular path. @@ -459,10 +548,103 @@ pub fn simplified(path: &AbsPath) -> anyhow::Result<&AbsPath> { AbsPath::new(path) } -pub fn remove_dir<P: AsRef<AbsPath>>(path: P) -> anyhow::Result<()> { +pub fn remove_dir<P: AsRef<AbsPath>>(path: P) -> Result<(), IoError> { let _guard = IoCounterKey::RmDir.guard(); - fs::remove_dir(path.as_ref().as_maybe_relativized()) - .with_context(|| format!("remove_dir({})", P::as_ref(&path).display())) + make_error!( + fs::remove_dir(path.as_ref().as_maybe_relativized()), + format!("remove_dir({})", P::as_ref(&path).display()), + ) +} + +pub struct DiskSpaceStats { + pub free_space: u64, + pub total_space: u64, +}
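One way a consumer might fold the new `DiskSpaceStats` into a single utilization figure (a sketch; the field semantics are those of the statvfs/GetDiskFreeSpaceExW implementations below):

fn used_fraction(stats: &DiskSpaceStats) -> f64 {
    let used = stats.total_space.saturating_sub(stats.free_space);
    // Avoid dividing by zero on a degenerate (zero-sized) filesystem.
    used as f64 / stats.total_space.max(1) as f64
}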
+/// Free and total disk space on given path. Path does not have to be disk root. +/// When the path does not exist, the behavior is not specified. +pub fn disk_space_stats<P: AsRef<AbsPath>>(path: P) -> anyhow::Result<DiskSpaceStats> { + #[cfg(not(windows))] + fn disk_space_stats_impl(path: &Path) -> anyhow::Result<DiskSpaceStats> { + use std::ffi::CString; + use std::mem::MaybeUninit; + use std::os::unix::ffi::OsStrExt; + + let path_c = CString::new(path.as_os_str().as_bytes()) + .with_context(|| format!("Failed to convert path to CString: {:?}", path))?; + let mut statvfs = unsafe { MaybeUninit::<libc::statvfs>::zeroed().assume_init() }; + unsafe { + let r = libc::statvfs(path_c.as_ptr(), &mut statvfs); + if r != 0 { + let e = io::Error::last_os_error(); + return Err(IoError { + op: format!("statvfs({})", path.display()), + e, + } + .into()); + } + } + let fr_size = u64::from(statvfs.f_frsize); + let free_space = u64::from(statvfs.f_bavail) + .checked_mul(fr_size) + .with_context(|| { + format!( + "Multiplication overflow for statvfs free space for `{}`", + path.display() + ) + })?; + + let total_space = u64::from(statvfs.f_blocks) + .checked_mul(fr_size) + .with_context(|| { + format!( + "Multiplication overflow for statvfs total space for `{}`", + path.display() + ) + })?; + Ok(DiskSpaceStats { + free_space, + total_space, + }) + } + + #[cfg(windows)] + fn disk_space_stats_impl(path: &Path) -> anyhow::Result<DiskSpaceStats> { + use std::mem::MaybeUninit; + use std::ptr; + + use buck2_util::os::win::os_str::os_str_to_wide_null_term; + + let path_c = os_str_to_wide_null_term(path.as_os_str()); + + unsafe { + let mut free_bytes = + MaybeUninit::<winapi::um::winnt::ULARGE_INTEGER>::zeroed().assume_init(); + let mut total_bytes = + MaybeUninit::<winapi::um::winnt::ULARGE_INTEGER>::zeroed().assume_init(); + let r = winapi::um::fileapi::GetDiskFreeSpaceExW( + path_c.as_ptr(), + &mut free_bytes as *mut _, // lpFreeBytesAvailableToCaller + &mut total_bytes as *mut _, // lpTotalNumberOfBytes + ptr::null_mut(), // lpTotalNumberOfFreeBytes + ); + if r == 0 { + let e = io::Error::last_os_error(); + return Err(IoError { + op: format!("GetDiskFreeSpaceExW({})", path.display()), + e, + } + .into()); + } + Ok(DiskSpaceStats { + free_space: *free_bytes.QuadPart(), + total_space: *total_bytes.QuadPart(), + }) + } + } + + let _guard = IoCounterKey::Stat.guard(); + disk_space_stats_impl(path.as_ref()) } pub struct FileWriteGuard { @@ -480,16 +662,35 @@ impl Write for FileWriteGuard { } } -pub fn create_file<P: AsRef<AbsPath>>(path: P) -> anyhow::Result<FileWriteGuard> { +pub fn create_file<P: AsRef<AbsPath>>(path: P) -> Result<FileWriteGuard, IoError> { let guard = IoCounterKey::Write.guard(); - let file = File::create(path.as_ref().as_maybe_relativized()) - .with_context(|| format!("create_file({})", P::as_ref(&path).display()))?; + let file = make_error!( + File::create(path.as_ref().as_maybe_relativized()), + format!("create_file({})", P::as_ref(&path).display()), + )?; Ok(FileWriteGuard { file, _guard: guard, }) } +pub fn create_file_if_not_exists<P: AsRef<AbsPath>>( + path: P, +) -> Result<Option<FileWriteGuard>, IoError> { + let guard = IoCounterKey::Write.guard(); + match File::create_new(path.as_ref().as_maybe_relativized()) { + Ok(file) => Ok(Some(FileWriteGuard { + file, + _guard: guard, + })), + Err(e) if e.kind() == io::ErrorKind::AlreadyExists => Ok(None), + Err(e) => make_error!( + Err(e), + format!("create_file_new({})", P::as_ref(&path).display()), + )?, + } +}
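Because `File::create_new` fails atomically with `AlreadyExists`, the helper above can serve as a first-writer-wins guard; a hedged sketch (the lock-file parameter is illustrative, and `FileWriteGuard`'s `Write` impl is shown earlier in this file):

use std::io::Write;

fn try_claim(lock_path: &AbsPath) -> Result<bool, IoError> {
    // Some(guard) means this process created the file; None means another
    // writer won the race (AlreadyExists is mapped to Ok(None) above).
    match fs_util::create_file_if_not_exists(lock_path)? {
        Some(mut f) => {
            let _ = writeln!(f, "pid={}", std::process::id());
            Ok(true)
        }
        None => Ok(false),
    }
}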
 pub struct FileReadGuard { file: File, _guard: IoCounterGuard, @@ -501,16 +702,33 @@ impl Read for FileReadGuard { } } -pub fn open_file<P: AsRef<AbsPath>>(path: P) -> anyhow::Result<FileReadGuard> { +pub fn open_file<P: AsRef<AbsPath>>(path: P) -> Result<FileReadGuard, IoError> { let guard = IoCounterKey::Read.guard(); - let file = File::open(path.as_ref().as_maybe_relativized()) - .with_context(|| format!("open_file({})", P::as_ref(&path).display()))?; + let file = make_error!( + File::open(path.as_ref().as_maybe_relativized()), + format!("open_file({})", P::as_ref(&path).display()), + )?; Ok(FileReadGuard { file, _guard: guard, }) } +pub fn open_file_if_exists<P: AsRef<AbsPath>>(path: P) -> Result<Option<FileReadGuard>, IoError> { + let guard = IoCounterKey::Read.guard(); + let Some(file) = make_error!( + if_exists(File::open(path.as_ref().as_maybe_relativized())), + format!("open_file({})", P::as_ref(&path).display()), + )? + else { + return Ok(None); + }; + Ok(Some(FileReadGuard { + file, + _guard: guard, + })) +} + // Create a relative path in a cross-platform way, we need this since RelativePath fails when // converting backslashes which means windows paths end up failing. RelativePathBuf doesn't have // this problem and we can easily coerce it into a RelativePath. @@ -842,7 +1060,7 @@ mod tests { let dir_path = root.join("dir"); create_dir_all(AbsPath::new(&dir_path)?)?; assert_matches!(remove_file(&dir_path), Err(..)); - assert!(fs::try_exists(&dir_path)?); + assert!(dir_path.try_exists()?); Ok(()) } @@ -884,7 +1102,7 @@ mod tests { let path = root.join("file"); fs::write(&path, b"regular")?; remove_all(&path)?; - assert!(!fs::try_exists(&path)?); + assert!(!path.try_exists()?); Ok(()) } @@ -896,7 +1114,7 @@ mod tests { fs::create_dir(&path)?; fs::write(path.join("file"), b"regular file in a dir")?; remove_all(&path)?; - assert!(!fs::try_exists(&path)?); + assert!(!path.try_exists()?); Ok(()) } @@ -926,6 +1144,18 @@ mod tests { Ok(()) } + #[cfg(unix)] + #[test] + fn remove_all_path_contains_regular_file() -> anyhow::Result<()> { + let tempdir = tempfile::tempdir()?; + let root = AbsPath::new(tempdir.path())?; + let regular_file = root.join("foo"); + fs_util::write(&regular_file, b"data")?; + let path = root.join("foo/bar"); + assert!(remove_all(&path).is_ok()); + Ok(()) + } + #[test] fn remove_dir_all_does_not_remove_file() -> anyhow::Result<()> { let tempdir = tempfile::tempdir()?; @@ -933,7 +1163,7 @@ mod tests { let file_path = root.join("file"); fs::write(&file_path, b"File content")?; assert!(remove_dir_all(&file_path).is_err()); - assert!(fs::try_exists(&file_path)?); + assert!(file_path.try_exists()?); Ok(()) } @@ -1031,4 +1261,39 @@ mod tests { assert_eq!(0o111, mode & 0o111); } } + + #[cfg(unix)] + #[test] + fn test_remove_all_removes_readonly_path() -> anyhow::Result<()> { + let tempdir = tempfile::tempdir()?; + let root = AbsPath::new(tempdir.path())?; + let path = root.join("foo/bar/link"); + fs_util::create_dir_all(path.parent().unwrap())?; + fs_util::write(&path, b"data")?; + let mut perm = fs_util::metadata(&path)?.permissions(); + perm.set_readonly(true); + fs_util::set_permissions(&path, perm)?; + fs_util::remove_all(&path)?; + assert!(!path.exists()); + Ok(()) + } + + #[test] + fn test_create_file_if_not_exists() -> anyhow::Result<()> { + let tempdir = tempfile::tempdir()?; + let root = AbsPath::new(tempdir.path())?; + let path = root.join("foo.txt"); + let _file = fs_util::create_file_if_not_exists(&path)?.unwrap(); + assert!(fs_util::create_file_if_not_exists(path)?.is_none()); + Ok(()) + } + + #[test] + fn test_disk_space_stats() -> anyhow::Result<()> { + let tempdir = tempfile::tempdir()?; + let root = AbsPath::new(tempdir.path())?; + let disk_space = fs_util::disk_space_stats(&root)?; + assert!(disk_space.total_space > disk_space.free_space); + Ok(()) + } }
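A sketch of the `open_file_if_exists` helper added above (it relies on `FileReadGuard`'s `Read` impl, also in this file; illustrative, not part of the diff):

use std::io::Read;

fn read_optional(path: &AbsPath) -> anyhow::Result<Option<String>> {
    let Some(mut f) = fs_util::open_file_if_exists(path)? else {
        return Ok(None);
    };
    let mut contents = String::new();
    f.read_to_string(&mut contents)?;
    Ok(Some(contents))
}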
diff --git a/app/buck2_core/src/fs/paths.rs b/app/buck2_core/src/fs/paths.rs new file mode 100644 index 0000000000000..a0536e466df8b --- /dev/null +++ b/app/buck2_core/src/fs/paths.rs @@ -0,0 +1,111 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! +//! The paths module for buck2. +//! +//! Introduces 'ForwardRelativePath', 'ForwardRelativePathBuf', 'AbsPath', and +//! 'AbsPathBuf', which are equivalents of 'Path' and 'PathBuf'. +//! +//! ForwardRelativePaths are fully normalized, relative, platform-agnostic paths +//! that only point forward. This means that there is no `.` or `..` in such a +//! path, and it does not begin with `/`. These are resolved to a 'PathBuf' by +//! resolving them against an 'AbsPath'. +//! +//! An 'AbsPath' is an absolute path, meaning it must start with a directory root +//! of either `/` or some windows root directory like `c:`. These behave +//! roughly like 'Path'. + +pub mod abs_norm_path; +pub mod abs_path; +mod cmp_impls; +pub mod file_name; +pub(crate) mod fmt; +pub mod forward_rel_path; +mod into_filename_buf_iterator; +pub mod path_util; +// non public internal references + +pub use into_filename_buf_iterator::*; +pub use relative_path::RelativePath; +pub use relative_path::RelativePathBuf; + +#[cfg(test)] +mod tests { + use std::collections::HashMap; + + use crate::fs::paths::abs_norm_path::AbsNormPath; + use crate::fs::paths::abs_norm_path::AbsNormPathBuf; + use crate::fs::paths::forward_rel_path::ForwardRelativePath; + use crate::fs::paths::forward_rel_path::ForwardRelativePathBuf; + use crate::fs::project_rel_path::ProjectRelativePath; + + #[test] + fn wrapped_paths_work_in_maps() -> anyhow::Result<()> { + let mut map = HashMap::new(); + + let p1 = ForwardRelativePath::new("foo")?; + let p2 = ProjectRelativePath::new("bar")?; + + map.insert(p1.to_buf(), p2.to_buf()); + + assert_eq!(Some(p2), map.get(p1).map(|p| p.as_ref())); + + Ok(()) + } + + #[test] + fn path_buf_is_clonable() -> anyhow::Result<()> { + let buf = ForwardRelativePathBuf::unchecked_new("foo".into()); + let buf_ref = &buf; + + let cloned: ForwardRelativePathBuf = buf_ref.clone(); + assert_eq!(buf, cloned); + + Ok(()) + } + + #[test] + fn relative_path_display_is_readable() -> anyhow::Result<()> { + let buf = ForwardRelativePathBuf::unchecked_new("foo/bar".into()); + assert_eq!("foo/bar", format!("{}", buf)); + assert_eq!("ForwardRelativePathBuf(\"foo/bar\")", format!("{:?}", buf)); + let refpath: &ForwardRelativePath = &buf; + assert_eq!("foo/bar", format!("{}", refpath)); + assert_eq!("ForwardRelativePath(\"foo/bar\")", format!("{:?}", refpath)); + + Ok(()) + } + + #[cfg(not(windows))] + #[test] + fn absolute_path_display_is_readable() -> anyhow::Result<()> { + let buf = AbsNormPathBuf::from("/foo/bar".into())?; + assert_eq!("/foo/bar", format!("{}", buf)); + assert_eq!("AbsNormPathBuf(\"/foo/bar\")", format!("{:?}", buf)); + let refpath: &AbsNormPath = &buf; + assert_eq!("/foo/bar", format!("{}", refpath)); + assert_eq!("AbsNormPath(\"/foo/bar\")", format!("{:?}", refpath)); + + Ok(()) + } + + #[cfg(windows)] + #[test] + fn absolute_path_display_is_readable() -> anyhow::Result<()> { + let buf = AbsNormPathBuf::from("C:/foo/bar".into())?; + assert_eq!("C:/foo/bar", format!("{}", buf)); + assert_eq!("AbsNormPathBuf(\"C:/foo/bar\")", format!("{:?}", buf)); + let refpath: &AbsNormPath = &buf; + assert_eq!("C:/foo/bar", format!("{}", refpath)); + assert_eq!("AbsNormPath(\"C:/foo/bar\")", format!("{:?}", refpath)); + + Ok(()) + } +}
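A minimal sketch of the relationship the module doc above describes — a platform-agnostic `ForwardRelativePath` is resolved by joining it onto an absolute path (illustrative only; unix-style literals for brevity):

use buck2_core::fs::paths::abs_norm_path::AbsNormPath;
use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath;

fn demo() -> anyhow::Result<()> {
    let root = AbsNormPath::new("/repo")?;
    let rel = ForwardRelativePath::new("cell/BUCK")?;
    // join accepts anything AsRef<ForwardRelativePath>.
    assert_eq!("/repo/cell/BUCK", root.join(rel).to_string());
    Ok(())
}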
diff --git a/app/buck2_core/src/fs/paths/abs_norm_path.rs b/app/buck2_core/src/fs/paths/abs_norm_path.rs index 82d9415c0983b..f7dfa0c860535 100644 --- a/app/buck2_core/src/fs/paths/abs_norm_path.rs +++ b/app/buck2_core/src/fs/paths/abs_norm_path.rs @@ -14,6 +14,7 @@ use std::ffi::OsString; use std::ops::Deref; use std::path::Path; use std::path::PathBuf; +use std::str::FromStr; use allocative::Allocative; use derive_more::Display; @@ -22,7 +23,6 @@ use relative_path::RelativePath; use serde::de::Error; use serde::Deserialize; use serde::Serialize; -use thiserror::Error; use crate::fs::paths::abs_path::AbsPath; use crate::fs::paths::abs_path::AbsPathBuf; @@ -39,7 +39,7 @@ use crate::fs::paths::forward_rel_path::ForwardRelativePathNormalizer; /// * TODO(nga): validate UTF-8 /// * the path is **not** canonicalized #[derive(Display, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, RefCast)] -#[display(fmt = "{}", "_0.display()")] +#[display("{}", _0.display())] #[repr(transparent)] pub struct AbsNormPath(AbsPath); @@ -47,7 +47,7 @@ pub struct AbsNormPath(AbsPath); #[derive( Clone, Display, Debug, Hash, PartialEq, Eq, Ord, PartialOrd, Allocative )] -#[display(fmt = "{}", "_0.display()")] +#[display("{}", _0.display())] pub struct AbsNormPathBuf(AbsPathBuf); impl AsRef<Path> for AbsNormPath { @@ -109,9 +109,9 @@ impl AbsNormPath { /// /// assert!(AbsNormPath::new("foo/bar").is_err()); /// if cfg!(windows) { - /// assert!(AbsNormPath::new("C:\\foo\\bar").is_ok()); + /// assert!(AbsNormPath::new("C:\\foo\\bar").is_ok()); /// } else { - /// assert!(AbsNormPath::new("/foo/bar").is_ok()); + /// assert!(AbsNormPath::new("/foo/bar").is_ok()); /// } /// ``` pub fn new<P: ?Sized + AsRef<OsStr>>(p: &P) -> anyhow::Result<&AbsNormPath> { @@ -124,28 +124,44 @@ impl AbsNormPath { /// /// ``` /// use std::path::Path; - /// use buck2_core::fs::paths::abs_norm_path::{AbsNormPath, AbsNormPathBuf}; + /// + /// use buck2_core::fs::paths::abs_norm_path::AbsNormPath; + /// use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; /// /// if cfg!(not(windows)) { /// let abs_path = AbsNormPath::new("/my")?; - /// assert_eq!(AbsNormPathBuf::from("/my/foo/bar".into())?, abs_path.join(ForwardRelativePath::new("foo/bar")?)); + /// assert_eq!( + /// AbsNormPathBuf::from("/my/foo/bar".into())?, + /// abs_path.join(ForwardRelativePath::new("foo/bar")?) + /// ); /// } else { /// let abs_path = AbsNormPath::new("C:\\my")?; - /// assert_eq!("C:\\my\\foo\\bar", abs_path.join(ForwardRelativePath::new("foo/bar")?).to_string()); + /// assert_eq!( + /// "C:\\my\\foo\\bar", + /// abs_path + /// .join(ForwardRelativePath::new("foo/bar")?) 
+ /// .to_string() + /// ); /// } /// # anyhow::Ok(()) /// ``` #[allow(clippy::collapsible_else_if)] pub fn join<P: AsRef<ForwardRelativePath>>(&self, path: P) -> AbsNormPathBuf { + self.join_cow(path).into_owned() + } + + pub fn join_cow<'a, P: AsRef<ForwardRelativePath>>(&'a self, path: P) -> Cow<'a, AbsNormPath> { let path = path.as_ref(); if path.is_empty() { - self.to_buf() + Cow::Borrowed(self) } else { if cfg!(windows) { - AbsNormPathBuf(self.0.join(path.as_str().replace('/', "\\"))) + Cow::Owned(AbsNormPathBuf( + self.0.join(path.as_str().replace('/', "\\")), + )) } else { - AbsNormPathBuf(self.0.join(path.as_str())) + Cow::Owned(AbsNormPathBuf(self.0.join(path.as_str()))) } } } @@ -154,6 +170,7 @@ impl AbsNormPath { /// /// ``` /// use std::path::Path; + /// /// use buck2_core::fs::paths::abs_norm_path::AbsNormPath; /// /// if cfg!(not(windows)) { /// assert_eq!( /// Some(AbsNormPath::new("/")?), /// AbsNormPath::new("/my")?.parent() /// ); - /// assert_eq!( - /// None, - /// AbsNormPath::new("/")?.parent() - /// ); + /// assert_eq!(None, AbsNormPath::new("/")?.parent()); /// } else { /// assert_eq!( /// Some(AbsNormPath::new("c:/")?), /// AbsNormPath::new("c:/my")?.parent() /// ); - /// assert_eq!( - /// None, - /// AbsNormPath::new("c:/")?.parent() - /// ); + /// assert_eq!(None, AbsNormPath::new("c:/")?.parent()); /// } /// /// # anyhow::Ok(()) @@ -189,7 +200,9 @@ impl AbsNormPath { /// path is not a 'ForwardRelativePath' /// /// ``` - /// use std::{borrow::Cow, path::Path}; + /// use std::borrow::Cow; + /// use std::path::Path; + /// /// use buck2_core::fs::paths::abs_norm_path::AbsNormPath; /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; /// @@ -233,8 +246,16 @@ impl AbsNormPath { /// shared_path.strip_prefix(AbsNormPath::new(r"\\?\UNC\server\share\foo")?)?, /// Cow::Borrowed(ForwardRelativePath::new("bar.txt")?) /// ); - /// assert!(shared_path.strip_prefix(AbsNormPath::new(r"\\server\share2\foo")?).is_err()); - /// assert!(shared_path.strip_prefix(AbsNormPath::new(r"\\server\share\fo")?).is_err()); + /// assert!( + /// shared_path + /// .strip_prefix(AbsNormPath::new(r"\\server\share2\foo")?) + /// .is_err() + /// ); + /// assert!( + /// shared_path + /// .strip_prefix(AbsNormPath::new(r"\\server\share\fo")?) + /// .is_err() + /// ); /// } /// /// # anyhow::Ok(()) @@ -267,6 +288,7 @@ impl AbsNormPath { /// /// ``` /// use std::path::Path; + /// /// use buck2_core::fs::paths::abs_norm_path::AbsNormPath; /// /// if cfg!(not(windows)) { @@ -319,6 +341,7 @@ impl AbsNormPath { /// /// ``` /// use std::path::Path; + /// /// use buck2_core::fs::paths::abs_norm_path::AbsNormPath; /// /// if cfg!(not(windows)) { @@ -338,7 +361,8 @@ impl AbsNormPath { /// Build an owned `AbsPathBuf`, joined with the given path and normalized. /// /// ``` - /// use buck2_core::fs::paths::abs_norm_path::{AbsNormPath, AbsNormPathBuf}; + /// use buck2_core::fs::paths::abs_norm_path::AbsNormPath; + /// use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; /// /// if cfg!(not(windows)) { /// assert_eq!( @@ -347,7 +371,9 @@ impl AbsNormPath { /// ); /// /// assert_eq!( - /// AbsNormPath::new("/foo")?.join_normalized("../../baz.txt").is_err(), + /// AbsNormPath::new("/foo")? + /// .join_normalized("../../baz.txt") + /// .is_err(), /// true /// ); /// } else { @@ -357,7 +383,9 @@ impl AbsNormPath { /// ); /// /// assert_eq!( - /// AbsNormPath::new("c:/foo")?.join_normalized("../../baz.txt").is_err(), + /// AbsNormPath::new("c:/foo")? 
+ /// .join_normalized("../../baz.txt") + /// .is_err(), /// true /// ); /// } @@ -412,11 +440,23 @@ impl AbsNormPath { /// assert_eq!("D", AbsNormPath::new("d:/foo/bar")?.windows_prefix()?); /// assert_eq!("D", AbsNormPath::new(r"D:\foo\bar")?.windows_prefix()?); /// assert_eq!("E", AbsNormPath::new(r"\\?\E:\foo\bar")?.windows_prefix()?); - /// assert_eq!("server\\share", AbsNormPath::new(r"\\server\share")?.windows_prefix()?); - /// assert_eq!("server\\share", AbsNormPath::new(r"\\server\share\foo\bar")?.windows_prefix()?); - /// assert_eq!("server\\share", AbsNormPath::new(r"\\?\UNC\server\share")?.windows_prefix()?); + /// assert_eq!( + /// "server\\share", + /// AbsNormPath::new(r"\\server\share")?.windows_prefix()? + /// ); + /// assert_eq!( + /// "server\\share", + /// AbsNormPath::new(r"\\server\share\foo\bar")?.windows_prefix()? + /// ); + /// assert_eq!( + /// "server\\share", + /// AbsNormPath::new(r"\\?\UNC\server\share")?.windows_prefix()? + /// ); /// assert_eq!("COM42", AbsNormPath::new(r"\\.\COM42")?.windows_prefix()?); - /// assert_eq!("COM42", AbsNormPath::new(r"\\.\COM42\foo\bar")?.windows_prefix()?); + /// assert_eq!( + /// "COM42", + /// AbsNormPath::new(r"\\.\COM42\foo\bar")?.windows_prefix()? + /// ); /// /// # anyhow::Ok(()) /// ``` @@ -452,16 +492,41 @@ impl AbsNormPath { /// /// ``` /// use std::path::Path; + /// /// use buck2_core::fs::paths::abs_norm_path::AbsNormPath; /// - /// assert_eq!(Path::new(""), AbsNormPath::new("C:/")?.strip_windows_prefix()?); - /// assert_eq!(Path::new(""), AbsNormPath::new("C:\\")?.strip_windows_prefix()?); - /// assert_eq!(Path::new("foo/bar"), AbsNormPath::new("d:/foo/bar")?.strip_windows_prefix()?); - /// assert_eq!(Path::new("foo\\bar"), AbsNormPath::new(r"D:\foo\bar")?.strip_windows_prefix()?); - /// assert_eq!(Path::new("foo\\bar"), AbsNormPath::new(r"\\?\D:\foo\bar")?.strip_windows_prefix()?); - /// assert_eq!(Path::new("path"), AbsNormPath::new(r"\\server\share\path")?.strip_windows_prefix()?); - /// assert_eq!(Path::new("path"), AbsNormPath::new(r"\\?\UNC\server\share\path")?.strip_windows_prefix()?); - /// assert_eq!(Path::new("abc"), AbsNormPath::new(r"\\.\COM42\abc")?.strip_windows_prefix()?); + /// assert_eq!( + /// Path::new(""), + /// AbsNormPath::new("C:/")?.strip_windows_prefix()? + /// ); + /// assert_eq!( + /// Path::new(""), + /// AbsNormPath::new("C:\\")?.strip_windows_prefix()? + /// ); + /// assert_eq!( + /// Path::new("foo/bar"), + /// AbsNormPath::new("d:/foo/bar")?.strip_windows_prefix()? + /// ); + /// assert_eq!( + /// Path::new("foo\\bar"), + /// AbsNormPath::new(r"D:\foo\bar")?.strip_windows_prefix()? + /// ); + /// assert_eq!( + /// Path::new("foo\\bar"), + /// AbsNormPath::new(r"\\?\D:\foo\bar")?.strip_windows_prefix()? + /// ); + /// assert_eq!( + /// Path::new("path"), + /// AbsNormPath::new(r"\\server\share\path")?.strip_windows_prefix()? + /// ); + /// assert_eq!( + /// Path::new("path"), + /// AbsNormPath::new(r"\\?\UNC\server\share\path")?.strip_windows_prefix()? + /// ); + /// assert_eq!( + /// Path::new("abc"), + /// AbsNormPath::new(r"\\.\COM42\abc")?.strip_windows_prefix()? 
+ /// ); /// /// # anyhow::Ok(()) /// ``` @@ -540,24 +605,26 @@ impl AbsNormPathBuf { /// Pushes a `ForwardRelativePath` to the existing buffer /// ``` - /// /// use std::path::PathBuf; + /// /// use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; /// - /// let prefix = if cfg!(windows) { - /// "C:" - /// } else { - /// "" - /// }; + /// let prefix = if cfg!(windows) { "C:" } else { "" }; /// /// let mut path = AbsNormPathBuf::try_from(format!("{prefix}/foo")).unwrap(); /// path.push(ForwardRelativePath::unchecked_new("bar")); /// - /// assert_eq!(AbsNormPathBuf::try_from(format!("{prefix}/foo/bar")).unwrap(), path); + /// assert_eq!( + /// AbsNormPathBuf::try_from(format!("{prefix}/foo/bar")).unwrap(), + /// path + /// ); /// /// path.push(ForwardRelativePath::unchecked_new("more/file.rs")); - /// assert_eq!(AbsNormPathBuf::try_from(format!("{prefix}/foo/bar/more/file.rs")).unwrap(), path); + /// assert_eq!( + /// AbsNormPathBuf::try_from(format!("{prefix}/foo/bar/more/file.rs")).unwrap(), + /// path + /// ); /// ``` pub fn push<P: AsRef<ForwardRelativePath>>(&mut self, path: P) { if cfg!(windows) { @@ -571,35 +638,48 @@ impl AbsNormPathBuf { /// Note that this does not visit the filesystem to resolve `..`s. Instead, it cancels out the /// components directly, similar to `join_normalized`. /// ``` - /// /// use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; /// use buck2_core::fs::paths::RelativePath; /// - /// let prefix = if cfg!(windows) { - /// "C:" - /// } else { - /// "" - /// }; + /// let prefix = if cfg!(windows) { "C:" } else { "" }; /// /// let mut path = AbsNormPathBuf::try_from(format!("{prefix}/foo")).unwrap(); /// path.push_normalized(RelativePath::new("bar"))?; /// - /// assert_eq!(AbsNormPathBuf::try_from(format!("{prefix}/foo/bar")).unwrap(), path); + /// assert_eq!( + /// AbsNormPathBuf::try_from(format!("{prefix}/foo/bar")).unwrap(), + /// path + /// ); /// /// path.push_normalized(RelativePath::new("more/file.rs"))?; - /// assert_eq!(AbsNormPathBuf::try_from(format!("{prefix}/foo/bar/more/file.rs")).unwrap(), path); + /// assert_eq!( + /// AbsNormPathBuf::try_from(format!("{prefix}/foo/bar/more/file.rs")).unwrap(), + /// path + /// ); /// /// path.push_normalized(RelativePath::new("../other.rs"))?; - /// assert_eq!(AbsNormPathBuf::try_from(format!("{prefix}/foo/bar/more/other.rs")).unwrap(), path); + /// assert_eq!( + /// AbsNormPathBuf::try_from(format!("{prefix}/foo/bar/more/other.rs")).unwrap(), + /// path + /// ); /// /// path.push_normalized(RelativePath::new(".."))?; - /// assert_eq!(AbsNormPathBuf::try_from(format!("{prefix}/foo/bar/more")).unwrap(), path); + /// assert_eq!( + /// AbsNormPathBuf::try_from(format!("{prefix}/foo/bar/more")).unwrap(), + /// path + /// ); /// /// path.push_normalized(RelativePath::new("../.."))?; - /// assert_eq!(AbsNormPathBuf::try_from(format!("{prefix}/foo")).unwrap(), path); + /// assert_eq!( + /// AbsNormPathBuf::try_from(format!("{prefix}/foo")).unwrap(), + /// path + /// ); /// /// path.push_normalized(RelativePath::new(".."))?; - /// assert_eq!(AbsNormPathBuf::try_from(format!("{prefix}/")).unwrap(), path); + /// assert_eq!( + /// AbsNormPathBuf::try_from(format!("{prefix}/")).unwrap(), + /// path + /// ); /// /// assert!(path.push_normalized(RelativePath::new("..")).is_err()); /// @@ -637,9 +717,9 @@ impl TryFrom<String> for AbsNormPathBuf { /// no allocation conversion /// /// ``` + /// use std::convert::TryFrom; /// /// use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; 
- /// use std::convert::TryFrom; /// /// assert!(AbsNormPathBuf::try_from("relative/bar".to_owned()).is_err()); /// @@ -675,11 +755,11 @@ impl TryFrom<PathBuf> for AbsNormPathBuf { /// no allocation conversion /// /// ``` - /// - /// use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; /// use std::convert::TryFrom; /// use std::path::PathBuf; /// + /// use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; + /// /// assert!(AbsNormPathBuf::try_from(PathBuf::from("relative/bar")).is_err()); /// /// if cfg!(not(windows)) { @@ -701,6 +781,14 @@ impl TryFrom<PathBuf> for AbsNormPathBuf { } } +impl FromStr for AbsNormPathBuf { + type Err = anyhow::Error; + + fn from_str(s: &str) -> anyhow::Result<Self> { + AbsNormPathBuf::try_from(s.to_owned()) + } +} + impl ToOwned for AbsNormPath { type Owned = AbsNormPathBuf; @@ -735,13 +823,37 @@ impl Deref for AbsNormPathBuf { } } +impl PartialEq<AbsNormPath> for AbsNormPathBuf { + fn eq(&self, other: &AbsNormPath) -> bool { + self.0 == other.0 + } +} + +impl PartialEq<&'_ AbsNormPath> for AbsNormPathBuf { + fn eq(&self, other: &&AbsNormPath) -> bool { + self.0 == other.0 + } +} + +impl PartialEq<AbsNormPathBuf> for AbsNormPath { + fn eq(&self, other: &AbsNormPathBuf) -> bool { + other == self + } +} + +impl PartialEq<AbsNormPathBuf> for &'_ AbsNormPath { + fn eq(&self, other: &AbsNormPathBuf) -> bool { + other == self + } +} +
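The new `FromStr` means an `AbsNormPathBuf` can come straight out of `str::parse`; a sketch (unix-style literal, assuming the usual crate paths), also exercising the explicit `PartialEq` impls added above:

use buck2_core::fs::paths::abs_norm_path::{AbsNormPath, AbsNormPathBuf};

fn demo() -> anyhow::Result<()> {
    let parsed: AbsNormPathBuf = "/repo/cell".parse()?;
    // Buf-vs-borrowed comparison via PartialEq<&AbsNormPath> for AbsNormPathBuf.
    assert_eq!(parsed, AbsNormPath::new("/repo/cell")?);
    Ok(())
}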
 // Separate function so windows path verification can be tested on Unix. fn verify_abs_path_windows_part(path: &str) -> bool { // UNC device path. // TODO(nga): behavior of UNC paths is under-specified in `AbsPath`. let path = path.strip_prefix("\\\\.\\").unwrap_or(path); - for component in path.split(|c| c == '/' || c == '\\') { + for component in path.split(['/', '\\']) { if component == "." || component == ".." { return false; } @@ -750,7 +862,7 @@ fn verify_abs_path_windows_part(path: &str) -> bool { true } -/// Verifier for AbsPath to ensure the path is absolute +/// Verifier for AbsPath to ensure the path is normalized fn verify_abs_path(path: &AbsPath) -> anyhow::Result<()> { // `Path::components` normalizes '.'s away so we cannot iterate with it. // TODO maybe we actually want to allow "."s and just @@ -784,14 +896,14 @@ fn verify_abs_path(path: &AbsPath) -> anyhow::Result<()> { } /// Errors from 'AbsPath' creation -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] enum AbsNormPathError { #[error("expected a normalized path, but found a non-normalized path instead: `{0}`")] PathNotNormalized(AbsPathBuf), } /// Errors from normalizing paths -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] enum PathNormalizationError { #[error( "no such path: normalizing `{}` requires the parent directory of the root of `{}`", @@ -852,37 +964,12 @@ mod tests { let path2 = AbsNormPath::new(foo_string.as_str())?; let path3 = AbsNormPath::new(bar_string.as_str())?; - let str2 = foo_string.as_str(); - let str3 = bar_string.as_str(); - let str_not_abs = "ble"; - - let string_not_abs = "ble".to_owned(); - assert_eq!(path1_buf, path2_buf); assert_ne!(path1_buf, path3_buf); assert_eq!(path1, path2); assert_ne!(path1, path3); - assert_eq!(path1_buf, path2); - assert_ne!(path1, path3_buf); - - assert_eq!(path1_buf, str2); - assert_ne!(path1_buf, str3); - assert_ne!(path1_buf, str_not_abs); - - assert_eq!(path1, str2); - assert_ne!(path1, str3); - assert_ne!(path1, str_not_abs); - - assert_eq!(path1_buf, foo_string); - assert_ne!(path1_buf, bar_string); - assert_ne!(path1_buf, string_not_abs); - - assert_eq!(path1, foo_string); - assert_ne!(path1, bar_string); - assert_ne!(path1, string_not_abs); - Ok(()) } diff --git a/app/buck2_core/src/fs/paths/abs_path.rs b/app/buck2_core/src/fs/paths/abs_path.rs index 49a046066748f..affb612128c99 100644 --- a/app/buck2_core/src/fs/paths/abs_path.rs +++ b/app/buck2_core/src/fs/paths/abs_path.rs @@ -18,13 +18,12 @@ use std::str::FromStr; use allocative::Allocative; use derive_more::Display; use ref_cast::RefCast; -use thiserror::Error; use crate::fs::cwd; -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] enum AbsPathError { - #[error("expected an absolute path but got a relative path instead: `{0}`")] + #[error("expected an absolute path but got a relative path instead: `{}`", _0.display())] PathNotAbsolute(PathBuf), #[error("Cannot convert path to UTF-8, `{0:?}`")] PathCannotBeConvertedToUtf8(OsString), @@ -46,7 +45,7 @@ pub struct AbsPath(Path); serde::Serialize, serde::Deserialize )] -#[display(fmt = "{}", "_0.display()")] +#[display("{}", _0.display())] pub struct AbsPathBuf(PathBuf); impl fmt::Debug for AbsPath { @@ -115,14 +114,42 @@ impl ToOwned for AbsPath { } } +impl PartialEq<AbsPath> for AbsPathBuf { + fn eq(&self, other: &AbsPath) -> bool { + self.0 == other.0 + } +} + +impl PartialEq<&'_ AbsPath> for AbsPathBuf { + fn eq(&self, other: &&AbsPath) -> bool { + self.0 == other.0 + } +} + +impl PartialEq<AbsPathBuf> for AbsPath { + fn eq(&self, other: &AbsPathBuf) -> bool { + self.0 == other.0 + } +} + +impl PartialEq<AbsPathBuf> for &'_ AbsPath { + fn eq(&self, other: &AbsPathBuf) -> bool { + self.0 == other.0 + } +} + impl AbsPath { - pub fn new(path: &Path) -> anyhow::Result<&AbsPath> { - if path.is_absolute() { - // SAFETY: repr transparent. - Ok(unsafe { &*(path as *const Path as *const AbsPath) }) - } else { - Err(AbsPathError::PathNotAbsolute(path.to_path_buf()).into()) + pub fn new<'a, P: AsRef<Path> + ?Sized>(path: &'a P) -> anyhow::Result<&'a AbsPath> { + // Wrapper function to make sure the lifetimes are right + fn inner(path: &Path) -> anyhow::Result<&AbsPath> { + if path.is_absolute() { + // SAFETY: repr transparent. 
+ Ok(unsafe { &*(path as *const Path as *const AbsPath) }) + } else { + Err(AbsPathError::PathNotAbsolute(path.to_path_buf()).into()) + } } + inner(path.as_ref()) } pub fn as_path(&self) -> &Path { @@ -138,8 +165,7 @@ impl AbsPath { pub fn join<P: AsRef<Path>>(&self, other: P) -> AbsPathBuf { let path = self.0.join(other); - assert!(path.is_absolute()); - AbsPathBuf(path) + AbsPathBuf::new(path).unwrap() } pub fn parent(&self) -> Option<&AbsPath> { diff --git a/app/buck2_core/src/fs/paths/cmp_impls.rs b/app/buck2_core/src/fs/paths/cmp_impls.rs index bb6bf86ad61d1..8cf0c5d5be6c5 100644 --- a/app/buck2_core/src/fs/paths/cmp_impls.rs +++ b/app/buck2_core/src/fs/paths/cmp_impls.rs @@ -9,7 +9,6 @@ //! //! General macros useful for path declaration -//! use std::cmp; @@ -48,6 +47,16 @@ macro_rules! impl_cmp { }; } +#[inline] +fn eq_as_ref<T: PartialEq + ?Sized>(a: impl AsRef<T>, b: impl AsRef<T>) -> bool { + a.as_ref() == b.as_ref() +} + +#[inline] +fn partial_cmp_as_ref<T: PartialOrd + ?Sized>(a: impl AsRef<T>, b: impl AsRef<T>) -> Option<cmp::Ordering> { + a.as_ref().partial_cmp(b.as_ref()) +} + /// /// Generates ['cmp::PartialEq'] and ['cmp::PartialOrd'] for the `lhs` and `rhs` /// string types, where `ty` is the unowned, reference path type. @@ -56,40 +65,28 @@ macro_rules! impl_cmp_str { impl cmp::PartialEq<$rhs> for $lhs { #[inline] fn eq(&self, other: &$rhs) -> bool { - match <$ty>::new(other) { - Ok(other) => <$ty as cmp::PartialEq>::eq(self, other), - _ => false, - } + eq_as_ref(self, other) } } impl cmp::PartialEq<$lhs> for $rhs { #[inline] fn eq(&self, other: &$lhs) -> bool { - match <$ty>::new(self) { - Ok(this) => <$ty as cmp::PartialEq>::eq(this, other), - _ => false, - } + eq_as_ref(self, other) } } impl cmp::PartialOrd<$rhs> for $lhs { #[inline] fn partial_cmp(&self, other: &$rhs) -> Option<cmp::Ordering> { - match <$ty>::new(other) { - Ok(other) => <$ty as cmp::PartialOrd>::partial_cmp(self, other), - _ => None, - } + partial_cmp_as_ref(self, other) } } impl cmp::PartialOrd<$lhs> for $rhs { #[inline] fn partial_cmp(&self, other: &$lhs) -> Option<cmp::Ordering> { - match <$ty>::new(self) { - Ok(this) => <$ty as cmp::PartialOrd>::partial_cmp(this, other), - _ => None, - } + partial_cmp_as_ref(self, other) } } }; } @@ -118,21 +115,6 @@ impl_cmp_str!(ForwardRelativePath, String, ForwardRelativePath); impl_cmp_str!(&'_ ForwardRelativePath, str, ForwardRelativePath); impl_cmp_str!(&'_ ForwardRelativePath, String, ForwardRelativePath); -use crate::fs::paths::abs_norm_path::AbsNormPath; -use crate::fs::paths::abs_norm_path::AbsNormPathBuf; - -impl_cmp!(AbsNormPathBuf, AbsNormPath, AbsNormPath); -impl_cmp!(AbsNormPathBuf, &'_ AbsNormPath, AbsNormPath); - -impl_cmp_str!(AbsNormPathBuf, str, AbsNormPath); -impl_cmp_str!(AbsNormPathBuf, &'_ str, AbsNormPath); -impl_cmp_str!(AbsNormPathBuf, String, AbsNormPath); -impl_cmp_str!(AbsNormPath, str, AbsNormPath); -impl_cmp_str!(AbsNormPath, &'_ str, AbsNormPath); -impl_cmp_str!(AbsNormPath, String, AbsNormPath); -impl_cmp_str!(&'_ AbsNormPath, str, AbsNormPath); -impl_cmp_str!(&'_ AbsNormPath, String, AbsNormPath); - use crate::package::package_relative_path::PackageRelativePath; use crate::package::package_relative_path::PackageRelativePathBuf;
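With the comparison macro rewritten over `AsRef<str>`, a path-vs-string check is now a plain string comparison rather than parse-then-compare; an observable-behavior sketch (illustrative only):

use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath;

fn demo() -> anyhow::Result<()> {
    let p = ForwardRelativePath::new("foo/bar")?;
    assert!(p == "foo/bar".to_owned());
    // A non-normalized candidate no longer fails construction first;
    // it simply compares unequal byte-for-byte.
    assert!(p != "./foo/bar".to_owned());
    Ok(())
}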
 diff --git a/app/buck2_core/src/fs/paths/file_name.rs b/app/buck2_core/src/fs/paths/file_name.rs index 87269a365628b..c0efca2b63ad7 100644 --- a/app/buck2_core/src/fs/paths/file_name.rs +++ b/app/buck2_core/src/fs/paths/file_name.rs @@ -19,14 +19,13 @@ use compact_str::CompactString; use derive_more::Display; use ref_cast::RefCast; use relative_path::RelativePath; -use thiserror::Error; use crate::cells::paths::CellRelativePath; use crate::fs::paths::forward_rel_path::ForwardRelativePath; use crate::package::package_relative_path::PackageRelativePath; /// Errors from ForwardRelativePath creation -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] enum FileNameError { #[error("file name is empty")] Empty, @@ -68,6 +67,12 @@ impl PartialEq<str> for FileName { } } +impl AsRef<FileName> for FileName { + fn as_ref(&self) -> &FileName { + self + } +} + impl PartialEq<FileName> for str { #[inline] fn eq(&self, other: &FileName) -> bool { @@ -178,7 +183,6 @@ impl FileName { /// Extracts the extension of [`self.file_name`], if possible. /// /// ``` - /// /// use buck2_core::fs::paths::file_name::FileName; /// /// assert_eq!(Some("rs"), FileName::new("foo.rs")?.extension()); diff --git a/app/buck2_core/src/fs/paths/forward_rel_path.rs b/app/buck2_core/src/fs/paths/forward_rel_path.rs index 047601e668234..6dfabb5a25959 100644 --- a/app/buck2_core/src/fs/paths/forward_rel_path.rs +++ b/app/buck2_core/src/fs/paths/forward_rel_path.rs @@ -15,22 +15,20 @@ use std::path::Path; use std::path::PathBuf; use allocative::Allocative; +use buck2_util::arc_str::StringInside; use derive_more::Display; use gazebo::transmute; -use ref_cast::RefCast; +use ref_cast::ref_cast_custom; +use ref_cast::RefCastCustom; use relative_path::RelativePath; use relative_path::RelativePathBuf; use serde::Deserialize; use serde::Deserializer; use serde::Serialize; use smallvec::SmallVec; -use thiserror::Error; use crate::fs::fs_util; -use crate::fs::paths::abs_norm_path::AbsNormPath; -use crate::fs::paths::abs_norm_path::AbsNormPathBuf; use crate::fs::paths::file_name::FileName; -use crate::fs::paths::file_name::FileNameBuf; use crate::fs::paths::path_util::path_remove_prefix; /// A forward pointing, fully normalized relative path and owned pathbuf. /// /// This path is platform agnostic, so path separators are always '/'. #[derive( - Display, Debug, RefCast, PartialEq, Eq, PartialOrd, Ord, Hash, Allocative + Display, + Debug, + Serialize, + RefCastCustom, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Allocative )] #[repr(transparent)] pub struct ForwardRelativePath( @@ -50,7 +57,7 @@ pub struct ForwardRelativePath( /// The owned version of 'ForwardRelativePath', like how 'PathBuf' relates to /// 'Path' #[derive( - Clone, Display, Debug, Serialize, PartialEq, Eq, PartialOrd, Ord, Hash, Allocative + Default, Clone, Display, Debug, Serialize, PartialEq, Eq, PartialOrd, Ord, Hash, Allocative )] #[repr(transparent)] pub struct ForwardRelativePathBuf(String); @@ -65,6 +72,18 @@ impl<'de> Deserialize<'de> for ForwardRelativePathBuf { } } +impl StringInside for ForwardRelativePath { + #[inline] + fn as_str(wrapper: &Self) -> &str { + wrapper.as_str() + } + + #[inline] + fn from_str(s: &str) -> &Self { + ForwardRelativePath::unchecked_new(s) + } +} + impl AsRef<RelativePath> for ForwardRelativePath { #[inline] fn as_ref(&self) -> &RelativePath { @@ -79,8 +98,29 @@ impl AsRef<RelativePath> for ForwardRelativePathBuf { } } +impl AsRef<str> for ForwardRelativePath { + #[inline] + fn as_ref(&self) -> &str { + self.as_str() + } +} + +impl AsRef<str> for ForwardRelativePathBuf { + #[inline] + fn as_ref(&self) -> &str { + self.as_str() + } +} + pub struct ForwardRelativePathIter<'a>(&'a ForwardRelativePath); +impl<'a> ForwardRelativePathIter<'a> { + /// Remaining path in the iterator. 
+ pub fn as_path(&self) -> &'a ForwardRelativePath { + self.0 + } +} + impl<'a> Iterator for ForwardRelativePathIter<'a> { type Item = &'a FileName; @@ -92,6 +132,15 @@ impl<'a> Iterator for ForwardRelativePathIter<'a> { } } +impl<'a> DoubleEndedIterator for ForwardRelativePathIter<'a> { + #[inline] + fn next_back(&mut self) -> Option<&'a FileName> { + let (rem, last) = self.0.split_last()?; + self.0 = rem; + Some(last) + } +} + impl<'a> Clone for ForwardRelativePathIter<'a> { fn clone(&self) -> Self { ForwardRelativePathIter(ForwardRelativePath::unchecked_new(self.0.as_str())) @@ -99,6 +148,9 @@ impl<'a> Clone for ForwardRelativePathIter<'a> { } impl ForwardRelativePath { + #[ref_cast_custom] + const fn ref_cast(s: &str) -> &ForwardRelativePath; + #[inline] pub fn unchecked_new<S: ?Sized + AsRef<str>>(s: &S) -> &Self { ForwardRelativePath::ref_cast(s.as_ref()) } #[inline] - pub fn empty() -> &'static Self { - ForwardRelativePath::unchecked_new("") + pub const fn empty() -> &'static Self { + ForwardRelativePath::ref_cast("") } /// Creates an 'ForwardRelativePath' if the given path represents a forward, /// normalized relative path, otherwise error. /// /// ``` - /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; /// use std::path::Path; /// + /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; + /// /// assert!(ForwardRelativePath::new("foo/bar").is_ok()); /// assert!(ForwardRelativePath::new("").is_ok()); /// assert!(ForwardRelativePath::new("./bar").is_err()); @@ -174,30 +227,6 @@ impl ForwardRelativePath { ForwardRelativePath::new(path) } - /// Build an owned `AbsPathBuf` relative to `path` for the current relative - /// path based on the supplied root. - /// - /// ``` - /// - /// use std::path::Path; - /// use buck2_core::fs::paths::abs_norm_path::{AbsNormPath, AbsNormPathBuf}; - /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; - /// - /// if cfg!(not(windows)) { - /// let path = ForwardRelativePath::new("foo/bar")?.resolve(AbsNormPath::new("/some")?); - /// assert_eq!(AbsNormPathBuf::from("/some/foo/bar".into())?, path); - /// } else { - /// let path = ForwardRelativePath::new("foo/bar")?.resolve(AbsNormPath::new("c:/some")?); - /// assert_eq!(AbsNormPathBuf::from("c:/some/foo/bar".into())?, path); - /// } - /// - /// # anyhow::Ok(()) - /// ``` - #[inline] - pub fn resolve<P: AsRef<AbsNormPath>>(&self, relative_to: P) -> AbsNormPathBuf { - relative_to.as_ref().join(self) - } - #[inline] pub fn as_str(&self) -> &str { &self.0 @@ -217,11 +246,16 @@ impl ForwardRelativePath { /// /// ``` /// use std::path::Path; - /// use buck2_core::fs::paths::forward_rel_path::{ForwardRelativePathBuf, ForwardRelativePath}; + /// + /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; + /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; /// /// let path = ForwardRelativePath::new("foo/bar")?; /// let other = ForwardRelativePath::new("baz")?; - /// assert_eq!(ForwardRelativePathBuf::unchecked_new("foo/bar/baz".to_owned()), path.join(other)); + /// assert_eq!( + /// ForwardRelativePathBuf::unchecked_new("foo/bar/baz".to_owned()), + /// path.join(other) + /// ); /// /// # anyhow::Ok(()) /// ``` @@ -241,6 +275,16 @@ impl ForwardRelativePath { } } + pub fn join_cow<'a>(&'a self, path: &'a ForwardRelativePath) -> Cow<'a, ForwardRelativePath> { + if self.is_empty() { + Cow::Borrowed(path) + } else if path.is_empty() { + Cow::Borrowed(self) + } else { + Cow::Owned(self.join(path)) + } + } +
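The `DoubleEndedIterator` impl above (built on `split_last`, added further down in this file) lets callers peel components off either end; a sketch:

use buck2_core::fs::paths::file_name::FileName;
use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath;

fn demo() -> anyhow::Result<()> {
    let mut it = ForwardRelativePath::new("foo/bar/baz")?.iter();
    assert_eq!(Some(FileName::unchecked_new("baz")), it.next_back());
    assert_eq!(Some(FileName::unchecked_new("foo")), it.next());
    // as_path (added above) exposes whatever remains in the middle.
    assert_eq!(ForwardRelativePath::new("bar")?, it.as_path());
    Ok(())
}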
 /// Returns a relative path of the parent directory /// /// ``` @@ -254,10 +298,7 @@ impl ForwardRelativePath { /// Some(ForwardRelativePath::new("")?), /// ForwardRelativePath::new("foo")?.parent() /// ); - /// assert_eq!( - /// None, - /// ForwardRelativePath::new("")?.parent() - /// ); + /// assert_eq!(None, ForwardRelativePath::new("")?.parent()); /// /// # anyhow::Ok(()) /// ``` @@ -282,12 +323,21 @@ impl ForwardRelativePath { /// a directory, this is the directory name. /// /// ``` - /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; /// use buck2_core::fs::paths::file_name::FileName; + /// use buck2_core::fs::paths::file_name::FileName; + /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; /// - /// assert_eq!(Some(FileName::unchecked_new("ls")), ForwardRelativePath::new("usr/bin/ls")?.file_name()); - /// assert_eq!(Some(FileName::unchecked_new("bin")), ForwardRelativePath::new("usr/bin")?.file_name()); - /// assert_eq!(Some(FileName::unchecked_new("usr")), ForwardRelativePath::new("usr")?.file_name()); + /// assert_eq!( + /// Some(FileName::unchecked_new("ls")), + /// ForwardRelativePath::new("usr/bin/ls")?.file_name() + /// ); + /// assert_eq!( + /// Some(FileName::unchecked_new("bin")), + /// ForwardRelativePath::new("usr/bin")?.file_name() + /// ); + /// assert_eq!( + /// Some(FileName::unchecked_new("usr")), + /// ForwardRelativePath::new("usr")?.file_name() + /// ); /// assert_eq!(None, ForwardRelativePath::new("")?.file_name()); /// /// # anyhow::Ok(()) @@ -325,6 +375,24 @@ impl ForwardRelativePath { } } + /// Split off the last component of the path. + pub fn split_last(&self) -> Option<(&ForwardRelativePath, &FileName)> { + let s = &self.0; + for (i, b) in s.bytes().enumerate().rev() { + if b == b'/' { + return Some(( + ForwardRelativePath::unchecked_new(&s[..i]), + FileName::unchecked_new(&s[i + 1..]), + )); + } + } + if s.is_empty() { + None + } else { + Some((ForwardRelativePath::empty(), FileName::unchecked_new(s))) + } + } +
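`split_last` is the building block for both `pop` and the suffix-stripping introduced just below; a sketch of its contract (illustrative only):

use buck2_core::fs::paths::file_name::FileName;
use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath;

fn demo() -> anyhow::Result<()> {
    let p = ForwardRelativePath::new("foo/bar/baz")?;
    let (parent, last) = p.split_last().unwrap();
    assert_eq!(ForwardRelativePath::new("foo/bar")?, parent);
    assert_eq!(FileName::unchecked_new("baz"), last);
    // Mirrors the strip_suffix forms defined below:
    assert_eq!(parent, p.strip_suffix(ForwardRelativePath::new("baz")?)?);
    Ok(())
}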
 /// Returns a 'ForwardRelativePath' that, when joined onto `base`, yields /// `self`. /// ``` /// path.strip_prefix(ForwardRelativePath::new("")?)?, /// ForwardRelativePath::new("test/haha/foo.txt")? /// ); - /// assert_eq!(path.strip_prefix(ForwardRelativePath::new("asdf")?).is_err(), true); + /// assert_eq!( + /// path.strip_prefix(ForwardRelativePath::new("asdf")?) + /// .is_err(), + /// true + /// ); /// /// # anyhow::Ok(()) /// ``` pub fn strip_prefix<P: AsRef<ForwardRelativePath>>( &self, - base: P, + prefix: P, ) -> anyhow::Result<&ForwardRelativePath> { - let base = base.as_ref(); - self.strip_prefix_opt(base) - .ok_or_else(|| StripPrefixError(base.as_str().to_owned(), self.0.to_owned()).into()) + let prefix = prefix.as_ref(); + self.strip_prefix_opt(prefix).ok_or_else(|| { + ForwardRelativePathError::StripPrefix( + self.as_str().to_owned(), + prefix.as_str().to_owned(), + ) + .into() + }) } pub fn strip_prefix_opt<P: AsRef<ForwardRelativePath>>( &self, - base: P, + prefix: P, + ) -> Option<&ForwardRelativePath> { + let prefix = prefix.as_ref(); + if prefix.0.is_empty() { + Some(self) + } else if self.starts_with(prefix) { + if self.0.len() == prefix.0.len() { + Some(ForwardRelativePath::empty()) + } else { + Some(ForwardRelativePath::unchecked_new( + &self.0[prefix.0.len() + 1..], + )) + } + } else { + None + } + } + + pub fn strip_suffix<P: AsRef<ForwardRelativePath>>( + &self, + suffix: P, + ) -> anyhow::Result<&ForwardRelativePath> { + let suffix = suffix.as_ref(); + self.strip_suffix_opt(suffix).ok_or_else(|| { + ForwardRelativePathError::StripSuffix( + self.as_str().to_owned(), + suffix.as_str().to_owned(), + ) + .into() + }) + } + + pub fn strip_suffix_opt<P: AsRef<ForwardRelativePath>>( + &self, + suffix: P, ) -> Option<&ForwardRelativePath> { - let base = base.as_ref(); - if base.0.is_empty() { + let suffix = suffix.as_ref(); + if suffix.0.is_empty() { Some(self) - } else if self.starts_with(base) { - if self.0.len() == base.0.len() { + } else if self.ends_with(suffix) { + if self.0.len() == suffix.0.len() { Some(ForwardRelativePath::empty()) } else { Some(ForwardRelativePath::unchecked_new( - &self.0[base.0.len() + 1..], + &self.0[..self.0.len() - suffix.0.len() - 1], )) } } else { @@ -388,7 +499,6 @@ impl ForwardRelativePath { /// Determines whether `base` is a prefix of `self`. /// /// ``` - /// /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; /// /// let path = ForwardRelativePath::new("some/foo")?; @@ -411,6 +521,7 @@ impl ForwardRelativePath { /// /// ``` /// use std::path::Path; + /// /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; /// /// let path = ForwardRelativePath::new("some/foo")?; @@ -446,7 +557,10 @@ impl ForwardRelativePath { /// let path = ForwardRelativePath::new("foo.rs")?; /// /// assert_eq!(Some("foo"), path.file_stem()); - /// assert_eq!(Some("foo.bar"), ForwardRelativePath::new("hi/foo.bar.rs")?.file_stem()); + /// assert_eq!( + /// Some("foo.bar"), + /// ForwardRelativePath::new("hi/foo.bar.rs")?.file_stem() + /// ); /// /// # anyhow::Ok(()) /// ``` @@ -467,11 +581,16 @@ impl ForwardRelativePath { /// Extracts the extension of [`self.file_name`], if possible. /// /// ``` - /// /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; /// - /// assert_eq!(Some("rs"), ForwardRelativePath::new("hi/foo.rs")?.extension()); - /// assert_eq!(Some("rs"), ForwardRelativePath::new("hi/foo.bar.rs")?.extension()); + /// assert_eq!( + /// Some("rs"), + /// ForwardRelativePath::new("hi/foo.rs")?.extension() + /// ); + /// assert_eq!( + /// Some("rs"), + /// ForwardRelativePath::new("hi/foo.bar.rs")?.extension() + /// ); /// assert_eq!(None, ForwardRelativePath::new(".git")?.extension()); /// assert_eq!(None, ForwardRelativePath::new("foo/.git")?.extension()); /// assert_eq!(None, ForwardRelativePath::new("")?.extension()); @@ -503,8 +622,8 @@ impl ForwardRelativePath { /// normalized. 
/// /// ``` - /// - /// use buck2_core::fs::paths::forward_rel_path::{ForwardRelativePath, ForwardRelativePathBuf}; + /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; + /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; /// /// assert_eq!( /// ForwardRelativePathBuf::unchecked_new("foo/baz.txt".into()), @@ -512,7 +631,9 @@ impl ForwardRelativePath { /// ); /// /// assert_eq!( - /// ForwardRelativePath::new("foo")?.join_normalized("../../baz.txt").is_err(), + /// ForwardRelativePath::new("foo")? + /// .join_normalized("../../baz.txt") + /// .is_err(), /// true /// ); /// @@ -544,26 +665,11 @@ impl ForwardRelativePath { /// let p = ForwardRelativePath::new("foo/bar/baz")?; /// let mut it = p.iter(); /// - /// assert_eq!( - /// it.next(), - /// Some(FileName::unchecked_new("foo")) - /// ); - /// assert_eq!( - /// it.next(), - /// Some(FileName::unchecked_new("bar")) - /// ); - /// assert_eq!( - /// it.next(), - /// Some(FileName::unchecked_new("baz")) - /// ); - /// assert_eq!( - /// it.next(), - /// None - /// ); - /// assert_eq!( - /// it.next(), - /// None - /// ); + /// assert_eq!(it.next(), Some(FileName::unchecked_new("foo"))); + /// assert_eq!(it.next(), Some(FileName::unchecked_new("bar"))); + /// assert_eq!(it.next(), Some(FileName::unchecked_new("baz"))); + /// assert_eq!(it.next(), None); + /// assert_eq!(it.next(), None); /// /// # anyhow::Ok(()) /// ``` @@ -595,10 +701,7 @@ impl ForwardRelativePath { /// p.strip_prefix_components(3), /// Some(ForwardRelativePath::new("")?), /// ); - /// assert_eq!( - /// p.strip_prefix_components(4), - /// None, - /// ); + /// assert_eq!(p.strip_prefix_components(4), None,); /// # anyhow::Ok(()) /// ``` pub fn strip_prefix_components(&self, components: usize) -> Option<&Self> { @@ -649,6 +752,23 @@ impl ForwardRelativePathBuf { Self(String::with_capacity(cap)) } + pub fn with_capacity_for_concat( + items: impl IntoIterator<Item = impl AsRef<ForwardRelativePath>>, + ) -> Self { + let mut cap = 0; + for item in items { + let item = item.as_ref(); + if !item.is_empty() { + if cap != 0 { + // `/`.
+ cap += 1; + } + cap += item.0.len(); + } + } + ForwardRelativePathBuf::with_capacity(cap) + } + /// Returns the capacity of the underlying 'String' #[inline] pub fn capacity(&self) -> usize { @@ -676,55 +796,67 @@ impl ForwardRelativePathBuf { /// Pushes a `ForwardRelativePath` to the existing buffer /// /// ``` - /// use buck2_core::fs::paths::forward_rel_path::{ForwardRelativePath, ForwardRelativePathBuf}; + /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; + /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; /// /// let mut path = ForwardRelativePathBuf::unchecked_new("foo".to_owned()); /// path.push(ForwardRelativePath::unchecked_new("bar")); /// - /// assert_eq!(ForwardRelativePathBuf::unchecked_new("foo/bar".to_owned()), path); + /// assert_eq!( + /// ForwardRelativePathBuf::unchecked_new("foo/bar".to_owned()), + /// path + /// ); /// /// path.push(ForwardRelativePath::unchecked_new("more/file.rs")); - /// assert_eq!(ForwardRelativePathBuf::unchecked_new("foo/bar/more/file.rs".to_owned()), path); + /// assert_eq!( + /// ForwardRelativePathBuf::unchecked_new("foo/bar/more/file.rs".to_owned()), + /// path + /// ); /// /// path.push(ForwardRelativePath::empty()); - /// assert_eq!(ForwardRelativePathBuf::unchecked_new("foo/bar/more/file.rs".to_owned()), path); + /// assert_eq!( + /// ForwardRelativePathBuf::unchecked_new("foo/bar/more/file.rs".to_owned()), + /// path + /// ); /// /// let mut path = ForwardRelativePathBuf::unchecked_new("".to_owned()); /// path.push(ForwardRelativePath::unchecked_new("foo")); - /// assert_eq!(ForwardRelativePathBuf::unchecked_new("foo".to_owned()), path); + /// assert_eq!( + /// ForwardRelativePathBuf::unchecked_new("foo".to_owned()), + /// path + /// ); /// /// # anyhow::Ok(()) /// ``` pub fn push<P: AsRef<ForwardRelativePath>>(&mut self, path: P) { - if path.as_ref().0.is_empty() { + let path = path.as_ref(); + if path.is_empty() { return; } - if !self.0.is_empty() { + if self.is_empty() { + self.0.push_str(path.as_str()); + } else { + self.reserve(1 + path.0.len()); self.0.push('/'); + self.0.push_str(path.as_str()); } - self.0.push_str(path.as_ref().as_str()) + } + + /// Pop the last component of the path, if there is one. + pub fn pop(&mut self) -> bool { + let Some((me, _pop)) = self.split_last() else { + return false; + }; + self.0.truncate(me.0.len()); + true } pub fn concat<'a, I: IntoIterator<Item = &'a ForwardRelativePath> + Copy>( items: I, ) -> ForwardRelativePathBuf { - let mut cap = 0; - for item in items { - if !item.is_empty() { - if cap != 0 { - // `/`. - cap += 1; - } - cap += item.0.len(); - } - } - let mut path = ForwardRelativePathBuf::with_capacity(cap); - for item in items { - path.push(item); - } - // Cheap self-test. - assert!(path.0.len() == cap); + let mut path = ForwardRelativePathBuf::with_capacity_for_concat(items); + path.extend(items); path } @@ -733,26 +865,40 @@ impl ForwardRelativePathBuf { /// components directly, similar to `join_normalized`.
/// /// ``` - /// /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; /// use buck2_core::fs::paths::RelativePath; /// /// let mut path = ForwardRelativePathBuf::unchecked_new("foo".to_owned()); /// path.push_normalized(RelativePath::new("bar"))?; /// - /// assert_eq!(ForwardRelativePathBuf::unchecked_new("foo/bar".to_owned()), path); + /// assert_eq!( + /// ForwardRelativePathBuf::unchecked_new("foo/bar".to_owned()), + /// path + /// ); /// /// path.push_normalized(RelativePath::new("more/file.rs"))?; - /// assert_eq!(ForwardRelativePathBuf::unchecked_new("foo/bar/more/file.rs".to_owned()), path); + /// assert_eq!( + /// ForwardRelativePathBuf::unchecked_new("foo/bar/more/file.rs".to_owned()), + /// path + /// ); /// /// path.push_normalized(RelativePath::new("../other.rs"))?; - /// assert_eq!(ForwardRelativePathBuf::unchecked_new("foo/bar/more/other.rs".to_owned()), path); + /// assert_eq!( + /// ForwardRelativePathBuf::unchecked_new("foo/bar/more/other.rs".to_owned()), + /// path + /// ); /// /// path.push_normalized(RelativePath::new(".."))?; - /// assert_eq!(ForwardRelativePathBuf::unchecked_new("foo/bar/more".to_owned()), path); + /// assert_eq!( + /// ForwardRelativePathBuf::unchecked_new("foo/bar/more".to_owned()), + /// path + /// ); /// /// path.push_normalized(RelativePath::new("../.."))?; - /// assert_eq!(ForwardRelativePathBuf::unchecked_new("foo".to_owned()), path); + /// assert_eq!( + /// ForwardRelativePathBuf::unchecked_new("foo".to_owned()), + /// path + /// ); /// /// path.push_normalized(RelativePath::new(".."))?; /// assert_eq!(ForwardRelativePathBuf::unchecked_new("".to_owned()), path); @@ -862,8 +1008,16 @@ impl Clone for Box<ForwardRelativePath> { } } +impl<P: AsRef<ForwardRelativePath>> Extend<P> for ForwardRelativePathBuf

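Taken together, the new strip_suffix/split_last/pop operations rely on one invariant: components are separated by exactly one `/`, so dropping a component must also drop its separator. A minimal standalone sketch of that arithmetic, with plain `&str` standing in for `ForwardRelativePath` (the real `ends_with` is component-aware; here that is approximated with an explicit `/` check):

```rust
// Illustrative only: mirrors the index arithmetic of strip_suffix_opt /
// split_last from the hunk above, on plain strings.
fn strip_suffix_opt<'a>(path: &'a str, suffix: &str) -> Option<&'a str> {
    if suffix.is_empty() {
        return Some(path); // stripping the empty suffix is a no-op
    }
    if path == suffix {
        return Some(""); // the whole path was the suffix
    }
    // Require a `/` right before the suffix, then drop both (this is the
    // `..len - suffix.len() - 1` slice in the real code).
    path.strip_suffix(suffix)?.strip_suffix('/')
}

fn split_last(path: &str) -> Option<(&str, &str)> {
    if path.is_empty() {
        return None;
    }
    // Everything after the last `/` is the file name; with no `/` at all,
    // the parent is the empty path.
    Some(match path.rfind('/') {
        Some(i) => (&path[..i], &path[i + 1..]),
        None => ("", path),
    })
}

fn main() {
    assert_eq!(strip_suffix_opt("foo/bar/baz", "bar/baz"), Some("foo"));
    assert_eq!(strip_suffix_opt("foo/bar", "foo/bar"), Some(""));
    assert_eq!(strip_suffix_opt("foo/bar", "ar"), None); // not a component boundary
    assert_eq!(split_last("foo/bar"), Some(("foo", "bar")));
    assert_eq!(split_last("foo"), Some(("", "foo")));
}
```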
{ - ParsedPattern::Package(PackageLabel::testing_new(cell, path)) - } - - fn mk_recursive<T: PatternType>(cell: &str, path: &str) -> ParsedPattern<T>

{ - ParsedPattern::Recursive(CellPath::new( - CellName::testing_new(cell), - CellRelativePathBuf::unchecked_new(path.to_owned()), - )) - } - - fn mk_target(cell: &str, path: &str, target: &str) -> ParsedPattern<TargetPatternExtra> { - ParsedPattern::Target( - PackageLabel::testing_new(cell, path), - TargetName::unchecked_new(target), - TargetPatternExtra, - ) - } - - fn mk_providers( - cell: &str, - path: &str, - target: &str, - providers: Option<&[&str]>, - ) -> ParsedPattern<ProvidersPatternExtra> { - ParsedPattern::Target( - PackageLabel::testing_new(cell, path), - TargetName::unchecked_new(target), - ProvidersPatternExtra { - providers: providers.map_or(ProvidersName::Default, |n| { - ProvidersName::NonDefault(Box::new(NonDefaultProvidersName::Named( - n.map(|s| ProviderName::new((*s).to_owned()).unwrap()) - .into_boxed_slice(), - ))) - }), - }, - ) - } - - fn mk_configured_providers( - cell: &str, - path: &str, - target: &str, - providers: Option<&[&str]>, - cfg: ConfigurationPredicate, - ) -> ParsedPattern<ConfiguredProvidersPatternExtra> { - mk_providers(cell, path, target, providers) - .try_map(|ProvidersPatternExtra { providers }| { - Ok(ConfiguredProvidersPatternExtra { providers, cfg }) - }) - .unwrap() - } - - fn fails<T>(x: anyhow::Result<T>, msgs: &[&str]) { - match x { - Err(e) => { - let s = format!("{:#}", e); - for msg in msgs { - if !s.contains(msg) { - panic!("Expected `{}` but missing from error `{:#}`", msg, e) - } - } - } - Ok(_) => panic!("Expected failure but succeeded"), - } - } - - struct NoAliases; - - impl TargetAliasResolver for NoAliases { - fn get<'a>(&'a self, _name: &str) -> anyhow::Result<Option<&'a str>> { - Ok(None) - } - } - - fn aliases(aliases: &[(&str, &str)]) -> impl TargetAliasResolver { - struct Aliases(Vec<(String, String)>); - - impl TargetAliasResolver for Aliases { - fn get<'a>(&'a self, name: &str) -> anyhow::Result<Option<&'a str>> { - Ok(self - .0 - .iter() - .find(|(a, _)| *a == name) - .map(|(_, b)| b.as_str())) - } - } - - Aliases( - aliases - .iter() - .map(|(a, b)| ((*a).to_owned(), (*b).to_owned())) - .collect(), - ) - } - - fn resolver() -> CellResolver { - CellResolver::testing_with_names_and_paths_with_alias(&[ - ( - CellName::testing_new("root"), - CellRootPathBuf::testing_new(""), - HashMap::from_iter([ - ( - NonEmptyCellAlias::testing_new("cell1"), - CellName::testing_new("cell1"), - ), - ( - NonEmptyCellAlias::testing_new("alias2"), - CellName::testing_new("cell2"), - ), - ]), - ), - ( - CellName::testing_new("cell1"), - CellRootPathBuf::testing_new("cell1"), - HashMap::new(), - ), - ( - CellName::testing_new("cell2"), - CellRootPathBuf::testing_new("cell2"), - HashMap::new(), - ), - ]) - } - - #[test_case(PhantomData::< TargetPatternExtra >; "parsing TargetPattern")] - #[test_case(PhantomData::< ProvidersPatternExtra >; "parsing ProvidersPattern")] - #[test_case(PhantomData::< ConfiguredTargetPatternExtra >; "parsing ConfiguredTargetPatternExtra")] - #[test_case(PhantomData::< ConfiguredProvidersPatternExtra >; "parsing ConfiguredProvidersPatternExtra")] - fn parse_absolute_pattern<T: PatternType>(_: PhantomData<T>) { - let package = CellPath::new( - CellName::testing_new("root"), - CellRelativePath::unchecked_new("package/path").to_owned(), - ); - - assert_eq!( - mk_package::<T>("root", "package/path"), - ParsedPattern::<T>::parse_precise( - "//package/path:", - CellName::testing_new("root"), - &resolver() - ) - .unwrap() - ); - assert_eq!( - mk_package::<T>("root", ""), - ParsedPattern::<T>::parse_precise("//:", CellName::testing_new("root"), &resolver()) - .unwrap() - ); - assert_eq!( - mk_package::<T>("cell1", "package/path"), - ParsedPattern::<T>::parse_precise( -
"cell1//package/path:", - CellName::testing_new("root"), - &resolver() - ) - .unwrap() - ); - assert_matches!( - ParsedPattern::::parse_precise("package/path:", CellName::testing_new("root"), &resolver()), - Err(e) => { - assert_matches!( - e.downcast_ref::(), - Some(TargetPatternParseError::AbsoluteRequired) - ); - } - ); - assert_eq!( - mk_package::("cell2", "package/path"), - ParsedPattern::::parse_precise( - "alias2//package/path:", - CellName::testing_new("root"), - &resolver() - ) - .unwrap() - ); - assert_eq!( - mk_package::("cell2", "package/path"), - ParsedPattern::::parse_precise( - "@alias2//package/path:", - CellName::testing_new("root"), - &resolver() - ) - .unwrap() - ); - assert_eq!( - mk_recursive::("root", "package/path"), - ParsedPattern::::parse_precise( - "//package/path/...", - CellName::testing_new("root"), - &resolver() - ) - .unwrap() - ); - assert_eq!( - mk_recursive::("root", "package/path"), - ParsedPattern::::parse_relative(&NoAliases, package.as_ref(), "...", &resolver(),) - .unwrap() - ); - assert_eq!( - mk_recursive::("root", "package/path/foo"), - ParsedPattern::::parse_relative( - &NoAliases, - package.as_ref(), - "foo/...", - &resolver(), - ) - .unwrap() - ); - } - - #[test] - fn parse_relative_pattern() -> anyhow::Result<()> { - let package = CellPath::new( - CellName::testing_new("root"), - CellRelativePath::unchecked_new("package/path").to_owned(), - ); - - assert_eq!( - mk_target("root", "package/path", "target"), - ParsedPattern::parse_precise( - "//package/path:target", - CellName::testing_new("root"), - &resolver() - )? - ); - assert_eq!( - mk_target("root", "package/path/foo", "target"), - ParsedPattern::parse_relative(&NoAliases, package.as_ref(), "foo:target", &resolver(),)? - ); - Ok(()) - } - - #[test] - fn test_relaxed() -> anyhow::Result<()> { - let package = CellPath::new( - CellName::testing_new("root"), - CellRelativePath::unchecked_new("package").to_owned(), - ); - - assert_matches!( - ParsedPattern::::parse_relative( - &NoAliases, - package.as_ref(), - "path", - &resolver(), - ), - Err(e) => { - assert_matches!( - e.downcast_ref::(), - Some(TargetPatternParseError::UnexpectedFormat) - ); - } - ); - - assert_eq!( - mk_target("root", "package/path", "path"), - ParsedPattern::parse_relaxed( - &NoAliases, - package.as_ref(), - "//package/path", - &resolver(), - )? - ); - assert_eq!( - mk_target("root", "package/path", "path"), - ParsedPattern::parse_relaxed(&NoAliases, package.as_ref(), "path", &resolver(),)? - ); - assert_eq!( - mk_providers("root", "package/path", "path", Some(&["provider"])), - ParsedPattern::parse_relaxed( - &NoAliases, - package.as_ref(), - "path[provider]", - &resolver(), - )? - ); - assert_eq!( - mk_providers( - "root", - "package/path/subpath", - "subpath", - Some(&["provider"]) - ), - ParsedPattern::parse_relaxed( - &NoAliases, - package.as_ref(), - "path/subpath[provider]", - &resolver(), - )? - ); - assert_eq!( - mk_target("root", "package/path/subpath", "subpath"), - ParsedPattern::parse_relaxed( - &NoAliases, - package.as_ref(), - "path/subpath", - &resolver(), - )? - ); - assert_eq!( - mk_target("root", "package/path", "path"), - ParsedPattern::parse_relaxed( - &NoAliases, - package.as_ref(), - "//package/path/", - &resolver(), - )? - ); - assert_eq!( - mk_target("root", "package/path", "target"), - ParsedPattern::parse_relaxed( - &NoAliases, - package.as_ref(), - "//package/path/:target", - &resolver(), - )? - ); - - // Awkward but technically valid? 
- assert_eq!( - mk_target("root", "package", "foo"), - ParsedPattern::parse_relaxed(&NoAliases, package.as_ref(), "/:foo", &resolver(),)? - ); - - // There's no target here so this is invalid. - assert_matches!( - ParsedPattern::<TargetPatternExtra>::parse_relaxed( - &NoAliases, - package.as_ref(), - "/", - &resolver(), - ), - Err(e) => { - assert_matches!( - e.downcast_ref::<TargetPatternParseError>(), - Some(TargetPatternParseError::PackageIsEmpty) - ); - } - ); - - Ok(()) - } - - #[test] - fn test_parsed_opt_absolute() -> anyhow::Result<()> { - let package = CellPath::new( - CellName::testing_new("root"), - CellRelativePath::unchecked_new("package/path").to_owned(), - ); - - assert_eq!( - mk_target("root", "other", "target"), - ParsedPattern::parsed_opt_absolute( - "//other:target", - Some(package.as_ref()), - CellName::testing_new("root"), - &resolver(), - )? - ); - assert_eq!( - mk_target("root", "package/path", "target"), - ParsedPattern::parsed_opt_absolute( - ":target", - Some(package.as_ref()), - CellName::testing_new("root"), - &resolver(), - )? - ); - let err = ParsedPattern::<TargetPatternExtra>::parsed_opt_absolute( - ":target", - None, - CellName::testing_new("root"), - &resolver(), - ) - .unwrap_err(); - assert!( - err.to_string() - .contains("Invalid absolute target pattern `:target` is not allowed"), - "{}", - err - ); - // But this should be fine. - assert_eq!( - mk_target("cell1", "", "target"), - ParsedPattern::parsed_opt_absolute( - "cell1//:target", - None, - CellName::testing_new("root"), - &resolver(), - )? - ); - - assert_matches!( - ParsedPattern::<TargetPatternExtra>::parsed_opt_absolute( - "foo/bar", - Some(package.as_ref()), - CellName::testing_new("root"), - &resolver(), - ), - Err(e) => { - assert_matches!( - e.downcast_ref::<TargetPatternParseError>(), - Some(TargetPatternParseError::UnexpectedFormat) - ); - } - ); - - assert_matches!( - ParsedPattern::<TargetPatternExtra>::parsed_opt_absolute( - "foo/bar:bar", - Some(package.as_ref()), - CellName::testing_new("root"), - &resolver(), - ), - Err(e) => { - assert_matches!( - e.downcast_ref::<TargetPatternParseError>(), - Some(TargetPatternParseError::AbsoluteRequired) - ); - } - ); - assert_matches!( - ParsedPattern::<TargetPatternExtra>::parsed_opt_absolute( - "foo/bar:bar", - None, - CellName::testing_new("root"), - &resolver(), - ), - Err(e) => { - assert_matches!( - e.downcast_ref::<TargetPatternParseError>(), - Some(TargetPatternParseError::AbsoluteRequired) - ); - } - ); - - Ok(()) - } - - #[test] - fn test_aliases() -> anyhow::Result<()> { - let package = CellPath::new( - CellName::testing_new("root"), - CellRelativePath::unchecked_new("package").to_owned(), - ); - - let config = aliases(&[ - ("foo", "cell1//foo/bar:target"), - ("invalid/alias", "cell1//foo/bar:target"), - ("badalias", "cell1//foo/bar:"), - ]); - - assert_eq!( - mk_target("cell1", "foo/bar", "target"), - ParsedPattern::parse_relaxed(&config, package.as_ref(), "foo", &resolver(),)? - ); - - assert_matches!( - ParsedPattern::<TargetPatternExtra>::parse_relaxed( - &config, - package.as_ref(), - "invalid/alias", - &resolver(), - ), - Err(e) => { - assert_matches!( - e.downcast_ref::<ResolveTargetAliasError>(), - Some(ResolveTargetAliasError::InvalidAlias { .. }) - ); - } - ); - - assert_matches!( - ParsedPattern::<TargetPatternExtra>::parse_relaxed( - &config, - package.as_ref(), - "badalias", - &resolver(), - ), - Err(e) => { - assert_matches!( - e.downcast_ref::<ResolveTargetAliasError>(), - Some(ResolveTargetAliasError::AliasIsNotATarget { .. }) - ); - } - ); - - Ok(()) - } - - #[test] - fn parse_providers_pattern() -> anyhow::Result<()> { - assert_eq!( - mk_providers("root", "package/path", "target", None), - ParsedPattern::parse_precise( - "//package/path:target", - CellName::testing_new("root"), - &resolver() - )?
- ); - assert_eq!( - mk_providers("root", "package/path", "target", Some(&["java-output"])), - ParsedPattern::parse_precise( - "//package/path:target[java-output]", - CellName::testing_new("root"), - &resolver() - )? - ); - assert_eq!( - mk_providers( - "root", - "package/path", - "target", - Some(&["FDSIcon+FDSInternal.h"]), - ), - ParsedPattern::parse_precise( - "//package/path:target[FDSIcon+FDSInternal.h]", - CellName::testing_new("root"), - &resolver() - )? - ); - - let (package, target_name, providers) = ParsedPattern::parse_precise( - "//package/path:target#flavor", - CellName::testing_new("root"), - &resolver(), - )? - .as_literal("")?; - assert_eq!( - "root//package/path:target#flavor", - ProvidersPatternExtra::into_providers_label(providers, package, target_name.as_ref()) - .to_string(), - ); - Ok(()) - } - - #[test] - fn parse_providers_pattern_with_alias() -> anyhow::Result<()> { - let package = CellPath::new( - CellName::testing_new("root"), - CellRelativePath::unchecked_new("package").to_owned(), - ); - - let config = aliases(&[("foo", "cell1//foo/bar:target")]); - - assert_eq!( - mk_providers("cell1", "foo/bar", "target", Some(&["qux"])), - ParsedPattern::parse_relaxed(&config, package.as_ref(), "foo[qux]", &resolver(),)? - ); - - Ok(()) - } - - #[test] - fn test_parse_configured_providers_pattern() -> anyhow::Result<()> { - assert_eq!( - mk_configured_providers( - "root", - "package/path", - "target", - None, - ConfigurationPredicate::Any - ), - ParsedPattern::parse_precise( - "//package/path:target", - CellName::testing_new("root"), - &resolver() - )? - ); - assert_eq!( - mk_configured_providers( - "root", - "package/path", - "target", - None, - ConfigurationPredicate::Builtin(BuiltinPlatform::Unspecified) - ), - ParsedPattern::parse_precise( - "//package/path:target (<unspecified>)", - CellName::testing_new("root"), - &resolver() - )? - ); - assert_eq!( - mk_configured_providers( - "root", - "package/path", - "target", - Some(&["P"]), - ConfigurationPredicate::Bound( - BoundConfigurationLabel::new("<foo>".to_owned()).unwrap(), - None - ), - ), - ParsedPattern::parse_precise( - "//package/path:target[P] (<foo>)", - CellName::testing_new("root"), - &resolver() - )? - ); - assert_eq!( - mk_configured_providers( - "root", - "package/path", - "target", - Some(&["P"]), - ConfigurationPredicate::Bound( - BoundConfigurationLabel::new("<foo>".to_owned()).unwrap(), - Some(ConfigurationHash::from_str("0123456789abcdef").unwrap()), - ), - ), - ParsedPattern::parse_precise( - "//package/path:target[P] (<foo>#0123456789abcdef)", - CellName::testing_new("root"), - &resolver(), - )?
- ); - Ok(()) - } - - #[test_case(PhantomData::< TargetPatternExtra >; "parsing TargetPattern")] - #[test_case(PhantomData::< ProvidersPatternExtra >; "parsing ProvidersPattern")] - #[test_case(PhantomData::< ConfiguredTargetPatternExtra >; "parsing ConfiguredTargetPatternExtra")] - #[test_case(PhantomData::< ConfiguredProvidersPatternExtra >; "parsing ConfiguredProvidersPatternExtra")] - fn parse_pattern_failure<T: PatternType>(_: PhantomData<T>) { - fails( - ParsedPattern::<T>::parse_precise("", CellName::testing_new("root"), &resolver()), - &[], - ); - fails( - ParsedPattern::<T>::parse_precise( - "//package/path", - CellName::testing_new("root"), - &resolver(), - ), - &[], - ); - fails( - ParsedPattern::<T>::parse_precise( - "//package...", - CellName::testing_new("root"), - &resolver(), - ), - &[], - ); - fails( - ParsedPattern::<T>::parse_precise( - "package", - CellName::testing_new("root"), - &resolver(), - ), - &[], - ); - fails( - ParsedPattern::<T>::parse_precise( - "bad_alias//package/path:", - CellName::testing_new("root"), - &resolver(), - ), - &[ - "bad_alias//package/path:", - "unknown cell alias: `bad_alias`.", - ], - ); - fails( - ParsedPattern::<T>::parse_precise( - "//package/path/:target", - CellName::testing_new("root"), - &resolver(), - ), - &[], - ); - fails( - ParsedPattern::<T>::parse_precise( - "//package/path/", - CellName::testing_new("root"), - &resolver(), - ), - &[], - ); - fails( - ParsedPattern::<T>::parse_precise( - "$(exe my macro)", - CellName::testing_new("root"), - &resolver(), - ), - &[ - "$(exe my macro)", - "You may be trying to use a macro instead of a target pattern. Macro usage is invalid here", - ], - ); - } - - #[test] - fn bad_providers_label() { - fails( - ParsedPattern::<ProvidersPatternExtra>::parse_precise( - "//package/path:target[unclosed", - CellName::testing_new("root"), - &resolver(), - ), - &[ - "//package/path:target[unclosed", - "target pattern with `[` must end with `]` to mark end of providers set label", - ], - ); - fails( - ParsedPattern::<ProvidersPatternExtra>::parse_precise( - "//package/path:target[out]wrong", - CellName::testing_new("root"), - &resolver(), - ), - &[ - "//package/path:target[out]wrong", - "target pattern with `[` must end with `]` to mark end of providers set label", - ], - ); - fails( - ParsedPattern::<ProvidersPatternExtra>::parse_precise( - "$(exe my macro)", - CellName::testing_new("root"), - &resolver(), - ), - &[ - "$(exe my macro)", - "You may be trying to use a macro instead of a target pattern.
Macro usage is invalid here", - ], - ); - } - - #[test] - fn parsed_pattern_contains() -> anyhow::Result<()> { - let pkg1 = PackageLabel::new( - CellName::testing_new("root"), - CellRelativePath::unchecked_new("package/path"), - ); - let pkg2 = PackageLabel::new( - CellName::testing_new("root"), - CellRelativePath::unchecked_new("package"), - ); - let pkg3 = PackageLabel::new( - CellName::testing_new("root"), - CellRelativePath::unchecked_new("package2"), - ); - let pkg_in_different_cell = PackageLabel::new( - CellName::testing_new("cell1"), - CellRelativePath::unchecked_new("package/path"), - ); - - let target_in_pkg1 = TargetLabel::new(pkg1.dupe(), TargetNameRef::new("target")?); - let another_target_in_pkg1 = TargetLabel::new(pkg1, TargetNameRef::new("target2")?); - let target_in_pkg2 = TargetLabel::new(pkg2, TargetNameRef::new("target")?); - let target_in_pkg3 = TargetLabel::new(pkg3, TargetNameRef::new("target")?); - let target_in_different_cell = - TargetLabel::new(pkg_in_different_cell, TargetNameRef::new("target")?); - - // Testing ParsedPattern::Target - - let pattern = ParsedPattern::parse_precise( - "//package/path:target", - CellName::testing_new("root"), - &resolver(), - )?; - assert!(pattern.matches(&target_in_pkg1)); - assert!(!pattern.matches(&another_target_in_pkg1)); - assert!(!pattern.matches(&target_in_pkg2)); - assert!(!pattern.matches(&target_in_pkg3)); - assert!(!pattern.matches(&target_in_different_cell)); - - // Testing ParsedPattern::Package - - let pattern = ParsedPattern::parse_precise( - "//package/path:", - CellName::testing_new("root"), - &resolver(), - )?; - assert!(pattern.matches(&target_in_pkg1)); - assert!(pattern.matches(&another_target_in_pkg1)); - assert!(!pattern.matches(&target_in_pkg2)); - assert!(!pattern.matches(&target_in_pkg3)); - assert!(!pattern.matches(&target_in_different_cell)); - - let pattern = - ParsedPattern::parse_precise("//package:", CellName::testing_new("root"), &resolver())?; - assert!(!pattern.matches(&target_in_pkg1)); - assert!(!pattern.matches(&another_target_in_pkg1)); - assert!(pattern.matches(&target_in_pkg2)); - assert!(!pattern.matches(&target_in_pkg3)); - assert!(!pattern.matches(&target_in_different_cell)); - - // Testing ParsedPattern::Recursive - - let pattern = ParsedPattern::parse_precise( - "//package/path/...", - CellName::testing_new("root"), - &resolver(), - )?; - assert!(pattern.matches(&target_in_pkg1)); - assert!(pattern.matches(&another_target_in_pkg1)); - assert!(!pattern.matches(&target_in_pkg2)); - assert!(!pattern.matches(&target_in_pkg3)); - assert!(!pattern.matches(&target_in_different_cell)); - - let pattern = ParsedPattern::parse_precise( - "//package/...", - CellName::testing_new("root"), - &resolver(), - )?; - assert!(pattern.matches(&target_in_pkg1)); - assert!(pattern.matches(&another_target_in_pkg1)); - assert!(pattern.matches(&target_in_pkg2)); - assert!(!pattern.matches(&target_in_pkg3)); - assert!(!pattern.matches(&target_in_different_cell)); - - let pattern = - ParsedPattern::parse_precise("//...", CellName::testing_new("root"), &resolver())?; - assert!(pattern.matches(&target_in_pkg1)); - assert!(pattern.matches(&another_target_in_pkg1)); - assert!(pattern.matches(&target_in_pkg2)); - assert!(pattern.matches(&target_in_pkg3)); - assert!(!pattern.matches(&target_in_different_cell)); - - let pattern = - ParsedPattern::parse_precise("cell1//...", CellName::testing_new("root"), &resolver())?; - assert!(!pattern.matches(&target_in_pkg1)); - assert!(!pattern.matches(&another_target_in_pkg1)); - 
assert!(!pattern.matches(&target_in_pkg2)); - assert!(!pattern.matches(&target_in_pkg3)); - assert!(pattern.matches(&target_in_different_cell)); - - Ok(()) - } - - #[test] - fn test_parsed_pattern_display() { - assert_eq!( - "foo//bar:baz", - ParsedPattern::<TargetPatternExtra>::testing_parse("foo//bar:baz").to_string() - ); - assert_eq!( - "foo//bar:", - ParsedPattern::<TargetPatternExtra>::testing_parse("foo//bar:").to_string() - ); - assert_eq!( - "foo//bar/...", - ParsedPattern::<TargetPatternExtra>::testing_parse("foo//bar/...").to_string() - ); - assert_eq!( - "foo//:", - ParsedPattern::<TargetPatternExtra>::testing_parse("foo//:").to_string() - ); - assert_eq!( - "foo//...", - ParsedPattern::<TargetPatternExtra>::testing_parse("foo//...").to_string() - ); - } - - #[test] - fn test_cross_cell_boundary() { - let cell_resolver = CellResolver::testing_with_names_and_paths(&[ - ( - CellName::testing_new("root"), - CellRootPathBuf::testing_new(""), - ), - ( - CellName::testing_new("cell1"), - CellRootPathBuf::testing_new("cell1"), - ), - ( - CellName::testing_new("cell2"), - CellRootPathBuf::testing_new("cell1/xx/cell2"), - ), - ]); - - let err = ParsedPattern::<TargetPatternExtra>::parse_precise( - "root//cell1/xx/cell2/yy/...", - CellName::testing_new("root"), - &cell_resolver, - ) - .unwrap_err(); - let err = format!("{:?}", err); - assert!( - err.contains("Pattern `root//cell1/xx/cell2/yy/...` is parsed as `root//cell1/xx/cell2/yy/...` which crosses cell boundaries. Try `cell2//yy/...`"), - "Error is: {}", - err); - } -}
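The next hunk adds `pattern/package.rs`, which models package-level matching. Its logic is small enough to mirror standalone; in this sketch plain strings stand in for `PackageLabel`/`CellPath`, and the component-boundary behaviour of `CellPath::starts_with` is approximated with an explicit `/` check:

```rust
// Illustrative stand-in for PackagePattern matching; the real code compares
// PackageLabel and CellPath values, not strings.
enum PackagePattern {
    Package(String),   // exact package, e.g. "root//some/package"
    Recursive(String), // any package at or under this path, e.g. "root//some"
}

impl PackagePattern {
    fn matches(&self, package: &str) -> bool {
        match self {
            PackagePattern::Package(p) => p == package,
            // Approximates CellPath::starts_with, which only matches on
            // whole path components.
            PackagePattern::Recursive(prefix) => {
                package == prefix || package.starts_with(&format!("{prefix}/"))
            }
        }
    }
}

fn main() {
    let pat = PackagePattern::Recursive("root//some".to_owned());
    assert!(pat.matches("root//some/package"));
    assert!(!pat.matches("root//somewhere")); // not a component boundary
}
```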
diff --git a/app/buck2_core/src/pattern/package.rs b/app/buck2_core/src/pattern/package.rs new file mode 100644 index 0000000000000..6517a5c74cc2c --- /dev/null +++ b/app/buck2_core/src/pattern/package.rs @@ -0,0 +1,47 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use allocative::Allocative; + +use crate::cells::cell_path::CellPath; +use crate::package::PackageLabel; + +#[derive(Clone, Debug, Eq, PartialEq, Allocative)] +pub enum PackagePattern { + Package(PackageLabel), + Recursive(CellPath), +} + +impl PackagePattern { + pub fn matches(&self, package: PackageLabel) -> bool { + match self { + PackagePattern::Package(pattern) => pattern == &package, + PackagePattern::Recursive(cell_path) => { + package.as_cell_path().starts_with(cell_path.as_ref()) + } + } + } +} + +#[derive(Clone, Debug, Eq, PartialEq, Allocative)] +pub enum PackagePredicate { + Any, + AnyOf(Vec<PackagePattern>), +} + +impl PackagePredicate { + pub fn matches(&self, package: PackageLabel) -> bool { + match self { + PackagePredicate::Any => true, + PackagePredicate::AnyOf(patterns) => { + patterns.iter().any(|pattern| pattern.matches(package)) + } + } + } +} diff --git a/app/buck2_core/src/pattern/parse_package.rs b/app/buck2_core/src/pattern/parse_package.rs index 5999124046ce6..a8176ff7c8fd2 100644 --- a/app/buck2_core/src/pattern/parse_package.rs +++ b/app/buck2_core/src/pattern/parse_package.rs @@ -14,7 +14,7 @@ use crate::cells::CellAliasResolver; use crate::fs::paths::forward_rel_path::ForwardRelativePath; use crate::package::PackageLabel; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum ParsePackageError { #[error("Package should contain `//`: `{0}`")] NoSlashSlash(String), diff --git a/app/buck2_core/src/pattern/pattern.rs b/app/buck2_core/src/pattern/pattern.rs new file mode 100644 index 0000000000000..ef33b2a3ee6e6 --- /dev/null +++ b/app/buck2_core/src/pattern/pattern.rs @@ -0,0 +1,2091 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree.
+ */ + +use std::fmt; +use std::fmt::Display; + +use allocative::Allocative; +use anyhow::Context; +use buck2_error::internal_error_anyhow; +use buck2_error::BuckErrorContext; +use dupe::Dupe; +use once_cell::sync::Lazy; +use regex::Regex; + +use crate::cells::alias::CellAlias; +use crate::cells::cell_path::CellPath; +use crate::cells::cell_path::CellPathCow; +use crate::cells::cell_path::CellPathRef; +use crate::cells::cell_root_path::CellRootPathBuf; +use crate::cells::name::CellName; +use crate::cells::paths::CellRelativePath; +use crate::cells::CellAliasResolver; +use crate::cells::CellResolver; +use crate::configuration::bound_label::BoundConfigurationLabel; +use crate::configuration::builtin::BuiltinPlatform; +use crate::configuration::hash::ConfigurationHash; +use crate::fs::paths::forward_rel_path::ForwardRelativePath; +use crate::package::PackageLabel; +use crate::pattern::ascii_pattern::split1_opt_ascii; +use crate::pattern::ascii_pattern::strip_suffix_ascii; +use crate::pattern::ascii_pattern::trim_prefix_ascii; +use crate::pattern::ascii_pattern::AsciiChar; +use crate::pattern::ascii_pattern::AsciiStr; +use crate::pattern::ascii_pattern::AsciiStr2; +use crate::pattern::package::PackagePattern; +use crate::pattern::pattern_type::ConfigurationPredicate; +use crate::pattern::pattern_type::ConfiguredProvidersPatternExtra; +use crate::pattern::pattern_type::PatternType; +use crate::pattern::pattern_type::ProvidersPatternExtra; +use crate::pattern::pattern_type::TargetPatternExtra; +use crate::provider::flavors::map_flavors; +use crate::provider::label::NonDefaultProvidersName; +use crate::provider::label::ProviderName; +use crate::provider::label::ProvidersLabel; +use crate::provider::label::ProvidersName; +use crate::target::label::label::TargetLabel; +use crate::target::name::TargetName; +use crate::target::name::TargetNameRef; +use crate::target_aliases::TargetAliasResolver; + +#[derive(buck2_error::Error, Debug)] +#[buck2(input)] +enum TargetPatternParseError { + #[error("Expected a `:`, a trailing `/...` or the literal `...`.")] + UnexpectedFormat, + #[error("Package is empty")] + PackageIsEmpty, + #[error("Must be absolute, with a `//` or no package just `:`.")] + AbsoluteRequired, + #[error( + "Packages may not end with a trailing `/` (except when provided on the command line where it's tolerated)" + )] + PackageTrailingSlash, + #[error("Required a target literal, but got a non-literal pattern `{0}`")] + TargetLiteralRequired(String), + #[error( + "You may be trying to use a macro instead of a target pattern. Macro usage is invalid here" + )] + PossibleMacroUsage, + #[error("Expecting {0} pattern, got: `{1}`")] + ExpectingPatternOfType(&'static str, String), + #[error("Configuration part of the pattern must be enclosed in `()`")] + ConfigurationPartMustBeEnclosedInParentheses, + #[error("Pattern `{0}` is parsed as `{1}` which crosses cell boundaries. Try `{2}` instead")] + PatternCrossesCellBoundaries(String, String, String), +} + +pub fn display_precise_pattern<'a, T: PatternType>( + package: &'a PackageLabel, + target_name: &'a TargetNameRef, + extra: &'a T, +) -> impl Display + 'a { + #[derive(derive_more::Display)] + #[display("{}:{}{}", package, target_name, extra)] + struct Impl<'a, T: PatternType> { + package: &'a PackageLabel, + target_name: &'a TargetNameRef, + extra: &'a T, + } + Impl { + package, + target_name, + extra, + } +} + +/// Extract provider name from a target pattern. 
+pub(crate) fn split_providers_name(s: &str) -> anyhow::Result<(&str, ProvidersName)> { + if let Some((t, flavors)) = split1_opt_ascii(s, AsciiChar::new('#')) { + let name = map_flavors(flavors, s)?; + Ok((t, name)) + } else if let Some((t, p)) = split1_opt_ascii(s, AsciiChar::new('[')) { + let mut names = Vec::new(); + + let mut remaining = if let Some((p, r)) = split1_opt_ascii(p, AsciiChar::new(']')) { + names.push(ProviderName::new(p.to_owned())?); + r + } else { + return Err(anyhow::anyhow!( + "target pattern with `[` must end with `]` to mark end of providers set label" + )); + }; + + while !remaining.is_empty() { + if let Some(("", r)) = split1_opt_ascii(remaining, AsciiChar::new('[')) { + if let Some((p, r)) = split1_opt_ascii(r, AsciiChar::new(']')) { + names.push(ProviderName::new(p.to_owned())?); + remaining = r; + continue; + } + } + return Err(anyhow::anyhow!( + "target pattern with `[` must end with `]` to mark end of providers set label" + )); + } + + Ok(( + t, + ProvidersName::NonDefault(triomphe::Arc::new(NonDefaultProvidersName::Named( + buck2_util::arc_str::ArcSlice::from_iter(names), + ))), + )) + } else { + Ok((s, ProvidersName::Default)) + } +} + +/// All possible labels. +/// - target label +/// - configured target label +/// - providers label +/// - configured providers label +pub struct TargetLabelWithExtra<T: PatternType> { + pub target_label: TargetLabel, + pub extra: T, +} + +impl TargetLabelWithExtra<TargetPatternExtra> { + pub fn into_target_label(self) -> TargetLabel { + self.target_label + } +} + +impl TargetLabelWithExtra<ProvidersPatternExtra> { + pub fn into_providers_label(self) -> ProvidersLabel { + ProvidersLabel::new(self.target_label, self.extra.providers) + } +} + +/// A parsed target pattern. +#[derive(Clone, Debug, Hash, Eq, PartialEq, Allocative)] +pub enum ParsedPattern<T: PatternType> { + /// A target pattern that matches an explicit target pattern type T. See + /// `PatternType` for pattern + Target(PackageLabel, TargetName, T), + /// A target pattern that matches an entire package. Ex. `//some/package:` + Package(PackageLabel), + /// A target pattern that matches all recursive packages. Ex. + /// `//some/package/...`. The path component here is not required to be + /// an actual package (i.e. a build file is not required at the path) + /// and so we don't hold this as a [PackageLabel]. + Recursive(CellPath), +} + +impl ParsedPattern<TargetPatternExtra> { + /// Extract [`TargetLabel`] from a [`ParsedPattern`]. + pub fn as_target_label(self, original: &str) -> anyhow::Result<TargetLabel> { + let (target_label, TargetPatternExtra) = self.as_literal(original)?; + Ok(target_label) + } + + /// Check if a [`ParsedPattern`] matches a [`TargetLabel`] + pub fn matches(&self, target: &TargetLabel) -> bool { + let target_pkg = target.pkg(); + match self { + ParsedPattern::Target(pkg, t, TargetPatternExtra) => { + *pkg == target_pkg && t.as_ref() == target.name() + } + ParsedPattern::Package(pkg) => target_pkg.as_cell_path() == pkg.as_cell_path(), + ParsedPattern::Recursive(cell_path) => { + target_pkg.as_cell_path().starts_with(cell_path.as_ref()) + } + } + } +}
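For orientation, a sketch of how the three variants fall out of parsing, written against the `testing_parse` helper added later in this file (the cell name `root` and the package names are made up; this would live in this crate's tests):

```rust
// Sketch only, not part of the diff: one pattern string per variant.
#[test]
fn parsed_pattern_variants_sketch() {
    use crate::pattern::pattern_type::TargetPatternExtra;

    let t = ParsedPattern::<TargetPatternExtra>::testing_parse("root//some/pkg:main");
    assert!(matches!(t, ParsedPattern::Target(..))); // one target in one package

    let p = ParsedPattern::<TargetPatternExtra>::testing_parse("root//some/pkg:");
    assert!(matches!(p, ParsedPattern::Package(..))); // every target in the package

    let r = ParsedPattern::<TargetPatternExtra>::testing_parse("root//some/pkg/...");
    assert!(matches!(r, ParsedPattern::Recursive(..))); // every target underneath
}
```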
+impl ParsedPattern<ProvidersPatternExtra> { + /// Extract [`ProvidersLabel`] from a [`ParsedPattern`]. + pub fn as_providers_label(self, original: &str) -> anyhow::Result<ProvidersLabel> { + let (target_label, ProvidersPatternExtra { providers }) = self.as_literal(original)?; + Ok(ProvidersLabel::new(target_label, providers)) + } +} + +impl<T: PatternType> ParsedPattern<T> { + pub(crate) fn cell_path(&self) -> CellPathRef { + match self { + ParsedPattern::Target(pkg, _, _) => pkg.as_cell_path(), + ParsedPattern::Package(pkg) => pkg.as_cell_path(), + ParsedPattern::Recursive(cell_path) => cell_path.as_ref(), + } + } + + pub fn try_map<U: PatternType>( + self, + f: impl FnOnce(T) -> anyhow::Result<U>, + ) -> anyhow::Result<ParsedPattern<U>> { + match self { + ParsedPattern::Target(package, target_name, val) => { + Ok(ParsedPattern::Target(package, target_name, f(val)?)) + } + ParsedPattern::Package(package) => Ok(ParsedPattern::Package(package)), + ParsedPattern::Recursive(cell_path) => Ok(ParsedPattern::Recursive(cell_path)), + } + } + + pub fn map<U: PatternType>(self, f: impl FnOnce(T) -> U) -> ParsedPattern<U> { + match self { + ParsedPattern::Target(package, target_name, val) => { + ParsedPattern::Target(package, target_name, f(val)) + } + ParsedPattern::Package(package) => ParsedPattern::Package(package), + ParsedPattern::Recursive(cell_path) => ParsedPattern::Recursive(cell_path), + } + } + + pub fn into_package_pattern_ignore_target(self) -> PackagePattern { + match self { + ParsedPattern::Target(p, ..) | ParsedPattern::Package(p) => PackagePattern::Package(p), + ParsedPattern::Recursive(p) => PackagePattern::Recursive(p), + } + } + + /// Extract a literal from a [ParsedPattern], or `Err` if it is not a literal. + pub fn as_literal(self, original: &str) -> anyhow::Result<(TargetLabel, T)> { + // FIXME: Would be better if we had a Display on self, so we could produce a nice error message. + // For now, just require the original string to be passed in for good errors. + match self { + ParsedPattern::Target(package, target_name, val) => { + Ok((TargetLabel::new(package, target_name.as_ref()), val)) + } + _ => Err(TargetPatternParseError::TargetLiteralRequired(original.to_owned()).into()), + } + } + + /// Parse a TargetPattern, but where there is no relative directory. + pub fn parse_precise( + pattern: &str, + cell: CellName, + cell_resolver: &CellResolver, + cell_alias_resolver: &CellAliasResolver, + ) -> anyhow::Result<Self> { + parse_target_pattern( + cell, + cell_resolver, + cell_alias_resolver, + None, + TargetParsingOptions::precise(), + pattern, + ) + .with_context(|| { + format!( + "Invalid absolute target pattern `{}` is not allowed", + pattern + ) + }) + } + + pub fn parsed_opt_absolute( + pattern: &str, + relative_dir: Option<CellPathRef>, + cell: CellName, + cell_resolver: &CellResolver, + cell_alias_resolver: &CellAliasResolver, + ) -> anyhow::Result<Self> { + parse_target_pattern( + cell, + cell_resolver, + cell_alias_resolver, + None, + TargetParsingOptions { + relative: TargetParsingRel::RequireAbsolute(relative_dir), + infer_target: false, + strip_package_trailing_slash: false, + }, + pattern, + ) + .with_context(|| { + format!( + "Invalid absolute target pattern `{}` is not allowed", + pattern + ) + }) + }
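A sketch of how a caller might pick between the entry points (`parse_relative` and `parse_relaxed` follow below). Every argument here is a stand-in for a value obtained from the invocation context, and `CellPathRef` is assumed to be `Copy`:

```rust
// Sketch only, not part of the diff.
fn entry_point_examples(
    cell: CellName,                           // stand-in: cell of the invocation
    cells: &CellResolver,                     // stand-in
    aliases: &CellAliasResolver,              // stand-in
    target_aliases: &dyn TargetAliasResolver, // stand-in
    cwd: CellPathRef,                         // stand-in: working directory
) -> anyhow::Result<()> {
    // Machine-generated input: absolute patterns only, no inference.
    let _ = ParsedPattern::<TargetPatternExtra>::parse_precise(
        "root//some/pkg:main", cell, cells, aliases)?;

    // Tooling input with a working directory: "sub:tgt" resolves under `cwd`.
    let _ = ParsedPattern::<TargetPatternExtra>::parse_relative(
        target_aliases, cwd, "sub:tgt", cells, aliases)?;

    // User-typed command-line input: additionally infers "some/pkg" =>
    // "some/pkg:pkg" and tolerates a trailing `/`.
    let _ = ParsedPattern::<TargetPatternExtra>::parse_relaxed(
        target_aliases, cwd, "some/pkg", cells, aliases)?;
    Ok(())
}
```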
+ /// Parse a TargetPattern out, resolving aliases via `cell_resolver`, and resolving relative + /// targets via `enclosing_package`, if provided. + /// Allows everything from `parse_absolute`, plus relative patterns. + pub fn parse_relative( + target_alias_resolver: &dyn TargetAliasResolver, + relative_dir: CellPathRef, + pattern: &str, + cell_resolver: &CellResolver, + cell_alias_resolver: &CellAliasResolver, + ) -> anyhow::Result<Self> { + parse_target_pattern( + relative_dir.cell(), + cell_resolver, + cell_alias_resolver, + Some(target_alias_resolver), + TargetParsingOptions { + relative: TargetParsingRel::AllowRelative(relative_dir), + infer_target: false, + strip_package_trailing_slash: false, + }, + pattern, + ) + .with_context(|| { + format!( + "Invalid relative target pattern `{}` is not allowed", + pattern + ) + }) + } + + /// Parse a TargetPattern out, resolving aliases via `cell_resolver`, resolving relative + /// targets via `relative_dir`, inferring a target name if no target or recursive pattern + /// is provided (e.g. `//foo/bar` is inferred to be equivalent to `//foo/bar:bar`), and + /// stripping trailing `/` in package names instead of rejecting them. + /// + /// This should only be used with user-provided command line arguments, as precision is + /// generally preferred elsewhere. + pub fn parse_relaxed( + target_alias_resolver: &dyn TargetAliasResolver, + relative_dir: CellPathRef, + pattern: &str, + cell_resolver: &CellResolver, + cell_alias_resolver: &CellAliasResolver, + ) -> anyhow::Result<Self> { + parse_target_pattern( + relative_dir.cell(), + cell_resolver, + cell_alias_resolver, + Some(target_alias_resolver), + TargetParsingOptions { + relative: TargetParsingRel::AllowRelative(relative_dir), + infer_target: true, + strip_package_trailing_slash: true, + }, + pattern, + ) + .with_context(|| format!("Parsing target pattern `{}`", pattern)) + } + + pub fn testing_parse(pattern: &str) -> Self { + let cell_name = pattern.split_once("//").unwrap().0; + let cell_name = CellName::testing_new(cell_name); + let cell_resolver = + CellResolver::testing_with_name_and_path(cell_name, CellRootPathBuf::testing_new("")); + Self::parse_precise( + pattern, + cell_name, + &cell_resolver, + cell_resolver.root_cell_cell_alias_resolver(), + ) + .unwrap() + } +} + +impl<T: PatternType> Display for ParsedPattern<T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ParsedPattern::Target(package, target_name, pattern) => { + write!( + f, + "{}", + display_precise_pattern(package, target_name.as_ref(), pattern) + ) + } + ParsedPattern::Package(package) => { + write!(f, "{}:", package.as_cell_path()) + } + ParsedPattern::Recursive(path) => { + if path.path().is_empty() { + write!(f, "{}...", path) + } else { + write!(f, "{}/...", path) + } + } + } + } +} + +#[derive(Clone, Debug, Hash, Eq, PartialEq, Allocative)] +pub enum ParsedPatternPredicate<T: PatternType> { + Any, + AnyOf(Vec<ParsedPattern<T>>), +} + +impl ParsedPatternPredicate<TargetPatternExtra> { + pub fn matches(&self, target: &TargetLabel) -> bool { + match self { + ParsedPatternPredicate::Any => true, + ParsedPatternPredicate::AnyOf(patterns) => { + patterns.iter().any(|pattern| pattern.matches(target)) + } + } + } +} + +#[derive(Debug)] +pub struct PatternParts<'a, T: PatternType> { + /// Is there a `foo//` or `//` part. + pub cell_alias: Option<&'a str>, + pub pattern: PatternDataOrAmbiguous<'a, T>, +} + +impl<'a, T: PatternType> PatternParts<'a, T> { + fn try_map<U: PatternType, F: FnOnce(T) -> anyhow::Result<U>>( + self, + f: F, + ) -> anyhow::Result<PatternParts<'a, U>> { + let PatternParts { + cell_alias, + pattern, + } = self; + Ok(PatternParts { + cell_alias, + pattern: pattern.try_map(f)?, + }) + } +}
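The `Ambiguous` case of the enum below is where target inference happens. A standalone mirror of that inference on plain strings (the real code works on `ForwardRelativePath`, returns errors rather than `Option`, and only strips the trailing `/` when `strip_package_trailing_slash` is set):

```rust
// Sketch only: "foo/bar" infers package "foo/bar" and target "bar".
fn infer_target(ambiguous: &str) -> Option<(&str, &str)> {
    // Relaxed parsing tolerates one trailing slash.
    let package = ambiguous.strip_suffix('/').unwrap_or(ambiguous);
    if package.is_empty() {
        return None; // corresponds to TargetPatternParseError::PackageIsEmpty
    }
    // The inferred target name is the last path component.
    let target = package.rsplit('/').next().unwrap();
    Some((package, target))
}

fn main() {
    assert_eq!(infer_target("foo/bar"), Some(("foo/bar", "bar")));
    assert_eq!(infer_target("foo/bar/"), Some(("foo/bar", "bar")));
    assert_eq!(infer_target("foo"), Some(("foo", "foo")));
}
```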
+#[derive(Debug, derive_more::From)] +pub enum PatternDataOrAmbiguous<'a, T: PatternType> { + /// We successfully extracted PatternData. + PatternData(PatternData<'a, T>), + + /// This pattern looks like `foo/bar`, `foo/bar/` or `foo`. It could be a package + target if + /// we allow inference (i.e. expanding `foo/bar` to `foo/bar:bar`). + Ambiguous { + /// The pattern. If we allow inference this will become the package. + pattern: &'a str, + /// Whether we should strip trailing slashes out of this pattern before doing inference + /// (rather than throwing an error). + strip_package_trailing_slash: bool, + extra: T, + }, +} + +impl<'a, T: PatternType> PatternDataOrAmbiguous<'a, T> { + fn try_map<U: PatternType>( + self, + f: impl FnOnce(T) -> anyhow::Result<U>, + ) -> anyhow::Result<PatternDataOrAmbiguous<'a, U>> { + match self { + PatternDataOrAmbiguous::PatternData(d) => { + Ok(PatternDataOrAmbiguous::PatternData(d.try_map(f)?)) + } + PatternDataOrAmbiguous::Ambiguous { + pattern, + strip_package_trailing_slash, + extra, + } => Ok(PatternDataOrAmbiguous::Ambiguous { + pattern, + strip_package_trailing_slash, + extra: f(extra)?, + }), + } + } +} + +impl<'a, T> PatternDataOrAmbiguous<'a, T> +where + T: PatternType, +{ + /// If the pattern is ambiguous, try to infer a target. This would convert `foo/bar` into + /// `foo/bar:bar`. + pub fn infer_target(self) -> anyhow::Result<PatternData<'a, T>> { + match self { + Self::PatternData(d) => Ok(d), + Self::Ambiguous { + pattern, + strip_package_trailing_slash, + extra, + } => { + let package = normalize_package(pattern, strip_package_trailing_slash)?; + + let target = package + .file_name() + .context(TargetPatternParseError::PackageIsEmpty)?; + + let target_name = TargetName::new(target.as_ref())?; + + Ok(PatternData::TargetInPackage { + package, + target_name, + extra, + }) + } + } + } + + /// If the pattern is ambiguous, error out. + pub fn reject_ambiguity(self) -> anyhow::Result<PatternData<'a, T>> { + match self { + Self::PatternData(d) => Ok(d), + Self::Ambiguous { pattern, .. } => { + // Check if the user maybe tried to use a macro + if pattern.contains('$') + && pattern.contains(' ') + && pattern.contains('(') + && pattern.contains(')') + { + return Err(TargetPatternParseError::PossibleMacroUsage.into()); + } + Err(TargetPatternParseError::UnexpectedFormat.into()) + } + } + } +} + +/// The pattern data we extracted. +#[derive(Debug)] +pub enum PatternData<'a, T: PatternType> { + /// A pattern like `foo/bar/...`. + Recursive { package: &'a ForwardRelativePath }, + + /// A pattern like `foo/bar:`, or `:` + AllTargetsInPackage { package: &'a ForwardRelativePath }, + + /// A pattern like `foo/bar:qux`, or `:qux`. The target will never be empty. + TargetInPackage { + package: &'a ForwardRelativePath, + target_name: TargetName, + extra: T, + }, +} + +impl<'a, T: PatternType> PatternData<'a, T> { + fn try_map<U: PatternType>( + self, + f: impl FnOnce(T) -> anyhow::Result<U>, + ) -> anyhow::Result<PatternData<'a, U>> { + match self { + PatternData::Recursive { package } => Ok(PatternData::Recursive { package }), + PatternData::AllTargetsInPackage { package } => { + Ok(PatternData::AllTargetsInPackage { package }) + } + PatternData::TargetInPackage { + package, + target_name, + extra, + } => Ok(PatternData::TargetInPackage { + package, + target_name, + extra: f(extra)?, + }), + } + } + + pub fn package_path(&self) -> &'a ForwardRelativePath { + match self { + Self::Recursive { package } => package, + Self::AllTargetsInPackage { package } => package, + Self::TargetInPackage { package, .. } => package, + } + } + + pub fn target(&self) -> Option<(&TargetName, &T)> { + match self { + Self::Recursive { .. } => None, + Self::AllTargetsInPackage { .. } => None, + Self::TargetInPackage { + target_name, extra, ..
+ } => Some((target_name, extra)), + } + } + + /// Whether this is a target that looks like `:target`. + pub fn is_adjacent_target(&self) -> bool { + self.package_path().is_empty() && self.target().is_some() + } +} + +// Splits a pattern into cell alias and forward relative path if "//" is present, otherwise returns None. +pub fn maybe_split_cell_alias_and_relative_path<'a>( + pattern: &'a str, +) -> anyhow::Result<Option<(CellAlias, &'a ForwardRelativePath)>> { + Ok(match split1_opt_ascii(pattern, AsciiStr2::new("//")) { + Some((a, p)) => Some(( + CellAlias::new(trim_prefix_ascii(a, AsciiChar::new('@')).to_owned()), + ForwardRelativePath::new(p)?, + )), + None => None, + }) +} + +fn lex_provider_pattern<'a>( + pattern: &'a str, + strip_package_trailing_slash: bool, +) -> anyhow::Result<PatternParts<'a, ProvidersPatternExtra>> { + let (cell_alias, pattern) = match split1_opt_ascii(pattern, AsciiStr2::new("//")) { + Some((a, p)) => (Some(trim_prefix_ascii(a, AsciiChar::new('@'))), p), + None => (None, pattern), + }; + + let pattern = match split1_opt_ascii(pattern, AsciiChar::new(':')) { + Some((package, "")) => PatternData::AllTargetsInPackage { + package: normalize_package(package, strip_package_trailing_slash)?, + } + .into(), + Some((package, target)) => { + let (target, providers) = split_providers_name(target)?; + let target_name = TargetName::new(target)?; + let extra = ProvidersPatternExtra { providers }; + PatternData::TargetInPackage { + package: normalize_package(package, strip_package_trailing_slash)?, + target_name, + extra, + } + .into() + } + None => { + if let Some(package) = strip_suffix_ascii(pattern, AsciiStr::new("/...")) { + PatternData::Recursive { + package: ForwardRelativePath::new(package)?, + } + .into() + } else if pattern == "..." { + PatternData::Recursive { + package: ForwardRelativePath::new("")?, + } + .into() + } else if !pattern.is_empty() { + let (pattern, providers) = split_providers_name(pattern)?; + PatternDataOrAmbiguous::Ambiguous { + pattern, + strip_package_trailing_slash, + extra: ProvidersPatternExtra { providers }, + } + } else { + return Err(TargetPatternParseError::UnexpectedFormat.into()); + } + } + }; + + Ok(PatternParts { + cell_alias, + pattern, + }) +} + +fn lex_configuration_predicate(pattern: &str) -> anyhow::Result<ConfigurationPredicate> { + let pattern = pattern + .strip_prefix('(') + .context(TargetPatternParseError::ConfigurationPartMustBeEnclosedInParentheses)?; + let pattern = pattern + .strip_suffix(')') + .context(TargetPatternParseError::ConfigurationPartMustBeEnclosedInParentheses)?; + match pattern.split_once('#') { + Some((cfg, hash)) => { + let cfg = BoundConfigurationLabel::new(cfg.to_owned())?; + let hash = ConfigurationHash::from_str(hash)?; + Ok(ConfigurationPredicate::Bound(cfg, Some(hash))) + } + None => { + if let Some(builtin) = BuiltinPlatform::from_label(pattern) { + Ok(ConfigurationPredicate::Builtin(builtin)) + } else { + Ok(ConfigurationPredicate::Bound( + BoundConfigurationLabel::new(pattern.to_owned())?, + None, + )) + } + } + } +} + +/// Split target pattern and configuration preserving parentheses for better diagnostics. +fn split_cfg(s: &str) -> Option<(&str, &str)> { + // Fast path. + if !s.contains(' ') { + return None; + } + + let mut braces: u32 = 0; + for (i, c) in s.char_indices() { + match c { + '(' => braces += 1, + ')' => match braces.checked_sub(1) { + Some(b) => braces = b, + None => { + // Pattern is invalid, let parser fail elsewhere.
+ return None; + } + }, + ' ' if braces == 0 => return Some((&s[..i], &s[i + 1..])), + _ => {} + } + } + None +} + +pub fn lex_configured_providers_pattern<'a>( + pattern: &'a str, + strip_package_trailing_slash: bool, +) -> anyhow::Result<PatternParts<'a, ConfiguredProvidersPatternExtra>> { + let (provider_pattern, cfg) = match split_cfg(pattern) { + Some((providers, cfg)) => { + let provider_pattern = lex_provider_pattern(providers, strip_package_trailing_slash)?; + let cfg = lex_configuration_predicate(cfg)?; + (provider_pattern, cfg) + } + None => ( + lex_provider_pattern(pattern, strip_package_trailing_slash)?, + ConfigurationPredicate::Any, + ), + }; + provider_pattern.try_map(|ProvidersPatternExtra { providers }| { + Ok(ConfiguredProvidersPatternExtra { providers, cfg }) + }) +} + +// Lex the target pattern into the relevant pieces. +pub fn lex_target_pattern<'a, T: PatternType>( + pattern: &'a str, + strip_package_trailing_slash: bool, +) -> anyhow::Result<PatternParts<'a, T>> { + let provider_pattern = lex_configured_providers_pattern(pattern, strip_package_trailing_slash)?; + provider_pattern + .try_map(|extra| T::from_configured_providers(extra)) + .with_context(|| { + // This can only fail when `PatternType = TargetName`, so the message is correct. + TargetPatternParseError::ExpectingPatternOfType(T::NAME, pattern.to_owned()) + }) +} + +fn normalize_package<'a>( + package: &'a str, + strip_package_trailing_slash: bool, +) -> anyhow::Result<&'a ForwardRelativePath> { + // Strip or reject trailing `/`, such as in `foo/:bar`. + if let Some(stripped) = strip_suffix_ascii(package, AsciiChar::new('/')) { + if strip_package_trailing_slash { + return ForwardRelativePath::new(stripped); + } else { + return Err(anyhow::Error::from( + TargetPatternParseError::PackageTrailingSlash, + )); + } + } + + ForwardRelativePath::new(package) +} + +#[derive(Clone, Dupe)] +enum TargetParsingRel<'a> { + /// The dir this pattern should be interpreted relative to. + AllowRelative(CellPathRef<'a>), + /// The dir this pattern should be interpreted relative to. + /// This is only used for targets such as `:foo`. + RequireAbsolute(Option<CellPathRef<'a>>), +} + +impl<'a> TargetParsingRel<'a> { + fn dir(&self) -> Option<CellPathRef<'a>> { + match self { + TargetParsingRel::AllowRelative(dir) => Some(*dir), + TargetParsingRel::RequireAbsolute(dir) => *dir, + } + } + + fn allow_relative(&self) -> bool { + match self { + TargetParsingRel::AllowRelative(_) => true, + TargetParsingRel::RequireAbsolute(_) => false, + } + } +} + +#[derive(Clone, Dupe)] +struct TargetParsingOptions<'a> { + relative: TargetParsingRel<'a>, + /// Whether to infer the target in a pattern such as `foo/bar` (to `foo/bar:bar`). + infer_target: bool, + /// Whether to strip trailing slashes in package names, in e.g. `foo/bar/` or `foo/bar/:qux`. + /// If not set, trailing slashes are an error. Note that this happens before target inference + /// (if enabled), so e.g. `foo/bar/` becomes `foo/bar:bar`. + strip_package_trailing_slash: bool, +} + +impl<'a> TargetParsingOptions<'a> { + fn precise() -> TargetParsingOptions<'a> { + TargetParsingOptions { + relative: TargetParsingRel::RequireAbsolute(None), + infer_target: false, + strip_package_trailing_slash: false, + } + } +}
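The space that separates a pattern from its `(configuration)` part must be found outside parentheses, which is why `split_cfg` above tracks nesting depth. A standalone mirror of that loop (omitting the fast path):

```rust
// Sketch of split_cfg: split on the first space at parenthesis depth zero.
fn split_cfg(s: &str) -> Option<(&str, &str)> {
    let mut depth = 0u32;
    for (i, c) in s.char_indices() {
        match c {
            '(' => depth += 1,
            ')' => depth = depth.checked_sub(1)?, // unbalanced: give up
            ' ' if depth == 0 => return Some((&s[..i], &s[i + 1..])),
            _ => {}
        }
    }
    None
}

fn main() {
    // Target part and `(configuration#hash)` part split cleanly.
    assert_eq!(
        split_cfg("root//pkg:target (my-cfg#0123456789abcdef)"),
        Some(("root//pkg:target", "(my-cfg#0123456789abcdef)"))
    );
    // No top-level space: nothing to split.
    assert_eq!(split_cfg("root//pkg:target"), None);
}
```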
+/// Parse a TargetPattern out, resolving aliases via `cell_resolver`, and resolving relative +/// targets via `enclosing_package`, if provided. +fn parse_target_pattern<T>( + cell_name: CellName, + cell_resolver: &CellResolver, + cell_alias_resolver: &CellAliasResolver, + target_alias_resolver: Option<&dyn TargetAliasResolver>, + opts: TargetParsingOptions, + pattern: &str, +) -> anyhow::Result<ParsedPattern<T>> +where + T: PatternType, +{ + let res: anyhow::Result<_> = try { + let parsed_pattern = parse_target_pattern_no_validate::<T>( + cell_name, + cell_resolver, + cell_alias_resolver, + target_alias_resolver, + opts, + pattern, + )?; + + let crossed_path = + cell_resolver.resolve_path_crossing_cell_boundaries(parsed_pattern.cell_path())?; + if crossed_path != parsed_pattern.cell_path() { + let new_pattern = match &parsed_pattern { + ParsedPattern::Target(_, target_name, extra) => ParsedPattern::Target( + PackageLabel::from_cell_path(crossed_path), + target_name.dupe(), + extra.clone(), + ), + ParsedPattern::Package(_) => { + ParsedPattern::Package(PackageLabel::from_cell_path(crossed_path)) + } + ParsedPattern::Recursive(_) => ParsedPattern::Recursive(crossed_path.to_owned()), + }; + + soft_error!( + "pattern_crosses_cell_boundary", + TargetPatternParseError::PatternCrossesCellBoundaries( + pattern.to_owned(), + parsed_pattern.to_string(), + new_pattern.to_string(), + ) + .into() + )?; + } + + parsed_pattern + }; + + res.input_anyhow() +} + +fn parse_target_pattern_no_validate<T>( + cell_name: CellName, + cell_resolver: &CellResolver, + cell_alias_resolver: &CellAliasResolver, + target_alias_resolver: Option<&dyn TargetAliasResolver>, + opts: TargetParsingOptions, + pattern: &str, +) -> anyhow::Result<ParsedPattern<T>> +where + T: PatternType, +{ + let TargetParsingOptions { + relative, + infer_target, + strip_package_trailing_slash, + } = opts; + + if let Some(dir) = relative.dir() { + if dir.cell() != cell_name { + return Err(internal_error_anyhow!( + "Cell resolver cell `{cell_name}` does not match the given relative dir `{dir}`" + )); + } + } + + let lex = lex_target_pattern(pattern, strip_package_trailing_slash)?; + + if let Some(target_alias_resolver) = target_alias_resolver { + if let Some(aliased) = resolve_target_alias( + cell_name, + cell_resolver, + cell_alias_resolver, + target_alias_resolver, + &lex, + )? { + return Ok(aliased); + } + } + + let PatternParts { + cell_alias, + pattern, + } = lex; + + let pattern = if infer_target { + pattern.infer_target()? + } else { + pattern.reject_ambiguity()? + }; + + // This allows things of the form `//foo` (having a cell alias) or `:bar` (no cell, no package, + // just relative target). This is a bit of a wonky definition of "is_absolute" but we rely on + // it. + let is_absolute_or_adjacent = cell_alias.is_some() || pattern.is_adjacent_target(); + if !relative.allow_relative() && !is_absolute_or_adjacent { + return Err(TargetPatternParseError::AbsoluteRequired.into()); + } + + // Prohibit parsing `:foo` as `root//:foo`. + if relative.dir().is_none() && cell_alias.is_none() { + return Err(TargetPatternParseError::AbsoluteRequired.into()); + } + + // We ask for the cell, but if the pattern is relative we might not use it + let cell = cell_alias_resolver.resolve(cell_alias.unwrap_or_default())?; + + let package_path = pattern.package_path(); + + let path = match relative.dir() { + Some(rel) + if cell_alias.is_none() && (relative.allow_relative() || package_path.is_empty()) => + { + CellPathCow::Owned(rel.join(package_path)) + } + _ => CellPathCow::Borrowed(CellPathRef::new(cell, CellRelativePath::new(package_path))), + }; + + match pattern { + PatternData::Recursive { ..
} => Ok(ParsedPattern::Recursive(path.into_owned())), + PatternData::AllTargetsInPackage { .. } => Ok(ParsedPattern::Package( + PackageLabel::from_cell_path(path.as_ref()), + )), + PatternData::TargetInPackage { + target_name, extra, .. + } => Ok(ParsedPattern::Target( + PackageLabel::from_cell_path(path.as_ref()), + target_name, + extra, + )), + } +} + +#[derive(buck2_error::Error, Debug)] +enum ResolveTargetAliasError { + #[error("Error dereferencing alias `{}` -> `{}`", target, alias)] + ErrorDereferencing { target: String, alias: String }, + + #[error("Invalid alias: `{}`", alias)] + InvalidAlias { alias: String }, + + #[error("Alias for `{}` is not a target: `{}`", target, alias)] + AliasIsNotATarget { target: String, alias: String }, +} + +fn resolve_target_alias<T>( + cell_name: CellName, + cell_resolver: &CellResolver, + cell_alias_resolver: &CellAliasResolver, + target_alias_resolver: &dyn TargetAliasResolver, + lex: &PatternParts<T>, +) -> anyhow::Result<Option<ParsedPattern<T>>> +where + T: PatternType, +{ + // Imported from Buck1 + static ALIAS_REGEX: Lazy<Regex> = + Lazy::new(|| Regex::new("^[a-zA-Z_-][a-zA-Z0-9_-]*$").unwrap()); + + // If the input starts with a cell path, it can't be an alias. + if lex.cell_alias.is_some() { + return Ok(None); + } + + // Unless the input is a standalone bit of ambiguous text, it cannot be an alias. + let (target, extra) = match &lex.pattern { + PatternDataOrAmbiguous::Ambiguous { pattern, extra, .. } => (*pattern, extra), + _ => return Ok(None), + }; + + // Check if this is an alias after all. + let alias = match target_alias_resolver.get(target)? { + Some(alias) => alias, + None => return Ok(None), + }; + + // Now that we know it's an alias, check it matches the regex. We only do this once we know the + // alias is valid so that we avoid throwing "alias is invalid" if the user didn't mean to use + // an alias. + if !ALIAS_REGEX.is_match(target) { + return Err(ResolveTargetAliasError::InvalidAlias { + alias: alias.to_owned(), + } + .into()); + } + + // We found a matching alias. Parse the alias as a target. + let res = parse_target_pattern::<TargetPatternExtra>( + cell_name, + cell_resolver, + cell_alias_resolver, + None, + TargetParsingOptions::precise(), + alias, + ) + .with_context(|| ResolveTargetAliasError::ErrorDereferencing { + target: target.to_owned(), + alias: alias.to_owned(), + })?; + + // And finally, put the `T` we were looking for back together. + let res = match res { + ParsedPattern::Target(package, target_name, TargetPatternExtra) => { + ParsedPattern::Target(package, target_name, extra.clone()) + } + _ => { + return Err(ResolveTargetAliasError::AliasIsNotATarget { + target: target.to_owned(), + alias: alias.to_owned(), + } + .into()); + } + }; + + Ok(Some(res)) +} + +#[derive(Debug, Eq, PartialEq)] +pub enum PackageSpec<T: PatternType> { + /// Given targets in a package. + Targets(Vec<(TargetName, T)>), + /// All targets in a package, without subpackages. + /// Syntax for this variant is `foo:`.
+ All, +} + +#[cfg(test)] +mod tests { + use std::collections::HashMap; + use std::marker::PhantomData; + + use assert_matches::assert_matches; + use dupe::Dupe; + use gazebo::prelude::*; + use test_case::test_case; + + use crate::cells::alias::NonEmptyCellAlias; + use crate::cells::cell_path::CellPath; + use crate::cells::cell_root_path::CellRootPathBuf; + use crate::cells::name::CellName; + use crate::cells::paths::CellRelativePath; + use crate::cells::paths::CellRelativePathBuf; + use crate::cells::CellAliasResolver; + use crate::cells::CellResolver; + use crate::configuration::bound_label::BoundConfigurationLabel; + use crate::configuration::builtin::BuiltinPlatform; + use crate::configuration::hash::ConfigurationHash; + use crate::package::PackageLabel; + use crate::pattern::pattern::ParsedPattern; + use crate::pattern::pattern::TargetPatternParseError; + use crate::pattern::pattern_type::ConfigurationPredicate; + use crate::pattern::pattern_type::ConfiguredProvidersPatternExtra; + use crate::pattern::pattern_type::ConfiguredTargetPatternExtra; + use crate::pattern::pattern_type::PatternType; + use crate::pattern::pattern_type::ProvidersPatternExtra; + use crate::pattern::pattern_type::TargetPatternExtra; + use crate::provider::label::NonDefaultProvidersName; + use crate::provider::label::ProviderName; + use crate::provider::label::ProvidersName; + use crate::target::label::label::TargetLabel; + use crate::target::name::TargetName; + use crate::target::name::TargetNameRef; + use crate::target_aliases::TargetAliasResolver; + + fn mk_package<P: PatternType>(cell: &str, path: &str) -> ParsedPattern<P> { + ParsedPattern::Package(PackageLabel::testing_new(cell, path)) + } + + fn mk_recursive<P: PatternType>(cell: &str, path: &str) -> ParsedPattern<P> { + ParsedPattern::Recursive(CellPath::new( + CellName::testing_new(cell), + CellRelativePathBuf::unchecked_new(path.to_owned()), + )) + } + + fn mk_target(cell: &str, path: &str, target: &str) -> ParsedPattern<TargetPatternExtra> { + ParsedPattern::Target( + PackageLabel::testing_new(cell, path), + TargetName::testing_new(target), + TargetPatternExtra, + ) + } + + fn mk_providers( + cell: &str, + path: &str, + target: &str, + providers: Option<&[&str]>, + ) -> ParsedPattern<ProvidersPatternExtra> { + ParsedPattern::Target( + PackageLabel::testing_new(cell, path), + TargetName::testing_new(target), + ProvidersPatternExtra { + providers: providers.map_or(ProvidersName::Default, |n| { + ProvidersName::NonDefault(triomphe::Arc::new(NonDefaultProvidersName::Named( + buck2_util::arc_str::ArcSlice::from_iter( + n.map(|s| ProviderName::new((*s).to_owned()).unwrap()), + ), + ))) + }), + }, + ) + } + + fn mk_configured_providers( + cell: &str, + path: &str, + target: &str, + providers: Option<&[&str]>, + cfg: ConfigurationPredicate, + ) -> ParsedPattern<ConfiguredProvidersPatternExtra> { + mk_providers(cell, path, target, providers) + .try_map(|ProvidersPatternExtra { providers }| { + Ok(ConfiguredProvidersPatternExtra { providers, cfg }) + }) + .unwrap() + } + + fn fails<T>(x: anyhow::Result<T>, msgs: &[&str]) { + match x { + Err(e) => { + let s = format!("{:#}", e); + for msg in msgs { + if !s.contains(msg) { + panic!("Expected `{}` but missing from error `{:#}`", msg, e) + } + } + } + Ok(_) => panic!("Expected failure but succeeded"), + } + } + + struct NoAliases; + + impl TargetAliasResolver for NoAliases { + fn get<'a>(&'a self, _name: &str) -> anyhow::Result<Option<&'a str>> { + Ok(None) + } + } + + fn aliases(aliases: &[(&str, &str)]) -> impl TargetAliasResolver { + struct Aliases(Vec<(String, String)>); + + impl TargetAliasResolver for Aliases { + fn get<'a>(&'a self, name: &str) -> anyhow::Result<Option<&'a str>> { + Ok(self + .0 + .iter() + .find(|(a, _)| *a == name) + .map(|(_, b)| b.as_str())) + } + } + + Aliases( + aliases + .iter() + .map(|(a, b)| ((*a).to_owned(), (*b).to_owned())) + .collect(), + ) + } + + fn resolver() -> CellResolver { + CellResolver::testing_with_names_and_paths_with_alias( + &[ + ( + CellName::testing_new("root"), + CellRootPathBuf::testing_new(""), + ), + ( + CellName::testing_new("cell1"), + CellRootPathBuf::testing_new("cell1"), + ), + ( + CellName::testing_new("cell2"), + CellRootPathBuf::testing_new("cell2"), + ), + ], + HashMap::from_iter([ + ( + NonEmptyCellAlias::testing_new("cell1"), + CellName::testing_new("cell1"), + ), + ( + NonEmptyCellAlias::testing_new("alias2"), + CellName::testing_new("cell2"), + ), + ]), + ) + } + + fn alias_resolver() -> CellAliasResolver { + resolver().root_cell_cell_alias_resolver().clone() + } + + #[test_case(PhantomData::< TargetPatternExtra >; "parsing TargetPattern")] + #[test_case(PhantomData::< ProvidersPatternExtra >; "parsing ProvidersPattern")] + #[test_case(PhantomData::< ConfiguredTargetPatternExtra >; "parsing ConfiguredTargetPatternExtra")] + #[test_case(PhantomData::< ConfiguredProvidersPatternExtra >; "parsing ConfiguredProvidersPatternExtra")] + fn parse_absolute_pattern<T: PatternType>(_: PhantomData<T>) { + let package = CellPath::new( + CellName::testing_new("root"), + CellRelativePath::unchecked_new("package/path").to_owned(), + ); + + assert_eq!( + mk_package::<T>("root", "package/path"), + ParsedPattern::<T>::parse_precise( + "//package/path:", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + ) + .unwrap() + ); + assert_eq!( + mk_package::<T>("root", ""), + ParsedPattern::<T>::parse_precise( + "//:",
CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + ) + .unwrap() + ); + assert_eq!( + mk_package::<T>("cell1", "package/path"), + ParsedPattern::<T>::parse_precise( + "cell1//package/path:", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + ) + .unwrap() + ); + assert_matches!( + ParsedPattern::<T>::parse_precise("package/path:", CellName::testing_new("root"), &resolver(), + &alias_resolver(),), + Err(e) => { + assert!( + format!("{:?}", e).contains(&format!("{}", TargetPatternParseError::AbsoluteRequired)) + ); + } + ); + assert_eq!( + mk_package::<T>("cell2", "package/path"), + ParsedPattern::<T>::parse_precise( + "alias2//package/path:", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + ) + .unwrap() + ); + assert_eq!( + mk_package::<T>("cell2", "package/path"), + ParsedPattern::<T>::parse_precise( + "@alias2//package/path:", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + ) + .unwrap() + ); + assert_eq!( + mk_recursive::<T>("root", "package/path"), + ParsedPattern::<T>::parse_precise( + "//package/path/...", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + ) + .unwrap() + ); + assert_eq!( + mk_recursive::<T>("root", "package/path"), + ParsedPattern::<T>::parse_relative( + &NoAliases, + package.as_ref(), + "...", + &resolver(), + &alias_resolver(), + ) + .unwrap() + ); + assert_eq!( + mk_recursive::<T>("root", "package/path/foo"), + ParsedPattern::<T>::parse_relative( + &NoAliases, + package.as_ref(), + "foo/...", + &resolver(), + &alias_resolver(), + ) + .unwrap() + ); + } + + #[test] + fn parse_relative_pattern() -> anyhow::Result<()> { + let package = CellPath::new( + CellName::testing_new("root"), + CellRelativePath::unchecked_new("package/path").to_owned(), + ); + + assert_eq!( + mk_target("root", "package/path", "target"), + ParsedPattern::parse_precise( + "//package/path:target", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + )? + ); + assert_eq!( + mk_target("root", "package/path/foo", "target"), + ParsedPattern::parse_relative( + &NoAliases, + package.as_ref(), + "foo:target", + &resolver(), + &alias_resolver(), + )? + ); + Ok(()) + } + + #[test] + fn test_relaxed() -> anyhow::Result<()> { + let package = CellPath::new( + CellName::testing_new("root"), + CellRelativePath::unchecked_new("package").to_owned(), + ); + + assert_matches!( + ParsedPattern::<TargetPatternExtra>::parse_relative( + &NoAliases, + package.as_ref(), + "path", + &resolver(), + &alias_resolver(), + ), + Err(e) => { + assert!( + format!("{:?}", e).contains(&format!("{}", TargetPatternParseError::UnexpectedFormat)) + ); + } + ); + + assert_eq!( + mk_target("root", "package/path", "path"), + ParsedPattern::parse_relaxed( + &NoAliases, + package.as_ref(), + "//package/path", + &resolver(), + &alias_resolver(), + )? + ); + assert_eq!( + mk_target("root", "package/path", "path"), + ParsedPattern::parse_relaxed( + &NoAliases, + package.as_ref(), + "path", + &resolver(), + &alias_resolver(), + )? + ); + assert_eq!( + mk_providers("root", "package/path", "path", Some(&["provider"])), + ParsedPattern::parse_relaxed( + &NoAliases, + package.as_ref(), + "path[provider]", + &resolver(), + &alias_resolver(), + )? + ); + assert_eq!( + mk_providers( + "root", + "package/path/subpath", + "subpath", + Some(&["provider"]) + ), + ParsedPattern::parse_relaxed( + &NoAliases, + package.as_ref(), + "path/subpath[provider]", + &resolver(), + &alias_resolver(), + )?
+ ); + assert_eq!( + mk_target("root", "package/path/subpath", "subpath"), + ParsedPattern::parse_relaxed( + &NoAliases, + package.as_ref(), + "path/subpath", + &resolver(), + &alias_resolver(), + )? + ); + assert_eq!( + mk_target("root", "package/path", "path"), + ParsedPattern::parse_relaxed( + &NoAliases, + package.as_ref(), + "//package/path/", + &resolver(), + &alias_resolver(), + )? + ); + assert_eq!( + mk_target("root", "package/path", "target"), + ParsedPattern::parse_relaxed( + &NoAliases, + package.as_ref(), + "//package/path/:target", + &resolver(), + &alias_resolver(), + )? + ); + + // Awkward but technically valid? + assert_eq!( + mk_target("root", "package", "foo"), + ParsedPattern::parse_relaxed( + &NoAliases, + package.as_ref(), + "/:foo", + &resolver(), + &alias_resolver(), + )? + ); + + // There's no target here so this is invalid. + assert_matches!( + ParsedPattern::<TargetPatternExtra>::parse_relaxed( + &NoAliases, + package.as_ref(), + "/", + &resolver(), + &alias_resolver(), + ), + Err(e) => { + assert!( + format!("{:?}", e).contains(&format!("{}", TargetPatternParseError::PackageIsEmpty)) + ); + } + ); + + Ok(()) + } + + #[test] + fn test_parsed_opt_absolute() -> anyhow::Result<()> { + let package = CellPath::new( + CellName::testing_new("root"), + CellRelativePath::unchecked_new("package/path").to_owned(), + ); + + assert_eq!( + mk_target("root", "other", "target"), + ParsedPattern::parsed_opt_absolute( + "//other:target", + Some(package.as_ref()), + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + )? + ); + assert_eq!( + mk_target("root", "package/path", "target"), + ParsedPattern::parsed_opt_absolute( + ":target", + Some(package.as_ref()), + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + )? + ); + let err = ParsedPattern::<TargetPatternExtra>::parsed_opt_absolute( + ":target", + None, + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + ) + .unwrap_err(); + assert!( + err.to_string() + .contains("Invalid absolute target pattern `:target` is not allowed"), + "{}", + err + ); + // But this should be fine. + assert_eq!( + mk_target("cell1", "", "target"), + ParsedPattern::parsed_opt_absolute( + "cell1//:target", + None, + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + )?
+ ); + + assert_matches!( + ParsedPattern::<TargetPatternExtra>::parsed_opt_absolute( + "foo/bar", + Some(package.as_ref()), + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + ), + Err(e) => { + assert!( + format!("{:?}", e).contains(&format!("{}", TargetPatternParseError::UnexpectedFormat)) + ); + } + ); + + assert_matches!( + ParsedPattern::<TargetPatternExtra>::parsed_opt_absolute( + "foo/bar:bar", + Some(package.as_ref()), + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + ), + Err(e) => { + assert!( + format!("{:?}", e).contains(&format!("{}", TargetPatternParseError::AbsoluteRequired)) + ); + } + ); + assert_matches!( + ParsedPattern::<TargetPatternExtra>::parsed_opt_absolute( + "foo/bar:bar", + None, + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + ), + Err(e) => { + assert!( + format!("{:?}", e).contains(&format!("{}", TargetPatternParseError::AbsoluteRequired)) + ); + } + ); + + Ok(()) + } + + #[test] + fn test_aliases() -> anyhow::Result<()> { + let package = CellPath::new( + CellName::testing_new("root"), + CellRelativePath::unchecked_new("package").to_owned(), + ); + + let config = aliases(&[ + ("foo", "cell1//foo/bar:target"), + ("invalid/alias", "cell1//foo/bar:target"), + ("badalias", "cell1//foo/bar:"), + ]); + + assert_eq!( + mk_target("cell1", "foo/bar", "target"), + ParsedPattern::parse_relaxed( + &config, + package.as_ref(), + "foo", + &resolver(), + &alias_resolver(), + )? + ); + + assert_matches!( + ParsedPattern::<TargetPatternExtra>::parse_relaxed( + &config, + package.as_ref(), + "invalid/alias", + &resolver(), + &alias_resolver(), + ), + Err(e) => { + assert!( + format!("{:?}", e).contains("Invalid alias") + ); + } + ); + + assert_matches!( + ParsedPattern::<TargetPatternExtra>::parse_relaxed( + &config, + package.as_ref(), + "badalias", + &resolver(), + &alias_resolver(), + ), + Err(e) => { + assert!( + format!("{:?}", e).contains("is not a target") + ); + } + ); + + Ok(()) + } + + #[test] + fn parse_providers_pattern() -> anyhow::Result<()> { + assert_eq!( + mk_providers("root", "package/path", "target", None), + ParsedPattern::parse_precise( + "//package/path:target", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + )? + ); + assert_eq!( + mk_providers("root", "package/path", "target", Some(&["java-output"])), + ParsedPattern::parse_precise( + "//package/path:target[java-output]", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + )? + ); + assert_eq!( + mk_providers( + "root", + "package/path", + "target", + Some(&["FDSIcon+FDSInternal.h"]), + ), + ParsedPattern::parse_precise( + "//package/path:target[FDSIcon+FDSInternal.h]", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + )? + ); + + let (target_label, providers) = ParsedPattern::parse_precise( + "//package/path:target#flavor", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + )? + .as_literal("")?; + assert_eq!( + "root//package/path:target#flavor", + ProvidersPatternExtra::into_providers_label( + providers, + target_label.pkg(), + target_label.name() + ) + .to_string(), + ); + Ok(()) + } + + #[test] + fn parse_providers_pattern_with_alias() -> anyhow::Result<()> { + let package = CellPath::new( + CellName::testing_new("root"), + CellRelativePath::unchecked_new("package").to_owned(), + ); + + let config = aliases(&[("foo", "cell1//foo/bar:target")]); + + assert_eq!( + mk_providers("cell1", "foo/bar", "target", Some(&["qux"])), + ParsedPattern::parse_relaxed( + &config, + package.as_ref(), + "foo[qux]", + &resolver(), + &alias_resolver(), + )?
+ ); + + Ok(()) + } + + #[test] + fn test_parse_configured_providers_pattern() -> anyhow::Result<()> { + assert_eq!( + mk_configured_providers( + "root", + "package/path", + "target", + None, + ConfigurationPredicate::Any + ), + ParsedPattern::parse_precise( + "//package/path:target", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + )? + ); + assert_eq!( + mk_configured_providers( + "root", + "package/path", + "target", + None, + ConfigurationPredicate::Builtin(BuiltinPlatform::Unspecified) + ), + ParsedPattern::parse_precise( + "//package/path:target (<unspecified>)", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + )? + ); + assert_eq!( + mk_configured_providers( + "root", + "package/path", + "target", + Some(&["P"]), + ConfigurationPredicate::Bound( + BoundConfigurationLabel::new("<foo>".to_owned()).unwrap(), + None + ), + ), + ParsedPattern::parse_precise( + "//package/path:target[P] (<foo>)", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + )? + ); + assert_eq!( + mk_configured_providers( + "root", + "package/path", + "target", + Some(&["P"]), + ConfigurationPredicate::Bound( + BoundConfigurationLabel::new("<foo>".to_owned()).unwrap(), + Some(ConfigurationHash::from_str("0123456789abcdef").unwrap()), + ), + ), + ParsedPattern::parse_precise( + "//package/path:target[P] (<foo>#0123456789abcdef)", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + )? + ); + Ok(()) + } + + #[test_case(PhantomData::< TargetPatternExtra >; "parsing TargetPattern")] + #[test_case(PhantomData::< ProvidersPatternExtra >; "parsing ProvidersPattern")] + #[test_case(PhantomData::< ConfiguredTargetPatternExtra >; "parsing ConfiguredTargetPatternExtra")] + #[test_case(PhantomData::< ConfiguredProvidersPatternExtra >; "parsing ConfiguredProvidersPatternExtra")] + fn parse_pattern_failure<T: PatternType>(_: PhantomData<T>) { + fails( + ParsedPattern::<T>::parse_precise( + "", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + ), + &[], + ); + fails( + ParsedPattern::<T>::parse_precise( + "//package/path", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + ), + &[], + ); + fails( + ParsedPattern::<T>::parse_precise( + "//package...", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + ), + &[], + ); + fails( + ParsedPattern::<T>::parse_precise( + "package", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + ), + &[], + ); + fails( + ParsedPattern::<T>::parse_precise( + "bad_alias//package/path:", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + ), + &[ + "bad_alias//package/path:", + "unknown cell alias: `bad_alias`.", + ], + ); + fails( + ParsedPattern::<T>::parse_precise( + "//package/path/:target", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + ), + &[], + ); + fails( + ParsedPattern::<T>::parse_precise( + "//package/path/", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + ), + &[], + ); + fails( + ParsedPattern::<T>::parse_precise( + "$(exe my macro)", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + ), + &[ + "$(exe my macro)", + "You may be trying to use a macro instead of a target pattern.
Macro usage is invalid here", + ], + ); + } + + #[test] + fn bad_providers_label() { + fails( + ParsedPattern::::parse_precise( + "//package/path:target[unclosed", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + ), + &[ + "//package/path:target[unclosed", + "target pattern with `[` must end with `]` to mark end of providers set label", + ], + ); + fails( + ParsedPattern::::parse_precise( + "//package/path:target[out]wrong", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + ), + &[ + "//package/path:target[out]wrong", + "target pattern with `[` must end with `]` to mark end of providers set label", + ], + ); + fails( + ParsedPattern::::parse_precise( + "$(exe my macro)", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + ), + &[ + "$(exe my macro)", + "You may be trying to use a macro instead of a target pattern. Macro usage is invalid here", + ], + ); + } + + #[test] + fn parsed_pattern_contains() -> anyhow::Result<()> { + let pkg1 = PackageLabel::new( + CellName::testing_new("root"), + CellRelativePath::unchecked_new("package/path"), + ); + let pkg2 = PackageLabel::new( + CellName::testing_new("root"), + CellRelativePath::unchecked_new("package"), + ); + let pkg3 = PackageLabel::new( + CellName::testing_new("root"), + CellRelativePath::unchecked_new("package2"), + ); + let pkg_in_different_cell = PackageLabel::new( + CellName::testing_new("cell1"), + CellRelativePath::unchecked_new("package/path"), + ); + + let target_in_pkg1 = TargetLabel::new(pkg1.dupe(), TargetNameRef::new("target")?); + let another_target_in_pkg1 = TargetLabel::new(pkg1, TargetNameRef::new("target2")?); + let target_in_pkg2 = TargetLabel::new(pkg2, TargetNameRef::new("target")?); + let target_in_pkg3 = TargetLabel::new(pkg3, TargetNameRef::new("target")?); + let target_in_different_cell = + TargetLabel::new(pkg_in_different_cell, TargetNameRef::new("target")?); + + // Testing ParsedPattern::Target + + let pattern = ParsedPattern::parse_precise( + "//package/path:target", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + )?; + assert!(pattern.matches(&target_in_pkg1)); + assert!(!pattern.matches(&another_target_in_pkg1)); + assert!(!pattern.matches(&target_in_pkg2)); + assert!(!pattern.matches(&target_in_pkg3)); + assert!(!pattern.matches(&target_in_different_cell)); + + // Testing ParsedPattern::Package + + let pattern = ParsedPattern::parse_precise( + "//package/path:", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + )?; + assert!(pattern.matches(&target_in_pkg1)); + assert!(pattern.matches(&another_target_in_pkg1)); + assert!(!pattern.matches(&target_in_pkg2)); + assert!(!pattern.matches(&target_in_pkg3)); + assert!(!pattern.matches(&target_in_different_cell)); + + let pattern = ParsedPattern::parse_precise( + "//package:", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + )?; + assert!(!pattern.matches(&target_in_pkg1)); + assert!(!pattern.matches(&another_target_in_pkg1)); + assert!(pattern.matches(&target_in_pkg2)); + assert!(!pattern.matches(&target_in_pkg3)); + assert!(!pattern.matches(&target_in_different_cell)); + + // Testing ParsedPattern::Recursive + + let pattern = ParsedPattern::parse_precise( + "//package/path/...", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + )?; + assert!(pattern.matches(&target_in_pkg1)); + assert!(pattern.matches(&another_target_in_pkg1)); + assert!(!pattern.matches(&target_in_pkg2)); + assert!(!pattern.matches(&target_in_pkg3)); 
+ assert!(!pattern.matches(&target_in_different_cell)); + + let pattern = ParsedPattern::parse_precise( + "//package/...", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + )?; + assert!(pattern.matches(&target_in_pkg1)); + assert!(pattern.matches(&another_target_in_pkg1)); + assert!(pattern.matches(&target_in_pkg2)); + assert!(!pattern.matches(&target_in_pkg3)); + assert!(!pattern.matches(&target_in_different_cell)); + + let pattern = ParsedPattern::parse_precise( + "//...", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + )?; + assert!(pattern.matches(&target_in_pkg1)); + assert!(pattern.matches(&another_target_in_pkg1)); + assert!(pattern.matches(&target_in_pkg2)); + assert!(pattern.matches(&target_in_pkg3)); + assert!(!pattern.matches(&target_in_different_cell)); + + let pattern = ParsedPattern::parse_precise( + "cell1//...", + CellName::testing_new("root"), + &resolver(), + &alias_resolver(), + )?; + assert!(!pattern.matches(&target_in_pkg1)); + assert!(!pattern.matches(&another_target_in_pkg1)); + assert!(!pattern.matches(&target_in_pkg2)); + assert!(!pattern.matches(&target_in_pkg3)); + assert!(pattern.matches(&target_in_different_cell)); + + Ok(()) + } + + #[test] + fn test_parsed_pattern_display() { + assert_eq!( + "foo//bar:baz", + ParsedPattern::<TargetPatternExtra>::testing_parse("foo//bar:baz").to_string() + ); + assert_eq!( + "foo//bar:", + ParsedPattern::<TargetPatternExtra>::testing_parse("foo//bar:").to_string() + ); + assert_eq!( + "foo//bar/...", + ParsedPattern::<TargetPatternExtra>::testing_parse("foo//bar/...").to_string() + ); + assert_eq!( + "foo//:", + ParsedPattern::<TargetPatternExtra>::testing_parse("foo//:").to_string() + ); + assert_eq!( + "foo//...", + ParsedPattern::<TargetPatternExtra>::testing_parse("foo//...").to_string() + ); + } + + #[test] + fn test_cross_cell_boundary() { + let cell_resolver = CellResolver::testing_with_names_and_paths(&[ + ( + CellName::testing_new("root"), + CellRootPathBuf::testing_new(""), + ), + ( + CellName::testing_new("cell1"), + CellRootPathBuf::testing_new("cell1"), + ), + ( + CellName::testing_new("cell2"), + CellRootPathBuf::testing_new("cell1/xx/cell2"), + ), + ]); + + let err = ParsedPattern::<TargetPatternExtra>::parse_precise( + "root//cell1/xx/cell2/yy/...", + CellName::testing_new("root"), + &cell_resolver, + &alias_resolver(), + ) + .unwrap_err(); + let err = format!("{:?}", err); + assert!( + err.contains("Pattern `root//cell1/xx/cell2/yy/...` is parsed as `root//cell1/xx/cell2/yy/...` which crosses cell boundaries.
Try `cell2//yy/...`"), + "Error is: {}", + err); + } +} diff --git a/app/buck2_core/src/pattern/pattern_type.rs b/app/buck2_core/src/pattern/pattern_type.rs index addfb339d0f25..cff983ac68091 100644 --- a/app/buck2_core/src/pattern/pattern_type.rs +++ b/app/buck2_core/src/pattern/pattern_type.rs @@ -21,10 +21,11 @@ use crate::configuration::hash::ConfigurationHash; use crate::package::PackageLabel; use crate::provider::label::ProvidersLabel; use crate::provider::label::ProvidersName; -use crate::target::label::TargetLabel; +use crate::target::label::label::TargetLabel; use crate::target::name::TargetNameRef; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] +#[buck2(input)] enum PatternTypeError { #[error("Expecting target pattern, without providers")] ExpectingTargetNameWithoutProviders, @@ -74,7 +75,7 @@ pub trait PatternType: PartialOrd, Allocative )] -#[display(fmt = "")] +#[display("")] pub struct TargetPatternExtra; impl PatternType for TargetPatternExtra { @@ -155,14 +156,30 @@ impl PatternType for ProvidersPatternExtra { } } -#[derive(Default, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Allocative)] +#[derive( + Default, + Clone, + Debug, + Eq, + PartialEq, + Ord, + PartialOrd, + Allocative, + derive_more::Display +)] pub enum ConfigurationPredicate { /// Matches any configuration. #[default] + #[display("")] Any, /// Matches builtin platform. Builtin(BuiltinPlatform), /// Matches user defined configuration. + #[display( + "{}{}", + _0, + _1.as_ref().map_or(String::new(), |h| format!("#{}", h)) + )] Bound( BoundConfigurationLabel, /// None means match any configuration with given label. diff --git a/app/buck2_core/src/pattern/query_file_literal.rs b/app/buck2_core/src/pattern/query_file_literal.rs index 85b8d98e8ad88..6b9fc10851da9 100644 --- a/app/buck2_core/src/pattern/query_file_literal.rs +++ b/app/buck2_core/src/pattern/query_file_literal.rs @@ -15,7 +15,7 @@ use crate::cells::CellAliasResolver; use crate::cells::CellResolver; use crate::fs::paths::abs_norm_path::AbsNormPath; use crate::fs::project::ProjectRoot; -use crate::pattern::maybe_split_cell_alias_and_relative_path; +use crate::pattern::pattern::maybe_split_cell_alias_and_relative_path; pub fn parse_query_file_literal( literal: &str, diff --git a/app/buck2_core/src/pattern/target_pattern.md b/app/buck2_core/src/pattern/target_pattern.md index 2d2d407db50bb..7c9d8c9669500 100644 --- a/app/buck2_core/src/pattern/target_pattern.md +++ b/app/buck2_core/src/pattern/target_pattern.md @@ -1,34 +1,60 @@ -A __target pattern__ is a string that describes a set of one or more targets. You can use target patterns as arguments to commands, such as buck build and buck query. You can also use target patterns in the Visibility argument of your build rules. - -The simplest target pattern `//apps/myapp:app` matches exactly the target of the same name `//apps/myapp:app`. - - -A target pattern that ends with a colon matches all targets in the build file at the preceding directory path. For example, suppose that the build file `apps/myapp/BUCK` defines the rules: app_debug and app_release, then the target pattern `//apps/myapp:` matches `//apps/myapp:app_debug` and `//apps/myapp:app_release`. - - -A target pattern that ends with an ellipsis "/..." matches all targets in the build file in the directory that precedes the ellipsis and also all targets in build files in subdirectories (within the same cell). For example, suppose that you have the following build files: `apps/BUCK`, `apps/myapp/BUCK`. 
Then the target pattern `//apps/...` would match (for example) `//apps:common` and `//apps/myapp:app`. The pattern `//...` would match the same (even though there's no build file in the root directory). +A **target pattern** is a string that describes a set of one or more targets. +You can use target patterns as arguments to commands, such as buck build and +buck query. You can also use target patterns in the Visibility argument of your +build rules. + +The simplest target pattern `//apps/myapp:app` matches exactly the target of the +same name `//apps/myapp:app`. + +A target pattern that ends with a colon matches all targets in the build file at +the preceding directory path. For example, suppose that the build file +`apps/myapp/BUCK` defines the rules: app_debug and app_release, then the target +pattern `//apps/myapp:` matches `//apps/myapp:app_debug` and +`//apps/myapp:app_release`. + +A target pattern that ends with an ellipsis "/..." matches all targets in the +build file in the directory that precedes the ellipsis and also all targets in +build files in subdirectories (within the same cell). For example, suppose that +you have the following build files: `apps/BUCK`, `apps/myapp/BUCK`. Then the +target pattern `//apps/...` would match (for example) `//apps:common` and +`//apps/myapp:app`. The pattern `//...` would match the same (even though +there's no build file in the root directory). ## Cell resolution -Cells will be resolved in the context where the target pattern appears. When used as arguments to the command line, they will be resolved based on the cell of the directory in which the command is invoked. +Cells will be resolved in the context where the target pattern appears. When +used as arguments to the command line, they will be resolved based on the cell +of the directory in which the command is invoked. -If `~/project` and `~/project/cell` are both cells with names `project` and `cell` respectively, then `//some:target` would resolve to `project//some:target` if it appears in `~/project/BUCK` and `cell//some:target` if it appears in `~/project/cell/BUCK`. +If `~/project` and `~/project/cell` are both cells with names `project` and +`cell` respectively, then `//some:target` would resolve to +`project//some:target` if it appears in `~/project/BUCK` and `cell//some:target` +if it appears in `~/project/cell/BUCK`. ## Relative patterns -Target patterns can be absolute (`//my/app:target`, `cell//other:target`) or relative `app:target`. A relative pattern will be resolved relative to the working directory of the command. +Target patterns can be absolute (`//my/app:target`, `cell//other:target`) or +relative (`app:target`). A relative pattern will be resolved relative to the +working directory of the command. ## Restrictions -A target pattern should not include any `..` segments. This applies to both absolute and relative patterns. +A target pattern should not include any `..` segments. This applies to both +absolute and relative patterns. ## Inner Providers -A target pattern used where providers labels are expected can refer to rule's inner providers via `//my/app:target[<providers label>]` syntax. -The inner providers label will refer to a specific set of providers exposed by a rule, such as a particular set of outputs from the rule. -The providers label can be used for commands: buck builds, provider queries, and action queries. -Any rule's dependencies also refers to a providers label. -However, configuration rules (i.e config_settings) should be referred to without providers.
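As a rough model of the three pattern kinds described in this document, the matching semantics can be sketched as follows; the `Pattern` type and the string-based package paths are invented for illustration and are not buck2's actual API:

```rust
/// Illustrative model of the three target pattern kinds.
enum Pattern {
    /// `//apps/myapp:app` matches exactly one target.
    Exact { package: &'static str, target: &'static str },
    /// `//apps/myapp:` matches every target in one package.
    Package { package: &'static str },
    /// `//apps/...` matches targets in a package and all its subdirectories.
    Recursive { prefix: &'static str },
}

fn matches(p: &Pattern, package: &str, target: &str) -> bool {
    match p {
        Pattern::Exact { package: pk, target: t } => *pk == package && *t == target,
        Pattern::Package { package: pk } => *pk == package,
        Pattern::Recursive { prefix } => {
            prefix.is_empty() || package == *prefix || package.starts_with(&format!("{}/", prefix))
        }
    }
}

fn main() {
    let rec = Pattern::Recursive { prefix: "apps" };
    assert!(matches(&rec, "apps", "common")); // `//apps:common`
    assert!(matches(&rec, "apps/myapp", "app")); // `//apps/myapp:app`
    assert!(!matches(&rec, "tools", "x"));
}
```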
-(TODO: experiment and see if we should just use provider labels everywhere). +A target pattern used where providers labels are expected can refer to a rule's +inner providers via `//my/app:target[<providers label>]` syntax. The inner +providers label will refer to a specific set of providers exposed by a rule, +such as a particular set of outputs from the rule. + +The providers label can be used for commands: buck builds, provider queries, and +action queries. Any rule's dependencies also refer to a providers label. +However, configuration rules (i.e. config_settings) should be referred to without +providers. (TODO: experiment and see if we should just use provider labels +everywhere). -The providers label syntax can only be used when the pattern is of a specific rule. Package and recursive patterns (e.g. `//some/pkg:` and `//some/...`) cannot have providers labels. +The providers label syntax can only be used when the pattern is of a specific +rule. Package and recursive patterns (e.g. `//some/pkg:` and `//some/...`) +cannot have providers labels. diff --git a/app/buck2_core/src/pattern/unparsed.rs b/app/buck2_core/src/pattern/unparsed.rs new file mode 100644 index 0000000000000..03d6fd0c67e6f --- /dev/null +++ b/app/buck2_core/src/pattern/unparsed.rs @@ -0,0 +1,49 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::marker::PhantomData; + +use allocative::Allocative; + +use crate::fs::project_rel_path::ProjectRelativePath; +use crate::fs::project_rel_path::ProjectRelativePathBuf; +use crate::pattern::pattern_type::PatternType; + +#[derive(Allocative, Debug, Clone, PartialEq, Eq)] +pub struct UnparsedPatterns<T: PatternType> { + /// Patterns, e.g. `[":foo", "//bar/..."]`. + patterns: Vec<String>, + /// Patterns are to be resolved relative to this directory. + working_dir: ProjectRelativePathBuf, + _marker: PhantomData<T>, +} + +impl<T: PatternType> UnparsedPatterns<T> { + pub fn new(patterns: Vec<String>, working_dir: ProjectRelativePathBuf) -> Self { + UnparsedPatterns { + patterns, + working_dir, + _marker: PhantomData, + } + } + + pub fn patterns(&self) -> &[String] { + &self.patterns + } + + pub fn working_dir(&self) -> &ProjectRelativePath { + &self.working_dir + } +} + +#[derive(Allocative, Debug, Clone, PartialEq, Eq)] +pub enum UnparsedPatternPredicate<T: PatternType> { + Any, + AnyOf(UnparsedPatterns<T>), +} diff --git a/app/buck2_core/src/plugins.rs b/app/buck2_core/src/plugins.rs index 8ae27c8d1497d..dfd01ac1c1282 100644 --- a/app/buck2_core/src/plugins.rs +++ b/app/buck2_core/src/plugins.rs @@ -10,20 +10,21 @@ use std::collections::BTreeMap; use allocative::Allocative; +use buck2_util::hash::BuckHasher; use derive_more::Display; use dupe::Dupe; -use internment_tweaks::Intern; -use internment_tweaks::StaticInterner; use starlark_map::ordered_map::OrderedMap; use starlark_map::small_map::Entry; +use static_interner::Intern; +use static_interner::Interner; use crate::cells::cell_path::CellPath; -use crate::target::label::TargetLabel; +use crate::target::label::label::TargetLabel; #[derive( Clone, Debug, Display, Eq, PartialEq, Hash, Ord, PartialOrd, Allocative )] -#[display(fmt = "{name}")] +#[display("{name}")] struct PluginKindInner { // The name and cell path aren't used for anything except that they serve as a unique identifier // for the plugin kind.
This allows us to treat `plugins.kind()` as if it returns a new value @@ -43,7 +44,7 @@ impl<'a> From<&'a PluginKindInner> for PluginKindInner { )] pub struct PluginKind(Intern<PluginKindInner>); -static PLUGIN_KIND_INTERNER: StaticInterner<PluginKindInner> = StaticInterner::new(); +static PLUGIN_KIND_INTERNER: Interner<PluginKindInner, BuckHasher> = Interner::new(); impl PluginKind { /// Creates a new `PluginKind` instance. @@ -80,7 +81,7 @@ enum PluginKindSetUnpacked { static_assertions::assert_eq_size!(PluginKindSet, usize); static_assertions::assert_eq_size!(PluginKindSetUnpacked, [usize; 2]); -static PLUGIN_KIND_SET_INTERNER: StaticInterner<Box<[PluginKind]>> = StaticInterner::new(); +static PLUGIN_KIND_SET_INTERNER: Interner<Box<[PluginKind]>, BuckHasher> = Interner::new(); impl PluginKindSet { pub const EMPTY: Self = Self::pack(PluginKindSetUnpacked::None); @@ -125,7 +126,7 @@ impl PluginKindSet { } else if self.0 as usize == 1 { PluginKindSetUnpacked::All } else { - // SAFETY: Instances of this type are only creaeted by `pack` + // SAFETY: Instances of this type are only created by `pack` PluginKindSetUnpacked::Interned(unsafe { Intern::from_ptr(self.0 as *const _) }) } } @@ -223,29 +224,27 @@ impl PluginLists { } } - pub fn iter<'a>( - &'a self, - ) -> impl Iterator<Item = (&'a PluginKind, &'a TargetLabel, &'a PluginListElemKind)> { + pub fn iter(&self) -> impl Iterator<Item = (&PluginKind, &TargetLabel, &PluginListElemKind)> { self.0 .iter() .flat_map(|(k, v)| v.iter().map(move |t| (k, t.0, t.1))) } - pub fn iter_by_kind<'a>( - &'a self, + pub fn iter_by_kind( + &self, ) -> impl Iterator< Item = ( - &'a PluginKind, - impl Iterator<Item = (&'a TargetLabel, &'a PluginListElemKind)>, + &PluginKind, + impl Iterator<Item = (&TargetLabel, &PluginListElemKind)>, ), > { self.0.iter().map(|(k, v)| (k, v.iter())) } - pub fn iter_for_kind<'a>( - &'a self, + pub fn iter_for_kind( + &self, kind: &PluginKind, - ) -> impl Iterator<Item = (&'a TargetLabel, &'a PluginListElemKind)> { + ) -> impl Iterator<Item = (&TargetLabel, &PluginListElemKind)> { self.0.get(kind).into_iter().flatten() } } diff --git a/app/buck2_core/src/provider/mod.rs b/app/buck2_core/src/provider.rs similarity index 100% rename from app/buck2_core/src/provider/mod.rs rename to app/buck2_core/src/provider.rs diff --git a/app/buck2_core/src/provider/flavors.rs b/app/buck2_core/src/provider/flavors.rs index 894164aa43e7e..a4f2f9a14f389 100644 --- a/app/buck2_core/src/provider/flavors.rs +++ b/app/buck2_core/src/provider/flavors.rs @@ -14,7 +14,6 @@ use regex::RegexSet; use crate::provider::label::NonDefaultProvidersName; use crate::provider::label::ProviderName; use crate::provider::label::ProvidersName; -use crate::soft_error; static PLATFORM_REGEX_SET: OnceLock<RegexSet> = OnceLock::new(); @@ -56,7 +55,8 @@ pub fn map_flavors(flavors: &str, full_target: &str) -> anyhow::Result<ProvidersName> { + [] => { // Some targets specifically ask for a given platform, we just ignore them return Ok(ProvidersName::Default); } - 1 => match flavors_parts[0] { - // android_binary intermediate/secondary outputs. See https://fburl.com/diffusion/jd3cmnfw - "package_string_assets" => "package_string_assets".to_owned(), - "aapt2_link" => "aapt2_link".to_owned(), - "unstripped_native_libraries" => "unstripped_native_libraries".to_owned(), - "proguard_text_output" => "proguard_text_output".to_owned(), - "generate_string_resources" => "generate_string_resources".to_owned(), - "generate_voltron_string_resources" => { - "generate_voltron_string_resources".to_owned() - } - "exo_symlink_tree" => "exo_symlink_tree".to_owned(), - - // android_library secondary outputs - "dummy_r_dot_java" => "dummy_r_dot_java".to_owned(), - - // Rules depend on `#headers` flavor of C++ libraries to use a - // dep's headers without linking against it.
- "headers" => "headers".to_owned(), - - // This is used by Rust quite a bit - "check" => "check".to_owned(), - - // FIXME(ndmitchell): Most users shouldn't be using strip-debug. - // We currently can't handle strip-debug, and it's a dependency of Eden, - // so just ignore it for now. D27984137 aims to add it back properly. - "strip-debug" => return Ok(ProvidersName::Default), - - // Used in JEX builder script (https://fburl.com/code/2w2gjkey) - "shared" => "shared".to_owned(), - - // Used by Nullsafe for (android|java)_libraries - "nullsafex-json" => "nullsafex-json".to_owned(), - - // This is for js_bundle. We strip it and let the configuration handle it instead. - "android" => return Ok(ProvidersName::Default), - - // Java/Kotlin sub-targets - "class-abi" => "class-abi".to_owned(), - "source-abi" => "source-abi".to_owned(), - "source-only-abi" => "source-only-abi".to_owned(), - - _ => { - return Ok(ProvidersName::NonDefault(Box::new( - NonDefaultProvidersName::UnrecognizedFlavor(flavors.into()), - ))); - } - }, + // android_binary intermediate/secondary outputs. See https://fburl.com/diffusion/jd3cmnfw + ["package_string_assets"] => "package_string_assets".to_owned(), + ["aapt2_link"] => "aapt2_link".to_owned(), + ["unstripped_native_libraries"] => "unstripped_native_libraries".to_owned(), + ["proguard_text_output"] => "proguard_text_output".to_owned(), + ["generate_string_resources"] => "generate_string_resources".to_owned(), + ["generate_voltron_string_resources"] => { + "generate_voltron_string_resources".to_owned() + } + ["exo_symlink_tree"] => "exo_symlink_tree".to_owned(), + + // android_library secondary outputs + ["dummy_r_dot_java"] => "dummy_r_dot_java".to_owned(), + + // Rules depend on `#headers` flavor of C++ libraries to use a + // dep's headers without linking against it. + ["headers"] => "headers".to_owned(), + + // This is used by Rust quite a bit + ["check"] => "check".to_owned(), + + // FIXME(ndmitchell): Most users shouldn't be using strip-debug. + // We currently can't handle strip-debug, and it's a dependency of Eden, + // so just ignore it for now. D27984137 aims to add it back properly. + ["strip-debug"] => return Ok(ProvidersName::Default), + + // Used in JEX builder script (https://fburl.com/code/2w2gjkey) + ["shared"] => "shared".to_owned(), + + // Used by Nullsafe for (android|java)_libraries + ["nullsafex-json"] => "nullsafex-json".to_owned(), + + // Java/Kotlin sub-targets + ["class-abi"] => "class-abi".to_owned(), + ["source-abi"] => "source-abi".to_owned(), + ["source-only-abi"] => "source-only-abi".to_owned(), + + ["compilation-database"] => "compilation-database".to_owned(), // For js_bundle rules. The platform and optimization ("release") flavors are stripped // and handled by the configuration. The other flavors are mapped to named outputs. 
- 2 => match (flavors_parts[0], flavors_parts[1]) { - ("android", "dependencies") => "dependencies".to_owned(), - ("android", "misc") => "misc".to_owned(), - ("android", "source_map") => "source_map".to_owned(), - ("android", "release") => return Ok(ProvidersName::Default), - _ => { - return Ok(ProvidersName::NonDefault(Box::new( - NonDefaultProvidersName::UnrecognizedFlavor(flavors.into()), - ))); - } - }, - - 3 => match (flavors_parts[0], flavors_parts[1], flavors_parts[2]) { - ("android", "dependencies", "release") => "dependencies".to_owned(), - ("android", "misc", "release") => "misc".to_owned(), - ("android", "release", "source_map") => "source_map".to_owned(), - _ => { - return Ok(ProvidersName::NonDefault(Box::new( - NonDefaultProvidersName::UnrecognizedFlavor(flavors.into()), - ))); - } - }, - - 4 => match ( - flavors_parts[0], - flavors_parts[1], - flavors_parts[2], - flavors_parts[3], - ) { - ("android", "misc", "rambundle-indexed", "release") => { - "rambundle-indexed-misc".to_owned() - } - _ => { - return Ok(ProvidersName::NonDefault(Box::new( - NonDefaultProvidersName::UnrecognizedFlavor(flavors.into()), - ))); - } - }, + ["android"] | ["android", "release"] => return Ok(ProvidersName::Default), + ["android", "dependencies"] | ["android", "dependencies", "release"] => { + "dependencies".to_owned() + } + ["android", "misc"] | ["android", "misc", "release"] => "misc".to_owned(), + ["android", "source_map"] | ["android", "release", "source_map"] => { + "source_map".to_owned() + } + ["android", "misc", "rambundle-indexed", "release"] => { + "rambundle-indexed-misc".to_owned() + } // This allows us to pass parsing for this thing. _ => { - return Ok(ProvidersName::NonDefault(Box::new( + return Ok(ProvidersName::NonDefault(triomphe::Arc::new( NonDefaultProvidersName::UnrecognizedFlavor(flavors.into()), ))); } - }, - )])), + }), + ])), ))) } diff --git a/app/buck2_core/src/provider/label.rs b/app/buck2_core/src/provider/label.rs index 35f6132ed8f49..6a256bc395094 100644 --- a/app/buck2_core/src/provider/label.rs +++ b/app/buck2_core/src/provider/label.rs @@ -8,33 +8,39 @@ */ use std::fmt; -use std::fmt::Display; use std::fmt::Formatter; use std::iter; use allocative::Allocative; +use buck2_util::arc_str::ArcSlice; +use buck2_util::arc_str::ArcStr; use derive_more::Display; use dupe::Dupe; use serde::Serialize; use serde::Serializer; use static_assertions::assert_eq_size; -use thiserror::Error; +use triomphe::Arc; use crate::ascii_char_set::AsciiCharSet; +use crate::cells::name::CellName; +use crate::cells::CellAliasResolver; +use crate::cells::CellResolver; use crate::configuration::data::ConfigurationData; use crate::configuration::pair::Configuration; use crate::configuration::pair::ConfigurationNoExec; +use crate::pattern::pattern::ParsedPattern; +use crate::pattern::pattern_type::ProvidersPatternExtra; use crate::target::configured_target_label::ConfiguredTargetLabel; -use crate::target::label::TargetLabel; +use crate::target::label::label::TargetLabel; #[derive( Display, Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd, Allocative )] pub struct ProviderName(String); -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] #[error( - "Invalid provider name `{}`. Inner providers names can only contain non-empty alpha numeric characters, and symbols `,`, '=', `-`, `/`, `+` and `_`. No other characters are allowed.", + "Invalid provider name `{}`. Inner providers names can only contain non-empty alpha numeric characters, and symbols `,`, `=`, `-`, `/`, `+` and `_`. 
No other characters are allowed.", _0 )] struct InvalidProviderName(String); @@ -66,14 +72,14 @@ impl ProviderName { } } -#[derive(Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd, Allocative)] +#[derive(Clone, Dupe, Debug, Hash, Eq, PartialEq, Ord, PartialOrd, Allocative)] pub enum NonDefaultProvidersName { - Named(Box<[ProviderName]>), + Named(ArcSlice), // For some flavors from buck1, we can translate them to ProvidersName::Named // as we know that we can implement them as a subtarget. For many flavored targets, // we can't do that. For those cases, we parse them to this "UnrecognizedFlavor" so // that we can defer any errors related to us not supporting it. - UnrecognizedFlavor(Box), + UnrecognizedFlavor(ArcStr), // TODO(cjhopman): We should add an InferredNamed for flavors where we infer a name // so that we can display them in their original form. } @@ -87,11 +93,13 @@ pub enum NonDefaultProvidersName { #[derive(Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd, Allocative)] pub enum ProvidersName { Default, - NonDefault(Box), + NonDefault(Arc), } assert_eq_size!(ProvidersName, [usize; 1]); +impl Dupe for ProvidersName {} + impl Default for ProvidersName { fn default() -> Self { Self::Default @@ -104,15 +112,17 @@ impl Display for ProvidersName { ProvidersName::Default => { write!(f, "") } - ProvidersName::NonDefault(box NonDefaultProvidersName::Named(names)) => { - for name in &**names { - write!(f, "[{}]", name)?; + ProvidersName::NonDefault(flavor) => match flavor.as_ref() { + NonDefaultProvidersName::Named(names) => { + for name in &**names { + write!(f, "[{}]", name)?; + } + Ok(()) } - Ok(()) - } - ProvidersName::NonDefault(box NonDefaultProvidersName::UnrecognizedFlavor(s)) => { - write!(f, "#{}", s) - } + NonDefaultProvidersName::UnrecognizedFlavor(s) => { + write!(f, "#{}", s) + } + }, } } } @@ -125,11 +135,11 @@ impl ProvidersName { NonDefaultProvidersName::Named(xs) => { xs.iter().cloned().chain(iter::once(name)).collect() } - NonDefaultProvidersName::UnrecognizedFlavor(_) => return self.clone(), + NonDefaultProvidersName::UnrecognizedFlavor(_) => return self.dupe(), }, }; - ProvidersName::NonDefault(Box::new(NonDefaultProvidersName::Named( - items.into_boxed_slice(), + ProvidersName::NonDefault(Arc::new(NonDefaultProvidersName::Named( + ArcSlice::from_iter(items), ))) } } @@ -139,9 +149,9 @@ impl ProvidersName { /// the 'ProvidersName' referring to the specific set of inner providers of a /// rule. #[derive( - Clone, Debug, Display, Hash, Eq, PartialEq, Ord, PartialOrd, Allocative + Clone, Dupe, Debug, Display, Hash, Eq, PartialEq, Ord, PartialOrd, Allocative )] -#[display(fmt = "{}{}", target, name)] +#[display("{}{}", target, name)] pub struct ProvidersLabel { target: TargetLabel, name: ProvidersName, @@ -170,12 +180,28 @@ impl ProvidersLabel { &self.name } + pub fn parse( + label: &str, + cell_name: CellName, + cell_resolver: &CellResolver, + cell_alias_resolver: &CellAliasResolver, + ) -> anyhow::Result { + let providers_label = ParsedPattern::::parse_precise( + label, + cell_name, + cell_resolver, + cell_alias_resolver, + )? + .as_providers_label(label)?; + Ok(providers_label) + } + /// Creates a 'ConfiguredProvidersLabel' from ['Self'] based on the provided /// configuration. 
pub fn configure(&self, cfg: ConfigurationData) -> ConfiguredProvidersLabel { ConfiguredProvidersLabel { target: self.target.configure(cfg), - name: self.name.clone(), + name: self.name.dupe(), } } @@ -187,7 +213,7 @@ impl ProvidersLabel { ) -> ConfiguredProvidersLabel { ConfiguredProvidersLabel { target: self.target.configure_with_exec(cfg, exec_cfg), - name: self.name.clone(), + name: self.name.dupe(), } } @@ -195,7 +221,7 @@ impl ProvidersLabel { pub fn configure_pair(&self, cfg_pair: Configuration) -> ConfiguredProvidersLabel { ConfiguredProvidersLabel { target: self.target.configure_pair(cfg_pair), - name: self.name.clone(), + name: self.name.dupe(), } } @@ -224,9 +250,9 @@ impl Serialize for ProvidersLabel { /// /// A configured 'ProvidersLabel'. #[derive( - Clone, Debug, Display, Hash, Eq, PartialEq, Ord, PartialOrd, Allocative + Clone, Dupe, Debug, Display, Hash, Eq, PartialEq, Ord, PartialOrd, Allocative )] -#[display(fmt = "{}{} ({})", "target.unconfigured()", "name", "target.cfg()")] +#[display("{}{} ({})", target.unconfigured(), name, target.cfg())] pub struct ConfiguredProvidersLabel { target: ConfiguredTargetLabel, name: ProvidersName, @@ -254,7 +280,7 @@ impl ConfiguredProvidersLabel { } pub fn unconfigured(&self) -> ProvidersLabel { - ProvidersLabel::new(self.target.unconfigured().dupe(), self.name.clone()) + ProvidersLabel::new(self.target.unconfigured().dupe(), self.name.dupe()) } pub fn name(&self) -> &ProvidersName { @@ -282,7 +308,6 @@ pub mod testing { use super::*; use crate::package::PackageLabel; - use crate::target::label::TargetLabel; use crate::target::name::TargetNameRef; pub trait ProvidersLabelTestExt { @@ -307,9 +332,8 @@ pub mod testing { TargetNameRef::new(target).unwrap(), ), match name { - Some(n) => ProvidersName::NonDefault(Box::new(NonDefaultProvidersName::Named( - n.map(|s| ProviderName::new((*s).to_owned()).unwrap()) - .into_boxed_slice(), + Some(n) => ProvidersName::NonDefault(Arc::new(NonDefaultProvidersName::Named( + ArcSlice::from_iter(n.map(|s| ProviderName::new((*s).to_owned()).unwrap())), ))), _ => ProvidersName::Default, }, diff --git a/app/buck2_core/src/rollout_percentage.rs b/app/buck2_core/src/rollout_percentage.rs index 3d95860e1b990..a278cee4bc96d 100644 --- a/app/buck2_core/src/rollout_percentage.rs +++ b/app/buck2_core/src/rollout_percentage.rs @@ -16,11 +16,14 @@ use rand::Rng; /// Returns true or false for percentage-based feature rollouts based on a configuration string. /// Configurations supported today are random and hostname. +/// /// - Random: Enabled by directly setting a decimal value. -/// Checks whether to enable feature based on a random roll -/// - Hostname: Set by `hostname=`, ex. `hostname=0.5`. Checks whether to roll out feature -/// based on hash of hostname. Useful when you want the same host to consistently get the -/// same feature enabled/disabled. +/// Checks whether to enable feature based on a random roll +/// +/// - Hostname: Set by `hostname:`, ex. `hostname:0.5`. Checks whether to roll out feature +/// based on hash of hostname. Useful when you want the same host to consistently get the +/// same feature enabled/disabled. +/// /// It's possible to extend this system to support per-username rollout as well in addition to /// per-host rollout. 
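A minimal sketch of the hostname-keyed rollout check described in the doc comment above, using the standard library hasher rather than whatever hash function buck2 actually uses; the bucketing granularity is also an assumption:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Sketch: map a hostname to a stable value in [0, 1) and enable the feature
/// when that value falls below the configured rollout fraction.
fn hostname_rollout(hostname: &str, fraction: f64) -> bool {
    let mut hasher = DefaultHasher::new();
    hostname.hash(&mut hasher);
    // Bucket the hash into [0, 1) with coarse (1/10000) granularity.
    let bucket = (hasher.finish() % 10_000) as f64 / 10_000.0;
    bucket < fraction
}

fn main() {
    // The same host consistently gets the same decision for a given fraction.
    let first = hostname_rollout("host-1234", 0.5);
    assert_eq!(first, hostname_rollout("host-1234", 0.5));
    // A fraction of 1.0 enables everywhere, 0.0 nowhere.
    assert!(hostname_rollout("any-host", 1.0));
    assert!(!hostname_rollout("any-host", 0.0));
}
```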
#[derive(Copy, Clone, Dupe, Debug)] diff --git a/app/buck2_core/src/sandcastle.rs b/app/buck2_core/src/sandcastle.rs deleted file mode 100644 index 6a5694bc669ae..0000000000000 --- a/app/buck2_core/src/sandcastle.rs +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use crate::env_helper::EnvHelper; - -/// Are we running on sandcastle? -pub fn is_sandcastle() -> anyhow::Result<bool> { - static SANDCASTLE: EnvHelper<String> = EnvHelper::new("SANDCASTLE"); - - Ok(SANDCASTLE.get()?.is_some()) -} diff --git a/app/buck2_core/src/target.rs b/app/buck2_core/src/target.rs new file mode 100644 index 0000000000000..02018f945cc2c --- /dev/null +++ b/app/buck2_core/src/target.rs @@ -0,0 +1,41 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! +//! A 'target' is an instance of a rule declared in the build file. Each +//! 'target' is a node on the 'static graph'. Targets are determined by parsing; +//! no extra analysis or building is necessary to determine the 'target's +//! available. +//! +//! For example, the below is a target, defined in some 'Package' with the given +//! name "foo". +//! +//!```ignored +//! java_library( +//! name = "foo", +//! srcs = [ ... ], +//! ... +//! ) +//! ``` +//! +//! Target names are limited to non-empty alpha numeric characters `,`, `=`, +//! `-`, `/`, and `_`. No other special characters, e.g. spaces, are allowed. +//! Currently, `+` is allowed for backwards compatibility but may be removed. +//! +//! 'TargetLabel's are labels/keys that uniquely map to a 'target' in the static +//! graph. These are of the form `<cell>//<package>:<target>`. +//! e.g. `mycell//my/package/path:my_target`, where `mycell` is the cell, +//! `my/package/path` is the package, and `my_target` is the target name +//! belonging to the package. + +pub mod configured_or_unconfigured; +pub mod configured_target_label; +pub mod label; +pub mod name; +pub mod target_configured_target_label; diff --git a/app/buck2_core/src/target/configured_or_unconfigured.rs b/app/buck2_core/src/target/configured_or_unconfigured.rs new file mode 100644 index 0000000000000..caca7f1f9829a --- /dev/null +++ b/app/buck2_core/src/target/configured_or_unconfigured.rs @@ -0,0 +1,27 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree.
+ */ + +use crate::target::configured_target_label::ConfiguredTargetLabel; +use crate::target::label::label::TargetLabel; + +pub trait ConfiguredOrUnconfiguredTargetLabel { + fn unconfigured_label(&self) -> &TargetLabel; +} + +impl ConfiguredOrUnconfiguredTargetLabel for TargetLabel { + fn unconfigured_label(&self) -> &TargetLabel { + self + } +} + +impl ConfiguredOrUnconfiguredTargetLabel for ConfiguredTargetLabel { + fn unconfigured_label(&self) -> &TargetLabel { + self.unconfigured() + } +} diff --git a/app/buck2_core/src/target/configured_target_label.rs b/app/buck2_core/src/target/configured_target_label.rs index 7b83379c440de..9d185254a0bb0 100644 --- a/app/buck2_core/src/target/configured_target_label.rs +++ b/app/buck2_core/src/target/configured_target_label.rs @@ -7,8 +7,10 @@ * of this source tree. */ -use std::fmt; +use std::fmt::Debug; use std::fmt::Display; +use std::fmt::{self}; +use std::hash::Hash; use std::str; use allocative::Allocative; @@ -20,13 +22,13 @@ use serde::Serializer; use crate::configuration::data::ConfigurationData; use crate::configuration::pair::Configuration; use crate::package::PackageLabel; -use crate::target::label::TargetLabel; +use crate::target::label::label::TargetLabel; use crate::target::name::TargetNameRef; /// 'ConfiguredTargetLabel' are 'TargetLabel's with an 'Configuration' attached. /// These uniquely map to nodes of the build graph with 'Configuration's /// applied. -#[derive(Clone, Dupe, Debug, Hash, Eq, PartialEq, Ord, PartialOrd, Allocative)] +#[derive(Clone, Dupe, Hash, Eq, PartialEq, Ord, PartialOrd, Allocative)] pub struct ConfiguredTargetLabel { pub(crate) target: TargetLabel, pub(crate) cfg_pair: Configuration, @@ -42,6 +44,12 @@ impl Display for ConfiguredTargetLabel { } } +impl Debug for ConfiguredTargetLabel { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + Display::fmt(self, f) + } +} + impl ConfiguredTargetLabel { #[inline] pub fn pkg(&self) -> PackageLabel { @@ -73,15 +81,11 @@ impl ConfiguredTargetLabel { self.cfg_pair.exec_cfg() } - /// Updates the exec config, but only if it's present - pub fn map_exec_cfg(&self, new_exec_cfg: &ConfigurationData) -> Self { - if self.exec_cfg().is_some() { - Self { - target: self.target.dupe(), - cfg_pair: Configuration::new(self.cfg().dupe(), Some(new_exec_cfg.dupe())), - } - } else { - self.dupe() + /// Sets the exec cfg to the given one + pub fn with_exec_cfg(&self, new_exec_cfg: ConfigurationData) -> Self { + Self { + target: self.target.dupe(), + cfg_pair: Configuration::new(self.cfg().dupe(), Some(new_exec_cfg)), } } diff --git a/app/buck2_core/src/target/label.rs b/app/buck2_core/src/target/label.rs index 60d6b46d644a2..6b59ab0349f08 100644 --- a/app/buck2_core/src/target/label.rs +++ b/app/buck2_core/src/target/label.rs @@ -7,224 +7,6 @@ * of this source tree. 
*/ -use std::cmp::Ordering; -use std::fmt; -use std::fmt::Debug; -use std::hash::Hash; -use std::hash::Hasher; -use std::str; - -use allocative::Allocative; -use buck2_data::ToProtoMessage; -use dupe::Dupe; -use fnv::FnvHasher; -use serde::Serialize; -use serde::Serializer; -use triomphe::ThinArc; - -use crate::cells::name::CellName; -use crate::cells::paths::CellRelativePath; -use crate::cells::CellResolver; -use crate::configuration::data::ConfigurationData; -use crate::configuration::pair::Configuration; -use crate::configuration::pair::ConfigurationNoExec; -use crate::package::PackageLabel; -use crate::pattern::lex_target_pattern; -use crate::pattern::pattern_type::TargetPatternExtra; -use crate::pattern::ParsedPattern; -use crate::target::configured_target_label::ConfiguredTargetLabel; -use crate::target::name::TargetNameRef; - -#[derive(Eq, PartialEq, Allocative)] -struct TargetLabelHeader { - /// Hash of target label (not package, not name). - /// Place hash first to make equality check faster. - hash: u32, - pkg: PackageLabel, - // TODO(nga): this struct has 4 bytes of padding. -} - -/// 'TargetLabel' that uniquely maps to a 'target' -/// It contains a 'Package' which is the 'Package' defined by the build fine -/// that contains this 'target', and a 'name' which is a 'TargetName' -/// representing the target name given to the particular target. -#[derive(Clone, derive_more::Display, Eq, PartialEq, Allocative)] -#[display(fmt = "{}", "self.as_ref()")] -pub struct TargetLabel( - ThinArc< - TargetLabelHeader, - // `u8` type argument means `ThinArc` stores `[u8]` inline. - // We store string target name in that `[u8]`. - u8, - >, -); - -impl Debug for TargetLabel { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("TargetLabel") - .field("pkg", &self.pkg()) - .field("name", &self.name()) - .finish() - } -} - -impl Dupe for TargetLabel {} - -#[allow(clippy::derived_hash_with_manual_eq)] -impl Hash for TargetLabel { - #[inline] - fn hash(&self, state: &mut H) { - self.0.header.header.hash.hash(state); - } -} - -impl Ord for TargetLabel { - #[inline] - fn cmp(&self, other: &Self) -> Ordering { - self.as_ref().cmp(&other.as_ref()) - } -} - -impl PartialOrd for TargetLabel { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl TargetLabel { - pub fn new(pkg: PackageLabel, name: &TargetNameRef) -> Self { - // TODO(nga): unnecessary to take `TargetName` by value. - - // Hash should be stable because it is used to generate the configuration hash. - let key = &(pkg.dupe(), &name); - let mut hasher = FnvHasher::default(); - key.hash(&mut hasher); - let hash = hasher.finish() as u32; - - TargetLabel(ThinArc::from_header_and_slice( - TargetLabelHeader { hash, pkg }, - name.as_str().as_bytes(), - )) - } - - #[inline] - pub fn pkg(&self) -> PackageLabel { - self.0.header.header.pkg.dupe() - } - - #[inline] - pub fn name(&self) -> &TargetNameRef { - let name = unsafe { str::from_utf8_unchecked(&self.0.slice) }; - TargetNameRef::unchecked_new(name) - } - - /// Creates a 'ConfiguredTargetLabel' from ['Self'] based on the provided - /// configuration. - #[inline] - pub fn configure(&self, cfg: ConfigurationData) -> ConfiguredTargetLabel { - self.configure_pair(Configuration::new(cfg, None)) - } - - /// Like `configure`, but forces the execution configuration too. 
- #[inline] - pub fn configure_with_exec( - &self, - cfg: ConfigurationData, - exec_cfg: ConfigurationData, - ) -> ConfiguredTargetLabel { - self.configure_pair(Configuration::new(cfg, Some(exec_cfg))) - } - - #[inline] - pub fn configure_pair(&self, cfg_pair: Configuration) -> ConfiguredTargetLabel { - ConfiguredTargetLabel { - target: self.dupe(), - cfg_pair, - } - } - - #[inline] - pub fn configure_pair_no_exec(&self, cfg: ConfigurationNoExec) -> ConfiguredTargetLabel { - self.configure_pair(cfg.cfg_pair().dupe()) - } - - #[inline] - pub fn as_ref(&self) -> TargetLabelRef { - TargetLabelRef::new(self.pkg(), self.name()) - } - - pub fn parse( - label: &str, - cell_name: CellName, - cell_resolver: &CellResolver, - ) -> anyhow::Result { - let (pkg, name, TargetPatternExtra) = - ParsedPattern::::parse_precise(label, cell_name, cell_resolver)? - .as_literal(label)?; - Ok(TargetLabel::new(pkg, name.as_ref())) - } - - /// Simple and incorrect target label parser which can be used in tests. - pub fn testing_parse(target_label: &str) -> TargetLabel { - let parts = lex_target_pattern(target_label, false).expect("failed to parse"); - let cell_name = CellName::testing_new(parts.cell_alias.expect("must have cell name")); - - let pattern_data = parts - .pattern - .reject_ambiguity() - .expect("target label must be unambiguous"); - let (target_name, TargetPatternExtra) = - pattern_data.target().expect("target label must be precise"); - - TargetLabel::new( - PackageLabel::new( - cell_name, - CellRelativePath::new(pattern_data.package_path()), - ), - target_name, - ) - } -} - -impl Serialize for TargetLabel { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&self.to_string()) - } -} - -impl ToProtoMessage for TargetLabel { - type Message = buck2_data::TargetLabel; - - fn as_proto(&self) -> Self::Message { - buck2_data::TargetLabel { - package: self.pkg().to_string(), - name: self.name().to_string(), - } - } -} - -#[derive( - Clone, - Dupe, - Eq, - PartialEq, - Ord, - PartialOrd, - Debug, - derive_more::Display -)] -#[display(fmt = "{}:{}", pkg, name)] -pub struct TargetLabelRef<'a> { - pkg: PackageLabel, - name: &'a TargetNameRef, -} - -impl<'a> TargetLabelRef<'a> { - #[inline] - pub fn new(pkg: PackageLabel, name: &'a TargetNameRef) -> TargetLabelRef<'a> { - TargetLabelRef { pkg, name } - } -} +pub mod interner; +pub mod label; +pub(crate) mod triomphe_thin_arc_borrow; diff --git a/app/buck2_core/src/target/label/interner.rs b/app/buck2_core/src/target/label/interner.rs new file mode 100644 index 0000000000000..d4564db624578 --- /dev/null +++ b/app/buck2_core/src/target/label/interner.rs @@ -0,0 +1,77 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::fmt::Debug; +use std::fmt::Formatter; +use std::hash::BuildHasher; + +use allocative::Allocative; +use buck2_util::hash::BuckHasherBuilder; +use lock_free_hashtable::sharded::ShardedLockFreeRawTable; + +use crate::target::label::label::TargetLabel; + +/// Concurrent target label interner. 
+#[derive(Default, Allocative)] +pub struct ConcurrentTargetLabelInterner { + table: ShardedLockFreeRawTable<TargetLabel, 64>, +} + +impl Debug for ConcurrentTargetLabelInterner { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ConcurrentTargetLabelInterner") + .finish_non_exhaustive() + } +} + +impl PartialEq for ConcurrentTargetLabelInterner { + fn eq(&self, _other: &Self) -> bool { + true + } +} + +impl ConcurrentTargetLabelInterner { + pub fn intern(&self, target_label: TargetLabel) -> TargetLabel { + let hash = BuckHasherBuilder.hash_one(&target_label); + + if let Some(r) = self + .table + .lookup(hash, |entry_ref| entry_ref == target_label.arc_borrow()) + { + return r.to_owned(); + } + + let (entry, _) = self.table.insert( + hash, + target_label, + |a, b| a == b, + |a| BuckHasherBuilder.hash_one(a), + ); + entry.to_owned() + } +} + +#[cfg(test)] +mod tests { + use std::ptr; + + use crate::target::label::interner::ConcurrentTargetLabelInterner; + use crate::target::label::label::TargetLabel; + + #[test] + fn test_interner() { + let interner = ConcurrentTargetLabelInterner::default(); + + let label1 = interner.intern(TargetLabel::testing_parse("foo//:bar")); + let label2 = interner.intern(TargetLabel::testing_parse("foo//:bar")); + assert!(ptr::eq(label1.as_raw(), label2.as_raw())); + + // We would like to check refcount, but there's no public API for that. + } +} diff --git a/app/buck2_core/src/target/label/label.rs b/app/buck2_core/src/target/label/label.rs new file mode 100644 index 0000000000000..244639025c7b5 --- /dev/null +++ b/app/buck2_core/src/target/label/label.rs @@ -0,0 +1,327 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::cmp::Ordering; +use std::fmt; +use std::fmt::Debug; +use std::hash::Hash; +use std::hash::Hasher; +use std::ptr; +use std::str; + +use allocative::Allocative; +use buck2_data::ToProtoMessage; +use buck2_util::hash::BuckHasher; +use dupe::Dupe; +use lock_free_hashtable::atomic_value::AtomicValue; +use ref_cast::ref_cast_custom; +use ref_cast::RefCastCustom; +use serde::Serialize; +use serde::Serializer; +use triomphe::ThinArc; + +use crate::cells::name::CellName; +use crate::cells::paths::CellRelativePath; +use crate::cells::CellAliasResolver; +use crate::cells::CellResolver; +use crate::configuration::data::ConfigurationData; +use crate::configuration::pair::Configuration; +use crate::configuration::pair::ConfigurationNoExec; +use crate::package::PackageLabel; +use crate::pattern::pattern::lex_target_pattern; +use crate::pattern::pattern::ParsedPattern; +use crate::pattern::pattern_type::TargetPatternExtra; +use crate::target::configured_target_label::ConfiguredTargetLabel; +use crate::target::label::triomphe_thin_arc_borrow::ThinArcBorrow; +use crate::target::name::TargetNameRef; + +#[derive(Eq, PartialEq, Allocative)] +struct TargetLabelHeader { + /// Hash of target label (not package, not name). + /// Place hash first to make equality check faster. + hash: u32, + pkg: PackageLabel, + // TODO(nga): this struct has 4 bytes of padding.
+} + +/// 'TargetLabel' that uniquely maps to a 'target'. +/// It contains a 'Package' which is the 'Package' defined by the build file +/// that contains this 'target', and a 'name' which is a 'TargetName' +/// representing the target name given to the particular target. +#[derive(Clone, derive_more::Display, Eq, PartialEq, Allocative, RefCastCustom)] +#[display("{}", self.as_ref())] +#[repr(transparent)] +pub struct TargetLabel( + ThinArc< + TargetLabelHeader, + // `u8` type argument means `ThinArc` stores `[u8]` inline. + // We store string target name in that `[u8]`. + u8, + >, +); + +impl Debug for TargetLabel { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TargetLabel") + .field("pkg", &self.pkg()) + .field("name", &self.name()) + .finish() + } +} + +impl Dupe for TargetLabel {} + +#[allow(clippy::derived_hash_with_manual_eq)] +impl Hash for TargetLabel { + #[inline] + fn hash<H: Hasher>(&self, state: &mut H) { + self.0.header.header.hash.hash(state); + } +} + +impl Ord for TargetLabel { + #[inline] + fn cmp(&self, other: &Self) -> Ordering { + self.as_ref().cmp(&other.as_ref()) + } +} + +impl PartialOrd for TargetLabel { + fn partial_cmp(&self, other: &Self) -> Option<Ordering> { + Some(self.cmp(other)) + } +} + +impl TargetLabel { + pub fn new(pkg: PackageLabel, name: &TargetNameRef) -> Self { + // TODO(nga): unnecessary to take `TargetName` by value. + + // Hash should be stable because it is used to generate the configuration hash. + let key = &(pkg.dupe(), &name); + let mut hasher = BuckHasher::default(); + key.hash(&mut hasher); + let hash = hasher.finish() as u32; + + TargetLabel(ThinArc::from_header_and_slice( + TargetLabelHeader { hash, pkg }, + name.as_str().as_bytes(), + )) + } + + #[ref_cast_custom] + fn ref_cast(arc: &ThinArc<TargetLabelHeader, u8>) -> &Self; + + #[inline] + pub fn pkg(&self) -> PackageLabel { + self.0.header.header.pkg.dupe() + } + + #[inline] + pub fn name(&self) -> &TargetNameRef { + let name = unsafe { str::from_utf8_unchecked(&self.0.slice) }; + TargetNameRef::unchecked_new(name) + } + + /// Creates a 'ConfiguredTargetLabel' from ['Self'] based on the provided + /// configuration. + #[inline] + pub fn configure(&self, cfg: ConfigurationData) -> ConfiguredTargetLabel { + self.configure_pair(Configuration::new(cfg, None)) + } + + /// Like `configure`, but forces the execution configuration too. + #[inline] + pub fn configure_with_exec( + &self, + cfg: ConfigurationData, + exec_cfg: ConfigurationData, + ) -> ConfiguredTargetLabel { + self.configure_pair(Configuration::new(cfg, Some(exec_cfg))) + } + + #[inline] + pub fn configure_pair(&self, cfg_pair: Configuration) -> ConfiguredTargetLabel { + ConfiguredTargetLabel { + target: self.dupe(), + cfg_pair, + } + } + + #[inline] + pub fn configure_pair_no_exec(&self, cfg: ConfigurationNoExec) -> ConfiguredTargetLabel { + self.configure_pair(cfg.cfg_pair().dupe()) + } + + #[inline] + pub fn as_ref(&self) -> TargetLabelRef { + TargetLabelRef::new(self.pkg(), self.name()) + } + + pub fn parse( + label: &str, + cell_name: CellName, + cell_resolver: &CellResolver, + cell_alias_resolver: &CellAliasResolver, + ) -> anyhow::Result<TargetLabel> { + let (target_label, TargetPatternExtra) = + ParsedPattern::<TargetPatternExtra>::parse_precise( + label, + cell_name, + cell_resolver, + cell_alias_resolver, + )?
+ .as_literal(label)?; + Ok(target_label) + } + + fn into_raw(self) -> *const () { + ThinArc::into_raw(self.0) as *const () + } + + #[cfg(test)] + pub(crate) fn as_raw(&self) -> *const () { + ThinArc::as_ptr(&self.0) as *const () + } + + unsafe fn from_raw(raw: *const ()) -> Self { + TargetLabel(ThinArc::from_raw(raw as *const _)) + } + + pub(crate) fn arc_borrow(&self) -> TargetLabelBorrow { + TargetLabelBorrow { + borrow: ThinArcBorrow::borrow(&self.0), + } + } + + /// Simple and incorrect target label parser which can be used in tests. + pub fn testing_parse(target_label: &str) -> TargetLabel { + let parts = lex_target_pattern(target_label, false).expect("failed to parse"); + let cell_name = CellName::testing_new(parts.cell_alias.expect("must have cell name")); + + let pattern_data = parts + .pattern + .reject_ambiguity() + .expect("target label must be unambiguous"); + let (target_name, TargetPatternExtra) = + pattern_data.target().expect("target label must be precise"); + + TargetLabel::new( + PackageLabel::new( + cell_name, + CellRelativePath::new(pattern_data.package_path()), + ), + target_name, + ) + } +} + +impl Serialize for TargetLabel { + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + serializer.serialize_str(&self.to_string()) + } +} + +impl ToProtoMessage for TargetLabel { + type Message = buck2_data::TargetLabel; + + fn as_proto(&self) -> Self::Message { + buck2_data::TargetLabel { + package: self.pkg().to_string(), + name: self.name().to_string(), + } + } +} + +#[derive( + Clone, + Dupe, + Eq, + PartialEq, + Ord, + PartialOrd, + Debug, + derive_more::Display +)] +#[display("{}:{}", pkg, name)] +pub struct TargetLabelRef<'a> { + pkg: PackageLabel, + name: &'a TargetNameRef, +} + +impl<'a> TargetLabelRef<'a> { + #[inline] + pub fn new(pkg: PackageLabel, name: &'a TargetNameRef) -> TargetLabelRef<'a> { + TargetLabelRef { pkg, name } + } +} + +/// `TargetLabel`, but without a refcount increment. +#[derive(Copy, Clone, Dupe)] +#[doc(hidden)] // `impl AtomicValue` wants this to be public. +pub struct TargetLabelBorrow<'a> { + borrow: ThinArcBorrow<'a, TargetLabelHeader, u8>, +} + +impl<'a> TargetLabelBorrow<'a> { + /// Obtain a temporary reference to the `TargetLabel`. + fn with_target_label<R>(self, mut f: impl FnMut(&TargetLabel) -> R) -> R { + self.borrow.with_arc(|arc| f(TargetLabel::ref_cast(arc))) + } + + /// Upgrade to `TargetLabel`.
+ pub(crate) fn to_owned(self) -> TargetLabel { + TargetLabel(self.borrow.to_owned()) + } + + pub(crate) unsafe fn from_raw(raw: *const ()) -> Self { + TargetLabelBorrow { + borrow: ThinArcBorrow::from_raw(raw), + } + } +} + +impl<'a> PartialEq for TargetLabelBorrow<'a> { + fn eq(&self, other: &Self) -> bool { + self.with_target_label(|a| other.with_target_label(|b| a == b)) + } +} + +impl<'a> Hash for TargetLabelBorrow<'a> { + fn hash<H: Hasher>(&self, state: &mut H) { + self.with_target_label(|a| a.hash(state)) + } +} + +impl AtomicValue for TargetLabel { + type Raw = *const (); + type Ref<'a> = TargetLabelBorrow<'a> where Self: 'a; + + fn null() -> Self::Raw { + ptr::null() + } + + fn is_null(this: Self::Raw) -> bool { + this.is_null() + } + + fn into_raw(this: Self) -> Self::Raw { + TargetLabel::into_raw(this) + } + + unsafe fn from_raw(raw: Self::Raw) -> Self { + TargetLabel::from_raw(raw) + } + + unsafe fn deref<'a>(raw: Self::Raw) -> Self::Ref<'a> { + TargetLabelBorrow::from_raw(raw) + } +} diff --git a/app/buck2_core/src/target/label/triomphe_thin_arc_borrow.rs b/app/buck2_core/src/target/label/triomphe_thin_arc_borrow.rs new file mode 100644 index 0000000000000..9b40eac06abe4 --- /dev/null +++ b/app/buck2_core/src/target/label/triomphe_thin_arc_borrow.rs @@ -0,0 +1,59 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::marker::PhantomData; +use std::mem::ManuallyDrop; +use std::ptr::NonNull; + +use dupe::Clone_; +use dupe::Copy_; +use dupe::Dupe_; + +/// Like `triomphe::ArcBorrow`, but for `triomphe::ThinArc`. +#[derive(Copy_, Clone_, Dupe_)] +pub(crate) struct ThinArcBorrow<'a, H, T> { + /// `ThinArc` without holding a reference counter. + ptr: NonNull<()>, + /// There's no `ThinArcBorrow` in `triomphe`; use `ArcBorrow` as the marker because they are similar. + _marker: PhantomData<triomphe::ArcBorrow<'a, (H, T)>>, +} + +impl<'a, H, T> ThinArcBorrow<'a, H, T> { + /// Borrow. + pub(crate) fn borrow(arc: &'a triomphe::ThinArc<H, T>) -> ThinArcBorrow<'a, H, T> { + ThinArcBorrow { + ptr: NonNull::new(triomphe::ThinArc::as_ptr(arc) as *mut _).unwrap(), + _marker: PhantomData, + } + } + + /// Obtain a temporary reference to the `ThinArc`. + pub(crate) fn with_arc<R>(self, mut f: impl FnMut(&triomphe::ThinArc<H, T>) -> R) -> R { + // Tricky part: we create a `ThinArc` without incrementing the reference counter + // (which must be already >= 1 by the contract of `ThinArcBorrow`). + // And we put it into `ManuallyDrop` to prevent the reference counter decrement. + unsafe { + let arc = ManuallyDrop::new(triomphe::ThinArc::from_raw(self.ptr.as_ptr() as *const _)); + f(&arc) + } + } + + /// Upgrade to `triomphe::ThinArc`. + pub(crate) fn to_owned(self) -> triomphe::ThinArc<H, T> { + self.with_arc(|arc| arc.clone()) + } + + /// Create from a raw pointer produced by `triomphe::ThinArc::into_raw`. + pub(crate) unsafe fn from_raw(ptr: *const ()) -> Self { + ThinArcBorrow { + ptr: NonNull::new(ptr as *mut _).unwrap(), + _marker: PhantomData, + } + } +} diff --git a/app/buck2_core/src/target/mod.rs b/app/buck2_core/src/target/mod.rs deleted file mode 100644 index 435b98b99272f..0000000000000 --- a/app/buck2_core/src/target/mod.rs +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates.
- * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! -//! A 'target' is an instance of a rule declared in the build file. Each -//! 'target' is a node on the 'static graph'. Targets are determined by parsing; -//! no extra analysis or building is necessary to determine the 'target's -//! available. -//! -//! For example, the below is a target, defined in some 'Package' with the given -//! name "foo". -//! -//!```ignored -//! java_library( -//! name = "foo", -//! srcs = [ ... ], -//! ... -//! ) -//! ``` -//! -//! Target names are limited to non-empty alpha numeric characters `,`, `=`, -//! `-`, `/`, and `_`. No other special characters, e.g. spaces, are allowed. -//! Currently, `+` is allow for backwards compatibility but may be removed. -//! -//! 'TargetLabel's are labels/keys that uniquely map to a 'target' in the static -//! graph. These are of the form `//:`. -//! e.g. `mycell//my/package/path:my_target`, where `mycell` is the cell, -//! `my/package/path` is the package, and `my_target` is the target name -//! belonging to the package. - -pub mod configured_target_label; -pub mod label; -pub mod name; diff --git a/app/buck2_core/src/target/name.rs b/app/buck2_core/src/target/name.rs index 74ea31254673a..e860e0e31eeac 100644 --- a/app/buck2_core/src/target/name.rs +++ b/app/buck2_core/src/target/name.rs @@ -35,7 +35,8 @@ pub const EQ_SIGN_SUBST: &str = "_eqsb_"; // TODO intern this? pub struct TargetName(ThinArcStr); -#[derive(thiserror::Error, Debug)] +#[derive(buck2_error::Error, Debug)] +#[buck2(input)] enum TargetNameError { #[error( "Invalid target name `{}`. Target names are non-empty strings and can only contain alpha numeric characters, and symbols \ @@ -63,9 +64,8 @@ impl TargetName { Ok(Self(ThinArcStr::from(name))) } - #[inline] - pub fn unchecked_new(name: &str) -> Self { - Self(ThinArcStr::from(name)) + pub fn testing_new(name: &str) -> Self { + TargetName::new(name).unwrap() } fn bad_name_error(name: &str) -> anyhow::Error { @@ -99,6 +99,7 @@ impl TargetName { soft_error!( "label_has_comma", TargetNameError::LabelHasSpecialCharacter(name.to_owned(), ',').into(), + deprecation: true, quiet: true )?; } @@ -106,6 +107,7 @@ impl TargetName { soft_error!( "label_has_dollar_sign", TargetNameError::LabelHasSpecialCharacter(name.to_owned(), '$').into(), + deprecation: true, quiet: true )?; } @@ -162,6 +164,7 @@ impl Deref for TargetName { pub struct TargetNameRef(str); impl TargetNameRef { + #[inline] pub fn new(name: &str) -> anyhow::Result<&TargetNameRef> { TargetName::verify(name)?; Ok(TargetNameRef::unchecked_new(name)) @@ -182,7 +185,7 @@ impl TargetNameRef { #[inline] pub fn to_owned(&self) -> TargetName { - TargetName::unchecked_new(&self.0) + TargetName(ThinArcStr::from(&self.0)) } } @@ -245,7 +248,7 @@ mod tests { assert_eq!( hash(TargetNameRef::unchecked_new("foo")), - hash(&TargetName::unchecked_new("foo")) + hash(&TargetName::testing_new("foo")) ); } } diff --git a/app/buck2_core/src/target/target_configured_target_label.rs b/app/buck2_core/src/target/target_configured_target_label.rs new file mode 100644 index 0000000000000..d736a05b01d9e --- /dev/null +++ b/app/buck2_core/src/target/target_configured_target_label.rs @@ -0,0 +1,70 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use dupe::Dupe; + +use crate::configuration::data::ConfigurationData; +use crate::package::PackageLabel; +use crate::target::configured_target_label::ConfiguredTargetLabel; +use crate::target::label::label::TargetLabel; + +/// A wrapper around a configured target label. +/// +/// The semantics of this type are exactly the same as that of a configured target label, except +/// with one distinction - when the target being referred to is a toolchain target, the label will +/// still only be configured with a target platform, not an exec platform. +/// +/// This is used to mark code which deals with configurations of toolchains, but does not actually +/// care about the toolchain's exec platform. +#[derive( + Clone, + Dupe, + Debug, + derive_more::Display, + Hash, + Eq, + PartialEq, + Ord, + PartialOrd, + allocative::Allocative +)] +pub struct TargetConfiguredTargetLabel(ConfiguredTargetLabel); + +impl TargetConfiguredTargetLabel { + pub fn new_without_exec_cfg(label: ConfiguredTargetLabel) -> Self { + Self(label.unconfigured().configure(label.cfg().dupe())) + } + + pub fn new_configure(label: &TargetLabel, cfg: ConfigurationData) -> Self { + Self(label.configure(cfg)) + } + + pub fn unconfigured(&self) -> &TargetLabel { + self.0.unconfigured() + } + + pub fn cfg(&self) -> &ConfigurationData { + self.0.cfg() + } + + pub fn pkg(&self) -> PackageLabel { + self.0.pkg() + } + + /// Sets the exec configuration. + /// + /// Should only be used with toolchain targets. + pub fn with_exec_cfg(&self, cfg: ConfigurationData) -> ConfiguredTargetLabel { + self.0.with_exec_cfg(cfg) + } + + pub fn inner(&self) -> &ConfiguredTargetLabel { + &self.0 + } +} diff --git a/app/buck2_core/tests/soft_error.rs b/app/buck2_core/tests/soft_error.rs index e53731680ccc2..631df540b051a 100644 --- a/app/buck2_core/tests/soft_error.rs +++ b/app/buck2_core/tests/soft_error.rs @@ -21,7 +21,7 @@ static RESULT: Mutex> = Mutex::new(Vec::new()); fn mock_handler( category: &str, - err: &anyhow::Error, + err: &buck2_error::Error, loc: (&str, u32, u32), options: StructuredErrorOptions, ) { @@ -57,7 +57,7 @@ fn test_soft_error() { let before_error_line = line!(); let _ignore_hard_error = soft_error!( "test_logged_soft_error", - anyhow::anyhow!("Should be logged") + anyhow::anyhow!("Should be logged").into() ); assert_eq!( Some(&format!( @@ -65,7 +65,7 @@ fn test_soft_error() { file!(), before_error_line + 1, )), - RESULT.lock().unwrap().get(0) + RESULT.lock().unwrap().first() ); } @@ -79,7 +79,7 @@ fn test_reset_counters() { assert_eq!(0, RESULT.lock().unwrap().len(), "Sanity check"); for _ in 0..100 { - let _ignore = soft_error!("test_reset_counters", anyhow::anyhow!("Message")); + let _ignore = soft_error!("test_reset_counters", anyhow::anyhow!("Message").into()); } assert_eq!( @@ -91,7 +91,7 @@ fn test_reset_counters() { reset_soft_error_counters(); for _ in 0..100 { - let _ignore = soft_error!("test_reset_counters", anyhow::anyhow!("Message")); + let _ignore = soft_error!("test_reset_counters", anyhow::anyhow!("Message").into()); } assert_eq!( diff --git a/app/buck2_critical_path/BUCK b/app/buck2_critical_path/BUCK index 885232c0cce34..08d50920b27b4 100644 --- a/app/buck2_critical_path/BUCK +++ b/app/buck2_critical_path/BUCK @@ -1,5 +1,4 @@ 
load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -15,7 +14,7 @@ rust_library( "fbsource//third-party/rust:anyhow", "fbsource//third-party/rust:crossbeam", "fbsource//third-party/rust:derive_more", - "fbsource//third-party/rust:thiserror", + "//buck2/app/buck2_error:buck2_error", "//buck2/starlark-rust/starlark_map:starlark_map", ], ) diff --git a/app/buck2_critical_path/Cargo.toml b/app/buck2_critical_path/Cargo.toml index 12d0ead77b9d2..97af3c4ae298f 100644 --- a/app/buck2_critical_path/Cargo.toml +++ b/app/buck2_critical_path/Cargo.toml @@ -1,17 +1,19 @@ [package] +authors = ["Meta"] +description = "Critical path calculations for Buck2" +edition = "2021" +license = { workspace = true } name = "buck2_critical_path" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Critical path calculations for Buck2" -license = "MIT OR Apache-2.0" -authors = ["Meta"] [dependencies] anyhow = { workspace = true } -starlark_map = { workspace = true } -thiserror = { workspace = true } -derive_more = { workspace = true } crossbeam = { workspace = true } +derive_more = { workspace = true } +starlark_map = { workspace = true } + +buck2_error = { workspace = true } [dev-dependencies] rand = { workspace = true } diff --git a/app/buck2_critical_path/src/builder.rs b/app/buck2_critical_path/src/builder.rs index e5a0c8c5551a0..7bbb52a5bb8fa 100644 --- a/app/buck2_critical_path/src/builder.rs +++ b/app/buck2_critical_path/src/builder.rs @@ -12,7 +12,6 @@ use std::hash::Hash; use starlark_map::small_map::SmallMap; use starlark_map::Hashed; -use thiserror::Error; use crate::graph::Graph; use crate::graph::GraphVertex; @@ -20,7 +19,7 @@ use crate::types::VertexData; use crate::types::VertexId; use crate::types::VertexKeys; -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] pub enum PushError { #[error("duplicate key: {key}")] DuplicateKey { key: K }, @@ -52,7 +51,7 @@ where pub fn push( &mut self, key: K, - deps: impl Iterator, + deps: impl IntoIterator, data: D, ) -> Result<(), PushError> { let idx: u32 = self @@ -116,7 +115,7 @@ where } #[cfg(test)] -mod test { +mod tests { use super::*; #[test] diff --git a/app/buck2_critical_path/src/graph.rs b/app/buck2_critical_path/src/graph.rs index 82f8b1c773663..71eaa675a0557 100644 --- a/app/buck2_critical_path/src/graph.rs +++ b/app/buck2_critical_path/src/graph.rs @@ -7,8 +7,6 @@ * of this source tree. 
*/ -use thiserror::Error; - use crate::types::OptionalVertexId; use crate::types::VertexData; use crate::types::VertexId; @@ -27,7 +25,7 @@ pub struct Graph { impl Graph { #[inline] - pub fn iter_vertices(&self) -> impl Iterator + DoubleEndedIterator { + pub fn iter_vertices(&self) -> impl DoubleEndedIterator { self.vertices.keys() } @@ -237,7 +235,7 @@ impl Graph { } } -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] pub enum TopoSortError { #[error("cycle")] Cycle, @@ -258,7 +256,7 @@ impl AddEdges for VertexData { } } -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] pub enum AddEdgesError { #[error("overflow")] Overflow, @@ -281,7 +279,7 @@ pub struct PathCost { } #[cfg(test)] -mod test { +mod tests { use super::*; use crate::builder::GraphBuilder; use crate::test_utils::make_dag; diff --git a/app/buck2_critical_path/src/lib.rs b/app/buck2_critical_path/src/lib.rs index 5f6e1a1263d42..94ae455fe83e4 100644 --- a/app/buck2_critical_path/src/lib.rs +++ b/app/buck2_critical_path/src/lib.rs @@ -7,6 +7,8 @@ * of this source tree. */ +#![feature(error_generic_member_access)] + mod builder; mod graph; mod potential; diff --git a/app/buck2_critical_path/src/potential.rs b/app/buck2_critical_path/src/potential.rs index 54aa051954e8b..fe22885959119 100644 --- a/app/buck2_critical_path/src/potential.rs +++ b/app/buck2_critical_path/src/potential.rs @@ -281,7 +281,7 @@ pub fn compute_critical_path_potentials( } #[cfg(test)] -mod test { +mod tests { use std::time::Instant; use rand::SeedableRng; diff --git a/app/buck2_critical_path/src/types.rs b/app/buck2_critical_path/src/types.rs index c28bf907e8935..ac6ae7cd03522 100644 --- a/app/buck2_critical_path/src/types.rs +++ b/app/buck2_critical_path/src/types.rs @@ -30,7 +30,7 @@ impl VertexKind for CriticalPathIndexKind {} /// The ID of a Vertex. This can be used to index into AbstractVertexData. Those IDs are given a /// kind so we don't confuse indices in a critical path with vertex indices in a graph. #[derive(Copy, Clone, Default, Ord, PartialOrd, PartialEq, Eq, Display, Hash)] -#[display(fmt = "{}", "self.0")] +#[display("{}", self.0)] pub struct AbstractVertexId(u32, PhantomData); impl AbstractVertexId @@ -135,13 +135,13 @@ where Self(v, PhantomData) } - pub fn keys(&self) -> impl Iterator> + DoubleEndedIterator { + pub fn keys(&self) -> impl DoubleEndedIterator> { // By construction the length of this is always less than the maximum vertex id. 
let len: u32 = self.0.len().try_into().unwrap(); (0..len).map(AbstractVertexId::new) } - pub fn iter(&self) -> impl Iterator, &T)> + DoubleEndedIterator { + pub fn iter(&self) -> impl DoubleEndedIterator, &T)> { self.keys().map(|k| (k, &self.0[k.0 as usize])) } @@ -208,7 +208,7 @@ where Self(v) } - pub fn iter(&self) -> impl Iterator, &K)> + DoubleEndedIterator { + pub fn iter(&self) -> impl DoubleEndedIterator, &K)> { self.0.iter().map(|(key, idx)| (*idx, key)) } diff --git a/app/buck2_daemon/BUCK b/app/buck2_daemon/BUCK new file mode 100644 index 0000000000000..500ffdcc32987 --- /dev/null +++ b/app/buck2_daemon/BUCK @@ -0,0 +1,54 @@ +load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") + +oncall("build_infra") + +rust_library( + name = "buck2_daemon", + srcs = glob(["src/**/*.rs"]), + os_deps = [ + ( + "linux", + [ + "fbsource//third-party/rust:nix", + ], + ), + ( + "macos", + [ + "fbsource//third-party/rust:nix", + ], + ), + ( + "windows", + [ + "fbsource//third-party/rust:winapi", + ], + ), + ], + test_deps = [ + "//common/rust/shed/fbinit:fbinit", + ], + deps = [ + "fbsource//third-party/rust:anyhow", + "fbsource//third-party/rust:clap", + "fbsource//third-party/rust:futures", + "fbsource//third-party/rust:libc", + "fbsource//third-party/rust:rand", + "fbsource//third-party/rust:serde_json", + "fbsource//third-party/rust:tokio", + "fbsource//third-party/rust:tokio-stream", + "fbsource//third-party/rust:tracing", + "//buck2/allocative/allocative:allocative", + "//buck2/app/buck2_cli_proto:buck2_cli_proto", + "//buck2/app/buck2_client_ctx:buck2_client_ctx", + "//buck2/app/buck2_common:buck2_common", + "//buck2/app/buck2_core:buck2_core", + "//buck2/app/buck2_error:buck2_error", + "//buck2/app/buck2_events:buck2_events", + "//buck2/app/buck2_server:buck2_server", + "//buck2/app/buck2_util:buck2_util", + "//buck2/dice/dice:dice", + "//buck2/gazebo/dupe:dupe", + # @oss-disable: "//common/rust/gflags:gflags", + ], +) diff --git a/app/buck2_daemon/Cargo.toml b/app/buck2_daemon/Cargo.toml new file mode 100644 index 0000000000000..bec6a0860bc60 --- /dev/null +++ b/app/buck2_daemon/Cargo.toml @@ -0,0 +1,42 @@ +[package] +description = "" +edition = "2021" +name = "buck2_daemon" +repository = { workspace = true } +version = "0.1.0" + +[dependencies] +anyhow = { workspace = true } +clap = { workspace = true } +futures = { workspace = true } +libc = { workspace = true } +rand = { workspace = true } +serde_json = { workspace = true } +tokio = { workspace = true } +tokio-stream = { workspace = true } +tracing = { workspace = true } + +allocative = { workspace = true } +dice = { workspace = true } +dupe = { workspace = true } + +buck2_cli_proto = { workspace = true } +buck2_client_ctx = { workspace = true } +buck2_common = { workspace = true } +buck2_core = { workspace = true } +buck2_error = { workspace = true } +buck2_events = { workspace = true } +buck2_server = { workspace = true } +buck2_util = { workspace = true } + +[target.'cfg(unix)'.dependencies] +nix = { workspace = true } + +[target.'cfg(windows)'.dependencies] +winapi = { workspace = true } + +[dev-dependencies] +fbinit = { workspace = true } + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ["cfg(buck_build)", "cfg(client_only)", "cfg(fbcode_build)"] } diff --git a/app/buck2_daemon/daemon_lifecycle.md b/app/buck2_daemon/daemon_lifecycle.md new file mode 100644 index 0000000000000..23f2bb529bd98 --- /dev/null +++ b/app/buck2_daemon/daemon_lifecycle.md @@ -0,0 +1,96 @@ +# buckd + +Buck runs a persistent 
daemon process (buckd) to reuse work between commands. +Most work is done by the daemon process. When executing a buck command, the +process running the command is a client to the buckd server. The buckd server +exposes a simple grpc service that the client uses to implement the various buck +commands. + +There's a small set of commands/arguments that don't require the daemon +(`buck help`, CLI arg parse failures, `buck version`, ...), but most commands +will require it. + +For almost all commands, buck requires that the client and server are the same +version of buck and may restart buckd to ensure that's the case. + +# daemon process flow + +The daemon process is started with the (hidden) `buck daemon` command. + +The daemon process has a simple startup. It will first daemonize itself and +write its pid to a locked file "buckd.pid" in the "daemon directory" (a +directory in `$HOME/.buck` specific to that repository+output directory). The +file is locked exclusively by the daemon process until it exits. This means that +only a single daemon is allowed at a time. It redirects its stdout and stderr to +files in the daemon directory. + +The daemon then starts up the grpc DaemonApi server. Once that is running, it +will write the port it is running on (along with some other information) to the +"buckd.info" file in the daemon dir. Once that is done, the server is ready to +be used. + +There are 3 ways that the buckd process will shut down: + +1. The grpc api includes a `kill()` call that will shut down buckd. +2. buckd will periodically (every 100s or so) check the "buckd.pid" and + "buckd.info" files to ensure that they still match that buckd process. +3. If buckd hits a Rust `panic!()`, the buckd process will exit. + +# client connection and buckd startup + +When the client is processing a command that requires communicating with the +buckd server, it will follow this approach: + +1. read the "buckd.info" file to get the port the grpc api is being served on +2. connect to the api on that port +3. send a `status()` request to get the version + +If there is an error during 1-3, or if there is a version mismatch, the client +needs to (re)start the buck daemon. Otherwise, the client can continue as it now +has made a connection with a correctly versioned buckd. + +When the client is killing or starting the buckd process, it will grab an +exclusive lock on the "lifecycle.lock" file in the daemon directory to ensure +that multiple clients aren't racing with each other. + +To start/restart the buckd process, the client does: + +1. lock the "lifecycle.lock" file +2. send a kill command to the existing buckd +3. ensure the buckd process has exited (based on pid) +4. run a `buck daemon` command to start buckd +5. wait for the daemon to start up and the grpc server to be ready +6. release the "lifecycle.lock" file + +After that, it will repeat the connection steps (including verifying the version +after connecting). + +# buck kill and other daemon restarts + +If there are other invocations currently using the buck daemon when it is killed +or restarted by a client, those invocations will fail due to the early +disconnection. + +Generally, we support concurrent buck invocations using the same buck version, +but if there are concurrent invocations with different versions, they may +unexpectedly fail or otherwise work incorrectly.
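To make the handshake described above concrete, here is a minimal client-side sketch. It is illustrative only, not the real buck2 client code: the `DaemonInfo` struct, its field names, and the `expected_version` parameter are stand-ins, `serde_json` is assumed for parsing "buckd.info", and the locking/kill/spawn steps are left as comments.

```rust
use std::fs;
use std::path::Path;

/// Stand-in for the subset of "buckd.info" this sketch needs (illustrative only).
struct DaemonInfo {
    endpoint: String,
    version: String,
}

/// Read and parse "buckd.info"; any failure is treated as "daemon not usable".
fn read_daemon_info(daemon_dir: &Path) -> Option<DaemonInfo> {
    let raw = fs::read_to_string(daemon_dir.join("buckd.info")).ok()?;
    let v: serde_json::Value = serde_json::from_str(&raw).ok()?;
    Some(DaemonInfo {
        endpoint: v.get("endpoint")?.as_str()?.to_owned(),
        version: v.get("version")?.as_str()?.to_owned(),
    })
}

fn connect_or_restart(daemon_dir: &Path, expected_version: &str) {
    match read_daemon_info(daemon_dir) {
        Some(info) if info.version == expected_version => {
            // Happy path: the real client would connect to `info.endpoint`
            // here and confirm the version again via a `status()` request.
            println!("connect to {}", info.endpoint);
        }
        _ => {
            // Mismatch or unreadable info file: lock "lifecycle.lock", kill
            // the old daemon, run `buck daemon`, wait for startup, release
            // the lock, then retry the whole handshake.
            println!("restart buckd and retry");
        }
    }
}
```

Note that treating every failure in steps 1-3 as "restart the daemon" is what keeps the protocol simple: the client never needs to distinguish a stale info file from a crashed daemon.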
That same-version guarantee is sufficient for the normal buck workflow, where +the buck version is checked into the repo; in that case, it's not expected that +buck commands will work across a rebase or other operation that changes the +buck version. + +# correctness + +We have a few guarantees here. + +1. Only a single buckd is running at a time +2. Only a single client is killing/starting a buckd at a time +3. A client only uses a buckd connection after making sure it has a compatible + version + +The main way that we could run into issues would be if there are multiple +clients that are racing and they want different versions of buck. In that case, +one might cause the other to fail to connect to a buckd with the correct +version, or one of the clients' connections may be prematurely disconnected. A +client **will not** use a server with a mismatched version. While this is a +failure, no expected workflow would hit this case: all concurrent commands +should be using the same buck version. diff --git a/app/buck2_daemon/src/daemon.rs b/app/buck2_daemon/src/daemon.rs new file mode 100644 index 0000000000000..18811e06a7a17 --- /dev/null +++ b/app/buck2_daemon/src/daemon.rs @@ -0,0 +1,637 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::fs::File; +use std::io::Write; +use std::path::PathBuf; +use std::process; +use std::sync::Arc; +use std::time::Duration; + +use allocative::Allocative; +use anyhow::Context as _; +use buck2_cli_proto::DaemonProcessInfo; +use buck2_client_ctx::daemon_constraints::gen_daemon_constraints; +use buck2_client_ctx::version::BuckVersion; +use buck2_common::buckd_connection::ConnectionType; +use buck2_common::daemon_dir::DaemonDir; +use buck2_common::init::DaemonStartupConfig; +use buck2_common::invocation_paths::InvocationPaths; +use buck2_common::memory; +use buck2_core::buck2_env_anyhow; +use buck2_core::fs::fs_util; +use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; +use buck2_core::logging::LogConfigurationReloadHandle; +use buck2_server::daemon::daemon_tcp::create_listener; +use buck2_server::daemon::server::BuckdServer; +use buck2_server::daemon::server::BuckdServerDelegate; +use buck2_server::daemon::server::BuckdServerInitPreferences; +use buck2_util::threads::thread_spawn; +use buck2_util::tokio_runtime::new_tokio_runtime; +use dice::DetectCycles; +use dice::WhichDice; +use futures::channel::mpsc; +use futures::channel::mpsc::UnboundedSender; +use futures::pin_mut; +use futures::select; +use futures::FutureExt; +use futures::StreamExt; +use rand::Rng; +use tokio::runtime::Builder; + +use crate::daemon_lower_priority::daemon_lower_priority; +use crate::schedule_termination::maybe_schedule_termination; + +#[derive(Debug, buck2_error::Error)] +enum DaemonError { + #[error("The buckd pid file at `{}` had a mismatched pid, expected `{1}`, got `{2}`", _0.display())] + PidFileMismatch(PathBuf, u32, u32), +} + +/// Start or run buck daemon. +/// +/// This is an internal command, not intended to be used directly. +/// The buck client invokes it to spawn a server process. +#[derive(Clone, Debug, clap::Parser)] +pub struct DaemonCommand { + /// Sets the interval for how often the daemon performs consistency checks.
+ /// These are used to ensure that the daemon is still the one referenced + /// by files in the daemon dir. + #[clap(long, default_value("60"))] + checker_interval_seconds: u64, + /// Run buck daemon but do not daemonize the process. + #[clap(long)] + dont_daemonize: bool, + /// This flag is set to prevent infinite recursion when the process is restarted + /// with lower priority. + #[clap(long)] + skip_macos_qos: bool, + /// Early configs that the daemon needs at startup. Those are read by the client then passed to + /// the daemon. The client will restart the daemon if they mismatch. + #[clap(value_parser = DaemonStartupConfig::deserialize)] + daemon_startup_config: DaemonStartupConfig, + + #[clap(env("ENABLE_TRACE_IO"), long)] + enable_trace_io: bool, + + /// If passed a given materializer identity, if the materializer state DB matches that + /// identity, the daemon will not use it and will instead create a new empty materializer + /// state. + #[clap(long)] + reject_materializer_state: Option, +} + +impl DaemonCommand { + /// Command instance for `--no-buckd`. + pub(crate) fn new_in_process(daemon_startup_config: DaemonStartupConfig) -> DaemonCommand { + DaemonCommand { + checker_interval_seconds: 60, + dont_daemonize: true, + skip_macos_qos: true, + daemon_startup_config, + enable_trace_io: false, + reject_materializer_state: None, + } + } +} + +pub(crate) fn init_listener() -> anyhow::Result<(std::net::TcpListener, ConnectionType)> { + let (endpoint, listener) = create_listener()?; + + tracing::info!("Listener created on {}", &endpoint); + + Ok((listener, endpoint)) +} + +pub(crate) fn write_process_info( + daemon_dir: &DaemonDir, + process_info: &DaemonProcessInfo, +) -> anyhow::Result<()> { + let file = File::create(daemon_dir.buckd_info())?; + serde_json::to_writer(&file, &process_info)?; + Ok(()) +} + +fn verify_current_daemon(daemon_dir: &DaemonDir) -> anyhow::Result<()> { + let file = daemon_dir.buckd_pid(); + let my_pid = process::id(); + + let recorded_pid: u32 = fs_util::read_to_string(&file)?.trim().parse()?; + if recorded_pid != my_pid { + return Err( + DaemonError::PidFileMismatch(file.into_path_buf(), my_pid, recorded_pid).into(), + ); + } + + Ok(()) +} + +fn gen_auth_token() -> String { + (0..20) + .map(|_| rand::thread_rng().gen_range('a'..='z')) + .collect() +} + +fn terminate_on_panic() { + let orig_hook = std::panic::take_hook(); + std::panic::set_hook(Box::new(move |panic_info| { + orig_hook(panic_info); + // We are using `_exit` instead of `exit` to avoid running global destructors. + // This is similar to what default rust panic handler does + // when there `panic=abort`: it does `abort`. + unsafe { libc::_exit(1) } + })); +} + +fn verify_buck_out_dir(paths: &InvocationPaths) -> anyhow::Result<()> { + let path = paths.buck_out_path(); + fs_util::create_dir_all(path.clone())?; + + const CACHEDIR_TAG_CONTENTS: &str = r#"Signature: 8a477f597d28d172789f06886806bc55 +# This file is a cache directory tag created by Buck2. +# For information about cache directory tags, see: +# http://www.brynosaurus.com/cachedir/ +"#; + + if let Some(mut file) = + fs_util::create_file_if_not_exists(path.join(ForwardRelativePath::new("CACHEDIR.TAG")?))? + { + file.write_all(CACHEDIR_TAG_CONTENTS.as_bytes())?; + } + + Ok(()) +} + +impl DaemonCommand { + fn run( + self, + log_reload_handle: Arc, + paths: InvocationPaths, + in_process: bool, + listener_created: impl FnOnce() + Send, + ) -> anyhow::Result<()> { + // NOTE: Do not create any threads before this point. 
+ // Daemonize does not preserve threads. + + let server_init_ctx = BuckdServerInitPreferences { + detect_cycles: buck2_env_anyhow!("DICE_DETECT_CYCLES_UNSTABLE", type=DetectCycles)?, + which_dice: buck2_env_anyhow!("WHICH_DICE_UNSTABLE", type=WhichDice)?, + enable_trace_io: self.enable_trace_io, + reject_materializer_state: self.reject_materializer_state.map(|s| s.into()), + daemon_startup_config: self.daemon_startup_config, + }; + + let span = tracing::info_span!("daemon_listener"); + let span_guard = span.enter(); + + let daemon_dir = paths.daemon_dir()?; + let pid_path = daemon_dir.buckd_pid(); + let stdout_path = daemon_dir.buckd_stdout(); + let stderr_path = daemon_dir.buckd_stderr(); + // Even if we don't redirect output, we still need to create stdout/stderr files, + // because tailer opens them. This is untidy. + let stdout = File::create(stdout_path)?; + let stderr = File::create(stderr_path)?; + + let auth_token = gen_auth_token(); + + let (listener, process_info, endpoint) = if !self.dont_daemonize { + // We must create stdout/stderr before creating a listener, + // otherwise it is race: + // * daemon parent process exits + // * client successfully connects to the unix socket + // * but stdout/stderr may be not yet created, so tailer fails to open them + let (listener, endpoint) = init_listener()?; + + Self::daemonize(stdout, stderr)?; + + fs_util::write(&pid_path, format!("{}", process::id()))?; + + let pid = process::id(); + let process_info = DaemonProcessInfo { + pid: pid as i64, + endpoint: endpoint.to_string(), + version: BuckVersion::get().unique_id().to_owned(), + auth_token, + }; + + // TODO(nga): this code is executed after server daemonization, + // so client has to retry to read it. Fix it. + write_process_info(&daemon_dir, &process_info)?; + + tracing::info!("Daemonized."); + + (listener, process_info, endpoint) + } else { + fs_util::write(&pid_path, format!("{}", process::id()))?; + + if !in_process { + Self::redirect_output(stdout, stderr)?; + } + + let (listener, endpoint) = init_listener()?; + + let process_info = DaemonProcessInfo { + pid: process::id() as i64, + endpoint: endpoint.to_string(), + version: BuckVersion::get().unique_id().to_owned(), + auth_token, + }; + + write_process_info(&daemon_dir, &process_info)?; + + (listener, process_info, endpoint) + }; + + tracing::info!("Starting Buck2 daemon"); + tracing::info!("Version: {}", BuckVersion::get_version()); + tracing::info!("PID: {}", process::id()); + tracing::info!("ID: {}", *buck2_events::daemon_id::DAEMON_UUID); + tracing::info!("Endpoint: {}", endpoint); + + listener_created(); + + terminate_on_panic(); + + maybe_schedule_termination()?; + + // Higher performance for jemalloc, recommended (but may not have any effect on Mac) + // https://github.com/jemalloc/jemalloc/blob/dev/TUNING.md#notable-runtime-options-for-performance-tuning + memory::enable_background_threads()?; + + let fb = buck2_common::fbinit::get_or_init_fbcode_globals(); + + if cfg!(target_os = "linux") { + #[cfg(fbcode_build)] + { + gflags::set_gflag_value( + fb, + "cgroup2_reader_update_interval_ms", + gflags::GflagValue::U32(2000), + ) + .expect("failed to set gflag --cgroup2_reader_update_interval_ms"); + } + } + + // Unfortunately, buck-out doesn't really have a well-defined place/time at which it creates + // the buck-out dir, instead just creating it whenever it first wants to write something to + // it. We don't want to create it unconditionally on all commands as there are lots of + // client commands (help, log, etc.) 
that should not require a buck-out just to be able to + // run. However, at the point at which we're starting a daemon it does seem sensible to now + // ensure that it always exists, primarily so that we can put a file into it to mark it as a + // cachedir. + verify_buck_out_dir(&paths)?; + + let mut builder = new_tokio_runtime("buck2-rt"); + builder.enable_all(); + + if let Some(threads) = buck2_env_anyhow!("BUCK2_RUNTIME_THREADS", type=usize)? { + builder.worker_threads(threads); + } + + if let Some(threads) = buck2_env_anyhow!("BUCK2_MAX_BLOCKING_THREADS", type=usize)? { + builder.max_blocking_threads(threads); + } + + tracing::info!("Starting tokio runtime..."); + + let rt = builder.build().context("Error creating Tokio runtime")?; + let handle = rt.handle().clone(); + + let rt = new_tokio_runtime("buck2-tn") + .enable_all() + // These values are arbitrary, but I/O shouldn't take up many threads. + .worker_threads(2) + .max_blocking_threads(2) + .build() + .context("Error creating Tonic Tokio runtime")?; + + rt.block_on(async move { + // Once any item is received on the hard_shutdown_receiver, the daemon process will exit immediately. + let (hard_shutdown_sender, mut hard_shutdown_receiver) = mpsc::unbounded(); + + #[derive(Allocative)] + struct Delegate { + #[allocative(skip)] + hard_shutdown_sender: UnboundedSender, + } + + impl BuckdServerDelegate for Delegate { + fn force_shutdown_with_timeout(&self, reason: String, timeout: Duration) { + let sender = self.hard_shutdown_sender.clone(); + tokio::spawn(async move { + tokio::time::sleep(timeout).await; + sender.unbounded_send(reason).expect("Shouldn't happen."); + }); + } + } + + let delegate = Box::new(Delegate { + hard_shutdown_sender: hard_shutdown_sender.clone(), + }); + let daemon_dir = paths.daemon_dir()?; + + listener.set_nonblocking(true)?; + let listener = tokio::net::TcpListener::from_std(listener)?; + let listener = tokio_stream::wrappers::TcpListenerStream::new(listener); + + tracing::info!("Listening."); + + drop(span_guard); + + let daemon_constraints = + gen_daemon_constraints(&server_init_ctx.daemon_startup_config)?; + + let buckd_server = BuckdServer::run( + fb, + log_reload_handle, + paths, + delegate, + server_init_ctx, + process_info, + daemon_constraints, + Box::pin(listener), + handle, + ) + .fuse(); + let shutdown_future = async move { hard_shutdown_receiver.next().await }.fuse(); + pin_mut!(buckd_server); + pin_mut!(shutdown_future); + + let checker_interval_seconds = self.checker_interval_seconds; + + thread_spawn("check-daemon-dir", move || { + Self::check_daemon_dir_thread( + checker_interval_seconds, + daemon_dir, + hard_shutdown_sender, + ) + })?; + + tracing::info!("Initialization complete, running the server."); + + select! { + res = buckd_server => { + tracing::info!("server shutdown"); + res + } + reason = shutdown_future => { + let reason = reason.as_deref().unwrap_or("no reason available"); + tracing::info!("server forced shutdown: {}", reason); + anyhow::Ok(()) + }, + } + }) + } + + /// We start a dedicated thread to periodically check that the files in the daemon + /// dir still reflect that we are the current buckd and verify that when you connect + /// to the server it is our server. + /// It gets a dedicated thread so that if somehow the main runtime gets all jammed up, + /// this will still run (and presumably connecting to the server or our request would + /// then fail and we'd do a hard shutdown). 
+ fn check_daemon_dir_thread( + checker_interval_seconds: u64, + daemon_dir: DaemonDir, + hard_shutdown_sender: UnboundedSender, + ) { + let this_rt = Builder::new_current_thread().enable_all().build().unwrap(); + + this_rt.block_on(async move { + loop { + tokio::time::sleep(Duration::from_secs(checker_interval_seconds)).await; + match verify_current_daemon(&daemon_dir) { + Ok(()) => {} + Err(e) => { + // This bit of code cannot relay errors, ignoring that we can't log + // a warning is reasonable. + let _ignored = buck2_client_ctx::eprintln!( + "daemon verification failed, forcing shutdown: {:#}", + e + ); + + // If this is already shutting down, we don't need to do it again. + let _ignored = hard_shutdown_sender + .unbounded_send("Daemon verification failed".to_owned()); + } + }; + } + }) + } + + pub fn exec( + self, + log_reload_handle: Arc, + paths: InvocationPaths, + in_process: bool, + listener_created: impl FnOnce() + Send, + ) -> anyhow::Result<()> { + daemon_lower_priority(self.skip_macos_qos)?; + + let project_root = paths.project_root(); + let daemon_dir = paths.daemon_dir()?; + + if !daemon_dir.path.is_dir() { + fs_util::create_dir_all(&daemon_dir.path)?; + } + + // TODO(nga): this breaks relative paths in `--no-buckd`. + // `--no-buckd` should capture correct directories earlier. + // Or even better, client should set current directory to project root, + // and resolve all paths relative to original cwd. + fs_util::set_current_dir(project_root.root())?; + + self.run(log_reload_handle, paths, in_process, listener_created)?; + Ok(()) + } + + #[cfg(unix)] + fn redirect_output(stdout: File, stderr: File) -> anyhow::Result<()> { + use std::os::unix::io::AsRawFd; + + nix::unistd::dup2(stdout.as_raw_fd(), nix::libc::STDOUT_FILENO)?; + nix::unistd::dup2(stderr.as_raw_fd(), nix::libc::STDERR_FILENO)?; + Ok(()) + } + + #[cfg(windows)] + fn redirect_output(stdout: File, stderr: File) -> anyhow::Result<()> { + use std::os::windows::io::AsRawHandle; + + unsafe { + let stdout_fd = libc::open_osfhandle(stdout.as_raw_handle() as isize, libc::O_RDWR); + let stderr_fd = libc::open_osfhandle(stderr.as_raw_handle() as isize, libc::O_RDWR); + if stdout_fd == -1 || stderr_fd == -1 { + return Err(anyhow::Error::msg( + "Can't get file descriptors for output files", + )); + } + // MSVC libc doesn't export STDOUT_FILENO and STDERR_FILENO. + let stdout_exit_code = libc::dup2(stdout_fd, 1); + let stderr_exit_code = libc::dup2(stderr_fd, 2); + if stdout_exit_code == -1 || stderr_exit_code == -1 { + return Err(anyhow::Error::msg("Failed to redirect daemon output")); + } + } + Ok(()) + } + + #[cfg(unix)] + fn daemonize(stdout: File, stderr: File) -> anyhow::Result<()> { + // TODO(cjhopman): Daemonize is pretty un-maintained. We may need to move + // to something else or just do it ourselves. + let daemonize = crate::daemonize::Daemonize::new() + .stdout(stdout) + .stderr(stderr); + daemonize.start()?; + Ok(()) + } + + #[cfg(windows)] + /// Restart current process in detached mode with '--dont-daemonize' flag. 
+ fn daemonize(_stdout: File, _stderr: File) -> anyhow::Result<()> { + Err(anyhow::anyhow!("Cannot daemonize on Windows")) + } +} + +#[cfg(test)] +mod tests { + use std::process; + use std::time::Duration; + + use allocative::Allocative; + use anyhow::Context; + use buck2_cli_proto::DaemonProcessInfo; + use buck2_cli_proto::KillRequest; + use buck2_cli_proto::PingRequest; + use buck2_client_ctx::daemon::client::connect::new_daemon_api_client; + use buck2_client_ctx::daemon_constraints::gen_daemon_constraints; + use buck2_common::init::DaemonStartupConfig; + use buck2_common::invocation_paths::InvocationPaths; + use buck2_common::invocation_roots::InvocationRoots; + use buck2_core::fs::paths::file_name::FileNameBuf; + use buck2_core::fs::project::ProjectRootTemp; + use buck2_core::logging::LogConfigurationReloadHandle; + use buck2_server::daemon::daemon_tcp::create_listener; + use buck2_server::daemon::server::BuckdServer; + use buck2_server::daemon::server::BuckdServerDelegate; + use buck2_server::daemon::server::BuckdServerInitPreferences; + use dupe::Dupe; + use rand::RngCore; + use rand::SeedableRng; + use tokio::runtime::Handle; + + // `fbinit_tokio` is not on crates, so we cannot use `#[fbinit::test]`. + #[tokio::test] + async fn test_daemon_smoke() { + // TODO(nga): this should be `fbinit::perform_init`, but it is not on crates yet. + let fbinit = unsafe { fbinit::assume_init() }; + + buck2_core::client_only::CLIENT_ONLY_VAL.init(false); + + let project_root = ProjectRootTemp::new().unwrap(); + + let (endpoint, listener) = create_listener().unwrap(); + listener.set_nonblocking(true).unwrap(); + let listener = tokio::net::TcpListener::from_std(listener).unwrap(); + let listener = tokio_stream::wrappers::TcpListenerStream::new(listener); + + let invocation_paths = InvocationPaths { + roots: InvocationRoots { + cell_root: project_root.path().root().to_buf(), + project_root: project_root.path().dupe(), + }, + isolation: FileNameBuf::try_from("v2".to_owned()).unwrap(), + }; + + #[derive(Allocative)] + struct Delegate; + + impl BuckdServerDelegate for Delegate { + fn force_shutdown_with_timeout(&self, _reason: String, _timeout: Duration) {} + } + + let process_info = DaemonProcessInfo { + endpoint: endpoint.to_string(), + pid: process::id() as i64, + version: "13.17.19".to_owned(), + auth_token: "abc".to_owned(), + }; + + let handle = tokio::spawn(BuckdServer::run( + fbinit, + ::noop(), + invocation_paths, + Box::new(Delegate), + BuckdServerInitPreferences { + detect_cycles: None, + which_dice: None, + enable_trace_io: false, + reject_materializer_state: None, + daemon_startup_config: DaemonStartupConfig::testing_empty(), + }, + process_info.clone(), + gen_daemon_constraints(&DaemonStartupConfig::testing_empty()).unwrap(), + Box::pin(listener), + Handle::current(), + )); + + let mut client = new_daemon_api_client(endpoint.clone(), process_info.auth_token) + .await + .unwrap(); + + client.ping(PingRequest::default()).await.unwrap(); + + let mut client_with_wrong_token = new_daemon_api_client(endpoint, "wrong_token".to_owned()) + .await + .unwrap(); + + let err = format!( + "{:#}", + client_with_wrong_token + .ping(PingRequest::default()) + .await + .unwrap_err() + ); + assert!(err.contains("invalid auth token"), "Error is: {}", err); + + client.ping(PingRequest::default()).await.unwrap(); + + for req_size in [0, 1 << 10, 1 << 20, 10 << 20, 100 << 20] { + let mut payload = vec![0; req_size]; + rand::rngs::SmallRng::seed_from_u64(20).fill_bytes(&mut payload); + client + .ping(PingRequest { + 
payload, + ..PingRequest::default() + }) + .await + .context(format!("req_size={}", req_size)) + .unwrap(); + } + + for resp_size in [0, 1 << 10, 1 << 20, 10 << 20, 100 << 20] { + client + .ping(PingRequest { + response_payload_size: resp_size, + ..PingRequest::default() + }) + .await + .context(format!("resp_size={}", resp_size)) + .unwrap(); + } + + client.kill(KillRequest::default()).await.unwrap(); + + handle + .await + .expect("handle join failed") + .expect("daemon returned error"); + } +} diff --git a/app/buck2/src/commands/daemon_lower_priority.rs b/app/buck2_daemon/src/daemon_lower_priority.rs similarity index 95% rename from app/buck2/src/commands/daemon_lower_priority.rs rename to app/buck2_daemon/src/daemon_lower_priority.rs index f27790e2ad6fc..5dec51ecc25d4 100644 --- a/app/buck2/src/commands/daemon_lower_priority.rs +++ b/app/buck2_daemon/src/daemon_lower_priority.rs @@ -7,7 +7,7 @@ * of this source tree. */ -use buck2_core::env_helper::EnvHelper; +use buck2_core::buck2_env_anyhow; /// Buck2 sets priority class = utility on macOS. /// @@ -15,11 +15,8 @@ use buck2_core::env_helper::EnvHelper; /// /// To experiment with other priority classes, set this variable to `true`, /// and start `buck2` daemon like `taskpolicy -c utility buck2 ...`. -static DISABLE_MACOS_QOS: EnvHelper<bool> = EnvHelper::new("BUCK2_DISABLE_MACOS_QOS"); - fn enable_macos_qos() -> anyhow::Result<bool> { - let disable_qos = DISABLE_MACOS_QOS.get_copied()?.unwrap_or(false); - Ok(!disable_qos) + Ok(!buck2_env_anyhow!("BUCK2_DISABLE_MACOS_QOS", bool)?) } pub(crate) fn daemon_lower_priority(skip_macos_qos_flag: bool) -> anyhow::Result<()> { @@ -150,7 +147,7 @@ fn do_lower_priority() -> anyhow::Result<()> { } } - #[derive(Debug, thiserror::Error)] + #[derive(Debug, buck2_error::Error)] #[error("`posix_spawnp` with `POSIX_SPAWN_SETEXEC` flag should not return on success.")] struct Unreachable; diff --git a/app/buck2/src/commands/daemonize.rs b/app/buck2_daemon/src/daemonize.rs similarity index 98% rename from app/buck2/src/commands/daemonize.rs rename to app/buck2_daemon/src/daemonize.rs index cf263bf2c5f35..14d184337e30a 100644 --- a/app/buck2/src/commands/daemonize.rs +++ b/app/buck2_daemon/src/daemonize.rs @@ -19,7 +19,6 @@ use std::fmt; use std::fs::File; use std::os::unix::io::AsRawFd; -use std::process::exit; use dupe::Dupe; @@ -73,7 +72,6 @@ enum Outcome { /// * change root directory; /// * change the pid-file ownership to provided user (and/or) group; /// * execute any provided action just before dropping privileges. -/// pub(crate) struct Daemonize { stdin: Stdio, stdout: Stdio, @@ -122,7 +120,7 @@ impl Daemonize { /// result to the child. pub(crate) fn start(self) -> anyhow::Result<()> { match self.execute() { - Outcome::Parent(Ok(_)) => exit(0), + Outcome::Parent(Ok(_)) => unsafe { libc::_exit(0) }, Outcome::Parent(Err(err)) => Err(err), Outcome::Child(Ok(())) => Ok(()), Outcome::Child(Err(err)) => Err(err), @@ -151,7 +149,7 @@ impl Daemonize { libc::umask(0o022); if perform_fork()?.is_some() { - exit(0) + libc::_exit(0) }; redirect_standard_streams(self.stdin, self.stdout, self.stderr)?; @@ -202,6 +200,7 @@ type Errno = libc::c_int; /// This error type for `Daemonize` `start` method.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Dupe)] +#[allow(dead_code)] struct Error { kind: ErrorKind, } diff --git a/app/buck2_daemon/src/lib.rs b/app/buck2_daemon/src/lib.rs new file mode 100644 index 0000000000000..ece69ca140a53 --- /dev/null +++ b/app/buck2_daemon/src/lib.rs @@ -0,0 +1,17 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +#![feature(error_generic_member_access)] +#![feature(used_with_arg)] + +pub mod daemon; +mod daemon_lower_priority; +mod daemonize; +pub mod no_buckd; +mod schedule_termination; diff --git a/app/buck2/src/no_buckd.rs b/app/buck2_daemon/src/no_buckd.rs similarity index 87% rename from app/buck2/src/no_buckd.rs rename to app/buck2_daemon/src/no_buckd.rs index f7a46b44c69ab..8ba4608310a03 100644 --- a/app/buck2/src/no_buckd.rs +++ b/app/buck2_daemon/src/no_buckd.rs @@ -7,26 +7,21 @@ * of this source tree. */ -use std::thread; - use anyhow::Context; -use buck2_client::commands::kill::kill_command_impl; use buck2_client_ctx::daemon::client::connect::buckd_startup_timeout; +use buck2_client_ctx::daemon::client::kill::kill_command_impl; use buck2_client_ctx::daemon::client::BuckdLifecycleLock; use buck2_client_ctx::startup_deadline::StartupDeadline; +use buck2_common::init::DaemonStartupConfig; use buck2_common::invocation_paths::InvocationPaths; -use buck2_common::legacy_configs::init::DaemonStartupConfig; use buck2_core::logging::LogConfigurationReloadHandle; -use fbinit::FacebookInit; +use buck2_util::threads::thread_spawn; -use crate::commands::daemon::DaemonCommand; -use crate::DaemonBeforeSubcommandOptions; +use crate::daemon::DaemonCommand; -pub(crate) fn start_in_process_daemon( - init: FacebookInit, +pub fn start_in_process_daemon( daemon_startup_config: &DaemonStartupConfig, paths: InvocationPaths, - daemon_opts: DaemonBeforeSubcommandOptions, runtime: &tokio::runtime::Runtime, ) -> anyhow::Result<Option<Box<dyn FnOnce() -> anyhow::Result<()> + Send + Sync>>> { let daemon_dir = paths.daemon_dir()?; @@ -49,13 +44,11 @@ pub(crate) fn start_in_process_daemon( Ok(Some(Box::new(move || { let (tx, rx) = std::sync::mpsc::channel(); // Spawn a thread which runs the daemon. - thread::spawn(move || { + thread_spawn("buck2-no-buckd", move || { let tx_clone = tx.clone(); let result = DaemonCommand::new_in_process(daemon_startup_config).exec( - init, <dyn LogConfigurationReloadHandle>::noop(), paths, - daemon_opts, true, move || drop(tx_clone.send(Ok(()))), ); @@ -74,7 +67,7 @@ pub(crate) fn start_in_process_daemon( )), } } - }); + })?; // Wait for listener to start (or to fail). match rx.recv() { Ok(r) => r, diff --git a/app/buck2_daemon/src/schedule_termination.rs b/app/buck2_daemon/src/schedule_termination.rs new file mode 100644 index 0000000000000..b97c7fefa320e --- /dev/null +++ b/app/buck2_daemon/src/schedule_termination.rs @@ -0,0 +1,80 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree.
+ */ + +use std::thread; +use std::time::Duration; + +use buck2_core::buck2_env_anyhow; +use buck2_util::process_stats::process_cpu_time_us; +use buck2_util::threads::thread_spawn; + +fn elapsed_cpu_time_as_percents( + cpu_time_before_us: Option<u64>, + cpu_time_after_us: Option<u64>, + duration_seconds: u64, +) -> Option<u64> { + let cpu_time_before = cpu_time_before_us?; + let cpu_time_after = cpu_time_after_us?; + let elapsed_cpu_time_us = cpu_time_after.checked_sub(cpu_time_before)?; + let elapsed_cpu_time_us_avg_per_second = elapsed_cpu_time_us.checked_div(duration_seconds)?; + elapsed_cpu_time_us_avg_per_second.checked_div(1_000_000 / 100) +} + +/// Our tests sometimes don't exit Buck 2 cleanly, and they might not get an opportunity to do so +/// if they are terminated. This allows the daemon to self-destruct. +pub(crate) fn maybe_schedule_termination() -> anyhow::Result<()> { + if let Some(duration) = + buck2_env_anyhow!("BUCK2_TERMINATE_AFTER", type=u64, applicability=testing)? + { + thread_spawn("buck2-terminate-after", move || { + const MEASURE_CPU_TIME_FOR: u64 = 10; + let (sleep_before, sleep_after) = match duration.checked_sub(MEASURE_CPU_TIME_FOR) { + Some(sleep_before) => (sleep_before, MEASURE_CPU_TIME_FOR), + None => (0, duration), + }; + + thread::sleep(Duration::from_secs(sleep_before)); + let process_cpu_time_us_before = process_cpu_time_us(); + thread::sleep(Duration::from_secs(sleep_after)); + let process_cpu_time_us_after = process_cpu_time_us(); + + let elapsed_cpu_time_avg_in_percents = elapsed_cpu_time_as_percents( + process_cpu_time_us_before, + process_cpu_time_us_after, + sleep_after, + ); + if let Some(elapsed_cpu_time_avg_in_percents) = elapsed_cpu_time_avg_in_percents { + panic!( + "Buck is exiting after {}s elapsed; avg process CPU in the last {}s is {}%", + duration, sleep_after, elapsed_cpu_time_avg_in_percents + ); + } else { + panic!("Buck is exiting after {}s elapsed", duration); + } + })?; + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use crate::schedule_termination::elapsed_cpu_time_as_percents; + + #[test] + fn test_elapsed_cpu_time_as_percents() { + // 12 seconds wall time + // 6 seconds of CPU time + // equivalent to 50% CPU usage + assert_eq!( + Some(50), + elapsed_cpu_time_as_percents(Some(1_000_123), Some(7_000_123), 12) + ); + } +} diff --git a/app/buck2_data/BUCK b/app/buck2_data/BUCK index 4017dbad9dd14..99f7da642b95e 100644 --- a/app/buck2_data/BUCK +++ b/app/buck2_data/BUCK @@ -1,5 +1,6 @@ load("@fbcode//buck2:proto_defs.bzl", "rust_protobuf_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") +load("@fbcode_macros//build_defs:export_files.bzl", "export_file") +load(":export_file_with.bzl", "export_file_with") oncall("build_infra") @@ -7,8 +8,10 @@ rust_protobuf_library( name = "buck2_data", srcs = glob(["src/**/*.rs"]), build_script = "build.rs", - doctests = False, # FIXME - protos = ["data.proto"], + protos = [ + "data.proto", + "error.proto", + ], deps = [ "fbsource//third-party/rust:derive_more", "fbsource//third-party/rust:hex", @@ -20,3 +23,18 @@ rust_protobuf_library( "//buck2/gazebo/gazebo:gazebo", ], ) + +export_file_with( + name = "data_proto", + src = "data.proto", + attach = ["error.proto"], + visibility = ["PUBLIC"], +) + +export_file( + name = "error.proto", +) + +export_file( + name = "data.proto", +) diff --git a/app/buck2_data/Cargo.toml b/app/buck2_data/Cargo.toml index 5bbfce3ce39ed..99bc028e9120b 100644 --- a/app/buck2_data/Cargo.toml +++ b/app/buck2_data/Cargo.toml @@ -1,20 +1,21 @@ [package]
+license = { workspace = true } name = "buck2_data" +repository = { workspace = true } version = "0.1.0" -edition = "2021" [dependencies] +allocative = { workspace = true } derive_more = { workspace = true } hex = { workspace = true } prost = { workspace = true } prost-types = { workspace = true } serde = { workspace = true } tonic = { workspace = true } -allocative = { workspace = true } dupe = { workspace = true } gazebo = { workspace = true } [build-dependencies] buck2_protoc_dev = { workspace = true } -tonic-build = { workspace = true } diff --git a/app/buck2_data/build.rs b/app/buck2_data/build.rs index c5ad7320c54ce..8d86182a3a9e5 100644 --- a/app/buck2_data/build.rs +++ b/app/buck2_data/build.rs @@ -10,7 +10,7 @@ use std::io; fn main() -> io::Result<()> { - let proto_files = &["data.proto"]; + let proto_files = &["data.proto", "error.proto"]; buck2_protoc_dev::configure() .setup_protoc() @@ -79,6 +79,10 @@ fn main() -> io::Result<()> { "buck.data.ActionExecutionEnd.error", "#[derive(::derive_more::From, ::gazebo::variants::VariantName)]", ) + .type_attribute( + "buck.data.ActionError.error", + "#[derive(::derive_more::From, ::gazebo::variants::VariantName)]", + ) .type_attribute( "buck.data.CommandExecutionDetails.command", "#[derive(::derive_more::From, ::gazebo::variants::VariantName)]", @@ -116,6 +120,7 @@ fn main() -> io::Result<()> { "buck.data.MaterializationMethod", "#[derive(::gazebo::variants::VariantName)]", ) + .type_attribute("buck.data.CpuCounter", "#[derive(Copy, dupe::Dupe)]") .type_attribute("buck.data.CommandExecutionStats", "#[derive(Copy, dupe::Dupe)]") .type_attribute(".", "#[derive(::serde::Serialize, ::serde::Deserialize)]") .type_attribute(".", "#[derive(::allocative::Allocative)]") @@ -169,6 +174,10 @@ fn main() -> io::Result<()> { "bxl_ensure_artifacts_duration", "#[serde(rename = \"bxl_ensure_artifacts_duration_us\", with = \"crate::serialize_duration_as_micros\")]", ) + .field_attribute( + "install_duration", + "#[serde(rename = \"install_duration_us\", with = \"crate::serialize_duration_as_micros\")]", + ) .field_attribute( "CriticalPathEntry2.user_duration", "#[serde(rename = \"user_duration_us\", with = \"crate::serialize_duration_as_micros\")]", @@ -181,6 +190,10 @@ fn main() -> io::Result<()> { "CriticalPathEntry2.potential_improvement_duration", "#[serde(rename = \"potential_improvement_duration_us\", with = \"crate::serialize_duration_as_micros\")]", ) + .field_attribute( + "CriticalPathEntry2.queue_duration", + "#[serde(rename = \"queue_duration_us\", with = \"crate::serialize_duration_as_micros\")]", + ) .type_attribute( "buck.data.CriticalPathEntry2.entry", "#[derive(::derive_more::From, ::gazebo::variants::VariantName)]", @@ -237,6 +250,10 @@ fn main() -> io::Result<()> { "buck.data.CommandExecutionMetadata.hashing_duration", "#[serde(rename = \"hashing_duration_us\", with = \"crate::serialize_duration_as_micros\")]", ) + .field_attribute( + "buck.data.CommandExecutionMetadata.queue_duration", + "#[serde(rename = \"queue_duration_us\", with = \"crate::serialize_duration_as_micros\")]", + ) .boxed("RecordEvent.data.invocation_record") .boxed("SpanEndEvent.data.action_execution") .boxed("SpanEndEvent.data.cache_upload") diff --git a/app/buck2_data/data.proto b/app/buck2_data/data.proto index 341a52d3a4673..67e03f0175b24 100644 --- a/app/buck2_data/data.proto +++ b/app/buck2_data/data.proto @@ -12,6 +12,7 @@ syntax = "proto3"; import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; import "google/protobuf/empty.proto"; 
+import "error.proto"; package buck.data; @@ -26,10 +27,10 @@ message BuckEvent { // A globally-unique ID (UUIDv4) of this trace. Required. string trace_id = 2; // A trace-unique 64-bit integer identifying this event's span ID, if this - // event begins a new span or belongs to one. Required. + // event begins a new span or belongs to one. uint64 span_id = 3; // A trace-unique 64-bit identifying the span that this event is logically - // parented to. Required. + // parented to. uint64 parent_id = 4; // The payload of this event. Required. oneof data { @@ -51,6 +52,7 @@ message SpanStartEvent { CommandStart command = 50; ActionExecutionStart action_execution = 51; AnalysisStart analysis = 52; + AnalysisResolveQueriesStart analysis_resolve_queries = 5201; LoadBuildFileStart load = 53; ExecutorStageStart executor_stage = 54; TestDiscoveryStart test_discovery = 55; @@ -82,6 +84,10 @@ message SpanStartEvent { ReleaseLocalResourcesStart release_local_resources = 81; // Measure total time it takes to ensure BXL artifacts. BxlEnsureArtifactsStart bxl_ensure_artifacts = 82; + CreateOutputHashesFileStart create_output_hashes_file = 84; + ActionErrorHandlerExecutionStart action_error_handler_execution = 85; + CqueryUniverseBuildStart cquery_universe_build = 87; + DepFileUploadStart dep_file_upload = 88; // Used in Buck unit tests. FakeStart fake = 999; } @@ -98,6 +104,7 @@ message SpanEndEvent { CommandEnd command = 50; ActionExecutionEnd action_execution = 51; AnalysisEnd analysis = 52; + AnalysisResolveQueriesEnd analysis_resolve_queries = 5201; LoadBuildFileEnd load = 53; ExecutorStageEnd executor_stage = 54; TestDiscoveryEnd test_discovery = 55; @@ -129,6 +136,10 @@ message SpanEndEvent { SetupLocalResourcesEnd local_resources = 81; ReleaseLocalResourcesEnd release_local_resources = 82; BxlEnsureArtifactsEnd bxl_ensure_artifacts = 83; + CreateOutputHashesFileEnd create_output_hashes_file = 85; + ActionErrorHandlerExecutionEnd action_error_handler_execution = 86; + CqueryUniverseBuildEnd cquery_universe_build = 87; + DepFileUploadEnd dep_file_upload = 88; // Used in Buck unit tests. FakeEnd fake = 999; } @@ -171,9 +182,40 @@ message CommandOptions { uint64 concurrency = 1; } +message BuckConfigs { + reserved 1, 2, 3; + // config diff by cell name + map cell_diff = 4; +} + +message CellConfigDiff { + // config diff by section name + map section_diff = 1; + uint64 config_diff_count = 2; + // key + old value + new value + uint64 config_diff_size = 3; + // If this is set, then this event indicates that buck has loaded configs for + // a new cell for which configs had not previously been loaded. In this case, + // the other fields are left empty + bool new_config_indicator_only = 4; +} + +message SectionConfigDiff { + // config diff by config name + map config_diff = 1; +} + +message ConfigDiff { + // both fields set means config was updated + // old_value set and new_value empty means config was removed + // new_value set and old_value empty means config was added + optional string old_value = 1; + optional string new_value = 2; +} + // An event that represents a single point in time. message InstantEvent { - reserved 2, 8, 9, 13, 22, 24; + reserved 2, 8, 9, 12, 13, 22, 24, 38; oneof data { StructuredError structured_error = 1; @@ -221,10 +263,6 @@ message InstantEvent { // Stacktrace from the fail_no_stacktrace() call. 
StarlarkFailNoStacktrace starlark_fail_no_stacktrace = 25; - - // Emitted when the user requests that concurrent commands with different - // states should be exited immediately - ExitWhenDifferentState exit_when_different_state = 26; - // Snapshot of current debug adapter state. Only sent when a debugger is // attached. DebugAdapterSnapshot debug_adapter_snapshot = 27; @@ -247,10 +285,38 @@ message InstantEvent { ConcurrentCommands concurrent_commands = 32; // Info coming from the `buck2 debug persist-event-log` subprocess - PersistSubprocess persist_subprocess = 33; + PersistEventLogSubprocess persist_event_log_subprocess = 33; + + // An action error encountered during the build + ActionError action_error = 34; + + ConsoleWarning console_warning = 35; + + MaterializerCommand materializer_command = 36; + + CleanStaleResult clean_stale_result = 37; + + CellConfigDiff cell_config_diff = 41; + + InstallFinished install_finished = 39; + + SystemInfo system_info = 40; + + VersionControlRevision version_control_revision = 42; + + TargetCfg target_cfg = 43; + + // Just something for us to be able to easily propagate out internal + // information. Used for testing. + QuickUnstableE2eData unstable_e2e_data = 44; } +} - reserved 12; // Log +// This should only be used as a mechanism to easily get information into the +// event log for e2e tests. +message QuickUnstableE2eData { + string key = 1; + string data = 2; } message DebugAdapterStoppedEval { @@ -281,6 +347,8 @@ message DiceKeyState { uint32 finished = 2; uint32 check_deps_started = 3; uint32 check_deps_finished = 4; + uint32 compute_started = 5; + uint32 compute_finished = 6; } message RemoteExecutionSessionCreated { @@ -350,6 +418,8 @@ message StructuredError { // Is this error indicative of the RE action cache returning corrupted // results? bool action_cache_is_corrupted = 10; + // Whether the error should be converted to a hard error in the future. + bool deprecation = 11; } message CriticalPathEntry { @@ -378,6 +448,11 @@ message CriticalPathEntry2 { BxlFunctionKey bxl_key = 3; AnonTarget anon_target = 4; } + + ActionExecutionKind execution_kind = 5; + + optional string target_rule_type_name = 6; + optional string action_digest = 7; } message Materialization { @@ -419,6 +494,10 @@ message CriticalPathEntry2 { // `duration` (since it can't exceed it). optional google.protobuf.Duration potential_improvement_duration = 5; + // The subset of the duration that can be attributed to waiting + // for actions to run + optional google.protobuf.Duration queue_duration = 6; + oneof entry { Analysis analysis = 100; ActionExecution action_execution = 101; @@ -472,6 +551,25 @@ message RageResult { google.protobuf.Duration command_duration = 12; } +message VersionControlRevision { + // A 40-character hash. + optional string hg_revision = 1; + // Here are the possible values: + // True: Has local changes that are not committed. + // False: All changes committed and can be identified by revision hash. + // Unset: Unknown state. + optional bool has_local_changes = 2; + optional string command_error = 3; +} + +// Event sent during build commands +// providing information about global configuration options. +message TargetCfg { + // resolved target platforms + repeated string target_platforms = 1; + repeated string cli_modifiers = 2; +} + // A snapshot of current system state, with useful info. message Snapshot { // Resident set size in bytes of the buck2 daemon.
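For orientation, prost generates a plain Rust struct for each of these messages (via the `tonic::include_proto!("buck.data")` call in app/buck2_data/src/lib.rs further below), so producers fill them in directly. A minimal sketch of populating the `TargetCfg` message defined above, assuming the generated crate is consumed as `buck2_data` and with made-up values:

    // Hypothetical producer of the `TargetCfg` instant event payload.
    // The two `repeated string` fields come out of prost as `Vec<String>`.
    fn target_cfg_event() -> buck2_data::TargetCfg {
        buck2_data::TargetCfg {
            target_platforms: vec!["prelude//platforms:default".to_owned()],
            cli_modifiers: vec!["release".to_owned()],
        }
    }
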
@@ -540,6 +638,8 @@ message Snapshot { optional uint64 malloc_bytes_active = 8; // (stats.allocated) Total number of bytes allocated by the application optional uint64 malloc_bytes_allocated = 9; + // Total number of bytes of used disk space on machine + optional uint64 used_disk_space_bytes = 10; uint64 dice_key_count = 101; // the number of keys actively present in the per transaction cache @@ -553,10 +653,20 @@ message Snapshot { optional uint64 sink_successes = 105; // Cumulative count of messages that failed to be emitted. optional uint64 sink_failures = 106; + // These are only stored in snapshot events, but not written to scuba. + optional uint64 sink_failures_invalid_request = 1201; + optional uint64 sink_failures_unauthorized = 1202; + optional uint64 sink_failures_rate_limited = 1203; + optional uint64 sink_failures_pushed_back = 1204; + optional uint64 sink_failures_enqueue_failed = 1205; + optional uint64 sink_failures_internal_error = 1206; + optional uint64 sink_failures_timed_out = 1207; + optional uint64 sink_failures_unknown = 1208; // Current number of messages queued up to be submitted. optional uint64 sink_buffer_depth = 107; // Cumulative count of messages that were dropped (i.e. not even processed). optional uint64 sink_dropped = 108; + optional uint64 sink_bytes_written = 111; // Network statistics for "interesting" network interfaces. map<string, NetworkInterfaceStats> network_interface_stats = 109; @@ -568,6 +678,26 @@ message Snapshot { optional UnixSystemStats unix_system_stats = 300; + uint64 zdb_download_queries = 400; + uint64 zdb_download_bytes = 401; + uint64 zdb_upload_queries = 402; + uint64 zdb_upload_bytes = 403; + + uint64 zgateway_download_queries = 410; + uint64 zgateway_download_bytes = 411; + uint64 zgateway_upload_queries = 412; + uint64 zgateway_upload_bytes = 413; + + uint64 manifold_download_queries = 420; + uint64 manifold_download_bytes = 421; + uint64 manifold_upload_queries = 422; + uint64 manifold_upload_bytes = 423; + + uint64 hedwig_download_queries = 430; + uint64 hedwig_download_bytes = 431; + uint64 hedwig_upload_queries = 432; + uint64 hedwig_upload_bytes = 433; + // Client side metrics.
// Delay between time snapshot is created and time it is received @@ -655,6 +785,9 @@ message CommandStart { TraceIoCommandStart trace = 37; ConfiguredTargetsCommandStart ctargets = 38; StarlarkDebugAttachCommandStart starlark_debug_attach = 39; + ExplainCommandStart explain = 40; + ExpandExternalCellCommandStart expand_external_cell = 41; + CompleteCommandStart complete = 42; } } @@ -675,15 +808,11 @@ message CommandCriticalStart { string dice_version = 3; } -message AuditCommandStart { - // TODO(swgillespie) fill this with useful fields -} +message AuditCommandStart {} message StarlarkCommandStart {} -message BuildCommandStart { - // TODO(swgillespie) fill this with useful fields -} +message BuildCommandStart {} message BxlCommandStart { // The full bxl label that was run, excluding the arguments to the bxl @@ -694,52 +823,42 @@ message LspCommandStart {} message StarlarkDebugAttachCommandStart {} -message TargetsCommandStart { - // TODO(swgillespie) fill this with useful fields -} +message TargetsCommandStart {} message ConfiguredTargetsCommandStart {} -message QueryCommandStart { - // TODO(swgillespie) fill this with useful fields -} +message QueryCommandStart {} -message AqueryCommandStart { - // TODO(swgillespie) fill this with useful fields -} +message AqueryCommandStart {} message CQueryCommandStart { string query = 1; string query_args = 2; string target_universe = 3; - // TODO(swgillespie) fill this with useful fields } -message TestCommandStart { - // TODO(swgillespie) fill this with useful fields -} +message TestCommandStart {} -message DocsCommandStart { - // TODO(nmj) fill this with useful fields -} +message DocsCommandStart {} -message CleanCommandStart { - // TODO fill this with useful fields -} +message ExplainCommandStart {} -message InstallCommandStart { - // TODO fill this with useful fields -} +message CleanCommandStart {} -message MaterializeCommandStart { - // TODO fill this with useful fields -} +message InstallCommandStart {} + +message MaterializeCommandStart {} message FileStatusCommandStart {} message ProfileCommandStart {} +message ExpandExternalCellCommandStart {} + +message CompleteCommandStart {} + message CommandEnd { + reserved 3; oneof data { BuildCommandEnd build = 20; TargetsCommandEnd targets = 21; @@ -761,10 +880,13 @@ message CommandEnd { TraceIoCommandEnd trace = 37; ConfiguredTargetsCommandEnd ctargets = 38; StarlarkDebugAttachCommandEnd starlark_debug_attach = 39; + ExplainCommandEnd explain = 40; + ExpandExternalCellCommandEnd expand_external_cell = 41; + CompleteCommandEnd complete = 42; } bool is_success = 2; - repeated string error_messages = 3; + repeated ErrorReport errors = 4; } // Marks the exit of the `CommandCriticalStart` event, such that the command has @@ -806,62 +928,79 @@ message TargetsCommandEnd { message ConfiguredTargetsCommandEnd {} -message QueryCommandEnd { - // TODO(swgillespie) fill this with useful fields -} +message QueryCommandEnd {} -message CQueryCommandEnd { - // TODO(swgillespie) fill this with useful fields -} +message CQueryCommandEnd {} -message AqueryCommandEnd { - // TODO(swgillespie) fill this with useful fields -} +message AqueryCommandEnd {} message TestCommandEnd { repeated TargetPattern unresolved_target_patterns = 1; } -message DocsCommandEnd { - // TODO(nmj) fill this with useful fields -} +message DocsCommandEnd {} + +message ExplainCommandEnd {} message CleanCommandEnd { optional CleanStaleStats clean_stale_stats = 1; } message CleanStaleStats { + reserved 7; uint64 stale_artifact_count = 1; uint64 
stale_bytes = 2; uint64 retained_artifact_count = 3; uint64 retained_bytes = 4; uint64 untracked_artifact_count = 5; uint64 untracked_bytes = 6; - uint64 cleaned_path_count = 7; uint64 cleaned_artifact_count = 8; uint64 cleaned_bytes = 9; + uint64 total_duration_s = 10; + uint64 scan_duration_s = 11; + uint64 clean_duration_s = 12; +} + +enum CleanStaleResultKind { + FINISHED = 0; + INTERRUPTED = 1; + FAILED = 2; + SKIPPED_DRY_RUN = 3; + SKIPPED_NO_GEN_DIR = 4; + SKIPPED_DEFER_WRITE_DISABLED = 5; + SKIPPED_SQLITE_DISABLED = 6; +} + +message CleanStaleResult { + map<string, string> metadata = 1; + CleanStaleResultKind kind = 2; + optional ErrorReport error = 3; + CleanStaleStats stats = 4; + // Set if clean stale is invoked as a command + optional string command_uuid = 5; } message InstallCommandEnd { repeated TargetPattern unresolved_target_patterns = 1; } -message MaterializeCommandEnd { - // TODO fill this with useful fields -} +message MaterializeCommandEnd {} message FileStatusCommandEnd {} message ProfileCommandEnd {} +message ExpandExternalCellCommandEnd {} + +message CompleteCommandEnd {} + message LoadPackageStart { string path = 1; } message LoadPackageEnd { - reserved 2; + reserved 2, 3; string path = 1; - optional ErrorReport error = 3; } message LoadBuildFileStart { @@ -873,6 +1012,13 @@ message LoadBuildFileEnd { string module_id = 1; string cell = 2; optional string error = 3; + // Peak allocated memory in starlark mutable heap during evaluation of BUCK + // file. + optional uint64 starlark_peak_allocated_bytes = 4; + // Number of CPU instructions during evaluation of BUCK file. + optional uint64 cpu_instruction_count = 5; + // Number of targets + optional uint64 target_count = 6; } message SharedTaskStart { @@ -1019,8 +1165,15 @@ message CommandExecutionMetadata { CommandExecutionStats execution_stats = 5; - /// How long it took to materialize the action's inputs. + /// Sum of all hashing times of individual files. Can be larger than user time + /// because hashing is done in parallel. + google.protobuf.Duration hashing_duration = 6; + + uint64 hashed_artifacts_count = 7; + + /// How long this command spent waiting to run + optional google.protobuf.Duration queue_duration = 8; } message CommandOutputsMissing { @@ -1092,6 +1245,7 @@ message ActionExecutionEnd { // If this action failed, contains an object that represents the nature of the // error. Can be just a string, in which case it is `unknown`, or may be a // richer object depending on the error. + // TODO(JakobDegen): Consider reusing the `ActionError` type below oneof error { // An error message whose nature is unknown. This often comes from cases // where action execution fails for reasons other than the command failing. @@ -1155,8 +1309,101 @@ message ActionExecutionEnd { // Remote dep file key (the digest we use to populate the action cache).
// This is set if the action contains a dep file optional string dep_file_key = 37; + + // Additional diagnostics, if an action error handler was provided + optional ActionErrorDiagnostics error_diagnostics = 38; + + optional uint64 input_files_bytes = 39; + + optional CommandInvalidationInfo invalidation_info = 40; +} + +message CommandInvalidationInfo { + message InvalidationSource {} + + optional InvalidationSource changed_any = 1; + optional InvalidationSource changed_file = 2; +} + +message ActionError { + ActionKey key = 1; + ActionName name = 2; + + // Matches the definition in `ActionExecutionEnd` + // FIXME(JakobDegen): Extract and deduplicate + oneof error { + string unknown = 3; + CommandOutputsMissing missing_outputs = 4; + CommandExecutionError command_execution_error = 5; + }; + + // The last command executed as a part of the action, if any + optional CommandExecution last_command = 6; + + // Additional diagnostics, if an action error handler was provided + optional ActionErrorDiagnostics error_diagnostics = 7; +} + +// Either the produced `ActionSubError`s, or the error that occurred when +// invoking the error handler +message ActionErrorDiagnostics { + oneof data { + // list of action error subcategories and their metadata + ActionSubErrors sub_errors = 1; + // error that may have occurred when invoking the handler + string handler_invocation_error = 2; + } +} + +// Wrapper around `ActionSubError` so we can use `oneof` in +// `ActionErrorDiagnostics` +message ActionSubErrors { + repeated ActionSubError sub_errors = 1; +} + +message ActionSubError { + // Error category produced by the error handler function provided by the rule + // author. + // + // These should be finer-grained error categorizations provided by the rule + // authors, and tend to be language specific. These should not be any kind of + // shared concepts among all errors for all languages/rules. For example, + // timeouts and infra errors should not go here - buck2 tries to categorize + // these types of errors automatically. An example of a finer-grained error + // category may be the error code for rustc outputs. + + string category = 1; + + // Optional freeform string for rule author to populate. The message will be + // emitted to the build report, and to the stderr in the error diagnostics + // section. + + optional string message = 2; + + // Optional list of file locations/lines for rule author to populate. + optional ActionErrorLocations locations = 3; +} + +// Wrapper around `ActionErrorLocation` so we can differentiate between null and +// empty locations +message ActionErrorLocations { + repeated ActionErrorLocation locations = 1; +} + +message ActionErrorLocation { + // The file path of the error location. Should be project relative or absolute + string file = 1; + optional uint64 line = 2; } +message ActionErrorHandlerExecutionStart {} + +message ActionErrorHandlerExecutionEnd {} + +message CqueryUniverseBuildStart {} + +message CqueryUniverseBuildEnd {} + // The beginning of materialization for the output of a target requested, // inclusive of all dependent artifacts it might recursively request to // materialize.
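To make the nesting of these diagnostics messages concrete, here is a sketch of assembling an error handler's output with the prost-generated types. The crate path `buck2_data` and all values are illustrative assumptions; prost maps each `oneof` to a Rust enum living in a snake_case submodule named after the containing message:

    // Hypothetical rule-defined sub-error with one source location.
    fn example_diagnostics() -> buck2_data::ActionErrorDiagnostics {
        let sub_error = buck2_data::ActionSubError {
            category: "rustc_mismatched_types".to_owned(), // invented category
            message: Some("expected `u64`, found `i64`".to_owned()),
            locations: Some(buck2_data::ActionErrorLocations {
                locations: vec![buck2_data::ActionErrorLocation {
                    file: "src/lib.rs".to_owned(), // project-relative path
                    line: Some(42),
                }],
            }),
        };
        buck2_data::ActionErrorDiagnostics {
            // The other `oneof` variant is `Data::HandlerInvocationError(String)`,
            // used when the handler itself failed.
            data: Some(buck2_data::action_error_diagnostics::Data::SubErrors(
                buck2_data::ActionSubErrors { sub_errors: vec![sub_error] },
            )),
        }
    }
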
@@ -1177,6 +1424,7 @@ message AnalysisStart { oneof target { ConfiguredTargetLabel standard_target = 1; AnonTarget anon_target = 3; + DynamicLambdaOwner dynamic_lambda = 4; } string rule = 2; } @@ -1185,20 +1433,29 @@ message AnalysisEnd { oneof target { ConfiguredTargetLabel standard_target = 1; AnonTarget anon_target = 4; + DynamicLambdaOwner dynamic_lambda = 5; } string rule = 3; AnalysisProfile profile = 2; + optional uint64 declared_actions = 6; + optional uint64 declared_artifacts = 7; } message AnalysisStageStart { + reserved 1; oneof stage { - google.protobuf.Empty resolve_queries = 1; google.protobuf.Empty evaluate_rule = 2; } } message AnalysisStageEnd {} +message AnalysisResolveQueriesStart { + ConfiguredTargetLabel standard_target = 1; +} + +message AnalysisResolveQueriesEnd {} + message ExecutorStageStart { oneof stage { ReStage re = 20; @@ -1224,6 +1481,7 @@ message CacheQuery { message CacheHit { string action_digest = 1; optional string action_key = 3; + CacheType cache_type = 4; } message ReStage { @@ -1236,13 +1494,17 @@ message ReStage { ReWorkerDownload worker_download = 8; ReWorkerUpload worker_upload = 9; ReUnknown unknown = 10; + MaterializeFailedInputs materialize_failed_inputs = 11; } } +message MaterializeFailedInputs {} + message ReExecute { string action_digest = 1; RePlatform platform = 2; optional string action_key = 3; + string use_case = 4; } message RePlatform { @@ -1257,14 +1519,17 @@ message ReDownload {} message ReQueue { string action_digest = 1; + string use_case = 2; } message ReWorkerDownload { string action_digest = 1; + string use_case = 2; } message ReWorkerUpload { string action_digest = 1; + string use_case = 2; } message ReUnknown { @@ -1348,6 +1613,8 @@ enum FileWatcherProvider { WATCHMAN = 0; // The Rust `notify` crate RUST_NOTIFY = 1; + + FS_HASH_CRAWLER = 2; } enum FileWatcherEventType { @@ -1378,6 +1645,7 @@ message FileWatcherStats { uint64 events_total = 2; uint64 events_processed = 3; optional string branched_from_revision = 4; + optional uint64 branched_from_global_rev = 10; repeated FileWatcherEvent events = 6; // Present if the results are incomplete optional string incomplete_events_reason = 7; @@ -1469,6 +1737,12 @@ message ConsoleMessage { string message = 1; } +/// A message that should be printed to the user as a warning in yellow color, +/// generally via stderr +message ConsoleWarning { + string message = 1; +} + message EnvironmentEntry { // The environment key. string key = 1; @@ -1517,6 +1791,11 @@ message RemoteCommand { CacheHitType cache_hit_type = 5; optional string remote_dep_file_key = 6; + + // Project relative paths to the materialized inputs for failed + + // actions, if `--materialize-failed-inputs` was passed to build options + repeated string materialized_inputs_for_failed = 7; } message RemoteCommandDetails { @@ -1559,9 +1838,28 @@ message TypedMetadata { map strings = 2; } -// Record event sent directly to scribe. +// Why current command started buckd. +enum DaemonWasStartedReason { + UNKNOWN_REASON = 0; + CONSTRAINT_MISMATCH_VERSION = 1; + CONSTRAINT_MISMATCH_USER_VERSION = 2; + CONSTRAINT_MISMATCH_STARTUP_CONFIG = 3; + CONSTRAINT_REJECT_DAEMON_ID = 4; + CONSTRAINT_MISMATCH_TRACE_IO = 5; + CONSTRAINT_MISMATCH_MATERIALIZER_STATE_IDENTITY = 6; + COULD_NOT_CONNECT_TO_DAEMON = 11; + TIMED_OUT_CONNECTING_TO_DAEMON = 12; + TIMEOUT_CALCULATION_ERROR = 13; + NO_BUCKD_INFO = 14; + COULD_NOT_LOAD_BUCKD_INFO = 15; + // `buckd.info` exists, but buckd is not running. 
+ NO_DAEMON_PROCESS = 16; +} + +// This is the origin for every sample in buck2_builds scuba table +// It's sent from the client to Scribe at the end of each invocation message InvocationRecord { - reserved 1, 22, 27, 28, 36, 61, 66; + reserved 1, 22, 27, 28, 36, 61, 62, 66, 77; // Optional - present if ever sent to client. // Will be missing on a cancelled build. @@ -1600,7 +1898,8 @@ message InvocationRecord { // If any, the name of the RE experiment config that was used by the RE // client in this build. string re_experiment_name = 23; - // Minimum build count among the targets that were involved in the command + // Minimum successful builds among the build counts for each target + // involved in the command. uint64 min_build_count_since_rebase = 24; // The number of cache uploads done by this build. uint64 cache_upload_count = 25; @@ -1641,6 +1940,8 @@ message InvocationRecord { optional uint64 system_total_memory_bytes = 46; // Information of file changes provided by file watcher FileWatcherStats file_watcher_stats = 47; + // How long did we spend querying the file watcher + optional uint64 file_watcher_duration_ms = 83; // Time elapsed from a build's start until the last action ends execution. optional uint64 time_to_last_action_execution_end_ms = 48; // Isolation directory. @@ -1651,6 +1952,7 @@ message InvocationRecord { optional uint64 sink_success_count = 51; optional uint64 sink_failure_count = 52; optional uint64 sink_dropped_count = 53; + optional uint64 sink_bytes_written = 101; optional uint64 sink_max_buffer_depth = 54; // Version number of watchman optional string watchman_version = 55; @@ -1663,7 +1965,6 @@ message InvocationRecord { google.protobuf.Duration concurrent_command_blocking_duration = 59; // How many analyses were executed optional uint64 analysis_count = 60; - optional bool exit_when_different_state = 62; optional string restarted_trace_id = 63; optional bool has_command_result = 64; optional uint64 compressed_event_log_size_bytes = 65; @@ -1680,13 +1981,85 @@ message InvocationRecord { repeated string concurrent_command_ids = 74; // The client has failed to connect to the daemon. optional bool daemon_connection_failure = 75; + // Daemon was started by this command. + // Unset if the command connected to an existing daemon or did not start one. + optional DaemonWasStartedReason daemon_was_started = 751; // Metadata provided by the client. Unlike TypedMetadata, this won't become // its own column in Scuba, all those entries will land in a NormVector. repeated ClientMetadata client_metadata = 76; - // The error messages that were shown to the user. - repeated string error_messages = 77; + // The errors that occurred during the command. + repeated ProcessedErrorReport errors = 79; + // The most interesting error tag that occurred during the command. + // Uppercase of `ErrorTag` enum variant, e.g. `STARLARK_FAIL`. + optional string best_error_tag = 82; // Cache hit rate as it appears in the console float cache_hit_rate = 78; + repeated string target_rule_type_names = 80; + // Time elapsed from a build's start until first test discovery begins. + optional uint64 time_to_first_test_discovery_ms = 81; + optional bool new_configs_used = 84; + // Max sustained RE download speed + optional uint64 re_max_download_speed = 85; + // Max sustained RE upload speed + optional uint64 re_max_upload_speed = 91; + // Optional - for install commands, the time elapsed between the last + // materialization event and the last install event.
+ google.protobuf.Duration install_duration = 86; + // Peak process memory which is max of RSS and malloc-active bytes + optional uint64 peak_process_memory_bytes = 87; + // How many buck configs changed, set only after config change + optional uint64 buckconfig_diff_count = 88; + // Approximate config change size in bytes: key + old value + new value + optional uint64 buckconfig_diff_size = 89; + optional uint64 event_log_manifold_ttl_s = 90; + optional uint64 peak_used_disk_space_bytes = 92; + optional uint64 total_disk_space_bytes = 93; + // Average RE download speed + optional uint64 re_avg_download_speed = 94; + // Average RE upload speed + optional uint64 re_avg_upload_speed = 95; + // The number of dep file uploads done by this build. + uint64 dep_file_upload_count = 96; + // The number of dep file uploads attempted by this build. + uint64 dep_file_upload_attempt_count = 97; + // Minimum attempted builds among the build counts for each target + // involved in the command. + uint64 min_attempted_build_count_since_rebase = 98; + // Metadata about devices installed to by an install command. + repeated DeviceMetadata install_device_metadata = 99; + // The category key for the error with best error tag. + optional string best_error_category_key = 100; + // The list of active network devices + repeated NetworkKind active_networks_kinds = 102; + // Configuration used during the build command. + optional TargetCfg target_cfg = 103; + // Source code revision where invocation happened + optional VersionControlRevision version_control_revision = 104; + + // Detailed per-backend RE stats. + optional uint64 zdb_download_queries = 200; + optional uint64 zdb_download_bytes = 201; + optional uint64 zdb_upload_queries = 202; + optional uint64 zdb_upload_bytes = 203; + + optional uint64 zgateway_download_queries = 210; + optional uint64 zgateway_download_bytes = 211; + optional uint64 zgateway_upload_queries = 212; + optional uint64 zgateway_upload_bytes = 213; + + optional uint64 manifold_download_queries = 220; + optional uint64 manifold_download_bytes = 221; + optional uint64 manifold_upload_queries = 222; + optional uint64 manifold_upload_bytes = 223; + + optional uint64 hedwig_download_queries = 230; + optional uint64 hedwig_download_bytes = 231; + optional uint64 hedwig_upload_queries = 232; + optional uint64 hedwig_upload_bytes = 233; + + // If `errors` is not empty, the category of the error + // assumed to have triggered the command failure. + optional string error_category = 234; } // Record event sent directly to scribe. @@ -1707,25 +2080,18 @@ message ClientMetadata { string value = 2; } -enum CacheUploadReason { - // The action was executed locally - LOCAL_EXECUTION = 0; - // The action has dep files that can be cached - DEP_FILE = 1; -} - message CacheUploadStart { + reserved 4; // A unique key identifying this action within the build. ActionKey key = 1; // A pair of category and identifier describing this action. ActionName name = 2; // The digest of the action being uploaded. string action_digest = 3; - // Reason for why this upload took place - CacheUploadReason reason = 4; } message CacheUploadEnd { + reserved 10; // A unique key identifying this action within the build. ActionKey key = 1; // A pair of category and identifier describing this action. @@ -1744,8 +2110,31 @@ message CacheUploadEnd { optional uint64 output_bytes = 8; // If a RE error occurred, the error code. 
optional string re_error_code = 9; - // Reason for why this upload took place - CacheUploadReason reason = 10; +} + +message DepFileUploadStart { + // A unique key identifying this action within the build. + ActionKey key = 1; + // A pair of category and identifier describing this action. + ActionName name = 2; + // The dep file key, or digest of the dep file action + string remote_dep_file_key = 3; +} + +message DepFileUploadEnd { + // A unique key identifying this action within the build. + ActionKey key = 1; + // A pair of category and identifier describing this action. + ActionName name = 2; + // The dep file key, or digest of the dep file action + string remote_dep_file_key = 3; + // Whether the upload was actually completed. This may be false if it wasn't + // representable in RE, or if an error occurred. + bool success = 4; + // An error, if any occurred. + string error = 5; + // If a RE error occurred, the error code. + optional string re_error_code = 9; } message CreateOutputSymlinksStart {}; @@ -1754,6 +2143,10 @@ message CreateOutputSymlinksEnd { uint64 created = 1; }; +message CreateOutputHashesFileStart {}; + +message CreateOutputHashesFileEnd {}; + message InstallEventInfoStart { string artifact_name = 1; string file_path = 2; @@ -1809,8 +2202,6 @@ message DiceSynchronizeSectionStart {} message DiceSynchronizeSectionEnd {} -message ExitWhenDifferentState {} - message BxlExecutionStart { string name = 1; } @@ -1846,21 +2237,50 @@ message DiceEqualityCheck { message NoActiveDiceState {} -enum ErrorCategory { - USER = 0; - INFRA = 1; -} - -enum ErrorCause { - INVALID_PACKAGE = 0; - DAEMON_IS_BUSY = 1; - // Add causes here as needed -} - message ErrorReport { - optional ErrorCategory category = 1; - optional ErrorCause cause = 2; - string error_message = 3; + reserved 2; + optional buck.data.error.ErrorTier tier = 1; + // The error message that is shown to users on the CLI + string message = 3; + // If different from `message`, the error message that is shown in the + // build report or logged in scuba. Currently, this distinction only exists + // for action errors - they show the full stderr in telemetry and in a + // separate event, but the error message only contains `failed to build target + // foo` + optional string telemetry_message = 4; + // The location in source code where this error was created. Typically a Rust + // source file, but the exact format is not guaranteed. + optional string source_location = 5; + repeated buck.data.error.ErrorTag tags = 6; + repeated string sub_error_categories = 7; + optional string category_key = 8; +} + +// Identical to `ErrorReport`, but with the tags converted to strings. +// This makes it easy to change those enums without having to worry about uses +// of this protobuf definition outside of buck2. Currently used in the +// invocation record and nowhere else. +message ProcessedErrorReport { + reserved 2; + optional buck.data.error.ErrorTier tier = 1; + string message = 3; + optional string telemetry_message = 4; + optional string source_location = 5; + repeated string tags = 6; + // `buck2_error` crate has logic for selecting the most interesting error tag + // among all error tags. This is that tag. + optional string best_tag = 7; + repeated string sub_error_categories = 8; + optional string category_key = 9; +} + +message CommandReport { + // A globally-unique ID (UUIDv4) to ensure it's for the right command.
+ string trace_id = 1; + // Command ExitCode + uint32 exit_code = 2; + // List of error messages seen during the command execution + repeated string error_messages = 3; } message MaterializerStateInfo { @@ -1876,8 +2296,14 @@ message ConcurrentCommands { repeated string trace_ids = 1; } -message PersistSubprocess { - repeated string errors = 1; +message PersistEventLogSubprocess { + repeated string local_error_messages = 1; + optional string local_error_category = 2; + bool local_success = 3; + repeated string remote_error_messages = 4; + optional string remote_error_category = 5; + bool remote_success = 6; + map<string, string> metadata = 8; } message StarlarkFailNoStacktrace { @@ -1919,6 +2345,14 @@ message DynamicLambdaStart { } } +message DynamicLambdaOwner { + oneof owner { + ConfiguredTargetLabel target_label = 1; + BxlFunctionKey bxl_key = 2; + AnonTarget anon_target = 3; + } +} + message DeferredEvaluationEnd {} // Wraps the RE upload stage of execution to report useful data on network @@ -1927,9 +2361,15 @@ message DeferredEvaluationEnd {} // dedicated spans. message ReUploadStart {} +message ReUploadMetrics { + uint64 digests_uploaded = 1; + uint64 bytes_uploaded = 2; +} + message ReUploadEnd { optional uint64 digests_uploaded = 1; optional uint64 bytes_uploaded = 2; + map<string, ReUploadMetrics> stats_by_extension = 3; } message ConnectToInstallerStart { @@ -1938,21 +2378,63 @@ message ConnectToInstallerStart { message ConnectToInstallerEnd {} +message DeviceMetadata { + message Entry { + string key = 1; + string value = 2; + } + repeated Entry entry = 1; +} + +message InstallFinished { + google.protobuf.Duration duration = 1; + repeated DeviceMetadata device_metadata = 2; +} + +message SystemInfo { + optional uint64 system_total_memory_bytes = 1; + optional uint64 memory_pressure_threshold_percent = 2; + optional uint64 total_disk_space_bytes = 3; + optional uint64 remaining_disk_space_threshold_gb = 4; + optional uint64 min_re_download_bytes_threshold = 5; + optional uint64 avg_re_download_bytes_per_sec_threshold = 6; + optional uint64 min_cache_hit_threshold_percent = 7; + optional uint64 cache_warning_min_completion_threshold_percent = 9; + optional uint64 cache_warning_min_actions_count = 10; +} + +message CpuCounter { + uint64 count = 1; + uint64 time_enabled = 2; + uint64 time_running = 3; +} + message CommandExecutionStats { optional uint64 cpu_instructions_user = 1; optional uint64 cpu_instructions_kernel = 2; + optional CpuCounter userspace_events = 3; + optional CpuCounter kernel_events = 4; +} + +enum NetworkKind { + WI_FI = 0; + ETHERNET = 1; + UNKNOWN_NET_KIND = 2; } message NetworkInterfaceStats { uint64 tx_bytes = 1; uint64 rx_bytes = 2; + NetworkKind network_kind = 3; } message TraceIoCommandStart {} message TraceIoCommandEnd {} -message SetupLocalResourcesStart {} +message SetupLocalResourcesStart { + ConfiguredTargetLabel target_label = 1; +} message SetupLocalResourcesEnd {} @@ -1963,3 +2445,28 @@ message ReleaseLocalResourcesEnd {} message RestartConfiguration { bool enable_restarter = 1; } + +// Events capturing commands to the deferred materializer. +// Currently only enabled with `-c buck2.verbose_materializer_event_log=true`.
+// Events are reported by the command processor, so they are logged +// when they are processed by the materializer, not when they are sent +// to the materializer. +message MaterializerCommand { + message Declare { + string path = 1; + } + + message Ensure { + repeated string paths = 1; + } + + message InvalidateFilePaths { + repeated string paths = 1; + } + + oneof data { + Declare declare = 1; + Ensure ensure = 2; + InvalidateFilePaths invalidate_file_paths = 3; + } +} diff --git a/app/buck2_data/error.proto b/app/buck2_data/error.proto new file mode 100644 index 0000000000000..81bd02dcb47c1 --- /dev/null +++ b/app/buck2_data/error.proto @@ -0,0 +1,191 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +syntax = "proto3"; + +// In protobuf, enum values are not namespaced in their type. That means that in +// any protobuf file, you can only have one enum variant with any given name. The +// only reason this file exists is to work around that limitation, especially +// for error types, which may often have a name that might also make sense +// elsewhere. +package buck.data.error; + +enum ErrorTier { + // Unused default; proto3 requires the first enum value to be zero. + UNUSED_DEFAULT_CATEGORY = 0; + // Unexpected errors in buck2 or core dependencies. + // It should be possible to eliminate these, in theory. + TIER0 = 1; + // Errors that may be triggered by issues with the host, + // resource limits, non-explicit dependencies or potentially + // ambiguous input errors. + // These can be tracked but not eliminated. + ENVIRONMENT = 3; + // Expected errors in inputs explicitly tracked by buck. + INPUT = 2; +} + +// Error types are - by design - restricted to being set exactly once at the +// error definition site. While they are useful, that means that they are not +// enough on their own to represent all the error metadata we want. Until we +// figure out what exactly a more complete model looks like, error tags are a +// mechanism that offers maximum flexibility - they can be added to any error +// anywhere. +// +// Feel free to continue using error types if you like. +enum ErrorTag { + // Same as above + UNUSED_DEFAULT_TAG = 0; + STARLARK_FAIL = 1; + STARLARK_STACK_OVERFLOW = 102; + WATCHMAN_TIMEOUT = 2; + WATCHMAN_REQUEST_ERROR = 201; + // Taken from watchman_client::Error + WATCHMAN_CONNECTION_ERROR = 202; + WATCHMAN_CONNECTION_LOST = 203; + WATCHMAN_CONNECTION_DISCOVERY = 204; + WATCHMAN_SERVER_ERROR = 205; + WATCHMAN_RESPONSE_ERROR = 206; + WATCHMAN_MISSING_FIELD = 207; + WATCHMAN_DESERIALIZE = 208; + WATCHMAN_SERIALIZE = 209; + WATCHMAN_CONNECT = 210; + WATCHMAN_ROOT_NOT_CONNECTED_ERROR = 211; + WATCHMAN_CHECKOUT_IN_PROGRESS = 212; + + HTTP = 3; + // Client error (4xx). + HTTP_CLIENT = 301; + // Server error (5xx). + HTTP_SERVER = 302; + // gRPC protocol error between client and server from the client side. + // - Protocol error (e.g. malformed frame, or too large frame) + // - Transport error (e.g. connection closed) + // - Not application error (e.g. bzl file not found) + CLIENT_GRPC = 4; + // Connect to buckd failed. + DAEMON_CONNECT = 5; + // Daemon is running another command. + DAEMON_IS_BUSY = 501; + // Daemon was preempted during preemptible command by another command. + DAEMON_PREEMPTED = 502; + // Too large gRPC message.
+ GRPC_RESPONSE_MESSAGE_TOO_LARGE = 6; + // `visibility`, `within_view`. + VISIBILITY = 8; + // Server stderr is empty. + SERVER_STDERR_EMPTY = 11; + // Server stderr indicates that the server panicked. + SERVER_PANICKED = 12; + // Server stack overflow. + SERVER_STACK_OVERFLOW = 13; + // SEGV. + SERVER_SEGV = 14; + // Server received SIGTERM + SERVER_SIGTERM = 400; + // Server disconnect with no error but memory pressure was detected. + SERVER_MEMORY_PRESSURE = 401; + // Jemalloc assertion failure. + SERVER_JEMALLOC_ASSERT = 15; + // The reason for server failure is unknown. + SERVER_STDERR_UNKNOWN = 19; + // Internal error in buck2. This is a bug. + INTERNAL_ERROR = 21; + // Artifact projection to a path that does not exist + PROJECT_MISSING_PATH = 22; + // The daemon reported that it was shutting down during the execution of this + // command + INTERRUPTED_BY_DAEMON_SHUTDOWN = 23; + // The daemon couldn't be killed + DAEMON_WONT_DIE_FROM_KILL = 24; + // No valid internal or VPNless certs could be found + NO_VALID_CERTS = 25; + // Build failed during materialization + MATERIALIZATION_ERROR = 26; + + // Errors during buck2 install. + INSTALL = 200; + + //// High level descriptions of the "phase" of the build during which the + // error occurred + ANALYSIS = 7; + ANY_ACTION_EXECUTION = 2000; + ANY_STARLARK_EVALUATION = 2001; + + ///// IO SECTION + // + // Indicates that the IO operation went through the standard system + // interfaces, and not through EdenIO - note that the operation may still have + // been accessing an Eden mount though + IO_SYSTEM = 1000; + // IO done on a source file in the repo + IO_SOURCE = 1010; + // The nature of the failure, designed after (but not identical to) Rust's + // `io::ErrorKind` + IO_NOT_FOUND = 1020; + IO_PERMISSION_DENIED = 1021; + IO_BROKEN_PIPE = 1022; + IO_STORAGE_FULL = 1023; + IO_EXECUTABLE_FILE_BUSY = 1024; + IO_CONNECTION_ABORTED = 1025; + IO_NOT_CONNECTED = 1026; + IO_TIMEOUT = 1027; + IO_WINDOWS_SHARING_VIOLATION = 1028; + + // + // Eden IO Section + // Indicates that the IO operation went through Eden + IO_EDEN = 1100; + // Failures indicating that Eden Failed to Connect or Mount + IO_EDEN_CONNECTION_ERROR = 1110; + IO_EDEN_REQUEST_ERROR = 1111; + IO_EDEN_MOUNT_DOES_NOT_EXIST = 1112; + IO_EDEN_MOUNT_NOT_READY = 1113; + // The underlying cause of request failures, copied from `edenfs::EdenError` + IO_EDEN_WIN32_ERROR = 1150; + IO_EDEN_HRESULT_ERROR = 1151; + IO_EDEN_ARGUMENT_ERROR = 1152; + IO_EDEN_GENERIC_ERROR = 1153; + IO_EDEN_MOUNT_GENERATION_CHANGED = 1154; + IO_EDEN_JOURNAL_TRUNCATED = 1155; + IO_EDEN_CHECKOUT_IN_PROGRESS = 1156; + IO_EDEN_OUT_OF_DATE_PARENT = 1157; + IO_EDEN_UNKNOWN_FIELD = 1160; + IO_MATERIALIZER_FILE_BUSY = 1161; + + // Client IO + // Broken pipe specifically from client stdio streams + IO_CLIENT_BROKEN_PIPE = 1201; + + ///// Remote Execution + RE_UNKNOWN_TCODE = 1300; + // RE TCode values: https://fburl.com/code/1ael5pmz + RE_CANCELLED = 1301; + RE_UNKNOWN = 1302; + RE_INVALID_ARGUMENT = 1303; + RE_DEADLINE_EXCEEDED = 1304; + RE_NOT_FOUND = 1305; + RE_ALREADY_EXISTS = 1306; + RE_PERMISSION_DENIED = 1307; + RE_RESOURCE_EXHAUSTED = 1308; + RE_FAILED_PRECONDITION = 1309; + RE_ABORTED = 1310; + RE_OUT_OF_RANGE = 1311; + RE_UNIMPLEMENTED = 1312; + RE_INTERNAL = 1313; + RE_UNAVAILABLE = 1314; + RE_DATA_LOSS = 1315; + RE_UNAUTHENTICATED = 1316; + + // Error during attribute configuration during target configuration. 
+ CONFIGURE_ATTR = 3001; + // Action execution + DOWNLOAD_FILE_HEAD_REQUEST = 4001; + // Tests + TEST_DEADLINE_EXPIRED = 5001; +} diff --git a/app/buck2_data/export_file_with.bzl b/app/buck2_data/export_file_with.bzl new file mode 100644 index 0000000000000..4a06b48d0d909 --- /dev/null +++ b/app/buck2_data/export_file_with.bzl @@ -0,0 +1,21 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# A version of the `export_file` rule that allows attaching some associated +# artifacts +def _impl(ctx): + out = ctx.attrs.src + out = out.with_associated_artifacts(ctx.attrs.attach) + return [DefaultInfo(default_output = out)] + +export_file_with = rule( + impl = _impl, + attrs = { + "attach": attrs.list(attrs.source()), + "src": attrs.source(), + }, +) diff --git a/app/buck2_data/src/action_key_owner.rs b/app/buck2_data/src/action_key_owner.rs index 98822be5a93aa..073d907643615 100644 --- a/app/buck2_data/src/action_key_owner.rs +++ b/app/buck2_data/src/action_key_owner.rs @@ -46,6 +46,22 @@ impl From<BaseDeferredKeyProto> for crate::critical_path_entry2::materialization } } +impl From<BaseDeferredKeyProto> for crate::DynamicLambdaOwner { + fn from(value: BaseDeferredKeyProto) -> Self { + Self { + owner: Some(match value { + BaseDeferredKeyProto::TargetLabel(t) => { + crate::dynamic_lambda_owner::Owner::TargetLabel(t) + } + BaseDeferredKeyProto::BxlKey(b) => crate::dynamic_lambda_owner::Owner::BxlKey(b), + BaseDeferredKeyProto::AnonTarget(a) => { + crate::dynamic_lambda_owner::Owner::AnonTarget(a) + } + }), + } + } +} + impl From<BaseDeferredKeyProto> for crate::dynamic_lambda_start::Owner { fn from(value: BaseDeferredKeyProto) -> Self { match value { diff --git a/app/buck2_data/src/lib.rs b/app/buck2_data/src/lib.rs index de73ed2e6fb12..f546313830a9e 100644 --- a/app/buck2_data/src/lib.rs +++ b/app/buck2_data/src/lib.rs @@ -7,12 +7,14 @@ * of this source tree. */ +#![feature(error_generic_member_access)] + use std::borrow::Cow; use std::fmt; pub mod action_key_owner; -mod serialize_timestamp { +pub mod serialize_timestamp { use serde::Deserialize; use serde::Deserializer; use serde::Serialize; @@ -93,6 +95,10 @@ mod serialize_action_kind { tonic::include_proto!("buck.data"); +pub mod error { + tonic::include_proto!("buck.data.error"); +} + /// Trait for things that can be converted into protobuf messages, for ease of emitting events. There are many core Buck /// types that are represented in the Daemon API that use this trait to ease conversion. pub trait ToProtoMessage { @@ -101,30 +107,6 @@ pub trait ToProtoMessage { fn as_proto(&self) -> Self::Message; } -/// Write out a human-readable description of the error tags -/// that is printed out in the context stack when program fails.
-impl fmt::Display for ErrorCategory { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let msg = match &self { - ErrorCategory::Infra => "This error is an internal Buck2 error", - ErrorCategory::User => "This error was caused by the end user", - }; - - write!(f, "{}", msg) - } -} - -impl fmt::Display for ErrorCause { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let msg = match &self { - ErrorCause::InvalidPackage => "The package is invalid", - ErrorCause::DaemonIsBusy => "Buck daemon is busy processing another command", - }; - - write!(f, "{}", msg) - } -} - impl fmt::Display for DaemonShutdown { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}, caller:", self.reason)?; @@ -198,7 +180,7 @@ pub mod serialize_duration_as_micros { } #[cfg(test)] - mod test { + mod tests { use super::*; #[test] diff --git a/app/buck2_directory/BUCK b/app/buck2_directory/BUCK new file mode 100644 index 0000000000000..b2bc19812814a --- /dev/null +++ b/app/buck2_directory/BUCK @@ -0,0 +1,28 @@ +load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") + +oncall("build_infra") + +rust_library( + name = "buck2_directory", + srcs = glob( + ["src/**/*.rs"], + ), + test_deps = [ + "fbsource//third-party/rust:anyhow", + "fbsource//third-party/rust:assert_matches", + ], + deps = [ + "fbsource//third-party/rust:dashmap", + "fbsource//third-party/rust:derivative", + "fbsource//third-party/rust:derive_more", + "fbsource//third-party/rust:either", + "//buck2/allocative/allocative:allocative", + "//buck2/app/buck2_core:buck2_core", + "//buck2/app/buck2_error:buck2_error", + "//buck2/app/buck2_util:buck2_util", + "//buck2/gazebo/dupe:dupe", + "//buck2/gazebo/gazebo:gazebo", + "//buck2/starlark-rust/starlark_map:starlark_map", + "//common/rust/shed/sorted_vector_map:sorted_vector_map", + ], +) diff --git a/app/buck2_directory/Cargo.toml b/app/buck2_directory/Cargo.toml new file mode 100644 index 0000000000000..e1cc9ef9dca25 --- /dev/null +++ b/app/buck2_directory/Cargo.toml @@ -0,0 +1,26 @@ +[package] +edition = "2021" +license = { workspace = true } +name = "buck2_directory" +repository = { workspace = true } +version = "0.1.0" + +[dependencies] +anyhow = { workspace = true } +dashmap = { workspace = true } +derivative = { workspace = true } +derive_more = { workspace = true } +either = { workspace = true } + +allocative = { workspace = true } +dupe = { workspace = true } +gazebo = { workspace = true } +sorted_vector_map = { workspace = true } +starlark_map = { workspace = true } + +buck2_core = { workspace = true } +buck2_error = { workspace = true } +buck2_util = { workspace = true } + +[dev-dependencies] +assert_matches = "1.5" diff --git a/app/buck2_directory/src/directory.rs b/app/buck2_directory/src/directory.rs new file mode 100644 index 0000000000000..3750e1d938409 --- /dev/null +++ b/app/buck2_directory/src/directory.rs @@ -0,0 +1,30 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +pub mod builder; +pub mod dashmap_directory_interner; +pub mod directory; +pub mod directory_data; +pub mod directory_hasher; +pub mod directory_iterator; +mod directory_mut; +pub mod directory_ref; +pub mod directory_selector; +pub mod entry; +mod exclusive_directory; +pub mod find; +pub mod fingerprinted_directory; +pub mod immutable_directory; +pub mod immutable_or_exclusive; +mod macros; +mod no_hasher; +mod path_accumulator; +pub mod shared_directory; +mod test; +pub mod walk; diff --git a/app/buck2_directory/src/directory/builder.rs b/app/buck2_directory/src/directory/builder.rs new file mode 100644 index 0000000000000..591792712fadb --- /dev/null +++ b/app/buck2_directory/src/directory/builder.rs @@ -0,0 +1,433 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::mem; + +use allocative::Allocative; +use buck2_core::directory_digest::DirectoryDigest; +use buck2_core::fs::paths::file_name::FileName; +use buck2_core::fs::paths::file_name::FileNameBuf; +use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; +use buck2_core::fs::paths::IntoFileNameBufIterator; +use derivative::Derivative; +use dupe::Clone_; +use dupe::Copy_; +use starlark_map::small_map::Entry; +use starlark_map::small_map::SmallMap; + +use crate::directory::directory::Directory; +use crate::directory::directory_data::DirectoryData; +use crate::directory::directory_hasher::DirectoryHasher; +use crate::directory::directory_mut::DirectoryMut; +use crate::directory::directory_ref::DirectoryRef; +use crate::directory::entry::DirectoryEntry; +use crate::directory::exclusive_directory::ExclusiveDirectory; +use crate::directory::find::find; +use crate::directory::find::DirectoryFindError; +use crate::directory::fingerprinted_directory::FingerprintedDirectory; +use crate::directory::immutable_directory::ImmutableDirectory; +use crate::directory::immutable_or_exclusive::ImmutableOrExclusiveDirectoryEntries; +use crate::directory::immutable_or_exclusive::ImmutableOrExclusiveDirectoryRef; +use crate::directory::path_accumulator::PathAccumulator; + +#[derive(Debug, buck2_error::Error)] +pub enum DirectoryInsertError { + #[error("Path is empty")] + EmptyPath, + + #[error("Insert conflicts with an existing leaf at path: `{}`", .path)] + CannotTraverseLeaf { path: PathAccumulator }, +} + +#[derive(Debug, buck2_error::Error)] +pub enum DirectoryMkdirError { + #[error("Mkdir conflicts with an existing leaf at path: `{}`", .path)] + CannotTraverseLeaf { path: PathAccumulator }, +} + +#[derive(Debug, buck2_error::Error)] +pub enum DirectoryMergeError { + #[error("Merge conflicts with an existing leaf at path: `{}`", .path)] + CannotTraverseLeaf { path: PathAccumulator }, +} + +/// A copy-on-write DirectoryBuilder. +#[derive(Derivative, Allocative)] +#[derivative(Debug(bound = "L: ::std::fmt::Debug"))] +#[derivative(Clone(bound = "L: ::std::clone::Clone"))] +pub enum DirectoryBuilder +where + H: DirectoryDigest, +{ + /// This has a dedicated copy and we can mutate it. 
+ Mutable(SmallMap, L>>), + Immutable(ImmutableDirectory), +} + +impl DirectoryBuilder +where + H: DirectoryDigest, +{ + pub fn empty() -> Self { + Self::Mutable(Default::default()) + } +} + +impl DirectoryBuilder +where + L: Clone, + H: DirectoryDigest, +{ + /// Insert the entry `val` at `path`. + /// + /// If this replaces a portion of the tree, Ok(Some) is returned. For example inserting a file + /// at `a/b` when the tree contains `a/b/c` would return a directory containing `c`, which is + /// the node that was replaced at `a/b`. No path is returned under those circumstances since + /// this can only happen at the input path. + /// + /// If this would conflict with an existing portion of the tree, Err is returned. This happens + /// when inserting at a path that traverses through an existing file. For example, inserting at + /// `a/b/c` when the current directory contains a file at `a/b` will return an error. The error + /// indicates the path where the conflict occurred. + pub fn insert( + &mut self, + path: impl IntoFileNameBufIterator, + val: DirectoryEntry, L>, + ) -> Result, L>>, DirectoryInsertError> { + let mut path = path.into_iter(); + + let path_needle = match path.next() { + Some(path_needle) => path_needle, + None => return Err(DirectoryInsertError::EmptyPath), + }; + + self.insert_inner(path_needle, path, val) + .map_err(|path| DirectoryInsertError::CannotTraverseLeaf { path }) + } + + fn insert_inner( + &mut self, + path_needle: FileNameBuf, + path_rest: impl IntoIterator, + val: DirectoryEntry, L>, + ) -> Result, L>>, PathAccumulator> { + let entries = self.as_mut(); + + let mut path_rest = path_rest.into_iter(); + let next_path_needle = path_rest.next(); + + match next_path_needle { + Some(next_path_needle) => match entries.entry(path_needle) { + Entry::Occupied(mut entry) => match entry.get_mut() { + DirectoryEntry::Dir(d) => d + .insert_inner(next_path_needle, path_rest, val) + .map_err(|acc| acc.with(entry.key())), + _ => Err(PathAccumulator::new(entry.key())), + }, + Entry::Vacant(entry) => { + let mut dir = DirectoryBuilder::empty(); + dir.insert_inner(next_path_needle, path_rest, val) + .map_err(|acc| acc.with(entry.key()))?; + entry.insert(DirectoryEntry::Dir(dir)); + Ok(None) + } + }, + None => Ok(entries.insert(path_needle, val)), + } + } + + /// Create a directory at path. If the directory already exists, this does nothing. If this + /// would overwrite a leaf, it fails. + pub fn mkdir(&mut self, path: impl IntoFileNameBufIterator) -> Result<(), DirectoryMkdirError> { + let path = path.into_iter(); + + self.mkdir_inner(path) + .map_err(|path| DirectoryMkdirError::CannotTraverseLeaf { path }) + } + + fn mkdir_inner( + &mut self, + path: impl IntoIterator, + ) -> Result<(), PathAccumulator> { + let entries = self.as_mut(); + + let mut path = path.into_iter(); + + let path_needle = match path.next() { + Some(p) => p, + None => return Ok(()), + }; + + match entries.entry(path_needle) { + Entry::Occupied(mut entry) => match entry.get_mut() { + DirectoryEntry::Dir(d) => { + d.mkdir_inner(path).map_err(|acc| acc.with(entry.key()))? 
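To make the `insert` semantics described above concrete, here is a minimal sketch, not part of the diff. It assumes the `buck2_directory::directory::*` module layout introduced in this change, a `DirectoryBuilder<L, H>` parameter order, and that `&ForwardRelativePath` satisfies `IntoFileNameBufIterator` (its use with `remove_prefix` below suggests it does):

```rust
use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath;
use buck2_directory::directory::builder::DirectoryBuilder;
use buck2_directory::directory::directory_hasher::NoDigest;
use buck2_directory::directory::entry::DirectoryEntry;

fn demo() {
    let mut b: DirectoryBuilder<String, NoDigest> = DirectoryBuilder::empty();

    // Fresh path: nothing is replaced, so we get Ok(None).
    let prev = b
        .insert(
            ForwardRelativePath::new("a/b").unwrap(),
            DirectoryEntry::Leaf("contents".to_owned()),
        )
        .unwrap();
    assert!(prev.is_none());

    // Inserting *through* the leaf at `a/b` must traverse a leaf: Err,
    // and the error reports the path where the conflict occurred.
    let conflict = b.insert(
        ForwardRelativePath::new("a/b/c").unwrap(),
        DirectoryEntry::Leaf("nested".to_owned()),
    );
    assert!(conflict.is_err());

    // Replacing `a/b` wholesale is fine and returns the replaced entry.
    let old = b
        .insert(
            ForwardRelativePath::new("a/b").unwrap(),
            DirectoryEntry::Leaf("new contents".to_owned()),
        )
        .unwrap();
    assert!(old.is_some());
}
```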
+ } + _ => return Err(PathAccumulator::new(entry.key())), + }, + Entry::Vacant(entry) => { + let mut dir = DirectoryBuilder::empty(); + dir.mkdir_inner(path).map_err(|acc| acc.with(entry.key()))?; + entry.insert(DirectoryEntry::Dir(dir)); + } + }; + + Ok(()) + } + + pub fn merge(&mut self, other: Self) -> Result<(), DirectoryMergeError> { + self.merge_inner(other) + .map_err(|path| DirectoryMergeError::CannotTraverseLeaf { path }) + } + + fn merge_inner(&mut self, mut other: Self) -> Result<(), PathAccumulator> { + match (&self, &other) { + (Self::Immutable(d1), Self::Immutable(d2)) if d1.fingerprint() == d2.fingerprint() => { + return Ok(()); + } + _ => {} + } + + let other = std::mem::take(other.as_mut()); + + let entries = self.as_mut(); + + for (k, v) in other.into_iter() { + match entries.entry(k) { + Entry::Occupied(mut entry) => match (entry.get_mut(), v) { + (DirectoryEntry::Dir(d), DirectoryEntry::Dir(o)) => { + d.merge_inner(o).map_err(|e| e.with(entry.key()))?; + } + (entry, DirectoryEntry::Leaf(o)) => { + *entry = DirectoryEntry::Leaf(o); + } + _ => return Err(PathAccumulator::new(entry.key())), + }, + Entry::Vacant(entry) => { + entry.insert(v); + } + } + } + + Ok(()) + } + + pub(super) fn as_mut( + &mut self, + ) -> &mut SmallMap, L>> { + if let Self::Mutable(ref mut dir) = self { + return dir; + }; + + let entries = match std::mem::replace(self, DirectoryBuilder::Mutable(Default::default())) { + Self::Immutable(d) => d.into_entries::>(), + Self::Mutable(..) => unreachable!(), + }; + + match self { + Self::Mutable(ref mut e) => { + *e = entries; + e + } + Self::Immutable(..) => unreachable!(), + } + } + + /// Remove everything under `path`. + pub fn remove_prefix( + &mut self, + path: &ForwardRelativePath, + ) -> Result, L>>, DirectoryFindError> { + // If this is already mut, we could skip `find` to avoid traversing twice. + match find(self.as_ref(), path)? 
{ + None => Ok(None), + Some(_) => Ok(Some(self.do_remove_prefix(path))), + } + } + + fn do_remove_prefix( + &mut self, + path: &ForwardRelativePath, + ) -> DirectoryEntry, L> { + let Some((path, last)) = path.split_last() else { + return DirectoryEntry::Dir(mem::replace(self, DirectoryBuilder::empty())); + }; + let mut this = self; + for name in path { + this = match this.as_mut().get_mut(name).unwrap() { + DirectoryEntry::Dir(d) => d, + DirectoryEntry::Leaf(_) => unreachable!(), + } + } + let removed = this.as_mut().shift_remove(last); + removed.unwrap() + } +} + +pub enum DirectoryBuilderDirectoryEntries<'a, L, H> +where + H: DirectoryDigest, +{ + Immutable(ImmutableOrExclusiveDirectoryEntries<'a, L, H>), + Mutable( + starlark_map::small_map::Iter<'a, FileNameBuf, DirectoryEntry, L>>, + ), +} + +impl<'a, L, H> Iterator for DirectoryBuilderDirectoryEntries<'a, L, H> +where + H: DirectoryDigest, +{ + type Item = ( + &'a FileName, + DirectoryEntry, &'a L>, + ); + + fn next(&mut self) -> Option { + match self { + Self::Immutable(iter) => { + let (name, entry) = iter.next()?; + Some((name, entry.map_dir(DirectoryBuilderDirectoryRef::Immutable))) + } + Self::Mutable(iter) => { + let (name, entry) = iter.next()?; + Some(( + name, + entry + .as_ref() + .map_dir(DirectoryBuilderDirectoryRef::Mutable), + )) + } + } + } + + fn size_hint(&self) -> (usize, Option) { + match self { + Self::Immutable(iter) => iter.size_hint(), + Self::Mutable(iter) => iter.size_hint(), + } + } +} + +#[derive(Copy_, Clone_, Derivative)] +#[derivative(Debug(bound = "L: ::std::fmt::Debug"))] +pub enum DirectoryBuilderDirectoryRef<'a, L, H> +where + H: DirectoryDigest, +{ + Immutable(ImmutableOrExclusiveDirectoryRef<'a, L, H>), + Mutable(&'a DirectoryBuilder), +} + +impl<'a, L, H> DirectoryRef<'a> for DirectoryBuilderDirectoryRef<'a, L, H> +where + H: DirectoryDigest, +{ + type Leaf = L; + type DirectoryDigest = H; + type Entries = DirectoryBuilderDirectoryEntries<'a, L, H>; + + fn get(self, name: &FileName) -> Option> { + match self { + DirectoryBuilderDirectoryRef::Immutable(d) => Some( + d.get(name)? + .map_dir(DirectoryBuilderDirectoryRef::Immutable), + ), + DirectoryBuilderDirectoryRef::Mutable(d) => match d { + DirectoryBuilder::Mutable(d) => Some( + d.get(name)? + .as_ref() + .map_dir(|v| DirectoryBuilderDirectoryRef::Mutable(v)), + ), + DirectoryBuilder::Immutable(d) => Some( + d.as_ref() + .get(name)? 
+ .map_dir(|v| DirectoryBuilderDirectoryRef::Immutable(v)), + ), + }, + } + } + + fn entries(self) -> Self::Entries { + match self { + Self::Immutable(d) => DirectoryBuilderDirectoryEntries::Immutable(d.entries()), + Self::Mutable(d) => match d { + DirectoryBuilder::Mutable(d) => DirectoryBuilderDirectoryEntries::Mutable(d.iter()), + DirectoryBuilder::Immutable(d) => DirectoryBuilderDirectoryEntries::Immutable( + ImmutableOrExclusiveDirectoryRef::from_immutable(d).entries(), + ), + }, + } + } + + fn as_dyn(self) -> &'a dyn Directory { + match self { + Self::Immutable(d) => d.as_dyn(), + Self::Mutable(d) => d, + } + } +} + +impl Directory for DirectoryBuilder +where + H: DirectoryDigest, +{ + type DirectoryRef<'a> = DirectoryBuilderDirectoryRef<'a, L, H> + where Self: Sized + 'a, + L: 'a; + + fn as_ref<'a>(&'a self) -> Self::DirectoryRef<'a> + where + Self: Sized + 'a, + { + DirectoryBuilderDirectoryRef::Mutable(self) + } + + fn to_builder(&self) -> DirectoryBuilder + where + L: Clone, + { + self.clone() + } +} + +impl DirectoryMut for DirectoryBuilder +where + H: DirectoryDigest, + L: Clone, +{ + fn get_mut<'a>( + &'a mut self, + needle: &'_ FileName, + ) -> Option, &'a mut L>> { + self.as_mut() + .get_mut(needle) + .map(|v| v.as_mut().map_dir(|d| d as &mut dyn DirectoryMut)) + } +} + +impl DirectoryBuilder +where + H: DirectoryDigest, +{ + pub fn fingerprint(self, hasher: &impl DirectoryHasher) -> ImmutableDirectory { + match self { + Self::Mutable(entries) => { + let entries = entries + .into_iter() + .map(|(k, v)| (k, v.map_dir(|v| v.fingerprint(hasher)))) + .collect(); + ImmutableDirectory::Exclusive(ExclusiveDirectory { + data: DirectoryData::new(entries, hasher), + }) + } + Self::Immutable(c) => c, + } + } +} diff --git a/app/buck2_core/src/directory/dashmap_directory_interner.rs b/app/buck2_directory/src/directory/dashmap_directory_interner.rs similarity index 76% rename from app/buck2_core/src/directory/dashmap_directory_interner.rs rename to app/buck2_directory/src/directory/dashmap_directory_interner.rs index 1e79cea4dcbc8..e4f8c39ed8fd8 100644 --- a/app/buck2_core/src/directory/dashmap_directory_interner.rs +++ b/app/buck2_directory/src/directory/dashmap_directory_interner.rs @@ -11,32 +11,34 @@ use std::sync::Arc; use std::sync::Weak; use allocative::Allocative; +use buck2_core::directory_digest::DirectoryDigest; +use buck2_core::directory_digest::InternableDirectoryDigest; +use buck2_util::hash::BuckHasherBuilder; use dashmap::mapref::entry::Entry; use dashmap::DashMap; use dupe::Clone_; use dupe::Dupe; use dupe::Dupe_; -use super::DirectoryDigest; -use super::SharedDirectory; -use super::SharedDirectoryData; -use super::SharedDirectoryInner; +use crate::directory::shared_directory::SharedDirectory; +use crate::directory::shared_directory::SharedDirectoryData; +use crate::directory::shared_directory::SharedDirectoryInner; #[derive(Dupe_, Clone_, Allocative)] pub struct DashMapDirectoryInterner where H: DirectoryDigest, { - inner: Arc>>>, + inner: Arc>, BuckHasherBuilder>>, } impl DashMapDirectoryInterner where - H: DirectoryDigest, + H: InternableDirectoryDigest, { pub fn new() -> Self { Self { - inner: Arc::new(DashMap::new()), + inner: Arc::new(DashMap::with_hasher(BuckHasherBuilder)), } } @@ -91,7 +93,16 @@ where SharedDirectory { inner: new_inner } } +} +impl DashMapDirectoryInterner +where + // Note: We "should" require `H: InternableDirectoryDigest` here; however, we can't do that + // because `Drop` impls having to be always-applicable would force us to require `H: + // 
InternableDirectoryDigest` on `ImmutableDirectory`. This should still be ok though, because + // you can't create a `SharedDirectory` for which that trait bound is not met. + H: DirectoryDigest, +{ /// Notify the interner that an entry has been removed. pub fn dropped(&self, data: &SharedDirectoryData) { // Note: we still check the count here, since you could hypothetically have a race where diff --git a/app/buck2_directory/src/directory/directory.rs b/app/buck2_directory/src/directory/directory.rs new file mode 100644 index 0000000000000..2e106f13076d7 --- /dev/null +++ b/app/buck2_directory/src/directory/directory.rs @@ -0,0 +1,79 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::fmt; + +use buck2_core::directory_digest::DirectoryDigest; +use buck2_core::fs::paths::file_name::FileName; + +use crate::directory::builder::DirectoryBuilder; +use crate::directory::directory_iterator::DirectoryIterator; +use crate::directory::directory_ref::DirectoryRef; +use crate::directory::entry::DirectoryEntry; +use crate::directory::walk::OrderedDirectoryWalk; +use crate::directory::walk::UnorderedDirectoryWalk; + +pub type DirectoryEntries<'a, L, H> = + Box, &'a L>)> + 'a>; + +/// A Directory that may or may not be fingerprinted. This means it only exposes the common +/// denominator of operations available on such Directories, which is to access entries in them. +pub trait Directory { + type DirectoryRef<'a>: DirectoryRef<'a, Leaf = L, DirectoryDigest = H> + where + Self: Sized + 'a, + L: 'a; + + fn as_ref<'a>(&'a self) -> Self::DirectoryRef<'a> + where + Self: Sized + 'a; + + fn unordered_walk<'a>(&'a self) -> UnorderedDirectoryWalk<'a, Self::DirectoryRef<'a>> + where + Self: Sized, + { + UnorderedDirectoryWalk::new(self.as_ref()) + } + + fn unordered_walk_leaves<'a>(&'a self) -> impl DirectoryIterator + where + Self: Sized, + H: 'a, + L: 'a, + { + self.unordered_walk().leaves() + } + + fn ordered_walk<'a>(&'a self) -> OrderedDirectoryWalk<'a, Self::DirectoryRef<'a>> + where + Self: Sized, + { + OrderedDirectoryWalk::new(self.as_ref()) + } + + fn ordered_walk_leaves<'a>(&'a self) -> impl DirectoryIterator + where + Self: Sized, + H: 'a, + L: 'a, + { + self.ordered_walk().leaves() + } + + fn to_builder(&self) -> DirectoryBuilder + where + L: Clone, + H: DirectoryDigest; +} + +impl<'a, L, H> fmt::Debug for &'a dyn Directory { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Directory") + } +} diff --git a/app/buck2_core/src/directory/directory_data.rs b/app/buck2_directory/src/directory/directory_data.rs similarity index 75% rename from app/buck2_core/src/directory/directory_data.rs rename to app/buck2_directory/src/directory/directory_data.rs index 59ddc5080c723..e4bfc3a79cf58 100644 --- a/app/buck2_core/src/directory/directory_data.rs +++ b/app/buck2_directory/src/directory/directory_data.rs @@ -7,25 +7,23 @@ * of this source tree. 
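The interner above keys directories by fingerprint and holds them through `Weak` references so that dropped directories can be evicted. A simplified, generic sketch of that weak-interning pattern (illustrative only, not the buck2 interface):

```rust
use std::sync::Arc;
use std::sync::Weak;

use dashmap::mapref::entry::Entry;
use dashmap::DashMap;

// Simplified weak interner: values are held via `Weak`, so entries whose
// last strong reference was dropped can be replaced or cleaned up later.
struct Interner<K: Eq + std::hash::Hash, V> {
    inner: Arc<DashMap<K, Weak<V>>>,
}

impl<K: Eq + std::hash::Hash, V> Interner<K, V> {
    fn intern(&self, key: K, make: impl FnOnce() -> V) -> Arc<V> {
        match self.inner.entry(key) {
            Entry::Occupied(mut e) => {
                // `upgrade` fails if all strong refs are gone but the entry
                // has not been evicted yet; recreate the value in that case.
                if let Some(v) = e.get().upgrade() {
                    return v;
                }
                let v = Arc::new(make());
                e.insert(Arc::downgrade(&v));
                v
            }
            Entry::Vacant(e) => {
                let v = Arc::new(make());
                e.insert(Arc::downgrade(&v));
                v
            }
        }
    }
}
```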
*/ -#![cfg_attr(feature = "gazebo_lint", allow(gazebo_lint_use_dupe))] - use std::marker::PhantomData; use allocative::Allocative; +use buck2_core::directory_digest::DirectoryDigest; +use buck2_core::fs::paths::file_name::FileNameBuf; use derivative::Derivative; use derive_more::Display; use sorted_vector_map::SortedVectorMap; -use super::DirectoryDigest; -use super::DirectoryEntry; -use super::DirectoryHasher; -use super::FingerprintedDirectory; -use crate::fs::paths::file_name::FileNameBuf; +use crate::directory::directory_hasher::DirectoryHasher; +use crate::directory::entry::DirectoryEntry; +use crate::directory::fingerprinted_directory::FingerprintedDirectory; #[derive(Derivative, Display, Allocative)] #[derivative(Debug(bound = "D: ::std::fmt::Debug, L: ::std::fmt::Debug"))] #[derivative(Clone(bound = "D: ::std::clone::Clone, L: ::std::clone::Clone"))] -#[display(fmt = "Directory({})", "self.fingerprint")] +#[display("Directory({})", self.fingerprint)] pub struct DirectoryData where H: DirectoryDigest, @@ -58,8 +56,11 @@ where entries: SortedVectorMap>, hasher: &impl DirectoryHasher, ) -> Self { - let fingerprint = - hasher.hash_entries(entries.iter().map(|(k, e)| (k.as_ref(), e.as_ref()))); + let fingerprint = hasher.hash_entries( + entries + .iter() + .map(|(k, e)| (k.as_ref(), e.as_ref().map_dir(|d| d.as_fingerprinted_ref()))), + ); Self { entries, fingerprint, diff --git a/app/buck2_directory/src/directory/directory_hasher.rs b/app/buck2_directory/src/directory/directory_hasher.rs new file mode 100644 index 0000000000000..c27524492a0d1 --- /dev/null +++ b/app/buck2_directory/src/directory/directory_hasher.rs @@ -0,0 +1,52 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::fmt::Debug; +use std::hash::Hash; + +use allocative::Allocative; +use buck2_core::directory_digest::DirectoryDigest; +use buck2_core::fs::paths::file_name::FileName; +use derive_more::Display; +use dupe::Dupe; + +use crate::directory::directory_ref::FingerprintedDirectoryRef; +use crate::directory::entry::DirectoryEntry; + +// TODO: Rename to DirectoryDigester +pub trait DirectoryHasher { + fn hash_entries<'a, D, I>(&self, entries: I) -> H + where + I: IntoIterator)>, + D: FingerprintedDirectoryRef<'a, Leaf = L, DirectoryDigest = H> + 'a, + L: 'a, + Self: Sized; +} + +#[derive(Clone, Debug, Eq, PartialEq, Hash, Allocative, Display)] +#[display("NoDigest")] +pub struct NoDigest(()); + +impl Dupe for NoDigest {} + +impl DirectoryDigest for NoDigest {} + +pub struct NoDigestDigester; + +impl DirectoryHasher for NoDigestDigester { + fn hash_entries<'a, D, I>(&self, _entries: I) -> NoDigest + where + I: IntoIterator)>, + D: FingerprintedDirectoryRef<'a, Leaf = L, DirectoryDigest = NoDigest>, + L: 'a, + Self: Sized, + { + NoDigest(()) + } +} diff --git a/app/buck2_directory/src/directory/directory_iterator.rs b/app/buck2_directory/src/directory/directory_iterator.rs new file mode 100644 index 0000000000000..6e1750571db77 --- /dev/null +++ b/app/buck2_directory/src/directory/directory_iterator.rs @@ -0,0 +1,181 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::fmt; +use std::mem; + +use buck2_core::fs::paths::file_name::FileName; +use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; + +use crate::directory::entry::DirectoryEntry; + +/// A trait shared by iterators on Directories. Unlike a regular Iterator, this returns an accessor +/// to give us the current path in addition to the current item (which borrows from the iterator +/// itself, which is why this cannot be an iterator). +pub trait DirectoryIterator: Sized { + /// The way this iterator will report its current path. + type PathStack<'a>: DirectoryIteratorPathStack + 'a + where + Self: 'a; + + /// The items this iterator will yield. + type Item; + + /// Provide the next item. + fn next<'a>(&'a mut self) -> Option<(Self::PathStack<'a>, Self::Item)>; + + /// Compute all paths in this iterator. This returns a regular Iterator since we no longer + /// need to borrow from self in next. + fn with_paths(self) -> DirectoryIteratorWithPaths { + DirectoryIteratorWithPaths { inner: self } + } + + /// Compute none of the paths in this iterator. Here again, this is a regular Iterator. + fn without_paths(self) -> DirectoryIteratorWithoutPaths { + DirectoryIteratorWithoutPaths { inner: self } + } + + /// Only take the paths from this iterator. + fn paths(self) -> impl Iterator { + self.with_paths().map(|(path, _)| path) + } + + fn filter_map(self, f: F) -> impl DirectoryIterator + where + F: FnMut(Self::Item) -> Option, + { + struct FilterMap { + inner: T, + f: F, + } + + impl DirectoryIterator for FilterMap + where + T: DirectoryIterator, + F: FnMut(T::Item) -> Option, + { + type PathStack<'a> = T::PathStack<'a> where Self: 'a; + type Item = B; + + fn next<'b>(&'b mut self) -> Option<(T::PathStack<'b>, B)> { + loop { + let (path, item) = self.inner.next()?; + if let Some(item) = (self.f)(item) { + // SAFETY: This is a complication introduced by the lending-iterator pattern + // of this trait. The compiler otherwise does not understand that our borrow + // of `self.inner` expires in the none case. However, because `item` does + // not have a lifetime tied to `'b`, it indeed does. + let path = + unsafe { mem::transmute::, T::PathStack<'b>>(path) }; + return Some((path, item)); + } + } + } + } + + FilterMap { inner: self, f } + } + + /// Only include leaves. + fn leaves(self) -> impl DirectoryIterator + where + Self: DirectoryIterator>, + { + self.filter_map(|entry| entry.leaf()) + } +} + +/// The stack of paths for this DirectoryIterator. This must allow iterating over the path +/// components that make up the DirectoryIterator's current location. +pub trait DirectoryIteratorPathStack { + fn path(&self) -> impl Iterator; + + fn get(&self) -> ForwardRelativePathBuf { + let mut path = ForwardRelativePathBuf::with_capacity_for_concat(self.path()); + path.extend(self.path()); + path + } +} + +/// A thin struct that can be used to produce a path on demand.
+pub struct DirectoryIteratorPathAccessor<'a, T> { + pub(super) stack: &'a T, + pub(super) leaf: Option<&'a FileName>, +} + +impl<'a, T> DirectoryIteratorPathStack for DirectoryIteratorPathAccessor<'a, T> +where + T: DirectoryIteratorPathStack, +{ + fn path(&self) -> impl Iterator { + self.stack.path().chain(self.leaf) + } +} + +impl<'a, T> DirectoryIteratorPathAccessor<'a, T> +where + T: DirectoryIteratorPathStack, +{ + pub fn name(&self) -> Option<&'a FileName> { + self.leaf + } +} + +impl<'a, T> fmt::Display for DirectoryIteratorPathAccessor<'a, T> +where + T: DirectoryIteratorPathStack, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.get()) + } +} + +impl<'a, T> fmt::Debug for DirectoryIteratorPathAccessor<'a, T> +where + T: DirectoryIteratorPathStack, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "DirectoryIteratorPathAccessor({})", self.get()) + } +} + +/// Iterate over a DirectoryIterator with the paths. +pub struct DirectoryIteratorWithPaths { + inner: T, +} + +impl Iterator for DirectoryIteratorWithPaths +where + T: DirectoryIterator, +{ + type Item = (ForwardRelativePathBuf, ::Item); + + fn next(&mut self) -> Option { + let (path, item) = self.inner.next()?; + let path = path.get(); + Some((path, item)) + } +} + +/// Iterate over a DirectoryIterator without the paths. +pub struct DirectoryIteratorWithoutPaths { + inner: T, +} + +impl Iterator for DirectoryIteratorWithoutPaths +where + T: DirectoryIterator, +{ + type Item = ::Item; + + fn next(&mut self) -> Option { + let (_, item) = self.inner.next()?; + Some(item) + } +} diff --git a/app/buck2_core/src/directory/directory_mut.rs b/app/buck2_directory/src/directory/directory_mut.rs similarity index 84% rename from app/buck2_core/src/directory/directory_mut.rs rename to app/buck2_directory/src/directory/directory_mut.rs index 0453c90479012..08b53a758977a 100644 --- a/app/buck2_core/src/directory/directory_mut.rs +++ b/app/buck2_directory/src/directory/directory_mut.rs @@ -9,9 +9,10 @@ use std::fmt; -use super::Directory; -use super::DirectoryEntry; -use crate::fs::paths::file_name::FileName; +use buck2_core::fs::paths::file_name::FileName; + +use crate::directory::directory::Directory; +use crate::directory::entry::DirectoryEntry; /// A directory that isn't fingerprinted, and as such is mutable; pub trait DirectoryMut: Directory { diff --git a/app/buck2_directory/src/directory/directory_ref.rs b/app/buck2_directory/src/directory/directory_ref.rs new file mode 100644 index 0000000000000..21cd0b4a72158 --- /dev/null +++ b/app/buck2_directory/src/directory/directory_ref.rs @@ -0,0 +1,43 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
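A small usage sketch of the adapters above (module paths and the `Directory<L, H>` parameter order are assumptions from this diff): walk a directory in order and collect every leaf path.

```rust
use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf;
use buck2_directory::directory::directory::Directory;
use buck2_directory::directory::directory_iterator::DirectoryIterator;

// Walk `d` depth-first in order, keep only leaves, and materialize each
// leaf's path via the `paths()` adapter.
fn leaf_paths<'a, L: 'a, H: 'a>(d: &'a impl Directory<L, H>) -> Vec<ForwardRelativePathBuf> {
    d.ordered_walk_leaves().paths().collect()
}
```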
+ */ + +use buck2_core::directory_digest::DirectoryDigest; +use buck2_core::fs::paths::file_name::FileName; + +use crate::directory::builder::DirectoryBuilder; +use crate::directory::directory::Directory; +use crate::directory::entry::DirectoryEntry; +use crate::directory::fingerprinted_directory::FingerprintedDirectory; + +pub trait DirectoryRef<'a>: Copy + 'a + Sized { + type Leaf; + type DirectoryDigest; + + type Entries: Iterator)>; + + fn get(self, name: &FileName) -> Option>; + + fn entries(self) -> Self::Entries; + + fn as_dyn(self) -> &'a dyn Directory; + + fn to_builder(self) -> DirectoryBuilder + where + Self::DirectoryDigest: DirectoryDigest, + Self::Leaf: Clone, + { + self.as_dyn().to_builder() + } +} + +pub trait FingerprintedDirectoryRef<'a>: DirectoryRef<'a> { + fn as_fingerprinted_dyn( + self, + ) -> &'a dyn FingerprintedDirectory; +} diff --git a/app/buck2_directory/src/directory/directory_selector.rs b/app/buck2_directory/src/directory/directory_selector.rs new file mode 100644 index 0000000000000..fe1d41fc67b72 --- /dev/null +++ b/app/buck2_directory/src/directory/directory_selector.rs @@ -0,0 +1,277 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::iter; + +use buck2_core::directory_digest::DirectoryDigest; +use buck2_core::fs::paths::file_name::FileName; +use buck2_core::fs::paths::file_name::FileNameBuf; +use buck2_core::fs::paths::IntoFileNameBufIterator; +use either::Either; +use starlark_map::small_map::SmallMap; + +use crate::directory::builder::DirectoryBuilder; +use crate::directory::directory::Directory; +use crate::directory::directory_iterator::DirectoryIterator; +use crate::directory::directory_iterator::DirectoryIteratorPathAccessor; +use crate::directory::directory_iterator::DirectoryIteratorPathStack; +use crate::directory::entry::DirectoryEntry; +use crate::directory::walk::OrderedDirectoryWalkType; +use crate::directory::walk::UnorderedDirectoryWalkType; +use crate::directory::walk::WalkType; + +#[derive(Debug, buck2_error::Error)] +pub enum DirectorySearchError { + #[error("Search traverses a leaf")] + CannotTraverseLeaf { leaf: L }, +} + +impl DirectorySearchError { + pub fn into_leaf(self) -> L { + let Self::CannotTraverseLeaf { leaf } = self; + leaf + } +} + +#[derive(Debug, buck2_error::Error)] +pub enum DirectoryFilterError { + #[error("Filter traverses a leaf")] + CannotTraverseLeaf, +} + +/// A query builder for filtering or search operations on directories. It's a tree of paths and +/// what action we want to take on them. +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum DirectorySelector { + /// Traverse only the entries that match this filename. + Traverse(SmallMap), + /// Take this entire tree. + Take, +} + +impl DirectorySelector { + pub fn empty() -> Self { + Self::Traverse(Default::default()) + } + + pub fn is_empty(&self) -> bool { + match self { + Self::Traverse(d) => d.is_empty(), + Self::Take => false, + } + } + + /// Add a path to this DirectorySelector.
+ pub fn select(&mut self, path: impl IntoFileNameBufIterator) { + let path = path.into_iter(); + self.select_inner(path) + } + + fn select_inner(&mut self, path: impl IntoIterator) { + let mut path = path.into_iter(); + let entry = match path.next() { + Some(e) => e, + None => { + *self = Self::Take; + return; + } + }; + + match self { + Self::Traverse(s) => s + .entry(entry) + .or_insert_with(DirectorySelector::empty) + .select_inner(path), + Self::Take => {} + } + } + + /// Filter a DirectoryBuilder by only retaining matching entries. + pub fn filter(&self, dir: &mut DirectoryBuilder) -> Result<(), DirectoryFilterError> + where + L: Clone, + H: DirectoryDigest, + { + let mut res = Ok(()); + + match self { + Self::Traverse(filter) => filter_inner(dir, filter, &mut res), + Self::Take => {} + }; + + res + } +} + +fn filter_inner( + dir: &mut DirectoryBuilder, + filter: &SmallMap, + res: &mut Result<(), DirectoryFilterError>, +) where + L: Clone, + H: DirectoryDigest, +{ + let dir = dir.as_mut(); + + let entries = std::mem::take(dir); + + for (k, mut v) in entries.into_iter() { + let selector = match filter.get(&k) { + Some(s) => s, + None => continue, + }; + + match selector { + DirectorySelector::Traverse(next_map) => match v { + DirectoryEntry::Dir(ref mut d) => filter_inner(d, next_map, res), + DirectoryEntry::Leaf(..) => { + *res = Err(DirectoryFilterError::CannotTraverseLeaf); + } + }, + DirectorySelector::Take => {} + }; + + dir.insert(k, v); + } +} + +/// Continue the search. +struct SearchFrame<'a, 'b, T: WalkType<'b>> { + search: &'a SmallMap, + name: Option<&'b FileName>, + entries: T::Entries, +} + +enum SearchInner<'a, 'b, T: WalkType<'b>> { + ReturnRoot(Option>), + Stack(Vec>), +} + +pub struct Search<'a, 'b, T: WalkType<'b>> { + inner: SearchInner<'a, 'b, T>, +} + +impl<'a, 'b, T: WalkType<'b>> Search<'a, 'b, T> { + pub fn new(selector: &'a DirectorySelector, root: T::Directory) -> Self { + match selector { + DirectorySelector::Traverse(ref search) => Search { + inner: SearchInner::Stack(vec![SearchFrame { + search, + name: None, + entries: T::directory_entries(root).into(), + }]), + }, + DirectorySelector::Take => Search { + inner: SearchInner::ReturnRoot(Some(DirectoryEntry::Dir(root))), + }, + } + } +} + +impl<'a, 'b, T: WalkType<'b>> DirectoryIterator for Search<'a, 'b, T> { + type PathStack<'c> = DirectoryIteratorPathAccessor<'c, Self> where Self: 'c; + type Item = + Result, DirectorySearchError<&'b T::Leaf>>; + + fn next<'c>(&'c mut self) -> Option<(DirectoryIteratorPathAccessor<'c, Self>, Self::Item)> { + match &mut self.inner { + SearchInner::ReturnRoot(root) => { + let root = root.take()?; + Some(( + DirectoryIteratorPathAccessor { + leaf: None, + stack: self, + }, + Ok(root), + )) + } + SearchInner::Stack(stack) => { + loop { + let frame = stack.last_mut()?; + + let SearchFrame { + search, entries, .. + } = frame; + + if let Some((name, entry)) = entries.next() { + let search = search.get(name); + + match search { + Some(DirectorySelector::Traverse(t)) => match entry { + // Traverse into this directory ... assuming it's a directory :) + DirectoryEntry::Dir(d) => { + stack.push(SearchFrame { + name: Some(name), + search: t, + entries: T::directory_entries(d).into(), + }); + continue; + } + DirectoryEntry::Leaf(leaf) => { + return Some(( + DirectoryIteratorPathAccessor { + leaf: Some(name), + stack: self, + }, + Err(DirectorySearchError::CannotTraverseLeaf { leaf }), + )); + } + }, + Some(DirectorySelector::Take) => { + // Return the entry. Do not traverse further. 
+ return Some(( + DirectoryIteratorPathAccessor { + leaf: Some(name), + stack: self, + }, + Ok(entry), + )); + } + None => { + // Ignore this node entirely. + continue; + } + } + } + + // We've exhausted this iterator. Go back to the previous stack frame. + stack.pop(); + } + } + } + } +} + +impl<'a, 'b, T: WalkType<'b>> DirectoryIteratorPathStack for Search<'a, 'b, T> { + fn path(&self) -> impl Iterator { + match &self.inner { + SearchInner::ReturnRoot(_) => Either::Left(iter::empty()), + SearchInner::Stack(stack) => Either::Right(stack.iter().filter_map(|frame| frame.name)), + } + } +} + +pub type UnorderedDirectorySearch<'a, 'b, D> = Search<'a, 'b, UnorderedDirectoryWalkType<'b, D>>; +pub type OrderedDirectorySearch<'a, 'b, D> = Search<'a, 'b, OrderedDirectoryWalkType<'b, D>>; + +impl DirectorySelector { + pub fn unordered_search<'a, 'b, L, H, D: Directory>( + &'a self, + dir: &'b D, + ) -> UnorderedDirectorySearch<'a, 'b, D::DirectoryRef<'b>> { + UnorderedDirectorySearch::new(self, dir.as_ref()) + } + + pub fn ordered_search<'a, 'b, L, H, D: Directory>( + &'a self, + dir: &'b D, + ) -> OrderedDirectorySearch<'a, 'b, D::DirectoryRef<'b>> { + OrderedDirectorySearch::new(self, dir.as_ref()) + } +} diff --git a/app/buck2_core/src/directory/entry.rs b/app/buck2_directory/src/directory/entry.rs similarity index 84% rename from app/buck2_core/src/directory/entry.rs rename to app/buck2_directory/src/directory/entry.rs index 8f1889e0ac6b7..32b248c845d51 100644 --- a/app/buck2_core/src/directory/entry.rs +++ b/app/buck2_directory/src/directory/entry.rs @@ -12,6 +12,8 @@ use derive_more::Display; use dupe::Dupe; use gazebo::variants::UnpackVariants; +use crate::directory::directory::Directory; + /// An entry in a Directory, parameterized by the type of children directories and the type of leaf /// nodes. We expect to be able to traverse directories, and we don't traverse leaves. #[derive( @@ -25,7 +27,7 @@ use gazebo::variants::UnpackVariants; UnpackVariants, Allocative )] -#[display(bound = "D: ::std::fmt::Display, L: ::std::fmt::Display")] +#[display(bound(D: ::std::fmt::Display, L: ::std::fmt::Display))] pub enum DirectoryEntry { Dir(D), Leaf(L), @@ -53,6 +55,16 @@ impl DirectoryEntry { } } + pub fn as_ref_dyn(&self) -> DirectoryEntry<&dyn Directory, &L> + where + D: Directory, + { + match self { + Self::Dir(d) => DirectoryEntry::Dir(d), + Self::Leaf(l) => DirectoryEntry::Leaf(l), + } + } + pub fn as_mut(&mut self) -> DirectoryEntry<&'_ mut D, &'_ mut L> { match self { Self::Dir(ref mut d) => DirectoryEntry::Dir(d), diff --git a/app/buck2_directory/src/directory/exclusive_directory.rs b/app/buck2_directory/src/directory/exclusive_directory.rs new file mode 100644 index 0000000000000..3d9e4a3b4a515 --- /dev/null +++ b/app/buck2_directory/src/directory/exclusive_directory.rs @@ -0,0 +1,131 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
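A hypothetical use of `DirectorySelector`, under the same assumptions about module layout and generic order: select a single path, then filter a builder down to it.

```rust
use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath;
use buck2_directory::directory::builder::DirectoryBuilder;
use buck2_directory::directory::directory_hasher::NoDigest;
use buck2_directory::directory::directory_selector::DirectorySelector;

// Keep only the subtree rooted at `a/b`, dropping every sibling.
fn keep_ab(builder: &mut DirectoryBuilder<String, NoDigest>) {
    let mut selector = DirectorySelector::empty();
    selector.select(ForwardRelativePath::new("a/b").unwrap());
    // Errs only if the selector would have to traverse through a leaf.
    selector.filter(builder).unwrap();
}
```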
+ */ + +use allocative::Allocative; +use buck2_core::directory_digest::DirectoryDigest; +use buck2_core::directory_digest::InternableDirectoryDigest; +use buck2_core::fs::paths::file_name::FileName; +use buck2_core::fs::paths::file_name::FileNameBuf; +use derivative::Derivative; +use derive_more::Display; + +use crate::directory::builder::DirectoryBuilder; +use crate::directory::dashmap_directory_interner::DashMapDirectoryInterner; +use crate::directory::directory::Directory; +use crate::directory::directory_data::DirectoryData; +use crate::directory::entry::DirectoryEntry; +use crate::directory::immutable_directory::ImmutableDirectory; +use crate::directory::immutable_or_exclusive::ImmutableOrExclusiveDirectoryRef; +use crate::directory::macros::impl_fingerprinted_directory; +use crate::directory::shared_directory::SharedDirectory; + +#[derive(Derivative, Display, Allocative)] +#[derivative(Debug(bound = "L: ::std::fmt::Debug"))] +#[derivative(Clone(bound = "L: ::std::clone::Clone"))] +#[display("{}", self.data)] +pub struct ExclusiveDirectory +where + H: DirectoryDigest, +{ + pub(super) data: DirectoryData, L, H>, +} + +impl ExclusiveDirectory +where + H: InternableDirectoryDigest, +{ + pub fn shared(self, interner: &DashMapDirectoryInterner) -> SharedDirectory { + if let Some(shared) = interner.get(self.fingerprint()) { + return shared; + } + + let DirectoryData { + entries, + fingerprint, + _hash, + } = self.data; + + let entries = entries + .into_iter() + .map(|(k, v)| (k, v.map_dir(|d| d.shared(interner)))) + .collect(); + + let new_data = DirectoryData { + entries, + fingerprint, + _hash, + }; + + interner.intern(new_data) + } +} + +impl ExclusiveDirectory +where + H: DirectoryDigest, +{ + pub fn into_entries(self) -> C + where + C: FromIterator<(FileNameBuf, DirectoryEntry, L>)>, + { + self.data + .entries + .into_iter() + .map(|(k, v)| (k, v.map_dir(|v| v.into_builder()))) + .collect() + } + + pub fn entries( + &self, + ) -> impl IntoIterator, L>)> + '_ + { + &self.data.entries + } + + pub fn get<'a>( + &'a self, + needle: &'_ FileName, + ) -> Option, &'a L>> { + self.data.entries.get(needle).as_ref().map(|v| v.as_ref()) + } + + pub fn fingerprint(&self) -> &H { + self.data.fingerprint() + } + + pub fn into_builder(self) -> DirectoryBuilder { + DirectoryBuilder::Immutable(ImmutableDirectory::Exclusive(self)) + } +} + +impl Directory for ExclusiveDirectory +where + H: DirectoryDigest, +{ + type DirectoryRef<'a> = ImmutableOrExclusiveDirectoryRef<'a, L, H> + where + Self: Sized + 'a, + L: 'a; + + fn as_ref<'a>(&'a self) -> ImmutableOrExclusiveDirectoryRef<'a, L, H> + where + Self: Sized + 'a, + { + ImmutableOrExclusiveDirectoryRef::Exclusive(self) + } + + fn to_builder(&self) -> DirectoryBuilder + where + L: Clone, + { + self.clone().into_builder() + } +} + +impl_fingerprinted_directory!(ExclusiveDirectory); diff --git a/app/buck2_directory/src/directory/find.rs b/app/buck2_directory/src/directory/find.rs new file mode 100644 index 0000000000000..ea4302340ca95 --- /dev/null +++ b/app/buck2_directory/src/directory/find.rs @@ -0,0 +1,135 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use buck2_core::fs::paths::file_name::FileName; + +use crate::directory::directory_ref::DirectoryRef; +use crate::directory::entry::DirectoryEntry; +use crate::directory::path_accumulator::PathAccumulator; + +#[derive(Debug, buck2_error::Error)] +pub enum DirectoryFindError { + #[error("Find would traverse a leaf at path: `{}`", .path)] + CannotTraverseLeaf { path: PathAccumulator }, +} + +trait FindConflict { + fn new<'b>(path: &'b FileName, remaining: impl Iterator, leaf: T) -> Self; + + fn with<'b>(self, path: &'b FileName) -> Self; +} + +impl FindConflict for PathAccumulator { + fn new<'b>( + path: &'b FileName, + _remaining: impl Iterator, + _leaf: T, + ) -> Self { + PathAccumulator::new(path) + } + + fn with<'b>(self, path: &'b FileName) -> Self { + PathAccumulator::with(self, path) + } +} + +#[cfg(test)] +struct PrefixLookupContainer { + leaf: T, + path: buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf, +} + +#[cfg(test)] +impl FindConflict for PrefixLookupContainer { + fn new<'b>(path: &'b FileName, remaining: impl Iterator, leaf: T) -> Self { + Self { + leaf, + path: std::iter::once(path).chain(remaining).collect(), + } + } + + fn with<'b>(self, _path: &'b FileName) -> Self { + self + } +} + +pub fn find<'a, 'b, D: DirectoryRef<'a>>( + dir: D, + path: impl IntoIterator, +) -> Result>, DirectoryFindError> { + let mut path = path.into_iter(); + + let path_needle = match path.next() { + Some(path_needle) => path_needle, + None => return Ok(Some(DirectoryEntry::Dir(dir))), + }; + + find_inner::<_, PathAccumulator>(dir, path_needle, path) + .map_err(move |path| DirectoryFindError::CannotTraverseLeaf { path }) +} + +#[cfg(test)] // Dead code. +pub(crate) fn find_prefix<'a, 'b, D: DirectoryRef<'a>>( + dir: D, + path: impl IntoIterator, +) -> Result< + Option<( + DirectoryEntry, + // Remaining path. + buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf, + )>, + DirectoryFindError, +> { + let mut path = path.into_iter(); + + let path_needle = match path.next() { + Some(path_needle) => path_needle, + None => { + return Ok(Some(( + DirectoryEntry::Dir(dir), + buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf::default(), + ))); + } + }; + + match find_inner::<_, PrefixLookupContainer<&'a D::Leaf>>(dir, path_needle, path) { + Ok(maybe_leaf) => Ok(maybe_leaf.map(|l| { + ( + l, + buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf::default(), + ) + })), + Err(PrefixLookupContainer { leaf, path }) => Ok(Some((DirectoryEntry::Leaf(leaf), path))), + } +} + +fn find_inner<'a, 'b, D: DirectoryRef<'a>, A>( + dir: D, + path_needle: &'b FileName, + mut path_rest: impl Iterator, +) -> Result>, A> +where + A: FindConflict<&'a D::Leaf>, +{ + let entry = match dir.get(path_needle) { + Some(entry) => entry, + None => return Ok(None), + }; + + let next_path_needle = match path_rest.next() { + Some(next_path_needle) => next_path_needle, + None => return Ok(Some(entry)), + }; + + match entry { + DirectoryEntry::Dir(dir) => find_inner::<_, A>(dir, next_path_needle, path_rest) + .map_err(|acc| acc.with(path_needle)), + DirectoryEntry::Leaf(leaf) => Err(A::new(next_path_needle, path_rest, leaf)), + } +} diff --git a/app/buck2_directory/src/directory/fingerprinted_directory.rs b/app/buck2_directory/src/directory/fingerprinted_directory.rs new file mode 100644 index 0000000000000..8f7efaa172de0 --- /dev/null +++ b/app/buck2_directory/src/directory/fingerprinted_directory.rs @@ -0,0 +1,36 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
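A sketch of calling `find` as defined above, assuming `&ForwardRelativePath` iterates its `&FileName` components (which `buck2_core` paths provide):

```rust
use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath;
use buck2_directory::directory::directory_ref::DirectoryRef;
use buck2_directory::directory::entry::DirectoryEntry;
use buck2_directory::directory::find::find;
use buck2_directory::directory::find::DirectoryFindError;

// Look up `a/b` inside `dir`. Ok(None) means the path simply is not there;
// Err means the walk ran into a leaf partway down (the error carries where).
fn lookup<'a, D: DirectoryRef<'a>>(
    dir: D,
) -> Result<Option<DirectoryEntry<D, &'a D::Leaf>>, DirectoryFindError> {
    find(dir, ForwardRelativePath::new("a/b").unwrap())
}
```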
+ * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::fmt; + +use buck2_core::directory_digest::DirectoryDigest; + +use crate::directory::directory::Directory; +use crate::directory::directory_ref::FingerprintedDirectoryRef; + +pub trait FingerprintedDirectory: Directory { + type FingerprintedDirectoryRef<'a>: FingerprintedDirectoryRef<'a, Leaf = L, DirectoryDigest = H> + where + Self: Sized + 'a, + L: 'a; + + fn as_fingerprinted_ref<'a>(&'a self) -> Self::FingerprintedDirectoryRef<'a> + where + Self: Sized + 'a; + + fn fingerprint(&self) -> &H + where + H: DirectoryDigest; +} + +impl<'a, L, H> fmt::Debug for &'a dyn FingerprintedDirectory { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "FingerprintedDirectory") + } +} diff --git a/app/buck2_directory/src/directory/immutable_directory.rs b/app/buck2_directory/src/directory/immutable_directory.rs new file mode 100644 index 0000000000000..6599d27c6e399 --- /dev/null +++ b/app/buck2_directory/src/directory/immutable_directory.rs @@ -0,0 +1,134 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use allocative::Allocative; +use buck2_core::directory_digest::DirectoryDigest; +use buck2_core::directory_digest::InternableDirectoryDigest; +use buck2_core::fs::paths::file_name::FileNameBuf; +use derivative::Derivative; +use derive_more::Display; + +use crate::directory::builder::DirectoryBuilder; +use crate::directory::dashmap_directory_interner::DashMapDirectoryInterner; +use crate::directory::directory::Directory; +use crate::directory::entry::DirectoryEntry; +use crate::directory::exclusive_directory::ExclusiveDirectory; +use crate::directory::fingerprinted_directory::FingerprintedDirectory; +use crate::directory::immutable_or_exclusive::ImmutableOrExclusiveDirectoryRef; +use crate::directory::shared_directory::SharedDirectory; + +#[derive(Derivative, Display, Allocative)] +#[derivative(Debug(bound = "L: ::std::fmt::Debug"))] +#[derivative(Clone(bound = "L: ::std::clone::Clone"))] +pub enum ImmutableDirectory +where + H: DirectoryDigest, +{ + Exclusive(ExclusiveDirectory), + Shared(SharedDirectory), +} + +impl ImmutableDirectory +where + H: InternableDirectoryDigest, +{ + pub fn shared(self, interner: &DashMapDirectoryInterner) -> SharedDirectory { + match self { + Self::Exclusive(dir) => dir.shared(interner), + Self::Shared(dir) => dir, + } + } +} + +impl ImmutableDirectory +where + H: DirectoryDigest, +{ + pub fn into_builder(self) -> DirectoryBuilder { + match self { + Self::Exclusive(d) => d.into_builder(), + Self::Shared(s) => s.into_builder(), + } + } +} + +impl ImmutableDirectory +where + L: Clone, + H: DirectoryDigest, +{ + pub fn into_entries(self) -> C + where + C: FromIterator<(FileNameBuf, DirectoryEntry, L>)>, + { + match self { + Self::Exclusive(dir) => dir.into_entries(), + Self::Shared(dir) => dir.into_entries(), + } + } +} + +impl Directory for ImmutableDirectory +where + H: DirectoryDigest, +{ + type DirectoryRef<'a> = ImmutableOrExclusiveDirectoryRef<'a, L, H> + where + Self: Sized + 'a, + L: 'a; + + fn 
as_ref<'a>(&'a self) -> Self::DirectoryRef<'a> + where + Self: Sized + 'a, + { + ImmutableOrExclusiveDirectoryRef::from_immutable(self) + } + + fn to_builder(&self) -> DirectoryBuilder + where + L: Clone, + { + self.clone().into_builder() + } +} + +impl FingerprintedDirectory for ImmutableDirectory +where + H: DirectoryDigest, +{ + type FingerprintedDirectoryRef<'a> = ImmutableOrExclusiveDirectoryRef<'a, L, H> + where + Self: Sized + 'a, + L: 'a; + + fn as_fingerprinted_ref<'a>(&'a self) -> Self::FingerprintedDirectoryRef<'a> + where + Self: Sized + 'a, + { + self.as_ref() + } + + fn fingerprint(&self) -> &H { + match self { + Self::Exclusive(dir) => FingerprintedDirectory::fingerprint(dir), + Self::Shared(dir) => FingerprintedDirectory::fingerprint(dir), + } + } +} + +impl PartialEq for ImmutableDirectory +where + H: DirectoryDigest, +{ + fn eq(&self, other: &Self) -> bool { + self.fingerprint() == other.fingerprint() + } +} + +impl Eq for ImmutableDirectory where H: DirectoryDigest {} diff --git a/app/buck2_directory/src/directory/immutable_or_exclusive.rs b/app/buck2_directory/src/directory/immutable_or_exclusive.rs new file mode 100644 index 0000000000000..cb931eea4efcb --- /dev/null +++ b/app/buck2_directory/src/directory/immutable_or_exclusive.rs @@ -0,0 +1,148 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use buck2_core::directory_digest::DirectoryDigest; +use buck2_core::fs::paths::file_name::FileName; +use buck2_core::fs::paths::file_name::FileNameBuf; +use derivative::Derivative; +use dupe::Clone_; +use dupe::Copy_; + +use crate::directory::directory::Directory; +use crate::directory::directory_ref::DirectoryRef; +use crate::directory::directory_ref::FingerprintedDirectoryRef; +use crate::directory::entry::DirectoryEntry; +use crate::directory::exclusive_directory::ExclusiveDirectory; +use crate::directory::fingerprinted_directory::FingerprintedDirectory; +use crate::directory::immutable_directory::ImmutableDirectory; +use crate::directory::shared_directory::SharedDirectory; + +pub enum ImmutableOrExclusiveDirectoryEntries<'a, L, H> +where + H: DirectoryDigest, +{ + Immutable( + sorted_vector_map::map::Iter<'a, FileNameBuf, DirectoryEntry, L>>, + ), + Shared(sorted_vector_map::map::Iter<'a, FileNameBuf, DirectoryEntry, L>>), +} + +impl<'a, L, H> Iterator for ImmutableOrExclusiveDirectoryEntries<'a, L, H> +where + H: DirectoryDigest, +{ + type Item = ( + &'a FileName, + DirectoryEntry, &'a L>, + ); + + fn next(&mut self) -> Option { + match self { + Self::Immutable(iter) => { + let (name, entry) = iter.next()?; + Some(( + name, + entry + .as_ref() + .map_dir(ImmutableOrExclusiveDirectoryRef::from_immutable), + )) + } + Self::Shared(iter) => { + let (name, entry) = iter.next()?; + Some(( + name, + entry + .as_ref() + .map_dir(ImmutableOrExclusiveDirectoryRef::Shared), + )) + } + } + } + + fn size_hint(&self) -> (usize, Option) { + match self { + Self::Immutable(iter) => iter.size_hint(), + Self::Shared(iter) => iter.size_hint(), + } + } +} + +#[derive(Copy_, Clone_, Derivative)] +#[derivative(Debug(bound = "L: ::std::fmt::Debug"))] +pub enum ImmutableOrExclusiveDirectoryRef<'a, L, H> +where + H: DirectoryDigest, +{ + Exclusive(&'a ExclusiveDirectory), + Shared(&'a SharedDirectory), +} + +impl<'a, L, H> 
ImmutableOrExclusiveDirectoryRef<'a, L, H> +where + H: DirectoryDigest, +{ + pub fn from_immutable(dir: &'a ImmutableDirectory) -> Self { + match dir { + ImmutableDirectory::Exclusive(d) => Self::Exclusive(d), + ImmutableDirectory::Shared(d) => Self::Shared(d), + } + } +} + +impl<'a, L, H> DirectoryRef<'a> for ImmutableOrExclusiveDirectoryRef<'a, L, H> +where + H: DirectoryDigest, +{ + type Leaf = L; + type DirectoryDigest = H; + type Entries = ImmutableOrExclusiveDirectoryEntries<'a, L, H>; + + fn get(self, name: &FileName) -> Option> { + match self { + Self::Exclusive(dir) => dir + .get(name) + .map(|entry| entry.map_dir(ImmutableOrExclusiveDirectoryRef::from_immutable)), + Self::Shared(dir) => dir + .get(name) + .map(|entry| entry.map_dir(ImmutableOrExclusiveDirectoryRef::Shared)), + } + } + + fn entries(self) -> Self::Entries { + match self { + Self::Exclusive(dir) => { + ImmutableOrExclusiveDirectoryEntries::Immutable(dir.data.entries.iter()) + } + Self::Shared(dir) => { + ImmutableOrExclusiveDirectoryEntries::Shared(dir.inner.data.entries.iter()) + } + } + } + + fn as_dyn(self) -> &'a dyn Directory { + match self { + Self::Exclusive(dir) => dir, + Self::Shared(dir) => dir, + } + } +} + +impl<'a, L, H> FingerprintedDirectoryRef<'a> for ImmutableOrExclusiveDirectoryRef<'a, L, H> +where + H: DirectoryDigest, +{ + fn as_fingerprinted_dyn( + self, + ) -> &'a dyn FingerprintedDirectory { + match self { + Self::Exclusive(dir) => dir, + Self::Shared(dir) => dir, + } + } +} diff --git a/app/buck2_directory/src/directory/macros.rs b/app/buck2_directory/src/directory/macros.rs new file mode 100644 index 0000000000000..0217db68b221b --- /dev/null +++ b/app/buck2_directory/src/directory/macros.rs @@ -0,0 +1,45 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +macro_rules! impl_fingerprinted_directory { + ( + $this: ident + ) => { + impl $crate::directory::fingerprinted_directory::FingerprintedDirectory for $this + where + H: DirectoryDigest, + { + type FingerprintedDirectoryRef<'a> = >::DirectoryRef<'a> + where + Self: Sized + 'a, + L: 'a; + + fn as_fingerprinted_ref<'a>(&'a self) -> Self::FingerprintedDirectoryRef<'a> where Self: Sized + 'a { + self.as_ref() + } + + fn fingerprint(&self) -> &H { + $this::fingerprint(self) + } + } + + impl PartialEq for $this + where + H: DirectoryDigest, + { + fn eq(&self, other: &Self) -> bool { + self.fingerprint() == other.fingerprint() + } + } + + impl Eq for $this where H: DirectoryDigest {} + }; +} + +pub(super) use impl_fingerprinted_directory; diff --git a/app/buck2_directory/src/directory/no_hasher.rs b/app/buck2_directory/src/directory/no_hasher.rs new file mode 100644 index 0000000000000..430bb9195cc38 --- /dev/null +++ b/app/buck2_directory/src/directory/no_hasher.rs @@ -0,0 +1,26 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */
+
+use std::fmt;
+
+use allocative::Allocative;
+use derive_more::Display;
+use dupe::Dupe;
+
+#[allow(unused)]
+#[derive(Eq, PartialEq, Copy, Clone, Debug, Hash, Allocative)]
+pub struct NoHash(!);
+
+impl Display for NoHash {
+    fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.0
+    }
+}
+
+impl Dupe for NoHash {}
diff --git a/app/buck2_core/src/directory/path_accumulator.rs b/app/buck2_directory/src/directory/path_accumulator.rs
similarity index 95%
rename from app/buck2_core/src/directory/path_accumulator.rs
rename to app/buck2_directory/src/directory/path_accumulator.rs
index 2220733d8c48e..d6d0650fb2b05 100644
--- a/app/buck2_core/src/directory/path_accumulator.rs
+++ b/app/buck2_directory/src/directory/path_accumulator.rs
@@ -9,8 +9,8 @@
 use std::fmt;
 
-use crate::fs::paths::file_name::FileName;
-use crate::fs::paths::file_name::FileNameBuf;
+use buck2_core::fs::paths::file_name::FileName;
+use buck2_core::fs::paths::file_name::FileNameBuf;
 
 /// Accumulate path components in reverse order. This is used to show the path where an issue
 /// occurred in Directory operations.
diff --git a/app/buck2_directory/src/directory/shared_directory.rs b/app/buck2_directory/src/directory/shared_directory.rs
new file mode 100644
index 0000000000000..a19237438171d
--- /dev/null
+++ b/app/buck2_directory/src/directory/shared_directory.rs
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::sync::Arc;
+
+use allocative::Allocative;
+use buck2_core::directory_digest::DirectoryDigest;
+use buck2_core::fs::paths::file_name::FileName;
+use buck2_core::fs::paths::file_name::FileNameBuf;
+use derivative::Derivative;
+use derive_more::Display;
+use dupe::Clone_;
+use dupe::Dupe_;
+
+use crate::directory::builder::DirectoryBuilder;
+use crate::directory::dashmap_directory_interner::DashMapDirectoryInterner;
+use crate::directory::directory::Directory;
+use crate::directory::directory_data::DirectoryData;
+use crate::directory::directory_ref::DirectoryRef;
+use crate::directory::directory_ref::FingerprintedDirectoryRef;
+use crate::directory::entry::DirectoryEntry;
+use crate::directory::fingerprinted_directory::FingerprintedDirectory;
+use crate::directory::immutable_directory::ImmutableDirectory;
+use crate::directory::macros::impl_fingerprinted_directory;
+
+pub type SharedDirectoryData<L, H> = DirectoryData<SharedDirectory<L, H>, L, H>;
+
+#[derive(Derivative, Display, Allocative)]
+#[derivative(Debug(bound = "L: ::std::fmt::Debug"))]
+#[display("{}", self.data)]
+pub struct SharedDirectoryInner<L, H>
+where
+    H: DirectoryDigest,
+{
+    pub(super) data: SharedDirectoryData<L, H>,
+
+    #[derivative(Debug = "ignore")]
+    pub(super) interner: DashMapDirectoryInterner<L, H>,
+}
+
+impl<L, H> Drop for SharedDirectoryInner<L, H>
+where
+    H: DirectoryDigest,
+{
+    fn drop(&mut self) {
+        self.interner.dropped(&self.data)
+    }
+}
+
+#[derive(Derivative, Clone_, Dupe_, Display, Allocative)]
+#[derivative(Debug(bound = "L: ::std::fmt::Debug"))]
+#[display("{}", self.inner)]
+pub struct SharedDirectory<L, H>
+where
+    H: DirectoryDigest,
+{
+    pub(super) inner: Arc<SharedDirectoryInner<L, H>>,
+}
+
+impl<L, H> SharedDirectory<L, H>
+where
+    H: DirectoryDigest,
+{
+    pub fn as_immutable(self) -> ImmutableDirectory<L, H> {
+        ImmutableDirectory::Shared(self)
+    }
+
+    pub fn entries(
+        &self,
+    ) -> impl IntoIterator<Item = (&FileNameBuf, &DirectoryEntry<SharedDirectory<L, H>, L>)> + '_
+    {
+        &self.inner.data.entries
+    }
+
+    pub fn get<'a>(
+        &'a self,
+        needle: &'_ FileName,
+    ) -> Option<DirectoryEntry<&'a SharedDirectory<L, H>, &'a L>> {
+        self.inner
+            .data
+            .entries
+            .get(needle)
+            .as_ref()
+            .map(|v| v.as_ref())
+    }
+
+    pub fn fingerprint(&self) -> &H {
+        self.inner.data.fingerprint()
+    }
+
+    pub fn into_builder(self) -> DirectoryBuilder<L, H> {
+        DirectoryBuilder::Immutable(self.as_immutable())
+    }
+
+    pub fn ptr_eq(&self, other: &SharedDirectory<L, H>) -> bool {
+        Arc::ptr_eq(&self.inner, &other.inner)
+    }
+}
+
+impl<L, H> SharedDirectory<L, H>
+where
+    L: Clone,
+    H: DirectoryDigest,
+{
+    pub fn into_entries<C>(self) -> C
+    where
+        C: FromIterator<(FileNameBuf, DirectoryEntry<DirectoryBuilder<L, H>, L>)>,
+    {
+        self.entries()
+            .into_iter()
+            .map(|(k, v)| (k.clone(), v.clone().map_dir(|v| v.into_builder())))
+            .collect()
+    }
+}
+
+pub struct SharedDirectoryEntries<'a, L, H>(
+    sorted_vector_map::map::Iter<'a, FileNameBuf, DirectoryEntry<SharedDirectory<L, H>, L>>,
+)
+where
+    H: DirectoryDigest;
+
+impl<'a, L, H> Iterator for SharedDirectoryEntries<'a, L, H>
+where
+    H: DirectoryDigest,
+{
+    type Item = (
+        &'a FileName,
+        DirectoryEntry<&'a SharedDirectory<L, H>, &'a L>,
+    );
+
+    fn next(&mut self) -> Option<Self::Item> {
+        let (name, entry) = self.0.next()?;
+        Some((name, entry.as_ref()))
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.0.size_hint()
+    }
+}
+
+impl<'a, L, H> DirectoryRef<'a> for &'a SharedDirectory<L, H>
+where
+    H: DirectoryDigest,
+{
+    type Leaf = L;
+    type DirectoryDigest = H;
+    type Entries = SharedDirectoryEntries<'a, L, H>;
+
+    fn get(self, name: &FileName) -> Option<DirectoryEntry<Self, &'a Self::Leaf>> {
+        self.get(name)
+    }
+
+    fn entries(self) -> Self::Entries {
+        SharedDirectoryEntries(self.inner.data.entries.iter())
+    }
+
+    fn as_dyn(self) -> &'a dyn Directory<L, H> {
+        self
+    }
+}
+
+impl<'a, L, H> FingerprintedDirectoryRef<'a> for &'a SharedDirectory<L, H>
+where
+    H: DirectoryDigest,
+{
+    fn as_fingerprinted_dyn(
+        self,
+    ) -> &'a dyn FingerprintedDirectory<L, H> {
+        self
+    }
+}
+
+impl<L, H> Directory<L, H> for SharedDirectory<L, H>
+where
+    H: DirectoryDigest,
+{
+    type DirectoryRef<'a> = &'a SharedDirectory<L, H> where Self: Sized + 'a;
+
+    fn as_ref<'a>(&'a self) -> Self::DirectoryRef<'a>
+    where
+        Self: Sized + 'a,
+    {
+        self
+    }
+
+    fn to_builder(&self) -> DirectoryBuilder<L, H>
+    where
+        L: Clone,
+    {
+        self.clone().into_builder()
+    }
+}
+
+impl_fingerprinted_directory!(SharedDirectory);
diff --git a/app/buck2_directory/src/directory/test.rs b/app/buck2_directory/src/directory/test.rs
new file mode 100644
index 0000000000000..47d1b1f0de5da
--- /dev/null
+++ b/app/buck2_directory/src/directory/test.rs
@@ -0,0 +1,637 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */ + +#![cfg(test)] + +use std::collections::hash_map::DefaultHasher; +use std::hash::Hash; +use std::hash::Hasher; + +use allocative::Allocative; +use assert_matches::assert_matches; +use buck2_core::directory_digest::DirectoryDigest; +use buck2_core::directory_digest::InternableDirectoryDigest; +use buck2_core::fs::paths::file_name::FileName; +use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; +use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; +use derive_more::Display; +use dupe::Dupe; + +use crate::directory::builder::DirectoryBuilder; +use crate::directory::builder::DirectoryInsertError; +use crate::directory::builder::DirectoryMergeError; +use crate::directory::builder::DirectoryMkdirError; +use crate::directory::dashmap_directory_interner::DashMapDirectoryInterner; +use crate::directory::directory::Directory; +use crate::directory::directory_hasher::DirectoryHasher; +use crate::directory::directory_hasher::NoDigest; +use crate::directory::directory_iterator::DirectoryIterator; +use crate::directory::directory_iterator::DirectoryIteratorPathStack; +use crate::directory::directory_ref::FingerprintedDirectoryRef; +use crate::directory::directory_selector::DirectorySearchError; +use crate::directory::directory_selector::DirectorySelector; +use crate::directory::entry::DirectoryEntry; +use crate::directory::exclusive_directory::ExclusiveDirectory; +use crate::directory::find::find; +use crate::directory::find::find_prefix; +use crate::directory::immutable_directory::ImmutableDirectory; +use crate::directory::shared_directory::SharedDirectory; +use crate::directory::walk::ordered_entry_walk; + +#[derive(Clone, Dupe, Debug, Eq, PartialEq, Hash)] +pub struct NopEntry; + +pub struct TestHasher; + +#[derive(Clone, Dupe, Debug, Eq, PartialEq, Hash, Allocative, Display)] +struct TestDigest(u64); + +impl DirectoryDigest for TestDigest {} + +impl InternableDirectoryDigest for TestDigest {} + +impl DirectoryHasher for TestHasher { + fn hash_entries<'a, D, I>(&self, entries: I) -> TestDigest + where + I: IntoIterator)>, + D: FingerprintedDirectoryRef<'a, Leaf = NopEntry, DirectoryDigest = TestDigest>, + { + let mut hasher = DefaultHasher::new(); + + let mut entries = entries + .into_iter() + .map(|(name, entry)| { + let entry = entry.map_dir(|d| d.as_fingerprinted_dyn().fingerprint()); + (name, entry) + }) + .collect::>(); + entries.sort_by_key(|(name, _)| *name); + + entries.hash(&mut hasher); + TestDigest(hasher.finish()) + } +} + +type TestDirectoryBuilder = DirectoryBuilder; +type NoHasherDirectoryBuilder = DirectoryBuilder; + +fn path<'a>(s: &'a str) -> &'a ForwardRelativePath { + ForwardRelativePath::unchecked_new(s) +} + +#[test] +fn test_insert() -> anyhow::Result<()> { + let mut b = NoHasherDirectoryBuilder::empty(); + + assert_matches!( + b.insert(path("a/b"), DirectoryEntry::Leaf(NopEntry)), + Ok(None) + ); + + assert_matches!( + b.insert(path("a/b/c"), DirectoryEntry::Leaf(NopEntry)), + Err(DirectoryInsertError::CannotTraverseLeaf { path }) => { + assert_eq!(path.to_string(), "a/b"); + } + ); + + assert_matches!( + b.insert(path("a"), DirectoryEntry::Leaf(NopEntry)), + Ok(Some(DirectoryEntry::Dir(..))) + ); + + Ok(()) +} + +#[test] +fn test_walk() -> anyhow::Result<()> { + let mut b = TestDirectoryBuilder::empty(); + b.insert(path("a/b"), DirectoryEntry::Leaf(NopEntry))?; + b.insert( + path("b"), + DirectoryEntry::Dir(TestDirectoryBuilder::empty()), + )?; + + { + let mut it = b.ordered_walk().with_paths(); + + assert_matches!( + it.next(), + Some((p, 
_)) => assert_eq!(p, path("a")) + ); + + assert_matches!( + it.next(), + Some((p, _)) => assert_eq!(p, path("a/b")) + ); + + assert_matches!( + it.next(), + Some((p, _)) => assert_eq!(p, path("b")) + ); + + assert_matches!(it.next(), None); + } + + { + let it = b.unordered_walk().with_paths(); + let mut collected = it.collect::>(); + collected.sort_by_key(|(name, _)| name.clone()); + let mut it = collected.into_iter(); + + assert_matches!( + it.next(), + Some((p, _)) => assert_eq!(p, path("a")) + ); + + assert_matches!( + it.next(), + Some((p, _)) => assert_eq!(p, path("a/b")) + ); + + assert_matches!( + it.next(), + Some((p, _)) => assert_eq!(p, path("b")) + ); + + assert_matches!(it.next(), None); + } + + Ok(()) +} + +#[test] +fn test_merge() -> anyhow::Result<()> { + let mut a = TestDirectoryBuilder::empty(); + a.insert(path("a/b"), DirectoryEntry::Leaf(NopEntry))?; + + let mut b = TestDirectoryBuilder::empty(); + b.insert(path("a/c"), DirectoryEntry::Leaf(NopEntry))?; + + a.merge(b)?; + + let mut it = a.ordered_walk().with_paths(); + + assert_matches!( + it.next(), + Some((p, _)) => assert_eq!(p, path("a")) + ); + + assert_matches!( + it.next(), + Some((p, _)) => assert_eq!(p, path("a/b")) + ); + + assert_matches!( + it.next(), + Some((p, _)) => assert_eq!(p, path("a/c")) + ); + + assert_matches!(it.next(), None); + + Ok(()) +} + +#[test] +fn test_merge_overwrite() -> anyhow::Result<()> { + let mut a = TestDirectoryBuilder::empty(); + a.insert(path("a/b"), DirectoryEntry::Leaf(NopEntry))?; + + let mut b = TestDirectoryBuilder::empty(); + b.insert(path("a"), DirectoryEntry::Leaf(NopEntry))?; + + a.merge(b)?; + + Ok(()) +} + +#[test] +fn test_merge_conflict() -> anyhow::Result<()> { + let mut a = TestDirectoryBuilder::empty(); + a.insert(path("a"), DirectoryEntry::Leaf(NopEntry))?; + + let mut b = TestDirectoryBuilder::empty(); + b.insert(path("a/b"), DirectoryEntry::Leaf(NopEntry))?; + + assert_matches!( + a.merge(b), + Err(DirectoryMergeError::CannotTraverseLeaf { path }) => { + assert_eq!(path.to_string(), "a"); + } + ); + + Ok(()) +} + +#[test] +fn test_copy_on_write() -> anyhow::Result<()> { + let empty = TestDirectoryBuilder::empty().fingerprint(&TestHasher); + + let mut a = TestDirectoryBuilder::empty(); + a.insert(path("a"), DirectoryEntry::Dir(empty.into_builder()))?; + + a.insert(path("a/b"), DirectoryEntry::Leaf(NopEntry))?; + + let mut it = a.ordered_walk().with_paths(); + + assert_matches!( + it.next(), + Some((p, _)) => assert_eq!(p, path("a")) + ); + + assert_matches!( + it.next(), + Some((p, _)) => assert_eq!(p, path("a/b")) + ); + + Ok(()) +} + +#[test] +fn test_find() -> anyhow::Result<()> { + let mut a = TestDirectoryBuilder::empty(); + a.insert(path("a/b/c"), DirectoryEntry::Leaf(NopEntry))?; + + assert_matches!( + find(a.as_ref(), path("a/b/c")), + Ok(Some(DirectoryEntry::Leaf(..))) + ); + + assert_matches!( + find(a.as_ref(), path("a/b")), + Ok(Some(DirectoryEntry::Dir(..))) + ); + + assert_matches!( + find(a.as_ref(), path("")), + Ok(Some(DirectoryEntry::Dir(..))) + ); + + Ok(()) +} + +#[test] +fn test_find_prefix() -> anyhow::Result<()> { + let mut a = TestDirectoryBuilder::empty(); + a.insert(path("a/b/c"), DirectoryEntry::Leaf(NopEntry))?; + + assert_matches!( + find_prefix(a.as_ref(), path("a/b/c")), + Ok(Some(( + DirectoryEntry::Leaf(..), + path + ))) if path.is_empty() + ); + assert_matches!( + find_prefix(a.as_ref(), path("a/b")), + Ok(Some(( + DirectoryEntry::Dir(..), + path + ))) if path.is_empty() + ); + + assert_matches!( + find_prefix(a.as_ref(), 
path("a/b/c/d")), + Ok(Some((DirectoryEntry::Leaf(..), rest))) => { + assert_eq!(rest, path("d")); + } + ); + assert_matches!( + find_prefix(a.as_ref(), path("a/b/c/d/e")), + Ok(Some((DirectoryEntry::Leaf(..), rest))) => { + assert_eq!(rest, path("d/e")); + } + ); + + Ok(()) +} + +#[test] +fn test_search() -> anyhow::Result<()> { + let mut b = TestDirectoryBuilder::empty(); + b.insert(path("a/b"), DirectoryEntry::Leaf(NopEntry))?; + b.insert(path("b/c"), DirectoryEntry::Leaf(NopEntry))?; + let d = b.fingerprint(&TestHasher); + + { + let mut selector = DirectorySelector::empty(); + selector.select(path("a/b")); + + let mut it = selector.ordered_search(&d).with_paths(); + + assert_matches!( + it.next(), + Some((p, Ok(DirectoryEntry::Leaf(..)))) => assert_eq!(p, path("a/b")) + ); + assert_matches!(it.next(), None) + } + + { + let mut selector = DirectorySelector::empty(); + selector.select(path("a/b/c")); + selector.select(path("b/c")); + + let mut it = selector.ordered_search(&d).with_paths(); + + assert_matches!( + it.next(), + Some((p, Err(DirectorySearchError::CannotTraverseLeaf { .. }))) => assert_eq!(p, path("a/b")) + ); + assert_matches!( + it.next(), + Some((p, Ok(DirectoryEntry::Leaf(..)))) => assert_eq!(p, path("b/c")) + ); + assert_matches!(it.next(), None) + } + + { + let mut selector = DirectorySelector::empty(); + selector.select(path("a")); + + let mut it = selector.ordered_search(&d).with_paths(); + assert_matches!( + it.next(), + Some((p, Ok(DirectoryEntry::Dir(..)))) => assert_eq!(p, path("a")) + ); + } + + Ok(()) +} + +#[test] +fn test_filter() -> anyhow::Result<()> { + let mut b = TestDirectoryBuilder::empty(); + b.insert(path("a/aa"), DirectoryEntry::Leaf(NopEntry))?; + b.insert(path("a/a"), DirectoryEntry::Leaf(NopEntry))?; + b.insert(path("b/b"), DirectoryEntry::Leaf(NopEntry))?; + b.insert(path("b/bb"), DirectoryEntry::Leaf(NopEntry))?; + b.insert(path("c/c"), DirectoryEntry::Leaf(NopEntry))?; + b.insert(path("c/cc"), DirectoryEntry::Leaf(NopEntry))?; + + let mut selector = DirectorySelector::empty(); + selector.select(path("a")); + selector.select(path("b/b")); + + selector.filter(&mut b)?; + + let mut it = b.ordered_walk().with_paths(); + + assert_matches!( + it.next(), + Some((p, _)) => assert_eq!(p, path("a")) + ); + assert_matches!( + it.next(), + Some((p, _)) => assert_eq!(p, path("a/a")) + ); + assert_matches!( + it.next(), + Some((p, _)) => assert_eq!(p, path("a/aa")) + ); + assert_matches!( + it.next(), + Some((p, _)) => assert_eq!(p, path("b")) + ); + assert_matches!( + it.next(), + Some((p, _)) => assert_eq!(p, path("b/b")) + ); + + assert_matches!(it.next(), None); + + Ok(()) +} + +#[test] +fn test_entry_walk() { + { + let e = DirectoryEntry::::Leaf(NopEntry); + let mut it = ordered_entry_walk(e.as_ref().map_dir(|d| d.as_ref())); + + assert_matches!( + it.next(), + Some((p, _)) => assert_eq!(p.get(), path("")) + ); + + assert_matches!(it.next(), None); + } + + { + let e = DirectoryEntry::<_, NopEntry>::Dir(TestDirectoryBuilder::empty()); + let mut it = ordered_entry_walk(e.as_ref().map_dir(|d| d.as_ref())); + + assert_matches!(it.next(), None); + } +} + +#[test] +fn test_bounds() { + fn assert_impls_debug() {} + fn assert_impls_clone() {} + fn assert_impls_eq() {} + + assert_impls_debug::(); + assert_impls_clone::(); + assert_impls_eq::, NopEntry>>(); + assert_impls_eq::, NopEntry>>(); + assert_impls_eq::, NopEntry>>(); +} + +#[test] +fn test_mkdir() -> anyhow::Result<()> { + let mut b = TestDirectoryBuilder::empty(); + b.mkdir(path("foo/bar"))?; + 
b.mkdir(path("foo"))?; + + let mut it = b.ordered_walk().with_paths(); + + assert_matches!( + it.next(), + Some((p, _)) => assert_eq!(p, path("foo")) + ); + + assert_matches!( + it.next(), + Some((p, _)) => assert_eq!(p, path("foo/bar")) + ); + + assert_matches!(it.next(), None); + + Ok(()) +} + +#[test] +fn test_mkdir_overwrite() -> anyhow::Result<()> { + let mut b = TestDirectoryBuilder::empty(); + b.insert(path("a/b"), DirectoryEntry::Leaf(NopEntry))?; + + assert_matches!( + b.mkdir(path("a/b/c")), + Err(DirectoryMkdirError::CannotTraverseLeaf { path }) => { + assert_eq!(path.to_string(), "a/b"); + } + ); + + Ok(()) +} + +#[test] +fn test_directory_interner() -> anyhow::Result<()> { + let interner = DashMapDirectoryInterner::new(); + + let d1 = { + let mut b = TestDirectoryBuilder::empty(); + b.insert(path("a/b"), DirectoryEntry::Leaf(NopEntry))?; + b.fingerprint(&TestHasher).shared(&interner) + }; + + let d2 = { + let mut b = TestDirectoryBuilder::empty(); + b.insert(path("a/b"), DirectoryEntry::Leaf(NopEntry))?; + b.fingerprint(&TestHasher).shared(&interner) + }; + + assert!(d1.ptr_eq(&d2)); + + assert_eq!(interner.len(), 2); + + drop(d1); + assert_eq!(interner.len(), 2); + + drop(d2); + assert_eq!(interner.len(), 0); + + Ok(()) +} + +#[test] +fn test_directory_interner_deep() -> anyhow::Result<()> { + let interner = DashMapDirectoryInterner::new(); + + let d1 = { + let mut b = TestDirectoryBuilder::empty(); + b.insert(path("a/b"), DirectoryEntry::Leaf(NopEntry))?; + b.fingerprint(&TestHasher).shared(&interner) + }; + + let _d2 = { + let mut b = TestDirectoryBuilder::empty(); + b.insert(path("b"), DirectoryEntry::Leaf(NopEntry))?; + b.fingerprint(&TestHasher).shared(&interner) + }; + + assert_eq!(interner.len(), 2); + + drop(d1); + + // Now we only have d2. 
+ assert_eq!(interner.len(), 1); + + Ok(()) +} + +#[test] +fn test_filter_continues_on_error() -> anyhow::Result<()> { + let mut b = TestDirectoryBuilder::empty(); + b.insert(path("a/aa/aaa"), DirectoryEntry::Leaf(NopEntry))?; + b.insert(path("a/aa/bbb"), DirectoryEntry::Leaf(NopEntry))?; + b.insert(path("a/bb"), DirectoryEntry::Leaf(NopEntry))?; + b.insert(path("c"), DirectoryEntry::Leaf(NopEntry))?; + + let mut selector = DirectorySelector::empty(); + selector.select(path("a/aa")); + selector.select(path("c/d")); + + assert_matches!(selector.filter(&mut b), Err(..)); + + let mut it = b.ordered_walk().with_paths(); + + assert_matches!( + it.next(), + Some((p, _)) => assert_eq!(p, path("a")) + ); + assert_matches!( + it.next(), + Some((p, _)) => assert_eq!(p, path("a/aa")) + ); + assert_matches!( + it.next(), + Some((p, _)) => assert_eq!(p, path("a/aa/aaa")) + ); + assert_matches!( + it.next(), + Some((p, _)) => assert_eq!(p, path("a/aa/bbb")) + ); + assert_matches!( + it.next(), + Some((p, _)) => assert_eq!(p, path("c")) + ); + + assert_matches!(it.next(), None); + + Ok(()) +} + +#[test] +fn test_remove_prefix_empty() { + let mut b = TestDirectoryBuilder::empty(); + assert_eq!( + Vec::::new(), + b.ordered_walk_leaves().paths().collect::>() + ); + b.remove_prefix(path("")).unwrap(); + b.remove_prefix(path("a")).unwrap(); + b.remove_prefix(path("b/c")).unwrap(); + assert_eq!( + Vec::::new(), + b.ordered_walk_leaves().paths().collect::>() + ); +} + +#[test] +fn test_remove_prefix_error() { + let mut b = TestDirectoryBuilder::empty(); + b.insert(path("a/b"), DirectoryEntry::Leaf(NopEntry)) + .unwrap(); + assert!(b.remove_prefix(path("a/b/c")).is_err()); + assert_eq!( + vec![path("a/b")], + b.ordered_walk_leaves().paths().collect::>() + ); +} + +#[test] +fn test_remove_prefix_leaf() { + let mut b = TestDirectoryBuilder::empty(); + b.insert(path("a/b"), DirectoryEntry::Leaf(NopEntry)) + .unwrap(); + b.insert(path("a/x"), DirectoryEntry::Leaf(NopEntry)) + .unwrap(); + b.remove_prefix(path("a/b")).unwrap(); + assert_eq!( + vec![path("a/x")], + b.ordered_walk_leaves().paths().collect::>() + ); +} + +#[test] +fn test_remove_prefix_tree() { + let mut b = TestDirectoryBuilder::empty(); + b.insert(path("a/b/c"), DirectoryEntry::Leaf(NopEntry)) + .unwrap(); + b.insert(path("a/b/d"), DirectoryEntry::Leaf(NopEntry)) + .unwrap(); + b.insert(path("a/x"), DirectoryEntry::Leaf(NopEntry)) + .unwrap(); + b.remove_prefix(path("a/b")).unwrap(); + assert_eq!( + vec![path("a/x")], + b.ordered_walk_leaves().paths().collect::>() + ); +} diff --git a/app/buck2_directory/src/directory/walk.rs b/app/buck2_directory/src/directory/walk.rs new file mode 100644 index 0000000000000..fd49376c4d907 --- /dev/null +++ b/app/buck2_directory/src/directory/walk.rs @@ -0,0 +1,214 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use std::iter; +use std::marker::PhantomData; +use std::vec; + +use buck2_core::fs::paths::file_name::FileName; +use either::Either; + +use crate::directory::directory_iterator::DirectoryIterator; +use crate::directory::directory_iterator::DirectoryIteratorPathAccessor; +use crate::directory::directory_iterator::DirectoryIteratorPathStack; +use crate::directory::directory_ref::DirectoryRef; +use crate::directory::entry::DirectoryEntry; + +pub trait WalkType<'a> { + type Leaf: 'a; + type Directory: DirectoryRef<'a, Leaf = Self::Leaf>; + type Entries: Iterator< + Item = ( + &'a FileName, + DirectoryEntry, + ), + >; + + fn directory_entries(directory: Self::Directory) -> Self::Entries; +} + +pub(crate) struct WalkFrame<'a, T: WalkType<'a>> { + name: Option<&'a FileName>, + entries: T::Entries, + _phantom: PhantomData, +} + +impl<'a, T: WalkType<'a>> WalkFrame<'a, T> { + pub(crate) fn new(entries: T::Entries) -> Self { + Self { + name: None, + entries, + _phantom: PhantomData, + } + } +} + +pub struct Walk<'a, T: WalkType<'a>> { + pub(crate) stack: Vec>, +} + +impl<'a, T: WalkType<'a>> Walk<'a, T> { + pub(crate) fn new(directory: T::Directory) -> Self { + Walk { + stack: vec![WalkFrame::new(T::directory_entries(directory))], + } + } +} + +impl<'a, T: WalkType<'a>> DirectoryIteratorPathStack for Walk<'a, T> { + fn path(&self) -> impl Iterator { + self.stack.iter().filter_map(|frame| frame.name) + } +} + +impl<'a, T: WalkType<'a>> DirectoryIterator for Walk<'a, T> { + type PathStack<'b> = DirectoryIteratorPathAccessor<'b, Self> where Self: 'b; + type Item = DirectoryEntry; + + fn next<'b>(&'b mut self) -> Option<(DirectoryIteratorPathAccessor<'b, Self>, Self::Item)> { + loop { + let frame = self.stack.last_mut()?; + + if let Some((name, entry)) = frame.entries.next() { + let leaf_name = match entry { + DirectoryEntry::Dir(dir) => { + self.stack.push(WalkFrame { + name: Some(name), + entries: T::Entries::from(T::directory_entries(dir)), + _phantom: PhantomData, + }); + None + } + DirectoryEntry::Leaf(..) 
=> Some(name), + }; + + return Some(( + DirectoryIteratorPathAccessor { + leaf: leaf_name, + stack: self, + }, + entry, + )); + } + + self.stack.pop(); + } + } +} + +fn entry_walk_impl<'a, T: WalkType<'a>>( + entry: DirectoryEntry, +) -> DirectoryEntryWalk<'a, T::Leaf, Walk<'a, T>> { + match entry { + DirectoryEntry::Dir(d) => DirectoryEntryWalk::Dir { + inner: Walk::::new(d), + }, + DirectoryEntry::Leaf(d) => DirectoryEntryWalk::Leaf { entry: Some(d) }, + } +} + +pub type UnorderedDirectoryWalk<'a, D> = Walk<'a, UnorderedDirectoryWalkType<'a, D>>; +pub type OrderedDirectoryWalk<'a, D> = Walk<'a, OrderedDirectoryWalkType<'a, D>>; + +pub fn unordered_entry_walk<'a, D: DirectoryRef<'a>>( + entry: DirectoryEntry, +) -> DirectoryEntryWalk<'a, D::Leaf, UnorderedDirectoryWalk<'a, D>> { + entry_walk_impl::>(entry) +} + +pub fn ordered_entry_walk<'a, D: DirectoryRef<'a>>( + entry: DirectoryEntry, +) -> DirectoryEntryWalk<'a, D::Leaf, OrderedDirectoryWalk<'a, D>> { + entry_walk_impl::>(entry) +} + +pub enum DirectoryEntryWalk<'a, L, I> { + Dir { inner: I }, + Leaf { entry: Option<&'a L> }, +} + +impl<'a, D, L, I> DirectoryIterator for DirectoryEntryWalk<'a, L, I> +where + I: DirectoryIterator>, + D: 'a, +{ + type PathStack<'b> = DirectoryEntryWalkPathAccessor<::PathStack<'b>> + where + Self: 'b; + + type Item = DirectoryEntry; + + fn next<'this>( + &'this mut self, + ) -> Option<( + DirectoryEntryWalkPathAccessor<::PathStack<'this>>, + DirectoryEntry, + )> { + match self { + DirectoryEntryWalk::Dir { inner } => { + let (accessor, item) = inner.next()?; + Some(( + DirectoryEntryWalkPathAccessor { + inner: Some(accessor), + }, + item, + )) + } + DirectoryEntryWalk::Leaf { entry } => { + let entry = entry.take()?; + Some(( + DirectoryEntryWalkPathAccessor { inner: None }, + DirectoryEntry::Leaf(entry), + )) + } + } + } +} + +#[derive(Debug)] +pub struct DirectoryEntryWalkPathAccessor { + inner: Option, +} + +impl DirectoryIteratorPathStack for DirectoryEntryWalkPathAccessor +where + T: DirectoryIteratorPathStack, +{ + fn path(&self) -> impl Iterator { + match &self.inner { + Some(inner) => Either::Left(inner.path()), + None => Either::Right(iter::empty()), + } + } +} + +pub struct UnorderedDirectoryWalkType<'a, D: DirectoryRef<'a>>(PhantomData<&'a D>); +pub struct OrderedDirectoryWalkType<'a, D: DirectoryRef<'a>>(PhantomData<&'a D>); + +impl<'a, D: DirectoryRef<'a>> WalkType<'a> for UnorderedDirectoryWalkType<'a, D> { + type Leaf = D::Leaf; + type Directory = D; + type Entries = D::Entries; + + fn directory_entries(directory: Self::Directory) -> Self::Entries { + directory.entries() + } +} + +impl<'a, D: DirectoryRef<'a>> WalkType<'a> for OrderedDirectoryWalkType<'a, D> { + type Leaf = D::Leaf; + type Directory = D; + type Entries = vec::IntoIter<(&'a FileName, DirectoryEntry)>; + + fn directory_entries(directory: Self::Directory) -> Self::Entries { + let mut entries = Vec::from_iter(directory.entries()); + entries.sort_by_key(|(name, _)| *name); + entries.into_iter() + } +} diff --git a/app/buck2_directory/src/lib.rs b/app/buck2_directory/src/lib.rs new file mode 100644 index 0000000000000..24e66e670b232 --- /dev/null +++ b/app/buck2_directory/src/lib.rs @@ -0,0 +1,14 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +#![feature(error_generic_member_access)] +#![feature(impl_trait_in_assoc_type)] +#![feature(never_type)] + +pub mod directory; diff --git a/app/buck2_downward_api/BUCK b/app/buck2_downward_api/BUCK index 4b3774452ca03..c184d0a1f5806 100644 --- a/app/buck2_downward_api/BUCK +++ b/app/buck2_downward_api/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") diff --git a/app/buck2_downward_api/Cargo.toml b/app/buck2_downward_api/Cargo.toml index 79fed761713e2..92b027f26ffaa 100644 --- a/app/buck2_downward_api/Cargo.toml +++ b/app/buck2_downward_api/Cargo.toml @@ -1,7 +1,9 @@ [package] +edition = "2021" +license = { workspace = true } name = "buck2_downward_api" +repository = { workspace = true } version = "0.1.0" -edition = "2021" [dependencies] anyhow = { workspace = true } diff --git a/app/buck2_downward_api/src/lib.rs b/app/buck2_downward_api/src/lib.rs index c2970971a5336..ac708dc68b90b 100644 --- a/app/buck2_downward_api/src/lib.rs +++ b/app/buck2_downward_api/src/lib.rs @@ -7,6 +7,8 @@ * of this source tree. */ +#![feature(error_generic_member_access)] + //! The downward api for external processes. This crate defines a trait of downward api that Buck //! will need to handle as the process runner. diff --git a/app/buck2_downward_api_proto/BUCK b/app/buck2_downward_api_proto/BUCK index ed4d970ef8a51..2dd15f3e12824 100644 --- a/app/buck2_downward_api_proto/BUCK +++ b/app/buck2_downward_api_proto/BUCK @@ -1,5 +1,4 @@ load("@fbcode//buck2:proto_defs.bzl", "rust_protobuf_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -7,7 +6,6 @@ rust_protobuf_library( name = "buck2_downward_api_proto", srcs = glob(["src/**/*.rs"]), build_script = "build.rs", - doctests = False, # FIXME protos = ["downward_api.proto"], deps = [ "fbsource//third-party/rust:anyhow", diff --git a/app/buck2_downward_api_proto/Cargo.toml b/app/buck2_downward_api_proto/Cargo.toml index 1ab83427443a5..e6aa731f3326d 100644 --- a/app/buck2_downward_api_proto/Cargo.toml +++ b/app/buck2_downward_api_proto/Cargo.toml @@ -2,13 +2,14 @@ name = "buck2_downward_api_proto" edition = "2021" +license = { workspace = true } +repository = { workspace = true } version = "0.1.0" [dependencies] +anyhow = { workspace = true } prost = { workspace = true } -prost-types = { workspace = true } tonic = { workspace = true } -anyhow = { workspace = true } tracing = { workspace = true } [build-dependencies] diff --git a/app/buck2_downward_api_proto/src/convert.rs b/app/buck2_downward_api_proto/src/convert.rs index e75e7662c1e96..98c772f5c5c48 100644 --- a/app/buck2_downward_api_proto/src/convert.rs +++ b/app/buck2_downward_api_proto/src/convert.rs @@ -31,7 +31,7 @@ impl TryInto> for proto::Event { e.insert(value); } Entry::Occupied(e) => { - anyhow::bail!("Duplicate key: {}", e.key()); + return Err(anyhow::anyhow!("Duplicate key: {}", e.key())); } } } @@ -66,7 +66,7 @@ impl TryInto for proto::LogLevel { let value = Value::from_i32(value).context("Invalid `value`")?; Ok(match value { - Value::NotSet => anyhow::bail!("Missing `value`"), + Value::NotSet => return Err(anyhow::anyhow!("Missing `value`")), Value::Trace => Level::TRACE, Value::Debug => Level::DEBUG, Value::Info => Level::INFO, @@ -88,7 +88,7 @@ impl TryFrom for proto::LogLevel { v if v == Level::INFO => Value::Info, v if v == Level::WARN => Value::Warn, v if v == Level::ERROR => Value::Error, - v => anyhow::bail!("Unsupported 
Level: {:?}", v), + v => return Err(anyhow::anyhow!("Unsupported Level: {:?}", v)), }; Ok(proto::LogLevel { diff --git a/app/buck2_downward_api_proto/src/lib.rs b/app/buck2_downward_api_proto/src/lib.rs index cabbb0369ce83..e0383f640ee13 100644 --- a/app/buck2_downward_api_proto/src/lib.rs +++ b/app/buck2_downward_api_proto/src/lib.rs @@ -7,6 +7,8 @@ * of this source tree. */ +#![feature(error_generic_member_access)] + //! Protobufs for ineteracting with Buck's DownwardApi over GPRC. This isn't the protocol Buck v1 //! speaks, where the DownwardApi is accessed over named pipes with serialized JSON payloads. This //! is a different way to make the same calls. diff --git a/app/buck2_eden/BUCK b/app/buck2_eden/BUCK index 6758c29cd1b83..72ec90ce2d6bb 100644 --- a/app/buck2_eden/BUCK +++ b/app/buck2_eden/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -7,29 +6,28 @@ rust_library( name = "buck2_eden", srcs = glob(["src/**/*.rs"]), named_deps = { - "edenfs": "//eden/fs/service:thrift-rust", - "fb303_core": "//fb303/thrift:fb303_core-rust", + # @oss-disable: "edenfs": "//eden/fs/service:thrift-rust", + # @oss-disable: "edenfs_clients": "//eden/fs/service:thrift-rust-clients", }, - test_deps = [ - ], deps = [ - "fbsource//third-party/rust:anyhow", - "fbsource//third-party/rust:async-trait", - "fbsource//third-party/rust:compact_str", - "fbsource//third-party/rust:futures", - "fbsource//third-party/rust:libc", - "fbsource//third-party/rust:parking_lot", - "fbsource//third-party/rust:serde", - "fbsource//third-party/rust:thiserror", - "fbsource//third-party/rust:tokio", - "fbsource//third-party/rust:toml", - "fbsource//third-party/rust:tracing", - "//buck2/allocative/allocative:allocative", - "//buck2/app/buck2_common:buck2_common", - "//buck2/app/buck2_core:buck2_core", - "//buck2/gazebo/dupe:dupe", - "//common/rust/shed/fbinit:fbinit", - "//common/rust/shed/sorted_vector_map:sorted_vector_map", - "//common/rust/thrift/bareclient:thriftclient", + # @oss-disable: "fbsource//third-party/rust:anyhow", + # @oss-disable: "fbsource//third-party/rust:async-trait", + # @oss-disable: "fbsource//third-party/rust:compact_str", + # @oss-disable: "fbsource//third-party/rust:futures", + # @oss-disable: "fbsource//third-party/rust:libc", + # @oss-disable: "fbsource//third-party/rust:parking_lot", + # @oss-disable: "fbsource//third-party/rust:serde", + # @oss-disable: "fbsource//third-party/rust:tokio", + # @oss-disable: "fbsource//third-party/rust:toml", + # @oss-disable: "fbsource//third-party/rust:tracing", + # @oss-disable: "//buck2/allocative/allocative:allocative", + # @oss-disable: "//buck2/app/buck2_common:buck2_common", + # @oss-disable: "//buck2/app/buck2_core:buck2_core", + # @oss-disable: "//buck2/app/buck2_error:buck2_error", + # @oss-disable: "//buck2/gazebo/dupe:dupe", + # @oss-disable: "//common/rust/shed/fbinit:fbinit", + # @oss-disable: "//common/rust/shed/sorted_vector_map:sorted_vector_map", + # @oss-disable: "//common/rust/thrift/bareclient:thriftclient", + # @oss-disable: "//fb303/thrift:fb303_core-rust-clients", ], ) diff --git a/app/buck2_eden/Cargo.toml b/app/buck2_eden/Cargo.toml index aa0f23c601b8e..588c6f3ca441c 100644 --- a/app/buck2_eden/Cargo.toml +++ b/app/buck2_eden/Cargo.toml @@ -1,30 +1,11 @@ [package] +authors = ["Meta"] +description = "Eden integration for Buck2" +edition = "2021" +license = { workspace = true } name = "buck2_eden" +repository = { 
workspace = true } version = "0.1.0" -edition = "2021" -description = "Eden integration for Buck2" -license = "MIT OR Apache-2.0" -authors = ["Meta"] - -[dependencies] -anyhow = { workspace = true } -allocative = { workspace = true } -async-trait = { workspace = true } -buck2_common = { workspace = true } -buck2_core = { workspace = true } -compact_str = { workspace = true } -dupe = { workspace = true } -# @oss-disable: edenfs = { package = "thrift", path = "../../../eden/fs/service" } -# @oss-disable: fb303_core = { package = "fb303_core", path = "../../../fb303/thrift" } -fbinit = { workspace = true } -futures = { workspace = true } -libc = { workspace = true } -parking_lot = { workspace = true } -serde = { workspace = true } -sorted_vector_map = { workspace = true } -thiserror = { workspace = true } -tokio = { workspace = true } -toml = { workspace = true } -tracing = { workspace = true } -[dev-dependencies] +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ["cfg(fbcode_build)"] } diff --git a/app/buck2_eden/src/connection.rs b/app/buck2_eden/src/connection.rs index 81ae59cffa799..b575f613f3766 100644 --- a/app/buck2_eden/src/connection.rs +++ b/app/buck2_eden/src/connection.rs @@ -18,23 +18,26 @@ use std::time::Duration; use allocative::Allocative; use anyhow::Context as _; -use buck2_common::result::SharedResult; use buck2_core; use buck2_core::fs::fs_util; -use buck2_core::fs::paths::abs_norm_path::AbsNormPath; use buck2_core::fs::paths::abs_path::AbsPath; +use buck2_core::fs::paths::abs_path::AbsPathBuf; +use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; +use buck2_core::fs::project::ProjectRoot; +use buck2_core::fs::project_rel_path::ProjectRelativePath; +use buck2_error::ErrorTag; use dupe::Dupe; -use edenfs::client::EdenService; -use edenfs::errors::eden_service::ListMountsError; -use edenfs::types::BinaryHash; -use edenfs::types::EdenErrorType; -use edenfs::types::FileAttributeData; -use edenfs::types::FileAttributeDataOrErrorV2; -use edenfs::types::FileAttributeDataV2; -use edenfs::types::MountState; -use edenfs::types::PathString; -use edenfs::types::SourceControlType; -use fb303_core::client::BaseService; +use edenfs::BinaryHash; +use edenfs::EdenErrorType; +use edenfs::FileAttributeData; +use edenfs::FileAttributeDataOrErrorV2; +use edenfs::FileAttributeDataV2; +use edenfs::MountState; +use edenfs::PathString; +use edenfs::SourceControlType; +use edenfs_clients::errors::ListMountsError; +use edenfs_clients::EdenService; +use fb303_core_clients::BaseService; use fbinit::FacebookInit; use futures::future::BoxFuture; use futures::future::Future; @@ -43,7 +46,6 @@ use futures::future::Shared; use parking_lot::Mutex; use serde::Deserialize; use sorted_vector_map::SortedVectorMap; -use thiserror::Error; use tokio::sync::Semaphore; #[derive(Allocative)] @@ -57,6 +59,8 @@ pub struct EdenConnectionManager { /// to run or not. 
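 /// In practice the limit is read from the `BUCK2_EDEN_SEMAPHORE` env var and defaults to
 /// 2048 permits (see `io_provider.rs` below).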
#[allocative(skip)] semaphore: Semaphore, + /// The project root, relative to the eden mount point + project_root: ForwardRelativePathBuf, } #[derive(Deserialize, Debug)] @@ -71,17 +75,32 @@ struct EdenConfig { config: Config, } +#[derive(Allocative)] +struct EdenMountPoint(AbsPathBuf); + impl EdenConnectionManager { pub fn new( fb: FacebookInit, - root: &AbsNormPath, + project_root: &ProjectRoot, semaphore: Semaphore, ) -> anyhow::Result> { - let eden_root = root.as_abs_path().join(".eden"); - if !eden_root.exists() { + let dot_eden_dir = project_root.root().as_abs_path().join(".eden"); + if !dot_eden_dir.exists() { return Ok(None); } - let connector = Self::get_eden_connector(fb, &eden_root)?; + let connector = Self::get_eden_connector(fb, &dot_eden_dir)?; + + let canon_project_root = fs_util::canonicalize(project_root.root())?; + let canon_eden_mount = fs_util::canonicalize(&connector.mount.0)?; + + let rel_project_root = canon_project_root + .strip_prefix(&canon_eden_mount) + .with_context(|| { + format!( + "Eden root {} was not a prefix of the project root {}", + canon_eden_mount, canon_project_root + ) + })?; let connection = Mutex::new(EdenConnection { epoch: 0, @@ -92,35 +111,49 @@ impl EdenConnectionManager { connector, connection, semaphore, + project_root: rel_project_root.into_owned(), })) } - fn get_eden_connector(fb: FacebookInit, eden_root: &AbsPath) -> anyhow::Result { + fn get_eden_connector( + fb: FacebookInit, + dot_eden_dir: &AbsPath, + ) -> anyhow::Result { // Based off of how watchman picks up the config: fbcode/watchman/watcher/eden.cpp:138 if cfg!(windows) { - let config_path = eden_root.join("config"); + let config_path = dot_eden_dir.join("config"); let config_contents = fs_util::read_to_string(config_path)?; let config: EdenConfig = toml::from_str(&config_contents)?; - let root = Arc::new(config.config.root); - let socket = PathBuf::from(config.config.socket); - Ok(EdenConnector { fb, root, socket }) + let mount = Arc::new(EdenMountPoint(AbsPathBuf::new(config.config.root)?)); + let socket = AbsPathBuf::new(PathBuf::from(config.config.socket))?; + Ok(EdenConnector { fb, mount, socket }) } else { - let root = fs_util::read_link(eden_root.join("root"))? - .to_str() - .context("Eden root is not UTF-8")? - .to_owned(); - let root = Arc::new(root); - let socket = fs_util::read_link(eden_root.join("socket"))?; - Ok(EdenConnector { fb, root, socket }) + let mount = fs_util::read_link(dot_eden_dir.join("root"))?; + let mount = Arc::new(EdenMountPoint(AbsPathBuf::new(mount)?)); + let socket = AbsPathBuf::new(fs_util::read_link(dot_eden_dir.join("socket"))?)?; + Ok(EdenConnector { fb, mount, socket }) } } - pub fn get_root(&self) -> &str { - &self.connector.root + pub fn get_mount_point(&self) -> Vec { + self.connector + .mount + .0 + .as_path() + .as_os_str() + .as_encoded_bytes() + .to_vec() } - pub fn get_mount_point(&self) -> Vec { - self.connector.root.as_bytes().to_vec() + /// Converts project relative paths to values that are suitable for passing to Eden requests + pub fn project_paths_as_eden_paths<'a>( + &self, + paths: impl IntoIterator, + ) -> Vec> { + paths + .into_iter() + .map(|p| self.project_root.join(p).to_string().into_bytes()) + .collect() } /// Returns a string like "20220102-030405", assuming this is a release version. This is @@ -209,7 +242,7 @@ impl EdenConnectionManager { /// A (potentially pending) Eden client. type EdenClientFuture = - Shared>>>; + Shared>>>; /// An Eden client and an epoch to keep track of reconnections. 
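 /// When a request fails with a `Reconnect`-worthy error, the caller compares the epoch it
 /// observed against the current one before rebuilding the client, so concurrent callers
 /// hitting the same broken connection trigger only one reconnect. A sketch of the intent
 /// (names here are illustrative, not part of this API):
 ///
 /// ```ignore
 /// if observed_epoch == connection.lock().epoch {
 ///     // First to notice the failure: swap in connector.connect() and bump the epoch.
 /// }
 /// ```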
#[derive(Clone, Allocative)] @@ -226,76 +259,73 @@ struct EdenConnection { struct EdenConnector { #[allocative(skip)] fb: FacebookInit, - root: Arc, - socket: PathBuf, + mount: Arc, + socket: AbsPathBuf, +} + +fn thrift_builder( + fb: FacebookInit, + socket: &AbsPathBuf, +) -> anyhow::Result<::thriftclient::ThriftChannelBuilder> { + // NOTE: This timeout is absurdly high, but bear in mind that what we're + // "comparing" to is a FS call that has no timeouts at all. + const THRIFT_TIMEOUT_MS: u32 = 120_000; + + Ok( + ::thriftclient::ThriftChannelBuilder::from_path(fb, socket.as_path())? + .with_conn_timeout(THRIFT_TIMEOUT_MS) + .with_recv_timeout(THRIFT_TIMEOUT_MS) + .with_secure(false), + ) } impl EdenConnector { fn connect(&self) -> EdenClientFuture { let socket = self.socket.clone(); let fb = self.fb; - let root = self.root.dupe(); + let mount = self.mount.dupe(); tokio::task::spawn(async move { tracing::info!("Creating a new Eden connection via `{}`", socket.display()); - let eden: anyhow::Result>; + let eden: Arc = thrift_builder(fb, &socket)? + .build_client(::edenfs_clients::make_EdenService) + .context("Error constructing Eden client")?; - #[cfg(fbcode_build)] - { - eden = fbcode::thrift_builder(fb, socket)? - .build_client(::edenfs::client::make_EdenService); - } - - #[cfg(not(fbcode_build))] - { - let _ignored = fb; - let _ignored = socket; - eden = Err(anyhow::anyhow!("Eden I/O is not available in Cargo builds")) - } - - let eden = eden.context("Error constructing Eden client")?; - - wait_until_mount_is_ready(eden.as_ref(), &root).await?; + wait_until_mount_is_ready(eden.as_ref(), &mount).await?; Ok(eden) }) .map(|r| match r { Ok(r) => r, - Err(e) => Err(e.into()), // Turn the JoinError into a SharedError. + Err(e) => Err(e.into()), // Turn the JoinError into a buck2_error::Error. }) .boxed() .shared() } - #[cfg(fbcode_build)] fn connect_fb303(&self) -> anyhow::Result> { - fbcode::thrift_builder(self.fb, &self.socket)? - .build_client(::fb303_core::client::make_BaseService) - } - - #[cfg(not(fbcode_build))] - fn connect_fb303(&self) -> anyhow::Result> { - Err(anyhow::anyhow!("Eden I/O is not available in Cargo builds")) + thrift_builder(self.fb, &self.socket)?.build_client(::fb303_core_clients::make_BaseService) } } -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] +#[buck2(tag = IoEdenMountNotReady)] #[error("Mount never became ready: `{}`", self.mount)] struct MountNeverBecameReady { - mount: Arc, + mount: AbsPathBuf, } /// Delay until a mount becomes ready (up to 10 seconds). 
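 /// Implemented by polling `is_mount_ready` once per second for up to 10 attempts; missed
 /// ticks are skipped rather than bursted, so a slow Thrift call does not cause a pile-up
 /// of probes.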
async fn wait_until_mount_is_ready( eden: &(dyn EdenService + Send + Sync), - root: &Arc, + mount: &EdenMountPoint, ) -> anyhow::Result<()> { let mut interval = tokio::time::interval(Duration::from_secs(1)); interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); for _ in 0..10 { interval.tick().await; - match is_mount_ready(eden, root).await { + match is_mount_ready(eden, mount).await { Ok(true) => return Ok(()), Ok(false) => { // Fallthrough to keep going @@ -307,42 +337,50 @@ async fn wait_until_mount_is_ready( } } - Err(MountNeverBecameReady { mount: root.dupe() }.into()) + Err(MountNeverBecameReady { + mount: mount.0.clone(), + } + .into()) } -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] pub enum IsMountReadyError { #[error("Mount does not exist in Eden: `{}`", .mount)] - MountDoesNotExist { mount: Arc }, + #[buck2(tag = IoEdenMountDoesNotExist)] + MountDoesNotExist { mount: AbsPathBuf }, #[error(transparent)] + #[buck2(tag = IoEdenRequestError)] RequestError(ListMountsError), } /// Check if a given mount is ready. async fn is_mount_ready( eden: &(dyn EdenService + Send + Sync), - root: &Arc, + mount: &EdenMountPoint, ) -> Result { let mounts = eden .listMounts() .await .map_err(IsMountReadyError::RequestError)?; - for mount in mounts { - if mount.mountPoint == root.as_bytes() { - return Ok(mount.state == MountState::RUNNING); + for candidate in mounts { + if candidate.mountPoint == mount.0.as_path().as_os_str().as_encoded_bytes() { + return Ok(candidate.state == MountState::RUNNING); } } - Err(IsMountReadyError::MountDoesNotExist { mount: root.dupe() }) + Err(IsMountReadyError::MountDoesNotExist { + mount: mount.0.clone(), + }) } -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] pub enum ConnectAndRequestError { #[error(transparent)] + #[buck2(tag = IoEdenConnectionError)] ConnectionError(anyhow::Error), - #[error(transparent)] + #[buck2(tag = IoEdenRequestError)] RequestError(E), } @@ -377,7 +415,7 @@ impl HasErrorHandlingStrategy for IsMountReadyError { macro_rules! impl_has_error_handling_strategy { ($err: ident) => { - impl HasErrorHandlingStrategy for ::edenfs::errors::eden_service::$err { + impl HasErrorHandlingStrategy for ::edenfs_clients::errors::$err { fn error_handling_strategy(&self) -> ErrorHandlingStrategy { match self { Self::ThriftError(..) => ErrorHandlingStrategy::Reconnect, @@ -400,23 +438,52 @@ impl_has_error_handling_strategy!(EnsureMaterializedError); impl_has_error_handling_strategy!(ReaddirError); impl_has_error_handling_strategy!(GetSHA1Error); -#[derive(Debug, Error)] +fn eden_error_kind_tag(e: &EdenError) -> Option { + let tag = match e { + EdenError::PosixError { code, .. 
} => match *code { + libc::ENOENT => ErrorTag::IoNotFound, + libc::EACCES | libc::EPERM => ErrorTag::IoPermissionDenied, + libc::ETIMEDOUT => ErrorTag::IoTimeout, + libc::EBUSY => ErrorTag::IoExecutableFileBusy, + libc::EPIPE => ErrorTag::IoBrokenPipe, + libc::ENOSPC => ErrorTag::IoStorageFull, + libc::ECONNABORTED => ErrorTag::IoConnectionAborted, + libc::ENOTCONN => ErrorTag::IoNotConnected, + _ => return None, + }, + EdenError::ServiceError { error } => match error.errorType { + EdenErrorType::WIN32_ERROR => ErrorTag::IoEdenWin32Error, + EdenErrorType::HRESULT_ERROR => ErrorTag::IoEdenHresultError, + EdenErrorType::ARGUMENT_ERROR => ErrorTag::IoEdenArgumentError, + EdenErrorType::GENERIC_ERROR => ErrorTag::IoEdenGenericError, + EdenErrorType::MOUNT_GENERATION_CHANGED => ErrorTag::IoEdenMountGenerationChanged, + EdenErrorType::JOURNAL_TRUNCATED => ErrorTag::IoEdenJournalTruncated, + EdenErrorType::CHECKOUT_IN_PROGRESS => ErrorTag::IoEdenCheckoutInProgress, + EdenErrorType::OUT_OF_DATE_PARENT => ErrorTag::IoEdenOutOfDateParent, + _ => return None, + }, + EdenError::UnknownField { .. } => ErrorTag::IoEdenUnknownField, + }; + + Some(tag) +} + +#[derive(Debug, buck2_error::Error)] +#[buck2(tag = IoEden)] +#[buck2(tag = eden_error_kind_tag(&self))] pub enum EdenError { #[error("Eden POSIX error (code = {}): {}", .code, .error.message)] - PosixError { - error: edenfs::types::EdenError, - code: i32, - }, + PosixError { error: edenfs::EdenError, code: i32 }, #[error("Eden service error: {}", .error.message)] - ServiceError { error: edenfs::types::EdenError }, + ServiceError { error: edenfs::EdenError }, #[error("Eden returned an unexpected field: {}", .field)] UnknownField { field: i32 }, } -impl From for EdenError { - fn from(error: edenfs::types::EdenError) -> Self { +impl From for EdenError { + fn from(error: edenfs::EdenError) -> Self { if error.errorType == EdenErrorType::POSIX_ERROR { if let Some(error_code) = error.errorCode { return Self::PosixError { @@ -438,7 +505,7 @@ pub trait EdenDataIntoResult { macro_rules! impl_eden_data_into_result { ($typ: ident, $data: ty, $ok_variant: ident) => { - impl EdenDataIntoResult for ::edenfs::types::$typ { + impl EdenDataIntoResult for ::edenfs::$typ { type Data = $data; fn into_result(self) -> Result { @@ -477,24 +544,3 @@ impl_eden_data_into_result!( SortedVectorMap, dirListAttributeData ); - -#[cfg(fbcode_build)] -mod fbcode { - use std::path::Path; - - use super::*; - - pub fn thrift_builder>( - fb: FacebookInit, - socket: P, - ) -> anyhow::Result<::thriftclient::ThriftChannelBuilder> { - // NOTE: This timeout is absurdly high, but bear in mind that what we're - // "comparing" to is a FS call that has no timeouts at all. - const THRIFT_TIMEOUT_MS: u32 = 120_000; - - Ok(::thriftclient::ThriftChannelBuilder::from_path(fb, socket)? 
- .with_conn_timeout(THRIFT_TIMEOUT_MS) - .with_recv_timeout(THRIFT_TIMEOUT_MS) - .with_secure(false)) - } -} diff --git a/app/buck2_eden/src/io_provider.rs b/app/buck2_eden/src/io_provider.rs index ac06a1f532196..b46cf77562812 100644 --- a/app/buck2_eden/src/io_provider.rs +++ b/app/buck2_eden/src/io_provider.rs @@ -24,24 +24,21 @@ use buck2_common::io::fs::FsIoProvider; use buck2_common::io::fs::ReadUncheckedOptions; use buck2_common::io::IoProvider; use buck2_core; -use buck2_core::env_helper::EnvHelper; +use buck2_core::buck2_env_anyhow; use buck2_core::fs::project::ProjectRoot; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; use buck2_core::io_counters::IoCounterKey; -use buck2_core::soft_error; +use buck2_error::BuckErrorContext; +use buck2_error::ErrorTag; use compact_str::CompactString; use dupe::Dupe; -use edenfs::types::FileAttributes; -use edenfs::types::GetAttributesFromFilesParams; -use edenfs::types::ReaddirParams; -use edenfs::types::SourceControlType; -use edenfs::types::SyncBehavior; -use edenfs::types::SynchronizeWorkingCopyParams; +use edenfs::FileAttributes; +use edenfs::GetAttributesFromFilesParams; +use edenfs::ReaddirParams; +use edenfs::SourceControlType; +use edenfs::SyncBehavior; +use edenfs::SynchronizeWorkingCopyParams; use fbinit::FacebookInit; -use libc::EINVAL; -use libc::EISDIR; -use libc::ENOENT; -use libc::ENOTDIR; use tokio::sync::Semaphore; use crate::connection::EdenConnectionManager; @@ -61,17 +58,17 @@ enum Digest { Blake3Keyed, } +enum PathMetadataResult { + Result(Option>), + Error(EdenError), +} + impl EdenIoProvider { pub async fn new( fb: FacebookInit, fs: &ProjectRoot, cas_digest_config: CasDigestConfig, ) -> anyhow::Result> { - if cfg!(not(fbcode_build)) { - tracing::warn!("Disabling Eden I/O: Cargo build detected"); - return Ok(None); - } - let (digest, min_eden_version) = if cas_digest_config.source_files_config().allows_sha1() { (Digest::Sha1, "20220905-214046") } else if cas_digest_config @@ -84,14 +81,12 @@ impl EdenIoProvider { return Ok(None); }; - static EDEN_SEMAPHORE: EnvHelper = EnvHelper::new("BUCK2_EDEN_SEMAPHORE"); - let eden_semaphore = EDEN_SEMAPHORE.get_copied()?.unwrap_or(2048); + let eden_semaphore = buck2_env_anyhow!("BUCK2_EDEN_SEMAPHORE", type=usize, default=2048, applicability=internal)?; - let manager = - match EdenConnectionManager::new(fb, fs.root(), Semaphore::new(eden_semaphore))? { - Some(manager) => manager, - None => return Ok(None), - }; + let manager = match EdenConnectionManager::new(fb, fs, Semaphore::new(eden_semaphore))? 
{ + Some(manager) => manager, + None => return Ok(None), + }; let eden_version = manager .get_eden_version() @@ -119,14 +114,11 @@ impl EdenIoProvider { digest, })) } -} -#[async_trait] -impl IoProvider for EdenIoProvider { - async fn read_path_metadata_if_exists( + async fn read_path_metadata_if_exists_impl( &self, - path: ProjectRelativePathBuf, - ) -> anyhow::Result>> { + path: &ProjectRelativePathBuf, + ) -> anyhow::Result { let _guard = IoCounterKey::StatEden.guard(); let hash_attribute = match self.digest { @@ -143,7 +135,7 @@ impl IoProvider for EdenIoProvider { let params = GetAttributesFromFilesParams { mountPoint: self.manager.get_mount_point(), - paths: vec![path.to_string().into_bytes()], + paths: self.manager.project_paths_as_eden_paths([path.as_ref()]), requestedAttributes: requested_attributes, sync: no_sync(), ..Default::default() @@ -172,7 +164,7 @@ impl IoProvider for EdenIoProvider { .context("Eden returned an error for sourceControlType")?; if source_control_type == SourceControlType::TREE { - return Ok(Some(RawPathMetadata::Directory)); + return Ok(PathMetadataResult::Result(Some(RawPathMetadata::Directory))); }; if source_control_type == SourceControlType::SYMLINK { @@ -196,7 +188,7 @@ impl IoProvider for EdenIoProvider { ) })?; - return Ok(Some(meta)); + return Ok(PathMetadataResult::Result(Some(meta))); }; let size = data @@ -245,50 +237,25 @@ impl IoProvider for EdenIoProvider { is_executable, }; - Ok(Some(RawPathMetadata::File(meta))) - } - Err(EdenError::PosixError { code, .. }) if code == EISDIR => { - tracing::debug!("getAttributesFromFilesV2({}): EISDIR", path); - soft_error!( - "eden_io_eisdir", - anyhow::anyhow!("Eden returned EISDIR for {}", path) - )?; - Ok(Some(RawPathMetadata::Directory)) - } - Err(EdenError::PosixError { code, .. }) if code == ENOENT => { - tracing::debug!("getAttributesFromFilesV2({}): ENOENT", path); - Ok(None) + Ok(PathMetadataResult::Result(Some(RawPathMetadata::File( + meta, + )))) } - Err(EdenError::PosixError { code, .. }) if code == EINVAL || code == ENOTDIR => { - // If we get EINVAL it means the target wasn't a file, and since we know it - // existed and it wasn't a dir, then that means it must be a symlink. If we get - // ENOTDIR, that means we tried to traverse a path component that was a - // symlink. In both cases, we need to both a) handle ExternalSymlink and b) - // look through to the target, so we do that. - tracing::debug!("getAttributesFromFilesV2({}): fallthrough", path); - self.fs.read_path_metadata_if_exists(path).await - } - Err(err) => Err(err.into()), + Err(err) => Ok(PathMetadataResult::Error(err)), } } - async fn read_file_if_exists( + async fn read_dir_impl( &self, path: ProjectRelativePathBuf, - ) -> anyhow::Result> { - self.fs.read_file_if_exists(path).await - } - - async fn read_dir(&self, path: ProjectRelativePathBuf) -> anyhow::Result> { + ) -> anyhow::Result> { let _guard = IoCounterKey::ReadDirEden.guard(); let requested_attributes = i64::from(i32::from(FileAttributes::SOURCE_CONTROL_TYPE)); let params = ReaddirParams { mountPoint: self.manager.get_mount_point(), - // TODO(nga): this assumes eden mount point is the project root. - // Which is not the case for isolated test data directories for example. 
- directoryPaths: vec![path.to_string().into_bytes()], + directoryPaths: self.manager.project_paths_as_eden_paths([path.as_ref()]), requestedAttributes: requested_attributes, sync: no_sync(), ..Default::default() @@ -341,6 +308,54 @@ impl IoProvider for EdenIoProvider { Ok(entries) } +} + +#[async_trait] +impl IoProvider for EdenIoProvider { + async fn read_path_metadata_if_exists_impl( + &self, + path: ProjectRelativePathBuf, + ) -> anyhow::Result>> { + match self.read_path_metadata_if_exists_impl(&path).await { + Ok(PathMetadataResult::Result(res)) => Ok(res), + Ok(PathMetadataResult::Error(err)) => { + match err { + EdenError::PosixError { code, .. } if code == libc::ENOENT => { + tracing::debug!("getAttributesFromFilesV2({}): ENOENT", path); + Ok(None) + } + EdenError::PosixError { code, .. } + if code == libc::EINVAL || code == libc::ENOTDIR => + { + // If we get EINVAL it means the target wasn't a file, and since we know it + // existed and it wasn't a dir, then that means it must be a symlink. If we get + // ENOTDIR, that means we tried to traverse a path component that was a + // symlink. In both cases, we need to both a) handle ExternalSymlink and b) + // look through to the target, so we do that. + tracing::debug!("getAttributesFromFilesV2({}): fallthrough", path); + self.fs.read_path_metadata_if_exists_impl(path).await + } + _ => Err(err.into()), + } + } + Err(err) => Err(err).tag_anyhow(ErrorTag::IoEden), + } + } + + async fn read_file_if_exists_impl( + &self, + path: ProjectRelativePathBuf, + ) -> anyhow::Result> { + // Don't tag as IoEden because it uses regular file I/O. + self.fs.read_file_if_exists_impl(path).await + } + + async fn read_dir_impl( + &self, + path: ProjectRelativePathBuf, + ) -> anyhow::Result> { + self.read_dir_impl(path).await.tag_anyhow(ErrorTag::IoEden) + } async fn settle(&self) -> anyhow::Result<()> { let _guard = IoCounterKey::EdenSettle.guard(); @@ -362,6 +377,7 @@ impl IoProvider for EdenIoProvider { }) .await .context("Error synchronizing Eden working copy") + .tag_anyhow(ErrorTag::IoEden) } fn name(&self) -> &'static str { diff --git a/app/buck2_eden/src/lib.rs b/app/buck2_eden/src/lib.rs index 521e69d8f7354..b4bbbac4d177c 100644 --- a/app/buck2_eden/src/lib.rs +++ b/app/buck2_eden/src/lib.rs @@ -8,7 +8,9 @@ */ // This can't be built in our OSS implementation. 
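 // (The crate links against internal-only Thrift clients such as `thriftclient` and the
 // `edenfs` bindings, so the gate is crate-wide rather than on individual items.)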
-#![cfg(any(fbcode_build, cargo_internal_build))] +#![cfg(fbcode_build)] +#![feature(error_generic_member_access)] +#![feature(used_with_arg)] pub mod connection; pub mod io_provider; diff --git a/app/buck2_error/BUCK b/app/buck2_error/BUCK index 1917adb705f7e..a8c823ac0e109 100644 --- a/app/buck2_error/BUCK +++ b/app/buck2_error/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("buck2") @@ -9,12 +8,17 @@ rust_library( ["src/**/*.rs"], ), deps = [ - "fbcode//buck2/app/buck2_error_derive:buck2_error_derive", "fbsource//third-party/rust:anyhow", "fbsource//third-party/rust:derive_more", + "fbsource//third-party/rust:either", + "fbsource//third-party/rust:itertools", "fbsource//third-party/rust:ref-cast", + "fbsource//third-party/rust:smallvec", "fbsource//third-party/rust:thiserror", "//buck2/allocative/allocative:allocative", + "//buck2/app/buck2_data:buck2_data", + "//buck2/app/buck2_error_derive:buck2_error_derive", "//buck2/gazebo/dupe:dupe", + "//buck2/starlark-rust/starlark_syntax:starlark_syntax", ], ) diff --git a/app/buck2_error/Cargo.toml b/app/buck2_error/Cargo.toml index 97b78f483beff..50e357e8b8b7c 100644 --- a/app/buck2_error/Cargo.toml +++ b/app/buck2_error/Cargo.toml @@ -1,18 +1,26 @@ [package] -name = "buck2_error" -version = "0.1.0" -edition = "2021" description = """ Library providing an `anyhow` inspired error type for buck2 """ +edition = "2021" +license = { workspace = true } +name = "buck2_error" +repository = { workspace = true } +version = "0.1.0" [dependencies] anyhow = { workspace = true } derive_more = { workspace = true } +either = { workspace = true } +itertools = { workspace = true } ref-cast = { workspace = true } +smallvec = { workspace = true } thiserror = { workspace = true } allocative = { workspace = true } dupe = { workspace = true } +buck2_data = { workspace = true } buck2_error_derive = { workspace = true } + +starlark_syntax = { workspace = true } diff --git a/app/buck2_error/src/any.rs b/app/buck2_error/src/any.rs index e879160992f45..661c32f76ecce 100644 --- a/app/buck2_error/src/any.rs +++ b/app/buck2_error/src/any.rs @@ -7,191 +7,305 @@ * of this source tree. */ -//! Integrations of `buck2_error::Error` with `anyhow::Error` and `std::error::Error`. +//! Integrations of `buck2_error::Error` with `anyhow::Error` and `StdError`. -use std::fmt::Debug; -use std::fmt::Display; +use std::error::request_value; +use std::error::Error as StdError; +use std::fmt; use std::sync::Arc; use ref_cast::RefCast; -use crate::error::ErrorImpl; - -/// Represents an arbitrary `buck2_error` compatible error type. -/// -/// This trait is implemented for `buck2_error::Error`, `anyhow::Error` and any `std::error::Error`. -pub trait AnyError: Sealed + Into + Debug + Display + Sync + Send + 'static {} -pub trait Sealed {} - -impl AnyError for crate::Error {} -impl Sealed for crate::Error {} +use crate::error::ErrorKind; +use crate::root::ErrorRoot; // This implementation is fairly magic and is what allows us to bypass the issue with conflicting -// implementations between `anyhow::Error` and `T: std::error::Error`. The `T: Into` -// bound is what we actually make use of in the implementation, while the other bound is needed to -// make sure this impl does not accidentally cover too many types. Importantly, this impl does not -// conflict with `T: From` -impl From for crate::Error +// implementations between `anyhow::Error` and `T: StdError`. 
The `T: Into` bound is +// what we actually make use of in the implementation, while the other bound is needed to make sure +// this impl does not accidentally cover too many types. Importantly, this impl does not conflict +// with `T: From` +impl From for crate::Error where T: Into, Result<(), T>: anyhow::Context<(), T>, { + #[track_caller] + #[cold] fn from(value: T) -> crate::Error { - // `Self` may be an `anyhow::Error` or any `std::error::Error`. We'll check by downcasting - let mut e = Some(value); - let r: &mut dyn std::any::Any = &mut e; - if let Some(e) = r.downcast_mut::>() { - return from_anyhow_for_crate(e.take().unwrap()); - } - - // Otherwise, we'll use the strategy for `std::error::Error` - let anyhow = e.unwrap().into(); - let std_err: Box = anyhow.into(); - crate::Error(Arc::new(crate::error::ErrorImpl::Root(Arc::from(std_err)))) + let source_location = + crate::source_location::from_file(std::panic::Location::caller().file(), None); + let anyhow: anyhow::Error = value.into(); + recover_crate_error(anyhow.as_ref(), source_location) } } -impl AnyError for T -where - T: Into, - Result<(), T>: anyhow::Context<(), T>, -{ -} -impl Sealed for T -where - T: Into, - Result<(), T>: anyhow::Context<(), T>, -{ + +fn maybe_add_context_from_metadata(mut e: crate::Error, context: &dyn StdError) -> crate::Error { + if let Some(metadata) = request_value::(context) { + if let Some(category) = metadata.category { + e = e.context(category); + } + if !metadata.tags.is_empty() { + e = e.tag(metadata.tags.iter().copied()); + } + e + } else { + e + } } -fn from_anyhow_for_crate(value: anyhow::Error) -> crate::Error { - // Instead of just turning this into an error root, we will first check if this - // `anyhow::Error` was created from a `buck2_error::Error`. If so, we can recover the context in - // a structured way. +pub(crate) fn recover_crate_error( + value: &'_ (dyn StdError + 'static), + source_location: Option, +) -> crate::Error { + // Instead of just turning this into an error root, we'll extract the whole context stack and + // convert it manually. let mut context_stack = Vec::new(); - let mut chain = value.chain(); - let base = loop { - match chain.next() { - None => { - // This error was not created from a `buck2_error::Error`, so we can't do anything - // smart - return crate::Error::new(AnyhowAsStdError(value)); - } - Some(e) => { - if let Some(base) = e.downcast_ref::() { - break base; - } else { - context_stack.push(e); - } + let mut cur = value; + // We allow all of these to appear more than once in the context chain, however we always use + // the bottom-most value when actually generating the root + let mut source_location = source_location; + let mut action_error = None; + let base = 'base: loop { + // Handle the `cur` error + if let Some(base) = cur.downcast_ref::() { + break base.0.clone(); + } + + if let Some(metadata) = request_value::(cur) { + source_location = crate::source_location::from_file( + metadata.source_file, + metadata.source_location_extra, + ); + if metadata.action_error.is_some() { + action_error = metadata.action_error; } } + + // Compute the next element in the source chain + if let Some(new_cur) = cur.source() { + context_stack.push(cur); + cur = new_cur; + continue; + } + + // `anyhow` only ever uses the standard `Display` formatting of error types, never the + // alternate or debug formatting. We can match that behavior by just converting the error to + // a string. 
That prevents us from having to deal with the type returned by `source` being + // potentially non-`Send` or non-`Sync`. + let description = format!("{}", cur); + let e = crate::Error(Arc::new(ErrorKind::Root(Box::new(ErrorRoot::new( + description, + source_location, + action_error, + ))))); + break 'base maybe_add_context_from_metadata(e, cur); }; - // We've discovered that this `anyhow::Error` has a cause chain that includes a - // `buck2_error::Error`. We'll try and recover a properly structured error by converting the - // part of the cause chain that's not in the base to context on the buck2_error error. - // Unfortunately, we cannot detect whether the remainder of the error chain is actually - // associated with `.context` calls on the anyhow error or not. If it is, this will all work - // correctly. If not, we might get some whacky formatting. However, in order for this to go - // wrong, someone else has to have put an `anyhow::Error` into their custom error type, - // which they really shouldn't be doing anyway. - let mut e = base.0.clone(); - for context in context_stack.into_iter().rev() { - // Even for proper context objects, anyhow does not give us access to them directly. The - // best we can do is turn them into strings. - let context = format!("{}", context); - e = e.context(context); + // We've converted the base error to a `buck2_error::Error`. Next, we need to add back any + // context that is not included in the `base` error yet. + let mut e = base; + for context_value in context_stack.into_iter().rev() { + if let Some(starlark_err) = cur.downcast_ref::() { + e = e.context(format!("{}", starlark_err)); + } else { + // First, just add the value directly. This value is only used for formatting + e = e.context(format!("{}", context_value)); + // Now add any additional information from the metadata, if it's available + e = maybe_add_context_from_metadata(e, context_value); + } } e } impl From for anyhow::Error { + #[cold] fn from(value: crate::Error) -> Self { Into::into(CrateAsStdError(value)) } } -#[derive(derive_more::Display)] -pub(crate) struct AnyhowAsStdError(pub anyhow::Error); - -impl Debug for AnyhowAsStdError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - Debug::fmt(&self.0, f) - } -} - -impl std::error::Error for AnyhowAsStdError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - std::error::Error::source(&*self.0) - } -} - #[derive(derive_more::Display, RefCast)] #[repr(transparent)] pub(crate) struct CrateAsStdError(pub(crate) crate::Error); -impl Debug for CrateAsStdError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - Debug::fmt(&self.0, f) +impl fmt::Debug for CrateAsStdError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&self.0, f) } } -impl std::error::Error for CrateAsStdError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { +impl StdError for CrateAsStdError { + fn source(&self) -> Option<&(dyn StdError + 'static)> { match &*self.0.0 { - ErrorImpl::Root(r) => std::error::Error::source(r), - ErrorImpl::WithContext(_, r) => Some(CrateAsStdError::ref_cast(r)), + ErrorKind::Root(_) => None, + ErrorKind::WithContext(_, r) | ErrorKind::Emitted(_, r) => { + Some(CrateAsStdError::ref_cast(r)) + } } } } +/// This can be `provide`d by an error to inject buck2-specific information about it. +/// +/// For `typ`, `action_error`, and the source information, only the value that appears last in the +/// source chain will be used. 
The derive macro typically handles this to prevent any surprises, +/// however if this value is being provided manually then care may need to be taken. +#[derive(Clone)] +pub struct ProvidableMetadata { + pub category: Option, + pub tags: Vec, + pub source_file: &'static str, + /// Extra information to add to the end of the source location - typically a type/variant name, + /// and the same thing as gets passed to `buck2_error::source_location::from_file`. + pub source_location_extra: Option<&'static str>, + /// The protobuf ActionError, if the root was an action error + pub action_error: Option, +} + #[cfg(test)] mod tests { - use std::sync::Arc; + use std::error::Request; - use crate::error::ErrorImpl; + use allocative::Allocative; + + use super::*; + use crate as buck2_error; + use crate::TypedContext; #[derive(Debug, derive_more::Display)] struct TestError; - impl std::error::Error for TestError {} - - fn check_equal(mut a: &crate::Error, mut b: &crate::Error) { - loop { - match (&*a.0, &*b.0) { - (ErrorImpl::Root(a), ErrorImpl::Root(b)) => { - // Avoid comparing vtable pointers - assert_eq!(Arc::as_ptr(a).cast::<()>(), Arc::as_ptr(b).cast::<()>()); - return; - } - ( - ErrorImpl::WithContext(a_context, a_inner), - ErrorImpl::WithContext(b_context, b_inner), - ) => { - assert_eq!(format!("{}", a_context), format!("{}", b_context)); - a = a_inner; - b = b_inner; - } - (ErrorImpl::WithContext(_, _), ErrorImpl::Root(_)) => { - panic!("Left side had more context than right!") - } - (ErrorImpl::Root(_), ErrorImpl::WithContext(_, _)) => { - panic!("Right side had more context than left!") - } - } - } - } + impl StdError for TestError {} #[test] - fn test_rountrip_no_context() { + fn test_roundtrip_no_context() { let e = crate::Error::new(TestError).context("context 1"); let e2 = crate::Error::from(anyhow::Error::from(e.clone())); - check_equal(&e, &e2); + crate::Error::check_equal(&e, &e2); } #[test] - fn test_rountrip_with_context() { + fn test_roundtrip_with_context() { let e = crate::Error::new(TestError).context("context 1"); let e2 = crate::Error::from(anyhow::Error::from(e.clone()).context("context 2")); let e3 = e.context("context 2"); - check_equal(&e2, &e3); + crate::Error::check_equal(&e2, &e3); + } + + #[test] + fn test_roundtrip_with_typed_context() { + #[derive(Debug, Allocative, Eq, PartialEq)] + struct T(usize); + impl std::fmt::Display for T { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } + } + + impl TypedContext for T { + fn eq(&self, other: &dyn TypedContext) -> bool { + match (other as &dyn std::any::Any).downcast_ref::() { + Some(right) => self == right, + None => false, + } + } + } + + let e = crate::Error::new(TestError).context(T(1)); + let e2 = crate::Error::from(anyhow::Error::from(e.clone()).context("context 2")); + let e3 = e.context("context 2"); + crate::Error::check_equal(&e2, &e3); + } + + #[derive(Debug, derive_more::Display)] + struct FullMetadataError; + + impl StdError for FullMetadataError { + fn provide<'a>(&'a self, request: &mut Request<'a>) { + request.provide_value(ProvidableMetadata { + action_error: None, + source_file: file!(), + source_location_extra: Some("FullMetadataError"), + tags: vec![ + crate::ErrorTag::WatchmanTimeout, + crate::ErrorTag::StarlarkFail, + crate::ErrorTag::WatchmanTimeout, + ], + category: Some(crate::Tier::Tier0), + }); + } + } + + #[test] + fn test_metadata() { + for e in [ + FullMetadataError.into(), + crate::Error::new(FullMetadataError), + ] { + assert_eq!(e.get_tier(), 
Some(crate::Tier::Tier0)); + assert_eq!( + e.source_location(), + Some("buck2_error/src/any.rs::FullMetadataError") + ); + assert_eq!( + &e.tags(), + &[ + crate::ErrorTag::StarlarkFail, + crate::ErrorTag::WatchmanTimeout + ] + ); + } + } + + #[test] + fn test_metadata_through_anyhow() { + let e: anyhow::Error = FullMetadataError.into(); + let e = e.context("anyhow"); + let e: crate::Error = e.into(); + assert_eq!(e.get_tier(), Some(crate::Tier::Tier0)); + assert!(format!("{:?}", e).contains("anyhow")); + } + + #[derive(Debug, thiserror::Error)] + #[error("wrapper")] + struct WrapperError(#[source] FullMetadataError); + + #[test] + fn test_metadata_through_wrapper() { + let e: crate::Error = WrapperError(FullMetadataError).into(); + assert_eq!(e.get_tier(), Some(crate::Tier::Tier0)); + assert!(format!("{:?}", e).contains("wrapper")); + } + + #[derive(Debug, buck2_error_derive::Error)] + #[buck2(tier0)] + #[error("wrapper2")] + struct FullMetadataContextWrapperError(#[source] FullMetadataError); + + #[test] + fn test_context_in_wrapper() { + let e: crate::Error = FullMetadataContextWrapperError(FullMetadataError).into(); + assert_eq!(e.get_tier(), Some(crate::Tier::Tier0)); + assert_eq!( + e.source_location(), + Some("buck2_error/src/any.rs::FullMetadataError") + ); + assert!(format!("{:?}", e).contains("wrapper2")); + } + + #[derive(Debug, buck2_error_derive::Error)] + #[buck2(input)] + #[error("unused")] + struct UserMetadataError; + + #[derive(Debug, buck2_error_derive::Error)] + #[buck2(tier0)] + #[error("unused")] + struct InfraMetadataWrapperError(#[source] UserMetadataError); + + #[test] + fn test_no_root_metadata_context() { + let e = InfraMetadataWrapperError(UserMetadataError); + let e: crate::Error = e.into(); + assert_eq!(e.get_tier(), Some(crate::Tier::Tier0)); } } diff --git a/app/buck2_error/src/classify.rs b/app/buck2_error/src/classify.rs new file mode 100644 index 0000000000000..d8de0901b45ef --- /dev/null +++ b/app/buck2_error/src/classify.rs @@ -0,0 +1,336 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use buck2_data::error::ErrorTag; +use buck2_data::error::ErrorTier; + +use crate::Tier; + +/// When there's no tag, but we want to put something in Scuba, we use this. +pub const ERROR_TAG_UNCLASSIFIED: &str = "UNCLASSIFIED"; + +fn tier_rank(tier: Option) -> u32 { + match tier { + Some(tier) => match tier { + Tier::Environment => 10000, + Tier::Tier0 => 10001, + Tier::Input => 20000, + }, + None => 30000, + } +} + +macro_rules! 
rank { + ( $tier:ident ) => { + match stringify!($tier) { + "environment" => (Some(Tier::Environment), line!()), + "tier0" => (Some(Tier::Tier0), line!()), + "input" => (Some(Tier::Input), tier_rank(Some(Tier::Tier0)) + line!()), + "unspecified" => (None, tier_rank(Some(Tier::Input)) + line!()), + _ => unreachable!(), + } + }; +} + +/// Ordering determines tag rank, more interesting tags first +pub(crate) fn category_and_rank(tag: ErrorTag) -> (Option, u32) { + match tag { + // Environment errors + ErrorTag::NoValidCerts => rank!(environment), + ErrorTag::ServerSigterm => rank!(environment), + ErrorTag::IoMaterializerFileBusy => rank!(environment), + ErrorTag::IoClientBrokenPipe => rank!(environment), + ErrorTag::WatchmanRootNotConnectedError => rank!(environment), + ErrorTag::WatchmanCheckoutInProgress => rank!(environment), + ErrorTag::DownloadFileHeadRequest => rank!(environment), + ErrorTag::ServerMemoryPressure => rank!(environment), + // Daemon was likely SIGKILLed, otherwise it should have written something to stderr + ErrorTag::ServerStderrEmpty => rank!(environment), + + // Tier 0 errors + ErrorTag::ServerJemallocAssert => rank!(tier0), + ErrorTag::ServerStackOverflow => rank!(tier0), + ErrorTag::ServerPanicked => rank!(tier0), + ErrorTag::ServerSegv => rank!(tier0), + ErrorTag::DaemonConnect => rank!(tier0), + ErrorTag::ServerStderrUnknown => rank!(tier0), + ErrorTag::InternalError => rank!(tier0), + ErrorTag::DaemonWontDieFromKill => rank!(tier0), + ErrorTag::GrpcResponseMessageTooLarge => rank!(tier0), + ErrorTag::ClientGrpc => rank!(tier0), + ErrorTag::ReUnknownTcode => rank!(tier0), + ErrorTag::ReCancelled => rank!(tier0), + ErrorTag::ReUnknown => rank!(tier0), + ErrorTag::ReInvalidArgument => rank!(tier0), + ErrorTag::ReDeadlineExceeded => rank!(tier0), + ErrorTag::ReNotFound => rank!(tier0), + ErrorTag::ReAlreadyExists => rank!(tier0), + ErrorTag::RePermissionDenied => rank!(tier0), + ErrorTag::ReResourceExhausted => rank!(tier0), + ErrorTag::ReFailedPrecondition => rank!(tier0), + ErrorTag::ReAborted => rank!(tier0), + ErrorTag::ReOutOfRange => rank!(tier0), + ErrorTag::ReUnimplemented => rank!(tier0), + ErrorTag::ReInternal => rank!(tier0), + ErrorTag::ReUnavailable => rank!(tier0), + ErrorTag::ReDataLoss => rank!(tier0), + ErrorTag::ReUnauthenticated => rank!(tier0), + ErrorTag::IoConnectionAborted => rank!(tier0), + ErrorTag::IoTimeout => rank!(tier0), + ErrorTag::IoEdenMountNotReady => rank!(tier0), + // TODO(minglunli): Check how often Win32 Errors are actually hit, potentially do the same as POSIX + ErrorTag::IoEdenWin32Error => rank!(tier0), + ErrorTag::IoEdenHresultError => rank!(tier0), + ErrorTag::IoEdenArgumentError => rank!(tier0), + ErrorTag::IoEdenGenericError => rank!(tier0), + ErrorTag::IoEdenMountGenerationChanged => rank!(tier0), + ErrorTag::IoEdenJournalTruncated => rank!(tier0), + ErrorTag::IoEdenOutOfDateParent => rank!(tier0), + ErrorTag::WatchmanTimeout => rank!(tier0), + ErrorTag::WatchmanConnectionError => rank!(tier0), + ErrorTag::WatchmanConnectionLost => rank!(tier0), + ErrorTag::WatchmanConnectionDiscovery => rank!(tier0), + ErrorTag::WatchmanServerError => rank!(tier0), + ErrorTag::WatchmanResponseError => rank!(tier0), + ErrorTag::WatchmanMissingField => rank!(tier0), + ErrorTag::WatchmanDeserialize => rank!(tier0), + ErrorTag::WatchmanSerialize => rank!(tier0), + ErrorTag::WatchmanConnect => rank!(tier0), + ErrorTag::WatchmanRequestError => rank!(tier0), + ErrorTag::HttpServer => rank!(tier0), + + // Input errors + // FIXME(JakobDegen): Make 
this bad experience once that's available. Usually when this + // happens, it's probably because the user tried to shut down with Ctrl+C and something + // about that didn't work + ErrorTag::InterruptedByDaemonShutdown => rank!(input), + ErrorTag::DaemonIsBusy => rank!(input), + ErrorTag::DaemonPreempted => rank!(input), + ErrorTag::ConfigureAttr => rank!(input), + ErrorTag::IoEdenCheckoutInProgress => rank!(input), // User switching branches during Eden operation + ErrorTag::IoNotConnected => rank!(input), // This typically means eden is not mounted + ErrorTag::IoExecutableFileBusy => rank!(input), + ErrorTag::IoStorageFull => rank!(input), + ErrorTag::IoPermissionDenied => rank!(input), + ErrorTag::IoEdenMountDoesNotExist => rank!(input), + ErrorTag::ProjectMissingPath => rank!(input), + ErrorTag::StarlarkFail => rank!(input), + ErrorTag::StarlarkStackOverflow => rank!(input), + ErrorTag::Visibility => rank!(input), + ErrorTag::HttpClient => rank!(input), + ErrorTag::Analysis => rank!(input), + ErrorTag::TestDeadlineExpired => rank!(input), + + // Unspecified errors + ErrorTag::IoBrokenPipe => rank!(unspecified), + ErrorTag::IoWindowsSharingViolation => rank!(unspecified), + ErrorTag::IoNotFound => rank!(unspecified), + ErrorTag::IoSource => rank!(unspecified), + ErrorTag::IoSystem => rank!(unspecified), + ErrorTag::IoEden => rank!(unspecified), + ErrorTag::IoEdenConnectionError => rank!(unspecified), + ErrorTag::IoEdenRequestError => rank!(unspecified), + ErrorTag::IoEdenUnknownField => rank!(unspecified), + ErrorTag::MaterializationError => rank!(unspecified), + ErrorTag::Http => rank!(unspecified), + ErrorTag::Install => rank!(unspecified), + ErrorTag::AnyActionExecution => rank!(unspecified), + ErrorTag::AnyStarlarkEvaluation => rank!(unspecified), + ErrorTag::UnusedDefaultTag => rank!(unspecified), + } +} + +pub trait ErrorLike { + fn best_tag(&self) -> Option; + + fn get_tier(&self) -> Option; + + fn error_rank(&self) -> u32; + + fn category(&self) -> String; +} + +const TIER0: &str = "INFRA"; +const ENVIRONMENT: &str = "ENVIRONMENT"; +const INPUT: &str = "USER"; + +impl ErrorLike for buck2_data::ErrorReport { + fn best_tag(&self) -> Option { + best_tag(self.tags.iter().filter_map(|t| { + // This should never be `None`, but with weak prost types, + // it is safer to just ignore incorrect integers. + ErrorTag::from_i32(*t) + })) + } + + fn get_tier(&self) -> Option { + self.tier + .map(|tier| match ErrorTier::from_i32(tier) { + Some(tier) => match tier { + ErrorTier::Tier0 => Some(Tier::Tier0), + ErrorTier::Environment => Some(Tier::Environment), + ErrorTier::Input => Some(Tier::Input), + ErrorTier::UnusedDefaultCategory => None, + }, + None => None, + }) + .flatten() + } + + fn error_rank(self: &buck2_data::ErrorReport) -> u32 { + let tag_rank = self.best_tag().map(tag_rank).unwrap_or(u32::MAX); + let tier_rank = tier_rank(self.get_tier()); + + std::cmp::min(tag_rank, tier_rank) + } + + fn category(&self) -> String { + let tier = self + .best_tag() + .map(|t| category_and_rank(t).0) + .flatten() + .or(self.get_tier()) + .unwrap_or(Tier::Tier0); + + match tier { + Tier::Tier0 => TIER0.to_owned(), + Tier::Environment => ENVIRONMENT.to_owned(), + Tier::Input => INPUT.to_owned(), + } + } +} + +/// Pick the most interesting error by best tag. +pub fn best_error<'a>( + tags: impl IntoIterator, +) -> Option<&'a buck2_data::ErrorReport> { + tags.into_iter().min_by_key(|e| e.error_rank()) +} + +/// Pick the most interesting tag from a list of tags. 
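The `rank!` macro above derives priority from `line!()`, so within a tier, whichever tag appears earlier in `category_and_rank` gets the smaller rank and therefore wins `best_tag`. A self-contained sketch of that idea (the tags here are made up, not buck2's):

// Sketch of rank-by-source-order, assuming nothing from buck2. Each arm's
// priority is just its line number, so reordering the match arms reorders
// which tag counts as "most interesting".
#[derive(Clone, Copy, PartialEq, Debug)]
enum Tag { Panic, Timeout, Unclassified }

fn rank(tag: Tag) -> u32 {
    match tag {
        Tag::Panic => line!(),        // smallest rank: most interesting
        Tag::Timeout => line!(),
        Tag::Unclassified => line!(), // largest rank: least interesting
    }
}

fn best_tag(tags: impl IntoIterator<Item = Tag>) -> Option<Tag> {
    // min_by_key picks the lowest rank, i.e. the earliest-listed tag.
    tags.into_iter().min_by_key(|t| rank(*t))
}

fn main() {
    assert_eq!(best_tag([Tag::Timeout, Tag::Panic]), Some(Tag::Panic));
}

The upshot is that reordering match arms is a semantic change, which is why the doc comment on `category_and_rank` calls the ordering out.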
+pub fn best_tag(tags: impl IntoIterator) -> Option { + tags.into_iter().min_by_key(|t| tag_rank(*t)) +} + +/// Tag rank: smaller is more interesting. +fn tag_rank(tag: ErrorTag) -> u32 { + category_and_rank(tag).1 +} + +/// Some tags are known to be either infrastructure or user errors. +pub(crate) fn error_tag_category(tag: ErrorTag) -> Option { + category_and_rank(tag).0 +} + +#[cfg(test)] +mod tests { + use buck2_data::error::ErrorTag; + use buck2_data::error::ErrorTier; + use buck2_data::ErrorReport; + + use super::*; + use crate::classify::best_tag; + + #[test] + fn test_best_tag() { + assert_eq!( + Some(ErrorTag::ServerPanicked), + best_tag([ErrorTag::ServerPanicked, ErrorTag::WatchmanTimeout]) + ) + } + + #[test] + fn test_rank() { + assert!( + super::tag_rank(ErrorTag::ServerJemallocAssert) + < super::tag_rank(ErrorTag::UnusedDefaultTag) + ) + } + + #[test] + fn test_user_and_infra() { + let errors = vec![ + ErrorReport { + tier: Some(ErrorTier::Input as i32), + ..ErrorReport::default() + }, + ErrorReport { + tier: Some(ErrorTier::Tier0 as i32), + ..ErrorReport::default() + }, + ]; + + assert_eq!( + best_error(&errors).map(|e| e.category()), + Some(TIER0.to_owned()) + ); + } + + #[test] + fn test_default_is_infra() { + let errors = vec![ErrorReport { + tier: Some(ErrorTier::UnusedDefaultCategory as i32), + ..ErrorReport::default() + }]; + + assert_eq!( + best_error(&errors).map(|e| e.category()), + Some(TIER0.to_owned()) + ); + } + + #[test] + fn test_ranked_infra() { + let errors = vec![ + ErrorReport { + tags: vec![ErrorTag::ServerJemallocAssert as i32], + ..ErrorReport::default() + }, + ErrorReport { + tier: Some(ErrorTier::Tier0 as i32), + ..ErrorReport::default() + }, + ]; + + assert_eq!( + best_error(&errors).map(|e| e.tags.clone()), + Some(vec![ErrorTag::ServerJemallocAssert as i32]), + ); + } + + #[test] + fn test_tag_overrides_tier() { + let errors = vec![ErrorReport { + tier: Some(ErrorTier::Tier0 as i32), + tags: vec![ErrorTag::StarlarkFail as i32], + ..ErrorReport::default() + }]; + + assert_eq!( + best_error(&errors).map(|e| e.category()), + Some(INPUT.to_owned()) + ); + } + + #[test] + fn test_ranked_tags() { + let errors = vec![ErrorReport { + tags: vec![ + ErrorTag::ServerStderrEmpty as i32, + ErrorTag::ClientGrpc as i32, + ], + ..ErrorReport::default() + }]; + + assert_eq!( + best_error(&errors).map(|e| e.category()), + Some(ENVIRONMENT.to_owned()) + ); + } +} diff --git a/app/buck2_error/src/context.rs b/app/buck2_error/src/context.rs index 3600b9cdf5b8d..b0f3811e5751c 100644 --- a/app/buck2_error/src/context.rs +++ b/app/buck2_error/src/context.rs @@ -9,22 +9,11 @@ use std::sync::Arc; -use crate::error::ErrorImpl; -use crate::AnyError; +use smallvec::smallvec; -impl crate::Error { - pub fn context(self, context: C) -> Self { - Self(Arc::new(ErrorImpl::WithContext(Arc::new(context), self))) - } - - #[cold] - fn new_anyhow_with_context( - e: E, - c: C, - ) -> anyhow::Error { - Into::::into(e).context(c).into() - } -} +use crate::context_value::ContextValue; +use crate::context_value::TypedContext; +use crate::{self as buck2_error}; /// Provides the `context` method for `Result`. /// @@ -32,23 +21,132 @@ impl crate::Error { /// in the near future, uses of `anyhow::Context` in `buck2/app` will be broadly replaced with use /// of this trait. Subsequently, additional APIs will be provided for annotating errors with /// structured context data. 
-pub trait Context<T>: Sealed {
-    fn context<C: std::fmt::Display + Send + Sync + 'static>(self, context: C)
-    -> anyhow::Result<T>;
+pub trait BuckErrorContext<T>: Sealed {
+    #[track_caller]
+    fn buck_error_context<C: Into<ContextValue>>(self, context: C) -> crate::Result<T>;
 
-    fn with_context<C, F>(self, f: F) -> anyhow::Result<T>
+    #[track_caller]
+    fn with_buck_error_context<C, F>(self, f: F) -> crate::Result<T>
     where
-        C: std::fmt::Display + Send + Sync + 'static,
+        C: Into<ContextValue>,
         F: FnOnce() -> C;
+
+    #[track_caller]
+    fn input(self) -> crate::Result<T> {
+        self.buck_error_context(crate::Tier::Input)
+    }
+
+    #[track_caller]
+    fn tier0(self) -> crate::Result<T> {
+        self.buck_error_context(crate::Tier::Tier0)
+    }
+
+    #[track_caller]
+    fn tag(self, tag: crate::ErrorTag) -> crate::Result<T> {
+        self.buck_error_context(ContextValue::Tags(smallvec![tag]))
+    }
+
+    #[track_caller]
+    fn internal_error(self, message: &str) -> crate::Result<T> {
+        self.with_internal_error(|| message.to_owned())
+    }
+
+    #[track_caller]
+    fn with_internal_error<F>(self, f: F) -> crate::Result<T>
+    where
+        F: FnOnce() -> String,
+    {
+        self.with_buck_error_context(|| format!("{} (internal error)", f()))
+            .tag(crate::ErrorTag::InternalError)
+    }
+
+    /// The code below returns an `anyhow::Error`; it is used while we transition from anyhow to
+    /// buck2_error in buck2/app.
+    /// TODO(minglunli): Delete the code below once we have fully transitioned to buck2_error
+    #[track_caller]
+    fn buck_error_context_anyhow<C: Into<ContextValue>>(self, context: C) -> anyhow::Result<T>;
+
+    #[track_caller]
+    fn with_buck_error_context_anyhow<C, F>(self, f: F) -> anyhow::Result<T>
+    where
+        C: Into<ContextValue>,
+        F: FnOnce() -> C;
+
+    #[track_caller]
+    fn input_anyhow(self) -> anyhow::Result<T> {
+        self.buck_error_context_anyhow(crate::Tier::Input)
+    }
+
+    #[track_caller]
+    fn tier0_anyhow(self) -> anyhow::Result<T> {
+        self.buck_error_context_anyhow(crate::Tier::Tier0)
+    }
+
+    #[track_caller]
+    fn tag_anyhow(self, tag: crate::ErrorTag) -> anyhow::Result<T> {
+        self.buck_error_context_anyhow(ContextValue::Tags(smallvec![tag]))
+    }
+
+    #[track_caller]
+    fn internal_error_anyhow(self, message: &str) -> anyhow::Result<T> {
+        self.with_internal_error_anyhow(|| message.to_owned())
+    }
+
+    #[track_caller]
+    fn with_internal_error_anyhow<F>(self, f: F) -> anyhow::Result<T>
+    where
+        F: FnOnce() -> String,
+    {
+        self.with_buck_error_context_anyhow(|| format!("{} (internal error)", f()))
+            .tag_anyhow(crate::ErrorTag::InternalError)
+    }
+
+    /// Supports adding context to an error by either augmenting the most recent context if it's
+    /// the requested type or by adding a new context.
+ #[track_caller] + fn compute_context< + TC: TypedContext, + C1: Into, + C2: Into, + F: FnOnce(Arc) -> C1, + F2: FnOnce() -> C2, + >( + self, + map_context: F, + new_context: F2, + ) -> anyhow::Result; } -pub trait Sealed {} +pub trait Sealed: Sized {} -impl Sealed for std::result::Result {} +impl Sealed for std::result::Result where crate::Error: From {} -impl Context for std::result::Result { - fn context(self, c: C) -> anyhow::Result +impl BuckErrorContext for std::result::Result +where + crate::Error: From, +{ + fn buck_error_context(self, c: C) -> crate::Result where - C: std::fmt::Display + Send + Sync + 'static, + C: Into, + { + match self { + Ok(x) => Ok(x), + Err(e) => Err(crate::Error::from(e).context(c)), + } + } + + fn with_buck_error_context(self, f: F) -> crate::Result + where + C: Into, + F: FnOnce() -> C, + { + match self { + Ok(x) => Ok(x), + Err(e) => Err(crate::Error::from(e).context(f())), + } + } + + fn buck_error_context_anyhow(self, c: C) -> anyhow::Result + where + C: Into, { match self { Ok(x) => Ok(x), @@ -56,9 +154,9 @@ impl Context for std::result::Result { } } - fn with_context(self, f: F) -> anyhow::Result + fn with_buck_error_context_anyhow(self, f: F) -> anyhow::Result where - C: std::fmt::Display + Send + Sync + 'static, + C: Into, F: FnOnce() -> C, { match self { @@ -66,23 +164,90 @@ impl Context for std::result::Result { Err(e) => Err(crate::Error::new_anyhow_with_context(e, f())), } } + + #[track_caller] + fn compute_context< + TC: TypedContext, + C1: Into, + C2: Into, + F: FnOnce(Arc) -> C1, + F2: FnOnce() -> C2, + >( + self, + map_context: F, + new_context: F2, + ) -> anyhow::Result { + match self { + Ok(x) => Ok(x), + Err(e) => Err(crate::Error::from(e).compute_context(map_context, new_context)), + } + } +} + +/// Similar to `anyhow::Context`, but works for `crate::Result`. +pub trait AnyhowContextForError: Sealed { + fn context(self, context: C) -> anyhow::Result + where + C: Into; + + fn with_context(self, f: F) -> anyhow::Result + where + C: Into, + F: FnOnce() -> C; } -// FIXME(JakobDegen): This impl should not exist. We should make people write error types for these -// conditions. Let's have it for now to ease migration though. 
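A short sketch of why `AnyhowContextForError` exists: it lets code that already produces a `buck2_error` `Result` keep the familiar `.context(...)` spelling at call sites that must still hand back `anyhow::Result` mid-migration. The function names are invented, and this assumes the trait is re-exported at the crate root:

// Hypothetical migration-era seam: `inner` already returns a buck2_error
// Result, while `outer` still exposes anyhow to its callers.
use buck2_error::AnyhowContextForError;

fn inner() -> buck2_error::Result<u32> {
    Ok(42)
}

fn outer() -> anyhow::Result<u32> {
    inner().context("while computing the answer")
}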
+impl AnyhowContextForError for crate::Result { + #[inline] + #[track_caller] + fn context(self, context: C) -> anyhow::Result + where + C: Into, + { + self.buck_error_context_anyhow(context) + } + + #[inline] + #[track_caller] + fn with_context(self, f: F) -> anyhow::Result + where + C: Into, + F: FnOnce() -> C, + { + self.with_buck_error_context_anyhow(f) + } +} -// Can't use our derive macro because it creates reference to `::buck2_error`, which doesn't exist in this -// crate #[derive(Debug, buck2_error_derive::Error)] #[error("NoneError")] struct NoneError; impl Sealed for Option {} -impl Context for Option { - fn context(self, c: C) -> anyhow::Result +impl BuckErrorContext for Option { + fn buck_error_context(self, c: C) -> crate::Result where - C: std::fmt::Display + Send + Sync + 'static, + C: Into, + { + match self { + Some(x) => Ok(x), + None => Err(crate::Error::from(NoneError).context(c)), + } + } + + fn with_buck_error_context(self, f: F) -> crate::Result + where + C: Into, + F: FnOnce() -> C, + { + match self { + Some(x) => Ok(x), + None => Err(crate::Error::from(NoneError).context(f())), + } + } + + fn buck_error_context_anyhow(self, c: C) -> anyhow::Result + where + C: Into, { match self { Some(x) => Ok(x), @@ -90,9 +255,9 @@ impl Context for Option { } } - fn with_context(self, f: F) -> anyhow::Result + fn with_buck_error_context_anyhow(self, f: F) -> anyhow::Result where - C: std::fmt::Display + Send + Sync + 'static, + C: Into, F: FnOnce() -> C, { match self { @@ -100,4 +265,91 @@ impl Context for Option { None => Err(crate::Error::new_anyhow_with_context(NoneError, f())), } } + + #[track_caller] + fn compute_context< + TC: TypedContext, + C1: Into, + C2: Into, + F: FnOnce(Arc) -> C1, + F2: FnOnce() -> C2, + >( + self, + _map_context: F, + new_context: F2, + ) -> anyhow::Result { + Err(crate::Error::new_anyhow_with_context( + NoneError, + new_context(), + )) + } +} + +#[cfg(test)] +mod tests { + use std::any::Any; + use std::error::Error as StdError; + use std::fmt::Display; + + use allocative::Allocative; + + use super::*; + + #[derive(Debug, derive_more::Display)] + struct TestError; + + impl StdError for TestError {} + + #[derive(Debug, Allocative, Eq, PartialEq)] + struct SomeContext(Vec); + + impl Display for SomeContext { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self) + } + } + + impl TypedContext for SomeContext { + fn eq(&self, other: &dyn TypedContext) -> bool { + match (other as &dyn Any).downcast_ref::() { + Some(v) => self == v, + None => false, + } + } + } + + #[test] + fn test_compute_context() { + crate::Error::check_equal( + &crate::Error::new(TestError).context("string"), + &crate::Error::from(crate::Error::from(TestError).compute_context( + |_t: Arc| -> SomeContext { panic!() }, + || "string", + )), + ); + + crate::Error::check_equal( + &crate::Error::new(TestError).context(SomeContext(vec![0, 1, 2])), + &crate::Error::from( + crate::Error::from(TestError) + .context(SomeContext(vec![])) + .compute_context( + |_t: Arc| -> SomeContext { SomeContext(vec![0, 1, 2]) }, + || "string", + ), + ), + ); + + crate::Error::check_equal( + &crate::Error::from(Option::<()>::None.buck_error_context("string").unwrap_err()), + &crate::Error::from( + Option::<()>::None + .compute_context( + |_t: Arc| -> SomeContext { panic!() }, + || "string", + ) + .unwrap_err(), + ), + ); + } } diff --git a/app/buck2_error/src/context_value.rs b/app/buck2_error/src/context_value.rs new file mode 100644 index 0000000000000..59ccd595a9468 
--- /dev/null
+++ b/app/buck2_error/src/context_value.rs
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::fmt;
+use std::sync::Arc;
+
+use smallvec::SmallVec;
+use starlark_syntax::codemap::FileSpan;
+use starlark_syntax::span_display::span_display;
+
+#[derive(allocative::Allocative, Clone)]
+pub enum ContextValue {
+    Dyn(Arc<str>),
+    Tier(Tier),
+    Tags(SmallVec<[crate::ErrorTag; 1]>),
+    Typed(Arc<dyn TypedContext>),
+    // Stable value for category key
+    Key(Arc<str>),
+    StarlarkError(StarlarkContext),
+}
+
+impl ContextValue {
+    /// Returns whether the context should be included in the error message
+    pub(crate) fn should_display(&self) -> bool {
+        match self {
+            Self::Dyn(..) => true,
+            Self::Typed(e) => e.should_display(),
+            // Displaying the category in the middle of an error message doesn't seem useful
+            Self::Tier(_) => false,
+            Self::Tags(_) => false,
+            Self::Key(..) => false,
+            Self::StarlarkError(..) => false,
+        }
+    }
+
+    #[cfg(test)]
+    pub(crate) fn assert_eq(&self, other: &Self) {
+        match (self, other) {
+            (ContextValue::Dyn(a), ContextValue::Dyn(b)) => {
+                assert_eq!(a, b);
+            }
+            (ContextValue::Tier(a), ContextValue::Tier(b)) => {
+                assert_eq!(a, b);
+            }
+            (ContextValue::Tags(a), ContextValue::Tags(b)) => {
+                assert_eq!(a, b);
+            }
+            (ContextValue::Typed(left), ContextValue::Typed(right)) => {
+                assert!(left.eq(&**right))
+            }
+            (ContextValue::Key(a), ContextValue::Key(b)) => {
+                assert_eq!(a, b);
+            }
+            (ContextValue::StarlarkError(a), ContextValue::StarlarkError(b)) => {
+                assert_eq!(a, b);
+            }
+            (_, _) => panic!("context variants don't match!"),
+        }
+    }
+}
+
+impl std::fmt::Display for ContextValue {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::Dyn(v) => f.write_str(v),
+            Self::Tier(category) => write!(f, "{:?}", category),
+            Self::Tags(tags) => write!(f, "{:?}", tags),
+            Self::Typed(v) => std::fmt::Display::fmt(v, f),
+            Self::Key(v) => f.write_str(v),
+            Self::StarlarkError(v) => write!(f, "{}", v),
+        }
+    }
+}
+
+impl From<String> for ContextValue {
+    fn from(value: String) -> Self {
+        ContextValue::Dyn(value.into())
+    }
+}
+
+impl<'a> From<&'a str> for ContextValue {
+    fn from(value: &str) -> Self {
+        ContextValue::Dyn(value.into())
+    }
+}
+
+#[derive(
+    allocative::Allocative,
+    PartialEq,
+    Eq,
+    Copy,
+    Clone,
+    Debug,
+    PartialOrd,
+    Ord
+)]
+pub enum Tier {
+    Input,
+    Environment,
+    Tier0,
+}
+
+impl Tier {
+    pub fn combine(self, other: Option<Tier>) -> Tier {
+        let Some(other) = other else { return self };
+        std::cmp::max(self, other)
+    }
+}
+
+impl From<Tier> for ContextValue {
+    fn from(value: Tier) -> Self {
+        ContextValue::Tier(value)
+    }
+}
+
+pub trait TypedContext:
+    allocative::Allocative + Send + Sync + std::fmt::Display + std::any::Any + 'static
+{
+    fn eq(&self, other: &dyn TypedContext) -> bool;
+
+    fn should_display(&self) -> bool {
+        true
+    }
+}
+
+impl<T: TypedContext> From<T> for ContextValue {
+    fn from(value: T) -> Self {
+        ContextValue::Typed(Arc::new(value))
+    }
+}
+
+#[derive(Clone, allocative::Allocative, Debug, PartialEq, Eq, Hash)]
+pub struct StarlarkContext {
+    // TODO(minglunli): We could take in the CallStack type and do some magic to make it look
+    // nicer, but I think just concatenating the string forms is accurate and looks good enough.
+    pub call_stack: String,
+    pub error_msg: String,
+    pub span: Option<FileSpan>,
+}
+
+impl StarlarkContext {
+    pub fn concat(&self, other: Option<Self>) -> Self {
+        if let Some(ctx) = other {
+            let trimmed = ctx
+                .call_stack
+                .trim_start_matches(starlark_syntax::call_stack::CALL_STACK_TRACEBACK_PREFIX);
+            let call_stack = format!("{}{}", self.call_stack, trimmed);
+            Self {
+                call_stack,
+                error_msg: ctx.error_msg.clone(),
+                span: ctx.span.clone(),
+            }
+        } else {
+            self.clone()
+        }
+    }
+}
+
+impl std::fmt::Display for StarlarkContext {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let span = span_display(
+            self.span.as_ref().map(|s| s.as_ref()),
+            self.error_msg.as_str(),
+            false,
+        );
+        write!(f, "{}\n{}", self.call_stack, span)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::Tier;
+    use crate::{self as buck2_error};
+
+    #[derive(buck2_error_derive::Error, Debug)]
+    #[error("test error")]
+    struct TestError;
+
+    #[test]
+    fn test_category_not_in_formatting() {
+        let e: crate::Error = TestError.into();
+        let e = e.context("foo");
+        let e2 = e.clone().context(crate::Tier::Input);
+        assert_eq!(format!("{:#}", e), format!("{:#}", e2));
+    }
+
+    #[test]
+    fn test_category_infra_preferred() {
+        let e: crate::Error = TestError.into();
+        let e = e
+            .clone()
+            .context(crate::Tier::Tier0)
+            .context(crate::Tier::Input);
+        assert_eq!(e.get_tier(), Some(crate::Tier::Tier0));
+    }
+
+    #[test]
+    fn test_combine() {
+        assert_eq!(Tier::Input.combine(None), Tier::Input);
+        assert_eq!(Tier::Input.combine(Some(Tier::Input)), Tier::Input);
+        assert_eq!(
+            Tier::Input.combine(Some(Tier::Environment)),
+            Tier::Environment
+        );
+        assert_eq!(Tier::Input.combine(Some(Tier::Tier0)), Tier::Tier0);
+        assert_eq!(Tier::Environment.combine(Some(Tier::Tier0)), Tier::Tier0);
+        assert_eq!(Tier::Tier0.combine(Some(Tier::Input)), Tier::Tier0);
+    }
+}
diff --git a/app/buck2_error/src/derive_tests.rs b/app/buck2_error/src/derive_tests.rs
new file mode 100644
index 0000000000000..32ce318f45032
--- /dev/null
+++ b/app/buck2_error/src/derive_tests.rs
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */ + +#![cfg(test)] + +use crate as buck2_error; + +#[derive(buck2_error_derive::Error, Debug)] +#[error("foo")] +#[buck2(input)] +pub struct Error1; + +#[test] +fn test_derive_error1() { + let e: crate::Error = Error1.into(); + assert_eq!(e.get_tier(), Some(crate::Tier::Input)); + + let e: anyhow::Error = Error1.into(); + let e: crate::Error = e.into(); + assert_eq!(e.get_tier(), Some(crate::Tier::Input)); +} + +#[derive(buck2_error_derive::Error, Debug)] +#[error("foo")] +#[buck2(tier0)] +#[allow(unused)] +struct Error2((), ()); + +#[test] +fn test_derive_error2() { + let e: crate::Error = Error2((), ()).into(); + assert_eq!(e.get_tier(), Some(crate::Tier::Tier0)); +} + +#[derive(buck2_error_derive::Error, Debug)] +pub enum Error3 { + #[error("foo")] + #[buck2(input)] + VariantA, + #[error("bar")] + #[buck2(tier0)] + VariantB, + #[error("baz")] + VariantC, +} + +#[test] +fn test_derive_error3() { + let e: crate::Error = Error3::VariantA.into(); + assert_eq!(e.get_tier(), Some(crate::Tier::Input)); + + let e: crate::Error = Error3::VariantB.into(); + assert_eq!(e.get_tier(), Some(crate::Tier::Tier0)); + + let e: crate::Error = Error3::VariantC.into(); + assert_eq!(e.get_tier(), None); +} + +#[derive(buck2_error_derive::Error, Debug)] +#[error("Generic error")] +pub struct GenericError(G); + +#[test] +fn test_generic_error() { + let _e: crate::Error = GenericError(42).into(); +} + +/// Test that no unused fields warning is emitted. +#[derive(buck2_error_derive::Error, Debug)] +#[error("Unused")] +pub struct WithField { + x: u8, +} + +#[test] +fn test_with_field() { + let _e: crate::Error = WithField { x: 42 }.into(); +} + +#[derive(buck2_error_derive::Error, Debug)] +#[error("Unused")] +struct NoAttrsStruct; + +#[derive(buck2_error_derive::Error, Debug)] +#[error("Unused")] +enum NoAttrsEnum { + Variant, +} + +#[test] +fn test_source_location_no_attrs() { + let e: crate::Error = NoAttrsStruct.into(); + assert_eq!( + e.source_location(), + Some("buck2_error/src/derive_tests.rs::NoAttrsStruct") + ); + let e: crate::Error = NoAttrsEnum::Variant.into(); + assert_eq!( + e.source_location(), + Some("buck2_error/src/derive_tests.rs::NoAttrsEnum::Variant") + ); +} + +#[derive(buck2_error_derive::Error, Debug)] +#[error("Unused")] +#[buck2(input)] +enum EnumWithTypeOption { + Variant, +} + +#[test] +fn test_enum_with_type_option() { + let e: crate::Error = EnumWithTypeOption::Variant.into(); + assert_eq!(e.get_tier(), Some(crate::Tier::Input)); + assert_eq!( + e.source_location(), + Some("buck2_error/src/derive_tests.rs::EnumWithTypeOption::Variant"), + ); +} + +#[derive(buck2_error_derive::Error, Debug)] +#[error("Unused")] +#[buck2(input)] +struct ErrorWithSpelledOutCategory; + +#[test] +fn test_error_with_spelled_out_category() { + let e: crate::Error = ErrorWithSpelledOutCategory.into(); + assert_eq!(e.get_tier(), Some(crate::Tier::Input)); +} + +#[test] +fn test_root_is_applied_conditionally() { + #[derive(buck2_error_derive::Error, Debug)] + #[error("Unused")] + struct WatchmanError; + + #[derive(buck2_error_derive::Error, Debug)] + #[error("Unused")] + #[buck2(tag = compute(self))] + enum MaybeWatchmanError { + Some(#[source] WatchmanError), + None, + } + + fn compute(x: &MaybeWatchmanError) -> Option { + match x { + MaybeWatchmanError::Some(_) => None, + MaybeWatchmanError::None => Some(crate::ErrorTag::AnyActionExecution), + } + } + + let e: crate::Error = MaybeWatchmanError::None.into(); + assert!(e.has_tag(crate::ErrorTag::AnyActionExecution)); + + let e: crate::Error = 
MaybeWatchmanError::Some(WatchmanError).into(); + assert!(e.tags().is_empty()); +} + +#[test] +fn test_error_tags() { + fn f(_: &TaggedError) -> Option { + Some(crate::ErrorTag::StarlarkFail) + } + + #[derive(buck2_error_derive::Error, Debug)] + #[error("Unused")] + #[buck2(tag = WatchmanTimeout)] + enum TaggedError { + #[buck2(tag = f(self))] + A, + #[buck2(tag = WatchmanTimeout)] + B, + } + + let a: crate::Error = TaggedError::A.into(); + assert_eq!( + &a.tags(), + &[ + crate::ErrorTag::StarlarkFail, + crate::ErrorTag::WatchmanTimeout + ] + ); + let b: crate::Error = TaggedError::B.into(); + assert_eq!(&b.tags(), &[crate::ErrorTag::WatchmanTimeout]); +} + +#[test] +fn test_correct_transparent() { + #[derive(buck2_error_derive::Error, Debug)] + #[error("Unused")] + #[buck2(tier0)] + struct E; + + #[derive(buck2_error_derive::Error, Debug)] + #[error(transparent)] + struct T(E); + + let t: crate::Error = T(E).into(); + assert_eq!(t.get_tier(), Some(crate::Tier::Tier0)); +} diff --git a/app/buck2_error/src/error.rs b/app/buck2_error/src/error.rs index a91d754e0e2ba..52d0149ab4ba8 100644 --- a/app/buck2_error/src/error.rs +++ b/app/buck2_error/src/error.rs @@ -7,16 +7,38 @@ * of this source tree. */ -use std::fmt::Display; +use std::error::Error as StdError; +use std::fmt; use std::sync::Arc; +use either::Either; +use itertools::Itertools; +use smallvec::SmallVec; + +use crate::__for_macro::AsDynError; +use crate::classify::best_tag; +use crate::classify::error_tag_category; +use crate::context_value::ContextValue; +use crate::context_value::StarlarkContext; +use crate::context_value::TypedContext; +use crate::format::into_anyhow_for_format; +use crate::root::ErrorRoot; +use crate::Tier; +use crate::UniqueRootId; + +pub type DynLateFormat = dyn Fn(&mut fmt::Formatter<'_>) -> fmt::Result + Send + Sync + 'static; + /// The core error type provided by this crate. /// /// While this type has many of the features of `anyhow::Error`, in most places you should continue /// to use `anyhow`. This type is only expected to appear on a small number of APIs which require a /// clonable error. +/// +/// Unlike `anyhow::Error`, this type supports no downcasting. That is an intentional choice - +/// downcasting errors is fragile and becomes difficult to support in conjunction with anyhow +/// compatibility. #[derive(allocative::Allocative, Clone, dupe::Dupe)] -pub struct Error(pub(crate) Arc); +pub struct Error(pub(crate) Arc); /// The actual error representation. /// @@ -25,19 +47,337 @@ pub struct Error(pub(crate) Arc); /// /// Right now, this type can represent an error root, together with a stack of context information. #[derive(allocative::Allocative)] -pub(crate) enum ErrorImpl { +pub(crate) enum ErrorKind { + Root(Box), + /// For now we use untyped context to maximize compatibility with anyhow. + WithContext(ContextValue, Error), + /// Indicates that the error has been emitted, ie shown to the user. // This `Arc` should ideally be a `Box`. However, that doesn't work right now because of the // implementation of `into_anyhow_for_format`. - Root(#[allocative(skip)] Arc), - /// For now we use untyped context to maximize compatibility with anyhow. 
- WithContext( - #[allocative(skip)] Arc, - Error, - ), + #[allocative(skip)] // FIXME(JakobDegen): "Implementation is not general enough" + Emitted(Arc, Error), } impl Error { - pub fn new(e: E) -> Self { - Self(Arc::new(ErrorImpl::Root(Arc::new(e)))) + #[track_caller] + #[cold] + pub fn new(e: E) -> Self { + let source_location = + crate::source_location::from_file(std::panic::Location::caller().file(), None); + crate::any::recover_crate_error(&e, source_location) + } + + #[track_caller] + #[cold] + pub fn from_anyhow_ref(e: &anyhow::Error) -> Self { + let source_location = + crate::source_location::from_file(std::panic::Location::caller().file(), None); + crate::any::recover_crate_error(e.as_dyn_error(), source_location) + } + + fn iter_kinds<'a>(&'a self) -> impl Iterator { + let mut cur = Some(self); + std::iter::from_fn(move || { + let out = cur?; + match &*out.0 { + ErrorKind::WithContext(_, next) | ErrorKind::Emitted(_, next) => cur = Some(next), + ErrorKind::Root(_) => cur = None, + }; + Some(out.0.as_ref()) + }) + } + + fn root(&self) -> &ErrorRoot { + let Some(ErrorKind::Root(r)) = self.iter_kinds().last() else { + unreachable!() + }; + r + } + + pub fn action_error(&self) -> Option<&buck2_data::ActionError> { + self.root().action_error() + } + + pub(crate) fn iter_context<'a>(&'a self) -> impl Iterator { + self.iter_kinds().filter_map(|kind| match kind { + ErrorKind::WithContext(ctx, _) => Some(ctx), + _ => None, + }) + } + + pub fn mark_emitted(self, late_format: Arc) -> Self { + // Have to write this kind of weird to get the compiler to infer a higher ranked closure + Self(Arc::new(ErrorKind::Emitted(late_format, self))) + } + + /// If the error has not been emitted yet, returns `None`, otherwise `Some`. + /// + /// Most errors are only shown to the user once. However, some errors, specifically action + /// errors, are shown to the user twice: Once when the error occurs, and again at the end of the + /// build in the form of a short "Failed to build target" summary. + /// + /// After the error has been shown to the user for the first time, it is marked as emitted. The + /// late formatter that is returned here is what should be printed at the end of the build + pub fn is_emitted<'a>(&'a self) -> Option { + let (val, was_late_formatted) = into_anyhow_for_format(self, true); + if was_late_formatted { Some(val) } else { None } + } + + /// Only intended to be used for debugging, helps to understand the structure of the error + pub fn get_stack_for_debug(&self) -> String { + use fmt::Write; + let mut s = String::new(); + for kind in self.iter_kinds() { + match kind { + ErrorKind::Root(r) => { + writeln!(s, "ROOT:\n{:#?}", r).unwrap(); + } + ErrorKind::Emitted(_, _) => { + writeln!(s, "EMITTED").unwrap(); + } + ErrorKind::WithContext(ctx, _) => { + writeln!(s, "CONTEXT: {:#}", ctx).unwrap(); + } + } + } + s + } + + /// Identifier for deduplication during a build. + pub fn root_id(&self) -> UniqueRootId { + self.root().id() + } + + /// Stable identifier for grouping errors. 
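`iter_kinds` above walks what is effectively a cons list: each `WithContext`/`Emitted` node wraps the next `Error`, and the chain terminates at `Root`. A minimal standalone model of that traversal (a simplified two-variant node, not the real `ErrorKind`):

use std::sync::Arc;

// Simplified stand-ins for Error/ErrorKind: a context node wraps the next
// error and the root terminates the chain. Not the real types, just the shape.
enum Kind {
    Root(String),
    WithContext(String, ErrorChain),
}
struct ErrorChain(Arc<Kind>);

// Same strategy as iter_kinds: keep a cursor, advance it to the inner error
// on context nodes, and stop after yielding the root.
fn iter_kinds(e: &ErrorChain) -> impl Iterator<Item = &Kind> {
    let mut cur = Some(e);
    std::iter::from_fn(move || {
        let out = cur?;
        match &*out.0 {
            Kind::WithContext(_, next) => cur = Some(next),
            Kind::Root(_) => cur = None,
        }
        Some(out.0.as_ref())
    })
}

fn main() {
    let root = ErrorChain(Arc::new(Kind::Root("boom".to_owned())));
    let e = ErrorChain(Arc::new(Kind::WithContext("ctx".to_owned(), root)));
    // Newest context first, root last - mirroring how the real type recovers
    // its root() via iter_kinds().last().
    assert_eq!(iter_kinds(&e).count(), 2);
}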
+ pub fn category_key(&self) -> String { + let tags = self.tags().into_iter().map(|tag| tag.as_str_name()); + + let key_values = self.iter_context().filter_map(|kind| match kind { + ContextValue::Key(val) => Some(val.to_string()), + _ => None, + }); + + let mut values = vec![self.source_location().unwrap_or("unknown_location")] + .into_iter() + .chain(tags) + .map(|s| s.to_owned()) + .chain(key_values); + + values.join(":").to_owned() + } + + pub fn source_location(&self) -> Option<&str> { + self.root().source_location() + } + + pub fn context>(self, context: C) -> Self { + Self(Arc::new(ErrorKind::WithContext(context.into(), self))) + } + + pub fn context_for_key(self, context: &str) -> Self { + Self(Arc::new(ErrorKind::WithContext( + ContextValue::Key(context.into()), + self, + ))) + } + + pub fn context_for_starlark_backtrace(self, context: StarlarkContext) -> Self { + Self(Arc::new(ErrorKind::WithContext( + ContextValue::StarlarkError(context), + self, + ))) + } + + #[cold] + #[track_caller] + pub(crate) fn new_anyhow_with_context>(e: E, c: C) -> anyhow::Error + where + Error: From, + { + crate::Error::from(e).context(c).into() + } + + pub fn tag(self, tags: impl IntoIterator) -> Self { + let tags = SmallVec::from_iter(tags); + if tags.is_empty() { + self + } else { + self.context(ContextValue::Tags(tags)) + } + } + + pub fn get_tier(&self) -> Option { + let mut out = None; + // TODO(nga): remove tiers marking and only rely on tags. + let context_tiers = self.iter_context().flat_map(|kind| match kind { + ContextValue::Tier(t) => Either::Left(Some(*t).into_iter()), + ContextValue::Tags(tags) => { + Either::Right(tags.iter().copied().filter_map(error_tag_category)) + } + _ => Either::Left(None.into_iter()), + }); + + for t in context_tiers { + // It's a tier0 error if it was ever marked as a tier0 error + match t { + Tier::Tier0 => return Some(t), + Tier::Environment => out = std::cmp::max(out, Some(t)), + Tier::Input => out = std::cmp::max(out, Some(t)), + } + } + out + } + + /// All tags unsorted and with duplicates. + fn tags_unsorted(&self) -> impl Iterator + '_ { + self.iter_context() + .filter_map(|kind| match kind { + ContextValue::Tags(tags) => Some(tags.iter().copied()), + _ => None, + }) + .flatten() + } + + pub fn find_typed_context(&self) -> Option> { + self.iter_context().find_map(|kind| match kind { + ContextValue::Typed(v) => { + if let Ok(typed) = Arc::downcast(v.clone()) { + Some(typed) + } else { + None + } + } + _ => None, + }) + } + + /// Get all the tags that have been added to this error + pub fn tags(&self) -> Vec { + let mut tags: Vec<_> = self.tags_unsorted().collect(); + tags.sort_unstable_by_key(|tag| tag.as_str_name()); + tags.dedup(); + tags + } + + /// The most interesting tag among this error tags. 
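To make `category_key` above concrete, a hedged worked example; the location, tag, and key below are illustrative, not taken from real output:

// Illustrative only: an error whose root was recorded at
// "buck2_error/src/any.rs::TestError", tagged with ErrorTag::Analysis, and
// carrying a ContextValue::Key("mykey") would produce
//     buck2_error/src/any.rs::TestError:ANALYSIS:mykey
// i.e. source location first, then sorted tag names, then key values,
// all joined with ':'.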
+ pub fn best_tag(&self) -> Option { + best_tag(self.tags_unsorted()) + } + + pub fn has_tag(&self, tag: crate::ErrorTag) -> bool { + self.tags_unsorted().any(|t| t == tag) + } + + pub(crate) fn compute_context< + TC: TypedContext, + C1: Into, + C2: Into, + F: FnOnce(Arc) -> C1, + F2: FnOnce() -> C2, + >( + self, + map_context: F, + new_context: F2, + ) -> anyhow::Error { + if let ErrorKind::WithContext(crate::context_value::ContextValue::Typed(v), err) = &*self.0 + { + if let Ok(typed) = Arc::downcast(v.clone()) { + return Self(Arc::new(ErrorKind::WithContext( + map_context(typed).into(), + err.clone(), + ))) + .into(); + } + } + self.context(new_context()).into() + } + + #[cfg(test)] + pub(crate) fn check_equal(mut a: &Self, mut b: &Self) { + loop { + match (&*a.0, &*b.0) { + (ErrorKind::Root(a), ErrorKind::Root(b)) => { + // Avoid comparing vtable pointers + assert!(a.test_equal(b)); + return; + } + ( + ErrorKind::WithContext(a_context, a_inner), + ErrorKind::WithContext(b_context, b_inner), + ) => { + a_context.assert_eq(b_context); + a = a_inner; + b = b_inner; + } + (ErrorKind::Emitted(_, a_inner), ErrorKind::Emitted(_, b_inner)) => { + a = a_inner; + b = b_inner; + } + (_, _) => { + panic!("Left side did not match right: {:?} {:?}", a, b) + } + } + } + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use crate::Tier; + + #[derive(Debug, thiserror::Error)] + #[error("Test")] + struct TestError; + + #[test] + fn test_emitted_works() { + let e: crate::Error = TestError.into(); + assert!(e.is_emitted().is_none()); + let e = e.mark_emitted(Arc::new(|_| Ok(()))); + assert!(e.is_emitted().is_some()); + let e: anyhow::Error = e.into(); + let e: crate::Error = e.context("context").into(); + assert!(e.is_emitted().is_some()); + } + + #[test] + fn test_root_id() { + let e1: crate::Error = TestError.into(); + let e1x = e1.clone().context("context"); + let e1y = e1.clone().context("context2"); + + let e2: crate::Error = TestError.into(); + + assert_eq!(e1.root_id(), e1x.root_id()); + assert_eq!(e1.root_id(), e1y.root_id()); + assert_eq!(e1x.root_id(), e1y.root_id()); + + assert_ne!(e1.root_id(), e2.root_id()); + } + + #[test] + fn test_get_tier() { + let e: crate::Error = crate::Error::new(TestError) + .context(Tier::Tier0) + .context(Tier::Environment); + assert_eq!(e.get_tier(), Some(Tier::Tier0)); + let e: crate::Error = crate::Error::new(TestError) + .context(Tier::Environment) + .context(Tier::Input); + assert_eq!(e.get_tier(), Some(Tier::Environment)); + } + + #[test] + fn test_category_key() { + let err: crate::Error = TestError.into(); + assert_eq!(err.category_key(), err.source_location().unwrap()); + + let err = err.tag([crate::ErrorTag::Analysis]); + assert_eq!( + err.category_key(), + format!("{}:{}", err.source_location().unwrap(), "ANALYSIS") + ); } } diff --git a/app/buck2_error/src/format.rs b/app/buck2_error/src/format.rs index 5d83046cc8e48..91cb85d1dd867 100644 --- a/app/buck2_error/src/format.rs +++ b/app/buck2_error/src/format.rs @@ -7,11 +7,16 @@ * of this source tree. */ -use std::fmt::Debug; -use std::fmt::Display; +use std::error::Error as StdError; +use std::fmt; use std::sync::Arc; -use crate::error::ErrorImpl; +use dupe::Dupe; + +use crate::__for_macro::ContextValue; +use crate::context_value::StarlarkContext; +use crate::error::ErrorKind; +use crate::DynLateFormat; /// We currently implement formatting in the laziest way possible - we convert to an equivalent /// `anyhow::Error` and format that. 
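The `should_late_format` plumbing in the next hunk pairs with `mark_emitted`/`is_emitted` from error.rs. A hedged sketch of the intended flow at a reporting call site; the function names are invented, and this assumes the closure coerces to `Arc<DynLateFormat>` as written:

use std::fmt;
use std::sync::Arc;

// Hypothetical reporting flow. After an error is first shown in full,
// mark_emitted attaches a late formatter; at the end of the build, is_emitted
// hands back a value that renders through that closure instead of repeating
// the whole error chain.
fn report(e: buck2_error::Error) -> buck2_error::Error {
    eprintln!("{:#}", e); // first occurrence: full report
    e.mark_emitted(Arc::new(|f: &mut fmt::Formatter<'_>| {
        write!(f, "target failed (reported above)")
    }))
}

fn summarize(e: &buck2_error::Error) {
    if let Some(short) = e.is_emitted() {
        eprintln!("{:#}", short); // end of build: short late-format summary
    }
}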
@@ -19,99 +24,101 @@ use crate::error::ErrorImpl; /// In the long term, this is not what we want to do. Writing our own error formatter is not that /// hard and will give us a huge amount of flexibility. However, the goal right now is to get large /// amounts of `anyhow` compatibility with minimal work, and this achieves that. -fn into_anyhow_for_format(mut error: &crate::Error) -> anyhow::Error { +/// +/// If `should_late_format` is set, attempts to use the late formatter instead of the standard one. +/// That might not be present, so additionally returns a bool which indicates whether the late +/// formatter was used. +pub(crate) fn into_anyhow_for_format( + mut error: &crate::Error, + should_late_format: bool, +) -> (anyhow::Error, bool) { let mut context_stack = Vec::new(); + let mut was_late_formatted = false; let base = loop { match error.0.as_ref() { - ErrorImpl::Root(root) => break Arc::clone(root), - ErrorImpl::WithContext(context, inner) => { - context_stack.push(context.clone()); + ErrorKind::Root(root) => { + break AnyhowWrapperForFormat::Root(root.description().to_owned()); + } + ErrorKind::WithContext(context, inner) => { + context_stack.push(context); + error = inner; + } + ErrorKind::Emitted(late_format, inner) => { + if should_late_format { + was_late_formatted = true; + break AnyhowWrapperForFormat::LateFormat(late_format.dupe()); + } error = inner; } } }; + let mut starlark_error: Option<StarlarkContext> = None; let mut out: anyhow::Error = base.into(); for context in context_stack.into_iter().rev() { - out = out.context(context); - } - out -} - -impl Debug for crate::Error { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - Debug::fmt(&into_anyhow_for_format(self), f) + if let ContextValue::StarlarkError(ctx) = context { + // Because context_stack is reversed, the first error must come last to preserve stack ordering + starlark_error = Some(ctx.concat(starlark_error)); + continue; + } + if let Some(ctx) = starlark_error { + out = out.context(format!("{}", ctx)); + starlark_error = None; + } + if context.should_display() { + out = out.context(format!("{}", context)); + } } -} -impl Display for crate::Error { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - Display::fmt(&into_anyhow_for_format(self), f) + if let Some(ctx) = starlark_error { + out = out.context(format!("{}", ctx)); } + (out, was_late_formatted) } -#[cfg(test)] -mod tests { - use crate as buck2_error; +// Keep 3 variables: +// - the backtrace string, which just continuously concatenates +// - the error message, which is the first error message encountered (the stack is reversed, so the first one is the right one) +// - the span, which is selected the same way as the error message - #[derive(Debug, thiserror::Error)] - #[error("test error")] - struct TestError; - - fn trim_rust_backtrace(error: &str) -> &str { - match error.find("\nStack backtrace:") { - Some(pos) => error[..pos].trim_end(), - None => error.trim_end(), - } +impl fmt::Debug for crate::Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&into_anyhow_for_format(self, false).0, f) } +} - fn assert_eq_no_backtrace<T: AsRef<str>, U: AsRef<str>>(a: T, b: U) { - assert_eq!( - trim_rust_backtrace(a.as_ref()), - trim_rust_backtrace(b.as_ref()) - ); +impl fmt::Display for crate::Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(&into_anyhow_for_format(self, false).0, f) } +} - #[test] - fn test_shows_context() { - let e = buck2_error::Error::from(TestError) - .context("context 1") - .context("context 2"); - 
assert_eq_no_backtrace( - format!("{:?}", e), - r#"context 2 +enum AnyhowWrapperForFormat { + Root(String), + LateFormat(Arc<DynLateFormat>), +} -Caused by: - 0: context 1 - 1: test error"#, - ); +impl fmt::Debug for AnyhowWrapperForFormat { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Root(root) => fmt::Display::fmt(root, f), + Self::LateFormat(late_format) => late_format(f), + } } + } - #[test] - fn test_shows_anyhow_context() { - // This context can't be understood by `buck2_error` - let e = anyhow::Error::from(TestError).context("context 1"); - let e = buck2_error::Error::from(e).context("context 2"); - assert_eq_no_backtrace( - format!("{:?}", e), - r#"context 2 - -Caused by: - 0: context 1 - 1: test error"#, - ); +impl fmt::Display for AnyhowWrapperForFormat { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Root(root) => fmt::Display::fmt(root, f), + Self::LateFormat(late_format) => late_format(f), + } } + } - #[test] - fn test_after_anyhow_conversion() { - let e = buck2_error::Error::from(TestError).context("context"); - let e2 = anyhow::Error::from(e.clone()); - assert_eq_no_backtrace(format!("{}", e), format!("{}", e2)); - assert_eq_no_backtrace(format!("{:?}", e), format!("{:?}", e2)); - - let e3 = buck2_error::Error::from(e2); - assert_eq_no_backtrace(format!("{}", e), format!("{}", e3)); - assert_eq_no_backtrace(format!("{:?}", e), format!("{:?}", e3)); +impl StdError for AnyhowWrapperForFormat { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + None } } diff --git a/app/buck2_error/src/lib.rs b/app/buck2_error/src/lib.rs index ebf25c0d9e61d..abb73edeb28f6 100644 --- a/app/buck2_error/src/lib.rs +++ b/app/buck2_error/src/lib.rs @@ -7,30 +7,66 @@ * of this source tree. */ +#![feature(error_generic_member_access)] +#![feature(let_chains)] +#![feature(trait_alias)] +#![feature(trait_upcasting)] + mod any; +pub mod classify; mod context; +mod context_value; +mod derive_tests; mod error; mod format; +pub mod macros; +mod root; +mod source_location; +pub mod starlark_error; + +use std::error::Request; -pub use any::AnyError; -pub use context::Context; +pub use context::AnyhowContextForError; +pub use context::BuckErrorContext; +/// A piece of metadata to indicate whether this error is an infra or user error. +/// +/// You can attach this to an error by passing it to the [`Error::context`] method. Alternatively, +/// you can call [`.input()`](`crate::BuckErrorContext::input_anyhow`) or +/// [`.tier0()`](`crate::BuckErrorContext::tier0_anyhow`) on a [`buck2_error::Result`][`Result`]. +/// +/// The category is fundamentally closed - the expectation is that it will not grow new variants in +/// the future. +#[doc(inline)] +pub use context_value::Tier; +pub use context_value::TypedContext; +pub use error::DynLateFormat; pub use error::Error; +pub use root::UniqueRootId; pub type Result<T> = std::result::Result<T, crate::Error>; +/// Allows simpler construction of the Ok case when the result type can't be inferred. +#[allow(non_snake_case)] +pub fn Ok<T>(t: T) -> Result<T> { + Result::Ok(t) +} + +/// See the documentation in the `error.proto` file for details. +pub use buck2_data::error::ErrorTag; /// Generates an error impl for the type. /// /// This macro is a drop-in replacement for [`thiserror::Error`]. In the near future, all uses of /// `thiserror` in `buck2/app` will be replaced with this macro.
/// /// Currently, the only distinction from `thiserror::Error` is that an additional impl of -/// [`AnyError`] is generated for the type, which makes some of the interactions with `buck2_error` more +/// `AnyError` is generated for the type, which makes some of the interactions with `buck2_error` more /// ergonomic. In the future, this macro will also be used to be able to annotate errors with /// additional structured context information. /// /// ## Example /// /// ```rust +/// # #![feature(error_generic_member_access)] /// #[derive(Debug, buck2_error::Error)] /// #[error("My error type")] /// struct MyError; @@ -39,10 +75,58 @@ pub type Result<T> = std::result::Result<T, crate::Error>; /// /// let e: crate::Error = MyError.into(); /// assert_eq!(&format!("{}", e), "My error type"); /// ``` #[doc(inline)] -pub use buck2_error_derive::ErrorForReexport as Error; +pub use buck2_error_derive::Error; + +use crate::any::ProvidableMetadata; + +/// Provide metadata about an error. +/// +/// This is a manual alternative to deriving `buck2_error::Error`, which should be preferred if at +/// all possible. This function has a pretty strict contract: You must call it within the `provide` +/// implementation for an error type `E`, and must pass `E` as the type parameter. +/// +/// The `source_file` should just be `std::file!()`; the `source_location_extra` should be the type +/// - and possibly variant - name, formatted as either `Type` or `Type::Variant`. +pub fn provide_metadata<'a, 'b>( + request: &'b mut Request<'a>, + category: Option<crate::Tier>, + tags: impl IntoIterator<Item = crate::ErrorTag>, + source_file: &'static str, + source_location_extra: Option<&'static str>, + action_error: Option<buck2_data::ActionError>, +) { + let metadata = ProvidableMetadata { + action_error, + category, + tags: tags.into_iter().collect(), + source_file, + source_location_extra, + }; + Request::provide_value(request, metadata); +} #[doc(hidden)] pub mod __for_macro { - pub use buck2_error_derive::exterminate; + use std::error::Error as StdError; + + pub use anyhow; pub use thiserror; + + pub use crate::context_value::ContextValue; + + pub trait AsDynError { + fn as_dyn_error<'a>(&'a self) -> &'a (dyn StdError + 'static); + } + + impl AsDynError for dyn StdError + Sync + Send + 'static { + fn as_dyn_error<'a>(&'a self) -> &'a (dyn StdError + 'static) { + self + } + } + + impl<T: StdError + 'static> AsDynError for T { + fn as_dyn_error<'a>(&'a self) -> &'a (dyn StdError + 'static) { + self + } + } } diff --git a/app/buck2_error/src/macros.rs b/app/buck2_error/src/macros.rs new file mode 100644 index 0000000000000..5251ae7b04c6b --- /dev/null +++ b/app/buck2_error/src/macros.rs @@ -0,0 +1,86 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::fmt::Arguments; + +#[doc(hidden)] +#[cold] +#[track_caller] +pub fn buck2_error_impl(tags: &[crate::ErrorTag], args: Arguments) -> crate::Error { + let anyhow_error = anyhow::anyhow!("{args}"); + let error = crate::Error::from(anyhow_error).tag(tags.iter().copied()); + error +} + +#[doc(hidden)] +#[cold] +#[track_caller] +pub fn internal_error_impl(args: Arguments) -> crate::Error { + buck2_error_impl( + &[crate::ErrorTag::InternalError], + format_args!("{args} (internal error)"), + ) +} + +#[macro_export] +macro_rules!
buck2_error { + ($tags:expr, $format:expr) => { + buck2_error!($tags, $format,) + }; + ($tags:expr, $format:expr, $($arg:tt)*) => { + $crate::macros::buck2_error_impl(&$tags, format_args!($format, $($arg)*)) + }; +} + +/// Indicates a bug in buck2. +#[macro_export] +macro_rules! internal_error { + ($format:expr) => { + internal_error!($format,) + }; + ($format:expr , $($arg:tt)*) => { + $crate::macros::internal_error_impl(format_args!($format, $($arg)*)) + }; +} + +#[doc(hidden)] +#[cold] +#[track_caller] +pub fn buck2_error_anyhow_impl(tags: &[crate::ErrorTag], args: Arguments) -> anyhow::Error { + let error = buck2_error_impl(tags, args); + anyhow::Error::from(error) +} + +#[doc(hidden)] +#[cold] +#[track_caller] +pub fn internal_error_anyhow_impl(args: Arguments) -> anyhow::Error { + let error = internal_error_impl(args); + anyhow::Error::from(error) +} + +#[macro_export] +macro_rules! buck2_error_anyhow { + ($tags:expr, $format:expr) => { + buck2_error_anyhow!($tags, $format,) + }; + ($tags:expr, $format:expr, $($arg:tt)*) => { + $crate::macros::buck2_error_anyhow_impl(&$tags, format_args!($format, $($arg)*)) + }; + } + +#[macro_export] +macro_rules! internal_error_anyhow { + ($format:expr) => { + internal_error_anyhow!($format,) + }; + ($format:expr , $($arg:tt)*) => { + $crate::macros::internal_error_anyhow_impl(format_args!($format, $($arg)*)) + }; + } diff --git a/app/buck2_error/src/root.rs b/app/buck2_error/src/root.rs new file mode 100644 index 0000000000000..c94a970581b40 --- /dev/null +++ b/app/buck2_error/src/root.rs @@ -0,0 +1,82 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::fmt; + +static NEXT_ROOT_ID: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(0); + +/// Uniquely identifies an instance of an error root +/// +/// This can be used to deduplicate errors, ie determine that they are caused by the same thing. +/// +/// Note that while this type implements `Hash` and `Debug`, the behavior of both implementations is +/// unstable across executions, and one should be careful not to cause non-determinism with it. 
+#[derive( + allocative::Allocative, + Copy, + Clone, + Debug, + dupe::Dupe, + PartialEq, + Eq, + Hash +)] +pub struct UniqueRootId(u64); + +#[derive(allocative::Allocative)] +pub(crate) struct ErrorRoot { + id: UniqueRootId, + description: String, + source_location: Option<String>, + action_error: Option<buck2_data::ActionError>, +} + +impl ErrorRoot { + pub(crate) fn new( + description: String, + source_location: Option<String>, + action_error: Option<buck2_data::ActionError>, + ) -> Self { + let id = UniqueRootId(NEXT_ROOT_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed)); + Self { + id, + description, + source_location, + action_error, + } + } + + pub(crate) fn description(&self) -> &str { + self.description.as_ref() + } + + /// Equality comparison for use in tests only + #[cfg(test)] + pub(crate) fn test_equal(&self, other: &Self) -> bool { + self.description == other.description + } + + pub(crate) fn id(&self) -> UniqueRootId { + self.id + } + + pub(crate) fn source_location(&self) -> Option<&str> { + self.source_location.as_deref() + } + + pub fn action_error(&self) -> Option<&buck2_data::ActionError> { + self.action_error.as_ref() + } +} + +impl fmt::Debug for ErrorRoot { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&self.description, f) + } +} diff --git a/app/buck2_error/src/source_location.rs b/app/buck2_error/src/source_location.rs new file mode 100644 index 0000000000000..0c1bccc2dc506 --- /dev/null +++ b/app/buck2_error/src/source_location.rs @@ -0,0 +1,154 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +/// Converts a file path returned by `file!` or `Location::file()` to a value suitable for use as a +/// `source_location`. +/// +/// The extra parameter, if present, will be appended to the end of the path. +/// +/// May return `None` if the path is not in `buck2/app`. +pub(crate) fn from_file(path: &str, extra: Option<&str>) -> Option<String> { + // The path is passed in as a host path, not a target path. So we need to manually standardize + // the path separators + let path: String = path + .chars() + .map(|c| if c == '\\' { '/' } else { c }) + .collect(); + // `buck2_error` should only be used within `buck2/app`, giving us a nice way to make sure we + // strip any leading parts of the path we don't want.
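+    // For example (hypothetical host paths, matching the tests below):
+    //   "/repo/buck2/app/buck2_error/src/source_location.rs"
+    //     -> Some("buck2_error/src/source_location.rs")
+    //   with extra = Some("Type::Variant")
+    //     -> Some("buck2_error/src/source_location.rs::Type::Variant")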
+ let (_, path) = path.split_once("app/")?; + + let extra_delimiter = if extra.is_some() { "::" } else { "" }; + + Some(format!( + "{}{}{}", + path, + extra_delimiter, + extra.unwrap_or("") + )) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_this_file() { + assert_eq!( + from_file(file!(), None).as_deref(), + Some("buck2_error/src/source_location.rs"), + ); + + assert_eq!( + from_file(file!(), Some("Type::Variant")).as_deref(), + Some("buck2_error/src/source_location.rs::Type::Variant"), + ); + } + + #[test] + fn test_windows_path() { + assert_eq!( + from_file( + r"C:\whatever\repo\buck2\app\buck2_error\src\source_location.rs", + None + ) + .as_deref(), + Some("buck2_error/src/source_location.rs"), + ); + } + + #[derive(Debug, thiserror::Error)] + #[error("My error")] + struct MyError; + + #[test] + fn test_via_error_new() { + let err: crate::Error = crate::Error::new(MyError); + assert_eq!( + err.source_location(), + Some("buck2_error/src/source_location.rs"), + ); + } + + #[test] + fn test_via_anyhow_from() { + let err: anyhow::Error = anyhow::Error::new(MyError); + let err: crate::Error = crate::Error::from(err); + assert_eq!( + err.source_location(), + Some("buck2_error/src/source_location.rs"), + ); + } + + #[test] + fn test_via_anyhow_into() { + let err: anyhow::Error = anyhow::Error::new(MyError); + let err: crate::Error = err.into(); + // This doesn't work because the `#[track_caller]` location points to the body of the + // `impl<T, U> Into<U> for T` in std. This is not really fixable with this approach. + // Update: since Rust 1.77.1 this now has a value rather than `None`. + assert_eq!( + err.source_location(), + Some("buck2_error/src/source_location.rs") + ); + } + + #[test] + fn test_via_try() { + fn foo() -> crate::Result<()> { + Err::<(), _>(MyError)?; + Ok(()) + } + + let err = foo().unwrap_err(); + assert_eq!( + err.source_location(), + Some("buck2_error/src/source_location.rs"), + ); + } + + #[test] + fn test_via_context() { + use crate::BuckErrorContext; + + let e: anyhow::Error = Err::<(), _>(MyError) + .buck_error_context_anyhow("foo") + .unwrap_err(); + let e: crate::Error = e.into(); + assert_eq!( + e.source_location(), + Some("buck2_error/src/source_location.rs"), + ); + + let e: anyhow::Error = Err::<(), _>(MyError).input_anyhow().unwrap_err(); + let e: crate::Error = e.into(); + assert_eq!( + e.source_location(), + Some("buck2_error/src/source_location.rs"), + ); + } + + #[test] + fn test_via_derive() { + use crate::derive_tests::Error1; + use crate::derive_tests::Error3; + + let e: crate::Error = Error1.into(); + assert_eq!( + e.source_location(), + Some("buck2_error/src/derive_tests.rs::Error1") + ); + + let e: crate::Error = Error3::VariantB.into(); + assert_eq!( + e.source_location(), + Some("buck2_error/src/derive_tests.rs::Error3::VariantB") + ); + } +} diff --git a/app/buck2_error/src/starlark_error.rs b/app/buck2_error/src/starlark_error.rs new file mode 100644 index 0000000000000..54580fe264bb7 --- /dev/null +++ b/app/buck2_error/src/starlark_error.rs @@ -0,0 +1,246 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! Starlark::Error <-> buck2_error::Error conversion implementation.
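+//!
+//! A hedged sketch of the round trip this module supports, mirroring the tests at the bottom of
+//! this file (`MyError` is a stand-in error type):
+//!
+//! ```ignore
+//! let e = crate::Error::new(MyError).context("context 1");
+//! let s: starlark_syntax::Error = e.clone().into();
+//! let back = from_starlark(s);
+//! // Tags, tier, and source location survive the conversion in both directions.
+//! crate::Error::check_equal(&e, &back);
+//! ```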
+ +use std::fmt; +use std::sync::Arc; + +use ref_cast::RefCast; + +use crate::any::recover_crate_error; +use crate::context_value::StarlarkContext; +use crate::error::ErrorKind; +use crate::root::ErrorRoot; + +impl From<crate::Error> for starlark_syntax::Error { + fn from(e: crate::Error) -> starlark_syntax::Error { + let error = Into::into(StarlarkErrorWrapper(e)); + let error_kind = starlark_syntax::ErrorKind::Native(error); + starlark_syntax::Error::new_kind(error_kind) + } +} + +/// Whether or not to mark a starlark error as an input/user error. +/// TODO(minglunli): This is used as an intermediate step so Native errors are categorized. +/// This is not 100% accurate and should be removed once Native errors are categorized properly +#[derive(PartialEq)] +pub enum NativeErrorHandling { + InputError, + Unknown, +} + +// Need to do this for now instead of `impl From<starlark_syntax::Error> for crate::Error` +// Otherwise it will conflict with the `From<T> for crate::Error` impl in any.rs +// This will change once the `anyhow` migration is complete at which point we can update this +#[cold] +pub fn from_starlark(e: starlark_syntax::Error) -> crate::Error { + from_starlark_impl(e, NativeErrorHandling::InputError, false) +} + +#[cold] +pub fn from_starlark_with_options( + e: starlark_syntax::Error, + error_handling: NativeErrorHandling, + skip_stacktrace: bool, +) -> crate::Error { + from_starlark_impl(e, error_handling, skip_stacktrace) +} + +fn from_starlark_impl( + e: starlark_syntax::Error, + error_handling: NativeErrorHandling, + skip_stacktrace: bool, +) -> crate::Error { + if let starlark_syntax::ErrorKind::Native(err) = e.kind() { + if let Some(wrapper) = err.downcast_ref::<StarlarkErrorWrapper>() { + if !e.has_diagnostic() { + return wrapper.0.clone(); + } + + let starlark_context = StarlarkContext { + call_stack: format!("{}", e.call_stack()), + error_msg: format!("{}", e.without_diagnostic()), + span: e.span().cloned(), + }; + + let buck2_err = wrapper + .0 + .clone() + .context_for_starlark_backtrace(starlark_context); + + return buck2_err; + } + }; + + let category = match e.kind() { + starlark_syntax::ErrorKind::Fail(_) + | starlark_syntax::ErrorKind::StackOverflow(_) + | starlark_syntax::ErrorKind::Value(_) + | starlark_syntax::ErrorKind::Function(_) + | starlark_syntax::ErrorKind::Scope(_) + | starlark_syntax::ErrorKind::Parser(_) => Some(crate::Tier::Input), + starlark_syntax::ErrorKind::Internal(_) => Some(crate::Tier::Tier0), + starlark_syntax::ErrorKind::Native(_) | starlark_syntax::ErrorKind::Other(_) + if error_handling == NativeErrorHandling::InputError => + { + Some(crate::Tier::Input) + } + _ => None, + }; + let tags = match e.kind() { + starlark_syntax::ErrorKind::Fail(_) => vec![crate::ErrorTag::StarlarkFail], + starlark_syntax::ErrorKind::StackOverflow(_) => { + vec![crate::ErrorTag::StarlarkStackOverflow] + } + _ => vec![crate::ErrorTag::AnyStarlarkEvaluation], + }; + + let variant_name = match e.kind() { + starlark_syntax::ErrorKind::Fail(_) => "StarlarkError::Fail", + starlark_syntax::ErrorKind::StackOverflow(_) => "StarlarkError::StackOverflow", + starlark_syntax::ErrorKind::Internal(_) => "StarlarkError::Internal", + starlark_syntax::ErrorKind::Value(_) => "StarlarkError::Value", + starlark_syntax::ErrorKind::Function(_) => "StarlarkError::Function", + starlark_syntax::ErrorKind::Scope(_) => "StarlarkError::Scope", + starlark_syntax::ErrorKind::Parser(_) => "StarlarkError::Parser", + starlark_syntax::ErrorKind::Native(_) => "StarlarkError::Native", + _ => "StarlarkError", + }; + let source_location =
crate::source_location::from_file(std::file!(), Some(variant_name)); + let description = if skip_stacktrace { + format!("{}", e.without_diagnostic()) + } else { + format!("{}", e) + }; + + let mut e = match e.into_kind() { + starlark_syntax::ErrorKind::Fail(e) + | starlark_syntax::ErrorKind::StackOverflow(e) + | starlark_syntax::ErrorKind::Internal(e) + | starlark_syntax::ErrorKind::Value(e) + | starlark_syntax::ErrorKind::Function(e) + | starlark_syntax::ErrorKind::Scope(e) + | starlark_syntax::ErrorKind::Parser(e) + | starlark_syntax::ErrorKind::Other(e) + | starlark_syntax::ErrorKind::Native(e) => { + let error: anyhow::Error = Into::into(BuckStarlarkError(e, description)); + let std_err: &'_ (dyn std::error::Error + 'static) = error.as_ref(); + + recover_crate_error(std_err, source_location) + } + _ => crate::Error(Arc::new(ErrorKind::Root(Box::new(ErrorRoot::new( + description, + source_location, + None, + ))))), + }; + + e = e.tag(tags); + if let Some(tier) = category { + e.context(tier) + } else { + e + } +} + +pub(crate) struct BuckStarlarkError(pub(crate) anyhow::Error, String); + +impl fmt::Debug for BuckStarlarkError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.1) + } +} + +impl fmt::Display for BuckStarlarkError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.1) + } +} + +impl std::error::Error for BuckStarlarkError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + self.0.source() + } + + fn provide<'a>(&'a self, request: &mut std::error::Request<'a>) { + self.0.provide(request); + } +} + +#[derive(derive_more::Display, RefCast)] +#[repr(transparent)] +struct StarlarkErrorWrapper(crate::Error); + +impl fmt::Debug for StarlarkErrorWrapper { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&self.0, f) + } +} + +impl std::error::Error for StarlarkErrorWrapper {} + +#[cfg(test)] +mod tests { + use std::error::Request; + + use super::from_starlark; + use crate::any::ProvidableMetadata; + + #[derive(Debug, derive_more::Display)] + struct FullMetadataError; + + impl std::error::Error for FullMetadataError { + fn provide<'a>(&'a self, request: &mut Request<'a>) { + request.provide_value(ProvidableMetadata { + action_error: None, + source_file: file!(), + source_location_extra: Some("FullMetadataError"), + tags: vec![ + crate::ErrorTag::WatchmanTimeout, + crate::ErrorTag::StarlarkFail, + ], + category: Some(crate::Tier::Tier0), + }); + } + } + + #[test] + fn test_roundtrip_starlark() { + // Tests buck2_error->starlark->buck2_error + let e = crate::Error::new(FullMetadataError).context("context 1"); + let e2 = from_starlark(starlark_syntax::Error::from(e.clone())); + crate::Error::check_equal(&e, &e2); + } + + #[test] + fn test_metadata_roundtrip_with_anyhow() { + // Tests buck2_error->anyhow->starlark->buck2_error + let e = crate::Error::new(FullMetadataError); + let e = e.context("test context 123"); + let e: anyhow::Error = e.into(); + let e: starlark_syntax::Error = e.into(); + let e = from_starlark(e); + + assert_eq!(e.get_tier(), Some(crate::Tier::Tier0)); + assert!(format!("{:?}", e).contains("test context 123")); + assert_eq!( + e.source_location(), + Some("buck2_error/src/starlark_error.rs::FullMetadataError") + ); + assert_eq!( + &e.tags(), + &[ + crate::ErrorTag::AnyStarlarkEvaluation, + crate::ErrorTag::StarlarkFail, + crate::ErrorTag::WatchmanTimeout + ] + ); + } +} diff --git a/app/buck2_error_derive/BUCK b/app/buck2_error_derive/BUCK index 
7a3cfdd076dc6..7306fe7d3675a 100644 --- a/app/buck2_error_derive/BUCK +++ b/app/buck2_error_derive/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("buck2") @@ -10,6 +9,7 @@ rust_library( ), proc_macro = True, deps = [ + "fbsource//third-party/rust:proc-macro2", "fbsource//third-party/rust:quote", "fbsource//third-party/rust:syn", ], diff --git a/app/buck2_error_derive/Cargo.toml b/app/buck2_error_derive/Cargo.toml index ec28b99cfe1a7..caf6bfc3286f5 100644 --- a/app/buck2_error_derive/Cargo.toml +++ b/app/buck2_error_derive/Cargo.toml @@ -1,10 +1,12 @@ [package] -name = "buck2_error_derive" -version = "0.1.0" -edition = "2021" description = """ -Macros used by buck2_error +Fork of thiserror used by buck2_error """ +edition = "2021" +license = { workspace = true } +name = "buck2_error_derive" +repository = { workspace = true } +version = "0.1.0" [lib] proc-macro = true diff --git a/app/buck2_error_derive/LICENSE-APACHE b/app/buck2_error_derive/LICENSE-APACHE new file mode 100644 index 0000000000000..1b5ec8b78e237 --- /dev/null +++ b/app/buck2_error_derive/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS diff --git a/app/buck2_error_derive/LICENSE-MIT b/app/buck2_error_derive/LICENSE-MIT new file mode 100644 index 0000000000000..31aa79387f27e --- /dev/null +++ b/app/buck2_error_derive/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/app/buck2_error_derive/src/ast.rs b/app/buck2_error_derive/src/ast.rs new file mode 100644 index 0000000000000..48003e5c26ef8 --- /dev/null +++ b/app/buck2_error_derive/src/ast.rs @@ -0,0 +1,186 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +// This code is adapted from https://github.com/dtolnay/thiserror licensed under Apache-2.0 or MIT. 
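+//
+// A hedged sketch of the kind of input this AST models (attribute syntax per `attr.rs` in this
+// diff; the error type itself is hypothetical):
+//
+//     #[derive(buck2_error::Error, Debug)]
+//     #[error("failed with {0}")]
+//     #[buck2(tag = Analysis)]
+//     struct MyDeriveInput(String);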
+ +use proc_macro2::Span; +use syn::Data; +use syn::DataEnum; +use syn::DataStruct; +use syn::DeriveInput; +use syn::Error; +use syn::Fields; +use syn::Generics; +use syn::Ident; +use syn::Index; +use syn::Member; +use syn::Result; +use syn::Type; + +use crate::attr; +use crate::attr::Attrs; +use crate::generics::ParamsInScope; + +pub enum Input<'a> { + Struct(Struct<'a>), + Enum(Enum<'a>), +} + +pub struct Struct<'a> { + pub original: &'a DeriveInput, + pub attrs: Attrs<'a>, + pub ident: Ident, + pub generics: &'a Generics, + pub fields: Vec<Field<'a>>, +} + +pub struct Enum<'a> { + pub original: &'a DeriveInput, + pub attrs: Attrs<'a>, + pub ident: Ident, + pub generics: &'a Generics, + pub variants: Vec<Variant<'a>>, +} + +pub struct Variant<'a> { + pub original: &'a syn::Variant, + pub attrs: Attrs<'a>, + pub ident: Ident, + pub fields: Vec<Field<'a>>, +} + +pub struct Field<'a> { + pub original: &'a syn::Field, + pub attrs: Attrs<'a>, + pub member: Member, + pub ty: &'a Type, + pub contains_generic: bool, +} + +impl<'a> Input<'a> { + pub fn from_syn(node: &'a DeriveInput) -> Result<Self> { + match &node.data { + Data::Struct(data) => Struct::from_syn(node, data).map(Input::Struct), + Data::Enum(data) => Enum::from_syn(node, data).map(Input::Enum), + Data::Union(_) => Err(Error::new_spanned( + node, + "union as errors are not supported", + )), + } + } +} + +impl<'a> Struct<'a> { + fn from_syn(node: &'a DeriveInput, data: &'a DataStruct) -> Result<Self> { + let mut attrs = attr::get(&node.attrs)?; + let scope = ParamsInScope::new(&node.generics); + let span = attrs.span().unwrap_or_else(Span::call_site); + let fields = Field::multiple_from_syn(&data.fields, &scope, span)?; + if let Some(display) = &mut attrs.display { + display.expand_shorthand(&fields); + } + Ok(Struct { + original: node, + attrs, + ident: node.ident.clone(), + generics: &node.generics, + fields, + }) + } +} + +impl<'a> Enum<'a> { + fn from_syn(node: &'a DeriveInput, data: &'a DataEnum) -> Result<Self> { + let attrs = attr::get(&node.attrs)?; + let scope = ParamsInScope::new(&node.generics); + let span = attrs.span().unwrap_or_else(Span::call_site); + let variants = data + .variants + .iter() + .map(|node| { + let mut variant = Variant::from_syn(node, &scope, span)?; + if let display @ None = &mut variant.attrs.display { + *display = attrs.display.clone(); + } + if let Some(display) = &mut variant.attrs.display { + display.expand_shorthand(&variant.fields); + } else if variant.attrs.transparent.is_none() { + variant.attrs.transparent = attrs.transparent; + } + Ok(variant) + }) + .collect::<Result<_>>()?; + Ok(Enum { + original: node, + attrs, + ident: node.ident.clone(), + generics: &node.generics, + variants, + }) + } +} + +impl<'a> Variant<'a> { + fn from_syn(node: &'a syn::Variant, scope: &ParamsInScope<'a>, span: Span) -> Result<Self> { + let attrs = attr::get(&node.attrs)?; + let span = attrs.span().unwrap_or(span); + Ok(Variant { + original: node, + attrs, + ident: node.ident.clone(), + fields: Field::multiple_from_syn(&node.fields, scope, span)?, + }) + } +} + +impl<'a> Field<'a> { + fn multiple_from_syn( + fields: &'a Fields, + scope: &ParamsInScope<'a>, + span: Span, + ) -> Result<Vec<Self>> { + fields + .iter() + .enumerate() + .map(|(i, field)| Field::from_syn(i, field, scope, span)) + .collect() + } + + fn from_syn( + i: usize, + node: &'a syn::Field, + scope: &ParamsInScope<'a>, + span: Span, + ) -> Result<Self> { + Ok(Field { + original: node, + attrs: attr::get(&node.attrs)?, + member: node.ident.clone().map(Member::Named).unwrap_or_else(|| { + Member::Unnamed(Index { + index: i as
u32, + span, + }) + }), + ty: &node.ty, + contains_generic: scope.intersects(&node.ty), + }) + } +} + +impl Attrs<'_> { + pub fn span(&self) -> Option<Span> { + if let Some(display) = &self.display { + Some(display.fmt.span()) + } else if let Some(transparent) = &self.transparent { + Some(transparent.span) + } else { + None + } + } +} diff --git a/app/buck2_error_derive/src/attr.rs b/app/buck2_error_derive/src/attr.rs new file mode 100644 index 0000000000000..aa48900409752 --- /dev/null +++ b/app/buck2_error_derive/src/attr.rs @@ -0,0 +1,292 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +// This code is adapted from https://github.com/dtolnay/thiserror licensed under Apache-2.0 or MIT. + +use std::collections::BTreeSet as Set; + +use proc_macro2::Delimiter; +use proc_macro2::Group; +use proc_macro2::Span; +use proc_macro2::TokenStream; +use proc_macro2::TokenTree; +use quote::format_ident; +use quote::quote; +use quote::ToTokens; +use syn::braced; +use syn::bracketed; +use syn::parenthesized; +use syn::parse::Parse; +use syn::parse::ParseStream; +use syn::parse::Parser; +use syn::punctuated::Punctuated; +use syn::spanned::Spanned; +use syn::token; +use syn::Attribute; +use syn::Error; +use syn::Ident; +use syn::Index; +use syn::LitInt; +use syn::LitStr; +use syn::Result; +use syn::Token; + +/// Did the user provide an explicit value for the option, or a function from which to compute it +#[derive(Clone)] +pub enum OptionStyle { + Explicit(syn::Ident), + ByExpr(syn::Expr), +} + +impl OptionStyle { + pub fn span(&self) -> Span { + match self { + Self::Explicit(ident) => ident.span(), + Self::ByExpr(expr) => expr.span(), + } + } +} + +impl Parse for OptionStyle { + fn parse(input: ParseStream) -> syn::Result<Self> { + // syn does not have `is_empty2` + let fork = input.fork(); + if fork.parse::<Ident>().is_ok() && (fork.peek(Token![,]) || fork.is_empty()) { + let ident = input.parse::<Ident>()?; + Ok(Self::Explicit(ident.clone())) + } else { + Ok(Self::ByExpr(input.parse()?)) + } + } +} + +enum MacroOption { + Category(OptionStyle), + Tag(OptionStyle), +} + +impl Parse for MacroOption { + fn parse(input: ParseStream) -> syn::Result<Self> { + let name: syn::Ident = input.parse()?; + if name == "input" { + let ident = syn::Ident::new("Input", name.span()); + Ok(MacroOption::Category(OptionStyle::Explicit(ident))) + } else if name == "tier0" { + let ident = syn::Ident::new("Tier0", name.span()); + Ok(MacroOption::Category(OptionStyle::Explicit(ident))) + } else if name == "environment" { + let ident = syn::Ident::new("Environment", name.span()); + Ok(MacroOption::Category(OptionStyle::Explicit(ident))) + } else if name == "tag" { + let _eq: Token![=] = input.parse()?; + Ok(MacroOption::Tag(input.parse()?)) + } else { + Err(syn::Error::new_spanned(name, "expected option")) + } + } +} + +pub struct Attrs<'a> { + pub display: Option<Display<'a>>, + pub source: Option<&'a Attribute>, + pub transparent: Option<Transparent<'a>>, + pub category: Option<OptionStyle>, + pub tags: Vec<OptionStyle>, +} + +#[derive(Clone)] +pub struct Display<'a> { + pub original: &'a Attribute, + pub fmt: LitStr, + pub args: TokenStream, + pub implied_bounds: Set<(usize, Trait)>, +} + +#[derive(Copy, Clone)] +pub struct Transparent<'a> { + pub original: &'a Attribute, + pub span: Span, +} + +#[derive(Copy, Clone, Eq, PartialEq,
Ord, PartialOrd, Debug)] +pub enum Trait { + Debug, + Display, + Octal, + LowerHex, + UpperHex, + Pointer, + Binary, + LowerExp, + UpperExp, +} + +pub fn get(input: &[Attribute]) -> Result<Attrs> { + let mut attrs = Attrs { + display: None, + source: None, + transparent: None, + category: None, + tags: Vec::new(), + }; + + for attr in input { + if attr.path().is_ident("error") { + parse_error_attribute(&mut attrs, attr)?; + } else if attr.path().is_ident("source") { + attr.meta.require_path_only()?; + if attrs.source.is_some() { + return Err(Error::new_spanned(attr, "duplicate #[source] attribute")); + } + attrs.source = Some(attr); + } else if attr.path().is_ident("buck2") { + let meta = attr.meta.require_list()?; + let parsed = Punctuated::<MacroOption, Token![,]>::parse_terminated + .parse2(meta.tokens.clone())?; + for option in parsed { + match option { + MacroOption::Category(style) => { + if attrs.category.is_some() { + return Err(syn::Error::new(style.span(), "duplicate category")); + } + attrs.category = Some(style); + } + MacroOption::Tag(style) => { + attrs.tags.push(style); + } + } + } + } + } + + Ok(attrs) +} + +fn parse_error_attribute<'a>(attrs: &mut Attrs<'a>, attr: &'a Attribute) -> Result<()> { + syn::custom_keyword!(transparent); + + attr.parse_args_with(|input: ParseStream| { + if let Some(kw) = input.parse::<Option<transparent>>()? { + if attrs.transparent.is_some() { + return Err(Error::new_spanned( + attr, + "duplicate #[error(transparent)] attribute", + )); + } + attrs.transparent = Some(Transparent { + original: attr, + span: kw.span, + }); + return Ok(()); + } + + let display = Display { + original: attr, + fmt: input.parse()?, + args: parse_token_expr(input, false)?, + implied_bounds: Set::new(), + }; + if attrs.display.is_some() { + return Err(Error::new_spanned( + attr, + "only one #[error(...)] attribute is allowed", + )); + } + attrs.display = Some(display); + Ok(()) + }) +} + +fn parse_token_expr(input: ParseStream, mut begin_expr: bool) -> Result<TokenStream> { + let mut tokens = Vec::new(); + while !input.is_empty() { + if begin_expr && input.peek(Token![.]) { + if input.peek2(Ident) { + input.parse::<Token![.]>()?; + begin_expr = false; + continue; + } + if input.peek2(LitInt) { + input.parse::<Token![.]>()?; + let int: Index = input.parse()?; + let ident = format_ident!("_{}", int.index, span = int.span); + tokens.push(TokenTree::Ident(ident)); + begin_expr = false; + continue; + } + } + + begin_expr = input.peek(Token![break]) + || input.peek(Token![continue]) + || input.peek(Token![if]) + || input.peek(Token![in]) + || input.peek(Token![match]) + || input.peek(Token![mut]) + || input.peek(Token![return]) + || input.peek(Token![while]) + || input.peek(Token![+]) + || input.peek(Token![&]) + || input.peek(Token![!]) + || input.peek(Token![^]) + || input.peek(Token![,]) + || input.peek(Token![/]) + || input.peek(Token![=]) + || input.peek(Token![>]) + || input.peek(Token![<]) + || input.peek(Token![|]) + || input.peek(Token![%]) + || input.peek(Token![;]) + || input.peek(Token![*]) + || input.peek(Token![-]); + + let token: TokenTree = if input.peek(token::Paren) { + let content; + let delimiter = parenthesized!(content in input); + let nested = parse_token_expr(&content, true)?; + let mut group = Group::new(Delimiter::Parenthesis, nested); + group.set_span(delimiter.span.join()); + TokenTree::Group(group) + } else if input.peek(token::Brace) { + let content; + let delimiter = braced!(content in input); + let nested = parse_token_expr(&content, true)?; + let mut group = Group::new(Delimiter::Brace, nested); + 
group.set_span(delimiter.span.join()); + TokenTree::Group(group) + } else if input.peek(token::Bracket) { + let content; + let delimiter = bracketed!(content in input); + let nested = parse_token_expr(&content, true)?; + let mut group = Group::new(Delimiter::Bracket, nested); + group.set_span(delimiter.span.join()); + TokenTree::Group(group) + } else { + input.parse()? + }; + tokens.push(token); + } + Ok(TokenStream::from_iter(tokens)) +} + +impl ToTokens for Display<'_> { + fn to_tokens(&self, tokens: &mut TokenStream) { + let fmt = &self.fmt; + let args = &self.args; + tokens.extend(quote! { + std::write!(__formatter, #fmt #args) + }); + } +} + +impl ToTokens for Trait { + fn to_tokens(&self, tokens: &mut TokenStream) { + let trait_name = format_ident!("{}", format!("{:?}", self)); + tokens.extend(quote!(std::fmt::#trait_name)); + } +} diff --git a/app/buck2_error_derive/src/expand.rs b/app/buck2_error_derive/src/expand.rs new file mode 100644 index 0000000000000..b0a784a7a55a3 --- /dev/null +++ b/app/buck2_error_derive/src/expand.rs @@ -0,0 +1,404 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +// This code is adapted from https://github.com/dtolnay/thiserror licensed under Apache-2.0 or MIT. + +use std::collections::BTreeSet as Set; + +use proc_macro2::Span; +use proc_macro2::TokenStream; +use quote::format_ident; +use quote::quote; +use quote::quote_spanned; +use quote::ToTokens; +use syn::spanned::Spanned; +use syn::Data; +use syn::DeriveInput; +use syn::Ident; +use syn::Member; +use syn::Result; +use syn::Token; +use syn::Visibility; + +use crate::ast::Enum; +use crate::ast::Field; +use crate::ast::Input; +use crate::ast::Struct; +use crate::attr::Attrs; +use crate::attr::OptionStyle; +use crate::attr::Trait; +use crate::generics::InferredBounds; + +pub fn derive(node: &DeriveInput) -> Result<TokenStream> { + let input = Input::from_syn(node)?; + input.validate()?; + Ok(match input { + Input::Struct(input) => impl_struct(input), + Input::Enum(input) => impl_enum(input), + }) +} + +fn impl_struct(input: Struct) -> TokenStream { + let ty = &input.ident; + let (impl_generics, ty_generics, _) = input.generics.split_for_impl(); + let mut error_inferred_bounds = InferredBounds::new(); + + let source_body = if input.attrs.transparent.is_some() { + let only_field = &input.fields[0]; + if only_field.contains_generic { + error_inferred_bounds.insert(only_field.ty, quote!(std::error::Error)); + } + let member = &only_field.member; + Some(quote! { + std::error::Error::source(self.#member.as_dyn_error()) + }) + } else if let Some(source_field) = input.source_field() { + let source = &source_field.member; + if source_field.contains_generic { + error_inferred_bounds.insert(source_field.ty, quote!(std::error::Error + 'static)); + } + let dyn_error = quote_spanned!(source.span()=> self.#source .as_dyn_error()); + Some(quote! { + std::option::Option::Some(#dyn_error) + }) + } else { + None + }; + let source_method = source_body.map(|body| { + quote!
{ + fn source(&self) -> std::option::Option<&(dyn std::error::Error + 'static)> { + use buck2_error::__for_macro::AsDynError; + #body + } + } + }); + + let provide_body = gen_provide_contents(&input.attrs, &input.fields, ty, None); + let pat = fields_pat(&input.fields); + let provide_method = quote! { + fn provide<'__macro>(&'__macro self, __request: &mut std::error::Request<'__macro>) { + #[allow(unused_variables, deprecated)] + let Self #pat = self; + #provide_body + } + }; + + let mut display_implied_bounds = Set::new(); + let display_body = if input.attrs.transparent.is_some() { + let only_field = &input.fields[0].member; + display_implied_bounds.insert((0, Trait::Display)); + Some(quote! { + std::fmt::Display::fmt(&self.#only_field, __formatter) + }) + } else if let Some(display) = &input.attrs.display { + display_implied_bounds = display.implied_bounds.clone(); + Some(quote! { + #[allow(unused_variables, deprecated)] + let Self #pat = self; + #display + }) + } else { + None + }; + let display_impl = display_body.map(|body| { + let mut display_inferred_bounds = InferredBounds::new(); + for (field, bound) in display_implied_bounds { + let field = &input.fields[field]; + if field.contains_generic { + display_inferred_bounds.insert(field.ty, bound); + } + } + let display_where_clause = display_inferred_bounds.augment_where_clause(input.generics); + quote! { + #[allow(unused_qualifications)] + impl #impl_generics std::fmt::Display for #ty #ty_generics #display_where_clause { + #[allow(clippy::used_underscore_binding)] + fn fmt(&self, __formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + #body + } + } + } + }); + + let error_trait = spanned_error_trait(input.original); + if input.generics.type_params().next().is_some() { + let self_token = <Token![Self]>::default(); + error_inferred_bounds.insert(self_token, Trait::Debug); + error_inferred_bounds.insert(self_token, Trait::Display); + error_inferred_bounds.insert(self_token, quote!(std::marker::Send)); + error_inferred_bounds.insert(self_token, quote!(std::marker::Sync)); + error_inferred_bounds.insert(self_token, quote!('static)); + } + let error_where_clause = error_inferred_bounds.augment_where_clause(input.generics); + + quote! { + #[allow(unused_qualifications)] + impl #impl_generics #error_trait for #ty #ty_generics #error_where_clause { + #source_method + #provide_method + } + #display_impl + } +} + +fn impl_enum(mut input: Enum) -> TokenStream { + let ty = &input.ident; + let (impl_generics, ty_generics, _) = input.generics.split_for_impl(); + let mut error_inferred_bounds = InferredBounds::new(); + + // We let people specify these on the type or variant, so make sure that they always show up on + // the variant and we don't have to re-check the type + for variant in &mut input.variants { + if input.attrs.category.is_some() { + variant.attrs.category = input.attrs.category.clone(); + } + variant.attrs.tags.extend(input.attrs.tags.iter().cloned()); + } + + let source_method = if input.has_source() { + let arms = input.variants.iter().map(|variant| { + let ident = &variant.ident; + if variant.attrs.transparent.is_some() { + let only_field = &variant.fields[0]; + if only_field.contains_generic { + error_inferred_bounds.insert(only_field.ty, quote!(std::error::Error)); + } + let member = &only_field.member; + let source = quote!(std::error::Error::source(transparent.as_dyn_error())); + quote!
{ + #ty::#ident {#member: transparent} => #source, + } + } else if let Some(source_field) = variant.source_field() { + let source = &source_field.member; + if source_field.contains_generic { + error_inferred_bounds + .insert(source_field.ty, quote!(std::error::Error + 'static)); + } + let varsource = quote!(source); + let dyn_error = quote_spanned!(source.span()=> #varsource.as_dyn_error()); + quote! { + #ty::#ident {#source: #varsource, ..} => std::option::Option::Some(#dyn_error), + } + } else { + quote! { + #ty::#ident {..} => std::option::Option::None, + } + } + }); + Some(quote! { + fn source(&self) -> std::option::Option<&(dyn std::error::Error + 'static)> { + use buck2_error::__for_macro::AsDynError; + #[allow(deprecated)] + match self { + #(#arms)* + } + } + }) + } else { + None + }; + + let provide_arms = input.variants.iter().map(|variant| { + let content = + gen_provide_contents(&variant.attrs, &variant.fields, ty, Some(&variant.ident)); + let ident = &variant.ident; + let pat = fields_pat(&variant.fields); + quote! { + #[allow(unused_variables, deprecated)] + #ty::#ident #pat => { + #content + }, + } + }); + let provide_method = quote! { + fn provide<'__macro>(&'__macro self, __request: &mut std::error::Request<'__macro>) { + match self { + #(#provide_arms)* + } + } + }; + + let display_impl = if input.has_display() { + let mut display_inferred_bounds = InferredBounds::new(); + let void_deref = if input.variants.is_empty() { + Some(quote!(*)) + } else { + None + }; + let arms = input.variants.iter().map(|variant| { + let mut display_implied_bounds = Set::new(); + let display = match &variant.attrs.display { + Some(display) => { + display_implied_bounds = display.implied_bounds.clone(); + display.to_token_stream() + } + None => { + let only_field = match &variant.fields[0].member { + Member::Named(ident) => ident.clone(), + Member::Unnamed(index) => format_ident!("_{}", index), + }; + display_implied_bounds.insert((0, Trait::Display)); + quote!(std::fmt::Display::fmt(#only_field, __formatter)) + } + }; + for (field, bound) in display_implied_bounds { + let field = &variant.fields[field]; + if field.contains_generic { + display_inferred_bounds.insert(field.ty, bound); + } + } + let ident = &variant.ident; + let pat = fields_pat(&variant.fields); + quote! { + #ty::#ident #pat => #display + } + }); + let arms = arms.collect::<Vec<_>>(); + let display_where_clause = display_inferred_bounds.augment_where_clause(input.generics); + Some(quote!
{ + #[allow(unused_qualifications)] + impl #impl_generics std::fmt::Display for #ty #ty_generics #display_where_clause { + fn fmt(&self, __formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + #[allow(unused_variables, deprecated, clippy::used_underscore_binding)] + match #void_deref self { + #(#arms,)* + } + } + } + + impl #impl_generics From<#ty #ty_generics> for buck2_error::__for_macro::ContextValue #display_where_clause { + fn from(value: #ty #ty_generics) -> buck2_error::__for_macro::ContextValue { + format!("{}", value).into() + } + } + }) + } else { + None + }; + + let error_trait = spanned_error_trait(input.original); + if input.generics.type_params().next().is_some() { + let self_token = <Token![Self]>::default(); + error_inferred_bounds.insert(self_token, Trait::Debug); + error_inferred_bounds.insert(self_token, Trait::Display); + error_inferred_bounds.insert(self_token, quote!(std::marker::Send)); + error_inferred_bounds.insert(self_token, quote!(std::marker::Sync)); + error_inferred_bounds.insert(self_token, quote!('static)); + } + let error_where_clause = error_inferred_bounds.augment_where_clause(input.generics); + + quote! { + #[allow(unused_qualifications)] + impl #impl_generics #error_trait for #ty #ty_generics #error_where_clause { + #source_method + #provide_method + } + #display_impl + } +} + +/// Generates the provided data for either a variant or the whole struct +fn gen_provide_contents( + attrs: &Attrs, + fields: &[Field], + type_name: &Ident, + variant_name: Option<&Ident>, +) -> syn::Stmt { + let type_and_variant = match variant_name { + Some(variant_name) => format!("{}::{}", type_name, variant_name), + None => type_name.to_string(), + }; + let source_location_extra = syn::LitStr::new(&type_and_variant, Span::call_site()); + let category: syn::Expr = match &attrs.category { + Some(OptionStyle::Explicit(cat)) => syn::parse_quote! { + core::option::Option::Some(buck2_error::Tier::#cat) + }, + Some(OptionStyle::ByExpr(e)) => e.clone(), + None => syn::parse_quote! { + core::option::Option::None + }, + }; + let tags: Vec<syn::Expr> = attrs + .tags + .iter() + .map(|tag| match tag { + OptionStyle::Explicit(tag) => syn::parse_quote! { + core::option::Option::Some(buck2_error::ErrorTag::#tag) + }, + OptionStyle::ByExpr(e) => e.clone(), + }) + .collect(); + let num_tags = syn::LitInt::new(&format!("{}", tags.len()), Span::call_site()); + + let metadata: syn::Stmt = syn::parse_quote! { + buck2_error::provide_metadata( + __request, + #category, + <[Option<buck2_error::ErrorTag>; #num_tags] as IntoIterator>::into_iter([#(#tags,)*]).flatten(), + core::file!(), + core::option::Option::Some(#source_location_extra), + core::option::Option::None, + ); + }; + + let forward_transparent = if attrs.transparent.is_some() { + let only_field = match &fields[0].member { + Member::Named(ident) => ident.clone(), + Member::Unnamed(index) => format_ident!("_{}", index), + }; + quote! { + use buck2_error::__for_macro::AsDynError; + std::error::Error::provide(#only_field.as_dyn_error(), __request); + } + } else { + quote! {} + }; + // When the same type is provided to the `request` more than once, the first value is used and + // later values are ignored. As such, make sure we put the `forward_transparent` first, so that + // if the underlying error has metadata, that's the one that gets used. + syn::parse_quote!
{ + { + #forward_transparent + #metadata + } + } +} + +fn fields_pat(fields: &[Field]) -> TokenStream { + let mut members = fields.iter().map(|field| &field.member).peekable(); + match members.peek() { + Some(Member::Named(_)) => quote!({ #(#members),* }), + Some(Member::Unnamed(_)) => { + let vars = members.map(|member| match member { + Member::Unnamed(member) => format_ident!("_{}", member), + Member::Named(_) => unreachable!(), + }); + quote!((#(#vars),*)) + } + None => quote!({}), + } +} + +fn spanned_error_trait(input: &DeriveInput) -> TokenStream { + let vis_span = match &input.vis { + Visibility::Public(vis) => Some(vis.span), + Visibility::Restricted(vis) => Some(vis.pub_token.span), + Visibility::Inherited => None, + }; + let data_span = match &input.data { + Data::Struct(data) => data.struct_token.span, + Data::Enum(data) => data.enum_token.span, + Data::Union(data) => data.union_token.span, + }; + let first_span = vis_span.unwrap_or(data_span); + let last_span = input.ident.span(); + let path = quote_spanned!(first_span=> std::error::); + let error = quote_spanned!(last_span=> Error); + quote!(#path #error) +} diff --git a/app/buck2_error_derive/src/fmt.rs b/app/buck2_error_derive/src/fmt.rs new file mode 100644 index 0000000000000..011e92e57c4a6 --- /dev/null +++ b/app/buck2_error_derive/src/fmt.rs @@ -0,0 +1,186 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +// This code is adapted from https://github.com/dtolnay/thiserror licensed under Apache-2.0 or MIT. + +use std::collections::BTreeSet as Set; +use std::collections::HashMap as Map; + +use proc_macro2::TokenTree; +use quote::format_ident; +use quote::quote_spanned; +use syn::ext::IdentExt; +use syn::parse::ParseStream; +use syn::parse::Parser; +use syn::Ident; +use syn::Index; +use syn::LitStr; +use syn::Member; +use syn::Result; +use syn::Token; + +use crate::ast::Field; +use crate::attr::Display; +use crate::attr::Trait; + +impl Display<'_> { + // Transform `"error {var}"` to `"error {}", var`. 
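+ // + // A rough sketch of the rewrite, with hypothetical field names: an attribute like + // #[error("failed to open {path} (attempt {n})")] + // ends up formatting as roughly + // format_args!("failed to open {path} (attempt {n})", path = path, n = n) + // where `path` and `n` are bound by destructuring the error's fields, and each + // interpolated field implies a Display bound (or Debug for `{x:?}`, etc.) on any + // generic parameters it mentions.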
+ pub fn expand_shorthand(&mut self, fields: &[Field]) { + let raw_args = self.args.clone(); + let mut named_args = explicit_named_args.parse2(raw_args).unwrap(); + let mut member_index = Map::new(); + for (i, field) in fields.iter().enumerate() { + member_index.insert(&field.member, i); + } + + let span = self.fmt.span(); + let fmt = self.fmt.value(); + let mut read = fmt.as_str(); + let mut out = String::new(); + let mut args = self.args.clone(); + let mut implied_bounds = Set::new(); + + let mut has_trailing_comma = false; + if let Some(TokenTree::Punct(punct)) = args.clone().into_iter().last() { + if punct.as_char() == ',' { + has_trailing_comma = true; + } + } + + while let Some(brace) = read.find('{') { + out += &read[..brace + 1]; + read = &read[brace + 1..]; + if read.starts_with('{') { + out.push('{'); + read = &read[1..]; + continue; + } + let next = match read.chars().next() { + Some(next) => next, + None => return, + }; + let member = match next { + '0'..='9' => { + let int = take_int(&mut read); + let member = match int.parse::<u32>() { + Ok(index) => Member::Unnamed(Index { index, span }), + Err(_) => return, + }; + if !member_index.contains_key(&member) { + out += &int; + continue; + } + member + } + 'a'..='z' | 'A'..='Z' | '_' => { + let mut ident = take_ident(&mut read); + ident.set_span(span); + Member::Named(ident) + } + _ => continue, + }; + if let Some(&field) = member_index.get(&member) { + let end_spec = match read.find('}') { + Some(end_spec) => end_spec, + None => return, + }; + let bound = match read[..end_spec].chars().next_back() { + Some('?') => Trait::Debug, + Some('o') => Trait::Octal, + Some('x') => Trait::LowerHex, + Some('X') => Trait::UpperHex, + Some('p') => Trait::Pointer, + Some('b') => Trait::Binary, + Some('e') => Trait::LowerExp, + Some('E') => Trait::UpperExp, + Some(_) | None => Trait::Display, + }; + implied_bounds.insert((field, bound)); + } + let local = match &member { + Member::Unnamed(index) => format_ident!("_{}", index), + Member::Named(ident) => ident.clone(), + }; + let mut formatvar = local.clone(); + if formatvar.to_string().starts_with("r#") { + formatvar = format_ident!("r_{}", formatvar); + } + if formatvar.to_string().starts_with('_') { + // Work around leading underscore being rejected by 1.40 and + // older compilers. https://github.com/rust-lang/rust/pull/66847 + formatvar = format_ident!("field_{}", formatvar); + } + out += &formatvar.to_string(); + if !named_args.insert(formatvar.clone()) { + // Already specified in the format argument list.
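+ // e.g. in a hypothetical #[error("{x}", x = compute_x())], `x` was already + // bound by the user, so we must not append our own `x = x` argument.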
+ continue; + } + if !has_trailing_comma { + args.extend(quote_spanned!(span=> ,)); + } + args.extend(quote_spanned!(span=> #formatvar = #local)); + has_trailing_comma = false; + } + + out += read; + self.fmt = LitStr::new(&out, self.fmt.span()); + self.args = args; + self.implied_bounds = implied_bounds; + } +} + +fn explicit_named_args(input: ParseStream) -> Result<Set<Ident>> { + let mut named_args = Set::new(); + + while !input.is_empty() { + if input.peek(Token![,]) && input.peek2(Ident::peek_any) && input.peek3(Token![=]) { + input.parse::<Token![,]>()?; + let ident = input.call(Ident::parse_any)?; + input.parse::<Token![=]>()?; + named_args.insert(ident); + } else { + input.parse::<TokenTree>()?; + } + } + + Ok(named_args) +} + +fn take_int(read: &mut &str) -> String { + let mut int = String::new(); + for (i, ch) in read.char_indices() { + match ch { + '0'..='9' => int.push(ch), + _ => { + *read = &read[i..]; + break; + } + } + } + int +} + +fn take_ident(read: &mut &str) -> Ident { + let mut ident = String::new(); + let raw = read.starts_with("r#"); + if raw { + ident.push_str("r#"); + *read = &read[2..]; + } + for (i, ch) in read.char_indices() { + match ch { + 'a'..='z' | 'A'..='Z' | '0'..='9' | '_' => ident.push(ch), + _ => { + *read = &read[i..]; + break; + } + } + } + Ident::parse_any.parse_str(&ident).unwrap() +} diff --git a/app/buck2_error_derive/src/generics.rs b/app/buck2_error_derive/src/generics.rs new file mode 100644 index 0000000000000..456bc9de45bac --- /dev/null +++ b/app/buck2_error_derive/src/generics.rs @@ -0,0 +1,103 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +// This code is adapted from https://github.com/dtolnay/thiserror licensed under Apache-2.0 or MIT.
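+ +// A sketch of what this module is for, with an illustrative type: given +// +// #[derive(buck2_error::Error)] +// #[error("wrapped: {0}")] +// struct Wrapper<T>(T); +// +// `InferredBounds` lets the derive emit a where-clause such as +// `where T: std::fmt::Display` on the generated impls, rather than requiring +// those bounds on the struct definition itself.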
+ +use std::collections::btree_map::Entry; +use std::collections::BTreeMap as Map; +use std::collections::BTreeSet as Set; + +use proc_macro2::TokenStream; +use quote::ToTokens; +use syn::parse_quote; +use syn::punctuated::Punctuated; +use syn::GenericArgument; +use syn::Generics; +use syn::Ident; +use syn::PathArguments; +use syn::Token; +use syn::Type; +use syn::WhereClause; + +pub struct ParamsInScope<'a> { + names: Set<&'a Ident>, +} + +impl<'a> ParamsInScope<'a> { + pub fn new(generics: &'a Generics) -> Self { + ParamsInScope { + names: generics.type_params().map(|param| &param.ident).collect(), + } + } + + pub fn intersects(&self, ty: &Type) -> bool { + let mut found = false; + crawl(self, ty, &mut found); + found + } +} + +fn crawl(in_scope: &ParamsInScope, ty: &Type, found: &mut bool) { + if let Type::Path(ty) = ty { + if ty.qself.is_none() { + if let Some(ident) = ty.path.get_ident() { + if in_scope.names.contains(ident) { + *found = true; + } + } + } + for segment in &ty.path.segments { + if let PathArguments::AngleBracketed(arguments) = &segment.arguments { + for arg in &arguments.args { + if let GenericArgument::Type(ty) = arg { + crawl(in_scope, ty, found); + } + } + } + } + } +} + +pub struct InferredBounds { + bounds: Map<String, (Set<String>, Punctuated<TokenStream, Token![+]>)>, + order: Vec<TokenStream>, +} + +impl InferredBounds { + pub fn new() -> Self { + InferredBounds { + bounds: Map::new(), + order: Vec::new(), + } + } + + #[allow(clippy::type_repetition_in_bounds, clippy::trait_duplication_in_bounds)] // clippy bug: https://github.com/rust-lang/rust-clippy/issues/8771 + pub fn insert(&mut self, ty: impl ToTokens, bound: impl ToTokens) { + let ty = ty.to_token_stream(); + let bound = bound.to_token_stream(); + let entry = self.bounds.entry(ty.to_string()); + if let Entry::Vacant(_) = entry { + self.order.push(ty); + } + let (set, tokens) = entry.or_default(); + if set.insert(bound.to_string()) { + tokens.push(bound); + } + } + + pub fn augment_where_clause(&self, generics: &Generics) -> WhereClause { + let mut generics = generics.clone(); + let where_clause = generics.make_where_clause(); + for ty in &self.order { + let (_set, bounds) = &self.bounds[&ty.to_string()]; + where_clause.predicates.push(parse_quote!(#ty: #bounds)); + } + generics.where_clause.unwrap() + } +} diff --git a/app/buck2_error_derive/src/lib.rs b/app/buck2_error_derive/src/lib.rs index 9ea200b99730f..7b57be8723e84 100644 --- a/app/buck2_error_derive/src/lib.rs +++ b/app/buck2_error_derive/src/lib.rs @@ -7,59 +7,27 @@ * of this source tree. */ -use proc_macro::TokenStream; -use syn::parse_macro_input; -use syn::parse_quote; +// This code is adapted from https://github.com/dtolnay/thiserror licensed under Apache-2.0 or MIT. -fn derive_error_impl(input: TokenStream, krate: syn::Path) -> TokenStream { - // For now, this is more or less just a stub that forwards to `thiserror`. It doesn't yet do - // anything interesting. - let input = parse_macro_input!(input as syn::DeriveInput); +#![allow(clippy::manual_map)] +#![feature(let_chains)] - // We need to generate an invocation of `thiserror::Derive`. This comes with two pieces of - // complexity, each requiring some cleverness: - // 1. We can't just generate a `#[derive(thiserror::Error)]`, because we cannot modify the - // input! The trick we use is to generate an exact copy of our input, prefixed with the two - // attributes below. The `derive` will run first and do all the normal work of the derive - // macro, including generating the items we want.
`exterminate` will then run next - all it - // does is delete the item it's attached to, leaving us with the result we want. - // 2. `thiserror::Error` generates code which refers to the `thiserror` crate. However, users - // of `buck2_error` will want to avoid importing this crate. We could deal with this by - // adding a `use buck2_error::reexport::thiserror;` - that in turn has the disadvantage that - // the `thiserror` name will show up in the module (and possibly in IDE suggestions). So - // instead, we put the entire thing inside a function, to limit the scope of the reexport. - let name = syn::Ident::new( - &format!("__macro_generated_by_buck2_error_hidden_{}", input.ident), - input.ident.span(), - ); - quote::quote! { - #[doc(hidden)] - #[allow(non_snake_case)] - #[allow(unused)] - fn #name() { - use #krate::__for_macro::thiserror; +mod ast; +mod attr; +mod expand; +mod fmt; +mod generics; +mod prop; +mod valid; - #[derive(thiserror::Error)] - #[#krate::__for_macro::exterminate] - #input - } - } - .into() -} - -#[proc_macro_derive(ErrorForReexport, attributes(backtrace, error, from, source))] -pub fn derive_error_for_reexport(input: TokenStream) -> TokenStream { - derive_error_impl(input, parse_quote! { ::buck2_error }) -} +use proc_macro::TokenStream; +use syn::parse_macro_input; +use syn::DeriveInput; -#[proc_macro_derive(Error, attributes(backtrace, error, from, source))] +#[proc_macro_derive(Error, attributes(error, source, buck2))] pub fn derive_error(input: TokenStream) -> TokenStream { - derive_error_impl(input, parse_quote! { crate }) -} - -// Implementation detail of `derive_error` -#[doc(hidden)] -#[proc_macro_attribute] -pub fn exterminate(_attr: TokenStream, _input: TokenStream) -> TokenStream { - TokenStream::default() + let input = parse_macro_input!(input as DeriveInput); + expand::derive(&input) + .unwrap_or_else(|err| err.to_compile_error()) + .into() } diff --git a/app/buck2_error_derive/src/prop.rs b/app/buck2_error_derive/src/prop.rs new file mode 100644 index 0000000000000..d3737d6f76ca1 --- /dev/null +++ b/app/buck2_error_derive/src/prop.rs @@ -0,0 +1,65 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +// This code is adapted from https://github.com/dtolnay/thiserror licensed under Apache-2.0 or MIT. 
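+ +// A field is treated as the error's source either explicitly or by name; +// illustrative examples: +// +// struct E { +// #[source] +// cause: std::io::Error, // an explicit #[source] attribute is checked first +// } +// +// struct F { +// source: std::io::Error, // otherwise a field literally named `source` is used +// }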
+ +use syn::Member; + +use crate::ast::Enum; +use crate::ast::Field; +use crate::ast::Struct; +use crate::ast::Variant; + +impl Struct<'_> { + pub(crate) fn source_field(&self) -> Option<&Field> { + source_field(&self.fields) + } +} + +impl Enum<'_> { + pub(crate) fn has_source(&self) -> bool { + self.variants + .iter() + .any(|variant| variant.source_field().is_some() || variant.attrs.transparent.is_some()) + } + + pub(crate) fn has_display(&self) -> bool { + self.attrs.display.is_some() + || self.attrs.transparent.is_some() + || self + .variants + .iter() + .any(|variant| variant.attrs.display.is_some()) + || self + .variants + .iter() + .all(|variant| variant.attrs.transparent.is_some()) + } +} + +impl Variant<'_> { + pub(crate) fn source_field(&self) -> Option<&Field> { + source_field(&self.fields) + } +} + +fn source_field<'a, 'b>(fields: &'a [Field<'b>]) -> Option<&'a Field<'b>> { + for field in fields { + if field.attrs.source.is_some() { + return Some(field); + } + } + for field in fields { + match &field.member { + Member::Named(ident) if ident == "source" => return Some(field), + _ => {} + } + } + None +} diff --git a/app/buck2_error_derive/src/valid.rs b/app/buck2_error_derive/src/valid.rs new file mode 100644 index 0000000000000..03f6067bb2f1c --- /dev/null +++ b/app/buck2_error_derive/src/valid.rs @@ -0,0 +1,205 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +// This code is adapted from https://github.com/dtolnay/thiserror licensed under Apache-2.0 or MIT. + +use syn::Error; +use syn::GenericArgument; +use syn::PathArguments; +use syn::Result; +use syn::Type; + +use crate::ast::Enum; +use crate::ast::Field; +use crate::ast::Input; +use crate::ast::Struct; +use crate::ast::Variant; +use crate::attr::Attrs; + +impl Input<'_> { + pub(crate) fn validate(&self) -> Result<()> { + match self { + Input::Struct(input) => input.validate(), + Input::Enum(input) => input.validate(), + } + } +} + +impl Struct<'_> { + fn validate(&self) -> Result<()> { + check_non_field_attrs(&self.attrs, None)?; + if let Some(transparent) = self.attrs.transparent { + if self.fields.len() != 1 { + return Err(Error::new_spanned( + transparent.original, + "#[error(transparent)] requires exactly one field", + )); + } + if let Some(source) = self.fields.iter().find_map(|f| f.attrs.source) { + return Err(Error::new_spanned( + source, + "transparent error struct can't contain #[source]", + )); + } + } + check_field_attrs(&self.fields)?; + for field in &self.fields { + field.validate()?; + } + Ok(()) + } +} + +impl Enum<'_> { + fn validate(&self) -> Result<()> { + check_non_field_attrs(&self.attrs, None)?; + let has_display = self.has_display(); + for variant in &self.variants { + variant.validate(&self.attrs)?; + if has_display && variant.attrs.display.is_none() && variant.attrs.transparent.is_none() + { + return Err(Error::new_spanned( + variant.original, + "missing #[error(\"...\")] display attribute", + )); + } + } + Ok(()) + } +} + +impl Variant<'_> { + fn validate(&self, parent_attrs: &Attrs) -> Result<()> { + check_non_field_attrs(&self.attrs, Some(parent_attrs))?; + if self.attrs.transparent.is_some() { + if self.fields.len() != 1 { + return Err(Error::new_spanned( + self.original, + "#[error(transparent)] requires exactly one 
field", + )); + } + if let Some(source) = self.fields.iter().find_map(|f| f.attrs.source) { + return Err(Error::new_spanned( + source, + "transparent variant can't contain #[source]", + )); + } + } + check_field_attrs(&self.fields)?; + for field in &self.fields { + field.validate()?; + } + Ok(()) + } +} + +impl Field<'_> { + fn validate(&self) -> Result<()> { + if let Some(display) = &self.attrs.display { + return Err(Error::new_spanned( + display.original, + "not expected here; the #[error(...)] attribute belongs on top of a struct or an enum variant", + )); + } + Ok(()) + } +} + +/// If we're checking the attrs on an enum variant, `parsed_earlier` are the attrs on the enum +/// itself. +fn check_non_field_attrs(attrs: &Attrs, parsed_earlier: Option<&Attrs>) -> Result<()> { + if let Some(source) = &attrs.source { + return Err(Error::new_spanned( + source, + "not expected here; the #[source] attribute belongs on a specific field", + )); + } + if let Some(display) = &attrs.display { + if attrs.transparent.is_some() { + return Err(Error::new_spanned( + display.original, + "cannot have both #[error(transparent)] and a display attribute", + )); + } + } + if let Some(parsed_earlier) = parsed_earlier { + if let Some(category) = &attrs.category + && parsed_earlier.category.is_some() + { + return Err(Error::new(category.span(), "already specified on enum")); + } + } + Ok(()) +} + +fn check_field_attrs(fields: &[Field]) -> Result<()> { + let mut source_field = None; + for field in fields { + if let Some(source) = field.attrs.source { + if source_field.is_some() { + return Err(Error::new_spanned(source, "duplicate #[source] attribute")); + } + source_field = Some(field); + } + if let Some(transparent) = field.attrs.transparent { + return Err(Error::new_spanned( + transparent.original, + "#[error(transparent)] needs to go outside the enum or struct, not on an individual field", + )); + } + let style = if let Some(style) = &field.attrs.category { + Some(style) + } else if let Some(style) = field.attrs.tags.first() { + Some(style) + } else { + None + }; + if let Some(style) = style { + return Err(Error::new( + style.span(), + "not expected here; the #[buck2(...)] attribute belongs on top of a struct or an enum variant", + )); + } + } + if let Some(source_field) = source_field { + if contains_non_static_lifetime(source_field.ty) { + return Err(Error::new_spanned( + &source_field.original.ty, + "non-static lifetimes are not allowed in the source of an error, because std::error::Error requires the source is dyn Error + 'static", + )); + } + } + Ok(()) +} + +fn contains_non_static_lifetime(ty: &Type) -> bool { + match ty { + Type::Path(ty) => { + let bracketed = match &ty.path.segments.last().unwrap().arguments { + PathArguments::AngleBracketed(bracketed) => bracketed, + _ => return false, + }; + for arg in &bracketed.args { + match arg { + GenericArgument::Type(ty) if contains_non_static_lifetime(ty) => return true, + GenericArgument::Lifetime(lifetime) if lifetime.ident != "static" => { + return true; + } + _ => {} + } + } + false + } + Type::Reference(ty) => ty + .lifetime + .as_ref() + .map_or(false, |lifetime| lifetime.ident != "static"), + _ => false, // maybe implement later if there are common other cases + } +} diff --git a/app/buck2_error_tests/BUCK b/app/buck2_error_tests/BUCK new file mode 100644 index 0000000000000..9afe4e4d35c28 --- /dev/null +++ b/app/buck2_error_tests/BUCK @@ -0,0 +1,29 @@ +load("@fbcode_macros//build_defs:native_rules.bzl", "buck_filegroup") 
+load("@fbcode_macros//build_defs:rust_unittest.bzl", "rust_unittest") + +oncall("build_infra") + +buck_filegroup( + name = "tests", + srcs = glob([ + "src/**/*.golden", + "src/**", + ]), +) + +rust_unittest( + name = "buck2_error_tests", + srcs = glob(["src/**/*.rs"]), + env = { + # Some of our tests include testcase files relative to CARGO_MANIFEST_DIR. + # This is a hack that allows both `cargo test` and `buck test` to work. + "CARGO_MANIFEST_DIR": "$(location :tests)", + }, + deps = [ + "fbsource//third-party/rust:anyhow", + "fbsource//third-party/rust:thiserror", + "//buck2/app/buck2_error:buck2_error", + "//buck2/app/buck2_util:buck2_util", + "//buck2/starlark-rust/starlark:starlark", + ], +) diff --git a/app/buck2_error_tests/Cargo.toml b/app/buck2_error_tests/Cargo.toml new file mode 100644 index 0000000000000..63b3b7635732a --- /dev/null +++ b/app/buck2_error_tests/Cargo.toml @@ -0,0 +1,17 @@ +[package] +description = "Tests for buck2_error" +edition = "2021" +license = { workspace = true } +name = "buck2_error_tests" +repository = { workspace = true } +version = "0.1.0" + +[dev-dependencies] +anyhow = { workspace = true } +thiserror = { workspace = true } + +starlark = { workspace = true } +starlark_syntax = { workspace = true } + +buck2_error = { workspace = true } +buck2_util = { workspace = true } diff --git a/app/buck2_error_tests/src/conversion.rs b/app/buck2_error_tests/src/conversion.rs new file mode 100644 index 0000000000000..2adf4cdb702d2 --- /dev/null +++ b/app/buck2_error_tests/src/conversion.rs @@ -0,0 +1,142 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use anyhow::Context; +use buck2_error::starlark_error::from_starlark; +use buck2_util::golden_test_helper::golden_test_template; +use buck2_util::golden_test_helper::trim_rust_backtrace; +use starlark::assert::Assert; +use starlark::environment::GlobalsBuilder; +use starlark::starlark_module; +use starlark::values::none::NoneType; + +fn starlark_conversion_helper() -> starlark::Error { + fn fail1() -> anyhow::Result<()> { + Err(anyhow::anyhow!("fail 1")) + } + + fn fail2() -> anyhow::Result<()> { + fail1().context("fail 2") + } + + fn fail3() -> buck2_error::Result<()> { + fail2().map_err(|e| buck2_error::Error::from(e).context("rust failure")) + } + + #[starlark_module] + fn module(builder: &mut GlobalsBuilder) { + fn rust_failure() -> starlark::Result<NoneType> { + fail3()?; + Ok(NoneType) + } + } + + let mut a = Assert::new(); + a.globals_add(module); + + a.module( + "imported", + r#" +# blank lines to make line numbers bigger and more obvious +# +# +# +# +x = [] +def should_fail(): + rust_failure()"#, + ); + + a.fail( + r#" +load('imported', 'should_fail') +should_fail()"#, + "rust failure", + ) +} + +#[test] +fn test_format_starlark_stacktrace_with_later_context() { + let e = starlark_conversion_helper(); + let test_context = + from_starlark(e).context("Adding a context after should still keep backtrace on top"); + golden_test_template( + "src/golden/test_starlark_callstack_context.golden", + trim_rust_backtrace(&format!("{:?}", test_context)), + ); +} + +#[test] +fn test_starlark_multiple_stacktrace() { + #[starlark_module] + fn outer_module(builder: &mut GlobalsBuilder) { + fn outer_rust_failure() -> starlark::Result<NoneType> { + let e: buck2_error::Error = from_starlark(starlark_conversion_helper()); + Err(e.into()) + } + } + + let mut a = Assert::new(); + a.globals_add(outer_module); + + a.module( + "outer_import", + r#" +x = [] +def outer_fail(): + outer_rust_failure()"#, + ); + + let e = a.fail( + r#" +load('outer_import', 'outer_fail') +outer_fail()"#, + "rust failure", + ); + + golden_test_template( + "src/golden/test_starlark_callstack_backtrace.golden", + trim_rust_backtrace(&format!("{:?}", from_starlark(e))), + ); +} + +#[test] +fn test_starlark_multiple_stacktrace_with_context_inbetween() { + #[starlark_module] + fn outer_module(builder: &mut GlobalsBuilder) { + fn outer_rust_failure() -> starlark::Result<NoneType> { + let e: buck2_error::Error = from_starlark(starlark_conversion_helper()); + let e = e.context("Adding a context in between backtraces"); + Err(e.into()) + } + } + + let mut a = Assert::new(); + a.globals_add(outer_module); + + a.module( + "outer_import", + r#" +x = [] +def outer_fail(): + outer_rust_failure()"#, + ); + + let e = a.fail( + r#" +load('outer_import', 'outer_fail') +outer_fail()"#, + "Adding a context in between backtraces", + ); + + golden_test_template( + "src/golden/test_starlark_callstack_backtrace_with_context_inbetween.golden", + trim_rust_backtrace(&format!("{:?}", from_starlark(e))), + ); +} diff --git a/app/buck2_error_tests/src/format.rs b/app/buck2_error_tests/src/format.rs new file mode 100644 index 0000000000000..136a3804674cc --- /dev/null +++ b/app/buck2_error_tests/src/format.rs @@ -0,0 +1,86 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree.
+ */ + +use buck2_util::golden_test_helper::trim_rust_backtrace; + +#[derive(Debug, thiserror::Error)] +#[error("test error")] +struct TestError; + +fn assert_eq_no_backtrace<T: AsRef<str>, U: AsRef<str>>(a: T, b: U) { + assert_eq!( + trim_rust_backtrace(a.as_ref()), + trim_rust_backtrace(b.as_ref()) + ); +} + +#[test] +fn test_shows_context() { + let e = buck2_error::Error::from(TestError) + .context("context 1") + .context("context 2"); + assert_eq_no_backtrace( + format!("{:?}", e), + r#"context 2 + +Caused by: + 0: context 1 + 1: test error"#, + ); + assert_eq_no_backtrace(format!("{:#}", e), r#"context 2: context 1: test error"#); +} + +#[test] +fn test_shows_anyhow_context() { + // This context can't be understood by `buck2_error` + let e = anyhow::Error::from(TestError).context("context 1"); + let e = buck2_error::Error::from(e).context("context 2"); + assert_eq_no_backtrace( + format!("{:?}", e), + r#"context 2 + +Caused by: + 0: context 1 + 1: test error"#, + ); +} + +#[test] +fn test_after_anyhow_conversion() { + let e = buck2_error::Error::from(TestError).context("context"); + let e2 = anyhow::Error::from(e.clone()); + assert_eq_no_backtrace(format!("{}", e), format!("{}", e2)); + assert_eq_no_backtrace(format!("{:?}", e), format!("{:?}", e2)); + assert_eq_no_backtrace(format!("{:#}", e), format!("{:#}", e2)); + + let e3 = buck2_error::Error::from(e2); + assert_eq_no_backtrace(format!("{}", e), format!("{}", e3)); + assert_eq_no_backtrace(format!("{:?}", e), format!("{:?}", e3)); + assert_eq_no_backtrace(format!("{:#}", e), format!("{:#}", e3)); +} + +#[test] +fn test_with_context_from_source() { + #[derive(buck2_error::Error, Debug)] + #[error("with source")] + struct E(#[source] TestError); + + let e = buck2_error::Error::from(E(TestError)).context("context"); + + assert_eq_no_backtrace( + format!("{:?}", e), + r#"context + +Caused by: + 0: with source + 1: test error"#, + ); + assert_eq_no_backtrace(format!("{:#}", e), r#"context: with source: test error"#); + assert_eq_no_backtrace(format!("{}", e), r#"context"#); +} diff --git a/app/buck2_error_tests/src/golden/test_starlark_callstack_backtrace.golden b/app/buck2_error_tests/src/golden/test_starlark_callstack_backtrace.golden new file mode 100644 index 0000000000000..dc71bcf2624c9 --- /dev/null +++ b/app/buck2_error_tests/src/golden/test_starlark_callstack_backtrace.golden @@ -0,0 +1,25 @@ +# @generated +# To regenerate, append -- --env BUCK2_RUST_REGENERATE_GOLDEN_TESTS=1 and re-run the test + +Traceback (most recent call last): + * assert.bzl:3, in + outer_fail() + * outer_import.bzl:4, in outer_fail + outer_rust_failure() + + * assert.bzl:3, in + should_fail() + * imported.bzl:9, in should_fail + rust_failure() + +error: rust failure + --> imported.bzl:9:6 + | +9 | rust_failure() + | ^^^^^^^^^^^^^^ + | + +Caused by: + 0: rust failure + 1: fail 2 + 2: fail 1 diff --git a/app/buck2_error_tests/src/golden/test_starlark_callstack_backtrace_with_context_inbetween.golden b/app/buck2_error_tests/src/golden/test_starlark_callstack_backtrace_with_context_inbetween.golden new file mode 100644 index 0000000000000..eeac2f4209c44 --- /dev/null +++ b/app/buck2_error_tests/src/golden/test_starlark_callstack_backtrace_with_context_inbetween.golden @@ -0,0 +1,33 @@ +# @generated +# To regenerate, append -- --env BUCK2_RUST_REGENERATE_GOLDEN_TESTS=1 and re-run the test + +Traceback (most recent call last): + * assert.bzl:3, in + outer_fail() + * outer_import.bzl:4, in outer_fail + outer_rust_failure() + +error: Adding a context in between backtraces + -->
outer_import.bzl:4:6 + | +4 | outer_rust_failure() + | ^^^^^^^^^^^^^^^^^^^^ + | + +Caused by: + 0: Adding a context in between backtraces + 1: Traceback (most recent call last): + * assert.bzl:3, in + should_fail() + * imported.bzl:9, in should_fail + rust_failure() + + error: rust failure + --> imported.bzl:9:6 + | + 9 | rust_failure() + | ^^^^^^^^^^^^^^ + | + 2: rust failure + 3: fail 2 + 4: fail 1 diff --git a/app/buck2_error_tests/src/golden/test_starlark_callstack_context.golden b/app/buck2_error_tests/src/golden/test_starlark_callstack_context.golden new file mode 100644 index 0000000000000..06348e3c6dfe0 --- /dev/null +++ b/app/buck2_error_tests/src/golden/test_starlark_callstack_context.golden @@ -0,0 +1,21 @@ +# @generated +# To regenerate, append -- --env BUCK2_RUST_REGENERATE_GOLDEN_TESTS=1 and re-run the test + +Adding a context after should still keep backtrace on top + +Caused by: + 0: Traceback (most recent call last): + * assert.bzl:3, in + should_fail() + * imported.bzl:9, in should_fail + rust_failure() + + error: rust failure + --> imported.bzl:9:6 + | + 9 | rust_failure() + | ^^^^^^^^^^^^^^ + | + 1: rust failure + 2: fail 2 + 3: fail 1 diff --git a/app/buck2_error_tests/src/lib.rs b/app/buck2_error_tests/src/lib.rs new file mode 100644 index 0000000000000..a62fa1eef3a54 --- /dev/null +++ b/app/buck2_error_tests/src/lib.rs @@ -0,0 +1,14 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +#![cfg(test)] +#![feature(error_generic_member_access)] + +mod conversion; +mod format; diff --git a/app/buck2_event_log/BUCK b/app/buck2_event_log/BUCK new file mode 100644 index 0000000000000..da992a72ed71d --- /dev/null +++ b/app/buck2_event_log/BUCK @@ -0,0 +1,49 @@ +load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") + +oncall("build_infra") + +rust_library( + name = "buck2_event_log", + srcs = glob(["src/**/*.rs"]), + os_deps = [ + ( + "windows", + [ + "fbsource//third-party/rust:winapi", + ], + ), + ], + test_deps = [ + "fbsource//third-party/rust:maplit", + "fbsource//third-party/rust:tempfile", + ], + deps = [ + "fbsource//third-party/rust:anyhow", + "fbsource//third-party/rust:async-compression", + "fbsource//third-party/rust:chrono", + "fbsource//third-party/rust:futures", + "fbsource//third-party/rust:itertools", + "fbsource//third-party/rust:pin-project", + "fbsource//third-party/rust:prost", + "fbsource//third-party/rust:regex", + "fbsource//third-party/rust:serde", + "fbsource//third-party/rust:serde_json", + "fbsource//third-party/rust:shlex", + "fbsource//third-party/rust:thiserror", + "fbsource//third-party/rust:tokio", + "fbsource//third-party/rust:tokio-stream", + "fbsource//third-party/rust:tokio-util", + "fbsource//third-party/rust:tracing", + "//buck2/allocative/allocative:allocative", + "//buck2/app/buck2_cli_proto:buck2_cli_proto", + "//buck2/app/buck2_common:buck2_common", + "//buck2/app/buck2_core:buck2_core", + "//buck2/app/buck2_data:buck2_data", + "//buck2/app/buck2_event_observer:buck2_event_observer", + "//buck2/app/buck2_events:buck2_events", + "//buck2/app/buck2_util:buck2_util", + "//buck2/app/buck2_wrapper_common:buck2_wrapper_common", + "//buck2/gazebo/dupe:dupe", + "//buck2/gazebo/gazebo:gazebo", + ], +) diff --git a/app/buck2_event_log/Cargo.toml 
b/app/buck2_event_log/Cargo.toml new file mode 100644 index 0000000000000..254b9c451fe69 --- /dev/null +++ b/app/buck2_event_log/Cargo.toml @@ -0,0 +1,44 @@ +[package] +description = "Code supporting buck2 event log handling" +edition = "2021" +license = { workspace = true } +name = "buck2_event_log" +repository = { workspace = true } +version = "0.1.0" + +[dependencies] +allocative = { workspace = true } +anyhow = { workspace = true } +async-compression = { workspace = true } +chrono = { workspace = true } +dupe = { workspace = true } +futures = { workspace = true } +gazebo = { workspace = true } +itertools = { workspace = true } +pin-project = { workspace = true } +prost = { workspace = true } +regex = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +shlex = { workspace = true } +thiserror = { workspace = true } +tokio = { workspace = true } +tokio-stream = { workspace = true } +tokio-util = { workspace = true } +tracing = { workspace = true } + +buck2_cli_proto = { workspace = true } +buck2_common = { workspace = true } +buck2_core = { workspace = true } +buck2_data = { workspace = true } +buck2_event_observer = { workspace = true } +buck2_events = { workspace = true } +buck2_util = { workspace = true } +buck2_wrapper_common = { workspace = true } + +[dev-dependencies] +maplit = { workspace = true } +tempfile = { workspace = true } + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ["cfg(tokio_unstable)"] } diff --git a/app/buck2_client_ctx/src/subscribers/event_log/file_names.rs b/app/buck2_event_log/src/file_names.rs similarity index 88% rename from app/buck2_client_ctx/src/subscribers/event_log/file_names.rs rename to app/buck2_event_log/src/file_names.rs index 89462ef152414..32eef1961e8e3 100644 --- a/app/buck2_client_ctx/src/subscribers/event_log/file_names.rs +++ b/app/buck2_event_log/src/file_names.rs @@ -8,6 +8,7 @@ */ use anyhow::Context; +use buck2_common::invocation_paths::InvocationPaths; use buck2_core::fs::fs_util; use buck2_core::fs::paths::abs_norm_path::AbsNormPath; use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; @@ -19,10 +20,9 @@ use chrono::Utc; use futures::StreamExt; use gazebo::prelude::VecExt; -use crate::client_ctx::ClientCommandContext; -use crate::subscribers::event_log::read::EventLogPathBuf; -use crate::subscribers::event_log::utils::Encoding; -use crate::subscribers::event_log::utils::EventLogErrors; +use crate::read::EventLogPathBuf; +use crate::utils::Encoding; +use crate::utils::EventLogErrors; pub(crate) fn get_logfile_name( event: &BuckEvent, @@ -48,7 +48,7 @@ pub(crate) async fn remove_old_logs(logdir: &AbsNormPath) { if let Ok(logfiles) = get_files_in_log_dir(logdir) { futures::stream::iter(logfiles.into_iter().rev().skip(N_LOGS_RETAINED - 1)) - .then(async move |file| { + .then(|file| async move { // The oldest logs might be open from another concurrent build, so suppress error. 
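// (The trailing `.ok()` below discards that error, so a failed deletion never fails log rotation itself.)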
tokio::fs::remove_file(file).await.ok() }) @@ -107,10 +107,10 @@ pub fn do_find_log_by_trace_id( } pub fn retrieve_nth_recent_log( - ctx: &ClientCommandContext, + paths: &InvocationPaths, n: usize, ) -> anyhow::Result<EventLogPathBuf> { - let log_dir = ctx.paths().context("Error identifying log dir")?.log_dir(); + let log_dir = paths.log_dir(); let mut logfiles = get_local_logs(&log_dir)?; logfiles.reverse(); // newest first let chosen = logfiles @@ -123,7 +123,7 @@ pub fn retrieve_nth_recent_log( Ok(chosen.clone()) } -pub fn retrieve_all_logs(ctx: &ClientCommandContext) -> anyhow::Result<Vec<EventLogPathBuf>> { - let log_dir = ctx.paths().context("Error identifying log dir")?.log_dir(); +pub fn retrieve_all_logs(paths: &InvocationPaths) -> anyhow::Result<Vec<EventLogPathBuf>> { + let log_dir = paths.log_dir(); get_local_logs(&log_dir) } diff --git a/app/buck2_event_log/src/lib.rs b/app/buck2_event_log/src/lib.rs new file mode 100644 index 0000000000000..f94b503c62ba5 --- /dev/null +++ b/app/buck2_event_log/src/lib.rs @@ -0,0 +1,89 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +#![feature(used_with_arg)] + +use std::io; +use std::process; +use std::time::Duration; + +use anyhow::Context as _; +use buck2_core::buck2_env_anyhow; +use buck2_core::ci::is_ci; +use tokio::process::Child; +use tokio::task::JoinHandle; + +pub mod file_names; +pub mod read; +pub mod stream_value; +pub mod ttl; +pub mod user_event_types; +pub mod utils; +pub mod write; +pub mod writer; + +pub fn should_upload_log() -> anyhow::Result<bool> { + if buck2_core::is_open_source() { + return Ok(false); + } + Ok(!buck2_env_anyhow!( + "BUCK2_TEST_DISABLE_LOG_UPLOAD", + bool, + applicability = testing + )?) +} + +pub fn should_block_on_log_upload() -> anyhow::Result<bool> { + // `BUCK2_TEST_BLOCK_ON_UPLOAD` is used by our tests. + Ok( + is_ci()? + || buck2_env_anyhow!("BUCK2_TEST_BLOCK_ON_UPLOAD", bool, applicability = internal)?, + ) +} + +/// Wait for the child to finish. Assume its stderr was piped. +pub async fn wait_for_child_and_log(child: FutureChildOutput, reason: &str) { + async fn inner(child: FutureChildOutput) -> anyhow::Result<()> { + let res = tokio::time::timeout(Duration::from_secs(20), child.task) + .await + .context("Timed out")? + .context("Task failed")? + .context("Process failed")?; + + if !res.status.success() { + let stderr = String::from_utf8_lossy(&res.stderr); + return Err(anyhow::anyhow!( + "Upload exited with status `{}`. Stderr: `{}`", + res.status, + stderr.trim(), + )); + }; + Ok(()) + } + + match inner(child).await { + Ok(_) => {} + Err(e) => { + tracing::warn!("Error uploading {}: {:#}", reason, e); + } + } +} + +/// Ensure that if we spawn children, we don't block their stderr.
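+/// Holding the `JoinHandle` keeps alive the task that is draining the child's +/// output, so the child can never stall on a full stderr pipe; +/// `wait_for_child_and_log` above then awaits that same handle.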
+pub struct FutureChildOutput { + task: JoinHandle<io::Result<process::Output>>, +} + +impl FutureChildOutput { + pub fn new(child: Child) -> Self { + Self { + task: tokio::task::spawn(async move { child.wait_with_output().await }), + } + } +} diff --git a/app/buck2_client_ctx/src/subscribers/event_log/read.rs b/app/buck2_event_log/src/read.rs similarity index 91% rename from app/buck2_client_ctx/src/subscribers/event_log/read.rs rename to app/buck2_event_log/src/read.rs index dcacc78572154..2397e33fb9244 100644 --- a/app/buck2_client_ctx/src/subscribers/event_log/read.rs +++ b/app/buck2_event_log/src/read.rs @@ -41,14 +41,13 @@ use tokio_stream::wrappers::LinesStream; use tokio_util::codec::FramedRead; use crate::stream_value::StreamValue; -use crate::subscribers::event_log::utils::Compression; -use crate::subscribers::event_log::utils::Encoding; -use crate::subscribers::event_log::utils::EventLogErrors; -use crate::subscribers::event_log::utils::EventLogInferenceError; -use crate::subscribers::event_log::utils::Invocation; -use crate::subscribers::event_log::utils::LogMode; -use crate::subscribers::event_log::utils::NoInference; -use crate::subscribers::event_log::utils::KNOWN_ENCODINGS; +use crate::utils::Compression; +use crate::utils::Encoding; +use crate::utils::EventLogErrors; +use crate::utils::EventLogInferenceError; +use crate::utils::Invocation; +use crate::utils::LogMode; +use crate::utils::KNOWN_ENCODINGS; type EventLogReader<'a> = Box<dyn AsyncRead + Send + Sync + Unpin + 'a>; @@ -130,8 +129,10 @@ pub struct EventLogSummary { impl EventLogPathBuf { pub fn infer(path: AbsPathBuf) -> anyhow::Result<Self> { - Self::infer_opt(path)? - .map_err(|NoInference(path)| EventLogInferenceError::InvalidExtension(path).into()) + match Self::infer_opt(&path)? { + Some(v) => Ok(v), + None => Err(EventLogInferenceError::InvalidExtension(path).into()), + } } pub fn path(&self) -> &AbsPath { @@ -159,21 +160,31 @@ impl EventLogPathBuf { TraceId::from_str(uuid).context("Failed to create TraceId from uuid") } - pub(crate) fn infer_opt(path: AbsPathBuf) -> anyhow::Result<Result<Self, NoInference>> { - let name = Self::file_name(&path)?; + // TODO iguridi: this should be done by parsing file header + pub fn command_from_filename(&self) -> anyhow::Result<&str> { + let file_name = Self::file_name(&self.path)?; + // format is of the form "{ts}_{command}_{uuid}_events{ext}" + match file_name.split('_').nth(1) { + Some(command) => Ok(command), + None => Err(anyhow::anyhow!("No command in filename")), + } + } + + pub(crate) fn infer_opt(path: &AbsPathBuf) -> anyhow::Result<Option<Self>> { + let name = Self::file_name(path)?; for encoding in KNOWN_ENCODINGS { for extension in encoding.extensions { if name.ends_with(extension) { - return Ok(Ok(Self { - path, + return Ok(Some(Self { + path: path.clone(), encoding: *encoding, })); } } } - Ok(Err(NoInference(path))) + Ok(None) } async fn unpack_stream_json<'a>( @@ -350,7 +361,7 @@ mod tests { use buck2_events::span::SpanId; use super::*; - use crate::subscribers::event_log::file_names::get_logfile_name; + use crate::file_names::get_logfile_name; #[test] fn test_get_uuid_from_logfile_name() -> anyhow::Result<()> { @@ -373,8 +384,8 @@ mod tests { let event = BuckEvent::new( SystemTime::now(), TraceId::from_str("7b797fa8-62f1-4123-85f9-875cd74b0a63")?, - Some(SpanId::new()), - Some(SpanId::new()), + Some(SpanId::next()), + Some(SpanId::next()), SpanStartEvent { data: Some( CommandStart { diff --git a/app/buck2_client_ctx/src/stream_value.rs b/app/buck2_event_log/src/stream_value.rs similarity index 100% rename from app/buck2_client_ctx/src/stream_value.rs rename to
app/buck2_event_log/src/stream_value.rs diff --git a/app/buck2_event_log/src/ttl.rs b/app/buck2_event_log/src/ttl.rs new file mode 100644 index 0000000000000..fa113db4914d9 --- /dev/null +++ b/app/buck2_event_log/src/ttl.rs @@ -0,0 +1,120 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use buck2_common::manifold::Ttl; +use buck2_core::buck2_env_anyhow; +use buck2_events::metadata::username; +use buck2_events::schedule_type::ScheduleType; + +// Copied from "Is User Command" from scuba buck2_builds +const ROBOTS: &[&str] = &[ + "twsvcscm", + "svcscm", + "facebook", + "root", + "svc-si_admin", + "svc-fbsi_datamgr", +]; + +const USER_TTL_DAYS: u64 = 365; +const DEFAULT_TTL_DAYS: u64 = 60; +// diff signal retention is 4 weeks +const CI_EXCEPT_CONTINUOUS_TTL_DAYS: u64 = 28; + +pub fn manifold_event_log_ttl() -> anyhow::Result<Ttl> { + manifold_event_log_ttl_impl(ROBOTS, username().ok().flatten(), ScheduleType::new()?) +} + +fn manifold_event_log_ttl_impl( + robots: &[&str], + username: Option<String>, + schedule_type: ScheduleType, +) -> anyhow::Result<Ttl> { + // 1. return if this is a test + let env = buck2_env_anyhow!("BUCK2_TEST_MANIFOLD_TTL_S", type=u64, applicability=testing)?; + if let Some(env) = env { + return Ok::<Ttl, anyhow::Error>(Ttl::from_secs(env)); + } + + // 2. return if this is a user + if let Some(username) = username { + if !robots.contains(&(username.as_str())) { + return Ok::<Ttl, anyhow::Error>(Ttl::from_days(USER_TTL_DAYS)); + } + } + + // 3. return if it's not continuous + if schedule_type.is_some() && !schedule_type.is_continuous() { + return Ok(Ttl::from_days(CI_EXCEPT_CONTINUOUS_TTL_DAYS)); + } + + // 4. use default + Ok::<Ttl, anyhow::Error>(Ttl::from_days(DEFAULT_TTL_DAYS)) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_is_a_user() -> anyhow::Result<()> { + assert_eq!( + manifold_event_log_ttl_impl( + &["twsvcscm"], + Some("random_person".to_owned()), + ScheduleType::testing_new("continuous") + )? + .as_secs(), + 365 * 24 * 60 * 60, + ); + Ok(()) + } + + #[test] + fn test_not_a_user() -> anyhow::Result<()> { + assert_eq!( + manifold_event_log_ttl_impl( + &["twsvcscm"], + Some("twsvcscm".to_owned()), + ScheduleType::testing_empty() + )? + .as_secs(), + 60 * 24 * 60 * 60, + ); + Ok(()) + } + + #[test] + fn test_not_a_user_and_not_continuous() -> anyhow::Result<()> { + assert_eq!( + manifold_event_log_ttl_impl( + &["twsvcscm"], + Some("twsvcscm".to_owned()), + ScheduleType::testing_new("foo") + )? + .as_secs(), + 28 * 24 * 60 * 60, + ); + Ok(()) + } + + #[test] + fn test_not_a_user_and_continuous() -> anyhow::Result<()> { + assert_eq!( + manifold_event_log_ttl_impl( + &["twsvcscm"], + Some("twsvcscm".to_owned()), + ScheduleType::testing_new("continuous") + )? + .as_secs(), + 60 * 24 * 60 * 60, + ); + Ok(()) + } +} diff --git a/app/buck2_client_ctx/src/subscribers/event_log/user_event_types.rs b/app/buck2_event_log/src/user_event_types.rs similarity index 100% rename from app/buck2_client_ctx/src/subscribers/event_log/user_event_types.rs rename to app/buck2_event_log/src/user_event_types.rs diff --git a/app/buck2_event_log/src/utils.rs b/app/buck2_event_log/src/utils.rs new file mode 100644 index 0000000000000..1c92c7d9a8424 --- /dev/null +++ b/app/buck2_event_log/src/utils.rs @@ -0,0 +1,186 @@ +/* + * Copyright (c) Meta Platforms, Inc.
and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use anyhow::Context; +use buck2_core::fs::paths::abs_path::AbsPathBuf; +use buck2_wrapper_common::invocation_id::TraceId; +use dupe::Dupe; +use itertools::Itertools; +use serde::Deserialize; +use serde::Serialize; +use thiserror::Error; + +#[derive(Error, Debug)] +pub(crate) enum EventLogErrors { + #[error( + "Trying to write to logfile that hasn't been opened yet - this is an internal error, please report. Unwritten event: {serialized_event}" + )] + LogNotOpen { serialized_event: String }, + + #[error("Reached End of File before reading BuckEvent in log `{0}`")] + EndOfFile(String), + #[error("No event log available for {idx}th last command (have latest {num_logfiles})")] + RecentIndexOutOfBounds { idx: usize, num_logfiles: usize }, +} + +#[derive(Copy, Clone, Dupe, Debug)] +pub struct Encoding { + pub(crate) mode: LogMode, + pub(crate) compression: Compression, + /// List of extensions used to detect file type. + /// + /// The first extension is the default one, used when writing a file. + pub extensions: &'static [&'static str], +} + +impl Encoding { + pub(crate) const JSON: Encoding = Encoding { + mode: LogMode::Json, + compression: Compression::None, + extensions: &[".json-lines"], + }; + + pub(crate) const JSON_GZIP: Encoding = Encoding { + mode: LogMode::Json, + compression: Compression::Gzip, + extensions: &[".json-lines.gz"], + }; + + pub(crate) const JSON_ZSTD: Encoding = Encoding { + mode: LogMode::Json, + compression: Compression::Zstd, + extensions: &[".json-lines.zst"], + }; + + pub(crate) const PROTO: Encoding = Encoding { + mode: LogMode::Protobuf, + compression: Compression::None, + extensions: &[".pb", ".proto"], + }; + + pub(crate) const PROTO_GZIP: Encoding = Encoding { + mode: LogMode::Protobuf, + compression: Compression::Gzip, + extensions: &[".pb.gz", ".proto.gz"], + }; + + pub const PROTO_ZSTD: Encoding = Encoding { + mode: LogMode::Protobuf, + compression: Compression::Zstd, + extensions: &[".pb.zst"], + }; +} + +pub(crate) const KNOWN_ENCODINGS: &[Encoding] = &[ + // Don't forget to update these lists when this is updated: + // * https://fburl.com/code/zgdxtryb + // * https://fburl.com/code/antguytj + Encoding::JSON_GZIP, + Encoding::JSON, + Encoding::JSON_ZSTD, + Encoding::PROTO, + Encoding::PROTO_GZIP, + Encoding::PROTO_ZSTD, +]; + +#[derive(Error, Debug)] +pub(crate) enum EventLogInferenceError { + #[error("Event log at path {} has no filename", .0.display())] + NoFilename(AbsPathBuf), + + #[error("Event log at path {} has a non-utf-8 filename", .0.display())] + InvalidFilename(AbsPathBuf), + + #[error( + "Event log at path {} has an extension that was not recognized. 
Valid extensions are: {}.", + .0.display(), display_valid_extensions() + )] + InvalidExtension(AbsPathBuf), + + #[error("Event log at path {} has no uuid in its filename", .0.display())] + NoUuidInFilename(AbsPathBuf), +} + +fn display_valid_extensions() -> String { + let mut exts = KNOWN_ENCODINGS + .iter() + .flat_map(|encoding| encoding.extensions); + exts.join(", ") +} + +#[derive(Copy, Clone, Dupe, Debug, PartialEq, Eq)] +pub(crate) enum LogMode { + Json, + Protobuf, +} + +#[derive(Copy, Clone, Dupe, Debug)] +pub(crate) enum Compression { + None, + Gzip, + Zstd, +} + +#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)] +pub struct Invocation { + pub command_line_args: Vec, + /// Command line args with expanded `@` args. + #[serde(default)] // For backwards compatibility. Delete after 2023-08-01. + pub expanded_command_line_args: Vec, + /// This is `String` not `AbsPathBuf` because event log is cross-platform + /// and `AbsPathBuf` is not. + pub working_dir: String, + #[serde(default = "TraceId::null")] + pub trace_id: TraceId, +} + +impl Invocation { + pub fn display_command_line(&self) -> String { + shlex::try_join(self.command_line_args.iter().map(|e| e.as_str())) + .expect("Null byte unexpected") + } + + pub fn display_expanded_command_line(&self) -> String { + shlex::try_join(self.expanded_command_line_args.iter().map(|e| e.as_str())) + .expect("Null byte unexpected") + } + + pub(crate) fn parse_json_line(json: &str) -> anyhow::Result { + serde_json::from_str::(json) + .with_context(|| format!("Invalid header: {}", json.trim_end())) + } +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use buck2_wrapper_common::invocation_id::TraceId; + + use crate::utils::Invocation; + + #[test] + fn test_parse_json_line() { + // Make sure serialization format is backwards compatible. + let line = r#"{"command_line_args":["/some/path/buck2","test","@//mode/mac","app/..."],"working_dir":"/Users/nga/dir45","trace_id":"281d1c16-8930-40cd-8fc1-7d71355c20f5"}"#; + let line = Invocation::parse_json_line(line).unwrap(); + let expected = Invocation { + command_line_args: vec![ + "/some/path/buck2".to_owned(), + "test".to_owned(), + "@//mode/mac".to_owned(), + "app/...".to_owned(), + ], + working_dir: "/Users/nga/dir45".to_owned(), + expanded_command_line_args: Vec::new(), + trace_id: TraceId::from_str("281d1c16-8930-40cd-8fc1-7d71355c20f5").unwrap(), + }; + assert_eq!(expected, line); + } +} diff --git a/app/buck2_event_log/src/write.rs b/app/buck2_event_log/src/write.rs new file mode 100644 index 0000000000000..cdca9baa7b474 --- /dev/null +++ b/app/buck2_event_log/src/write.rs @@ -0,0 +1,625 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use std::mem; +use std::process::Stdio; +use std::sync::atomic::AtomicU64; +use std::sync::Arc; + +use anyhow::Context as _; +use buck2_cli_proto::*; +use buck2_common::argv::SanitizedArgv; +use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; +use buck2_core::fs::paths::abs_path::AbsPathBuf; +use buck2_core::fs::working_dir::WorkingDir; +use buck2_events::BuckEvent; +use buck2_wrapper_common::invocation_id::TraceId; +use futures::future::Future; +use prost::Message; +use serde::Serialize; +use tokio::fs::OpenOptions; + +use crate::file_names::get_logfile_name; +use crate::file_names::remove_old_logs; +use crate::read::EventLogPathBuf; +use crate::should_block_on_log_upload; +use crate::should_upload_log; +use crate::user_event_types::try_get_user_event; +use crate::utils::Encoding; +use crate::utils::EventLogErrors; +use crate::utils::Invocation; +use crate::wait_for_child_and_log; +use crate::writer::EventLogType; +use crate::writer::NamedEventLogWriter; +use crate::writer::SerializeForLog; +use crate::FutureChildOutput; + +enum LogWriterState { + Unopened { + logdir: AbsNormPathBuf, + extra_path: Option<AbsPathBuf>, + extra_user_event_log_path: Option<AbsPathBuf>, + }, + Opened { + writers: Vec<NamedEventLogWriter>, + }, + Closed, +} + +pub struct WriteEventLog { + state: LogWriterState, + sanitized_argv: SanitizedArgv, + command_name: String, + working_dir: WorkingDir, + /// Allocation cache. Must be cleaned before use. + buf: Vec<u8>, + log_size_counter_bytes: Option<Arc<AtomicU64>>, +} + +impl WriteEventLog { + pub fn new( + logdir: AbsNormPathBuf, + working_dir: WorkingDir, + extra_path: Option<AbsPathBuf>, + extra_user_event_log_path: Option<AbsPathBuf>, + sanitized_argv: SanitizedArgv, + command_name: String, + log_size_counter_bytes: Option<Arc<AtomicU64>>, + ) -> anyhow::Result<Self> { + Ok(Self { + state: LogWriterState::Unopened { + logdir, + extra_path, + extra_user_event_log_path, + }, + sanitized_argv, + command_name, + working_dir, + buf: Vec::new(), + log_size_counter_bytes, + }) + } + + /// Get the command line arguments and cwd and serialize them for replaying later. + async fn log_invocation(&mut self, trace_id: TraceId) -> anyhow::Result<()> { + let command_line_args = self.sanitized_argv.argv.clone(); + let expanded_command_line_args = self.sanitized_argv.expanded_argv.clone(); + let invocation = Invocation { + command_line_args, + expanded_command_line_args, + working_dir: self.working_dir.to_string(), + trace_id, + }; + self.write_ln(&[invocation]).await + } + + async fn write_ln<'b, T, I>(&'b mut self, events: I) -> anyhow::Result<()> + where + T: SerializeForLog + 'b, + I: IntoIterator<Item = &'b T> + Clone + 'b, + { + match &mut self.state { + LogWriterState::Opened { writers, .. } => { + for writer in writers { + self.buf.clear(); + + writer.write_events(&mut self.buf, &events).await?; + + if self.buf.len() > 1_000_000 { + // Make sure we don't keep too much memory if we encountered one large event. + self.buf = Vec::new(); + } + } + Ok(()) + } + LogWriterState::Unopened { .. } | LogWriterState::Closed => { + self.buf.clear(); + if let Some(event) = events.into_iter().next() { + event.serialize_to_json(&mut self.buf)?; + } else { + // Unreachable.
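+ // (`write_events` returns early when given no events, and `log_invocation` + // always passes exactly one, so this branch should not be reachable.)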
+ + async fn ensure_log_writers_opened(&mut self, event: &BuckEvent) -> anyhow::Result<()> { + let (logdir, maybe_extra_path, maybe_extra_user_event_log_path) = match &self.state { + LogWriterState::Unopened { + logdir, + extra_path, + extra_user_event_log_path, + } => (logdir, extra_path, extra_user_event_log_path), + LogWriterState::Opened { .. } => return Ok(()), + LogWriterState::Closed => { + return Err(anyhow::anyhow!("Received events after logs were closed")); + } + }; + tokio::fs::create_dir_all(logdir) + .await + .with_context(|| format!("Error creating event log directory: `{}`", logdir))?; + remove_old_logs(logdir).await; + + let encoding = Encoding::PROTO_ZSTD; + let file_name = &get_logfile_name(event, encoding, &self.command_name)?; + let path = EventLogPathBuf { + path: logdir.as_abs_path().join(file_name), + encoding, + }; + let writer = start_persist_event_log_subprocess( + path, + event.trace_id()?.clone(), + self.log_size_counter_bytes.clone(), + ) + .await?; + let mut writers = vec![writer]; + + // Also open the user's event log file, if one was provided; the encoding is inferred from its extension, defaulting to gzipped JSON. + if let Some(extra_path) = maybe_extra_path { + writers.push( + open_event_log_for_writing( + EventLogPathBuf::infer_opt(&extra_path)?.unwrap_or_else(|| EventLogPathBuf { + path: extra_path.clone(), + encoding: Encoding::JSON_GZIP, + }), + self.log_size_counter_bytes.clone(), + EventLogType::System, + ) + .await?, + ); + } + + // Also open the user's simple user-event log file, if one was provided; it is JSON-line formatted with no compression unless its extension indicates otherwise. + if let Some(extra_user_event_log_path) = maybe_extra_user_event_log_path { + writers.push( + open_event_log_for_writing( + EventLogPathBuf::infer_opt(&extra_user_event_log_path)?.unwrap_or_else(|| { + EventLogPathBuf { + path: extra_user_event_log_path.clone(), + encoding: Encoding::JSON, + } + }), + self.log_size_counter_bytes.clone(), + EventLogType::User, + ) + .await?, + ); + } + + self.state = LogWriterState::Opened { writers }; + self.log_invocation(event.trace_id()?).await + } + + pub fn exit(&mut self) -> impl Future<Output = ()> + 'static + Send + Sync { + // Shut down writers, flush all our files before exiting. + let state = std::mem::replace(&mut self.state, LogWriterState::Closed); + + async move { + let mut writers = match state { + LogWriterState::Opened { writers } => writers, + LogWriterState::Unopened { .. } | LogWriterState::Closed => { + // Nothing to do in this case, though this should be unreachable + // since we just did a write_ln. + return; + } + }; + + for writer in writers.iter_mut() { + writer.shutdown().await + } + + // NOTE: We call `into_iter()` here and that implicitly drops the `writer.file`, which + // is necessary for an actual `close` call to be sent to the child FD (it is a bit of + // an odd behavior in Tokio that `shutdown` doesn't do that). + let futs = writers + .into_iter() + .filter_map(|w| w.child()) + .map(|proc| wait_for_child_and_log(proc, "Event Log")); + + futures::future::join_all(futs).await; + } + } +}
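The NOTE in `exit()` is worth dwelling on: flushing or shutting down a Tokio pipe writer does not close the file descriptor, so a child process reading from it never sees EOF until the writer is dropped. A minimal sketch of that behavior (tokio assumed as a dependency, with the `process`, `io-util`, and `rt-multi-thread` features):

```rust
use std::process::Stdio;

use tokio::io::AsyncWriteExt;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let mut child = tokio::process::Command::new("cat")
        .stdin(Stdio::piped())
        .stdout(Stdio::null())
        .spawn()?;

    let mut stdin = child.stdin.take().expect("stdin was piped");
    stdin.write_all(b"hello\n").await?;
    stdin.flush().await?;
    // `cat` is still blocked reading here: a flush is not an EOF.
    drop(stdin); // closing the fd is what delivers EOF
    child.wait().await?;
    Ok(())
}
```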
+ +async fn start_persist_event_log_subprocess( + path: EventLogPathBuf, + trace_id: TraceId, + bytes_written: Option<Arc<AtomicU64>>, +) -> anyhow::Result<NamedEventLogWriter> { + let current_exe = std::env::current_exe().context("No current_exe")?; + let mut command = buck2_util::process::async_background_command(current_exe); + // @oss-disable: #[cfg(unix)] + #[cfg(all(tokio_unstable, unix))] // @oss-enable + { + // Ensure that if we get CTRL-C, the persist-event-logs process does not get it. + command.process_group(0); + } + let manifold_name = &format!("{}{}", trace_id, path.extension()); + // TODO T184566736: detach subprocess + command + .args(["debug", "persist-event-logs"]) + .args(["--manifold-name", manifold_name]) + .args(["--local-path".as_ref(), path.path.as_os_str()]) + .args(["--trace-id", &trace_id.to_string()]); + if !should_upload_log()? { + command.arg("--no-upload"); + }; + command.stdout(Stdio::null()).stdin(Stdio::piped()); + + let block = should_block_on_log_upload()?; + if block { + command.stderr(Stdio::piped()); + } else { + command.stderr(Stdio::null()); + } + + let mut child = command.spawn().with_context(|| { + format!( + "Failed to open event log subprocess for writing at `{}`", + path.path.display() + ) + })?; + let pipe = child.stdin.take().expect("stdin was piped"); + + // Only spawn this if we are going to wait. + let process_to_wait_for = if block { + Some(FutureChildOutput::new(child)) + } else { + None + }; + + Ok(NamedEventLogWriter::new( + path, + pipe, + bytes_written, + EventLogType::System, + process_to_wait_for, + )) +} + +async fn open_event_log_for_writing( + path: EventLogPathBuf, + bytes_written: Option<Arc<AtomicU64>>, + event_log_type: EventLogType, +) -> anyhow::Result<NamedEventLogWriter> { + let file = OpenOptions::new() + .create(true) + .append(true) + .open(&path.path) + .await + .with_context(|| { + format!( + "Failed to open event log for writing at `{}`", + path.path.display() + ) + })?; + + Ok(NamedEventLogWriter::new( + path, + file, + bytes_written, + event_log_type, + None, + )) +}
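`command.process_group(0)` above puts the persist subprocess into its own process group so that a Ctrl-C delivered to the terminal's foreground group does not also kill the uploader. The same call exists on `std::process::Command` on Unix (stable since Rust 1.64); a minimal sketch:

```rust
use std::os::unix::process::CommandExt;
use std::process::Command;

fn main() -> std::io::Result<()> {
    // process_group(0) is setpgid(0, 0): the child becomes the leader of a
    // new group, so terminal-generated SIGINT no longer reaches it.
    let child = Command::new("sleep").arg("30").process_group(0).spawn()?;
    println!("spawned pid {} in its own process group", child.id());
    Ok(())
}
```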
+ +impl WriteEventLog { + pub async fn write_events(&mut self, events: &[Arc<BuckEvent>]) -> anyhow::Result<()> { + let mut event_refs = Vec::new(); + let mut first = true; + for event in events { + if first { + self.ensure_log_writers_opened(event).await?; + first = false; + } + + event_refs.push(StreamValueForWrite::Event(event.event())); + } + + if event_refs.is_empty() { + return Ok(()); + } + + self.write_ln(&event_refs).await + } + + pub async fn write_result( + &mut self, + result: &buck2_cli_proto::CommandResult, + ) -> anyhow::Result<()> { + match &self.state { + LogWriterState::Opened { .. } | LogWriterState::Closed => {} + LogWriterState::Unopened { .. } => { + // This is a bit wonky. We can receive a CommandResult before we opened log files + // if the command crashed before it started. That can happen if the daemon + // initialization is what fails, since we need the daemon to initialize in order to + // access request metadata, which we need for the command start event. To keep + // things simple, just tolerate this happening. + return Ok(()); + } + } + + let event = StreamValueForWrite::Result(result); + + self.write_ln(&[event]).await + } + + pub async fn flush_files(&mut self) -> anyhow::Result<()> { + let writers = match &mut self.state { + LogWriterState::Opened { writers } => writers, + LogWriterState::Unopened { .. } | LogWriterState::Closed => return Ok(()), + }; + + for writer in writers { + writer.flush().await?; + } + + Ok(()) + } +} + +impl SerializeForLog for Invocation { + fn serialize_to_json(&self, buf: &mut Vec<u8>) -> anyhow::Result<()> { + serde_json::to_writer(buf, &self).context("Failed to serialize event") + } + + fn serialize_to_protobuf_length_delimited(&self, buf: &mut Vec<u8>) -> anyhow::Result<()> { + let invocation = buck2_data::Invocation { + command_line_args: self.command_line_args.clone(), + expanded_command_line_args: self.expanded_command_line_args.clone(), + working_dir: self.working_dir.clone(), + trace_id: Some(self.trace_id.to_string()), + }; + invocation.encode_length_delimited(buf)?; + Ok(()) + } + + // Always log invocation record to user event log for `buck2 log show` compatibility + fn maybe_serialize_user_event(&self, buf: &mut Vec<u8>) -> anyhow::Result<bool> { + serde_json::to_writer(buf, &self).context("Failed to serialize event")?; + Ok(true) + } +} + +#[derive(Serialize)] +pub enum StreamValueForWrite<'a> { + Result(&'a CommandResult), + Event(&'a buck2_data::BuckEvent), +} + +impl<'a> SerializeForLog for StreamValueForWrite<'a> { + fn serialize_to_json(&self, buf: &mut Vec<u8>) -> anyhow::Result<()> { + serde_json::to_writer(buf, &self).context("Failed to serialize event") + } + + fn serialize_to_protobuf_length_delimited(&self, buf: &mut Vec<u8>) -> anyhow::Result<()> { + // We use `CommandProgressForWrite` here to avoid cloning `BuckEvent`. + // `CommandProgressForWrite` serialization is bitwise identical to `CommandProgress`. + // See the protobuf spec + // https://developers.google.com/protocol-buffers/docs/encoding#length-types + // for the details about protobuf wire format. + let progress = match self { + Self::Event(e) => command_progress_for_write::Progress::Event(e.encode_to_vec()), + Self::Result(res) => command_progress_for_write::Progress::Result((*res).clone()), + }; + let stream_val = buck2_cli_proto::CommandProgressForWrite { + progress: Some(progress), + }; + stream_val.encode_length_delimited(buf)?; + Ok(()) + } + + fn maybe_serialize_user_event(&self, buf: &mut Vec<u8>) -> anyhow::Result<bool> { + if let StreamValueForWrite::Event(event) = self { + if let Some(user_event) = try_get_user_event(event)? { + serde_json::to_writer(buf, &user_event).context("Failed to serialize event")?; + return Ok(true); + } + } + + Ok(false) + } +}
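The comment in `serialize_to_protobuf_length_delimited` leans on a protobuf property: each record is written as a varint byte-length prefix followed by the message payload, so a reader can peel records off a concatenated stream one at a time. A sketch of that framing using `prost` (with `prost-types` standing in for the buck2 message types):

```rust
use prost::Message;

fn main() -> Result<(), prost::DecodeError> {
    let msg = prost_types::Duration { seconds: 3, nanos: 500_000_000 };

    // Append two length-delimited records to one buffer, as the event log
    // writer does per event.
    let mut buf = Vec::new();
    msg.encode_length_delimited(&mut buf).expect("Vec never errors");
    msg.encode_length_delimited(&mut buf).expect("Vec never errors");

    // A reader pulls them back one record at a time from the same stream.
    let mut rest = buf.as_slice();
    let a = prost_types::Duration::decode_length_delimited(&mut rest)?;
    let b = prost_types::Duration::decode_length_delimited(&mut rest)?;
    assert_eq!(a, b);
    assert!(rest.is_empty());
    Ok(())
}
```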
+ +#[cfg(test)] +mod tests { + use std::time::SystemTime; + + use buck2_data::LoadBuildFileStart; + use buck2_data::SpanStartEvent; + use buck2_events::span::SpanId; + use futures::TryStreamExt; + use tempfile::TempDir; + + use super::*; + use crate::stream_value::StreamValue; + use crate::utils::Compression; + + impl WriteEventLog { + async fn new_test(log: EventLogPathBuf) -> anyhow::Result<Self> { + Ok(Self { + state: LogWriterState::Opened { + writers: vec![ + open_event_log_for_writing(log, None, EventLogType::System).await?, + ], + }, + sanitized_argv: SanitizedArgv { + argv: vec!["buck2".to_owned()], + expanded_argv: vec!["buck2".to_owned()], + }, + command_name: "testtest".to_owned(), + working_dir: WorkingDir::current_dir()?, + buf: Vec::new(), + log_size_counter_bytes: None, + }) + } + } + + fn make_event() -> BuckEvent { + BuckEvent::new( + SystemTime::now(), + TraceId::new(), + Some(SpanId::next()), + None, + buck2_data::buck_event::Data::SpanStart(SpanStartEvent { + data: Some(buck2_data::span_start_event::Data::Load( + LoadBuildFileStart { + module_id: "foo".to_owned(), + cell: "bar".to_owned(), + }, + )), + }), + ) + } + + #[tokio::test] + async fn test_protobuf_decoding_gzip() -> anyhow::Result<()> { + test_protobuf_decoding(Encoding::PROTO_GZIP).await + } + + #[tokio::test] + async fn test_protobuf_decoding_zstd() -> anyhow::Result<()> { + test_protobuf_decoding(Encoding::PROTO_ZSTD).await + } + + async fn test_protobuf_decoding(encoding: Encoding) -> anyhow::Result<()> { + //Create log dir + let tmp_dir = TempDir::new()?; + + //Create mock event + let event = make_event(); + + // Create event log + let log = EventLogPathBuf { + path: AbsPathBuf::try_from(tmp_dir.path().join("log")).unwrap(), + encoding, + }; + + let mut write_event_log = WriteEventLog::new_test(log.clone()).await?; + + //Log event + let value = StreamValueForWrite::Event(event.event()); + write_event_log.log_invocation(event.trace_id()?).await?; + write_event_log.write_ln(&[value]).await?; + write_event_log.exit().await; + + //Get and decode log + let (_invocation, mut events) = log.unpack_stream().await?; + + //Get event + let retrieved_event = match events.try_next().await?.expect("Failed getting log") { + StreamValue::Event(e) => BuckEvent::try_from(e), + _ => panic!("expected event"), + }?; + + //Assert it's the same event created in the beginning + assert_eq!(retrieved_event.timestamp(), event.timestamp()); + assert_eq!( + retrieved_event.trace_id().unwrap(), + event.trace_id().unwrap() + ); + assert_eq!(retrieved_event.span_id().unwrap(), event.span_id().unwrap()); + assert_eq!(retrieved_event.data(), event.data()); + + assert!( + events.try_next().await.unwrap().is_none(), + "expecting no more events" + ); + + Ok(()) + } + + #[tokio::test] + async fn test_tick_makes_valid_log_zstd() -> anyhow::Result<()> { + test_tick_makes_valid_log(Encoding::PROTO_ZSTD).await + } + + async fn test_tick_makes_valid_log(encoding: Encoding) -> anyhow::Result<()> { + if cfg!(windows) { + // Do not want to deal with exclusivity issues on Windows.
+ return Ok(()); + } + + let tmp_dir = TempDir::new()?; + + let log = EventLogPathBuf { + path: AbsPathBuf::try_from(tmp_dir.path().join("test_tick_makes_valid_log.pb.gz")) + .unwrap(), + encoding, + }; + + let mut write_event_log = WriteEventLog::new_test(log.clone()).await?; + + let event = make_event(); + let value = StreamValueForWrite::Event(event.event()); + write_event_log.log_invocation(event.trace_id()?).await?; + write_event_log.write_ln(&[value]).await?; + + assert!( + log.unpack_stream().await.is_err(), + "Sanity check: gzip was not flushed, so the log is invalid" + ); + + // Now flush the gzip stream. + write_event_log.flush_files().await?; + + // Do not close the log, and open it. + let (_invocation, mut events) = log.unpack_stream().await?; + + let retrieved_event = match events.try_next().await?.expect("Failed getting log") { + StreamValue::Event(e) => BuckEvent::try_from(e).unwrap(), + _ => panic!("expecting event"), + }; + + assert_eq!(retrieved_event.timestamp(), event.timestamp()); + assert_eq!( + retrieved_event.trace_id().unwrap(), + event.trace_id().unwrap() + ); + assert_eq!(retrieved_event.span_id(), event.span_id()); + assert_eq!(retrieved_event.data(), event.data()); + + match encoding.compression { + Compression::Gzip => { + // TODO(nga): `tick` does not write gzip footer, so even after `tick` + // generated file is not a valid gzip file. + // assert!(events.try_next().await.unwrap().is_none(), "expecting no more events"); + assert!(events.try_next().await.is_err()); + } + Compression::Zstd => { + assert!( + events.try_next().await.unwrap().is_none(), + "expecting no more events" + ); + } + Compression::None => unreachable!(), + } + + Ok(()) + } + + #[test] + fn test_stream_value_serialize_to_protobuf_length_delimited() { + let event = make_event(); + let mut actual = Vec::new(); + StreamValueForWrite::Event(event.event()) + .serialize_to_protobuf_length_delimited(&mut actual) + .unwrap(); + let expected = buck2_cli_proto::CommandProgress { + progress: Some(command_progress::Progress::Event(event.into())), + } + .encode_length_delimited_to_vec(); + assert_eq!(expected, actual); + } +} diff --git a/app/buck2_event_log/src/writer.rs b/app/buck2_event_log/src/writer.rs new file mode 100644 index 0000000000000..de67b9174c5cf --- /dev/null +++ b/app/buck2_event_log/src/writer.rs @@ -0,0 +1,206 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use std::io; +use std::pin::Pin; +use std::sync::atomic::AtomicU64; +use std::sync::atomic::Ordering; +use std::sync::Arc; +use std::task::Context; +use std::task::Poll; + +use async_compression::tokio::write::GzipEncoder; +use async_compression::tokio::write::ZstdEncoder; +use counting_reader::CountingReader; +use pin_project::pin_project; +use tokio::io::AsyncWrite; +use tokio::io::AsyncWriteExt; + +use crate::read::EventLogPathBuf; +use crate::utils::Compression; +use crate::utils::LogMode; +use crate::FutureChildOutput; + +type EventLogWriter = Box<dyn AsyncWrite + Send + Sync + Unpin>; + +mod counting_reader { + use super::*; + + #[pin_project] + pub struct CountingReader<T> { + #[pin] + pub(super) inner: T, + pub(super) stats: Option<Arc<AtomicU64>>, + } +} + +impl<T> CountingReader<T> { + fn new(inner: T, stats: Option<Arc<AtomicU64>>) -> Self { + Self { inner, stats } + } +} + +impl<T> AsyncWrite for CountingReader<T> +where + T: AsyncWrite, +{ + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll<Result<usize, io::Error>> { + let this = self.project(); + let bytes = futures::ready!(this.inner.poll_write(cx, buf))?; + if let Some(stats) = this.stats { + stats.fetch_add(bytes as u64, Ordering::Relaxed); + } + + Poll::Ready(Ok(bytes)) + } + + fn poll_flush( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll<Result<(), std::io::Error>> { + self.project().inner.poll_flush(cx) + } + + fn poll_shutdown( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll<Result<(), std::io::Error>> { + self.project().inner.poll_shutdown(cx) + } +}
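`CountingReader` (despite the name, a writer) is a pass-through `AsyncWrite` that tallies how many bytes the inner writer accepted; the shared `AtomicU64` is what ultimately feeds `log_size_counter_bytes`. A self-contained miniature of the same idea, restricted to `Unpin` inner writers so plain `Pin::new` suffices instead of `pin_project` (names are mine, not the buck2 API):

```rust
use std::pin::Pin;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use std::task::{ready, Context, Poll};

use tokio::io::{AsyncWrite, AsyncWriteExt};

struct CountingWriter<T> {
    inner: T,
    stats: Arc<AtomicU64>,
}

impl<T: AsyncWrite + Unpin> AsyncWrite for CountingWriter<T> {
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<std::io::Result<usize>> {
        let n = ready!(Pin::new(&mut self.inner).poll_write(cx, buf))?;
        // Count only the bytes the inner writer actually accepted.
        self.stats.fetch_add(n as u64, Ordering::Relaxed);
        Poll::Ready(Ok(n))
    }

    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
        Pin::new(&mut self.inner).poll_flush(cx)
    }

    fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
        Pin::new(&mut self.inner).poll_shutdown(cx)
    }
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let stats = Arc::new(AtomicU64::new(0));
    let mut w = CountingWriter { inner: tokio::io::sink(), stats: stats.clone() };
    w.write_all(b"123456789").await?;
    assert_eq!(stats.load(Ordering::Relaxed), 9);
    Ok(())
}
```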
+ +#[derive(Eq, PartialEq, Copy, Clone)] +pub(crate) enum EventLogType { + System, + User, +} +pub(crate) struct NamedEventLogWriter { + path: EventLogPathBuf, + file: EventLogWriter, + event_log_type: EventLogType, + /// If this writing is done by a subprocess, that process's output, assuming we intend to wait + /// for it to exit. + process_to_wait_for: Option<FutureChildOutput>, +} + +impl NamedEventLogWriter { + pub(crate) fn new( + path: EventLogPathBuf, + file: impl AsyncWrite + std::marker::Send + std::marker::Unpin + std::marker::Sync + 'static, + bytes_written: Option<Arc<AtomicU64>>, + event_log_type: EventLogType, + process_to_wait_for: Option<FutureChildOutput>, + ) -> Self { + let file = match path.encoding.compression { + Compression::None => { + Box::new(CountingReader::new(file, bytes_written)) as EventLogWriter + } + Compression::Gzip => Box::new(GzipEncoder::with_quality( + CountingReader::new(file, bytes_written), + async_compression::Level::Fastest, + )) as EventLogWriter, + Compression::Zstd => Box::new(ZstdEncoder::with_quality( + CountingReader::new(file, bytes_written), + async_compression::Level::Default, + )) as EventLogWriter, + }; + Self { + path, + file, + event_log_type, + process_to_wait_for, + } + } + + pub(crate) async fn flush(&mut self) -> anyhow::Result<()> { + match self.file.flush().await { + Ok(_) => Ok(()), + Err(e) if e.kind() == std::io::ErrorKind::BrokenPipe => { + // The subprocess exited with some kind of error. That is logged separately, so + // here we just ignore it. + Ok(()) + } + Err(e) => Err(anyhow::Error::from(e).context(format!( + "Error flushing log file at {}", + self.path.path.display() + ))), + } + } + + pub(crate) async fn shutdown(&mut self) { + if let Err(e) = self.file.shutdown().await { + tracing::warn!("Failed to flush log file at `{}`: {:#}", self.path.path, e); + } + } + + pub(crate) fn child(mut self) -> Option<FutureChildOutput> { + self.process_to_wait_for.take() + } + + fn serialize_event<'b, T>(&self, mut buf: &mut Vec<u8>, event: &T) -> anyhow::Result<()> + where + T: SerializeForLog + 'b, + { + match self.event_log_type { + EventLogType::System => { + match self.path.encoding.mode { + LogMode::Json => { + event.serialize_to_json(&mut buf)?; + buf.push(b'\n'); + } + LogMode::Protobuf => event.serialize_to_protobuf_length_delimited(&mut buf)?, + }; + } + EventLogType::User => { + if event.maybe_serialize_user_event(&mut buf)? { + buf.push(b'\n'); + } + } + } + Ok(()) + } + + async fn write_all(&mut self, buf: &[u8]) -> anyhow::Result<()> { + match self.file.write_all(buf).await { + Ok(_) => Ok(()), + Err(e) if e.kind() == std::io::ErrorKind::BrokenPipe => { + // The subprocess exited with some kind of error. That is logged separately, so + // here we just ignore it. + Ok(()) + } + Err(e) => Err(anyhow::Error::from(e).context("Failed to write event")), + } + } + + pub(crate) async fn write_events<'b, T, I>( + &mut self, + mut buf: &mut Vec<u8>, + events: &I, + ) -> Result<(), anyhow::Error> + where + T: SerializeForLog + 'b, + I: IntoIterator<Item = &'b T> + Clone + 'b, + { + for event in events.clone() { + self.serialize_event(&mut buf, event)?; + } + self.write_all(&buf).await?; + Ok(()) + } +} + +pub(crate) trait SerializeForLog { + fn serialize_to_json(&self, buf: &mut Vec<u8>) -> anyhow::Result<()>; + fn serialize_to_protobuf_length_delimited(&self, buf: &mut Vec<u8>) -> anyhow::Result<()>; + fn maybe_serialize_user_event(&self, buf: &mut Vec<u8>) -> anyhow::Result<bool>; +} diff --git a/app/buck2_event_observer/BUCK b/app/buck2_event_observer/BUCK index 52bffb8128763..2178ed46ef691 100644 --- a/app/buck2_event_observer/BUCK +++ b/app/buck2_event_observer/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -11,15 +10,19 @@ rust_library( ], deps = [ "fbsource//third-party/rust:anyhow", - "fbsource//third-party/rust:clap-3", + "fbsource//third-party/rust:clap", "fbsource//third-party/rust:derivative", "fbsource//third-party/rust:derive_more", "fbsource//third-party/rust:linked-hash-map", + "fbsource//third-party/rust:prost-types", + "fbsource//third-party/rust:regex", "fbsource//third-party/rust:shlex", - "fbsource//third-party/rust:thiserror", + "fbsource//third-party/rust:termwiz", "fbsource//third-party/rust:tracing", "//buck2/app/buck2_common:buck2_common", + "//buck2/app/buck2_core:buck2_core", "//buck2/app/buck2_data:buck2_data", + "//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_events:buck2_events", "//buck2/app/buck2_test_api:buck2_test_api", "//buck2/app/buck2_util:buck2_util", diff --git a/app/buck2_event_observer/Cargo.toml b/app/buck2_event_observer/Cargo.toml index dcae173fd0452..ddb85ba1dea9b 100644 --- a/app/buck2_event_observer/Cargo.toml +++ b/app/buck2_event_observer/Cargo.toml @@ -1,29 +1,34 @@ [package] +description = "utilities to interpret the buck2 event stream" +edition = "2021" +license = { workspace = true } name = "buck2_event_observer" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description =
"utilities to interpret the buck2 event stream" [dependencies] starlark_map = { workspace = true } anyhow = { workspace = true } buck2_common = { workspace = true } +buck2_core = { workspace = true } buck2_data = { workspace = true } +buck2_error = { workspace = true } buck2_events = { workspace = true } buck2_test_api = { workspace = true } +buck2_util = { workspace = true } buck2_wrapper_common = { workspace = true } clap = { workspace = true } derivative = { workspace = true } derive_more = { workspace = true } dupe = { workspace = true } gazebo = { workspace = true } -itertools = { workspace = true } linked-hash-map = { workspace = true } +prost-types = { workspace = true } +regex = { workspace = true } shlex = { workspace = true } superconsole = { version = "0.2.0", path = "../../superconsole" } -thiserror = { workspace = true } -buck2_util = { workspace = true } +termwiz = { workspace = true } tracing = { workspace = true } [dev-dependencies] diff --git a/app/buck2_event_observer/src/action_stats.rs b/app/buck2_event_observer/src/action_stats.rs index 6fe17bdb4c343..1027572b3bd5c 100644 --- a/app/buck2_event_observer/src/action_stats.rs +++ b/app/buck2_event_observer/src/action_stats.rs @@ -73,6 +73,10 @@ impl ActionStats { } pub fn update(&mut self, action: &buck2_data::ActionExecutionEnd) { + // TODO(ezgi): consolidate with InvocationRecord creation at https://fburl.com/code/c8iitvvy + if action.kind != buck2_data::ActionKind::Run as i32 { + return; + } if was_fallback_action(action) { self.fallback_actions += 1; } diff --git a/app/buck2_event_observer/src/action_util.rs b/app/buck2_event_observer/src/action_util.rs new file mode 100644 index 0000000000000..87861cbc532ee --- /dev/null +++ b/app/buck2_event_observer/src/action_util.rs @@ -0,0 +1,35 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub fn get_action_digest(commands: &[buck2_data::CommandExecution]) -> Option { + if let Some(command_execution) = commands.last() { + if let Some(details) = &command_execution.details { + if let Some(command_kind) = &details.command_kind { + if let Some(command) = &command_kind.command { + return match command { + buck2_data::command_execution_kind::Command::RemoteCommand( + remote_command, + ) => Some(remote_command.action_digest.to_owned()), + buck2_data::command_execution_kind::Command::LocalCommand( + local_command, + ) => Some(local_command.action_digest.to_owned()), + buck2_data::command_execution_kind::Command::WorkerCommand( + worker_command, + ) => Some(worker_command.action_digest.to_owned()), + buck2_data::command_execution_kind::Command::OmittedLocalCommand( + omitted_local_command, + ) => Some(omitted_local_command.action_digest.to_owned()), + _ => None, + }; + } + } + } + } + None +} diff --git a/app/buck2_event_observer/src/cold_build_detector.rs b/app/buck2_event_observer/src/cold_build_detector.rs new file mode 100644 index 0000000000000..61d5ee2a90939 --- /dev/null +++ b/app/buck2_event_observer/src/cold_build_detector.rs @@ -0,0 +1,93 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
diff --git a/app/buck2_event_observer/src/cold_build_detector.rs b/app/buck2_event_observer/src/cold_build_detector.rs new file mode 100644 index 0000000000000..61d5ee2a90939 --- /dev/null +++ b/app/buck2_event_observer/src/cold_build_detector.rs @@ -0,0 +1,93 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use buck2_common::build_count::BuildCountManager; +use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; +use buck2_core::soft_error; +use buck2_data::FileWatcherEnd; +use buck2_data::ParsedTargetPatterns; + +/// Detects if this is the first build since a rebase. +/// The state is relevant per command since the detector is recreated for each command. +pub struct ColdBuildDetector { + build_count_manager: BuildCountManager, + merge_base: Option<String>, + first_build_since_rebase: Option<bool>, + target_patterns: Option<ParsedTargetPatterns>, +} + +impl ColdBuildDetector { + pub fn new(build_count_dir: AbsNormPathBuf) -> Self { + Self { + build_count_manager: BuildCountManager::new(build_count_dir), + merge_base: None, + first_build_since_rebase: None, + target_patterns: None, + } + } + + pub fn first_build_since_rebase(&self) -> Option<bool> { + self.first_build_since_rebase + } + + pub async fn update_merge_base(&mut self, file_watcher: &FileWatcherEnd) -> anyhow::Result<()> { + if let Some(merge_base) = file_watcher + .stats + .as_ref() + .and_then(|stats| stats.branched_from_revision.as_ref()) + { + // We could get multiple updates. If the filewatcher restarts, it could send a new merge base. + // Recompute the first_build_since_rebase only if the merge base changed. + if self.merge_base.as_deref() == Some(merge_base) { + return Ok(()); + } + self.merge_base = Some(merge_base.clone()); + self.try_compute_first_build_since_rebase().await?; + } + Ok(()) + } + + pub async fn update_parsed_target_patterns( + &mut self, + patterns: &ParsedTargetPatterns, + ) -> anyhow::Result<()> { + if self.target_patterns.is_some() { + soft_error!( + "parsed_target_patterns_changed_unexpectedly", + anyhow::anyhow!( + "unexpected parsed target patterns update from: {:?} to: {:?}", + self.target_patterns, + patterns + ) + .into() + )?; + return Ok(()); + } + self.target_patterns = Some(patterns.clone()); + self.try_compute_first_build_since_rebase().await?; + Ok(()) + } + + async fn try_compute_first_build_since_rebase(&mut self) -> anyhow::Result<()> { + if self.first_build_since_rebase.is_some() { + // This value should be valid for the lifetime of the detector. + return Ok(()); + } + + // Compute first_build_since_rebase only if both `merge base` and `target patterns` are available. + if let (Some(merge_base), Some(patterns)) = (&self.merge_base, &self.target_patterns) { + let build_count = self + .build_count_manager + .min_count(merge_base, patterns) + .await?; + self.first_build_since_rebase = Some(build_count.successful_build_count < 1); + } + Ok(()) + } +} diff --git a/app/buck2_event_observer/src/debug_events.rs b/app/buck2_event_observer/src/debug_events.rs index e38c2c973548a..e1fb67d1f53c9 100644 --- a/app/buck2_event_observer/src/debug_events.rs +++ b/app/buck2_event_observer/src/debug_events.rs @@ -13,6 +13,7 @@ use std::time::Duration; use std::time::Instant; use std::time::SystemTime; +use buck2_data::SpanEndEvent; use buck2_events::BuckEvent; use gazebo::variants::VariantName; @@ -84,59 +85,71 @@ impl DebugEventsState { } match unpack_event(event)?
{ - UnpackedBuckEvent::SpanStart(_, _, data) => { - let name = data.variant_name(); - - let entry = { - match self.spans.get_mut(name) { - Some(v) => v, - None => { - self.spans.insert(name.to_owned(), SpanData::default()); - self.spans.get_mut(name).unwrap() - } - } - }; - entry.started += 1; - } - UnpackedBuckEvent::SpanEnd(_, span_end, data) => { - // Right now, matching these end events to the start events depends on the field names in the protobufs - // matching. That works but is fragile, if it breaks at some point we can do the match and explicitly - // match them ourselves. - let name = data.variant_name(); - - let entry = { - match self.spans.get_mut(name) { - Some(v) => v, - None => { - self.spans.insert(name.to_owned(), SpanData::default()); - self.spans.get_mut(name).unwrap() - } - } - }; - entry.finished += 1; - if let Some(v) = &span_end.stats { - entry.total_poll_time += Duration::from_micros(v.max_poll_time_us); - entry.total_max_poll_time += Duration::from_micros(v.max_poll_time_us); + UnpackedBuckEvent::SpanStart(_, _, data) => self.span_started(Some(data)), + UnpackedBuckEvent::UnrecognizedSpanStart(_, _) => self.span_started(None), + UnpackedBuckEvent::SpanEnd(_, span_end, data) => self.span_end(span_end, Some(data)), + UnpackedBuckEvent::UnrecognizedSpanEnd(_, span_end) => self.span_end(span_end, None), + UnpackedBuckEvent::Instant(_, _, data) => self.instant(Some(data)), + UnpackedBuckEvent::UnrecognizedInstant(_, _) => self.instant(None), + } + + Ok(()) + } + + fn span_started(&mut self, data: Option<&buck2_data::span_start_event::Data>) { + let name = data.map_or("Unrecognized", |d| d.variant_name()); + let entry = { + match self.spans.get_mut(name) { + Some(v) => v, + None => { + self.spans.insert(name.to_owned(), SpanData::default()); + self.spans.get_mut(name).unwrap() } } - UnpackedBuckEvent::Instant(_, _, data) => { - let name = data.variant_name(); - - let entry = { - match self.instants.get_mut(name) { - Some(v) => v, - None => { - self.instants - .insert(name.to_owned(), InstantData::default()); - self.instants.get_mut(name).unwrap() - } - } - }; - - entry.count += 1; + }; + entry.started += 1; + } + + fn span_end( + &mut self, + span_end: &SpanEndEvent, + data: Option<&buck2_data::span_end_event::Data>, + ) { + // Right now, matching these end events to the start events depends on the field names in the protobufs + // matching. That works but is fragile, if it breaks at some point we can do the match and explicitly + // match them ourselves. 
+ let name = data.map_or("Unrecognized", |d| d.variant_name()); + + let entry = { + match self.spans.get_mut(name) { + Some(v) => v, + None => { + self.spans.insert(name.to_owned(), SpanData::default()); + self.spans.get_mut(name).unwrap() + } } + }; + entry.finished += 1; + if let Some(v) = &span_end.stats { + entry.total_poll_time += Duration::from_micros(v.max_poll_time_us); + entry.total_max_poll_time += Duration::from_micros(v.max_poll_time_us); } + } - Ok(()) + fn instant(&mut self, data: Option<&buck2_data::instant_event::Data>) { + let name = data.map_or("Unrecognized", |d| d.variant_name()); + + let entry = { + match self.instants.get_mut(name) { + Some(v) => v, + None => { + self.instants + .insert(name.to_owned(), InstantData::default()); + self.instants.get_mut(name).unwrap() + } + } + }; + + entry.count += 1; } } diff --git a/app/buck2_event_observer/src/display.rs b/app/buck2_event_observer/src/display.rs index 9410d1c6489cf..21b6f5786bea3 100644 --- a/app/buck2_event_observer/src/display.rs +++ b/app/buck2_event_observer/src/display.rs @@ -9,7 +9,6 @@ // TODO(brasselsprouts): move this onto the original core types and convert in events -use std::borrow::Cow; use std::fmt; use std::fmt::Write; use std::sync::Arc; @@ -36,10 +35,13 @@ use superconsole::style::Stylize; use superconsole::Line; use superconsole::Lines; use superconsole::Span; -use thiserror::Error; +use termwiz::escape::Action; +use termwiz::escape::ControlCode; use crate::fmt_duration; use crate::verbosity::Verbosity; +use crate::what_ran::command_to_string; +use crate::what_ran::worker_command_as_fallback_to_string; #[derive(Copy, Clone, Dupe)] pub struct TargetDisplayOptions { @@ -53,6 +55,12 @@ impl TargetDisplayOptions { } } + pub fn for_build_report() -> Self { + Self { + with_configuration: true, + } + } + pub fn for_console(with_configuration: bool) -> Self { Self { with_configuration } } @@ -85,6 +93,19 @@ pub fn display_configured_target_label( } } +fn display_configured_target_label_opt( + ctl: Option<&ConfiguredTargetLabel>, + opts: TargetDisplayOptions, +) -> anyhow::Result { + Ok(match ctl { + Some(ctl) => display_configured_target_label(ctl, opts)?, + None => { + // Should never happen, but better not error here. + "unknown target".to_owned() + } + }) +} + pub fn display_anon_target(ctl: &AnonTarget) -> anyhow::Result { if let AnonTarget { name: Some(TargetLabel { package, name }), @@ -107,6 +128,16 @@ pub fn display_analysis_target( match target { Target::StandardTarget(ctl) => display_configured_target_label(ctl, opts), Target::AnonTarget(anon) => display_anon_target(anon), + Target::DynamicLambda(dynamic) => { + use buck2_data::dynamic_lambda_owner::Owner; + match dynamic.owner.as_ref().context("Missing `owner`")? 
{ + Owner::TargetLabel(target_label) => { + display_configured_target_label(target_label, opts) + } + Owner::BxlKey(bxl_key) => display_bxl_key(bxl_key), + Owner::AnonTarget(anon_target) => display_anon_target(anon_target), + } + } } } @@ -150,7 +181,7 @@ pub fn display_action_key( } } -fn display_action_name_opt(name: Option<&ActionName>) -> String { +pub fn display_action_name_opt(name: Option<&ActionName>) -> String { match name { Some(name) if name.identifier.is_empty() => name.category.clone(), Some(name) => format!("{} {}", name.category, name.identifier), @@ -230,6 +261,13 @@ pub fn display_event(event: &BuckEvent, opts: TargetDisplayOptions) -> anyhow::R let stage = display_analysis_stage(stage); Ok(stage.into()) } + Data::AnalysisResolveQueries(resolve_queries) => Ok(format!( + "{} -- analysis queries", + display_configured_target_label_opt( + resolve_queries.standard_target.as_ref(), + opts + )? + )), Data::LoadPackage(load) => Ok(format!("{} -- loading package file tree", load.path)), Data::Load(load) => Ok(format!("{} -- evaluating build file", load.module_id)), Data::ExecutorStage(info) => { @@ -270,15 +308,12 @@ pub fn display_event(event: &BuckEvent, opts: TargetDisplayOptions) -> anyhow::R }; Ok(format!("dep_files({},{})", detail, location)) } - Data::SharedTask(..) => Ok("Waiting on task from another command".to_owned()), - Data::CacheUpload(upload) => { - let reason = match buck2_data::CacheUploadReason::from_i32(upload.reason) { - Some(buck2_data::CacheUploadReason::DepFile) => "dep_file", - Some(buck2_data::CacheUploadReason::LocalExecution) => "action", - None => "unknown", - }; - Ok(format!("upload ({})", reason)) - } + Data::SharedTask(buck2_data::SharedTaskStart { owner_trace_id }) => Ok(format!( + "Waiting on task from another command: {}", + owner_trace_id + )), + Data::CacheUpload(_) => Ok("upload (action)".to_owned()), + Data::DepFileUpload(_) => Ok("upload (dep_file)".to_owned()), Data::CreateOutputSymlinks(..) => Ok("Creating output symlinks".to_owned()), Data::InstallEventInfo(info) => Ok(format!( "Sending {} at path {}", @@ -330,7 +365,12 @@ pub fn display_event(event: &BuckEvent, opts: TargetDisplayOptions) -> anyhow::R Data::Fake(fake) => Ok(format!("{} -- speak of the devil", fake.caramba)), Data::LocalResources(..) => Ok("Local resources setup".to_owned()), Data::ReleaseLocalResources(..) => Ok("Releasing local resources".to_owned()), + Data::CreateOutputHashesFile(..) => Ok("Creating output hashes file".to_owned()), Data::BxlEnsureArtifacts(..) => Err(ParseEventError::UnexpectedEvent.into()), + Data::ActionErrorHandlerExecution(..) => { + Ok("Running error handler on action failure".to_owned()) + } + Data::CqueryUniverseBuild(..) => Ok("Building cquery universe".to_owned()), }; // This shouldn't really be necessary, but that's how try blocks work :( @@ -344,6 +384,7 @@ fn display_file_watcher(provider: i32) -> &'static str { match buck2_data::FileWatcherProvider::from_i32(provider) { Some(buck2_data::FileWatcherProvider::Watchman) => "Watchman", Some(buck2_data::FileWatcherProvider::RustNotify) => "notify", + Some(buck2_data::FileWatcherProvider::FsHashCrawler) => "fs_hash_crawler", None => "unknown mechanism", } } @@ -352,7 +393,6 @@ pub fn display_analysis_stage(stage: &buck2_data::analysis_stage_start::Stage) - use buck2_data::analysis_stage_start::Stage; match stage { - Stage::ResolveQueries(()) => "resolve_queries", Stage::EvaluateRule(()) => "evaluate_rule", } } @@ -439,6 +479,7 @@ pub fn display_executor_stage( Stage::WorkerDownload(..) 
=> "re_worker_download", Stage::WorkerUpload(..) => "re_worker_upload", Stage::Unknown(..) => "re_unknown", + Stage::MaterializeFailedInputs(..) => "re_materialize_failed_inputs", } } Stage::Local(local) => { @@ -461,7 +502,7 @@ pub fn display_executor_stage( Some(label) } -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] enum ParseEventError { #[error("Missing configured target label")] MissingConfiguredTargetLabel, @@ -485,7 +526,7 @@ enum ParseEventError { UnexpectedEvent, } -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] #[error("Invalid buck event: `{0:?}`")] pub struct InvalidBuckEvent(pub Arc); @@ -543,47 +584,173 @@ pub fn format_test_result(test_result: &buck2_data::TestResult) -> anyhow::Resul pub struct ActionErrorDisplay<'a> { pub action_id: String, pub reason: String, - pub command: Option>, + pub command: Option<&'a buck2_data::CommandExecutionDetails>, + pub error_diagnostics: Option<&'a buck2_data::ActionErrorDiagnostics>, } -impl<'a> ActionErrorDisplay<'a> { - pub fn to_static(self) -> ActionErrorDisplay<'static> { - ActionErrorDisplay { - action_id: self.action_id, - reason: self.reason, - command: self.command.map(|c| Cow::Owned(c.into_owned())), - } +fn strip_trailing_newline(stream_contents: &str) -> &str { + match stream_contents.strip_suffix('\n') { + None => stream_contents, + Some(s) => s.strip_suffix('\r').unwrap_or(s), } } -pub fn display_action_error<'a>( - action: &'a buck2_data::ActionExecutionEnd, - error: &'a buck2_data::action_execution_end::Error, - opts: TargetDisplayOptions, -) -> anyhow::Result> { - use buck2_data::action_execution_end::Error; +impl<'a> ActionErrorDisplay<'a> { + /// Format the error message in a way that is suitable for use with the build report + /// + /// The output may include terminal colors that need to be sanitized. + pub fn simple_format_for_build_report(&self) -> String { + let s = self.simple_format_inner(None::<&'static mut dyn for<'x> FnMut(&'x str) -> String>); + sanitize_output_colors(s.as_bytes()) + } - let command = action.commands.last().and_then(|c| c.details.as_ref()); + /// Format the error message in a way that is suitable for use with the simpleconsole + /// + /// The output may include terminal colors that need to be sanitized + pub fn simple_format_with_timestamps( + &self, + with_timestamps: impl FnMut(&str) -> String, + ) -> String { + self.simple_format_inner(Some(with_timestamps)) + } - let reason = match error { - Error::MissingOutputs(missing_outputs) => { - format!("Required outputs are missing: {}", missing_outputs.message) + fn simple_format_inner( + &self, + mut with_timestamps: Option String>, + ) -> String { + let mut s = String::new(); + macro_rules! 
append { + ($fmt:expr $(, $args:expr)*) => {{ + let mut message = format!($fmt $(, $args)*); + if let Some(with_timestamps) = &mut with_timestamps { + message = with_timestamps(&message); + } + writeln!(s, "{message}").unwrap(); + }}; } + append!("Action failed: {}", self.action_id); + append!("{}", self.reason); + let Some(command_failed) = &self.command else { + return s; + }; + if let Some(command_kind) = command_failed.command_kind.as_ref() { + use buck2_data::command_execution_kind::Command; + match command_kind.command.as_ref() { + Some(Command::LocalCommand(local_command)) => { + append!("Local command: {}", command_to_string(local_command)); + } + Some(Command::WorkerCommand(worker_command)) => { + append!( + "Local worker command: {}", + worker_command_as_fallback_to_string(worker_command) + ); + } + Some(Command::WorkerInitCommand(worker_init_command)) => { + append!( + "Local worker initialization command: {}", + command_to_string(worker_init_command) + ); + } + Some(Command::RemoteCommand(remote_command)) => { + if !buck2_core::is_open_source() { + append!( + "Remote action{}, reproduce with: `frecli cas download-action {}`", + if remote_command.cache_hit { + " cache hit" + } else { + "" + }, + remote_command.action_digest + ); + } + } + Some(Command::OmittedLocalCommand(..)) | None => { + // Nothing to show in this case. + } + }; } + + let mut append_stream = |name, contents: &str| { + if contents.is_empty() { + append!("{name}: "); + } else { + append!("{name}:"); + let contents = strip_trailing_newline(contents); + writeln!(s, "{}", contents).unwrap(); + } + }; + + append_stream("Stdout", &command_failed.stdout); + append_stream("Stderr", &command_failed.stderr); + + if let Some(error_diagnostics) = self.error_diagnostics { + match error_diagnostics.data.as_ref().unwrap() { + buck2_data::action_error_diagnostics::Data::SubErrors(sub_errors) => { + let sub_errors = &sub_errors.sub_errors; + let mut all_sub_errors = String::new(); + if !sub_errors.is_empty() { + for sub_error in sub_errors { + let mut sub_error_line = String::new(); + + write!(sub_error_line, "[{}]", sub_error.category).unwrap(); + if let Some(message) = &sub_error.message { + write!(sub_error_line, " {}", message).unwrap(); + } + + // TODO(@wendyy) - handle locations later + writeln!(all_sub_errors, "- {}", sub_error_line).unwrap(); + } + } + append_stream( + "\nAction sub-errors produced by error handlers", + &all_sub_errors, + ); + } + buck2_data::action_error_diagnostics::Data::HandlerInvocationError(error) => { + append_stream("\nCould not produce error diagnostics", error); + } + }; } + s + } +} + +pub fn get_action_error_reason<'a>(error: &'a buck2_data::ActionError) -> anyhow::Result<String> { + use buck2_data::action_error::Error; + + Ok( + match error + .error + .as_ref() + .context("Internal error: Missing error in action error")?
+ { + Error::MissingOutputs(missing_outputs) => { + format!("Required outputs are missing: {}", missing_outputs.message) + } + Error::Unknown(error_string) => error_string.to_owned(), + Error::CommandExecutionError(buck2_data::CommandExecutionError {}) => { + match &error.last_command { + Some(c) => failure_reason_for_command_execution(c)?, + None => "Unexpected command status".to_owned(), + } + } + }, + ) +} + +pub fn display_action_error<'a>( + error: &'a buck2_data::ActionError, + opts: TargetDisplayOptions, +) -> anyhow::Result<ActionErrorDisplay<'a>> { + let command = error.last_command.as_ref().and_then(|c| c.details.as_ref()); + + let reason = get_action_error_reason(error)?; Ok(ActionErrorDisplay { - action_id: display_action_identity(action.key.as_ref(), action.name.as_ref(), opts)?, + action_id: display_action_identity(error.key.as_ref(), error.name.as_ref(), opts)?, reason, - command: command.map(Cow::Borrowed), + command, + error_diagnostics: error.error_diagnostics.as_ref(), }) } @@ -685,3 +852,68 @@ pub fn success_stderr<'a>( Ok(Some(stderr)) } + +pub fn sanitize_output_colors(stderr: &[u8]) -> String { + let mut sanitized = String::with_capacity(stderr.len()); + let mut parser = termwiz::escape::parser::Parser::new(); + parser.parse(stderr, |a| match a { + Action::Print(c) => sanitized.push(c), + Action::Control(cc) => match cc { + ControlCode::CarriageReturn => sanitized.push('\r'), + ControlCode::LineFeed => sanitized.push('\n'), + ControlCode::HorizontalTab => sanitized.push('\t'), + _ => {} + }, + _ => {} + }); + sanitized +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn removes_color_characters() { + let message = "\x1b[0mFoo\t\x1b[34mBar\n\x1b[DBaz\r\nQuz"; + + let sanitized = sanitize_output_colors(message.as_bytes()); + + assert_eq!("Foo\tBar\nBaz\r\nQuz", sanitized); + } + + #[test] + fn strips_trailing_newline_character() { + let stream_contents = "test\n"; + let res = strip_trailing_newline(stream_contents); + assert_eq!(res, "test"); + } + + #[test] + fn preserves_duplicate_newlines() { + let stream_contents = "test\n\n"; + let res = strip_trailing_newline(stream_contents); + assert_eq!(res, "test\n"); + } + + #[test] + fn preserves_other_trailing_whitespace() { + let stream_contents = "test \t"; + let res = strip_trailing_newline(stream_contents); + assert_eq!(res, stream_contents); + } + + #[test] + fn preserves_leading_whitespace() { + let stream_contents = "\n test"; + let res = strip_trailing_newline(stream_contents); + assert_eq!(res, stream_contents); + } + + #[test] + fn correctly_handles_carriage_return() { + let stream_contents = "test\r\n"; + let res = strip_trailing_newline(stream_contents); + assert_eq!(res, "test"); + } +}
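`sanitize_output_colors` drives termwiz's escape parser over raw bytes and keeps only printable characters plus a few control codes, which is what strips ANSI color and cursor sequences from action output. A small usage sketch of the same parser (termwiz assumed as a dependency, mirroring the call shape above):

```rust
use termwiz::escape::parser::Parser;
use termwiz::escape::{Action, ControlCode};

fn main() {
    let mut out = String::new();
    let mut parser = Parser::new();
    // "\x1b[31m" turns text red; the parser surfaces it as a non-Print
    // action, so it simply never reaches `out`.
    parser.parse(b"\x1b[31mred\x1b[0m\n", |action| match action {
        Action::Print(c) => out.push(c),
        Action::Control(ControlCode::LineFeed) => out.push('\n'),
        _ => {}
    });
    assert_eq!(out, "red\n");
}
```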
diff --git a/app/buck2_event_observer/src/event_observer.rs b/app/buck2_event_observer/src/event_observer.rs index b87614688e362..5c6977bbe2fa5 100644 --- a/app/buck2_event_observer/src/event_observer.rs +++ b/app/buck2_event_observer/src/event_observer.rs @@ -11,12 +11,16 @@ use std::sync::Arc; use std::time::Instant; use anyhow::Context; +use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; use buck2_events::BuckEvent; +use buck2_util::network_speed_average::NetworkSpeedAverage; use buck2_wrapper_common::invocation_id::TraceId; use crate::action_stats::ActionStats; +use crate::cold_build_detector::ColdBuildDetector; use crate::debug_events::DebugEventsState; use crate::dice_state::DiceState; +use crate::progress::BuildProgressStateTracker; use crate::re_state::ReState; use crate::session_info::SessionInfo; use crate::span_tracker::BuckEventSpanTracker; @@ -29,9 +33,14 @@ pub struct EventObserver<E> { pub action_stats: ActionStats, re_state: ReState, two_snapshots: TwoSnapshots, // NOTE: We got many more copies of this than we should. + system_info: buck2_data::SystemInfo, session_info: SessionInfo, test_state: TestState, starlark_debugger_state: StarlarkDebuggerState, + re_avg_download_speed: NetworkSpeedAverage, + pub cold_build_detector: Option<ColdBuildDetector>, + dice_state: DiceState, + pub concurrent_commands: bool, /// When running without the Superconsole, we skip some state that we don't need. This might be /// premature optimization. extra: E, @@ -41,24 +50,34 @@ impl<E> EventObserver<E> where E: EventObserverExtra, { - pub fn new(trace_id: TraceId) -> Self { + pub fn new(trace_id: TraceId, build_count_dir: Option<AbsNormPathBuf>) -> Self { + let cold_build_detector = build_count_dir.map(ColdBuildDetector::new); Self { span_tracker: BuckEventSpanTracker::new(), action_stats: ActionStats::default(), re_state: ReState::new(), two_snapshots: TwoSnapshots::default(), + system_info: buck2_data::SystemInfo::default(), session_info: SessionInfo { trace_id, test_session: None, - modern_dice: false, + legacy_dice: false, }, test_state: TestState::default(), starlark_debugger_state: StarlarkDebuggerState::new(), + re_avg_download_speed: NetworkSpeedAverage::default(), + cold_build_detector, + dice_state: DiceState::new(), + concurrent_commands: false, extra: E::new(), } } - pub fn observe(&mut self, receive_time: Instant, event: &Arc<BuckEvent>) -> anyhow::Result<()> { + pub async fn observe( + &mut self, + receive_time: Instant, + event: &Arc<BuckEvent>, + ) -> anyhow::Result<()> { self.span_tracker.handle_event(receive_time, event)?; { @@ -72,6 +91,11 @@ where ActionExecution(action_execution_end) => { self.action_stats.update(action_execution_end); } + buck2_data::span_end_event::Data::FileWatcher(file_watcher) => { + if let Some(cold_build_detector) = &mut self.cold_build_detector { + cold_build_detector.update_merge_base(file_watcher).await?; + } + } _ => {} } } @@ -89,6 +113,8 @@ where Snapshot(snapshot) => { self.re_state.update(snapshot); self.two_snapshots.update(event.timestamp(), snapshot); + self.re_avg_download_speed + .update(event.timestamp(), snapshot.re_download_bytes); } TestDiscovery(discovery) => { use buck2_data::test_discovery::Data::*; @@ -114,10 +140,27 @@ where .update(event.timestamp(), snapshot)?; } TagEvent(tags) => { - if tags.tags.contains(&"which-dice:Modern".to_owned()) { - self.session_info.modern_dice = true; + if tags.tags.contains(&"which-dice:Legacy".to_owned()) { + self.session_info.legacy_dice = true; + } + } + SystemInfo(system_info) => { + self.system_info = system_info.clone(); + } + TargetPatterns(tag) => { + if let Some(cold_build_detector) = &mut self.cold_build_detector { + cold_build_detector + .update_parsed_target_patterns(tag) + .await?; } } + DiceStateSnapshot(dice) => { + self.dice_state.update(dice); + } + ConcurrentCommands(concurrent_commands) => { + self.concurrent_commands = + self.concurrent_commands || concurrent_commands.trace_ids.len() > 1; + } _ => {} } } @@ -146,6 +189,10 @@ where &self.two_snapshots } + pub fn system_info(&self) -> &buck2_data::SystemInfo { + &self.system_info + } + pub fn session_info(&self) -> &SessionInfo { &self.session_info } @@ -158,9 +205,17 @@ where &self.test_state } + pub fn re_avg_download_speed(&self) -> &NetworkSpeedAverage { + &self.re_avg_download_speed + } + pub fn extra(&self) -> &E { &self.extra } + + pub fn dice_state(&self) -> &DiceState { + &self.dice_state +
} } pub trait EventObserverExtra: Send { @@ -171,55 +226,34 @@ pub trait EventObserverExtra: Send { /// This has more fields for debug info. We don't always capture those. pub struct DebugEventObserverExtra { - dice_state: DiceState, debug_events: DebugEventsState, + progress_state: BuildProgressStateTracker, } impl EventObserverExtra for DebugEventObserverExtra { fn new() -> Self { Self { - dice_state: DiceState::new(), debug_events: DebugEventsState::new(), + progress_state: BuildProgressStateTracker::new(), } } fn observe(&mut self, receive_time: Instant, event: &Arc<BuckEvent>) -> anyhow::Result<()> { self.debug_events.handle_event(receive_time, event)?; - - { - use buck2_data::buck_event::Data::*; - - match event.data() { - Instant(instant) => { - use buck2_data::instant_event::Data::*; - - match instant - .data - .as_ref() - .context("Missing `data` in `Instant`")? - { - DiceStateSnapshot(dice) => { - self.dice_state.update(dice); - } - _ => {} - } - } - _ => {} - } - } + self.progress_state.handle_event(receive_time, event)?; Ok(()) } } impl DebugEventObserverExtra { - pub fn dice_state(&self) -> &DiceState { - &self.dice_state - } - pub fn debug_events(&self) -> &DebugEventsState { &self.debug_events } + + pub fn progress_state(&self) -> &BuildProgressStateTracker { + &self.progress_state + } } pub struct NoopEventObserverExtra; diff --git a/app/buck2_event_observer/src/humanized.rs b/app/buck2_event_observer/src/humanized.rs index 3b0984ba01cad..8823bde00f363 100644 --- a/app/buck2_event_observer/src/humanized.rs +++ b/app/buck2_event_observer/src/humanized.rs @@ -10,6 +10,7 @@ use std::fmt; /// Write out a u64 representing bytes as something more readable +#[derive(Debug)] pub struct HumanizedBytes { bytes: u64, fixed_width: bool, @@ -168,7 +169,7 @@ impl fmt::Display for HumanizedCount { } #[cfg(test)] -mod test { +mod tests { use super::HumanizedBytes; use super::HumanizedBytesPerSecond; use super::HumanizedCount; diff --git a/app/buck2_event_observer/src/last_command_execution_kind.rs b/app/buck2_event_observer/src/last_command_execution_kind.rs index 6883118229aad..1421d6c399cbc 100644 --- a/app/buck2_event_observer/src/last_command_execution_kind.rs +++ b/app/buck2_event_observer/src/last_command_execution_kind.rs @@ -60,3 +60,13 @@ pub fn get_last_command_execution_kind( LastCommandExecutionKind::NoCommand } } + +pub fn get_last_command_execution_time(action: &buck2_data::ActionExecutionEnd) -> Option<u64> { + action + .commands + .last() + .and_then(|c| c.details.as_ref()) + .and_then(|c| c.metadata.as_ref()) + .and_then(|c| c.execution_time.as_ref()) + .map(|c| c.seconds as u64 * 1000 + c.nanos as u64 / 1000000) +}
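`get_last_command_execution_time` converts a protobuf-style duration (whole seconds plus nanoseconds) into milliseconds, truncating anything below a millisecond. The arithmetic, checked against a couple of hand-computed values:

```rust
// seconds * 1000 gives whole-second milliseconds; nanos / 1_000_000
// contributes the sub-second milliseconds, truncating the remainder.
fn to_millis(seconds: u64, nanos: u64) -> u64 {
    seconds * 1000 + nanos / 1_000_000
}

fn main() {
    assert_eq!(to_millis(2, 500_000_000), 2500); // 2.5 s
    assert_eq!(to_millis(0, 999_999), 0); // sub-millisecond truncates to 0
}
```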
diff --git a/app/buck2_event_observer/src/lib.rs b/app/buck2_event_observer/src/lib.rs index 216acb478c6f2..cd4a5c7f4ebd2 100644 --- a/app/buck2_event_observer/src/lib.rs +++ b/app/buck2_event_observer/src/lib.rs @@ -7,10 +7,13 @@ * of this source tree. */ +#![feature(error_generic_member_access)] #![feature(try_blocks)] pub mod action_stats; +pub mod action_util; pub mod cache_hit_rate; +pub mod cold_build_detector; pub mod debug_events; pub mod dice_state; pub mod display; @@ -19,6 +22,7 @@ pub mod fmt_duration; pub mod humanized; pub mod last_command_execution_kind; pub mod pending_estimate; +pub mod progress; pub mod re_state; pub mod session_info; pub mod span_tracker; diff --git a/app/buck2_event_observer/src/pending_estimate.rs b/app/buck2_event_observer/src/pending_estimate.rs index f5c5a498a4a36..264a27b304b00 100644 --- a/app/buck2_event_observer/src/pending_estimate.rs +++ b/app/buck2_event_observer/src/pending_estimate.rs @@ -16,9 +16,10 @@ use crate::span_tracker::SpanTrackable; pub fn pending_estimate<T: SpanTrackable>(roots: &Roots<T>, dice: &DiceState) -> u64 { let mut total = 0; for k in &["BuildKey", "AnalysisKey"] { - let from_dice = dice.key_states().get(*k).map_or(0, |v| { - v.started + v.check_deps_started - v.finished - v.check_deps_finished - }); + let from_dice = dice + .key_states() + .get(*k) + .map_or(0, |v| v.started - v.finished); let from_roots = roots.dice_counts().get(k).copied().unwrap_or(0); @@ -27,3 +28,152 @@ pub fn pending_estimate<T: SpanTrackable>(roots: &Roots<T>, dice: &DiceState) -> total } + +pub fn estimate_completion_percentage<T: SpanTrackable>(roots: &Roots<T>, dice: &DiceState) -> u8 { + let from_dice = dice + .key_states() + .get("BuildKey") + .map_or((0, 0), |v| (v.started, v.finished)); + + let from_roots = roots.dice_counts().get("BuildKey").copied().unwrap_or(0); + + let started = u64::from(from_dice.0).saturating_sub(from_roots); + let finished = u64::from(from_dice.1).saturating_sub(from_roots); + if started == 0 { + // Avoid divide by zero. + return 0; + } + ((finished as f64 / started as f64) * 100f64) as u8 +} + +#[cfg(test)] +mod tests { + use std::collections::HashMap; + use std::sync::Arc; + use std::time::Instant; + use std::time::UNIX_EPOCH; + + use buck2_data::SpanStartEvent; + use buck2_events::span::SpanId; + use buck2_events::BuckEvent; + use buck2_wrapper_common::invocation_id::TraceId; + + use crate::dice_state::DiceState; + use crate::pending_estimate::estimate_completion_percentage; + use crate::span_tracker::BuckEventSpanTracker; + + fn setup_roots(tracker: &mut BuckEventSpanTracker) { + let t0 = Instant::now(); + let span = Arc::new(BuckEvent::new( + UNIX_EPOCH, + TraceId::new(), + Some(SpanId::next()), + None, + SpanStartEvent { + data: Some( + buck2_data::ActionExecutionStart { + key: Some(buck2_data::ActionKey { + id: Default::default(), + owner: Some(buck2_data::action_key::Owner::TargetLabel( + buck2_data::ConfiguredTargetLabel { + label: Some(buck2_data::TargetLabel { + package: "pkg".into(), + name: "target".into(), + }), + configuration: Some(buck2_data::Configuration { + full_name: "conf".into(), + }), + execution_configuration: None, + }, + )), + key: "".to_owned(), + }), + name: Some(buck2_data::ActionName { + category: "category".into(), + identifier: "identifier".into(), + }), + kind: buck2_data::ActionKind::NotSet as i32, + } + .into(), + ), + } + .into(), + )); + tracker.start_at(&span, t0).unwrap(); + } + + fn setup_dice_state(dice_state: &mut DiceState, finished: u32, total: u32) { + dice_state.update(&buck2_data::DiceStateSnapshot { + key_states: { + let mut map = HashMap::new(); + map.insert( + "BuildKey".to_owned(), + buck2_data::DiceKeyState { + started: total, + finished, + check_deps_started: 0, + check_deps_finished: 0, + compute_started: 0, + compute_finished: 0, + }, + ); + map + }, + }); + } +
#[test] + fn test_completion_no_progress() -> anyhow::Result<()> { + let mut dice = DiceState::new(); + let mut tracker = BuckEventSpanTracker::new(); + + setup_roots(&mut tracker); + setup_dice_state(&mut dice, 0, 100); + assert_eq!(estimate_completion_percentage(tracker.roots(), &dice), 0); + Ok(()) + } + + #[test] + fn test_completion_percentage_build_complete() -> anyhow::Result<()> { + let mut dice = DiceState::new(); + let mut tracker = BuckEventSpanTracker::new(); + + setup_roots(&mut tracker); + setup_dice_state(&mut dice, 100, 100); + assert_eq!(estimate_completion_percentage(tracker.roots(), &dice), 100); + Ok(()) + } + + #[test] + fn test_completion_percentage_intermediate_state() -> anyhow::Result<()> { + let mut dice = DiceState::new(); + let mut tracker = BuckEventSpanTracker::new(); + + setup_roots(&mut tracker); + // 26/101 -> 25/100 since we have 1 subtracted for the ActionExecutionStart + setup_dice_state(&mut dice, 26, 101); + assert_eq!(estimate_completion_percentage(tracker.roots(), &dice), 25); + Ok(()) + } + + #[test] + fn test_completion_percentage_invalid_dice_state() -> anyhow::Result<()> { + let mut dice = DiceState::new(); + let mut tracker = BuckEventSpanTracker::new(); + + setup_roots(&mut tracker); + setup_dice_state(&mut dice, 10, 0); + assert_eq!(estimate_completion_percentage(tracker.roots(), &dice), 0); + Ok(()) + } + + #[test] + fn test_completion_percentage_empty_span() -> anyhow::Result<()> { + let mut dice = DiceState::new(); + let tracker = BuckEventSpanTracker::new(); + + setup_dice_state(&mut dice, 26, 101); + assert_eq!(estimate_completion_percentage(tracker.roots(), &dice), 25); + Ok(()) + } +} diff --git a/app/buck2_event_observer/src/progress.rs b/app/buck2_event_observer/src/progress.rs new file mode 100644 index 0000000000000..1951b9db0e837 --- /dev/null +++ b/app/buck2_event_observer/src/progress.rs @@ -0,0 +1,543 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use std::collections::HashMap; +use std::time::Instant; + +use buck2_data::analysis_stage_start; +use buck2_data::executor_stage_start; +use buck2_data::instant_event; +use buck2_data::local_stage; +use buck2_data::re_stage; +use buck2_data::span_end_event; +use buck2_data::span_start_event; +use buck2_data::ActionExecutionStart; +use buck2_data::AnalysisEnd; +use buck2_data::AnalysisStageStart; +use buck2_data::AnalysisStart; +use buck2_data::ExecutorStageStart; +use buck2_data::LoadBuildFileEnd; +use buck2_data::LocalStage; +use buck2_data::ReStage; +use buck2_events::span::SpanId; +use buck2_events::BuckEvent; + +use crate::last_command_execution_kind::get_last_command_execution_kind; +use crate::last_command_execution_kind::get_last_command_execution_time; +use crate::last_command_execution_kind::LastCommandExecutionKind; +use crate::unpack_event::unpack_event; +use crate::unpack_event::UnpackedBuckEvent; + +#[derive(Debug, Clone, Copy)] +enum State { + Started, + Running, + Finished, +} + +#[derive(Debug, Default)] +pub struct SpanMap<T> { + map: HashMap<SpanId, (State, T)>, + running: u64, + finished: u64, + cancelled: u64, + + min_started: u64, + min_finished: u64, +} + +impl<T> SpanMap<T> { + fn started(&mut self, id: SpanId, data: T) { + self.map.insert(id, (State::Started, data)); + self.cancelled = self.cancelled.saturating_sub(1); + } + + fn cancelled(&mut self, id: SpanId) -> Option<T> { + self.map.remove(&id).map(|(state, v)| { + match state { + State::Started => {} + State::Running => { + self.running -= 1; + } + State::Finished => { + self.finished -= 1; + } + } + self.cancelled += 1; + v + }) + } + + fn running(&mut self, id: SpanId) -> Option<&mut T> { + match self.map.get_mut(&id) { + Some((state, v)) => { + match state { + State::Started => { + *state = State::Running; + self.running += 1; + } + _ => {} + } + Some(v) + } + None => None, + } + } + + fn finished(&mut self, id: SpanId) -> Option<&mut T> { + match self.map.get_mut(&id) { + Some((state, v)) => { + match state { + State::Started => { + self.finished += 1; + } + State::Running => { + self.running -= 1; + self.finished += 1; + } + State::Finished => {} + } + + *state = State::Finished; + Some(v) + } + None => None, + } + } + + fn get_stats(&self) -> BuildProgressPhaseStatsItem { + BuildProgressPhaseStatsItem { + started: std::cmp::max(self.min_started, self.map.len() as u64 + self.cancelled), + finished: std::cmp::max(self.finished, self.min_finished), + running: self.running, + } + } +} + +#[derive(Debug, Clone, Default, PartialEq, Eq)] +pub struct BuildProgressPhaseStatsItem { + pub started: u64, + pub finished: u64, + pub running: u64, +} + +impl BuildProgressPhaseStatsItem { + pub fn pending(&self) -> u64 { + self.started - self.finished + } + + pub fn mark_all_finished(&mut self) { + self.finished = self.started; + self.running = 0; + } +} + +/// Tracks some stats about what we've completed in this build. +#[derive(Default)] + +pub struct BuildProgressStats { + pub dirs_read: u64, + pub targets: u64, + + pub actions_declared: u64, + pub artifacts_declared: u64, + + pub running_local: u64, + pub running_remote: u64, + + pub exec_time_ms: u64, + pub cached_exec_time_ms: u64, +} + +/// Tracks stats about ongoing work in the main phases of the build. 
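+/// (One `BuildProgressPhaseStatsItem` per phase below: loads, analyses and actions; each exposes `pending() = started - finished`.)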
+#[derive(Debug, Clone)] +pub struct BuildProgressPhaseStats { + pub loads: BuildProgressPhaseStatsItem, + pub analyses: BuildProgressPhaseStatsItem, + pub actions: BuildProgressPhaseStatsItem, +} + +#[derive(Default, Clone, Copy)] +struct TrackedActionSpan { + running_local: bool, + running_remote: bool, +} + +#[derive(Default)] + +struct TrackedLoadSpan {} + +#[derive(Default)] + +struct TrackedAnalysisSpan {} + +#[derive(Default)] +pub struct BuildProgressStateTracker { + stats: BuildProgressStats, + + loads: SpanMap<TrackedLoadSpan>, + analyses: SpanMap<TrackedAnalysisSpan>, + actions: SpanMap<TrackedActionSpan>, +} + +impl BuildProgressStateTracker { + pub fn new() -> Self { + Self { + ..Default::default() + } + } + + pub fn handle_event( + &mut self, + _processed_time: Instant, + event: &BuckEvent, + ) -> anyhow::Result<()> { + let ev = unpack_event(event)?; + + self.handle_load(&ev)?; + self.handle_analysis(&ev)?; + self.handle_actions(&ev)?; + + match unpack_event(event)? { + UnpackedBuckEvent::Instant(_, _, instant_event::Data::DiceStateSnapshot(snapshot)) => { + if let Some(read_dir_states) = snapshot.key_states.get("ReadDirKey") { + self.stats.dirs_read = read_dir_states.finished as u64; + } + + let mut analysis_min_started = 0; + let mut analysis_min_finished = 0; + + if let Some(states) = snapshot.key_states.get("AnalysisKey") { + analysis_min_started += states.started as u64; + analysis_min_finished += states.finished as u64; + } + + if let Some(states) = snapshot.key_states.get("AnonTargetKey") { + analysis_min_started += states.started as u64; + analysis_min_finished += states.finished as u64; + } + + if let Some(states) = snapshot.key_states.get("DeferredCompute") { + analysis_min_started += states.started as u64; + analysis_min_finished += states.finished as u64; + } + + self.analyses.min_started = analysis_min_started; + self.analyses.min_finished = analysis_min_finished; + + if let Some(states) = snapshot.key_states.get("BuildKey") { + self.actions.min_started = states.started as u64; + self.actions.min_finished = states.finished as u64; + } + } + UnpackedBuckEvent::SpanEnd( + BuckEvent { + span_id: Some(span_id), + .. + }, + _, + span_end_event::Data::SpanCancelled(..), + ) => { + self.loads.cancelled(*span_id); + self.analyses.cancelled(*span_id); + if let Some(v) = self.actions.cancelled(*span_id) { + self.action_finished(v); + } + } + _ => { + // ignored + } + } + + Ok(()) + } + + fn handle_load(&mut self, ev: &UnpackedBuckEvent) -> anyhow::Result<()> { + match ev { + UnpackedBuckEvent::SpanStart( + BuckEvent { + span_id: Some(span_id), + .. + }, + _, + span_start_event::Data::Load(..), + ) => { + self.loads.started(*span_id, TrackedLoadSpan {}); + self.loads.running(*span_id); + } + UnpackedBuckEvent::SpanEnd( + BuckEvent { + span_id: Some(span_id), + .. + }, + _, + span_end_event::Data::Load(LoadBuildFileEnd { target_count, .. }), + ) => { + self.loads.finished(*span_id); + if let Some(c) = target_count { + self.stats.targets += c; + } + } + _ => {} + } + + Ok(()) + } + + fn handle_analysis(&mut self, ev: &UnpackedBuckEvent) -> anyhow::Result<()> { + match ev { + UnpackedBuckEvent::SpanStart( + BuckEvent { + span_id: Some(span_id), + .. + }, + _, + span_start_event::Data::Analysis(AnalysisStart { .. }), + ) => { + self.analyses.started(*span_id, TrackedAnalysisSpan {}); + } + UnpackedBuckEvent::SpanStart( + BuckEvent { + parent_id: Some(parent_id), + .. 
+ }, + _, + span_start_event::Data::AnalysisStage(AnalysisStageStart { + stage: Some(analysis_stage_start::Stage::EvaluateRule(..)), + }), + ) => { + self.analyses.running(*parent_id); + } + UnpackedBuckEvent::SpanEnd( + BuckEvent { + span_id: Some(span_id), + .. + }, + _, + span_end_event::Data::Analysis(AnalysisEnd { + declared_actions, + declared_artifacts, + .. + }), + ) => { + self.stats.actions_declared += declared_actions.unwrap_or(0); + self.stats.artifacts_declared += declared_artifacts.unwrap_or(0); + self.analyses.finished(*span_id); + } + _ => {} + } + Ok(()) + } + + fn action_finished(&mut self, data: TrackedActionSpan) { + if data.running_local { + self.stats.running_local -= 1; + } + if data.running_remote { + self.stats.running_remote -= 1; + } + } + + fn handle_actions(&mut self, ev: &UnpackedBuckEvent) -> anyhow::Result<()> { + match ev { + UnpackedBuckEvent::SpanStart( + BuckEvent { + span_id: Some(span_id), + .. + }, + _, + span_start_event::Data::ActionExecution(ActionExecutionStart { .. }), + ) => { + self.actions.started(*span_id, TrackedActionSpan::default()); + } + UnpackedBuckEvent::SpanStart( + BuckEvent { + parent_id: Some(parent_id), + .. + }, + _, + span_start_event::Data::ExecutorStage(ExecutorStageStart { stage: Some(stage) }), + ) => { + match stage { + executor_stage_start::Stage::Re(ReStage { + stage: Some(re_stage::Stage::Execute(..)), + }) => { + if let Some(data) = self.actions.running(*parent_id) { + data.running_remote = true; + self.stats.running_remote += 1; + } + } + executor_stage_start::Stage::Local(LocalStage { + stage: Some(local_stage::Stage::Execute(..)), + }) => { + if let Some(data) = self.actions.running(*parent_id) { + data.running_local = true; + self.stats.running_local += 1; + } + } + _ => {} + }; + } + UnpackedBuckEvent::SpanEnd( + BuckEvent { + span_id: Some(span_id), + .. 
+ }, + _, + span_end_event::Data::ActionExecution(end), + ) => { + if let Some(data) = self.actions.finished(*span_id) { + let data = *data; + self.action_finished(data); + } + + let exec_time = get_last_command_execution_time(end).unwrap_or(0); + self.stats.exec_time_ms += exec_time; + + match get_last_command_execution_kind(end) { + LastCommandExecutionKind::Cached + | LastCommandExecutionKind::RemoteDepFileCached => { + self.stats.cached_exec_time_ms += exec_time; + } + _ => {} + } + } + _ => {} + } + Ok(()) + } + + pub fn phase_stats(&self) -> BuildProgressPhaseStats { + BuildProgressPhaseStats { + loads: self.loads.get_stats(), + analyses: self.analyses.get_stats(), + actions: self.actions.get_stats(), + } + } + + pub fn progress_stats(&self) -> &BuildProgressStats { + &self.stats + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_span_map() -> anyhow::Result<()> { + let mut map: SpanMap<u64> = SpanMap::default(); + + assert_eq!( + map.get_stats(), + BuildProgressPhaseStatsItem { + started: 0, + finished: 0, + running: 0 + } + ); + + map.started(SpanId::from_u64(1).unwrap(), 1); + + assert_eq!( + map.get_stats(), + BuildProgressPhaseStatsItem { + started: 1, + finished: 0, + running: 0 + } + ); + + assert_eq!(map.running(SpanId::from_u64(1).unwrap()).copied(), Some(1)); + + assert_eq!( + map.get_stats(), + BuildProgressPhaseStatsItem { + started: 1, + finished: 0, + running: 1 + } + ); + + assert!(map.finished(SpanId::from_u64(1).unwrap()).is_some()); + assert_eq!( + map.get_stats(), + BuildProgressPhaseStatsItem { + started: 1, + finished: 1, + running: 0 + } + ); + + map.started(SpanId::from_u64(2).unwrap(), 2); + assert_eq!( + map.get_stats(), + BuildProgressPhaseStatsItem { + started: 2, + finished: 1, + running: 0 + } + ); + + map.cancelled(SpanId::from_u64(2).unwrap()); + assert_eq!( + map.get_stats(), + BuildProgressPhaseStatsItem { + started: 2, + finished: 1, + running: 0 + } + ); + + // started shouldn't be incremented because we had a cancellation + map.started(SpanId::from_u64(3).unwrap(), 3); + assert_eq!( + map.get_stats(), + BuildProgressPhaseStatsItem { + started: 2, + finished: 1, + running: 0 + } + ); + + // started should now increment + map.started(SpanId::from_u64(4).unwrap(), 4); + assert_eq!( + map.get_stats(), + BuildProgressPhaseStatsItem { + started: 3, + finished: 1, + running: 0 + } + ); + + map.min_started = 8; + + assert_eq!( + map.get_stats(), + BuildProgressPhaseStatsItem { + started: 8, + finished: 1, + running: 0 + } + ); + + map.min_finished = 4; + assert_eq!( + map.get_stats(), + BuildProgressPhaseStatsItem { + started: 8, + finished: 4, + running: 0 + } + ); + + Ok(()) + } +} diff --git a/app/buck2_event_observer/src/re_state.rs b/app/buck2_event_observer/src/re_state.rs index ee2f08f66ca76..ec07eaddcd0d6 100644 --- a/app/buck2_event_observer/src/re_state.rs +++ b/app/buck2_event_observer/src/re_state.rs @@ -7,6 +7,7 @@ * of this source tree. 
*/ +use buck2_data::Snapshot; use superconsole::DrawMode; use superconsole::Line; use superconsole::Lines; @@ -38,6 +39,10 @@ impl ReState { } } + pub fn first_snapshot(&self) -> &Option<Snapshot> { + &self.first_snapshot + } + pub fn render_header( &self, two_snapshots: &TwoSnapshots, diff --git a/app/buck2_event_observer/src/session_info.rs b/app/buck2_event_observer/src/session_info.rs index ade88e8f01e43..230f85a4df3d0 100644 --- a/app/buck2_event_observer/src/session_info.rs +++ b/app/buck2_event_observer/src/session_info.rs @@ -13,5 +13,5 @@ use buck2_wrapper_common::invocation_id::TraceId; pub struct SessionInfo { pub trace_id: TraceId, pub test_session: Option<buck2_data::TestSessionInfo>, - pub modern_dice: bool, + pub legacy_dice: bool, } diff --git a/app/buck2_event_observer/src/span_tracker.rs b/app/buck2_event_observer/src/span_tracker.rs index 2c09fb8e0dddb..3114bd1050c17 100644 --- a/app/buck2_event_observer/src/span_tracker.rs +++ b/app/buck2_event_observer/src/span_tracker.rs @@ -9,6 +9,7 @@ use std::collections::HashMap; use std::fmt; +use std::num::NonZeroU64; use std::sync::Arc; use std::time::Instant; @@ -23,7 +24,7 @@ use linked_hash_map::LinkedHashMap; use crate::what_ran::WhatRanRelevantAction; use crate::what_ran::WhatRanState; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum SpanTrackerError { #[error( "Tried to end a child (`{child:#?}`) that did not exist for its parent (`{parent:#?}`)." @@ -514,6 +515,7 @@ pub fn is_span_shown(event: &BuckEvent) -> bool { Data::ActionExecution(..) | Data::FinalMaterialization(..) | Data::Analysis(..) + | Data::AnalysisResolveQueries(..) | Data::Load(..) | Data::LoadPackage(..) | Data::TestDiscovery(..) @@ -528,6 +530,7 @@ pub fn is_span_shown(event: &BuckEvent) -> bool { | Data::ExecutorStage(..) | Data::MatchDepFiles(..) | Data::CacheUpload(..) + | Data::DepFileUpload(..) | Data::DiceBlockConcurrentCommand(..) | Data::DiceSynchronizeSection(..) | Data::DiceCleanup(..) @@ -539,7 +542,10 @@ pub fn is_span_shown(event: &BuckEvent) -> bool { | Data::ReUpload(..) | Data::ConnectToInstaller(..) | Data::LocalResources(..) - | Data::ReleaseLocalResources(..), + | Data::ReleaseLocalResources(..) + | Data::CreateOutputHashesFile(..) + | Data::ActionErrorHandlerExecution(..) + | Data::CqueryUniverseBuild(..), ) => true, None => false, } @@ -564,10 +570,8 @@ impl BuckEventSpanTracker { } } -impl WhatRanState<OptionalSpanId> for SpanTracker<Arc<BuckEvent>> { - fn get(&self, span_id: OptionalSpanId) -> Option<WhatRanRelevantAction<'_>> { - let span_id = span_id.0?; - +impl WhatRanState for SpanTracker<Arc<BuckEvent>> { + fn get(&self, span_id: SpanId) -> Option<WhatRanRelevantAction<'_>> { self.all .get(&span_id) .map(|e| e.info.event.data()) @@ -577,8 +581,14 @@ impl WhatRanState for SpanTracker<Arc<BuckEvent>> { /// A wrapper type to make calls to emit_event_if_relevant more convenient, since parent_id is /// `Option<SpanId>` on BuckEvent. 
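/// An id of zero converts to `None` (span ids are non-zero), so `OptionalSpanId::from_u64(0)` yields the empty wrapper.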
-#[derive(From, Copy, Clone, Dupe)] -pub struct OptionalSpanId(Option<SpanId>); +#[derive(From, Copy, Clone, Dupe, Eq, PartialEq, Hash)] +pub struct OptionalSpanId(pub Option<SpanId>); + +impl OptionalSpanId { + pub fn from_u64(optional_span_id: u64) -> OptionalSpanId { + OptionalSpanId(NonZeroU64::new(optional_span_id).map(SpanId)) + } +} impl fmt::Display for OptionalSpanId { fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { @@ -591,7 +601,7 @@ impl fmt::Display for OptionalSpanId { } #[cfg(test)] -mod test { +mod tests { use std::sync::atomic::AtomicI64; use std::sync::atomic::Ordering; diff --git a/app/buck2_event_observer/src/two_snapshots.rs b/app/buck2_event_observer/src/two_snapshots.rs index c911ad133afd4..4e1503b07660a 100644 --- a/app/buck2_event_observer/src/two_snapshots.rs +++ b/app/buck2_event_observer/src/two_snapshots.rs @@ -47,6 +47,16 @@ impl TwoSnapshots { self.per_micro_second(|s| (s.buck2_user_cpu_us + s.buck2_system_cpu_us) * 100) } + /// User CPU time between two snapshots in percents. + pub fn user_cpu_percents(&self) -> Option<u32> { + self.per_micro_second(|s| s.buck2_user_cpu_us * 100) + } + + /// System CPU time between two snapshots in percents. + pub fn system_cpu_percents(&self) -> Option<u32> { + self.per_micro_second(|s| s.buck2_system_cpu_us * 100) + } + /// Measure bytes-per-second rate between two snapshots for some field. fn bytes_per_second(&self, field: impl Fn(&buck2_data::Snapshot) -> u64) -> Option<u64> { self.per_micro_second(|s| field(s) * 1_000_000) @@ -79,6 +89,8 @@ mod tests { let mut two_snapshots = TwoSnapshots::default(); assert_eq!(None, two_snapshots.cpu_percents()); + assert_eq!(None, two_snapshots.user_cpu_percents()); + assert_eq!(None, two_snapshots.system_cpu_percents()); two_snapshots.update( t0, &buck2_data::Snapshot { @@ -88,6 +100,8 @@ mod tests { }, ); assert_eq!(None, two_snapshots.cpu_percents()); + assert_eq!(None, two_snapshots.user_cpu_percents()); + assert_eq!(None, two_snapshots.system_cpu_percents()); two_snapshots.update( t0.add(Duration::from_secs(2)), &buck2_data::Snapshot { @@ -98,6 +112,8 @@ ); // 2 seconds real time, 14 seconds user + system time, so 700% CPU. 
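// Of those 14 seconds, 6 were user time (300% over the 2s window) and 8 were system time (400%), as asserted below.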
assert_eq!(Some(700), two_snapshots.cpu_percents()); + assert_eq!(Some(300), two_snapshots.user_cpu_percents()); + assert_eq!(Some(400), two_snapshots.system_cpu_percents()); } #[test] diff --git a/app/buck2_event_observer/src/unpack_event.rs b/app/buck2_event_observer/src/unpack_event.rs index 9f663ecc67290..99d319f3baac6 100644 --- a/app/buck2_event_observer/src/unpack_event.rs +++ b/app/buck2_event_observer/src/unpack_event.rs @@ -12,9 +12,8 @@ use buck2_data::InstantEvent; use buck2_data::SpanEndEvent; use buck2_data::SpanStartEvent; use buck2_events::BuckEvent; -use thiserror::Error; -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] pub enum VisitorError { #[error("Sent an event missing one or more fields: `{0:?}`")] MissingField(BuckEvent), @@ -40,31 +39,34 @@ pub enum UnpackedBuckEvent<'a> { &'a InstantEvent, &'a buck2_data::instant_event::Data, ), + UnrecognizedSpanStart(&'a BuckEvent, &'a SpanStartEvent), + UnrecognizedSpanEnd(&'a BuckEvent, &'a SpanEndEvent), + UnrecognizedInstant(&'a BuckEvent, &'a InstantEvent), } pub fn unpack_event(event: &BuckEvent) -> anyhow::Result<UnpackedBuckEvent> { match &event.data() { - buck_event::Data::SpanStart(v) => Ok(UnpackedBuckEvent::SpanStart( - event, - v, - v.data - .as_ref() - .ok_or_else(|| VisitorError::MissingField(event.clone()))?, - )), - buck_event::Data::SpanEnd(v) => Ok(UnpackedBuckEvent::SpanEnd( - event, - v, - v.data - .as_ref() - .ok_or_else(|| VisitorError::MissingField(event.clone()))?, - )), - buck_event::Data::Instant(v) => Ok(UnpackedBuckEvent::Instant( - event, - v, - v.data - .as_ref() - .ok_or_else(|| VisitorError::MissingField((*event).clone()))?, - )), + buck_event::Data::SpanStart(v) => Ok({ + if let Some(data) = v.data.as_ref() { + UnpackedBuckEvent::SpanStart(event, v, data) + } else { + UnpackedBuckEvent::UnrecognizedSpanStart(event, v) + } + }), + buck_event::Data::SpanEnd(v) => Ok({ + if let Some(data) = v.data.as_ref() { + UnpackedBuckEvent::SpanEnd(event, v, data) + } else { + UnpackedBuckEvent::UnrecognizedSpanEnd(event, v) + } + }), + buck_event::Data::Instant(v) => Ok({ + if let Some(data) = v.data.as_ref() { + UnpackedBuckEvent::Instant(event, v, data) + } else { + UnpackedBuckEvent::UnrecognizedInstant(event, v) + } + }), buck_event::Data::Record(_) => Err(VisitorError::UnexpectedRecord(event.clone()).into()), } } diff --git a/app/buck2_event_observer/src/verbosity.rs b/app/buck2_event_observer/src/verbosity.rs index 77c6feacbb2f5..4a12fbbe1e1d4 100644 --- a/app/buck2_event_observer/src/verbosity.rs +++ b/app/buck2_event_observer/src/verbosity.rs @@ -11,11 +11,11 @@ use std::collections::HashSet; use dupe::Dupe; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum VerbosityError { #[error("Can't have more than 1 level set at a time")] MoreThan1Level, - #[error("Item name not reconized: {0}")] + #[error("Item name not recognized: {0}")] UnknownItem(String), } diff --git a/app/buck2_event_observer/src/what_ran.rs b/app/buck2_event_observer/src/what_ran.rs index 7f88710015447..8b6213d2a0b7f 100644 --- a/app/buck2_event_observer/src/what_ran.rs +++ b/app/buck2_event_observer/src/what_ran.rs @@ -9,15 +9,21 @@ use std::borrow::Cow; use std::fmt; +use std::fmt::Display; +use std::fmt::Formatter; use buck2_data::re_platform::Property; +use buck2_data::ActionName; +use buck2_events::span::SpanId; use dupe::Dupe; +use regex::Regex; use superconsole::Line; use superconsole::Lines; use superconsole::SuperConsole; use crate::display; use crate::display::TargetDisplayOptions; +use 
crate::span_tracker::OptionalSpanId; /// Options controlling what WhatRan produces. #[derive(Debug, Default, clap::Parser)] @@ -30,6 +36,28 @@ pub struct WhatRanOptions { pub skip_remote_executions: bool, #[clap(long)] pub skip_local_executions: bool, + #[clap(long)] + /// Regular expression to filter commands by given action category (i.e. type of actions that are + /// similar but operate on different inputs, such as invocations of a C++ + /// compiler (whose category would be `cxx_compile`)). Matches by full string. + pub filter_category: Option<String>, +} + +pub struct WhatRanOptionsRegex<'a> { + pub options: &'a WhatRanOptions, + filter_category_regex: Option<Regex>, +} +impl<'a> WhatRanOptionsRegex<'a> { + pub fn from_options(options: &'a WhatRanOptions) -> anyhow::Result<Self> { + let filter_category_regex = match &options.filter_category { + Some(filter_category) => Some(Regex::new(&format!(r"^{}$", filter_category))?), + None => None, + }; + Ok(Self { + options, + filter_category_regex, + }) + } } /// An action that makes sense to use to contextualize a command we ran. @@ -38,6 +66,7 @@ pub enum WhatRanRelevantAction<'a> { ActionExecution(&'a buck2_data::ActionExecutionStart), TestDiscovery(&'a buck2_data::TestDiscoveryStart), TestRun(&'a buck2_data::TestRunStart), + SetupLocalResources(&'a buck2_data::SetupLocalResourcesStart), } impl<'a> WhatRanRelevantAction<'a> { @@ -54,6 +83,9 @@ impl<'a> WhatRanRelevantAction<'a> { Some(buck2_data::span_start_event::Data::TestStart(test)) => { Some(Self::TestRun(test)) } + Some(buck2_data::span_start_event::Data::LocalResources(setup)) => { + Some(Self::SetupLocalResources(setup)) + } _ => None, }, _ => None, @@ -62,27 +94,36 @@ } pub struct WhatRanOutputCommand<'a> { - reason: &'a str, - identity: &'a str, - repro: CommandReproducer<'a>, - extra: Option<WhatRanOutputCommandExtra<'a>>, + pub reason: &'a str, + pub identity: &'a str, + pub repro: CommandReproducer<'a>, + pub extra: Option<WhatRanOutputCommandExtra<'a>>, + pub std_err: Option<&'a str>, + pub duration: Option<std::time::Duration>, } -impl WhatRanOutputCommand<'_> { - pub fn reason(&self) -> &str { - self.reason - } - pub fn identity(&self) -> &str { - self.identity - } - pub fn repro(&self) -> CommandReproducer<'_> { - self.repro - } - pub fn extra(&self) -> Option<WhatRanOutputCommandExtra<'_>> { - self.extra +impl<'a> WhatRanOutputCommand<'a> { + pub fn as_tabulated_reproducer(&self) -> impl fmt::Display + '_ { + WhatRanOutputCommandHeader { cmd: self } } } +struct WhatRanOutputCommandHeader<'r, 'a> { + cmd: &'r WhatRanOutputCommand<'a>, +} + +impl Display for WhatRanOutputCommandHeader<'_, '_> { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}\t{}\t{}\t{}", + self.cmd.reason, + self.cmd.identity, + self.cmd.repro.executor(), + self.cmd.repro.as_human_readable(), + ) + } +} #[derive(Clone, Copy, Dupe)] pub enum WhatRanOutputCommandExtra<'a> { TestCases(&'a [String]), @@ -95,42 +136,73 @@ pub trait WhatRanOutputWriter { /// Storage provided for events. The expectations is that any previously event that would qualify /// as a WhatRanRelevantAction was captured in this and will be returned. -pub trait WhatRanState<T> { - fn get(&self, span_id: T) -> Option<WhatRanRelevantAction<'_>>; +pub trait WhatRanState { + fn get(&self, span_id: SpanId) -> Option<WhatRanRelevantAction<'_>>; } +pub fn matches_category(action: Option<WhatRanRelevantAction<'_>>, pattern: &Regex) -> bool { + match action { + Some(WhatRanRelevantAction::ActionExecution(action)) => match action.name.as_ref() { + Some(ActionName { category, .. 
}) => pattern.is_match(category), + _ => false, + }, + _ => false, + } +} /// Presented with an event and its containing span, emit it to the output if it's relevant. The /// state is used to associate the parent with something meaningful. This does not take the parent /// directly because *most* events are *not* relevant so we save the lookup in that case. -pub fn emit_event_if_relevant<T>( - parent_span_id: T, +pub fn emit_event_if_relevant( + parent_span_id: OptionalSpanId, data: &buck2_data::buck_event::Data, - state: &impl WhatRanState<T>, + state: &impl WhatRanState, output: &mut impl WhatRanOutputWriter, - options: &WhatRanOptions, + options: &WhatRanOptionsRegex, ) -> anyhow::Result<()> { - if let Some(repro) = CommandReproducer::from_buck_data(data, options) { - emit(parent_span_id, repro, state, output)?; + if let Some(repro) = CommandReproducer::from_buck_data(data, options.options) { + let data = match data { + buck2_data::buck_event::Data::SpanEnd(span) => &span.data, + _ => &None, + }; + + emit(parent_span_id, repro, state, data, output, options)?; } Ok(()) } /// Find and format the parent span (if any), then emit the relevant command. -fn emit<T>( - parent_span_id: T, +fn emit( + parent_span_id: OptionalSpanId, repro: CommandReproducer<'_>, - state: &impl WhatRanState<T>, + state: &impl WhatRanState, + data: &Option<buck2_data::span_end_event::Data>, output: &mut impl WhatRanOutputWriter, + options: &WhatRanOptionsRegex, ) -> anyhow::Result<()> { - emit_reproducer(state.get(parent_span_id), repro, output) + let action = match parent_span_id.0 { + None => None, + Some(parent_span_id) => state.get(parent_span_id), + }; + + emit_what_ran_entry(action, repro, data, output, options) } -pub fn emit_reproducer( +pub fn emit_what_ran_entry( action: Option<WhatRanRelevantAction<'_>>, repro: CommandReproducer<'_>, + data: &Option<buck2_data::span_end_event::Data>, output: &mut impl WhatRanOutputWriter, + options: &WhatRanOptionsRegex, ) -> anyhow::Result<()> { + let should_emit = options + .filter_category_regex + .as_ref() + .map_or(true, |category| matches_category(action, category)); + + if !should_emit { + return Ok(()); + } let (reason, identity, extra) = match action { Some(WhatRanRelevantAction::ActionExecution(action)) => ( "build", @@ -154,14 +226,46 @@ ), None => ("test.run", Cow::Borrowed("unknown test suite"), None), }, + Some(WhatRanRelevantAction::SetupLocalResources(setup)) => ( + "test.local_resource_setup", + if let Some(target_label) = &setup.target_label { + Cow::Owned(display::display_configured_target_label( + target_label, + TargetDisplayOptions::for_log(), + )?) + } else { + Cow::Borrowed("") + }, + None, + ), None => ("unknown", Cow::Borrowed("unknown action"), None), }; + let std_err = match data { + Some(buck2_data::span_end_event::Data::ActionExecution(action_exec)) => action_exec + .commands + .iter() + .last() + .and_then(|cmd| cmd.details.as_ref().map(|d| d.stderr.as_ref())), + _ => None, + }; + let duration = match data { + Some(buck2_data::span_end_event::Data::ActionExecution(action_exec)) => action_exec + .wall_time + .as_ref() + .map(|prost_types::Duration { seconds, nanos }| { + std::time::Duration::new(*seconds as u64, *nanos as u32) + }), + + _ => None, + }; output.emit_command(WhatRanOutputCommand { reason, identity: &identity, repro, extra, + std_err, + duration, })?; Ok(()) @@ -182,7 +286,14 @@ impl<'a> CommandReproducer<'a> { pub fn executor(&self) -> String { match self { Self::CacheQuery(..) => "cache_query".to_owned(), - Self::CacheHit(..) => "cache".to_owned(), + Self::CacheHit(&buck2_data::CacheHit { cache_type, .. 
}) => { + match buck2_data::CacheHitType::from_i32(cache_type) { + Some(buck2_data::CacheHitType::RemoteDepFileCache) => { + "re_dep_file_cache".to_owned() + } + _ => "cache".to_owned(), + } + } Self::ReExecute(execute) => executor_with_platform(execute), Self::LocalExecute(..) => "local".to_owned(), Self::WorkerExecute(..) => "worker".to_owned(), @@ -361,17 +472,16 @@ pub fn command_to_string<'a>(command: impl Into>) -> String { for arg in command.argv.iter() { cmd.push(Cow::Borrowed(arg)); } - - shlex::join(cmd.iter().map(|e| e.as_ref())) + shlex::try_join(cmd.iter().map(|e| e.as_ref())).expect("Null byte unexpected") } impl WhatRanOutputWriter for SuperConsole { fn emit_command(&mut self, command: WhatRanOutputCommand<'_>) -> anyhow::Result<()> { // TODO: Change this API to just produce a String. let msg = WhatRanCommandConsoleFormat { - reason: command.reason(), - identity: command.identity(), - repro: command.repro(), + reason: command.reason, + identity: command.identity, + repro: command.repro, } .to_string(); self.emit(Lines(vec![Line::sanitized(&msg)])); @@ -415,7 +525,6 @@ fn executor_with_platform(execute: &buck2_data::ReExecute) -> String { #[cfg(test)] mod tests { - use buck2_data::re_platform::Property; use buck2_data::ReExecute; use buck2_data::RePlatform; @@ -438,6 +547,7 @@ mod tests { ], }), action_key: None, + use_case: "".to_owned(), }; let result = executor_with_platform(&execute); assert_eq!( diff --git a/app/buck2_event_publisher_proto/BUCK b/app/buck2_event_publisher_proto/BUCK new file mode 100644 index 0000000000000..a9ded5a686d2f --- /dev/null +++ b/app/buck2_event_publisher_proto/BUCK @@ -0,0 +1,18 @@ +load("@fbcode//buck2:proto_defs.bzl", "rust_protobuf_library") +load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") + +oncall("build_infra") + +rust_protobuf_library( + name = "buck2_event_publisher_proto", + srcs = glob(["src/**/*.rs"]), + build_script = "build.rs", + build_env = { + "BUCK_HACK_DATA_PROTOC_INCLUDE": "$(location //buck2/app/buck2_data:data_proto)", + }, + protos = ["event_publisher.proto"], + deps = [ + "fbsource//third-party/rust:tonic", + "//buck2/app/buck2_data:buck2_data", + ], +) diff --git a/app/buck2_event_publisher_proto/Cargo.toml b/app/buck2_event_publisher_proto/Cargo.toml new file mode 100644 index 0000000000000..39b5e1a80f701 --- /dev/null +++ b/app/buck2_event_publisher_proto/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "buck2_event_publisher_proto" + +edition = "2021" +license = { workspace = true } +repository = { workspace = true } +version = "0.1.0" + +[dependencies] +prost = { workspace = true } +tonic = { workspace = true } + +buck2_data = { workspace = true } + +[build-dependencies] +buck2_protoc_dev = { workspace = true } diff --git a/app/buck2_event_publisher_proto/build.rs b/app/buck2_event_publisher_proto/build.rs new file mode 100644 index 0000000000000..25553e85e676f --- /dev/null +++ b/app/buck2_event_publisher_proto/build.rs @@ -0,0 +1,28 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use std::env; +use std::io; +use std::path::PathBuf; + +fn main() -> io::Result<()> { + let proto_files = &["event_publisher.proto"]; + + let data_include = if let Ok(value) = env::var("BUCK_HACK_DATA_PROTOC_INCLUDE") { + let path = PathBuf::from(value); + path.parent().unwrap().to_str().unwrap().to_owned() + } else { + "../buck2_data".to_owned() + }; + + buck2_protoc_dev::configure() + .setup_protoc() + .extern_path(".buck.data", "::buck2_data") + .compile(proto_files, &[".", &data_include]) +} diff --git a/app/buck2_event_publisher_proto/event_publisher.proto b/app/buck2_event_publisher_proto/event_publisher.proto new file mode 100644 index 0000000000000..1963474c4e749 --- /dev/null +++ b/app/buck2_event_publisher_proto/event_publisher.proto @@ -0,0 +1,33 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +syntax = "proto3"; + +import "data.proto"; + +package event_publisher; + +message BuckEventRequest { + // A trace-unique 64-bit integer identifying the stream. + uint64 stream_id = 1; + + buck.data.BuckEvent event = 2; +}; + +message BuckEventResponse { + // A trace-unique 64-bit integer identifying the stream. + uint64 stream_id = 1; + + // The trace ID of the event that has been committed. + uint64 trace_id = 2; +}; + +service BuckEventPublisher { + rpc StreamBuckEvent(stream BuckEventRequest) returns (stream BuckEventResponse); +}; diff --git a/app/buck2_event_publisher_proto/src/lib.rs b/app/buck2_event_publisher_proto/src/lib.rs new file mode 100644 index 0000000000000..4392dad159d73 --- /dev/null +++ b/app/buck2_event_publisher_proto/src/lib.rs @@ -0,0 +1,12 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +#![feature(error_generic_member_access)] + +tonic::include_proto!("event_publisher"); diff --git a/app/buck2_events/BUCK b/app/buck2_events/BUCK index ef142f05f0121..8e435a745c0e1 100644 --- a/app/buck2_events/BUCK +++ b/app/buck2_events/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -8,6 +7,20 @@ rust_library( srcs = glob( ["src/**/*.rs"], ), + os_deps = [ + ( + "windows", + ["fbsource//third-party/rust:winver"], + ), + ( + "linux", + ["fbsource//third-party/rust:sys-info"], + ), + ( + "macos", + ["fbsource//third-party/rust:sys-info"], + ), + ], test_deps = ["fbsource//third-party/rust:tokio"], deps = [ "fbsource//third-party/rust:anyhow", @@ -18,12 +31,10 @@ rust_library( "fbsource//third-party/rust:is_proc_translated", "fbsource//third-party/rust:once_cell", "fbsource//third-party/rust:pin-project", - "fbsource//third-party/rust:prost", + # @oss-disable: "fbsource//third-party/rust:prost", "fbsource//third-party/rust:serde", - "fbsource//third-party/rust:serde_json", + # @oss-disable: "fbsource//third-party/rust:serde_json", "fbsource//third-party/rust:smallvec", - "fbsource//third-party/rust:sys-info", - "fbsource//third-party/rust:thiserror", "fbsource//third-party/rust:tokio", "fbsource//third-party/rust:uuid", "//buck2/allocative/allocative:allocative", @@ -31,12 +42,14 @@ rust_library( "//buck2/app/buck2_cli_proto:buck2_cli_proto", "//buck2/app/buck2_core:buck2_core", "//buck2/app/buck2_data:buck2_data", + "//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_util:buck2_util", "//buck2/app/buck2_wrapper_common:buck2_wrapper_common", - "//buck2/facebook/scribe_client:scribe_client", "//buck2/gazebo/dupe:dupe", "//buck2/gazebo/gazebo:gazebo", + # @oss-disable: "//buck2/shed/scribe_client:scribe_client", "//common/rust/shed/fbinit:fbinit", # @oss-disable: "//common/rust/user:user", + # @oss-disable: "//devx_www/cross_env_session_id:cross_env_session_id", ], ) diff --git a/app/buck2_events/Cargo.toml b/app/buck2_events/Cargo.toml index 10027323fb1fd..0e2cb6c2bb723 100644 --- a/app/buck2_events/Cargo.toml +++ b/app/buck2_events/Cargo.toml @@ -1,46 +1,48 @@ [package] +edition = "2021" +license = { workspace = true } name = "buck2_events" +repository = { workspace = true } version = "0.1.0" -edition = "2021" [dependencies] anyhow = { workspace = true } -async-trait = { workspace = true } -base64 = { workspace = true } -byteorder = { workspace = true } +async-stream = { workspace = true } +crossbeam-channel = { workspace = true } derive_more = { workspace = true } futures = { workspace = true } hostname = { workspace = true } +is_proc_translated = { workspace = true } once_cell = { workspace = true } +pin-project = { workspace = true } prost = { workspace = true } +prost-types = { workspace = true } serde = { workspace = true } -serde_json = { workspace = true } +smallvec = { workspace = true } sys-info = { workspace = true } -thiserror = { workspace = true } +tonic = { workspace = true } tokio = { workspace = true } -tracing = { workspace = true } -crossbeam-channel = { workspace = true } -crossbeam-epoch = { workspace = true } -pin-project = { workspace = true } -is_proc_translated = { workspace = true } +tokio-stream = { workspace = true } uuid = { workspace = true } -smallvec = { workspace = true } +allocative = { workspace = true } +dupe = { workspace = true } fbinit = { workspace = true } gazebo = { workspace = true } -dupe = { workspace = true } 
-gazebo_lint.version = "0.1" -gazebo_lint.optional = true -# @oss-disable: gazebo_lint.path = "../../gazebo_lint/gazebo_lint" -# @oss-disable: user = { path = "../../../common/rust/user" } -allocative = { workspace = true } -buck2_cli_proto = { workspace = true } +bazel_event_publisher_proto = { workspace = true } + buck2_build_info = { workspace = true } +buck2_cli_proto = { workspace = true } buck2_core = { workspace = true } buck2_data = { workspace = true } +buck2_event_publisher_proto = { workspace = true } +buck2_error = { workspace = true } buck2_util = { workspace = true } buck2_wrapper_common = { workspace = true } -[features] -# @oss-disable: default = ["gazebo_lint"] +[target."cfg(windows)".dependencies] +winver = "1" + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ["cfg(fbcode_build)"] } diff --git a/app/buck2_events/src/daemon_id.rs b/app/buck2_events/src/daemon_id.rs index ca47bdd42eb6c..bd6b649df29ee 100644 --- a/app/buck2_events/src/daemon_id.rs +++ b/app/buck2_events/src/daemon_id.rs @@ -11,7 +11,7 @@ use once_cell::sync::Lazy; use uuid::Uuid; #[derive(derive_more::Display)] -#[display(fmt = "{}", uuid.hyphenated())] +#[display("{}", uuid.hyphenated())] pub struct DaemonId { uuid: Uuid, } diff --git a/app/buck2_events/src/dispatch.rs b/app/buck2_events/src/dispatch.rs index c508ae4686a71..8d10ac9fff15e 100644 --- a/app/buck2_events/src/dispatch.rs +++ b/app/buck2_events/src/dispatch.rs @@ -23,7 +23,6 @@ use std::time::Duration; use std::time::Instant; use std::time::SystemTime; -use buck2_core::env_helper::EnvHelper; use buck2_data::buck_event; use buck2_data::span_end_event; use buck2_data::span_start_event; @@ -98,6 +97,10 @@ impl EventDispatcher { self.instant_event(buck2_data::ConsoleMessage { message }) } + pub fn console_warning(&self, message: String) { + self.instant_event(buck2_data::ConsoleWarning { message }) + } + fn event_with_span_id<E: Into<buck_event::Data>>( &self, data: E, @@ -232,7 +235,7 @@ impl Span { where D: Into<span_start_event::Data>, { - let span_id = SpanId::new(); + let span_id = SpanId::next(); let start_instant = Instant::now(); dispatcher.event_with_span_id( @@ -348,10 +351,11 @@ impl Drop for Span { } thread_local! { - static CURRENT_SPAN: Cell<Option<SpanId>> = Cell::new(None); + static CURRENT_SPAN: Cell<Option<SpanId>> = const { Cell::new(None) }; } use allocative::Allocative; +use buck2_core::buck2_env_anyhow; tokio::task_local! { pub static EVENTS: EventDispatcher; } @@ -385,19 +389,12 @@ pub fn get_dispatcher_opt() -> Option<EventDispatcher> { } pub fn get_dispatcher() -> EventDispatcher { - static ENFORCE_DISPATCHER_SET: EnvHelper<bool> = EnvHelper::new("ENFORCE_DISPATCHER_SET"); + let enforce_event_dispatcher_set = buck2_env_anyhow!("ENFORCE_DISPATCHER_SET", bool).unwrap(); match get_dispatcher_opt() { Some(dispatcher) => dispatcher, None => { - let should_error = ENFORCE_DISPATCHER_SET - .get() - .ok() - .flatten() - .copied() - .unwrap_or_default(); - - if should_error { + if enforce_event_dispatcher_set { panic!("dispatcher is not set") } else { // TODO: This is firing millions of times, needs to fix this up before it's made a soft error. @@ -433,6 +430,11 @@ pub fn console_message(message: String) { get_dispatcher().console_message(message) } +/// Send console warning from the server. +pub fn console_warning(message: String) { + get_dispatcher().console_warning(message) +} + /// Introduces a new span and immediately fires the given start event. When the given future resolves, the span is /// closed and the event is emitted. 
This span is a "suspending span"; it is intended to suspend and resume whenever /// the future itself is suspended and resumed, respectively. @@ -447,6 +449,21 @@ where get_dispatcher().span_async(start, fut) } +/// Simpler version of `span_async` where the end event +/// can be constructed without requiring the result of the future. +pub fn span_async_simple<Start, End, Fut, R>( + start: Start, + fut: Fut, + end: End, +) -> impl Future<Output = R> +where + Start: Into<span_start_event::Data>, + End: Into<span_end_event::Data>, + Fut: Future<Output = R>, +{ + span_async(start, async { (fut.await, end) }) +} + /// To use when wrapping via span() and span_async is not convenient. This produces a Span guard /// that must be ended. The span is not automatically entered. pub fn create_span(start: impl Into<span_start_event::Data>) -> Span { @@ -503,7 +520,7 @@ where } } - let previous_recorder = with_thread_local_recorder(|tl_recorder| std::mem::take(tl_recorder)); + let previous_recorder = with_thread_local_recorder(std::mem::take); let _guard = RestoreRecorder { previous_recorder }; f() } @@ -513,7 +530,7 @@ where for<'a> F: FnOnce(&'a mut Option) -> O, { thread_local! { - static ROOT_SPAN_RECORDER: UnsafeCell> = UnsafeCell::new(None); + static ROOT_SPAN_RECORDER: UnsafeCell> = const { UnsafeCell::new(None) }; } // SAFETY: Nobody can possibly hold a reference to the contents of this cell, since the thread @@ -565,7 +582,6 @@ pub fn async_record_root_spans<Fut>(fut: Fut) -> RootSpansRecordingFuture<Fut> { mod tests { use buck2_data::CommandEnd; use buck2_data::CommandStart; -use buck2_data::SpanStartEvent; use tokio::task::JoinHandle; use super::*; @@ -595,7 +611,7 @@ mod tests { let end = CommandEnd { data: Default::default(), is_success: true, - error_messages: vec![], + errors: vec![], }; (start, end) diff --git a/app/buck2_events/src/errors.rs b/app/buck2_events/src/errors.rs new file mode 100644 index 0000000000000..6d1e290d486c3 --- /dev/null +++ b/app/buck2_events/src/errors.rs @@ -0,0 +1,57 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use gazebo::prelude::SliceExt; + +pub fn create_error_report(err: &buck2_error::Error) -> buck2_data::ErrorReport { + // Tier0 error by default if no tier is set + let tier: Option<buck2_data::error::ErrorTier> = err.get_tier().map(|c| match c { + buck2_error::Tier::Input => buck2_data::error::ErrorTier::Input, + buck2_error::Tier::Environment => buck2_data::error::ErrorTier::Environment, + buck2_error::Tier::Tier0 => buck2_data::error::ErrorTier::Tier0, + }); + + let (message, telemetry_message) = if let Some(f) = err.is_emitted() { + (format!("{:?}", f), Some(format!("{:?}", err))) + } else { + (format!("{:?}", err), None) + }; + + let source_location = err.source_location().map(ToOwned::to_owned); + let category_key = err.category_key(); + + let sub_error_categories = if let Some(error_diagnostics) = err + .action_error() + .and_then(|e| e.error_diagnostics.as_ref()) + { + if let Some(buck2_data::action_error_diagnostics::Data::SubErrors(sub_errors)) = + &error_diagnostics.data + { + sub_errors + .sub_errors + .iter() + .map(|s| s.category.clone()) + .collect::<Vec<String>>() + } else { + vec![] + } + } else { + vec![] + }; + + buck2_data::ErrorReport { + tier: tier.map(|c| c as i32), + message, + telemetry_message, + source_location, + tags: err.tags().map(|t| *t as i32), + sub_error_categories, + category_key: Some(category_key), + } +} diff --git a/app/buck2_events/src/lib.rs b/app/buck2_events/src/lib.rs index 6cff03db3be6e..2fb133d5b50d2 100644 --- a/app/buck2_events/src/lib.rs +++ b/app/buck2_events/src/lib.rs @@ -7,9 +7,9 @@ * of this source tree. */ -#![cfg_attr(feature = "gazebo_lint", feature(plugin))] -#![cfg_attr(feature = "gazebo_lint", allow(deprecated))] // :( -#![cfg_attr(feature = "gazebo_lint", plugin(gazebo_lint))] +#![feature(error_generic_member_access)] +#![feature(used_with_arg)] +#![feature(once_cell_try)] //! //! Events and event streams for Buck2. @@ -27,7 +27,9 @@ pub mod daemon_id; pub mod dispatch; +pub mod errors; pub mod metadata; +pub mod schedule_type; pub mod sink; pub mod source; pub mod span; @@ -44,7 +46,6 @@ use buck2_wrapper_common::invocation_id::TraceId; use derive_more::From; use gazebo::variants::UnpackVariants; use serde::Serialize; -use thiserror::Error; use crate::sink::channel::ChannelEventSink; use crate::source::ChannelEventSource; @@ -66,11 +67,11 @@ pub struct BuckEvent { /// If this event starts a new span, the span ID assigned to this span, or None if this event is a leaf event /// that does not start a new span. - span_id: Option<SpanId>, + pub span_id: Option<SpanId>, /// The ID of the span that contains this event. Will be non-None in all Events except the first and last events /// of a trace. - parent_id: Option<SpanId>, + pub parent_id: Option<SpanId>, } impl BuckEvent { @@ -192,7 +193,7 @@ impl TryFrom<Box<buck2_data::BuckEvent>> for BuckEvent { } /// The set of events that can flow out of an EventSource. -#[derive(Clone, From, UnpackVariants)] +#[derive(Debug, Clone, From, UnpackVariants)] #[allow(clippy::large_enum_variant)] pub enum Event { /// A command result, produced upon completion of a command. @@ -208,24 +209,47 @@ pub enum Event { pub struct EventSinkStats { /// Count of number of successful messages (e.g. those that have been processed by their downstream destination). pub successes: u64, - /// Count of messages that failed to be submitted and will not be retried. - pub failures: u64, + // Count of messages that failed to be submitted and will not be retried. 
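+ // (One counter per failure reason; `failures()` below folds them back into a single total.)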
+ pub failures_invalid_request: u64, + pub failures_unauthorized: u64, + pub failures_rate_limited: u64, + pub failures_pushed_back: u64, + pub failures_enqueue_failed: u64, + pub failures_internal_error: u64, + pub failures_timed_out: u64, + pub failures_unknown: u64, /// How many messages are currently buffered by this sink. pub buffered: u64, /// How many messages were not even enqueued by this sink. pub dropped: u64, + /// How many bytes were written into this sink. + pub bytes_written: u64, } impl EventSinkStats { - /// Since there can be one or more sinks (e.g. with [`sink::TeeSink`]), we need to aggregate these into - /// useful singular values. - pub fn aggregate(&self, other: &Self) -> Self { - Self { - successes: self.successes + other.successes, - failures: self.failures + other.failures, - buffered: self.buffered + other.buffered, - dropped: self.dropped + other.dropped, - } + pub fn failures(&self) -> u64 { + let EventSinkStats { + successes: _, + failures_invalid_request, + failures_unauthorized, + failures_rate_limited, + failures_pushed_back, + failures_enqueue_failed, + failures_internal_error, + failures_timed_out, + failures_unknown, + buffered: _, + dropped: _, + bytes_written: _, + } = self; + *failures_invalid_request + + *failures_unauthorized + + *failures_rate_limited + + *failures_pushed_back + + *failures_enqueue_failed + + *failures_internal_error + + *failures_timed_out + + *failures_unknown } } @@ -242,7 +266,7 @@ pub trait EventSinkWithStats: Send + Sync { fn to_event_sync(self: Arc<Self>) -> Arc<dyn EventSink>; /// Collects stats on this sink (e.g. messages accepted, rejected). - fn stats(&self) -> Option<EventSinkStats>; + fn stats(&self) -> EventSinkStats; } impl EventSink for Arc { @@ -260,7 +284,7 @@ pub fn create_source_sink_pair() -> (ChannelEventSource, impl EventSink) { } #[allow(clippy::large_enum_variant)] -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] enum BuckEventError { #[error("The `buck2_data::BuckEvent` provided has no `Timestamp`")] MissingTimestamp, @@ -284,8 +308,8 @@ mod tests { let test = BuckEvent::new( SystemTime::now(), TraceId::new(), - Some(SpanId::new()), - Some(SpanId::new()), + Some(SpanId::next()), + Some(SpanId::next()), SpanStartEvent { data: Some( CommandStart { diff --git a/app/buck2_events/src/metadata.rs b/app/buck2_events/src/metadata.rs index cc039b5c16c12..2b7056cadaeba 100644 --- a/app/buck2_events/src/metadata.rs +++ b/app/buck2_events/src/metadata.rs @@ -12,6 +12,7 @@ use std::collections::HashMap; use std::env; use std::sync::OnceLock; +use buck2_core::ci::ci_identifiers; use buck2_core::facebook_only; use buck2_wrapper_common::BUCK2_WRAPPER_ENV_VAR; @@ -52,6 +53,10 @@ pub fn collect() -> HashMap<String, String> { map.insert("rosetta".to_owned(), "1".to_owned()); } + if let Some(ces_id) = ces_id() { + map.insert("ces_id".to_owned(), ces_id); + } + // Global trace ID map.insert("daemon_uuid".to_owned(), DAEMON_UUID.to_string()); @@ -60,12 +65,19 @@ pub fn collect() -> HashMap<String, String> { map.insert("os_version".to_owned(), version); } - add_env_var(&mut map, "sandcastle_job_info", "SANDCASTLE_JOB_INFO"); - add_env_var(&mut map, "sandcastle_alias", "SANDCASTLE_ALIAS"); add_env_var(&mut map, "launched_via_wrapper", BUCK2_WRAPPER_ENV_VAR); add_env_var(&mut map, "fbpackage_name", "FBPACKAGE_PACKAGE_NAME"); add_env_var(&mut map, "fbpackage_version", "FBPACKAGE_PACKAGE_VERSION"); add_env_var(&mut map, "fbpackage_release", "FBPACKAGE_PACKAGE_RELEASE"); + + if let Ok(ci_identifiers) = ci_identifiers() { + for (ci_name, ci_value) in ci_identifiers { + if let 
Some(ci_value) = ci_value { + map.insert(ci_name.to_owned(), ci_value.to_owned()); + } + } + } + map } @@ -78,21 +90,13 @@ pub struct SystemInfo { pub fn system_info() -> SystemInfo { let hostname = hostname(); - let username; - #[cfg(any(fbcode_build, cargo_internal_build))] - { - username = user::current_username().ok(); - } - #[cfg(not(any(fbcode_build, cargo_internal_build)))] - { - username = None; - } + let username = username().ok().flatten(); SystemInfo { hostname, username, os: os_type(), - os_version: sys_info::os_release().ok(), + os_version: os_version(), } } @@ -109,6 +113,16 @@ fn os_type() -> String { } } +#[cfg(target_os = "windows")] +fn os_version() -> Option<String> { + winver::WindowsVersion::detect().map(|v| v.to_string()) +} + +#[cfg(any(target_os = "linux", target_os = "macos"))] +fn os_version() -> Option<String> { + sys_info::os_release().ok() +} + pub fn hostname() -> Option<String> { static CELL: OnceLock<Option<String>> = OnceLock::new(); @@ -119,3 +133,44 @@ pub fn hostname() -> Option<String> { }) .clone() } + +pub fn ces_id() -> Option<String> { + #[cfg(fbcode_build)] + { + use cross_env_session_id::CrossEnvironmentSessionId; + + CrossEnvironmentSessionId::get() + } + #[cfg(not(fbcode_build))] + { + None + } +} + +pub fn username() -> anyhow::Result<Option<String>> { + #[cfg(fbcode_build)] + { + Ok(Some(user::current_username()?)) + } + #[cfg(not(fbcode_build))] + { + Ok::<Option<String>, anyhow::Error>(None) + } +} + +#[cfg(all(test, target_os = "windows"))] +mod tests { + use super::*; + + #[test] + fn os_version_produces_reasonable_windows_version() { + let data = collect(); + // This logic used to use the `GetVersionExW` win32 API, which + // always returns the value below on recent versions of windows. See + // https://learn.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getversionexw + // for more details. + assert_ne!(data["os_version"], "6.2.9200"); + // This is true for both Windows 10 and Windows 11: https://learn.microsoft.com/en-us/windows/win32/sysinfo/operating-system-version + assert!(data["os_version"].starts_with("10.0.")); + } +} diff --git a/app/buck2_events/src/schedule_type.rs b/app/buck2_events/src/schedule_type.rs new file mode 100644 index 0000000000000..db64fd5c57695 --- /dev/null +++ b/app/buck2_events/src/schedule_type.rs @@ -0,0 +1,54 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use buck2_core::buck2_env_anyhow; + +pub struct ScheduleType { + schedule_type: Option<&'static str>, +} + +impl ScheduleType { + const SCHEDULE_TYPE_CONTINUOUS: &'static str = "continuous"; + const SCHEDULE_TYPE_DIFF: &'static str = "diff"; + + pub fn new() -> anyhow::Result<ScheduleType> { + // Same as RE does https://fburl.com/code/sj13r130 + let schedule_type = + if let Some(env) = buck2_env_anyhow!("SCHEDULE_TYPE", applicability = internal)? { + Some(env) + } else { + buck2_env_anyhow!("SANDCASTLE_SCHEDULE_TYPE", applicability = internal)? 
+ }; + Ok(Self { schedule_type }) + } + + pub fn is_continuous(&self) -> bool { + self.schedule_type == Some(Self::SCHEDULE_TYPE_CONTINUOUS) + } + + pub fn is_some(&self) -> bool { + self.schedule_type.is_some() + } + + pub fn is_diff(&self) -> bool { + self.schedule_type == Some(Self::SCHEDULE_TYPE_DIFF) + } + + pub fn testing_new(schedule_type: &'static str) -> Self { + Self { + schedule_type: Some(schedule_type), + } + } + + pub fn testing_empty() -> Self { + Self { + schedule_type: None, + } + } +} diff --git a/app/buck2_events/src/sink.rs b/app/buck2_events/src/sink.rs index 229f1f5479bfe..d430b58408d8f 100644 --- a/app/buck2_events/src/sink.rs +++ b/app/buck2_events/src/sink.rs @@ -11,5 +11,6 @@ //! sink during normal operation. pub(crate) mod channel; pub(crate) mod null; -pub mod scribe; +pub mod remote; +pub(crate) mod smart_truncate_event; pub mod tee; diff --git a/app/buck2_events/src/sink/remote.rs b/app/buck2_events/src/sink/remote.rs new file mode 100644 index 0000000000000..e6b101d9f23ad --- /dev/null +++ b/app/buck2_events/src/sink/remote.rs @@ -0,0 +1,1156 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! A Sink for forwarding events directly to Remote service. +use std::sync::atomic::AtomicBool; +use std::sync::atomic::Ordering; +use std::time::Duration; + +use fbinit::FacebookInit; + +#[cfg(fbcode_build)] +mod fbcode { + use std::sync::Arc; + use std::time::Duration; + use std::time::SystemTime; + + use buck2_core::buck2_env_anyhow; + use buck2_data::InstantEvent; + use buck2_data::Location; + use buck2_data::StructuredError; + use buck2_util::truncate::truncate; + use fbinit::FacebookInit; + use prost::Message; + + use crate::metadata; + use crate::schedule_type::ScheduleType; + use crate::sink::smart_truncate_event::smart_truncate_event; + use crate::BuckEvent; + use crate::Event; + use crate::EventSink; + use crate::EventSinkStats; + use crate::EventSinkWithStats; + use crate::TraceId; + + // 1 MiB limit + static SCRIBE_MESSAGE_SIZE_LIMIT: usize = 1024 * 1024; + // 50k characters + static TRUNCATED_SCRIBE_MESSAGE_SIZE: usize = 50000; + + /// RemoteEventSink is a ScribeSink backed by the Thrift-based client in the `buck2_scribe_client` crate. + pub struct RemoteEventSink { + category: String, + client: scribe_client::ScribeClient, + schedule_type: ScheduleType, + } + + impl RemoteEventSink { + /// Creates a new RemoteEventSink that forwards messages onto the Thrift-backed Scribe client. + pub fn new( + fb: FacebookInit, + category: String, + buffer_size: usize, + retry_backoff: Duration, + retry_attempts: usize, + message_batch_size: Option<usize>, + ) -> anyhow::Result<RemoteEventSink> { + let client = scribe_client::ScribeClient::new( + fb, + buffer_size, + retry_backoff, + retry_attempts, + message_batch_size, + )?; + + // schedule_type can change for the same daemon, because on OD some builds are pre warmed for users + // This would be problematic, because this is run just once on the daemon + // But in this case we only check for 'diff' type, which shouldn't change + let schedule_type = ScheduleType::new()?; + Ok(RemoteEventSink { + category, + client, + schedule_type, + }) + } + + // Send this event now, bypassing internal message queue. 
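+ // Usage sketch (illustrative): `sink.send_now(event).await` waits for the message + // to be delivered, whereas `sink.offer(event)` below only enqueues it.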
+ pub async fn send_now(&self, event: BuckEvent) { + self.send_messages_now(vec![event]).await; + } + + // Send multiple events now, bypassing internal message queue. + pub async fn send_messages_now(&self, events: Vec<BuckEvent>) { + let messages = events + .into_iter() + .filter_map(|e| { + let message_key = e.trace_id().unwrap().hash(); + Self::encode_message(e, false).map(|bytes| scribe_client::Message { + category: self.category.clone(), + message: bytes, + message_key: Some(message_key), + }) + }) + .collect(); + self.client.send_messages_now(messages).await; + } + + // Send this event by placing it on the internal message queue. + pub fn offer(&self, event: BuckEvent) { + let message_key = event.trace_id().unwrap().hash(); + if let Some(bytes) = Self::encode_message(event, false) { + self.client.offer(scribe_client::Message { + category: self.category.clone(), + message: bytes, + message_key: Some(message_key), + }); + } + } + + // Encodes message into something scribe understands. + fn encode_message(mut event: BuckEvent, is_truncated: bool) -> Option<Vec<u8>> { + smart_truncate_event(event.data_mut()); + let mut proto: Box<buck2_data::BuckEvent> = event.into(); + + Self::prepare_event(&mut proto); + + // Add a header byte to indicate this is _not_ base64 encoding. + let mut buf = Vec::with_capacity(proto.encoded_len() + 1); + buf.push(b'!'); + let mut proto_bytes = proto.encode_to_vec(); + buf.append(&mut proto_bytes); + + if buf.len() > SCRIBE_MESSAGE_SIZE_LIMIT { + // if this BuckEvent is already a truncated one but the buffer byte size exceeds the limit, + // do not send Scribe another truncated version + if is_truncated { + return None; + } + let json = serde_json::to_string(&proto).unwrap(); + + Self::encode_message( + BuckEvent::new( + SystemTime::now(), + TraceId::new(), + None, + None, + buck2_data::buck_event::Data::Instant(InstantEvent { + data: Some( + StructuredError { + location: Some(Location { + file: file!().to_owned(), + line: line!(), + column: column!(), + }), + payload: format!("Soft Error: oversized_scribe: Message is oversized. Event data: {}. 
Original message size: {}", truncate(&json, TRUNCATED_SCRIBE_MESSAGE_SIZE), + buf.len()), + metadata: metadata::collect(), + backtrace: Vec::new(), + quiet: false, + task: Some(true), + soft_error_category: Some("oversized_scribe".to_owned()), + daemon_in_memory_state_is_corrupted: false, + daemon_materializer_state_is_corrupted: false, + action_cache_is_corrupted: false, + deprecation: false, + } + .into(), + ), + }), + ), + true, + ) + } else { + Some(buf) + } + } + + fn prepare_event(event: &mut buck2_data::BuckEvent) { + use buck2_data::buck_event::Data; + + match &mut event.data { + Some(Data::SpanEnd(s)) => match &mut s.data { + Some(buck2_data::span_end_event::Data::ActionExecution(action)) => { + let mut is_cache_hit = false; + + for command in action.commands.iter_mut() { + let Some(details) = command.details.as_mut() else { + continue; + }; + + { + let Some(ref command_kind) = details.command_kind else { + continue; + }; + let Some(ref command) = command_kind.command else { + continue; + }; + let buck2_data::command_execution_kind::Command::RemoteCommand( + ref remote, + ) = command + else { + continue; + }; + if !remote.cache_hit { + continue; + } + } + + is_cache_hit = true; + details.metadata = None; + } + + if is_cache_hit { + action.dep_file_key = None; + action.outputs.clear(); + } + } + _ => {} + }, + _ => {} + } + } + } + + impl EventSink for RemoteEventSink { + fn send(&self, event: Event) { + match event { + Event::Buck(event) => { + if should_send_event(event.data(), &self.schedule_type) { + self.offer(event); + } + } + Event::CommandResult(..) => {} + Event::PartialResult(..) => {} + } + } + } + + impl EventSinkWithStats for RemoteEventSink { + fn to_event_sync(self: Arc) -> Arc { + self as _ + } + + fn stats(&self) -> EventSinkStats { + let counters = self.client.export_counters(); + EventSinkStats { + successes: counters.successes, + failures_invalid_request: counters.failures_invalid_request, + failures_unauthorized: counters.failures_unauthorized, + failures_rate_limited: counters.failures_rate_limited, + failures_pushed_back: counters.failures_pushed_back, + failures_enqueue_failed: counters.failures_enqueue_failed, + failures_internal_error: counters.failures_internal_error, + failures_timed_out: counters.failures_timed_out, + failures_unknown: counters.failures_unknown, + buffered: counters.queue_depth, + dropped: counters.dropped, + bytes_written: counters.bytes_written, + } + } + } + + fn should_send_event(d: &buck2_data::buck_event::Data, schedule_type: &ScheduleType) -> bool { + use buck2_data::buck_event::Data; + + match d { + Data::SpanStart(s) => { + use buck2_data::span_start_event::Data; + + match &s.data { + Some(Data::Command(..)) => true, + None => false, + _ => false, + } + } + Data::SpanEnd(s) => { + use buck2_data::span_end_event::Data; + use buck2_data::ActionExecutionKind; + + match &s.data { + Some(Data::Command(..)) => true, + Some(Data::ActionExecution(a)) => { + match ActionExecutionKind::from_i32(a.execution_kind) { + // Those kinds are not used in downstreams + Some(ActionExecutionKind::Simple) => false, + Some(ActionExecutionKind::Deferred) => false, + Some(ActionExecutionKind::NotSet) => false, + _ => true, + } + } + Some(Data::Analysis(..)) => !schedule_type.is_diff(), + Some(Data::Load(..)) => true, + Some(Data::CacheUpload(..)) => true, + Some(Data::DepFileUpload(..)) => true, + Some(Data::Materialization(..)) => true, + Some(Data::TestDiscovery(..)) => true, + Some(Data::TestEnd(..)) => true, + None => false, + _ => false, + } + } + 
Data::Instant(i) => { + use buck2_data::instant_event::Data; + + match i.data { + Some(Data::BuildGraphInfo(..)) => true, + Some(Data::RageResult(..)) => true, + Some(Data::ReSession(..)) => true, + Some(Data::StructuredError(..)) => true, + Some(Data::PersistEventLogSubprocess(..)) => true, + Some(Data::CleanStaleResult(..)) => true, + None => false, + _ => false, + } + } + Data::Record(r) => { + use buck2_data::record_event::Data; + + match r.data { + Some(Data::InvocationRecord(..)) => true, + Some(Data::BuildGraphStats(..)) => true, + None => false, + } + } + } + } + + pub fn scribe_category() -> anyhow::Result { + const DEFAULT_SCRIBE_CATEGORY: &str = "buck2_events"; + // Note that both daemon and client are emitting events, and that changing this variable has + // no effect on the daemon until buckd is restarted but has effect on the client. + Ok( + buck2_env_anyhow!("BUCK2_SCRIBE_CATEGORY", applicability = internal)? + .unwrap_or(DEFAULT_SCRIBE_CATEGORY) + .to_owned(), + ) + } +} + +#[cfg(not(fbcode_build))] +mod fbcode { + use std::collections::HashMap; + use std::sync::Arc; + use std::thread::JoinHandle; + + use anyhow::Context; + + use async_stream::stream; + + use bazel_event_publisher_proto::build_event_stream; + use bazel_event_publisher_proto::build_event_stream::build_event_id; + use bazel_event_publisher_proto::build_event_stream::BuildEventId; + use bazel_event_publisher_proto::google::devtools::build::v1; + use buck2_data; + use buck2_data::BuildCommandStart; + use buck2_util::future::try_join_all; + use futures::stream; + + use futures::Stream; + use futures::StreamExt; + use tonic::transport::channel::ClientTlsConfig; + use tonic::transport::Certificate; + use tonic::transport::Channel; + use tonic::Request; + + use std::time::Duration; + + use tokio::fs::OpenOptions; + use tokio::runtime::Builder; + use tokio::sync::mpsc; + use tokio::sync::mpsc::UnboundedReceiver; + use tokio::sync::mpsc::UnboundedSender; + + use tokio_stream::wrappers::UnboundedReceiverStream; + + use bazel_event_publisher_proto::google::devtools::build::v1::OrderedBuildEvent; + use bazel_event_publisher_proto::google::devtools::build::v1::publish_build_event_client::PublishBuildEventClient; + use bazel_event_publisher_proto::google::devtools::build::v1::PublishBuildToolEventStreamRequest; + use bazel_event_publisher_proto::google::devtools::build::v1::PublishLifecycleEventRequest; + use bazel_event_publisher_proto::google::devtools::build::v1::StreamId; + use bazel_event_publisher_proto::google::devtools::build::v1::BuildStatus; + + use prost; + use prost::Message; + use prost_types; + + use crate::BuckEvent; + use crate::Event; + use crate::EventSink; + use crate::EventSinkStats; + use crate::EventSinkWithStats; + + pub struct RemoteEventSink { + _handler: JoinHandle<()>, + send: UnboundedSender>, + } + + async fn connect_build_event_server() -> anyhow::Result> { + let address = std::env::var("BES_URI")?; + let uri = address.parse().context("connect_build_event_server - Invalid address")?; + println!("connect_build_event_server - uri: {:?}", uri); + let mut channel = Channel::builder(uri); + println!("connect_build_event_server built channel"); + // TODO: enable TLS and handle API token + let mut tls_config = ClientTlsConfig::new(); + let data = tokio::fs::read("/Users/nlopez/src/ef/.secrets/opal/engflow.includesprivatekey.pem") + .await + .with_context(|| format!("Error reading `{}`", "/Users/nlopez/src/ef/.secrets/opal/engflow.includesprivatekey.pem"))?; + tls_config = 
tls_config.ca_certificate(Certificate::from_pem(data)); + channel = channel.tls_config(tls_config.clone())?; + println!("connect_build_event_server connecting..."); + channel + .connect() + .await + .with_context(|| format!("Error connecting to `{}`", address))?; + println!("connect_build_event_server channel connected"); + let client = PublishBuildEventClient::connect(channel) + .await + .context("creating Bazel event stream gRPC client")?; + println!("connect_build_event_server Bazel event stream gRPC client created"); + Ok(client) + } + + fn buck_to_bazel_events>(events: S, client: PublishBuildEventClient) -> impl Stream { + let mut target_actions: HashMap<(String, String), Vec<(BuildEventId, bool)>> = HashMap::new(); + stream! { + for await event in events { + // println!("EVENT {:?} {:?}", event.event.trace_id, event); + match event.data() { + buck2_data::buck_event::Data::SpanStart(start) => { + //println!("START {:?}", start); + //println!("START **"); + match start.data.as_ref() { + None => { + //println!("Entered NONE 1"); + }, + Some(buck2_data::span_start_event::Data::Command(command)) => { + match command.data.as_ref() { + None => { + //println!("Entered NONE 2"); + }, + Some(buck2_data::command_start::Data::Build(BuildCommandStart {})) => { + println!("Entered Some(buck2_data...))"); + + let bes_event = build_event_stream::BuildEvent { + id: Some(build_event_stream::BuildEventId { id: Some(build_event_stream::build_event_id::Id::Started(build_event_stream::build_event_id::BuildStartedId {})) }), + children: vec![], + last_message: false, + payload: Some(build_event_stream::build_event::Payload::Started(build_event_stream::BuildStarted { + uuid: event.event.trace_id.clone(), + start_time_millis: 0, + start_time: Some(event.timestamp().into()), + build_tool_version: "BUCK2".to_owned(), + options_description: "UNKNOWN".to_owned(), + command: "build".to_owned(), + working_directory: "UNKNOWN".to_owned(), + workspace_directory: "UNKNOWN".to_owned(), + server_pid: std::process::id() as i64, + })), + }; + let bazel_event = v1::build_event::Event::BazelEvent(prost_types::Any { + type_url: "type.googleapis.com/build_event_stream.BuildEvent".to_owned(), + value: bes_event.encode_to_vec(), + }); + + + // SEND ENQUEUED LIFECYCLE EVENT + let enqueued_event = v1::build_event::BuildEnqueued { + details: None, + }; + + let build_enqueued_event = v1::BuildEvent { + event: Some(v1::build_event::Event::BuildEnqueued(enqueued_event)), + event_time: Some(event.timestamp().into()), + }; + println!("Enqueued {:?}", build_enqueued_event); + + let lifecycle_request = Request::new(PublishLifecycleEventRequest{ + build_event: Some(OrderedBuildEvent { + sequence_number: 1, + event: Some(build_enqueued_event), + stream_id: Some(StreamId{ + build_id: event.event.trace_id.clone(), + // I think it should not be set but its being requested. 
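+                                            // In the Bazel Build Event Protocol, StreamId.build_id names the
+                                            // overall build and StreamId.invocation_id one attempt within it.
+                                            // Buck2 has a single trace id per command, so this sketch reuses
+                                            // the same id for both fields (a simplifying assumption).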
+ invocation_id: event.event.trace_id.clone(), + // hardcode CONTROLLER + component: 1, + }), + }), + //service_level: Some(PublishLifecycleEventRequest::ServiceLevel::INTERACTIVE), + service_level: 1, + project_id: "default".to_owned(), + check_preceding_lifecycle_events_present: false, + notification_keywords: [].to_vec(), + stream_timeout: None, + }); + + let mut client = client.clone(); + let response = client.publish_lifecycle_event(lifecycle_request).await; + // END ENQUEUED LIFECYCLE EVENT + + + // SEND InvocationAttemptStarted event + + let inv_started_event = v1::build_event::InvocationAttemptStarted { + attempt_number: 1, + details: None, + }; + + let invocation_started_event = v1::BuildEvent { + event: Some(v1::build_event::Event::InvocationAttemptStarted(inv_started_event)), + event_time: Some(event.timestamp().into()), + }; + println!("inv_started_event {:?}", invocation_started_event); + + let lifecycle_request = Request::new(PublishLifecycleEventRequest{ + build_event: Some(OrderedBuildEvent { + sequence_number: 1, + event: Some(invocation_started_event), + stream_id: Some(StreamId{ + build_id: event.event.trace_id.clone(), + // I think it should not be set but its being requested. + invocation_id: event.event.trace_id.clone(), + // hardcode CONTROLLER + component: 1, + }), + }), + //service_level: Some(PublishLifecycleEventRequest::ServiceLevel::INTERACTIVE), + service_level: 1, + project_id: "default".to_owned(), + check_preceding_lifecycle_events_present: false, + notification_keywords: [].to_vec(), + stream_timeout: None, + }); + + let mut client = client.clone(); + let response = client.publish_lifecycle_event(lifecycle_request).await; + // END InvocationAttemptStarted event + + //println!("START {:?}", bazel_event); + yield v1::BuildEvent { + event_time: Some(event.timestamp().into()), + event: Some(bazel_event), + }; + }, + Some(_) => { + //println!("Entered SOME 2"); + }, + } + }, + Some(buck2_data::span_start_event::Data::Analysis(analysis)) => { + //println!("Analysis event"); + let label = match analysis.target.as_ref() { + None => None, + Some(buck2_data::analysis_start::Target::StandardTarget(label)) => + label.label.as_ref().map(|label| format!("{}:{}", label.package, label.name)), + Some(buck2_data::analysis_start::Target::AnonTarget(_anon)) => None, // TODO + Some(buck2_data::analysis_start::Target::DynamicLambda(_owner)) => None, // TODO + }; + let mut label_str = "".to_owned() + label.as_ref().unwrap(); + label_str = label_str.replace("root//:","//"); + //label_str = label_str.replace("//:","/"); + //label_str = "//".to_owned() + &label_str; + //println!("Analysis event label {:?}", label_str); + match label { + None => {}, + Some(label) => { + let bes_event = build_event_stream::BuildEvent { + id: Some(build_event_stream::BuildEventId { id: Some(build_event_stream::build_event_id::Id::TargetConfigured(build_event_id::TargetConfiguredId { + label: label_str.clone(), + aspect: "".to_owned(), + })) }), + children: vec![], + last_message: false, + payload: Some(build_event_stream::build_event::Payload::Configured(bazel_event_publisher_proto::build_event_stream::TargetConfigured { + target_kind: "UNKNOWN".to_owned(), + test_size: 0, + tag: vec![], + })), + }; + let bazel_event = v1::build_event::Event::BazelEvent(prost_types::Any { + type_url: "type.googleapis.com/build_event_stream.BuildEvent".to_owned(), + value: bes_event.encode_to_vec(), + }); + //println!("Analysis label1 {:?}", bazel_event); + yield v1::BuildEvent { + event_time: 
Some(event.timestamp().into()), + event: Some(bazel_event), + }; + + let bes_event = build_event_stream::BuildEvent { + id: Some(build_event_stream::BuildEventId { id: Some(build_event_stream::build_event_id::Id::Pattern(build_event_id::PatternExpandedId { + pattern: vec![label.clone()], + })) }), + children: vec![ + build_event_stream::BuildEventId { id: Some(build_event_stream::build_event_id::Id::TargetConfigured(bazel_event_publisher_proto::build_event_stream::build_event_id::TargetConfiguredId { + label: label_str.clone(), + aspect: "".to_owned(), + }))}, + ], + last_message: false, + payload: Some(build_event_stream::build_event::Payload::Expanded(bazel_event_publisher_proto::build_event_stream::PatternExpanded { + test_suite_expansions: vec![], + })), + }; + let bazel_event = v1::build_event::Event::BazelEvent(prost_types::Any { + type_url: "type.googleapis.com/build_event_stream.BuildEvent".to_owned(), + value: bes_event.encode_to_vec(), + }); + //println!("Analysis label2 {:?}", bazel_event); + yield v1::BuildEvent { + event_time: Some(event.timestamp().into()), + event: Some(bazel_event), + }; + }, + } + }, + Some(_) => { + //println!("Entered SOME 1"); + }, + } + }, + buck2_data::buck_event::Data::SpanEnd(end) => { + //println!("END {:?}", end); + match end.data.as_ref() { + None => {}, + Some(buck2_data::span_end_event::Data::Command(command)) => { + match command.data.as_ref() { + None => {}, + Some(buck2_data::command_end::Data::Build(_build)) => { + + + // flush the target completed map. + for ((label, config), actions) in target_actions.into_iter() { + //println!("flush the target completed map"); + let mut label_str = "".to_owned() + &label.clone(); + label_str = label_str.replace("root//:","//"); + //label_str = label_str.replace("//:","/"); + //label_str = "//".to_owned() + &label_str; + let success = actions.iter().all(|(_, success)| *success); + let children: Vec<_> = actions.into_iter().map(|(id, _)| id).collect(); + let bes_event = build_event_stream::BuildEvent { + id: Some(build_event_stream::BuildEventId { id: Some(build_event_stream::build_event_id::Id::TargetCompleted(build_event_id::TargetCompletedId { + label: label_str.clone(), + configuration: Some(build_event_id::ConfigurationId { id: config }), + aspect: "".to_owned(), + })) }), + children: children, + last_message: false, + payload: Some(build_event_stream::build_event::Payload::Completed(build_event_stream::TargetComplete { + success: success, + target_kind: "".to_owned(), + test_size: 0, + output_group: vec![], + important_output: vec![], + directory_output: vec![], + tag: vec![], + test_timeout_seconds: 0, + test_timeout: None, + failure_detail: None, + })), + }; + let bazel_event = v1::build_event::Event::BazelEvent(prost_types::Any { + type_url: "type.googleapis.com/build_event_stream.BuildEvent".to_owned(), + value: bes_event.encode_to_vec(), + }); + //println!("flush event {:?}", bazel_event); + + yield v1::BuildEvent { + event_time: Some(event.timestamp().into()), + event: Some(bazel_event), + }; + } + + + + // InvocationAttemptFinished Send event + + + let inv_finished_event = v1::build_event::InvocationAttemptFinished { + details: None, + invocation_status: Some( + if command.is_success { + BuildStatus { + result: 1, + final_invocation_id: event.event.trace_id.clone(), + build_tool_exit_code: Some(1), + error_message: "".to_string(), + details: None, + } + } else { + BuildStatus { + result: 2, + final_invocation_id: event.event.trace_id.clone(), + build_tool_exit_code: Some(2), + error_message: 
"".to_string(), + details: None, + } + }), + }; + + let build_inv_finished_event = v1::BuildEvent { + event: Some(v1::build_event::Event::InvocationAttemptFinished(inv_finished_event)), + event_time: Some(event.timestamp().into()), + }; + //println!("INV Finished ev {:?}", build_inv_finished_event); + + let lifecycle_request = Request::new(PublishLifecycleEventRequest{ + build_event: Some(OrderedBuildEvent { + sequence_number: 2, + event: Some(build_inv_finished_event), + stream_id: Some(StreamId{ + build_id: event.event.trace_id.clone(), + // I think it should not be set but its being requested. + invocation_id: event.event.trace_id.clone(), + // hardcode CONTROLLER + component: 1, + }), + }), + service_level: 1, + project_id: "default".to_owned(), + check_preceding_lifecycle_events_present: false, + notification_keywords: [].to_vec(), + stream_timeout: None, + }); + + let mut client = client.clone(); + let response = client.publish_lifecycle_event(lifecycle_request).await; + + // END InvocationAttemptFinished Send event + + + // BuildFinished Send event + + let finished_event = v1::build_event::BuildFinished { + details: None, + status: Some( + if command.is_success { + BuildStatus { + result: 1, + final_invocation_id: event.event.trace_id.clone(), + build_tool_exit_code: Some(1), + error_message: "".to_string(), + details: None, + } + } else { + BuildStatus { + result: 2, + final_invocation_id: event.event.trace_id.clone(), + build_tool_exit_code: Some(2), + error_message: "".to_string(), + details: None, + } + }), + }; + + let build_finished_event = v1::BuildEvent { + event: Some(v1::build_event::Event::BuildFinished(finished_event)), + event_time: Some(event.timestamp().into()), + }; + //println!("Finished {:?}", build_finished_event); + + let lifecycle_request = Request::new(PublishLifecycleEventRequest{ + build_event: Some(OrderedBuildEvent { + sequence_number: 2, + event: Some(build_finished_event), + stream_id: Some(StreamId{ + build_id: event.event.trace_id.clone(), + // I think it should not be set but its being requested. 
+ invocation_id: event.event.trace_id.clone(), + // hardcode CONTROLLER + component: 1, + }), + }), + service_level: 1, + project_id: "default".to_owned(), + check_preceding_lifecycle_events_present: false, + notification_keywords: [].to_vec(), + stream_timeout: None, + }); + + let mut client = client.clone(); + let response = client.publish_lifecycle_event(lifecycle_request).await; + // END BuildFinished Send event + + /*let bes_event = build_event_stream::BuildEvent { + id: Some(build_event_stream::BuildEventId { id: Some(build_event_stream::build_event_id::Id::BuildFinished(build_event_stream::build_event_id::BuildFinishedId {})) }), + children: vec![], + last_message: true, + payload: Some(build_event_stream::build_event::Payload::Finished(build_event_stream::BuildFinished { + overall_success: command.is_success, + exit_code: Some( + if command.is_success { + build_event_stream::build_finished::ExitCode { + name: "SUCCESS".to_owned(), + code: 0, + } + } else { + build_event_stream::build_finished::ExitCode { + name: "FAILURE".to_owned(), + code: 1, + } + }), + finish_time_millis: 0, + finish_time: Some(event.timestamp().into()), + anomaly_report: None, + // TODO: convert Buck2 ErrorReport + failure_detail: None, + })), + }; + let bazel_event = v1::build_event::Event::BazelEvent(prost_types::Any { + type_url: "type.googleapis.com/build_event_stream.BuildEvent".to_owned(), + value: bes_event.encode_to_vec(), + }); + println!("END {:?}", bazel_event); + yield v1::BuildEvent { + event_time: Some(event.timestamp().into()), + event: Some(bazel_event), + };*/ + break; + }, + Some(_) => {}, + } + }, + Some(buck2_data::span_end_event::Data::ActionExecution(action)) => { + let configuration = match &action.key { + None => None, + Some(key) => match &key.owner { + None => None, + Some(owner) => match owner { + buck2_data::action_key::Owner::TargetLabel(target) => target.configuration.clone(), + buck2_data::action_key::Owner::TestTargetLabel(test) => test.configuration.clone(), + buck2_data::action_key::Owner::LocalResourceSetup(resource) => resource.configuration.clone(), + buck2_data::action_key::Owner::AnonTarget(_anon) => None, // TODO: execution configuration? 
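+                                    // Owners that carry a configuration are mapped just below via
+                                    // `.map(|configuration| build_event_id::ConfigurationId { id: configuration.full_name.clone() })`,
+                                    // so e.g. a target configured as `cfg:linux-x86_64#abcd1234`
+                                    // (hypothetical name) yields that string as the id.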
+                                    buck2_data::action_key::Owner::BxlKey(_bxl) => None,
+                                },
+                            },
+                        }.map(|configuration| build_event_id::ConfigurationId { id: configuration.full_name.clone() });
+                        let label = match &action.key {
+                            None => None,
+                            Some(key) => match &key.owner {
+                                None => None,
+                                Some(owner) => match owner {
+                                    buck2_data::action_key::Owner::TargetLabel(target) => target.label.clone(),
+                                    buck2_data::action_key::Owner::TestTargetLabel(test) => test.label.clone(),
+                                    buck2_data::action_key::Owner::LocalResourceSetup(resource) => resource.label.clone(),
+                                    buck2_data::action_key::Owner::AnonTarget(anon) => anon.name.clone(),
+                                    buck2_data::action_key::Owner::BxlKey(_bxl) => None, // TODO: handle bxl
+                                },
+                            },
+                        }.map(|label| format!("{}:{}", label.package, label.name));
+                        let mut label_str = "".to_owned() + &label.clone().unwrap_or("UNKNOWN".to_owned());
+                        label_str = label_str.replace("root//:", "//");
+                        //label_str = label_str.replace("//:","/");
+                        //label_str = "//".to_owned() + &label_str;
+                        let action_id = BuildEventId { id: Some(build_event_id::Id::ActionCompleted(build_event_id::ActionCompletedId {
+                            configuration: configuration.clone(),
+                            label: label_str,
+                            primary_output: "UNKNOWN".to_owned(),
+                        })) };
+                        let mnemonic = action.name.as_ref().map(|name| name.category.clone()).unwrap_or("UNKNOWN".to_owned());
+                        let success = !action.failed;
+                        let last_command_details = action.commands.last().and_then(|command| command.details.as_ref());
+                        let command_line: Vec<String> = match last_command_details.and_then(|command| command.command_kind.as_ref()).and_then(|kind| kind.command.as_ref()) {
+                            None => vec![],
+                            Some(buck2_data::command_execution_kind::Command::LocalCommand(command)) => command.argv.clone(),
+                            Some(_) => vec![], // TODO: handle remote, worker, and other commands
+                        };
+                        let exit_code = last_command_details.and_then(|details| details.signed_exit_code).unwrap_or(0);
+                        let stdout = last_command_details.map(|details| details.stdout.clone());
+                        let stderr = last_command_details.map(|details| details.stderr.clone());
+                        let stdout_file = stdout.map(|stdout| bazel_event_publisher_proto::build_event_stream::File {
+                            path_prefix: vec![],
+                            name: "stdout".to_owned(),
+                            digest: "".to_owned(),
+                            length: stdout.len() as i64,
+                            file: Some(bazel_event_publisher_proto::build_event_stream::file::File::Contents(stdout.into())),
+                        });
+                        let stderr_file = stderr.clone().map(|stderr| bazel_event_publisher_proto::build_event_stream::File {
+                            path_prefix: vec![],
+                            name: "stderr".to_owned(),
+                            digest: "".to_owned(),
+                            length: stderr.len() as i64,
+                            file: Some(bazel_event_publisher_proto::build_event_stream::file::File::Contents(stderr.into())),
+                        });
+                        let start_time = last_command_details.and_then(|details| details.metadata.as_ref().and_then(|metadata| metadata.start_time.clone()));
+                        //let wall_time = last_command_details.and_then(|details| details.metadata.as_ref().and_then(|metadata| metadata.wall_time.clone()));
+                        //let end_time = ...; // TODO: add start_time and wall_time
+                        match (label.as_ref(), configuration.as_ref()) {
+                            (Some(label), Some(configuration)) => {
+                                target_actions
+                                    .entry((label.clone(), configuration.id.clone()))
+                                    .or_default()
+                                    .push((action_id.clone(), success));
+                            },
+                            _ => {},
+                        }
+                        let failure_detail = if success { None } else {
+                            Some(bazel_event_publisher_proto::failure_details::FailureDetail {
+                                message: stderr.unwrap_or("UNKNOWN".to_owned()),
+                                category: None, // TODO
+                            })
+                        };
+                        let bes_event = build_event_stream::BuildEvent {
+                            id: Some(action_id),
+                            children: vec![],
+                            last_message: false,
+
payload: Some(build_event_stream::build_event::Payload::Action(build_event_stream::ActionExecuted { + success: success, + r#type: mnemonic, + exit_code: exit_code, + stdout: stdout_file, + stderr: stderr_file, + label: "".to_owned(), + configuration: None, + primary_output: None, + command_line: command_line, + action_metadata_logs: vec![], + failure_detail: failure_detail, + start_time: start_time, // TODO: should we deduct queue time? + end_time: None, + strategy_details: vec![], + })), + }; + let bazel_event = v1::build_event::Event::BazelEvent(prost_types::Any { + type_url: "type.googleapis.com/build_event_stream.BuildEvent".to_owned(), + value: bes_event.encode_to_vec(), + }); + yield v1::BuildEvent { + event_time: Some(event.timestamp().into()), + event: Some(bazel_event), + }; + }, + + + + + Some(_) => {}, + } + }, + buck2_data::buck_event::Data::Instant(instant) => { + //println!("INST {:?}", instant); + //println!("INST **"); + }, + buck2_data::buck_event::Data::Record(record) => { + println!("REC {:?}", record); + }, + } + } + } + } + + fn stream_build_tool_events>(trace_id: String, events: S) -> impl Stream { + println!("stream_build_tool_events - invocation_id: {:?}", trace_id); + + stream::iter(1..) + .zip(events) + .map(move |(sequence_number, event)| { + PublishBuildToolEventStreamRequest { + check_preceding_lifecycle_events_present: false, + notification_keywords: vec![], + ordered_build_event: Some(OrderedBuildEvent { + stream_id: Some(StreamId { + build_id: trace_id.clone(), + invocation_id: trace_id.clone(), + component: 1, + }), + sequence_number, + event: Some(event), + }), + project_id: "default".to_owned(), // TODO: needed + } + }) + } + + async fn event_sink_loop(recv: UnboundedReceiver>) -> anyhow::Result<()> { + println!("event_sink_loop starts"); + let mut handlers: HashMap, tokio::task::JoinHandle>)> = HashMap::new(); + println!("connect_build_event_server starting"); + let client = connect_build_event_server().await?; + println!("connect_build_event_server done"); + let mut recv = UnboundedReceiverStream::new(recv) + .flat_map(|v|stream::iter(v)); + println!("Starting loop for events"); + while let Some(event) = recv.next().await { + let dbg_trace_id = event.event.trace_id.clone(); + // println!("event_sink_loop event {:?}", &dbg_trace_id); + // println!("event_sink_loop event TRACE ID **"); + if let Some((send, _)) = handlers.get(&event.event.trace_id) { + // println!("event_sink_loop redirect {:?}", &dbg_trace_id); + //println!("event_sink_loop redirect {:?}", &event.event.trace_id); + send.send(event).unwrap_or_else(|e| println!("build event send failed {:?}", e)); + } else { + // println!("event_sink_loop new handler {:?}", event.event.trace_id); + let (send, recv) = mpsc::unbounded_channel::(); + let mut client = client.clone(); + let dbg_trace_id = dbg_trace_id.clone(); + let trace_id = event.event.trace_id.clone(); + let handler = tokio::spawn(async move { + let recv = UnboundedReceiverStream::new(recv); + let bazel_event = buck_to_bazel_events(recv, client.clone()); + let mut request = Request::new(stream_build_tool_events(trace_id, bazel_event)); + println!("new handler request {:?}", &dbg_trace_id); + let response = client.publish_build_tool_event_stream(request).await?; + println!("new handler response {:?}", &dbg_trace_id); + let mut inbound = response.into_inner(); + while let Some(ack) = inbound.message().await? { + // TODO: Handle ACKs properly and add retry. 
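+                        // A sketch of fuller ACK handling (hypothetical; `last_acked` and
+                        // `pending` do not exist in this code): the stream response carries
+                        // the acknowledged sequence number, so a retrying client could do
+                        //
+                        //     last_acked = last_acked.max(ack.sequence_number);
+                        //     pending.retain(|seq| *seq > last_acked);
+                        //
+                        // and re-send anything still pending after a reconnect.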
+                        println!("ACK {:?}", ack);
+                    }
+                    Ok(())
+                });
+                handlers.insert(event.event.trace_id.to_owned(), (send, handler));
+            }
+        }
+        println!("event_sink_loop recv CLOSED");
+        // TODO: handle closure and retry.
+        // close send handles and await all handlers.
+        let handlers: Vec<tokio::task::JoinHandle<anyhow::Result<()>>> =
+            handlers.into_values().map(|(_, handler)| handler).collect();
+        // TODO: handle retry.
+        try_join_all(handlers).await?.into_iter().collect::<anyhow::Result<Vec<()>>>()?;
+        Ok(())
+    }
+
+    impl RemoteEventSink {
+        pub fn new() -> anyhow::Result<RemoteEventSink> {
+            let (send, recv) = mpsc::unbounded_channel::<Vec<BuckEvent>>();
+            let handler = std::thread::Builder::new()
+                .name("buck-event-producer".to_owned())
+                .spawn({
+                    move || {
+                        let runtime = Builder::new_current_thread().enable_all().build().unwrap();
+                        runtime.block_on(event_sink_loop(recv)).unwrap();
+                    }
+                })
+                .context("spawning buck-event-producer thread")?;
+            Ok(RemoteEventSink {
+                _handler: handler,
+                send,
+            })
+        }
+        pub async fn send_now(&self, event: BuckEvent) {
+            self.send_messages_now(vec![event]).await;
+        }
+        pub async fn send_messages_now(&self, events: Vec<BuckEvent>) {
+            // TODO: does this make sense for BES? If so, implement send now variant.
+            if let Err(err) = self.send.send(events) {
+                // TODO: proper error handling
+                dbg!(err);
+            }
+        }
+        pub fn offer(&self, event: BuckEvent) {
+            if let Err(err) = self.send.send(vec![event]) {
+                // TODO: proper error handling
+                dbg!(err);
+            }
+        }
+    }
+
+    impl EventSink for RemoteEventSink {
+        fn send(&self, event: Event) {
+            match event {
+                Event::Buck(event) => {
+                    self.offer(event);
+                }
+                Event::CommandResult(..) => {},
+                Event::PartialResult(..) => {},
+            }
+        }
+    }
+
+    impl EventSinkWithStats for RemoteEventSink {
+        fn to_event_sync(self: Arc<Self>) -> Arc<dyn EventSink> {
+            self as _
+        }
+
+        fn stats(&self) -> EventSinkStats {
+            EventSinkStats {
+                successes: 0,
+                failures_invalid_request: 0,
+                failures_unauthorized: 0,
+                failures_rate_limited: 0,
+                failures_pushed_back: 0,
+                failures_enqueue_failed: 0,
+                failures_internal_error: 0,
+                failures_timed_out: 0,
+                failures_unknown: 0,
+                buffered: 0,
+                dropped: 0,
+                bytes_written: 0,
+            }
+        }
+    }
+}
+
+pub use fbcode::*;
+
+fn new_remote_event_sink_if_fbcode(
+    fb: FacebookInit,
+    buffer_size: usize,
+    retry_backoff: Duration,
+    retry_attempts: usize,
+    message_batch_size: Option<usize>,
+) -> anyhow::Result<Option<RemoteEventSink>> {
+    #[cfg(fbcode_build)]
+    {
+        Ok(Some(RemoteEventSink::new(
+            fb,
+            scribe_category()?,
+            buffer_size,
+            retry_backoff,
+            retry_attempts,
+            message_batch_size,
+        )?))
+    }
+    #[cfg(not(fbcode_build))]
+    {
+        let _ = (
+            fb,
+            buffer_size,
+            retry_backoff,
+            retry_attempts,
+            message_batch_size,
+        );
+        Ok(Some(RemoteEventSink::new()?))
+    }
+}
+
+pub fn new_remote_event_sink_if_enabled(
+    fb: FacebookInit,
+    buffer_size: usize,
+    retry_backoff: Duration,
+    retry_attempts: usize,
+    message_batch_size: Option<usize>,
+) -> anyhow::Result<Option<RemoteEventSink>> {
+    if is_enabled() {
+        new_remote_event_sink_if_fbcode(
+            fb,
+            buffer_size,
+            retry_backoff,
+            retry_attempts,
+            message_batch_size,
+        )
+    } else {
+        Ok(None)
+    }
+}
+
+/// Whether or not remote event logging is enabled for this process. It is on by default and can only be
+/// turned off explicitly via `disable()`.
+static REMOTE_EVENT_SINK_ENABLED: AtomicBool = AtomicBool::new(true);
+
+/// Returns whether this process should actually write to the remote sink, even if it is fully supported
+/// by the platform and binary.
+pub fn is_enabled() -> bool {
+    REMOTE_EVENT_SINK_ENABLED.load(Ordering::Relaxed)
+}
+
+/// Disables remote event logging for this process. Remote event logging is on by default and, if unwanted,
+/// must be disabled explicitly on startup.
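+// Illustrative startup wiring (an assumption, not shown in this change): a
+// process that wants local-only logging turns the sink off before constructing
+// it, after which `new_remote_event_sink_if_enabled(..)` returns `Ok(None)`:
+//
+//     crate::sink::remote::disable();
+//     assert!(!crate::sink::remote::is_enabled());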
+pub fn disable() { + REMOTE_EVENT_SINK_ENABLED.store(false, Ordering::Relaxed); +} diff --git a/app/buck2_events/src/sink/scribe.rs b/app/buck2_events/src/sink/scribe.rs deleted file mode 100644 index e63960eda1415..0000000000000 --- a/app/buck2_events/src/sink/scribe.rs +++ /dev/null @@ -1,951 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! A Sink for forwarding events directly to Scribe. -use std::sync::atomic::AtomicBool; -use std::sync::atomic::Ordering; -use std::time::Duration; - -use buck2_core::env_helper::EnvHelper; -use fbinit::FacebookInit; - -#[cfg(fbcode_build)] -mod fbcode { - - use std::sync::Arc; - use std::time::Duration; - use std::time::SystemTime; - - use buck2_data::InstantEvent; - use buck2_data::Location; - use buck2_data::StructuredError; - use buck2_util::truncate::truncate; - use fbinit::FacebookInit; - use prost::Message; - - use crate::metadata; - use crate::BuckEvent; - use crate::Event; - use crate::EventSink; - use crate::EventSinkStats; - use crate::EventSinkWithStats; - use crate::TraceId; - - // 1 MiB limit - static SCRIBE_MESSAGE_SIZE_LIMIT: usize = 1024 * 1024; - // 50k characters - static TRUNCATED_SCRIBE_MESSAGE_SIZE: usize = 50000; - - /// ThriftScribeSink is a ScribeSink backed by the Thrift-based client in the `buck2_scribe_client` crate. - pub struct ThriftScribeSink { - category: String, - client: scribe_client::ScribeClient, - } - - impl ThriftScribeSink { - /// Creates a new ThriftScribeSink that forwards messages onto the Thrift-backed Scribe client. - pub fn new( - fb: FacebookInit, - category: String, - buffer_size: usize, - retry_backoff: Duration, - retry_attempts: usize, - message_batch_size: Option, - ) -> anyhow::Result { - let client = scribe_client::ScribeClient::new( - fb, - buffer_size, - retry_backoff, - retry_attempts, - message_batch_size, - )?; - Ok(ThriftScribeSink { category, client }) - } - - // Send this event now, bypassing internal message queue. - pub async fn send_now(&self, event: BuckEvent) { - let message_key = event.trace_id().unwrap().hash(); - if let Some(bytes) = Self::encode_message(event, false) { - self.client - .send_now(scribe_client::Message { - category: self.category.clone(), - message: bytes, - message_key: Some(message_key), - }) - .await; - } - } - - // Send this event by placing it on the internal message queue. - pub fn offer(&self, event: BuckEvent) { - let message_key = event.trace_id().unwrap().hash(); - if let Some(bytes) = Self::encode_message(event, false) { - self.client.offer(scribe_client::Message { - category: self.category.clone(), - message: bytes, - message_key: Some(message_key), - }); - } - } - - // Encodes message into something scribe understands. - fn encode_message(mut event: BuckEvent, is_truncated: bool) -> Option> { - Self::smart_truncate_event(event.data_mut()); - let proto: Box = event.into(); - - // Add a header byte to indicate this is _not_ base64 encoding. 
- let mut buf = Vec::with_capacity(proto.encoded_len() + 1); - buf.push(b'!'); - let mut proto_bytes = proto.encode_to_vec(); - buf.append(&mut proto_bytes); - - if buf.len() > SCRIBE_MESSAGE_SIZE_LIMIT { - // if this BuckEvent is already a truncated one but the buffer byte size exceeds the limit, - // do not send Scribe another truncated version - if is_truncated { - return None; - } - let json = serde_json::to_string(&proto).unwrap(); - - Self::encode_message( - BuckEvent::new( - SystemTime::now(), - TraceId::new(), - None, - None, - buck2_data::buck_event::Data::Instant(InstantEvent { - data: Some( - StructuredError { - location: Some(Location { - file: file!().to_string(), - line: line!(), - column: column!(), - }), - payload: format!("Soft Error: oversized_scribe: Message is oversized. Event data: {}. Original message size: {}", truncate(&json, TRUNCATED_SCRIBE_MESSAGE_SIZE), - buf.len()), - metadata: metadata::collect(), - backtrace: Vec::new(), - quiet: false, - task: Some(true), - soft_error_category: Some("oversized_scribe".to_owned()), - daemon_in_memory_state_is_corrupted: false, - daemon_materializer_state_is_corrupted: false, - action_cache_is_corrupted: false, - } - .into(), - ), - }), - ), - true, - ) - } else { - Some(buf) - } - } - - fn smart_truncate_event(d: &mut buck2_data::buck_event::Data) { - use buck2_data::buck_event::Data; - - match d { - Data::SpanEnd(ref mut s) => { - use buck2_data::span_end_event::Data; - - match &mut s.data { - Some(Data::ActionExecution(ref mut action_execution)) => { - Self::truncate_action_execution_end(action_execution); - } - Some(Data::Command(ref mut command_end)) => { - Self::truncate_command_end(command_end, false); - } - Some(Data::TestEnd(ref mut test_end)) => { - Self::truncate_test_end(test_end); - } - _ => {} - }; - } - Data::Instant(ref mut inst) => { - use buck2_data::instant_event::Data; - match &mut inst.data { - Some(Data::TestResult(ref mut test_result)) => { - Self::truncate_test_result(test_result); - } - Some(Data::TargetPatterns(ref mut target_patterns)) => { - Self::truncate_target_patterns(&mut target_patterns.target_patterns); - } - _ => {} - } - } - Data::Record(ref mut rec) => { - if let Some(buck2_data::record_event::Data::InvocationRecord( - ref mut invocation_record, - )) = rec.data - { - if let Some(ref mut file_watcher_stats) = - invocation_record.file_watcher_stats - { - Self::truncate_file_watcher_stats(file_watcher_stats); - } - if let Some(ref mut resolved_target_patterns) = - invocation_record.parsed_target_patterns - { - Self::truncate_target_patterns( - &mut resolved_target_patterns.target_patterns, - ); - // Clear `unresolved_traget_patterns` to save bandwidth. It has less information - // than `resolved` one does, and will never be used if `resolved` one is available. 
- if let Some(ref mut command_end) = invocation_record.command_end { - Self::truncate_command_end(command_end, true); - } - } else if let Some(ref mut command_end) = invocation_record.command_end { - Self::truncate_command_end(command_end, false); - } - - const MAX_CLI_ARGS_BYTES: usize = 512 * 1024; - let orig_len = invocation_record.cli_args.len(); - let mut bytes: usize = 0; - for (index, arg) in invocation_record.cli_args.iter().enumerate() { - bytes += arg.len(); - if bytes > MAX_CLI_ARGS_BYTES { - invocation_record.cli_args.truncate(index); - invocation_record.cli_args.push(format!( - "<>", - index, orig_len - )); - break; - } - } - } - } - _ => {} - }; - } - - fn truncate_action_execution_end( - action_execution_end: &mut buck2_data::ActionExecutionEnd, - ) { - // truncate(...) can panic if asked to truncate too short. - const MIN_CMD_TRUNCATION: usize = 20; - let per_command_size_budget = - ((500 * 1024) / action_execution_end.commands.len().max(1)).max(MIN_CMD_TRUNCATION); - - let truncate_cmd = |cmd: &mut buck2_data::CommandExecution, truncate_all: bool| { - if let Some(details) = &mut cmd.details { - details.stderr = if truncate_all { - "<>".to_owned() - } else { - truncate(&details.stderr, per_command_size_budget) - }; - } - }; - - if let Some((last_command, retries)) = action_execution_end.commands.split_last_mut() { - for retried in retries { - truncate_cmd(retried, false); - } - // Current Scribe tailers don't read stderr of successful actions. - // Save some bytes. - truncate_cmd(last_command, !action_execution_end.failed); - } - } - - fn truncate_command_end( - command_end: &mut buck2_data::CommandEnd, - clear_target_patterns: bool, - ) { - use buck2_data::command_end::Data; - - if let Some(ref mut target_patterns) = match &mut command_end.data { - Some(Data::Build(build_command_end)) => { - Some(&mut build_command_end.unresolved_target_patterns) - } - Some(Data::Test(test_command_end)) => { - Some(&mut test_command_end.unresolved_target_patterns) - } - Some(Data::Install(install_command_end)) => { - Some(&mut install_command_end.unresolved_target_patterns) - } - Some(Data::Targets(targets_command_end)) => { - Some(&mut targets_command_end.unresolved_target_patterns) - } - _ => None, - } { - if clear_target_patterns { - target_patterns.clear(); - } else { - Self::truncate_target_patterns(target_patterns); - } - } - } - - fn truncate_file_watcher_stats(file_watcher_stats: &mut buck2_data::FileWatcherStats) { - const MAX_FILE_CHANGE_BYTES: usize = 100 * 1024; - let mut bytes: usize = 0; - for (index, ev) in file_watcher_stats.events.iter().enumerate() { - bytes += ev.path.len(); - if bytes > MAX_FILE_CHANGE_BYTES { - file_watcher_stats.events.truncate(index); - file_watcher_stats.incomplete_events_reason = Some(format!( - "Too long file change records ({} bytes, max {} bytes)", - bytes, MAX_FILE_CHANGE_BYTES - )); - break; - } - } - } - - fn truncate_test_result(test_result: &mut buck2_data::TestResult) { - const TRUNCATED_DETAILS_LENGTH: usize = 512 * 1024; // 512Kb - test_result.details = truncate(&test_result.details, TRUNCATED_DETAILS_LENGTH); - } - - fn truncate_test_end(test_end: &mut buck2_data::TestRunEnd) { - const MAX_TEST_NAMES_BYTES: usize = 512 * 1024; - if let Some(ref mut suite) = test_end.suite { - let orig_len = suite.test_names.len(); - let mut bytes: usize = 0; - for (index, test_name) in suite.test_names.iter().enumerate() { - bytes += test_name.len(); - if bytes > MAX_TEST_NAMES_BYTES { - suite.test_names.truncate(index); - let warn = format!("<>", 
index, orig_len); - suite.test_names.push(warn); - break; - } - } - } - } - - fn truncate_target_patterns(target_patterns: &mut Vec) { - const MAX_TARGET_PATTERNS_BYTES: usize = 512 * 1024; - let orig_len = target_patterns.len(); - let mut bytes: usize = 0; - for (index, target) in target_patterns.iter().enumerate() { - bytes += target.value.len(); - if bytes > MAX_TARGET_PATTERNS_BYTES { - target_patterns.truncate(index); - let warn = format!("<>", index, orig_len); - target_patterns.push(buck2_data::TargetPattern { value: warn }); - break; - } - } - } - } - - impl EventSink for ThriftScribeSink { - fn send(&self, event: Event) { - match event { - Event::Buck(event) => { - if should_send_event(event.data()) { - self.offer(event); - } - } - Event::CommandResult(..) => {} - Event::PartialResult(..) => {} - } - } - } - - impl EventSinkWithStats for ThriftScribeSink { - fn to_event_sync(self: Arc) -> Arc { - self as _ - } - - fn stats(&self) -> Option { - self.client - .export_counters() - .map(|counters| EventSinkStats { - successes: counters.successes, - failures: counters.failures, - buffered: counters.queue_depth, - dropped: counters.dropped, - }) - } - } - - fn should_send_event(d: &buck2_data::buck_event::Data) -> bool { - use buck2_data::buck_event::Data; - use buck2_data::ActionKind; - - match d { - Data::SpanStart(s) => { - use buck2_data::span_start_event::Data; - - match &s.data { - Some(Data::Command(..)) => true, - Some(Data::ActionExecution(a)) => match ActionKind::from_i32(a.kind) { - // Simple actions are not useful for most log analysis cases - Some(ActionKind::Copy) - | Some(ActionKind::SymlinkedDir) - | Some(ActionKind::Write) - | Some(ActionKind::WriteMacrosToFile) => false, - _ => true, - }, - None => false, - _ => false, - } - } - Data::SpanEnd(s) => { - use buck2_data::span_end_event::Data; - use buck2_data::ActionExecutionKind; - - match &s.data { - Some(Data::Command(..)) => true, - Some(Data::ActionExecution(a)) => { - match ActionExecutionKind::from_i32(a.execution_kind) { - // Not useful for most log analysis cases - Some(ActionExecutionKind::Simple) => false, - _ => true, - } - } - Some(Data::Analysis(..)) => true, - Some(Data::Load(..)) => true, - Some(Data::CacheUpload(..)) => true, - Some(Data::Materialization(..)) => true, - Some(Data::TestDiscovery(..)) => true, - Some(Data::TestEnd(..)) => true, - None => false, - _ => false, - } - } - Data::Instant(i) => { - use buck2_data::instant_event::Data; - - match i.data { - Some(Data::BuildGraphInfo(..)) => true, - Some(Data::RageResult(..)) => true, - Some(Data::ReSession(..)) => true, - Some(Data::StructuredError(..)) => true, - Some(Data::TestResult(..)) => true, - Some(Data::PersistSubprocess(..)) => true, - None => false, - _ => false, - } - } - Data::Record(r) => { - use buck2_data::record_event::Data; - - match r.data { - Some(Data::InvocationRecord(..)) => true, - Some(Data::BuildGraphStats(..)) => true, - None => false, - } - } - } - } - - #[cfg(test)] - mod tests { - use super::*; - - fn make_invocation_record( - data: buck2_data::InvocationRecord, - ) -> buck2_data::buck_event::Data { - buck2_data::buck_event::Data::Record(buck2_data::RecordEvent { - data: Some(buck2_data::record_event::Data::InvocationRecord(Box::new( - data, - ))), - }) - } - - fn make_action_execution_end( - data: buck2_data::ActionExecutionEnd, - ) -> buck2_data::buck_event::Data { - buck2_data::buck_event::Data::SpanEnd(buck2_data::SpanEndEvent { - data: Some(buck2_data::span_end_event::Data::ActionExecution(Box::new( - data, - ))), 
- ..Default::default() - }) - } - - fn make_command_end(data: buck2_data::CommandEnd) -> buck2_data::buck_event::Data { - buck2_data::buck_event::Data::SpanEnd(buck2_data::SpanEndEvent { - data: Some(buck2_data::span_end_event::Data::Command(data)), - ..Default::default() - }) - } - - fn make_build_command_end( - unresolved_target_patterns: Vec, - ) -> buck2_data::CommandEnd { - buck2_data::CommandEnd { - data: Some(buck2_data::command_end::Data::Build( - buck2_data::BuildCommandEnd { - unresolved_target_patterns, - }, - )), - ..Default::default() - } - } - - fn make_test_end(data: buck2_data::TestRunEnd) -> buck2_data::buck_event::Data { - buck2_data::buck_event::Data::SpanEnd(buck2_data::SpanEndEvent { - data: Some(buck2_data::span_end_event::Data::TestEnd(data)), - ..Default::default() - }) - } - - fn make_command_execution_with_stderr(stderr: String) -> buck2_data::CommandExecution { - buck2_data::CommandExecution { - details: Some(buck2_data::CommandExecutionDetails { - stderr, - ..Default::default() - }), - ..Default::default() - } - } - - #[test] - fn smart_truncate_resolved_target_patterns_clears_unresolved_one() { - let mut record = buck2_data::InvocationRecord::default(); - let mut record_expected = record.clone(); - - let resolved_target_patterns = vec![buck2_data::TargetPattern { - value: "some_resolved_target".to_owned(), - }]; - record.parsed_target_patterns = Some(buck2_data::ParsedTargetPatterns { - target_patterns: resolved_target_patterns.clone(), - }); - // resolved_target_patterns is expected to be unchanged. - record_expected.parsed_target_patterns = Some(buck2_data::ParsedTargetPatterns { - target_patterns: resolved_target_patterns, - }); - - let unresolved_target_patterns = vec![buck2_data::TargetPattern { - value: "some_unresolved_target".to_owned(), - }]; - record.command_end = Some(make_build_command_end(unresolved_target_patterns)); - - // unresolved_target_patterns is expected to be empty. - record_expected.command_end = Some(make_build_command_end(vec![])); - - let mut event_data = make_invocation_record(record); - let event_data_expected = make_invocation_record(record_expected); - - ThriftScribeSink::smart_truncate_event(&mut event_data); - - assert_eq!(event_data, event_data_expected); - } - - #[test] - fn smart_truncate_unresolved_target_used_when_resolved_one_unavailable() { - let mut record = buck2_data::InvocationRecord::default(); - let mut record_expected = record.clone(); - - record.parsed_target_patterns = None; - record_expected.parsed_target_patterns = None; - - let unresolved_target_patterns = vec![buck2_data::TargetPattern { - value: "some_unresolved_target".to_owned(), - }]; - let command_end = make_build_command_end(unresolved_target_patterns); - - record.command_end = Some(command_end.clone()); - // unresolved_target_patterns is expected to be unchanged. 
- record_expected.command_end = Some(command_end); - - let mut event_data = make_invocation_record(record); - let event_data_expected = make_invocation_record(record_expected); - - ThriftScribeSink::smart_truncate_event(&mut event_data); - - assert_eq!(event_data, event_data_expected); - } - - #[test] - fn smart_truncate_action_execution_end_one_last_command_truncated() { - let command_execution_with_stderr = - make_command_execution_with_stderr("this is a test".to_owned()); - let command_execution_stderr_omitted = - make_command_execution_with_stderr("<>".to_owned()); - - let action_execution_end_with_stderrs = buck2_data::ActionExecutionEnd { - commands: vec![command_execution_with_stderr], - ..Default::default() - }; - let action_execution_end_last_stderr_omitted = buck2_data::ActionExecutionEnd { - commands: vec![command_execution_stderr_omitted], - ..Default::default() - }; - let mut event_data = make_action_execution_end(action_execution_end_with_stderrs); - let event_data_expected = - make_action_execution_end(action_execution_end_last_stderr_omitted); - - ThriftScribeSink::smart_truncate_event(&mut event_data); - - assert_eq!(event_data, event_data_expected); - } - - #[test] - fn smart_truncate_action_execution_end_long_stderr_command_truncated() { - let command_execution_with_stderr = - make_command_execution_with_stderr("this is a test".to_owned()); - let mut over_sized_str = "0123456789".repeat(10 * 1024); - over_sized_str.push_str("0123456789"); // 100k + 10; 10-byte over - let command_execution_with_long_stderr = - make_command_execution_with_stderr(over_sized_str); - let mut omitted_str = "0123456789".repeat(10 * 1024); - omitted_str.replace_range((50 * 1024 - 6)..(50 * 1024 + 6), "<>"); - let command_execution_stderr_partially_omitted = - make_command_execution_with_stderr(omitted_str); - let command_execution_stderr_all_omitted = - make_command_execution_with_stderr("<>".to_owned()); - - let action_execution_end_with_stderrs = buck2_data::ActionExecutionEnd { - commands: vec![ - command_execution_with_stderr.clone(), - command_execution_with_long_stderr.clone(), - command_execution_with_stderr.clone(), - command_execution_with_long_stderr, - command_execution_with_stderr.clone(), - ], - ..Default::default() - }; - let action_execution_end_last_stderr_omitted = buck2_data::ActionExecutionEnd { - commands: vec![ - command_execution_with_stderr.clone(), - command_execution_stderr_partially_omitted.clone(), - command_execution_with_stderr, - command_execution_stderr_partially_omitted, - command_execution_stderr_all_omitted, - ], - ..Default::default() - }; - let mut event_data = make_action_execution_end(action_execution_end_with_stderrs); - let event_data_expected = - make_action_execution_end(action_execution_end_last_stderr_omitted); - - ThriftScribeSink::smart_truncate_event(&mut event_data); - - assert_eq!(event_data, event_data_expected); - } - - #[test] - fn smart_truncate_build_command_end_short_target_patterns_not_truncated() { - let unresolved_target_patterns = vec![ - buck2_data::TargetPattern { - value: "hello".to_owned(), - }, - buck2_data::TargetPattern { - value: "world".to_owned(), - }, - buck2_data::TargetPattern { - value: "!\n".to_owned(), - }, - ]; - let command_end = make_build_command_end(unresolved_target_patterns); - - let mut event_data = make_command_end(command_end); - let event_data_expected = event_data.clone(); - - ThriftScribeSink::smart_truncate_event(&mut event_data); - - assert_eq!(event_data, event_data_expected); - } - - #[test] - fn 
smart_truncate_build_command_end_long_target_patterns_truncated() { - let unresolved_target_patterns = vec![ - buck2_data::TargetPattern { - value: "0123456789".repeat(20 * 1024), - }, - buck2_data::TargetPattern { - value: "0123456789".repeat(20 * 1024), - }, - buck2_data::TargetPattern { - value: "0123456789".repeat(20 * 1024), // 600k in total; 88k-byte over - }, - ]; - let command_end = make_build_command_end(unresolved_target_patterns); - - let unresolved_target_patterns_truncated = vec![ - buck2_data::TargetPattern { - value: "0123456789".repeat(20 * 1024), - }, - buck2_data::TargetPattern { - value: "0123456789".repeat(20 * 1024), - }, - buck2_data::TargetPattern { - value: "<>".to_owned(), - }, - ]; - let command_end_truncated = - make_build_command_end(unresolved_target_patterns_truncated); - - let mut event_data = make_command_end(command_end); - let event_data_expected = make_command_end(command_end_truncated); - - ThriftScribeSink::smart_truncate_event(&mut event_data); - - assert_eq!(event_data, event_data_expected); - } - - #[test] - fn smart_truncate_long_file_watcher_stats_truncated() { - let file_watcher_event = buck2_data::FileWatcherEvent { - path: "0123456789".repeat(3 * 1024), - ..Default::default() - }; - let file_watcher_stats = buck2_data::FileWatcherStats { - events: vec![ - file_watcher_event.clone(), - file_watcher_event.clone(), - file_watcher_event.clone(), - file_watcher_event.clone(), // 120k in total; 20k-byte over - ], - ..Default::default() - }; - let file_watcher_stats_truncated = buck2_data::FileWatcherStats { - events: vec![ - file_watcher_event.clone(), - file_watcher_event.clone(), - file_watcher_event, - ], - incomplete_events_reason: Some(format!( - "Too long file change records ({} bytes, max {} bytes)", - 120 * 1024, - 100 * 1024 - )), - ..Default::default() - }; - let record = buck2_data::InvocationRecord { - file_watcher_stats: Some(file_watcher_stats), - ..Default::default() - }; - let record_truncated = buck2_data::InvocationRecord { - file_watcher_stats: Some(file_watcher_stats_truncated), - ..Default::default() - }; - let mut event_data = make_invocation_record(record); - let event_data_expected = make_invocation_record(record_truncated); - - ThriftScribeSink::smart_truncate_event(&mut event_data); - - assert_eq!(event_data, event_data_expected); - } - - #[test] - fn smart_truncate_short_file_watcher_stats_not_truncated() { - let file_watcher_event = buck2_data::FileWatcherEvent { - path: "this is a test".to_owned(), - ..Default::default() - }; - let file_watcher_stats = buck2_data::FileWatcherStats { - events: vec![ - file_watcher_event.clone(), - file_watcher_event.clone(), - file_watcher_event, - ], - ..Default::default() - }; - let record = buck2_data::InvocationRecord { - file_watcher_stats: Some(file_watcher_stats), - ..Default::default() - }; - let mut event_data = make_invocation_record(record); - let event_data_expected = event_data.clone(); - - ThriftScribeSink::smart_truncate_event(&mut event_data); - - assert_eq!(event_data, event_data_expected); - } - - #[test] - fn smart_truncate_invocation_record_long_cli_args_truncated() { - let cli_args = vec![ - "0123456789".repeat(20 * 1024), - "0123456789".repeat(20 * 1024), - "0123456789".repeat(20 * 1024), // 600k in total; 88k-byte over - ]; - let cli_args_truncated = vec![ - "0123456789".repeat(20 * 1024), - "0123456789".repeat(20 * 1024), - "<>".to_owned(), - ]; - - let record = buck2_data::InvocationRecord { - cli_args, - ..Default::default() - }; - let record_truncated = 
buck2_data::InvocationRecord { - cli_args: cli_args_truncated, - ..Default::default() - }; - - let mut event_data = make_invocation_record(record); - let event_data_expected = make_invocation_record(record_truncated); - - ThriftScribeSink::smart_truncate_event(&mut event_data); - - assert_eq!(event_data, event_data_expected); - } - - #[test] - fn smart_truncate_invocation_record_short_cli_args_truncated() { - let cli_args = vec!["this is".to_owned(), "a test".to_owned()]; - - let record = buck2_data::InvocationRecord { - cli_args, - ..Default::default() - }; - - let mut event_data = make_invocation_record(record); - let event_data_expected = event_data.clone(); - - ThriftScribeSink::smart_truncate_event(&mut event_data); - - assert_eq!(event_data, event_data_expected); - } - - #[test] - fn smart_truncate_test_end_long_test_names_truncated() { - let test_names = vec![ - "0123456789".repeat(20 * 1024), - "0123456789".repeat(20 * 1024), - "0123456789".repeat(20 * 1024), // 600k in total; 88k-byte over - ]; - let test_names_truncated = vec![ - "0123456789".repeat(20 * 1024), - "0123456789".repeat(20 * 1024), - "<>".to_owned(), - ]; - - let test_end = buck2_data::TestRunEnd { - suite: Some(buck2_data::TestSuite { - test_names, - ..Default::default() - }), - ..Default::default() - }; - let test_end_truncated = buck2_data::TestRunEnd { - suite: Some(buck2_data::TestSuite { - test_names: test_names_truncated, - ..Default::default() - }), - ..Default::default() - }; - - let mut event_data = make_test_end(test_end); - let event_data_expected = make_test_end(test_end_truncated); - - ThriftScribeSink::smart_truncate_event(&mut event_data); - - assert_eq!(event_data, event_data_expected); - } - } -} - -#[cfg(not(fbcode_build))] -mod fbcode { - use std::sync::Arc; - - use crate::BuckEvent; - use crate::Event; - use crate::EventSink; - use crate::EventSinkStats; - use crate::EventSinkWithStats; - - pub struct ThriftScribeSink; - - impl ThriftScribeSink { - pub async fn send_now(&self, _event: BuckEvent) {} - } - - impl EventSink for ThriftScribeSink { - fn send(&self, _event: Event) {} - } - - impl EventSinkWithStats for ThriftScribeSink { - fn to_event_sync(self: Arc) -> Arc { - self as _ - } - - fn stats(&self) -> Option { - None - } - } -} - -pub use fbcode::*; - -fn new_thrift_scribe_sink_if_fbcode( - fb: FacebookInit, - buffer_size: usize, - retry_backoff: Duration, - retry_attempts: usize, - message_batch_size: Option, -) -> anyhow::Result> { - #[cfg(fbcode_build)] - { - Ok(Some(ThriftScribeSink::new( - fb, - scribe_category()?, - buffer_size, - retry_backoff, - retry_attempts, - message_batch_size, - )?)) - } - #[cfg(not(fbcode_build))] - { - let _ = ( - fb, - buffer_size, - retry_backoff, - retry_attempts, - message_batch_size, - ); - Ok(None) - } -} - -pub fn new_thrift_scribe_sink_if_enabled( - fb: FacebookInit, - buffer_size: usize, - retry_backoff: Duration, - retry_attempts: usize, - message_batch_size: Option, -) -> anyhow::Result> { - if is_enabled() { - new_thrift_scribe_sink_if_fbcode( - fb, - buffer_size, - retry_backoff, - retry_attempts, - message_batch_size, - ) - } else { - Ok(None) - } -} - -/// Whether or not Scribe logging is enabled for this process. It must be explicitly disabled via `disable()`. -static SCRIBE_ENABLED: AtomicBool = AtomicBool::new(true); - -/// Returns whether this process should actually write to Scribe, even if it is fully supported by the platform and -/// binary. 
-pub fn is_enabled() -> bool { - SCRIBE_ENABLED.load(Ordering::Relaxed) -} - -/// Disables Scribe logging for this process. Scribe logging is on by default and stays on unless it is -/// explicitly disabled, typically during startup. -pub fn disable() { - SCRIBE_ENABLED.store(false, Ordering::Relaxed); -} - -pub fn scribe_category() -> anyhow::Result<String> { - const DEFAULT_SCRIBE_CATEGORY: &str = "buck2_events"; - // Note that both daemon and client are emitting events, and that changing this variable has - // no effect on the daemon until buckd is restarted but has effect on the client. - static SCRIBE_CATEGORY: EnvHelper<String> = EnvHelper::new("BUCK2_SCRIBE_CATEGORY"); - Ok(SCRIBE_CATEGORY - .get()? - .map_or_else(|| DEFAULT_SCRIBE_CATEGORY.to_owned(), |c| c.clone())) -} diff --git a/app/buck2_events/src/sink/smart_truncate_event.rs b/app/buck2_events/src/sink/smart_truncate_event.rs new file mode 100644 index 0000000000000..13ba8f9d8b2a8 --- /dev/null +++ b/app/buck2_events/src/sink/smart_truncate_event.rs @@ -0,0 +1,628 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +fn truncate(s: &str, max_bytes: usize) -> String { + // Scuba has a tendency to OOM on queries if we ever report very long strings, so truncate them. + // The 20 KiB floor means that even callers passing a smaller budget still allow up to that much. + const MAX_STRING_BYTES: usize = 20 * 1024; + + buck2_util::truncate::truncate(s, max_bytes.max(MAX_STRING_BYTES)) +} + +#[cfg_attr(not(fbcode_build), allow(dead_code))] +pub(crate) fn smart_truncate_event(d: &mut buck2_data::buck_event::Data) { + use buck2_data::buck_event::Data; + + match d { + Data::SpanEnd(ref mut s) => { + use buck2_data::span_end_event::Data; + + match &mut s.data { + Some(Data::ActionExecution(ref mut action_execution)) => { + truncate_action_execution_end(action_execution); + } + Some(Data::Command(ref mut command_end)) => { + truncate_command_end(command_end, false); + } + Some(Data::TestEnd(ref mut test_end)) => { + truncate_test_end(test_end); + } + _ => {} + }; + } + Data::Instant(ref mut inst) => { + use buck2_data::instant_event::Data; + match &mut inst.data { + Some(Data::TargetPatterns(ref mut target_patterns)) => { + truncate_target_patterns(&mut target_patterns.target_patterns); + } + _ => {} + } + } + Data::Record(ref mut rec) => { + if let Some(buck2_data::record_event::Data::InvocationRecord(invocation_record)) = + &mut rec.data + { + truncate_invocation_record(invocation_record) + } + } + _ => {} + }; +} + +fn truncate_invocation_record(invocation_record: &mut buck2_data::InvocationRecord) { + // FIXME(JakobDegen): The sum of the per-field limits adds up to more than the 1 MB Scribe limit + if let Some(ref mut file_watcher_stats) = invocation_record.file_watcher_stats { + truncate_file_watcher_stats(file_watcher_stats); + } + if let Some(ref mut resolved_target_patterns) = invocation_record.parsed_target_patterns { + truncate_target_patterns(&mut resolved_target_patterns.target_patterns); + // Clear `unresolved_target_patterns` to save bandwidth. It has less information + // than the resolved patterns do, and is never used when the resolved ones are available. 
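Every truncation helper in this new file shares one shape: accumulate byte length over a list, cut at the first element that busts the budget, and append a human-readable marker recording how much was kept. A generic, runnable sketch of that shape (the budget value and marker text are illustrative):

/// Truncate `items` to roughly `max_bytes`, replacing the tail with a marker.
fn truncate_by_bytes(items: &mut Vec<String>, max_bytes: usize) {
    let orig_len = items.len();
    let mut bytes = 0usize;
    for (index, item) in items.iter().enumerate() {
        bytes += item.len();
        if bytes > max_bytes {
            items.truncate(index);
            items.push(format!("<<Truncated ({} / {})>>", index, orig_len));
            return;
        }
    }
}

fn main() {
    let mut items = vec!["a".repeat(10), "b".repeat(10), "c".repeat(10)];
    truncate_by_bytes(&mut items, 15);
    // The second element pushes the running total to 20 bytes, over the 15-byte budget.
    assert_eq!(items, vec!["a".repeat(10), "<<Truncated (1 / 3)>>".to_owned()]);
}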
+ if let Some(ref mut command_end) = invocation_record.command_end { + truncate_command_end(command_end, true); + } + } else if let Some(ref mut command_end) = invocation_record.command_end { + truncate_command_end(command_end, false); + } + + const MAX_CLI_ARGS_BYTES: usize = 512 * 1024; + let orig_len = invocation_record.cli_args.len(); + let mut bytes: usize = 0; + for (index, arg) in invocation_record.cli_args.iter().enumerate() { + bytes += arg.len(); + if bytes > MAX_CLI_ARGS_BYTES { + invocation_record.cli_args.truncate(index); + invocation_record + .cli_args + .push(format!("<<Truncated ({} / {})>>", index, orig_len)); + break; + } + } + + const MAX_ERROR_REPORT_BYTES: usize = 512 * 1024; + let max_per_report = MAX_ERROR_REPORT_BYTES / invocation_record.errors.len().max(1); + for error in &mut invocation_record.errors { + error.message = truncate(&error.message, max_per_report / 2); + if let Some(telemetry_message) = &mut error.telemetry_message { + *telemetry_message = truncate(telemetry_message, max_per_report / 2); + } + } +} + +fn truncate_action_execution_end(action_execution_end: &mut buck2_data::ActionExecutionEnd) { + let per_command_size_budget = (500 * 1024) / action_execution_end.commands.len().max(1); + + let truncate_cmd = |cmd: &mut buck2_data::CommandExecution, truncate_all: bool| { + if let Some(details) = &mut cmd.details { + details.stderr = if truncate_all { + "<<omitted>>".to_owned() + } else { + truncate(&details.stderr, per_command_size_budget) + }; + } + }; + + if let Some((last_command, retries)) = action_execution_end.commands.split_last_mut() { + for retried in retries { + truncate_cmd(retried, false); + } + // Current Scribe tailers don't read stderr of successful actions. + // Save some bytes. + truncate_cmd(last_command, !action_execution_end.failed); + } +} + +fn truncate_command_end(command_end: &mut buck2_data::CommandEnd, clear_target_patterns: bool) { + use buck2_data::command_end::Data; + + if let Some(ref mut target_patterns) = match &mut command_end.data { + Some(Data::Build(build_command_end)) => { + Some(&mut build_command_end.unresolved_target_patterns) + } + Some(Data::Test(test_command_end)) => { + Some(&mut test_command_end.unresolved_target_patterns) + } + Some(Data::Install(install_command_end)) => { + Some(&mut install_command_end.unresolved_target_patterns) + } + Some(Data::Targets(targets_command_end)) => { + Some(&mut targets_command_end.unresolved_target_patterns) + } + _ => None, + } { + if clear_target_patterns { + target_patterns.clear(); + } else { + truncate_target_patterns(target_patterns); + } + } +} + +fn truncate_file_watcher_stats(file_watcher_stats: &mut buck2_data::FileWatcherStats) { + const MAX_FILE_CHANGE_BYTES: usize = 100 * 1024; + let mut bytes: usize = 0; + for (index, ev) in file_watcher_stats.events.iter().enumerate() { + bytes += ev.path.len(); + if bytes > MAX_FILE_CHANGE_BYTES { + file_watcher_stats.events.truncate(index); + file_watcher_stats.incomplete_events_reason = Some(format!( + "Too long file change records ({} bytes, max {} bytes)", + bytes, MAX_FILE_CHANGE_BYTES + )); + break; + } + } +} + +fn truncate_test_end(test_end: &mut buck2_data::TestRunEnd) { + const MAX_TEST_NAMES_BYTES: usize = 512 * 1024; + if let Some(ref mut suite) = test_end.suite { + let orig_len = suite.test_names.len(); + let mut bytes: usize = 0; + for (index, test_name) in suite.test_names.iter().enumerate() { + bytes += test_name.len(); + if bytes > MAX_TEST_NAMES_BYTES { + suite.test_names.truncate(index); + let warn = format!("<<Truncated ({} / {})>>", index, orig_len); + 
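To make the budget arithmetic in these helpers concrete: an action with five command executions shares 500 KiB of stderr (100 KiB per command), and two error reports share 512 KiB, halved again between `message` and `telemetry_message`. A quick check of those numbers, with the constants copied from the functions above:

fn main() {
    // Stderr budget per command for an action that ran five commands.
    let commands = 5usize;
    let per_command = (500 * 1024) / commands.max(1);
    assert_eq!(per_command, 100 * 1024);

    // Error-report budget: split across two reports, then between the
    // two string fields of each report.
    let errors = 2usize;
    let max_per_report = (512 * 1024) / errors.max(1);
    assert_eq!(max_per_report / 2, 128 * 1024);
}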
suite.test_names.push(warn); + break; + } + } + } +} + +fn truncate_target_patterns(target_patterns: &mut Vec<buck2_data::TargetPattern>) { + const MAX_TARGET_PATTERNS_BYTES: usize = 512 * 1024; + let orig_len = target_patterns.len(); + let mut bytes: usize = 0; + for (index, target) in target_patterns.iter().enumerate() { + bytes += target.value.len(); + if bytes > MAX_TARGET_PATTERNS_BYTES { + target_patterns.truncate(index); + let warn = format!("<<Truncated ({} / {})>>", index, orig_len); + target_patterns.push(buck2_data::TargetPattern { value: warn }); + break; + } + } +} + +#[cfg(test)] +mod tests { + use crate::sink::smart_truncate_event::smart_truncate_event; + + fn make_invocation_record(data: buck2_data::InvocationRecord) -> buck2_data::buck_event::Data { + buck2_data::buck_event::Data::Record(buck2_data::RecordEvent { + data: Some(buck2_data::record_event::Data::InvocationRecord(Box::new( + data, + ))), + }) + } + + fn make_action_execution_end( + data: buck2_data::ActionExecutionEnd, + ) -> buck2_data::buck_event::Data { + buck2_data::buck_event::Data::SpanEnd(buck2_data::SpanEndEvent { + data: Some(buck2_data::span_end_event::Data::ActionExecution(Box::new( + data, + ))), + ..Default::default() + }) + } + + fn make_command_end(data: buck2_data::CommandEnd) -> buck2_data::buck_event::Data { + buck2_data::buck_event::Data::SpanEnd(buck2_data::SpanEndEvent { + data: Some(buck2_data::span_end_event::Data::Command(data)), + ..Default::default() + }) + } + + fn make_build_command_end( + unresolved_target_patterns: Vec<buck2_data::TargetPattern>, + ) -> buck2_data::CommandEnd { + buck2_data::CommandEnd { + data: Some(buck2_data::command_end::Data::Build( + buck2_data::BuildCommandEnd { + unresolved_target_patterns, + }, + )), + ..Default::default() + } + } + + fn make_test_end(data: buck2_data::TestRunEnd) -> buck2_data::buck_event::Data { + buck2_data::buck_event::Data::SpanEnd(buck2_data::SpanEndEvent { + data: Some(buck2_data::span_end_event::Data::TestEnd(data)), + ..Default::default() + }) + } + + fn make_command_execution_with_stderr(stderr: String) -> buck2_data::CommandExecution { + buck2_data::CommandExecution { + details: Some(buck2_data::CommandExecutionDetails { + stderr, + ..Default::default() + }), + ..Default::default() + } + } + + #[test] + fn smart_truncate_resolved_target_patterns_clears_unresolved_one() { + let mut record = buck2_data::InvocationRecord::default(); + let mut record_expected = record.clone(); + + let resolved_target_patterns = vec![buck2_data::TargetPattern { + value: "some_resolved_target".to_owned(), + }]; + record.parsed_target_patterns = Some(buck2_data::ParsedTargetPatterns { + target_patterns: resolved_target_patterns.clone(), + }); + // resolved_target_patterns is expected to be unchanged. + record_expected.parsed_target_patterns = Some(buck2_data::ParsedTargetPatterns { + target_patterns: resolved_target_patterns, + }); + + let unresolved_target_patterns = vec![buck2_data::TargetPattern { + value: "some_unresolved_target".to_owned(), + }]; + record.command_end = Some(make_build_command_end(unresolved_target_patterns)); + + // unresolved_target_patterns is expected to be empty. 
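All of the tests in this module follow the same arrange/act/assert shape, and the `..._not_truncated` cases rely on `smart_truncate_event` being a no-op when every field is under budget. A condensed sketch of that shape, assuming the helper constructors above:

#[test]
fn smart_truncate_is_noop_for_small_records() {
    let record = buck2_data::InvocationRecord {
        cli_args: vec!["build".to_owned(), "//some:target".to_owned()],
        ..Default::default()
    };
    let mut event_data = make_invocation_record(record);
    let expected = event_data.clone();

    smart_truncate_event(&mut event_data);

    // Nothing exceeded a budget, so the event is unchanged.
    assert_eq!(event_data, expected);
}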
+ record_expected.command_end = Some(make_build_command_end(vec![])); + + let mut event_data = make_invocation_record(record); + let event_data_expected = make_invocation_record(record_expected); + + smart_truncate_event(&mut event_data); + + assert_eq!(event_data, event_data_expected); + } + + #[test] + fn smart_truncate_unresolved_target_used_when_resolved_one_unavailable() { + let mut record = buck2_data::InvocationRecord::default(); + let mut record_expected = record.clone(); + + record.parsed_target_patterns = None; + record_expected.parsed_target_patterns = None; + + let unresolved_target_patterns = vec![buck2_data::TargetPattern { + value: "some_unresolved_target".to_owned(), + }]; + let command_end = make_build_command_end(unresolved_target_patterns); + + record.command_end = Some(command_end.clone()); + // unresolved_target_patterns is expected to be unchanged. + record_expected.command_end = Some(command_end); + + let mut event_data = make_invocation_record(record); + let event_data_expected = make_invocation_record(record_expected); + + smart_truncate_event(&mut event_data); + + assert_eq!(event_data, event_data_expected); + } + + #[test] + fn smart_truncate_action_execution_end_one_last_command_truncated() { + let command_execution_with_stderr = + make_command_execution_with_stderr("this is a test".to_owned()); + let command_execution_stderr_omitted = + make_command_execution_with_stderr("<<omitted>>".to_owned()); + + let action_execution_end_with_stderrs = buck2_data::ActionExecutionEnd { + commands: vec![command_execution_with_stderr], + ..Default::default() + }; + let action_execution_end_last_stderr_omitted = buck2_data::ActionExecutionEnd { + commands: vec![command_execution_stderr_omitted], + ..Default::default() + }; + let mut event_data = make_action_execution_end(action_execution_end_with_stderrs); + let event_data_expected = + make_action_execution_end(action_execution_end_last_stderr_omitted); + + smart_truncate_event(&mut event_data); + + assert_eq!(event_data, event_data_expected); + } + + #[test] + fn smart_truncate_action_execution_end_long_stderr_command_truncated() { + let command_execution_with_stderr = + make_command_execution_with_stderr("this is a test".to_owned()); + let mut over_sized_str = "0123456789".repeat(10 * 1024); + over_sized_str.push_str("0123456789"); // 100k + 10; 10-byte over + let command_execution_with_long_stderr = make_command_execution_with_stderr(over_sized_str); + let mut omitted_str = "0123456789".repeat(10 * 1024); + omitted_str.replace_range((50 * 1024 - 6)..(50 * 1024 + 6), "<<omitted>>"); + let command_execution_stderr_partially_omitted = + make_command_execution_with_stderr(omitted_str); + let command_execution_stderr_all_omitted = + make_command_execution_with_stderr("<<omitted>>".to_owned()); + + let action_execution_end_with_stderrs = buck2_data::ActionExecutionEnd { + commands: vec![ + command_execution_with_stderr.clone(), + command_execution_with_long_stderr.clone(), + command_execution_with_stderr.clone(), + command_execution_with_long_stderr, + command_execution_with_stderr.clone(), + ], + ..Default::default() + }; + let action_execution_end_last_stderr_omitted = buck2_data::ActionExecutionEnd { + commands: vec![ + command_execution_with_stderr.clone(), + command_execution_stderr_partially_omitted.clone(), + command_execution_with_stderr, + command_execution_stderr_partially_omitted, + command_execution_stderr_all_omitted, + ], + ..Default::default() + }; + let mut event_data = make_action_execution_end(action_execution_end_with_stderrs); + let 
event_data_expected = + make_action_execution_end(action_execution_end_last_stderr_omitted); + + smart_truncate_event(&mut event_data); + + assert_eq!(event_data, event_data_expected); + } + + #[test] + fn smart_truncate_build_command_end_short_target_patterns_not_truncated() { + let unresolved_target_patterns = vec![ + buck2_data::TargetPattern { + value: "hello".to_owned(), + }, + buck2_data::TargetPattern { + value: "world".to_owned(), + }, + buck2_data::TargetPattern { + value: "!\n".to_owned(), + }, + ]; + let command_end = make_build_command_end(unresolved_target_patterns); + + let mut event_data = make_command_end(command_end); + let event_data_expected = event_data.clone(); + + smart_truncate_event(&mut event_data); + + assert_eq!(event_data, event_data_expected); + } + + #[test] + fn smart_truncate_build_command_end_long_target_patterns_truncated() { + let unresolved_target_patterns = vec![ + buck2_data::TargetPattern { + value: "0123456789".repeat(20 * 1024), + }, + buck2_data::TargetPattern { + value: "0123456789".repeat(20 * 1024), + }, + buck2_data::TargetPattern { + value: "0123456789".repeat(20 * 1024), // 600k in total; 88k-byte over + }, + ]; + let command_end = make_build_command_end(unresolved_target_patterns); + + let unresolved_target_patterns_truncated = vec![ + buck2_data::TargetPattern { + value: "0123456789".repeat(20 * 1024), + }, + buck2_data::TargetPattern { + value: "0123456789".repeat(20 * 1024), + }, + buck2_data::TargetPattern { + value: "<<Truncated (2 / 3)>>".to_owned(), + }, + ]; + let command_end_truncated = make_build_command_end(unresolved_target_patterns_truncated); + + let mut event_data = make_command_end(command_end); + let event_data_expected = make_command_end(command_end_truncated); + + smart_truncate_event(&mut event_data); + + assert_eq!(event_data, event_data_expected); + } + + #[test] + fn smart_truncate_long_file_watcher_stats_truncated() { + let file_watcher_event = buck2_data::FileWatcherEvent { + path: "0123456789".repeat(3 * 1024), + ..Default::default() + }; + let file_watcher_stats = buck2_data::FileWatcherStats { + events: vec![ + file_watcher_event.clone(), + file_watcher_event.clone(), + file_watcher_event.clone(), + file_watcher_event.clone(), // 120k in total; 20k-byte over + ], + ..Default::default() + }; + let file_watcher_stats_truncated = buck2_data::FileWatcherStats { + events: vec![ + file_watcher_event.clone(), + file_watcher_event.clone(), + file_watcher_event, + ], + incomplete_events_reason: Some(format!( + "Too long file change records ({} bytes, max {} bytes)", + 120 * 1024, + 100 * 1024 + )), + ..Default::default() + }; + let record = buck2_data::InvocationRecord { + file_watcher_stats: Some(file_watcher_stats), + ..Default::default() + }; + let record_truncated = buck2_data::InvocationRecord { + file_watcher_stats: Some(file_watcher_stats_truncated), + ..Default::default() + }; + let mut event_data = make_invocation_record(record); + let event_data_expected = make_invocation_record(record_truncated); + + smart_truncate_event(&mut event_data); + + assert_eq!(event_data, event_data_expected); + } + + #[test] + fn smart_truncate_short_file_watcher_stats_not_truncated() { + let file_watcher_event = buck2_data::FileWatcherEvent { + path: "this is a test".to_owned(), + ..Default::default() + }; + let file_watcher_stats = buck2_data::FileWatcherStats { + events: vec![ + file_watcher_event.clone(), + file_watcher_event.clone(), + file_watcher_event, + ], + ..Default::default() + }; + let record = buck2_data::InvocationRecord { + 
file_watcher_stats: Some(file_watcher_stats), + ..Default::default() + }; + let mut event_data = make_invocation_record(record); + let event_data_expected = event_data.clone(); + + smart_truncate_event(&mut event_data); + + assert_eq!(event_data, event_data_expected); + } + + #[test] + fn smart_truncate_invocation_record_long_cli_args_truncated() { + let cli_args = vec![ + "0123456789".repeat(20 * 1024), + "0123456789".repeat(20 * 1024), + "0123456789".repeat(20 * 1024), // 600k in total; 88k-byte over + ]; + let cli_args_truncated = vec![ + "0123456789".repeat(20 * 1024), + "0123456789".repeat(20 * 1024), + "<<Truncated (2 / 3)>>".to_owned(), + ]; + + let record = buck2_data::InvocationRecord { + cli_args, + ..Default::default() + }; + let record_truncated = buck2_data::InvocationRecord { + cli_args: cli_args_truncated, + ..Default::default() + }; + + let mut event_data = make_invocation_record(record); + let event_data_expected = make_invocation_record(record_truncated); + + smart_truncate_event(&mut event_data); + + assert_eq!(event_data, event_data_expected); + } + + #[test] + fn smart_truncate_invocation_record_short_cli_args_not_truncated() { + let cli_args = vec!["this is".to_owned(), "a test".to_owned()]; + + let record = buck2_data::InvocationRecord { + cli_args, + ..Default::default() + }; + + let mut event_data = make_invocation_record(record); + let event_data_expected = event_data.clone(); + + smart_truncate_event(&mut event_data); + + assert_eq!(event_data, event_data_expected); + } + + #[test] + fn smart_truncate_invocation_record_error_reports_truncated() { + let errors = vec![ + buck2_data::ProcessedErrorReport { + message: "0123456789".repeat(200 * 1024), + telemetry_message: None, + ..Default::default() + }, + buck2_data::ProcessedErrorReport { + message: "0123456789".repeat(200 * 1024), + telemetry_message: Some("0123456789".repeat(200 * 1024)), + ..Default::default() + }, + ]; + + let mut event_data = make_invocation_record(buck2_data::InvocationRecord { + errors, + ..Default::default() + }); + smart_truncate_event(&mut event_data); + + let buck2_data::buck_event::Data::Record(record_event) = event_data else { + unreachable!() + }; + let Some(buck2_data::record_event::Data::InvocationRecord(invocation_record)) = + record_event.data + else { + unreachable!() + }; + let size = invocation_record + .errors + .into_iter() + .map(|e| e.message.len() + e.telemetry_message.map_or(0, |s| s.len())) + .sum::<usize>(); + assert!(size < 500 * 1024); + } + + #[test] + fn smart_truncate_test_end_long_test_names_truncated() { + let test_names = vec![ + "0123456789".repeat(20 * 1024), + "0123456789".repeat(20 * 1024), + "0123456789".repeat(20 * 1024), // 600k in total; 88k-byte over + ]; + let test_names_truncated = vec![ + "0123456789".repeat(20 * 1024), + "0123456789".repeat(20 * 1024), + "<<Truncated (2 / 3)>>".to_owned(), + ]; + + let test_end = buck2_data::TestRunEnd { + suite: Some(buck2_data::TestSuite { + test_names, + ..Default::default() + }), + ..Default::default() + }; + let test_end_truncated = buck2_data::TestRunEnd { + suite: Some(buck2_data::TestSuite { + test_names: test_names_truncated, + ..Default::default() + }), + ..Default::default() + }; + + let mut event_data = make_test_end(test_end); + let event_data_expected = make_test_end(test_end_truncated); + + smart_truncate_event(&mut event_data); + + assert_eq!(event_data, event_data_expected); + } +} diff --git a/app/buck2_events/src/span.rs b/app/buck2_events/src/span.rs index 21b269ea2015d..32f4e7aeac987 100644 --- a/app/buck2_events/src/span.rs +++ 
b/app/buck2_events/src/span.rs @@ -12,6 +12,7 @@ use std::sync::atomic::AtomicU64; use std::sync::atomic::Ordering; use allocative::Allocative; +use anyhow::Context; use dupe::Dupe; use serde::Serialize; @@ -29,12 +30,20 @@ use serde::Serialize; derive_more::Display, Allocative )] -pub struct SpanId(pub(crate) NonZeroU64); +pub struct SpanId(pub NonZeroU64); impl SpanId { + pub fn from_u64(span_id: u64) -> anyhow::Result { + SpanId::from_u64_opt(span_id).context("zero span id") + } + + pub fn from_u64_opt(span_id: u64) -> Option { + NonZeroU64::new(span_id).map(SpanId) + } + /// Generates a new SpanId, suitable for identifying a particular span within the context of a trace. Span IDs are /// increasing nonzero 64-bit integers. - pub fn new() -> SpanId { + pub fn next() -> SpanId { static NEXT_ID: AtomicU64 = AtomicU64::new(1); loop { let next_id = NEXT_ID.fetch_add(1, Ordering::AcqRel); diff --git a/app/buck2_execute/BUCK b/app/buck2_execute/BUCK index 59a42af3141dc..a43f36b558aed 100644 --- a/app/buck2_execute/BUCK +++ b/app/buck2_execute/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -8,15 +7,13 @@ rust_library( srcs = glob( ["src/**/*.rs"], ), - named_deps = { - "edenfs": "//eden/fs/service:thrift-rust", - }, test_deps = [ "fbsource//third-party/rust:assert_matches", "fbsource//third-party/rust:prost-types", ], deps = [ "fbsource//third-party/rust:anyhow", + "fbsource//third-party/rust:async-recursion", "fbsource//third-party/rust:async-trait", "fbsource//third-party/rust:bytes", "fbsource//third-party/rust:chrono", @@ -33,36 +30,39 @@ rust_library( "fbsource//third-party/rust:itertools", "fbsource//third-party/rust:num_cpus", "fbsource//third-party/rust:once_cell", + "fbsource//third-party/rust:pathdiff", "fbsource//third-party/rust:prost", "fbsource//third-party/rust:ref-cast", "fbsource//third-party/rust:serde", + "fbsource//third-party/rust:serde_json", "fbsource//third-party/rust:sha1", "fbsource//third-party/rust:sha2", "fbsource//third-party/rust:slog", "fbsource//third-party/rust:smallvec", - "fbsource//third-party/rust:thiserror", "fbsource//third-party/rust:tokio", - "fbsource//third-party/rust:toml", "fbsource//third-party/rust:tracing", "//buck2/allocative/allocative:allocative", "//buck2/app/buck2_action_metadata_proto:buck2_action_metadata_proto", + "//buck2/app/buck2_build_info:buck2_build_info", "//buck2/app/buck2_cli_proto:buck2_cli_proto", "//buck2/app/buck2_common:buck2_common", "//buck2/app/buck2_core:buck2_core", "//buck2/app/buck2_data:buck2_data", - "//buck2/app/buck2_eden:buck2_eden", + "//buck2/app/buck2_directory:buck2_directory", + "//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_events:buck2_events", + "//buck2/app/buck2_futures:buck2_futures", + "//buck2/app/buck2_http:buck2_http", "//buck2/app/buck2_miniperf_proto:buck2_miniperf_proto", "//buck2/app/buck2_re_configuration:buck2_re_configuration", "//buck2/app/buck2_util:buck2_util", "//buck2/app/buck2_wrapper_common:buck2_wrapper_common", "//buck2/dice/dice:dice", - "//buck2/facebook/test_env_allowlist:test_env_allowlist", + # @oss-disable: "//buck2/facebook/test_env_allowlist:test_env_allowlist", "//buck2/gazebo/dupe:dupe", "//buck2/gazebo/gazebo:gazebo", "//buck2/host_sharing:host_sharing", "//buck2/remote_execution:remote_execution", - "//buck2/shed/more_futures:more_futures", "//buck2/starlark-rust/starlark_map:starlark_map", "//common/rust/shed/fbinit:fbinit", 
"//common/rust/shed/sorted_vector_map:sorted_vector_map", diff --git a/app/buck2_execute/Cargo.toml b/app/buck2_execute/Cargo.toml index 3226a5647bd34..e90c9e1c01ca1 100644 --- a/app/buck2_execute/Cargo.toml +++ b/app/buck2_execute/Cargo.toml @@ -1,15 +1,18 @@ [package] description = "Remote-execution support for Buck" edition = "2021" +license = { workspace = true } name = "buck2_execute" +repository = { workspace = true } version = "0.1.0" [dependencies] anyhow = { workspace = true } +async-recursion = { workspace = true } async-trait = { workspace = true } bytes = { workspace = true } -crossbeam-channel = { workspace = true } chrono = { workspace = true } +crossbeam-channel = { workspace = true } derivative = { workspace = true } derive_more = { workspace = true } digest = { workspace = true } @@ -22,42 +25,46 @@ indexmap = { workspace = true } itertools = { workspace = true } num_cpus = { workspace = true } once_cell = { workspace = true } +pathdiff = { workspace = true } prost = { workspace = true } ref-cast = { workspace = true } serde = { workspace = true } +serde_json = { workspace = true } sha1 = { workspace = true } sha2 = { workspace = true } -slog = { workspace = true } smallvec = { workspace = true } -thiserror = { workspace = true } tokio = { workspace = true } -toml = { workspace = true } tracing = { workspace = true } +allocative = { workspace = true } dice = { workspace = true } -# @oss-disable: edenfs = { package = "thrift", path = "../../../eden/fs/service" } +dupe = { workspace = true } fbinit = { workspace = true } gazebo = { workspace = true } -dupe = { workspace = true } host_sharing = { workspace = true } -more_futures = { workspace = true } remote_execution = { workspace = true } sorted_vector_map = { workspace = true } -allocative = { workspace = true } starlark_map = { workspace = true } +buck2_action_metadata_proto = { workspace = true } +buck2_build_info = { workspace = true } buck2_cli_proto = { workspace = true } buck2_common = { workspace = true } buck2_core = { workspace = true } buck2_data = { workspace = true } +buck2_directory = { workspace = true } +buck2_error = { workspace = true } buck2_events = { workspace = true } +buck2_futures = { workspace = true } +buck2_http = { workspace = true } buck2_miniperf_proto = { workspace = true } buck2_re_configuration = { workspace = true } buck2_util = { workspace = true } buck2_wrapper_common = { workspace = true } -buck2_eden = { workspace = true } -buck2_action_metadata_proto = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } prost-types = { workspace = true } + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ["cfg(fbcode_build)"] } diff --git a/app/buck2_execute/src/artifact/mod.rs b/app/buck2_execute/src/artifact.rs similarity index 100% rename from app/buck2_execute/src/artifact/mod.rs rename to app/buck2_execute/src/artifact.rs diff --git a/app/buck2_execute/src/artifact/artifact_dyn.rs b/app/buck2_execute/src/artifact/artifact_dyn.rs index 981477529a01f..1e6193a6c2580 100644 --- a/app/buck2_execute/src/artifact/artifact_dyn.rs +++ b/app/buck2_execute/src/artifact/artifact_dyn.rs @@ -12,5 +12,7 @@ use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; pub trait ArtifactDyn: Send + Sync + 'static { fn resolve_path(&self, fs: &ArtifactFs) -> anyhow::Result; - fn is_source(&self) -> bool; + /// Build artifacts and source artifacts from external cells require materialization. Other + /// source artifacts do not. 
+ fn requires_materialization(&self, fs: &ArtifactFs) -> bool; } diff --git a/app/buck2_execute/src/artifact/group/mod.rs b/app/buck2_execute/src/artifact/group.rs similarity index 100% rename from app/buck2_execute/src/artifact/group/mod.rs rename to app/buck2_execute/src/artifact/group.rs diff --git a/app/buck2_execute/src/artifact_utils.rs b/app/buck2_execute/src/artifact_utils.rs index ef1e9fd0b1175..e746b4a2af61c 100644 --- a/app/buck2_execute/src/artifact_utils.rs +++ b/app/buck2_execute/src/artifact_utils.rs @@ -10,10 +10,10 @@ use std::sync::Arc; use anyhow::Context; -use buck2_core::directory::DirectoryEntry; use buck2_core::fs::fs_util; use buck2_core::fs::project::ProjectRoot; use buck2_core::fs::project_rel_path::ProjectRelativePath; +use buck2_directory::directory::entry::DirectoryEntry; use dupe::Dupe; use crate::artifact_value::ArtifactValue; diff --git a/app/buck2_execute/src/bxl/mod.rs b/app/buck2_execute/src/bxl.rs similarity index 100% rename from app/buck2_execute/src/bxl/mod.rs rename to app/buck2_execute/src/bxl.rs diff --git a/app/buck2_execute/src/digest.rs b/app/buck2_execute/src/digest.rs index de80e693568e8..8849be8698d9b 100644 --- a/app/buck2_execute/src/digest.rs +++ b/app/buck2_execute/src/digest.rs @@ -10,16 +10,16 @@ use std::fmt; use buck2_common::cas_digest::CasDigest; +use buck2_common::cas_digest::CasDigestKind; use buck2_common::cas_digest::CasDigestParseError; use buck2_common::cas_digest::DigestAlgorithm; use buck2_common::cas_digest::TrackedCasDigest; use remote_execution::Digest; use remote_execution::TDigest; -use thiserror::Error; use crate::digest_config::DigestConfig; -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] pub enum DigestConversionError { #[error("Error parsing digest: `{}`", digest)] ParseError { @@ -56,7 +56,7 @@ pub trait CasDigestToReExt { fn to_grpc(&self) -> Digest; } -impl CasDigestFromReExt for CasDigest { +impl CasDigestFromReExt for CasDigest { fn from_re_with_algo( digest: &TDigest, digest_config: DigestConfig, @@ -89,7 +89,9 @@ pub trait CasDigestConversionResultExt { fn as_display(&self) -> &dyn fmt::Display; } -impl CasDigestConversionResultExt for Result, DigestConversionError> { +impl CasDigestConversionResultExt + for Result, DigestConversionError> +{ fn as_display(&self) -> &dyn fmt::Display { match self { Self::Ok(ref v) => v as _, @@ -98,7 +100,7 @@ impl CasDigestConversionResultExt for Result, DigestConver } } -impl CasDigestToReExt for TrackedCasDigest { +impl CasDigestToReExt for TrackedCasDigest { fn to_re(&self) -> TDigest { self.data().to_re() } @@ -108,7 +110,7 @@ impl CasDigestToReExt for TrackedCasDigest { } } -impl CasDigestToReExt for CasDigest { +impl CasDigestToReExt for CasDigest { fn to_re(&self) -> TDigest { TDigest { hash: self.raw_digest().to_string(), diff --git a/app/buck2_execute/src/directory.rs b/app/buck2_execute/src/directory.rs index 917420a99ba3e..2bfead37e9fc4 100644 --- a/app/buck2_execute/src/directory.rs +++ b/app/buck2_execute/src/directory.rs @@ -23,25 +23,31 @@ use buck2_common::external_symlink::ExternalSymlink; use buck2_common::file_ops::FileDigest; use buck2_common::file_ops::FileMetadata; use buck2_common::file_ops::TrackedFileDigest; -use buck2_core::directory::find; -use buck2_core::directory::unordered_entry_walk; -use buck2_core::directory::DashMapDirectoryInterner; -use buck2_core::directory::Directory; -use buck2_core::directory::DirectoryBuilder; -use buck2_core::directory::DirectoryEntry; -use buck2_core::directory::DirectoryHasher; -use 
buck2_core::directory::DirectoryIterator; -use buck2_core::directory::DirectorySelector; -use buck2_core::directory::FingerprintedDirectory; -use buck2_core::directory::ImmutableDirectory; -use buck2_core::directory::SharedDirectory; use buck2_core::execution_types::executor_config::RemoteExecutorUseCase; use buck2_core::fs::paths::file_name::FileName; use buck2_core::fs::paths::file_name::FileNameBuf; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; +use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; use buck2_core::fs::paths::RelativePath; use buck2_core::fs::paths::RelativePathBuf; use buck2_core::fs::project_rel_path::ProjectRelativePath; +use buck2_directory::directory::builder::DirectoryBuilder; +use buck2_directory::directory::dashmap_directory_interner::DashMapDirectoryInterner; +use buck2_directory::directory::directory::Directory; +use buck2_directory::directory::directory_hasher::DirectoryHasher; +use buck2_directory::directory::directory_iterator::DirectoryIterator; +use buck2_directory::directory::directory_iterator::DirectoryIteratorPathStack; +use buck2_directory::directory::directory_ref::DirectoryRef; +use buck2_directory::directory::directory_ref::FingerprintedDirectoryRef; +use buck2_directory::directory::directory_selector::DirectorySelector; +use buck2_directory::directory::entry::DirectoryEntry; +use buck2_directory::directory::find::find; +use buck2_directory::directory::find::DirectoryFindError; +use buck2_directory::directory::fingerprinted_directory::FingerprintedDirectory; +use buck2_directory::directory::immutable_directory::ImmutableDirectory; +use buck2_directory::directory::shared_directory::SharedDirectory; +use buck2_directory::directory::walk::unordered_entry_walk; +use buck2_error::internal_error_anyhow; use chrono::DateTime; use chrono::Utc; use derive_more::Display; @@ -50,7 +56,6 @@ use once_cell::sync::Lazy; use ref_cast::RefCast; use remote_execution as RE; use starlark_map::small_map::SmallMap; -use thiserror::Error; use crate::artifact_value::ArtifactValue; use crate::digest::CasDigestFromReExt; @@ -83,9 +88,14 @@ pub type ActionDirectoryBuilder = DirectoryBuilder; +pub trait ActionDirectoryRef<'a> = + DirectoryRef<'a, Leaf = ActionDirectoryMember, DirectoryDigest = TrackedFileDigest>; + pub trait ActionFingerprintedDirectory = FingerprintedDirectory; +pub trait ActionFingerprintedDirectoryRef<'a> = FingerprintedDirectoryRef<'a, Leaf = ActionDirectoryMember, DirectoryDigest = TrackedFileDigest>; + #[derive(Allocative, RefCast)] #[repr(transparent)] pub struct ReDirectorySerializer { @@ -95,13 +105,8 @@ pub struct ReDirectorySerializer { impl ReDirectorySerializer { fn create_re_directory<'a, D, I>(entries: I) -> RE::Directory where - I: IntoIterator< - Item = ( - &'a FileName, - DirectoryEntry<&'a D, &'a ActionDirectoryMember>, - ), - >, - D: ActionFingerprintedDirectory + ?Sized + 'a, + I: IntoIterator)>, + D: ActionFingerprintedDirectoryRef<'a>, { let mut files: Vec = Vec::new(); let mut directories: Vec = Vec::new(); @@ -114,7 +119,7 @@ impl ReDirectorySerializer { DirectoryEntry::Dir(d) => { directories.push(RE::DirectoryNode { name, - digest: Some(d.fingerprint().to_grpc()), + digest: Some(d.as_fingerprinted_dyn().fingerprint().to_grpc()), }); } DirectoryEntry::Leaf(ActionDirectoryMember::File(f)) => { @@ -156,13 +161,8 @@ impl ReDirectorySerializer { pub fn serialize_entries<'a, D, I>(entries: I) -> Vec where - I: IntoIterator< - Item = ( - &'a FileName, - DirectoryEntry<&'a D, &'a ActionDirectoryMember>, - 
), - >, - D: ActionFingerprintedDirectory + ?Sized + 'a, + I: IntoIterator)>, + D: ActionFingerprintedDirectoryRef<'a>, { proto_serialize(&Self::create_re_directory(entries)) } @@ -177,13 +177,8 @@ fn proto_serialize(m: &M) -> Vec { impl DirectoryHasher for ReDirectorySerializer { fn hash_entries<'a, D, I>(&self, entries: I) -> TrackedFileDigest where - I: IntoIterator< - Item = ( - &'a FileName, - DirectoryEntry<&'a D, &'a ActionDirectoryMember>, - ), - >, - D: ActionFingerprintedDirectory + 'a, + I: IntoIterator)>, + D: ActionFingerprintedDirectoryRef<'a>, { TrackedFileDigest::from_content(&Self::serialize_entries(entries), self.cas_digest_config) } @@ -218,7 +213,7 @@ pub fn new_symlink>(target: T) -> anyhow::Result>(target: T) -> anyhow::Result RE::Tree { +pub fn directory_to_re_tree(directory: &T) -> RE::Tree +where + T: ActionFingerprintedDirectory, + for<'a> T::DirectoryRef<'a>: FingerprintedDirectoryRef<'a>, +{ let children = directory - .fingerprinted_ordered_walk() + .ordered_walk() .without_paths() .filter_map(|entry| match entry { DirectoryEntry::Dir(d) => Some(d), DirectoryEntry::Leaf(..) => None, }) - .map(|d| ReDirectorySerializer::create_re_directory(d.fingerprinted_entries())) + .map(|d| ReDirectorySerializer::create_re_directory(d.entries())) .collect(); - let root = ReDirectorySerializer::create_re_directory(directory.fingerprinted_entries()); + let root = ReDirectorySerializer::create_re_directory(directory.as_ref().entries()); RE::Tree { root: Some(root), @@ -264,7 +263,7 @@ pub async fn re_directory_to_re_tree( }) .collect(); let mut retrieved = client - .download_typed_blobs::(digests, use_case) + .download_typed_blobs::(None, digests, use_case) .await?; frontier = retrieved .iter() @@ -432,7 +431,7 @@ pub fn re_tree_to_directory( ) } -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] pub enum DirectoryReConversionError { // Conversion from RE::Tree errors (these shouldn't happen unless something is broken on RE side) #[error("Converting RE::Tree to Directory, dir `{dir}` has child `{name}` with digest=None.")] @@ -452,7 +451,7 @@ impl<'a> TryFrom<&'a RE::SymlinkNode> for ActionDirectoryMember { let symlink = if node.target.starts_with('/') { ActionDirectoryMember::ExternalSymlink(Arc::new(ExternalSymlink::new( PathBuf::from(node.target.as_str()), - None, + ForwardRelativePathBuf::default(), )?)) } else { ActionDirectoryMember::Symlink(Arc::new(Symlink(RelativePathBuf::from( @@ -471,17 +470,17 @@ pub fn relativize_directory( let mut replacements = ActionDirectoryBuilder::empty(); { - let mut walk = builder.unordered_walk(); + let mut walk = builder.unordered_walk_leaves(); while let Some((path, entry)) = walk.next() { let link = match entry { - DirectoryEntry::Leaf(ActionDirectoryMember::Symlink(link)) => link, + ActionDirectoryMember::Symlink(link) => link, _ => continue, }; let path = path.get(); - let orig_path = orig_root.join_normalized(&path)?; - let new_path = new_root.join_normalized(&path)?; + let orig_path = orig_root.join(&path); + let new_path = new_root.join(&path); let orig_dest = orig_path .parent() @@ -697,7 +696,7 @@ pub fn extract_artifact_value( path: &ProjectRelativePath, digest_config: DigestConfig, ) -> anyhow::Result> { - let entry = match find(builder, path.as_forward_relative_path())? { + let entry = match find(builder.as_ref(), path.as_forward_relative_path())? { Some(entry) => entry, _ => return Ok(None), }; @@ -745,6 +744,17 @@ pub fn extract_artifact_value( .shared(&*INTERNER) }); + // Do not depend on the artifact itself. 
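The `remove_prefix` call that follows exists because the dependency walk above may have recorded the output artifact, or its contents, as a dependency of itself. A toy illustration of the intent with plain string paths (hypothetical values, not the buck2_directory API):

fn main() {
    let output = "gen/foo";
    let deps = vec!["gen/foo/inner.txt", "src/lib.rs"];

    // Drop the output itself and anything under it; an artifact must not
    // depend on its own contents. If instead a recorded dep were a *file*
    // at a parent path such as "gen", the tree would be inconsistent,
    // which is what the internal error below reports.
    let deps: Vec<_> = deps
        .into_iter()
        .filter(|d| *d != output && !d.starts_with("gen/foo/"))
        .collect();

    assert_eq!(deps, vec!["src/lib.rs"]);
}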
+ match deps.remove_prefix(path.as_forward_relative_path()) { + Ok(_) => {} + Err(DirectoryFindError::CannotTraverseLeaf { .. }) => { + return Err(internal_error_anyhow!( + "Dependency artifact is parent of output artifact: {}", + path + )); + } + } + let deps = if has_deps { Some( deps.fingerprint(digest_config.as_directory_serializer()) @@ -922,7 +932,7 @@ mod tests { for p in &["d6/s4", "d6/f4", "d1/d2/d4", "f1"] { let path = path(p); - let entry = find(&root, path.as_forward_relative_path())? + let entry = find(root.as_ref(), path.as_forward_relative_path())? .with_context(|| format!("Missing {}", path))? .map_dir(|d| d.to_builder()) .map_leaf(|l| l.dupe()); diff --git a/app/buck2_execute/src/entry.rs b/app/buck2_execute/src/entry.rs index 7b1aedc9def73..9378a111af986 100644 --- a/app/buck2_execute/src/entry.rs +++ b/app/buck2_execute/src/entry.rs @@ -11,64 +11,111 @@ use std::time::Duration; use std::time::Instant; use anyhow::Context as _; +use async_recursion::async_recursion; use buck2_common::file_ops::FileDigest; use buck2_common::file_ops::FileDigestConfig; use buck2_common::file_ops::FileMetadata; +use buck2_common::file_ops::FileType; use buck2_common::file_ops::TrackedFileDigest; -use buck2_core::directory::DirectoryEntry; use buck2_core::fs::fs_util; +use buck2_core::fs::paths::abs_norm_path::AbsNormPath; use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; use buck2_core::fs::paths::file_name::FileNameBuf; +use buck2_core::fs::paths::RelativePath; +use buck2_directory::directory::entry::DirectoryEntry; +use buck2_util::future::try_join_all; +use derive_more::Add; use faccess::PathExt; +use futures::future::try_join; +use futures::Future; +use once_cell::sync::Lazy; +use pathdiff::diff_paths; +use tokio::sync::Semaphore; use crate::directory::new_symlink; use crate::directory::ActionDirectoryBuilder; use crate::directory::ActionDirectoryEntry; use crate::directory::ActionDirectoryMember; +use crate::execute::blocking::BlockingExecutor; -pub fn build_entry_from_disk( - mut path: AbsNormPathBuf, +#[derive(Add, Default)] +pub struct HashingInfo { + pub hashing_duration: Duration, + pub hashed_artifacts_count: u64, +} + +impl HashingInfo { + fn new(hashing_duration: Duration, hashed_artifacts_count: u64) -> HashingInfo { + HashingInfo { + hashing_duration, + hashed_artifacts_count, + } + } +} + +pub async fn build_entry_from_disk( + path: AbsNormPathBuf, digest_config: FileDigestConfig, + blocking_executor: &dyn BlockingExecutor, + project_root: &AbsNormPath, ) -> anyhow::Result<( Option>, - Duration, + HashingInfo, )> { // Get file metadata. If the file is missing, ignore it. + // TODO(nga): explain why we ignore missing files. let m = match std::fs::symlink_metadata(&path) { Ok(m) => m, Err(ref err) if err.kind() == std::io::ErrorKind::NotFound => { - return Ok((None, Duration::ZERO)); + return Ok((None, HashingInfo::default())); } Err(err) => return Err(err.into()), }; - let hashing_start = Instant::now(); - - let value = if m.file_type().is_symlink() { - DirectoryEntry::Leaf(new_symlink(fs_util::read_link(&path)?)?) - } else if m.is_file() { - DirectoryEntry::Leaf(ActionDirectoryMember::File(FileMetadata { - digest: TrackedFileDigest::new( - FileDigest::from_file(&path, digest_config)?, - digest_config.as_cas_digest_config(), - ), - is_executable: path.executable(), - })) - } else if m.is_dir() { - DirectoryEntry::Dir(build_dir_from_disk(&mut path, digest_config)?) 
- } else { - anyhow::bail!("Path {:?} is of an unknown file type.", path) + let mut hashing_info = HashingInfo::default(); + let value = match FileType::from(m.file_type()) { + FileType::File => { + let (file_metadata, file_hashing_info): (FileMetadata, HashingInfo) = + build_file_metadata(path, digest_config, blocking_executor).await?; + hashing_info = hashing_info.add(file_hashing_info); + DirectoryEntry::Leaf(ActionDirectoryMember::File(file_metadata)) + } + FileType::Symlink => DirectoryEntry::Leaf(create_symlink(&path, project_root)?), + FileType::Directory => { + let (dir, dir_hashing_info) = + build_dir_from_disk(path, digest_config, blocking_executor, project_root).await?; + hashing_info = hashing_info.add(dir_hashing_info); + DirectoryEntry::Dir(dir) + } + FileType::Unknown => { + return Err(anyhow::anyhow!( + "Path {:?} is of an unknown file type.", + path + )); + } }; - let hashing_time = hashing_start.elapsed(); - Ok((Some(value), hashing_time)) + + Ok((Some(value), hashing_info)) } -fn build_dir_from_disk( - disk_path: &mut AbsNormPathBuf, +#[async_recursion] +async fn build_dir_from_disk( + disk_path: AbsNormPathBuf, digest_config: FileDigestConfig, -) -> anyhow::Result { + blocking_executor: &dyn BlockingExecutor, + project_root: &AbsNormPath, +) -> anyhow::Result<(ActionDirectoryBuilder, HashingInfo)> { let mut builder = ActionDirectoryBuilder::empty(); + let mut hashing_info = HashingInfo::default(); - for file in fs_util::read_dir(&disk_path)? { + let mut directory_names: Vec = Vec::new(); + let mut directory_futures: Vec<_> = Vec::new(); + let mut file_names: Vec = Vec::new(); + let mut file_futures: Vec<_> = Vec::new(); + + let files = blocking_executor + .execute_io_inline(|| fs_util::read_dir(&disk_path).map_err(Into::into)) + .await?; + for file in files { let file = file?; let filetype = file.file_type()?; let filename = file.file_name(); @@ -77,33 +124,110 @@ fn build_dir_from_disk( .to_str() .context("Filename is not UTF-8") .and_then(|f| FileNameBuf::try_from(f.to_owned())) - .with_context(|| format!("Invalid filename: {}", disk_path.display()))?; - - disk_path.push(&filename); - - if filetype.is_dir() { - let dir = build_dir_from_disk(disk_path, digest_config)?; - builder.insert(filename, DirectoryEntry::Dir(dir))?; - } else if filetype.is_symlink() { - builder.insert( - filename, - DirectoryEntry::Leaf(new_symlink(fs_util::read_link(&disk_path)?)?), - )?; - } else if filetype.is_file() { - let metadata = FileMetadata { - digest: TrackedFileDigest::new( - FileDigest::from_file(disk_path, digest_config)?, - digest_config.as_cas_digest_config(), - ), - is_executable: file.path().executable(), - }; - builder.insert( - filename, - DirectoryEntry::Leaf(ActionDirectoryMember::File(metadata)), - )?; - } - disk_path.pop(); + .with_context(|| format!("Invalid filename: {}", disk_path.clone().display()))?; + + let mut child_disk_path = disk_path.clone(); + child_disk_path.push(&filename); + + match FileType::from(filetype) { + FileType::File => { + let file_future = + build_file_metadata(child_disk_path, digest_config, blocking_executor); + file_names.push(filename); + file_futures.push(file_future) + } + FileType::Symlink => { + builder.insert( + filename, + DirectoryEntry::Leaf(create_symlink(&child_disk_path, project_root)?), + )?; + } + FileType::Directory => { + let dir_future = build_dir_from_disk( + child_disk_path, + digest_config, + blocking_executor, + project_root, + ); + directory_names.push(filename); + directory_futures.push(dir_future); + } + 
FileType::Unknown => (), + }; } - Ok(builder) + let (file_results, dir_results) = + try_join(try_join_all(file_futures), try_join_all(directory_futures)).await?; + + for (filename, file_res) in file_names.into_iter().zip(file_results.into_iter()) { + let (file_metadata, file_hashing_info) = file_res; + hashing_info = hashing_info.add(file_hashing_info); + builder.insert( + filename, + DirectoryEntry::Leaf(ActionDirectoryMember::File(file_metadata)), + )?; + } + + for (filename, dir_res) in directory_names.into_iter().zip(dir_results.into_iter()) { + let (dir_builder, dir_hashing_info) = dir_res; + hashing_info = hashing_info.add(dir_hashing_info); + builder.insert(filename, DirectoryEntry::Dir(dir_builder))?; + } + + Ok((builder, hashing_info)) +} + +fn build_file_metadata( + disk_path: AbsNormPathBuf, + digest_config: FileDigestConfig, + blocking_executor: &dyn BlockingExecutor, +) -> impl Future> + '_ { + static SEMAPHORE: Lazy = Lazy::new(|| Semaphore::new(100)); + let exec_path = disk_path.clone(); + let executable = blocking_executor.execute_io_inline(move || Ok(exec_path.executable())); + let file_digest = + tokio::task::spawn_blocking(move || FileDigest::from_file(&disk_path, digest_config)); + + async move { + let _permit = SEMAPHORE.acquire().await.unwrap(); + let hashing_start = Instant::now(); + let file_digest = file_digest.await??; + let hashing_duration = HashingInfo::new(hashing_start.elapsed(), 1); + let file_metadata = FileMetadata { + digest: TrackedFileDigest::new(file_digest, digest_config.as_cas_digest_config()), + is_executable: executable.await?, + }; + + Ok((file_metadata, hashing_duration)) + } +} + +fn create_symlink( + path: &AbsNormPathBuf, + project_root: &AbsNormPath, +) -> anyhow::Result { + let mut symlink_target = fs_util::read_link(path)?; + if cfg!(windows) && symlink_target.is_relative() { + let directory_path = path + .parent() + .context(format!("failed to get parent of {}", path.display()))?; + let canonical_path = fs_util::canonicalize(directory_path).context(format!( + "failed to get canonical path of {}", + directory_path.display() + ))?; + if !canonical_path.starts_with(project_root) { + let normalized_target = symlink_target + .to_str() + .context("can't convert path to str")? + .replace('\\', "/"); + let target_abspath = + canonical_path.join_normalized(RelativePath::from_path(&normalized_target)?)?; + // Recalculate symlink target if it points from symlinked buck-out to the files inside project root. + if target_abspath.starts_with(project_root) { + symlink_target = diff_paths(target_abspath, directory_path) + .context("can't calculate relative path")?; + } + } + } + new_symlink(symlink_target) } diff --git a/app/buck2_execute/src/execute.rs b/app/buck2_execute/src/execute.rs new file mode 100644 index 0000000000000..13036e7be3136 --- /dev/null +++ b/app/buck2_execute/src/execute.rs @@ -0,0 +1,59 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +pub mod action_digest; +pub mod action_digest_and_blobs; +pub mod blobs; +pub mod blocking; +pub mod cache_uploader; +pub mod claim; +pub mod clean_output_paths; +pub mod command_executor; +pub mod dep_file_digest; +pub mod environment_inheritance; +pub mod inputs_directory; +pub mod kind; +pub mod manager; +pub mod output; +pub mod paths_with_digest; +pub mod prepared; +pub mod request; +pub mod result; +pub mod target; +pub mod testing_dry_run; + +use std::future::Future; + +use buck2_events::dispatch::span; +use buck2_events::dispatch::span_async_simple; + +pub fn executor_stage_async( + stage: impl Into, + f: F, +) -> impl Future::Output> { + // We avoid using `async fn` or `async move` here to avoid doubling the + // future size. See https://github.com/rust-lang/rust/issues/62958 + let event = buck2_data::ExecutorStageStart { + stage: Some(stage.into()), + }; + span_async_simple(event, f, buck2_data::ExecutorStageEnd {}) +} + +pub fn executor_stage(stage: impl Into, f: F) -> R +where + F: FnOnce() -> R, +{ + let event = buck2_data::ExecutorStageStart { + stage: Some(stage.into()), + }; + span(event, || { + let r = f(); + (r, buck2_data::ExecutorStageEnd {}) + }) +} diff --git a/app/buck2_execute/src/execute/action_digest.rs b/app/buck2_execute/src/execute/action_digest.rs index 8067739c7b070..6c86b75c79c1b 100644 --- a/app/buck2_execute/src/execute/action_digest.rs +++ b/app/buck2_execute/src/execute/action_digest.rs @@ -9,14 +9,14 @@ use buck2_common::cas_digest::CasDigest; use buck2_common::cas_digest::CasDigestConfig; +use buck2_common::cas_digest::CasDigestKind; use buck2_common::cas_digest::TrackedCasDigest; -use buck2_common::cas_digest::TrackedCasDigestKind; pub struct ActionDigestKind { _private: (), } -impl TrackedCasDigestKind for ActionDigestKind { +impl CasDigestKind for ActionDigestKind { fn empty_digest(_config: CasDigestConfig) -> Option> { // No reason to optimize "empty" actions. None diff --git a/app/buck2_execute/src/execute/action_digest_and_blobs.rs b/app/buck2_execute/src/execute/action_digest_and_blobs.rs new file mode 100644 index 0000000000000..743efa5438ac4 --- /dev/null +++ b/app/buck2_execute/src/execute/action_digest_and_blobs.rs @@ -0,0 +1,55 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use buck2_common::file_ops::TrackedFileDigest; +use dupe::Dupe; +use remote_execution as RE; + +use crate::digest_config::DigestConfig; +use crate::execute::action_digest::ActionDigest; +use crate::execute::blobs::ActionBlobs; +use crate::execute::paths_with_digest::PathsWithDigestBlobData; + +#[derive(Clone)] +pub struct ActionDigestAndBlobs { + pub action: ActionDigest, + /// The encoded action and other messages referenced from it by digest (e.g. RE::Command). + /// Does not include the files referenced in inputs. 
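The builder introduced below wraps a simple pattern: serialize each message, key it by its content digest, remember the bytes for upload, and hand back the digest so the next message can reference it (as an RE::Action references its RE::Command). A dependency-free sketch of that pattern, with a toy `digest` standing in for the real CAS digest:

use std::collections::HashMap;

// Toy stand-in for a content digest: length plus first byte.
fn digest(data: &[u8]) -> String {
    format!("{}:{}", data.len(), data.first().copied().unwrap_or(0))
}

#[derive(Default)]
struct BlobsBuilder {
    blobs: HashMap<String, Vec<u8>>,
}

impl BlobsBuilder {
    // Store the serialized message and return the digest by which other
    // messages can reference it.
    fn add(&mut self, serialized: Vec<u8>) -> String {
        let d = digest(&serialized);
        self.blobs.insert(d.clone(), serialized);
        d
    }
}

fn main() {
    let mut b = BlobsBuilder::default();
    let command_digest = b.add(b"command-bytes".to_vec());
    let action = format!("action referencing {command_digest}").into_bytes();
    let action_digest = b.add(action);
    // Both blobs travel alongside the action digest for upload.
    assert_eq!(b.blobs.len(), 2);
    println!("{action_digest}");
}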
+ pub blobs: ActionBlobs, +} + +pub struct ActionDigestAndBlobsBuilder { + digest_config: DigestConfig, + blobs: ActionBlobs, +} + +impl ActionDigestAndBlobsBuilder { + pub fn new(digest_config: DigestConfig) -> ActionDigestAndBlobsBuilder { + ActionDigestAndBlobsBuilder { + digest_config, + blobs: ActionBlobs::new(digest_config), + } + } + + pub fn add_paths(&mut self, digest: TrackedFileDigest, paths: PathsWithDigestBlobData) { + self.blobs.add_blob(digest, paths.0); + } + + pub fn add_command(&mut self, command: &RE::Command) -> TrackedFileDigest { + self.blobs.add_protobuf_message(command, self.digest_config) + } + + pub fn build(mut self, action: &RE::Action) -> ActionDigestAndBlobs { + let action = self.blobs.add_protobuf_message(action, self.digest_config); + ActionDigestAndBlobs { + action: action.data().dupe().coerce(), + blobs: self.blobs, + } + } +} diff --git a/app/buck2_execute/src/execute/blobs.rs b/app/buck2_execute/src/execute/blobs.rs index 4feb93b9d2e8b..8d80fb9f80a43 100644 --- a/app/buck2_execute/src/execute/blobs.rs +++ b/app/buck2_execute/src/execute/blobs.rs @@ -11,12 +11,16 @@ use std::collections::HashMap; use buck2_common::file_ops::TrackedFileDigest; use dupe::Dupe; -use prost::Message; +use remote_execution::InlinedBlobWithDigest; +use crate::digest::CasDigestToReExt; use crate::digest_config::DigestConfig; +use crate::execute::request::ActionMetadataBlobData; +use crate::execute::request::ActionMetadataBlobMessage; /// Contains small blobs referenced from action messages (does not include any file contents blobs). -pub struct ActionBlobs(HashMap>); +#[derive(Clone)] +pub struct ActionBlobs(HashMap); impl ActionBlobs { pub fn new(digest_config: DigestConfig) -> Self { @@ -25,24 +29,22 @@ impl ActionBlobs { let mut blobs = HashMap::new(); blobs.insert( TrackedFileDigest::empty(digest_config.cas_digest_config()), - Vec::new(), + ActionMetadataBlobData(Vec::new()), ); Self(blobs) } - pub fn add_blob(&mut self, digest: TrackedFileDigest, data: Vec) { + pub fn add_blob(&mut self, digest: TrackedFileDigest, data: ActionMetadataBlobData) { self.0.insert(digest, data); } pub fn add_protobuf_message( &mut self, - m: &impl Message, + m: &impl ActionMetadataBlobMessage, digest_config: DigestConfig, ) -> TrackedFileDigest { - let mut blob = Vec::new(); - // Unwrap is safe because it only fails in OOM conditions, which we pretend don't happen - m.encode(&mut blob).unwrap(); - let digest = TrackedFileDigest::from_content(&blob, digest_config.cas_digest_config()); + let blob = ActionMetadataBlobData::from_message(m); + let digest = TrackedFileDigest::from_content(&blob.0, digest_config.cas_digest_config()); self.0.insert(digest.dupe(), blob); digest } @@ -51,7 +53,24 @@ impl ActionBlobs { self.0.keys() } - pub fn get(&self, digest: &TrackedFileDigest) -> Option<&Vec> { + pub fn get(&self, digest: &TrackedFileDigest) -> Option<&ActionMetadataBlobData> { self.0.get(digest) } + + pub fn iter( + &self, + ) -> std::collections::hash_map::Iter<'_, TrackedFileDigest, ActionMetadataBlobData> { + self.0.iter() + } + + pub fn to_inlined_blobs(&self) -> Vec { + self.0 + .iter() + .map(|(digest, data)| InlinedBlobWithDigest { + blob: data.0.clone(), + digest: digest.to_re(), + ..Default::default() + }) + .collect() + } } diff --git a/app/buck2_execute/src/execute/blocking.rs b/app/buck2_execute/src/execute/blocking.rs index d45dc480c3977..03e40933f244e 100644 --- a/app/buck2_execute/src/execute/blocking.rs +++ b/app/buck2_execute/src/execute/blocking.rs @@ -12,15 +12,16 @@ use 
std::sync::Arc; use allocative::Allocative; use anyhow::Context as _; use async_trait::async_trait; -use buck2_core::env_helper::EnvHelper; +use buck2_core::buck2_env_anyhow; use buck2_core::fs::project::ProjectRoot; +use buck2_futures::cancellation::CancellationContext; +use buck2_util::threads::thread_spawn; use crossbeam_channel::unbounded; use dice::DiceComputations; use dice::UserComputationData; use dupe::Dupe; use futures::future::BoxFuture; use futures::future::FutureExt; -use more_futures::cancellation::CancellationContext; use tokio::sync::oneshot; use tokio::sync::Semaphore; @@ -85,36 +86,32 @@ impl BuckBlockingExecutor { /// We choose the default concurrency as follows: /// /// - For operations executed by the thread pool, we choose a fairly low concurrency level. - /// This is because those operations do exclusively I/O work, and that work consists of - /// modifying the directory structure of the FS, which scales negatively as soon as you add - /// more than 4 threads on all systems we care about (sometimes it does so earlier, but for now - /// 4 is the one-size-fits-all solution we have). D33922298 has benchmark details. + /// This is because those operations do exclusively I/O work, and that work consists of + /// modifying the directory structure of the FS, which scales negatively as soon as you add + /// more than 4 threads on all systems we care about (sometimes it does so earlier, but for now + /// 4 is the one-size-fits-all solution we have). D33922298 has benchmark details. /// /// - For operations that primarily write data, we default to the number of threads on the - /// host. This is because those operations often have to do CPU bound work to generate the data - /// they are trying to write, and writing to multiple files doesn't have the negative scaling - /// issues modifying the directory structure does. + /// host. This is because those operations often have to do CPU bound work to generate the data + /// they are trying to write, and writing to multiple files doesn't have the negative scaling + /// issues modifying the directory structure does. 
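The function below wires that policy up as a small hand-rolled worker pool: one unbounded crossbeam channel shared by a fixed number of I/O threads. A stripped-down, runnable sketch of the same pattern (thread count and job type are illustrative; assumes a dependency on the crossbeam-channel crate):

use crossbeam_channel::unbounded;

fn main() {
    let (tx, rx) = unbounded::<Box<dyn FnOnce() + Send>>();

    // Spawn a small, fixed pool; each worker drains the shared channel.
    let workers: Vec<_> = (0..4)
        .map(|i| {
            let rx = rx.clone();
            std::thread::Builder::new()
                .name(format!("io-{}", i))
                .spawn(move || {
                    for job in rx.iter() {
                        job();
                    }
                })
                .expect("failed to spawn io worker")
        })
        .collect();

    for n in 0..8 {
        tx.send(Box::new(move || println!("job {}", n))).unwrap();
    }
    drop(tx); // close the channel so the workers' iterators end
    for w in workers {
        w.join().unwrap();
    }
}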
pub fn default_concurrency(fs: ProjectRoot) -> anyhow::Result<Self> { - static IO_THREADS: EnvHelper<usize> = EnvHelper::new("BUCK2_IO_THREADS"); - static IO_SEMAPHORE: EnvHelper<usize> = EnvHelper::new("BUCK2_IO_SEMAPHORE"); - - let io_threads = IO_THREADS.get_copied()?.unwrap_or(4); - let io_semaphore = IO_SEMAPHORE.get_copied()?.unwrap_or_else(num_cpus::get); + let io_threads = buck2_env_anyhow!("BUCK2_IO_THREADS", type=usize, default=4)?; + let io_semaphore = + buck2_env_anyhow!("BUCK2_IO_SEMAPHORE", type=usize, default=num_cpus::get())?; let (command_sender, command_receiver) = unbounded(); for i in 0..io_threads { let command_receiver = command_receiver.clone(); let fs = fs.dupe(); - std::thread::Builder::new() - .name(format!("buck-io-{}", i)) - .spawn(move || { - for ThreadPoolIoRequest { sender, io } in command_receiver.iter() { - let res = io.execute(&fs); - let _ignored = sender.send(res); - } - }) - .context("Failed to spawn io worker")?; + thread_spawn(&format!("buck-io-{}", i), move || { + for ThreadPoolIoRequest { sender, io } in command_receiver.iter() { + let res = io.execute(&fs); + let _ignored = sender.send(res); + } + }) + .context("Failed to spawn io worker")?; } Ok(Self { @@ -174,7 +171,7 @@ impl SetBlockingExecutor for UserComputationData { } } -impl HasBlockingExecutor for DiceComputations { +impl HasBlockingExecutor for DiceComputations<'_> { fn get_blocking_executor(&self) -> Arc<dyn BlockingExecutor> { self.per_transaction_data() .data diff --git a/app/buck2_execute/src/execute/cache_uploader.rs b/app/buck2_execute/src/execute/cache_uploader.rs index df2672d4398b0..b33817c70ca03 100644 --- a/app/buck2_execute/src/execute/cache_uploader.rs +++ b/app/buck2_execute/src/execute/cache_uploader.rs @@ -9,22 +9,31 @@ use async_trait::async_trait; use buck2_action_metadata_proto::RemoteDepFile; +use buck2_core::buck2_env_anyhow; +use buck2_core::fs::artifact_path_resolver::ArtifactFs; +use remote_execution::TActionResult2; use crate::digest_config::DigestConfig; -use crate::execute::action_digest::ActionDigest; -use crate::execute::dep_file_digest::DepFileDigest; +use crate::execute::action_digest_and_blobs::ActionDigestAndBlobs; use crate::execute::result::CommandExecutionResult; use crate::execute::target::CommandExecutionTarget; +use crate::materialize::materializer::Materializer; pub struct CacheUploadInfo<'a> { pub target: &'a dyn CommandExecutionTarget, - pub action_digest: ActionDigest, pub digest_config: DigestConfig, } -pub struct DepFileEntry { - pub key: DepFileDigest, - pub entry: RemoteDepFile, +#[async_trait] +pub trait IntoRemoteDepFile: Send { + fn remote_dep_file_action(&self) -> &ActionDigestAndBlobs; + + async fn make_remote_dep_file( + &mut self, + digest_config: DigestConfig, + fs: &ArtifactFs, + materializer: &dyn Materializer, + ) -> anyhow::Result<RemoteDepFile>; } pub struct CacheUploadResult { @@ -32,6 +41,15 @@ pub struct CacheUploadResult { pub did_dep_file_cache_upload: bool, } +// This is for quick testing of cache upload without configuring executors.
+pub fn force_cache_upload() -> anyhow::Result<bool> { + buck2_env_anyhow!( + "BUCK2_TEST_FORCE_CACHE_UPLOAD", + bool, + applicability = testing + ) +} + /// A single purpose trait to handle cache uploads #[async_trait] pub trait UploadCache: Send + Sync { @@ -42,7 +60,9 @@ pub trait UploadCache: Send + Sync { &self, info: &CacheUploadInfo<'_>, execution_result: &CommandExecutionResult, - dep_file_entry: Option<DepFileEntry>, + re_result: Option<TActionResult2>, + dep_file_bundle: Option<&mut dyn IntoRemoteDepFile>, + action_digest_and_blobs: &ActionDigestAndBlobs, ) -> anyhow::Result<CacheUploadResult>; } @@ -55,7 +75,9 @@ impl UploadCache for NoOpCacheUploader { &self, _info: &CacheUploadInfo<'_>, _execution_result: &CommandExecutionResult, - _dep_file_entry: Option<DepFileEntry>, + _re_result: Option<TActionResult2>, + _dep_file_bundle: Option<&mut dyn IntoRemoteDepFile>, + _action_digest_and_blobs: &ActionDigestAndBlobs, ) -> anyhow::Result<CacheUploadResult> { Ok(CacheUploadResult { did_cache_upload: false, diff --git a/app/buck2_execute/src/execute/claim.rs b/app/buck2_execute/src/execute/claim.rs index 4360abfadd824..1c363f70bd967 100644 --- a/app/buck2_execute/src/execute/claim.rs +++ b/app/buck2_execute/src/execute/claim.rs @@ -79,7 +79,7 @@ impl ClaimManager for MutexClaimManager { } #[derive(Display, Derivative)] -#[display(fmt = "MutexClaim")] +#[display("MutexClaim")] #[derivative(Debug)] pub struct MutexClaim { // No point in printing this as it will *always* be Claimed. diff --git a/app/buck2_execute/src/execute/clean_output_paths.rs b/app/buck2_execute/src/execute/clean_output_paths.rs index 21ffb07c9c1df..7dfec158120be 100644 --- a/app/buck2_execute/src/execute/clean_output_paths.rs +++ b/app/buck2_execute/src/execute/clean_output_paths.rs @@ -23,7 +23,7 @@ pub struct CleanOutputPaths { impl CleanOutputPaths { pub fn clean<'a>( - paths: impl Iterator<Item = &'a ProjectRelativePath>, + paths: impl IntoIterator<Item = &'a ProjectRelativePath>, fs: &'a ProjectRoot, ) -> anyhow::Result<()> { for path in paths { @@ -34,12 +34,40 @@ } } +#[cfg(unix)] +fn tag_environment_error(error: buck2_error::Error) -> buck2_error::Error { + error +} + +#[cfg(windows)] +fn tag_environment_error(error: buck2_error::Error) -> buck2_error::Error { + use buck2_error::ErrorTag; + if error.has_tag(ErrorTag::IoWindowsSharingViolation) + | error.has_tag(ErrorTag::IoPermissionDenied) + { + error + .tag([ErrorTag::IoMaterializerFileBusy]) + .context("Binary being executed, please close the process first") + } else { + error + } +} + +use buck2_core::fs::fs_util::IoError; +fn tag_cleanup_path_env_error(res: Result<(), IoError>) -> anyhow::Result<()> { + let error = res + .map_err(buck2_error::Error::from) + .map_err(tag_environment_error); + Ok(error?) +} + #[tracing::instrument(level = "debug", skip(fs), fields(path = %path))] pub fn cleanup_path(fs: &ProjectRoot, path: &ProjectRelativePath) -> anyhow::Result<()> { + let path = fs.resolve(path); + // This will remove the path if it exists. - fs.remove_path_recursive(path)?; + tag_cleanup_path_env_error(fs_util::remove_all(&path))?; - let path = fs.resolve(path); let mut path: &AbsNormPath = &path; // Be aware of T85589819 - the parent directory might already exist, but as a _file_. It might @@ -66,7 +94,7 @@ // There was a file or a symlink, so it's safe to delete and then we can exit // because we'll be able to create a dir here.
tracing::trace!(path = %path, "remove_file"); - fs_util::remove_file(path)?; + tag_cleanup_path_env_error(fs_util::remove_file(path))?; } return Ok(()); } diff --git a/app/buck2_execute/src/execute/command_executor.rs b/app/buck2_execute/src/execute/command_executor.rs index 86eaf250f4269..5cd308c90470c 100644 --- a/app/buck2_execute/src/execute/command_executor.rs +++ b/app/buck2_execute/src/execute/command_executor.rs @@ -13,26 +13,31 @@ use std::time::Duration; use anyhow::Context; use buck2_common::file_ops::TrackedFileDigest; -use buck2_core::directory::FingerprintedDirectory; use buck2_core::execution_types::executor_config::CommandGenerationOptions; use buck2_core::execution_types::executor_config::OutputPathsBehavior; +use buck2_core::execution_types::executor_config::RemoteExecutorDependency; use buck2_core::fs::artifact_path_resolver::ArtifactFs; +use buck2_core::fs::project_rel_path::ProjectRelativePath; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; +use buck2_directory::directory::fingerprinted_directory::FingerprintedDirectory; +use buck2_futures::cancellation::CancellationContext; use dupe::Dupe; -use more_futures::cancellation::CancellationContext; use remote_execution as RE; +use remote_execution::TActionResult2; use sorted_vector_map::SortedVectorMap; use super::cache_uploader::CacheUploadResult; use crate::artifact::fs::ExecutorFs; use crate::digest::CasDigestToReExt; use crate::digest_config::DigestConfig; -use crate::execute::blobs::ActionBlobs; +use crate::execute::action_digest_and_blobs::ActionDigestAndBlobs; +use crate::execute::action_digest_and_blobs::ActionDigestAndBlobsBuilder; use crate::execute::cache_uploader::CacheUploadInfo; -use crate::execute::cache_uploader::DepFileEntry; +use crate::execute::cache_uploader::IntoRemoteDepFile; use crate::execute::cache_uploader::UploadCache; use crate::execute::executor_stage; use crate::execute::manager::CommandExecutionManager; +use crate::execute::paths_with_digest::PathsWithDigestBlobData; use crate::execute::prepared::PreparedAction; use crate::execute::prepared::PreparedCommand; use crate::execute::prepared::PreparedCommandExecutor; @@ -104,6 +109,10 @@ impl CommandExecutor { ExecutorFs::new(&self.0.artifact_fs, self.0.options.path_separator) } + pub fn re_platform(&self) -> &RE::Platform { + &self.0.re_platform + } + /// Check if the action can be served by the action cache. 
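// Aside: the `#[display(fmt = "...")]` -> `#[display("...")]` rewrites in this
// diff (MutexClaim above, CommandExecutionKind further down) follow derive_more's
// newer positional attribute form. A standalone sketch with an invented enum:
use derive_more::Display;

#[derive(Display)]
enum ExampleKind {
    #[display("local")]
    Local,
    #[display("remote")]
    Remote,
}

fn demo() {
    // The Display output is unchanged; only the attribute syntax moved.
    assert_eq!(ExampleKind::Local.to_string(), "local");
    assert_eq!(ExampleKind::Remote.to_string(), "remote");
}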
pub async fn action_cache( &self, @@ -121,11 +130,19 @@ impl CommandExecutor { &self, info: &CacheUploadInfo<'_>, execution_result: &CommandExecutionResult, - dep_file_entry: Option<DepFileEntry>, + re_result: Option<TActionResult2>, + dep_file_bundle: Option<&mut dyn IntoRemoteDepFile>, + action_digest_and_blobs: &ActionDigestAndBlobs, ) -> anyhow::Result<CacheUploadResult> { self.0 .cache_uploader - .upload(info, execution_result, dep_file_entry) + .upload( + info, + execution_result, + re_result, + dep_file_bundle, + action_digest_and_blobs, + ) .await } @@ -170,7 +187,7 @@ impl CommandExecutor { let action = re_create_action( request.all_args_vec(), request.paths().output_paths(), - request.working_directory().map(|p| p.as_str().to_owned()), + request.working_directory(), request.env(), input_digest, action_metadata_blobs, @@ -180,6 +197,7 @@ digest_config, self.0.options.output_paths_behavior, request.unique_input_inodes(), + request.remote_execution_dependencies(), )?; anyhow::Ok(action) @@ -190,21 +208,22 @@ fn re_create_action( args: Vec<String>, outputs: &[(ProjectRelativePathBuf, OutputType)], - workdir: Option<String>, + working_directory: &ProjectRelativePath, environment: &SortedVectorMap<String, String>, input_digest: &TrackedFileDigest, - blobs: impl Iterator<Item = (Vec<u8>, TrackedFileDigest)>, + blobs: impl IntoIterator<Item = (PathsWithDigestBlobData, TrackedFileDigest)>, timeout: Option<Duration>, platform: RE::Platform, do_not_cache: bool, digest_config: DigestConfig, output_paths_behavior: OutputPathsBehavior, unique_input_inodes: bool, + remote_execution_dependencies: &Vec<RemoteExecutorDependency>, ) -> anyhow::Result<PreparedAction> { let mut command = RE::Command { arguments: args, platform: Some(platform), - working_directory: workdir.unwrap_or_default(), + working_directory: working_directory.as_str().to_owned(), environment_variables: environment .iter() .map(|(k, v)| RE::EnvironmentVariable { @@ -260,18 +279,28 @@ fn re_create_action( } } - let mut prepared_blobs = ActionBlobs::new(digest_config); + let mut action_and_blobs = ActionDigestAndBlobsBuilder::new(digest_config); + for (data, digest) in blobs { - prepared_blobs.add_blob(digest, data); + action_and_blobs.add_paths(digest, data); + } + + // Required by the RE spec to be sorted: + // https://github.com/bazelbuild/remote-apis/blob/1f36c310b28d762b258ea577ed08e8203274efae/build/bazel/remote/execution/v2/remote_execution.proto#L589 + command.output_directories.sort(); + command.output_files.sort(); + #[cfg(not(fbcode_build))] + { + command.output_paths.sort(); + command.output_node_properties.sort(); } + command + .environment_variables + .sort_by(|e1, e2| e1.name.cmp(&e2.name)); let mut action = RE::Action { input_root_digest: Some(input_digest.to_grpc()), - command_digest: Some( - prepared_blobs - .add_protobuf_message(&command, digest_config) - .to_grpc(), - ), + command_digest: Some(action_and_blobs.add_command(&command).to_grpc()), timeout: timeout .map(|t| t.try_into()) .transpose() @@ -297,12 +326,13 @@ fn re_create_action( let _unused = &mut action; } - let action = prepared_blobs.add_protobuf_message(&action, digest_config); + let action_and_blobs = action_and_blobs.build(&action); + Ok(PreparedAction { - action: action.data().dupe().coerce(), - blobs: prepared_blobs, + action_and_blobs, platform: command .platform .expect("We did put a platform a few lines up"), + remote_execution_dependencies: remote_execution_dependencies.to_owned(), }) } diff --git a/app/buck2_execute/src/execute/dep_file_digest.rs b/app/buck2_execute/src/execute/dep_file_digest.rs index d525b60accd0b..e41429b96604a 100644 --- a/app/buck2_execute/src/execute/dep_file_digest.rs
+++ b/app/buck2_execute/src/execute/dep_file_digest.rs @@ -9,14 +9,14 @@ use buck2_common::cas_digest::CasDigest; use buck2_common::cas_digest::CasDigestConfig; +use buck2_common::cas_digest::CasDigestKind; use buck2_common::cas_digest::TrackedCasDigest; -use buck2_common::cas_digest::TrackedCasDigestKind; pub struct DepFileDigestKind { _private: (), } -impl TrackedCasDigestKind for DepFileDigestKind { +impl CasDigestKind for DepFileDigestKind { fn empty_digest(_config: CasDigestConfig) -> Option<TrackedCasDigest<Self>> { // No reason to optimize "empty" actions. None diff --git a/app/buck2_execute/src/execute/dice_data.rs b/app/buck2_execute/src/execute/dice_data.rs deleted file mode 100644 index 7c83476e301db..0000000000000 --- a/app/buck2_execute/src/execute/dice_data.rs +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! Attaching command execution related data to Dice - -use std::sync::Arc; - -use buck2_core::execution_types::executor_config::CommandExecutorConfig; -use buck2_core::fs::artifact_path_resolver::ArtifactFs; -use dice::DiceComputations; -use dice::DiceData; -use dice::UserComputationData; -use dupe::Dupe; -use remote_execution as RE; - -use super::prepared::PreparedCommandOptionalExecutor; -use crate::execute::cache_uploader::UploadCache; -use crate::execute::prepared::PreparedCommandExecutor; -use crate::re::manager::ManagedRemoteExecutionClient; - -pub struct CommandExecutorResponse { - pub executor: Arc<dyn PreparedCommandExecutor>, - pub platform: RE::Platform, - pub cache_checker: Arc<dyn PreparedCommandOptionalExecutor>, - pub cache_uploader: Arc<dyn UploadCache>, -} - -pub trait SetCommandExecutor { - fn set_command_executor(&mut self, init: Box<dyn HasCommandExecutor + Send + Sync + 'static>); -} - -pub trait HasCommandExecutor { - fn get_command_executor( - &self, - artifact_fs: &ArtifactFs, - config: &CommandExecutorConfig, - ) -> anyhow::Result<CommandExecutorResponse>; -} - -impl SetCommandExecutor for UserComputationData { - fn set_command_executor( - &mut self, - delegate: Box<dyn HasCommandExecutor + Send + Sync + 'static>, - ) { - self.data.set(HasCommandExecutorHolder { delegate }) - } -} - -impl HasCommandExecutor for DiceComputations { - fn get_command_executor( - &self, - artifact_fs: &ArtifactFs, - config: &CommandExecutorConfig, - ) -> anyhow::Result<CommandExecutorResponse> { - let holder = self - .per_transaction_data() - .data - .get::<HasCommandExecutorHolder>() - .expect("CommandExecutorDelegate should be set"); - holder.delegate.get_command_executor(artifact_fs, config) - } -} - -struct HasCommandExecutorHolder { - delegate: Box<dyn HasCommandExecutor + Send + Sync + 'static>, -} - -pub trait HasFallbackExecutorConfig { - fn get_fallback_executor_config(&self) -> &Arc<CommandExecutorConfig>; -} - -impl HasFallbackExecutorConfig for DiceComputations { - fn get_fallback_executor_config(&self) -> &Arc<CommandExecutorConfig> { - self.per_transaction_data() - .data - .get::<Arc<CommandExecutorConfig>>() - .expect("CommandExecutorConfig should be set") - } -} - -pub fn set_fallback_executor_config(data: &mut DiceData, config: Arc<CommandExecutorConfig>) { - data.set(config) -} - -pub trait SetReClient { - fn set_re_client(&mut self, re_client: ManagedRemoteExecutionClient); } - -pub trait GetReClient { - fn get_re_client(&self) -> ManagedRemoteExecutionClient; -} - -impl SetReClient for UserComputationData { - fn set_re_client(&mut self, re_client: ManagedRemoteExecutionClient) { - self.data.set(re_client); - } -} - -impl GetReClient for UserComputationData { - fn get_re_client(&self) -> ManagedRemoteExecutionClient { - self.data - .get::<ManagedRemoteExecutionClient>() -
.expect("Materializer should be set") - .dupe() - } -} diff --git a/app/buck2_execute/src/execute/inputs_directory.rs b/app/buck2_execute/src/execute/inputs_directory.rs index e98dd6a353c81..39bb4b80d9e25 100644 --- a/app/buck2_execute/src/execute/inputs_directory.rs +++ b/app/buck2_execute/src/execute/inputs_directory.rs @@ -8,8 +8,8 @@ */ use buck2_common::file_ops::FileMetadata; -use buck2_core::directory::DirectoryEntry; use buck2_core::fs::artifact_path_resolver::ArtifactFs; +use buck2_directory::directory::entry::DirectoryEntry; use dupe::Dupe; use crate::directory::ActionDirectoryBuilder; diff --git a/app/buck2_execute/src/execute/kind.rs b/app/buck2_execute/src/execute/kind.rs index b29a1fbdd3666..9d407d3bf827c 100644 --- a/app/buck2_execute/src/execute/kind.rs +++ b/app/buck2_execute/src/execute/kind.rs @@ -9,8 +9,12 @@ use std::time::Duration; +use allocative::Allocative; use buck2_core::execution_types::executor_config::RemoteExecutorUseCase; +use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; +use buck2_data::RePlatform; use derive_more::Display; +use gazebo::prelude::SliceExt; use remote_execution as RE; use sorted_vector_map::SortedVectorMap; @@ -18,10 +22,10 @@ use crate::execute::action_digest::ActionDigest; use crate::execute::dep_file_digest::DepFileDigest; use crate::re::convert::platform_to_proto; -#[derive(Debug, Display, Clone)] +#[derive(Debug, Display, Clone, Allocative)] pub enum CommandExecutionKind { /// This action was executed locally. - #[display(fmt = "local")] + #[display("local")] Local { // Even though this did not run on RE, we still produced this, so we might as well report // it. @@ -30,32 +34,35 @@ pub enum CommandExecutionKind { env: SortedVectorMap, }, /// This action was executed via a remote executor. - #[display(fmt = "remote")] + #[display("remote")] Remote { details: RemoteCommandExecutionDetails, /// How long this command queued in RE. This value excludes execution time, i.e. for action cache hit, /// this value represents how long a request has to wait for server to handle. queue_time: Duration, + /// Local paths to the materialized inputs for failed actions, if `--materialize-failed-re-action-inputs` + /// was passed to build options + materialized_inputs_for_failed: Option>, }, /// This action was served by the action cache and not executed. - #[display(fmt = "action_cache")] + #[display("action_cache")] ActionCache { details: RemoteCommandExecutionDetails, }, /// This action was served by the action cache (remote dep file) and not executed. - #[display(fmt = "remote_dep_file_cache")] + #[display("remote_dep_file_cache")] RemoteDepFileCache { details: RemoteCommandExecutionDetails, }, /// This action would have executed via a local worker but failed during worker initialization. - #[display(fmt = "worker_init")] + #[display("worker_init")] LocalWorkerInit { command: Vec, env: SortedVectorMap, }, /// This action was executed via a local worker. 
- #[display(fmt = "worker")] + #[display("worker")] LocalWorker { digest: ActionDigest, command: Vec<String>, @@ -107,6 +114,7 @@ impl CommandExecutionKind { Self::Remote { details, queue_time, + materialized_inputs_for_failed, } => Command::RemoteCommand(buck2_data::RemoteCommand { action_digest: details.action_digest.to_string(), cache_hit: false, @@ -114,6 +122,10 @@ remote_dep_file_key: None, queue_time: (*queue_time).try_into().ok(), details: details.to_proto(omit_details), + materialized_inputs_for_failed: materialized_inputs_for_failed + .as_ref() + .map(|paths| paths.clone().map(|p| format!("{}", p))) + .unwrap_or_default(), }), Self::ActionCache { details } => Command::RemoteCommand(buck2_data::RemoteCommand { @@ -123,6 +135,7 @@ queue_time: None, details: details.to_proto(omit_details), remote_dep_file_key: None, + materialized_inputs_for_failed: Vec::new(), }), Self::RemoteDepFileCache { details } => { @@ -136,6 +149,7 @@ .remote_dep_file_key .as_ref() .map(|k| k.to_string()), + materialized_inputs_for_failed: Vec::new(), }) } @@ -176,16 +190,32 @@ impl CommandExecutionKind { } /// Structured data for a RE request. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Allocative)] pub struct RemoteCommandExecutionDetails { pub action_digest: ActionDigest, pub remote_dep_file_key: Option<DepFileDigest>, pub session_id: Option<String>, pub use_case: RemoteExecutorUseCase, - pub platform: RE::Platform, + pub platform: RePlatform, } impl RemoteCommandExecutionDetails { + pub fn new( + action_digest: ActionDigest, + remote_dep_file_key: Option<DepFileDigest>, + session_id: Option<String>, + use_case: RemoteExecutorUseCase, + platform: &RE::Platform, + ) -> Self { + Self { + action_digest, + remote_dep_file_key, + session_id, + use_case, + platform: platform_to_proto(platform), + } + } + fn to_proto(&self, omit_details: bool) -> Option<buck2_data::RemoteCommandDetails> { if omit_details { return None; @@ -194,7 +224,7 @@ impl RemoteCommandExecutionDetails { Some(buck2_data::RemoteCommandDetails { session_id: self.session_id.clone(), use_case: self.use_case.to_string(), - platform: Some(platform_to_proto(&self.platform)), + platform: Some(self.platform.clone()), }) } } diff --git a/app/buck2_execute/src/execute/manager.rs b/app/buck2_execute/src/execute/manager.rs index 338f960afbaf1..4381d1591f4c3 100644 --- a/app/buck2_execute/src/execute/manager.rs +++ b/app/buck2_execute/src/execute/manager.rs @@ -12,6 +12,8 @@ use std::time::Duration; use buck2_common::liveliness_observer::LivelinessObserver; use buck2_events::dispatch::EventDispatcher; +use futures::future::Future; +use futures::future::FutureExt; use indexmap::IndexMap; use crate::artifact_value::ArtifactValue; @@ -20,6 +22,7 @@ use crate::execute::claim::ClaimManager; use crate::execute::kind::CommandExecutionKind; use crate::execute::output::CommandStdStreams; use crate::execute::request::CommandExecutionOutput; +use crate::execute::result::CommandExecutionErrorType; use crate::execute::result::CommandExecutionMetadata; use crate::execute::result::CommandExecutionReport; use crate::execute::result::CommandExecutionResult; @@ -35,14 +38,21 @@ trait CommandExecutionManagerLike: Sized { exit_code: Option<i32>, timing: CommandExecutionMetadata, ) -> CommandExecutionResult; + + fn execution_kind(&self) -> Option<CommandExecutionKind>; } -/// This tracker helps track the information that will go into the BuckCommandExecutionMetadata -pub struct CommandExecutionManager { +pub struct CommandExecutionManagerInner { pub claim_manager: Box<dyn ClaimManager>, pub events: EventDispatcher, pub
liveliness_observer: Arc<dyn LivelinessObserver>, pub intend_to_fallback_on_failure: bool, + pub execution_kind: Option<CommandExecutionKind>, +} + +/// This tracker helps track the information that will go into the BuckCommandExecutionMetadata +pub struct CommandExecutionManager { + pub inner: Box<CommandExecutionManagerInner>, } impl CommandExecutionManager { @@ -52,26 +62,36 @@ impl CommandExecutionManager { liveliness_observer: Arc<dyn LivelinessObserver>, ) -> Self { Self { - claim_manager, - events, - liveliness_observer, - intend_to_fallback_on_failure: false, + inner: Box::new(CommandExecutionManagerInner { + claim_manager, + events, + liveliness_observer, + intend_to_fallback_on_failure: false, + execution_kind: None, + }), } } /// Acquire a claim. This might never return if the claim has been taken. - pub async fn claim(self) -> CommandExecutionManagerWithClaim { - let claim = self.claim_manager.claim().await; - - CommandExecutionManagerWithClaim { - claim, - events: self.events, - liveliness_observer: self.liveliness_observer, - } + pub fn claim(self) -> impl Future<Output = CommandExecutionManagerWithClaim> { + let events = self.inner.events; + let liveliness_observer = self.inner.liveliness_observer; + let execution_kind = self.inner.execution_kind; + self.inner + .claim_manager + .claim() + .map(|claim| CommandExecutionManagerWithClaim { + inner: Box::new(CommandExecutionManagerWithClaimInner { + claim, + events, + liveliness_observer, + execution_kind, + }), + }) } pub fn on_result_delayed(&mut self) { - self.claim_manager.on_result_delayed(); + self.inner.claim_manager.on_result_delayed(); } pub fn cancel(self) -> CommandExecutionResult { @@ -88,7 +108,12 @@ impl CommandExecutionManager { mut self, intend_to_fallback_on_failure: bool, ) -> Self { - self.intend_to_fallback_on_failure = intend_to_fallback_on_failure; + self.inner.intend_to_fallback_on_failure = intend_to_fallback_on_failure; + self + } + + pub fn with_execution_kind(mut self, execution_kind: CommandExecutionKind) -> Self { + self.inner.execution_kind = Some(execution_kind); self } } @@ -117,16 +142,26 @@ impl CommandExecutionManagerLike for CommandExecutionManager { dep_file_key: None, eligible_for_full_hybrid: false, dep_file_metadata: None, + action_result: None, } } + + fn execution_kind(&self) -> Option<CommandExecutionKind> { + self.inner.execution_kind.clone() + } } -pub struct CommandExecutionManagerWithClaim { +pub struct CommandExecutionManagerWithClaimInner { pub events: EventDispatcher, pub liveliness_observer: Arc<dyn LivelinessObserver>, + pub execution_kind: Option<CommandExecutionKind>, claim: Box<dyn Claim>, } +pub struct CommandExecutionManagerWithClaim { + pub inner: Box<CommandExecutionManagerWithClaimInner>, +} + /// Like CommandExecutionManager but provides access to things that are only allowed with a Claim; impl CommandExecutionManagerWithClaim { /// Explicitly requires a Claim here to help implementors remember to claim things since a @@ -156,6 +191,11 @@ impl CommandExecutionManagerWithClaim { CommandExecutionMetadata::default(), ) } + + pub fn with_execution_kind(mut self, execution_kind: CommandExecutionKind) -> Self { + self.inner.execution_kind = Some(execution_kind); + self + } } impl CommandExecutionManagerLike for CommandExecutionManagerWithClaim { @@ -170,7 +210,7 @@ impl CommandExecutionManagerLike for CommandExecutionManagerWithClaim { CommandExecutionResult { outputs, report: CommandExecutionReport { - claim: Some(self.claim), + claim: Some(self.inner.claim), status, timing, std_streams, @@ -182,8 +222,13 @@ dep_file_key: None, eligible_for_full_hybrid: false, dep_file_metadata: None, + action_result: None, } } + + fn execution_kind(&self) -> Option<CommandExecutionKind> { +
self.inner.execution_kind.clone() + } } pub trait CommandExecutionManagerExt: Sized { @@ -204,7 +249,20 @@ timing: CommandExecutionMetadata, ) -> CommandExecutionResult; - fn error(self, stage: &'static str, error: impl Into<anyhow::Error>) -> CommandExecutionResult; + fn error( + self, + stage: &'static str, + error: impl Into<buck2_error::Error>, + ) -> CommandExecutionResult { + self.error_classified(stage, error, CommandExecutionErrorType::Other) + } + + fn error_classified( + self, + stage: &'static str, + error: impl Into<buck2_error::Error>, + error_type: CommandExecutionErrorType, + ) -> CommandExecutionResult; } impl<T> CommandExecutionManagerExt for T where @@ -247,11 +305,19 @@ ) } - fn error(self, stage: &'static str, error: impl Into<anyhow::Error>) -> CommandExecutionResult { + fn error_classified( + self, + stage: &'static str, + error: impl Into<buck2_error::Error>, + error_type: CommandExecutionErrorType, + ) -> CommandExecutionResult { + let execution_kind = self.execution_kind(); self.result( CommandExecutionStatus::Error { stage, error: error.into(), + execution_kind, + typ: error_type, }, IndexMap::new(), Default::default(), diff --git a/app/buck2_execute/src/execute/mod.rs b/app/buck2_execute/src/execute/mod.rs deleted file mode 100644 index 2268e9ca6af83..0000000000000 --- a/app/buck2_execute/src/execute/mod.rs +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -pub mod action_digest; -pub mod blobs; -pub mod blocking; -pub mod cache_uploader; -pub mod claim; -pub mod clean_output_paths; -pub mod command_executor; -pub mod dep_file_digest; -pub mod dice_data; -pub mod environment_inheritance; -pub mod inputs_directory; -pub mod kind; -pub mod manager; -pub mod output; -pub mod prepared; -pub mod request; -pub mod result; -pub mod target; -pub mod testing_dry_run; - -use std::future::Future; - -use buck2_events::dispatch::span; -use buck2_events::dispatch::span_async; -use futures::FutureExt; - -pub fn executor_stage_async<F: Future>( - stage: impl Into<buck2_data::executor_stage_start::Stage>, - f: F, -) -> impl Future<Output = <F as Future>::Output> { - // We avoid using `async fn` or `async move` here to avoid doubling the - future size.
See https://github.com/rust-lang/rust/issues/62958 - let event = buck2_data::ExecutorStageStart { - stage: Some(stage.into()), - }; - span_async(event, f.map(|v| (v, buck2_data::ExecutorStageEnd {}))) -} - -pub fn executor_stage(stage: impl Into, f: F) -> R -where - F: FnOnce() -> R, -{ - let event = buck2_data::ExecutorStageStart { - stage: Some(stage.into()), - }; - span(event, || { - let r = f(); - (r, buck2_data::ExecutorStageEnd {}) - }) -} diff --git a/app/buck2_execute/src/execute/output.rs b/app/buck2_execute/src/execute/output.rs index a2a27d80a8779..2b156aba2a585 100644 --- a/app/buck2_execute/src/execute/output.rs +++ b/app/buck2_execute/src/execute/output.rs @@ -63,6 +63,17 @@ impl ReStdStream { } } + fn download_blob_help(digest: &TDigest, digest_config: DigestConfig) -> String { + if buck2_core::is_open_source() { + String::new() + } else { + format!( + " - to view type `frecli cas download-blob {}`", + FileDigest::from_re(digest, digest_config).as_display() + ) + } + } + pub(crate) async fn to_lossy( &self, client: &ManagedRemoteExecutionClient, @@ -81,8 +92,8 @@ impl ReStdStream { Err(e) => { tracing::warn!("Failed to download action stderr: {:#}", e); format!( - "Result could not be downloaded - to view type `frecli cas download-blob {}`", - FileDigest::from_re(digest, digest_config).as_display(), + "Result could not be downloaded{}", + Self::download_blob_help(digest, digest_config), ) } } @@ -90,8 +101,8 @@ impl ReStdStream { Self::PrefetchedLossy { data, .. } => data.clone(), Self::Digest(digest) => { format!( - "Result too large to display - to view type `frecli cas download-blob {}`", - FileDigest::from_re(digest, digest_config).as_display(), + "Result too large to display{}", + Self::download_blob_help(digest, digest_config), ) } Self::None => String::new(), diff --git a/app/buck2_execute/src/execute/paths_with_digest.rs b/app/buck2_execute/src/execute/paths_with_digest.rs new file mode 100644 index 0000000000000..4d29bb80975b0 --- /dev/null +++ b/app/buck2_execute/src/execute/paths_with_digest.rs @@ -0,0 +1,64 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use std::fmt::Display; + +use buck2_common::file_ops::FileDigest; +use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; +use serde::Serialize; +use serde::Serializer; + +use crate::execute::request::ActionMetadataBlobData; + +#[derive(Clone)] +pub struct PathsWithDigestBlobData(pub ActionMetadataBlobData); + +fn stringify<T, S>(value: &T, serializer: S) -> Result<S::Ok, S::Error> +where + T: Display, + S: Serializer, +{ + serializer.collect_str(value) +} + +#[derive(Serialize)] +struct PathWithDigest<'a> { + path: ForwardRelativePathBuf, + #[serde(serialize_with = "stringify")] + digest: &'a FileDigest, +} + +#[derive(Serialize)] +struct MetadataJson<'a> { + version: i32, + digests: Vec<PathWithDigest<'a>>, +} + +#[derive(Default)] +pub struct PathsWithDigestBuilder<'a> { + paths: Vec<PathWithDigest<'a>>, +} + +impl<'a> PathsWithDigestBuilder<'a> { + pub fn add(&mut self, path: ForwardRelativePathBuf, digest: &'a FileDigest) { + self.paths.push(PathWithDigest { path, digest }); + } + + pub fn build(self) -> anyhow::Result<PathsWithDigestBlobData> { + let json = MetadataJson { + digests: self.paths, + // Increment this version if format changes + version: 1, + }; + let json_string = serde_json::to_string(&json)?; + Ok(PathsWithDigestBlobData(ActionMetadataBlobData::from_json( + json_string, + ))) + } +} diff --git a/app/buck2_execute/src/execute/prepared.rs b/app/buck2_execute/src/execute/prepared.rs index 61864764112dc..7c6ad606256ba 100644 --- a/app/buck2_execute/src/execute/prepared.rs +++ b/app/buck2_execute/src/execute/prepared.rs @@ -11,12 +11,14 @@ use std::ops::ControlFlow; use std::sync::Arc; use async_trait::async_trait; -use more_futures::cancellation::CancellationContext; +use buck2_core::execution_types::executor_config::RemoteExecutorDependency; +use buck2_futures::cancellation::CancellationContext; +use dupe::Dupe; use remote_execution as RE; use crate::digest_config::DigestConfig; use crate::execute::action_digest::ActionDigest; -use crate::execute::blobs::ActionBlobs; +use crate::execute::action_digest_and_blobs::ActionDigestAndBlobs; use crate::execute::manager::CommandExecutionManager; use crate::execute::request::CommandExecutionRequest; use crate::execute::request::ExecutorPreference; @@ -24,11 +26,15 @@ use crate::execute::result::CommandExecutionResult; use crate::execute::target::CommandExecutionTarget; pub struct PreparedAction { - pub action: ActionDigest, - // The encoded action and other messages referenced from it by digest (e.g. RE::Command). - // Does not include the files referenced in inputs. - pub blobs: ActionBlobs, + pub action_and_blobs: ActionDigestAndBlobs, pub platform: RE::Platform, + pub remote_execution_dependencies: Vec<RemoteExecutorDependency>, +} + +impl PreparedAction { + pub fn digest(&self) -> ActionDigest { + self.action_and_blobs.action.dupe() + } } pub struct PreparedCommand<'a, 'b> { diff --git a/app/buck2_execute/src/execute/request.rs b/app/buck2_execute/src/execute/request.rs index 0b4a70b486aaa..291c276804954 100644 --- a/app/buck2_execute/src/execute/request.rs +++ b/app/buck2_execute/src/execute/request.rs @@ -7,16 +7,14 @@ * of this source tree.
*/ -use std::fmt::Display; +use std::sync::Arc; use std::time::Duration; use allocative::Allocative; use buck2_common::file_ops::FileMetadata; use buck2_common::file_ops::TrackedFileDigest; use buck2_common::local_resource_state::LocalResourceState; -use buck2_core::directory::DirectoryEntry; -use buck2_core::directory::DirectoryIterator; -use buck2_core::directory::FingerprintedDirectory; +use buck2_core::execution_types::executor_config::RemoteExecutorDependency; use buck2_core::fs::artifact_path_resolver::ArtifactFs; use buck2_core::fs::buck_out_path::BuckOutPath; use buck2_core::fs::buck_out_path::BuckOutScratchPath; @@ -24,15 +22,19 @@ use buck2_core::fs::buck_out_path::BuckOutTestPath; use buck2_core::fs::project_rel_path::ProjectRelativePath; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; use buck2_core::soft_error; +use buck2_directory::directory::directory::Directory; +use buck2_directory::directory::directory_iterator::DirectoryIterator; +use buck2_directory::directory::entry::DirectoryEntry; use derive_more::Display; use dupe::Dupe; use gazebo::variants::UnpackVariants; use host_sharing::host_sharing::HostSharingRequirements; use indexmap::IndexSet; use itertools::Itertools; +use prost::Message; +use remote_execution as RE; use sorted_vector_map::SortedVectorMap; use starlark_map::sorted_set::SortedSet; -use thiserror::Error; use super::dep_file_digest::DepFileDigest; use crate::artifact::group::artifact_group_values_dyn::ArtifactGroupValuesDyn; @@ -42,10 +44,34 @@ use crate::directory::ActionDirectoryMember; use crate::directory::ActionImmutableDirectory; use crate::execute::environment_inheritance::EnvironmentInheritance; use crate::execute::inputs_directory::inputs_directory; +use crate::execute::paths_with_digest::PathsWithDigestBlobData; + +/// What protobuf messages can be stored in the action metadata blobs. 
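// Aside: the marker-trait pattern defined just below limits which prost
// messages may become action metadata blobs. A self-contained sketch, with
// `ExampleBlob`, `ExampleBlobMessage`, and `encode_blob` invented for
// illustration:
use prost::Message;

trait ExampleBlobMessage: Message {}

#[derive(Clone, PartialEq, Message)]
struct ExampleBlob {
    #[prost(string, tag = "1")]
    name: String,
}

// Only types with an explicit marker impl are accepted by `encode_blob`.
impl ExampleBlobMessage for ExampleBlob {}

fn encode_blob(m: &impl ExampleBlobMessage) -> Vec<u8> {
    let mut buf = Vec::new();
    // prost's encode only fails when the buffer cannot grow; a Vec always
    // can, which is the same reasoning `from_message` below relies on.
    m.encode(&mut buf).unwrap();
    buf
}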
+pub trait ActionMetadataBlobMessage: Message {} + +impl ActionMetadataBlobMessage for RE::Action {} +impl ActionMetadataBlobMessage for RE::Command {} +impl ActionMetadataBlobMessage for RE::Tree {} + +#[derive(Clone)] +pub struct ActionMetadataBlobData(pub Vec<u8>); + +impl ActionMetadataBlobData { + pub fn from_message(m: &impl ActionMetadataBlobMessage) -> ActionMetadataBlobData { + let mut blob = Vec::new(); + // Unwrap is safe because it only fails in OOM conditions, which we pretend don't happen + m.encode(&mut blob).unwrap(); + ActionMetadataBlobData(blob) + } + + pub fn from_json(json: String) -> ActionMetadataBlobData { + ActionMetadataBlobData(json.into_bytes()) + } +} #[derive(Clone)] pub struct ActionMetadataBlob { - pub data: Vec<u8>, + pub data: PathsWithDigestBlobData, pub digest: TrackedFileDigest, pub path: BuckOutPath, } @@ -62,8 +88,9 @@ pub enum OutputCreationBehavior { Parent, } -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] #[error("Incompatible executor preferences: `{}` & `{}`", a, b)] +#[buck2(input)] struct IncompatibleExecutorPreferences { a: ExecutorPreference, b: ExecutorPreference, @@ -215,12 +242,9 @@ impl CommandExecutionPaths { let mut input_files_bytes = 0; - for entry in input_directory - .fingerprinted_unordered_walk() - .without_paths() - { + for entry in input_directory.unordered_walk_leaves().without_paths() { match entry { - DirectoryEntry::Leaf(ActionDirectoryMember::File(f)) => { + ActionDirectoryMember::File(f) => { input_files_bytes += f.digest.size(); } _ => {} @@ -268,12 +292,12 @@ pub struct CommandExecutionRequest { paths: CommandExecutionPaths, env: SortedVectorMap<String, String>, timeout: Option<Duration>, - executor_preference: ExecutorPreference, - host_sharing_requirements: HostSharingRequirements, + pub executor_preference: ExecutorPreference, + host_sharing_requirements: Arc<HostSharingRequirements>, // Used to disable the low pass filter for concurrent local actions. Enabled by default low_pass_filter: bool, /// Working directory, relative to the project root. - working_directory: Option<ProjectRelativePathBuf>, + working_directory: ProjectRelativePathBuf, /// Whether we should always prefetch stderr when executing. When it's needed, this lets us /// overlap stderr download with output downloads, which might be marginally useful to improve /// latency. @@ -296,6 +320,8 @@ pub struct CommandExecutionRequest { /// Remote dep file key, if the action has a dep file. /// If this key is set and remote dep file caching is enabled, it will be used to query the cache. pub remote_dep_file_key: Option<DepFileDigest>, /// RE dependencies to pass in action metadata.
+ remote_execution_dependencies: Vec<RemoteExecutorDependency>, } impl CommandExecutionRequest { @@ -312,9 +338,9 @@ env, timeout: None, executor_preference: ExecutorPreference::Default, - host_sharing_requirements: HostSharingRequirements::default(), + host_sharing_requirements: Arc::new(HostSharingRequirements::default()), low_pass_filter: true, - working_directory: None, + working_directory: ProjectRelativePathBuf::default(), prefetch_lossy_stderr: false, outputs_cleanup: true, local_environment_inheritance: None, @@ -324,6 +350,7 @@ worker: None, unique_input_inodes: false, remote_dep_file_key: None, + remote_execution_dependencies: Vec::new(), } } @@ -343,7 +370,7 @@ pub fn with_host_sharing_requirements( mut self, - host_sharing_requirements: HostSharingRequirements, + host_sharing_requirements: Arc<HostSharingRequirements>, ) -> Self { self.host_sharing_requirements = host_sharing_requirements; self @@ -355,7 +382,7 @@ } pub fn with_working_directory(mut self, working_directory: ProjectRelativePathBuf) -> Self { - self.working_directory = Some(working_directory); + self.working_directory = working_directory; self } @@ -443,8 +470,8 @@ self.low_pass_filter } - pub fn working_directory(&self) -> Option<&ProjectRelativePath> { - self.working_directory.as_deref() + pub fn working_directory(&self) -> &ProjectRelativePath { + &self.working_directory } pub fn with_local_environment_inheritance( @@ -503,6 +530,18 @@ pub fn unique_input_inodes(&self) -> bool { self.unique_input_inodes } + + pub fn with_remote_execution_dependencies( + mut self, + remote_execution_dependencies: Vec<RemoteExecutorDependency>, + ) -> Self { + self.remote_execution_dependencies = remote_execution_dependencies; + self + } + + pub fn remote_execution_dependencies(&self) -> &Vec<RemoteExecutorDependency> { + &self.remote_execution_dependencies + } } /// Is an output a file or a directory @@ -517,7 +556,7 @@ pub enum OutputType { Directory, } -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum OutputTypeError { #[error("Expected {1:?}, but `{0}` is already declared as {2:?}")] CheckPath(String, OutputType, OutputType), } @@ -541,6 +580,7 @@ impl OutputType { OutputType::File ) .into(), + deprecation: true, quiet: true )?; Ok(()) diff --git a/app/buck2_execute/src/execute/result.rs b/app/buck2_execute/src/execute/result.rs index 0b929bc217739..c0255274c5b53 100644 --- a/app/buck2_execute/src/execute/result.rs +++ b/app/buck2_execute/src/execute/result.rs @@ -14,19 +14,29 @@ use std::ops::FromResidual; use std::time::Duration; use std::time::SystemTime; +use allocative::Allocative; use buck2_action_metadata_proto::RemoteDepFile; use buck2_core::fs::artifact_path_resolver::ArtifactFs; +use derivative::Derivative; use dupe::Dupe; use indexmap::IndexMap; +use remote_execution::TActionResult2; use crate::artifact_value::ArtifactValue; use crate::execute::claim::Claim; +use crate::execute::dep_file_digest::DepFileDigest; use crate::execute::kind::CommandExecutionKind; use crate::execute::output::CommandStdStreams; use crate::execute::request::CommandExecutionOutput; use crate::execute::request::ResolvedCommandExecutionOutput; use crate::output_size::OutputSize; +#[derive(Debug)] +pub enum CommandExecutionErrorType { + StorageResourceExhausted, + Other, +} + /// "Status" of an action execution indicating how it finished. E.g. "built_remotely", "local_fallback", "action_cache".
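// Aside: the `with_*` setters added above follow a consuming-builder shape
// (take `mut self`, mutate, return `Self`), so a request is configured in one
// chained expression. Stand-in types below are invented; only the shape
// mirrors `CommandExecutionRequest`.
use std::sync::Arc;

#[derive(Default)]
struct ExampleRequest {
    working_directory: String,
    remote_execution_dependencies: Vec<String>,
    host_sharing_requirements: Arc<String>,
}

impl ExampleRequest {
    fn with_working_directory(mut self, wd: String) -> Self {
        self.working_directory = wd;
        self
    }
    fn with_remote_execution_dependencies(mut self, deps: Vec<String>) -> Self {
        self.remote_execution_dependencies = deps;
        self
    }
    // Arc mirrors the switch to Arc<HostSharingRequirements> above: sharing
    // the requirements keeps cloning a request cheap.
    fn with_host_sharing_requirements(mut self, reqs: Arc<String>) -> Self {
        self.host_sharing_requirements = reqs;
        self
    }
}

fn example() -> ExampleRequest {
    ExampleRequest::default()
        .with_working_directory("cell/pkg".to_owned())
        .with_remote_execution_dependencies(vec!["capability:example".to_owned()])
        .with_host_sharing_requirements(Arc::new("default".to_owned()))
}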
#[derive(Debug)] pub enum CommandExecutionStatus { @@ -38,7 +48,9 @@ }, Error { stage: &'static str, - error: anyhow::Error, + error: buck2_error::Error, + execution_kind: Option<CommandExecutionKind>, + typ: CommandExecutionErrorType, }, TimedOut { execution_kind: CommandExecutionKind, @@ -53,7 +65,7 @@ match self { CommandExecutionStatus::Success { execution_kind, .. } => Some(execution_kind), CommandExecutionStatus::Failure { execution_kind } => Some(execution_kind), - CommandExecutionStatus::Error { .. } => None, + CommandExecutionStatus::Error { execution_kind, .. } => execution_kind.as_ref(), CommandExecutionStatus::TimedOut { execution_kind, .. } => Some(execution_kind), CommandExecutionStatus::Cancelled => None, } @@ -69,7 +81,20 @@ CommandExecutionStatus::Failure { execution_kind } => { write!(f, "failure {}", execution_kind,) } - CommandExecutionStatus::Error { stage, error } => { + CommandExecutionStatus::Error { + stage, + error, + execution_kind: Some(execution_kind), + .. + } => { + write!(f, "error {}:{}\n{:#}", execution_kind, stage, error) + } + CommandExecutionStatus::Error { + stage, + error, + execution_kind: None, + .. + } => { write!(f, "error:{}\n{:#}", stage, error) } CommandExecutionStatus::TimedOut { duration, .. } => { @@ -82,7 +107,7 @@ /// Unlike action where we only really have just 1 time, commands can have slightly richer timing /// data. -#[derive(Debug, Copy, Clone, Dupe)] +#[derive(Debug, Copy, Clone, Dupe, Allocative)] pub struct CommandExecutionMetadata { /// How long this build actually waited for this action to complete pub wall_time: Duration, @@ -103,9 +128,19 @@ /// How long we spent hashing the action's inputs. pub hashing_duration: Duration, + + /// How many artifacts we hashed + pub hashed_artifacts_count: u64, + + /// How long this command spent waiting to run + pub queue_duration: Option<Duration>, } impl CommandExecutionMetadata { + pub fn end_time(&self) -> SystemTime { + self.start_time + self.wall_time + } + pub fn to_proto(&self) -> buck2_data::CommandExecutionMetadata { let metadata = self.dupe(); buck2_data::CommandExecutionMetadata { @@ -115,6 +150,8 @@ input_materialization_duration: metadata.input_materialization_duration.try_into().ok(), execution_stats: metadata.execution_stats, hashing_duration: metadata.hashing_duration.try_into().ok(), + hashed_artifacts_count: metadata.hashed_artifacts_count.try_into().ok().unwrap_or(0), + queue_duration: metadata.queue_duration.and_then(|d| d.try_into().ok()), } } } @@ -128,12 +165,15 @@ impl Default for CommandExecutionMetadata { execution_stats: None, input_materialization_duration: Duration::default(), hashing_duration: Duration::default(), + hashed_artifacts_count: 0, + queue_duration: None, } } } /// CommandExecutionResult is the result of an executor executing a command. -#[derive(Debug)] +#[derive(Derivative)] +#[derivative(Debug)] pub struct CommandExecutionResult { /// The outputs produced by this command pub outputs: IndexMap<CommandExecutionOutput, ArtifactValue>, @@ -146,13 +186,17 @@ /// Whether dep file information for this action was uploaded to cache, by Buck2. pub did_dep_file_cache_upload: bool, // Remote dep file key, if we did upload a dep file entry - pub dep_file_key: Option<String>, + pub dep_file_key: Option<DepFileDigest>, /// Whether this command was eligible for hybrid execution.
pub eligible_for_full_hybrid: bool, /// Execution metadata used for remote dep file lookups. /// This is picked up from the action result's auxiliary metadata and /// is used to verify the dep file cache lookup result pub dep_file_metadata: Option<RemoteDepFile>, + /// If the action executed on RE, the original action result + /// to be re-used when uploading the remote dep file. + #[derivative(Debug = "ignore")] + pub action_result: Option<TActionResult2>, } impl CommandExecutionResult { @@ -180,6 +224,15 @@ impl CommandExecutionResult { } } + pub fn was_remotely_executed(&self) -> bool { + match self.report.status { + CommandExecutionStatus::Success { + execution_kind: CommandExecutionKind::Remote { .. }, + } => true, + _ => false, + } + } + pub fn was_locally_executed(&self) -> bool { match self.report.status { CommandExecutionStatus::Success { @@ -189,6 +242,15 @@ } } + pub fn was_action_cache_hit(&self) -> bool { + match self.report.status { + CommandExecutionStatus::Success { + execution_kind: CommandExecutionKind::ActionCache { .. }, + } => true, + _ => false, + } + } + pub fn resolve_outputs<'a>( &'a self, fs: &'a ArtifactFs, @@ -236,7 +298,7 @@ impl CommandExecutionReport { } .into() } - CommandExecutionStatus::Error { stage, error } => { + CommandExecutionStatus::Error { stage, error, .. } => { buck2_data::command_execution::Error { stage: (*stage).to_owned(), error: format!("{:#}", error), @@ -330,9 +392,21 @@ mod tests { execution_stats: Some(buck2_data::CommandExecutionStats { cpu_instructions_user: Some(4), cpu_instructions_kernel: Some(5), + userspace_events: Some(buck2_data::CpuCounter { + count: 4, + time_enabled: 100, + time_running: 100, + }), + kernel_events: Some(buck2_data::CpuCounter { + count: 10, + time_enabled: 50, + time_running: 100, + }), }), input_materialization_duration: Duration::from_secs(6), hashing_duration: Duration::from_secs(7), + hashed_artifacts_count: 8, + queue_duration: Some(Duration::from_secs(9)), }; let std_streams = CommandStdStreams::Local { stdout: [65, 66, 67].to_vec(), // ABC @@ -368,6 +442,16 @@ let command_execution_stats = buck2_data::CommandExecutionStats { cpu_instructions_user: Some(4), cpu_instructions_kernel: Some(5), + userspace_events: Some(buck2_data::CpuCounter { + count: 4, + time_enabled: 100, + time_running: 100, + }), + kernel_events: Some(buck2_data::CpuCounter { + count: 10, + time_enabled: 50, + time_running: 100, + }), }; let command_execution_metadata = buck2_data::CommandExecutionMetadata { wall_time: Some(Duration { @@ -391,6 +475,11 @@ seconds: 7, nanos: 0, }), + hashed_artifacts_count: 8, + queue_duration: Some(Duration { + seconds: 9, + nanos: 0, + }), }; let command_execution_details = buck2_data::CommandExecutionDetails { signed_exit_code: Some(456), diff --git a/app/buck2_execute/src/execute/testing_dry_run.rs b/app/buck2_execute/src/execute/testing_dry_run.rs index 1a93a40364665..22533b78226cf 100644 --- a/app/buck2_execute/src/execute/testing_dry_run.rs +++ b/app/buck2_execute/src/execute/testing_dry_run.rs @@ -12,8 +12,8 @@ use std::sync::Mutex; use async_trait::async_trait; use buck2_core::fs::artifact_path_resolver::ArtifactFs; +use buck2_futures::cancellation::CancellationContext; use indexmap::IndexMap; -use more_futures::cancellation::CancellationContext; use sorted_vector_map::SortedVectorMap; use crate::artifact_value::ArtifactValue; diff --git a/app/buck2_execute/src/lib.rs b/app/buck2_execute/src/lib.rs index 05a24aa0f4a7b..aebb7770d0381 ---
a/app/buck2_execute/src/lib.rs +++ b/app/buck2_execute/src/lib.rs @@ -7,10 +7,13 @@ * of this source tree. */ +#![feature(error_generic_member_access)] #![feature(never_type)] #![feature(trait_alias)] #![feature(try_blocks)] #![feature(try_trait_v2)] +#![feature(used_with_arg)] +#![feature(trait_upcasting)] pub mod artifact; pub mod artifact_utils; diff --git a/app/buck2_execute/src/materialize.rs b/app/buck2_execute/src/materialize.rs new file mode 100644 index 0000000000000..3a57afa5cb814 --- /dev/null +++ b/app/buck2_execute/src/materialize.rs @@ -0,0 +1,13 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub mod http; + +pub mod materializer; +pub mod nodisk; diff --git a/app/buck2_execute/src/materialize/eden_api.rs b/app/buck2_execute/src/materialize/eden_api.rs deleted file mode 100644 index fe2e18adc9138..0000000000000 --- a/app/buck2_execute/src/materialize/eden_api.rs +++ /dev/null @@ -1,463 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::collections::BTreeMap; -use std::fs; -use std::path::Path; -use std::sync::Arc; - -use allocative::Allocative; -use anyhow::Context; -use buck2_core::directory::DirectoryEntry; -use buck2_core::env_helper::EnvHelper; -use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; -use buck2_core::fs::project::ProjectRoot; -use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; -use buck2_eden::connection::EdenConnectionManager; -use buck2_util::process::background_command; -use edenfs::client::EdenService; -use edenfs::CheckoutMode; -use edenfs::EnsureMaterializedParams; -use edenfs::ObjectType; -use edenfs::RemoveRecursivelyParams; -use edenfs::SetPathObjectIdParams; -use fbinit::FacebookInit; -use more_futures::cancellation::CancellationContext; -use serde::Deserialize; -use thiserror::Error; -use tokio::sync::Semaphore; - -use crate::artifact_value::ArtifactValue; -use crate::directory::ActionDirectoryMember; -use crate::re::re_get_session_id::ReGetSessionId; - -pub type EdenFsClient = Arc<dyn EdenService + Send + Sync>; - -const RE_SYMLINK_PREFIX: &str = "re-symlink"; - -#[derive(Error, Debug)] -pub enum SetupBuckOutError { - #[error("Could not read Eden Config")] - EdenConfigIOError(#[source] std::io::Error), - - #[error("Error during parsing the Eden Config")] - InvalidEdenConfig(#[source] toml::de::Error), - - #[error("Could not clean buck-out")] - CleanBuckOutFailed(#[source] std::io::Error), - - #[error("Could not create Eden buck-out")] - CreateEdenBuckoutOutputFailed(#[source] std::io::Error), - - #[error("Failed to run Eden clone to create buck-out: {0}")] - FailedToRunEdenClone(String), - - #[error("Could not add {0} to the redirection list of Eden buck-out: {1}")] - EdenRedirectAddFailed(String, #[source] std::io::Error), - - #[error("Failed to run Eden redirection add path {0} to bind a dir to buck-out: {1}")] - FailedToRunEdenRedirectionAdd(String, String), -} - -#[derive(Deserialize, Debug)] -struct Repository { - #[serde(rename = "type")] - repo_type: String, -} - -#[derive(Deserialize,
Debug)] -struct EdenConfig { - repository: Repository, - redirections: Option<BTreeMap<String, String>>, -} - -/// Eden has a magic .eden dir for all directories and its config is in .eden/client/config.toml. -/// Read the TOML file and make sure it is recas type mount. -fn read_eden_config(config_path: &Path) -> Result<EdenConfig, SetupBuckOutError> { - let config_contents = - fs::read_to_string(config_path).map_err(SetupBuckOutError::EdenConfigIOError)?; - let config: EdenConfig = - toml::from_str(&config_contents).map_err(SetupBuckOutError::InvalidEdenConfig)?; - Ok(config) -} - -pub fn is_recas_eden_mount(buck_out: &AbsNormPathBuf) -> Result<bool, SetupBuckOutError> { - let config_path = buck_out.as_path().join(".eden/client/config.toml"); - if fs::metadata(&config_path).is_err() { - return Ok(false); - } - let config = read_eden_config(&config_path)?; - Ok(config.repository.repo_type.as_str() == "recas") -} - -fn execute_eden_clone(buck_out: &AbsNormPathBuf) -> Result<(), SetupBuckOutError> { - // If the buck-out directory exists - if fs::metadata(buck_out).is_ok() { - // If it is already an eden-based buck-out, then it's good - if is_recas_eden_mount(buck_out)? { - return Ok(()); - } - // If it is regular FS, remove it first - fs::remove_dir_all(buck_out).map_err(SetupBuckOutError::CleanBuckOutFailed)?; - } - - let eden_clone = background_command("eden") - .arg("clone") - .arg("") - .arg(buck_out.as_os_str()) - .arg("--backing-store=recas") - .arg("--allow-nested-checkout") - .current_dir("/") - .output() - .map_err(SetupBuckOutError::CreateEdenBuckoutOutputFailed)?; - - if !eden_clone.status.success() { - return Err(SetupBuckOutError::FailedToRunEdenClone( - String::from_utf8_lossy(&eden_clone.stderr).to_string(), - )); - } - Ok(()) -} - -fn is_path_redirect(buck_out: &AbsNormPathBuf, path: &str) -> Result<bool, SetupBuckOutError> { - let config_path = buck_out.as_path().join(".eden/client/config.toml"); - if fs::metadata(&config_path).is_err() { - return Ok(false); - } - let config = read_eden_config(&config_path)?; - - if let Some(redir_config) = config.redirections { - if let Some(redir_type) = redir_config.get(path) { - return Ok(redir_type.as_str() == "bind"); - } - } - - Ok(false) -} - -fn execute_eden_redirection_add( - buck_out: &AbsNormPathBuf, - path: &str, -) -> Result<(), SetupBuckOutError> { - if is_path_redirect(buck_out, path)?
{ - return Ok(()); - } - let eden_direct = background_command("eden") - .arg("redirect") - .arg("add") - .arg(path) - .arg("bind") - .current_dir(buck_out.as_os_str()) - .output() - .map_err(|e| SetupBuckOutError::EdenRedirectAddFailed(String::from(path), e))?; - - if !eden_direct.status.success() { - return Err(SetupBuckOutError::FailedToRunEdenRedirectionAdd( - String::from(path), - String::from_utf8_lossy(&eden_direct.stderr).to_string(), - )); - } - Ok(()) -} - -fn get_symlink_object_id(target: &str) -> String { - format!("{}:{}", RE_SYMLINK_PREFIX, target) -} - -fn get_object_id_and_type(value: &ArtifactValue) -> (String, ObjectType) { - match value.entry() { - DirectoryEntry::Dir(dir) => (dir.fingerprint().to_string(), ObjectType::TREE), - DirectoryEntry::Leaf(ActionDirectoryMember::File(file)) => { - let object_type = match file.is_executable { - true => ObjectType::EXECUTABLE_FILE, - false => ObjectType::REGULAR_FILE, - }; - (file.digest.to_string(), object_type) - } - DirectoryEntry::Leaf(ActionDirectoryMember::Symlink(symlink)) => ( - get_symlink_object_id(symlink.target().as_str()), - ObjectType::SYMLINK, - ), - DirectoryEntry::Leaf(ActionDirectoryMember::ExternalSymlink(external_sym)) => ( - get_symlink_object_id(external_sym.to_path_buf().to_str().unwrap()), - ObjectType::SYMLINK, - ), - } -} - -fn setup(buck_out: &AbsNormPathBuf) -> Result<(), SetupBuckOutError> { - // Run "eden clone" command to create a brand new buck-out - execute_eden_clone(buck_out)?; - - // Run eden redirection to exclude write heavy dirs from using EdenFS - ["log", "tmp", "re_logs", "cache"] - .into_iter() - .try_for_each(|dir| -> Result<(), SetupBuckOutError> { - execute_eden_redirection_add(buck_out, dir)?; - Ok(()) - })?; - - Ok(()) -} - -#[derive(Allocative)] -pub struct EdenBuckOut { - /// Relative path of buck_out, i.e "buck-out/v2" - buck_out_path: ProjectRelativePathBuf, - connection_manager: EdenConnectionManager, - re_get_session_id: Arc<dyn ReGetSessionId>, -} - -impl EdenBuckOut { - pub fn new( - fb: FacebookInit, - buck_out_path: ProjectRelativePathBuf, - mount_point: AbsNormPathBuf, - re_get_session_id: Arc<dyn ReGetSessionId>, - ) -> anyhow::Result<Self> { - setup(&mount_point)?; - - // Default to the number of CPUs. This value is very conservative - // TODO (yipu): Benchmark and figure out optimal default concurrency - static CONCURRENCY: EnvHelper<usize> = EnvHelper::new("BUCK2_EDEN_CONCURRENCY"); - let concurrency = CONCURRENCY.get_copied()?.unwrap_or_else(num_cpus::get); - let connection_manager = - EdenConnectionManager::new(fb, &mount_point, Semaphore::new(concurrency))? - .expect("EdenFS mount does not setup correctly"); - - Ok(Self { - buck_out_path, - connection_manager, - re_get_session_id, - }) - } - - // TODO(yipu): This needs to take a RE use case as input and pass it to Eden.
- - // TODO(yipu): This needs to take a RE use case as input and pass it to Eden. - pub async fn set_path_object_id( - &self, - path: &ProjectRelativePathBuf, - value: &ArtifactValue, - ) -> anyhow::Result<()> { - // Eden's SetPathObjectId requires object_id and object_type; the object_id is the RE - // digest string for a file, or the symlink target for a symlink or external symlink - let (object_id, object_type) = get_object_id_and_type(value); - let relpath_to_buck_out = path.strip_prefix(&self.buck_out_path).with_context(|| { - format!( - "Invalid artifact: path might not be in the buck-out directory: {}", - path - ) - })?; -
- let re_session_id = self.re_get_session_id.get_session_id().await?; -
- let params = SetPathObjectIdParams { - mountPoint: self.connection_manager.get_mount_point(), - path: relpath_to_buck_out.as_str().as_bytes().to_vec(), - objectId: object_id.into_bytes(), - r#type: object_type, - mode: CheckoutMode::FORCE, - requestInfo: Some(BTreeMap::from([( - String::from("session-id"), - re_session_id, - )])), - ..Default::default() - }; - self.connection_manager - .with_eden(move |eden| eden.setPathObjectId(&params)) - .await?; - Ok(()) - } -
- async fn remove_path_recursive( - &self, - project_fs: &ProjectRoot, - path: &ProjectRelativePathBuf, - cancellations: &CancellationContext<'_>, - ) -> anyhow::Result<()> { - // The existence check will not trigger materialization, since EdenFS returns quickly - // without materializing the path. The only cost is that Eden will load the path's - // ancestors, which should be relatively inexpensive. - if !project_fs.resolve(path).exists() { - return Ok(()); - } -
- let relpath_to_buck_out = path.strip_prefix(&self.buck_out_path).with_context(|| { - format!( - "Invalid artifact: path might not be in the buck-out directory: {}", - path - ) - })?; -
- cancellations - .critical_section(|| async move { - let params = RemoveRecursivelyParams { - mountPoint: self.connection_manager.get_mount_point(), - path: relpath_to_buck_out.as_str().as_bytes().to_vec(), - ..Default::default() - }; -
- self.connection_manager - .with_eden(move |eden| eden.removeRecursively(&params)) - .await?; - Ok(()) - }) - .await - } -
- pub async fn remove_paths_recursive( - &self, - project_fs: &ProjectRoot, - paths: Vec<ProjectRelativePathBuf>, - cancellations: &CancellationContext<'_>, - ) -> anyhow::Result<()> { - // TODO(bobyf, torozco) does this need to be a critical section -
- let futs = paths.iter().map(|path| async move { - self.remove_path_recursive(project_fs, path, cancellations) - .await - .with_context(|| format!("[eden] Error cleaning up path {}", path)) - }); -
- futures::future::try_join_all(futs).await?; -
- Ok(()) - } -
- pub async fn ensure_materialized( - &self, - paths: Vec<ProjectRelativePathBuf>, - ) -> anyhow::Result<()> { - let file_paths = paths - .iter() - .filter_map(|path| path.strip_prefix(&self.buck_out_path).ok()) - .map(|relpath| relpath.as_str().as_bytes().to_vec()) - .collect::<Vec<_>>(); - static ENSURE_MATERIALIZED_IN_BACKGROUND: EnvHelper<bool> = - EnvHelper::new("BUCK2_EDEN_ENSURE_MATERIALIZED_IN_BACKGROUND"); - let background = ENSURE_MATERIALIZED_IN_BACKGROUND - .get_copied()?
- .unwrap_or(true); - let params = EnsureMaterializedParams { - mountPoint: self.connection_manager.get_mount_point(), - paths: file_paths, - background, - followSymlink: true, // Also materialize symlink targets - ..Default::default() - }; -
- self.connection_manager - .with_eden(move |eden| eden.ensureMaterialized(&params)) - .await?; - Ok(()) - } -} -
-#[cfg(all(test, not(windows)))] -mod tests { - use std::path::PathBuf; - use std::sync::Arc; -
- use buck2_common::external_symlink::ExternalSymlink; - use buck2_common::file_ops::FileMetadata; - use buck2_common::file_ops::TrackedFileDigest; - use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; - use buck2_core::fs::paths::RelativePathBuf; - use dupe::Dupe; -
- use super::*; - use crate::artifact_value::ArtifactValue; - use crate::digest_config::DigestConfig; - use crate::directory::Symlink; -
- #[test] - fn test_symlink_object_id() -> anyhow::Result<()> { - let symlink = Symlink::new(RelativePathBuf::from("path/include")); - assert_eq!( - "re-symlink:path/include", - get_symlink_object_id(symlink.target().as_str()) - ); - Ok(()) - } -
- #[test] - fn test_external_symlink_object_id() -> anyhow::Result<()> { - let symlink = ExternalSymlink::new( - PathBuf::from("/mnt/gvfs"), - Some(ForwardRelativePathBuf::unchecked_new("include".to_owned())), - ) - .unwrap(); - assert_eq!( - "re-symlink:/mnt/gvfs/include", - get_symlink_object_id(&Arc::new(symlink).to_path_buf().to_string_lossy()) - ); - Ok(()) - } -
- #[test] - fn test_get_object_id_and_type_blob() -> anyhow::Result<()> { - let digest_config = DigestConfig::testing_default(); -
- let digest = TrackedFileDigest::from_content(b"content", digest_config.cas_digest_config()); - let metadata_executable = FileMetadata { - digest: digest.dupe(), - is_executable: true, - }; -
- let value = ArtifactValue::from(DirectoryEntry::Leaf(ActionDirectoryMember::File( - metadata_executable, - ))); - let (object_id, object_type) = get_object_id_and_type(&value); - assert_eq!(digest.to_string(), object_id); - assert_eq!(ObjectType::EXECUTABLE_FILE, object_type); -
- let metadata_regular = FileMetadata { - digest: digest.dupe(), - is_executable: false, - }; -
- let value = ArtifactValue::from(DirectoryEntry::Leaf(ActionDirectoryMember::File( - metadata_regular, - ))); - let (object_id, object_type) = get_object_id_and_type(&value); - assert_eq!(digest.to_string(), object_id); - assert_eq!(ObjectType::REGULAR_FILE, object_type); - Ok(()) - } -
- #[test] - fn test_get_object_id_and_type_symlink() -> anyhow::Result<()> { - let symlink = Symlink::new(RelativePathBuf::from("path/include")); - let value = ArtifactValue::from(DirectoryEntry::Leaf(ActionDirectoryMember::Symlink( - Arc::new(symlink), - ))); - let (object_id, object_type) = get_object_id_and_type(&value); - assert_eq!("re-symlink:path/include", object_id); - assert_eq!(ObjectType::SYMLINK, object_type); - Ok(()) - } -
- #[test] - fn test_get_object_id_and_type_external_symlink() -> anyhow::Result<()> { - let symlink = Arc::new( - ExternalSymlink::new( - PathBuf::from("/mnt/gvfs"), - Some(ForwardRelativePathBuf::unchecked_new("include".to_owned())), - ) - .unwrap(), - ); -
- let value = ArtifactValue::from(DirectoryEntry::Leaf( - ActionDirectoryMember::ExternalSymlink(symlink), - )); - let (object_id, object_type) = get_object_id_and_type(&value); - assert_eq!("re-symlink:/mnt/gvfs/include", object_id); - assert_eq!(ObjectType::SYMLINK, object_type); - Ok(()) - } -} diff --git a/app/buck2_execute/src/materialize/http.rs
b/app/buck2_execute/src/materialize/http.rs index 006d4e09768e3..51298e375f91d 100644 --- a/app/buck2_execute/src/materialize/http.rs +++ b/app/buck2_execute/src/materialize/http.rs @@ -14,16 +14,18 @@ use std::time::Duration; use allocative::Allocative; use anyhow::Context as _; use buck2_common::cas_digest::CasDigestConfig; -use buck2_common::cas_digest::DigestAlgorithmKind; +use buck2_common::cas_digest::DigestAlgorithmFamily; +use buck2_common::cas_digest::SHA1_SIZE; +use buck2_common::cas_digest::SHA256_SIZE; use buck2_common::file_ops::FileDigest; use buck2_common::file_ops::TrackedFileDigest; -use buck2_common::http::retries::http_retry; -use buck2_common::http::retries::AsHttpError; -use buck2_common::http::retries::HttpError; -use buck2_common::http::HttpClient; use buck2_core::fs::fs_util; use buck2_core::fs::project::ProjectRoot; use buck2_core::fs::project_rel_path::ProjectRelativePath; +use buck2_http::retries::http_retry; +use buck2_http::retries::AsHttpError; +use buck2_http::retries::HttpError; +use buck2_http::HttpClient; use bytes::Bytes; use digest::DynDigest; use dupe::Dupe; @@ -34,7 +36,6 @@ use sha1::Digest; use sha1::Sha1; use sha2::Sha256; use smallvec::SmallVec; -use thiserror::Error; use crate::digest_config::DigestConfig;
@@ -45,7 +46,74 @@ pub enum Checksum { Both { sha1: Arc<str>, sha256: Arc<str> }, }
+#[derive(buck2_error::Error, Debug)] +enum DownloadFileError { + #[error("Must pass in at least one checksum (e.g. `sha1 = ...`)")] + MissingChecksum, + #[error("Invalid digest for `{digest_type}` argument, expected length of {expected_len} but got {}, digest `{digest}`", digest.len())] + InvalidDigestLength { + digest: String, + expected_len: usize, + digest_type: &'static str, + }, + #[error( + "Invalid digest for `{digest_type}` argument, expected 0-9 a-z hex characters, but got `{bad_char}`, digest `{digest}`" + )] + InvalidDigestCharacter { + digest: String, + bad_char: char, + digest_type: &'static str, + }, +} +
 impl Checksum { + pub fn new(sha1: Option<&str>, sha256: Option<&str>) -> anyhow::Result<Self> { + fn is_hex_digit(x: char) -> bool { + let x = x.to_ascii_lowercase(); + x.is_ascii_digit() || ('a'..='f').contains(&x) + } +
+ fn validate_digest( + digest: Option<&str>, + digest_len: usize, + digest_type: &'static str, + ) -> anyhow::Result<Option<Arc<str>>> { + match digest { + None => Ok(None), + Some(digest) => { + let expected_len = digest_len * 2; + if digest.len() != expected_len { + return Err(DownloadFileError::InvalidDigestLength { + digest: digest.to_owned(), + expected_len, + digest_type, + } + .into()); + } + if let Some(bad_char) = digest.chars().find(|x| !is_hex_digit(*x)) { + return Err(DownloadFileError::InvalidDigestCharacter { + digest: digest.to_owned(), + bad_char, + digest_type, + } + .into()); + } + Ok(Some(Arc::from(digest.to_ascii_lowercase()))) + } + } + } +
+ match ( + validate_digest(sha1, SHA1_SIZE, "sha1")?, + validate_digest(sha256, SHA256_SIZE, "sha256")?, + ) { + (Some(sha1), None) => Ok(Checksum::Sha1(sha1)), + (None, Some(sha256)) => Ok(Checksum::Sha256(sha256)), + (Some(sha1), Some(sha256)) => Ok(Checksum::Both { sha1, sha256 }), + (None, None) => Err(DownloadFileError::MissingChecksum.into()), + } + } + pub fn sha1(&self) -> Option<&str> { match self { Self::Sha1(sha1) => Some(sha1), @@ -63,18 +131,25 @@ impl Checksum { } }
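Since Checksum::new is new in this diff, a usage sketch may help: a digest must be all hex and exactly twice its byte size in length (SHA1_SIZE is 20 bytes, so 40 chars; SHA256_SIZE is 32 bytes, so 64 chars), and it is lowercased on the way in. A minimal sketch, assuming it runs alongside the Checksum type above; the digest literals are placeholders, not hashes of anything meaningful:

fn main() -> anyhow::Result<()> {
    // 40 hex chars: valid for the sha1 argument (20 bytes * 2).
    let sha1 = "da39a3ee5e6b4b0d3255bfef95601890afd80709";
    // 64 hex chars: valid for the sha256 argument (32 bytes * 2).
    let sha256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";

    assert!(Checksum::new(Some(sha1), None)?.sha1().is_some());
    assert!(Checksum::new(None, Some(sha256))?.sha256().is_some());
    // Supplying both digests yields Checksum::Both, carrying each one.
    let both = Checksum::new(Some(sha1), Some(sha256))?;
    assert!(both.sha1().is_some() && both.sha256().is_some());

    // Rejected inputs: no checksum at all, wrong length, non-hex character.
    assert!(Checksum::new(None, None).is_err());
    assert!(Checksum::new(Some("abc123"), None).is_err());
    let non_hex = "g".repeat(40);
    assert!(Checksum::new(Some(non_hex.as_str()), None).is_err());
    Ok(())
}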
-#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum HttpHeadError { #[error("Error performing http_head request")] - Client(#[from] HttpError), + Client(#[source] HttpError), +} +
+impl From<HttpError> for HttpHeadError { + fn from(e: HttpError) -> Self { + Self::Client(e) + } }
-#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum HttpDownloadError { #[error("Error performing http_download request")] - Client(#[from] HttpError), + Client(#[source] HttpError),
 #[error("Invalid {0} digest. Expected {1}, got {2}. URL: {3}")] + #[buck2(input)] InvalidChecksum(&'static str, String, String, String),
 #[error( @@ -92,6 +167,12 @@ enum HttpDownloadError { IoError(anyhow::Error), }
+impl From<HttpError> for HttpDownloadError { + fn from(e: HttpError) -> Self { + Self::Client(e) + } +} +
 impl AsHttpError for HttpHeadError { fn as_http_error(&self) -> Option<&HttpError> { match self { @@ -136,12 +217,13 @@ pub async fn http_download( ) -> anyhow::Result<TrackedFileDigest> { let abs_path = fs.resolve(path); if let Some(dir) = abs_path.parent() { - fs_util::create_dir_all(fs.resolve(dir))?; + fs_util::create_dir_all(dir)?; }
 Ok(http_retry( || async { - let file = fs_util::create_file(&abs_path).map_err(HttpDownloadError::IoError)?; + let file = fs_util::create_file(&abs_path) + .map_err(|e| HttpDownloadError::IoError(anyhow::Error::from(e)))?;
 let stream = client .get(url) @@ -199,7 +281,7 @@ async fn copy_and_hash( let mut validators = SmallVec::<[_; 2]>::new();
 if let Some(sha1) = checksum.sha1() { - let validator = if digester.algorithm() == DigestAlgorithmKind::Sha1 { + let validator = if digester.algorithm() == DigestAlgorithmFamily::Sha1 { Validator::PrimaryDigest } else { Validator::ExtraDigest(Box::new(Sha1::new()) as _) @@ -209,7 +291,7 @@ async fn copy_and_hash( }
 if let Some(sha256) = checksum.sha256() { - let validator = if digester.algorithm() == DigestAlgorithmKind::Sha256 { + let validator = if digester.algorithm() == DigestAlgorithmFamily::Sha256 { Validator::PrimaryDigest } else { Validator::ExtraDigest(Box::new(Sha256::new()) as _) @@ -273,7 +355,7 @@ async fn copy_and_hash( }
 #[cfg(test)] -mod test { +mod tests { use assert_matches::assert_matches; use buck2_common::cas_digest::testing; use futures::stream; @@ -326,12 +408,12 @@ mod test { #[tokio::test] async fn test_copy_and_hash_invalid_primary_hash() -> anyhow::Result<()> { assert_matches!( - do_test(testing::sha1(), &Checksum::Sha1(Arc::from("oops")),).await, + do_test(testing::sha1(), &Checksum::Sha1(Arc::from("oops"))).await, Err(HttpDownloadError::InvalidChecksum(..)) );
 assert_matches!( - do_test(testing::sha256(), &Checksum::Sha256(Arc::from("oops")),).await, + do_test(testing::sha256(), &Checksum::Sha256(Arc::from("oops"))).await, Err(HttpDownloadError::InvalidChecksum(..)) );
 @@ -341,12 +423,12 @@ mod test { #[tokio::test] async fn test_copy_and_hash_invalid_secondary_hash() -> anyhow::Result<()> { assert_matches!( - do_test(testing::blake3(), &Checksum::Sha1(Arc::from("oops")),).await, + do_test(testing::blake3(), &Checksum::Sha1(Arc::from("oops"))).await, Err(HttpDownloadError::InvalidChecksum(..)) );
 assert_matches!( - do_test(testing::blake3(), &Checksum::Sha256(Arc::from("oops")),).await, + do_test(testing::blake3(), &Checksum::Sha256(Arc::from("oops"))).await, Err(HttpDownloadError::InvalidChecksum(..)) );
 diff --git a/app/buck2_execute/src/materialize/materializer.rs b/app/buck2_execute/src/materialize/materializer.rs index 37548225dbbc4..0e22669ed7a67 100644 --- a/app/buck2_execute/src/materialize/materializer.rs +++ b/app/buck2_execute/src/materialize/materializer.rs @@ -14,10 +14,13 @@ use allocative::Allocative; use async_trait::async_trait; use buck2_common::file_ops::FileMetadata; use buck2_core::base_deferred_key::BaseDeferredKey; -use 
buck2_core::directory::DirectoryEntry; use buck2_core::execution_types::executor_config::RemoteExecutorUseCase; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; +use buck2_directory::directory::directory_iterator::DirectoryIterator; +use buck2_directory::directory::entry::DirectoryEntry; +use buck2_directory::directory::walk::ordered_entry_walk; use buck2_events::dispatch::EventDispatcher; +use buck2_futures::cancellation::CancellationContext; use chrono::DateTime; use chrono::Duration; use chrono::Utc; @@ -26,8 +29,6 @@ use dice::UserComputationData; use dupe::Dupe; use futures::stream::BoxStream; use futures::stream::TryStreamExt; -use more_futures::cancellation::CancellationContext; -use thiserror::Error; use crate::artifact_value::ArtifactValue; use crate::directory::ActionDirectoryEntry; @@ -35,43 +36,73 @@ use crate::directory::ActionDirectoryMember; use crate::directory::ActionImmutableDirectory; use crate::directory::ActionSharedDirectory; use crate::execute::action_digest::TrackedActionDigest; -#[cfg(any(fbcode_build, cargo_internal_build))] -use crate::materialize::eden_api::EdenBuckOut; use crate::materialize::http::Checksum; -// Add a stub EdenBuckOut for when we don't have Eden output enabled -#[cfg(not(any(fbcode_build, cargo_internal_build)))] -pub struct EdenBuckOut { - not_implemented: !, +pub struct WriteRequest { + pub path: ProjectRelativePathBuf, + pub content: Vec, + pub is_executable: bool, } -#[cfg(not(any(fbcode_build, cargo_internal_build)))] -impl EdenBuckOut { - pub async fn remove_paths_recursive( - &self, - _project_fs: &buck2_core::fs::project::ProjectRoot, - _paths: Vec, - _cancellations: &CancellationContext<'_>, - ) -> anyhow::Result<()> { - self.not_implemented +#[cold] +fn format_directory_entry_leaves( + directory: &ActionDirectoryEntry, +) -> String { + let walk = ordered_entry_walk(directory.as_ref()); + let only_files = walk + .filter_map(|entry| match entry { + DirectoryEntry::Leaf(ActionDirectoryMember::File(f)) => Some(&f.digest), + _ => { + // We only download files from RE, not symlinks or directories. + // https://fburl.com/code/3o8ht6b6. + None + } + }) + .with_paths(); + const MAX_COUNT: usize = 10; + const TABULATION: &str = " "; + let mut count = 0; + let mut result = String::new(); + for (path, digest) in only_files { + count += 1; + if count > MAX_COUNT { + continue; + } + result.push_str(&format!("{}{}: {}\n", TABULATION, path, digest)); } - - pub async fn set_path_object_id( - &self, - __path: &ProjectRelativePathBuf, - __value: &ArtifactValue, - ) -> anyhow::Result<()> { - self.not_implemented + if count > MAX_COUNT { + result.push_str(&format!( + "{}... and {} more omitted", + TABULATION, + count - MAX_COUNT + )); } + result } -pub struct WriteRequest { - pub path: ProjectRelativePathBuf, - pub content: Vec, - pub is_executable: bool, +#[derive(buck2_error::Error, Debug, Clone, Dupe)] +#[error( + "Your build requires materializing an artifact that has expired in the \ + RE CAS and Buck does not have it. \ + This likely happened because your Buck daemon \ + has been online for a long time. This error is currently unrecoverable. \ + To proceed, you should restart Buck using `buck2 killall`. 
+ +Debug information: + Path: {} + Digest origin: {} + Directory:\n{}", .path, .info.origin.as_display_for_not_found(), format_directory_entry_leaves(.directory))] +#[buck2(tag = MaterializationError)] +pub struct CasNotFoundError { + pub path: Arc, + pub info: Arc, + pub directory: ActionDirectoryEntry, + #[source] + pub error: Arc, } -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] +#[buck2(tag = MaterializationError)] pub enum MaterializationError { #[error("Error materializing artifact at path `{}`", .path)] Error { @@ -82,21 +113,8 @@ pub enum MaterializationError { }, /// The artifact wasn't found. This typically means it expired in the CAS. - #[error( - "Your build requires materializing an artifact that has expired in the \ - RE CAS and Buck does not have it (path: {}, digest origin: {}). \ - This likely happened because your Buck daemon \ - has been online for a long time. This error is currently unrecoverable. \ - To proceed, you should restart Buck using `buck2 killall` (debug info: {})", - .path, - .info.origin.as_display_for_not_found(), - .debug - )] - NotFound { - path: ProjectRelativePathBuf, - info: Arc, - debug: Arc, - }, + #[error(transparent)] + NotFound { source: CasNotFoundError }, #[error("Error inserting entry into materializer state sqlite for artifact at `{}`", .path)] SqliteDbError { @@ -180,6 +198,18 @@ pub trait Materializer: Allocative + Send + Sync + 'static { artifacts: Vec<(ProjectRelativePathBuf, ArtifactValue)>, ) -> anyhow::Result; + /// Ask the materializer if there is a "tracked" artifact at the given path. + /// + /// While this method provides no information about what that artifact actually is, it can be + /// used to verify that the artifact has not been eg corrupted by a partial clean-stale since + /// being materialized. + /// + /// Furthermore, if this method returns `true`, the artifact is also treated as having been + /// declared in the current daemon. + /// + /// This method does not guarantee that the artifact was materialized. + async fn has_artifact_at(&self, path: ProjectRelativePathBuf) -> anyhow::Result; + /// Declare an artifact at `path` exists. This will overwrite any pre-existing materialization /// methods for this file and indicate that no materialization is necessary. async fn invalidate(&self, path: ProjectRelativePathBuf) -> anyhow::Result<()> { @@ -250,12 +280,6 @@ pub trait Materializer: Allocative + Send + Sync + 'static { file_paths: Vec, ) -> anyhow::Result>>; - /// Expose Eden based buck-out if the materializer is Eden - /// Return None if not based on Eden. 
- fn eden_buck_out(&self) -> Option<&EdenBuckOut> { - None - } - fn as_deferred_materializer_extension(&self) -> Option<&dyn DeferredMaterializerExtensions> { None } @@ -334,14 +358,11 @@ impl dyn Materializer { fn check_declared_external_symlink(&self, value: &ArtifactValue) -> anyhow::Result<()> { match value.entry() { DirectoryEntry::Leaf(ActionDirectoryMember::ExternalSymlink(external_symlink)) => { - match external_symlink.remaining_path() { - Some(_) => { - return Err(anyhow::anyhow!( - "Internal error: external symlink should not be declared on materializer with non-empty remaining path: '{}'", - external_symlink.dupe().to_path_buf().display() - )); - } - None => {} + if !external_symlink.remaining_path().is_empty() { + return Err(anyhow::anyhow!( + "Internal error: external symlink should not be declared on materializer with non-empty remaining path: '{}'", + external_symlink.dupe().to_path_buf().display() + )); } } _ => {} @@ -518,7 +539,7 @@ impl CasDownloadInfo { /// Information about a CAS download we might require when an artifact is not materialized. #[derive(Debug, Display)] -#[display(fmt = "{} declared by {}", "self.url", "self.owner")] +#[display("{} declared by {}", self.url, self.owner)] pub struct HttpDownloadInfo { /// URL to download the file from. pub url: Arc, @@ -534,7 +555,7 @@ pub struct HttpDownloadInfo { pub owner: BaseDeferredKey, } -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] pub enum ArtifactNotMaterializedReason { #[error( "The artifact at path '{}' ({}) was produced by a RE action ({}), \ @@ -600,14 +621,12 @@ pub enum MaterializationMethod { Deferred, /// Materialize only when needed, do not materialize final artifacts DeferredSkipFinalArtifacts, - /// Let Eden delegate materialzation - Eden, } -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] pub enum MaterializationMethodError { #[error( - "Invalid value for buckconfig `[buck2] materializations`. Got `{0}`. Expected one of `all`, `deferred`, `deferred_skip_final_artifacts` or `eden`." + "Invalid value for buckconfig `[buck2] materializations`. Got `{0}`. Expected one of `all`, `deferred`, or `deferred_skip_final_artifacts`." )] InvalidValueForConfig(String), } @@ -615,12 +634,11 @@ pub enum MaterializationMethodError { impl MaterializationMethod { pub fn try_new_from_config_value(config_value: Option<&str>) -> anyhow::Result { match config_value { - None | Some("") | Some("all") => Ok(MaterializationMethod::Immediate), - Some("deferred") => Ok(MaterializationMethod::Deferred), + None | Some("") | Some("deferred") => Ok(MaterializationMethod::Deferred), + Some("all") => Ok(MaterializationMethod::Immediate), Some("deferred_skip_final_artifacts") => { Ok(MaterializationMethod::DeferredSkipFinalArtifacts) } - Some("eden") => Ok(MaterializationMethod::Eden), Some(v) => Err(MaterializationMethodError::InvalidValueForConfig(v.to_owned()).into()), } } @@ -630,6 +648,12 @@ impl MaterializationMethod { /// `DeferredMaterializerEntry` lives in a crate that depends on this one. pub trait DeferredMaterializerEntry: Send + Sync + std::fmt::Display {} +pub struct DeferredMaterializerIterItem { + pub artifact_path: ProjectRelativePathBuf, + pub artifact_display: Box, + pub deps: Vec<(ProjectRelativePathBuf, &'static str)>, +} + /// Obtain notifications for entries as they are materialized, and request eager materialization of /// those paths. 
#[async_trait] @@ -649,11 +673,9 @@ pub trait DeferredMaterializerSubscription: Send + Sync { /// Extensions to the Materializer trait that are only available in the Deferred materializer. #[async_trait] pub trait DeferredMaterializerExtensions: Send + Sync { - fn iterate( - &self, - ) -> anyhow::Result< - BoxStream<'static, (ProjectRelativePathBuf, Box)>, - >; + fn iterate(&self) -> anyhow::Result>; + + fn list_subscriptions(&self) -> anyhow::Result>; /// Obtain a list of files that don't match their in-memory representation. This may not catch /// all discrepancies. diff --git a/app/buck2_execute/src/materialize/mod.rs b/app/buck2_execute/src/materialize/mod.rs deleted file mode 100644 index c98726964df82..0000000000000 --- a/app/buck2_execute/src/materialize/mod.rs +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -#[cfg(any(fbcode_build, cargo_internal_build))] -pub mod eden_api; -pub mod http; - -pub mod materializer; -pub mod nodisk; diff --git a/app/buck2_execute/src/materialize/nodisk.rs b/app/buck2_execute/src/materialize/nodisk.rs index 92187f2ea0d04..0d8a47ece2090 100644 --- a/app/buck2_execute/src/materialize/nodisk.rs +++ b/app/buck2_execute/src/materialize/nodisk.rs @@ -12,11 +12,11 @@ use std::sync::Arc; use allocative::Allocative; use async_trait::async_trait; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; +use buck2_futures::cancellation::CancellationContext; use futures::stream; use futures::stream::BoxStream; use futures::stream::StreamExt; use gazebo::prelude::*; -use more_futures::cancellation::CancellationContext; use crate::artifact_value::ArtifactValue; use crate::materialize::materializer::ArtifactNotMaterializedReason; @@ -84,6 +84,11 @@ impl Materializer for NoDiskMaterializer { Ok(DeclareMatchOutcome::NotMatch) } + async fn has_artifact_at(&self, _path: ProjectRelativePathBuf) -> anyhow::Result { + // This materializer does not keep track of state + Ok(false) + } + async fn declare_write<'a>( &self, _gen: Box anyhow::Result> + Send + 'a>, diff --git a/app/buck2_execute/src/output_size.rs b/app/buck2_execute/src/output_size.rs index 4a20e9fb4e9df..92bc5a7272f42 100644 --- a/app/buck2_execute/src/output_size.rs +++ b/app/buck2_execute/src/output_size.rs @@ -7,8 +7,10 @@ * of this source tree. 
*/ -use buck2_core::directory::unordered_entry_walk; -use buck2_core::directory::DirectoryEntry; +use buck2_directory::directory::directory::Directory; +use buck2_directory::directory::directory_iterator::DirectoryIterator; +use buck2_directory::directory::entry::DirectoryEntry; +use buck2_directory::directory::walk::unordered_entry_walk; use crate::artifact_value::ArtifactValue; use crate::directory::ActionDirectory; @@ -35,7 +37,7 @@ where fn calc_output_count_and_bytes(&self) -> OutputCountAndBytes { let mut bytes = 0; let mut count = 0; - let mut walk = unordered_entry_walk(self.as_ref()); + let mut walk = unordered_entry_walk(self.as_ref().map_dir(|d| Directory::as_ref(d))); while let Some((_path, entry)) = walk.next() { match entry { DirectoryEntry::Leaf(ActionDirectoryMember::File(f)) => { diff --git a/app/buck2_execute/src/path/mod.rs b/app/buck2_execute/src/path.rs similarity index 100% rename from app/buck2_execute/src/path/mod.rs rename to app/buck2_execute/src/path.rs diff --git a/app/buck2_execute/src/path/artifact_path.rs b/app/buck2_execute/src/path/artifact_path.rs index ef4cd47ba5021..d875fd50a7706 100644 --- a/app/buck2_execute/src/path/artifact_path.rs +++ b/app/buck2_execute/src/path/artifact_path.rs @@ -10,26 +10,23 @@ use std::borrow::Cow; use std::fmt; use std::hash::Hash; -use std::hash::Hasher; use anyhow::Context; -use buck2_core::buck_path::path::BuckPathRef; use buck2_core::fs::artifact_path_resolver::ArtifactFs; use buck2_core::fs::buck_out_path::BuckOutPath; use buck2_core::fs::paths::file_name::FileName; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; -use dupe::Dupe; +use buck2_core::package::source_path::SourcePathRef; use either::Either; use gazebo::cell::ARef; -use gazebo::eq_chain; -#[derive(Debug)] +#[derive(Debug, Eq, PartialEq, Hash)] pub struct ArtifactPath<'a> { - pub base_path: Either, BuckPathRef<'a>>, - pub projected_path: Option<&'a ForwardRelativePath>, + pub base_path: Either, SourcePathRef<'a>>, + pub projected_path: &'a ForwardRelativePath, /// The number of components at the prefix of that path that are internal details to the rule, - /// not returned by `.short_path`. Omitted from Eq and Hash comparisons. + /// not returned by `.short_path`. 
pub hidden_components_count: usize, } @@ -38,9 +35,9 @@ impl<'a> ArtifactPath<'a> { where for<'b> F: FnOnce(anyhow::Result<&'b FileName>) -> T, { - let file_name = match self.projected_path.as_ref() { - Some(projected_path) => projected_path, - None => match self.base_path.as_ref() { + let file_name = match self.projected_path.is_empty() { + false => self.projected_path, + true => match self.base_path.as_ref() { Either::Left(buck_out) => buck_out.path(), Either::Right(buck) => buck.path().as_ref(), }, @@ -60,10 +57,7 @@ impl<'a> ArtifactPath<'a> { Either::Right(buck) => buck.path().as_ref(), }; - let path = match self.projected_path.as_ref() { - Some(projected_path) => Cow::Owned(base_short_path.join(projected_path)), - None => Cow::Borrowed(base_short_path), - }; + let path = base_short_path.join_cow(self.projected_path); let path = match path.strip_prefix_components(self.hidden_components_count) { Some(p) => p, @@ -87,10 +81,7 @@ impl<'a> ArtifactPath<'a> { ), }; - let path = match self.projected_path.as_ref() { - Some(projected_path) => Cow::Owned(base_path.join(projected_path)), - None => base_path, - }; + let path = base_path.join_cow(self.projected_path); f(&path) } @@ -104,36 +95,13 @@ impl<'a> ArtifactPath<'a> { let base_path = match base_path { Either::Left(build) => artifact_fs.buck_out_path_resolver().resolve_gen(build), - Either::Right(source) => artifact_fs - .buck_path_resolver() - .resolve_buck_path(source.dupe())?, + Either::Right(source) => artifact_fs.resolve_source(*source)?, }; - Ok(match projected_path { - Some(projected_path) => base_path.join(projected_path), - None => base_path, - }) - } -} - -impl Hash for ArtifactPath<'_> { - fn hash(&self, state: &mut H) { - self.base_path.hash(state); - self.projected_path.as_ref().hash(state); + Ok(base_path.join(projected_path)) } } -impl PartialEq for ArtifactPath<'_> { - fn eq(&self, other: &Self) -> bool { - eq_chain! { - self.base_path == other.base_path, - self.projected_path.as_ref() == other.projected_path.as_ref() - } - } -} - -impl Eq for ArtifactPath<'_> {} - impl fmt::Display for ArtifactPath<'_> { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { // NOTE: This produces a representation we tend to use in Starlark for those, which isn't diff --git a/app/buck2_execute/src/re.rs b/app/buck2_execute/src/re.rs new file mode 100644 index 0000000000000..f3242340c01d1 --- /dev/null +++ b/app/buck2_execute/src/re.rs @@ -0,0 +1,20 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +pub mod action_identity; +pub mod client; +pub mod convert; +pub mod error; +pub mod manager; +pub mod metadata; +pub mod re_get_session_id; +pub mod remote_action_result; +mod stats; +pub mod streams; +pub mod uploader; diff --git a/app/buck2_execute/src/re/client.rs b/app/buck2_execute/src/re/client.rs index f7a6bf241aa60..79fa20fbda9ee 100644 --- a/app/buck2_execute/src/re/client.rs +++ b/app/buck2_execute/src/re/client.rs @@ -12,27 +12,23 @@ use std::time::Duration; use allocative::Allocative; use anyhow::Context; -use buck2_core::env_helper::EnvHelper; +use buck2_core::buck2_env_anyhow; +use buck2_core::execution_types::executor_config::RemoteExecutorDependency; use buck2_core::execution_types::executor_config::RemoteExecutorUseCase; -use buck2_core::fs::paths::abs_norm_path::AbsNormPath; use buck2_core::fs::project::ProjectRoot; use buck2_core::fs::project_rel_path::ProjectRelativePath; -use buck2_re_configuration::RemoteExecutionStaticMetadata; use buck2_re_configuration::RemoteExecutionStaticMetadataImpl; use chrono::DateTime; use chrono::Utc; use dupe::Dupe; use either::Either; -use fbinit::FacebookInit; use futures::stream::BoxStream; use futures::FutureExt; use futures::StreamExt; -use futures::TryFutureExt; use gazebo::prelude::*; use itertools::Itertools; use prost::Message; use remote_execution as RE; -use remote_execution::ActionHistoryInfo; use remote_execution::ActionResultRequest; use remote_execution::ActionResultResponse; use remote_execution::BuckInfo; @@ -40,20 +36,22 @@ use remote_execution::DownloadRequest; use remote_execution::ExecuteRequest; use remote_execution::ExecuteResponse; use remote_execution::ExecuteWithProgressResponse; +use remote_execution::ExtendDigestsTtlRequest; use remote_execution::GetDigestsTtlRequest; -use remote_execution::HostResourceRequirements; use remote_execution::InlinedBlobWithDigest; use remote_execution::NamedDigest; use remote_execution::NamedDigestWithPermissions; use remote_execution::REClient; use remote_execution::REClientBuilder; -use remote_execution::REClientError; use remote_execution::RemoteExecutionMetadata; use remote_execution::Stage; use remote_execution::TActionResult2; use remote_execution::TCode; +use remote_execution::TDependency; use remote_execution::TDigest; use remote_execution::TExecutionPolicy; +use remote_execution::THostResourceRequirements; +use remote_execution::THostRuntimeRequirements; use remote_execution::UploadRequest; use remote_execution::WriteActionResultRequest; use remote_execution::WriteActionResultResponse; @@ -70,6 +68,10 @@ use crate::knobs::ExecutorGlobalKnobs; use crate::materialize::materializer::Materializer; use crate::re::action_identity::ReActionIdentity; use crate::re::convert::platform_to_proto; +use crate::re::error::test_re_error; +use crate::re::error::with_error_handler; +use crate::re::error::RemoteExecutionError; +use crate::re::manager::RemoteExecutionConfig; use crate::re::metadata::RemoteExecutionMetadataExt; use crate::re::stats::OpStats; use crate::re::stats::RemoteExecutionClientOpStats; @@ -99,26 +101,16 @@ struct RemoteExecutionClientData { materializes: OpStats, write_action_results: OpStats, get_digest_expirations: OpStats, + extend_digest_ttl: OpStats, } impl RemoteExecutionClient { - pub async fn new( - fb: FacebookInit, - skip_remote_cache: bool, - static_metadata: Arc, - logs_dir_path: Option<&AbsNormPath>, - buck_out_path: &AbsNormPath, - is_paranoid_mode: bool, - ) -> anyhow::Result { - let client = RemoteExecutionClientImpl::new( - fb, - 
skip_remote_cache, - static_metadata, - logs_dir_path, - buck_out_path, - is_paranoid_mode, - ) - .await?; + pub async fn new(re_config: &RemoteExecutionConfig) -> anyhow::Result { + if buck2_env_anyhow!("BUCK2_TEST_FAIL_CONNECT", bool, applicability = testing)? { + return Err(anyhow::anyhow!("Injected RE Connection error")); + } + + let client = RemoteExecutionClientImpl::new(re_config).await?; Ok(Self { data: Arc::new(RemoteExecutionClientData { @@ -130,33 +122,24 @@ impl RemoteExecutionClient { materializes: OpStats::default(), write_action_results: OpStats::default(), get_digest_expirations: OpStats::default(), + extend_digest_ttl: OpStats::default(), }), }) } - pub async fn new_retry( - fb: FacebookInit, - skip_remote_cache: bool, - times: usize, // 0 is treated as 1 - static_metadata: Arc, - logs_dir_path: Option<&AbsNormPath>, - buck_out_path: &AbsNormPath, - is_paranoid_mode: bool, - ) -> anyhow::Result { + pub async fn new_retry(re_config: &RemoteExecutionConfig) -> anyhow::Result { // Loop happens times-1 times at most - for i in 1..times { - match Self::new( - fb, - skip_remote_cache, - static_metadata.dupe(), - logs_dir_path, - buck_out_path, - is_paranoid_mode, - ) - .await - { + for i in 1..re_config.connection_retries { + match Self::new(re_config).await { Ok(v) => return Ok(v), Err(e) => { + let e: buck2_error::Error = e.into(); + if e.find_typed_context::().is_none() { + // If we cannot connect to RE due to some non-RE error, we should not retry + // And should just return the error immediately as it's unlikely to be flakey + return Err(e.into()); + } + tracing::warn!( "Failed to connect to RE, retrying after sleeping {} seconds: {:#?}", i, @@ -166,22 +149,7 @@ impl RemoteExecutionClient { } } } - Self::new( - fb, - skip_remote_cache, - static_metadata, - logs_dir_path, - buck_out_path, - is_paranoid_mode, - ) - .await - } - - fn decorate_error(&self, source: anyhow::Error) -> anyhow::Error { - source.context(format!( - "Remote Execution Error ({})", - self.get_session_id() - )) + Self::new(re_config).await } pub async fn action_cache( @@ -203,23 +171,21 @@ impl RemoteExecutionClient { dir_path: &ProjectRelativePath, input_dir: &ActionImmutableDirectory, use_case: RemoteExecutorUseCase, + identity: Option<&ReActionIdentity<'_>>, digest_config: DigestConfig, ) -> anyhow::Result { self.data .uploads - .op(self - .data - .client - .upload( - fs, - materializer, - blobs, - dir_path, - input_dir, - use_case, - digest_config, - ) - .map_err(|e| self.decorate_error(e))) + .op(self.data.client.upload( + fs, + materializer, + blobs, + dir_path, + input_dir, + use_case, + identity, + digest_config, + )) .await } @@ -232,48 +198,44 @@ impl RemoteExecutionClient { ) -> anyhow::Result<()> { self.data .uploads - .op(self - .data - .client - .upload_files_and_directories( - files_with_digest, - directories, - inlined_blobs_with_digest, - use_case, - ) - .map_err(|e| self.decorate_error(e))) + .op(self.data.client.upload_files_and_directories( + files_with_digest, + directories, + inlined_blobs_with_digest, + use_case, + )) .await } - pub async fn execute( + pub async fn execute<'a>( &self, action_digest: ActionDigest, platform: &RE::Platform, + dependencies: impl IntoIterator, use_case: RemoteExecutorUseCase, identity: &ReActionIdentity<'_>, manager: &mut CommandExecutionManager, skip_cache_read: bool, skip_cache_write: bool, re_max_queue_time: Option, + re_resource_units: Option, knobs: &ExecutorGlobalKnobs, ) -> anyhow::Result { self.data .executes - .op(self - .data - .client - 
.execute( - action_digest, - platform, - use_case, - identity, - manager, - skip_cache_read, - skip_cache_write, - re_max_queue_time, - knobs, - ) - .map_err(|e| self.decorate_error(e))) + .op(self.data.client.execute( + action_digest, + platform, + dependencies, + use_case, + identity, + manager, + skip_cache_read, + skip_cache_write, + re_max_queue_time, + re_resource_units, + knobs, + )) .await } @@ -290,6 +252,7 @@ impl RemoteExecutionClient { pub async fn download_typed_blobs( &self, + identity: Option<&ReActionIdentity<'_>>, digests: Vec, use_case: RemoteExecutorUseCase, ) -> anyhow::Result> { @@ -298,8 +261,7 @@ impl RemoteExecutionClient { .op(self .data .client - .download_typed_blobs(digests, use_case) - .map_err(|e| self.decorate_error(e))) + .download_typed_blobs(identity, digests, use_case)) .await } @@ -310,11 +272,7 @@ impl RemoteExecutionClient { ) -> anyhow::Result> { self.data .downloads - .op(self - .data - .client - .download_blob(digest, use_case) - .map_err(|e| self.decorate_error(e))) + .op(self.data.client.download_blob(digest, use_case)) .await } @@ -325,11 +283,7 @@ impl RemoteExecutionClient { ) -> anyhow::Result { self.data .uploads - .op(self - .data - .client - .upload_blob(blob, use_case) - .map_err(|e| self.decorate_error(e))) + .op(self.data.client.upload_blob(blob, use_case)) .await } @@ -340,17 +294,25 @@ impl RemoteExecutionClient { ) -> anyhow::Result)>> { self.data .get_digest_expirations - .op(self - .data - .client - .get_digest_expirations(digests, use_case) - .map_err(|e| self.decorate_error(e))) + .op(self.data.client.get_digest_expirations(digests, use_case)) + .await + } + + pub async fn extend_digest_ttl( + &self, + digests: Vec, + ttl: Duration, + use_case: RemoteExecutorUseCase, + ) -> anyhow::Result<()> { + self.data + .extend_digest_ttl + .op(self.data.client.extend_digest_ttl(digests, ttl, use_case)) .await } pub async fn write_action_result( &self, - digest: TDigest, + digest: ActionDigest, result: TActionResult2, use_case: RemoteExecutorUseCase, platform: &RE::Platform, @@ -360,13 +322,12 @@ impl RemoteExecutionClient { .op(self .data .client - .write_action_result(digest, result, use_case, platform) - .map_err(|e| self.decorate_error(e))) + .write_action_result(digest, result, use_case, platform)) .await } pub fn get_session_id(&self) -> &str { - self.data.client.client().get_session_id() + self.data.client.get_session_id() } pub fn get_experiment_name(&self) -> anyhow::Result> { @@ -400,6 +361,8 @@ struct RemoteExecutionClientImpl { /// How many files to kick off downloading concurrently for one request. This should be smaller /// than the files semaphore to ensure we can actually *acquire* that semaphore. download_chunk_size: usize, + /// Preserve file symlinks as symlinks when uploading action result. 
+ respect_file_symlinks: bool, } fn re_platform(x: &RE::Platform) -> remote_execution::TPlatform { @@ -414,24 +377,17 @@ fn re_platform(x: &RE::Platform) -> remote_execution::TPlatform { } impl RemoteExecutionClientImpl { - async fn new( - fb: FacebookInit, - skip_remote_cache: bool, - static_metadata: Arc, - maybe_logs_dir_path: Option<&AbsNormPath>, - buck_out_path: &AbsNormPath, - is_paranoid_mode: bool, - ) -> anyhow::Result { + async fn new(re_config: &RemoteExecutionConfig) -> anyhow::Result { + let op_name = "REClientBuilder"; tracing::info!("Creating a new RE client"); let res: anyhow::Result = try { - static DOWNLOAD_CONCURRENCY: EnvHelper = - EnvHelper::new("BUCK2_RE_DOWNLOAD_CONCURRENCY"); - - let download_concurrency = DOWNLOAD_CONCURRENCY.get_copied()?.unwrap_or(256); + let download_concurrency = + buck2_env_anyhow!("BUCK2_RE_DOWNLOAD_CONCURRENCY", type=usize, default=256)?; // Split things up into smaller chunks. let download_chunk_size = std::cmp::max(download_concurrency / 8, 1); + let static_metadata = &re_config.static_metadata; #[cfg(fbcode_build)] let client = { @@ -462,7 +418,7 @@ impl RemoteExecutionClientImpl { ..Default::default() }; - if is_paranoid_mode { + if re_config.is_paranoid_mode { // Dedupe is not compatible with us downloading blobs and moving them. embedded_cas_daemon_config.disable_download_dedup = true; } @@ -480,8 +436,10 @@ impl RemoteExecutionClientImpl { // want to tell the RE client to rely on an external // CAS daemon to manage the cache. if let Some(shared_cache) = &static_metadata.cas_shared_cache { + use remote_execution::RemoteCASdAddress; use remote_execution::RemoteCacheConfig; use remote_execution::RemoteCacheManagerMode; + use remote_execution::RemoteFetchPolicy; let mode = match static_metadata .cas_shared_cache_mode @@ -493,12 +451,42 @@ impl RemoteExecutionClientImpl { { "BIG_FILES" => RemoteCacheManagerMode::BIG_FILES, "ALL_FILES" => RemoteCacheManagerMode::ALL_FILES, - unknown => anyhow::bail!("Unknown RemoteCacheManagerMode: {}", unknown), + "ALL_FILES_LOCAL_WITHOUT_SYNC" => { + RemoteCacheManagerMode::ALL_FILES_LOCAL_WITHOUT_SYNC + } + unknown => { + return Err(anyhow::anyhow!( + "Unknown RemoteCacheManagerMode: {}", + unknown + )); + } + }; + + let (small_files_policy, large_files_policy) = match mode { + RemoteCacheManagerMode::BIG_FILES => ( + RemoteFetchPolicy::LOCAL_FETCH_WITHOUT_SYNC, + RemoteFetchPolicy::REMOTE_FETCH, + ), + RemoteCacheManagerMode::ALL_FILES => ( + RemoteFetchPolicy::REMOTE_FETCH, + RemoteFetchPolicy::REMOTE_FETCH, + ), + RemoteCacheManagerMode::ALL_FILES_LOCAL_WITHOUT_SYNC => ( + RemoteFetchPolicy::LOCAL_FETCH_WITHOUT_SYNC, + RemoteFetchPolicy::LOCAL_FETCH_WITHOUT_SYNC, + ), + _ => unreachable!(), }; + let remote_cache_config = { let mut remote_cache_config = RemoteCacheConfig { mode, port: static_metadata.cas_shared_cache_port, + small_files: small_files_policy, + large_files: large_files_policy, + address: RemoteCASdAddress::tcp_port( + static_metadata.cas_shared_cache_port.unwrap_or(23333), + ), ..Default::default() }; if let Some(tls) = static_metadata.cas_shared_cache_tls { @@ -557,7 +545,8 @@ impl RemoteExecutionClientImpl { // Will either choose the SOFT_COPY (on some linux fs like btrfs/extfs etc, on Mac if using APFS) or FULL_COPY otherwise embedded_cas_daemon_config.copy_policy = CopyPolicy::BEST_AVAILABLE; embedded_cas_daemon_config.meterialization_mount_path = Some( - buck_out_path + re_config + .buck_out_path .to_str() .context("invalid meterialization_mount_path")? 
.to_owned(), @@ -577,19 +566,31 @@ impl RemoteExecutionClientImpl { let minimal_blob_ttl_threshold = static_metadata.minimal_blob_ttl_seconds.unwrap_or(3600); + let remaining_ttl_fraction_refresh_threshold = static_metadata + .remaining_ttl_fraction_refresh_threshold + .unwrap_or(0.1) + as f64; + let remaining_ttl_random_extra_threshold = static_metadata + .remaining_ttl_random_extra_threshold + .unwrap_or(0.25) + as f64; embedded_cas_daemon_config.ttl_extending_config = Some(TTLExtendingConfig { blocking_ttl_extending_seconds_threshold: minimal_blob_ttl_threshold, + remaining_ttl_fraction_refresh_threshold, + remaining_ttl_random_extra_threshold, ..Default::default() }); embedded_cas_daemon_config.action_cache_ttl_extending_config = Some(TTLExtendingConfig { blocking_ttl_extending_seconds_threshold: minimal_blob_ttl_threshold, + remaining_ttl_fraction_refresh_threshold, + remaining_ttl_random_extra_threshold, ..Default::default() }); re_client_config.cas_client_config = CASDaemonClientCfg::embedded_config(embedded_cas_daemon_config); - if let Some(logs_dir_path) = maybe_logs_dir_path { + if let Some(logs_dir_path) = &re_config.logs_dir_path { // make sure that the log dir exists as glog is expecting that :( fs_util::create_dir_all(logs_dir_path)?; re_client_config.log_file_location = Some( @@ -636,35 +637,67 @@ impl RemoteExecutionClientImpl { re_client_config.features_config_path = static_metadata .features_config_path .as_deref() - .unwrap_or("remote_execution/features/client_buck2") + .unwrap_or( + if static_metadata.use_zippy_rich_client && cfg!(target_os = "linux") { + "remote_execution/features/client_buck2" + } else { + "remote_execution/features/client_buck2_alternative" + }, + ) .to_owned(); + re_client_config.disable_fallocate = static_metadata.disable_fallocate; + + re_client_config.execute_over_thrift = static_metadata.execute_over_thrift; + // TODO(ndmitchell): For now, we just drop RE log messages, but ideally we'd put them in our log stream. let logger = slog::Logger::root(slog::Discard, slog::o!()); - REClientBuilder::new(fb) - .with_config(re_client_config) - .with_logger(logger) - .build_and_connect() - .await? + // TODO T179215751: If RE client fails we don't get the RE session ID and we can't find the RE logs. + // Better to generate the RE session ID ourselves and pass it to the RE client. + with_error_handler( + op_name, + "", + REClientBuilder::new(re_config.fb) + .with_config(re_client_config) + .with_logger(logger) + .build_and_connect() + .await, + ) + .await? }; #[cfg(not(fbcode_build))] let client = { - let _unused = (fb, maybe_logs_dir_path, buck_out_path, is_paranoid_mode); + with_error_handler( + op_name, + "", + REClientBuilder::build_and_connect(&static_metadata.0).await, + ) + .await? + }; - REClientBuilder::build_and_connect(&static_metadata.0).await? 
+ let respect_file_symlinks = { + #[cfg(fbcode_build)] + { + static_metadata.respect_file_symlinks + } + #[cfg(not(fbcode_build))] + { + false + } }; Self { client: Some(client), - skip_remote_cache, + skip_remote_cache: re_config.skip_remote_cache, cas_semaphore: Arc::new(Semaphore::new(static_metadata.cas_semaphore_size())), download_files_semapore: Arc::new(Semaphore::new(download_concurrency)), download_chunk_size, + respect_file_symlinks, } }; - res.context("RE: creating client") + res } fn client(&self) -> &REClient { @@ -673,33 +706,38 @@ impl RemoteExecutionClientImpl { .expect("REClient is always present unless dropped") } + fn get_session_id(&self) -> &str { + self.client().get_session_id() + } + async fn action_cache( &self, action_digest: ActionDigest, use_case: RemoteExecutorUseCase, ) -> anyhow::Result> { - let res = self - .client() - .get_action_cache_client() - .get_action_result( - use_case.metadata(), - ActionResultRequest { - digest: action_digest.to_re(), - ..Default::default() - }, - ) - .await; + let res = with_error_handler( + "action_cache", + self.get_session_id(), + self.client() + .get_action_cache_client() + .get_action_result( + use_case.metadata(None), + ActionResultRequest { + digest: action_digest.to_re(), + ..Default::default() + }, + ) + .await, + ) + .await; match res { Ok(r) => Ok(Some(r)), Err(e) => { - if e.downcast_ref::() - .map(|e| e.code == TCode::NOT_FOUND) - == Some(true) - { - Ok(None) - } else { - Err(e) + let e: buck2_error::Error = e.into(); + match e.find_typed_context::() { + Some(e) if e.code == TCode::NOT_FOUND => Ok(None), + _ => Err(e.into()), } } } @@ -713,19 +751,27 @@ impl RemoteExecutionClientImpl { dir_path: &ProjectRelativePath, input_dir: &ActionImmutableDirectory, use_case: RemoteExecutorUseCase, + identity: Option<&ReActionIdentity<'_>>, digest_config: DigestConfig, ) -> anyhow::Result { // Actually upload to CAS let _cas = self.cas_semaphore.acquire().await; - Uploader::upload( - fs, - self.client().get_cas_client(), - materializer, - dir_path, - input_dir, - blobs, - use_case, - digest_config, + + with_error_handler( + "upload", + self.get_session_id(), + Uploader::upload( + fs, + self.client().get_cas_client(), + materializer, + dir_path, + input_dir, + blobs, + use_case, + identity, + digest_config, + ) + .await, ) .await } @@ -737,19 +783,24 @@ impl RemoteExecutionClientImpl { inlined_blobs_with_digest: Vec, use_case: RemoteExecutorUseCase, ) -> anyhow::Result<()> { - self.client() - .get_cas_client() - .upload( - use_case.metadata(), - UploadRequest { - files_with_digest: Some(files_with_digest), - inlined_blobs_with_digest: Some(inlined_blobs_with_digest), - directories: Some(directories), - upload_only_missing: true, - ..Default::default() - }, - ) - .await?; + with_error_handler( + "upload_files_and_directories", + self.get_session_id(), + self.client() + .get_cas_client() + .upload( + use_case.metadata(None), + UploadRequest { + files_with_digest: Some(files_with_digest), + inlined_blobs_with_digest: Some(inlined_blobs_with_digest), + directories: Some(directories), + upload_only_missing: true, + ..Default::default() + }, + ) + .await, + ) + .await?; Ok(()) } @@ -779,6 +830,8 @@ impl RemoteExecutionClientImpl { None }; + let re_use_case = metadata.use_case_id.clone(); + #[allow(clippy::large_enum_variant)] enum ResponseOrStateChange { Present(ExecuteWithProgressResponse), @@ -801,7 +854,7 @@ impl RemoteExecutionClientImpl { async move { loop { let next = futures::future::select( - 
manager.liveliness_observer.while_alive(), + manager.inner.liveliness_observer.while_alive(), receiver.next(), ); @@ -848,20 +901,27 @@ impl RemoteExecutionClientImpl { action_digest: String, platform: &remote_execution::Platform, action_key: &Option, + use_case: String, ) -> re_stage::Stage { match stage { - Stage::QUEUED => re_stage::Stage::Queue(ReQueue { action_digest }), - Stage::MATERIALIZING_INPUT => { - re_stage::Stage::WorkerDownload(ReWorkerDownload { action_digest }) - } + Stage::QUEUED => re_stage::Stage::Queue(ReQueue { + action_digest, + use_case, + }), + Stage::MATERIALIZING_INPUT => re_stage::Stage::WorkerDownload(ReWorkerDownload { + action_digest, + use_case, + }), Stage::EXECUTING => re_stage::Stage::Execute(ReExecute { action_digest, platform: Some(platform_to_proto(platform)), action_key: action_key.clone(), + use_case, + }), + Stage::UPLOADING_OUTPUT => re_stage::Stage::WorkerUpload(ReWorkerUpload { + action_digest, + use_case, }), - Stage::UPLOADING_OUTPUT => { - re_stage::Stage::WorkerUpload(ReWorkerUpload { action_digest }) - } _ => { tracing::debug!( "Received unexpected RE stage {:#?} for action: {}", @@ -905,6 +965,7 @@ impl RemoteExecutionClientImpl { action_digest_str.clone(), platform, &action_key, + re_use_case.clone(), ), manager, re_max_queue_time, @@ -928,60 +989,82 @@ impl RemoteExecutionClientImpl { } } - pub async fn execute( + pub async fn execute<'a>( &self, action_digest: ActionDigest, platform: &RE::Platform, + dependencies: impl IntoIterator, use_case: RemoteExecutorUseCase, identity: &ReActionIdentity<'_>, manager: &mut CommandExecutionManager, skip_cache_read: bool, skip_cache_write: bool, re_max_queue_time: Option, + re_resource_units: Option, knobs: &ExecutorGlobalKnobs, ) -> anyhow::Result { let metadata = RemoteExecutionMetadata { - action_history_info: Some(ActionHistoryInfo { - action_key: identity.action_key.clone(), - disable_retry_on_oom: false, - ..Default::default() - }), - host_resource_requirements: Some(HostResourceRequirements { - affinity_keys: vec![identity.affinity_key.clone()], - input_files_bytes: identity.paths.input_files_bytes() as i64, - ..Default::default() - }), platform: Some(re_platform(platform)), do_not_cache: skip_cache_write, buck_info: Some(BuckInfo { + version: buck2_build_info::revision() + .map(|s| s.to_owned()) + .unwrap_or_default(), build_id: identity.trace_id.to_string(), ..Default::default() }), - ..use_case.metadata() + respect_file_symlinks: Some(self.respect_file_symlinks), + ..use_case.metadata(Some(identity)) }; let request = ExecuteRequest { skip_cache_lookup: self.skip_remote_cache || skip_cache_read, - execution_policy: Some(TExecutionPolicy::default()), + execution_policy: Some(TExecutionPolicy { + affinity_keys: vec![identity.affinity_key.clone()], + ..Default::default() + }), action_digest: action_digest.to_re(), + host_runtime_requirements: THostRuntimeRequirements { + platform: re_platform(platform), + host_resource_requirements: THostResourceRequirements { + input_files_bytes: identity.paths.input_files_bytes() as i64, + resource_units: re_resource_units.unwrap_or_default(), + ..Default::default() + }, + dependencies: dependencies + .into_iter() + .map(|dep| TDependency { + smc_tier: dep.smc_tier.clone(), + id: dep.id.clone(), + ..Default::default() + }) + .collect(), + ..Default::default() + }, ..Default::default() }; - self.execute_impl( - metadata, - request, - &action_digest, - manager, - re_max_queue_time, - platform, - knobs, + let re_action = format!("Execute with digest {}", 
&action_digest); + with_error_handler( + re_action.as_str(), + self.get_session_id(), + self.execute_impl( + metadata, + request, + &action_digest, + manager, + re_max_queue_time, + platform, + knobs, + ) + .await, ) .await - .with_context(|| format!("RE: execution with digest {}", &action_digest)) } /// Fetches a list of digests from the CAS and casts them to Tree objects. /// If fetching or decoding fails for one or more digests, returns an Err. async fn download_typed_blobs( &self, + identity: Option<&ReActionIdentity<'_>>, digests: Vec, use_case: RemoteExecutorUseCase, ) -> anyhow::Result> { @@ -989,17 +1072,21 @@ impl RemoteExecutionClientImpl { return Ok(Vec::new()); } let expected_blobs = digests.len(); - let response = self - .client() - .get_cas_client() - .download( - use_case.metadata(), - DownloadRequest { - inlined_digests: Some(digests), - ..Default::default() - }, - ) - .await?; + let response = with_error_handler( + "download_typed_blobs", + self.get_session_id(), + self.client() + .get_cas_client() + .download( + use_case.metadata(identity), + DownloadRequest { + inlined_digests: Some(digests), + ..Default::default() + }, + ) + .await, + ) + .await?; let mut blobs: Vec = Vec::with_capacity(expected_blobs); if let Some(ds) = response.inlined_blobs { @@ -1025,20 +1112,24 @@ impl RemoteExecutionClientImpl { digest: &TDigest, use_case: RemoteExecutorUseCase, ) -> anyhow::Result> { - let response = self - .client() - .get_cas_client() - .download( - use_case.metadata(), - DownloadRequest { - inlined_digests: Some(vec![digest.clone()]), - ..Default::default() - }, - ) - // boxed() to segment the future - .boxed() - .await - .with_context(|| format!("Download request failed for digest {}", digest))?; + let re_action = format!("download_blob for digest {}", digest); + let response = with_error_handler( + re_action.as_str(), + self.get_session_id(), + self.client() + .get_cas_client() + .download( + use_case.metadata(None), + DownloadRequest { + inlined_digests: Some(vec![digest.clone()]), + ..Default::default() + }, + ) + // boxed() to segment the future + .boxed() + .await, + ) + .await?; response .inlined_blobs @@ -1054,7 +1145,14 @@ impl RemoteExecutionClientImpl { blob: Vec, use_case: RemoteExecutorUseCase, ) -> anyhow::Result { - self.client().upload_blob(blob, use_case.metadata()).await + with_error_handler( + "upload_blob", + self.get_session_id(), + self.client() + .upload_blob(blob, use_case.metadata(None)) + .await, + ) + .await } async fn materialize_files( @@ -1062,9 +1160,12 @@ impl RemoteExecutionClientImpl { files: Vec, use_case: RemoteExecutorUseCase, ) -> anyhow::Result<()> { - static FAIL_RE_DOWNLOADS: EnvHelper = EnvHelper::new("BUCK2_TEST_FAIL_RE_DOWNLOADS"); - if FAIL_RE_DOWNLOADS.get()?.copied().unwrap_or_default() { - return Err(anyhow::anyhow!("Injected error")); + if buck2_env_anyhow!( + "BUCK2_TEST_FAIL_RE_DOWNLOADS", + bool, + applicability = testing + )? 
{ + return Err(test_re_error("Injected error", TCode::NOT_FOUND).into()); } let use_case = &use_case; @@ -1076,21 +1177,26 @@ impl RemoteExecutionClientImpl { .await .context("Failed to acquire download_files_semapore")?; - self.client() - .get_cas_client() - .download( - use_case.metadata(), - DownloadRequest { - file_digests: Some(chunk), - ..Default::default() - }, - ) - .await?; + with_error_handler( + "materialize_files", + self.get_session_id(), + self.client() + .get_cas_client() + .download( + use_case.metadata(None), + DownloadRequest { + file_digests: Some(chunk), + ..Default::default() + }, + ) + .await, + ) + .await?; anyhow::Ok(()) }); - futures::future::try_join_all(futs).await?; + buck2_util::future::try_join_all(futs).await?; Ok(()) } @@ -1102,18 +1208,22 @@ impl RemoteExecutionClientImpl { ) -> anyhow::Result)>> { let now = Utc::now(); - let ttls = self - .client() - .get_cas_client() - .get_digests_ttl( - use_case.metadata(), - GetDigestsTtlRequest { - digests, - ..Default::default() - }, - ) - .await? - .digests_with_ttl; + let ttls = with_error_handler( + "get_digest_expirations", + self.get_session_id(), + self.client() + .get_cas_client() + .get_digests_ttl( + use_case.metadata(None), + GetDigestsTtlRequest { + digests, + ..Default::default() + }, + ) + .await, + ) + .await? + .digests_with_ttl; Ok(ttls .into_iter() @@ -1121,27 +1231,59 @@ impl RemoteExecutionClientImpl { .collect()) } + async fn extend_digest_ttl( + &self, + digests: Vec, + ttl: Duration, + use_case: RemoteExecutorUseCase, + ) -> anyhow::Result<()> { + let use_case = &use_case; + // TODO(arr): use batch API from RE when it becomes available + with_error_handler( + "extend_digest_ttl", + self.get_session_id(), + self.client() + .get_cas_client() + .extend_digest_ttl( + use_case.metadata(None), + ExtendDigestsTtlRequest { + digests, + ttl: ttl.as_secs() as i64, + ..Default::default() + }, + ) + .await, + ) + .await?; + Ok(()) + } + async fn write_action_result( &self, - digest: TDigest, + digest: ActionDigest, result: TActionResult2, use_case: RemoteExecutorUseCase, platform: &RE::Platform, ) -> anyhow::Result { - self.client() - .get_action_cache_client() - .write_action_result( - RemoteExecutionMetadata { - platform: Some(re_platform(platform)), - ..use_case.metadata() - }, - WriteActionResultRequest { - action_digest: digest, - action_result: result, - ..Default::default() - }, - ) - .await + with_error_handler( + "write_action_result", + self.get_session_id(), + self.client() + .get_action_cache_client() + .write_action_result( + RemoteExecutionMetadata { + platform: Some(re_platform(platform)), + ..use_case.metadata(None) + }, + WriteActionResultRequest { + action_digest: digest.to_re(), + action_result: result, + ..Default::default() + }, + ) + .await, + ) + .await } } @@ -1162,7 +1304,6 @@ impl Drop for RemoteExecutionClientImpl { } } -#[allow(clippy::needless_collect)] // chunks() is not Send. fn chunks(v: Vec, chunk_size: usize) -> impl Iterator> { if !v.is_empty() && v.len() <= chunk_size { return Either::Left(std::iter::once(v)); diff --git a/app/buck2_execute/src/re/error.rs b/app/buck2_execute/src/re/error.rs new file mode 100644 index 0000000000000..ac8d546be35ef --- /dev/null +++ b/app/buck2_execute/src/re/error.rs @@ -0,0 +1,131 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use allocative::Allocative; +use buck2_error::ErrorTag; +use buck2_error::TypedContext; +use remote_execution::REClientError; +use remote_execution::TCode; +use remote_execution::TCodeReasonGroup; + +pub fn get_re_error_tag(tcode: TCode) -> ErrorTag { + match tcode { + TCode::CANCELLED => ErrorTag::ReCancelled, + TCode::UNKNOWN => ErrorTag::ReUnknown, + TCode::INVALID_ARGUMENT => ErrorTag::ReInvalidArgument, + TCode::DEADLINE_EXCEEDED => ErrorTag::ReDeadlineExceeded, + TCode::NOT_FOUND => ErrorTag::ReNotFound, + TCode::ALREADY_EXISTS => ErrorTag::ReAlreadyExists, + TCode::PERMISSION_DENIED => ErrorTag::RePermissionDenied, + TCode::RESOURCE_EXHAUSTED => ErrorTag::ReResourceExhausted, + TCode::FAILED_PRECONDITION => ErrorTag::ReFailedPrecondition, + TCode::ABORTED => ErrorTag::ReAborted, + TCode::OUT_OF_RANGE => ErrorTag::ReOutOfRange, + TCode::UNIMPLEMENTED => ErrorTag::ReUnimplemented, + TCode::INTERNAL => ErrorTag::ReInternal, + TCode::UNAVAILABLE => ErrorTag::ReUnavailable, + TCode::DATA_LOSS => ErrorTag::ReDataLoss, + TCode::UNAUTHENTICATED => ErrorTag::ReUnauthenticated, + _ => ErrorTag::ReUnknownTcode, + } +} + +#[derive(Allocative, Debug, Clone, buck2_error::Error)] +#[error("Remote Execution Error on {} for ReSession {}\nError: ({})", .re_action, .re_session_id, .message)] +pub struct RemoteExecutionError { + re_action: String, + re_session_id: String, + pub message: String, + #[allocative(skip)] + pub code: TCode, + #[allocative(skip)] + pub group: TCodeReasonGroup, +} + +impl TypedContext for RemoteExecutionError { + fn eq(&self, other: &dyn TypedContext) -> bool { + match (other as &dyn std::any::Any).downcast_ref::() { + Some(right) => self.eq(right), + None => false, + } + } + + fn should_display(&self) -> bool { + false + } +} + +fn re_error( + re_action: &str, + re_session_id: &str, + message: String, + code: TCode, + group: TCodeReasonGroup, +) -> buck2_error::Error { + let err = RemoteExecutionError { + re_action: re_action.to_owned(), + re_session_id: re_session_id.to_owned(), + message, + code, + group, + }; + let buck2_error: buck2_error::Error = err.clone().into(); + + buck2_error + .context(err) + .tag([get_re_error_tag(code)]) + .context_for_key(&group.to_string()) +} + +pub(crate) async fn with_error_handler( + re_action: &str, + re_session_id: &str, + result: anyhow::Result, +) -> anyhow::Result { + match result { + Ok(val) => Ok(val), + Err(e) => { + let (code, group) = e + .downcast_ref::() + .map(|e| (e.code, e.group)) + .unwrap_or((TCode::UNKNOWN, TCodeReasonGroup::UNKNOWN)); + + Err(re_error(re_action, re_session_id, format!("{:#}", e), code, group).into()) + } + } +} + +pub fn test_re_error(message: &str, code: TCode) -> buck2_error::Error { + re_error( + "test", + "test", + message.to_owned(), + code, + TCodeReasonGroup::UNKNOWN, + ) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_extract_re_error() { + let error: buck2_error::Error = re_error( + "test", + "test", + "test".to_owned(), + TCode::UNKNOWN, + TCodeReasonGroup::UNKNOWN, + ); + + let err = error.find_typed_context::().unwrap(); + assert_eq!(err.code, TCode::UNKNOWN); + } +} diff --git a/app/buck2_execute/src/re/manager.rs b/app/buck2_execute/src/re/manager.rs index 10c80c295bde9..14a629825adb0 100644 --- 
a/app/buck2_execute/src/re/manager.rs +++ b/app/buck2_execute/src/re/manager.rs @@ -19,8 +19,9 @@ use std::time::Duration; use allocative::Allocative; use anyhow::Context as _; use async_trait::async_trait; -use buck2_common::result::SharedResult; use buck2_core::async_once_cell::AsyncOnceCell; +use buck2_core::buck2_env_anyhow; +use buck2_core::execution_types::executor_config::RemoteExecutorDependency; use buck2_core::execution_types::executor_config::RemoteExecutorUseCase; use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; use buck2_core::fs::project::ProjectRoot; @@ -80,32 +81,23 @@ use crate::re::uploader::UploadStats; /// same RE session. Concurrent commands will share an RE session. #[derive(Clone, Allocative)] -struct RemoteExecutionConfig { +pub struct RemoteExecutionConfig { #[allocative(skip)] // TODO(nga): implement in `allocative`. - fb: FacebookInit, + pub fb: FacebookInit, /// whether to skip the cache when performing RE - skip_remote_cache: bool, + pub skip_remote_cache: bool, /// number of retries when attempting the initial RE connection - connection_retries: usize, - static_metadata: Arc, - logs_dir_path: Option, - buck_out_path: AbsNormPathBuf, + pub connection_retries: usize, + pub static_metadata: Arc, + pub logs_dir_path: Option, + pub buck_out_path: AbsNormPathBuf, /// Whether Buck is running in paranoid mode. - is_paranoid_mode: bool, + pub is_paranoid_mode: bool, } impl RemoteExecutionConfig { async fn connect_now(&self) -> anyhow::Result { - RemoteExecutionClient::new_retry( - self.fb, - self.skip_remote_cache, - self.connection_retries, - self.static_metadata.dupe(), - self.logs_dir_path.as_deref(), - &self.buck_out_path, - self.is_paranoid_mode, - ) - .await + RemoteExecutionClient::new_retry(&self).await } } @@ -115,7 +107,7 @@ pub trait ReConnectionObserver: Allocative + 'static + Send + Sync { #[derive(Allocative)] struct LazyRemoteExecutionClient { - client: AsyncOnceCell>, + client: AsyncOnceCell>, observers: Mutex>>, config: RemoteExecutionConfig, } @@ -155,7 +147,7 @@ impl LazyRemoteExecutionClient { } } - async fn init(&self) -> SharedResult { + async fn init(&self) -> buck2_error::Result { let client = self.config.connect_now().await?; let mut observers = self.observers.lock().unwrap(); @@ -252,6 +244,11 @@ impl ReConnectionManager { ..Default::default() }; + res.upload_stats + .fill_from_re_client_metrics(&client_stats.upload_storage_stats); + res.download_stats + .fill_from_re_client_metrics(&client_stats.download_storage_stats); + // The rest of the fields are known to be their default value if we don't have a client, so // we ask the client to fill them iff we have one. let conn = self.data.read().unwrap().upgrade(); @@ -278,7 +275,6 @@ pub struct ReConnectionHandle { // TODO(cjhopman): While we vend out Weak with this, due to the way this is stored/used on // the dice graph we are guaranteed that there's an Arc that outlives all the Weak from it. That // kind of defeats the purpose of this. - #[cfg_attr(feature = "gazebo_lint", allow(gazebo_lint_arc_on_dupe))] connection: Arc>, // We use a similar Arc/Weak to ensure that the observer only lives as long as the connection // handle. 
Otherwise it would be possible to observe a session create attached to one command @@ -306,6 +302,7 @@ impl ReConnectionHandle { pub fn get_client(&self) -> ManagedRemoteExecutionClient { ManagedRemoteExecutionClient { data: Arc::downgrade(&self.connection), + re_use_case_override: None, } } } @@ -313,10 +310,15 @@ #[derive(Clone, Dupe)] pub struct ManagedRemoteExecutionClient { data: Weak>, + re_use_case_override: Option, } impl ManagedRemoteExecutionClient { - #[cfg_attr(feature = "gazebo_lint", allow(gazebo_lint_arc_on_dupe))] + pub fn with_re_use_case_override(mut self, use_case: Option) -> Self { + self.re_use_case_override = use_case; + self + } + fn lock(&self) -> anyhow::Result>> { self.data .upgrade() @@ -328,6 +330,7 @@ impl ManagedRemoteExecutionClient { action_digest: ActionDigest, use_case: RemoteExecutorUseCase, ) -> anyhow::Result> { + let use_case = self.re_use_case_override.unwrap_or(use_case); Ok(self .lock()? .get() @@ -346,8 +349,10 @@ impl ManagedRemoteExecutionClient { dir_path: &ProjectRelativePath, input_dir: &ActionImmutableDirectory, use_case: RemoteExecutorUseCase, + identity: Option<&ReActionIdentity<'_>>, digest_config: DigestConfig, ) -> anyhow::Result { + let use_case = self.re_use_case_override.unwrap_or(use_case); self.lock()? .get() .await? @@ -358,6 +363,7 @@ impl ManagedRemoteExecutionClient { dir_path, input_dir, use_case, + identity, digest_config, ) .await @@ -370,6 +376,7 @@ impl ManagedRemoteExecutionClient { inlined_blobs_with_digest: Vec, use_case: RemoteExecutorUseCase, ) -> anyhow::Result<()> { + let use_case = self.re_use_case_override.unwrap_or(use_case); self.lock()? .get() .await? @@ -382,30 +389,35 @@ impl ManagedRemoteExecutionClient { .await } - pub async fn execute( + pub async fn execute<'a>( &self, action_digest: ActionDigest, platform: &RE::Platform, + dependencies: impl IntoIterator, use_case: RemoteExecutorUseCase, identity: &ReActionIdentity<'_>, manager: &mut CommandExecutionManager, skip_cache_read: bool, skip_cache_write: bool, re_max_queue_time: Option, + re_resource_units: Option, knobs: &ExecutorGlobalKnobs, ) -> anyhow::Result { + let use_case = self.re_use_case_override.unwrap_or(use_case); self.lock()? .get() .await? .execute( action_digest, platform, + dependencies, use_case, identity, manager, skip_cache_read, skip_cache_write, re_max_queue_time, + re_resource_units, knobs, ) .await @@ -416,6 +428,7 @@ impl ManagedRemoteExecutionClient { files: Vec, use_case: RemoteExecutorUseCase, ) -> anyhow::Result<()> { + let use_case = self.re_use_case_override.unwrap_or(use_case); self.lock()? .get() .await? @@ -425,13 +438,15 @@ impl ManagedRemoteExecutionClient { pub async fn download_typed_blobs( &self, + identity: Option<&ReActionIdentity<'_>>, digests: Vec, use_case: RemoteExecutorUseCase, ) -> anyhow::Result> { + let use_case = self.re_use_case_override.unwrap_or(use_case); self.lock()? .get() .await? - .download_typed_blobs(digests, use_case) + .download_typed_blobs(identity, digests, use_case) .await } @@ -440,6 +455,7 @@ impl ManagedRemoteExecutionClient { digest: &TDigest, use_case: RemoteExecutorUseCase, ) -> anyhow::Result> { + let use_case = self.re_use_case_override.unwrap_or(use_case); self.lock()? .get() .await?
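An aside on the `re_use_case_override` plumbing in the hunks above: the diff adds one builder-style setter, and then every wrapper method resolves the optional override with `unwrap_or` before delegating. A minimal runnable sketch of that pattern with hypothetical stand-in types (not the real buck2 API):

```rust
// `UseCase` and `Client` are simplified stand-ins for RemoteExecutorUseCase
// and ManagedRemoteExecutionClient.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct UseCase(&'static str);

#[derive(Clone, Default)]
struct Client {
    use_case_override: Option<UseCase>,
}

impl Client {
    // Builder-style setter: consume self, store the override, return self.
    fn with_use_case_override(mut self, use_case: Option<UseCase>) -> Self {
        self.use_case_override = use_case;
        self
    }

    // Every delegating method starts with this same resolution step.
    fn effective_use_case(&self, per_call: UseCase) -> UseCase {
        self.use_case_override.unwrap_or(per_call)
    }
}

fn main() {
    let plain = Client::default();
    assert_eq!(plain.effective_use_case(UseCase("build")), UseCase("build"));

    // Once set, the per-client override wins over any per-call use case.
    let pinned = plain.clone().with_use_case_override(Some(UseCase("testing")));
    assert_eq!(pinned.effective_use_case(UseCase("build")), UseCase("testing"));
}
```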
@@ -452,6 +468,7 @@ impl ManagedRemoteExecutionClient { blob: Vec, use_case: RemoteExecutorUseCase, ) -> anyhow::Result { + let use_case = self.re_use_case_override.unwrap_or(use_case); self.lock()?.get().await?.upload_blob(blob, use_case).await } @@ -460,6 +477,7 @@ impl ManagedRemoteExecutionClient { digests: Vec, use_case: RemoteExecutorUseCase, ) -> anyhow::Result)>> { + let use_case = self.re_use_case_override.unwrap_or(use_case); self.lock()? .get() .await? @@ -467,20 +485,46 @@ impl ManagedRemoteExecutionClient { .await } - pub async fn write_action_result( + pub async fn extend_digest_ttl( &self, - digest: TDigest, - result: TActionResult2, + digests: Vec, + ttl: Duration, use_case: RemoteExecutorUseCase, - platform: &RE::Platform, - ) -> anyhow::Result { + ) -> anyhow::Result<()> { + let use_case = self.re_use_case_override.unwrap_or(use_case); self.lock()? .get() .await? - .write_action_result(digest, result, use_case, platform) + .extend_digest_ttl(digests, ttl, use_case) .await } + pub async fn write_action_result( + &self, + digest: ActionDigest, + result: TActionResult2, + use_case: RemoteExecutorUseCase, + platform: &RE::Platform, + ) -> anyhow::Result { + let use_case = self.re_use_case_override.unwrap_or(use_case); + if buck2_env_anyhow!( + "BUCK2_TEST_SKIP_ACTION_CACHE_WRITE", + bool, + applicability = testing + )? { + Ok(WriteActionResultResponse { + actual_action_result: result, + ..Default::default() + }) + } else { + self.lock()? + .get() + .await? + .write_action_result(digest, result, use_case, platform) + .await + } + } + pub async fn get_session_id(&self) -> anyhow::Result { let session_id = self.lock()?.get().await?.get_session_id().to_owned(); Ok(session_id) @@ -489,6 +533,9 @@ impl ManagedRemoteExecutionClient { /// Construct a dummy ManagedRemoteExecutionClient that won't actually work. This is only /// remotely useful in tests. 
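An aside on the `Weak`-handle design running through this file (the `testing_new_dummy` the doc comment above describes follows right below): the managed client holds only a `Weak` pointer, so it can never keep the RE connection alive on its own, and a dummy built from `Weak::new()` fails every call. A self-contained sketch with hypothetical simplified types:

```rust
use std::sync::{Arc, Weak};

struct Conn;

// Simplified stand-in for ManagedRemoteExecutionClient.
struct ManagedClient {
    data: Weak<Conn>,
}

impl ManagedClient {
    // Upgrading the Weak is the liveness check: it fails once the
    // connection (the owning Arc) has been dropped.
    fn lock(&self) -> Result<Arc<Conn>, String> {
        self.data
            .upgrade()
            .ok_or_else(|| "client used after the connection was dropped".to_owned())
    }

    // Weak::new() can never upgrade, so every call on the dummy fails cleanly.
    fn testing_new_dummy() -> Self {
        Self { data: Weak::new() }
    }
}

fn main() {
    let conn = Arc::new(Conn);
    let client = ManagedClient { data: Arc::downgrade(&conn) };
    assert!(client.lock().is_ok());

    drop(conn); // the connection goes away...
    assert!(client.lock().is_err()); // ...and the handle reports it

    assert!(ManagedClient::testing_new_dummy().lock().is_err());
}
```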
pub fn testing_new_dummy() -> Self { - Self { data: Weak::new() } + Self { + data: Weak::new(), + re_use_case_override: None, + } } } diff --git a/app/buck2_execute/src/re/metadata.rs b/app/buck2_execute/src/re/metadata.rs index a1ce3c260b54a..d014b1fad6f5d 100644 --- a/app/buck2_execute/src/re/metadata.rs +++ b/app/buck2_execute/src/re/metadata.rs @@ -8,16 +8,35 @@ */ use buck2_core::execution_types::executor_config::RemoteExecutorUseCase; +use buck2_events::dispatch::get_dispatcher_opt; +use remote_execution::ActionHistoryInfo; +use remote_execution::BuckInfo; use remote_execution::RemoteExecutionMetadata; +use crate::re::action_identity::ReActionIdentity; + pub trait RemoteExecutionMetadataExt { - fn metadata(&self) -> RemoteExecutionMetadata; + fn metadata(&self, identity: Option<&ReActionIdentity>) -> RemoteExecutionMetadata; } impl RemoteExecutionMetadataExt for RemoteExecutorUseCase { - fn metadata(&self) -> RemoteExecutionMetadata { + fn metadata(&self, identity: Option<&ReActionIdentity>) -> RemoteExecutionMetadata { + let trace_id = match get_dispatcher_opt() { + Some(dispatcher) => dispatcher.trace_id().to_string(), + // See the FIXME added in D54396421 + None => String::new(), + }; RemoteExecutionMetadata { use_case_id: self.as_str().to_owned(), + buck_info: Some(BuckInfo { + build_id: trace_id, + ..Default::default() + }), + action_history_info: identity.map(|identity| ActionHistoryInfo { + action_key: identity.action_key.clone(), + disable_retry_on_oom: false, + ..Default::default() + }), ..Default::default() } } diff --git a/app/buck2_execute/src/re/mod.rs b/app/buck2_execute/src/re/mod.rs deleted file mode 100644 index 47c76ddbf4c55..0000000000000 --- a/app/buck2_execute/src/re/mod.rs +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -pub mod action_identity; -pub mod client; -pub mod convert; -pub mod manager; -pub mod metadata; -pub mod re_get_session_id; -pub mod remote_action_result; -mod stats; -pub mod streams; -pub mod uploader; diff --git a/app/buck2_execute/src/re/remote_action_result.rs b/app/buck2_execute/src/re/remote_action_result.rs index 7c471b7a9acfa..18fa891c21268 100644 --- a/app/buck2_execute/src/re/remote_action_result.rs +++ b/app/buck2_execute/src/re/remote_action_result.rs @@ -12,6 +12,7 @@ use std::time::SystemTime; use anyhow::Context as _; use buck2_core::execution_types::executor_config::RemoteExecutorUseCase; +use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; use buck2_miniperf_proto::MiniperfCounter; use remote_execution::ActionResultResponse; use remote_execution::ExecuteResponse; @@ -20,6 +21,7 @@ use remote_execution::TExecutedActionMetadata; use remote_execution::TFile; use remote_execution::TPerfCount; use remote_execution::TSubsysPerfCount; +use remote_execution::TSymlink; use remote_execution::TTimestamp; use crate::digest_config::DigestConfig; @@ -29,14 +31,22 @@ use crate::execute::result::CommandExecutionMetadata; use crate::re::manager::ManagedRemoteExecutionClient; use crate::re::streams::RemoteCommandStdStreams; -pub struct RemoteDepFileResult(pub ActionResultResponse); +pub struct ActionCacheResult(pub ActionResultResponse, pub buck2_data::CacheType); pub trait RemoteActionResult: Send + Sync { fn output_files(&self) -> &[TFile]; fn output_directories(&self) -> &[TDirectory2]; + fn output_symlinks(&self) -> &[TSymlink]; fn execution_kind(&self, details: RemoteCommandExecutionDetails) -> CommandExecutionKind; + /// This is only called after we inspect the action result, and the exit code is not 0 + fn execution_kind_with_materialized_inputs_for_failed( + &self, + details: RemoteCommandExecutionDetails, + materialized_inputs_for_failed: Option>, + ) -> CommandExecutionKind; + fn timing(&self) -> CommandExecutionMetadata; fn std_streams( @@ -59,7 +69,19 @@ impl RemoteActionResult for ExecuteResponse { &self.action_result.output_directories } + fn output_symlinks(&self) -> &[TSymlink] { + &self.action_result.output_symlinks + } + fn execution_kind(&self, details: RemoteCommandExecutionDetails) -> CommandExecutionKind { + self.execution_kind_with_materialized_inputs_for_failed(details, None) + } + + fn execution_kind_with_materialized_inputs_for_failed( + &self, + details: RemoteCommandExecutionDetails, + materialized_inputs_for_failed: Option>, + ) -> CommandExecutionKind { let meta = &self.action_result.execution_metadata; let queue_time = meta .last_queued_timestamp @@ -68,6 +90,7 @@ impl RemoteActionResult for ExecuteResponse { CommandExecutionKind::Remote { details, queue_time, + materialized_inputs_for_failed, } } @@ -89,86 +112,43 @@ impl RemoteActionResult for ExecuteResponse { } } -impl RemoteActionResult for Box { +impl RemoteActionResult for ActionCacheResult { fn output_files(&self) -> &[TFile] { - self.as_ref().output_files() + &self.0.action_result.output_files } fn output_directories(&self) -> &[TDirectory2] { - self.as_ref().output_directories() - } - - fn execution_kind(&self, details: RemoteCommandExecutionDetails) -> CommandExecutionKind { - self.as_ref().execution_kind(details) - } - - fn timing(&self) -> CommandExecutionMetadata { - self.as_ref().timing() - } - - fn std_streams( - &self, - client: &ManagedRemoteExecutionClient, - use_case: RemoteExecutorUseCase, - digest_config: DigestConfig, - ) -> RemoteCommandStdStreams { - 
self.as_ref().std_streams(client, use_case, digest_config) - } - - fn ttl(&self) -> i64 { - self.as_ref().ttl() - } -} - -impl RemoteActionResult for ActionResultResponse { - fn output_files(&self) -> &[TFile] { - &self.action_result.output_files + &self.0.action_result.output_directories } - fn output_directories(&self) -> &[TDirectory2] { - &self.action_result.output_directories + fn output_symlinks(&self) -> &[TSymlink] { + &self.0.action_result.output_symlinks } fn execution_kind(&self, details: RemoteCommandExecutionDetails) -> CommandExecutionKind { - CommandExecutionKind::ActionCache { details } - } - - fn timing(&self) -> CommandExecutionMetadata { - let mut timing = timing_from_re_metadata(&self.action_result.execution_metadata); - timing.wall_time = Duration::ZERO; // This was a cache hit so we didn't wait. - timing.input_materialization_duration = Duration::ZERO; // This was a cache hit so we didn't wait. - timing + match self.1 { + buck2_data::CacheType::ActionCache => CommandExecutionKind::ActionCache { details }, + buck2_data::CacheType::RemoteDepFileCache => { + CommandExecutionKind::RemoteDepFileCache { details } + } + } } - fn std_streams( + fn execution_kind_with_materialized_inputs_for_failed( &self, - client: &ManagedRemoteExecutionClient, - use_case: RemoteExecutorUseCase, - digest_config: DigestConfig, - ) -> RemoteCommandStdStreams { - RemoteCommandStdStreams::new(&self.action_result, client, use_case, digest_config) - } - - fn ttl(&self) -> i64 { - self.ttl - } -} - -impl RemoteActionResult for RemoteDepFileResult { - fn output_files(&self) -> &[TFile] { - self.0.output_files() - } - - fn output_directories(&self) -> &[TDirectory2] { - self.0.output_directories() - } - - fn execution_kind(&self, details: RemoteCommandExecutionDetails) -> CommandExecutionKind { - CommandExecutionKind::RemoteDepFileCache { details } + details: RemoteCommandExecutionDetails, + _materialized_inputs_for_failed: Option>, + ) -> CommandExecutionKind { + self.execution_kind(details) } fn timing(&self) -> CommandExecutionMetadata { - self.0.timing() + let mut timing = timing_from_re_metadata(&self.0.action_result.execution_metadata); + // This was a cache hit so we didn't wait at all + timing.wall_time = Duration::ZERO; + timing.input_materialization_duration = Duration::ZERO; + timing.queue_duration = None; + timing } fn std_streams( @@ -177,11 +157,11 @@ impl RemoteActionResult for RemoteDepFileResult { use_case: RemoteExecutorUseCase, digest_config: DigestConfig, ) -> RemoteCommandStdStreams { - self.0.std_streams(client, use_case, digest_config) + RemoteCommandStdStreams::new(&self.0.action_result, client, use_case, digest_config) } fn ttl(&self) -> i64 { - self.0.ttl() + self.0.ttl } } @@ -207,6 +187,10 @@ fn timing_from_re_metadata(meta: &TExecutedActionMetadata) -> CommandExecutionMe .input_fetch_completed_timestamp .saturating_duration_since(&meta.input_fetch_start_timestamp); + let queue_duration = meta + .worker_start_timestamp + .saturating_duration_since(&meta.queued_timestamp); + CommandExecutionMetadata { wall_time: execution_time, execution_time, @@ -214,17 +198,23 @@ fn timing_from_re_metadata(meta: &TExecutedActionMetadata) -> CommandExecutionMe execution_stats, input_materialization_duration: fetch_input_time, hashing_duration: Duration::ZERO, + hashed_artifacts_count: 0, + queue_duration: Some(queue_duration), } } fn convert_perf_counts( perf_counts: &TPerfCount, ) -> anyhow::Result { - Ok(buck2_data::CommandExecutionStats { - cpu_instructions_user: 
convert_perf_count(&perf_counts.userspace_events)? - .map(|p| p.adjusted_count()), - cpu_instructions_kernel: convert_perf_count(&perf_counts.kernel_events)? - .map(|p| p.adjusted_count()), + Ok({ + let userspace_counter = convert_perf_count(&perf_counts.userspace_events)?; + let kernel_counter = convert_perf_count(&perf_counts.kernel_events)?; + buck2_data::CommandExecutionStats { + cpu_instructions_user: userspace_counter.map(|p| p.adjusted_count()), + cpu_instructions_kernel: kernel_counter.map(|p| p.adjusted_count()), + userspace_events: userspace_counter.map(|p| p.to_proto()), + kernel_events: kernel_counter.map(|p| p.to_proto()), + } }) } diff --git a/app/buck2_execute/src/re/stats.rs b/app/buck2_execute/src/re/stats.rs index de758db62547d..85b32bf207ed8 100644 --- a/app/buck2_execute/src/re/stats.rs +++ b/app/buck2_execute/src/re/stats.rs @@ -37,6 +37,11 @@ pub struct RemoteExecutionClientStats { pub uploaded: u64, /// In bytes. pub downloaded: u64, + + pub upload_stats: PerBackendRemoteExecutionClientStats, + pub download_stats: PerBackendRemoteExecutionClientStats, + + // Per-operation stats tracked below. pub uploads: RemoteExecutionClientOpStats, pub downloads: RemoteExecutionClientOpStats, pub action_cache: RemoteExecutionClientOpStats, @@ -72,3 +77,41 @@ impl OpStats { }) } } + +#[derive(Default)] +pub struct PerBackendRemoteExecutionClientStats { + pub zdb: BackendStats, + pub zgateway: BackendStats, + pub manifold: BackendStats, + pub hedwig: BackendStats, +} + +#[derive(Default)] +pub struct BackendStats { + pub queries: u64, + pub bytes: u64, +} + +impl PerBackendRemoteExecutionClientStats { + pub fn fill_from_re_client_metrics(&mut self, metrics: &remote_execution::TStorageStats) { + #[cfg(fbcode_build)] + { + for (typ, re_stats) in metrics.per_backend_stats.iter() { + let stats = match *typ { + remote_execution::TStorageBackendType::ZDB => &mut self.zdb, + remote_execution::TStorageBackendType::ZGATEWAY => &mut self.zgateway, + remote_execution::TStorageBackendType::MANIFOLD => &mut self.manifold, + remote_execution::TStorageBackendType::HEDWIG => &mut self.hedwig, + _ => continue, + }; + stats.queries = re_stats.queries_count as _; + stats.bytes = re_stats.bytes as _; + } + } + + #[cfg(not(fbcode_build))] + { + let _unused = metrics; + } + } +} diff --git a/app/buck2_execute/src/re/uploader.rs b/app/buck2_execute/src/re/uploader.rs index 08dcd6613aad2..cf7088a19130a 100644 --- a/app/buck2_execute/src/re/uploader.rs +++ b/app/buck2_execute/src/re/uploader.rs @@ -7,7 +7,9 @@ * of this source tree.
*/ +use std::collections::HashMap; use std::collections::HashSet; +use std::path::Path; use std::str::FromStr; use std::sync::Arc; @@ -16,14 +18,18 @@ use buck2_common::cas_digest::TrackedCasDigest; use buck2_common::file_ops::FileDigest; use buck2_common::file_ops::FileDigestKind; use buck2_common::file_ops::TrackedFileDigest; -use buck2_core::directory::DirectoryEntry; -use buck2_core::directory::DirectoryIterator; -use buck2_core::directory::FingerprintedDirectory; -use buck2_core::env_helper::EnvHelper; +use buck2_core::buck2_env_anyhow; use buck2_core::execution_types::executor_config::RemoteExecutorUseCase; use buck2_core::fs::project::ProjectRoot; use buck2_core::fs::project_rel_path::ProjectRelativePath; use buck2_core::soft_error; +use buck2_data::ReUploadMetrics; +use buck2_directory::directory::directory::Directory; +use buck2_directory::directory::directory_iterator::DirectoryIterator; +use buck2_directory::directory::directory_iterator::DirectoryIteratorPathStack; +use buck2_directory::directory::directory_ref::FingerprintedDirectoryRef; +use buck2_directory::directory::entry::DirectoryEntry; +use buck2_directory::directory::fingerprinted_directory::FingerprintedDirectory; use chrono::Duration; use chrono::Utc; use futures::FutureExt; @@ -41,19 +47,20 @@ use crate::digest::CasDigestFromReExt; use crate::digest::CasDigestToReExt; use crate::digest_config::DigestConfig; use crate::directory::ActionDirectoryMember; -use crate::directory::ActionFingerprintedDirectory; +use crate::directory::ActionFingerprintedDirectoryRef; use crate::directory::ActionImmutableDirectory; use crate::directory::ReDirectorySerializer; use crate::execute::blobs::ActionBlobs; use crate::materialize::materializer::ArtifactNotMaterializedReason; use crate::materialize::materializer::CasDownloadInfo; use crate::materialize::materializer::Materializer; +use crate::re::action_identity::ReActionIdentity; use crate::re::metadata::RemoteExecutionMetadataExt; #[derive(Clone, Debug, Default)] pub struct UploadStats { - pub bytes_uploaded: u64, - pub digests_uploaded: u64, + pub total: ReUploadMetrics, + pub by_extension: HashMap, } pub struct Uploader {} @@ -64,6 +71,7 @@ impl Uploader { input_dir: &'a ActionImmutableDirectory, blobs: &'a ActionBlobs, use_case: &RemoteExecutorUseCase, + identity: Option<&ReActionIdentity<'_>>, digest_config: DigestConfig, ) -> anyhow::Result<( Vec, @@ -82,9 +90,9 @@ impl Uploader { let mut input_digests = blobs.keys().collect::>(); let digest_ttls = { // Collect the digests we need to upload - for entry in input_dir.fingerprinted_unordered_walk().without_paths() { + for entry in input_dir.unordered_walk().without_paths() { let digest = match entry { - DirectoryEntry::Dir(d) => d.fingerprint(), + DirectoryEntry::Dir(d) => d.as_fingerprinted_dyn().fingerprint(), DirectoryEntry::Leaf(ActionDirectoryMember::File(f)) => &f.digest, DirectoryEntry::Leaf(..) => continue, }; @@ -105,7 +113,7 @@ impl Uploader { ..Default::default() }; client - .get_digests_ttl(use_case.metadata(), request) + .get_digests_ttl(use_case.metadata(identity), request) .boxed() .await? 
.digests_with_ttl @@ -144,7 +152,7 @@ impl Uploader { match blobs.get(digest) { Some(blob) => { upload_blobs.push(InlinedBlobWithDigest { - blob: blob.clone(), + blob: blob.clone().0, digest: digest.to_re(), ..Default::default() }); @@ -171,10 +179,12 @@ impl Uploader { input_dir: &ActionImmutableDirectory, blobs: &ActionBlobs, use_case: RemoteExecutorUseCase, + identity: Option<&ReActionIdentity<'_>>, digest_config: DigestConfig, ) -> anyhow::Result { let (mut upload_blobs, mut missing_digests) = - Self::find_missing(client, input_dir, blobs, &use_case, digest_config).await?; + Self::find_missing(client, input_dir, blobs, &use_case, identity, digest_config) + .await?; if upload_blobs.is_empty() && missing_digests.is_empty() { return Ok(UploadStats::default()); @@ -191,10 +201,10 @@ impl Uploader { let mut upload_file_digests = Vec::new(); { - let mut walk = input_dir.fingerprinted_unordered_walk(); + let mut walk = input_dir.unordered_walk(); while let Some((path, entry)) = walk.next() { let digest = match entry { - DirectoryEntry::Dir(d) => d.fingerprint(), + DirectoryEntry::Dir(d) => d.as_fingerprinted_dyn().fingerprint(), DirectoryEntry::Leaf(ActionDirectoryMember::File(f)) => &f.digest, DirectoryEntry::Leaf(..) => continue, }; @@ -217,7 +227,7 @@ impl Uploader { } if missing_digests.remove(input_dir.fingerprint()) { - upload_blobs.push(directory_to_blob(input_dir)); + upload_blobs.push(directory_to_blob(input_dir.as_fingerprinted_ref())); } assert!( @@ -266,7 +276,7 @@ impl Uploader { "{} missing (origin: {})", file.digest, info.origin.as_display_for_not_found(), - ), + ).into(), daemon_in_memory_state_is_corrupted: true, action_cache_is_corrupted: info.origin.guaranteed_by_action_cache() )?; @@ -288,7 +298,7 @@ impl Uploader { file.digest, file.digest.expires(), err - ), + ).into(), quiet: true )?; @@ -327,13 +337,18 @@ impl Uploader { // Compute stats of digests we're about to upload so we can report them // to the span end event of this stage of execution. let stats = { - let named_digest_byte_count: u64 = upload_files - .iter() - .map(|nd| { - let byte_count: u64 = nd.digest.size_in_bytes.try_into().unwrap_or_default(); - byte_count - }) - .sum(); + let mut stats_by_extension = HashMap::new(); + let mut named_digest_byte_count: u64 = 0; + for nd in &upload_files { + // Aggregate metrics by file extension. 
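A quick aside on the aggregation this hunk introduces (the loop body resumes right below): one `ReUploadMetrics`-style counter per lowercased file extension, keyed in a `HashMap`. A self-contained sketch with hypothetical simplified types, mirroring the `extract_file_extension` helper added later in this file:

```rust
use std::collections::HashMap;
use std::path::Path;

// Hypothetical simplification of ReUploadMetrics.
#[derive(Default, Debug, PartialEq)]
struct UploadMetrics {
    digests_uploaded: u64,
    bytes_uploaded: u64,
}

// Mirrors the helper in this diff: lowercased extension, "" when absent.
fn file_extension(name: &str) -> String {
    match Path::new(name).extension() {
        Some(ext) => ext.to_string_lossy().to_lowercase(),
        None => String::new(),
    }
}

fn main() {
    let uploads = [("lib/util.rlib", 1024u64), ("bin/app", 4096), ("src/MAIN.RS", 512)];

    let mut by_extension: HashMap<String, UploadMetrics> = HashMap::new();
    let mut total_bytes = 0u64;
    for (name, bytes) in uploads {
        let stats = by_extension.entry(file_extension(name)).or_default();
        stats.digests_uploaded += 1;
        stats.bytes_uploaded += bytes;
        total_bytes += bytes;
    }

    // "MAIN.RS" is normalized to "rs"; the extensionless binary lands under "".
    assert_eq!(by_extension["rs"], UploadMetrics { digests_uploaded: 1, bytes_uploaded: 512 });
    assert_eq!(by_extension[""].bytes_uploaded, 4096);
    assert_eq!(total_bytes, 5632);
}
```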
+ let byte_count: u64 = nd.digest.size_in_bytes.try_into().unwrap_or_default(); + let extension = extract_file_extension(&nd.name); + let ext_stats: &mut ReUploadMetrics = + stats_by_extension.entry(extension).or_default(); + ext_stats.digests_uploaded += 1; + ext_stats.bytes_uploaded += byte_count; + named_digest_byte_count += byte_count; + } let blob_byte_count: u64 = upload_blobs .iter() .map(|blob| { @@ -343,16 +358,19 @@ impl Uploader { .sum(); UploadStats { - digests_uploaded: (upload_files.len() + upload_blobs.len()) as u64, - bytes_uploaded: named_digest_byte_count + blob_byte_count, + total: ReUploadMetrics { + digests_uploaded: (upload_files.len() + upload_blobs.len()) as u64, + bytes_uploaded: named_digest_byte_count + blob_byte_count, + }, + by_extension: stats_by_extension, } }; // Upload - let upload_res = if !upload_files.is_empty() || !upload_blobs.is_empty() { + if !upload_files.is_empty() || !upload_blobs.is_empty() { client .upload( - use_case.metadata(), + use_case.metadata(identity), UploadRequest { files_with_digest: Some(upload_files), inlined_blobs_with_digest: Some(upload_blobs), @@ -364,25 +382,17 @@ impl Uploader { ) .boxed() .await - .map(|_| ()) - } else { - Ok(()) + .map_err(|e| match e.downcast_ref::() { + Some(re_client_error) if re_client_error.code == TCode::INVALID_ARGUMENT => + anyhow::anyhow!( + "RE Upload failed. It looks like you might have modified files while the build \ + was in progress. Retry your build to proceed. Debug information: {:#}", + e + ), + _ => e, + })?; }; - if let Err(e) = upload_res.as_ref() { - if let Some(re_client_error) = e.downcast_ref::() { - if re_client_error.code == TCode::INVALID_ARGUMENT { - return Err(anyhow::anyhow!( - "RE Upload failed. It looks like you might have modified files while the build \ - was in progress. Retry your build to proceed. Debug information: {:#}", - e - )); - } - } - } - - upload_res.context("RE: upload")?; - Ok(stats) } } @@ -404,13 +414,13 @@ fn should_error_for_missing_digest(info: &CasDownloadInfo) -> bool { } } -fn directory_to_blob(d: &D) -> InlinedBlobWithDigest +fn directory_to_blob<'a, D>(d: D) -> InlinedBlobWithDigest where - D: ActionFingerprintedDirectory + ?Sized, + D: ActionFingerprintedDirectoryRef<'a>, { InlinedBlobWithDigest { - digest: d.fingerprint().to_re(), - blob: ReDirectorySerializer::serialize_entries(d.fingerprinted_entries()), + digest: d.as_fingerprinted_dyn().fingerprint().to_re(), + blob: ReDirectorySerializer::serialize_entries(d.entries()), ..Default::default() } } @@ -446,10 +456,13 @@ fn add_injected_missing_digests<'a>( .collect() } - static INJECTED_DIGESTS: EnvHelper> = - EnvHelper::with_converter("BUCK2_TEST_INJECTED_MISSING_DIGESTS", convert_digests); - - if let Some(digests) = INJECTED_DIGESTS.get()? 
{ + let ingested_digests = buck2_env_anyhow!( + "BUCK2_TEST_INJECTED_MISSING_DIGESTS", + type=Vec, + converter=convert_digests, + applicability=testing + )?; + if let Some(digests) = ingested_digests { for d in digests { if let Some(i) = input_digests.get(d) { missing_digests.insert(i); @@ -459,3 +472,11 @@ fn add_injected_missing_digests<'a>( Ok(()) } + +fn extract_file_extension(path: &str) -> String { + let path = Path::new(path); + match path.extension() { + Some(ext) => ext.to_string_lossy().to_lowercase(), + None => "".to_owned(), + } +} diff --git a/app/buck2_execute_impl/BUCK b/app/buck2_execute_impl/BUCK index 41bdc23291aac..45cbf9def8a06 100644 --- a/app/buck2_execute_impl/BUCK +++ b/app/buck2_execute_impl/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -8,6 +7,22 @@ rust_library( srcs = glob( ["src/**/*.rs"], ), + os_deps = [ + ( + "linux", + [ + "//buck2/app/buck2_forkserver_proto:buck2_forkserver_proto", + # @oss-disable: "//common/rust/shed/hostcaps:hostcaps", + # @oss-disable: "//justknobs/rust:justknobs", + ], + ), + ( + "macos", + [ + "//buck2/app/buck2_forkserver_proto:buck2_forkserver_proto", + ], + ), + ], test_deps = [ "fbsource//third-party/rust:assert_matches", ], @@ -16,6 +31,7 @@ rust_library( "fbsource//third-party/rust:async-condvar-fair", "fbsource//third-party/rust:async-trait", "fbsource//third-party/rust:chrono", + "fbsource//third-party/rust:dashmap", "fbsource//third-party/rust:derivative", "fbsource//third-party/rust:derive_more", "fbsource//third-party/rust:futures", @@ -26,8 +42,8 @@ rust_library( "fbsource//third-party/rust:parking_lot", "fbsource//third-party/rust:pin-project", "fbsource//third-party/rust:prost", + "fbsource//third-party/rust:regex", "fbsource//third-party/rust:rusqlite", - "fbsource//third-party/rust:thiserror", "fbsource//third-party/rust:tokio", "fbsource//third-party/rust:tokio-stream", "fbsource//third-party/rust:tonic", @@ -39,10 +55,13 @@ rust_library( "//buck2/app/buck2_common:buck2_common", "//buck2/app/buck2_core:buck2_core", "//buck2/app/buck2_data:buck2_data", + "//buck2/app/buck2_directory:buck2_directory", + "//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_events:buck2_events", "//buck2/app/buck2_execute:buck2_execute", "//buck2/app/buck2_forkserver:buck2_forkserver", - "//buck2/app/buck2_forkserver_proto:buck2_forkserver_proto", + "//buck2/app/buck2_futures:buck2_futures", + "//buck2/app/buck2_http:buck2_http", "//buck2/app/buck2_util:buck2_util", "//buck2/app/buck2_worker_proto:buck2_worker_proto", "//buck2/app/buck2_wrapper_common:buck2_wrapper_common", @@ -50,6 +69,5 @@ rust_library( "//buck2/gazebo/gazebo:gazebo", "//buck2/host_sharing:host_sharing", "//buck2/remote_execution:remote_execution", - "//buck2/shed/more_futures:more_futures", ], ) diff --git a/app/buck2_execute_impl/Cargo.toml b/app/buck2_execute_impl/Cargo.toml index 7f80c48a7d128..8cbf57159c225 100644 --- a/app/buck2_execute_impl/Cargo.toml +++ b/app/buck2_execute_impl/Cargo.toml @@ -1,52 +1,61 @@ [package] +description = "Implementations of executors and materializers" +edition = "2021" +license = { workspace = true } name = "buck2_execute_impl" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Implementations of executors and materializers" [dependencies] anyhow = { workspace = true } async-condvar-fair = { workspace = true } async-trait = { workspace = true } chrono = { workspace = 
true } +dashmap = { workspace = true } derivative = { workspace = true } derive_more = { workspace = true } -faccess = { workspace = true } +dupe = { workspace = true } futures = { workspace = true } +gazebo = { workspace = true } +host_sharing = { workspace = true } indexmap = { workspace = true } -pin-project = { workspace = true } itertools = { workspace = true } once_cell = { workspace = true } parking_lot = { workspace = true } +pin-project = { workspace = true } prost = { workspace = true } +regex = { workspace = true } +remote_execution = { workspace = true } rusqlite = { workspace = true } -thiserror = { workspace = true } tokio = { workspace = true } tokio-stream = { workspace = true } tonic = { workspace = true } tracing = { workspace = true } zstd = { workspace = true } -hostname = { workspace = true } -gazebo = { workspace = true } -dupe = { workspace = true } -host_sharing = { workspace = true } -more_futures = { workspace = true } -remote_execution = { workspace = true } allocative = { workspace = true } buck2_action_metadata_proto = { workspace = true } +buck2_cli_proto = { workspace = true } buck2_common = { workspace = true } buck2_core = { workspace = true } -buck2_cli_proto = { workspace = true } buck2_data = { workspace = true } +buck2_directory = { workspace = true } +buck2_error = { workspace = true } buck2_events = { workspace = true } buck2_execute = { workspace = true } buck2_forkserver = { workspace = true } -buck2_forkserver_proto = { workspace = true } +buck2_futures = { workspace = true } +buck2_http = { workspace = true } buck2_util = { workspace = true } buck2_worker_proto = { workspace = true } buck2_wrapper_common = { workspace = true } +[target.'cfg(unix)'.dependencies] +buck2_forkserver_proto = { workspace = true } + [dev-dependencies] assert_matches = { workspace = true } + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ["cfg(fbcode_build)"] } diff --git a/app/buck2_execute_impl/src/executors.rs b/app/buck2_execute_impl/src/executors.rs new file mode 100644 index 0000000000000..033c1b8f872e0 --- /dev/null +++ b/app/buck2_execute_impl/src/executors.rs @@ -0,0 +1,19 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +pub mod action_cache; +pub mod action_cache_upload_permission_checker; +pub mod caching; +pub(crate) mod empty_action_result; +pub mod hybrid; +pub mod local; +pub mod re; +pub mod stacked; +pub mod to_re_platform; +pub mod worker; diff --git a/app/buck2_execute_impl/src/executors/action_cache.rs b/app/buck2_execute_impl/src/executors/action_cache.rs index c8a085398894f..4f1b88d9620d8 100644 --- a/app/buck2_execute_impl/src/executors/action_cache.rs +++ b/app/buck2_execute_impl/src/executors/action_cache.rs @@ -20,6 +20,7 @@ use buck2_execute::execute::action_digest::ActionDigest; use buck2_execute::execute::action_digest::ActionDigestKind; use buck2_execute::execute::dep_file_digest::DepFileDigest; use buck2_execute::execute::executor_stage_async; +use buck2_execute::execute::kind::CommandExecutionKind; use buck2_execute::execute::kind::RemoteCommandExecutionDetails; use buck2_execute::execute::manager::CommandExecutionManager; use buck2_execute::execute::manager::CommandExecutionManagerExt; @@ -30,10 +31,9 @@ use buck2_execute::knobs::ExecutorGlobalKnobs; use buck2_execute::materialize::materializer::Materializer; use buck2_execute::re::action_identity::ReActionIdentity; use buck2_execute::re::manager::ManagedRemoteExecutionClient; -use buck2_execute::re::remote_action_result::RemoteActionResult; -use buck2_execute::re::remote_action_result::RemoteDepFileResult; +use buck2_execute::re::remote_action_result::ActionCacheResult; +use buck2_futures::cancellation::CancellationContext; use dupe::Dupe; -use more_futures::cancellation::CancellationContext; use prost::Message; use crate::re::download::download_action_results; @@ -81,10 +81,10 @@ async fn query_action_cache_and_download_result( cancellations: &CancellationContext<'_>, upload_all_actions: bool, log_action_keys: bool, + details: RemoteCommandExecutionDetails, ) -> ControlFlow { let request = command.request; - let platform = &command.prepared_action.platform; - let action_blobs = &command.prepared_action.blobs; + let action_blobs = &command.prepared_action.action_and_blobs.blobs; let digest_config = command.digest_config; let digest = match &cache_type { @@ -101,6 +101,7 @@ async fn query_action_cache_and_download_result( ) .await; + let identity = None; // TODO(#503): implement this if upload_all_actions { match re_client .upload( @@ -110,6 +111,7 @@ async fn query_action_cache_and_download_result( ProjectRelativePath::empty(), request.paths().input_directory(), re_use_case, + identity, digest_config, ) .await @@ -131,47 +133,37 @@ async fn query_action_cache_and_download_result( let action_exit_code = response.action_result.exit_code; - // Select the RemoteActionResult type so that we set the CommandExecutionKind properly. 
- let (response, dep_file_metadata): (Box, Option) = - match &cache_type { - CacheType::ActionCache => (Box::new(response) as _, None), - CacheType::RemoteDepFileCache(_) => { - let metadata = response - .action_result - .execution_metadata - .auxiliary_metadata - .iter() - .find(|k| k.type_url == REMOTE_DEP_FILE_KEY); + let dep_file_metadata: Option = match &cache_type { + CacheType::ActionCache => None, + CacheType::RemoteDepFileCache(_) => { + let metadata = response + .action_result + .execution_metadata + .auxiliary_metadata + .iter() + .find(|k| k.type_url == REMOTE_DEP_FILE_KEY); - if metadata.is_none() { - // No entry found - return ControlFlow::Continue(manager); - } - let dep_file_entry = match RemoteDepFile::decode(metadata.unwrap().value.as_slice()) - { - Ok(entry) => entry, - Err(e) => { - return ControlFlow::Break(manager.error("remote_dep_file", e)); - } - }; - ( - Box::new(RemoteDepFileResult(response)) as _, - Some(dep_file_entry), - ) + if metadata.is_none() { + // No entry found + return ControlFlow::Continue(manager); } - }; - - let action_key = if log_action_keys { - let identity = ReActionIdentity::new( - command.target, - re_action_key.as_deref(), - command.request.paths(), - ); - Some(identity.action_key) - } else { - None + let dep_file_entry = match RemoteDepFile::decode(metadata.unwrap().value.as_slice()) { + Ok(entry) => entry, + Err(e) => { + return ControlFlow::Break(manager.error("remote_dep_file", e)); + } + }; + Some(dep_file_entry) + } }; + let identity = ReActionIdentity::new( + command.target, + re_action_key.as_deref(), + command.request.paths(), + ); + + let response = ActionCacheResult(response, cache_type.to_proto()); let res = download_action_results( request, materializer.as_ref(), @@ -179,24 +171,26 @@ async fn query_action_cache_and_download_result( re_use_case, digest_config, manager, + &identity, buck2_data::CacheHit { action_digest: digest.to_string(), - action_key, + action_key: if log_action_keys { + Some(identity.action_key.clone()) + } else { + None + }, + cache_type: cache_type.to_proto().into(), } .into(), request.paths(), request.outputs(), - RemoteCommandExecutionDetails { - action_digest: digest.dupe(), - session_id: re_client.get_session_id().await.ok(), - use_case: re_use_case, - platform: platform.clone(), - remote_dep_file_key: request.remote_dep_file_key().clone(), - }, + details, &response, paranoid.as_ref(), cancellations, action_exit_code, + artifact_fs, + false, ) .await; @@ -215,6 +209,7 @@ async fn query_action_cache_and_download_result( command.request.all_args_str(), action_digest, ); + res.action_result = Some(response.0.action_result); } } @@ -229,9 +224,21 @@ impl PreparedCommandOptionalExecutor for ActionCacheChecker { manager: CommandExecutionManager, cancellations: &CancellationContext, ) -> ControlFlow { - let action_digest = &command.prepared_action.action; + let action_digest = &command.prepared_action.action_and_blobs.action; + let details = RemoteCommandExecutionDetails::new( + action_digest.dupe(), + *command.request.remote_dep_file_key(), + self.re_client.get_session_id().await.ok(), + self.re_use_case, + &command.prepared_action.platform, + ); + let cache_type = CacheType::ActionCache; + let manager = manager.with_execution_kind(command_execution_kind_for_cache_type( + &cache_type, + details.clone(), + )); let result = query_action_cache_and_download_result( - CacheType::ActionCache, + cache_type, &self.artifact_fs, &self.materializer, &self.re_client, @@ -244,6 +251,7 @@ impl PreparedCommandOptionalExecutor 
for ActionCacheChecker { cancellations, self.upload_all_actions, self.knobs.log_action_keys, + details, ) .await; @@ -286,23 +294,46 @@ impl PreparedCommandOptionalExecutor for RemoteDepFileCacheChecker { Some(key) => key.dupe(), }; - let action_digest = &command.prepared_action.action; + let cache_type = CacheType::RemoteDepFileCache(remote_dep_file_key); + let action_digest = remote_dep_file_key.dupe().coerce::(); + let details = RemoteCommandExecutionDetails::new( + action_digest.dupe(), + Some(remote_dep_file_key.dupe()), + self.re_client.get_session_id().await.ok(), + self.re_use_case, + &command.prepared_action.platform, + ); + let manager = manager.with_execution_kind(command_execution_kind_for_cache_type( + &cache_type, + details.clone(), + )); query_action_cache_and_download_result( - CacheType::RemoteDepFileCache(remote_dep_file_key), + cache_type, &self.artifact_fs, &self.materializer, &self.re_client, self.re_use_case, &self.re_action_key, &self.paranoid, - action_digest, + &action_digest, command, manager, cancellations, self.upload_all_actions, self.knobs.log_action_keys, + details, ) .await } } + +fn command_execution_kind_for_cache_type( + cache_type: &CacheType, + details: RemoteCommandExecutionDetails, +) -> CommandExecutionKind { + match cache_type { + CacheType::ActionCache => CommandExecutionKind::ActionCache { details }, + CacheType::RemoteDepFileCache(_) => CommandExecutionKind::RemoteDepFileCache { details }, + } +} diff --git a/app/buck2_execute_impl/src/executors/action_cache_upload_permission_checker.rs b/app/buck2_execute_impl/src/executors/action_cache_upload_permission_checker.rs new file mode 100644 index 0000000000000..13ef8a5b597ee --- /dev/null +++ b/app/buck2_execute_impl/src/executors/action_cache_upload_permission_checker.rs @@ -0,0 +1,122 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::hash::Hash; +use std::sync::Arc; + +use anyhow::Context; +use buck2_core::async_once_cell::AsyncOnceCell; +use buck2_core::execution_types::executor_config::RePlatformFields; +use buck2_core::execution_types::executor_config::RemoteExecutorUseCase; +use buck2_execute::re::error::RemoteExecutionError; +use buck2_execute::re::manager::ManagedRemoteExecutionClient; +use dashmap::DashMap; +use dupe::Dupe; +use remote_execution::TCode; + +use crate::executors::empty_action_result::empty_action_result; +use crate::executors::to_re_platform::RePlatformFieldsToRePlatform; + +#[derive(Debug, Clone, Hash, PartialEq, Eq)] +struct CacheKey { + re_use_case: RemoteExecutorUseCase, + platform: RePlatformFields, +} + +struct CacheValue { + has_permission_to_upload_to_cache: AsyncOnceCell>, +} + +/// Check permission to upload to action cache and cache result. +pub struct ActionCacheUploadPermissionChecker { + re_client: ManagedRemoteExecutionClient, + /// Permission check does not depend on RE use case, + /// but since we use these to upload, it is safer to cache the result by them. 
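An aside on the memoization the doc comment above describes (the struct field it documents follows right after): one fallible permission probe per (use case, platform) key, shared by all concurrent actions with that key. A minimal sketch using tokio's `OnceCell` as a stand-in for buck2's `AsyncOnceCell`, `dashmap` for the per-key map, and hypothetical simplified types; it assumes the tokio, dashmap, and anyhow crates:

```rust
use std::sync::Arc;

use dashmap::DashMap;
use tokio::sync::OnceCell;

// Ok(Err(msg)) means "checked, and the upload was denied".
#[derive(Clone, Hash, PartialEq, Eq)]
struct CacheKey {
    use_case: String,
    platform: String,
}

#[derive(Default)]
struct PermissionChecker {
    cache: DashMap<CacheKey, Arc<OnceCell<Result<(), String>>>>,
}

impl PermissionChecker {
    async fn has_permission(&self, key: CacheKey) -> anyhow::Result<Result<(), String>> {
        // Clone the Arc so the DashMap guard is released before we await.
        let cell = self
            .cache
            .entry(key)
            .or_insert_with(|| Arc::new(OnceCell::new()))
            .clone();
        // Concurrent callers with the same key share one probe; later
        // callers get the cached verdict without re-probing.
        let verdict = cell
            .get_or_try_init(|| async {
                // The real checker uploads an empty action result here and
                // inspects the RE error for PERMISSION_DENIED.
                anyhow::Ok(Ok(()))
            })
            .await?;
        Ok(verdict.clone())
    }
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let checker = PermissionChecker::default();
    let key = CacheKey { use_case: "buck2-default".into(), platform: "linux".into() };
    assert_eq!(checker.has_permission(key).await?, Ok(()));
    Ok(())
}
```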
+ has_permission_to_upload_to_cache: DashMap>, +} + +impl ActionCacheUploadPermissionChecker { + pub fn new(re_client: ManagedRemoteExecutionClient) -> ActionCacheUploadPermissionChecker { + ActionCacheUploadPermissionChecker { + re_client, + has_permission_to_upload_to_cache: DashMap::new(), + } + } + + async fn do_has_permission_to_upload_to_cache( + &self, + re_use_case: RemoteExecutorUseCase, + platform: &RePlatformFields, + ) -> anyhow::Result> { + let (action, action_result) = empty_action_result(platform)?; + + // This is CAS upload, if it fails, something is very broken. + self.re_client + .upload_files_and_directories( + Vec::new(), + Vec::new(), + action.blobs.to_inlined_blobs(), + re_use_case, + ) + .await?; + + // This operation requires permission to write. + let result = self + .re_client + .write_action_result( + action.action, + action_result.clone(), + re_use_case, + &platform.to_re_platform(), + ) + .await; + match result { + Ok(_) => Ok(Ok(())), + Err(e) => { + let e: buck2_error::Error = e.into(); + match e.find_typed_context::() { + Some(e) if e.code == TCode::PERMISSION_DENIED => Ok(Err(e.message.clone())), + _ => Err(e.into()), + } + } + } + } + + fn cache_value( + &self, + re_use_case: RemoteExecutorUseCase, + platform: &RePlatformFields, + ) -> Arc { + self.has_permission_to_upload_to_cache + .entry(CacheKey { + re_use_case, + platform: platform.clone(), + }) + .or_insert_with(|| { + Arc::new(CacheValue { + has_permission_to_upload_to_cache: AsyncOnceCell::new(), + }) + }) + .dupe() + } + + pub(crate) async fn has_permission_to_upload_to_cache( + &self, + re_use_case: RemoteExecutorUseCase, + platform: &RePlatformFields, + ) -> anyhow::Result> { + let cache_value = self.cache_value(re_use_case, platform); + cache_value + .has_permission_to_upload_to_cache + .get_or_try_init(self.do_has_permission_to_upload_to_cache(re_use_case, platform)) + .await + .cloned() + .context("Upload for permission check") + } +} diff --git a/app/buck2_execute_impl/src/executors/caching.rs b/app/buck2_execute_impl/src/executors/caching.rs index 06a30b9aba56e..0fb1baaf1b1d1 100644 --- a/app/buck2_execute_impl/src/executors/caching.rs +++ b/app/buck2_execute_impl/src/executors/caching.rs @@ -8,173 +8,121 @@ */ use std::fmt::Debug; -use std::fmt::Display; use std::sync::Arc; use std::time::SystemTime; use anyhow::Context as _; use async_trait::async_trait; use buck2_action_metadata_proto::REMOTE_DEP_FILE_KEY; -use buck2_core::directory::DirectoryEntry; -use buck2_core::env_helper::EnvHelper; +use buck2_common::file_ops::TrackedFileDigest; +use buck2_core::buck2_env_anyhow; +use buck2_core::execution_types::executor_config::RePlatformFields; use buck2_core::execution_types::executor_config::RemoteExecutorUseCase; use buck2_core::fs::artifact_path_resolver::ArtifactFs; +use buck2_directory::directory::entry::DirectoryEntry; use buck2_events::dispatch::span_async; use buck2_execute::digest::CasDigestToReExt; use buck2_execute::digest_config::DigestConfig; use buck2_execute::directory::directory_to_re_tree; use buck2_execute::directory::ActionDirectoryMember; -use buck2_execute::execute::action_digest::ActionDigest; +use buck2_execute::execute::action_digest_and_blobs::ActionDigestAndBlobs; use buck2_execute::execute::blobs::ActionBlobs; use buck2_execute::execute::cache_uploader::CacheUploadInfo; use buck2_execute::execute::cache_uploader::CacheUploadResult; -use buck2_execute::execute::cache_uploader::DepFileEntry; +use buck2_execute::execute::cache_uploader::IntoRemoteDepFile; use 
buck2_execute::execute::cache_uploader::UploadCache; use buck2_execute::execute::result::CommandExecutionResult; -use buck2_execute::execute::target::CommandExecutionTarget; -use buck2_execute::knobs::ExecutorGlobalKnobs; use buck2_execute::materialize::materializer::Materializer; +use buck2_execute::re::error::RemoteExecutionError; use buck2_execute::re::manager::ManagedRemoteExecutionClient; use derive_more::Display; use dupe::Dupe; use futures::future; use futures::future::FutureExt; +use gazebo::prelude::VecExt; use prost::Message; -use remote_execution as RE; use remote_execution::DigestWithStatus; use remote_execution::NamedDigest; -use remote_execution::REClientError; use remote_execution::TActionResult2; use remote_execution::TAny; use remote_execution::TCode; -use remote_execution::TDigest; use remote_execution::TDirectory2; use remote_execution::TExecutedActionMetadata; use remote_execution::TFile; use remote_execution::TStatus; use remote_execution::TTimestamp; +use crate::executors::action_cache_upload_permission_checker::ActionCacheUploadPermissionChecker; +use crate::executors::to_re_platform::RePlatformFieldsToRePlatform; + // Whether to throw errors when cache uploads fail (primarily for tests). -static ERROR_ON_CACHE_UPLOAD: EnvHelper = EnvHelper::new("BUCK2_TEST_ERROR_ON_CACHE_UPLOAD"); +fn error_on_cache_upload() -> anyhow::Result { + buck2_env_anyhow!( + "BUCK2_TEST_ERROR_ON_CACHE_UPLOAD", + bool, + applicability = testing + ) +} /// A PreparedCommandExecutor that will write to cache after invoking the inner executor pub struct CacheUploader { - pub artifact_fs: ArtifactFs, - pub materializer: Arc, - pub re_client: ManagedRemoteExecutionClient, - pub re_use_case: RemoteExecutorUseCase, - pub platform: RE::Platform, - pub knobs: ExecutorGlobalKnobs, - pub max_bytes: Option, + artifact_fs: ArtifactFs, + materializer: Arc, + re_client: ManagedRemoteExecutionClient, + re_use_case: RemoteExecutorUseCase, + platform: RePlatformFields, + max_bytes: Option, + cache_upload_permission_checker: Arc, } impl CacheUploader { - // Only return error on upload failure if we pass a flag - fn modify_upload_result( - digest: &dyn Display, - result: anyhow::Result, - error_on_cache_upload: bool, - ) -> anyhow::Result { - match result { - Err(e) => { - if error_on_cache_upload { - Err(e).context("cache_upload") - } else { - tracing::warn!("Cache upload for `{}` failed: {:#}", digest, e); - Ok(false) - } - } - _ => result, + pub fn new( + artifact_fs: ArtifactFs, + materializer: Arc, + re_client: ManagedRemoteExecutionClient, + re_use_case: RemoteExecutorUseCase, + platform: RePlatformFields, + max_bytes: Option, + cache_upload_permission_checker: Arc, + ) -> CacheUploader { + CacheUploader { + artifact_fs, + materializer, + re_client, + re_use_case, + platform, + max_bytes, + cache_upload_permission_checker, } } - async fn upload_action_result( - &self, - target: &dyn CommandExecutionTarget, - action_digest: &ActionDigest, - result: &CommandExecutionResult, - digest_config: DigestConfig, - error_on_cache_upload: bool, - ) -> anyhow::Result { - tracing::debug!("Uploading action result for `{}`", action_digest); - let result = self - .perform_cache_upload( - target, - result, - digest_config, - action_digest.to_re(), - vec![], - buck2_data::CacheUploadReason::LocalExecution, - ) - .await; - Self::modify_upload_result(action_digest, result, error_on_cache_upload) - } - - /// Upload an action result with additional information about dep files to the RE action cache. 
- /// The conditions for the upload are: the action must have been successful and produced a depfile - /// and cache uploads must have been enabled for this action. - async fn upload_dep_file_result( - &self, - action_digest: &ActionDigest, - target: &dyn CommandExecutionTarget, - result: &CommandExecutionResult, - digest_config: DigestConfig, - dep_file_entry: DepFileEntry, - error_on_cache_upload: bool, - ) -> anyhow::Result { - tracing::debug!( - "Uploading dep file entry for action `{}` with dep file key `{}`", - action_digest, - dep_file_entry.key - ); - let digest_re = dep_file_entry.key.to_re(); - let dep_file_tany = TAny { - type_url: REMOTE_DEP_FILE_KEY.to_owned(), - value: dep_file_entry.entry.encode_to_vec(), - ..Default::default() - }; - let result = self - .perform_cache_upload( - target, - result, - digest_config, - digest_re, - vec![dep_file_tany], - buck2_data::CacheUploadReason::DepFile, - ) - .await; - - Self::modify_upload_result(&dep_file_entry.key, result, error_on_cache_upload) - } - /// Upload an action result to the RE action cache, assuming conditions for the upload are met: /// the action must have been successful and must have run locally (not much point in caching /// something that ran on RE and is already cached), and cache uploads must be enabled for this particular action. /// The CacheUploader should only be used if cache uploads are enabled. - async fn perform_cache_upload( + async fn upload_local_outputs( &self, - target: &dyn CommandExecutionTarget, + info: &CacheUploadInfo<'_>, result: &CommandExecutionResult, - digest_config: DigestConfig, - digest: TDigest, - metadata: Vec, - reason: buck2_data::CacheUploadReason, - ) -> anyhow::Result { + action_digest_and_blobs: &ActionDigestAndBlobs, + error_on_cache_upload: bool, + has_depfile_entry: bool, + ) -> anyhow::Result { + let digest = action_digest_and_blobs.action; let digest_str = digest.to_string(); let output_bytes = result.calc_output_size_bytes(); span_async( buck2_data::CacheUploadStart { - key: Some(target.as_proto_action_key()), - name: Some(target.as_proto_action_name()), + key: Some(info.target.as_proto_action_key()), + name: Some(info.target.as_proto_action_name()), action_digest: digest_str.clone(), - reason: reason.into(), }, async { let mut file_digests = Vec::new(); let mut tree_digests = Vec::new(); - let res: std::result::Result = async { + let outcome = async { if let Some(max_bytes) = self.max_bytes { if output_bytes > max_bytes { return Ok(CacheUploadOutcome::Rejected( @@ -183,13 +131,29 @@ impl CacheUploader { } } + if let Err(rejected) = self.check_upload_permission().await? { + return Ok(rejected); + } + + // upload Action to CAS. + // This is necessary when writing to the ActionCache through CAS, since CAS needs to inspect the Action related to the ActionResult. + // Without storing the Action itself to CAS, ActionCache writes would fail. + self.re_client + .upload_files_and_directories( + vec![], + vec![], + action_digest_and_blobs.blobs.to_inlined_blobs(), + self.re_use_case, + ) + .await?; + + // upload ActionResult to ActionCache let result: TActionResult2 = match self .upload_files_and_directories( result, &mut file_digests, &mut tree_digests, - digest_config, - metadata, + info.digest_config, ) .await? 
{ @@ -198,66 +162,159 @@ impl CacheUploader { } Ok(taction2) => taction2, }; + // Skip expensive clone if it's not needed + let result_for_dep_file = if has_depfile_entry { + Some(result.clone()) + } else { + None + }; self.re_client - .write_action_result(digest, result, self.re_use_case, &self.platform) + .write_action_result( + digest, + result, + self.re_use_case, + &self.platform.to_re_platform(), + ) .await?; - Ok(CacheUploadOutcome::Success) + Ok(CacheUploadOutcome::Success(result_for_dep_file)) } - .await; + .await + .map_err(|e: anyhow::Error| buck2_error::Error::from(e)) + .unwrap_or_else(CacheUploadOutcome::Failed); + + let cache_upload_end_event = buck2_data::CacheUploadEnd { + key: Some(info.target.as_proto_action_key()), + name: Some(info.target.as_proto_action_name()), + action_digest: digest_str.clone(), + success: outcome.uploaded(), + error: outcome.error(), + re_error_code: outcome.re_error_code(), + file_digests: file_digests.into_map(|d| d.to_string()), + tree_digests: tree_digests.into_map(|d| d.to_string()), + output_bytes: Some(output_bytes), + }; + ( + outcome.log_and_create_result(&digest_str, error_on_cache_upload), + Box::new(cache_upload_end_event), + ) + }, + ) + .await + } - let (success, error, re_error_code) = match &res { - Ok(CacheUploadOutcome::Success) => { - tracing::info!("Cache upload for `{}` succeeded", digest_str); - (true, String::new(), None) - } - Ok(CacheUploadOutcome::Rejected(reason)) => { - tracing::info!("Cache upload for `{}` rejected: {:#}", digest_str, reason); - (false, format!("Rejected: {}", reason), None) + /// Upload an action result with additional information about dep files to the RE action cache. + /// The conditions for the upload are: the action must have been successful and produced a depfile + /// and cache uploads must have been enabled for this action. + async fn upload_dep_file( + &self, + info: &CacheUploadInfo<'_>, + action_result: Option, + dep_file_bundle: &mut dyn IntoRemoteDepFile, + error_on_cache_upload: bool, + ) -> anyhow::Result { + let remote_dep_file_action = dep_file_bundle.remote_dep_file_action().clone(); + let remote_dep_file_key = remote_dep_file_action.action.to_string(); + span_async( + buck2_data::DepFileUploadStart { + key: Some(info.target.as_proto_action_key()), + name: Some(info.target.as_proto_action_name()), + remote_dep_file_key: remote_dep_file_key.clone(), + }, + async { + let outcome = async { + let mut action_result = action_result.ok_or( + DepFileReActionResultMissingError(remote_dep_file_key.clone()), + )?; + + if let Err(rejected) = self.check_upload_permission().await? { + return Ok(rejected); } - Err(e) => ( - false, - format!("{:#}", e), - e.downcast_ref::() - .map(|e| e.code.to_string()), - ), - }; + let remote_dep_file = dep_file_bundle + .make_remote_dep_file( + info.digest_config, + &self.artifact_fs, + self.materializer.as_ref(), + ) + .await?; + let digest = remote_dep_file_action.action; + let dep_file_tany = TAny { + type_url: REMOTE_DEP_FILE_KEY.to_owned(), + value: remote_dep_file.encode_to_vec(), + ..Default::default() + }; + action_result.execution_metadata.auxiliary_metadata = vec![dep_file_tany]; + + // upload Action to CAS. + // This is necessary when writing to the ActionCache through CAS, since CAS needs to inspect the Action related to the ActionResult. + // Without storing the Action itself to CAS, ActionCache writes would fail. 
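
The two upload calls here are ordered deliberately: the action cache entry references the Action message by digest, so the Action (and its inline blobs) must already exist in CAS before the ActionResult is written. A minimal sketch of that ordering, using hypothetical `ReClient`, `Blob`, and `Digest` stand-ins rather than the real buck2 client API:

    // Hypothetical stand-ins; illustrative only, not the buck2 RE client.
    struct Digest(String);
    struct Blob(Vec<u8>);
    struct ActionResult {
        exit_code: i32,
    }
    struct ReClient;

    impl ReClient {
        // Stand-in: would store the Action message and its inputs in CAS.
        async fn upload_blobs(&self, _blobs: Vec<Blob>) -> anyhow::Result<()> {
            Ok(())
        }
        // Stand-in: would publish the result under the action digest.
        async fn write_action_result(&self, _key: Digest, _res: ActionResult) -> anyhow::Result<()> {
            Ok(())
        }
    }

    async fn cache_write(client: &ReClient, key: Digest, blobs: Vec<Blob>) -> anyhow::Result<()> {
        // 1. Ensure the Action referenced by the cache entry exists in CAS;
        //    otherwise the action cache write below would be rejected.
        client.upload_blobs(blobs).await?;
        // 2. Only then publish the ActionResult under the action digest.
        client.write_action_result(key, ActionResult { exit_code: 0 }).await
    }
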
+ self.re_client + .upload_files_and_directories( + vec![], + vec![], + remote_dep_file_action.blobs.to_inlined_blobs(), + self.re_use_case, + ) + .await?; + + // upload ActionResult to ActionCache + self.re_client + .write_action_result( + digest, + action_result, + self.re_use_case, + &self.platform.to_re_platform(), + ) + .await?; + Ok(CacheUploadOutcome::Success(None)) + } + .await + .unwrap_or_else(CacheUploadOutcome::Failed); + + let end_event = buck2_data::DepFileUploadEnd { + key: Some(info.target.as_proto_action_key()), + name: Some(info.target.as_proto_action_name()), + remote_dep_file_key: remote_dep_file_key.clone(), + success: outcome.uploaded(), + error: outcome.error(), + re_error_code: outcome.re_error_code(), + }; ( - Ok(success), - Box::new(buck2_data::CacheUploadEnd { - key: Some(target.as_proto_action_key()), - name: Some(target.as_proto_action_name()), - action_digest: digest_str.clone(), - success, - error, - re_error_code, - file_digests, - tree_digests, - output_bytes: Some(output_bytes), - reason: reason.into(), - }), + outcome.log_and_create_result(&remote_dep_file_key, error_on_cache_upload), + end_event, ) }, ) .await } + async fn check_upload_permission(&self) -> anyhow::Result> { + let outcome = if let Err(reason) = self + .cache_upload_permission_checker + .has_permission_to_upload_to_cache(self.re_use_case, &self.platform) + .await? + { + Err(CacheUploadOutcome::Rejected( + CacheUploadRejectionReason::PermissionDenied(reason), + )) + } else { + Ok(()) + }; + Ok(outcome) + } + async fn upload_files_and_directories( &self, result: &CommandExecutionResult, - file_digests: &mut Vec, - tree_digests: &mut Vec, + file_digests: &mut Vec, + tree_digests: &mut Vec, digest_config: DigestConfig, - // metadata to be added in the auxiliary_metadata field of TActionResult - metadata: Vec, - ) -> anyhow::Result> { - let timing = result.report.timing; - + ) -> anyhow::Result> { let mut upload_futs = vec![]; - let mut output_files = vec![]; - let mut output_directories = vec![]; + let mut output_files: Vec = Vec::new(); + let mut output_directories: Vec = Vec::new(); for (output, value) in result.resolve_outputs(&self.artifact_fs) { match value.entry().as_ref() { @@ -299,7 +356,7 @@ impl CacheUploader { .await }; - file_digests.push(f.digest.to_string()); + file_digests.push(f.digest.dupe()); upload_futs.push(fut.boxed()); } DirectoryEntry::Dir(d) => { @@ -314,6 +371,7 @@ impl CacheUploader { ..Default::default() }); + let identity = None; // TODO(#503): implement this let fut = async move { self.re_client .upload( @@ -323,6 +381,7 @@ impl CacheUploader { output.path(), &d.dupe().as_immutable(), self.re_use_case, + identity, digest_config, ) .await @@ -330,9 +389,11 @@ impl CacheUploader { }; upload_futs.push(fut.boxed()); - tree_digests.push(tree_digest.to_string()); + tree_digests.push(tree_digest); } - DirectoryEntry::Leaf(..) => { + DirectoryEntry::Leaf( + ActionDirectoryMember::Symlink(..) | ActionDirectoryMember::ExternalSymlink(..), + ) => { // Bail, there is something that is not a file here and we don't handle this. // This will happen if the value is a symlink. The primary output of a command // being a symlink is probably unlikely. 
Unfortunately, we can't represent this @@ -344,7 +405,7 @@ impl CacheUploader { } let uploads = async { - future::try_join_all(upload_futs) + buck2_util::future::try_join_all(upload_futs) .await .context("Error uploading outputs")?; @@ -390,12 +451,13 @@ impl CacheUploader { execution_metadata: TExecutedActionMetadata { worker, execution_dir: "".to_owned(), - execution_start_timestamp: systemtime_to_ttimestamp(timing.start_time)?, + execution_start_timestamp: systemtime_to_ttimestamp( + result.report.timing.start_time, + )?, execution_completed_timestamp: systemtime_to_ttimestamp( - timing.start_time + timing.wall_time, + result.report.timing.end_time(), )?, execution_attempts: 1, - auxiliary_metadata: metadata, ..Default::default() }, ..Default::default() @@ -406,67 +468,147 @@ impl CacheUploader { } /// Whether we completed a cache upload. -#[derive(Copy, Clone, Dupe, Debug)] +#[allow(clippy::large_enum_variant)] enum CacheUploadOutcome { - Success, + Success(Option), Rejected(CacheUploadRejectionReason), + Failed(buck2_error::Error), +} + +impl CacheUploadOutcome { + fn uploaded(&self) -> bool { + match self { + CacheUploadOutcome::Success(_) => true, + _ => false, + } + } + + fn error(&self) -> String { + match self { + CacheUploadOutcome::Success(_) => String::new(), + CacheUploadOutcome::Rejected(reason) => format!("Rejected: {}", reason), + CacheUploadOutcome::Failed(e) => format!("{:#}", e), + } + } + + fn re_error_code(&self) -> Option { + match self { + CacheUploadOutcome::Success(_) => None, + CacheUploadOutcome::Rejected(reason) => match reason { + CacheUploadRejectionReason::SymlinkOutput + | CacheUploadRejectionReason::OutputExceedsLimit { .. } => None, + CacheUploadRejectionReason::PermissionDenied(_) => { + Some(TCode::PERMISSION_DENIED.to_string()) + } + }, + CacheUploadOutcome::Failed(e) => match e.find_typed_context::() { + Some(e) => Some(e.code.to_string()), + _ => Some("OTHER_ERRORS".to_owned()), + }, + } + } + + fn log_and_create_result( + self, + digest_str: &String, + error_on_cache_upload: bool, + ) -> anyhow::Result { + match &self { + CacheUploadOutcome::Success(_) => { + tracing::info!("Cache upload for `{}` succeeded", digest_str); + } + CacheUploadOutcome::Rejected(reason) => { + tracing::info!("Cache upload for `{}` rejected: {:#}", digest_str, reason); + } + CacheUploadOutcome::Failed(e) => { + tracing::warn!("Cache upload for `{}` failed: {:#}", digest_str, e); + } + }; + if !self.uploaded() && error_on_cache_upload { + Err(anyhow::anyhow!("cache_upload_failed")) + } else { + Ok(self) + } + } } /// A reason why we chose not to upload. 
-#[derive(Copy, Clone, Dupe, Debug, Display)]
+#[derive(Clone, Debug, Display)]
 enum CacheUploadRejectionReason {
-    #[display(fmt = "SymlinkOutput")]
+    #[display("SymlinkOutput")]
     SymlinkOutput,
-    #[display(fmt = "OutputExceedsLimit({})", max_bytes)]
+    #[display("OutputExceedsLimit({})", max_bytes)]
     OutputExceedsLimit { max_bytes: u64 },
+    #[display("PermissionDenied (permission check error: {})", _0)]
+    PermissionDenied(String),
 }

+#[derive(Debug, buck2_error::Error)]
+#[error("Missing action result for dep file key `{0}`")]
+struct DepFileReActionResultMissingError(String);
+
 #[async_trait]
 impl UploadCache for CacheUploader {
     async fn upload(
         &self,
         info: &CacheUploadInfo<'_>,
         res: &CommandExecutionResult,
-        dep_file_entry: Option<DepFileEntry>,
+        re_result: Option<TActionResult2>,
+        dep_file_bundle: Option<&mut dyn IntoRemoteDepFile>,
+        action_digest_and_blobs: &ActionDigestAndBlobs,
     ) -> anyhow::Result<CacheUploadResult> {
-        let error_on_cache_upload = match ERROR_ON_CACHE_UPLOAD.get_copied() {
-            Ok(r) => r.unwrap_or_default(),
-            Err(e) => return Err(e).context("cache_upload"),
-        };
-        let action = &info.action_digest;
+        let error_on_cache_upload = error_on_cache_upload().context("cache_upload")?;

-        let did_cache_upload = if res.was_locally_executed() {
+        let (did_cache_upload, action_result) = if res.was_locally_executed() {
+            tracing::debug!(
+                "Uploading action result for `{}`",
+                action_digest_and_blobs.action
+            );
             // TODO(bobyf, torozco) should these be critical sections?
-            self.upload_action_result(
-                info.target.dupe(),
-                action,
-                res,
-                info.digest_config,
-                error_on_cache_upload,
+            let outcome = self
+                .upload_local_outputs(
+                    info,
+                    res,
+                    &action_digest_and_blobs,
+                    error_on_cache_upload,
+                    dep_file_bundle.is_some(),
+                )
+                .await?;
+
+            (
+                outcome.uploaded(),
+                if let CacheUploadOutcome::Success(action_result) = outcome {
+                    action_result
+                } else {
+                    None
+                },
             )
-            .await?
+        } else if dep_file_bundle.is_some() {
+            (false, re_result)
         } else {
-            tracing::info!("Cache upload for `{}` not attempted", action);
-            false
+            tracing::info!(
+                "Cache upload for `{}` not attempted",
+                action_digest_and_blobs.action
+            );
+            (false, None)
         };

-        // Cache upload should only be invoked for successful actions. Double check here.
-        let did_dep_file_cache_upload = match dep_file_entry {
-            Some(dep_file_entry) if res.was_success() => {
-                self.upload_dep_file_result(
-                    action,
-                    info.target.dupe(),
-                    res,
-                    info.digest_config,
-                    dep_file_entry,
-                    error_on_cache_upload,
-                )
+        // Note: uploads aren't attempted for local worker actions, because we don't upload outputs for them, so there is no action result.
+        let should_upload_dep_file =
+            res.was_locally_executed() || res.was_remotely_executed() || res.was_action_cache_hit();
+
+        let did_dep_file_cache_upload = if let Some(dep_file_bundle) = dep_file_bundle
+            && should_upload_dep_file
+        {
+            self.upload_dep_file(info, action_result, dep_file_bundle, error_on_cache_upload)
                 .await?
-            }
-            _ => {
-                tracing::info!("Dep file cache upload for `{}` not attempted", action);
-                false
-            }
+                .uploaded()
+        } else {
+            tracing::info!(
+                "Dep file cache upload for `{}` not attempted",
+                action_digest_and_blobs.action
+            );
+            false
         };

         Ok(CacheUploadResult {
diff --git a/app/buck2_execute_impl/src/executors/empty_action_result.rs b/app/buck2_execute_impl/src/executors/empty_action_result.rs
new file mode 100644
index 0000000000000..4d67fec5e6895
--- /dev/null
+++ b/app/buck2_execute_impl/src/executors/empty_action_result.rs
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+
+ * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use buck2_common::cas_digest::DigestAlgorithm; +use buck2_core::execution_types::executor_config::RePlatformFields; +use buck2_execute::digest::CasDigestToReExt; +use buck2_execute::digest_config::DigestConfig; +use buck2_execute::execute::action_digest_and_blobs::ActionDigestAndBlobs; +use buck2_execute::execute::action_digest_and_blobs::ActionDigestAndBlobsBuilder; +use once_cell::sync::OnceCell; +use remote_execution as RE; +use remote_execution::TActionResult2; +use remote_execution::TExecutedActionMetadata; + +use crate::executors::to_re_platform::RePlatformFieldsToRePlatform; + +/// Create an empty action result for permission check. +pub(crate) fn empty_action_result( + platform: &RePlatformFields, +) -> anyhow::Result<(ActionDigestAndBlobs, TActionResult2)> { + static DIGEST_CONFIG: OnceCell = OnceCell::new(); + let digest_config = *DIGEST_CONFIG + .get_or_try_init(|| DigestConfig::leak_new(vec![DigestAlgorithm::Sha1], None))?; + + let mut blobs = ActionDigestAndBlobsBuilder::new(digest_config); + + let command = blobs.add_command(&RE::Command { + arguments: vec![ + "/command".to_owned(), + "-to".to_owned(), + "check".to_owned(), + "permission".to_owned(), + // Random string for xbgs. + "EMPTY_ACTION_RESULT_fztiucvwawdmarhheqoz".to_owned(), + ], + platform: Some(platform.to_re_platform()), + ..Default::default() + }); + + let action = blobs.build(&RE::Action { + command_digest: Some(command.to_grpc()), + ..Default::default() + }); + + let action_result = TActionResult2 { + stdout_raw: Some(Vec::new()), + stderr_raw: Some(Vec::new()), + exit_code: 0, + execution_metadata: TExecutedActionMetadata { + execution_attempts: 0, + ..Default::default() + }, + ..Default::default() + }; + + Ok((action, action_result)) +} diff --git a/app/buck2_execute_impl/src/executors/hybrid.rs b/app/buck2_execute_impl/src/executors/hybrid.rs index 633ffb9648877..9d0e6cd3589e5 100644 --- a/app/buck2_execute_impl/src/executors/hybrid.rs +++ b/app/buck2_execute_impl/src/executors/hybrid.rs @@ -7,6 +7,8 @@ * of this source tree. 
*/ +use std::sync::atomic::AtomicI64; +use std::sync::atomic::Ordering; use std::sync::Arc; use anyhow::Context; @@ -26,8 +28,10 @@ use buck2_execute::execute::prepared::PreparedCommand; use buck2_execute::execute::prepared::PreparedCommandExecutor; use buck2_execute::execute::request::CommandExecutionPaths; use buck2_execute::execute::request::ExecutorPreference; +use buck2_execute::execute::result::CommandExecutionErrorType; use buck2_execute::execute::result::CommandExecutionResult; use buck2_execute::execute::result::CommandExecutionStatus; +use buck2_futures::cancellation::CancellationContext; use derivative::Derivative; use dupe::Dupe; use futures::future::BoxFuture; @@ -35,7 +39,6 @@ use futures::future::Either; use futures::future::Future; use futures::FutureExt; use host_sharing::HostSharingRequirements; -use more_futures::cancellation::CancellationContext; use crate::executors::local::LocalExecutor; use crate::low_pass_filter::LowPassFilter; @@ -53,6 +56,7 @@ pub struct HybridExecutor { pub executor_preference: ExecutorPreference, pub low_pass_filter: Arc, pub re_max_input_files_bytes: u64, + pub fallback_tracker: Arc, } impl HybridExecutor @@ -162,9 +166,10 @@ where let local_result = self.local_exec_cmd( command, Box::new(claim_manager.dupe()), - manager.events.dupe(), + manager.inner.events.dupe(), Arc::new( manager + .inner .liveliness_observer .dupe() .and(local_execution_liveliness_observer.dupe()), @@ -179,8 +184,8 @@ where Box::new(claim_manager), remote_execution_liveliness_guard, )), - manager.events.dupe(), - manager.liveliness_observer.dupe(), + manager.inner.events.dupe(), + manager.inner.liveliness_observer.dupe(), cancellations, fallback_on_failure, ); @@ -207,37 +212,41 @@ where let weight = match command.request.host_sharing_requirements() { HostSharingRequirements::ExclusiveAccess => self.low_pass_filter.capacity(), - HostSharingRequirements::OnePerToken(.., class) => self - .local - .host_sharing_broker - .requested_permits(class) - .into_count_uncapped(), - HostSharingRequirements::Shared(class) => self + HostSharingRequirements::OnePerToken(.., class) + | HostSharingRequirements::Shared(class) => self .local .host_sharing_broker .requested_permits(class) .into_count_uncapped(), }; - let is_retryable_status = move |r: &CommandExecutionResult| { - match &r.report.status { - // This does need be retried since if we get a cancelled result that would - // typically mean the other result asked for cancellation and we're about to - // receive the result here, or it could mean we're being asked to cancel by our - // caller. - CommandExecutionStatus::Cancelled => true, - // If the execution is successful, use the result. - CommandExecutionStatus::Success { .. } => false, - // Retry commands that failed (i.e. exit 1) only if we're instructed to do so. - CommandExecutionStatus::Failure { .. } => fallback_on_failure, - // Don't retry timeouts. They are used for tests and falling back on a timeout is - // sort of the opposite of what's been requested. - CommandExecutionStatus::TimedOut { .. } => false, - // Errors are infra errors and are always retried because that is the point of - // falling back. - CommandExecutionStatus::Error { .. 
} => true,
-            }
-        };
+        let is_retryable_status =
+            move |r: &CommandExecutionResult, ignore_fallback_tracker: bool| {
+                match &r.report.status {
+                    // This does need to be retried since if we get a cancelled result that would
+                    // typically mean the other result asked for cancellation and we're about to
+                    // receive the result here, or it could mean we're being asked to cancel by our
+                    // caller.
+                    CommandExecutionStatus::Cancelled => true,
+                    // If the execution is successful, use the result.
+                    CommandExecutionStatus::Success { .. } => false,
+                    // Retry commands that failed (i.e. exit 1) only if we're instructed to do so.
+                    CommandExecutionStatus::Failure { .. } => fallback_on_failure,
+                    // Don't retry timeouts. They are used for tests and falling back on a timeout is
+                    // sort of the opposite of what's been requested.
+                    CommandExecutionStatus::TimedOut { .. } => false,
+                    // Don't retry storage resource exhaustion errors as retries might only increase the traffic to storage.
+                    CommandExecutionStatus::Error {
+                        typ: CommandExecutionErrorType::StorageResourceExhausted,
+                        ..
+                    } => ignore_fallback_tracker,
+                    // Errors are infra errors and are always retried because that is the point of
+                    // falling back.
+                    CommandExecutionStatus::Error { .. } => {
+                        ignore_fallback_tracker || self.fallback_tracker.can_fallback()
+                    }
+                }
+            };

         let fallback_only = fallback_only && !command.request.force_full_hybrid_if_capable();

@@ -281,7 +290,7 @@ where
             jobs.execute_concurrent().await
         };

-        let mut res = if is_retryable_status(&first_res) {
+        let mut res = if is_retryable_status(&first_res, false) {
             // If the first result had made a claim, then cancel it now to let the other result
             // proceed.
             if let Some(claim) = first_res.report.claim.take() {
@@ -298,7 +307,7 @@
         // For the purposes of giving users a good UX, if both things failed, give them the
         // local executor's error, which is likely to not have failed because of e.g.
         // sandboxing.
-        let (mut primary_res, mut secondary_res) = if is_retryable_status(&second_res) {
+        let (mut primary_res, mut secondary_res) = if is_retryable_status(&second_res, true) {
             if first_priority > second_priority {
                 (first_res, second_res)
             } else {
@@ -519,3 +528,41 @@ where

 #[derive(PartialOrd, Ord, PartialEq, Eq)]
 struct JobPriority(u8);
+
+pub struct FallbackTracker {
+    count_fallbacks: AtomicI64,
+}
+
+impl FallbackTracker {
+    pub fn new() -> Self {
+        FallbackTracker {
+            count_fallbacks: AtomicI64::new(0),
+        }
+    }
+
+    pub fn can_fallback(&self) -> bool {
+        let retried = self.count_fallbacks.fetch_add(1, Ordering::Relaxed);
+
+        #[cfg(all(fbcode_build, target_os = "linux"))]
+        let max = if hostcaps::is_prod() {
+            justknobs::get("buck2/buck2:max_fallback", None).ok()
+        } else {
+            None
+        };
+
+        #[cfg(not(all(fbcode_build, target_os = "linux")))]
+        let max = None;
+
+        if let Some(max) = max {
+            if retried >= max {
+                tracing::warn!(
+                    "Not falling back to local because too many actions have fallen back already. \
+                    If you would like to proceed anyway, pass `--prefer-local`."
+                );
+                return false;
+            }
+        }
+
+        true
+    }
+}
diff --git a/app/buck2_execute_impl/src/executors/local.rs b/app/buck2_execute_impl/src/executors/local.rs
index 9fff234463d4a..d5e7b82ad2218
--- a/app/buck2_execute_impl/src/executors/local.rs
+++ b/app/buck2_execute_impl/src/executors/local.rs
@@ -7,7 +7,6 @@
  * of this source tree.
*/ -use std::borrow::Cow; use std::ffi::OsStr; use std::ffi::OsString; use std::ops::ControlFlow; @@ -38,6 +37,7 @@ use buck2_execute::digest_config::DigestConfig; use buck2_execute::directory::extract_artifact_value; use buck2_execute::directory::insert_entry; use buck2_execute::entry::build_entry_from_disk; +use buck2_execute::entry::HashingInfo; use buck2_execute::execute::action_digest::ActionDigest; use buck2_execute::execute::blocking::BlockingExecutor; use buck2_execute::execute::clean_output_paths::CleanOutputPaths; @@ -66,6 +66,8 @@ use buck2_forkserver::run::gather_output; use buck2_forkserver::run::maybe_absolutize_exe; use buck2_forkserver::run::timeout_into_cancellation; use buck2_forkserver::run::GatherOutputStatus; +use buck2_futures::cancellable_future::CancellationObserver; +use buck2_futures::cancellation::CancellationContext; use buck2_util::process::background_command; use derive_more::From; use dupe::Dupe; @@ -78,15 +80,13 @@ use host_sharing::host_sharing::HostSharingGuard; use host_sharing::HostSharingBroker; use host_sharing::HostSharingRequirements; use indexmap::IndexMap; -use more_futures::cancellable_future::CancellationObserver; -use more_futures::cancellation::CancellationContext; -use thiserror::Error; use tracing::info; use crate::executors::worker::WorkerHandle; use crate::executors::worker::WorkerPool; -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] +#[buck2(input)] enum LocalExecutionError { #[error("Args list was empty")] NoArgs, @@ -140,7 +140,7 @@ impl LocalExecutor { exe: &'a str, args: impl IntoIterator + Send> + Send + 'a, env: impl IntoIterator + Send, impl AsRef + Send)> + Send + 'a, - working_directory: Option<&'a ProjectRelativePath>, + working_directory: &'a ProjectRelativePath, timeout: Option, env_inheritance: Option<&'a EnvironmentInheritance>, liveliness_observer: impl LivelinessObserver + 'static, @@ -150,10 +150,7 @@ impl LocalExecutor { > + Send + 'a { async move { - let working_directory = match working_directory { - Some(d) => Cow::Owned(self.root.join(d)), - None => Cow::Borrowed(&self.root), - }; + let working_directory = self.root.join_cow(working_directory); match &self.forkserver { Some(forkserver) => { @@ -222,7 +219,7 @@ impl LocalExecutor { return manager.error("no_args", LocalExecutionError::NoArgs); } - let (scratch_path, input_materialization_duration) = match executor_stage_async( + let executor_stage_result = executor_stage_async( buck2_data::LocalStage { stage: Some(buck2_data::LocalMaterializeInputs {}.into()), }, @@ -231,7 +228,8 @@ impl LocalExecutor { let (r1, r2) = future::join( async { - materialize_inputs(&self.artifact_fs, &self.materializer, request).await + materialize_inputs(&self.artifact_fs, self.materializer.as_ref(), request) + .await }, async { // When user requests to not perform a cleanup for a specific action @@ -240,7 +238,7 @@ impl LocalExecutor { if !request.outputs_cleanup { materialize_build_outputs_from_previous_run( &self.artifact_fs, - &self.materializer, + self.materializer.as_ref(), request, ) .await @@ -251,14 +249,16 @@ impl LocalExecutor { ) .await; - let scratch_path = r1?; + let scratch_path = r1?.scratch; r2?; anyhow::Ok((scratch_path, start.elapsed())) }, ) - .await - { + .boxed() + .await; + + let (scratch_path, input_materialization_duration) = match executor_stage_result { Ok((scratch_path, input_materialization_duration)) => { (scratch_path, input_materialization_duration) } @@ -266,7 +266,7 @@ impl LocalExecutor { }; // TODO: Release here. 
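
In the hunk above, input materialization and the re-materialization of previous-run outputs are kicked off concurrently, and their results are only checked once both have settled. A minimal sketch of that `future::join` shape, with the two step bodies replaced by placeholders:

    use futures::future;

    // Both steps run concurrently; `join` waits for both to settle before we
    // look at either result, matching the (r1, r2) handling above.
    async fn materialize_concurrently() -> anyhow::Result<(u64, ())> {
        let (inputs, prev_outputs) = future::join(
            async { anyhow::Ok(42u64) }, // placeholder for materialize_inputs
            async { anyhow::Ok(()) },    // placeholder for materialize_build_outputs_from_previous_run
        )
        .await;
        Ok((inputs?, prev_outputs?))
    }
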
- let manager = manager.claim().await; + let manager = manager.claim().boxed().await; let scratch_path = &scratch_path.0; @@ -288,6 +288,7 @@ impl LocalExecutor { anyhow::Ok(()) }, ) + .boxed() .await { return manager.error("prepare_output_dirs_failed", e); @@ -367,9 +368,12 @@ impl LocalExecutor { StrOrOsStr::from(build_id), ))) }; - let liveliness_observer = manager.liveliness_observer.dupe().and(cancellation); + let liveliness_observer = manager.inner.liveliness_observer.dupe().and(cancellation); - let (worker, manager) = self.initialize_worker(request, manager, dispatcher).await?; + let (worker, manager) = self + .initialize_worker(request, manager, dispatcher) + .boxed() + .await?; let execution_kind = match worker { None => CommandExecutionKind::Local { @@ -424,7 +428,9 @@ impl LocalExecutor { .into_iter() .map(|(k, v)| (OsString::from(k), v.to_owned())) .collect(); - Ok(worker.exec_cmd(request.args(), env).await) + Ok(worker + .exec_cmd(request.args(), env, request.timeout()) + .await) } else { self.exec( &args[0], @@ -441,23 +447,28 @@ impl LocalExecutor { let execution_time = execution_start.elapsed(); - let timing = CommandExecutionMetadata { + let timing = Box::new(CommandExecutionMetadata { wall_time: execution_time, execution_time, start_time, execution_stats: None, // We fill this in later if available. input_materialization_duration, - hashing_duration: Duration::ZERO, // We fill this in later if available. - }; + hashing_duration: Duration::ZERO, // We fill hashing info in later if available. + hashed_artifacts_count: 0, + queue_duration: None, + }); (timing, r) }, ) + .boxed() .await; let (status, stdout, stderr) = match res { Ok(res) => res, - Err(e) => return manager.error("exec_failed", e), // TODO (torozco): Can this take CommandExecutionKind? Should this be a failure? 
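
The timing metadata is now built behind a `Box`, and several sub-futures gain `.boxed()` calls, presumably to keep the enclosing async state machine small. A minimal sketch of the pattern; the scratch buffer is just a stand-in for whatever makes a stage's future large:

    use futures::future::{BoxFuture, FutureExt};

    // Heap-allocate a bulky intermediate future so the caller's generated
    // state machine (and anything that stores it) stays small.
    fn boxed_stage(n: u64) -> BoxFuture<'static, u64> {
        async move {
            let scratch = [0u8; 4096]; // stand-in for bulky per-stage state
            n + scratch.len() as u64
        }
        .boxed()
    }
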
+ Err(e) => { + return manager.error("exec_failed", e); + } }; let std_streams = CommandStdStreams::Local { stdout, stderr }; @@ -469,17 +480,21 @@ impl LocalExecutor { } => { let (outputs, hashing_time) = match self .calculate_and_declare_output_values(request, digest_config) + .boxed() .await { Ok((output_values, hashing_time)) => (output_values, hashing_time), - Err(e) => return manager.error("calculate_output_values_failed", e), + Err(e) => { + return manager.error("calculate_output_values_failed", e); + } }; timing.execution_stats = execution_stats; - timing.hashing_duration = hashing_time; + timing.hashing_duration = hashing_time.hashing_duration; + timing.hashed_artifacts_count = hashing_time.hashed_artifacts_count; if exit_code == 0 { - manager.success(execution_kind, outputs, std_streams, timing) + manager.success(execution_kind, outputs, std_streams, *timing) } else { let manager = check_inputs( manager, @@ -487,6 +502,7 @@ impl LocalExecutor { self.blocking_executor.as_ref(), request, ) + .boxed() .await?; manager.failure( @@ -494,7 +510,7 @@ impl LocalExecutor { outputs, std_streams, Some(exit_code), - timing, + *timing, ) } } @@ -505,6 +521,7 @@ impl LocalExecutor { self.blocking_executor.as_ref(), request, ) + .boxed() .await?; // We are lying about the std streams here because we don't have a good mechanism @@ -519,11 +536,11 @@ impl LocalExecutor { .into_bytes(), }, None, - timing, + *timing, ) } GatherOutputStatus::TimedOut(duration) => { - manager.timeout(execution_kind, duration, std_streams, timing) + manager.timeout(execution_kind, duration, std_streams, *timing) } GatherOutputStatus::Cancelled => manager.cancel_claim(), } @@ -533,21 +550,26 @@ impl LocalExecutor { &self, request: &CommandExecutionRequest, digest_config: DigestConfig, - ) -> anyhow::Result<(IndexMap, Duration)> { + ) -> anyhow::Result<(IndexMap, HashingInfo)> { let mut builder = inputs_directory(request.inputs(), &self.artifact_fs)?; // Read outputs from disk and add them to the builder let mut entries = Vec::new(); let mut total_hashing_time = Duration::ZERO; + let mut total_hashed_outputs = 0; for output in request.outputs() { let path = output.resolve(&self.artifact_fs).into_path(); let abspath = self.root.join(&path); - let (entry, hashing_time) = build_entry_from_disk( + let (entry, hashing_info) = build_entry_from_disk( abspath, FileDigestConfig::build(digest_config.cas_digest_config()), + self.blocking_executor.as_ref(), + self.artifact_fs.fs().root(), ) + .await .with_context(|| format!("collecting output {:?}", path))?; - total_hashing_time += hashing_time; + total_hashing_time += hashing_info.hashing_duration; + total_hashed_outputs += hashing_info.hashed_artifacts_count; if let Some(entry) = entry { insert_entry(&mut builder, &path, entry)?; entries.push((output.cloned(), path)); @@ -578,7 +600,13 @@ impl LocalExecutor { self.materializer.declare_existing(to_declare).await?; - Ok((mapped_outputs, total_hashing_time)) + Ok(( + mapped_outputs, + HashingInfo { + hashing_duration: total_hashing_time, + hashed_artifacts_count: total_hashed_outputs, + }, + )) } async fn acquire_worker_permit( @@ -694,6 +722,11 @@ impl PreparedCommandExecutor for LocalExecutor { manager: CommandExecutionManager, cancellations: &CancellationContext, ) -> CommandExecutionResult { + let manager = manager.with_execution_kind(CommandExecutionKind::Local { + digest: command.prepared_action.digest(), + command: command.request.all_args_vec(), + env: command.request.env().clone(), + }); if 
command.request.executor_preference().requires_remote() { return manager.error("local_prepare", LocalExecutionError::RemoteOnlyAction); } @@ -706,11 +739,8 @@ impl PreparedCommandExecutor for LocalExecutor { } = command; let local_resource_holders = executor_stage_async( - { - let a = buck2_data::AcquireLocalResource {}; - buck2_data::LocalStage { - stage: Some(a.into()), - } + buck2_data::LocalStage { + stage: Some(buck2_data::AcquireLocalResource {}.into()), }, async move { let mut holders = vec![]; @@ -744,7 +774,7 @@ impl PreparedCommandExecutor for LocalExecutor { .with_structured_cancellation(|cancellation| { Self::exec_request( self, - &prepared_action.action, + &prepared_action.action_and_blobs.action, request, manager, cancellation, @@ -785,15 +815,20 @@ impl<'a> StrOrOsStr<'a> { } } +pub struct MaterializedInputPaths { + pub scratch: ScratchPath, + pub paths: Vec, +} + /// Materialize all inputs artifact for CommandExecutionRequest so the command can be executed locally. /// /// This also discovers the scratch directory if any was passed (if multiple are passed, one of /// them is returned). pub async fn materialize_inputs( artifact_fs: &ArtifactFs, - materializer: &Arc, + materializer: &dyn Materializer, request: &CommandExecutionRequest, -) -> anyhow::Result { +) -> anyhow::Result { let mut paths = vec![]; let mut scratch = ScratchPath(None); @@ -801,7 +836,7 @@ pub async fn materialize_inputs( match input { CommandExecutionInput::Artifact(group) => { for (artifact, _) in group.iter() { - if !artifact.is_source() { + if artifact.requires_materialization(artifact_fs) { paths.push(artifact.resolve_path(artifact_fs)?); } } @@ -811,7 +846,9 @@ pub async fn materialize_inputs( .buck_out_path_resolver() .resolve_gen(&metadata.path); CleanOutputPaths::clean(std::iter::once(path.as_ref()), artifact_fs.fs())?; - artifact_fs.fs().write_file(&path, &metadata.data, false)?; + artifact_fs + .fs() + .write_file(&path, &metadata.data.0.0, false)?; } CommandExecutionInput::ScratchPath(path) => { let path = artifact_fs.buck_out_path_resolver().resolve_scratch(path); @@ -825,21 +862,22 @@ pub async fn materialize_inputs( } } - let mut stream = materializer.materialize_many(paths).await?; + let mut stream = materializer.materialize_many(paths.clone()).await?; while let Some(res) = stream.next().await { match res { Ok(()) => {} - Err(MaterializationError::NotFound { path, info, debug }) => { - let corrupted = info.origin.guaranteed_by_action_cache(); + Err(MaterializationError::NotFound { source }) => { + let corrupted = source.info.origin.guaranteed_by_action_cache(); return Err(tag_error!( "cas_missing_fatal", - MaterializationError::NotFound { path, info, debug }.into(), + MaterializationError::NotFound { source }.into(), quiet: true, task: false, daemon_in_memory_state_is_corrupted: true, action_cache_is_corrupted: corrupted - )); + ) + .into()); } Err(e) => { return Err(e.into()); @@ -847,7 +885,7 @@ pub async fn materialize_inputs( } } - Ok(scratch) + Ok(MaterializedInputPaths { scratch, paths }) } /// A scratch path discovered during `materialize_inputs`. @@ -865,7 +903,7 @@ async fn check_inputs( match input { CommandExecutionInput::Artifact(group) => { for (artifact, _) in group.iter() { - if !artifact.is_source() { + if artifact.requires_materialization(artifact_fs) { let path = artifact.resolve_path(artifact_fs)?; let abs_path = artifact_fs.fs().resolve(&path); @@ -874,7 +912,7 @@ async fn check_inputs( // want to propagate it. 
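
`materialize_inputs` above drains a stream of per-path results and singles out the not-found case, which signals corrupted daemon or action-cache state. A rough sketch of that loop, with `FetchError` as a stand-in for buck2's `MaterializationError`:

    use futures::{Stream, StreamExt};

    #[derive(Debug)]
    enum FetchError {
        NotFound { path: String }, // stand-in for MaterializationError::NotFound
        Other(String),
    }

    async fn drain_results(
        mut results: impl Stream<Item = Result<(), FetchError>> + Unpin,
    ) -> Result<(), FetchError> {
        while let Some(res) = results.next().await {
            match res {
                Ok(()) => {}
                // The real code routes this through tag_error! so it is recorded
                // as possible daemon / action-cache corruption before failing.
                Err(FetchError::NotFound { path }) => return Err(FetchError::NotFound { path }),
                Err(e) => return Err(e),
            }
        }
        Ok(())
    }
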
let _ignored = tag_result!( "missing_local_inputs", - fs_util::symlink_metadata(&abs_path).context("Missing input"), + fs_util::symlink_metadata(&abs_path).context("Missing input").map_err(|e| e.into()), quiet: true, task: false, daemon_materializer_state_is_corrupted: true @@ -907,7 +945,7 @@ async fn check_inputs( /// Such incremental state in fact serves as the input while being output as well. pub async fn materialize_build_outputs_from_previous_run( artifact_fs: &ArtifactFs, - materializer: &Arc, + materializer: &dyn Materializer, request: &CommandExecutionRequest, ) -> anyhow::Result<()> { let mut paths = vec![]; @@ -952,23 +990,15 @@ pub async fn create_output_dirs( if request.outputs_cleanup { // TODO(scottcao): Move this deletion logic into materializer itself. - // Use Eden's clean up API if possible, it is significantly faster on Eden compared with - // the native method as the API does not load and materialize files or folders - if let Some(eden_buck_out) = materializer.eden_buck_out() { - eden_buck_out - .remove_paths_recursive(artifact_fs.fs(), output_paths, cancellations) - .await?; - } else { - blocking_executor - .execute_io( - Box::new(CleanOutputPaths { - paths: output_paths, - }), - cancellations, - ) - .await - .context("Failed to cleanup output directory")?; - } + blocking_executor + .execute_io( + Box::new(CleanOutputPaths { + paths: output_paths, + }), + cancellations, + ) + .await + .context("Failed to cleanup output directory")?; } let project_fs = artifact_fs.fs(); @@ -1044,8 +1074,6 @@ impl EnvironmentBuilder for Command { mod unix { use std::os::unix::ffi::OsStrExt; - use buck2_execute::execute::environment_inheritance::EnvironmentInheritance; - use super::*; pub async fn exec_via_forkserver( @@ -1074,6 +1102,7 @@ mod unix { timeout: command_timeout.try_map(|d| d.try_into())?, enable_miniperf, std_redirects: None, + graceful_shutdown_timeout_s: None, }; apply_local_execution_environment(&mut req, working_directory, env, env_inheritance); forkserver @@ -1129,105 +1158,20 @@ mod unix { mod tests { use std::collections::HashMap; use std::str; - use std::sync::Arc; - use std::time::Instant; use buck2_common::liveliness_observer::NoopLivelinessObserver; use buck2_core::cells::cell_root_path::CellRootPathBuf; use buck2_core::cells::name::CellName; use buck2_core::cells::CellResolver; - use buck2_core::fs::artifact_path_resolver::ArtifactFs; use buck2_core::fs::buck_out_path::BuckOutPathResolver; use buck2_core::fs::project::ProjectRoot; use buck2_core::fs::project::ProjectRootTemp; - use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; use buck2_execute::execute::blocking::testing::DummyBlockingExecutor; use buck2_execute::materialize::nodisk::NoDiskMaterializer; use host_sharing::HostSharingStrategy; use super::*; - #[tokio::test] - async fn test_gather_output() -> anyhow::Result<()> { - let mut cmd = if cfg!(windows) { - background_command("powershell") - } else { - background_command("sh") - }; - cmd.args(["-c", "echo hello"]); - - let (status, stdout, stderr) = gather_output(cmd, futures::future::pending()).await?; - assert!(matches!(status, GatherOutputStatus::Finished{ exit_code, .. } if exit_code == 0)); - assert_eq!(str::from_utf8(&stdout)?.trim(), "hello"); - assert_eq!(stderr, b""); - - Ok(()) - } - - #[tokio::test] - async fn test_gather_does_not_wait_for_children() -> anyhow::Result<()> { - // If we wait for sleep, this will time out. 
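
The tests being deleted here exercised `gather_output` racing a child process against a timeout-driven cancellation future. The core shape is a select between the command and a sleep; a minimal tokio sketch of that shape, ignoring the real implementation's process-group cleanup and output draining:

    use std::time::Duration;

    enum Outcome {
        Finished(i32),
        TimedOut,
    }

    // Whichever side completes first decides the status, as with
    // timeout_into_cancellation in the code above.
    async fn run_with_timeout(limit: Duration) -> Outcome {
        tokio::select! {
            exit_code = async { 0i32 } => Outcome::Finished(exit_code), // stand-in for waiting on the child
            _ = tokio::time::sleep(limit) => Outcome::TimedOut,
        }
    }
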
- let mut cmd = if cfg!(windows) { - background_command("powershell") - } else { - background_command("sh") - }; - if cfg!(windows) { - cmd.args([ - "-c", - "Start-Job -ScriptBlock {sleep 10} | Out-Null; echo hello", - ]); - } else { - cmd.args(["-c", "(sleep 10 &) && echo hello"]); - } - - let timeout = if cfg!(windows) { 9 } else { 1 }; - let (status, stdout, stderr) = gather_output( - cmd, - timeout_into_cancellation(Some(Duration::from_secs(timeout))), - ) - .await?; - assert!( - matches!(status, GatherOutputStatus::Finished { exit_code, .. } if exit_code == 0), - "status: {:?}", - status - ); - assert_eq!(str::from_utf8(&stdout)?.trim(), "hello"); - assert_eq!(stderr, b""); - - Ok(()) - } - - #[tokio::test] - async fn test_gather_output_timeout() -> anyhow::Result<()> { - let now = Instant::now(); - - let mut cmd = if cfg!(windows) { - background_command("powershell") - } else { - background_command("sh") - }; - cmd.args(["-c", "echo hello; sleep 10; echo bye"]); - - let timeout = if cfg!(windows) { 5 } else { 1 }; - let (status, stdout, stderr) = gather_output( - cmd, - timeout_into_cancellation(Some(Duration::from_secs(timeout))), - ) - .await?; - assert!( - matches!(status, GatherOutputStatus::TimedOut(..)), - "status: {:?}", - status - ); - assert_eq!(str::from_utf8(&stdout)?.trim(), "hello"); - assert_eq!(stderr, b""); - - assert!(now.elapsed() < Duration::from_secs(9)); // Lots of leeway here. - - Ok(()) - } - fn artifact_fs(project_fs: ProjectRoot) -> ArtifactFs { ArtifactFs::new( CellResolver::testing_with_name_and_path( @@ -1273,7 +1217,7 @@ mod tests { interpreter, ["-c", "echo $PWD; pwd"], &HashMap::::default(), - None, + ProjectRelativePath::empty(), None, None, NoopLivelinessObserver::create(), @@ -1309,7 +1253,7 @@ mod tests { "sh", ["-c", "echo $USER"], &HashMap::::default(), - None, + ProjectRelativePath::empty(), None, Some(&EnvironmentInheritance::empty()), NoopLivelinessObserver::create(), diff --git a/app/buck2_execute_impl/src/executors/mod.rs b/app/buck2_execute_impl/src/executors/mod.rs deleted file mode 100644 index c83120b09b39c..0000000000000 --- a/app/buck2_execute_impl/src/executors/mod.rs +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -pub mod action_cache; -pub mod caching; -pub mod hybrid; -pub mod local; -pub mod re; -pub mod stacked; -pub mod worker; diff --git a/app/buck2_execute_impl/src/executors/re.rs b/app/buck2_execute_impl/src/executors/re.rs index 4be9904192a7b..b2560b90eb223 100644 --- a/app/buck2_execute_impl/src/executors/re.rs +++ b/app/buck2_execute_impl/src/executors/re.rs @@ -12,6 +12,7 @@ use std::sync::Arc; use std::time::Duration; use async_trait::async_trait; +use buck2_core::execution_types::executor_config::RemoteExecutorDependency; use buck2_core::execution_types::executor_config::RemoteExecutorUseCase; use buck2_core::fs::artifact_path_resolver::ArtifactFs; use buck2_core::fs::project::ProjectRoot; @@ -21,6 +22,7 @@ use buck2_events::dispatch::span_async; use buck2_execute::digest_config::DigestConfig; use buck2_execute::execute::action_digest::ActionDigest; use buck2_execute::execute::blobs::ActionBlobs; +use buck2_execute::execute::kind::CommandExecutionKind; use buck2_execute::execute::kind::RemoteCommandExecutionDetails; use buck2_execute::execute::manager::CommandExecutionManager; use buck2_execute::execute::manager::CommandExecutionManagerExt; @@ -31,31 +33,34 @@ use buck2_execute::execute::prepared::PreparedCommandExecutor; use buck2_execute::execute::request::CommandExecutionPaths; use buck2_execute::execute::request::CommandExecutionRequest; use buck2_execute::execute::request::ExecutorPreference; +use buck2_execute::execute::result::CommandExecutionErrorType; use buck2_execute::execute::result::CommandExecutionResult; -use buck2_execute::execute::target::CommandExecutionTarget; use buck2_execute::knobs::ExecutorGlobalKnobs; use buck2_execute::materialize::materializer::Materializer; use buck2_execute::re::action_identity::ReActionIdentity; use buck2_execute::re::client::ExecuteResponseOrCancelled; +use buck2_execute::re::error::get_re_error_tag; +use buck2_execute::re::error::RemoteExecutionError; use buck2_execute::re::manager::ManagedRemoteExecutionClient; use buck2_execute::re::remote_action_result::RemoteActionResult; +use buck2_futures::cancellation::CancellationContext; use dupe::Dupe; use futures::FutureExt; use indexmap::IndexMap; -use more_futures::cancellation::CancellationContext; use remote_execution as RE; use remote_execution::ExecuteResponse; use remote_execution::TCode; -use thiserror::Error; use tracing::info; use crate::re::download::download_action_results; use crate::re::download::DownloadResult; use crate::re::paranoid_download::ParanoidDownloader; +use crate::storage_resource_exhausted::is_storage_resource_exhausted; -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] pub enum RemoteExecutorError { #[error("Trying to execute a `local_only = True` action on remote executor")] + #[buck2(input)] LocalOnlyAction, } @@ -70,13 +75,17 @@ pub struct ReExecutor { pub skip_cache_read: bool, pub skip_cache_write: bool, pub re_max_queue_time_ms: Option, + pub re_resource_units: Option, pub paranoid: Option, + pub materialize_failed_inputs: bool, + pub dependencies: Vec, } impl ReExecutor { async fn upload( &self, manager: CommandExecutionManager, + identity: &ReActionIdentity<'_>, blobs: &ActionBlobs, paths: &CommandExecutionPaths, digest_config: DigestConfig, @@ -92,6 +101,7 @@ impl ReExecutor { ProjectRelativePath::empty(), paths.input_directory(), self.re_use_case, + Some(identity), digest_config, ) .await; @@ -99,8 +109,9 @@ impl ReExecutor { Ok(stats) => ( Ok(()), buck2_data::ReUploadEnd { - digests_uploaded: Some(stats.digests_uploaded), - 
bytes_uploaded: Some(stats.bytes_uploaded), + digests_uploaded: Some(stats.total.digests_uploaded), + bytes_uploaded: Some(stats.total.bytes_uploaded), + stats_by_extension: stats.by_extension, }, ), Err(e) => (Err(e), buck2_data::ReUploadEnd::default()), @@ -110,20 +121,38 @@ impl ReExecutor { match upload_response { Ok(()) => {} - Err(e) => return ControlFlow::Break(manager.error("remote_upload_error", e)), + Err(e) => { + let e: buck2_error::Error = e.into(); + let is_storage_resource_exhausted = e + .find_typed_context::() + .map_or(false, |re_client_error| { + is_storage_resource_exhausted(re_client_error.as_ref()) + }); + let error_type = if is_storage_resource_exhausted { + CommandExecutionErrorType::StorageResourceExhausted + } else { + CommandExecutionErrorType::Other + }; + return ControlFlow::Break(manager.error_classified( + "remote_upload_error", + e, + error_type, + )); + } }; ControlFlow::Continue(manager) } - async fn re_execute( + async fn re_execute<'a>( &self, mut manager: CommandExecutionManager, - action: &dyn CommandExecutionTarget, + identity: &ReActionIdentity<'_>, request: &CommandExecutionRequest, action_digest: &ActionDigest, digest_config: DigestConfig, platform: &RE::Platform, + dependencies: impl IntoIterator, ) -> ControlFlow { info!( "RE command line:\n```\n$ {}\n```\n for action `{}`", @@ -131,20 +160,19 @@ impl ReExecutor { action_digest, ); - let identity = - ReActionIdentity::new(action, self.re_action_key.as_deref(), request.paths()); - let execute_response = self .re_client .execute( action_digest.dupe(), platform, + dependencies, self.re_use_case, &identity, &mut manager, self.skip_cache_read, self.skip_cache_write, self.re_max_queue_time_ms.map(Duration::from_millis), + self.re_resource_units, &self.knobs, ) .await; @@ -157,21 +185,23 @@ impl ReExecutor { Err(e) => return ControlFlow::Break(manager.error("remote_call_error", e)), }; - let remote_details = RemoteCommandExecutionDetails { - action_digest: action_digest.dupe(), - session_id: self.re_client.get_session_id().await.ok(), - use_case: self.re_use_case, - platform: platform.clone(), - remote_dep_file_key: None, - }; + let remote_details = RemoteCommandExecutionDetails::new( + action_digest.dupe(), + None, + self.re_client.get_session_id().await.ok(), + self.re_use_case, + &platform, + ); - if response.error.code != TCode::OK { - let res = if let Some(out) = as_missing_outputs_error(&response.error) { + let execution_kind = response.execution_kind(remote_details); + let manager = manager.with_execution_kind(execution_kind.clone()); + if response.status.code != TCode::OK { + let res = if let Some(out) = as_missing_outputs_error(&response.status) { // TODO: Add a dedicated report variant for this. // NOTE: We don't get stdout / stderr from RE when this happens, so the best we can // do here is just pass on the error. manager.failure( - response.execution_kind(remote_details), + execution_kind, IndexMap::new(), CommandStdStreams::Local { stdout: Vec::new(), @@ -181,9 +211,9 @@ impl ReExecutor { None, Default::default(), ) - } else if is_timeout_error(&response.error) && request.timeout().is_some() { + } else if is_timeout_error(&response.status) && request.timeout().is_some() { manager.timeout( - response.execution_kind(remote_details), + execution_kind, // Checked above: we fallthrough to the error path if we didn't set a timeout // and yet received one. 
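
The status handling above fans an RE response out into several outcomes: missing-output errors, timeouts (only honored when the request actually set one), storage exhaustion, and generic infra errors. A condensed sketch of that triage; `TCode` here is a local stand-in rather than the remote_execution crate's type, and, as the comments above note, the real code has to grep the status message for "OUTMISS":

    enum TCode {
        Ok,
        DeadlineExceeded,
        ResourceExhausted,
        Other,
    }

    enum Triage {
        Success,
        MissingOutputs,
        Timeout,
        StorageExhausted,
        InfraError,
    }

    fn triage(code: TCode, message: &str, request_had_timeout: bool) -> Triage {
        match code {
            TCode::Ok => Triage::Success,
            // A dedicated error code would be better; the message is all we have.
            _ if message.contains("OUTMISS") => Triage::MissingOutputs,
            TCode::DeadlineExceeded if request_had_timeout => Triage::Timeout,
            TCode::ResourceExhausted => Triage::StorageExhausted,
            _ => Triage::InfraError,
        }
    }
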
request.timeout().unwrap(), @@ -195,12 +225,18 @@ impl ReExecutor { response.timing(), ) } else { - manager.error( + let error_type = if is_storage_resource_exhausted(&response.status) { + CommandExecutionErrorType::StorageResourceExhausted + } else { + CommandExecutionErrorType::Other + }; + manager.error_classified( "remote_exec_error", ReErrorWrapper { action_digest: action_digest.dupe(), - inner: response.error, + inner: response.status, }, + error_type, ) }; @@ -215,10 +251,11 @@ impl ReExecutor { "re_timeout_exceeded", anyhow::anyhow!( "Command {} exceeded its timeout (ran for {}s, timeout was {}s)", - action.re_action_key(), + &identity.action_key, execution_time.as_secs(), timeout.as_secs(), ) + .into() ); if let Err(e) = res { @@ -244,35 +281,61 @@ impl PreparedCommandExecutor for ReExecutor { target, prepared_action: PreparedAction { - action: action_digest, - blobs, + action_and_blobs, platform, + remote_execution_dependencies, }, digest_config, } = command; + let details = RemoteCommandExecutionDetails::new( + command.prepared_action.digest(), + command.request.remote_dep_file_key, + self.re_client.get_session_id().await.ok(), + self.re_use_case, + &platform, + ); + let manager = manager.with_execution_kind(CommandExecutionKind::Remote { + details: details.clone(), + queue_time: Duration::ZERO, + materialized_inputs_for_failed: None, + }); + if command.request.executor_preference().requires_local() { return ControlFlow::Break( manager.error("remote_prepare", RemoteExecutorError::LocalOnlyAction), )?; } + let identity = + ReActionIdentity::new(*target, self.re_action_key.as_deref(), request.paths()); + // TODO(bobyf, torozco): remote execution probably needs to explicitly handle cancellations let manager = self - .upload(manager, blobs, request.paths(), *digest_config) + .upload( + manager, + &identity, + &action_and_blobs.blobs, + request.paths(), + *digest_config, + ) .await?; let (manager, response) = self .re_execute( manager, - *target, + &identity, request, - action_digest, + &action_and_blobs.action, *digest_config, platform, + self.dependencies + .iter() + .chain(remote_execution_dependencies.iter()), ) .await?; + let exit_code = response.action_result.exit_code; let res = download_action_results( request, &*self.materializer, @@ -280,29 +343,26 @@ impl PreparedCommandExecutor for ReExecutor { self.re_use_case, *digest_config, manager, + &identity, buck2_data::ReStage { stage: Some(buck2_data::ReDownload {}.into()), } .into(), request.paths(), request.outputs(), - RemoteCommandExecutionDetails { - action_digest: action_digest.dupe(), - session_id: self.re_client.get_session_id().await.ok(), - use_case: self.re_use_case, - platform: platform.clone(), - remote_dep_file_key: None, - }, + details, &response, self.paranoid.as_ref(), cancellations, - response.action_result.exit_code, + exit_code, + &self.artifact_fs, + self.materialize_failed_inputs, ) .boxed() .await; - let DownloadResult::Result(res) = res; - + let DownloadResult::Result(mut res) = res; + res.action_result = Some(response.action_result); res } @@ -311,20 +371,20 @@ impl PreparedCommandExecutor for ReExecutor { } } -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] #[error( - "action_digest={}, re_code={}, re_location={}, re_message={}", + "action_digest={}, re_code={}, re_message={}", .action_digest, .inner.code, - .inner.error_location, .inner.message )] -pub struct ReErrorWrapper { +#[buck2(tier0, tag = Some(get_re_error_tag(self.inner.code)))] +struct ReErrorWrapper { action_digest: 
ActionDigest, - inner: remote_execution::REError, + inner: remote_execution::TStatus, } -fn as_missing_outputs_error(err: &remote_execution::REError) -> Option<&str> { +fn as_missing_outputs_error(err: &remote_execution::TStatus) -> Option<&str> { // A dedicated error code would be better for this :( if err.message.contains("OUTMISS") { Some(&err.message) @@ -333,7 +393,7 @@ fn as_missing_outputs_error(err: &remote_execution::REError) -> Option<&str> { } } -fn is_timeout_error(err: &remote_execution::REError) -> bool { +fn is_timeout_error(err: &remote_execution::TStatus) -> bool { #[cfg(fbcode_build)] { // Not ideal, but DEADLINE_EXCEEDED will show up if you e.g. timeout connecting to RE, so we diff --git a/app/buck2_execute_impl/src/executors/stacked.rs b/app/buck2_execute_impl/src/executors/stacked.rs index d0fef7f8f74ca..0d9c2937fa896 100644 --- a/app/buck2_execute_impl/src/executors/stacked.rs +++ b/app/buck2_execute_impl/src/executors/stacked.rs @@ -14,7 +14,7 @@ use buck2_execute::execute::prepared::PreparedCommandExecutor; use buck2_execute::execute::prepared::PreparedCommandOptionalExecutor; use buck2_execute::execute::request::ExecutorPreference; use buck2_execute::execute::result::CommandExecutionResult; -use more_futures::cancellation::CancellationContext; +use buck2_futures::cancellation::CancellationContext; pub struct StackedExecutor { pub optional: O, diff --git a/app/buck2_execute_impl/src/executors/to_re_platform.rs b/app/buck2_execute_impl/src/executors/to_re_platform.rs new file mode 100644 index 0000000000000..366ba8fbc5d44 --- /dev/null +++ b/app/buck2_execute_impl/src/executors/to_re_platform.rs @@ -0,0 +1,30 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use buck2_core::execution_types::executor_config::RePlatformFields; +use remote_execution as RE; + +pub trait RePlatformFieldsToRePlatform { + fn to_re_platform(&self) -> RE::Platform; +} + +impl RePlatformFieldsToRePlatform for RePlatformFields { + fn to_re_platform(&self) -> RE::Platform { + RE::Platform { + properties: self + .properties + .iter() + .map(|(k, v)| RE::Property { + name: k.clone(), + value: v.clone(), + }) + .collect(), + } + } +} diff --git a/app/buck2_execute_impl/src/executors/worker.rs b/app/buck2_execute_impl/src/executors/worker.rs index ff210ebd9588e..5ad269eb5ac8d 100644 --- a/app/buck2_execute_impl/src/executors/worker.rs +++ b/app/buck2_execute_impl/src/executors/worker.rs @@ -16,7 +16,6 @@ use buck2_common::client_utils::get_channel_uds; use buck2_common::client_utils::retrying; use buck2_common::liveliness_observer::LivelinessGuard; use buck2_common::liveliness_observer::LivelinessObserver; -use buck2_common::result::SharedError; use buck2_core::fs::fs_util; use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; use buck2_core::fs::paths::file_name::FileName; @@ -45,7 +44,7 @@ use indexmap::IndexMap; use tokio::task::JoinHandle; use tonic::transport::Channel; -#[derive(thiserror::Error, Debug)] +#[derive(buck2_error::Error, Debug)] pub enum WorkerInitError { #[error("Worker failed to spawn: {0}")] SpawnFailed(String), @@ -59,7 +58,7 @@ pub enum WorkerInitError { ConnectionTimeout(f64, String), /// Any error not related to worker behavior #[error("Error initializing worker `{0}`")] - InternalError(SharedError), + InternalError(buck2_error::Error), } impl WorkerInitError { @@ -73,6 +72,7 @@ impl WorkerInitError { command: worker_spec.exe.clone(), env: request.env().clone(), }; + let manager = manager.with_execution_kind(execution_kind.clone()); match self { WorkerInitError::EarlyExit { @@ -124,6 +124,7 @@ fn spawn_via_forkserver( stdout_path: &AbsNormPathBuf, stderr_path: &AbsNormPathBuf, socket_path: &AbsNormPathBuf, + graceful_shutdown_timeout_s: Option, ) -> JoinHandle> { use std::os::unix::ffi::OsStrExt; @@ -147,6 +148,7 @@ fn spawn_via_forkserver( stdout: stdout_path.as_os_str().as_bytes().into(), stderr: stderr_path.as_os_str().as_bytes().into(), }), + graceful_shutdown_timeout_s, }; apply_local_execution_environment(&mut req, &working_directory, env, None); let res = forkserver @@ -174,6 +176,7 @@ fn spawn_via_forkserver( _stdout_path: &AbsNormPathBuf, _stderr_path: &AbsNormPathBuf, _socket_path: &AbsNormPathBuf, + _graceful_shutdown_timeout_s: Option, ) -> JoinHandle> { unreachable!("workers should not be initialized off unix") } @@ -184,6 +187,8 @@ async fn spawn_worker( root: &AbsNormPathBuf, forkserver: ForkserverClient, dispatcher: EventDispatcher, + graceful_shutdown_timeout_s: Option, + check_child_liveness: bool, ) -> Result { // Use fixed length path at /tmp to avoid 108 character limit for unix domain sockets let dir_name = format!("{}-{}", dispatcher.trace_id(), worker_spec.id); @@ -225,13 +230,14 @@ async fn spawn_worker( &stdout_path, &stderr_path, &socket_path, + graceful_shutdown_timeout_s, ); let initial_delay = Duration::from_millis(50); let max_delay = Duration::from_millis(500); // Might want to make this configurable, and/or measure impact of worker initialization on critical path let timeout = Duration::from_secs(60); - let channel = { + let (channel, check_exit) = { let stdout_path = &stdout_path; let stderr_path = &stderr_path; let socket_path = &socket_path; @@ -246,18 +252,20 @@ async fn spawn_worker( spawn_fut .await 
.map_err(|e| WorkerInitError::InternalError(e.into()))? - }; + } + .boxed(); futures::pin_mut!(connect); - futures::pin_mut!(check_exit); match futures::future::select(connect, check_exit).await { - futures::future::Either::Left((connection_result, _)) => match connection_result { - Ok(channel) => Ok(channel), - Err(e) => Err(WorkerInitError::ConnectionTimeout( - timeout.as_secs_f64(), - e.to_string(), - )), - }, + futures::future::Either::Left((connection_result, check_exit)) => { + match connection_result { + Ok(channel) => Ok((channel, check_exit)), + Err(e) => Err(WorkerInitError::ConnectionTimeout( + timeout.as_secs_f64(), + e.to_string(), + )), + } + } futures::future::Either::Right((command_result, _)) => Err(match command_result { Ok(GatherOutputStatus::SpawnFailed(e)) => WorkerInitError::SpawnFailed(e), Ok(GatherOutputStatus::Finished { exit_code, .. }) => { @@ -281,10 +289,21 @@ async fn spawn_worker( }? }; + let (child_exited_observer, child_exited_guard) = LivelinessGuard::create(); + tokio::spawn(async move { + drop(check_exit.await); + if check_child_liveness { + drop(child_exited_guard); + } else { + child_exited_guard.forget(); + } + }); + tracing::info!("Connected to socket for spawned worker: {}", socket_path); let client = WorkerClient::new(channel); Ok(WorkerHandle::new( client, + child_exited_observer, stdout_path, stderr_path, liveliness_guard, @@ -296,14 +315,18 @@ type WorkerFuture = Shared, Arc>>, brokers: Arc>>>, + graceful_shutdown_timeout_s: Option, + check_child_liveness: bool, } impl WorkerPool { - pub fn new() -> WorkerPool { + pub fn new(graceful_shutdown_timeout_s: Option, check_child_liveness: bool) -> WorkerPool { tracing::info!("Creating new WorkerPool"); WorkerPool { workers: Arc::new(parking_lot::Mutex::new(HashMap::default())), brokers: Arc::new(parking_lot::Mutex::new(HashMap::default())), + graceful_shutdown_timeout_s, + check_child_liveness, } } @@ -338,8 +361,20 @@ impl WorkerPool { let worker_spec = worker_spec.clone(); let root = root.clone(); let env: Vec<(OsString, OsString)> = env.into_iter().collect(); + let graceful_shutdown_timeout_s = self.graceful_shutdown_timeout_s; + let check_child_liveness = self.check_child_liveness; let fut = async move { - match spawn_worker(&worker_spec, env, &root, forkserver, dispatcher).await { + match spawn_worker( + &worker_spec, + env, + &root, + forkserver, + dispatcher, + graceful_shutdown_timeout_s, + check_child_liveness, + ) + .await + { Ok(worker) => Ok(Arc::new(worker)), Err(e) => Err(Arc::new(e)), } @@ -355,6 +390,7 @@ impl WorkerPool { pub struct WorkerHandle { client: WorkerClient, + child_exited_observer: Arc, stdout_path: AbsNormPathBuf, stderr_path: AbsNormPathBuf, _liveliness_guard: LivelinessGuard, @@ -363,12 +399,14 @@ pub struct WorkerHandle { impl WorkerHandle { fn new( client: WorkerClient, + child_exited_observer: Arc, stdout_path: AbsNormPathBuf, stderr_path: AbsNormPathBuf, liveliness_guard: LivelinessGuard, ) -> Self { Self { client, + child_exited_observer, stdout_path, stderr_path, _liveliness_guard: liveliness_guard, @@ -397,6 +435,7 @@ impl WorkerHandle { &self, args: &[String], env: Vec<(OsString, OsString)>, + timeout: Option, ) -> (GatherOutputStatus, Vec, Vec) { tracing::info!( "Sending worker command:\nExecuteCommand {{ argv: {:?}, env: {:?} }}\n", @@ -406,29 +445,55 @@ impl WorkerHandle { let argv: Vec> = args.iter().map(|s| s.as_str().into()).collect(); let env: Vec = env_entries(&env); - let request = ExecuteCommand { argv, env }; - let response = 
self.client.clone().execute(request).await; + let request = ExecuteCommand { + argv, + env, + timeout_s: timeout.map(|v| v.as_secs()), + }; - match response { - Ok(response) => { - let exec_response: ExecuteResponse = response.into_inner(); - tracing::info!("Worker response:\n{:?}\n", exec_response); - ( - GatherOutputStatus::Finished { - exit_code: exec_response.exit_code, - execution_stats: None, - }, - vec![], - exec_response.stderr.into(), - ) + let mut client = self.client.clone(); + tokio::select! { + response = client.execute(request) => { + match response { + Ok(response) => { + let exec_response: ExecuteResponse = response.into_inner(); + tracing::info!("Worker response:\n{:?}\n", exec_response); + if let Some(timeout) = exec_response.timed_out_after_s { + ( + GatherOutputStatus::TimedOut(Duration::from_secs(timeout)), + vec![], + exec_response.stderr.into(), + ) + } else { + ( + GatherOutputStatus::Finished { + exit_code: exec_response.exit_code, + execution_stats: None, + }, + vec![], + exec_response.stderr.into(), + ) + } + } + Err(err) => { + ( + GatherOutputStatus::SpawnFailed(format!( + "Error sending ExecuteCommand to worker: {:?}, see worker logs:\n{}\n{}", + err, self.stdout_path, self.stderr_path, + )), + // stdout/stderr logs for worker are for multiple commands, probably do not want to dump contents here + vec![], + vec![], + ) + } + } } - Err(err) => { + _ = self.child_exited_observer.while_alive() => { ( GatherOutputStatus::SpawnFailed(format!( - "Error sending ExecuteCommand to worker: {:?}, see worker logs:\n{}\n{}", - err, self.stdout_path, self.stderr_path, + "Worker exited while running command, see worker logs:\n{}\n{}", + self.stdout_path, self.stderr_path, )), - // stdout/stderr logs for worker are for multiple commands, probably do not want to dump contents here vec![], vec![], ) diff --git a/app/buck2_execute_impl/src/lib.rs b/app/buck2_execute_impl/src/lib.rs index f53e37f963481..04e8bb1dd24ba 100644 --- a/app/buck2_execute_impl/src/lib.rs +++ b/app/buck2_execute_impl/src/lib.rs @@ -7,12 +7,16 @@ * of this source tree. */ +#![feature(error_generic_member_access)] #![feature(try_blocks)] #![feature(box_patterns)] #![feature(try_trait_v2)] #![feature(control_flow_enum)] +#![feature(used_with_arg)] +#![feature(let_chains)] pub mod executors; pub mod low_pass_filter; pub mod materializers; pub mod re; +mod storage_resource_exhausted; diff --git a/app/buck2_execute_impl/src/low_pass_filter.rs b/app/buck2_execute_impl/src/low_pass_filter.rs index 520056e7514c7..bee6bd64679cb 100644 --- a/app/buck2_execute_impl/src/low_pass_filter.rs +++ b/app/buck2_execute_impl/src/low_pass_filter.rs @@ -59,7 +59,6 @@ impl LowPassFilter { /// To make things predictable, we synchronously increment the accessors count. This ensures /// that there is no ambiguity about whether an accessor has already incremented the count or /// not depending on whether it was polled. - #[allow(clippy::await_holding_lock)] // wait() actually releases the lock. 
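+    // Illustrative usage sketch (constructor and helper names are assumptions,
+    // not part of this crate's verified API): accessors queue until their
+    // cumulative weight fits under the filter's capacity, and dropping the
+    // guard releases the weight to waiters:
+    //
+    //     let filter = LowPassFilter::new(8);   // assumed constructor
+    //     let guard = filter.access(2).await;   // resolves once 2 units fit
+    //     do_guarded_io().await;                // assumed workload
+    //     drop(guard);                          // frees the 2 units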
#[allow(clippy::manual_async_fn)] // so you don't need to poll before access() takes effect pub fn access(&self, weight: usize) -> impl Future> { let go = { @@ -114,7 +113,6 @@ impl Drop for LowPassFilterGuard<'_> { #[cfg(test)] mod tests { - use futures::future::FutureExt; use super::*; diff --git a/app/buck2_execute_impl/src/materializers.rs b/app/buck2_execute_impl/src/materializers.rs new file mode 100644 index 0000000000000..a2741d1ad56a0 --- /dev/null +++ b/app/buck2_execute_impl/src/materializers.rs @@ -0,0 +1,13 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub mod deferred; +pub mod immediate; +pub mod io; +pub mod sqlite; diff --git a/app/buck2_execute_impl/src/materializers/deferred.rs b/app/buck2_execute_impl/src/materializers/deferred.rs new file mode 100644 index 0000000000000..282a385e6f522 --- /dev/null +++ b/app/buck2_execute_impl/src/materializers/deferred.rs @@ -0,0 +1,2553 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub mod clean_stale; +mod extension; +mod file_tree; +mod io_handler; +mod subscriptions; + +#[cfg(test)] +mod tests; + +use std::collections::HashSet; +use std::collections::VecDeque; +use std::fmt::Formatter; +use std::pin::Pin; +use std::sync::atomic::AtomicU64; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering; +use std::sync::Arc; +use std::task::Context; +use std::task::Poll; + +use allocative::Allocative; +use anyhow::Context as _; +use async_trait::async_trait; +use buck2_common::file_ops::FileMetadata; +use buck2_common::file_ops::TrackedFileDigest; +use buck2_common::liveliness_observer::LivelinessGuard; +use buck2_core::buck2_env_anyhow; +use buck2_core::fs::project::ProjectRoot; +use buck2_core::fs::project_rel_path::ProjectRelativePath; +use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; +use buck2_core::soft_error; +use buck2_data::error::ErrorTag; +use buck2_directory::directory::directory::Directory; +use buck2_directory::directory::directory_iterator::DirectoryIterator; +use buck2_directory::directory::directory_iterator::DirectoryIteratorPathStack; +use buck2_directory::directory::directory_ref::DirectoryRef; +use buck2_directory::directory::entry::DirectoryEntry; +use buck2_directory::directory::walk::unordered_entry_walk; +use buck2_error::AnyhowContextForError; +use buck2_error::BuckErrorContext; +use buck2_events::dispatch::current_span; +use buck2_events::dispatch::get_dispatcher; +use buck2_events::dispatch::get_dispatcher_opt; +use buck2_events::dispatch::with_dispatcher_async; +use buck2_events::dispatch::EventDispatcher; +use buck2_events::span::SpanId; +use buck2_execute::artifact_value::ArtifactValue; +use buck2_execute::digest_config::DigestConfig; +use buck2_execute::directory::ActionDirectory; +use buck2_execute::directory::ActionDirectoryEntry; +use buck2_execute::directory::ActionDirectoryMember; +use buck2_execute::directory::ActionDirectoryRef; +use buck2_execute::directory::ActionSharedDirectory; +use 
buck2_execute::execute::blocking::BlockingExecutor; +use buck2_execute::materialize::materializer::ArtifactNotMaterializedReason; +use buck2_execute::materialize::materializer::CasDownloadInfo; +use buck2_execute::materialize::materializer::CasNotFoundError; +use buck2_execute::materialize::materializer::CopiedArtifact; +use buck2_execute::materialize::materializer::DeclareMatchOutcome; +use buck2_execute::materialize::materializer::DeferredMaterializerExtensions; +use buck2_execute::materialize::materializer::HttpDownloadInfo; +use buck2_execute::materialize::materializer::MaterializationError; +use buck2_execute::materialize::materializer::Materializer; +use buck2_execute::materialize::materializer::WriteRequest; +use buck2_execute::output_size::OutputSize; +use buck2_execute::re::manager::ReConnectionManager; +use buck2_futures::cancellation::CancellationContext; +use buck2_http::HttpClient; +use buck2_util::threads::check_stack_overflow; +use buck2_util::threads::thread_spawn; +use buck2_wrapper_common::invocation_id::TraceId; +use chrono::DateTime; +use chrono::Duration; +use chrono::Utc; +use derivative::Derivative; +use derive_more::Display; +use dupe::Dupe; +use dupe::OptionDupedExt; +use futures::future; +use futures::future::BoxFuture; +use futures::future::FutureExt; +use futures::future::Shared; +use futures::future::TryFutureExt; +use futures::stream::BoxStream; +use futures::stream::FuturesOrdered; +use futures::stream::Stream; +use futures::stream::StreamExt; +use futures::Future; +use gazebo::prelude::*; +use itertools::Itertools; +use parking_lot::Mutex; +use pin_project::pin_project; +use tokio::runtime::Handle; +use tokio::sync::mpsc; +use tokio::sync::mpsc::UnboundedReceiver; +use tokio::sync::oneshot; +use tokio::sync::oneshot::error::TryRecvError; +use tokio::task::JoinHandle; +use tokio::time::Instant; +use tokio::time::Interval; +use tracing::instrument; + +use crate::materializers::deferred::clean_stale::CleanResult; +use crate::materializers::deferred::clean_stale::CleanStaleArtifactsCommand; +use crate::materializers::deferred::clean_stale::CleanStaleConfig; +use crate::materializers::deferred::extension::ExtensionCommand; +use crate::materializers::deferred::file_tree::FileTree; +use crate::materializers::deferred::io_handler::DefaultIoHandler; +use crate::materializers::deferred::io_handler::IoHandler; +use crate::materializers::deferred::subscriptions::MaterializerSubscriptionOperation; +use crate::materializers::deferred::subscriptions::MaterializerSubscriptions; +use crate::materializers::sqlite::MaterializerState; +use crate::materializers::sqlite::MaterializerStateSqliteDb; + +/// Materializer implementation that defers materialization of declared +/// artifacts until they are needed (i.e. `ensure_materialized` is called). +/// +/// # Important +/// +/// This materializer defers both CAS fetches and local copies. Therefore, one +/// needs to be careful when choosing to call `ensure_materialized`. +/// Between `declare` and `ensure` calls, the local files could have changed. +/// +/// This limits us to only "safely" using the materializer within the +/// computation of a build rule, and only to materialize inputs or outputs of +/// the rule, not random artifacts/paths. That's because: +/// - file changes before/after a build are handled by DICE, which invalidates +/// the outputs that depend on it. The materializer ends up having the wrong +/// information about these outputs. 
But because it's only used within the +/// build rules, the affected rule is recomputed and therefore has its +/// artifacts re-declared. So when `ensure` is called the materializer has +/// up-to-date information about the artifacts. +/// - file changes during a build are not properly supported by Buck and +/// treated as undefined behaviour, so there's no need to worry about them. +#[derive(Allocative)] +pub struct DeferredMaterializerAccessor { + /// Sender to emit commands to the command loop. See `MaterializerCommand`. + #[allocative(skip)] + command_sender: Arc>, + /// Handle of the command loop thread. Aborted on Drop. + /// This thread serves as a queue for declare/ensure requests, making + /// sure only one executes at a time and in the order they came in. + /// TODO(rafaelc): aim to replace it with a simple mutex. + #[allocative(skip)] + command_thread: Option>, + /// Determines what to do on `try_materialize_final_artifact`: if true, + /// materializes them, otherwise skips them. + materialize_final_artifacts: bool, + defer_write_actions: bool, + + io: Arc, + + /// Tracked for logging purposes. + materializer_state_info: buck2_data::MaterializerStateInfo, + + stats: Arc, + + /// Logs verbose events about materializer to the event log when enabled. + verbose_materializer_log: bool, +} + +pub type DeferredMaterializer = DeferredMaterializerAccessor; + +impl Drop for DeferredMaterializerAccessor { + fn drop(&mut self) { + // We don't try to stop the underlying thread, since in practice when we drop the + // DeferredMaterializer we are about to just terminate the process. + } +} + +/// Statistics we collect while operating the Deferred Materializer. +#[derive(Allocative, Default)] +pub struct DeferredMaterializerStats { + declares: AtomicU64, + declares_reused: AtomicU64, +} + +fn access_time_update_max_buffer_size() -> anyhow::Result { + buck2_env_anyhow!("BUCK_ACCESS_TIME_UPDATE_MAX_BUFFER_SIZE", type=usize, default=5000) +} + +pub struct DeferredMaterializerConfigs { + pub materialize_final_artifacts: bool, + pub defer_write_actions: bool, + pub ttl_refresh: TtlRefreshConfiguration, + pub update_access_times: AccessTimesUpdates, + pub verbose_materializer_log: bool, + pub clean_stale_config: Option, +} + +pub struct TtlRefreshConfiguration { + pub frequency: std::time::Duration, + pub min_ttl: Duration, + pub enabled: bool, +} + +#[derive(Clone, Copy, Debug, Dupe, PartialEq)] +pub enum AccessTimesUpdates { + /// Flushes when the buffer is full and periodically + Full, + ///Flushes only when buffer is full + Partial, + /// Does not flush at all + Disabled, +} + +#[derive(Debug, buck2_error::Error)] +pub enum AccessTimesUpdatesError { + #[error( + "Invalid value for buckconfig `[buck2] update_access_times`. Got `{0}`. Expected one of `full`, `partial` or `disabled`." + )] + InvalidValueForConfig(String), +} + +impl AccessTimesUpdates { + pub fn try_new_from_config_value(config_value: Option<&str>) -> anyhow::Result { + match config_value { + None | Some("") | Some("full") => Ok(AccessTimesUpdates::Full), + Some("partial") => Ok(AccessTimesUpdates::Partial), + Some("disabled") => Ok(AccessTimesUpdates::Disabled), + Some(v) => Err(AccessTimesUpdatesError::InvalidValueForConfig(v.to_owned()).into()), + } + } +} + +#[derive(Copy, Dupe, Clone)] +struct MaterializerCounters { + sent: &'static AtomicUsize, + received: &'static AtomicUsize, +} + +impl MaterializerCounters { + /// New counters. Note that this leaks the underlying data. See comments on MaterializerSender. 
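+    // A minimal sketch of the `Box::leak` pattern used below (illustrative
+    // only): leaking converts an owned allocation into a `&'static` reference,
+    // which is what lets these counters be copied into both the sender and
+    // receiver halves without any lifetime plumbing:
+    //
+    //     let sent: &'static AtomicUsize = Box::leak(Box::new(AtomicUsize::new(0)));
+    //     sent.fetch_add(1, Ordering::Relaxed); // never freed, valid forever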
+    fn leak_new() -> Self {
+        Self {
+            sent: Box::leak(Box::new(AtomicUsize::new(0))),
+            received: Box::leak(Box::new(AtomicUsize::new(0))),
+        }
+    }
+
+    fn ack_received(&self) {
+        self.received.fetch_add(1, Ordering::Relaxed);
+    }
+
+    fn queue_size(&self) -> usize {
+        self.sent
+            .load(Ordering::Relaxed)
+            .saturating_sub(self.received.load(Ordering::Relaxed))
+    }
+}
+
+pub struct MaterializerSender {
+    /// High priority commands are processed in order.
+    high_priority: mpsc::UnboundedSender>,
+    /// Low priority commands are processed in order relative to each other, but high priority
+    /// commands can be reordered ahead of them.
+    low_priority: mpsc::UnboundedSender,
+    counters: MaterializerCounters,
+    /// Liveliness guard held while clean stale executes, dropped to interrupt clean.
+    clean_guard: Mutex>,
+}
+
+impl MaterializerSender {
+    fn send(
+        &self,
+        command: MaterializerCommand,
+    ) -> Result<(), mpsc::error::SendError>> {
+        *self.clean_guard.lock() = None;
+        let res = self.high_priority.send(command);
+        self.counters.sent.fetch_add(1, Ordering::Relaxed);
+        res
+    }
+
+    fn send_low_priority(
+        &self,
+        command: LowPriorityMaterializerCommand,
+    ) -> Result<(), mpsc::error::SendError> {
+        let res = self.low_priority.send(command);
+        self.counters.sent.fetch_add(1, Ordering::Relaxed);
+        res
+    }
+}
+
+struct MaterializerReceiver {
+    high_priority: mpsc::UnboundedReceiver>,
+    low_priority: mpsc::UnboundedReceiver,
+    counters: MaterializerCounters,
+}
+
+pub(crate) struct DeferredMaterializerCommandProcessor {
+    io: Arc,
+    sqlite_db: Option,
+    /// The runtime the deferred materializer will spawn futures on. This is normally the runtime
+    /// used by the rest of Buck.
+    rt: Handle,
+    defer_write_actions: bool,
+    log_buffer: LogBuffer,
+    /// Keep track of artifact versions to avoid callbacks clobbering state if the state has moved
+    /// forward.
+    version_tracker: VersionTracker,
+    /// Send messages back to the materializer.
+    command_sender: Arc>,
+    /// The actual materializer state.
+    tree: ArtifactTree,
+    /// Active subscriptions
+    subscriptions: MaterializerSubscriptions,
+    /// History of refreshes. This *does* grow without bound, but considering the data is pretty
+    /// small and we create it infrequently, that's fine.
+    ttl_refresh_history: Vec,
+    /// The current ttl_refresh instance, if any exists.
+    ttl_refresh_instance: Option, anyhow::Result<()>)>>,
+    cancellations: &'static CancellationContext<'static>,
+    stats: Arc,
+    access_times_buffer: Option>,
+    verbose_materializer_log: bool,
+    daemon_dispatcher: EventDispatcher,
+}
+
+struct TtlRefreshHistoryEntry {
+    at: DateTime,
+    outcome: Option>,
+}
+
+// NOTE: This doesn't derive `Error` and that's on purpose. We don't want to make it easy (or
+// possible, in fact) to add `context` to this SharedMaterializingError and lose the variant.
+#[derive(Debug, Clone, Dupe)]
+pub enum SharedMaterializingError {
+    Error(buck2_error::Error),
+    NotFound(CasNotFoundError),
+}
+
+#[derive(buck2_error::Error, Debug)]
+pub enum MaterializeEntryError {
+    #[error(transparent)]
+    Error(anyhow::Error),
+
+    /// The artifact wasn't found. This typically means it expired in the CAS.
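+    // Hedged sketch of how callers can keep the CAS-expiry case distinguishable
+    // (helper names are hypothetical): the `From` impls below forward `NotFound`
+    // into `SharedMaterializingError::NotFound` rather than flattening it:
+    //
+    //     match shared_err {
+    //         SharedMaterializingError::NotFound(e) => handle_expired_cas(e), // hypothetical
+    //         SharedMaterializingError::Error(e) => report_failure(e),        // hypothetical
+    //     }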
+    #[error(transparent)]
+    NotFound(CasNotFoundError),
+}
+
+impl From for MaterializeEntryError {
+    fn from(e: anyhow::Error) -> MaterializeEntryError {
+        Self::Error(e)
+    }
+}
+
+impl From for SharedMaterializingError {
+    fn from(e: MaterializeEntryError) -> SharedMaterializingError {
+        match e {
+            MaterializeEntryError::Error(e) => Self::Error(e.into()),
+            MaterializeEntryError::NotFound(e) => Self::NotFound(e),
+        }
+    }
+}
+
+/// A future that is materializing on a separate task spawned by the materializer
+type MaterializingFuture = Shared>>;
+/// A future that is cleaning paths on a separate task spawned by the materializer
+type CleaningFuture = Shared>>;
+
+#[derive(Clone)]
+enum ProcessingFuture {
+    Materializing(MaterializingFuture),
+    Cleaning(CleaningFuture),
+}
+
+/// Message taken by the `DeferredMaterializer`'s command loop.
+enum MaterializerCommand {
+    // [Materializer trait methods -> Command thread]
+    /// Takes a list of file paths, computes the materialized file paths of all
+    /// of them, and sends the result through the oneshot.
+    /// See `Materializer::get_materialized_file_paths` for more information.
+    GetMaterializedFilePaths(
+        Vec,
+        oneshot::Sender>>,
+    ),
+
+    /// Declares that a set of artifacts already exist.
+    DeclareExisting(
+        Vec<(ProjectRelativePathBuf, ArtifactValue)>,
+        Option,
+        Option,
+    ),
+
+    /// Declares an artifact: its path, value, and how to materialize it.
+    Declare(
+        ProjectRelativePathBuf,
+        ArtifactValue,
+        Box, // Boxed to avoid growing all variants
+        EventDispatcher,
+    ),
+
+    MatchArtifacts(
+        Vec<(ProjectRelativePathBuf, ArtifactValue)>,
+        oneshot::Sender,
+    ),
+
+    HasArtifact(ProjectRelativePathBuf, oneshot::Sender),
+
+    /// Declares that given paths are no longer eligible to be materialized by this materializer.
+    /// This typically should reflect a change made to the underlying filesystem, either because
+    /// the file was created, or because it was removed.
+    InvalidateFilePaths(
+        Vec,
+        oneshot::Sender,
+        EventDispatcher,
+    ),
+
+    /// Takes a list of artifact paths, and materializes all artifacts in the
+    /// list that have been declared but not yet been materialized. When the
+    /// materialization starts, a future is sent back through the provided
+    /// Sender; this future will be resolved when the materialization
+    /// concludes (whether successfully or not).
+    Ensure(
+        Vec,
+        EventDispatcher,
+        oneshot::Sender>>,
+    ),
+
+    Subscription(MaterializerSubscriptionOperation),
+
+    Extension(Box>),
+
+    /// Terminate command processor loop, used by tests
+    #[allow(dead_code)]
+    Abort,
+}
+
+impl std::fmt::Debug for MaterializerCommand {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            MaterializerCommand::GetMaterializedFilePaths(paths, _) => {
+                write!(f, "GetMaterializedFilePaths({:?}, _)", paths,)
+            }
+            MaterializerCommand::DeclareExisting(paths, current_span, trace_id) => {
+                write!(
+                    f,
+                    "DeclareExisting({:?}, {:?}, {:?})",
+                    paths, current_span, trace_id
+                )
+            }
+            MaterializerCommand::Declare(path, value, method, _dispatcher) => {
+                write!(f, "Declare({:?}, {:?}, {:?})", path, value, method,)
+            }
+            MaterializerCommand::MatchArtifacts(paths, _) => {
+                write!(f, "MatchArtifacts({:?})", paths)
+            }
+            MaterializerCommand::HasArtifact(path, _) => {
+                write!(f, "HasArtifact({:?})", path)
+            }
+            MaterializerCommand::InvalidateFilePaths(paths, ..)
            => {
+                write!(f, "InvalidateFilePaths({:?})", paths)
+            }
+            MaterializerCommand::Ensure(paths, _, _) => write!(f, "Ensure({:?}, _)", paths,),
+            MaterializerCommand::Subscription(op) => write!(f, "Subscription({:?})", op,),
+            MaterializerCommand::Extension(ext) => write!(f, "Extension({:?})", ext),
+            MaterializerCommand::Abort => write!(f, "Abort"),
+        }
+    }
+}
+
+/// Materializer commands that can be reordered with regard to other commands.
+#[derive(Debug)]
+enum LowPriorityMaterializerCommand {
+    /// [Materialization task -> Command thread]
+    /// Notifies the command thread that an artifact was materialized. It takes
+    /// the artifact path and the version that was materialized, such that if
+    /// a newer version was declared during materialization - which should not
+    /// happen under normal conditions - we can react accordingly.
+    MaterializationFinished {
+        path: ProjectRelativePathBuf,
+        timestamp: DateTime,
+        version: Version,
+        result: Result<(), SharedMaterializingError>,
+    },
+
+    CleanupFinished {
+        path: ProjectRelativePathBuf,
+        version: Version,
+        result: Result<(), SharedMaterializingError>,
+    },
+}
+
+/// Tree that stores materialization data for each artifact. Used internally by
+/// the `DeferredMaterializer` to keep track of artifacts and how to
+/// materialize them.
+type ArtifactTree = FileTree>;
+
+/// The Version of a processing future associated with an artifact. We use this to know if we can
+/// clear the processing field when a callback is received, or if more work is expected.
+#[derive(Eq, PartialEq, Copy, Clone, Dupe, Debug, Ord, PartialOrd, Display)]
+pub struct Version(u64);
+
+#[derive(Debug)]
+struct VersionTracker(Version);
+
+impl VersionTracker {
+    fn new() -> Self {
+        // Each Declare bumps the version, so that if an artifact is declared
+        // a second time mid materialization of its previous version, we don't
+        // incorrectly assume we materialized the latest version. We start with
+        // 1 because any disk state restored will start with version 0.
+        Self(Version(1))
+    }
+
+    fn current(&self) -> Version {
+        self.0
+    }
+
+    /// Increment the current version, return the previous value
+    fn next(&mut self) -> Version {
+        let ret = self.current();
+        self.0.0 += 1;
+        ret
+    }
+}
+
+pub struct ArtifactMaterializationData {
+    /// Taken from `deps` of `ArtifactValue`. Used to materialize deps of the artifact.
+    deps: Option,
+    stage: ArtifactMaterializationStage,
+    /// An optional future that may be processing something at the current path
+    /// (for example, materializing or deleting). Any other future that needs to process
+    /// this path would need to wait on the existing future to finish.
+    /// TODO(scottcao): Turn this into a queue of pending futures.
+    processing: Processing,
+}
+
+/// Represents a processing future + the version at which it was issued. When receiving
+/// notifications about processing futures that finish, their changes are only applied if their
+/// version is greater than the current version.
+///
+/// The version is an internal counter that is shared between the current processing_fut and
+/// this data. When multiple operations are queued on an ArtifactMaterializationData, this
+/// allows us to identify which one is current.
+enum Processing {
+    Done(Version),
+    Active {
+        future: ProcessingFuture,
+        version: Version,
+    },
+}
+
+impl Processing {
+    fn current_version(&self) -> Version {
+        match self {
+            Self::Done(version) => *version,
+            Self::Active { version, ..
} => *version,
+        }
+    }
+
+    fn into_future(self) -> Option {
+        match self {
+            Self::Done(..) => None,
+            Self::Active { future, .. } => Some(future),
+        }
+    }
+}
+
+/// Fingerprint used to identify `ActionSharedDirectory`. We give it an explicit
+/// alias because `TrackedFileDigest` can look confusing.
+pub type ActionDirectoryFingerprint = TrackedFileDigest;
+
+/// Metadata used to identify an artifact entry without all of its content. Stored on materialized
+/// artifacts to check matching artifact optimizations. For `ActionSharedDirectory`, we use its fingerprint.
+/// For everything else (files, symlinks, and external symlinks), we use `ActionDirectoryMember`
+/// as is because it already holds the metadata we need.
+#[derive(Clone, Dupe, Debug)]
+pub struct ArtifactMetadata(pub ActionDirectoryEntry);
+
+#[derive(Clone, Dupe, Debug, Display)]
+#[display("DirectoryMetadata(digest:{},size:{})", fingerprint, total_size)]
+pub struct DirectoryMetadata {
+    pub fingerprint: ActionDirectoryFingerprint,
+    /// Size on disk, if the artifact is a directory.
+    /// Storing separately from ArtifactMetadata to avoid calculating when
+    /// checking matching artifacts.
+    pub total_size: u64,
+}
+
+impl ArtifactMetadata {
+    fn matches_entry(&self, entry: &ActionDirectoryEntry) -> bool {
+        match (&self.0, entry) {
+            (
+                DirectoryEntry::Dir(DirectoryMetadata { fingerprint, .. }),
+                DirectoryEntry::Dir(dir),
+            ) => fingerprint == dir.fingerprint(),
+            (DirectoryEntry::Leaf(l1), DirectoryEntry::Leaf(l2)) => {
+                // On Windows, the 'executable bit' absence can cause Buck2 to re-download identical artifacts.
+                // To avoid this, we exclude the executable bit from the comparison.
+                if cfg!(windows) {
+                    match (l1, l2) {
+                        (
+                            ActionDirectoryMember::File(meta1),
+                            ActionDirectoryMember::File(meta2),
+                        ) => return meta1.digest == meta2.digest,
+                        _ => (),
+                    }
+                }
+                l1 == l2
+            }
+            _ => false,
+        }
+    }
+
+    fn new(entry: &ActionDirectoryEntry) -> Self {
+        let new_entry = match entry {
+            DirectoryEntry::Dir(dir) => DirectoryEntry::Dir(DirectoryMetadata {
+                fingerprint: dir.fingerprint().dupe(),
+                total_size: entry.calc_output_count_and_bytes().bytes,
+            }),
+            DirectoryEntry::Leaf(leaf) => DirectoryEntry::Leaf(leaf.dupe()),
+        };
+        Self(new_entry)
+    }
+
+    fn size(&self) -> u64 {
+        match &self.0 {
+            DirectoryEntry::Dir(dir) => dir.total_size,
+            DirectoryEntry::Leaf(ActionDirectoryMember::File(file_metadata)) => {
+                file_metadata.digest.size()
+            }
+            DirectoryEntry::Leaf(_) => 0,
+        }
+    }
+}
+
+enum ArtifactMaterializationStage {
+    /// The artifact was declared, but the materialization hasn't started yet.
+    /// If it did start but end with an error, it returns to this stage.
+    /// When the artifact was declared, we spawn a deletion future to delete
+    /// all existing paths that conflict with the output paths.
+    Declared {
+        /// Taken from `entry` of `ArtifactValue`. Used to materialize the actual artifact.
+        entry: ActionDirectoryEntry,
+        method: Arc,
+    },
+    /// This artifact was materialized
+    Materialized {
+        /// Once the artifact is materialized, we don't need the full entry anymore.
+        /// We can throw away most of the entry and just keep some metadata used to
+        /// check if materialized artifact matches declared artifact.
+        metadata: ArtifactMetadata,
+        /// Used to clean older artifacts from buck-out.
+        last_access_time: DateTime,
+        /// Artifact declared by running daemon.
+        /// Should not be deleted without invalidating DICE nodes, which currently
+        /// means killing the daemon.
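+        // Assumed lifecycle note (not stated explicitly in this diff): entries
+        // restored from persisted disk state presumably start inactive, while a
+        // `declare_existing` by the running daemon marks them `active: true`;
+        // `has_artifact` further down also re-marks entries active on access.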
+ active: bool, + }, +} + +/// Different ways to materialize the files of an artifact. Some artifacts need +/// to be fetched from the CAS, others copied locally. +#[derive(Debug, Display)] +pub enum ArtifactMaterializationMethod { + /// The files must be copied from a local path. + #[display("local copy")] + LocalCopy( + /// A map `[dest => src]`, meaning that a file at + /// `{artifact_path}/{dest}/{p}` needs to be copied from `{src}/{p}`. + FileTree, + /// Raw list of copied artifacts, as received in `declare_copy`. + Vec, + ), + + #[display("write")] + Write(Arc), + + /// The files must be fetched from the CAS. + #[display("cas download (action: {})", info.origin)] + CasDownload { + /// The digest of the action that produced this output + info: Arc, + }, + + /// The file must be fetched over HTTP. + #[display("http download ({})", info)] + HttpDownload { info: HttpDownloadInfo }, + + #[cfg(test)] + Test, +} + +trait MaterializationMethodToProto { + fn to_proto(&self) -> buck2_data::MaterializationMethod; +} + +impl MaterializationMethodToProto for ArtifactMaterializationMethod { + fn to_proto(&self) -> buck2_data::MaterializationMethod { + match self { + ArtifactMaterializationMethod::LocalCopy { .. } => { + buck2_data::MaterializationMethod::LocalCopy + } + ArtifactMaterializationMethod::CasDownload { .. } => { + buck2_data::MaterializationMethod::CasDownload + } + ArtifactMaterializationMethod::Write { .. } => buck2_data::MaterializationMethod::Write, + ArtifactMaterializationMethod::HttpDownload { .. } => { + buck2_data::MaterializationMethod::HttpDownload + } + #[cfg(test)] + ArtifactMaterializationMethod::Test => unimplemented!(), + } + } +} + +#[async_trait] +impl Materializer for DeferredMaterializerAccessor { + fn name(&self) -> &str { + "deferred" + } + + async fn declare_existing( + &self, + artifacts: Vec<(ProjectRelativePathBuf, ArtifactValue)>, + ) -> anyhow::Result<()> { + let cmd = MaterializerCommand::DeclareExisting( + artifacts, + current_span(), + get_dispatcher_opt().map(|d| d.trace_id().dupe()), + ); + self.command_sender.send(cmd)?; + Ok(()) + } + + async fn declare_copy_impl( + &self, + path: ProjectRelativePathBuf, + value: ArtifactValue, + srcs: Vec, + _cancellations: &CancellationContext, + ) -> anyhow::Result<()> { + // TODO(rafaelc): get rid of this tree; it'd save a lot of memory. 
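+        // Shape of the tree built below, per the `LocalCopy` docs above
+        // (illustrative paths): declaring `out` copied from `gen/src` with a
+        // single file `a/b.txt` inserts one leaf:
+        //
+        //     dest "a/b.txt"  =>  src "gen/src/a/b.txt"
+        //
+        // i.e. each file under the declared artifact records the
+        // project-relative source path it must be copied from.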
+ let mut srcs_tree = FileTree::new(); + for copied_artifact in srcs.iter() { + let dest = copied_artifact.dest.strip_prefix(&path)?; + + { + let mut walk = unordered_entry_walk( + copied_artifact + .dest_entry + .as_ref() + .map_dir(Directory::as_ref), + ); + while let Some((path, entry)) = walk.next() { + if let DirectoryEntry::Leaf(ActionDirectoryMember::File(..)) = entry { + let path = path.get(); + let dest_iter = dest.iter().chain(path.iter()).map(|f| f.to_owned()); + let src = copied_artifact.src.join(&path); + srcs_tree.insert(dest_iter, src); + } + } + } + } + let cmd = MaterializerCommand::Declare( + path, + value, + Box::new(ArtifactMaterializationMethod::LocalCopy(srcs_tree, srcs)), + get_dispatcher(), + ); + self.command_sender.send(cmd)?; + Ok(()) + } + + async fn declare_cas_many_impl<'a, 'b>( + &self, + info: Arc, + artifacts: Vec<(ProjectRelativePathBuf, ArtifactValue)>, + _cancellations: &CancellationContext, + ) -> anyhow::Result<()> { + for (path, value) in artifacts { + let cmd = MaterializerCommand::Declare( + path, + value, + Box::new(ArtifactMaterializationMethod::CasDownload { info: info.dupe() }), + get_dispatcher(), + ); + self.command_sender.send(cmd)?; + } + Ok(()) + } + + async fn declare_http( + &self, + path: ProjectRelativePathBuf, + info: HttpDownloadInfo, + _cancellations: &CancellationContext, + ) -> anyhow::Result<()> { + let cmd = MaterializerCommand::Declare( + path, + ArtifactValue::file(info.metadata.dupe()), + Box::new(ArtifactMaterializationMethod::HttpDownload { info }), + get_dispatcher(), + ); + self.command_sender.send(cmd)?; + + Ok(()) + } + + async fn declare_write<'a>( + &self, + gen: Box anyhow::Result> + Send + 'a>, + ) -> anyhow::Result> { + if !self.defer_write_actions { + return self.io.immediate_write(gen).await; + } + + let contents = gen()?; + + let mut paths = Vec::with_capacity(contents.len()); + let mut values = Vec::with_capacity(contents.len()); + let mut methods = Vec::with_capacity(contents.len()); + + for WriteRequest { + path, + content, + is_executable, + } in contents + { + let digest = TrackedFileDigest::from_content( + &content, + self.io.digest_config().cas_digest_config(), + ); + + let meta = FileMetadata { + digest, + is_executable, + }; + + // NOTE: The zstd crate doesn't release extra capacity of its encoding buffer so it's + // important to do so here (or the compressed Vec is the same capacity as the input!). + let compressed_data = zstd::bulk::compress(&content, 0) + .with_context(|| format!("Error compressing {} bytes", content.len()))? 
+ .into_boxed_slice(); + + paths.push(path); + values.push(ArtifactValue::file(meta)); + methods.push(ArtifactMaterializationMethod::Write(Arc::new(WriteFile { + compressed_data, + decompressed_size: content.len(), + is_executable, + }))); + } + + for (path, (value, method)) in std::iter::zip(paths, std::iter::zip(values.iter(), methods)) + { + self.command_sender.send(MaterializerCommand::Declare( + path, + value.dupe(), + Box::new(method), + get_dispatcher(), + ))?; + } + + Ok(values) + } + + async fn declare_match( + &self, + artifacts: Vec<(ProjectRelativePathBuf, ArtifactValue)>, + ) -> anyhow::Result { + let (sender, recv) = oneshot::channel(); + + self.command_sender + .send(MaterializerCommand::MatchArtifacts(artifacts, sender))?; + + let is_match = recv + .await + .context("Recv'ing match future from command thread.")?; + + Ok(is_match.into()) + } + + async fn has_artifact_at(&self, path: ProjectRelativePathBuf) -> anyhow::Result { + let (sender, recv) = oneshot::channel(); + + self.command_sender + .send(MaterializerCommand::HasArtifact(path, sender))?; + + let has_artifact = recv + .await + .context("Recv'ing match future from command thread.")?; + + Ok(has_artifact) + } + + async fn invalidate_many(&self, paths: Vec) -> anyhow::Result<()> { + let (sender, recv) = oneshot::channel(); + + self.command_sender + .send(MaterializerCommand::InvalidateFilePaths( + paths, + sender, + get_dispatcher(), + ))?; + + // Wait on future to finish before invalidation can continue. + let invalidate_fut = recv.await?; + invalidate_fut.await.map_err(anyhow::Error::from) + } + + async fn materialize_many( + &self, + artifact_paths: Vec, + ) -> anyhow::Result>> { + let event_dispatcher = get_dispatcher(); + + // TODO: display [materializing] in superconsole + let (sender, recv) = oneshot::channel(); + self.command_sender + .send(MaterializerCommand::Ensure( + artifact_paths, + event_dispatcher, + sender, + )) + .context("Sending Ensure() command.")?; + let materialization_fut = recv + .await + .context("Receiving materialization future from command thread.")?; + Ok(materialization_fut) + } + + async fn try_materialize_final_artifact( + &self, + artifact_path: ProjectRelativePathBuf, + ) -> anyhow::Result { + if self.materialize_final_artifacts { + self.ensure_materialized(vec![artifact_path]).await?; + Ok(true) + } else { + Ok(false) + } + } + + async fn get_materialized_file_paths( + &self, + paths: Vec, + ) -> anyhow::Result>> { + if paths.is_empty() { + return Ok(Vec::new()); + } + let (sender, recv) = oneshot::channel(); + self.command_sender + .send(MaterializerCommand::GetMaterializedFilePaths(paths, sender))?; + Ok(recv.await?) + } + + fn as_deferred_materializer_extension(&self) -> Option<&dyn DeferredMaterializerExtensions> { + Some(self as _) + } + + fn log_materializer_state(&self, events: &EventDispatcher) { + events.instant_event(self.materializer_state_info.clone()) + } + + fn add_snapshot_stats(&self, snapshot: &mut buck2_data::Snapshot) { + snapshot.deferred_materializer_declares = self.stats.declares.load(Ordering::Relaxed); + snapshot.deferred_materializer_declares_reused = + self.stats.declares_reused.load(Ordering::Relaxed); + snapshot.deferred_materializer_queue_size = self.command_sender.counters.queue_size() as _; + } +} + +impl DeferredMaterializerAccessor { + /// Spawns two threads (`materialization_loop` and `command_loop`). + /// Creates and returns a new `DeferredMaterializer` that aborts those + /// threads when dropped. 
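+    // Illustrative construction of the configs consumed by `new` (field values
+    // are examples only, not defaults taken from this diff):
+    //
+    //     let configs = DeferredMaterializerConfigs {
+    //         materialize_final_artifacts: true,
+    //         defer_write_actions: true,
+    //         ttl_refresh: TtlRefreshConfiguration {
+    //             frequency: std::time::Duration::from_secs(600),
+    //             min_ttl: chrono::Duration::hours(8),
+    //             enabled: false,
+    //         },
+    //         update_access_times: AccessTimesUpdates::Full,
+    //         verbose_materializer_log: false,
+    //         clean_stale_config: None,
+    //     };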
+ pub fn new( + fs: ProjectRoot, + digest_config: DigestConfig, + buck_out_path: ProjectRelativePathBuf, + re_client_manager: Arc, + io_executor: Arc, + configs: DeferredMaterializerConfigs, + sqlite_db: Option, + sqlite_state: Option, + http_client: HttpClient, + daemon_dispatcher: EventDispatcher, + ) -> anyhow::Result { + let (high_priority_sender, high_priority_receiver) = mpsc::unbounded_channel(); + let (low_priority_sender, low_priority_receiver) = mpsc::unbounded_channel(); + + let counters = MaterializerCounters::leak_new(); + + let command_sender = Arc::new(MaterializerSender { + high_priority: high_priority_sender, + low_priority: low_priority_sender, + counters, + clean_guard: Mutex::new(None), + }); + + let command_receiver = MaterializerReceiver { + high_priority: high_priority_receiver, + low_priority: low_priority_receiver, + counters, + }; + + let stats = Arc::new(DeferredMaterializerStats::default()); + + let num_entries_from_sqlite = sqlite_state.as_ref().map_or(0, |s| s.len()) as u64; + let materializer_state_info = buck2_data::MaterializerStateInfo { + num_entries_from_sqlite, + }; + let access_times_buffer = + (!matches!(configs.update_access_times, AccessTimesUpdates::Disabled)) + .then(HashSet::new); + + let tree = ArtifactTree::initialize(sqlite_state); + + let io = Arc::new(DefaultIoHandler::new( + fs, + digest_config, + buck_out_path, + re_client_manager, + io_executor, + http_client, + )); + + let command_processor = { + let command_sender = command_sender.dupe(); + let rt = Handle::current(); + let stats = stats.dupe(); + let io = io.dupe(); + move |cancellations| DeferredMaterializerCommandProcessor { + io, + sqlite_db, + rt, + defer_write_actions: configs.defer_write_actions, + log_buffer: LogBuffer::new(25), + version_tracker: VersionTracker::new(), + command_sender, + tree, + subscriptions: MaterializerSubscriptions::new(), + ttl_refresh_history: Vec::new(), + ttl_refresh_instance: None, + cancellations, + stats, + access_times_buffer, + verbose_materializer_log: configs.verbose_materializer_log, + daemon_dispatcher, + } + }; + + let access_time_update_max_buffer_size = access_time_update_max_buffer_size()?; + + let command_thread = thread_spawn("buck2-dm", { + move || { + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .unwrap(); + + let cancellations = CancellationContext::never_cancelled(); + + rt.block_on(command_processor(cancellations).run( + command_receiver, + configs.ttl_refresh, + access_time_update_max_buffer_size, + configs.update_access_times, + configs.clean_stale_config, + )); + } + }) + .context("Cannot start materializer thread")?; + + Ok(Self { + command_thread: Some(command_thread), + command_sender, + materialize_final_artifacts: configs.materialize_final_artifacts, + defer_write_actions: configs.defer_write_actions, + io, + materializer_state_info, + stats, + verbose_materializer_log: configs.verbose_materializer_log, + }) + } +} + +/// Simple ring buffer for tracking recent commands, to be shown on materializer error +#[derive(Clone)] +struct LogBuffer { + inner: VecDeque, +} + +impl LogBuffer { + pub fn new(capacity: usize) -> Self { + Self { + inner: VecDeque::with_capacity(capacity), + } + } + + pub fn push(&mut self, item: String) { + if self.inner.len() == self.inner.capacity() { + self.inner.pop_front(); + self.inner.push_back(item); + } else { + self.inner.push_back(item); + } + } +} + +impl std::fmt::Display for LogBuffer { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result 
{ + write!(f, "{}", self.inner.iter().join("\n")) + } +} + +#[pin_project] +struct CommandStream { + high_priority: UnboundedReceiver>, + low_priority: UnboundedReceiver, + refresh_ttl_ticker: Option, + io_buffer_ticker: Interval, + clean_stale_ticker: Option, + clean_stale_fut: Option>>, +} + +enum Op { + Command(MaterializerCommand), + LowPriorityCommand(LowPriorityMaterializerCommand), + RefreshTtls, + Tick, + CleanStaleRequest, +} + +impl Stream for CommandStream { + type Item = Op; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.project(); + + if let Poll::Ready(Some(e)) = this.high_priority.poll_recv(cx) { + if let MaterializerCommand::Abort = e { + return Poll::Ready(None); + } + return Poll::Ready(Some(Op::Command(e))); + } + + if let Poll::Ready(Some(e)) = this.low_priority.poll_recv(cx) { + return Poll::Ready(Some(Op::LowPriorityCommand(e))); + } + + if let Some(ticker) = this.refresh_ttl_ticker.as_mut() { + if ticker.poll_tick(cx).is_ready() { + return Poll::Ready(Some(Op::RefreshTtls)); + } + } + + if this.io_buffer_ticker.poll_tick(cx).is_ready() { + return Poll::Ready(Some(Op::Tick)); + } + + // Ensure last clean completed before requesting a new one. + if let Some(fut) = this.clean_stale_fut.as_mut() { + if std::pin::pin!(fut).poll(cx).is_ready() { + *this.clean_stale_fut = None; + } + } else if let Some(ticker) = this.clean_stale_ticker.as_mut() { + if ticker.poll_tick(cx).is_ready() { + return Poll::Ready(Some(Op::CleanStaleRequest)); + } + } + + // We can never be done because we never drop the senders, so let's not bother. + Poll::Pending + } +} + +#[derive(Copy, Clone, Dupe)] +enum MaterializeStack<'a> { + Empty, + Child(&'a MaterializeStack<'a>, &'a ProjectRelativePath), +} + +impl<'a> Display for MaterializeStack<'a> { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + if let MaterializeStack::Empty = self { + return write!(f, "(empty)"); + } + + // Avoid recursion because we are fighting with stack overflow here, + // and we do not want another stack overflow when producing error message. + let mut stack = Vec::new(); + let mut current = *self; + while let MaterializeStack::Child(parent, path) = current { + stack.push(path); + current = *parent; + } + write!(f, "{}", stack.iter().rev().join(" -> ")) + } +} + +impl DeferredMaterializerCommandProcessor { + fn spawn_from_rt(rt: &Handle, f: F) -> JoinHandle + where + F: std::future::Future + Send + 'static, + F::Output: Send + 'static, + { + // FIXME(JakobDegen): Ideally there wouldn't be a `None` case, but I don't know this code + // well enough to be confident in removing it + match get_dispatcher_opt() { + Some(dispatcher) => rt.spawn(with_dispatcher_async(dispatcher, f)), + None => rt.spawn(f), + } + } + + fn spawn(&self, f: F) -> JoinHandle + where + F: std::future::Future + Send + 'static, + F::Output: Send + 'static, + { + Self::spawn_from_rt(&self.rt, f) + } + + /// Loop that runs for as long as the materializer is alive. + /// + /// It takes commands via the `Materializer` trait methods. 
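+    // Poll order summary for the loop below, as encoded by
+    // `CommandStream::poll_next` above (descriptive, no new behavior):
+    // high-priority commands first, then low-priority completions, then the TTL
+    // ticker, then the IO-buffer tick, and a clean-stale request only once any
+    // previous clean future has resolved.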
+    async fn run(
+        mut self,
+        commands: MaterializerReceiver,
+        ttl_refresh: TtlRefreshConfiguration,
+        access_time_update_max_buffer_size: usize,
+        access_time_updates: AccessTimesUpdates,
+        clean_stale_config: Option,
+    ) {
+        let MaterializerReceiver {
+            high_priority,
+            low_priority,
+            counters,
+        } = commands;
+
+        let refresh_ttl_ticker = if ttl_refresh.enabled {
+            Some(tokio::time::interval_at(
+                tokio::time::Instant::now() + ttl_refresh.frequency,
+                ttl_refresh.frequency,
+            ))
+        } else {
+            None
+        };
+
+        let clean_stale_ticker = clean_stale_config.as_ref().map(|clean_stale_config| {
+            tokio::time::interval_at(
+                tokio::time::Instant::now() + clean_stale_config.start_offset,
+                clean_stale_config.clean_period,
+            )
+        });
+
+        let io_buffer_ticker = tokio::time::interval(std::time::Duration::from_secs(5));
+
+        let mut stream = CommandStream {
+            high_priority,
+            low_priority,
+            refresh_ttl_ticker,
+            io_buffer_ticker,
+            clean_stale_ticker,
+            clean_stale_fut: None,
+        };
+
+        while let Some(op) = stream.next().await {
+            match op {
+                Op::Command(command) => {
+                    self.log_buffer.push(format!("{:?}", command));
+                    self.process_one_command(command);
+                    counters.ack_received();
+                    self.flush_access_times(access_time_update_max_buffer_size);
+                }
+                Op::LowPriorityCommand(command) => {
+                    self.log_buffer.push(format!("{:?}", command));
+                    self.process_one_low_priority_command(command);
+                    counters.ack_received();
+                }
+                Op::RefreshTtls => {
+                    // It'd be neat to just implement this in the refresh_stream itself and simply
+                    // have this loop implicitly drive it, but we can't do that as the stream's
+                    // and_then callback would have to capture `&tree`. So, instead, we store the
+                    // JoinHandle and just avoid scheduling more than one, though this means we'll
+                    // just miss ticks if we do take longer than a tick to run.
+
+                    self.poll_current_ttl_refresh();
+
+                    if self.ttl_refresh_instance.is_none() {
+                        let ttl_refresh = self
+                            .io
+                            .create_ttl_refresh(&self.tree, ttl_refresh.min_ttl)
+                            .map(|fut| {
+                                // We use a channel here and not a JoinHandle so we get a blocking
+                                // `try_recv`.
+                                let (tx, rx) = oneshot::channel();
+
+                                self.spawn(async {
+                                    let res = fut.await;
+                                    let _ignored = tx.send((Utc::now(), res));
+                                });
+
+                                rx
+                            });
+
+                        match ttl_refresh {
+                            Some(ttl_refresh) => {
+                                self.ttl_refresh_instance = Some(ttl_refresh);
+                            }
+                            None => self.ttl_refresh_history.push(TtlRefreshHistoryEntry {
+                                at: Utc::now(),
+                                outcome: None,
+                            }),
+                        }
+                    }
+                }
+                Op::Tick => {
+                    if matches!(access_time_updates, AccessTimesUpdates::Full) {
+                        // Force a periodic flush.
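+                        // A max buffer size of 0 means `size < max_buffer_size`
+                        // can never hold, so this drains whatever is buffered.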
+ self.flush_access_times(0); + }; + } + Op::CleanStaleRequest => { + if let Some(config) = clean_stale_config.as_ref() { + let dispatcher = self.daemon_dispatcher.dupe(); + let cmd = CleanStaleArtifactsCommand { + keep_since_time: chrono::Utc::now() - config.artifact_ttl, + dry_run: config.dry_run, + tracked_only: false, + dispatcher, + }; + stream.clean_stale_fut = Some(cmd.create_clean_fut(&mut self, None)); + } else { + // This should never happen + soft_error!( + "clean_stale_no_config", + anyhow::anyhow!("clean scheduled without being configured").into(), + quiet: true + ) + .unwrap(); + } + } + } + } + } + + fn process_one_command(&mut self, command: MaterializerCommand) { + match command { + // Entry point for `get_materialized_file_paths` calls + MaterializerCommand::GetMaterializedFilePaths(paths, result_sender) => { + let result = + paths.into_map(|p| self.tree.file_contents_path(p, self.io.digest_config())); + result_sender.send(result).ok(); + } + MaterializerCommand::DeclareExisting(artifacts, ..) => { + for (path, artifact) in artifacts { + self.declare_existing(&path, artifact); + } + } + // Entry point for `declare_{copy|cas}` calls + MaterializerCommand::Declare(path, value, method, event_dispatcher) => { + self.maybe_log_command(&event_dispatcher, || { + buck2_data::materializer_command::Data::Declare( + buck2_data::materializer_command::Declare { + path: path.to_string(), + }, + ) + }); + + self.declare(&path, value, method); + + if self.subscriptions.should_materialize_eagerly(&path) { + self.materialize_artifact(&path, event_dispatcher); + } + } + MaterializerCommand::MatchArtifacts(paths, sender) => { + let all_matches = paths + .into_iter() + .all(|(path, value)| self.match_artifact(path, value)); + sender.send(all_matches).ok(); + } + MaterializerCommand::HasArtifact(path, sender) => { + sender.send(self.has_artifact(path)).ok(); + } + MaterializerCommand::InvalidateFilePaths(paths, sender, event_dispatcher) => { + tracing::trace!( + paths = ?paths, + "invalidate paths", + ); + self.maybe_log_command(&event_dispatcher, || { + buck2_data::materializer_command::Data::InvalidateFilePaths( + buck2_data::materializer_command::InvalidateFilePaths { + paths: paths.iter().map(|p| p.to_string()).collect::>(), + }, + ) + }); + + let existing_futs = self + .tree + .invalidate_paths_and_collect_futures(paths, self.sqlite_db.as_mut()); + + // TODO: This probably shouldn't return a CleanFuture + sender + .send( + async move { + join_all_existing_futs(existing_futs?) 
+                            .await
+                            .map_err(buck2_error::Error::from)
+                        }
+                        .boxed()
+                        .shared(),
+                    )
+                    .ok();
+            }
+            // Entry point for `ensure_materialized` calls
+            MaterializerCommand::Ensure(paths, event_dispatcher, fut_sender) => {
+                self.maybe_log_command(&event_dispatcher, || {
+                    buck2_data::materializer_command::Data::Ensure(
+                        buck2_data::materializer_command::Ensure {
+                            paths: paths.iter().map(|p| p.to_string()).collect::>(),
+                        },
+                    )
+                });
+
+                fut_sender
+                    .send(self.materialize_many_artifacts(paths, event_dispatcher))
+                    .ok();
+            }
+            MaterializerCommand::Subscription(sub) => sub.execute(self),
+            MaterializerCommand::Extension(ext) => ext.execute(self),
+            MaterializerCommand::Abort => unreachable!(),
+        }
+    }
+
+    fn process_one_low_priority_command(&mut self, command: LowPriorityMaterializerCommand) {
+        match command {
+            // Materialization of artifact succeeded
+            LowPriorityMaterializerCommand::MaterializationFinished {
+                path,
+                timestamp,
+                version,
+                result,
+            } => {
+                self.materialization_finished(path, timestamp, version, result);
+            }
+            LowPriorityMaterializerCommand::CleanupFinished {
+                path,
+                version,
+                result,
+            } => {
+                self.tree.cleanup_finished(path, version, result);
+            }
+        }
+    }
+
+    /// Poll the current TTL refresh and remove it if it's done. Add the outcome to
+    /// ttl_refresh_history.
+    fn poll_current_ttl_refresh(&mut self) {
+        self.ttl_refresh_instance = match self.ttl_refresh_instance.take() {
+            Some(mut curr) => match curr.try_recv() {
+                Ok((at, outcome)) => {
+                    // Done
+                    self.ttl_refresh_history.push(TtlRefreshHistoryEntry {
+                        at,
+                        outcome: Some(outcome),
+                    });
+                    None
+                }
+                Err(TryRecvError::Empty) => {
+                    // Leave it alone.
+                    Some(curr)
+                }
+                Err(TryRecvError::Closed) => {
+                    // Shouldn't really happen unless Tokio is shutting down, but be safe.
+                    self.ttl_refresh_history.push(TtlRefreshHistoryEntry {
+                        at: Utc::now(),
+                        outcome: Some(Err(anyhow::anyhow!("Shutdown"))),
+                    });
+                    None
+                }
+            },
+            None => None,
+        };
+    }
+
+    fn is_path_materialized(&self, path: &ProjectRelativePath) -> bool {
+        match self.tree.prefix_get(&mut path.iter()) {
+            None => false,
+            Some(data) => {
+                matches!(
+                    data.stage,
+                    ArtifactMaterializationStage::Materialized { .. }
+                )
+            }
+        }
+    }
+
+    fn flush_access_times(&mut self, max_buffer_size: usize) -> String {
+        if let Some(access_times_buffer) = self.access_times_buffer.as_mut() {
+            let size = access_times_buffer.len();
+            if size < max_buffer_size {
+                return "Access times buffer is not full yet".to_owned();
+            }
+
+            let buffer = std::mem::take(access_times_buffer);
+            let now = Instant::now();
+            tracing::debug!("Flushing access times buffer");
+            if let Some(sqlite_db) = self.sqlite_db.as_mut() {
+                if let Err(e) = sqlite_db
+                    .materializer_state_table()
+                    .update_access_times(buffer.iter().collect::>())
+                {
+                    soft_error!(
+                        "materializer_materialize_error",
+                        e.context(self.log_buffer.clone()).into(),
+                        quiet: true
+                    )
+                    .unwrap();
+                    return "Found error while updating access times in sqlite db".to_owned();
+                }
+            }
+            return format!(
+                "Finished flushing {} entries in {} ms",
+                size,
+                now.elapsed().as_millis(),
+            );
+        }
+        "Access time updates are disabled.
Consider removing `update_access_times = false` from your .buckconfig".to_owned() + } + + fn materialize_many_artifacts( + &mut self, + paths: Vec, + event_dispatcher: EventDispatcher, + ) -> BoxStream<'static, Result<(), MaterializationError>> { + let tasks = paths.into_iter().filter_map(|path| { + self.materialize_artifact(path.as_ref(), event_dispatcher.dupe()) + .map(move |fut| { + fut.map_err(move |e| match e { + SharedMaterializingError::Error(source) => MaterializationError::Error { + path, + source: source.into(), + }, + SharedMaterializingError::NotFound(source) => { + MaterializationError::NotFound { source } + } + }) + }) + }); + + tasks.collect::>().boxed() + } + + fn declare_existing(&mut self, path: &ProjectRelativePath, value: ArtifactValue) { + let metadata = ArtifactMetadata::new(value.entry()); + on_materialization( + self.sqlite_db.as_mut(), + &self.log_buffer, + &self.subscriptions, + path, + &metadata, + Utc::now(), + "materializer_declare_existing_error", + ); + + self.tree.insert( + path.iter().map(|f| f.to_owned()), + Box::new(ArtifactMaterializationData { + deps: value.deps().duped(), + stage: ArtifactMaterializationStage::Materialized { + metadata, + last_access_time: Utc::now(), + active: true, + }, + processing: Processing::Done(self.version_tracker.next()), + }), + ); + } + + fn declare( + &mut self, + path: &ProjectRelativePath, + value: ArtifactValue, + method: Box, + ) { + self.stats.declares.fetch_add(1, Ordering::Relaxed); + + // Check if artifact to be declared is same as artifact that's already materialized. + let mut path_iter = path.iter(); + if let Some(data) = self.tree.prefix_get_mut(&mut path_iter) { + match &data.stage { + ArtifactMaterializationStage::Materialized { + metadata, + last_access_time, + .. + } => { + // NOTE: This is for testing performance when hitting mismatches with disk + // state. Unwrapping isn't ideal, but we can't report errors here. + let force_mismatch = buck2_env_anyhow!( + "BUCK2_TEST_FORCE_DECLARE_MISMATCH", + bool, + applicability = testing + ) + .unwrap(); + + if path_iter.next().is_none() + && metadata.matches_entry(value.entry()) + && !force_mismatch + { + // In this case, the entry declared matches the already materialized + // entry on disk, so just update the deps field but leave + // the artifact as materialized. + tracing::trace!( + path = %path, + "already materialized, updating deps only", + ); + let deps = value.deps().duped(); + data.stage = ArtifactMaterializationStage::Materialized { + metadata: metadata.dupe(), + last_access_time: *last_access_time, + active: true, + }; + data.deps = deps; + + self.stats.declares_reused.fetch_add(1, Ordering::Relaxed); + + return; + } + } + _ => {} + } + } + + // We don't have a matching artifact. Declare it. + let version = self.version_tracker.next(); + + tracing::trace!( + path = %path, + method = %method, + value = %value.entry(), + version = %version, + "declare artifact", + ); + + // Always invalidate materializer state before actual deleting from filesystem + // so there will never be a moment where artifact is deleted but materializer + // thinks it still exists. + let existing_futs = self + .tree + .invalidate_paths_and_collect_futures(vec![path.to_owned()], self.sqlite_db.as_mut()); + + let existing_futs = ExistingFutures(existing_futs); + + let method = Arc::from(method); + + // Dispatch Write actions eagerly if possible. We can do this if no cleanup is required. We + // also check that there are no deps, though for writes there should never be deps. 
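+        // Decision sketch for the fast path below (descriptive): a Write with no
+        // pending cleanup futures and no deps is handed to `io.write` right away;
+        // every other method first goes through `clean_path` and is only
+        // materialized later, when `ensure_materialized` asks for it.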
+ + // Gate this to not macs for now because we are seeing some instances of extremely slow I/O on macs. + // This is a very hacky and temporary fix. + // TODO(scottcao): Eagerly dispatch writes on a lower priority. + let can_use_write_fast_path = + !cfg!(target_os = "macos") && existing_futs.is_empty() && value.deps().is_none(); + + let future = match &*method { + ArtifactMaterializationMethod::Write(write) if can_use_write_fast_path => { + let materialize = self.io.write( + path.to_owned(), + write.dupe(), + version, + self.command_sender.dupe(), + self.cancellations, + ); + ProcessingFuture::Materializing(materialize.shared()) + } + _ => ProcessingFuture::Cleaning(clean_path( + &self.io, + path.to_owned(), + version, + self.command_sender.dupe(), + existing_futs, + &self.rt, + self.cancellations, + )), + }; + + let data = Box::new(ArtifactMaterializationData { + deps: value.deps().duped(), + stage: ArtifactMaterializationStage::Declared { + entry: value.entry().dupe(), + method, + }, + processing: Processing::Active { future, version }, + }); + self.tree.insert(path.iter().map(|f| f.to_owned()), data); + } + + /// Check if artifact to be declared is same as artifact that's already materialized. + #[instrument(level = "debug", skip(self), fields(path = %path, value = %value.entry()))] + fn match_artifact(&mut self, path: ProjectRelativePathBuf, value: ArtifactValue) -> bool { + let mut path_iter = path.iter(); + let data = match self.tree.prefix_get_mut(&mut path_iter) { + Some(data) => data, + None => { + tracing::trace!("overlapping below"); + return false; + } + }; + + // Something was declared above our path. + if path_iter.next().is_some() { + tracing::trace!("overlapping above"); + return false; + } + + let is_match = match &data.stage { + ArtifactMaterializationStage::Materialized { metadata, .. } => { + let is_match = value.entry(); + tracing::trace!("materialized: found {}, is_match: {}", metadata.0, is_match); + metadata.matches_entry(is_match) + } + ArtifactMaterializationStage::Declared { entry, .. } => { + // NOTE: In theory, if something was declared here, we should probably be able to + // just re-declare over it? + let is_match = value.entry() == entry; + tracing::trace!("declared: found {}, is_match: {}", entry, is_match); + is_match + } + }; + + // In practice, having a matching artifact with different deps isn't actually *possible* + // right now, because the deps are derived from the artifact value and we'll always have + // declared them before. But, if we have a local action cache and persist that as well as + // materializer state across restarts, then eventually we could have a match with something + // that hasn't had its deps populated yet (since the materializer state does not know about + // deps). + if is_match { + if let Some(deps) = value.deps() { + data.deps = Some(deps.dupe()) + } + } + + is_match + } + + fn has_artifact(&mut self, path: ProjectRelativePathBuf) -> bool { + let mut path_iter = path.iter(); + let Some(data) = self.tree.prefix_get_mut(&mut path_iter) else { + return false; + }; + // Something was declared above our path. 
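+        // Prefix-lookup example (illustrative paths): with data stored at
+        // `foo/bar`, querying `foo/bar/baz` lands on the `foo/bar` node and
+        // leaves `baz` in the iterator, so the check below returns false: only
+        // an exact match counts as having the artifact.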
+ if path_iter.next().is_some() { + return false; + } + + match &mut data.stage { + ArtifactMaterializationStage::Materialized { + metadata: _, + last_access_time, + active, + } => { + // Treat this case much like a `declare_existing` + *active = true; + *last_access_time = Utc::now(); + if let Some(sqlite_db) = &mut self.sqlite_db { + if let Err(e) = sqlite_db + .materializer_state_table() + .update_access_times(vec![&path]) + { + soft_error!("has_artifact_update_time", e.context(self.log_buffer.clone()).into(), quiet: true).unwrap(); + } + } + } + ArtifactMaterializationStage::Declared { .. } => { + // Nothing to do here + } + } + + true + } + + #[instrument(level = "debug", skip(self), fields(path = %path))] + fn materialize_artifact( + &mut self, + path: &ProjectRelativePath, + event_dispatcher: EventDispatcher, + ) -> Option { + self.materialize_artifact_recurse(MaterializeStack::Empty, path, event_dispatcher) + } + + fn materialize_artifact_recurse( + &mut self, + stack: MaterializeStack<'_>, + path: &ProjectRelativePath, + event_dispatcher: EventDispatcher, + ) -> Option { + let stack = MaterializeStack::Child(&stack, path); + // We only add context to outer error, because adding context to the future + // is expensive. Errors in futures should add stack context themselves. + match self.materialize_artifact_inner(stack, path, event_dispatcher) { + Ok(res) => res, + Err(e) => Some( + future::err(SharedMaterializingError::Error( + e.context(format!("materializing {}", stack)).into(), + )) + .boxed() + .shared(), + ), + } + } + + fn materialize_artifact_inner( + &mut self, + stack: MaterializeStack<'_>, + path: &ProjectRelativePath, + event_dispatcher: EventDispatcher, + ) -> anyhow::Result> { + // TODO(nga): rewrite without recursion or figure out why we overflow stack here. + check_stack_overflow().tag_anyhow(ErrorTag::ServerStackOverflow)?; + + // Get the data about the artifact, or return early if materializing/materialized + let mut path_iter = path.iter(); + let data = match self.tree.prefix_get_mut(&mut path_iter) { + // Never declared, nothing to do + None => { + tracing::debug!("not known"); + return Ok(None); + } + Some(data) => data, + }; + + let path = path.strip_suffix(path_iter.as_path()).unwrap(); + + let cleaning_fut = match &data.processing { + Processing::Active { + future: ProcessingFuture::Cleaning(f), + .. + } => Some(f.clone()), + Processing::Active { + future: ProcessingFuture::Materializing(f), + .. + } => { + tracing::debug!("join existing future"); + return Ok(Some(f.clone())); + } + Processing::Done(..) => None, + }; + + let deps = data.deps.dupe(); + let check_deps = deps.is_some(); + let entry_and_method = match &mut data.stage { + ArtifactMaterializationStage::Declared { entry, method } => { + Some((entry.dupe(), method.dupe())) + } + ArtifactMaterializationStage::Materialized { + ref mut last_access_time, + .. + } => match check_deps { + true => None, + false => { + if let Some(ref mut buffer) = self.access_times_buffer.as_mut() { + // TODO (torozco): Why is it legal for something to be Materialized + Cleaning? + let timestamp = Utc::now(); + *last_access_time = timestamp; + + // NOTE (T142264535): We mostly expect that artifacts are always declared + // before they are materialized, but there's one case where that doesn't + // happen. In particular, when incremental actions execute, they will trigger + // materialization of outputs from a previous run. 
The artifact isn't really + // "active" (it's not an output that we'll use), but we do warn here (when we + // probably shouldn't). + // + // if !active { + // tracing::warn!(path = %path, "Expected artifact to be marked active by declare") + // } + if buffer.insert(path.to_buf()) { + tracing::debug!( + "nothing to materialize, adding to access times buffer" + ); + } + } + + return Ok(None); + } + }, + }; + + let version = self.version_tracker.next(); + + tracing::debug!( + has_entry_and_method = entry_and_method.is_some(), + method = ?entry_and_method.as_ref().map(|(_, m)| m), + has_deps = deps.is_some(), + version = %version, + cleaning = cleaning_fut.is_some(), + "materialize artifact" + ); + + // If the artifact copies from other artifacts, we must materialize them first + let deps_tasks = match entry_and_method.as_ref() { + Some((_, m)) => match m.as_ref() { + ArtifactMaterializationMethod::CasDownload { .. } + | ArtifactMaterializationMethod::HttpDownload { .. } + | ArtifactMaterializationMethod::Write { .. } => Vec::new(), + ArtifactMaterializationMethod::LocalCopy(_, copied_artifacts) => copied_artifacts + .iter() + .filter_map(|a| { + self.materialize_artifact_recurse( + MaterializeStack::Child(&stack, path), + a.src.as_ref(), + event_dispatcher.dupe(), + ) + }) + .collect::<Vec<_>>(), + #[cfg(test)] + ArtifactMaterializationMethod::Test => Vec::new(), + }, + _ => Vec::new(), + }; + + // The artifact might have symlinks pointing to other artifacts. We must + // materialize them as well, to avoid dangling symlinks. + let link_deps_tasks = match deps.as_ref() { + None => Vec::new(), + Some(deps) => self + .tree + .find_artifacts(deps) + .into_iter() + .filter_map(|p| { + self.materialize_artifact_recurse( + MaterializeStack::Child(&stack, path), + p.as_ref(), + event_dispatcher.dupe(), + ) + }) + .collect::<Vec<_>>(), + }; + + // Create a task to await deps and materialize ourselves + let path_buf = path.to_buf(); + let path_buf_dup = path_buf.clone(); + let io = self.io.dupe(); + let command_sender = self.command_sender.dupe(); + let task = self + .spawn(async move { + let cancellations = CancellationContext::never_cancelled(); // spawned + + // Materialize the deps and this entry. This *must* happen in a try block because we + // need to notify the materializer regardless of whether this succeeds or fails. + + let timestamp = Utc::now(); + let res: Result<(), SharedMaterializingError> = try { + // If there is an existing future trying to delete conflicting paths, we must wait for it + // to finish before we can start materialization. + if let Some(cleaning_fut) = cleaning_fut { + cleaning_fut + .await + .with_context(|| "Error cleaning output path") + .map_err(|e| SharedMaterializingError::Error(e.into()))?; + }; + + // In case this is a local copy, we first need to materialize the + // artifacts we are copying from, before we can copy them. + for t in deps_tasks { + t.await?; + } + + if let Some((entry, method)) = entry_and_method { + let materialize = || { + io.materialize_entry( + path_buf.clone(), + method, + entry.dupe(), + event_dispatcher.dupe(), + cancellations, + ) + }; + + // Windows symlinks need to be specified whether it is to a file or directory. We rely on the + // target file existing to determine this. Ensure symlink targets exist before the entry + // is materialized for Windows. For non-Windows, do everything concurrently. (A standalone + // sketch of this ordering follows below.)
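A standalone sketch of that ordering, under assumed shapes (the real futures are `materialize` and the `link_deps_tasks` above; this is an illustration, not the actual implementation):

use std::future::Future;

async fn materialize_with_link_deps<M, L>(materialize: M, link_deps: Vec<L>) -> anyhow::Result<()>
where
    M: Future<Output = anyhow::Result<()>>,
    L: Future<Output = anyhow::Result<()>>,
{
    if cfg!(windows) {
        // Windows: symlink targets must exist before the entry is created.
        for t in link_deps {
            t.await?;
        }
        materialize.await
    } else {
        // Elsewhere: the entry can be materialized first.
        materialize.await?;
        for t in link_deps {
            t.await?;
        }
        Ok(())
    }
}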
+ if cfg!(windows) { + for t in link_deps_tasks { + t.await?; + } + materialize().await?; + } else { + materialize().await?; + for t in link_deps_tasks { + t.await?; + } + } + } else { + for t in link_deps_tasks { + t.await?; + } + } + }; + + // Materialization finished, notify the command thread + let _ignored = command_sender.send_low_priority( + LowPriorityMaterializerCommand::MaterializationFinished { + path: path_buf_dup, + timestamp, + version, + result: res.dupe(), + }, + ); + + res + }) + .map(|r| match r { + Ok(r) => r, + Err(e) => Err(SharedMaterializingError::Error(e.into())), // Turn the JoinError into a buck2_error::Error. + }) + .boxed() + .shared(); + + let data = self.tree.prefix_get_mut(&mut path.iter()).unwrap(); + data.processing = Processing::Active { + future: ProcessingFuture::Materializing(task.clone()), + version, + }; + + Ok(Some(task)) + } + + #[instrument(level = "debug", skip(self, result), fields(path = %artifact_path))] + fn materialization_finished( + &mut self, + artifact_path: ProjectRelativePathBuf, + timestamp: DateTime, + version: Version, + result: Result<(), SharedMaterializingError>, + ) { + match self.tree.prefix_get_mut(&mut artifact_path.iter()) { + Some(info) => { + if info.processing.current_version() > version { + // We can only unset the future if version matches. + // Otherwise, we may be unsetting a different future from a newer version. + tracing::debug!("version conflict"); + return; + } + + if result.is_err() { + let version = self.version_tracker.next(); + match &info.stage { + ArtifactMaterializationStage::Materialized { .. } => { + tracing::debug!("artifact deps materialization failed, doing nothing"); + // If already materialized, we only attempted to materialize deps, which means the error did + // not occur when materializing the artifact itself. There is no need to clean the artifact path + // and doing so will make the filesystem out of sync with materializer state. + info.processing = Processing::Done(version); + } + ArtifactMaterializationStage::Declared { .. } => { + tracing::debug!("materialization failed, redeclaring artifact"); + // Even though materialization failed, something may have still materialized at artifact_path, + // so we need to delete anything at artifact_path before we ever retry materializing it. + // TODO(scottcao): Once command processor accepts an ArtifactTree instead of initializing one, + // add a test case to ensure this behavior. + let future = ProcessingFuture::Cleaning(clean_path( + &self.io, + artifact_path.clone(), + version, + self.command_sender.dupe(), + ExistingFutures::empty(), + &self.rt, + self.cancellations, + )); + info.processing = Processing::Active { future, version }; + } + } + } else { + tracing::debug!(has_deps = info.deps.is_some(), "transition to Materialized"); + let new_stage = match &info.stage { + ArtifactMaterializationStage::Materialized { .. } => { + // This happens if deps = true. In this case, the entry itself was not + // materialized again, but its deps have been. We need to clear the + // waiting future regardless. + tracing::debug!("artifact is already materialized"); + None + } + ArtifactMaterializationStage::Declared { + entry, + method: _method, + } => { + let metadata = ArtifactMetadata::new(entry); + // NOTE: We only insert this artifact if there isn't an in-progress cleanup + // future on this path. 
+ on_materialization( + self.sqlite_db.as_mut(), + &self.log_buffer, + &self.subscriptions, + &artifact_path, + &metadata, + timestamp, + "materializer_finished_error", + ); + + Some(ArtifactMaterializationStage::Materialized { + metadata, + last_access_time: timestamp, + active: true, + }) + } + }; + + if let Some(new_stage) = new_stage { + info.stage = new_stage; + } + + info.processing = Processing::Done(version); + } + } + None => { + // NOTE: This can happen if a path got invalidated while it was being materialized. + tracing::debug!("materialization_finished but path is vacant!") + } + } + } + + fn maybe_log_command(&self, event_dispatcher: &EventDispatcher, f: F) + where + F: FnOnce() -> buck2_data::materializer_command::Data, + { + if self.verbose_materializer_log { + let data = Some(f()); + event_dispatcher.instant_event(buck2_data::MaterializerCommand { data }); + } + } +} + +/// Run callbacks for an artifact being materialized at `path`. +fn on_materialization( + sqlite_db: Option<&mut MaterializerStateSqliteDb>, + log_buffer: &LogBuffer, + subscriptions: &MaterializerSubscriptions, + path: &ProjectRelativePath, + metadata: &ArtifactMetadata, + timestamp: DateTime, + error_name: &'static str, +) { + if let Some(sqlite_db) = sqlite_db { + if let Err(e) = sqlite_db + .materializer_state_table() + .insert(path, metadata, timestamp) + { + soft_error!(error_name, e.context(log_buffer.clone()).into(), quiet: true).unwrap(); + } + } + + subscriptions.on_materialization_finished(path); +} + +impl ArtifactTree { + fn initialize(sqlite_state: Option) -> Self { + let mut tree = ArtifactTree::new(); + if let Some(sqlite_state) = sqlite_state { + for (path, (metadata, last_access_time)) in sqlite_state.into_iter() { + tree.insert( + path.iter().map(|f| f.to_owned()), + Box::new(ArtifactMaterializationData { + deps: None, + stage: ArtifactMaterializationStage::Materialized { + metadata, + last_access_time, + active: false, + }, + processing: Processing::Done(Version(0)), + }), + ); + } + } + tree + } + + /// Given a path that's (possibly) not yet materialized, returns the path + /// `contents_path` where its contents can be found. Returns Err if the + /// contents cannot be found (ex. if it requires HTTP or CAS download) + /// + /// Note that the returned `contents_path` could be the same as `path`. + #[instrument(level = "trace", skip(self), fields(path = %path))] + fn file_contents_path( + &self, + path: ProjectRelativePathBuf, + digest_config: DigestConfig, + ) -> Result { + let mut path_iter = path.iter(); + let materialization_data = match self.prefix_get(&mut path_iter) { + // Not in tree. Assume it's a source file that doesn't require materialization from materializer. + None => return Ok(path), + Some(data) => data, + }; + let (entry, method) = match &materialization_data.stage { + ArtifactMaterializationStage::Materialized { .. } => { + return Ok(path); + } + ArtifactMaterializationStage::Declared { entry, method } => { + (entry.dupe(), method.dupe()) + } + }; + match method.as_ref() { + ArtifactMaterializationMethod::CasDownload { info } => { + let path_iter = path_iter.peekable(); + + let root_entry: ActionDirectoryEntry = entry.dupe(); + let mut entry = Some(entry.as_ref()); + + // Check if the path we are asking for exists in this entry. 
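In toy form, the lookup performed next descends one path component at a time. The types below are assumptions for illustration only; the real code walks `ActionDirectoryEntry` values and, unlike this sketch, reports the entry it stopped at.

use std::collections::HashMap;

enum Entry {
    Dir(HashMap<String, Entry>),
    Leaf,
}

fn descend<'a>(root: &'a Entry, parts: &[&str]) -> Option<&'a Entry> {
    let mut entry = root;
    for name in parts {
        match entry {
            Entry::Dir(children) => entry = children.get(*name)?,
            // A remaining component below a file: the requested path cannot exist.
            Entry::Leaf => return None,
        }
    }
    Some(entry)
}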
+ for name in path_iter { + entry = match entry { + Some(DirectoryEntry::Dir(d)) => d.get(name), + _ => break, + } + } + + match entry { + Some(entry) => Err(ArtifactNotMaterializedReason::RequiresCasDownload { + path, + // TODO (@torozco): A nicer API to get an Immutable directory here. + entry: entry + .map_dir(|d| { + d.as_dyn() + .to_builder() + .fingerprint(digest_config.as_directory_serializer()) + }) + .map_leaf(|l| l.dupe()), + info: info.dupe(), + }), + None => Err( + ArtifactNotMaterializedReason::DeferredMaterializerCorruption { + path, + entry: root_entry, + info: info.dupe(), + }, + ), + } + } + ArtifactMaterializationMethod::HttpDownload { .. } + | ArtifactMaterializationMethod::Write { .. } => { + // TODO: Do the write directly to RE instead of materializing locally? + Err(ArtifactNotMaterializedReason::RequiresMaterialization { path }) + } + // TODO: also record and check materialized_files for LocalCopy + ArtifactMaterializationMethod::LocalCopy(srcs, _) => { + match srcs.prefix_get(&mut path_iter) { + None => Ok(path), + Some(src_path) => match path_iter.next() { + None => self.file_contents_path(src_path.clone(), digest_config), + // This is not supposed to be reachable, and if it's, there + // is a bug somewhere else. Panic to prevent the bug from + // propagating. + Some(part) => panic!( + "While getting materialized path of {:?}: path {:?} is a file, so subpath {:?} doesn't exist within.", + path, src_path, part, + ), + }, + } + } + #[cfg(test)] + ArtifactMaterializationMethod::Test => unimplemented!(), + } + } + + #[instrument(level = "debug", skip(self, result), fields(path = %artifact_path))] + fn cleanup_finished( + &mut self, + artifact_path: ProjectRelativePathBuf, + version: Version, + result: Result<(), SharedMaterializingError>, + ) { + match self + .prefix_get_mut(&mut artifact_path.iter()) + .context("Path is vacant") + { + Ok(info) => { + if info.processing.current_version() > version { + // We can only unset the future if version matches. + // Otherwise, we may be unsetting a different future from a newer version. + tracing::debug!("version conflict"); + return; + } + + if result.is_err() { + // Leave it alone, don't keep retrying. + } else { + info.processing = Processing::Done(version); + } + } + Err(e) => { + // NOTE: This shouldn't normally happen? + soft_error!("cleanup_finished_vacant", e.into(), quiet: true).unwrap(); + } + } + } + + /// Removes paths from tree and returns a pair of two vecs. + /// First vec is a list of paths removed. Second vec is a list of + /// pairs of removed paths to futures that haven't finished. + fn invalidate_paths_and_collect_futures( + &mut self, + paths: Vec, + sqlite_db: Option<&mut MaterializerStateSqliteDb>, + ) -> anyhow::Result> { + let mut invalidated_paths = Vec::new(); + let mut futs = Vec::new(); + + for path in paths { + for (path, data) in self.remove_path(&path) { + if let Some(processing_fut) = data.processing.into_future() { + futs.push((path.clone(), processing_fut)); + } + invalidated_paths.push(path); + } + } + + #[cfg(test)] + { + for path in &invalidated_paths { + if path.as_str() == "test/invalidate/failure" { + return Err(anyhow::anyhow!("Injected error")); + } + } + } + + // We can invalidate the paths here even if materializations are currently running on + // the underlying nodes, because when materialization finishes we'll check the version + // number. 
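That version check is the same guard used by `materialization_finished` and `cleanup_finished` elsewhere in this file. Reduced to a sketch (hypothetical types, not the real ones), it is just a monotonic counter comparison:

#[derive(Clone, Copy, PartialEq, PartialOrd)]
struct Version(u64);

struct Slot {
    current: Version,
}

impl Slot {
    /// A finished operation may only clear the slot if no newer
    /// operation has claimed the same path in the meantime.
    fn may_clear(&self, finished: Version) -> bool {
        finished >= self.current
    }
}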
+ if let Some(sqlite_db) = sqlite_db { + sqlite_db + .materializer_state_table() + .delete(invalidated_paths) + .context("Error invalidating paths in materializer state")?; + } + + Ok(futs) + } +} + +enum FoundArtifact { + /// Proper artifact. + Found, + /// Found a directory artifact with dependencies inside it. + FoundForDir, + // TODO(nga): figure the meaning of remaining. Are these bugs? + /// Dependency dir not found in tree. + DirNotFound, + /// Leaf pointing to a dir. + LeafPointsToDir, +} + +impl FileTree { + /// Finds all the paths in `deps` that are artifacts in `self` + fn find_artifacts(&self, deps: &D) -> Vec + where + D: ActionDirectory, + { + let mut artifacts = Vec::new(); + self.find_artifacts_impl(deps, |path, found| match found { + FoundArtifact::Found | FoundArtifact::FoundForDir => { + artifacts.push(path.to_buf()); + } + FoundArtifact::DirNotFound | FoundArtifact::LeafPointsToDir => {} + }); + artifacts + } + + fn find_artifacts_for_debug(&self, deps: &D) -> Vec<(ProjectRelativePathBuf, &'static str)> + where + D: ActionDirectory, + { + let mut result = Vec::new(); + self.find_artifacts_impl(deps, |path, found| { + let found = match found { + FoundArtifact::Found => "Found", + FoundArtifact::FoundForDir => "FoundForDir", + FoundArtifact::DirNotFound => "DirNotFound", + FoundArtifact::LeafPointsToDir => "LeafPointsToDir", + }; + result.push((path.to_buf(), found)); + }); + result + } + + fn find_artifacts_impl( + &self, + deps: &D, + mut listener: impl FnMut(&ProjectRelativePath, FoundArtifact), + ) where + D: ActionDirectory, + { + fn walk_deps<'a, V, D>( + tree: &FileTree, + entry: DirectoryEntry, + path: &mut ProjectRelativePathBuf, + listener: &mut impl FnMut(&ProjectRelativePath, FoundArtifact), + ) where + D: ActionDirectoryRef<'a>, + { + match (tree, entry) { + (FileTree::Data(_), DirectoryEntry::Leaf(_)) => { + listener(path, FoundArtifact::Found); + } + (FileTree::Data(_), DirectoryEntry::Dir(_)) => { + listener(path, FoundArtifact::FoundForDir); + } + (FileTree::Tree(tree_children), DirectoryEntry::Dir(d)) => { + // Not an artifact, but if entry is a directory we can search deeper within + for (name, child) in d.entries() { + path.push(name); + if let Some(subtree) = tree_children.get(name) { + walk_deps(subtree, child, path, listener); + } else { + listener(path, FoundArtifact::DirNotFound); + } + let popped = path.pop(); + assert!(popped); + } + } + (FileTree::Tree(_), DirectoryEntry::Leaf(_)) => { + listener(path, FoundArtifact::LeafPointsToDir); + } + } + } + + let mut path_buf = ProjectRelativePathBuf::default(); + walk_deps( + self, + DirectoryEntry::Dir(Directory::as_ref(deps)), + &mut path_buf, + &mut listener, + ); + assert!(path_buf.is_empty()); + } + + /// Removes path from FileTree. Returns an iterator of pairs of path and entry removed + /// from the tree. + fn remove_path( + &mut self, + path: &ProjectRelativePath, + ) -> Box> { + let mut path_iter = path.iter(); + let removed = self.remove(&mut path_iter); + + let mut path = path; + // Rewind the `path` up to the entry we *actually* found. + for _ in path_iter { + path = path + .parent() + .expect("Path iterator cannot cause us to rewind past the last parent"); + } + let path = path.to_owned(); + + match removed { + Some(tree) => Box::new( + tree.into_iter_with_paths() + .map(move |(k, v)| ((path).join(k), v)), + ), + None => Box::new(std::iter::empty()), + } + } +} + +/// Wait on all futures in `futs` to finish. Return Error for first future that failed +/// in the Vec. 
+async fn join_all_existing_futs( + existing_futs: Vec<(ProjectRelativePathBuf, ProcessingFuture)>, +) -> buck2_error::Result<()> { + // We can await inside a loop here because all ProcessingFuture's are spawned. + for (path, fut) in existing_futs.into_iter() { + match fut { + ProcessingFuture::Materializing(f) => { + // We don't care about errors from previous materializations. + // We are trying to delete anything that has been materialized, + // so these errors can be ignored. + f.await.ok(); + } + ProcessingFuture::Cleaning(f) => { + f.await.with_context(|| { + format!( + "Error waiting for a previous future to finish cleaning output path {}", + path + ) + })?; + } + }; + } + + Ok(()) +} + +/// Spawns a future to clean output paths while waiting for any +/// pending future to finish. +fn clean_path( + io: &Arc, + path: ProjectRelativePathBuf, + version: Version, + command_sender: Arc>, + existing_futs: ExistingFutures, + rt: &Handle, + cancellations: &'static CancellationContext, +) -> CleaningFuture { + if existing_futs.is_empty() { + return io + .clean_path(path, version, command_sender, cancellations) + .shared(); + } + + DeferredMaterializerCommandProcessor::::spawn_from_rt(rt, { + let io = io.dupe(); + let cancellations = CancellationContext::never_cancelled(); + async move { + join_all_existing_futs(existing_futs.into_result()?).await?; + io.clean_path(path, version, command_sender, cancellations) + .await + } + }) + .map(|r| match r { + Ok(r) => r, + Err(e) => Err(e.into()), // Turn the JoinError into a buck2_error::Error. + }) + .boxed() + .shared() +} + +/// A wrapper type around the Result it contains. Used to expose some extra methods. +struct ExistingFutures(anyhow::Result>); + +impl ExistingFutures { + fn is_empty(&self) -> bool { + self.0.as_ref().map_or(false, |f| f.is_empty()) + } + + fn into_result(self) -> anyhow::Result> { + self.0 + } + + fn empty() -> Self { + Self(Ok(Vec::new())) + } +} + +#[derive(Derivative)] +#[derivative(Debug)] +pub struct WriteFile { + #[derivative(Debug = "ignore")] + compressed_data: Box<[u8]>, + decompressed_size: usize, + is_executable: bool, +} diff --git a/app/buck2_execute_impl/src/materializers/deferred/clean_stale.rs b/app/buck2_execute_impl/src/materializers/deferred/clean_stale.rs index 478b05fb17236..ceb55cfce1002 100644 --- a/app/buck2_execute_impl/src/materializers/deferred/clean_stale.rs +++ b/app/buck2_execute_impl/src/materializers/deferred/clean_stale.rs @@ -9,9 +9,14 @@ use std::collections::HashMap; use std::sync::Arc; +use std::time::Instant; use anyhow::Context; use buck2_common::file_ops::FileType; +use buck2_common::legacy_configs::configs::LegacyBuckConfig; +use buck2_common::legacy_configs::key::BuckconfigKeyRef; +use buck2_common::liveliness_observer::LivelinessGuard; +use buck2_common::liveliness_observer::LivelinessObserverSync; use buck2_core::fs::fs_util; use buck2_core::fs::paths::abs_norm_path::AbsNormPath; use buck2_core::fs::paths::file_name::FileName; @@ -20,16 +25,21 @@ use buck2_core::fs::project::ProjectRoot; use buck2_core::fs::project_rel_path::ProjectRelativePath; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; use buck2_core::soft_error; +use buck2_data::CleanStaleResultKind; +use buck2_data::CleanStaleStats; use buck2_events::dispatch::EventDispatcher; -use buck2_execute::execute::clean_output_paths::CleanOutputPaths; +use buck2_events::errors::create_error_report; +use buck2_events::metadata; +use buck2_execute::execute::blocking::IoRequest; +use 
buck2_execute::execute::clean_output_paths::cleanup_path; +use buck2_futures::cancellation::CancellationContext; +use buck2_wrapper_common::invocation_id::TraceId; use chrono::DateTime; use chrono::Utc; use derivative::Derivative; use dupe::Dupe; use futures::future::BoxFuture; use futures::FutureExt; -use more_futures::cancellation::CancellationContext; -use thiserror::Error; use tokio::sync::oneshot::Sender; use tracing::error; @@ -42,186 +52,410 @@ use crate::materializers::deferred::ArtifactTree; use crate::materializers::deferred::DeferredMaterializerCommandProcessor; use crate::materializers::sqlite::MaterializerStateSqliteDb; -#[derive(Derivative)] -#[derivative(Debug)] -pub struct CleanStaleArtifacts { +#[derive(Debug, Clone)] +pub struct CleanStaleArtifactsCommand { pub keep_since_time: DateTime, pub dry_run: bool, pub tracked_only: bool, - #[derivative(Debug = "ignore")] - pub sender: Sender>>, pub dispatcher: EventDispatcher, } -fn skip_clean_response_with_message( - message: &str, -) -> anyhow::Result<( - BoxFuture<'static, anyhow::Result<()>>, - buck2_cli_proto::CleanStaleResponse, -)> { - Ok(( - futures::future::ready(Ok(())).boxed(), - buck2_cli_proto::CleanStaleResponse { - message: Some(message.to_owned()), - stats: None, - }, - )) +#[derive(Derivative)] +#[derivative(Debug)] +pub struct CleanStaleArtifactsExtensionCommand { + pub cmd: CleanStaleArtifactsCommand, + #[derivative(Debug = "ignore")] + pub sender: Sender>>, +} + +#[derive(Clone)] +pub struct CleanResult { + kind: CleanStaleResultKind, + stats: CleanStaleStats, +} + +enum PendingCleanResult { + Finished(CleanResult), + Pending(BoxFuture<'static, anyhow::Result>), +} + +impl From for PendingCleanResult { + fn from(val: CleanStaleResultKind) -> Self { + PendingCleanResult::Finished(CleanResult { + kind: val, + stats: CleanStaleStats::default(), + }) + } +} + +impl From for PendingCleanResult { + fn from(val: CleanResult) -> Self { + PendingCleanResult::Finished(val) + } +} + +impl From for buck2_cli_proto::CleanStaleResponse { + fn from(result: CleanResult) -> Self { + let message = match result.kind { + CleanStaleResultKind::SkippedNoGenDir => Some("Nothing to clean"), + CleanStaleResultKind::SkippedDeferWriteDisabled => { + Some("Skipping clean, set buck2.defer_write_actions to use clean --stale") + } + CleanStaleResultKind::SkippedSqliteDisabled => { + Some("Skipping clean, set buck2.sqlite_materializer_state to use clean --stale") + } + CleanStaleResultKind::SkippedDryRun => None, + CleanStaleResultKind::Interrupted => Some("Interrupted"), + CleanStaleResultKind::Finished => None, + CleanStaleResultKind::Failed => None, + }; + Self { + message: message.map(|m| m.to_owned()), + stats: Some(result.stats), + } + } } -impl ExtensionCommand for CleanStaleArtifacts { +fn create_result( + result: Result, + trace_id: Option, + total_duration_s: u64, +) -> buck2_data::CleanStaleResult { + let (kind, mut stats, error) = match result { + Ok(result) => (result.kind, result.stats, None), + Err(e) => ( + CleanStaleResultKind::Failed, + CleanStaleStats::default(), + Some(create_error_report(&e)), + ), + }; + stats.total_duration_s = total_duration_s; + buck2_data::CleanStaleResult { + kind: kind.into(), + stats: Some(stats), + metadata: metadata::collect(), + error, + command_uuid: trace_id.map(|id| id.to_string()), + } +} + +impl ExtensionCommand for CleanStaleArtifactsExtensionCommand { fn execute(self: Box, processor: &mut DeferredMaterializerCommandProcessor) { - let res = if let Some(sqlite_db) = 
processor.sqlite_db.as_mut() { + let trace_id = self.cmd.dispatcher.trace_id().clone(); + let fut = self.cmd.create_clean_fut(processor, Some(trace_id)); + let _ignored = self.sender.send(fut); + } +} + +impl CleanStaleArtifactsCommand { + pub(crate) fn create_clean_fut( + &self, + processor: &mut DeferredMaterializerCommandProcessor, + trace_id: Option, + ) -> BoxFuture<'static, anyhow::Result> { + let start_time = Instant::now(); + let pending_result = self.create_pending_clean_result(processor); + let dispatcher_dup = self.dispatcher.dupe(); + async move { + let result = match pending_result { + Ok(res) => match res { + PendingCleanResult::Finished(result) => Ok(result), + PendingCleanResult::Pending(fut) => fut.await, + }, + Err(e) => Err(e), + }; + let result: Result = result.map_err(|e| e.into()); + let result_event: buck2_data::CleanStaleResult = create_result( + result.clone(), + trace_id, + (Instant::now() - start_time).as_secs(), + ); + dispatcher_dup.instant_event(result_event); + Ok(result?.into()) + } + .boxed() + } + + fn create_pending_clean_result( + &self, + processor: &mut DeferredMaterializerCommandProcessor, + ) -> anyhow::Result { + let (liveliness_observer, liveliness_guard) = LivelinessGuard::create_sync(); + *processor.command_sender.clean_guard.lock() = Some(liveliness_guard); + + if let Some(sqlite_db) = processor.sqlite_db.as_mut() { if !processor.defer_write_actions { - skip_clean_response_with_message( - "Skipping clean, set buck2.defer_write_actions to use clean --stale", - ) + Ok(CleanStaleResultKind::SkippedDeferWriteDisabled.into()) } else { - gather_clean_futures_for_stale_artifacts( + self.scan_and_create_clean_fut( &mut processor.tree, - self.keep_since_time, - self.dry_run, - self.tracked_only, sqlite_db, &processor.io, processor.cancellations, - &self.dispatcher, + liveliness_observer.clone(), ) } } else { - skip_clean_response_with_message( - "Skipping clean, set buck2.sqlite_materializer_state to use clean --stale", - ) + Ok(CleanStaleResultKind::SkippedSqliteDisabled.into()) + } + } + + fn scan_and_create_clean_fut( + &self, + tree: &mut ArtifactTree, + sqlite_db: &mut MaterializerStateSqliteDb, + io: &Arc, + cancellations: &'static CancellationContext, + liveliness_observer: Arc, + ) -> anyhow::Result { + let start_time = Instant::now(); + let gen_path = io + .buck_out_path() + .join(ProjectRelativePathBuf::unchecked_new("gen".to_owned())); + let gen_dir = io.fs().resolve(&gen_path); + if !fs_util::try_exists(&gen_dir)? { + return Ok(CleanStaleResultKind::SkippedNoGenDir.into()); + } + tracing::trace!(gen_dir = %gen_dir, "Scanning"); + + let mut found_paths = Vec::new(); + if self.tracked_only { + find_stale_tracked_only(tree, self.keep_since_time, &mut found_paths)? 
+ } else { + let gen_subtree = tree + .get_subtree(&mut gen_path.iter()) + .context("Found a file where gen dir expected")?; + + let empty; + + let gen_subtree = match gen_subtree { + Some(t) => t, + None => { + empty = HashMap::new(); + &empty + } + }; + + StaleFinder { + io: io.dupe(), + keep_since_time: self.keep_since_time, + found_paths: &mut found_paths, + liveliness_observer: liveliness_observer.clone(), + } + .visit_recursively(gen_path, gen_subtree)?; }; - let fut = async move { - let (fut, response) = res?; - fut.await?; - tracing::trace!("finished cleaning stale artifacts"); - Ok(response) + + let mut stats = stats_for_paths(&found_paths); + stats.scan_duration_s = (Instant::now() - start_time).as_secs(); + + // Log limited number of untracked artifacts to avoid logging spikes if schema changes. + for (path, file_type) in found_paths + .iter() + .filter_map(|x| match x { + FoundPath::Untracked(path, file_type, _) => Some((path, file_type)), + _ => None, + }) + .take(2000) + { + self.dispatcher.instant_event(buck2_data::UntrackedFile { + path: path.to_string(), + file_type: format!("{:?}", file_type), + }); + } + + if !liveliness_observer.is_alive_sync() { + return Ok(PendingCleanResult::Finished(CleanResult { + kind: CleanStaleResultKind::Interrupted, + stats, + })); + } + + // If no stale or retained artifacts were found, the db should be empty. + if stats.stale_artifact_count + stats.retained_artifact_count == 0 { + // Just need to know if any entries exist, could be a simpler query. + // Checking the db directly in case tree is somehow not in sync. + let materializer_state = sqlite_db + .materializer_state_table() + .read_all(io.digest_config())?; + + // Entries in the db should have been found in buck-out, return error and skip cleaning untracked artifacts. + if !materializer_state.is_empty() { + let error = CleanStaleError { + db_size: materializer_state.len(), + stats, + }; + // quiet just because it's also returned, soft_error to log to scribe + return Err(soft_error!("clean_stale_error", error.into(), quiet: true) + .map(|e| e.into())?); + } + } + + if self.dry_run { + Ok(PendingCleanResult::Finished(CleanResult { + kind: CleanStaleResultKind::SkippedDryRun, + stats, + })) + } else { + Ok(PendingCleanResult::Pending(create_clean_fut( + found_paths, + stats, + tree, + sqlite_db, + io, + cancellations, + liveliness_observer, + )?)) } - .boxed(); - let _ignored = self.sender.send(fut); } } -#[derive(Debug, Clone, Error)] +#[derive(Debug, Clone, buck2_error::Error)] #[error("Internal error: materializer state exists (num db entries: {}) but no artifacts were found by clean ({:?}).
Not cleaning untracked artifacts.", .db_size, .stats)] pub(crate) struct CleanStaleError { db_size: usize, stats: buck2_data::CleanStaleStats, } -fn gather_clean_futures_for_stale_artifacts( +fn stats_for_paths(paths: &Vec) -> buck2_data::CleanStaleStats { + let mut stats = buck2_data::CleanStaleStats::default(); + for path in paths { + match path { + FoundPath::Untracked(_, _, size) => { + stats.untracked_artifact_count += 1; + stats.untracked_bytes += *size; + } + FoundPath::Stale(_, size) => { + stats.stale_artifact_count += 1; + stats.stale_bytes += *size; + } + FoundPath::Retained(size) => { + stats.retained_artifact_count += 1; + stats.retained_bytes += *size; + } + } + } + stats +} + +fn create_clean_fut( + found_paths: Vec, + mut stats: CleanStaleStats, tree: &mut ArtifactTree, - keep_since_time: DateTime, - dry_run: bool, - tracked_only: bool, sqlite_db: &mut MaterializerStateSqliteDb, io: &Arc, cancellations: &'static CancellationContext, - dispatcher: &EventDispatcher, -) -> anyhow::Result<( - BoxFuture<'static, anyhow::Result<()>>, - buck2_cli_proto::CleanStaleResponse, -)> { - let gen_path = io - .buck_out_path() - .join(ProjectRelativePathBuf::unchecked_new("gen".to_owned())); - let gen_dir = io.fs().resolve(&gen_path); - if !fs_util::try_exists(&gen_dir)? { - return skip_clean_response_with_message("Nothing to clean"); + liveliness_observer: Arc, +) -> anyhow::Result>> { + let io = io.dupe(); + + let paths_to_invalidate: Vec = found_paths + .iter() + .filter_map(|x| match x { + FoundPath::Stale(p, ..) => Some(p.clone()), + _ => None, + }) + .collect(); + + let existing_clean_futs = + tree.invalidate_paths_and_collect_futures(paths_to_invalidate, Some(sqlite_db))?; + let mut existing_materialization_futs = vec![]; + for data in tree.iter_without_paths() { + match &data.processing { + super::Processing::Active { + future: super::ProcessingFuture::Materializing(future), + .. + } => existing_materialization_futs.push(future.clone()), + _ => (), + }; } - tracing::trace!(gen_dir = %gen_dir, "Scanning"); - - let mut stats = buck2_data::CleanStaleStats::default(); - let mut paths_to_remove = Vec::new(); - let mut paths_to_invalidate = Vec::new(); - - if tracked_only { - find_stale_tracked_only(tree, keep_since_time, &mut stats, &mut paths_to_invalidate)? - } else { - let gen_subtree = tree - .get_subtree(&mut gen_path.iter()) - .context("Found a file where gen dir expected")?; - let empty; + let fut = async move { + let start_time = Instant::now(); + // Wait for all in-progress operations to finish on the paths we are about to + // remove from disk. + join_all_existing_futs(existing_clean_futs).await?; + // Untracked artifacts can be produced during materialization that should not be cleaned while materialization is in progress. + // Wait for all materializations since the path for the future may not be associated with the untracked path. + for fut in existing_materialization_futs.into_iter() { + fut.await.ok(); + } - let gen_subtree = match gen_subtree { - Some(t) => t, - None => { - empty = HashMap::new(); - &empty - } + // Then actually delete them. Note that we kick off one CleanOutputPaths per path. We + // do this to get parallelism. 
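In isolation, that fan-out has roughly the following shape. This is a sketch using tokio's filesystem API rather than the materializer's I/O executor, so the names and error type are assumptions:

use futures::future::try_join_all;

async fn remove_all(paths: Vec<std::path::PathBuf>) -> std::io::Result<()> {
    // One removal future per path; try_join_all drives them concurrently
    // and returns the first error, if any.
    try_join_all(paths.into_iter().map(|p| tokio::fs::remove_file(p))).await?;
    Ok(())
}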
+ let res = buck2_util::future::try_join_all( + found_paths + .into_iter() + .filter_map(|x| match x { + FoundPath::Untracked(p, _, size) => Some((p, size)), + FoundPath::Stale(p, size) => Some((p, size)), + _ => None, + }) + .map(|(path, size)| { + clean_artifact(path, size, cancellations, &io, liveliness_observer.dupe()) + }), + ) + .await?; + + let cleaned_sizes: Vec = res.iter().filter_map(|x| *x).collect(); + stats.cleaned_artifact_count += cleaned_sizes.len() as u64; + stats.cleaned_bytes = cleaned_sizes.iter().sum(); + stats.clean_duration_s = (Instant::now() - start_time).as_secs(); + let kind = if !liveliness_observer.is_alive().await { + CleanStaleResultKind::Interrupted + } else { + CleanStaleResultKind::Finished }; - - StaleFinder { - fs: io.fs(), - dispatcher, - keep_since_time, - stats: &mut stats, - paths_to_remove: &mut paths_to_remove, - paths_to_invalidate: &mut paths_to_invalidate, - } - .visit_recursively(gen_path, gen_subtree)?; + Ok(CleanResult { kind, stats }) }; + Ok(fut.boxed()) +} - // If no stale or retained artifact founds, the db should be empty. - if stats.stale_artifact_count + stats.retained_artifact_count == 0 { - // Just need to know if any entries exist, could be a simpler query. - // Checking the db directly in case tree is somehow not in sync. - let materializer_state = sqlite_db - .materializer_state_table() - .read_all(io.digest_config())?; - - // Entries in the db should have been found in buck-out, return error and skip cleaning untracked artifacts. - if !materializer_state.is_empty() { - let error = CleanStaleError { - db_size: materializer_state.len(), - stats, - }; - // quiet just because it's also returned, soft_error to log to scribe - return Err(soft_error!("clean_stale_error", error.into(), quiet: true)?); +async fn clean_artifact( + path: ProjectRelativePathBuf, + size: u64, + cancellations: &'static CancellationContext<'_>, + io: &Arc, + liveliness_observer: Arc, +) -> anyhow::Result> { + match io + .clean_invalidated_path( + CleanInvalidatedPathRequest { + path, + liveliness_observer: liveliness_observer.dupe(), + }, + cancellations, + ) + .await + { + Ok(()) => Ok(Some(size)), + Err(e) => { + // Not downcasting would require larger clean up to IoRequest + if e.downcast_ref::().is_some() { + Ok(None) + } else { + Err(e) + } } } +} - let fut = if dry_run { - futures::future::ready(Ok(())).boxed() - } else { - let io = io.dupe(); - - stats.cleaned_path_count = paths_to_remove.len() as u64; - stats.cleaned_artifact_count = stats.stale_artifact_count + stats.untracked_artifact_count; - stats.cleaned_bytes = stats.untracked_bytes + stats.stale_bytes; - - let existing_futs = - tree.invalidate_paths_and_collect_futures(paths_to_invalidate, Some(sqlite_db))?; +pub struct CleanInvalidatedPathRequest { + path: ProjectRelativePathBuf, + pub(crate) liveliness_observer: Arc, +} - async move { - // Wait for all in-progress operations to finish on the paths we are about to - // remove from disk. - join_all_existing_futs(existing_futs).await?; - - // Then actually delete them. Note that we kick off one CleanOutputPaths per path. We - // do this to get parallelism. 
- futures::future::try_join_all(paths_to_remove.into_iter().map(|path| { - io.io_executor().execute_io( - Box::new(CleanOutputPaths { paths: vec![path] }), - cancellations, - ) - })) - .await?; +#[derive(buck2_error::Error, Debug)] +#[error("Interrupt")] +struct CleanInterrupt; - anyhow::Ok(()) +impl IoRequest for CleanInvalidatedPathRequest { + fn execute(self: Box, project_fs: &ProjectRoot) -> anyhow::Result<()> { + if !self.liveliness_observer.is_alive_sync() { + return Err(CleanInterrupt.into()); } - .boxed() - }; - - Ok(( - fut, - buck2_cli_proto::CleanStaleResponse { - message: None, - stats: Some(stats), - }, - )) + cleanup_path(project_fs, &self.path)?; + Ok(()) + } } /// Get file size or directory size, without following symlinks @@ -237,18 +471,23 @@ pub fn get_size(path: &AbsNormPath) -> anyhow::Result { Ok(result) } -struct StaleFinder<'a> { - fs: &'a ProjectRoot, - dispatcher: &'a EventDispatcher, +struct StaleFinder<'a, T: IoHandler> { + io: Arc, keep_since_time: DateTime, - stats: &'a mut buck2_data::CleanStaleStats, - /// Those paths will be deleted on disk. - paths_to_remove: &'a mut Vec, - /// Those paths will be invalidated in the materiaizer. - paths_to_invalidate: &'a mut Vec, + found_paths: &'a mut Vec, + liveliness_observer: Arc, +} + +#[derive(Clone)] +enum FoundPath { + /// These will be deleted on disk. + Untracked(ProjectRelativePathBuf, FileType, u64), + /// These will be invalidated in the materializer. + Stale(ProjectRelativePathBuf, u64), + Retained(u64), } -impl<'a> StaleFinder<'a> { +impl<'a, T: IoHandler> StaleFinder<'a, T> { /// Start from `path` and `subtree` and visit everything below. fn visit_recursively<'t>( &mut self, @@ -258,6 +497,9 @@ impl<'a> StaleFinder<'a> { let mut queue = vec![(path, subtree)]; while let Some((path, tree)) = queue.pop() { + if !self.liveliness_observer.is_alive_sync() { + break; + } self.visit(&path, tree, &mut queue)?; } @@ -274,9 +516,9 @@ impl<'a> StaleFinder<'a> { &'t HashMap, )>, ) -> anyhow::Result<()> { - let abs_path = self.fs.resolve(path); + let abs_path = self.io.fs().resolve(path); - for child in fs_util::read_dir(&abs_path)? { + for child in self.io.read_dir(&abs_path)? { let child = child?; let file_name = child.file_name(); @@ -300,15 +542,11 @@ impl<'a> StaleFinder<'a> { None => { // This path is not tracked by the materializer, we can delete it. tracing::trace!(path = %path, file_type = ?file_type, "marking as untracked"); - self.stats.untracked_artifact_count += 1; - self.stats.untracked_bytes += get_size(&child.path())?; - if self.stats.untracked_artifact_count <= 2000 { - self.dispatcher.instant_event(buck2_data::UntrackedFile { - path: path.to_string(), - file_type: format!("{:?}", file_type), - }); - } - self.paths_to_remove.push(path); + self.found_paths.push(FoundPath::Untracked( + path, + file_type, + get_size(&child.path())?, + )); continue; } }; @@ -328,18 +566,15 @@ impl<'a> StaleFinder<'a> { }) if *last_access_time < self.keep_since_time => { // This is something we can invalidate. tracing::trace!(path = %path, file_type = ?file_type, "marking as stale"); - self.stats.stale_artifact_count += 1; - self.stats.stale_bytes += metadata.size(); - self.paths_to_invalidate.push(path.clone()); - self.paths_to_remove.push(path); + self.found_paths + .push(FoundPath::Stale(path, metadata.size())); } ArtifactTree::Data(box ArtifactMaterializationData { stage: ArtifactMaterializationStage::Materialized { metadata, .. }, ..
}) => { tracing::trace!(path = %path, file_type = ?file_type, "marking as retained"); - self.stats.retained_artifact_count += 1; - self.stats.retained_bytes += metadata.size(); + self.found_paths.push(FoundPath::Retained(metadata.size())); } _ => { // What we have on disk does not match what we have in the materializer (which is @@ -356,8 +591,7 @@ impl<'a> StaleFinder<'a> { fn find_stale_tracked_only( tree: &ArtifactTree, keep_since_time: DateTime, - stats: &mut buck2_data::CleanStaleStats, - paths_to_invalidate: &mut Vec, + found_paths: &mut Vec, ) -> anyhow::Result<()> { for (f_path, v) in tree.iter_with_paths() { if let ArtifactMaterializationStage::Materialized { @@ -369,13 +603,74 @@ fn find_stale_tracked_only( let path = ProjectRelativePathBuf::from(f_path); if *last_access_time < keep_since_time && !active { tracing::trace!(path = %path, "stale artifact"); - stats.stale_artifact_count += 1; - paths_to_invalidate.push(path); + found_paths.push(FoundPath::Stale(path, 0)); } else { tracing::trace!(path = %path, "retaining artifact"); - stats.retained_artifact_count += 1; + found_paths.push(FoundPath::Retained(0)); } } } Ok(()) } + +pub struct CleanStaleConfig { + // Time before running first clean, after daemon start + pub start_offset: std::time::Duration, + pub clean_period: std::time::Duration, + pub artifact_ttl: std::time::Duration, + pub dry_run: bool, +} + +impl CleanStaleConfig { + pub fn from_buck_config(root_config: &LegacyBuckConfig) -> anyhow::Result> { + let clean_stale_enabled = root_config + .parse(BuckconfigKeyRef { + section: "buck2", + property: "clean_stale_enabled", + })? + .unwrap_or(false); + let clean_stale_artifact_ttl_hours = root_config + .parse(BuckconfigKeyRef { + section: "buck2", + property: "clean_stale_artifact_ttl_hours", + })? + .unwrap_or(24.0 * 7.0); + let clean_stale_period_hours = root_config + .parse(BuckconfigKeyRef { + section: "buck2", + property: "clean_stale_period_hours", + })? + .unwrap_or(24.0); + let clean_stale_start_offset_hours = root_config + .parse(BuckconfigKeyRef { + section: "buck2", + property: "clean_stale_start_offset_hours", + })? + .unwrap_or(12.0); + let clean_stale_dry_run = root_config + .parse(BuckconfigKeyRef { + section: "buck2", + property: "clean_stale_dry_run", + })? 
+ .unwrap_or(false); + + let secs_in_hour = 60.0 * 60.0; + let clean_stale_config = if clean_stale_enabled { + Some(Self { + clean_period: std::time::Duration::from_secs_f64( + secs_in_hour * clean_stale_period_hours, + ), + artifact_ttl: std::time::Duration::from_secs_f64( + secs_in_hour * clean_stale_artifact_ttl_hours, + ), + start_offset: std::time::Duration::from_secs_f64( + secs_in_hour * clean_stale_start_offset_hours, + ), + dry_run: clean_stale_dry_run, + }) + } else { + None + }; + Ok(clean_stale_config) + } +} diff --git a/app/buck2_execute_impl/src/materializers/deferred/extension.rs b/app/buck2_execute_impl/src/materializers/deferred/extension.rs index 26ae516ddb417..1a0bae43e7bb7 100644 --- a/app/buck2_execute_impl/src/materializers/deferred/extension.rs +++ b/app/buck2_execute_impl/src/materializers/deferred/extension.rs @@ -15,13 +15,14 @@ use std::sync::Arc; use anyhow::Context as _; use async_trait::async_trait; -use buck2_core::directory::DirectoryEntry; use buck2_core::fs::fs_util; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; +use buck2_directory::directory::entry::DirectoryEntry; use buck2_events::dispatch::get_dispatcher; use buck2_execute::directory::ActionDirectoryMember; use buck2_execute::materialize::materializer::DeferredMaterializerEntry; use buck2_execute::materialize::materializer::DeferredMaterializerExtensions; +use buck2_execute::materialize::materializer::DeferredMaterializerIterItem; use buck2_execute::materialize::materializer::DeferredMaterializerSubscription; use chrono::DateTime; use chrono::Duration; @@ -38,7 +39,8 @@ use tokio::sync::oneshot::Sender; use tokio::task::JoinHandle; use tokio_stream::wrappers::UnboundedReceiverStream; -use crate::materializers::deferred::clean_stale::CleanStaleArtifacts; +use crate::materializers::deferred::clean_stale::CleanStaleArtifactsCommand; +use crate::materializers::deferred::clean_stale::CleanStaleArtifactsExtensionCommand; use crate::materializers::deferred::io_handler::create_ttl_refresh; use crate::materializers::deferred::io_handler::IoHandler; use crate::materializers::deferred::subscriptions::MaterializerSubscriptionOperation; @@ -47,13 +49,21 @@ use crate::materializers::deferred::ArtifactMaterializationStage; use crate::materializers::deferred::DeferredMaterializerAccessor; use crate::materializers::deferred::DeferredMaterializerCommandProcessor; use crate::materializers::deferred::MaterializerCommand; +use crate::materializers::deferred::Processing; +use crate::materializers::deferred::ProcessingFuture; pub(super) trait ExtensionCommand: Debug + Sync + Send + 'static { fn execute(self: Box, processor: &mut DeferredMaterializerCommandProcessor); } #[derive(Debug)] -enum PathData { +struct PathData { + stage: PathStage, + processing: PathProcessing, +} + +#[derive(Debug)] +enum PathStage { Materialized { ts: DateTime, size: Option, @@ -61,18 +71,39 @@ enum PathData { Declared(Arc), } +#[derive(Debug)] +enum PathProcessing { + Done, + Materializing, + Cleaning, +} + impl Display for PathData { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - PathData::Materialized { ts, size } => { + match &self.stage { + PathStage::Materialized { ts, size } => { if let Some(size) = size { - write!(f, "materialized (ts={:?}, size={})", ts, size) + write!(f, "materialized (ts={:?}, size={})", ts, size)?; } else { - write!(f, "materialized (ts={:?})", ts) + write!(f, "materialized (ts={:?})", ts)?; } } - PathData::Declared(method) => write!(f, "declared: {}", method), + 
PathStage::Declared(method) => { + write!(f, "declared: {}", method)?; + } + } + + match &self.processing { + PathProcessing::Done => {} + PathProcessing::Materializing => { + write!(f, " (materializing)")?; + } + PathProcessing::Cleaning => { + write!(f, " (cleaning)")?; + } } + + Ok(()) } } @@ -84,15 +115,17 @@ struct Iterate { /// This is for debug commands so we use an unbounded channel to avoid locking up the /// materializer command thread. #[derivative(Debug = "ignore")] - sender: UnboundedSender<(ProjectRelativePathBuf, Box)>, + sender: UnboundedSender, } -impl ExtensionCommand for Iterate { +impl ExtensionCommand for Iterate { fn execute(self: Box, processor: &mut DeferredMaterializerCommandProcessor) { + // Ensure up to date access times + processor.flush_access_times(0); for (path, data) in processor.tree.iter_with_paths() { - let path_data = match &data.stage { + let stage = match &data.stage { ArtifactMaterializationStage::Declared { method, .. } => { - PathData::Declared(method.dupe()) + PathStage::Declared(method.dupe()) } ArtifactMaterializationStage::Materialized { last_access_time, @@ -111,16 +144,57 @@ impl ExtensionCommand for Iterate { .timestamp_opt(last_access_time.timestamp(), 0) .single() .unwrap(); - PathData::Materialized { + PathStage::Materialized { ts, size: Some(size), } } }; + let processing = match &data.processing { + Processing::Done(..) => PathProcessing::Done, + Processing::Active { + future: ProcessingFuture::Materializing(..), + .. + } => PathProcessing::Materializing, + Processing::Active { + future: ProcessingFuture::Cleaning(..), + .. + } => PathProcessing::Cleaning, + }; + + let path_data = PathData { stage, processing }; + let path = ProjectRelativePathBuf::from(path); - match self.sender.send((path, Box::new(path_data) as _)) { + let deps = match &data.deps { + Some(deps) => processor.tree.find_artifacts_for_debug(deps), + None => Vec::new(), + }; + + match self.sender.send(DeferredMaterializerIterItem { + artifact_path: path, + artifact_display: Box::new(path_data) as _, + deps, + }) { + Ok(..) => {} + Err(..) => break, // No use sending more if the client disconnected. + } + } + } +} + +#[derive(Derivative)] +#[derivative(Debug)] +struct ListSubscriptions { + #[derivative(Debug = "ignore")] + sender: UnboundedSender, +} + +impl ExtensionCommand for ListSubscriptions { + fn execute(self: Box, processor: &mut DeferredMaterializerCommandProcessor) { + for path in processor.subscriptions.list_subscribed_paths() { + match self.sender.send(path.to_owned()) { Ok(..) => {} Err(..) => break, // No use sending more if the client disconnected. } } } } @@ -156,7 +230,7 @@ impl ExtensionCommand for Fsck { match res { Ok(..) 
=> {} Err(e) => { - let _ignored = self.sender.send((path, e)); + let _ignored = self.sender.send((path, e.into())); } } } @@ -178,7 +252,7 @@ impl ExtensionCommand for RefreshTtls { Duration::seconds(self.min_ttl), processor.io.digest_config(), ) - .map(|f| processor.rt.spawn(f)); + .map(|f| processor.spawn(f)); let _ignored = self.sender.send(task); } } @@ -283,11 +357,7 @@ impl ExtensionCommand for FlushAccessTimes { #[async_trait] impl DeferredMaterializerExtensions for DeferredMaterializerAccessor { - fn iterate( - &self, - ) -> anyhow::Result< - BoxStream<'static, (ProjectRelativePathBuf, Box)>, - > { + fn iterate(&self) -> anyhow::Result> { let (sender, receiver) = mpsc::unbounded_channel(); self.command_sender.send(MaterializerCommand::Extension( Box::new(Iterate { sender }) as _ @@ -295,6 +365,15 @@ impl DeferredMaterializerExtensions for DeferredMaterializerAccess Ok(UnboundedReceiverStream::new(receiver).boxed()) } + fn list_subscriptions(&self) -> anyhow::Result> { + let (sender, receiver) = mpsc::unbounded_channel(); + self.command_sender + .send(MaterializerCommand::Extension( + Box::new(ListSubscriptions { sender }) as _, + ))?; + Ok(UnboundedReceiverStream::new(receiver).boxed()) + } + fn fsck(&self) -> anyhow::Result> { let (sender, receiver) = mpsc::unbounded_channel(); self.command_sender.send(MaterializerCommand::Extension( @@ -338,15 +417,17 @@ impl DeferredMaterializerExtensions for DeferredMaterializerAccess let (sender, recv) = oneshot::channel(); self.command_sender .send(MaterializerCommand::Extension(Box::new( - CleanStaleArtifacts { - keep_since_time, - dry_run, - tracked_only, + CleanStaleArtifactsExtensionCommand { + cmd: CleanStaleArtifactsCommand { + keep_since_time, + dry_run, + tracked_only, + dispatcher, + }, sender, - dispatcher, }, )))?; - recv.await?.await + recv.await?.await.map(|res| res.into()) } async fn test_iter(&self, count: usize) -> anyhow::Result { diff --git a/app/buck2_execute_impl/src/materializers/deferred/file_tree.rs b/app/buck2_execute_impl/src/materializers/deferred/file_tree.rs index 4dcd24cf65b58..ab973f9e915e0 100644 --- a/app/buck2_execute_impl/src/materializers/deferred/file_tree.rs +++ b/app/buck2_execute_impl/src/materializers/deferred/file_tree.rs @@ -236,8 +236,7 @@ impl<'a> FromIterator<&'a FileNameBuf> for NoopCollector { impl FileTree { pub fn iter_with_paths(&self) -> impl Iterator { - self.iter::>() - .map(|(k, v)| (k.unwrap_or_else(ForwardRelativePathBuf::empty), v)) + self.iter::() } pub fn iter_without_paths(&self) -> impl Iterator { @@ -245,8 +244,7 @@ impl FileTree { } pub fn into_iter_with_paths(self) -> impl Iterator { - self.into_iter::>() - .map(|(k, v)| (k.unwrap_or_else(ForwardRelativePathBuf::empty), v)) + self.into_iter::() } #[allow(unused)] diff --git a/app/buck2_execute_impl/src/materializers/deferred/io_handler.rs b/app/buck2_execute_impl/src/materializers/deferred/io_handler.rs index 095b9cc14440a..36b11b3d59aaf 100644 --- a/app/buck2_execute_impl/src/materializers/deferred/io_handler.rs +++ b/app/buck2_execute_impl/src/materializers/deferred/io_handler.rs @@ -16,15 +16,20 @@ use allocative::Allocative; use anyhow::Context; use async_trait::async_trait; use buck2_common::file_ops::FileDigest; -use buck2_common::http::HttpClient; -use buck2_common::result::SharedError; -use buck2_common::result::ToSharedResultExt; -use buck2_core::directory::unordered_entry_walk; -use buck2_core::directory::DirectoryEntry; -use buck2_core::env_helper::EnvHelper; +use buck2_core::buck2_env_anyhow; +use 
buck2_core::fs::fs_util; +use buck2_core::fs::fs_util::IoError; +use buck2_core::fs::fs_util::ReadDir; +use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; use buck2_core::fs::project::ProjectRoot; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; +use buck2_directory::directory::directory::Directory; +use buck2_directory::directory::directory_iterator::DirectoryIterator; +use buck2_directory::directory::directory_iterator::DirectoryIteratorPathStack; +use buck2_directory::directory::entry::DirectoryEntry; +use buck2_directory::directory::walk::unordered_entry_walk; use buck2_events::dispatch::EventDispatcher; +use buck2_execute::artifact_value::ArtifactValue; use buck2_execute::digest::CasDigestFromReExt; use buck2_execute::digest::CasDigestToReExt; use buck2_execute::digest_config::DigestConfig; @@ -35,8 +40,13 @@ use buck2_execute::execute::blocking::BlockingExecutor; use buck2_execute::execute::blocking::IoRequest; use buck2_execute::execute::clean_output_paths::cleanup_path; use buck2_execute::materialize::http::http_download; +use buck2_execute::materialize::materializer::CasNotFoundError; +use buck2_execute::materialize::materializer::WriteRequest; use buck2_execute::output_size::OutputSize; +use buck2_execute::re::error::RemoteExecutionError; use buck2_execute::re::manager::ReConnectionManager; +use buck2_futures::cancellation::CancellationContext; +use buck2_http::HttpClient; use chrono::Duration; use chrono::Utc; use dupe::Dupe; @@ -45,15 +55,14 @@ use futures::future::Future; use futures::future::FutureExt; use futures::future::TryFutureExt; use gazebo::prelude::VecExt; -use more_futures::cancellation::CancellationContext; use once_cell::sync::Lazy; use remote_execution::NamedDigest; use remote_execution::NamedDigestWithPermissions; -use remote_execution::REClientError; use remote_execution::TCode; use remote_execution::TDigest; use tracing::instrument; +use crate::materializers::deferred::clean_stale::CleanInvalidatedPathRequest; use crate::materializers::deferred::ArtifactMaterializationMethod; use crate::materializers::deferred::ArtifactMaterializationStage; use crate::materializers::deferred::ArtifactTree; @@ -64,6 +73,7 @@ use crate::materializers::deferred::MaterializerSender; use crate::materializers::deferred::SharedMaterializingError; use crate::materializers::deferred::Version; use crate::materializers::deferred::WriteFile; +use crate::materializers::immediate; use crate::materializers::io::materialize_files; use crate::materializers::io::MaterializeTreeStructure; @@ -90,17 +100,28 @@ pub trait IoHandler: Sized + Sync + Send + 'static { path: ProjectRelativePathBuf, write: Arc, version: Version, - command_sender: MaterializerSender, + command_sender: Arc>, cancellations: &'a CancellationContext<'a>, ) -> BoxFuture<'a, Result<(), SharedMaterializingError>>; + async fn immediate_write<'a>( + self: &Arc, + gen: Box anyhow::Result> + Send + 'a>, + ) -> anyhow::Result>; + fn clean_path<'a>( self: &Arc, path: ProjectRelativePathBuf, version: Version, - command_sender: MaterializerSender, + command_sender: Arc>, + cancellations: &'a CancellationContext, + ) -> BoxFuture<'a, Result<(), buck2_error::Error>>; + + async fn clean_invalidated_path<'a>( + self: &Arc, + request: CleanInvalidatedPathRequest, cancellations: &'a CancellationContext, - ) -> BoxFuture<'a, Result<(), SharedError>>; + ) -> anyhow::Result<()>; async fn materialize_entry( self: &Arc, @@ -117,8 +138,8 @@ pub trait IoHandler: Sized + Sync + Send + 'static { min_ttl: Duration, ) -> Option>>; + 
fn read_dir(&self, path: &AbsNormPathBuf) -> Result; fn buck_out_path(&self) -> &ProjectRelativePathBuf; - fn io_executor(&self) -> &dyn BlockingExecutor; fn re_client_manager(&self) -> &Arc; fn fs(&self) -> &ProjectRoot; fn digest_config(&self) -> DigestConfig; @@ -169,11 +190,11 @@ impl DefaultIoHandler { let mut files = Vec::new(); { - let mut walk = unordered_entry_walk(entry.as_ref()); + let mut walk = unordered_entry_walk(entry.as_ref().map_dir(Directory::as_ref)); while let Some((entry_path, entry)) = walk.next() { if let DirectoryEntry::Leaf(ActionDirectoryMember::File(f)) = entry { - let name = path.join_normalized(entry_path.get())?; + let name = path.join(entry_path.get()); let digest = maybe_tombstone_digest(f.digest.data())?.to_re(); tracing::trace!(name = %name, digest = %digest, "push download"); @@ -207,17 +228,28 @@ impl DefaultIoHandler { re_client .materialize_files(files, info.re_use_case) .await - .map_err(|e| match e.downcast_ref::() { - Some(e) if e.code == TCode::NOT_FOUND => MaterializeEntryError::NotFound { - info: info.dupe(), - debug: Arc::from(e.message.as_str()), - }, - _ => MaterializeEntryError::Error(e.context({ - format!( - "Error materializing files declared by action: {}", - info.origin - ) - })), + .map_err(|e| { + let e: buck2_error::Error = e.into(); + match e.find_typed_context::() { + Some(re_error) if re_error.code == TCode::NOT_FOUND => { + let e: anyhow::Error = e.into(); + MaterializeEntryError::NotFound(CasNotFoundError { + path: Arc::from(path), + info: info.dupe(), + directory: entry, + error: Arc::from(e), + }) + } + _ => MaterializeEntryError::Error( + e.context({ + format!( + "Error materializing files declared by action: {}", + info.origin + ) + }) + .into(), + ), + } })?; } ArtifactMaterializationMethod::HttpDownload { info } => { @@ -301,7 +333,7 @@ impl IoHandler for DefaultIoHandler { path: ProjectRelativePathBuf, write: Arc, version: Version, - command_sender: MaterializerSender, + command_sender: Arc>, cancellations: &'a CancellationContext, ) -> BoxFuture<'a, Result<(), SharedMaterializingError>> { self.io_executor @@ -318,13 +350,26 @@ impl IoHandler for DefaultIoHandler { .boxed() } + async fn immediate_write<'a>( + self: &Arc, + gen: Box anyhow::Result> + Send + 'a>, + ) -> anyhow::Result> { + immediate::write_to_disk( + self.fs(), + self.io_executor.as_ref(), + self.digest_config(), + gen, + ) + .await + } + fn clean_path<'a>( self: &Arc, path: ProjectRelativePathBuf, version: Version, - command_sender: MaterializerSender, + command_sender: Arc>, cancellations: &'a CancellationContext, - ) -> BoxFuture<'a, Result<(), SharedError>> { + ) -> BoxFuture<'a, Result<(), buck2_error::Error>> { self.io_executor .execute_io( Box::new(CleanIoRequest { @@ -334,10 +379,21 @@ impl IoHandler for DefaultIoHandler { }), cancellations, ) - .map(|r| r.shared_error()) + .map(|r| r.map_err(buck2_error::Error::from)) .boxed() } + /// Used to clean paths that are already invalidated and don't need to notify the materializer + async fn clean_invalidated_path<'a>( + self: &Arc, + request: CleanInvalidatedPathRequest, + cancellations: &'a CancellationContext, + ) -> anyhow::Result<()> { + self.io_executor + .execute_io(Box::new(request), cancellations) + .await + } + /// Materializes an `entry` at `path`, using the materialization `method` #[instrument(level = "debug", skip(self, cancellations), fields(path = %path, method = %method, entry = %entry))] async fn materialize_entry( @@ -371,7 +427,12 @@ impl IoHandler for DefaultIoHandler { ( res, 
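The error-mapping hunk above changes how a CAS NOT_FOUND is detected: instead of `downcast_ref` on the RE client error, the error is first converted to `buck2_error::Error` and queried for a typed `RemoteExecutionError` context, with `TCode::NOT_FOUND` promoted to a structured `CasNotFoundError` carrying the path, origin info, and directory entry. A minimal standalone sketch of the classification pattern, using stand-in types (`ReError`, `NOT_FOUND`) rather than buck2's own:

    use thiserror::Error;

    #[derive(Debug, Error)]
    #[error("remote execution error (code {code})")]
    struct ReError {
        code: u32, // stand-in for TCode
    }

    const NOT_FOUND: u32 = 5; // stand-in for TCode::NOT_FOUND

    #[derive(Debug, Error)]
    enum MaterializeEntryError {
        #[error("Artifact not found: {0}")]
        NotFound(String),
        #[error(transparent)]
        Other(#[from] anyhow::Error),
    }

    fn classify(e: anyhow::Error) -> MaterializeEntryError {
        match e.downcast_ref::<ReError>() {
            Some(re) if re.code == NOT_FOUND => MaterializeEntryError::NotFound(re.to_string()),
            _ => MaterializeEntryError::Other(e.context("Error materializing files")),
        }
    }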
buck2_data::MaterializationEnd { - action_digest: None, + action_digest: match method.as_ref() { + ArtifactMaterializationMethod::CasDownload { info } => { + info.action_digest().map(|digest| digest.to_string()) + } + _ => None, + }, file_count: stat.file_count, total_bytes: stat.total_bytes, path: path_string, @@ -394,12 +455,12 @@ impl IoHandler for DefaultIoHandler { .map(|f| f.boxed()) } - fn buck_out_path(&self) -> &ProjectRelativePathBuf { - &self.buck_out_path + fn read_dir(&self, path: &AbsNormPathBuf) -> Result { + fs_util::read_dir(path) } - fn io_executor(&self) -> &dyn BlockingExecutor { - self.io_executor.as_ref() + fn buck_out_path(&self) -> &ProjectRelativePathBuf { + &self.buck_out_path } fn re_client_manager(&self) -> &Arc { @@ -434,10 +495,13 @@ fn maybe_tombstone_digest(digest: &FileDigest) -> anyhow::Result<&FileDigest> { .collect() } - static TOMBSTONED_DIGESTS: EnvHelper> = - EnvHelper::with_converter("BUCK2_TEST_TOMBSTONED_DIGESTS", convert_digests); - - if let Some(digests) = TOMBSTONED_DIGESTS.get()? { + let tombstoned_digests = buck2_env_anyhow!( + "BUCK2_TEST_TOMBSTONED_DIGESTS", + type=HashSet, + converter=convert_digests, + applicability=testing, + )?; + if let Some(digests) = tombstoned_digests { if digests.contains(digest) { return Ok(&*TOMBSTONE_DIGEST); } @@ -461,7 +525,7 @@ pub(super) fn create_ttl_refresh( match &data.stage { ArtifactMaterializationStage::Declared { entry, method } => match method.as_ref() { ArtifactMaterializationMethod::CasDownload { info } => { - let mut walk = unordered_entry_walk(entry.as_ref()); + let mut walk = unordered_entry_walk(entry.as_ref().map_dir(Directory::as_ref)); while let Some((_entry_path, entry)) = walk.next() { if let DirectoryEntry::Leaf(ActionDirectoryMember::File(file)) = entry { let needs_refresh = file.digest.expires() < ttl_deadline; @@ -546,7 +610,7 @@ struct WriteIoRequest { path: ProjectRelativePathBuf, write: Arc, version: Version, - command_sender: MaterializerSender, + command_sender: Arc>, } impl WriteIoRequest { @@ -564,7 +628,9 @@ impl IoRequest for WriteIoRequest { fn execute(self: Box, project_fs: &ProjectRoot) -> anyhow::Result<()> { // NOTE: No spans here! We should perhaps add one, but this needs to be considered // carefully as it's a lot of spans, and we haven't historically emitted those for writes. - let res = self.execute_inner(project_fs).shared_error(); + let res = self + .execute_inner(project_fs) + .map_err(buck2_error::Error::from); // If the materializer has shut down, we ignore this. let _ignored = self.command_sender.send_low_priority( @@ -583,14 +649,14 @@ impl IoRequest for WriteIoRequest { struct CleanIoRequest { path: ProjectRelativePathBuf, version: Version, - command_sender: MaterializerSender, + command_sender: Arc>, } impl IoRequest for CleanIoRequest { fn execute(self: Box, project_fs: &ProjectRoot) -> anyhow::Result<()> { // NOTE: No spans here! We should perhaps add one, but this needs to be considered // carefully as it's a lot of spans, and we haven't historically emitted those for writes. - let res = cleanup_path(project_fs, &self.path).shared_error(); + let res = cleanup_path(project_fs, &self.path).map_err(buck2_error::Error::from); // If the materializer has shut down, we ignore this. 
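The tombstone hunk above migrates `BUCK2_TEST_TOMBSTONED_DIGESTS` from the lazy `EnvHelper` static to the `buck2_env_anyhow!` macro; both boil down to "read the variable once, run a converter, cache the parsed set". A rough standalone equivalent of that pattern (assuming, for illustration only, a whitespace-separated list; the real parsing is done by `convert_digests`):

    use std::collections::HashSet;
    use std::sync::OnceLock;

    fn tombstoned_digests() -> Option<&'static HashSet<String>> {
        // Parsed at most once per process, like the cached env-var helpers.
        static CACHE: OnceLock<Option<HashSet<String>>> = OnceLock::new();
        CACHE
            .get_or_init(|| {
                std::env::var("BUCK2_TEST_TOMBSTONED_DIGESTS")
                    .ok()
                    .map(|raw| raw.split_whitespace().map(str::to_owned).collect())
            })
            .as_ref()
    }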
let _ignored = self.command_sender.send_low_priority( diff --git a/app/buck2_execute_impl/src/materializers/deferred/mod.rs b/app/buck2_execute_impl/src/materializers/deferred/mod.rs deleted file mode 100644 index 995ce25af9da3..0000000000000 --- a/app/buck2_execute_impl/src/materializers/deferred/mod.rs +++ /dev/null @@ -1,2223 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -mod clean_stale; -mod extension; -mod file_tree; -mod io_handler; -mod subscriptions; - -#[cfg(test)] -mod tests; -use std::borrow::Cow; -use std::collections::HashSet; -use std::collections::VecDeque; -use std::pin::Pin; -use std::sync::atomic::AtomicU64; -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering; -use std::sync::Arc; -use std::task::Context; -use std::task::Poll; - -use allocative::Allocative; -use anyhow::Context as _; -use async_trait::async_trait; -use buck2_common::file_ops::FileMetadata; -use buck2_common::file_ops::TrackedFileDigest; -use buck2_common::http::HttpClient; -use buck2_common::result::SharedError; -use buck2_common::result::SharedResult; -use buck2_common::result::ToSharedResultExt; -use buck2_common::result::ToUnsharedResultExt; -use buck2_core::directory::unordered_entry_walk; -use buck2_core::directory::DirectoryEntry; -use buck2_core::env_helper::EnvHelper; -use buck2_core::fs::paths::RelativePathBuf; -use buck2_core::fs::project::ProjectRoot; -use buck2_core::fs::project_rel_path::ProjectRelativePath; -use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; -use buck2_core::soft_error; -use buck2_events::dispatch::current_span; -use buck2_events::dispatch::get_dispatcher; -use buck2_events::dispatch::get_dispatcher_opt; -use buck2_events::dispatch::EventDispatcher; -use buck2_events::span::SpanId; -use buck2_execute::artifact_value::ArtifactValue; -use buck2_execute::digest_config::DigestConfig; -use buck2_execute::directory::ActionDirectory; -use buck2_execute::directory::ActionDirectoryEntry; -use buck2_execute::directory::ActionDirectoryMember; -use buck2_execute::directory::ActionSharedDirectory; -use buck2_execute::execute::blocking::BlockingExecutor; -use buck2_execute::materialize::materializer::ArtifactNotMaterializedReason; -use buck2_execute::materialize::materializer::CasDownloadInfo; -use buck2_execute::materialize::materializer::CopiedArtifact; -use buck2_execute::materialize::materializer::DeclareMatchOutcome; -use buck2_execute::materialize::materializer::DeferredMaterializerExtensions; -use buck2_execute::materialize::materializer::HttpDownloadInfo; -use buck2_execute::materialize::materializer::MaterializationError; -use buck2_execute::materialize::materializer::Materializer; -use buck2_execute::materialize::materializer::WriteRequest; -use buck2_execute::output_size::OutputSize; -use buck2_execute::re::manager::ReConnectionManager; -use buck2_wrapper_common::invocation_id::TraceId; -use chrono::DateTime; -use chrono::Duration; -use chrono::Utc; -use derivative::Derivative; -use derive_more::Display; -use dupe::Clone_; -use dupe::Dupe; -use dupe::OptionDupedExt; -use futures::future::BoxFuture; -use futures::future::FutureExt; -use futures::future::Shared; -use futures::future::TryFutureExt; -use futures::stream::BoxStream; -use futures::stream::FuturesOrdered; 
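The deleted mod.rs that follows is built around a single command-loop thread that owns all materializer state: every trait method just enqueues a command on an unbounded channel, and callers that need an answer pass a oneshot sender along. Reduced to a self-contained sketch of that actor shape (toy `Command` type and state, not buck2's):

    use tokio::sync::{mpsc, oneshot};

    enum Command {
        Declare(String),
        IsDeclared(String, oneshot::Sender<bool>),
    }

    fn spawn_command_loop() -> mpsc::UnboundedSender<Command> {
        let (tx, mut rx) = mpsc::unbounded_channel();
        std::thread::spawn(move || {
            // A current-thread runtime: commands are processed one at a time,
            // in arrival order, which is what serializes declare/ensure.
            let rt = tokio::runtime::Builder::new_current_thread()
                .enable_all()
                .build()
                .unwrap();
            rt.block_on(async move {
                let mut declared = std::collections::HashSet::new();
                while let Some(cmd) = rx.recv().await {
                    match cmd {
                        Command::Declare(path) => {
                            declared.insert(path);
                        }
                        Command::IsDeclared(path, reply) => {
                            let _ = reply.send(declared.contains(&path));
                        }
                    }
                }
            });
        });
        tx
    }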
-use futures::stream::Stream; -use futures::stream::StreamExt; -use gazebo::prelude::*; -use itertools::Itertools; -use more_futures::cancellation::CancellationContext; -use pin_project::pin_project; -use thiserror::Error; -use tokio::runtime::Handle; -use tokio::sync::mpsc; -use tokio::sync::mpsc::UnboundedReceiver; -use tokio::sync::oneshot; -use tokio::sync::oneshot::error::TryRecvError; -use tokio::time::Instant; -use tokio::time::Interval; -use tracing::instrument; - -use crate::materializers::deferred::extension::ExtensionCommand; -use crate::materializers::deferred::file_tree::FileTree; -use crate::materializers::deferred::io_handler::DefaultIoHandler; -use crate::materializers::deferred::io_handler::IoHandler; -use crate::materializers::deferred::subscriptions::MaterializerSubscriptionOperation; -use crate::materializers::deferred::subscriptions::MaterializerSubscriptions; -use crate::materializers::immediate; -use crate::materializers::sqlite::MaterializerState; -use crate::materializers::sqlite::MaterializerStateSqliteDb; - -/// Materializer implementation that defers materialization of declared -/// artifacts until they are needed (i.e. `ensure_materialized` is called). -/// -/// # Important -/// -/// This materializer defers both CAS fetches and local copies. Therefore, one -/// needs to be careful when choosing to call `ensure_materialized`. -/// Between `declare` and `ensure` calls, the local files could have changed. -/// -/// This limits us to only "safely" using the materializer within the -/// computation of a build rule, and only to materialize inputs or outputs of -/// the rule, not random artifacts/paths. That's because: -/// - file changes before/after a build are handled by DICE, which invalidates -/// the outputs that depend on it. The materializer ends up having the wrong -/// information about these outputs. But because it's only used within the -/// build rules, the affected rule is recomputed and therefore has its -/// artifacts re-declared. So when `ensure` is called the materializer has -/// up-to-date information about the artifacts. -/// - file changes during a build are not properly supported by Buck and -/// treated as undefined behaviour, so there's no need to worry about them. -#[derive(Allocative)] -pub struct DeferredMaterializerAccessor { - /// Sender to emit commands to the command loop. See `MaterializerCommand`. - #[allocative(skip)] - command_sender: MaterializerSender, - /// Handle of the command loop thread. Aborted on Drop. - /// This thread serves as a queue for declare/ensure requests, making - /// sure only one executes at a time and in the order they came in. - /// TODO(rafaelc): aim to replace it with a simple mutex. - #[allocative(skip)] - command_thread: std::thread::JoinHandle<()>, - /// Determines what to do on `try_materialize_final_artifact`: if true, - /// materializes them, otherwise skips them. - materialize_final_artifacts: bool, - defer_write_actions: bool, - - io: Arc, - - /// Tracked for logging purposes. - materializer_state_info: buck2_data::MaterializerStateInfo, - - stats: Arc, -} - -pub type DeferredMaterializer = DeferredMaterializerAccessor; - -impl Drop for DeferredMaterializerAccessor { - fn drop(&mut self) { - // We don't try to stop the underlying thread, since in practice when we drop the - // DeferredMaterializer we are about to just terminate the process. - } -} - -/// Statistics we collect while operating the Deferred Materializer. 
-#[derive(Allocative, Default)] -pub struct DeferredMaterializerStats { - declares: AtomicU64, - declares_reused: AtomicU64, -} - -static ACCESS_TIME_UPDATE_MAX_BUFFER_SIZE: EnvHelper = - EnvHelper::new("BUCK_ACCESS_TIME_UPDATE_MAX_BUFFER_SIZE"); - -pub struct DeferredMaterializerConfigs { - pub materialize_final_artifacts: bool, - pub defer_write_actions: bool, - pub ttl_refresh: TtlRefreshConfiguration, - pub update_access_times: AccessTimesUpdates, -} - -pub struct TtlRefreshConfiguration { - pub frequency: std::time::Duration, - pub min_ttl: Duration, - pub enabled: bool, -} - -#[derive(Clone, Copy, Debug, Dupe, PartialEq)] -pub enum AccessTimesUpdates { - /// Flushes when the buffer is full and periodically - Full, - ///Flushes only when buffer is full - Partial, - /// Does not flush at all - Disabled, -} - -#[derive(Debug, Error)] -pub enum AccessTimesUpdatesError { - #[error( - "Invalid value for buckconfig `[buck2] update_access_times`. Got `{0}`. Expected one of `full`, `partial` or `disabled`." - )] - InvalidValueForConfig(String), -} - -impl AccessTimesUpdates { - pub fn try_new_from_config_value(config_value: Option<&str>) -> anyhow::Result { - match config_value { - None | Some("") | Some("full") => Ok(AccessTimesUpdates::Full), - Some("partial") => Ok(AccessTimesUpdates::Partial), - Some("disabled") => Ok(AccessTimesUpdates::Disabled), - Some(v) => Err(AccessTimesUpdatesError::InvalidValueForConfig(v.to_owned()).into()), - } - } -} - -#[derive(Copy, Dupe, Clone)] -struct MaterializerCounters { - sent: &'static AtomicUsize, - received: &'static AtomicUsize, -} - -impl MaterializerCounters { - /// New counters. Note that this leaks the underlying data. See comments on MaterializerSender. - fn leak_new() -> Self { - Self { - sent: Box::leak(Box::new(AtomicUsize::new(0))), - received: Box::leak(Box::new(AtomicUsize::new(0))), - } - } - - fn ack_received(&self) { - self.received.fetch_add(1, Ordering::Relaxed); - } - - fn queue_size(&self) -> usize { - self.sent - .load(Ordering::Relaxed) - .saturating_sub(self.received.load(Ordering::Relaxed)) - } -} - -// NOTE: When constructing a MaterializerSender, we just leak the underlying channel. We do this -// because the materializer lives for the lifetime of the process anyway, so there's no value in -// refcounting any of this (though we make many copies of it). -#[derive(Clone_)] -pub struct MaterializerSender { - /// High priority commands are processed in order. - high_priority: Cow<'static, mpsc::UnboundedSender>>, - /// Low priority commands are processed in order relative to each other, but high priority - /// commands can be reordered ahead of them. - low_priority: Cow<'static, mpsc::UnboundedSender>, - counters: MaterializerCounters, -} - -// Unbounded channels can be cheaply cloned. 
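`MaterializerCounters` above leaks two plain atomics because the materializer lives for the lifetime of the process; the queue depth is never stored, only derived as sent minus received, with `saturating_sub` tolerating the race between the two relaxed loads. Usage sketch:

    use std::sync::atomic::{AtomicUsize, Ordering};

    fn counters_demo() {
        // Box::leak turns heap allocations into &'static references that are
        // freely copyable across threads without refcounting.
        let sent: &'static AtomicUsize = Box::leak(Box::new(AtomicUsize::new(0)));
        let received: &'static AtomicUsize = Box::leak(Box::new(AtomicUsize::new(0)));

        sent.fetch_add(3, Ordering::Relaxed); // three commands enqueued
        received.fetch_add(1, Ordering::Relaxed); // one acknowledged
        let queue_size = sent
            .load(Ordering::Relaxed)
            .saturating_sub(received.load(Ordering::Relaxed));
        assert_eq!(queue_size, 2);
    }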
-impl Dupe for MaterializerSender {} - -impl MaterializerSender { - fn send( - &self, - command: MaterializerCommand, - ) -> Result<(), mpsc::error::SendError>> { - let res = self.high_priority.send(command); - self.counters.sent.fetch_add(1, Ordering::Relaxed); - res - } - - fn send_low_priority( - &self, - command: LowPriorityMaterializerCommand, - ) -> Result<(), mpsc::error::SendError> { - let res = self.low_priority.send(command); - self.counters.sent.fetch_add(1, Ordering::Relaxed); - res - } -} - -struct MaterializerReceiver { - high_priority: mpsc::UnboundedReceiver>, - low_priority: mpsc::UnboundedReceiver, - counters: MaterializerCounters, -} - -struct DeferredMaterializerCommandProcessor { - io: Arc, - sqlite_db: Option, - /// The runtime the deferred materializer will spawn futures on. This is normally the runtime - /// used by the rest of Buck. - rt: Handle, - defer_write_actions: bool, - log_buffer: LogBuffer, - /// Keep track of artifact versions to avoid callbacks clobbering state if the state has moved - /// forward. - version_tracker: VersionTracker, - /// Send messages back to the materializer. - command_sender: MaterializerSender, - /// The actual materializer state. - tree: ArtifactTree, - /// Active subscriptions - subscriptions: MaterializerSubscriptions, - /// History of refreshes. This *does* grow without bound, but considering the data is pretty - /// small and we create it infrequently, that's fine. - ttl_refresh_history: Vec, - /// The current ttl_refresh instance, if any exists. - ttl_refresh_instance: Option, anyhow::Result<()>)>>, - cancellations: &'static CancellationContext<'static>, - stats: Arc, - access_times_buffer: Option>, -} - -struct TtlRefreshHistoryEntry { - at: DateTime, - outcome: Option>, -} - -// NOTE: This doesn't derive `Error` and that's on purpose. We don't want to make it easy (or -// possible, in fact) to add `context` to this SharedProcessingError and lose the variant. -#[derive(Debug, Clone, Dupe)] -pub enum SharedMaterializingError { - Error(SharedError), - NotFound { - info: Arc, - debug: Arc, - }, -} - -#[derive(Error, Debug)] -pub enum MaterializeEntryError { - #[error(transparent)] - Error(#[from] anyhow::Error), - - /// The artifact wasn't found. This typically means it expired in the CAS. - #[error("Artifact not found (digest origin: {}, debug: {})", .info.origin.as_display_for_not_found(), .debug)] - NotFound { - info: Arc, - debug: Arc, - }, -} - -impl From for SharedMaterializingError { - fn from(e: MaterializeEntryError) -> SharedMaterializingError { - match e { - MaterializeEntryError::Error(e) => Self::Error(e.into()), - MaterializeEntryError::NotFound { info, debug } => Self::NotFound { info, debug }, - } - } -} - -/// A future that is materializing on a separate task spawned by the materializer -type MaterializingFuture = Shared>>; -/// A future that is cleaning paths on a separate task spawned by the materializer -type CleaningFuture = Shared>>; - -#[derive(Clone)] -enum ProcessingFuture { - Materializing(MaterializingFuture), - Cleaning(CleaningFuture), -} - -/// Message taken by the `DeferredMaterializer`'s command loop. -enum MaterializerCommand { - // [Materializer trait methods -> Command thread] - /// Takes a list of file paths, computes the materialized file paths of all - /// of them, and sends the result through the oneshot. - /// See `Materializer::get_materialized_file_paths` for more information. 
- GetMaterializedFilePaths( - Vec, - oneshot::Sender>>, - ), - - /// Declares that a set of artifacts already exist - DeclareExisting( - Vec<(ProjectRelativePathBuf, ArtifactValue)>, - Option, - Option, - ), - - /// Declares an artifact: its path, value, and how to materialize it. - Declare( - ProjectRelativePathBuf, - ArtifactValue, - Box, // Boxed to avoid growing all variants - EventDispatcher, - ), - - MatchArtifacts( - Vec<(ProjectRelativePathBuf, ArtifactValue)>, - oneshot::Sender, - ), - - /// Declares that given paths are no longer eligible to be materialized by this materializer. - /// This typically should reflect a change made to the underlying filesystem, either because - /// the file was created, or because it was removed. - InvalidateFilePaths(Vec, oneshot::Sender), - - /// Takes a list of artifact paths, and materializes all artifacts in the - /// list that have been declared but not yet been materialized. When the - /// materialization starts, a future is sent back through the provided - /// Sender; this future will be resolved when the materialization - /// concludes (whether successfully or not). - Ensure( - Vec, - EventDispatcher, - oneshot::Sender>>, - ), - - Subscription(MaterializerSubscriptionOperation), - - Extension(Box>), -} - -impl std::fmt::Debug for MaterializerCommand { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - MaterializerCommand::GetMaterializedFilePaths(paths, _) => { - write!(f, "GetMaterializedFilePaths({:?}, _)", paths,) - } - MaterializerCommand::DeclareExisting(paths, current_span, trace_id) => { - write!( - f, - "DeclareExisting({:?}, {:?}, {:?})", - paths, current_span, trace_id - ) - } - MaterializerCommand::Declare(path, value, method, _dispatcher) => { - write!(f, "Declare({:?}, {:?}, {:?})", path, value, method,) - } - MaterializerCommand::MatchArtifacts(paths, _) => { - write!(f, "MatchArtifacts({:?})", paths) - } - MaterializerCommand::InvalidateFilePaths(paths, _) => { - write!(f, "InvalidateFilePaths({:?})", paths) - } - MaterializerCommand::Ensure(paths, _, _) => write!(f, "Ensure({:?}, _)", paths,), - MaterializerCommand::Subscription(op) => write!(f, "Subscription({:?})", op,), - MaterializerCommand::Extension(ext) => write!(f, "Extension({:?})", ext), - } - } -} - -/// Materializer commands that can be reordered with regard to other commands. -#[derive(Debug)] -enum LowPriorityMaterializerCommand { - /// [Materialization task -> Command thread] - /// Notifies the command thread that an artifact was materialized. It takes - /// the artifact path and the version that was materialized, such that if - /// a newer version was declared during materialization - which should not - /// happen under normal conditions - we can react accordingly. - MaterializationFinished { - path: ProjectRelativePathBuf, - timestamp: DateTime, - version: Version, - result: Result<(), SharedMaterializingError>, - }, - - CleanupFinished { - path: ProjectRelativePathBuf, - version: Version, - result: Result<(), SharedMaterializingError>, - }, -} - -/// Tree that stores materialization data for each artifact. Used internally by -/// the `DeferredMaterializer` to keep track of artifacts and how to -/// materialize them. -type ArtifactTree = FileTree>; - -/// The Version of a processing future associated with an artifact. We use this to know if we can -/// clear the processing field when a callback is received, or if more work is expected.
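Commands travel on two unbounded lanes, and the `CommandStream` further below always polls the high-priority receiver first, so a ready high-priority command overtakes any queued low-priority ones. That polling order, as a self-contained sketch (toy `TwoLane` type; the real stream also owns the tickers):

    use std::pin::Pin;
    use std::task::{Context, Poll};

    use futures::stream::Stream;
    use tokio::sync::mpsc::UnboundedReceiver;

    struct TwoLane<H, L> {
        high: UnboundedReceiver<H>,
        low: UnboundedReceiver<L>,
    }

    enum Lane<H, L> {
        High(H),
        Low(L),
    }

    impl<H, L> Stream for TwoLane<H, L> {
        type Item = Lane<H, L>;

        fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
            let this = self.get_mut();
            // High priority is always checked first.
            if let Poll::Ready(Some(h)) = this.high.poll_recv(cx) {
                return Poll::Ready(Some(Lane::High(h)));
            }
            if let Poll::Ready(Some(l)) = this.low.poll_recv(cx) {
                return Poll::Ready(Some(Lane::Low(l)));
            }
            // As in the original: the senders are never dropped, so never end.
            Poll::Pending
        }
    }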
-#[derive(Eq, PartialEq, Copy, Clone, Dupe, Debug, Ord, PartialOrd, Display)] -pub struct Version(u64); - -#[derive(Debug)] -struct VersionTracker(Version); - -impl VersionTracker { - fn new() -> Self { - // Each Declare bumps the version, so that if an artifact is declared - // a second time mid materialization of its previous version, we don't - // incorrectly assume we materialized the latest version. We start with - // 1 because any disk state restored will start with version 0. - Self(Version(1)) - } - - fn current(&self) -> Version { - self.0 - } - - /// Increment the current version, return the previous value - fn next(&mut self) -> Version { - let ret = self.current(); - self.0.0 += 1; - ret - } -} - -pub struct ArtifactMaterializationData { - /// Taken from `deps` of `ArtifactValue`. Used to materialize deps of the artifact. - deps: Option, - stage: ArtifactMaterializationStage, - /// An optional future that may be processing something at the current path - /// (for example, materializing or deleting). Any other future that needs to process - /// this path would need to wait on the existing future to finish. - /// TODO(scottcao): Turn this into a queue of pending futures. - processing: Processing, -} - -/// Represents a processing future + the version at which it was issued. When receiving -/// notifications about processing futures that finish, their changes are only applied if their -/// version is greater than the current version. -/// -/// The version is an internal counter that is shared between the current processing_fut and -/// this data. When multiple operations are queued on a ArtifactMaterializationData, this -/// allows us to identify which one is current. -enum Processing { - Done(Version), - Active { - future: ProcessingFuture, - version: Version, - }, -} - -impl Processing { - fn current_version(&self) -> Version { - match self { - Self::Done(version) => *version, - Self::Active { version, .. } => *version, - } - } - - fn into_future(self) -> Option { - match self { - Self::Done(..) => None, - Self::Active { future, .. } => Some(future), - } - } -} - -/// Fingerprint used to identify `ActionSharedDirectory`. We give it an explicit -/// alias because `TrackedFileDigest` can look confusing. -pub type ActionDirectoryFingerprint = TrackedFileDigest; - -/// Metadata used to identify an artifact entry without all of its content. Stored on materialized -/// artifacts to check matching artifact optimizations. For `ActionSharedDirectory`, we use its fingerprint. -/// For everything else (files, symlinks, and external symlinks), we use `ActionDirectoryMember` -/// as is because it already holds the metadata we need. -#[derive(Clone, Dupe, Debug)] -pub struct ArtifactMetadata(pub ActionDirectoryEntry); - -#[derive(Clone, Dupe, Debug, Display)] -#[display(fmt = "DirectoryMetadata(digest:{},size:{})", fingerprint, total_size)] -pub struct DirectoryMetadata { - pub fingerprint: ActionDirectoryFingerprint, - /// Size on disk, if the artifact is a directory. - /// Storing separately from ArtifactMetadata to avoid calculating when - /// checking matching artifacts. - pub total_size: u64, -} - -impl ArtifactMetadata { - fn matches_entry(&self, entry: &ActionDirectoryEntry) -> bool { - match (&self.0, entry) { - ( - DirectoryEntry::Dir(DirectoryMetadata { fingerprint, .. }), - DirectoryEntry::Dir(dir), - ) => fingerprint == dir.fingerprint(), - (DirectoryEntry::Leaf(l1), DirectoryEntry::Leaf(l2)) => l1 == l2, - _ => false, - } - } - - fn new(entry: &ActionDirectoryEntry) -> Self { - let new_entry = match entry { - DirectoryEntry::Dir(dir) => DirectoryEntry::Dir(DirectoryMetadata { - fingerprint: dir.fingerprint().dupe(), - total_size: entry.calc_output_count_and_bytes().bytes, - }), - DirectoryEntry::Leaf(leaf) => DirectoryEntry::Leaf(leaf.dupe()), - }; - Self(new_entry) - } - - fn size(&self) -> u64 { - match &self.0 { - DirectoryEntry::Dir(dir) => dir.total_size, - DirectoryEntry::Leaf(ActionDirectoryMember::File(file_metadata)) => { - file_metadata.digest.size() - } - DirectoryEntry::Leaf(_) => 0, - } - } -} - -enum ArtifactMaterializationStage { - /// The artifact was declared, but the materialization hasn't started yet. - /// If it did start but end with an error, it returns to this stage. - /// When the artifact was declared, we spawn a deletion future to delete - /// all existing paths that conflict with the output paths. - Declared { - /// Taken from `entry` of `ArtifactValue`. Used to materialize the actual artifact. - entry: ActionDirectoryEntry, - method: Arc, - }, - /// This artifact was materialized - Materialized { - /// Once the artifact is materialized, we don't need the full entry anymore. - /// We can throw away most of the entry and just keep some metadata used to - /// check if materialized artifact matches declared artifact. - metadata: ArtifactMetadata, - /// Used to clean older artifacts from buck-out. - last_access_time: DateTime, - /// Artifact declared by running daemon. - /// Should not be deleted without invalidating DICE nodes, which currently - /// means killing the daemon. - active: bool, - }, -} - -/// Different ways to materialize the files of an artifact. Some artifacts need -/// to be fetched from the CAS, others copied locally. -#[derive(Debug, Display)] -pub enum ArtifactMaterializationMethod { - /// The files must be copied from a local path. - /// - /// The first argument is a map `[dest => src]`, meaning that a file at - /// `{artifact_path}/{dest}/{p}` needs to be copied from `{src}/{p}`. - /// - /// The second argument is the raw list of copied artifacts, as received - /// in `declare_copy`. - #[display(fmt = "local copy")] - LocalCopy(FileTree, Vec), - - #[display(fmt = "write")] - Write(Arc), - - /// The files must be fetched from the CAS. - #[display(fmt = "cas download (action: {})", "info.origin")] - CasDownload { - /// The digest of the action that produced this output - info: Arc, - }, - - /// The file must be fetched over HTTP. - #[display(fmt = "http download ({})", info)] - HttpDownload { info: HttpDownloadInfo }, - - #[cfg(test)] - Test, -} - -trait MaterializationMethodToProto { - fn to_proto(&self) -> buck2_data::MaterializationMethod; } - -impl MaterializationMethodToProto for ArtifactMaterializationMethod { - fn to_proto(&self) -> buck2_data::MaterializationMethod { - match self { - ArtifactMaterializationMethod::LocalCopy { .. } => { - buck2_data::MaterializationMethod::LocalCopy - } - ArtifactMaterializationMethod::CasDownload { .. } => { - buck2_data::MaterializationMethod::CasDownload - } - ArtifactMaterializationMethod::Write { .. } => buck2_data::MaterializationMethod::Write, - ArtifactMaterializationMethod::HttpDownload { ..
} => { - buck2_data::MaterializationMethod::HttpDownload - } - #[cfg(test)] - ArtifactMaterializationMethod::Test => unimplemented!(), - } - } -} - -#[async_trait] -impl Materializer for DeferredMaterializerAccessor { - fn name(&self) -> &str { - "deferred" - } - - async fn declare_existing( - &self, - artifacts: Vec<(ProjectRelativePathBuf, ArtifactValue)>, - ) -> anyhow::Result<()> { - let cmd = MaterializerCommand::DeclareExisting( - artifacts, - current_span(), - get_dispatcher_opt().map(|d| d.trace_id().dupe()), - ); - self.command_sender.send(cmd)?; - Ok(()) - } - - async fn declare_copy_impl( - &self, - path: ProjectRelativePathBuf, - value: ArtifactValue, - srcs: Vec, - _cancellations: &CancellationContext, - ) -> anyhow::Result<()> { - // TODO(rafaelc): get rid of this tree; it'd save a lot of memory. - let mut srcs_tree = FileTree::new(); - for copied_artifact in srcs.iter() { - let dest = copied_artifact.dest.strip_prefix(&path)?; - - { - let mut walk = unordered_entry_walk(copied_artifact.dest_entry.as_ref()); - while let Some((path, entry)) = walk.next() { - if let DirectoryEntry::Leaf(ActionDirectoryMember::File(..)) = entry { - let path = path.get(); - let dest_iter = dest.iter().chain(path.iter()).map(|f| f.to_owned()); - let src = if path.as_str().is_empty() { - copied_artifact.src.clone() - } else { - copied_artifact.src.join(&path) - }; - srcs_tree.insert(dest_iter, src); - } - } - } - } - let cmd = MaterializerCommand::Declare( - path, - value, - Box::new(ArtifactMaterializationMethod::LocalCopy(srcs_tree, srcs)), - get_dispatcher(), - ); - self.command_sender.send(cmd)?; - Ok(()) - } - - async fn declare_cas_many_impl<'a, 'b>( - &self, - info: Arc, - artifacts: Vec<(ProjectRelativePathBuf, ArtifactValue)>, - _cancellations: &CancellationContext, - ) -> anyhow::Result<()> { - for (path, value) in artifacts { - let cmd = MaterializerCommand::Declare( - path, - value, - Box::new(ArtifactMaterializationMethod::CasDownload { info: info.dupe() }), - get_dispatcher(), - ); - self.command_sender.send(cmd)?; - } - Ok(()) - } - - async fn declare_http( - &self, - path: ProjectRelativePathBuf, - info: HttpDownloadInfo, - _cancellations: &CancellationContext, - ) -> anyhow::Result<()> { - let cmd = MaterializerCommand::Declare( - path, - ArtifactValue::file(info.metadata.dupe()), - Box::new(ArtifactMaterializationMethod::HttpDownload { info }), - get_dispatcher(), - ); - self.command_sender.send(cmd)?; - - Ok(()) - } - - async fn declare_write<'a>( - &self, - gen: Box anyhow::Result> + Send + 'a>, - ) -> anyhow::Result> { - if !self.defer_write_actions { - return immediate::write_to_disk( - self.io.fs(), - self.io.io_executor(), - self.io.digest_config(), - gen, - ) - .await; - } - - let contents = gen()?; - - let mut paths = Vec::with_capacity(contents.len()); - let mut values = Vec::with_capacity(contents.len()); - let mut methods = Vec::with_capacity(contents.len()); - - for WriteRequest { - path, - content, - is_executable, - } in contents - { - let digest = TrackedFileDigest::from_content( - &content, - self.io.digest_config().cas_digest_config(), - ); - - let meta = FileMetadata { - digest, - is_executable, - }; - - // NOTE: The zstd crate doesn't release extra capacity of its encoding buffer so it's - // important to do so here (or the compressed Vec is the same capacity as the input!). - let compressed_data = zstd::bulk::compress(&content, 0) - .with_context(|| format!("Error compressing {} bytes", content.len()))? 
- .into_boxed_slice(); - - paths.push(path); - values.push(ArtifactValue::file(meta)); - methods.push(ArtifactMaterializationMethod::Write(Arc::new(WriteFile { - compressed_data, - decompressed_size: content.len(), - is_executable, - }))); - } - - for (path, (value, method)) in std::iter::zip(paths, std::iter::zip(values.iter(), methods)) - { - self.command_sender.send(MaterializerCommand::Declare( - path, - value.dupe(), - Box::new(method), - get_dispatcher(), - ))?; - } - - Ok(values) - } - - async fn declare_match( - &self, - artifacts: Vec<(ProjectRelativePathBuf, ArtifactValue)>, - ) -> anyhow::Result { - let (sender, recv) = oneshot::channel(); - - self.command_sender - .send(MaterializerCommand::MatchArtifacts(artifacts, sender))?; - - let is_match = recv - .await - .context("Recv'ing match future from command thread.")?; - - Ok(is_match.into()) - } - - async fn invalidate_many(&self, paths: Vec) -> anyhow::Result<()> { - let (sender, recv) = oneshot::channel(); - - self.command_sender - .send(MaterializerCommand::InvalidateFilePaths(paths, sender))?; - - // Wait on future to finish before invalidation can continue. - let invalidate_fut = recv.await?; - invalidate_fut.await.unshared_error() - } - - async fn materialize_many( - &self, - artifact_paths: Vec, - ) -> anyhow::Result>> { - let event_dispatcher = get_dispatcher(); - - // TODO: display [materializing] in superconsole - let (sender, recv) = oneshot::channel(); - self.command_sender - .send(MaterializerCommand::Ensure( - artifact_paths, - event_dispatcher, - sender, - )) - .context("Sending Ensure() command.")?; - let materialization_fut = recv - .await - .context("Receiving materialization future from command thread.")?; - Ok(materialization_fut) - } - - async fn try_materialize_final_artifact( - &self, - artifact_path: ProjectRelativePathBuf, - ) -> anyhow::Result { - if self.materialize_final_artifacts { - self.ensure_materialized(vec![artifact_path]).await?; - Ok(true) - } else { - Ok(false) - } - } - - async fn get_materialized_file_paths( - &self, - paths: Vec, - ) -> anyhow::Result>> { - if paths.is_empty() { - return Ok(Vec::new()); - } - let (sender, recv) = oneshot::channel(); - self.command_sender - .send(MaterializerCommand::GetMaterializedFilePaths(paths, sender))?; - Ok(recv.await?) - } - - fn as_deferred_materializer_extension(&self) -> Option<&dyn DeferredMaterializerExtensions> { - Some(self as _) - } - - fn log_materializer_state(&self, events: &EventDispatcher) { - events.instant_event(self.materializer_state_info.clone()) - } - - fn add_snapshot_stats(&self, snapshot: &mut buck2_data::Snapshot) { - snapshot.deferred_materializer_declares = self.stats.declares.load(Ordering::Relaxed); - snapshot.deferred_materializer_declares_reused = - self.stats.declares_reused.load(Ordering::Relaxed); - snapshot.deferred_materializer_queue_size = self.command_sender.counters.queue_size() as _; - } -} - -impl DeferredMaterializerAccessor { - /// Spawns two threads (`materialization_loop` and `command_loop`). - /// Creates and returns a new `DeferredMaterializer` that aborts those - /// threads when dropped. 
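The zstd note above deserves emphasis: per that comment, `zstd::bulk::compress` hands back a `Vec` whose spare capacity can be as large as the input, so the write path boxes the slice to shed the unused tail before holding the data long-term. In isolation:

    fn compress_for_storage(content: &[u8]) -> anyhow::Result<Box<[u8]>> {
        // Level 0 selects zstd's default compression level.
        let compressed: Vec<u8> = zstd::bulk::compress(content, 0)?;
        // into_boxed_slice re-allocates down to the compressed length.
        Ok(compressed.into_boxed_slice())
    }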
- pub fn new( - fs: ProjectRoot, - digest_config: DigestConfig, - buck_out_path: ProjectRelativePathBuf, - re_client_manager: Arc, - io_executor: Arc, - configs: DeferredMaterializerConfigs, - sqlite_db: Option, - sqlite_state: Option, - http_client: HttpClient, - ) -> anyhow::Result { - let (high_priority_sender, high_priority_receiver) = mpsc::unbounded_channel(); - let (low_priority_sender, low_priority_receiver) = mpsc::unbounded_channel(); - - let counters = MaterializerCounters::leak_new(); - - let command_sender = MaterializerSender { - high_priority: Cow::Borrowed(Box::leak(Box::new(high_priority_sender))), - low_priority: Cow::Borrowed(Box::leak(Box::new(low_priority_sender))), - counters, - }; - - let command_receiver = MaterializerReceiver { - high_priority: high_priority_receiver, - low_priority: low_priority_receiver, - counters, - }; - - let stats = Arc::new(DeferredMaterializerStats::default()); - - let num_entries_from_sqlite = sqlite_state.as_ref().map_or(0, |s| s.len()) as u64; - let materializer_state_info = buck2_data::MaterializerStateInfo { - num_entries_from_sqlite, - }; - let access_times_buffer = - (!matches!(configs.update_access_times, AccessTimesUpdates::Disabled)) - .then(HashSet::new); - - let mut tree = ArtifactTree::new(); - if let Some(sqlite_state) = sqlite_state { - for (path, (metadata, last_access_time)) in sqlite_state.into_iter() { - tree.insert( - path.iter().map(|f| f.to_owned()), - Box::new(ArtifactMaterializationData { - deps: None, - stage: ArtifactMaterializationStage::Materialized { - metadata, - last_access_time, - active: false, - }, - processing: Processing::Done(Version(0)), - }), - ); - } - } - - let io = Arc::new(DefaultIoHandler::new( - fs, - digest_config, - buck_out_path, - re_client_manager, - io_executor, - http_client, - )); - - let command_processor = { - let command_sender = command_sender.dupe(); - let rt = Handle::current(); - let stats = stats.dupe(); - let io = io.dupe(); - move |cancellations| DeferredMaterializerCommandProcessor { - io, - sqlite_db, - rt, - defer_write_actions: configs.defer_write_actions, - log_buffer: LogBuffer::new(25), - version_tracker: VersionTracker::new(), - command_sender, - tree, - subscriptions: MaterializerSubscriptions::new(), - ttl_refresh_history: Vec::new(), - ttl_refresh_instance: None, - cancellations, - stats, - access_times_buffer, - } - }; - - let access_time_update_max_buffer_size = ACCESS_TIME_UPDATE_MAX_BUFFER_SIZE - .get_copied()? 
- .unwrap_or(256); - - let command_thread = std::thread::Builder::new() - .name("buck2-dm".to_owned()) - .spawn({ - move || { - let rt = tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .unwrap(); - - let cancellations = CancellationContext::never_cancelled(); - - rt.block_on(command_processor(cancellations).run( - command_receiver, - configs.ttl_refresh, - access_time_update_max_buffer_size, - configs.update_access_times, - )); - } - }) - .context("Cannot start materializer thread")?; - - Ok(Self { - command_thread, - command_sender, - materialize_final_artifacts: configs.materialize_final_artifacts, - defer_write_actions: configs.defer_write_actions, - io, - materializer_state_info, - stats, - }) - } -} - -/// Simple ring buffer for tracking recent commands, to be shown on materializer error -#[derive(Clone)] -struct LogBuffer { - inner: VecDeque, -} - -impl LogBuffer { - pub fn new(capacity: usize) -> Self { - Self { - inner: VecDeque::with_capacity(capacity), - } - } - - pub fn push(&mut self, item: String) { - if self.inner.len() == self.inner.capacity() { - self.inner.pop_front(); - self.inner.push_back(item); - } else { - self.inner.push_back(item); - } - } -} - -impl std::fmt::Display for LogBuffer { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.inner.iter().join("\n")) - } -} - -#[pin_project] -struct CommandStream { - high_priority: UnboundedReceiver>, - low_priority: UnboundedReceiver, - refresh_ttl_ticker: Option, - io_buffer_ticker: Interval, -} - -enum Op { - Command(MaterializerCommand), - LowPriorityCommand(LowPriorityMaterializerCommand), - RefreshTtls, - Tick, -} - -impl Stream for CommandStream { - type Item = Op; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.project(); - - if let Poll::Ready(Some(e)) = this.high_priority.poll_recv(cx) { - return Poll::Ready(Some(Op::Command(e))); - } - - if let Poll::Ready(Some(e)) = this.low_priority.poll_recv(cx) { - return Poll::Ready(Some(Op::LowPriorityCommand(e))); - } - - if let Some(ticker) = this.refresh_ttl_ticker.as_mut() { - if ticker.poll_tick(cx).is_ready() { - return Poll::Ready(Some(Op::RefreshTtls)); - } - } - - if this.io_buffer_ticker.poll_tick(cx).is_ready() { - return Poll::Ready(Some(Op::Tick)); - } - - // We can never be done because we never drop the senders, so let's not bother. - Poll::Pending - } -} - -impl DeferredMaterializerCommandProcessor { - /// Loop that runs for as long as the materializer is alive. - /// - /// It takes commands via the `Materializer` trait methods. 
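`LogBuffer` above is a fixed-size ring: once the deque is full, the oldest entry is evicted, so materializer error reports always carry the most recent commands. Usage sketch against the definitions above (note `VecDeque::with_capacity` may round the true capacity up, so eviction can kick in slightly later than the requested size):

    fn log_buffer_demo() {
        let mut buf = LogBuffer::new(2);
        buf.push("declare a".to_owned());
        buf.push("declare b".to_owned());
        buf.push("ensure c".to_owned()); // evicts the oldest entry once full
        // Display joins the surviving entries with newlines:
        // declare b
        // ensure c
        println!("{}", buf);
    }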
- async fn run( - mut self, - commands: MaterializerReceiver, - ttl_refresh: TtlRefreshConfiguration, - access_time_update_max_buffer_size: usize, - access_time_updates: AccessTimesUpdates, - ) { - let MaterializerReceiver { - high_priority, - low_priority, - counters, - } = commands; - - let refresh_ttl_ticker = if ttl_refresh.enabled { - Some(tokio::time::interval_at( - tokio::time::Instant::now() + ttl_refresh.frequency, - ttl_refresh.frequency, - )) - } else { - None - }; - - let io_buffer_ticker = tokio::time::interval(std::time::Duration::from_secs(5)); - - let mut stream = CommandStream { - high_priority, - low_priority, - refresh_ttl_ticker, - io_buffer_ticker, - }; - - while let Some(op) = stream.next().await { - match op { - Op::Command(command) => { - self.log_buffer.push(format!("{:?}", command)); - self.process_one_command(command); - counters.ack_received(); - self.flush_access_times(access_time_update_max_buffer_size); - } - Op::LowPriorityCommand(command) => { - self.log_buffer.push(format!("{:?}", command)); - self.process_one_low_priority_command(command); - counters.ack_received(); - } - Op::RefreshTtls => { - // It'd be neat to just implement this in the refresh_stream itself and simply - // have this loop implicitly drive it, but we can't do that as the stream's - // and_then callback would have to capture `&tree`. So, instead, we store the - // JoinHandle and just avoid scheduling more than one, though this means we'll - // just miss ticks if we do take longer than a tick to run. - - self.poll_current_ttl_refresh(); - - if self.ttl_refresh_instance.is_none() { - let ttl_refresh = self - .io - .create_ttl_refresh(&self.tree, ttl_refresh.min_ttl) - .map(|fut| { - // We use a channel here and not JoinHandle so we get blocking - // `try_recv`. - let (tx, rx) = oneshot::channel(); - - self.rt.spawn(async { - let res = fut.await; - let _ignored = tx.send((Utc::now(), res)); - }); - - rx - }); - - match ttl_refresh { - Some(ttl_refresh) => { - self.ttl_refresh_instance = Some(ttl_refresh); - } - None => self.ttl_refresh_history.push(TtlRefreshHistoryEntry { - at: Utc::now(), - outcome: None, - }), - } - } - } - Op::Tick => { - if matches!(access_time_updates, AccessTimesUpdates::Full) { - // Force a periodic flush. - self.flush_access_times(0); - }; - } - } - } - } - - fn process_one_command(&mut self, command: MaterializerCommand) { - match command { - // Entry point for `get_materialized_file_paths` calls - MaterializerCommand::GetMaterializedFilePaths(paths, result_sender) => { - let result = - paths.into_map(|p| self.tree.file_contents_path(p, self.io.digest_config())); - result_sender.send(result).ok(); - } - MaterializerCommand::DeclareExisting(artifacts, ..) => { - for (path, artifact) in artifacts { - self.declare_existing(&path, artifact); - } - } - // Entry point for `declare_{copy|cas}` calls - MaterializerCommand::Declare(path, value, method, event_dispatcher) => { - self.declare(&path, value, method); - - if self.subscriptions.should_materialize_eagerly(&path) { - self.materialize_artifact(&path, event_dispatcher); - } - } - MaterializerCommand::MatchArtifacts(paths, sender) => { - let all_matches = paths - .into_iter() - .all(|(path, value)| self.match_artifact(path, value)); - sender.send(all_matches).ok(); - } - MaterializerCommand::InvalidateFilePaths(paths, sender) => { - tracing::trace!( - paths = ?paths, - "invalidate paths", - ); - - let existing_futs = self - .tree - .invalidate_paths_and_collect_futures(paths, self.sqlite_db.as_mut()); - - // TODO: This probably shouldn't return a CleanFuture - sender - .send( - async move { - join_all_existing_futs(existing_futs.shared_error()?) - .await - .shared_error() - } - .boxed() - .shared(), - ) - .ok(); - } - // Entry point for `ensure_materialized` calls - MaterializerCommand::Ensure(paths, event_dispatcher, fut_sender) => { - fut_sender - .send(self.materialize_many_artifacts(paths, event_dispatcher)) - .ok(); - } - MaterializerCommand::Subscription(sub) => sub.execute(self), - MaterializerCommand::Extension(ext) => ext.execute(self), - } - } - - fn process_one_low_priority_command(&mut self, command: LowPriorityMaterializerCommand) { - match command { - // Materialization of artifact succeeded - LowPriorityMaterializerCommand::MaterializationFinished { - path, - timestamp, - version, - result, - } => { - self.materialization_finished(path, timestamp, version, result); - } - LowPriorityMaterializerCommand::CleanupFinished { - path, - version, - result, - } => { - self.tree.cleanup_finished(path, version, result); - } - } - } - - /// Poll the current TTL refresh and remove it if it's done. Add the outcome to - /// ttl_refresh_history. - fn poll_current_ttl_refresh(&mut self) { - self.ttl_refresh_instance = match self.ttl_refresh_instance.take() { - Some(mut curr) => match curr.try_recv() { - Ok((at, outcome)) => { - // Done - self.ttl_refresh_history.push(TtlRefreshHistoryEntry { - at, - outcome: Some(outcome), - }); - None - } - Err(TryRecvError::Empty) => { - // Leave it alone. - Some(curr) - } - Err(TryRecvError::Closed) => { - // Shouldn't really happen unless Tokio is shutting down, but be safe. - self.ttl_refresh_history.push(TtlRefreshHistoryEntry { - at: Utc::now(), - outcome: Some(Err(anyhow::anyhow!("Shutdown"))), - }); - None - } - }, - None => None, - }; - } - - fn is_path_materialized(&self, path: &ProjectRelativePath) -> bool { - match self.tree.prefix_get(&mut path.iter()) { - None => false, - Some(data) => { - matches!( - data.stage, - ArtifactMaterializationStage::Materialized { ..
} - ) - } - } - } - - fn flush_access_times(&mut self, max_buffer_size: usize) -> String { - if let Some(access_times_buffer) = self.access_times_buffer.as_mut() { - let size = access_times_buffer.len(); - if size < max_buffer_size { - return "Access times buffer is not full yet".to_owned(); - } - - let buffer = std::mem::take(access_times_buffer); - let now = Instant::now(); - tracing::debug!("Flushing access times buffer"); - if let Some(sqlite_db) = self.sqlite_db.as_mut() { - if let Err(e) = sqlite_db - .materializer_state_table() - .update_access_times(buffer.iter().collect::>()) - { - soft_error!( - "materializer_materialize_error", - e.context(self.log_buffer.clone()), - quiet: true - ) - .unwrap(); - return "Found error while updating access times in sqlite db".to_owned(); - } - } - return format!( - "Finished flushing {} entries in {} ms", - size, - now.elapsed().as_millis(), - ); - } - "Access time updates are disabled. Consider removing `update_access_times = false` from your .buckconfig".to_owned() - } - - fn materialize_many_artifacts( - &mut self, - paths: Vec, - event_dispatcher: EventDispatcher, - ) -> BoxStream<'static, Result<(), MaterializationError>> { - let tasks = paths.into_iter().filter_map(|path| { - self.materialize_artifact(path.as_ref(), event_dispatcher.dupe()) - .map(move |fut| { - fut.map_err(move |e| match e { - SharedMaterializingError::Error(source) => MaterializationError::Error { - path, - source: source.into(), - }, - SharedMaterializingError::NotFound { info, debug } => { - MaterializationError::NotFound { path, info, debug } - } - }) - }) - }); - - tasks.collect::>().boxed() - } - - fn declare_existing(&mut self, path: &ProjectRelativePath, value: ArtifactValue) { - let metadata = ArtifactMetadata::new(value.entry()); - on_materialization( - self.sqlite_db.as_mut(), - &self.log_buffer, - &self.subscriptions, - path, - &metadata, - Utc::now(), - "materializer_declare_existing_error", - ); - - self.tree.insert( - path.iter().map(|f| f.to_owned()), - Box::new(ArtifactMaterializationData { - deps: value.deps().duped(), - stage: ArtifactMaterializationStage::Materialized { - metadata, - last_access_time: Utc::now(), - active: true, - }, - processing: Processing::Done(self.version_tracker.next()), - }), - ); - } - - fn declare( - &mut self, - path: &ProjectRelativePath, - value: ArtifactValue, - method: Box, - ) { - self.stats.declares.fetch_add(1, Ordering::Relaxed); - - // Check if artifact to be declared is same as artifact that's already materialized. - let mut path_iter = path.iter(); - if let Some(data) = self.tree.prefix_get_mut(&mut path_iter) { - match &data.stage { - ArtifactMaterializationStage::Materialized { - metadata, - last_access_time, - .. - } => { - // NOTE: This is for testing performance when hitting mismatches with disk - // state. Unwrapping isn't ideal, but we can't report errors here. - static FORCE_DECLARE_MISMATCH: EnvHelper = - EnvHelper::new("BUCK2_TEST_FORCE_DECLARE_MISMATCH"); - let force_mismatch = FORCE_DECLARE_MISMATCH - .get() - .unwrap() - .copied() - .unwrap_or_default(); - - if path_iter.next().is_none() - && metadata.matches_entry(value.entry()) - && !force_mismatch - { - // In this case, the entry declared matches the already materialized - // entry on disk, so just update the deps field but leave - // the artifact as materialized. 
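`flush_access_times` above persists access times in batches: nothing is written until the buffer crosses `max_buffer_size`, and the periodic `Op::Tick` forces a write-out by passing a threshold of 0. The gating, reduced to a sketch (toy buffer type; the real code writes to the sqlite materializer-state table):

    use std::collections::HashSet;

    fn flush_access_times(buffer: &mut HashSet<String>, max_buffer_size: usize) -> usize {
        if buffer.len() < max_buffer_size {
            return 0; // keep buffering; a forced flush passes max_buffer_size = 0
        }
        let batch = std::mem::take(buffer);
        // ... persist `batch` here ...
        batch.len()
    }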
- tracing::trace!( - path = %path, - "already materialized, updating deps only", - ); - let deps = value.deps().duped(); - data.stage = ArtifactMaterializationStage::Materialized { - metadata: metadata.dupe(), - last_access_time: *last_access_time, - active: true, - }; - data.deps = deps; - - self.stats.declares_reused.fetch_add(1, Ordering::Relaxed); - - return; - } - } - _ => {} - } - } - - // We don't have a matching artifact. Declare it. - let version = self.version_tracker.next(); - - tracing::trace!( - path = %path, - method = %method, - value = %value.entry(), - version = %version, - "declare artifact", - ); - - // Always invalidate materializer state before actual deleting from filesystem - // so there will never be a moment where artifact is deleted but materializer - // thinks it still exists. - let existing_futs = self - .tree - .invalidate_paths_and_collect_futures(vec![path.to_owned()], self.sqlite_db.as_mut()); - - let existing_futs = ExistingFutures(existing_futs); - - let method = Arc::from(method); - - // Dispatch Write actions eagerly if possible. We can do this if no cleanup is required. We - // also check that there are no deps, though for writes there should never be deps. - - let can_use_write_fast_path = existing_futs.is_empty() && value.deps().is_none(); - - let future = match &*method { - ArtifactMaterializationMethod::Write(write) if can_use_write_fast_path => { - let materialize = self.io.write( - path.to_owned(), - write.dupe(), - version, - self.command_sender.dupe(), - self.cancellations, - ); - ProcessingFuture::Materializing(materialize.shared()) - } - _ => ProcessingFuture::Cleaning(clean_path( - &self.io, - path.to_owned(), - version, - self.command_sender.dupe(), - existing_futs, - &self.rt, - self.cancellations, - )), - }; - - let data = Box::new(ArtifactMaterializationData { - deps: value.deps().duped(), - stage: ArtifactMaterializationStage::Declared { - entry: value.entry().dupe(), - method, - }, - processing: Processing::Active { future, version }, - }); - self.tree.insert(path.iter().map(|f| f.to_owned()), data); - } - - /// Check if artifact to be declared is same as artifact that's already materialized. - #[instrument(level = "debug", skip(self), fields(path = %path, value = %value.entry()))] - fn match_artifact(&mut self, path: ProjectRelativePathBuf, value: ArtifactValue) -> bool { - let mut path_iter = path.iter(); - let data = match self.tree.prefix_get_mut(&mut path_iter) { - Some(data) => data, - None => { - tracing::trace!("overlapping below"); - return false; - } - }; - - // Something was declared above our path. - if path_iter.next().is_some() { - tracing::trace!("overlapping above"); - return false; - } - - let is_match = match &data.stage { - ArtifactMaterializationStage::Materialized { metadata, .. } => { - let is_match = value.entry(); - tracing::trace!("materialized: found {}, is_match: {}", metadata.0, is_match); - metadata.matches_entry(is_match) - } - ArtifactMaterializationStage::Declared { entry, .. } => { - // NOTE: In theory, if something was declared here, we should probably be able to - // just re-declare over it? - let is_match = value.entry() == entry; - tracing::trace!("declared: found {}, is_match: {}", entry, is_match); - is_match - } - }; - - // In practice, having a matching artifact with different deps isn't actually *possible* - // right now, because the deps are derived from the artifact value and we'll always have - // declared them before. 
But, if we have a local action cache and persist that as well as - // materializer state across restarts, then eventually we could have a match with something - // that hasn't had its deps populated yet (since the materializer state does not know about - // deps). - if is_match { - if let Some(deps) = value.deps() { - data.deps = Some(deps.dupe()) - } - } - - is_match - } - - #[instrument(level = "debug", skip(self), fields(path = %path))] - fn materialize_artifact( - &mut self, - mut path: &ProjectRelativePath, - event_dispatcher: EventDispatcher, - ) -> Option { - // Get the data about the artifact, or return early if materializing/materialized - let mut path_iter = path.iter(); - let data = match self.tree.prefix_get_mut(&mut path_iter) { - // Never declared, nothing to do - None => { - tracing::debug!("not known"); - return None; - } - Some(data) => data, - }; - - // Rewind the `path` up to the entry we *actually* found. - for _ in path_iter { - path = path - .parent() - .expect("Path iterator cannot cause us to rewind past the last parent"); - } - - let cleaning_fut = match &data.processing { - Processing::Active { - future: ProcessingFuture::Cleaning(f), - .. - } => Some(f.clone()), - Processing::Active { - future: ProcessingFuture::Materializing(f), - .. - } => { - tracing::debug!("join existing future"); - return Some(f.clone()); - } - Processing::Done(..) => None, - }; - - let deps = data.deps.dupe(); - let check_deps = deps.is_some(); - let entry_and_method = match &mut data.stage { - ArtifactMaterializationStage::Declared { entry, method } => { - Some((entry.dupe(), method.dupe())) - } - ArtifactMaterializationStage::Materialized { - ref mut last_access_time, - .. - } => match check_deps { - true => None, - false => { - if let Some(ref mut buffer) = self.access_times_buffer.as_mut() { - // TODO (torozco): Why is it legal for something to be Materialized + Cleaning? - let timestamp = Utc::now(); - *last_access_time = timestamp; - - // NOTE (T142264535): We mostly expect that artifacts are always declared - // before they are materialized, but there's one case where that doesn't - // happen. In particular, when incremental actions execute, they will trigger - // materialization of outputs from a previous run. The artifact isn't really - // "active" (it's not an output that we'll use), but we do warn here (when we - // probably shouldn't). - // - // if !active { - // tracing::warn!(path = %path, "Expected artifact to be marked active by declare") - // } - if buffer.insert(path.to_buf()) { - tracing::debug!( - "nothing to materialize, adding to access times buffer" - ); - } - } - - return None; - } - }, - }; - - let version = self.version_tracker.next(); - - tracing::debug!( - has_entry_and_method = entry_and_method.is_some(), - method = ?entry_and_method.as_ref().map(|(_, m)| m), - has_deps = deps.is_some(), - version = %version, - cleaning = cleaning_fut.is_some(), - "materialize artifact" - ); - - // If the artifact copies from other artifacts, we must materialize them first - let deps_tasks = match entry_and_method.as_ref() { - Some((_, m)) => match m.as_ref() { - ArtifactMaterializationMethod::CasDownload { .. } - | ArtifactMaterializationMethod::HttpDownload { .. } - | ArtifactMaterializationMethod::Write { .. 
-
-        // If the artifact copies from other artifacts, we must materialize them first
-        let deps_tasks = match entry_and_method.as_ref() {
-            Some((_, m)) => match m.as_ref() {
-                ArtifactMaterializationMethod::CasDownload { .. }
-                | ArtifactMaterializationMethod::HttpDownload { .. }
-                | ArtifactMaterializationMethod::Write { .. } => Vec::new(),
-                ArtifactMaterializationMethod::LocalCopy(_, copied_artifacts) => copied_artifacts
-                    .iter()
-                    .filter_map(|a| {
-                        self.materialize_artifact(a.src.as_ref(), event_dispatcher.dupe())
-                    })
-                    .collect::<Vec<_>>(),
-                #[cfg(test)]
-                ArtifactMaterializationMethod::Test => Vec::new(),
-            },
-            _ => Vec::new(),
-        };
-
-        // The artifact might have symlinks pointing to other artifacts. We must
-        // materialize them as well, to avoid dangling symlinks.
-        let link_deps_tasks = match deps.as_ref() {
-            None => Vec::new(),
-            Some(deps) => self
-                .tree
-                .find_artifacts(deps)
-                .into_iter()
-                .filter_map(|p| self.materialize_artifact(p.as_ref(), event_dispatcher.dupe()))
-                .collect::<Vec<_>>(),
-        };
-
-        // Create a task to await deps and materialize ourselves
-        let path_buf = path.to_buf();
-        let path_buf_dup = path_buf.clone();
-        let io = self.io.dupe();
-        let command_sender = self.command_sender.dupe();
-        let task = self
-            .rt
-            .spawn(async move {
-                let cancellations = CancellationContext::never_cancelled(); // spawned
-
-                // Materialize the deps and this entry. This *must* happen in a try block because we
-                // need to notify the materializer regardless of whether this succeeds or fails.
-
-                let timestamp = Utc::now();
-                let res: Result<(), SharedMaterializingError> = try {
-                    // If there is an existing future trying to delete conflicting paths, we must wait for it
-                    // to finish before we can start materialization.
-                    if let Some(cleaning_fut) = cleaning_fut {
-                        cleaning_fut
-                            .await
-                            .with_context(|| format!(
-                                "Error waiting for a previous future to finish cleaning output path {}",
-                                &path_buf
-                            ))
-                            .map_err(|e| SharedMaterializingError::Error(e.into()))?;
-                    };
-
-                    // In case this is a local copy, we first need to materialize the
-                    // artifacts we are copying from, before we can copy them.
-                    for t in deps_tasks {
-                        t.await?;
-                    }
-
-                    if let Some((entry, method)) = entry_and_method {
-                        let materialize = || {
-                            io.materialize_entry(
-                                path_buf.clone(),
-                                method,
-                                entry.dupe(),
-                                event_dispatcher.dupe(),
-                                cancellations,
-                            )
-                        };
-
-                        // Windows symlinks need to be specified whether they point to a file or a
-                        // directory. We rely on the target file existing to determine this. Ensure
-                        // symlink targets exist before the entry is materialized for Windows. For
-                        // non-Windows, do everything concurrently.
-                        if cfg!(windows) {
-                            for t in link_deps_tasks {
-                                t.await?;
-                            }
-                            materialize().await?;
-                        } else {
-                            materialize().await?;
-                            for t in link_deps_tasks {
-                                t.await?;
-                            }
-                        }
-                    } else {
-                        for t in link_deps_tasks {
-                            t.await?;
-                        }
-                    }
-                };
-
-                // Materialization finished, notify the command thread
-                let _ignored = command_sender.send_low_priority(
-                    LowPriorityMaterializerCommand::MaterializationFinished {
-                        path: path_buf_dup,
-                        timestamp,
-                        version,
-                        result: res.dupe(),
-                    },
-                );
-
-                res
-            })
-            .map(|r| match r {
-                Ok(r) => r,
-                Err(e) => Err(SharedMaterializingError::Error(e.into())), // Turn the JoinError into a SharedError.
-            })
-            .boxed()
-            .shared();
-
-        let data = self.tree.prefix_get_mut(&mut path.iter()).unwrap();
-        data.processing = Processing::Active {
-            future: ProcessingFuture::Materializing(task.clone()),
-            version,
-        };
-
-        Some(task)
-    }
-
-    #[instrument(level = "debug", skip(self, result), fields(path = %artifact_path))]
-    fn materialization_finished(
-        &mut self,
-        artifact_path: ProjectRelativePathBuf,
-        timestamp: DateTime<Utc>,
-        version: Version,
-        result: Result<(), SharedMaterializingError>,
-    ) {
-        match self.tree.prefix_get_mut(&mut artifact_path.iter()) {
-            Some(info) => {
-                if info.processing.current_version() > version {
-                    // We can only unset the future if version matches.
-                    // Otherwise, we may be unsetting a different future from a newer version.
-                    tracing::debug!("version conflict");
-                    return;
-                }
-
-                if result.is_err() {
-                    tracing::debug!("materialization failed, redeclaring artifact");
-                    // Even though materialization failed, something may have still materialized at artifact_path,
-                    // so we need to delete anything at artifact_path before we ever retry materializing it.
-                    // TODO(scottcao): Once command processor accepts an ArtifactTree instead of initializing one,
-                    // add a test case to ensure this behavior.
-                    let version = self.version_tracker.next();
-                    let future = ProcessingFuture::Cleaning(clean_path(
-                        &self.io,
-                        artifact_path.clone(),
-                        version,
-                        self.command_sender.dupe(),
-                        ExistingFutures::empty(),
-                        &self.rt,
-                        self.cancellations,
-                    ));
-                    info.processing = Processing::Active { future, version };
-                } else {
-                    tracing::debug!(has_deps = info.deps.is_some(), "transition to Materialized");
-                    let new_stage = match &info.stage {
-                        ArtifactMaterializationStage::Materialized { .. } => {
-                            // This happens if deps = true. In this case, the entry itself was not
-                            // materialized again, but its deps have been. We need to clear the
-                            // waiting future regardless.
-                            tracing::debug!("artifact is already materialized");
-                            None
-                        }
-                        ArtifactMaterializationStage::Declared {
-                            entry,
-                            method: _method,
-                        } => {
-                            let metadata = ArtifactMetadata::new(entry);
-                            // NOTE: We only insert this artifact if there isn't an in-progress cleanup
-                            // future on this path.
-                            on_materialization(
-                                self.sqlite_db.as_mut(),
-                                &self.log_buffer,
-                                &self.subscriptions,
-                                &artifact_path,
-                                &metadata,
-                                timestamp,
-                                "materializer_finished_error",
-                            );
-
-                            Some(ArtifactMaterializationStage::Materialized {
-                                metadata,
-                                last_access_time: timestamp,
-                                active: true,
-                            })
-                        }
-                    };
-
-                    if let Some(new_stage) = new_stage {
-                        info.stage = new_stage;
-                    }
-
-                    info.processing = Processing::Done(version);
-                }
-            }
-            None => {
-                // NOTE: This can happen if a path got invalidated while it was being materialized.
-                tracing::debug!("materialization_finished but path is vacant!")
-            }
-        }
-    }
-}
-
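// materialization_finished above only applies a completion whose version is still
// current for that path; completions from superseded futures are dropped so they
// cannot clobber newer state. The guard in miniature (hypothetical names):

struct VersionTracker(u64);

impl VersionTracker {
    fn next(&mut self) -> u64 {
        self.0 += 1;
        self.0
    }
}

struct Entry {
    current_version: u64,
    done: bool,
}

impl Entry {
    // Ignore completions for any version other than the newest one issued.
    fn on_finished(&mut self, version: u64) {
        if self.current_version > version {
            return; // stale: a newer future owns this path now
        }
        self.done = true;
    }
}

fn main() {
    let mut vt = VersionTracker(0);
    let v1 = vt.next();
    let v2 = vt.next(); // a re-declare superseded the first future
    let mut entry = Entry { current_version: v2, done: false };
    entry.on_finished(v1); // stale completion: ignored
    assert!(!entry.done);
    entry.on_finished(v2);
    assert!(entry.done);
}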
-/// Run callbacks for an artifact being materialized at `path`.
-fn on_materialization(
-    sqlite_db: Option<&mut MaterializerStateSqliteDb>,
-    log_buffer: &LogBuffer,
-    subscriptions: &MaterializerSubscriptions,
-    path: &ProjectRelativePath,
-    metadata: &ArtifactMetadata,
-    timestamp: DateTime<Utc>,
-    error_name: &'static str,
-) {
-    if let Some(sqlite_db) = sqlite_db {
-        if let Err(e) = sqlite_db
-            .materializer_state_table()
-            .insert(path, metadata, timestamp)
-        {
-            soft_error!(error_name, e.context(log_buffer.clone()), quiet: true).unwrap();
-        }
-    }
-
-    subscriptions.on_materialization_finished(path);
-}
-
-impl ArtifactTree {
-    /// Given a path that's (possibly) not yet materialized, returns the path
-    /// `contents_path` where its contents can be found. Returns Err if the
-    /// contents cannot be found (e.g. if it requires an HTTP or CAS download).
-    ///
-    /// Note that the returned `contents_path` could be the same as `path`.
-    #[instrument(level = "trace", skip(self), fields(path = %path))]
-    fn file_contents_path(
-        &self,
-        path: ProjectRelativePathBuf,
-        digest_config: DigestConfig,
-    ) -> Result<ProjectRelativePathBuf, ArtifactNotMaterializedReason> {
-        let mut path_iter = path.iter();
-        let materialization_data = match self.prefix_get(&mut path_iter) {
-            // Not in tree. Assume it's a source file that doesn't require materialization from materializer.
-            None => return Ok(path),
-            Some(data) => data,
-        };
-        let (entry, method) = match &materialization_data.stage {
-            ArtifactMaterializationStage::Materialized { .. } => {
-                return Ok(path);
-            }
-            ArtifactMaterializationStage::Declared { entry, method } => {
-                (entry.dupe(), method.dupe())
-            }
-        };
-        match method.as_ref() {
-            ArtifactMaterializationMethod::CasDownload { info } => {
-                let path_iter = path_iter.peekable();
-
-                let root_entry = entry.dupe();
-                let mut entry = Some(entry.as_ref().map_dir(|d| d as &dyn ActionDirectory));
-
-                // Check if the path we are asking for exists in this entry.
-                for name in path_iter {
-                    entry = match entry {
-                        Some(DirectoryEntry::Dir(d)) => d.get(name),
-                        _ => break,
-                    }
-                }
-
-                match entry {
-                    Some(entry) => Err(ArtifactNotMaterializedReason::RequiresCasDownload {
-                        path,
-                        // TODO (@torozco): A nicer API to get an Immutable directory here.
-                        entry: entry
-                            .map_dir(|d| {
-                                d.to_builder()
-                                    .fingerprint(digest_config.as_directory_serializer())
-                            })
-                            .map_leaf(|l| l.dupe()),
-                        info: info.dupe(),
-                    }),
-                    None => Err(
-                        ArtifactNotMaterializedReason::DeferredMaterializerCorruption {
-                            path,
-                            entry: root_entry,
-                            info: info.dupe(),
-                        },
-                    ),
-                }
-            }
-            ArtifactMaterializationMethod::HttpDownload { .. }
-            | ArtifactMaterializationMethod::Write { .. } => {
-                // TODO: Do the write directly to RE instead of materializing locally?
-                Err(ArtifactNotMaterializedReason::RequiresMaterialization { path })
-            }
-            // TODO: also record and check materialized_files for LocalCopy
-            ArtifactMaterializationMethod::LocalCopy(srcs, _) => {
-                match srcs.prefix_get(&mut path_iter) {
-                    None => Ok(path),
-                    Some(src_path) => match path_iter.next() {
-                        None => self.file_contents_path(src_path.clone(), digest_config),
-                        // This is not supposed to be reachable, and if it is, there
-                        // is a bug somewhere else. Panic to prevent the bug from
-                        // propagating.
-                        Some(part) => panic!(
-                            "While getting materialized path of {:?}: path {:?} is a file, so subpath {:?} doesn't exist within.",
-                            path, src_path, part,
-                        ),
-                    },
-                }
-            }
-            #[cfg(test)]
-            ArtifactMaterializationMethod::Test => unimplemented!(),
-        }
-    }
-
-    #[instrument(level = "debug", skip(self, result), fields(path = %artifact_path))]
-    fn cleanup_finished(
-        &mut self,
-        artifact_path: ProjectRelativePathBuf,
-        version: Version,
-        result: Result<(), SharedMaterializingError>,
-    ) {
-        match self
-            .prefix_get_mut(&mut artifact_path.iter())
-            .context("Path is vacant")
-        {
-            Ok(info) => {
-                if info.processing.current_version() > version {
-                    // We can only unset the future if version matches.
-                    // Otherwise, we may be unsetting a different future from a newer version.
-                    tracing::debug!("version conflict");
-                    return;
-                }
-
-                if result.is_err() {
-                    // Leave it alone, don't keep retrying.
-                } else {
-                    info.processing = Processing::Done(version);
-                }
-            }
-            Err(e) => {
-                // NOTE: This shouldn't normally happen?
-                soft_error!("cleanup_finished_vacant", e, quiet: true).unwrap();
-            }
-        }
-    }
-
-    /// Removes paths from the tree and returns a list of pairs of removed
-    /// paths to futures that haven't finished.
-    fn invalidate_paths_and_collect_futures(
-        &mut self,
-        paths: Vec<ProjectRelativePathBuf>,
-        sqlite_db: Option<&mut MaterializerStateSqliteDb>,
-    ) -> anyhow::Result<Vec<(ProjectRelativePathBuf, ProcessingFuture)>> {
-        let mut invalidated_paths = Vec::new();
-        let mut futs = Vec::new();
-
-        for path in paths {
-            for (path, data) in self.remove_path(&path) {
-                if let Some(processing_fut) = data.processing.into_future() {
-                    futs.push((path.clone(), processing_fut));
-                }
-                invalidated_paths.push(path);
-            }
-        }
-
-        #[cfg(test)]
-        {
-            for path in &invalidated_paths {
-                if path.as_str() == "test/invalidate/failure" {
-                    return Err(anyhow::anyhow!("Injected error"));
-                }
-            }
-        }
-
-        // We can invalidate the paths here even if materializations are currently running on
-        // the underlying nodes, because when materialization finishes we'll check the version
-        // number.
-        if let Some(sqlite_db) = sqlite_db {
-            sqlite_db
-                .materializer_state_table()
-                .delete(invalidated_paths)
-                .context("Error invalidating paths in materializer state")?;
-        }
-
-        Ok(futs)
-    }
-}
-
-impl<V: 'static> FileTree<V> {
-    /// Finds all the paths in `deps` that are artifacts in `self`
-    fn find_artifacts<D>(&self, deps: &D) -> Vec<ProjectRelativePathBuf>
-    where
-        D: ActionDirectory + ?Sized,
-    {
-        fn walk_deps<V, D>(
-            tree: &FileTree<V>,
-            entry: DirectoryEntry<&D, &ActionDirectoryMember>,
-            path: &mut RelativePathBuf,
-            found_artifacts: &mut Vec<ProjectRelativePathBuf>,
-        ) where
-            D: ActionDirectory + ?Sized,
-        {
-            match tree {
-                FileTree::Data(_) => {
-                    found_artifacts.push(ProjectRelativePathBuf::unchecked_new(path.to_string()));
-                }
-                FileTree::Tree(tree_children) => {
-                    // Not an artifact, but if entry is a directory we can search deeper within
-                    if let DirectoryEntry::Dir(d) = entry {
-                        for (name, child) in d.entries() {
-                            if let Some(subtree) = tree_children.get(name) {
-                                path.push(name);
-                                walk_deps(subtree, child, path, found_artifacts);
-                                path.pop();
-                            }
-                        }
-                    }
-                }
-            }
-        }
-
-        let mut artifacts = Vec::new();
-        walk_deps(
-            self,
-            DirectoryEntry::Dir(deps),
-            &mut RelativePathBuf::new(),
-            &mut artifacts,
-        );
-        artifacts
-    }
-
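// find_artifacts above is a lockstep walk of two trees: it descends only where the
// artifact trie and the dependency directory share a child, and records a hit
// wherever the trie stores data. A std-only model of that double traversal
// (hypothetical types, not the real FileTree/ActionDirectory):

use std::collections::HashMap;

enum Trie {
    Data,                        // an artifact is declared exactly here
    Tree(HashMap<String, Trie>), // keep walking
}

enum Dir {
    Leaf,
    Dir(HashMap<String, Dir>),
}

fn walk(trie: &Trie, dir: &Dir, path: &mut Vec<String>, found: &mut Vec<String>) {
    match trie {
        Trie::Data => found.push(path.join("/")),
        Trie::Tree(children) => {
            if let Dir::Dir(entries) = dir {
                for (name, child_dir) in entries {
                    // Only descend where both trees have this child.
                    if let Some(child_trie) = children.get(name) {
                        path.push(name.clone());
                        walk(child_trie, child_dir, path, found);
                        path.pop();
                    }
                }
            }
        }
    }
}

fn main() {
    let deps = Dir::Dir(HashMap::from([("a".to_owned(), Dir::Leaf)]));
    let trie = Trie::Tree(HashMap::from([("a".to_owned(), Trie::Data)]));
    let mut found = Vec::new();
    walk(&trie, &deps, &mut Vec::new(), &mut found);
    assert_eq!(found, vec!["a".to_owned()]);
}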
-    /// Removes path from FileTree. Returns an iterator of pairs of path and entry removed
-    /// from the tree.
-    fn remove_path(
-        &mut self,
-        path: &ProjectRelativePath,
-    ) -> Box<dyn Iterator<Item = (ProjectRelativePathBuf, V)>> {
-        let mut path_iter = path.iter();
-        let removed = self.remove(&mut path_iter);
-
-        let mut path = path;
-        // Rewind the `path` up to the entry we *actually* found.
-        for _ in path_iter {
-            path = path
-                .parent()
-                .expect("Path iterator cannot cause us to rewind past the last parent");
-        }
-        let path = path.to_owned();
-
-        match removed {
-            Some(tree) => Box::new(
-                tree.into_iter_with_paths()
-                    .map(move |(k, v)| ((path).join(k), v)),
-            ),
-            None => Box::new(std::iter::empty()),
-        }
-    }
-}
-
-/// Wait on all futures in `futs` to finish. Returns an error for the first future
-/// that failed in the Vec.
-async fn join_all_existing_futs(
-    existing_futs: Vec<(ProjectRelativePathBuf, ProcessingFuture)>,
-) -> SharedResult<()> {
-    // We can await inside a loop here because all ProcessingFuture's are spawned.
-    for (path, fut) in existing_futs.into_iter() {
-        match fut {
-            ProcessingFuture::Materializing(f) => {
-                // We don't care about errors from previous materializations.
-                // We are trying to delete anything that has been materialized,
-                // so these errors can be ignored.
-                f.await.ok();
-            }
-            ProcessingFuture::Cleaning(f) => {
-                f.await.with_context(|| {
-                    format!(
-                        "Error waiting for a previous future to finish cleaning output path {}",
-                        path
-                    )
-                })?;
-            }
-        };
-    }
-
-    Ok(())
-}
-
-/// Spawns a future to clean output paths while waiting for any
-/// pending future to finish.
-fn clean_path<T: IoHandler>(
-    io: &Arc<T>,
-    path: ProjectRelativePathBuf,
-    version: Version,
-    command_sender: MaterializerSender<T>,
-    existing_futs: ExistingFutures,
-    rt: &Handle,
-    cancellations: &'static CancellationContext,
-) -> CleaningFuture {
-    if existing_futs.is_empty() {
-        return io
-            .clean_path(path, version, command_sender, cancellations)
-            .shared();
-    }
-
-    rt.spawn({
-        let io = io.dupe();
-        let cancellations = CancellationContext::never_cancelled();
-        async move {
-            join_all_existing_futs(existing_futs.into_result()?).await?;
-            io.clean_path(path, version, command_sender, cancellations)
-                .await
-        }
-    })
-    .map(|r| match r {
-        Ok(r) => r,
-        Err(e) => Err(e.into()), // Turn the JoinError into a SharedError.
-    })
-    .boxed()
-    .shared()
-}
-
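// clean_path above encodes an ordering invariant: every pending future that may
// still own the path has to settle before the delete touches disk, and the whole
// chain is itself exposed as one shared cleaning future. A minimal sketch of that
// ordering, with hypothetical names, on top of the `futures` crate:

use futures::future::{BoxFuture, FutureExt, Shared};

type CleaningFut = Shared<BoxFuture<'static, Result<(), String>>>;

fn clean_after(
    pending: Vec<CleaningFut>,
    clean_io: BoxFuture<'static, Result<(), String>>,
) -> CleaningFut {
    async move {
        // Settle everything that might still be touching the path first. Errors
        // from previous materializations are ignored; we are deleting anyway.
        for fut in pending {
            let _ = fut.await;
        }
        // Only now is it safe to actually remove the path on disk.
        clean_io.await
    }
    .boxed()
    .shared()
}

fn main() {
    let pending = vec![async { Ok(()) }.boxed().shared()];
    let fut = clean_after(pending, async { Ok(()) }.boxed());
    assert_eq!(futures::executor::block_on(fut), Ok(()));
}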
-/// A wrapper type around the Result it contains. Used to expose some extra methods.
-struct ExistingFutures(anyhow::Result<Vec<(ProjectRelativePathBuf, ProcessingFuture)>>);
-
-impl ExistingFutures {
-    fn is_empty(&self) -> bool {
-        self.0.as_ref().map_or(false, |f| f.is_empty())
-    }
-
-    fn into_result(self) -> anyhow::Result<Vec<(ProjectRelativePathBuf, ProcessingFuture)>> {
-        self.0
-    }
-
-    fn empty() -> Self {
-        Self(Ok(Vec::new()))
-    }
-}
-
-#[derive(Derivative)]
-#[derivative(Debug)]
-pub struct WriteFile {
-    #[derivative(Debug = "ignore")]
-    compressed_data: Box<[u8]>,
-    decompressed_size: usize,
-    is_executable: bool,
-}
diff --git a/app/buck2_execute_impl/src/materializers/deferred/subscriptions.rs b/app/buck2_execute_impl/src/materializers/deferred/subscriptions.rs
index 1be02250b7564..3301d8f14d5c8 100644
--- a/app/buck2_execute_impl/src/materializers/deferred/subscriptions.rs
+++ b/app/buck2_execute_impl/src/materializers/deferred/subscriptions.rs
@@ -11,11 +11,13 @@
 use std::collections::HashMap;
 use std::collections::HashSet;
+use std::sync::Arc;
 
 use anyhow::Context as _;
 use async_trait::async_trait;
 use buck2_core::fs::project_rel_path::ProjectRelativePath;
 use buck2_core::fs::project_rel_path::ProjectRelativePathBuf;
+use buck2_events::dispatch::EventDispatcher;
 use buck2_execute::materialize::materializer::DeferredMaterializerSubscription;
 use derivative::Derivative;
 use derive_more::Display;
@@ -78,6 +80,16 @@ impl MaterializerSubscriptions {
     pub(super) fn has_any_subscriptions(&self) -> bool {
         !self.active.is_empty()
     }
+
+    pub(super) fn list_subscribed_paths(&self) -> impl Iterator<Item = &ProjectRelativePath> {
+        let mut seen = HashSet::new();
+
+        self.active
+            .values()
+            .flat_map(|v| v.paths.iter())
+            .map(|p| p.as_ref())
+            .filter(move |p| seen.insert(*p))
+    }
 }
 
 struct SubscriptionData {
@@ -162,6 +174,8 @@ where
         for path in &paths {
             if dm.is_path_materialized(path) {
                 paths_to_report.push(path.to_owned());
+            } else {
+                dm.materialize_artifact(path, EventDispatcher::null());
             }
         }
@@ -206,7 +220,7 @@ where
 pub(super) struct SubscriptionHandle<T: IoHandler> {
     index: SubscriptionIndex,
     #[derivative(Debug = "ignore")]
-    command_sender: MaterializerSender<T>,
+    command_sender: Arc<MaterializerSender<T>>,
     /// Channel to send back notifications.
     #[derivative(Debug = "ignore")]
     receiver: UnboundedReceiver<ProjectRelativePathBuf>,
diff --git a/app/buck2_execute_impl/src/materializers/deferred/tests.rs b/app/buck2_execute_impl/src/materializers/deferred/tests.rs
index 3ce530bc2336b..dbdaf41516f04 100644
--- a/app/buck2_execute_impl/src/materializers/deferred/tests.rs
+++ b/app/buck2_execute_impl/src/materializers/deferred/tests.rs
@@ -7,16 +7,15 @@
  * of this source tree.
*/ -use std::borrow::Cow; use std::collections::HashMap; -use std::collections::HashSet; use buck2_common::file_ops::FileMetadata; +use buck2_core::fs::fs_util::IoError; +use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; use buck2_execute::digest_config::DigestConfig; use buck2_execute::directory::insert_file; use buck2_execute::directory::ActionDirectoryBuilder; use buck2_execute::materialize::materializer::DeferredMaterializerSubscription; -use dupe::Dupe; use super::Version; use super::VersionTracker; @@ -35,10 +34,14 @@ fn test_find_artifacts() -> anyhow::Result<()> { // Build deps with artifacts 1-3, and non-artifacts 1-2 let mut builder = ActionDirectoryBuilder::empty(); - insert_file(&mut builder, &artifact1.join_normalized("f1")?, file.dupe())?; insert_file( &mut builder, - &artifact2.join_normalized("d/f1")?, + &artifact1.join(ForwardRelativePath::new("f1").unwrap()), + file.dupe(), + )?; + insert_file( + &mut builder, + &artifact2.join(ForwardRelativePath::new("d/f1").unwrap()), file.dupe(), )?; insert_file(&mut builder, &artifact3, file.dupe())?; @@ -88,28 +91,63 @@ fn test_remove_path() { mod state_machine { use std::path::Path; + use std::sync::Barrier; + use std::thread; + use anyhow::Context; use assert_matches::assert_matches; + use buck2_core::fs::fs_util; + use buck2_core::fs::fs_util::ReadDir; + use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; + use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; + use buck2_core::fs::paths::RelativePathBuf; + use buck2_core::fs::project::ProjectRootTemp; + use buck2_events::source::ChannelEventSource; use buck2_execute::directory::Symlink; use buck2_execute::directory::INTERNER; - use parking_lot::Mutex; + use buck2_execute::execute::blocking::IoRequest; + use buck2_util::threads::ignore_stack_overflow_checks_for_future; use tokio::time::sleep; use tokio::time::Duration as TokioDuration; use super::*; + use crate::materializers::deferred::clean_stale::CleanInvalidatedPathRequest; + use crate::materializers::deferred::subscriptions::SubscriptionHandle; + use crate::materializers::sqlite::testing_materializer_state_sqlite_db; - #[derive(Debug, Eq, PartialEq)] + #[derive(Debug, Eq, PartialEq, Allocative)] enum Op { Clean, Materialize, + MaterializeError, } + #[derive(Allocative)] struct StubIoHandler { log: Mutex>, fail: Mutex, + fail_paths: Mutex>, // If set, add a sleep when materializing to simulate a long materialization period materialization_config: HashMap, + #[allocative(skip)] + read_dir_barriers: Option>, + #[allocative(skip)] + clean_barriers: Option>, digest_config: DigestConfig, + buck_out_path: ProjectRelativePathBuf, + fs: ProjectRoot, + } + + impl DeferredMaterializerAccessor { + // Ensure that the command thread ends so that the command processor is dropped, + // and the sqlite connection is flushed and closed. + // Needed since the default destructor assumes the process is about to die and shouldn't need to block. 
+ fn abort(mut self) { + self.command_sender + .send(MaterializerCommand::Abort) + .unwrap(); + self.command_thread.take().unwrap().join().unwrap(); + } } impl StubIoHandler { @@ -121,26 +159,84 @@ mod state_machine { *self.fail.lock() = fail; } - pub fn new(materialization_config: HashMap) -> Self { + fn set_fail_on(&self, paths: Vec) { + *self.fail_paths.lock() = paths; + } + + pub fn new(fs: ProjectRoot) -> Self { Self { log: Default::default(), fail: Default::default(), - materialization_config, + fail_paths: Default::default(), + materialization_config: HashMap::new(), + read_dir_barriers: None, + clean_barriers: None, digest_config: DigestConfig::testing_default(), + buck_out_path: make_path("buck-out/v2"), + fs, } } + + pub fn with_materialization_config( + mut self, + materialization_config: HashMap, + ) -> Self { + self.materialization_config = materialization_config; + self + } + + pub fn with_read_dir_barriers( + mut self, + read_dir_barriers: Arc<(Barrier, Barrier)>, + ) -> Self { + self.read_dir_barriers = Some(read_dir_barriers); + self + } + + pub fn with_clean_barriers(mut self, clean_barriers: Arc<(Barrier, Barrier)>) -> Self { + self.clean_barriers = Some(clean_barriers); + self + } + } + + impl StubIoHandler { + fn actually_write(self: &Arc, path: &ProjectRelativePathBuf, write: &Arc) { + let data = zstd::bulk::decompress(&write.compressed_data, write.decompressed_size) + .context("Error decompressing data") + .unwrap(); + self.fs.write_file(path, data, write.is_executable).unwrap(); + } } #[async_trait] impl IoHandler for StubIoHandler { fn write<'a>( self: &Arc, - _path: ProjectRelativePathBuf, - _write: Arc, - _version: Version, - _command_sender: MaterializerSender, + path: ProjectRelativePathBuf, + write: Arc, + version: Version, + command_sender: Arc>, _cancellations: &'a CancellationContext<'a>, ) -> BoxFuture<'a, Result<(), SharedMaterializingError>> { + self.actually_write(&path, &write); + async move { + let _ignored = command_sender.send_low_priority( + LowPriorityMaterializerCommand::MaterializationFinished { + path, + timestamp: Utc::now(), + version, + result: Ok(()), + }, + ); + Ok(()) + } + .boxed() + } + + async fn immediate_write<'a>( + self: &Arc, + _gen: Box anyhow::Result> + Send + 'a>, + ) -> anyhow::Result> { unimplemented!() } @@ -148,9 +244,9 @@ mod state_machine { self: &Arc, path: ProjectRelativePathBuf, version: Version, - command_sender: MaterializerSender, + command_sender: Arc>, _cancellations: &'a CancellationContext, - ) -> BoxFuture<'a, Result<(), SharedError>> { + ) -> BoxFuture<'a, Result<(), buck2_error::Error>> { self.log.lock().push((Op::Clean, path.clone())); async move { @@ -166,6 +262,19 @@ mod state_machine { .boxed() } + async fn clean_invalidated_path<'a>( + self: &Arc, + request: CleanInvalidatedPathRequest, + _cancellations: &'a CancellationContext, + ) -> anyhow::Result<()> { + if let Some(barriers) = self.clean_barriers.as_ref() { + // Allow tests to advance here, execute something and then continue + barriers.as_ref().0.wait(); + barriers.as_ref().1.wait(); + } + Box::new(request).execute(&self.fs) + } + async fn materialize_entry( self: &Arc, path: ProjectRelativePathBuf, @@ -181,11 +290,18 @@ mod state_machine { } None => (), } - self.log.lock().push((Op::Materialize, path)); - if *self.fail.lock() { + if (*self.fail_paths.lock()).contains(&path) || *self.fail.lock() { + self.log.lock().push((Op::MaterializeError, path)); Err(anyhow::anyhow!("Injected error").into()) } else { + match _method.as_ref() { + 
ArtifactMaterializationMethod::Write(write) => { + self.actually_write(&path, write); + } + _ => {} + } + self.log.lock().push((Op::Materialize, path)); Ok(()) } } @@ -198,12 +314,17 @@ mod state_machine { unimplemented!() } - fn buck_out_path(&self) -> &ProjectRelativePathBuf { - unimplemented!() + fn read_dir(&self, path: &AbsNormPathBuf) -> Result { + if let Some(barriers) = self.read_dir_barriers.as_ref() { + // Allow tests to advance here, execute something and then continue + barriers.as_ref().0.wait(); + barriers.as_ref().1.wait(); + } + fs_util::read_dir(path) } - fn io_executor(&self) -> &dyn BlockingExecutor { - unimplemented!() + fn buck_out_path(&self) -> &ProjectRelativePathBuf { + &self.buck_out_path } fn re_client_manager(&self) -> &Arc { @@ -211,7 +332,7 @@ mod state_machine { } fn fs(&self) -> &ProjectRoot { - unimplemented!() + &self.fs } fn digest_config(&self) -> DigestConfig { @@ -221,7 +342,7 @@ mod state_machine { /// A stub command sender. We are calling materializer methods directly so that's all we need. fn channel() -> ( - MaterializerSender, + Arc>, MaterializerReceiver, ) { // We don't use those counts in tests. @@ -236,11 +357,12 @@ mod state_machine { }; ( - MaterializerSender { - high_priority: Cow::Owned(hi_send), - low_priority: Cow::Owned(lo_send), + Arc::new(MaterializerSender { + high_priority: hi_send, + low_priority: lo_send, counters, - }, + clean_guard: Default::default(), + }), MaterializerReceiver { high_priority: hi_recv, low_priority: lo_recv, @@ -253,83 +375,215 @@ mod state_machine { ProjectRelativePath::new(p).unwrap().to_owned() } - fn make_processor( - materialization_config: HashMap, + fn temp_root() -> ProjectRoot { + ProjectRootTemp::new().unwrap().path().clone() + } + + async fn materialize_write( + path: &ProjectRelativePathBuf, + contents: &'static [u8], + handle: &mut SubscriptionHandle, + dm: &DeferredMaterializerAccessor, + ) -> anyhow::Result<()> { + dm.declare_write(Box::new(|| { + Ok(vec![WriteRequest { + path: path.clone(), + content: contents.to_vec(), + is_executable: false, + }]) + })) + .await?; + + handle.subscribe_to_paths(vec![path.clone()]); + + dm.materialize_many(vec![path.clone()]) + .await? 
+ .next() + .await + .unwrap()?; + // block until materialization_finished updates the tree + handle.receiver().recv().await; + Ok(()) + } + + fn make_db(fs: &ProjectRoot) -> (MaterializerStateSqliteDb, Option) { + let (db, state) = testing_materializer_state_sqlite_db( + fs, + HashMap::from([("version".to_owned(), "0".to_owned())]), + HashMap::new(), + None, + ) + .unwrap(); + (db, state.ok()) + } + + fn make_processor_for_io( + io: Arc, ) -> ( DeferredMaterializerCommandProcessor, + Arc>, MaterializerReceiver, + ChannelEventSource, ) { - let (command_sender, command_receiver) = channel(); + let (db, sqlite_state) = make_db(io.fs()); + let tree = ArtifactTree::initialize(sqlite_state); + let (daemon_dispatcher_events, daemon_dispatcher_sink) = + buck2_events::create_source_sink_pair(); + let daemon_dispatcher = EventDispatcher::new(TraceId::null(), daemon_dispatcher_sink); + + let (command_sender, command_receiver) = channel(); ( DeferredMaterializerCommandProcessor { - io: Arc::new(StubIoHandler::new(materialization_config)), - sqlite_db: None, + io, + sqlite_db: Some(db), rt: Handle::current(), defer_write_actions: true, log_buffer: LogBuffer::new(1), version_tracker: VersionTracker::new(), - command_sender, - tree: ArtifactTree::new(), + command_sender: command_sender.dupe(), + tree, subscriptions: MaterializerSubscriptions::new(), ttl_refresh_history: Default::default(), ttl_refresh_instance: Default::default(), cancellations: CancellationContext::testing(), stats: Arc::new(DeferredMaterializerStats::default()), access_times_buffer: Default::default(), + verbose_materializer_log: true, + daemon_dispatcher, }, + command_sender, command_receiver, + daemon_dispatcher_events, + ) + } + + fn make_processor( + materialization_config: HashMap, + ) -> ( + DeferredMaterializerCommandProcessor, + MaterializerReceiver, + ) { + let (dm, _, receiver, _) = make_processor_for_io(Arc::new( + StubIoHandler::new(temp_root()).with_materialization_config(materialization_config), + )); + (dm, receiver) + } + + async fn make_materializer( + io: Arc, + clean_stale_config: Option, + ) -> ( + DeferredMaterializerAccessor, + SubscriptionHandle, + ChannelEventSource, + ) { + let (mut processor, command_sender, command_receiver, daemon_dispatcher_events) = + make_processor_for_io(io.dupe()); + + let handle = { + let (sender, recv) = oneshot::channel(); + MaterializerSubscriptionOperation::Create { sender }.execute(&mut processor); + recv.await.unwrap() + }; + + let command_thread = thread_spawn("buck2-dm", { + move || { + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .unwrap(); + + rt.block_on(processor.run( + command_receiver, + TtlRefreshConfiguration { + frequency: std::time::Duration::default(), + min_ttl: chrono::Duration::zero(), + enabled: false, + }, + 0, + AccessTimesUpdates::Disabled, + clean_stale_config, + )); + } + }) + .context("Cannot start materializer thread") + .unwrap(); + + ( + DeferredMaterializerAccessor { + command_thread: Some(command_thread), + command_sender, + materialize_final_artifacts: true, + defer_write_actions: true, + io, + materializer_state_info: buck2_data::MaterializerStateInfo { + num_entries_from_sqlite: 0, + }, + stats: Arc::new(DeferredMaterializerStats::default()), + verbose_materializer_log: true, + }, + handle, + daemon_dispatcher_events, ) } #[tokio::test] async fn test_declare_reuse() -> anyhow::Result<()> { - let (mut dm, _) = make_processor(Default::default()); - let digest_config = dm.io.digest_config(); + 
ignore_stack_overflow_checks_for_future(async { + let (mut dm, _) = make_processor(Default::default()); + let digest_config = dm.io.digest_config(); - let path = make_path("foo/bar"); - let value = ArtifactValue::file(digest_config.empty_file()); + let path = make_path("foo/bar"); + let value = ArtifactValue::file(digest_config.empty_file()); - dm.declare( - &path, - value.dupe(), - Box::new(ArtifactMaterializationMethod::Test), - ); - assert_eq!(dm.io.take_log(), &[(Op::Clean, path.clone())]); - - let res = dm - .materialize_artifact(&path, EventDispatcher::null()) - .context("Expected a future")? - .await; - assert_eq!(dm.io.take_log(), &[(Op::Materialize, path.clone())]); - - dm.materialization_finished(path.clone(), Utc::now(), dm.version_tracker.current(), res); - assert_eq!(dm.io.take_log(), &[]); - - // When redeclaring the same artifact nothing happens. - dm.declare( - &path, - value.dupe(), - Box::new(ArtifactMaterializationMethod::Test), - ); - assert_eq!(dm.io.take_log(), &[]); - - // When declaring the same artifact but under it, we clean it and it's a new artifact. - let path2 = make_path("foo/bar/baz"); - dm.declare( - &path2, - value.dupe(), - Box::new(ArtifactMaterializationMethod::Test), - ); - assert_eq!(dm.io.take_log(), &[(Op::Clean, path2.clone())]); + dm.declare( + &path, + value.dupe(), + Box::new(ArtifactMaterializationMethod::Test), + ); + assert_eq!(dm.io.take_log(), &[(Op::Clean, path.clone())]); + + let res = dm + .materialize_artifact(&path, EventDispatcher::null()) + .context("Expected a future")? + .await; + assert_eq!(dm.io.take_log(), &[(Op::Materialize, path.clone())]); + + dm.materialization_finished( + path.clone(), + Utc::now(), + dm.version_tracker.current(), + res, + ); + assert_eq!(dm.io.take_log(), &[]); - let _ignore = dm - .materialize_artifact(&path2, EventDispatcher::null()) - .context("Expected a future")? - .await; - assert_eq!(dm.io.take_log(), &[(Op::Materialize, path2.clone())]); + // When redeclaring the same artifact nothing happens. + dm.declare( + &path, + value.dupe(), + Box::new(ArtifactMaterializationMethod::Test), + ); + assert_eq!(dm.io.take_log(), &[]); + + // When declaring the same artifact but under it, we clean it and it's a new artifact. + let path2 = make_path("foo/bar/baz"); + dm.declare( + &path2, + value.dupe(), + Box::new(ArtifactMaterializationMethod::Test), + ); + assert_eq!(dm.io.take_log(), &[(Op::Clean, path2.clone())]); - Ok(()) + let _ignore = dm + .materialize_artifact(&path2, EventDispatcher::null()) + .context("Expected a future")? 
+ .await; + assert_eq!(dm.io.take_log(), &[(Op::Materialize, path2.clone())]); + + Ok(()) + }) + .await } fn make_artifact_value_with_symlink_dep( @@ -356,133 +610,139 @@ mod state_machine { #[tokio::test] async fn test_materialize_symlink_and_target() -> anyhow::Result<()> { - // Construct a tree with a symlink and its target, materialize both at once - let symlink_path = make_path("foo/bar_symlink"); - let target_path = make_path("foo/bar_target"); - let target_from_symlink = RelativePathBuf::from_path(Path::new("bar_target"))?; - - let mut materialization_config = HashMap::new(); - // Materialize the symlink target slowly so that we actually hit the logic point where we - // await for symlink targets and the entry materialization - materialization_config.insert(target_path.clone(), TokioDuration::from_millis(100)); - - let (mut dm, _) = make_processor(materialization_config); - let digest_config = dm.io.digest_config(); - - // Declare symlink target - dm.declare( - &target_path, - ArtifactValue::file(digest_config.empty_file()), - Box::new(ArtifactMaterializationMethod::Test), - ); - assert_eq!(dm.io.take_log(), &[(Op::Clean, target_path.clone())]); - - // Declare symlink - let symlink_value = make_artifact_value_with_symlink_dep( - &target_path, - &target_from_symlink, - digest_config, - )?; - dm.declare( - &symlink_path, - symlink_value, - Box::new(ArtifactMaterializationMethod::Test), - ); - assert_eq!(dm.io.take_log(), &[(Op::Clean, symlink_path.clone())]); - - dm.materialize_artifact(&symlink_path, EventDispatcher::null()) - .context("Expected a future")? - .await - .map_err(|_| anyhow::anyhow!("error materializing"))?; - - let logs = dm.io.take_log(); - if cfg!(unix) { - assert_eq!( - logs, - &[ - (Op::Materialize, symlink_path.clone()), - (Op::Materialize, target_path.clone()) - ] + ignore_stack_overflow_checks_for_future(async { + // Construct a tree with a symlink and its target, materialize both at once + let symlink_path = make_path("foo/bar_symlink"); + let target_path = make_path("foo/bar_target"); + let target_from_symlink = RelativePathBuf::from_path(Path::new("bar_target"))?; + + let mut materialization_config = HashMap::new(); + // Materialize the symlink target slowly so that we actually hit the logic point where we + // await for symlink targets and the entry materialization + materialization_config.insert(target_path.clone(), TokioDuration::from_millis(100)); + + let (mut dm, _) = make_processor(materialization_config); + let digest_config = dm.io.digest_config(); + + // Declare symlink target + dm.declare( + &target_path, + ArtifactValue::file(digest_config.empty_file()), + Box::new(ArtifactMaterializationMethod::Test), ); - } else { - assert_eq!( - logs, - &[ - (Op::Materialize, target_path.clone()), - (Op::Materialize, symlink_path.clone()) - ] + assert_eq!(dm.io.take_log(), &[(Op::Clean, target_path.clone())]); + + // Declare symlink + let symlink_value = make_artifact_value_with_symlink_dep( + &target_path, + &target_from_symlink, + digest_config, + )?; + dm.declare( + &symlink_path, + symlink_value, + Box::new(ArtifactMaterializationMethod::Test), ); - } - Ok(()) + assert_eq!(dm.io.take_log(), &[(Op::Clean, symlink_path.clone())]); + + dm.materialize_artifact(&symlink_path, EventDispatcher::null()) + .context("Expected a future")? 
+ .await + .map_err(|_| anyhow::anyhow!("error materializing"))?; + + let logs = dm.io.take_log(); + if cfg!(unix) { + assert_eq!( + logs, + &[ + (Op::Materialize, symlink_path.clone()), + (Op::Materialize, target_path.clone()) + ] + ); + } else { + assert_eq!( + logs, + &[ + (Op::Materialize, target_path.clone()), + (Op::Materialize, symlink_path.clone()) + ] + ); + } + Ok(()) + }) + .await } #[tokio::test] async fn test_materialize_symlink_first_then_target() -> anyhow::Result<()> { - // Materialize a symlink, then materialize the target. Test that we still - // materialize deps if the main artifact has already been materialized. - let symlink_path = make_path("foo/bar_symlink"); - let target_path = make_path("foo/bar_target"); - let target_from_symlink = RelativePathBuf::from_path(Path::new("bar_target"))?; - - let mut materialization_config = HashMap::new(); - // Materialize the symlink target slowly so that we actually hit the logic point where we - // await for symlink targets and the entry materialization - materialization_config.insert(target_path.clone(), TokioDuration::from_millis(100)); - - let (mut dm, _) = make_processor(materialization_config); - let digest_config = dm.io.digest_config(); - - // Declare symlink - let symlink_value = make_artifact_value_with_symlink_dep( - &target_path, - &target_from_symlink, - digest_config, - )?; - dm.declare( - &symlink_path, - symlink_value, - Box::new(ArtifactMaterializationMethod::Test), - ); - assert_eq!(dm.io.take_log(), &[(Op::Clean, symlink_path.clone())]); - - // Materialize the symlink, at this point the target is not in the tree so it's ignored - let res = dm - .materialize_artifact(&symlink_path, EventDispatcher::null()) - .context("Expected a future")? - .await; - - let logs = dm.io.take_log(); - assert_eq!(logs, &[(Op::Materialize, symlink_path.clone())]); - - // Mark the symlink as materialized - dm.materialization_finished( - symlink_path.clone(), - Utc::now(), - dm.version_tracker.current(), - res, - ); - assert_eq!(dm.io.take_log(), &[]); - - // Declare symlink target - dm.declare( - &target_path, - ArtifactValue::file(digest_config.empty_file()), - Box::new(ArtifactMaterializationMethod::Test), - ); - assert_eq!(dm.io.take_log(), &[(Op::Clean, target_path.clone())]); - - // Materialize the symlink again. - // This time, we don't re-materialize the symlink as that's already been done. - // But we still materialize the target as that has not been materialized yet. - dm.materialize_artifact(&symlink_path, EventDispatcher::null()) - .context("Expected a future")? - .await - .map_err(|_| anyhow::anyhow!("error materializing"))?; - - let logs = dm.io.take_log(); - assert_eq!(logs, &[(Op::Materialize, target_path.clone())]); + ignore_stack_overflow_checks_for_future(async { + // Materialize a symlink, then materialize the target. Test that we still + // materialize deps if the main artifact has already been materialized. 
+ let symlink_path = make_path("foo/bar_symlink"); + let target_path = make_path("foo/bar_target"); + let target_from_symlink = RelativePathBuf::from_path(Path::new("bar_target"))?; + + let mut materialization_config = HashMap::new(); + // Materialize the symlink target slowly so that we actually hit the logic point where we + // await for symlink targets and the entry materialization + materialization_config.insert(target_path.clone(), TokioDuration::from_millis(100)); + + let (mut dm, _) = make_processor(materialization_config); + let digest_config = dm.io.digest_config(); + + // Declare symlink + let symlink_value = make_artifact_value_with_symlink_dep( + &target_path, + &target_from_symlink, + digest_config, + )?; + dm.declare( + &symlink_path, + symlink_value, + Box::new(ArtifactMaterializationMethod::Test), + ); + assert_eq!(dm.io.take_log(), &[(Op::Clean, symlink_path.clone())]); + + // Materialize the symlink, at this point the target is not in the tree so it's ignored + let res = dm + .materialize_artifact(&symlink_path, EventDispatcher::null()) + .context("Expected a future")? + .await; + + let logs = dm.io.take_log(); + assert_eq!(logs, &[(Op::Materialize, symlink_path.clone())]); + + // Mark the symlink as materialized + dm.materialization_finished( + symlink_path.clone(), + Utc::now(), + dm.version_tracker.current(), + res, + ); + assert_eq!(dm.io.take_log(), &[]); - Ok(()) + // Declare symlink target + dm.declare( + &target_path, + ArtifactValue::file(digest_config.empty_file()), + Box::new(ArtifactMaterializationMethod::Test), + ); + assert_eq!(dm.io.take_log(), &[(Op::Clean, target_path.clone())]); + + // Materialize the symlink again. + // This time, we don't re-materialize the symlink as that's already been done. + // But we still materialize the target as that has not been materialized yet. + dm.materialize_artifact(&symlink_path, EventDispatcher::null()) + .context("Expected a future")? 
+ .await + .map_err(|_| anyhow::anyhow!("error materializing"))?; + + let logs = dm.io.take_log(); + assert_eq!(logs, &[(Op::Materialize, target_path.clone())]); + + Ok(()) + }) + .await } #[tokio::test] @@ -508,168 +768,567 @@ mod state_machine { #[tokio::test] async fn test_subscription_notifications() { - let (mut dm, mut channel) = make_processor(Default::default()); - let digest_config = dm.io.digest_config(); - let value = ArtifactValue::file(digest_config.empty_file()); - - let mut handle = { - let (sender, recv) = oneshot::channel(); - MaterializerSubscriptionOperation::Create { sender }.execute(&mut dm); - recv.await.unwrap() - }; + ignore_stack_overflow_checks_for_future(async { + let (mut dm, mut channel) = make_processor(Default::default()); + let digest_config = dm.io.digest_config(); + let value = ArtifactValue::file(digest_config.empty_file()); + + let mut handle = { + let (sender, recv) = oneshot::channel(); + MaterializerSubscriptionOperation::Create { sender }.execute(&mut dm); + recv.await.unwrap() + }; + + let foo_bar = make_path("foo/bar"); + let foo_bar_baz = make_path("foo/bar/baz"); + let bar = make_path("bar"); + let qux = make_path("qux"); + + dm.declare_existing(&foo_bar, value.dupe()); + + handle.subscribe_to_paths(vec![foo_bar_baz.clone(), bar.clone()]); + while let Ok(cmd) = channel.high_priority.try_recv() { + dm.process_one_command(cmd); + } - let foo_bar = make_path("foo/bar"); - let foo_bar_baz = make_path("foo/bar/baz"); - let bar = make_path("bar"); - let qux = make_path("qux"); + dm.declare_existing(&bar, value.dupe()); + dm.declare_existing(&foo_bar_baz, value.dupe()); + dm.declare_existing(&qux, value.dupe()); - dm.declare_existing(&foo_bar, value.dupe()); + let mut paths = Vec::new(); + while let Ok(path) = handle.receiver().try_recv() { + paths.push(path); + } - handle.subscribe_to_paths(vec![foo_bar_baz.clone(), bar.clone()]); - while let Ok(cmd) = channel.high_priority.try_recv() { - dm.process_one_command(cmd); - } + assert_eq!(paths, vec![foo_bar_baz.clone(), bar, foo_bar_baz]); + }) + .await + } - dm.declare_existing(&bar, value.dupe()); - dm.declare_existing(&foo_bar_baz, value.dupe()); - dm.declare_existing(&qux, value.dupe()); + #[tokio::test] + async fn test_subscription_subscribe_also_materializes() -> anyhow::Result<()> { + ignore_stack_overflow_checks_for_future(async { + let (mut dm, mut channel) = make_processor(Default::default()); + let digest_config = dm.io.digest_config(); + let value = ArtifactValue::file(digest_config.empty_file()); + + let mut handle = { + let (sender, recv) = oneshot::channel(); + MaterializerSubscriptionOperation::Create { sender }.execute(&mut dm); + recv.await.unwrap() + }; + + let foo_bar = make_path("foo/bar"); + + dm.declare( + &foo_bar, + value.dupe(), + Box::new(ArtifactMaterializationMethod::Test), + ); - let mut paths = Vec::new(); - while let Ok(path) = handle.receiver().try_recv() { - paths.push(path); - } + handle.subscribe_to_paths(vec![foo_bar.clone()]); + while let Ok(cmd) = channel.high_priority.try_recv() { + dm.process_one_command(cmd); + } - assert_eq!(paths, vec![foo_bar_baz.clone(), bar, foo_bar_baz]); - } + // We need to yield to let the materialization task run. If we had a handle to it, we'd + // just await it, but the subscription isn't retaining those handles. 
+ let mut log = Vec::new(); + while log.len() < 2 { + log.extend(dm.io.take_log()); + tokio::task::yield_now().await; + } - #[tokio::test] - async fn test_subscription_unsubscribe() { - let (mut dm, mut channel) = make_processor(Default::default()); - let digest_config = dm.io.digest_config(); - let value1 = ArtifactValue::file(digest_config.empty_file()); - let value2 = ArtifactValue::dir(digest_config.empty_directory()); + assert_eq!( + &log, + &[ + (Op::Clean, foo_bar.clone()), + (Op::Materialize, foo_bar.clone()) + ] + ); - let mut handle = { - let (sender, recv) = oneshot::channel(); - MaterializerSubscriptionOperation::Create { sender }.execute(&mut dm); - recv.await.unwrap() - }; + // Drain low priority commands. This should include our materialization finished message, + // at which point we'll notify the subscription handle. + while let Ok(cmd) = channel.low_priority.try_recv() { + dm.process_one_low_priority_command(cmd); + } - let path = make_path("foo/bar"); + let mut paths = Vec::new(); + while let Ok(path) = handle.receiver().try_recv() { + paths.push(path); + } + assert_eq!(paths, vec![foo_bar]); - handle.subscribe_to_paths(vec![path.clone()]); - while let Ok(cmd) = channel.high_priority.try_recv() { - dm.process_one_command(cmd); - } + Ok(()) + }) + .await + } - dm.declare_existing(&path, value1.dupe()); + #[tokio::test] + async fn test_subscription_unsubscribe() { + ignore_stack_overflow_checks_for_future(async { + let (mut dm, mut channel) = make_processor(Default::default()); + let digest_config = dm.io.digest_config(); + let value1 = ArtifactValue::file(digest_config.empty_file()); + let value2 = ArtifactValue::dir(digest_config.empty_directory()); + + let mut handle = { + let (sender, recv) = oneshot::channel(); + MaterializerSubscriptionOperation::Create { sender }.execute(&mut dm); + recv.await.unwrap() + }; + + let path = make_path("foo/bar"); + + handle.subscribe_to_paths(vec![path.clone()]); + while let Ok(cmd) = channel.high_priority.try_recv() { + dm.process_one_command(cmd); + } - handle.unsubscribe_from_paths(vec![path.clone()]); - while let Ok(cmd) = channel.high_priority.try_recv() { - dm.process_one_command(cmd); - } + dm.declare_existing(&path, value1.dupe()); - dm.declare_existing(&path, value2.dupe()); + handle.unsubscribe_from_paths(vec![path.clone()]); + while let Ok(cmd) = channel.high_priority.try_recv() { + dm.process_one_command(cmd); + } - let mut paths = Vec::new(); - while let Ok(path) = handle.receiver().try_recv() { - paths.push(path); - } + dm.sqlite_db + .as_mut() + .expect("db missing") + .materializer_state_table() + .delete(vec![path.clone()]) + .context("delete failed") + .unwrap(); + dm.declare_existing(&path, value2.dupe()); + + let mut paths = Vec::new(); + while let Ok(path) = handle.receiver().try_recv() { + paths.push(path); + } - // Expect only one notification - assert_eq!(paths, vec![path]); + // Expect only one notification + assert_eq!(paths, vec![path]); + }) + .await } #[tokio::test] async fn test_invalidate_error() -> anyhow::Result<()> { - let (mut dm, _) = make_processor(Default::default()); - let digest_config = dm.io.digest_config(); + ignore_stack_overflow_checks_for_future(async{ + let (mut dm, _) = make_processor(Default::default()); + let digest_config = dm.io.digest_config(); - let path = make_path("test/invalidate/failure"); - let value1 = ArtifactValue::file(digest_config.empty_file()); - let value2 = ArtifactValue::dir(digest_config.empty_directory()); + let path = make_path("test/invalidate/failure"); + let 
value1 = ArtifactValue::file(digest_config.empty_file()); + let value2 = ArtifactValue::dir(digest_config.empty_directory()); - // Start from having something. - dm.declare_existing(&path, value1); + // Start from having something. + dm.declare_existing(&path, value1); - // This will collect the existing future and invalidate, and then fail in doing so. - dm.declare(&path, value2, Box::new(ArtifactMaterializationMethod::Test)); + // This will collect the existing future and invalidate, and then fail in doing so. + dm.declare(&path, value2, Box::new(ArtifactMaterializationMethod::Test)); - // Now we check that materialization fails. This needs to wait on the previous clean. - let res = dm - .materialize_artifact(&path, EventDispatcher::null()) - .context("Expected a future")? - .await; + // Now we check that materialization fails. This needs to wait on the previous clean. + let res = dm + .materialize_artifact(&path, EventDispatcher::null()) + .context("Expected a future")? + .await; - assert_matches!( + assert_matches!( res, Err(SharedMaterializingError::Error(e)) if format!("{:#}", e).contains("Injected error") ); - // We do not actually get to materializing or cleaning. - assert_eq!(dm.io.take_log(), &[]); + // We do not actually get to materializing or cleaning. + assert_eq!(dm.io.take_log(), &[]); - Ok(()) + Ok(()) + }).await + } + + #[tokio::test] + async fn test_materialize_dep_error() -> anyhow::Result<()> { + ignore_stack_overflow_checks_for_future(async { + // Construct a tree with a symlink and its target, materialize both at once + let symlink_path = make_path("foo/bar_symlink"); + let target_path = make_path("foo/bar_target"); + let target_from_symlink = RelativePathBuf::from_path(Path::new("bar_target"))?; + + let (mut dm, mut channel) = make_processor(Default::default()); + let digest_config = dm.io.digest_config(); + + let target_value = ArtifactValue::file(digest_config.empty_file()); + let symlink_value = make_artifact_value_with_symlink_dep( + &target_path, + &target_from_symlink, + digest_config, + )?; + // Declare and materialize symlink and target + dm.declare( + &target_path, + target_value.clone(), + Box::new(ArtifactMaterializationMethod::Test), + ); + dm.declare( + &symlink_path, + symlink_value.clone(), + Box::new(ArtifactMaterializationMethod::Test), + ); + dm.materialize_artifact(&symlink_path, EventDispatcher::null()) + .context("Expected a future")? + .await + .map_err(|err| anyhow::anyhow!("error materializing {:?}", err))?; + assert_eq!( + dm.io.take_log(), + &[ + (Op::Clean, target_path.clone()), + (Op::Clean, symlink_path.clone()), + (Op::Materialize, target_path.clone()), + (Op::Materialize, symlink_path.clone()), + ] + ); + + // Process materialization_finished, change symlink stage to materialized + while let Ok(cmd) = channel.low_priority.try_recv() { + dm.process_one_low_priority_command(cmd); + } + + // Change symlink target value and re-declare + let content = b"not empty"; + let meta = FileMetadata { + digest: TrackedFileDigest::from_content(content, digest_config.cas_digest_config()), + is_executable: false, + }; + let target_value = ArtifactValue::file(meta); + dm.declare( + &target_path, + target_value, + Box::new(ArtifactMaterializationMethod::Test), + ); + assert_eq!(dm.io.take_log(), &[(Op::Clean, target_path.clone())]); + + // Request to materialize symlink, fail to materialize target + dm.io.set_fail_on(vec![target_path.clone()]); + let res = dm + .materialize_artifact(&symlink_path, EventDispatcher::null()) + .context("Expected a future")? 
+ .await; + assert_matches!( + res, + Err(SharedMaterializingError::Error(e)) if format!("{:#}", e).contains("Injected error") + ); + assert_eq!( + dm.io.take_log(), + &[(Op::MaterializeError, target_path.clone())] + ); + // Process materialization_finished, _only_ target is cleaned, not symlink + while let Ok(cmd) = channel.low_priority.try_recv() { + dm.process_one_low_priority_command(cmd); + } + assert_eq!(dm.io.take_log(), &[(Op::Clean, target_path.clone())]); + + // Request symlink again, target is materialized and symlink materialization succeeds + dm.io.set_fail_on(vec![]); + dm.materialize_artifact(&symlink_path, EventDispatcher::null()) + .context("Expected a future")? + .await + .map_err(|err| anyhow::anyhow!("error materializing 2 {:?}", err))?; + assert_eq!(dm.io.take_log(), &[(Op::Materialize, target_path.clone()), ]); + Ok(()) + }).await } #[tokio::test] async fn test_retry() -> anyhow::Result<()> { - let (mut dm, mut channel) = make_processor(Default::default()); - let digest_config = dm.io.digest_config(); + ignore_stack_overflow_checks_for_future(async { + let (mut dm, mut channel) = make_processor(Default::default()); + let digest_config = dm.io.digest_config(); - let path = make_path("test"); - let value1 = ArtifactValue::file(digest_config.empty_file()); + let path = make_path("test"); + let value1 = ArtifactValue::file(digest_config.empty_file()); - // Declare a value. - dm.declare(&path, value1, Box::new(ArtifactMaterializationMethod::Test)); + // Declare a value. + dm.declare(&path, value1, Box::new(ArtifactMaterializationMethod::Test)); - // Make materializations fail - dm.io.set_fail(true); + // Make materializations fail + dm.io.set_fail(true); - // Materializing it fails. - let res = dm - .materialize_artifact(&path, EventDispatcher::null()) - .context("Expected a future")? - .await; + // Materializing it fails. + let res = dm + .materialize_artifact(&path, EventDispatcher::null()) + .context("Expected a future")? + .await; - assert_matches!( - res, - Err(SharedMaterializingError::Error(e)) if format!("{:#}", e).contains("Injected error") - ); + assert_matches!( + res, + Err(SharedMaterializingError::Error(e)) if format!("{:#}", e).contains("Injected error") + ); - // Unset fail, but we haven't processed materialization_finished yet so this does nothing. - dm.io.set_fail(false); + // Unset fail, but we haven't processed materialization_finished yet so this does nothing. + dm.io.set_fail(false); - // Rejoining the existing future fails. - let res = dm - .materialize_artifact(&path, EventDispatcher::null()) - .context("Expected a future")? - .await; + // Rejoining the existing future fails. + let res = dm + .materialize_artifact(&path, EventDispatcher::null()) + .context("Expected a future")? + .await; - assert_matches!( - res, - Err(SharedMaterializingError::Error(e)) if format!("{:#}", e).contains("Injected error") - ); + assert_matches!( + res, + Err(SharedMaterializingError::Error(e)) if format!("{:#}", e).contains("Injected error") + ); - // Now process cleanup_finished_vacant and materialization_finished. - let mut processed = 0; + // Now process cleanup_finished_vacant and materialization_finished. 
+            let mut processed = 0;

-        while let Ok(cmd) = channel.low_priority.try_recv() {
-            eprintln!("got cmd = {:?}", cmd);
-            dm.process_one_low_priority_command(cmd);
-            processed += 1;
-        }
+            while let Ok(cmd) = channel.low_priority.try_recv() {
+                eprintln!("got cmd = {:?}", cmd);
+                dm.process_one_low_priority_command(cmd);
+                processed += 1;
+            }

-        assert_eq!(processed, 2);
+            assert_eq!(processed, 2);

-        // Materializing works now:
-        let res = dm
-            .materialize_artifact(&path, EventDispatcher::null())
-            .context("Expected a future")?
-            .await;
+            // Materializing works now:
+            let res = dm
+                .materialize_artifact(&path, EventDispatcher::null())
+                .context("Expected a future")?
+                .await;

-        assert_matches!(res, Ok(()));
+            assert_matches!(res, Ok(()));

-        Ok(())
+            Ok(())
+        }).await
     }

+    #[tokio::test]
+    async fn test_clean_stale() -> anyhow::Result<()> {
+        ignore_stack_overflow_checks_for_future(async {
+            let path = make_path("buck-out/v2/gen/foo/bar");
+            let project_root = temp_root();
+            let io = Arc::new(StubIoHandler::new(project_root.clone()));
+            let (dm, mut handle, _) = make_materializer(io.dupe(), None).await;
+            materialize_write(&path, b"contents", &mut handle, &dm).await?;
+            // Drop dm and flush sqlite connection.
+            dm.abort();
+            // Create new materializer from db state so that artifacts are not active
+            let (dm, _, _) = make_materializer(io, None).await;
+
+            let res = dm
+                .clean_stale_artifacts(DateTime::<Utc>::MAX_UTC, false, false)
+                .await?;
+
+            let &buck2_data::CleanStaleStats {
+                stale_artifact_count,
+                stale_bytes,
+                cleaned_artifact_count,
+                cleaned_bytes,
+                ..
+            } = res
+                .stats
+                .as_ref()
+                .unwrap_or_else(|| panic!("{}", res.message.unwrap()));
+            assert_eq!(
+                (
+                    stale_artifact_count,
+                    stale_bytes,
+                    cleaned_artifact_count,
+                    cleaned_bytes
+                ),
+                (1, 8, 1, 8)
+            );
+            Ok(())
+        })
+        .await
+    }
+
+    #[tokio::test]
+    async fn test_clean_stale_interrupt() -> anyhow::Result<()> {
+        ignore_stack_overflow_checks_for_future(async {
+            let path = make_path("buck-out/v2/gen/foo/bar");
+            let project_root = temp_root();
+            let io = Arc::new(StubIoHandler::new(project_root.clone()));
+            let (dm, mut handle, _) = make_materializer(io.dupe(), None).await;
+            materialize_write(&path, b"contents", &mut handle, &dm).await?;
+
+            let read_dir_barriers =
+                Arc::new((std::sync::Barrier::new(2), std::sync::Barrier::new(2)));
+            let io = Arc::new(
+                StubIoHandler::new(project_root.dupe())
+                    .with_read_dir_barriers(read_dir_barriers.dupe()),
+            );
+            let (dm, _, _) = make_materializer(io, None).await;
+
+            // Interrupt while scanning buck-out
+            let dm = Arc::new(dm);
+            let dm_dup = dm.dupe();
+            let fut = dm_dup.clean_stale_artifacts(DateTime::<Utc>::MAX_UTC, false, false);
+            thread::spawn(move || {
+                // Wait until a read_dir request is about to execute
+                read_dir_barriers.0.wait();
+                // Sending a high_priority command will interrupt the processor
+                let noop_command = MaterializerCommand::DeclareExisting(vec![], None, None);
+                let _unused = dm.command_sender.send(noop_command);
+                // Wait after sending so that a second request doesn't start
+                read_dir_barriers.1.wait();
+            });
+            let res = fut.await?;
+            let &buck2_data::CleanStaleStats {
+                stale_artifact_count,
+                stale_bytes,
+                cleaned_artifact_count,
+                cleaned_bytes,
+                ..
+            } = res.stats.as_ref().unwrap();
+            assert_eq!(
+                (
+                    stale_artifact_count,
+                    stale_bytes,
+                    cleaned_artifact_count,
+                    cleaned_bytes
+                ),
+                (0, 0, 0, 0)
+            );
+
+            let clean_barriers = Arc::new((Barrier::new(2), Barrier::new(2)));
+            let io = Arc::new(
+                StubIoHandler::new(project_root.dupe()).with_clean_barriers(clean_barriers.dupe()),
+            );
+            let (dm, _, _) = make_materializer(io, None).await;
+
+            // Interrupt while deleting files
+            let dm = Arc::new(dm);
+            let dm_dup = dm.dupe();
+            let fut = dm_dup.clean_stale_artifacts(DateTime::<Utc>::MAX_UTC, false, false);
+            thread::spawn(move || {
+                // Wait until a single clean request is about to execute
+                clean_barriers.0.wait();
+                // Sending a high_priority command will drop the clean guard immediately (from this thread)
+                let noop_command = MaterializerCommand::DeclareExisting(vec![], None, None);
+                let _unused = dm.command_sender.send(noop_command);
+                // Wait after sending; the executing clean request will complete, but a second
+                // request doesn't start because the single io thread is blocked
+                clean_barriers.1.wait();
+            });
+            let res = fut.await?;
+            let &buck2_data::CleanStaleStats {
+                stale_artifact_count,
+                stale_bytes,
+                cleaned_artifact_count,
+                cleaned_bytes,
+                ..
+            } = res.stats.as_ref().unwrap();
+            assert_eq!(
+                (
+                    stale_artifact_count,
+                    stale_bytes,
+                    cleaned_artifact_count,
+                    cleaned_bytes
+                ),
+                (1, 8, 0, 0)
+            );
+
+            Ok(())
+        })
+        .await
     }
+
+    #[tokio::test]
+    async fn test_clean_stale_schedule() -> anyhow::Result<()> {
+        ignore_stack_overflow_checks_for_future(async {
+            let path = make_path("buck-out/v2/gen/foo/bar");
+            let project_root = temp_root();
+            // Dry run, because it's easier and this test only checks that cleans are triggered by the materializer
+            let clean_stale_config = CleanStaleConfig {
+                clean_period: std::time::Duration::from_secs(1),
+                artifact_ttl: std::time::Duration::from_secs(0),
+                start_offset: std::time::Duration::from_secs(0),
+                dry_run: true,
+            };
+            let io = Arc::new(StubIoHandler::new(project_root.dupe()));
+            let (dm, mut handle, mut daemon_dispatcher_events) =
+                make_materializer(io.dupe(), Some(clean_stale_config)).await;
+            materialize_write(&path, b"contents", &mut handle, &dm).await?;
+
+            let receive_clean_result = |events: &mut ChannelEventSource| {
+                let event = events.receive().unwrap();
+                match event.unpack_buck().unwrap().data() {
+                    buck2_data::buck_event::Data::Instant(instant) => match instant.data.as_ref() {
+                        Some(buck2_data::instant_event::Data::CleanStaleResult(res)) => {
+                            Some(res.clone())
+                        }
+                        _ => None,
+                    },
+                    _ => None,
+                }
+                .unwrap()
+            };
+            // The first clean stale request is scheduled at roughly the same time as materialize_write,
+            // so we may receive an initial clean event before anything is materialized; if so, ignore
+            // events until an artifact is found (retained != 0).
+            // It should only be necessary to wait for a single clean (1 second), but wait for up to 5 just in case.
+            let mut i = 0;
+            while i < 5 {
+                let res = receive_clean_result(&mut daemon_dispatcher_events);
+                let stats = res.stats.unwrap();
+                if let buck2_data::CleanStaleStats {
+                    retained_artifact_count: 0,
+                    ..
+                } = stats
+                {
+                    i += 1;
+                } else {
+                    break;
+                }
+            }
+            let res = receive_clean_result(&mut daemon_dispatcher_events);
+            let buck2_data::CleanStaleStats {
+                retained_artifact_count,
+                ..
+            } = res.stats.unwrap();
+            assert_eq!(retained_artifact_count, 1);
+            // check it's scheduled more than once
+            let res = receive_clean_result(&mut daemon_dispatcher_events);
+            let buck2_data::CleanStaleStats {
+                retained_artifact_count,
+                ..
+            } = res.stats.unwrap();
+            assert_eq!(retained_artifact_count, 1);
+            Ok(())
+        })
+        .await
+    }
+
+    #[tokio::test]
+    async fn test_has_artifact_at() -> anyhow::Result<()> {
+        ignore_stack_overflow_checks_for_future(async {
+            let (mut dm, _) = make_processor(Default::default());
+            let digest_config = dm.io.digest_config();
+
+            let path = make_path("test/dir/path");
+            let value1 = ArtifactValue::dir(digest_config.empty_directory());
+            dm.declare_existing(&path, value1);
+
+            assert!(dm.has_artifact(path.clone()));
+            assert!(!dm.has_artifact(path.join(ForwardRelativePath::new("foo").unwrap())));
+            assert!(!dm.has_artifact(path.parent().unwrap().to_owned()));
+
+            dm.materialize_artifact(&path, EventDispatcher::null());
+            assert!(dm.has_artifact(path.clone()));
+            assert!(!dm.has_artifact(path.join(ForwardRelativePath::new("foo").unwrap())));
+            assert!(!dm.has_artifact(path.parent().unwrap().to_owned()));
+
+            Ok(())
+        })
+        .await
+    }
+}
+
+#[test]
+fn test_materialize_stack_display() {
+    let s = MaterializeStack::Empty;
+    assert_eq!("(empty)", s.to_string());
+    let s = MaterializeStack::Child(&s, ProjectRelativePath::new("foo").unwrap());
+    assert_eq!("foo", s.to_string());
+    let s = MaterializeStack::Child(&s, ProjectRelativePath::new("bar/baz").unwrap());
+    assert_eq!("foo -> bar/baz", s.to_string());
 }
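The display format exercised above comes from a parent-pointer stack of project-relative paths: each frame borrows its parent, and Display walks the chain joining entries with " -> ". A minimal self-contained sketch of that shape (illustrative names; the real MaterializeStack holds ProjectRelativePath entries):

use std::fmt;

enum Stack<'a> {
    Empty,
    Child(&'a Stack<'a>, &'a str),
}

impl fmt::Display for Stack<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Stack::Empty => write!(f, "(empty)"),
            // A child of the empty stack prints just its own path.
            Stack::Child(Stack::Empty, path) => write!(f, "{}", path),
            // Otherwise, recurse into the parent first.
            Stack::Child(parent, path) => write!(f, "{} -> {}", parent, path),
        }
    }
}

Borrowing the parent rather than owning it keeps pushing a frame allocation-free, which matters because a frame is pushed for every directory entry visited during materialization.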
diff --git a/app/buck2_execute_impl/src/materializers/eden.rs b/app/buck2_execute_impl/src/materializers/eden.rs
deleted file mode 100644
index 2ec4147699b70..0000000000000
--- a/app/buck2_execute_impl/src/materializers/eden.rs
+++ /dev/null
@@ -1,341 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */
-
-use std::sync::Arc;
-
-use allocative::Allocative;
-use anyhow::Context as _;
-use async_trait::async_trait;
-use buck2_common::file_ops::FileMetadata;
-use buck2_common::file_ops::TrackedFileDigest;
-use buck2_common::http::HttpClient;
-use buck2_core::directory::DirectoryEntry;
-use buck2_core::directory::FingerprintedDirectory;
-use buck2_core::execution_types::executor_config::RemoteExecutorUseCase;
-use buck2_core::fs::project::ProjectRoot;
-use buck2_core::fs::project_rel_path::ProjectRelativePath;
-use buck2_core::fs::project_rel_path::ProjectRelativePathBuf;
-use buck2_execute::artifact_value::ArtifactValue;
-use buck2_execute::digest::CasDigestToReExt;
-use buck2_execute::digest_config::DigestConfig;
-use buck2_execute::directory::insert_artifact;
-use buck2_execute::directory::ActionDirectoryBuilder;
-use buck2_execute::directory::ActionDirectoryMember;
-use buck2_execute::execute::blobs::ActionBlobs;
-use buck2_execute::execute::blocking::BlockingExecutor;
-use buck2_execute::materialize::eden_api::EdenBuckOut;
-use buck2_execute::materialize::materializer::ArtifactNotMaterializedReason;
-use buck2_execute::materialize::materializer::CasDownloadInfo;
-use buck2_execute::materialize::materializer::CopiedArtifact;
-use buck2_execute::materialize::materializer::DeclareMatchOutcome;
-use buck2_execute::materialize::materializer::HttpDownloadInfo;
-use buck2_execute::materialize::materializer::MaterializationError;
-use buck2_execute::materialize::materializer::Materializer;
-use buck2_execute::materialize::materializer::WriteRequest;
-use buck2_execute::re::manager::ReConnectionManager;
-use dupe::Dupe;
-use futures::stream;
-use futures::stream::BoxStream;
-use futures::stream::StreamExt;
-use gazebo::prelude::*;
-use more_futures::cancellation::CancellationContext;
-use remote_execution::InlinedBlobWithDigest;
-use remote_execution::NamedDigest;
-
-use crate::materializers::immediate::ImmediateMaterializer;
-
-#[derive(Allocative)]
-pub struct EdenMaterializer {
-    re_client_manager: Arc<ReConnectionManager>,
-    delegator: Arc<dyn Materializer>,
-    eden_buck_out: EdenBuckOut,
-    fs: ProjectRoot,
-    re_use_case: RemoteExecutorUseCase,
-    digest_config: DigestConfig,
-}
-
-#[async_trait]
-impl Materializer for EdenMaterializer {
-    fn name(&self) -> &str {
-        "eden"
-    }
-
-    async fn declare_existing(
-        &self,
-        _artifacts: Vec<(ProjectRelativePathBuf, ArtifactValue)>,
-    ) -> anyhow::Result<()> {
-        // Nothing to do, we don't keep track of state;
-        Ok(())
-    }
-
-    /// For Eden, copying can be an expensive operation when a large amount of
-    /// file system materialization is required. Instead, uploading to CAS and then
-    /// declaring on Eden is faster, since not all tree nodes have to actually be materialized.
-    async fn declare_copy_impl(
-        &self,
-        path: ProjectRelativePathBuf,
-        value: ArtifactValue,
-        srcs: Vec<CopiedArtifact>,
-        cancellations: &CancellationContext,
-    ) -> anyhow::Result<()> {
-        // Use eden's remove_paths_recursive because it's faster.
-        self.eden_buck_out
-            .remove_paths_recursive(&self.fs, vec![path.to_owned()], cancellations)
-            .await?;
-
-        // First upload the src to CAS if missing
-        let mut files: Vec<NamedDigest> = Vec::new();
-        let mut directories: Vec<remote_execution::Path> = Vec::new();
-        for copied_artifact in srcs {
-            match copied_artifact.dest_entry {
-                DirectoryEntry::Leaf(ActionDirectoryMember::File(file)) => {
-                    files.push(NamedDigest {
-                        name: self
-                            .fs
-                            .resolve(&copied_artifact.src)
-                            .as_maybe_relativized_str()?
-                            .to_owned(),
-                        digest: file.digest.to_re(),
-                        ..Default::default()
-                    });
-                }
-                DirectoryEntry::Dir(dir) => {
-                    directories.push(remote_execution::Path {
-                        path: self
-                            .fs
-                            .resolve(&copied_artifact.src)
-                            .as_maybe_relativized_str()?
-                            .to_owned(),
-                        follow_symlinks: false,
-                        digest: Some(dir.fingerprint().to_re()),
-                        ..Default::default()
-                    });
-                }
-                DirectoryEntry::Leaf(..) => continue,
-            };
-        }
-
-        self.re_client_manager
-            .get_re_connection()
-            .get_client()
-            .upload_files_and_directories(files, directories, Vec::new(), self.re_use_case)
-            .await?;
-
-        // Second, upload the tree structure that contains directory/file/symlink metadata
-        // TODO(yipu) We don't need to upload to CAS, and we should pass ArtifactValue to eden directly
-        let mut builder = ActionDirectoryBuilder::empty();
-        insert_artifact(&mut builder, path.as_ref(), &value)?;
-        let input_dir = builder.fingerprint(self.digest_config.as_directory_serializer());
-
-        self.re_client_manager
-            .get_re_connection()
-            .get_client()
-            .upload(
-                &self.fs,
-                &self.delegator,
-                &ActionBlobs::new(self.digest_config),
-                ProjectRelativePath::empty(),
-                &input_dir,
-                self.re_use_case,
-                self.digest_config,
-            )
-            .await?;
-
-        self.eden_buck_out
-            .set_path_object_id(&path, &value)
-            .await
-            .with_context(|| {
-                format!(
-                    "[eden] Error declaring artifact {:?} at path {}",
-                    value, path
-                )
-            })?;
-
-        Ok(())
-    }
-
-    // This method calls Eden's setPathObjectId method, which places a placeholder
-    // for a tree or a blob at a path of an Eden mount.
-    async fn declare_cas_many_impl<'a, 'b>(
-        &self,
-        _info: Arc<CasDownloadInfo>,
-        artifacts: Vec<(ProjectRelativePathBuf, ArtifactValue)>,
-        cancellations: &CancellationContext,
-    ) -> anyhow::Result<()> {
-        // Use eden's remove_paths_recursive because it's faster.
-        self.eden_buck_out
-            .remove_paths_recursive(
-                &self.fs,
-                artifacts.map(|(p, _)| p.to_owned()),
-                cancellations,
-            )
-            .await?;
-
-        let futs = artifacts.iter().map(|(path, value)| async move {
-            self.eden_buck_out
-                .set_path_object_id(path, value)
-                .await
-                .with_context(|| {
-                    format!(
-                        "[eden] Error declaring artifact {:?} at path {}",
-                        value, path
-                    )
-                })
-        });
-
-        futures::future::try_join_all(futs).await?;
-
-        Ok(())
-    }
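declare_cas_many_impl above fans out one future per artifact and fails fast on the first Eden error via try_join_all. A self-contained sketch of that pattern; set_path_object_id here is a hypothetical stand-in for the Eden thrift call:

use anyhow::Context as _;

// Stand-in for eden_buck_out.set_path_object_id (assumed signature).
async fn set_path_object_id(_path: &str) -> anyhow::Result<()> {
    Ok(())
}

async fn declare_all(paths: &[String]) -> anyhow::Result<()> {
    // Build one fallible future per path; nothing runs until awaited.
    let futs = paths.iter().map(|p| async move {
        set_path_object_id(p)
            .await
            .with_context(|| format!("[eden] Error declaring artifact at path {}", p))
    });
    // try_join_all polls all futures concurrently and returns the first error.
    futures::future::try_join_all(futs).await?;
    Ok(())
}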
-    async fn declare_http(
-        &self,
-        path: ProjectRelativePathBuf,
-        info: HttpDownloadInfo,
-        cancellations: &CancellationContext,
-    ) -> anyhow::Result<()> {
-        // Use eden's remove_paths_recursive because it's faster.
-        self.eden_buck_out
-            .remove_paths_recursive(&self.fs, vec![path.to_owned()], cancellations)
-            .await?;
-
-        self.delegator.declare_http(path, info, cancellations).await
-    }
-
-    async fn declare_match(
-        &self,
-        _artifacts: Vec<(ProjectRelativePathBuf, ArtifactValue)>,
-    ) -> anyhow::Result<DeclareMatchOutcome> {
-        // This would require querying Eden at a minimum.
-        Ok(DeclareMatchOutcome::NotMatch)
-    }
-
-    async fn declare_write<'a>(
-        &self,
-        gen: Box<dyn FnOnce() -> anyhow::Result<Vec<WriteRequest>> + Send + 'a>,
-    ) -> anyhow::Result<Vec<ArtifactValue>> {
-        let (paths, values) = write_to_cas(
-            self.re_client_manager.as_ref(),
-            self.re_use_case,
-            self.digest_config,
-            gen,
-        )
-        .await?;
-
-        futures::future::try_join_all(
-            std::iter::zip(paths.iter(), values.iter())
-                .map(|(path, value)| self.eden_buck_out.set_path_object_id(path, value)),
-        )
-        .await?;
-
-        Ok(values)
-    }
-
-    async fn materialize_many(
-        &self,
-        artifact_paths: Vec<ProjectRelativePathBuf>,
-    ) -> anyhow::Result<BoxStream<'static, Result<(), MaterializationError>>> {
-        // EdenFS' thrift method ensureMaterialized will force materializing a list of provided paths
-        self.eden_buck_out
-            .ensure_materialized(artifact_paths.clone())
-            .await?;
-        Ok(stream::iter(artifact_paths.into_iter().map(|_| Ok(()))).boxed())
-    }
-
-    async fn get_materialized_file_paths(
-        &self,
-        paths: Vec<ProjectRelativePathBuf>,
-    ) -> anyhow::Result<Vec<Result<ProjectRelativePathBuf, ArtifactNotMaterializedReason>>> {
-        self.delegator.get_materialized_file_paths(paths).await
-    }
-
-    async fn try_materialize_final_artifact(
-        &self,
-        artifact_path: ProjectRelativePathBuf,
-    ) -> anyhow::Result<bool> {
-        self.ensure_materialized(vec![artifact_path]).await?;
-        Ok(true)
-    }
-
-    async fn invalidate_many(&self, paths: Vec<ProjectRelativePathBuf>) -> anyhow::Result<()> {
-        self.delegator.invalidate_many(paths).await
-    }
-
-    fn eden_buck_out(&self) -> Option<&EdenBuckOut> {
-        Some(&self.eden_buck_out)
-    }
-}
-
-impl EdenMaterializer {
-    pub fn new(
-        fs: ProjectRoot,
-        digest_config: DigestConfig,
-        re_client_manager: Arc<ReConnectionManager>,
-        blocking_executor: Arc<dyn BlockingExecutor>,
-        eden_buck_out: EdenBuckOut,
-        http_client: HttpClient,
-    ) -> anyhow::Result<Self> {
-        Ok(Self {
-            re_client_manager: re_client_manager.dupe(),
-            delegator: Arc::new(ImmediateMaterializer::new(
-                fs.dupe(),
-                digest_config,
-                re_client_manager,
-                blocking_executor,
-                http_client,
-            )),
-            eden_buck_out,
-            fs,
-            digest_config,
-            re_use_case: RemoteExecutorUseCase::buck2_default(), // TODO (yipu): Should this be configurable?
-        })
-    }
-}
-
-async fn write_to_cas<'a>(
-    re: &ReConnectionManager,
-    re_use_case: RemoteExecutorUseCase,
-    digest_config: DigestConfig,
-    gen: Box<dyn FnOnce() -> anyhow::Result<Vec<WriteRequest>> + Send + 'a>,
-) -> anyhow::Result<(Vec<ProjectRelativePathBuf>, Vec<ArtifactValue>)> {
-    let contents = gen()?;
-
-    let mut uploads = Vec::with_capacity(contents.len());
-    let mut paths = Vec::with_capacity(contents.len());
-    let mut values = Vec::with_capacity(contents.len());
-
-    for WriteRequest {
-        path,
-        content,
-        is_executable,
-    } in contents
-    {
-        let digest = TrackedFileDigest::from_content(&content, digest_config.cas_digest_config());
-
-        let meta = FileMetadata {
-            digest,
-            is_executable,
-        };
-
-        uploads.push(InlinedBlobWithDigest {
-            blob: content,
-            digest: meta.digest.to_re(),
-            ..Default::default()
-        });
-        paths.push(path);
-        values.push(ArtifactValue::file(meta));
-    }
-
-    re.get_re_connection()
-        .get_client()
-        .upload_files_and_directories(Vec::new(), Vec::new(), uploads, re_use_case)
-        .await?;
-
-    Ok((paths, values))
-}
diff --git a/app/buck2_execute_impl/src/materializers/immediate.rs b/app/buck2_execute_impl/src/materializers/immediate.rs
index b4e55a3807d5b..3981980b67a12 100644
--- a/app/buck2_execute_impl/src/materializers/immediate.rs
+++ b/app/buck2_execute_impl/src/materializers/immediate.rs
@@ -14,11 +14,13 @@ use anyhow::Context;
 use async_trait::async_trait;
 use buck2_common::file_ops::FileMetadata;
 use buck2_common::file_ops::TrackedFileDigest;
-use buck2_common::http::HttpClient;
-use buck2_core::directory::unordered_entry_walk;
-use buck2_core::directory::DirectoryEntry;
 use buck2_core::fs::project::ProjectRoot;
 use buck2_core::fs::project_rel_path::ProjectRelativePathBuf;
+use buck2_directory::directory::directory::Directory;
+use buck2_directory::directory::directory_iterator::DirectoryIterator;
+use buck2_directory::directory::directory_iterator::DirectoryIteratorPathStack;
+use buck2_directory::directory::entry::DirectoryEntry;
+use buck2_directory::directory::walk::unordered_entry_walk;
 use buck2_execute::artifact_value::ArtifactValue;
 use buck2_execute::digest::CasDigestToReExt;
 use buck2_execute::digest_config::DigestConfig;
@@ -36,12 +38,13 @@ use buck2_execute::materialize::materializer::MaterializationError;
 use buck2_execute::materialize::materializer::Materializer;
 use buck2_execute::materialize::materializer::WriteRequest;
 use buck2_execute::re::manager::ReConnectionManager;
+use buck2_futures::cancellation::CancellationContext;
+use buck2_http::HttpClient;
 use dupe::Dupe;
 use futures::stream;
 use futures::stream::BoxStream;
 use futures::stream::StreamExt;
 use gazebo::prelude::*;
-use more_futures::cancellation::CancellationContext;
 use remote_execution::NamedDigest;
 use remote_execution::NamedDigestWithPermissions;
@@ -193,6 +196,11 @@ impl Materializer for ImmediateMaterializer {
         Ok(DeclareMatchOutcome::NotMatch)
     }

+    async fn has_artifact_at(&self, _path: ProjectRelativePathBuf) -> anyhow::Result<bool> {
+        // This materializer does not keep track of state
+        Ok(false)
+    }
+
     async fn declare_write<'a>(
         &self,
         gen: Box<dyn FnOnce() -> anyhow::Result<Vec<WriteRequest>> + Send + 'a>,
@@ -298,14 +306,14 @@ pub async fn cas_download<'a, 'b>(
     let mut files = Vec::new();
     for (path, value) in artifacts.iter() {
-        let mut walk = unordered_entry_walk(value.entry().as_ref());
+        let mut walk = unordered_entry_walk(value.entry().as_ref().map_dir(Directory::as_ref));
         while let Some((entry_path, entry)) = walk.next() {
             if let DirectoryEntry::Leaf(ActionDirectoryMember::File(m)) = entry {
                 files.push(NamedDigestWithPermissions {
                     named_digest: NamedDigest {
                        digest: m.digest.to_re(),
                         name: fs
-                            .resolve(&path.join_normalized(entry_path.get())?)
+                            .resolve(&path.join(entry_path.get()))
                             .as_maybe_relativized_str()?
                             .to_owned(),
                         ..Default::default()
diff --git a/app/buck2_execute_impl/src/materializers/io.rs b/app/buck2_execute_impl/src/materializers/io.rs
index ba9ed0060b16e..acbdf9779ee98 100644
--- a/app/buck2_execute_impl/src/materializers/io.rs
+++ b/app/buck2_execute_impl/src/materializers/io.rs
@@ -9,15 +9,17 @@
 use std::collections::HashMap;

-use buck2_core::directory::DirectoryEntry;
 use buck2_core::fs::fs_util;
 use buck2_core::fs::paths::abs_norm_path::AbsNormPath;
 use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf;
 use buck2_core::fs::project::ProjectRoot;
 use buck2_core::fs::project_rel_path::ProjectRelativePathBuf;
+use buck2_directory::directory::directory::Directory;
+use buck2_directory::directory::entry::DirectoryEntry;
 use buck2_execute::directory::ActionDirectory;
 use buck2_execute::directory::ActionDirectoryEntry;
 use buck2_execute::directory::ActionDirectoryMember;
+use buck2_execute::directory::ActionDirectoryRef;
 use buck2_execute::directory::ActionSharedDirectory;
 use buck2_execute::execute::blocking::IoRequest;
@@ -58,7 +60,12 @@ where
             fs_util::create_dir_all(parent)?;
         }
     }
-    materialize_recursively(entry, &mut dest, materialize_dirs_and_syms, &mut file_src)
+    materialize_recursively(
+        entry.map_dir(|d| Directory::as_ref(d)),
+        &mut dest,
+        materialize_dirs_and_syms,
+        &mut file_src,
+    )
 }

 /// Materializes the directories and symlinks of an entry at `dest`. Files
@@ -121,15 +128,15 @@ where
     materialize(entry, dest.as_ref(), false, file_src)
 }

-fn materialize_recursively<F, D>(
-    entry: DirectoryEntry<&D, &ActionDirectoryMember>,
+fn materialize_recursively<'a, F, D>(
+    entry: DirectoryEntry<D, &ActionDirectoryMember>,
     dest: &mut AbsNormPathBuf,
     materialize_dirs_and_syms: bool,
     file_src: &mut F,
 ) -> anyhow::Result<()>
 where
     F: FnMut(&AbsNormPath) -> Option<AbsNormPathBuf>,
-    D: ActionDirectory + ?Sized,
+    D: ActionDirectoryRef<'a>,
 {
     match entry {
         DirectoryEntry::Dir(d) => {
diff --git a/app/buck2_execute_impl/src/materializers/mod.rs b/app/buck2_execute_impl/src/materializers/mod.rs
deleted file mode 100644
index 956760b0a0375..0000000000000
--- a/app/buck2_execute_impl/src/materializers/mod.rs
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */
-
-#[cfg(any(fbcode_build, cargo_internal_build))]
-pub mod eden;
-
-pub mod deferred;
-pub mod immediate;
-pub mod io;
-pub mod sqlite;
diff --git a/app/buck2_execute_impl/src/materializers/sqlite.rs b/app/buck2_execute_impl/src/materializers/sqlite.rs
index 404a78175c516..f5e8ff011e845 100644
--- a/app/buck2_execute_impl/src/materializers/sqlite.rs
+++ b/app/buck2_execute_impl/src/materializers/sqlite.rs
@@ -17,13 +17,15 @@ use buck2_common::file_ops::FileDigest;
 use buck2_common::file_ops::FileMetadata;
 use buck2_common::file_ops::TrackedFileDigest;
 use buck2_common::sqlite::KeyValueSqliteTable;
-use buck2_core::directory::DirectoryEntry;
 use buck2_core::fs::fs_util;
 use buck2_core::fs::paths::abs_norm_path::AbsNormPath;
 use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf;
 use buck2_core::fs::paths::file_name::FileName;
+use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf;
+use buck2_core::fs::project::ProjectRoot;
 use buck2_core::fs::project_rel_path::ProjectRelativePath;
 use buck2_core::fs::project_rel_path::ProjectRelativePathBuf;
+use buck2_directory::directory::entry::DirectoryEntry;
 use buck2_execute::digest_config::DigestConfig;
 use buck2_execute::directory::ActionDirectoryMember;
 use buck2_execute::directory::Symlink;
@@ -39,7 +41,6 @@ use itertools::Itertools;
 use once_cell::sync::Lazy;
 use parking_lot::Mutex;
 use rusqlite::Connection;
-use thiserror::Error;

 use crate::materializers::deferred::ArtifactMetadata;
 use crate::materializers::deferred::DirectoryMetadata;
@@ -59,7 +60,7 @@ const IDENTITY_KEY: &str = "timestamp_on_initialization";

 pub type MaterializerState = Vec<(ProjectRelativePathBuf, (ArtifactMetadata, DateTime<Utc>))>;

-#[derive(Error, Debug, PartialEq, Eq)]
+#[derive(buck2_error::Error, Debug, PartialEq, Eq)]
 pub(crate) enum ArtifactMetadataSqliteConversionError {
     #[error("Internal error: expected field `{}` to be not null for artifact type '{}'", .field, .artifact_type)]
     ExpectedNotNull {
@@ -279,7 +280,7 @@ fn convert_artifact_metadata(
                 })
             })?
                .into(),
-            None,
+            ForwardRelativePathBuf::default(),
         )?;
         DirectoryEntry::Leaf(ActionDirectoryMember::ExternalSymlink(Arc::new(
             external_symlink,
@@ -473,7 +474,7 @@ impl MaterializerStateSqliteTable {
     }
 }

-#[derive(Error, Debug, PartialEq, Eq)]
+#[derive(buck2_error::Error, Debug, PartialEq, Eq)]
 enum MaterializerStateSqliteDbError {
     #[error("Path {} does not exist", .0)]
     PathDoesNotExist(AbsNormPathBuf),
@@ -704,20 +705,30 @@ impl MaterializerStateTables {
     }
 }

+#[allow(unused)] // Used by test modules
+pub(crate) fn testing_materializer_state_sqlite_db(
+    fs: &ProjectRoot,
+    versions: HashMap<String, String>,
+    metadata: HashMap<String, String>,
+    reject_identity: Option<&MaterializerStateIdentity>,
+) -> anyhow::Result<(MaterializerStateSqliteDb, anyhow::Result<MaterializerState>)> {
+    MaterializerStateSqliteDb::initialize_impl(
+        fs.resolve(ProjectRelativePath::unchecked_new(
+            "buck-out/v2/cache/materializer_state",
+        )),
+        versions,
+        metadata,
+        DigestConfig::testing_default(),
+        reject_identity,
+    )
+}
+
 #[cfg(test)]
 mod tests {
-    use std::collections::HashMap;

     use assert_matches::assert_matches;
-    use buck2_common::file_ops::FileMetadata;
-    use buck2_common::file_ops::TrackedFileDigest;
-    use buck2_core::directory::DirectoryEntry;
-    use buck2_core::fs::project::ProjectRoot;
     use buck2_core::fs::project::ProjectRootTemp;
-    use buck2_core::fs::project_rel_path::ProjectRelativePath;
-    use buck2_execute::digest_config::DigestConfig;
     use buck2_execute::directory::new_symlink;
-    use buck2_execute::directory::ActionDirectoryMember;

     use super::*;
@@ -889,23 +900,6 @@ mod tests {
         assert_eq!(artifacts, state.into_iter().collect::<HashMap<_, _>>());
     }

-    fn testing_materializer_state_sqlite_db(
-        fs: &ProjectRoot,
-        versions: HashMap<String, String>,
-        metadata: HashMap<String, String>,
-        reject_identity: Option<&MaterializerStateIdentity>,
-    ) -> anyhow::Result<(MaterializerStateSqliteDb, anyhow::Result<MaterializerState>)> {
-        MaterializerStateSqliteDb::initialize_impl(
-            fs.resolve(ProjectRelativePath::unchecked_new(
-                "buck-out/v2/cache/materializer_state",
-            )),
-            versions,
-            metadata,
-            DigestConfig::testing_default(),
-            reject_identity,
-        )
-    }
-
     // Only implementing for tests, actual code should use `matches_entry` (and not check total_size)
     impl PartialEq for DirectoryMetadata {
         fn eq(&self, other: &DirectoryMetadata) -> bool {
diff --git a/app/buck2_execute_impl/src/re/mod.rs b/app/buck2_execute_impl/src/re.rs
similarity index 100%
rename from app/buck2_execute_impl/src/re/mod.rs
rename to app/buck2_execute_impl/src/re.rs
diff --git a/app/buck2_execute_impl/src/re/download.rs b/app/buck2_execute_impl/src/re/download.rs
index 6ae62cc7e639b..c77dc72e2c156 100644
--- a/app/buck2_execute_impl/src/re/download.rs
+++ b/app/buck2_execute_impl/src/re/download.rs
@@ -10,23 +10,27 @@
 use std::convert::Infallible;
 use std::ops::ControlFlow;
 use std::ops::FromResidual;
+use std::path::Path;
 use std::sync::Arc;

 use anyhow::Context as _;
 use buck2_common::file_ops::FileDigest;
 use buck2_common::file_ops::FileMetadata;
 use buck2_common::file_ops::TrackedFileDigest;
-use buck2_core::directory::DirectoryEntry;
 use buck2_core::execution_types::executor_config::RemoteExecutorUseCase;
+use buck2_core::fs::artifact_path_resolver::ArtifactFs;
 use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath;
+use buck2_core::fs::paths::RelativePathBuf;
 use buck2_core::fs::project_rel_path::ProjectRelativePathBuf;
+use buck2_directory::directory::entry::DirectoryEntry;
+use buck2_events::dispatch::console_message;
 use buck2_execute::artifact_value::ArtifactValue;
 use buck2_execute::digest::CasDigestFromReExt;
 use buck2_execute::digest_config::DigestConfig;
 use buck2_execute::directory::extract_artifact_value;
 use buck2_execute::directory::re_tree_to_directory;
 use buck2_execute::directory::ActionDirectoryMember;
-use buck2_execute::execute::action_digest::ActionDigest;
+use buck2_execute::directory::Symlink;
 use buck2_execute::execute::action_digest::TrackedActionDigest;
 use buck2_execute::execute::executor_stage_async;
 use buck2_execute::execute::kind::RemoteCommandExecutionDetails;
@@ -38,11 +42,15 @@ use buck2_execute::execute::request::CommandExecutionOutput;
 use buck2_execute::execute::request::CommandExecutionOutputRef;
 use buck2_execute::execute::request::CommandExecutionPaths;
 use buck2_execute::execute::request::CommandExecutionRequest;
+use buck2_execute::execute::result::CommandExecutionErrorType;
 use buck2_execute::execute::result::CommandExecutionResult;
 use buck2_execute::materialize::materializer::CasDownloadInfo;
 use buck2_execute::materialize::materializer::Materializer;
+use buck2_execute::re::action_identity::ReActionIdentity;
+use buck2_execute::re::error::RemoteExecutionError;
 use buck2_execute::re::manager::ManagedRemoteExecutionClient;
 use buck2_execute::re::remote_action_result::RemoteActionResult;
+use buck2_futures::cancellation::CancellationContext;
 use chrono::DateTime;
 use chrono::Duration;
 use chrono::Utc;
@@ -51,11 +59,11 @@ use futures::future;
 use futures::FutureExt;
 use gazebo::prelude::*;
 use indexmap::IndexMap;
-use more_futures::cancellation::CancellationContext;
 use remote_execution as RE;
-use thiserror::Error;

+use crate::executors::local::materialize_inputs;
 use crate::re::paranoid_download::ParanoidDownloader;
+use crate::storage_resource_exhausted::is_storage_resource_exhausted;

 pub async fn download_action_results<'a>(
     request: &CommandExecutionRequest,
@@ -64,14 +72,17 @@
     re_use_case: RemoteExecutorUseCase,
     digest_config: DigestConfig,
     manager: CommandExecutionManager,
+    identity: &ReActionIdentity<'_>,
     stage: buck2_data::executor_stage_start::Stage,
     paths: &CommandExecutionPaths,
-    requested_outputs: impl Iterator<Item = CommandExecutionOutputRef<'a>>,
+    requested_outputs: impl IntoIterator<Item = CommandExecutionOutputRef<'a>>,
     details: RemoteCommandExecutionDetails,
     response: &dyn RemoteActionResult,
     paranoid: Option<&ParanoidDownloader>,
     cancellations: &CancellationContext<'_>,
     action_exit_code: i32,
+    artifact_fs: &ArtifactFs,
+    materialize_failed_re_action_inputs: bool,
 ) -> DownloadResult {
     let std_streams = response.std_streams(re_client, re_use_case, digest_config);
     let std_streams = async {
@@ -82,7 +93,7 @@
         }
     };

-    if action_exit_code != 0 && manager.intend_to_fallback_on_failure {
+    if action_exit_code != 0 && manager.inner.intend_to_fallback_on_failure {
         // Do not attempt to download outputs in this case so
         // as to avoid cancelling in-flight local execution:
         // either local already finished and the outputs are
@@ -108,11 +119,12 @@
     let download = downloader.download(
         manager,
+        identity,
         stage,
         paths,
         requested_outputs,
-        &details.action_digest,
         response,
+        &details,
         cancellations,
     );
@@ -126,13 +138,41 @@
             CommandStdStreams::Remote(std_streams),
             response.timing(),
         ),
-        e => manager.failure(
-            response.execution_kind(details),
-            outputs,
-            CommandStdStreams::Remote(std_streams),
-            Some(e),
-            response.timing(),
-        ),
+        e => {
+            let materialized_inputs = if materialize_failed_re_action_inputs {
+                executor_stage_async(
+                    buck2_data::ReStage {
+                        stage: Some(buck2_data::MaterializeFailedInputs {}.into()),
+                    },
+                    async move {
+                        match materialize_inputs(artifact_fs, materializer, request).await {
+                            Ok(materialized_paths) => Some(materialized_paths.paths.clone()),
+                            Err(e) => {
+                                console_message(format!(
+                                    "Failed to materialize inputs for failed action: {}",
+                                    e
+                                ));
+                                None
+                            }
+                        }
+                    },
+                )
+                .await
+            } else {
+                None
+            };
+
+            manager.failure(
+                response.execution_kind_with_materialized_inputs_for_failed(
+                    details,
+                    materialized_inputs,
+                ),
+                outputs,
+                CommandStdStreams::Remote(std_streams),
+                Some(e),
+                response.timing(),
+            )
+        }
     };

     DownloadResult::Result(res)
@@ -150,11 +190,12 @@ impl CasDownloader<'_> {
     async fn download<'a>(
         &self,
         manager: CommandExecutionManager,
+        identity: &ReActionIdentity<'_>,
         stage: buck2_data::executor_stage_start::Stage,
         paths: &CommandExecutionPaths,
-        requested_outputs: impl Iterator<Item = CommandExecutionOutputRef<'a>>,
-        action_digest: &ActionDigest,
+        requested_outputs: impl IntoIterator<Item = CommandExecutionOutputRef<'a>>,
         output_spec: &dyn RemoteActionResult,
+        details: &RemoteCommandExecutionDetails,
         cancellations: &CancellationContext<'_>,
     ) -> ControlFlow<
         DownloadResult,
        (
             IndexMap<CommandExecutionOutput, ArtifactValue>,
         ),
     > {
+        let manager = manager.with_execution_kind(output_spec.execution_kind(details.clone()));
         executor_stage_async(stage, async {
             let artifacts = self
-                .extract_artifacts(paths, requested_outputs, output_spec)
+                .extract_artifacts(identity, paths, requested_outputs, output_spec)
                 .await;

-            let artifacts = match artifacts {
-                Ok(artifacts) => artifacts,
-                Err(e) => {
-                    return ControlFlow::Break(DownloadResult::Result(manager.error(
-                        "extract_artifacts",
-                        e.context(format!("action_digest={}", action_digest)),
-                    )));
-                }
-            };
+            let artifacts =
+                match artifacts {
+                    Ok(artifacts) => artifacts,
+                    Err(e) => {
+                        let error: buck2_error::Error = e
+                            .context(format!("action_digest={}", details.action_digest))
+                            .into();
+                        let is_storage_resource_exhausted = error
+                            .find_typed_context::<RemoteExecutionError>()
+                            .map_or(false, |re_client_error| {
+                                is_storage_resource_exhausted(re_client_error.as_ref())
+                            });
+                        let error_type = if is_storage_resource_exhausted {
+                            CommandExecutionErrorType::StorageResourceExhausted
+                        } else {
+                            CommandExecutionErrorType::Other
+                        };
+                        return ControlFlow::Break(DownloadResult::Result(
+                            manager.error_classified("extract_artifacts", error, error_type),
+                        ));
+                    }
+                };

             let info = CasDownloadInfo::new_execution(
                 TrackedActionDigest::new_expires(
-                    action_digest.dupe(),
+                    details.action_digest.dupe(),
                     artifacts.expires,
                     self.digest_config.cas_digest_config(),
                 ),
@@ -216,7 +271,7 @@
                 Err(e) => {
                     return ControlFlow::Break(DownloadResult::Result(manager.error(
                         "materialize_outputs",
-                        e.context(format!("action_digest={}", action_digest)),
+                        e.context(format!("action_digest={}", details.action_digest)),
                     )));
                 }
             };
@@ -232,8 +287,9 @@
     async fn extract_artifacts<'a>(
         &self,
+        identity: &ReActionIdentity<'_>,
         paths: &CommandExecutionPaths,
-        requested_outputs: impl Iterator<Item = CommandExecutionOutputRef<'a>>,
+        requested_outputs: impl IntoIterator<Item = CommandExecutionOutputRef<'a>>,
         output_spec: &dyn RemoteActionResult,
     ) -> anyhow::Result<ExtractedArtifacts> {
         let now = Utc::now();
@@ -264,11 +320,19 @@
             input_dir.insert(re_forward_path(x.name.as_str())?, entry)?;
         }

+        for x in output_spec.output_symlinks() {
+            let entry = DirectoryEntry::Leaf(ActionDirectoryMember::Symlink(Arc::new(
+                Symlink::new(RelativePathBuf::from_path(Path::new(&x.target))?),
+            )));
+            input_dir.insert(re_forward_path(x.name.as_str())?, entry)?;
+        }
+
         // Compute the re_outputs from the output_directories
         // This requires traversing the trees to find symlinks that point outside such trees
         let trees = self
             .re_client
             .download_typed_blobs::<RE::Tree>(
+                Some(identity),
                 output_spec
                     .output_directories()
                     .map(|x| x.tree_digest.clone()),
@@ -289,7 +353,7 @@
         let mut to_declare = Vec::with_capacity(output_paths.len());
         let mut mapped_outputs = IndexMap::with_capacity(output_paths.len());

-        for (requested, (path, _)) in requested_outputs.zip(output_paths.iter()) {
+        for (requested, (path, _)) in requested_outputs.into_iter().zip(output_paths.iter()) {
             let value = extract_artifact_value(&input_dir, path, self.digest_config)?;
             if let Some(value) = value {
                 to_declare.push((path.to_owned(), value.dupe()));
@@ -332,7 +396,7 @@ fn re_forward_path(re_path: &str) -> anyhow::Result<&ForwardRelativePath> {
         .context(DownloadError::InvalidPathFromRe)
 }

-#[derive(Error, Debug)]
+#[derive(buck2_error::Error, Debug)]
 enum DownloadError {
     #[error("Failed to declare in materializer")]
     Materialization,
diff --git a/app/buck2_execute_impl/src/re/paranoid_download.rs b/app/buck2_execute_impl/src/re/paranoid_download.rs
index 773df8c68a92e..8f66121e6aef6 100644
--- a/app/buck2_execute_impl/src/re/paranoid_download.rs
+++ b/app/buck2_execute_impl/src/re/paranoid_download.rs
@@ -11,7 +11,6 @@ use std::ops::ControlFlow;
 use std::sync::Arc;

 use allocative::Allocative;
-use buck2_common::result::SharedResult;
 use buck2_core::fs::fs_util;
 use buck2_core::fs::project::ProjectRoot;
 use buck2_core::fs::project_rel_path::ProjectRelativePathBuf;
@@ -26,12 +25,12 @@ use buck2_execute::execute::result::CommandExecutionResult;
 use buck2_execute::materialize::materializer::CasDownloadInfo;
 use buck2_execute::materialize::materializer::Materializer;
 use buck2_execute::re::manager::ReConnectionManager;
+use buck2_futures::cancellation::CancellationContext;
 use dupe::Dupe;
 use futures::future::BoxFuture;
 use futures::future::FutureExt;
 use futures::future::Shared;
 use gazebo::prelude::*;
-use more_futures::cancellation::CancellationContext;

 use crate::materializers::immediate::cas_download;
@@ -107,7 +106,7 @@ impl ParanoidDownloader {
             )
             .await?;

-            SharedResult::Ok(())
+            buck2_error::Result::Ok(())
         })
         .map(|r| match r {
             Ok(r) => r,
@@ -192,7 +191,7 @@ struct CacheDownload {

 struct CacheDownloadInner {
     io: Arc<dyn BlockingExecutor>,
-    future: Shared<BoxFuture<'static, SharedResult<()>>>,
+    future: Shared<BoxFuture<'static, buck2_error::Result<()>>>,
     paths: Vec<ProjectRelativePathBuf>,
 }
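The new storage_resource_exhausted module that follows lets download errors be classified before they reach the command manager. A minimal sketch of how a caller consumes it, mirroring the error_classified hunk in download.rs above (the types are the ones imported there):

use buck2_execute::execute::result::CommandExecutionErrorType;
use buck2_execute::re::error::RemoteExecutionError;

// Map an RE client error to an error type so the command manager can report
// storage exhaustion distinctly from other infrastructure failures.
fn classify_re_error(err: &RemoteExecutionError) -> CommandExecutionErrorType {
    if is_storage_resource_exhausted(err) {
        CommandExecutionErrorType::StorageResourceExhausted
    } else {
        CommandExecutionErrorType::Other
    }
}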
diff --git a/app/buck2_execute_impl/src/storage_resource_exhausted.rs b/app/buck2_execute_impl/src/storage_resource_exhausted.rs
new file mode 100644
index 0000000000000..a8b52e70c33f2
--- /dev/null
+++ b/app/buck2_execute_impl/src/storage_resource_exhausted.rs
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use buck2_execute::re::error::RemoteExecutionError;
+use remote_execution::TCode;
+use remote_execution::TStatus;
+
+#[cfg_attr(not(fbcode_build), allow(dead_code))]
+pub(crate) trait REErrorWithCodeAndMessage {
+    fn message(&self) -> &str;
+    fn code(&self) -> &TCode;
+}
+
+impl REErrorWithCodeAndMessage for RemoteExecutionError {
+    fn message(&self) -> &str {
+        &self.message
+    }
+
+    fn code(&self) -> &TCode {
+        &self.code
+    }
+}
+
+impl REErrorWithCodeAndMessage for TStatus {
+    fn message(&self) -> &str {
+        &self.message
+    }
+
+    fn code(&self) -> &TCode {
+        &self.code
+    }
+}
+
+pub(crate) fn is_storage_resource_exhausted<T: REErrorWithCodeAndMessage>(err: &T) -> bool {
+    #[cfg(fbcode_build)]
+    {
+        use once_cell::sync::Lazy;
+        use regex::Regex;
+
+        fn regex() -> &'static Regex {
+            // Taken from https://fburl.com/code/7n3qg2jj
+            static RE: Lazy<Regex> =
+                Lazy::new(|| Regex::new(r"^.*has exceeded quota.*DemandControl.*$").unwrap());
+            &RE
+        }
+
+        if *err.code() != TCode::RESOURCE_EXHAUSTED {
+            return false;
+        }
+        let message = err.message();
+        if message.contains("CAS resource exhausted") {
+            return true;
+        }
+        if message.contains("Use case throttling") {
+            return true;
+        }
+        if regex().is_match(message) {
+            return true;
+        }
+        false
+    }
+
+    #[cfg(not(fbcode_build))]
+    {
+        let _ignored = err;
+        false
+    }
+}
diff --git a/app/buck2_explain/BUCK b/app/buck2_explain/BUCK
new file mode 100644
index 0000000000000..ebcb5f63ed564
--- /dev/null
+++ b/app/buck2_explain/BUCK
@@ -0,0 +1,84 @@
+load("@fbcode_macros//build_defs:native_rules.bzl", "buck_filegroup", "buck_genrule")
+load("@fbcode_macros//build_defs:python_binary.bzl", "python_binary")
+load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library")
+load("@fbsource//xplat/third-party/yarn:yarn_offline_mirror_helper.bzl", "yarn_offline_mirror_path")
+
+oncall("build_infra")
+
+rust_library(
+    name = "buck2_explain",
+    srcs = glob(
+        ["src/*"],
+    ),
+    mapped_srcs = {
+        "//buck2/app/buck2_explain:explain_html": "src/explain.html",
+        ":schema_rust[explain_generated.rs]": "src/explain_generated.rs",
+    },
+    test_deps = [
+        "fbsource//third-party/rust:serde_json",
+        "//buck2/app/buck2_interpreter_for_build:buck2_interpreter_for_build",
+        "//buck2/app/buck2_util:buck2_util",
+        "//buck2/gazebo/dupe:dupe",
+        "//buck2/starlark-rust/starlark:starlark",
+        "//buck2/starlark-rust/starlark_map:starlark_map",
+    ],
+    deps = [
+        "fbsource//third-party/rust:anyhow",
+        "fbsource//third-party/rust:base64",
+        "fbsource//third-party/rust:flatbuffers",
+        "//buck2/app/buck2_common:buck2_common",
+        "//buck2/app/buck2_core:buck2_core",
+        "//buck2/app/buck2_node:buck2_node",
+        "//buck2/app/buck2_query:buck2_query",
+        "//buck2/gazebo/gazebo:gazebo",
+    ],
+)
+
+buck_genrule(
+    name = "schema_rust",
+    srcs = [
+        "explain.fbs",
+    ],
+    outs = {
+        "explain_generated.rs": ["explain_generated.rs"],
+    },
+    cmd = "$(exe fbsource//third-party/flatbuffers-23.5.26:flatc) --rust -o ${OUT} ${SRCS}",
+    default_outs = ["."],
+)
+
+buck_genrule(
+    name = "explain_html",
+    srcs = [":files"],
+    out = "explain.html",
+    cmd = '$(exe :build_html_script) --yarn "$(exe fbsource//xplat/third-party/yarn:yarn)" --yarn-offline-mirror "{yarn_offline_mirror}" -o "$OUT" --src "$(location :files)" --tmp "$TMP"'.format(yarn_offline_mirror = yarn_offline_mirror_path(yarn_lock = "js/yarn.lock")),
+)
+
+buck_filegroup(
+    name = "files",
+    srcs = glob(
+        [
+            "js/src/**",
+            "js/*",
+        ],
+        exclude = [
+            "js/node_modules/**",
+            "js/dist/**",
+        ],
+    ) + [":schema_ts"],
+)
+
+buck_genrule(
+    name = "schema_ts",
+    srcs = [
"explain.fbs", + ], + out = "js/src/fbs", + cmd = "$(exe fbsource//third-party/flatbuffers-23.5.26:flatc) --ts -o ${OUT} ${SRCS}", + default_outs = ["."], +) + +python_binary( + name = "build_html_script", + main_function = ".build_html.main", + main_src = "build_html.py", +) diff --git a/app/buck2_explain/build_html.py b/app/buck2_explain/build_html.py new file mode 100644 index 0000000000000..1b46d57e28fdf --- /dev/null +++ b/app/buck2_explain/build_html.py @@ -0,0 +1,153 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# credits to implementation in https://www.internalfb.com/code/fbsource/fbcode/eden/addons/build-tar.py + +import argparse +import atexit +import functools +import glob +import os +import shlex +import shutil +import subprocess +import sys + +import tempfile +from typing import List + +CSS_LINK = '' +JS_SCRIPT = '' + + +rm_rf = functools.partial(shutil.rmtree, ignore_errors=True) +print_err = functools.partial(print, file=sys.stderr) +glob_r = functools.partial(glob.glob, recursive=True) + + +def run(command: List[str], cwd=None, env=None): + print_err(f"{cwd if cwd else ' '} $ {shlex.join(command)}") + + if env is not None: + env = {**os.environ, **env} + + # shell=True with a List `command` seems buggy on *nix. + # It might run ['sh', '-c', 'a', 'b'] instead of ['sh', '-c', 'a b']. + subprocess.run(command, shell=(os.name == "nt"), check=True, cwd=cwd, env=env) + + +def realpath_args(args: List[str]) -> List[str]: + return [os.path.realpath(arg) if os.path.exists(arg) else arg for arg in args] + + +def copy_writable(src, dst, *, follow_symlinks=True): + """shutil.copy, but ensure that yarn.lock is writable + - RE might make src/ read-only with its "restrictive mode". + - When copying the RE "restrictive" src/, yarn.lock is read-only. + - yarn wants yarn.lock to be writable, even with --frozen-lockfile. + """ + shutil.copy(src, dst, follow_symlinks=follow_symlinks) + if dst.endswith("yarn.lock") and os.name != "nt": + os.chmod(dst, 0o666) + + +def main(): + parser = argparse.ArgumentParser(description="Creates a html from explain source.") + parser.add_argument( + "-o", + "--output", + nargs="?", + default="explain.html", + help="Path to the output '.html' file.", + ) + parser.add_argument( + "--yarn", + default="", + help="Path to yarn executable.", + ) + parser.add_argument( + "--yarn-offline-mirror", + default=None, + help="Path to the yarn offline mirror.", + ) + parser.add_argument( + "--src", + default=None, + help="Directory that contains the source code.", + ) + parser.add_argument( + "--tmp", + default=None, + help="Temporary directory to run build. Do not modify src in-place.", + ) + + args = parser.parse_args() + + # posix=False prevents shlex.split from treating \\ as escape character, breaking Windows. + yarn = realpath_args( + shlex.split(args.yarn or os.getenv("YARN") or "yarn", posix=False) + ) + + src = args.src or "." 
+    out = args.output
+
+    if args.tmp:
+        # copy source to a temporary directory
+        # used by buck genrule, which does not guarantee src is writable
+        tmp_src_path = tempfile.mkdtemp(prefix="explain_src", dir=args.tmp)
+        atexit.register(lambda: rm_rf(tmp_src_path))
+        print_err(f"copying source {src} to {tmp_src_path}")
+        shutil.copytree(
+            src, tmp_src_path, dirs_exist_ok=True, copy_function=copy_writable
+        )
+        src = tmp_src_path
+
+    src_join = functools.partial(os.path.join, src, "js")
+
+    if args.yarn_offline_mirror:
+        env = {"YARN_YARN_OFFLINE_MIRROR": os.path.realpath(args.yarn_offline_mirror)}
+        run(
+            yarn
+            + [
+                "--cwd",
+                src_join(),
+                "install",
+                "--offline",
+                "--frozen-lockfile",
+                "--ignore-scripts",
+                "--check-files",
+            ],
+            env=env,
+        )
+    else:
+        run(yarn + ["--cwd", src_join(), "install", "--prefer-offline"])
+
+    rm_rf(src_join("dist"))
+
+    # build
+    run(yarn + ["--cwd", src_join(), "run", "build"], env={"CI": "false"})
+
+    # inline js and css into html file
+    with open(src_join("dist/App.js"), "r") as f:
+        js_content = f.read()
+    with open(src_join("dist/App.css"), "r") as f:
+        css_content = f.read()
+    with open(src_join("index.html"), "r") as f:
+        html_content = f.read()
+
+    assert JS_SCRIPT in html_content
+    assert CSS_LINK in html_content
+    html_content = html_content.replace(CSS_LINK, f"<style>{css_content}</style>")
+    html_content = html_content.replace(JS_SCRIPT, f"<script>{js_content}</script>")
+
+    with open(out, "w") as out_file:
+        out_file.write(html_content)
+
+
+if __name__ == "__main__":
+    main()
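The explain.fbs schema that follows is compiled to Rust by the schema_rust genrule above. A sketch of producing a Build payload with it, assuming the usual flatc-generated bindings (the explain module and the *Args structs are the standard codegen output, assumed here rather than shown in this diff):

use flatbuffers::FlatBufferBuilder;

fn serialize_build(names: &[&str]) -> Vec<u8> {
    let mut fbb = FlatBufferBuilder::new();
    // Build each ConfiguredTargetNode bottom-up: strings first, then tables.
    let targets: Vec<_> = names
        .iter()
        .map(|n| {
            let name = fbb.create_string(n);
            explain::ConfiguredTargetNode::create(
                &mut fbb,
                &explain::ConfiguredTargetNodeArgs { name: Some(name), ..Default::default() },
            )
        })
        .collect();
    let targets = fbb.create_vector(&targets);
    let build = explain::Build::create(&mut fbb, &explain::BuildArgs { targets: Some(targets) });
    fbb.finish(build, None);
    fbb.finished_data().to_vec()
}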
diff --git a/app/buck2_explain/explain.fbs b/app/buck2_explain/explain.fbs
new file mode 100644
index 0000000000000..1dedb7210a782
--- /dev/null
+++ b/app/buck2_explain/explain.fbs
@@ -0,0 +1,61 @@
+/**
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+namespace explain;
+
+table CodePointer {
+  file_path: string;
+  line: int;
+}
+
+enum TargetValueType : byte { Bool, Int, String, List, Dict }
+
+table TargetValue {
+  type: TargetValueType = String;
+  key: TargetValue; // used for dicts
+  bool_value: bool = null;
+  int_value: long = null;
+  string_value: string;
+  list_value: [TargetValue];
+  dict_value: [TargetValue];
+}
+
+table TargetField {
+  name: string;
+  value: TargetValue (required);
+}
+
+table ConfiguredTargetLabel {
+  target_label: string;
+  cfg: string;
+  exec_cfg: string;
+}
+
+table ConfiguredTargetNode {
+  name: string;
+  // special attrs
+  type: string;
+  deps: [ConfiguredTargetLabel];
+  package: string;
+  oncall: string;
+  target_configuration: string;
+  execution_platform: string;
+  plugins: [string];
+  // user attrs
+  attrs: [TargetField];
+  // extras
+  label: ConfiguredTargetLabel;
+  srcs: long;
+  code_pointer: CodePointer;
+}
+
+table Build {
+  targets: [ConfiguredTargetNode];
+}
diff --git a/app/buck2_explain/js/.gitignore b/app/buck2_explain/js/.gitignore
new file mode 100644
index 0000000000000..c9bb395cbaca4
--- /dev/null
+++ b/app/buck2_explain/js/.gitignore
@@ -0,0 +1,4 @@
+node_modules/
+dist/
+src/data.ts
+src/fbs/
diff --git a/app/buck2_explain/js/.prettierrc b/app/buck2_explain/js/.prettierrc
new file mode 100644
index 0000000000000..2c5726d68dc6f
--- /dev/null
+++ b/app/buck2_explain/js/.prettierrc
@@ -0,0 +1,12 @@
+{
+  "arrowParens": "avoid",
+  "bracketSpacing": false,
+  "bracketSameLine": true,
+  "useTabs": false,
+  "singleQuote": true,
+  "tabWidth": 2,
+  "printWidth": 100,
+  "trailingComma": "all",
+  "semi": false,
+  "singleLine": true
+}
diff --git a/app/buck2_explain/js/README.md b/app/buck2_explain/js/README.md
new file mode 100644
index 0000000000000..5e0cee910a14b
--- /dev/null
+++ b/app/buck2_explain/js/README.md
@@ -0,0 +1,9 @@
+For fast js dev iteration do this (tested on Mac):
+
+```
+# run a build command
+./load.sh buck2
+yarn --watch
+```
+
+open index.html in the browser
diff --git a/app/buck2_explain/js/index.html b/app/buck2_explain/js/index.html
new file mode 100644
index 0000000000000..9a8cba0fb4f56
--- /dev/null
+++ b/app/buck2_explain/js/index.html
@@ -0,0 +1,19 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8" />
+    <title>Buck2 explain</title>
+    <link rel="stylesheet" href="./dist/App.css">
+  </head>
+  <body>
+    <div id="root"></div>
+    <script src="./dist/App.js"></script>
+  </body>
+</html>
diff --git a/app/buck2_explain/js/load.sh b/app/buck2_explain/js/load.sh
new file mode 100755
index 0000000000000..bb39762b25457
--- /dev/null
+++ b/app/buck2_explain/js/load.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# We use this script to load the data for quick local iteration on the frontend
+# $1: buck2 executable
+set -e
+$1 explain --output /dev/null --fbs-dump /tmp/fbs
+cd $(dirname "$0")
+echo "export const DATA = '$(cat /tmp/fbs)';" > src/data.ts
+cp -rfX "$(buck2 build //buck2/app/buck2_explain:schema_ts --show-full-simple-output)" src
diff --git a/app/buck2_explain/js/package.json b/app/buck2_explain/js/package.json
new file mode 100644
index 0000000000000..5408e782f317b
--- /dev/null
+++ b/app/buck2_explain/js/package.json
@@ -0,0 +1,22 @@
+{
+  "name": "buck2_explain",
+  "version": "0.0.1",
+  "license": "MIT OR Apache-2.0",
+  "dependencies": {
+    "flatbuffers": "23.5.26",
+    "flexsearch-ts": "^0.7.35",
+    "react": "^18.2.0",
+    "react-dom": "^18.2.0",
+    "react-force-graph-2d": "^1.25.5"
+  },
+  "devDependencies": {
+    "@types/react": "^18.3.1",
+    "@types/react-dom": "^18.3.0",
+    "esbuild": "0.20.2"
+  },
+  "scripts": {
+    "build": "esbuild src/App.tsx --bundle --outdir=dist",
+    "watch": "esbuild src/App.tsx --bundle --outdir=dist --watch",
+    "check": "tsc -noEmit -p tsconfig.json"
+  }
+}
\ No newline at end of file
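App.tsx below reads a base64 blob that the producer splices over the XXDATAXX placeholder it keeps "as is". A hedged sketch of the Rust side of that handshake; inline_data is illustrative, not the real buck2_explain API:

use base64::Engine as _;

// Base64-encode the serialized flatbuffer and substitute the placeholder
// that the bundled App.js looks for at startup.
fn inline_data(html: &str, fbs_bytes: &[u8]) -> String {
    let blob = base64::engine::general_purpose::STANDARD.encode(fbs_bytes);
    html.replace("XXDATAXX", &blob)
}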
+ */ + +// Add css to the bundle +import './app.css' + +import React, {useEffect, useState} from 'react' +import {createRoot} from 'react-dom/client' + +import {ByteBuffer} from 'flatbuffers' +import {Build, ConfiguredTargetNode} from './fbs/explain' +import {QueryKey, ROOT_VIEW, Router} from './Router' +import {RootView} from './RootView' +import {TargetView} from './TargetView' +import {SearchView} from './SearchView' +import {GraphView} from './graph/GraphView' +import {Navbar} from './Navbar' +import {formatTargetLabel} from './formatTargetLabel' + +const INITIAL_STATE = { + build: null, + rootTarget: null, + allTargets: {}, +} + +type STATE_TYPE = { + build: Build | null + rootTarget: ConfiguredTargetNode | null + allTargets: {[key: string]: number} +} + +export const DataContext = React.createContext(INITIAL_STATE) + +function App() { + const [data, setData] = useState(INITIAL_STATE) + + /** + * Loads initial information on page load + */ + useEffect(() => { + const fetchData = async () => { + // keep this line as is, it will be replaced later + let blobBase64 = 'XXDATAXX' + try { + blobBase64 = (await import('./data')).DATA + } catch (error) { + console.info('./data.ts not found, using replaced data') + } + + const decodedString = atob(blobBase64) + // TODO iguridi: decode blob better + const byteArray = new Uint8Array(decodedString.length) + for (let i = 0; i < decodedString.length; i++) { + byteArray[i] = decodedString.charCodeAt(i) + } + + const buf = new ByteBuffer(byteArray) + + // Get an accessor to the root object inside the buffer. + const build = Build.getRootAsBuild(buf) + // TODO iguridi: just show 1 target for now + const rootTarget = build.targets(0) + + const allTargets: {[key: string]: number} = {} + for (let i = 0; i < build.targetsLength(); i++) { + let target = build.targets(i)! + // Unique identifier for target + let label = formatTargetLabel(target.label()!) + allTargets[label] = i + } + + // This should run just once total + setData({build, allTargets, rootTarget}) + } + fetchData() + }, []) + + const rootTarget = data.rootTarget + + if (rootTarget == null) return

Loading...

+ else { + return ( + + + + + + + + + + ) + } +} + +const container = document.getElementById('root') as HTMLElement +const root = createRoot(container) + +root.render() diff --git a/app/buck2_explain/js/src/Navbar.tsx b/app/buck2_explain/js/src/Navbar.tsx new file mode 100644 index 0000000000000..0c46654c7001b --- /dev/null +++ b/app/buck2_explain/js/src/Navbar.tsx @@ -0,0 +1,50 @@ +/** + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +import React, {useContext} from 'react' +import {DataContext} from './App' +import {Link} from './Router' +import {SearchBox} from './SearchBox' + +/** + * Header that goes on every view + */ +export function Navbar() { + const {rootTarget} = useContext(DataContext) + + if (!rootTarget) { + return null + } + + return ( + + ) +} diff --git a/app/buck2_explain/js/src/RootView.tsx b/app/buck2_explain/js/src/RootView.tsx new file mode 100644 index 0000000000000..e9525adb9a6d8 --- /dev/null +++ b/app/buck2_explain/js/src/RootView.tsx @@ -0,0 +1,23 @@ +/** + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +import React, {useContext} from 'react' +import {Target} from './Target' +import {DataContext} from './App' + +/** + * Shows the root target + */ +export function RootView(props: {view: string}) { + const {rootTarget} = useContext(DataContext) + + const view = + rootTarget == null ?

No root target

: + return
{view}
+} diff --git a/app/buck2_explain/js/src/Router.tsx b/app/buck2_explain/js/src/Router.tsx new file mode 100644 index 0000000000000..8f75e4ca22c4b --- /dev/null +++ b/app/buck2_explain/js/src/Router.tsx @@ -0,0 +1,110 @@ +/** + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +import React, {ReactNode, useContext, useEffect, useState} from 'react' + +export const ROOT_VIEW = '' + +export enum QueryKey { + SearchView = 'search', + GraphView = 'graph', + TargetView = 'target', + TargetTab = 'target_tab', +} + +export const RouterContext = React.createContext({params: '', setParams: (_s: string) => {}}) + +/** + * Decides what to show based on existing query params + * Inspired by reach-router library + */ +export function Router(props: {children: ReactNode}) { + const [params, setUrlParams] = useState(window.location.search) + + const setParams = (s: string) => { + const url = new URL(window.location.toString()) + const params = new URLSearchParams(s) + url.search = params.toString() + window.history.pushState(null, '', url.toString()) + setUrlParams(s) + } + + useEffect(() => { + const handlePopState = () => { + setUrlParams(document.location.search) + } + window.addEventListener('popstate', handlePopState) + return () => { + window.removeEventListener('popstate', handlePopState) + } + }, []) + + const urlParams = new URLSearchParams(params) + const all = Array.from(urlParams.keys()) + + const res = React.Children.map(props.children, child => { + if (React.isValidElement(child)) { + if (child.props.view === undefined) { + return child + } + if (urlParams.has(child.props.view)) { + return child + } + if (child.props.view === ROOT_VIEW && all.length === 0) { + return child + } + } + return null + }) + + return res?.length ? ( + setParams(p)}}> + {res} + + ) : ( +

View not found

+ ) +} + +/** + * Link with specified query params + */ +export function Link(props: { + to: {[key in QueryKey]?: string | null} + children: ReactNode + className?: string +}) { + const {setParams} = useContext(RouterContext) + + const {to, children} = props + + const url = new URL(window.location.toString()) + const params = new URLSearchParams(url.search) + + for (let [k, _] of params.entries()) { + params.delete(k) + } + + for (let k of Object.keys(to)) { + params.set(k, to[k as QueryKey] ?? '') + } + + url.search = params.toString() + + const handleClick = (e: React.MouseEvent, url: URL) => { + e.preventDefault() + setParams(url.search) + } + + return ( +
handleClick(e, url)}> + {children} + + ) +} diff --git a/app/buck2_explain/js/src/SearchBox.tsx b/app/buck2_explain/js/src/SearchBox.tsx new file mode 100644 index 0000000000000..b51af8ef30d0c --- /dev/null +++ b/app/buck2_explain/js/src/SearchBox.tsx @@ -0,0 +1,51 @@ +/** + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +import React, {useContext, useState} from 'react' +import {RouterContext, QueryKey} from './Router' + +export function SearchBox() { + const {params, setParams} = useContext(RouterContext) + const urlParams = new URLSearchParams(params) + + const [searchTerm, setSearchTerm] = useState(urlParams.get(QueryKey.SearchView) ?? '') + + const goSearch = () => { + const url = new URL(window.location.toString()) + const params = new URLSearchParams(url.search) + + for (let [k, _] of params.entries()) { + params.delete(k) + } + + params.set(QueryKey.SearchView, searchTerm) + + setParams(params.toString()) + } + return ( + <> + setSearchTerm(event.target.value)} + placeholder="Search targets" + onKeyDown={event => { + event.key == 'Enter' ? goSearch() : null + }} + /> + + + ) +} diff --git a/app/buck2_explain/js/src/SearchView.tsx b/app/buck2_explain/js/src/SearchView.tsx new file mode 100644 index 0000000000000..cf6ac2924f6f8 --- /dev/null +++ b/app/buck2_explain/js/src/SearchView.tsx @@ -0,0 +1,99 @@ +/** + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +import React, {useContext, useState} from 'react' +import {DataContext} from './App' +import {Link, QueryKey, RouterContext} from './Router' +import {indexCache, indexEverything} from './flexSearch' + +function Checkbox(props: { + checked: boolean + onChange: (event: {target: {checked: boolean | ((prevState: boolean) => boolean)}}) => void +}) { + const {checked, onChange} = props + return ( +
+      <label>
+        <input type="checkbox" checked={checked} onChange={onChange} />
+      </label>
+ ) +} + +export function SearchView(props: {view: QueryKey}) { + const {build, allTargets} = useContext(DataContext) + const {params} = useContext(RouterContext) + + const [universalSearch, setUniversalSearch] = useState(false) + function handleChange(event: {target: {checked: boolean | ((prevState: boolean) => boolean)}}) { + setUniversalSearch(event.target.checked) + if (build != null) { + if (indexCache) { + return + } else { + indexEverything(build) + } + } + } + + const urlParams = new URLSearchParams(params) + const search = urlParams.get(props.view) + + if (search == null || search.length < 3) { + return

+    <p>Invalid search "{search}", try again</p>
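+    // (Short queries would match most of the index, so anything under 3 characters stops here.)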
+ } + + let res = null + if (universalSearch) { + res = indexCache?.search(search) + } else { + res = [] + for (let k of Object.keys(allTargets)) { + if (k.includes(search)) { + res.push(k) + } + } + } + + // Not sure where the dups are coming from, but we want to dedup to prevent + // undefined behavior in React + const deduped = dedupeArray((res ?? []).map(v => v.toString())) + + const view = + res == null || res.length == 0 ? ( + <> + +

+        <Checkbox checked={universalSearch} onChange={handleChange} />
+        <p>No results for search</p>
+      </>
+    ) : (
+      <>
+        <Checkbox checked={universalSearch} onChange={handleChange} />
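+        {/* With universal search enabled, results come from the flexsearch index built in flexSearch.tsx; otherwise only the target labels in allTargets are substring-matched. */}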
+        <p>Showing target labels containing "{search}"</p>
+        <ul>
+          {deduped.map(label => (
+            <li key={label}>
+              <Link to={{target: label}}>{label}</Link>
+            </li>
+          ))}
+        </ul>
+      </>
+    )
+
+  return <div>{view}</div>
+}
+
+function dedupeArray(res: string[]): string[] {
+  const array = res.map((value, index) => [value, index])
+  const deduped = Array.from(
+    new Map(array as Iterable<[string, number]>).keys(),
+  ) as string[]
+  return deduped
+}
diff --git a/app/buck2_explain/js/src/Target.tsx b/app/buck2_explain/js/src/Target.tsx
new file mode 100644
index 0000000000000..4fd7a46c92505
--- /dev/null
+++ b/app/buck2_explain/js/src/Target.tsx
@@ -0,0 +1,279 @@
+/**
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+import React, {useContext} from 'react'
+import {DataContext} from './App'
+import {ConfiguredTargetNode, TargetValueType, TargetField, TargetValue} from './fbs/explain'
+import {Link} from './Router'
+import {formatTargetLabel} from './formatTargetLabel'
+
+const TARGET_ATTRS = 'target_attrs'
+const TARGET_DEPS = 'target_deps'
+const TARGET_RDEPS = 'target_rdeps'
+
+/*
+ * If we have an object associated with this string, make it a link.
+ * Otherwise, just render the string.
+ */
+function PossibleLink(props: {value: string}) {
+  const {allTargets} = useContext(DataContext)
+  const {value} = props
+
+  let res = null
+  if (allTargets.hasOwnProperty(value)) {
+    res = <Link to={{target: value}}>{value}</Link>
+  } else {
+    res = <>{value}</>
+  }
+  return res
+}
+
+function List(props: {attr: (i: number) => string; length: number}): JSX.Element {
+  if (props.length === 0) {
+    return <></>
+  }
+
+  const items: JSX.Element[] = []
+  for (let i = 0; i < props.length; i++) {
+    items.push(
+      <li key={i}>
+        <PossibleLink value={props.attr(i)} />
+      </li>,
+    )
+  }
+  return (
+    <ul>
+      {items}
+    </ul>
+  )
+}
+
+function ListAttr(props: {
+  getItem: (i: number) => TargetValue | null
+  length: number
+}): JSX.Element {
+  const {getItem, length} = props
+  if (length === 0) {
+    return <></>
+  }
+
+  const items: JSX.Element[] = []
+  for (let i = 0; i < props.length; i++) {
+    items.push(
+      <li key={i}>
+        <Attr value={getItem(i)} />
+      </li>,
+    )
+  }
+  return (
+    <ul>
+      {items}
+    </ul>
+  )
+}
+
+function DictAttr(props: {
+  getItem: (i: number) => TargetValue | null
+  length: number
+}): JSX.Element {
+  const {getItem, length} = props
+  if (length === 0) {
+    return <></>
+  }
+
+  const items: JSX.Element[] = []
+  for (let i = 0; i < props.length; i++) {
+    let value = getItem(i)
+    items.push(
+      <li key={i}>
+        <Attr value={value?.key()} />: <Attr value={value} />
+      </li>,
+    )
+  }
+  return <ul>{items}</ul>
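+  // Each dict entry renders as "key: value"; both halves go through Attr, so nested values keep their links.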
+}
+
+function Attr(props: {value: TargetValue | null | undefined}): JSX.Element {
+  const {value} = props
+  if (value == null) {
+    return <>null value</>
+  }
+  const valueType = value?.type()
+  if (valueType == null) {
+    return <>null value type</>
+  }
+  switch (valueType) {
+    case TargetValueType.Bool:
+      return <>{value.boolValue()?.toString()}</>
+    case TargetValueType.Int:
+      return <>{value.intValue()?.toString()}</>
+    case TargetValueType.String:
+      // TODO iguridi: update this once we have ConfiguredTargetLabel type
+      return <PossibleLink value={value.stringValue() ?? ''} />
+    case TargetValueType.List:
+      return <ListAttr getItem={i => value.listValue(i)} length={value.listValueLength()} />
+    case TargetValueType.Dict:
+      return <DictAttr getItem={i => value.dictValue(i)} length={value.dictValueLength()} />
+  }
+}
+
+function Attrs(props: {attr: (i: number) => TargetField | null; length: number}): JSX.Element {
+  const items: JSX.Element[] = []
+  for (let i = 0; i < props.length; i++) {
+    const attr = props.attr(i)
+    if (attr == null) {
+      continue
+    }
+    const row = (
+      <tr key={attr.name()}>
+        <td>{attr.name()}</td>
+        <td>
+          <Attr value={attr.value()} />
+        </td>
+      </tr>
+    )
+    items.push(row)
+  }
+  return <>{items}</>
+}
+
+export function Target(props: {target: ConfiguredTargetNode; tab: string | null}) {
+  const target = props.target!
+  const tab = props.tab ?? TARGET_ATTRS
+
+  const filePath = target.codePointer()?.filePath()
+  const lineNumber = (target.codePointer()?.line() ?? 0) + 1
+  // TODO iguridi: make it work outside of fbsource
+  const codePointer = `https://www.internalfb.com/code/fbsource/${filePath}?lines=${lineNumber}`
+
+  const targetLabel = target.label()!.targetLabel()
+
+  return (
+    <div>
+      <a href={codePointer} target="_blank">
+        (codehub)
+      </a>
+      <h4>{targetLabel}</h4>
+      <ul>
+        <li>
+          <b>Rule type</b>
+          <p>{target.type()}</p>
+        </li>
+      </ul>
+      <div className="tabs">
+        <ul>
+          <li>
+            <Link to={{target: formatTargetLabel(target.label()!), target_tab: TARGET_ATTRS}}>
+              Attributes
+            </Link>
+          </li>
+          <li>
+            <Link to={{target: formatTargetLabel(target.label()!), target_tab: TARGET_DEPS}}>
+              Dependencies
+            </Link>
+          </li>
+          <li>
+            <Link to={{target: formatTargetLabel(target.label()!), target_tab: TARGET_RDEPS}}>
+              Reverse dependencies
+            </Link>
+          </li>
+        </ul>
+      </div>
+      {tab === TARGET_ATTRS ? <TargetAttrs target={target} /> : null}
+      {tab === TARGET_DEPS ? <TargetDeps target={target} /> : null}
+      {tab === TARGET_RDEPS ? <TargetRdeps target={target} /> : null}
+    </div>
+  )
+}
+
+function TargetDeps(props: {target: ConfiguredTargetNode}) {
+  const {target} = props
+  return <List attr={i => formatTargetLabel(target!.deps(i)!)} length={target.depsLength()} />
+}
+
+function TargetRdeps(props: {target: ConfiguredTargetNode}) {
+  const {target} = props
+  const {allTargets, build} = useContext(DataContext)
+
+  if (allTargets == null || build == null) {
+    return <></>
+  }
+
+  const label = formatTargetLabel(target!.label()!)
+
+  let rdeps: Array<string> = []
+  Object.values(allTargets).forEach(i => {
+    let target2 = build?.targets(i)
+    let depsLength = target2?.depsLength() ?? 0
+    for (let i = 0; i < depsLength; i++) {
+      const dep = formatTargetLabel(target2?.deps(i)!)
+      const rdepLabel = formatTargetLabel(target2!.label()!)
+      if (dep === label) {
+        rdeps.push(rdepLabel)
+      }
+    }
+  })
+
+  return <List attr={i => rdeps[i]} length={rdeps.length} />
+}
+
+function TargetAttrs(props: {target: ConfiguredTargetNode}) {
+  const {target} = props
+  return (
+    <table>
+      <tbody>
+        <tr>
+          <th>Attribute</th>
+          <th>Value</th>
+        </tr>
+        <tr>
+          <td>name</td>
+          <td>{target.name()}</td>
+        </tr>
+        <tr>
+          <td>type</td>
+          <td>{target.type()}</td>
+        </tr>
+        <tr>
+          <td>package</td>
+          <td>{target.package_()}</td>
+        </tr>
+        <tr>
+          <td>oncall</td>
+          <td>{target.oncall()}</td>
+        </tr>
+        <tr>
+          <td>target_configuration</td>
+          <td>{target.targetConfiguration()}</td>
+        </tr>
+        <tr>
+          <td>execution_platform</td>
+          <td>{target.executionPlatform()}</td>
+        </tr>
+        <Attrs attr={i => target.attrs(i)} length={target.attrsLength()} />
+      </tbody>
+    </table>
+  )
+}
diff --git a/app/buck2_explain/js/src/TargetView.tsx b/app/buck2_explain/js/src/TargetView.tsx
new file mode 100644
index 0000000000000..4be534eacc78a
--- /dev/null
+++ b/app/buck2_explain/js/src/TargetView.tsx
@@ -0,0 +1,27 @@
+/**
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+import React, {useContext} from 'react'
+import {DataContext} from './App'
+import {Target} from './Target'
+import {QueryKey, RouterContext} from './Router'
+
+export function TargetView(props: {view: QueryKey}) {
+  const {allTargets, build} = useContext(DataContext)
+  const {params} = useContext(RouterContext)
+
+  const urlParams = new URLSearchParams(params)
+  const targetLabel = urlParams.get(props.view) ?? null
+  const target = targetLabel == null ? null : build?.targets(allTargets[targetLabel])
+
+  const tab = urlParams.get(QueryKey.TargetTab)
+
+  const view = target == null ?

+    <p>No target found</p> :
+    <Target target={target} tab={tab} />
+
+  return <div>{view}</div>
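+  // The target is looked up by its formatted configured label (see formatTargetLabel.ts),
+  // e.g. "cell//pkg:name (cfg)"; any other value renders the fallback above.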
    +} diff --git a/app/buck2_explain/js/src/app.css b/app/buck2_explain/js/src/app.css new file mode 100644 index 0000000000000..196b1da16e7ea --- /dev/null +++ b/app/buck2_explain/js/src/app.css @@ -0,0 +1,10 @@ +/** + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +@import 'https://cdn.jsdelivr.net/npm/bulma@1.0.1/css/versions/bulma-no-dark-mode.css'; diff --git a/app/buck2_explain/js/src/flexSearch.tsx b/app/buck2_explain/js/src/flexSearch.tsx new file mode 100644 index 0000000000000..efbd047dc9e01 --- /dev/null +++ b/app/buck2_explain/js/src/flexSearch.tsx @@ -0,0 +1,125 @@ +/** + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +import {Index} from 'flexsearch-ts' +import {Build, TargetField, TargetValue, TargetValueType} from './fbs/explain' +import {formatTargetLabel} from './formatTargetLabel' + +export let indexCache: Index | null = null + +function addIfExists(index: Index, key: string, element: string | null | undefined) { + if (element != null) { + index.append(key, element) + } +} +function addList( + index: Index, + key: string, + attr: (i: number) => TargetValue | null | undefined, + length: number, +) { + for (let i = 0; i < length; i++) { + addTargetValue(index, key, attr(i)) + } +} +function addListOfStrings(index: Index, key: string, attr: (i: number) => string, length: number) { + for (let i = 0; i < length; i++) { + const value = attr(i) + if (value != null) { + addIfExists(index, key, value) + } + } +} +function addDict( + index: Index, + key: string, + attr: (i: number) => TargetValue | null | undefined, + length: number, +) { + for (let i = 0; i < length; i++) { + const value = attr(i) + addTargetValue(index, key, value?.key()) + addTargetValue(index, key, value) + } +} +function addTargetField(index: Index, key: string, field: TargetField | null) { + if (field == null) { + return + } + const name = field?.name() + addIfExists(index, key, name) + addTargetValue(index, key, field?.value()) +} +function addTargetValue(index: Index, key: string, value: TargetValue | null | undefined) { + if (value == null) { + return + } + const valueType = value?.type() + if (valueType == null) { + return + } + switch (valueType) { + case TargetValueType.Bool: + addIfExists(index, key, value.boolValue()?.toString()) + case TargetValueType.Int: + addIfExists(index, key, value.intValue()?.toString()) + case TargetValueType.String: + addIfExists(index, key, value.stringValue()) + case TargetValueType.List: + addList(index, key, i => value.listValue(i), value.listValueLength()) + case TargetValueType.Dict: + addDict(index, key, i => value.dictValue(i), value.dictValueLength()) + } +} +function addListOfTargetFields( + index: Index, + key: string, + attr: (i: number) => TargetField | null, + length: number, +) { + for (let i = 0; i < length; i++) { + const value = attr(i) + if (value == null) { + continue + } + addTargetField(index, key, value) + } +} + +export async function indexEverything(build: Build): Promise { + // TODO iguridi: make this in a js 
worker + const searchIndex = new Index({tokenize: 'forward', stemmer: 'false'}) + for (let i = 0; i < build.targetsLength(); i++) { + let target = build.targets(i)! + const label = target.label()! + let identifier = formatTargetLabel(label) + addIfExists(searchIndex, identifier, target.name()) + addIfExists(searchIndex, identifier, target.oncall()) + addIfExists(searchIndex, identifier, target.executionPlatform()) + addIfExists(searchIndex, identifier, target.package_()) + addIfExists(searchIndex, identifier, target.targetConfiguration()) + addIfExists(searchIndex, identifier, target.type()) + addIfExists(searchIndex, identifier, label.targetLabel()!) + addIfExists(searchIndex, identifier, label.cfg()!) + addIfExists(searchIndex, identifier, label.execCfg()!) + addListOfStrings( + searchIndex, + identifier, + i => formatTargetLabel(target.deps(i)!), + target.depsLength(), + ) + addListOfTargetFields( + searchIndex, + identifier, + (i: number) => target.attrs(i), + target.attrsLength(), + ) + } + indexCache = searchIndex +} diff --git a/app/buck2_explain/js/src/formatTargetLabel.ts b/app/buck2_explain/js/src/formatTargetLabel.ts new file mode 100644 index 0000000000000..9d06acf4c238b --- /dev/null +++ b/app/buck2_explain/js/src/formatTargetLabel.ts @@ -0,0 +1,15 @@ +import {ConfiguredTargetLabel} from './fbs/explain' + +// Unique identifier for a configured target. +// Javascript copy of `impl Display for ConfiguredTargetLabel` in rust code +export function formatTargetLabel(label: ConfiguredTargetLabel): string { + const unconfigured = label.targetLabel() ?? '' + const cfg = label.cfg() ?? '' + const execCfg = label.execCfg() ?? '' + + let res = `${unconfigured} (${cfg})` + if (execCfg) { + res = `${res} (${execCfg})` + } + return res +} diff --git a/app/buck2_explain/js/src/graph/GraphImpl.tsx b/app/buck2_explain/js/src/graph/GraphImpl.tsx new file mode 100644 index 0000000000000..a6013f04505d4 --- /dev/null +++ b/app/buck2_explain/js/src/graph/GraphImpl.tsx @@ -0,0 +1,420 @@ +/** + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +import React, {useRef, useState} from 'react' +import {Build} from '../fbs/explain' +import {RuleTypeDropdown} from './RuleTypeDropdown' +import {Node} from './GraphView' +import {GraphViz} from './GraphViz' +import {LinkObject, NodeObject} from 'react-force-graph-2d' +import {longestPathTree, shortestPathTree} from './graph' +import {formatTargetLabel} from '../formatTargetLabel' + +enum ShowPaths { + Shortest, + Longest, + All, +} + +enum DisplayType { + rootNode, + passesFilters, + somepath, + hidden, + highlighted, +} + +const displayTypeColors: {[key in DisplayType]: string} = { + // https://coolors.co/1c77c3-39a9db-9ec1a3-cfe0c3-e9724c + [DisplayType.rootNode]: '#1a181b', + [DisplayType.passesFilters]: '#1c77c3', + [DisplayType.somepath]: '#9EC1A3', + [DisplayType.highlighted]: '#e9724c', + [DisplayType.hidden]: 'gray', // doesn't matter +} + +interface DisplayNode extends Node { + allowedDeps: Map + displayType: DisplayType +} + +function showNode(node: DisplayNode) { + return node.displayType != DisplayType.hidden +} + +type DepsGraph = Map + +function toLeanGraph(graph: DepsGraph): Map { + let newGraph = new Map() + for (const [k, node] of graph) { + newGraph.set(k, node.deps) + } + return newGraph +} + +function fromLeanGraph(graph: Map): DepsGraph { + let newGraph = new Map() + for (const [k, deps] of graph) { + newGraph.set(k, {deps, rdeps: [], value: k}) + } + for (const [k, deps] of graph) { + for (const d of deps) { + newGraph.get(d)!.rdeps.push(k) + } + } + return newGraph +} + +// Here it goes everything that has to recompute on user interaction. +// On big graphs recomputing less matters +export function GraphImpl(props: { + nodes: Map + build: Build + allTargets: {[key: string]: number} + categoryOptions: {category: string; count: number; checked: boolean}[] +}) { + const {nodes, build, categoryOptions, allTargets} = props + + const nodeMap: Map = new Map() + for (const [k, node] of nodes) { + nodeMap.set(k, {...node, allowedDeps: new Map(), displayType: DisplayType.hidden}) + } + + const [categories, setCategories] = useState(categoryOptions) + const [colorByCfg, setColorByCfg] = useState(false) + const [showLabels, setShowLabels] = useState(false) + const [includeContaining, setIncludeContaining] = useState([]) + const [excludeContaining, setExcludeContaining] = useState([]) + const [somepath, setSomepath] = useState>(new Set()) + const [highlighted, setHighlighted] = useState(null) + const [showPaths, setShowPaths] = useState(ShowPaths.All) + + // Choose which edges to show + const chooseEdges = (graph: DepsGraph, show: ShowPaths) => { + let newGraph + const lean = toLeanGraph(graph) + if (show === ShowPaths.Shortest) { + newGraph = shortestPathTree(lean, 0) + } else if (show === ShowPaths.Longest) { + newGraph = longestPathTree(lean, 0) + } else { + newGraph = lean // ShowPaths.All + } + return fromLeanGraph(newGraph) + } + const graphDeps = chooseEdges(props.nodes, showPaths) + + const activeCategories = categories.filter(v => v.checked).map(v => v.category) + + if (somepath.size > 0) { + for (const k of somepath) { + nodeMap.get(k)!.displayType = DisplayType.somepath + } + } else { + // Intersection of 'includes', minus 'excludes' + for (const [k, node] of nodeMap) { + const target = build.targets(k)! + const label = formatTargetLabel(target.label()!) 
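+      // The filters below combine as AND: a node must match an active category (if any are
+      // checked), contain at least one 'include' string (if given), and none of the 'exclude' strings.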
+ + // When null, means it wasn't affected by any of the filters and to use default + let passesFilters = null + + // Filter by category + if (activeCategories.length > 0) { + passesFilters = activeCategories.includes(target.type()!) + } + + // Filter by label + if (includeContaining.length > 0) { + let contains = false + for (const v of includeContaining) { + if (label.includes(v)) { + contains = true + break + } + } + passesFilters = passesFilters !== false && contains + } + + // Exclude by label + for (const v of excludeContaining) { + if (label.includes(v)) { + passesFilters = false + } + } + + if (passesFilters === true) { + node.displayType = DisplayType.passesFilters + } + + // Add highlighted + if (highlighted) { + if (label.includes(highlighted)) { + node.displayType = DisplayType.highlighted + } + } + } + + // Always set root node + nodeMap.get(0)!.displayType = DisplayType.rootNode + } + + let displayNodes: Map = new Map() + let filteredNodes = new Map() + for (const [k, node] of nodeMap) { + if (showNode(node)) { + filteredNodes.set(k, node) + } + } + + // For each node A that goes, traverse the graph bottom up BFS + // until another node that goes is found, then add node A as allowedDep + // Also stores shortest path length from last allowed to later add as edge label + + for (const [k, _] of filteredNodes) { + let visited: Map = new Map() + visited.set(k, 0) + let stack = [k] + + while (stack.length > 0) { + const n1 = stack.shift() + + for (const r of graphDeps.get(n1)!.rdeps) { + if (visited.has(r)) { + continue + } + const distance = visited.get(n1)! + 1 + visited.set(r, distance) + if (showNode(nodeMap.get(r)!)) { + nodeMap.get(r)!.allowedDeps.set(k, distance) + } else { + stack.push(r) + } + } + } + } + + // Build graph in a format that the graph library understands + const data: NodeObject[] = [] + const edges: LinkObject[] = [] + + for (const [k, node] of filteredNodes) { + const target = build.targets(k)! + + // Add nodes to graph + data.push({ + val: 0.5, + id: k, + name: formatTargetLabel(target.label()!), + color: colorByCfg ? undefined : displayTypeColors[node.displayType], + cfg: target.label()!.cfg()!, + }) + } + + for (const [k, node] of filteredNodes) { + // Add edges + for (const [d, counter] of node.allowedDeps) { + if (!filteredNodes.has(d)) { + throw Error("this shouldn't be possible") + } + edges.push({ + source: k, + target: d, + name: `steps: ${counter}`, + color: 'rgba(20, 20, 20, 0.5)', + }) + } + } + + function applyFilters() { + // TODO iguridi: this is nasty, but should do for now + setSomepath(new Set()) + + const inputValue = (id: string) => + (document.getElementById(id) as HTMLInputElement).value.trim() + + // Include exclude by label + const inc = inputValue('includeContaining') + setIncludeContaining(inc ? inc.split(',') : []) + const exc = inputValue('excludeContaining') + setExcludeContaining(exc ? 
exc.split(',') : []) + + // Highlight by label + setHighlighted(inputValue('highlightNode')) + + // Include by rule type + const checkboxes = document.querySelectorAll('#checkboxes input[type="checkbox"]') + for (let i = 0; i < checkboxes.length; i++) { + categories[i].checked = checkboxes[i].checked + } + setCategories([...categories]) + } + + function findPath() { + const inputValue = (id: string) => + (document.getElementById(id) as HTMLInputElement).value.trim() + + // Include by path + const pathFrom = inputValue('pathFrom') + const pathTo = inputValue('pathTo') + + if (pathFrom && pathTo && allTargets) { + const from = allTargets[pathFrom] + const to = allTargets[pathTo] + const parentOf = new Map() + parentOf.set(from, null) + const queue = [from] + + while (queue.length > 0) { + let node = queue.shift()! + if (node === to) { + break + } + for (let d of graphDeps.get(node)!.deps) { + if (!parentOf.has(d)) { + parentOf.set(d, node) + queue.push(d) + } + } + } + + // set allowed if in path + let path = new Set() + let node = to + while (node) { + path.add(node) + node = parentOf.get(node) + } + + setSomepath(path) + } + } + + return ( + <> +
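+      {/* Control panel: the text inputs are read imperatively by applyFilters() and findPath() through getElementById, rather than through React state. */}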
+      <div className="graph-controls">
+        <RuleTypeDropdown options={categories} activeCount={activeCategories.length} />
+        <input id="includeContaining" type="text" placeholder="Include containing" />
+        <input id="excludeContaining" type="text" placeholder="Exclude containing" />
+        <input id="highlightNode" type="text" placeholder="Highlight" />
+        <button onClick={applyFilters}>Apply filters</button>
+        <input id="pathFrom" type="text" placeholder="Path from" />
+        <input id="pathTo" type="text" placeholder="Path to" />
+        <button onClick={findPath}>Find path</button>
+        <label>
+          <input type="checkbox" checked={colorByCfg} onChange={e => setColorByCfg(e.target.checked)} />
+          Color by configuration
+        </label>
+        <label>
+          <input type="checkbox" checked={showLabels} onChange={e => setShowLabels(e.target.checked)} />
+          Show labels
+        </label>
+        <select value={showPaths} onChange={e => setShowPaths(Number(e.target.value))}>
+          <option value={ShowPaths.All}>Show all paths</option>
+          <option value={ShowPaths.Shortest}>Show shortest paths</option>
+          <option value={ShowPaths.Longest}>Show longest paths</option>
+        </select>
+        <p>Number of nodes: {data.length}</p>
+        <p>Number of edges: {edges.length}</p>
+      </div>
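+      {/* Left-clicking a node fills the pathFrom/pathTo inputs; right-clicking opens the target view in a new tab (see the setPath/openTarget callbacks just below). */}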
    + + { + const fromInput = document.getElementById('pathFrom') as HTMLInputElement + const toInput = document.getElementById('pathTo') as HTMLInputElement + if (!fromInput.value) { + fromInput.value = name + } else if (!toInput.value) { + toInput.value = name + } else { + fromInput.value = name + toInput.value = '' + } + }} + openTarget={(name: string) => { + const url = new URL(window.location.href) + url.searchParams.set('target', name) + url.searchParams.delete('graph') + window.open(url.toString(), '_blank') + }} + /> + + ) +} diff --git a/app/buck2_explain/js/src/graph/GraphView.tsx b/app/buck2_explain/js/src/graph/GraphView.tsx new file mode 100644 index 0000000000000..9e51f82108ea0 --- /dev/null +++ b/app/buck2_explain/js/src/graph/GraphView.tsx @@ -0,0 +1,104 @@ +/** + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +import React, {useContext} from 'react' +import {DataContext} from '../App' +import {GraphImpl} from './GraphImpl' +import {QueryKey} from '../Router' +import {formatTargetLabel} from '../formatTargetLabel' + +export interface Node { + value: number + deps: number[] + rdeps: number[] +} + +type CategoryOption = {category: string; count: number; checked: false} + +function defaultNode(): Node { + return { + value: 0, + deps: [], + rdeps: [], + } +} + +export function GraphView(props: {view: QueryKey}) { + const {build, allTargets} = useContext(DataContext) + if (build == null) { + // TODO: this should show a loading sign + return null + } + + // Build better data structure + let nodeMap = new Map() + + // Create nodes + for (let i = 0; i < build.targetsLength(); i++) { + if (nodeMap.get(i) == null) { + nodeMap.set(i, { + ...defaultNode(), + value: i, + }) + } + } + + // Record deps and rdeps + for (const [k, node] of nodeMap) { + const target = build.targets(k)! + + for (let i = 0; i < target.depsLength(); i++) { + const d = allTargets[formatTargetLabel(target.deps(i)!)] + + // Deps + node.deps.push(d) + + // Rdeps + if (d === k) { + throw Error('wth') + } + node.rdeps.push(k) + } + } + + const extractCategories = (): CategoryOption[] => { + const categoriesCounter = new Map() + for (const [k, _node] of nodeMap) { + const target = build.targets(k) + const type = target!.type()! + categoriesCounter.set(type, (categoriesCounter.get(type) ?? 0) + 1) + } + + let categoryOptions: CategoryOption[] = [] + for (const [category, count] of categoriesCounter) { + categoryOptions.push({category, count, checked: false}) + } + categoryOptions.sort((a, b) => { + if (a.category < b.category) { + return -1 + } else { + return 1 + } + }) + + return categoryOptions + } + const categoryOptions = extractCategories() + + return ( +
+      <GraphImpl
+        nodes={nodeMap}
+        build={build}
+        allTargets={allTargets}
+        categoryOptions={categoryOptions}
+      />
    + ) +} diff --git a/app/buck2_explain/js/src/graph/GraphViz.tsx b/app/buck2_explain/js/src/graph/GraphViz.tsx new file mode 100644 index 0000000000000..8c306381da30b --- /dev/null +++ b/app/buck2_explain/js/src/graph/GraphViz.tsx @@ -0,0 +1,74 @@ +/** + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +import React, {useRef} from 'react' +import ForceGraph2D, {LinkObject, NodeObject, ForceGraphProps} from 'react-force-graph-2d' + +export function GraphViz(props: { + nodes: NodeObject[] + links: LinkObject[] + setPath: (name: string) => void + openTarget: (name: string) => void + colorByCfg: boolean + showLabels: boolean +}) { + const {nodes, links, setPath, openTarget, showLabels} = props + const graphRef = useRef(null) + const dagMode = links.length / nodes.length > 3 ? 'td' : undefined + + // Show labels optionally + let paintLabels: ForceGraphProps['nodeCanvasObject'] = undefined + let paintMode: ForceGraphProps['nodeCanvasObjectMode'] = undefined + if (showLabels) { + paintLabels = (node, ctx, _globalScale) => { + const label = node.name.split(' ')[0].split(':')[1] + const fontSize = 2 + ctx.font = `${fontSize}px Sans-Serif` + const textWidth = ctx.measureText(label).width + const padding = fontSize * 0.1 + const bckgDimensions = [textWidth + padding, fontSize + padding] // some padding + + ctx.fillStyle = 'rgba(255, 255, 255, 0.9)' + ctx.fillRect(node.x!, node.y! - bckgDimensions[1] / 2, bckgDimensions[0], bckgDimensions[1]) + + ctx.textAlign = 'left' + ctx.textBaseline = 'middle' + ctx.fillStyle = '#000' + ctx.fillText(label, node.x! + padding, node.y!) + } + paintMode = _node => 'after' + } + + return ( + { + setPath(node.name) + }} + onNodeRightClick={(node, _event) => { + openTarget(node.name) + }} + nodeCanvasObjectMode={paintMode} + nodeCanvasObject={paintLabels} + // cooldown + warmup ticks make the graph render already in its final form + cooldownTicks={1} + enableNodeDrag={true} + warmupTicks={100} + // looks + nodeAutoColorBy={props.colorByCfg ? 'cfg' : undefined} + linkDirectionalArrowLength={3 / Math.pow(nodes.length, 0.2)} + linkDirectionalArrowRelPos={1} + linkCurvature={0.2} + linkWidth={10 / Math.pow(links.length, 0.5)} + linkHoverPrecision={6} + dagMode={dagMode} + /> + ) +} diff --git a/app/buck2_explain/js/src/graph/RuleTypeDropdown.tsx b/app/buck2_explain/js/src/graph/RuleTypeDropdown.tsx new file mode 100644 index 0000000000000..a1d48bb26c9f1 --- /dev/null +++ b/app/buck2_explain/js/src/graph/RuleTypeDropdown.tsx @@ -0,0 +1,49 @@ +/** + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +import React, { useState } from 'react' + +export function RuleTypeDropdown(props: { options: { category: string; count: number }[], activeCount: number }) { + const [dropdownActive, setDropdownActive] = useState(false) + + return ( +
+    <div className={'dropdown' + (dropdownActive ? ' is-active' : '')}>
+      <button onClick={() => setDropdownActive(!dropdownActive)}>
+        Filter by rule type ({props.activeCount} active)
+      </button>
+      <div id="checkboxes">
+        {props.options.map(o => (
+          <CheckboxItem key={o.category} label={o.category} count={o.count} />
+        ))}
+      </div>
+    </div>
    + ) +} + +function CheckboxItem(props: { label: string; count: number }) { + return ( +
+    <label>
+      <input type="checkbox" /> {props.label} ({props.count})
+    </label>
    + ) +} diff --git a/app/buck2_explain/js/src/graph/graph.ts b/app/buck2_explain/js/src/graph/graph.ts new file mode 100644 index 0000000000000..238e496231ae4 --- /dev/null +++ b/app/buck2_explain/js/src/graph/graph.ts @@ -0,0 +1,101 @@ +/** + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +type Graph = Map + +interface Data { + distance: number + previous: number | null +} + +export function shortestPathTree(graph: Graph, source: number): Graph { + return pathTree(graph, source, 1) +} + +export function longestPathTree(graph: Graph, source: number): Graph { + return pathTree(graph, source, -1) +} + +function pathTree(graph: Graph, source: number, weight: number): Graph { + const distances: Map = new Map() + // Initialize distances to all vertices as infinite, except the source + for (const [k, _] of graph) { + distances.set(k, {distance: Infinity, previous: null}) + } + distances.set(source, {distance: 0, previous: null}) + // Traverse all vertices in topologically sorted order + const sortedVertices = toposort(graph) + for (const u of sortedVertices) { + const deps = graph.get(u)! + if (deps && distances.get(u)!.distance !== Infinity) { + for (const v of deps) { + // Update distance if current is shorter + const newDistance = distances.get(u)!.distance + weight + if (newDistance < distances.get(v)!.distance) { + distances.set(v, {distance: newDistance, previous: u}) + } + } + } + } + + // Go through all .previous to create tree + let resGraph = new Map() + for (const [k, _] of distances) { + resGraph.set(k, []) + } + for (const [k, data] of distances) { + if (data.previous == null) { + continue + } + resGraph.get(data.previous)!.push(k) + } + return resGraph +} + +export function toposort(graph: Graph): number[] { + const inDegree: Map = new Map() + const zeroInDegreeQueue: number[] = [] + const topOrder: number[] = [] + // Initialize inDegree to 0 for all vertices + for (const node of graph.keys()) { + inDegree.set(node, 0) + } + // Calculate in-degree of each node + for (const edges of graph.values()) { + for (const edge of edges) { + inDegree.set(edge, (inDegree.get(edge) || 0) + 1) + } + } + // Enqueue all vertices with in-degree 0 + for (const [node, degree] of inDegree) { + if (degree === 0) { + zeroInDegreeQueue.push(node) + } + } + // Process the queue + while (zeroInDegreeQueue.length) { + const node = zeroInDegreeQueue.shift()! + topOrder.push(node) + // For each adjacent vertex, reduce the in-degree + // If in-degree becomes zero, add it to the queue + const nodeEdges = graph.get(node) || [] + for (const edge of nodeEdges) { + const updatedInDegree = (inDegree.get(edge) || 0) - 1 + inDegree.set(edge, updatedInDegree) + if (updatedInDegree === 0) { + zeroInDegreeQueue.push(edge) + } + } + } + // Check if there was a cycle in the graph + if (topOrder.length !== graph.size) { + throw new Error('There might be a cycle in the graph: ' + topOrder.length) + } + return topOrder +} diff --git a/app/buck2_explain/js/src/graph/testGraph.ts b/app/buck2_explain/js/src/graph/testGraph.ts new file mode 100644 index 0000000000000..2add6d7652122 --- /dev/null +++ b/app/buck2_explain/js/src/graph/testGraph.ts @@ -0,0 +1,85 @@ +/** + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +import {toposort, shortestPathTree, longestPathTree} from './graph' + +function assert(condition, message) { + if (!condition) { + throw new Error(message || 'Assertion failed') + } +} + +function arraysEqual(a, b) { + // Check if the arrays are the same length + if (a.length !== b.length) { + return false + } + // Check if all items exist and are in the same order + for (let i = 0; i < a.length; i++) { + if (Array.isArray(a[i]) && Array.isArray(b[i])) { + // Recursively check for nested arrays + if (!arraysEqual(a[i], b[i])) { + return false + } + } else if (a[i] !== b[i]) { + // Check if elements are different + return false + } + } + // Otherwise, return true + return true +} + +function testToposort() { + const graph = new Map() + + graph.set(0, [1, 2]) + graph.set(1, [2]) + graph.set(2, []) + + const res = toposort(graph) + assert(arraysEqual(res, [0, 1, 2]), 'assertion failed, value received: ' + res) + console.log(arguments.callee.name, 'passed') +} + +testToposort() + +function testShortestTree() { + const graph = new Map() + + graph.set(0, [1, 2]) + graph.set(1, [2]) + graph.set(2, []) + + const res = shortestPathTree(graph, 0) + + assert(arraysEqual(res.get(0), [1, 2]), 'assertion failed for node 0, value received: ' + res) + assert(arraysEqual(res.get(1), []), 'assertion failed for node 1, value received: ' + res) + assert(arraysEqual(res.get(2), []), 'assertion failed for node 2, value received: ' + res) + console.log(arguments.callee.name, 'passed') +} + +testShortestTree() + +function testLongestTree() { + const graph = new Map() + + graph.set(0, [1, 2]) + graph.set(1, [2]) + graph.set(2, []) + + const res = longestPathTree(graph, 0) + + assert(arraysEqual(res.get(0), [1]), 'assertion failed for node 0, value received: ' + res) + assert(arraysEqual(res.get(1), [2]), 'assertion failed for node 1, value received: ' + res) + assert(arraysEqual(res.get(2), []), 'assertion failed for node 2, value received: ' + res) + console.log(arguments.callee.name, 'passed') +} + +testLongestTree() diff --git a/app/buck2_explain/js/tsconfig.json b/app/buck2_explain/js/tsconfig.json new file mode 100644 index 0000000000000..d0d0102080d11 --- /dev/null +++ b/app/buck2_explain/js/tsconfig.json @@ -0,0 +1,25 @@ +{ + "compilerOptions": { + "target": "ES6", + "lib": [ + "dom", + "dom.iterable", + "esnext" + ], + "allowJs": true, + "skipLibCheck": true, + "esModuleInterop": true, + "allowSyntheticDefaultImports": true, + "strict": true, + "forceConsistentCasingInFileNames": true, + "module": "esnext", + "moduleResolution": "node", + "resolveJsonModule": true, + "isolatedModules": true, + "noEmit": true, + "jsx": "react-jsx" + }, + "include": [ + "./src/App.tsx" + ] +} \ No newline at end of file diff --git a/app/buck2_explain/js/yarn.lock b/app/buck2_explain/js/yarn.lock new file mode 100644 index 0000000000000..fb7568ac8f422 --- /dev/null +++ b/app/buck2_explain/js/yarn.lock @@ -0,0 +1,464 @@ +# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. 
+# yarn lockfile v1 + + +"@esbuild/aix-ppc64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/aix-ppc64/-/aix-ppc64-0.20.2.tgz#a70f4ac11c6a1dfc18b8bbb13284155d933b9537" + integrity sha512-D+EBOJHXdNZcLJRBkhENNG8Wji2kgc9AZ9KiPr1JuZjsNtyHzrsfLRrY0tk2H2aoFu6RANO1y1iPPUCDYWkb5g== + +"@esbuild/android-arm64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/android-arm64/-/android-arm64-0.20.2.tgz#db1c9202a5bc92ea04c7b6840f1bbe09ebf9e6b9" + integrity sha512-mRzjLacRtl/tWU0SvD8lUEwb61yP9cqQo6noDZP/O8VkwafSYwZ4yWy24kan8jE/IMERpYncRt2dw438LP3Xmg== + +"@esbuild/android-arm@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/android-arm/-/android-arm-0.20.2.tgz#3b488c49aee9d491c2c8f98a909b785870d6e995" + integrity sha512-t98Ra6pw2VaDhqNWO2Oph2LXbz/EJcnLmKLGBJwEwXX/JAN83Fym1rU8l0JUWK6HkIbWONCSSatf4sf2NBRx/w== + +"@esbuild/android-x64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/android-x64/-/android-x64-0.20.2.tgz#3b1628029e5576249d2b2d766696e50768449f98" + integrity sha512-btzExgV+/lMGDDa194CcUQm53ncxzeBrWJcncOBxuC6ndBkKxnHdFJn86mCIgTELsooUmwUm9FkhSp5HYu00Rg== + +"@esbuild/darwin-arm64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/darwin-arm64/-/darwin-arm64-0.20.2.tgz#6e8517a045ddd86ae30c6608c8475ebc0c4000bb" + integrity sha512-4J6IRT+10J3aJH3l1yzEg9y3wkTDgDk7TSDFX+wKFiWjqWp/iCfLIYzGyasx9l0SAFPT1HwSCR+0w/h1ES/MjA== + +"@esbuild/darwin-x64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/darwin-x64/-/darwin-x64-0.20.2.tgz#90ed098e1f9dd8a9381695b207e1cff45540a0d0" + integrity sha512-tBcXp9KNphnNH0dfhv8KYkZhjc+H3XBkF5DKtswJblV7KlT9EI2+jeA8DgBjp908WEuYll6pF+UStUCfEpdysA== + +"@esbuild/freebsd-arm64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/freebsd-arm64/-/freebsd-arm64-0.20.2.tgz#d71502d1ee89a1130327e890364666c760a2a911" + integrity sha512-d3qI41G4SuLiCGCFGUrKsSeTXyWG6yem1KcGZVS+3FYlYhtNoNgYrWcvkOoaqMhwXSMrZRl69ArHsGJ9mYdbbw== + +"@esbuild/freebsd-x64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/freebsd-x64/-/freebsd-x64-0.20.2.tgz#aa5ea58d9c1dd9af688b8b6f63ef0d3d60cea53c" + integrity sha512-d+DipyvHRuqEeM5zDivKV1KuXn9WeRX6vqSqIDgwIfPQtwMP4jaDsQsDncjTDDsExT4lR/91OLjRo8bmC1e+Cw== + +"@esbuild/linux-arm64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/linux-arm64/-/linux-arm64-0.20.2.tgz#055b63725df678379b0f6db9d0fa85463755b2e5" + integrity sha512-9pb6rBjGvTFNira2FLIWqDk/uaf42sSyLE8j1rnUpuzsODBq7FvpwHYZxQ/It/8b+QOS1RYfqgGFNLRI+qlq2A== + +"@esbuild/linux-arm@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/linux-arm/-/linux-arm-0.20.2.tgz#76b3b98cb1f87936fbc37f073efabad49dcd889c" + integrity sha512-VhLPeR8HTMPccbuWWcEUD1Az68TqaTYyj6nfE4QByZIQEQVWBB8vup8PpR7y1QHL3CpcF6xd5WVBU/+SBEvGTg== + +"@esbuild/linux-ia32@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/linux-ia32/-/linux-ia32-0.20.2.tgz#c0e5e787c285264e5dfc7a79f04b8b4eefdad7fa" + integrity sha512-o10utieEkNPFDZFQm9CoP7Tvb33UutoJqg3qKf1PWVeeJhJw0Q347PxMvBgVVFgouYLGIhFYG0UGdBumROyiig== + +"@esbuild/linux-loong64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/linux-loong64/-/linux-loong64-0.20.2.tgz#a6184e62bd7cdc63e0c0448b83801001653219c5" + integrity sha512-PR7sp6R/UC4CFVomVINKJ80pMFlfDfMQMYynX7t1tNTeivQ6XdX5r2XovMmha/VjR1YN/HgHWsVcTRIMkymrgQ== + +"@esbuild/linux-mips64el@0.20.2": + version 
"0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/linux-mips64el/-/linux-mips64el-0.20.2.tgz#d08e39ce86f45ef8fc88549d29c62b8acf5649aa" + integrity sha512-4BlTqeutE/KnOiTG5Y6Sb/Hw6hsBOZapOVF6njAESHInhlQAghVVZL1ZpIctBOoTFbQyGW+LsVYZ8lSSB3wkjA== + +"@esbuild/linux-ppc64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/linux-ppc64/-/linux-ppc64-0.20.2.tgz#8d252f0b7756ffd6d1cbde5ea67ff8fd20437f20" + integrity sha512-rD3KsaDprDcfajSKdn25ooz5J5/fWBylaaXkuotBDGnMnDP1Uv5DLAN/45qfnf3JDYyJv/ytGHQaziHUdyzaAg== + +"@esbuild/linux-riscv64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/linux-riscv64/-/linux-riscv64-0.20.2.tgz#19f6dcdb14409dae607f66ca1181dd4e9db81300" + integrity sha512-snwmBKacKmwTMmhLlz/3aH1Q9T8v45bKYGE3j26TsaOVtjIag4wLfWSiZykXzXuE1kbCE+zJRmwp+ZbIHinnVg== + +"@esbuild/linux-s390x@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/linux-s390x/-/linux-s390x-0.20.2.tgz#3c830c90f1a5d7dd1473d5595ea4ebb920988685" + integrity sha512-wcWISOobRWNm3cezm5HOZcYz1sKoHLd8VL1dl309DiixxVFoFe/o8HnwuIwn6sXre88Nwj+VwZUvJf4AFxkyrQ== + +"@esbuild/linux-x64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/linux-x64/-/linux-x64-0.20.2.tgz#86eca35203afc0d9de0694c64ec0ab0a378f6fff" + integrity sha512-1MdwI6OOTsfQfek8sLwgyjOXAu+wKhLEoaOLTjbijk6E2WONYpH9ZU2mNtR+lZ2B4uwr+usqGuVfFT9tMtGvGw== + +"@esbuild/netbsd-x64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/netbsd-x64/-/netbsd-x64-0.20.2.tgz#e771c8eb0e0f6e1877ffd4220036b98aed5915e6" + integrity sha512-K8/DhBxcVQkzYc43yJXDSyjlFeHQJBiowJ0uVL6Tor3jGQfSGHNNJcWxNbOI8v5k82prYqzPuwkzHt3J1T1iZQ== + +"@esbuild/openbsd-x64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/openbsd-x64/-/openbsd-x64-0.20.2.tgz#9a795ae4b4e37e674f0f4d716f3e226dd7c39baf" + integrity sha512-eMpKlV0SThJmmJgiVyN9jTPJ2VBPquf6Kt/nAoo6DgHAoN57K15ZghiHaMvqjCye/uU4X5u3YSMgVBI1h3vKrQ== + +"@esbuild/sunos-x64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/sunos-x64/-/sunos-x64-0.20.2.tgz#7df23b61a497b8ac189def6e25a95673caedb03f" + integrity sha512-2UyFtRC6cXLyejf/YEld4Hajo7UHILetzE1vsRcGL3earZEW77JxrFjH4Ez2qaTiEfMgAXxfAZCm1fvM/G/o8w== + +"@esbuild/win32-arm64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/win32-arm64/-/win32-arm64-0.20.2.tgz#f1ae5abf9ca052ae11c1bc806fb4c0f519bacf90" + integrity sha512-GRibxoawM9ZCnDxnP3usoUDO9vUkpAxIIZ6GQI+IlVmr5kP3zUq+l17xELTHMWTWzjxa2guPNyrpq1GWmPvcGQ== + +"@esbuild/win32-ia32@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/win32-ia32/-/win32-ia32-0.20.2.tgz#241fe62c34d8e8461cd708277813e1d0ba55ce23" + integrity sha512-HfLOfn9YWmkSKRQqovpnITazdtquEW8/SoHW7pWpuEeguaZI4QnCRW6b+oZTztdBnZOS2hqJ6im/D5cPzBTTlQ== + +"@esbuild/win32-x64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/win32-x64/-/win32-x64-0.20.2.tgz#9c907b21e30a52db959ba4f80bb01a0cc403d5cc" + integrity sha512-N49X4lJX27+l9jbLKSqZ6bKNjzQvHaT8IIFUy+YIqmXQdjYCToGWwOItDrfby14c78aDd5NHQl29xingXfCdLQ== + +"@tweenjs/tween.js@18 - 23": + version "23.1.3" + resolved "https://registry.yarnpkg.com/@tweenjs/tween.js/-/tween.js-23.1.3.tgz#eff0245735c04a928bb19c026b58c2a56460539d" + integrity sha512-vJmvvwFxYuGnF2axRtPYocag6Clbb5YS7kLL+SO/TeVFzHqDIWrNKYtcsPMibjDx9O+bu+psAy9NKfWklassUA== + +"@types/prop-types@*": + version "15.7.12" + resolved 
"https://registry.yarnpkg.com/@types/prop-types/-/prop-types-15.7.12.tgz#12bb1e2be27293c1406acb6af1c3f3a1481d98c6" + integrity sha512-5zvhXYtRNRluoE/jAp4GVsSduVUzNWKkOZrCDBWYtE7biZywwdC2AcEzg+cSMLFRfVgeAFqpfNabiPjxFddV1Q== + +"@types/react-dom@^18.3.0": + version "18.3.0" + resolved "https://registry.yarnpkg.com/@types/react-dom/-/react-dom-18.3.0.tgz#0cbc818755d87066ab6ca74fbedb2547d74a82b0" + integrity sha512-EhwApuTmMBmXuFOikhQLIBUn6uFg81SwLMOAUgodJF14SOBOCMdU04gDoYi0WOJJHD144TL32z4yDqCW3dnkQg== + dependencies: + "@types/react" "*" + +"@types/react@*", "@types/react@^18.3.1": + version "18.3.1" + resolved "https://registry.yarnpkg.com/@types/react/-/react-18.3.1.tgz#fed43985caa834a2084d002e4771e15dfcbdbe8e" + integrity sha512-V0kuGBX3+prX+DQ/7r2qsv1NsdfnCLnTgnRJ1pYnxykBhGMz+qj+box5lq7XsO5mtZsBqpjwwTu/7wszPfMBcw== + dependencies: + "@types/prop-types" "*" + csstype "^3.0.2" + +accessor-fn@1: + version "1.5.1" + resolved "https://registry.yarnpkg.com/accessor-fn/-/accessor-fn-1.5.1.tgz#7b2d063c16deba5040a46292824ed67d925cb8ea" + integrity sha512-zZpFYBqIL1Aqg+f2qmYHJ8+yIZF7/tP6PUGx2/QM0uGPSO5UegpinmkNwDohxWtOj586BpMPVRUjce2HI6xB3A== + +"bezier-js@3 - 6": + version "6.1.4" + resolved "https://registry.yarnpkg.com/bezier-js/-/bezier-js-6.1.4.tgz#c7828f6c8900562b69d5040afb881bcbdad82001" + integrity sha512-PA0FW9ZpcHbojUCMu28z9Vg/fNkwTj5YhusSAjHHDfHDGLxJ6YUKrAN2vk1fP2MMOxVw4Oko16FMlRGVBGqLKg== + +canvas-color-tracker@1: + version "1.2.2" + resolved "https://registry.yarnpkg.com/canvas-color-tracker/-/canvas-color-tracker-1.2.2.tgz#fe1339a247b845d9eea57a6829b01faa244625ad" + integrity sha512-r+u/Ft2ka4Rj274Ts4L9bhYZLuMvbuJ/yL4seP0s+Pi+i9CM0caD+Sd//yseS5EVBJ2SKSmq36h2mNYUCdmTfA== + dependencies: + tinycolor2 "^1.6.0" + +csstype@^3.0.2: + version "3.1.3" + resolved "https://registry.yarnpkg.com/csstype/-/csstype-3.1.3.tgz#d80ff294d114fb0e6ac500fbf85b60137d7eff81" + integrity sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw== + +"d3-array@1 - 3", "d3-array@2 - 3", "d3-array@2.10.0 - 3": + version "3.2.4" + resolved "https://registry.yarnpkg.com/d3-array/-/d3-array-3.2.4.tgz#15fec33b237f97ac5d7c986dc77da273a8ed0bb5" + integrity sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg== + dependencies: + internmap "1 - 2" + +d3-binarytree@1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/d3-binarytree/-/d3-binarytree-1.0.2.tgz#ed43ebc13c70fbabfdd62df17480bc5a425753cc" + integrity sha512-cElUNH+sHu95L04m92pG73t2MEJXKu+GeKUN1TJkFsu93E5W8E9Sc3kHEGJKgenGvj19m6upSn2EunvMgMD2Yw== + +"d3-color@1 - 3": + version "3.1.0" + resolved "https://registry.yarnpkg.com/d3-color/-/d3-color-3.1.0.tgz#395b2833dfac71507f12ac2f7af23bf819de24e2" + integrity sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA== + +"d3-dispatch@1 - 3": + version "3.0.1" + resolved "https://registry.yarnpkg.com/d3-dispatch/-/d3-dispatch-3.0.1.tgz#5fc75284e9c2375c36c839411a0cf550cbfc4d5e" + integrity sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg== + +"d3-drag@2 - 3": + version "3.0.0" + resolved "https://registry.yarnpkg.com/d3-drag/-/d3-drag-3.0.0.tgz#994aae9cd23c719f53b5e10e3a0a6108c69607ba" + integrity sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg== + dependencies: + d3-dispatch "1 - 3" + d3-selection "3" + +"d3-ease@1 - 3": + version "3.0.1" + resolved 
"https://registry.yarnpkg.com/d3-ease/-/d3-ease-3.0.1.tgz#9658ac38a2140d59d346160f1f6c30fda0bd12f4" + integrity sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w== + +"d3-force-3d@2 - 3": + version "3.0.5" + resolved "https://registry.yarnpkg.com/d3-force-3d/-/d3-force-3d-3.0.5.tgz#9c8931b49acc3554f9110e128bc580cd3ab830f2" + integrity sha512-tdwhAhoTYZY/a6eo9nR7HP3xSW/C6XvJTbeRpR92nlPzH6OiE+4MliN9feuSFd0tPtEUo+191qOhCTWx3NYifg== + dependencies: + d3-binarytree "1" + d3-dispatch "1 - 3" + d3-octree "1" + d3-quadtree "1 - 3" + d3-timer "1 - 3" + +"d3-format@1 - 3": + version "3.1.0" + resolved "https://registry.yarnpkg.com/d3-format/-/d3-format-3.1.0.tgz#9260e23a28ea5cb109e93b21a06e24e2ebd55641" + integrity sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA== + +"d3-interpolate@1 - 3", "d3-interpolate@1.2.0 - 3": + version "3.0.1" + resolved "https://registry.yarnpkg.com/d3-interpolate/-/d3-interpolate-3.0.1.tgz#3c47aa5b32c5b3dfb56ef3fd4342078a632b400d" + integrity sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g== + dependencies: + d3-color "1 - 3" + +d3-octree@1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/d3-octree/-/d3-octree-1.0.2.tgz#b39026b82701e45c7163e34ee056dc492035a017" + integrity sha512-Qxg4oirJrNXauiuC94uKMbgxwnhdda9xRLl9ihq45srlJ4Ga3CSgqGcAL8iW7N5CIv4Oz8x3E734ulxyvHPvwA== + +"d3-quadtree@1 - 3": + version "3.0.1" + resolved "https://registry.yarnpkg.com/d3-quadtree/-/d3-quadtree-3.0.1.tgz#6dca3e8be2b393c9a9d514dabbd80a92deef1a4f" + integrity sha512-04xDrxQTDTCFwP5H6hRhsRcb9xxv2RzkcsygFzmkSIOJy3PeRJP7sNk3VRIbKXcog561P9oU0/rVH6vDROAgUw== + +"d3-scale-chromatic@1 - 3": + version "3.1.0" + resolved "https://registry.yarnpkg.com/d3-scale-chromatic/-/d3-scale-chromatic-3.1.0.tgz#34c39da298b23c20e02f1a4b239bd0f22e7f1314" + integrity sha512-A3s5PWiZ9YCXFye1o246KoscMWqf8BsD9eRiJ3He7C9OBaxKhAd5TFCdEx/7VbKtxxTsu//1mMJFrEt572cEyQ== + dependencies: + d3-color "1 - 3" + d3-interpolate "1 - 3" + +"d3-scale@1 - 4": + version "4.0.2" + resolved "https://registry.yarnpkg.com/d3-scale/-/d3-scale-4.0.2.tgz#82b38e8e8ff7080764f8dcec77bd4be393689396" + integrity sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ== + dependencies: + d3-array "2.10.0 - 3" + d3-format "1 - 3" + d3-interpolate "1.2.0 - 3" + d3-time "2.1.1 - 3" + d3-time-format "2 - 4" + +"d3-selection@2 - 3", d3-selection@3: + version "3.0.0" + resolved "https://registry.yarnpkg.com/d3-selection/-/d3-selection-3.0.0.tgz#c25338207efa72cc5b9bd1458a1a41901f1e1b31" + integrity sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ== + +"d3-time-format@2 - 4": + version "4.1.0" + resolved "https://registry.yarnpkg.com/d3-time-format/-/d3-time-format-4.1.0.tgz#7ab5257a5041d11ecb4fe70a5c7d16a195bb408a" + integrity sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg== + dependencies: + d3-time "1 - 3" + +"d3-time@1 - 3", "d3-time@2.1.1 - 3": + version "3.1.0" + resolved "https://registry.yarnpkg.com/d3-time/-/d3-time-3.1.0.tgz#9310db56e992e3c0175e1ef385e545e48a9bb5c7" + integrity sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q== + dependencies: + d3-array "2 - 3" + +"d3-timer@1 - 3": + version "3.0.1" + resolved "https://registry.yarnpkg.com/d3-timer/-/d3-timer-3.0.1.tgz#6284d2a2708285b1abb7e201eda4380af35e63b0" + integrity 
sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA== + +"d3-transition@2 - 3": + version "3.0.1" + resolved "https://registry.yarnpkg.com/d3-transition/-/d3-transition-3.0.1.tgz#6869fdde1448868077fdd5989200cb61b2a1645f" + integrity sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w== + dependencies: + d3-color "1 - 3" + d3-dispatch "1 - 3" + d3-ease "1 - 3" + d3-interpolate "1 - 3" + d3-timer "1 - 3" + +"d3-zoom@2 - 3": + version "3.0.0" + resolved "https://registry.yarnpkg.com/d3-zoom/-/d3-zoom-3.0.0.tgz#d13f4165c73217ffeaa54295cd6969b3e7aee8f3" + integrity sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw== + dependencies: + d3-dispatch "1 - 3" + d3-drag "2 - 3" + d3-interpolate "1 - 3" + d3-selection "2 - 3" + d3-transition "2 - 3" + +esbuild@0.20.2: + version "0.20.2" + resolved "https://registry.yarnpkg.com/esbuild/-/esbuild-0.20.2.tgz#9d6b2386561766ee6b5a55196c6d766d28c87ea1" + integrity sha512-WdOOppmUNU+IbZ0PaDiTst80zjnrOkyJNHoKupIcVyU8Lvla3Ugx94VzkQ32Ijqd7UhHJy75gNWDMUekcrSJ6g== + optionalDependencies: + "@esbuild/aix-ppc64" "0.20.2" + "@esbuild/android-arm" "0.20.2" + "@esbuild/android-arm64" "0.20.2" + "@esbuild/android-x64" "0.20.2" + "@esbuild/darwin-arm64" "0.20.2" + "@esbuild/darwin-x64" "0.20.2" + "@esbuild/freebsd-arm64" "0.20.2" + "@esbuild/freebsd-x64" "0.20.2" + "@esbuild/linux-arm" "0.20.2" + "@esbuild/linux-arm64" "0.20.2" + "@esbuild/linux-ia32" "0.20.2" + "@esbuild/linux-loong64" "0.20.2" + "@esbuild/linux-mips64el" "0.20.2" + "@esbuild/linux-ppc64" "0.20.2" + "@esbuild/linux-riscv64" "0.20.2" + "@esbuild/linux-s390x" "0.20.2" + "@esbuild/linux-x64" "0.20.2" + "@esbuild/netbsd-x64" "0.20.2" + "@esbuild/openbsd-x64" "0.20.2" + "@esbuild/sunos-x64" "0.20.2" + "@esbuild/win32-arm64" "0.20.2" + "@esbuild/win32-ia32" "0.20.2" + "@esbuild/win32-x64" "0.20.2" + +flatbuffers@23.5.26: + version "23.5.26" + resolved "https://registry.yarnpkg.com/flatbuffers/-/flatbuffers-23.5.26.tgz#01358e272a61239f0faf3bfbe4e014f3ace9d746" + integrity sha512-vE+SI9vrJDwi1oETtTIFldC/o9GsVKRM+s6EL0nQgxXlYV1Vc4Tk30hj4xGICftInKQKj1F3up2n8UbIVobISQ== + +flexsearch-ts@^0.7.35: + version "0.7.35" + resolved "https://registry.yarnpkg.com/flexsearch-ts/-/flexsearch-ts-0.7.35.tgz#9475533ab893c5b71f3f79028a8bb4d07c36e7a0" + integrity sha512-u+9rKRd9lsoUXyesQcDJ/aFdfFAWInigK95DUnmifbeOLU5LvjGiwqq+Xr55rCUbJCOSG8KTY+BoWWqFx5gbwA== + +force-graph@1: + version "1.43.5" + resolved "https://registry.yarnpkg.com/force-graph/-/force-graph-1.43.5.tgz#f1b1c1c014a01d435c48b8618d4d54c194fcbd8f" + integrity sha512-HveLELh9yhZXO/QOfaFS38vlwJZ/3sKu+jarfXzRmbmihSOH/BbRWnUvmg8wLFiYy6h4HlH4lkRfZRccHYmXgA== + dependencies: + "@tweenjs/tween.js" "18 - 23" + accessor-fn "1" + bezier-js "3 - 6" + canvas-color-tracker "1" + d3-array "1 - 3" + d3-drag "2 - 3" + d3-force-3d "2 - 3" + d3-scale "1 - 4" + d3-scale-chromatic "1 - 3" + d3-selection "2 - 3" + d3-zoom "2 - 3" + index-array-by "1" + kapsule "^1.14" + lodash-es "4" + +fromentries@^1.3.2: + version "1.3.2" + resolved "https://registry.yarnpkg.com/fromentries/-/fromentries-1.3.2.tgz#e4bca6808816bf8f93b52750f1127f5a6fd86e3a" + integrity sha512-cHEpEQHUg0f8XdtZCc2ZAhrHzKzT0MrFUTcvx+hfxYu7rGMDc5SKoXFh+n4YigxsHXRzc6OrCshdR1bWH6HHyg== + +index-array-by@1: + version "1.4.2" + resolved "https://registry.yarnpkg.com/index-array-by/-/index-array-by-1.4.2.tgz#d6f82e9fbff3201c4dab64ba415d4d2923242fea" + integrity 
sha512-SP23P27OUKzXWEC/TOyWlwLviofQkCSCKONnc62eItjp69yCZZPqDQtr3Pw5gJDnPeUMqExmKydNZaJO0FU9pw== + +"internmap@1 - 2": + version "2.0.3" + resolved "https://registry.yarnpkg.com/internmap/-/internmap-2.0.3.tgz#6685f23755e43c524e251d29cbc97248e3061009" + integrity sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg== + +jerrypick@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/jerrypick/-/jerrypick-1.1.1.tgz#db0b15841a53cfe492de2db9544eecf8de73203c" + integrity sha512-XTtedPYEyVp4t6hJrXuRKr/jHj8SC4z+4K0b396PMkov6muL+i8IIamJIvZWe3jUspgIJak0P+BaWKawMYNBLg== + +"js-tokens@^3.0.0 || ^4.0.0": + version "4.0.0" + resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499" + integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ== + +kapsule@^1.14: + version "1.14.5" + resolved "https://registry.yarnpkg.com/kapsule/-/kapsule-1.14.5.tgz#c0bc7c1d4c693ee2647182e5b4ffbf95a4d65f72" + integrity sha512-H0iSpTynUzZw3tgraDmReprpFRmH5oP5GPmaNsurSwLx2H5iCpOMIkp5q+sfhB4Tz/UJd1E1IbEE9Z6ksnJ6RA== + dependencies: + lodash-es "4" + +lodash-es@4: + version "4.17.21" + resolved "https://registry.yarnpkg.com/lodash-es/-/lodash-es-4.17.21.tgz#43e626c46e6591b7750beb2b50117390c609e3ee" + integrity sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw== + +loose-envify@^1.1.0, loose-envify@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.4.0.tgz#71ee51fa7be4caec1a63839f7e682d8132d30caf" + integrity sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q== + dependencies: + js-tokens "^3.0.0 || ^4.0.0" + +object-assign@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" + integrity sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg== + +prop-types@15: + version "15.8.1" + resolved "https://registry.yarnpkg.com/prop-types/-/prop-types-15.8.1.tgz#67d87bf1a694f48435cf332c24af10214a3140b5" + integrity sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg== + dependencies: + loose-envify "^1.4.0" + object-assign "^4.1.1" + react-is "^16.13.1" + +react-dom@^18.2.0: + version "18.2.0" + resolved "https://registry.yarnpkg.com/react-dom/-/react-dom-18.2.0.tgz#22aaf38708db2674ed9ada224ca4aa708d821e3d" + integrity sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g== + dependencies: + loose-envify "^1.1.0" + scheduler "^0.23.0" + +react-force-graph-2d@^1.25.5: + version "1.25.5" + resolved "https://registry.yarnpkg.com/react-force-graph-2d/-/react-force-graph-2d-1.25.5.tgz#d51bb009f5a3723cd803585d13e3700579042563" + integrity sha512-3u8WjZZorpwZSDs3n3QeOS9ZoxFPM+IR9SStYJVQ/qKECydMHarxnf7ynV/MKJbC6kUsc60soD0V+Uq/r2vz7Q== + dependencies: + force-graph "1" + prop-types "15" + react-kapsule "2" + +react-is@^16.13.1: + version "16.13.1" + resolved "https://registry.yarnpkg.com/react-is/-/react-is-16.13.1.tgz#789729a4dc36de2999dc156dd6c1d9c18cea56a4" + integrity sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ== + +react-kapsule@2: + version "2.4.0" + resolved "https://registry.yarnpkg.com/react-kapsule/-/react-kapsule-2.4.0.tgz#50d296ed2872a6db89f2de176eebb1a4ce2cccb8" + integrity 
sha512-w4Yv9CgWdj8kWGQEPNWFGJJ08dYEZHZpiaFR/DgZjCMBNqv9wus2Gy1qvHVJmJbzvAZbq6jdvFC+NYzEqAlNhQ== + dependencies: + fromentries "^1.3.2" + jerrypick "^1.1.1" + +react@^18.2.0: + version "18.2.0" + resolved "https://registry.yarnpkg.com/react/-/react-18.2.0.tgz#555bd98592883255fa00de14f1151a917b5d77d5" + integrity sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ== + dependencies: + loose-envify "^1.1.0" + +scheduler@^0.23.0: + version "0.23.0" + resolved "https://registry.yarnpkg.com/scheduler/-/scheduler-0.23.0.tgz#ba8041afc3d30eb206a487b6b384002e4e61fdfe" + integrity sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw== + dependencies: + loose-envify "^1.1.0" + +tinycolor2@^1.6.0: + version "1.6.0" + resolved "https://registry.yarnpkg.com/tinycolor2/-/tinycolor2-1.6.0.tgz#f98007460169b0263b97072c5ae92484ce02d09e" + integrity sha512-XPaBkWQJdsf3pLKJV9p4qN/S+fm2Oj8AIPo1BTUhg5oxkvm9+SVEGFdhyOz7tTdUTfvxMiAs4sp6/eZO2Ew+pw== diff --git a/app/buck2_explain/src/flatbuffers.rs b/app/buck2_explain/src/flatbuffers.rs new file mode 100644 index 0000000000000..c9882dd912922 --- /dev/null +++ b/app/buck2_explain/src/flatbuffers.rs @@ -0,0 +1,1102 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use buck2_node::attrs::configured_attr::ConfiguredAttr; +use buck2_node::attrs::display::AttrDisplayWithContextExt; +use buck2_node::attrs::inspect_options::AttrInspectOptions; +use buck2_node::attrs::internal::NAME_ATTRIBUTE_FIELD; +use buck2_node::nodes::configured::ConfiguredTargetNode; +use buck2_node::visibility::VisibilityPattern; +use buck2_node::visibility::VisibilityPatternList; +use buck2_query::query::environment::QueryTarget; +use flatbuffers::FlatBufferBuilder; +use flatbuffers::WIPOffset; +use gazebo::prelude::SliceExt; + +mod fbs { + pub use crate::explain_generated::explain::Build; + pub use crate::explain_generated::explain::BuildArgs; + pub use crate::explain_generated::explain::CodePointer; + pub use crate::explain_generated::explain::CodePointerArgs; + pub use crate::explain_generated::explain::ConfiguredTargetLabel; + pub use crate::explain_generated::explain::ConfiguredTargetLabelArgs; + pub use crate::explain_generated::explain::ConfiguredTargetNode; + pub use crate::explain_generated::explain::ConfiguredTargetNodeArgs; + pub use crate::explain_generated::explain::TargetField; + pub use crate::explain_generated::explain::TargetFieldArgs; + pub use crate::explain_generated::explain::TargetValue; + pub use crate::explain_generated::explain::TargetValueArgs; + pub use crate::explain_generated::explain::TargetValueType; +} + +enum AttrField<'a> { + Bool(&'a str, bool), + Int(&'a str, i64), + String(&'a str, String), + StringList(&'a str, Vec), + StringDict(&'a str, Vec<(String, String)>), +} + +pub(crate) fn gen_fbs( + data: Vec, +) -> anyhow::Result> { + let mut builder = FlatBufferBuilder::new(); + + let targets: Result, _> = data + .iter() + .map(|node| target_to_fbs(&mut builder, node)) + .collect(); + + let targets = builder.create_vector(&targets?); + let build = fbs::Build::create( + &mut builder, + &fbs::BuildArgs { + targets: Some(targets), + }, + ); + builder.finish(build, None); + Ok(builder) +} + +fn target_to_fbs<'a>( + 
builder: &'_ mut FlatBufferBuilder<'static>, + node: &'_ ConfiguredTargetNode, +) -> anyhow::Result>, anyhow::Error> { + // special attrs + let name = builder.create_shared_string(&node.name()); + let target_label = get_target_label(builder, node); + + let oncall = node.oncall().map(|v| builder.create_shared_string(v)); + let type_ = builder.create_shared_string(node.rule_type().name()); + let package = builder.create_shared_string(&node.buildfile_path().to_string()); + let target_configuration = + builder.create_shared_string(&node.target_configuration().to_string()); + let execution_platform = builder.create_shared_string(&node.execution_platform()?.id()); + let deps = { + let res = &node + .deps() + .map(|d| get_target_label(builder, d)) + .collect::>>(); + builder.create_vector(res) + }; + + let plugins = list_of_strings_to_fbs( + builder, + node.plugin_lists() + .iter() + .map(|(kind, _, _)| kind.to_string()) + .collect(), + ); + + let code_pointer = node + .root_location() + .map(|l| fbs::CodePointerArgs { + file_path: Some(builder.create_shared_string(&l.file)), + line: l.line as i32, + }) + .as_ref() + .map(|r| fbs::CodePointer::create(builder, r)); + + let srcs = node + .get("srcs", AttrInspectOptions::DefinedOnly) + .map(|v| match categorize(v.value, v.name) { + AttrField::StringList(_, v) => v.len(), + AttrField::StringDict(_, v) => v.len(), + _ => 0, + }) + .unwrap_or(0) as i64; + + // defined attrs + let attrs = node + .attrs(AttrInspectOptions::DefinedOnly) + .filter(|a| a.name != NAME_ATTRIBUTE_FIELD && a.name != "srcs") + .map(|a| categorize(a.value, a.name)); + + let list: Vec<_> = attrs + .map(|attr| { + let (name, value) = match attr { + AttrField::Bool(n, value) => { + let name = builder.create_shared_string(n); + ( + name, + fbs::TargetValue::create( + builder, + &fbs::TargetValueArgs { + type_: fbs::TargetValueType::Bool, + bool_value: Some(value), + ..Default::default() + }, + ), + ) + } + AttrField::Int(n, v) => { + let name = builder.create_shared_string(n); + ( + name, + fbs::TargetValue::create( + builder, + &fbs::TargetValueArgs { + type_: fbs::TargetValueType::Int, + int_value: Some(v), + ..Default::default() + }, + ), + ) + } + AttrField::String(n, v) => { + let name = builder.create_shared_string(n); + let value = Some(builder.create_shared_string(&v)); + ( + name, + fbs::TargetValue::create( + builder, + &fbs::TargetValueArgs { + type_: fbs::TargetValueType::String, + string_value: value, + ..Default::default() + }, + ), + ) + } + AttrField::StringList(n, v) => { + let name = builder.create_shared_string(n); + let value = list_of_strings_to_target_value(builder, v.to_vec()); + ( + name, + fbs::TargetValue::create( + builder, + &fbs::TargetValueArgs { + type_: fbs::TargetValueType::List, + list_value: value, + ..Default::default() + }, + ), + ) + } + AttrField::StringDict(n, v) => { + let name = builder.create_shared_string(n); + let value = dict_of_strings_to_target_field(builder, v.to_vec()); + ( + name, + fbs::TargetValue::create( + builder, + &fbs::TargetValueArgs { + type_: fbs::TargetValueType::Dict, + dict_value: value, + ..Default::default() + }, + ), + ) + } + }; + fbs::TargetField::create( + builder, + &fbs::TargetFieldArgs { + name: Some(name), + value: Some(value), + }, + ) + }) + .collect(); + + let all_attrs = Some(builder.create_vector(&list)); + + let target = fbs::ConfiguredTargetNode::create( + builder, + &fbs::ConfiguredTargetNodeArgs { + name: Some(name), + // special attrs + label: Some(target_label), + type_: Some(type_), + deps: 
Some(deps), + package: Some(package), + oncall, + target_configuration: Some(target_configuration), + execution_platform: Some(execution_platform), + plugins, + // defined attrs + attrs: all_attrs, + srcs, + code_pointer, + }, + ); + Ok(target) +} + +fn get_target_label<'a>( + builder: &mut FlatBufferBuilder<'static>, + node: &ConfiguredTargetNode, +) -> WIPOffset> { + let label = &node.label(); + let target_label = builder.create_shared_string(&label.unconfigured().to_string()); + let cfg = builder.create_shared_string(&label.cfg().to_string()); + let exec_cfg = label + .exec_cfg() + .as_ref() + .map(|c| builder.create_shared_string(&c.to_string())); + fbs::ConfiguredTargetLabel::create( + builder, + &fbs::ConfiguredTargetLabelArgs { + target_label: Some(target_label), + cfg: Some(cfg), + exec_cfg, + }, + ) +} + +fn categorize<'a>(a: ConfiguredAttr, name: &'a str) -> AttrField<'a> { + match a { + ConfiguredAttr::Bool(v) => AttrField::Bool(name, v.0), + ConfiguredAttr::String(v) => AttrField::String(name, v.0.to_string()), + ConfiguredAttr::List(v) => { + let mut list = vec![]; + v.0.iter().for_each(|v| { + match v { + ConfiguredAttr::String(v) => list.push(v.0.to_string()), + _ => list.push( + v.as_display_no_ctx() + .to_string() + .trim_matches('"') + .to_owned(), + ), // TODO iguridi: make a "printer_for_explain" for attrs + } + }); + AttrField::StringList(name, list) + } + ConfiguredAttr::None => AttrField::String(name, "null".to_owned()), + ConfiguredAttr::Visibility(v) => { + let list = match v.0 { + VisibilityPatternList::Public => vec![VisibilityPattern::PUBLIC.to_owned()], + VisibilityPatternList::List(patterns) => patterns.map(|p| p.to_string()), + }; + AttrField::StringList(name, list) + } + ConfiguredAttr::Int(v) => AttrField::Int(name, v), + ConfiguredAttr::EnumVariant(v) => AttrField::String(name, v.0.to_string()), + ConfiguredAttr::Tuple(v) => { + let mut list = vec![]; + v.0.iter().for_each(|v| { + match v { + ConfiguredAttr::String(v) => list.push(v.0.to_string()), + _ => list.push( + v.as_display_no_ctx() + .to_string() + .trim_matches('"') + .to_owned(), + ), // TODO iguridi: make a "printer_for_explain" for attrs + } + }); + AttrField::StringList(name, list) + } + ConfiguredAttr::Dict(v) => { + let string_pairs: Vec<_> = + v.0.iter() + .map(|(k, v)| match (k, v) { + (ConfiguredAttr::String(k), ConfiguredAttr::String(v)) => { + (k.0.to_string(), v.0.to_string()) + } + _ => ( + k.as_display_no_ctx() + .to_string() + .trim_matches('"') + .to_owned(), + v.as_display_no_ctx() + .to_string() + .trim_matches('"') + .to_owned(), + ), // TODO iguridi: make a "printer_for_explain" for attrs + }) + .collect(); + AttrField::StringDict(name, string_pairs) + } + ConfiguredAttr::OneOf(v, _) => categorize(*v, name), + ConfiguredAttr::WithinView(v) => { + let list = match v.0 { + VisibilityPatternList::Public => vec![VisibilityPattern::PUBLIC.to_owned()], + VisibilityPatternList::List(patterns) => patterns.map(|p| p.to_string()), + }; + AttrField::StringList(name, list) + } + ConfiguredAttr::ExplicitConfiguredDep(v) => AttrField::String(name, v.to_string()), // TODO iguridi: structure this + ConfiguredAttr::SplitTransitionDep(v) => AttrField::String(name, v.to_string()), // TODO iguridi: structure this + ConfiguredAttr::ConfigurationDep(v) => AttrField::String(name, v.to_string()), + ConfiguredAttr::PluginDep(v, _) => AttrField::String(name, v.to_string()), + ConfiguredAttr::Dep(v) => { + // TODO iguridi: make fbs type for labels + AttrField::String(name, v.to_string()) + } + 
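// The remaining attr kinds below are all flattened to plain strings for
+            // now; the TODOs in this match track giving labels and queries a
+            // structured fbs representation instead.
+            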
ConfiguredAttr::SourceLabel(v) => AttrField::String(name, v.to_string()), + ConfiguredAttr::Label(v) => AttrField::String(name, v.to_string()), + ConfiguredAttr::Arg(v) => AttrField::String(name, v.to_string()), + ConfiguredAttr::Query(v) => AttrField::String(name, v.query.query), + ConfiguredAttr::SourceFile(v) => AttrField::String(name, v.path().to_string()), + ConfiguredAttr::Metadata(v) => AttrField::String(name, v.to_string()), + ConfiguredAttr::TargetModifiers(v) => AttrField::String(name, v.to_string()), + } +} + +fn list_of_strings_to_fbs<'a>( + builder: &'_ mut FlatBufferBuilder<'static>, + list: Vec, +) -> Option>>> { + let list = list + .into_iter() + .map(|v| builder.create_shared_string(&v)) + .collect::>>(); + Some(builder.create_vector(&list)) +} + +fn list_of_strings_to_target_value<'a>( + builder: &'_ mut FlatBufferBuilder<'static>, + list: Vec, +) -> Option< + WIPOffset>>>, +> { + let list = list + .into_iter() + .map(|v| { + let value = Some(builder.create_shared_string(&v)); + fbs::TargetValue::create( + builder, + &fbs::TargetValueArgs { + type_: fbs::TargetValueType::String, + string_value: value, + ..Default::default() + }, + ) + }) + .collect::>>>(); + Some(builder.create_vector(&list)) +} + +fn dict_of_strings_to_target_field<'a>( + builder: &'_ mut FlatBufferBuilder<'static>, + dict: Vec<(String, String)>, +) -> Option< + WIPOffset>>>, +> { + let list = dict + .into_iter() + .map(|(k, v)| { + let key = Some(builder.create_shared_string(&k)); + let key = Some(fbs::TargetValue::create( + builder, + &fbs::TargetValueArgs { + type_: fbs::TargetValueType::String, + string_value: key, + ..Default::default() + }, + )); + let value = Some(builder.create_shared_string(&v)); + fbs::TargetValue::create( + builder, + &fbs::TargetValueArgs { + key, + type_: fbs::TargetValueType::String, + string_value: value, + ..Default::default() + }, + ) + }) + .collect::>>>(); + Some(builder.create_vector(&list)) +} + +#[cfg(test)] +mod tests { + use std::collections::BTreeMap; + + use buck2_core::cells::cell_path::CellPath; + use buck2_core::configuration::data::ConfigurationData; + use buck2_core::execution_types::execution::ExecutionPlatform; + use buck2_core::execution_types::execution::ExecutionPlatformResolution; + use buck2_core::execution_types::executor_config::CommandExecutorConfig; + use buck2_core::package::package_relative_path::PackageRelativePath; + use buck2_core::package::PackageLabel; + use buck2_core::plugins::PluginKind; + use buck2_core::plugins::PluginKindSet; + use buck2_core::provider::label::ProvidersLabel; + use buck2_core::provider::label::ProvidersName; + use buck2_core::target::label::label::TargetLabel; + use buck2_core::target::name::TargetName; + use buck2_interpreter_for_build::call_stack::StarlarkCallStackWrapper; + use buck2_node::attrs::attr::Attribute; + use buck2_node::attrs::attr_type::arg::StringWithMacros; + use buck2_node::attrs::attr_type::bool::BoolLiteral; + use buck2_node::attrs::attr_type::dep::DepAttr; + use buck2_node::attrs::attr_type::dep::DepAttrTransition; + use buck2_node::attrs::attr_type::dep::DepAttrType; + use buck2_node::attrs::attr_type::dict::DictLiteral; + use buck2_node::attrs::attr_type::list::ListLiteral; + use buck2_node::attrs::attr_type::query::QueryAttr; + use buck2_node::attrs::attr_type::query::QueryAttrBase; + use buck2_node::attrs::attr_type::query::ResolvedQueryLiterals; + use buck2_node::attrs::attr_type::string::StringLiteral; + use buck2_node::attrs::attr_type::tuple::TupleLiteral; + use 
buck2_node::attrs::attr_type::AttrType; + use buck2_node::attrs::coerced_attr::CoercedAttr; + use buck2_node::attrs::coerced_path::CoercedPath; + use buck2_node::attrs::internal::METADATA_ATTRIBUTE_FIELD; + use buck2_node::attrs::internal::VISIBILITY_ATTRIBUTE_FIELD; + use buck2_node::attrs::internal::WITHIN_VIEW_ATTRIBUTE_FIELD; + use buck2_node::call_stack::StarlarkCallStack; + use buck2_node::metadata::key::MetadataKey; + use buck2_node::metadata::map::MetadataMap; + use buck2_node::metadata::value::MetadataValue; + use buck2_node::provider_id_set::ProviderIdSet; + use buck2_node::visibility::VisibilitySpecification; + use buck2_node::visibility::WithinViewSpecification; + use buck2_util::arc_str::ArcSlice; + use dupe::Dupe; + use starlark::codemap::FileSpan; + use starlark::errors::Frame; + use starlark::eval::CallStack; + use starlark_map::small_map::SmallMap; + + use super::*; + pub use crate::explain_generated::explain::Build; + + #[test] + fn test_bool_attr() { + let data = gen_data( + vec![( + "bool_field", + Attribute::new(None, "", AttrType::bool()), + CoercedAttr::Bool(BoolLiteral(false)), + )], + vec![], + ); + + let fbs = gen_fbs(data).unwrap(); + let fbs = fbs.finished_data(); + let build = flatbuffers::root::(fbs).unwrap(); + let target = build.targets().unwrap().get(0); + + assert_things(target, build); + assert_eq!(target.attrs().unwrap().get(0).name(), Some("bool_field")); + } + + #[test] + fn test_int_attr() { + let data = gen_data( + vec![( + "int_field", + Attribute::new(None, "", AttrType::int()), + CoercedAttr::Int(1), + )], + vec![], + ); + + let fbs = gen_fbs(data).unwrap(); + let fbs = fbs.finished_data(); + let build = flatbuffers::root::(fbs).unwrap(); + let target = build.targets().unwrap().get(0); + + assert_things(target, build); + assert_eq!(target.attrs().unwrap().get(0).name(), Some("int_field")); + assert_eq!(target.attrs().unwrap().get(0).value().int_value(), Some(1)); + } + + #[test] + fn test_string_attr() { + let data = gen_data( + vec![( + "bar", + Attribute::new(None, "", AttrType::string()), + CoercedAttr::String(StringLiteral("foo".into())), + )], + vec![], + ); + + let fbs = gen_fbs(data).unwrap(); + let fbs = fbs.finished_data(); + let build = flatbuffers::root::(fbs).unwrap(); + let target = build.targets().unwrap().get(0); + + assert_things(target, build); + assert_eq!(target.attrs().unwrap().get(0).name(), Some("bar")); + assert_eq!( + target.attrs().unwrap().get(0).value().string_value(), + Some("foo") + ); + } + + #[test] + fn test_enum_attr() -> anyhow::Result<()> { + let data = gen_data( + vec![( + "enum_field", + Attribute::new(None, "", AttrType::enumeration(vec!["field".to_owned()])?), + CoercedAttr::EnumVariant(StringLiteral("some_string".into())), + )], + vec![], + ); + + let fbs = gen_fbs(data).unwrap(); + let fbs = fbs.finished_data(); + let build = flatbuffers::root::(fbs).unwrap(); + let target = build.targets().unwrap().get(0); + + assert_things(target, build); + assert_eq!(target.attrs().unwrap().get(0).name(), Some("enum_field")); + assert_eq!( + target.attrs().unwrap().get(0).value().string_value(), + Some("some_string") + ); + Ok(()) + } + + #[test] + fn test_arg_attr() { + let data = gen_data( + vec![( + "bar", + Attribute::new(None, "", AttrType::arg(false)), + CoercedAttr::Arg(StringWithMacros::StringPart( + "$(location :relative_path_test_file)".into(), + )), + )], + vec![], + ); + + let fbs = gen_fbs(data).unwrap(); + let fbs = fbs.finished_data(); + let build = flatbuffers::root::(fbs).unwrap(); + let target = 
build.targets().unwrap().get(0); + + assert_things(target, build); + assert_eq!(target.attrs().unwrap().get(0).name(), Some("bar")); + assert_eq!( + target.attrs().unwrap().get(0).value().string_value(), + Some("$(location :relative_path_test_file)") + ); + } + + #[test] + fn test_source_path_attr() { + let data = gen_data( + vec![( + "bar", + Attribute::new(None, "", AttrType::source(false)), + CoercedAttr::SourceFile(CoercedPath::File( + PackageRelativePath::new("foo/bar").unwrap().to_arc(), + )), + )], + vec![], + ); + + let fbs = gen_fbs(data).unwrap(); + let fbs = fbs.finished_data(); + let build = flatbuffers::root::(fbs).unwrap(); + let target = build.targets().unwrap().get(0); + + assert_things(target, build); + assert_eq!(target.attrs().unwrap().get(0).name(), Some("bar")); + assert_eq!( + target.attrs().unwrap().get(0).value().string_value(), + Some("foo/bar") + ); + } + + #[test] + fn test_srcs_count() { + let data = gen_data( + vec![( + "srcs", + Attribute::new(None, "", AttrType::list(AttrType::source(false))), + CoercedAttr::List(ListLiteral(ArcSlice::new([ + CoercedAttr::SourceFile(CoercedPath::File( + PackageRelativePath::new("foo/bar").unwrap().to_arc(), + )), + CoercedAttr::SourceFile(CoercedPath::File( + PackageRelativePath::new("foo/bar2").unwrap().to_arc(), + )), + ]))), + )], + vec![], + ); + + let fbs = gen_fbs(data).unwrap(); + let fbs = fbs.finished_data(); + let build = flatbuffers::root::(fbs).unwrap(); + let target = build.targets().unwrap().get(0); + + assert_things(target, build); + assert_eq!(target.srcs(), 2); + } + + #[test] + fn test_query_attr() { + let pkg = PackageLabel::testing_parse("cell//foo/bar"); + let name = TargetName::testing_new("t2"); + let label = TargetLabel::new(pkg, name.as_ref()); + let mut map: BTreeMap = BTreeMap::new(); + map.insert("key1".to_owned(), ProvidersLabel::default_for(label)); + + let data = gen_data( + vec![( + "bar", + Attribute::new(None, "", AttrType::query()), + CoercedAttr::Query(Box::new(QueryAttr { + query: QueryAttrBase { + query: "$(query_targets deps(:foo))".to_owned(), + resolved_literals: ResolvedQueryLiterals(map), + }, + providers: ProviderIdSet::EMPTY, + })), + )], + vec![], + ); + + let fbs = gen_fbs(data).unwrap(); + let fbs = fbs.finished_data(); + let build = flatbuffers::root::(fbs).unwrap(); + let target = build.targets().unwrap().get(0); + + assert_things(target, build); + assert_eq!(target.attrs().unwrap().get(0).name(), Some("bar")); + assert_eq!( + target.attrs().unwrap().get(0).value().string_value(), + Some("$(query_targets deps(:foo))") + ); + } + + #[test] + fn test_plugin_dep() { + let pkg = PackageLabel::testing_parse("cell//foo/bar"); + let name = TargetName::testing_new("t2"); + let label = TargetLabel::new(pkg, name.as_ref()); + let data = gen_data( + vec![( + "plugin_dep_field", + Attribute::new( + None, + "", + AttrType::plugin_dep(PluginKind::new( + "foo".to_owned(), + CellPath::testing_new("cell//foo/bar"), + )), + ), + CoercedAttr::PluginDep(label), + )], + vec![], + ); + + let fbs = gen_fbs(data).unwrap(); + let fbs = fbs.finished_data(); + let build = flatbuffers::root::(fbs).unwrap(); + let target = build.targets().unwrap().get(0); + + assert_things(target, build); + assert_eq!( + target.attrs().unwrap().get(0).name(), + Some("plugin_dep_field") + ); + assert_eq!( + target.attrs().unwrap().get(0).value().string_value(), + Some("cell//foo/bar:t2") + ); + } + + fn check_label( + f: impl Fn(TargetLabel) -> (&'static str, Attribute, CoercedAttr), + ) -> Result<(), anyhow::Error> { + 
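// Shared scaffolding for the label-flavored attr tests: builds a
+        // single-attr target via `gen_data`, round-trips it through `gen_fbs`,
+        // and checks that the rendered value carries the configuration hash
+        // suffix (`(#...)`) appended to `cell//foo/bar:t2`.
+        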
let pkg = PackageLabel::testing_parse("cell//foo/bar"); + let name = TargetName::testing_new("t2"); + let label = TargetLabel::new(pkg, name.as_ref()); + let tuple = f(label); + let data = gen_data(vec![tuple], vec![]); + + let fbs = gen_fbs(data).unwrap(); + let fbs = fbs.finished_data(); + let build = flatbuffers::root::(fbs).unwrap(); + let target = build.targets().unwrap().get(0); + + assert_things(target, build); + assert!( + target + .attrs() + .unwrap() + .get(0) + .value() + .string_value() + .unwrap() + .contains("cell//foo/bar:t2 (#") + ); + Ok(()) + } + + #[test] + fn test_deps_attr() -> anyhow::Result<()> { + let f = |label| { + ( + "label_field", + Attribute::new( + None, + "", + AttrType::dep(ProviderIdSet::EMPTY, PluginKindSet::EMPTY), + ), + CoercedAttr::Dep(ProvidersLabel::default_for(label)), + ) + }; + check_label(f) + } + + #[test] + fn test_label_attr() -> anyhow::Result<()> { + let f = |label| { + ( + "label_field", + Attribute::new(None, "", AttrType::label()), + CoercedAttr::Label(ProvidersLabel::default_for(label)), + ) + }; + check_label(f) + } + + #[test] + fn test_source_label_attr() -> anyhow::Result<()> { + let f = |label| { + ( + "label_field", + Attribute::new(None, "", AttrType::source(false)), + CoercedAttr::SourceLabel(ProvidersLabel::default_for(label)), + ) + }; + check_label(f) + } + + #[test] + fn test_configured_dep_attr() -> anyhow::Result<()> { + let f = |label: TargetLabel| { + ( + "label_field", + Attribute::new(None, "", AttrType::label()), + CoercedAttr::ConfiguredDep(Box::new(DepAttr { + attr_type: DepAttrType::new( + ProviderIdSet::EMPTY, + DepAttrTransition::Identity(PluginKindSet::EMPTY), + ), + label: ProvidersLabel::default_for(label) + .configure(ConfigurationData::testing_new()), + })), + ) + }; + check_label(f) + } + + #[test] + fn test_tuple_attr() { + let data = gen_data( + vec![( + "some_tuple", + Attribute::new( + None, + "", + AttrType::tuple(vec![AttrType::string(), AttrType::string()]), + ), + CoercedAttr::Tuple(TupleLiteral(ArcSlice::new([ + CoercedAttr::String(StringLiteral("some_string1".into())), + CoercedAttr::String(StringLiteral("some_string2".into())), + ]))), + )], + vec![], + ); + + let fbs = gen_fbs(data).unwrap(); + let fbs = fbs.finished_data(); + let build = flatbuffers::root::(fbs).unwrap(); + let target = build.targets().unwrap().get(0); + + assert_things(target, build); + assert_eq!( + target + .attrs() + .unwrap() + .get(0) + .value() + .list_value() + .unwrap() + .get(0) + .string_value(), + Some("some_string1") + ); + } + + #[test] + fn test_list_of_strings() { + let pkg = PackageLabel::testing_parse("cell//foo/bar"); + let name = TargetName::testing_new("t2"); + let label = TargetLabel::new(pkg, name.as_ref()); + let data = gen_data( + vec![( + "some_deps", + Attribute::new( + None, + "", + AttrType::list(AttrType::dep(ProviderIdSet::EMPTY, PluginKindSet::EMPTY)), + ), + CoercedAttr::List(ListLiteral(ArcSlice::new([CoercedAttr::Dep( + ProvidersLabel::new(label, ProvidersName::Default), + )]))), + )], + vec![], + ); + + let fbs = gen_fbs(data).unwrap(); + let fbs = fbs.finished_data(); + let build = flatbuffers::root::(fbs).unwrap(); + let target = build.targets().unwrap().get(0); + + assert_things(target, build); + assert_eq!(target.attrs().unwrap().get(0).name(), Some("some_deps")); + } + + #[test] + fn test_visibility() { + let data = gen_data( + vec![], + vec![( + VISIBILITY_ATTRIBUTE_FIELD, + Attribute::new(None, "", AttrType::visibility()), + 
CoercedAttr::Visibility(VisibilitySpecification(VisibilityPatternList::Public)), + )], + ); + + let fbs = gen_fbs(data).unwrap(); + let fbs = fbs.finished_data(); + let build = flatbuffers::root::(fbs).unwrap(); + let target = build.targets().unwrap().get(0); + + assert_things(target, build); + assert_eq!( + target.attrs().unwrap().get(0).name(), + Some(VISIBILITY_ATTRIBUTE_FIELD) + ); + assert_eq!( + target + .attrs() + .unwrap() + .get(0) + .value() + .list_value() + .unwrap() + .get(0) + .string_value(), + Some("PUBLIC") + ); + } + + #[test] + fn test_one_of_attr() { + let data = gen_data( + vec![( + "one_of_field", + Attribute::new(None, "", AttrType::one_of(vec![AttrType::int()])), + CoercedAttr::OneOf(Box::new(CoercedAttr::Int(7)), 0), + )], + vec![], + ); + + let fbs = gen_fbs(data).unwrap(); + let fbs = fbs.finished_data(); + let build = flatbuffers::root::(fbs).unwrap(); + let target = build.targets().unwrap().get(0); + + assert_things(target, build); + assert_eq!(target.attrs().unwrap().get(0).name(), Some("one_of_field")); + assert_eq!(target.attrs().unwrap().get(0).value().int_value(), Some(7)); + } + + #[test] + fn test_within_view() { + let data = gen_data( + vec![], + vec![( + WITHIN_VIEW_ATTRIBUTE_FIELD, + Attribute::new(None, "", AttrType::within_view()), + CoercedAttr::WithinView(WithinViewSpecification(VisibilityPatternList::Public)), + )], + ); + + let fbs = gen_fbs(data).unwrap(); + let fbs = fbs.finished_data(); + let build = flatbuffers::root::(fbs).unwrap(); + let target = build.targets().unwrap().get(0); + + assert_things(target, build); + assert_eq!( + target.attrs().unwrap().get(0).name(), + Some(WITHIN_VIEW_ATTRIBUTE_FIELD) + ); + assert_eq!( + target + .attrs() + .unwrap() + .get(0) + .value() + .list_value() + .unwrap() + .get(0) + .string_value(), + Some("PUBLIC") + ); + } + + #[test] + fn test_dict_of_strings() { + let data = gen_data( + vec![( + "dict_field", + Attribute::new( + None, + "", + AttrType::dict(AttrType::string(), AttrType::string(), false), + ), + CoercedAttr::Dict(DictLiteral(ArcSlice::new([( + CoercedAttr::String(StringLiteral("foo".into())), + CoercedAttr::String(StringLiteral("bar".into())), + )]))), + )], + vec![], + ); + + let fbs = gen_fbs(data).unwrap(); + let fbs = fbs.finished_data(); + let build = flatbuffers::root::(fbs).unwrap(); + let target = build.targets().unwrap().get(0); + + assert_things(target, build); + assert_eq!(target.attrs().unwrap().get(0).name(), Some("dict_field")); + assert_eq!( + target + .attrs() + .unwrap() + .get(0) + .value() + .dict_value() + .unwrap() + .get(0) + .string_value(), + Some("bar") + ); + } + + #[test] + fn test_metadata_attr() -> anyhow::Result<()> { + let mut map = SmallMap::new(); + map.insert( + MetadataKey::try_from("key.something".to_owned())?, + MetadataValue::new(serde_json::Value::String("foo".to_owned())), + ); + let data = gen_data( + vec![], + vec![( + METADATA_ATTRIBUTE_FIELD, + Attribute::new(None, "", AttrType::metadata()), + CoercedAttr::Metadata(MetadataMap::new(map)), + )], + ); + + let fbs = gen_fbs(data).unwrap(); + let fbs = fbs.finished_data(); + let build = flatbuffers::root::(fbs).unwrap(); + let target = build.targets().unwrap().get(0); + + assert_things(target, build); + assert_eq!( + target.attrs().unwrap().get(0).value().string_value(), + Some("{\"key.something\":\"foo\"}") + ); + Ok(()) + } + + fn assert_things(target: fbs::ConfiguredTargetNode<'_>, build: fbs::Build<'_>) { + // special attrs + let label = target.label().unwrap(); + 
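// These assertions pin down the fixture constructed in `gen_data` below:
+        // target `cell//pkg:foo` of rule type `foo_lib`, executing on platform
+        // `cell//pkg:bar`, plus a second attr-less target `cell//pkg:baz`.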
+        assert!(label.cfg().unwrap().contains("#"));
+        assert_eq!(label.target_label().unwrap(), "cell//pkg:foo");
+        assert_eq!(target.name(), Some("foo"));
+        assert_eq!(target.type_(), Some("foo_lib"));
+        assert_eq!(target.package(), Some("cell//pkg:BUCK"));
+        assert_eq!(target.oncall(), None);
+        assert_eq!(target.execution_platform(), Some("cell//pkg:bar"));
+        assert_eq!(
+            target.code_pointer().unwrap().file_path(),
+            Some("cell/pkg/BUCK")
+        );
+        assert_eq!(target.code_pointer().unwrap().line(), 0);
+        assert_eq!(target.deps().unwrap().is_empty(), true);
+        assert_eq!(target.plugins().unwrap().is_empty(), true);
+
+        let target2 = build.targets().unwrap().get(1);
+        assert_eq!(
+            target2.label().unwrap().target_label(),
+            Some("cell//pkg:baz"),
+        );
+    }
+
+    fn gen_data(
+        attrs: Vec<(
+            &str,
+            buck2_node::attrs::attr::Attribute,
+            buck2_node::attrs::coerced_attr::CoercedAttr,
+        )>,
+        internal_attrs: Vec<(
+            &str,
+            buck2_node::attrs::attr::Attribute,
+            buck2_node::attrs::coerced_attr::CoercedAttr,
+        )>,
+    ) -> Vec<ConfiguredTargetNode> {
+        // Setup data
+        let target_label = TargetLabel::testing_parse("cell//pkg:foo");
+        let configured_target_label = target_label.configure(ConfigurationData::testing_new());
+
+        let execution_platform_resolution = {
+            let platform_label = TargetLabel::testing_parse("cell//pkg:bar");
+            let platform = ExecutionPlatform::platform(
+                platform_label,
+                ConfigurationData::testing_new(),
+                CommandExecutorConfig::testing_local(),
+            );
+            ExecutionPlatformResolution::new(Some(platform), Vec::new())
+        };
+
+        let target = ConfiguredTargetNode::testing_new(
+            configured_target_label,
+            "foo_lib",
+            execution_platform_resolution.dupe(),
+            attrs,
+            internal_attrs,
+            Some(StarlarkCallStack::new(StarlarkCallStackWrapper(
+                CallStack {
+                    frames: vec![Frame {
+                        name: "foo".to_owned(),
+                        location: Some(FileSpan::new(
+                            "cell/pkg/BUCK".to_owned(),
+                            "source".to_owned(),
+                        )),
+                    }],
+                },
+            ))),
+        );
+
+        let target_label2 = TargetLabel::testing_parse("cell//pkg:baz");
+        let configured_target_label2 = target_label2.configure(ConfigurationData::testing_new());
+        let target2 = ConfiguredTargetNode::testing_new(
+            configured_target_label2,
+            "foo_lib",
+            execution_platform_resolution,
+            vec![],
+            vec![],
+            None,
+        );
+        vec![target, target2]
+    }
+}
diff --git a/app/buck2_explain/src/lib.rs b/app/buck2_explain/src/lib.rs
new file mode 100644
index 0000000000000..a486b40681af4
--- /dev/null
+++ b/app/buck2_explain/src/lib.rs
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::fs;
+use std::io::Cursor;
+
+use base64::engine::general_purpose::STANDARD;
+use base64::Engine;
+use buck2_core::fs::paths::abs_path::AbsPathBuf;
+
+#[allow(unused_imports)]
+#[allow(unused_extern_crates)]
+#[allow(clippy::extra_unused_lifetimes)]
+mod explain_generated;
+mod flatbuffers;
+use buck2_common::manifold::Bucket;
+use buck2_common::manifold::ManifoldClient;
+use buck2_node::nodes::configured::ConfiguredTargetNode;
+
+const HTML_PLACEHOLDER: &str = "XXDATAXX";
+
+pub async fn main(
+    data: Vec<ConfiguredTargetNode>,
+    output: Option<&AbsPathBuf>,
+    fbs_dump: Option<&AbsPathBuf>,
+    manifold_path: Option<&str>,
+) -> anyhow::Result<()> {
+    let fbs = flatbuffers::gen_fbs(data)?;
+
+    let fbs = fbs.finished_data();
+    let base64 = STANDARD.encode(&fbs);
+
+    // For dev purposes, dump the base64 encoded flatbuffer to a file
+    if let Some(fbs_dump) = fbs_dump {
+        fs::write(fbs_dump, &base64)?;
+    }
+
+    let html_out = {
+        let html_in = include_str!("explain.html");
+        if !html_in.contains(HTML_PLACEHOLDER) {
+            return Err(anyhow::anyhow!("HTML template is not valid"));
+        }
+
+        html_in.replace(HTML_PLACEHOLDER, &base64)
+    };
+
+    let mut cursor = &mut Cursor::new(html_out.as_bytes());
+
+    if let Some(o) = output {
+        fs::write(o, &html_out)?
+    };
+
+    if let Some(p) = manifold_path {
+        // TODO iguridi: compress before upload
+        // TODO iguridi: write and upload concurrently
+        let manifold = ManifoldClient::new().await?;
+
+        manifold
+            .read_and_upload(Bucket::EVENT_LOGS, &p, Default::default(), &mut cursor)
+            .await?;
+    }
+
+    Ok(())
+}
diff --git a/app/buck2_external_cells/BUCK b/app/buck2_external_cells/BUCK
new file mode 100644
index 0000000000000..8e4d3685e7207
--- /dev/null
+++ b/app/buck2_external_cells/BUCK
@@ -0,0 +1,29 @@
+load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library")
+
+oncall("build_infra")
+
+rust_library(
+    name = "buck2_external_cells",
+    srcs = glob(["src/**/*.rs"]),
+    test_deps = [
+        "fbsource//third-party/rust:tokio",
+    ],
+    deps = [
+        "fbsource//third-party/rust:anyhow",
+        "fbsource//third-party/rust:async-trait",
+        "fbsource//third-party/rust:derive_more",
+        "fbsource//third-party/rust:tokio",
+        "//buck2/allocative/allocative:allocative",
+        "//buck2/app/buck2_build_api:buck2_build_api",
+        "//buck2/app/buck2_common:buck2_common",
+        "//buck2/app/buck2_core:buck2_core",
+        "//buck2/app/buck2_directory:buck2_directory",
+        "//buck2/app/buck2_error:buck2_error",
+        "//buck2/app/buck2_execute:buck2_execute",
+        "//buck2/app/buck2_external_cells_bundled:buck2_external_cells_bundled",
+        "//buck2/app/buck2_util:buck2_util",
+        "//buck2/dice/dice:dice",
+        "//buck2/gazebo/cmp_any:cmp_any",
+        "//buck2/gazebo/dupe:dupe",
+    ],
+)
diff --git a/app/buck2_external_cells/Cargo.toml b/app/buck2_external_cells/Cargo.toml
new file mode 100644
index 0000000000000..be127ef1a6b0b
--- /dev/null
+++ b/app/buck2_external_cells/Cargo.toml
@@ -0,0 +1,30 @@
+[package]
+description = "Buck2 external cells implementation"
+edition = "2021"
+license = { workspace = true }
+name = "buck2_external_cells"
+repository = { workspace = true }
+version = "0.1.0"
+
+[dependencies]
+allocative = { workspace = true }
+cmp_any = { workspace = true }
+dice = { workspace = true }
+dupe = { workspace = true }
+
+anyhow = { workspace = true }
+async-trait = { workspace = true }
+derive_more = { workspace = true }
+tokio = { workspace = true }
+
+buck2_build_api = { workspace = true }
+buck2_common = { workspace = true }
+buck2_core = { workspace = true }
+buck2_directory = { workspace = true }
+buck2_error = { workspace = true }
+buck2_execute = { workspace = true }
+buck2_external_cells_bundled = { workspace = true }
+buck2_util = { workspace = true }
+
+[dev-dependencies]
+tokio = { workspace = true }
diff --git a/app/buck2_external_cells/src/bundled.rs b/app/buck2_external_cells/src/bundled.rs
new file mode 100644
index 0000000000000..ac0ac706fe127
--- /dev/null
+++ b/app/buck2_external_cells/src/bundled.rs
@@ -0,0 +1,484 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::env;
+use std::fs;
+use std::path::Path;
+use std::sync::Arc;
+use std::sync::OnceLock;
+
+use anyhow::Context;
+use buck2_build_api::actions::artifact::get_artifact_fs::GetArtifactFs;
+use buck2_common::dice::file_ops::delegate::FileOpsDelegate;
+use buck2_common::file_ops::FileMetadata;
+use buck2_common::file_ops::FileType;
+use buck2_common::file_ops::RawDirEntry;
+use buck2_common::file_ops::RawPathMetadata;
+use buck2_common::file_ops::TrackedFileDigest;
+use buck2_common::io::fs::is_executable;
+use buck2_core::cells::external::ExternalCellOrigin;
+use buck2_core::cells::name::CellName;
+use buck2_core::cells::paths::CellRelativePath;
+use buck2_core::cells::paths::CellRelativePathBuf;
+use buck2_core::fs::fs_util;
+use buck2_core::fs::paths::abs_path::AbsPathBuf;
+use buck2_core::fs::paths::file_name::FileName;
+use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath;
+use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf;
+use buck2_core::fs::project_rel_path::ProjectRelativePathBuf;
+use buck2_directory::directory::builder::DirectoryBuilder;
+use buck2_directory::directory::directory::Directory;
+use buck2_directory::directory::directory_hasher::NoDigest;
+use buck2_directory::directory::directory_hasher::NoDigestDigester;
+use buck2_directory::directory::directory_iterator::DirectoryIterator;
+use buck2_directory::directory::directory_ref::DirectoryRef;
+use buck2_directory::directory::entry::DirectoryEntry;
+use buck2_directory::directory::find::find;
+use buck2_directory::directory::find::DirectoryFindError;
+use buck2_directory::directory::immutable_directory::ImmutableDirectory;
+use buck2_error::BuckErrorContext;
+use buck2_execute::digest_config::DigestConfig;
+use buck2_execute::digest_config::HasDigestConfig;
+use buck2_execute::materialize::materializer::HasMaterializer;
+use buck2_execute::materialize::materializer::WriteRequest;
+use buck2_external_cells_bundled::get_bundled_data;
+use buck2_external_cells_bundled::BundledCell;
+use buck2_external_cells_bundled::BundledFile;
+use cmp_any::PartialEqAny;
+use dice::CancellationContext;
+use dice::DiceComputations;
+use dice::Key;
+
+fn load_nano_prelude() -> anyhow::Result<BundledCell> {
+    let path = env::var("NANO_PRELUDE").context(
+        "NANO_PRELUDE env var must be set to the location of nano prelude\n\
+        Consider `export NANO_PRELUDE=$HOME/fbsource/fbcode/buck2/tests/e2e_util/nano_prelude`",
+    )?;
+    if path.is_empty() {
+        return Err(anyhow::anyhow!("NANO_PRELUDE env var must not be empty"));
+    }
+    let path = AbsPathBuf::new(Path::new(&path))
+        .context("NANO_PRELUDE env var must point to absolute path")?;
+
+    let mut files = Vec::new();
+    let mut dir_stack = Vec::new();
+    dir_stack.push((path, ForwardRelativePathBuf::empty()));
+    while let Some((dir, rel_path)) = dir_stack.pop() {
+        for entry in fs::read_dir(dir)? {
+            let entry = entry?;
+            let entry_path = AbsPathBuf::new(entry.path())?;
+            let entry_rel_path = rel_path.join(FileName::new(
+                entry.file_name().to_str().context("not UTF-8 string")?,
+            )?);
+            match FileType::from(entry.file_type()?) {
+                FileType::Directory => dir_stack.push((entry_path, entry_rel_path)),
+                FileType::File => {
+                    let contents = fs_util::read(&entry_path)?;
+                    files.push(BundledFile {
+                        path: entry_rel_path.as_str().to_owned().leak(),
+                        contents: contents.leak(),
+                        is_executable: is_executable(&entry.metadata()?),
+                    });
+                }
+                FileType::Symlink | FileType::Unknown => {
+                    // We don't have these in nano-prelude.
+                }
+            }
+        }
+    }
+
+    Ok(BundledCell {
+        name: "nano_prelude",
+        files: files.leak(),
+        is_testing: true,
+    })
+}
+
+fn nano_prelude() -> anyhow::Result<BundledCell> {
+    static NANO_PRELUDE: OnceLock<BundledCell> = OnceLock::new();
+    Ok(*NANO_PRELUDE.get_or_try_init(|| load_nano_prelude().context("loading nano_prelude"))?)
+}
+
+pub(crate) fn find_bundled_data(cell_name: CellName) -> anyhow::Result<BundledCell> {
+    #[derive(buck2_error::Error, Debug)]
+    #[error("No bundled cell named `{0}`, options are `{}`", _1.join(", "))]
+    struct CellNotBundled(String, Vec<&'static str>);
+
+    let cell_name = cell_name.as_str();
+
+    if cell_name == "nano_prelude" {
+        return nano_prelude();
+    }
+
+    get_bundled_data()
+        .iter()
+        .find(|data| data.name == cell_name)
+        .copied()
+        .ok_or_else(|| {
+            CellNotBundled(
+                cell_name.to_owned(),
+                get_bundled_data()
+                    .iter()
+                    .filter(|data| !data.is_testing)
+                    .map(|data| data.name)
+                    .collect(),
+            )
+            .into()
+        })
+}
+
+#[derive(Clone, PartialEq, Eq, Hash, Debug, allocative::Allocative)]
+struct ContentsAndMetadata {
+    contents: &'static [u8],
+    metadata: FileMetadata,
+}
+
+#[derive(allocative::Allocative, PartialEq, Eq)]
+pub(crate) struct BundledFileOpsDelegate {
+    dir: ImmutableDirectory<ContentsAndMetadata, NoDigest>,
+}
+
+#[derive(buck2_error::Error, Debug)]
+enum BundledPathSearchError {
+    #[error("Expected a directory at `{0}` but found a file")]
+    ExpectedDirectory(String),
+    #[error("Path not found: `{0}`")]
+    MissingFile(CellRelativePathBuf),
+    #[error("Expected file at `{0}` but found a directory")]
+    ExpectedFile(CellRelativePathBuf),
+}
+
+impl BundledFileOpsDelegate {
+    fn get_entry_at_path_if_exists(
+        &self,
+        path: &CellRelativePath,
+    ) -> anyhow::Result<
+        Option<
+            DirectoryEntry<
+                impl DirectoryRef<Leaf = ContentsAndMetadata, DirectoryDigest = NoDigest>,
+                &ContentsAndMetadata,
+            >,
+        >,
+    > {
+        match find(self.dir.as_ref(), path.iter()) {
+            Ok(entry) => Ok(entry),
+            Err(DirectoryFindError::CannotTraverseLeaf { path }) => {
+                Err(BundledPathSearchError::ExpectedDirectory(path.to_string()).into())
+            }
+        }
+    }
+
+    fn get_entry_at_path(
+        &self,
+        path: &CellRelativePath,
+    ) -> anyhow::Result<
+        DirectoryEntry<
+            impl DirectoryRef<Leaf = ContentsAndMetadata, DirectoryDigest = NoDigest>,
+            &ContentsAndMetadata,
+        >,
+    > {
+        self.get_entry_at_path_if_exists(path)?
+            .ok_or_else(|| BundledPathSearchError::MissingFile(path.to_owned()).into())
+    }
+}
+
+#[async_trait::async_trait]
+impl FileOpsDelegate for BundledFileOpsDelegate {
+    async fn read_file_if_exists(
+        &self,
+        path: &'async_trait CellRelativePath,
+    ) -> anyhow::Result<Option<String>> {
+        match self.get_entry_at_path_if_exists(path)? {
+            Some(DirectoryEntry::Leaf(leaf)) => {
+                Ok(Some(String::from_utf8(leaf.contents.to_vec())?))
+            }
+            Some(DirectoryEntry::Dir(_)) => {
+                Err(BundledPathSearchError::ExpectedFile(path.to_owned()).into())
+            }
+            None => Ok(None),
+        }
+    }
+
+    /// Return the list of file outputs, sorted.
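+    ///
+    /// A minimal sketch of the contract, mirroring `test_dir_listing` below
+    /// (`"dir"` is a path inside the bundled test cell):
+    ///
+    /// ```ignore
+    /// let entries = ops.read_dir(CellRelativePath::unchecked_new("dir")).await?;
+    /// assert!(entries.is_sorted());
+    /// ```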
+    async fn read_dir(
+        &self,
+        path: &'async_trait CellRelativePath,
+    ) -> anyhow::Result<Vec<RawDirEntry>> {
+        let dir = match self.get_entry_at_path(path)? {
+            DirectoryEntry::Dir(dir) => dir,
+            DirectoryEntry::Leaf(_) => {
+                return Err(BundledPathSearchError::ExpectedDirectory(path.to_string()).into());
+            }
+        };
+
+        let entries = dir
+            .entries()
+            .map(|(name, entry)| RawDirEntry {
+                file_name: name.to_owned().into_inner(),
+                file_type: match entry {
+                    DirectoryEntry::Leaf(_) => FileType::File,
+                    DirectoryEntry::Dir(_) => FileType::Directory,
+                },
+            })
+            .collect();
+
+        Ok(entries)
+    }
+
+    async fn read_path_metadata_if_exists(
+        &self,
+        path: &'async_trait CellRelativePath,
+    ) -> anyhow::Result<Option<RawPathMetadata>> {
+        match self.get_entry_at_path_if_exists(path)? {
+            Some(DirectoryEntry::Leaf(leaf)) => {
+                Ok(Some(RawPathMetadata::File(leaf.metadata.clone())))
+            }
+            Some(DirectoryEntry::Dir(_)) => Ok(Some(RawPathMetadata::Directory)),
+            None => Ok(None),
+        }
+    }
+
+    fn eq_token(&self) -> PartialEqAny {
+        PartialEqAny::new(self)
+    }
+}
+
+fn get_file_ops_delegate_impl(
+    data: BundledCell,
+    digest_config: DigestConfig,
+) -> anyhow::Result<BundledFileOpsDelegate> {
+    let mut builder: DirectoryBuilder<ContentsAndMetadata, NoDigest> = DirectoryBuilder::empty();
+    let digest_config = digest_config.cas_digest_config().source_files_config();
+    for file in data.files {
+        let path = ForwardRelativePath::new(file.path)
+            .internal_error_anyhow("non-forward relative bundled path")?;
+        let metadata = FileMetadata {
+            digest: TrackedFileDigest::from_content(file.contents, digest_config),
+            is_executable: file.is_executable,
+        };
+
+        builder
+            .insert(
+                path,
+                DirectoryEntry::Leaf(ContentsAndMetadata {
+                    contents: file.contents,
+                    metadata,
+                }),
+            )
+            .internal_error_anyhow("conflicting bundled source paths")?;
+    }
+    Ok(BundledFileOpsDelegate {
+        dir: builder.fingerprint(&NoDigestDigester),
+    })
+}
+
+async fn declare_all_source_artifacts(
+    ctx: &mut DiceComputations<'_>,
+    cell_name: CellName,
+    ops: &BundledFileOpsDelegate,
+) -> anyhow::Result<()> {
+    let mut requests = Vec::new();
+    let artifact_fs = ctx.get_artifact_fs().await?;
+    let buck_out_resolver = artifact_fs.buck_out_path_resolver();
+
+    for (path, entry) in ops.dir.unordered_walk_leaves().with_paths() {
+        let path = buck_out_resolver.resolve_external_cell_source(
+            CellRelativePath::new(path.as_ref()),
+            ExternalCellOrigin::Bundled(cell_name),
+        );
+        requests.push(WriteRequest {
+            path,
+            content: entry.contents.to_vec(),
+            is_executable: entry.metadata.is_executable,
+        });
+    }
+
+    let materializer = ctx.per_transaction_data().get_materializer();
+    materializer
+        .declare_write(Box::new(move || Ok(requests)))
+        .await
+        .map(|_| ())
+}
+
+pub(crate) async fn get_file_ops_delegate(
+    ctx: &mut DiceComputations<'_>,
+    cell_name: CellName,
+) -> anyhow::Result<Arc<BundledFileOpsDelegate>> {
+    #[derive(
+        dupe::Dupe,
+        Clone,
+        Copy,
+        Debug,
+        derive_more::Display,
+        PartialEq,
+        Eq,
+        Hash,
+        allocative::Allocative
+    )]
+    struct BundledFileOpsDelegateKey(CellName);
+
+    #[async_trait::async_trait]
+    impl Key for BundledFileOpsDelegateKey {
+        type Value = buck2_error::Result<Arc<BundledFileOpsDelegate>>;
+
+        async fn compute(
+            &self,
+            ctx: &mut DiceComputations,
+            _cancellations: &CancellationContext,
+        ) -> Self::Value {
+            let data = find_bundled_data(self.0)?;
+            let ops = get_file_ops_delegate_impl(data, ctx.global_data().get_digest_config())?;
+            declare_all_source_artifacts(ctx, self.0, &ops).await?;
+            Ok(Arc::new(ops))
+        }
+
+        fn equality(_x: &Self::Value, _y: &Self::Value) -> bool {
+            // No need for non-trivial equality, because this has no deps and is never
recomputed + false + } + } + + Ok(ctx.compute(&BundledFileOpsDelegateKey(cell_name)).await??) +} + +pub(crate) async fn materialize_all( + ctx: &mut DiceComputations<'_>, + cell: CellName, +) -> anyhow::Result { + let artifact_fs = ctx.get_artifact_fs().await?; + let buck_out_resolver = artifact_fs.buck_out_path_resolver(); + + let ops = get_file_ops_delegate(ctx, cell).await?; + let materializer = ctx.per_transaction_data().get_materializer(); + let mut paths = Vec::new(); + for (path, _entry) in ops.dir.unordered_walk_leaves().with_paths() { + let path = buck_out_resolver.resolve_external_cell_source( + CellRelativePath::new(path.as_ref()), + ExternalCellOrigin::Bundled(cell), + ); + paths.push(path); + } + + materializer.ensure_materialized(paths).await?; + Ok(buck_out_resolver.resolve_external_cell_source( + CellRelativePath::unchecked_new(""), + ExternalCellOrigin::Bundled(cell), + )) +} + +#[cfg(test)] +mod tests { + use std::assert_matches::assert_matches; + + use super::*; + + fn testing_ops() -> BundledFileOpsDelegate { + let data = find_bundled_data(CellName::testing_new("test_bundled_cell")).unwrap(); + get_file_ops_delegate_impl(data, DigestConfig::testing_default()).unwrap() + } + + #[tokio::test] + async fn test_smoke_read() { + let ops = testing_ops(); + let content = ops + .read_file_if_exists(&CellRelativePath::unchecked_new("dir/src.txt")) + .await + .unwrap() + .unwrap(); + let content = if cfg!(windows) { + // Git may check out files on Windows with \r\n as line separator. + // We could configure git, but it's more reliable to handle it in the test. + content.replace("\r\n", "\n") + } else { + content + }; + assert_eq!(content, "foobar\n"); + assert!( + ops.read_file_if_exists(&CellRelativePath::unchecked_new("dir/does_not_exist.txt")) + .await + .unwrap() + .is_none() + ); + } + + #[tokio::test] + async fn test_executable_bit() { + let ops = testing_ops(); + assert_matches!( + ops.read_path_metadata_if_exists(&CellRelativePath::unchecked_new("dir/src.txt")) + .await + .unwrap() + .unwrap(), + RawPathMetadata::File(FileMetadata { + digest: _, + is_executable: false, + }), + ); + assert_matches!( + ops.read_path_metadata_if_exists(&CellRelativePath::unchecked_new("dir/src2.txt")) + .await + .unwrap() + .unwrap(), + RawPathMetadata::File(FileMetadata { + digest: _, + is_executable: true, + }), + ); + } + + #[tokio::test] + async fn test_dir_listing() { + let ops = testing_ops(); + + let root = CellRelativePath::unchecked_new(""); + let root_metadata = ops + .read_path_metadata_if_exists(root) + .await + .unwrap() + .unwrap(); + assert_matches!(root_metadata, RawPathMetadata::Directory); + let root_entries = ops.read_dir(root).await.unwrap(); + assert!(root_entries.is_sorted()); + assert_eq!( + &root_entries, + &[ + RawDirEntry { + file_name: ".buckconfig".into(), + file_type: FileType::File + }, + RawDirEntry { + file_name: "BUCK_TREE".into(), + file_type: FileType::File + }, + RawDirEntry { + file_name: "dir".into(), + file_type: FileType::Directory + }, + ], + ); + + let dir = CellRelativePath::unchecked_new("dir"); + let dir_metadata = ops + .read_path_metadata_if_exists(dir) + .await + .unwrap() + .unwrap(); + assert_matches!(dir_metadata, RawPathMetadata::Directory); + let dir_entries = ops.read_dir(dir).await.unwrap(); + assert!(dir_entries.is_sorted()); + assert_eq!(dir_entries.len(), 5); + } + + #[test] + fn test_load_all_bundled_cells() { + for c in get_bundled_data() { + get_file_ops_delegate_impl(*c, DigestConfig::testing_default()).unwrap(); + } + } +} diff 
--git a/app/buck2_external_cells/src/git.rs b/app/buck2_external_cells/src/git.rs new file mode 100644 index 0000000000000..e2abe5e487fd3 --- /dev/null +++ b/app/buck2_external_cells/src/git.rs @@ -0,0 +1,397 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::collections::hash_map; +use std::collections::HashMap; +use std::process::Command; +use std::process::ExitStatus; +use std::process::Stdio; +use std::sync::Arc; +use std::sync::Mutex; +use std::sync::OnceLock; + +use anyhow::Context; +use buck2_build_api::actions::artifact::get_artifact_fs::GetArtifactFs; +use buck2_common::dice::data::HasIoProvider; +use buck2_common::dice::file_ops::delegate::FileOpsDelegate; +use buck2_common::file_ops::FileDigestConfig; +use buck2_common::file_ops::RawDirEntry; +use buck2_common::file_ops::RawPathMetadata; +use buck2_common::io::fs::FsIoProvider; +use buck2_common::io::IoProvider; +use buck2_core::cells::cell_path::CellPath; +use buck2_core::cells::external::ExternalCellOrigin; +use buck2_core::cells::external::GitCellSetup; +use buck2_core::cells::name::CellName; +use buck2_core::cells::paths::CellRelativePath; +use buck2_core::fs::buck_out_path::BuckOutPathResolver; +use buck2_core::fs::fs_util; +use buck2_core::fs::paths::abs_norm_path::AbsNormPath; +use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; +use buck2_core::fs::project_rel_path::ProjectRelativePath; +use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; +use buck2_directory::directory::directory::Directory; +use buck2_error::internal_error_anyhow; +use buck2_execute::artifact_value::ArtifactValue; +use buck2_execute::digest_config::HasDigestConfig; +use buck2_execute::directory::INTERNER; +use buck2_execute::entry::build_entry_from_disk; +use buck2_execute::execute::blocking::HasBlockingExecutor; +use buck2_execute::execute::blocking::IoRequest; +use buck2_execute::execute::clean_output_paths::CleanOutputPaths; +use buck2_execute::materialize::materializer::HasMaterializer; +use buck2_execute::materialize::materializer::Materializer; +use buck2_util::process::background_command; +use cmp_any::PartialEqAny; +use dice::CancellationContext; +use dice::DiceComputations; +use dice::Key; +use dupe::Dupe; +use tokio::sync::Semaphore; + +#[derive(buck2_error::Error, Debug)] +enum GitError { + #[error("Error fetching external cell with git, exit code: {exit_code:?}, stderr:\n{stderr}")] + Unsuccessful { + exit_code: ExitStatus, + stderr: String, + }, + #[error("Expected git to create a directory at the checkout location")] + NoDirectory, +} + +struct GitFetchIoRequest { + setup: GitCellSetup, + path: ProjectRelativePathBuf, +} + +impl IoRequest for GitFetchIoRequest { + fn execute( + self: Box, + project_fs: &buck2_core::fs::project::ProjectRoot, + ) -> anyhow::Result<()> { + let path = project_fs.resolve(&self.path); + fs_util::create_dir_all(path.clone())?; + + // FIXME(JakobDegen): Ideally we'd use libgit2 directly here instead of shelling out, but + // unfortunately the third party situation for that library in fbsource isn't great, so + // let's do this for now + fn run_git(cwd: &AbsNormPath, f: impl FnOnce(&mut Command)) -> anyhow::Result<()> { + let mut cmd = background_command("git"); + f(&mut cmd); + let output = cmd + 
.current_dir(cwd) + .stderr(Stdio::piped()) + .stdout(Stdio::null()) + .output() + .context("Could not run git to fetch external cell")?; + + if !output.status.success() { + return Err(GitError::Unsuccessful { + exit_code: output.status, + stderr: String::from_utf8_lossy(&output.stderr).to_string(), + } + .into()); + } + + Ok(()) + } + + run_git(&path, |c| { + c.arg("init"); + })?; + + run_git(&path, |c| { + c.arg("remote") + .arg("add") + .arg("origin") + .arg(self.setup.git_origin.as_ref()); + })?; + + run_git(&path, |c| { + c.arg("fetch").arg("origin").arg(self.setup.commit.as_ref()); + })?; + + run_git(&path, |c| { + c.arg("reset").arg("--hard").arg("FETCH_HEAD"); + })?; + + Ok(()) + } +} + +async fn download_impl( + ctx: &mut DiceComputations<'_>, + setup: &GitCellSetup, + path: &ProjectRelativePath, + materializer: &dyn Materializer, + cancellations: &CancellationContext<'_>, +) -> anyhow::Result<()> { + let io = ctx.get_blocking_executor(); + io.execute_io( + Box::new(CleanOutputPaths { + paths: vec![path.to_owned()], + }), + cancellations, + ) + .await?; + + io.execute_io( + Box::new(GitFetchIoRequest { + setup: setup.dupe(), + path: path.to_owned(), + }), + cancellations, + ) + .await?; + + // Unfortunately, there's no way to ask git not to create this, but it's important that we + // delete it so that we don't use it or waste cycles hashing it. + io.execute_io( + Box::new(CleanOutputPaths { + paths: vec![path.join(ForwardRelativePath::new(".git").unwrap())], + }), + cancellations, + ) + .await?; + + // Read and hash the contents. We have to do this because the materializer requires an artifact + // value. This work is kind of duplicated with the reading in the fileops, but only the first + // time the contents are downloaded. On subsequent invocations of the daemon, we won't rerun + // this however, so that case will still avoid doing unnecessary work. + let io_prov = ctx.global_data().get_io_provider(); + let proj_root = io_prov.project_root().root(); + let abs_path = proj_root.join(path); + let digest_config = ctx.global_data().get_digest_config(); + let file_digest_config = FileDigestConfig::build(digest_config.cas_digest_config()); + let entry = build_entry_from_disk(abs_path, file_digest_config, &*io, proj_root) + .await? + .0 + .ok_or(GitError::NoDirectory)?; + let entry = entry.map_dir(|d| { + d.to_builder() + .fingerprint(digest_config.as_directory_serializer()) + .shared(&*INTERNER) + }); + + materializer + .declare_existing(vec![(path.to_owned(), ArtifactValue::new(entry, None))]) + .await?; + + Ok(()) +} + +async fn download_and_materialize( + ctx: &mut DiceComputations<'_>, + path: &ProjectRelativePath, + setup: &GitCellSetup, + cancellations: &CancellationContext<'_>, +) -> anyhow::Result<()> { + let materializer = ctx.per_transaction_data().get_materializer(); + + if materializer.has_artifact_at(path.to_owned()).await? 
{
+        return Ok(());
+    }
+
+    // A map of commit hashes to semaphores that are actually condvars which protect access to the
+    // directory associated with that commit
+    static DIRECTORY_LICENSES: OnceLock<Mutex<HashMap<Arc<str>, Arc<Semaphore>>>> = OnceLock::new();
+
+    // We have to write this in a slightly funny way to convince the compiler that there's no
+    // `map_guard` being held across an await point
+    let semaphore;
+    let semaphore_guard;
+    'populate: {
+        'wait: {
+            let mut map_guard = DIRECTORY_LICENSES
+                .get_or_init(Default::default)
+                .lock()
+                .unwrap();
+            let entry = map_guard.entry(setup.commit.dupe());
+
+            match entry {
+                hash_map::Entry::Occupied(entry) => {
+                    // There's another key simultaneously populating this directory. Just wait for
+                    // it to finish and then return. We don't need to check the contents of the
+                    // directory, since we assume that the commit hash uniquely identifies those.
+                    semaphore = entry.get().dupe();
+                    break 'wait;
+                }
+                hash_map::Entry::Vacant(entry) => {
+                    // It's on us to populate this directory. Make a condvar so that we block other accesses
+                    semaphore = Arc::new(Semaphore::new(1));
+                    semaphore_guard = semaphore.try_acquire().unwrap(); // we know there's a permit available
+                    entry.insert(semaphore.dupe());
+                    break 'populate;
+                }
+            }
+        }
+
+        drop(semaphore.acquire().await.unwrap());
+        return Ok(());
+    }
+
+    // Don't allow the actual download step to be cancelled. In principle it might be possible to
+    // properly clean up after a cancellation within the execution of this key, but we'd also have
+    // to deal with another key that might be waiting on this download to finish, which would be
+    // pretty complicated to deal with.
+    let res = cancellations
+        .critical_section(|| download_impl(ctx, setup, path, &*materializer, cancellations))
+        .await;
+
+    // Give up our lock
+    drop(semaphore_guard);
+    DIRECTORY_LICENSES
+        .get()
+        .unwrap()
+        .lock()
+        .unwrap()
+        .remove(&setup.commit)
+        .unwrap();
+
+    res
+}
+
+#[derive(allocative::Allocative)]
+pub(crate) struct GitFileOpsDelegate {
+    buck_out_resolver: BuckOutPathResolver,
+    cell: CellName,
+    setup: GitCellSetup,
+    // The fs accesses in this code are sort of a mix between source file accesses and buck-out
+    // accesses. Unconditionally using an `FsIoProvider` turns out to give all the right behavior
+    io: FsIoProvider,
+}
+
+impl GitFileOpsDelegate {
+    fn resolve(&self, path: &CellRelativePath) -> ProjectRelativePathBuf {
+        self.buck_out_resolver
+            .resolve_external_cell_source(path, ExternalCellOrigin::Git(self.setup.dupe()))
+    }
+
+    fn get_base_path(&self) -> ProjectRelativePathBuf {
+        self.resolve(CellRelativePath::empty())
+    }
+}
+
+#[async_trait::async_trait]
+impl FileOpsDelegate for GitFileOpsDelegate {
+    async fn read_file_if_exists(
+        &self,
+        path: &'async_trait CellRelativePath,
+    ) -> anyhow::Result<Option<String>> {
+        let project_path = self.resolve(path);
+        (&self.io as &dyn IoProvider)
+            .read_file_if_exists(project_path)
+            .await
+    }
+
+    async fn read_dir(
+        &self,
+        path: &'async_trait CellRelativePath,
+    ) -> anyhow::Result<Vec<RawDirEntry>> {
+        let project_path = self.resolve(path);
+        let mut entries = (&self.io as &dyn IoProvider)
+            .read_dir(project_path)
+            .await
+            .with_context(|| format!("Error listing dir `{}`", path))?;
+
+        // Make sure entries are deterministic, since read_dir isn't.
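+        // (The bundled delegate gets sorted output for free from its directory
+        // map; here we read the real filesystem, so we sort explicitly to match.)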
+ entries.sort_by(|a, b| a.file_name.cmp(&b.file_name)); + + Ok(entries) + } + + async fn read_path_metadata_if_exists( + &self, + path: &'async_trait CellRelativePath, + ) -> anyhow::Result> { + let project_path = self.resolve(path); + + let Some(metadata) = (&self.io as &dyn IoProvider) + .read_path_metadata_if_exists(project_path) + .await + .with_context(|| format!("Error accessing metadata for path `{}`", path))? + else { + return Ok(None); + }; + Ok(Some(metadata.try_map( + |path| match path.strip_prefix_opt(&self.get_base_path()) { + Some(path) => Ok(Arc::new(CellPath::new(self.cell, path.to_owned().into()))), + None => Err(internal_error_anyhow!( + "Non-cell internal symlink at `{}` in cell `{}`", + path, + self.cell + )), + }, + )?)) + } + + fn eq_token(&self) -> PartialEqAny { + PartialEqAny::always_false() + } +} + +pub(crate) async fn get_file_ops_delegate( + ctx: &mut DiceComputations<'_>, + cell: CellName, + setup: GitCellSetup, +) -> anyhow::Result> { + #[derive( + dupe::Dupe, + Clone, + Debug, + derive_more::Display, + PartialEq, + Eq, + Hash, + allocative::Allocative + )] + #[display("({}, {})", _0, _1)] + struct GitFileOpsDelegateKey(CellName, GitCellSetup); + + #[async_trait::async_trait] + impl Key for GitFileOpsDelegateKey { + type Value = buck2_error::Result>; + + async fn compute( + &self, + ctx: &mut DiceComputations, + cancellations: &CancellationContext, + ) -> Self::Value { + let artifact_fs = ctx.get_artifact_fs().await?; + let ops = GitFileOpsDelegate { + buck_out_resolver: artifact_fs.buck_out_path_resolver().clone(), + cell: self.0, + setup: self.1.dupe(), + io: FsIoProvider::new( + artifact_fs.fs().dupe(), + ctx.global_data().get_digest_config().cas_digest_config(), + ), + }; + download_and_materialize(ctx, &ops.get_base_path(), &self.1, cancellations).await?; + Ok(Arc::new(ops)) + } + + fn equality(_x: &Self::Value, _y: &Self::Value) -> bool { + false + } + } + + Ok(ctx.compute(&GitFileOpsDelegateKey(cell, setup)).await??) +} + +pub(crate) async fn materialize_all( + ctx: &mut DiceComputations<'_>, + cell: CellName, + setup: GitCellSetup, +) -> anyhow::Result { + // Get the `GitFileOpsDelegate` instance to make sure all the data is materialized. + let ops = get_file_ops_delegate(ctx, cell, setup.dupe()).await?; + Ok(ops.get_base_path()) +} diff --git a/app/buck2_external_cells/src/lib.rs b/app/buck2_external_cells/src/lib.rs new file mode 100644 index 0000000000000..e5fff89d81790 --- /dev/null +++ b/app/buck2_external_cells/src/lib.rs @@ -0,0 +1,101 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +#![feature(assert_matches)] +#![feature(is_sorted)] +#![feature(error_generic_member_access)] +#![feature(once_cell_try)] + +use std::sync::Arc; + +use async_trait::async_trait; +use buck2_common::dice::data::HasIoProvider; +use buck2_common::dice::file_ops::delegate::FileOpsDelegate; +use buck2_common::file_ops::RawPathMetadata; +use buck2_core::cells::cell_root_path::CellRootPath; +use buck2_core::cells::external::ExternalCellOrigin; +use buck2_core::cells::name::CellName; +use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; +use dice::DiceComputations; + +mod bundled; +mod git; + +struct ConcreteExternalCellsImpl; + +#[derive(buck2_error::Error, Debug)] +enum ExternalCellsError { + #[error("Tried to expand external cell to `{0}`, but that directory already contains data!")] + ExpandDataAlreadyPresent(ProjectRelativePathBuf), +} + +#[async_trait] +impl buck2_common::external_cells::ExternalCellsImpl for ConcreteExternalCellsImpl { + async fn get_file_ops_delegate( + &self, + ctx: &mut DiceComputations<'_>, + cell_name: CellName, + origin: ExternalCellOrigin, + ) -> anyhow::Result<Arc<dyn FileOpsDelegate>> { + match origin { + ExternalCellOrigin::Bundled(cell_name) => { + Ok(bundled::get_file_ops_delegate(ctx, cell_name).await? as _) + } + ExternalCellOrigin::Git(setup) => { + Ok(git::get_file_ops_delegate(ctx, cell_name, setup).await? as _) + } + } + } + + fn check_bundled_cell_exists(&self, cell_name: CellName) -> anyhow::Result<()> { + bundled::find_bundled_data(cell_name).map(|_| ()) + } + + async fn expand( + &self, + ctx: &mut DiceComputations<'_>, + cell: CellName, + origin: ExternalCellOrigin, + path: &CellRootPath, + ) -> anyhow::Result<()> { + let dest_path = path.as_project_relative_path().to_buf(); + let io = ctx.global_data().get_io_provider(); + + // Make sure we're not about to overwrite existing data + match io.read_path_metadata_if_exists(dest_path.clone()).await? { + None => (), + Some(RawPathMetadata::Directory) => { + let data = io.read_dir(dest_path.clone()).await?; + if !data.is_empty() { + return Err(ExternalCellsError::ExpandDataAlreadyPresent(dest_path).into()); + } + } + Some(_) => { + return Err(ExternalCellsError::ExpandDataAlreadyPresent(dest_path).into()); + } + } + + // Materialize the whole cell, and then copy it into the repository. + // + // FIXME(JakobDegen): Ideally we'd be able to ask the materializer to just make a copy + // without doing the actual materialization. However, that's not currently possible without + // it resulting in the materializer tracking paths in the repo, so this will have to do for + // now.
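[Editor's note: the overwrite check in `expand` above reduces to "destination must be absent or an empty directory". A self-contained sketch of that rule with plain std::fs, assuming synchronous I/O; `check_expand_dest` is a hypothetical name:]

use std::io;
use std::path::Path;

/// Allows expansion only into a missing or empty destination.
fn check_expand_dest(dest: &Path) -> io::Result<()> {
    match std::fs::metadata(dest) {
        Err(e) if e.kind() == io::ErrorKind::NotFound => return Ok(()),
        Err(e) => return Err(e),
        Ok(m) if m.is_dir() => {
            // An empty directory is fine; anything else is pre-existing data.
            if std::fs::read_dir(dest)?.next().is_none() {
                return Ok(());
            }
        }
        Ok(_) => {}
    }
    Err(io::Error::other(format!(
        "{} already contains data",
        dest.display()
    )))
}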
+ let materialized_path = match origin { + ExternalCellOrigin::Bundled(cell) => bundled::materialize_all(ctx, cell).await?, + ExternalCellOrigin::Git(setup) => git::materialize_all(ctx, cell, setup).await?, + }; + + io.project_root().copy(&materialized_path, &dest_path) + } +} + +pub fn init_late_bindings() { + buck2_common::external_cells::EXTERNAL_CELLS_IMPL.init(&ConcreteExternalCellsImpl); +} diff --git a/app/buck2_external_cells_bundled/BUCK b/app/buck2_external_cells_bundled/BUCK new file mode 100644 index 0000000000000..ade8be34c87fa --- /dev/null +++ b/app/buck2_external_cells_bundled/BUCK @@ -0,0 +1,36 @@ +load("@fbcode_macros//build_defs:rust_binary.bzl", "rust_binary") +load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") +load(":bundled_cell.bzl", "bundled_cell") + +oncall("build_infra") + +bundled_cell( + name = "prelude", + include_from_file = "src/lib.rs", + source_listing = "prelude//:source_listing", +) + +rust_library( + name = "buck2_external_cells_bundled", + srcs = glob([ + "src/**/*.rs", + "test_data/**/*", + "test_data/**/.*", + ]), + mapped_srcs = { + ":prelude": "src/prelude", + }, + # Do not add non-third party deps. This should store the data only, so that + # we avoid recompiling if at all possible. + # + # FIXME(JakobDegen): Ideally this would be a `rust_linkable_symbol`-like + # thing, but that needs to be moved to the prelude after buck1-death. It + # also can't be made to work with cargo builds, so we'd need to deprecate + # those more too. + deps = [], +) + +rust_binary( + name = "processor", + srcs = ["processor.rs"], +) diff --git a/app/buck2_external_cells_bundled/Cargo.toml b/app/buck2_external_cells_bundled/Cargo.toml new file mode 100644 index 0000000000000..3f0dd78ea2fb4 --- /dev/null +++ b/app/buck2_external_cells_bundled/Cargo.toml @@ -0,0 +1,19 @@ +[package] +build = "build.rs" +description = "Buck2 external cells bundled data" +edition = "2021" +license = { workspace = true } +name = "buck2_external_cells_bundled" +repository = { workspace = true } +version = "0.1.0" + +[dependencies] +async-trait = { workspace = true } + +buck2_common = { workspace = true } + +[build-dependencies] +walkdir = { workspace = true } + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ["cfg(buck_build)"] } diff --git a/app/buck2_external_cells_bundled/build.rs b/app/buck2_external_cells_bundled/build.rs new file mode 100644 index 0000000000000..da393f90e677b --- /dev/null +++ b/app/buck2_external_cells_bundled/build.rs @@ -0,0 +1,88 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! Generate source file containing buck2/prelude tree with contents. + +use std::io; +use std::path::Path; + +fn main() { + imp().unwrap(); +} + +fn imp() -> io::Result<()> { + let out_path = std::env::var_os("OUT_DIR").unwrap(); + let include_file = Path::new(&out_path).join("include.rs"); + let manifest_path = std::env::var_os("CARGO_MANIFEST_DIR").unwrap(); + let prelude_path = Path::new(&manifest_path) + .parent() + .unwrap() + .parent() + .unwrap() + .join("prelude"); + + // Self-check. 
+ assert!(prelude_path.join("prelude.bzl").exists()); + + println!("cargo:rerun-if-changed={}", prelude_path.display()); + + write_include_file(&prelude_path, std::fs::File::create(&include_file)?)?; + + Ok(()) +} + +fn as_unix_like(path: &Path) -> String { + path.to_str().unwrap().replace('\\', "/") +} + +fn write_include_file(prelude: &Path, mut include_file: impl io::Write) -> io::Result<()> { + #[allow(clippy::write_literal)] + writeln!(include_file, "// {}generated by crate build.rs", "@")?; + + writeln!( + include_file, + "pub(crate) const DATA: &[crate::BundledFile] = &[" + )?; + + for res in walkdir::WalkDir::new(prelude) { + let entry = res.map_err(|e| e.into_io_error().unwrap())?; + if !entry.file_type().is_file() { + continue; + } + + writeln!(include_file, "crate::BundledFile {{")?; + writeln!( + include_file, + " path: r\"{}\",", + as_unix_like(entry.path().strip_prefix(prelude).unwrap()) + )?; + writeln!( + include_file, + " contents: include_bytes!(r\"{}\"),", + entry.path().display() + )?; + + let exec_bit; + #[cfg(unix)] + { + use std::os::unix::fs::MetadataExt; + exec_bit = entry.metadata()?.mode() & 0o111 != 0; + } + #[cfg(not(unix))] + { + exec_bit = false; + } + + writeln!(include_file, " is_executable: {},", exec_bit)?; + writeln!(include_file, "}},")?; + } + + writeln!(include_file, "];")?; + Ok(()) +} diff --git a/app/buck2_external_cells_bundled/bundled_cell.bzl b/app/buck2_external_cells_bundled/bundled_cell.bzl new file mode 100644 index 0000000000000..bb739ae330b6a --- /dev/null +++ b/app/buck2_external_cells_bundled/bundled_cell.bzl @@ -0,0 +1,63 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//utils:source_listing.bzl", "SourceListingInfo") + +def _impl(ctx: AnalysisContext) -> list[Provider]: + files = {} + + parts = ["pub(crate) const DATA: &[crate::BundledFile] = &["] + for path, art in ctx.attrs.source_listing[SourceListingInfo].sources.items(): + exec_bit_rel_path = "includes/" + path + "/__exec_bit.txt" + data_rel_path = "includes/" + path + "/__data" + + # In the future, this can be extended to additionally compress the data + exec_bit = ctx.actions.declare_output("processed/" + path + "/__exec_bit.txt") + ctx.actions.run( + cmd_args( + ctx.attrs._processor[RunInfo], + art, + exec_bit.as_output(), + ), + category = "process_source_file", + identifier = "process " + path, + ) + files[exec_bit_rel_path] = exec_bit + files[data_rel_path] = art + + parts.append("crate::BundledFile {") + parts.append(" path: \"" + path + "\",") + parts.append(" contents: include_bytes!(\"" + data_rel_path + "\"),") + parts.append(" is_executable: include!(\"" + exec_bit_rel_path + "\"),") + parts.append("},") + + parts.append("];") + + contents = ctx.actions.write("contents.rs", cmd_args(parts, delimiter = "\n"), with_inputs = True) + files["contents.rs"] = contents + + out = ctx.actions.symlinked_dir("out", files) + + return [DefaultInfo(default_output = out)] + +# This rule outputs a default info with a section of a Rust source file that +# contains a `&[BundledFile]`. It gets supplied as a `mapped_src`, immediately +# adjacent to the `include_from_file`. 
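[Editor's note: both the build.rs above and the Buck rule below emit a fragment of Rust source that the library splices in with `include!`. The standard cargo OUT_DIR pattern they rely on, in miniature; the file name `generated.rs` and constant `ANSWER` are hypothetical:]

// build.rs (sketch): generate Rust source into OUT_DIR at compile time.
use std::io::Write;

fn main() {
    let out = std::path::PathBuf::from(std::env::var_os("OUT_DIR").unwrap());
    let mut f = std::fs::File::create(out.join("generated.rs")).unwrap();
    // Any valid Rust can be emitted here; the real build.rs emits a
    // `&[BundledFile]` table full of include_bytes! entries.
    writeln!(f, "pub(crate) const ANSWER: u32 = 42;").unwrap();
}

// src/lib.rs (sketch): splice the generated source in by absolute path.
include!(concat!(env!("OUT_DIR"), "/generated.rs"));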
+_bundled_cell = rule( + impl = _impl, + attrs = { + "include_from_file": attrs.source(), + "source_listing": attrs.dep(), + "_processor": attrs.exec_dep(), + }, +) + +def bundled_cell(**kwargs): + _bundled_cell( + _processor = ":processor", + **kwargs + ) diff --git a/app/buck2_external_cells_bundled/processor.rs b/app/buck2_external_cells_bundled/processor.rs new file mode 100644 index 0000000000000..52a853cf7c13a --- /dev/null +++ b/app/buck2_external_cells_bundled/processor.rs @@ -0,0 +1,34 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +#[cfg(unix)] +fn is_executable(path: &str) -> bool { + use std::os::unix::fs::PermissionsExt; + + let permissions = std::fs::metadata(path) + .expect("Failed to get metadata") + .permissions(); + permissions.mode() & 0o111 != 0 +} + +#[cfg(not(unix))] +fn is_executable(_path: &str) -> bool { + false +} + +fn main() { + let inp_path = std::env::args() + .nth(1) + .expect("Expected path to a source file"); + let exec_bit_path = std::env::args() + .nth(2) + .expect("Expected path to write the executable bit"); + let is_executable = is_executable(&inp_path); + std::fs::write(&exec_bit_path, is_executable.to_string()).expect("Failed to write"); +} diff --git a/app/buck2_external_cells_bundled/src/lib.rs b/app/buck2_external_cells_bundled/src/lib.rs new file mode 100644 index 0000000000000..9cf55a3a64359 --- /dev/null +++ b/app/buck2_external_cells_bundled/src/lib.rs @@ -0,0 +1,119 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +#[derive(Copy, Clone)] +pub struct BundledFile { + pub path: &'static str, + /// FIXME(JakobDegen): Consider compressing the data + pub contents: &'static [u8], + pub is_executable: bool, +} + +#[derive(Copy, Clone)] +pub struct BundledCell { + pub name: &'static str, + pub files: &'static [BundledFile], + pub is_testing: bool, +} + +#[cfg(buck_build)] +mod prelude { + include!("prelude/contents.rs"); +} + +#[cfg(not(buck_build))] +mod prelude { + include!(concat!(env!("OUT_DIR"), "/include.rs")); +} + +const PRELUDE: BundledCell = BundledCell { + name: "prelude", + files: prelude::DATA, + is_testing: false, +}; + +const TEST_CELL: BundledCell = BundledCell { + name: "test_bundled_cell", + files: &[ + BundledFile { + path: ".buckconfig", + contents: include_bytes!("../test_data/.buckconfig"), + is_executable: false, + }, + BundledFile { + path: "BUCK_TREE", + contents: include_bytes!("../test_data/BUCK_TREE"), + is_executable: false, + }, + BundledFile { + path: "dir/src.txt", + contents: include_bytes!("../test_data/dir/src.txt"), + is_executable: false, + }, + BundledFile { + path: "dir/src2.txt", + contents: include_bytes!("../test_data/dir/src2.txt"), + is_executable: true, + }, + BundledFile { + path: "dir/src3.txt", + contents: include_bytes!("../test_data/dir/src3.txt"), + is_executable: true, + }, + BundledFile { + path: "dir/BUCK.fixture", + contents: include_bytes!("../test_data/dir/BUCK.fixture"), + is_executable: false, + }, + BundledFile { + path: "dir/defs.bzl", + contents: include_bytes!("../test_data/dir/defs.bzl"), + is_executable: false, + }, + ], + is_testing: true, +}; + +pub const fn get_bundled_data() -> &'static [BundledCell] { + &[TEST_CELL, PRELUDE] +} + +#[cfg(test)] +mod tests { + #[test] + fn test_sanity_check() { + let c = super::TEST_CELL; + assert!(c.files.iter().any(|file| { + file.path == "dir/src.txt" + // Git may check out files on Windows with \r\n as line separator. + && std::str::from_utf8(file.contents).unwrap().replace("\r\n", "\n") == "foobar\n" + })) + } + + #[test] + fn test_bundled_prelude_data() { + let c = super::PRELUDE; + // Make sure there's a buckconfig + assert!(c.files.iter().any(|file| { + file.path == ".buckconfig" + && std::str::from_utf8(file.contents) + .unwrap() + .contains("prelude = .") + })); + + // And that there's at least 50 files with a reasonable amount of data + assert!( + c.files + .iter() + .filter(|file| file.contents.len() > 100) + .count() + > 50 + ); + } +} diff --git a/app/buck2_external_cells_bundled/test_data/.buckconfig b/app/buck2_external_cells_bundled/test_data/.buckconfig new file mode 100644 index 0000000000000..f345059546ebe --- /dev/null +++ b/app/buck2_external_cells_bundled/test_data/.buckconfig @@ -0,0 +1,11 @@ +[cell_aliases] + prelude_alias = prelude + +[buildfile] + name = BUCK.fixture + +[user_section] + key = value + +[project] + ignore = dir/src3.txt diff --git a/app/buck2_external_cells_bundled/test_data/BUCK_TREE b/app/buck2_external_cells_bundled/test_data/BUCK_TREE new file mode 100644 index 0000000000000..00b31cb919946 --- /dev/null +++ b/app/buck2_external_cells_bundled/test_data/BUCK_TREE @@ -0,0 +1,5 @@ +# The parent package value is set outside of the bundled cell in the test that +# uses this data. This is used to verify that tree files work across cell +# boundaries even for external cells. 
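[Editor's note: a consumer of `get_bundled_data()` only has to walk the static tables and write the bytes out. A minimal sketch under the assumption that the caller wants the files on disk with the executable bit restored; `write_cell` is a hypothetical helper, not buck2's actual materializer:]

use std::io::Write;
use std::path::Path;

use buck2_external_cells_bundled::{get_bundled_data, BundledCell};

/// Writes one bundled cell to `dest`, restoring the executable bit on unix.
fn write_cell(cell: &BundledCell, dest: &Path) -> std::io::Result<()> {
    for file in cell.files {
        let path = dest.join(file.path);
        std::fs::create_dir_all(path.parent().unwrap())?;
        let mut f = std::fs::File::create(&path)?;
        f.write_all(file.contents)?;
        #[cfg(unix)]
        if file.is_executable {
            use std::os::unix::fs::PermissionsExt;
            std::fs::set_permissions(&path, std::fs::Permissions::from_mode(0o755))?;
        }
    }
    Ok(())
}

fn main() -> std::io::Result<()> {
    for cell in get_bundled_data() {
        write_cell(cell, &Path::new("cells").join(cell.name))?;
    }
    Ok(())
}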
+_parent_val = read_parent_package_value("tree.value") +write_package_value("tree.value", _parent_val + 2, overwrite = True) diff --git a/app/buck2_external_cells_bundled/test_data/dir/BUCK.fixture b/app/buck2_external_cells_bundled/test_data/dir/BUCK.fixture new file mode 100644 index 0000000000000..ebc72c5bb76ed --- /dev/null +++ b/app/buck2_external_cells_bundled/test_data/dir/BUCK.fixture @@ -0,0 +1,26 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load(":defs.bzl", "test_rule", "export_file") +load("@prelude_alias//:alias.bzl", "alias") + +test_rule( + name = "test_hidden", + data = [read_config("user_section", "key"), str(read_package_value("tree.value"))], + srcs = glob(["src*"]), +) + +alias( + name = "test", + actual = ":test_hidden", + visibility = ["PUBLIC"], +) + +export_file( + name = "exported", + src = "src.txt", +) diff --git a/app/buck2_external_cells_bundled/test_data/dir/defs.bzl b/app/buck2_external_cells_bundled/test_data/dir/defs.bzl new file mode 100644 index 0000000000000..829ef630995c8 --- /dev/null +++ b/app/buck2_external_cells_bundled/test_data/dir/defs.bzl @@ -0,0 +1,38 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(ctx): + tmp = ctx.actions.write("temp.txt", "".join([s + "\n" for s in ctx.attrs.data])) + out = ctx.actions.declare_output("out.txt") + + # Good enough for tests + if host_info().os.is_windows: + files = cmd_args(tmp, ctx.attrs.srcs, delimiter = "+") + cmd = cmd_args("cmd", "/c", "copy", "/b", files, out.as_output()) + ctx.actions.run(cmd, category = "run") + else: + cmd = cmd_args("cat", tmp, ctx.attrs.srcs, ">", out.as_output(), delimiter = " ") + ctx.actions.run(cmd_args("bash", "-c", cmd), category = "run") + return [DefaultInfo(default_output = out)] + +test_rule = rule( + impl = _impl, + attrs = { + "data": attrs.list(attrs.string()), + "srcs": attrs.list(attrs.source()), + }, +) + +def _export_impl(ctx): + return [DefaultInfo(default_output = ctx.attrs.src)] + +export_file = rule( + impl = _export_impl, + attrs = { + "src": attrs.source(), + }, +) diff --git a/app/buck2_external_cells_bundled/test_data/dir/src.txt b/app/buck2_external_cells_bundled/test_data/dir/src.txt new file mode 100644 index 0000000000000..323fae03f4606 --- /dev/null +++ b/app/buck2_external_cells_bundled/test_data/dir/src.txt @@ -0,0 +1 @@ +foobar diff --git a/app/buck2_external_cells_bundled/test_data/dir/src2.txt b/app/buck2_external_cells_bundled/test_data/dir/src2.txt new file mode 100644 index 0000000000000..1c46fdb149e8c --- /dev/null +++ b/app/buck2_external_cells_bundled/test_data/dir/src2.txt @@ -0,0 +1 @@ +foobar2 diff --git a/app/buck2_external_cells_bundled/test_data/dir/src3.txt b/app/buck2_external_cells_bundled/test_data/dir/src3.txt new file mode 100644 index 0000000000000..329b32cd3b169 --- /dev/null +++ b/app/buck2_external_cells_bundled/test_data/dir/src3.txt @@ -0,0 +1 @@ +foobar3 diff --git a/app/buck2_file_watcher/BUCK b/app/buck2_file_watcher/BUCK index 2bfe540981b7c..6efd545a894f5 100644 --- 
a/app/buck2_file_watcher/BUCK +++ b/app/buck2_file_watcher/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -7,23 +6,42 @@ rust_library( name = "buck2_file_watcher", srcs = glob(["src/**/*.rs"]), test_deps = [ - "fbsource//third-party/rust:assert_matches", "fbsource//third-party/rust:tempfile", - "//buck2/app/buck2_util:buck2_util", - "//buck2/gazebo/gazebo:gazebo", + ], + test_os_deps = [ + ( + "linux", + [ + "fbsource//third-party/rust:assert_matches", + "//buck2/app/buck2_util:buck2_util", + "//buck2/gazebo/gazebo:gazebo", + ], + ), + ( + "macos", + [ + "fbsource//third-party/rust:assert_matches", + "//buck2/app/buck2_util:buck2_util", + "//buck2/gazebo/gazebo:gazebo", + ], + ), ], deps = [ "fbsource//third-party/rust:anyhow", "fbsource//third-party/rust:async-trait", + "fbsource//third-party/rust:blake3", + "fbsource//third-party/rust:compact_str", "fbsource//third-party/rust:futures", "fbsource//third-party/rust:notify", "fbsource//third-party/rust:serde", "fbsource//third-party/rust:tokio", "fbsource//third-party/rust:tracing", "//buck2/allocative/allocative:allocative", + "//buck2/app/buck2_certs:buck2_certs", "//buck2/app/buck2_common:buck2_common", "//buck2/app/buck2_core:buck2_core", "//buck2/app/buck2_data:buck2_data", + "//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_events:buck2_events", "//buck2/app/buck2_util:buck2_util", "//buck2/dice/dice:dice", diff --git a/app/buck2_file_watcher/Cargo.toml b/app/buck2_file_watcher/Cargo.toml index ae4da8d42303e..b33849bad41ac 100644 --- a/app/buck2_file_watcher/Cargo.toml +++ b/app/buck2_file_watcher/Cargo.toml @@ -1,7 +1,9 @@ [package] description = "Buck2 file watcher" edition = "2021" +license = { workspace = true } name = "buck2_file_watcher" +repository = { workspace = true } version = "0.1.0" [dependencies] @@ -14,20 +16,29 @@ tokio = { workspace = true } tracing = { workspace = true } allocative = { workspace = true } -dupe = { workspace = true } +blake3 = { workspace = true } +compact_str = { workspace = true } dice = { workspace = true } +dupe = { workspace = true } starlark_map = { workspace = true } -watchman_client.version = "0.8.0" # @oss-disable: watchman_client.path = "../../../watchman/rust/watchman_client" +watchman_client.version = "0.9.0" +buck2_certs = { workspace = true } buck2_common = { workspace = true } buck2_core = { workspace = true } buck2_data = { workspace = true } +buck2_error = { workspace = true } buck2_events = { workspace = true } buck2_util = { workspace = true } [dev-dependencies] -assert_matches = { workspace = true } tempfile = { workspace = true } + +[target.'cfg(unix)'.dev-dependencies] +assert_matches = { workspace = true } buck2_util = { workspace = true } gazebo = { workspace = true } + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ["cfg(fbcode_build)"] } diff --git a/app/buck2_file_watcher/src/dep_files.rs b/app/buck2_file_watcher/src/dep_files.rs index b1de478a9d7eb..6d93e2546c8c8 100644 --- a/app/buck2_file_watcher/src/dep_files.rs +++ b/app/buck2_file_watcher/src/dep_files.rs @@ -10,9 +10,16 @@ use buck2_util::late_binding::LateBinding; pub static FLUSH_DEP_FILES: LateBinding = LateBinding::new("FLUSH_DEP_FILES"); +pub static FLUSH_NON_LOCAL_DEP_FILES: LateBinding = + LateBinding::new("FLUSH_NON_LOCAL_DEP_FILES"); /// Forget about all dep files. 
This isn't really meant to be commonly used, but if an invalid dep /// file was produced and the user wants unblocking, this will provide it. pub fn flush_dep_files() { (FLUSH_DEP_FILES.get().unwrap())(); } + +/// Forget about all dep files that were not produced locally. +pub fn flush_non_local_dep_files() { + (FLUSH_NON_LOCAL_DEP_FILES.get().unwrap())(); +} diff --git a/app/buck2_file_watcher/src/file_watcher.rs b/app/buck2_file_watcher/src/file_watcher.rs index abcb7fce1b888..3f496f0e56142 100644 --- a/app/buck2_file_watcher/src/file_watcher.rs +++ b/app/buck2_file_watcher/src/file_watcher.rs @@ -14,13 +14,15 @@ use allocative::Allocative; use anyhow::Context; use async_trait::async_trait; use buck2_common::ignores::ignore_set::IgnoreSet; -use buck2_common::legacy_configs::LegacyBuckConfig; +use buck2_common::legacy_configs::configs::LegacyBuckConfig; +use buck2_common::legacy_configs::key::BuckconfigKeyRef; use buck2_core::cells::name::CellName; use buck2_core::cells::CellResolver; use buck2_core::fs::project::ProjectRoot; use buck2_core::is_open_source; use dice::DiceTransactionUpdater; +use crate::fs_hash_crawler::FsHashCrawler; use crate::mergebase::Mergebase; use crate::notify::NotifyFileWatcher; use crate::watchman::interface::WatchmanFileWatcher; @@ -48,7 +50,13 @@ impl dyn FileWatcher { "watchman" }; - match root_config.get("buck2", "file_watcher").unwrap_or(default) { + match root_config + .get(BuckconfigKeyRef { + section: "buck2", + property: "file_watcher", + }) + .unwrap_or(default) + { "watchman" => Ok(Arc::new( WatchmanFileWatcher::new(project_root.root(), root_config, cells, ignore_specs) .context("Creating watchman file watcher")?, @@ -57,6 +65,10 @@ impl dyn FileWatcher { NotifyFileWatcher::new(project_root, cells, ignore_specs) .context("Creating notify file watcher")?, )), + "fs_hash_crawler" => Ok(Arc::new( + FsHashCrawler::new(project_root, cells, ignore_specs) + .context("Creating fs_crawler file watcher")?, + )), other => Err(anyhow::anyhow!("Invalid buck2.file_watcher: {}", other)), } } diff --git a/app/buck2_file_watcher/src/fs_hash_crawler.rs b/app/buck2_file_watcher/src/fs_hash_crawler.rs new file mode 100644 index 0000000000000..f61ff4d970d9b --- /dev/null +++ b/app/buck2_file_watcher/src/fs_hash_crawler.rs @@ -0,0 +1,383 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
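[Editor's note: the `LateBinding` slots used for the dep-file hooks above break what would otherwise be a dependency cycle between crates. A simplified stand-in for `buck2_util::late_binding::LateBinding`, assuming the real type is roughly a named write-once cell (the actual API differs in detail):]

use std::sync::OnceLock;

/// A named slot that a higher-level crate fills in at startup, letting a
/// low-level crate call upward without a dependency edge.
pub struct LateBinding<T> {
    name: &'static str,
    slot: OnceLock<T>,
}

impl<T> LateBinding<T> {
    pub const fn new(name: &'static str) -> Self {
        Self { name, slot: OnceLock::new() }
    }

    pub fn init(&self, v: T) {
        if self.slot.set(v).is_err() {
            panic!("{} initialized twice", self.name);
        }
    }

    pub fn get(&self) -> &T {
        self.slot
            .get()
            .unwrap_or_else(|| panic!("{} not initialized", self.name))
    }
}

// The dep-file hook above would then be declared as:
pub static FLUSH_DEP_FILES: LateBinding<fn()> = LateBinding::new("FLUSH_DEP_FILES");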
+ */ + +use std::collections::HashMap; +use std::fs::File; +use std::io::Read; +use std::mem; +use std::path::Path; +use std::sync::Arc; +use std::sync::Mutex; + +use allocative::Allocative; +use anyhow::Context; +use async_trait::async_trait; +use blake3::Hash; +use buck2_common::dice::file_ops::FileChangeTracker; +use buck2_common::file_ops::FileType; +use buck2_common::ignores::ignore_set::IgnoreSet; +use buck2_common::invocation_paths::InvocationPaths; +use buck2_core::cells::cell_path::CellPath; +use buck2_core::cells::name::CellName; +use buck2_core::cells::CellResolver; +use buck2_core::fs::fs_util; +use buck2_core::fs::paths::abs_norm_path::AbsNormPath; +use buck2_core::fs::paths::file_name::FileNameBuf; +use buck2_core::fs::project::ProjectRoot; +use buck2_core::fs::project_rel_path::ProjectRelativePath; +use buck2_data::FileWatcherEventType; +use buck2_data::FileWatcherKind; +use buck2_events::dispatch::span_async; +use compact_str::CompactString; +use dice::DiceTransactionUpdater; +use dupe::Dupe; + +use crate::file_watcher::FileWatcher; +use crate::mergebase::Mergebase; +use crate::stats::FileWatcherStats; + +// On each sync, recomputes hashes of all files in the repository. +// Useful for tests on unreliable filesystems, +// but probably not much elsewhere. +#[derive(Allocative)] +pub struct FsHashCrawler { + root: ProjectRoot, + cells: CellResolver, + ignore_specs: HashMap<CellName, IgnoreSet>, + snapshot: Arc<Mutex<FsSnapshot>>, +} + +impl FsHashCrawler { + pub fn new( + root: &ProjectRoot, + cells: CellResolver, + ignore_specs: HashMap<CellName, IgnoreSet>, + ) -> anyhow::Result<Self> { + let snapshot = Arc::new(Mutex::new(FsSnapshot::build(root, &cells)?)); + Ok(Self { + root: root.dupe(), + cells, + ignore_specs, + snapshot, + }) + } + + async fn update( + &self, + mut dice: DiceTransactionUpdater, + ) -> anyhow::Result<(buck2_data::FileWatcherStats, DiceTransactionUpdater)> { + let root = self.root.dupe(); + let cells = self.cells.dupe(); + let new_snapshot = + tokio::task::spawn_blocking(move || FsSnapshot::build(&root, &cells)).await??; + let mut guard = self.snapshot.lock().unwrap(); + let old_snapshot = mem::replace(&mut *guard, new_snapshot); + let (stats, changes) = old_snapshot.get_updates_for_dice(&guard, &self.ignore_specs)?; + changes.write_to_dice(&mut dice)?; + Ok((stats, dice)) + } +} + +#[async_trait] +impl FileWatcher for FsHashCrawler { + async fn sync( + &self, + dice: DiceTransactionUpdater, + ) -> anyhow::Result<(DiceTransactionUpdater, Mergebase)> { + span_async( + buck2_data::FileWatcherStart { + provider: buck2_data::FileWatcherProvider::FsHashCrawler as i32, + }, + async { + let (stats, res) = match self.update(dice).await { + Ok((stats, dice)) => { + let mergebase = Mergebase(Arc::new(stats.branched_from_revision.clone())); + ((Some(stats)), Ok((dice, mergebase))) + } + Err(e) => (None, Err(e)), + }; + (res, buck2_data::FileWatcherEnd { stats }) + }, + ) + .await + } +} + +#[derive(Ord, PartialOrd, Eq, PartialEq, Debug)] +struct FsEvent { + cell_path: CellPath, + event: FileWatcherEventType, + kind: FileWatcherKind, +} + +#[derive(Allocative)] +enum EntryInfo { + #[allocative(skip)] + File(Hash), + Directory, + Symlink, +} + +impl EntryInfo { + fn to_file_watcher_kind(&self) -> FileWatcherKind { + match self { + EntryInfo::File(_) => FileWatcherKind::File, + EntryInfo::Directory => FileWatcherKind::Directory, + EntryInfo::Symlink => FileWatcherKind::Symlink, + } + } +} + +#[derive(Allocative)] +struct FsSnapshot(HashMap<CellPath, EntryInfo>); + +impl FsSnapshot { + fn build(root: &ProjectRoot, cells: &CellResolver) -> anyhow::Result<FsSnapshot>
{ + let mut snapshot = FsSnapshot(HashMap::new()); + snapshot.build_fs_snapshot(root, cells, root.root())?; + Ok(snapshot) + } + + fn add_entry(&mut self, cell: CellPath, info: EntryInfo) { + self.0.insert(cell, info); + } + + fn get_updates(&self, new_snapshot: &FsSnapshot) -> anyhow::Result<Vec<FsEvent>> { + let mut events = Vec::new(); + for (cell_path, prev_info) in self.0.iter() { + if let Some(current_info) = new_snapshot.0.get(cell_path) { + match (current_info, prev_info) { + (EntryInfo::File(cur), EntryInfo::File(prev)) if cur != prev => { + events.push(FsEvent { + cell_path: cell_path.to_owned(), + event: FileWatcherEventType::Modify, + kind: prev_info.to_file_watcher_kind(), + }); + } + _ => (), + } + } else { + events.push(FsEvent { + cell_path: cell_path.to_owned(), + event: FileWatcherEventType::Delete, + kind: prev_info.to_file_watcher_kind(), + }); + } + } + let new_entries = new_snapshot + .0 + .iter() + .filter(|(path, _)| !self.0.contains_key(*path)); + for (cell_path, info) in new_entries { + events.push(FsEvent { + cell_path: cell_path.to_owned(), + event: FileWatcherEventType::Create, + kind: info.to_file_watcher_kind(), + }); + } + Ok(events) + } + + fn get_updates_for_dice( + &self, + new_snapshot: &FsSnapshot, + ignore_specs: &HashMap<CellName, IgnoreSet>, + ) -> anyhow::Result<(buck2_data::FileWatcherStats, FileChangeTracker)> { + let events = self.get_updates(new_snapshot)?; + let mut changed = FileChangeTracker::new(); + let mut stats = FileWatcherStats::new(Default::default(), events.len()); + let mut ignored = 0; + for event in events.into_iter() { + let ignore = ignore_specs + .get(&event.cell_path.cell()) + .map_or(false, |i| i.is_match(event.cell_path.path())); + + if ignore { + ignored += 1; + continue; + } + + stats.add(event.cell_path.to_string(), event.event, event.kind); + match (event.event, event.kind) { + ( + FileWatcherEventType::Create, + FileWatcherKind::File | FileWatcherKind::Symlink, + ) => { + changed.file_added(event.cell_path); + } + (FileWatcherEventType::Create, FileWatcherKind::Directory) => { + changed.dir_added(event.cell_path); + } + ( + FileWatcherEventType::Modify, + FileWatcherKind::File | FileWatcherKind::Symlink, + ) => { + changed.file_changed(event.cell_path); + } + (FileWatcherEventType::Modify, FileWatcherKind::Directory) => { + changed.dir_changed(event.cell_path); + } + ( + FileWatcherEventType::Delete, + FileWatcherKind::File | FileWatcherKind::Symlink, + ) => { + changed.file_removed(event.cell_path); + } + (FileWatcherEventType::Delete, FileWatcherKind::Directory) => { + changed.dir_removed(event.cell_path); + } + } + } + stats.add_ignored(ignored); + Ok((stats.finish(), changed)) + } + + fn build_fs_snapshot( + &mut self, + root: &ProjectRoot, + cells: &CellResolver, + disk_path: &AbsNormPath, + ) -> anyhow::Result<()> { + for file in fs_util::read_dir(disk_path)? { + let file = file?; + let filetype = file.file_type()?; + let filename = file.file_name(); + + let filename = FileNameBuf::try_from(CompactString::new( + filename.to_str().context("Filename is not UTF-8")?, + )) + .with_context(|| format!("Invalid filename: {}", disk_path.display()))?; + + let disk_path = disk_path.join(filename); + let rel_path = root.relativize(&disk_path)?; + let cell_path = cells.get_cell_path(&rel_path)?; + + // We ignore buck-out and .hg dirs, as those are uninteresting events caused by us.
+ if rel_path.starts_with(InvocationPaths::buck_out_dir_prefix()) + || rel_path.starts_with(ProjectRelativePath::unchecked_new(".hg")) + { + continue; + } + + let filetype = FileType::from(filetype); + match filetype { + FileType::File => { + let hash = file_hash(disk_path.as_maybe_relativized())?; + self.add_entry(cell_path, EntryInfo::File(hash)); + } + FileType::Directory => { + self.build_fs_snapshot(root, cells, &disk_path)?; + self.add_entry(cell_path, EntryInfo::Directory); + } + FileType::Symlink => { + self.add_entry(cell_path, EntryInfo::Symlink); + } + FileType::Unknown => (), + } + } + Ok(()) + } +} + +fn file_hash(path: &Path) -> anyhow::Result { + let mut reader = File::open(path)?; + let mut hasher = blake3::Hasher::new(); + + let mut buffer = [0; 16 * 1024]; + loop { + let count = reader.read(&mut buffer)?; + if count == 0 { + break; + } + hasher.update(&buffer[..count]); + } + + Ok(hasher.finalize()) +} + +#[cfg(test)] +mod tests { + use std::collections::BTreeSet; + + use buck2_core::cells::cell_path::CellPath; + use buck2_core::cells::cell_root_path::CellRootPathBuf; + use buck2_core::cells::name::CellName; + use buck2_core::cells::CellResolver; + use buck2_core::fs::fs_util; + use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; + use buck2_core::fs::paths::abs_path::AbsPathBuf; + use buck2_core::fs::project::ProjectRoot; + use buck2_data::FileWatcherEventType; + use buck2_data::FileWatcherKind; + + use crate::fs_hash_crawler::FsEvent; + use crate::fs_hash_crawler::FsSnapshot; + + #[tokio::test] + async fn test_fs_snapshot() -> anyhow::Result<()> { + let cell_resolver = CellResolver::testing_with_name_and_path( + CellName::testing_new("root"), + CellRootPathBuf::testing_new(""), + ); + let tempdir = tempfile::tempdir()?; + let root_path = fs_util::canonicalize(AbsNormPathBuf::new(tempdir.path().to_owned())?)?; + let proj_root = ProjectRoot::new(root_path)?; + + let get_path = |root: &AbsPathBuf, path| -> anyhow::Result<(AbsPathBuf, CellPath)> { + let path = root.join(path); + let cell_path = cell_resolver.get_cell_path_from_abs_path(&path, &proj_root)?; + Ok((path, cell_path)) + }; + let root = proj_root.root().to_owned().into_abs_path_buf(); + let dir1 = root.join("dir1"); + let (file1, file1_cell) = get_path(&dir1, "file1")?; + let (dir2, dir2_cell) = get_path(&root, "dir2")?; + let (file2, file2_cell) = get_path(&dir2, "file2")?; + let (file3, file3_cell) = get_path(&dir1, "file3")?; + fs_util::create_dir_all(dir1)?; + fs_util::write(&file1, "old content")?; + fs_util::create_dir_all(&dir2)?; + fs_util::write(file2, "old content")?; + + let old_snapshot = FsSnapshot::build(&proj_root, &cell_resolver)?; + fs_util::write(file1, "new content")?; + fs_util::remove_all(dir2)?; + fs_util::write(file3, "new content")?; + let new_snapshot = FsSnapshot::build(&proj_root, &cell_resolver)?; + let events = old_snapshot.get_updates(&new_snapshot)?; + + let expected = [ + FsEvent { + cell_path: file1_cell, + event: FileWatcherEventType::Modify, + kind: FileWatcherKind::File, + }, + FsEvent { + cell_path: file3_cell, + event: FileWatcherEventType::Create, + kind: FileWatcherKind::File, + }, + FsEvent { + cell_path: dir2_cell, + event: FileWatcherEventType::Delete, + kind: FileWatcherKind::Directory, + }, + FsEvent { + cell_path: file2_cell, + event: FileWatcherEventType::Delete, + kind: FileWatcherKind::File, + }, + ]; + + let events = events.iter().collect::>(); + let expected = expected.iter().collect::>(); + assert_eq!(events, expected); + Ok(()) + } +} diff --git 
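[Editor's note: the heart of `FsSnapshot::get_updates` above is an ordinary two-map diff. A distilled model of that comparison, with `String` and `u64` standing in for `CellPath` and `blake3::Hash`; `Event` and `diff` are illustrative names:]

use std::collections::HashMap;

#[derive(Debug, PartialEq, Eq)]
enum Event {
    Create(String),
    Modify(String),
    Delete(String),
}

/// Diffs two path -> content-hash maps, exactly as the snapshot comparison does:
/// present in both with different hashes => Modify, only in old => Delete,
/// only in new => Create.
fn diff(old: &HashMap<String, u64>, new: &HashMap<String, u64>) -> Vec<Event> {
    let mut events = Vec::new();
    for (path, old_hash) in old {
        match new.get(path) {
            Some(new_hash) if new_hash != old_hash => events.push(Event::Modify(path.clone())),
            Some(_) => {}
            None => events.push(Event::Delete(path.clone())),
        }
    }
    for path in new.keys() {
        if !old.contains_key(path) {
            events.push(Event::Create(path.clone()));
        }
    }
    events
}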
a/app/buck2_file_watcher/src/lib.rs b/app/buck2_file_watcher/src/lib.rs index 97762c0d6781f..03a3fa6b9b123 100644 --- a/app/buck2_file_watcher/src/lib.rs +++ b/app/buck2_file_watcher/src/lib.rs @@ -7,8 +7,12 @@ * of this source tree. */ +#![feature(error_generic_member_access)] +#![feature(used_with_arg)] + pub mod dep_files; pub mod file_watcher; +mod fs_hash_crawler; pub mod mergebase; mod notify; mod stats; diff --git a/app/buck2_file_watcher/src/notify.rs b/app/buck2_file_watcher/src/notify.rs index 97d33d3e270f5..dc0c565afd2c7 100644 --- a/app/buck2_file_watcher/src/notify.rs +++ b/app/buck2_file_watcher/src/notify.rs @@ -120,8 +120,8 @@ impl NotifyFileData { let cell_path = cells.get_cell_path(&path)?; let ignore = ignore_specs .get(&cell_path.cell()) - .expect("unexpected cell name mismatch") - .is_match(cell_path.path()); + // See the comment on the analogous code in `watchman/interface.rs` + .map_or(false, |ignore| ignore.is_match(cell_path.path())); info!( "FileWatcher: {:?} {:?} (ignore = {})", @@ -159,7 +159,7 @@ impl NotifyFileData { changed_paths.insert(cell_path_str); } - let mut stats = FileWatcherStats::new(changed_paths.len(), None, None); + let mut stats = FileWatcherStats::new(Default::default(), changed_paths.len()); stats.add_ignored(self.ignored); for path in changed_paths { // The event type and watcher kind are just made up, but that's not a big deal diff --git a/app/buck2_file_watcher/src/stats.rs b/app/buck2_file_watcher/src/stats.rs index 2a26b6f455960..2598b8e45d500 100644 --- a/app/buck2_file_watcher/src/stats.rs +++ b/app/buck2_file_watcher/src/stats.rs @@ -26,17 +26,7 @@ pub(crate) struct FileWatcherStats { } impl FileWatcherStats { - pub(crate) fn new( - min_count: usize, - mergebase: Option<&str>, - watchman_version: Option, - ) -> Self { - let stats = buck2_data::FileWatcherStats { - branched_from_revision: mergebase.map(ToOwned::to_owned), - watchman_version, - ..Default::default() - }; - + pub(crate) fn new(stats: buck2_data::FileWatcherStats, min_count: usize) -> Self { let changes = Vec::with_capacity(std::cmp::min(MAX_FILE_CHANGE_RECORDS, min_count)); Self { diff --git a/app/buck2_file_watcher/src/watchman/mod.rs b/app/buck2_file_watcher/src/watchman.rs similarity index 100% rename from app/buck2_file_watcher/src/watchman/mod.rs rename to app/buck2_file_watcher/src/watchman.rs diff --git a/app/buck2_file_watcher/src/watchman/core.rs b/app/buck2_file_watcher/src/watchman/core.rs index f785b7ca861f5..e65bb03557e13 100644 --- a/app/buck2_file_watcher/src/watchman/core.rs +++ b/app/buck2_file_watcher/src/watchman/core.rs @@ -16,7 +16,10 @@ use std::time::Duration; use anyhow::Context as _; use async_trait::async_trait; -use buck2_core::env_helper::EnvHelper; +use buck2_certs::validate::validate_certs; +use buck2_core::buck2_env_anyhow; +use buck2_error::internal_error_anyhow; +use buck2_error::ErrorTag; use dupe::Dupe; use futures::future::Future; use serde::Deserialize; @@ -25,6 +28,46 @@ use tokio::sync::mpsc::UnboundedSender; use tokio::sync::oneshot; use watchman_client::prelude::*; +fn watchman_error_tag(e: &watchman_client::Error) -> Option { + let tag = match e { + watchman_client::Error::ConnectionError { .. } => ErrorTag::WatchmanConnectionError, + watchman_client::Error::ConnectionLost(_) => ErrorTag::WatchmanConnectionLost, + watchman_client::Error::ConnectionDiscovery { .. } => ErrorTag::WatchmanConnectionDiscovery, + watchman_client::Error::WatchmanServerError { message, .. 
} => { + if message.contains("RootNotConnectedError") { + ErrorTag::WatchmanRootNotConnectedError + } else if message + .contains("cannot compute status while a checkout is currently in progress") + { + ErrorTag::WatchmanCheckoutInProgress + } else { + ErrorTag::WatchmanServerError + } + } + watchman_client::Error::WatchmanResponseError { .. } => ErrorTag::WatchmanResponseError, + watchman_client::Error::MissingField { .. } => ErrorTag::WatchmanMissingField, + watchman_client::Error::Deserialize { .. } => ErrorTag::WatchmanDeserialize, + watchman_client::Error::Serialize { .. } => ErrorTag::WatchmanSerialize, + watchman_client::Error::Connect { .. } => ErrorTag::WatchmanConnect, + }; + Some(tag) +} + +#[derive(Debug, buck2_error::Error)] +enum WatchmanClientError { + #[buck2(input)] + #[error("Configured timeout is zero")] + ZeroTimeout, + #[buck2(tag = WatchmanTimeout)] + #[error( + "Watchman request timed out after {0}s; try restarting watchman, probably via `watchman shutdown-server`" + )] + Timeout(u64), + #[buck2(tag = watchman_error_tag(source))] + #[error(transparent)] + RequestFailed { source: watchman_client::Error }, +} + // We use the "new" field. This is marked as deprecated, but buck1 uses it and // I'm unaware of issues due to its use there. // @@ -60,7 +103,7 @@ mod types { let event = match (*self.exists, *self.new) { (true, true) => WatchmanEventType::Create, (false, _) => WatchmanEventType::Delete, - _ => WatchmanEventType::Modify, + (true, false) => WatchmanEventType::Modify, }; Some(WatchmanEvent { @@ -75,14 +118,14 @@ mod types { use types::*; #[derive(Debug)] -pub enum WatchmanEventType { +pub(crate) enum WatchmanEventType { Create, Modify, Delete, } #[derive(Debug)] -pub enum WatchmanKind { +pub(crate) enum WatchmanKind { File, Directory, Symlink, @@ -99,7 +142,7 @@ impl WatchmanKind { } #[derive(Debug)] -pub struct WatchmanEvent { +pub(crate) struct WatchmanEvent { pub kind: WatchmanKind, pub event: WatchmanEventType, pub path: PathBuf, @@ -118,7 +161,7 @@ impl Display for WatchmanEvent { } #[derive(Dupe, Clone)] -pub struct WatchmanClient(Arc<(watchman_client::Client, ResolvedRoot)>); +struct WatchmanClient(Arc<(watchman_client::Client, ResolvedRoot)>); impl Debug for WatchmanClient { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { @@ -126,31 +169,44 @@ impl Debug for WatchmanClient { } } +async fn with_timeout( + fut: impl Future> + Send, +) -> anyhow::Result { + let timeout = buck2_env_anyhow!("BUCK2_WATCHMAN_TIMEOUT", type=u64, default=57)?; + if timeout == 0 { + return Err(WatchmanClientError::ZeroTimeout.into()); + } + match tokio::time::timeout(Duration::from_secs(timeout), fut).await { + Ok(Ok(res)) => Ok(res), + Ok(Err(e)) => { + validate_certs().await.context("Watchman Request Failed")?; + Err(WatchmanClientError::RequestFailed { source: e }.into()) + } + Err(_) => { + validate_certs().await.context("Watchman Timed Out")?; + Err(WatchmanClientError::Timeout(timeout).into()) + } + } +} + impl WatchmanClient { - pub async fn connect( - connector: &Connector, - path: CanonicalPath, - ) -> anyhow::Result { - let client = connector.connect().await?; - let root = client.resolve_root(path).await?; + async fn connect(connector: &Connector, path: CanonicalPath) -> anyhow::Result { + let client = with_timeout(connector.connect()) + .await + .context("Connecting to watchman")?; + let root = with_timeout(client.resolve_root(path)) + .await + .context("Resolving watchman root")?; Ok(Self(Arc::new((client, root)))) } - pub async fn query< - F: 
serde::de::DeserializeOwned + std::fmt::Debug + Clone + QueryFieldList, - >( + async fn query( &self, query: QueryRequestCommon, ) -> anyhow::Result> { - static WATCHMAN_TIMEOUT: EnvHelper = EnvHelper::new("BUCK2_WATCHMAN_TIMEOUT"); let fut = self.client().query(self.root(), query); - Ok(match WATCHMAN_TIMEOUT.get_copied()? { - Some(timeout) => tokio::time::timeout(Duration::from_secs(timeout), fut) - .await - .context("Watchman request timed out")?, - None => fut.await, - }?) + with_timeout(fut).await } fn root(&self) -> &ResolvedRoot { @@ -163,7 +219,7 @@ impl WatchmanClient { } #[async_trait] -pub trait SyncableQueryProcessor: Send + Sync { +pub(crate) trait SyncableQueryProcessor: Send + Sync { type Output; type Payload; @@ -180,6 +236,7 @@ pub trait SyncableQueryProcessor: Send + Sync { async fn on_fresh_instance( &mut self, dice: Self::Payload, + events: Vec, mergebase: &Option, watchman_version: Option, ) -> anyhow::Result<(Self::Output, Self::Payload)>; @@ -204,8 +261,9 @@ pub struct SyncableQuery { control_tx: UnboundedSender>, } -pub enum WatchmanSyncResult { +enum WatchmanSyncResult { FreshInstance { + events: Vec, merge_base: Option, clock: ClockSpec, watchman_version: Option, @@ -239,7 +297,7 @@ where T: Send + 'static, P: Send + 'static, { - async fn run_loop(&mut self) { + async fn run_loop(mut self) { // We discard the first error here, if there is one. It's a bit unfortunate but it makes // everything a lot simpler below, and kicking it off earlier is desirable because it can // give Watchman time to warm up. @@ -297,7 +355,7 @@ where } else { ( self.processor - .on_fresh_instance(payload, &merge_base, watchman_version) + .on_fresh_instance(payload, events, &merge_base, watchman_version) .await?, merge_base, clock, @@ -305,12 +363,13 @@ where } } WatchmanSyncResult::FreshInstance { + events, merge_base, clock, watchman_version, } => ( self.processor - .on_fresh_instance(payload, &merge_base, watchman_version) + .on_fresh_instance(payload, events, &merge_base, watchman_version) .await?, merge_base, clock, @@ -338,9 +397,7 @@ where &mut self, client: &mut Option, ) -> anyhow::Result { - self.reconnect(client) - .await - .context("Error reconnecting to Watchman")?; + self.reconnect(client).await?; let out = self.sync_query(client).await?; @@ -383,19 +440,26 @@ where // While we use scm-based queries, the processor api doesn't really support them yet so we just treat it as a fresh instance. let (new_mergebase, clock) = unpack_clock(clock); + let events = match files { + None if is_fresh_instance => vec![], + None => { + return Err(internal_error_anyhow!( + "unexpected missing files in watchman query" + )); + } + Some(v) => v.into_iter().filter_map(|f| f.into_event()).collect(), + }; + Ok(if is_fresh_instance { WatchmanSyncResult::FreshInstance { + events, merge_base: new_mergebase, clock, watchman_version: Some(version), } } else { WatchmanSyncResult::Events { - events: files - .ok_or_else(|| anyhow::anyhow!(""))? - .into_iter() - .filter_map(|f| f.into_event()) - .collect(), + events, merge_base: new_mergebase, clock, watchman_version: Some(version), @@ -429,7 +493,10 @@ where P: Send + 'static, { /// Ensures that the processor has been sent all changes that watchman has seen. 
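[Editor's note: the `with_timeout` helper above folds a timeout and a request failure into one error path. A simplified sketch of that shape, assuming tokio and anyhow; the real helper additionally re-validates TLS certificates and attaches watchman-specific error tags:]

use std::future::Future;
use std::time::Duration;

/// Wraps a fallible future with a timeout, mapping both failure modes into a
/// single anyhow error.
async fn with_timeout<T, E>(
    fut: impl Future<Output = Result<T, E>>,
    secs: u64,
) -> anyhow::Result<T>
where
    E: std::error::Error + Send + Sync + 'static,
{
    match tokio::time::timeout(Duration::from_secs(secs), fut).await {
        Ok(Ok(v)) => Ok(v),
        Ok(Err(e)) => Err(anyhow::Error::new(e).context("request failed")),
        Err(_) => Err(anyhow::anyhow!("request timed out after {secs}s")),
    }
}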
- pub fn sync(&self, dice: P) -> impl Future> + Send + 'static { + pub(crate) fn sync( + &self, + dice: P, + ) -> impl Future> + Send + 'static { let (sync_done_tx, sync_done_rx) = tokio::sync::oneshot::channel(); let tx_res = self .control_tx @@ -447,12 +514,13 @@ where } } - pub fn new( + pub(crate) fn new( connector: Connector, path: impl AsRef, expr: Expr, processor: Box>, mergebase_with: Option, + empty_on_fresh_instance: bool, ) -> anyhow::Result> { let path = path.as_ref(); let path = CanonicalPath::canonicalize(path) @@ -461,7 +529,7 @@ where let query = QueryRequestCommon { expression: Some(expr), fields: vec!["name"], - empty_on_fresh_instance: true, + empty_on_fresh_instance, relative_root: None, case_sensitive: true, dedup_results: false, @@ -477,7 +545,7 @@ where tokio::sync::mpsc::unbounded_channel::>(); tokio::spawn(async move { - let mut handler = SyncableQueryHandler { + let handler = SyncableQueryHandler { connector, path, query, diff --git a/app/buck2_file_watcher/src/watchman/interface.rs b/app/buck2_file_watcher/src/watchman/interface.rs index 58040bfb2c3b5..d04a95492f204 100644 --- a/app/buck2_file_watcher/src/watchman/interface.rs +++ b/app/buck2_file_watcher/src/watchman/interface.rs @@ -16,13 +16,15 @@ use anyhow::Context as _; use async_trait::async_trait; use buck2_common::dice::file_ops::FileChangeTracker; use buck2_common::ignores::ignore_set::IgnoreSet; -use buck2_common::legacy_configs::LegacyBuckConfig; +use buck2_common::legacy_configs::configs::LegacyBuckConfig; +use buck2_common::legacy_configs::key::BuckconfigKeyRef; use buck2_core::cells::name::CellName; use buck2_core::cells::CellResolver; use buck2_core::fs::paths::abs_norm_path::AbsNormPath; use buck2_core::fs::project_rel_path::ProjectRelativePath; use buck2_core::rollout_percentage::RolloutPercentage; use buck2_events::dispatch::span_async; +use buck2_util::process::async_background_command; use dice::DiceTransactionUpdater; use tracing::info; use tracing::warn; @@ -40,10 +42,15 @@ use crate::watchman::core::WatchmanEventType; use crate::watchman::core::WatchmanKind; struct WatchmanQueryProcessor { + // FIXME(JakobDegen): Storing these values statically is completely broken. See + // `tests/e2e/cells/test_file_watcher_resolution:test_changing_cell_location_bug` for a repro of + // a bug. cells: CellResolver, ignore_specs: HashMap, - retain_dep_files_on_watchman_fresh_instance: bool, + empty_on_fresh_instance: bool, + report_global_rev: bool, last_mergebase: Option, + last_mergebase_global_rev: Option, } /// Used in process_one_change @@ -58,11 +65,11 @@ impl WatchmanQueryProcessor { &self, mut ctx: DiceTransactionUpdater, events: Vec, - mergebase: &Option, - watchman_version: Option, + base_stats: buck2_data::FileWatcherStats, ) -> anyhow::Result<(buck2_data::FileWatcherStats, DiceTransactionUpdater)> { let mut handler = FileChangeTracker::new(); - let mut stats = FileWatcherStats::new(events.len(), mergebase.as_deref(), watchman_version); + + let mut stats = FileWatcherStats::new(base_stats, events.len()); for ev in events { // If the path is invalid, then walk up all the way until you find a valid dir to @@ -105,8 +112,10 @@ impl WatchmanQueryProcessor { let ignore = self .ignore_specs .get(&cell_path.cell()) - .expect("unexpected cell name mismatch") - .is_match(cell_path.path()); + // This shouldn't ever really happen. 
However, because of the bugs caused by just + // storing the `CellResolver` in the watcher permanently, sometimes it can, so we just + // default to not ignoring the file in that case + .map_or(false, |ignore| ignore.is_match(cell_path.path())); info!("Watchman: {:?} (ignore = {})", ev, ignore); @@ -204,6 +213,25 @@ fn find_first_valid_parent(mut path: &Path) -> Option<&ProjectRelativePath> { } } +async fn try_fetch_global_rev(hash: &str) -> Option<u64> { + // There's a variety of ways in which this might go wrong: `PATH` is messed up, this somehow got + // turned on in a non-`hg` repo, etc. To make sure we don't fail any builds from this, ignore + // all errors. + let command = async_background_command("hg") + .args(["log", "-r", hash, "-T", "{get(extras, \"global_rev\")}"]) + .env("HGPLAIN", "1") + .output(); + let output = tokio::time::timeout(std::time::Duration::from_millis(500), command) + .await + .ok()? + .ok()?; + if !output.status.success() { + return None; + } + let stdout = String::from_utf8(output.stdout).ok()?; + stdout.trim().parse::<u64>().ok() +} + #[async_trait] impl SyncableQueryProcessor for WatchmanQueryProcessor { type Output = buck2_data::FileWatcherStats; @@ -217,23 +245,31 @@ impl SyncableQueryProcessor for WatchmanQueryProcessor { watchman_version: Option<String>, ) -> anyhow::Result<(Self::Output, DiceTransactionUpdater)> { self.last_mergebase = mergebase.clone(); - self.process_events_impl(dice, events, mergebase, watchman_version) - .await + self.process_events_impl( + dice, + events, + buck2_data::FileWatcherStats { + branched_from_revision: self.last_mergebase.clone(), + branched_from_global_rev: self.last_mergebase_global_rev, + watchman_version, + ..Default::default() + }, + ) + .await } async fn on_fresh_instance( &mut self, ctx: DiceTransactionUpdater, + events: Vec<WatchmanEvent>, mergebase: &Option<String>, watchman_version: Option<String>, ) -> anyhow::Result<(Self::Output, DiceTransactionUpdater)> { let has_new_mergebase = self.last_mergebase.as_ref() != mergebase.as_ref(); - let clear_dep_files = - has_new_mergebase || !self.retain_dep_files_on_watchman_fresh_instance; + let clear_dep_files = has_new_mergebase; - // We'll clear dep files if we're configured to do so on all fresh instances. Otherwise, - // we'll drop them if the mergebase has changed, which means our dep files are likely + // We'll clear dep files if the mergebase has changed, which means our dep files are likely // irrelevant. // // This is imperfect. If the user rebased from yesterday's stable to today's stable, then @@ -242,33 +278,46 @@ impl SyncableQueryProcessor for WatchmanQueryProcessor { // (where we'll rebuild things our dep files *could* have avoided) as not flushing in the // former (where we'll fetch loads of dep files that all miss), so we err on the side of // being safe and drop them when the mergebase changes. + // + // We do retain dep files that were produced locally, since we don't need to fetch them as + // they are already on disk. if clear_dep_files { - crate::dep_files::flush_dep_files(); + crate::dep_files::flush_non_local_dep_files(); } self.last_mergebase = mergebase.clone(); + if let Some(hash) = self.last_mergebase.as_ref() { + if self.report_global_rev { + self.last_mergebase_global_rev = try_fetch_global_rev(hash).await; + } + } + // TODO(cjhopman): could probably get away with just invalidating all fs things, but that's not supported. // Dropping the entire DICE map can be somewhat computationally expensive as there // are a lot of destructors to run.
On the other hand, we don't have to wait for // it. So, we just send it off to its own thread. let ctx = ctx.unstable_take(); - Ok(( - buck2_data::FileWatcherStats { - fresh_instance: true, - branched_from_revision: mergebase.clone(), - incomplete_events_reason: Some("Fresh instance".to_owned()), - watchman_version, - fresh_instance_data: Some(buck2_data::FreshInstance { - new_mergebase: has_new_mergebase, - cleared_dice: true, - cleared_dep_files: clear_dep_files, - }), - ..Default::default() - }, - ctx, - )) + let mut base_stats = buck2_data::FileWatcherStats { + fresh_instance: true, + branched_from_revision: mergebase.clone(), + branched_from_global_rev: self.last_mergebase_global_rev, + watchman_version, + fresh_instance_data: Some(buck2_data::FreshInstance { + new_mergebase: has_new_mergebase, + cleared_dice: true, + cleared_dep_files: clear_dep_files, + }), + ..Default::default() + }; + + if self.empty_on_fresh_instance { + base_stats.incomplete_events_reason = Some("Fresh instance".to_owned()); + Ok((base_stats, ctx)) + } else { + self.process_events_impl(ctx, events, base_stats).await + } } } @@ -290,13 +339,36 @@ impl WatchmanFileWatcher { ignore_specs: HashMap, ) -> anyhow::Result { let watchman_merge_base = root_config - .get("project", "watchman_merge_base") + .get(BuckconfigKeyRef { + section: "project", + property: "watchman_merge_base", + }) .map(|s| s.to_owned()); - let retain_dep_files_on_watchman_fresh_instance = root_config - .parse::("buck2", "retain_dep_files_on_watchman_fresh_instance")? - .unwrap_or_else(RolloutPercentage::always) - .roll(); + let empty_on_fresh_instance = if watchman_merge_base.is_some() { + // double negative here because we'd prefer that rollout changes config value from false->true. + !root_config + .parse::(BuckconfigKeyRef { + section: "buck2", + property: "disable_watchman_empty_on_fresh_instance", + })? + .unwrap_or_else(RolloutPercentage::never) + .roll() + } else { + // When not using scm-aware queries, fresh instances would list every file in + // the repo. That's a lot and not very useful. + // TODO(cjhopman): If we find we get a lot of value from the invalidation tracking that + // getting changed files since branch point gives us, we could try to get a better + // approach here for the non scm-aware case. + true + }; + + let report_global_rev = root_config + .parse::(BuckconfigKeyRef { + section: "buck2", + property: "watchman_report_global_rev", + })? 
+ .unwrap_or(false); let query = SyncableQuery::new( Connector::new(), @@ -309,10 +381,13 @@ impl WatchmanFileWatcher { Box::new(WatchmanQueryProcessor { cells, ignore_specs, - retain_dep_files_on_watchman_fresh_instance, + empty_on_fresh_instance, + report_global_rev, last_mergebase: None, + last_mergebase_global_rev: None, }), watchman_merge_base, + empty_on_fresh_instance, )?; Ok(Self { query }) diff --git a/app/buck2_file_watcher/src/watchman/test.rs b/app/buck2_file_watcher/src/watchman/test.rs index 095d00e00794c..784db93aa87e1 100644 --- a/app/buck2_file_watcher/src/watchman/test.rs +++ b/app/buck2_file_watcher/src/watchman/test.rs @@ -32,7 +32,7 @@ struct TestQueryProcessor; #[derive(PartialEq, Eq, Debug)] enum Out { - FreshInstance, + FreshInstance(Vec), Files(Vec), } @@ -57,10 +57,14 @@ impl SyncableQueryProcessor for TestQueryProcessor { async fn on_fresh_instance( &mut self, payload: Self::Payload, + events: Vec, _mergebase: &Option, _watchman_version: Option, ) -> anyhow::Result<(Self::Output, Self::Payload)> { - Ok((Out::FreshInstance, payload)) + Ok(( + Out::FreshInstance(events.into_map(|e| e.path.display().to_string())), + payload, + )) } } @@ -195,10 +199,11 @@ async fn test_syncable_query() -> anyhow::Result<()> { Expr::Any(vec![Expr::FileType(FileType::Regular)]), Box::new(TestQueryProcessor), None, + true, )?; // Startup - assert_eq!(watchman_query.sync(()).await?.0, Out::FreshInstance); + assert_eq!(watchman_query.sync(()).await?.0, Out::FreshInstance(vec![])); assert_eq!(watchman_query.sync(()).await?.0, Out::Files(vec![])); // Create a file, see that we receive it. @@ -215,7 +220,7 @@ async fn test_syncable_query() -> anyhow::Result<()> { // Restart Watchman, we should be fixed now, and get a fresh instance. let mut watchman_instance = spawn_watchman(&watchman_dir).await?; - assert_eq!(watchman_query.sync(()).await?.0, Out::FreshInstance); + assert_eq!(watchman_query.sync(()).await?.0, Out::FreshInstance(vec![])); assert_eq!(watchman_query.sync(()).await?.0, Out::Files(vec![])); // Clean up diff --git a/app/buck2_forkserver/BUCK b/app/buck2_forkserver/BUCK index 11da3e6b588f6..9196391e0bfa8 100644 --- a/app/buck2_forkserver/BUCK +++ b/app/buck2_forkserver/BUCK @@ -1,5 +1,5 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") +load("@fbcode_macros//build_defs:rust_linkable_symbol.bzl", "rust_linkable_symbol") oncall("build_infra") @@ -8,37 +8,48 @@ rust_library( srcs = glob( ["src/**/*.rs"], ), - mapped_srcs = select({ - "DEFAULT": {}, - "ovr_config//os:linux": { - "//buck2/app/buck2_miniperf:buck2_miniperf": "src/unix/miniperf.bin", - }, - }), os_deps = [ ( "linux", [ "fbsource//third-party/rust:nix", + "fbsource//third-party/rust:rand", + "//buck2/app/buck2_grpc:buck2_grpc", + "//buck2/app/buck2_util:buck2_util", + ":buck2_miniperf_data", ], ), ( "macos", [ "fbsource//third-party/rust:nix", + "fbsource//third-party/rust:rand", + "//buck2/app/buck2_grpc:buck2_grpc", + "//buck2/app/buck2_util:buck2_util", ], ), ( "windows", [ "fbsource//third-party/rust:winapi", + "//buck2/app/buck2_wrapper_common:buck2_wrapper_common", ], ), ], test_deps = [ "fbsource//third-party/rust:assert_matches", + "fbsource//third-party/rust:sysinfo", "fbsource//third-party/rust:tempfile", "fbsource//third-party/rust:tokio-stream", ], + test_os_deps = [ + ( + "windows", + [ + "//buck2/app/buck2_util:buck2_util", + ], + ), + ], deps = [ "fbsource//third-party/rust:anyhow", "fbsource//third-party/rust:arc-swap", @@ 
-47,9 +58,7 @@ rust_library( "fbsource//third-party/rust:bytes", "fbsource//third-party/rust:futures", "fbsource//third-party/rust:libc", - "fbsource//third-party/rust:nix", "fbsource//third-party/rust:pin-project", - "fbsource//third-party/rust:rand", "fbsource//third-party/rust:take_mut", "fbsource//third-party/rust:thiserror", "fbsource//third-party/rust:tokio", @@ -60,10 +69,14 @@ rust_library( "//buck2/app/buck2_common:buck2_common", "//buck2/app/buck2_core:buck2_core", "//buck2/app/buck2_data:buck2_data", + "//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_forkserver_proto:buck2_forkserver_proto", - "//buck2/app/buck2_grpc:buck2_grpc", "//buck2/app/buck2_miniperf_proto:buck2_miniperf_proto", - "//buck2/app/buck2_util:buck2_util", "//buck2/gazebo/dupe:dupe", ], ) + +rust_linkable_symbol( + name = "buck2_miniperf_data", + content_bytes = "//buck2/app/buck2_miniperf:buck2_miniperf", +) diff --git a/app/buck2_forkserver/Cargo.toml b/app/buck2_forkserver/Cargo.toml index 9ff663e605515..e059c732b3a8d 100644 --- a/app/buck2_forkserver/Cargo.toml +++ b/app/buck2_forkserver/Cargo.toml @@ -1,7 +1,9 @@ [package] +edition = "2021" +license = { workspace = true } name = "buck2_forkserver" +repository = { workspace = true } version = "0.1.0" -edition = "2021" [dependencies] anyhow = { workspace = true } @@ -12,40 +14,44 @@ bytes = { workspace = true } futures = { workspace = true } libc = { workspace = true } pin-project = { workspace = true } -rand = { workspace = true } take_mut = { workspace = true } thiserror = { workspace = true } -tokio-util = { workspace = true } tokio = { workspace = true } +tokio-util = { workspace = true } tonic = { workspace = true } tracing = { workspace = true } allocative = { workspace = true } dupe = { workspace = true } -gazebo = { workspace = true } -gazebo_lint.optional = true -# @oss-disable: gazebo_lint.path = "../../gazebo_lint/gazebo_lint" -gazebo_lint.version = "0.1" buck2_common = { workspace = true } buck2_core = { workspace = true } -buck2_forkserver_proto = { workspace = true } buck2_data = { workspace = true } -buck2_grpc = { workspace = true } -buck2_util = { workspace = true } +buck2_error = { workspace = true } +buck2_forkserver_proto = { workspace = true } buck2_miniperf_proto = { workspace = true } [target.'cfg(unix)'.dependencies] nix = { workspace = true } +rand = { workspace = true } + +buck2_grpc = { workspace = true } +buck2_util = { workspace = true } [target.'cfg(windows)'.dependencies] winapi = { workspace = true } +buck2_wrapper_common = { workspace = true } + [dev-dependencies] assert_matches = { workspace = true } bytes = { workspace = true } +sysinfo = { workspace = true } tempfile = { workspace = true } tokio-stream = { workspace = true } -[features] -# @oss-disable: default = ["gazebo_lint"] +[target.'cfg(windows)'.dev-dependencies] +buck2_util = { workspace = true } + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ["cfg(fbcode_build)"] } diff --git a/app/buck2_forkserver/src/client.rs b/app/buck2_forkserver/src/client.rs index bfff7ecd506e9..9731eb386cf5d 100644 --- a/app/buck2_forkserver/src/client.rs +++ b/app/buck2_forkserver/src/client.rs @@ -14,7 +14,6 @@ use std::sync::Arc; use allocative::Allocative; use anyhow::Context; use arc_swap::ArcSwapOption; -use buck2_common::result::SharedError; use buck2_core::tag_error; use dupe::Dupe; use futures::future; @@ -35,7 +34,7 @@ pub struct ForkserverClient { inner: Arc, } -#[derive(thiserror::Error, Debug)] +#[derive(buck2_error::Error, Debug)] enum 
ForkserverError { #[error("Error on Forkserver wait()")] WaitError(#[source] io::Error), @@ -47,7 +46,7 @@ enum ForkserverError { struct ForkserverClientInner { /// Error from the forkserver process, if any. #[allocative(skip)] - error: Arc>, + error: Arc>, pid: u32, #[allocative(skip)] rpc: buck2_forkserver_proto::forkserver_client::ForkserverClient, @@ -73,7 +72,7 @@ impl ForkserverClient { Err(e) => ForkserverError::WaitError(e), }; - let err = anyhow::Error::new(err).context("Forkserver is unavailable"); + let err = buck2_error::Error::new(err).context("Forkserver is unavailable"); error.swap(Some(Arc::new(err))); } @@ -99,11 +98,12 @@ impl ForkserverClient { if let Some(err) = &*self.inner.error.load() { return Err(tag_error!( "forkserver_exit", - SharedError::from(err.dupe()).into(), + err.as_ref().dupe().into(), quiet: true, task: false, daemon_in_memory_state_is_corrupted: true, - )); + ) + .into()); } let stream = stream::once(future::ready(buck2_forkserver_proto::RequestEvent { diff --git a/app/buck2_forkserver/src/convert.rs b/app/buck2_forkserver/src/convert.rs index 393304d2fa495..f78a333b27b3a 100644 --- a/app/buck2_forkserver/src/convert.rs +++ b/app/buck2_forkserver/src/convert.rs @@ -15,7 +15,8 @@ use futures::stream::StreamExt; use crate::run::CommandEvent; use crate::run::GatherOutputStatus; -pub fn encode_event_stream( +#[allow(dead_code)] +pub(crate) fn encode_event_stream( s: S, ) -> impl Stream> where @@ -61,7 +62,7 @@ where s.map(|r| r.map(convert_event).map_err(convert_err)) } -pub fn decode_event_stream(s: S) -> impl Stream> +pub(crate) fn decode_event_stream(s: S) -> impl Stream> where S: Stream>, { diff --git a/app/buck2_forkserver/src/lib.rs b/app/buck2_forkserver/src/lib.rs index 5b8f419403569..eb78b7741c2f0 100644 --- a/app/buck2_forkserver/src/lib.rs +++ b/app/buck2_forkserver/src/lib.rs @@ -7,12 +7,14 @@ * of this source tree. */ -#![cfg_attr(unix, allow(stable_features))] -#![cfg_attr(unix, feature(process_set_process_group))] - +#![feature(error_generic_member_access)] +#![cfg_attr(windows, feature(windows_process_extensions_main_thread_handle))] pub mod client; -pub mod convert; +pub(crate) mod convert; pub mod run; #[cfg(unix)] pub mod unix; + +#[cfg(windows)] +mod win; diff --git a/app/buck2_forkserver/src/run.rs b/app/buck2_forkserver/src/run.rs new file mode 100644 index 0000000000000..926fd2c954972 --- /dev/null +++ b/app/buck2_forkserver/src/run.rs @@ -0,0 +1,744 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +mod interruptible_async_read; +pub mod process_group; +pub mod status_decoder; + +use std::borrow::Cow; +use std::path::Path; +use std::pin::Pin; +use std::process::Command; +use std::process::ExitStatus; +use std::task::Context; +use std::task::Poll; +use std::time::Duration; + +use anyhow::Context as _; +use async_trait::async_trait; +use buck2_core::fs::fs_util; +use buck2_core::fs::paths::abs_path::AbsPath; +use bytes::Bytes; +use futures::future::Future; +use futures::future::FutureExt; +use futures::stream::Stream; +use futures::stream::StreamExt; +use futures::stream::TryStreamExt; +use pin_project::pin_project; +use tokio_util::codec::BytesCodec; +use tokio_util::codec::FramedRead; + +use self::interruptible_async_read::InterruptNotifiable; +use self::interruptible_async_read::InterruptibleAsyncRead; +use self::status_decoder::DecodedStatus; +use self::status_decoder::DefaultStatusDecoder; +use self::status_decoder::StatusDecoder; +use crate::run::process_group::ProcessCommand; +use crate::run::process_group::ProcessGroup; +use crate::run::process_group::SpawnError; + +#[derive(Debug)] +pub enum GatherOutputStatus { + /// Contains the exit code. + Finished { + exit_code: i32, + execution_stats: Option, + }, + TimedOut(Duration), + Cancelled, + SpawnFailed(String), +} + +impl From for GatherOutputStatus { + fn from(d: DecodedStatus) -> Self { + match d { + DecodedStatus::Status { + exit_code, + execution_stats, + } => Self::Finished { + exit_code, + execution_stats, + }, + DecodedStatus::SpawnFailed(v) => Self::SpawnFailed(v), + } + } +} + +#[derive(Debug)] +pub(crate) enum CommandEvent { + Stdout(Bytes), + Stderr(Bytes), + Exit(GatherOutputStatus), +} + +enum StdioEvent { + Stdout(Bytes), + Stderr(Bytes), +} + +impl From for CommandEvent { + fn from(stdio: StdioEvent) -> Self { + match stdio { + StdioEvent::Stdout(bytes) => CommandEvent::Stdout(bytes), + StdioEvent::Stderr(bytes) => CommandEvent::Stderr(bytes), + } + } +} + +/// This stream will yield [CommandEvent] whenever we have something on stdout or stderr (this is +/// our stdio stream), and it'll finish up the stream with the exit status. This is basically like +/// a select, but with the exit guaranteed to come last. +#[pin_project] +struct CommandEventStream { + exit: Option>, + + done: bool, + + #[pin] + status: futures::future::Fuse, + + #[pin] + stdio: futures::stream::Fuse, +} + +impl CommandEventStream +where + Status: Future, + Stdio: Stream, +{ + fn new(status: Status, stdio: Stdio) -> Self { + Self { + exit: None, + done: false, + status: status.fuse(), + stdio: stdio.fuse(), + } + } +} + +impl Stream for CommandEventStream +where + Status: Future>, + Stdio: Stream> + InterruptNotifiable, +{ + type Item = anyhow::Result; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut this = self.project(); + + if *this.done { + return Poll::Ready(None); + } + + // This future is fused so it's guaranteed to be ready once. If it does, capture the exit + // status, we'll return it later. + if let Poll::Ready(status) = this.status.poll(cx) { + *this.exit = Some(status); + this.stdio.as_mut().get_pin_mut().notify_interrupt(); + } + + // This stream is also fused, so if it returns None, we'll know it's done for good and we'll + // return the exit status if it's available. + if let Some(stdio) = futures::ready!(this.stdio.poll_next(cx)) { + return Poll::Ready(Some(stdio.map(|event| event.into()))); + } + + // If we got here that means the stream is done. 
If we have it we return, and if we don't + // we report we're pending, because we'll have polled it already earlier. + if let Some(exit) = this.exit.take() { + *this.done = true; + return Poll::Ready(Some(exit.map(CommandEvent::Exit))); + } + + Poll::Pending + } +} + +pub async fn timeout_into_cancellation( + timeout: Option, +) -> anyhow::Result { + match timeout { + Some(t) => { + tokio::time::sleep(t).await; + Ok(GatherOutputStatus::TimedOut(t)) + } + None => futures::future::pending().await, + } +} + +pub(crate) fn stream_command_events( + process_group: anyhow::Result, + cancellation: T, + decoder: impl StatusDecoder, + kill_process: impl KillProcess, + stream_stdio: bool, +) -> anyhow::Result>> +where + T: Future> + Send, +{ + let mut process_group = match process_group { + Ok(process_group) => process_group, + Err(e) => { + let event = Ok(CommandEvent::Exit(GatherOutputStatus::SpawnFailed( + e.to_string(), + ))); + return Ok(futures::stream::once(futures::future::ready(event)).left_stream()); + } + }; + + let stdio = if stream_stdio { + let stdout = process_group + .take_stdout() + .context("Child stdout is not piped")?; + let stderr = process_group + .take_stderr() + .context("Child stderr is not piped")?; + + #[cfg(unix)] + type Drainer = self::interruptible_async_read::UnixNonBlockingDrainer; + + // On Windows, for the time being we just give ourselves a timeout to finish reading. + // Ideally this would perform a non-blocking read on self instead like we do on Unix. + #[cfg(not(unix))] + type Drainer = self::interruptible_async_read::TimeoutDrainer; + + let stdout = InterruptibleAsyncRead::<_, Drainer<_>>::new(stdout); + let stderr = InterruptibleAsyncRead::<_, Drainer<_>>::new(stderr); + let stdout = FramedRead::new(stdout, BytesCodec::new()) + .map(|data| anyhow::Ok(StdioEvent::Stdout(data?.freeze()))); + let stderr = FramedRead::new(stderr, BytesCodec::new()) + .map(|data| anyhow::Ok(StdioEvent::Stderr(data?.freeze()))); + + futures::stream::select(stdout, stderr).left_stream() + } else { + futures::stream::empty().right_stream() + }; + + let status = async move { + enum Outcome { + Finished(ExitStatus), + Cancelled(GatherOutputStatus), + } + + // NOTE: This wrapping here is so that we release the borrow of `child` that stems from + // `wait()` by the time we call kill_process a few lines down. + let execute = async { + let status = process_group.wait(); + futures::pin_mut!(status); + futures::pin_mut!(cancellation); + + anyhow::Ok(match futures::future::select(status, cancellation).await { + futures::future::Either::Left((status, _)) => Outcome::Finished(status?), + futures::future::Either::Right((res, _)) => Outcome::Cancelled(res?), + }) + }; + + anyhow::Ok(match execute.await? { + Outcome::Finished(status) => decoder.decode_status(status).await?.into(), + Outcome::Cancelled(res) => { + kill_process + .kill(&mut process_group) + .await + .context("Failed to terminate child after timeout")?; + + decoder + .cancel() + .await + .context("Failed to cancel status decoder after timeout")?; + + // We just killed the child, so this should finish immediately. We should still call + // this to release any process. 
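+ // Ordering is deliberate here, and covered by the timeout_kills_before_dropping_decoder test below: kill the process group, cancel the decoder, then reap the child before reporting the cancellation outcome.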
+ process_group + .wait() + .await + .context("Failed to await child after kill")?; + + res + } + }) + }; + + Ok(CommandEventStream::new(status, stdio).right_stream()) +} + +pub(crate) async fn decode_command_event_stream<S>( + stream: S, +) -> anyhow::Result<(GatherOutputStatus, Vec<u8>, Vec<u8>)> +where + S: Stream<Item = anyhow::Result<CommandEvent>>, +{ + futures::pin_mut!(stream); + + let mut stdout = Vec::<u8>::new(); + let mut stderr = Vec::<u8>::new(); + + while let Some(event) = stream.try_next().await? { + match event { + CommandEvent::Stdout(bytes) => stdout.extend(&bytes), + CommandEvent::Stderr(bytes) => stderr.extend(&bytes), + CommandEvent::Exit(exit) => return Ok((exit, stdout, stderr)), + } + } + + Err(anyhow::Error::msg( + "Stream did not yield CommandEvent::Exit", + )) +} + +pub async fn gather_output<T>( + cmd: Command, + cancellation: T, +) -> anyhow::Result<(GatherOutputStatus, Vec<u8>, Vec<u8>)> +where + T: Future<Output = anyhow::Result<GatherOutputStatus>> + Send, +{ + let cmd = ProcessCommand::new(cmd); + + let process_details = + spawn_retry_txt_busy(cmd, || tokio::time::sleep(Duration::from_millis(50))).await; + + let stream = stream_command_events( + process_details, + cancellation, + DefaultStatusDecoder, + DefaultKillProcess::default(), + true, + )?; + decode_command_event_stream(stream).await +} + +/// Dependency injection for kill. We use this in testing. +#[async_trait] +pub(crate) trait KillProcess { + async fn kill(self, process: &mut ProcessGroup) -> anyhow::Result<()>; +} + +#[derive(Default)] +pub(crate) struct DefaultKillProcess { + pub graceful_shutdown_timeout_s: Option<u32>, +} + +#[async_trait] +impl KillProcess for DefaultKillProcess { + async fn kill(self, process_group: &mut ProcessGroup) -> anyhow::Result<()> { + let pid = match process_group.id() { + Some(pid) => pid, + None => { + // Child just exited, so in this case we don't want to kill anything. + return Ok(()); + } + }; + tracing::info!("Killing process {}", pid); + process_group.kill(self.graceful_shutdown_timeout_s).await + } +} + +/// Unify the behavior of using a relative path for the executable between Unix and Windows. On +/// UNIX, the path is understood to be relative to the cwd of the *spawned process*, whereas on +/// Windows, it's relative to the cwd of the *spawning* process. +/// +/// Here, we unify the two behaviors since we always run our subprocesses with a known cwd: we +/// check if the executable actually exists relative to said cwd, and if it does, we use that. +pub fn maybe_absolutize_exe<'a>( + exe: &'a (impl AsRef<Path> + ?Sized), + spawned_process_cwd: &'_ AbsPath, +) -> anyhow::Result<Cow<'a, Path>> { + let exe = exe.as_ref(); + + let abs = spawned_process_cwd.join(exe); + if fs_util::try_exists(&abs).context("Error absolute-izing executable")? { + return Ok(abs.into_path_buf().into()); + } + + Ok(exe.into()) +} + +/// fork-exec is a bit tricky in a busy process. We often have files open for writing just prior to +/// executing them (as we download from RE), and many processes being spawned concurrently. We do +/// close the fds properly before the exec, but what can happen is: +/// +/// - Some thread forks +/// +/// - We close the file. At this time we don't have it open, but the forked process does. +/// +/// - We try to exec the file. This fails because the file is open for writing (by the forked +/// process). +/// +/// - The forked process execs. At this point the file is closed (because everything is CLOEXEC). +/// +/// The window during which the forked process holds the fd is small, so retrying a couple times +/// here should let us make this work.
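+/// +/// As a rough sketch (hedged: `err` stands in for the spawn's raw io::Error, simplified from the SpawnError mapping in the loop below), the retry condition amounts to: +/// +/// ```ignore +/// let is_txt_busy = matches!(err.raw_os_error(), Some(libc::ETXTBSY)); +/// ```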
+/// +/// The more correct solution for this here would be to start a fork server in a separate process +/// when we start. However, until we get there, this should do the trick. +async fn spawn_retry_txt_busy( + mut cmd: ProcessCommand, + mut delay: F, +) -> anyhow::Result +where + F: FnMut() -> D, + D: Future, +{ + let mut attempts = 10; + + loop { + let res = cmd.spawn(); + + let res_errno = res.as_ref().map_err(|e| match e { + SpawnError::IoError(e) => e.raw_os_error(), + SpawnError::GenericError(_) => None, + }); + let is_txt_busy = matches!(res_errno, Err(Some(libc::ETXTBSY))); + + if attempts == 0 || !is_txt_busy { + return res.map_err(anyhow::Error::from); + } + + delay().await; + + attempts -= 1; + } +} + +#[cfg(test)] +mod tests { + use std::str; + use std::str::FromStr; + use std::sync::Arc; + use std::sync::Mutex; + use std::time::Instant; + + use assert_matches::assert_matches; + use buck2_util::process::background_command; + use dupe::Dupe; + + use super::*; + + #[tokio::test] + async fn test_gather_output() -> anyhow::Result<()> { + let mut cmd = if cfg!(windows) { + background_command("powershell") + } else { + background_command("sh") + }; + cmd.args(["-c", "echo hello"]); + + let (status, stdout, stderr) = gather_output(cmd, futures::future::pending()).await?; + assert!(matches!(status, GatherOutputStatus::Finished { exit_code, .. } if exit_code == 0)); + assert_eq!(str::from_utf8(&stdout)?.trim(), "hello"); + assert_eq!(stderr, b""); + + Ok(()) + } + + #[tokio::test] + async fn test_gather_does_not_wait_for_children() -> anyhow::Result<()> { + // If we wait for sleep, this will time out. + let mut cmd = if cfg!(windows) { + background_command("powershell") + } else { + background_command("sh") + }; + if cfg!(windows) { + cmd.args([ + "-c", + "Start-Job -ScriptBlock {sleep 10} | Out-Null; echo hello", + ]); + } else { + cmd.args(["-c", "(sleep 10 &) && echo hello"]); + } + + let timeout = if cfg!(windows) { 9 } else { 1 }; + let (status, stdout, stderr) = gather_output( + cmd, + timeout_into_cancellation(Some(Duration::from_secs(timeout))), + ) + .await?; + assert!( + matches!(status, GatherOutputStatus::Finished { exit_code, .. } if exit_code == 0), + "status: {:?}", + status + ); + assert_eq!(str::from_utf8(&stdout)?.trim(), "hello"); + assert_eq!(stderr, b""); + + Ok(()) + } + + #[tokio::test] + async fn test_gather_output_timeout() -> anyhow::Result<()> { + let now = Instant::now(); + + let cmd = if cfg!(windows) { + let mut cmd = background_command("powershell"); + cmd.args(["-c", "echo hello; sleep 10; echo bye"]); + cmd + } else { + let mut cmd = background_command("sh"); + cmd.args(["-c", "echo hello && sleep 10 && echo bye"]); + cmd + }; + + let timeout = if cfg!(windows) { 5 } else { 3 }; + let (status, stdout, _stderr) = gather_output( + cmd, + timeout_into_cancellation(Some(Duration::from_secs(timeout))), + ) + .await?; + assert!( + matches!(status, GatherOutputStatus::TimedOut(..)), + "status: {:?}", + status + ); + assert_eq!(str::from_utf8(&stdout)?.trim(), "hello"); + // Do not check stderr because stderr may contain a message like: + // ``` + // sh: line 1: 41348 Killed: 9 + // ``` + // or it can be empty, which depends on which process is killed first by killpg. + + assert!(now.elapsed() < Duration::from_secs(9)); // Lots of leeway here. 
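+ // (A 9s bound sits between the 3-5s timeout and the command's 10s sleep, so it + // distinguishes "killed at the timeout" from "slept to completion".)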
+ + Ok(()) + } + + #[cfg(unix)] + #[tokio::test] + async fn test_spawn_retry_txt_busy() -> anyhow::Result<()> { + use futures::future; + use tokio::fs::OpenOptions; + use tokio::io::AsyncWriteExt; + + let tempdir = tempfile::tempdir()?; + let bin = tempdir.path().join("bin"); + + let mut file = OpenOptions::new() + .mode(0o755) + .write(true) + .truncate(true) + .create(true) + .open(&bin) + .await?; + + file.write_all(b"#!/usr/bin/env bash\ntrue\n").await?; + + let cmd = background_command(&bin); + let cmd = ProcessCommand::new(cmd); + let mut process_group = spawn_retry_txt_busy(cmd, { + let mut file = Some(file); + move || { + file.take(); + future::ready(()) + } + }) + .await?; + + let status = process_group.wait().await?; + assert_eq!(status.code(), Some(0)); + + Ok(()) + } + + #[tokio::test] + async fn test_spawn_retry_other_error() -> anyhow::Result<()> { + let tempdir = tempfile::tempdir()?; + let bin = tempdir.path().join("bin"); // Does not actually exist + + let cmd = background_command(&bin); + let cmd = ProcessCommand::new(cmd); + let res = spawn_retry_txt_busy(cmd, || async { panic!("Should not be called!") }).await; + assert!(res.is_err()); + + Ok(()) + } + + #[tokio::test] + async fn test_kill_terminates_process_group() -> anyhow::Result<()> { + use sysinfo::Pid; + use sysinfo::System; + + let cmd = if cfg!(windows) { + let mut cmd = background_command("powershell"); + cmd.args([ + "-c", + "echo $PID; Start-Process -FilePath \"powershell\" -Wait -NoNewWindow -ArgumentList \ 'echo $PID; Sleep 1000'", + ]); + cmd + } else { + let mut cmd = background_command("sh"); + cmd.arg("-c") + .arg("( ( echo $PPID && echo $$ && sleep 1000 ) )"); + cmd + }; + + // On Windows we need more time to run PowerShell + let timeout = if cfg!(windows) { 7 } else { 1 }; + let (_status, stdout, _stderr) = gather_output( + cmd, + timeout_into_cancellation(Some(Duration::from_secs(timeout))), + ) + .await?; + let out = str::from_utf8(&stdout)?; + let pids: Vec<&str> = out.split('\n').collect(); + let ppid = Pid::from_str(pids.first().context("no ppid")?.trim())?; + let pid = Pid::from_str(pids.get(1).context("no pid")?.trim())?; + let sys = System::new_all(); + + // We want to check that the process has exited; if the PID still exists but under a + // different parent, the PID was reused and the original process is gone. + if let Some(process) = sys.process(pid) { + if let Some(parent) = process.parent() { + if parent != ppid { + return Ok(()); + } + } + return Err(anyhow::anyhow!("PID still exists: {}", pid)); + } + Ok(()) + } + + #[tokio::test] + async fn test_stream_command_events_ends() -> anyhow::Result<()> { + let mut cmd = if cfg!(windows) { + background_command("powershell") + } else { + background_command("sh") + }; + cmd.args(["-c", "exit 0"]); + + let mut cmd = ProcessCommand::new(cmd); + let process = cmd.spawn().map_err(anyhow::Error::from); + let mut events = stream_command_events( + process, + futures::future::pending(), + DefaultStatusDecoder, + DefaultKillProcess::default(), + true, + )? + .boxed(); + assert_matches!(events.next().await, Some(Ok(CommandEvent::Exit(..)))); + assert_matches!(futures::poll!(events.next()), Poll::Ready(None)); + Ok(()) + } + + #[cfg(unix)] + #[tokio::test] + async fn test_signal_exit_code() -> anyhow::Result<()> { + use nix::sys::signal::Signal; + + let mut cmd = background_command("sh"); + cmd.arg("-c").arg("kill -KILL \"$$\""); + let (status, _stdout, _stderr) = gather_output(cmd, futures::future::pending()).await?; + + assert_matches!( + status, + GatherOutputStatus::Finished { exit_code, ..
} if exit_code == 128 + Signal::SIGKILL as i32 + ); + + Ok(()) + } + + #[tokio::test] + async fn timeout_kills_before_dropping_decoder() -> anyhow::Result<()> { + struct Kill { + killed: Arc>, + } + + #[async_trait] + impl KillProcess for Kill { + async fn kill(self, process_group: &mut ProcessGroup) -> anyhow::Result<()> { + *self.killed.lock().unwrap() = true; + + // We still need to kill the process. On Windows in particular our test will hang + // if we do not. + DefaultKillProcess::default().kill(process_group).await + } + } + + struct Decoder { + killed: Arc>, + cancelled: Arc>, + } + + #[async_trait::async_trait] + impl StatusDecoder for Decoder { + async fn decode_status(self, _status: ExitStatus) -> anyhow::Result { + panic!("Should not be called in this test since we timeout") + } + + async fn cancel(self) -> anyhow::Result<()> { + assert!(*self.killed.lock().unwrap()); + *self.cancelled.lock().unwrap() = true; + Ok(()) + } + } + + let killed = Arc::new(Mutex::new(false)); + let cancelled = Arc::new(Mutex::new(false)); + + let mut cmd = if cfg!(windows) { + background_command("powershell") + } else { + background_command("sh") + }; + cmd.args(["-c", "sleep 10000"]); + + let mut cmd = ProcessCommand::new(cmd); + let process = cmd.spawn().map_err(anyhow::Error::from); + + let stream = stream_command_events( + process, + timeout_into_cancellation(Some(Duration::from_secs(1))), + Decoder { + killed: killed.dupe(), + cancelled: cancelled.dupe(), + }, + Kill { + killed: killed.dupe(), + }, + true, + )?; + + let (status, _stdout, _stderr) = decode_command_event_stream(stream).await?; + assert!(matches!(status, GatherOutputStatus::TimedOut(..))); + + assert!(*killed.lock().unwrap()); + assert!(*cancelled.lock().unwrap()); + + Ok(()) + } + + #[cfg(unix)] + #[tokio::test] + async fn test_no_stdio_stream_command_events() -> anyhow::Result<()> { + let mut cmd = background_command("sh"); + cmd.args(["-c", "echo hello"]); + + let tempdir = tempfile::tempdir()?; + let stdout = tempdir.path().join("stdout"); + let mut cmd = ProcessCommand::new(cmd); + cmd.stdout(std::fs::File::create(stdout.clone())?); + + let process_group = cmd.spawn().map_err(anyhow::Error::from); + let mut events = stream_command_events( + process_group, + futures::future::pending(), + DefaultStatusDecoder, + DefaultKillProcess::default(), + false, + )? + .boxed(); + assert_matches!(events.next().await, Some(Ok(CommandEvent::Exit(..)))); + assert_matches!(futures::poll!(events.next()), Poll::Ready(None)); + + assert_matches!(tokio::fs::read_to_string(stdout).await?.as_str(), "hello\n"); + + Ok(()) + } +} diff --git a/app/buck2_forkserver/src/run/interruptible_async_read.rs b/app/buck2_forkserver/src/run/interruptible_async_read.rs index d1bcb6f5cf9bc..0d76c307f7896 100644 --- a/app/buck2_forkserver/src/run/interruptible_async_read.rs +++ b/app/buck2_forkserver/src/run/interruptible_async_read.rs @@ -28,12 +28,12 @@ use tokio_util::codec::FramedRead; /// This trait represents the ability to transition a Reader (R) to a Drainer (Self). Both are /// [AsyncRead], but we expect the Drainer (which implements this trait) to not wait longer for /// more data to be produced. -pub trait DrainerFromReader { +pub(crate) trait DrainerFromReader { fn from_reader(reader: R) -> Self; } /// This trait represents a AsyncRead that can be told to interrupt (and transition to draining). 
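/// A hedged sketch of the intended lifecycle (state names illustrative, echoing InterruptibleAsyncReadState below): /// /// ```ignore /// enum State<R, D> { /// Reading(R), // normal reads; may wait for more data /// Draining(D), // after notify_interrupt(): only consume what is already buffered /// } /// ```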
-pub trait InterruptNotifiable { +pub(crate) trait InterruptNotifiable { fn notify_interrupt(self: Pin<&mut Self>); } @@ -42,7 +42,7 @@ pub trait InterruptNotifiable { /// proceed to "drain" the reader, which means reading the data that is there but not waiting for /// any further data to get written. #[pin_project] -pub struct InterruptibleAsyncRead { +pub(crate) struct InterruptibleAsyncRead { state: InterruptibleAsyncReadState, } @@ -95,7 +95,7 @@ mod unix_non_blocking_drainer { /// hasn't completed `select()` on the pipe we want to drain, we'll still get to execute /// `read()` (and potentially get WouldBlock if there is nothing to read and the pipe isn't /// ready). - pub struct UnixNonBlockingDrainer { + pub(crate) struct UnixNonBlockingDrainer { fd: RawFd, // Kept so this is dropped and closed properly. _owner: R, @@ -140,10 +140,10 @@ mod unix_non_blocking_drainer { } #[cfg(unix)] -pub use unix_non_blocking_drainer::*; +pub(crate) use unix_non_blocking_drainer::*; #[pin_project] -pub struct TimeoutDrainer { +pub(crate) struct TimeoutDrainer { state: TimeoutDrainerState, // To have a generic parameter like UnixNonBlockingDrainer does. _phantom: PhantomData, @@ -281,7 +281,6 @@ mod tests { use dupe::Dupe; use futures::stream::Stream; use tokio_util::codec::BytesCodec; - use tokio_util::codec::FramedRead; use super::*; diff --git a/app/buck2_forkserver/src/run/mod.rs b/app/buck2_forkserver/src/run/mod.rs deleted file mode 100644 index e5c88238b52ac..0000000000000 --- a/app/buck2_forkserver/src/run/mod.rs +++ /dev/null @@ -1,732 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -mod interruptible_async_read; -pub mod status_decoder; - -use std::borrow::Cow; -use std::io; -use std::path::Path; -use std::pin::Pin; -use std::process::Command; -use std::process::ExitStatus; -use std::process::Stdio; -use std::task::Context; -use std::task::Poll; -use std::time::Duration; - -use anyhow::Context as _; -use buck2_core::fs::fs_util; -use buck2_core::fs::paths::abs_path::AbsPath; -use bytes::Bytes; -use futures::future::Future; -use futures::future::FutureExt; -use futures::stream::Stream; -use futures::stream::StreamExt; -use futures::stream::TryStreamExt; -use pin_project::pin_project; -use tokio::process::Child; -use tokio_util::codec::BytesCodec; -use tokio_util::codec::FramedRead; - -use self::interruptible_async_read::InterruptNotifiable; -use self::interruptible_async_read::InterruptibleAsyncRead; -use self::status_decoder::DecodedStatus; -use self::status_decoder::DefaultStatusDecoder; -use self::status_decoder::StatusDecoder; - -#[derive(Debug)] -pub enum GatherOutputStatus { - /// Contains the exit code. 
- Finished { - exit_code: i32, - execution_stats: Option, - }, - TimedOut(Duration), - Cancelled, - SpawnFailed(String), -} - -impl From for GatherOutputStatus { - fn from(d: DecodedStatus) -> Self { - match d { - DecodedStatus::Status { - exit_code, - execution_stats, - } => Self::Finished { - exit_code, - execution_stats, - }, - DecodedStatus::SpawnFailed(v) => Self::SpawnFailed(v), - } - } -} - -#[derive(Debug)] -pub enum CommandEvent { - Stdout(Bytes), - Stderr(Bytes), - Exit(GatherOutputStatus), -} - -enum StdioEvent { - Stdout(Bytes), - Stderr(Bytes), -} - -impl From for CommandEvent { - fn from(stdio: StdioEvent) -> Self { - match stdio { - StdioEvent::Stdout(bytes) => CommandEvent::Stdout(bytes), - StdioEvent::Stderr(bytes) => CommandEvent::Stderr(bytes), - } - } -} - -/// This stream will yield [CommandEvent] whenever we have something on stdout or stderr (this is -/// our stdio stream), and it'll finish up the stream with the exit status. This is basically like -/// a select, but with the exit guaranteed to come last. -#[pin_project] -struct CommandEventStream { - exit: Option>, - - done: bool, - - #[pin] - status: futures::future::Fuse, - - #[pin] - stdio: futures::stream::Fuse, -} - -impl CommandEventStream -where - Status: Future, - Stdio: Stream, -{ - fn new(status: Status, stdio: Stdio) -> Self { - Self { - exit: None, - done: false, - status: status.fuse(), - stdio: stdio.fuse(), - } - } -} - -impl Stream for CommandEventStream -where - Status: Future>, - Stdio: Stream> + InterruptNotifiable, -{ - type Item = anyhow::Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - if *this.done { - return Poll::Ready(None); - } - - // This future is fused so it's guaranteed to be ready once. If it does, capture the exit - // status, we'll return it later. - if let Poll::Ready(status) = this.status.poll(cx) { - *this.exit = Some(status); - this.stdio.as_mut().get_pin_mut().notify_interrupt(); - } - - // This stream is also fused, so if it returns None, we'll know it's done for good and we'll - // return the exit status if it's available. - if let Some(stdio) = futures::ready!(this.stdio.poll_next(cx)) { - return Poll::Ready(Some(stdio.map(|event| event.into()))); - } - - // If we got here that means the stream is done. If we have it we return, and if we don't - // we report we're pending, because we'll have polled it already earlier. 
- if let Some(exit) = this.exit.take() { - *this.done = true; - return Poll::Ready(Some(exit.map(CommandEvent::Exit))); - } - - Poll::Pending - } -} - -pub async fn timeout_into_cancellation( - timeout: Option, -) -> anyhow::Result { - match timeout { - Some(t) => { - tokio::time::sleep(t).await; - Ok(GatherOutputStatus::TimedOut(t)) - } - None => futures::future::pending().await, - } -} - -pub fn stream_command_events( - child: io::Result, - cancellation: T, - decoder: impl StatusDecoder, - kill_process: impl KillProcess, - stream_stdio: bool, -) -> anyhow::Result>> -where - T: Future> + Send, -{ - let mut child = match child { - Ok(child) => child, - Err(e) => { - let event = Ok(CommandEvent::Exit(GatherOutputStatus::SpawnFailed( - e.to_string(), - ))); - return Ok(futures::stream::once(futures::future::ready(event)).left_stream()); - } - }; - - let stdio = if stream_stdio { - let stdout = child.stdout.take().context("Child stdout is not piped")?; - let stderr = child.stderr.take().context("Child stderr is not piped")?; - - #[cfg(unix)] - type Drainer = self::interruptible_async_read::UnixNonBlockingDrainer; - - // On Windows, for the time being we just give ourselves a timeout to finish reading. - // Ideally this would perform a non-blocking read on self instead like we do on Unix. - #[cfg(not(unix))] - type Drainer = self::interruptible_async_read::TimeoutDrainer; - - let stdout = InterruptibleAsyncRead::<_, Drainer<_>>::new(stdout); - let stderr = InterruptibleAsyncRead::<_, Drainer<_>>::new(stderr); - let stdout = FramedRead::new(stdout, BytesCodec::new()) - .map(|data| anyhow::Ok(StdioEvent::Stdout(data?.freeze()))); - let stderr = FramedRead::new(stderr, BytesCodec::new()) - .map(|data| anyhow::Ok(StdioEvent::Stderr(data?.freeze()))); - - futures::stream::select(stdout, stderr).left_stream() - } else { - futures::stream::empty().right_stream() - }; - - let status = async move { - enum Outcome { - Finished(ExitStatus), - Cancelled(GatherOutputStatus), - } - - // NOTE: This wrapping here is so that we release the borrow of `child` that stems from - // `wait()` by the time we call kill_process a few lines down. - let execute = async { - let status = child.wait(); - futures::pin_mut!(status); - futures::pin_mut!(cancellation); - - anyhow::Ok(match futures::future::select(status, cancellation).await { - futures::future::Either::Left((status, _)) => Outcome::Finished(status?), - futures::future::Either::Right((res, _)) => Outcome::Cancelled(res?), - }) - }; - - anyhow::Ok(match execute.await? { - Outcome::Finished(status) => decoder.decode_status(status).await?.into(), - Outcome::Cancelled(res) => { - kill_process - .kill(&mut child) - .context("Failed to terminate child after timeout")?; - - decoder - .cancel() - .await - .context("Failed to cancel status decoder after timeout")?; - - // We just killed the child, so this should finish immediately. We should still call - // this to release any process. - child - .wait() - .await - .context("Failed to await child after kill")?; - - res - } - }) - }; - - Ok(CommandEventStream::new(status, stdio).right_stream()) -} - -pub(crate) async fn decode_command_event_stream( - stream: S, -) -> anyhow::Result<(GatherOutputStatus, Vec, Vec)> -where - S: Stream>, -{ - futures::pin_mut!(stream); - - let mut stdout = Vec::::new(); - let mut stderr = Vec::::new(); - - while let Some(event) = stream.try_next().await? 
{ - match event { - CommandEvent::Stdout(bytes) => stdout.extend(&bytes), - CommandEvent::Stderr(bytes) => stderr.extend(&bytes), - CommandEvent::Exit(exit) => return Ok((exit, stdout, stderr)), - } - } - - Err(anyhow::Error::msg( - "Stream did not yield CommandEvent::Exit", - )) -} - -pub async fn gather_output( - cmd: Command, - cancellation: T, -) -> anyhow::Result<(GatherOutputStatus, Vec, Vec)> -where - T: Future> + Send, -{ - let cmd = prepare_command(cmd); - - let child = spawn_retry_txt_busy(cmd, || tokio::time::sleep(Duration::from_millis(50))).await; - let stream = stream_command_events( - child, - cancellation, - DefaultStatusDecoder, - DefaultKillProcess, - true, - )?; - decode_command_event_stream(stream).await -} - -/// Dependency injection for kill. We use this in testing. -pub trait KillProcess { - fn kill(self, child: &mut Child) -> anyhow::Result<()>; -} - -pub struct DefaultKillProcess; - -impl KillProcess for DefaultKillProcess { - fn kill(self, child: &mut Child) -> anyhow::Result<()> { - let pid = match child.id() { - Some(pid) => pid, - None => { - // Child just exited, so in this case we don't want to kill anything. - return Ok(()); - } - }; - tracing::info!("Killing process {}", pid); - #[cfg(unix)] - { - if true { - // On unix we want killpg, so we don't use the default impl. - // We use `if true` here to do less conditional compilation - // or conditional dependencies. - return kill_process_impl(pid); - } - } - // `start_kill` is just `std::process::Child::kill` on Windows. - // Ignore the error because `kill` fails on Windows if the process has been terminated - // even if we did not wait for it. - let _ignore = child.start_kill(); - Ok(()) - } -} - -#[cfg(unix)] -fn kill_process_impl(pid: u32) -> anyhow::Result<()> { - use nix::sys::signal; - use nix::sys::signal::Signal; - use nix::unistd::Pid; - - let pid: i32 = pid.try_into().context("PID does not fit a i32")?; - - signal::killpg(Pid::from_raw(pid), Signal::SIGKILL) - .with_context(|| format!("Failed to kill process {}", pid)) -} - -/// Unify the the behavior of using a relative path for the executable between Unix and Windows. On -/// UNIX, the path is understood to be relative to the cwd of the *spawned process*, whereas on -/// Windows, it's relative ot the cwd of the *spawning* process. -/// -/// Here, we unify the two behaviors since we always run our subprocesses with a known cwd: we -/// check if the executable actually exists relative to said cwd, and if it does, we use that. -pub fn maybe_absolutize_exe<'a>( - exe: &'a (impl AsRef + ?Sized), - spawned_process_cwd: &'_ AbsPath, -) -> anyhow::Result> { - let exe = exe.as_ref(); - - let abs = spawned_process_cwd.join(exe); - if fs_util::try_exists(&abs).context("Error absolute-izing executable")? { - return Ok(abs.into_path_buf().into()); - } - - Ok(exe.into()) -} - -pub fn prepare_command(mut cmd: Command) -> tokio::process::Command { - #[cfg(unix)] - { - use std::os::unix::process::CommandExt; - cmd.process_group(0); - } - - cmd.stdin(Stdio::null()) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()); - cmd.into() -} - -/// fork-exec is a bit tricky in a busy process. We often have files open to writing just prior to -/// executing them (as we download from RE), and many processes being spawned concurrently. We do -/// close the fds properly before the exec, but what can happn is: -/// -/// - Some thread forks -/// - We close the file. At this time we don't have it open, but the forked process does. -/// - We try to exec the file. 
This fails because the file is open for writing (by the forked -/// process). -/// - The forked process execs. At this point the file is closed (because everything is CLOEXEC). -/// -/// The window during which the forked process holds the fd is small, so retrying a couple times -/// here should let us make this work. -/// -/// The more correct solution for this here would be to start a fork server in a separate process -/// when we start. However, until we get there, this should do the trick. -async fn spawn_retry_txt_busy( - mut cmd: tokio::process::Command, - mut delay: F, -) -> io::Result -where - F: FnMut() -> D, - D: Future, -{ - let mut attempts = 10; - - loop { - let res = cmd.spawn(); - - let res_errno = res.as_ref().map_err(|e| e.raw_os_error()); - let is_txt_busy = matches!(res_errno, Err(Some(libc::ETXTBSY))); - - if attempts == 0 || !is_txt_busy { - return res; - } - - delay().await; - - attempts -= 1; - } -} - -#[cfg(test)] -mod tests { - use std::str; - use std::sync::Arc; - use std::sync::Mutex; - use std::time::Instant; - - use assert_matches::assert_matches; - use buck2_util::process::async_background_command; - use buck2_util::process::background_command; - use dupe::Dupe; - - use super::*; - - #[tokio::test] - async fn test_gather_output() -> anyhow::Result<()> { - let mut cmd = if cfg!(windows) { - background_command("powershell") - } else { - background_command("sh") - }; - cmd.args(["-c", "echo hello"]); - - let (status, stdout, stderr) = gather_output(cmd, futures::future::pending()).await?; - assert!(matches!(status, GatherOutputStatus::Finished { exit_code, .. } if exit_code == 0)); - assert_eq!(str::from_utf8(&stdout)?.trim(), "hello"); - assert_eq!(stderr, b""); - - Ok(()) - } - - #[tokio::test] - async fn test_gather_does_not_wait_for_children() -> anyhow::Result<()> { - // If we wait for sleep, this will time out. - let mut cmd = if cfg!(windows) { - background_command("powershell") - } else { - background_command("sh") - }; - if cfg!(windows) { - cmd.args([ - "-c", - "Start-Job -ScriptBlock {sleep 10} | Out-Null; echo hello", - ]); - } else { - cmd.args(["-c", "(sleep 10 &) && echo hello"]); - } - - let timeout = if cfg!(windows) { 5 } else { 1 }; - let (status, stdout, stderr) = gather_output( - cmd, - timeout_into_cancellation(Some(Duration::from_secs(timeout))), - ) - .await?; - assert!(matches!(status, GatherOutputStatus::Finished { exit_code, .. } if exit_code == 0)); - assert_eq!(str::from_utf8(&stdout)?.trim(), "hello"); - assert_eq!(stderr, b""); - - Ok(()) - } - - #[tokio::test] - async fn test_gather_output_timeout() -> anyhow::Result<()> { - let now = Instant::now(); - - let cmd = if cfg!(windows) { - let mut cmd = background_command("powershell"); - cmd.args(["-c", "echo hello; sleep 10; echo bye"]); - cmd - } else { - let mut cmd = background_command("sh"); - cmd.args(["-c", "echo hello && sleep 10 && echo bye"]); - cmd - }; - - let timeout = if cfg!(windows) { 5 } else { 1 }; - let (status, stdout, _stderr) = gather_output( - cmd, - timeout_into_cancellation(Some(Duration::from_secs(timeout))), - ) - .await?; - assert!(matches!(status, GatherOutputStatus::TimedOut(..))); - assert_eq!(str::from_utf8(&stdout)?.trim(), "hello"); - // Do not check stderr because stderr may contain a message like: - // sh: line 1: 41348 Killed: 9 - - assert!(now.elapsed() < Duration::from_secs(9)); // Lots of leeway here. 
- - Ok(()) - } - - #[cfg(unix)] - #[tokio::test] - async fn test_spawn_retry_txt_busy() -> anyhow::Result<()> { - use futures::future; - use tokio::fs::OpenOptions; - use tokio::io::AsyncWriteExt; - - let tempdir = tempfile::tempdir()?; - let bin = tempdir.path().join("bin"); - - let mut file = OpenOptions::new() - .mode(0o755) - .write(true) - .create(true) - .open(&bin) - .await?; - - file.write_all(b"#!/usr/bin/env bash\ntrue\n").await?; - - let cmd = async_background_command(&bin); - let mut child = spawn_retry_txt_busy(cmd, { - let mut file = Some(file); - move || { - file.take(); - future::ready(()) - } - }) - .await?; - - let status = child.wait().await?; - assert_eq!(status.code(), Some(0)); - - Ok(()) - } - - #[tokio::test] - async fn test_spawn_retry_other_error() -> anyhow::Result<()> { - let tempdir = tempfile::tempdir()?; - let bin = tempdir.path().join("bin"); // Does not actually exist - - let cmd = async_background_command(&bin); - let res = spawn_retry_txt_busy(cmd, || async { panic!("Should not be called!") }).await; - assert!(res.is_err()); - - Ok(()) - } - - #[cfg(unix)] - #[tokio::test] - async fn test_kill_terminates_process_group() -> anyhow::Result<()> { - use std::str::FromStr; - - use nix::errno::Errno; - use nix::sys::signal; - use nix::unistd::Pid; - - // This command will spawn 2 subprocesses (subshells) and print the PID of the 2nd shell. - let mut cmd = background_command("sh"); - cmd.arg("-c").arg("( ( echo $$ && sleep 1000 ) )"); - let (_status, stdout, _stderr) = - gather_output(cmd, timeout_into_cancellation(Some(Duration::from_secs(1)))).await?; - let pid = i32::from_str(std::str::from_utf8(&stdout)?.trim())?; - - for _ in 0..10 { - // This does rely on no PID reuse but the odds of PIDs wrapping around all the way to the - // same PID we just used before we issue this kill seem low. So, we expect this to error - // out. - if matches!(signal::kill(Pid::from_raw(pid), None), Err(e) if e == Errno::ESRCH) { - return Ok(()); - } - - // This is awkward but unfortunately the process does not immediately disappear. - tokio::time::sleep(Duration::from_secs(1)).await; - } - - Err(anyhow::anyhow!("PID did not exit: {}", pid)) - } - - #[tokio::test] - async fn test_stream_command_events_ends() -> anyhow::Result<()> { - let mut cmd = if cfg!(windows) { - background_command("powershell") - } else { - background_command("sh") - }; - cmd.args(["-c", "exit 0"]); - - let child = prepare_command(cmd).spawn(); - let mut events = stream_command_events( - child, - futures::future::pending(), - DefaultStatusDecoder, - DefaultKillProcess, - true, - )? - .boxed(); - assert_matches!(events.next().await, Some(Ok(CommandEvent::Exit(..)))); - assert_matches!(futures::poll!(events.next()), Poll::Ready(None)); - Ok(()) - } - - #[cfg(unix)] - #[tokio::test] - async fn test_signal_exit_code() -> anyhow::Result<()> { - use nix::sys::signal::Signal; - - let mut cmd = background_command("sh"); - cmd.arg("-c").arg("kill -KILL \"$$\""); - let (status, _stdout, _stderr) = gather_output(cmd, futures::future::pending()).await?; - - assert_matches!( - status, - GatherOutputStatus::Finished { exit_code, .. } if exit_code == 128 + Signal::SIGKILL as i32 - ); - - Ok(()) - } - - #[tokio::test] - async fn timeout_kills_before_dropping_decoder() -> anyhow::Result<()> { - struct Kill { - killed: Arc>, - } - - impl KillProcess for Kill { - fn kill(self, child: &mut Child) -> anyhow::Result<()> { - *self.killed.lock().unwrap() = true; - - // We still need to kill the process. 
On Windows in particular our test will hang - // if we do not. - DefaultKillProcess.kill(child) - } - } - - struct Decoder { - killed: Arc>, - cancelled: Arc>, - } - - #[async_trait::async_trait] - impl StatusDecoder for Decoder { - async fn decode_status(self, _status: ExitStatus) -> anyhow::Result { - panic!("Should not be called in this test since we timeout") - } - - async fn cancel(self) -> anyhow::Result<()> { - assert!(*self.killed.lock().unwrap()); - *self.cancelled.lock().unwrap() = true; - Ok(()) - } - } - - let killed = Arc::new(Mutex::new(false)); - let cancelled = Arc::new(Mutex::new(false)); - - let mut cmd = if cfg!(windows) { - background_command("powershell") - } else { - background_command("sh") - }; - cmd.args(["-c", "sleep 10000"]); - - let mut cmd = prepare_command(cmd); - let child = cmd.spawn(); - - let stream = stream_command_events( - child, - timeout_into_cancellation(Some(Duration::from_secs(1))), - Decoder { - killed: killed.dupe(), - cancelled: cancelled.dupe(), - }, - Kill { - killed: killed.dupe(), - }, - true, - )?; - - let (status, _stdout, _stderr) = decode_command_event_stream(stream).await?; - assert!(matches!(status, GatherOutputStatus::TimedOut(..))); - - assert!(*killed.lock().unwrap()); - assert!(*cancelled.lock().unwrap()); - - Ok(()) - } - - #[cfg(unix)] - #[tokio::test] - async fn test_no_stdio_stream_command_events() -> anyhow::Result<()> { - let mut cmd = background_command("sh"); - cmd.args(["-c", "echo hello"]); - - let mut cmd = prepare_command(cmd); - let tempdir = tempfile::tempdir()?; - let stdout = tempdir.path().join("stdout"); - cmd.stdout(std::fs::File::create(stdout.clone())?); - - let child = cmd.spawn(); - let mut events = stream_command_events( - child, - futures::future::pending(), - DefaultStatusDecoder, - DefaultKillProcess, - false, - )? - .boxed(); - assert_matches!(events.next().await, Some(Ok(CommandEvent::Exit(..)))); - assert_matches!(futures::poll!(events.next()), Poll::Ready(None)); - - assert_matches!(tokio::fs::read_to_string(stdout).await?.as_str(), "hello\n"); - - Ok(()) - } -} diff --git a/app/buck2_forkserver/src/run/process_group.rs b/app/buck2_forkserver/src/run/process_group.rs new file mode 100644 index 0000000000000..da49463294702 --- /dev/null +++ b/app/buck2_forkserver/src/run/process_group.rs @@ -0,0 +1,132 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use std::process::Command as StdCommand; +use std::process::ExitStatus; +use std::process::Stdio; + +use thiserror::Error; +use tokio::io; +use tokio::process::ChildStderr; +use tokio::process::ChildStdout; + +#[cfg(unix)] +use crate::unix::process_group as imp; +#[cfg(windows)] +use crate::win::process_group as imp; + +#[derive(Error, Debug)] +pub(crate) enum SpawnError { + #[error("Failed to spawn a process")] + IoError(#[from] io::Error), + #[error("Failed to create a process group")] + GenericError(#[from] anyhow::Error), +} + +pub(crate) struct ProcessCommand { + inner: imp::ProcessCommandImpl, +} + +impl ProcessCommand { + pub(crate) fn new(mut cmd: StdCommand) -> Self { + cmd.stdin(Stdio::null()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()); + Self { + inner: imp::ProcessCommandImpl::new(cmd), + } + } + + pub(crate) fn spawn(&mut self) -> anyhow::Result<ProcessGroup> { + let child = self.inner.spawn()?; + Ok(ProcessGroup { + inner: imp::ProcessGroupImpl::new(child)?, + }) + } + + #[allow(dead_code)] + pub(crate) fn stdout<T: Into<Stdio>>(&mut self, cfg: T) -> &mut ProcessCommand { + self.inner.stdout(cfg.into()); + self + } + + #[allow(dead_code)] + pub(crate) fn stderr<T: Into<Stdio>>(&mut self, cfg: T) -> &mut ProcessCommand { + self.inner.stderr(cfg.into()); + self + } +} + +pub(crate) struct ProcessGroup { + inner: imp::ProcessGroupImpl, +} + +impl ProcessGroup { + pub(crate) fn take_stdout(&mut self) -> Option<ChildStdout> { + self.inner.take_stdout() + } + + pub(crate) fn take_stderr(&mut self) -> Option<ChildStderr> { + self.inner.take_stderr() + } + + pub(crate) async fn wait(&mut self) -> io::Result<ExitStatus> { + self.inner.wait().await + } + + pub(crate) fn id(&self) -> Option<u32> { + self.inner.id() + } + + pub(crate) async fn kill( + &self, + graceful_shutdown_timeout_s: Option<u32>, + ) -> anyhow::Result<()> { + self.inner.kill(graceful_shutdown_timeout_s).await + } +} + +#[cfg(test)] +mod tests { + use buck2_util::process::background_command; + + use crate::run::process_group::ProcessCommand; + + // This test checks the basic functionality of the process implementation, as it differs between Unix and Windows + #[tokio::test] + async fn test_process_impl() -> anyhow::Result<()> { + let mut cmd; + + if cfg!(windows) { + cmd = background_command("cmd"); + cmd.arg("/c"); + } else { + cmd = background_command("sh"); + cmd.arg("-c"); + } + cmd.arg("exit 2"); + + let mut cmd = ProcessCommand::new(cmd); + let mut child = cmd.spawn().unwrap(); + + let id = child.id().expect("missing id"); + assert!(id > 0); + + let status = child.wait().await?; + assert_eq!(status.code(), Some(2)); + + // Test that the `.wait()` method is fused, like tokio's + let status = child.wait().await?; + assert_eq!(status.code(), Some(2)); + + // Can't get the id after the process has exited + assert_eq!(child.id(), None); + Ok(()) + } +} diff --git a/app/buck2_forkserver/src/run/status_decoder.rs b/app/buck2_forkserver/src/run/status_decoder.rs index 8b7087e9451fe..1263561078689 100644 --- a/app/buck2_forkserver/src/run/status_decoder.rs +++ b/app/buck2_forkserver/src/run/status_decoder.rs @@ -14,7 +14,7 @@ use async_trait::async_trait; use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; use buck2_miniperf_proto::MiniperfOutput; -pub enum DecodedStatus { +pub(crate) enum DecodedStatus { /// An actual status.
Status { exit_code: i32, @@ -26,7 +26,7 @@ pub enum DecodedStatus { } #[async_trait] -pub trait StatusDecoder { +pub(crate) trait StatusDecoder { /// Status decoders receive the exit status of the command we ran, but they might also obtain /// information out of band to obtain a different exit status. async fn decode_status(self, status: ExitStatus) -> anyhow::Result; @@ -35,7 +35,7 @@ pub trait StatusDecoder { async fn cancel(self) -> anyhow::Result<()>; } -pub struct DefaultStatusDecoder; +pub(crate) struct DefaultStatusDecoder; #[async_trait] impl StatusDecoder for DefaultStatusDecoder { @@ -51,7 +51,7 @@ impl StatusDecoder for DefaultStatusDecoder { } } -pub fn default_decode_exit_code(status: ExitStatus) -> i32 { +fn default_decode_exit_code(status: ExitStatus) -> i32 { let exit_code; #[cfg(unix)] @@ -70,11 +70,12 @@ pub fn default_decode_exit_code(status: ExitStatus) -> i32 { exit_code.unwrap_or(-1) } -pub struct MiniperfStatusDecoder { +pub(crate) struct MiniperfStatusDecoder { out_path: AbsNormPathBuf, } impl MiniperfStatusDecoder { + #[allow(dead_code)] pub fn new(out_path: AbsNormPathBuf) -> Self { Self { out_path } } @@ -121,6 +122,8 @@ impl StatusDecoder for MiniperfStatusDecoder { cpu_instructions_kernel: Some( counters.kernel_instructions.adjusted_count(), ), + userspace_events: Some(counters.user_instructions.to_proto()), + kernel_events: Some(counters.kernel_instructions.to_proto()), }); if let Err(e) = execution_stats.as_ref() { diff --git a/app/buck2_forkserver/src/unix.rs b/app/buck2_forkserver/src/unix.rs new file mode 100644 index 0000000000000..e783bbd78909a --- /dev/null +++ b/app/buck2_forkserver/src/unix.rs @@ -0,0 +1,16 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +mod command; +mod launch; +pub(crate) mod process_group; +mod service; + +pub use command::run_forkserver; +pub use launch::launch_forkserver; diff --git a/app/buck2_forkserver/src/unix/launch.rs b/app/buck2_forkserver/src/unix/launch.rs index 3687b9e766df9..c95d1ca9270b5 100644 --- a/app/buck2_forkserver/src/unix/launch.rs +++ b/app/buck2_forkserver/src/unix/launch.rs @@ -25,6 +25,7 @@ pub async fn launch_forkserver( exe: impl AsRef, args: impl IntoIterator>, state_dir: &AbsNormPath, + resource_control_arg: String, ) -> anyhow::Result { let (client_io, server_io) = UnixStream::pair().context("Failed to create fork server channel")?; @@ -45,7 +46,9 @@ pub async fn launch_forkserver( .arg("--fd") .arg(server_io.as_raw_fd().to_string()) .arg("--state-dir") - .arg(state_dir.as_path()); + .arg(state_dir.as_path()) + .arg("--resource-control") + .arg(resource_control_arg); let fds = [server_io.as_raw_fd()]; diff --git a/app/buck2_forkserver/src/unix/mod.rs b/app/buck2_forkserver/src/unix/mod.rs deleted file mode 100644 index 714bcc281f0ba..0000000000000 --- a/app/buck2_forkserver/src/unix/mod.rs +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -mod command; -mod launch; -mod service; - -pub use command::run_forkserver; -pub use launch::launch_forkserver; diff --git a/app/buck2_forkserver/src/unix/process_group.rs b/app/buck2_forkserver/src/unix/process_group.rs new file mode 100644 index 0000000000000..811049b7c265d --- /dev/null +++ b/app/buck2_forkserver/src/unix/process_group.rs @@ -0,0 +1,98 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::os::unix::process::CommandExt; +use std::process::Command as StdCommand; +use std::process::ExitStatus; +use std::process::Stdio; +use std::time::Duration; + +use anyhow::Context; +use buck2_common::kill_util::try_terminate_process_gracefully; +use nix::sys::signal; +use nix::sys::signal::Signal; +use nix::unistd::Pid; +use tokio::io; +use tokio::process::Child; +use tokio::process::ChildStderr; +use tokio::process::ChildStdout; +use tokio::process::Command; + +pub(crate) struct ProcessCommandImpl { + inner: Command, +} + +impl ProcessCommandImpl { + pub(crate) fn new(mut cmd: StdCommand) -> Self { + cmd.process_group(0); + Self { inner: cmd.into() } + } + + pub(crate) fn spawn(&mut self) -> io::Result { + self.inner.spawn() + } + + pub(crate) fn stdout(&mut self, stdout: Stdio) { + self.inner.stdout(stdout); + } + + pub(crate) fn stderr(&mut self, stdout: Stdio) { + self.inner.stderr(stdout); + } +} + +pub(crate) struct ProcessGroupImpl { + inner: Child, +} + +impl ProcessGroupImpl { + pub(crate) fn new(child: Child) -> anyhow::Result { + Ok(ProcessGroupImpl { inner: child }) + } + + pub(crate) fn take_stdout(&mut self) -> Option { + self.inner.stdout.take() + } + + pub(crate) fn take_stderr(&mut self) -> Option { + self.inner.stderr.take() + } + + pub(crate) async fn wait(&mut self) -> io::Result { + self.inner.wait().await + } + + pub(crate) fn id(&self) -> Option { + self.inner.id() + } + + // On unix we use killpg to kill the whole process tree + pub(crate) async fn kill( + &self, + graceful_shutdown_timeout_s: Option, + ) -> anyhow::Result<()> { + let pid: i32 = self + .inner + .id() + .and_then(|id| id.try_into().ok()) + .context("PID does not fit a i32")?; + + if let Some(graceful_shutdown_timeout_s) = graceful_shutdown_timeout_s { + try_terminate_process_gracefully( + pid, + Duration::from_secs(graceful_shutdown_timeout_s as u64), + ) + .await + .with_context(|| format!("Failed to terminate process {} gracefully", pid)) + } else { + signal::killpg(Pid::from_raw(pid), Signal::SIGKILL) + .with_context(|| format!("Failed to kill process {}", pid)) + } + } +} diff --git a/app/buck2_forkserver/src/unix/service.rs b/app/buck2_forkserver/src/unix/service.rs index 6d211d784264a..b149e6a158c66 100644 --- a/app/buck2_forkserver/src/unix/service.rs +++ b/app/buck2_forkserver/src/unix/service.rs @@ -44,7 +44,7 @@ use tonic::Streaming; use crate::convert::encode_event_stream; use crate::run::maybe_absolutize_exe; -use crate::run::prepare_command; +use crate::run::process_group::ProcessCommand; use crate::run::status_decoder::DefaultStatusDecoder; use crate::run::status_decoder::MiniperfStatusDecoder; use crate::run::stream_command_events; @@ -114,6 +114,7 @@ impl Forkserver for UnixForkserverService { timeout, enable_miniperf, std_redirects, + graceful_shutdown_timeout_s, } = msg; let exe = 
OsStr::from_bytes(&exe); @@ -159,13 +160,14 @@ impl Forkserver for UnixForkserverService { } } - let mut cmd = prepare_command(cmd); let stream_stdio = std_redirects.is_none(); + let mut cmd = ProcessCommand::new(cmd); if let Some(std_redirects) = std_redirects { cmd.stdout(File::create(OsStr::from_bytes(&std_redirects.stdout))?); cmd.stderr(File::create(OsStr::from_bytes(&std_redirects.stderr))?); } - let child = cmd.spawn(); + + let process_group = cmd.spawn().map_err(anyhow::Error::from); let timeout = timeout_into_cancellation(timeout); @@ -173,18 +175,22 @@ impl Forkserver for UnixForkserverService { let stream = match miniperf_output { Some(out) => stream_command_events( - child, + process_group, cancellation, MiniperfStatusDecoder::new(out), - DefaultKillProcess, + DefaultKillProcess { + graceful_shutdown_timeout_s, + }, stream_stdio, )? .left_stream(), None => stream_command_events( - child, + process_group, cancellation, DefaultStatusDecoder, - DefaultKillProcess, + DefaultKillProcess { + graceful_shutdown_timeout_s, + }, stream_stdio, )? .right_stream(), @@ -223,7 +229,7 @@ impl MiniperfContainer { #[cfg(all(fbcode_build, target_os = "linux"))] { - miniperf_bin = Some(include_bytes!("miniperf.bin").as_slice()); + miniperf_bin = Some(buck2_miniperf_data::get()); } #[cfg(not(all(fbcode_build, target_os = "linux")))] diff --git a/app/buck2_forkserver/src/win.rs b/app/buck2_forkserver/src/win.rs new file mode 100644 index 0000000000000..a948cda15ea15 --- /dev/null +++ b/app/buck2_forkserver/src/win.rs @@ -0,0 +1,13 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub(crate) mod child_process; +pub(crate) mod job_object; +pub(crate) mod process_group; +mod utils; diff --git a/app/buck2_forkserver/src/win/child_process.rs b/app/buck2_forkserver/src/win/child_process.rs new file mode 100644 index 0000000000000..5efcb641a9b45 --- /dev/null +++ b/app/buck2_forkserver/src/win/child_process.rs @@ -0,0 +1,145 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::future::Future; +use std::os::windows::io::AsRawHandle; +use std::pin::Pin; +use std::process::Child; +use std::process::ExitStatus; +use std::task::Context; +use std::task::Poll; + +use tokio::io; +use tokio::sync::oneshot; +use winapi::um::handleapi; +use winapi::um::threadpoollegacyapiset::UnregisterWaitEx; +use winapi::um::winbase; +use winapi::um::winnt; +use winapi::um::winnt::HANDLE; + +pub(crate) struct ChildProcess { + inner: Child, + waiting: Option, +} + +impl ChildProcess { + pub(crate) fn new(child: Child) -> Self { + Self { + inner: child, + waiting: None, + } + } + + pub(crate) fn as_std(&self) -> &Child { + &self.inner + } + + pub(crate) fn as_std_mut(&mut self) -> &mut Child { + &mut self.inner + } +} + +// This implementation is a copy of tokio internal Future implementation on their Child. 
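// Editorial sketch (not part of the patch): the shape of the graceful
// shutdown that the `graceful_shutdown_timeout_s` wiring above enables on
// unix. The real code path goes through try_terminate_process_gracefully;
// this standalone version with a plain sleep is a simplification, and the
// function name is illustrative.
use std::time::Duration;

use nix::sys::signal::{killpg, Signal};
use nix::unistd::Pid;

async fn terminate_gracefully_sketch(pid: i32, timeout: Duration) -> anyhow::Result<()> {
    let pgid = Pid::from_raw(pid);
    // Ask the process group to exit cleanly first.
    killpg(pgid, Signal::SIGTERM)?;
    // A real implementation would wait on the child; sleeping is the
    // simplest stand-in for "give it `timeout` to comply".
    tokio::time::sleep(timeout).await;
    // Escalate. ESRCH means the group already exited, which counts as success.
    match killpg(pgid, Signal::SIGKILL) {
        Ok(()) | Err(nix::errno::Errno::ESRCH) => Ok(()),
        Err(e) => Err(e.into()),
    }
}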
+// See https://github.com/tokio-rs/tokio/blob/master/tokio/src/process/windows.rs#L102 +impl Future for ChildProcess { + type Output = io::Result; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let inner = Pin::get_mut(self); + loop { + if let Some(ref mut w) = inner.waiting { + match Pin::new(&mut w.rx).poll(cx) { + Poll::Ready(Ok(())) => {} + Poll::Ready(Err(e)) => Err(io::Error::new(io::ErrorKind::Other, e))?, + Poll::Pending => return Poll::Pending, + } + let status = inner.inner.try_wait()?.ok_or(io::Error::new( + io::ErrorKind::Other, + "exit status is not available", + ))?; + return Poll::Ready(Ok(status)); + } + + if let Some(e) = inner.inner.try_wait()? { + return Poll::Ready(Ok(e)); + } + let (tx, rx) = oneshot::channel(); + let ptr = Box::into_raw(Box::new(Some(tx))); + let mut wait_object = handleapi::INVALID_HANDLE_VALUE; + let rc = unsafe { + winbase::RegisterWaitForSingleObject( + &mut wait_object, + inner.inner.as_raw_handle(), + Some(callback), + ptr as *mut _, + winbase::INFINITE, + winnt::WT_EXECUTEINWAITTHREAD | winnt::WT_EXECUTEONLYONCE, + ) + }; + if rc == 0 { + let err = io::Error::last_os_error(); + drop(unsafe { Box::from_raw(ptr) }); + return Poll::Ready(Err(err)); + } + inner.waiting = Some(Waiting { + rx, + wait_object, + tx: ptr, + }); + } + } +} + +// Waiting for a signal from a `wait_object` handle +struct Waiting { + rx: oneshot::Receiver<()>, + wait_object: HANDLE, + // we're using raw pointer to pass it through ffi to callback + tx: *mut Option>, +} + +unsafe impl Sync for Waiting {} +unsafe impl Send for Waiting {} + +impl Drop for Waiting { + fn drop(&mut self) { + unsafe { + let rc = UnregisterWaitEx(self.wait_object, handleapi::INVALID_HANDLE_VALUE); + if rc == 0 { + panic!("failed to unregister: {}", io::Error::last_os_error()); + } + drop(Box::from_raw(self.tx)); + } + } +} + +unsafe extern "system" fn callback(ptr: *mut std::ffi::c_void, _timer_fired: winnt::BOOLEAN) { + let complete = &mut *(ptr as *mut Option>); + complete.take().unwrap().send(()).unwrap(); +} + +#[cfg(test)] +mod tests { + use buck2_util::process::background_command; + + use crate::win::child_process::ChildProcess; + + #[tokio::test] + async fn test_child_process() -> anyhow::Result<()> { + let mut cmd = background_command("cmd"); + let cmd = cmd.arg("/c").arg("exit 2"); + + let child = cmd.spawn().unwrap(); + let proc = ChildProcess::new(child); + + let status = proc.await?; + assert_eq!(status.code(), Some(2)); + Ok(()) + } +} diff --git a/app/buck2_forkserver/src/win/job_object.rs b/app/buck2_forkserver/src/win/job_object.rs new file mode 100644 index 0000000000000..0a628a48142e5 --- /dev/null +++ b/app/buck2_forkserver/src/win/job_object.rs @@ -0,0 +1,178 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +#![allow(dead_code)] + +use std::mem; +use std::ptr; +use std::sync::Arc; + +use anyhow::Context; +use buck2_wrapper_common::win::winapi_handle::WinapiHandle; +use dupe::Dupe; +use winapi::shared::basetsd::ULONG_PTR; +use winapi::shared::minwindef::DWORD; +use winapi::shared::minwindef::FALSE; +use winapi::shared::minwindef::LPVOID; +use winapi::um::handleapi; +use winapi::um::ioapiset; +use winapi::um::jobapi2; +use winapi::um::minwinbase::OVERLAPPED; +use winapi::um::winbase::INFINITE; +use winapi::um::winnt::JobObjectAssociateCompletionPortInformation; +use winapi::um::winnt::JobObjectExtendedLimitInformation; +use winapi::um::winnt::HANDLE; +use winapi::um::winnt::JOBOBJECT_ASSOCIATE_COMPLETION_PORT; +use winapi::um::winnt::JOBOBJECT_EXTENDED_LIMIT_INFORMATION; +use winapi::um::winnt::JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE; +use winapi::um::winnt::JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO; + +use crate::win::utils::result_bool; + +pub(crate) struct JobObject { + job_handle: Arc, + completion_handle: Arc, +} + +impl JobObject { + pub(crate) fn new() -> anyhow::Result { + let job_handle = unsafe { + WinapiHandle::new_check_last_os_error(jobapi2::CreateJobObjectW( + ptr::null_mut(), + ptr::null_mut(), + )) + .context("CreateJobObject")? + }; + + let completion_handle = unsafe { + WinapiHandle::new_check_last_os_error(ioapiset::CreateIoCompletionPort( + handleapi::INVALID_HANDLE_VALUE, // FileHandle + ptr::null_mut(), // ExistingCompletionPort + 0, // CompletionKey + 1, // NumberOfConcurrentThreads + )) + .context("CreateIoCompletionPort")? + }; + + associate_job_with_completion_port(&job_handle, &completion_handle)?; + set_job_limits(&job_handle)?; + + Ok(Self { + job_handle: Arc::new(job_handle), + completion_handle: Arc::new(completion_handle), + }) + } + + pub(crate) fn assign_process(&self, process: HANDLE) -> anyhow::Result<()> { + result_bool(unsafe { jobapi2::AssignProcessToJobObject(self.job_handle.handle(), process) }) + } + + pub(crate) async fn terminate(&self, exit_code: u32) -> anyhow::Result<()> { + result_bool(unsafe { jobapi2::TerminateJobObject(self.job_handle.handle(), exit_code) })?; + self.wait().await + } + + // waits until all processes in a job have exited + // https://devblogs.microsoft.com/oldnewthing/20130405-00/?p=4743 + async fn wait(&self) -> anyhow::Result<()> { + const MAX_RETRY_ATTEMPT: usize = 10; + let job = self.job_handle.dupe(); + let completion_port = self.completion_handle.dupe(); + + // try to wait all the processes exit before spawn a blocking task + for _ in 0..MAX_RETRY_ATTEMPT { + if let Ok(false) = has_active_processes(&job, &completion_port, 0) { + break; + } + } + + tokio::task::spawn_blocking(move || { + let completion_port = completion_port; + while has_active_processes(&job, &completion_port, INFINITE)? {} + Ok(()) + }) + .await? 
+ } +} + +fn has_active_processes( + job: &WinapiHandle, + completion_port: &WinapiHandle, + timeout: DWORD, +) -> anyhow::Result { + let mut completion_code: DWORD = 0; + let mut completion_key: ULONG_PTR = 0; + let mut overlapped = mem::MaybeUninit::::uninit(); + let mut lp_overlapped = overlapped.as_mut_ptr(); + + let result = unsafe { + ioapiset::GetQueuedCompletionStatus( + completion_port.handle(), + &mut completion_code, + &mut completion_key, + &mut lp_overlapped, + timeout, + ) + }; + + // ignore timeout errors unless the timeout was specified to INFINITE + // https://docs.microsoft.com/en-us/windows/win32/api/ioapiset/nf-ioapiset-getqueuedcompletionstatus + if timeout != INFINITE && result == FALSE && lp_overlapped.is_null() { + return Ok(true); + } + + result_bool(result)?; + + // we are interested only in the specific event from the job object + // ignore the rest in case some other I/O gets queued to our completion port + if completion_key != job.handle() as ULONG_PTR + || completion_code != JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO + { + return Ok(true); + } + + Ok(false) +} + +fn associate_job_with_completion_port( + job: &WinapiHandle, + completion_port: &WinapiHandle, +) -> anyhow::Result<()> { + let mut associate_completion = JOBOBJECT_ASSOCIATE_COMPLETION_PORT { + CompletionKey: job.handle(), + CompletionPort: completion_port.handle(), + }; + + result_bool(unsafe { + jobapi2::SetInformationJobObject( + job.handle(), + JobObjectAssociateCompletionPortInformation, + &mut associate_completion as *mut _ as LPVOID, + mem::size_of_val(&associate_completion) + .try_into() + .expect("cannot safely cast to DWORD"), + ) + }) +} + +fn set_job_limits(job: &WinapiHandle) -> anyhow::Result<()> { + let mut info = JOBOBJECT_EXTENDED_LIMIT_INFORMATION::default(); + info.BasicLimitInformation.LimitFlags = JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE; + + result_bool(unsafe { + jobapi2::SetInformationJobObject( + job.handle(), + JobObjectExtendedLimitInformation, + &mut info as *mut _ as LPVOID, + mem::size_of_val(&info) + .try_into() + .expect("cannot safely cast to DWORD"), + ) + }) +} diff --git a/app/buck2_forkserver/src/win/process_group.rs b/app/buck2_forkserver/src/win/process_group.rs new file mode 100644 index 0000000000000..61e4eddd5e6e7 --- /dev/null +++ b/app/buck2_forkserver/src/win/process_group.rs @@ -0,0 +1,160 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::os::windows::io::AsRawHandle; +use std::os::windows::process::ChildExt; +use std::os::windows::process::CommandExt; +use std::process::Child; +use std::process::Command; +use std::process::ExitStatus; +use std::process::Stdio; +use std::time::Duration; + +use anyhow::Context; +use tokio::io; +use tokio::process::ChildStderr; +use tokio::process::ChildStdout; +use winapi::um::processthreadsapi; + +use crate::win::child_process::ChildProcess; +use crate::win::job_object::JobObject; +use crate::win::utils::result_dword; + +pub(crate) struct ProcessCommandImpl { + inner: Command, +} + +impl ProcessCommandImpl { + pub(crate) fn new(mut cmd: Command) -> Self { + // On windows we create suspended process to assign it to a job (group) and then resume. 
+ // This is necessary because the process might finish before we add it to a job + cmd.creation_flags( + winapi::um::winbase::CREATE_NO_WINDOW | winapi::um::winbase::CREATE_SUSPENDED, + ); + Self { inner: cmd } + } + + pub(crate) fn spawn(&mut self) -> io::Result { + self.inner.spawn() + } + + #[allow(dead_code)] + pub(crate) fn stdout(&mut self, stdout: Stdio) { + self.inner.stdout(stdout); + } + + #[allow(dead_code)] + pub(crate) fn stderr(&mut self, stdout: Stdio) { + self.inner.stderr(stdout); + } +} + +/// Keeps track of the exit status of a child process without worrying about +/// polling the underlying futures even after they have completed. +enum FusedChild { + Child(ChildProcess), + Done(ExitStatus), +} + +impl FusedChild { + fn as_option(&self) -> Option<&ChildProcess> { + match &self { + FusedChild::Child(child) => Some(child), + FusedChild::Done(_) => None, + } + } + + fn as_option_mut(&mut self) -> Option<&mut ChildProcess> { + match self { + FusedChild::Child(child) => Some(child), + FusedChild::Done(_) => None, + } + } +} + +pub(crate) struct ProcessGroupImpl { + child: FusedChild, + job: JobObject, +} + +impl ProcessGroupImpl { + pub(crate) fn new(child: Child) -> anyhow::Result { + let job = JobObject::new()?; + job.assign_process(child.as_raw_handle())?; + let process = ProcessGroupImpl { + child: FusedChild::Child(ChildProcess::new(child)), + job, + }; + // We create suspended process to assign it to a job (group) + // So we resume the process after assignment + process.resume()?; + Ok(process) + } + + pub(crate) fn take_stdout(&mut self) -> Option { + self.child + .as_option_mut()? + .as_std_mut() + .stdout + .take() + .and_then(|s| ChildStdout::from_std(s).ok()) + } + + pub(crate) fn take_stderr(&mut self) -> Option { + self.child + .as_option_mut()? + .as_std_mut() + .stderr + .take() + .and_then(|s| ChildStderr::from_std(s).ok()) + } + + pub(crate) async fn wait(&mut self) -> io::Result { + match &mut self.child { + FusedChild::Done(exit) => Ok(*exit), + FusedChild::Child(child) => { + // Ensure stdin is closed so the child isn't stuck waiting on + // input while the parent is waiting for it to exit. + drop(child.as_std_mut().stdin.take()); + let ret = child.await; + + if let Ok(exit) = ret { + self.child = FusedChild::Done(exit); + } + + ret + } + } + } + + pub(crate) fn id(&self) -> Option { + Some(self.child.as_option()?.as_std().id()) + } + + // On Windows we use JobObject API to kill the whole process tree + pub(crate) async fn kill( + &self, + _graceful_shutdown_timeout_s: Option, + ) -> anyhow::Result<()> { + tokio::time::timeout(Duration::from_secs(10), self.job.terminate(0)) + .await + .map_err(|_| anyhow::anyhow!("Timed out on job object termination"))? + } + + fn resume(&self) -> anyhow::Result<()> { + let handle = self + .child + .as_option() + .context("can't resume an exited process")? + .as_std() + .main_thread_handle() + .as_raw_handle(); + result_dword(unsafe { processthreadsapi::ResumeThread(handle) }) + } +} diff --git a/app/buck2_forkserver/src/win/utils.rs b/app/buck2_forkserver/src/win/utils.rs new file mode 100644 index 0000000000000..d136149082509 --- /dev/null +++ b/app/buck2_forkserver/src/win/utils.rs @@ -0,0 +1,30 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
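// Editorial sketch (not part of the patch): the FusedChild idea above in a
// minimal standalone form. Once the child exits, the status is cached so
// repeated wait() calls never poll a completed future again.
use std::process::ExitStatus;

enum FusedWait {
    Running(tokio::process::Child),
    Done(ExitStatus),
}

impl FusedWait {
    async fn wait(&mut self) -> std::io::Result<ExitStatus> {
        match self {
            // Already finished: return the memoized status.
            FusedWait::Done(status) => Ok(*status),
            FusedWait::Running(child) => {
                // First completion: wait, then memoize the status.
                let status = child.wait().await?;
                *self = FusedWait::Done(status);
                Ok(status)
            }
        }
    }
}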
+ */ + +use std::io::Error; + +use winapi::shared::minwindef::BOOL; +use winapi::shared::minwindef::DWORD; +use winapi::shared::minwindef::FALSE; + +pub(crate) fn result_bool(ret: BOOL) -> anyhow::Result<()> { + if ret == FALSE { + Err(anyhow::anyhow!(Error::last_os_error())) + } else { + Ok(()) + } +} + +pub(crate) fn result_dword(ret: DWORD) -> anyhow::Result<()> { + if ret == DWORD::MAX { + Err(anyhow::anyhow!(Error::last_os_error())) + } else { + Ok(()) + } +} diff --git a/app/buck2_forkserver_proto/BUCK b/app/buck2_forkserver_proto/BUCK index ead0890bad7d1..5dec40df96fa0 100644 --- a/app/buck2_forkserver_proto/BUCK +++ b/app/buck2_forkserver_proto/BUCK @@ -1,5 +1,5 @@ load("@fbcode//buck2:proto_defs.bzl", "rust_protobuf_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") +load("@fbcode_macros//build_defs/lib:oss.bzl", "translate_target") oncall("build_infra") @@ -7,10 +7,11 @@ rust_protobuf_library( name = "buck2_forkserver_proto", srcs = glob(["src/**/*.rs"]), build_env = { - "BUCK_HACK_DATA_PROTOC_INCLUDE": "$(location //buck2/app/buck2_data:data.proto)", + "BUCK_HACK_DATA_PROTOC_INCLUDE": "$(location {})".format( + translate_target("//buck2/app/buck2_data:data_proto"), + ), }, build_script = "build.rs", - doctests = False, # FIXME protos = ["forkserver.proto"], deps = [ "fbsource//third-party/rust:derive_more", diff --git a/app/buck2_forkserver_proto/Cargo.toml b/app/buck2_forkserver_proto/Cargo.toml index 1820858d46876..09e1058474ace 100644 --- a/app/buck2_forkserver_proto/Cargo.toml +++ b/app/buck2_forkserver_proto/Cargo.toml @@ -2,14 +2,16 @@ name = "buck2_forkserver_proto" edition = "2021" +license = { workspace = true } +repository = { workspace = true } version = "0.1.0" [dependencies] +derive_more = { workspace = true } +gazebo = { workspace = true } prost = { workspace = true } prost-types = { workspace = true } tonic = { workspace = true } -derive_more = { workspace = true } -gazebo = { workspace = true } buck2_data = { workspace = true } diff --git a/app/buck2_forkserver_proto/forkserver.proto b/app/buck2_forkserver_proto/forkserver.proto index e3be3cfdf9e5a..22b98719e4c0d 100644 --- a/app/buck2_forkserver_proto/forkserver.proto +++ b/app/buck2_forkserver_proto/forkserver.proto @@ -15,7 +15,7 @@ import "data.proto"; package buck.forkserver; message CommandRequest { - reserved 3, 4, 7; + reserved 3, 4, 7, 13; // The executable that should be run. bytes exe = 1; // The arguments to give the executable. @@ -36,6 +36,10 @@ message CommandRequest { // Used to optionally redirect stdout and stderr to files. // If set, stdout and stderr events will not be streamed. optional StdRedirectPaths std_redirects = 12; + // If set, how long to give the process time to shutdown after SIGTERM + // before sending SIGKILL. + // Should only be needed for daemonized processes (workers). + optional uint32 graceful_shutdown_timeout_s = 14; } message WorkingDirectory { diff --git a/app/buck2_forkserver_proto/src/lib.rs b/app/buck2_forkserver_proto/src/lib.rs index ada279f134b69..713a4c1636692 100644 --- a/app/buck2_forkserver_proto/src/lib.rs +++ b/app/buck2_forkserver_proto/src/lib.rs @@ -7,4 +7,6 @@ * of this source tree. 
*/ +#![feature(error_generic_member_access)] + tonic::include_proto!("buck.forkserver"); diff --git a/app/buck2_futures/BUCK b/app/buck2_futures/BUCK new file mode 100644 index 0000000000000..25391b785a4d6 --- /dev/null +++ b/app/buck2_futures/BUCK @@ -0,0 +1,29 @@ +load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") + +oncall("build_infra") + +rust_library( + name = "buck2_futures", + srcs = glob( + ["src/**/*.rs"], + ), + test_deps = [ + "fbsource//third-party/rust:assert_matches", + ], + deps = [ + "fbsource//third-party/rust:futures", + "fbsource//third-party/rust:once_cell", + "fbsource//third-party/rust:parking_lot", + "fbsource//third-party/rust:pin-project", + "fbsource//third-party/rust:slab", + "fbsource//third-party/rust:take_mut", + "fbsource//third-party/rust:thiserror", + "fbsource//third-party/rust:tokio", + "fbsource//third-party/rust:tracing", + "//buck2/allocative/allocative:allocative", + "//buck2/app/buck2_data:buck2_data", + "//buck2/app/buck2_events:buck2_events", + "//buck2/app/buck2_wrapper_common:buck2_wrapper_common", + "//buck2/gazebo/dupe:dupe", + ], +) diff --git a/app/buck2_futures/Cargo.toml b/app/buck2_futures/Cargo.toml new file mode 100644 index 0000000000000..b53ba9e85e76d --- /dev/null +++ b/app/buck2_futures/Cargo.toml @@ -0,0 +1,25 @@ +[package] +edition = "2021" +license = { workspace = true } +name = "buck2_futures" +repository = { workspace = true } +version = "0.1.0" + +[dependencies] +allocative = { workspace = true } +buck2_data = { workspace = true } +buck2_events = { workspace = true } +buck2_wrapper_common = { workspace = true } +dupe = { workspace = true } +futures = "0.3" +once_cell = { workspace = true } +parking_lot = { workspace = true } +pin-project = "0.4" +slab = "0.4.7" +take_mut = { workspace = true } +thiserror = { workspace = true } +tokio = { version = "1.5", features = ["full"] } +tracing = "0.1.22" + +[dev-dependencies] +assert_matches = { workspace = true } diff --git a/shed/more_futures/src/cancellable_future.rs b/app/buck2_futures/src/cancellable_future.rs similarity index 99% rename from shed/more_futures/src/cancellable_future.rs rename to app/buck2_futures/src/cancellable_future.rs index 9780bc5913b7f..165ab4ddc9d0b 100644 --- a/shed/more_futures/src/cancellable_future.rs +++ b/app/buck2_futures/src/cancellable_future.rs @@ -31,7 +31,7 @@ use crate::cancellation::future::CancellationNotificationFuture; thread_local! { /// The ExecutionContext for the currently executing CancellableFuture. 
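// Editorial note on the hunk below (not part of the patch): wrapping the
// initializer in `const { .. }` gives thread_local! a const-initialized
// slot, skipping the lazy-init check on each access. Minimal standalone
// form:
use std::cell::RefCell;

thread_local! {
    static SLOT: RefCell<Option<u32>> = const { RefCell::new(None) };
}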
- static CURRENT: RefCell>> = RefCell::new(None); + static CURRENT: RefCell>> = const { RefCell::new(None) }; } enum State { diff --git a/shed/more_futures/src/cancellation.rs b/app/buck2_futures/src/cancellation.rs similarity index 98% rename from shed/more_futures/src/cancellation.rs rename to app/buck2_futures/src/cancellation.rs index f5647fc3cdbcf..ad88244b8da73 100644 --- a/shed/more_futures/src/cancellation.rs +++ b/app/buck2_futures/src/cancellation.rs @@ -39,11 +39,6 @@ static INSTANCE: Lazy = pub struct CancellationContext<'a>(CancellationContextInner<'a>); impl<'a> CancellationContext<'a> { - /// TODO replace with real initialization - pub fn todo() -> Self { - CancellationContext(CancellationContextInner::ThreadLocal) - } - pub fn testing() -> &'a Self { &INSTANCE } diff --git a/app/buck2_futures/src/cancellation/future.rs b/app/buck2_futures/src/cancellation/future.rs new file mode 100644 index 0000000000000..017c551dbf16e --- /dev/null +++ b/app/buck2_futures/src/cancellation/future.rs @@ -0,0 +1,1261 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! +//! A future that can be canceled via an explicit `CancellationHandle`. +//! This future is intended to be spawned on tokio-runtime directly, and for its results to be +//! accessed via the joinhandle. +//! It is not intended to be polled directly. + +use std::future::Future; +use std::mem; +use std::pin::Pin; +use std::sync::atomic::AtomicBool; +use std::sync::atomic::AtomicU8; +use std::sync::atomic::Ordering; +use std::sync::Arc; +use std::task::Context; +use std::task::Poll; +use std::task::Waker; + +use dupe::Clone_; +use dupe::Dupe; +use dupe::Dupe_; +use futures::future::BoxFuture; +use futures::task::AtomicWaker; +use parking_lot::Mutex; +use pin_project::pin_project; +use slab::Slab; + +use crate::cancellation::ExplicitCancellationContext; +use crate::maybe_future::MaybeFuture; +use crate::owning_future::OwningFuture; + +pub(crate) fn make_cancellable_future( + f: F, +) -> (ExplicitlyCancellableFuture, CancellationHandle) +where + F: for<'a> FnOnce(&'a ExplicitCancellationContext) -> BoxFuture<'a, T> + Send, +{ + let context = ExecutionContext::new(); + + let fut = { + let context = context.dupe(); + let cancel = ExplicitCancellationContext { inner: context }; + + OwningFuture::new(cancel, |d| f(d)) + }; + + let state = SharedState::new(); + + let fut = ExplicitlyCancellableFuture::new(fut, state.dupe(), context); + let handle = CancellationHandle::new(state); + + (fut, handle) +} + +/// Defines a future that operates with the 'CancellationContext' to provide explicit cancellation. +/// +/// NOTE: this future is intended only to be polled in a consistent tokio runtime, and never moved +/// from one executor to another. +/// The general safe way of using this future is to spawn it directly via `tokio::spawn`. +#[pin_project] +pub struct ExplicitlyCancellableFuture { + #[pin] + fut: MaybeFuture>, +} + +struct ExplicitlyCancellableFutureInner { + shared: SharedState, + + execution: ExecutionContext, + + /// NOTE: this is duplicative of the `SharedState`, but unlike that state this is not behind a + /// lock. This avoids us needing to grab the lock to check if we're Pending every time we poll. 
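// Editorial usage sketch (not part of the patch; mirrors the tests later in
// this file): make_cancellable_future above returns a future to spawn plus
// a handle that requests cancellation. A cancelled future resolves to None
// instead of its value.
async fn cancellation_usage_sketch() {
    use futures::FutureExt;

    let (fut, handle) = make_cancellable_future(|_ctx| futures::future::pending::<()>().boxed());
    let task = tokio::spawn(fut);
    handle.cancel();
    // Cancellation surfaces as None rather than a panic or error.
    assert_eq!(task.await.unwrap(), None);
}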
+ started: bool, + + future: Pin>>, +} + +impl ExplicitlyCancellableFuture { + fn new( + future: Pin>>, + shared: SharedState, + execution: ExecutionContext, + ) -> Self { + ExplicitlyCancellableFuture { + fut: MaybeFuture::Fut(ExplicitlyCancellableFutureInner { + shared, + execution, + started: false, + future, + }), + } + } +} + +impl Future for ExplicitlyCancellableFuture { + type Output = Option; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let mut this = self.project(); + + match this.fut.as_mut().poll(cx) { + Poll::Ready(res) => { + this.fut.take(); + Poll::Ready(res) + } + Poll::Pending => Poll::Pending, + } + } +} + +impl ExplicitlyCancellableFutureInner { + fn poll_inner(self: &mut Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let is_cancelled = self.shared.inner.cancelled.load(Ordering::SeqCst); + + if is_cancelled { + let mut execution = self.execution.shared.lock(); + if execution.can_exit() { + return Poll::Ready(None); + } + execution.notify_cancelled(); + } + + let res = Pin::new(&mut self.future).poll(cx).map(Some); + + // If we were using structured cancellation but just exited the critical section, then we + // should exit now. + if is_cancelled && self.execution.shared.lock().can_exit() { + return Poll::Ready(None); + } + + res + } +} + +impl Future for ExplicitlyCancellableFutureInner { + type Output = Option; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + // Update the state before we check for cancellation so that the cancellation logic can + // observe whether this future has entered `poll` or not. This lets cancellation set the + // termination observer correctly so that the state is picked up. + // Once we start, the `poll_inner` will check whether we are actually canceled and return + // the proper poll value. + if !self.started { + // we only update the Waker once at the beginning of the poll. For the same tokio + // runtime, this is always safe and behaves correctly, as such, this future is + // restricted to be ran on the same tokio executor and never moved from one runtime to + // another + take_mut::take( + &mut *self.shared.inner.state.lock(), + |future| match future { + State::Pending => State::Polled { + waker: cx.waker().clone(), + }, + other => other, + }, + ); + + self.started = true; + } + + let poll = self.poll_inner(cx); + + // When we exit, release our waker to ensure we don't keep create a reference cycle for + // this task. + if poll.is_ready() { + let inner = self.shared.inner.dupe(); + let mut locked_state = inner.state.lock(); + let state = mem::replace(&mut *locked_state, State::Exited); + + match state { + State::Cancelled => { + if self.execution.shared.lock().can_exit() { + return Poll::Ready(None); + } + } + _ => {} + } + } else if self.execution.shared.lock().should_exit() { + // the future itself indicated that we should cancel + + return Poll::Ready(None); + } + + poll + } +} + +pub struct CancellationHandle { + shared_state: SharedState, +} + +impl CancellationHandle { + fn new(shared_state: SharedState) -> Self { + CancellationHandle { shared_state } + } + + /// Attempts to cancel the future this handle is associated with as soon as possible, returning + /// a future that completes when the future is canceled. + pub fn cancel(self) { + // Store to the boolean first before we write to state. + // This is because on `poll`, the future will update the state first then check the boolean. 
+ // This ordering ensures that either the `poll` has read our cancellation, and hence will + // later notify the termination observer via the channel we store in `State::Cancelled`, + // or that we will observe the terminated state of the future and directly notify the + // `TerminationObserver` ourselves. + self.shared_state + .inner + .cancelled + .store(true, Ordering::SeqCst); + + match &mut *self.shared_state.inner.state.lock() { + State::Cancelled { .. } => { + unreachable!("We consume the CancellationHandle on cancel, so this isn't possible") + } + State::Exited => { + // Nothing to do, that future is done. + } + state @ State::Pending => { + // we wait for the future to `poll` once even if it has yet to do so. + // Since we always should be spawning the `ExplicitlyCancellableFuture` on tokio, + // it should be polled once. + let _old = std::mem::replace(state, State::Cancelled); + } + state @ State::Polled { .. } => { + let old = std::mem::replace(state, State::Cancelled); + match old { + State::Polled { waker } => waker.wake(), + _ => { + unreachable!() + } + } + } + }; + } +} + +#[derive(Clone_, Dupe_)] +struct SharedState { + inner: Arc, +} + +impl SharedState { + fn new() -> Self { + Self { + inner: Arc::new(SharedStateData { + state: Mutex::new(State::Pending), + cancelled: AtomicBool::new(false), + }), + } + } +} + +struct SharedStateData { + state: Mutex, + + /// When set, this future has been cancelled and should attempt to exit as soon as possible. + cancelled: AtomicBool, +} + +enum State { + /// This future has been constructed, but not polled yet. + Pending, + + /// This future has been polled. A waker is available. + Polled { waker: Waker }, + + /// This future has already been cancelled. + Cancelled, + + /// This future has already finished executing. + Exited, +} + +/// Context relating to execution of the `poll` of the future. This will contain the information +/// required for the `CancellationContext` that the future holds to enter critical sections and +/// structured cancellations. 
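// Editorial sketch (not part of the patch): the cancel() handshake above,
// reduced to its core. Store the cancelled flag first, then wake the
// last-seen waker so the future re-polls and observes the flag. (Simplified:
// no critical sections or structured cancellation.)
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::task::Waker;

use parking_lot::Mutex;

struct Shared {
    cancelled: AtomicBool,
    waker: Mutex<Option<Waker>>,
}

fn cancel_sketch(shared: &Arc<Shared>) {
    // Flag first, so any poll that the wake triggers must see it.
    shared.cancelled.store(true, Ordering::SeqCst);
    if let Some(w) = shared.waker.lock().take() {
        w.wake(); // force a re-poll that observes the flag
    }
}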
+#[derive(Clone, Dupe)] +pub(crate) struct ExecutionContext { + shared: Arc>, +} + +impl ExecutionContext { + fn new() -> Self { + Self { + shared: Arc::new(Mutex::new(ExecutionContextData { + cancellation_notification: { + CancellationNotificationData { + inner: Arc::new(CancellationNotificationDataInner { + notified: Default::default(), + wakers: Mutex::new(Some(Default::default())), + }), + } + }, + prevent_cancellation: 0, + should_exit: false, + })), + } + } + + pub fn testing() -> Self { + Self::new() + } + + pub(crate) fn enter_structured_cancellation( + &self, + ) -> (CancellationNotificationData, CriticalSectionGuard) { + let mut shared = self.shared.lock(); + + let notification = shared.enter_structured_cancellation(); + + (notification, CriticalSectionGuard::new(&self.shared)) + } +} + +pub(crate) struct CriticalSectionGuard<'a> { + shared: Option<&'a Mutex>, +} + +impl<'a> CriticalSectionGuard<'a> { + fn new(shared: &'a Mutex) -> Self { + Self { + shared: Some(shared), + } + } + + pub(crate) fn exit_prevent_cancellation(mut self) -> bool { + self.shared + .take() + .expect("should be set") + .lock() + .exit_prevent_cancellation() + } + + pub(crate) fn try_to_disable_cancellation(mut self) -> bool { + let mut shared = self.shared.take().expect("should be set").lock(); + if shared.try_to_disable_cancellation() { + true + } else { + // couldn't prevent cancellation, so release our hold onto the counter + shared.exit_prevent_cancellation(); + false + } + } +} + +impl<'a> Drop for CriticalSectionGuard<'a> { + fn drop(&mut self) { + if let Some(shared) = self.shared.take() { + // never actually exited during normal poll, but dropping this means we'll never poll + // again, so just release the `prevent_cancellation` + + shared.lock().exit_prevent_cancellation(); + } + } +} + +struct ExecutionContextData { + cancellation_notification: CancellationNotificationData, + + /// How many observers are preventing immediate cancellation. + prevent_cancellation: usize, + + should_exit: bool, +} + +impl ExecutionContextData { + /// Does this future not currently prevent its cancellation? 
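// Editorial sketch (not part of the patch): CriticalSectionGuard above is
// an RAII counter. Generically, Drop guarantees the "prevent" count is
// released even when the holder is dropped mid-section.
use std::cell::Cell;

struct CounterGuard<'a>(&'a Cell<usize>);

impl<'a> CounterGuard<'a> {
    fn enter(count: &'a Cell<usize>) -> Self {
        count.set(count.get() + 1);
        Self(count)
    }
}

impl<'a> Drop for CounterGuard<'a> {
    fn drop(&mut self) {
        // Mirrors exit_prevent_cancellation() running on drop.
        self.0.set(self.0.get() - 1);
    }
}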
+ fn can_exit(&self) -> bool { + self.prevent_cancellation == 0 + } + + fn should_exit(&self) -> bool { + self.should_exit + } + + fn enter_structured_cancellation(&mut self) -> CancellationNotificationData { + self.prevent_cancellation += 1; + + self.cancellation_notification.dupe() + } + + fn notify_cancelled(&mut self) { + let updated = self.cancellation_notification.inner.notified.fetch_update( + Ordering::SeqCst, + Ordering::SeqCst, + |old| match CancellationNotificationStatus::from(old) { + CancellationNotificationStatus::Pending => { + Some(CancellationNotificationStatus::Notified.into()) + } + CancellationNotificationStatus::Notified => None, + CancellationNotificationStatus::Disabled => None, + }, + ); + if updated.is_ok() { + if let Some(mut wakers) = self.cancellation_notification.inner.wakers.lock().take() { + wakers.drain().for_each(|waker| waker.wake()); + } + } + } + + fn exit_prevent_cancellation(&mut self) -> bool { + self.prevent_cancellation -= 1; + + self.prevent_cancellation == 0 + } + + fn try_to_disable_cancellation(&mut self) -> bool { + let maybe_updated = self.cancellation_notification.inner.notified.fetch_update( + Ordering::SeqCst, + Ordering::SeqCst, + |old| match CancellationNotificationStatus::from(old) { + CancellationNotificationStatus::Pending => { + Some(CancellationNotificationStatus::Disabled.into()) + } + CancellationNotificationStatus::Notified => None, + CancellationNotificationStatus::Disabled => None, + }, + ); + + match maybe_updated { + Ok(_) => true, + Err(old) => { + let old = CancellationNotificationStatus::from(old); + matches!(old, CancellationNotificationStatus::Disabled) + } + } + } +} + +enum CancellationNotificationStatus { + /// no notifications yet. maps to '0' + Pending, + /// notified, maps to '1' + Notified, + /// disabled notifications, maps to '2' + Disabled, +} + +impl From for CancellationNotificationStatus { + fn from(value: u8) -> Self { + match value { + 0 => CancellationNotificationStatus::Pending, + 1 => CancellationNotificationStatus::Notified, + 2 => CancellationNotificationStatus::Disabled, + _ => panic!("invalid status"), + } + } +} + +impl From for u8 { + fn from(value: CancellationNotificationStatus) -> Self { + match value { + CancellationNotificationStatus::Pending => 0, + CancellationNotificationStatus::Notified => 1, + CancellationNotificationStatus::Disabled => 2, + } + } +} + +#[derive(Clone, Dupe)] +pub(crate) struct CancellationNotificationData { + inner: Arc, +} + +struct CancellationNotificationDataInner { + /// notification status per enum 'CancellationNotificationStatus' + notified: AtomicU8, + wakers: Mutex>>>, +} + +pub(crate) struct CancellationNotificationFuture { + data: CancellationNotificationData, + // index into the waker for this future held by the Slab in 'CancellationNotificationData' + id: Option, + // duplicate of the waker held for us to update the waker on poll without acquiring lock + waker: Arc, +} + +impl CancellationNotificationFuture { + pub(crate) fn new(data: CancellationNotificationData) -> Self { + let waker = Arc::new(AtomicWaker::new()); + let id = data + .inner + .wakers + .lock() + .as_mut() + .map(|wakers| wakers.insert(waker.dupe())); + CancellationNotificationFuture { data, id, waker } + } + + fn remove_waker(&mut self, id: Option) { + if let Some(id) = id { + self.data + .inner + .wakers + .lock() + .as_mut() + .map(|wakers| wakers.remove(id)); + } + } +} + +impl Clone for CancellationNotificationFuture { + fn clone(&self) -> Self { + 
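// Editorial sketch (not part of the patch): the notify-once transition used
// by notify_cancelled()/try_to_disable_cancellation() above. fetch_update's
// closure returns None to reject a transition, so exactly one caller wins
// the Pending (0) -> Notified (1) race.
use std::sync::atomic::{AtomicU8, Ordering};

fn notify_once_sketch(status: &AtomicU8) -> bool {
    status
        .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |old| match old {
            0 => Some(1), // Pending -> Notified: we get to wake the wakers
            _ => None,    // already Notified (1) or Disabled (2): no-op
        })
        .is_ok()
}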
CancellationNotificationFuture::new(self.data.dupe()) + } +} + +impl Dupe for CancellationNotificationFuture {} + +impl Drop for CancellationNotificationFuture { + fn drop(&mut self) { + self.remove_waker(self.id); + } +} + +impl Future for CancellationNotificationFuture { + type Output = (); + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match CancellationNotificationStatus::from(self.data.inner.notified.load(Ordering::SeqCst)) + { + CancellationNotificationStatus::Notified => { + // take the id so that we don't need to lock the wakers when this future is dropped + // after completion + let id = self.id.take(); + self.remove_waker(id); + Poll::Ready(()) + } + _ => { + self.waker.register(cx.waker()); + Poll::Pending + } + } + } +} + +#[cfg(test)] +mod tests { + use std::future::Future; + use std::pin::Pin; + use std::sync::atomic::AtomicBool; + use std::sync::atomic::Ordering; + use std::sync::Arc; + use std::task::Context; + use std::task::Poll; + use std::time::Duration; + + use assert_matches::assert_matches; + use dupe::Dupe; + use futures::FutureExt; + use parking_lot::Mutex; + use pin_project::pin_project; + use pin_project::pinned_drop; + + use crate::cancellation::future::make_cancellable_future; + use crate::cancellation::future::CancellationHandle; + + struct MaybePanicOnDrop { + panic: bool, + } + + impl Drop for MaybePanicOnDrop { + fn drop(&mut self) { + if self.panic { + panic!() + } + } + } + + #[tokio::test] + async fn test_ready() { + let (fut, _handle) = make_cancellable_future(|_| futures::future::ready(()).boxed()); + futures::pin_mut!(fut); + assert_matches!(futures::poll!(fut), Poll::Ready(Some(()))); + } + + #[tokio::test] + async fn test_cancel() { + let (fut, handle) = make_cancellable_future(|_| futures::future::pending::<()>().boxed()); + + futures::pin_mut!(fut); + + assert_matches!(futures::poll!(&mut fut), Poll::Pending); + + handle.cancel(); + + assert_matches!(futures::poll!(&mut fut), Poll::Ready(None)); + } + + #[tokio::test] + async fn test_cancel_never_polled() { + let (fut, handle) = make_cancellable_future(|_| futures::future::pending::<()>().boxed()); + + futures::pin_mut!(fut); + + handle.cancel(); + + assert_matches!(futures::poll!(&mut fut), Poll::Ready(None)); + } + + #[tokio::test] + async fn test_cancel_already_finished() { + let (fut, handle) = make_cancellable_future(|_| futures::future::ready::<()>(()).boxed()); + + futures::pin_mut!(fut); + assert_matches!(futures::poll!(&mut fut), Poll::Ready(Some(()))); + + handle.cancel(); + // this is okay + } + + #[tokio::test] + async fn test_wakeup() { + let (fut, handle) = make_cancellable_future(|_| futures::future::pending::<()>().boxed()); + + let task = tokio::task::spawn(fut); + futures::pin_mut!(task); + + assert_matches!( + tokio::time::timeout(Duration::from_millis(100), &mut task).await, + Err(..) 
+ ); + + handle.cancel(); + + assert_matches!( + tokio::time::timeout(Duration::from_millis(100), &mut task).await, + Ok(Ok(None)) + ); + } + + #[tokio::test] + async fn test_is_dropped() { + let dropped = Arc::new(Mutex::new(false)); + + struct SetOnDrop { + dropped: Arc>, + } + + impl Drop for SetOnDrop { + fn drop(&mut self) { + *self.dropped.lock() = true; + } + } + + impl Future for SetOnDrop { + type Output = (); + + fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { + Poll::Ready(()) + } + } + + let (fut, _handle) = make_cancellable_future({ + let dropped = dropped.dupe(); + |_| SetOnDrop { dropped }.boxed() + }); + + let task = tokio::task::spawn(fut); + + task.await.unwrap(); + assert!(*dropped.lock()); + } + + #[tokio::test] + async fn test_critical_section() { + let (fut, handle) = make_cancellable_future(|cancellations| { + async { + { + cancellations.critical_section(tokio::task::yield_now).await; + } + futures::future::pending::<()>().await + } + .boxed() + }); + futures::pin_mut!(fut); + + // We reach the first yield. At this point there is one guard held by the critical section. + assert_matches!(futures::poll!(&mut fut), Poll::Pending); + + // Cancel, then poll again. Cancellation is checked, *then* the guard in the future + // is dropped and then immediately check for cancellation and yield. + handle.cancel(); + + // Poll again, this time we don't enter the future's poll because it is cancelled. + assert_matches!(futures::poll!(&mut fut), Poll::Ready(None)); + } + + #[tokio::test] + async fn test_critical_section_noop_drop_is_allowed() { + let (fut, _handle) = make_cancellable_future(|cancellations| { + async { + let section = cancellations.critical_section(futures::future::pending::<()>); + drop(section); // Drop it within an ExecutionContext + } + .boxed() + }); + + fut.await; + } + + #[tokio::test] + async fn test_nested_critical_section() { + let (fut, handle) = make_cancellable_future(|cancellations| { + async { + { + cancellations + .critical_section(|| async move { tokio::task::yield_now().await }) + .await; + } + futures::future::pending::<()>().await + } + .boxed() + }); + futures::pin_mut!(fut); + + // We reach the first yield. + assert_matches!(futures::poll!(&mut fut), Poll::Pending); + + handle.cancel(); + let res = fut.await; + + assert_eq!(res, None); + } + + #[tokio::test] + async fn test_critical_section_cancelled_during_poll() { + let handle_slot = Arc::new(Mutex::new(None::)); + + let (fut, handle) = make_cancellable_future({ + let handle_slot = handle_slot.dupe(); + + move |cancellations| { + async move { + { + handle_slot + .lock() + .take() + .expect("Expected the guard to be here by now") + .cancel(); + + cancellations + .critical_section(|| async { + let mut panic = MaybePanicOnDrop { panic: true }; + tokio::task::yield_now().await; + panic.panic = false; + }) + .await; + } + futures::future::pending::<()>().await + } + .boxed() + } + }); + futures::pin_mut!(fut); + + *handle_slot.lock() = Some(handle); + + // Run the future. It'll drop the guard (and cancel itself) after entering the critical + // section while it's being polled, but it'll proceed to the end. 
+ fut.await; + } + + // Cases to test: + // - Basic + // - Reentrant + // - Cancel when exiting critical section (with no further wakeups) + + #[tokio::test] + async fn test_structured_cancellation_notifies() { + let (fut, handle) = make_cancellable_future(|cancellations| { + async { + cancellations + .with_structured_cancellation(|observer| observer) + .await; + } + .boxed() + }); + futures::pin_mut!(fut); + + // Proceed all the way to awaiting the observer + assert_matches!(futures::poll!(&mut fut), Poll::Pending); + + // Drop our guard. At this point we'll cancel, and notify the observer. + handle.cancel(); + assert_matches!(futures::poll!(&mut fut), Poll::Ready(..)); + } + + #[tokio::test] + async fn test_structured_cancellation_is_blocking() { + let (fut, handle) = make_cancellable_future(|cancellations| { + async { + cancellations + .with_structured_cancellation(|_observer| async move { + let mut panic = MaybePanicOnDrop { panic: true }; + tokio::task::yield_now().await; + panic.panic = false; + }) + .await; + } + .boxed() + }); + futures::pin_mut!(fut); + + // Proceed all the way to the first pending. + assert_matches!(futures::poll!(&mut fut), Poll::Pending); + + // Drop our guard. We should resume and disarm the guard. + handle.cancel(); + assert_matches!(futures::poll!(&mut fut), Poll::Ready(..)); + } + + #[tokio::test] + async fn test_structured_cancellation_cancels_on_exit() { + let (fut, handle) = make_cancellable_future(|cancellations| { + async { + cancellations + .with_structured_cancellation(|observer| observer) + .await; + futures::future::pending::<()>().await + } + .boxed() + }); + + futures::pin_mut!(fut); + + assert_matches!(futures::poll!(&mut fut), Poll::Pending); + + handle.cancel(); + assert_matches!(futures::poll!(&mut fut), Poll::Ready(None)); + } + + // This is a bit of an implementation detail. + #[tokio::test] + async fn test_structured_cancellation_returns_to_executor() { + let (fut, handle) = make_cancellable_future(|cancellations| { + async { + cancellations + .with_structured_cancellation(|observer| observer) + .await + } + .boxed() + }); + futures::pin_mut!(fut); + + assert_matches!(futures::poll!(&mut fut), Poll::Pending); + + handle.cancel(); + assert_matches!(futures::poll!(&mut fut), Poll::Ready(None)); + } + + #[tokio::test] + async fn test_structured_cancellation_is_reentrant() { + let (fut, handle) = make_cancellable_future(|cancellations| { + { + async move { + cancellations + .with_structured_cancellation(|o1| async move { + cancellations + .with_structured_cancellation(|o2| async move { + o2.await; + o1.await; + }) + .await; + }) + .await; + } + .boxed() + } + }); + futures::pin_mut!(fut); + + assert_matches!(futures::poll!(&mut fut), Poll::Pending); + + handle.cancel(); + assert_matches!(futures::poll!(&mut fut), Poll::Ready(..)); + } + + #[tokio::test] + async fn test_structured_cancellation_with_critical_section() { + let (fut, handle) = make_cancellable_future(|cancellations| { + async move { + cancellations + .critical_section(|| async move { + cancellations + .with_structured_cancellation(|observer| async move { + let mut panic = MaybePanicOnDrop { panic: true }; + tokio::task::yield_now().await; + panic.panic = false; + + // we should get the cancel notification + observer.await; + }) + .await; + }) + .await + } + .boxed() + }); + futures::pin_mut!(fut); + + // Proceed all the way to the first pending. + assert_matches!(futures::poll!(&mut fut), Poll::Pending); + + // Drop our guard. We should resume and disarm the guard. 
+ handle.cancel(); + assert_matches!(futures::poll!(&mut fut), Poll::Ready(None)); + } + + #[tokio::test] + async fn test_structured_cancellation_can_be_reentered() { + let (fut, handle) = make_cancellable_future(|cancellations| { + async { + cancellations + .with_structured_cancellation(|_o1| async move {}) + .await; + cancellations + .with_structured_cancellation(|o2| async move { + o2.await; + }) + .await; + } + .boxed() + }); + futures::pin_mut!(fut); + + assert_matches!(futures::poll!(&mut fut), Poll::Pending); + + handle.cancel(); + assert_matches!(futures::poll!(&mut fut), Poll::Ready(..)); + } + + #[tokio::test] + async fn test_structured_cancellation_works_after_cancel() { + let (fut, handle) = make_cancellable_future(|cancellations| { + async move { + cancellations + .with_structured_cancellation(|_o1| async move { + tokio::task::yield_now().await; + // At this point we'll get cancelled. + cancellations + .with_structured_cancellation(|o2| async move { + o2.await; + }) + .await; + }) + .await; + } + .boxed() + }); + futures::pin_mut!(fut); + + assert_matches!(futures::poll!(&mut fut), Poll::Pending); + + handle.cancel(); + assert_matches!(futures::poll!(&mut fut), Poll::Ready(None)); + } + + #[tokio::test] + async fn test_disable_cancellation() { + let (fut, handle) = make_cancellable_future(|cancellations| { + async move { + assert!(cancellations.try_to_keep_going_on_cancellation().is_some()); + tokio::task::yield_now().await; + } + .boxed() + }); + futures::pin_mut!(fut); + + assert_matches!(futures::poll!(&mut fut), Poll::Pending); + + handle.cancel(); + assert_matches!(futures::poll!(&mut fut), Poll::Ready(Some(()))); + } + + #[tokio::test] + async fn test_disable_cancellation_already_canceled() { + let (fut, handle) = make_cancellable_future(|cancellations| { + async move { + assert!(cancellations.try_to_keep_going_on_cancellation().is_none()); + tokio::task::yield_now().await; + panic!("already canceled") + } + .boxed() + }); + futures::pin_mut!(fut); + + handle.cancel(); + assert_matches!(futures::poll!(&mut fut), Poll::Ready(None)); + } + + #[tokio::test] + async fn test_disable_cancellation_synced_with_structured_cancellation_already_cancelled() { + let (fut, handle) = make_cancellable_future(|cancellations| { + async move { + cancellations + .with_structured_cancellation(|obs| async move { + tokio::task::yield_now().await; + futures::pin_mut!(obs); + assert_matches!(futures::poll!(&mut obs), Poll::Ready(())); + + assert!(cancellations.try_to_keep_going_on_cancellation().is_none()); + }) + .await; + } + .boxed() + }); + futures::pin_mut!(fut); + + assert_matches!(futures::poll!(&mut fut), Poll::Pending); + + handle.cancel(); + assert_matches!(futures::poll!(&mut fut), Poll::Ready(None)); + } + + #[tokio::test] + async fn test_disable_cancellation_synced_with_structured_cancellation_not_cancelled() { + let (fut, handle) = make_cancellable_future(|cancellations| { + async move { + assert!(cancellations.try_to_keep_going_on_cancellation().is_some()); + + tokio::task::yield_now().await; + + cancellations + .with_structured_cancellation(|obs| async move { + futures::pin_mut!(obs); + assert_matches!(futures::poll!(&mut obs), Poll::Pending); + + assert!(cancellations.try_to_keep_going_on_cancellation().is_some()); + }) + .await; + } + .boxed() + }); + futures::pin_mut!(fut); + + assert_matches!(futures::poll!(&mut fut), Poll::Pending); + + handle.cancel(); + + assert_matches!(futures::poll!(&mut fut), Poll::Ready(Some(()))); + } + + #[tokio::test] + async fn 
test_finished_future_dropped_when_ready() { + #[pin_project(PinnedDrop)] + struct DropFuture(Arc); + + impl Future for DropFuture { + type Output = (); + + fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { + Poll::Ready(()) + } + } + + #[pinned_drop] + impl PinnedDrop for DropFuture { + fn drop(self: Pin<&mut Self>) { + self.0.store(true, Ordering::SeqCst); + } + } + + let is_dropped = Arc::new(AtomicBool::new(false)); + let fut = DropFuture(is_dropped.dupe()); + + let (fut, _handle) = make_cancellable_future(|_cancellations| fut.boxed()); + futures::pin_mut!(fut); + + assert_matches!(futures::poll!(&mut fut), Poll::Ready(Some(()))); + + assert!(is_dropped.load(Ordering::SeqCst)); + } + + #[tokio::test] + async fn test_finished_future_dropped_when_cancelled() { + struct DropFuture(Arc); + + impl Future for DropFuture { + type Output = (); + + fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { + Poll::Pending + } + } + + impl Drop for DropFuture { + fn drop(&mut self) { + self.0.store(true, Ordering::SeqCst); + } + } + + let is_dropped = Arc::new(AtomicBool::new(false)); + let fut = DropFuture(is_dropped.dupe()); + + let (fut, handle) = make_cancellable_future(|_cancellations| fut.boxed()); + + futures::pin_mut!(fut); + assert_matches!(futures::poll!(&mut fut), Poll::Pending); + + handle.cancel(); + + assert_matches!(futures::poll!(&mut fut), Poll::Ready(None)); + assert!(is_dropped.load(Ordering::SeqCst)); + } + + #[tokio::test] + async fn test_lambda_is_ran_without_poll() { + let mut panic = MaybePanicOnDrop { panic: true }; + tokio::task::yield_now().await; + panic.panic = false; + + let (fut, handle) = make_cancellable_future(move |_cancellations| { + panic.panic = false; + + async move { + panic!("polled"); + } + .boxed() + }); + futures::pin_mut!(fut); + + // cancel before any polls + handle.cancel(); + assert_matches!(futures::poll!(&mut fut), Poll::Ready(None)); + } + + #[tokio::test] + async fn test_critical_section_via_prevent_cancellation() { + let (fut, handle) = make_cancellable_future(|cancellations| { + async { + { + let prevent_cancellation = cancellations.begin_ignore_cancellation(); + tokio::task::yield_now().await; + + prevent_cancellation.allow_cancellations_again().await; + } + futures::future::pending::<()>().await + } + .boxed() + }); + futures::pin_mut!(fut); + + // We reach the first yield. At this point there is one guard held by the critical section. + assert_matches!(futures::poll!(&mut fut), Poll::Pending); + + // Cancel, then poll again. Cancellation is checked, *then* the guard in the future + // is dropped and then immediately check for cancellation and yield. + handle.cancel(); + + // Poll again, this time we don't enter the future's poll because it is cancelled. + assert_matches!(futures::poll!(&mut fut), Poll::Ready(None)); + } + + #[tokio::test] + async fn test_prevent_cancellation_drop_is_allowed() { + let (fut, handle) = make_cancellable_future(|cancellations| { + async { + let prevent_cancellation = cancellations.begin_ignore_cancellation(); + drop(prevent_cancellation); + + futures::future::pending::<()>().await + } + .boxed() + }); + + futures::pin_mut!(fut); + // We reach the first yield. 
+ assert_matches!(futures::poll!(&mut fut), Poll::Pending); + + handle.cancel(); + + fut.await; + } + + #[tokio::test] + async fn test_prevent_cancellation_is_reentrant() { + let mut panic = MaybePanicOnDrop { panic: true }; + tokio::task::yield_now().await; + panic.panic = false; + + let (fut, handle) = make_cancellable_future(|cancellations| { + async move { + { + let prevent1 = cancellations.begin_ignore_cancellation(); + let prevent2 = cancellations.begin_ignore_cancellation(); + + tokio::task::yield_now().await; + + prevent1.allow_cancellations_again().await; + + panic.panic = false; + + prevent2.allow_cancellations_again().await; + } + futures::future::pending::<()>().await + } + .boxed() + }); + futures::pin_mut!(fut); + + // We reach the first yield. + assert_matches!(futures::poll!(&mut fut), Poll::Pending); + + handle.cancel(); + let res = fut.await; + + assert_eq!(res, None); + } + + #[tokio::test] + async fn test_prevent_cancellation_cancellation_observer_notifies() { + let (fut, handle) = make_cancellable_future(|cancellations| { + async { + let prevent_cancellation = cancellations.begin_ignore_cancellation(); + prevent_cancellation.cancellation_observer().await; + } + .boxed() + }); + futures::pin_mut!(fut); + + // Proceed all the way to awaiting the observer + assert_matches!(futures::poll!(&mut fut), Poll::Pending); + + // Drop our guard. At this point we'll cancel, and notify the observer. + handle.cancel(); + assert_matches!(futures::poll!(&mut fut), Poll::Ready(..)); + } + + #[tokio::test] + async fn test_cancellation_observer_wakes_up_other_tasks() { + let (fut, handle) = make_cancellable_future(|cancellations| { + async { + let prevent_cancellation = cancellations.begin_ignore_cancellation(); + let observer = prevent_cancellation.cancellation_observer(); + + let _ignore = tokio::spawn(observer).await; + } + .boxed() + }); + futures::pin_mut!(fut); + + // Proceed all the way to awaiting the observer + assert_matches!(futures::poll!(&mut fut), Poll::Pending); + + // Drop our guard. At this point we'll cancel, and notify the observer. + handle.cancel(); + + fut.await; + } +} diff --git a/shed/more_futures/src/drop.rs b/app/buck2_futures/src/drop.rs similarity index 100% rename from shed/more_futures/src/drop.rs rename to app/buck2_futures/src/drop.rs diff --git a/shed/more_futures/src/instrumented_shared.rs b/app/buck2_futures/src/instrumented_shared.rs similarity index 99% rename from shed/more_futures/src/instrumented_shared.rs rename to app/buck2_futures/src/instrumented_shared.rs index 07009119a43ac..0ce21ae92b92e 100644 --- a/shed/more_futures/src/instrumented_shared.rs +++ b/app/buck2_futures/src/instrumented_shared.rs @@ -153,9 +153,7 @@ mod tests { use buck2_events::dispatch::EventDispatcher; use buck2_events::source::ChannelEventSource; use buck2_events::BuckEvent; - use buck2_wrapper_common::invocation_id::TraceId; use futures::future::BoxFuture; - use futures::FutureExt; use tokio::sync::Barrier; use tokio::sync::Mutex; diff --git a/app/buck2_futures/src/lib.rs b/app/buck2_futures/src/lib.rs new file mode 100644 index 0000000000000..a08cffffad813 --- /dev/null +++ b/app/buck2_futures/src/lib.rs @@ -0,0 +1,20 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +#![feature(assert_matches)] +#![feature(pin_deref_mut)] + +pub mod cancellable_future; +pub mod cancellation; +pub mod drop; +pub mod instrumented_shared; +mod maybe_future; +pub mod owning_future; +pub mod spawn; +pub mod spawner; diff --git a/shed/more_futures/src/maybe_future.rs b/app/buck2_futures/src/maybe_future.rs similarity index 100% rename from shed/more_futures/src/maybe_future.rs rename to app/buck2_futures/src/maybe_future.rs diff --git a/shed/more_futures/src/owning_future.rs b/app/buck2_futures/src/owning_future.rs similarity index 99% rename from shed/more_futures/src/owning_future.rs rename to app/buck2_futures/src/owning_future.rs index 1087892d4aaa7..4b74a1d60f2f7 100644 --- a/shed/more_futures/src/owning_future.rs +++ b/app/buck2_futures/src/owning_future.rs @@ -12,7 +12,6 @@ //! This future is intended to be spawned on tokio-runtime directly, and for its results to be //! accessed via the joinhandle. //! It is not intended to be polled directly. -//! use std::future::Future; use std::marker::PhantomPinned; diff --git a/shed/more_futures/src/spawn.rs b/app/buck2_futures/src/spawn.rs similarity index 99% rename from shed/more_futures/src/spawn.rs rename to app/buck2_futures/src/spawn.rs index dd80b6df8e4af..1f3d8080a781c 100644 --- a/shed/more_futures/src/spawn.rs +++ b/app/buck2_futures/src/spawn.rs @@ -9,7 +9,6 @@ //! The future that is spawned, but has various more strict cancellation behaviour than //! tokio's JoinHandle -//! use std::any::Any; use std::pin::Pin; diff --git a/shed/more_futures/src/spawner.rs b/app/buck2_futures/src/spawner.rs similarity index 100% rename from shed/more_futures/src/spawner.rs rename to app/buck2_futures/src/spawner.rs diff --git a/app/buck2_grpc/BUCK b/app/buck2_grpc/BUCK index 7951109d0fce3..259e6b018389e 100644 --- a/app/buck2_grpc/BUCK +++ b/app/buck2_grpc/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") diff --git a/app/buck2_grpc/Cargo.toml b/app/buck2_grpc/Cargo.toml index ea1d4c374683d..89a6f293e7a98 100644 --- a/app/buck2_grpc/Cargo.toml +++ b/app/buck2_grpc/Cargo.toml @@ -1,7 +1,9 @@ [package] +edition = "2021" +license = { workspace = true } name = "buck2_grpc" +repository = { workspace = true } version = "0.1.0" -edition = "2021" [dependencies] anyhow = { workspace = true } @@ -12,12 +14,5 @@ tokio = { workspace = true } tonic = { workspace = true } tower = { workspace = true } -gazebo_lint.optional = true -# @oss-disable: gazebo_lint.path = "../../gazebo_lint/gazebo_lint" -gazebo_lint.version = "0.1" - [dev-dependencies] assert_matches = { workspace = true } - -[features] -# @oss-disable: default = ["gazebo_lint"] diff --git a/app/buck2_grpc/src/lib.rs b/app/buck2_grpc/src/lib.rs index b8610befa0d8c..5c4110e695511 100644 --- a/app/buck2_grpc/src/lib.rs +++ b/app/buck2_grpc/src/lib.rs @@ -7,6 +7,8 @@ * of this source tree. 
*/ +#![feature(error_generic_member_access)] + mod channel; mod server; mod util; diff --git a/app/buck2_grpc/src/server.rs b/app/buck2_grpc/src/server.rs index 7beb9391ef27d..568f58dc612cb 100644 --- a/app/buck2_grpc/src/server.rs +++ b/app/buck2_grpc/src/server.rs @@ -46,7 +46,6 @@ impl ServerHandle { pub fn spawn_oneshot(io: T, router: Router) -> ServerHandle where T: AsyncRead + AsyncWrite + Send + Unpin + 'static + tonic::transport::server::Connected, - L: Layer + Send + 'static, L::Service: Service< http::Request, diff --git a/app/buck2_http/BUCK b/app/buck2_http/BUCK new file mode 100644 index 0000000000000..137201717a357 --- /dev/null +++ b/app/buck2_http/BUCK @@ -0,0 +1,61 @@ +load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") + +oncall("build_infra") + +rust_library( + name = "buck2_http", + srcs = glob(["src/**/*.rs"]), + os_deps = [ + ( + "linux", + [ + "fbsource//third-party/rust:hyper-unix-connector", + ], + ), + ( + "macos", + [ + "fbsource//third-party/rust:hyper-unix-connector", + ], + ), + ], + test_deps = [ + "fbsource//third-party/rust:httptest", + ], + test_os_deps = [ + ( + "linux", + [ + "fbsource//third-party/rust:tempfile", + ], + ), + ( + "macos", + [ + "fbsource//third-party/rust:tempfile", + ], + ), + ], + deps = [ + "fbsource//third-party/rust:anyhow", + "fbsource//third-party/rust:bytes", + "fbsource//third-party/rust:futures", + "fbsource//third-party/rust:http", + "fbsource//third-party/rust:hyper", + "fbsource//third-party/rust:hyper-proxy", + "fbsource//third-party/rust:hyper-rustls", + "fbsource//third-party/rust:hyper-timeout", + "fbsource//third-party/rust:ipnetwork", + "fbsource//third-party/rust:pin-project", + "fbsource//third-party/rust:rustls", + "fbsource//third-party/rust:tokio", + "fbsource//third-party/rust:tokio-rustls", + "fbsource//third-party/rust:tokio-util", + "fbsource//third-party/rust:tracing", + "//buck2/allocative/allocative:allocative", + "//buck2/app/buck2_certs:buck2_certs", + "//buck2/app/buck2_error:buck2_error", + "//buck2/gazebo/dupe:dupe", + # @oss-disable: "//common/rust/cpe:cpe", + ], +) diff --git a/app/buck2_http/Cargo.toml b/app/buck2_http/Cargo.toml new file mode 100644 index 0000000000000..ce73bfc44e412 --- /dev/null +++ b/app/buck2_http/Cargo.toml @@ -0,0 +1,41 @@ +[package] +edition = "2021" +license = { workspace = true } +name = "buck2_http" +repository = { workspace = true } +version = "0.1.0" + +[dependencies] +anyhow = { workspace = true } +bytes = { workspace = true } +futures = { workspace = true } +http = { workspace = true } +hyper = { workspace = true } +hyper-proxy = { workspace = true } +hyper-rustls = { workspace = true } +hyper-timeout = { workspace = true } +ipnetwork = { workspace = true } +pin-project = { workspace = true } +rustls = { workspace = true } +tokio = { workspace = true } +tokio-rustls = { workspace = true } +tokio-util = { workspace = true } +tracing = { workspace = true } + +allocative = { workspace = true } +dupe = { workspace = true } + +buck2_certs = { workspace = true } +buck2_error = { workspace = true } + +[target.'cfg(unix)'.dependencies] +hyper-unix-connector = { workspace = true } + +[dev-dependencies] +httptest = { workspace = true } + +[target.'cfg(unix)'.dev-dependencies] +tempfile = { workspace = true } + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ["cfg(fbcode_build)"] } diff --git a/app/buck2_http/src/client.rs b/app/buck2_http/src/client.rs new file mode 100644 index 0000000000000..e10f2991d8903 --- /dev/null +++ 
b/app/buck2_http/src/client.rs
@@ -0,0 +1,1002 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::sync::Arc;
+
+use allocative::Allocative;
+use anyhow::Context;
+use bytes::Bytes;
+use dupe::Dupe;
+use futures::stream::BoxStream;
+use futures::StreamExt;
+use futures::TryStreamExt;
+use http::request::Builder;
+use http::uri::Scheme;
+use http::Method;
+use http::Uri;
+use hyper::client::connect::Connect;
+use hyper::client::ResponseFuture;
+use hyper::Body;
+use hyper::Request;
+use hyper::Response;
+use tokio::io::AsyncReadExt;
+use tokio_util::io::StreamReader;
+
+use crate::redirect::PendingRequest;
+use crate::redirect::RedirectEngine;
+use crate::stats::CountingStream;
+use crate::stats::HttpNetworkStats;
+use crate::x2p::X2PAgentError;
+use crate::HttpError;
+
+mod builder;
+pub use builder::HttpClientBuilder;
+
+const DEFAULT_USER_AGENT: &str = "Buck2";
+
+#[derive(Allocative, Clone, Dupe)]
+pub struct HttpClient {
+    // hyper::Client doesn't impl Allocative.
+    #[allocative(skip)]
+    inner: Arc<dyn RequestClient>,
+    max_redirects: Option<usize>,
+    supports_vpnless: bool,
+    http2: bool,
+    stats: HttpNetworkStats,
+}
+
+impl HttpClient {
+    fn request_builder(&self, uri: &str) -> Builder {
+        Request::builder()
+            .uri(uri)
+            .header(http::header::USER_AGENT, DEFAULT_USER_AGENT)
+    }
+
+    /// Send a HEAD request. Assumes no body will be returned. If one is returned, it will be ignored.
+    pub async fn head(&self, uri: &str) -> Result<Response<()>, HttpError> {
+        let req = self
+            .request_builder(uri)
+            .method(Method::HEAD)
+            .body(Bytes::new())
+            .map_err(HttpError::BuildRequest)?;
+        self.request(req).await.map(|resp| resp.map(|_| ()))
+    }
+
+    /// Send a GET request.
+    pub async fn get(
+        &self,
+        uri: &str,
+    ) -> Result<Response<BoxStream<'static, hyper::Result<Bytes>>>, HttpError> {
+        let req = self
+            .request_builder(uri)
+            .method(Method::GET)
+            .body(Bytes::new())
+            .map_err(HttpError::BuildRequest)?;
+        self.request(req).await
+    }
+
+    pub async fn post(
+        &self,
+        uri: &str,
+        body: Bytes,
+        headers: Vec<(String, String)>,
+    ) -> Result<Response<BoxStream<'static, hyper::Result<Bytes>>>, HttpError> {
+        let mut builder = self.request_builder(uri).method(Method::POST);
+        for (name, value) in headers {
+            builder = builder.header(name, value);
+        }
+        let req = builder.body(body).map_err(HttpError::BuildRequest)?;
+        self.request(req).await
+    }
+
+    pub async fn put(
+        &self,
+        uri: &str,
+        body: Bytes,
+        headers: Vec<(String, String)>,
+    ) -> Result<Response<BoxStream<'static, hyper::Result<Bytes>>>, HttpError> {
+        let mut builder = self.request_builder(uri).method(Method::PUT);
+        for (name, value) in headers {
+            builder = builder.header(name, value);
+        }
+        let req = builder.body(body).map_err(HttpError::BuildRequest)?;
+        self.request(req).await
+    }
+
+    async fn send_request_impl(
+        &self,
+        mut request: Request<Bytes>,
+    ) -> Result<Response<BoxStream<'static, hyper::Result<Bytes>>>, HttpError> {
+        let uri = request.uri().to_string();
+        let now = tokio::time::Instant::now();
+
+        // x2p requires scheme to be http since it handles all TLS.
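// Editor's illustration (not part of the diff): for a vpnless client the
// mutation below is a pure scheme swap performed by `change_scheme_to_http`,
// defined later in this file; e.g., with a placeholder URL:
//
//     https://example.com/artifact.tgz  ->  http://example.com/artifact.tgz
//
// Authority and path are untouched; the local x2p agent terminates TLS.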
+        if self.supports_vpnless() {
+            tracing::debug!(
+                "http: request: changing scheme for '{}' to http for vpnless",
+                request.uri()
+            );
+            change_scheme_to_http(&mut request)?;
+        }
+        let resp = self.inner.request(request).await.map_err(|e| {
+            if is_hyper_error_due_to_timeout(&e) {
+                HttpError::Timeout {
+                    uri,
+                    duration: now.elapsed().as_secs(),
+                }
+            } else {
+                HttpError::SendRequest { uri, source: e }
+            }
+        })?;
+        Ok(
+            resp.map(|body| {
+                CountingStream::new(body, self.stats.downloaded_bytes().dupe()).boxed()
+            }),
+        )
+    }
+
+    /// Send a generic request.
+    pub async fn request(
+        &self,
+        request: Request<Bytes>,
+    ) -> Result<Response<BoxStream<'static, hyper::Result<Bytes>>>, HttpError> {
+        let pending_request = PendingRequest::from_request(&request);
+        let uri = request.uri().clone();
+        tracing::debug!("http: request: {:?}", request);
+        let resp = self.send_request_impl(request).await?;
+        tracing::debug!("http: response: {:?}", resp.status());
+
+        // Handle redirects up to self.max_redirects times.
+        let resp = if let Some(max_redirects) = self.max_redirects {
+            let redirect_engine = RedirectEngine::new(max_redirects, pending_request, resp);
+            redirect_engine
+                .handle_redirects(|req| self.send_request_impl(req))
+                .await?
+        } else {
+            resp
+        };
+
+        if !resp.status().is_success() {
+            // Handle x2p errors as indicated by headers.
+            if let Some(x2p_err) = X2PAgentError::from_headers(&uri, resp.headers()) {
+                return Err(HttpError::X2P {
+                    uri: uri.to_string(),
+                    source: x2p_err,
+                });
+            }
+
+            let status = resp.status();
+            let text = read_truncated_error_response(resp).await;
+            return Err(HttpError::Status {
+                status,
+                uri: uri.to_string(),
+                text,
+            });
+        }
+
+        Ok(resp)
+    }
+
+    pub fn stats(&self) -> &HttpNetworkStats {
+        &self.stats
+    }
+
+    /// Whether this client supports vpnless operation. When set, will make requests
+    /// to the `vpnless_url` attribute in the `download_file` action rather than the
+    /// normal `url` attribute.
+    pub fn supports_vpnless(&self) -> bool {
+        self.supports_vpnless
+    }
+
+    pub fn http2(&self) -> bool {
+        self.http2
+    }
+}
+
+/// Trait wrapper around a hyper::Client because hyper::Client is parameterized by
+/// the connector. At runtime, we want to pick different connectors (e.g. HttpsConnector,
+/// ProxyConnector<HttpsConnector<HttpConnector>>, etc); thus wrap the client so we can switch
+/// out the concrete type without exposing implementation details to callers.
+pub(super) trait RequestClient: Send + Sync {
+    fn request(&self, request: Request<Bytes>) -> ResponseFuture;
+}
+
+impl<C> RequestClient for hyper::Client<C>
+where
+    C: Connect + Clone + Send + Sync + 'static,
+{
+    fn request(&self, request: Request<Bytes>) -> ResponseFuture {
+        self.request(request.map(Body::from))
+    }
+}
+
+async fn read_truncated_error_response(
+    mut resp: Response<BoxStream<'_, hyper::Result<Bytes>>>,
+) -> String {
+    let read = StreamReader::new(
+        resp.body_mut()
+            .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)),
+    );
+    let mut buf = Vec::with_capacity(1024);
+    read.take(1024).read_to_end(&mut buf).await.map_or_else(
+        |e| format!("Error decoding response: {:?}", e),
+        |_| String::from_utf8_lossy(buf.as_ref()).into_owned(),
+    )
+}
+
+/// Helper function to consume a response stream and convert it to a Bytes container.
+/// Warning: This does no length checking (like hyper::body::to_bytes). Should
+/// only be used for trusted endpoints.
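// A usage sketch (editor's illustration, not part of the diff): pairing `get`
// with `to_bytes` to buffer a small response in memory. The URL is a
// placeholder:
//
//     let client = HttpClientBuilder::https_with_system_roots().await?.build();
//     let resp = client.get("https://example.com/manifest.json").await?;
//     let bytes = to_bytes(resp.into_body()).await?;
//
// Per the warning above, no length checking is performed, so this is only
// appropriate for endpoints you control.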
+pub async fn to_bytes(body: BoxStream<'_, hyper::Result<Bytes>>) -> anyhow::Result<Bytes> {
+    let mut reader =
+        StreamReader::new(body.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)));
+    let mut buf = Vec::new();
+    reader
+        .read_to_end(&mut buf)
+        .await
+        .context("Reading response body")?;
+    Ok(buf.into())
+}
+
+/// x2pagent proxies only speak plain HTTP, so we need to mutate requests prior
+/// to sending them off.
+fn change_scheme_to_http(request: &mut Request<Bytes>) -> Result<(), HttpError> {
+    let uri = request.uri().clone();
+    let uri_for_error = uri.clone();
+    let mut parts = uri.into_parts();
+    parts.scheme = Some(Scheme::HTTP);
+    *request.uri_mut() = Uri::from_parts(parts).map_err(|e| HttpError::InvalidUriParts {
+        uri: uri_for_error.to_string(),
+        source: e,
+    })?;
+    Ok(())
+}
+
+/// Helper function to check if any error in the chain of errors produced by
+/// hyper is due to a timeout.
+fn is_hyper_error_due_to_timeout(e: &hyper::Error) -> bool {
+    use std::error::Error;
+
+    let mut cause = e.source();
+    while let Some(err) = cause {
+        if let Some(io_err) = err.downcast_ref::<std::io::Error>() {
+            if let std::io::ErrorKind::TimedOut = io_err.kind() {
+                return true;
+            }
+        }
+        cause = err.source();
+    }
+
+    false
+}
+
+#[cfg(test)]
+mod tests {
+    use http::StatusCode;
+    use httptest::matchers::*;
+    use httptest::responders;
+    use httptest::Expectation;
+
+    use super::*;
+
+    #[test]
+    fn test_change_scheme_to_http_succeeds() -> anyhow::Result<()> {
+        let mut request = Request::builder()
+            .method(Method::GET)
+            .uri("https://some.site/foo")
+            .body(Bytes::new())?;
+        change_scheme_to_http(&mut request)?;
+
+        assert_eq!(
+            Scheme::HTTP,
+            *request
+                .uri()
+                .scheme()
+                .expect("should have scheme after mutating request")
+        );
+        Ok(())
+    }
+
+    #[test]
+    fn test_change_scheme_to_http_no_effect() -> anyhow::Result<()> {
+        let uri: Uri = "http://some.site/foo".try_into()?;
+        let mut request = Request::builder()
+            .method(Method::GET)
+            .uri(uri.clone())
+            .body(Bytes::new())?;
+        change_scheme_to_http(&mut request)?;
+
+        assert_eq!(&uri, request.uri());
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_simple_get_success() -> anyhow::Result<()> {
+        let test_server = httptest::Server::run();
+        test_server.expect(
+            Expectation::matching(request::method_path("GET", "/foo"))
+                .respond_with(responders::status_code(200)),
+        );
+
+        let client = HttpClientBuilder::https_with_system_roots().await?.build();
+        let resp = client.get(&test_server.url_str("/foo")).await?;
+        assert_eq!(200, resp.status().as_u16());
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_simple_put_success() -> anyhow::Result<()> {
+        let test_server = httptest::Server::run();
+        test_server.expect(
+            Expectation::matching(all_of![
+                request::method_path("PUT", "/foo"),
+                request::body("Hello, world!")
+            ])
+            .respond_with(responders::status_code(200)),
+        );
+
+        let client = HttpClientBuilder::https_with_system_roots().await?.build();
+        let bytes = Bytes::from_static(b"Hello, world!");
+        let resp = client
+            .put(
+                &test_server.url_str("/foo"),
+                bytes,
+                vec![("key".to_owned(), "value".to_owned())],
+            )
+            .await?;
+        assert_eq!(200, resp.status().as_u16());
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_simple_post_success() -> anyhow::Result<()> {
+        let test_server = httptest::Server::run();
+        test_server.expect(
+            Expectation::matching(all_of![
+                request::method_path("POST", "/foo"),
+                request::body("Hello, world!")
+            ])
+            .respond_with(responders::status_code(200)),
+        );
+
+        let client =
HttpClientBuilder::https_with_system_roots().await?.build(); + let bytes = Bytes::from_static(b"Hello, world!"); + let resp = client + .post( + &test_server.url_str("/foo"), + bytes, + vec![("key".to_owned(), "value".to_owned())], + ) + .await?; + assert_eq!(200, resp.status().as_u16()); + + Ok(()) + } + + #[tokio::test] + async fn test_404_not_found_is_error() -> anyhow::Result<()> { + let test_server = httptest::Server::run(); + test_server.expect( + Expectation::matching(request::method_path("GET", "/foo")) + .respond_with(responders::status_code(404)), + ); + + let client = HttpClientBuilder::https_with_system_roots().await?.build(); + let url = test_server.url_str("/foo"); + let result = client.get(&url).await; + assert!(result.is_err()); + if let HttpError::Status { status, uri, text } = result.as_ref().err().unwrap() { + assert_eq!(StatusCode::NOT_FOUND, *status); + assert_eq!(url.to_owned(), *uri); + assert!(text.is_empty()); + } else { + unreachable!( + "Expected HttpError::Status, got {:?}", + result.err().unwrap() + ); + } + + Ok(()) + } + + #[tokio::test] + async fn test_count_response_size() -> anyhow::Result<()> { + let test_server = httptest::Server::run(); + test_server.expect( + Expectation::matching(request::method_path("GET", "/foo")) + .times(2) + // Response body is 100 bytes in size. + .respond_with(responders::status_code(200).body(vec![0; 100])), + ); + + let client = HttpClientBuilder::https_with_system_roots().await?.build(); + let mut resp = client.get(&test_server.url_str("/foo")).await?; + + // Consume the stream so we trigger a count. + while (resp.body_mut().next().await).is_some() {} + assert_eq!(100, client.stats().get_downloaded_bytes()); + + let mut resp = client.get(&test_server.url_str("/foo")).await?; + + // Consume the stream so we trigger a count. + while (resp.body_mut().next().await).is_some() {} + assert_eq!(200, client.stats().get_downloaded_bytes()); + + Ok(()) + } + + #[tokio::test] + async fn test_follows_redirects() -> anyhow::Result<()> { + let test_server = httptest::Server::run(); + // Chain of two redirects /foo -> /bar -> /baz. + test_server.expect( + Expectation::matching(request::method_path("GET", "/foo")) + .times(1) + .respond_with( + responders::status_code(302).append_header(http::header::LOCATION, "/bar"), + ), + ); + test_server.expect( + Expectation::matching(request::method_path("GET", "/bar")) + .times(1) + .respond_with( + responders::status_code(302).append_header(http::header::LOCATION, "/baz"), + ), + ); + test_server.expect( + Expectation::matching(request::method_path("GET", "/baz")) + .times(1) + .respond_with(responders::status_code(200)), + ); + + let client = HttpClientBuilder::https_with_system_roots() + .await? + .with_max_redirects(10) + .build(); + let resp = client.get(&test_server.url_str("/foo")).await?; + assert_eq!(200, resp.status().as_u16()); + + Ok(()) + } + + #[tokio::test] + async fn test_head_changes_to_get_on_redirect() -> anyhow::Result<()> { + let test_server = httptest::Server::run(); + // Chain of two redirects /foo -> /bar -> /baz. + test_server.expect( + Expectation::matching(request::method_path("HEAD", "/foo")) + .times(1) + .respond_with( + responders::status_code(302).append_header(http::header::LOCATION, "/bar"), + ), + ); + test_server.expect( + Expectation::matching(request::method_path("GET", "/bar")) + .times(1) + .respond_with(responders::status_code(200)), + ); + + let client = HttpClientBuilder::https_with_system_roots() + .await? 
+ .with_max_redirects(10) + .build(); + let resp = client.head(&test_server.url_str("/foo")).await?; + assert_eq!(200, resp.status().as_u16()); + + Ok(()) + } + + #[tokio::test] + async fn test_post_gets_redirected() -> anyhow::Result<()> { + let test_server = httptest::Server::run(); + // Redirect /foo -> /bar + test_server.expect( + Expectation::matching(all_of![ + request::method_path("POST", "/foo"), + request::body("Hello, world!"), + ]) + .times(1) + .respond_with( + responders::status_code(307).append_header(http::header::LOCATION, "/bar"), + ), + ); + test_server.expect( + Expectation::matching(all_of![ + request::method_path("POST", "/bar"), + request::body("Hello, world!"), + request::headers(not(contains(key(hyper::header::ORIGIN.as_str())))), + request::headers(not(contains(key(hyper::header::AUTHORIZATION.as_str())))), + request::headers(not(contains(key(hyper::header::WWW_AUTHENTICATE.as_str())))), + request::headers(not(contains(key(hyper::header::COOKIE.as_str())))), + request::headers(not(contains(key( + hyper::header::PROXY_AUTHORIZATION.as_str() + )))), + ]) + .times(1) + .respond_with(responders::status_code(200)), + ); + + let client = HttpClientBuilder::https_with_system_roots() + .await? + .with_max_redirects(10) + .build(); + let bytes = Bytes::from_static(b"Hello, world!"); + let resp = client + .post( + &test_server.url_str("/foo"), + bytes, + vec![("key".to_owned(), "value".to_owned())], + ) + .await?; + assert_eq!(200, resp.status().as_u16()); + + Ok(()) + } + + #[tokio::test] + async fn test_too_many_redirects_fails() -> anyhow::Result<()> { + let test_server = httptest::Server::run(); + // Chain of three redirects /foo -> /bar -> /baz -> /boo. + test_server.expect( + Expectation::matching(request::method_path("GET", "/foo")) + .times(1) + .respond_with( + responders::status_code(302).append_header(http::header::LOCATION, "/bar"), + ), + ); + test_server.expect( + Expectation::matching(request::method_path("GET", "/bar")) + .times(1) + .respond_with( + responders::status_code(302).append_header(http::header::LOCATION, "/baz"), + ), + ); + test_server.expect( + Expectation::matching(request::method_path("GET", "/baz")) + .times(1) + .respond_with( + responders::status_code(302).append_header(http::header::LOCATION, "/boo"), + ), + ); + test_server.expect( + Expectation::matching(request::method_path("GET", "/boo")) + .times(0) + .respond_with(responders::status_code(200)), + ); + + let client = HttpClientBuilder::https_with_system_roots() + .await? + .with_max_redirects(1) + .build(); + let url = test_server.url_str("/foo"); + let result = client.get(&url).await; + if let HttpError::TooManyRedirects { uri, max_redirects } = result.as_ref().err().unwrap() { + assert_eq!(url.to_owned(), *uri); + assert_eq!(1, *max_redirects); + } else { + unreachable!( + "Expected HttpError::TooManyRedirects, got {:?}", + result.err().unwrap() + ); + } + + Ok(()) + } + + #[cfg(unix)] + mod unix { + use std::convert::Infallible; + use std::path::PathBuf; + + use hyper::service::make_service_fn; + use hyper::service::service_fn; + use hyper::Server; + use hyper_unix_connector::UnixConnector; + + use super::*; + + /// Conceptually similar to crate::http::tests::ProxyServer, but sets up a + /// local unix domain socket instead. + pub struct UnixSocketProxyServer { + pub socket: PathBuf, + // Need to hold a ref so when Drop runs on Self we cancel the task. + #[allow(dead_code)] + handle: tokio::task::JoinHandle<()>, + // Need to hold ref so socket doesn't get removed. 
+            #[allow(dead_code)]
+            tempdir: tempfile::TempDir,
+        }
+
+        impl UnixSocketProxyServer {
+            pub async fn new() -> anyhow::Result<Self> {
+                let tempdir = tempfile::tempdir()?;
+                let socket = tempdir.path().join("test-uds.sock");
+
+                let listener: UnixConnector = tokio::net::UnixListener::bind(&socket)
+                    .context("binding to unix socket")?
+                    .into();
+                let handler_func = make_service_fn(|_conn| async move {
+                    Ok::<_, Infallible>(service_fn(|mut req: Request<Body>| async move {
+                        let client = hyper::Client::new();
+                        req.headers_mut().insert(
+                            http::header::VIA,
+                            http::HeaderValue::from_static("testing-proxy-server"),
+                        );
+                        println!("Proxying request: {:?}", req);
+                        client
+                            .request(req.map(Body::from))
+                            .await
+                            .context("Failed sending request to destination")
+                    }))
+                });
+
+                let handle = tokio::task::spawn(async move {
+                    println!("started proxy server");
+                    Server::builder(listener)
+                        .serve(handler_func)
+                        .await
+                        .expect("Proxy server exited unexpectedly");
+                });
+
+                Ok(Self {
+                    socket,
+                    handle,
+                    tempdir,
+                })
+            }
+        }
+    }
+
+    #[cfg(unix)]
+    #[tokio::test]
+    async fn test_proxies_through_unix_socket_when_set() -> anyhow::Result<()> {
+        let proxy_server = unix::UnixSocketProxyServer::new().await?;
+
+        let test_server = httptest::Server::run();
+        let url = test_server.url("/foo");
+        let host = url.authority().unwrap().to_string();
+        test_server.expect(
+            Expectation::matching(all_of![
+                request::method_path("GET", "/foo"),
+                request::headers(contains(("via", "testing-proxy-server"))),
+                request::headers(contains(("host", host))),
+            ])
+            .times(1)
+            .respond_with(responders::status_code(200)),
+        );
+
+        let client = HttpClientBuilder::https_with_system_roots()
+            .await?
+            .with_x2p_proxy(hyper_proxy::Proxy::new(
+                hyper_proxy::Intercept::Http,
+                hyper_unix_connector::Uri::new(proxy_server.socket, "/").into(),
+            ))
+            .build();
+        let resp = client.get(&url.to_string()).await?;
+        assert_eq!(200, resp.status().as_u16());
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_x2p_error_response_is_forbidden_host() -> anyhow::Result<()> {
+        let test_server = httptest::Server::run();
+        let url = test_server.url("/foo");
+        test_server.expect(
+            Expectation::matching(all_of![request::method_path("GET", "/foo")])
+                .times(1)
+                .respond_with(
+                    responders::status_code(400)
+                        .append_header("x-x2pagentd-error-type", "FORBIDDEN_HOST")
+                        .append_header("x-x2pagentd-error-msg", "Nope"),
+                ),
+        );
+
+        let client = HttpClientBuilder::https_with_system_roots().await?.build();
+        let result = client.get(&url.to_string()).await;
+        assert!(result.is_err());
+        assert!(matches!(
+            result,
+            Err(HttpError::X2P {
+                source: X2PAgentError::ForbiddenHost { .. },
+                ..
+            })
+        ));
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_x2p_error_response_is_access_denied() -> anyhow::Result<()> {
+        let test_server = httptest::Server::run();
+        let url = test_server.url("/foo");
+        test_server.expect(
+            Expectation::matching(all_of![request::method_path("GET", "/foo")])
+                .times(1)
+                .respond_with(
+                    responders::status_code(400)
+                        .append_header("x-fb-validated-x2pauth-decision", "deny")
+                        .append_header("x-x2pagentd-error-msg", "Nope"),
+                ),
+        );
+
+        let client = HttpClientBuilder::https_with_system_roots().await?.build();
+        let result = client.get(&url.to_string()).await;
+        assert!(result.is_err());
+        assert!(matches!(
+            result,
+            Err(HttpError::X2P {
+                source: X2PAgentError::AccessDenied { .. },
+                ..
+            }),
+        ));
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_x2p_error_response_is_generic_error() -> anyhow::Result<()> {
+        let test_server = httptest::Server::run();
+        let url = test_server.url("/foo");
+        test_server.expect(
+            Expectation::matching(all_of![request::method_path("GET", "/foo")])
+                .times(1)
+                .respond_with(
+                    responders::status_code(400)
+                        .append_header("x-x2pagentd-error-msg", "Something else happened"),
+                ),
+        );
+
+        let client = HttpClientBuilder::https_with_system_roots().await?.build();
+        let result = client.get(&url.to_string()).await;
+        assert!(result.is_err());
+        assert!(matches!(
+            result,
+            Err(HttpError::X2P {
+                source: X2PAgentError::Error(..),
+                ..
+            }),
+        ));
+
+        Ok(())
+    }
+}
+
+// TODO(skarlage, T160529958): Debug why these tests fail on CircleCI
+#[cfg(all(test, fbcode_build))]
+mod proxy_tests {
+    use std::convert::Infallible;
+    use std::net::TcpListener;
+    use std::net::ToSocketAddrs;
+    use std::time::Duration;
+
+    use anyhow::Context;
+    use httptest::matchers::*;
+    use httptest::responders;
+    use httptest::Expectation;
+    use hyper::service::make_service_fn;
+    use hyper::service::service_fn;
+    use hyper::Server;
+    use hyper_proxy::Intercept;
+    use hyper_proxy::Proxy;
+
+    use super::*;
+    use crate::proxy::DefaultSchemeUri;
+
+    const HEADER_SLEEP_DURATION_MS: &str = "x-buck2-test-proxy-sleep-duration-ms";
+
+    /// Barebones proxy server implementation that simply forwards requests onto
+    /// the destination server.
+    struct ProxyServer {
+        addr: std::net::SocketAddr,
+        // Need to hold a ref to the task so when Drop runs on Self we cancel
+        // the task.
+        #[allow(dead_code)]
+        handle: tokio::task::JoinHandle<()>,
+    }
+
+    impl ProxyServer {
+        async fn new() -> anyhow::Result<Self> {
+            let proxy_server_addr = "[::1]:0".to_socket_addrs().unwrap().next().unwrap();
+            let listener =
+                TcpListener::bind(proxy_server_addr).context("failed to bind to local address")?;
+            let proxy_server_addr = listener.local_addr()?;
+
+            let make_proxy_service = make_service_fn(|_conn| async move {
+                Ok::<_, Infallible>(service_fn(|mut req: Request<Body>| async move {
+                    // Sleep if requested to simulate slow reads.
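// Editor's note (illustration, not part of the diff): this header is how
// `test_timeout` below drives the harness. The client sends, e.g.,
//
//     let req = Request::builder()
//         .uri(test_server.url_str("/foo"))
//         .header(HEADER_SLEEP_DURATION_MS, "200") // proxy stalls for 200ms
//         .method(Method::GET)
//         .body(Bytes::new())?;
//
// while being built with a 10ms read timeout, so the request reliably trips
// the HttpError::Timeout path.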
+                    if let Some(s) = req.headers().get(HEADER_SLEEP_DURATION_MS) {
+                        let sleep_duration =
+                            Duration::from_millis(s.to_str().unwrap().parse().unwrap());
+                        tokio::time::sleep(sleep_duration).await;
+                    }
+
+                    let client = hyper::Client::new();
+                    req.headers_mut().insert(
+                        http::header::VIA,
+                        http::HeaderValue::from_static("testing-proxy-server"),
+                    );
+                    println!("Proxying request: {:?}", req);
+                    client
+                        .request(req)
+                        .await
+                        .context("Failed sending request to destination")
+                }))
+            });
+
+            let handle = tokio::task::spawn(async move {
+                println!("started proxy server");
+                Server::from_tcp(listener)
+                    .unwrap()
+                    .serve(make_proxy_service)
+                    .await
+                    .expect("Proxy server exited unexpectedly");
+            });
+
+            Ok(Self {
+                addr: proxy_server_addr,
+                handle,
+            })
+        }
+
+        fn uri(&self) -> anyhow::Result<Uri> {
+            http::Uri::builder()
+                .scheme("http")
+                .authority(self.addr.to_string().as_str())
+                .path_and_query("/")
+                .build()
+                .context("failed to build proxy server URI")
+        }
+    }
+
+    #[tokio::test]
+    async fn test_uses_http_proxy() -> anyhow::Result<()> {
+        let test_server = httptest::Server::run();
+        test_server.expect(
+            Expectation::matching(all_of![
+                request::method_path("GET", "/foo"),
+                request::headers(contains(("via", "testing-proxy-server")))
+            ])
+            .times(1)
+            .respond_with(responders::status_code(200)),
+        );
+
+        let proxy_server = ProxyServer::new().await?;
+        println!("proxy_server uri: {}", proxy_server.uri()?);
+
+        let client = HttpClientBuilder::https_with_system_roots()
+            .await?
+            .with_proxy(Proxy::new(Intercept::Http, proxy_server.uri()?))
+            .build();
+        let resp = client.get(&test_server.url_str("/foo")).await?;
+        assert_eq!(200, resp.status().as_u16());
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_uses_http_proxy_with_no_scheme_in_proxy_uri() -> anyhow::Result<()> {
+        let test_server = httptest::Server::run();
+        test_server.expect(
+            Expectation::matching(all_of![
+                request::method_path("GET", "/foo"),
+                request::headers(contains(("via", "testing-proxy-server")))
+            ])
+            .times(1)
+            .respond_with(responders::status_code(200)),
+        );
+
+        let proxy_server = ProxyServer::new().await?;
+
+        let authority = proxy_server.uri()?.authority().unwrap().clone();
+        let proxy_uri = format!("{}:{}", authority.host(), authority.port().unwrap());
+        println!("proxy_uri: {}", proxy_uri);
+        let client = HttpClientBuilder::https_with_system_roots()
+            .await?
+            .with_proxy(Proxy::new(
+                Intercept::Http,
+                DefaultSchemeUri(proxy_uri.try_into()?).into(),
+            ))
+            .build();
+        let resp = client.get(&test_server.url_str("/foo")).await?;
+        assert_eq!(200, resp.status().as_u16());
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_does_not_proxy_when_no_proxy_matches() -> anyhow::Result<()> {
+        let test_server = httptest::Server::run();
+        test_server.expect(
+            Expectation::matching(all_of![request::method_path("GET", "/foo")])
+                .times(1)
+                .respond_with(responders::status_code(200)),
+        );
+
+        let proxy_server = ProxyServer::new().await?;
+        println!("proxy_server uri: {}", proxy_server.uri()?);
+
+        let test_server_host = test_server
+            .url("/")
+            .authority()
+            .unwrap()
+            .clone()
+            .host()
+            .to_owned();
+        let no_proxy = crate::proxy::NoProxy::new(http::uri::Scheme::HTTP, test_server_host);
+
+        // Don't proxy connections to test_server.
+        let client = HttpClientBuilder::https_with_system_roots()
+            .await?
+ .with_proxy(Proxy::new( + no_proxy.into_proxy_intercept(), + proxy_server.uri()?, + )) + .build(); + let resp = client.get(&test_server.url_str("/foo")).await?; + assert_eq!(200, resp.status().as_u16()); + + Ok(()) + } + + #[tokio::test] + async fn test_proxies_when_no_proxy_does_not_match() -> anyhow::Result<()> { + let test_server = httptest::Server::run(); + test_server.expect( + Expectation::matching(all_of![ + request::method_path("GET", "/foo"), + request::headers(contains(("via", "testing-proxy-server"))) + ]) + .times(1) + .respond_with(responders::status_code(200)), + ); + + let proxy_server = ProxyServer::new().await?; + println!("proxy_server uri: {}", proxy_server.uri()?); + + // Don't proxy HTTPS connections to *.foobar.com + let no_proxy = crate::proxy::NoProxy::new(http::uri::Scheme::HTTP, ".foobar.com"); + + let client = HttpClientBuilder::https_with_system_roots() + .await? + .with_proxy(Proxy::new( + no_proxy.into_proxy_intercept(), + proxy_server.uri()?, + )) + .build(); + let resp = client.get(&test_server.url_str("/foo")).await?; + assert_eq!(200, resp.status().as_u16()); + + Ok(()) + } + + // Use proxy server harness to test slow connections. + #[tokio::test] + async fn test_timeout() -> anyhow::Result<()> { + let test_server = httptest::Server::run(); + let proxy_server = ProxyServer::new().await?; + + let client = HttpClientBuilder::https_with_system_roots() + .await? + .with_proxy(Proxy::new(Intercept::Http, proxy_server.uri()?)) + .with_read_timeout(Some(Duration::from_millis(10))) + .build(); + + let req = Request::builder() + .uri(test_server.url_str("/foo")) + .header(HEADER_SLEEP_DURATION_MS, "200") + .method(Method::GET) + .body(Bytes::new())?; + let res = client.request(req).await; + assert!(matches!(res, Err(HttpError::Timeout { .. }))); + Ok(()) + } +} diff --git a/app/buck2_http/src/client/builder.rs b/app/buck2_http/src/client/builder.rs new file mode 100644 index 0000000000000..04b0ae5695242 --- /dev/null +++ b/app/buck2_http/src/client/builder.rs @@ -0,0 +1,441 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */
+
+use std::path::Path;
+use std::sync::Arc;
+use std::time::Duration;
+
+use anyhow::Context;
+use buck2_certs::certs::find_internal_cert;
+use buck2_certs::certs::supports_vpnless;
+use buck2_certs::certs::tls_config_with_single_cert;
+use buck2_certs::certs::tls_config_with_system_roots;
+use hyper::client::HttpConnector;
+use hyper::service::Service;
+use hyper::Body;
+use hyper::Uri;
+use hyper_proxy::Proxy;
+use hyper_proxy::ProxyConnector;
+use hyper_rustls::HttpsConnector;
+use hyper_rustls::HttpsConnectorBuilder;
+use hyper_timeout::TimeoutConnector;
+use rustls::ClientConfig;
+use tokio::io::AsyncRead;
+use tokio::io::AsyncWrite;
+use tokio_rustls::TlsConnector;
+
+use super::HttpClient;
+use super::RequestClient;
+use crate::proxy;
+use crate::stats::HttpNetworkStats;
+use crate::x2p;
+
+#[derive(Clone, Debug, Default, PartialEq)]
+pub struct TimeoutConfig {
+    connect_timeout: Option<Duration>,
+    read_timeout: Option<Duration>,
+    write_timeout: Option<Duration>,
+}
+
+impl TimeoutConfig {
+    fn to_connector<C>(&self, connector: C) -> TimeoutConnector<C>
+    where
+        C: Service<Uri> + Send,
+        C::Response: AsyncRead + AsyncWrite + Send + Unpin,
+        C::Future: Send + 'static,
+        C::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
+    {
+        let mut timeout_connector = TimeoutConnector::new(connector);
+        timeout_connector.set_connect_timeout(self.connect_timeout);
+        timeout_connector.set_read_timeout(self.read_timeout);
+        timeout_connector.set_write_timeout(self.write_timeout);
+        timeout_connector
+    }
+}
+
+pub struct HttpClientBuilder {
+    tls_config: ClientConfig,
+    proxies: Vec<Proxy>,
+    max_redirects: Option<usize>,
+    supports_vpnless: bool,
+    http2: bool,
+    timeout_config: Option<TimeoutConfig>,
+}
+
+impl HttpClientBuilder {
+    /// Builds an http client compatible with OSS usage.
+    pub async fn oss() -> anyhow::Result<Self> {
+        tracing::debug!("Using OSS client");
+        let mut builder = Self::https_with_system_roots().await?;
+        builder.with_proxy_from_env()?;
+        Ok(builder)
+    }
+
+    /// Builds an http client compatible with internal Meta usage.
+    pub async fn internal() -> anyhow::Result<Self> {
+        let mut builder = Self::https_with_system_roots().await?;
+        if supports_vpnless() {
+            tracing::debug!("Using vpnless client");
+            let proxy = x2p::find_proxy()?.context("Expected unix domain socket or http proxy port for x2p client but did not find either")?;
+            builder.with_x2p_proxy(proxy);
+        } else if let Some(cert_path) = find_internal_cert() {
+            tracing::debug!("Using internal https client");
+            builder.with_client_auth_cert(cert_path).await?;
+        } else {
+            tracing::debug!("Using default https client");
+        }
+
+        Ok(builder)
+    }
+
+    /// Creates a barebones https client using system roots for TLS authentication.
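// A quick usage sketch (editor's illustration, not part of the diff itself),
// mirroring how the tests later in this change exercise the builder:
//
//     let client = HttpClientBuilder::https_with_system_roots()
//         .await?
//         .with_max_redirects(10)
//         .with_read_timeout(Some(Duration::from_millis(2000)))
//         .build();
//
// The values are arbitrary; each `with_*` setter returns `&mut Self`, so
// configuration chains until the final `build()` snapshots it into a client.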
+    pub async fn https_with_system_roots() -> anyhow::Result<Self> {
+        let tls_config = tls_config_with_system_roots().await?;
+        Ok(Self {
+            tls_config,
+            proxies: Vec::new(),
+            max_redirects: None,
+            supports_vpnless: false,
+            http2: true,
+            timeout_config: None,
+        })
+    }
+
+    pub fn with_tls_config(&mut self, tls_config: ClientConfig) -> &mut Self {
+        self.tls_config = tls_config;
+        self
+    }
+
+    pub async fn with_client_auth_cert<P: AsRef<Path>>(
+        &mut self,
+        path: P,
+    ) -> anyhow::Result<&mut Self> {
+        let tls_config = tls_config_with_single_cert(path.as_ref(), path.as_ref()).await?;
+        Ok(self.with_tls_config(tls_config))
+    }
+
+    pub fn with_proxy(&mut self, proxy: Proxy) -> &mut Self {
+        self.proxies.push(proxy);
+        self
+    }
+
+    pub fn with_x2p_proxy(&mut self, proxy: Proxy) -> &mut Self {
+        self.with_proxy(proxy).with_supports_vpnless();
+        self
+    }
+
+    pub fn with_proxy_from_env(&mut self) -> anyhow::Result<&mut Self> {
+        if let Some(proxy) = proxy::https_proxy_from_env()? {
+            self.with_proxy(proxy);
+        }
+        if let Some(proxy) = proxy::http_proxy_from_env()? {
+            self.with_proxy(proxy);
+        }
+        Ok(self)
+    }
+
+    pub fn with_connect_timeout(&mut self, connect_timeout: Option<Duration>) -> &mut Self {
+        if let Some(timeout_config) = &mut self.timeout_config {
+            timeout_config.connect_timeout = connect_timeout;
+        } else {
+            self.timeout_config = Some(TimeoutConfig {
+                connect_timeout,
+                read_timeout: None,
+                write_timeout: None,
+            });
+        }
+        self
+    }
+
+    pub fn connect_timeout(&self) -> Option<Duration> {
+        self.timeout_config.as_ref().and_then(|c| c.connect_timeout)
+    }
+
+    pub fn with_read_timeout(&mut self, read_timeout: Option<Duration>) -> &mut Self {
+        if let Some(timeout_config) = &mut self.timeout_config {
+            timeout_config.read_timeout = read_timeout;
+        } else {
+            self.timeout_config = Some(TimeoutConfig {
+                read_timeout,
+                connect_timeout: None,
+                write_timeout: None,
+            });
+        }
+        self
+    }
+
+    pub fn read_timeout(&self) -> Option<Duration> {
+        self.timeout_config.as_ref().and_then(|c| c.read_timeout)
+    }
+
+    pub fn with_write_timeout(&mut self, write_timeout: Option<Duration>) -> &mut Self {
+        if let Some(timeout_config) = &mut self.timeout_config {
+            timeout_config.write_timeout = write_timeout;
+        } else {
+            self.timeout_config = Some(TimeoutConfig {
+                write_timeout,
+                connect_timeout: None,
+                read_timeout: None,
+            });
+        }
+        self
+    }
+
+    pub fn write_timeout(&self) -> Option<Duration> {
+        self.timeout_config.as_ref().and_then(|c| c.write_timeout)
+    }
+
+    pub fn with_max_redirects(&mut self, max_redirects: usize) -> &mut Self {
+        self.max_redirects = Some(max_redirects);
+        self
+    }
+
+    pub fn max_redirects(&self) -> Option<usize> {
+        self.max_redirects
+    }
+
+    pub fn with_supports_vpnless(&mut self) -> &mut Self {
+        self.supports_vpnless = true;
+        self
+    }
+
+    pub fn with_http2(&mut self, http2: bool) -> &mut Self {
+        self.http2 = http2;
+        self
+    }
+
+    pub fn supports_vpnless(&self) -> bool {
+        self.supports_vpnless
+    }
+
+    fn build_inner(&self) -> Arc<dyn RequestClient> {
+        match (self.proxies.as_slice(), &self.timeout_config) {
+            // Construct x2p unix socket client.
+            // Note: This ignores (and does not require) the TLS config.
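// Editor's note (illustration, not part of the diff): every arm below builds
// the same onion, innermost transport first. For the proxied-TLS case, e.g.:
//
//     HttpsConnector -> TimeoutConnector<HttpsConnector> -> ProxyConnector<...>
//
// The timeout layer is omitted entirely when no TimeoutConfig was set, which
// is why each proxy/TLS combination appears in both a Some(..) and a None arm.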
+            #[cfg(unix)]
+            (proxies @ [_, ..], Some(timeout_config))
+                if let Some(unix_socket) = find_unix_proxy(proxies) =>
+            {
+                let timeout_connector =
+                    timeout_config.to_connector(hyper_unix_connector::UnixClient);
+                let proxy_connector =
+                    build_proxy_connector(&[unix_socket.clone()], timeout_connector, None);
+                Arc::new(hyper::Client::builder().build::<_, Body>(proxy_connector))
+            }
+            #[cfg(unix)]
+            (proxies @ [_, ..], None) if let Some(unix_socket) = find_unix_proxy(proxies) => {
+                let proxy_connector = build_proxy_connector(
+                    &[unix_socket.clone()],
+                    hyper_unix_connector::UnixClient,
+                    None,
+                );
+                Arc::new(hyper::Client::builder().build::<_, Body>(proxy_connector))
+            }
+
+            // Construct x2p http proxy client.
+            (proxies @ [_, ..], Some(timeout_config)) if self.supports_vpnless => {
+                let mut http_connector = HttpConnector::new();
+                // When talking to local x2pagent proxy, only http is supported.
+                http_connector.enforce_http(true);
+                let timeout_connector = timeout_config.to_connector(http_connector);
+                let proxy_connector = build_proxy_connector(proxies, timeout_connector, None);
+                Arc::new(hyper::Client::builder().build::<_, Body>(proxy_connector))
+            }
+            (proxies @ [_, ..], None) if self.supports_vpnless => {
+                let mut http_connector = HttpConnector::new();
+                // When talking to local x2pagent proxy, only http is supported.
+                http_connector.enforce_http(true);
+                let proxy_connector = build_proxy_connector(proxies, http_connector, None);
+                Arc::new(hyper::Client::builder().build::<_, Body>(proxy_connector))
+            }
+
+            // Proxied http client with TLS.
+            (proxies @ [_, ..], Some(timeout_config)) => {
+                let https_connector = build_https_connector(self.tls_config.clone(), self.http2);
+                let timeout_connector = timeout_config.to_connector(https_connector);
+                // Re-use TLS config from https connection for communication with proxies.
+                let proxy_connector = build_proxy_connector(
+                    proxies,
+                    timeout_connector,
+                    Some(self.tls_config.clone()),
+                );
+                Arc::new(hyper::Client::builder().build::<_, Body>(proxy_connector))
+            }
+            (proxies @ [_, ..], None) => {
+                let https_connector = build_https_connector(self.tls_config.clone(), self.http2);
+                let proxy_connector =
+                    build_proxy_connector(proxies, https_connector, Some(self.tls_config.clone()));
+                Arc::new(hyper::Client::builder().build::<_, Body>(proxy_connector))
+            }
+
+            // Client with TLS only.
+            ([], Some(timeout_config)) => {
+                let https_connector = build_https_connector(self.tls_config.clone(), self.http2);
+                let timeout_connector = timeout_config.to_connector(https_connector);
+                Arc::new(hyper::Client::builder().build::<_, Body>(timeout_connector))
+            }
+            ([], None) => {
+                let https_connector = build_https_connector(self.tls_config.clone(), self.http2);
+                Arc::new(hyper::Client::builder().build::<_, Body>(https_connector))
+            }
+        }
+    }
+
+    pub fn build(&self) -> HttpClient {
+        HttpClient {
+            inner: self.build_inner(),
+            max_redirects: self.max_redirects,
+            supports_vpnless: self.supports_vpnless,
+            http2: self.http2,
+            stats: HttpNetworkStats::new(),
+        }
+    }
+}
+
+fn build_https_connector(tls_config: ClientConfig, http2: bool) -> HttpsConnector<HttpConnector> {
+    let builder = HttpsConnectorBuilder::new()
+        .with_tls_config(tls_config)
+        .https_or_http()
+        .enable_http1();
+
+    if http2 {
+        builder.enable_http2().build()
+    } else {
+        builder.build()
+    }
+}
+
+/// Build a proxy connector using `proxies`, wrapping underlying `connector`,
+/// and optionally using `tls_config` to secure communications with the proxy.
+///
+/// Note: Not all proxy connectors built by this client need TLS communication
+/// with the proxy, e.g. if the proxy is on localhost.
+fn build_proxy_connector<C>(
+    proxies: &[Proxy],
+    connector: C,
+    tls_config: Option<ClientConfig>,
+) -> ProxyConnector<C>
+where
+    C: Service<Uri> + Send,
+    C::Response: AsyncRead + AsyncWrite + Send + Unpin,
+    C::Future: Send + 'static,
+    C::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
+{
+    // Note: we use the `unsecured()` constructor here, but all that does is
+    // not load the default TLS config. You can optionally pass your own tls
+    // config if needed.
+    let mut proxy_connector = ProxyConnector::unsecured(connector);
+    proxy_connector.extend_proxies(proxies.iter().cloned());
+    if let Some(tls_config) = tls_config {
+        proxy_connector.set_tls(Some(TlsConnector::from(Arc::new(tls_config))));
+    }
+    proxy_connector
+}
+
+/// Helper function to find any proxies with unix:// as the scheme (which
+/// indicates we want to proxy through a unix domain socket).
+///
+/// Note: This _does_ compile on non-unix, but is only used at runtime in unix;
+/// adding this to silence dead code warnings.
+#[cfg(unix)]
+fn find_unix_proxy(proxies: &[Proxy]) -> Option<&Proxy> {
+    proxies
+        .iter()
+        .find(|proxy| proxy.uri().scheme_str() == Some("unix"))
+}
+
+#[cfg(test)]
+mod tests {
+    use hyper_proxy::Intercept;
+
+    use super::*;
+
+    #[tokio::test]
+    async fn test_default_builder() -> anyhow::Result<()> {
+        let builder = HttpClientBuilder::https_with_system_roots().await?;
+
+        assert_eq!(None, builder.max_redirects);
+        assert!(builder.proxies.is_empty());
+        assert!(!builder.supports_vpnless);
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_supports_vpnless_set_true() -> anyhow::Result<()> {
+        let mut builder = HttpClientBuilder::https_with_system_roots().await?;
+        builder.with_supports_vpnless();
+
+        assert!(builder.supports_vpnless);
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_http2_option() -> anyhow::Result<()> {
+        let mut builder = HttpClientBuilder::https_with_system_roots().await?;
+        assert!(builder.http2);
+        builder.with_http2(false);
+
+        assert!(!builder.http2);
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_with_max_redirects_overrides_default() -> anyhow::Result<()> {
+        let mut builder = HttpClientBuilder::https_with_system_roots().await?;
+        builder.with_max_redirects(5);
+
+        assert_eq!(5, builder.max_redirects.unwrap());
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_builder_with_proxy_adds_proxy() -> anyhow::Result<()> {
+        let proxy = Proxy::new(Intercept::All, "http://localhost:12345".try_into()?);
+        let mut builder = HttpClientBuilder::https_with_system_roots().await?;
+        builder.with_proxy(proxy);
+
+        assert_eq!(1, builder.proxies.len());
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_set_connect_timeout() -> anyhow::Result<()> {
+        let mut builder = HttpClientBuilder::https_with_system_roots().await?;
+        builder.with_connect_timeout(Some(Duration::from_millis(1000)));
+
+        assert_eq!(
+            Some(TimeoutConfig {
+                connect_timeout: Some(Duration::from_millis(1000)),
+                read_timeout: None,
+                write_timeout: None,
+            }),
+            builder.timeout_config,
+        );
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_set_connect_and_read_timeouts() -> anyhow::Result<()> {
+        let mut builder = HttpClientBuilder::https_with_system_roots().await?;
+        builder
+            .with_connect_timeout(Some(Duration::from_millis(1000)))
+            .with_read_timeout(Some(Duration::from_millis(2000)));
+
+        assert_eq!(
+            Some(TimeoutConfig {
+                connect_timeout: Some(Duration::from_millis(1000)),
+                read_timeout: Some(Duration::from_millis(2000)),
+                write_timeout: None,
+            }),
+            builder.timeout_config,
+        );
+        Ok(())
+    }
+}
diff --git a/app/buck2_http/src/lib.rs b/app/buck2_http/src/lib.rs
new file mode 100644
index 0000000000000..1d8a96dd4bf65
--- /dev/null
+++ b/app/buck2_http/src/lib.rs
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+#![feature(error_generic_member_access)]
+#![feature(if_let_guard)]
+
+use hyper::StatusCode;
+
+mod client;
+mod proxy;
+mod redirect;
+pub mod retries;
+mod stats;
+mod x2p;
+
+pub use client::to_bytes;
+pub use client::HttpClient;
+pub use client::HttpClientBuilder;
+
+fn http_error_label(status: StatusCode) -> &'static str {
+    if status.is_server_error() {
+        "Server"
+    } else if status.is_client_error() {
+        "Client"
+    } else {
+        "Unknown"
+    }
+}
+
+fn tag_from_status(status: StatusCode) -> Option<buck2_error::ErrorTag> {
+    if status.is_server_error() {
+        Some(buck2_error::ErrorTag::HttpServer)
+    } else if status == StatusCode::FORBIDDEN || status == StatusCode::NOT_FOUND {
+        Some(buck2_error::ErrorTag::HttpClient)
+    } else {
+        None
+    }
+}
+
+#[derive(Debug, buck2_error::Error)]
+#[buck2(tag = Http)]
+pub enum HttpError {
+    #[error("HTTP URI Error: URI {uri} is malformed: {source:?}")]
+    InvalidUri {
+        uri: String,
+        #[source]
+        source: http::uri::InvalidUri,
+    },
+    #[error("HTTP URI Error: URI parts {uri} is malformed: {source:?}")]
+    InvalidUriParts {
+        uri: String,
+        #[source]
+        source: http::uri::InvalidUriParts,
+    },
+    #[error("HTTP: Error building request")]
+    BuildRequest(#[source] http::Error),
+    #[error("HTTP: Error sending request to {uri}")]
+    #[buck2(tier0)]
+    SendRequest {
+        uri: String,
+        #[source]
+        source: hyper::Error,
+    },
+    #[error("HTTP {} Error ({status}) when querying URI: {uri}. Response text: {text}", http_error_label(*.status))]
+    #[buck2(tag = tag_from_status(*status))]
+    Status {
+        status: StatusCode,
+        uri: String,
+        text: String,
+    },
+    #[error("HTTP Error: Exceeded max redirects ({max_redirects}) while fetching URI: {uri}.")]
+    TooManyRedirects { uri: String, max_redirects: usize },
+    #[error("HTTP: Error mutating request")]
+    MutateRequest(#[source] anyhow::Error),
+    #[error("HTTP: Timed out while making request to URI: {uri} after {duration} seconds.")]
+    #[buck2(tier0)]
+    Timeout { uri: String, duration: u64 },
+    #[error("While making request to {uri} via x2p")]
+    X2P {
+        uri: String,
+        #[source]
+        source: x2p::X2PAgentError,
+    },
+}
+
+impl From<http::Error> for HttpError {
+    fn from(err: http::Error) -> Self {
+        Self::BuildRequest(err)
+    }
+}
+
+impl From<anyhow::Error> for HttpError {
+    fn from(err: anyhow::Error) -> Self {
+        Self::MutateRequest(err)
+    }
+}
diff --git a/app/buck2_common/src/http/proxy.rs b/app/buck2_http/src/proxy.rs
similarity index 100%
rename from app/buck2_common/src/http/proxy.rs
rename to app/buck2_http/src/proxy.rs
diff --git a/app/buck2_common/src/http/redirect.rs b/app/buck2_http/src/redirect.rs
similarity index 99%
rename from app/buck2_common/src/http/redirect.rs
rename to app/buck2_http/src/redirect.rs
index c1be73fbd69e3..57c48f8dec726 100644
--- a/app/buck2_common/src/http/redirect.rs
+++ b/app/buck2_http/src/redirect.rs
@@ -18,7 +18,7 @@ use hyper::Request;
 use hyper::Response;
 use hyper::StatusCode;
 
-use crate::http::HttpError;
+use crate::HttpError;
 
 trait UriWithRedirect {
     fn with_redirect(&self, location: &Uri) -> anyhow::Result<Uri>;
diff --git a/app/buck2_common/src/http/retries.rs b/app/buck2_http/src/retries.rs
similarity index 80%
rename from app/buck2_common/src/http/retries.rs
rename to app/buck2_http/src/retries.rs
index 9aa300c2a5a7e..15c86fd279022 100644
--- a/app/buck2_common/src/http/retries.rs
+++ b/app/buck2_http/src/retries.rs
@@ -11,12 +11,11 @@ use std::time::Duration;
 
 use futures::future::Future;
 use http::StatusCode;
-use thiserror::Error;
 
-#[derive(Debug, Error)]
+#[derive(Debug, buck2_error::Error)]
 pub enum HttpError {
     #[error(transparent)]
-    Client(#[from] crate::http::HttpError),
+    Client(crate::HttpError),
 
     #[error("HTTP Transfer Error when querying URL: {}. Failed after {} bytes", .url, .received)]
     Transfer {
@@ -27,15 +26,21 @@ pub enum HttpError {
 }
 
+impl From<crate::HttpError> for HttpError {
+    fn from(value: crate::HttpError) -> Self {
+        Self::Client(value)
+    }
+}
+
 impl HttpError {
     fn is_retryable(&self) -> bool {
         match self {
             Self::Client(client_error) => match client_error {
-                crate::http::HttpError::Status { status, .. } => {
+                crate::HttpError::Status { status, .. } => {
                     status.is_server_error() || *status == StatusCode::TOO_MANY_REQUESTS
                 }
-                crate::http::HttpError::Timeout { .. } => true,
-                crate::http::HttpError::SendRequest { .. } => true,
+                crate::HttpError::Timeout { .. } => true,
+                crate::HttpError::SendRequest { .. } => true,
                 _ => false,
             },
             Self::Transfer { source, .. } => !source.is_connect(),
@@ -49,7 +54,7 @@ pub trait AsHttpError {
 
 pub async fn http_retry<Exec, F, T, E>(exec: Exec, mut intervals: Vec<Duration>) -> Result<T, E>
 where
     Exec: Fn() -> F,
-    E: AsHttpError + std::fmt::Display,
+    E: std::error::Error + AsHttpError + std::fmt::Display + Send + Sync + 'static,
     F: Future<Output = Result<T, E>>,
 {
     intervals.insert(0, Duration::from_secs(0));
@@ -58,24 +63,26 @@ where
     while let Some(duration) = backoff.next() {
         tokio::time::sleep(duration).await;
 
-        let res = exec().await;
-
-        let http_error = res.as_ref().err().and_then(|err| err.as_http_error());
+        let err = match exec().await {
+            Ok(val) => return Ok(val),
+            Err(err) => err,
+        };
 
-        if let Some(http_error) = http_error {
+        if let Some(http_error) = err.as_http_error() {
             if http_error.is_retryable() {
                 if let Some(b) = backoff.peek() {
                     tracing::warn!(
                         "Retrying a HTTP error after {} seconds: {:#}",
                         b.as_secs(),
-                        http_error
+                        // Print as a buck2_error to make sure we get the source
+                        buck2_error::Error::from(err)
                    );
                     continue;
                 }
             }
         }
 
-        return res;
+        return Err(err);
    }
 
     unreachable!("The loop above will exit before we get to the end")
@@ -104,10 +111,16 @@ mod tests {
         }
     }
 
-    #[derive(Debug, Error)]
+    #[derive(Debug, buck2_error::Error)]
     enum HttpTestError {
         #[error("Error in test")]
-        Client(#[from] HttpError),
+        Client(#[source] HttpError),
+    }
+
+    impl From<HttpError> for HttpTestError {
+        fn from(value: HttpError) -> Self {
+            Self::Client(value)
+        }
     }
 
     impl AsHttpError for HttpTestError {
@@ -131,7 +144,7 @@ mod tests {
     }
 
     fn test_error(status: StatusCode) -> HttpTestError {
-        HttpTestError::Client(HttpError::Client(crate::http::HttpError::Status {
+        HttpTestError::Client(HttpError::Client(crate::HttpError::Status {
             status,
             uri: "something".to_owned(),
             text: "something else".to_owned(),
diff --git a/app/buck2_common/src/http/stats.rs b/app/buck2_http/src/stats.rs
similarity index 100%
rename from app/buck2_common/src/http/stats.rs
rename to app/buck2_http/src/stats.rs
diff --git a/app/buck2_common/src/http/x2p.rs b/app/buck2_http/src/x2p.rs
similarity index 90%
rename from app/buck2_common/src/http/x2p.rs
rename to app/buck2_http/src/x2p.rs
index d354af4adf199..fdd13299c4391 100644
--- a/app/buck2_common/src/http/x2p.rs
+++ b/app/buck2_http/src/x2p.rs
@@ -60,22 +60,15 @@ pub fn find_proxy() -> anyhow::Result<Option<Proxy>> {
 
 #[cfg(not(fbcode_build))]
 pub fn find_proxy() -> anyhow::Result<Option<Proxy>> {
-    anyhow::bail!("VPNless development not supported for non-internal fbcode builds");
-}
-
-/// Whether the machine buck is running on supports vpnless operation.
-pub fn supports_vpnless() -> bool {
-    #[cfg(fbcode_build)]
-    return cpe::x2p::supports_vpnless();
-
-    #[cfg(not(fbcode_build))]
-    return false;
+    Err(anyhow::anyhow!(
+        "VPNless development not supported for non-internal fbcode builds"
+    ))
 }
 
 /// Collection of different kinds of errors we can see from x2pagent. Typically
 /// denotes a URL is not authorized for vpnless access and/or using the wrong,
 /// non-vpnless url.
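// Editor's sketch (not part of the diff) of how these variants are surfaced,
// inferred from the client tests earlier in this change: x2pagentd reports
// failures through response headers, roughly
//
//     x-x2pagentd-error-type: FORBIDDEN_HOST  => X2PAgentError::ForbiddenHost
//     x-fb-validated-x2pauth-decision: deny   => X2PAgentError::AccessDenied
//     x-x2pagentd-error-msg: <message>        => X2PAgentError::Error
//
// with `X2PAgentError::from_headers` (called from the client's request path)
// performing the mapping; the exact header set is taken from those tests.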
-#[derive(Debug, thiserror::Error)]
+#[derive(Debug, buck2_error::Error)]
 pub enum X2PAgentError {
     #[error("Host `{host}` is not authorized for vpnless access: {message}")]
     ForbiddenHost { host: String, message: String },
@@ -84,7 +77,13 @@ pub enum X2PAgentError {
     #[error("Host `{host}` and path `{path}` is not authorized on vpnless")]
     AccessDenied { host: String, path: String },
     #[error(transparent)]
-    Error(#[from] anyhow::Error),
+    Error(anyhow::Error),
+}
+
+impl From<anyhow::Error> for X2PAgentError {
+    fn from(e: anyhow::Error) -> Self {
+        Self::Error(e)
+    }
 }
 
 impl X2PAgentError {
diff --git a/app/buck2_install_proto/BUCK b/app/buck2_install_proto/BUCK
index 9c78a92408323..52d3002435f41 100644
--- a/app/buck2_install_proto/BUCK
+++ b/app/buck2_install_proto/BUCK
@@ -1,7 +1,5 @@
 load("@fbcode//buck2:proto_defs.bzl", "rust_protobuf_library")
 load("@fbcode//grpc_fb/codegen:buck_macros.bzl", "grpc_library")
-load("@fbcode_macros//build_defs:python_binary.bzl", "python_binary")
-load("@fbsource//tools/build_defs:glob_defs.bzl", "glob")
 
 oncall("build_infra")
 
@@ -9,26 +7,12 @@ rust_protobuf_library(
     name = "buck2_install_proto",
     srcs = glob(["src/**/*.rs"]),
     build_script = "build.rs",
-    doctests = False,  # FIXME
     protos = ["install.proto"],
     deps = [
         "fbsource//third-party/rust:tonic",
     ],
 )
 
-python_binary(
-    # @autodeps-skip
-    name = "installer",
-    srcs = [
-        "main.py",
-    ],
-    base_module = "",
-    main_module = "main",
-    deps = [
-        ":install-py",
-    ],
-)
-
 grpc_library(
     name = "install",
     srcs = [
diff --git a/app/buck2_install_proto/Cargo.toml b/app/buck2_install_proto/Cargo.toml
index 674e527513886..880422c4a4731 100644
--- a/app/buck2_install_proto/Cargo.toml
+++ b/app/buck2_install_proto/Cargo.toml
@@ -2,11 +2,12 @@
 name = "buck2_install_proto"
 edition = "2021"
+license = { workspace = true }
+repository = { workspace = true }
 version = "0.1.0"
 
 [dependencies]
 prost = { workspace = true }
-prost-types = { workspace = true }
 tonic = { workspace = true }
 
 [build-dependencies]
diff --git a/app/buck2_install_proto/install.bzl b/app/buck2_install_proto/install.bzl
deleted file mode 100644
index d0b5a97840737..0000000000000
--- a/app/buck2_install_proto/install.bzl
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-#
-# This source code is licensed under both the MIT license found in the
-# LICENSE-MIT file in the root directory of this source tree and the Apache
-# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
-# of this source tree.
-
-def _installer_impl(ctx: AnalysisContext) -> list[Provider]:
-    installer = ctx.attrs.installer
-    return [DefaultInfo(), InstallInfo(installer = installer, files = ctx.attrs.files)]
-
-installer = rule(impl = _installer_impl, attrs = {
-    "files": attrs.dict(key = attrs.string(), value = attrs.source(), default = {}),
-    "installer": attrs.label(),
-})
diff --git a/app/buck2_install_proto/install.proto b/app/buck2_install_proto/install.proto
index 12d18362c9e10..84e5331e4d758 100644
--- a/app/buck2_install_proto/install.proto
+++ b/app/buck2_install_proto/install.proto
@@ -40,6 +40,22 @@ message FileResponse {
   string name = 2;
   string path = 3;
   ErrorDetail error_detail = 4;
+  // Connected device metadata to be logged to scuba.
+  // Should be sent once per device per install command,
+  // not per FileReadyRequest.
+  // Returned here in case the installer needs information
+  // from received files in order to connect.
+  repeated DeviceMetadata device_metadata = 5;
+}
+
+// Metadata about a connected device for logging.
+// ex. device name, os version. +message DeviceMetadata { + message Entry { + string key = 1; + string value = 2; + } + repeated Entry entry = 1; } message ErrorDetail { diff --git a/app/buck2_install_proto/main.py b/app/buck2_install_proto/main.py deleted file mode 100644 index d8224a57fb337..0000000000000 --- a/app/buck2_install_proto/main.py +++ /dev/null @@ -1,152 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under both the MIT license found in the -# LICENSE-MIT file in the root directory of this source tree and the Apache -# License, Version 2.0 found in the LICENSE-APACHE file in the root directory -# of this source tree. - -import argparse -import os -import signal -import subprocess -import sys -import threading -from concurrent.futures import ThreadPoolExecutor -from pathlib import Path -from typing import Optional - -import grpc -from buck2.app.buck2_install_proto import install_pb2, install_pb2_grpc - - -class RsyncInstallerService(install_pb2_grpc.InstallerServicer): - def __init__(self, stop_event, argsparse, *args, **kwargs): - self.args = argsparse - self.stop_event = stop_event - if argsparse.install_location == "": - self.dst = argsparse.dst - else: - self.dst = f"{argsparse.install_location}:{argsparse.dst}" - - def Install(self, request, _context): - install_id = request.install_id - files = request.files - - print( - f"Received request with install info: {install_id=:} and {len(files)} files" - ) - - install_response = install_pb2.InstallResponse() - install_response.install_id = install_id - return install_response - - def FileReady(self, request, _context): - (_out, stderr, code) = self.rsync_install( - request.path, os.path.join(self.dst, request.name) - ) - response = { - "install_id": request.install_id, - "name": f"{request.name}", - "path": request.path, - } - - if code != 0: - error_detail = install_pb2.ErrorDetail() - error_detail.message = stderr - response["error_detail"] = error_detail - - file_response = install_pb2.FileResponse(**response) - return file_response - - def ShutdownServer(self, _request, _context): - shutdown(self.stop_event) - response = install_pb2.ShutdownResponse() - return response - - def rsync_install(self, src, dst): - if not (dst_parent := Path(dst).parent).exists(): - dst_parent.mkdir(parents=True, exist_ok=True) - cmd = [ - "rsync", - "-aL", - str(src), - str(dst), - ] - cp = subprocess.Popen( - cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf8" - ) - stdout, stderr = cp.communicate() - code = cp.returncode - return (stdout, stderr, code) - - -def try_command( - cmd: [str], - err_msg: str, - cwd: Optional = None, - env: Optional = None, - shell: bool = False, -): - try: - output = subprocess.check_output(cmd, cwd=cwd, env=env, shell=shell) - return output - except Exception as e: - print(f"Failed step {err_msg} with {str(e)}") - raise e - - -def shutdown(stop_event): - stop_event.set() - - -def serve(args): - print(f"Starting installer server installing to {args.dst}") - server = grpc.server( - thread_pool=ThreadPoolExecutor(max_workers=50), - options=[("grpc.max_receive_message_length", 500 * 1024 * 1024)], - ) - stop_event = threading.Event() - install_pb2_grpc.add_InstallerServicer_to_server( - RsyncInstallerService(stop_event, args), server - ) - ## https://grpc.github.io/grpc/python/grpc.html - listen_addr = server.add_insecure_port("[::]:" + args.tcp_port) - print(f"Started server on {listen_addr} w/ pid {os.getpid()}") - 
server.start() - signal.signal(signal.SIGINT, lambda x, y: shutdown(stop_event)) - try: - stop_event.wait() - print("Stopped RPC server, Waiting for RPCs to complete...") - server.stop(1).wait() - finally: - print("Exiting installer") - - -def parse_args(args=None): - parser = argparse.ArgumentParser(description="Parse args for install location") - parser.add_argument( - "--install-location", - help="Defines install hostname (I.E. devserver)", - default="", - ) - parser.add_argument( - "--dst", - type=str, - help="destination rsync target folder", - default="/tmp/buck2install/", - ) - parser.add_argument( - "--tcp-port", - type=str, - help="tcp port for installer to connect to", - required=True, - ) - # no need to parse --tcp-port and other not related params - args, _ = parser.parse_known_args(args) - return args - - -if __name__ == "__main__": - args = parse_args(sys.argv[1:]) - serve(args) diff --git a/app/buck2_install_proto/src/lib.rs b/app/buck2_install_proto/src/lib.rs index b215116d40a99..a0b80705bbff3 100644 --- a/app/buck2_install_proto/src/lib.rs +++ b/app/buck2_install_proto/src/lib.rs @@ -7,4 +7,6 @@ * of this source tree. */ +#![feature(error_generic_member_access)] + tonic::include_proto!("install"); diff --git a/app/buck2_interpreter/BUCK b/app/buck2_interpreter/BUCK index 53b6bc9738af0..8a10aec2c165e 100644 --- a/app/buck2_interpreter/BUCK +++ b/app/buck2_interpreter/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -20,17 +19,18 @@ rust_library( "fbsource//third-party/rust:plist", "fbsource//third-party/rust:regex", "fbsource//third-party/rust:serde", - "fbsource//third-party/rust:thiserror", "fbsource//third-party/rust:tokio", + "fbsource//third-party/rust:triomphe", "//buck2/allocative/allocative:allocative", "//buck2/app/buck2_common:buck2_common", "//buck2/app/buck2_core:buck2_core", + "//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_events:buck2_events", + "//buck2/app/buck2_futures:buck2_futures", "//buck2/app/buck2_util:buck2_util", "//buck2/dice/dice:dice", "//buck2/gazebo/dupe:dupe", "//buck2/gazebo/gazebo:gazebo", - "//buck2/shed/more_futures:more_futures", "//buck2/starlark-rust/starlark:starlark", "//buck2/starlark-rust/starlark_map:starlark_map", ], diff --git a/app/buck2_interpreter/Cargo.toml b/app/buck2_interpreter/Cargo.toml index 22ccc0901e419..febd074c17479 100644 --- a/app/buck2_interpreter/Cargo.toml +++ b/app/buck2_interpreter/Cargo.toml @@ -1,8 +1,10 @@ [package] +description = "Interprets build files and handles loading and interpreting extension files, and communicating this information to DICE" +edition = "2021" +license = { workspace = true } name = "buck2_interpreter" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Interprets build files and handles loading and interpreting extension files, and communicating this information to DICE" [dependencies] anyhow = { workspace = true } @@ -11,30 +13,25 @@ derivative = { workspace = true } derive_more = { workspace = true } either = { workspace = true } fancy-regex = { workspace = true } +plist = { workspace = true } regex = { workspace = true } serde = { workspace = true } -thiserror = { workspace = true } -plist = { workspace = true } tokio = { workspace = true } +triomphe = { workspace = true } allocative = { workspace = true } dice = { workspace = true } -gazebo = { workspace = true } dupe = { workspace = true } 
-gazebo_lint.version = "0.1" -gazebo_lint.optional = true -# @oss-disable: gazebo_lint.path = "../../gazebo_lint/gazebo_lint" -more_futures = { path = "../../shed/more_futures" } -starlark_map = { workspace = true } +gazebo = { workspace = true } starlark = { workspace = true } +starlark_map = { workspace = true } buck2_common = { workspace = true } -buck2_events = { workspace = true } buck2_core = { workspace = true } +buck2_error = { workspace = true } +buck2_events = { workspace = true } +buck2_futures = { workspace = true } buck2_util = { workspace = true } -[features] -# @oss-disable: default = ["gazebo_lint"] - [dev-dependencies] tempfile = { workspace = true } diff --git a/app/buck2_interpreter/src/anon_targets.rs b/app/buck2_interpreter/src/anon_targets.rs deleted file mode 100644 index 138a668efe197..0000000000000 --- a/app/buck2_interpreter/src/anon_targets.rs +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use buck2_util::late_binding::LateBinding; -use starlark::environment::GlobalsBuilder; - -/// Globals defined in `buck2_anon_targets` crate. -pub static REGISTER_ANON_TARGETS: LateBinding<fn(&mut GlobalsBuilder)> = - LateBinding::new("REGISTER_ANON_TARGETS"); diff --git a/app/buck2_interpreter/src/build_context.rs b/app/buck2_interpreter/src/build_context.rs index 8cfa8927a6498..f505aed901645 100644 --- a/app/buck2_interpreter/src/build_context.rs +++ b/app/buck2_interpreter/src/build_context.rs @@ -13,11 +13,11 @@ use starlark::eval::Evaluator; use crate::paths::path::StarlarkPath; pub static STARLARK_PATH_FROM_BUILD_CONTEXT: LateBinding< - for<'a> fn(&Evaluator<'_, 'a>) -> anyhow::Result<StarlarkPath<'a>>, + for<'a> fn(&Evaluator<'_, 'a, '_>) -> anyhow::Result<StarlarkPath<'a>>, > = LateBinding::new("STARLARK_PATH_FROM_BUILD_CONTEXT"); pub fn starlark_path_from_build_context<'a>( - eval: &Evaluator<'_, 'a>, + eval: &Evaluator<'_, 'a, '_>, ) -> anyhow::Result<StarlarkPath<'a>> { (STARLARK_PATH_FROM_BUILD_CONTEXT.get()?)(eval) } diff --git a/app/buck2_interpreter/src/bxl.rs b/app/buck2_interpreter/src/bxl.rs deleted file mode 100644 index 51f5802e1edac..0000000000000 --- a/app/buck2_interpreter/src/bxl.rs +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use buck2_util::late_binding::LateBinding; -use starlark::environment::GlobalsBuilder; - -/// Globals defined in `buck2_bxl` crate, -/// which are used to create the context for `.bxl` evaluation. -pub static BXL_SPECIFIC_GLOBALS: LateBinding<fn(&mut GlobalsBuilder)> = - LateBinding::new("BXL_SPECIFIC_GLOBALS"); diff --git a/app/buck2_interpreter/src/cfg_constructor.rs b/app/buck2_interpreter/src/cfg_constructor.rs deleted file mode 100644 index c9604a478217c..0000000000000 --- a/app/buck2_interpreter/src/cfg_constructor.rs +++ /dev/null @@ -1,14 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates.
- * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use buck2_util::late_binding::LateBinding; -use starlark::environment::GlobalsBuilder; - -pub static REGISTER_SET_CFG_CONSTRUCTOR: LateBinding<fn(&mut GlobalsBuilder)> = - LateBinding::new("REGISTER_SET_CFG_CONSTRUCTOR"); diff --git a/app/buck2_interpreter/src/coerce.rs b/app/buck2_interpreter/src/coerce.rs index e38c1d8920157..36dbdf0aea6fd 100644 --- a/app/buck2_interpreter/src/coerce.rs +++ b/app/buck2_interpreter/src/coerce.rs @@ -7,10 +7,10 @@ * of this source tree. */ -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use buck2_util::late_binding::LateBinding; use starlark::eval::Evaluator; -pub static COERCE_TARGET_LABEL: LateBinding< +pub static COERCE_TARGET_LABEL_FOR_BZL: LateBinding< fn(&mut Evaluator, &str) -> anyhow::Result<TargetLabel>, > = LateBinding::new("COERCE_TARGET_LABEL"); diff --git a/app/buck2_interpreter/src/dice.rs b/app/buck2_interpreter/src/dice.rs new file mode 100644 index 0000000000000..950038fd501ef --- /dev/null +++ b/app/buck2_interpreter/src/dice.rs @@ -0,0 +1,21 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! The dice module contains the interpreter's integration with dice. This +//! module contains the extension traits that we implement for `Arc<DiceComputations>` +//! (the implementations of the traits are in the submodules). +//! +//! Several of these extension traits provide implementations of our delegate/DI +//! traits that are themselves built on dice (ex DiceInterpreterFileOps +//! implements InterpreterFileOps by basically putting DefaultInterpreterFileOps +//! onto the dice graph). + +pub mod starlark_debug; +pub mod starlark_provider; +pub mod starlark_types; diff --git a/app/buck2_interpreter/src/dice/mod.rs b/app/buck2_interpreter/src/dice/mod.rs deleted file mode 100644 index 741bc93d62faa..0000000000000 --- a/app/buck2_interpreter/src/dice/mod.rs +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! The dice module contains the interpreter's integration with dice. This -//! module contains the extension traits that we implement for `Arc<DiceComputations>` -//! (the implementations of the traits are in the submodules). -//! -//! Several of these extension traits provide implementations of our delegate/DI -//! traits that are themselves built on dice (ex DiceInterpreterFileOps -//! implements InterpreterFileOps by basically putting DefaultInterpreterFileOps -//! onto the dice graph).
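The module doc above describes an extension-trait pattern that is easy to miss in diff form. A minimal, self-contained sketch of the idea (illustrative names only, not code from this change):

```rust
// `Dice` stands in for `DiceComputations`; the real code attaches these
// methods to the dice computation context via extension traits.
struct Dice;

trait HasGreeting {
    fn greeting(&self) -> String;
}

// Implementing the trait *for* the context lets callers write
// `ctx.greeting()` without depending on the concrete implementation,
// which in buck2 lives behind keys on the dice graph.
impl HasGreeting for Dice {
    fn greeting(&self) -> String {
        "hello from the dice graph".to_owned()
    }
}

fn main() {
    let ctx = Dice;
    println!("{}", ctx.greeting());
}
```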
- -pub mod starlark_debug; -pub mod starlark_profiler; -pub mod starlark_provider; -pub mod starlark_types; diff --git a/app/buck2_interpreter/src/dice/starlark_debug.rs b/app/buck2_interpreter/src/dice/starlark_debug.rs index 7b2efa5045380..af499c275ed30 100644 --- a/app/buck2_interpreter/src/dice/starlark_debug.rs +++ b/app/buck2_interpreter/src/dice/starlark_debug.rs @@ -20,7 +20,7 @@ pub trait SetStarlarkDebugger { fn set_starlark_debugger_handle(&mut self, hook: Option<Box<dyn StarlarkDebuggerHandle>>); } -impl HasStarlarkDebugger for DiceComputations { +impl HasStarlarkDebugger for DiceComputations<'_> { fn get_starlark_debugger_handle(&self) -> Option<&dyn StarlarkDebuggerHandle> { self.per_transaction_data() .data diff --git a/app/buck2_interpreter/src/dice/starlark_profiler.rs b/app/buck2_interpreter/src/dice/starlark_profiler.rs deleted file mode 100644 index a8bb50f3ba59b..0000000000000 --- a/app/buck2_interpreter/src/dice/starlark_profiler.rs +++ /dev/null @@ -1,254 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use allocative::Allocative; -use async_trait::async_trait; -use buck2_common::result::SharedResult; -use dice::DiceComputations; -use dice::DiceTransactionUpdater; -use dice::InjectedKey; -use dice::Key; -use dupe::Dupe; -use more_futures::cancellation::CancellationContext; -use starlark::eval::ProfileMode; - -use crate::starlark_profiler::StarlarkProfileModeOrInstrumentation; - -#[derive(Debug, thiserror::Error)] -enum StarlarkProfilerError { - #[error("profiler is not configured to profile last element (internal error)")] - ProfilerConfigurationNotLast, -} - -/// Global profiling configuration. -#[derive(PartialEq, Eq, Clone, Dupe, Debug, Allocative)] -#[derive(Default)] -pub enum StarlarkProfilerConfiguration { - /// No profiling. - #[default] - None, - /// Profile loading of one `BUCK`, everything else is instrumented. - ProfileLastLoading(ProfileMode), - /// Profile analysis of the last target, everything else is instrumented. - ProfileLastAnalysis(ProfileMode), - /// Profile analysis targets recursively.
- ProfileAnalysisRecursively(ProfileMode), - /// Profile BXL - ProfileBxl(ProfileMode), -} - -impl StarlarkProfilerConfiguration { - pub fn profile_last_bxl(&self) -> anyhow::Result<&ProfileMode> { - match self { - StarlarkProfilerConfiguration::None - | StarlarkProfilerConfiguration::ProfileLastAnalysis(_) - | StarlarkProfilerConfiguration::ProfileAnalysisRecursively(_) - | StarlarkProfilerConfiguration::ProfileLastLoading(_) => { - Err(StarlarkProfilerError::ProfilerConfigurationNotLast.into()) - } - StarlarkProfilerConfiguration::ProfileBxl(profile_mode) => Ok(profile_mode), - } - } - - pub fn profile_last_loading(&self) -> anyhow::Result<&ProfileMode> { - match self { - StarlarkProfilerConfiguration::None - | StarlarkProfilerConfiguration::ProfileLastAnalysis(_) - | StarlarkProfilerConfiguration::ProfileAnalysisRecursively(_) - | StarlarkProfilerConfiguration::ProfileBxl(_) => { - Err(StarlarkProfilerError::ProfilerConfigurationNotLast.into()) - } - StarlarkProfilerConfiguration::ProfileLastLoading(profile_mode) => Ok(profile_mode), - } - } - - pub fn profile_last_analysis(&self) -> anyhow::Result<&ProfileMode> { - match self { - StarlarkProfilerConfiguration::None - | StarlarkProfilerConfiguration::ProfileLastLoading(_) - | StarlarkProfilerConfiguration::ProfileBxl(_) => { - Err(StarlarkProfilerError::ProfilerConfigurationNotLast.into()) - } - StarlarkProfilerConfiguration::ProfileLastAnalysis(profile_mode) - | StarlarkProfilerConfiguration::ProfileAnalysisRecursively(profile_mode) => { - Ok(profile_mode) - } - } - } - - /// Profile mode for intermediate target analysis. - pub fn profile_mode_for_intermediate_analysis(&self) -> StarlarkProfileModeOrInstrumentation { - match self { - StarlarkProfilerConfiguration::None - | StarlarkProfilerConfiguration::ProfileLastLoading(_) - | StarlarkProfilerConfiguration::ProfileLastAnalysis(_) - | StarlarkProfilerConfiguration::ProfileBxl(_) => { - StarlarkProfileModeOrInstrumentation::None - } - StarlarkProfilerConfiguration::ProfileAnalysisRecursively(profile_mode) => { - StarlarkProfileModeOrInstrumentation::Profile(profile_mode.dupe()) - } - } - } -} - -#[derive( - Debug, - derive_more::Display, - Copy, - Clone, - Dupe, - Eq, - PartialEq, - Hash, - Allocative -)] -#[display(fmt = "{:?}", self)] -struct StarlarkProfilerConfigurationKey; - -#[derive( - Debug, - derive_more::Display, - Copy, - Clone, - Dupe, - Eq, - PartialEq, - Hash, - Allocative -)] -#[display(fmt = "{:?}", self)] -pub struct StarlarkProfileModeForIntermediateAnalysisKey; - -#[async_trait] -impl Key for StarlarkProfilerConfigurationKey { - type Value = SharedResult<StarlarkProfilerConfiguration>; - - async fn compute( - &self, - ctx: &mut DiceComputations, - _cancellations: &CancellationContext, - ) -> Self::Value { - let configuration = get_starlark_profiler_instrumentation_override(ctx).await?; - Ok(configuration) - } - - fn equality(x: &Self::Value, y: &Self::Value) -> bool { - match (x, y) { - (Ok(x), Ok(y)) => x == y, - _ => false, - } - } -} - -#[async_trait] -impl Key for StarlarkProfileModeForIntermediateAnalysisKey { - type Value = SharedResult<StarlarkProfileModeOrInstrumentation>; - - async fn compute( - &self, - ctx: &mut DiceComputations, - _cancellation: &CancellationContext, - ) -> SharedResult<StarlarkProfileModeOrInstrumentation> { - let configuration = get_starlark_profiler_configuration(ctx).await?; - Ok(configuration.profile_mode_for_intermediate_analysis()) - } - - fn equality(x: &Self::Value, y: &Self::Value) -> bool { - match (x, y) { - (Ok(x), Ok(y)) => x == y, - _ => false, - } - } -} - -/// Global Starlark compiler instrumentation level.
-/// -/// We profile only leaf computations (`BUCK` files or analysis), -/// and this key defines instrumentation of all the Starlark files, -/// regardless of whether the profiled entity depends on them or not. -/// It's easier to implement with a single global key, -/// the downside is we invalidate parse results when we switch -/// between normal operation/profiling. -#[derive( - Debug, - derive_more::Display, - Copy, - Clone, - Dupe, - Eq, - PartialEq, - Hash, - Allocative -)] -#[display(fmt = "{:?}", self)] -pub struct StarlarkProfilerInstrumentationOverrideKey; - -impl InjectedKey for StarlarkProfilerInstrumentationOverrideKey { - type Value = StarlarkProfilerConfiguration; - - fn equality(x: &Self::Value, y: &Self::Value) -> bool { - x == y - } -} - -#[async_trait] -pub trait SetStarlarkProfilerInstrumentation { - fn set_starlark_profiler_instrumentation_override( - &mut self, - instrumentation: StarlarkProfilerConfiguration, - ) -> anyhow::Result<()>; -} - -#[async_trait] -pub trait GetStarlarkProfilerInstrumentation { - /// Profile mode for non-final target analysis. - async fn get_profile_mode_for_intermediate_analysis( - &self, - ) -> anyhow::Result<StarlarkProfileModeOrInstrumentation>; -} - -#[async_trait] -impl SetStarlarkProfilerInstrumentation for DiceTransactionUpdater { - fn set_starlark_profiler_instrumentation_override( - &mut self, - instrumentation: StarlarkProfilerConfiguration, - ) -> anyhow::Result<()> { - Ok(self.changed_to([(StarlarkProfilerInstrumentationOverrideKey, instrumentation)])?) - } -} - -async fn get_starlark_profiler_instrumentation_override( - ctx: &DiceComputations, -) -> anyhow::Result<StarlarkProfilerConfiguration> { - Ok(ctx - .compute(&StarlarkProfilerInstrumentationOverrideKey) - .await?) -} - -/// Global profiler configuration. -/// -/// This function is not exposed outside, -/// because accessing full configuration may invalidate too much. -async fn get_starlark_profiler_configuration( - ctx: &DiceComputations, -) -> anyhow::Result<StarlarkProfilerConfiguration> { - Ok(ctx.compute(&StarlarkProfilerConfigurationKey).await??) -} - -#[async_trait] -impl GetStarlarkProfilerInstrumentation for DiceComputations { - async fn get_profile_mode_for_intermediate_analysis( - &self, - ) -> anyhow::Result<StarlarkProfileModeOrInstrumentation> { - Ok(self - .compute(&StarlarkProfileModeForIntermediateAnalysisKey) - .await??) - } -} diff --git a/app/buck2_interpreter/src/dice/starlark_provider.rs b/app/buck2_interpreter/src/dice/starlark_provider.rs index 16e198dfcd450..b091735741152 100644 --- a/app/buck2_interpreter/src/dice/starlark_provider.rs +++ b/app/buck2_interpreter/src/dice/starlark_provider.rs @@ -7,8 +7,10 @@ * of this source tree. */ -use std::ops::Deref; +use std::ops::DerefMut; +use buck2_common::legacy_configs::dice::HasLegacyConfigs; +use buck2_common::legacy_configs::key::BuckconfigKeyRef; use dice::DiceComputations; use starlark::environment::FrozenModule; use starlark::environment::Module; @@ -17,7 +19,7 @@ use starlark::eval::Evaluator; use crate::dice::starlark_debug::HasStarlarkDebugger; use crate::factory::StarlarkEvaluatorProvider; use crate::starlark_debug::StarlarkDebugController; -use crate::starlark_profiler::StarlarkProfilerOrInstrumentation; +use crate::starlark_profiler::profiler::StarlarkProfilerOpt; /// This constructs an appropriate StarlarkEvaluatorProvider to set up /// profiling/instrumentation/debugging in a starlark Evaluator for buck. @@ -30,12 +32,22 @@ use crate::starlark_profiler::StarlarkProfilerOrInstrumentation; /// /// The provided closure will be invoked and passed an appropriate /// StarlarkEvaluatorProvider.
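To make the provider/closure split described in this doc comment concrete, here is a simplified sketch; the types are stand-ins, not the real starlark or buck2 ones:

```rust
// The provider owns evaluator setup (profiling, debugging, stack limits);
// the closure only consumes a fully configured evaluator.
struct FakeEvaluator {
    max_callstack: Option<usize>,
}

trait EvaluatorProvider {
    fn make(&mut self) -> FakeEvaluator;
}

struct ConfiguredProvider {
    max_callstack: Option<usize>,
}

impl EvaluatorProvider for ConfiguredProvider {
    // All per-evaluation configuration happens in one place.
    fn make(&mut self) -> FakeEvaluator {
        FakeEvaluator { max_callstack: self.max_callstack }
    }
}

fn with_eval_provider<R>(
    provider: &mut dyn EvaluatorProvider,
    closure: impl FnOnce(FakeEvaluator) -> R,
) -> R {
    closure(provider.make())
}

fn main() {
    let mut provider = ConfiguredProvider { max_callstack: Some(2000) };
    let depth = with_eval_provider(&mut provider, |eval| eval.max_callstack);
    println!("configured callstack limit: {:?}", depth);
}
```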
-pub async fn with_starlark_eval_provider<D: Deref<Target = DiceComputations>, R>( - ctx: D, - profiler_instrumentation: &mut StarlarkProfilerOrInstrumentation<'_>, +pub async fn with_starlark_eval_provider<'a, D: DerefMut<Target = DiceComputations<'a>>, R>( + mut ctx: D, + profiler_instrumentation: &mut StarlarkProfilerOpt<'_>, description: String, closure: impl FnOnce(&mut dyn StarlarkEvaluatorProvider, D) -> anyhow::Result<R>, ) -> anyhow::Result<R> { + let root_buckconfig = ctx.get_legacy_root_config_on_dice().await?; + + let starlark_max_callstack_size = + root_buckconfig + .view(&mut ctx) + .parse::<usize>(BuckconfigKeyRef { + section: "buck2", + property: "starlark_max_callstack_size", + })?; + let debugger_handle = ctx.get_starlark_debugger_handle(); let debugger = match debugger_handle { Some(v) => Some(v.start_eval(&description).await?), @@ -43,18 +55,26 @@ pub async fn with_starlark_eval_provider<D: Deref<Target = DiceComputations>, R> }; struct EvalProvider<'a, 'b> { - profiler: &'a mut StarlarkProfilerOrInstrumentation<'b>, + profiler: &'a mut StarlarkProfilerOpt<'b>, debugger: Option<Box<dyn StarlarkDebugController>>, + starlark_max_callstack_size: Option<usize>, } impl StarlarkEvaluatorProvider for EvalProvider<'_, '_> { - fn make<'v, 'a>(&mut self, module: &'v Module) -> anyhow::Result<Evaluator<'v, 'a>> { + fn make<'v, 'a, 'e>( + &mut self, + module: &'v Module, + ) -> anyhow::Result<(Evaluator<'v, 'a, 'e>, bool)> { + let mut eval = Evaluator::new(module); - self.profiler.initialize(&mut eval)?; + if let Some(stack_size) = self.starlark_max_callstack_size { + eval.set_max_callstack_size(stack_size)?; + } + + let is_profiling_enabled = self.profiler.initialize(&mut eval)?; if let Some(v) = &mut self.debugger { v.initialize(&mut eval)?; } - Ok(eval) + Ok((eval, is_profiling_enabled)) } fn evaluation_complete(&mut self, eval: &mut Evaluator) -> anyhow::Result<()> { @@ -70,6 +90,7 @@ pub async fn with_starlark_eval_provider<D: Deref<Target = DiceComputations>, R> let mut provider = EvalProvider { profiler: profiler_instrumentation, debugger, + starlark_max_callstack_size, }; // If we're debugging, we need to move this to a tokio blocking task. diff --git a/app/buck2_interpreter/src/dice/starlark_types.rs b/app/buck2_interpreter/src/dice/starlark_types.rs index 82420665e09bc..b05ed32c3b69f 100644 --- a/app/buck2_interpreter/src/dice/starlark_types.rs +++ b/app/buck2_interpreter/src/dice/starlark_types.rs @@ -31,7 +31,7 @@ struct StarlarkTypesValue { Hash, Allocative )] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] struct StarlarkTypesKey; impl InjectedKey for StarlarkTypesKey { @@ -68,20 +68,20 @@ impl SetStarlarkTypes for DiceTransactionUpdater { #[async_trait] pub trait GetStarlarkTypes { - async fn get_disable_starlark_types(&self) -> anyhow::Result<bool>; - async fn get_unstable_typecheck(&self) -> anyhow::Result<bool>; + async fn get_disable_starlark_types(&mut self) -> anyhow::Result<bool>; + async fn get_unstable_typecheck(&mut self) -> anyhow::Result<bool>; } #[async_trait] -impl GetStarlarkTypes for DiceComputations { - async fn get_disable_starlark_types(&self) -> anyhow::Result<bool> { +impl GetStarlarkTypes for DiceComputations<'_> { + async fn get_disable_starlark_types(&mut self) -> anyhow::Result<bool> { Ok(self .compute(&StarlarkTypesKey) .await?
.disable_starlark_types) } - async fn get_unstable_typecheck(&self) -> anyhow::Result<bool> { + async fn get_unstable_typecheck(&mut self) -> anyhow::Result<bool> { Ok(self.compute(&StarlarkTypesKey).await?.unstable_typecheck) } } diff --git a/app/buck2_interpreter/src/downstream_crate_starlark_defs.rs b/app/buck2_interpreter/src/downstream_crate_starlark_defs.rs new file mode 100644 index 0000000000000..75ae6c8137309 --- /dev/null +++ b/app/buck2_interpreter/src/downstream_crate_starlark_defs.rs @@ -0,0 +1,40 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use buck2_util::late_binding::LateBinding; +use starlark::environment::GlobalsBuilder; + +/// Globals defined in `buck2_build_api`. +pub static REGISTER_BUCK2_BUILD_API_GLOBALS: LateBinding<fn(&mut GlobalsBuilder)> = + LateBinding::new("REGISTER_BUCK2_BUILD_API_GLOBALS"); + +/// `__internal__`s defined in `buck2_build_api`. +pub static REGISTER_BUCK2_BUILD_API_INTERNALS: LateBinding<fn(&mut GlobalsBuilder)> = + LateBinding::new("REGISTER_BUCK2_BUILD_API_INTERNALS"); + +/// Globals defined in `buck2_transitions` crate. +pub static REGISTER_BUCK2_TRANSITION_GLOBALS: LateBinding<fn(&mut GlobalsBuilder)> = + LateBinding::new("REGISTER_BUCK2_TRANSITION_GLOBALS"); + +/// Globals defined in `buck2_action_impl` crate. +pub static REGISTER_BUCK2_ACTION_IMPL_GLOBALS: LateBinding<fn(&mut GlobalsBuilder)> = + LateBinding::new("REGISTER_BUCK2_ACTION_IMPL_GLOBALS"); + +/// Globals defined in `buck2_anon_targets` crate. +pub static REGISTER_BUCK2_ANON_TARGETS_GLOBALS: LateBinding<fn(&mut GlobalsBuilder)> = + LateBinding::new("REGISTER_BUCK2_ANON_TARGETS_GLOBALS"); + +/// Globals defined in `buck2_bxl` crate, +/// which are used to create the context for `.bxl` evaluation. +pub static REGISTER_BUCK2_BXL_GLOBALS: LateBinding<fn(&mut GlobalsBuilder)> = + LateBinding::new("REGISTER_BUCK2_BXL_GLOBALS"); + +/// Globals defined in `buck2_cfg_constructor` crate. +pub static REGISTER_BUCK2_CFG_CONSTRUCTOR_GLOBALS: LateBinding<fn(&mut GlobalsBuilder)> = + LateBinding::new("REGISTER_BUCK2_CFG_CONSTRUCTOR_GLOBALS"); diff --git a/app/buck2_interpreter/src/extra/mod.rs b/app/buck2_interpreter/src/extra.rs similarity index 100% rename from app/buck2_interpreter/src/extra/mod.rs rename to app/buck2_interpreter/src/extra.rs diff --git a/app/buck2_interpreter/src/extra/xcode.rs b/app/buck2_interpreter/src/extra/xcode.rs index a51fe4808a97f..961b5160cc888 100644 --- a/app/buck2_interpreter/src/extra/xcode.rs +++ b/app/buck2_interpreter/src/extra/xcode.rs @@ -9,17 +9,16 @@ use std::io; use std::path::Path; -use std::path::PathBuf; use allocative::Allocative; use anyhow::Context; use buck2_core::fs::fs_util; use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; +use buck2_core::fs::paths::abs_path::AbsPath; use regex::Regex; use serde::Deserialize; -use thiserror::Error; -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] enum XcodeVersionError { #[error("XCode select symlink `{}` resolved to path without parent: `{}`", XCODE_SELECT_SYMLINK, _0.display())] XcodeSelectSymlinkResolvedToPathWithoutParent(AbsNormPathBuf), @@ -58,7 +57,7 @@ impl XcodeVersionInfo { // Construct from version.plist in root of Xcode install dir. pub fn new() -> anyhow::Result<Option<Self>> { let resolved_xcode_path = - fs_util::canonicalize_if_exists(PathBuf::from(XCODE_SELECT_SYMLINK)) + fs_util::canonicalize_if_exists(AbsPath::new(XCODE_SELECT_SYMLINK)?)
.context("resolve selected xcode link")?; let resolved_xcode_path = match resolved_xcode_path { Some(p) => p, @@ -139,6 +138,7 @@ impl XcodeVersionInfo { #[cfg(test)] mod tests { use std::fs; + use std::path::PathBuf; use super::*; diff --git a/app/buck2_interpreter/src/factory.rs b/app/buck2_interpreter/src/factory.rs index 5342edefa6d82..f9b72519bda85 100644 --- a/app/buck2_interpreter/src/factory.rs +++ b/app/buck2_interpreter/src/factory.rs @@ -14,8 +14,11 @@ use starlark::eval::Evaluator; /// Provides a starlark Evaluator. pub trait StarlarkEvaluatorProvider { /// Creates an Evaluator for a module. The evaluator will be configured for instrumenting/profiling/debugging - /// as appropriate. - fn make<'v, 'a>(&mut self, module: &'v Module) -> anyhow::Result<Evaluator<'v, 'a>>; + /// as appropriate. Also returns whether profiling is enabled. + fn make<'v, 'a, 'e>( + &mut self, + module: &'v Module, + ) -> anyhow::Result<(Evaluator<'v, 'a, 'e>, bool)>; fn evaluation_complete(&mut self, eval: &mut Evaluator) -> anyhow::Result<()>; @@ -26,8 +29,11 @@ pub trait StarlarkEvaluatorProvider { pub struct StarlarkPassthroughProvider; impl StarlarkEvaluatorProvider for StarlarkPassthroughProvider { - fn make<'v, 'a>(&mut self, module: &'v Module) -> anyhow::Result<Evaluator<'v, 'a>> { - Ok(Evaluator::new(module)) + fn make<'v, 'a, 'e>( + &mut self, + module: &'v Module, + ) -> anyhow::Result<(Evaluator<'v, 'a, 'e>, bool)> { + Ok((Evaluator::new(module), true)) } fn evaluation_complete(&mut self, _eval: &mut Evaluator) -> anyhow::Result<()> { diff --git a/app/buck2_interpreter/src/file_loader.rs b/app/buck2_interpreter/src/file_loader.rs index b8ca807547ab0..885dfd7944352 100644 --- a/app/buck2_interpreter/src/file_loader.rs +++ b/app/buck2_interpreter/src/file_loader.rs @@ -25,7 +25,7 @@ use starlark_map::ordered_map::OrderedMap; use crate::paths::module::OwnedStarlarkModulePath; use crate::paths::module::StarlarkModulePath; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum FileLoaderError { #[error("`native` in `prelude.bzl` must be a struct")] NativeMustBeStruct, diff --git a/app/buck2_interpreter/src/file_type.rs b/app/buck2_interpreter/src/file_type.rs index e07080d7f26dc..08ee4ed476764 100644 --- a/app/buck2_interpreter/src/file_type.rs +++ b/app/buck2_interpreter/src/file_type.rs @@ -22,6 +22,7 @@ pub enum StarlarkFileType { /// What type of file are we parsing - a `.bzl` file, `.bxl` file, or a `BUCK`/`TARGETS` file.
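The hunks below build one `Dialect` per file type from a common base via struct-update syntax, toggling features such as `enable_def` and the newly threaded-through `enable_f_strings`. A reduced sketch of that shape (field and type names are illustrative, not the starlark crate's):

```rust
#[derive(Clone, Debug)]
struct Dialect {
    enable_def: bool,
    enable_top_level_stmt: bool,
    enable_f_strings: bool,
}

// Common base, analogous to `Dialect::Standard`.
const BASE: Dialect = Dialect {
    enable_def: true,
    enable_top_level_stmt: true,
    enable_f_strings: false,
};

enum FileType {
    Buck,
    Bzl,
}

fn dialect(t: FileType, enable_f_strings: bool) -> Dialect {
    match t {
        // BUCK files are declarative: no `def`, no top-level statements.
        FileType::Buck => Dialect {
            enable_def: false,
            enable_top_level_stmt: false,
            enable_f_strings,
        },
        // .bzl files keep the full feature set of the base dialect.
        FileType::Bzl => Dialect { enable_f_strings, ..BASE },
    }
}

fn main() {
    println!("{:?}", dialect(FileType::Buck, true));
    println!("{:?}", dialect(FileType::Bzl, false));
}
```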
impl StarlarkFileType { pub fn dialect(&self, disable_starlark_types: bool) -> Dialect { + let enable_f_strings = buck2_core::is_open_source(); let buck_dialect: Dialect = Dialect { enable_def: false, enable_lambda: true, @@ -30,6 +31,7 @@ impl StarlarkFileType { enable_types: DialectTypes::Disable, enable_load_reexport: false, enable_top_level_stmt: false, + enable_f_strings, ..Dialect::Standard }; let package_dialect: Dialect = Dialect { @@ -40,6 +42,7 @@ impl StarlarkFileType { enable_types: DialectTypes::Disable, enable_load_reexport: false, enable_top_level_stmt: false, + enable_f_strings, ..Dialect::Standard }; let bzl_dialect: Dialect = Dialect { @@ -54,6 +57,7 @@ impl StarlarkFileType { }, enable_load_reexport: false, enable_top_level_stmt: true, + enable_f_strings, ..Dialect::Standard }; let bxl_dialect: Dialect = Dialect { @@ -68,6 +72,7 @@ impl StarlarkFileType { }, enable_load_reexport: false, enable_top_level_stmt: true, + enable_f_strings, ..Dialect::Standard }; diff --git a/app/buck2_interpreter/src/functions/mod.rs b/app/buck2_interpreter/src/functions/mod.rs deleted file mode 100644 index 0f39fb404ab80..0000000000000 --- a/app/buck2_interpreter/src/functions/mod.rs +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -pub mod more; -pub mod transition; diff --git a/app/buck2_interpreter/src/functions/more.rs b/app/buck2_interpreter/src/functions/more.rs deleted file mode 100644 index d86ec7746c138..0000000000000 --- a/app/buck2_interpreter/src/functions/more.rs +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use buck2_util::late_binding::LateBinding; -use starlark::environment::GlobalsBuilder; - -/// Globals defined in `buck2_build_api`. -pub static REGISTER_BUCK2_BUILD_API_GLOBALS: LateBinding<fn(&mut GlobalsBuilder)> = - LateBinding::new("REGISTER_BUCK2_BUILD_API_GLOBALS"); diff --git a/app/buck2_interpreter/src/functions/transition.rs b/app/buck2_interpreter/src/functions/transition.rs deleted file mode 100644 index f13fa470c8aa6..0000000000000 --- a/app/buck2_interpreter/src/functions/transition.rs +++ /dev/null @@ -1,14 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree.
- */ - -use buck2_util::late_binding::LateBinding; -use starlark::environment::GlobalsBuilder; - -pub static REGISTER_TRANSITION: LateBinding<fn(&mut GlobalsBuilder)> = - LateBinding::new("REGISTER_TRANSITION"); diff --git a/app/buck2_interpreter/src/import_paths.rs b/app/buck2_interpreter/src/import_paths.rs index b536d4faac570..05f6f5ff551e0 100644 --- a/app/buck2_interpreter/src/import_paths.rs +++ b/app/buck2_interpreter/src/import_paths.rs @@ -13,18 +13,17 @@ use allocative::Allocative; use async_trait::async_trait; use buck2_common::dice::cells::HasCellResolver; use buck2_common::legacy_configs::dice::HasLegacyConfigs; -use buck2_common::legacy_configs::LegacyBuckConfig; -use buck2_common::result::SharedResult; -use buck2_common::result::ToUnsharedResultExt; +use buck2_common::legacy_configs::key::BuckconfigKeyRef; +use buck2_common::legacy_configs::view::LegacyBuckConfigView; use buck2_core::bzl::ImportPath; use buck2_core::cells::build_file_cell::BuildFileCell; use buck2_core::cells::cell_path::CellPath; use buck2_core::cells::paths::CellRelativePathBuf; use buck2_core::cells::CellAliasResolver; +use buck2_futures::cancellation::CancellationContext; use dice::DiceComputations; use dice::Key; use dupe::Dupe; -use more_futures::cancellation::CancellationContext; use crate::package_imports::PackageImplicitImports; @@ -36,7 +35,7 @@ pub struct ImplicitImportPaths { impl ImplicitImportPaths { pub fn parse( - config: &LegacyBuckConfig, + mut config: impl LegacyBuckConfigView, cell_name: BuildFileCell, cell_alias_resolver: &CellAliasResolver, ) -> anyhow::Result { // normal imports. e.g. it uses `cell//path/to/file.bzl` instead of // `cell//path/to:file.bzl`. let root_import = config - .get("buildfile", "includes") + .get(BuckconfigKeyRef { + section: "buildfile", + property: "includes", + })? .map(|i| { - let (cell_alias, path): (&str, &str) = i.split_once("//").unwrap_or(("", i)); + let (cell_alias, path): (&str, &str) = i.split_once("//").unwrap_or(("", &*i)); let path = CellRelativePathBuf::try_from(path.to_owned())?; let path = CellPath::new(cell_alias_resolver.resolve(cell_alias)?, path.to_buf()); @@ -58,7 +60,12 @@ impl ImplicitImportPaths { let package_imports = PackageImplicitImports::new( cell_name, cell_alias_resolver.dupe(), - config.get("buildfile", "package_includes"), + config + .get(BuckconfigKeyRef { + section: "buildfile", + property: "package_includes", + })? + .as_deref(), )?; Ok(ImplicitImportPaths { root_import, @@ -74,43 +81,40 @@ impl ImplicitImportPaths { #[async_trait] pub trait HasImportPaths { async fn import_paths_for_cell( - &self, + &mut self, cell_name: BuildFileCell, ) -> anyhow::Result<Arc<ImplicitImportPaths>>; } #[async_trait] -impl HasImportPaths for DiceComputations { +impl HasImportPaths for DiceComputations<'_> { async fn import_paths_for_cell( - &self, + &mut self, cell_name: BuildFileCell, ) -> anyhow::Result<Arc<ImplicitImportPaths>> { #[derive(Debug, Eq, PartialEq, Hash, Clone, derive_more::Display, Allocative)] - #[display(fmt = "{}", cell_name)] + #[display("{}", cell_name)] struct ImportPathsKey { cell_name: BuildFileCell, } #[async_trait] impl Key for ImportPathsKey { - type Value = SharedResult<Arc<ImplicitImportPaths>>; + type Value = buck2_error::Result<Arc<ImplicitImportPaths>>; async fn compute( &self, ctx: &mut DiceComputations, _cancellation: &CancellationContext, ) -> Self::Value { - let config = ctx - .get_legacy_config_for_cell(self.cell_name.name()) - .await?; - let cell_resolver = ctx.get_cell_resolver().await?; - let cell_alias_resolver = cell_resolver - .get(self.cell_name.name())?
- .cell_alias_resolver(); + let config = ctx.get_legacy_config_on_dice(self.cell_name.name()).await?; + let cell_alias_resolver = + ctx.get_cell_alias_resolver(self.cell_name.name()).await?; + Ok(Arc::new(ImplicitImportPaths::parse( - &config, + config.view(ctx), self.cell_name, - cell_alias_resolver, + &cell_alias_resolver, )?)) } @@ -124,6 +128,6 @@ impl HasImportPaths for DiceComputations { self.compute(&ImportPathsKey { cell_name }) .await? - .unshared_error() + .map_err(anyhow::Error::from) } } diff --git a/app/buck2_interpreter/src/late_binding_ty.rs b/app/buck2_interpreter/src/late_binding_ty.rs new file mode 100644 index 0000000000000..3f23db774170d --- /dev/null +++ b/app/buck2_interpreter/src/late_binding_ty.rs @@ -0,0 +1,36 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +macro_rules! late_binding_ty { + ($name:ident, $late:ident) => { + static $late: buck2_util::late_binding::LateBinding<starlark::typing::Ty> = + buck2_util::late_binding::LateBinding::new(stringify!($name)); + + #[allow(clippy::empty_enum)] + pub enum $name {} + + impl $name { + pub fn init(ty: starlark::typing::Ty) { + $late.init(ty); + } + } + + impl starlark::values::type_repr::StarlarkTypeRepr for $name { + type Canonical = Self; + + fn starlark_type_repr() -> starlark::typing::Ty { + dupe::Dupe::dupe($late.get().unwrap()) + } + } + }; } + +late_binding_ty!(AnalysisContextReprLate, ANALYSIS_CONTEXT_REPR_LATE); +late_binding_ty!(ProviderReprLate, PROVIDER_REPR_LATE); +late_binding_ty!(TransitionReprLate, TRANSITION_REPR_LATE); diff --git a/app/buck2_interpreter/src/lib.rs b/app/buck2_interpreter/src/lib.rs index 3e0253eb5a820..38e1526afcc0c 100644 --- a/app/buck2_interpreter/src/lib.rs +++ b/app/buck2_interpreter/src/lib.rs @@ -7,32 +7,25 @@ * of this source tree. */ +#![feature(error_generic_member_access)] + //! Implements Buck's handling of target patterns and parsing of build files.
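The `LateBinding` slots that appear throughout this crate (including in the `late_binding_ty!` macro just above) follow a fill-once/read-later pattern that breaks dependency cycles between crates. A minimal stand-in built on `std::sync::OnceLock` (illustrative; not the real `buck2_util` implementation):

```rust
use std::sync::OnceLock;

// A named slot that a downstream crate fills exactly once and that
// upstream code reads at use time rather than at link time.
struct LateBinding<T> {
    name: &'static str,
    cell: OnceLock<T>,
}

impl<T> LateBinding<T> {
    const fn new(name: &'static str) -> Self {
        LateBinding { name, cell: OnceLock::new() }
    }

    fn init(&self, value: T) {
        if self.cell.set(value).is_err() {
            panic!("{} initialized twice", self.name);
        }
    }

    fn get(&self) -> &T {
        self.cell
            .get()
            .unwrap_or_else(|| panic!("{} not initialized", self.name))
    }
}

static GREETING: LateBinding<fn() -> &'static str> = LateBinding::new("GREETING");

fn main() {
    GREETING.init(|| "hello"); // done once by the downstream crate
    println!("{}", (GREETING.get())()); // used by the upstream crate
}
```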
#![feature(pattern)] #![feature(try_blocks)] #![feature(never_type)] #![feature(box_patterns)] -// Plugins -#![cfg_attr(feature = "gazebo_lint", feature(plugin))] -#![cfg_attr(feature = "gazebo_lint", allow(deprecated))] // :( -#![cfg_attr(feature = "gazebo_lint", plugin(gazebo_lint))] - -#[macro_use] -extern crate starlark; -pub mod anon_targets; pub mod build_context; -pub mod bxl; -pub mod cfg_constructor; pub mod coerce; pub mod dice; +pub mod downstream_crate_starlark_defs; pub mod extra; pub mod factory; pub mod file_loader; pub mod file_type; -pub mod functions; pub mod import_paths; +pub mod late_binding_ty; pub mod load_module; pub mod package_imports; pub mod parse_import; @@ -40,6 +33,7 @@ pub mod paths; pub mod plugins; pub mod prelude_path; pub mod print_handler; +pub mod soft_error; pub mod starlark_debug; pub mod starlark_profiler; pub mod starlark_promise; diff --git a/app/buck2_interpreter/src/load_module.rs b/app/buck2_interpreter/src/load_module.rs index 9ffcbcc4cae3f..25f0f8354f210 100644 --- a/app/buck2_interpreter/src/load_module.rs +++ b/app/buck2_interpreter/src/load_module.rs @@ -17,7 +17,6 @@ use starlark::environment::Globals; use crate::file_loader::LoadedModule; use crate::file_loader::ModuleDeps; -use crate::file_type::StarlarkFileType; use crate::paths::module::StarlarkModulePath; use crate::paths::package::PackageFilePath; use crate::prelude_path::PreludePath; @@ -26,13 +25,13 @@ use crate::prelude_path::PreludePath; pub trait InterpreterCalculationImpl: Send + Sync + 'static { async fn get_loaded_module( &self, - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, path: StarlarkModulePath<'_>, ) -> anyhow::Result<LoadedModule>; async fn get_module_deps( &self, - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, package: PackageLabel, build_file_cell: BuildFileCell, ) -> anyhow::Result<ModuleDeps>; @@ -40,17 +39,16 @@ pub trait InterpreterCalculationImpl: Send + Sync + 'static { /// Return `None` if the PACKAGE file doesn't exist. async fn get_package_file_deps( &self, - ctx: &DiceComputations, - package: &PackageFilePath, - ) -> anyhow::Result<Option<Vec<ImportPath>>>; + ctx: &mut DiceComputations<'_>, + package: PackageLabel, + ) -> anyhow::Result<Option<(PackageFilePath, Vec<ImportPath>)>>; - async fn global_env_for_file_type( - &self, - ctx: &DiceComputations, - file_type: StarlarkFileType, - ) -> anyhow::Result<Globals>; + async fn global_env(&self, ctx: &mut DiceComputations<'_>) -> anyhow::Result<Globals>; - async fn prelude_import(&self, ctx: &DiceComputations) -> anyhow::Result<Option<PreludePath>>; + async fn prelude_import( + &self, + ctx: &mut DiceComputations<'_>, + ) -> anyhow::Result<Option<PreludePath>>; } pub static INTERPRETER_CALCULATION_IMPL: LateBinding<&'static dyn InterpreterCalculationImpl> = @@ -59,22 +57,37 @@ pub static INTERPRETER_CALCULATION_IMPL: LateBinding<&'static dyn InterpreterCal #[async_trait] pub trait InterpreterCalculation { /// Returns the LoadedModule for a given starlark file. This is cached on the dice graph. - async fn get_loaded_module(&self, path: StarlarkModulePath<'_>) - -> anyhow::Result<LoadedModule>; + async fn get_loaded_module( + &mut self, + path: StarlarkModulePath<'_>, + ) -> anyhow::Result<LoadedModule>; async fn get_loaded_module_from_import_path( - &self, + &mut self, path: &ImportPath, ) -> anyhow::Result<LoadedModule> { self.get_loaded_module(StarlarkModulePath::LoadFile(path)) .await } + + async fn get_loaded_module_imports( + &mut self, + path: &ImportPath, + ) -> anyhow::Result<Vec<ImportPath>> { + //TODO(benfoxman): Don't need to get the whole module, just parse the imports. + Ok(self + .get_loaded_module_from_import_path(path) + .await?
+ .imports() + .cloned() + .collect()) + } } #[async_trait] -impl InterpreterCalculation for DiceComputations { +impl InterpreterCalculation for DiceComputations<'_> { async fn get_loaded_module( - &self, + &mut self, path: StarlarkModulePath<'_>, ) -> anyhow::Result<LoadedModule> { INTERPRETER_CALCULATION_IMPL diff --git a/app/buck2_interpreter/src/package_imports.rs b/app/buck2_interpreter/src/package_imports.rs index 6c3a3b5cc9740..c8cd52cdbeb0e 100644 --- a/app/buck2_interpreter/src/package_imports.rs +++ b/app/buck2_interpreter/src/package_imports.rs @@ -18,11 +18,10 @@ use buck2_core::cells::paths::CellRelativePath; use buck2_core::cells::paths::CellRelativePathBuf; use buck2_core::cells::CellAliasResolver; use buck2_core::package::PackageLabel; -use thiserror::Error; use crate::parse_import::parse_import; -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] enum PackageImportsError { #[error("Expected value to contain `=>`. Got `{0}`.")] MissingArrow(String), @@ -128,7 +127,6 @@ mod tests { use buck2_core::cells::alias::NonEmptyCellAlias; use buck2_core::cells::name::CellName; - use buck2_core::cells::CellAliasResolver; use dupe::Dupe; use super::*; diff --git a/app/buck2_interpreter/src/parse_import.rs b/app/buck2_interpreter/src/parse_import.rs index 9589133b99510..6c08de365d91e 100644 --- a/app/buck2_interpreter/src/parse_import.rs +++ b/app/buck2_interpreter/src/parse_import.rs @@ -17,9 +17,9 @@ use buck2_core::cells::paths::CellRelativePathBuf; use buck2_core::cells::CellAliasResolver; use buck2_core::fs::paths::file_name::FileName; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; -use thiserror::Error; -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] +#[buck2(input)] enum ImportParseError { #[error( "Unable to parse import spec. Expected format `(@)//package/name:filename.bzl` or `:filename.bzl`. Got `{0}`" )] UnexpectedFormat(String), @@ -31,9 +31,7 @@ enum ImportParseError { EmptyFileName(String), #[error("Unexpected relative import spec. Got `{0}`")] ProhibitedRelativeImport(String), - #[error( - "Invalid path `{0}` for file relative import path. Should be a forward relative path." - )] + #[error("Invalid path `{0}` for file relative import path. Should be a forward relative path.")] InvalidCurrentPathWhenFileRelativeImport(String), #[error( "Unable to parse import spec. Expected format `(@)//package/name:filename.bzl` or `:filename.bzl`, but got a path.
Got `{0}`" )] @@ -174,7 +172,6 @@ mod tests { use buck2_core::cells::alias::NonEmptyCellAlias; use buck2_core::cells::name::CellName; - use buck2_core::cells::CellAliasResolver; use super::*; diff --git a/app/buck2_interpreter/src/paths/mod.rs b/app/buck2_interpreter/src/paths.rs similarity index 100% rename from app/buck2_interpreter/src/paths/mod.rs rename to app/buck2_interpreter/src/paths.rs diff --git a/app/buck2_interpreter/src/paths/bxl.rs b/app/buck2_interpreter/src/paths/bxl.rs index 504b57096ec2e..c71f421862a44 100644 --- a/app/buck2_interpreter/src/paths/bxl.rs +++ b/app/buck2_interpreter/src/paths/bxl.rs @@ -12,7 +12,6 @@ use buck2_core::cells::build_file_cell::BuildFileCell; use buck2_core::cells::cell_path::CellPath; use buck2_core::cells::name::CellName; use buck2_core::cells::paths::CellRelativePathBuf; -use thiserror::Error; /// Path of a `bxl` file for `bxl` commands #[derive( @@ -26,13 +25,13 @@ use thiserror::Error; PartialOrd, Allocative )] -#[display(fmt = "{}", path)] +#[display("{}", path)] pub struct BxlFilePath { /// The path of this bxl file, including the `bxl` extension path: CellPath, } -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] #[error("Expected a cell path to a `.bxl` file, but got `{0}`")] struct BxlPathError(CellPath); diff --git a/app/buck2_interpreter/src/paths/module.rs b/app/buck2_interpreter/src/paths/module.rs index 33696d4e084ca..6fc01fb040d72 100644 --- a/app/buck2_interpreter/src/paths/module.rs +++ b/app/buck2_interpreter/src/paths/module.rs @@ -36,7 +36,7 @@ use crate::paths::path::StarlarkPath; Eq, PartialEq )] -#[display(fmt = "{}", self.id())] +#[display("{}", self.id())] pub enum StarlarkModulePath<'a> { /// a file to be imported LoadFile(&'a ImportPath), @@ -85,7 +85,7 @@ impl<'a> StarlarkModulePath<'a> { } #[derive(Clone, derive_more::Display, Debug, Eq, PartialEq, Allocative)] -#[display(fmt = "{}", self.borrow())] +#[display("{}", self.borrow())] pub enum OwnedStarlarkModulePath { LoadFile(ImportPath), BxlFile(BxlFilePath), diff --git a/app/buck2_interpreter/src/paths/package.rs b/app/buck2_interpreter/src/paths/package.rs index 0be62a94a8b91..01f3a62d6be00 100644 --- a/app/buck2_interpreter/src/paths/package.rs +++ b/app/buck2_interpreter/src/paths/package.rs @@ -15,20 +15,37 @@ use buck2_core::cells::name::CellName; use buck2_core::fs::paths::file_name::FileName; #[derive(Clone, Debug, Eq, PartialEq, Hash, Allocative, derive_more::Display)] -#[display(fmt = "{}", path)] +#[display("{}", path)] pub struct PackageFilePath { /// Including `/PACKAGE`. path: CellPath, } impl PackageFilePath { - pub const PACKAGE_FILE_NAME: &'static FileName = FileName::unchecked_new("PACKAGE"); + pub fn package_file_names() -> impl Iterator<Item = &'static FileName> { + [ + FileName::unchecked_new("BUCK_TREE"), + FileName::unchecked_new("PACKAGE"), + ] + .into_iter() + } + + /// Files which could be `PACKAGE` files. + pub fn for_dir(path: CellPathRef) -> impl Iterator<Item = PackageFilePath> + '_ { + Self::package_file_names().map(move |name| PackageFilePath { + path: path.join(name), + }) + } - /// Create for directory containing `PACKAGE` file.
- pub fn for_dir(path: CellPathRef) -> PackageFilePath { - PackageFilePath { - path: path.join(Self::PACKAGE_FILE_NAME), + pub fn from_file_path(path: CellPathRef) -> Option<PackageFilePath> { + for file_name in Self::package_file_names() { + if path.ends_with(file_name.as_ref()) { + return Some(PackageFilePath { + path: path.to_owned(), + }); + } } + None } pub fn cell(&self) -> CellName { @@ -46,10 +63,6 @@ impl PackageFilePath { .expect("constructor verifies that path is not root") } - pub fn parent_package_file(&self) -> Option<PackageFilePath> { - self.dir().parent().map(PackageFilePath::for_dir) - } - pub fn path(&self) -> &CellPath { &self.path } diff --git a/app/buck2_interpreter/src/paths/path.rs b/app/buck2_interpreter/src/paths/path.rs index 1fcf78177d26d..5495bfac78983 100644 --- a/app/buck2_interpreter/src/paths/path.rs +++ b/app/buck2_interpreter/src/paths/path.rs @@ -27,7 +27,7 @@ use crate::paths::package::PackageFilePath; /// Path to file containing starlark that can be evaluated by the interpreter. #[derive(Display, Clone, Copy, Dupe, Debug, UnpackVariants)] -#[display(fmt = "{}", self.id())] +#[display("{}", self.id())] pub enum StarlarkPath<'a> { /// a build file BuildFile(&'a BuildFilePath), @@ -78,7 +78,7 @@ impl<'a> StarlarkPath<'a> { } #[derive(Clone, Display, Debug, Eq, Hash, PartialEq, Allocative)] -#[display(fmt = "{}", self.borrow())] +#[display("{}", self.borrow())] pub enum OwnedStarlarkPath { /// a build file BuildFile(BuildFilePath), diff --git a/app/buck2_interpreter/src/prelude_path.rs b/app/buck2_interpreter/src/prelude_path.rs index ebbdd61f62d07..42ef943695b74 100644 --- a/app/buck2_interpreter/src/prelude_path.rs +++ b/app/buck2_interpreter/src/prelude_path.rs @@ -37,9 +37,13 @@ impl PreludePath { } } -pub fn prelude_path(cell_resolver: &CellResolver) -> anyhow::Result<PreludePath> { +pub fn prelude_path(cell_resolver: &CellResolver) -> anyhow::Result<Option<PreludePath>> { let alias_resolver = cell_resolver.root_cell_cell_alias_resolver(); - let prelude_cell = alias_resolver.resolve("prelude")?; + let Ok(prelude_cell) = alias_resolver.resolve("prelude") else { + return Ok(None); + }; let prelude_file = CellRelativePathBuf::unchecked_new("prelude.bzl".to_owned()); - ImportPath::new_same_cell(CellPath::new(prelude_cell, prelude_file)).map(PreludePath) + Ok(Some(PreludePath(ImportPath::new_same_cell( + CellPath::new(prelude_cell, prelude_file), + )?))) } diff --git a/app/buck2_interpreter/src/soft_error.rs b/app/buck2_interpreter/src/soft_error.rs new file mode 100644 index 0000000000000..fce81f0cc2941 --- /dev/null +++ b/app/buck2_interpreter/src/soft_error.rs @@ -0,0 +1,25 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use buck2_core::soft_error; +use buck2_error::ErrorTag; +use starlark::eval::SoftErrorHandler; +use starlark::ErrorKind; pub struct Buck2StarlarkSoftErrorHandler; +/// When starlark deprecates something, we propagate it to our `soft_error!` handler.
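Before the handler implementation below, a reduced sketch of the hand-off it performs may help: starlark reports a categorized soft error, and the embedder decides whether to log it or escalate it into a hard failure. The trait and names here are simplified stand-ins, not the real starlark API:

```rust
trait SoftErrorHandler {
    // Returning Err escalates the soft error into a hard one.
    fn soft_error(&self, category: &str, message: &str) -> Result<(), String>;
}

struct LogOnlyHandler;

impl SoftErrorHandler for LogOnlyHandler {
    fn soft_error(&self, category: &str, message: &str) -> Result<(), String> {
        // A real handler might tag, sample, or quota these; this one logs.
        eprintln!("soft error [starlark_rust_{category}]: {message}");
        Ok(())
    }
}

fn main() {
    let handler = LogOnlyHandler;
    handler
        .soft_error("deprecation", "this construct is deprecated")
        .expect("handler chose not to escalate");
}
```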
+impl SoftErrorHandler for Buck2StarlarkSoftErrorHandler { + fn soft_error(&self, category: &str, error: starlark::Error) -> Result<(), starlark::Error> { + let error = + buck2_error::Error::from(error.into_anyhow()).tag([ErrorTag::AnyStarlarkEvaluation]); + soft_error!(&format!("starlark_rust_{category}"), error, deprecation: true, quiet:true) + .map_err(|e| starlark::Error::new_kind(ErrorKind::Other(e.into())))?; + Ok(()) + } } diff --git a/app/buck2_interpreter/src/starlark_profiler.rs b/app/buck2_interpreter/src/starlark_profiler.rs index 5d260764d4815..d11b4dc314add 100644 --- a/app/buck2_interpreter/src/starlark_profiler.rs +++ b/app/buck2_interpreter/src/starlark_profiler.rs @@ -7,255 +7,7 @@ * of this source tree. */ -use std::cmp; -use std::time::Duration; -use std::time::Instant; - -use allocative::Allocative; -use anyhow::Context; -use dupe::Dupe; -use starlark::environment::FrozenModule; -use starlark::eval::Evaluator; -use starlark::eval::ProfileData; -use starlark::eval::ProfileMode; - -#[derive(Debug, thiserror::Error)] -enum StarlarkProfilerError { - #[error("will_freeze field was initialized incorrectly (internal error)")] - IncorrectWillFreeze, - #[error( - "Retained memory profiling is available only for analysis profile \ - or bxl profile (which freezes the module)" - )] - RetainedMemoryNotFrozen, - #[error("profile mode are inconsistent (internal error)")] - InconsistentProfileMode, -} - -/// When profiling Starlark file, all dependencies of that file must be -/// "instrumented" otherwise the profiler won't work. -/// -/// This struct defines instrumentation level for the module. -#[derive(Debug, PartialEq, Eq, Clone, Dupe, Allocative)] -pub struct StarlarkProfilerInstrumentation {} - -impl StarlarkProfilerInstrumentation { - pub fn new() -> Self { - Self {} - } -} - -#[derive(Debug, Allocative)] -pub struct StarlarkProfileDataAndStats { - profile_mode: ProfileMode, - #[allocative(skip)] // OK to skip because used only when profiling enabled. - pub profile_data: ProfileData, - initialized_at: Instant, - finalized_at: Instant, - total_retained_bytes: usize, -} - -impl StarlarkProfileDataAndStats { - pub fn elapsed(&self) -> Duration { - self.finalized_at.duration_since(self.initialized_at) - } - - pub fn total_retained_bytes(&self) -> usize { - self.total_retained_bytes - } - - pub fn merge<'a>( - datas: impl IntoIterator<Item = &'a StarlarkProfileDataAndStats> + Clone, - ) -> anyhow::Result<StarlarkProfileDataAndStats> { - let mut iter = datas.clone().into_iter(); - let first = iter.next().context("empty collection of profile data")?; - let profile_mode = first.profile_mode.dupe(); - let mut total_retained_bytes = first.total_retained_bytes; - let mut initialized_at = first.initialized_at; - let mut finalized_at = first.finalized_at; - - for data in iter { - if data.profile_mode != profile_mode { - return Err(StarlarkProfilerError::InconsistentProfileMode.into()); - } - initialized_at = cmp::min(initialized_at, data.initialized_at); - finalized_at = cmp::max(finalized_at, data.finalized_at); - total_retained_bytes += data.total_retained_bytes; - } - - let profile_data = ProfileData::merge(datas.into_iter().map(|data| &data.profile_data))?; - - Ok(StarlarkProfileDataAndStats { - profile_mode, - profile_data, - initialized_at, - finalized_at, - total_retained_bytes, - }) - } -} - -pub struct StarlarkProfiler { - profile_mode: ProfileMode, - /// Evaluation will freeze the module. - /// (And frozen module will be passed to `visit_frozen_module`).
- will_freeze: bool, - - initialized_at: Option<Instant>, - finalized_at: Option<Instant>, - profile_data: Option<ProfileData>, - total_retained_bytes: Option<usize>, -} - -impl StarlarkProfiler { - pub fn new(profile_mode: ProfileMode, will_freeze: bool) -> StarlarkProfiler { - Self { - profile_mode, - will_freeze, - initialized_at: None, - finalized_at: None, - profile_data: None, - total_retained_bytes: None, - } - } - - /// Collect all profiling data. - pub fn finish(self) -> anyhow::Result<StarlarkProfileDataAndStats> { - Ok(StarlarkProfileDataAndStats { - profile_mode: self.profile_mode, - initialized_at: self - .initialized_at - .context("did not initialize (internal error)")?, - finalized_at: self - .finalized_at - .context("did not finalize (internal error)")?, - total_retained_bytes: self - .total_retained_bytes - .context("did not visit heap (internal error)")?, - profile_data: self - .profile_data - .context("profile_data not initialized (internal error)")?, - }) - } - - /// Instrumentation level required by `bzl` files loaded by the profiled module. - fn instrumentation(&self) -> Option<StarlarkProfilerInstrumentation> { - Some(StarlarkProfilerInstrumentation {}) - } - - /// Prepare an Evaluator to capture output relevant to this profiler. - fn initialize(&mut self, eval: &mut Evaluator) -> anyhow::Result<()> { - eval.enable_profile(&self.profile_mode)?; - self.initialized_at = Some(Instant::now()); - Ok(()) - } - - /// Post-analysis, produce the output of this profiler. - fn evaluation_complete(&mut self, eval: &mut Evaluator) -> anyhow::Result<()> { - self.finalized_at = Some(Instant::now()); - if !matches!( - self.profile_mode, - ProfileMode::HeapSummaryRetained | ProfileMode::HeapFlameRetained - ) { - self.profile_data = Some(eval.gen_profile()?); - } - Ok(()) - } - - fn visit_frozen_module(&mut self, module: Option<&FrozenModule>) -> anyhow::Result<()> { - if self.will_freeze != module.is_some() { - return Err(StarlarkProfilerError::IncorrectWillFreeze.into()); - } - - match self.profile_mode { - ProfileMode::HeapSummaryRetained | ProfileMode::HeapFlameRetained => { - let module = module.ok_or(StarlarkProfilerError::RetainedMemoryNotFrozen)?; - let profile = module.heap_profile()?; - self.profile_data = Some(profile); - } - _ => {} - } - - let total_retained_bytes = module.map_or(0, |module| { - module - .frozen_heap() - .allocated_summary() - .total_allocated_bytes() - }); - - self.total_retained_bytes = Some(total_retained_bytes); - - Ok(()) - } -} - -/// How individual starlark invocation (`bzl`, `BUCK` or analysis) should be interpreted. -#[derive(Clone, Dupe, Eq, PartialEq, Allocative)] -pub enum StarlarkProfileModeOrInstrumentation { - None, - Profile(ProfileMode), -} - -impl StarlarkProfileModeOrInstrumentation { - pub fn profile_mode(&self) -> Option<&ProfileMode> { - match self { - StarlarkProfileModeOrInstrumentation::Profile(profile) => Some(profile), - StarlarkProfileModeOrInstrumentation::None => None, - } - } -} - -enum StarlarkProfilerOrInstrumentationImpl<'p> { - None, - Profiler(&'p mut StarlarkProfiler), -} - -/// Modules can be evaluated with profiling or with instrumentation for profiling. -/// This type encapsulates this logic.
-pub struct StarlarkProfilerOrInstrumentation<'p>(StarlarkProfilerOrInstrumentationImpl<'p>); - -impl<'p> StarlarkProfilerOrInstrumentation<'p> { - pub fn new( - profiler: &'p mut StarlarkProfiler, - instrumentation: Option<StarlarkProfilerInstrumentation>, - ) -> StarlarkProfilerOrInstrumentation<'p> { - match (profiler.instrumentation(), instrumentation) { - (None, None) => StarlarkProfilerOrInstrumentation::disabled(), - (Some(_), Some(_)) => StarlarkProfilerOrInstrumentation::for_profiler(profiler), - (None, Some(_)) => StarlarkProfilerOrInstrumentation::disabled(), - (Some(_), None) => panic!("profiler, but no instrumentation"), - } - } - - pub fn for_profiler(profiler: &'p mut StarlarkProfiler) -> Self { - StarlarkProfilerOrInstrumentation(StarlarkProfilerOrInstrumentationImpl::Profiler(profiler)) - } - - /// No profiling. - pub fn disabled() -> StarlarkProfilerOrInstrumentation<'p> { - StarlarkProfilerOrInstrumentation(StarlarkProfilerOrInstrumentationImpl::None) - } - - pub fn initialize(&mut self, eval: &mut Evaluator) -> anyhow::Result<()> { - match &mut self.0 { - StarlarkProfilerOrInstrumentationImpl::None => Ok(()), - StarlarkProfilerOrInstrumentationImpl::Profiler(profiler) => profiler.initialize(eval), - } - } - - pub fn visit_frozen_module(&mut self, module: Option<&FrozenModule>) -> anyhow::Result<()> { - if let StarlarkProfilerOrInstrumentationImpl::Profiler(profiler) = &mut self.0 { - profiler.visit_frozen_module(module) - } else { - Ok(()) - } - } - - pub fn evaluation_complete(&mut self, eval: &mut Evaluator) -> anyhow::Result<()> { - if let StarlarkProfilerOrInstrumentationImpl::Profiler(profiler) = &mut self.0 { - profiler.evaluation_complete(eval) - } else { - Ok(()) - } - } -} +pub mod config; +pub mod data; +pub mod mode; +pub mod profiler; diff --git a/app/buck2_interpreter/src/starlark_profiler/config.rs b/app/buck2_interpreter/src/starlark_profiler/config.rs new file mode 100644 index 0000000000000..cf8f43422396e --- /dev/null +++ b/app/buck2_interpreter/src/starlark_profiler/config.rs @@ -0,0 +1,339 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::sync::Arc; + +use allocative::Allocative; +use async_trait::async_trait; +use buck2_common::pattern::parse_from_cli::parse_patterns_from_cli_args_typed; +use buck2_core::package::PackageLabel; +use buck2_core::pattern::package::PackagePredicate; +use buck2_core::pattern::pattern::ParsedPatternPredicate; +use buck2_core::pattern::pattern_type::ConfiguredProvidersPatternExtra; +use buck2_core::pattern::pattern_type::TargetPatternExtra; +use buck2_core::pattern::unparsed::UnparsedPatternPredicate; +use buck2_core::target::configured_target_label::ConfiguredTargetLabel; +use buck2_futures::cancellation::CancellationContext; +use dice::DiceComputations; +use dice::DiceProjectionComputations; +use dice::DiceTransactionUpdater; +use dice::InjectedKey; +use dice::Key; +use dice::ProjectionKey; +use dupe::Dupe; +use starlark::eval::ProfileMode; + +use crate::starlark_profiler::mode::StarlarkProfileMode; + +/// Global profiling configuration. +#[derive(PartialEq, Eq, Clone, Debug, Allocative)] +#[derive(Default)] +pub enum StarlarkProfilerConfiguration { + /// No profiling. + #[default] + None, + /// Profile loading of one `BUCK`.
+ ProfileLoading( + ProfileMode, + UnparsedPatternPredicate, + ), + /// Profile analysis for given patterns. + ProfileAnalysis( + ProfileMode, + UnparsedPatternPredicate, + ), + /// Profile BXL + ProfileBxl(ProfileMode), +} + +#[derive(PartialEq, Eq, Clone, Debug, Allocative)] +enum StarlarkProfilerConfigurationResolved { + None, + ProfileLastLoading(ProfileMode, PackagePredicate), + ProfileAnalysis(ProfileMode, ParsedPatternPredicate), + ProfileBxl(ProfileMode), +} + +#[derive( + Debug, + derive_more::Display, + Copy, + Clone, + Dupe, + Eq, + PartialEq, + Hash, + Allocative +)] +#[display("{:?}", self)] +struct StarlarkProfilerConfigurationResolvedKey; + +#[async_trait] +impl Key for StarlarkProfilerConfigurationResolvedKey { + type Value = buck2_error::Result>; + + async fn compute( + &self, + ctx: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> Self::Value { + let configuration = ctx.compute(&StarlarkProfilerConfigurationKey).await?; + let new = match &*configuration { + StarlarkProfilerConfiguration::None => StarlarkProfilerConfigurationResolved::None, + StarlarkProfilerConfiguration::ProfileLoading(mode, patterns) => match patterns { + UnparsedPatternPredicate::Any => { + StarlarkProfilerConfigurationResolved::ProfileLastLoading( + mode.dupe(), + PackagePredicate::Any, + ) + } + UnparsedPatternPredicate::AnyOf(patterns) => { + let patterns = parse_patterns_from_cli_args_typed::< + ConfiguredProvidersPatternExtra, + >(ctx, patterns) + .await?; + let patterns = patterns + .into_iter() + .map(|p| p.into_package_pattern_ignore_target()) + .collect(); + StarlarkProfilerConfigurationResolved::ProfileLastLoading( + mode.dupe(), + PackagePredicate::AnyOf(patterns), + ) + } + }, + StarlarkProfilerConfiguration::ProfileAnalysis(mode, patterns) => { + match patterns { + UnparsedPatternPredicate::Any => { + StarlarkProfilerConfigurationResolved::ProfileAnalysis( + mode.dupe(), + ParsedPatternPredicate::Any, + ) + } + UnparsedPatternPredicate::AnyOf(patterns) => { + let patterns = parse_patterns_from_cli_args_typed::< + ConfiguredProvidersPatternExtra, + >(ctx, patterns) + .await?; + let patterns = patterns + .into_iter() + .map(|p| { + p.map(|_| { + // Drop the extra because: + // - we don't use providers when profiling analysis + // - configuration may create issues when there are transitions; + // it might return more than user requested without + // (e.g. target and host analysis), but practically this is not an issue. + TargetPatternExtra + }) + }) + .collect(); + StarlarkProfilerConfigurationResolved::ProfileAnalysis( + mode.dupe(), + ParsedPatternPredicate::AnyOf(patterns), + ) + } + } + } + StarlarkProfilerConfiguration::ProfileBxl(mode) => { + StarlarkProfilerConfigurationResolved::ProfileBxl(mode.dupe()) + } + }; + Ok(Arc::new(new)) + } + + fn equality(x: &Self::Value, y: &Self::Value) -> bool { + match (x, y) { + (Ok(x), Ok(y)) => x == y, + _ => false, + } + } +} + +#[derive( + Debug, + derive_more::Display, + Clone, + Dupe, + Eq, + PartialEq, + Hash, + Allocative +)] +struct StarlarkProfileModeForAnalysisKey(ConfiguredTargetLabel); + +impl ProjectionKey for StarlarkProfileModeForAnalysisKey { + type DeriveFromKey = StarlarkProfilerConfigurationResolvedKey; + + type Value = buck2_error::Result; + + fn compute( + &self, + configuration: &buck2_error::Result>, + _ctx: &DiceProjectionComputations, + ) -> buck2_error::Result { + match &**(configuration.as_ref().map_err(|e| e.dupe())?) 
{ + StarlarkProfilerConfigurationResolved::None => Ok(StarlarkProfileMode::None), + StarlarkProfilerConfigurationResolved::ProfileLastLoading(..) => { + Ok(StarlarkProfileMode::None) + } + StarlarkProfilerConfigurationResolved::ProfileAnalysis(mode, patterns) => { + if patterns.matches(self.0.unconfigured()) { + Ok(StarlarkProfileMode::Profile(mode.dupe())) + } else { + Ok(StarlarkProfileMode::None) + } + } + StarlarkProfilerConfigurationResolved::ProfileBxl(_) => Ok(StarlarkProfileMode::None), + } + } + + fn equality(x: &Self::Value, y: &Self::Value) -> bool { + match (x, y) { + (Ok(x), Ok(y)) => x == y, + _ => false, + } + } +} + +#[derive( + Debug, + derive_more::Display, + Clone, + Dupe, + Eq, + PartialEq, + Hash, + Allocative +)] +struct StarlarkProfileModeForLoadingKey(PackageLabel); + +impl ProjectionKey for StarlarkProfileModeForLoadingKey { + type DeriveFromKey = StarlarkProfilerConfigurationResolvedKey; + type Value = buck2_error::Result; + + fn compute( + &self, + derive_from: &buck2_error::Result>, + _ctx: &DiceProjectionComputations, + ) -> Self::Value { + match &**(derive_from.as_ref().map_err(|e| e.dupe())?) { + StarlarkProfilerConfigurationResolved::None => Ok(StarlarkProfileMode::None), + StarlarkProfilerConfigurationResolved::ProfileLastLoading(mode, patterns) => { + if patterns.matches(self.0) { + Ok(StarlarkProfileMode::Profile(mode.dupe())) + } else { + Ok(StarlarkProfileMode::None) + } + } + StarlarkProfilerConfigurationResolved::ProfileAnalysis(_, _) => { + Ok(StarlarkProfileMode::None) + } + StarlarkProfilerConfigurationResolved::ProfileBxl(_) => Ok(StarlarkProfileMode::None), + } + } + + fn equality(x: &Self::Value, y: &Self::Value) -> bool { + match (x, y) { + (Ok(x), Ok(y)) => x == y, + _ => false, + } + } +} + +/// Global Starlark compiler instrumentation level. +/// +/// We profile only leaf computations (`BUCK` files or analysis), +/// and this key defines instrumentation of all the Starlark files, +/// regardless of whether profiled entity depends on them or not. +/// It's easier to implement with single global key, +/// the downside is we invalidate parse results when we switch +/// between normal operation/profiling. +#[derive( + Debug, + derive_more::Display, + Copy, + Clone, + Dupe, + Eq, + PartialEq, + Hash, + Allocative +)] +#[display("{:?}", self)] +pub struct StarlarkProfilerConfigurationKey; + +impl InjectedKey for StarlarkProfilerConfigurationKey { + type Value = Arc; + + fn equality(x: &Self::Value, y: &Self::Value) -> bool { + x == y + } +} + +#[async_trait] +pub trait SetStarlarkProfilerInstrumentation { + fn set_starlark_profiler_configuration( + &mut self, + instrumentation: StarlarkProfilerConfiguration, + ) -> anyhow::Result<()>; +} + +#[async_trait] +pub trait GetStarlarkProfilerInstrumentation { + /// Profile mode for analysis of given target. + async fn get_profile_mode_for_analysis( + &mut self, + target_label: &ConfiguredTargetLabel, + ) -> anyhow::Result; + + async fn get_profile_mode_for_loading( + &mut self, + package_label: PackageLabel, + ) -> anyhow::Result; +} + +#[async_trait] +impl SetStarlarkProfilerInstrumentation for DiceTransactionUpdater { + fn set_starlark_profiler_configuration( + &mut self, + configuration: StarlarkProfilerConfiguration, + ) -> anyhow::Result<()> { + Ok(self.changed_to([(StarlarkProfilerConfigurationKey, Arc::new(configuration))])?) 
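The DICE plumbing here is dense, so a rough, self-contained sketch of just the resolution logic may help: a single resolved configuration is consulted per package (or per target), and anything that does not match the predicate profiles nothing. The `Resolved` enum and the `fn(&str) -> bool` predicates below are toy stand-ins for `StarlarkProfilerConfigurationResolved` and the pattern types, and the real lookup runs inside DICE projection keys rather than a free function:

#[derive(Clone, Debug, PartialEq)]
enum ProfileMode {
    TimeFlame,
    HeapFlameRetained,
}

#[derive(Debug, PartialEq)]
enum StarlarkProfileMode {
    None,
    Profile(ProfileMode),
}

// Stand-in for the resolved global configuration.
enum Resolved {
    None,
    ProfileLastLoading(ProfileMode, fn(&str) -> bool),
    ProfileAnalysis(ProfileMode, fn(&str) -> bool),
    ProfileBxl(ProfileMode),
}

// Mirrors `StarlarkProfileModeForLoadingKey::compute`: only the
// `ProfileLastLoading` arm can switch profiling on for a package.
fn mode_for_loading(cfg: &Resolved, package: &str) -> StarlarkProfileMode {
    match cfg {
        Resolved::ProfileLastLoading(mode, matches) if matches(package) => {
            StarlarkProfileMode::Profile(mode.clone())
        }
        _ => StarlarkProfileMode::None,
    }
}

fn main() {
    let cfg = Resolved::ProfileLastLoading(ProfileMode::TimeFlame, |p: &str| p.starts_with("foo//"));
    assert_eq!(
        mode_for_loading(&cfg, "foo//some/package"),
        StarlarkProfileMode::Profile(ProfileMode::TimeFlame)
    );
    assert_eq!(mode_for_loading(&cfg, "bar//other"), StarlarkProfileMode::None);
}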
+ } +} + +#[async_trait] +impl GetStarlarkProfilerInstrumentation for DiceComputations<'_> { + async fn get_profile_mode_for_analysis( + &mut self, + target_label: &ConfiguredTargetLabel, + ) -> anyhow::Result { + let cfg = self + .compute_opaque(&StarlarkProfilerConfigurationResolvedKey) + .await?; + Ok(self.projection( + &cfg, + &StarlarkProfileModeForAnalysisKey(target_label.dupe()), + )??) + } + + async fn get_profile_mode_for_loading( + &mut self, + package_label: PackageLabel, + ) -> anyhow::Result { + let cfg = self + .compute_opaque(&StarlarkProfilerConfigurationResolvedKey) + .await?; + Ok(self.projection( + &cfg, + &StarlarkProfileModeForLoadingKey(package_label.dupe()), + )??) + } +} diff --git a/app/buck2_interpreter/src/starlark_profiler/data.rs b/app/buck2_interpreter/src/starlark_profiler/data.rs new file mode 100644 index 0000000000000..31b1b1aecc7cf --- /dev/null +++ b/app/buck2_interpreter/src/starlark_profiler/data.rs @@ -0,0 +1,98 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::any::Any; +use std::cmp; +use std::time::Duration; +use std::time::Instant; + +use allocative::Allocative; +use anyhow::Context; +use buck2_common::starlark_profiler::StarlarkProfileDataAndStatsDyn; +use buck2_core::package::PackageLabel; +use buck2_core::target::configured_target_label::ConfiguredTargetLabel; +use buck2_error::BuckErrorContext; +use starlark::eval::ProfileData; +use starlark::StarlarkResultExt; + +#[derive(Clone, Debug, derive_more::Display, Allocative)] +pub enum ProfileTarget { + #[display("analysis:{}", _0)] + Analysis(ConfiguredTargetLabel), + #[display("loading:{}", _0)] + Loading(PackageLabel), + #[display("bxl")] + Bxl, +} + +#[derive(Debug, Clone, Allocative)] +pub struct StarlarkProfileDataAndStats { + #[allocative(skip)] // OK to skip because used only when profiling enabled. 
+ pub profile_data: ProfileData, + pub targets: Vec, + pub(crate) initialized_at: Instant, + pub(crate) finalized_at: Instant, + pub(crate) total_retained_bytes: usize, +} + +impl StarlarkProfileDataAndStatsDyn for StarlarkProfileDataAndStats { + fn as_any(&self) -> &dyn Any { + self + } +} + +impl StarlarkProfileDataAndStats { + pub fn elapsed(&self) -> Duration { + self.finalized_at.duration_since(self.initialized_at) + } + + pub fn total_retained_bytes(&self) -> usize { + self.total_retained_bytes + } + + pub fn merge<'a>( + datas: impl IntoIterator, + ) -> anyhow::Result { + let datas = Vec::from_iter(datas); + let mut iter = datas.iter().copied(); + let first = iter.next().context("empty collection of profile data")?; + let mut total_retained_bytes = first.total_retained_bytes; + let mut initialized_at = first.initialized_at; + let mut finalized_at = first.finalized_at; + + for data in iter { + initialized_at = cmp::min(initialized_at, data.initialized_at); + finalized_at = cmp::max(finalized_at, data.finalized_at); + total_retained_bytes += data.total_retained_bytes; + } + + let profile_data = + ProfileData::merge(datas.iter().map(|data| &data.profile_data)).into_anyhow_result()?; + + Ok(StarlarkProfileDataAndStats { + profile_data, + targets: datas + .iter() + .flat_map(|data| data.targets.iter().cloned()) + .collect(), + initialized_at, + finalized_at, + total_retained_bytes, + }) + } + + pub fn downcast(profile_data: &dyn StarlarkProfileDataAndStatsDyn) -> anyhow::Result<&Self> { + profile_data + .as_any() + .downcast_ref::() + .internal_error_anyhow( + "There's only one implementation of StarlarkProfileDataAndStatsDyn", + ) + } +} diff --git a/app/buck2_interpreter/src/starlark_profiler/mode.rs b/app/buck2_interpreter/src/starlark_profiler/mode.rs new file mode 100644 index 0000000000000..22b4308619abf --- /dev/null +++ b/app/buck2_interpreter/src/starlark_profiler/mode.rs @@ -0,0 +1,28 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use allocative::Allocative; +use dupe::Dupe; +use starlark::eval::ProfileMode; + +/// How individual starlark invocation (`bzl`, `BUCK` or analysis) should be interpreted. +#[derive(Clone, Dupe, Eq, PartialEq, Allocative)] +pub enum StarlarkProfileMode { + None, + Profile(ProfileMode), +} + +impl StarlarkProfileMode { + pub fn profile_mode(&self) -> Option<&ProfileMode> { + match self { + StarlarkProfileMode::Profile(profile) => Some(profile), + StarlarkProfileMode::None => None, + } + } +} diff --git a/app/buck2_interpreter/src/starlark_profiler/profiler.rs b/app/buck2_interpreter/src/starlark_profiler/profiler.rs new file mode 100644 index 0000000000000..0d95f8044ebf9 --- /dev/null +++ b/app/buck2_interpreter/src/starlark_profiler/profiler.rs @@ -0,0 +1,188 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
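The `merge` in `data.rs` above combines per-target stats so that `elapsed()` spans the union of all inputs while retained bytes accumulate. A sketch of just that arithmetic, with `u64` ticks standing in for `Instant`s and the `ProfileData::merge` call omitted:

struct Stats {
    initialized_at: u64,
    finalized_at: u64,
    total_retained_bytes: usize,
}

fn merge(datas: &[Stats]) -> Option<Stats> {
    let mut iter = datas.iter();
    // Like the `.context("empty collection of profile data")?` above.
    let first = iter.next()?;
    let mut initialized_at = first.initialized_at;
    let mut finalized_at = first.finalized_at;
    let mut total_retained_bytes = first.total_retained_bytes;
    for data in iter {
        // The span stretches to cover every input; retained bytes add up.
        initialized_at = initialized_at.min(data.initialized_at);
        finalized_at = finalized_at.max(data.finalized_at);
        total_retained_bytes += data.total_retained_bytes;
    }
    Some(Stats { initialized_at, finalized_at, total_retained_bytes })
}

fn main() {
    let merged = merge(&[
        Stats { initialized_at: 10, finalized_at: 25, total_retained_bytes: 100 },
        Stats { initialized_at: 5, finalized_at: 20, total_retained_bytes: 50 },
    ])
    .unwrap();
    // `elapsed()` on the merged stats covers the union: 25 - 5 = 20.
    assert_eq!(merged.finalized_at - merged.initialized_at, 20);
    assert_eq!(merged.total_retained_bytes, 150);
}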
+ */
+
+use std::time::Instant;
+
+use buck2_error::internal_error_anyhow;
+use buck2_error::BuckErrorContext;
+use starlark::environment::FrozenModule;
+use starlark::eval::Evaluator;
+use starlark::eval::ProfileData;
+use starlark::eval::ProfileMode;
+use starlark::StarlarkResultExt;
+
+use crate::starlark_profiler::data::ProfileTarget;
+use crate::starlark_profiler::data::StarlarkProfileDataAndStats;
+
+#[derive(Debug, buck2_error::Error)]
+enum StarlarkProfilerError {
+    #[error(
+        "Retained memory profiling is available only for analysis profile \
+         or bxl profile (which freezes the module)"
+    )]
+    RetainedMemoryNotFrozen,
+}
+
+pub struct StarlarkProfiler {
+    profile_mode: ProfileMode,
+    /// Evaluation will freeze the module.
+    /// (And frozen module will be passed to `visit_frozen_module`).
+    will_freeze: bool,
+
+    initialized_at: Option<Instant>,
+    finalized_at: Option<Instant>,
+    profile_data: Option<ProfileData>,
+    total_retained_bytes: Option<usize>,
+
+    target: ProfileTarget,
+}
+
+impl StarlarkProfiler {
+    pub fn new(
+        profile_mode: ProfileMode,
+        will_freeze: bool,
+        target: ProfileTarget,
+    ) -> StarlarkProfiler {
+        Self {
+            profile_mode,
+            will_freeze,
+            initialized_at: None,
+            finalized_at: None,
+            profile_data: None,
+            total_retained_bytes: None,
+            target,
+        }
+    }
+
+    /// Collect all profiling data.
+    pub fn finish(self) -> anyhow::Result<StarlarkProfileDataAndStats> {
+        Ok(StarlarkProfileDataAndStats {
+            initialized_at: self
+                .initialized_at
+                .internal_error_anyhow("did not initialize")?,
+            finalized_at: self
+                .finalized_at
+                .internal_error_anyhow("did not finalize")?,
+            total_retained_bytes: self
+                .total_retained_bytes
+                .internal_error_anyhow("did not visit heap")?,
+            profile_data: self
+                .profile_data
+                .internal_error_anyhow("profile_data not initialized")?,
+            targets: vec![self.target],
+        })
+    }
+
+    /// Prepare an Evaluator to capture output relevant to this profiler.
+    fn initialize(&mut self, eval: &mut Evaluator) -> anyhow::Result<()> {
+        eval.enable_profile(&self.profile_mode)?;
+        self.initialized_at = Some(Instant::now());
+        Ok(())
+    }
+
+    /// Post-analysis, produce the output of this profiler.
+    fn evaluation_complete(&mut self, eval: &mut Evaluator) -> anyhow::Result<()> {
+        self.finalized_at = Some(Instant::now());
+        if !self.profile_mode.requires_frozen_module() {
+            self.profile_data = Some(eval.gen_profile().into_anyhow_result()?);
+        }
+        Ok(())
+    }
+
+    fn visit_frozen_module(&mut self, module: Option<&FrozenModule>) -> anyhow::Result<()> {
+        if self.will_freeze != module.is_some() {
+            return Err(internal_error_anyhow!(
+                "will_freeze field was initialized incorrectly"
+            ));
+        }
+
+        if self.profile_mode.requires_frozen_module() {
+            let module = module.ok_or(StarlarkProfilerError::RetainedMemoryNotFrozen)?;
+            let profile = module.heap_profile()?;
+            self.profile_data = Some(profile);
+        }
+
+        let total_retained_bytes = module.map_or(0, |module| {
+            module
+                .frozen_heap()
+                .allocated_summary()
+                .total_allocated_bytes()
+        });
+
+        self.total_retained_bytes = Some(total_retained_bytes);
+
+        Ok(())
+    }
+}
+
+enum StarlarkProfilerOptImpl<'p> {
+    None,
+    Profiler(&'p mut StarlarkProfiler),
+}
+
+/// Modules can be evaluated with profiling or with instrumentation for profiling.
+/// This type encapsulates this logic.
+pub struct StarlarkProfilerOpt<'p>(StarlarkProfilerOptImpl<'p>);
+
+impl<'p> StarlarkProfilerOpt<'p> {
+    pub fn for_profiler(profiler: &'p mut StarlarkProfiler) -> Self {
+        StarlarkProfilerOpt(StarlarkProfilerOptImpl::Profiler(profiler))
+    }
+
+    /// No profiling.
+ pub fn disabled() -> StarlarkProfilerOpt<'p> { + StarlarkProfilerOpt(StarlarkProfilerOptImpl::None) + } + + pub fn initialize(&mut self, eval: &mut Evaluator) -> anyhow::Result { + match &mut self.0 { + StarlarkProfilerOptImpl::None => Ok(false), + StarlarkProfilerOptImpl::Profiler(profiler) => profiler.initialize(eval).map(|_| true), + } + } + + pub fn visit_frozen_module(&mut self, module: Option<&FrozenModule>) -> anyhow::Result<()> { + if let StarlarkProfilerOptImpl::Profiler(profiler) = &mut self.0 { + profiler.visit_frozen_module(module) + } else { + Ok(()) + } + } + + pub fn evaluation_complete(&mut self, eval: &mut Evaluator) -> anyhow::Result<()> { + if let StarlarkProfilerOptImpl::Profiler(profiler) = &mut self.0 { + profiler.evaluation_complete(eval) + } else { + Ok(()) + } + } +} + +pub enum StarlarkProfilerOptVal { + Disabled, + Profiler(StarlarkProfiler), +} + +impl StarlarkProfilerOptVal { + pub fn as_mut(&mut self) -> StarlarkProfilerOpt { + match self { + StarlarkProfilerOptVal::Disabled => StarlarkProfilerOpt::disabled(), + StarlarkProfilerOptVal::Profiler(profiler) => { + StarlarkProfilerOpt::for_profiler(profiler) + } + } + } + + pub fn finish(self) -> anyhow::Result> { + match self { + StarlarkProfilerOptVal::Disabled => Ok(None), + StarlarkProfilerOptVal::Profiler(profiler) => profiler.finish().map(Some), + } + } +} diff --git a/app/buck2_interpreter/src/starlark_promise.rs b/app/buck2_interpreter/src/starlark_promise.rs index eb70e0f1b8055..febc1ba1eb071 100644 --- a/app/buck2_interpreter/src/starlark_promise.rs +++ b/app/buck2_interpreter/src/starlark_promise.rs @@ -10,9 +10,11 @@ //! A type [`StarlarkPromise`] which provides basic promise-like functionality. use std::cell::Cell; use std::cell::RefCell; +use std::convert::Infallible; use std::mem; use allocative::Allocative; +use buck2_error::starlark_error::from_starlark; use derivative::Derivative; use derive_more::Display; use starlark::any::ProvidesStaticType; @@ -21,11 +23,14 @@ use starlark::environment::Methods; use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; use starlark::eval::Evaluator; +use starlark::starlark_module; use starlark::typing::Ty; use starlark::values::list::AllocList; +use starlark::values::list_or_tuple::UnpackListOrTuple; use starlark::values::starlark_value; use starlark::values::starlark_value_as_type::StarlarkValueAsType; use starlark::values::type_repr::StarlarkTypeRepr; +use starlark::values::typing::StarlarkCallable; use starlark::values::AllocValue; use starlark::values::Heap; use starlark::values::NoSerialize; @@ -35,20 +40,18 @@ use starlark::values::UnpackValue; use starlark::values::Value; use starlark::values::ValueLike; use starlark::values::ValueTyped; -use thiserror::Error; /// A type that corresponds to a Rust promise. #[derive( ProvidesStaticType, NoSerialize, - StarlarkDocs, Display, Derivative, Trace, Allocative )] #[derivative(Debug)] -#[display(fmt = "promise()")] +#[display("promise()")] pub struct StarlarkPromise<'v> { /// The value of the promise. 
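`StarlarkProfilerOpt` lets one call site drive both the enabled and disabled cases, with the disabled arm a no-op; `initialize` returns whether profiling is actually live. A stripped-down sketch of that shape (toy `Profiler`, no starlark types):

struct Profiler {
    initialized: bool,
}

enum ProfilerOpt<'p> {
    None,
    Profiler(&'p mut Profiler),
}

impl<'p> ProfilerOpt<'p> {
    // Like `StarlarkProfilerOpt::initialize`: the disabled case does
    // nothing, and the returned bool tells the caller whether profiling
    // is on.
    fn initialize(&mut self) -> bool {
        match self {
            ProfilerOpt::None => false,
            ProfilerOpt::Profiler(p) => {
                p.initialized = true;
                true
            }
        }
    }
}

fn main() {
    let mut p = Profiler { initialized: false };
    assert!(ProfilerOpt::Profiler(&mut p).initialize());
    assert!(!ProfilerOpt::None.initialize());
    assert!(p.initialized);
}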
value: RefCell>, @@ -70,7 +73,10 @@ struct Validate<'v>(#[trace(unsafe_ignore)] fn(Value<'v>) -> anyhow::Result<()>) enum PromiseValue<'v> { Unresolved, Resolved(Value<'v>), - Map(ValueTyped<'v, StarlarkPromise<'v>>, Value<'v>), + Map( + ValueTyped<'v, StarlarkPromise<'v>>, + StarlarkCallable<'v, (Value<'v>,), Value<'v>>, + ), Join(PromiseJoin<'v>), } @@ -106,7 +112,7 @@ impl<'v> PromiseJoin<'v> { } } -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum PromiseError { #[error("Can't .resolve on a promise produced with .map")] CantResolveMap, @@ -160,17 +166,18 @@ impl<'v> StarlarkPromise<'v> { } fn apply( - f: Value<'v>, + f: StarlarkCallable<'v, (Value<'v>,), Value<'v>>, x: Value<'v>, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result> { - eval.eval_function(f, &[x], &[]) + eval.eval_function(f.0, &[x], &[]) + .map_err(|e| from_starlark(e).into()) } pub fn map( x: ValueTyped<'v, StarlarkPromise<'v>>, - f: Value<'v>, - eval: &mut Evaluator<'v, '_>, + f: StarlarkCallable<'v, (Value<'v>,), Value<'v>>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result>> { match x.get() { Some(x) => Ok(eval @@ -219,14 +226,14 @@ impl<'v> StarlarkPromise<'v> { /// Resolve a promise. Errors if the promise was produced by `.map` or the promise has /// already been resolved. - pub fn resolve(&self, x: Value<'v>, eval: &mut Evaluator<'v, '_>) -> anyhow::Result<()> { + pub fn resolve(&self, x: Value<'v>, eval: &mut Evaluator<'v, '_, '_>) -> anyhow::Result<()> { if matches!(&*self.value.borrow(), PromiseValue::Map(..)) { return Err(PromiseError::CantResolveMap.into()); } self.resolve_rec(x, eval) } - fn resolve_rec(&self, x: Value<'v>, eval: &mut Evaluator<'v, '_>) -> anyhow::Result<()> { + fn resolve_rec(&self, x: Value<'v>, eval: &mut Evaluator<'v, '_, '_>) -> anyhow::Result<()> { if matches!(&*self.value.borrow(), PromiseValue::Resolved(_)) { return Err(PromiseError::CantResolveTwice.into()); } @@ -276,14 +283,18 @@ impl<'v> AllocValue<'v> for StarlarkPromise<'v> { } impl<'v> StarlarkTypeRepr for &'v StarlarkPromise<'v> { + type Canonical = Self; + fn starlark_type_repr() -> Ty { StarlarkPromise::get_type_starlark_repr() } } impl<'v> UnpackValue<'v> for &'v StarlarkPromise<'v> { - fn unpack_value(x: Value<'v>) -> Option { - StarlarkPromise::from_value(x) + type Error = Infallible; + + fn unpack_value_impl(x: Value<'v>) -> Result, Self::Error> { + Ok(StarlarkPromise::from_value(x)) } } @@ -300,8 +311,8 @@ fn promise_methods(builder: &mut MethodsBuilder) { /// Given a promise, apply a function to the value it contains, producing a promise with the resulting value. fn map<'v>( this: ValueTyped<'v, StarlarkPromise<'v>>, - #[starlark(require = pos)] func: Value<'v>, - eval: &mut Evaluator<'v, '_>, + #[starlark(require = pos)] func: StarlarkCallable<'v, (Value<'v>,), Value<'v>>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result>> { StarlarkPromise::map(this, func, eval) } @@ -313,11 +324,11 @@ fn promise_methods(builder: &mut MethodsBuilder) { /// those from `p1`, `p2` and `p3` respectively. 
fn join<'v>( this: ValueTyped<'v, StarlarkPromise<'v>>, - #[starlark(args)] mut args: Vec>>, + #[starlark(args)] mut args: UnpackListOrTuple>>, heap: &'v Heap, ) -> anyhow::Result>> { - args.insert(0, this); - Ok(StarlarkPromise::join(args, heap)) + args.items.insert(0, this); + Ok(StarlarkPromise::join(args.items, heap)) } } @@ -328,18 +339,16 @@ pub fn register_promise(globals: &mut GlobalsBuilder) { #[cfg(test)] mod tests { - use std::cell::RefCell; use starlark::any::ProvidesStaticType; - use starlark::environment::GlobalsBuilder; use starlark::environment::Module; use starlark::syntax::AstModule; - use starlark::syntax::Dialect; use starlark::values::none::NoneType; use starlark::values::tuple::TupleRef; - use starlark::values::Value; + use starlark::StarlarkResultExt; use super::*; + use crate::file_type::StarlarkFileType; #[derive( ProvidesStaticType, @@ -350,7 +359,7 @@ mod tests { Display, Allocative )] - #[display(fmt = "{:?}", self)] + #[display("{:?}", self)] struct Promises<'v>(RefCell>)>>); #[starlark_value(type = "promises")] @@ -360,7 +369,7 @@ mod tests { fn helpers(builder: &mut GlobalsBuilder) { fn promise_unresolved<'v>( name: String, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result>> { let promises = get_promises(eval.module()); let promise = eval.heap().alloc_typed(StarlarkPromise::new_unresolved()); @@ -400,9 +409,14 @@ mod tests { fn assert_promise<'v>(modu: &'v Module, content: &str) -> anyhow::Result> { alloc_promises(modu); let globals = GlobalsBuilder::standard().with(helpers).build(); - let ast = AstModule::parse("test.bzl", content.to_owned(), &Dialect::Extended)?; + let ast = AstModule::parse( + "test.bzl", + content.to_owned(), + &StarlarkFileType::Bzl.dialect(false), + ) + .into_anyhow_result()?; let mut eval = Evaluator::new(modu); - let res = eval.eval_module(ast, &globals)?; + let res = eval.eval_module(ast, &globals).into_anyhow_result()?; let promises = get_promises(modu); for (key, promise) in promises.0.borrow().iter() { promise.resolve(modu.heap().alloc(key), &mut eval)?; diff --git a/app/buck2_interpreter/src/types/mod.rs b/app/buck2_interpreter/src/types.rs similarity index 100% rename from app/buck2_interpreter/src/types/mod.rs rename to app/buck2_interpreter/src/types.rs diff --git a/app/buck2_interpreter/src/types/cell_path.rs b/app/buck2_interpreter/src/types/cell_path.rs index 6f6e70c771518..52ab46b33e9d7 100644 --- a/app/buck2_interpreter/src/types/cell_path.rs +++ b/app/buck2_interpreter/src/types/cell_path.rs @@ -7,38 +7,49 @@ * of this source tree. 
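The promise methods above follow a classic single-threaded promise design: `map` on an unresolved promise queues the callback, `resolve` runs the queue, and resolving twice is an error. A toy model with `i32` payloads and plain Rust closures instead of Starlark values and callables (join and validation are not modeled):

use std::cell::RefCell;
use std::rc::Rc;

#[derive(Default)]
struct Promise {
    value: RefCell<Option<i32>>,
    // Maps queued by `map` while this promise is still unresolved.
    downstream: RefCell<Vec<(Rc<Promise>, Box<dyn Fn(i32) -> i32>)>>,
}

impl Promise {
    // Like `StarlarkPromise::map`: apply eagerly if already resolved,
    // otherwise queue the function for `resolve` to run later.
    fn map(&self, f: impl Fn(i32) -> i32 + 'static) -> Rc<Promise> {
        if let Some(v) = *self.value.borrow() {
            return Rc::new(Promise {
                value: RefCell::new(Some(f(v))),
                ..Default::default()
            });
        }
        let out = Rc::new(Promise::default());
        self.downstream.borrow_mut().push((out.clone(), Box::new(f)));
        out
    }

    // Like `resolve_rec`: resolving twice is an error; resolving once
    // propagates the value through every queued map.
    fn resolve(&self, v: i32) -> Result<(), &'static str> {
        if self.value.borrow().is_some() {
            return Err("can't resolve twice");
        }
        *self.value.borrow_mut() = Some(v);
        for (p, f) in self.downstream.borrow_mut().drain(..) {
            p.resolve(f(v))?;
        }
        Ok(())
    }
}

fn main() {
    let p = Rc::new(Promise::default());
    let doubled = p.map(|x| x * 2);
    p.resolve(21).unwrap();
    assert_eq!(*doubled.value.borrow(), Some(42));
    assert!(p.resolve(21).is_err());
}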
*/ +use std::hash::Hash; + use allocative::Allocative; use buck2_core::cells::cell_path::CellPath; use derive_more::Display; use starlark::any::ProvidesStaticType; +use starlark::collections::StarlarkHasher; use starlark::environment::GlobalsBuilder; use starlark::environment::Methods; use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; +use starlark::starlark_module; +use starlark::starlark_simple_value; use starlark::values::starlark_value; use starlark::values::starlark_value_as_type::StarlarkValueAsType; use starlark::values::NoSerialize; use starlark::values::StarlarkValue; +use starlark::values::Value; +use starlark::values::ValueLike; -#[derive( - Debug, - PartialEq, - Display, - ProvidesStaticType, - NoSerialize, - Allocative, - StarlarkDocs -)] +#[derive(Debug, PartialEq, Display, ProvidesStaticType, NoSerialize, Allocative)] pub struct StarlarkCellPath(pub CellPath); starlark_simple_value!(StarlarkCellPath); -#[starlark_value(type = "label_relative_path")] +#[starlark_value(type = "CellPath")] impl<'v> StarlarkValue<'v> for StarlarkCellPath { fn get_methods() -> Option<&'static Methods> { static RES: MethodsStatic = MethodsStatic::new(); RES.methods(cell_path_methods) } + + fn equals(&self, other: Value<'v>) -> starlark::Result { + match other.downcast_ref::() { + None => Ok(false), + Some(v) => Ok(v.0 == self.0), + } + } + + fn write_hash(&self, hasher: &mut StarlarkHasher) -> starlark::Result<()> { + self.0.hash(hasher); + Ok(()) + } } #[starlark_module] diff --git a/app/buck2_interpreter/src/types/cell_root.rs b/app/buck2_interpreter/src/types/cell_root.rs index c0248fe738bd7..c833ba385e216 100644 --- a/app/buck2_interpreter/src/types/cell_root.rs +++ b/app/buck2_interpreter/src/types/cell_root.rs @@ -15,6 +15,8 @@ use buck2_core::cells::paths::CellRelativePathBuf; use derive_more::Display; use starlark::any::ProvidesStaticType; use starlark::environment::GlobalsBuilder; +use starlark::starlark_module; +use starlark::starlark_simple_value; use starlark::values::starlark_value; use starlark::values::starlark_value_as_type::StarlarkValueAsType; use starlark::values::NoSerialize; diff --git a/app/buck2_interpreter/src/types/configuration.rs b/app/buck2_interpreter/src/types/configuration.rs index 29185b35479db..b2aebd8f914e6 100644 --- a/app/buck2_interpreter/src/types/configuration.rs +++ b/app/buck2_interpreter/src/types/configuration.rs @@ -14,6 +14,8 @@ use starlark::any::ProvidesStaticType; use starlark::environment::Methods; use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; +use starlark::starlark_module; +use starlark::starlark_simple_value; use starlark::values::starlark_value; use starlark::values::NoSerialize; use starlark::values::StarlarkValue; diff --git a/app/buck2_interpreter/src/types/configured_providers_label.rs b/app/buck2_interpreter/src/types/configured_providers_label.rs index c9f9e7896de77..a0c49e935eb57 100644 --- a/app/buck2_interpreter/src/types/configured_providers_label.rs +++ b/app/buck2_interpreter/src/types/configured_providers_label.rs @@ -19,26 +19,26 @@ use dupe::Dupe; use serde::Serialize; use serde::Serializer; use starlark::any::ProvidesStaticType; -use starlark::coerce::Coerce; use starlark::collections::StarlarkHasher; -use starlark::docs::StarlarkDocs; use starlark::environment::GlobalsBuilder; use starlark::environment::Methods; use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; -use starlark::typing::Ty; +use starlark::starlark_module; 
+use starlark::starlark_simple_value; +use starlark::values::none::NoneOr; use starlark::values::starlark_value; use starlark::values::starlark_value_as_type::StarlarkValueAsType; -use starlark::values::AllocValue; use starlark::values::Freeze; use starlark::values::Heap; use starlark::values::StarlarkValue; +use starlark::values::StringValue; use starlark::values::Trace; use starlark::values::Value; use crate::types::cell_path::StarlarkCellPath; use crate::types::cell_root::CellRoot; -use crate::types::project_root::ProjectRoot; +use crate::types::project_root::StarlarkProjectRoot; use crate::types::target_label::StarlarkConfiguredTargetLabel; use crate::types::target_label::StarlarkTargetLabel; @@ -49,18 +49,8 @@ impl StarlarkConfiguredProvidersLabel { } /// Container for `ConfiguredProvidersLabel` that gives users access to things like package, cell, etc. This can also be properly stringified by our forthcoming `CommandLine` object -#[derive( - Clone, - Debug, - Coerce, - Display, - Trace, - Freeze, - ProvidesStaticType, - StarlarkDocs, - Allocative -)] -#[display(fmt = "{}", label)] +#[derive(Clone, Debug, Display, Trace, Freeze, ProvidesStaticType, Allocative)] +#[display("{}", label)] #[repr(C)] pub struct StarlarkConfiguredProvidersLabel { #[freeze(identity)] @@ -98,21 +88,17 @@ where RES.methods(configured_label_methods) } - fn equals(&self, other: Value<'v>) -> anyhow::Result { + fn equals(&self, other: Value<'v>) -> starlark::Result { Ok(match StarlarkConfiguredProvidersLabel::from_value(other) { Some(other) => self.label == other.label, None => false, }) } - fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { + fn write_hash(&self, hasher: &mut StarlarkHasher) -> starlark::Result<()> { self.label.hash(hasher); Ok(()) } - - fn get_type_starlark_repr() -> Ty { - Ty::starlark_value::() - } } /// A label is used to represent a configured target. 
@@ -120,8 +106,11 @@ where fn configured_label_methods(builder: &mut MethodsBuilder) { /// For the label `fbcode//buck2/hello:world (ovr_config//platform/linux:x86_64-fbcode-46b26edb4b80a905)` this gives back `buck2/hello` #[starlark(attribute)] - fn package<'v>(this: &'v StarlarkConfiguredProvidersLabel) -> anyhow::Result<&'v str> { - Ok(this.label.target().pkg().cell_relative_path().as_str()) + fn package<'v>( + this: &'v StarlarkConfiguredProvidersLabel, + heap: &'v Heap, + ) -> anyhow::Result> { + Ok(heap.alloc_str_intern(this.label.target().pkg().cell_relative_path().as_str())) } /// For the label `fbcode//buck2/hello:world (ovr_config//platform/linux:x86_64-fbcode-46b26edb4b80a905)` this gives back `world` @@ -133,18 +122,20 @@ fn configured_label_methods(builder: &mut MethodsBuilder) { #[starlark(attribute)] fn sub_target<'v>( this: &'v StarlarkConfiguredProvidersLabel, - ) -> anyhow::Result>> { + ) -> anyhow::Result>> { Ok(match this.label.name() { - ProvidersName::Default => None, - ProvidersName::NonDefault(box NonDefaultProvidersName::Named(s)) => { - Some(s.iter().map(|p| p.as_str()).collect()) - } - ProvidersName::NonDefault(box NonDefaultProvidersName::UnrecognizedFlavor(_)) => { - unreachable!( - "This should have been an error when looking up the corresponding analysis (`{}`)", - this.label - ) - } + ProvidersName::Default => NoneOr::None, + ProvidersName::NonDefault(flavor) => match flavor.as_ref() { + NonDefaultProvidersName::Named(names) => { + NoneOr::Other(names.iter().map(|p| p.as_str()).collect()) + } + NonDefaultProvidersName::UnrecognizedFlavor(_) => { + unreachable!( + "This should have been an error when looking up the corresponding analysis (`{}`)", + this.label + ) + } + }, }) } @@ -170,8 +161,10 @@ fn configured_label_methods(builder: &mut MethodsBuilder) { /// Obtain a reference to the project's root. This can be used as if it were an artifact in /// places that expect one, such as `cmd_args().relative_to`. 
#[starlark(attribute)] - fn project_root<'v>(this: &StarlarkConfiguredProvidersLabel) -> anyhow::Result { - Ok(ProjectRoot::new()) + fn project_root<'v>( + this: &StarlarkConfiguredProvidersLabel, + ) -> anyhow::Result { + Ok(StarlarkProjectRoot) } /// For the label `fbcode//buck2/hello:world (ovr_config//platform/linux:x86_64-fbcode-46b26edb4b80a905)` this returns the unconfigured underlying target label (`fbcode//buck2/hello:world`) @@ -201,20 +194,18 @@ impl StarlarkProvidersLabel { #[derive( Clone, Debug, - Coerce, Display, Trace, Freeze, ProvidesStaticType, Serialize, - StarlarkDocs, Allocative )] -#[display(fmt = "{}", label)] +#[display("{}", label)] #[repr(C)] +#[serde(transparent)] pub struct StarlarkProvidersLabel { #[freeze(identity)] - #[serde(flatten)] label: ProvidersLabel, } @@ -236,7 +227,7 @@ where RES.methods(label_methods) } - fn equals(&self, other: Value<'v>) -> anyhow::Result { + fn equals(&self, other: Value<'v>) -> starlark::Result { if let Some(other) = StarlarkProvidersLabel::from_value(other) { Ok(self.label == other.label) } else { @@ -244,14 +235,10 @@ where } } - fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { + fn write_hash(&self, hasher: &mut StarlarkHasher) -> starlark::Result<()> { self.label.hash(hasher); Ok(()) } - - fn get_type_starlark_repr() -> Ty { - Ty::starlark_value::() - } } #[starlark_module] @@ -262,25 +249,26 @@ fn label_methods(builder: &mut MethodsBuilder) { } #[starlark(attribute)] - fn sub_target<'v>(this: &'v StarlarkProvidersLabel) -> anyhow::Result>> { + fn sub_target<'v>(this: &'v StarlarkProvidersLabel) -> anyhow::Result>> { Ok(match this.label.name() { - ProvidersName::Default => None, - ProvidersName::NonDefault(box NonDefaultProvidersName::Named(s)) => { - Some(s.iter().map(|p| p.as_str()).collect()) - } - ProvidersName::NonDefault(box NonDefaultProvidersName::UnrecognizedFlavor(_)) => { - unreachable!( - "This should have been an error when looking up the corresponding analysis (`{}`)", - this.label - ) - } + ProvidersName::Default => NoneOr::None, + ProvidersName::NonDefault(flavor) => match flavor.as_ref() { + NonDefaultProvidersName::Named(names) => { + NoneOr::Other(names.iter().map(|p| p.as_str()).collect()) + } + NonDefaultProvidersName::UnrecognizedFlavor(_) => { + unreachable!( + "This should have been an error when looking up the corresponding analysis (`{}`)", + this.label + ) + } + }, }) } #[starlark(attribute)] - fn path<'v>(this: &StarlarkProvidersLabel, heap: &Heap) -> anyhow::Result> { - let path = StarlarkCellPath(this.label.target().pkg().to_cell_path()); - Ok(path.alloc_value(heap)) + fn path<'v>(this: &StarlarkProvidersLabel) -> anyhow::Result { + Ok(StarlarkCellPath(this.label.target().pkg().to_cell_path())) } #[starlark(attribute)] @@ -306,6 +294,74 @@ pub fn register_providers_label(globals: &mut GlobalsBuilder) { #[cfg(test)] mod tests { - // Tests live in `buck2_build_api` crate because tests depend on `Tester` type - // which depends on `buck2_build_api` heavily. 
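Both label types expose the same `sub_target` shape: `ProvidersName::Default` becomes `None` on the Starlark side, and named providers become their string parts. A rough model with plain `String`s in place of interned `ProviderName`s and `NoneOr`:

enum ProvidersName {
    Default,
    Named(Vec<String>),
}

// Mirrors the `sub_target` attribute above (the unrecognized-flavor arm,
// which is unreachable at this point in the real code, is omitted).
fn sub_target(name: &ProvidersName) -> Option<Vec<&str>> {
    match name {
        ProvidersName::Default => None,
        ProvidersName::Named(names) => Some(names.iter().map(|s| s.as_str()).collect()),
    }
}

fn main() {
    assert_eq!(sub_target(&ProvidersName::Default), None);
    let named = ProvidersName::Named(vec!["qux".to_owned(), "quux".to_owned()]);
    assert_eq!(sub_target(&named), Some(vec!["qux", "quux"]));
}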
+ use buck2_core::configuration::data::ConfigurationData; + use buck2_core::provider::label::ConfiguredProvidersLabel; + use buck2_core::provider::label::NonDefaultProvidersName; + use buck2_core::provider::label::ProviderName; + use buck2_core::provider::label::ProvidersLabel; + use buck2_core::provider::label::ProvidersName; + use buck2_core::target::configured_target_label::ConfiguredTargetLabel; + use buck2_core::target::label::label::TargetLabel; + use buck2_util::arc_str::ArcSlice; + use starlark::assert::Assert; + use starlark::environment::GlobalsBuilder; + use starlark::starlark_module; + + use crate::types::configured_providers_label::StarlarkConfiguredProvidersLabel; + use crate::types::configured_providers_label::StarlarkProvidersLabel; + + #[starlark_module] + fn register_test_providers_label(globals: &mut GlobalsBuilder) { + fn configured_providers_label() -> anyhow::Result { + Ok(StarlarkConfiguredProvidersLabel { + label: ConfiguredProvidersLabel::new( + ConfiguredTargetLabel::testing_parse( + "foo//bar:baz", + ConfigurationData::testing_new(), + ), + ProvidersName::NonDefault(triomphe::Arc::new(NonDefaultProvidersName::Named( + ArcSlice::new([ + ProviderName::new("qux".to_owned())?, + ProviderName::new("quux".to_owned())?, + ]), + ))), + ), + }) + } + + fn providers_label() -> anyhow::Result { + Ok(StarlarkProvidersLabel { + label: ProvidersLabel::new( + TargetLabel::testing_parse("foo//bar:baz"), + ProvidersName::NonDefault(triomphe::Arc::new(NonDefaultProvidersName::Named( + ArcSlice::new([ + ProviderName::new("qux".to_owned())?, + ProviderName::new("quux".to_owned())?, + ]), + ))), + ), + }) + } + } + + #[test] + fn test_configured_providers_label_to_json() { + let mut a = Assert::new(); + a.globals_add(register_test_providers_label); + a.eq( + &"'\"foo//bar:baz[qux][quux] ()\"'" + .replace("", &ConfigurationData::testing_new().to_string()), + "json.encode(configured_providers_label())", + ); + } + + #[test] + fn test_providers_label_to_json() { + let mut a = Assert::new(); + a.globals_add(register_test_providers_label); + a.eq( + "'\"foo//bar:baz[qux][quux]\"'", + "json.encode(providers_label())", + ); + } } diff --git a/app/buck2_interpreter/src/types/opaque_metadata.rs b/app/buck2_interpreter/src/types/opaque_metadata.rs index 38132a8e97650..5f06745797fa5 100644 --- a/app/buck2_interpreter/src/types/opaque_metadata.rs +++ b/app/buck2_interpreter/src/types/opaque_metadata.rs @@ -11,11 +11,13 @@ use allocative::Allocative; use derive_more::Display; use dupe::Dupe; use starlark::any::ProvidesStaticType; -use starlark::docs::StarlarkDocs; -use starlark::starlark_simple_value; use starlark::values::starlark_value; +use starlark::values::AllocStaticSimple; +use starlark::values::AllocValue; +use starlark::values::Heap; use starlark::values::NoSerialize; use starlark::values::StarlarkValue; +use starlark::values::Value; /// We do not make metadata available to rules, so instead we expose this opaque value when trying /// to resolve it to a Starlark object. 
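The `OpaqueMetadata` and `StarlarkProjectRoot` changes below both switch to handing out one statically allocated instance instead of allocating a fresh value per heap. A std-only analogue of that singleton pattern, using `OnceLock` in place of starlark's `AllocStaticSimple`:

use std::sync::OnceLock;

#[derive(Debug)]
struct ProjectRoot;

// Like the `instance()` helper below: allocate once, hand out the same
// value to every caller.
fn instance() -> &'static ProjectRoot {
    static INSTANCE: OnceLock<ProjectRoot> = OnceLock::new();
    INSTANCE.get_or_init(|| ProjectRoot)
}

fn main() {
    // Every call yields the same allocation.
    assert!(std::ptr::eq(instance(), instance()));
}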
@@ -26,14 +28,19 @@ use starlark::values::StarlarkValue; PartialEq, ProvidesStaticType, Allocative, - StarlarkDocs, NoSerialize, Display )] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] pub struct OpaqueMetadata; -starlark_simple_value!(OpaqueMetadata); - -#[starlark_value(type = "opaque_metadata")] +#[starlark_value(type = "OpaqueMetadata")] impl<'v> StarlarkValue<'v> for OpaqueMetadata {} + +impl<'v> AllocValue<'v> for OpaqueMetadata { + fn alloc_value(self, _heap: &'v Heap) -> Value<'v> { + static INSTANCE: AllocStaticSimple = + AllocStaticSimple::alloc(OpaqueMetadata); + INSTANCE.to_frozen_value().to_value() + } +} diff --git a/app/buck2_interpreter/src/types/project_root.rs b/app/buck2_interpreter/src/types/project_root.rs index f260dff7d6eee..980f282b23c02 100644 --- a/app/buck2_interpreter/src/types/project_root.rs +++ b/app/buck2_interpreter/src/types/project_root.rs @@ -10,20 +10,45 @@ use allocative::Allocative; use derive_more::Display; use starlark::any::ProvidesStaticType; +use starlark::environment::GlobalsBuilder; +use starlark::starlark_module; use starlark::values::starlark_value; +use starlark::values::starlark_value_as_type::StarlarkValueAsType; +use starlark::values::AllocFrozenValue; +use starlark::values::AllocStaticSimple; +use starlark::values::AllocValue; +use starlark::values::FrozenHeap; +use starlark::values::FrozenValue; +use starlark::values::Heap; use starlark::values::NoSerialize; use starlark::values::StarlarkValue; +use starlark::values::Value; #[derive(Debug, PartialEq, Display, ProvidesStaticType, NoSerialize, Allocative)] -pub struct ProjectRoot; +pub struct StarlarkProjectRoot; -impl ProjectRoot { - pub fn new() -> Self { - Self +#[starlark_value(type = "project_root", StarlarkTypeRepr, UnpackValue)] +impl<'v> StarlarkValue<'v> for StarlarkProjectRoot {} + +fn instance() -> FrozenValue { + static INSTANCE: AllocStaticSimple = + AllocStaticSimple::alloc(StarlarkProjectRoot); + INSTANCE.to_frozen_value() +} + +impl<'v> AllocValue<'v> for StarlarkProjectRoot { + fn alloc_value(self, _heap: &'v Heap) -> Value<'v> { + instance().to_value() } } -starlark_simple_value!(ProjectRoot); +impl AllocFrozenValue for StarlarkProjectRoot { + fn alloc_frozen_value(self, _heap: &FrozenHeap) -> FrozenValue { + instance() + } +} -#[starlark_value(type = "project_root")] -impl<'v> StarlarkValue<'v> for ProjectRoot {} +#[starlark_module] +pub fn register_project_root(globals: &mut GlobalsBuilder) { + const ProjectRoot: StarlarkValueAsType = StarlarkValueAsType::new(); +} diff --git a/app/buck2_interpreter/src/types/provider/mod.rs b/app/buck2_interpreter/src/types/provider.rs similarity index 100% rename from app/buck2_interpreter/src/types/provider/mod.rs rename to app/buck2_interpreter/src/types/provider.rs diff --git a/app/buck2_interpreter/src/types/provider/callable.rs b/app/buck2_interpreter/src/types/provider/callable.rs index 7009c81304bd5..90e1b2e096719 100644 --- a/app/buck2_interpreter/src/types/provider/callable.rs +++ b/app/buck2_interpreter/src/types/provider/callable.rs @@ -10,26 +10,11 @@ use std::sync::Arc; use buck2_core::provider::id::ProviderId; -use dupe::Dupe; use starlark::any::ProvidesStaticType; use starlark::values::ValueLike; -#[derive(Debug, thiserror::Error)] -enum ProviderCallableError { - #[error("provider callable did not have a bound id; this is an internal error")] - ProviderCallableMissingID, -} - pub trait ProviderCallableLike { - fn id(&self) -> Option<&Arc>; - - /// Frozen callables should always have this set. 
It's an error if somehow it doesn't. - fn require_id(&self) -> anyhow::Result> { - match self.id() { - Some(id) => Ok(id.dupe()), - None => Err(ProviderCallableError::ProviderCallableMissingID.into()), - } - } + fn id(&self) -> anyhow::Result<&Arc>; } unsafe impl<'v> ProvidesStaticType<'v> for &'v dyn ProviderCallableLike { diff --git a/app/buck2_interpreter/src/types/regex.rs b/app/buck2_interpreter/src/types/regex.rs index 700ab61061b85..c2ba00da49344 100644 --- a/app/buck2_interpreter/src/types/regex.rs +++ b/app/buck2_interpreter/src/types/regex.rs @@ -18,13 +18,15 @@ use starlark::environment::GlobalsBuilder; use starlark::environment::Methods; use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; +use starlark::starlark_module; +use starlark::starlark_simple_value; use starlark::typing::Ty; use starlark::values::starlark_value; use starlark::values::NoSerialize; use starlark::values::StarlarkValue; /// Wrapper for `regex::Regex`. -#[derive(ProvidesStaticType, Debug, NoSerialize, StarlarkDocs, Allocative)] +#[derive(ProvidesStaticType, Debug, NoSerialize, Allocative)] pub enum BuckStarlarkRegex { // TODO(nga): do not skip. // And this is important because regex can have a lot of cache. diff --git a/app/buck2_interpreter/src/types/rule.rs b/app/buck2_interpreter/src/types/rule.rs index 329a616cea341..8353e46bf9ce1 100644 --- a/app/buck2_interpreter/src/types/rule.rs +++ b/app/buck2_interpreter/src/types/rule.rs @@ -8,8 +8,19 @@ */ use buck2_util::late_binding::LateBinding; +use starlark::values::list::ListType; +use starlark::values::typing::FrozenStarlarkCallable; +use starlark::values::FrozenStringValue; use starlark::values::FrozenValue; +use starlark_map::small_map::SmallMap; /// `rule()` value `impl` field. -pub static FROZEN_RULE_GET_IMPL: LateBinding anyhow::Result> = - LateBinding::new("FROZEN_RULE_GET_IMPL"); +pub static FROZEN_RULE_GET_IMPL: LateBinding< + fn( + FrozenValue, + ) -> anyhow::Result>>, +> = LateBinding::new("FROZEN_RULE_GET_IMPL"); + +pub static FROZEN_PROMISE_ARTIFACT_MAPPINGS_GET_IMPL: LateBinding< + fn(FrozenValue) -> anyhow::Result>, +> = LateBinding::new("FROZEN_PROMISE_ARTIFACT_MAPPINGS_GET_IMPL"); diff --git a/app/buck2_interpreter/src/types/target_label.rs b/app/buck2_interpreter/src/types/target_label.rs index 1c5f00f620245..cf3f30d924c3c 100644 --- a/app/buck2_interpreter/src/types/target_label.rs +++ b/app/buck2_interpreter/src/types/target_label.rs @@ -17,30 +17,31 @@ use buck2_core::provider::label::ProviderName; use buck2_core::provider::label::ProvidersLabel; use buck2_core::provider::label::ProvidersName; use buck2_core::target::configured_target_label::ConfiguredTargetLabel; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use derive_more::Display; use derive_more::From; use dupe::Dupe; use serde::Serialize; use starlark::any::ProvidesStaticType; use starlark::collections::StarlarkHasher; -use starlark::docs::StarlarkDocs; use starlark::environment::GlobalsBuilder; use starlark::environment::Methods; use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; -use starlark::typing::Ty; -use starlark::values::list::AllocList; -use starlark::values::list::ListRef; +use starlark::starlark_module; +use starlark::starlark_simple_value; +use starlark::values::list::UnpackList; use starlark::values::starlark_value; use starlark::values::starlark_value_as_type::StarlarkValueAsType; +use starlark::values::type_repr::StarlarkTypeRepr; use 
starlark::values::Heap; use starlark::values::StarlarkValue; +use starlark::values::StringValue; +use starlark::values::UnpackValue; use starlark::values::Value; use starlark::values::ValueError; use starlark::values::ValueLike; -use crate::starlark::values::AllocValue; use crate::types::cell_path::StarlarkCellPath; use crate::types::configuration::StarlarkConfiguration; use crate::types::configured_providers_label::StarlarkConfiguredProvidersLabel; @@ -57,7 +58,6 @@ use crate::types::configured_providers_label::StarlarkProvidersLabel; From, ProvidesStaticType, Serialize, - StarlarkDocs, Allocative )] #[serde(transparent)] @@ -84,12 +84,12 @@ impl<'v> StarlarkValue<'v> for StarlarkTargetLabel { RES.methods(label_methods) } - fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { + fn write_hash(&self, hasher: &mut StarlarkHasher) -> starlark::Result<()> { self.hash(hasher); Ok(()) } - fn equals(&self, other: Value<'v>) -> anyhow::Result { + fn equals(&self, other: Value<'v>) -> starlark::Result { if let Some(other) = other.downcast_ref::() { Ok(self.label == other.label) } else { @@ -97,24 +97,20 @@ impl<'v> StarlarkValue<'v> for StarlarkTargetLabel { } } - fn compare(&self, other: Value<'v>) -> anyhow::Result { + fn compare(&self, other: Value<'v>) -> starlark::Result { if let Some(other) = other.downcast_ref::() { Ok(self.label.cmp(&other.label)) } else { ValueError::unsupported_with(self, "compare", other) } } - - fn get_type_starlark_repr() -> Ty { - Ty::starlark_value::() - } } #[starlark_module] fn label_methods(builder: &mut MethodsBuilder) { #[starlark(attribute)] - fn package<'v>(this: &StarlarkTargetLabel) -> anyhow::Result<&'v str> { - Ok(this.label.pkg().cell_relative_path().as_str()) + fn package<'v>(this: &StarlarkTargetLabel, heap: &'v Heap) -> anyhow::Result> { + Ok(heap.alloc_str_intern(this.label.pkg().cell_relative_path().as_str())) } #[starlark(attribute)] @@ -127,6 +123,11 @@ fn label_methods(builder: &mut MethodsBuilder) { Ok(this.label.pkg().cell_name().as_str()) } + #[starlark(attribute)] + fn path<'v>(this: &StarlarkTargetLabel) -> anyhow::Result { + Ok(StarlarkCellPath(this.label.pkg().to_cell_path())) + } + /// Converts a `TargetLabel` into its corresponding `ProvidersLabel` given the subtarget names, /// which is a list for each layer of subtarget /// @@ -142,7 +143,9 @@ fn label_methods(builder: &mut MethodsBuilder) { /// ``` fn with_sub_target<'v>( this: &StarlarkTargetLabel, - #[starlark(default = AllocList::EMPTY)] subtarget_name: Value<'v>, + // TODO(nga): must be either positional or named. 
+ #[starlark(default = SubtargetNameArg::List(UnpackList { items: Vec::new() }))] + subtarget_name: SubtargetNameArg<'v>, ) -> anyhow::Result { let providers_name = value_to_providers_name(subtarget_name)?; @@ -164,7 +167,6 @@ fn label_methods(builder: &mut MethodsBuilder) { From, ProvidesStaticType, Serialize, - StarlarkDocs, Allocative )] #[serde(transparent)] @@ -191,12 +193,12 @@ impl<'v> StarlarkValue<'v> for StarlarkConfiguredTargetLabel { RES.methods(configured_label_methods) } - fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { + fn write_hash(&self, hasher: &mut StarlarkHasher) -> starlark::Result<()> { self.hash(hasher); Ok(()) } - fn equals(&self, other: Value<'v>) -> anyhow::Result { + fn equals(&self, other: Value<'v>) -> starlark::Result { if let Some(other) = other.downcast_ref::() { Ok(self.label == other.label) } else { @@ -204,24 +206,23 @@ impl<'v> StarlarkValue<'v> for StarlarkConfiguredTargetLabel { } } - fn compare(&self, other: Value<'v>) -> anyhow::Result { + fn compare(&self, other: Value<'v>) -> starlark::Result { if let Some(other) = other.downcast_ref::() { Ok(self.label.cmp(&other.label)) } else { ValueError::unsupported_with(self, "compare", other) } } - - fn get_type_starlark_repr() -> Ty { - Ty::starlark_value::() - } } #[starlark_module] fn configured_label_methods(builder: &mut MethodsBuilder) { #[starlark(attribute)] - fn package<'v>(this: &StarlarkConfiguredTargetLabel) -> anyhow::Result<&'v str> { - Ok(this.label.pkg().cell_relative_path().as_str()) + fn package<'v>( + this: &StarlarkConfiguredTargetLabel, + heap: &'v Heap, + ) -> anyhow::Result> { + Ok(heap.alloc_str_intern(this.label.pkg().cell_relative_path().as_str())) } #[starlark(attribute)] @@ -235,9 +236,8 @@ fn configured_label_methods(builder: &mut MethodsBuilder) { } #[starlark(attribute)] - fn path<'v>(this: &StarlarkConfiguredTargetLabel, heap: &Heap) -> anyhow::Result> { - let path = StarlarkCellPath(this.label.pkg().to_cell_path()); - Ok(path.alloc_value(heap)) + fn path<'v>(this: &StarlarkConfiguredTargetLabel) -> anyhow::Result { + Ok(StarlarkCellPath(this.label.pkg().to_cell_path())) } fn config<'v>(this: &StarlarkConfiguredTargetLabel) -> anyhow::Result { @@ -266,7 +266,9 @@ fn configured_label_methods(builder: &mut MethodsBuilder) { /// ``` fn with_sub_target<'v>( this: &'v StarlarkConfiguredTargetLabel, - #[starlark(default = AllocList::EMPTY)] subtarget_name: Value<'v>, + // TODO(nga): must be either positional or named. 
+ #[starlark(default = SubtargetNameArg::List(UnpackList { items: Vec::new() }))] + subtarget_name: SubtargetNameArg<'v>, ) -> anyhow::Result { let providers_name = value_to_providers_name(subtarget_name)?; @@ -276,42 +278,33 @@ fn configured_label_methods(builder: &mut MethodsBuilder) { } } -pub fn value_to_providers_name<'v>(subtarget_name: Value<'v>) -> anyhow::Result { - let subtarget = if let Some(list) = ListRef::from_value(subtarget_name) { - list.iter() +#[derive(StarlarkTypeRepr, UnpackValue)] +enum SubtargetNameArg<'v> { + List(UnpackList), + Str(&'v str), +} + +fn value_to_providers_name(subtarget_name: SubtargetNameArg) -> anyhow::Result { + let subtarget = match subtarget_name { + SubtargetNameArg::List(list) => list + .items + .into_iter() .map(|name| { - name.unpack_str() - .ok_or_else(|| { - anyhow::anyhow!(ValueError::IncorrectParameterTypeNamedWithExpected( - "subtarget_name".to_owned(), - "list of str or str".to_owned(), - name.get_type().to_owned(), - )) - }) - .and_then(|name| { - ProviderName::new(name.to_owned()) - .context("for parameter `subtarget_name`") - .map_err(|e| anyhow::anyhow!(e)) - }) + ProviderName::new(name) + .context("for parameter `subtarget_name`") + .map_err(|e| anyhow::anyhow!(e)) }) - .collect::>>()? - } else if let Some(str) = subtarget_name.unpack_str() { - vec![ProviderName::new(str.to_owned()).context("for parameter `subtarget_name`")?] - } else { - return Err(anyhow::anyhow!( - ValueError::IncorrectParameterTypeNamedWithExpected( - "subtarget_name".to_owned(), - "list of str or str".to_owned(), - subtarget_name.get_type().to_owned() - ) - )); + .collect::>>()?, + SubtargetNameArg::Str(str) => { + vec![ProviderName::new(str.to_owned()).context("for parameter `subtarget_name`")?] + } }; Ok(if subtarget.is_empty() { ProvidersName::Default } else { - ProvidersName::NonDefault(Box::new(NonDefaultProvidersName::Named( - subtarget.into_boxed_slice(), + ProvidersName::NonDefault(triomphe::Arc::new(NonDefaultProvidersName::Named( + buck2_util::arc_str::ArcSlice::from_iter(subtarget), ))) }) } diff --git a/app/buck2_interpreter/src/types/transition.rs b/app/buck2_interpreter/src/types/transition.rs index e80e4db6ff0c3..6cba2dc0ff0ad 100644 --- a/app/buck2_interpreter/src/types/transition.rs +++ b/app/buck2_interpreter/src/types/transition.rs @@ -13,7 +13,7 @@ use buck2_core::configuration::transition::id::TransitionId; use starlark::any::ProvidesStaticType; use starlark::values::Value; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum TransitionError { #[error("cfg parameter is not a transition object: {}", _0)] WrongType(String), diff --git a/app/buck2_interpreter_for_build/BUCK b/app/buck2_interpreter_for_build/BUCK index 7410d17838e4b..5049c69279c84 100644 --- a/app/buck2_interpreter_for_build/BUCK +++ b/app/buck2_interpreter_for_build/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -29,23 +28,25 @@ rust_library( "fbsource//third-party/rust:serde_json", "fbsource//third-party/rust:sha2", "fbsource//third-party/rust:smallvec", - "fbsource//third-party/rust:thiserror", "fbsource//third-party/rust:tracing", "fbsource//third-party/rust:twox-hash", "//buck2/allocative/allocative:allocative", "//buck2/app/buck2_common:buck2_common", "//buck2/app/buck2_core:buck2_core", "//buck2/app/buck2_data:buck2_data", + "//buck2/app/buck2_error:buck2_error", + "//buck2/app/buck2_event_observer:buck2_event_observer", 
"//buck2/app/buck2_events:buck2_events", + "//buck2/app/buck2_futures:buck2_futures", "//buck2/app/buck2_interpreter:buck2_interpreter", "//buck2/app/buck2_node:buck2_node", "//buck2/app/buck2_query:buck2_query", "//buck2/app/buck2_query_parser:buck2_query_parser", "//buck2/app/buck2_util:buck2_util", "//buck2/dice/dice:dice", + "//buck2/gazebo/cmp_any:cmp_any", "//buck2/gazebo/dupe:dupe", "//buck2/gazebo/gazebo:gazebo", - "//buck2/shed/more_futures:more_futures", "//buck2/starlark-rust/starlark:starlark", "//buck2/starlark-rust/starlark_map:starlark_map", ], diff --git a/app/buck2_interpreter_for_build/Cargo.toml b/app/buck2_interpreter_for_build/Cargo.toml index 33a365c1efae0..fbed3f4b4a6a2 100644 --- a/app/buck2_interpreter_for_build/Cargo.toml +++ b/app/buck2_interpreter_for_build/Cargo.toml @@ -1,13 +1,16 @@ [package] +description = "Interpreter setup to BUCK files" +edition = "2021" +license = { workspace = true } name = "buck2_interpreter_for_build" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Interpreter setup to BUCK files" [dependencies] anyhow = { workspace = true } async-trait = { workspace = true } bumpalo = { workspace = true } +cmp_any = { workspace = true } derivative = { workspace = true } derive_more = { workspace = true } either = { workspace = true } @@ -20,27 +23,28 @@ indoc = { workspace = true } itertools = { workspace = true } maplit = { workspace = true } once_cell = { workspace = true } -thiserror = { workspace = true } -tracing = { workspace = true } -twox-hash = { workspace = true } serde_json = { workspace = true } sha2 = { workspace = true } smallvec = { workspace = true } +tracing = { workspace = true } +twox-hash = { workspace = true } allocative = { workspace = true } dice = { workspace = true } -gazebo = { workspace = true } -more_futures = { workspace = true } dupe = { workspace = true } +gazebo = { workspace = true } starlark = { workspace = true } starlark_map = { workspace = true } buck2_common = { workspace = true } buck2_core = { workspace = true } buck2_data = { workspace = true } +buck2_error = { workspace = true } +buck2_event_observer = { workspace = true } buck2_events = { workspace = true } -buck2_node = { workspace = true } +buck2_futures = { workspace = true } buck2_interpreter = { workspace = true } +buck2_node = { workspace = true } buck2_query = { workspace = true } buck2_query_parser = { workspace = true } buck2_util = { workspace = true } diff --git a/app/buck2_interpreter_for_build/src/attrs.rs b/app/buck2_interpreter_for_build/src/attrs.rs new file mode 100644 index 0000000000000..001937ab5b28e --- /dev/null +++ b/app/buck2_interpreter_for_build/src/attrs.rs @@ -0,0 +1,89 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use anyhow::Context; +use buck2_node::attrs::attr::Attribute; +use buck2_node::attrs::attr::CoercedValue; +use buck2_node::attrs::coercion_context::AttrCoercionContext; +use buck2_node::attrs::configurable::AttrIsConfigurable; +use starlark::docs::DocString; +use starlark::docs::DocStringKind; +use starlark::values::Value; + +use crate::attrs::coerce::attr_type::ty_maybe_select::TyMaybeSelect; +use crate::attrs::coerce::attr_type::AttrTypeExt; +use crate::attrs::coerce::error::CoercionError; + +pub(crate) mod attrs_global; +pub mod coerce; +pub(crate) mod starlark_attribute; + +#[derive(Debug, buck2_error::Error)] +#[buck2(input)] +enum AttrCoerceError { + #[error("Parameter `{0}` had no value provided, but it is mandatory")] + MissingMandatoryParameter(String), +} + +pub trait AttributeCoerceExt { + fn coerce<'v>( + &self, + param_name: &str, + configurable: AttrIsConfigurable, + coercer_ctx: &dyn AttrCoercionContext, + value: Value<'v>, + ) -> anyhow::Result; + + fn docstring(&self) -> Option; + + fn starlark_type(&self) -> TyMaybeSelect; +} + +impl AttributeCoerceExt for Attribute { + /// Attempt to coerce a value. If the value provided is `None`, and a default value is available, + /// that default value is returned. + fn coerce<'v>( + &self, + param_name: &str, + configurable: AttrIsConfigurable, + coercer_ctx: &dyn AttrCoercionContext, + value: Value<'v>, + ) -> anyhow::Result { + if self.is_default_only() { + if value.is_none() { + return Ok(CoercedValue::Default); + } else { + return Err(CoercionError::DefaultOnly(value.to_string()).into()); + } + } + + match self.default() { + default if !value.is_none() => self + .coercer() + .coerce_with_default(configurable, coercer_ctx, value, default.map(|x| &**x)) + .map(CoercedValue::Custom) + .with_context(|| { + format!( + "Error coercing attribute `{}` of type `{}`", + param_name, self + ) + }), + Some(_) => Ok(CoercedValue::Default), + None => Err(AttrCoerceError::MissingMandatoryParameter(param_name.to_owned()).into()), + } + } + + fn docstring(&self) -> Option { + DocString::from_docstring(DocStringKind::Starlark, self.doc()) + } + + fn starlark_type(&self) -> TyMaybeSelect { + self.coercer().starlark_type() + } +} diff --git a/app/buck2_interpreter_for_build/src/attrs/attrs_global.rs b/app/buck2_interpreter_for_build/src/attrs/attrs_global.rs index 97c8b7776be03..47513fe0713fa 100644 --- a/app/buck2_interpreter_for_build/src/attrs/attrs_global.rs +++ b/app/buck2_interpreter_for_build/src/attrs/attrs_global.rs @@ -13,7 +13,8 @@ use allocative::Allocative; use anyhow::Context as _; use buck2_core::plugins::PluginKindSet; use buck2_core::provider::label::ProvidersLabel; -use buck2_interpreter::coerce::COERCE_TARGET_LABEL; +use buck2_core::target::label::interner::ConcurrentTargetLabelInterner; +use buck2_interpreter::coerce::COERCE_TARGET_LABEL_FOR_BZL; use buck2_interpreter::types::provider::callable::ValueAsProviderCallableLike; use buck2_interpreter::types::transition::transition_id_from_value; use buck2_node::attrs::attr::Attribute; @@ -34,14 +35,17 @@ use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; use starlark::eval::Evaluator; use starlark::starlark_module; +use starlark::values::list_or_tuple::UnpackListOrTuple; use starlark::values::starlark_value; +use starlark::values::tuple::UnpackTuple; use starlark::values::NoSerialize; use starlark::values::ProvidesStaticType; use starlark::values::StarlarkValue; +use starlark::values::StringValue; use starlark::values::Value; use 
starlark::values::ValueError; -use starlark::StarlarkDocs; -use thiserror::Error; +use starlark::values::ValueOf; +use starlark::values::ValueTypedComplex; use tracing::error; use crate::attrs::coerce::attr_type::AttrTypeExt; @@ -49,12 +53,14 @@ use crate::attrs::coerce::ctx::BuildAttrCoercionContext; use crate::attrs::starlark_attribute::register_attr_type; use crate::attrs::starlark_attribute::StarlarkAttribute; use crate::interpreter::build_context::BuildContext; -use crate::plugins::plugin_kind_from_value; +use crate::interpreter::selector::StarlarkSelector; use crate::plugins::AllPlugins; +use crate::plugins::PluginKindArg; const OPTION_NONE_EXPLANATION: &str = "`None` as an attribute value always picks the default. For `attrs.option`, if the default isn't `None`, there is no way to express `None`."; -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] +#[buck2(input)] enum AttrError { #[error( "`attrs.option` `default` parameter must be `None` or absent, got `{0}`.\n{}", @@ -68,7 +74,7 @@ enum AttrError { pub(crate) trait AttributeExt { /// Helper to create an attribute from attrs.foo functions fn attr<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, default: Option<Value<'v>>, doc: &str, coercer: AttrType, @@ -81,7 +87,7 @@ pub(crate) trait AttributeExt { impl AttributeExt for Attribute { /// Helper to create an attribute from attrs.foo functions fn attr<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, default: Option<Value<'v>>, doc: &str, coercer: AttrType, @@ -92,7 +98,7 @@ impl AttributeExt for Attribute { coercer .coerce( AttrIsConfigurable::Yes, - &get_attr_coercion_context(eval)?, + &attr_coercion_context_for_bzl(eval)?, x, ) .context("Error coercing attribute default")?, @@ -118,25 +124,26 @@ impl AttributeExt for Attribute { } } -/// Grab a new coercion context object based on the main build file that is being evaluated. -/// This is used because we do not have access to a specific shared instance via ctx.extra -/// when evaluating .bzl files -pub(crate) fn get_attr_coercion_context<'v>( - eval: &Evaluator<'v, '_>, +/// Coercion context for evaluating bzl files (attr default, transition rules). +pub(crate) fn attr_coercion_context_for_bzl<'v>( + eval: &Evaluator<'v, '_, '_>, ) -> anyhow::Result<BuildAttrCoercionContext> { let build_context = BuildContext::from_context(eval)?; Ok(BuildAttrCoercionContext::new_no_package( build_context.cell_info().cell_resolver().dupe(), build_context.cell_info().name().name(), + build_context.cell_info().cell_alias_resolver().dupe(), + // It is OK to not deduplicate because we don't coerce a lot of labels in bzl files. 
+ Arc::new(ConcurrentTargetLabelInterner::default()), )) } -pub(crate) fn init_coerce_target_label() { - COERCE_TARGET_LABEL - .init(|eval, value| get_attr_coercion_context(eval)?.coerce_target_label(value)) +pub(crate) fn init_coerce_target_label_for_bzl() { + COERCE_TARGET_LABEL_FOR_BZL + .init(|eval, value| attr_coercion_context_for_bzl(eval)?.coerce_target_label(value)) } -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum DepError { #[error( "relative labels ('{invalid_label}') are not permitted as default values for `{attr}` \ @@ -149,7 +156,7 @@ enum DepError { fn dep_like_attr_handle_providers_arg(providers: Vec) -> anyhow::Result { Ok(ProviderIdSet::from(providers.try_map( |v| match v.as_provider_callable() { - Some(callable) => callable.require_id(), + Some(callable) => anyhow::Ok(callable.id()?.dupe()), None => Err(ValueError::IncorrectParameterTypeNamed("providers".to_owned()).into()), }, )?)) @@ -181,7 +188,7 @@ fn attr_module(registry: &mut MethodsBuilder) { #[starlark(require = named)] default: Option>, #[starlark(require = named)] validate: Option>, #[starlark(require = named, default = "")] doc: &str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { let _unused = validate; Attribute::attr(eval, default, doc, AttrType::string()) @@ -193,7 +200,7 @@ fn attr_module(registry: &mut MethodsBuilder) { #[starlark(require = pos)] inner: &StarlarkAttribute, #[starlark(require = named)] default: Option>, #[starlark(require = named, default = "")] doc: &str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { let coercer = AttrType::list(inner.coercer_for_inner()?); Attribute::attr(eval, default, doc, coercer) @@ -204,13 +211,14 @@ fn attr_module(registry: &mut MethodsBuilder) { /// plan to execute things from this dependency as part of the compilation. fn exec_dep<'v>( #[starlark(this)] _this: Value<'v>, - #[starlark(require = named, default = Vec::new())] providers: Vec>, + #[starlark(require = named, default = UnpackListOrTuple::default())] + providers: UnpackListOrTuple>, #[starlark(require = named)] default: Option>, #[starlark(require = named, default = "")] doc: &str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { Attribute::check_not_relative_label(default, "attrs.exec_dep")?; - let required_providers = dep_like_attr_handle_providers_arg(providers)?; + let required_providers = dep_like_attr_handle_providers_arg(providers.items)?; let coercer = AttrType::exec_dep(required_providers); Attribute::attr(eval, default, doc, coercer) } @@ -220,27 +228,29 @@ fn attr_module(registry: &mut MethodsBuilder) { /// dependencies will be used to select the execution platform for this rule. 
fn toolchain_dep<'v>( #[starlark(this)] _this: Value<'v>, - #[starlark(require = named, default = Vec::new())] providers: Vec>, + #[starlark(require = named, default = UnpackListOrTuple::default())] + providers: UnpackListOrTuple>, #[starlark(require = named)] default: Option>, #[starlark(require = named, default = "")] doc: &str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { Attribute::check_not_relative_label(default, "attrs.toolchain_dep")?; - let required_providers = dep_like_attr_handle_providers_arg(providers)?; + let required_providers = dep_like_attr_handle_providers_arg(providers.items)?; let coercer = AttrType::toolchain_dep(required_providers); Attribute::attr(eval, default, doc, coercer) } fn transition_dep<'v>( #[starlark(this)] _this: Value<'v>, - #[starlark(require = named, default = Vec::new())] providers: Vec>, + #[starlark(require = named, default = UnpackListOrTuple::default())] + providers: UnpackListOrTuple>, #[starlark(require = named)] cfg: Value<'v>, #[starlark(require = named)] default: Option>, #[starlark(require = named, default = "")] doc: &str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { Attribute::check_not_relative_label(default, "attrs.transition_dep")?; - let required_providers = dep_like_attr_handle_providers_arg(providers)?; + let required_providers = dep_like_attr_handle_providers_arg(providers.items)?; let transition_id = transition_id_from_value(cfg)?; let coercer = AttrType::transition_dep(required_providers, transition_id); @@ -249,7 +259,7 @@ fn attr_module(registry: &mut MethodsBuilder) { Some(default) => { match coercer.coerce( AttrIsConfigurable::Yes, - &get_attr_coercion_context(eval)?, + &attr_coercion_context_for_bzl(eval)?, default, ) { Ok(coerced_default) => Some(coerced_default), @@ -267,27 +277,29 @@ fn attr_module(registry: &mut MethodsBuilder) { fn configured_dep<'v>( #[starlark(this)] _this: Value<'v>, - #[starlark(require = named, default = Vec::new())] providers: Vec>, + #[starlark(require = named, default = UnpackListOrTuple::default())] + providers: UnpackListOrTuple>, #[starlark(require = named)] default: Option>, #[starlark(require = named, default = "")] doc: &str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { Attribute::check_not_relative_label(default, "attrs.configured_dep")?; - let required_providers = dep_like_attr_handle_providers_arg(providers)?; + let required_providers = dep_like_attr_handle_providers_arg(providers.items)?; let coercer = AttrType::configured_dep(required_providers); Attribute::attr(eval, default, doc, coercer) } fn split_transition_dep<'v>( #[starlark(this)] _this: Value<'v>, - #[starlark(require = named, default = Vec::new())] providers: Vec>, + #[starlark(require = named, default = UnpackListOrTuple::default())] + providers: UnpackListOrTuple>, #[starlark(require = named)] cfg: Value<'v>, #[starlark(require = named)] default: Option>, #[starlark(require = named, default = "")] doc: &str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { Attribute::check_not_relative_label(default, "attrs.split_transition_dep")?; - let required_providers = dep_like_attr_handle_providers_arg(providers)?; + let required_providers = dep_like_attr_handle_providers_arg(providers.items)?; let transition_id = transition_id_from_value(cfg)?; let coercer = AttrType::split_transition_dep(required_providers, transition_id); @@ -296,7 +308,7 @@ fn 
attr_module(registry: &mut MethodsBuilder) { Some(default) => { match coercer.coerce( AttrIsConfigurable::Yes, - &get_attr_coercion_context(eval)?, + &attr_coercion_context_for_bzl(eval)?, default, ) { Ok(coerced_default) => Some(coerced_default), @@ -314,13 +326,12 @@ fn attr_module(registry: &mut MethodsBuilder) { fn plugin_dep<'v>( #[starlark(this)] _this: Value<'v>, - #[starlark(require = named)] kind: Value<'v>, + #[starlark(require = named)] kind: PluginKindArg, #[starlark(require = named)] default: Option>, #[starlark(require = named, default = "")] doc: &str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { - let kind = plugin_kind_from_value(kind)?; - Attribute::attr(eval, default, doc, AttrType::plugin_dep(kind)) + Attribute::attr(eval, default, doc, AttrType::plugin_dep(kind.plugin_kind)) } /// Takes a target from the user, as a string, and supplies a dependency to the rule. @@ -331,27 +342,31 @@ fn attr_module(registry: &mut MethodsBuilder) { /// on the dependency. fn dep<'v>( #[starlark(this)] _this: Value<'v>, - #[starlark(require = named, default = Vec::new())] providers: Vec>, - #[starlark(require = named, default = Vec::new())] pulls_plugins: Vec>, - #[starlark(require = named, default = Either::Left(Vec::new()))] - pulls_and_pushes_plugins: Either>, &'v AllPlugins>, + #[starlark(require = named, default = UnpackListOrTuple::default())] + providers: UnpackListOrTuple>, + #[starlark(require = named, default = UnpackListOrTuple::default())] + pulls_plugins: UnpackListOrTuple, + #[starlark(require = named, default = Either::Left(UnpackListOrTuple::default()))] + pulls_and_pushes_plugins: Either, &'v AllPlugins>, #[starlark(require = named)] default: Option>, #[starlark(require = named, default = "")] doc: &str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { Attribute::check_not_relative_label(default, "attrs.dep")?; - let required_providers = dep_like_attr_handle_providers_arg(providers)?; + let required_providers = dep_like_attr_handle_providers_arg(providers.items)?; let plugin_kinds = match pulls_and_pushes_plugins { Either::Right(_) => PluginKindSet::ALL, Either::Left(pulls_and_pushes_plugins) => { let pulls_and_pushes_plugins: Vec<_> = pulls_and_pushes_plugins + .items .into_iter() - .map(plugin_kind_from_value) - .collect::>()?; + .map(|PluginKindArg { plugin_kind }| plugin_kind) + .collect(); let pulls_plugins: Vec<_> = pulls_plugins + .items .into_iter() - .map(plugin_kind_from_value) - .collect::>()?; + .map(|PluginKindArg { plugin_kind }| plugin_kind) + .collect(); PluginKindSet::new(pulls_plugins, pulls_and_pushes_plugins)? 
} }; @@ -366,7 +381,7 @@ fn attr_module(registry: &mut MethodsBuilder) { #[starlark(this)] _this: Value<'v>, #[starlark(require = named, default = "")] doc: &str, #[starlark(require = named)] default: Option>, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { Attribute::attr(eval, default, doc, AttrType::any()) } @@ -376,7 +391,7 @@ fn attr_module(registry: &mut MethodsBuilder) { #[starlark(this)] _this: Value<'v>, #[starlark(require = named)] default: Option>, #[starlark(require = named, default = "")] doc: &str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { Attribute::attr(eval, default, doc, AttrType::bool()) } @@ -392,7 +407,7 @@ fn attr_module(registry: &mut MethodsBuilder) { #[starlark(require = pos)] inner: &StarlarkAttribute, #[starlark(require = named)] default: Option>, #[starlark(require = named, default = "")] doc: &str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { let coercer = AttrType::option(inner.coercer_for_inner()?); let attr = Attribute::attr(eval, default, doc, coercer)?; @@ -432,7 +447,7 @@ fn attr_module(registry: &mut MethodsBuilder) { #[starlark(this)] _this: Value<'v>, #[starlark(require = named)] default: Option>, #[starlark(require = named, default = "")] doc: &str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { Attribute::attr(eval, default, doc, AttrType::label()) } @@ -446,7 +461,7 @@ fn attr_module(registry: &mut MethodsBuilder) { #[starlark(require = named, default = false)] sorted: bool, #[starlark(require = named)] default: Option>, #[starlark(require = named, default = "")] doc: &str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { let coercer = AttrType::dict(key.coercer_for_inner()?, value.coercer_for_inner()?, sorted); Attribute::attr(eval, default, doc, coercer) @@ -463,7 +478,7 @@ fn attr_module(registry: &mut MethodsBuilder) { #[starlark(require = named)] default: Option>, #[starlark(require = named, default = "")] doc: &str, #[starlark(require = named, default = false)] anon_target_compatible: bool, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { let _unused = json; Attribute::attr(eval, default, doc, AttrType::arg(anon_target_compatible)) @@ -473,21 +488,28 @@ fn attr_module(registry: &mut MethodsBuilder) { /// Strings are matched case-insensitively, and always passed to the rule lowercase. fn r#enum<'v>( #[starlark(this)] _this: Value<'v>, - #[starlark(require = pos)] variants: Vec, - #[starlark(require = named)] default: Option>, + #[starlark(require = pos)] variants: UnpackListOrTuple, + #[starlark(require = named)] default: Option< + ValueOf<'v, Either, ValueTypedComplex<'v, StarlarkSelector<'v>>>>, + >, #[starlark(require = named, default = "")] doc: &str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { // Value seems to usually be a `[String]`, listing the possible values of the // enumeration. Unfortunately, for things like `exported_lang_preprocessor_flags` // it ends up being `Type` which doesn't match the data we see. - Attribute::attr(eval, default, doc, AttrType::enumeration(variants)?) 
+ Attribute::attr( + eval, + default.map(|v| v.value), + doc, + AttrType::enumeration(variants.items)?, + ) } fn configuration_label<'v>( #[starlark(this)] _this: Value<'v>, #[starlark(require = named, default = "")] doc: &str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { // TODO(nga): explain how this is different from `dep`. // This probably meant to be similar to `label`, but not configurable. @@ -504,7 +526,7 @@ fn attr_module(registry: &mut MethodsBuilder) { #[starlark(this)] _this: Value<'v>, #[starlark(require = named)] default: Option>, #[starlark(require = named, default = "")] doc: &str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { Attribute::attr(eval, default, doc, AttrType::string()) } @@ -515,7 +537,7 @@ fn attr_module(registry: &mut MethodsBuilder) { #[starlark(require = named, default = false)] sorted: bool, #[starlark(require = named)] default: Option>, #[starlark(require = named, default = "")] doc: &str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { let _unused = sorted; let coercer = AttrType::list(value_type.coercer_for_inner()?); @@ -528,7 +550,7 @@ fn attr_module(registry: &mut MethodsBuilder) { #[starlark(require = named, default = false)] sorted: bool, #[starlark(require = named)] default: Option>, #[starlark(require = named, default = "")] doc: &str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { let value_coercer = value_type.coercer_for_inner()?; let coercer = AttrType::one_of(vec![ @@ -541,24 +563,24 @@ fn attr_module(registry: &mut MethodsBuilder) { /// Given a list of alternative attributes, selects the first that matches and gives that to the rule. fn one_of<'v>( #[starlark(this)] _this: Value<'v>, - #[starlark(args)] args: Vec<&StarlarkAttribute>, + #[starlark(args)] args: UnpackTuple<&StarlarkAttribute>, #[starlark(require = named)] default: Option>, #[starlark(require = named, default = "")] doc: &str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { - let coercer = AttrType::one_of(args.into_try_map(|arg| arg.coercer_for_inner())?); + let coercer = AttrType::one_of(args.items.into_try_map(|arg| arg.coercer_for_inner())?); Attribute::attr(eval, default, doc, coercer) } /// Takes a tuple of values and gives a tuple to the rule. 
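A change repeated across `exec_dep`, `toolchain_dep`, `transition_dep`, `configured_dep`, `split_transition_dep`, `dep`, `enum`, `one_of`, and the `tuple` function that follows: plain `Vec` parameters become starlark-rust's `UnpackListOrTuple` (or `UnpackTuple` for `*args`), and call sites read `.items` instead of the `Vec` itself. A minimal sketch of the one property these hunks rely on, using a local stand-in rather than the real starlark-rust type:

// Stand-in for illustration only; the real `UnpackListOrTuple` lives in
// starlark-rust. The point is that a Starlark list and a Starlark tuple
// both unpack into the same `items: Vec<T>`.
#[derive(Default)]
struct UnpackListOrTuple<T> {
    items: Vec<T>,
}

fn handle_providers(providers: Vec<String>) -> usize {
    providers.len()
}

fn main() {
    // `providers = ["FooInfo"]` and `providers = ("FooInfo",)` would both
    // arrive here as the same unpacked value.
    let providers = UnpackListOrTuple {
        items: vec!["FooInfo".to_owned()],
    };
    // Call sites now pass `providers.items` where they used to pass the Vec.
    assert_eq!(1, handle_providers(providers.items));
}

The practical effect for rule authors is that a tuple is accepted anywhere a list previously was.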
fn tuple<'v>( #[starlark(this)] _this: Value<'v>, - #[starlark(args)] args: Vec<&StarlarkAttribute>, + #[starlark(args)] args: UnpackTuple<&StarlarkAttribute>, #[starlark(require = named)] default: Option>, #[starlark(require = named, default = "")] doc: &str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { - let coercer = AttrType::tuple(args.into_try_map(|arg| arg.coercer_for_inner())?); + let coercer = AttrType::tuple(args.items.into_try_map(|arg| arg.coercer_for_inner())?); Attribute::attr(eval, default, doc, coercer) } @@ -567,7 +589,7 @@ fn attr_module(registry: &mut MethodsBuilder) { #[starlark(this)] _this: Value<'v>, #[starlark(require = named)] default: Option>, #[starlark(require = named, default = "")] doc: &str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { Attribute::attr(eval, default, doc, AttrType::int()) } @@ -575,7 +597,7 @@ fn attr_module(registry: &mut MethodsBuilder) { fn query<'v>( #[starlark(this)] _this: Value<'v>, #[starlark(require = named, default = "")] doc: &str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { Attribute::attr(eval, None, doc, AttrType::query()) } @@ -609,22 +631,15 @@ fn attr_module(registry: &mut MethodsBuilder) { #[starlark(require = named, default = false)] allow_directory: bool, #[starlark(require = named)] default: Option>, #[starlark(require = named, default = "")] doc: &str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { Attribute::check_not_relative_label(default, "attrs.source")?; Attribute::attr(eval, default, doc, AttrType::source(allow_directory)) } } -#[derive( - Display, - Debug, - StarlarkDocs, - Allocative, - ProvidesStaticType, - NoSerialize -)] -#[display(fmt = "")] +#[derive(Display, Debug, Allocative, ProvidesStaticType, NoSerialize)] +#[display("")] struct Attrs; #[starlark_value(type = "attrs")] @@ -635,35 +650,7 @@ impl<'v> StarlarkValue<'v> for Attrs { } } -pub fn register_attrs(globals: &mut GlobalsBuilder) { +pub(crate) fn register_attrs(globals: &mut GlobalsBuilder) { globals.set("attrs", globals.frozen_heap().alloc_simple(Attrs)); register_attr_type(globals); } - -#[cfg(test)] -mod test { - use super::*; - use crate::interpreter::testing::Tester; - - #[test] - fn test_attr_display() -> anyhow::Result<()> { - let mut tester = Tester::new().unwrap(); - tester.additional_globals(register_attrs); - tester.run_starlark_bzl_test(r#" -def assert_eq(a, b): - if a != b: - fail(a + " != " + b) - -assert_eq(repr(attrs.bool(default = True)), "attrs.bool(default=True)") -assert_eq(repr(attrs.string()), "attrs.string()") -assert_eq(repr(attrs.list(attrs.string())), "attrs.list(attrs.string())") -assert_eq(repr(attrs.dict(attrs.string(), attrs.string())), "attrs.dict(attrs.string(), attrs.string(), sorted=False)") -assert_eq(repr(attrs.one_of(attrs.string())), "attrs.one_of(attrs.string())") -assert_eq(repr(attrs.tuple(attrs.string())), "attrs.tuple(attrs.string())") -assert_eq(repr(attrs.option(attrs.string())), "attrs.option(attrs.string())") - -def test(): pass -"#)?; - Ok(()) - } -} diff --git a/app/buck2_interpreter_for_build/src/attrs/coerce/mod.rs b/app/buck2_interpreter_for_build/src/attrs/coerce.rs similarity index 100% rename from app/buck2_interpreter_for_build/src/attrs/coerce/mod.rs rename to app/buck2_interpreter_for_build/src/attrs/coerce.rs diff --git a/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type.rs 
b/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type.rs new file mode 100644 index 0000000000000..ae58b8dfcd11e --- /dev/null +++ b/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type.rs @@ -0,0 +1,161 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use buck2_node::attrs::attr_type::AttrType; +use buck2_node::attrs::attr_type::AttrTypeInner; +use buck2_node::attrs::coerced_attr::CoercedAttr; +use buck2_node::attrs::coercion_context::AttrCoercionContext; +use buck2_node::attrs::configurable::AttrIsConfigurable; +use starlark::values::Value; + +use crate::attrs::coerce::attr_type::ty_maybe_select::TyMaybeSelect; +use crate::attrs::coerce::coerced_attr::CoercedAttrExr; +use crate::attrs::coerce::AttrTypeCoerce; + +pub mod any; +pub mod arg; +pub mod bool; +pub mod configuration_dep; +pub mod dep; +mod dict; +mod enumeration; +pub mod int; +pub mod label; +mod list; +mod metadata; +mod one_of; +mod option; +pub mod plugin_dep; +pub mod query; +pub mod source; +pub mod split_transition_dep; +mod string; +mod target_modifiers; +mod tuple; +pub(crate) mod ty_maybe_select; +mod visibility; +mod within_view; + +pub trait AttrTypeExt { + fn this(&self) -> &AttrType; + + fn coerce_item( + &self, + configurable: AttrIsConfigurable, + ctx: &dyn AttrCoercionContext, + value: Value, + ) -> anyhow::Result { + self.this().0.inner.coerce_item(configurable, ctx, value) + } + + fn coerce( + &self, + configurable: AttrIsConfigurable, + ctx: &dyn AttrCoercionContext, + value: Value, + ) -> anyhow::Result { + self.coerce_with_default(configurable, ctx, value, None) + } + + fn coerce_with_default( + &self, + configurable: AttrIsConfigurable, + ctx: &dyn AttrCoercionContext, + value: Value, + default: Option<&CoercedAttr>, + ) -> anyhow::Result { + CoercedAttr::coerce(self.this(), configurable, ctx, value, default) + } + + fn starlark_type(&self) -> TyMaybeSelect { + self.this().0.inner.starlark_type() + } +} + +impl AttrTypeExt for AttrType { + fn this(&self) -> &AttrType { + self + } +} + +pub trait AttrTypeInnerExt { + fn coerce_item( + &self, + configurable: AttrIsConfigurable, + ctx: &dyn AttrCoercionContext, + value: Value, + ) -> anyhow::Result; + + fn starlark_type(&self) -> TyMaybeSelect; +} + +impl AttrTypeInnerExt for AttrTypeInner { + fn coerce_item( + &self, + configurable: AttrIsConfigurable, + ctx: &dyn AttrCoercionContext, + value: Value, + ) -> anyhow::Result { + match self { + Self::Any(x) => x.coerce_item(configurable, ctx, value), + Self::Arg(x) => x.coerce_item(configurable, ctx, value), + Self::Bool(x) => x.coerce_item(configurable, ctx, value), + Self::Int(x) => x.coerce_item(configurable, ctx, value), + Self::Dep(x) => x.coerce_item(configurable, ctx, value), + Self::Dict(x) => x.coerce_item(configurable, ctx, value), + Self::List(x) => x.coerce_item(configurable, ctx, value), + Self::Tuple(x) => x.coerce_item(configurable, ctx, value), + Self::OneOf(x) => x.coerce_item(configurable, ctx, value), + Self::Option(x) => x.coerce_item(configurable, ctx, value), + Self::Source(x) => x.coerce_item(configurable, ctx, value), + Self::String(x) => x.coerce_item(configurable, ctx, value), + Self::Query(x) => x.coerce_item(configurable, ctx, value), + Self::ConfigurationDep(x) => x.coerce_item(configurable, 
ctx, value), + Self::ConfiguredDep(x) => x.coerce_item(configurable, ctx, value), + Self::PluginDep(x) => x.coerce_item(configurable, ctx, value), + Self::Enum(x) => x.coerce_item(configurable, ctx, value), + Self::SplitTransitionDep(x) => x.coerce_item(configurable, ctx, value), + Self::Label(x) => x.coerce_item(configurable, ctx, value), + Self::Visibility(x) => x.coerce_item(configurable, ctx, value), + Self::WithinView(x) => x.coerce_item(configurable, ctx, value), + Self::Metadata(x) => x.coerce_item(configurable, ctx, value), + Self::TargetModifiers(x) => x.coerce_item(configurable, ctx, value), + } + } + + /// Returns a starlark-compatible typing string, e.g. `[str.type]` for values coerced by this + /// attr. + fn starlark_type(&self) -> TyMaybeSelect { + match self { + AttrTypeInner::Any(x) => x.starlark_type(), + AttrTypeInner::Arg(x) => x.starlark_type(), + AttrTypeInner::ConfigurationDep(x) => x.starlark_type(), + AttrTypeInner::ConfiguredDep(x) => x.starlark_type(), + AttrTypeInner::Bool(x) => x.starlark_type(), + AttrTypeInner::Int(x) => x.starlark_type(), + AttrTypeInner::Dep(x) => x.starlark_type(), + AttrTypeInner::Dict(x) => x.starlark_type(), + AttrTypeInner::Enum(x) => x.starlark_type(), + AttrTypeInner::List(x) => x.starlark_type(), + AttrTypeInner::Tuple(x) => x.starlark_type(), + AttrTypeInner::OneOf(x) => x.starlark_type(), + AttrTypeInner::Option(x) => x.starlark_type(), + AttrTypeInner::Query(x) => x.starlark_type(), + AttrTypeInner::PluginDep(x) => x.starlark_type(), + AttrTypeInner::Source(x) => x.starlark_type(), + AttrTypeInner::String(x) => x.starlark_type(), + AttrTypeInner::SplitTransitionDep(x) => x.starlark_type(), + AttrTypeInner::Label(x) => x.starlark_type(), + AttrTypeInner::Visibility(x) => x.starlark_type(), + AttrTypeInner::WithinView(x) => x.starlark_type(), + AttrTypeInner::Metadata(x) => x.starlark_type(), + AttrTypeInner::TargetModifiers(x) => x.starlark_type(), + } + } +} diff --git a/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/any.rs b/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/any.rs index 5c0d7dcac0bd2..0b51dcad7add2 100644 --- a/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/any.rs +++ b/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/any.rs @@ -22,11 +22,12 @@ use starlark::values::list::ListRef; use starlark::values::tuple::TupleRef; use starlark::values::UnpackValue; use starlark::values::Value; +use starlark::StarlarkResultExt; use crate::attrs::coerce::attr_type::ty_maybe_select::TyMaybeSelect; use crate::attrs::coerce::AttrTypeCoerce; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum AnyError { #[error("Cannot coerce value of type `{0}` to any: `{1}`")] CannotCoerce(&'static str, String), @@ -37,7 +38,7 @@ fn to_literal(value: Value, ctx: &dyn AttrCoercionContext) -> anyhow::Result> = Lazy::new(|| hashset!["classpath_abi", "maven_coords", "output", "query_paths",]); -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum MacroError { #[error("Expected a single target label argument. Got `[{}]`", (.0).join(", "))] ExpectedSingleTargetArgument(Vec), + #[error("Expected a single path argument. 
Got `{}`", (.0).join(", "))] + ExpectedSinglePathArgument(Vec), #[error("Incorrect number of args to macro `{0}` (had {1} args)")] InvalidNumberOfArgs(String, usize), } @@ -91,6 +92,7 @@ impl AttrTypeCoerce for ArgAttrType { } "exe" => UnconfiguredMacro::new_exe(ctx, args, true)?, "exe_target" => UnconfiguredMacro::new_exe(ctx, args, false)?, + "source" => UnconfiguredMacro::new_source(ctx, args)?, "query_outputs" | "query_targets" | "query_targets_and_outputs" => { UnconfiguredMacro::new_query(ctx, ¯o_type, args)? } @@ -153,6 +155,20 @@ pub trait UnconfiguredMacroExt { }) } + fn new_source( + ctx: &dyn AttrCoercionContext, + args: Vec, + ) -> anyhow::Result { + if args.len() != 1 { + return Err(anyhow::anyhow!(MacroError::ExpectedSinglePathArgument( + args + ))); + } + + ctx.coerce_path(&args[0], /* allow_directory */ true) + .map(UnconfiguredMacro::Source) + } + fn new_user_keyed_placeholder( ctx: &dyn AttrCoercionContext, var_name: String, @@ -220,14 +236,15 @@ impl UnconfiguredMacroExt for UnconfiguredMacro {} #[cfg(test)] mod tests { use buck2_core::configuration::data::ConfigurationData; - use buck2_core::target::label::TargetLabel; + use buck2_core::package::PackageLabel; + use buck2_core::target::label::label::TargetLabel; use buck2_node::attrs::attr_type::AttrType; use buck2_node::attrs::coerced_deps_collector::CoercedDepsCollector; - use buck2_node::attrs::configurable::AttrIsConfigurable; use buck2_node::attrs::configuration_context::AttrConfigurationContext; - use buck2_node::attrs::configured_info::ConfiguredAttrInfo; + use buck2_node::attrs::configured_attr_info_for_tests::ConfiguredAttrInfoForTests; use buck2_node::attrs::display::AttrDisplayWithContextExt; use buck2_node::attrs::testing::configuration_ctx; + use dupe::Dupe; use gazebo::prelude::SliceExt; use starlark::environment::GlobalsBuilder; use starlark::environment::Module; @@ -248,7 +265,7 @@ mod tests { type DepsType = TargetLabel; fn get_deps(&self) -> anyhow::Result> { let mut visitor = CoercedDepsCollector::new(); - self.traverse(&mut visitor)?; + self.traverse(&mut visitor, PackageLabel::testing_new("root", ""))?; let CoercedDepsCollector { deps, exec_deps, .. } = visitor; @@ -272,7 +289,7 @@ mod tests { assert_eq!( format!( r#""$(exe root//:foo ({})) $(location root//:bar ({}))""#, - configuration_ctx().exec_cfg(), + configuration_ctx().exec_cfg()?, ConfigurationData::testing_new(), ), configured.as_display_no_ctx().to_string(), @@ -291,9 +308,9 @@ mod tests { let configured = location.configure(&configuration_ctx())?; if let MacroBase::Location(target) = &configured { - let mut info = ConfiguredAttrInfo::new(); - configured.traverse(&mut info)?; - assert_eq!(smallset![target.clone()], info.deps); + let mut info = ConfiguredAttrInfoForTests::new(); + configured.traverse(&mut info, PackageLabel::testing_new("root", ""))?; + assert_eq!(smallset![target.dupe()], info.deps); } else { return Err(anyhow::anyhow!("Expected Location")); } @@ -313,10 +330,10 @@ mod tests { let configured = exe.configure(&config_ctx)?; if let MacroBase::Exe { label, .. 
} = &configured { - let mut info = ConfiguredAttrInfo::new(); - configured.traverse(&mut info)?; - assert_eq!(label.cfg(), config_ctx.exec_cfg().cfg()); - assert_eq!(smallset![label.clone()], info.execution_deps); + let mut info = ConfiguredAttrInfoForTests::new(); + configured.traverse(&mut info, PackageLabel::testing_new("root", ""))?; + assert_eq!(label.cfg(), config_ctx.exec_cfg()?.cfg()); + assert_eq!(smallset![label.dupe()], info.execution_deps); assert_eq!(smallset![], info.deps); } else { return Err(anyhow::anyhow!("Expected Exe")); @@ -337,11 +354,11 @@ mod tests { let configured = exe.configure(&config_ctx)?; if let MacroBase::Exe { label, .. } = &configured { - let mut info = ConfiguredAttrInfo::new(); - configured.traverse(&mut info)?; + let mut info = ConfiguredAttrInfoForTests::new(); + configured.traverse(&mut info, PackageLabel::testing_new("root", ""))?; assert_eq!(label.cfg(), config_ctx.cfg().cfg()); assert_eq!(smallset![], info.execution_deps); - assert_eq!(smallset![label.clone()], info.deps); + assert_eq!(smallset![label.dupe()], info.deps); } else { return Err(anyhow::anyhow!("Expected Exe")); } diff --git a/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/configuration_dep.rs b/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/configuration_dep.rs index 31baca90388e5..232f52e5a12b1 100644 --- a/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/configuration_dep.rs +++ b/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/configuration_dep.rs @@ -11,6 +11,7 @@ use buck2_node::attrs::attr_type::configuration_dep::ConfigurationDepAttrType; use buck2_node::attrs::coerced_attr::CoercedAttr; use buck2_node::attrs::coercion_context::AttrCoercionContext; use buck2_node::attrs::configurable::AttrIsConfigurable; +use buck2_node::configuration::resolved::ConfigurationSettingKey; use starlark::typing::Ty; use starlark::values::string::STRING_TYPE; use starlark::values::Value; @@ -31,7 +32,7 @@ impl AttrTypeCoerce for ConfigurationDepAttrType { .ok_or_else(|| anyhow::anyhow!(CoercionError::type_error(STRING_TYPE, value)))?; ctx.coerce_target_label(label) - .map(CoercedAttr::ConfigurationDep) + .map(|t| CoercedAttr::ConfigurationDep(ConfigurationSettingKey(t))) } fn starlark_type(&self) -> TyMaybeSelect { diff --git a/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/dep.rs b/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/dep.rs index 195686eadb9f6..2d09e6030a704 100644 --- a/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/dep.rs +++ b/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/dep.rs @@ -18,6 +18,7 @@ use starlark::typing::Ty; use starlark::values::string::STRING_TYPE; use starlark::values::UnpackValue; use starlark::values::Value; +use starlark::StarlarkResultExt; use crate::attrs::coerce::attr_type::ty_maybe_select::TyMaybeSelect; use crate::attrs::coerce::error::CoercionError; @@ -52,6 +53,7 @@ impl AttrTypeCoerce for ExplicitConfiguredDepAttrType { value: Value, ) -> anyhow::Result { let (label_value, platform_value): (Value, Value) = UnpackValue::unpack_value(value) + .into_anyhow_result()? 
.ok_or_else(|| { anyhow::anyhow!(CoercionError::type_error( "Tuple must be a pair of two strings", diff --git a/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/int.rs b/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/int.rs index 46ce53cdbd185..03971f8a7eb33 100644 --- a/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/int.rs +++ b/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/int.rs @@ -14,6 +14,7 @@ use buck2_node::attrs::configurable::AttrIsConfigurable; use starlark::typing::Ty; use starlark::values::UnpackValue; use starlark::values::Value; +use starlark::StarlarkResultExt; use crate::attrs::coerce::attr_type::ty_maybe_select::TyMaybeSelect; use crate::attrs::coerce::error::CoercionError; @@ -26,7 +27,7 @@ impl AttrTypeCoerce for IntAttrType { _ctx: &dyn AttrCoercionContext, value: Value, ) -> anyhow::Result { - match i64::unpack_value(value) { + match i64::unpack_value(value).into_anyhow_result()? { Some(x) => Ok(CoercedAttr::Int(x)), None => Err(anyhow::anyhow!(CoercionError::type_error("int", value))), } diff --git a/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/metadata.rs b/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/metadata.rs index 829e18375487f..ad427c51e4569 100644 --- a/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/metadata.rs +++ b/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/metadata.rs @@ -8,6 +8,7 @@ */ use anyhow::Context as _; +use buck2_error::internal_error_anyhow; use buck2_interpreter::types::opaque_metadata::OpaqueMetadata; use buck2_node::attrs::attr_type::metadata::MetadataAttrType; use buck2_node::attrs::coerced_attr::CoercedAttr; @@ -16,21 +17,20 @@ use buck2_node::attrs::configurable::AttrIsConfigurable; use buck2_node::metadata::key::MetadataKey; use buck2_node::metadata::key::MetadataKeyRef; use buck2_node::metadata::map::MetadataMap; +use buck2_node::metadata::value::MetadataValue; use starlark::values::dict::Dict; use starlark::values::dict::DictRef; use starlark::values::string::STRING_TYPE; use starlark::values::type_repr::StarlarkTypeRepr; use starlark::values::Value; -use starlark_map::ordered_map::OrderedMap; +use starlark_map::small_map::SmallMap; use crate::attrs::coerce::attr_type::ty_maybe_select::TyMaybeSelect; use crate::attrs::coerce::error::CoercionError; use crate::attrs::coerce::AttrTypeCoerce; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum MetadataAttrTypeCoerceError { - #[error("Metadata attribute is not configurable (internal error)")] - AttrTypeNotConfigurable, #[error( "Metadata attribute with key {} is not convertible to JSON: {}", .key, @@ -47,7 +47,9 @@ impl AttrTypeCoerce for MetadataAttrType { value: Value, ) -> anyhow::Result { if configurable == AttrIsConfigurable::Yes { - return Err(MetadataAttrTypeCoerceError::AttrTypeNotConfigurable.into()); + return Err(internal_error_anyhow!( + "Metadata attribute is not configurable" + )); } let dict = match DictRef::from_value(value) { @@ -55,7 +57,7 @@ impl AttrTypeCoerce for MetadataAttrType { None => return Err(CoercionError::type_error(Dict::TYPE, value).into()), }; - let mut map = OrderedMap::with_capacity(dict.len()); + let mut map = SmallMap::with_capacity(dict.len()); for (key, value) in dict.iter() { let key = match key.unpack_str() { Some(k) => k, @@ -71,7 +73,7 @@ impl AttrTypeCoerce for MetadataAttrType { } })?; - map.insert(key.to_owned(), value); + map.insert(key.to_owned(), MetadataValue::new(value)); } 
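Two mechanical migrations recur through the attr-type hunks here. First, `unpack_value` results are threaded through `StarlarkResultExt::into_anyhow_result`, because unpacking now yields a starlark `Result` wrapping the `Option`, so evaluation failure and "wrong type" are distinct cases. Second, one-off "(internal error)" enum variants give way to `buck2_error::internal_error_anyhow!`. A self-contained sketch of the first shape, with stand-in names; only `into_anyhow_result` and `internal_error_anyhow!` themselves come from the diff:

// `unpack_int` plays the role of `i64::unpack_value(value).into_anyhow_result()?`.
type UnpackResult<T> = anyhow::Result<Option<T>>;

fn unpack_int(raw: &str) -> UnpackResult<i64> {
    if raw.is_empty() {
        // Evaluation-level failure: propagated with `?` at the call site.
        anyhow::bail!("empty input");
    }
    // A value of the wrong type is not an error at this layer: it is `Ok(None)`.
    Ok(raw.parse::<i64>().ok())
}

fn coerce_int(raw: &str) -> anyhow::Result<i64> {
    match unpack_int(raw)? {
        Some(x) => Ok(x),
        // Only here does "not an int" become a user-facing coercion error.
        None => anyhow::bail!("expected an int, got `{raw}`"),
    }
}

fn main() -> anyhow::Result<()> {
    assert_eq!(42, coerce_int("42")?);
    assert!(coerce_int("not-an-int").is_err());
    Ok(())
}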
Ok(CoercedAttr::Metadata(MetadataMap::new(map))) diff --git a/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/mod.rs b/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/mod.rs deleted file mode 100644 index 303d2a1156b8a..0000000000000 --- a/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/mod.rs +++ /dev/null @@ -1,158 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use buck2_node::attrs::attr_type::AttrType; -use buck2_node::attrs::attr_type::AttrTypeInner; -use buck2_node::attrs::coerced_attr::CoercedAttr; -use buck2_node::attrs::coercion_context::AttrCoercionContext; -use buck2_node::attrs::configurable::AttrIsConfigurable; -use starlark::values::Value; - -use crate::attrs::coerce::attr_type::ty_maybe_select::TyMaybeSelect; -use crate::attrs::coerce::coerced_attr::CoercedAttrExr; -use crate::attrs::coerce::AttrTypeCoerce; - -pub mod any; -pub mod arg; -pub mod bool; -pub mod configuration_dep; -pub mod dep; -mod dict; -mod enumeration; -pub mod int; -pub mod label; -mod list; -mod metadata; -mod one_of; -mod option; -pub mod plugin_dep; -pub mod query; -pub mod source; -pub mod split_transition_dep; -mod string; -mod tuple; -pub(crate) mod ty_maybe_select; -mod visibility; -mod within_view; - -pub trait AttrTypeExt { - fn this(&self) -> &AttrType; - - fn coerce_item( - &self, - configurable: AttrIsConfigurable, - ctx: &dyn AttrCoercionContext, - value: Value, - ) -> anyhow::Result { - self.this().0.coerce_item(configurable, ctx, value) - } - - fn coerce( - &self, - configurable: AttrIsConfigurable, - ctx: &dyn AttrCoercionContext, - value: Value, - ) -> anyhow::Result { - self.coerce_with_default(configurable, ctx, value, None) - } - - fn coerce_with_default( - &self, - configurable: AttrIsConfigurable, - ctx: &dyn AttrCoercionContext, - value: Value, - default: Option<&CoercedAttr>, - ) -> anyhow::Result { - CoercedAttr::coerce(self.this(), configurable, ctx, value, default) - } - - fn starlark_type(&self) -> TyMaybeSelect { - self.this().0.starlark_type() - } -} - -impl AttrTypeExt for AttrType { - fn this(&self) -> &AttrType { - self - } -} - -pub trait AttrTypeInnerExt { - fn coerce_item( - &self, - configurable: AttrIsConfigurable, - ctx: &dyn AttrCoercionContext, - value: Value, - ) -> anyhow::Result; - - fn starlark_type(&self) -> TyMaybeSelect; -} - -impl AttrTypeInnerExt for AttrTypeInner { - fn coerce_item( - &self, - configurable: AttrIsConfigurable, - ctx: &dyn AttrCoercionContext, - value: Value, - ) -> anyhow::Result { - match self { - Self::Any(x) => x.coerce_item(configurable, ctx, value), - Self::Arg(x) => x.coerce_item(configurable, ctx, value), - Self::Bool(x) => x.coerce_item(configurable, ctx, value), - Self::Int(x) => x.coerce_item(configurable, ctx, value), - Self::Dep(x) => x.coerce_item(configurable, ctx, value), - Self::Dict(x) => x.coerce_item(configurable, ctx, value), - Self::List(x) => x.coerce_item(configurable, ctx, value), - Self::Tuple(x) => x.coerce_item(configurable, ctx, value), - Self::OneOf(x) => x.coerce_item(configurable, ctx, value), - Self::Option(x) => x.coerce_item(configurable, ctx, value), - Self::Source(x) => x.coerce_item(configurable, ctx, value), - Self::String(x) => x.coerce_item(configurable, ctx, value), - Self::Query(x) 
=> x.coerce_item(configurable, ctx, value), - Self::ConfigurationDep(x) => x.coerce_item(configurable, ctx, value), - Self::ConfiguredDep(x) => x.coerce_item(configurable, ctx, value), - Self::PluginDep(x) => x.coerce_item(configurable, ctx, value), - Self::Enum(x) => x.coerce_item(configurable, ctx, value), - Self::SplitTransitionDep(x) => x.coerce_item(configurable, ctx, value), - Self::Label(x) => x.coerce_item(configurable, ctx, value), - Self::Visibility(x) => x.coerce_item(configurable, ctx, value), - Self::WithinView(x) => x.coerce_item(configurable, ctx, value), - Self::Metadata(x) => x.coerce_item(configurable, ctx, value), - } - } - - /// Returns a starlark-compatible typing string, e.g. `[str.type]` for values coerced by this - /// attr. - fn starlark_type(&self) -> TyMaybeSelect { - match self { - AttrTypeInner::Any(x) => x.starlark_type(), - AttrTypeInner::Arg(x) => x.starlark_type(), - AttrTypeInner::ConfigurationDep(x) => x.starlark_type(), - AttrTypeInner::ConfiguredDep(x) => x.starlark_type(), - AttrTypeInner::Bool(x) => x.starlark_type(), - AttrTypeInner::Int(x) => x.starlark_type(), - AttrTypeInner::Dep(x) => x.starlark_type(), - AttrTypeInner::Dict(x) => x.starlark_type(), - AttrTypeInner::Enum(x) => x.starlark_type(), - AttrTypeInner::List(x) => x.starlark_type(), - AttrTypeInner::Tuple(x) => x.starlark_type(), - AttrTypeInner::OneOf(x) => x.starlark_type(), - AttrTypeInner::Option(x) => x.starlark_type(), - AttrTypeInner::Query(x) => x.starlark_type(), - AttrTypeInner::PluginDep(x) => x.starlark_type(), - AttrTypeInner::Source(x) => x.starlark_type(), - AttrTypeInner::String(x) => x.starlark_type(), - AttrTypeInner::SplitTransitionDep(x) => x.starlark_type(), - AttrTypeInner::Label(x) => x.starlark_type(), - AttrTypeInner::Visibility(x) => x.starlark_type(), - AttrTypeInner::WithinView(x) => x.starlark_type(), - AttrTypeInner::Metadata(x) => x.starlark_type(), - } - } -} diff --git a/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/source.rs b/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/source.rs index b0a7610ad55f6..c24215102a848 100644 --- a/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/source.rs +++ b/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/source.rs @@ -20,7 +20,8 @@ use crate::attrs::coerce::attr_type::ty_maybe_select::TyMaybeSelect; use crate::attrs::coerce::error::CoercionError; use crate::attrs::coerce::AttrTypeCoerce; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] +#[buck2(input)] enum SourceLabelCoercionError { #[error( "Couldn't coerce `{0}` as a source.\n Error when treated as a target: {1:#}\n Error when treated as a path: {2:#}" @@ -45,6 +46,12 @@ impl AttrTypeCoerce for SourceAttrType { let source_label = value .unpack_str() .ok_or_else(|| anyhow::anyhow!(CoercionError::type_error(STRING_TYPE, value)))?; + // FIXME(JakobDegen): We should not be recovering from an `Err` here. Two reasons: + // 1. This codepath is at least one of the reasons that running buck with `RUST_BACKTRACE=1` + // is slow, since producing an anyhow error is quite expensive. + // 2. For source attrs, we should have simpler rules for whether a string is interpreted as + // a label or as a path than whether or not this errors. 
This can error for all kinds of + // reasons match ctx.coerce_providers_label(source_label) { Ok(label) => Ok(CoercedAttr::SourceLabel(label)), Err(label_err) => { diff --git a/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/target_modifiers.rs b/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/target_modifiers.rs new file mode 100644 index 0000000000000..b26407a84cf05 --- /dev/null +++ b/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/target_modifiers.rs @@ -0,0 +1,59 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use anyhow::Context as _; +use buck2_error::internal_error_anyhow; +use buck2_interpreter::types::opaque_metadata::OpaqueMetadata; +use buck2_node::attrs::attr_type::target_modifiers::TargetModifiersAttrType; +use buck2_node::attrs::coerced_attr::CoercedAttr; +use buck2_node::attrs::coercion_context::AttrCoercionContext; +use buck2_node::attrs::configurable::AttrIsConfigurable; +use buck2_node::attrs::values::TargetModifiersValue; +use starlark::values::type_repr::StarlarkTypeRepr; +use starlark::values::Value; + +use crate::attrs::coerce::attr_type::ty_maybe_select::TyMaybeSelect; +use crate::attrs::coerce::AttrTypeCoerce; + +#[derive(Debug, buck2_error::Error)] +enum TargetModifiersAttrTypeCoerceError { + #[error( + "Target modifiers attribute is not convertible to JSON: {}", + .value + )] + ValueIsNotJson { value: String }, +} + +impl AttrTypeCoerce for TargetModifiersAttrType { + fn coerce_item( + &self, + configurable: AttrIsConfigurable, + _ctx: &dyn AttrCoercionContext, + value: Value, + ) -> anyhow::Result { + if configurable == AttrIsConfigurable::Yes { + return Err(internal_error_anyhow!( + "modifiers attribute is not configurable" + )); + } + let value = value.to_json_value().with_context(|| { + TargetModifiersAttrTypeCoerceError::ValueIsNotJson { + value: value.to_repr(), + } + })?; + + Ok(CoercedAttr::TargetModifiers(TargetModifiersValue::new( + value, + ))) + } + + fn starlark_type(&self) -> TyMaybeSelect { + TyMaybeSelect::Basic(OpaqueMetadata::starlark_type_repr()) + } +} diff --git a/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/ty_maybe_select.rs b/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/ty_maybe_select.rs index 2128abce41628..100d43b885adf 100644 --- a/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/ty_maybe_select.rs +++ b/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/ty_maybe_select.rs @@ -34,7 +34,6 @@ impl TyMaybeSelect { } } - #[allow(dead_code)] // TODO(nga): use. 
pub(crate) fn to_ty_with_select(&self) -> Ty { fn with_select(ty: Ty) -> Ty { Ty::union2(ty, StarlarkSelectorGen::::starlark_type_repr()) diff --git a/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/visibility.rs b/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/visibility.rs index 53057a93a198f..cdd5f63ea14e5 100644 --- a/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/visibility.rs +++ b/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/visibility.rs @@ -22,7 +22,7 @@ use crate::attrs::coerce::attr_type::AttrTypeExt; use crate::attrs::coerce::AttrTypeCoerce; use crate::interpreter::selector::StarlarkSelector; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum VisibilityAttrTypeCoerceError { #[error("Visibility attribute is not configurable (internal error)")] AttrTypeNotConfigurable, diff --git a/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/within_view.rs b/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/within_view.rs index fc2ed70c857b5..445d81f2781c6 100644 --- a/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/within_view.rs +++ b/app/buck2_interpreter_for_build/src/attrs/coerce/attr_type/within_view.rs @@ -7,6 +7,7 @@ * of this source tree. */ +use buck2_error::internal_error_anyhow; use buck2_node::attrs::attr_type::visibility::VisibilityAttrType; use buck2_node::attrs::attr_type::within_view::WithinViewAttrType; use buck2_node::attrs::coerced_attr::CoercedAttr; @@ -18,12 +19,6 @@ use crate::attrs::coerce::attr_type::ty_maybe_select::TyMaybeSelect; use crate::attrs::coerce::attr_type::visibility::parse_visibility_with_view; use crate::attrs::coerce::AttrTypeCoerce; -#[derive(Debug, thiserror::Error)] -enum WithinViewAttrTypeCoerceError { - #[error("Within view attribute is not configurable (internal error)")] - AttrTypeNotConfigurable, -} - impl AttrTypeCoerce for WithinViewAttrType { fn coerce_item( &self, @@ -32,7 +27,9 @@ impl AttrTypeCoerce for WithinViewAttrType { value: Value, ) -> anyhow::Result { if configurable == AttrIsConfigurable::Yes { - return Err(WithinViewAttrTypeCoerceError::AttrTypeNotConfigurable.into()); + return Err(internal_error_anyhow!( + "Within view attribute is not configurable" + )); } Ok(CoercedAttr::WithinView( parse_visibility_with_view(ctx, value)?.build_within_view(), diff --git a/app/buck2_interpreter_for_build/src/attrs/coerce/coerced_attr.rs b/app/buck2_interpreter_for_build/src/attrs/coerce/coerced_attr.rs index 5665cdc5cd72e..0a1c3a92102aa 100644 --- a/app/buck2_interpreter_for_build/src/attrs/coerce/coerced_attr.rs +++ b/app/buck2_interpreter_for_build/src/attrs/coerce/coerced_attr.rs @@ -10,20 +10,22 @@ //! Contains the internal support within the attribute framework for `select()`. 
use anyhow::Context; +use buck2_error::internal_error_anyhow; use buck2_node::attrs::attr_type::AttrType; use buck2_node::attrs::coerced_attr::CoercedAttr; use buck2_node::attrs::coerced_attr::CoercedSelector; use buck2_node::attrs::coercion_context::AttrCoercionContext; use buck2_node::attrs::configurable::AttrIsConfigurable; +use buck2_node::configuration::resolved::ConfigurationSettingKey; use starlark::values::dict::DictRef; use starlark::values::Value; -use thiserror::Error; use crate::attrs::coerce::attr_type::AttrTypeExt; use crate::interpreter::selector::StarlarkSelector; use crate::interpreter::selector::StarlarkSelectorGen; -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] +#[buck2(input)] enum SelectError { #[error("select() condition was not a string, got `{0}`.")] KeyNotString(String), @@ -31,16 +33,14 @@ enum SelectError { ValueNotDict(String), #[error("addition not supported for this attribute type `{0}`, got `{1}`.")] ConcatNotSupported(String, String), - #[error("select() cannot be used in non-configuable attribute")] + #[error("select() cannot be used in non-configurable attribute")] SelectCannotBeUsedForNonConfigurableAttr, - #[error("duplicate `\"DEFAULT\"` key in `select()` (internal error)")] - DuplicateDefaultKey, } pub trait CoercedAttrExr: Sized { fn coerce( attr: &AttrType, - configuable: AttrIsConfigurable, + configurable: AttrIsConfigurable, ctx: &dyn AttrCoercionContext, value: Value, default_attr: Option<&Self>, @@ -50,7 +50,7 @@ pub trait CoercedAttrExr: Sized { impl CoercedAttrExr for CoercedAttr { fn coerce( attr: &AttrType, - configuable: AttrIsConfigurable, + configurable: AttrIsConfigurable, ctx: &dyn AttrCoercionContext, value: Value, default_attr: Option<&Self>, @@ -67,12 +67,12 @@ impl CoercedAttrExr for CoercedAttr { // are actually compatible (i.e. 
selectable can ensure that both sides are // lists, we can ensure that both sides are List) if let Some(selector) = StarlarkSelector::from_value(value) { - if let AttrIsConfigurable::No = configuable { + if let AttrIsConfigurable::No = configurable { return Err(SelectError::SelectCannotBeUsedForNonConfigurableAttr.into()); } match *selector { - StarlarkSelectorGen::Inner(v) => { + StarlarkSelectorGen::Primary(v) => { if let Some(dict) = DictRef::from_value(v) { let has_default = dict.get_str("DEFAULT").is_some(); let mut entries = @@ -85,16 +85,18 @@ impl CoercedAttrExr for CoercedAttr { })?; let v = match default_attr { Some(default_attr) if v.is_none() => default_attr.clone(), - _ => CoercedAttr::coerce(attr, configuable, ctx, v, None)?, + _ => CoercedAttr::coerce(attr, configurable, ctx, v, None)?, }; if k == "DEFAULT" { if default.is_some() { - return Err(SelectError::DuplicateDefaultKey.into()); + return Err(internal_error_anyhow!( + "duplicate `\"DEFAULT\"` key in `select()`" + )); } default = Some(v); } else { let target = ctx.coerce_target_label(k)?; - entries.push((target, v)); + entries.push((ConfigurationSettingKey(target), v)); } } @@ -108,19 +110,19 @@ impl CoercedAttrExr for CoercedAttr { Err(anyhow::anyhow!(SelectError::ValueNotDict(v.to_repr()))) } } - StarlarkSelectorGen::Added(l, r) => { + StarlarkSelectorGen::Sum(l, r) => { if !attr.supports_concat() { return Err(anyhow::anyhow!(SelectError::ConcatNotSupported( attr.to_string(), format!("{} + {}", l, r) ))); } - let l = CoercedAttr::coerce(attr, configuable, ctx, l, None)?; + let l = CoercedAttr::coerce(attr, configurable, ctx, l, None)?; let mut l = match l { CoercedAttr::Concat(l) => l.into_vec(), l => vec![l], }; - let r = CoercedAttr::coerce(attr, configuable, ctx, r, None)?; + let r = CoercedAttr::coerce(attr, configurable, ctx, r, None)?; let r = match r { CoercedAttr::Concat(r) => r.into_vec(), r => vec![r], @@ -132,258 +134,8 @@ impl CoercedAttrExr for CoercedAttr { } } else { Ok(attr - .coerce_item(configuable, ctx, value) + .coerce_item(configurable, ctx, value) .with_context(|| format!("Error coercing {}", value))?) 
} } } - -#[cfg(test)] -mod tests { - use std::collections::BTreeMap; - use std::sync::Arc; - - use buck2_core::configuration::config_setting::ConfigSettingData; - use buck2_core::configuration::constraints::ConstraintKey; - use buck2_core::configuration::constraints::ConstraintValue; - use buck2_core::configuration::data::ConfigurationData; - use buck2_core::configuration::pair::ConfigurationNoExec; - use buck2_core::configuration::pair::ConfigurationWithExec; - use buck2_core::configuration::transition::applied::TransitionApplied; - use buck2_core::configuration::transition::id::TransitionId; - use buck2_core::target::label::TargetLabel; - use buck2_node::attrs::attr_type::bool::BoolLiteral; - use buck2_node::attrs::attr_type::string::StringLiteral; - use buck2_node::attrs::coerced_attr::CoercedAttr; - use buck2_node::attrs::coerced_attr::CoercedSelector; - use buck2_node::attrs::configuration_context::AttrConfigurationContext; - use buck2_node::attrs::fmt_context::AttrFmtContext; - use buck2_util::arc_str::ArcSlice; - use buck2_util::arc_str::ArcStr; - use dupe::Dupe; - use starlark_map::ordered_map::OrderedMap; - - #[test] - fn selector_equals_accounts_for_ordering() { - let s1 = CoercedAttr::Selector(Box::new( - CoercedSelector::new( - ArcSlice::new([ - ( - TargetLabel::testing_parse("cell1//pkg1:target1"), - CoercedAttr::Bool(BoolLiteral(true)), - ), - ( - TargetLabel::testing_parse("cell2//pkg2:target2"), - CoercedAttr::Bool(BoolLiteral(false)), - ), - ]), - None, - ) - .unwrap(), - )); - let s2 = CoercedAttr::Selector(Box::new( - CoercedSelector::new( - ArcSlice::new([ - ( - TargetLabel::testing_parse("cell1//pkg1:target1"), - CoercedAttr::Bool(BoolLiteral(true)), - ), - ( - TargetLabel::testing_parse("cell2//pkg2:target2"), - CoercedAttr::Bool(BoolLiteral(false)), - ), - ]), - None, - ) - .unwrap(), - )); - - assert_eq!(s1 == s2, true); - - let s2 = CoercedAttr::Selector(Box::new( - CoercedSelector::new( - ArcSlice::new([ - ( - TargetLabel::testing_parse("cell2//pkg2:target2"), - CoercedAttr::Bool(BoolLiteral(false)), - ), - ( - TargetLabel::testing_parse("cell1//pkg1:target1"), - CoercedAttr::Bool(BoolLiteral(true)), - ), - ]), - None, - ) - .unwrap(), - )); - - assert_eq!(s1 == s2, false); - } - - #[test] - fn select_the_most_specific() { - struct SelectTestConfigurationContext { - settings: BTreeMap, - } - - impl AttrConfigurationContext for SelectTestConfigurationContext { - fn matches<'a>(&'a self, label: &TargetLabel) -> Option<&'a ConfigSettingData> { - self.settings.get(label) - } - - fn cfg(&self) -> ConfigurationNoExec { - panic!() - } - - fn exec_cfg(&self) -> ConfigurationNoExec { - unimplemented!() - } - - fn toolchain_cfg(&self) -> ConfigurationWithExec { - panic!("not used in test") - } - - fn platform_cfg(&self, _label: &TargetLabel) -> anyhow::Result { - panic!("not used in test") - } - - fn resolved_transitions( - &self, - ) -> &OrderedMap, Arc> { - panic!("not used in test") - } - } - - fn constraint_key(t: &str) -> ConstraintKey { - ConstraintKey(TargetLabel::testing_parse(t)) - } - - fn constraint_value(t: &str) -> ConstraintValue { - ConstraintValue(TargetLabel::testing_parse(t)) - } - - let c_os = constraint_key("config//c:os"); - let c_linux = constraint_value("config//c:linux"); - let c_cpu = constraint_key("config//c:cpu"); - let c_arm64 = constraint_value("config//c:arm64"); - let c_x86_64 = constraint_value("config//c:x86_64"); - - let linux = TargetLabel::testing_parse("config//:linux"); - let linux_arm64 = 
TargetLabel::testing_parse("config//:linux-arm64"); - let linux_x86_64 = TargetLabel::testing_parse("config//:linux-x86_64"); - - let ctx = SelectTestConfigurationContext { - settings: BTreeMap::from_iter([ - ( - linux.dupe(), - ConfigSettingData { - constraints: BTreeMap::from_iter([(c_os.dupe(), c_linux.dupe())]), - buckconfigs: BTreeMap::new(), - }, - ), - ( - linux_arm64.dupe(), - ConfigSettingData { - constraints: BTreeMap::from_iter([ - (c_os.dupe(), c_linux.dupe()), - (c_cpu.dupe(), c_arm64.dupe()), - ]), - buckconfigs: BTreeMap::new(), - }, - ), - ( - linux_x86_64.dupe(), - ConfigSettingData { - constraints: BTreeMap::from_iter([ - (c_os.dupe(), c_linux.dupe()), - (c_cpu.dupe(), c_x86_64.dupe()), - ]), - buckconfigs: BTreeMap::new(), - }, - ), - ]), - }; - - fn literal_true() -> CoercedAttr { - CoercedAttr::Bool(BoolLiteral(true)) - } - fn literal_str() -> CoercedAttr { - CoercedAttr::String(StringLiteral(ArcStr::from("linux"))) - } - - // Test more specific is selected even if it is not first. - let select_entries = Box::new([ - (linux.dupe(), literal_true()), - (linux_x86_64.dupe(), literal_str()), - ]); - assert_eq!( - Some(&literal_str()), - CoercedAttr::select_the_most_specific(&ctx, &*select_entries).unwrap() - ); - - // Test more specific is selected even if it is first. - let select_entries = Box::new([ - (linux_x86_64.dupe(), literal_str()), - (linux.dupe(), literal_true()), - ]); - assert_eq!( - Some(&literal_str()), - CoercedAttr::select_the_most_specific(&ctx, &*select_entries).unwrap() - ); - - // Conflicting keys. - let select_entries = Box::new([ - (linux_arm64.dupe(), literal_true()), - (linux_x86_64.dupe(), literal_str()), - ]); - assert_eq!( - "Both select keys `config//:linux-arm64` and `config//:linux-x86_64` \ - match the configuration, but neither is more specific", - CoercedAttr::select_the_most_specific(&ctx, &*select_entries) - .unwrap_err() - .to_string() - ); - } - - #[test] - fn test_to_json_concat() { - assert_eq!( - r#"{"__type":"concat","items":["a","b","c","d"]}"#, - CoercedAttr::Concat(Box::new([ - CoercedAttr::String(StringLiteral(ArcStr::from("a"))), - CoercedAttr::String(StringLiteral(ArcStr::from("b"))), - CoercedAttr::String(StringLiteral(ArcStr::from("c"))), - CoercedAttr::String(StringLiteral(ArcStr::from("d"))), - ])) - .to_json(&AttrFmtContext::NO_CONTEXT) - .unwrap() - .to_string() - ); - } - - #[test] - fn test_to_json_selector() { - assert_eq!( - r#"{"__type":"selector","entries":{"DEFAULT":"ddd","config//:a":true,"config//:b":10}}"#, - CoercedAttr::Selector(Box::new( - CoercedSelector::new( - ArcSlice::new([ - ( - TargetLabel::testing_parse("config//:a"), - CoercedAttr::Bool(BoolLiteral(true)) - ), - ( - TargetLabel::testing_parse("config//:b"), - CoercedAttr::Int(10) - ), - ]), - Some(CoercedAttr::String(StringLiteral(ArcStr::from("ddd")))), - ) - .unwrap() - )) - .to_json(&AttrFmtContext::NO_CONTEXT) - .unwrap() - .to_string() - ); - } -} diff --git a/app/buck2_interpreter_for_build/src/attrs/coerce/ctx.rs b/app/buck2_interpreter_for_build/src/attrs/coerce/ctx.rs index cb1a95542b9df..d87cb95c1872a 100644 --- a/app/buck2_interpreter_for_build/src/attrs/coerce/ctx.rs +++ b/app/buck2_interpreter_for_build/src/attrs/coerce/ctx.rs @@ -10,24 +10,27 @@ use std::cell::RefCell; use std::fmt; use std::fmt::Debug; +use std::sync::Arc; use buck2_common::package_listing::listing::PackageListing; use buck2_core::cells::name::CellName; +use buck2_core::cells::CellAliasResolver; use buck2_core::cells::CellResolver; use 
buck2_core::package::package_relative_path::PackageRelativePath;
 use buck2_core::package::package_relative_path::PackageRelativePathBuf;
 use buck2_core::package::PackageLabel;
+use buck2_core::pattern::pattern::ParsedPattern;
 use buck2_core::pattern::pattern_type::PatternType;
 use buck2_core::pattern::pattern_type::ProvidersPatternExtra;
 use buck2_core::pattern::pattern_type::TargetPatternExtra;
-use buck2_core::pattern::ParsedPattern;
 use buck2_core::provider::label::ProvidersLabel;
 use buck2_core::soft_error;
-use buck2_core::target::label::TargetLabel;
+use buck2_core::target::label::interner::ConcurrentTargetLabelInterner;
 use buck2_node::attrs::coerced_attr::CoercedAttr;
 use buck2_node::attrs::coerced_path::CoercedDirectory;
 use buck2_node::attrs::coerced_path::CoercedPath;
 use buck2_node::attrs::coercion_context::AttrCoercionContext;
+use buck2_node::configuration::resolved::ConfigurationSettingKey;
 use buck2_node::query::query_functions::CONFIGURED_GRAPH_QUERY_FUNCTIONS;
 use buck2_query::query::syntax::simple::eval::error::QueryError;
 use buck2_query::query::syntax::simple::functions::QueryLiteralVisitor;
@@ -38,14 +41,16 @@
 use buck2_util::arc_str::ArcStr;
 use bumpalo::Bump;
 use dupe::Dupe;
 use dupe::IterDupedExt;
-use hashbrown::raw::RawTable;
+use hashbrown::hash_table;
+use hashbrown::HashTable;
 use tracing::info;

 use super::interner::AttrCoercionInterner;
 use crate::attrs::coerce::arc_str_interner::ArcStrInterner;
 use crate::attrs::coerce::str_hash::str_hash;

-#[derive(Debug, thiserror::Error)]
+#[derive(Debug, buck2_error::Error)]
+#[buck2(input)]
 enum BuildAttrCoercionContextError {
     #[error("Expected a label, got the pattern `{0}`.")]
     RequiredLabel(String),
@@ -66,6 +71,7 @@ pub struct BuildAttrCoercionContext {
     /// Used to coerce targets
     cell_resolver: CellResolver,
     cell_name: CellName,
+    cell_alias_resolver: CellAliasResolver,
     /// Used to resolve relative targets. This is present when a build file
     /// is being evaluated; however, it is absent if an extension file is being
     /// evaluated. The latter case occurs when default values for attributes
@@ -75,11 +81,12 @@ pub struct BuildAttrCoercionContext {
     package_boundary_exception: bool,
     /// Allocator for `label_cache`.
     alloc: Bump,
+    global_label_interner: Arc,
     /// Label coercion cache. We use `RawTable` here because `HashMap` API
     /// requires either computing hash twice (for get, then for insert) or
     /// allocating a key to perform a query using `entry` API.
     /// Strings are owned by `alloc`; using a bump allocator makes evaluation 0.5% faster.
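The comment above is the rationale for the `hash_table::Entry` rewrite later in this hunk: compute the hash once, then reuse it for the lookup, the insertion, and rehash-on-resize. A small self-contained sketch of the pattern with `hashbrown::HashTable` (owned `String` keys here instead of buck2's bump-allocated strings, which is a simplification):

```rust
use std::collections::hash_map::RandomState;
use std::hash::BuildHasher;

use hashbrown::hash_table::Entry;
use hashbrown::HashTable;

// Cache entries carry their hash so the table can rehash on resize
// without touching the key again.
fn get_or_insert(
    table: &mut HashTable<(u64, String, usize)>,
    hasher: &RandomState,
    key: &str,
    make_value: impl FnOnce() -> usize,
) -> usize {
    let hash = hasher.hash_one(key); // hashed exactly once per call
    match table.entry(
        hash,
        |(h, k, _)| *h == hash && k == key, // cheap hash check before string compare
        |(h, _, _)| *h,                     // rehash-on-resize reuses the cached hash
    ) {
        Entry::Occupied(e) => e.get().2,
        Entry::Vacant(e) => {
            let value = make_value();
            e.insert((hash, key.to_owned(), value));
            value
        }
    }
}

fn main() {
    let mut table = HashTable::new();
    let hasher = RandomState::new();
    assert_eq!(get_or_insert(&mut table, &hasher, "cell//pkg:t", || 1), 1);
    // Second lookup hits the cache and never calls make_value.
    assert_eq!(get_or_insert(&mut table, &hasher, "cell//pkg:t", || 2), 1);
}
```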
- label_cache: RefCell>, + label_cache: RefCell>, str_interner: ArcStrInterner, list_interner: AttrCoercionInterner>, // TODO(scottcao): Dict and selects need separate interners right now because @@ -88,7 +95,7 @@ pub struct BuildAttrCoercionContext { // reduce key duplication in selects since select keys are more likely to be deduplicated // than select values dict_interner: AttrCoercionInterner>, - select_interner: AttrCoercionInterner>, + select_interner: AttrCoercionInterner>, } impl Debug for BuildAttrCoercionContext { @@ -102,16 +109,20 @@ impl BuildAttrCoercionContext { fn new( cell_resolver: CellResolver, cell_name: CellName, + cell_alias_resolver: CellAliasResolver, enclosing_package: Option<(PackageLabel, PackageListing)>, package_boundary_exception: bool, + global_label_interner: Arc, ) -> Self { Self { cell_resolver, cell_name, + cell_alias_resolver, enclosing_package, package_boundary_exception, alloc: Bump::new(), - label_cache: RefCell::new(RawTable::new()), + global_label_interner, + label_cache: RefCell::new(HashTable::new()), str_interner: ArcStrInterner::new(), list_interner: AttrCoercionInterner::new(), dict_interner: AttrCoercionInterner::new(), @@ -119,20 +130,36 @@ impl BuildAttrCoercionContext { } } - pub fn new_no_package(cell_resolver: CellResolver, cell_name: CellName) -> Self { - Self::new(cell_resolver, cell_name, None, false) + pub fn new_no_package( + cell_resolver: CellResolver, + cell_name: CellName, + cell_alias_resolver: CellAliasResolver, + global_label_interner: Arc, + ) -> Self { + Self::new( + cell_resolver, + cell_name, + cell_alias_resolver, + None, + false, + global_label_interner, + ) } pub fn new_with_package( cell_resolver: CellResolver, + cell_alias_resolver: CellAliasResolver, enclosing_package: (PackageLabel, PackageListing), package_boundary_exception: bool, + global_label_interner: Arc, ) -> Self { Self::new( cell_resolver, enclosing_package.0.cell_name(), + cell_alias_resolver, Some(enclosing_package), package_boundary_exception, + global_label_interner, ) } @@ -142,6 +169,7 @@ impl BuildAttrCoercionContext { self.enclosing_package.as_ref().map(|x| x.0.as_cell_path()), self.cell_name, &self.cell_resolver, + &self.cell_alias_resolver, ) } @@ -170,18 +198,23 @@ impl AttrCoercionContext for BuildAttrCoercionContext { let hash = str_hash(value); let mut label_cache = self.label_cache.borrow_mut(); - if let Some((_h, _v, label)) = label_cache.get(hash, |(_h, v, _)| value == unsafe { &**v }) - { - return Ok(label.clone()); - } - - let label = self.coerce_label_no_cache(value)?; - label_cache.insert( + match label_cache.entry( hash, - (hash, self.alloc.alloc_str(value), label.clone()), - |(h, _v, _)| *h, - ); - Ok(label) + |(h, v, _)| *h == hash && value == unsafe { &**v }, + |(h, _, _)| *h, + ) { + hash_table::Entry::Occupied(e) => Ok(e.get().2.dupe()), + hash_table::Entry::Vacant(e) => { + let label = self.coerce_label_no_cache(value)?; + + let (target_label, providers) = label.into_parts(); + let target_label = self.global_label_interner.intern(target_label); + let label = ProvidersLabel::new(target_label, providers); + + e.insert((hash, self.alloc.alloc_str(value), label.dupe())); + Ok(label) + } + } } fn intern_str(&self, value: &str) -> ArcStr { @@ -201,8 +234,8 @@ impl AttrCoercionContext for BuildAttrCoercionContext { fn intern_select( &self, - value: Vec<(TargetLabel, CoercedAttr)>, - ) -> ArcSlice<(TargetLabel, CoercedAttr)> { + value: Vec<(ConfigurationSettingKey, CoercedAttr)>, + ) -> ArcSlice<(ConfigurationSettingKey, CoercedAttr)> { 
self.select_interner.intern(value)
     }

@@ -245,7 +278,7 @@ impl AttrCoercionContext for BuildAttrCoercionContext {
             if self.package_boundary_exception {
                 info!("{} (could be due to a package boundary violation)", e);
             } else {
-                soft_error!("source_file_missing", e.into())?;
+                soft_error!("source_file_missing", e.into(), quiet: true)?;
             }

         Ok(CoercedPath::File(path.to_arc()))
diff --git a/app/buck2_interpreter_for_build/src/attrs/coerce/error.rs b/app/buck2_interpreter_for_build/src/attrs/coerce/error.rs
index 13f2d71aecc78..901734fbbc821 100644
--- a/app/buck2_interpreter_for_build/src/attrs/coerce/error.rs
+++ b/app/buck2_interpreter_for_build/src/attrs/coerce/error.rs
@@ -10,7 +10,8 @@
 use gazebo::prelude::*;
 use starlark::values::Value;

-#[derive(thiserror::Error, Debug)]
+#[derive(buck2_error::Error, Debug)]
+#[buck2(input)]
 pub(crate) enum CoercionError {
     #[error("Expected value of type `{0}`, got value with type `{1}` (value was `{2}`)")]
     TypeError(String, String, String),
diff --git a/app/buck2_interpreter_for_build/src/attrs/coerce/interner.rs b/app/buck2_interpreter_for_build/src/attrs/coerce/interner.rs
index dc663bfb84d88..6b6f63e739007 100644
--- a/app/buck2_interpreter_for_build/src/attrs/coerce/interner.rs
+++ b/app/buck2_interpreter_for_build/src/attrs/coerce/interner.rs
@@ -16,15 +16,15 @@
 use std::sync::Arc;

 use buck2_util::arc_str::ArcSlice;
 use buck2_util::arc_str::ArcStr;
+use buck2_util::hash::BuckHasher;
 use dupe::Dupe;
 use hashbrown::raw::RawTable;
-use twox_hash::XxHash64;

 /// An interner specific to our AttrCoercionContext used for interning different kinds of attributes.
 /// Things specific about this interner:
 /// - Requires interned values to be Dupe, so that you can intern both Arc<...> and specific Arc types like ArcStr.
 /// - Interner is not static, so it's not required to take up memory for the entire duration of the program.
-pub(crate) struct AttrCoercionInterner {
+pub(crate) struct AttrCoercionInterner {
     /// We use `RawTable` here because `HashMap` API
     /// requires either computing hash twice (for get, then for insert) or
     /// allocating a key to perform a query using `entry` API.
diff --git a/app/buck2_interpreter_for_build/src/attrs/coerce/testing.rs b/app/buck2_interpreter_for_build/src/attrs/coerce/testing.rs
index 188accffc600f..20df51c8f7f19 100644
--- a/app/buck2_interpreter_for_build/src/attrs/coerce/testing.rs
+++ b/app/buck2_interpreter_for_build/src/attrs/coerce/testing.rs
@@ -7,9 +7,9 @@
  * of this source tree.
*/ -use std::collections::HashMap; +use std::sync::Arc; -use buck2_common::legacy_configs::LegacyBuckConfig; +use buck2_common::legacy_configs::configs::LegacyBuckConfig; use buck2_common::package_listing::listing::testing::PackageListingExt; use buck2_common::package_listing::listing::PackageListing; use buck2_core::bzl::ImportPath; @@ -19,18 +19,21 @@ use buck2_core::cells::cell_root_path::CellRootPathBuf; use buck2_core::cells::name::CellName; use buck2_core::cells::CellResolver; use buck2_core::package::PackageLabel; +use buck2_core::target::label::interner::ConcurrentTargetLabelInterner; use buck2_interpreter::extra::InterpreterHostArchitecture; use buck2_interpreter::extra::InterpreterHostPlatform; +use buck2_interpreter::file_type::StarlarkFileType; use buck2_node::attrs::coercion_context::AttrCoercionContext; +use dupe::Dupe; use maplit::hashmap; use starlark::environment::Globals; use starlark::environment::Module; use starlark::eval::Evaluator; use starlark::syntax::AstModule; -use starlark::syntax::Dialect; use starlark::values::Value; use crate::attrs::coerce::ctx::BuildAttrCoercionContext; +use crate::interpreter::buckconfig::LegacyConfigsViewForStarlark; use crate::interpreter::build_context::BuildContext; use crate::interpreter::build_context::PerFileTypeContext; use crate::interpreter::bzl_eval_ctx::BzlEvalCtx; @@ -47,20 +50,25 @@ pub fn coercion_ctx_listing(package_listing: PackageListing) -> impl AttrCoercio NonEmptyCellAlias::new("cell1".to_owned()).unwrap() => CellName::testing_new("cell1"), ]; - let cell_resolver = CellResolver::testing_with_names_and_paths_with_alias(&[ - ( - package.cell_name(), - CellRootPathBuf::testing_new(""), - aliases, - ), - ( - CellName::testing_new("cell1"), - CellRootPathBuf::testing_new("cell1"), - HashMap::new(), - ), - ]); + let cell_resolver = CellResolver::testing_with_names_and_paths_with_alias( + &[ + (package.cell_name(), CellRootPathBuf::testing_new("")), + ( + CellName::testing_new("cell1"), + CellRootPathBuf::testing_new("cell1"), + ), + ], + aliases, + ); + let cell_alias_resolver = cell_resolver.root_cell_cell_alias_resolver().dupe(); - BuildAttrCoercionContext::new_with_package(cell_resolver, (package, package_listing), false) + BuildAttrCoercionContext::new_with_package( + cell_resolver, + cell_alias_resolver, + (package, package_listing), + false, + Arc::new(ConcurrentTargetLabelInterner::default()), + ) } fn cell_resolver() -> CellResolver { @@ -75,24 +83,25 @@ pub fn to_value<'v>(env: &'v Module, globals: &Globals, content: &str) -> Value< let ast = AstModule::parse( &import_path.to_string(), content.to_owned(), - &Dialect::Extended, + &StarlarkFileType::Bzl.dialect(false), ) .unwrap_or_else(|err| panic!("Failed parsing `{}`. 
Error: `{}`", content, err)); let cell_info = InterpreterCellInfo::new( BuildFileCell::new(CellName::testing_new("root")), cell_resolver(), + cell_resolver().root_cell_cell_alias_resolver().dupe(), ) .unwrap(); - let buckconfig = LegacyBuckConfig::empty(); - let root_buckconfig = LegacyBuckConfig::empty(); + + let mut buckconfigs = + LegacyConfigsViewForStarlark::new(LegacyBuckConfig::empty(), LegacyBuckConfig::empty()); let host_platform = InterpreterHostPlatform::Linux; let host_architecture = InterpreterHostArchitecture::X86_64; let host_info = HostInfo::new(host_platform, host_architecture, None); let build_ctx = BuildContext::new_for_module( env, &cell_info, - &buckconfig, - &root_buckconfig, + &mut buckconfigs, &host_info, PerFileTypeContext::Bzl(BzlEvalCtx { bzl_path: import_path, diff --git a/app/buck2_interpreter_for_build/src/attrs/mod.rs b/app/buck2_interpreter_for_build/src/attrs/mod.rs deleted file mode 100644 index 8ce5317f9097f..0000000000000 --- a/app/buck2_interpreter_for_build/src/attrs/mod.rs +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use anyhow::Context; -use buck2_node::attrs::attr::Attribute; -use buck2_node::attrs::attr::CoercedValue; -use buck2_node::attrs::coercion_context::AttrCoercionContext; -use buck2_node::attrs::configurable::AttrIsConfigurable; -use starlark::docs::DocString; -use starlark::docs::DocStringKind; -use starlark::values::Value; - -use crate::attrs::coerce::attr_type::ty_maybe_select::TyMaybeSelect; -use crate::attrs::coerce::attr_type::AttrTypeExt; -use crate::attrs::coerce::error::CoercionError; - -pub mod attrs_global; -pub mod coerce; -pub(crate) mod starlark_attribute; - -#[derive(Debug, thiserror::Error)] -enum AttrCoerceError { - #[error("Parameter `{0}` had no value provided, but it is mandatory")] - MissingMandatoryParameter(String), -} - -pub trait AttributeCoerceExt { - fn coerce<'v>( - &self, - param_name: &str, - configurable: AttrIsConfigurable, - coercer_ctx: &dyn AttrCoercionContext, - value: Value<'v>, - ) -> anyhow::Result; - - fn docstring(&self) -> Option; - - fn starlark_type(&self) -> TyMaybeSelect; -} - -impl AttributeCoerceExt for Attribute { - /// Attempt to coerce a value. If the value provided is `None`, and a default value is available, - /// that default value is returned. 
- fn coerce<'v>( - &self, - param_name: &str, - configurable: AttrIsConfigurable, - coercer_ctx: &dyn AttrCoercionContext, - value: Value<'v>, - ) -> anyhow::Result { - if self.is_default_only() { - if value.is_none() { - return Ok(CoercedValue::Default); - } else { - return Err(CoercionError::DefaultOnly(value.to_string()).into()); - } - } - - match self.default() { - default if !value.is_none() => self - .coercer() - .coerce_with_default(configurable, coercer_ctx, value, default.map(|x| &**x)) - .map(CoercedValue::Custom) - .with_context(|| { - format!( - "Error coercing attribute `{}` of type `{}`", - param_name, self - ) - }), - Some(_) => Ok(CoercedValue::Default), - None => Err(AttrCoerceError::MissingMandatoryParameter(param_name.to_owned()).into()), - } - } - - fn docstring(&self) -> Option { - DocString::from_docstring(DocStringKind::Starlark, self.doc()) - } - - fn starlark_type(&self) -> TyMaybeSelect { - self.coercer().starlark_type() - } -} diff --git a/app/buck2_interpreter_for_build/src/attrs/starlark_attribute.rs b/app/buck2_interpreter_for_build/src/attrs/starlark_attribute.rs index a9ebda31ac31a..d4fe3697e9dc0 100644 --- a/app/buck2_interpreter_for_build/src/attrs/starlark_attribute.rs +++ b/app/buck2_interpreter_for_build/src/attrs/starlark_attribute.rs @@ -23,7 +23,7 @@ use starlark::values::starlark_value_as_type::StarlarkValueAsType; use starlark::values::NoSerialize; use starlark::values::StarlarkValue; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum StarlarkAttributeError { #[error("`attrs.default_only()` cannot be used in nested attributes")] DefaultOnlyInNested, diff --git a/app/buck2_interpreter_for_build/src/call_stack.rs b/app/buck2_interpreter_for_build/src/call_stack.rs new file mode 100644 index 0000000000000..232080874a0ca --- /dev/null +++ b/app/buck2_interpreter_for_build/src/call_stack.rs @@ -0,0 +1,42 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::hash::Hash; +use std::hash::Hasher; + +use buck2_node::call_stack::StarlarkCallStackImpl; +use buck2_node::call_stack::StarlarkTargetCallStackRoot; +use cmp_any::PartialEqAny; +use starlark::eval::CallStack; + +// I can't implement a trait for a type that is not of this crate, so I wrap type here +#[derive(Debug, derive_more::Display, PartialEq)] +pub struct StarlarkCallStackWrapper(pub CallStack); + +impl StarlarkCallStackImpl for StarlarkCallStackWrapper { + fn eq_token(&self) -> PartialEqAny { + PartialEqAny::new(self) + } + + fn hash(&self, mut state: &mut dyn Hasher) { + self.0.hash(&mut state); + } + + fn root_location(&self) -> Option { + self.0 + .frames + .first() + .and_then(|l| l.location.as_ref()) + .map(|l| l.resolve().begin_file_line()) + .map(|l| StarlarkTargetCallStackRoot { + file: l.file.clone(), + line: l.line, + }) + } +} diff --git a/app/buck2_interpreter_for_build/src/interpreter.rs b/app/buck2_interpreter_for_build/src/interpreter.rs new file mode 100644 index 0000000000000..d9d41545ec04e --- /dev/null +++ b/app/buck2_interpreter_for_build/src/interpreter.rs @@ -0,0 +1,32 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub mod buckconfig; +pub mod build_context; +pub(crate) mod bzl_eval_ctx; +pub mod calculation; +pub(crate) mod cell_info; +pub(crate) mod check_starlark_stack_size; +pub mod configuror; +pub mod context; +pub mod cycles; +pub mod dice_calculation_delegate; +mod extra_value; +pub mod functions; +pub mod global_interpreter_state; +pub mod globals; +pub mod globspec; +pub mod interpreter_for_cell; +pub mod interpreter_setup; +pub mod module_internals; +pub(crate) mod natives; +pub mod package_file_calculation; +pub mod package_file_extra; +pub mod selector; +pub mod testing; diff --git a/app/buck2_interpreter_for_build/src/interpreter/buckconfig.rs b/app/buck2_interpreter_for_build/src/interpreter/buckconfig.rs index 46d0d4d9eb59e..2903de7723171 100644 --- a/app/buck2_interpreter_for_build/src/interpreter/buckconfig.rs +++ b/app/buck2_interpreter_for_build/src/interpreter/buckconfig.rs @@ -9,8 +9,13 @@ use std::cell::RefCell; use std::fmt; +use std::ops::DerefMut; +use std::sync::Arc; -use buck2_common::legacy_configs::view::LegacyBuckConfigView; +use buck2_common::legacy_configs::configs::LegacyBuckConfig; +use buck2_common::legacy_configs::dice::OpaqueLegacyBuckConfigOnDice; +use buck2_common::legacy_configs::key::BuckconfigKeyRef; +use dice::DiceComputations; use hashbrown::raw::RawTable; use starlark::collections::Hashed; use starlark::environment::Module; @@ -23,24 +28,38 @@ struct BuckConfigEntry { value: Option, } -/// Version of cell buckconfig optimized for fast query from `read_config` Starlark function. -pub(crate) struct LegacyBuckConfigForStarlark<'a> { - module: &'a Module, - buckconfig: &'a (dyn LegacyBuckConfigView + 'a), +pub trait BuckConfigsViewForStarlark { + fn read_current_cell_config( + &mut self, + key: BuckconfigKeyRef, + ) -> anyhow::Result>>; + + fn read_root_cell_config(&mut self, key: BuckconfigKeyRef) -> anyhow::Result>>; +} + +struct BuckConfigsInner<'a> { + configs_view: &'a mut (dyn BuckConfigsViewForStarlark + 'a), /// Hash map by `(section, key)` pair, so we do one table lookup per request. /// So we hash the `key` even if the section does not exist, /// but this is practically not an issue, because keys usually come with cached hash. - cache: RefCell>, + current_cell_cache: RawTable, + root_cell_cache: RawTable, +} + +/// Version of cell buckconfig optimized for fast query from `read_config` Starlark function. +pub(crate) struct LegacyBuckConfigsForStarlark<'a> { + module: &'a Module, + inner: RefCell>, } -impl<'a> fmt::Debug for LegacyBuckConfigForStarlark<'a> { +impl<'a> fmt::Debug for LegacyBuckConfigsForStarlark<'a> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("LegacyBuckConfigForStarlark") .finish_non_exhaustive() } } -impl<'a> LegacyBuckConfigForStarlark<'a> { +impl<'a> LegacyBuckConfigsForStarlark<'a> { // `section` or `key` 32 bit hashes are well swizzled, // but concatenation of them into 64 bit integer is not. // This function tries to fix that. @@ -60,12 +79,15 @@ impl<'a> LegacyBuckConfigForStarlark<'a> { /// Constructor. 
pub(crate) fn new( module: &'a Module, - buckconfig: &'a (dyn LegacyBuckConfigView + 'a), - ) -> LegacyBuckConfigForStarlark<'a> { - LegacyBuckConfigForStarlark { + configs_view: &'a mut (dyn BuckConfigsViewForStarlark + 'a), + ) -> LegacyBuckConfigsForStarlark<'a> { + LegacyBuckConfigsForStarlark { module, - buckconfig, - cache: RefCell::new(RawTable::new()), + inner: RefCell::new(BuckConfigsInner { + configs_view, + current_cell_cache: RawTable::new(), + root_cell_cache: RawTable::new(), + }), } } @@ -73,19 +95,40 @@ impl<'a> LegacyBuckConfigForStarlark<'a> { &self, section: Hashed<&str>, key: Hashed<&str>, + from_root_cell: bool, ) -> anyhow::Result> { let hash = Self::mix_hashes(section.hash().get(), key.hash().get()); - let mut cache = self.cache.borrow_mut(); + + let mut inner = self.inner.borrow_mut(); + let BuckConfigsInner { + ref mut configs_view, + ref mut current_cell_cache, + ref mut root_cell_cache, + } = inner.deref_mut(); + + let cache = if from_root_cell { + root_cell_cache + } else { + current_cell_cache + }; if let Some(e) = cache.get(hash, |e| { e.section.key() == section.key() && e.key.as_str() == *key.key() }) { return Ok(e.value); } - let value = self - .buckconfig - .get(section.key(), key.key())? - .map(|v| self.module.frozen_heap().alloc_str(&v)); + let value = if from_root_cell { + configs_view.read_root_cell_config(BuckconfigKeyRef { + section: section.key(), + property: key.key(), + })? + } else { + configs_view.read_current_cell_config(BuckconfigKeyRef { + section: section.key(), + property: key.key(), + })? + } + .map(|v| self.module.frozen_heap().alloc_str(&v)); cache.insert( hash, @@ -101,7 +144,7 @@ impl<'a> LegacyBuckConfigForStarlark<'a> { } /// Find the buckconfig entry. - pub(crate) fn get( + pub(crate) fn current_cell_get( &self, section: StringValue, key: StringValue, @@ -109,6 +152,80 @@ impl<'a> LegacyBuckConfigForStarlark<'a> { // Note here we reuse the hashes of `section` and `key`, // if `read_config` is called repeatedly with the same constant arguments: // `StringValue` caches the hashes. - self.get_impl(section.get_hashed_str(), key.get_hashed_str()) + self.get_impl(section.get_hashed_str(), key.get_hashed_str(), false) + } + + pub(crate) fn root_cell_get( + &self, + section: StringValue, + key: StringValue, + ) -> anyhow::Result> { + // Note here we reuse the hashes of `section` and `key`, + // if `read_config` is called repeatedly with the same constant arguments: + // `StringValue` caches the hashes. 
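`mix_hashes` above exists because two well-distributed 32-bit hashes concatenated into a `u64` are not themselves well mixed: bits of the section hash never influence bits of the key hash. A standard fix is to run the combined value through a 64-bit finalizer; the splitmix64 finalizer below is illustrative, not necessarily the exact mixer buck2 uses:

```rust
// Combine two 32-bit hashes, then finalize so every output bit depends
// on every input bit (splitmix64's finalizer).
fn mix_hashes(section: u32, key: u32) -> u64 {
    let mut x = ((section as u64) << 32) | key as u64;
    x = (x ^ (x >> 30)).wrapping_mul(0xbf58_476d_1ce4_e5b9);
    x = (x ^ (x >> 27)).wrapping_mul(0x94d0_49bb_1331_11eb);
    x ^ (x >> 31)
}

fn main() {
    // Nearby inputs land far apart after mixing.
    println!("{:016x}", mix_hashes(1, 1));
    println!("{:016x}", mix_hashes(1, 2));
}
```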
+ self.get_impl(section.get_hashed_str(), key.get_hashed_str(), true) + } +} + +pub(crate) struct ConfigsOnDiceViewForStarlark<'a, 'd> { + ctx: &'a mut DiceComputations<'d>, + buckconfig: OpaqueLegacyBuckConfigOnDice, + root_buckconfig: OpaqueLegacyBuckConfigOnDice, +} + +impl<'a, 'd> ConfigsOnDiceViewForStarlark<'a, 'd> { + pub(crate) fn new( + ctx: &'a mut DiceComputations<'d>, + buckconfig: OpaqueLegacyBuckConfigOnDice, + root_buckconfig: OpaqueLegacyBuckConfigOnDice, + ) -> Self { + Self { + ctx, + buckconfig, + root_buckconfig, + } + } +} + +impl BuckConfigsViewForStarlark for ConfigsOnDiceViewForStarlark<'_, '_> { + fn read_current_cell_config( + &mut self, + key: BuckconfigKeyRef, + ) -> anyhow::Result>> { + self.buckconfig.lookup(self.ctx, key) + } + + fn read_root_cell_config(&mut self, key: BuckconfigKeyRef) -> anyhow::Result>> { + self.root_buckconfig.lookup(self.ctx, key) + } +} + +pub struct LegacyConfigsViewForStarlark { + current_cell_config: LegacyBuckConfig, + root_cell_config: LegacyBuckConfig, +} + +impl LegacyConfigsViewForStarlark { + pub(crate) fn new(buckconfig: LegacyBuckConfig, root_buckconfig: LegacyBuckConfig) -> Self { + Self { + current_cell_config: buckconfig, + root_cell_config: root_buckconfig, + } + } +} + +impl BuckConfigsViewForStarlark for LegacyConfigsViewForStarlark { + fn read_current_cell_config( + &mut self, + key: BuckconfigKeyRef, + ) -> anyhow::Result>> { + Ok(self + .current_cell_config + .get(key) + .map(|v| v.to_owned().into())) + } + + fn read_root_cell_config(&mut self, key: BuckconfigKeyRef) -> anyhow::Result>> { + Ok(self.root_cell_config.get(key).map(|v| v.to_owned().into())) } } diff --git a/app/buck2_interpreter_for_build/src/interpreter/build_context.rs b/app/buck2_interpreter_for_build/src/interpreter/build_context.rs index 8f4728488abf4..8c65f44f366f6 100644 --- a/app/buck2_interpreter_for_build/src/interpreter/build_context.rs +++ b/app/buck2_interpreter_for_build/src/interpreter/build_context.rs @@ -7,9 +7,9 @@ * of this source tree. */ +use std::cell::OnceCell; use std::fmt::Debug; -use buck2_common::legacy_configs::view::LegacyBuckConfigView; use buck2_core::cells::build_file_cell::BuildFileCell; use buck2_core::cells::cell_path::CellPath; use buck2_core::cells::CellResolver; @@ -21,16 +21,17 @@ use buck2_interpreter::paths::path::StarlarkPath; use starlark::any::ProvidesStaticType; use starlark::environment::Module; use starlark::eval::Evaluator; -use thiserror::Error; -use crate::interpreter::buckconfig::LegacyBuckConfigForStarlark; +use crate::interpreter::buckconfig::BuckConfigsViewForStarlark; +use crate::interpreter::buckconfig::LegacyBuckConfigsForStarlark; use crate::interpreter::bzl_eval_ctx::BzlEvalCtx; use crate::interpreter::cell_info::InterpreterCellInfo; use crate::interpreter::functions::host_info::HostInfo; use crate::interpreter::module_internals::ModuleInternals; use crate::super_package::eval_ctx::PackageFileEvalCtx; -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] +#[buck2(input)] enum BuildContextError { #[error( "This function is unavailable during analysis (usual solution is to place the information on a toolchain)" @@ -153,18 +154,18 @@ pub struct BuildContext<'a> { /// `load()` statements. pub cell_info: &'a InterpreterCellInfo, - /// Current cell file buckconfig. - pub(crate) buckconfig: LegacyBuckConfigForStarlark<'a>, - /// Buckconfig of the root cell. 
-    pub(crate) root_buckconfig: LegacyBuckConfigForStarlark<'a>,
+    pub(crate) buckconfigs: LegacyBuckConfigsForStarlark<'a>,

-    pub host_info: &'a HostInfo,
+    pub(crate) host_info: &'a HostInfo,

     /// Context specific to file type.
     pub additional: PerFileTypeContext,

     /// When true, rule function is no-op.
-    pub ignore_attrs_for_profiling: bool,
+    pub(crate) ignore_attrs_for_profiling: bool,
+
+    /// Peak allocated bytes limit for starlark.
+    pub(crate) starlark_peak_allocated_byte_limit: OnceCell>,
 }

 impl<'a> BuildContext<'a> {
@@ -172,31 +173,27 @@
     pub(crate) fn new_for_module(
         module: &'a Module,
         cell_info: &'a InterpreterCellInfo,
-        buckconfig: &'a (dyn LegacyBuckConfigView + 'a),
-        root_buckconfig: &'a (dyn LegacyBuckConfigView + 'a),
+        buckconfigs: &'a mut dyn BuckConfigsViewForStarlark,
         host_info: &'a HostInfo,
         additional: PerFileTypeContext,
         ignore_attrs_for_profiling: bool,
     ) -> BuildContext<'a> {
-        let buckconfig = LegacyBuckConfigForStarlark::new(module, buckconfig);
-        let root_buckconfig = LegacyBuckConfigForStarlark::new(module, root_buckconfig);
+        let buckconfigs = LegacyBuckConfigsForStarlark::new(module, buckconfigs);
         BuildContext {
             cell_info,
-            buckconfig,
-            root_buckconfig,
+            buckconfigs,
             host_info,
             additional,
             ignore_attrs_for_profiling,
+            starlark_peak_allocated_byte_limit: OnceCell::new(),
         }
     }

-    pub fn from_context<'v>(eval: &Evaluator<'v, 'a>) -> anyhow::Result<&'a BuildContext<'a>> {
-        match eval.extra {
-            None => Err(BuildContextError::UnavailableDuringAnalysis.into()),
-            Some(extra) => Ok(extra
-                .downcast_ref::()
-                .unwrap_or_else(|| panic!("Unable to access context extra. Wrong type."))),
-        }
+    pub fn from_context<'v, 'a1>(
+        eval: &Evaluator<'v, 'a1, 'a>,
+    ) -> anyhow::Result<&'a1 BuildContext<'a>> {
+        let f = || eval.extra?.downcast_ref::();
+        f().ok_or_else(|| BuildContextError::UnavailableDuringAnalysis.into())
     }

     pub(crate) fn cell_info(&self) -> &InterpreterCellInfo {
diff --git a/app/buck2_interpreter_for_build/src/interpreter/build_defs.rs b/app/buck2_interpreter_for_build/src/interpreter/build_defs.rs
deleted file mode 100644
index 4dc6d1a5d146c..0000000000000
--- a/app/buck2_interpreter_for_build/src/interpreter/build_defs.rs
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */
-
-use starlark::environment::GlobalsBuilder;
-use starlark::environment::LibraryExtension;
-use starlark::eval::Evaluator;
-use starlark::starlark_module;
-use starlark::values::list::AllocList;
-use starlark::values::list::ListOf;
-use starlark::values::ValueOfUnchecked;
-
-use crate::interpreter::build_context::BuildContext;
-use crate::interpreter::functions::dedupe::register_dedupe;
-use crate::interpreter::functions::sha256::register_sha256;
-use crate::interpreter::globspec::GlobSpec;
-use crate::interpreter::module_internals::ModuleInternals;
-use crate::interpreter::selector::register_select;
-
-#[starlark_module]
-pub(crate) fn register_path(builder: &mut GlobalsBuilder) {
-    /// The `glob()` function specifies a set of files using patterns.
-    /// Only available from `BUCK` files.
-    ///
-    /// A typical `glob` call looks like:
-    ///
-    /// ```python
-    /// glob(["foo/**/*.h"])
-    /// ```
-    ///
-    /// This call will match all header files in the `foo` directory, recursively.
-    ///
-    /// You can also pass a named `exclude` parameter to remove files matching a pattern:
-    ///
-    /// ```python
-    /// glob(["foo/**/*.h"], exclude = ["**/config.h"])
-    /// ```
-    ///
-    /// This call will remove all `config.h` files from the initial match.
-    ///
-    /// The `glob()` call is evaluated against the list of files owned by this `BUCK` file.
-    /// A file is owned by whichever `BUCK` file is closest above it - so given `foo/BUCK` and
-    /// `foo/bar/BUCK` the file `foo/file.txt` would be owned by `foo/BUCK` (and available from
-    /// its `glob` results) but the file `foo/bar/file.txt` would be owned by `foo/bar/BUCK`
-    /// and _not_ appear in the glob result of `foo/BUCK`, even if you write `glob(["bar/file.txt"])`.
-    /// As a consequence of this rule, `glob(["../foo.txt"])` will always return an empty list of files.
-    ///
-    /// Currently `glob` is evaluated case-insensitively on all file systems, but we expect
-    /// that to change to case-sensitive in the near future.
-    fn glob<'v>(
-        include: Vec,
-        #[starlark(require = named, default=Vec::new())] exclude: Vec,
-        eval: &mut Evaluator<'v, '_>,
-    ) -> anyhow::Result>> {
-        let extra = ModuleInternals::from_context(eval, "glob")?;
-        let spec = GlobSpec::new(&include, &exclude)?;
-        let res = extra.resolve_glob(&spec).map(|path| path.as_str());
-        Ok(ValueOfUnchecked::new(eval.heap().alloc(AllocList(res))))
-    }
-
-    /// `package_name()` can only be called in buildfiles (e.g. BUCK files) or PACKAGE files, and returns the name of the package.
-    /// E.g. inside `foo//bar/baz/BUCK` the output will be `bar/baz`.
-    /// E.g. inside `foo//bar/PACKAGE` the output will be `bar`.
-    fn package_name(eval: &mut Evaluator) -> anyhow::Result {
-        // An (IMO) unfortunate choice in the skylark api is that this just gives the cell-relative
-        // path of the package (which isn't a unique "name" for the package)
-        Ok(BuildContext::from_context(eval)?
-            .base_path()?
-            .path()
-            .to_string())
-    }
-
-    /// `get_base_path()` can only be called in buildfiles (e.g. BUCK files) or PACKAGE files, and returns the name of the package.
-    /// E.g. inside `foo//bar/baz/BUCK` the output will be `bar/baz`.
-    /// E.g. inside `foo//bar/PACKAGE` the output will be `bar`.
-    ///
-    /// This function is identical to `package_name`.
-    fn get_base_path(eval: &mut Evaluator) -> anyhow::Result {
-        Ok(BuildContext::from_context(eval)?
-            .base_path()?
-            .path()
-            .to_string())
-    }
-
-    /// Like `get_cell_name()` but prepends a leading `@` for compatibility with Buck1.
-    /// You should call `get_cell_name()` instead, and if you really want the `@`,
-    /// prepend it yourself.
-    fn repository_name(eval: &mut Evaluator) -> anyhow::Result {
-        // In Buck v1 the repository name has a leading `@` on it, so match that with v2.
-        // In practice, most users do `repository_name()[1:]` to drop it.
-        Ok(format!(
-            "@{}",
-            BuildContext::from_context(eval)?.cell_info().name()
-        ))
-    }
-
-    /// `get_cell_name()` can be called from either a `BUCK` file or a `.bzl` file,
-    /// and returns the name of the cell where the `BUCK` file that started the call
-    /// lives.
-    ///
-    /// For example, inside `foo//bar/baz/BUCK` the output will be `foo`.
-    /// If that `BUCK` file does a `load("hello//world.bzl", "something")` then
-    /// the result in that `.bzl` file will also be `foo`.
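The helpers documented above (with `get_cell_name` completing the set just below) all fall out of one naming scheme: a label such as `foo//bar/baz:qux` splits into a cell name (`foo`), a cell-relative package path (`bar/baz`), and a target name (`qux`). A toy splitter, a hypothetical helper rather than buck2's real parser, showing which piece each function reports:

```rust
// Split "cell//package/path:target" into its three parts.
fn split_label(label: &str) -> Option<(&str, &str, &str)> {
    let (cell, rest) = label.split_once("//")?;
    let (package, target) = rest.split_once(':')?;
    Some((cell, package, target))
}

fn main() {
    let (cell, package, target) = split_label("foo//bar/baz:qux").unwrap();
    assert_eq!(cell, "foo");        // get_cell_name()
    assert_eq!(package, "bar/baz"); // package_name() / get_base_path()
    assert_eq!(target, "qux");
    // repository_name() is the Buck1-compatible "@foo" spelling.
    assert_eq!(format!("@{cell}"), "@foo");
}
```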
- fn get_cell_name(eval: &mut Evaluator) -> anyhow::Result { - Ok(BuildContext::from_context(eval)? - .cell_info() - .name() - .to_string()) - } -} - -/// Native functions included in all contexts (`BUCK`, `bzl`, `bxl`). -pub(crate) fn register_base_natives(registry: &mut GlobalsBuilder) { - register_path(registry); - register_select(registry); - register_sha256(registry); - register_dedupe(registry); -} - -pub fn starlark_library_extensions_for_buck2() -> &'static [LibraryExtension] { - &[ - LibraryExtension::Breakpoint, - LibraryExtension::Debug, - LibraryExtension::EnumType, - LibraryExtension::Filter, - LibraryExtension::Json, - LibraryExtension::Map, - LibraryExtension::Partial, - LibraryExtension::Pprint, - LibraryExtension::Print, - LibraryExtension::RecordType, - LibraryExtension::ExperimentalRegex, - LibraryExtension::StructType, - LibraryExtension::Typing, - LibraryExtension::Internal, - ] -} - -/// Configure globals for all three possible environments: `BUCK`, `bzl` and `bxl`. -pub fn configure_base_globals( - configure_native_struct: impl FnOnce(&mut GlobalsBuilder), -) -> GlobalsBuilder { - let starlark_extensions = starlark_library_extensions_for_buck2(); - let mut global_env = - GlobalsBuilder::extended_by(starlark_extensions).with(register_base_natives); - global_env.struct_("__internal__", |x| { - register_base_natives(x); - // If `native.` symbols need to be added to the global env, they should be done - // in `configure_build_file_globals()` or - // `configure_extension_file_globals()` - for ext in starlark_extensions { - ext.add(x) - } - configure_native_struct(x); - }); - global_env -} diff --git a/app/buck2_interpreter_for_build/src/interpreter/calculation.rs b/app/buck2_interpreter_for_build/src/interpreter/calculation.rs index cc88a64f881b6..1ede2ea328b04 100644 --- a/app/buck2_interpreter_for_build/src/interpreter/calculation.rs +++ b/app/buck2_interpreter_for_build/src/interpreter/calculation.rs @@ -15,25 +15,23 @@ use std::time::Instant; use allocative::Allocative; use async_trait::async_trait; -use buck2_common::package_listing::dice::HasPackageListingResolver; -use buck2_common::result::SharedResult; -use buck2_common::result::ToUnsharedResultExt; +use buck2_common::package_listing::dice::DicePackageListingResolver; use buck2_core::build_file_path::BuildFilePath; use buck2_core::bzl::ImportPath; use buck2_core::cells::build_file_cell::BuildFileCell; use buck2_core::package::PackageLabel; use buck2_events::dispatch::async_record_root_spans; use buck2_events::span::SpanId; +use buck2_futures::cancellation::CancellationContext; use buck2_interpreter::file_loader::LoadedModule; use buck2_interpreter::file_loader::ModuleDeps; -use buck2_interpreter::file_type::StarlarkFileType; use buck2_interpreter::load_module::InterpreterCalculationImpl; use buck2_interpreter::load_module::INTERPRETER_CALCULATION_IMPL; +use buck2_interpreter::paths::module::OwnedStarlarkModulePath; use buck2_interpreter::paths::module::StarlarkModulePath; use buck2_interpreter::paths::package::PackageFilePath; use buck2_interpreter::paths::path::StarlarkPath; use buck2_interpreter::prelude_path::PreludePath; -use buck2_interpreter::starlark_profiler::StarlarkProfilerOrInstrumentation; use buck2_node::metadata::key::MetadataKey; use buck2_node::nodes::eval_result::EvaluationResult; use buck2_node::nodes::frontend::TargetGraphCalculation; @@ -47,13 +45,14 @@ use dice::Key; use dupe::Dupe; use futures::future::BoxFuture; use futures::FutureExt; -use more_futures::cancellation::CancellationContext; 
use smallvec::SmallVec; use starlark::environment::Globals; use starlark_map::small_map::SmallMap; +use crate::interpreter::dice_calculation_delegate::testing::EvalImportKey; use crate::interpreter::dice_calculation_delegate::HasCalculationDelegate; use crate::interpreter::global_interpreter_state::HasGlobalInterpreterState; +use crate::interpreter::package_file_calculation::EvalPackageFile; // Key for 'InterpreterCalculation::get_interpreter_results' #[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)] @@ -65,67 +64,61 @@ pub(crate) fn init_target_graph_calculation_impl() { TARGET_GRAPH_CALCULATION_IMPL.init(&TargetGraphCalculationInstance); } +#[async_trait] +impl Key for InterpreterResultsKey { + type Value = buck2_error::Result>; + async fn compute( + &self, + ctx: &mut DiceComputations, + _cancellation: &CancellationContext, + ) -> Self::Value { + let now = Instant::now(); + + let (result, spans) = + async_record_root_spans(ctx.get_interpreter_results_uncached(self.0.dupe())).await; + + ctx.store_evaluation_data(IntepreterResultsKeyActivationData { + duration: now.elapsed(), + result: result.dupe(), + spans, + })?; + + result + } + + fn equality(_: &Self::Value, _: &Self::Value) -> bool { + // TODO consider if we want to impl eq for this + false + } + + fn validity(x: &Self::Value) -> bool { + x.is_ok() + } +} + #[async_trait] impl TargetGraphCalculationImpl for TargetGraphCalculationInstance { async fn get_interpreter_results_uncached( &self, - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, package: PackageLabel, - ) -> SharedResult> { - let interpreter = ctx + ) -> buck2_error::Result> { + let mut interpreter = ctx .get_interpreter_calculator( package.cell_name(), BuildFileCell::new(package.cell_name()), ) .await?; - interpreter - .eval_build_file( - package.dupe(), - &mut StarlarkProfilerOrInstrumentation::disabled(), - ) - .await + interpreter.eval_build_file(package.dupe()).await } fn get_interpreter_results<'a>( &self, - ctx: &'a DiceComputations, + ctx: &'a mut DiceComputations, package: PackageLabel, ) -> BoxFuture<'a, anyhow::Result>> { - #[async_trait] - impl Key for InterpreterResultsKey { - type Value = SharedResult>; - async fn compute( - &self, - ctx: &mut DiceComputations, - _cancellation: &CancellationContext, - ) -> Self::Value { - let now = Instant::now(); - - let (result, spans) = - async_record_root_spans(ctx.get_interpreter_results_uncached(self.0.dupe())) - .await; - - ctx.store_evaluation_data(IntepreterResultsKeyActivationData { - duration: now.elapsed(), - result: result.dupe(), - spans, - })?; - - result - } - - fn equality(_: &Self::Value, _: &Self::Value) -> bool { - // TODO consider if we want to impl eq for this - false - } - - fn validity(x: &Self::Value) -> bool { - x.is_ok() - } - } - ctx.compute(&InterpreterResultsKey(package.dupe())) - .map(|res| res?.unshared_error()) + .map(|v| v?.map_err(anyhow::Error::from)) .boxed() } } @@ -138,35 +131,64 @@ pub(crate) fn init_interpreter_calculation_impl() { PACKAGE_VALUES_CALCULATION.init(&PackageValuesCalculationInstance); } +#[async_trait] +impl Key for EvalImportKey { + type Value = buck2_error::Result; + async fn compute( + &self, + ctx: &mut DiceComputations, + _cancellation: &CancellationContext, + ) -> Self::Value { + let starlark_path = self.0.borrow(); + // We cannot just use the inner default delegate's eval_import + // because that wouldn't delegate back to us for inner eval_import calls. 
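`InterpreterResultsKey` above follows the DICE key idiom that recurs through this file: `compute` does the work, `equality` decides whether dependents may reuse a cached value, and `validity` keeps failures out of the cache. A stripped-down sketch of that contract (a toy trait, not DICE's real API):

```rust
// Toy version of the caching contract, not dice's actual trait.
trait Key {
    type Value: Clone;
    fn compute(&self) -> Self::Value;
    // `false` here (as above) means dependents always re-run when this
    // key recomputes, even if the new value happens to match the old one.
    fn equality(old: &Self::Value, new: &Self::Value) -> bool;
    // `false` means "do not cache this value" - errors stay transient.
    fn validity(value: &Self::Value) -> bool;
}

struct PackageKey(&'static str);

impl Key for PackageKey {
    type Value = Result<u32, String>;
    fn compute(&self) -> Self::Value {
        if self.0.is_empty() {
            Err("empty package".to_owned())
        } else {
            Ok(42)
        }
    }
    fn equality(_: &Self::Value, _: &Self::Value) -> bool {
        false // comparing full evaluation results is not worth it
    }
    fn validity(value: &Self::Value) -> bool {
        value.is_ok() // cache successes only
    }
}

fn main() {
    let k = PackageKey("cell//pkg");
    let v = k.compute();
    assert!(PackageKey::validity(&v));
}
```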
+ Ok(ctx + .get_interpreter_calculator(starlark_path.cell(), starlark_path.build_file_cell()) + .await? + .eval_module_uncached(starlark_path) + .await?) + } + + fn equality(_: &Self::Value, _: &Self::Value) -> bool { + // While it is technically possible to compare the modules + // at least for simple modules (like modules defining only string constants), + // practically it is too hard to make it work correctly for every case. + false + } + + fn validity(x: &Self::Value) -> bool { + x.is_ok() + } +} + #[async_trait] impl InterpreterCalculationImpl for InterpreterCalculationInstance { async fn get_loaded_module( &self, - ctx: &DiceComputations, - path: StarlarkModulePath<'_>, + ctx: &mut DiceComputations<'_>, + starlark_path: StarlarkModulePath<'_>, ) -> anyhow::Result { - ctx.get_interpreter_calculator(path.cell(), path.build_file_cell()) + ctx.compute(&EvalImportKey(OwnedStarlarkModulePath::new(starlark_path))) .await? - .eval_module(path) - .await + .map_err(anyhow::Error::from) } async fn get_module_deps( &self, - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, package: PackageLabel, build_file_cell: BuildFileCell, ) -> anyhow::Result { - let calc = ctx - .get_interpreter_calculator(package.cell_name(), build_file_cell) - .await?; - - let build_file_name = ctx + let build_file_name = DicePackageListingResolver(ctx) .resolve_package_listing(package.dupe()) .await? .buildfile() .to_owned(); + let mut calc = ctx + .get_interpreter_calculator(package.cell_name(), build_file_cell) + .await?; + let (_module, module_deps) = calc .prepare_eval(StarlarkPath::BuildFile(&BuildFilePath::new( package.dupe(), @@ -179,34 +201,34 @@ impl InterpreterCalculationImpl for InterpreterCalculationInstance { async fn get_package_file_deps( &self, - ctx: &DiceComputations, - package: &PackageFilePath, - ) -> anyhow::Result>> { + ctx: &mut DiceComputations<'_>, + package: PackageLabel, + ) -> anyhow::Result)>> { // These aren't cached on the DICE graph, since in normal evaluation there aren't that many, and we can cache at a higher level. // Therefore we re-parse the file, if it exists. // Fortunately, there are only a small number (currently a few hundred) - let interpreter = ctx - .get_interpreter_calculator(package.cell(), package.build_file_cell()) + let cell_name = package.as_cell_path().cell(); + let mut interpreter = ctx + .get_interpreter_calculator(cell_name, BuildFileCell::new(cell_name)) .await?; - Ok(interpreter - .prepare_package_file_eval(package) - .await? - .map(|x| x.1.get_loaded_modules().imports().cloned().collect())) + let x = interpreter.prepare_package_file_eval(package).await?; + let Some((package_file_path, _module, deps)) = x else { + return Ok(None); + }; + Ok(Some(( + package_file_path, + deps.get_loaded_modules().imports().cloned().collect(), + ))) } - async fn global_env_for_file_type( - &self, - ctx: &DiceComputations, - file_type: StarlarkFileType, - ) -> anyhow::Result { - Ok(ctx - .get_global_interpreter_state() - .await? - .globals_for_file_type(file_type) - .dupe()) + async fn global_env(&self, ctx: &mut DiceComputations<'_>) -> anyhow::Result { + Ok(ctx.get_global_interpreter_state().await?.globals().dupe()) } - async fn prelude_import(&self, ctx: &DiceComputations) -> anyhow::Result> { + async fn prelude_import( + &self, + ctx: &mut DiceComputations<'_>, + ) -> anyhow::Result> { Ok(ctx .get_global_interpreter_state() .await? 
@@ -220,24 +242,18 @@ impl InterpreterCalculationImpl for InterpreterCalculationInstance { impl PackageValuesCalculation for PackageValuesCalculationInstance { async fn package_values( &self, - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, package: PackageLabel, ) -> anyhow::Result> { - let listing = ctx.resolve_package_listing(package.dupe()).await?; - let super_package = ctx - .get_interpreter_calculator( - package.cell_name(), - BuildFileCell::new(package.cell_name()), - ) + ctx.eval_package_file(package) .await? - .eval_package_file_for_build_file(package, &listing) - .await?; - super_package.package_values().package_values_json() + .package_values() + .package_values_json() } } pub struct IntepreterResultsKeyActivationData { pub duration: Duration, - pub result: SharedResult>, + pub result: buck2_error::Result>, pub spans: SmallVec<[SpanId; 1]>, } diff --git a/app/buck2_interpreter_for_build/src/interpreter/cell_info.rs b/app/buck2_interpreter_for_build/src/interpreter/cell_info.rs index 8b4201b6e78ec..ac16b4f2647bd 100644 --- a/app/buck2_interpreter_for_build/src/interpreter/cell_info.rs +++ b/app/buck2_interpreter_for_build/src/interpreter/cell_info.rs @@ -7,38 +7,40 @@ * of this source tree. */ -use std::sync::Arc; - use allocative::Allocative; use buck2_core::cells::build_file_cell::BuildFileCell; +use buck2_core::cells::CellAliasResolver; use buck2_core::cells::CellResolver; -use dupe::Dupe; - -#[derive(Clone, Dupe, Debug, Allocative)] -pub struct InterpreterCellInfo(Arc); -#[derive(Debug, Allocative)] -struct Data { +#[derive(Clone, Debug, Allocative)] +pub struct InterpreterCellInfo { cell_name: BuildFileCell, cell_resolver: CellResolver, + cell_alias_resolver: CellAliasResolver, } impl InterpreterCellInfo { pub(crate) fn new( cell_name: BuildFileCell, cell_resolver: CellResolver, + cell_alias_resolver: CellAliasResolver, ) -> anyhow::Result { - Ok(Self(Arc::new(Data { + Ok(Self { cell_name, cell_resolver, - }))) + cell_alias_resolver, + }) } pub(crate) fn name(&self) -> BuildFileCell { - self.0.cell_name + self.cell_name } pub fn cell_resolver(&self) -> &CellResolver { - &self.0.cell_resolver + &self.cell_resolver + } + + pub fn cell_alias_resolver(&self) -> &CellAliasResolver { + &self.cell_alias_resolver } } diff --git a/app/buck2_interpreter_for_build/src/interpreter/check_starlark_stack_size.rs b/app/buck2_interpreter_for_build/src/interpreter/check_starlark_stack_size.rs new file mode 100644 index 0000000000000..c918de7f9ce72 --- /dev/null +++ b/app/buck2_interpreter_for_build/src/interpreter/check_starlark_stack_size.rs @@ -0,0 +1,110 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use allocative::Allocative; +use async_trait::async_trait; +use buck2_error::starlark_error::from_starlark_with_options; +use buck2_error::BuckErrorContext; +use buck2_futures::cancellation::CancellationContext; +use buck2_interpreter::dice::starlark_provider::with_starlark_eval_provider; +use buck2_interpreter::file_type::StarlarkFileType; +use buck2_interpreter::starlark_profiler::profiler::StarlarkProfilerOpt; +use dice::DiceComputations; +use dice::Key; +use indoc::indoc; +use starlark::environment::Globals; +use starlark::environment::Module; +use starlark::syntax::AstModule; + +#[derive(Debug, buck2_error::Error)] +enum CheckStarlarkStackSizeError { + #[error("Error checking starlark stack size")] + CheckStarlarkStackSizeError, +} + +// In order to prevent non deterministic crashes +// we intentionally set off a starlark stack overflow, to make +// sure that starlark catches the overflow and reports an error +// before the native stack overflows +pub(crate) async fn check_starlark_stack_size( + ctx: &mut DiceComputations<'_>, +) -> anyhow::Result<()> { + #[derive(Debug, derive_more::Display, Clone, Allocative, Eq, PartialEq, Hash)] + struct StarlarkStackSizeChecker; + + #[async_trait] + impl Key for StarlarkStackSizeChecker { + type Value = buck2_error::Result<()>; + + async fn compute( + &self, + ctx: &mut DiceComputations, + _cancellation: &CancellationContext, + ) -> Self::Value { + with_starlark_eval_provider( + ctx, + &mut StarlarkProfilerOpt::disabled(), + "Check starlark stack size".to_owned(), + move |provider, _| { + let env = Module::new(); + let (mut eval, _) = provider.make(&env)?; + let content = indoc!( + r#" + def f(): + f() + f() + "# + ); + let ast = AstModule::parse( + "x.star", + content.to_owned(), + &StarlarkFileType::Bzl.dialect(false), + ) + .map_err(|e| { + from_starlark_with_options( + e, + buck2_error::starlark_error::NativeErrorHandling::Unknown, + false, + ) + }) + .internal_error_anyhow("Failed to parse check module")?; + match eval.eval_module(ast, &Globals::standard()) { + Err(e) if e.to_string().contains("Starlark call stack overflow") => Ok(()), + Err(p) => Err(from_starlark_with_options( + p, + buck2_error::starlark_error::NativeErrorHandling::Unknown, + false, + ) + .into()), + Ok(_) => { + Err(CheckStarlarkStackSizeError::CheckStarlarkStackSizeError.into()) + } + } + }, + ) + .await?; + Ok(()) + } + + fn equality(x: &Self::Value, y: &Self::Value) -> bool { + match (x, y) { + (Ok(x), Ok(y)) => x == y, + _ => false, + } + } + + fn validity(x: &Self::Value) -> bool { + x.is_ok() + } + } + + ctx.compute(&StarlarkStackSizeChecker) + .await? 
+        .map_err(anyhow::Error::from)
+}
diff --git a/app/buck2_interpreter_for_build/src/interpreter/configuror.rs b/app/buck2_interpreter_for_build/src/interpreter/configuror.rs
index 0d1a99f0fbb4c..87c69d954380f 100644
--- a/app/buck2_interpreter_for_build/src/interpreter/configuror.rs
+++ b/app/buck2_interpreter_for_build/src/interpreter/configuror.rs
@@ -8,12 +8,12 @@
  */

 use std::fmt::Debug;
-use std::ptr;
 use std::sync::Arc;

 use allocative::Allocative;
 use buck2_common::package_listing::listing::PackageListing;
 use buck2_core::build_file_path::BuildFilePath;
+use buck2_core::target::label::interner::ConcurrentTargetLabelInterner;
 use buck2_interpreter::extra::xcode::XcodeVersionInfo;
 use buck2_interpreter::extra::InterpreterHostArchitecture;
 use buck2_interpreter::extra::InterpreterHostPlatform;
@@ -27,27 +27,12 @@
 use starlark::environment::Globals;
 use starlark::environment::GlobalsBuilder;

 use crate::attrs::coerce::ctx::BuildAttrCoercionContext;
-use crate::interpreter::build_defs::configure_base_globals;
 use crate::interpreter::cell_info::InterpreterCellInfo;
 use crate::interpreter::functions::host_info::HostInfo;
+use crate::interpreter::globals::base_globals;
 use crate::interpreter::module_internals::ModuleInternals;
 use crate::interpreter::module_internals::PackageImplicits;

-#[derive(Clone, Allocative)]
-struct ConfigureGlobalsFn(#[allocative(skip)] fn(&mut GlobalsBuilder));
-
-impl Debug for ConfigureGlobalsFn {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "ConfiguredGlobalsFn")
-    }
-}
-
-impl PartialEq for ConfigureGlobalsFn {
-    fn eq(&self, other: &Self) -> bool {
-        ptr::eq(self.0 as *const (), other.0 as *const ())
-    }
-}
-
 #[derive(Clone, Dupe, Allocative)]
 pub struct AdditionalGlobalsFn(
     #[allocative(skip)] pub Arc,
@@ -66,7 +51,7 @@ impl PartialEq for AdditionalGlobalsFn {
         // And if the compiler merges or splits vtables, we don't care,
         // because the behavior will be correct either way.
         // Anyway, this code is used only in tests.
-        #[allow(clippy::vtable_address_comparisons)]
+        #[allow(ambiguous_wide_pointer_comparisons)]
         Arc::ptr_eq(&self.0, &other.0)
     }
 }
@@ -84,10 +69,7 @@ pub struct BuildInterpreterConfiguror {
     host_info: HostInfo,
     record_target_call_stack: bool,
     skip_targets_with_duplicate_names: bool,
-    configure_build_file_globals: ConfigureGlobalsFn,
-    configure_package_file_globals: ConfigureGlobalsFn,
-    configure_extension_file_globals: ConfigureGlobalsFn,
-    configure_bxl_file_globals: ConfigureGlobalsFn,
+    global_target_interner: Arc,
     /// For test.
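The stack-size check added in `check_starlark_stack_size.rs` above works by construction: it evaluates a deliberately unbounded Starlark recursion and accepts exactly one outcome, the interpreter's own "call stack overflow" error; success or any other error means the safety margin is wrong. The acceptance logic reduces to a small matcher, sketched here against a hypothetical evaluator closure rather than starlark's real `eval_module`:

```rust
// Accept exactly one failure mode: the interpreter catching its own
// stack overflow. Anything else is a real problem.
fn check_overflow_is_caught(
    eval: impl FnOnce(&str) -> Result<(), String>,
) -> Result<(), String> {
    match eval("def f():\n    f()\nf()") {
        Err(e) if e.contains("Starlark call stack overflow") => Ok(()),
        Err(e) => Err(e),
        Ok(()) => Err("expected a stack overflow, but evaluation succeeded".to_owned()),
    }
}

fn main() {
    // A fake evaluator standing in for a real interpreter.
    let fake = |_src: &str| -> Result<(), String> {
        Err("Starlark call stack overflow".to_owned())
    };
    assert!(check_overflow_is_caught(fake).is_ok());
}
```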
additional_globals: Option, } @@ -100,63 +82,22 @@ impl BuildInterpreterConfiguror { host_xcode_version: Option, record_target_call_stack: bool, skip_targets_with_duplicate_names: bool, - configure_build_file_globals: fn(&mut GlobalsBuilder), - configure_package_file_globals: fn(&mut GlobalsBuilder), - configure_extension_file_globals: fn(&mut GlobalsBuilder), - configure_bxl_file_globals: fn(&mut GlobalsBuilder), additional_globals: Option, + global_target_interner: Arc, ) -> anyhow::Result> { Ok(Arc::new(Self { prelude_import, host_info: HostInfo::new(host_platform, host_architecture, host_xcode_version), record_target_call_stack, skip_targets_with_duplicate_names, - configure_build_file_globals: ConfigureGlobalsFn(configure_build_file_globals), - configure_package_file_globals: ConfigureGlobalsFn(configure_package_file_globals), - configure_extension_file_globals: ConfigureGlobalsFn(configure_extension_file_globals), - configure_bxl_file_globals: ConfigureGlobalsFn(configure_bxl_file_globals), additional_globals, + global_target_interner, })) } - pub(crate) fn build_file_globals(&self) -> Globals { - // We want the `native` module to contain most things, so match what is in extension files - configure_base_globals(self.configure_extension_file_globals.0) - .with(|g| { - (self.configure_build_file_globals.0)(g); - if let Some(additional_globals) = &self.additional_globals { - (additional_globals.0)(g); - } - }) - .build() - } - - pub(crate) fn package_file_globals(&self) -> Globals { - configure_base_globals(self.configure_extension_file_globals.0) - .with(|g| { - (self.configure_package_file_globals.0)(g); - if let Some(additional_globals) = &self.additional_globals { - (additional_globals.0)(g); - } - }) - .build() - } - - pub(crate) fn extension_file_globals(&self) -> Globals { - configure_base_globals(self.configure_extension_file_globals.0) - .with(|g| { - (self.configure_extension_file_globals.0)(g); - if let Some(additional_globals) = &self.additional_globals { - (additional_globals.0)(g); - } - }) - .build() - } - - pub(crate) fn bxl_file_globals(&self) -> Globals { - configure_base_globals(self.configure_extension_file_globals.0) + pub(crate) fn globals(&self) -> Globals { + base_globals() .with(|g| { - (self.configure_bxl_file_globals.0)(g); if let Some(additional_globals) = &self.additional_globals { (additional_globals.0)(g); } @@ -198,8 +139,10 @@ impl BuildInterpreterConfiguror { }); let attr_coercer = BuildAttrCoercionContext::new_with_package( cell_info.cell_resolver().dupe(), + cell_info.cell_alias_resolver().dupe(), (buildfile_path.package().dupe(), package_listing.dupe()), package_boundary_exception, + self.global_target_interner.dupe(), ); let imports = loaded_modules.imports().cloned().collect(); diff --git a/app/buck2_interpreter_for_build/src/interpreter/context.rs b/app/buck2_interpreter_for_build/src/interpreter/context.rs index e3c03837a888a..f5459957df2e2 100644 --- a/app/buck2_interpreter_for_build/src/interpreter/context.rs +++ b/app/buck2_interpreter_for_build/src/interpreter/context.rs @@ -20,7 +20,7 @@ use dupe::Dupe; use crate::interpreter::configuror::BuildInterpreterConfiguror; #[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] struct BuildContextKey(); impl InjectedKey for BuildContextKey { @@ -33,12 +33,16 @@ impl InjectedKey for BuildContextKey { #[async_trait] pub trait HasInterpreterContext { - async fn get_interpreter_configuror(&self) -> anyhow::Result>; + async fn 
get_interpreter_configuror( + &mut self, + ) -> anyhow::Result<Arc<BuildInterpreterConfiguror>>; } #[async_trait] -impl HasInterpreterContext for DiceComputations { - async fn get_interpreter_configuror(&self) -> anyhow::Result<Arc<BuildInterpreterConfiguror>> { +impl HasInterpreterContext for DiceComputations<'_> { + async fn get_interpreter_configuror( + &mut self, + ) -> anyhow::Result<Arc<BuildInterpreterConfiguror>> { Ok(self.compute(&BuildContextKey()).await?.dupe()) } } diff --git a/app/buck2_interpreter_for_build/src/interpreter/cycles.rs b/app/buck2_interpreter_for_build/src/interpreter/cycles.rs index 990a4bd6f6ff4..9241f38235907 100644 --- a/app/buck2_interpreter_for_build/src/interpreter/cycles.rs +++ b/app/buck2_interpreter_for_build/src/interpreter/cycles.rs @@ -7,15 +7,14 @@ * of this source tree. */ -use std::fmt::Display; use std::sync::Arc; use buck2_common::dice::cycles::CycleAdapterDescriptor; use buck2_interpreter::paths::module::OwnedStarlarkModulePath; use buck2_util::cycle_detector::CycleDescriptor; use derive_more::Display; +use dice::DynKey; use gazebo::prelude::VecExt; -use thiserror::Error; use crate::interpreter::dice_calculation_delegate::testing::EvalImportKey; @@ -24,15 +23,17 @@ pub struct LoadCycleDescriptor; #[derive(Debug, Clone, Eq, PartialEq, Hash, Display)] pub enum LoadCycleKey { - #[display(fmt = "{}", _0)] + #[display("{}", _0)] Module(OwnedStarlarkModulePath), } -#[derive(Debug, Error, Clone)] +#[derive(Debug, Clone)] pub struct LoadCycleError { cycle: Arc<Vec<LoadCycleKey>>, } +impl std::error::Error for LoadCycleError {} + impl Display for LoadCycleError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { writeln!(f, "Load cycle detected (`->` means \"loads\"):")?; @@ -60,7 +61,7 @@ impl CycleDescriptor for LoadCycleDescriptor { } impl CycleAdapterDescriptor for LoadCycleDescriptor { - fn to_key(key: &dyn std::any::Any) -> Option<Self::Key> { + fn to_key(key: &DynKey) -> Option<Self::Key> { key.downcast_ref::<EvalImportKey>() .map(|v| LoadCycleKey::Module(v.0.clone())) } diff --git a/app/buck2_interpreter_for_build/src/interpreter/dice_calculation_delegate.rs b/app/buck2_interpreter_for_build/src/interpreter/dice_calculation_delegate.rs index 7d690da674b8b..1f86d99fd17f3 100644 --- a/app/buck2_interpreter_for_build/src/interpreter/dice_calculation_delegate.rs +++ b/app/buck2_interpreter_for_build/src/interpreter/dice_calculation_delegate.rs @@ -14,52 +14,56 @@ use anyhow::Context; use async_trait::async_trait; use buck2_common::dice::cells::HasCellResolver; use buck2_common::dice::cycles::CycleGuard; -use buck2_common::dice::file_ops::DiceFileOps; -use buck2_common::dice::file_ops::HasFileOps; -use buck2_common::error_report::CreateErrorReport; -use buck2_common::file_ops::FileOps; +use buck2_common::dice::file_ops::DiceFileComputations; use buck2_common::legacy_configs::dice::HasLegacyConfigs; -use buck2_common::legacy_configs::dice::LegacyBuckConfigOnDice; +use buck2_common::legacy_configs::dice::OpaqueLegacyBuckConfigOnDice; use buck2_common::package_boundary::HasPackageBoundaryExceptions; -use buck2_common::package_listing::dice::HasPackageListingResolver; +use buck2_common::package_listing::dice::DicePackageListingResolver; use buck2_common::package_listing::listing::PackageListing; -use buck2_common::result::SharedResult; -use buck2_common::result::ToSharedResultExt; -use buck2_common::result::ToUnsharedResultExt; use buck2_core::build_file_path::BuildFilePath; use buck2_core::cells::build_file_cell::BuildFileCell; use buck2_core::cells::name::CellName; use buck2_core::package::PackageLabel; +use buck2_error::internal_error_anyhow; +use buck2_error::BuckErrorContext;
use buck2_events::dispatch::span; -use buck2_events::dispatch::span_async; +use buck2_events::dispatch::span_async_simple; +use buck2_futures::cancellation::CancellationContext; use buck2_interpreter::dice::starlark_provider::with_starlark_eval_provider; use buck2_interpreter::file_loader::LoadedModule; use buck2_interpreter::file_loader::ModuleDeps; use buck2_interpreter::import_paths::HasImportPaths; +use buck2_interpreter::load_module::InterpreterCalculation; use buck2_interpreter::paths::module::OwnedStarlarkModulePath; use buck2_interpreter::paths::module::StarlarkModulePath; use buck2_interpreter::paths::package::PackageFilePath; use buck2_interpreter::paths::path::StarlarkPath; -use buck2_interpreter::starlark_profiler::StarlarkProfilerOrInstrumentation; +use buck2_interpreter::starlark_profiler::config::GetStarlarkProfilerInstrumentation; +use buck2_interpreter::starlark_profiler::data::ProfileTarget; +use buck2_interpreter::starlark_profiler::profiler::StarlarkProfiler; +use buck2_interpreter::starlark_profiler::profiler::StarlarkProfilerOpt; +use buck2_interpreter::starlark_profiler::profiler::StarlarkProfilerOptVal; use buck2_node::nodes::eval_result::EvaluationResult; use buck2_node::super_package::SuperPackage; use derive_more::Display; use dice::DiceComputations; use dice::Key; use dupe::Dupe; -use futures::future; -use more_futures::cancellation::CancellationContext; +use futures::FutureExt; use starlark::codemap::FileSpan; use starlark::syntax::AstModule; +use crate::interpreter::buckconfig::ConfigsOnDiceViewForStarlark; +use crate::interpreter::cell_info::InterpreterCellInfo; +use crate::interpreter::check_starlark_stack_size::check_starlark_stack_size; use crate::interpreter::cycles::LoadCycleDescriptor; -use crate::interpreter::dice_calculation_delegate::keys::EvalImportKey; use crate::interpreter::global_interpreter_state::HasGlobalInterpreterState; use crate::interpreter::interpreter_for_cell::InterpreterForCell; +use crate::interpreter::interpreter_for_cell::ParseData; use crate::interpreter::interpreter_for_cell::ParseResult; use crate::super_package::package_value::SuperPackageValuesImpl; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum DiceCalculationDelegateError { #[error("Error evaluating build file: `{0}`")] EvalBuildFileError(BuildFilePath), @@ -68,46 +72,51 @@ enum DiceCalculationDelegateError { } #[async_trait] -pub trait HasCalculationDelegate<'c> { +pub trait HasCalculationDelegate<'c, 'd> { /// Get calculator for a file evaluation. /// /// This function only accepts cell names, but it is created /// per evaluated file (build file or `.bzl`). 
async fn get_interpreter_calculator( - &'c self, + &'c mut self, cell: CellName, build_file_cell: BuildFileCell, - ) -> anyhow::Result<DiceCalculationDelegate<'c>>; + ) -> anyhow::Result<DiceCalculationDelegate<'c, 'd>>; } #[async_trait] -impl<'c> HasCalculationDelegate<'c> for DiceComputations { +impl<'c, 'd> HasCalculationDelegate<'c, 'd> for DiceComputations<'d> { async fn get_interpreter_calculator( - &'c self, + &'c mut self, cell: CellName, build_file_cell: BuildFileCell, - ) -> anyhow::Result<DiceCalculationDelegate<'c>> { + ) -> anyhow::Result<DiceCalculationDelegate<'c, 'd>> { #[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)] - #[display(fmt = "{}@{}", _0, _1)] + #[display("{}@{}", _0, _1)] struct InterpreterConfigForCellKey(CellName, BuildFileCell); #[async_trait] impl Key for InterpreterConfigForCellKey { - type Value = SharedResult<Arc<InterpreterForCell>>; + type Value = buck2_error::Result<Arc<InterpreterForCell>>; async fn compute( &self, ctx: &mut DiceComputations, _cancellation: &CancellationContext, ) -> Self::Value { - let cell_resolver = ctx.get_cell_resolver().await?; let global_state = ctx.get_global_interpreter_state().await?; - let cell = cell_resolver.get(self.0)?; + let cell_alias_resolver = ctx.get_cell_alias_resolver(self.0).await?; let implicit_import_paths = ctx.import_paths_for_cell(self.1).await?; + let cell_info = InterpreterCellInfo::new( + self.1, + ctx.get_cell_resolver().await?, + cell_alias_resolver, + )?; + Ok(Arc::new(InterpreterForCell::new( - cell.cell_alias_resolver().dupe(), + cell_info, global_state.dupe(), implicit_import_paths, )?)) @@ -118,7 +127,6 @@ impl<'c> HasCalculationDelegate<'c> for DiceComputations { } } - let file_ops = self.file_ops(); let configs = self .compute(&InterpreterConfigForCellKey(cell, build_file_cell)) .await??; @@ -126,118 +134,69 @@ impl<'c> HasCalculationDelegate<'c> for DiceComputations { Ok(DiceCalculationDelegate { build_file_cell, ctx: self, - fs: file_ops, configs, }) } } -#[derive(Clone)] -pub struct DiceCalculationDelegate<'c> { +pub struct DiceCalculationDelegate<'c, 'd> { build_file_cell: BuildFileCell, - ctx: &'c DiceComputations, - fs: DiceFileOps<'c>, + ctx: &'c mut DiceComputations<'d>, configs: Arc<InterpreterForCell>, } -impl<'c> DiceCalculationDelegate<'c> { - /// InterpreterCalculation invokes eval_import recursively. To support - /// an embedder caching the result of that, the InterpreterCalculation will - /// call into the delegate instead of calling itself directly. - /// - /// The delegate implementation should have roughly the same behavior as - /// just forwarding directly back to the calculation. - /// - /// ```ignore - /// async fn eval_module(...) -> ... { calculation.eval_module(...).await } - /// ``` - pub async fn eval_module( - &self, - starlark_path: StarlarkModulePath<'_>, - ) -> anyhow::Result<LoadedModule> { - #[async_trait] - impl Key for EvalImportKey { - type Value = SharedResult<LoadedModule>; - async fn compute( - &self, - ctx: &mut DiceComputations, - _cancellation: &CancellationContext, - ) -> Self::Value { - let starlark_path = self.0.borrow(); - // We cannot just use the inner default delegate's eval_import - // because that wouldn't delegate back to us for inner eval_import calls. - Ok(ctx - .get_interpreter_calculator( - starlark_path.cell(), - starlark_path.build_file_cell(), - ) - .await? - .eval_module_uncached(starlark_path) - .await?) - } - - fn equality(_: &Self::Value, _: &Self::Value) -> bool { - // While it is technically possible to compare the modules - // at least for simple modules (like modules defining only string constants), - // practically it is too hard to make it work correctly for every case.
- false - } - - fn validity(x: &Self::Value) -> bool { - x.is_ok() - } - } - - self.ctx - .compute(&EvalImportKey(OwnedStarlarkModulePath::new(starlark_path))) - .await? - .unshared_error() - } - +impl<'c, 'd: 'c> DiceCalculationDelegate<'c, 'd> { async fn get_legacy_buck_config_for_starlark( - &self, - ) -> anyhow::Result<LegacyBuckConfigOnDice<'c>> { + &mut self, + ) -> anyhow::Result<OpaqueLegacyBuckConfigOnDice> { self.ctx .get_legacy_config_on_dice(self.build_file_cell.name()) .await } - async fn parse_file(&self, starlark_path: StarlarkPath<'_>) -> anyhow::Result<ParseResult> { + async fn parse_file(&mut self, starlark_path: StarlarkPath<'_>) -> anyhow::Result<ParseResult> { let content = - <dyn FileOps>::read_file(&self.fs, starlark_path.path().as_ref().as_ref()).await?; + DiceFileComputations::read_file(self.ctx, starlark_path.path().as_ref().as_ref()) .await?; self.configs.parse(starlark_path, content) } async fn eval_deps( - &self, + ctx: &mut DiceComputations<'_>, modules: &[(Option<FileSpan>, OwnedStarlarkModulePath)], ) -> anyhow::Result<ModuleDeps> { Ok(ModuleDeps( - futures::future::join_all(modules.iter().map(|(span, import)| async move { - self.eval_module(import.borrow()).await.with_context(|| { - format!( - "From load at {}", - span.as_ref() - .map_or("implicit location".to_owned(), |file_span| file_span - .resolve() - .begin_file_line() - .to_string()) - ) - }) - })) - .await - .into_iter() - .collect::<anyhow::Result<Vec<_>>>()?, + ctx.try_compute_join(modules, |ctx, (span, import)| { + async move { + ctx.get_loaded_module(import.borrow()) + .await + .with_context(|| { + format!( + "From load at {}", + span.as_ref() + .map_or("implicit location".to_owned(), |file_span| file_span + .resolve() + .begin_file_line() + .to_string()) + ) + }) + } + .boxed() + }) + .await?, )) } pub async fn prepare_eval<'a>( - &'a self, + &'a mut self, starlark_file: StarlarkPath<'_>, ) -> anyhow::Result<(AstModule, ModuleDeps)> { - let ParseResult(ast, imports) = self.parse_file(starlark_file).await?; - let fut = self.eval_deps(&imports); - let deps = LoadCycleDescriptor::guard_this(self.ctx, fut).await???; + let ParseData(ast, imports) = self.parse_file(starlark_file).await??; + let deps = CycleGuard::<LoadCycleDescriptor>::new(self.ctx)?
+ .guard_this(Self::eval_deps(self.ctx, &imports)) + .await + .into_result(self.ctx) + .await???; Ok((ast, deps)) } @@ -245,9 +204,8 @@ impl<'c> DiceCalculationDelegate<'c> { &'a self, starlark_file: StarlarkPath<'_>, content: String, - ) -> anyhow::Result<AstModule> { - let ParseResult(ast, _) = self.configs.parse(starlark_file, content)?; - Ok(ast) + ) -> anyhow::Result<ParseResult> { + self.configs.parse(starlark_file, content) } pub async fn resolve_load( @@ -259,7 +217,7 @@ impl<'c> DiceCalculationDelegate<'c> { } pub async fn eval_module_uncached( - &self, + &mut self, starlark_file: StarlarkModulePath<'_>, ) -> anyhow::Result<LoadedModule> { let (ast, deps) = self.prepare_eval(starlark_file.into()).await?; @@ -267,17 +225,20 @@ impl<'c> DiceCalculationDelegate<'c> { let buckconfig = self.get_legacy_buck_config_for_starlark().await?; let root_buckconfig = self.ctx.get_legacy_root_config_on_dice().await?; + let configs = &self.configs; + let ctx = &mut *self.ctx; + with_starlark_eval_provider( - self.ctx, - &mut StarlarkProfilerOrInstrumentation::disabled(), + ctx, + &mut StarlarkProfilerOpt::disabled(), format!("load:{}", &starlark_file), - move |provider, _| { - let evaluation = self - .configs + move |provider, ctx| { + let mut buckconfigs = + ConfigsOnDiceViewForStarlark::new(ctx, buckconfig, root_buckconfig); + let evaluation = configs .eval_module( starlark_file, - &buckconfig, - &root_buckconfig, + &mut buckconfigs, ast, loaded_modules.clone(), provider, @@ -296,27 +257,31 @@ impl<'c> DiceCalculationDelegate<'c> { .await } - /// Eval parent `PACKAGE` file for given `PACKAGE` file. + /// Eval parent `PACKAGE` file for given package file. async fn eval_parent_package_file( - &self, - file: &PackageFilePath, + &mut self, + file: PackageLabel, ) -> anyhow::Result<SuperPackage> { - match file.parent_package_file() { + let cell_resolver = self.ctx.get_cell_resolver().await?; + let proj_rel_path = cell_resolver.resolve_path(file.as_cell_path())?; + match proj_rel_path.parent() { None => { - // We are in the cell root, there's no parent. + // We are in the project root, there's no parent. Ok(SuperPackage::empty::<SuperPackageValuesImpl>()) } - Some(parent) => self.eval_package_file(&parent).await, + Some(parent) => { + let parent_cell = cell_resolver.get_cell_path(parent)?; + self.eval_package_file(PackageLabel::from_cell_path(parent_cell.as_ref())) .await + } } } /// Return `None` if there's no `PACKAGE` file in the directory. pub async fn prepare_package_file_eval( - &self, - path: &PackageFilePath, - ) -> anyhow::Result<Option<(AstModule, ModuleDeps)>> { - // This is cached if evaluating a `PACKAGE` file next to a `BUCK` file. - let dir = self.fs.read_dir(path.dir()).await?; + &mut self, + package: PackageLabel, + ) -> anyhow::Result<Option<(PackageFilePath, AstModule, ModuleDeps)>> { // Note: // * we are using `read_dir` instead of `read_path_metadata` because // * it is an extra IO, and `read_dir` is likely already cached. // * `read_path_metadata` would not tell us if the file name is `PACKAGE` // and not `package` on case-insensitive filesystems. // We do case-sensitive comparison for `BUCK` files, so we do the same here. // * we fail here if `PACKAGE` (but not `package`) exists, and it is not a file. - if !dir.contains(PackageFilePath::PACKAGE_FILE_NAME) { - return Ok(None); + + // package file results capture starlark values and so cannot be checked for equality. This means we + // can't get early cutoff for the consumers, and so we need to be careful to ensure our deps are precise. + // Otherwise noop package value recomputations can lead to large recompute costs.
+ // + // Here we put the package file check behind an additional dice key so that we don't recompute on irrelevant + // changes to the directory contents. + #[derive(Debug, Display, Clone, Allocative, Eq, PartialEq, Hash)] + struct PackageFileLookupKey(PackageLabel); + + #[async_trait] + impl Key for PackageFileLookupKey { + type Value = buck2_error::Result<Option<Arc<PackageFilePath>>>; + + async fn compute( + &self, + ctx: &mut DiceComputations, + _cancellation: &CancellationContext, + ) -> Self::Value { + // This is cached if evaluating a `PACKAGE` file next to a `BUCK` file. + let dir = DiceFileComputations::read_dir(ctx, self.0.as_cell_path()).await?; + for package_file_path in PackageFilePath::for_dir(self.0.as_cell_path()) { + if !dir.contains( + package_file_path + .path() + .path() + .file_name() + .internal_error_anyhow("Must have name")?, + ) { + continue; + } + return Ok(Some(Arc::new(package_file_path))); + } + Ok(None) + } + + fn equality(x: &Self::Value, y: &Self::Value) -> bool { + match (x, y) { + (Ok(x), Ok(y)) => x == y, + _ => false, + } + } + + fn validity(x: &Self::Value) -> bool { + x.is_ok() + } + } + + match self + .ctx + .compute(&PackageFileLookupKey(package.dupe())) + .await?? + { + Some(package_file_path) => { + let (module, deps) = self + .prepare_eval(StarlarkPath::PackageFile(&package_file_path)) + .await?; + Ok(Some(((*package_file_path).clone(), module, deps))) + } + None => Ok(None), } - Ok(Some( - self.prepare_eval(StarlarkPath::PackageFile(path)).await?, - )) } async fn eval_package_file_uncached( - &self, - path: &PackageFilePath, + &mut self, + path: PackageLabel, ) -> anyhow::Result<SuperPackage> { - let parent = self.eval_parent_package_file(path); - let prepare_eval = self.prepare_package_file_eval(path); + let parent = self.eval_parent_package_file(path.dupe()).await?; + let ast_deps = self.prepare_package_file_eval(path.dupe()).await?; - let (parent, ast_deps) = future::try_join(parent, prepare_eval).await?; - - let (ast, deps) = match ast_deps { + let (package_file_path, ast, deps) = match ast_deps { Some(x) => x, None => { // If there's no `PACKAGE` file, return parent.
@@ -351,18 +369,24 @@ impl<'c> DiceCalculationDelegate<'c> { let buckconfig = self.get_legacy_buck_config_for_starlark().await?; let root_buckconfig = self.ctx.get_legacy_root_config_on_dice().await?; + + let configs = &self.configs; + let ctx = &mut *self.ctx; + with_starlark_eval_provider( - self.ctx, - &mut StarlarkProfilerOrInstrumentation::disabled(), + ctx, + &mut StarlarkProfilerOpt::disabled(), format!("load:{}", path), - move |provider, _| { - self.configs + move |provider, ctx| { + let mut buckconfigs = + ConfigsOnDiceViewForStarlark::new(ctx, buckconfig, root_buckconfig); + + configs .eval_package_file( - path, + &package_file_path, ast, parent, - &buckconfig, - &root_buckconfig, + &mut buckconfigs, deps.get_loaded_modules(), provider, ) @@ -373,29 +397,29 @@ impl<'c> DiceCalculationDelegate<'c> { } pub(crate) async fn eval_package_file( - &self, - path: &PackageFilePath, + &mut self, + path: PackageLabel, ) -> anyhow::Result<SuperPackage> { #[derive(Debug, Display, Clone, Allocative, Eq, PartialEq, Hash)] - struct PackageFileKey(PackageFilePath); + struct PackageFileKey(PackageLabel); #[async_trait] impl Key for PackageFileKey { - type Value = SharedResult<SuperPackage>; + type Value = buck2_error::Result<SuperPackage>; async fn compute( &self, ctx: &mut DiceComputations, _cancellation: &CancellationContext, ) -> Self::Value { - let interpreter = ctx - .get_interpreter_calculator(self.0.cell(), self.0.build_file_cell()) - .await - .shared_error()?; + let cell_name = self.0.as_cell_path().cell(); + let mut interpreter = ctx + .get_interpreter_calculator(cell_name, BuildFileCell::new(cell_name)) + .await?; interpreter - .eval_package_file_uncached(&self.0) + .eval_package_file_uncached(self.0.dupe()) .await - .shared_error() + .map_err(buck2_error::Error::from) } fn equality(x: &Self::Value, y: &Self::Value) -> bool { @@ -411,72 +435,83 @@ impl<'c> DiceCalculationDelegate<'c> { } self.ctx - .compute(&PackageFileKey(path.clone())) + .compute(&PackageFileKey(path)) .await? - .unshared_error() + .map_err(anyhow::Error::from) } /// Most directories do not contain a `PACKAGE` file; this function /// optimizes `eval_package_file` for this case by avoiding creation of a DICE key. pub(crate) async fn eval_package_file_for_build_file( - &self, + &mut self, package: PackageLabel, package_listing: &PackageListing, ) -> anyhow::Result<SuperPackage> { - let package_file_path = PackageFilePath::for_dir(package.as_cell_path()); - if package_listing - .get_file(PackageFilePath::PACKAGE_FILE_NAME.as_ref()) - .is_none() - { - // Without this optimization, `cquery ` has 6% time regression. - // With this optimization, check for `PACKAGE` files adds 2% to time. - self.eval_parent_package_file(&package_file_path).await - } else { - self.eval_package_file(&package_file_path).await + for package_file_name in PackageFilePath::package_file_names() { + if package_listing .get_file(package_file_name.as_ref()) .is_some() { return self.eval_package_file(package).await; } } + + // Without this optimization, `cquery ` has 6% time regression. + // With this optimization, check for `PACKAGE` files adds 2% to time.
+ self.eval_parent_package_file(package).await } async fn resolve_package_listing( - &self, + ctx: &mut DiceComputations<'_>, package: PackageLabel, ) -> anyhow::Result<PackageListing> { - span_async( + span_async_simple( buck2_data::LoadPackageStart { path: package.as_cell_path().to_string(), }, - async { - let result = self.ctx.resolve_package_listing(package.dupe()).await; - let error = result.create_error_report(); - ( - result, - buck2_data::LoadPackageEnd { - path: package.as_cell_path().to_string(), - error, - }, - ) + DicePackageListingResolver(ctx).resolve_package_listing(package.dupe()), + buck2_data::LoadPackageEnd { + path: package.as_cell_path().to_string(), }, ) .await } pub async fn eval_build_file( - &self, + &mut self, package: PackageLabel, - profiler_instrumentation: &mut StarlarkProfilerOrInstrumentation<'_>, - ) -> SharedResult<Arc<EvaluationResult>> { - let listing = self.resolve_package_listing(package.dupe()).await?; + ) -> buck2_error::Result<Arc<EvaluationResult>> { + let ((), listing, profile_mode) = self + .ctx + .try_compute3( + |ctx| check_starlark_stack_size(ctx).boxed(), + |ctx| Self::resolve_package_listing(ctx, package.dupe()).boxed(), + |ctx| ctx.get_profile_mode_for_loading(package).boxed(), + ) + .await?; - let build_file_path = BuildFilePath::new(package.dupe(), listing.buildfile().to_owned()); - let ast_deps = self.prepare_eval(StarlarkPath::BuildFile(&build_file_path)); + let profiler_opt = profile_mode.profile_mode().map(|profile_mode| { + StarlarkProfiler::new(profile_mode.dupe(), false, ProfileTarget::Loading(package)) }); - let super_package = self.eval_package_file_for_build_file(package.dupe(), &listing); + let mut profiler = match profiler_opt { + None => StarlarkProfilerOptVal::Disabled, + Some(profiler) => StarlarkProfilerOptVal::Profiler(profiler), }; - let ((ast, deps), super_package) = future::try_join(ast_deps, super_package).await?; + let build_file_path = BuildFilePath::new(package.dupe(), listing.buildfile().to_owned()); + let (ast, deps) = self + .prepare_eval(StarlarkPath::BuildFile(&build_file_path)) + .await?; + let super_package = self + .eval_package_file_for_build_file(package.dupe(), &listing) + .await?; let package_boundary_exception = self .ctx .get_package_boundary_exception(package.as_cell_path()) - .await?; + .await?
+ .is_some(); let buckconfig = self.get_legacy_buck_config_for_starlark().await?; let root_buckconfig = self.ctx.get_legacy_root_config_on_dice().await?; let module_id = build_file_path.to_string(); @@ -485,18 +520,23 @@ impl<'c> DiceCalculationDelegate<'c> { cell: cell_str.clone(), module_id: module_id.clone(), }; - with_starlark_eval_provider( - self.ctx, - profiler_instrumentation, + + let configs = &self.configs; + let ctx = &mut *self.ctx; + + let mut eval_result = with_starlark_eval_provider( + ctx, + &mut profiler.as_mut(), format!("load_buildfile:{}", &package), - move |provider, _| { + move |provider, ctx| { + let mut buckconfigs = + ConfigsOnDiceViewForStarlark::new(ctx, buckconfig, root_buckconfig); + span(start_event, move || { - let result = self - .configs + let result_with_stats = configs .eval_build_file( &build_file_path, - &buckconfig, - &root_buckconfig, + &mut buckconfigs, listing, super_package, package_boundary_exception, @@ -508,22 +548,41 @@ impl<'c> DiceCalculationDelegate<'c> { .with_context(|| { DiceCalculationDelegateError::EvalBuildFileError(build_file_path) }); - let error = result.as_ref().err().map(|e| format!("{:#}", e)); + let error = result_with_stats.as_ref().err().map(|e| format!("{:#}", e)); + let starlark_peak_allocated_bytes = result_with_stats + .as_ref() + .ok() + .map(|rs| rs.starlark_peak_allocated_bytes); + let cpu_instruction_count = result_with_stats + .as_ref() + .ok() + .and_then(|rs| rs.cpu_instruction_count); + let result = result_with_stats.map(|rs| rs.result); + let target_count = result.as_ref().ok().map(|rs| rs.targets().len() as u64); ( result, buck2_data::LoadBuildFileEnd { module_id, cell: cell_str, + target_count, + starlark_peak_allocated_bytes, + cpu_instruction_count, error, }, ) }) }, ) - .await - .map(Arc::new) - .shared_error() + .await?; + let profile_data = profiler.finish()?; + if eval_result.starlark_profile.is_some() { + return Err( + internal_error_anyhow!("starlark_profile field must not be set yet").into(), + ); + } + eval_result.starlark_profile = profile_data.map(|d| Arc::new(d) as _); + Ok(Arc::new(eval_result)) } } diff --git a/app/buck2_interpreter_for_build/src/interpreter/extra_value.rs b/app/buck2_interpreter_for_build/src/interpreter/extra_value.rs new file mode 100644 index 0000000000000..859bbc50cc27b --- /dev/null +++ b/app/buck2_interpreter_for_build/src/interpreter/extra_value.rs @@ -0,0 +1,76 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::cell::OnceCell; + +use allocative::Allocative; +use buck2_error::BuckErrorContext; +use starlark::any::ProvidesStaticType; +use starlark::environment::FrozenModule; +use starlark::environment::Module; +use starlark::values::any_complex::StarlarkAnyComplex; +use starlark::values::Freeze; +use starlark::values::Freezer; +use starlark::values::OwnedFrozenValueTyped; +use starlark::values::Trace; +use starlark::values::ValueLike; + +use crate::interpreter::package_file_extra::FrozenPackageFileExtra; +use crate::interpreter::package_file_extra::PackageFileExtra; + +/// `Module.extra_value` when evaluating build, bzl, package, and bxl files. 
+#[derive(Default, Debug, ProvidesStaticType, Allocative, Trace)] +pub(crate) struct InterpreterExtraValue<'v> { + /// Set when evaluating `PACKAGE` files. + pub(crate) package_extra: OnceCell<PackageFileExtra<'v>>, +} + +#[derive(Debug, ProvidesStaticType, Allocative)] +pub(crate) struct FrozenInterpreterExtraValue { + pub(crate) package_extra: Option<FrozenPackageFileExtra>, +} + +impl<'v> Freeze for InterpreterExtraValue<'v> { + type Frozen = FrozenInterpreterExtraValue; + + fn freeze(self, freezer: &Freezer) -> anyhow::Result<Self::Frozen> { + Ok(FrozenInterpreterExtraValue { + package_extra: self + .package_extra + .into_inner() + .map(|p| p.freeze(freezer)) + .transpose()?, + }) + } +} + +impl<'v> InterpreterExtraValue<'v> { + pub(crate) fn get(module: &'v Module) -> anyhow::Result<&'v InterpreterExtraValue<'v>> { + Ok(&module + .extra_value() + .internal_error_anyhow("Extra value is missing")? + .downcast_ref::<StarlarkAnyComplex<InterpreterExtraValue>>() + .internal_error_anyhow("Extra value had wrong type")? + .value) + } +} + +impl FrozenInterpreterExtraValue { + pub(crate) fn get( + module: &FrozenModule, + ) -> anyhow::Result<OwnedFrozenValueTyped<StarlarkAnyComplex<FrozenInterpreterExtraValue>>> + { + module + .owned_extra_value() + .internal_error_anyhow("Extra value is missing")? + .downcast() + .ok() + .internal_error_anyhow("Extra value had wrong type") + } +} diff --git a/app/buck2_interpreter_for_build/src/interpreter/functions.rs b/app/buck2_interpreter_for_build/src/interpreter/functions.rs new file mode 100644 index 0000000000000..8931806d5567a --- /dev/null +++ b/app/buck2_interpreter_for_build/src/interpreter/functions.rs @@ -0,0 +1,20 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub(crate) mod dedupe; +pub(crate) mod host_info; +pub(crate) mod internals; +pub(crate) mod load_symbols; +pub(crate) mod path; +pub(crate) mod read_config; +pub(crate) mod regex; +pub(crate) mod sha256; +pub(crate) mod soft_error; +pub(crate) mod starlark; +pub(crate) mod warning; diff --git a/app/buck2_interpreter_for_build/src/interpreter/functions/dedupe.rs b/app/buck2_interpreter_for_build/src/interpreter/functions/dedupe.rs index 8bae16455927e..08c05035dcf23 100644 --- a/app/buck2_interpreter_for_build/src/interpreter/functions/dedupe.rs +++ b/app/buck2_interpreter_for_build/src/interpreter/functions/dedupe.rs @@ -22,13 +22,11 @@ pub(crate) fn register_dedupe(builder: &mut GlobalsBuilder) { fn dedupe<'v>( #[starlark(require = pos)] val: Value<'v>, heap: &'v Heap, - ) -> anyhow::Result<Value<'v>> { + ) -> starlark::Result<Value<'v>> { let mut seen = HashSet::new(); let mut res = Vec::new(); for v in val.iterate(heap)? { - let p = v.identity(); - if !seen.contains(&p) { - seen.insert(p); + if seen.insert(v.identity()) { res.push(v); } } diff --git a/app/buck2_interpreter_for_build/src/interpreter/functions/host_info.rs b/app/buck2_interpreter_for_build/src/interpreter/functions/host_info.rs index ba7fe011c3118..d88fba5528a5e 100644 --- a/app/buck2_interpreter_for_build/src/interpreter/functions/host_info.rs +++ b/app/buck2_interpreter_for_build/src/interpreter/functions/host_info.rs @@ -106,7 +106,7 @@ fn new_host_info( } #[starlark_module] -pub fn register_host_info(builder: &mut GlobalsBuilder) { +pub(crate) fn register_host_info(builder: &mut GlobalsBuilder) { /// The `host_info()` function is used to get the current OS and processor architecture on the host.
The structure returned is laid out thusly: /// /// ```python /// @@ -136,7 +136,7 @@ pub fn register_host_info(builder: &mut GlobalsBuilder) { /// ``` #[starlark(speculative_exec_safe)] fn host_info<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result<ValueOfUnchecked<'v, StructRef<'static>>> { // Keeping this `speculative_exec_safe` is safe because BuildContext's `HostInfo`, // even when evaluated speculatively, is going to be the same across all interpreters @@ -150,7 +150,7 @@ pub fn register_host_info(builder: &mut GlobalsBuilder) { #[derive(Derivative, Clone, Debug, Allocative)] #[derivative(PartialEq)] -pub struct HostInfo { +pub(crate) struct HostInfo { // These first three fields are for equality only, otherwise not used platform: InterpreterHostPlatform, arch: InterpreterHostArchitecture, diff --git a/app/buck2_interpreter_for_build/src/interpreter/functions/internals.rs b/app/buck2_interpreter_for_build/src/interpreter/functions/internals.rs new file mode 100644 index 0000000000000..79baf8b8f4c94 --- /dev/null +++ b/app/buck2_interpreter_for_build/src/interpreter/functions/internals.rs @@ -0,0 +1,39 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use starlark::environment::GlobalsBuilder; +use starlark::eval::Evaluator; +use starlark::starlark_module; +use starlark::values::none::NoneType; + +use crate::interpreter::module_internals::ModuleInternals; + +#[derive(buck2_error::Error, Debug)] +#[error("Fail: {0}")] +struct BuckFail(String); + +/// Registers functions that are only available in the `__internal__` global and not meant to be +/// stable. +#[starlark_module] +pub(crate) fn register_internals(builder: &mut GlobalsBuilder) { + /// `fail()` but implemented using a buck2 error type instead of starlark's, for testing + /// purposes. + fn buck2_fail<'v>(msg: &str, _eval: &mut Evaluator<'v, '_, '_>) -> anyhow::Result<NoneType> { + Err(BuckFail(msg.to_owned()).into()) + } + + /// Returns a list of direct subpackage relative paths of current package. + fn sub_packages<'v>(eval: &mut Evaluator<'v, '_, '_>) -> anyhow::Result<Vec<String>> { + let extra = ModuleInternals::from_context(eval, "sub_packages")?; + Ok(extra + .sub_packages() + .map(|p| p.as_str().to_owned()) + .collect()) + } +} diff --git a/app/buck2_interpreter_for_build/src/interpreter/functions/load_symbols.rs b/app/buck2_interpreter_for_build/src/interpreter/functions/load_symbols.rs index 12a056ac26bbd..693a1a9f22f18 100644 --- a/app/buck2_interpreter_for_build/src/interpreter/functions/load_symbols.rs +++ b/app/buck2_interpreter_for_build/src/interpreter/functions/load_symbols.rs @@ -15,7 +15,7 @@ use starlark::values::none::NoneType; use starlark::values::Value; #[starlark_module] -pub fn register_load_symbols(builder: &mut GlobalsBuilder) { +pub(crate) fn register_load_symbols(builder: &mut GlobalsBuilder) { /// Used in a `.bzl` file to set exported symbols. In most cases just defining /// the symbol as a top-level binding is sufficient, but sometimes the names /// might be programmatically generated. @@ -25,8 +25,8 @@ pub fn register_load_symbols(builder: &mut GlobalsBuilder) { /// This function should be used rarely.
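To make the `host_info()` contract above concrete, here is a hedged usage sketch in Starlark. The field names (`os.is_linux`, `os.is_macos`) follow the structure the doc comment describes, but the flag values chosen are invented for illustration:

```python
# BUCK or .bzl file: pick settings based on the machine evaluating this file.
info = host_info()

if info.os.is_linux:
    linker_flags = ["-fuse-ld=lld"]  # illustrative value only
elif info.os.is_macos:
    linker_flags = []                # illustrative value only
else:
    linker_flags = []
```

Because the call is `speculative_exec_safe`, the same struct is returned no matter which interpreter evaluates the file, which is what makes caching the result sound.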
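Since `load_symbols()` exists precisely for programmatically generated names, a short hedged sketch may help; the file name and generated bindings here are invented:

```python
# defs.bzl: export a family of generated constants in one call.
load_symbols({"LEVEL_" + str(i): i for i in range(3)})
```

```python
# Another file can then load the generated names as if they were
# ordinary top-level bindings:
load("//:defs.bzl", "LEVEL_2")
```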
fn load_symbols<'v>( symbols: SmallMap<&'v str, Value<'v>>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result<NoneType> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> starlark::Result<NoneType> { for (k, v) in symbols.into_iter() { eval.set_module_variable_at_some_point(k, v)?; } diff --git a/app/buck2_interpreter_for_build/src/interpreter/functions/mod.rs b/app/buck2_interpreter_for_build/src/interpreter/functions/mod.rs deleted file mode 100644 index faf1f5dd864b7..0000000000000 --- a/app/buck2_interpreter_for_build/src/interpreter/functions/mod.rs +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -pub(crate) mod dedupe; -pub mod host_info; -pub mod load_symbols; -pub mod read_config; -pub(crate) mod regex; -pub mod sha256; -pub mod soft_error; -pub mod warning; diff --git a/app/buck2_interpreter_for_build/src/interpreter/functions/path.rs b/app/buck2_interpreter_for_build/src/interpreter/functions/path.rs new file mode 100644 index 0000000000000..e06d71bfc2580 --- /dev/null +++ b/app/buck2_interpreter_for_build/src/interpreter/functions/path.rs @@ -0,0 +1,113 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use starlark::environment::GlobalsBuilder; +use starlark::eval::Evaluator; +use starlark::starlark_module; +use starlark::values::list::AllocList; +use starlark::values::list::UnpackList; +use starlark::values::list_or_tuple::UnpackListOrTuple; +use starlark::values::ValueOfUnchecked; + +use crate::interpreter::build_context::BuildContext; +use crate::interpreter::globspec::GlobSpec; +use crate::interpreter::module_internals::ModuleInternals; + +#[starlark_module] +pub(crate) fn register_path(builder: &mut GlobalsBuilder) { + /// The `glob()` function specifies a set of files using patterns. + /// Only available from `BUCK` files. + /// + /// A typical `glob` call looks like: + /// + /// ```python + /// glob(["foo/**/*.h"]) + /// ``` + /// + /// This call will match all header files in the `foo` directory, recursively. + /// + /// You can also pass a named `exclude` parameter to remove files matching a pattern: + /// + /// ```python + /// glob(["foo/**/*.h"], exclude = ["**/config.h"]) + /// ``` + /// + /// This call will remove all `config.h` files from the initial match. + /// + /// The `glob()` call is evaluated against the list of files owned by this `BUCK` file. + /// A file is owned by whichever `BUCK` file is closest above it - so given `foo/BUCK` and + /// `foo/bar/BUCK` the file `foo/file.txt` would be owned by `foo/BUCK` (and available from + /// its `glob` results) but the file `foo/bar/file.txt` would be owned by `foo/bar/BUCK` + /// and _not_ appear in the glob result of `foo/BUCK`, even if you write `glob(["bar/file.txt"])`. + /// As a consequence of this rule, `glob(["../foo.txt"])` will always return an empty list of files. + /// + /// Currently `glob` is evaluated case-insensitively on all file systems, but we expect + /// that to change to case-sensitive in the near future.
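A hedged end-to-end sketch of the ownership semantics described above; the directory layout and patterns are invented:

```python
# foo/BUCK: headers owned by this package, minus generated config headers.
headers = glob(
    ["**/*.h"],
    exclude = ["**/config.h"],
)

# Files under foo/bar/ are owned by foo/bar/BUCK (if one exists), so they
# never appear here; likewise glob(["../other.h"]) would return [].
```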
+ fn glob<'v>( + include: UnpackListOrTuple<String>, + #[starlark(require = named, default=UnpackListOrTuple::default())] + exclude: UnpackListOrTuple<String>, + eval: &mut Evaluator<'v, '_, '_>, + ) -> anyhow::Result<ValueOfUnchecked<'v, UnpackList<String>>> { + let extra = ModuleInternals::from_context(eval, "glob")?; + let spec = GlobSpec::new(&include.items, &exclude.items)?; + let res = extra.resolve_glob(&spec).map(|path| path.as_str()); + Ok(eval.heap().alloc_typed_unchecked(AllocList(res)).cast()) + } + + /// `package_name()` can only be called in buildfiles (e.g. BUCK files) or PACKAGE files, and returns the name of the package. + /// E.g. inside `foo//bar/baz/BUCK` the output will be `bar/baz`. + /// E.g. inside `foo//bar/PACKAGE` the output will be `bar`. + fn package_name(eval: &mut Evaluator) -> anyhow::Result<String> { + // An (IMO) unfortunate choice in the skylark api is that this just gives the cell-relative + // path of the package (which isn't a unique "name" for the package) + Ok(BuildContext::from_context(eval)? + .base_path()? + .path() + .to_string()) + } + + /// `get_base_path()` can only be called in buildfiles (e.g. BUCK files) or PACKAGE files, and returns the name of the package. + /// E.g. inside `foo//bar/baz/BUCK` the output will be `bar/baz`. + /// E.g. inside `foo//bar/PACKAGE` the output will be `bar`. + /// + /// This function is identical to `package_name`. + fn get_base_path(eval: &mut Evaluator) -> anyhow::Result<String> { + Ok(BuildContext::from_context(eval)? + .base_path()? + .path() + .to_string()) + } + + /// Like `get_cell_name()` but prepends a leading `@` for compatibility with Buck1. + /// You should call `get_cell_name()` instead, and if you really want the `@`, + /// prepend it yourself. + fn repository_name(eval: &mut Evaluator) -> anyhow::Result<String> { + // In Buck v1 the repository name has a leading `@` on it, so match that with v2. + // In practice, most users do `repository_name()[1:]` to drop it. + Ok(format!( + "@{}", + BuildContext::from_context(eval)?.cell_info().name() + )) + } + + /// `get_cell_name()` can be called from either a `BUCK` file or a `.bzl` file, + /// and returns the name of the cell where the `BUCK` file that started the call + /// lives. + /// + /// For example, inside `foo//bar/baz/BUCK` the output will be `foo`. + /// If that `BUCK` file does a `load("hello//world.bzl", "something")` then + /// the result in that `.bzl` file will also be `foo`. + fn get_cell_name(eval: &mut Evaluator) -> anyhow::Result<String> { + Ok(BuildContext::from_context(eval)? + .cell_info() + .name() + .to_string()) + } +} diff --git a/app/buck2_interpreter_for_build/src/interpreter/functions/read_config.rs b/app/buck2_interpreter_for_build/src/interpreter/functions/read_config.rs index 0a274e264953e..ed3212575caa5 100644 --- a/app/buck2_interpreter_for_build/src/interpreter/functions/read_config.rs +++ b/app/buck2_interpreter_for_build/src/interpreter/functions/read_config.rs @@ -18,7 +18,7 @@ use crate::interpreter::build_context::BuildContext; #[starlark_module] -pub fn register_read_config(globals: &mut GlobalsBuilder) { +pub(crate) fn register_read_config(globals: &mut GlobalsBuilder) { /// Read a configuration from the nearest enclosing `.buckconfig` /// of the `BUCK` file that started evaluation of this code.
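Putting the four package/cell helpers registered above side by side; the cell and package names assume a hypothetical build file `foo//bar/baz/BUCK`:

```python
# Inside foo//bar/baz/BUCK:
get_cell_name()    # "foo"
package_name()     # "bar/baz"
get_base_path()    # "bar/baz" (identical to package_name())
repository_name()  # "@foo"   (Buck1 compatibility; prefer get_cell_name())
```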
/// @@ -44,10 +44,10 @@ pub fn register_read_config(globals: &mut GlobalsBuilder) { section: StringValue, key: StringValue, default: Option<Value<'v>>, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result<Value<'v>> { - let buckconfig = &BuildContext::from_context(eval)?.buckconfig; - match buckconfig.get(section, key)? { + let buckconfigs = &BuildContext::from_context(eval)?.buckconfigs; + match buckconfigs.current_cell_get(section, key)? { Some(v) => Ok(v.to_value()), None => Ok(default.unwrap_or_else(Value::new_none)), } @@ -61,10 +61,10 @@ pub fn register_read_config(globals: &mut GlobalsBuilder) { #[starlark(require = pos)] key: StringValue, // Unlike `read_config` we only allow string or `None` as default. #[starlark(require = pos, default = NoneOr::None)] default: NoneOr<StringValue<'v>>, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result<NoneOr<StringValue<'v>>> { - let buckconfig = &BuildContext::from_context(eval)?.root_buckconfig; - match buckconfig.get(section, key)? { + let buckconfigs = &BuildContext::from_context(eval)?.buckconfigs; + match buckconfigs.root_cell_get(section, key)? { Some(v) => Ok(NoneOr::Other(v.to_string_value())), None => Ok(default), } diff --git a/app/buck2_interpreter_for_build/src/interpreter/functions/regex.rs b/app/buck2_interpreter_for_build/src/interpreter/functions/regex.rs index 7fe087fb849ca..1f86c752417d4 100644 --- a/app/buck2_interpreter_for_build/src/interpreter/functions/regex.rs +++ b/app/buck2_interpreter_for_build/src/interpreter/functions/regex.rs @@ -13,7 +13,7 @@ use starlark::starlark_module; // TODO(nga): drop it, and only use `regex` function. #[starlark_module] -pub fn register_regex(builder: &mut GlobalsBuilder) { +pub(crate) fn register_regex(builder: &mut GlobalsBuilder) { /// Test if a regular expression matches a string. Fails if the regular expression /// is malformed. /// diff --git a/app/buck2_interpreter_for_build/src/interpreter/functions/sha256.rs b/app/buck2_interpreter_for_build/src/interpreter/functions/sha256.rs index 02aa6d26c65a5..2e7ebcfb67df1 100644 --- a/app/buck2_interpreter_for_build/src/interpreter/functions/sha256.rs +++ b/app/buck2_interpreter_for_build/src/interpreter/functions/sha256.rs @@ -14,7 +14,7 @@ use starlark::starlark_module; /// Contains functions that we include in all contexts. #[starlark_module] -pub fn register_sha256(builder: &mut GlobalsBuilder) { +pub(crate) fn register_sha256(builder: &mut GlobalsBuilder) { /// Computes a sha256 digest for a string. Returns the hex representation of the digest.
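A hedged sketch of the two config readers just diffed. `read_config` consults the `.buckconfig` of the cell being evaluated, while `read_root_config` consults only the root cell and restricts the default to a string or `None`; the section and key names below are invented:

```python
# Falls back to "python3" if [python] interpreter is unset in this cell.
interpreter = read_config("python", "interpreter", "python3")

# Root-cell-only lookup; default must be a string or None.
mode = read_root_config("project", "mode", None)
```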
/// /// ```python diff --git a/app/buck2_interpreter_for_build/src/interpreter/functions/soft_error.rs b/app/buck2_interpreter_for_build/src/interpreter/functions/soft_error.rs index 0a072407bf46b..501484f9eac94 100644 --- a/app/buck2_interpreter_for_build/src/interpreter/functions/soft_error.rs +++ b/app/buck2_interpreter_for_build/src/interpreter/functions/soft_error.rs @@ -13,7 +13,7 @@ use starlark::eval::Evaluator; use starlark::starlark_module; use starlark::values::none::NoneType; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum SoftErrorError { #[error("Error produced by Starlark: {category}: {message}\n{call_stack}")] StarlarkSoftError { @@ -30,7 +30,7 @@ } #[starlark_module] -pub fn register_soft_error(builder: &mut GlobalsBuilder) { +pub(crate) fn register_soft_error(builder: &mut GlobalsBuilder) { /// Produce an error that will become a hard error at some point in the future, but /// for now is a warning which is logged to the server. /// In the open source version of Buck2 this function always results in an error. @@ -51,7 +51,7 @@ pub fn register_soft_error(builder: &mut GlobalsBuilder) { #[starlark(require = pos)] message: String, #[starlark(require = named)] quiet: Option<bool>, #[starlark(require = named)] stack: Option<bool>, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result<NoneType> { if !category.starts_with("starlark_") { return Err(SoftErrorError::InvalidCategory(category.to_owned()).into()); } diff --git a/app/buck2_interpreter_for_build/src/interpreter/functions/starlark.rs b/app/buck2_interpreter_for_build/src/interpreter/functions/starlark.rs new file mode 100644 index 0000000000000..3c38bf27a39a1 --- /dev/null +++ b/app/buck2_interpreter_for_build/src/interpreter/functions/starlark.rs @@ -0,0 +1,38 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use starlark::environment::GlobalsBuilder; +use starlark::eval::Evaluator; +use starlark::starlark_module; +use starlark::values::none::NoneType; + +use crate::interpreter::build_context::BuildContext; + +#[derive(Debug, buck2_error::Error)] +enum StarlarkPeakMemoryError { + #[error("starlark peak memory already set in this file")] + MemorySetInThisFile(), +} + +#[starlark_module] +pub(crate) fn register_set_starlark_peak_allocated_byte_limit(globals: &mut GlobalsBuilder) { + /// Set the limit on peak allocated bytes during evaluation of the build context.
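Usage sketch for `soft_error()` as registered above; the enforced `starlark_` category prefix is visible in the code, but the category, message, and the exact effect of the optional named flags are assumptions for illustration:

```python
soft_error(
    "starlark_deprecated_macro",
    "my_macro() is deprecated; migrate to my_new_macro()",
    quiet = True,   # assumption: suppresses console output
    stack = False,  # assumption: omits the Starlark call stack
)
```

Note that in the open source build this call always produces a hard error, per the doc comment above.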
+ /// Errors if it has already been set. + fn set_starlark_peak_allocated_byte_limit<'v>( + #[starlark(require = pos)] value: u64, + eval: &mut Evaluator<'v, '_, '_>, + ) -> anyhow::Result<NoneType> { + let build_ctx = BuildContext::from_context(eval)?; + let limit = &build_ctx.starlark_peak_allocated_byte_limit; + if limit.get().is_some() || limit.set(Some(value)).is_err() { + return Err(StarlarkPeakMemoryError::MemorySetInThisFile().into()); + } + Ok(NoneType) + } +} diff --git a/app/buck2_interpreter_for_build/src/interpreter/functions/warning.rs b/app/buck2_interpreter_for_build/src/interpreter/functions/warning.rs index af196bf854d6f..cf0ad835b35b9 100644 --- a/app/buck2_interpreter_for_build/src/interpreter/functions/warning.rs +++ b/app/buck2_interpreter_for_build/src/interpreter/functions/warning.rs @@ -12,7 +12,7 @@ use starlark::starlark_module; use starlark::values::none::NoneType; #[starlark_module] -pub fn register_warning(builder: &mut GlobalsBuilder) { +pub(crate) fn register_warning(builder: &mut GlobalsBuilder) { /// Print a warning. The line will be decorated with the timestamp and other details, /// including the word `WARN` (colored, if the console supports it). /// diff --git a/app/buck2_interpreter_for_build/src/interpreter/global_interpreter_state.rs b/app/buck2_interpreter_for_build/src/interpreter/global_interpreter_state.rs index 937182ab4e6a5..ee277e483b060 100644 --- a/app/buck2_interpreter_for_build/src/interpreter/global_interpreter_state.rs +++ b/app/buck2_interpreter_for_build/src/interpreter/global_interpreter_state.rs @@ -7,26 +7,19 @@ * of this source tree. */ -use std::collections::HashMap; use std::sync::Arc; use allocative::Allocative; use async_trait::async_trait; use buck2_common::dice::cells::HasCellResolver; -use buck2_common::legacy_configs::dice::HasLegacyConfigs; -use buck2_common::legacy_configs::view::LegacyBuckConfigsView; -use buck2_common::result::SharedResult; -use buck2_core::cells::build_file_cell::BuildFileCell; use buck2_core::cells::CellResolver; +use buck2_futures::cancellation::CancellationContext; use buck2_interpreter::dice::starlark_types::GetStarlarkTypes; -use buck2_interpreter::file_type::StarlarkFileType; use dice::DiceComputations; use dice::Key; use dupe::Dupe; -use more_futures::cancellation::CancellationContext; use starlark::environment::Globals; -use crate::interpreter::cell_info::InterpreterCellInfo; use crate::interpreter::configuror::BuildInterpreterConfiguror; use crate::interpreter::context::HasInterpreterContext; @@ -36,25 +29,9 @@ use crate::interpreter::context::HasInterpreterContext; pub struct GlobalInterpreterState { pub cell_resolver: CellResolver, - pub(crate) cell_configs: HashMap<BuildFileCell, InterpreterCellInfo>, - /// The GlobalEnvironment contains all the globally available symbols - /// (primarily starlark stdlib and Buck-provided functions) that should - /// be available in a build file. - pub build_file_global_env: Globals, - - /// Symbols for `PACKAGE` files. - pub package_file_global_env: Globals, - - /// The GlobalEnvironment contains all the globally available symbols - /// (primarily starlark stdlib and Buck-provided functions) that should - /// be available in an extension file. - pub extension_file_global_env: Globals, - - /// The GlobalEnvironment contains all the globally available symbols - /// (primarily starlark stdlib and Buck-provided functions) that should - /// be available in a bxl file. - pub bxl_file_global_env: Globals, + /// (primarily starlark stdlib and Buck-provided functions).
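A minimal sketch of the new limit setter; where it is best called and the chosen number are assumptions for illustration, and the double-call error corresponds to `MemorySetInThisFile` above:

```python
# Cap Starlark heap growth while this file evaluates; calling this twice
# in the same file is an error.
set_starlark_peak_allocated_byte_limit(2 * 1024 * 1024 * 1024)  # 2 GiB
```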
+ pub global_env: Globals, /// Interpreter Configurer pub configuror: Arc<BuildInterpreterConfiguror>, @@ -68,33 +45,16 @@ pub struct GlobalInterpreterState { impl GlobalInterpreterState { pub fn new( - legacy_configs: &dyn LegacyBuckConfigsView, cell_resolver: CellResolver, interpreter_configuror: Arc<BuildInterpreterConfiguror>, disable_starlark_types: bool, unstable_typecheck: bool, ) -> anyhow::Result<Self> { - // TODO: There should be one of these that also does not have native functions - // in the global namespace so that it can be configured per-cell - let build_file_global_env = interpreter_configuror.build_file_globals(); - let package_file_global_env = interpreter_configuror.package_file_globals(); - let extension_file_global_env = interpreter_configuror.extension_file_globals(); - let bxl_file_global_env = interpreter_configuror.bxl_file_globals(); - - let mut cell_configs = HashMap::new(); - for (cell_name, _config) in legacy_configs.iter() { - cell_configs.insert( - BuildFileCell::new(cell_name), - InterpreterCellInfo::new(BuildFileCell::new(cell_name), cell_resolver.dupe())?, - ); - } + let global_env = interpreter_configuror.globals(); + Ok(Self { cell_resolver, - cell_configs, - build_file_global_env, - package_file_global_env, - extension_file_global_env, - bxl_file_global_env, + global_env, configuror: interpreter_configuror, disable_starlark_types, unstable_typecheck, @@ -105,24 +65,22 @@ impl GlobalInterpreterState { &self.configuror } - pub fn globals_for_file_type(&self, file_type: StarlarkFileType) -> &Globals { - match file_type { - StarlarkFileType::Buck => &self.build_file_global_env, - StarlarkFileType::Package => &self.package_file_global_env, - StarlarkFileType::Bzl => &self.extension_file_global_env, - StarlarkFileType::Bxl => &self.bxl_file_global_env, - } + pub fn globals(&self) -> &Globals { + &self.global_env } } #[async_trait] pub trait HasGlobalInterpreterState { - async fn get_global_interpreter_state(&self) -> anyhow::Result<Arc<GlobalInterpreterState>>; + async fn get_global_interpreter_state(&mut self) -> anyhow::Result<Arc<GlobalInterpreterState>>; } #[async_trait] -impl HasGlobalInterpreterState for DiceComputations { - async fn get_global_interpreter_state(&self) -> anyhow::Result<Arc<GlobalInterpreterState>> { +impl HasGlobalInterpreterState for DiceComputations<'_> { + async fn get_global_interpreter_state( + &mut self, + ) -> anyhow::Result<Arc<GlobalInterpreterState>> { #[derive(Clone, Dupe, Allocative)] struct GisValue(Arc<GlobalInterpreterState>); #[derive( Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative )] - #[display(fmt = "{:?}", self)] + #[display("{:?}", self)] struct GisKey(); #[async_trait] impl Key for GisKey { - type Value = SharedResult<GisValue>; + type Value = buck2_error::Result<GisValue>; async fn compute( &self, ctx: &mut DiceComputations, _cancellation: &CancellationContext, ) -> Self::Value { let interpreter_configuror = ctx.get_interpreter_configuror().await?; - let legacy_configs = ctx.get_legacy_configs_on_dice().await?; let cell_resolver = ctx.get_cell_resolver().await?; let disable_starlark_types = ctx.get_disable_starlark_types().await?; let unstable_typecheck = ctx.get_unstable_typecheck().await?; Ok(GisValue(Arc::new(GlobalInterpreterState::new( - &legacy_configs, cell_resolver, interpreter_configuror, disable_starlark_types, diff --git a/app/buck2_interpreter_for_build/src/interpreter/globals.rs b/app/buck2_interpreter_for_build/src/interpreter/globals.rs index d80c00d20b4fe..5a287c184feca 100644 --- a/app/buck2_interpreter_for_build/src/interpreter/globals.rs +++ b/app/buck2_interpreter_for_build/src/interpreter/globals.rs @@ -7,28 +7,35 @@ * of this source tree.
*/ -use buck2_interpreter::anon_targets::REGISTER_ANON_TARGETS; -use buck2_interpreter::bxl::BXL_SPECIFIC_GLOBALS; -use buck2_interpreter::cfg_constructor::REGISTER_SET_CFG_CONSTRUCTOR; -use buck2_interpreter::functions::more::REGISTER_BUCK2_BUILD_API_GLOBALS; -use buck2_interpreter::functions::transition::REGISTER_TRANSITION; +use buck2_interpreter::downstream_crate_starlark_defs::REGISTER_BUCK2_ACTION_IMPL_GLOBALS; +use buck2_interpreter::downstream_crate_starlark_defs::REGISTER_BUCK2_ANON_TARGETS_GLOBALS; +use buck2_interpreter::downstream_crate_starlark_defs::REGISTER_BUCK2_BUILD_API_GLOBALS; +use buck2_interpreter::downstream_crate_starlark_defs::REGISTER_BUCK2_BUILD_API_INTERNALS; +use buck2_interpreter::downstream_crate_starlark_defs::REGISTER_BUCK2_BXL_GLOBALS; +use buck2_interpreter::downstream_crate_starlark_defs::REGISTER_BUCK2_CFG_CONSTRUCTOR_GLOBALS; +use buck2_interpreter::downstream_crate_starlark_defs::REGISTER_BUCK2_TRANSITION_GLOBALS; use buck2_interpreter::starlark_promise::register_promise; use buck2_interpreter::types::cell_path::register_cell_path; use buck2_interpreter::types::cell_root::register_cell_root; use buck2_interpreter::types::configured_providers_label::register_providers_label; +use buck2_interpreter::types::project_root::register_project_root; use buck2_interpreter::types::regex::register_buck_regex; use buck2_interpreter::types::target_label::register_target_label; +use buck2_util::late_binding::LateBinding; use starlark::environment::GlobalsBuilder; +use starlark::environment::LibraryExtension; use crate::attrs::attrs_global::register_attrs; -use crate::interpreter::build_defs::register_path; use crate::interpreter::functions::dedupe::register_dedupe; use crate::interpreter::functions::host_info::register_host_info; +use crate::interpreter::functions::internals::register_internals; use crate::interpreter::functions::load_symbols::register_load_symbols; +use crate::interpreter::functions::path::register_path; use crate::interpreter::functions::read_config::register_read_config; use crate::interpreter::functions::regex::register_regex; use crate::interpreter::functions::sha256::register_sha256; use crate::interpreter::functions::soft_error::register_soft_error; +use crate::interpreter::functions::starlark::register_set_starlark_peak_allocated_byte_limit; use crate::interpreter::functions::warning::register_warning; use crate::interpreter::natives::register_module_natives; use crate::interpreter::selector::register_select; @@ -37,15 +44,19 @@ use crate::rule::register_rule_function; use crate::super_package::defs::register_package_natives; use crate::super_package::package_value::register_read_package_value; -/// Natives for all file types. -/// [It was decided](https://fburl.com/workplace/dlvp5c9q) -/// that we want identical globals for all files, except `BUCK` files, -/// where we additionally add prelude and package implicits. -pub fn register_universal_natives(builder: &mut GlobalsBuilder) { - (REGISTER_BUCK2_BUILD_API_GLOBALS.get().unwrap())(builder); - (REGISTER_TRANSITION.get().unwrap())(builder); - (BXL_SPECIFIC_GLOBALS.get().unwrap())(builder); - (REGISTER_SET_CFG_CONSTRUCTOR.get().unwrap())(builder); +fn from_late_binding(l: &LateBinding<fn(&mut GlobalsBuilder)>, builder: &mut GlobalsBuilder) { + if let Ok(v) = l.get() { + v(builder); + } +} + +// NOTE: Semantically, `register_load_natives`, `register_analysis_natives`, `register_bxl_natives`, +// and `starlark_library_extensions_for_buck2` are all the same, since all symbols are available +// everywhere.
However, we distinguish between them for the purpose of generating documentation. + +pub fn register_load_natives(builder: &mut GlobalsBuilder) { + from_late_binding(&REGISTER_BUCK2_CFG_CONSTRUCTOR_GLOBALS, builder); + from_late_binding(&REGISTER_BUCK2_TRANSITION_GLOBALS, builder); register_module_natives(builder); register_host_info(builder); register_read_config(builder); @@ -62,11 +73,73 @@ pub fn register_universal_natives(builder: &mut GlobalsBuilder) { register_providers_label(builder); register_cell_path(builder); register_cell_root(builder); + register_project_root(builder); register_target_label(builder); register_path(builder); register_select(builder); - register_promise(builder); register_sha256(builder); register_dedupe(builder); - (REGISTER_ANON_TARGETS.get().unwrap())(builder); + register_set_starlark_peak_allocated_byte_limit(builder); +} + +pub fn register_analysis_natives(builder: &mut GlobalsBuilder) { + from_late_binding(&REGISTER_BUCK2_ACTION_IMPL_GLOBALS, builder); + from_late_binding(&REGISTER_BUCK2_BUILD_API_GLOBALS, builder); + register_promise(builder); + from_late_binding(&REGISTER_BUCK2_ANON_TARGETS_GLOBALS, builder); +} + +pub fn register_bxl_natives(builder: &mut GlobalsBuilder) { + from_late_binding(&REGISTER_BUCK2_BXL_GLOBALS, builder); +} + +pub fn starlark_library_extensions_for_buck2() -> &'static [LibraryExtension] { + &[ + LibraryExtension::Breakpoint, + LibraryExtension::Debug, + LibraryExtension::EnumType, + LibraryExtension::Filter, + LibraryExtension::Json, + LibraryExtension::Map, + LibraryExtension::Partial, + LibraryExtension::Pprint, + LibraryExtension::Pstr, + LibraryExtension::Prepr, + LibraryExtension::Print, + LibraryExtension::RecordType, + LibraryExtension::StructType, + LibraryExtension::Typing, + LibraryExtension::Internal, + LibraryExtension::CallStack, + LibraryExtension::SetType, + ] +} + +fn register_all_natives(builder: &mut GlobalsBuilder) { + register_load_natives(builder); + register_analysis_natives(builder); + register_bxl_natives(builder); + for ext in starlark_library_extensions_for_buck2() { + ext.add(builder); + } +} + +fn register_all_internals(builder: &mut GlobalsBuilder) { + register_internals(builder); + from_late_binding(&REGISTER_BUCK2_BUILD_API_INTERNALS, builder); +} + +/// The standard set of globals that is available in all files. +/// +/// This does not include the implicit prelude and cell imports which are only available in `BUCK` +/// files, but does include everything else.
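To illustrate what the `base_globals()` function that follows assembles: every native is registered both at the top level and under a `__buck2_builtins__` namespace, while the unstable helpers from `register_all_internals` are exposed only under `__internal__`. A hedged Starlark sketch of what that layering means for user code:

```python
# The namespaced alias refers to the same builtin as the bare name, which
# keeps it reachable even if a prelude macro shadows the bare name:
srcs = __buck2_builtins__.glob(["*.py"])

# Unstable helpers are only reachable through __internal__, e.g. the
# sub_packages() helper registered by register_internals():
subs = __internal__.sub_packages()
```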
+pub fn base_globals() -> GlobalsBuilder {
+ let mut global_env = GlobalsBuilder::standard().with(register_all_natives);
+ global_env.namespace("__internal__", |x| {
+ register_all_internals(x);
+ });
+ global_env.namespace("__buck2_builtins__", |x| {
+ register_all_natives(x);
+ });
+ global_env
}
diff --git a/app/buck2_interpreter_for_build/src/interpreter/globspec.rs b/app/buck2_interpreter_for_build/src/interpreter/globspec.rs
index 9f22a362b9c75..f58f29b64a8d6 100644
--- a/app/buck2_interpreter_for_build/src/interpreter/globspec.rs
+++ b/app/buck2_interpreter_for_build/src/interpreter/globspec.rs
@@ -17,7 +17,8 @@
 use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath;
 use buck2_core::package::package_relative_path::PackageRelativePath;
 use derivative::Derivative;
-#[derive(Debug, thiserror::Error)]
+#[derive(Debug, buck2_error::Error)]
+#[buck2(input)]
 enum GlobError {
 #[error(
 "Exact pattern must be valid forward relative path: `{0}` \
diff --git a/app/buck2_interpreter_for_build/src/interpreter/interpreter_for_cell.rs b/app/buck2_interpreter_for_build/src/interpreter/interpreter_for_cell.rs
index 022a1744c24c2..c93a33037049b 100644
--- a/app/buck2_interpreter_for_build/src/interpreter/interpreter_for_cell.rs
+++ b/app/buck2_interpreter_for_build/src/interpreter/interpreter_for_cell.rs
@@ -11,18 +11,23 @@
 //! operations of converting file content to ASTs and evaluating import and
 //! build files.
+use std::cell::OnceCell;
 use std::cell::RefCell;
 use std::sync::Arc;
 use allocative::Allocative;
 use anyhow::Context;
-use buck2_common::legacy_configs::view::LegacyBuckConfigView;
+use buck2_common::legacy_configs::configs::LegacyBuckConfig;
+use buck2_common::legacy_configs::key::BuckconfigKeyRef;
 use buck2_common::package_listing::listing::PackageListing;
 use buck2_core::build_file_path::BuildFilePath;
 use buck2_core::bzl::ImportPath;
 use buck2_core::cells::build_file_cell::BuildFileCell;
 use buck2_core::cells::cell_path::CellPath;
-use buck2_core::cells::CellAliasResolver;
+use buck2_core::soft_error;
+use buck2_error::starlark_error::from_starlark;
+use buck2_error::BuckErrorContext;
+use buck2_event_observer::humanized::HumanizedBytes;
 use buck2_events::dispatch::get_dispatcher;
 use buck2_interpreter::factory::StarlarkEvaluatorProvider;
 use buck2_interpreter::file_loader::InterpreterFileLoader;
@@ -40,45 +45,66 @@
 use buck2_interpreter::paths::path::OwnedStarlarkPath;
 use buck2_interpreter::paths::path::StarlarkPath;
 use buck2_interpreter::prelude_path::PreludePath;
 use buck2_interpreter::print_handler::EventDispatcherPrintHandler;
+use buck2_interpreter::soft_error::Buck2StarlarkSoftErrorHandler;
 use buck2_node::nodes::eval_result::EvaluationResult;
+use buck2_node::nodes::eval_result::EvaluationResultWithStats;
 use buck2_node::super_package::SuperPackage;
+use buck2_util::per_thread_instruction_counter::PerThreadInstructionCounter;
 use dupe::Dupe;
 use gazebo::prelude::*;
 use starlark::codemap::FileSpan;
 use starlark::environment::FrozenModule;
 use starlark::environment::Module;
 use starlark::syntax::AstModule;
-use starlark::values::OwnedFrozenValueTyped;
-use thiserror::Error;
+use starlark::values::any_complex::StarlarkAnyComplex;
+use starlark::values::OwnedFrozenRef;
+use crate::interpreter::buckconfig::BuckConfigsViewForStarlark;
 use crate::interpreter::build_context::BuildContext;
 use crate::interpreter::build_context::PerFileTypeContext;
 use crate::interpreter::bzl_eval_ctx::BzlEvalCtx;
 use crate::interpreter::cell_info::InterpreterCellInfo;
+use
crate::interpreter::extra_value::InterpreterExtraValue;
 use crate::interpreter::global_interpreter_state::GlobalInterpreterState;
 use crate::interpreter::module_internals::ModuleInternals;
 use crate::interpreter::package_file_extra::FrozenPackageFileExtra;
 use crate::super_package::eval_ctx::PackageFileEvalCtx;
-#[derive(Debug, Error)]
-enum StarlarkParseError {
- #[error("Error parsing: `{0}`")]
- InFile(OwnedStarlarkPath),
- #[error("Tabs are not allowed in Buck files: `{0}`")]
- Tabs(OwnedStarlarkPath),
+#[derive(Debug, buck2_error::Error)]
+#[error("Tabs are not allowed in Buck files: `{0}`")]
+#[buck2(input)]
+struct StarlarkTabsError(OwnedStarlarkPath);
+
+#[derive(Debug, buck2_error::Error)]
+enum StarlarkPeakMemoryError {
+ #[error(
+ "Starlark peak memory usage for {0} is {1} which exceeds the limit {2}! Please reduce memory usage to prevent OOMs. See {3} for debugging tips."
+ )]
+ #[buck2(input)]
+ ExceedsThreshold(BuildFilePath, HumanizedBytes, HumanizedBytes, String),
 }
-/// A ParseResult includes the parsed AST and a list of the imported files.
+#[derive(Debug, buck2_error::Error)]
+#[buck2(input)]
+enum StarlarkPeakMemorySoftError {
+ #[error(
+ "Starlark peak memory usage for {0} is {1} which is over 50% of the limit {2}! Consider investigating what takes too much memory: {3}."
+ )]
+ CloseToThreshold(BuildFilePath, HumanizedBytes, HumanizedBytes, String),
+}
+
+/// A ParseData includes the parsed AST and a list of the imported files.
 ///
 /// The imports are under a separate Arc so that they can be shared with
 /// the evaluation result (which needs the imports but no longer needs the AST).
-#[derive(Debug)]
-pub struct ParseResult(
+pub struct ParseData(
 pub AstModule,
 pub Arc<Vec<(Option<FileSpan>, OwnedStarlarkModulePath)>>,
 );
-impl ParseResult {
+pub type ParseResult = Result<ParseData, buck2_error::Error>;
+
+impl ParseData {
 fn new(
 ast: AstModule,
 implicit_imports: Vec<OwnedStarlarkModulePath>,
@@ -108,6 +134,13 @@ impl ParseResult {
 }
 }
+pub fn get_starlark_warning_link() -> &'static str {
+ if buck2_core::is_open_source() {
+ "https://buck2.build/docs/users/faq/starlark_peak_mem"
+ } else {
+ "https://fburl.com/starlark_peak_mem_warning"
+ }
+}
 /// Interpreter for build files.
 ///
 /// The Interpreter is responsible for parsing files to an AST and then
@@ -118,7 +151,7 @@
 pub(crate) struct InterpreterForCell {
 /// Non-cell-specific information.
 global_state: Arc<GlobalInterpreterState>,
 /// Cell-specific alias resolver.
- cell_names: CellAliasResolver,
+ cell_info: InterpreterCellInfo,
 /// Log GC.
 verbose_gc: bool,
 /// When true, rule function creates a node with no attributes.
@@ -136,7 +169,7 @@ struct InterpreterLoadResolver {
 build_file_cell: BuildFileCell,
 }
-#[derive(Debug, Error)]
+#[derive(Debug, buck2_error::Error)]
 enum LoadResolutionError {
 #[error(
 "Cannot load `{0}`. Bxl loads are not allowed from within this context. bxl files can only be loaded from other bxl files."
@@ -159,7 +192,11 @@ impl LoadResolver for InterpreterLoadResolver {
 // This is to be removed when we finish migration to Buck2.
 let path = path.trim_end_match("?v2_only");
- let path = parse_import(&self.config.cell_names, &self.loader_path, path)?;
+ let path = parse_import(
+ &self.config.cell_info.cell_alias_resolver(),
+ &self.loader_path,
+ path,
+ )?;
 // check for bxl files first before checking for prelude.
 // All bxl imports are parsed the same regardless of prelude or not.
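As a sketch of what `LoadResolutionError` guards against (path hypothetical): a `.bxl` module can only be loaded from another `.bxl` file, so the following in a `BUCK` or `.bzl` file is rejected:

    load("root//tools:queries.bxl", "run_query")  # Error: bxl loads are not allowed from within this context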
@@ -218,6 +255,13 @@ impl LoadResolver for InterpreterLoadResolver {
 }
 }
+struct EvalResult {
+ additional: PerFileTypeContext,
+ starlark_peak_allocated_byte_limit: OnceCell<Option<u64>>,
+ is_profiling_enabled: bool,
+ cpu_instruction_count: Option<u64>,
+}
+
 impl InterpreterForCell {
 fn verbose_gc() -> anyhow::Result<bool> {
 match std::env::var_os("BUCK2_STARLARK_VERBOSE_GC") {
@@ -244,13 +288,13 @@ impl InterpreterForCell {
 //, configuror: Arc
 pub(crate) fn new(
- cell_names: CellAliasResolver,
+ cell_info: InterpreterCellInfo,
 global_state: Arc<GlobalInterpreterState>,
 implicit_import_paths: Arc<ImplicitImportPaths>,
 ) -> anyhow::Result<Self> {
 Ok(Self {
 global_state,
- cell_names,
+ cell_info,
 verbose_gc: Self::verbose_gc()?,
 ignore_attrs_for_profiling: Self::is_ignore_attrs_for_profiling()?,
 implicit_import_paths,
@@ -268,9 +312,9 @@ impl InterpreterForCell {
 let prelude_env = loaded_modules
 .map
 .get(&StarlarkModulePath::LoadFile(prelude_import.import_path()))
- .with_context(|| {
+ .with_internal_error_anyhow(|| {
 format!(
- "Should've had an env for the prelude import `{}` (internal error)",
+ "Should've had an env for the prelude import `{}`",
 prelude_import,
 )
 })?;
@@ -282,6 +326,10 @@ impl InterpreterForCell {
 }
 }
+ env.set_extra_value_no_overwrite(env.heap().alloc_complex(StarlarkAnyComplex {
+ value: InterpreterExtraValue::default(),
+ }))?;
+
 Ok(env)
 }
@@ -299,7 +347,7 @@ impl InterpreterForCell {
 loaded_modules: &LoadedModules,
 ) -> anyhow::Result<(Module, ModuleInternals)> {
 let internals = self.global_state.configuror.new_extra_context(
- self.get_cell_config(build_file.build_file_cell()),
+ &self.cell_info,
 build_file.clone(),
 package_listing.dupe(),
 super_package,
@@ -313,11 +361,8 @@ impl InterpreterForCell {
 let root_env = loaded_modules
 .map
 .get(&StarlarkModulePath::LoadFile(&root_import))
- .with_context(|| {
- format!(
- "Should've had an env for the root import `{}` (internal error)",
- root_import,
- )
+ .with_internal_error_anyhow(|| {
+ format!("Should've had an env for the root import `{}`", root_import,)
 })?
 .env();
 env.import_public_symbols(root_env);
@@ -326,13 +371,6 @@ impl InterpreterForCell {
 Ok((env, internals))
 }
- fn get_cell_config(&self, build_file_cell: BuildFileCell) -> &InterpreterCellInfo {
- self.global_state
- .cell_configs
- .get(&build_file_cell)
- .unwrap_or_else(|| panic!("Should've had cell config for {}", build_file_cell))
- }
-
 fn load_resolver(
 self: &Arc<Self>,
 current_file_path: StarlarkPath<'_>,
@@ -390,23 +428,35 @@ impl InterpreterForCell {
 // for example inside parentheses in function call arguments,
 // which restricts what the spec allows.
 if content.contains('\t') {
- return Err(StarlarkParseError::Tabs(OwnedStarlarkPath::new(import)).into());
+ return Err(StarlarkTabsError(OwnedStarlarkPath::new(import)).into());
 }
 let project_relative_path = self
 .global_state
 .cell_resolver
 .resolve_path(import.path().as_ref().as_ref())?;
- let result: anyhow::Result<_> = try {
- let disable_starlark_types = self.global_state.disable_starlark_types;
- let ast = AstModule::parse(
- project_relative_path.as_str(),
- content,
- &import.file_type().dialect(disable_starlark_types),
- )?;
- let mut implicit_imports = Vec::new();
- if let Some(i) = self.prelude_import(import) {
- implicit_imports.push(OwnedStarlarkModulePath::LoadFile(i.import_path().clone()));
+
+ let disable_starlark_types = self.global_state.disable_starlark_types;
+ let ast = match AstModule::parse(
+ project_relative_path.as_str(),
+ content,
+ &import.file_type().dialect(disable_starlark_types),
+ ) {
+ Ok(ast) => ast,
+ Err(e) => {
+ return Ok(Err(from_starlark(e).context(format!(
+ "Error parsing: `{}`",
+ OwnedStarlarkPath::new(import)
+ ))));
+ }
+ };
+ let mut implicit_imports = Vec::new();
+ if let Some(i) = self.prelude_import(import) {
+ implicit_imports.push(OwnedStarlarkModulePath::LoadFile(i.import_path().clone()));
+ }
+ if let StarlarkPath::BuildFile(build_file) = import {
+ if let Some(i) = self.package_import(build_file) {
+ implicit_imports.push(OwnedStarlarkModulePath::LoadFile(i.import().clone()));
 }
 if let StarlarkPath::BuildFile(build_file) = import {
 if let Some(i) = self.package_import(build_file) {
@@ -416,9 +466,8 @@ impl InterpreterForCell {
 implicit_imports.push(OwnedStarlarkModulePath::LoadFile(i));
 }
 }
- ParseResult::new(ast, implicit_imports, &self.load_resolver(import))?
- };
- result.with_context(|| StarlarkParseError::InFile(OwnedStarlarkPath::new(import)))
+ }
+ ParseData::new(ast, implicit_imports, &self.load_resolver(import)).map(Ok)
 }
 pub(crate) fn resolve_path(
@@ -433,53 +482,67 @@
 self: &Arc<Self>,
 env: &Module,
 ast: AstModule,
- buckconfig: &dyn LegacyBuckConfigView,
- root_buckconfig: &dyn LegacyBuckConfigView,
+ buckconfigs: &mut dyn BuckConfigsViewForStarlark,
 loaded_modules: LoadedModules,
 extra_context: PerFileTypeContext,
 eval_provider: &mut dyn StarlarkEvaluatorProvider,
 unstable_typecheck: bool,
- ) -> anyhow::Result<PerFileTypeContext> {
+ ) -> anyhow::Result<EvalResult> {
 let import = extra_context.starlark_path();
- let globals = self
- .global_state
- .globals_for_file_type(extra_context.file_type());
+ let globals = self.global_state.globals();
 let file_loader =
 InterpreterFileLoader::new(loaded_modules, Arc::new(self.load_resolver(import)));
- let cell_info = self.get_cell_config(import.build_file_cell());
 let host_info = self.global_state.configuror.host_info();
 let extra = BuildContext::new_for_module(
 env,
- cell_info,
- buckconfig,
- root_buckconfig,
+ &self.cell_info,
+ buckconfigs,
 host_info,
 extra_context,
 self.ignore_attrs_for_profiling,
 );
+ let is_profiling_enabled;
 let print = EventDispatcherPrintHandler(get_dispatcher());
- {
- let mut eval = eval_provider.make(env)?;
+ let cpu_instruction_count = {
+ let (mut eval, is_profiling_enabled_by_provider) = eval_provider.make(env)?;
+ is_profiling_enabled = is_profiling_enabled_by_provider;
 eval.enable_static_typechecking(unstable_typecheck);
 eval.set_print_handler(&print);
+ eval.set_soft_error_handler(&Buck2StarlarkSoftErrorHandler);
 eval.set_loader(&file_loader);
 eval.extra = Some(&extra);
 if self.verbose_gc {
 eval.verbose_gc();
 }
+
+ // Ignore error if failed to
initialize instruction counter.
+ let instruction_counter: Option<PerThreadInstructionCounter> =
+ PerThreadInstructionCounter::init().ok().unwrap_or_default();
+
 match eval.eval_module(ast, globals) {
 Ok(_) => {
+ let cpu_instruction_count = instruction_counter.and_then(|c| c.collect().ok());
+
 eval_provider
 .evaluation_complete(&mut eval)
 .context("Profiler finalization failed")?;
 eval_provider
 .visit_frozen_module(None)
- .context("Profiler heap visitation failed")?
+ .context("Profiler heap visitation failed")?;
+
+ cpu_instruction_count
+ }
+ Err(p) => {
+ return Err(from_starlark(p).into());
 }
- Err(p) => return Err(p),
 }
 };
- Ok(extra.additional)
+ Ok(EvalResult {
+ additional: extra.additional,
+ is_profiling_enabled,
+ starlark_peak_allocated_byte_limit: extra.starlark_peak_allocated_byte_limit,
+ cpu_instruction_count,
+ })
 }
 /// Evaluates the AST for a parsed module. Loaded modules must contain the loaded
@@ -488,8 +551,7 @@ impl InterpreterForCell {
 pub(crate) fn eval_module(
 self: &Arc<Self>,
 starlark_path: StarlarkModulePath<'_>,
- buckconfig: &dyn LegacyBuckConfigView,
- root_buckconfig: &dyn LegacyBuckConfigView,
+ buckconfigs: &mut dyn BuckConfigsViewForStarlark,
 ast: AstModule,
 loaded_modules: LoadedModules,
 eval_provider: &mut dyn StarlarkEvaluatorProvider,
@@ -505,15 +567,15 @@
 || matches!(starlark_path, StarlarkModulePath::BxlFile(..))
 || match self.global_state.configuror.prelude_import() {
 Some(prelude_import) => {
- prelude_import.prelude_cell() == self.cell_names.resolve_self()
+ prelude_import.prelude_cell()
+ == self.cell_info.cell_alias_resolver().resolve_self()
 }
 None => false,
 };
 self.eval(
 &env,
 ast,
- buckconfig,
- root_buckconfig,
+ buckconfigs,
 loaded_modules,
 extra_context,
 eval_provider,
@@ -527,8 +589,7 @@
 package_file_path: &PackageFilePath,
 ast: AstModule,
 parent: SuperPackage,
- buckconfig: &dyn LegacyBuckConfigView,
- root_buckconfig: &dyn LegacyBuckConfigView,
+ buckconfigs: &mut dyn BuckConfigsViewForStarlark,
 loaded_modules: LoadedModules,
 eval_provider: &mut dyn StarlarkEvaluatorProvider,
 ) -> anyhow::Result<SuperPackage> {
@@ -543,21 +604,26 @@
 visibility: RefCell::new(None),
 });
- let per_file_context = self.eval(
- &env,
- ast,
- buckconfig,
- root_buckconfig,
- loaded_modules,
- extra_context,
- eval_provider,
- false,
- )?;
-
- let extra: Option<OwnedFrozenValueTyped<FrozenPackageFileExtra>> =
- if env.extra_value().is_some() {
- // Only freeze if there's extra, otherwise we will needlessly freeze globals.
- // TODO(nga): add API to only freeze extra.
+ let per_file_context = self
+ .eval(
+ &env,
+ ast,
+ buckconfigs,
+ loaded_modules,
+ extra_context,
+ eval_provider,
+ false,
+ )?
+ .additional;
+
+ let extra: Option<OwnedFrozenRef<FrozenPackageFileExtra>> =
+ if InterpreterExtraValue::get(&env)?
+ .package_extra
+ .get()
+ .is_some()
+ {
+ // Only freeze if there's something to freeze, otherwise we will needlessly freeze
+ // globals. TODO(nga): add API to only freeze extra.
 let env = env.freeze()?;
 FrozenPackageFileExtra::get(&env)?
 } else {
@@ -575,8 +641,7 @@ impl InterpreterForCell {
 pub(crate) fn eval_build_file(
 self: &Arc<Self>,
 build_file: &BuildFilePath,
- buckconfig: &dyn LegacyBuckConfigView,
- root_buckconfig: &dyn LegacyBuckConfigView,
+ buckconfigs: &mut dyn BuckConfigsViewForStarlark,
 listing: PackageListing,
 super_package: SuperPackage,
 package_boundary_exception: bool,
@@ -584,7 +649,7 @@
 loaded_modules: LoadedModules,
 eval_provider: &mut dyn StarlarkEvaluatorProvider,
 unstable_typecheck: bool,
- ) -> anyhow::Result<EvaluationResult> {
+ ) -> anyhow::Result<EvaluationResultWithStats> {
 let (env, internals) = self.create_build_env(
 build_file,
 &listing,
@@ -592,19 +657,68 @@
 package_boundary_exception,
 &loaded_modules,
 )?;
- let internals = self
- .eval(
- &env,
- ast,
- buckconfig,
- root_buckconfig,
- loaded_modules,
- PerFileTypeContext::Build(internals),
- eval_provider,
- unstable_typecheck,
+ let eval_result = self.eval(
+ &env,
+ ast,
+ buckconfigs,
+ loaded_modules,
+ PerFileTypeContext::Build(internals),
+ eval_provider,
+ unstable_typecheck,
+ )?;
+
+ let internals = eval_result.additional.into_build()?;
+ let starlark_peak_allocated_bytes = env.heap().peak_allocated_bytes() as u64;
+ let buckconfig_key = BuckconfigKeyRef {
+ section: "buck2",
+ property: "check_starlark_peak_memory",
+ };
+ let starlark_peak_mem_check_enabled = !eval_result.is_profiling_enabled
+ && LegacyBuckConfig::parse_value(
+ buckconfig_key,
+ buckconfigs
+ .read_root_cell_config(buckconfig_key)?
+ .as_deref(),
 )?
- .into_build()?;
+ .unwrap_or(false);
+ let default_limit = 2 * (1 << 30);
+ let starlark_mem_limit = eval_result
+ .starlark_peak_allocated_byte_limit
+ .get()
+ .map_or(default_limit, |opt| opt.unwrap_or(default_limit));
+
+ if starlark_peak_mem_check_enabled && starlark_peak_allocated_bytes > starlark_mem_limit {
+ Err(StarlarkPeakMemoryError::ExceedsThreshold(
+ build_file.to_owned(),
+ HumanizedBytes::fixed_width(starlark_peak_allocated_bytes),
+ HumanizedBytes::fixed_width(starlark_mem_limit),
+ get_starlark_warning_link().to_owned(),
+ )
+ .into())
+ } else if starlark_peak_mem_check_enabled
+ && starlark_peak_allocated_bytes > starlark_mem_limit / 2
+ {
+ soft_error!(
+ "starlark_memory_usage_over_soft_limit",
+ StarlarkPeakMemorySoftError::CloseToThreshold(
+ build_file.clone(),
+ HumanizedBytes::fixed_width(starlark_peak_allocated_bytes),
+ HumanizedBytes::fixed_width(starlark_mem_limit),
+ get_starlark_warning_link().to_owned()
+ ).into(), quiet: true
+ )?;
- Ok(EvaluationResult::from(internals))
+ Ok(EvaluationResultWithStats {
+ result: EvaluationResult::from(internals),
+ starlark_peak_allocated_bytes,
+ cpu_instruction_count: eval_result.cpu_instruction_count,
+ })
+ } else {
+ Ok(EvaluationResultWithStats {
+ result: EvaluationResult::from(internals),
+ starlark_peak_allocated_bytes,
+ cpu_instruction_count: eval_result.cpu_instruction_count,
+ })
+ }
 }
 }
diff --git a/app/buck2_interpreter_for_build/src/interpreter/interpreter_setup.rs b/app/buck2_interpreter_for_build/src/interpreter/interpreter_setup.rs
index cfb0d54a064c3..6a95cf65d02a3 100644
--- a/app/buck2_interpreter_for_build/src/interpreter/interpreter_setup.rs
+++ b/app/buck2_interpreter_for_build/src/interpreter/interpreter_setup.rs
@@ -10,12 +10,12 @@
 use std::sync::Arc;
 use buck2_common::dice::cells::SetCellResolver;
+use buck2_common::legacy_configs::cells::ExternalBuckconfigData;
 use buck2_common::legacy_configs::dice::SetLegacyConfigs;
-use buck2_common::legacy_configs::LegacyBuckConfigs;
 use buck2_core::cells::CellResolver;
-use buck2_interpreter::dice::starlark_profiler::SetStarlarkProfilerInstrumentation;
-use buck2_interpreter::dice::starlark_profiler::StarlarkProfilerConfiguration;
 use buck2_interpreter::dice::starlark_types::SetStarlarkTypes;
+use buck2_interpreter::starlark_profiler::config::SetStarlarkProfilerInstrumentation;
+use buck2_interpreter::starlark_profiler::config::StarlarkProfilerConfiguration;
 use dice::DiceTransactionUpdater;
 use crate::interpreter::configuror::BuildInterpreterConfiguror;
@@ -26,17 +26,15 @@ pub fn setup_interpreter(
 updater: &mut DiceTransactionUpdater,
 cell_resolver: CellResolver,
 configuror: Arc<BuildInterpreterConfiguror>,
- legacy_configs: LegacyBuckConfigs,
+ legacy_config_overrides: Arc<ExternalBuckconfigData>,
 starlark_profiler_instrumentation_override: StarlarkProfilerConfiguration,
 disable_starlark_types: bool,
 unstable_typecheck: bool,
 ) -> anyhow::Result<()> {
 updater.set_cell_resolver(cell_resolver)?;
 updater.set_interpreter_context(configuror)?;
- updater.set_legacy_configs(legacy_configs)?;
- updater.set_starlark_profiler_instrumentation_override(
- starlark_profiler_instrumentation_override,
- )?;
+ updater.set_legacy_config_external_data(legacy_config_overrides)?;
+ updater.set_starlark_profiler_configuration(starlark_profiler_instrumentation_override)?;
 updater.set_starlark_types(disable_starlark_types, unstable_typecheck)?;
 Ok(())
@@ -46,13 +44,12 @@ pub fn setup_interpreter_basic(
 dice: &mut DiceTransactionUpdater,
 cell_resolver: CellResolver,
 configuror: Arc<BuildInterpreterConfiguror>,
- legacy_configs: LegacyBuckConfigs,
 ) -> anyhow::Result<()> {
 setup_interpreter(
 dice,
 cell_resolver,
 configuror,
- legacy_configs,
+ Arc::new(ExternalBuckconfigData::testing_default()),
 StarlarkProfilerConfiguration::default(),
 false,
 false,
diff --git a/app/buck2_interpreter_for_build/src/interpreter/mod.rs b/app/buck2_interpreter_for_build/src/interpreter/mod.rs
deleted file mode 100644
index 826568a99a928..0000000000000
--- a/app/buck2_interpreter_for_build/src/interpreter/mod.rs
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */
-
-pub mod buckconfig;
-pub mod build_context;
-pub mod build_defs;
-pub(crate) mod bzl_eval_ctx;
-pub mod calculation;
-pub(crate) mod cell_info;
-pub mod configuror;
-pub mod context;
-pub mod cycles;
-pub mod dice_calculation_delegate;
-pub mod functions;
-pub mod global_interpreter_state;
-pub mod globals;
-pub mod globspec;
-pub mod interpreter_for_cell;
-pub mod interpreter_setup;
-pub mod module_internals;
-pub mod natives;
-pub mod package_file_calculation;
-pub mod package_file_extra;
-pub mod selector;
-pub mod testing;
diff --git a/app/buck2_interpreter_for_build/src/interpreter/module_internals.rs b/app/buck2_interpreter_for_build/src/interpreter/module_internals.rs
index 560f84cb83d7c..a5012cc19fe16 100644
--- a/app/buck2_interpreter_for_build/src/interpreter/module_internals.rs
+++ b/app/buck2_interpreter_for_build/src/interpreter/module_internals.rs
@@ -25,6 +25,7 @@
 use buck2_node::nodes::eval_result::EvaluationResult;
 use buck2_node::nodes::targets_map::TargetsMap;
 use buck2_node::nodes::targets_map::TargetsMapRecordError;
 use buck2_node::nodes::unconfigured::TargetNode;
+use buck2_node::oncall::Oncall;
 use buck2_node::package::Package;
 use buck2_node::super_package::SuperPackage;
 use dupe::Dupe;
@@ -46,14 +47,17 @@ impl From<ModuleInternals> for EvaluationResult {
 } = internals;
 let recorder = match state.into_inner() {
 State::BeforeTargets(_) => TargetsRecorder::new(),
- State::Targets(RecordingTargets { recorder, .. }) => recorder,
+ State::RecordingTargets(RecordingTargets { recorder, .. }) => recorder,
 };
 EvaluationResult::new(buildfile_path, imports, super_package, recorder.take())
 }
 }
-#[derive(Debug)]
-struct Oncall(Arc<String>);
+#[derive(Debug, Default)]
+struct BeforeTargets {
+ oncall: Option<Oncall>,
+ has_read_oncall: bool,
+}
 #[derive(Debug)]
 struct RecordingTargets {
@@ -64,9 +68,9 @@ struct RecordingTargets {
 #[derive(Debug)]
 enum State {
 /// No targets recorded yet, `oncall` call is allowed unless it was already called.
- BeforeTargets(Option<Oncall>),
+ BeforeTargets(BeforeTargets),
 /// First target seen.
- Targets(RecordingTargets),
+ RecordingTargets(RecordingTargets),
 }
 /// ModuleInternals contains the module/package-specific information for
@@ -108,12 +112,15 @@ impl PackageImplicits {
 }
 }
-#[derive(Debug, thiserror::Error)]
+#[derive(Debug, buck2_error::Error)]
+#[buck2(input)]
 enum OncallErrors {
 #[error("Called `oncall` after one or more targets were declared, `oncall` must be first.")]
 OncallAfterTargets,
 #[error("Called `oncall` more than once in the file.")]
 DuplicateOncall,
+ #[error("Called `oncall` after calling `read_oncall`, `oncall` must be first.")]
+ AfterReadOncall,
 }
 impl ModuleInternals {
@@ -130,7 +137,7 @@ impl ModuleInternals {
 Self {
 attr_coercion_context,
 buildfile_path,
- state: RefCell::new(State::BeforeTargets(None)),
+ state: RefCell::new(State::BeforeTargets(BeforeTargets::default())),
 imports,
 package_implicits,
 record_target_call_stacks,
@@ -160,13 +167,15 @@ impl ModuleInternals {
 pub(crate) fn set_oncall(&self, name: &str) -> anyhow::Result<()> {
 match &mut *self.state.borrow_mut() {
- State::BeforeTargets(Some(_)) => Err(OncallErrors::DuplicateOncall.into()),
- State::BeforeTargets(oncall) => {
- assert!(oncall.is_none());
- *oncall = Some(Oncall(Arc::new(name.to_owned())));
- Ok(())
- }
- State::Targets(..)
=> {
+ State::BeforeTargets(x) => match x.oncall {
+ _ if x.has_read_oncall => Err(OncallErrors::AfterReadOncall.into()),
+ Some(_) => Err(OncallErrors::DuplicateOncall.into()),
+ None => {
+ x.oncall = Some(Oncall::new(name));
+ Ok(())
+ }
+ },
+ State::RecordingTargets(..) => {
 // We require oncall to be first both so users can find it,
 // and so we can propagate it to all targets more easily.
 Err(OncallErrors::OncallAfterTargets.into())
@@ -174,13 +183,23 @@
 }
 }
+ pub(crate) fn get_oncall(&self) -> Option<Oncall> {
+ match &mut *self.state.borrow_mut() {
+ State::BeforeTargets(x) => {
+ x.has_read_oncall = true;
+ x.oncall.dupe()
+ }
+ State::RecordingTargets(t) => t.package.oncall.dupe(),
+ }
+ }
+
 fn recording_targets(&self) -> RefMut<RecordingTargets> {
 RefMut::map(self.state.borrow_mut(), |state| {
 loop {
 match state {
- State::BeforeTargets(oncall) => {
- let oncall = mem::take(oncall).map(|Oncall(name)| name);
- *state = State::Targets(RecordingTargets {
+ State::BeforeTargets(BeforeTargets { oncall, .. }) => {
+ let oncall = mem::take(oncall);
+ *state = State::RecordingTargets(RecordingTargets {
 package: Arc::new(Package {
 buildfile_path: self.buildfile_path.dupe(),
 oncall,
@@ -189,7 +208,7 @@
 });
 continue;
 }
- State::Targets(r) => return r,
+ State::RecordingTargets(r) => return r,
 }
 }
 })
@@ -226,6 +245,11 @@
 ) -> impl Iterator<Item = &PackageRelativePath> {
 spec.resolve_glob(self.package_listing.files())
 }
+
+ pub(crate) fn sub_packages(&self) -> impl Iterator<Item = &PackageRelativePath> {
+ self.package_listing
+ .subpackages_within(PackageRelativePath::empty())
+ }
 }
 // Records the targets declared when evaluating a build file.
diff --git a/app/buck2_interpreter_for_build/src/interpreter/natives.rs b/app/buck2_interpreter_for_build/src/interpreter/natives.rs
index 770c097094d39..0b6f2556e3d20 100644
--- a/app/buck2_interpreter_for_build/src/interpreter/natives.rs
+++ b/app/buck2_interpreter_for_build/src/interpreter/natives.rs
@@ -10,13 +10,15 @@
 use starlark::environment::GlobalsBuilder;
 use starlark::eval::Evaluator;
 use starlark::starlark_module;
+use starlark::values::none::NoneOr;
 use starlark::values::none::NoneType;
+use starlark::values::StringValue;
 use starlark::values::Value;
 use crate::interpreter::module_internals::ModuleInternals;
 #[starlark_module]
-pub fn register_module_natives(globals: &mut GlobalsBuilder) {
+pub(crate) fn register_module_natives(globals: &mut GlobalsBuilder) {
 /// Check if the target with `name` has already been defined,
 /// returns `True` if it has.
 ///
@@ -38,10 +40,22 @@ pub fn register_module_natives(globals: &mut GlobalsBuilder) {
 Ok(NoneType)
 }
+ /// Called in a `BUCK` file to retrieve the previously set `oncall`, or `None` if none has been set.
+ /// It is an error to call `oncall` after calling this function.
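+ ///
+ /// A minimal sketch of the intended ordering (oncall name hypothetical):
+ ///
+ /// ```python
+ /// oncall("build_infra")    # must come before targets and read_oncall()
+ /// owner = read_oncall()    # -> "build_infra"; calling oncall() afterwards errors
+ /// ```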
+ fn read_oncall<'v>(
+ eval: &mut Evaluator<'v, '_, '_>,
+ ) -> anyhow::Result<NoneOr<StringValue<'v>>> {
+ let internals = ModuleInternals::from_context(eval, "read_oncall")?;
+ match internals.get_oncall() {
+ None => Ok(NoneOr::None),
+ Some(oncall) => Ok(NoneOr::Other(eval.heap().alloc_str(oncall.as_str()))),
+ }
+ }
+
 fn implicit_package_symbol<'v>(
 name: &str,
 default: Option<Value<'v>>,
- eval: &mut Evaluator<'v, '_>,
+ eval: &mut Evaluator<'v, '_, '_>,
 ) -> anyhow::Result<Option<Value<'v>>> {
 let internals = ModuleInternals::from_context(eval, "implicit_package_symbol")?;
 match internals.get_package_implicit(name) {
diff --git a/app/buck2_interpreter_for_build/src/interpreter/package_file_calculation.rs b/app/buck2_interpreter_for_build/src/interpreter/package_file_calculation.rs
index 1e2657fc6159c..a1497ae55ba16 100644
--- a/app/buck2_interpreter_for_build/src/interpreter/package_file_calculation.rs
+++ b/app/buck2_interpreter_for_build/src/interpreter/package_file_calculation.rs
@@ -8,7 +8,8 @@
 */
 use async_trait::async_trait;
-use buck2_interpreter::paths::package::PackageFilePath;
+use buck2_core::cells::build_file_cell::BuildFileCell;
+use buck2_core::package::PackageLabel;
 use buck2_node::super_package::SuperPackage;
 use dice::DiceComputations;
@@ -16,13 +17,14 @@
 use crate::interpreter::dice_calculation_delegate::HasCalculationDelegate;
 #[async_trait]
 pub trait EvalPackageFile {
- async fn eval_package_file(&self, path: &PackageFilePath) -> anyhow::Result<SuperPackage>;
+ async fn eval_package_file(&mut self, path: PackageLabel) -> anyhow::Result<SuperPackage>;
 }
 #[async_trait]
-impl EvalPackageFile for DiceComputations {
- async fn eval_package_file(&self, path: &PackageFilePath) -> anyhow::Result<SuperPackage> {
- self.get_interpreter_calculator(path.cell(), path.build_file_cell())
+impl EvalPackageFile for DiceComputations<'_> {
+ async fn eval_package_file(&mut self, path: PackageLabel) -> anyhow::Result<SuperPackage> {
+ let cell_name = path.as_cell_path().cell();
+ self.get_interpreter_calculator(cell_name, BuildFileCell::new(cell_name))
 .await?
 .eval_package_file(path)
 .await
diff --git a/app/buck2_interpreter_for_build/src/interpreter/package_file_extra.rs b/app/buck2_interpreter_for_build/src/interpreter/package_file_extra.rs
index 23d4dc92d5601..84536a10161ce 100644
--- a/app/buck2_interpreter_for_build/src/interpreter/package_file_extra.rs
+++ b/app/buck2_interpreter_for_build/src/interpreter/package_file_extra.rs
@@ -24,24 +24,19 @@
 use starlark::values::Freeze;
 use starlark::values::Freezer;
 use starlark::values::FrozenValue;
 use starlark::values::NoSerialize;
+use starlark::values::OwnedFrozenRef;
 use starlark::values::OwnedFrozenValue;
-use starlark::values::OwnedFrozenValueTyped;
 use starlark::values::StarlarkValue;
 use starlark::values::Trace;
 use starlark::values::Tracer;
 use starlark::values::Value;
-use starlark::values::ValueLike;
 use starlark_map::small_map::SmallMap;
+use crate::interpreter::extra_value::FrozenInterpreterExtraValue;
+use crate::interpreter::extra_value::InterpreterExtraValue;
 use crate::super_package::package_value::FrozenStarlarkPackageValue;
 use crate::super_package::package_value::StarlarkPackageValue;
-#[derive(Debug, thiserror::Error)]
-enum PackageFileExtraError {
- #[error("Wrong type of frozen package extra (internal error)")]
- WrongTypeOfFrozenExtra,
-}
-
 /// `Module.extra_value` when evaluating `PACKAGE` file.
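 ///
 /// A sketch of how it gets populated (key and value hypothetical): a `PACKAGE`
 /// file evaluating
 ///
 /// ```python
 /// write_package_value("mylib.mode", "dev")
 /// ```
 ///
 /// ends up with the value recorded in `package_values` under `"mylib.mode"`.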
 #[derive(
 Default,
@@ -51,7 +46,7 @@ enum PackageFileExtraError {
 ProvidesStaticType,
 Allocative
 )]
-#[display(fmt = "{:?}", "self")]
+#[display("{:?}", self)]
 pub struct PackageFileExtra<'v> {
 pub cfg_constructor: OnceCell<Value<'v>>,
 pub(crate) package_values: RefCell<SmallMap<String, StarlarkPackageValue<'v>>>,
@@ -79,7 +74,7 @@ unsafe impl<'v> Trace<'v> for PackageFileExtra<'v> {
 ProvidesStaticType,
 Allocative
 )]
-#[display(fmt = "{:?}", "self")]
+#[display("{:?}", self)]
 pub struct FrozenPackageFileExtra {
 pub(crate) cfg_constructor: Option<FrozenValue>,
 pub(crate) package_values: SmallMap<String, FrozenStarlarkPackageValue>,
@@ -127,37 +122,21 @@ impl<'v> Freeze for PackageFileExtra<'v> {
 }
 impl<'v> PackageFileExtra<'v> {
- pub fn get_or_init(eval: &mut Evaluator<'v, '_>) -> anyhow::Result<&'v PackageFileExtra<'v>> {
- match eval.module().extra_value() {
- None => {
- let extra = eval.heap().alloc_complex(PackageFileExtra::default());
- eval.module().set_extra_value(extra);
- let extra = extra
- .downcast_ref_err::<PackageFileExtra>()
- .context("(internal error)")?;
- Ok(extra)
- }
- Some(extra) => {
- let extra = extra
- .downcast_ref_err::<PackageFileExtra>()
- .context("(internal error)")?;
- Ok(extra)
- }
- }
+ pub fn get_or_init(
+ eval: &mut Evaluator<'v, '_, '_>,
+ ) -> anyhow::Result<&'v PackageFileExtra<'v>> {
+ Ok(InterpreterExtraValue::get(eval.module())?
+ .package_extra
+ .get_or_init(Default::default))
 }
 }
 impl FrozenPackageFileExtra {
 pub(crate) fn get(
 module: &FrozenModule,
- ) -> anyhow::Result<Option<OwnedFrozenValueTyped<FrozenPackageFileExtra>>> {
- match module.owned_extra_value() {
- None => Ok(None),
- Some(extra) => {
- Ok(Some(extra.downcast().map_err(|_| {
- PackageFileExtraError::WrongTypeOfFrozenExtra
- })?))
- }
- }
+ ) -> anyhow::Result<Option<OwnedFrozenRef<FrozenPackageFileExtra>>> {
+ Ok(FrozenInterpreterExtraValue::get(module)?
+ .into_owned_frozen_ref()
+ .try_map_option(|x| x.value.package_extra.as_ref()))
+ }
 }
diff --git a/app/buck2_interpreter_for_build/src/interpreter/selector.rs b/app/buck2_interpreter_for_build/src/interpreter/selector.rs
index a13692edd8879..cea72c1c56807 100644
--- a/app/buck2_interpreter_for_build/src/interpreter/selector.rs
+++ b/app/buck2_interpreter_for_build/src/interpreter/selector.rs
@@ -20,6 +20,8 @@
 use starlark::starlark_complex_value;
 use starlark::starlark_module;
 use starlark::values::dict::Dict;
 use starlark::values::dict::DictRef;
+use starlark::values::dict::DictType;
+use starlark::values::none::NoneOr;
 use starlark::values::starlark_value;
 use starlark::values::starlark_value_as_type::StarlarkValueAsType;
 use starlark::values::Freeze;
@@ -28,29 +30,34 @@
 use starlark::values::FrozenValue;
 use starlark::values::Heap;
 use starlark::values::NoSerialize;
 use starlark::values::StarlarkValue;
+use starlark::values::StringValue;
 use starlark::values::Trace;
 use starlark::values::Tracer;
+use starlark::values::UnpackValue;
 use starlark::values::Value;
 use starlark::values::ValueLike;
-use starlark::StarlarkDocs;
+use starlark::values::ValueOf;
 /// Representation of `select()` in Starlark.
-#[derive(Debug, ProvidesStaticType, NoSerialize, StarlarkDocs, Allocative)] // TODO selector should probably support serializing
+#[derive(Debug, ProvidesStaticType, NoSerialize, Allocative)] // TODO selector should probably support serializing
#[repr(C)]
 pub enum StarlarkSelectorGen<ValueType> {
- Inner(ValueType),
- Added(ValueType, ValueType),
+ /// Simplest form, backed by dictionary representation
+ /// wrapped into `select` function call.
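+ ///
+ /// Sketch of the two shapes as written in Starlark (labels hypothetical):
+ ///
+ /// ```python
+ /// select({"config//os:linux": ["-lpthread"]})   # a single `select` call -> Primary
+ /// ["-O2"] + select({"DEFAULT": []})             # concatenation -> Sum
+ /// ```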
+ // TODO: add a type restriction here that ValueType should be a dict
+ Primary(ValueType),
+ Sum(ValueType, ValueType),
 }
 impl<ValueType: Display> Display for StarlarkSelectorGen<ValueType> {
 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 match self {
- StarlarkSelectorGen::Inner(v) => {
+ StarlarkSelectorGen::Primary(v) => {
 f.write_str("select(")?;
 v.fmt(f)?;
 f.write_str(")")
 }
- StarlarkSelectorGen::Added(l, r) => {
+ StarlarkSelectorGen::Sum(l, r) => {
 l.fmt(f)?;
 f.write_str(" + ")?;
 r.fmt(f)
@@ -64,36 +71,63 @@ unsafe impl<From: Coerce<To>, To> Coerce<StarlarkSelectorGen<To>> for StarlarkSe
 starlark_complex_value!(pub StarlarkSelector);
 impl<'v> StarlarkSelector<'v> {
- fn new(d: Value<'v>) -> Self {
- StarlarkSelector::Inner(d)
+ pub fn new(d: Value<'v>) -> Self {
+ StarlarkSelector::Primary(d)
 }
- fn added(left: Value<'v>, right: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> {
- Ok(heap.alloc(StarlarkSelector::Added(left, right)))
+ fn sum(left: Value<'v>, right: Value<'v>, heap: &'v Heap) -> Value<'v> {
+ heap.alloc(StarlarkSelector::Sum(left, right))
 }
- /// Tests that two selects are equal to each other. For testing use only.
- /// We simply compare their string representations.
- fn select_equal_internal(left: Value, right: Value) -> anyhow::Result<bool> {
- Ok(left.to_repr() == right.to_repr())
+ pub fn from_concat<I>(iter: I, heap: &'v Heap) -> Value<'v>
+ where
+ I: IntoIterator<Item = Value<'v>>,
+ {
+ fn values_to_selector<'v, I>(
+ selector: Option<StarlarkSelector<'v>>,
+ values: &mut I,
+ heap: &'v Heap,
+ ) -> NoneOr<StarlarkSelector<'v>>
+ where
+ I: Iterator<Item = Value<'v>>,
+ {
+ match (selector, values.next()) {
+ (None, None) => NoneOr::None,
+ (None, Some(v)) => {
+ if let Some(next_v) = values.next() {
+ let head = StarlarkSelector::Sum(v, next_v);
+ values_to_selector(Some(head), values, heap)
+ } else {
+ NoneOr::Other(StarlarkSelector::new(v))
+ }
+ }
+ (Some(s), None) => NoneOr::Other(s),
+ (Some(s), Some(v)) => {
+ let head = Some(StarlarkSelector::Sum(heap.alloc(s), v));
+ values_to_selector(head, values, heap)
+ }
+ }
+ }
+ let selector = values_to_selector(None, &mut iter.into_iter(), heap);
+ heap.alloc(selector)
 }
 fn select_map<'a>(
 val: Value<'a>,
- eval: &mut Evaluator<'a, '_>,
+ eval: &mut Evaluator<'a, '_, '_>,
 func: Value<'a>,
- ) -> anyhow::Result<Value<'a>> {
+ ) -> starlark::Result<Value<'a>> {
 fn invoke<'v>(
- eval: &mut Evaluator<'v, '_>,
+ eval: &mut Evaluator<'v, '_, '_>,
 func: Value<'v>,
 val: Value<'v>,
- ) -> anyhow::Result<Value<'v>> {
+ ) -> starlark::Result<Value<'v>> {
 eval.eval_function(func, &[val], &[])
 }
 if let Some(selector) = StarlarkSelector::from_value(val) {
 match *selector {
- StarlarkSelectorGen::Inner(selector) => {
+ StarlarkSelectorGen::Primary(selector) => {
 let selector = DictRef::from_value(selector).unwrap();
 let mut mapped = SmallMap::with_capacity(selector.len());
 for (k, v) in selector.iter_hashed() {
@@ -103,8 +137,8 @@ impl<'v> StarlarkSelector<'v> {
 .heap()
 .alloc(StarlarkSelector::new(eval.heap().alloc(Dict::new(mapped)))))
 }
- StarlarkSelectorGen::Added(left, right) => {
- Ok(eval.heap().alloc(StarlarkSelectorGen::Added(
+ StarlarkSelectorGen::Sum(left, right) => {
+ Ok(eval.heap().alloc(StarlarkSelectorGen::Sum(
 Self::select_map(left, eval, func)?,
 Self::select_map(right, eval, func)?,
 )))
@@ -117,24 +151,26 @@ impl<'v> StarlarkSelector<'v> {
 fn select_test<'a>(
 val: Value<'a>,
- eval: &mut Evaluator<'a, '_>,
+ eval: &mut Evaluator<'a, '_, '_>,
 func: Value<'a>,
- ) -> anyhow::Result<bool> {
+ ) -> starlark::Result<bool> {
 fn invoke<'v>(
- eval: &mut Evaluator<'v, '_>,
+ eval: &mut Evaluator<'v, '_, '_>,
 func: Value<'v>,
 val: Value<'v>,
- ) -> anyhow::Result<bool> {
+ ) -> starlark::Result<bool> {
 eval.eval_function(func, &[val], &[])?
 .unpack_bool()
 .ok_or_else(|| {
- anyhow::anyhow!("Expected testing function to have a boolean return type")
+ starlark::Error::new_kind(starlark::ErrorKind::Native(anyhow::anyhow!(
+ "Expected testing function to have a boolean return type"
+ )))
 })
 }
 if let Some(selector) = StarlarkSelector::from_value(val) {
 match *selector {
- StarlarkSelectorGen::Inner(selector) => {
+ StarlarkSelectorGen::Primary(selector) => {
 let selector = DictRef::from_value(selector).unwrap();
 for v in selector.values() {
 let result = invoke(eval, func, v)?;
@@ -144,7 +180,7 @@ impl<'v> StarlarkSelector<'v> {
 }
 Ok(false)
 }
- StarlarkSelectorGen::Added(left, right) => {
+ StarlarkSelectorGen::Sum(left, right) => {
 Ok(Self::select_test(left, eval, func)?
 || Self::select_test(right, eval, func)?)
 }
@@ -166,8 +202,8 @@ impl<'v> StarlarkSelectorBase<'v> for StarlarkSelector<'v> {
 unsafe impl<'v> Trace<'v> for StarlarkSelector<'v> {
 fn trace(&mut self, tracer: &Tracer<'v>) {
 match self {
- Self::Inner(a) => tracer.trace(a),
- Self::Added(a, b) => {
+ Self::Primary(a) => tracer.trace(a),
+ Self::Sum(a, b) => {
 tracer.trace(a);
 tracer.trace(b);
 }
@@ -179,9 +215,9 @@ impl<'v> Freeze for StarlarkSelector<'v> {
 type Frozen = FrozenStarlarkSelector;
 fn freeze(self, freezer: &Freezer) -> anyhow::Result<Self::Frozen> {
 Ok(match self {
- StarlarkSelector::Inner(v) => FrozenStarlarkSelector::Inner(v.freeze(freezer)?),
- StarlarkSelector::Added(l, r) => {
- FrozenStarlarkSelector::Added(l.freeze(freezer)?, r.freeze(freezer)?)
+ StarlarkSelector::Primary(v) => FrozenStarlarkSelector::Primary(v.freeze(freezer)?),
+ StarlarkSelector::Sum(l, r) => {
+ FrozenStarlarkSelector::Sum(l.freeze(freezer)?, r.freeze(freezer)?)
 }
 })
 }
@@ -192,7 +228,7 @@ impl StarlarkSelectorBase<'_> for FrozenStarlarkSelector {
 }
 #[starlark_value(type = "selector")] // TODO(nga): rename to `"Select"` to match constant name.
-impl<'v, V: ValueLike<'v> + 'v> StarlarkValue<'v> for StarlarkSelectorGen<V>
+impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for StarlarkSelectorGen<V>
 where
 Self: ProvidesStaticType<'v> + StarlarkSelectorBase<'v, Item = V>,
 {
@@ -200,28 +236,21 @@ where
 true
 }
- fn radd(&self, left: Value<'v>, heap: &'v Heap) -> Option<anyhow::Result<Value<'v>>> {
+ fn radd(&self, left: Value<'v>, heap: &'v Heap) -> Option<starlark::Result<Value<'v>>> {
 let right = heap.alloc(match self {
- StarlarkSelectorGen::Inner(x) => StarlarkSelectorGen::Inner(x.to_value()),
- StarlarkSelectorGen::Added(x, y) => {
- StarlarkSelectorGen::Added(x.to_value(), y.to_value())
- }
+ StarlarkSelectorGen::Primary(x) => StarlarkSelectorGen::Primary(x.to_value()),
+ StarlarkSelectorGen::Sum(x, y) => StarlarkSelectorGen::Sum(x.to_value(), y.to_value()),
 });
- Some(StarlarkSelector::added(left, right, heap))
+ Some(Ok(StarlarkSelector::sum(left, right, heap)))
 }
- fn add(&self, other: Value<'v>, heap: &'v Heap) -> Option<anyhow::Result<Value<'v>>> {
+ fn add(&self, other: Value<'v>, heap: &'v Heap) -> Option<starlark::Result<Value<'v>>> {
 let this = match self {
- Self::Inner(ref v) => heap.alloc(StarlarkSelector::new(v.to_value())),
- Self::Added(ref l, ref r) => {
- match StarlarkSelector::added(l.to_value(), r.to_value(), heap) {
- Err(e) => return Some(Err(e)),
- Ok(v) => v,
- }
- }
+ Self::Primary(ref v) => heap.alloc(StarlarkSelector::new(v.to_value())),
+ Self::Sum(ref l, ref r) => StarlarkSelector::sum(l.to_value(), r.to_value(), heap),
 };
- Some(StarlarkSelector::added(this, other, heap))
+ Some(Ok(StarlarkSelector::sum(this, other, heap)))
 }
 }
@@ -230,7 +259,8 @@ pub fn register_select(globals: &mut GlobalsBuilder) {
 const Select: StarlarkValueAsType<StarlarkSelector> = StarlarkValueAsType::new();
 fn select<'v>(#[starlark(require = pos)] d: Value<'v>) -> anyhow::Result<StarlarkSelector<'v>> {
- Ok(StarlarkSelector::new(d))
+ let d = ValueOf::<DictType<StringValue, Value>>::unpack_value_err(d)?;
+ Ok(StarlarkSelector::new(*d))
 }
 /// Maps a selector.
@@ -243,13 +273,13 @@ pub fn register_select(globals: &mut GlobalsBuilder) {
 /// def increment_items(a):
 /// return [v + 1 for v in a]
 ///
- /// select_map([1, 2] + select({"c": [2]}}, increment_items) == [2, 3] + select({"c": [3]})
+ /// select_map([1, 2] + select({"c": [2]}), increment_items) == [2, 3] + select({"c": [3]})
 /// ```
 fn select_map<'v>(
 #[starlark(require = pos)] d: Value<'v>,
 #[starlark(require = pos)] func: Value<'v>,
- eval: &mut Evaluator<'v, '_>,
- ) -> anyhow::Result<Value<'v>> {
+ eval: &mut Evaluator<'v, '_, '_>,
+ ) -> starlark::Result<Value<'v>> {
 StarlarkSelector::select_map(d, eval, func)
 }
@@ -266,16 +296,17 @@ pub fn register_select(globals: &mut GlobalsBuilder) {
 fn select_test<'v>(
 #[starlark(require = pos)] d: Value<'v>,
 #[starlark(require = pos)] func: Value<'v>,
- eval: &mut Evaluator<'v, '_>,
- ) -> anyhow::Result<bool> {
+ eval: &mut Evaluator<'v, '_, '_>,
+ ) -> starlark::Result<bool> {
 StarlarkSelector::select_test(d, eval, func)
 }
 /// Tests that two selects are equal to each other. For testing use only.
+ /// We simply compare their string representations.
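+ ///
+ /// A sketch of the intended use:
+ ///
+ /// ```python
+ /// select_equal_internal(select({"DEFAULT": [1]}), select({"DEFAULT": [1]})) == True
+ /// select_equal_internal(select({"DEFAULT": [1]}), select({"DEFAULT": [2]})) == False
+ /// ```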
 fn select_equal_internal<'v>(
 #[starlark(require = pos)] left: Value<'v>,
 #[starlark(require = pos)] right: Value<'v>,
 ) -> anyhow::Result<bool> {
- StarlarkSelector::select_equal_internal(left, right)
+ Ok(left.to_repr() == right.to_repr())
 }
 }
diff --git a/app/buck2_interpreter_for_build/src/interpreter/testing.rs b/app/buck2_interpreter_for_build/src/interpreter/testing.rs
index 55ddb6a34e1bf..8cb0937ef9ac0 100644
--- a/app/buck2_interpreter_for_build/src/interpreter/testing.rs
+++ b/app/buck2_interpreter_for_build/src/interpreter/testing.rs
@@ -9,21 +9,19 @@
 use std::sync::Arc;
-use buck2_common::legacy_configs::testing::TestConfigParserFileOps;
-use buck2_common::legacy_configs::LegacyBuckConfig;
-use buck2_common::legacy_configs::LegacyBuckConfigs;
+use buck2_common::legacy_configs::configs::testing::parse_with_config_args;
+use buck2_common::legacy_configs::configs::LegacyBuckConfig;
 use buck2_common::package_listing::listing::testing::PackageListingExt;
 use buck2_common::package_listing::listing::PackageListing;
-use buck2_common::result::SharedResult;
 use buck2_core::build_file_path::BuildFilePath;
 use buck2_core::bzl::ImportPath;
-use buck2_core::cells::alias::NonEmptyCellAlias;
 use buck2_core::cells::build_file_cell::BuildFileCell;
 use buck2_core::cells::cell_root_path::CellRootPathBuf;
 use buck2_core::cells::name::CellName;
-use buck2_core::cells::*;
-use buck2_core::fs::paths::abs_norm_path::AbsNormPath;
-use buck2_core::fs::project_rel_path::ProjectRelativePathBuf;
+use buck2_core::cells::CellAliasResolver;
+use buck2_core::cells::CellResolver;
+use buck2_core::fs::project_rel_path::ProjectRelativePath;
+use buck2_core::target::label::interner::ConcurrentTargetLabelInterner;
 use buck2_interpreter::extra::InterpreterHostArchitecture;
 use buck2_interpreter::extra::InterpreterHostPlatform;
 use buck2_interpreter::factory::StarlarkPassthroughProvider;
@@ -39,16 +37,15 @@
 use buck2_node::nodes::targets_map::TargetsMap;
 use buck2_node::super_package::SuperPackage;
 use dupe::Dupe;
 use indoc::indoc;
-use maplit::hashmap;
 use starlark::environment::GlobalsBuilder;
-use starlark::starlark_module;
-use starlark::values::Value;
+use crate::interpreter::buckconfig::LegacyConfigsViewForStarlark;
+use crate::interpreter::cell_info::InterpreterCellInfo;
 use crate::interpreter::configuror::AdditionalGlobalsFn;
 use crate::interpreter::configuror::BuildInterpreterConfiguror;
 use crate::interpreter::global_interpreter_state::GlobalInterpreterState;
 use crate::interpreter::interpreter_for_cell::InterpreterForCell;
-use crate::interpreter::interpreter_for_cell::ParseResult;
+use crate::interpreter::interpreter_for_cell::ParseData;
 use crate::super_package::package_value::SuperPackageValuesImpl;
 /// Simple container that allows us to instrument things like imports
@@ -56,23 +53,14 @@
 pub struct Tester {
 cell_alias_resolver: CellAliasResolver,
 cell_resolver: CellResolver,
- configs: LegacyBuckConfigs,
+ root_config: LegacyBuckConfig,
 loaded_modules: LoadedModules,
 additional_globals: Vec<AdditionalGlobalsFn>,
 prelude_path: Option<PreludePath>,
 }
-/// These functions will be available in the starlark environment for all code running through a Tester.
-#[starlark_module]
-pub fn common_helpers(builder: &mut GlobalsBuilder) {
- /// Returns the string that pprint() will produce
- fn pprint_str<'v>(value: Value<'v>) -> anyhow::Result<String> {
- Ok(format!("{:#}", value))
- }
-}
-
 /// Helpers required to help drive the interpreter
-pub type CellsData = (CellAliasResolver, CellResolver, LegacyBuckConfigs);
+pub type CellsData = (CellAliasResolver, CellResolver, LegacyBuckConfig);
 /// The same as `run_starlark_test`, but just make sure the parse succeeds;
 /// ignore the targets
@@ -85,28 +73,17 @@ pub fn run_simple_starlark_test(content: &str) -> anyhow::Result<()> {
 }
 pub fn cells(extra_root_config: Option<&str>) -> anyhow::Result<CellsData> {
- let mut agg = CellsAggregator::new();
- agg.add_cell_entry(
- CellRootPathBuf::new(ProjectRelativePathBuf::try_from("".to_owned())?),
- NonEmptyCellAlias::new("root".to_owned()).unwrap(),
- CellRootPathBuf::new(ProjectRelativePathBuf::try_from("".to_owned())?),
- )?;
- let resolver = agg.make_cell_resolver()?;
- let root_path = if cfg!(windows) {
- AbsNormPath::new("c:/root").unwrap()
- } else {
- AbsNormPath::new("/root").unwrap()
- };
-
- let configs = hashmap![
- CellName::testing_new("root") =>
- LegacyBuckConfig::parse_with_file_ops(
- root_path,
- &mut TestConfigParserFileOps::new(&[
- (
- "/root",
- indoc!(
- r#"
+ let resolver = CellResolver::testing_with_name_and_path(
+ CellName::testing_new("root"),
+ CellRootPathBuf::new(ProjectRelativePath::empty().to_owned()),
+ );
+
+ let config = parse_with_config_args(
+ &[
+ (
+ "root",
+ indoc!(
+ r#"
 [section]
 key = value
 other = 1
@@ -117,25 +94,22 @@ pub fn cells(extra_root_config: Option<&str>) -> anyhow::Result<CellsData> {
 "#
- ),
 ),
- ("/extra_cfg", extra_root_config.unwrap_or("")),
- ])?,
- &[],
- )?
- ];
+ ),
+ ("extra_cfg", extra_root_config.unwrap_or("")),
+ ],
+ "root",
+ &[],
+ )?;
 Ok((
 resolver
- .get(CellName::testing_new("root"))?
- .cell_alias_resolver()
- .dupe(),
+ resolver.root_cell_cell_alias_resolver().dupe(),
 resolver,
- LegacyBuckConfigs::new(configs),
+ config,
 ))
 }
-pub fn expect_error<T>(result: SharedResult<T>, content: &str, expected: &str) {
+pub fn expect_error<T>(result: buck2_error::Result<T>, content: &str, expected: &str) {
 match result {
 Ok(_) => {
 eprintln!(
@@ -145,7 +119,7 @@
 panic!();
 }
 Err(e) => {
- let returned = e.to_string();
+ let returned = format!("{:?}", e);
 if !returned.contains(expected) {
 eprintln!(
 "Could not find expected error string.\nExpected:\n{}\n\nError:\n{}\n\nCode contents:\n{}",
@@ -163,11 +137,11 @@ impl Tester {
 }
 pub fn with_cells(cells_data: CellsData) -> anyhow::Result<Self> {
- let (cell_alias_resolver, cell_resolver, configs) = cells_data;
+ let (cell_alias_resolver, cell_resolver, root_config) = cells_data;
 Ok(Self {
 cell_alias_resolver,
 cell_resolver,
- configs,
+ root_config,
 loaded_modules: LoadedModules::default(),
 additional_globals: Vec::new(),
 prelude_path: None,
 })
 }
@@ -187,18 +161,21 @@
 }
 fn interpreter(&self) -> anyhow::Result<Arc<InterpreterForCell>> {
+ let build_file_cell = BuildFileCell::new(self.cell_alias_resolver.resolve_self());
 let import_paths = ImplicitImportPaths::parse(
- self.configs
- .get(self.cell_alias_resolver.resolve_self())
- .unwrap(),
- BuildFileCell::new(self.cell_alias_resolver.resolve_self()),
+ &self.root_config,
+ build_file_cell,
 &self.cell_alias_resolver,
 )?;
 let additional_globals = self.additional_globals.clone();
- Ok(Arc::new(InterpreterForCell::new(
+ let cell_info = InterpreterCellInfo::new(
+ build_file_cell,
+ self.cell_resolver.dupe(),
 self.cell_alias_resolver.dupe(),
+ )?;
+ Ok(Arc::new(InterpreterForCell::new(
+ cell_info,
 Arc::new(GlobalInterpreterState::new(
- &self.configs,
 self.cell_resolver.dupe(),
 BuildInterpreterConfiguror::new(
 self.prelude_path.clone(),
@@ -207,16 +184,12 @@
 None,
 false,
 false,
- |_| {},
- |_| {},
- |_| {},
- |_| {},
 Some(AdditionalGlobalsFn(Arc::new(move |globals_builder| {
- common_helpers(globals_builder);
 for additional_globals in &additional_globals {
 (additional_globals.0)(globals_builder)
 }
 }))),
+ Arc::new(ConcurrentTargetLabelInterner::default()),
 )?,
 false,
 true,
@@ -225,11 +198,12 @@
 )?))
 }
- pub fn parse(&self, import: StarlarkPath, content: &str) -> ParseResult {
+ pub fn parse(&self, import: StarlarkPath, content: &str) -> ParseData {
 self.interpreter()
 .unwrap()
 .parse(import, content.to_owned())
 .unwrap()
+ .unwrap()
 }
 /// Evaluate an import, and add it to the existing loaded_modules() map to be
@@ -252,18 +226,15 @@
 loaded_modules: LoadedModules,
 ) -> anyhow::Result<FrozenModule> {
 let interpreter = self.interpreter()?;
- let ParseResult(ast, _) =
- interpreter.parse(StarlarkPath::LoadFile(path), content.to_owned())?;
- let buckconfig = self
- .configs
- .get(self.cell_alias_resolver.resolve_self())
- .unwrap();
- let root_buckconfig = self.configs.get(self.cell_resolver.root_cell()).unwrap();
+ let ParseData(ast, _) =
+ interpreter.parse(StarlarkPath::LoadFile(path), content.to_owned())??;
 let mut provider = StarlarkPassthroughProvider;
+ let mut buckconfigs =
+ LegacyConfigsViewForStarlark::new(self.root_config.dupe(), self.root_config.dupe());
+
 let env = interpreter.eval_module(
 StarlarkModulePath::LoadFile(path),
- buckconfig,
- root_buckconfig,
+ &mut buckconfigs,
 ast,
 loaded_modules.clone(),
 &mut provider,
@@ -301,18 +272,14 @@
 package_listing: PackageListing,
 ) -> anyhow::Result<EvaluationResult>
{
 let interpreter = self.interpreter()?;
- let ParseResult(ast, _) =
- interpreter.parse(StarlarkPath::BuildFile(path), content.to_owned())?;
- let buckconfig = self
- .configs
- .get(self.cell_alias_resolver.resolve_self())
- .unwrap();
- let root_buckconfig = self.configs.get(self.cell_resolver.root_cell()).unwrap();
+ let ParseData(ast, _) =
+ interpreter.parse(StarlarkPath::BuildFile(path), content.to_owned())??;
 let mut provider = StarlarkPassthroughProvider;
- let eval_result = interpreter.eval_build_file(
+ let mut buckconfigs =
+ LegacyConfigsViewForStarlark::new(self.root_config.dupe(), self.root_config.dupe());
+ let eval_result_with_stats = interpreter.eval_build_file(
 path,
- buckconfig,
- root_buckconfig,
+ &mut buckconfigs,
 package_listing,
 SuperPackage::empty::<SuperPackageValuesImpl>(),
 false,
@@ -321,7 +288,7 @@
 &mut provider,
 true,
 )?;
- Ok(eval_result)
+ Ok(eval_result_with_stats.result)
 }
 pub fn build_file_path() -> BuildFilePath {
@@ -330,7 +297,7 @@
 /// Run a starlark test with a basic environment. See
 /// `run_starlark_test()` above.
- pub fn run_starlark_test(&mut self, content: &str) -> SharedResult<TargetsMap> {
+ pub fn run_starlark_test(&mut self, content: &str) -> buck2_error::Result<TargetsMap> {
 let import_path = ImportPath::testing_new("root//some/package:defs.bzl");
 self.add_import(
 &import_path,
@@ -367,7 +334,7 @@
 /// evaluation was successful. This can be handy if the .bzl
 /// evaluation environment is different from the build file
 /// environment.
- pub fn run_starlark_bzl_test(&mut self, content: &str) -> SharedResult<()> {
+ pub fn run_starlark_bzl_test(&mut self, content: &str) -> buck2_error::Result<()> {
 let import_path = ImportPath::testing_new("root//some/package:defs.bzl");
 let template = indoc!(
 r#"
diff --git a/app/buck2_interpreter_for_build/src/label.rs b/app/buck2_interpreter_for_build/src/label.rs
index d26f13d214be4..89aae66ac0dc3 100644
--- a/app/buck2_interpreter_for_build/src/label.rs
+++ b/app/buck2_interpreter_for_build/src/label.rs
@@ -9,10 +9,10 @@
 pub mod testing {
 use buck2_core::configuration::data::ConfigurationData;
+ use buck2_core::pattern::pattern::ParsedPattern;
 use buck2_core::pattern::pattern_type::ProvidersPatternExtra;
 use buck2_core::pattern::pattern_type::TargetPatternExtra;
- use buck2_core::pattern::ParsedPattern;
- use buck2_core::target::label::TargetLabel;
+ use buck2_core::target::label::label::TargetLabel;
 use buck2_interpreter::types::configured_providers_label::StarlarkConfiguredProvidersLabel;
 use buck2_interpreter::types::target_label::StarlarkTargetLabel;
 use starlark::environment::GlobalsBuilder;
@@ -21,7 +21,7 @@ pub mod testing {
 use crate::interpreter::build_context::BuildContext;
- #[derive(Debug, thiserror::Error)]
+ #[derive(Debug, buck2_error::Error)]
 enum LabelCreatorError {
 #[error("Expected provider, found something else: `{0}`")]
 ExpectedProvider(String),
@@ -33,13 +33,14 @@ pub mod testing {
 pub fn label_creator(builder: &mut GlobalsBuilder) {
 fn label<'v>(
 s: &str,
- eval: &mut Evaluator<'v, '_>,
+ eval: &mut Evaluator<'v, '_, '_>,
 ) -> anyhow::Result<StarlarkConfiguredProvidersLabel> {
 let c = BuildContext::from_context(eval)?;
 let target = match ParsedPattern::<ProvidersPatternExtra>::parse_precise(
 s,
 c.cell_info().name().name(),
 c.cell_info().cell_resolver(),
+ c.cell_info().cell_alias_resolver(),
 )?
{
 ParsedPattern::Target(package, target_name, providers) => {
 providers.into_providers_label(package, target_name.as_ref())
@@ -53,13 +54,14 @@ pub mod testing {
 fn target_label<'v>(
 s: &str,
- eval: &mut Evaluator<'v, '_>,
+ eval: &mut Evaluator<'v, '_, '_>,
 ) -> anyhow::Result<StarlarkTargetLabel> {
 let c = BuildContext::from_context(eval)?;
 let target = match ParsedPattern::<TargetPatternExtra>::parse_precise(
 s,
 c.cell_info().name().name(),
 c.cell_info().cell_resolver(),
+ c.cell_info().cell_alias_resolver(),
 )? {
 ParsedPattern::Target(package, target_name, TargetPatternExtra) => {
 TargetLabel::new(package, target_name.as_ref())
@@ -70,75 +72,3 @@ pub mod testing {
 }
 }
 }
-
-#[cfg(test)]
-mod tests {
- use indoc::indoc;
-
- use super::testing::label_creator;
- use crate::interpreter::testing::expect_error;
- use crate::interpreter::testing::Tester;
-
- #[test]
- fn labels_are_usable() -> anyhow::Result<()> {
- fn new_tester() -> anyhow::Result<Tester> {
- let mut tester = Tester::new()?;
- tester.additional_globals(label_creator);
- Ok(tester)
- }
-
- let mut tester = new_tester()?;
- tester.run_starlark_bzl_test(indoc!(
- r#"
- frozen_l_default = label("root//foo/bar:baz")
- frozen_l = label("root//foo/bar:baz[something]")
- def test():
- l_default = label("root//foo/bar:baz")
- l = label("root//foo/bar:baz[something]")
-
- assert_eq_ignore_hash("root//foo/bar:baz (#)", repr(frozen_l_default))
- assert_eq_ignore_hash("root//foo/bar:baz (#)", str(frozen_l_default))
- assert_eq("foo/bar", frozen_l_default.package)
- assert_eq("baz", frozen_l_default.name)
- assert_eq(None, frozen_l_default.sub_target)
- assert_eq("root", frozen_l_default.cell)
-
- assert_eq_ignore_hash("root//foo/bar:baz[something] (#)", repr(frozen_l))
- assert_eq_ignore_hash("root//foo/bar:baz[something] (#)", str(frozen_l))
- assert_eq("foo/bar", frozen_l.package)
- assert_eq("baz", frozen_l.name)
- assert_eq(["something"], frozen_l.sub_target)
-
- assert_eq_ignore_hash("root//foo/bar:baz (#)", repr(l_default))
- assert_eq_ignore_hash("root//foo/bar:baz (#)", str(l_default))
- assert_eq("foo/bar", l_default.package)
- assert_eq("baz", l_default.name)
- assert_eq(None, l_default.sub_target)
-
- assert_eq_ignore_hash("root//foo/bar:baz[something] (#)", repr(l))
- assert_eq_ignore_hash("root//foo/bar:baz[something] (#)", str(l))
- assert_eq("foo/bar", l.package)
- assert_eq("baz", l.name)
- assert_eq(["something"], l.sub_target)
- assert_eq("root", l.cell)
-
- "#
- ))?;
-
- let mut tester = new_tester()?;
- let invalid_fields = indoc!(
- r#"
- l = label("root//foo:bar[baz]")
- def hide_type(v): return v
- def test():
- hide_type(l).invalid_field
- "#
- );
- expect_error(
- tester.run_starlark_test(invalid_fields),
- invalid_fields,
- "Object of type `label` has no attribute `invalid_field`",
- );
- Ok(())
- }
-}
diff --git a/app/buck2_interpreter_for_build/src/lib.rs b/app/buck2_interpreter_for_build/src/lib.rs
index 70c3c60e66de7..5a1b1d550a029 100644
--- a/app/buck2_interpreter_for_build/src/lib.rs
+++ b/app/buck2_interpreter_for_build/src/lib.rs
@@ -7,26 +7,29 @@
 * of this source tree.
*/ +#![feature(error_generic_member_access)] #![feature(try_blocks)] use std::sync::Once; pub mod attrs; +pub mod call_stack; pub mod interpreter; pub mod label; pub mod nodes; -pub mod plugins; +pub(crate) mod plugins; pub mod rule; pub mod super_package; pub fn init_late_bindings() { static ONCE: Once = Once::new(); ONCE.call_once(|| { - attrs::attrs_global::init_coerce_target_label(); + attrs::attrs_global::init_coerce_target_label_for_bzl(); interpreter::calculation::init_interpreter_calculation_impl(); interpreter::calculation::init_target_graph_calculation_impl(); interpreter::build_context::init_starlark_path_from_build_context(); plugins::init_plugin_kind_from_value_impl(); rule::init_frozen_rule_get_impl(); + rule::init_frozen_promise_artifact_mappings_get_impl(); }); } diff --git a/app/buck2_interpreter_for_build/src/nodes/mod.rs b/app/buck2_interpreter_for_build/src/nodes.rs similarity index 100% rename from app/buck2_interpreter_for_build/src/nodes/mod.rs rename to app/buck2_interpreter_for_build/src/nodes.rs diff --git a/app/buck2_interpreter_for_build/src/nodes/attr_spec.rs b/app/buck2_interpreter_for_build/src/nodes/attr_spec.rs index 6cf185c21f5bb..72d20bc204fa2 100644 --- a/app/buck2_interpreter_for_build/src/nodes/attr_spec.rs +++ b/app/buck2_interpreter_for_build/src/nodes/attr_spec.rs @@ -10,12 +10,15 @@ use std::collections::HashMap; use anyhow::Context; -use buck2_core::target::label::TargetLabelRef; -use buck2_core::target::name::TargetName; +use buck2_core::target::label::label::TargetLabelRef; +use buck2_core::target::name::TargetNameRef; +use buck2_error::internal_error_anyhow; +use buck2_node::attrs::attr::Attribute; use buck2_node::attrs::attr::CoercedValue; use buck2_node::attrs::attr_type::string::StringLiteral; use buck2_node::attrs::coerced_attr::CoercedAttr; use buck2_node::attrs::configurable::AttrIsConfigurable; +use buck2_node::attrs::id::AttributeId; use buck2_node::attrs::inspect_options::AttrInspectOptions; use buck2_node::attrs::internal::attr_is_configurable; use buck2_node::attrs::internal::NAME_ATTRIBUTE_FIELD; @@ -28,7 +31,9 @@ use dupe::Dupe; use starlark::docs::DocString; use starlark::eval::ParametersParser; use starlark::eval::ParametersSpec; -use starlark::typing::Param; +use starlark::eval::ParametersSpecParam; +use starlark::typing::ParamIsRequired; +use starlark::typing::ParamSpec; use starlark::typing::Ty; use starlark::typing::TyFunction; use starlark::values::Value; @@ -37,19 +42,26 @@ use crate::attrs::AttributeCoerceExt; use crate::interpreter::module_internals::ModuleInternals; use crate::nodes::check_within_view::check_within_view; -#[derive(Debug, thiserror::Error)] -enum AttributeSpecError { - #[error("`within_view` coerced incorrectly (internal error)")] - WithinViewCoercedIncorrectly, -} - pub trait AttributeSpecExt { + fn start_parse<'a, 'v>( + &'a self, + param_parser: &mut ParametersParser<'v, '_>, + size_hint: usize, + ) -> anyhow::Result<( + // "name" attribute value. + &'v TargetNameRef, + // Remaining attributes. + impl ExactSizeIterator + 'a, + // Populated with name. + AttrValues, + )>; + fn parse_params<'v>( &self, - param_parser: ParametersParser<'v, '_>, + param_parser: &mut ParametersParser<'v, '_>, arg_count: usize, internals: &ModuleInternals, - ) -> anyhow::Result<(TargetName, AttrValues)>; + ) -> anyhow::Result<(&'v TargetNameRef, AttrValues)>; /// Returns a starlark Parameters for the rule callable. 
     fn signature(&self, rule_name: String) -> ParametersSpec<Value<'v>>;
@@ -57,45 +69,58 @@ pub trait AttributeSpecExt {
     fn ty_function(&self) -> TyFunction;

     fn starlark_types(&self) -> Vec<Ty>;
-
-    fn docstrings(&self) -> HashMap<String, Option<DocString>>;
 }

 impl AttributeSpecExt for AttributeSpec {
-    /// Parses params extracting the TargetName and the attribute values to store in the TargetNode.
-    fn parse_params<'v>(
-        &self,
-        mut param_parser: ParametersParser<'v, '_>,
-        arg_count: usize,
-        internals: &ModuleInternals,
-    ) -> anyhow::Result<(TargetName, AttrValues)> {
-        let mut attr_values = AttrValues::with_capacity(arg_count);
+    fn start_parse<'a, 'v>(
+        &'a self,
+        param_parser: &mut ParametersParser<'v, '_>,
+        size_hint: usize,
+    ) -> anyhow::Result<(
+        &'v TargetNameRef,
+        impl ExactSizeIterator<Item = (&'a str, AttributeId, &'a Attribute)> + 'a,
+        AttrValues,
+    )> {
+        let mut attr_values = AttrValues::with_capacity(size_hint);

         let mut indices = self.attr_specs();
         let name = match indices.next() {
-            Some((name_name, attr_idx, _attr))
-                if name_name == NAME_ATTRIBUTE_FIELD && attr_idx.index_in_attribute_spec == 0 =>
-            {
-                let name: &str = param_parser.next(NAME_ATTRIBUTE_FIELD)?;
-
+            Some((name_name, attr_idx, _attr)) if name_name == NAME_ATTRIBUTE_FIELD => {
+                let name = param_parser.next()?;
                 attr_values.push_sorted(
                     attr_idx,
                     CoercedAttr::String(StringLiteral(ArcStr::from(name))),
                 );
-
-                TargetName::new(name)?
+                name
+            }
+            _ => {
+                return Err(internal_error_anyhow!(
+                    "First attribute is `name`, it is known"
+                ));
             }
-            _ => panic!("First attribute is `name`, it is known"),
         };
+        let name = TargetNameRef::new(name)?;
+        Ok((name, indices, attr_values))
+    }

-        let target_label = TargetLabelRef::new(internals.buildfile_path().package(), name.as_ref());
+    /// Parses params extracting the TargetName and the attribute values to store in the TargetNode.
+    fn parse_params<'v>(
+        &self,
+        param_parser: &mut ParametersParser<'v, '_>,
+        arg_count: usize,
+        internals: &ModuleInternals,
+    ) -> anyhow::Result<(&'v TargetNameRef, AttrValues)> {
+        let (name, indices, mut attr_values) = self.start_parse(param_parser, arg_count)?;
+
+        let target_label = TargetLabelRef::new(internals.buildfile_path().package(), name);

         for (attr_name, attr_idx, attribute) in indices {
             let configurable = attr_is_configurable(attr_name);

             let user_value: Option<Value> = match attribute.default() {
-                Some(_) => param_parser.next_opt(attr_name)?,
-                None => Some(param_parser.next(attr_name)?),
+                Some(_) => param_parser.next_opt()?,
+                None => Some(param_parser.next()?),
             };

             let attr_is_visibility = attr_name == VISIBILITY_ATTRIBUTE_FIELD;
@@ -154,7 +179,7 @@ impl AttributeSpecExt for AttributeSpec {
         if let Some(within_view) = attr_values.get(AttributeSpec::within_view_attr_id()) {
             let within_view = match within_view {
                 CoercedAttr::WithinView(within_view) => within_view,
-                _ => return Err(AttributeSpecError::WithinViewCoercedIncorrectly.into()),
+                _ => return Err(internal_error_anyhow!("`within_view` coerced incorrectly")),
             };
             for a in self.attrs(&attr_values, AttrInspectOptions::DefinedOnly) {
                 check_within_view(
@@ -177,15 +202,19 @@ impl AttributeSpecExt for AttributeSpec {
     /// Returns a starlark Parameters for the rule callable.
fn signature(&self, rule_name: String) -> ParametersSpec> { - let mut signature = ParametersSpec::with_capacity(rule_name, self.len()); - signature.no_more_positional_args(); - for (name, _idx, attribute) in self.attr_specs() { - match attribute.default() { - Some(_) => signature.optional(name), - None => signature.required(name), - }; - } - signature.finish() + ParametersSpec::new_named_only( + &rule_name, + self.attr_specs().map(|(name, _idx, attribute)| { + let default = attribute.default(); + ( + name, + match default { + Some(_) => ParametersSpecParam::Optional, + None => ParametersSpecParam::Required, + }, + ) + }), + ) } fn ty_function(&self) -> TyFunction { @@ -195,13 +224,13 @@ impl AttributeSpecExt for AttributeSpec { AttrIsConfigurable::Yes => attribute.starlark_type().to_ty_with_select(), AttrIsConfigurable::No => attribute.starlark_type().to_ty(), }; - let param = Param::name_only(name, ty); - let param = match attribute.default() { - Some(_) => param.optional(), - None => param, + let required = match attribute.default() { + Some(_) => ParamIsRequired::No, + None => ParamIsRequired::Yes, }; - params.push(param); + params.push((starlark::util::ArcStr::from(name), required, ty)); } + let params = ParamSpec::new_named_only(params).unwrap(); TyFunction::new(params, Ty::none()) } diff --git a/app/buck2_interpreter_for_build/src/nodes/check_within_view.rs b/app/buck2_interpreter_for_build/src/nodes/check_within_view.rs index 109ae610f6eae..95434c2a05d33 100644 --- a/app/buck2_interpreter_for_build/src/nodes/check_within_view.rs +++ b/app/buck2_interpreter_for_build/src/nodes/check_within_view.rs @@ -9,14 +9,15 @@ use std::sync::Arc; -use buck2_core::buck_path::path::BuckPathRef; use buck2_core::configuration::transition::id::TransitionId; +use buck2_core::package::source_path::SourcePathRef; use buck2_core::package::PackageLabel; use buck2_core::plugins::PluginKind; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use buck2_node::attrs::attr_type::AttrType; use buck2_node::attrs::coerced_attr::CoercedAttr; use buck2_node::attrs::traversal::CoercedAttrTraversal; +use buck2_node::configuration::resolved::ConfigurationSettingKey; use buck2_node::visibility::VisibilityPattern; use buck2_node::visibility::VisibilityPatternList; use buck2_node::visibility::WithinViewSpecification; @@ -35,13 +36,15 @@ fn indented_within_view(spec: &WithinViewSpecification) -> String { } } -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] +#[buck2(input)] enum CheckWithinViewError { #[error( - "Dependency `{}` is not within view (as specified by `within_view` attribute):\n{}", + "Target's `within_view` attribute does not allow dependency `{}`. Allowed dependencies:\n{}", _0, indented_within_view(_1) )] + #[buck2(tag = Visibility)] DepNotWithinView(TargetLabel, WithinViewSpecification), } @@ -108,7 +111,7 @@ pub(crate) fn check_within_view( self.check_dep_within_view(dep) } - fn configuration_dep(&mut self, _dep: &'a TargetLabel) -> anyhow::Result<()> { + fn configuration_dep(&mut self, _dep: &'a ConfigurationSettingKey) -> anyhow::Result<()> { // Skip configuration deps. 
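Back in attr_spec.rs, the `signature()` rewrite above swaps the incremental builder (`no_more_positional_args()` plus per-attribute `optional`/`required` calls) for a single `ParametersSpec::new_named_only` constructor. The intended call-site contract is unchanged: every attribute is a named-only parameter, required exactly when it has no default. A sketch with invented rule and attribute names:

```python
my_rule = rule(
    impl = _impl,  # assumed to be defined above
    attrs = {
        "out": attrs.string(),                             # no default => required
        "srcs": attrs.list(attrs.source(), default = []),  # has default => optional
    },
)

# In a BUCK file:
my_rule(name = "a", out = "a.txt")  # ok: everything passed by name
# my_rule("a", "a.txt")             # rejected: no positional parameters exist
# my_rule(name = "a")               # rejected: `out` has no default
```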
Ok(()) } @@ -117,7 +120,7 @@ pub(crate) fn check_within_view( self.check_dep_within_view(dep) } - fn input(&mut self, _input: BuckPathRef) -> anyhow::Result<()> { + fn input(&mut self, _input: SourcePathRef) -> anyhow::Result<()> { Ok(()) } } diff --git a/app/buck2_interpreter_for_build/src/nodes/unconfigured.rs b/app/buck2_interpreter_for_build/src/nodes/unconfigured.rs index 1ef7eecd2ae08..1e06b77bc1b66 100644 --- a/app/buck2_interpreter_for_build/src/nodes/unconfigured.rs +++ b/app/buck2_interpreter_for_build/src/nodes/unconfigured.rs @@ -9,13 +9,10 @@ use std::sync::Arc; -use buck2_core::target::label::TargetLabel; -use buck2_core::target::name::TargetNameRef; +use buck2_core::target::label::label::TargetLabel; use buck2_node::attrs::coerced_deps_collector::CoercedDeps; use buck2_node::attrs::coerced_deps_collector::CoercedDepsCollector; use buck2_node::attrs::inspect_options::AttrInspectOptions; -use buck2_node::attrs::internal::NAME_ATTRIBUTE_FIELD; -use buck2_node::attrs::values::AttrValues; use buck2_node::call_stack::StarlarkCallStack; use buck2_node::nodes::unconfigured::TargetNode; use buck2_node::package::Package; @@ -25,6 +22,7 @@ use starlark::eval::CallStack; use starlark::eval::ParametersParser; use starlark::values::Value; +use crate::call_stack::StarlarkCallStackWrapper; use crate::interpreter::module_internals::ModuleInternals; use crate::nodes::attr_spec::AttributeSpecExt; @@ -33,14 +31,14 @@ pub trait TargetNodeExt: Sized { rule: Arc, package: Arc, internals: &ModuleInternals, - param_parser: ParametersParser<'v, '_>, + param_parser: &mut ParametersParser<'v, '_>, ) -> anyhow::Result; fn from_params<'v>( rule: Arc, package: Arc, internals: &ModuleInternals, - param_parser: ParametersParser<'v, '_>, + param_parser: &mut ParametersParser<'v, '_>, arg_count: usize, ignore_attrs_for_profiling: bool, call_stack: Option, @@ -53,26 +51,25 @@ impl TargetNodeExt for TargetNode { rule: Arc, package: Arc, internals: &ModuleInternals, - mut param_parser: ParametersParser<'v, '_>, + param_parser: &mut ParametersParser<'v, '_>, ) -> anyhow::Result { - for (attr_name, _attr_idx, _attr) in rule.attributes.attr_specs() { - let value: Value = param_parser.next(attr_name)?; - if attr_name == NAME_ATTRIBUTE_FIELD { - let label = TargetLabel::new( - internals.buildfile_path().package().dupe(), - TargetNameRef::new(value.unpack_str().unwrap()).unwrap(), - ); - return Ok(TargetNode::new( - rule.dupe(), - package, - label, - AttrValues::with_capacity(0), - CoercedDeps::default(), - None, - )); - } + let (name, indices, attr_values) = rule.attributes.start_parse(param_parser, 1)?; + + for (_, _, _) in indices { + // Consume all the arguments. + // We call `next_opt` even for non-optional parameters. starlark-rust doesn't check. + param_parser.next_opt::()?; } - unreachable!("`name` attribute not found"); + + let label = TargetLabel::new(internals.buildfile_path().package().dupe(), name); + Ok(TargetNode::new( + rule.dupe(), + package, + label, + attr_values, + CoercedDeps::default(), + None, + )) } /// The body of the callable returned by `rule()`. 
Records the target in this package's `TargetMap` @@ -81,7 +78,7 @@ impl TargetNodeExt for TargetNode { rule: Arc, package: Arc, internals: &ModuleInternals, - param_parser: ParametersParser<'v, '_>, + param_parser: &mut ParametersParser<'v, '_>, arg_count: usize, ignore_attrs_for_profiling: bool, call_stack: Option, @@ -100,7 +97,7 @@ impl TargetNodeExt for TargetNode { .parse_params(param_parser, arg_count, internals)?; let package_name = internals.buildfile_path().package(); - let label = TargetLabel::new(package_name.dupe(), target_name.as_ref()); + let label = TargetLabel::new(package_name.dupe(), target_name); let mut deps_cache = CoercedDepsCollector::new(); for a in rule.attributes.attrs(&attr_values, AttrInspectOptions::All) { @@ -113,7 +110,9 @@ impl TargetNodeExt for TargetNode { label, attr_values, CoercedDeps::from(deps_cache), - call_stack.map(StarlarkCallStack::new), + call_stack + .map(StarlarkCallStackWrapper) + .map(StarlarkCallStack::new), )) } } diff --git a/app/buck2_interpreter_for_build/src/plugins.rs b/app/buck2_interpreter_for_build/src/plugins.rs index cc30d452e0552..e2c4fe45bf6e1 100644 --- a/app/buck2_interpreter_for_build/src/plugins.rs +++ b/app/buck2_interpreter_for_build/src/plugins.rs @@ -15,14 +15,15 @@ use buck2_core::plugins::PluginKind; use buck2_interpreter::plugins::PLUGIN_KIND_FROM_VALUE; use derive_more::Display; use dupe::Dupe; +use either::Either; use starlark::environment::GlobalsBuilder; -use starlark::environment::Methods; -use starlark::environment::MethodsBuilder; -use starlark::environment::MethodsStatic; use starlark::eval::Evaluator; use starlark::starlark_module; use starlark::starlark_simple_value; +use starlark::typing::Ty; use starlark::values::starlark_value; +use starlark::values::starlark_value_as_type::StarlarkValueAsType; +use starlark::values::type_repr::StarlarkTypeRepr; use starlark::values::AllocValue; use starlark::values::Freeze; use starlark::values::Freezer; @@ -31,17 +32,17 @@ use starlark::values::NoSerialize; use starlark::values::ProvidesStaticType; use starlark::values::StarlarkValue; use starlark::values::Trace; +use starlark::values::UnpackValue; use starlark::values::Value; -use starlark::values::ValueLike; -use starlark::StarlarkDocs; +use starlark::values::ValueTypedComplex; use crate::interpreter::build_context::BuildContext; #[derive(Debug, derive_more::Display, Allocative)] enum InnerStarlarkPluginKind { - #[display(fmt = ">")] + #[display(">")] Unbound(CellPath), - #[display(fmt = "")] + #[display("")] Bound(PluginKind), } @@ -56,12 +57,16 @@ enum InnerStarlarkPluginKind { Trace, Allocative )] -#[display(fmt = "{}", "RefCell::borrow(_0)")] +#[display("{}", RefCell::borrow(_0))] pub struct StarlarkPluginKind(RefCell); #[starlark_value(type = "PluginKind")] impl<'v> StarlarkValue<'v> for StarlarkPluginKind { - fn export_as(&self, variable_name: &str, _eval: &mut Evaluator<'v, '_>) -> anyhow::Result<()> { + fn export_as( + &self, + variable_name: &str, + _eval: &mut Evaluator<'v, '_, '_>, + ) -> starlark::Result<()> { let mut inner = self.0.borrow_mut(); let InnerStarlarkPluginKind::Unbound(cell_path) = &*inner else { // Was already exported @@ -74,7 +79,7 @@ impl<'v> StarlarkValue<'v> for StarlarkPluginKind { } } -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum PluginKindError { #[error("Plugin kind has not yet been assigned to a global")] NotBound, @@ -104,7 +109,7 @@ impl<'v> AllocValue<'v> for StarlarkPluginKind { NoSerialize, Allocative )] -#[display(fmt = "{_0}")] 
+#[display("{_0}")] pub struct FrozenStarlarkPluginKind(PluginKind); starlark_simple_value!(FrozenStarlarkPluginKind); @@ -120,19 +125,48 @@ impl Freeze for StarlarkPluginKind { } } -pub(crate) fn plugin_kind_from_value<'v>(v: Value<'v>) -> anyhow::Result { - if let Some(unfrozen) = v.downcast_ref::() { - unfrozen.expect_bound() - } else if let Some(frozen) = v.downcast_ref::() { - Ok(frozen.0.dupe()) - } else { - Err(PluginKindError::NotAPluginKind(v.to_repr()).into()) +fn plugin_kind_from_value_typed<'v>( + v: ValueTypedComplex<'v, StarlarkPluginKind>, +) -> anyhow::Result { + match v.unpack() { + Either::Left(unfrozen) => unfrozen.expect_bound(), + Either::Right(frozen) => Ok(frozen.0.dupe()), + } +} + +fn plugin_kind_from_value<'v>(v: Value<'v>) -> anyhow::Result { + let Some(v) = ValueTypedComplex::new(v) else { + return Err(PluginKindError::NotAPluginKind(v.to_repr()).into()); + }; + plugin_kind_from_value_typed(v) +} + +pub(crate) struct PluginKindArg { + pub(crate) plugin_kind: PluginKind, +} + +impl StarlarkTypeRepr for PluginKindArg { + type Canonical = ::Canonical; + + fn starlark_type_repr() -> Ty { + ::starlark_type_repr() + } +} + +impl<'v> UnpackValue<'v> for PluginKindArg { + type Error = anyhow::Error; + + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + let Some(v) = ValueTypedComplex::new(value) else { + return Ok(None); + }; + plugin_kind_from_value_typed(v).map(|kind| Some(PluginKindArg { plugin_kind: kind })) } } /// The value yielded by `plugins.ALL` #[derive(Display, Debug, Allocative, ProvidesStaticType, NoSerialize)] -#[display(fmt = "")] +#[display("")] pub struct AllPlugins; starlark_simple_value!(AllPlugins); @@ -140,15 +174,12 @@ starlark_simple_value!(AllPlugins); impl<'v> StarlarkValue<'v> for AllPlugins {} #[starlark_module] -fn plugins_module(registry: &mut MethodsBuilder) { +fn register_plugins_methods(r: &mut GlobalsBuilder) { /// Create a new plugin kind. /// /// The value returned should always be immediately bound to a global, like `MyPluginKind = /// plugins.kind()` - fn kind<'v>( - #[starlark(this)] _this: Value<'v>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result { + fn kind<'v>(eval: &mut Evaluator<'v, '_, '_>) -> anyhow::Result { let cell_path = BuildContext::from_context(eval)? .starlark_path() .path() @@ -166,33 +197,14 @@ fn plugins_module(registry: &mut MethodsBuilder) { /// /// This value is not supported on `uses_plugins` at this time, and hence it is not useful on /// `pulls_plugins` either. - #[starlark(attribute)] - fn All<'v>(#[starlark(this)] _this: Value<'v>) -> anyhow::Result { - Ok(AllPlugins) - } -} + const All: AllPlugins = AllPlugins; -#[derive( - Display, - Debug, - StarlarkDocs, - Allocative, - ProvidesStaticType, - NoSerialize -)] -#[display(fmt = "")] -struct Plugins; - -#[starlark_value(type = "plugins")] -impl<'v> StarlarkValue<'v> for Plugins { - fn get_methods() -> Option<&'static Methods> { - static RES: MethodsStatic = MethodsStatic::new(); - RES.methods(plugins_module) - } + /// Type symbol for `PluginKind`. 
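The plugins.rs hunks replace the custom `Plugins` methods object with a plain `GlobalsBuilder::namespace`, and add a `plugins.PluginKind` type symbol next to the existing `kind()` and `All`. Following the doc comments, usage looks roughly like this (`_impl` and the rule wiring are assumed for illustration):

```python
MyPluginKind = plugins.kind()  # must be bound to a global immediately

def _check(kind: plugins.PluginKind):
    # plugins.PluginKind is now usable as a type annotation
    pass

my_rule = rule(
    impl = _impl,
    attrs = {},
    uses_plugins = [MyPluginKind],  # a tuple also unpacks here now (UnpackListOrTuple)
)
```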
+ const PluginKind: StarlarkValueAsType = StarlarkValueAsType::new(); } pub(crate) fn register_plugins(globals: &mut GlobalsBuilder) { - globals.set("plugins", globals.frozen_heap().alloc_simple(Plugins)); + globals.namespace("plugins", register_plugins_methods); } pub(crate) fn init_plugin_kind_from_value_impl() { diff --git a/app/buck2_interpreter_for_build/src/rule.rs b/app/buck2_interpreter_for_build/src/rule.rs index d99945aa6849c..c5aa6f370cb4e 100644 --- a/app/buck2_interpreter_for_build/src/rule.rs +++ b/app/buck2_interpreter_for_build/src/rule.rs @@ -9,7 +9,6 @@ use std::cell::RefCell; use std::fmt; -use std::fmt::Display; use std::sync::Arc; use allocative::Allocative; @@ -17,6 +16,11 @@ use anyhow::Context; use buck2_core::bzl::ImportPath; use buck2_core::configuration::transition::id::TransitionId; use buck2_core::plugins::PluginKind; +use buck2_interpreter::late_binding_ty::AnalysisContextReprLate; +use buck2_interpreter::late_binding_ty::ProviderReprLate; +use buck2_interpreter::late_binding_ty::TransitionReprLate; +use buck2_interpreter::starlark_promise::StarlarkPromise; +use buck2_interpreter::types::rule::FROZEN_PROMISE_ARTIFACT_MAPPINGS_GET_IMPL; use buck2_interpreter::types::rule::FROZEN_RULE_GET_IMPL; use buck2_interpreter::types::transition::transition_id_from_value; use buck2_node::attrs::attr::Attribute; @@ -28,11 +32,13 @@ use buck2_node::rule_type::RuleType; use buck2_node::rule_type::StarlarkRuleType; use derive_more::Display; use dupe::Dupe; +use either::Either; use gazebo::prelude::*; use itertools::Itertools; use starlark::any::ProvidesStaticType; use starlark::docs::DocFunction; use starlark::docs::DocItem; +use starlark::docs::DocMember; use starlark::docs::DocStringKind; use starlark::environment::GlobalsBuilder; use starlark::eval::Arguments; @@ -40,14 +46,20 @@ use starlark::eval::Evaluator; use starlark::eval::ParametersSpec; use starlark::starlark_module; use starlark::starlark_simple_value; -use starlark::typing::Param; +use starlark::typing::ParamSpec; use starlark::typing::Ty; -use starlark::values::dict::DictOf; +use starlark::values::dict::UnpackDictEntries; +use starlark::values::list::ListType; +use starlark::values::list::UnpackList; +use starlark::values::list_or_tuple::UnpackListOrTuple; use starlark::values::starlark_value; +use starlark::values::typing::FrozenStarlarkCallable; use starlark::values::typing::StarlarkCallable; +use starlark::values::typing::StarlarkCallableChecked; use starlark::values::AllocValue; use starlark::values::Freeze; use starlark::values::Freezer; +use starlark::values::FrozenRef; use starlark::values::FrozenStringValue; use starlark::values::FrozenValue; use starlark::values::Heap; @@ -56,6 +68,7 @@ use starlark::values::StarlarkValue; use starlark::values::StringValue; use starlark::values::Trace; use starlark::values::Value; +use starlark::values::ValueOfUnchecked; use starlark_map::small_map::SmallMap; use crate::attrs::starlark_attribute::StarlarkAttribute; @@ -64,7 +77,7 @@ use crate::interpreter::build_context::PerFileTypeContext; use crate::interpreter::module_internals::ModuleInternals; use crate::nodes::attr_spec::AttributeSpecExt; use crate::nodes::unconfigured::TargetNodeExt; -use crate::plugins::plugin_kind_from_value; +use crate::plugins::PluginKindArg; pub static NAME_ATTRIBUTE_FIELD: &str = "name"; @@ -80,7 +93,7 @@ pub struct RuleCallable<'v> { id: RefCell>, /// The implementation function for this rule. 
Must be callable and take a /// ctx - implementation: Value<'v>, + implementation: StarlarkCallable<'v, (FrozenValue,), ListType>, // Field Name -> Attribute attributes: AttributeSpec, /// Type for the typechecker. @@ -122,7 +135,7 @@ impl<'v> Display for RuleCallable<'v> { } /// Errors around rule declaration, instantiation, validation, etc -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum RuleError { #[error("The output of rule() may only be called after the module is loaded")] RuleCalledBeforeFreezing, @@ -146,15 +159,15 @@ impl<'v> AllocValue<'v> for RuleCallable<'v> { impl<'v> RuleCallable<'v> { fn new( - implementation: StarlarkCallable<'v>, - attrs: DictOf<'v, &'v str, &'v StarlarkAttribute>, + implementation: StarlarkCallable<'v, (FrozenValue,), ListType>, + attrs: UnpackDictEntries<&'v str, &'v StarlarkAttribute>, cfg: Option, doc: &str, is_configuration_rule: bool, is_toolchain_rule: bool, - uses_plugins: Vec>, + uses_plugins: Vec, artifact_promise_mappings: Option>, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result> { // TODO(nmj): Add default attributes in here like 'name', 'visibility', etc // TODO(nmj): Verify that names are valid. This is technically handled by the Params @@ -166,7 +179,7 @@ impl<'v> RuleCallable<'v> { _ => return Err(RuleError::RuleNonInBzl.into()), }; let sorted_validated_attrs = attrs - .to_dict() + .entries .into_iter() .sorted_by(|(k1, _), (k2, _)| Ord::cmp(k1, k2)) .map(|(name, value)| { @@ -179,10 +192,6 @@ impl<'v> RuleCallable<'v> { .collect::>>()?; let cfg = cfg.try_map(transition_id_from_value)?; - let uses_plugins = uses_plugins - .into_iter() - .map(plugin_kind_from_value) - .collect::>()?; let rule_kind = match (is_configuration_rule, is_toolchain_rule) { (false, false) => RuleKind::Normal, @@ -198,7 +207,7 @@ impl<'v> RuleCallable<'v> { Ok(RuleCallable { import_path: bzl_path, id: RefCell::new(None), - implementation: implementation.0, + implementation, attributes, ty, cfg, @@ -227,16 +236,19 @@ impl<'v> RuleCallable<'v> { parameters_spec.documentation(parameter_types, parameter_docs), Ty::none(), self.docs.as_deref(), - None, ); - DocItem::Function(function_docs) + DocItem::Member(DocMember::Function(function_docs)) } } #[starlark_value(type = "rule")] impl<'v> StarlarkValue<'v> for RuleCallable<'v> { - fn export_as(&self, variable_name: &str, _eval: &mut Evaluator<'v, '_>) -> anyhow::Result<()> { + fn export_as( + &self, + variable_name: &str, + _eval: &mut Evaluator<'v, '_, '_>, + ) -> starlark::Result<()> { *self.id.borrow_mut() = Some(StarlarkRuleType { import_path: self.import_path.clone(), name: variable_name.to_owned(), @@ -248,13 +260,15 @@ impl<'v> StarlarkValue<'v> for RuleCallable<'v> { &self, _me: Value<'v>, _args: &Arguments<'v, '_>, - _eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { - Err(RuleError::RuleCalledBeforeFreezing.into()) + _eval: &mut Evaluator<'v, '_, '_>, + ) -> starlark::Result> { + Err(starlark::Error::new_other( + RuleError::RuleCalledBeforeFreezing, + )) } - fn documentation(&self) -> Option { - Some(self.documentation_impl()) + fn documentation(&self) -> DocItem { + self.documentation_impl() } fn typechecker_ty(&self) -> Option { @@ -262,7 +276,7 @@ impl<'v> StarlarkValue<'v> for RuleCallable<'v> { } fn get_type_starlark_repr() -> Ty { - Ty::function(vec![Param::kwargs(Ty::any())], Ty::none()) + Ty::function(ParamSpec::kwargs(Ty::any()), Ty::none()) } } @@ -310,12 +324,12 @@ impl<'v> Freeze for RuleCallable<'v> { } #[derive(Debug, 
Display, ProvidesStaticType, NoSerialize, Allocative)] -#[display(fmt = "{}()", "rule.rule_type.name()")] +#[display("{}()", rule.rule_type.name())] pub struct FrozenRuleCallable { rule: Arc, /// Identical to `rule.rule_type` but more specific type. rule_type: Arc, - implementation: FrozenValue, + implementation: FrozenStarlarkCallable<(FrozenValue,), ListType>, signature: ParametersSpec, rule_docs: DocItem, ty: Ty, @@ -324,20 +338,29 @@ pub struct FrozenRuleCallable { } starlark_simple_value!(FrozenRuleCallable); +fn unpack_frozen_rule(rule: FrozenValue) -> anyhow::Result> { + rule.downcast_frozen_ref::() + .context("Expecting FrozenRuleCallable") +} + pub(crate) fn init_frozen_rule_get_impl() { FROZEN_RULE_GET_IMPL.init(|rule| { - let rule = rule - .downcast_frozen_ref::() - .context("Expecting FrozenRuleCallable")?; + let rule = unpack_frozen_rule(rule)?; Ok(rule.implementation) }) } -impl FrozenRuleCallable { - pub fn implementation(&self) -> FrozenValue { - self.implementation - } +pub(crate) fn init_frozen_promise_artifact_mappings_get_impl() { + FROZEN_PROMISE_ARTIFACT_MAPPINGS_GET_IMPL.init(|rule| { + let rule = unpack_frozen_rule(rule)?; + Ok(rule + .artifact_promise_mappings + .as_ref() + .map_or_else(SmallMap::new, |m| m.mappings.clone())) + }) +} +impl FrozenRuleCallable { pub fn rule_type(&self) -> &Arc { &self.rule_type } @@ -359,8 +382,8 @@ impl<'v> StarlarkValue<'v> for FrozenRuleCallable { &self, _me: Value<'v>, args: &Arguments<'v, '_>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> starlark::Result> { let record_target_call_stack = ModuleInternals::from_context(eval, self.rule.rule_type.name())? .record_target_call_stacks(); @@ -370,26 +393,28 @@ impl<'v> StarlarkValue<'v> for FrozenRuleCallable { None }; let arg_count = args.len()?; - self.signature.parser(args, eval, |param_parser, eval| { - // The body of the callable returned by `rule()`. - // Records the target in this package's `TargetMap`. - let internals = ModuleInternals::from_context(eval, self.rule.rule_type.name())?; - let target_node = TargetNode::from_params( - self.rule.dupe(), - internals.package(), - internals, - param_parser, - arg_count, - self.ignore_attrs_for_profiling, - call_stack, - )?; - internals.record(target_node)?; - Ok(Value::new_none()) - }) + self.signature + .parser(args, eval, |param_parser, eval| { + // The body of the callable returned by `rule()`. + // Records the target in this package's `TargetMap`. + let internals = ModuleInternals::from_context(eval, self.rule.rule_type.name())?; + let target_node = TargetNode::from_params( + self.rule.dupe(), + internals.package(), + internals, + param_parser, + arg_count, + self.ignore_attrs_for_profiling, + call_stack, + )?; + internals.record(target_node)?; + Ok(Value::new_none()) + }) + .map_err(Into::into) } - fn documentation(&self) -> Option { - Some(self.rule_docs.clone()) + fn documentation(&self) -> DocItem { + self.rule_docs.clone() } fn typechecker_ty(&self) -> Option { @@ -406,7 +431,7 @@ pub fn register_rule_function(builder: &mut GlobalsBuilder) { /// Define a rule. 
As a simple example: /// /// ```python - /// def _my_rule(ctx: "context") -> ["provider"]: + /// def _my_rule(ctx: AnalysisContext) -> list[Provider]: /// output = ctx.actions.write("hello.txt", ctx.attrs.contents, executable = ctx.attrs.exe) /// return [DefaultInfo(outputs = [output])] /// @@ -416,23 +441,32 @@ pub fn register_rule_function(builder: &mut GlobalsBuilder) { /// }) /// ``` fn rule<'v>( - #[starlark(require = named)] r#impl: StarlarkCallable<'v>, - #[starlark(require = named)] attrs: DictOf<'v, &'v str, &'v StarlarkAttribute>, - #[starlark(require = named)] cfg: Option, + #[starlark(require = named)] r#impl: StarlarkCallableChecked< + 'v, + (AnalysisContextReprLate,), + Either, StarlarkPromise>, + >, + #[starlark(require = named)] attrs: UnpackDictEntries<&'v str, &'v StarlarkAttribute>, + #[starlark(require = named)] cfg: Option>, #[starlark(require = named, default = "")] doc: &str, #[starlark(require = named, default = false)] is_configuration_rule: bool, #[starlark(require = named, default = false)] is_toolchain_rule: bool, - #[starlark(require = named, default = Vec::new())] uses_plugins: Vec>, - eval: &mut Evaluator<'v, '_>, + #[starlark(require = named, default = UnpackListOrTuple::default())] + uses_plugins: UnpackListOrTuple, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result> { RuleCallable::new( - r#impl, + StarlarkCallable::unchecked_new(r#impl.0), attrs, - cfg, + cfg.map(|v| v.get()), doc, is_configuration_rule, is_toolchain_rule, - uses_plugins, + uses_plugins + .items + .into_iter() + .map(|PluginKindArg { plugin_kind }| plugin_kind) + .collect(), None, eval, ) @@ -442,14 +476,18 @@ pub fn register_rule_function(builder: &mut GlobalsBuilder) { /// is a dict where the keys are the string name of the artifact, and the values are the callable functions that produce /// the artifact. This is only intended to be used with anon targets. fn anon_rule<'v>( - #[starlark(require = named)] r#impl: StarlarkCallable<'v>, - #[starlark(require = named)] attrs: DictOf<'v, &'v str, &'v StarlarkAttribute>, + #[starlark(require = named)] r#impl: StarlarkCallable< + 'v, + (FrozenValue,), + ListType, + >, + #[starlark(require = named)] attrs: UnpackDictEntries<&'v str, &'v StarlarkAttribute>, #[starlark(require = named, default = "")] doc: &str, #[starlark(require = named)] artifact_promise_mappings: SmallMap< StringValue<'v>, - StarlarkCallable<'v>, + StarlarkCallable<'v, (FrozenValue,), UnpackList>, >, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result> { RuleCallable::new( r#impl, diff --git a/app/buck2_interpreter_for_build/src/super_package.rs b/app/buck2_interpreter_for_build/src/super_package.rs new file mode 100644 index 0000000000000..6d45dbd29a4fc --- /dev/null +++ b/app/buck2_interpreter_for_build/src/super_package.rs @@ -0,0 +1,13 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
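For `anon_rule`, updated above, the doc comment says `artifact_promise_mappings` is a dict from artifact name to the callable that produces that artifact, intended only for anon targets. A rough sketch; the mapping body is a guess at a typical shape, and only the parameter names come from the diff:

```python
def _impl(ctx):
    out = ctx.actions.write("out.txt", ctx.attrs.contents)
    return [DefaultInfo(default_output = out)]

_my_anon = anon_rule(
    impl = _impl,
    attrs = {"contents": attrs.string()},
    artifact_promise_mappings = {
        # hypothetical: map the resolved providers to the promised artifact
        "out": lambda providers: providers[DefaultInfo].default_outputs,
    },
)
```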
+ */ + +pub(crate) mod defs; +pub(crate) mod eval_ctx; +pub(crate) mod package; +pub mod package_value; diff --git a/app/buck2_interpreter_for_build/src/super_package/defs.rs b/app/buck2_interpreter_for_build/src/super_package/defs.rs index 954ae023e0ddd..84dfbf9a73482 100644 --- a/app/buck2_interpreter_for_build/src/super_package/defs.rs +++ b/app/buck2_interpreter_for_build/src/super_package/defs.rs @@ -13,7 +13,7 @@ use crate::super_package::package::register_package_function; use crate::super_package::package_value::register_write_package_value; /// Globals for `PACKAGE` files and `bzl` files included from `PACKAGE` files. -pub fn register_package_natives(globals: &mut GlobalsBuilder) { +pub(crate) fn register_package_natives(globals: &mut GlobalsBuilder) { register_package_function(globals); register_write_package_value(globals); } diff --git a/app/buck2_interpreter_for_build/src/super_package/eval_ctx.rs b/app/buck2_interpreter_for_build/src/super_package/eval_ctx.rs index 76bc52251439a..2ed6df0e3daba 100644 --- a/app/buck2_interpreter_for_build/src/super_package/eval_ctx.rs +++ b/app/buck2_interpreter_for_build/src/super_package/eval_ctx.rs @@ -16,8 +16,8 @@ use buck2_node::super_package::SuperPackage; use buck2_node::visibility::VisibilitySpecification; use buck2_node::visibility::WithinViewSpecification; use dupe::Dupe; +use starlark::values::OwnedFrozenRef; use starlark::values::OwnedFrozenValue; -use starlark::values::OwnedFrozenValueTyped; use starlark_map::small_map::SmallMap; use crate::interpreter::package_file_extra::FrozenPackageFileExtra; @@ -43,7 +43,7 @@ pub struct PackageFileEvalCtx { impl PackageFileEvalCtx { fn cfg_constructor( - extra: Option<&OwnedFrozenValueTyped>, + extra: Option<&OwnedFrozenRef>, ) -> anyhow::Result>> { let Some(extra) = extra else { return Ok(None); @@ -61,7 +61,7 @@ impl PackageFileEvalCtx { pub(crate) fn build_super_package( self, - extra: Option>, + extra: Option>, ) -> anyhow::Result { let cfg_constructor = Self::cfg_constructor(extra.as_ref())?; @@ -83,19 +83,31 @@ impl PackageFileEvalCtx { let merged_package_values = SuperPackageValuesImpl::merge(self.parent.package_values(), package_values)?; - let PackageFileVisibilityFields { - visibility, - within_view, - inherit, - } = self.visibility.into_inner().unwrap_or_default(); - - let (visibility, within_view) = if inherit { - ( - self.parent.visibility().extend_with(&visibility), - self.parent.within_view().extend_with(&within_view), - ) - } else { - (visibility, within_view) + let (visibility, within_view) = match self.visibility.into_inner() { + Some(package_visibility) => { + if package_visibility.inherit { + ( + self.parent + .visibility() + .extend_with(&package_visibility.visibility), + self.parent + .within_view() + .extend_with(&package_visibility.within_view), + ) + } else { + ( + package_visibility.visibility, + package_visibility.within_view, + ) + } + } + None => { + // If the package file does not specify any visibility, default to the parent visibility. + ( + self.parent.visibility().to_owned(), + self.parent.within_view().to_owned(), + ) + } }; Ok(SuperPackage::new( diff --git a/app/buck2_interpreter_for_build/src/super_package/mod.rs b/app/buck2_interpreter_for_build/src/super_package/mod.rs deleted file mode 100644 index bee5a34bbc180..0000000000000 --- a/app/buck2_interpreter_for_build/src/super_package/mod.rs +++ /dev/null @@ -1,13 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. 
- * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -pub mod defs; -pub(crate) mod eval_ctx; -pub(crate) mod package; -pub mod package_value; diff --git a/app/buck2_interpreter_for_build/src/super_package/package.rs b/app/buck2_interpreter_for_build/src/super_package/package.rs index 9bd7b78c7acef..a47c64963bb16 100644 --- a/app/buck2_interpreter_for_build/src/super_package/package.rs +++ b/app/buck2_interpreter_for_build/src/super_package/package.rs @@ -8,8 +8,9 @@ */ use buck2_core::cells::name::CellName; +use buck2_core::cells::CellAliasResolver; use buck2_core::cells::CellResolver; -use buck2_core::pattern::ParsedPattern; +use buck2_core::pattern::pattern::ParsedPattern; use buck2_node::visibility::VisibilityPattern; use buck2_node::visibility::VisibilitySpecification; use buck2_node::visibility::VisibilityWithinViewBuilder; @@ -17,13 +18,14 @@ use buck2_node::visibility::WithinViewSpecification; use starlark::environment::GlobalsBuilder; use starlark::eval::Evaluator; use starlark::starlark_module; +use starlark::values::list_or_tuple::UnpackListOrTuple; use starlark::values::none::NoneType; use crate::interpreter::build_context::BuildContext; use crate::interpreter::build_context::PerFileTypeContext; use crate::super_package::eval_ctx::PackageFileVisibilityFields; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum PackageFileError { #[error( "`package()` can only be called in `PACKAGE` files \ @@ -38,6 +40,7 @@ fn parse_visibility( patterns: &[String], cell_name: CellName, cell_resolver: &CellResolver, + cell_alias_resolver: &CellAliasResolver, ) -> anyhow::Result { let mut builder = VisibilityWithinViewBuilder::with_capacity(patterns.len()); for pattern in patterns { @@ -48,6 +51,7 @@ fn parse_visibility( pattern, cell_name, cell_resolver, + cell_alias_resolver, )?)); } } @@ -58,6 +62,7 @@ fn parse_within_view( patterns: &[String], cell_name: CellName, cell_resolver: &CellResolver, + cell_alias_resolver: &CellAliasResolver, ) -> anyhow::Result { let mut builder = VisibilityWithinViewBuilder::with_capacity(patterns.len()); for pattern in patterns { @@ -68,6 +73,7 @@ fn parse_within_view( pattern, cell_name, cell_resolver, + cell_alias_resolver, )?)); } } @@ -79,8 +85,10 @@ fn parse_within_view( pub(crate) fn register_package_function(globals: &mut GlobalsBuilder) { fn package( #[starlark(require=named, default=false)] inherit: bool, - #[starlark(require=named, default=Vec::new())] visibility: Vec, - #[starlark(require=named, default=Vec::new())] within_view: Vec, + #[starlark(require=named, default=UnpackListOrTuple::default())] + visibility: UnpackListOrTuple, + #[starlark(require=named, default=UnpackListOrTuple::default())] + within_view: UnpackListOrTuple, eval: &mut Evaluator, ) -> anyhow::Result { let build_context = BuildContext::from_context(eval)?; @@ -89,14 +97,16 @@ pub(crate) fn register_package_function(globals: &mut GlobalsBuilder) { _ => return Err(PackageFileError::NotPackage.into()), }; let visibility = parse_visibility( - &visibility, + &visibility.items, build_context.cell_info().name().name(), build_context.cell_info().cell_resolver(), + build_context.cell_info().cell_alias_resolver(), )?; let within_view = parse_within_view( - &within_view, + &within_view.items, build_context.cell_info().name().name(), 
build_context.cell_info().cell_resolver(), + build_context.cell_info().cell_alias_resolver(), )?; match &mut *package_file_eval_ctx.visibility.borrow_mut() { diff --git a/app/buck2_interpreter_for_build/src/super_package/package_value.rs b/app/buck2_interpreter_for_build/src/super_package/package_value.rs index cc01c31f5ff5c..87e79c2c985e1 100644 --- a/app/buck2_interpreter_for_build/src/super_package/package_value.rs +++ b/app/buck2_interpreter_for_build/src/super_package/package_value.rs @@ -13,6 +13,7 @@ use std::sync::Arc; use allocative::Allocative; use anyhow::Context; +use buck2_error::BuckErrorContext; use buck2_interpreter::file_type::StarlarkFileType; use buck2_node::metadata::key::MetadataKey; use buck2_node::metadata::key::MetadataKeyRef; @@ -34,7 +35,7 @@ use starlark_map::small_map::SmallMap; use crate::interpreter::build_context::BuildContext; use crate::interpreter::package_file_extra::PackageFileExtra; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum PackageValueError { #[error("key already set in this file: `{0}`")] KeyAlreadySetInThisFile(MetadataKey), @@ -44,7 +45,7 @@ enum PackageValueError { #[derive(Debug, Default, Allocative)] pub struct SuperPackageValuesImpl { - pub(crate) values: SmallMap, + values: SmallMap, } impl SuperPackageValuesImpl { @@ -52,7 +53,7 @@ impl SuperPackageValuesImpl { values .as_any() .downcast_ref::() - .context("Expecting SuperPackageValuesImpl (internal error)") + .internal_error_anyhow("Expecting SuperPackageValuesImpl") } pub(crate) fn merge( @@ -92,6 +93,16 @@ impl SuperPackageValues for SuperPackageValuesImpl { fn contains_key(&self, key: &MetadataKeyRef) -> bool { self.values.contains_key(key) } + + fn get_package_value_json( + &self, + key: &MetadataKeyRef, + ) -> anyhow::Result> { + match self.values.get(key) { + Some(value) => Ok(Some(value.to_json_value()?)), + None => Ok(None), + } + } } /// Value that is known to be serializable to JSON. @@ -141,7 +152,7 @@ impl OwnedFrozenStarlarkPackageValue { self.0 .value() .to_json_value() - .context("Not valid JSON, should have been validated at construction (internal error)") + .internal_error_anyhow("Not valid JSON, should have been validated at construction") } pub(crate) fn owned_frozen_value(&self) -> &OwnedFrozenValue { @@ -150,7 +161,7 @@ impl OwnedFrozenStarlarkPackageValue { } #[starlark_module] -pub fn register_write_package_value(globals: &mut GlobalsBuilder) { +pub(crate) fn register_write_package_value(globals: &mut GlobalsBuilder) { /// Set the value to be accessible in the nested `PACKAGE` files. /// /// If any parent `PACKAGE` value has already set the same `key`, @@ -160,7 +171,7 @@ pub fn register_write_package_value(globals: &mut GlobalsBuilder) { #[starlark(require = pos)] key: &str, #[starlark(require = pos)] value: Value<'v>, #[starlark(require = named, default = false)] overwrite: bool, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { let key = MetadataKeyRef::new(key)?; @@ -192,13 +203,13 @@ pub fn register_write_package_value(globals: &mut GlobalsBuilder) { } #[starlark_module] -pub fn register_read_package_value(globals: &mut GlobalsBuilder) { +pub(crate) fn register_read_package_value(globals: &mut GlobalsBuilder) { /// Read value specified in the `PACKAGE` file. /// /// Returns `None` if value is not set. 
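Taken together, the `package()` and package-value hunks sketch the `PACKAGE`-file surface after this change: `package()` now unpacks tuples as well as lists for `visibility`/`within_view`, `write_package_value` rejects a key already set in the same file unless `overwrite = True`, and the read functions return `None` for unset keys. An illustrative sketch (key and pattern strings invented):

```python
# PACKAGE
package(
    inherit = True,                     # extend the parent PACKAGE's lists
    visibility = ["root//service/..."],
    within_view = ("root//lib/...",),   # tuples unpack like lists now
)

write_package_value("myteam.mode", "dev")
# write_package_value("myteam.mode", "prod")          # error: key already set in this file
write_package_value("myteam.mode", "prod", overwrite = True)

# Later, in a nested PACKAGE file:
mode = read_package_value("myteam.mode")                # "prod", or None if never set
parent_mode = read_parent_package_value("myteam.mode")  # value from the parent chain only
```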
fn read_package_value<'v>( #[starlark(require = pos)] key: &str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result> { let key = MetadataKeyRef::new(key)?; @@ -234,7 +245,7 @@ pub fn register_read_package_value(globals: &mut GlobalsBuilder) { /// Returns `None` if value is not set. fn read_parent_package_value<'v>( #[starlark(require = pos)] key: &str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result> { let key = MetadataKeyRef::new(key)?; diff --git a/app/buck2_interpreter_for_build_tests/BUCK b/app/buck2_interpreter_for_build_tests/BUCK index 47758bde39f0a..b876305822717 100644 --- a/app/buck2_interpreter_for_build_tests/BUCK +++ b/app/buck2_interpreter_for_build_tests/BUCK @@ -1,25 +1,27 @@ -load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") +load("@fbcode_macros//build_defs:rust_unittest.bzl", "rust_unittest") oncall("build_infra") -rust_library( +rust_unittest( name = "buck2_interpreter_for_build_tests", srcs = glob(["src/**/*.rs"]), crate_root = "src/lib.rs", - test_deps = [ + deps = [ "fbsource//third-party/rust:anyhow", "fbsource//third-party/rust:ctor", + "fbsource//third-party/rust:futures", "fbsource//third-party/rust:indoc", "fbsource//third-party/rust:serde_json", "fbsource//third-party/rust:tokio", "//buck2/app/buck2_build_api:buck2_build_api", "//buck2/app/buck2_common:buck2_common", "//buck2/app/buck2_core:buck2_core", + "//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_events:buck2_events", "//buck2/app/buck2_interpreter:buck2_interpreter", "//buck2/app/buck2_interpreter_for_build:buck2_interpreter_for_build", "//buck2/app/buck2_node:buck2_node", + "//buck2/app/buck2_transition:buck2_transition", "//buck2/dice/dice:dice", "//buck2/gazebo/dupe:dupe", "//buck2/gazebo/gazebo:gazebo", diff --git a/app/buck2_interpreter_for_build_tests/Cargo.toml b/app/buck2_interpreter_for_build_tests/Cargo.toml index a68a58643f93c..a602e694aac9c 100644 --- a/app/buck2_interpreter_for_build_tests/Cargo.toml +++ b/app/buck2_interpreter_for_build_tests/Cargo.toml @@ -1,8 +1,10 @@ [package] +description = "Tests for buck2_interpreter_for_build" +edition = "2021" +license = { workspace = true } name = "buck2_interpreter_for_build_tests" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Tests for buck2_interpreter_for_build" [dev-dependencies] anyhow = { workspace = true } @@ -13,14 +15,17 @@ tokio = { workspace = true } dice = { workspace = true } dupe = { workspace = true } +futures = { workspace = true } gazebo = { workspace = true } starlark = { workspace = true } buck2_build_api = { workspace = true } buck2_common = { workspace = true } buck2_core = { workspace = true } +buck2_error = { workspace = true } buck2_events = { workspace = true } buck2_interpreter = { workspace = true } buck2_interpreter_for_build = { workspace = true } buck2_node = { workspace = true } buck2_query = { workspace = true } +buck2_transition = { workspace = true } diff --git a/app/buck2_interpreter_for_build_tests/src/attr.rs b/app/buck2_interpreter_for_build_tests/src/attr.rs index f8ad2a53532af..672fb1f0eb33a 100644 --- a/app/buck2_interpreter_for_build_tests/src/attr.rs +++ b/app/buck2_interpreter_for_build_tests/src/attr.rs @@ -7,15 +7,16 @@ * of this source tree. 
*/ +use std::sync::Arc; + use buck2_common::package_listing::listing::testing::PackageListingExt; use buck2_common::package_listing::listing::PackageListing; -use buck2_common::result::SharedResult; use buck2_core::cells::name::CellName; use buck2_core::cells::paths::CellRelativePath; use buck2_core::package::package_relative_path::PackageRelativePathBuf; use buck2_core::package::PackageLabel; use buck2_core::plugins::PluginKindSet; -use buck2_interpreter_for_build::attrs::attrs_global::register_attrs; +use buck2_core::target::label::interner::ConcurrentTargetLabelInterner; use buck2_interpreter_for_build::attrs::coerce::attr_type::AttrTypeExt; use buck2_interpreter_for_build::attrs::coerce::ctx::BuildAttrCoercionContext; use buck2_interpreter_for_build::interpreter::testing::cells; @@ -29,15 +30,9 @@ use dupe::Dupe; use indoc::indoc; use starlark::values::Heap; -fn tester() -> Tester { - let mut tester = Tester::new().unwrap(); - tester.additional_globals(register_attrs); - tester -} - #[test] -fn string_works() -> SharedResult<()> { - let mut tester = tester(); +fn string_works() -> buck2_error::Result<()> { + let mut tester = Tester::new().unwrap(); tester.run_starlark_bzl_test(indoc!( r#" frozen = attrs.string(default="something", doc = "foo") @@ -49,8 +44,8 @@ fn string_works() -> SharedResult<()> { } #[test] -fn boolean_works() -> SharedResult<()> { - let mut tester = tester(); +fn boolean_works() -> buck2_error::Result<()> { + let mut tester = Tester::new().unwrap(); tester.run_starlark_bzl_test(indoc!( r#" frozen = attrs.bool(default=False) @@ -62,8 +57,8 @@ fn boolean_works() -> SharedResult<()> { } #[test] -fn test_attr_module_registered() -> SharedResult<()> { - let mut tester = tester(); +fn test_attr_module_registered() -> buck2_error::Result<()> { + let mut tester = Tester::new().unwrap(); tester.run_starlark_bzl_test(indoc!( r#" def test(): @@ -73,8 +68,8 @@ fn test_attr_module_registered() -> SharedResult<()> { } #[test] -fn list_works() -> SharedResult<()> { - let mut tester = tester(); +fn list_works() -> buck2_error::Result<()> { + let mut tester = Tester::new().unwrap(); tester.run_starlark_bzl_test(indoc!( r#" frozen = attrs.list( @@ -90,14 +85,14 @@ fn list_works() -> SharedResult<()> { ) assert_eq('attrs.list(attrs.string(), default=[])', repr(not_frozen)) - assert_eq('attrs.list(attrs.string(), default=["1","2"])', repr(frozen)) + assert_eq('attrs.list(attrs.string(), default=["1", "2"])', repr(frozen)) "# )) } #[test] -fn enum_works() -> SharedResult<()> { - let mut tester = tester(); +fn enum_works() -> buck2_error::Result<()> { + let mut tester = Tester::new().unwrap(); tester.run_starlark_bzl_test(indoc!( r#" frozen = attrs.enum(["red", "green", "blue"]) @@ -114,13 +109,19 @@ fn attr_coercer_coerces() -> anyhow::Result<()> { let heap = Heap::new(); let some_cells = cells(None)?; let cell_resolver = some_cells.1; + let cell_alias_resolver = some_cells.0; let package = PackageLabel::new( CellName::testing_new("root"), CellRelativePath::unchecked_new("foo"), ); let enclosing_package = (package.dupe(), PackageListing::testing_empty()); - let coercer_ctx = - BuildAttrCoercionContext::new_with_package(cell_resolver, enclosing_package, false); + let coercer_ctx = BuildAttrCoercionContext::new_with_package( + cell_resolver, + cell_alias_resolver, + enclosing_package, + false, + Arc::new(ConcurrentTargetLabelInterner::default()), + ); let label_coercer = AttrType::dep(ProviderIdSet::EMPTY, PluginKindSet::EMPTY); let string_coercer = AttrType::string(); let enum_coercer = 
AttrType::enumeration(vec![ @@ -212,8 +213,8 @@ fn attr_coercer_coerces() -> anyhow::Result<()> { } #[test] -fn dep_works() -> SharedResult<()> { - let mut t = tester(); +fn dep_works() -> buck2_error::Result<()> { + let mut t = Tester::new().unwrap(); t.run_starlark_bzl_test(indoc!( r#" frozen1 = attrs.dep(default="root//foo:bar") @@ -225,7 +226,7 @@ fn dep_works() -> SharedResult<()> { "# ))?; - let mut t = tester(); + let mut t = Tester::new().unwrap(); t.run_starlark_bzl_test_expecting_error( indoc!( r#" @@ -237,7 +238,7 @@ fn dep_works() -> SharedResult<()> { ); // Relative targets are disallowed; there is no build file for them to be relative to - let mut t = tester(); + let mut t = Tester::new().unwrap(); t.run_starlark_bzl_test_expecting_error( indoc!( r#" @@ -251,8 +252,8 @@ fn dep_works() -> SharedResult<()> { } #[test] -fn source_works() -> SharedResult<()> { - let mut t = tester(); +fn source_works() -> buck2_error::Result<()> { + let mut t = Tester::new().unwrap(); t.run_starlark_bzl_test(indoc!( r#" frozen1 = attrs.source(default="root//foo:bar") @@ -265,7 +266,7 @@ fn source_works() -> SharedResult<()> { ))?; // Relative targets are disallowed; there is no build file for them to be relative to - let mut t = tester(); + let mut t = Tester::new().unwrap(); t.run_starlark_bzl_test_expecting_error( indoc!( r#" @@ -281,20 +282,27 @@ fn source_works() -> SharedResult<()> { #[test] fn coercing_src_to_path_works() -> anyhow::Result<()> { let cell_resolver = cells(None).unwrap().1; + let cell_alias_resolver = cells(None).unwrap().0; let package = PackageLabel::new( CellName::testing_new("root"), CellRelativePath::unchecked_new("foo/bar"), ); let package_ctx = BuildAttrCoercionContext::new_with_package( cell_resolver.dupe(), + cell_alias_resolver.dupe(), ( package.dupe(), PackageListing::testing_files(&["baz/quz.cpp"]), ), false, + Arc::new(ConcurrentTargetLabelInterner::default()), + ); + let no_package_ctx = BuildAttrCoercionContext::new_no_package( + cell_resolver, + CellName::testing_new("root"), + cell_alias_resolver, + Arc::new(ConcurrentTargetLabelInterner::default()), ); - let no_package_ctx = - BuildAttrCoercionContext::new_no_package(cell_resolver, CellName::testing_new("root")); let err = no_package_ctx .coerce_path("baz/quz.cpp", false) diff --git a/app/buck2_interpreter_for_build_tests/src/attrs.rs b/app/buck2_interpreter_for_build_tests/src/attrs.rs new file mode 100644 index 0000000000000..b6842c9d978bd --- /dev/null +++ b/app/buck2_interpreter_for_build_tests/src/attrs.rs @@ -0,0 +1,10 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +mod attrs_global; diff --git a/app/buck2_interpreter_for_build_tests/src/attrs/attrs_global.rs b/app/buck2_interpreter_for_build_tests/src/attrs/attrs_global.rs new file mode 100644 index 0000000000000..f6805ffec8752 --- /dev/null +++ b/app/buck2_interpreter_for_build_tests/src/attrs/attrs_global.rs @@ -0,0 +1,31 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use buck2_interpreter_for_build::interpreter::testing::Tester; + +#[test] +fn test_attr_display() -> anyhow::Result<()> { + let mut tester = Tester::new().unwrap(); + tester.run_starlark_bzl_test(r#" +def assert_eq(a, b): + if a != b: + fail(a + " != " + b) + +assert_eq(repr(attrs.bool(default = True)), "attrs.bool(default=True)") +assert_eq(repr(attrs.string()), "attrs.string()") +assert_eq(repr(attrs.list(attrs.string())), "attrs.list(attrs.string())") +assert_eq(repr(attrs.dict(attrs.string(), attrs.string())), "attrs.dict(attrs.string(), attrs.string(), sorted=False)") +assert_eq(repr(attrs.one_of(attrs.string())), "attrs.one_of(attrs.string())") +assert_eq(repr(attrs.tuple(attrs.string())), "attrs.tuple(attrs.string())") +assert_eq(repr(attrs.option(attrs.string())), "attrs.option(attrs.string())") + +def test(): pass +"#)?; + Ok(()) +} diff --git a/app/buck2_interpreter_for_build_tests/src/functions/mod.rs b/app/buck2_interpreter_for_build_tests/src/functions.rs similarity index 100% rename from app/buck2_interpreter_for_build_tests/src/functions/mod.rs rename to app/buck2_interpreter_for_build_tests/src/functions.rs diff --git a/app/buck2_interpreter_for_build_tests/src/functions/host_info.rs b/app/buck2_interpreter_for_build_tests/src/functions/host_info.rs index e26bce39b92a0..52ae3630be607 100644 --- a/app/buck2_interpreter_for_build_tests/src/functions/host_info.rs +++ b/app/buck2_interpreter_for_build_tests/src/functions/host_info.rs @@ -7,14 +7,12 @@ * of this source tree. */ -use buck2_interpreter_for_build::interpreter::functions::host_info::register_host_info; use buck2_interpreter_for_build::interpreter::testing::Tester; use indoc::indoc; #[test] fn test_host_info() -> anyhow::Result<()> { let mut tester = Tester::new().unwrap(); - tester.additional_globals(register_host_info); tester.run_starlark_test(indoc!( r#" def test(): @@ -34,7 +32,6 @@ fn test_host_info() -> anyhow::Result<()> { #[test] fn test_buck_v2() -> anyhow::Result<()> { let mut tester = Tester::new().unwrap(); - tester.additional_globals(register_host_info); tester.run_starlark_test(indoc!( r#" def test(): diff --git a/app/buck2_interpreter_for_build_tests/src/functions/load_symbols.rs b/app/buck2_interpreter_for_build_tests/src/functions/load_symbols.rs index e5a0c1d2b8612..4d434c2bc936b 100644 --- a/app/buck2_interpreter_for_build_tests/src/functions/load_symbols.rs +++ b/app/buck2_interpreter_for_build_tests/src/functions/load_symbols.rs @@ -8,13 +8,11 @@ */ use buck2_core::bzl::ImportPath; -use buck2_interpreter_for_build::interpreter::functions::load_symbols::register_load_symbols; use buck2_interpreter_for_build::interpreter::testing::Tester; #[test] fn test_load_symbols() -> anyhow::Result<()> { let mut t = Tester::new()?; - t.additional_globals(register_load_symbols); let defines = ImportPath::testing_new("root//pkg:test.bzl"); t.add_import( &defines, diff --git a/app/buck2_interpreter_for_build_tests/src/functions/read_config.rs b/app/buck2_interpreter_for_build_tests/src/functions/read_config.rs index 13754cb4881e9..a25b7fe4c3124 100644 --- a/app/buck2_interpreter_for_build_tests/src/functions/read_config.rs +++ b/app/buck2_interpreter_for_build_tests/src/functions/read_config.rs @@ -7,14 +7,12 @@ * of this source tree. 
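The deleted `tester.additional_globals(register_host_info)` / `register_read_config` lines above mean those globals are now part of the `Tester`'s default environment rather than opt-in. What the affected tests exercise, approximately (section/key names invented; the `os` flags are the usual `host_info()` fields):

```python
def test():
    info = host_info()
    # Structured flags rather than strings, e.g.:
    on_linux = info.os.is_linux
    # read_config returns the third argument when the key is unset:
    mode = read_config("build", "mode", "dev")
```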
*/ -use buck2_interpreter_for_build::interpreter::functions::read_config::register_read_config; use buck2_interpreter_for_build::interpreter::testing::Tester; use indoc::indoc; #[test] fn test_read_config() -> anyhow::Result<()> { let mut tester = Tester::new().unwrap(); - tester.additional_globals(register_read_config); tester.run_starlark_test(indoc!( r#" def test(): diff --git a/app/buck2_interpreter_for_build_tests/src/functions/with_sub_target.rs b/app/buck2_interpreter_for_build_tests/src/functions/with_sub_target.rs index 9ab2dad7ab1fd..b18ad18b2423d 100644 --- a/app/buck2_interpreter_for_build_tests/src/functions/with_sub_target.rs +++ b/app/buck2_interpreter_for_build_tests/src/functions/with_sub_target.rs @@ -8,7 +8,7 @@ */ use buck2_core::configuration::data::ConfigurationData; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use buck2_interpreter::types::target_label::StarlarkConfiguredTargetLabel; use buck2_interpreter::types::target_label::StarlarkTargetLabel; use buck2_interpreter_for_build::interpreter::testing::Tester; @@ -35,12 +35,12 @@ fn test_with_sub_target() -> anyhow::Result<()> { r#" def test(): unconf_providers = unconf.with_sub_target(["ab", "cd"]) - assert_eq(isinstance(unconf_providers, "providers_label"), True) + assert_eq(isinstance(unconf_providers, ProvidersLabel), True) assert_eq(unconf_providers.raw_target(), unconf) assert_eq(str(unconf_providers), "cell//pkg:target[ab][cd]") conf_providers = conf.with_sub_target(["ab", "cd"]) - assert_eq(isinstance(conf_providers, "label"), True) + assert_eq(isinstance(conf_providers, ConfiguredProvidersLabel), True) assert_eq(conf_providers.configured_target(), conf) assert_eq(str(conf_providers), "cell//pkg:target[ab][cd] ()") "# diff --git a/app/buck2_interpreter_for_build_tests/src/interpreter.rs b/app/buck2_interpreter_for_build_tests/src/interpreter.rs index 7ffe155544c4c..afb4dbcaccacc 100644 --- a/app/buck2_interpreter_for_build_tests/src/interpreter.rs +++ b/app/buck2_interpreter_for_build_tests/src/interpreter.rs @@ -10,24 +10,21 @@ use buck2_build_api::interpreter::rule_defs::provider::registration::register_builtin_providers; use buck2_build_api::interpreter::rule_defs::register_rule_defs; use buck2_common::legacy_configs::cells::BuckConfigBasedCells; -use buck2_common::legacy_configs::testing::TestConfigParserFileOps; +use buck2_common::legacy_configs::configs::testing::TestConfigParserFileOps; +use buck2_common::legacy_configs::configs::LegacyBuckConfig; use buck2_common::package_listing::listing::testing::PackageListingExt; use buck2_common::package_listing::listing::PackageListing; use buck2_core::build_file_path::BuildFilePath; use buck2_core::bzl::ImportPath; -use buck2_core::cells::name::CellName; use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; use buck2_core::fs::project::ProjectRoot; use buck2_core::fs::project_rel_path::ProjectRelativePath; use buck2_interpreter::file_loader::LoadedModules; use buck2_interpreter::paths::module::OwnedStarlarkModulePath; use buck2_interpreter::paths::path::StarlarkPath; -use buck2_interpreter_for_build::attrs::attrs_global::register_attrs; -use buck2_interpreter_for_build::interpreter::natives::register_module_natives; use buck2_interpreter_for_build::interpreter::testing::run_simple_starlark_test; use buck2_interpreter_for_build::interpreter::testing::CellsData; use buck2_interpreter_for_build::interpreter::testing::Tester; -use buck2_interpreter_for_build::rule::register_rule_function; use 
buck2_node::attrs::inspect_options::AttrInspectOptions; use buck2_node::nodes::unconfigured::testing::targets_to_json; use dupe::Dupe; @@ -103,8 +100,6 @@ fn test_load() { #[test] fn test_eval_build_file() { let mut tester = Tester::new().unwrap(); - tester.additional_globals(register_rule_function); - tester.additional_globals(register_attrs); tester.additional_globals(register_builtin_providers); tester @@ -186,37 +181,30 @@ fn cells() -> CellsData { let repo_root = if cfg!(windows) { "C:/" } else { "/" }; let project_fs = ProjectRoot::new_unchecked(AbsNormPathBuf::try_from(repo_root.to_owned()).unwrap()); - let BuckConfigBasedCells { - cell_resolver, - configs_by_name, - config_paths: _, - } = BuckConfigBasedCells::parse_with_file_ops( - &project_fs, - &mut TestConfigParserFileOps::new(&[( - "/.buckconfig", - indoc!( - r#" - [repositories] + let BuckConfigBasedCells { cell_resolver, .. } = + futures::executor::block_on(BuckConfigBasedCells::testing_parse_with_file_ops( + &project_fs, + &mut TestConfigParserFileOps::new(&[( + ".buckconfig", + indoc!( + r#" + [cells] root = . cell1 = project/cell1 cell2 = project/cell2 xalias2 = project/cell2 "# - ), - )]) - .unwrap(), - &[], - ProjectRelativePath::empty(), - ) - .unwrap(); + ), + )]) + .unwrap(), + &[], + ProjectRelativePath::empty(), + )) + .unwrap(); ( - cell_resolver - .get(CellName::testing_new("root")) - .unwrap() - .cell_alias_resolver() - .dupe(), + cell_resolver.root_cell_cell_alias_resolver().dupe(), cell_resolver, - configs_by_name, + LegacyBuckConfig::empty(), ) } @@ -277,8 +265,6 @@ fn test_root_import() { .unwrap(); tester.additional_globals(register_builtin_providers); - tester.additional_globals(register_rule_function); - tester.additional_globals(register_attrs); let import_path = ImportPath::testing_new("root//:include.bzl"); tester @@ -385,8 +371,6 @@ fn test_package_import() -> anyhow::Result<()> { "# )), )?)?; - tester.additional_globals(register_rule_function); - tester.additional_globals(register_module_natives); let import_path = ImportPath::testing_new("root//:include.bzl"); tester.add_import( @@ -425,6 +409,7 @@ fn test_package_import() -> anyhow::Result<()> { "exec_compatible_with": [], "name": "DEFAULT", "target_compatible_with": [], + "modifiers": [], "tests": [], "visibility": [], "within_view": ["PUBLIC"], @@ -443,8 +428,6 @@ fn test_package_import() -> anyhow::Result<()> { #[test] fn eval() -> anyhow::Result<()> { let mut tester = Tester::new()?; - tester.additional_globals(register_module_natives); - tester.additional_globals(register_rule_function); let content = indoc!( r#" def _impl(ctx): @@ -452,11 +435,11 @@ fn eval() -> anyhow::Result<()> { export_file = rule(impl=_impl, attrs = {}) def test(): - assert_eq("some/package", __internal__.package_name()) - assert_eq("@root", __internal__.repository_name()) + assert_eq("some/package", __buck2_builtins__.package_name()) + assert_eq("@root", __buck2_builtins__.repository_name()) - assert_eq(package_name(), __internal__.package_name()) - assert_eq(repository_name(), __internal__.repository_name()) + assert_eq(package_name(), __buck2_builtins__.package_name()) + assert_eq(repository_name(), __buck2_builtins__.repository_name()) assert_eq(package_name(), get_base_path()) @@ -473,24 +456,33 @@ fn eval() -> anyhow::Result<()> { } #[test] -fn test_internal() -> anyhow::Result<()> { - // Test that most things end up on __internal__ - let mut tester = Tester::new().unwrap(); - tester.additional_globals(register_rule_defs); +fn test_builtins() -> anyhow::Result<()> 
{ + // Test that most things end up on __buck2_builtins__ run_simple_starlark_test(indoc!( r#" def test(): - assert_eq(__internal__.json.encode({}), "{}") + assert_eq(__buck2_builtins__.json.encode({}), "{}") "# - )) + ))?; + + // But not internals + let mut tester = Tester::new().unwrap(); + tester.run_starlark_test_expecting_error( + indoc!( + r#" + def test(): + __buck2_builtins__.buck2_fail("message") + "# + ), + "The attribute `buck2_fail` is not available", + ); + Ok(()) } #[test] fn test_oncall() -> anyhow::Result<()> { let mut tester = Tester::new().unwrap(); - tester.additional_globals(register_module_natives); tester.additional_globals(register_rule_defs); - tester.additional_globals(register_rule_function); tester.run_starlark_test(indoc!( r#" def _impl(ctx): @@ -502,6 +494,32 @@ fn test_oncall() -> anyhow::Result<()> { export_file(name = "rule_name") "# ))?; + tester.run_starlark_test(indoc!( + r#" + def _impl(ctx): + pass + export_file = rule(impl=_impl, attrs = {}) + + def test(): + oncall("valid") + if read_oncall() != "valid": + fail("oncall should be set to valid") + export_file(name = "rule_name") + if read_oncall() != "valid": + fail("oncall should be set to valid and targets set") + "# + ))?; + tester.run_starlark_test(indoc!( + r#" + def _impl(ctx): + pass + export_file = rule(impl=_impl, attrs = {}) + + def test(): + if read_oncall() != None: + fail("oncall should be None if never set") + "# + ))?; tester.run_starlark_test_expecting_error( indoc!( r#" @@ -526,5 +544,15 @@ fn test_oncall() -> anyhow::Result<()> { ), "after one or more targets", ); + tester.run_starlark_test_expecting_error( + indoc!( + r#" + def test(): + read_oncall() + oncall("valid") + "# + ), + "after calling `read_oncall`", + ); Ok(()) } diff --git a/app/buck2_interpreter_for_build_tests/src/label.rs b/app/buck2_interpreter_for_build_tests/src/label.rs new file mode 100644 index 0000000000000..1740ba1355705 --- /dev/null +++ b/app/buck2_interpreter_for_build_tests/src/label.rs @@ -0,0 +1,76 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use buck2_interpreter_for_build::interpreter::testing::expect_error; +use buck2_interpreter_for_build::interpreter::testing::Tester; +use buck2_interpreter_for_build::label::testing::label_creator; +use indoc::indoc; + +#[test] +fn labels_are_usable() -> anyhow::Result<()> { + fn new_tester() -> anyhow::Result<Tester> { + let mut tester = Tester::new()?; + tester.additional_globals(label_creator); + Ok(tester) + } + + let mut tester = new_tester()?; + tester.run_starlark_bzl_test(indoc!( + r#" + frozen_l_default = label("root//foo/bar:baz") + frozen_l = label("root//foo/bar:baz[something]") + def test(): + l_default = label("root//foo/bar:baz") + l = label("root//foo/bar:baz[something]") + + assert_eq_ignore_hash("root//foo/bar:baz (#)", repr(frozen_l_default)) + assert_eq_ignore_hash("root//foo/bar:baz (#)", str(frozen_l_default)) + assert_eq("foo/bar", frozen_l_default.package) + assert_eq("baz", frozen_l_default.name) + assert_eq(None, frozen_l_default.sub_target) + assert_eq("root", frozen_l_default.cell) + + assert_eq_ignore_hash("root//foo/bar:baz[something] (#)", repr(frozen_l)) + assert_eq_ignore_hash("root//foo/bar:baz[something] (#)", str(frozen_l)) + assert_eq("foo/bar", frozen_l.package) + assert_eq("baz", frozen_l.name) + assert_eq(["something"], frozen_l.sub_target) + + assert_eq_ignore_hash("root//foo/bar:baz (#)", repr(l_default)) + assert_eq_ignore_hash("root//foo/bar:baz (#)", str(l_default)) + assert_eq("foo/bar", l_default.package) + assert_eq("baz", l_default.name) + assert_eq(None, l_default.sub_target) + + assert_eq_ignore_hash("root//foo/bar:baz[something] (#)", repr(l)) + assert_eq_ignore_hash("root//foo/bar:baz[something] (#)", str(l)) + assert_eq("foo/bar", l.package) + assert_eq("baz", l.name) + assert_eq(["something"], l.sub_target) + assert_eq("root", l.cell) + + "# + ))?; + + let mut tester = new_tester()?; + let invalid_fields = indoc!( + r#" + l = label("root//foo:bar[baz]") + def hide_type(v): return v + def test(): + hide_type(l).invalid_field + "# + ); + expect_error( + tester.run_starlark_test(invalid_fields), + invalid_fields, + "Object of type `label` has no attribute `invalid_field`", + ); + Ok(()) +} diff --git a/app/buck2_interpreter_for_build_tests/src/lib.rs b/app/buck2_interpreter_for_build_tests/src/lib.rs index 4ec5d2e90f99e..bfc8994b14dcc 100644 --- a/app/buck2_interpreter_for_build_tests/src/lib.rs +++ b/app/buck2_interpreter_for_build_tests/src/lib.rs @@ -7,18 +7,20 @@ * of this source tree. 
*/ +#![feature(error_generic_member_access)] #![cfg(test)] #[cfg(test)] mod attr; +mod attrs; mod functions; pub mod interpreter; +mod label; mod rule; pub mod select; mod super_package; mod tests; mod uncategorized; -mod uncategorized_2; #[test] fn init_late_bindings_for_test() { @@ -26,5 +28,6 @@ fn init_late_bindings_for_test() { fn init() { buck2_interpreter_for_build::init_late_bindings(); buck2_build_api::init_late_bindings(); + buck2_transition::init_late_bindings(); } } diff --git a/app/buck2_interpreter_for_build_tests/src/rule.rs b/app/buck2_interpreter_for_build_tests/src/rule.rs index 9d2ea16ef6526..1a79963ebd5dc 100644 --- a/app/buck2_interpreter_for_build_tests/src/rule.rs +++ b/app/buck2_interpreter_for_build_tests/src/rule.rs @@ -8,13 +8,10 @@ */ use buck2_build_api::interpreter::rule_defs::transitive_set::transitive_set_definition::register_transitive_set; -use buck2_common::result::SharedResult; use buck2_core::bzl::ImportPath; use buck2_interpreter::file_loader::LoadedModules; -use buck2_interpreter_for_build::attrs::attrs_global::register_attrs; use buck2_interpreter_for_build::interpreter::testing::Tester; use buck2_interpreter_for_build::nodes::attr_spec::AttributeSpecExt; -use buck2_interpreter_for_build::rule::register_rule_function; use buck2_node::attrs::inspect_options::AttrInspectOptions; use buck2_node::attrs::spec::AttributeSpec; use buck2_node::nodes::unconfigured::testing::targets_to_json; @@ -22,6 +19,7 @@ use indoc::indoc; use serde_json::json; use starlark::docs::DocFunction; use starlark::docs::DocItem; +use starlark::docs::DocMember; use starlark::docs::DocParam; use starlark::docs::DocReturn; use starlark::docs::DocString; @@ -31,8 +29,6 @@ use starlark::typing::Ty; fn rule_tester() -> Tester { let mut tester = Tester::new().unwrap(); tester.additional_globals(register_transitive_set); - tester.additional_globals(register_rule_function); - tester.additional_globals(register_attrs); tester } @@ -132,7 +128,7 @@ fn rule_unbound() { } #[test] -fn udr_is_recorded() -> SharedResult<()> { +fn udr_is_recorded() -> buck2_error::Result<()> { let content = indoc!( r#" def impl(ctx): @@ -184,6 +180,7 @@ fn udr_is_recorded() -> SharedResult<()> { "exec_compatible_with": [], "src": "root//some/package/file1.java", "target_compatible_with": [], + "modifiers": [], "tests": [], "visibility": [], "within_view": ["PUBLIC"], @@ -201,6 +198,7 @@ fn udr_is_recorded() -> SharedResult<()> { "exec_compatible_with": [], "src": "root//foo:baz", "target_compatible_with": [], + "modifiers": [], "tests": [], "visibility": [], "within_view": ["PUBLIC"], @@ -253,10 +251,13 @@ fn udr_rejects_invalid_parameters() { run( missing_name, - "Missing parameter `name` for call to foo_binary", + "Missing named-only parameter `name` for call to `foo_binary`", ); run(invalid_name, "Invalid target name `bad name`."); - run(missing_mandatory, "Missing parameter `mandatory`"); + run( + missing_mandatory, + "Missing named-only parameter `mandatory`", + ); run(wrong_type, "coercing attribute `mandatory`"); run(unknown_param, "Found `unknown` extra named parameter"); run( @@ -336,7 +337,7 @@ fn returns_documentation() -> anyhow::Result<()> { ); fn arg(name: &str, raw_type: Ty, default: Option<&str>) -> DocParam { - DocParam::Arg { + DocParam { name: name.to_owned(), docs: DocString::from_docstring(DocStringKind::Starlark, &format!("{} docs", name)), typ: raw_type, @@ -349,23 +350,23 @@ fn returns_documentation() -> anyhow::Result<()> { let mut params = empty_spec .signature("foo_binary".to_owned()) 
.documentation(empty_spec.starlark_types(), empty_spec.docstrings()); - params.extend(vec![ + params.named_only.extend(vec![ arg("any", Ty::any(), None), - arg("arg", Ty::string(), Some("_")), - arg("bool", Ty::bool(), Some("_")), - arg("default_only", Ty::string(), Some("_")), - arg("dep", Ty::string(), Some("_")), - arg("dict", Ty::dict(Ty::string(), Ty::bool()), Some("_")), - arg("list", Ty::list(Ty::string()), Some("_")), - arg("one_of", Ty::union2(Ty::bool(), Ty::string()), Some("_")), - arg("option", Ty::union2(Ty::none(), Ty::string()), Some("_")), + arg("arg", Ty::string(), Some("...")), + arg("bool", Ty::bool(), Some("...")), + arg("default_only", Ty::string(), Some("...")), + arg("dep", Ty::string(), Some("...")), + arg("dict", Ty::dict(Ty::string(), Ty::bool()), Some("...")), + arg("list", Ty::list(Ty::string()), Some("...")), + arg("one_of", Ty::union2(Ty::bool(), Ty::string()), Some("...")), + arg("option", Ty::union2(Ty::none(), Ty::string()), Some("...")), arg("query", Ty::string(), None), - arg("source", Ty::string(), Some("_")), - arg("string", Ty::string(), Some("_")), - arg("tuple", Ty::tuple2(Ty::bool(), Ty::string()), Some("_")), + arg("source", Ty::string(), Some("...")), + arg("string", Ty::string(), Some("...")), + arg("tuple", Ty::tuple2(Ty::bool(), Ty::string()), Some("...")), ]); - let expected_docs = DocItem::Function(DocFunction { + let expected_docs = DocItem::Member(DocMember::Function(DocFunction { docs: DocString::from_docstring( DocStringKind::Starlark, "Summary for foo_binary\n\nDetails for foo_binary", @@ -375,8 +376,7 @@ fn returns_documentation() -> anyhow::Result<()> { docs: None, typ: Ty::none(), }, - as_type: None, - }); + })); let tester = rule_tester(); let res = tester.eval_import( @@ -389,8 +389,7 @@ fn returns_documentation() -> anyhow::Result<()> { .get("foo_binary") .expect("foo_binary to exist") .value() - .documentation() - .unwrap(); + .documentation(); assert_eq!(expected_docs, docs); diff --git a/app/buck2_interpreter_for_build_tests/src/select.rs b/app/buck2_interpreter_for_build_tests/src/select.rs index eec6babf186b8..5d57ef155bd8e 100644 --- a/app/buck2_interpreter_for_build_tests/src/select.rs +++ b/app/buck2_interpreter_for_build_tests/src/select.rs @@ -116,3 +116,37 @@ def test(): )) .unwrap(); } + +#[test] +fn test_failing_select_funcs() { + let mut tester = Tester::new().unwrap(); + assert!( + tester + .run_starlark_test(indoc!( + r#" +def test(): + # bitwise or fails + expr_select = {} | select({"config/windows:x86_64": {}}) + "# + )) + .is_err() + ); +} + +#[test] +fn test_failing_nested_select() { + let mut tester = Tester::new().unwrap(); + let test = indoc!( + r#" +def _test_func(value): + return "TEST" in value + +def test(): + expr_select = select_test(select(select({"config/windows:x86_64": "flag_TEST"})), _test_func) + "# + ); + assert!(tester.run_starlark_test(test).is_err_and(|e| { + e.to_string() + .contains("Expected `dict[str, typing.Any]`, but got `selector") + })); +} diff --git a/app/buck2_interpreter_for_build_tests/src/super_package/mod.rs b/app/buck2_interpreter_for_build_tests/src/super_package.rs similarity index 100% rename from app/buck2_interpreter_for_build_tests/src/super_package/mod.rs rename to app/buck2_interpreter_for_build_tests/src/super_package.rs diff --git a/app/buck2_interpreter_for_build_tests/src/super_package/package_function.rs b/app/buck2_interpreter_for_build_tests/src/super_package/package_function.rs index f3f51d8524322..905c6a062ec95 100644 --- 
a/app/buck2_interpreter_for_build_tests/src/super_package/package_function.rs +++ b/app/buck2_interpreter_for_build_tests/src/super_package/package_function.rs @@ -8,7 +8,7 @@ */ use buck2_core::fs::project::ProjectRootTemp; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use buck2_node::nodes::frontend::TargetGraphCalculation; use buck2_node::visibility::VisibilitySpecification; @@ -44,7 +44,7 @@ simple(name = "a") "#, ); - let ctx = calculation(&fs).await; + let mut ctx = calculation(&fs).await; let a = ctx .get_target_node(&TargetLabel::testing_parse("root//juxtaposition:a")) @@ -87,7 +87,7 @@ simple(name = "a") "#, ); - let ctx = calculation(&fs).await; + let mut ctx = calculation(&fs).await; let a = ctx .get_target_node(&TargetLabel::testing_parse("root//juxtaposition:a")) @@ -129,7 +129,7 @@ simple(name = "a") "#, ); - let ctx = calculation(&fs).await; + let mut ctx = calculation(&fs).await; let a = ctx .get_target_node(&TargetLabel::testing_parse("root//juxtaposition:a")) diff --git a/app/buck2_interpreter_for_build_tests/src/super_package/package_value.rs b/app/buck2_interpreter_for_build_tests/src/super_package/package_value.rs index f7367a08946b2..10a5d99a889d8 100644 --- a/app/buck2_interpreter_for_build_tests/src/super_package/package_value.rs +++ b/app/buck2_interpreter_for_build_tests/src/super_package/package_value.rs @@ -12,7 +12,6 @@ use buck2_core::cells::build_file_cell::BuildFileCell; use buck2_core::fs::project::ProjectRootTemp; use buck2_core::package::PackageLabel; -use buck2_interpreter::starlark_profiler::StarlarkProfilerOrInstrumentation; use buck2_interpreter_for_build::interpreter::dice_calculation_delegate::HasCalculationDelegate; use buck2_node::attrs::display::AttrDisplayWithContextExt; use buck2_node::attrs::inspect_options::AttrInspectOptions; @@ -53,17 +52,14 @@ async fn test_package_value_same_dir_package_file() { ), ); - let ctx = calculation(&fs).await; - let interpreter = ctx + let mut ctx = calculation(&fs).await; + let mut interpreter = ctx .get_interpreter_calculator(root_cell(), BuildFileCell::new(root_cell())) .await .unwrap(); let result = interpreter - .eval_build_file( - PackageLabel::testing_parse("root//headphones"), - &mut StarlarkProfilerOrInstrumentation::disabled(), - ) + .eval_build_file(PackageLabel::testing_parse("root//headphones")) .await .unwrap(); @@ -76,6 +72,7 @@ async fn test_package_value_same_dir_package_file() { .attr("value", AttrInspectOptions::DefinedOnly) .unwrap() .unwrap() + .value .as_display_no_ctx() .to_string() ); @@ -100,17 +97,14 @@ async fn test_package_value_parent_dir_package_file() { ), ); - let ctx = calculation(&fs).await; - let interpreter = ctx + let mut ctx = calculation(&fs).await; + let mut interpreter = ctx .get_interpreter_calculator(root_cell(), BuildFileCell::new(root_cell())) .await .unwrap(); let result = interpreter - .eval_build_file( - PackageLabel::testing_parse("root//trackpad"), - &mut StarlarkProfilerOrInstrumentation::disabled(), - ) + .eval_build_file(PackageLabel::testing_parse("root//trackpad")) .await .unwrap(); @@ -123,6 +117,7 @@ async fn test_package_value_parent_dir_package_file() { .attr("value", AttrInspectOptions::DefinedOnly) .unwrap() .unwrap() + .value .as_display_no_ctx() .to_string() ); @@ -136,16 +131,13 @@ async fn test_overwrite_package_value_not_allowed_without_overwrite_flag() { fs.write_file("foo/PACKAGE", "write_package_value('aaa.bbb', 'ccc')"); fs.write_file("foo/BUCK", ""); - let ctx = calculation(&fs).await; - let 
interpreter = ctx + let mut ctx = calculation(&fs).await; + let mut interpreter = ctx .get_interpreter_calculator(root_cell(), BuildFileCell::new(root_cell())) .await .unwrap(); let err = interpreter - .eval_build_file( - PackageLabel::testing_parse("root//foo"), - &mut StarlarkProfilerOrInstrumentation::disabled(), - ) + .eval_build_file(PackageLabel::testing_parse("root//foo")) .await; assert!( format!("{:?}", err) @@ -178,7 +170,7 @@ async fn test_overwrite_package_value_with_flag() { ), ); - let ctx = calculation(&fs).await; + let mut ctx = calculation(&fs).await; let result = ctx .get_interpreter_results(PackageLabel::testing_parse("root//foo")) .await @@ -193,6 +185,7 @@ async fn test_overwrite_package_value_with_flag() { .attr("value", AttrInspectOptions::DefinedOnly) .unwrap() .unwrap() + .value .as_display_no_ctx() .to_string() ); @@ -221,17 +214,14 @@ async fn test_read_parent_package_value() { ), ); - let ctx = calculation(&fs).await; - let interpreter = ctx + let mut ctx = calculation(&fs).await; + let mut interpreter = ctx .get_interpreter_calculator(root_cell(), BuildFileCell::new(root_cell())) .await .unwrap(); let result = interpreter - .eval_build_file( - PackageLabel::testing_parse("root//foo"), - &mut StarlarkProfilerOrInstrumentation::disabled(), - ) + .eval_build_file(PackageLabel::testing_parse("root//foo")) .await .unwrap(); @@ -244,6 +234,7 @@ async fn test_read_parent_package_value() { .attr("value", AttrInspectOptions::DefinedOnly) .unwrap() .unwrap() + .value .as_display_no_ctx() .to_string() ); @@ -286,17 +277,14 @@ async fn test_read_parent_package_value_from_bzl() { ), ); - let ctx = calculation(&fs).await; - let interpreter = ctx + let mut ctx = calculation(&fs).await; + let mut interpreter = ctx .get_interpreter_calculator(root_cell(), BuildFileCell::new(root_cell())) .await .unwrap(); let result = interpreter - .eval_build_file( - PackageLabel::testing_parse("root//foo"), - &mut StarlarkProfilerOrInstrumentation::disabled(), - ) + .eval_build_file(PackageLabel::testing_parse("root//foo")) .await .unwrap(); @@ -309,6 +297,7 @@ async fn test_read_parent_package_value_from_bzl() { .attr("value", AttrInspectOptions::DefinedOnly) .unwrap() .unwrap() + .value .as_display_no_ctx() .to_string() ); @@ -322,16 +311,13 @@ async fn test_read_parent_package_value_is_suggested_in_package_file() { fs.write_file("foo/PACKAGE", "read_package_value('aaa.bbb')"); fs.write_file("foo/BUCK", ""); - let ctx = calculation(&fs).await; - let interpreter = ctx + let mut ctx = calculation(&fs).await; + let mut interpreter = ctx .get_interpreter_calculator(root_cell(), BuildFileCell::new(root_cell())) .await .unwrap(); let err = interpreter - .eval_build_file( - PackageLabel::testing_parse("root//foo"), - &mut StarlarkProfilerOrInstrumentation::disabled(), - ) + .eval_build_file(PackageLabel::testing_parse("root//foo")) .await; assert!( format!("{:?}", err) @@ -366,16 +352,13 @@ async fn test_read_parent_package_value_is_suggested_in_bzl_file() { ); fs.write_file("foo/BUCK", ""); - let ctx = calculation(&fs).await; - let interpreter = ctx + let mut ctx = calculation(&fs).await; + let mut interpreter = ctx .get_interpreter_calculator(root_cell(), BuildFileCell::new(root_cell())) .await .unwrap(); let err = interpreter - .eval_build_file( - PackageLabel::testing_parse("root//foo"), - &mut StarlarkProfilerOrInstrumentation::disabled(), - ) + .eval_build_file(PackageLabel::testing_parse("root//foo")) .await; assert!( format!("{:?}", err) diff --git 
a/app/buck2_interpreter_for_build_tests/src/tests.rs b/app/buck2_interpreter_for_build_tests/src/tests.rs index f76fd75f4d3c6..1f3854205efcd 100644 --- a/app/buck2_interpreter_for_build_tests/src/tests.rs +++ b/app/buck2_interpreter_for_build_tests/src/tests.rs @@ -7,12 +7,12 @@ * of this source tree. */ -use buck2_build_api::interpreter::rule_defs::register_rule_defs; +use std::sync::Arc; + use buck2_common::dice::cells::SetCellResolver; use buck2_common::dice::data::testing::SetTestingIoProvider; +use buck2_common::legacy_configs::cells::ExternalBuckconfigData; use buck2_common::legacy_configs::dice::SetLegacyConfigs; -use buck2_common::legacy_configs::LegacyBuckConfig; -use buck2_common::legacy_configs::LegacyBuckConfigs; use buck2_core::bzl::ImportPath; use buck2_core::cells::cell_root_path::CellRootPathBuf; use buck2_core::cells::name::CellName; @@ -20,21 +20,17 @@ use buck2_core::cells::CellResolver; use buck2_core::fs::project::ProjectRootTemp; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; use buck2_core::package::PackageLabel; +use buck2_core::target::label::interner::ConcurrentTargetLabelInterner; use buck2_events::dispatch::EventDispatcher; use buck2_interpreter::dice::starlark_debug::SetStarlarkDebugger; -use buck2_interpreter::dice::starlark_profiler::SetStarlarkProfilerInstrumentation; -use buck2_interpreter::dice::starlark_profiler::StarlarkProfilerConfiguration; use buck2_interpreter::dice::starlark_types::SetStarlarkTypes; use buck2_interpreter::extra::InterpreterHostArchitecture; use buck2_interpreter::extra::InterpreterHostPlatform; use buck2_interpreter::load_module::InterpreterCalculation; -use buck2_interpreter_for_build::attrs::attrs_global::register_attrs; +use buck2_interpreter::starlark_profiler::config::SetStarlarkProfilerInstrumentation; +use buck2_interpreter::starlark_profiler::config::StarlarkProfilerConfiguration; use buck2_interpreter_for_build::interpreter::configuror::BuildInterpreterConfiguror; use buck2_interpreter_for_build::interpreter::context::SetInterpreterContext; -use buck2_interpreter_for_build::rule::register_rule_function; -use buck2_interpreter_for_build::super_package::defs::register_package_natives; -use buck2_interpreter_for_build::super_package::package_value::register_read_package_value; -use buck2_interpreter_for_build::super_package::package_value::register_write_package_value; use buck2_node::nodes::frontend::TargetGraphCalculation; use dice::DetectCycles; use dice::Dice; @@ -43,15 +39,6 @@ use dice::UserComputationData; use dupe::Dupe; use indoc::indoc; -fn empty_configs(resolver: &CellResolver) -> LegacyBuckConfigs { - let config = resolver - .cells() - .map(|(name, _)| (name, LegacyBuckConfig::empty())) - .collect(); - - LegacyBuckConfigs::new(config) -} - pub(crate) fn root_cell() -> CellName { CellName::testing_new("root") } @@ -71,7 +58,6 @@ pub(crate) async fn calculation(fs: &ProjectRootTemp) -> DiceTransaction { CellName::testing_new("root"), CellRootPathBuf::new(ProjectRelativePathBuf::unchecked_new("".to_owned())), ); - let cell_configs = empty_configs(&resolver); ctx.set_cell_resolver(resolver.dupe()).unwrap(); ctx.set_interpreter_context( @@ -82,26 +68,15 @@ pub(crate) async fn calculation(fs: &ProjectRootTemp) -> DiceTransaction { None, false, false, - register_read_package_value, - |globals| { - register_package_natives(globals); - register_read_package_value(globals); - }, - |globals| { - register_rule_defs(globals); - register_rule_function(globals); - register_attrs(globals); - 
register_read_package_value(globals); - register_write_package_value(globals); - }, - |_| {}, - None, + Arc::new(ConcurrentTargetLabelInterner::default()), ) .unwrap(), ) .unwrap(); - ctx.set_legacy_configs(cell_configs).unwrap(); - ctx.set_starlark_profiler_instrumentation_override(StarlarkProfilerConfiguration::default()) + ctx.set_legacy_config_external_data(Arc::new(ExternalBuckconfigData::testing_default())) + .unwrap(); + ctx.set_starlark_profiler_configuration(StarlarkProfilerConfiguration::default()) .unwrap(); ctx.set_starlark_types(false, false).unwrap(); ctx.commit().await @@ -119,7 +94,7 @@ async fn test_eval_import() { ), ); - let ctx = calculation(&fs).await; + let mut ctx = calculation(&fs).await; let env = ctx .get_loaded_module_from_import_path(&ImportPath::testing_new("root//pkg:two.bzl")) @@ -158,7 +133,7 @@ async fn test_eval_import_with_load() { ), ); - let ctx = calculation(&fs).await; + let mut ctx = calculation(&fs).await; let env = ctx .get_loaded_module_from_import_path(&ImportPath::testing_new("root//pkg:two.bzl")) .await @@ -233,7 +208,7 @@ async fn test_eval_build_file() { ), ); - let ctx = calculation(&fs).await; + let mut ctx = calculation(&fs).await; let package = PackageLabel::testing_parse("root//pkg"); let eval_result = ctx.get_interpreter_results(package.dupe()).await.unwrap(); diff --git a/app/buck2_interpreter_for_build_tests/src/uncategorized.rs b/app/buck2_interpreter_for_build_tests/src/uncategorized.rs index 64341dd47b58d..413cad3840255 100644 --- a/app/buck2_interpreter_for_build_tests/src/uncategorized.rs +++ b/app/buck2_interpreter_for_build_tests/src/uncategorized.rs @@ -7,10 +7,8 @@ * of this source tree. */ -use buck2_common::result::SharedResult; use buck2_core::bzl::ImportPath; use buck2_interpreter_for_build::interpreter::testing::Tester; -use buck2_interpreter_for_build::rule::register_rule_function; use indoc::indoc; use starlark::environment::GlobalsBuilder; use starlark::starlark_module; @@ -28,7 +26,6 @@ fn cannot_register_target_twice() { "# ); let mut tester = Tester::new().unwrap(); - tester.additional_globals(register_rule_function); let err = tester.run_starlark_test(content).expect_err("should fail"); assert!( err.to_string() @@ -47,7 +44,7 @@ fn extra_provider_module(builder: &mut GlobalsBuilder) { } #[test] -fn tester_can_load_extra_modules() -> SharedResult<()> { +fn tester_can_load_extra_modules() -> buck2_error::Result<()> { let mut tester = Tester::new()?; tester.additional_globals(extra_provider_module); @@ -73,8 +70,8 @@ fn tester_can_load_extra_modules() -> SharedResult<()> { } #[test] -fn tester_can_load_symbols_transitively() -> SharedResult<()> { - fn new_tester() -> SharedResult<Tester> { +fn tester_can_load_symbols_transitively() -> buck2_error::Result<()> { + fn new_tester() -> buck2_error::Result<Tester> { let mut tester = Tester::new()?; tester.add_import( &ImportPath::testing_new("root//test:def1.bzl"), diff --git a/app/buck2_interpreter_for_build_tests/src/uncategorized_2.rs b/app/buck2_interpreter_for_build_tests/src/uncategorized_2.rs deleted file mode 100644 index 79b1b37206956..0000000000000 --- a/app/buck2_interpreter_for_build_tests/src/uncategorized_2.rs +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -use buck2_interpreter_for_build::interpreter::testing::Tester; -use indoc::indoc; - -#[test] -fn test_eval() { - let mut tester = Tester::new().unwrap(); - tester - .run_starlark_test(indoc!( - r#" - def test(): - assert_eq("some/package", __internal__.package_name()) - assert_eq("@root", __internal__.repository_name()) - - assert_eq(package_name(), __internal__.package_name()) - assert_eq(repository_name(), __internal__.repository_name()) - - assert_eq(package_name(), get_base_path()) - - print("some message") - print("multiple", "strings") - "# - )) - .unwrap(); -} diff --git a/app/buck2_miniperf/BUCK b/app/buck2_miniperf/BUCK index 6e1db183a5a61..43d39fa3aefaf 100644 --- a/app/buck2_miniperf/BUCK +++ b/app/buck2_miniperf/BUCK @@ -1,6 +1,5 @@ load("@fbcode_macros//build_defs:rust_binary.bzl", "rust_binary") load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -10,6 +9,7 @@ rust_binary( ["src/**/*.rs"], ), allocator = "malloc", + allow_cache_upload = True, # JEMalloc isn't super lean (and we don't actually allocate anything here), # so dont use it. default_strip_mode = "full", @@ -46,6 +46,6 @@ rust_library( }), test_env = { "MINIPERF": "$(exe_target :buck2_miniperf)", - "THREE_BILLION_INSTRUCTIONS": "$(exe_target //buck2/shed/three_billion_instructions:three_billion_instructions)", + "THREE_BILLION_INSTRUCTIONS": "$(exe_target //buck2/shed/three_billion_instructions:three_billion_instructions-bin)", }, ) diff --git a/app/buck2_miniperf/Cargo.toml b/app/buck2_miniperf/Cargo.toml index 67ebffb9a9bd3..a72dd54299f53 100644 --- a/app/buck2_miniperf/Cargo.toml +++ b/app/buck2_miniperf/Cargo.toml @@ -1,17 +1,18 @@ [package] +authors = ["Meta"] +description = "A minimal implementation of perf. This has lower overhead when only CPU instruction counts are needed." +edition = "2021" +license = { workspace = true } name = "buck2_miniperf" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "A minimal implementation of perf. This has lower overhead when only CPU instruction counts are needed." 
-license = "MIT OR Apache-2.0" -authors = ["Meta"] [dependencies] anyhow = { workspace = true } [target.'cfg(target_os = "linux")'.dependencies] bincode = { workspace = true } -smallvec = { workspace = true } -perf-event = { workspace = true } buck2_miniperf_proto = { workspace = true } +perf-event = { workspace = true } +smallvec = { workspace = true } thiserror = { workspace = true } diff --git a/app/buck2_miniperf_proto/BUCK b/app/buck2_miniperf_proto/BUCK index 14b06c04f8eb3..b9e0a77a9d636 100644 --- a/app/buck2_miniperf_proto/BUCK +++ b/app/buck2_miniperf_proto/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -11,6 +10,7 @@ rust_library( ], deps = [ "fbsource//third-party/rust:serde", + "//buck2/app/buck2_data:buck2_data", "//buck2/gazebo/dupe:dupe", ], ) diff --git a/app/buck2_miniperf_proto/Cargo.toml b/app/buck2_miniperf_proto/Cargo.toml index 3b7e0607f3581..827380af2868e 100644 --- a/app/buck2_miniperf_proto/Cargo.toml +++ b/app/buck2_miniperf_proto/Cargo.toml @@ -1,12 +1,14 @@ [package] +authors = ["Meta"] +description = "Serialized datastructures for buck2_miniperf" +edition = "2021" +license = { workspace = true } name = "buck2_miniperf_proto" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Serialized datastructures for buck2_miniperf" -license = "MIT OR Apache-2.0" -authors = ["Meta"] [dependencies] +buck2_data = { workspace = true } dupe = { workspace = true } serde = { workspace = true } diff --git a/app/buck2_miniperf_proto/src/lib.rs b/app/buck2_miniperf_proto/src/lib.rs index 061091db62818..a293b698e7e8c 100644 --- a/app/buck2_miniperf_proto/src/lib.rs +++ b/app/buck2_miniperf_proto/src/lib.rs @@ -68,10 +68,18 @@ impl MiniperfCounter { let ratio = self.time_enabled as f64 / self.time_running as f64; (self.count as f64 * ratio) as u64 } + + pub fn to_proto(&self) -> buck2_data::CpuCounter { + buck2_data::CpuCounter { + count: self.count, + time_enabled: self.time_enabled, + time_running: self.time_running, + } + } } #[cfg(test)] -mod test { +mod tests { use super::*; #[test] @@ -117,4 +125,18 @@ mod test { MiniperfOutput::EXPECTED_SIZE ); } + + #[test] + fn test_to_proto() { + let output = MiniperfCounter { + count: 123, + time_enabled: 100, + time_running: 50, + } + .to_proto(); + + assert_eq!(output.count, 123); + assert_eq!(output.time_enabled, 100); + assert_eq!(output.time_running, 50); + } } diff --git a/app/buck2_node/BUCK b/app/buck2_node/BUCK index 6ba043449e15d..cdc31b00d9662 100644 --- a/app/buck2_node/BUCK +++ b/app/buck2_node/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -13,23 +12,26 @@ rust_library( deps = [ "fbsource//third-party/rust:anyhow", "fbsource//third-party/rust:async-trait", - "fbsource//third-party/rust:derivative", "fbsource//third-party/rust:derive_more", "fbsource//third-party/rust:either", - "fbsource//third-party/rust:fnv", "fbsource//third-party/rust:futures", "fbsource//third-party/rust:itertools", + "fbsource//third-party/rust:memchr", "fbsource//third-party/rust:once_cell", "fbsource//third-party/rust:ref-cast", "fbsource//third-party/rust:serde", "fbsource//third-party/rust:serde_json", + "fbsource//third-party/rust:smallvec", "fbsource//third-party/rust:static_assertions", "fbsource//third-party/rust:strsim", - "fbsource//third-party/rust:thiserror", 
+ "fbsource//third-party/rust:triomphe", "//buck2/allocative/allocative:allocative", "//buck2/app/buck2_common:buck2_common", "//buck2/app/buck2_core:buck2_core", + "//buck2/app/buck2_data:buck2_data", + "//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_events:buck2_events", + "//buck2/app/buck2_futures:buck2_futures", "//buck2/app/buck2_query:buck2_query", "//buck2/app/buck2_query_parser:buck2_query_parser", "//buck2/app/buck2_util:buck2_util", diff --git a/app/buck2_node/Cargo.toml b/app/buck2_node/Cargo.toml index 75ad64a5929b5..5d21bf55d2ba9 100644 --- a/app/buck2_node/Cargo.toml +++ b/app/buck2_node/Cargo.toml @@ -1,46 +1,45 @@ [package] +description = "Target graph node" +edition = "2021" +license = { workspace = true } name = "buck2_node" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Target graph node" [dependencies] anyhow = { workspace = true } async-trait = { workspace = true } derive_more = { workspace = true } -derivative = { workspace = true } either = { workspace = true } -fnv = { workspace = true } futures = { workspace = true } itertools = { workspace = true } +memchr = { workspace = true } once_cell = { workspace = true } ref-cast = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } +smallvec = { workspace = true } static_assertions = { workspace = true } strsim = { workspace = true } -thiserror = { workspace = true } +triomphe = { workspace = true } -gazebo = { workspace = true } +allocative = { workspace = true } +cmp_any = { workspace = true } dice = { workspace = true } display_container = { workspace = true } dupe = { workspace = true } -cmp_any = { workspace = true } -gazebo_lint.version = "0.1" -gazebo_lint.optional = true -# @oss-disable: gazebo_lint.path = "../../gazebo_lint/gazebo_lint" +gazebo = { workspace = true } starlark_map = { workspace = true } -allocative = { workspace = true } buck2_common = { workspace = true } buck2_core = { workspace = true } +buck2_data = { workspace = true } +buck2_error = { workspace = true } buck2_events = { workspace = true } +buck2_futures = { workspace = true } buck2_query = { workspace = true } buck2_query_parser = { workspace = true } buck2_util = { workspace = true } [dev-dependencies] tokio = { workspace = true } - -[features] -# @oss-disable: default = ["gazebo_lint"] diff --git a/app/buck2_node/src/attrs.rs b/app/buck2_node/src/attrs.rs new file mode 100644 index 0000000000000..fec0886d18fbc --- /dev/null +++ b/app/buck2_node/src/attrs.rs @@ -0,0 +1,36 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +pub(crate) mod anon_target_attr_validation; +pub mod attr; +pub mod attr_type; +pub mod coerced_attr; +pub mod coerced_attr_full; +pub mod coerced_attr_with_type; +pub mod coerced_deps_collector; +pub mod coerced_path; +pub mod coercion_context; +pub mod configurable; +pub mod configuration_context; +pub mod configured_attr; +pub mod configured_attr_full; +pub mod configured_attr_info_for_tests; +pub mod configured_traversal; +pub mod display; +pub mod fmt_context; +pub mod hacks; +pub mod id; +pub mod inspect_options; +pub mod internal; +pub mod json; +pub mod serialize; +pub mod spec; +pub mod testing; +pub mod traversal; +pub mod values; diff --git a/app/buck2_node/src/attrs/anon_target_attr_validation.rs b/app/buck2_node/src/attrs/anon_target_attr_validation.rs index 2f4bf6f08080c..79c5a286a4042 100644 --- a/app/buck2_node/src/attrs/anon_target_attr_validation.rs +++ b/app/buck2_node/src/attrs/anon_target_attr_validation.rs @@ -9,12 +9,10 @@ use std::fmt::Debug; -use thiserror::Error; - use crate::attrs::attr_type::AttrType; use crate::attrs::attr_type::AttrTypeInner; -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum AnonRuleAttrError { #[error("Attr type `{0}` is not supported for anon rules")] NotSupported(String), @@ -27,7 +25,7 @@ pub trait AnonRuleAttrValidation { impl AnonRuleAttrValidation for AttrType { fn validate_for_anon_rule(&self) -> anyhow::Result<()> { - match self.0.as_ref() { + match &self.0.inner { AttrTypeInner::Any(_) => Ok(()), AttrTypeInner::Bool(_) => Ok(()), AttrTypeInner::Int(_) => Ok(()), diff --git a/app/buck2_node/src/attrs/attr_type.rs b/app/buck2_node/src/attrs/attr_type.rs new file mode 100644 index 0000000000000..ba16274d3aac1 --- /dev/null +++ b/app/buck2_node/src/attrs/attr_type.rs @@ -0,0 +1,449 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use std::fmt; +use std::fmt::Display; +use std::sync::Arc; + +use allocative::Allocative; +use buck2_core::configuration::transition::id::TransitionId; +use buck2_core::plugins::PluginKind; +use buck2_core::plugins::PluginKindSet; +use dupe::Dupe; +use once_cell::sync::Lazy; + +use crate::attrs::attr_type::any::AnyAttrType; +use crate::attrs::attr_type::arg::ArgAttrType; +use crate::attrs::attr_type::bool::BoolAttrType; +use crate::attrs::attr_type::configuration_dep::ConfigurationDepAttrType; +use crate::attrs::attr_type::configured_dep::ExplicitConfiguredDepAttrType; +use crate::attrs::attr_type::dep::DepAttrTransition; +use crate::attrs::attr_type::dep::DepAttrType; +use crate::attrs::attr_type::dict::DictAttrType; +use crate::attrs::attr_type::enumeration::EnumAttrType; +use crate::attrs::attr_type::int::IntAttrType; +use crate::attrs::attr_type::label::LabelAttrType; +use crate::attrs::attr_type::list::ListAttrType; +use crate::attrs::attr_type::metadata::MetadataAttrType; +use crate::attrs::attr_type::one_of::OneOfAttrType; +use crate::attrs::attr_type::option::OptionAttrType; +use crate::attrs::attr_type::plugin_dep::PluginDepAttrType; +use crate::attrs::attr_type::query::QueryAttrType; +use crate::attrs::attr_type::source::SourceAttrType; +use crate::attrs::attr_type::split_transition_dep::SplitTransitionDepAttrType; +use crate::attrs::attr_type::string::StringAttrType; +use crate::attrs::attr_type::target_modifiers::TargetModifiersAttrType; +use crate::attrs::attr_type::tuple::TupleAttrType; +use crate::attrs::attr_type::visibility::VisibilityAttrType; +use crate::attrs::attr_type::within_view::WithinViewAttrType; +use crate::provider_id_set::ProviderIdSet; + +pub mod any; +pub mod any_matches; +pub mod arg; +pub mod attr_config; +pub mod attr_like; +pub mod bool; +pub mod configuration_dep; +pub mod configured_dep; +pub mod default_only; +pub mod dep; +pub mod dict; +pub mod enumeration; +pub mod int; +pub mod label; +pub mod list; +pub mod metadata; +pub mod one_of; +pub mod option; +pub mod plugin_dep; +pub mod query; +pub mod source; +pub mod split_transition_dep; +pub mod string; +pub mod target_modifiers; +pub mod tuple; +pub mod visibility; +pub mod within_view; + +#[derive(Clone, Dupe, Debug, Hash, Eq, PartialEq, Allocative)] +pub struct AttrType(pub Arc<AttrTypeInner2>); + +#[derive(Debug, Hash, Eq, PartialEq, Allocative)] +pub struct AttrTypeInner2 { + pub inner: AttrTypeInner, + /// Attribute may have queries. + /// + /// These are either: + /// * `attrs.query(...)` + /// * `attrs.arg()` + /// * collection of those e.g. 
`attrs.list(attrs.query(...))` + pub may_have_queries: bool, +} + +#[derive(Debug, Hash, Eq, PartialEq, Allocative)] +pub enum AttrTypeInner { + Any(AnyAttrType), + Arg(ArgAttrType), + ConfigurationDep(ConfigurationDepAttrType), + ConfiguredDep(ExplicitConfiguredDepAttrType), + Bool(BoolAttrType), + Int(IntAttrType), + Dep(DepAttrType), + Dict(DictAttrType), + List(ListAttrType), + Tuple(TupleAttrType), + OneOf(OneOfAttrType), + Option(OptionAttrType), + PluginDep(PluginDepAttrType), + Query(QueryAttrType), + Source(SourceAttrType), + SplitTransitionDep(SplitTransitionDepAttrType), + String(StringAttrType), + Enum(EnumAttrType), + Label(LabelAttrType), + Visibility(VisibilityAttrType), + WithinView(WithinViewAttrType), + Metadata(MetadataAttrType), + TargetModifiers(TargetModifiersAttrType), +} + +impl AttrType { + pub fn fmt_with_default( + &self, + f: &mut fmt::Formatter<'_>, + default: Option<&str>, + ) -> fmt::Result { + let mut attr = |s| match default { + None => write!(f, "attrs.{}()", s), + Some(default) => write!(f, "attrs.{}(default={})", s, default), + }; + let arg = || match default { + None => String::new(), + Some(x) => format!(", default={}", x), + }; + + match &self.0.inner { + AttrTypeInner::Any(_) => attr("any"), + AttrTypeInner::Arg(_) => attr("arg"), + AttrTypeInner::ConfigurationDep(_) => attr("configuration_dep"), + AttrTypeInner::ConfiguredDep(_) => attr("configured_dep"), + AttrTypeInner::PluginDep(_) => attr("plugin_dep"), + AttrTypeInner::Bool(_) => attr("bool"), + AttrTypeInner::Int(_) => attr("int"), + AttrTypeInner::Dep(_) => attr("dep"), + AttrTypeInner::Query(_) => attr("query"), + AttrTypeInner::Dict(x) => x.fmt_with_arg(f, &arg()), + AttrTypeInner::List(x) => x.fmt_with_arg(f, &arg()), + AttrTypeInner::Tuple(x) => x.fmt_with_arg(f, &arg()), + AttrTypeInner::OneOf(x) => x.fmt_with_arg(f, &arg()), + AttrTypeInner::Option(x) => x.fmt_with_arg(f, &arg()), + AttrTypeInner::Enum(x) => x.fmt_with_arg(f, &arg()), + AttrTypeInner::Source(_) => attr("source"), + AttrTypeInner::SplitTransitionDep(_) => attr("split_transition_dep"), + AttrTypeInner::String(_) => attr("string"), + AttrTypeInner::Label(_) => attr("label"), + AttrTypeInner::Visibility(_) => attr("visibility"), + AttrTypeInner::WithinView(_) => attr("within_view"), + AttrTypeInner::Metadata(_) => attr("metadata"), + AttrTypeInner::TargetModifiers(_) => attr("modifiers"), + } + } + + pub fn any() -> Self { + Self(Arc::new(AttrTypeInner2 { + inner: AttrTypeInner::Any(AnyAttrType), + may_have_queries: false, + })) + } + + pub(crate) fn any_ref() -> &'static Self { + static ANY: Lazy<AttrType> = Lazy::new(AttrType::any); + &ANY + } + + /// An arg attribute. Args are similar to strings, but have built-in support + /// for string parameter macros and make variables. Command line + /// builders used in rule implementations use args (and so an arg attribute + /// can be directly added to them). + /// + /// Takes in an anon_target_compatible flag, which indicates whether the arg + /// can be passed into anon targets. There is a slight memory hit when using + /// this flag. 
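+ /// + /// Illustrative Starlark usage (a sketch only; the attribute and target + /// names are hypothetical, not part of this change): + /// + /// ```starlark + /// # $(exe ...) is a string parameter macro, expanded when the arg is + /// # added to a command line. + /// "compiler_flags": attrs.arg(default = "$(exe //tools:compiler) --fast"), + /// ```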
+ pub fn arg(anon_target_compatible: bool) -> Self { + Self(Arc::new(AttrTypeInner2 { + inner: AttrTypeInner::Arg(ArgAttrType { + anon_target_compatible, + }), + may_have_queries: true, + })) + } + + pub fn enumeration(variants: Vec<String>) -> anyhow::Result<Self> { + Ok(Self(Arc::new(AttrTypeInner2 { + inner: AttrTypeInner::Enum(EnumAttrType::new(variants)?), + may_have_queries: false, + }))) + } + + pub fn bool() -> Self { + Self(Arc::new(AttrTypeInner2 { + inner: AttrTypeInner::Bool(BoolAttrType), + may_have_queries: false, + })) + } + + pub fn int() -> Self { + Self(Arc::new(AttrTypeInner2 { + inner: AttrTypeInner::Int(IntAttrType), + may_have_queries: false, + })) + } + + pub fn configuration_dep() -> Self { + Self(Arc::new(AttrTypeInner2 { + inner: AttrTypeInner::ConfigurationDep(ConfigurationDepAttrType), + may_have_queries: false, + })) + } + + /// A TargetLabel attribute optionally with a specific provider/providers + /// that are expected. + /// + /// If `required_providers` is non-empty, the dependency must return those providers + /// from its implementation function. Otherwise an error will result at resolution time. + pub fn dep(required_providers: ProviderIdSet, plugin_kinds: PluginKindSet) -> Self { + Self(Arc::new(AttrTypeInner2 { + inner: AttrTypeInner::Dep(DepAttrType::new( + required_providers, + DepAttrTransition::Identity(plugin_kinds), + )), + may_have_queries: false, + })) + } + + /// An execution dependency attribute optionally with a specific provider/providers + /// that are expected. + /// + /// If `required_providers` is non-empty, the dependency must return those providers + /// from its implementation function. Otherwise an error will result at resolution time. + pub fn exec_dep(required_providers: ProviderIdSet) -> Self { + Self(Arc::new(AttrTypeInner2 { + inner: AttrTypeInner::Dep(DepAttrType::new( + required_providers, + DepAttrTransition::Exec, + )), + may_have_queries: false, + })) + } + + /// A toolchain dependency attribute optionally with a specific provider/providers + /// that are expected. + /// + /// If `required_providers` is non-empty, the dependency must return those providers + /// from its implementation function. Otherwise an error will result at resolution time. + pub fn toolchain_dep(required_providers: ProviderIdSet) -> Self { + Self(Arc::new(AttrTypeInner2 { + inner: AttrTypeInner::Dep(DepAttrType::new( + required_providers, + DepAttrTransition::Toolchain, + )), + may_have_queries: false, + })) + } + + /// A dependency attribute which changes the configuration, optionally with a specific + /// provider/providers that are expected. + /// + /// If `required_providers` is non-empty, the dependency must return those providers + /// from its implementation function. Otherwise an error will result at resolution time. 
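+ /// + /// Roughly how this surfaces in Starlark (a sketch; `my_transition` and + /// `MyInfo` are hypothetical names): + /// + /// ```starlark + /// "dep": attrs.transition_dep(cfg = my_transition, providers = [MyInfo]), + /// ```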
+ pub fn transition_dep(required_providers: ProviderIdSet, cfg: Arc<TransitionId>) -> Self { + Self(Arc::new(AttrTypeInner2 { + inner: AttrTypeInner::Dep(DepAttrType::new( + required_providers, + DepAttrTransition::Transition(cfg), + )), + may_have_queries: false, + })) + } + + pub fn configured_dep(required_providers: ProviderIdSet) -> Self { + Self(Arc::new(AttrTypeInner2 { + inner: AttrTypeInner::ConfiguredDep(ExplicitConfiguredDepAttrType { + required_providers, + }), + may_have_queries: false, + })) + } + + pub fn split_transition_dep(required_providers: ProviderIdSet, cfg: Arc<TransitionId>) -> Self { + Self(Arc::new(AttrTypeInner2 { + inner: AttrTypeInner::SplitTransitionDep(SplitTransitionDepAttrType::new( + required_providers, + cfg, + )), + may_have_queries: false, + })) + } + + pub fn plugin_dep(kind: PluginKind) -> Self { + Self(Arc::new(AttrTypeInner2 { + inner: AttrTypeInner::PluginDep(PluginDepAttrType::new(kind)), + may_have_queries: false, + })) + } + + /// A dict attribute containing keys and values of the specified types. + pub fn dict(key: AttrType, value: AttrType, sorted: bool) -> Self { + let may_have_queries = key.0.may_have_queries || value.0.may_have_queries; + Self(Arc::new(AttrTypeInner2 { + inner: AttrTypeInner::Dict(DictAttrType::new(key, value, sorted)), + may_have_queries, + })) + } + + /// A list attribute containing items of some inner type. + pub fn list(inner: AttrType) -> Self { + let may_have_queries = inner.0.may_have_queries; + Self(Arc::new(AttrTypeInner2 { + inner: AttrTypeInner::List(ListAttrType::new(inner)), + may_have_queries, + })) + } + + pub fn tuple(xs: Vec<AttrType>) -> Self { + let may_have_queries = xs.iter().any(|x| x.0.may_have_queries); + Self(Arc::new(AttrTypeInner2 { + inner: AttrTypeInner::Tuple(TupleAttrType::new(xs)), + may_have_queries, + })) + } + + pub fn one_of(xs: Vec<AttrType>) -> Self { + let may_have_queries = xs.iter().any(|x| x.0.may_have_queries); + Self(Arc::new(AttrTypeInner2 { + inner: AttrTypeInner::OneOf(OneOfAttrType::new(xs)), + may_have_queries, + })) + } + + pub fn option(value: AttrType) -> Self { + let may_have_queries = value.0.may_have_queries; + Self(Arc::new(AttrTypeInner2 { + inner: AttrTypeInner::Option(OptionAttrType::new(value)), + may_have_queries, + })) + } + + pub fn query() -> Self { + Self(Arc::new(AttrTypeInner2 { + inner: AttrTypeInner::Query(QueryAttrType::new(DepAttrType::new( + ProviderIdSet::EMPTY, + DepAttrTransition::Identity(PluginKindSet::EMPTY), + ))), + may_have_queries: true, + })) + } + + /// A file attribute. This will accept paths or targets like + /// `//some:target[inner]`. When contained within a list, one item may + /// expand to multiple (e.g. an output group or a lazy glob). + pub fn source(allow_directory: bool) -> Self { + Self(Arc::new(AttrTypeInner2 { + inner: AttrTypeInner::Source(SourceAttrType { allow_directory }), + may_have_queries: false, + })) + } + + /// A string attribute. For flags passed to a command, an arg() attr is + /// preferred to support macro and make variable substitution. 
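+ /// + /// For example (illustrative only; the attribute name is hypothetical): + /// + /// ```starlark + /// "mode": attrs.string(default = "release"), + /// ```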
+ pub fn string() -> Self { + Self(Arc::new(AttrTypeInner2 { + inner: AttrTypeInner::String(StringAttrType), + may_have_queries: false, + })) + } + + pub fn label() -> Self { + Self(Arc::new(AttrTypeInner2 { + inner: AttrTypeInner::Label(LabelAttrType), + may_have_queries: false, + })) + } + + pub fn visibility() -> Self { + Self(Arc::new(AttrTypeInner2 { + inner: AttrTypeInner::Visibility(VisibilityAttrType), + may_have_queries: false, + })) + } + + pub fn within_view() -> Self { + Self(Arc::new(AttrTypeInner2 { + inner: AttrTypeInner::WithinView(WithinViewAttrType), + may_have_queries: false, + })) + } + + pub fn metadata() -> Self { + Self(Arc::new(AttrTypeInner2 { + inner: AttrTypeInner::Metadata(MetadataAttrType), + may_have_queries: false, + })) + } + + pub fn target_modifiers() -> Self { + Self(Arc::new(AttrTypeInner2 { + inner: AttrTypeInner::TargetModifiers(TargetModifiersAttrType), + may_have_queries: false, + })) + } + + /// Used when we first detect that concatenation is going to happen for an attr + /// while loading a build file. Returning false here will make us provide an error + /// during the loading phase at the point that the concatenation happens. + /// + /// In some cases, we can't detect that the concatenation isn't allowed at this + /// point and can only provide an error when performing the actual concatenation. + pub fn supports_concat(&self) -> bool { + match &self.0.inner { + AttrTypeInner::Bool(_) + | AttrTypeInner::Query(_) + | AttrTypeInner::Source(_) + | AttrTypeInner::ConfigurationDep(_) + | AttrTypeInner::ConfiguredDep(_) + | AttrTypeInner::PluginDep(_) + | AttrTypeInner::Int(_) + | AttrTypeInner::Dep(_) + | AttrTypeInner::Tuple(_) + | AttrTypeInner::SplitTransitionDep(_) + | AttrTypeInner::Label(_) + | AttrTypeInner::Enum(_) + | AttrTypeInner::Visibility(_) + | AttrTypeInner::WithinView(_) + | AttrTypeInner::TargetModifiers(_) + | AttrTypeInner::Metadata(_) => false, + AttrTypeInner::Any(_) + | AttrTypeInner::Arg(_) + | AttrTypeInner::Dict(_) + | AttrTypeInner::List(_) + | AttrTypeInner::String(_) => true, + AttrTypeInner::Option(inner) => inner.inner.supports_concat(), + // Reject if none of the inner types support concat. Mismatched types are rejected later. + AttrTypeInner::OneOf(inner) => inner.any_supports_concat(), + } + } +} + +/// Invariant: All these displays look like function calls, so follow the pattern `attrs.foo(...)`. +impl Display for AttrType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.fmt_with_default(f, None) + } +} diff --git a/app/buck2_node/src/attrs/attr_type/arg.rs b/app/buck2_node/src/attrs/attr_type/arg.rs new file mode 100644 index 0000000000000..7f9bdc7317833 --- /dev/null +++ b/app/buck2_node/src/attrs/attr_type/arg.rs @@ -0,0 +1,389 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +pub mod parser; + +use allocative::Allocative; +use buck2_core::package::source_path::SourcePathRef; +use buck2_core::package::PackageLabel; +use buck2_core::provider::label::ConfiguredProvidersLabel; +use buck2_core::provider::label::ProvidersLabel; +use buck2_core::provider::label::ProvidersLabelMaybeConfigured; +use buck2_util::arc_str::ArcStr; +use derive_more::Display; +use dupe::Dupe; +use gazebo::prelude::SliceExt; +use static_assertions::assert_eq_size; + +use crate::attrs::attr_type::query::QueryMacroBase; +use crate::attrs::coerced_path::CoercedPath; +use crate::attrs::configuration_context::AttrConfigurationContext; +use crate::attrs::configured_traversal::ConfiguredAttrTraversal; +use crate::attrs::traversal::CoercedAttrTraversal; + +#[derive(Debug, Eq, PartialEq, Hash, Clone, Copy, Dupe, Allocative)] +pub struct ArgAttrType { + pub anon_target_compatible: bool, +} + +/// [StringWithMacros] is the core representation for an attrs.arg() (in all of it's coerced, configured, and resolved +/// forms). The parsed arg string is held as a sequence of parts (each part either a literal string or a macro). When +/// being added to a command line, these parts will be concattenated together and added as a single arg. +/// Each variant takes in a boolean which determines if the resolved form should be compatible with anon targets. +#[derive(Debug, Eq, PartialEq, Hash, Clone, Allocative)] +pub enum StringWithMacros { + /// Semantically, StringWithMacros::StringPart(s) is equivalent to + /// StringWithMacros::ManyParts(vec![StringWithMacrosPart::String(s)]). We special-case this + /// for memory efficiency to avoid allocating unnecessary vectors, since lone string parts are + /// very frequent. + StringPart(ArcStr), + // For resolution, we defer all work and simply alloc a ConfiguredStringWithMacros into the starlark + // context. This allows us to defer resolution work to the point that it is being added to a command + // line, but it requires that we can cheaply copy ConfiguredStringWithMacros. + ManyParts(Box<[StringWithMacrosPart
<P>
    ]>), +} + +// Avoid changing the size accidentally. +assert_eq_size!(StringWithMacros, [usize; 3]); + +impl StringWithMacros { + pub fn concat(self, items: impl Iterator>) -> anyhow::Result { + let mut parts = Vec::new(); + for x in std::iter::once(Ok(self)).chain(items) { + match x? { + Self::StringPart(x) => { + parts.push(StringWithMacrosPart::String(x)); + } + Self::ManyParts(xs) => { + parts.extend(xs.into_vec()); + } + } + } + Ok(Self::ManyParts(parts.into_boxed_slice())) + } +} + +impl StringWithMacros { + pub fn traverse<'a>( + &'a self, + traversal: &mut dyn ConfiguredAttrTraversal, + pkg: PackageLabel, + ) -> anyhow::Result<()> { + match self { + Self::StringPart(..) => {} + Self::ManyParts(ref parts) => { + for part in parts.iter() { + match part { + StringWithMacrosPart::String(_) => {} + StringWithMacrosPart::Macro(_, m) => { + m.traverse(traversal, pkg)?; + } + } + } + } + } + Ok(()) + } +} + +impl StringWithMacros { + pub(crate) fn configure( + &self, + ctx: &dyn AttrConfigurationContext, + anon_target_compatible: bool, + ) -> anyhow::Result { + match self { + Self::StringPart(part) => Ok(ConfiguredStringWithMacros { + string_with_macros: StringWithMacros::StringPart(part.clone()), + anon_target_compatible, + }), + Self::ManyParts(parts) => Ok(ConfiguredStringWithMacros { + string_with_macros: StringWithMacros::ManyParts( + parts.try_map(|p| p.configure(ctx))?.into_boxed_slice(), + ), + anon_target_compatible, + }), + } + } + + pub(crate) fn traverse<'a>( + &'a self, + traversal: &mut dyn CoercedAttrTraversal<'a>, + pkg: PackageLabel, + ) -> anyhow::Result<()> { + match self { + Self::StringPart(..) => {} + Self::ManyParts(ref parts) => { + for part in parts.iter() { + match part { + StringWithMacrosPart::String(_) => {} + StringWithMacrosPart::Macro(_, m) => { + m.traverse(traversal, pkg)?; + } + } + } + } + } + + Ok(()) + } +} + +#[derive(Debug, Eq, PartialEq, Hash, Clone, Allocative)] +pub enum StringWithMacrosPart { + String(ArcStr), + Macro(/* write_to_file */ bool, MacroBase
<P>
    ), +} + +assert_eq_size!(MacroBase, [usize; 3]); +assert_eq_size!(StringWithMacrosPart, [usize; 4]); + +#[derive(Debug, Eq, PartialEq, Hash, Clone, Allocative)] +pub struct UnrecognizedMacro { + pub macro_type: Box, + pub args: Box<[String]>, +} + +#[derive(Debug, Eq, PartialEq, Hash, Clone, Allocative)] +pub enum MacroBase { + Location(P), + /// Represents both $(exe) and $(exe_target) usages. + Exe { + label: P, + exec_dep: bool, + }, + /// A user-defined make variable (like `$(CXX)`). This will be resolved based on the propagated TemplateVariableInfos. + UserUnkeyedPlaceholder(Box), + + /// A user-defined macro (like `$(cxxppflags //some:target)`). This will be resolved based on the propagated TemplateVariableInfos. + UserKeyedPlaceholder(Box<(Box, P, Option>)>), + + Query(Box>), + Source(CoercedPath), + + /// Right now, we defer error for unrecognized macros to the place where they are used. This just allows + /// us to progress further into a build and detect more issues. Once we have all (or most) of the buckv1 macros + /// recognized we'll remove this and make it an early error. + UnrecognizedMacro(Box), +} + +impl MacroBase { + pub fn traverse<'a>( + &'a self, + traversal: &mut dyn ConfiguredAttrTraversal, + pkg: PackageLabel, + ) -> anyhow::Result<()> { + // macros can't reference repo inputs (they only reference the outputs of other targets) + match self { + MacroBase::Location(l) | MacroBase::UserKeyedPlaceholder(box (_, l, _)) => { + traversal.dep(l) + } + MacroBase::Exe { + label, + exec_dep: true, + } => traversal.exec_dep(label), + MacroBase::Exe { + label, + exec_dep: false, + } => traversal.dep(label), + MacroBase::Source(path) => { + for x in path.inputs() { + traversal.input(SourcePathRef::new(pkg.dupe(), x))?; + } + Ok(()) + } + MacroBase::Query(query_macro) => query_macro.traverse(traversal), + MacroBase::UserUnkeyedPlaceholder(_) | MacroBase::UnrecognizedMacro(..) => Ok(()), + } + } +} + +impl MacroBase { + pub fn configure(&self, ctx: &dyn AttrConfigurationContext) -> anyhow::Result { + Ok(match self { + UnconfiguredMacro::Location(target) => { + ConfiguredMacro::Location(ctx.configure_target(target)) + } + UnconfiguredMacro::Exe { label, exec_dep } => ConfiguredMacro::Exe { + label: if *exec_dep { + ctx.configure_exec_target(label)? 
+ } else { + ctx.configure_target(label) + }, + exec_dep: *exec_dep, + }, + UnconfiguredMacro::UserUnkeyedPlaceholder(var_name) => { + ConfiguredMacro::UserUnkeyedPlaceholder(var_name.clone()) + } + UnconfiguredMacro::UserKeyedPlaceholder(box (var_name, target, arg)) => { + ConfiguredMacro::UserKeyedPlaceholder(Box::new(( + var_name.clone(), + ctx.configure_target(target), + arg.clone(), + ))) + } + UnconfiguredMacro::Query(query) => { + ConfiguredMacro::Query(Box::new(query.configure(ctx)?)) + } + UnconfiguredMacro::Source(path) => ConfiguredMacro::Source(path.clone()), + UnconfiguredMacro::UnrecognizedMacro(macr) => { + ConfiguredMacro::UnrecognizedMacro(macr.clone()) + } + }) + } + + pub fn traverse<'a>( + &'a self, + traversal: &mut dyn CoercedAttrTraversal<'a>, + pkg: PackageLabel, + ) -> anyhow::Result<()> { + match self { + MacroBase::Location(l) | MacroBase::UserKeyedPlaceholder(box (_, l, _)) => { + traversal.dep(l.target()) + } + MacroBase::Exe { + label, + exec_dep: true, + } => traversal.exec_dep(label.target()), + MacroBase::Exe { + label, + exec_dep: false, + } => traversal.dep(label.target()), + MacroBase::Query(query) => query.traverse(traversal), + MacroBase::Source(path) => { + for x in path.inputs() { + traversal.input(SourcePathRef::new(pkg.dupe(), x))?; + } + Ok(()) + } + MacroBase::UserUnkeyedPlaceholder(_) | MacroBase::UnrecognizedMacro(..) => Ok(()), + } + } +} + +// These type aliases are just a little bit easier to use, the differentiating thing comes +// right at the beginning instead of at the end in a type param. +pub type UnconfiguredMacro = MacroBase; +pub type ConfiguredMacro = MacroBase; + +pub type UnconfiguredStringWithMacrosPart = StringWithMacrosPart; +pub type ConfiguredStringWithMacrosPart = StringWithMacrosPart; + +pub type UnconfiguredStringWithMacros = StringWithMacros; + +#[derive(Debug, Eq, PartialEq, Hash, Clone, Allocative, Display)] +#[display("{}", string_with_macros)] +pub struct ConfiguredStringWithMacros { + pub string_with_macros: StringWithMacros, + pub anon_target_compatible: bool, +} + +/// Display attempts to approximately reproduce the string that created a macro. +impl Display for MacroBase
<P>
    { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + // TODO: this should re-escape values in the args that need to be escaped to have returned that arg (it's not possible + // to tell where there were unnecessary escapes and it's not worth tracking that). + match self { + MacroBase::Location(l) => write!(f, "location {}", l), + MacroBase::Exe { label, exec_dep } => { + write!( + f, + "{} {}", + if *exec_dep { "exe" } else { "exe_target" }, + label + ) + } + MacroBase::Query(query) => Display::fmt(query, f), + MacroBase::Source(path) => write!(f, "src {}", path.path()), + MacroBase::UserUnkeyedPlaceholder(var) => write!(f, "{}", var), + MacroBase::UserKeyedPlaceholder(box (macro_type, target, arg)) => write!( + f, + "{} {}{}", + macro_type, + target, + if let Some(arg) = arg { + format!(" {}", arg) + } else { + "".to_owned() + } + ), + MacroBase::UnrecognizedMacro(box UnrecognizedMacro { macro_type, args }) => { + write!(f, "({}) {}", macro_type, args.join(" ")) + } + } + } +} + +impl Display for StringWithMacrosPart
<P>
    { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + StringWithMacrosPart::String(s) => write!(f, "{}", s), + StringWithMacrosPart::Macro(write_to_file, m) => { + write!(f, "$({}{})", if *write_to_file { "@" } else { "" }, m) + } + } + } +} + +impl StringWithMacrosPart { + pub(crate) fn configure( + &self, + ctx: &dyn AttrConfigurationContext, + ) -> anyhow::Result { + match self { + StringWithMacrosPart::String(val) => { + Ok(ConfiguredStringWithMacrosPart::String(val.clone())) + } + StringWithMacrosPart::Macro(write_to_file, unconfigured) => Ok( + ConfiguredStringWithMacrosPart::Macro(*write_to_file, unconfigured.configure(ctx)?), + ), + } + } +} + +impl Display for StringWithMacros
<P>
    { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::StringPart(part) => { + write!(f, "{}", part)?; + } + Self::ManyParts(ref parts) => { + for part in parts.iter() { + write!(f, "{}", part)?; + } + } + } + + Ok(()) + } +} + +/// Represents the type of a query placeholder (e.g. query_outputs, query_targets, query_targets_and_outputs). +#[derive(Debug, Eq, PartialEq, Hash, Clone, Allocative)] +pub enum QueryExpansion { + Output, + Target, + /// Holds the separator used between a target and its output. If not provided, they will be space-separated. + TargetAndOutput(Option), +} + +impl Display for QueryExpansion { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + QueryExpansion::Output => f.write_str("query_outputs ")?, + QueryExpansion::Target => f.write_str("query_targets ")?, + QueryExpansion::TargetAndOutput(Some(separator)) => { + write!(f, "query_targets_and_outputs '{}' ", separator)?; + } + QueryExpansion::TargetAndOutput(None) => f.write_str("query_targets_and_outputs ")?, + }; + + Ok(()) + } +} diff --git a/app/buck2_node/src/attrs/attr_type/arg/mod.rs b/app/buck2_node/src/attrs/attr_type/arg/mod.rs deleted file mode 100644 index ef7dc261050a5..0000000000000 --- a/app/buck2_node/src/attrs/attr_type/arg/mod.rs +++ /dev/null @@ -1,369 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -pub mod parser; - -use std::fmt::Display; - -use allocative::Allocative; -use buck2_core::provider::label::ConfiguredProvidersLabel; -use buck2_core::provider::label::ProvidersLabel; -use buck2_core::provider::label::ProvidersLabelMaybeConfigured; -use buck2_util::arc_str::ArcStr; -use derive_more::Display; -use dupe::Dupe; -use gazebo::prelude::SliceExt; -use static_assertions::assert_eq_size; - -use crate::attrs::attr_type::query::QueryMacroBase; -use crate::attrs::configuration_context::AttrConfigurationContext; -use crate::attrs::configured_traversal::ConfiguredAttrTraversal; -use crate::attrs::traversal::CoercedAttrTraversal; - -#[derive(Debug, Eq, PartialEq, Hash, Clone, Copy, Dupe, Allocative)] -pub struct ArgAttrType { - pub anon_target_compatible: bool, -} - -/// [StringWithMacros] is the core representation for an attrs.arg() (in all of it's coerced, configured, and resolved -/// forms). The parsed arg string is held as a sequence of parts (each part either a literal string or a macro). When -/// being added to a command line, these parts will be concattenated together and added as a single arg. -/// Each variant takes in a boolean which determines if the resolved form should be compatible with anon targets. -#[derive(Debug, Eq, PartialEq, Hash, Clone, Allocative)] -pub enum StringWithMacros { - /// Semantically, StringWithMacros::StringPart(s) is equivalent to - /// StringWithMacros::ManyParts(vec![StringWithMacrosPart::String(s)]). We special-case this - /// for memory efficiency to avoid allocating unnecessary vectors, since lone string parts are - /// very frequent. - StringPart(ArcStr), - // For resolution, we defer all work and simply alloc a ConfiguredStringWithMacros into the starlark - // context. 
This allows us to defer resolution work to the point that it is being added to a command - // line, but it requires that we can cheaply copy ConfiguredStringWithMacros. - ManyParts(Box<[StringWithMacrosPart
<P>
    ]>), -} - -// Avoid changing the size accidentally. -assert_eq_size!(StringWithMacros, [usize; 3]); - -impl StringWithMacros { - pub fn concat(self, items: impl Iterator>) -> anyhow::Result { - let mut parts = Vec::new(); - for x in std::iter::once(Ok(self)).chain(items) { - match x? { - Self::StringPart(x) => { - parts.push(StringWithMacrosPart::String(x)); - } - Self::ManyParts(xs) => { - parts.extend(xs.into_vec()); - } - } - } - Ok(Self::ManyParts(parts.into_boxed_slice())) - } -} - -impl StringWithMacros { - pub fn traverse<'a>( - &'a self, - traversal: &mut dyn ConfiguredAttrTraversal, - ) -> anyhow::Result<()> { - match self { - Self::StringPart(..) => {} - Self::ManyParts(ref parts) => { - for part in parts.iter() { - match part { - StringWithMacrosPart::String(_) => {} - StringWithMacrosPart::Macro(_, m) => { - m.traverse(traversal)?; - } - } - } - } - } - Ok(()) - } -} - -impl StringWithMacros { - pub(crate) fn configure( - &self, - ctx: &dyn AttrConfigurationContext, - anon_target_compatible: bool, - ) -> anyhow::Result { - match self { - Self::StringPart(part) => Ok(ConfiguredStringWithMacros { - string_with_macros: StringWithMacros::StringPart(part.clone()), - anon_target_compatible, - }), - Self::ManyParts(parts) => Ok(ConfiguredStringWithMacros { - string_with_macros: StringWithMacros::ManyParts( - parts.try_map(|p| p.configure(ctx))?.into_boxed_slice(), - ), - anon_target_compatible, - }), - } - } - - pub(crate) fn traverse<'a>( - &'a self, - traversal: &mut dyn CoercedAttrTraversal<'a>, - ) -> anyhow::Result<()> { - match self { - Self::StringPart(..) => {} - Self::ManyParts(ref parts) => { - for part in parts.iter() { - match part { - StringWithMacrosPart::String(_) => {} - StringWithMacrosPart::Macro(_, m) => { - m.traverse(traversal)?; - } - } - } - } - } - - Ok(()) - } -} - -#[derive(Debug, Eq, PartialEq, Hash, Clone, Allocative)] -pub enum StringWithMacrosPart { - String(ArcStr), - Macro(/* write_to_file */ bool, MacroBase
<P>
    ), -} - -assert_eq_size!(MacroBase, [usize; 3]); -assert_eq_size!(StringWithMacrosPart, [usize; 4]); - -#[derive(Debug, Eq, PartialEq, Hash, Clone, Allocative)] -pub struct UnrecognizedMacro { - pub macro_type: Box, - pub args: Box<[String]>, -} - -#[derive(Debug, Eq, PartialEq, Hash, Clone, Allocative)] -pub enum MacroBase { - Location(P), - /// Represents both $(exe) and $(exe_target) usages. - Exe { - label: P, - exec_dep: bool, - }, - /// A user-defined make variable (like `$(CXX)`). This will be resolved based on the propagated TemplateVariableInfos. - UserUnkeyedPlaceholder(Box), - - /// A user-defined macro (like `$(cxxppflags //some:target)`). This will be resolved based on the propagated TemplateVariableInfos. - UserKeyedPlaceholder(Box<(Box, P, Option>)>), - - Query(Box>), - - /// Right now, we defer error for unrecognized macros to the place where they are used. This just allows - /// us to progress further into a build and detect more issues. Once we have all (or most) of the buckv1 macros - /// recognized we'll remove this and make it an early error. - UnrecognizedMacro(Box), -} - -impl MacroBase { - pub fn traverse<'a>( - &'a self, - traversal: &mut dyn ConfiguredAttrTraversal, - ) -> anyhow::Result<()> { - // macros can't reference repo inputs (they only reference the outputs of other targets) - match self { - MacroBase::Location(l) | MacroBase::UserKeyedPlaceholder(box (_, l, _)) => { - traversal.dep(l) - } - MacroBase::Exe { - label, - exec_dep: true, - } => traversal.exec_dep(label), - MacroBase::Exe { - label, - exec_dep: false, - } => traversal.dep(label), - MacroBase::Query(query_macro) => query_macro.traverse(traversal), - MacroBase::UserUnkeyedPlaceholder(_) | MacroBase::UnrecognizedMacro(..) => Ok(()), - } - } -} - -impl MacroBase { - pub fn configure(&self, ctx: &dyn AttrConfigurationContext) -> anyhow::Result { - Ok(match self { - UnconfiguredMacro::Location(target) => { - ConfiguredMacro::Location(ctx.configure_target(target)) - } - UnconfiguredMacro::Exe { label, exec_dep } => ConfiguredMacro::Exe { - label: if *exec_dep { - ctx.configure_exec_target(label) - } else { - ctx.configure_target(label) - }, - exec_dep: *exec_dep, - }, - UnconfiguredMacro::UserUnkeyedPlaceholder(var_name) => { - ConfiguredMacro::UserUnkeyedPlaceholder(var_name.clone()) - } - UnconfiguredMacro::UserKeyedPlaceholder(box (var_name, target, arg)) => { - ConfiguredMacro::UserKeyedPlaceholder(Box::new(( - var_name.clone(), - ctx.configure_target(target), - arg.clone(), - ))) - } - UnconfiguredMacro::Query(query) => { - ConfiguredMacro::Query(Box::new(query.configure(ctx)?)) - } - UnconfiguredMacro::UnrecognizedMacro(macr) => { - ConfiguredMacro::UnrecognizedMacro(macr.clone()) - } - }) - } - - pub fn traverse<'a>( - &'a self, - traversal: &mut dyn CoercedAttrTraversal<'a>, - ) -> anyhow::Result<()> { - match self { - MacroBase::Location(l) | MacroBase::UserKeyedPlaceholder(box (_, l, _)) => { - traversal.dep(l.target()) - } - MacroBase::Exe { - label, - exec_dep: true, - } => traversal.exec_dep(label.target()), - MacroBase::Exe { - label, - exec_dep: false, - } => traversal.dep(label.target()), - MacroBase::Query(query) => query.traverse(traversal), - MacroBase::UserUnkeyedPlaceholder(_) | MacroBase::UnrecognizedMacro(..) => Ok(()), - } - } -} - -// These type aliases are just a little bit easier to use, the differentiating thing comes -// right at the beginning instead of at the end in a type param. 
-pub type UnconfiguredMacro = MacroBase; -pub type ConfiguredMacro = MacroBase; - -pub type UnconfiguredStringWithMacrosPart = StringWithMacrosPart; -pub type ConfiguredStringWithMacrosPart = StringWithMacrosPart; - -pub type UnconfiguredStringWithMacros = StringWithMacros; - -#[derive(Debug, Eq, PartialEq, Hash, Clone, Allocative, Display)] -#[display(fmt = "{}", string_with_macros)] -pub struct ConfiguredStringWithMacros { - pub string_with_macros: StringWithMacros, - pub anon_target_compatible: bool, -} - -/// Display attempts to approximately reproduce the string that created a macro. -impl Display for MacroBase
<P>
    { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - // TODO: this should re-escape values in the args that need to be escaped to have returned that arg (it's not possible - // to tell where there were unnecessary escapes and it's not worth tracking that). - match self { - MacroBase::Location(l) => write!(f, "location {}", l), - MacroBase::Exe { label, exec_dep } => { - write!( - f, - "{} {}", - if *exec_dep { "exe" } else { "exe_target" }, - label - ) - } - MacroBase::Query(query) => Display::fmt(query, f), - MacroBase::UserUnkeyedPlaceholder(var) => write!(f, "{}", var), - MacroBase::UserKeyedPlaceholder(box (macro_type, target, arg)) => write!( - f, - "{} {}{}", - macro_type, - target, - if let Some(arg) = arg { - format!(" {}", arg) - } else { - "".to_owned() - } - ), - MacroBase::UnrecognizedMacro(box UnrecognizedMacro { macro_type, args }) => { - write!(f, "({}) {}", macro_type, args.join(" ")) - } - } - } -} - -impl Display for StringWithMacrosPart
<P>
    { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - StringWithMacrosPart::String(s) => write!(f, "{}", s), - StringWithMacrosPart::Macro(write_to_file, m) => { - write!(f, "$({}{})", if *write_to_file { "@" } else { "" }, m) - } - } - } -} - -impl StringWithMacrosPart { - pub(crate) fn configure( - &self, - ctx: &dyn AttrConfigurationContext, - ) -> anyhow::Result { - match self { - StringWithMacrosPart::String(val) => { - Ok(ConfiguredStringWithMacrosPart::String(val.clone())) - } - StringWithMacrosPart::Macro(write_to_file, unconfigured) => Ok( - ConfiguredStringWithMacrosPart::Macro(*write_to_file, unconfigured.configure(ctx)?), - ), - } - } -} - -impl Display for StringWithMacros
<P>
    { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Self::StringPart(part) => { - write!(f, "{}", part)?; - } - Self::ManyParts(ref parts) => { - for part in parts.iter() { - write!(f, "{}", part)?; - } - } - } - - Ok(()) - } -} - -/// Represents the type of a query placeholder (e.g. query_outputs, query_targets, query_targets_and_outputs). -#[derive(Debug, Eq, PartialEq, Hash, Clone, Allocative)] -pub enum QueryExpansion { - Output, - Target, - /// Holds the separator used between a target and its output. If not provided, they will be space-separated. - TargetAndOutput(Option), -} - -impl Display for QueryExpansion { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - QueryExpansion::Output => f.write_str("query_outputs ")?, - QueryExpansion::Target => f.write_str("query_targets ")?, - QueryExpansion::TargetAndOutput(Some(separator)) => { - write!(f, "query_targets_and_outputs '{}' ", separator)?; - } - QueryExpansion::TargetAndOutput(None) => f.write_str("query_targets_and_outputs ")?, - }; - - Ok(()) - } -} diff --git a/app/buck2_node/src/attrs/attr_type/arg/parser.rs b/app/buck2_node/src/attrs/attr_type/arg/parser.rs index 78c08157c358d..5e812bedc2dd4 100644 --- a/app/buck2_node/src/attrs/attr_type/arg/parser.rs +++ b/app/buck2_node/src/attrs/attr_type/arg/parser.rs @@ -49,11 +49,12 @@ //! unquoted arg has seen more `(` than `)`, it will not be terminated by whitespace or `)`. //! //! We diverge from buckv1 in a handful of known ways. +//! //! 1. buck1 allows pretty much any characters to appear in a macro type. We restrict it to alphanumeric and `_`. -//! 2. buck1 disallows spaces entirely within unquoted args. This can be surprising. Unquoted args are generally used for -//! the query part of query macros, and in other contexts where buck accepts queries it allows whitespace. -//! Example, the string "$(query_outputs deps(//some:target, 3))" would be rejected by buck1 due to the space before the 3. //! +//! 2. buck1 disallows spaces entirely within unquoted args. This can be surprising. Unquoted args are generally used for +//! the query part of query macros, and in other contexts where buck accepts queries it allows whitespace. +//! Example, the string "$(query_outputs deps(//some:target, 3))" would be rejected by buck1 due to the space before the 3. //! //! Some examples: //! @@ -70,8 +71,6 @@ // character. use std::result; -use thiserror::Error; - #[derive(Debug, PartialEq)] pub struct ParsedMacro { /// Indicates that the value of the macro should be written to a file and the command should be passed `@` where @@ -101,7 +100,7 @@ impl ParsedArg { /// Parsed a string into a structure with macros and their types and args identified. pub fn parse_macros(input: &str) -> anyhow::Result { match read(input) { - Ok((remaining, value)) => { + Ok((value, remaining)) => { assert!( remaining.is_empty(), "somehow had remaining stuff after a successful macro parse. Had `{}` remaining.", @@ -133,7 +132,8 @@ pub fn parse_macros(input: &str) -> anyhow::Result { } } -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] +#[buck2(input)] enum ArgParseError { #[error("Unfinished quoted arg, expected a `{0}`")] UnfinishedQuotedArg(char), @@ -154,7 +154,7 @@ enum ArgParseError { type Error<'a> = (&'a str, ArgParseError); /// A Result includes both some parsed type and a slice of what remains to be parsed. 
-type Result<'a, T> = result::Result<(&'a str, T), Error<'a>>; +type Result<'a, T> = result::Result<(T, &'a str), Error<'a>>; // We diverge slightly from buckv1 here. // @@ -228,9 +228,9 @@ fn read_unquoted_arg(input: &str) -> Result { let (arg, rest) = (&input[0..pos], &input[pos..]); // The common case, by far, is that nothing is escaped. if !has_escapes { - Ok((rest, arg.to_owned())) + Ok((arg.to_owned(), rest)) } else { - Ok((rest, unescape(arg))) + Ok((unescape(arg), rest)) } } } @@ -261,9 +261,9 @@ fn read_quoted_arg(input: &str, quote: char) -> Result { let (arg, rest) = (&input[0..pos], &input[(pos + 1)..]); // The common case, by far, is that nothing is escaped. if !has_escapes { - Ok((rest, arg.to_owned())) + Ok((arg.to_owned(), rest)) } else { - Ok((rest, unescape(arg))) + Ok((unescape(arg), rest)) } } } @@ -291,7 +291,7 @@ fn read_macro_type(input: &str) -> Result { { Err((&input[pos..], ArgParseError::MacroTypeInvalidChar)) } else { - Ok((&input[pos..], macro_type.to_owned())) + Ok((macro_type.to_owned(), &input[pos..])) } } } @@ -310,23 +310,23 @@ fn read_macro(input: &str) -> Result { Some(working) => (true, working), None => (false, working), }; - let (working, macro_type) = read_macro_type(working)?; + let (macro_type, working) = read_macro_type(working)?; let mut working = consume_whitespace(working); let mut args = Vec::new(); while let Some(c) = working.chars().next() { if c == ')' { return Ok(( - &working[1..], ParsedMacro { write_to_file, macro_type, args, }, + &working[1..], )); } - let (remaining, arg) = read_macro_arg(working)?; + let (arg, remaining) = read_macro_arg(working)?; working = consume_whitespace(remaining); args.push(arg); } @@ -339,7 +339,17 @@ fn read_literal_opt(input: &str) -> Result>> { // even number of preceding `\` (as the first of each pair escapes the second). If there's // an odd number of preceding `\`, one of them should be removed. - let mut char_indices = input.char_indices(); + // Fast check that there are no macro refs in the string, which is the common case. + // We can do better than `memchr` (given our strings are short), but not much. 
+ match memchr::memchr2(b'$', b'\\', input.as_bytes()) { + None => Ok((Some(input.into()), "")), + Some(pos) => read_literal_opt_slow(input, pos), + } +} + +fn read_literal_opt_slow(input: &str, pos: usize) -> Result>> { + let mut char_indices = input.bytes().enumerate().skip(pos); + let mut indices_to_drop = Vec::new(); enum State { Searching, @@ -357,28 +367,28 @@ fn read_literal_opt(input: &str) -> Result>> { let mut pos = char_indices.next(); while let Some((idx, c)) = pos { state = match (state, c) { - (Searching, '\\') => Escaped, - (Dollar, '\\') => Escaped, - (EscapedDollar, '\\') => Escaped, + (Searching, b'\\') => Escaped, + (Dollar, b'\\') => Escaped, + (EscapedDollar, b'\\') => Escaped, - (Escaped, '$') => EscapedDollar, + (Escaped, b'$') => EscapedDollar, (Escaped, _) => Searching, - (Searching, '$') => Dollar, + (Searching, b'$') => Dollar, (Searching, _) => Searching, - (Dollar, '(') => { + (Dollar, b'(') => { // found a macro break; } - (EscapedDollar, '(') => { + (EscapedDollar, b'(') => { // Indicates we hit the sequence `\$(` and we need to drop the `\` indices_to_drop.push(idx - 2); EscapedMacro(1) } - (EscapedMacro(1), ')') => Searching, - (EscapedMacro(n), ')') => EscapedMacro(n - 1), - (EscapedMacro(n), '(') => EscapedMacro(n + 1), + (EscapedMacro(1), b')') => Searching, + (EscapedMacro(n), b')') => EscapedMacro(n - 1), + (EscapedMacro(n), b'(') => EscapedMacro(n + 1), (EscapedMacro(n), _) => EscapedMacro(n), // Note that '(' and '\' is handled for both of thes above. (Dollar, _) => Searching, @@ -402,20 +412,20 @@ fn read_literal_opt(input: &str) -> Result>> { literal.push_str(&input[(indices_to_drop.last().unwrap() + 1)..literal_end]); literal }; - Ok((&input[literal_end..], Some(literal.into_boxed_str()))) + Ok((Some(literal.into_boxed_str()), &input[literal_end..])) } else { - Ok((input, None)) + Ok((None, input)) } } fn read(input: &str) -> Result { - let (remaining, literal) = read_literal_opt(input)?; + let (literal, remaining) = read_literal_opt(input)?; let mut working = remaining; if working.is_empty() { return Ok(( - "", ParsedArg(vec![ArgItem::String(literal.unwrap_or_else(|| "".into()))]), + "", )); } @@ -425,7 +435,7 @@ fn read(input: &str) -> Result { } while !working.is_empty() { - let (remaining, literal) = read_literal_opt(working)?; + let (literal, remaining) = read_literal_opt(working)?; working = remaining; if let Some(literal) = literal { complex.push(ArgItem::String(literal)); @@ -433,13 +443,13 @@ fn read(input: &str) -> Result { if !working.is_empty() { // we must be at the beginning of a macro. 
- let (remaining, parsed_macro) = read_macro(working)?; + let (parsed_macro, remaining) = read_macro(working)?; working = remaining; complex.push(ArgItem::Macro(parsed_macro)); } } - Ok(("", ParsedArg(complex))) + Ok((ParsedArg(complex), "")) } #[cfg(test)] @@ -464,15 +474,15 @@ mod tests { #[test] fn test_unquoted() -> result::Result<(), OwnedError> { - assert_eq!(read_macro_arg("abcd ")?, (" ", "abcd".to_owned())); - assert_eq!(read_macro_arg("abcd)")?, (")", "abcd".to_owned())); + assert_eq!(read_macro_arg("abcd ")?, ("abcd".to_owned(), " ")); + assert_eq!(read_macro_arg("abcd)")?, ("abcd".to_owned(), ")")); assert_eq!( read_macro_arg("deps(//some:target))")?, - (")", "deps(//some:target)".to_owned()) + ("deps(//some:target)".to_owned(), ")") ); assert_eq!( read_macro_arg("deps(//some:target, 3, first_order_deps()) ")?, - (" ", "deps(//some:target, 3, first_order_deps())".to_owned()) + ("deps(//some:target, 3, first_order_deps())".to_owned(), " ") ); Ok(()) @@ -480,11 +490,11 @@ mod tests { #[test] fn test_quoted() -> result::Result<(), OwnedError> { - assert_eq!(read_macro_arg("' abcd )'")?, ("", " abcd )".to_owned())); - assert_eq!(read_macro_arg("' ab%cd )'")?, ("", " ab%cd )".to_owned())); + assert_eq!(read_macro_arg("' abcd )'")?, (" abcd )".to_owned(), "")); + assert_eq!(read_macro_arg("' ab%cd )'")?, (" ab%cd )".to_owned(), "")); assert_eq!( read_macro_arg(r#"" \"\$\\ ")"#)?, - (")", r#" "$\ "#.to_owned()) + (r#" "$\ "#.to_owned(), ")") ); Ok(()) @@ -492,11 +502,11 @@ mod tests { #[test] fn test_macro_type() -> result::Result<(), OwnedError> { - assert_eq!(read_macro_type("name ")?, (" ", "name".to_owned())); - assert_eq!(read_macro_type("name)")?, (")", "name".to_owned())); + assert_eq!(read_macro_type("name ")?, ("name".to_owned(), " ")); + assert_eq!(read_macro_type("name)")?, ("name".to_owned(), ")")); assert_eq!( read_macro_type("platform-name ")?, - (" ", "platform-name".to_owned()) + ("platform-name".to_owned(), " ") ); assert!(read_macro_type("platform%name ").is_err()); assert!(read_macro_type("platform$name ").is_err()); @@ -505,6 +515,11 @@ mod tests { #[test] fn test_parse_macros() -> anyhow::Result<()> { + assert_eq!( + ParsedArg(vec![ArgItem::String("contains no macros".into())]), + parse_macros(r#"contains no macros"#)? + ); + assert_eq!( ParsedArg(vec![ArgItem::String("contains no $(macros)".into())]), parse_macros(r#"contains no \$(macros)"#)? 
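
The parser changes above are mostly mechanical: `Result<'a, T>` flips from `(&'a str, T)` to `(T, &'a str)`, so every reader returns the parsed value first and the unconsumed input second, and `read_literal_opt` gains a `memchr` fast path for the common case of a string containing no `$` or `\` at all. A self-contained sketch of the reordered convention (illustrative only; `read_macro_type_like` is a stand-in, not a function from this diff):

type PResult<'a, T> = Result<(T, &'a str), String>;

fn read_macro_type_like(input: &str) -> PResult<'_, String> {
    // Accept alphanumerics plus `_` and `-`, mirroring the restriction the
    // module doc describes relative to buck1's anything-goes macro types.
    let end = input
        .find(|c: char| !(c.is_alphanumeric() || c == '_' || c == '-'))
        .unwrap_or(input.len());
    if end == 0 {
        Err(format!("expected a macro type at `{input}`"))
    } else {
        // Parsed value first, remaining input second, matching the new Result.
        Ok((input[..end].to_owned(), &input[end..]))
    }
}

fn main() {
    // Compare the updated tests: read_macro_type("name ") == Ok(("name".to_owned(), " ")).
    let (macro_type, rest) = read_macro_type_like("query_outputs deps(//some:target))").unwrap();
    assert_eq!(macro_type, "query_outputs");
    assert_eq!(rest, " deps(//some:target))");
}
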
diff --git a/app/buck2_node/src/attrs/attr_type/attr_config.rs b/app/buck2_node/src/attrs/attr_type/attr_config.rs index 8b501bfe615fe..cd1e2055036e1 100644 --- a/app/buck2_node/src/attrs/attr_type/attr_config.rs +++ b/app/buck2_node/src/attrs/attr_type/attr_config.rs @@ -9,7 +9,7 @@ use std::fmt::Display; -use buck2_core::buck_path::path::BuckPathRef; +use buck2_core::package::source_path::SourcePathRef; use dupe::Dupe; use either::Either; use serde_json::to_value; @@ -39,14 +39,15 @@ impl ToJsonWithContext for ConfiguredAttr { ConfiguredAttr::ExplicitConfiguredDep(e) => e.to_json(), ConfiguredAttr::SplitTransitionDep(e) => e.to_json(), ConfiguredAttr::ConfigurationDep(e) => Ok(to_value(e.to_string())?), - ConfiguredAttr::PluginDep(e) => Ok(to_value(e.0.to_string())?), + ConfiguredAttr::PluginDep(e, _) => Ok(to_value(e.to_string())?), ConfiguredAttr::Dep(e) => Ok(to_value(e.to_string())?), ConfiguredAttr::SourceLabel(e) => Ok(to_value(e.to_string())?), ConfiguredAttr::Label(e) => Ok(to_value(e.to_string())?), ConfiguredAttr::Arg(e) => Ok(to_value(e.to_string())?), - ConfiguredAttr::Query(e) => Ok(to_value(e.query())?), + ConfiguredAttr::Query(e) => Ok(to_value(&e.query.query)?), ConfiguredAttr::SourceFile(e) => Ok(to_value(source_file_display(ctx, e).to_string())?), ConfiguredAttr::Metadata(m) => Ok(m.to_value()), + ConfiguredAttr::TargetModifiers(m) => Ok(m.to_value()), } } } @@ -67,14 +68,15 @@ impl AnyMatches for ConfiguredAttr { ConfiguredAttr::ExplicitConfiguredDep(e) => e.any_matches(filter), ConfiguredAttr::SplitTransitionDep(e) => e.any_matches(filter), ConfiguredAttr::ConfigurationDep(e) => filter(&e.to_string()), - ConfiguredAttr::PluginDep(e) => filter(&e.0.to_string()), + ConfiguredAttr::PluginDep(e, _) => filter(&e.to_string()), ConfiguredAttr::Dep(e) => filter(&e.to_string()), ConfiguredAttr::SourceLabel(e) => filter(&e.to_string()), ConfiguredAttr::Label(e) => filter(&e.to_string()), ConfiguredAttr::Arg(e) => filter(&e.to_string()), - ConfiguredAttr::Query(e) => filter(e.query()), + ConfiguredAttr::Query(e) => filter(&e.query.query), ConfiguredAttr::SourceFile(e) => filter(&e.path().to_string()), ConfiguredAttr::Metadata(e) => e.any_matches(filter), + ConfiguredAttr::TargetModifiers(e) => e.any_matches(filter), } } } @@ -96,7 +98,7 @@ pub(crate) fn source_file_display<'a>( source_file: &'a CoercedPath, ) -> impl Display + 'a { match &ctx.package { - Some(pkg) => Either::Left(BuckPathRef::new(pkg.dupe(), source_file.path())), + Some(pkg) => Either::Left(SourcePathRef::new(pkg.dupe(), source_file.path())), None => { // This code is unreachable, but better this than panic. Either::Right(format!("/{}", source_file.path())) diff --git a/app/buck2_node/src/attrs/attr_type/configuration_dep.rs b/app/buck2_node/src/attrs/attr_type/configuration_dep.rs index 61ae3baebfa85..961d58bddd33f 100644 --- a/app/buck2_node/src/attrs/attr_type/configuration_dep.rs +++ b/app/buck2_node/src/attrs/attr_type/configuration_dep.rs @@ -8,11 +8,11 @@ */ use allocative::Allocative; -use buck2_core::target::label::TargetLabel; use dupe::Dupe; use crate::attrs::configuration_context::AttrConfigurationContext; use crate::attrs::configured_attr::ConfiguredAttr; +use crate::configuration::resolved::ConfigurationSettingKey; /// A configuration dep attribute accepts a target as a value. 
This is different from /// a dep in that the values themselves never undergo configuration and appear as bare @@ -32,8 +32,8 @@ pub struct ConfigurationDepAttrType; impl ConfigurationDepAttrType { pub(crate) fn configure( _ctx: &dyn AttrConfigurationContext, - label: &TargetLabel, + label: &ConfigurationSettingKey, ) -> anyhow::Result { - Ok(ConfiguredAttr::ConfigurationDep(Box::new(label.dupe()))) + Ok(ConfiguredAttr::ConfigurationDep(label.dupe())) } } diff --git a/app/buck2_node/src/attrs/attr_type/configured_dep.rs b/app/buck2_node/src/attrs/attr_type/configured_dep.rs index 6f454bab7ce67..f1d0d37f5e212 100644 --- a/app/buck2_node/src/attrs/attr_type/configured_dep.rs +++ b/app/buck2_node/src/attrs/attr_type/configured_dep.rs @@ -10,7 +10,7 @@ use allocative::Allocative; use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_core::provider::label::ProvidersLabel; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use dupe::Dupe; use crate::attrs::attr_type::dep::ExplicitConfiguredDepMaybeConfigured; @@ -50,7 +50,7 @@ impl ExplicitConfiguredDepAttrType { /// Represents the value of an `attrs.configured_dep()` /// in its unconfigured form. #[derive(derive_more::Display, Debug, Hash, PartialEq, Eq, Clone, Allocative)] -#[display(fmt = "({}, {})", label, platform)] +#[display("({}, {})", label, platform)] pub struct UnconfiguredExplicitConfiguredDep { pub attr_type: ExplicitConfiguredDepAttrType, pub label: ProvidersLabel, @@ -60,7 +60,7 @@ pub struct UnconfiguredExplicitConfiguredDep { /// Represents the value of an `attrs.configured_dep()` /// in its configured form. #[derive(derive_more::Display, Hash, PartialEq, Eq, Debug, Clone, Allocative)] -#[display(fmt = "{}", label)] +#[display("{}", label)] pub struct ConfiguredExplicitConfiguredDep { pub attr_type: ExplicitConfiguredDepAttrType, pub label: ConfiguredProvidersLabel, diff --git a/app/buck2_node/src/attrs/attr_type/dep.rs b/app/buck2_node/src/attrs/attr_type/dep.rs index 57ebf7334d4f7..2095891c620c9 100644 --- a/app/buck2_node/src/attrs/attr_type/dep.rs +++ b/app/buck2_node/src/attrs/attr_type/dep.rs @@ -27,10 +27,6 @@ use crate::attrs::configured_traversal::ConfiguredAttrTraversal; use crate::attrs::traversal::CoercedAttrTraversal; use crate::provider_id_set::ProviderIdSet; -// Just a placeholder for what a label should resolve to. -#[derive(Debug)] -pub struct DefaultProvider {} - /// How configuration is changed when configuring a dep. #[derive(Debug, Eq, PartialEq, Hash, Clone, Dupe, Allocative)] pub enum DepAttrTransition { @@ -118,7 +114,7 @@ impl DepAttrType { ) -> anyhow::Result { let configured_label = match &self.transition { DepAttrTransition::Identity(..) 
=> ctx.configure_target(label), - DepAttrTransition::Exec => ctx.configure_exec_target(label), + DepAttrTransition::Exec => ctx.configure_exec_target(label)?, DepAttrTransition::Toolchain => ctx.configure_toolchain_target(label), DepAttrTransition::Transition(tr) => ctx.configure_transition_target(label, tr)?, }; diff --git a/app/buck2_node/src/attrs/attr_type/dict.rs b/app/buck2_node/src/attrs/attr_type/dict.rs index ae6a94d17e4f0..bc6a7b9074954 100644 --- a/app/buck2_node/src/attrs/attr_type/dict.rs +++ b/app/buck2_node/src/attrs/attr_type/dict.rs @@ -13,6 +13,7 @@ use std::ops::Deref; use allocative::Allocative; use buck2_util::arc_str::ArcSlice; +use display_container::fmt_keyed_container; use serde_json::Value; use crate::attrs::attr_type::any_matches::AnyMatches; @@ -59,15 +60,15 @@ impl Deref for DictLiteral { impl AttrDisplayWithContext for DictLiteral { fn fmt(&self, ctx: &AttrFmtContext, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "{{")?; - for (i, (k, v)) in self.0.iter().enumerate() { - if i != 0 { - write!(f, ",")?; - } - write!(f, "{}: {}", k.as_display(ctx), v.as_display(ctx))?; - } - write!(f, "}}")?; - Ok(()) + fmt_keyed_container( + f, + "{", + "}", + ": ", + self.0 + .iter() + .map(|(k, v)| (k.as_display(ctx), v.as_display(ctx))), + ) } } diff --git a/app/buck2_node/src/attrs/attr_type/enumeration.rs b/app/buck2_node/src/attrs/attr_type/enumeration.rs index 01979248e80cd..7567768542982 100644 --- a/app/buck2_node/src/attrs/attr_type/enumeration.rs +++ b/app/buck2_node/src/attrs/attr_type/enumeration.rs @@ -19,7 +19,7 @@ pub struct EnumAttrType { pub variants: OrderedSet, } -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum EnumAttrError { #[error("enum.attr() variant names must all be lowercase, got `{0}`")] NotLowercase(String), diff --git a/app/buck2_node/src/attrs/attr_type/label.rs b/app/buck2_node/src/attrs/attr_type/label.rs index ccb9f8ea2b907..6c97a391c13cb 100644 --- a/app/buck2_node/src/attrs/attr_type/label.rs +++ b/app/buck2_node/src/attrs/attr_type/label.rs @@ -22,6 +22,6 @@ impl LabelAttrType { ctx: &dyn AttrConfigurationContext, label: &ProvidersLabel, ) -> anyhow::Result { - Ok(ConfiguredAttr::Label(Box::new(ctx.configure_target(label)))) + Ok(ConfiguredAttr::Label(ctx.configure_target(label))) } } diff --git a/app/buck2_node/src/attrs/attr_type/list.rs b/app/buck2_node/src/attrs/attr_type/list.rs index fed71f55a93e9..222eb7e391ea3 100644 --- a/app/buck2_node/src/attrs/attr_type/list.rs +++ b/app/buck2_node/src/attrs/attr_type/list.rs @@ -13,6 +13,7 @@ use std::ops::Deref; use allocative::Allocative; use buck2_util::arc_str::ArcSlice; +use display_container::fmt_container; use gazebo::prelude::SliceExt; use serde_json::to_value; use serde_json::Value; @@ -20,6 +21,7 @@ use serde_json::Value; use crate::attrs::attr_type::any_matches::AnyMatches; use crate::attrs::attr_type::AttrType; use crate::attrs::display::AttrDisplayWithContext; +use crate::attrs::display::AttrDisplayWithContextExt; use crate::attrs::fmt_context::AttrFmtContext; use crate::attrs::json::ToJsonWithContext; @@ -51,15 +53,7 @@ impl Deref for ListLiteral { impl AttrDisplayWithContext for ListLiteral { fn fmt(&self, ctx: &AttrFmtContext, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "[")?; - for (i, v) in self.0.iter().enumerate() { - if i != 0 { - write!(f, ",")?; - } - AttrDisplayWithContext::fmt(v, ctx, f)?; - } - write!(f, "]")?; - Ok(()) + fmt_container(f, "[", "]", self.0.iter().map(|v| v.as_display(ctx))) } } diff --git 
a/app/buck2_node/src/attrs/attr_type/mod.rs b/app/buck2_node/src/attrs/attr_type/mod.rs deleted file mode 100644 index 89013f40ceb57..0000000000000 --- a/app/buck2_node/src/attrs/attr_type/mod.rs +++ /dev/null @@ -1,362 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::fmt; -use std::fmt::Display; -use std::sync::Arc; - -use allocative::Allocative; -use buck2_core::configuration::transition::id::TransitionId; -use buck2_core::plugins::PluginKind; -use buck2_core::plugins::PluginKindSet; -use dupe::Dupe; - -use crate::attrs::attr_type::any::AnyAttrType; -use crate::attrs::attr_type::arg::ArgAttrType; -use crate::attrs::attr_type::bool::BoolAttrType; -use crate::attrs::attr_type::configuration_dep::ConfigurationDepAttrType; -use crate::attrs::attr_type::configured_dep::ExplicitConfiguredDepAttrType; -use crate::attrs::attr_type::dep::DepAttrTransition; -use crate::attrs::attr_type::dep::DepAttrType; -use crate::attrs::attr_type::dict::DictAttrType; -use crate::attrs::attr_type::enumeration::EnumAttrType; -use crate::attrs::attr_type::int::IntAttrType; -use crate::attrs::attr_type::label::LabelAttrType; -use crate::attrs::attr_type::list::ListAttrType; -use crate::attrs::attr_type::metadata::MetadataAttrType; -use crate::attrs::attr_type::one_of::OneOfAttrType; -use crate::attrs::attr_type::option::OptionAttrType; -use crate::attrs::attr_type::plugin_dep::PluginDepAttrType; -use crate::attrs::attr_type::query::QueryAttrType; -use crate::attrs::attr_type::source::SourceAttrType; -use crate::attrs::attr_type::split_transition_dep::SplitTransitionDepAttrType; -use crate::attrs::attr_type::string::StringAttrType; -use crate::attrs::attr_type::tuple::TupleAttrType; -use crate::attrs::attr_type::visibility::VisibilityAttrType; -use crate::attrs::attr_type::within_view::WithinViewAttrType; -use crate::provider_id_set::ProviderIdSet; - -pub mod any; -pub mod any_matches; -pub mod arg; -pub mod attr_config; -pub mod attr_like; -pub mod bool; -pub mod configuration_dep; -pub mod configured_dep; -pub mod default_only; -pub mod dep; -pub mod dict; -pub mod enumeration; -pub mod int; -pub mod label; -pub mod list; -pub mod metadata; -pub mod one_of; -pub mod option; -pub mod plugin_dep; -pub mod query; -pub mod source; -pub mod split_transition_dep; -pub mod string; -pub mod tuple; -pub mod visibility; -pub mod within_view; - -#[derive(Clone, Dupe, Debug, Hash, Eq, PartialEq, Allocative)] -pub struct AttrType(pub Arc); - -#[derive(Debug, Hash, Eq, PartialEq, Allocative)] -pub enum AttrTypeInner { - Any(AnyAttrType), - Arg(ArgAttrType), - ConfigurationDep(ConfigurationDepAttrType), - ConfiguredDep(ExplicitConfiguredDepAttrType), - Bool(BoolAttrType), - Int(IntAttrType), - Dep(DepAttrType), - Dict(DictAttrType), - List(ListAttrType), - Tuple(TupleAttrType), - OneOf(OneOfAttrType), - Option(OptionAttrType), - PluginDep(PluginDepAttrType), - Query(QueryAttrType), - Source(SourceAttrType), - SplitTransitionDep(SplitTransitionDepAttrType), - String(StringAttrType), - Enum(EnumAttrType), - Label(LabelAttrType), - Visibility(VisibilityAttrType), - WithinView(WithinViewAttrType), - Metadata(MetadataAttrType), -} - -impl AttrType { - pub fn fmt_with_default( - &self, - f: &mut fmt::Formatter<'_>, - default: Option<&str>, - ) 
-> fmt::Result { - let mut attr = |s| match default { - None => write!(f, "attrs.{}()", s), - Some(default) => write!(f, "attrs.{}(default={})", s, default), - }; - let arg = || match default { - None => String::new(), - Some(x) => format!(", default={}", x), - }; - - match &*self.0 { - AttrTypeInner::Any(_) => attr("any"), - AttrTypeInner::Arg(_) => attr("arg"), - AttrTypeInner::ConfigurationDep(_) => attr("configuration_dep"), - AttrTypeInner::ConfiguredDep(_) => attr("configured_dep"), - AttrTypeInner::PluginDep(_) => attr("plugin_dep"), - AttrTypeInner::Bool(_) => attr("bool"), - AttrTypeInner::Int(_) => attr("int"), - AttrTypeInner::Dep(_) => attr("dep"), - AttrTypeInner::Query(_) => attr("query"), - AttrTypeInner::Dict(x) => x.fmt_with_arg(f, &arg()), - AttrTypeInner::List(x) => x.fmt_with_arg(f, &arg()), - AttrTypeInner::Tuple(x) => x.fmt_with_arg(f, &arg()), - AttrTypeInner::OneOf(x) => x.fmt_with_arg(f, &arg()), - AttrTypeInner::Option(x) => x.fmt_with_arg(f, &arg()), - AttrTypeInner::Enum(x) => x.fmt_with_arg(f, &arg()), - AttrTypeInner::Source(_) => attr("source"), - AttrTypeInner::SplitTransitionDep(_) => attr("split_transition_dep"), - AttrTypeInner::String(_) => attr("string"), - AttrTypeInner::Label(_) => attr("label"), - AttrTypeInner::Visibility(_) => attr("visibility"), - AttrTypeInner::WithinView(_) => attr("within_view"), - AttrTypeInner::Metadata(_) => attr("metadata"), - } - } - - pub fn any() -> Self { - Self(Arc::new(AttrTypeInner::Any(AnyAttrType))) - } - - /// An arg attribute. Args are similar to strings, but have built in support - /// for string parameter macros and make variables. Command line - /// builders used in rule implementations use args (and so an arg attribute - /// can be directly added to them). - /// - /// Takes in an anon_target_compatible flag, which indicates whether the arg - /// can be passed into anon targets. There is a slight memory hit when using - /// this flag. - pub fn arg(anon_target_compatible: bool) -> Self { - Self(Arc::new(AttrTypeInner::Arg(ArgAttrType { - anon_target_compatible, - }))) - } - - pub fn enumeration(variants: Vec) -> anyhow::Result { - Ok(Self(Arc::new(AttrTypeInner::Enum(EnumAttrType::new( - variants, - )?)))) - } - - pub fn bool() -> Self { - Self(Arc::new(AttrTypeInner::Bool(BoolAttrType))) - } - - pub fn int() -> Self { - Self(Arc::new(AttrTypeInner::Int(IntAttrType))) - } - - pub fn configuration_dep() -> Self { - Self(Arc::new(AttrTypeInner::ConfigurationDep( - ConfigurationDepAttrType, - ))) - } - - /// A TargetLabel attribute optionally with a specific provider/providers - /// that are expected. - /// - /// If `required_providers` is non-empty, the dependency must return those providers - /// from its implementation function. Otherwise an error will result at resolution time. - pub fn dep(required_providers: ProviderIdSet, plugin_kinds: PluginKindSet) -> Self { - Self(Arc::new(AttrTypeInner::Dep(DepAttrType::new( - required_providers, - DepAttrTransition::Identity(plugin_kinds), - )))) - } - - /// An execution dependency attribute optionally with a specific provider/providers - /// that are expected. - /// - /// If `required_providers` is non-empty, the dependency must return those providers - /// from its implementation function. Otherwise an error will result at resolution time. 
- pub fn exec_dep(required_providers: ProviderIdSet) -> Self { - Self(Arc::new(AttrTypeInner::Dep(DepAttrType::new( - required_providers, - DepAttrTransition::Exec, - )))) - } - - /// A toolchain dependency attribute optionally with a specific provider/providers - /// that are expected. - /// - /// If `required_providers` is non-empty, the dependency must return those providers - /// from its implementation function. Otherwise an error will result at resolution time. - pub fn toolchain_dep(required_providers: ProviderIdSet) -> Self { - Self(Arc::new(AttrTypeInner::Dep(DepAttrType::new( - required_providers, - DepAttrTransition::Toolchain, - )))) - } - - /// An a dependency attribute which changes the configuration optionally with a specific - /// provider/providers that are expected. - /// - /// If `required_providers` is non-empty, the dependency must return those providers - /// from its implementation function. Otherwise an error will result at resolution time. - pub fn transition_dep(required_providers: ProviderIdSet, cfg: Arc) -> Self { - Self(Arc::new(AttrTypeInner::Dep(DepAttrType::new( - required_providers, - DepAttrTransition::Transition(cfg), - )))) - } - - pub fn configured_dep(required_providers: ProviderIdSet) -> Self { - Self(Arc::new(AttrTypeInner::ConfiguredDep( - ExplicitConfiguredDepAttrType { required_providers }, - ))) - } - - pub fn split_transition_dep(required_providers: ProviderIdSet, cfg: Arc) -> Self { - Self(Arc::new(AttrTypeInner::SplitTransitionDep( - SplitTransitionDepAttrType::new(required_providers, cfg), - ))) - } - - pub fn plugin_dep(kind: PluginKind) -> Self { - Self(Arc::new(AttrTypeInner::PluginDep(PluginDepAttrType::new( - kind, - )))) - } - - /// A dict attribute containing keys and values of the specified types. - pub fn dict(key: AttrType, value: AttrType, sorted: bool) -> Self { - Self(Arc::new(AttrTypeInner::Dict(DictAttrType::new( - key, value, sorted, - )))) - } - - /// A list attribute containing items of some inner type. - pub fn list(inner: AttrType) -> Self { - Self(Arc::new(AttrTypeInner::List(ListAttrType::new(inner)))) - } - - pub fn tuple(xs: Vec) -> Self { - Self(Arc::new(AttrTypeInner::Tuple(TupleAttrType::new(xs)))) - } - - pub fn one_of(xs: Vec) -> Self { - Self(Arc::new(AttrTypeInner::OneOf(OneOfAttrType::new(xs)))) - } - - pub fn option(value: AttrType) -> Self { - Self(Arc::new(AttrTypeInner::Option(OptionAttrType::new(value)))) - } - - pub fn query() -> Self { - Self(Arc::new(AttrTypeInner::Query(QueryAttrType::new( - DepAttrType::new( - ProviderIdSet::EMPTY, - DepAttrTransition::Identity(PluginKindSet::EMPTY), - ), - )))) - } - - // A file attribute. This will accept paths or targets like - /// `//some:target[inner]`. When contained within a list, one item may - /// expand to multiple (e.g. an output group or a lazy glob). - pub fn source(allow_directory: bool) -> Self { - Self(Arc::new(AttrTypeInner::Source(SourceAttrType { - allow_directory, - }))) - } - - /// A string attribute. For flags passed to a command, an arg() attr is - /// preferred to support macro and make variable substitution. 
- pub fn string() -> Self { - Self(Arc::new(AttrTypeInner::String(StringAttrType))) - } - - pub fn label() -> Self { - Self(Arc::new(AttrTypeInner::Label(LabelAttrType))) - } - - pub(crate) fn visibility() -> Self { - Self(Arc::new(AttrTypeInner::Visibility(VisibilityAttrType))) - } - - pub(crate) fn within_view() -> Self { - Self(Arc::new(AttrTypeInner::WithinView(WithinViewAttrType))) - } - - pub(crate) fn metadata() -> Self { - Self(Arc::new(AttrTypeInner::Metadata(MetadataAttrType))) - } - - /// Used when we first detect that concatenation is going to happen for an attr - /// while loading a build file. Returning false here will make us provide an error - /// during the loading phase at the point that the concatenation happens. - /// - /// In some cases, we can't detect that the concattenation isn't allowed at this - /// point and can only provide an error when performing the actual concatenation. - pub fn supports_concat(&self) -> bool { - match &*self.0 { - AttrTypeInner::Bool(_) - | AttrTypeInner::Query(_) - | AttrTypeInner::Source(_) - | AttrTypeInner::ConfigurationDep(_) - | AttrTypeInner::ConfiguredDep(_) - | AttrTypeInner::PluginDep(_) - | AttrTypeInner::Int(_) - | AttrTypeInner::Dep(_) - | AttrTypeInner::Tuple(_) - | AttrTypeInner::SplitTransitionDep(_) - | AttrTypeInner::Label(_) - | AttrTypeInner::Enum(_) - | AttrTypeInner::Visibility(_) - | AttrTypeInner::WithinView(_) - | AttrTypeInner::Metadata(_) => false, - AttrTypeInner::Any(_) - | AttrTypeInner::Arg(_) - | AttrTypeInner::Dict(_) - | AttrTypeInner::List(_) - | AttrTypeInner::String(_) => true, - AttrTypeInner::Option(inner) => inner.inner.supports_concat(), - // Reject if none of the inner types support concat. Mismatched types are rejected later. - AttrTypeInner::OneOf(inner) => inner.any_supports_concat(), - } - } - - /// If type is option, return the element type. - /// This function is needed because we store `Some` of coerced and configured attributes - /// without indication they are `Some`. In other words, `[""]` is coerced and configured - /// identically to both `attrs.list(attrs.string())` - /// and `attrs.option(attrs.list(attrs.string()))`. - pub(crate) fn unwrap_if_option(&self) -> &AttrType { - match &*self.0 { - AttrTypeInner::Option(inner) => &inner.inner, - _ => self, - } - } -} - -/// Invariant: All these displays look like function calls, so follow the pattern `attrs.foo(...)`. 
-impl Display for AttrType { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.fmt_with_default(f, None) - } -} diff --git a/app/buck2_node/src/attrs/attr_type/one_of.rs b/app/buck2_node/src/attrs/attr_type/one_of.rs index 0c3700adf3f40..e4077238683f3 100644 --- a/app/buck2_node/src/attrs/attr_type/one_of.rs +++ b/app/buck2_node/src/attrs/attr_type/one_of.rs @@ -10,15 +10,10 @@ use std::fmt; use allocative::Allocative; +use buck2_error::BuckErrorContext; use crate::attrs::attr_type::AttrType; -#[derive(Debug, thiserror::Error)] -enum OneOfAttrTypeError { - #[error("Oneof index ({0}) out of bounds (internal error)")] - IndexOutOfBounds(u32), -} - #[derive(Debug, Eq, PartialEq, Hash, Allocative)] pub struct OneOfAttrType { pub xs: Vec, @@ -45,8 +40,8 @@ impl OneOfAttrType { } pub(crate) fn get(&self, i: u32) -> anyhow::Result<&AttrType> { - self.xs - .get(i as usize) - .ok_or_else(|| OneOfAttrTypeError::IndexOutOfBounds(i).into()) + self.xs.get(i as usize).with_internal_error_anyhow(|| { + format!("Oneof index ({i}) out of bounds (internal error)") + }) } } diff --git a/app/buck2_node/src/attrs/attr_type/query.rs b/app/buck2_node/src/attrs/attr_type/query.rs index 3444ae0ff141f..dc5bac56b9e93 100644 --- a/app/buck2_node/src/attrs/attr_type/query.rs +++ b/app/buck2_node/src/attrs/attr_type/query.rs @@ -23,6 +23,7 @@ use crate::attrs::configured_traversal::ConfiguredAttrTraversal; use crate::attrs::traversal::CoercedAttrTraversal; use crate::provider_id_set::ProviderIdSet; +/// Attribute type created with `attrs.query(...)`. #[derive(Debug, Eq, PartialEq, Hash, Allocative)] pub struct QueryAttrType { pub inner: DepAttrType, @@ -34,18 +35,13 @@ impl QueryAttrType { } } +/// Attribute value of type `attrs.query(...)`. #[derive(Debug, Eq, PartialEq, Hash, Clone, Allocative)] pub struct QueryAttr { pub providers: ProviderIdSet, pub query: QueryAttrBase
<P>
, } -impl<P: ProvidersLabelMaybeConfigured> QueryAttr
<P>
    { - pub fn query(&self) -> &str { - self.query.query() - } -} - impl QueryAttr { pub(crate) fn traverse<'a>( &'a self, @@ -74,6 +70,7 @@ impl QueryAttr { } } +/// Query in target node attribute, like `$(query_outputs ...)`. #[derive(Debug, Eq, PartialEq, Hash, Clone, Allocative)] pub struct QueryMacroBase { pub expansion_type: QueryExpansion, @@ -82,7 +79,7 @@ pub struct QueryMacroBase { impl Display for QueryMacroBase
<P>
    { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{} {}", &self.expansion_type, self.query.query())?; + write!(f, "{} {}", &self.expansion_type, self.query.query)?; Ok(()) } } @@ -115,18 +112,17 @@ impl QueryMacroBase { } } +/// Query in target node. +/// +/// Used in either: +/// * Attribute created with `attrs.query(...)` +/// * Query inside macros like `$(query_targets ...)` #[derive(Debug, Eq, PartialEq, Hash, Clone, Allocative)] pub struct QueryAttrBase { pub query: String, pub resolved_literals: ResolvedQueryLiterals

    , } -impl QueryAttrBase

    { - pub fn query(&self) -> &str { - &self.query - } -} - #[derive(Debug, Eq, PartialEq, Hash, Clone, Allocative)] pub struct ResolvedQueryLiterals(pub BTreeMap); @@ -139,7 +135,7 @@ impl QueryAttrBase { for dep in self.resolved_literals.0.values() { traversal.dep(dep)?; } - traversal.query_macro(&self.query, &self.resolved_literals)?; + traversal.query(&self.query, &self.resolved_literals)?; Ok(()) } } diff --git a/app/buck2_node/src/attrs/attr_type/split_transition_dep.rs b/app/buck2_node/src/attrs/attr_type/split_transition_dep.rs index 3165e0e79726f..fe9005183b800 100644 --- a/app/buck2_node/src/attrs/attr_type/split_transition_dep.rs +++ b/app/buck2_node/src/attrs/attr_type/split_transition_dep.rs @@ -97,7 +97,7 @@ impl SplitTransitionDepMaybeConfigured for ConfiguredSplitTransitionDep { } #[derive(derive_more::Display, Debug, Hash, PartialEq, Eq, Clone, Allocative)] -#[display(fmt = "{}", label)] +#[display("{}", label)] pub struct SplitTransitionDep { pub label: ProvidersLabel, pub transition: Arc, diff --git a/app/buck2_node/src/attrs/attr_type/string.rs b/app/buck2_node/src/attrs/attr_type/string.rs index 3fa549369934a..8f4d5793982b3 100644 --- a/app/buck2_node/src/attrs/attr_type/string.rs +++ b/app/buck2_node/src/attrs/attr_type/string.rs @@ -8,7 +8,6 @@ */ use std::fmt; -use std::fmt::Display; use std::ops::Deref; use allocative::Allocative; @@ -16,6 +15,9 @@ use buck2_util::arc_str::ArcStr; use dupe::Dupe; use serde::Serialize; +use crate::attrs::display::AttrDisplayWithContext; +use crate::attrs::fmt_context::AttrFmtContext; + #[derive(Debug, Eq, PartialEq, Hash, Allocative, Clone, Copy, Dupe)] pub struct StringAttrType; @@ -33,9 +35,9 @@ impl Deref for StringLiteral { } } -impl Display for StringLiteral { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if f.alternate() { +impl AttrDisplayWithContext for StringLiteral { + fn fmt(&self, ctx: &AttrFmtContext, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if ctx.options.exclude_quotes { f.write_str(self.0.as_str()) } else { write!(f, "\"{}\"", self.0) diff --git a/app/buck2_node/src/attrs/attr_type/target_modifiers.rs b/app/buck2_node/src/attrs/attr_type/target_modifiers.rs new file mode 100644 index 0000000000000..ac2d2696cb6b2 --- /dev/null +++ b/app/buck2_node/src/attrs/attr_type/target_modifiers.rs @@ -0,0 +1,14 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use allocative::Allocative; +use dupe::Dupe; + +#[derive(Debug, Eq, PartialEq, Hash, Allocative, Clone, Copy, Dupe)] +pub struct TargetModifiersAttrType;
diff --git a/app/buck2_node/src/attrs/attr_type/tuple.rs b/app/buck2_node/src/attrs/attr_type/tuple.rs index 2ec0249c50367..c11cda98e5800 100644 --- a/app/buck2_node/src/attrs/attr_type/tuple.rs +++ b/app/buck2_node/src/attrs/attr_type/tuple.rs @@ -14,6 +14,7 @@ use std::ops::Deref; use allocative::Allocative; use buck2_util::arc_str::ArcSlice; +use display_container::fmt_container; use gazebo::prelude::SliceExt; use serde_json::to_value; use serde_json::Value; @@ -21,6 +22,7 @@ use serde_json::Value; use crate::attrs::attr_type::any_matches::AnyMatches; use crate::attrs::attr_type::AttrType; use crate::attrs::display::AttrDisplayWithContext; +use crate::attrs::display::AttrDisplayWithContextExt; use crate::attrs::fmt_context::AttrFmtContext; use crate::attrs::json::ToJsonWithContext; @@ -59,15 +61,7 @@ impl Deref for TupleLiteral { impl AttrDisplayWithContext for TupleLiteral { fn fmt(&self, ctx: &AttrFmtContext, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "(")?; - for (i, v) in self.0.iter().enumerate() { - if i != 0 { - write!(f, ",")?; - } - AttrDisplayWithContext::fmt(v, ctx, f)?; - } - write!(f, ")")?; - Ok(()) + fmt_container(f, "(", ")", self.0.iter().map(|v| v.as_display(ctx))) } }
diff --git a/app/buck2_node/src/attrs/coerced_attr.rs b/app/buck2_node/src/attrs/coerced_attr.rs index ad1d679579d1a..90da162962003 100644 --- a/app/buck2_node/src/attrs/coerced_attr.rs +++ b/app/buck2_node/src/attrs/coerced_attr.rs @@ -7,29 +7,35 @@ * of this source tree. */ +use core::fmt; use std::collections::HashSet; use std::fmt::Display; use std::hash::Hash; use allocative::Allocative; use anyhow::Context; -use buck2_core::buck_path::path::BuckPathRef; use buck2_core::configuration::config_setting::ConfigSettingData; -use buck2_core::configuration::data::ConfigurationData; +use buck2_core::package::source_path::SourcePathRef; use buck2_core::package::PackageLabel; use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_core::provider::label::ProvidersLabel; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; +use buck2_data::error::ErrorTag; +use buck2_error::buck2_error_anyhow; +use buck2_error::internal_error_anyhow; +use buck2_error::BuckErrorContext; use buck2_util::arc_str::ArcSlice; +use display_container::fmt_keyed_container; use dupe::Dupe; -use dupe::IterDupedExt; use gazebo::prelude::SliceExt; use itertools::Itertools; use serde::Serialize; use serde::Serializer; use serde_json::to_value; +use smallvec::SmallVec; use starlark_map::StarlarkHasherBuilder; +use super::values::TargetModifiersValue; use crate::attrs::attr_type::any_matches::AnyMatches; use crate::attrs::attr_type::arg::StringWithMacros; use crate::attrs::attr_type::attr_config::source_file_display; @@ -56,55 +62,47 @@ use crate::attrs::fmt_context::AttrFmtContext; use crate::attrs::json::ToJsonWithContext; use crate::attrs::serialize::AttrSerializeWithContext; use crate::attrs::traversal::CoercedAttrTraversal; +use crate::configuration::resolved::ConfigurationSettingKey; use crate::metadata::map::MetadataMap; use crate::visibility::VisibilitySpecification; use crate::visibility::WithinViewSpecification; -#[derive(thiserror::Error, Debug)] -enum SelectError { - #[error("None of {} conditions matched configuration `{}` and no default was set:\n{}", - .1.len(), - .0, - .1.iter().map(|s| format!(" {}", s)).join("\n"), - )] - MissingDefault(ConfigurationData, Vec<TargetLabel>), - #[error( - "Both select keys `{0}` and `{1}` match the configuration, but neither is more specific" - )] - TwoKeysDoNotRefineEachOther(String, String), - #[error("concat with no items (internal error)")] - ConcatEmpty, - #[error("duplicate key `{0}` in `select()`")] - DuplicateKey(String), -} - -#[derive(Debug, thiserror::Error)] -enum CoercedAttrError { - #[error("Inconsistent number of elements in tuple")] - InconsistentTupleLength, +pub enum CoercedSelectorKeyRef<'a> { + Target(&'a ConfigurationSettingKey), + Default, } -enum CoercedSelectorKeyRef<'a> { - Target(&'a TargetLabel), - Default, +impl<'a> fmt::Display for CoercedSelectorKeyRef<'a> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + CoercedSelectorKeyRef::Target(target) => write!(f, "{}", target.0), + CoercedSelectorKeyRef::Default => write!(f, "DEFAULT"), + } + } } #[derive(Debug, Clone, PartialEq, Eq, Hash, Allocative)] pub struct CoercedSelector { - pub(crate) entries: ArcSlice<(TargetLabel, CoercedAttr)>, + pub(crate) entries: ArcSlice<(ConfigurationSettingKey, CoercedAttr)>, pub(crate) default: Option<CoercedAttr>, } impl CoercedSelector { pub fn new( - entries: ArcSlice<(TargetLabel, CoercedAttr)>, + entries: ArcSlice<(ConfigurationSettingKey, CoercedAttr)>, default: Option<CoercedAttr>, ) -> anyhow::Result<CoercedSelector> { Self::check_all_keys_unique(&entries)?; Ok(CoercedSelector { entries, default }) } - fn check_all_keys_unique(entries: &[(TargetLabel, CoercedAttr)]) -> anyhow::Result<()> { + fn check_all_keys_unique( + entries: &[(ConfigurationSettingKey, CoercedAttr)], + ) -> anyhow::Result<()> { + fn duplicate_key(key: &ConfigurationSettingKey) -> anyhow::Error { + buck2_error_anyhow!([], "duplicate key `{key}` in `select()`") + } + // This is possible when select keys are specified like: // ``` // select({ @@ -121,16 +119,16 @@ impl CoercedSelector { for i in 0..entries.len() { for j in i + 1..entries.len() { if entries[i].0 == entries[j].0 { - return Err(SelectError::DuplicateKey(entries[i].0.to_string()).into()); + return Err(duplicate_key(&entries[i].0)); } } } } else { - let mut visited_keys: HashSet<&TargetLabel, _> = + let mut visited_keys: HashSet<&ConfigurationSettingKey, _> = HashSet::with_capacity_and_hasher(entries.len(), StarlarkHasherBuilder); for (k, _) in entries { if !visited_keys.insert(k) { - return Err(SelectError::DuplicateKey(k.to_string()).into()); + return Err(duplicate_key(k)); } } } @@ -138,7 +136,7 @@ impl CoercedSelector { Ok(()) } - fn all_entries(&self) -> impl Iterator<Item = (CoercedSelectorKeyRef, &CoercedAttr)> { + pub fn all_entries(&self) -> impl Iterator<Item = (CoercedSelectorKeyRef, &CoercedAttr)> { self.entries .iter() .map(|(k, v)| (CoercedSelectorKeyRef::Target(k), v)) @@ -199,7 +197,7 @@ pub enum CoercedAttr { ExplicitConfiguredDep(Box<UnconfiguredExplicitConfiguredDep>), SplitTransitionDep(ProvidersLabel), ConfiguredDep(Box<DepAttr<ConfiguredProvidersLabel>>), - ConfigurationDep(TargetLabel), + ConfigurationDep(ConfigurationSettingKey), PluginDep(TargetLabel), Dep(ProvidersLabel), SourceLabel(ProvidersLabel), @@ -210,6 +208,7 @@ pub enum CoercedAttr { Query(Box<QueryAttr<ProvidersLabel>>), SourceFile(CoercedPath), Metadata(MetadataMap), + TargetModifiers(TargetModifiersValue), } // This is just to help understand any impact that changes have to the size of this.
@@ -224,24 +223,26 @@ impl AttrDisplayWithContext for CoercedAttr { match self { CoercedAttr::Selector(s) => { write!(f, "select(")?; - for (i, (key, value)) in s.all_entries().enumerate() { - if i > 0 { - write!(f, ",")?; - } - match key { - CoercedSelectorKeyRef::Target(k) => { - write!(f, "\"{}\"={}", k, value.as_display(ctx))?; - } - CoercedSelectorKeyRef::Default => { - write!(f, "\"DEFAULT\"={}", value.as_display(ctx))?; - } - } - } + fmt_keyed_container( + f, + "{", + "}", + ": ", + s.all_entries().map(|(k, v)| { + ( + match k { + CoercedSelectorKeyRef::Target(k) => format!("\"{}\"", k), + CoercedSelectorKeyRef::Default => "\"DEFAULT\"".to_owned(), + }, + v.as_display(ctx), + ) + }), + )?; write!(f, ")")?; Ok(()) } CoercedAttr::Concat(items) => { - write!(f, "{}", items.iter().map(|a| a.as_display(ctx)).format("+")) + items.iter().map(|a| a.as_display(ctx)).format("+").fmt(f) } CoercedAttr::Bool(v) => { write!(f, "{}", v) @@ -249,7 +250,9 @@ impl AttrDisplayWithContext for CoercedAttr { CoercedAttr::Int(v) => { write!(f, "{}", v) } - CoercedAttr::String(v) | CoercedAttr::EnumVariant(v) => Display::fmt(v, f), + CoercedAttr::String(v) | CoercedAttr::EnumVariant(v) => { + AttrDisplayWithContext::fmt(v, ctx, f) + } CoercedAttr::List(list) => AttrDisplayWithContext::fmt(list, ctx, f), CoercedAttr::Tuple(v) => AttrDisplayWithContext::fmt(v, ctx, f), CoercedAttr::Dict(v) => AttrDisplayWithContext::fmt(v, ctx, f), @@ -266,9 +269,10 @@ impl AttrDisplayWithContext for CoercedAttr { CoercedAttr::SourceLabel(e) => write!(f, "\"{}\"", e), CoercedAttr::Label(e) => write!(f, "\"{}\"", e), CoercedAttr::Arg(e) => write!(f, "\"{}\"", e), - CoercedAttr::Query(e) => write!(f, "\"{}\"", e.query()), + CoercedAttr::Query(e) => write!(f, "\"{}\"", e.query.query), CoercedAttr::SourceFile(e) => write!(f, "\"{}\"", source_file_display(ctx, e)), CoercedAttr::Metadata(m) => write!(f, "{}", m), + CoercedAttr::TargetModifiers(m) => write!(f, "{}", m), } } } @@ -347,9 +351,10 @@ impl CoercedAttr { CoercedAttr::SourceLabel(e) => Ok(to_value(e.to_string())?), CoercedAttr::Label(e) => Ok(to_value(e.to_string())?), CoercedAttr::Arg(e) => Ok(to_value(e.to_string())?), - CoercedAttr::Query(e) => Ok(to_value(e.query())?), + CoercedAttr::Query(e) => Ok(to_value(&e.query.query)?), CoercedAttr::SourceFile(e) => Ok(to_value(source_file_display(ctx, e).to_string())?), CoercedAttr::Metadata(m) => Ok(m.to_value()), + CoercedAttr::TargetModifiers(m) => Ok(m.to_value()), } } @@ -375,40 +380,40 @@ impl CoercedAttr { CoercedAttrWithType::Selector(CoercedSelector { entries, default }, t) => { for (condition, value) in entries.iter() { traversal.configuration_dep(condition)?; - value.traverse(t, pkg.dupe(), traversal)?; + value.traverse(t, pkg, traversal)?; } if let Some(v) = default { - v.traverse(t, pkg.dupe(), traversal)?; + v.traverse(t, pkg, traversal)?; } Ok(()) } CoercedAttrWithType::Concat(items, t) => { for item in items { - item.traverse(t, pkg.dupe(), traversal)?; + item.traverse(t, pkg, traversal)?; } Ok(()) } CoercedAttrWithType::None => Ok(()), - CoercedAttrWithType::Some(attr, t) => attr.traverse(&t.inner, pkg.dupe(), traversal), + CoercedAttrWithType::Some(attr, t) => attr.traverse(&t.inner, pkg, traversal), CoercedAttrWithType::AnyList(list) => { for v in list.iter() { // This is no-op now, but any may contain selects in the future. 
- v.traverse(t, pkg.dupe(), traversal)?; + v.traverse(t, pkg, traversal)?; } Ok(()) } CoercedAttrWithType::AnyTuple(tuple) => { for v in tuple.iter() { - v.traverse(t, pkg.dupe(), traversal)?; + v.traverse(t, pkg, traversal)?; } Ok(()) } CoercedAttrWithType::AnyDict(dict) => { for (k, v) in dict.iter() { - k.traverse(t, pkg.dupe(), traversal)?; - v.traverse(t, pkg.dupe(), traversal)?; + k.traverse(t, pkg, traversal)?; + v.traverse(t, pkg, traversal)?; } Ok(()) } @@ -419,24 +424,26 @@ impl CoercedAttr { CoercedAttrWithType::EnumVariant(..) => Ok(()), CoercedAttrWithType::List(list, t) => { for v in list.iter() { - v.traverse(&t.inner, pkg.dupe(), traversal)?; + v.traverse(&t.inner, pkg, traversal)?; } Ok(()) } CoercedAttrWithType::Tuple(list, t) => { if list.len() != t.xs.len() { - return Err(CoercedAttrError::InconsistentTupleLength.into()); + return Err(internal_error_anyhow!( + "Inconsistent number of elements in tuple" + )); } for (v, vt) in list.iter().zip(&t.xs) { - v.traverse(vt, pkg.dupe(), traversal)?; + v.traverse(vt, pkg, traversal)?; } Ok(()) } CoercedAttrWithType::Dict(dict, t) => { for (k, v) in dict.iter() { - k.traverse(&t.key, pkg.dupe(), traversal)?; - v.traverse(&t.value, pkg.dupe(), traversal)?; + k.traverse(&t.key, pkg, traversal)?; + v.traverse(&t.value, pkg, traversal)?; } Ok(()) } @@ -461,44 +468,88 @@ impl CoercedAttr { } CoercedAttrWithType::SourceLabel(s, _t) => traversal.dep(s.target()), CoercedAttrWithType::Label(label, _t) => traversal.label(label), - CoercedAttrWithType::Arg(arg, _t) => arg.traverse(traversal), + CoercedAttrWithType::Arg(arg, _t) => arg.traverse(traversal, pkg), CoercedAttrWithType::Query(query, _t) => query.traverse(traversal), CoercedAttrWithType::SourceFile(source, _t) => { for x in source.inputs() { - traversal.input(BuckPathRef::new(pkg.dupe(), x))?; + traversal.input(SourcePathRef::new(pkg, x))?; } Ok(()) } CoercedAttrWithType::Metadata(..) => Ok(()), + CoercedAttrWithType::TargetModifiers(..) => Ok(()), } } /// If more than one select key matches, select the most specific. 
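How "most specific" is decided can be modeled with plain sets: one config setting refines another when its constraints are a strict superset, so `linux-arm32` beats `arm32`. The sketch below is a standalone model, not the real `ConfigSettingData`; it mirrors the fast path that keeps a single best candidate and errors on unordered keys (where the new code instead defers to a slow path, shown in the diff that follows).

use std::collections::BTreeSet;

// Toy config setting: a name plus the set of constraints it matches.
#[derive(Debug)]
struct Setting {
    name: &'static str,
    constraints: BTreeSet<&'static str>,
}

impl Setting {
    // `self` refines `other` when it matches strictly more constraints.
    fn refines(&self, other: &Setting) -> bool {
        self.constraints.is_superset(&other.constraints)
            && self.constraints.len() > other.constraints.len()
    }
}

fn most_specific<'a>(matched: &'a [Setting]) -> Result<&'a Setting, String> {
    let mut best = matched
        .first()
        .ok_or_else(|| "no matching keys".to_owned())?;
    for s in &matched[1..] {
        if s.refines(best) {
            best = s;
        } else if !best.refines(s) {
            return Err(format!(
                "Both select keys `{}` and `{}` match, neither is more specific",
                best.name, s.name
            ));
        }
    }
    Ok(best)
}

fn main() {
    let arm32 = Setting { name: "arm32", constraints: ["arm32"].into() };
    let linux_arm32 = Setting { name: "linux-arm32", constraints: ["arm32", "linux"].into() };
    assert_eq!(most_specific(&[arm32, linux_arm32]).unwrap().name, "linux-arm32");
}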
- pub fn select_the_most_specific<'a>( - ctx: &dyn AttrConfigurationContext, - select_entries: &'a [(TargetLabel, CoercedAttr)], + pub fn select_the_most_specific<'a, 'x>( + select_entries: impl IntoIterator< + Item = ( + &'x ConfigurationSettingKey, + &'x ConfigSettingData, + &'a CoercedAttr, + ), + >, ) -> anyhow::Result<Option<&'a CoercedAttr>> { - let mut matching: Option<(&TargetLabel, &ConfigSettingData, &CoercedAttr)> = None; - for (k, v) in select_entries { - matching = match (ctx.matches(k), matching) { - (None, matching) => matching, - (Some(conf), None) => Some((k, conf, v)), - (Some(conf), Some((prev_k, prev_conf, prev_v))) => { + let select_entries_vec = SmallVec::<[_; 17]>::from_iter(select_entries); + + let mut select_entries = select_entries_vec.iter().copied(); + let Some(mut matching): Option<( + &ConfigurationSettingKey, + &ConfigSettingData, + &CoercedAttr, + )> = select_entries.next() else { + return Ok(None); + }; + + for (k, conf, v) in select_entries { + let (prev_k, prev_conf, prev_v) = matching; + matching = { + { if conf.refines(prev_conf) { - Some((k, conf, v)) + (k, conf, v) } else if prev_conf.refines(conf) { - Some((prev_k, prev_conf, prev_v)) + (prev_k, prev_conf, prev_v) } else { - return Err(SelectError::TwoKeysDoNotRefineEachOther( - prev_k.to_string(), - k.to_string(), - ) - .into()); + return Self::select_the_most_specific_slow(select_entries_vec); } } } } - Ok(matching.map(|(_k, _conf, v)| v)) + Ok(Some(matching.2)) + } + + fn select_the_most_specific_slow<'a>( + select_entries: SmallVec< + [( + &ConfigurationSettingKey, + &ConfigSettingData, + &'a CoercedAttr, + ); 17], + >, + ) -> anyhow::Result<Option<&'a CoercedAttr>> { + let mut entries = + SmallVec::<[(&ConfigurationSettingKey, &ConfigSettingData, &CoercedAttr); 17]>::new(); + + for (k, d, v) in select_entries { + // If there's entry for `linux-arm32` and current is `arm32`, skip current. + if entries.iter().any(|(_, prev_d, _)| prev_d.refines(d)) { + continue; + } + // If current is `linux-arm32`, remove `arm32` from `entries`. + entries.retain(|(_, prev_d, _)| !d.refines(prev_d)); + entries.push((k, d, v)); + } + match entries.as_slice() { + [] => Err(internal_error_anyhow!( + "no entries after slow select the most specific" + )), + [(.., x)] => Ok(Some(x)), + [(x, ..), (y, ..), ..] => Err(buck2_error_anyhow!( + [], + "Both select keys `{x}` and `{y}` match the configuration, but neither is more specific" + )), + } } fn select<'a>( @@ -506,15 +557,23 @@ impl CoercedAttr { select: &'a CoercedSelector, ) -> anyhow::Result<&'a CoercedAttr> { let CoercedSelector { entries, default } = select; - if let Some(v) = Self::select_the_most_specific(ctx, entries)? { + let resolved_cfg_settings = ctx.resolved_cfg_settings(); + let resolved_entries = entries.iter().filter_map(|(k, v)| { + resolved_cfg_settings + .setting_matches(k) + .map(|conf| (k, conf, v)) + }); + if let Some(v) = Self::select_the_most_specific(resolved_entries)? { Ok(v) } else { default.as_ref().ok_or_else(|| { - SelectError::MissingDefault( - ctx.cfg().cfg().dupe(), - entries.iter().map(|(k, _)| k).duped().collect(), + buck2_error_anyhow!( + [], + "None of {} conditions matched configuration `{}` and no default was set:\n{}", + entries.len(), + ctx.cfg().cfg(), + entries.iter().map(|(s, _)| format!(" {}", s)).join("\n"), ) - .into() }) } } @@ -527,6 +586,15 @@ impl CoercedAttr { &self, ty: &AttrType, ctx: &dyn AttrConfigurationContext, + ) -> anyhow::Result<ConfiguredAttr> { + self.configure_inner(ty, ctx) + .tag_anyhow(ErrorTag::ConfigureAttr) + } + + fn configure_inner( + &self, + ty: &AttrType, + ctx: &dyn AttrConfigurationContext, ) -> anyhow::Result<ConfiguredAttr> { Ok(match CoercedAttrWithType::pack(self, ty)? { CoercedAttrWithType::Selector(select, t) => { @@ -535,7 +603,7 @@ impl CoercedAttr { CoercedAttrWithType::Concat(items, t) => { let singleton = items.len() == 1; let mut it = items.iter().map(|item| item.configure(t, ctx)); - let first = it.next().ok_or(SelectError::ConcatEmpty)??; + let first = it.next().internal_error_anyhow("concat with no items")??; if singleton { first } else { @@ -543,16 +611,19 @@ impl CoercedAttr { } } - CoercedAttrWithType::AnyList(list) => { - ConfiguredAttr::List(ListLiteral(list.try_map(|v| v.configure(ty, ctx))?.into())) - } + CoercedAttrWithType::AnyList(list) => ConfiguredAttr::List(ListLiteral( + list.try_map(|v| v.configure(AttrType::any_ref(), ctx))? + .into(), + )), CoercedAttrWithType::AnyTuple(tuple) => ConfiguredAttr::Tuple(TupleLiteral( - tuple.try_map(|v| v.configure(ty, ctx))?.into(), + tuple + .try_map(|v| v.configure(AttrType::any_ref(), ctx))? + .into(), )), CoercedAttrWithType::AnyDict(dict) => ConfiguredAttr::Dict(DictLiteral( dict.try_map(|(k, v)| { - let k2 = k.configure(ty, ctx)?; - let v2 = v.configure(ty, ctx)?; + let k2 = k.configure(AttrType::any_ref(), ctx)?; + let v2 = v.configure(AttrType::any_ref(), ctx)?; anyhow::Ok((k2, v2)) })? .into(), @@ -567,7 +638,9 @@ impl CoercedAttr { )), CoercedAttrWithType::Tuple(list, t) => { if list.len() != t.xs.len() { - return Err(CoercedAttrError::InconsistentTupleLength.into()); + return Err(internal_error_anyhow!( + "Inconsistent number of elements in tuple" + )); } ConfiguredAttr::Tuple(TupleLiteral( list.iter() @@ -602,12 +675,12 @@ impl CoercedAttr { ConfigurationDepAttrType::configure(ctx, dep)? } CoercedAttrWithType::PluginDep(dep, t) => { - ConfiguredAttr::PluginDep(Box::new((dep.dupe(), t.kind().dupe()))) + ConfiguredAttr::PluginDep(dep.dupe(), t.kind().dupe()) } CoercedAttrWithType::Dep(dep, t) => t.configure(dep, ctx)?, - CoercedAttrWithType::SourceLabel(source, _) => ConfiguredAttr::SourceLabel(Box::new( - source.configure_pair(ctx.cfg().cfg_pair().dupe()), - )), + CoercedAttrWithType::SourceLabel(source, _) => { + ConfiguredAttr::SourceLabel(source.configure_pair(ctx.cfg().cfg_pair().dupe())) + } CoercedAttrWithType::Label(label, _) => LabelAttrType::configure(ctx, label)?, CoercedAttrWithType::Arg(arg, attr_type) => { ConfiguredAttr::Arg(arg.configure(ctx, attr_type.anon_target_compatible)?)
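The `configure`/`configure_inner` split above is a small but reusable pattern: the public method delegates to an inner implementation and stamps every error from that subtree with one category tag. A standalone sketch of the shape (plain enum and strings here; the real code tags `anyhow` errors with `ErrorTag::ConfigureAttr`):

#[derive(Debug)]
enum Tag {
    ConfigureAttr,
}

#[derive(Debug)]
struct TaggedError {
    tag: Tag,
    message: String,
}

// Inner logic stays free of error-classification concerns.
fn configure_inner(value: &str) -> Result<String, String> {
    if value.is_empty() {
        return Err("empty attribute".to_owned());
    }
    Ok(format!("configured({value})"))
}

// One tagging point at the API boundary covers every failure underneath.
fn configure(value: &str) -> Result<String, TaggedError> {
    configure_inner(value).map_err(|message| TaggedError {
        tag: Tag::ConfigureAttr,
        message,
    })
}

fn main() {
    assert!(configure("x").is_ok());
    let err = configure("").unwrap_err();
    println!("{:?}: {}", err.tag, err.message);
}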
@@ -617,6 +690,7 @@ impl CoercedAttr { } CoercedAttrWithType::SourceFile(s, _) => ConfiguredAttr::SourceFile(s.clone()), CoercedAttrWithType::Metadata(m, _) => ConfiguredAttr::Metadata(m.clone()), + CoercedAttrWithType::TargetModifiers(m, _) => ConfiguredAttr::TargetModifiers(m.dupe()), }) } @@ -662,9 +736,10 @@ impl CoercedAttr { CoercedAttr::SourceLabel(e) => filter(&e.to_string()), CoercedAttr::Label(e) => filter(&e.to_string()), CoercedAttr::Arg(e) => filter(&e.to_string()), - CoercedAttr::Query(e) => filter(e.query()), + CoercedAttr::Query(e) => filter(&e.query.query), CoercedAttr::SourceFile(e) => filter(&e.path().to_string()), CoercedAttr::Metadata(e) => e.any_matches(filter), + CoercedAttr::TargetModifiers(e) => e.any_matches(filter), } } } @@ -672,17 +747,17 @@ impl CoercedAttr { #[cfg(test)] mod tests { - use buck2_core::target::label::TargetLabel; use dupe::Dupe; use crate::attrs::coerced_attr::CoercedAttr; use crate::attrs::coerced_attr::CoercedSelector; + use crate::configuration::resolved::ConfigurationSettingKey; #[test] fn test_check_all_keys_unique_small() { - let a = TargetLabel::testing_parse("foo//:a"); - let b = TargetLabel::testing_parse("foo//:b"); - let c = TargetLabel::testing_parse("foo//:c"); + let a = ConfigurationSettingKey::testing_parse("foo//:a"); + let b = ConfigurationSettingKey::testing_parse("foo//:b"); + let c = ConfigurationSettingKey::testing_parse("foo//:c"); let attr = CoercedAttr::None; let a = (a.dupe(), attr.clone()); let b = (b.dupe(), attr.clone()); @@ -705,7 +780,7 @@ mod tests { let mut long = (0..100) .map(|i| { ( - TargetLabel::testing_parse(&format!("foo//:{}", i)), + ConfigurationSettingKey::testing_parse(&format!("foo//:{}", i)), attr.clone(), ) })
diff --git a/app/buck2_node/src/attrs/coerced_attr_with_type.rs b/app/buck2_node/src/attrs/coerced_attr_with_type.rs index ef3100bfd3b95..b9f311b4f210a 100644 --- a/app/buck2_node/src/attrs/coerced_attr_with_type.rs +++ b/app/buck2_node/src/attrs/coerced_attr_with_type.rs @@ -9,8 +9,9 @@ use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_core::provider::label::ProvidersLabel; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; +use super::attr_type::target_modifiers::TargetModifiersAttrType; use crate::attrs::attr_type::arg::ArgAttrType; use crate::attrs::attr_type::arg::StringWithMacros; use crate::attrs::attr_type::bool::BoolAttrType; @@ -47,11 +48,13 @@ use crate::attrs::coerced_attr::CoercedAttr; use crate::attrs::coerced_attr::CoercedSelector; use crate::attrs::coerced_path::CoercedPath; use crate::attrs::display::AttrDisplayWithContextExt; +use crate::attrs::values::TargetModifiersValue; +use crate::configuration::resolved::ConfigurationSettingKey; use crate::metadata::map::MetadataMap; use crate::visibility::VisibilitySpecification; use crate::visibility::WithinViewSpecification; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum CoercedAttrWithTypeError { #[error( "attr and type mismatch: {}, {}; ({:?}) (internal error)", @@ -94,7 +97,7 @@ pub enum CoercedAttrWithType<'a, 't> { ), SplitTransitionDep(&'a ProvidersLabel, &'t SplitTransitionDepAttrType), ConfiguredDep(&'a DepAttr<ConfiguredProvidersLabel>), - ConfigurationDep(&'a TargetLabel, ConfigurationDepAttrType), + ConfigurationDep(&'a ConfigurationSettingKey, ConfigurationDepAttrType), PluginDep(&'a TargetLabel, &'t PluginDepAttrType), Dep(&'a ProvidersLabel, &'t DepAttrType), SourceLabel(&'a ProvidersLabel, SourceAttrType), @@ -103,6 +106,7 @@ pub enum CoercedAttrWithType<'a, 't> { Query(&'a QueryAttr<ProvidersLabel>, &'t QueryAttrType), SourceFile(&'a CoercedPath, SourceAttrType), Metadata(&'a MetadataMap, MetadataAttrType), + TargetModifiers(&'a TargetModifiersValue, TargetModifiersAttrType), } impl<'a, 't> CoercedAttrWithType<'a, 't> { @@ -111,7 +115,7 @@ impl<'a, 't> CoercedAttrWithType<'a, 't> { attr: &'a CoercedAttr, ty: &'t AttrType, ) -> anyhow::Result<CoercedAttrWithType<'a, 't>> { - match (attr, &*(ty.0)) { + match (attr, &ty.0.inner) { (CoercedAttr::Selector(s), _) => Ok(CoercedAttrWithType::Selector(s, ty)), (CoercedAttr::Concat(c), _) => Ok(CoercedAttrWithType::Concat(c, ty)), @@ -172,6 +176,9 @@ impl<'a, 't> CoercedAttrWithType<'a, 't> { (CoercedAttr::Metadata(p), AttrTypeInner::Metadata(t)) => { Ok(CoercedAttrWithType::Metadata(p, *t)) } + (CoercedAttr::TargetModifiers(p), AttrTypeInner::TargetModifiers(t)) => { + Ok(CoercedAttrWithType::TargetModifiers(p, *t)) + } // Explicitly list the remaining pattern to make sure nothing is forgotten. (CoercedAttr::Bool(_), _) @@ -194,7 +201,8 @@ impl<'a, 't> CoercedAttrWithType<'a, 't> { | (CoercedAttr::Arg(_), _) | (CoercedAttr::Query(_), _) | (CoercedAttr::SourceFile(_), _) - | (CoercedAttr::Metadata(_), _) => { + | (CoercedAttr::Metadata(_), _) + | (CoercedAttr::TargetModifiers(_), _) => { Err(CoercedAttrWithTypeError::Mismatch(attr.clone(), ty.clone()).into()) } } @@ -228,7 +236,8 @@ impl<'a, 't> CoercedAttrWithType<'a, 't> { | CoercedAttr::Query(_) | CoercedAttr::EnumVariant(_) | CoercedAttr::SourceFile(_) - | CoercedAttr::Metadata(_) => Err(CoercedAttrWithTypeError::Any.into()), + | CoercedAttr::Metadata(_) + | CoercedAttr::TargetModifiers(_) => Err(CoercedAttrWithTypeError::Any.into()), } } }
diff --git a/app/buck2_node/src/attrs/coerced_deps_collector.rs b/app/buck2_node/src/attrs/coerced_deps_collector.rs index d3d3de6aac9df..8587113145fda 100644 --- a/app/buck2_node/src/attrs/coerced_deps_collector.rs +++ b/app/buck2_node/src/attrs/coerced_deps_collector.rs @@ -10,39 +10,41 @@ use std::sync::Arc; use allocative::Allocative; -use buck2_core::buck_path::path::BuckPathRef; use buck2_core::configuration::transition::id::TransitionId; +use buck2_core::package::source_path::SourcePathRef; use buck2_core::plugins::PluginKind; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; +use buck2_util::thin_box::ThinBoxSlice; use dupe::Dupe; use starlark_map::ordered_set::OrderedSet; use crate::attrs::traversal::CoercedAttrTraversal; +use crate::configuration::resolved::ConfigurationSettingKey; #[derive(Default, Debug, PartialEq, Eq, Hash, Allocative)] pub struct CoercedDeps { /// Contains the deps derived from the attributes. /// Does not include the transition, exec or configuration deps. - pub deps: Box<[TargetLabel]>, + pub deps: ThinBoxSlice<TargetLabel>, /// Contains the deps which are transitioned to other configuration /// (including split transitions). - pub transition_deps: Box<[(TargetLabel, Arc<TransitionId>)]>, + pub transition_deps: ThinBoxSlice<(TargetLabel, Arc<TransitionId>)>, /// Contains the execution deps derived from the attributes. - pub exec_deps: Box<[TargetLabel]>, + pub exec_deps: ThinBoxSlice<TargetLabel>, /// Contains the toolchain deps derived from the attributes. - pub toolchain_deps: Box<[TargetLabel]>, + pub toolchain_deps: ThinBoxSlice<TargetLabel>, /// Contains the configuration deps. These are deps that appear as conditions in selects.
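Concretely, the "conditions in selects" that this comment describes are the keys of a `select()`: they are targets too, and the collector records them in a separate bucket from ordinary deps. A standalone sketch (simplified attribute tree, not the real `CoercedAttr`) of that split, shown before the field definitions continue below:

use std::collections::BTreeSet;

// Toy attribute value: a dep, a list, or a select keyed by config targets.
enum Attr {
    Dep(&'static str),
    List(Vec<Attr>),
    Select(Vec<(&'static str, Attr)>), // (configuration key, value)
}

#[derive(Default, Debug)]
struct Collector {
    deps: BTreeSet<&'static str>,
    configuration_deps: BTreeSet<&'static str>,
}

impl Collector {
    fn traverse(&mut self, attr: &Attr) {
        match attr {
            Attr::Dep(d) => {
                self.deps.insert(d);
            }
            Attr::List(xs) => xs.iter().for_each(|x| self.traverse(x)),
            Attr::Select(entries) => {
                for (key, value) in entries {
                    // Select keys become configuration deps, values are
                    // traversed like any other attribute.
                    self.configuration_deps.insert(key);
                    self.traverse(value);
                }
            }
        }
    }
}

fn main() {
    let attr = Attr::Select(vec![
        ("//constraint:linux", Attr::List(vec![Attr::Dep("//:a")])),
        ("DEFAULT", Attr::List(vec![Attr::Dep("//:b")])),
    ]);
    let mut c = Collector::default();
    c.traverse(&attr);
    println!("deps={:?} configuration_deps={:?}", c.deps, c.configuration_deps);
}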
- pub configuration_deps: Box<[TargetLabel]>, + pub configuration_deps: ThinBoxSlice<ConfigurationSettingKey>, /// Contains platform targets of configured_alias() - pub platform_deps: Box<[TargetLabel]>, + pub platform_deps: ThinBoxSlice<TargetLabel>, /// Contains the plugin deps - pub plugin_deps: Box<[TargetLabel]>, + pub plugin_deps: ThinBoxSlice<TargetLabel>, } impl From<CoercedDepsCollector> for CoercedDeps { @@ -85,7 +87,7 @@ pub struct CoercedDepsCollector { pub toolchain_deps: OrderedSet<TargetLabel>, /// Contains the configuration deps. These are deps that appear as conditions in selects. - pub configuration_deps: OrderedSet<TargetLabel>, + pub configuration_deps: OrderedSet<ConfigurationSettingKey>, /// Contains platform targets of configured_alias() pub platform_deps: OrderedSet<TargetLabel>, @@ -142,7 +144,7 @@ impl<'a> CoercedAttrTraversal<'a> for CoercedDepsCollector { Ok(()) } - fn configuration_dep(&mut self, dep: &'a TargetLabel) -> anyhow::Result<()> { + fn configuration_dep(&mut self, dep: &'a ConfigurationSettingKey) -> anyhow::Result<()> { self.configuration_deps.insert(dep.dupe()); Ok(()) } @@ -157,7 +159,7 @@ impl<'a> CoercedAttrTraversal<'a> for CoercedDepsCollector { Ok(()) } - fn input(&mut self, _input: BuckPathRef) -> anyhow::Result<()> { + fn input(&mut self, _input: SourcePathRef) -> anyhow::Result<()> { Ok(()) } }
diff --git a/app/buck2_node/src/attrs/coercion_context.rs b/app/buck2_node/src/attrs/coercion_context.rs index d1f44f305687d..85885c79e9f61 100644 --- a/app/buck2_node/src/attrs/coercion_context.rs +++ b/app/buck2_node/src/attrs/coercion_context.rs @@ -7,12 +7,12 @@ * of this source tree. */ +use buck2_core::pattern::pattern::ParsedPattern; use buck2_core::pattern::pattern_type::TargetPatternExtra; -use buck2_core::pattern::ParsedPattern; use buck2_core::provider::label::NonDefaultProvidersName; use buck2_core::provider::label::ProvidersLabel; use buck2_core::provider::label::ProvidersName; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use buck2_query::query::syntax::simple::functions::QueryLiteralVisitor; use buck2_query_parser::spanned::Spanned; use buck2_query_parser::Expr; @@ -21,8 +21,9 @@ use buck2_util::arc_str::ArcStr; use super::coerced_attr::CoercedAttr; use crate::attrs::coerced_path::CoercedPath; +use crate::configuration::resolved::ConfigurationSettingKey; -#[derive(thiserror::Error, Debug)] +#[derive(buck2_error::Error, Debug)] enum AttrCoercionContextError { #[error("Expected target label without name. Got `{0}`")] UnexpectedProvidersName(String), @@ -33,9 +34,19 @@ enum AttrCoercionContextError { pub trait AttrCoercionContext { fn coerce_target_label(&self, value: &str) -> anyhow::Result<TargetLabel> { let label = self.coerce_providers_label(value)?; - if let ProvidersName::NonDefault(box NonDefaultProvidersName::Named(_)) = label.name() { - return Err(AttrCoercionContextError::UnexpectedProvidersName(value.to_owned()).into()); + + match label.name() { + ProvidersName::NonDefault(flavor) => { + if let NonDefaultProvidersName::Named(_) = flavor.as_ref() { + return Err(AttrCoercionContextError::UnexpectedProvidersName( + value.to_owned(), + ) + .into()); + } + } + _ => {} } + Ok(label.into_parts().0) } @@ -51,8 +62,8 @@ pub trait AttrCoercionContext { // Reuse previously allocated selects if possible. fn intern_select( &self, - value: Vec<(TargetLabel, CoercedAttr)>, - ) -> ArcSlice<(TargetLabel, CoercedAttr)>; + value: Vec<(ConfigurationSettingKey, CoercedAttr)>, + ) -> ArcSlice<(ConfigurationSettingKey, CoercedAttr)>; // Reuse previously allocated dicts if possible.
fn intern_dict(
diff --git a/app/buck2_node/src/attrs/configuration_context.rs b/app/buck2_node/src/attrs/configuration_context.rs index 9e9b89d22c6f7..a911bdf8ea942 100644 --- a/app/buck2_node/src/attrs/configuration_context.rs +++ b/app/buck2_node/src/attrs/configuration_context.rs @@ -10,7 +10,6 @@ use std::sync::Arc; use anyhow::Context; -use buck2_core::configuration::config_setting::ConfigSettingData; use buck2_core::configuration::data::ConfigurationData; use buck2_core::configuration::pair::ConfigurationNoExec; use buck2_core::configuration::pair::ConfigurationWithExec; @@ -18,15 +17,15 @@ use buck2_core::configuration::transition::applied::TransitionApplied; use buck2_core::configuration::transition::id::TransitionId; use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_core::provider::label::ProvidersLabel; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use dupe::Dupe; use starlark_map::ordered_map::OrderedMap; use starlark_map::sorted_map::SortedMap; -use crate::configuration::resolved::ConfigurationSettingKeyRef; use crate::configuration::resolved::ResolvedConfiguration; +use crate::configuration::resolved::ResolvedConfigurationSettings; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] pub enum PlatformConfigurationError { #[error("Could not find configuration for platform target `{0}`")] UnknownPlatformTarget(TargetLabel), @@ -35,12 +34,11 @@ pub enum PlatformConfigurationError { /// The context for attribute configuration. Contains information about the /// configuration. pub trait AttrConfigurationContext { - /// Return the content of the resolved `config_setting` on match. - fn matches<'a>(&'a self, label: &TargetLabel) -> Option<&'a ConfigSettingData>; + fn resolved_cfg_settings(&self) -> &ResolvedConfigurationSettings; fn cfg(&self) -> ConfigurationNoExec; - fn exec_cfg(&self) -> ConfigurationNoExec; + fn exec_cfg(&self) -> anyhow::Result<ConfigurationNoExec>; /// Must be equal to `(cfg, Some(exec_cfg))`. fn toolchain_cfg(&self) -> ConfigurationWithExec; @@ -49,14 +47,19 @@ pub trait AttrConfigurationContext { /// Map of transition ids resolved to configurations /// using current node configuration as input. - fn resolved_transitions(&self) -> &OrderedMap<Arc<TransitionId>, Arc<TransitionApplied>>; + fn resolved_transitions( + &self, + ) -> anyhow::Result<&OrderedMap<Arc<TransitionId>, Arc<TransitionApplied>>>; fn configure_target(&self, label: &ProvidersLabel) -> ConfiguredProvidersLabel { label.configure_pair(self.cfg().cfg_pair().dupe()) } - fn configure_exec_target(&self, label: &ProvidersLabel) -> ConfiguredProvidersLabel { - label.configure_pair(self.exec_cfg().cfg_pair().dupe()) + fn configure_exec_target( + &self, + label: &ProvidersLabel, + ) -> anyhow::Result<ConfiguredProvidersLabel> { + Ok(label.configure_pair(self.exec_cfg()?.cfg_pair().dupe())) } fn configure_toolchain_target(&self, label: &ProvidersLabel) -> ConfiguredProvidersLabel { @@ -73,7 +76,7 @@ pub trait AttrConfigurationContext { tr: &TransitionId, ) -> anyhow::Result<ConfiguredProvidersLabel> { let cfg = self - .resolved_transitions() + .resolved_transitions()? .get(tr) .context("internal error: no resolved transition")?; Ok(label.configure(cfg.single()?.dupe())) } @@ -85,7 +88,7 @@ pub trait AttrConfigurationContext { tr: &TransitionId, ) -> anyhow::Result<SortedMap<String, ConfiguredProvidersLabel>> { let cfg = self - .resolved_transitions() + .resolved_transitions()? .get(tr) .context("internal error: no resolved transition")?; let split = cfg.split()?; @@ -123,17 +126,16 @@ impl<'b> AttrConfigurationContextImpl<'b> { } impl<'b> AttrConfigurationContext for AttrConfigurationContextImpl<'b> { - fn matches<'a>(&'a self, label: &TargetLabel) -> Option<&'a ConfigSettingData> { - self.resolved_cfg - .setting_matches(ConfigurationSettingKeyRef(label)) + fn resolved_cfg_settings(&self) -> &ResolvedConfigurationSettings { + self.resolved_cfg.settings() } fn cfg(&self) -> ConfigurationNoExec { self.resolved_cfg.cfg().dupe() } - fn exec_cfg(&self) -> ConfigurationNoExec { - self.exec_cfg.dupe() + fn exec_cfg(&self) -> anyhow::Result<ConfigurationNoExec> { + Ok(self.exec_cfg.dupe()) } fn toolchain_cfg(&self) -> ConfigurationWithExec { @@ -149,7 +151,9 @@ impl<'b> AttrConfigurationContext for AttrConfigurationContextImpl<'b> { } } - fn resolved_transitions(&self) -> &OrderedMap<Arc<TransitionId>, Arc<TransitionApplied>> { - self.resolved_transitions + fn resolved_transitions( + &self, + ) -> anyhow::Result<&OrderedMap<Arc<TransitionId>, Arc<TransitionApplied>>> { + Ok(self.resolved_transitions) } }
diff --git a/app/buck2_node/src/attrs/configured_attr.rs b/app/buck2_node/src/attrs/configured_attr.rs index 40db0b7c07c74..be25e2d233a41 100644 --- a/app/buck2_node/src/attrs/configured_attr.rs +++ b/app/buck2_node/src/attrs/configured_attr.rs @@ -11,13 +11,14 @@ use std::fmt::Debug; use std::fmt::Display; use allocative::Allocative; -use buck2_core::buck_path::path::BuckPathRef; +use buck2_core::package::source_path::SourcePathRef; use buck2_core::package::PackageLabel; use buck2_core::plugins::PluginKind; use buck2_core::provider::label::ConfiguredProvidersLabel; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; +use buck2_error::buck2_error_anyhow; +use buck2_error::internal_error_anyhow; use buck2_util::arc_str::ArcStr; -use dupe::Dupe; use serde::Serialize; use serde::Serializer; use starlark_map::ordered_map::OrderedMap; @@ -30,6 +31,7 @@ use crate::attrs::attr_type::configured_dep::ConfiguredExplicitConfiguredDep; use crate::attrs::attr_type::dep::DepAttr; use crate::attrs::attr_type::dict::DictLiteral; use crate::attrs::attr_type::list::ListLiteral; +use crate::attrs::attr_type::one_of::OneOfAttrType; use crate::attrs::attr_type::query::QueryAttr; use crate::attrs::attr_type::split_transition_dep::ConfiguredSplitTransitionDep; use crate::attrs::attr_type::string::StringLiteral; @@ -43,33 +45,12 @@ use crate::attrs::display::AttrDisplayWithContextExt; use crate::attrs::fmt_context::AttrFmtContext; use crate::attrs::json::ToJsonWithContext; use crate::attrs::serialize::AttrSerializeWithContext; +use crate::attrs::values::TargetModifiersValue; +use crate::configuration::resolved::ConfigurationSettingKey; use crate::metadata::map::MetadataMap; use crate::visibility::VisibilitySpecification; use crate::visibility::WithinViewSpecification; -#[derive(Debug, thiserror::Error)] -enum ConfiguredAttrError { - #[error("addition not supported for this attribute type `{0}`.")] - ConcatNotSupported(String), - #[error("addition not supported for these attribute type `{0}` and value `{1}`.")] - ConcatNotSupportedValues(&'static str, String), - #[error("got same key in both sides of dictionary concat (key `{0}`).")] - DictConcatDuplicateKeys(String), - #[error( - "Cannot concatenate values coerced/configured to different oneof variants: `{0}` and `{1}`" - )] - ConcatDifferentOneofVariants(AttrType, AttrType), - #[error("while concat, LHS is oneof, expecting RHS to also be oneof (internal error)")] - LhsOneOfRhsNotOneOf, -
#[error("expecting a list, got `{0}`")] - ExpectingList(String), - #[error("expecting configuration dep, got `{0}`")] - ExpectingConfigurationDep(String), - #[error("Inconsistent attr value (`{}`) and attr type (`{}`) (internal error)", - _0.as_display_no_ctx(), _1)] - InconsistentAttrValueAndAttrType(ConfiguredAttr, AttrType), -} - #[derive(Eq, PartialEq, Hash, Clone, Allocative, Debug)] pub enum ConfiguredAttr { Bool(BoolLiteral), @@ -100,21 +81,26 @@ pub enum ConfiguredAttr { WithinView(WithinViewSpecification), ExplicitConfiguredDep(Box), SplitTransitionDep(Box), - ConfigurationDep(Box), + ConfigurationDep(ConfigurationSettingKey), // Note: Despite being named `PluginDep`, this doesn't really act like a dep but rather like a // label - PluginDep(Box<(TargetLabel, PluginKind)>), + PluginDep(TargetLabel, PluginKind), Dep(Box>), - SourceLabel(Box), + SourceLabel(ConfiguredProvidersLabel), // NOTE: unlike deps, labels are not traversed, as they are typically used in lieu of deps in // cases that would cause cycles. - Label(Box), + Label(ConfiguredProvidersLabel), Arg(ConfiguredStringWithMacros), Query(Box>), SourceFile(CoercedPath), Metadata(MetadataMap), + TargetModifiers(TargetModifiersValue), } +// For `ConfiguredAttr` size is not as important as for `CoercedAttr`, +// yet we should keep it reasonable. +static_assertions::assert_eq_size!(ConfiguredAttr, [usize; 4]); + impl AttrSerializeWithContext for ConfiguredAttr { fn serialize_with_ctx(&self, ctx: &AttrFmtContext, s: S) -> Result where @@ -136,7 +122,9 @@ impl AttrDisplayWithContext for ConfiguredAttr { ConfiguredAttr::Int(v) => { write!(f, "{}", v) } - ConfiguredAttr::String(v) | ConfiguredAttr::EnumVariant(v) => Display::fmt(v, f), + ConfiguredAttr::String(v) | ConfiguredAttr::EnumVariant(v) => { + AttrDisplayWithContext::fmt(v, ctx, f) + } ConfiguredAttr::List(list) => AttrDisplayWithContext::fmt(list, ctx, f), ConfiguredAttr::Tuple(v) => AttrDisplayWithContext::fmt(v, ctx, f), ConfiguredAttr::Dict(v) => AttrDisplayWithContext::fmt(v, ctx, f), @@ -147,14 +135,15 @@ impl AttrDisplayWithContext for ConfiguredAttr { ConfiguredAttr::ExplicitConfiguredDep(e) => Display::fmt(e, f), ConfiguredAttr::SplitTransitionDep(e) => Display::fmt(e, f), ConfiguredAttr::ConfigurationDep(e) => write!(f, "\"{}\"", e), - ConfiguredAttr::PluginDep(e) => write!(f, "\"{}\"", e.0), + ConfiguredAttr::PluginDep(e, _) => write!(f, "\"{}\"", e), ConfiguredAttr::Dep(e) => write!(f, "\"{}\"", e), ConfiguredAttr::SourceLabel(e) => write!(f, "\"{}\"", e), ConfiguredAttr::Label(e) => write!(f, "\"{}\"", e), ConfiguredAttr::Arg(e) => write!(f, "\"{}\"", e), - ConfiguredAttr::Query(e) => write!(f, "\"{}\"", e.query()), + ConfiguredAttr::Query(e) => write!(f, "\"{}\"", e.query.query), ConfiguredAttr::SourceFile(e) => write!(f, "\"{}\"", source_file_display(ctx, e)), ConfiguredAttr::Metadata(m) => write!(f, "{}", m), + ConfiguredAttr::TargetModifiers(m) => write!(f, "{}", m), } } } @@ -173,20 +162,20 @@ impl ConfiguredAttr { ConfiguredAttr::EnumVariant(_) => Ok(()), ConfiguredAttr::List(list) => { for v in list.iter() { - v.traverse(pkg.dupe(), traversal)?; + v.traverse(pkg, traversal)?; } Ok(()) } ConfiguredAttr::Tuple(list) => { for v in list.iter() { - v.traverse(pkg.dupe(), traversal)?; + v.traverse(pkg, traversal)?; } Ok(()) } ConfiguredAttr::Dict(dict) => { for (k, v) in dict.iter() { - k.traverse(pkg.dupe(), traversal)?; - v.traverse(pkg.dupe(), traversal)?; + k.traverse(pkg, traversal)?; + v.traverse(pkg, traversal)?; } Ok(()) } @@ -201,21 +190,68 @@ impl 
ConfiguredAttr { } Ok(()) } - ConfiguredAttr::ConfigurationDep(dep) => traversal.configuration_dep(dep), - ConfiguredAttr::PluginDep(dep) => traversal.plugin_dep(&dep.0, &dep.1), + ConfiguredAttr::ConfigurationDep(dep) => traversal.configuration_dep(&dep.0), + ConfiguredAttr::PluginDep(dep, kind) => traversal.plugin_dep(dep, kind), ConfiguredAttr::Dep(dep) => dep.traverse(traversal), ConfiguredAttr::SourceLabel(dep) => traversal.dep(dep), ConfiguredAttr::Label(label) => traversal.label(label), - ConfiguredAttr::Arg(arg) => arg.string_with_macros.traverse(traversal), + ConfiguredAttr::Arg(arg) => arg.string_with_macros.traverse(traversal, pkg), ConfiguredAttr::Query(query) => query.traverse(traversal), ConfiguredAttr::SourceFile(source) => { for x in source.inputs() { - traversal.input(BuckPathRef::new(pkg.dupe(), x))?; + traversal.input(SourcePathRef::new(pkg, x))?; } Ok(()) } ConfiguredAttr::Metadata(..) => Ok(()), + ConfiguredAttr::TargetModifiers(..) => Ok(()), + } + } + + fn concat_not_supported(&self, attr_ty: &'static str) -> anyhow::Error { + buck2_error_anyhow!( + [], + "addition not supported for these attribute type `{}` and value `{}`", + attr_ty, + self.as_display_no_ctx() + ) + } + + fn unpack_oneof(self) -> anyhow::Result<(Self, u32)> { + match self { + ConfiguredAttr::OneOf(first, first_i) => Ok((*first, first_i)), + t => Err(internal_error_anyhow!( + "expecting oneof variant, got: {}`", + t.as_display_no_ctx(), + )), + } + } + + fn unpack_oneof_i(self, expected_i: u32, oneof: &OneOfAttrType) -> anyhow::Result<Self> { + let (first, i) = self.unpack_oneof()?; + if i != expected_i { + let first_t = oneof.get(expected_i)?; + let next_t = oneof.get(i)?; + return Err(buck2_error_anyhow!( + [], + "Cannot concatenate values coerced/configured \ to different oneof variants: `{first_t}` and `{next_t}`" + )); } + Ok(first) + } + + fn concat_oneof( + self, + items: &mut dyn Iterator<Item = anyhow::Result<Self>>, + oneof: &OneOfAttrType, + ) -> anyhow::Result<Self> { + let (first, first_i) = self.unpack_oneof()?; + let attr = first.concat( + &oneof.xs[first_i as usize], + &mut items.map(|v| v?.unpack_oneof_i(first_i, oneof)), + )?; + Ok(ConfiguredAttr::OneOf(Box::new(attr), first_i)) } /// Used for concatting the configured result of concatted selects. For most types this isn't allowed (it @@ -226,135 +262,101 @@ impl ConfiguredAttr { attr_type: &AttrType, items: &mut dyn Iterator<Item = anyhow::Result<Self>>, ) -> anyhow::Result<Self> { - let mismatch = |ty, attr: ConfiguredAttr| { - Err(ConfiguredAttrError::ConcatNotSupportedValues( - ty, - attr.as_display_no_ctx().to_string(), - ) - .into()) - }; - - match self { - ConfiguredAttr::OneOf(box first, first_i) => { - // Becaise if attr type if `option(oneof([list(string), ...])`, + match &attr_type.0.inner { + AttrTypeInner::OneOf(xs) => self.concat_oneof(items, xs), + AttrTypeInner::Option(opt) => { + // Because if attr type is `option(oneof([list(string), ...])`, value type is `oneof(list(...))`, without indication it is an option. - let attr_type = attr_type.unwrap_if_option(); - - let oneof_type = match &*attr_type.0 { - AttrTypeInner::OneOf(oneof_type) => oneof_type, - _ => { - return Err(ConfiguredAttrError::InconsistentAttrValueAndAttrType( - ConfiguredAttr::OneOf(Box::new(first), first_i), - attr_type.dupe(), - ) - .into()); - } - }; - - let first_t = oneof_type.get(first_i)?; - - first.concat( - first_t, - &mut items.map(|next| match next?
{ - ConfiguredAttr::OneOf(box next, next_i) => { - let next_t = oneof_type.get(next_i)?; - if first_i != next_i { - return Err(ConfiguredAttrError::ConcatDifferentOneofVariants( - first_t.dupe(), - next_t.dupe(), - ) - .into()); - } - Ok(next) - } - _ => Err(ConfiguredAttrError::LhsOneOfRhsNotOneOf.into()), - }), - ) + self.concat(&opt.inner, items) } - ConfiguredAttr::List(list) => { - let mut res = list.to_vec(); - for x in items { - match x? { - ConfiguredAttr::List(list2) => { - res.extend(list2.iter().cloned()); + _ => match self { + ConfiguredAttr::OneOf(..) => Err(internal_error_anyhow!( + "Inconsistent attr value (`{}`) and attr type (`{}`)", + self.as_display_no_ctx(), + attr_type + )), + ConfiguredAttr::List(list) => { + let mut res = list.to_vec(); + for x in items { + match x? { + ConfiguredAttr::List(list2) => { + res.extend(list2.iter().cloned()); + } + attr => return Err(attr.concat_not_supported("list")), } - attr => return mismatch("list", attr), } + Ok(ConfiguredAttr::List(ListLiteral(res.into()))) } - Ok(ConfiguredAttr::List(ListLiteral(res.into()))) - } - ConfiguredAttr::Dict(left) => { - let mut res = OrderedMap::new(); - for (k, v) in left.iter().cloned() { - res.insert(k, v); - } - for x in items { - match x? { - ConfiguredAttr::Dict(right) => { - for (k, v) in right.iter().cloned() { - match res.entry(k) { - small_map::Entry::Vacant(e) => { - e.insert(v); - } - small_map::Entry::Occupied(e) => { - return Err(ConfiguredAttrError::DictConcatDuplicateKeys( - e.key().as_display_no_ctx().to_string(), - ) - .into()); + ConfiguredAttr::Dict(left) => { + let mut res = OrderedMap::new(); + for (k, v) in left.iter().cloned() { + res.insert(k, v); + } + for x in items { + match x? { + ConfiguredAttr::Dict(right) => { + for (k, v) in right.iter().cloned() { + match res.entry(k) { + small_map::Entry::Vacant(e) => { + e.insert(v); + } + small_map::Entry::Occupied(e) => { + return Err(buck2_error_anyhow!( + [], + "got same key in both sides of dictionary concat (key `{}`)", + e.key().as_display_no_ctx() + )); + } } } } + attr => return Err(attr.concat_not_supported("dict")), } - attr => return mismatch("dict", attr), } + Ok(ConfiguredAttr::Dict(res.into_iter().collect())) } - Ok(ConfiguredAttr::Dict(res.into_iter().collect())) - } - ConfiguredAttr::String(res) => { - let mut items = items.peekable(); - if items.peek().is_none() { - Ok(ConfiguredAttr::String(res)) - } else { - let mut res = str::to_owned(&res.0); - for x in items { - match x? { - ConfiguredAttr::String(right) => res.push_str(&right.0), - attr => return mismatch("string", attr), + ConfiguredAttr::String(res) => { + let mut items = items.peekable(); + if items.peek().is_none() { + Ok(ConfiguredAttr::String(res)) + } else { + let mut res = str::to_owned(&res.0); + for x in items { + match x? { + ConfiguredAttr::String(right) => res.push_str(&right.0), + attr => return Err(attr.concat_not_supported("string")), + } } + Ok(ConfiguredAttr::String(StringLiteral(ArcStr::from(res)))) } - Ok(ConfiguredAttr::String(StringLiteral(ArcStr::from(res)))) } - } - ConfiguredAttr::Arg(left) => { - let res = left.string_with_macros.concat(items.map(|x| { - match x? { + ConfiguredAttr::Arg(left) => { + let res = left.string_with_macros.concat(items.map(|x| match x? 
{ ConfiguredAttr::Arg(x) => Ok(x.string_with_macros), - attr => Err(ConfiguredAttrError::ConcatNotSupportedValues( - "arg", - attr.as_display_no_ctx().to_string(), - ) - .into()), - } - }))?; - Ok(ConfiguredAttr::Arg(ConfiguredStringWithMacros { - string_with_macros: res, - anon_target_compatible: left.anon_target_compatible, - })) - } - val => Err(ConfiguredAttrError::ConcatNotSupported( - val.as_display_no_ctx().to_string(), - ) - .into()), + attr => Err(attr.concat_not_supported("arg")), + }))?; + Ok(ConfiguredAttr::Arg(ConfiguredStringWithMacros { + string_with_macros: res, + anon_target_compatible: left.anon_target_compatible, + })) + } + val => Err(buck2_error_anyhow!( + [], + "addition not supported for this attribute type `{}`", + val.as_display_no_ctx() + )), + }, } } - pub(crate) fn try_into_configuration_dep(self) -> anyhow::Result<TargetLabel> { + pub(crate) fn try_into_configuration_dep(self) -> anyhow::Result<ConfigurationSettingKey> { match self { - ConfiguredAttr::ConfigurationDep(d) => Ok(*d), - a => Err(ConfiguredAttrError::ExpectingConfigurationDep( - a.as_display_no_ctx().to_string(), - ) - .into()), + ConfiguredAttr::ConfigurationDep(d) => Ok(d), + s => Err(buck2_error_anyhow!( + [], + "expecting configuration dep, got `{0}`", + s.as_display_no_ctx() + )), } } @@ -368,7 +370,11 @@ impl ConfiguredAttr { pub(crate) fn try_into_list(self) -> anyhow::Result<Vec<ConfiguredAttr>> { match self { ConfiguredAttr::List(list) => Ok(list.to_vec()), - a => Err(ConfiguredAttrError::ExpectingList(a.as_display_no_ctx().to_string()).into()), + a => Err(buck2_error_anyhow!( + [], + "expecting a list, got `{0}`", + a.as_display_no_ctx() + )), } } }
diff --git a/app/buck2_node/src/attrs/configured_attr_full.rs b/app/buck2_node/src/attrs/configured_attr_full.rs index c1af7c6bdb1fc..cf76916af477b 100644 --- a/app/buck2_node/src/attrs/configured_attr_full.rs +++ b/app/buck2_node/src/attrs/configured_attr_full.rs @@ -15,6 +15,7 @@ use crate::attrs::configured_attr::ConfiguredAttr; use crate::attrs::configured_traversal::ConfiguredAttrTraversal; /// Full configured attribute: name, type, value. +#[derive(Debug)] pub struct ConfiguredAttrFull<'a> { pub name: &'a str, pub attr: &'a Attribute,
diff --git a/app/buck2_node/src/attrs/configured_attr_info_for_tests.rs b/app/buck2_node/src/attrs/configured_attr_info_for_tests.rs new file mode 100644 index 0000000000000..3c0e7bda6f785 --- /dev/null +++ b/app/buck2_node/src/attrs/configured_attr_info_for_tests.rs @@ -0,0 +1,38 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use buck2_core::provider::label::ConfiguredProvidersLabel; +use starlark_map::small_set::SmallSet; + +use crate::attrs::configured_traversal::ConfiguredAttrTraversal; + +#[derive(Default, Debug)] +pub struct ConfiguredAttrInfoForTests { + // Including transitioned deps.
+ pub deps: SmallSet<ConfiguredProvidersLabel>, + pub execution_deps: SmallSet<ConfiguredProvidersLabel>, +} + +impl ConfiguredAttrInfoForTests { + pub fn new() -> Self { + Self::default() + } +} + +impl ConfiguredAttrTraversal for ConfiguredAttrInfoForTests { + fn dep(&mut self, dep: &ConfiguredProvidersLabel) -> anyhow::Result<()> { + self.deps.insert(dep.clone()); + Ok(()) + } + + fn exec_dep(&mut self, dep: &ConfiguredProvidersLabel) -> anyhow::Result<()> { + self.execution_deps.insert(dep.clone()); + Ok(()) + } +}
diff --git a/app/buck2_node/src/attrs/configured_info.rs b/app/buck2_node/src/attrs/configured_info.rs deleted file mode 100644 index 2c307d5a19090..0000000000000 --- a/app/buck2_node/src/attrs/configured_info.rs +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use buck2_core::provider::label::ConfiguredProvidersLabel; -use starlark_map::small_set::SmallSet; - -use crate::attrs::attr_type::query::ResolvedQueryLiterals; -use crate::attrs::configured_traversal::ConfiguredAttrTraversal; - -#[derive(Default, Debug)] -pub struct ConfiguredAttrInfo { - // Including transitioned deps. - pub deps: SmallSet<ConfiguredProvidersLabel>, - pub execution_deps: SmallSet<ConfiguredProvidersLabel>, - pub has_query: bool, -} - -impl ConfiguredAttrInfo { - pub fn new() -> Self { - Self::default() - } -} - -impl ConfiguredAttrTraversal for ConfiguredAttrInfo { - fn dep(&mut self, dep: &ConfiguredProvidersLabel) -> anyhow::Result<()> { - self.deps.insert(dep.clone()); - Ok(()) - } - - fn query_macro( - &mut self, - _query: &str, - _resolved_literals: &ResolvedQueryLiterals<ConfiguredProvidersLabel>, - ) -> anyhow::Result<()> { - self.has_query = true; - Ok(()) - } - - fn exec_dep(&mut self, dep: &ConfiguredProvidersLabel) -> anyhow::Result<()> { - self.execution_deps.insert(dep.clone()); - Ok(()) - } -}
diff --git a/app/buck2_node/src/attrs/configured_traversal.rs b/app/buck2_node/src/attrs/configured_traversal.rs index 2d06646b24c9e..adcbecf1f89f1 100644 --- a/app/buck2_node/src/attrs/configured_traversal.rs +++ b/app/buck2_node/src/attrs/configured_traversal.rs @@ -7,11 +7,11 @@ * of this source tree. */ -use buck2_core::buck_path::path::BuckPathRef; +use buck2_core::package::source_path::SourcePathRef; use buck2_core::plugins::PluginKind; use buck2_core::plugins::PluginKindSet; use buck2_core::provider::label::ConfiguredProvidersLabel; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use crate::attrs::attr_type::query::ResolvedQueryLiterals; @@ -45,7 +45,8 @@ pub trait ConfiguredAttrTraversal { Ok(()) } - fn query_macro( + /// Called for both `attrs.query(...)` and query macros like `$(query_targets ...)`.
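The renamed hook this doc comment introduces is easiest to see as a trait with a default no-op method: visitors override only what they need, and both query attributes and query macros now funnel into the same callback. A standalone sketch with simplified signatures (the real trait passes resolved query literals, not just a string):

trait AttrTraversal {
    fn dep(&mut self, _dep: &str) -> Result<(), String> {
        Ok(())
    }
    // One hook for `attrs.query(...)` attributes and `$(query_...)` macros alike.
    fn query(&mut self, _query: &str) -> Result<(), String> {
        Ok(())
    }
}

#[derive(Default)]
struct QueryFinder {
    has_query: bool,
}

impl AttrTraversal for QueryFinder {
    fn query(&mut self, _query: &str) -> Result<(), String> {
        self.has_query = true;
        Ok(())
    }
}

fn traverse_query_attr(q: &str, t: &mut dyn AttrTraversal) -> Result<(), String> {
    // Both call sites funnel into the same hook.
    t.query(q)
}

fn main() {
    let mut finder = QueryFinder::default();
    traverse_query_attr("deps(//:a)", &mut finder).unwrap();
    assert!(finder.has_query);
}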
+ fn query( &mut self, _query: &str, _resolved_literals: &ResolvedQueryLiterals<ConfiguredProvidersLabel>, ) -> anyhow::Result<()> { Ok(()) } - fn input(&mut self, _path: BuckPathRef) -> anyhow::Result<()> { + fn input(&mut self, _path: SourcePathRef) -> anyhow::Result<()> { Ok(()) }
diff --git a/app/buck2_node/src/attrs/fmt_context.rs b/app/buck2_node/src/attrs/fmt_context.rs index fc29f7651763e..4256058f2ef58 100644 --- a/app/buck2_node/src/attrs/fmt_context.rs +++ b/app/buck2_node/src/attrs/fmt_context.rs @@ -8,6 +8,7 @@ */ use buck2_core::package::PackageLabel; +use buck2_query::query::environment::AttrFmtOptions; /// Attribute formatting context (for `Display` or `Serialize`). pub struct AttrFmtContext { @@ -16,8 +17,14 @@ pub struct AttrFmtContext { /// * tests /// * error messages pub package: Option<PackageLabel>, + pub options: AttrFmtOptions, } impl AttrFmtContext { - pub const NO_CONTEXT: AttrFmtContext = AttrFmtContext { package: None }; + pub const NO_CONTEXT: AttrFmtContext = AttrFmtContext { + package: None, + options: AttrFmtOptions { + exclude_quotes: false, + }, + }; }
diff --git a/app/buck2_node/src/attrs/hacks.rs b/app/buck2_node/src/attrs/hacks.rs index d9177ee9031bb..c9ecc8a1bc9ec 100644 --- a/app/buck2_node/src/attrs/hacks.rs +++ b/app/buck2_node/src/attrs/hacks.rs @@ -17,7 +17,10 @@ use crate::attrs::coerced_attr::CoercedAttr; use crate::attrs::fmt_context::AttrFmtContext; pub fn value_to_json(value: &CoercedAttr, pkg: PackageLabel) -> anyhow::Result<serde_json::Value> { - value.to_json(&AttrFmtContext { package: Some(pkg) }) + value.to_json(&AttrFmtContext { + package: Some(pkg), + options: Default::default(), + }) } pub fn value_to_string(value: &CoercedAttr, pkg: PackageLabel) -> anyhow::Result<String> {
diff --git a/app/buck2_node/src/attrs/internal.rs b/app/buck2_node/src/attrs/internal.rs index 152a1ab5123f6..e656a7d6c5c1a 100644 --- a/app/buck2_node/src/attrs/internal.rs +++ b/app/buck2_node/src/attrs/internal.rs @@ -11,9 +11,7 @@ use std::sync::Arc; -use buck2_core::plugins::PluginKindSet; use buck2_core::provider::id::ProviderId; -use dupe::Dupe; use once_cell::sync::Lazy; use starlark_map::ordered_map::OrderedMap; @@ -22,8 +20,8 @@ use crate::attrs::attr_type::any::AnyAttrType; use crate::attrs::attr_type::AttrType; use crate::attrs::coerced_attr::CoercedAttr; use crate::attrs::configurable::AttrIsConfigurable; +use crate::attrs::values::TargetModifiersValue; use crate::metadata::map::MetadataMap; -use crate::provider_id_set::ProviderIdSet; use crate::visibility::VisibilitySpecification; use crate::visibility::WithinViewSpecification; @@ -47,6 +45,7 @@ pub const EXEC_COMPATIBLE_WITH_ATTRIBUTE_FIELD: &str = "exec_compatible_with"; pub const VISIBILITY_ATTRIBUTE_FIELD: &str = "visibility"; pub const WITHIN_VIEW_ATTRIBUTE_FIELD: &str = "within_view"; pub const METADATA_ATTRIBUTE_FIELD: &str = "metadata"; +pub const TARGET_MODIFIERS_ATTRIBUTE_FIELD: &str = "modifiers"; pub const TESTS_ATTRIBUTE_FIELD: &str = "tests"; @@ -69,10 +68,7 @@ fn default_target_platform_attribute() -> Attribute { Attribute::new( Some(Arc::new(CoercedAttr::None)), "specifies the default target platform, used when no platforms are specified on the command line", - AttrType::option(AttrType::dep( - ProviderIdSet::from(vec![internal_attrs_platform_info_provider_id().dupe()]), - PluginKindSet::EMPTY, - )), + AttrType::option(AttrType::label()), ) } @@ -122,6 +118,16 @@ fn metadata_attribute() -> Attribute { ) } +fn target_modifiers_attribute() -> Attribute { + Attribute::new( + Some(Arc::new(CoercedAttr::TargetModifiers(
TargetModifiersValue::new(serde_json::Value::Array(vec![])), + ))), + "an array of modifiers associated with this target", + AttrType::target_modifiers(), + ) +} + fn tests_attribute() -> Attribute { let entry_type = AttrType::label(); Attribute::new( @@ -155,6 +161,10 @@ pub fn internal_attrs() -> &'static OrderedMap<&'static str, Attribute> { (WITHIN_VIEW_ATTRIBUTE_FIELD, within_view_attribute()), (METADATA_ATTRIBUTE_FIELD, metadata_attribute()), (TESTS_ATTRIBUTE_FIELD, tests_attribute()), + ( + TARGET_MODIFIERS_ATTRIBUTE_FIELD, + target_modifiers_attribute(), + ), ]) }); &ATTRS @@ -169,6 +179,7 @@ pub fn attr_is_configurable(name: &str) -> AttrIsConfigurable { || name == VISIBILITY_ATTRIBUTE_FIELD || name == WITHIN_VIEW_ATTRIBUTE_FIELD || name == METADATA_ATTRIBUTE_FIELD + || name == TARGET_MODIFIERS_ATTRIBUTE_FIELD { AttrIsConfigurable::No } else { diff --git a/app/buck2_node/src/attrs/mod.rs b/app/buck2_node/src/attrs/mod.rs deleted file mode 100644 index c27d4dd0ca418..0000000000000 --- a/app/buck2_node/src/attrs/mod.rs +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -pub(crate) mod anon_target_attr_validation; -pub mod attr; -pub mod attr_type; -pub mod coerced_attr; -pub mod coerced_attr_full; -pub mod coerced_attr_with_type; -pub mod coerced_deps_collector; -pub mod coerced_path; -pub mod coercion_context; -pub mod configurable; -pub mod configuration_context; -pub mod configured_attr; -pub mod configured_attr_full; -pub mod configured_info; -pub mod configured_traversal; -pub mod display; -pub mod fmt_context; -pub mod hacks; -pub mod id; -pub mod inspect_options; -pub mod internal; -pub mod json; -pub mod serialize; -pub mod spec; -pub mod testing; -pub mod traversal; -pub mod values; diff --git a/app/buck2_node/src/attrs/spec.rs b/app/buck2_node/src/attrs/spec.rs index 980544ef98914..93d2e49dedf03 100644 --- a/app/buck2_node/src/attrs/spec.rs +++ b/app/buck2_node/src/attrs/spec.rs @@ -36,7 +36,7 @@ pub struct AttributeSpec { attributes: OrderedMap, Attribute>, } -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] pub(crate) enum AttributeSpecError { #[error("User provided attribute `{0}` overrides internal attribute")] InternalAttributeRedefined(String), @@ -128,7 +128,8 @@ impl AttributeSpec { if name == "metadata" { soft_error!( "metadata_attribute", - anyhow::anyhow!("Rules should not declare an attribute named metadata`"), + anyhow::anyhow!("Rules should not declare an attribute named metadata`").into(), + deprecation: true, quiet: true )?; } @@ -277,9 +278,15 @@ impl AttributeSpec { attr_values: &'v AttrValues, key: &str, opts: AttrInspectOptions, - ) -> anyhow::Result> { + ) -> anyhow::Result>> { if let Some(idx) = self.attribute_id_by_name(key) { - Ok(self.known_attr_or_none(idx, attr_values, opts)) + Ok(self + .known_attr_or_none(idx, attr_values, opts) + .map(|value| { + let name = self.attribute_name_by_id(idx); + let attr = self.attribute_by_id(idx); + CoercedAttrFull { name, attr, value } + })) } else { Err(AttributeSpecError::UnknownAttribute(key.to_owned()).into()) } diff --git a/app/buck2_node/src/attrs/testing.rs b/app/buck2_node/src/attrs/testing.rs index b1b09e5b4d10b..6a51a230a81be 100644 --- a/app/buck2_node/src/attrs/testing.rs +++ 
b/app/buck2_node/src/attrs/testing.rs @@ -17,28 +17,33 @@ use buck2_core::configuration::pair::ConfigurationNoExec; use buck2_core::configuration::pair::ConfigurationWithExec; use buck2_core::configuration::transition::applied::TransitionApplied; use buck2_core::configuration::transition::id::TransitionId; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use dupe::Dupe; use starlark_map::ordered_map::OrderedMap; +use starlark_map::unordered_map::UnorderedMap; use crate::attrs::configuration_context::AttrConfigurationContext; +use crate::configuration::resolved::ConfigurationNode; +use crate::configuration::resolved::ConfigurationSettingKey; +use crate::configuration::resolved::ResolvedConfigurationSettings; pub fn configuration_ctx() -> impl AttrConfigurationContext { - struct TestAttrConfigurationContext(ConfigurationData, ConfigurationData, ConfigSettingData); + struct TestAttrConfigurationContext( + ConfigurationData, + ConfigurationData, + ResolvedConfigurationSettings, + ); impl AttrConfigurationContext for TestAttrConfigurationContext { fn cfg(&self) -> ConfigurationNoExec { ConfigurationNoExec::new(self.0.dupe()) } - fn exec_cfg(&self) -> ConfigurationNoExec { - ConfigurationNoExec::new(self.1.dupe()) + fn exec_cfg(&self) -> anyhow::Result { + Ok(ConfigurationNoExec::new(self.1.dupe())) } - fn matches<'a>(&'a self, label: &TargetLabel) -> Option<&'a ConfigSettingData> { - match label.to_string().as_ref() { - "root//other:config" => Some(&self.2), - _ => None, - } + fn resolved_cfg_settings(&self) -> &ResolvedConfigurationSettings { + &self.2 } fn toolchain_cfg(&self) -> ConfigurationWithExec { @@ -49,7 +54,9 @@ pub fn configuration_ctx() -> impl AttrConfigurationContext { panic!("not used in tests") } - fn resolved_transitions(&self) -> &OrderedMap, Arc> { + fn resolved_transitions( + &self, + ) -> anyhow::Result<&OrderedMap, Arc>> { panic!("not used in tests") } } @@ -63,9 +70,22 @@ pub fn configuration_ctx() -> impl AttrConfigurationContext { }, ) .unwrap(), - ConfigSettingData { - constraints: BTreeMap::new(), - buckconfigs: BTreeMap::new(), - }, + ResolvedConfigurationSettings::new(UnorderedMap::from_iter([ + ( + ConfigurationSettingKey::testing_parse("root//other:config"), + ConfigurationNode::new(Some(ConfigSettingData { + constraints: BTreeMap::new(), + buckconfigs: BTreeMap::new(), + })), + ), + ( + ConfigurationSettingKey::testing_parse("root//some:config"), + ConfigurationNode::new(None), + ), + ( + ConfigurationSettingKey::testing_parse("cell1//other:config"), + ConfigurationNode::new(None), + ), + ])), ) } diff --git a/app/buck2_node/src/attrs/traversal.rs b/app/buck2_node/src/attrs/traversal.rs index d05d88f7aad2d..ffb716a8b7b5f 100644 --- a/app/buck2_node/src/attrs/traversal.rs +++ b/app/buck2_node/src/attrs/traversal.rs @@ -9,11 +9,13 @@ use std::sync::Arc; -use buck2_core::buck_path::path::BuckPathRef; use buck2_core::configuration::transition::id::TransitionId; +use buck2_core::package::source_path::SourcePathRef; use buck2_core::plugins::PluginKind; use buck2_core::provider::label::ProvidersLabel; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; + +use crate::configuration::resolved::ConfigurationSettingKey; pub trait CoercedAttrTraversal<'a> { fn dep(&mut self, dep: &'a TargetLabel) -> anyhow::Result<()>; @@ -29,10 +31,10 @@ pub trait CoercedAttrTraversal<'a> { dep: &'a TargetLabel, tr: &Arc, ) -> anyhow::Result<()>; - fn configuration_dep(&mut self, dep: &'a 
TargetLabel) -> anyhow::Result<()>; + fn configuration_dep(&mut self, dep: &'a ConfigurationSettingKey) -> anyhow::Result<()>; fn platform_dep(&mut self, dep: &'a TargetLabel) -> anyhow::Result<()>; fn plugin_dep(&mut self, dep: &'a TargetLabel, kind: &PluginKind) -> anyhow::Result<()>; - fn input(&mut self, input: BuckPathRef) -> anyhow::Result<()>; + fn input(&mut self, input: SourcePathRef) -> anyhow::Result<()>; fn label(&mut self, _label: &'a ProvidersLabel) -> anyhow::Result<()> { Ok(()) } diff --git a/app/buck2_node/src/attrs/values.rs b/app/buck2_node/src/attrs/values.rs index 167d0fc6a46b4..11a8b6e02a9cf 100644 --- a/app/buck2_node/src/attrs/values.rs +++ b/app/buck2_node/src/attrs/values.rs @@ -7,10 +7,14 @@ * of this source tree. */ +use std::sync::Arc; + use allocative::Allocative; +use dupe::Dupe; use starlark_map::vec2; use starlark_map::vec2::Vec2; +use super::attr_type::any_matches::AnyMatches; use crate::attrs::coerced_attr::CoercedAttr; use crate::attrs::id::AttributeId; @@ -66,3 +70,48 @@ impl<'a> IntoIterator for &'a AttrValues { self.sorted.iter() } } + +#[derive( + Debug, + Dupe, + Eq, + PartialEq, + Hash, + Clone, + Allocative, + Default, + derive_more::Display +)] +#[display("{}", self.0.as_ref())] +pub struct TargetModifiersValue(Arc); + +impl TargetModifiersValue { + pub fn new(v: serde_json::Value) -> Self { + Self(Arc::new(v)) + } + + pub fn to_value(&self) -> serde_json::Value { + (*self.0).clone() + } + + pub fn as_json(&self) -> Arc { + self.0.dupe() + } + + pub fn is_empty(&self) -> bool { + match self.0.as_ref() { + serde_json::Value::Null => true, + serde_json::Value::Bool(_) => false, + serde_json::Value::Number(_) => false, + serde_json::Value::String(_) => false, + serde_json::Value::Array(vec) => vec.is_empty(), + serde_json::Value::Object(map) => map.is_empty(), + } + } +} + +impl AnyMatches for TargetModifiersValue { + fn any_matches(&self, filter: &dyn Fn(&str) -> anyhow::Result) -> anyhow::Result { + self.0.any_matches(filter) + } +} diff --git a/app/buck2_node/src/call_stack.rs b/app/buck2_node/src/call_stack.rs index c675397f7df76..7d2a6edd5964e 100644 --- a/app/buck2_node/src/call_stack.rs +++ b/app/buck2_node/src/call_stack.rs @@ -7,7 +7,6 @@ * of this source tree. */ -use std::any::Any; use std::fmt::Debug; use std::fmt::Display; use std::hash::Hash; @@ -16,22 +15,19 @@ use std::hash::Hasher; use allocative::Allocative; use cmp_any::PartialEqAny; +// Duplicate of starlark_syntax::codemap::ResolvedFileLine +pub struct StarlarkTargetCallStackRoot { + /// File name. + pub file: String, + /// Line number is 0-based + pub line: usize, +} + /// Untyped version of `starlark::eval::CallStack`. pub trait StarlarkCallStackImpl: Display + Debug + Send + Sync + 'static { fn eq_token(&self) -> PartialEqAny; fn hash(&self, hashed: &mut dyn Hasher); -} - -impl StarlarkCallStackImpl - for S -{ - fn eq_token(&self) -> PartialEqAny { - PartialEqAny::new(self) - } - - fn hash(&self, mut state: &mut dyn Hasher) { - self.hash(&mut state); - } + fn root_location(&self) -> Option; } /// `buck2_node` crate does not depend on `starlark`, but need to store Starlark call stack. 
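Before the hunk below wires `root_location` through the `StarlarkCallStack` wrapper, a sketch of what the strengthened trait now demands of an implementor. The frame layout here is an assumption, and the `Display` supertrait impl is elided:

use std::hash::Hash;
use std::hash::Hasher;

use cmp_any::PartialEqAny;

#[derive(Debug, PartialEq)]
struct TestCallStack {
    // (file, 0-based line) per frame, outermost (declaration site) first.
    frames: Vec<(String, usize)>,
}

impl StarlarkCallStackImpl for TestCallStack {
    fn eq_token(&self) -> PartialEqAny {
        PartialEqAny::new(self)
    }

    fn hash(&self, mut state: &mut dyn Hasher) {
        Hash::hash(&self.frames, &mut state)
    }

    fn root_location(&self) -> Option<StarlarkTargetCallStackRoot> {
        // The root is the outermost frame, i.e. where the target was declared.
        self.frames
            .first()
            .map(|(file, line)| StarlarkTargetCallStackRoot {
                file: file.clone(),
                line: *line,
            })
    }
}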
@@ -69,4 +65,8 @@ impl StarlarkCallStack { call_stack: Box::new(call_stack), } } + + pub fn root_location(&self) -> Option { + self.call_stack.root_location() + } } diff --git a/app/buck2_node/src/cfg_constructor.rs b/app/buck2_node/src/cfg_constructor.rs index 9e3a6dbf5c8c5..49246f2607296 100644 --- a/app/buck2_node/src/cfg_constructor.rs +++ b/app/buck2_node/src/cfg_constructor.rs @@ -8,6 +8,8 @@ */ use std::fmt::Debug; +use std::future::Future; +use std::pin::Pin; use std::sync::Arc; use allocative::Allocative; @@ -16,15 +18,28 @@ use buck2_core::configuration::data::ConfigurationData; use buck2_util::late_binding::LateBinding; use dice::DiceComputations; +use crate::metadata::key::MetadataKeyRef; +use crate::metadata::value::MetadataValue; +use crate::nodes::unconfigured::TargetNodeRef; +use crate::rule_type::RuleType; +use crate::super_package::SuperPackage; + /// Trait for configuration constructor functions. /// The output of invoking these functions is a PlatformInfo #[async_trait] pub trait CfgConstructorImpl: Send + Sync + Debug + Allocative { - async fn eval( - &self, - ctx: &DiceComputations, - cfg: &ConfigurationData, - ) -> anyhow::Result; + fn eval<'a>( + &'a self, + ctx: &'a mut DiceComputations, + cfg: &'a ConfigurationData, + package_cfg_modifiers: Option<&'a MetadataValue>, + target_cfg_modifiers: Option<&'a MetadataValue>, + cli_modifiers: &'a [String], + rule_type: &'a RuleType, + ) -> Pin> + Send + 'a>>; + + /// Returns the metadata key used to encode modifiers in PACKAGE values and metadata attribute + fn key<'a>(&'a self) -> &'a MetadataKeyRef; } pub static CFG_CONSTRUCTOR_CALCULATION_IMPL: LateBinding< @@ -33,17 +48,15 @@ pub static CFG_CONSTRUCTOR_CALCULATION_IMPL: LateBinding< #[async_trait] pub trait CfgConstructorCalculationImpl: Send + Sync + 'static { - /// Loads and returns cfg constructor functions. - async fn get_cfg_constructor( - &self, - ctx: &DiceComputations, - ) -> anyhow::Result>>; - /// Invokes starlark cfg constructors on provided configuration /// and returns the result. async fn eval_cfg_constructor( &self, - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, + target: TargetNodeRef<'_>, + super_package: &SuperPackage, cfg: ConfigurationData, + cli_modifiers: &Arc>, + rule_name: &RuleType, ) -> anyhow::Result; } diff --git a/app/buck2_node/src/configuration.rs b/app/buck2_node/src/configuration.rs new file mode 100644 index 0000000000000..7c36329d3110c --- /dev/null +++ b/app/buck2_node/src/configuration.rs @@ -0,0 +1,13 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub mod calculation; +pub mod resolved; +pub mod target_platform_detector; +pub mod toolchain_constraints; diff --git a/app/buck2_node/src/configuration/calculation.rs b/app/buck2_node/src/configuration/calculation.rs new file mode 100644 index 0000000000000..f300c672459d8 --- /dev/null +++ b/app/buck2_node/src/configuration/calculation.rs @@ -0,0 +1,38 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use async_trait::async_trait; +use buck2_core::cells::name::CellName; +use buck2_core::configuration::data::ConfigurationData; +use buck2_core::target::label::label::TargetLabel; +use buck2_util::late_binding::LateBinding; +use dice::DiceComputations; + +use crate::configuration::resolved::ConfigurationSettingKey; +use crate::configuration::resolved::ResolvedConfiguration; + +#[async_trait] +pub trait ConfigurationCalculationDyn: Send + Sync + 'static { + async fn get_platform_configuration( + &self, + dice: &mut DiceComputations<'_>, + target: &TargetLabel, + ) -> anyhow::Result; + + async fn get_resolved_configuration( + &self, + dice: &mut DiceComputations<'_>, + target_cfg: &ConfigurationData, + target_node_cell: CellName, + configuration_deps: &[ConfigurationSettingKey], + ) -> buck2_error::Result; +} + +pub static CONFIGURATION_CALCULATION: LateBinding<&'static dyn ConfigurationCalculationDyn> = + LateBinding::new("CONFIGURATION_CALCULATION"); diff --git a/app/buck2_node/src/configuration/mod.rs b/app/buck2_node/src/configuration/mod.rs deleted file mode 100644 index 2af4516d62ef5..0000000000000 --- a/app/buck2_node/src/configuration/mod.rs +++ /dev/null @@ -1,12 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -pub mod resolved; -pub mod target_platform_detector; -pub mod toolchain_constraints; diff --git a/app/buck2_node/src/configuration/resolved.rs b/app/buck2_node/src/configuration/resolved.rs index 71ef543ed379d..495b3ca8f1672 100644 --- a/app/buck2_node/src/configuration/resolved.rs +++ b/app/buck2_node/src/configuration/resolved.rs @@ -8,45 +8,34 @@ */ use std::hash::Hash; -use std::hash::Hasher; use std::sync::Arc; use allocative::Allocative; use buck2_core::configuration::config_setting::ConfigSettingData; -use buck2_core::configuration::data::ConfigurationData; use buck2_core::configuration::pair::ConfigurationNoExec; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use dupe::Dupe; use starlark_map::unordered_map::UnorderedMap; -use starlark_map::Equivalent; -#[derive(Debug, Eq, Allocative)] +/// Key in `select` or an item in `target_compatible_with`. +/// Should point to `config_setting` target, or `constraint_value`. 
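A small sketch of how the key type declared just below is consumed once settings are pre-resolved. Per the rewritten `resolved.rs` further down, `ConfigurationNode::new(None)` records a key that resolved but did not match, while a key absent from the table is a framework invariant violation and panics in `setting_matches`:

use std::collections::BTreeMap;

use buck2_core::configuration::config_setting::ConfigSettingData;
use dupe::Dupe;
use starlark_map::unordered_map::UnorderedMap;

fn demo() {
    let matched = ConfigurationSettingKey::testing_parse("root//other:config");
    let unmatched = ConfigurationSettingKey::testing_parse("root//some:config");
    let settings = ResolvedConfigurationSettings::new(UnorderedMap::from_iter([
        (
            matched.dupe(),
            ConfigurationNode::new(Some(ConfigSettingData {
                constraints: BTreeMap::new(),
                buckconfigs: BTreeMap::new(),
            })),
        ),
        (unmatched.dupe(), ConfigurationNode::new(None)),
    ]));
    assert!(settings.setting_matches(&matched).is_some());
    assert!(settings.setting_matches(&unmatched).is_none());
}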
+#[derive( + Debug, + Eq, + PartialEq, + Hash, + Allocative, + derive_more::Display, + Clone, + Dupe, + Ord, + PartialOrd +)] pub struct ConfigurationSettingKey(pub TargetLabel); -#[derive(Debug, Hash, Eq, PartialEq)] -pub struct ConfigurationSettingKeyRef<'a>(pub &'a TargetLabel); - -impl Equivalent for ConfigurationSettingKeyRef<'_> { - fn equivalent(&self, key: &ConfigurationSettingKey) -> bool { - self == &key.as_ref() - } -} - impl ConfigurationSettingKey { - fn as_ref(&self) -> ConfigurationSettingKeyRef { - ConfigurationSettingKeyRef(&self.0) - } -} - -impl PartialEq for ConfigurationSettingKey { - fn eq(&self, other: &Self) -> bool { - self.as_ref() == other.as_ref() - } -} - -impl Hash for ConfigurationSettingKey { - fn hash(&self, state: &mut H) { - self.as_ref().hash(state); + pub fn testing_parse(label: &str) -> ConfigurationSettingKey { + ConfigurationSettingKey(TargetLabel::testing_parse(label)) } } @@ -60,16 +49,18 @@ impl Hash for ConfigurationSettingKey { pub struct ResolvedConfiguration(Arc); #[derive(Debug, Eq, PartialEq, Hash, Allocative)] -struct ResolvedConfigurationData { +pub(crate) struct ResolvedConfigurationData { cfg: ConfigurationNoExec, + pub(crate) settings: ResolvedConfigurationSettings, +} + +#[derive(Debug, Eq, PartialEq, Hash, Allocative)] +pub struct ResolvedConfigurationSettings { settings: UnorderedMap, } impl ResolvedConfiguration { - pub fn new( - cfg: ConfigurationNoExec, - settings: UnorderedMap, - ) -> Self { + pub fn new(cfg: ConfigurationNoExec, settings: ResolvedConfigurationSettings) -> Self { Self(Arc::new(ResolvedConfigurationData { cfg, settings })) } @@ -77,19 +68,27 @@ impl ResolvedConfiguration { &self.0.cfg } - pub fn setting_matches(&self, key: ConfigurationSettingKeyRef) -> Option<&ConfigSettingData> { - let configuration_node = self.0.settings.get(&key).expect( - "framework should've ensured all necessary configuration setting keys are present", - ); - if configuration_node.matches() { - Some(configuration_node.configuration_data()) - } else { - None - } + pub fn settings(&self) -> &ResolvedConfigurationSettings { + &self.0.settings + } +} + +impl ResolvedConfigurationSettings { + pub fn new( + settings: UnorderedMap, + ) -> ResolvedConfigurationSettings { + ResolvedConfigurationSettings { settings } + } + + pub fn empty() -> ResolvedConfigurationSettings { + ResolvedConfigurationSettings::new(UnorderedMap::new()) } - pub fn matches(&self, label: &TargetLabel) -> Option<&ConfigSettingData> { - self.setting_matches(ConfigurationSettingKeyRef(label)) + pub fn setting_matches(&self, key: &ConfigurationSettingKey) -> Option<&ConfigSettingData> { + let Some(configuration_node) = self.settings.get(key) else { + panic!("unresolved configuration setting: `{key}`"); + }; + configuration_node.configuration_data() } } @@ -99,46 +98,16 @@ pub struct ConfigurationNode(Arc); #[derive(Debug, Eq, PartialEq, Hash, Allocative)] struct ConfigurationNodeData { - // This is stored as a split Configuration/TargetLabel (rather than a ConfiguredTargetLabel) because it's not - // quite the same as what you would think of a ConfiguredTargetLabel. Importantly, we don't do analysis of the - // target with this configuration, instead we interpret the results of the analysis of the target in the "unbound" - // configuration within the context of this configuration. - cfg: ConfigurationData, - - label: TargetLabel, - - config_setting: ConfigSettingData, - - /// Indicates whether this node "matches" the configuration. 
- /// - /// For example, a configuration node that requires a list of constraints "matches" a configuration where all of those constraints are satisfied. - matches: bool, + /// `None` when config settings does not match the configuration. + config_setting: Option, } impl ConfigurationNode { - pub fn new( - cfg: ConfigurationData, - label: TargetLabel, - config_setting: ConfigSettingData, - matches: bool, - ) -> Self { - Self(Arc::new(ConfigurationNodeData { - cfg, - label, - config_setting, - matches, - })) - } - - pub fn matches(&self) -> bool { - self.0.matches - } - - pub fn label(&self) -> &TargetLabel { - &self.0.label + pub fn new(config_setting: Option) -> Self { + Self(Arc::new(ConfigurationNodeData { config_setting })) } - pub fn configuration_data(&self) -> &ConfigSettingData { - &self.0.config_setting + pub fn configuration_data(&self) -> Option<&ConfigSettingData> { + self.0.config_setting.as_ref() } } diff --git a/app/buck2_node/src/configuration/target_platform_detector.rs b/app/buck2_node/src/configuration/target_platform_detector.rs index e38368b5e648c..8d7d335d8f421 100644 --- a/app/buck2_node/src/configuration/target_platform_detector.rs +++ b/app/buck2_node/src/configuration/target_platform_detector.rs @@ -27,13 +27,13 @@ use allocative::Allocative; use anyhow::Context; use buck2_core::cells::cell_path::CellPath; use buck2_core::cells::name::CellName; +use buck2_core::cells::CellAliasResolver; use buck2_core::cells::CellResolver; +use buck2_core::pattern::pattern::ParsedPattern; use buck2_core::pattern::pattern_type::TargetPatternExtra; -use buck2_core::pattern::ParsedPattern; -use buck2_core::target::label::TargetLabel; -use thiserror::Error; +use buck2_core::target::label::label::TargetLabel; -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum DetectorSpecParseError { #[error( "`target:` platform detector only supports a recursive pattern matcher (like `cell//package/...`) but got `{0}`" @@ -64,6 +64,7 @@ impl TargetPlatformDetector { spec: &str, cell_name: CellName, cell_resolver: &CellResolver, + cell_alias_resolver: &CellAliasResolver, ) -> anyhow::Result { let mut detectors = Vec::new(); for value in spec.split_whitespace() { @@ -75,8 +76,11 @@ impl TargetPlatformDetector { matcher, cell_name, cell_resolver, + cell_alias_resolver, )? 
{ - buck2_core::pattern::ParsedPattern::Recursive(root) => root, + buck2_core::pattern::pattern::ParsedPattern::Recursive(root) => { + root + } _ => { return Err( DetectorSpecParseError::TargetKindRequiresRecursivePattern( @@ -90,6 +94,7 @@ impl TargetPlatformDetector { target, cell_name, cell_resolver, + cell_alias_resolver, ) .and_then(|x| x.as_target_label(target)) .context("Error parsing target platform detector spec")?; @@ -130,33 +135,35 @@ mod tests { use buck2_core::cells::alias::NonEmptyCellAlias; use buck2_core::cells::cell_root_path::CellRootPathBuf; - use buck2_core::cells::name::CellName; use super::*; #[test] fn test_parse_errors() -> anyhow::Result<()> { - let cell_resolver = CellResolver::testing_with_names_and_paths_with_alias(&[ - ( - CellName::testing_new("root"), - CellRootPathBuf::testing_new(""), - HashMap::from_iter([( - NonEmptyCellAlias::testing_new("alias1"), + let cell_resolver = CellResolver::testing_with_names_and_paths_with_alias( + &[ + ( + CellName::testing_new("root"), + CellRootPathBuf::testing_new(""), + ), + ( CellName::testing_new("cell1"), - )]), - ), - ( + CellRootPathBuf::testing_new("cell1"), + ), + ], + HashMap::from_iter([( + NonEmptyCellAlias::testing_new("alias1"), CellName::testing_new("cell1"), - CellRootPathBuf::testing_new("cell1"), - HashMap::new(), - ), - ]); + )]), + ); + let cell_alias_resolver = cell_resolver.root_cell_cell_alias_resolver(); let check_fails = |spec: &str| { if TargetPlatformDetector::parse_spec( spec, CellName::testing_new("root"), &cell_resolver, + &cell_alias_resolver, ) .is_ok() { @@ -168,8 +175,13 @@ mod tests { }; let check_good = |spec: &str| { - TargetPlatformDetector::parse_spec(spec, CellName::testing_new("root"), &cell_resolver) - .unwrap_or_else(|_| panic!("Expected parsing `{}` to succeed.", spec)) + TargetPlatformDetector::parse_spec( + spec, + CellName::testing_new("root"), + &cell_resolver, + &cell_alias_resolver, + ) + .unwrap_or_else(|_| panic!("Expected parsing `{}` to succeed.", spec)) }; check_good("target://...->//:tgt"); @@ -195,26 +207,29 @@ mod tests { #[test] fn test_detect() -> anyhow::Result<()> { - let cell_resolver = CellResolver::testing_with_names_and_paths_with_alias(&[ - ( - CellName::testing_new("root"), - CellRootPathBuf::testing_new(""), - HashMap::from_iter([( - NonEmptyCellAlias::testing_new("alias1"), + let cell_resolver = CellResolver::testing_with_names_and_paths_with_alias( + &[ + ( + CellName::testing_new("root"), + CellRootPathBuf::testing_new(""), + ), + ( CellName::testing_new("cell1"), - )]), - ), - ( + CellRootPathBuf::testing_new("cell1"), + ), + ], + HashMap::from_iter([( + NonEmptyCellAlias::testing_new("alias1"), CellName::testing_new("cell1"), - CellRootPathBuf::testing_new("cell1"), - HashMap::new(), - ), - ]); + )]), + ); + let cell_alias_resolver = cell_resolver.root_cell_cell_alias_resolver(); let detector = TargetPlatformDetector::parse_spec( "target://lib/...->//:p1 target://lib2/foo/...->//:p2 target:alias1//map/...->alias1//:alias", CellName::testing_new("root"), &cell_resolver, + &cell_alias_resolver, )?; let p1 = TargetLabel::testing_parse("root//:p1"); diff --git a/app/buck2_node/src/configuration/toolchain_constraints.rs b/app/buck2_node/src/configuration/toolchain_constraints.rs index 142614a9983a1..51543699f0c0c 100644 --- a/app/buck2_node/src/configuration/toolchain_constraints.rs +++ b/app/buck2_node/src/configuration/toolchain_constraints.rs @@ -10,35 +10,50 @@ use std::sync::Arc; use allocative::Allocative; -use 
buck2_core::configuration::compatibility::IncompatiblePlatformReason; -use buck2_core::execution_types::execution::ExecutionPlatform; +use buck2_core::target::label::label::TargetLabel; use dupe::Dupe; -use starlark_map::small_map::SmallMap; +use dupe::IterDupedExt; + +use crate::configuration::resolved::ConfigurationSettingKey; /// The constraint introduced on execution platform resolution by /// a toolchain rule (reached via a toolchain_dep). -#[derive(Dupe, Clone, PartialEq, Eq, Allocative)] -pub struct ToolchainConstraints { - // We know the set of execution platforms is fixed throughout the build, - // so we can record just the ones we are incompatible with, - // and assume all others we _are_ compatible with. - incompatible: Arc>>, +#[derive(Debug, Dupe, Clone, PartialEq, Eq, Hash, Allocative)] +pub struct ToolchainConstraints(Arc); + +#[derive(Debug, PartialEq, Eq, Hash, Allocative)] +struct ToolchainConstraintsImpl { + exec_deps: Vec, + exec_compatible_with: Vec, } impl ToolchainConstraints { - pub fn new(incompatible: SmallMap>) -> Self { - Self { - incompatible: Arc::new(incompatible), - } + pub fn new( + exec_deps: &[TargetLabel], + exec_compatible_with: &[ConfigurationSettingKey], + inherited_toolchains: &[ToolchainConstraints], + ) -> Self { + Self(Arc::new(ToolchainConstraintsImpl { + exec_deps: inherited_toolchains + .iter() + .flat_map(|i| &i.0.exec_deps) + .chain(exec_deps) + .duped() + .collect(), + exec_compatible_with: inherited_toolchains + .iter() + .flat_map(|i| &i.0.exec_compatible_with) + .chain(exec_compatible_with) + .duped() + .collect(), + })) + } + + pub fn exec_deps(&self) -> impl Iterator { + self.0.exec_deps.iter() } - pub fn allows( - &self, - exec_platform: &ExecutionPlatform, - ) -> Result<(), Arc> { - match self.incompatible.get(exec_platform) { - None => Ok(()), - Some(e) => Err(e.dupe()), - } + pub fn exec_compatible_with(&self) -> impl Iterator { + self.0.exec_compatible_with.iter() } } diff --git a/app/buck2_node/src/configured_universe.rs b/app/buck2_node/src/configured_universe.rs index 97ae814e77a98..662caab45035d 100644 --- a/app/buck2_node/src/configured_universe.rs +++ b/app/buck2_node/src/configured_universe.rs @@ -9,61 +9,87 @@ use std::collections::BTreeMap; use std::collections::BTreeSet; +use std::future::Future; +use std::pin::Pin; use allocative::Allocative; +use buck2_common::global_cfg_options::GlobalCfgOptions; use buck2_common::pattern::resolve::ResolvedPattern; use buck2_core::cells::cell_path::CellPath; +use buck2_core::fs::project_rel_path::ProjectRelativePath; use buck2_core::package::PackageLabel; +use buck2_core::pattern::pattern::PackageSpec; use buck2_core::pattern::pattern_type::PatternType; use buck2_core::pattern::pattern_type::TargetPatternExtra; -use buck2_core::pattern::PackageSpec; use buck2_core::provider::label::ConfiguredProvidersLabel; -use buck2_core::target::label::TargetLabel; -use buck2_core::target::name::TargetName; +use buck2_core::target::configured_target_label::ConfiguredTargetLabel; +use buck2_core::target::label::label::TargetLabel; +use buck2_core::target::name::TargetNameRef; +use buck2_events::dispatch::span; use buck2_query::query::syntax::simple::eval::label_indexed::LabelIndexed; use buck2_query::query::syntax::simple::eval::set::TargetSet; -use derivative::Derivative; +use buck2_util::late_binding::LateBinding; +use buck2_util::self_ref::RefData; +use buck2_util::self_ref::SelfRef; +use dice::DiceComputations; use dupe::Dupe; -use dupe::IterDupedExt; use either::Either; use itertools::Itertools; 
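The imports above pull in `SelfRef`/`RefData`, which the rewritten `CqueryUniverse` below uses to keep borrowed `TargetNameRef`/`ConfiguredTargetNodeRef` views alive alongside the owned `TargetSet`. A reduced sketch of the pattern, following the `try_new`/`data` usage visible in this file (exact bounds in `buck2_util::self_ref` may differ):

use buck2_util::self_ref::RefData;
use buck2_util::self_ref::SelfRef;

struct NamesType;

impl RefData for NamesType {
    // The view type, generic over the lifetime of the owner it borrows from.
    type Data<'a> = Vec<&'a str>;
}

fn build_names(owned: Vec<String>) -> anyhow::Result<SelfRef<NamesType>> {
    // Owner and borrowing view are packaged into one self-referential value.
    SelfRef::try_new(owned, |strings| {
        anyhow::Ok(strings.iter().map(|s| s.as_str()).collect())
    })
}

fn main() -> anyhow::Result<()> {
    let names = build_names(vec!["foo".to_owned(), "bar".to_owned()])?;
    // The view stays valid because the owner is kept alive inside SelfRef.
    assert_eq!(names.data().len(), 2);
    Ok(())
}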
use crate::nodes::configured::ConfiguredTargetNode; +use crate::nodes::configured::ConfiguredTargetNodeRef; use crate::nodes::configured_node_visit_all_deps::configured_node_visit_all_deps; +pub static UNIVERSE_FROM_LITERALS: LateBinding< + for<'c> fn( + &'c mut DiceComputations<'_>, + &'c ProjectRelativePath, + &'c [String], + GlobalCfgOptions, + ) -> Pin> + Send + 'c>>, +> = LateBinding::new("UNIVERSE_FROM_LITERALS"); + +#[derive(Debug)] +struct CqueryUniverseInner<'a> { + targets: BTreeMap< + PackageLabel, + BTreeMap<&'a TargetNameRef, BTreeSet>>>, + >, +} + +struct CqueryUniverseInnerType; + +impl RefData for CqueryUniverseInnerType { + type Data<'a> = CqueryUniverseInner<'a>; +} + /// Subset of targets `cquery` command works with. /// /// Targets are resolved in the universe, and file owners are also resolved in the universe. -#[derive(Allocative, Derivative, Clone)] -#[derivative(Debug)] +#[derive(Allocative, Debug)] pub struct CqueryUniverse { - targets: - BTreeMap>>>, + data: SelfRef, } -impl CqueryUniverse { +impl<'a> CqueryUniverseInner<'a> { pub fn new( targets: BTreeMap< PackageLabel, - BTreeMap>>, + BTreeMap<&'a TargetNameRef, BTreeSet>>>, >, - ) -> CqueryUniverse { - CqueryUniverse { targets } + ) -> Self { + CqueryUniverseInner { targets } } - pub fn len(&self) -> usize { - self.targets.values().map(|e| e.values().len()).sum() - } - - pub async fn build( - universe: &TargetSet, - ) -> anyhow::Result { + fn build_inner( + universe: &'a TargetSet, + ) -> anyhow::Result> { let mut targets: BTreeMap< PackageLabel, - BTreeMap>>, + BTreeMap<&TargetNameRef, BTreeSet>>, > = BTreeMap::new(); - configured_node_visit_all_deps(universe.iter().duped(), |target| { + configured_node_visit_all_deps(universe.iter().map(|t| t.as_ref()), |target| { let label = target.label(); let package_targets: &mut _ = targets .entry(label.pkg().dupe()) @@ -72,17 +98,44 @@ impl CqueryUniverse { let nodes: &mut _ = match package_targets.get_mut(label.name()) { Some(v) => v, None => package_targets - .entry(label.name().to_owned()) + .entry(label.name()) .or_insert_with(BTreeSet::new), }; - nodes.insert(LabelIndexed(target)); + let inserted = nodes.insert(LabelIndexed(target)); + assert!(inserted, "Visited targets must be unique"); + }); - Ok(()) - }) - .await?; + Ok(CqueryUniverseInner::new(targets)) + } +} - Ok(CqueryUniverse::new(targets)) +impl CqueryUniverse { + pub fn len(&self) -> usize { + self.data + .data() + .targets + .values() + .map(|e| e.values().len()) + .sum() + } + + pub fn iter<'a>(&'a self) -> impl Iterator> { + self.data + .data() + .targets + .values() + .flat_map(|map| map.values().flat_map(|set| set.iter().map(|node| node.0))) + } + + pub fn build(universe: &TargetSet) -> anyhow::Result { + span(buck2_data::CqueryUniverseBuildStart {}, || { + let r = SelfRef::try_new(universe.clone(), |universe| { + CqueryUniverseInner::build_inner(universe) + }) + .map(|data| CqueryUniverse { data }); + (r, buck2_data::CqueryUniverseBuildEnd {}) + }) } pub fn get( @@ -93,7 +146,7 @@ impl CqueryUniverse { for (package, spec) in &resolved_pattern.specs { targets.extend( self.get_from_package(package.dupe(), spec) - .map(|(node, TargetPatternExtra)| node), + .map(|(node, TargetPatternExtra)| node.to_owned()), ); } targets @@ -110,17 +163,35 @@ impl CqueryUniverse { let package = label.pkg(); let name = label.name(); let results = self + .data + .data() .targets .get(&package) .into_iter() .flat_map(move |package_universe| package_universe.get(name).into_iter().flatten()) - .map(|node| node.0.clone()); + 
.map(|node| node.0.to_owned()); configured_nodes.extend(results); } configured_nodes } + pub fn get_target_label(&self, label: &TargetLabel) -> Vec { + self.get_from_package( + label.pkg(), + &PackageSpec::Targets(vec![(label.name().to_owned(), TargetPatternExtra)]), + ) + .into_iter() + .map(|(node, _extra)| node.label().dupe()) + .collect() + } + + pub fn contains(&self, label: &ConfiguredTargetLabel) -> bool { + self.get_target_label(label.unconfigured()) + .iter() + .any(|t| t == label) + } + pub fn get_provider_labels( &self, resolved_pattern: &ResolvedPattern

<ConfiguredProvidersPatternExtra>, @@ -141,29 +212,34 @@ impl CqueryUniverse { &'a self, package: PackageLabel, spec: &'a PackageSpec<P>

    , - ) -> impl Iterator + 'a { - self.targets + ) -> impl Iterator, P)> + 'a { + self.data + .data() + .targets .get(&package) .into_iter() .flat_map(move |package_universe| match spec { PackageSpec::Targets(names) => { Either::Left(names.iter().flat_map(|(name, extra)| { - package_universe.get(name).into_iter().flat_map(|nodes| { - nodes.iter().filter_map(|node| { - if extra.matches_cfg(node.0.label().cfg()) { - Some((&node.0, extra.clone())) - } else { - None - } + package_universe + .get(name.as_ref()) + .into_iter() + .flat_map(|nodes| { + nodes.iter().filter_map(|node| { + if extra.matches_cfg(node.0.label().cfg()) { + Some((node.0, extra.clone())) + } else { + None + } + }) }) - }) })) } PackageSpec::All => Either::Right( package_universe .values() .flatten() - .map(|node| (&node.0, P::default())), + .map(|node| (node.0, P::default())), ), }) } @@ -182,13 +258,13 @@ impl CqueryUniverse { // We do it because the map is by `Package`, // and `BTreeMap` does not allow lookup by equivalent key. let package = PackageLabel::from_cell_path(package); - let package_data = match self.targets.get(&package) { + let package_data = match self.data.data().targets.get(&package) { None => continue, Some(package_data) => package_data, }; for node in package_data.values().flatten() { if node.0.inputs().contains(path) { - nodes.push(node.0.dupe()); + nodes.push(node.0.to_owned()); } } } @@ -202,10 +278,11 @@ mod tests { use buck2_core::configuration::bound_label::BoundConfigurationLabel; use buck2_core::configuration::data::ConfigurationData; use buck2_core::configuration::hash::ConfigurationHash; + use buck2_core::execution_types::execution::ExecutionPlatformResolution; use buck2_core::package::PackageLabel; + use buck2_core::pattern::pattern::PackageSpec; use buck2_core::pattern::pattern_type::ConfigurationPredicate; use buck2_core::pattern::pattern_type::ConfiguredProvidersPatternExtra; - use buck2_core::pattern::PackageSpec; use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_core::provider::label::NonDefaultProvidersName; use buck2_core::provider::label::ProviderName; @@ -222,9 +299,9 @@ mod tests { #[tokio::test] async fn test_get_from_package_by_configured_provider_pattern() { fn providers_name() -> ProvidersName { - ProvidersName::NonDefault(Box::new(NonDefaultProvidersName::Named(Box::new([ - ProviderName::new("P".to_owned()).unwrap(), - ])))) + ProvidersName::NonDefault(triomphe::Arc::new(NonDefaultProvidersName::Named( + buck2_util::arc_str::ArcSlice::new([ProviderName::new("P".to_owned()).unwrap()]), + ))) } fn resolved_pattern( @@ -234,7 +311,7 @@ mod tests { specs: IndexMap::from_iter([( PackageLabel::testing_parse("foo//bar"), PackageSpec::Targets(Vec::from_iter([( - TargetName::unchecked_new("baz"), + TargetName::testing_new("baz"), ConfiguredProvidersPatternExtra { providers: providers_name(), cfg, @@ -250,19 +327,22 @@ mod tests { CqueryUniverse::build(&TargetSet::from_iter([ConfiguredTargetNode::testing_new( target_label.dupe(), "idris_library", + ExecutionPlatformResolution::new(None, Vec::new()), + vec![], + vec![], + None, )])) - .await .unwrap(); let provider_label = ConfiguredProvidersLabel::new(target_label, providers_name()); // Any configuration. assert_eq!( - Vec::from_iter([provider_label.clone()]), + Vec::from_iter([provider_label.dupe()]), universe.get_provider_labels(&resolved_pattern(ConfigurationPredicate::Any)) ); // Configuration label. 
assert_eq!( - Vec::from_iter([provider_label.clone()]), + Vec::from_iter([provider_label.dupe()]), universe.get_provider_labels(&resolved_pattern(ConfigurationPredicate::Bound( BoundConfigurationLabel::new( ConfigurationData::testing_new().label().unwrap().to_owned() diff --git a/app/buck2_node/src/execution.rs b/app/buck2_node/src/execution.rs new file mode 100644 index 0000000000000..30fbb4b4de9e8 --- /dev/null +++ b/app/buck2_node/src/execution.rs @@ -0,0 +1,65 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::sync::Arc; + +use async_trait::async_trait; +use buck2_common::legacy_configs::key::BuckconfigKeyRef; +use buck2_core::cells::name::CellName; +use buck2_core::execution_types::execution::ExecutionPlatformResolution; +use buck2_core::execution_types::execution_platforms::ExecutionPlatforms; +use buck2_core::target::label::label::TargetLabel; +use buck2_core::target::target_configured_target_label::TargetConfiguredTargetLabel; +use buck2_util::late_binding::LateBinding; +use dice::DiceComputations; + +use crate::configuration::resolved::ConfigurationSettingKey; + +pub const EXECUTION_PLATFORMS_BUCKCONFIG: BuckconfigKeyRef = BuckconfigKeyRef { + section: "build", + property: "execution_platforms", +}; + +#[async_trait] +pub trait GetExecutionPlatformsImpl: 'static + Send + Sync { + async fn get_execution_platforms_impl( + &self, + dice_computations: &mut DiceComputations<'_>, + ) -> buck2_error::Result>; + + async fn execution_platform_resolution_one_for_cell( + &self, + dice: &mut DiceComputations<'_>, + exec_deps: Arc<[TargetLabel]>, + toolchain_deps: Arc<[TargetConfiguredTargetLabel]>, + exec_compatible_with: Arc<[ConfigurationSettingKey]>, + cell: CellName, + ) -> buck2_error::Result; +} + +pub static GET_EXECUTION_PLATFORMS: LateBinding<&'static dyn GetExecutionPlatformsImpl> = + LateBinding::new("EXECUTION_PLATFORMS"); + +#[allow(async_fn_in_trait)] +pub trait GetExecutionPlatforms: Send { + /// Returns a list of the configured execution platforms. This looks up the providers on the target + /// configured **in the root cell's buckconfig** with key `build.execution_platforms`. If there's no + /// value configured, it will return `None` which indicates we should fallback to the legacy execution + /// platform behavior. + async fn get_execution_platforms(&mut self) -> buck2_error::Result>; +} + +impl GetExecutionPlatforms for DiceComputations<'_> { + async fn get_execution_platforms(&mut self) -> buck2_error::Result> { + GET_EXECUTION_PLATFORMS + .get()? + .get_execution_platforms_impl(self) + .await + } +} diff --git a/app/buck2_node/src/lib.rs b/app/buck2_node/src/lib.rs index 4d415d29eb2dd..b2ea497f1ed67 100644 --- a/app/buck2_node/src/lib.rs +++ b/app/buck2_node/src/lib.rs @@ -7,20 +7,20 @@ * of this source tree. 
*/ +#![feature(error_generic_member_access)] #![feature(box_patterns)] -// Plugins -#![cfg_attr(feature = "gazebo_lint", feature(plugin))] -#![cfg_attr(feature = "gazebo_lint", allow(deprecated))] // :( -#![cfg_attr(feature = "gazebo_lint", plugin(gazebo_lint))] +#![allow(clippy::len_without_is_empty)] pub mod attrs; pub mod call_stack; pub mod cfg_constructor; pub mod configuration; pub mod configured_universe; +pub mod execution; pub mod load_patterns; pub mod metadata; pub mod nodes; +pub mod oncall; pub mod package; pub mod package_values_calculation; pub mod provider_id_set; diff --git a/app/buck2_node/src/load_patterns.rs b/app/buck2_node/src/load_patterns.rs index 5bdc699c852e9..d046305f2a9ad 100644 --- a/app/buck2_node/src/load_patterns.rs +++ b/app/buck2_node/src/load_patterns.rs @@ -11,20 +11,19 @@ use std::collections::BTreeMap; use std::collections::HashSet; use std::sync::Arc; -use buck2_common::dice::cells::HasCellResolver; -use buck2_common::dice::file_ops::HasFileOps; +use buck2_common::dice::file_ops::DiceFileOps; use buck2_common::pattern::package_roots::collect_package_roots; use buck2_common::pattern::resolve::ResolvedPattern; -use buck2_common::result::SharedResult; -use buck2_common::result::ToSharedResultExt; use buck2_core::package::PackageLabel; +use buck2_core::pattern::pattern::ParsedPattern; use buck2_core::pattern::pattern_type::PatternType; -use buck2_core::pattern::ParsedPattern; use buck2_core::target::name::TargetName; use buck2_events::dispatch::console_message; +use buck2_futures::owning_future::OwningFuture; +use buck2_util::hash::BuckHasherBuilder; use dice::DiceComputations; +use dice::LinearRecomputeDiceComputations; use dupe::Dupe; -use fnv::FnvBuildHasher; use futures::future::BoxFuture; use futures::stream::FuturesUnordered; use futures::FutureExt; @@ -35,16 +34,17 @@ use itertools::Itertools; use crate::nodes::eval_result::EvaluationResult; use crate::nodes::frontend::TargetGraphCalculation; use crate::nodes::unconfigured::TargetNode; +use crate::nodes::unconfigured::TargetNodeRef; use crate::super_package::SuperPackage; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum BuildErrors { #[error("Did not find package with name `{0}`.")] MissingPackage(PackageLabel), } async fn resolve_patterns_and_load_buildfiles<'c, T: PatternType>( - ctx: &'c DiceComputations, + ctx: &'c LinearRecomputeDiceComputations<'_>, parsed_patterns: Vec>, ) -> anyhow::Result<( ResolvedPattern, @@ -53,9 +53,9 @@ async fn resolve_patterns_and_load_buildfiles<'c, T: PatternType>( let mut spec = ResolvedPattern::::new(); let mut recursive_packages = Vec::new(); - struct Builder<'c> { - ctx: &'c DiceComputations, - already_loading: HashSet, + struct Builder<'c, 'd> { + ctx: &'c LinearRecomputeDiceComputations<'d>, + already_loading: HashSet, load_package_futs: FuturesUnordered>)>>, } @@ -66,18 +66,21 @@ async fn resolve_patterns_and_load_buildfiles<'c, T: PatternType>( already_loading: HashSet::default(), }; - impl<'c> Builder<'c> { + impl Builder<'_, '_> { fn load_package(&mut self, package: PackageLabel) { if !self.already_loading.insert(package.dupe()) { return; } // it's important that this is not async and the temporary spawn happens when the function is called as we don't immediately start polling these. 
+ // so DO NOT USE async move here self.load_package_futs.push( - self.ctx - .get_interpreter_results(package.dupe()) - .map(|res| (package, res)) - .boxed(), + OwningFuture::new(self.ctx.get(), move |ctx| { + ctx.get_interpreter_results(package) + .map(move |res| (package, res)) + .boxed() + }) + .boxed(), ) } } @@ -98,10 +101,7 @@ async fn resolve_patterns_and_load_buildfiles<'c, T: PatternType>( } } - let file_ops = ctx.file_ops(); - let cell_resolver = ctx.get_cell_resolver().await?; - - collect_package_roots(&file_ops, &cell_resolver, recursive_packages, |package| { + collect_package_roots(&DiceFileOps(&ctx), recursive_packages, |package| { let package = package?; spec.add_package(package.dupe()); builder.load_package(package); @@ -113,7 +113,7 @@ async fn resolve_patterns_and_load_buildfiles<'c, T: PatternType>( } pub struct LoadedPatterns { - results: BTreeMap>>, + results: BTreeMap>>, } pub struct PackageLoadedPatterns { @@ -122,16 +122,16 @@ pub struct PackageLoadedPatterns { } impl PackageLoadedPatterns { - pub fn iter(&self) -> impl Iterator { - self.targets.iter() + pub fn iter(&self) -> impl Iterator)> { + self.targets.iter().map(|(k, v)| (k, v.as_ref())) } pub fn keys(&self) -> impl Iterator { self.targets.keys() } - pub fn values(&self) -> impl Iterator { - self.targets.values() + pub fn values(&self) -> impl Iterator> { + self.targets.values().map(|v| v.as_ref()) } pub fn into_values(self) -> impl Iterator { @@ -155,7 +155,7 @@ impl IntoIterator for PackageLoadedPatterns { impl LoadedPatterns { pub fn iter( &self, - ) -> impl Iterator>)> { + ) -> impl Iterator>)> { self.results.iter().map(|(k, v)| (k.dupe(), v)) } @@ -163,15 +163,17 @@ impl LoadedPatterns { #[allow(clippy::should_implement_trait)] pub fn into_iter( self, - ) -> impl Iterator>)> { + ) -> impl Iterator>)> { self.results.into_iter() } - pub fn iter_loaded_targets(&self) -> impl Iterator> { + pub fn iter_loaded_targets( + &self, + ) -> impl Iterator>> { self.results .values() .map(|result| match result { - Ok(pkg) => Ok(pkg.targets.values()), + Ok(pkg) => Ok(pkg.targets.values().map(|t| t.as_ref())), Err(e) => Err(e.dupe()), }) .flatten_ok() @@ -184,7 +186,7 @@ impl LoadedPatterns { let targets = result .as_ref() .map(|pkg| pkg.targets.values().map(|t| t.dupe()).collect::>()) - .map_err(|e| anyhow::Error::new(e.dupe())); + .map_err(|e| e.dupe().into()); (package.dupe(), targets) }) } @@ -213,10 +215,11 @@ impl MissingTargetBehavior { /// Finds all the requested targets in `spec` from a map of loaded targets in `load_result`. 
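A sketch of driving the updated entry point from a caller, assuming the elided pattern generic is `TargetPatternExtra`; `load_patterns` and `MissingTargetBehavior` are the items defined in this file:

use buck2_core::pattern::pattern::ParsedPattern;
use buck2_core::pattern::pattern_type::TargetPatternExtra;
use dice::DiceComputations;

async fn print_loaded(
    ctx: &mut DiceComputations<'_>,
    patterns: Vec<ParsedPattern<TargetPatternExtra>>,
) -> anyhow::Result<()> {
    // Warn: missing targets are reported to the console instead of failing.
    let loaded = load_patterns(ctx, patterns, MissingTargetBehavior::Warn).await?;
    for (package, result) in loaded.iter() {
        match result {
            Ok(pkg) => {
                for (name, _node) in pkg.iter() {
                    eprintln!("loaded {package}:{name}");
                }
            }
            Err(e) => eprintln!("failed to load {package}: {e}"),
        }
    }
    Ok(())
}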
fn apply_spec( spec: ResolvedPattern, - load_results: BTreeMap>>, + load_results: BTreeMap>>, skip_missing_targets: MissingTargetBehavior, ) -> anyhow::Result> { - let mut all_targets: BTreeMap<_, SharedResult>> = BTreeMap::new(); + let mut all_targets: BTreeMap<_, buck2_error::Result>> = + BTreeMap::new(); for (pkg, pkg_spec) in spec.specs.into_iter() { let result = match load_results.get(&pkg) { Some(r) => r, @@ -227,7 +230,9 @@ fn apply_spec( let (label_to_node, missing) = res.apply_spec(pkg_spec); if let Some(missing) = missing { match skip_missing_targets { - MissingTargetBehavior::Fail => return Err(missing.into_error()), + MissingTargetBehavior::Fail => { + return Err(missing.into_errors().0.into()); + } MissingTargetBehavior::Warn => { console_message(missing.missing_targets_warning()) } @@ -254,17 +259,21 @@ fn apply_spec( } pub async fn load_patterns( - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, parsed_patterns: Vec>, skip_missing_targets: MissingTargetBehavior, ) -> anyhow::Result> { - let (spec, mut load_package_futs) = - resolve_patterns_and_load_buildfiles(ctx, parsed_patterns).await?; - - let mut results: BTreeMap>> = BTreeMap::new(); - while let Some((pkg, load_res)) = load_package_futs.next().await { - results.insert(pkg, load_res.shared_error()); - } + ctx.with_linear_recompute(|ctx| async move { + let (spec, mut load_package_futs) = + resolve_patterns_and_load_buildfiles(&ctx, parsed_patterns).await?; + + let mut results: BTreeMap>> = + BTreeMap::new(); + while let Some((pkg, load_res)) = load_package_futs.next().await { + results.insert(pkg, load_res.map_err(buck2_error::Error::from)); + } - apply_spec(spec, results, skip_missing_targets) + apply_spec(spec, results, skip_missing_targets) + }) + .await } diff --git a/app/buck2_node/src/metadata.rs b/app/buck2_node/src/metadata.rs new file mode 100644 index 0000000000000..29bc32a4098f7 --- /dev/null +++ b/app/buck2_node/src/metadata.rs @@ -0,0 +1,13 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub mod key; +pub mod map; +pub mod super_package_values; +pub mod value; diff --git a/app/buck2_node/src/metadata/key.rs b/app/buck2_node/src/metadata/key.rs index eb8db1c25d407..5df256c02c4c7 100644 --- a/app/buck2_node/src/metadata/key.rs +++ b/app/buck2_node/src/metadata/key.rs @@ -8,14 +8,15 @@ */ use std::borrow::Borrow; -use std::borrow::ToOwned; use allocative::Allocative; +use buck2_util::arc_str::ArcStr; use derive_more::Display; +use dupe::Dupe; use ref_cast::RefCast; use serde::Serialize; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] pub enum MetadataKeyError { #[error("key must contain exactly one dot: `{0}`")] KeyMustContainExactlyOneDot(String), @@ -24,18 +25,14 @@ pub enum MetadataKeyError { /// A String that we validated conforms to our rules for metadata keys (whih are quite relaxed: /// they must contain exactly one dot). 
#[derive( - PartialEq, Eq, PartialOrd, Ord, Display, Debug, Clone, Allocative, Serialize, Hash + PartialEq, Eq, PartialOrd, Ord, Display, Debug, Clone, Dupe, Allocative, Serialize, Hash )] #[serde(transparent)] -pub struct MetadataKey(String); +pub struct MetadataKey(ArcStr); impl MetadataKey { pub fn as_str(&self) -> &str { - &self.0 - } - - pub fn into_string(self) -> String { - self.0 + self.0.as_str() } } @@ -44,7 +41,7 @@ impl TryFrom for MetadataKey { fn try_from(key: String) -> Result { validate_key(&key)?; - Ok(Self(key)) + Ok(Self(ArcStr::from(key))) } } @@ -60,10 +57,14 @@ impl MetadataKeyRef { validate_key(key)?; Ok(Self::ref_cast(key)) } + + pub fn unchecked_new(key: &str) -> &Self { + Self::ref_cast(key) + } } fn validate_key(key: &str) -> Result<(), MetadataKeyError> { - if key.chars().filter(|c| *c == '.').count() != 1 { + if key.split('.').count() != 2 { return Err(MetadataKeyError::KeyMustContainExactlyOneDot( key.to_owned(), )); @@ -81,6 +82,22 @@ impl ToOwned for MetadataKeyRef { type Owned = MetadataKey; fn to_owned(&self) -> Self::Owned { - MetadataKey(self.0.to_owned()) + MetadataKey(ArcStr::from(&self.0)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_metadata_key_validation() { + assert!(MetadataKeyRef::new("foo").is_err()); + assert!(MetadataKeyRef::new(".foo").is_ok()); + assert!(MetadataKeyRef::new("foo.").is_ok()); + assert!(MetadataKeyRef::new("foo.bar").is_ok()); + assert!(MetadataKeyRef::new("foo..bar").is_err()); + assert!(MetadataKeyRef::new("...").is_err()); + assert!(MetadataKeyRef::new("a.b.c").is_err()); } } diff --git a/app/buck2_node/src/metadata/map.rs b/app/buck2_node/src/metadata/map.rs index d4cfee81ae0f1..062b5829dd45e 100644 --- a/app/buck2_node/src/metadata/map.rs +++ b/app/buck2_node/src/metadata/map.rs @@ -9,81 +9,30 @@ use std::fmt; use std::hash::Hash; -use std::hash::Hasher; use allocative::Allocative; -use starlark_map::ordered_map::OrderedMap; +use starlark_map::small_map::SmallMap; +use starlark_map::sorted_map::SortedMap; use crate::attrs::attr_type::any_matches::AnyMatches; use crate::metadata::key::MetadataKey; +use crate::metadata::key::MetadataKeyRef; +use crate::metadata::value::MetadataValue; -#[derive(Debug, Eq, PartialEq, Clone, Allocative, Default)] +#[derive(Debug, Eq, PartialEq, Hash, Clone, Allocative, Default)] pub struct MetadataMap { - values: Box>, + values: Box>, } impl MetadataMap { - pub fn new(values: OrderedMap) -> Self { + pub fn new(values: SmallMap) -> Self { Self { - values: Box::new(values), + values: Box::new(SortedMap::from(values)), } } -} - -impl Hash for MetadataMap { - fn hash(&self, state: &mut H) { - let Self { values } = self; - state.write_usize(values.len()); - for (k, v) in values.iter_hashed() { - Hash::hash(&k, state); - hash_value(v, state); - } - } -} -fn hash_value(v: &serde_json::Value, state: &mut H) { - use serde_json::Value; - - match v { - Value::Null => { - state.write_u8(0); - } - Value::Bool(v) => { - state.write_u8(1); - v.hash(state); - } - Value::Number(v) => { - state.write_u8(2); - if let Some(v) = v.as_u64() { - state.write_u8(1); - v.hash(state); - } else if let Some(v) = v.as_i64() { - state.write_u8(2); - v.hash(state); - } else { - state.write_u8(3); - v.to_string().hash(state); - } - } - Value::String(v) => { - state.write_u8(3); - v.hash(state); - } - Value::Array(vals) => { - state.write_u8(4); - state.write_usize(vals.len()); - for v in vals { - hash_value(v, state); - } - } - Value::Object(vals) => { - state.write_u8(5); - 
state.write_usize(vals.len()); - for (k, v) in vals { - k.hash(state); - hash_value(v, state); - } - } + pub fn get(&self, key: &MetadataKeyRef) -> Option<&MetadataValue> { + self.values.get(key) } } @@ -92,7 +41,7 @@ impl MetadataMap { let Self { values } = self; let map = values .iter() - .map(|(k, v)| (k.as_str().to_owned(), v.clone())) + .map(|(k, v)| (k.as_str().to_owned(), v.as_json().clone())) .collect(); serde_json::Value::Object(map) } @@ -115,7 +64,7 @@ impl AnyMatches for MetadataMap { if filter(k.as_str())? { return Ok(true); } - if v.any_matches(filter)? { + if v.0.any_matches(filter)? { return Ok(true); } } @@ -129,10 +78,10 @@ mod tests { use super::*; fn make() -> MetadataMap { - let mut map = OrderedMap::new(); + let mut map = SmallMap::new(); map.insert( "foo.bar".to_owned().try_into().unwrap(), - serde_json::Value::String("baz".to_owned()), + MetadataValue::new(serde_json::Value::String("baz".to_owned())), ); MetadataMap::new(map) } diff --git a/app/buck2_node/src/metadata/mod.rs b/app/buck2_node/src/metadata/mod.rs deleted file mode 100644 index c79b90482f2dc..0000000000000 --- a/app/buck2_node/src/metadata/mod.rs +++ /dev/null @@ -1,12 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -pub mod key; -pub mod map; -pub mod super_package_values; diff --git a/app/buck2_node/src/metadata/super_package_values.rs b/app/buck2_node/src/metadata/super_package_values.rs index c372618e0c233..01a0de2bc2190 100644 --- a/app/buck2_node/src/metadata/super_package_values.rs +++ b/app/buck2_node/src/metadata/super_package_values.rs @@ -23,4 +23,11 @@ pub trait SuperPackageValues: Debug + Allocative + Any + Send + Sync + 'static { /// validated at construction time. fn package_values_json(&self) -> anyhow::Result>; fn contains_key(&self, key: &MetadataKeyRef) -> bool; + /// Gets package value as a JSON for the given key. + /// Only error is internal error: all values must be serializable to JSON, + /// validated at construction time. + fn get_package_value_json( + &self, + key: &MetadataKeyRef, + ) -> anyhow::Result>; } diff --git a/app/buck2_node/src/metadata/value.rs b/app/buck2_node/src/metadata/value.rs new file mode 100644 index 0000000000000..20faa0d80a7a4 --- /dev/null +++ b/app/buck2_node/src/metadata/value.rs @@ -0,0 +1,96 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::hash::Hash; +use std::hash::Hasher; +use std::sync::Arc; + +use allocative::Allocative; +use dupe::Dupe; +use serde::Deserialize; +use serde::Serialize; + +#[derive( + Debug, + // serde_json's equality ignores map ordering, but we should have + // equality with ordering preserved here. 
+ // TODO: implement our own equality here that preserves ordering + Eq, + PartialEq, + Clone, + Dupe, + Allocative, + Default, + Serialize, + Deserialize +)] +pub struct MetadataValue(pub Arc); + +impl MetadataValue { + pub fn new(value: serde_json::Value) -> Self { + let value = Arc::new(value); + Self(value) + } + + pub fn as_json(&self) -> &serde_json::Value { + self.0.as_ref() + } +} + +impl Hash for MetadataValue { + fn hash(&self, state: &mut H) { + hash_value(&self.0, state); + } +} + +fn hash_value(v: &serde_json::Value, state: &mut H) { + use serde_json::Value; + + match v { + Value::Null => { + state.write_u8(0); + } + Value::Bool(v) => { + state.write_u8(1); + v.hash(state); + } + Value::Number(v) => { + state.write_u8(2); + if let Some(v) = v.as_u64() { + state.write_u8(1); + v.hash(state); + } else if let Some(v) = v.as_i64() { + state.write_u8(2); + v.hash(state); + } else { + state.write_u8(3); + v.to_string().hash(state); + } + } + Value::String(v) => { + state.write_u8(3); + v.hash(state); + } + Value::Array(vals) => { + state.write_u8(4); + state.write_usize(vals.len()); + for v in vals { + hash_value(v, state); + } + } + Value::Object(vals) => { + state.write_u8(5); + state.write_usize(vals.len()); + for (k, v) in vals { + k.hash(state); + hash_value(v, state); + } + } + } +} diff --git a/app/buck2_node/src/nodes.rs b/app/buck2_node/src/nodes.rs new file mode 100644 index 0000000000000..8a699cc0e5802 --- /dev/null +++ b/app/buck2_node/src/nodes.rs @@ -0,0 +1,60 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub mod configured; +pub mod configured_frontend; +pub mod configured_node_ref; +pub mod configured_node_visit_all_deps; +pub mod configured_ref; +pub mod eval_result; +pub mod frontend; +pub mod lookup; +pub mod targets_map; +pub mod unconfigured; + +/// Attributes on target nodes that are generated by buck, not provided by users. +pub mod attributes { + /// The nodes that this node depends on. + pub const DEPS: &str = "buck.deps"; + + /// The oncall for this node. + pub const ONCALL: &str = "buck.oncall"; + + /// The package or the buildfile path. + /// The package in `targets` command, but the buildfile path in `*query` commands. + pub const PACKAGE: &str = "buck.package"; + + /// A string representation of the target's rule type. + pub const TYPE: &str = "buck.type"; + + /// The target hash of this target. + pub const TARGET_HASH: &str = "buck.target_hash"; + + /// The callstack for this target. + pub const TARGET_CALL_STACK: &str = "buck.target_call_stack"; + + /// The configuration deps, deps that appear as conditions in selects. + pub const CONFIGURATION_DEPS: &str = "buck.configuration_deps"; + + /// The resolved execution platform for this node. + pub const EXECUTION_PLATFORM: &str = "buck.execution_platform"; + + /// The resolved target configuration for this node. + pub const TARGET_CONFIGURATION: &str = "buck.target_configuration"; + + /// The input source files/directories that this node uses. + pub const INPUTS: &str = "buck.inputs"; + + /// The package values for the package this target belongs to. + pub const PACKAGE_VALUES: &str = "buck.package_values"; + + /// The plugin lists on the node. 
diff --git a/app/buck2_node/src/nodes.rs b/app/buck2_node/src/nodes.rs
new file mode 100644
index 0000000000000..8a699cc0e5802
--- /dev/null
+++ b/app/buck2_node/src/nodes.rs
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+pub mod configured;
+pub mod configured_frontend;
+pub mod configured_node_ref;
+pub mod configured_node_visit_all_deps;
+pub mod configured_ref;
+pub mod eval_result;
+pub mod frontend;
+pub mod lookup;
+pub mod targets_map;
+pub mod unconfigured;
+
+/// Attributes on target nodes that are generated by buck, not provided by users.
+pub mod attributes {
+    /// The nodes that this node depends on.
+    pub const DEPS: &str = "buck.deps";
+
+    /// The oncall for this node.
+    pub const ONCALL: &str = "buck.oncall";
+
+    /// The package or the buildfile path.
+    /// The package in `targets` command, but the buildfile path in `*query` commands.
+    pub const PACKAGE: &str = "buck.package";
+
+    /// A string representation of the target's rule type.
+    pub const TYPE: &str = "buck.type";
+
+    /// The target hash of this target.
+    pub const TARGET_HASH: &str = "buck.target_hash";
+
+    /// The callstack for this target.
+    pub const TARGET_CALL_STACK: &str = "buck.target_call_stack";
+
+    /// The configuration deps, deps that appear as conditions in selects.
+    pub const CONFIGURATION_DEPS: &str = "buck.configuration_deps";
+
+    /// The resolved execution platform for this node.
+    pub const EXECUTION_PLATFORM: &str = "buck.execution_platform";
+
+    /// The resolved target configuration for this node.
+    pub const TARGET_CONFIGURATION: &str = "buck.target_configuration";
+
+    /// The input source files/directories that this node uses.
+    pub const INPUTS: &str = "buck.inputs";
+
+    /// The package values for the package this target belongs to.
+    pub const PACKAGE_VALUES: &str = "buck.package_values";
+
+    /// The plugin lists on the node. This includes all plugins, regardless of whether they're
+    /// propagated or actually used.
+    pub const PLUGINS: &str = "buck.plugins";
+}
diff --git a/app/buck2_node/src/nodes/configured.rs b/app/buck2_node/src/nodes/configured.rs
index 131484ed12657..5aae16f53e4f8 100644
--- a/app/buck2_node/src/nodes/configured.rs
+++ b/app/buck2_node/src/nodes/configured.rs
@@ -15,8 +15,6 @@ use std::iter;
 use std::sync::Arc;

 use allocative::Allocative;
-use anyhow::Context;
-use buck2_core::buck_path::path::BuckPathRef;
 use buck2_core::build_file_path::BuildFilePath;
 use buck2_core::bzl::ImportPath;
 use buck2_core::cells::cell_path::CellPath;
@@ -24,7 +22,9 @@ use buck2_core::configuration::data::ConfigurationData;
 use buck2_core::configuration::pair::ConfigurationNoExec;
 use buck2_core::configuration::transition::applied::TransitionApplied;
 use buck2_core::configuration::transition::id::TransitionId;
+use buck2_core::execution_types::execution::ExecutionPlatform;
 use buck2_core::execution_types::execution::ExecutionPlatformResolution;
+use buck2_core::package::source_path::SourcePathRef;
 use buck2_core::plugins::PluginKind;
 use buck2_core::plugins::PluginKindSet;
 use buck2_core::plugins::PluginLists;
@@ -32,13 +32,12 @@ use buck2_core::provider::label::ConfiguredProvidersLabel;
 use buck2_core::provider::label::ProvidersLabel;
 use buck2_core::provider::label::ProvidersName;
 use buck2_core::target::configured_target_label::ConfiguredTargetLabel;
-use buck2_core::target::label::TargetLabel;
+use buck2_core::target::label::label::TargetLabel;
 use buck2_util::arc_str::ArcStr;
 use dupe::Dupe;
 use either::Either;
 use once_cell::sync::Lazy;
 use starlark_map::ordered_map::OrderedMap;
-use starlark_map::unordered_map::UnorderedMap;
 use starlark_map::Hashed;

 use crate::attrs::attr::Attribute;
@@ -55,9 +54,12 @@ use crate::attrs::configured_attr::ConfiguredAttr;
 use crate::attrs::configured_attr_full::ConfiguredAttrFull;
 use crate::attrs::configured_traversal::ConfiguredAttrTraversal;
 use crate::attrs::inspect_options::AttrInspectOptions;
-use crate::attrs::internal::TARGET_COMPATIBLE_WITH_ATTRIBUTE_FIELD;
 use crate::attrs::internal::TESTS_ATTRIBUTE_FIELD;
+use crate::call_stack::StarlarkCallStack;
+use crate::call_stack::StarlarkTargetCallStackRoot;
+use crate::configuration::resolved::ConfigurationSettingKey;
 use crate::configuration::resolved::ResolvedConfiguration;
+use crate::configuration::resolved::ResolvedConfigurationSettings;
 use crate::nodes::attributes::DEPS;
 use crate::nodes::attributes::EXECUTION_PLATFORM;
 use crate::nodes::attributes::ONCALL;
@@ -77,8 +79,10 @@ use crate::rule_type::StarlarkRuleType;
 /// in the node, instead the node just stores the base TargetNode and a configuration for
 /// resolving the attributes. This saves memory, but users should try avoid repeatedly
 /// requesting the same information.
-#[derive(Debug, Clone, Dupe, Eq, PartialEq, Hash, Allocative)]
-pub struct ConfiguredTargetNode(Arc<Hashed<ConfiguredTargetNodeData>>);
+#[derive(Debug, Clone, Eq, PartialEq, Hash, Allocative)]
+pub struct ConfiguredTargetNode(triomphe::Arc<Hashed<ConfiguredTargetNodeData>>);
+
+impl Dupe for ConfiguredTargetNode {}
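The switch from `std::sync::Arc` to `triomphe::Arc` above is a size and speed optimization. A hedged sketch of the difference (this reflects the two libraries' documented behavior, not anything specific to this diff): `triomphe::Arc` supports no weak references, so its heap header carries a single reference count instead of two.

    let std_arc = std::sync::Arc::new(42u64); // header: strong + weak counts
    let tri_arc = triomphe::Arc::new(42u64);  // header: strong count only
    assert_eq!(*std_arc, *tri_arc);

The manual `impl Dupe for ConfiguredTargetNode {}` replaces the dropped derive; cloning a `triomphe::Arc` is still just a reference-count bump, so the type remains cheap to duplicate.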
 
 #[derive(Debug, Eq, PartialEq, Hash, Allocative)]
 enum TargetNodeOrForward {
@@ -170,16 +174,15 @@ impl TargetNodeOrForward {
 // 3. deps could probably be approximated as a diff against the targetnode's deps
 #[derive(Eq, PartialEq, Hash, Allocative)]
 struct ConfiguredTargetNodeData {
-    label: ConfiguredTargetLabel,
+    label: Hashed<ConfiguredTargetLabel>,
     target_node: TargetNodeOrForward,
     resolved_configuration: ResolvedConfiguration,
     resolved_transition_configurations: OrderedMap<Arc<TransitionId>, Arc<TransitionApplied>>,
     execution_platform_resolution: ExecutionPlatformResolution,
-    // Deps includes regular deps and transitioned deps,
-    // but excludes exec deps or configuration deps.
+    // all_deps includes regular deps and transitioned deps,
+    // and includes exec deps and configuration deps.
     // TODO(cjhopman): Should this be a diff against the node's deps?
-    deps: ConfiguredTargetNodeDeps,
-    exec_deps: ConfiguredTargetNodeDeps,
+    all_deps: ConfiguredTargetNodeDeps,
     platform_cfgs: OrderedMap<TargetLabel, ConfigurationData>,
     // TODO(JakobDegen): Consider saving some memory by using a more tset like representation of
    // the plugin lists
@@ -196,21 +199,33 @@ impl Debug for ConfiguredTargetNodeData {
 
 impl ConfiguredTargetNode {
     /// Creates a minimal ConfiguredTargetNode. Some operations may unexpectedly fail.
-    pub fn testing_new(name: ConfiguredTargetLabel, rule_type: &str) -> Self {
+    pub fn testing_new(
+        name: ConfiguredTargetLabel,
+        rule_type: &str,
+        execution_platform_resolution: ExecutionPlatformResolution,
+        attrs: Vec<(&str, Attribute, CoercedAttr)>,
+        internal_attrs: Vec<(&str, Attribute, CoercedAttr)>,
+        call_stack: Option<StarlarkCallStack>,
+    ) -> Self {
         use crate::nodes::unconfigured::testing::TargetNodeExt;
         let rule_type = RuleType::Starlark(Arc::new(StarlarkRuleType {
             import_path: ImportPath::testing_new("cell//pkg:rules.bzl"),
             name: rule_type.to_owned(),
         }));
 
-        let execution_platform_resolution = ExecutionPlatformResolution::new(None, Vec::new());
         Self::new(
             name.dupe(),
-            TargetNode::testing_new(name.unconfigured().dupe(), rule_type, Vec::new()),
+            TargetNode::testing_new(
+                name.unconfigured().dupe(),
+                rule_type,
+                attrs,
+                internal_attrs,
+                call_stack,
+            ),
             ResolvedConfiguration::new(
                 ConfigurationNoExec::new(name.cfg().dupe()),
-                UnorderedMap::new(),
+                ResolvedConfigurationSettings::empty(),
             ),
             OrderedMap::new(),
             execution_platform_resolution,
@@ -232,14 +247,13 @@ impl ConfiguredTargetNode {
         platform_cfgs: OrderedMap<TargetLabel, ConfigurationData>,
         plugin_lists: PluginLists,
     ) -> Self {
-        Self(Arc::new(Hashed::new(ConfiguredTargetNodeData {
-            label: name,
+        Self(triomphe::Arc::new(Hashed::new(ConfiguredTargetNodeData {
+            label: Hashed::new(name),
             target_node: TargetNodeOrForward::TargetNode(target_node),
             resolved_configuration,
             resolved_transition_configurations: resolved_tr_configurations,
             execution_platform_resolution,
-            deps: ConfiguredTargetNodeDeps(deps.into_boxed_slice()),
-            exec_deps: ConfiguredTargetNodeDeps(exec_deps.into_boxed_slice()),
+            all_deps: ConfiguredTargetNodeDeps::new(deps, exec_deps),
             platform_cfgs,
             plugin_lists,
         })))
@@ -269,9 +283,9 @@ impl ConfiguredTargetNode {
         let configured_providers_label =
             providers_label.configure(transitioned_node.label().cfg().dupe());
 
-        Ok(ConfiguredTargetNode(Arc::new(Hashed::new(
+        Ok(ConfiguredTargetNode(triomphe::Arc::new(Hashed::new(
             ConfiguredTargetNodeData {
-                label: name.dupe(),
+                label: Hashed::new(name.dupe()),
                 target_node: TargetNodeOrForward::Forward(
                     CoercedAttr::ConfiguredDep(Box::new(DepAttr {
                         attr_type: DepAttrType::new(
@@ -285,7 +299,7 @@ impl ConfiguredTargetNode {
                 // We have no attributes with selects, so resolved configurations is empty.
resolved_configuration: ResolvedConfiguration::new( name.cfg_pair().check_no_exec_cfg()?, - UnorderedMap::new(), + ResolvedConfigurationSettings::empty(), ), // We have no attributes to transition, so empty map is fine. resolved_transition_configurations: OrderedMap::new(), @@ -295,8 +309,7 @@ impl ConfiguredTargetNode { .execution_platform_resolution() .dupe(), plugin_lists: transitioned_node.plugin_lists().clone(), - deps: ConfiguredTargetNodeDeps(Box::new([transitioned_node])), - exec_deps: ConfiguredTargetNodeDeps(Box::new([])), + all_deps: ConfiguredTargetNodeDeps::new(vec![transitioned_node], vec![]), platform_cfgs: OrderedMap::new(), }, )))) @@ -308,22 +321,9 @@ impl ConfiguredTargetNode { &ATTRIBUTE } - pub fn target_compatible_with(&self) -> impl Iterator> + '_ { - self.get( - TARGET_COMPATIBLE_WITH_ATTRIBUTE_FIELD, - AttrInspectOptions::All, - ) - .into_iter() - .flat_map(|a| { - Self::attr_as_target_compatible_with(a.value).map(|a| { - a.with_context(|| format!("attribute `{}`", TARGET_COMPATIBLE_WITH_ATTRIBUTE_FIELD)) - }) - }) - } - pub fn attr_as_target_compatible_with( attr: ConfiguredAttr, - ) -> impl Iterator> { + ) -> impl Iterator> { let list = match attr.try_into_list() { Ok(list) => list, Err(e) => return Either::Left(iter::once(Err(e))), @@ -340,79 +340,50 @@ impl ConfiguredTargetNode { /// later in the build process). // TODO(cjhopman): Should this include configuration deps? Should it include the configuration deps that were inspected resolving selects? pub fn deps(&self) -> impl Iterator { - self.0.deps.iter().chain(self.0.exec_deps.iter()) + self.0.all_deps.all_deps.iter() + } + + pub fn configuration_deps(&self) -> impl Iterator { + // Since we validate that all configuration dependencies are of kind Configuration, + // we can use that to filter the deps. + self.0 + .all_deps + .deps() + .iter() + .filter(|x| x.rule_kind() == RuleKind::Configuration) } pub fn toolchain_deps(&self) -> impl Iterator { // Since we validate that all toolchain dependencies are of kind Toolchain, // we can use that to filter the deps. self.0 - .deps + .all_deps + .deps() .iter() .filter(|x| x.rule_kind() == RuleKind::Toolchain) } pub fn inputs(&self) -> impl Iterator + '_ { - struct InputsCollector { - inputs: Vec, - } - impl ConfiguredAttrTraversal for InputsCollector { - fn dep(&mut self, _dep: &ConfiguredProvidersLabel) -> anyhow::Result<()> { - Ok(()) - } - - fn input(&mut self, path: BuckPathRef) -> anyhow::Result<()> { - self.inputs.push(path.to_cell_path()); - Ok(()) - } - } - let mut traversal = InputsCollector { inputs: Vec::new() }; - for a in self.attrs(AttrInspectOptions::All) { - a.traverse(self.label().pkg(), &mut traversal) - .expect("inputs collector shouldn't return errors"); - } - traversal.inputs.into_iter() + self.as_ref().inputs() } - // TODO(cjhopman): switch to for_each_query? + #[inline] pub fn queries( &self, - ) -> impl Iterator)> { - struct Traversal { - queries: Vec<(String, ResolvedQueryLiterals)>, - } - let mut traversal = Traversal { - queries: Vec::new(), - }; - impl ConfiguredAttrTraversal for Traversal { - fn dep(&mut self, _dep: &ConfiguredProvidersLabel) -> anyhow::Result<()> { - // ignored. 
- Ok(()) - } - - fn query_macro( - &mut self, - query: &str, - resolved_literals: &ResolvedQueryLiterals, - ) -> anyhow::Result<()> { - self.queries - .push((query.to_owned(), resolved_literals.clone())); - Ok(()) - } - } - // TODO(cjhopman): optimize for non-query attrs - for a in self.attrs(AttrInspectOptions::All) { - a.traverse(self.label().pkg(), &mut traversal).unwrap(); - } - traversal.queries.into_iter() + ) -> impl Iterator)> + '_ { + self.as_ref().queries() } pub fn target_deps(&self) -> impl Iterator { - self.0.deps.iter() + self.0 + .all_deps + .deps() + .iter() + .filter(|x| x.rule_kind() == RuleKind::Normal) } pub fn exec_deps(&self) -> impl Iterator { - self.0.exec_deps.iter() + self.0.all_deps.exec_deps().iter() } /// Return the `tests` declared for this target configured in same target platform as this target. @@ -429,7 +400,7 @@ impl ConfiguredTargetNode { } fn label(&mut self, label: &ConfiguredProvidersLabel) -> anyhow::Result<()> { - self.labels.push(label.clone()); + self.labels.push(label.dupe()); Ok(()) } } @@ -445,6 +416,18 @@ impl ConfiguredTargetNode { &self.0.label } + #[inline] + pub fn hashed_label(&self) -> Hashed<&ConfiguredTargetLabel> { + self.0.label.as_ref() + } + + pub fn target_node(&self) -> &TargetNode { + match &self.0.target_node { + TargetNodeOrForward::TargetNode(n) => n, + TargetNodeOrForward::Forward(_, n) => n.target_node(), + } + } + pub fn rule_type(&self) -> &RuleType { self.0.target_node.rule_type() } @@ -461,71 +444,21 @@ impl ConfiguredTargetNode { self.0.target_node.is_visible_to(target) } + #[inline] pub fn special_attrs(&self) -> impl Iterator { - let typ_attr = ConfiguredAttr::String(StringLiteral(self.rule_type().name().into())); - let deps_attr = ConfiguredAttr::List( - self.deps() - .map(|t| { - ConfiguredAttr::Label(Box::new(ConfiguredProvidersLabel::new( - t.label().dupe(), - ProvidersName::Default, - ))) - }) - .collect(), - ); - let package_attr = ConfiguredAttr::String(StringLiteral(ArcStr::from( - self.buildfile_path().to_string(), - ))); - vec![ - (TYPE, typ_attr), - (DEPS, deps_attr), - (PACKAGE, package_attr), - ( - ONCALL, - match self.oncall() { - None => ConfiguredAttr::None, - Some(x) => ConfiguredAttr::String(StringLiteral(ArcStr::from(x))), - }, - ), - ( - TARGET_CONFIGURATION, - ConfiguredAttr::String(StringLiteral(ArcStr::from(self.0.label.cfg().to_string()))), - ), - ( - EXECUTION_PLATFORM, - ConfiguredAttr::String(StringLiteral( - self.0 - .execution_platform_resolution - .platform() - .map_or_else(|_| ArcStr::from(""), |v| ArcStr::from(v.id())), - )), - ), - (PLUGINS, self.plugins_as_attr()), - ] - .into_iter() + self.as_ref().special_attrs() } + #[inline] pub fn oncall(&self) -> Option<&str> { - self.0.target_node.oncall() - } - - fn attr_configuration_context(&self) -> AttrConfigurationContextImpl { - AttrConfigurationContextImpl::new( - &self.0.resolved_configuration, - self.0.execution_platform_resolution.cfg(), - &self.0.resolved_transition_configurations, - &self.0.platform_cfgs, - ) + self.as_ref().oncall() } pub fn attrs<'a>( &'a self, opts: AttrInspectOptions, ) -> impl Iterator> + 'a { - self.0.target_node.attrs(opts).map(move |a| { - a.configure(&self.attr_configuration_context()) - .expect("checked attr configuration in constructor") - }) + self.as_ref().attrs(opts) } pub fn get<'a>( @@ -533,10 +466,7 @@ impl ConfiguredTargetNode { attr: &str, opts: AttrInspectOptions, ) -> Option> { - self.0.target_node.attr_or_none(attr, opts).map(|v| { - v.configure(&self.attr_configuration_context()) - 
.expect("checked attr configuration in constructor") - }) + self.as_ref().get(attr, opts) } pub fn call_stack(&self) -> Option { @@ -546,6 +476,13 @@ impl ConfiguredTargetNode { } } + pub fn root_location(&self) -> Option { + match &self.0.target_node { + TargetNodeOrForward::TargetNode(n) => n.root_location(), + TargetNodeOrForward::Forward(_, n) => n.root_location(), + } + } + /// Hash the fields that impact how this target is built. /// Don't do any recursive hashing of the dependencies. /// Hashes the attributes _after_ configuration, so changing unconfigured branches that @@ -569,45 +506,82 @@ impl ConfiguredTargetNode { } } - pub fn uses_plugins(&self) -> &[PluginKind] { - match &self.0.target_node { - TargetNodeOrForward::TargetNode(target_node) => target_node.uses_plugins(), - TargetNodeOrForward::Forward(_, _) => &[], + pub fn unwrap_forward(&self) -> &ConfiguredTargetNode { + match self.forward_target() { + None => self, + Some(t) => t, } } + #[inline] + pub fn target_configuration(&self) -> &ConfigurationData { + self.as_ref().0.get().label.cfg() + } + + #[inline] + pub fn execution_platform(&self) -> anyhow::Result<&ExecutionPlatform> { + self.as_ref().execution_platform_resolution().platform() + } + + #[inline] + pub fn uses_plugins(&self) -> &[PluginKind] { + self.as_ref().uses_plugins() + } + pub fn plugin_lists(&self) -> &PluginLists { &self.0.plugin_lists } - fn plugins_as_attr(&self) -> ConfiguredAttr { - let mut kinds = Vec::new(); - for (kind, plugins) in self.plugin_lists().iter_by_kind() { - // Using plugin dep here is a bit of an abuse. However, there's no - // `ConfiguredAttr::TargetLabel` type, and it also seems excessive to add one for this - // reason alone - let plugins = plugins - .map(|(target, _)| { - ConfiguredAttr::PluginDep(Box::new((target.dupe(), kind.dupe()))) - }) - .collect(); - kinds.push(( - ConfiguredAttr::String(StringLiteral(ArcStr::from(kind.as_str()))), - ConfiguredAttr::List(plugins), - )); - } - ConfiguredAttr::Dict(kinds.into_iter().collect()) + #[inline] + pub fn as_ref(&self) -> ConfiguredTargetNodeRef<'_> { + ConfiguredTargetNodeRef(triomphe::Arc::borrow_arc(&self.0)) + } + + pub fn ptr_eq(&self, other: &Self) -> bool { + triomphe::Arc::ptr_eq(&self.0, &other.0) } } /// The representation of the deps for a ConfiguredTargetNode. Provides the operations we require /// (iteration, eq, and hash), but guarantees those aren't recursive of the dep nodes' data. #[derive(Allocative)] -struct ConfiguredTargetNodeDeps(Box<[ConfiguredTargetNode]>); +struct ConfiguredTargetNodeDeps { + /// Number of deps, excluding exec deps. Used as an index to retrieve exec_deps + deps_count: usize, + /// (target deps and toolchain deps) followed by `exec_deps`. + all_deps: Box<[ConfiguredTargetNode]>, +} impl ConfiguredTargetNodeDeps { - fn iter(&self) -> impl ExactSizeIterator { - self.0.iter() + fn new(deps: Vec, exec_deps: Vec) -> Self { + if deps.is_empty() { + ConfiguredTargetNodeDeps { + deps_count: 0, + all_deps: exec_deps.into_boxed_slice(), + } + } else if exec_deps.is_empty() { + ConfiguredTargetNodeDeps { + deps_count: deps.len(), + all_deps: deps.into_boxed_slice(), + } + } else { + ConfiguredTargetNodeDeps { + deps_count: deps.len(), + all_deps: deps + .into_iter() + .chain(exec_deps) + .collect::>() + .into_boxed_slice(), + } + } + } + + fn deps(&self) -> &[ConfiguredTargetNode] { + &self.all_deps[..self.deps_count] + } + + fn exec_deps(&self) -> &[ConfiguredTargetNode] { + &self.all_deps[self.deps_count..] 
} } @@ -616,9 +590,15 @@ impl ConfiguredTargetNodeDeps { /// deep comparison. impl PartialEq for ConfiguredTargetNodeDeps { fn eq(&self, other: &Self) -> bool { - let it1 = self.iter(); - let it2 = other.iter(); - it1.len() == it2.len() && it1.zip(it2).all(|(x, y)| Arc::ptr_eq(&x.0, &y.0)) + let ConfiguredTargetNodeDeps { + deps_count, + all_deps, + } = self; + *deps_count == other.deps_count && all_deps.len() == other.all_deps.len() && { + let it1 = all_deps.iter(); + let it2 = other.all_deps.iter(); + it1.zip(it2).all(|(x, y)| triomphe::Arc::ptr_eq(&x.0, &y.0)) + } } } @@ -629,10 +609,220 @@ impl Eq for ConfiguredTargetNodeDeps {} /// the dependency has definitely changed (e.g. because its own deps changed). impl Hash for ConfiguredTargetNodeDeps { fn hash(&self, state: &mut H) { - let it = self.0.iter(); - state.write_usize(it.len()); - for node in it { + let ConfiguredTargetNodeDeps { + deps_count, + all_deps, + } = self; + state.write_usize(*deps_count); + for node in &**all_deps { node.label().hash(state); } } } + +/// Like `&ConfiguredTargetNode`, but cheaper (one fewer indirection). +#[derive(Debug, Copy, Clone)] +pub struct ConfiguredTargetNodeRef<'a>(triomphe::ArcBorrow<'a, Hashed>); + +impl<'a> Dupe for ConfiguredTargetNodeRef<'a> {} + +impl<'a> ConfiguredTargetNodeRef<'a> { + #[inline] + pub fn to_owned(self) -> ConfiguredTargetNode { + ConfiguredTargetNode(triomphe::ArcBorrow::clone_arc(&self.0)) + } + + #[inline] + pub fn deps(self) -> impl Iterator { + self.0.get().all_deps.all_deps.iter() + } + + #[inline] + pub fn ptr_eq(self, other: Self) -> bool { + triomphe::ArcBorrow::ptr_eq(&self.0, &other.0) + } + + #[inline] + pub fn label(self) -> &'a ConfiguredTargetLabel { + self.0.get().label.key() + } + + #[inline] + pub fn hashed_label(self) -> Hashed<&'a ConfiguredTargetLabel> { + self.0.get().label.as_ref() + } + + fn attr_configuration_context(self) -> AttrConfigurationContextImpl<'a> { + AttrConfigurationContextImpl::new( + &self.0.get().resolved_configuration, + self.0.get().execution_platform_resolution.cfg(), + &self.0.get().resolved_transition_configurations, + &self.0.get().platform_cfgs, + ) + } + + pub fn oncall(self) -> Option<&'a str> { + self.0.get().target_node.oncall() + } + + pub fn special_attrs(self) -> impl Iterator { + let typ_attr = ConfiguredAttr::String(StringLiteral(self.rule_type().name().into())); + let deps_attr = ConfiguredAttr::List( + self.deps() + .map(|t| { + ConfiguredAttr::Label(ConfiguredProvidersLabel::new( + t.label().dupe(), + ProvidersName::Default, + )) + }) + .collect(), + ); + let package_attr = ConfiguredAttr::String(StringLiteral(ArcStr::from( + self.buildfile_path().to_string(), + ))); + vec![ + (TYPE, typ_attr), + (DEPS, deps_attr), + (PACKAGE, package_attr), + ( + ONCALL, + match self.oncall() { + None => ConfiguredAttr::None, + Some(x) => ConfiguredAttr::String(StringLiteral(ArcStr::from(x))), + }, + ), + ( + TARGET_CONFIGURATION, + ConfiguredAttr::String(StringLiteral(ArcStr::from(self.0.label.cfg().to_string()))), + ), + ( + EXECUTION_PLATFORM, + ConfiguredAttr::String(StringLiteral( + self.0 + .execution_platform_resolution + .platform() + .map_or_else(|_| ArcStr::from(""), |v| ArcStr::from(v.id())), + )), + ), + (PLUGINS, self.plugins_as_attr()), + ] + .into_iter() + } + + pub fn attrs( + self, + opts: AttrInspectOptions, + ) -> impl Iterator> + 'a { + self.0.get().target_node.attrs(opts).map(move |a| { + a.configure(&self.attr_configuration_context()) + .expect("checked attr configuration in constructor") + }) + } + + pub 
fn get(self, attr: &str, opts: AttrInspectOptions) -> Option> { + self.0.get().target_node.attr_or_none(attr, opts).map(|v| { + v.configure(&self.attr_configuration_context()) + .expect("checked attr configuration in constructor") + }) + } + + pub fn inputs(self) -> impl Iterator + 'a { + struct InputsCollector { + inputs: Vec, + } + impl ConfiguredAttrTraversal for InputsCollector { + fn dep(&mut self, _dep: &ConfiguredProvidersLabel) -> anyhow::Result<()> { + Ok(()) + } + + fn input(&mut self, path: SourcePathRef) -> anyhow::Result<()> { + self.inputs.push(path.to_cell_path()); + Ok(()) + } + } + let mut traversal = InputsCollector { inputs: Vec::new() }; + for a in self.attrs(AttrInspectOptions::All) { + a.traverse(self.label().pkg(), &mut traversal) + .expect("inputs collector shouldn't return errors"); + } + traversal.inputs.into_iter() + } + + // TODO(cjhopman): switch to for_each_query? + pub fn queries( + self, + ) -> impl Iterator)> + 'a { + struct Traversal { + queries: Vec<(String, ResolvedQueryLiterals)>, + } + let mut traversal = Traversal { + queries: Vec::new(), + }; + impl ConfiguredAttrTraversal for Traversal { + fn dep(&mut self, _dep: &ConfiguredProvidersLabel) -> anyhow::Result<()> { + // ignored. + Ok(()) + } + + fn query( + &mut self, + query: &str, + resolved_literals: &ResolvedQueryLiterals, + ) -> anyhow::Result<()> { + self.queries + .push((query.to_owned(), resolved_literals.clone())); + Ok(()) + } + } + + for a in self.attrs(AttrInspectOptions::All) { + // Optimization. + if !a.attr.coercer().0.may_have_queries { + continue; + } + + a.traverse(self.label().pkg(), &mut traversal).unwrap(); + } + traversal.queries.into_iter() + } + + pub fn rule_type(self) -> &'a RuleType { + self.0.get().target_node.rule_type() + } + + pub fn execution_platform_resolution(self) -> &'a ExecutionPlatformResolution { + &self.0.get().execution_platform_resolution + } + + pub fn uses_plugins(self) -> &'a [PluginKind] { + match &self.0.get().target_node { + TargetNodeOrForward::TargetNode(target_node) => target_node.uses_plugins(), + TargetNodeOrForward::Forward(_, _) => &[], + } + } + + fn plugins_as_attr(self) -> ConfiguredAttr { + let mut kinds = Vec::new(); + for (kind, plugins) in self.plugin_lists().iter_by_kind() { + // Using plugin dep here is a bit of an abuse. However, there's no + // `ConfiguredAttr::TargetLabel` type, and it also seems excessive to add one for this + // reason alone + let plugins = plugins + .map(|(target, _)| ConfiguredAttr::PluginDep(target.dupe(), kind.dupe())) + .collect(); + kinds.push(( + ConfiguredAttr::String(StringLiteral(ArcStr::from(kind.as_str()))), + ConfiguredAttr::List(plugins), + )); + } + ConfiguredAttr::Dict(kinds.into_iter().collect()) + } + + pub fn plugin_lists(self) -> &'a PluginLists { + &self.0.get().plugin_lists + } + + pub fn buildfile_path(self) -> &'a BuildFilePath { + self.0.get().target_node.buildfile_path() + } +} diff --git a/app/buck2_node/src/nodes/configured_frontend.rs b/app/buck2_node/src/nodes/configured_frontend.rs index 209a01ffcb803..6e680a5fb4aa2 100644 --- a/app/buck2_node/src/nodes/configured_frontend.rs +++ b/app/buck2_node/src/nodes/configured_frontend.rs @@ -20,8 +20,9 @@ pub trait ConfiguredTargetNodeCalculationImpl: Send + Sync + 'static { /// Returns the ConfiguredTargetNode corresponding to a ConfiguredTargetLabel. 
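`ConfiguredTargetNodeRef` wraps a `triomphe::ArcBorrow`, and the owned methods above become thin wrappers over the borrowed view. A minimal sketch of the pattern under simplified, hypothetical types (the `Node` and `NodeRef` names are illustrative, not from this diff):

    use triomphe::{Arc, ArcBorrow};

    struct Node(Arc<String>);

    #[derive(Copy, Clone)]
    struct NodeRef<'a>(ArcBorrow<'a, String>);

    impl Node {
        // Hand out a copyable borrowed view, like ConfiguredTargetNode::as_ref.
        fn as_ref(&self) -> NodeRef<'_> {
            NodeRef(Arc::borrow_arc(&self.0))
        }
        // Owned accessors delegate to the borrowed view.
        fn len(&self) -> usize {
            self.as_ref().len()
        }
    }

    impl<'a> NodeRef<'a> {
        fn len(self) -> usize {
            self.0.get().len()
        }
        // Upgrading back to an owned handle is a single refcount bump.
        fn to_owned(self) -> Node {
            Node(ArcBorrow::clone_arc(&self.0))
        }
    }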
async fn get_configured_target_node( &self, - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, target: &ConfiguredTargetLabel, + check_dependency_incompatibility: bool, ) -> anyhow::Result>; } @@ -33,20 +34,40 @@ pub static CONFIGURED_TARGET_NODE_CALCULATION: LateBinding< pub trait ConfiguredTargetNodeCalculation { /// Returns the ConfiguredTargetNode corresponding to a ConfiguredTargetLabel. async fn get_configured_target_node( - &self, + &mut self, + target: &ConfiguredTargetLabel, + ) -> anyhow::Result>; + + /// Same as `get_configured_target_node` except it doesn't error/soft-error on + /// configured target that is transitively incompatible. This should only be used + /// to obtain any configured target node used as deps of other configured nodes, + /// ex. recursively from `get_configured_target_node` function. All other use cases + /// should use `get_configured_target_node` instead. + async fn get_internal_configured_target_node( + &mut self, target: &ConfiguredTargetLabel, ) -> anyhow::Result>; } #[async_trait] -impl ConfiguredTargetNodeCalculation for DiceComputations { +impl ConfiguredTargetNodeCalculation for DiceComputations<'_> { async fn get_configured_target_node( - &self, + &mut self, + target: &ConfiguredTargetLabel, + ) -> anyhow::Result> { + CONFIGURED_TARGET_NODE_CALCULATION + .get()? + .get_configured_target_node(self, target, true) + .await + } + + async fn get_internal_configured_target_node( + &mut self, target: &ConfiguredTargetLabel, ) -> anyhow::Result> { CONFIGURED_TARGET_NODE_CALCULATION .get()? - .get_configured_target_node(self, target) + .get_configured_target_node(self, target, false) .await } } diff --git a/app/buck2_node/src/nodes/configured_node_ref.rs b/app/buck2_node/src/nodes/configured_node_ref.rs new file mode 100644 index 0000000000000..3efc81c74ff5a --- /dev/null +++ b/app/buck2_node/src/nodes/configured_node_ref.rs @@ -0,0 +1,84 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::hash::Hash; +use std::hash::Hasher; + +use buck2_query::query::graph::successors::GraphSuccessors; +use dupe::Dupe; +use starlark_map::StarlarkHashValue; + +use crate::nodes::configured::ConfiguredTargetNode; +use crate::nodes::configured::ConfiguredTargetNodeRef; + +#[derive(Debug, Dupe, Copy, Clone)] +pub struct ConfiguredTargetNodeRefNode<'a> { + // TODO(nga): we store hash here, but we also store hash in `dfs_postorder`. This is redundant. + label_hash: StarlarkHashValue, + node: ConfiguredTargetNodeRef<'a>, +} + +impl PartialEq for ConfiguredTargetNodeRefNode<'_> { + #[inline] + fn eq(&self, other: &Self) -> bool { + // If nodes are the same, their labels must be equal. + self.node.ptr_eq(other.node) + // If nodes are not the same, their hashes are likely different, so we store the hash too. 
+ || (self.label_hash == other.label_hash && self.node.label() == other.node.label()) + } +} + +impl Eq for ConfiguredTargetNodeRefNode<'_> {} + +impl Hash for ConfiguredTargetNodeRefNode<'_> { + #[inline] + fn hash(&self, state: &mut H) { + self.label_hash.hash(state) + } +} + +impl<'a> ConfiguredTargetNodeRefNode<'a> { + #[inline] + pub fn new(node: &'a ConfiguredTargetNode) -> Self { + Self::from_ref(node.as_ref()) + } + + #[inline] + pub fn from_ref(node: ConfiguredTargetNodeRef<'a>) -> Self { + ConfiguredTargetNodeRefNode { + node, + label_hash: node.hashed_label().hash(), + } + } + + #[inline] + pub fn as_ref(&self) -> ConfiguredTargetNodeRef<'a> { + self.node + } + + #[inline] + pub fn to_node(&self) -> ConfiguredTargetNode { + self.node.to_owned() + } +} + +pub struct ConfiguredTargetNodeRefNodeDeps; + +impl<'a> GraphSuccessors> for ConfiguredTargetNodeRefNodeDeps { + #[inline] + fn for_each_successor( + &self, + node: &ConfiguredTargetNodeRefNode<'a>, + mut f: impl FnMut(&ConfiguredTargetNodeRefNode<'a>), + ) { + for dep in node.node.deps() { + f(&ConfiguredTargetNodeRefNode::new(dep)); + } + } +} diff --git a/app/buck2_node/src/nodes/configured_node_visit_all_deps.rs b/app/buck2_node/src/nodes/configured_node_visit_all_deps.rs index de1e62814da39..83dda3e18ee0f 100644 --- a/app/buck2_node/src/nodes/configured_node_visit_all_deps.rs +++ b/app/buck2_node/src/nodes/configured_node_visit_all_deps.rs @@ -7,54 +7,20 @@ * of this source tree. */ -use async_trait::async_trait; -use buck2_query::query::traversal::async_fast_depth_first_postorder_traversal; -use buck2_query::query::traversal::AsyncTraversalDelegate; -use buck2_query::query::traversal::ChildVisitor; -use dupe::Dupe; +use buck2_query::query::graph::bfs::bfs_preorder; -use crate::nodes::configured::ConfiguredTargetNode; -use crate::nodes::configured_ref::ConfiguredGraphNodeRef; -use crate::nodes::configured_ref::ConfiguredGraphNodeRefLookup; +use crate::nodes::configured::ConfiguredTargetNodeRef; +use crate::nodes::configured_node_ref::ConfiguredTargetNodeRefNode; +use crate::nodes::configured_node_ref::ConfiguredTargetNodeRefNodeDeps; /// Visit nodes and all dependencies recursively. -pub async fn configured_node_visit_all_deps( - roots: impl IntoIterator, - // TODO(nga): visitor does not need be either `Sync` or `Send`, - // this is artificial limitation of `async_depth_first_postorder_traversal`. - visitor: impl FnMut(ConfiguredTargetNode) -> anyhow::Result<()> + Send + Sync, -) -> anyhow::Result<()> { - // To support package/recursive patterns, we hold the map by package. To support a - // single target name having multiple instances in the universe, we map them to a list of nodes. 
- struct Delegate { - visitor: F, - } - - #[async_trait] - impl anyhow::Result<()> + Sync + Send> - AsyncTraversalDelegate for Delegate - { - fn visit(&mut self, target: ConfiguredGraphNodeRef) -> anyhow::Result<()> { - (self.visitor)(target.0) - } - - async fn for_each_child( - &mut self, - target: &ConfiguredGraphNodeRef, - func: &mut dyn ChildVisitor, - ) -> anyhow::Result<()> { - for dep in target.0.deps() { - func.visit(ConfiguredGraphNodeRef(dep.dupe()))?; - } - Ok(()) - } - } - let mut delegate = Delegate { visitor }; - - let roots = roots - .into_iter() - .map(|node| ConfiguredGraphNodeRef(node.dupe())) - .collect::>(); - async_fast_depth_first_postorder_traversal(&ConfiguredGraphNodeRefLookup, &roots, &mut delegate) - .await +pub fn configured_node_visit_all_deps<'a>( + roots: impl IntoIterator>, + mut visitor: impl FnMut(ConfiguredTargetNodeRef<'a>), +) { + bfs_preorder( + roots.into_iter().map(ConfiguredTargetNodeRefNode::from_ref), + ConfiguredTargetNodeRefNodeDeps, + |node| visitor(node.as_ref()), + ) } diff --git a/app/buck2_node/src/nodes/configured_ref.rs b/app/buck2_node/src/nodes/configured_ref.rs index c71a1402bcf68..5ce1a1360f4a5 100644 --- a/app/buck2_node/src/nodes/configured_ref.rs +++ b/app/buck2_node/src/nodes/configured_ref.rs @@ -8,44 +8,53 @@ */ use std::borrow::Cow; +use std::ops::Deref; use allocative::Allocative; -use async_trait::async_trait; use buck2_core::build_file_path::BuildFilePath; use buck2_core::cells::cell_path::CellPath; use buck2_core::target::configured_target_label::ConfiguredTargetLabel; -use buck2_query::query::environment::LabeledNode; -use buck2_query::query::environment::NodeLabel; use buck2_query::query::environment::QueryTarget; -use buck2_query::query::traversal::AsyncNodeLookup; -use buck2_query::query::traversal::NodeLookup; +use buck2_query::query::graph::node::LabeledNode; +use buck2_query::query::graph::node::NodeKey; use dupe::Dupe; use ref_cast::RefCast; -use serde::Serializer; use crate::attrs::attr_type::any_matches::AnyMatches; use crate::attrs::configured_attr::ConfiguredAttr; -use crate::attrs::display::AttrDisplayWithContextExt; -use crate::attrs::fmt_context::AttrFmtContext; use crate::attrs::inspect_options::AttrInspectOptions; -use crate::attrs::serialize::AttrSerializeWithContext; use crate::nodes::configured::ConfiguredTargetNode; /// `ConfiguredTargetNode` as both `LabeledNode` and `NodeLabel` and also `QueryTarget`. 
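The rewritten `configured_node_visit_all_deps` above is now synchronous and hands the visitor borrowed nodes instead of clones. A hedged usage sketch, assuming `roots: Vec<ConfiguredTargetNode>` has already been computed and `dupe::Dupe` is in scope:

    let mut labels = Vec::new();
    configured_node_visit_all_deps(
        roots.iter().map(|node| node.as_ref()),
        |node| labels.push(node.label().dupe()),
    );
    // `labels` now holds every root and transitive dep in BFS preorder.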
#[derive(Debug, Dupe, Clone, RefCast, Allocative)] #[repr(C)] -pub struct ConfiguredGraphNodeRef(pub ConfiguredTargetNode); +pub struct ConfiguredGraphNodeRef(ConfiguredTargetNode); -impl NodeLabel for ConfiguredGraphNodeRef { - fn label_for_filter(&self) -> String { - self.0.label().unconfigured().to_string() +impl NodeKey for ConfiguredGraphNodeRef {} + +impl Deref for ConfiguredGraphNodeRef { + type Target = ConfiguredTargetNode; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.0 } } impl ConfiguredGraphNodeRef { + #[inline] + pub fn new(node: ConfiguredTargetNode) -> Self { + ConfiguredGraphNodeRef(node) + } + pub fn label(&self) -> &ConfiguredTargetLabel { self.0.label() } + + #[inline] + pub fn into_inner(self) -> ConfiguredTargetNode { + self.0 + } } impl std::fmt::Display for ConfiguredGraphNodeRef { @@ -56,7 +65,7 @@ impl std::fmt::Display for ConfiguredGraphNodeRef { impl PartialOrd for ConfiguredGraphNodeRef { fn partial_cmp(&self, other: &Self) -> Option { - self.label().partial_cmp(other.label()) + Some(self.cmp(other)) } } @@ -68,7 +77,8 @@ impl Ord for ConfiguredGraphNodeRef { impl PartialEq for ConfiguredGraphNodeRef { fn eq(&self, other: &Self) -> bool { - self.label().eq(other.label()) + // `ptr_eq` is optimization. + self.0.ptr_eq(&other.0) || self.label().eq(other.label()) } } @@ -76,14 +86,14 @@ impl Eq for ConfiguredGraphNodeRef {} impl std::hash::Hash for ConfiguredGraphNodeRef { fn hash(&self, state: &mut H) { - self.label().hash(state) + self.0.hashed_label().hash().hash(state); } } impl LabeledNode for ConfiguredGraphNodeRef { - type NodeRef = ConfiguredGraphNodeRef; + type Key = ConfiguredGraphNodeRef; - fn node_ref(&self) -> &Self::NodeRef { + fn node_key(&self) -> &Self::Key { self } } @@ -91,27 +101,45 @@ impl LabeledNode for ConfiguredGraphNodeRef { impl QueryTarget for ConfiguredGraphNodeRef { type Attr<'a> = ConfiguredAttr; + fn label_for_filter(&self) -> String { + self.0.label().unconfigured().to_string() + } + fn rule_type(&self) -> Cow { Cow::Borrowed(self.0.rule_type().name()) } + fn name(&self) -> Cow { + Cow::Borrowed(self.0.label().name().as_str()) + } + fn buildfile_path(&self) -> &BuildFilePath { self.0.buildfile_path() } - // TODO(cjhopman): Use existential traits to remove the Box<> once they are stabilized. 
- fn deps<'a>(&'a self) -> Box + Send + 'a> { - Box::new(self.0.deps().map(ConfiguredGraphNodeRef::ref_cast)) + fn deps<'a>(&'a self) -> impl Iterator + Send + 'a { + self.0.deps().map(ConfiguredGraphNodeRef::ref_cast) + } + + fn exec_deps<'a>(&'a self) -> impl Iterator + Send + 'a { + self.0.exec_deps().map(ConfiguredGraphNodeRef::ref_cast) } - fn exec_deps<'a>(&'a self) -> Box + Send + 'a> { - Box::new(self.0.exec_deps().map(ConfiguredGraphNodeRef::ref_cast)) + fn target_deps<'a>(&'a self) -> impl Iterator + Send + 'a { + self.0.target_deps().map(ConfiguredGraphNodeRef::ref_cast) } - fn target_deps<'a>(&'a self) -> Box + Send + 'a> { - Box::new(self.0.target_deps().map(ConfiguredGraphNodeRef::ref_cast)) + fn configuration_deps<'a>(&'a self) -> impl Iterator + Send + 'a { + self.0 + .configuration_deps() + .map(ConfiguredGraphNodeRef::ref_cast) } + fn toolchain_deps<'a>(&'a self) -> impl Iterator + Send + 'a { + self.0 + .toolchain_deps() + .map(ConfiguredGraphNodeRef::ref_cast) + } fn attr_any_matches( attr: &Self::Attr<'_>, filter: &dyn Fn(&str) -> anyhow::Result, @@ -139,6 +167,16 @@ impl QueryTarget for ConfiguredGraphNodeRef { Ok(()) } + fn defined_attrs_for_each) -> Result<(), E>>( + &self, + mut func: F, + ) -> Result<(), E> { + for a in self.0.attrs(AttrInspectOptions::DefinedOnly) { + func(a.name, &a.value)?; + } + Ok(()) + } + fn map_attr>) -> R>(&self, key: &str, mut func: F) -> R { func( self.0 @@ -157,47 +195,4 @@ impl QueryTarget for ConfiguredGraphNodeRef { } Ok(()) } - - fn call_stack(&self) -> Option { - self.0.call_stack() - } - - fn attr_to_string_alternate(&self, attr: &Self::Attr<'_>) -> String { - format!( - "{:#}", - attr.as_display(&AttrFmtContext { - package: Some(self.0.label().pkg().dupe()), - }) - ) - } - - fn attr_serialize( - &self, - attr: &Self::Attr<'_>, - serializer: S, - ) -> Result { - attr.serialize_with_ctx( - &AttrFmtContext { - package: Some(self.0.label().pkg().dupe()), - }, - serializer, - ) - } -} - -/// Graph lookup implementation for `ConfiguredGraphNodeRef`. -/// The implementation is trivial because `ConfiguredGraphNodeRef` is both node ref and node. 
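The new `deps`/`exec_deps`/`target_deps` signatures above drop the boxed iterators that the deleted TODO comment was waiting on. A hedged sketch of the language feature that makes this possible (return-position `impl Trait` in traits, stable since Rust 1.75), with `u32` as a placeholder item type:

    trait Before {
        // Every call allocates a trait object on the heap.
        fn deps<'a>(&'a self) -> Box<dyn Iterator<Item = u32> + Send + 'a>;
    }

    trait After {
        // No allocation; the concrete iterator type is opaque to callers.
        fn deps<'a>(&'a self) -> impl Iterator<Item = u32> + Send + 'a;
    }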
-pub struct ConfiguredGraphNodeRefLookup; - -#[async_trait] -impl AsyncNodeLookup for ConfiguredGraphNodeRefLookup { - async fn get(&self, label: &ConfiguredGraphNodeRef) -> anyhow::Result { - Ok(label.dupe()) - } -} - -impl NodeLookup for ConfiguredGraphNodeRefLookup { - fn get(&self, label: &ConfiguredGraphNodeRef) -> anyhow::Result { - Ok(label.dupe()) - } } diff --git a/app/buck2_node/src/nodes/eval_result.rs b/app/buck2_node/src/nodes/eval_result.rs index 0aafa934a9f30..131e3239bd751 100644 --- a/app/buck2_node/src/nodes/eval_result.rs +++ b/app/buck2_node/src/nodes/eval_result.rs @@ -13,12 +13,13 @@ use std::fmt::Write; use std::sync::Arc; use allocative::Allocative; +use buck2_common::starlark_profiler::StarlarkProfileDataAndStatsDyn; use buck2_core::build_file_path::BuildFilePath; use buck2_core::bzl::ImportPath; use buck2_core::package::PackageLabel; +use buck2_core::pattern::pattern::PackageSpec; use buck2_core::pattern::pattern_type::PatternType; -use buck2_core::pattern::PackageSpec; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use buck2_core::target::name::TargetName; use buck2_core::target::name::TargetNameRef; use dupe::Dupe; @@ -28,23 +29,23 @@ use itertools::Itertools; use crate::nodes::targets_map::TargetsMap; use crate::nodes::unconfigured::TargetNode; +use crate::nodes::unconfigured::TargetNodeRef; use crate::super_package::SuperPackage; -#[derive(Debug, thiserror::Error)] -enum EvalulationResultError { - #[error( - "Unknown target `{target}` from package `{package}`.\n\ +#[derive(Debug, buck2_error::Error)] +// WARN: CI uses this message to filter targets +// If you change this message, please also update https://fburl.com/code/z0azzcc3 +#[error( + "Unknown target `{target}` from package `{package}`.\n\ Did you mean one of the {num_targets} targets in {buildfile_path}?{similar_targets}" - )] - UnknownTarget { - target: TargetName, - package: PackageLabel, - num_targets: usize, - buildfile_path: Arc, - similar_targets: SuggestedSimilarTargets, - }, - #[error("Zero missing targets (internal error)")] - ZeroMissingTargets, +)] +#[buck2(input)] +pub struct MissingTargetError { + pub target: TargetName, + pub package: PackageLabel, + num_targets: usize, + buildfile_path: Arc, + similar_targets: SuggestedSimilarTargets, } #[derive(Debug)] @@ -58,24 +59,26 @@ pub struct MissingTargets { impl MissingTargets { /// Error message emitted when missing targets are not skipped. 
- pub fn into_error(mut self) -> anyhow::Error { - if self.missing_targets.is_empty() { - return EvalulationResultError::ZeroMissingTargets.into(); - } - let target = self.missing_targets.swap_remove(0); - let similar_targets = SuggestedSimilarTargets::suggest( - target.name(), - self.package.dupe(), - self.all_target_labels.iter().map(|x| x.name()), - ); - EvalulationResultError::UnknownTarget { - target: target.name().to_owned(), - package: self.package, - num_targets: self.num_targets, - buildfile_path: self.buildfile_path, - similar_targets, - } - .into() + pub fn into_errors(self) -> (MissingTargetError, impl Iterator) { + let mut iter = self.missing_targets.into_iter().map(move |target| { + let similar_targets = SuggestedSimilarTargets::suggest( + target.name(), + self.package.dupe(), + self.all_target_labels.iter().map(|x| x.name()), + ); + MissingTargetError { + target: target.name().to_owned(), + package: self.package.dupe(), + num_targets: self.num_targets, + buildfile_path: self.buildfile_path.dupe(), + similar_targets, + } + }); + ( + iter.next() + .expect("Should be guaranteed that this vec is non-empty in this same file"), + iter, + ) } fn gen_missing_target_warning(mut missing_targets: Vec) -> String { @@ -123,6 +126,7 @@ pub struct EvaluationResult { imports: Vec, super_package: SuperPackage, targets: TargetsMap, + pub starlark_profile: Option>, } impl EvaluationResult { @@ -137,6 +141,8 @@ impl EvaluationResult { imports, super_package, targets, + // This is populated later when `Evaluator` is finalized. + starlark_profile: None, } } @@ -160,13 +166,13 @@ impl EvaluationResult { &self.super_package } - pub fn get_target<'a>(&'a self, name: &TargetNameRef) -> Option<&'a TargetNode> { + pub fn get_target<'a>(&'a self, name: &TargetNameRef) -> Option> { self.targets.get(name) } - pub fn resolve_target<'a>(&'a self, path: &TargetNameRef) -> anyhow::Result<&'a TargetNode> { + pub fn resolve_target<'a>(&'a self, path: &TargetNameRef) -> anyhow::Result> { self.get_target(path).ok_or_else(|| { - EvalulationResultError::UnknownTarget { + MissingTargetError { target: path.to_owned(), package: self.package().dupe(), num_targets: self.targets.len(), @@ -194,7 +200,7 @@ impl EvaluationResult { for target_info in self.targets().values() { label_to_node.insert( (target_info.label().name().to_owned(), T::default()), - target_info.dupe(), + target_info.to_owned(), ); } (label_to_node, None) @@ -206,7 +212,7 @@ impl EvaluationResult { let node = self.get_target(target_name.as_ref()); match node { Some(node) => { - label_to_node.insert((target_name, extra), node.dupe()); + label_to_node.insert((target_name, extra), node.to_owned()); } None => missing_targets .push(TargetLabel::new(self.package(), target_name.as_ref())), @@ -229,6 +235,15 @@ impl EvaluationResult { } } +#[derive(Debug)] +pub struct EvaluationResultWithStats { + pub result: EvaluationResult, + // Peak allocated memory in starlark mutable heap during evaluation of BUCK file + pub starlark_peak_allocated_bytes: u64, + /// Instruction count during evaluation of `BUCK` file. 
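With `into_error` replaced by `into_errors` above, callers receive the first `MissingTargetError` plus an iterator over the remaining ones, so every missing target can be reported instead of only the first. A hedged usage sketch:

    let (first, rest) = missing_targets.into_errors();
    eprintln!("{}", first);
    for err in rest {
        eprintln!("{}", err);
    }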
+ pub cpu_instruction_count: Option, +} + #[derive(Debug)] struct SuggestedSimilarTargets { package: PackageLabel, @@ -239,11 +254,12 @@ impl SuggestedSimilarTargets { fn suggest<'a>( target: &TargetNameRef, package: PackageLabel, - available_targets: impl Iterator, + available_targets: impl IntoIterator, ) -> Self { const MAX_RESULTS: usize = 10; const MAX_LEVENSHTEIN_DISTANCE: usize = 5; let targets: Vec = available_targets + .into_iter() .map(|t| (t, strsim::levenshtein(target.as_str(), t.as_str()))) .filter(|(t, lev)| { lev <= &MAX_LEVENSHTEIN_DISTANCE @@ -278,7 +294,7 @@ impl Display for SuggestedSimilarTargets { #[cfg(test)] mod tests { - use buck2_core::target::label::TargetLabel; + use buck2_core::target::label::label::TargetLabel; use crate::nodes::eval_result::MissingTargets; diff --git a/app/buck2_node/src/nodes/frontend.rs b/app/buck2_node/src/nodes/frontend.rs index 935ec45f7dbcf..ce5cfe982ff17 100644 --- a/app/buck2_node/src/nodes/frontend.rs +++ b/app/buck2_node/src/nodes/frontend.rs @@ -11,9 +11,8 @@ use std::sync::Arc; use anyhow::Context; use async_trait::async_trait; -use buck2_common::result::SharedResult; use buck2_core::package::PackageLabel; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use buck2_util::late_binding::LateBinding; use dice::DiceComputations; use dupe::Dupe; @@ -22,21 +21,22 @@ use futures::FutureExt; use crate::nodes::eval_result::EvaluationResult; use crate::nodes::unconfigured::TargetNode; +use crate::super_package::SuperPackage; #[async_trait] pub trait TargetGraphCalculationImpl: Send + Sync + 'static { /// Like `get_interpreter_results` but doesn't cache the result on the DICE graph. async fn get_interpreter_results_uncached( &self, - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, package: PackageLabel, - ) -> SharedResult>; + ) -> buck2_error::Result>; /// Returns the full interpreter evaluation result for a Package. This consists of the full set /// of `TargetNode`s of interpreting that build file. fn get_interpreter_results<'a>( &self, - ctx: &'a DiceComputations, + ctx: &'a mut DiceComputations, package: PackageLabel, ) -> BoxFuture<'a, anyhow::Result>>; } @@ -48,14 +48,14 @@ pub static TARGET_GRAPH_CALCULATION_IMPL: LateBinding<&'static dyn TargetGraphCa pub trait TargetGraphCalculation { /// Like `get_interpreter_results` but doesn't cache the result on the DICE graph. async fn get_interpreter_results_uncached( - &self, + &mut self, package: PackageLabel, - ) -> SharedResult>; + ) -> buck2_error::Result>; /// Returns the full interpreter evaluation result for a Package. This consists of the full set /// of `TargetNode`s of interpreting that build file. fn get_interpreter_results( - &self, + &mut self, package: PackageLabel, ) -> BoxFuture<'_, anyhow::Result>>; @@ -63,17 +63,23 @@ pub trait TargetGraphCalculation { /// results for the the label's package, and so this is just a utility for accessing that, it /// isn't separately cached. fn get_target_node<'a>( - &'a self, + &'a mut self, target: &'a TargetLabel, ) -> BoxFuture<'a, anyhow::Result>; + + /// For a TargetLabel, returns the TargetNode and its SuperPackage from PACKAGE files. 
+ fn get_target_node_with_super_package<'a>( + &'a mut self, + target: &'a TargetLabel, + ) -> BoxFuture<'a, anyhow::Result<(TargetNode, SuperPackage)>>; } #[async_trait] -impl TargetGraphCalculation for DiceComputations { +impl TargetGraphCalculation for DiceComputations<'_> { async fn get_interpreter_results_uncached( - &self, + &mut self, package: PackageLabel, - ) -> SharedResult> { + ) -> buck2_error::Result> { TARGET_GRAPH_CALCULATION_IMPL .get()? .get_interpreter_results_uncached(self, package) @@ -81,7 +87,7 @@ impl TargetGraphCalculation for DiceComputations { } fn get_interpreter_results( - &self, + &mut self, package: PackageLabel, ) -> BoxFuture<'_, anyhow::Result>> { TARGET_GRAPH_CALCULATION_IMPL @@ -91,25 +97,34 @@ impl TargetGraphCalculation for DiceComputations { } fn get_target_node<'a>( - &'a self, + &'a mut self, target: &'a TargetLabel, ) -> BoxFuture<'a, anyhow::Result> { + self.get_target_node_with_super_package(target) + .map(|r| r.map(|(node, _)| node)) + .boxed() + } + + fn get_target_node_with_super_package<'a>( + &'a mut self, + target: &'a TargetLabel, + ) -> BoxFuture<'a, anyhow::Result<(TargetNode, SuperPackage)>> { TARGET_GRAPH_CALCULATION_IMPL .get() .unwrap() .get_interpreter_results(self, target.pkg()) .map(move |res| { - anyhow::Ok( - res.with_context(|| { - format!( - "Error loading targets in package `{}` for target `{}`", - target.pkg(), - target - ) - })? - .resolve_target(target.name())? - .dupe(), - ) + let res = res.with_context(|| { + format!( + "Error loading targets in package `{}` for target `{}`", + target.pkg(), + target + ) + })?; + anyhow::Ok(( + res.resolve_target(target.name())?.to_owned(), + res.super_package().dupe(), + )) }) .boxed() } diff --git a/app/buck2_node/src/nodes/lookup.rs b/app/buck2_node/src/nodes/lookup.rs index 1fe2e45ffb162..788ee16bbe7cd 100644 --- a/app/buck2_node/src/nodes/lookup.rs +++ b/app/buck2_node/src/nodes/lookup.rs @@ -9,30 +9,31 @@ use async_trait::async_trait; use buck2_core::target::configured_target_label::ConfiguredTargetLabel; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use buck2_query::query::traversal::AsyncNodeLookup; -use dice::DiceComputations; +use dice::LinearRecomputeDiceComputations; use crate::nodes::configured::ConfiguredTargetNode; use crate::nodes::configured_frontend::ConfiguredTargetNodeCalculation; use crate::nodes::frontend::TargetGraphCalculation; use crate::nodes::unconfigured::TargetNode; -pub struct TargetNodeLookup<'c>(pub &'c DiceComputations); +pub struct TargetNodeLookup<'c, 'd>(pub &'c LinearRecomputeDiceComputations<'d>); #[async_trait] -impl<'c> AsyncNodeLookup for TargetNodeLookup<'c> { +impl AsyncNodeLookup for TargetNodeLookup<'_, '_> { async fn get(&self, label: &TargetLabel) -> anyhow::Result { - Ok(self.0.get_target_node(label).await?) + Ok(self.0.get().get_target_node(label).await?) } } -pub struct ConfiguredTargetNodeLookup<'c>(pub &'c DiceComputations); +pub struct ConfiguredTargetNodeLookup<'c, 'd>(pub &'c LinearRecomputeDiceComputations<'d>); #[async_trait] -impl<'c> AsyncNodeLookup for ConfiguredTargetNodeLookup<'c> { +impl AsyncNodeLookup for ConfiguredTargetNodeLookup<'_, '_> { async fn get(&self, label: &ConfiguredTargetLabel) -> anyhow::Result { self.0 + .get() .get_configured_target_node(label) .await? 
.require_compatible() diff --git a/app/buck2_node/src/nodes/mod.rs b/app/buck2_node/src/nodes/mod.rs deleted file mode 100644 index 5bd14371a6cbc..0000000000000 --- a/app/buck2_node/src/nodes/mod.rs +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -pub mod configured; -pub mod configured_frontend; -pub mod configured_node_visit_all_deps; -pub mod configured_ref; -pub mod eval_result; -pub mod frontend; -pub mod lookup; -pub mod targets_map; -pub mod unconfigured; - -/// Attributes on target nodes that are generated by buck, not provided by users. -pub mod attributes { - /// The nodes that this node depends on. - pub static DEPS: &str = "buck.deps"; - - /// The oncall for this node. - pub static ONCALL: &str = "buck.oncall"; - - /// The package or the buildfile path. - /// The package in `targets` command, but the buildfile path in `*query` commands. - pub static PACKAGE: &str = "buck.package"; - - /// A string representation of the target's rule type. - pub static TYPE: &str = "buck.type"; - - /// The target hash of this target. - pub static TARGET_HASH: &str = "buck.target_hash"; - - /// The callstack for this target. - pub static TARGET_CALL_STACK: &str = "buck.target_call_stack"; - - /// The configuration deps, deps that appear as conditions in selects. - pub static CONFIGURATION_DEPS: &str = "buck.configuration_deps"; - - /// The resolved execution platform for this node. - pub static EXECUTION_PLATFORM: &str = "buck.execution_platform"; - - /// The resolved target configuration for this node. - pub static TARGET_CONFIGURATION: &str = "buck.target_configuration"; - - /// The input source files/directories that this node uses. - pub static INPUTS: &str = "buck.inputs"; - - /// The package values for the package this target belongs to. - pub static PACKAGE_VALUES: &str = "buck.package_values"; - - /// The plugin lists on the node. This includes all plugins, regardless of whether they're - /// propagated or actually used. 
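A side note on the module move here: the deleted `nodes/mod.rs` declared these attribute names as `pub static`, while its replacement `nodes.rs` (earlier in this diff) declares them as `pub const`. For a `&'static str` the observable difference is minor, but `const` is the conventional choice, as this small sketch illustrates:

    pub static OLD: &str = "buck.deps"; // one fixed memory location
    pub const NEW: &str = "buck.deps";  // inlined at each use site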
- pub static PLUGINS: &str = "buck.plugins"; -} diff --git a/app/buck2_node/src/nodes/targets_map.rs b/app/buck2_node/src/nodes/targets_map.rs index 9ea04f91c9629..fb00876c8c77a 100644 --- a/app/buck2_node/src/nodes/targets_map.rs +++ b/app/buck2_node/src/nodes/targets_map.rs @@ -12,7 +12,7 @@ use std::hash::Hash; use std::hash::Hasher; use allocative::Allocative; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use buck2_core::target::name::TargetNameRef; use buck2_util::indent::indent; use dupe::Dupe; @@ -20,12 +20,14 @@ use starlark_map::ordered_set; use starlark_map::ordered_set::OrderedSet; use crate::nodes::unconfigured::TargetNode; +use crate::nodes::unconfigured::TargetNodeRef; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] +#[buck2(input)] pub enum TargetsMapRecordError { #[error( "Attempted to register target {0} twice, {}", - Self::format_call_stack_for_registered_target_twice(_1) + TargetsMapRecordError::format_call_stack_for_registered_target_twice(_1) )] RegisteredTargetTwice(TargetLabel, Option), } @@ -82,8 +84,8 @@ impl TargetsMap { } #[inline] - pub fn get(&self, name: &TargetNameRef) -> Option<&TargetNode> { - self.map.get(name).map(|NameIndexed(n)| n) + pub fn get<'a>(&'a self, name: &TargetNameRef) -> Option> { + self.map.get(name).map(|NameIndexed(n)| n.as_ref()) } #[inline] @@ -97,8 +99,10 @@ impl TargetsMap { } #[inline] - pub fn iter(&self) -> impl ExactSizeIterator { - self.map.iter().map(|NameIndexed(n)| (n.label().name(), n)) + pub fn iter(&self) -> impl ExactSizeIterator)> { + self.map + .iter() + .map(|NameIndexed(n)| (n.label().name(), n.as_ref())) } #[inline] @@ -112,7 +116,7 @@ impl TargetsMap { } #[inline] - pub fn values(&self) -> impl ExactSizeIterator { + pub fn values(&self) -> impl ExactSizeIterator> { self.iter().map(|(_, v)| v) } diff --git a/app/buck2_node/src/nodes/unconfigured.rs b/app/buck2_node/src/nodes/unconfigured.rs index 336c06c3f6143..b7ea4ea3bbca3 100644 --- a/app/buck2_node/src/nodes/unconfigured.rs +++ b/app/buck2_node/src/nodes/unconfigured.rs @@ -7,18 +7,21 @@ * of this source tree. 
*/ +use core::fmt; use std::hash::Hash; use std::hash::Hasher; +use std::ops::Deref; use std::sync::Arc; use allocative::Allocative; -use buck2_core::buck_path::path::BuckPathRef; use buck2_core::build_file_path::BuildFilePath; use buck2_core::cells::cell_path::CellPath; use buck2_core::configuration::transition::id::TransitionId; +use buck2_core::package::source_path::SourcePathRef; use buck2_core::plugins::PluginKind; use buck2_core::provider::label::ProvidersLabel; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; +use buck2_error::internal_error_anyhow; use buck2_util::arc_str::ArcStr; use dupe::Dupe; @@ -29,11 +32,17 @@ use crate::attrs::coerced_deps_collector::CoercedDeps; use crate::attrs::display::AttrDisplayWithContextExt; use crate::attrs::inspect_options::AttrInspectOptions; use crate::attrs::internal::DEFAULT_TARGET_PLATFORM_ATTRIBUTE_FIELD; +use crate::attrs::internal::METADATA_ATTRIBUTE_FIELD; +use crate::attrs::internal::TARGET_MODIFIERS_ATTRIBUTE_FIELD; use crate::attrs::internal::TESTS_ATTRIBUTE_FIELD; use crate::attrs::spec::AttributeSpec; use crate::attrs::traversal::CoercedAttrTraversal; use crate::attrs::values::AttrValues; +use crate::attrs::values::TargetModifiersValue; use crate::call_stack::StarlarkCallStack; +use crate::call_stack::StarlarkTargetCallStackRoot; +use crate::configuration::resolved::ConfigurationSettingKey; +use crate::metadata::map::MetadataMap; use crate::nodes::attributes::CONFIGURATION_DEPS; use crate::nodes::attributes::DEPS; use crate::nodes::attributes::ONCALL; @@ -44,12 +53,6 @@ use crate::rule::Rule; use crate::rule_type::RuleType; use crate::visibility::VisibilitySpecification; -#[derive(Debug, thiserror::Error)] -enum TargetNodeError { - #[error("`visibility` attribute coerced incorrectly (`{0}`) (internal error)")] - IncorrectVisibilityAttribute(String), -} - /// Describes a target including its name, type, and the values that the user provided. /// Some information (e.g. deps) is extracted eagerly, most is in the attrs map and needs to be /// accessed via attribute visitors. @@ -58,8 +61,19 @@ enum TargetNodeError { /// the attribute names and it doesn't store an entry for something that has a default value. All /// that information is contained in the AttributeSpec. This means that to access an attribute we /// need to look at both the attrs held by the TargetNode and the information in the AttributeSpec. -#[derive(Debug, Clone, Dupe, Eq, PartialEq, Hash, Allocative)] -pub struct TargetNode(pub Arc); +#[derive(Debug, Clone, Eq, PartialEq, Hash, Allocative)] +pub struct TargetNode(triomphe::Arc); + +impl Dupe for TargetNode {} + +impl Deref for TargetNode { + type Target = TargetNodeData; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.0 + } +} /// The kind of the rule, denoting where it can be used and how. #[derive(Debug, Copy, Clone, Dupe, Eq, PartialEq, Hash, Allocative)] @@ -72,6 +86,22 @@ pub enum RuleKind { Toolchain, } +impl RuleKind { + pub fn as_str(&self) -> &'static str { + match self { + RuleKind::Normal => "normal", + RuleKind::Configuration => "configuration", + RuleKind::Toolchain => "toolchain", + } + } +} + +impl fmt::Display for RuleKind { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(self.as_str()) + } +} + #[derive(Debug, Eq, PartialEq, Hash, Allocative)] pub struct TargetNodeData { /// Rule type for this target. 
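With the `as_str` and `Display` implementations added to `RuleKind` above, the kind formats directly; a small hedged example:

    assert_eq!(RuleKind::Toolchain.as_str(), "toolchain");
    assert_eq!(RuleKind::Configuration.to_string(), "configuration");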
@@ -94,6 +124,28 @@ pub struct TargetNodeData { call_stack: Option, } +impl TargetNodeData { + pub fn is_toolchain_rule(&self) -> bool { + self.rule.rule_kind == RuleKind::Toolchain + } + + pub fn rule_type(&self) -> &RuleType { + &self.rule.rule_type + } + + pub fn oncall(&self) -> Option<&str> { + self.package.oncall.as_ref().map(|x| x.as_str()) + } + + pub fn call_stack(&self) -> Option { + self.call_stack.as_ref().map(|s| s.to_string()) + } + + pub fn root_location(&self) -> Option { + self.call_stack.as_ref().and_then(|s| s.root_location()) + } +} + impl TargetNode { pub fn new( rule: Arc, @@ -103,7 +155,7 @@ impl TargetNode { deps_cache: CoercedDeps, call_stack: Option, ) -> TargetNode { - TargetNode(Arc::new(TargetNodeData { + TargetNode(triomphe::Arc::new(TargetNodeData { rule, package, label, @@ -121,12 +173,8 @@ impl TargetNode { self.0.rule.rule_kind == RuleKind::Configuration } - pub fn is_toolchain_rule(&self) -> bool { - self.0.rule.rule_kind == RuleKind::Toolchain - } - pub fn uses_plugins(&self) -> &[PluginKind] { - &self.0.rule.uses_plugins + self.as_ref().uses_plugins() } pub fn get_default_target_platform(&self) -> Option<&TargetLabel> { @@ -136,7 +184,7 @@ impl TargetNode { ) { Some(v) => match v.value { CoercedAttr::None => None, - CoercedAttr::Dep(t) => Some(t.target()), + CoercedAttr::Label(t) => Some(t.target()), CoercedAttr::Selector(_) | CoercedAttr::Concat(_) => { unreachable!("coercer verified attribute is not configurable") } @@ -146,36 +194,21 @@ impl TargetNode { } } - pub fn rule_type(&self) -> &RuleType { - &self.0.rule.rule_type - } - + #[inline] pub fn buildfile_path(&self) -> &BuildFilePath { - &self.0.package.buildfile_path - } - - fn deps_cache(&self) -> &CoercedDeps { - &self.0.deps_cache + self.as_ref().buildfile_path() } /// Returns all deps for this node that we know about after processing the build file + #[inline] pub fn deps(&self) -> impl Iterator { - let deps_cache = self.deps_cache(); - deps_cache - .deps - .iter() - .chain(deps_cache.transition_deps.iter().map(|(dep, _tr)| dep)) - .chain(deps_cache.exec_deps.iter()) - .chain(deps_cache.toolchain_deps.iter()) - .chain(deps_cache.plugin_deps.iter()) + self.as_ref().deps() } /// Deps which are to be transitioned to other configuration using transition function. 
+ #[inline] pub fn transition_deps(&self) -> impl Iterator)> { - self.deps_cache() - .transition_deps - .iter() - .map(|x| (&x.0, &x.1)) + self.as_ref().transition_deps() } pub fn label(&self) -> &TargetLabel { @@ -183,40 +216,7 @@ impl TargetNode { } pub fn special_attrs(&self) -> impl Iterator { - let typ_attr = CoercedAttr::String(StringLiteral(self.rule_type().name().into())); - let deps_attr = CoercedAttr::List( - self.deps() - .map(|t| CoercedAttr::Label(ProvidersLabel::default_for(t.dupe()))) - .collect(), - ); - let package_attr = CoercedAttr::String(StringLiteral(ArcStr::from( - self.buildfile_path().to_string(), - ))); - vec![ - (TYPE, typ_attr), - ( - CONFIGURATION_DEPS, - CoercedAttr::List( - self.get_configuration_deps() - .map(|t| CoercedAttr::ConfigurationDep(t.dupe())) - .collect(), - ), - ), - (DEPS, deps_attr), - (PACKAGE, package_attr), - ( - ONCALL, - match self.oncall() { - None => CoercedAttr::None, - Some(x) => CoercedAttr::String(StringLiteral(ArcStr::from(x))), - }, - ), - ] - .into_iter() - } - - pub fn oncall(&self) -> Option<&str> { - self.0.package.oncall.as_ref().map(|x| x.as_str()) + self.as_ref().special_attrs() } pub fn visibility(&self) -> anyhow::Result<&VisibilitySpecification> { @@ -226,10 +226,10 @@ impl TargetNode { // This code is unreachable: visibility attributes are validated // at the coercion stage. But if we did it wrong, // better error with all the context than panic. - Err(TargetNodeError::IncorrectVisibilityAttribute( + Err(internal_error_anyhow!( + "`visibility` attribute coerced incorrectly (`{0}`)", a.as_display_no_ctx().to_string(), - ) - .into()) + )) } None => { static DEFAULT: VisibilitySpecification = VisibilitySpecification::DEFAULT; @@ -245,48 +245,56 @@ impl TargetNode { Ok(self.visibility()?.0.matches_target(target)) } + #[inline] pub fn attrs(&self, opts: AttrInspectOptions) -> impl Iterator { - self.0.rule.attributes.attrs(&self.0.attributes, opts) + self.as_ref().attrs(opts) } + #[inline] pub fn platform_deps(&self) -> impl Iterator { - self.deps_cache().platform_deps.iter() + self.as_ref().platform_deps() } /// Return `None` if attribute is not present or unknown. + #[inline] pub fn attr_or_none<'a>( &'a self, key: &str, opts: AttrInspectOptions, ) -> Option> { - self.0 - .rule - .attributes - .attr_or_none(&self.0.attributes, key, opts) + self.as_ref().attr_or_none(key, opts) } /// Get attribute. /// /// * `None` if attribute is known but not set and no default. /// * error if attribute is unknown. 
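One detail worth noting in `visibility()` above: when the attribute is absent, it hands out a reference to a `static` default instead of allocating a default per call. A sketch of the same shape, with a hypothetical `VisibilitySpec` in place of buck2's `VisibilitySpecification`:

```rust
struct VisibilitySpec(Vec<String>);

// `Vec::new` is a const fn, so the default can live in a `static` and be
// returned as a reference with no per-call allocation.
static DEFAULT: VisibilitySpec = VisibilitySpec(Vec::new());

fn visibility(attr: Option<&VisibilitySpec>) -> &VisibilitySpec {
    match attr {
        Some(v) => v,
        None => &DEFAULT, // the `&'static` coerces to the caller's lifetime
    }
}

fn main() {
    assert!(visibility(None).0.is_empty());
}
```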
- pub fn attr( - &self, + #[inline] + pub fn attr<'a>( + &'a self, key: &str, opts: AttrInspectOptions, - ) -> anyhow::Result> { - self.0.rule.attributes.attr(&self.0.attributes, key, opts) + ) -> anyhow::Result>> { + self.as_ref().attr(key, opts) } + #[inline] pub fn target_deps(&self) -> impl Iterator { - self.deps_cache().deps.iter() + self.as_ref().target_deps() } + #[inline] pub fn exec_deps(&self) -> impl Iterator { - self.deps_cache().exec_deps.iter() + self.as_ref().exec_deps() } - pub fn get_configuration_deps(&self) -> impl Iterator { - self.deps_cache().configuration_deps.iter() + #[inline] + pub fn get_configuration_deps(&self) -> impl Iterator { + self.as_ref().get_configuration_deps() + } + #[inline] + pub fn toolchain_deps(&self) -> impl Iterator { + self.as_ref().toolchain_deps() } pub fn tests(&self) -> impl Iterator { @@ -296,7 +304,7 @@ impl TargetNode { } impl<'a> CoercedAttrTraversal<'a> for TestCollector<'a> { - fn input(&mut self, _path: BuckPathRef) -> anyhow::Result<()> { + fn input(&mut self, _path: SourcePathRef) -> anyhow::Result<()> { Ok(()) } @@ -340,7 +348,10 @@ impl TargetNode { Ok(()) } - fn configuration_dep(&mut self, _dep: &'a TargetLabel) -> anyhow::Result<()> { + fn configuration_dep( + &mut self, + _dep: &'a ConfigurationSettingKey, + ) -> anyhow::Result<()> { Ok(()) } @@ -360,12 +371,202 @@ impl TargetNode { } pub fn inputs(&self) -> impl Iterator + '_ { + self.as_ref().inputs() + } + + /// Hash the fields that impact how this target is built. + /// Don't do any recursive hashing of the dependencies. + pub fn target_hash(&self, state: &mut H) { + self.label().hash(state); + self.rule_type().hash(state); + self.attrs(AttrInspectOptions::All).for_each(|x| { + // We deliberately don't hash the attribute, as if the value being passed to analysis + // stays the same, we don't care if the attribute that generated it changed. + x.name.hash(state); + x.value.hash(state); + }); + } + + #[inline] + pub fn metadata(&self) -> anyhow::Result> { + self.as_ref().metadata() + } + + #[inline] + pub fn as_ref(&self) -> TargetNodeRef<'_> { + TargetNodeRef(triomphe::Arc::borrow_arc(&self.0)) + } +} + +#[derive(Copy, Clone)] +pub struct TargetNodeRef<'a>(triomphe::ArcBorrow<'a, TargetNodeData>); + +impl<'a> Dupe for TargetNodeRef<'a> {} + +impl<'a> Deref for TargetNodeRef<'a> { + type Target = TargetNodeData; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl<'a> TargetNodeRef<'a> { + #[inline] + pub fn label(self) -> &'a TargetLabel { + &self.0.get().label + } + + #[inline] + pub fn to_owned(self) -> TargetNode { + TargetNode(triomphe::ArcBorrow::clone_arc(&self.0)) + } + + pub fn buildfile_path(self) -> &'a BuildFilePath { + &self.0.get().package.buildfile_path + } + + /// Get attribute. + /// + /// * `None` if attribute is known but not set and no default. + /// * error if attribute is unknown. + pub fn attr( + self, + key: &str, + opts: AttrInspectOptions, + ) -> anyhow::Result>> { + self.0 + .get() + .rule + .attributes + .attr(&self.0.get().attributes, key, opts) + } + + /// Return `None` if attribute is not present or unknown. 
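`TargetNodeRef` above is built on `triomphe::ArcBorrow`: a `Copy` handle that proves the underlying `Arc` is alive without touching its refcount, and that can be upgraded back to an owning `Arc` on demand. A standalone sketch of the three moves used here (`borrow_arc`, `get`, `clone_arc`), again assuming the `triomphe` crate:

```rust
fn main() {
    let owned = triomphe::Arc::new(vec![1, 2, 3]);

    // Borrow without incrementing the reference count.
    let borrowed: triomphe::ArcBorrow<'_, Vec<i32>> = owned.borrow_arc();
    let copy = borrowed; // `ArcBorrow` is `Copy`, so handles pass around freely

    // `get` returns a reference tied to the borrow's lifetime...
    assert_eq!(copy.get().len(), 3);

    // ...and `clone_arc` pays the refcount bump only when ownership is needed.
    let reowned: triomphe::Arc<Vec<i32>> = copy.clone_arc();
    assert_eq!(*reowned, *owned);
}
```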
+ pub fn attr_or_none(&self, key: &str, opts: AttrInspectOptions) -> Option> { + self.0 + .get() + .rule + .attributes + .attr_or_none(&self.0.get().attributes, key, opts) + } + + pub fn attrs(self, opts: AttrInspectOptions) -> impl Iterator> { + self.0 + .get() + .rule + .attributes + .attrs(&self.0.get().attributes, opts) + } + + pub fn special_attrs(self) -> impl Iterator + 'a { + let typ_attr = CoercedAttr::String(StringLiteral(self.rule_type().name().into())); + let deps_attr = CoercedAttr::List( + self.deps() + .map(|t| CoercedAttr::Label(ProvidersLabel::default_for(t.dupe()))) + .collect(), + ); + let package_attr = CoercedAttr::String(StringLiteral(ArcStr::from( + self.buildfile_path().to_string().as_str(), + ))); + vec![ + (TYPE, typ_attr), + ( + CONFIGURATION_DEPS, + CoercedAttr::List( + self.get_configuration_deps() + .map(|t| CoercedAttr::ConfigurationDep(t.dupe())) + .collect(), + ), + ), + (DEPS, deps_attr), + (PACKAGE, package_attr), + ( + ONCALL, + match self.oncall() { + None => CoercedAttr::None, + Some(x) => CoercedAttr::String(StringLiteral(ArcStr::from(x))), + }, + ), + ] + .into_iter() + } + + pub fn metadata(self) -> anyhow::Result> { + self.attr_or_none(METADATA_ATTRIBUTE_FIELD, AttrInspectOptions::All) + .map(|attr| match attr.value { + CoercedAttr::Metadata(m) => Ok(m), + x => Err(internal_error_anyhow!("`metadata` attribute should be coerced as a dict of strings to JSON values. Found `{:?}` instead", x)), + }) + .transpose() + } + + pub fn target_modifiers(self) -> anyhow::Result> { + self.attr_or_none(TARGET_MODIFIERS_ATTRIBUTE_FIELD, AttrInspectOptions::All) + .map(|attr| match attr.value { + CoercedAttr::TargetModifiers(m) => Ok(m), + x => Err(internal_error_anyhow!( + "`modifiers` attribute should be coerced as a JSON value. Found `{:?}` instead", + x + )), + }) + .transpose() + } + + pub fn target_deps(self) -> impl Iterator { + self.0.get().deps_cache.deps.iter() + } + + pub fn exec_deps(self) -> impl Iterator { + self.0.get().deps_cache.exec_deps.iter() + } + + pub fn toolchain_deps(self) -> impl Iterator { + self.0.get().deps_cache.toolchain_deps.iter() + } + + pub fn get_configuration_deps(self) -> impl Iterator { + self.0.get().deps_cache.configuration_deps.iter() + } + + pub fn platform_deps(self) -> impl Iterator { + self.0.get().deps_cache.platform_deps.iter() + } + + /// Returns all deps for this node that we know about after processing the build file + pub fn deps(self) -> impl Iterator { + let deps_cache = &self.0.get().deps_cache; + deps_cache + .deps + .iter() + .chain(deps_cache.transition_deps.iter().map(|(dep, _tr)| dep)) + .chain(deps_cache.exec_deps.iter()) + .chain(deps_cache.toolchain_deps.iter()) + .chain(deps_cache.plugin_deps.iter()) + } + + /// Deps which are to be transitioned to other configuration using transition function. 
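The `deps()` implementation above merges every dep kind into one borrowed iterator with `Iterator::chain`, so callers make a single pass with no intermediate collection. A std-only sketch of that shape, with `Vec<String>`s standing in for the `CoercedDeps` slices:

```rust
struct DepsCache {
    deps: Vec<String>,
    transition_deps: Vec<(String, u32)>, // (dep, transition id) stand-in
    exec_deps: Vec<String>,
    toolchain_deps: Vec<String>,
}

impl DepsCache {
    // One lazy pass over all dep kinds; nothing is copied or collected.
    fn all_deps(&self) -> impl Iterator<Item = &String> + '_ {
        self.deps
            .iter()
            .chain(self.transition_deps.iter().map(|(dep, _tr)| dep))
            .chain(self.exec_deps.iter())
            .chain(self.toolchain_deps.iter())
    }
}

fn main() {
    let cache = DepsCache {
        deps: vec!["//a:a".to_owned()],
        transition_deps: vec![("//b:b".to_owned(), 1)],
        exec_deps: vec!["//c:c".to_owned()],
        toolchain_deps: vec![],
    };
    assert_eq!(cache.all_deps().count(), 3);
}
```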
+ pub fn transition_deps(self) -> impl Iterator)> { + self.0 + .get() + .deps_cache + .transition_deps + .iter() + .map(|x| (&x.0, &x.1)) + } + + pub fn uses_plugins(self) -> &'a [PluginKind] { + &self.0.get().rule.uses_plugins + } + + pub fn inputs(self) -> impl Iterator + 'a { struct InputsCollector { inputs: Vec, } impl<'a> CoercedAttrTraversal<'a> for InputsCollector { - fn input(&mut self, path: BuckPathRef) -> anyhow::Result<()> { + fn input(&mut self, path: SourcePathRef) -> anyhow::Result<()> { self.inputs.push(path.to_cell_path()); Ok(()) } @@ -410,7 +611,10 @@ impl TargetNode { Ok(()) } - fn configuration_dep(&mut self, _dep: &'a TargetLabel) -> anyhow::Result<()> { + fn configuration_dep( + &mut self, + _dep: &'a ConfigurationSettingKey, + ) -> anyhow::Result<()> { Ok(()) } } @@ -422,52 +626,30 @@ impl TargetNode { traversal.inputs.into_iter() } - - pub fn call_stack(&self) -> Option { - self.0.call_stack.as_ref().map(|s| s.to_string()) - } - - /// Hash the fields that impact how this target is built. - /// Don't do any recursive hashing of the dependencies. - pub fn target_hash(&self, state: &mut H) { - self.label().hash(state); - self.rule_type().hash(state); - self.attrs(AttrInspectOptions::All).for_each(|x| { - // We deliberately don't hash the attribute, as if the value being passed to analysis - // stays the same, we don't care if the attribute that generated it changed. - x.name.hash(state); - x.value.hash(state); - }); - } } pub mod testing { - use std::sync::Arc; - use buck2_core::build_file_path::BuildFilePath; use buck2_core::fs::paths::file_name::FileNameBuf; use buck2_core::package::PackageLabel; - use buck2_core::target::label::TargetLabel; - use dupe::Dupe; use serde_json::map::Map; use serde_json::value::Value; use super::*; use crate::attrs::attr::Attribute; - use crate::attrs::coerced_attr::CoercedAttr; use crate::attrs::coerced_deps_collector::CoercedDepsCollector; use crate::attrs::fmt_context::AttrFmtContext; - use crate::attrs::inspect_options::AttrInspectOptions; - use crate::attrs::spec::AttributeSpec; - use crate::attrs::values::AttrValues; + use crate::attrs::id::AttributeId; + use crate::attrs::internal::internal_attrs; use crate::nodes::targets_map::TargetsMap; - use crate::rule_type::RuleType; pub trait TargetNodeExt { fn testing_new( label: TargetLabel, rule_type: RuleType, attrs: Vec<(&str, Attribute, CoercedAttr)>, + internal_attrs: Vec<(&str, Attribute, CoercedAttr)>, + call_stack: Option, ) -> Self; } @@ -476,6 +658,8 @@ pub mod testing { label: TargetLabel, rule_type: RuleType, attrs: Vec<(&str, Attribute, CoercedAttr)>, + internal_attrs: Vec<(&str, Attribute, CoercedAttr)>, + call_stack: Option, ) -> TargetNode { let attr_spec = AttributeSpec::testing_new( attrs @@ -491,6 +675,10 @@ pub mod testing { CoercedAttr::String(StringLiteral(label.name().as_str().into())), ); + for (name, _attr, val) in internal_attrs { + attributes.push_sorted(node_attr_id(name), val); + } + let mut deps_cache = CoercedDepsCollector::new(); for (name, _attr, val) in attrs.into_iter() { @@ -520,11 +708,24 @@ pub mod testing { label, attributes, CoercedDeps::from(deps_cache), - None, + call_stack, ) } } + fn node_attr_id(field: &str) -> AttributeId { + let index_in_attribute_spec = u16::try_from( + internal_attrs() + .keys() + .position(|name| *name == field) + .unwrap(), + ) + .unwrap(); + AttributeId { + index_in_attribute_spec, + } + } + /// Take a TargetsMap and convert it to a nice json representation. 
Adds in a __type__ attr /// for each target's values to make it clear what the rule type is. That can probably go /// away eventually. @@ -543,6 +744,7 @@ pub mod testing { a.name.to_owned(), a.value.to_json(&AttrFmtContext { package: Some(pkg.dupe()), + options: Default::default(), })?, )) }) diff --git a/app/buck2_node/src/oncall.rs b/app/buck2_node/src/oncall.rs new file mode 100644 index 0000000000000..ee52d7f341a84 --- /dev/null +++ b/app/buck2_node/src/oncall.rs @@ -0,0 +1,27 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::sync::Arc; + +use allocative::Allocative; +use dupe::Dupe; + +/// The `oncall` annotation for a `BUCK` file. +#[derive(Debug, Hash, Allocative, Eq, PartialEq, Dupe, Clone)] +pub struct Oncall(Arc); + +impl Oncall { + pub fn new(oncall: &str) -> Self { + Self(Arc::new(oncall.to_owned())) + } + + pub fn as_str(&self) -> &str { + &self.0 + } +} diff --git a/app/buck2_node/src/package.rs b/app/buck2_node/src/package.rs index 233ba18a3b019..0c06e117d5f3e 100644 --- a/app/buck2_node/src/package.rs +++ b/app/buck2_node/src/package.rs @@ -12,6 +12,8 @@ use std::sync::Arc; use allocative::Allocative; use buck2_core::build_file_path::BuildFilePath; +use crate::oncall::Oncall; + /// Package-specific data for `TargetNode`. /// /// (Note this has nothing to do with `PACKAGE` files which are not implemented @@ -21,5 +23,5 @@ pub struct Package { /// The build file which defined this target, e.g. `fbcode//foo/bar/TARGETS` pub buildfile_path: Arc, /// The oncall attribute, if set - pub oncall: Option>, + pub oncall: Option, } diff --git a/app/buck2_node/src/package_values_calculation.rs b/app/buck2_node/src/package_values_calculation.rs index 13939fac23699..7c521556cd2e9 100644 --- a/app/buck2_node/src/package_values_calculation.rs +++ b/app/buck2_node/src/package_values_calculation.rs @@ -19,7 +19,7 @@ use crate::metadata::key::MetadataKey; pub trait PackageValuesCalculation: Send + Sync + 'static { async fn package_values( &self, - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, package: PackageLabel, ) -> anyhow::Result>; } diff --git a/app/buck2_node/src/query/mod.rs b/app/buck2_node/src/query.rs similarity index 100% rename from app/buck2_node/src/query/mod.rs rename to app/buck2_node/src/query.rs diff --git a/app/buck2_node/src/query/configured.rs b/app/buck2_node/src/query/configured.rs index d61c4438c39c1..0284e8b9143b5 100644 --- a/app/buck2_node/src/query/configured.rs +++ b/app/buck2_node/src/query/configured.rs @@ -12,53 +12,69 @@ use std::borrow::Cow; use buck2_core::build_file_path::BuildFilePath; use buck2_core::cells::cell_path::CellPath; use buck2_core::target::configured_target_label::ConfiguredTargetLabel; -use buck2_query::query::environment::LabeledNode; use buck2_query::query::environment::QueryTarget; +use buck2_query::query::graph::node::LabeledNode; use dupe::Dupe; -use serde::Serializer; +use starlark_map::Hashed; use crate::attrs::attr_type::any_matches::AnyMatches; use crate::attrs::configured_attr::ConfiguredAttr; -use crate::attrs::display::AttrDisplayWithContextExt; -use crate::attrs::fmt_context::AttrFmtContext; use crate::attrs::inspect_options::AttrInspectOptions; -use crate::attrs::serialize::AttrSerializeWithContext; use 
crate::nodes::configured::ConfiguredTargetNode; +use crate::nodes::configured::ConfiguredTargetNodeRef; impl LabeledNode for ConfiguredTargetNode { - type NodeRef = ConfiguredTargetLabel; + type Key = ConfiguredTargetLabel; - fn node_ref(&self) -> &Self::NodeRef { + fn node_key(&self) -> &Self::Key { ConfiguredTargetNode::label(self) } + + fn hashed_node_key(&self) -> Hashed<&Self::Key> { + ConfiguredTargetNode::hashed_label(self) + } } impl QueryTarget for ConfiguredTargetNode { type Attr<'a> = ConfiguredAttr; + fn label_for_filter(&self) -> String { + return self.label().unconfigured().to_string(); + } + fn rule_type(&self) -> Cow { Cow::Borrowed(ConfiguredTargetNode::rule_type(self).name()) } + fn name(&self) -> Cow { + Cow::Borrowed(self.label().name().as_str()) + } + fn buildfile_path(&self) -> &BuildFilePath { ConfiguredTargetNode::buildfile_path(self) } - // TODO(cjhopman): Use existential traits to remove the Box<> once they are stabilized. - fn deps<'a>(&'a self) -> Box + Send + 'a> { - Box::new(ConfiguredTargetNode::deps(self).map(|v| v.label())) + fn deps<'a>(&'a self) -> impl Iterator + Send + 'a { + ConfiguredTargetNode::deps(self).map(|v| v.label()) } - fn exec_deps<'a>(&'a self) -> Box + Send + 'a> { - Box::new(ConfiguredTargetNode::exec_deps(self).map(|v| v.label())) + fn exec_deps<'a>(&'a self) -> impl Iterator + Send + 'a { + ConfiguredTargetNode::exec_deps(self).map(|v| v.label()) } - fn target_deps<'a>(&'a self) -> Box + Send + 'a> { - Box::new(ConfiguredTargetNode::target_deps(self).map(|v| v.label())) + fn target_deps<'a>(&'a self) -> impl Iterator + Send + 'a { + ConfiguredTargetNode::target_deps(self).map(|v| v.label()) } - fn tests<'a>(&'a self) -> Option + Send + 'a>> { - Some(Box::new(self.tests().map(|t| t.target().dupe()))) + fn configuration_deps<'a>(&'a self) -> impl Iterator + Send + 'a { + ConfiguredTargetNode::configuration_deps(self).map(|v| v.label()) + } + + fn toolchain_deps<'a>(&'a self) -> impl Iterator + Send + 'a { + ConfiguredTargetNode::toolchain_deps(self).map(|v| v.label()) + } + fn tests<'a>(&'a self) -> Option + Send + 'a> { + Some(self.tests().map(|t| t.target().dupe())) } fn special_attrs_for_each) -> Result<(), E>>( @@ -88,6 +104,16 @@ impl QueryTarget for ConfiguredTargetNode { Ok(()) } + fn defined_attrs_for_each) -> Result<(), E>>( + &self, + mut func: F, + ) -> Result<(), E> { + for a in self.attrs(AttrInspectOptions::DefinedOnly) { + func(a.name, &a.value)?; + } + Ok(()) + } + fn map_attr>) -> R>(&self, key: &str, mut func: F) -> R { func( self.get(key, AttrInspectOptions::All) @@ -105,30 +131,16 @@ impl QueryTarget for ConfiguredTargetNode { } Ok(()) } +} - fn call_stack(&self) -> Option { - self.call_stack() - } +impl<'a> LabeledNode for ConfiguredTargetNodeRef<'a> { + type Key = ConfiguredTargetLabel; - fn attr_to_string_alternate(&self, attr: &Self::Attr<'_>) -> String { - format!( - "{:#}", - attr.as_display(&AttrFmtContext { - package: Some(self.label().pkg().dupe()), - }) - ) + fn node_key(&self) -> &Self::Key { + ConfiguredTargetNodeRef::label(*self) } - fn attr_serialize( - &self, - attr: &Self::Attr<'_>, - serializer: S, - ) -> Result { - attr.serialize_with_ctx( - &AttrFmtContext { - package: Some(self.label().pkg().dupe()), - }, - serializer, - ) + fn hashed_node_key(&self) -> Hashed<&Self::Key> { + ConfiguredTargetNodeRef::hashed_label(*self) } } diff --git a/app/buck2_node/src/query/unconfigured.rs b/app/buck2_node/src/query/unconfigured.rs index 5d50071b4017f..2f00a2df7fe74 100644 --- 
a/app/buck2_node/src/query/unconfigured.rs +++ b/app/buck2_node/src/query/unconfigured.rs @@ -11,23 +11,20 @@ use std::borrow::Cow; use buck2_core::build_file_path::BuildFilePath; use buck2_core::cells::cell_path::CellPath; -use buck2_core::target::label::TargetLabel; -use buck2_query::query::environment::LabeledNode; +use buck2_core::target::label::label::TargetLabel; use buck2_query::query::environment::QueryTarget; +use buck2_query::query::graph::node::LabeledNode; use dupe::Dupe; -use serde::Serializer; use crate::attrs::coerced_attr::CoercedAttr; -use crate::attrs::display::AttrDisplayWithContextExt; -use crate::attrs::fmt_context::AttrFmtContext; use crate::attrs::inspect_options::AttrInspectOptions; -use crate::attrs::serialize::AttrSerializeWithContext; use crate::nodes::unconfigured::TargetNode; +use crate::nodes::unconfigured::TargetNodeData; impl LabeledNode for TargetNode { - type NodeRef = TargetLabel; + type Key = TargetLabel; - fn node_ref(&self) -> &Self::NodeRef { + fn node_key(&self) -> &Self::Key { TargetNode::label(self) } } @@ -36,28 +33,38 @@ impl QueryTarget for TargetNode { type Attr<'a> = CoercedAttr; fn rule_type(&self) -> Cow { - Cow::Borrowed(TargetNode::rule_type(self).name()) + Cow::Borrowed(TargetNodeData::rule_type(self).name()) + } + + fn name(&self) -> Cow { + Cow::Borrowed(self.label().name().as_str()) } fn buildfile_path(&self) -> &BuildFilePath { TargetNode::buildfile_path(self) } - // TODO(cjhopman): Use existential traits to remove the Box<> once they are stabilized. - fn deps<'a>(&'a self) -> Box + Send + 'a> { - Box::new(TargetNode::deps(self)) + fn deps<'a>(&'a self) -> impl Iterator + Send + 'a { + TargetNode::deps(self) + } + + fn exec_deps<'a>(&'a self) -> impl Iterator + Send + 'a { + TargetNode::exec_deps(self) } - fn exec_deps<'a>(&'a self) -> Box + Send + 'a> { - Box::new(TargetNode::exec_deps(self)) + fn target_deps<'a>(&'a self) -> impl Iterator + Send + 'a { + TargetNode::target_deps(self) } - fn target_deps<'a>(&'a self) -> Box + Send + 'a> { - Box::new(TargetNode::target_deps(self)) + fn configuration_deps<'a>(&'a self) -> impl Iterator + Send + 'a { + TargetNode::get_configuration_deps(self).map(|k| &k.0) } - fn tests<'a>(&'a self) -> Option + Send + 'a>> { - Some(Box::new(self.tests().map(|t| t.target().dupe()))) + fn toolchain_deps<'a>(&'a self) -> impl Iterator + Send + 'a { + TargetNode::toolchain_deps(self) + } + fn tests<'a>(&'a self) -> Option + Send + 'a> { + Some(self.tests().map(|t| t.target().dupe())) } fn attr_any_matches( @@ -87,6 +94,16 @@ impl QueryTarget for TargetNode { Ok(()) } + fn defined_attrs_for_each) -> Result<(), E>>( + &self, + mut func: F, + ) -> Result<(), E> { + for a in self.attrs(AttrInspectOptions::DefinedOnly) { + func(a.name, a.value)?; + } + Ok(()) + } + fn map_attr>) -> R>(&self, key: &str, mut func: F) -> R { func( self.attr_or_none(key, AttrInspectOptions::All) @@ -104,30 +121,4 @@ impl QueryTarget for TargetNode { } Ok(()) } - - fn call_stack(&self) -> Option { - self.call_stack() - } - - fn attr_to_string_alternate(&self, attr: &Self::Attr<'_>) -> String { - format!( - "{:#}", - attr.as_display(&AttrFmtContext { - package: Some(self.label().pkg().dupe()), - }) - ) - } - - fn attr_serialize( - &self, - attr: &Self::Attr<'_>, - serializer: S, - ) -> Result { - attr.serialize_with_ctx( - &AttrFmtContext { - package: Some(self.label().pkg().dupe()), - }, - serializer, - ) - } } diff --git a/app/buck2_node/src/rule_type.rs b/app/buck2_node/src/rule_type.rs index 403d90d41f39a..761ced305a6bd 100644 --- 
a/app/buck2_node/src/rule_type.rs +++ b/app/buck2_node/src/rule_type.rs @@ -15,7 +15,7 @@ use dupe::Dupe; /// The identifier used to find the implementation function for this rule. Should point at the output of `rule()` #[derive(Debug, Clone, derive_more::Display, Eq, PartialEq, Hash, Allocative)] -#[display(fmt = "{}:{}", import_path, name)] +#[display("{}:{}", import_path, name)] pub struct StarlarkRuleType { /// The cell, package, and file that contains the output of `rule()` pub import_path: ImportPath, @@ -35,7 +35,7 @@ pub struct StarlarkRuleType { )] pub enum RuleType { Starlark(Arc), - #[display(fmt = "forward")] + #[display("forward")] Forward, } diff --git a/app/buck2_node/src/target_calculation.rs b/app/buck2_node/src/target_calculation.rs index 1d1d225a537a2..e0dc8b13a6e13 100644 --- a/app/buck2_node/src/target_calculation.rs +++ b/app/buck2_node/src/target_calculation.rs @@ -8,20 +8,23 @@ */ use async_trait::async_trait; +use buck2_common::global_cfg_options::GlobalCfgOptions; use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_core::provider::label::ProvidersLabel; use buck2_core::target::configured_target_label::ConfiguredTargetLabel; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use buck2_util::late_binding::LateBinding; use dice::DiceComputations; +use crate::nodes::configured_frontend::ConfiguredTargetNodeCalculation; + #[async_trait] pub trait ConfiguredTargetCalculationImpl: Send + Sync + 'static { async fn get_configured_target( &self, - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, target: &TargetLabel, - global_target_platform: Option<&TargetLabel>, + global_cfg_options: &GlobalCfgOptions, ) -> anyhow::Result; } @@ -43,44 +46,65 @@ pub trait ConfiguredTargetCalculation { /// a mix of the global Configuration, the target's `default_target_platform` and /// (potentially) self-transitions on that node. async fn get_configured_target( - &self, + &mut self, + target: &TargetLabel, + global_cfg_options: &GlobalCfgOptions, + ) -> anyhow::Result; + + async fn get_configured_target_post_transition( + &mut self, target: &TargetLabel, - global_target_platform: Option<&TargetLabel>, + global_cfg_options: &GlobalCfgOptions, ) -> anyhow::Result; async fn get_configured_provider_label( - &self, + &mut self, target: &ProvidersLabel, - global_target_platform: Option<&TargetLabel>, + global_cfg_options: &GlobalCfgOptions, ) -> anyhow::Result; async fn get_default_configured_target( - &self, + &mut self, target: &TargetLabel, ) -> anyhow::Result; } #[async_trait] -impl ConfiguredTargetCalculation for DiceComputations { +impl ConfiguredTargetCalculation for DiceComputations<'_> { async fn get_configured_target( - &self, + &mut self, target: &TargetLabel, - global_target_platform: Option<&TargetLabel>, + global_cfg_options: &GlobalCfgOptions, ) -> anyhow::Result { CONFIGURED_TARGET_CALCULATION .get()? - .get_configured_target(self, target, global_target_platform) + .get_configured_target(self, target, global_cfg_options) .await } + async fn get_configured_target_post_transition( + &mut self, + target: &TargetLabel, + global_cfg_options: &GlobalCfgOptions, + ) -> anyhow::Result { + let configured = self + .get_configured_target(target, global_cfg_options) + .await?; + let configured_target_node = self + .get_internal_configured_target_node(&configured) + .await? 
+ .require_compatible()?; + Ok(configured_target_node.unwrap_forward().label().clone()) + } + async fn get_configured_provider_label( - &self, + &mut self, target: &ProvidersLabel, - global_target_platform: Option<&TargetLabel>, + global_cfg_options: &GlobalCfgOptions, ) -> anyhow::Result { let configured_target_label = CONFIGURED_TARGET_CALCULATION .get()? - .get_configured_target(self, target.target(), global_target_platform) + .get_configured_target(self, target.target(), global_cfg_options) .await?; Ok(ConfiguredProvidersLabel::new( configured_target_label, @@ -89,12 +113,12 @@ impl ConfiguredTargetCalculation for DiceComputations { } async fn get_default_configured_target( - &self, + &mut self, target: &TargetLabel, ) -> anyhow::Result { CONFIGURED_TARGET_CALCULATION .get()? - .get_configured_target(self, target, None) + .get_configured_target(self, target, &GlobalCfgOptions::default()) .await } } diff --git a/app/buck2_node/src/visibility.rs b/app/buck2_node/src/visibility.rs index aaedcbd3232eb..478bb213fbcaa 100644 --- a/app/buck2_node/src/visibility.rs +++ b/app/buck2_node/src/visibility.rs @@ -12,21 +12,21 @@ use std::fmt::Display; use std::fmt::Formatter; use allocative::Allocative; +use buck2_core::pattern::pattern::ParsedPattern; use buck2_core::pattern::pattern_type::TargetPatternExtra; -use buck2_core::pattern::ParsedPattern; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use buck2_util::arc_str::ThinArcSlice; use dupe::Dupe; use gazebo::prelude::SliceExt; -use thiserror::Error; use crate::attrs::attr_type::any_matches::AnyMatches; -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] pub enum VisibilityError { #[error( "`{0}` is not visible to `{1}` (run `buck2 uquery --output-attribute visibility {0}` to check the visibility)" )] + #[buck2(input, tag = Visibility)] NotVisibleTo(TargetLabel, TargetLabel), } @@ -42,7 +42,7 @@ impl VisibilityPattern { } #[derive(derive_more::Display)] -#[display(fmt = "\"{}\"", _0)] +#[display("\"{}\"", _0)] struct VisibilityPatternQuoted<'a>(&'a VisibilityPattern); #[derive(Debug, Eq, PartialEq, Hash, Clone, Dupe, Allocative)] diff --git a/app/buck2_node_tests/BUCK b/app/buck2_node_tests/BUCK index 2f1c5fe79e200..c78c6ffdd011a 100644 --- a/app/buck2_node_tests/BUCK +++ b/app/buck2_node_tests/BUCK @@ -1,18 +1,16 @@ -load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") +load("@fbcode_macros//build_defs:rust_unittest.bzl", "rust_unittest") oncall("build_infra") -rust_library( +rust_unittest( name = "buck2_node_tests", srcs = glob(["src/**/*.rs"]), crate_root = "src/lib.rs", - test_deps = [ + deps = [ "fbsource//third-party/rust:anyhow", "//buck2/app/buck2_core:buck2_core", - "//buck2/app/buck2_interpreter_for_build:buck2_interpreter_for_build", "//buck2/app/buck2_node:buck2_node", + "//buck2/app/buck2_util:buck2_util", "//buck2/gazebo/dupe:dupe", - "//buck2/starlark-rust/starlark:starlark", ], ) diff --git a/app/buck2_node_tests/Cargo.toml b/app/buck2_node_tests/Cargo.toml index 36b971970a8a9..6b43b1e10428f 100644 --- a/app/buck2_node_tests/Cargo.toml +++ b/app/buck2_node_tests/Cargo.toml @@ -1,15 +1,16 @@ [package] +description = "Tests for buck2_node" +edition = "2021" +license = { workspace = true } name = "buck2_node_tests" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Tests for buck2_node" [dev-dependencies] anyhow = { workspace = true } dupe = { workspace = true } 
-starlark = { workspace = true } buck2_core = { workspace = true } -buck2_interpreter_for_build = { workspace = true } buck2_node = { workspace = true } +buck2_util = { workspace = true } diff --git a/app/buck2_node_tests/src/attrs.rs b/app/buck2_node_tests/src/attrs.rs new file mode 100644 index 0000000000000..86eb2378b42a0 --- /dev/null +++ b/app/buck2_node_tests/src/attrs.rs @@ -0,0 +1,11 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +mod coerced_attr; +mod hacks; diff --git a/app/buck2_node_tests/src/attrs/coerced_attr.rs b/app/buck2_node_tests/src/attrs/coerced_attr.rs new file mode 100644 index 0000000000000..d677afe51d07c --- /dev/null +++ b/app/buck2_node_tests/src/attrs/coerced_attr.rs @@ -0,0 +1,217 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::collections::BTreeMap; + +use buck2_core::configuration::config_setting::ConfigSettingData; +use buck2_core::configuration::constraints::ConstraintKey; +use buck2_core::configuration::constraints::ConstraintValue; +use buck2_node::attrs::attr_type::bool::BoolLiteral; +use buck2_node::attrs::attr_type::string::StringLiteral; +use buck2_node::attrs::coerced_attr::CoercedAttr; +use buck2_node::attrs::coerced_attr::CoercedSelector; +use buck2_node::attrs::fmt_context::AttrFmtContext; +use buck2_node::configuration::resolved::ConfigurationSettingKey; +use buck2_util::arc_str::ArcSlice; +use buck2_util::arc_str::ArcStr; +use dupe::Dupe; + +#[test] +fn selector_equals_accounts_for_ordering() { + let s1 = CoercedAttr::Selector(Box::new( + CoercedSelector::new( + ArcSlice::new([ + ( + ConfigurationSettingKey::testing_parse("cell1//pkg1:target1"), + CoercedAttr::Bool(BoolLiteral(true)), + ), + ( + ConfigurationSettingKey::testing_parse("cell2//pkg2:target2"), + CoercedAttr::Bool(BoolLiteral(false)), + ), + ]), + None, + ) + .unwrap(), + )); + let s2 = CoercedAttr::Selector(Box::new( + CoercedSelector::new( + ArcSlice::new([ + ( + ConfigurationSettingKey::testing_parse("cell1//pkg1:target1"), + CoercedAttr::Bool(BoolLiteral(true)), + ), + ( + ConfigurationSettingKey::testing_parse("cell2//pkg2:target2"), + CoercedAttr::Bool(BoolLiteral(false)), + ), + ]), + None, + ) + .unwrap(), + )); + + assert_eq!(s1 == s2, true); + + let s2 = CoercedAttr::Selector(Box::new( + CoercedSelector::new( + ArcSlice::new([ + ( + ConfigurationSettingKey::testing_parse("cell2//pkg2:target2"), + CoercedAttr::Bool(BoolLiteral(false)), + ), + ( + ConfigurationSettingKey::testing_parse("cell1//pkg1:target1"), + CoercedAttr::Bool(BoolLiteral(true)), + ), + ]), + None, + ) + .unwrap(), + )); + + assert_eq!(s1 == s2, false); +} + +#[test] +fn select_the_most_specific() { + let c_os = ConstraintKey::testing_new("config//c:os"); + let c_linux = ConstraintValue::testing_new("config//c:linux"); + let c_cpu = ConstraintKey::testing_new("config//c:cpu"); + let c_arm64 = ConstraintValue::testing_new("config//c:arm64"); + let c_x86_64 = ConstraintValue::testing_new("config//c:x86_64"); + + let linux = 
ConfigurationSettingKey::testing_parse("config//:linux"); + let linux_arm64 = ConfigurationSettingKey::testing_parse("config//:linux-arm64"); + let linux_x86_64 = ConfigurationSettingKey::testing_parse("config//:linux-x86_64"); + + let linux_data = + ConfigSettingData::testing_new(BTreeMap::from_iter([(c_os.dupe(), c_linux.dupe())])); + let linux_arm64_data = ConfigSettingData::testing_new(BTreeMap::from_iter([ + (c_os.dupe(), c_linux.dupe()), + (c_cpu.dupe(), c_arm64.dupe()), + ])); + let linux_x86_64_data = ConfigSettingData::testing_new(BTreeMap::from_iter([ + (c_os.dupe(), c_linux.dupe()), + (c_cpu.dupe(), c_x86_64.dupe()), + ])); + + let literal_true = CoercedAttr::Bool(BoolLiteral(true)); + let literal_str = CoercedAttr::String(StringLiteral(ArcStr::from("linux"))); + + // Test more specific is selected even if it is not first. + let select_entries = [ + (&linux, &linux_data, &literal_true), + (&linux_x86_64, &linux_x86_64_data, &literal_str), + ]; + assert_eq!( + Some(&literal_str), + CoercedAttr::select_the_most_specific(select_entries).unwrap() + ); + + // Test more specific is selected even if it is first. + let select_entries = [ + (&linux_x86_64, &linux_x86_64_data, &literal_str), + (&linux, &linux_data, &literal_true), + ]; + assert_eq!( + Some(&literal_str), + CoercedAttr::select_the_most_specific(select_entries).unwrap() + ); + + // Conflicting keys. + let select_entries = [ + (&linux_arm64, &linux_arm64_data, &literal_true), + (&linux_x86_64, &linux_x86_64_data, &literal_str), + ]; + assert_eq!( + "Both select keys `config//:linux-arm64` and `config//:linux-x86_64` \ + match the configuration, but neither is more specific", + CoercedAttr::select_the_most_specific(select_entries) + .unwrap_err() + .to_string() + ); +} + +#[test] // T177093673 +fn test_select_refines_bug() { + let c_windows = ( + ConstraintKey::testing_new("config//c:os"), + ConstraintValue::testing_new("config//c:windows"), + ); + let c_x86_64 = ( + ConstraintKey::testing_new("config//c:cpu"), + ConstraintValue::testing_new("config//c:x86_64"), + ); + + let windows = ConfigurationSettingKey::testing_parse("config//:windows"); + let x86_64 = ConfigurationSettingKey::testing_parse("config//:x86_64"); + let windows_x86_64 = ConfigurationSettingKey::testing_parse("config//:windows-x86_64"); + + let windows_data = ConfigSettingData::testing_new(BTreeMap::from_iter([c_windows.dupe()])); + let x86_64_data = ConfigSettingData::testing_new(BTreeMap::from_iter([c_x86_64.dupe()])); + let windows_x86_64_data = + ConfigSettingData::testing_new(BTreeMap::from_iter([c_windows, c_x86_64])); + + let value_windows = CoercedAttr::String(StringLiteral(ArcStr::from("windows"))); + let value_x86_64 = CoercedAttr::String(StringLiteral(ArcStr::from("x86_64"))); + let value_windows_x86_64 = CoercedAttr::String(StringLiteral(ArcStr::from("windows-x86_64"))); + let select_entries = [ + (&windows, &windows_data, &value_windows), + (&x86_64, &x86_64_data, &value_x86_64), + (&windows_x86_64, &windows_x86_64_data, &value_windows_x86_64), + ]; + + assert_eq!( + Some(&value_windows_x86_64), + CoercedAttr::select_the_most_specific(select_entries).unwrap() + ); +} + +#[test] +fn test_to_json_concat() { + assert_eq!( + r#"{"__type":"concat","items":["a","b","c","d"]}"#, + CoercedAttr::Concat(Box::new([ + CoercedAttr::String(StringLiteral(ArcStr::from("a"))), + CoercedAttr::String(StringLiteral(ArcStr::from("b"))), + CoercedAttr::String(StringLiteral(ArcStr::from("c"))), + CoercedAttr::String(StringLiteral(ArcStr::from("d"))), + ])) + 
.to_json(&AttrFmtContext::NO_CONTEXT) + .unwrap() + .to_string() + ); +} + +#[test] +fn test_to_json_selector() { + assert_eq!( + r#"{"__type":"selector","entries":{"DEFAULT":"ddd","config//:a":true,"config//:b":10}}"#, + CoercedAttr::Selector(Box::new( + CoercedSelector::new( + ArcSlice::new([ + ( + ConfigurationSettingKey::testing_parse("config//:a"), + CoercedAttr::Bool(BoolLiteral(true)) + ), + ( + ConfigurationSettingKey::testing_parse("config//:b"), + CoercedAttr::Int(10) + ), + ]), + Some(CoercedAttr::String(StringLiteral(ArcStr::from("ddd")))), + ) + .unwrap() + )) + .to_json(&AttrFmtContext::NO_CONTEXT) + .unwrap() + .to_string() + ); +} diff --git a/app/buck2_node_tests/src/attrs/hacks.rs b/app/buck2_node_tests/src/attrs/hacks.rs index e3054af67d226..90278029296d9 100644 --- a/app/buck2_node_tests/src/attrs/hacks.rs +++ b/app/buck2_node_tests/src/attrs/hacks.rs @@ -11,28 +11,17 @@ use buck2_core::cells::name::CellName; use buck2_core::cells::paths::CellRelativePath; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; use buck2_core::package::PackageLabel; -use buck2_interpreter_for_build::attrs::coerce::attr_type::AttrTypeExt; -use buck2_interpreter_for_build::attrs::coerce::ctx::BuildAttrCoercionContext; -use buck2_interpreter_for_build::interpreter::testing::cells; -use buck2_node::attrs::attr_type::AttrType; -use buck2_node::attrs::configurable::AttrIsConfigurable; +use buck2_node::attrs::attr_type::list::ListLiteral; +use buck2_node::attrs::attr_type::string::StringLiteral; +use buck2_node::attrs::coerced_attr::CoercedAttr; use buck2_node::attrs::hacks; +use buck2_util::arc_str::ArcSlice; +use buck2_util::arc_str::ArcStr; use dupe::Dupe; -use starlark::values::Heap; #[test] fn stringifies_correctly() -> anyhow::Result<()> { - let heap = Heap::new(); - let coercer_ctx = - BuildAttrCoercionContext::new_no_package(cells(None)?.1, cells(None)?.0.resolve_self()); - let coercer = AttrType::string(); - let coerced = coercer - .coerce( - AttrIsConfigurable::Yes, - &coercer_ctx, - heap.alloc("Hello, world!"), - ) - .unwrap(); + let coerced = CoercedAttr::String(StringLiteral(ArcStr::from("Hello, world!"))); let package = PackageLabel::new( CellName::testing_new("root"), @@ -44,11 +33,9 @@ fn stringifies_correctly() -> anyhow::Result<()> { hacks::value_to_string(&coerced, package.dupe())? ); - let list = AttrType::list(coercer).coerce( - AttrIsConfigurable::Yes, - &coercer_ctx, - heap.alloc(vec!["Hello, world!"]), - )?; + let list = CoercedAttr::List(ListLiteral(ArcSlice::new([CoercedAttr::String( + StringLiteral(ArcStr::from("Hello, world!")), + )]))); assert!(hacks::value_to_string(&list, package.dupe()).is_err()); Ok(()) } diff --git a/app/buck2_node_tests/src/attrs/mod.rs b/app/buck2_node_tests/src/attrs/mod.rs deleted file mode 100644 index 97fb3ebd43b4d..0000000000000 --- a/app/buck2_node_tests/src/attrs/mod.rs +++ /dev/null @@ -1,10 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -mod hacks; diff --git a/app/buck2_node_tests/src/lib.rs b/app/buck2_node_tests/src/lib.rs index 60e903eca9586..2964a0fba04ee 100644 --- a/app/buck2_node_tests/src/lib.rs +++ b/app/buck2_node_tests/src/lib.rs @@ -7,6 +7,7 @@ * of this source tree. 
*/ +#![feature(error_generic_member_access)] #![cfg(test)] mod attrs; diff --git a/app/buck2_offline_archive/BUCK b/app/buck2_offline_archive/BUCK index d6f2adc3ca091..ae72e7eef101b 100644 --- a/app/buck2_offline_archive/BUCK +++ b/app/buck2_offline_archive/BUCK @@ -5,12 +5,24 @@ oncall("build_infra") rust_library( name = "buck2_offline_archive", srcs = glob(["src/**/*.rs"]), - test_deps = [ - "fbsource//third-party/rust:tempfile", + test_os_deps = [ + ( + "linux", + [ + "fbsource//third-party/rust:tempfile", + ], + ), + ( + "macos", + [ + "fbsource//third-party/rust:tempfile", + ], + ), ], deps = [ "fbsource//third-party/rust:anyhow", "fbsource//third-party/rust:serde", "//buck2/app/buck2_core:buck2_core", + "//buck2/app/buck2_util:buck2_util", ], ) diff --git a/app/buck2_offline_archive/Cargo.toml b/app/buck2_offline_archive/Cargo.toml index 29c89078c98a4..43cea312ea240 100644 --- a/app/buck2_offline_archive/Cargo.toml +++ b/app/buck2_offline_archive/Cargo.toml @@ -1,7 +1,9 @@ [package] description = "Supporting code and utilities for manipulating offline build archives" edition = "2021" +license = { workspace = true } name = "buck2_offline_archive" +repository = { workspace = true } version = "0.1.0" [dependencies] @@ -9,6 +11,7 @@ anyhow = { workspace = true } serde = { workspace = true } buck2_core = { workspace = true } +buck2_util = { workspace = true } -[dev-dependencies] +[target.'cfg(unix)'.dev-dependencies] tempfile = { workspace = true } diff --git a/app/buck2_offline_archive/src/lib.rs b/app/buck2_offline_archive/src/lib.rs index a35003978ac3d..956e669b7bfd1 100644 --- a/app/buck2_offline_archive/src/lib.rs +++ b/app/buck2_offline_archive/src/lib.rs @@ -7,12 +7,11 @@ * of this source tree. */ +#![feature(error_generic_member_access)] + use std::ffi::OsStr; use std::fmt; use std::path::Path; -// Note: Using this because we don't need to propagate async in the offline -// archiver program. -use std::process::Command; use anyhow::Context; use buck2_core::fs::fs_util; @@ -20,6 +19,9 @@ use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; use buck2_core::fs::paths::abs_path::AbsPathBuf; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; +// Note: Using this because we don't need to propagate async in the offline +// archiver program +use buck2_util::process::background_command; #[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] pub struct RelativeSymlink { @@ -131,7 +133,7 @@ where S: AsRef, P: AsRef, { - let result = Command::new("hg") + let result = background_command("hg") .args(args) .current_dir(path.as_ref()) .env("HGPLAIN", "1") @@ -141,7 +143,7 @@ where let out = String::from_utf8(result.stdout).context("hg stdout to string")?; let out = out.trim(); if out.is_empty() { - anyhow::bail!("expected to be run in hg repository"); + Err(anyhow::anyhow!("expected to be run in hg repository")) } else { Ok(out.to_owned()) } @@ -253,7 +255,7 @@ mod tests { // Canonicalize this because the canonicalize() call in `interior_links()` // resolves to /private/var/... on macOS. - let working_dir = fs_util::canonicalize(tree.path())?; + let working_dir = AbsNormPathBuf::new(tree.path().canonicalize()?)?; let external_link = ExternalSymlink { link: ProjectRelativePathBuf::unchecked_new("unused".to_owned()), @@ -290,7 +292,7 @@ mod tests { // Canonicalize this because the canonicalize() call in `interior_links()` // resolves to /private/var/... on macOS. 
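The `hg` helper earlier in this file shells out and validates stdout. A std-only sketch of it (the patch routes the call through `buck2_util::process::background_command`, whose behavior is not shown in this diff, so plain `std::process::Command` stands in):

```rust
use std::process::Command;

use anyhow::Context;

fn hg_stdout(args: &[&str]) -> anyhow::Result<String> {
    let result = Command::new("hg")
        .args(args)
        .env("HGPLAIN", "1") // plain, script-stable output from hg
        .output()
        .context("running hg")?;
    let out = String::from_utf8(result.stdout).context("hg stdout to string")?;
    let out = out.trim();
    if out.is_empty() {
        Err(anyhow::anyhow!("expected to be run in an hg repository"))
    } else {
        Ok(out.to_owned())
    }
}

fn main() -> anyhow::Result<()> {
    // Requires `hg` on PATH and a Mercurial checkout as the working directory.
    println!("{}", hg_stdout(&["root"])?);
    Ok(())
}
```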
- let working_dir = fs_util::canonicalize(tree.path())?; + let working_dir = AbsNormPathBuf::new(tree.path().canonicalize()?)?; let external_link = ExternalSymlink { link: ProjectRelativePathBuf::unchecked_new("unused".to_owned()), diff --git a/app/buck2_profile/BUCK b/app/buck2_profile/BUCK index a2fd2f52e1f5e..45f3c43cb9cd2 100644 --- a/app/buck2_profile/BUCK +++ b/app/buck2_profile/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") diff --git a/app/buck2_profile/Cargo.toml b/app/buck2_profile/Cargo.toml index a6e88b09af617..dde2987a9dbbe 100644 --- a/app/buck2_profile/Cargo.toml +++ b/app/buck2_profile/Cargo.toml @@ -1,7 +1,9 @@ [package] description = "common utilities of buck2 profile" edition = "2021" +license = { workspace = true } name = "buck2_profile" +repository = { workspace = true } version = "0.1.0" [dependencies] @@ -10,6 +12,6 @@ inferno = { workspace = true } starlark = { workspace = true } +buck2_cli_proto = { workspace = true } buck2_core = { workspace = true } buck2_interpreter = { workspace = true } -buck2_cli_proto = { workspace = true } diff --git a/app/buck2_profile/src/lib.rs b/app/buck2_profile/src/lib.rs index c84ec02291d85..9292f9186faec 100644 --- a/app/buck2_profile/src/lib.rs +++ b/app/buck2_profile/src/lib.rs @@ -7,34 +7,47 @@ * of this source tree. */ +#![feature(error_generic_member_access)] + use std::sync::Arc; use anyhow::Context; use buck2_cli_proto::profile_request::ProfileOpts; -use buck2_cli_proto::profile_request::Profiler; +use buck2_cli_proto::HasClientContext; use buck2_core::fs::fs_util; +use buck2_core::fs::paths::abs_norm_path::AbsNormPath; use buck2_core::fs::paths::abs_path::AbsPath; -use buck2_interpreter::dice::starlark_profiler::StarlarkProfilerConfiguration; -use buck2_interpreter::starlark_profiler::StarlarkProfileDataAndStats; +use buck2_core::fs::project::ProjectRoot; +use buck2_core::pattern::unparsed::UnparsedPatternPredicate; +use buck2_core::pattern::unparsed::UnparsedPatterns; +use buck2_interpreter::starlark_profiler::config::StarlarkProfilerConfiguration; +use buck2_interpreter::starlark_profiler::data::StarlarkProfileDataAndStats; use starlark::eval::ProfileMode; +use starlark::StarlarkResultExt; + +pub fn proto_to_profile_mode(proto: buck2_cli_proto::ProfileMode) -> ProfileMode { + match proto { + buck2_cli_proto::ProfileMode::HeapFlameAllocated => ProfileMode::HeapFlameAllocated, + buck2_cli_proto::ProfileMode::HeapFlameRetained => ProfileMode::HeapFlameRetained, + buck2_cli_proto::ProfileMode::HeapSummaryAllocated => ProfileMode::HeapSummaryAllocated, + buck2_cli_proto::ProfileMode::HeapSummaryRetained => ProfileMode::HeapSummaryRetained, + buck2_cli_proto::ProfileMode::TimeFlame => ProfileMode::TimeFlame, + buck2_cli_proto::ProfileMode::Statement => ProfileMode::Statement, + buck2_cli_proto::ProfileMode::Bytecode => ProfileMode::Bytecode, + buck2_cli_proto::ProfileMode::BytecodePairs => ProfileMode::BytecodePairs, + buck2_cli_proto::ProfileMode::Typecheck => ProfileMode::Typecheck, + buck2_cli_proto::ProfileMode::Coverage => ProfileMode::Coverage, + } +} pub fn starlark_profiler_configuration_from_request( req: &buck2_cli_proto::ProfileRequest, + project_root: &ProjectRoot, ) -> anyhow::Result { - let profiler_proto = buck2_cli_proto::profile_request::Profiler::from_i32(req.profiler) - .context("Invalid profiler")?; + let profiler_proto =
buck2_cli_proto::ProfileMode::from_i32(req.profile_mode).context("Invalid profiler")?; - let profile_mode = match profiler_proto { - Profiler::HeapFlameAllocated => ProfileMode::HeapFlameAllocated, - Profiler::HeapFlameRetained => ProfileMode::HeapFlameRetained, - Profiler::HeapSummaryAllocated => ProfileMode::HeapSummaryAllocated, - Profiler::HeapSummaryRetained => ProfileMode::HeapSummaryRetained, - Profiler::TimeFlame => ProfileMode::TimeFlame, - Profiler::Statement => ProfileMode::Statement, - Profiler::Bytecode => ProfileMode::Bytecode, - Profiler::BytecodePairs => ProfileMode::BytecodePairs, - Profiler::Typecheck => ProfileMode::Typecheck, - }; + let profile_mode = proto_to_profile_mode(profiler_proto); match req.profile_opts.as_ref().expect("Missing profile opts") { ProfileOpts::TargetProfile(opts) => { @@ -42,7 +55,15 @@ pub fn starlark_profiler_configuration_from_request( .context("Invalid action")?; Ok(match (action, opts.recursive) { (buck2_cli_proto::target_profile::Action::Loading, false) => { - StarlarkProfilerConfiguration::ProfileLastLoading(profile_mode) + let working_dir = AbsNormPath::new(&req.client_context()?.working_dir)?; + let working_dir = project_root.relativize(working_dir)?; + StarlarkProfilerConfiguration::ProfileLoading( + profile_mode, + UnparsedPatternPredicate::AnyOf(UnparsedPatterns::new( + opts.target_patterns.clone(), + working_dir.to_buf(), + )), + ) } (buck2_cli_proto::target_profile::Action::Loading, true) => { return Err(anyhow::anyhow!( @@ -50,10 +71,21 @@ pub fn starlark_profiler_configuration_from_request( )); } (buck2_cli_proto::target_profile::Action::Analysis, false) => { - StarlarkProfilerConfiguration::ProfileLastAnalysis(profile_mode) + let working_dir = AbsNormPath::new(&req.client_context()?.working_dir)?; + let working_dir = project_root.relativize(working_dir)?; + StarlarkProfilerConfiguration::ProfileAnalysis( + profile_mode, + UnparsedPatternPredicate::AnyOf(UnparsedPatterns::new( + opts.target_patterns.clone(), + working_dir.to_buf(), + )), + ) } (buck2_cli_proto::target_profile::Action::Analysis, true) => { - StarlarkProfilerConfiguration::ProfileAnalysisRecursively(profile_mode) + StarlarkProfilerConfiguration::ProfileAnalysis( + profile_mode, + UnparsedPatternPredicate::Any, + ) } }) } @@ -61,17 +93,28 @@ pub fn starlark_profiler_configuration_from_request( } } -pub fn get_profile_response( - profile_data: Arc, - req: &buck2_cli_proto::ProfileRequest, +#[allow(clippy::format_collect)] +pub fn write_starlark_profile( + profile_data: &StarlarkProfileDataAndStats, output: &AbsPath, -) -> anyhow::Result { - let command_profile_mode = buck2_cli_proto::profile_request::Profiler::from_i32(req.profiler) - .context("Invalid profiler")?; +) -> anyhow::Result<()> { + fs_util::create_dir_if_not_exists(output)?; - match command_profile_mode { - Profiler::HeapFlameAllocated | Profiler::HeapFlameRetained | Profiler::TimeFlame => { - let mut profile = profile_data.profile_data.gen()?; + fs_util::write( + output.join("targets.txt"), + profile_data + .targets + .iter() + .map(|t| format!("{t}\n")) + .collect::(), + ) + .context("Failed to write targets")?; + + match profile_data.profile_data.profile_mode() { + ProfileMode::HeapFlameAllocated + | ProfileMode::HeapFlameRetained + | ProfileMode::TimeFlame => { + let mut profile = profile_data.profile_data.gen().into_anyhow_result()?; if profile.is_empty() { // inferno does not like empty flamegraphs. 
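For the flame profile modes, the folded profile text is rendered to SVG with inferno, after padding empty input, since inferno rejects empty flamegraphs (the next lines in this hunk). A hedged sketch of that step; `render_flame_svg` is a hypothetical helper, not this patch's API:

```rust
// Renders folded "frame;frame;frame count" lines into a flamegraph SVG.
fn render_flame_svg(mut profile: String) -> anyhow::Result<Vec<u8>> {
    if profile.is_empty() {
        // inferno does not accept an empty profile, so substitute a dummy line.
        profile = "empty 1\n".to_owned();
    }
    let mut svg = Vec::new();
    let mut options = inferno::flamegraph::Options::default();
    inferno::flamegraph::from_lines(&mut options, profile.lines(), &mut svg)?;
    Ok(svg)
}
```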
profile = "empty 1\n".to_owned(); @@ -84,17 +127,24 @@ pub fn get_profile_response( ) .context("writing SVG from profile data")?; - fs_util::create_dir_if_not_exists(output)?; - fs_util::write(output.join("flame.src"), &profile) .context("Failed to write profile")?; fs_util::write(output.join("flame.svg"), &svg).context("Failed to write profile")?; } _ => { - let profile = profile_data.profile_data.gen()?; - fs_util::write(output, profile).context("Failed to write profile")?; + let profile = profile_data.profile_data.gen().into_anyhow_result()?; + fs_util::write(output.join("profile.txt"), profile) + .context("Failed to write profile")?; } }; + Ok(()) +} + +pub fn get_profile_response( + profile_data: Arc, + output: &AbsPath, +) -> anyhow::Result { + write_starlark_profile(profile_data.as_ref(), output)?; Ok(buck2_cli_proto::ProfileResponse { elapsed: Some(profile_data.elapsed().try_into()?), diff --git a/app/buck2_protoc_dev/BUCK b/app/buck2_protoc_dev/BUCK index 3a88a2aeb46de..1cc6f30748f27 100644 --- a/app/buck2_protoc_dev/BUCK +++ b/app/buck2_protoc_dev/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") diff --git a/app/buck2_protoc_dev/Cargo.toml b/app/buck2_protoc_dev/Cargo.toml index 97c5714210371..0ec61d83e1478 100644 --- a/app/buck2_protoc_dev/Cargo.toml +++ b/app/buck2_protoc_dev/Cargo.toml @@ -1,9 +1,14 @@ [package] +edition = "2021" +license = { workspace = true } name = "buck2_protoc_dev" +repository = { workspace = true } version = "0.1.0" -edition = "2021" [dependencies] -tonic-build = { workspace = true } dunce = { workspace = true } protoc-bin-vendored = { workspace = true } +tonic-build = { workspace = true } + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ["cfg(buck2_build)"] } diff --git a/app/buck2_protoc_dev/src/lib.rs b/app/buck2_protoc_dev/src/lib.rs index 41e79ede70bed..a6ea6e07f00eb 100644 --- a/app/buck2_protoc_dev/src/lib.rs +++ b/app/buck2_protoc_dev/src/lib.rs @@ -7,6 +7,8 @@ * of this source tree. 
*/ +#![feature(error_generic_member_access)] + use std::env; use std::ffi::OsString; use std::io; diff --git a/app/buck2_query/BUCK b/app/buck2_query/BUCK index 829ea821ca7db..03e3bb6012158 100644 --- a/app/buck2_query/BUCK +++ b/app/buck2_query/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -17,11 +16,10 @@ rust_library( "fbsource//third-party/rust:indoc", "fbsource//third-party/rust:itertools", "fbsource//third-party/rust:ref-cast", - "fbsource//third-party/rust:serde", - "fbsource//third-party/rust:thiserror", "fbsource//third-party/rust:tokio", "//buck2/allocative/allocative:allocative", "//buck2/app/buck2_core:buck2_core", + "//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_query_derive:buck2_query_derive", "//buck2/app/buck2_query_parser:buck2_query_parser", "//buck2/app/buck2_util:buck2_util", diff --git a/app/buck2_query/Cargo.toml b/app/buck2_query/Cargo.toml index 0bcdd4544c74c..e79cc66188300 100644 --- a/app/buck2_query/Cargo.toml +++ b/app/buck2_query/Cargo.toml @@ -1,8 +1,10 @@ [package] +description = "Buck query generic implementation" +edition = "2021" +license = { workspace = true } name = "buck2_query" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Buck query generic implementation" [dependencies] anyhow = { workspace = true } @@ -14,26 +16,19 @@ futures = { workspace = true } indexmap = { workspace = true } indoc = { workspace = true } itertools = { workspace = true } -serde = { workspace = true } -thiserror = { workspace = true } -tokio = { workspace = true } ref-cast = { workspace = true } +tokio = { workspace = true } allocative = { workspace = true } display_container = { workspace = true } -gazebo = { workspace = true } dupe = { workspace = true } -gazebo_lint.version = "0.1" -gazebo_lint.optional = true -# @oss-disable: gazebo_lint.path = "../../gazebo_lint/gazebo_lint" +gazebo = { workspace = true } starlark_map = { workspace = true } buck2_core = { workspace = true } -buck2_query_parser = { workspace = true } +buck2_error = { workspace = true } buck2_query_derive = { workspace = true } +buck2_query_parser = { workspace = true } buck2_util = { workspace = true } [lib] - -[features] -# @oss-disable: default = ["gazebo_lint"] diff --git a/app/buck2_query/src/lib.rs b/app/buck2_query/src/lib.rs index d6efab6583537..8a6c186e0d8cd 100644 --- a/app/buck2_query/src/lib.rs +++ b/app/buck2_query/src/lib.rs @@ -7,13 +7,9 @@ * of this source tree. */ -#![feature(async_closure)] +#![feature(error_generic_member_access)] #![feature(box_patterns)] #![feature(try_blocks)] -// Plugins -#![cfg_attr(feature = "gazebo_lint", feature(plugin))] -#![cfg_attr(feature = "gazebo_lint", allow(deprecated))] // :( -#![cfg_attr(feature = "gazebo_lint", plugin(gazebo_lint))] pub mod query; diff --git a/app/buck2_query/src/query.rs b/app/buck2_query/src/query.rs new file mode 100644 index 0000000000000..68ff672d82b34 --- /dev/null +++ b/app/buck2_query/src/query.rs @@ -0,0 +1,14 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +pub mod buck_types; +pub mod environment; +pub mod graph; +pub mod syntax; +pub mod traversal; diff --git a/app/buck2_query/src/query/buck_types.rs b/app/buck2_query/src/query/buck_types.rs index ee158cf61e621..0e9102697b92f 100644 --- a/app/buck2_query/src/query/buck_types.rs +++ b/app/buck2_query/src/query/buck_types.rs @@ -8,27 +8,10 @@ */ use buck2_core::target::configured_target_label::ConfiguredTargetLabel; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; -use crate::query::environment::ConfiguredOrUnconfiguredTargetLabel; -use crate::query::environment::NodeLabel; +use crate::query::graph::node::NodeKey; -impl ConfiguredOrUnconfiguredTargetLabel for TargetLabel { - fn unconfigured_label(&self) -> &TargetLabel { - self - } -} +impl NodeKey for TargetLabel {} -impl NodeLabel for TargetLabel {} - -impl ConfiguredOrUnconfiguredTargetLabel for ConfiguredTargetLabel { - fn unconfigured_label(&self) -> &TargetLabel { - self.unconfigured() - } -} - -impl NodeLabel for ConfiguredTargetLabel { - fn label_for_filter(&self) -> String { - return self.unconfigured().to_string(); - } -} +impl NodeKey for ConfiguredTargetLabel {} diff --git a/app/buck2_query/src/query/environment.rs b/app/buck2_query/src/query/environment.rs new file mode 100644 index 0000000000000..2712fb00edd2b --- /dev/null +++ b/app/buck2_query/src/query/environment.rs @@ -0,0 +1,461 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::borrow::Cow; +use std::fmt::Debug; +use std::iter; + +use anyhow::Context; +use async_trait::async_trait; +use buck2_core::build_file_path::BuildFilePath; +use buck2_core::cells::cell_path::CellPath; +use buck2_core::configuration::compatibility::MaybeCompatible; +use buck2_core::package::PackageLabel; +use dupe::Dupe; +use dupe::OptionDupedExt; +use futures::stream::FuturesUnordered; +use futures::stream::TryStreamExt; + +use crate::query::graph::async_bfs::async_bfs_find_path; +use crate::query::graph::graph::Graph; +use crate::query::graph::node::LabeledNode; +use crate::query::graph::node::NodeKey; +use crate::query::graph::successors::AsyncChildVisitor; +use crate::query::graph::successors::GraphSuccessors; +use crate::query::syntax::simple::eval::error::QueryError; +use crate::query::syntax::simple::eval::file_set::FileSet; +use crate::query::syntax::simple::eval::set::TargetSet; +use crate::query::traversal::AsyncNodeLookup; +use crate::query::traversal::ChildVisitor; +mod tests; + +#[derive(buck2_error::Error, Debug)] +pub enum QueryEnvironmentError { + #[error("Missing target `{}`. Targets in the package: <{}>", .0, .1.join(", "))] + MissingTargetError(String, Vec), + #[error("Expected package `{0}` to be available in traversal.")] + TraversalMissingPackage(PackageLabel), +} + +impl QueryEnvironmentError { + pub fn missing_target, Iter: IntoIterator>( + target: &T, + package_targets: Iter, + ) -> Self { + let existing = package_targets + .into_iter() + .map(|e| format!("`{}`", e.as_ref())) + .collect(); + Self::MissingTargetError(target.to_string(), existing) + } +} + +pub struct QueryTargets {} + +impl QueryTargets { + /// Used to process all the attrs of a node (both the normal rule attrs and the "special" attrs). 
Applies + /// a function to the attrs instead of returning an iterator as some of them are owned and some are refs + /// into the node. + pub fn for_all_attrs<T: QueryTarget, E, F: FnMut(&str, &T::Attr<'_>) -> Result<(), E>>( + target: &T, + mut func: F, + ) -> Result<(), E> { + target.special_attrs_for_each(&mut func)?; + target.attrs_for_each(&mut func)?; + Ok(()) + } +} + +#[derive(Default, Clone, Dupe)] +pub struct AttrFmtOptions { + pub exclude_quotes: bool, +} + +pub trait QueryTarget: LabeledNode + Dupe + Send + Sync + 'static { + type Attr<'a>: ?Sized + Debug + 'a; + + /// The `filter()` function uses this. + fn label_for_filter(&self) -> String { + self.node_key().to_string() + } + + /// Returns the input files for this node. + fn inputs_for_each<E, F: FnMut(CellPath) -> Result<(), E>>(&self, func: F) -> Result<(), E>; + + fn rule_type(&self) -> Cow<'_, str>; + + fn name(&self) -> Cow<'_, str>; + + /// Return the path to the buildfile that defines this target, e.g. `fbcode//foo/bar/TARGETS` + fn buildfile_path(&self) -> &BuildFilePath; + + fn deps<'a>(&'a self) -> impl Iterator<Item = &'a Self::Key> + Send + 'a; + + fn exec_deps<'a>(&'a self) -> impl Iterator<Item = &'a Self::Key> + Send + 'a; + + fn target_deps<'a>(&'a self) -> impl Iterator<Item = &'a Self::Key> + Send + 'a; + + fn configuration_deps<'a>(&'a self) -> impl Iterator<Item = &'a Self::Key> + Send + 'a; + + fn toolchain_deps<'a>(&'a self) -> impl Iterator<Item = &'a Self::Key> + Send + 'a; + + fn tests<'a>(&'a self) -> Option<impl Iterator<Item = Self::Key> + Send + 'a> { + None::<iter::Empty<Self::Key>> + } + + fn attr_any_matches( + attr: &Self::Attr<'_>, + filter: &dyn Fn(&str) -> anyhow::Result<bool>, + ) -> anyhow::Result<bool>; + + fn special_attrs_for_each<E, F: FnMut(&str, &Self::Attr<'_>) -> Result<(), E>>( + &self, + func: F, + ) -> Result<(), E>; + + fn attrs_for_each<E, F: FnMut(&str, &Self::Attr<'_>) -> Result<(), E>>( + &self, + func: F, + ) -> Result<(), E>; + + fn defined_attrs_for_each<E, F: FnMut(&str, &Self::Attr<'_>) -> Result<(), E>>( + &self, + func: F, + ) -> Result<(), E>; + + fn map_attr<R, F: FnMut(Option<&Self::Attr<'_>>) -> R>(&self, key: &str, func: F) -> R; +} + +#[async_trait] +pub trait TraversalFilter<T: QueryTarget>: Send + Sync { + /// Returns the children that pass this filter. + async fn get_children(&self, target: &T) -> anyhow::Result<TargetSet<T>>; +} + +/// The environment of a Buck query that can evaluate queries to produce a +/// result. +#[async_trait] +pub trait QueryEnvironment: Send + Sync { + type Target: QueryTarget; + + async fn get_node( + &self, + node_ref: &<Self::Target as LabeledNode>::Key, + ) -> anyhow::Result<Self::Target>; + + async fn get_node_for_default_configured_target( + &self, + node_ref: &<Self::Target as LabeledNode>::Key, + ) -> anyhow::Result<MaybeCompatible<Self::Target>>; + + /// Evaluates a literal target pattern. See `buck2_common::pattern`. + async fn eval_literals(&self, literal: &[&str]) -> anyhow::Result<TargetSet<Self::Target>>; + + /// Evaluates a file literal + async fn eval_file_literal(&self, literal: &str) -> anyhow::Result<FileSet>; + + /// Performs a depth first traversal, with a post-order callback. The + /// delegate defines the traversal and receives the callback.
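A minimal sketch (hypothetical caller, not from this commit) of how the new closure-based traversal API splits work that the old `AsyncTraversalDelegate` combined: an `AsyncChildVisitor` chooses which edges to follow, and a plain `FnMut` receives nodes in post-order. `env` and `roots` are assumed to exist:

```rust
// Assumes: `env` implements `QueryEnvironment` and `roots` is a TargetSet.
let mut all_deps = TargetSet::new();
env.dfs_postorder(
    &roots,
    QueryTargetDepsSuccessors, // edge selection: follow every dep
    |node| {
        all_deps.insert(node); // post-order visit: children come first
        Ok(())
    },
)
.await?;
```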
+ async fn dfs_postorder( + &self, + root: &TargetSet, + successors: impl AsyncChildVisitor, + visit: impl FnMut(Self::Target) -> anyhow::Result<()> + Send, + ) -> anyhow::Result<()>; + + async fn depth_limited_traversal( + &self, + root: &TargetSet, + successors: impl AsyncChildVisitor, + visit: impl FnMut(Self::Target) -> anyhow::Result<()> + Send, + depth: u32, + ) -> anyhow::Result<()>; + + async fn allpaths( + &self, + from: &TargetSet, + to: &TargetSet, + filter: Option<&dyn TraversalFilter>, + ) -> anyhow::Result> { + self.rdeps(from, to, None, filter).await + } + + async fn somepath( + &self, + from: &TargetSet, + to: &TargetSet, + filter: Option<&dyn TraversalFilter>, + ) -> anyhow::Result> { + let path = async_bfs_find_path( + from.iter(), + QueryEnvironmentAsNodeLookup { env: self }, + QueryTargetFilteredDepsSuccesors { filter }, + |t| to.get(t).duped(), + ) + .await? + .unwrap_or_default(); + + let target_set = TargetSet::from_iter(path); + Ok(target_set) + } + + async fn allbuildfiles(&self, _universe: &TargetSet) -> anyhow::Result { + Err(anyhow::anyhow!(QueryError::FunctionUnimplemented( + "allbuildfiles() is implemented only for uquery and cquery.", + ))) + } + + async fn rbuildfiles(&self, _universe: &FileSet, _argset: &FileSet) -> anyhow::Result { + Err(anyhow::anyhow!(QueryError::FunctionUnimplemented( + "rbuildfiles() is implemented only for uquery and cquery." + ))) + } + + async fn rdeps( + &self, + universe: &TargetSet, + from: &TargetSet, + depth: Option, + filter: Option<&dyn TraversalFilter>, + ) -> anyhow::Result> { + let graph = Graph::build_stable_dfs( + &QueryEnvironmentAsNodeLookup { env: self }, + universe.iter().map(|n| n.node_key().clone()), + QueryTargetFilteredDepsSuccesors { filter }, + ) + .await?; + + let graph = graph.reverse(); + + let mut rdeps = TargetSet::new(); + + let mut visit = |target| { + rdeps.insert_unique_unchecked(target); + Ok(()) + }; + + let roots_in_universe = from.filter(|t| Ok(graph.get(t.node_key()).is_some()))?; + + match depth { + // For unbounded traversals, buck1 recommends specifying a large value. We'll accept either a negative (like -1) or + // a large value as unbounded. We can't just call it optional because args are positional only in the query syntax + // and so to specify a filter you need to specify a depth. + Some(v) if (0..1_000_000_000).contains(&v) => { + let graph = graph.take_max_depth( + roots_in_universe.iter().map(|t| t.node_key().clone()), + v as u32, + ); + graph.depth_first_postorder_traversal( + roots_in_universe.iter().map(|t| t.node_key().clone()), + |t| visit(t.clone()), + )?; + } + _ => { + graph.depth_first_postorder_traversal( + roots_in_universe.iter().map(|t| t.node_key().clone()), + |t| visit(t.clone()), + )?; + } + } + + Ok(rdeps) + } + + async fn testsof( + &self, + targets: &TargetSet, + ) -> anyhow::Result> { + let target_tests = targets + .iter() + .map(|target| { + let tests = target + .tests() + .ok_or(QueryError::FunctionUnimplemented("testsof"))?; + + anyhow::Ok((target, tests)) + }) + .collect::, _>>()?; + + let mut futs = target_tests + .into_iter() + .flat_map(|(target, tests)| { + tests.into_iter().map(move |test| async move { + let test = self.get_node(&test).await.with_context(|| { + format!( + "Error getting test of target {}", + LabeledNode::node_key(target), + ) + })?; + anyhow::Ok(test) + }) + }) + .collect::>(); + + let mut ret = TargetSet::new(); + while let Some(test) = futs.try_next().await? 
{ + ret.insert(test); + } + + Ok(ret) + } + + async fn testsof_with_default_target_platform( + &self, + targets: &TargetSet, + ) -> anyhow::Result>> { + let target_tests = targets + .iter() + .map(|target| { + let tests = target + .tests() + .ok_or(QueryError::FunctionUnimplemented("testsof"))?; + + anyhow::Ok((target, tests)) + }) + .collect::, _>>()?; + + let mut futs = target_tests + .into_iter() + .flat_map(|(target, tests)| { + tests.into_iter().map(move |test| async move { + let test = self + .get_node_for_default_configured_target(&test) + .await + .with_context(|| { + format!( + "Error getting test of target {}", + LabeledNode::node_key(target), + ) + })?; + anyhow::Ok(test) + }) + }) + .collect::>(); + + let mut ret = Vec::new(); + while let Some(test) = futs.try_next().await? { + ret.push(test); + } + + Ok(ret) + } + + async fn deps( + &self, + targets: &TargetSet, + depth: Option, + filter: Option<&dyn TraversalFilter>, + ) -> anyhow::Result> { + deps(self, targets, depth, filter).await + } + + async fn owner(&self, _paths: &FileSet) -> anyhow::Result>; + + async fn targets_in_buildfile( + &self, + paths: &FileSet, + ) -> anyhow::Result>; +} + +pub async fn deps( + env: &Env, + targets: &TargetSet, + depth: Option, + filter: Option<&dyn TraversalFilter>, +) -> anyhow::Result> { + let mut deps = TargetSet::new(); + + let visitor = QueryTargetFilteredDepsSuccesors { filter }; + let visit = |target| { + deps.insert_unique_unchecked(target); + Ok(()) + }; + + match depth { + // For unbounded traversals, buck1 recommends specifying a large value. We'll accept either a negative (like -1) or + // a large value as unbounded. We can't just call it optional because args are positional only in the query syntax + // and so to specify a filter you need to specify a depth. 
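The guard in the match arm that follows encodes a convention shared with `deps` further down: only values in `0..1_000_000_000` count as a real bound, while negatives (buck1-style `-1`) and huge values mean unbounded. A self-contained sketch of just that rule, with a hypothetical helper name:

```rust
/// Hypothetical helper mirroring the `rdeps`/`deps` match guard.
fn bounded_depth(depth: Option<i32>) -> Option<u32> {
    match depth {
        // A "reasonable" non-negative depth is honored as-is.
        Some(v) if (0..1_000_000_000).contains(&v) => Some(v as u32),
        // Negative or absurdly large values fall through to unbounded.
        _ => None,
    }
}

fn main() {
    assert_eq!(bounded_depth(Some(3)), Some(3));
    assert_eq!(bounded_depth(Some(-1)), None);
    assert_eq!(bounded_depth(Some(2_000_000_000)), None);
    assert_eq!(bounded_depth(None), None);
}
```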
+ Some(v) if (0..1_000_000_000).contains(&v) => { + env.depth_limited_traversal(targets, visitor, visit, v as u32) + .await?; + } + _ => { + env.dfs_postorder(targets, visitor, visit).await?; + } + } + + Ok(deps) +} + +pub struct QueryTargetDepsSuccessors; + +impl AsyncChildVisitor for QueryTargetDepsSuccessors { + async fn for_each_child( + &self, + node: &T, + mut children: impl ChildVisitor, + ) -> anyhow::Result<()> { + for dep in node.deps() { + children.visit(dep)?; + } + Ok(()) + } +} + +impl GraphSuccessors for QueryTargetDepsSuccessors +where + T: QueryTarget, +{ + fn for_each_successor(&self, node: &T, mut cb: impl FnMut(&T)) { + for dep in node.deps() { + cb(dep); + } + } +} + +pub struct QueryEnvironmentAsNodeLookup<'q, Q: QueryEnvironment + ?Sized> { + pub env: &'q Q, +} + +#[async_trait] +impl<'q, Q: QueryEnvironment + ?Sized> AsyncNodeLookup + for QueryEnvironmentAsNodeLookup<'q, Q> +{ + async fn get(&self, label: &::Key) -> anyhow::Result { + self.env.get_node(label).await + } +} + +pub struct QueryTargetFilteredDepsSuccesors<'a, Q: QueryTarget> { + filter: Option<&'a dyn TraversalFilter>, +} + +impl<'a, Q: QueryTarget> AsyncChildVisitor for QueryTargetFilteredDepsSuccesors<'a, Q> { + async fn for_each_child( + &self, + target: &Q, + mut func: impl ChildVisitor, + ) -> anyhow::Result<()> { + let res: anyhow::Result<_> = try { + match self.filter { + Some(filter) => { + for dep in filter.get_children(target).await?.iter() { + func.visit(dep.node_key())?; + } + } + None => { + for dep in target.deps() { + func.visit(dep)?; + } + } + } + }; + res.with_context(|| format!("Error traversing children of `{}`", target.node_key())) + } +} diff --git a/app/buck2_query/src/query/environment/mod.rs b/app/buck2_query/src/query/environment/mod.rs deleted file mode 100644 index 145faaeea8ed8..0000000000000 --- a/app/buck2_query/src/query/environment/mod.rs +++ /dev/null @@ -1,530 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::borrow::Cow; -use std::collections::HashMap; -use std::fmt::Debug; -use std::fmt::Display; -use std::hash::Hash; - -use anyhow::Context; -use async_trait::async_trait; -use buck2_core::build_file_path::BuildFilePath; -use buck2_core::cells::cell_path::CellPath; -use buck2_core::configuration::compatibility::MaybeCompatible; -use buck2_core::package::PackageLabel; -use buck2_core::target::label::TargetLabel; -use dupe::Dupe; -use futures::stream::FuturesUnordered; -use futures::stream::TryStreamExt; -use starlark_map::ordered_set::OrderedSet; -use thiserror::Error; - -use crate::query::syntax::simple::eval::error::QueryError; -use crate::query::syntax::simple::eval::file_set::FileSet; -use crate::query::syntax::simple::eval::set::TargetSet; -use crate::query::syntax::simple::eval::set::TargetSetExt; -use crate::query::traversal::AsyncTraversalDelegate; -use crate::query::traversal::ChildVisitor; -mod tests; - -#[derive(Error, Debug)] -pub enum QueryEnvironmentError { - #[error("Missing target `{}`. 
Targets in the package: <{}>", .0, .1.join(", "))] - MissingTargetError(String, Vec), - #[error("Expected package `{0}` to be available in traversal.")] - TraversalMissingPackage(PackageLabel), -} - -impl QueryEnvironmentError { - pub fn missing_target, Iter: IntoIterator>( - target: &T, - package_targets: Iter, - ) -> Self { - let existing = package_targets - .into_iter() - .map(|e| format!("`{}`", e.as_ref())) - .collect(); - Self::MissingTargetError(target.to_string(), existing) - } -} - -pub trait NodeLabel: Clone + Hash + PartialEq + Eq + Debug + Display + Send + Sync { - /// `filter()` function will use this. - fn label_for_filter(&self) -> String { - self.to_string() - } -} - -pub trait ConfiguredOrUnconfiguredTargetLabel: NodeLabel { - fn unconfigured_label(&self) -> &TargetLabel; -} - -pub trait LabeledNode: Dupe + Send + Sync + 'static { - type NodeRef: NodeLabel; - - fn node_ref(&self) -> &Self::NodeRef; -} - -pub struct QueryTargets {} - -impl QueryTargets { - /// Used to process all the attrs of a node (both the normal rule attrs and the "special" attrs). Applies - /// a function to the attrs instead of returning an iterator as some of them are owned and some are refs - /// into the node. - pub fn for_all_attrs) -> Result<(), E>>( - target: &T, - mut func: F, - ) -> Result<(), E> { - target.special_attrs_for_each(&mut func)?; - target.attrs_for_each(&mut func)?; - Ok(()) - } -} - -pub trait QueryTarget: LabeledNode + Dupe + Send + Sync + 'static { - type Attr<'a>: ?Sized + Debug + 'a; - - /// Returns the input files for this node. - fn inputs_for_each Result<(), E>>(&self, func: F) -> Result<(), E>; - - fn rule_type(&self) -> Cow; - - /// Return the path to the buildfile that defines this target, e.g. `fbcode//foo/bar/TARGETS` - fn buildfile_path(&self) -> &BuildFilePath; - - // TODO(cjhopman): Use existential traits to remove the Box<> once they are stabilized. - fn deps<'a>(&'a self) -> Box + Send + 'a>; - - // TODO(cjhopman): Use existential traits to remove the Box<> once they are stabilized. - fn exec_deps<'a>(&'a self) -> Box + Send + 'a>; - - // TODO(cjhopman): Use existential traits to remove the Box<> once they are stabilized. - fn target_deps<'a>(&'a self) -> Box + Send + 'a>; - - fn tests<'a>(&'a self) -> Option + Send + 'a>> { - None - } - - fn attr_to_string_alternate(&self, attr: &Self::Attr<'_>) -> String; - - fn attr_serialize( - &self, - attr: &Self::Attr<'_>, - serializer: S, - ) -> Result; - - fn attr_any_matches( - attr: &Self::Attr<'_>, - filter: &dyn Fn(&str) -> anyhow::Result, - ) -> anyhow::Result; - - fn special_attrs_for_each) -> Result<(), E>>( - &self, - func: F, - ) -> Result<(), E>; - - fn attrs_for_each) -> Result<(), E>>( - &self, - func: F, - ) -> Result<(), E>; - - fn map_attr>) -> R>(&self, key: &str, func: F) -> R; - - fn call_stack(&self) -> Option; -} - -#[async_trait] -pub trait TraversalFilter: Send + Sync { - /// Returns a the children that pass this filter. - async fn get_children(&self, target: &T) -> anyhow::Result>; -} - -/// The environment of a Buck query that can evaluate queries to produce a -/// result. -#[async_trait] -pub trait QueryEnvironment: Send + Sync { - type Target: QueryTarget; - - async fn get_node( - &self, - node_ref: &::NodeRef, - ) -> anyhow::Result; - - async fn get_node_for_default_configured_target( - &self, - node_ref: &::NodeRef, - ) -> anyhow::Result>; - - /// Evaluates a literal target pattern. 
See buck2_common::pattern - async fn eval_literals(&self, literal: &[&str]) -> anyhow::Result>; - - /// Evaluates a file literal - async fn eval_file_literal(&self, literal: &str) -> anyhow::Result; - - /// Performs a depth first traversal, with a post-order callback. The - /// delegate defines the traversal and receives the callback. - async fn dfs_postorder( - &self, - root: &TargetSet, - delegate: &mut dyn AsyncTraversalDelegate, - ) -> anyhow::Result<()>; - - async fn depth_limited_traversal( - &self, - root: &TargetSet, - delegate: &mut dyn AsyncTraversalDelegate, - depth: u32, - ) -> anyhow::Result<()>; - - async fn allpaths( - &self, - from: &TargetSet, - to: &TargetSet, - ) -> anyhow::Result> { - self.rdeps(from, to, None).await - } - - async fn somepath( - &self, - from: &TargetSet, - to: &TargetSet, - ) -> anyhow::Result> { - struct Delegate<'a, Q: QueryTarget> { - to: &'a TargetSet, - /// Contains targets that were reached starting from `from` that have a path to `to`. - path: TargetSet, - } - - #[async_trait] - impl<'a, Q: QueryTarget> AsyncTraversalDelegate for Delegate<'a, Q> { - fn visit(&mut self, target: Q) -> anyhow::Result<()> { - // NOTE: It would be better to just only post-order visit our parents, but that is - // not possible because we push *all* children when visiting a node, so we will not - // just post-visit all parents when we interrupt the search. - // NOTE: We assert! around the insertions below because we know each node should - // only be post-visited once but since we rely on `last()`, it matters so we check - // it. - - if let Some(head) = self.path.last() { - if target.deps().any(|t| t == head.node_ref()) { - assert!(self.path.insert(target)); - } - return Ok(()); - } - - if self.to.contains(target.node_ref()) { - assert!(self.path.insert(target)); - } - - Ok(()) - } - - async fn for_each_child( - &mut self, - target: &Q, - func: &mut dyn ChildVisitor, - ) -> anyhow::Result<()> { - // Stop adding more children if we are putting a path back together. - if !self.path.is_empty() || self.to.contains(target.node_ref()) { - return Ok(()); - } - let res: anyhow::Result<_> = try { - for dep in target.deps() { - func.visit(dep.clone())?; - } - }; - res.with_context(|| format!("Error traversing children of `{}`", target.node_ref())) - } - } - - let mut delegate = Delegate { - path: TargetSet::new(), - to, - }; - self.dfs_postorder(from, &mut delegate).await?; - Ok(delegate.path) - } - - async fn allbuildfiles(&self, _universe: &TargetSet) -> anyhow::Result { - Err(anyhow::anyhow!(QueryError::FunctionUnimplemented( - "allbuildfiles() is implemented only for uquery and cquery.", - ))) - } - - async fn rbuildfiles(&self, _universe: &FileSet, _argset: &FileSet) -> anyhow::Result { - Err(anyhow::anyhow!(QueryError::FunctionUnimplemented( - "rbuildfiles() is implemented only for uquery and cquery." - ))) - } - - async fn rdeps( - &self, - universe: &TargetSet, - from: &TargetSet, - depth: Option, - ) -> anyhow::Result> { - // First, we map all deps to their rdeps (parents). - // This effectively allows traversing the graph later, in reverse (following dependency back-edges). - struct ParentsCollectorDelegate { - parents: HashMap>, - // Keep track of nodes in-universe so that, if any rdeps are collected out-of-universe, - // we don't return them. 
- nodes_in_universe: TargetSet, - } - - #[async_trait] - impl AsyncTraversalDelegate for ParentsCollectorDelegate { - fn visit(&mut self, target: Q) -> anyhow::Result<()> { - self.nodes_in_universe.insert(target); - Ok(()) - } - - async fn for_each_child( - &mut self, - target: &Q, - func: &mut dyn ChildVisitor, - ) -> anyhow::Result<()> { - for dep in target.deps() { - func.visit(dep.clone()).with_context(|| { - format!("Error traversing children of `{}`", target.node_ref()) - })?; - self.parents - .entry(dep.clone()) - .or_default() - .insert(target.node_ref().clone()); - } - Ok(()) - } - } - - let mut parents_collector_delegate = ParentsCollectorDelegate { - parents: HashMap::new(), - nodes_in_universe: TargetSet::new(), - }; - - self.dfs_postorder(universe, &mut parents_collector_delegate) - .await?; - - // Now that we have a mapping of back-edges, traverse deps graph in reverse. - struct ReverseDelegate { - rdeps: TargetSet, - parents: HashMap>, - } - - #[async_trait] - impl AsyncTraversalDelegate for ReverseDelegate { - fn visit(&mut self, target: Q) -> anyhow::Result<()> { - self.rdeps.insert(target); - Ok(()) - } - - async fn for_each_child( - &mut self, - target: &Q, - func: &mut dyn ChildVisitor, - ) -> anyhow::Result<()> { - if let Some(parents) = self.parents.get(target.node_ref()) { - for parent in parents { - func.visit(parent.clone()).with_context(|| { - format!("Error traversing parents of `{}`", target.node_ref()) - })?; - } - } - Ok(()) - } - } - - let mut delegate = ReverseDelegate { - rdeps: TargetSet::new(), - parents: parents_collector_delegate.parents, - }; - - let roots_in_universe = from.intersect(&parents_collector_delegate.nodes_in_universe)?; - - match depth { - // For unbounded traversals, buck1 recommends specifying a large value. We'll accept either a negative (like -1) or - // a large value as unbounded. We can't just call it optional because args are positional only in the query syntax - // and so to specify a filter you need to specify a depth. - Some(v) if (0..1_000_000_000).contains(&v) => { - self.depth_limited_traversal(&roots_in_universe, &mut delegate, v as u32) - .await?; - } - _ => { - self.dfs_postorder(&roots_in_universe, &mut delegate) - .await?; - } - } - - Ok(delegate.rdeps) - } - - async fn testsof( - &self, - targets: &TargetSet, - ) -> anyhow::Result> { - let target_tests = targets - .iter() - .map(|target| { - let tests = target - .tests() - .ok_or(QueryError::FunctionUnimplemented("testsof"))?; - - anyhow::Ok((target, tests)) - }) - .collect::, _>>()?; - - let mut futs = target_tests - .into_iter() - .flat_map(|(target, tests)| { - tests.into_iter().map(move |test| async move { - let test = self.get_node(&test).await.with_context(|| { - format!( - "Error getting test of target {}", - LabeledNode::node_ref(target), - ) - })?; - anyhow::Ok(test) - }) - }) - .collect::>(); - - let mut ret = TargetSet::new(); - while let Some(test) = futs.try_next().await? 
{ - ret.insert(test); - } - - Ok(ret) - } - - async fn testsof_with_default_target_platform( - &self, - targets: &TargetSet, - ) -> anyhow::Result>> { - let target_tests = targets - .iter() - .map(|target| { - let tests = target - .tests() - .ok_or(QueryError::FunctionUnimplemented("testsof"))?; - - anyhow::Ok((target, tests)) - }) - .collect::, _>>()?; - - let mut futs = target_tests - .into_iter() - .flat_map(|(target, tests)| { - tests.into_iter().map(move |test| async move { - let test = self - .get_node_for_default_configured_target(&test) - .await - .with_context(|| { - format!( - "Error getting test of target {}", - LabeledNode::node_ref(target), - ) - })?; - anyhow::Ok(test) - }) - }) - .collect::>(); - - let mut ret = Vec::new(); - while let Some(test) = futs.try_next().await? { - ret.push(test); - } - - Ok(ret) - } - - async fn deps( - &self, - targets: &TargetSet, - depth: Option, - filter: Option<&dyn TraversalFilter>, - ) -> anyhow::Result> { - deps(self, targets, depth, filter).await - } - - async fn owner(&self, _paths: &FileSet) -> anyhow::Result>; -} - -pub async fn deps( - env: &Env, - targets: &TargetSet, - depth: Option, - filter: Option<&dyn TraversalFilter>, -) -> anyhow::Result> { - let mut deps = TargetSet::new(); - - struct Delegate<'a, Q: QueryTarget> { - deps: &'a mut TargetSet, - filter: Option<&'a dyn TraversalFilter>, - } - - #[async_trait] - impl<'a, Q: QueryTarget> AsyncTraversalDelegate for Delegate<'a, Q> { - fn visit(&mut self, target: Q) -> anyhow::Result<()> { - self.deps.insert(target); - Ok(()) - } - - async fn for_each_child( - &mut self, - target: &Q, - func: &mut dyn ChildVisitor, - ) -> anyhow::Result<()> { - let res: anyhow::Result<_> = try { - match self.filter { - Some(filter) => { - for dep in filter.get_children(target).await?.iter() { - func.visit(dep.node_ref().clone())?; - } - } - None => { - for dep in target.deps() { - func.visit(dep.clone())?; - } - } - } - }; - res.with_context(|| format!("Error traversing children of `{}`", target.node_ref())) - } - } - - match depth { - // For unbounded traversals, buck1 recommends specifying a large value. We'll accept either a negative (like -1) or - // a large value as unbounded. We can't just call it optional because args are positional only in the query syntax - // and so to specify a filter you need to specify a depth. 
- Some(v) if (0..1_000_000_000).contains(&v) => { - env.depth_limited_traversal( - targets, - &mut Delegate { - deps: &mut deps, - filter, - }, - v as u32, - ) - .await?; - } - _ => { - env.dfs_postorder( - targets, - &mut Delegate { - deps: &mut deps, - filter, - }, - ) - .await?; - } - } - - Ok(deps) -} diff --git a/app/buck2_query/src/query/environment/tests.rs b/app/buck2_query/src/query/environment/tests.rs index d679f60ddacab..0457000e865aa 100644 --- a/app/buck2_query/src/query/environment/tests.rs +++ b/app/buck2_query/src/query/environment/tests.rs @@ -18,17 +18,14 @@ use buck2_query::query::traversal::async_depth_limited_traversal; use buck2_query::query::traversal::NodeLookup; use derive_more::Display; use derive_more::From; -use dupe::OptionDupedExt; use indexmap::IndexSet; -use serde::Serializer; use super::*; -use crate::query::traversal::AsyncNodeLookup; #[derive(Debug, Copy, Clone, Dupe, Eq, PartialEq, Hash, Display, From)] struct TestTargetId(u64); -impl NodeLabel for TestTargetId {} +impl NodeKey for TestTargetId {} #[derive(Debug, Copy, Clone, Dupe, Eq, PartialEq, Hash, Display)] struct TestTargetAttr; @@ -47,9 +44,9 @@ impl fmt::Debug for TestTarget { } impl LabeledNode for TestTarget { - type NodeRef = TestTargetId; + type Key = TestTargetId; - fn node_ref(&self) -> &Self::NodeRef { + fn node_key(&self) -> &Self::Key { &self.id } } @@ -65,32 +62,32 @@ impl QueryTarget for TestTarget { unimplemented!() } + fn name(&self) -> Cow { + unimplemented!() + } + fn buildfile_path(&self) -> &BuildFilePath { unimplemented!() } - fn deps<'a>(&'a self) -> Box + Send + 'a> { + fn deps<'a>(&'a self) -> impl Iterator + Send + 'a { Box::new(self.deps.iter()) } - fn exec_deps<'a>(&'a self) -> Box + Send + 'a> { + fn exec_deps<'a>(&'a self) -> impl Iterator + Send + 'a { Box::new(std::iter::empty()) } - fn target_deps<'a>(&'a self) -> Box + Send + 'a> { + fn target_deps<'a>(&'a self) -> impl Iterator + Send + 'a { Box::new(std::iter::empty()) } - fn attr_to_string_alternate(&self, _attr: &Self::Attr<'_>) -> String { - unimplemented!("not needed for tests") + fn configuration_deps<'a>(&'a self) -> impl Iterator + Send + 'a { + Box::new(std::iter::empty()) } - fn attr_serialize( - &self, - _attr: &Self::Attr<'_>, - _serializer: S, - ) -> Result { - unimplemented!("not needed for tests") + fn toolchain_deps<'a>(&'a self) -> impl Iterator + Send + 'a { + Box::new(std::iter::empty()) } fn attr_any_matches( @@ -114,12 +111,15 @@ impl QueryTarget for TestTarget { unimplemented!() } - fn map_attr>) -> R>(&self, _key: &str, _func: F) -> R { + fn defined_attrs_for_each) -> Result<(), E>>( + &self, + _func: F, + ) -> Result<(), E> { unimplemented!() } - fn call_stack(&self) -> Option { - None + fn map_attr>) -> R>(&self, _key: &str, _func: F) -> R { + unimplemented!() } } @@ -128,7 +128,7 @@ struct TestEnv { } impl NodeLookup for TestEnv { - fn get(&self, label: &::NodeRef) -> anyhow::Result { + fn get(&self, label: &::Key) -> anyhow::Result { self.graph .get(label) .duped() @@ -138,10 +138,7 @@ impl NodeLookup for TestEnv { #[async_trait] impl AsyncNodeLookup for TestEnv { - async fn get( - &self, - label: &::NodeRef, - ) -> anyhow::Result { + async fn get(&self, label: &::Key) -> anyhow::Result { self.graph .get(label) .duped() @@ -155,14 +152,14 @@ impl QueryEnvironment for TestEnv { async fn get_node( &self, - node_ref: &::NodeRef, + node_ref: &::Key, ) -> anyhow::Result { >::get(self, node_ref) } async fn get_node_for_default_configured_target( &self, - _node_ref: &::NodeRef, + _node_ref: 
&::Key, ) -> anyhow::Result> { unimplemented!() } @@ -178,24 +175,33 @@ impl QueryEnvironment for TestEnv { async fn dfs_postorder( &self, root: &TargetSet, - delegate: &mut dyn AsyncTraversalDelegate, + delegate: impl AsyncChildVisitor, + visit: impl FnMut(Self::Target) -> anyhow::Result<()> + Send, ) -> anyhow::Result<()> { // TODO: Should this be part of QueryEnvironment's default impl? - async_depth_first_postorder_traversal(self, root.iter_names(), delegate).await + async_depth_first_postorder_traversal(self, root.iter_names(), delegate, visit).await } async fn depth_limited_traversal( &self, root: &TargetSet, - delegate: &mut dyn AsyncTraversalDelegate, + delegate: impl AsyncChildVisitor, + visit: impl FnMut(Self::Target) -> anyhow::Result<()> + Send, depth: u32, ) -> anyhow::Result<()> { - async_depth_limited_traversal(self, root.iter_names(), delegate, depth).await + async_depth_limited_traversal(self, root.iter_names(), delegate, visit, depth).await } async fn owner(&self, _paths: &FileSet) -> anyhow::Result> { unimplemented!() } + + async fn targets_in_buildfile( + &self, + _paths: &FileSet, + ) -> anyhow::Result> { + unimplemented!() + } } impl TestEnv { @@ -248,12 +254,12 @@ async fn test_one_path() -> anyhow::Result<()> { env.edge(1, 12); let env = env.build(); - let path = env.allpaths(&env.set("1")?, &env.set("3")?).await?; + let path = env.allpaths(&env.set("1")?, &env.set("3")?, None).await?; let expected = env.set("1,2,3")?; assert_eq!(path, expected); - let path = env.somepath(&env.set("1")?, &env.set("3")?).await?; - let expected = env.set("3,2,1")?; + let path = env.somepath(&env.set("1")?, &env.set("3")?, None).await?; + let expected = env.set("1,2,3")?; assert_eq!(path, expected); Ok(()) @@ -272,13 +278,12 @@ async fn test_many_paths() -> anyhow::Result<()> { env.edge(10, 20); let env = env.build(); - let path = env.allpaths(&env.set("1")?, &env.set("3")?).await?; + let path = env.allpaths(&env.set("1")?, &env.set("3")?, None).await?; let expected = env.set("1,10,11,2,3")?; assert_eq!(path, expected); - // We iterate with a stack so this is why we find this path - let path = env.somepath(&env.set("1")?, &env.set("3")?).await?; - let expected = env.set("3,11,10,1")?; + let path = env.somepath(&env.set("1")?, &env.set("3")?, None).await?; + let expected = env.set("1,2,3")?; assert_eq!(path, expected); Ok(()) @@ -293,13 +298,17 @@ async fn test_distinct_paths() -> anyhow::Result<()> { env.edge(20, 200); let env = env.build(); - let path = env.allpaths(&env.set("1,2")?, &env.set("100,200")?).await?; + let path = env + .allpaths(&env.set("1,2")?, &env.set("100,200")?, None) + .await?; let expected = env.set("2,20,200,1,10,100")?; assert_eq!(path, expected); // Same as above - let path = env.somepath(&env.set("1,2")?, &env.set("100,200")?).await?; - let expected = env.set("200,20,2")?; + let path = env + .somepath(&env.set("1,2")?, &env.set("100,200")?, None) + .await?; + let expected = env.set("1,10,100")?; assert_eq!(path, expected); Ok(()) @@ -312,11 +321,11 @@ async fn test_no_path() -> anyhow::Result<()> { env.edge(2, 20); let env = env.build(); - let path = env.allpaths(&env.set("1")?, &env.set("20")?).await?; + let path = env.allpaths(&env.set("1")?, &env.set("20")?, None).await?; let expected = TargetSet::new(); assert_eq!(path, expected); - let path = env.somepath(&env.set("1")?, &env.set("20")?).await?; + let path = env.somepath(&env.set("1")?, &env.set("20")?, None).await?; let expected = TargetSet::new(); assert_eq!(path, expected); @@ -331,11 +340,11 @@ async 
fn test_nested_paths() -> anyhow::Result<()> { env.edge(3, 4); let env = env.build(); - let path = env.allpaths(&env.set("1")?, &env.set("2,4")?).await?; + let path = env.allpaths(&env.set("1")?, &env.set("2,4")?, None).await?; assert_eq!(path, env.set("1,2,3,4")?); - let path = env.somepath(&env.set("1")?, &env.set("2,4")?).await?; - assert_eq!(path, env.set("2,1")?); + let path = env.somepath(&env.set("1")?, &env.set("2,4")?, None).await?; + assert_eq!(path, env.set("1,2")?); Ok(()) } @@ -352,17 +361,63 @@ async fn test_paths_with_cycles_present() -> anyhow::Result<()> { env.edge(4, 3); let env = env.build(); - let path = env.allpaths(&env.set("3")?, &env.set("4")?).await?; + let path = env.allpaths(&env.set("3")?, &env.set("4")?, None).await?; assert_eq!(path, env.set("1,2,3,4")?); - let path = env.allpaths(&env.set("1")?, &env.set("1")?).await?; + let path = env.allpaths(&env.set("1")?, &env.set("1")?, None).await?; assert_eq!(path, env.set("2,3,4,1")?); - let path = env.allpaths(&env.set("1")?, &env.set("5")?).await?; + let path = env.allpaths(&env.set("1")?, &env.set("5")?, None).await?; assert_eq!(path, env.set("1,2,3,4,5")?); - let path = env.rdeps(&env.set("1")?, &env.set("3")?, Some(2)).await?; - assert_eq!(path, env.set("3,2,4,1")?); + let path = env + .rdeps(&env.set("1")?, &env.set("3")?, Some(2), None) + .await?; + assert_eq!(path, env.set("4,1,2,3")?); + + Ok(()) +} + +#[tokio::test] +async fn test_rdeps() -> anyhow::Result<()> { + let mut env = TestEnvBuilder::default(); + env.edge(1, 2); + env.edge(2, 3); + env.edge(3, 4); // Dead end. + env.edge(4, 5); + env.edge(1, 3); // Shortcut. + env.edge(3, 6); + let env = env.build(); + + let path = env + .rdeps(&env.set("1")?, &env.set("6")?, Some(0), None) + .await?; + assert_eq!(path, env.set("6")?); + + let path = env + .rdeps(&env.set("1")?, &env.set("6")?, Some(1), None) + .await?; + assert_eq!(path, env.set("3,6")?); + + let path = env + .rdeps(&env.set("1")?, &env.set("6")?, Some(2), None) + .await?; + assert_eq!(path, env.set("1,2,3,6")?); + + let path = env + .rdeps(&env.set("1")?, &env.set("6")?, Some(3), None) + .await?; + assert_eq!(path, env.set("1,2,3,6")?); + + let path = env + .rdeps(&env.set("1")?, &env.set("6")?, Some(4), None) + .await?; + assert_eq!(path, env.set("1,2,3,6")?); + + let path = env + .rdeps(&env.set("1")?, &env.set("6")?, None, None) + .await?; + assert_eq!(path, env.set("1,2,3,6")?); Ok(()) } diff --git a/app/buck2_query/src/query/futures_queue_generic.rs b/app/buck2_query/src/query/futures_queue_generic.rs deleted file mode 100644 index 6d79b21766088..0000000000000 --- a/app/buck2_query/src/query/futures_queue_generic.rs +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::future::Future; -use std::pin::Pin; - -use futures::stream::FuturesOrdered; -use futures::stream::FuturesUnordered; -use futures::Stream; -use futures::StreamExt; - -/// `FuturesOrdered` or `FuturesUnordered`. -// This can be done with GAT, but GAT is unstable, and requires too much work to make it work, -// and this switch has only a little runtime overhead. 
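The wrapper deleted below existed so one queue could hold either ordered or unordered futures. The new `async_bfs_find_path` in this diff solves a related problem, mixing two concrete future types in a single `FuturesOrdered`, using `futures::future::Either` instead of a hand-rolled enum. A standalone sketch of that pattern (toy values, assumes the `futures` and `tokio` crates):

```rust
use futures::future::{self, Either};
use futures::stream::FuturesOrdered;
use futures::StreamExt;

#[tokio::main]
async fn main() {
    let mut queue = FuturesOrdered::new();
    // An already-known value and a pending computation share one queue,
    // because `Either<L, R>` is itself a `Future` when both arms are.
    queue.push_back(Either::Left(future::ready(1)));
    queue.push_back(Either::Right(async { 1 + 1 }));
    while let Some(n) = queue.next().await {
        println!("{n}");
    }
}
```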
-pub(crate) enum FuturesQueue { - Ordered(FuturesOrdered), - Unordered(FuturesUnordered), -} - -impl FuturesQueue { - pub(crate) fn new_ordered() -> Self { - FuturesQueue::Ordered(FuturesOrdered::new()) - } - - pub(crate) fn new_unordered() -> Self { - FuturesQueue::Unordered(FuturesUnordered::new()) - } - - pub(crate) fn push(&mut self, fut: Fut) { - match self { - FuturesQueue::Ordered(futures_ordered) => futures_ordered.push_back(fut), - FuturesQueue::Unordered(futures_unordered) => futures_unordered.push(fut), - } - } -} - -impl Stream for FuturesQueue { - type Item = Fut::Output; - - fn poll_next( - self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - match self.get_mut() { - FuturesQueue::Ordered(futures_ordered) => futures_ordered.poll_next_unpin(cx), - FuturesQueue::Unordered(futures_unordered) => futures_unordered.poll_next_unpin(cx), - } - } -} diff --git a/app/buck2_query/src/query/graph.rs b/app/buck2_query/src/query/graph.rs new file mode 100644 index 0000000000000..d18c61871f5bc --- /dev/null +++ b/app/buck2_query/src/query/graph.rs @@ -0,0 +1,18 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub(crate) mod async_bfs; +pub mod bfs; +pub mod dfs; +pub(crate) mod graph; +pub mod node; +pub mod successors; +pub(crate) mod vec_as_map; +pub(crate) mod vec_as_set; +pub(crate) mod visited; diff --git a/app/buck2_query/src/query/graph/async_bfs.rs b/app/buck2_query/src/query/graph/async_bfs.rs new file mode 100644 index 0000000000000..a7317cb7d50ff --- /dev/null +++ b/app/buck2_query/src/query/graph/async_bfs.rs @@ -0,0 +1,386 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::future; +use std::mem; + +use buck2_error::internal_error_anyhow; +use buck2_error::BuckErrorContext; +use futures::future::Either; +use futures::stream::FuturesOrdered; +use futures::StreamExt; +use starlark_map::unordered_map; +use starlark_map::unordered_map::UnorderedMap; +use starlark_map::Hashed; + +use crate::query::graph::node::LabeledNode; +use crate::query::graph::successors::AsyncChildVisitor; +use crate::query::traversal::AsyncNodeLookup; + +struct Node { + /// `None` for roots. + parent: Option, + /// `None` when not yet looked up. 
+ node: Option, +} + +struct BfsVisited { + visited: UnorderedMap>, +} + +impl BfsVisited { + fn take_path(mut self, last: &N::Key, mut item: impl FnMut(N)) -> anyhow::Result<()> { + let node = self + .visited + .remove(last) + .with_internal_error_anyhow(|| format!("missing node {}", last))?; + if node.node.is_some() { + return Err(internal_error_anyhow!("duplicate node {}", last)); + } + let mut parent_key = node.parent; + while let Some(key) = parent_key { + let node = self + .visited + .remove(&key) + .with_internal_error_anyhow(|| format!("missing node {}", key))?; + item( + node.node + .with_internal_error_anyhow(|| format!("missing node {}", key))?, + ); + parent_key = node.parent; + } + Ok(()) + } +} + +pub(crate) async fn async_bfs_find_path<'a, N: LabeledNode + 'static>( + roots: impl IntoIterator, + lookup: impl AsyncNodeLookup, + successors: impl AsyncChildVisitor, + target: impl Fn(&N::Key) -> Option + Sync, +) -> anyhow::Result>> { + let lookup = &lookup; + + let mut visited = BfsVisited:: { + visited: UnorderedMap::new(), + }; + + let mut queue = FuturesOrdered::new(); + + for root in roots { + let root_key = Hashed::new(root.node_key()); + match visited.visited.raw_entry_mut().from_key_hashed(root_key) { + unordered_map::RawEntryMut::Occupied(_) => {} + unordered_map::RawEntryMut::Vacant(e) => { + if let Some(target) = target(root_key.key()) { + return Ok(Some(vec![target])); + } + + e.insert_hashed( + root_key.cloned(), + Node { + parent: None, + node: None, + }, + ); + queue.push_back(Either::Left(future::ready(( + root_key.into_key().clone(), + anyhow::Ok(root.dupe()), + )))); + } + } + } + + while let Some((key, node)) = queue.next().await { + match node { + Ok(node) => { + let mut found: Option = None; + successors + .for_each_child(&node, &mut |succ: &N::Key| { + if found.is_some() { + return Ok(()); + } + + let succ = Hashed::new(succ); + match visited.visited.raw_entry_mut().from_key_hashed(succ) { + unordered_map::RawEntryMut::Occupied(_) => {} + unordered_map::RawEntryMut::Vacant(e) => { + if let Some(target) = target(succ.key()) { + found = Some(target); + return Ok(()); + } + + e.insert_hashed( + succ.cloned(), + Node { + parent: Some(node.node_key().clone()), + node: None, + }, + ); + let succ = (*succ.key()).clone(); + queue.push_back(Either::Right(async move { + let succ_node = lookup.get(&succ).await; + (succ, succ_node) + })); + } + } + + Ok(()) + }) + .await?; + + if let Some(found) = found { + let key = node.node_key().clone(); + let mut path: Vec = vec![found, node]; + visited.take_path(&key, |node| path.push(node))?; + path.reverse(); + return Ok(Some(path)); + } + let prev = mem::replace( + &mut visited + .visited + .get_mut(&key) + .with_internal_error_anyhow(|| format!("missing node {}", key))? 
+ .node, + Some(node), + ); + if prev.is_some() { + return Err(internal_error_anyhow!("duplicate node {}", key)); + } + } + Err(mut e) => { + e = e.context(format!("traversing {}", key)); + let mut nodes = Vec::new(); + visited.take_path(&key, |node| nodes.push(node))?; + for node in nodes { + e = e.context(format!("traversing {}", node.node_key())); + } + return Err(e); + } + } + } + + Ok(None) +} + +#[cfg(test)] +mod tests { + use std::collections::HashMap; + use std::collections::HashSet; + + use async_trait::async_trait; + use buck2_query::query::traversal::ChildVisitor; + use dupe::Dupe; + use gazebo::prelude::VecExt; + + use crate::query::graph::async_bfs::async_bfs_find_path; + use crate::query::graph::node::LabeledNode; + use crate::query::graph::node::NodeKey; + use crate::query::graph::successors::AsyncChildVisitor; + use crate::query::traversal::AsyncNodeLookup; + + #[derive(Copy, Clone, Dupe, derive_more::Display, Debug, Eq, PartialEq, Hash)] + #[display("{:?}", self)] + struct TestNodeKey(u32); + #[derive(Copy, Clone, Dupe, Debug, Eq, PartialEq)] + struct TestNode(TestNodeKey); + + impl NodeKey for TestNodeKey {} + + impl LabeledNode for TestNode { + type Key = TestNodeKey; + + fn node_key(&self) -> &Self::Key { + &self.0 + } + } + + #[derive(Default)] + struct TestGraph { + successors: HashMap>, + errors: HashSet, + } + + impl TestGraph { + fn add_edge(&mut self, from: u32, to: u32) { + self.successors.entry(from).or_default().push(to); + } + + fn add_error(&mut self, node: u32) { + self.errors.insert(node); + } + } + + impl TestGraph { + async fn bfs_find_path( + &self, + roots: impl IntoIterator, + target: u32, + ) -> anyhow::Result>> { + let roots: Vec = roots + .into_iter() + .map(|n| TestNode(TestNodeKey(n))) + .collect(); + let path = async_bfs_find_path(&roots, self, self, |n| { + if n.0 == target { + Some(TestNode(*n)) + } else { + None + } + }) + .await?; + Ok(path.map(|path| path.into_map(|n| n.0.0))) + } + } + + impl AsyncChildVisitor for TestGraph { + async fn for_each_child( + &self, + node: &TestNode, + mut children: impl ChildVisitor, + ) -> anyhow::Result<()> { + for succ in self.successors.get(&node.0.0).unwrap_or(&Vec::new()) { + children.visit(&TestNodeKey(*succ))?; + } + Ok(()) + } + } + + #[async_trait] + impl AsyncNodeLookup for TestGraph { + async fn get(&self, label: &TestNodeKey) -> anyhow::Result { + if self.errors.contains(&label.0) { + return Err(anyhow::anyhow!("my error")); + } + Ok(TestNode(*label)) + } + } + + #[allow(dead_code)] + struct SuccessorsPlus1; + + impl AsyncChildVisitor for SuccessorsPlus1 { + async fn for_each_child( + &self, + node: &TestNode, + mut children: impl ChildVisitor, + ) -> anyhow::Result<()> { + children.visit(&TestNodeKey(node.0.0 + 1))?; + Ok(()) + } + } + + #[allow(dead_code)] + struct TestLookupImpl; + + #[async_trait] + impl AsyncNodeLookup for TestLookupImpl { + async fn get(&self, label: &TestNodeKey) -> anyhow::Result { + Ok(TestNode(*label)) + } + } + + #[tokio::test] + async fn test_async_bfs_find_path() { + let mut g = TestGraph::default(); + g.add_edge(0, 1); + g.add_edge(1, 2); + g.add_edge(2, 3); + g.add_edge(3, 4); + g.add_edge(4, 5); + + let path = g.bfs_find_path([0], 0).await.unwrap(); + assert_eq!(Some(vec![0]), path); + + let path = g.bfs_find_path([0], 1).await.unwrap(); + assert_eq!(Some(vec![0, 1]), path); + + let path = g.bfs_find_path([0], 2).await.unwrap(); + assert_eq!(Some(vec![0, 1, 2]), path); + + let path = g.bfs_find_path([0], 3).await.unwrap(); + assert_eq!(Some(vec![0, 1, 2, 3]), 
path); + } + + #[tokio::test] + async fn test_async_bfs_find_path_branch() { + let mut g = TestGraph::default(); + g.add_edge(0, 1); + g.add_edge(0, 3); + g.add_edge(1, 2); + g.add_edge(2, 3); + g.add_edge(3, 4); + + let path = g.bfs_find_path([0], 0).await.unwrap(); + assert_eq!(Some(vec![0]), path); + + let path = g.bfs_find_path([0], 1).await.unwrap(); + assert_eq!(Some(vec![0, 1]), path); + + let path = g.bfs_find_path([0], 2).await.unwrap(); + assert_eq!(Some(vec![0, 1, 2]), path); + + let path = g.bfs_find_path([0], 3).await.unwrap(); + assert_eq!(Some(vec![0, 3]), path); + + let path = g.bfs_find_path([0], 4).await.unwrap(); + assert_eq!(Some(vec![0, 3, 4]), path); + } + + #[tokio::test] + async fn test_async_bfs_find_path_error() { + let mut g = TestGraph::default(); + g.add_edge(0, 1); + g.add_edge(1, 2); + g.add_edge(2, 3); + g.add_error(3); + + let err = g.bfs_find_path([0], 9).await.unwrap_err(); + + let errors: Vec = err.chain().map(|e| e.to_string()).collect(); + assert_eq!( + vec![ + "traversing TestNodeKey(0)", + "traversing TestNodeKey(1)", + "traversing TestNodeKey(2)", + "traversing TestNodeKey(3)", + "my error" + ], + errors + ); + } + + #[tokio::test] + async fn test_async_bfs_find_path_multiple_starts() { + let mut g = TestGraph::default(); + g.add_edge(0, 1); + g.add_edge(1, 2); + g.add_edge(2, 3); + g.add_edge(3, 4); + g.add_edge(10, 11); + g.add_edge(11, 12); + + let path = g.bfs_find_path([0, 10], 12).await.unwrap(); + assert_eq!(Some(vec![10, 11, 12]), path); + } + + #[tokio::test] + async fn test_async_bfs_find_path_no_path() { + let mut g = TestGraph::default(); + g.add_edge(0, 1); + g.add_edge(1, 2); + g.add_edge(1, 3); + g.add_edge(1, 4); + g.add_edge(2, 3); + g.add_edge(3, 4); + + let path = g.bfs_find_path([0], 10).await.unwrap(); + assert_eq!(None, path); + } +} diff --git a/app/buck2_query/src/query/graph/bfs.rs b/app/buck2_query/src/query/graph/bfs.rs new file mode 100644 index 0000000000000..be244dee63ac9 --- /dev/null +++ b/app/buck2_query/src/query/graph/bfs.rs @@ -0,0 +1,78 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! Generic BFS implementation. 
+ +use std::collections::VecDeque; +use std::hash::Hash; + +use starlark_map::unordered_set; +use starlark_map::unordered_set::UnorderedSet; +use starlark_map::Hashed; + +use crate::query::graph::successors::GraphSuccessors; + +pub fn bfs_preorder( + roots: impl IntoIterator, + successors: impl GraphSuccessors, + mut visit: impl FnMut(N), +) { + let mut visited: UnorderedSet = UnorderedSet::new(); + let mut work: VecDeque = VecDeque::new(); + for root in roots { + let root = Hashed::new(root); + match visited.raw_entry_mut().from_entry_hashed(root.as_ref()) { + unordered_set::RawEntryMut::Occupied(_) => {} + unordered_set::RawEntryMut::Vacant(entry) => { + entry.insert_hashed(root.clone()); + work.push_back(root.into_key()); + } + } + } + + while let Some(curr) = work.pop_front() { + successors.for_each_successor(&curr, |succ| { + let succ = Hashed::new(succ); + match visited.raw_entry_mut().from_entry_hashed(succ) { + unordered_set::RawEntryMut::Occupied(_) => {} + unordered_set::RawEntryMut::Vacant(entry) => { + entry.insert_hashed(succ.cloned()); + work.push_back(succ.into_key().clone()); + } + } + }); + visit(curr); + } +} + +#[cfg(test)] +mod tests { + use crate::query::graph::bfs::bfs_preorder; + use crate::query::graph::successors::GraphSuccessors; + + #[test] + fn test_bfs_preorder() { + struct SuccessorsImpl; + + impl GraphSuccessors for SuccessorsImpl { + fn for_each_successor(&self, node: &u32, mut cb: impl FnMut(&u32)) { + for node in [node + 3, node + 5] { + if node <= 10 { + cb(&node); + } + } + } + } + + let mut visited = Vec::new(); + bfs_preorder([0], SuccessorsImpl, |n| visited.push(n)); + + assert_eq!(vec![0, 3, 5, 6, 8, 10, 9], visited); + } +} diff --git a/app/buck2_query/src/query/graph/dfs.rs b/app/buck2_query/src/query/graph/dfs.rs new file mode 100644 index 0000000000000..27235f5eeebeb --- /dev/null +++ b/app/buck2_query/src/query/graph/dfs.rs @@ -0,0 +1,127 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! Generic DFS implementation. + +use std::hash::Hash; + +use dupe::Dupe; +use starlark_map::unordered_set::UnorderedSet; + +use crate::query::graph::successors::GraphSuccessors; +use crate::query::graph::vec_as_set::VecAsSet; +use crate::query::graph::visited::VisitedNodes; + +pub fn dfs_postorder( + roots: impl IntoIterator, + successors: impl GraphSuccessors, + visit: impl FnMut(N) -> anyhow::Result<()>, +) -> anyhow::Result<()> { + dfs_postorder_impl::<_, UnorderedSet>(roots, successors, visit) +} + +pub(crate) fn dfs_postorder_impl>( + roots: impl IntoIterator, + successors: impl GraphSuccessors, + mut visit: impl FnMut(N) -> anyhow::Result<()>, +) -> anyhow::Result<()> { + // This implementation simply performs a dfs. We maintain a work stack here. + // When visiting a node, we first add an item to the work stack to call + // post_visit for that node, and then add items to visit all the + // children. While a work item for a child will not be added if it has + // already been visited, if there's an item in the stack for that child + // it will still be added. When popping the visit, if the node had been + // visited, it's ignored. This ensures that a node's children are all + // visited before we do PostVisit for that node. 
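A synchronous miniature of the invariant just described, on a three-node toy graph: pushing `PostVisit` beneath a node's children on the stack guarantees the post-order callback fires only after every child is done. This sketch is independent of the types defined next:

```rust
use std::collections::HashSet;

fn main() {
    // Toy graph: 0 -> {1, 2}, 1 -> {2}.
    let children = |n: u32| -> Vec<u32> {
        match n {
            0 => vec![1, 2],
            1 => vec![2],
            _ => vec![],
        }
    };

    enum Work {
        Visit(u32),
        PostVisit(u32),
    }

    let mut visited = HashSet::new();
    let mut order = Vec::new();
    let mut stack = vec![Work::Visit(0)];
    while let Some(item) = stack.pop() {
        match item {
            Work::Visit(n) => {
                if !visited.insert(n) {
                    continue; // already visited: drop the stale work item
                }
                // Queued *below* the children, so it pops only after them.
                stack.push(Work::PostVisit(n));
                for c in children(n) {
                    if !visited.contains(&c) {
                        stack.push(Work::Visit(c));
                    }
                }
            }
            Work::PostVisit(n) => order.push(n),
        }
    }
    assert_eq!(order, vec![2, 1, 0]); // children strictly before parents
}
```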
+ enum WorkItem { + PostVisit(N), + Visit(H, N), + } + + let mut visited: V = V::default(); + let mut work: Vec> = roots + .into_iter() + .map(|t| WorkItem::Visit(V::hash(&t), t)) + .collect(); + + while let Some(curr) = work.pop() { + match curr { + WorkItem::Visit(hash, target) => { + if !visited.insert_clone(hash, &target) { + continue; + } + + work.push(WorkItem::PostVisit(target.dupe())); + + successors.for_each_successor(&target, |succ| { + let hash = V::hash(succ); + if !visited.contains(hash, succ) { + work.push(WorkItem::Visit(hash, succ.dupe())); + } + }); + } + WorkItem::PostVisit(target) => { + visit(target)?; + } + } + } + + Ok(()) +} + +pub(crate) fn dfs_preorder( + roots: impl IntoIterator, + successors: impl GraphSuccessors, + mut visit: impl FnMut(u32), +) { + let mut visited = VecAsSet::default(); + let mut work = Vec::new(); + + for root in roots { + work.push(root); + + while let Some(node) = work.pop() { + if !visited.insert(node) { + continue; + } + visit(node); + + let work_len = work.len(); + successors.for_each_successor(&node, |succ| { + work.push(*succ); + }); + work[work_len..].reverse(); + } + } +} + +#[cfg(test)] +mod tests { + use crate::query::graph::dfs::dfs_preorder; + use crate::query::graph::successors::GraphSuccessors; + + #[test] + fn test() { + struct SuccessorImpl; + + impl GraphSuccessors for SuccessorImpl { + fn for_each_successor(&self, node: &u32, mut cb: impl FnMut(&u32)) { + for succ in [node + 2, node + 3] { + if succ <= 10 { + cb(&succ); + } + } + } + } + + let mut visited = Vec::new(); + dfs_preorder([0, 1], SuccessorImpl, |n| visited.push(n)); + assert_eq!(vec![0, 2, 4, 6, 8, 10, 9, 7, 5, 3, 1], visited); + } +} diff --git a/app/buck2_query/src/query/graph/graph.rs b/app/buck2_query/src/query/graph/graph.rs new file mode 100644 index 0000000000000..76d5f77ab72a7 --- /dev/null +++ b/app/buck2_query/src/query/graph/graph.rs @@ -0,0 +1,492 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::collections::VecDeque; + +use futures::stream::FuturesUnordered; +use futures::StreamExt; +use starlark_map::unordered_map; +use starlark_map::unordered_map::UnorderedMap; +use starlark_map::Hashed; + +use crate::query::graph::dfs::dfs_postorder_impl; +use crate::query::graph::dfs::dfs_preorder; +use crate::query::graph::node::LabeledNode; +use crate::query::graph::successors::AsyncChildVisitor; +use crate::query::graph::successors::GraphSuccessors; +use crate::query::graph::vec_as_map::VecAsMap; +use crate::query::graph::vec_as_set::VecAsSet; +use crate::query::traversal::AsyncNodeLookup; + +#[derive(Clone)] +struct GraphNode { + node: N, + children: Vec, +} + +/// Graph with all nodes and edges resolved and represented as integers. +/// +/// This is fast to traverse. 
+#[derive(Clone)] +pub(crate) struct Graph<N: LabeledNode> { + nodes: Vec<GraphNode<N>>, + node_to_index: UnorderedMap<N::Key, u32>, +} + +impl<N: LabeledNode> Graph<N> { + pub(crate) fn get(&self, node: &N::Key) -> Option<&N> { + self.node_to_index + .get(node) + .map(|index| &self.nodes[*index as usize].node) + } +} + +struct GraphBuilder<N: LabeledNode> { + node_to_index: UnorderedMap<N::Key, u32>, + nodes: VecAsMap<GraphNode<N>>, +} + +impl<N: LabeledNode> GraphBuilder<N> { + fn build(self) -> Graph<N> { + assert_eq!(self.nodes.vec.len(), self.node_to_index.len()); + let nodes = self + .nodes + .vec + .into_iter() + .map(|n| n.unwrap()) + .collect::<Vec<_>>(); + Graph { + nodes, + node_to_index: self.node_to_index, + } + } + + fn get_or_create_node(&mut self, node: &N::Key) -> u32 { + let node = Hashed::new(node); + let new_index = self.node_to_index.len(); + match self.node_to_index.raw_entry_mut().from_key_hashed(node) { + unordered_map::RawEntryMut::Occupied(e) => *e.get(), + unordered_map::RawEntryMut::Vacant(e) => { + let new_index = new_index.try_into().unwrap(); + e.insert((*node.key()).clone(), new_index); + new_index + } + } + } + + fn insert(&mut self, index: u32, node: N) { + let prev = self.nodes.insert( + index, + GraphNode { + node, + children: Vec::new(), + }, + ); + assert!(prev.is_none()); + } +} + +impl<T: LabeledNode> Graph<T> { + /// Build the graph by traversing the nodes in `root` and their children. + /// + /// The resulting graph has node indices assigned non-deterministically. + pub(crate) async fn build( + nodes: &impl AsyncNodeLookup<T>, + root: impl IntoIterator<Item = T::Key>, + successors: impl AsyncChildVisitor<T>, + ) -> anyhow::Result<Graph<T>> { + let mut graph = GraphBuilder::<T> { + nodes: VecAsMap::default(), + node_to_index: UnorderedMap::default(), + }; + + // Map from node to parent node. + let mut visited: VecAsMap<Option<u32>> = VecAsMap::default(); + let mut push = |queue: &mut FuturesUnordered<_>, + target_ref: &T::Key, + target_index: u32, + parent: Option<u32>| { + if visited.contains_key(target_index) { + return; + } + + visited.insert(target_index, parent); + + let target_ref = target_ref.clone(); + + queue.push(async move { + let result = nodes.get(&target_ref).await; + (target_index, result) + }) + }; + + let mut queue = FuturesUnordered::new(); + + for target in root { + let index = graph.get_or_create_node(&target); + push(&mut queue, &target, index, None); + } + + // TODO(cjhopman): FuturesOrdered/Unordered interacts poorly with tokio cooperative scheduling + // (see https://github.com/rust-lang/futures-rs/issues/2053). Clean this up once a good + // solution there exists.
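For context on the `unconstrained` call in the loop that follows: tokio's cooperative budget forces even always-ready futures to yield periodically, which interacts badly with draining a large futures queue (the linked issue). `tokio::task::unconstrained` opts a single poll site out of that budget. A minimal sketch, assuming the `tokio` and `futures` crates:

```rust
use futures::stream::FuturesUnordered;
use futures::StreamExt;

#[tokio::main]
async fn main() {
    // 1000 already-ready futures, the worst case for the coop budget.
    let mut queue: FuturesUnordered<_> = (0..1000u64).map(|i| async move { i }).collect();
    let mut sum = 0u64;
    // `unconstrained` exempts this await from tokio's cooperative yielding.
    while let Some(i) = tokio::task::unconstrained(queue.next()).await {
        sum += i;
    }
    assert_eq!(sum, 499_500);
}
```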
+ while let Some((target_index, node)) = tokio::task::unconstrained(queue.next()).await { + let result: anyhow::Result<_> = try { + let node = node?; + + graph.insert(target_index, node.clone()); + + successors + .for_each_child(&node, &mut |child: &T::Key| { + let child_index = graph.get_or_create_node(child); + graph + .nodes + .get_mut(target_index) + .unwrap() + .children + .push(child_index); + push(&mut queue, child, child_index, Some(target_index)); + Ok(()) + }) + .await?; + graph + .nodes + .get_mut(target_index) + .unwrap() + .children + .shrink_to_fit(); + }; + if let Err(mut e) = result { + let mut target = target_index; + while let Some(Some(parent_index)) = visited.get(target) { + match graph.nodes.get(*parent_index) { + None => { + return Err(e.context(format!( + "Node {} has no node assigned (internal error)", + parent_index + ))); + } + Some(parent) => { + e = e.context(format!( + "Error traversing children of {}", + parent.node.node_key() + )); + target = *parent_index; + } + } + } + return Err(e); + } + } + + Ok(graph.build()) + } + + /// Build graph with nodes laid out in stable DFS order. + pub(crate) async fn build_stable_dfs( + nodes: &impl AsyncNodeLookup<T>, + root: impl IntoIterator<Item = T::Key>, + successors: impl AsyncChildVisitor<T>, + ) -> anyhow::Result<Graph<T>> { + let root = root.into_iter().collect::<Vec<_>>(); + let graph = Self::build(nodes, root.iter().cloned(), successors).await?; + let root = root.into_iter().map(|n| graph.node_to_index[&n]); + let mut old_to_new: VecAsMap<u32> = VecAsMap::default(); + + let mut new_index = 0; + graph.dfs_preorder_indices(root, |old_index| { + let prev = old_to_new.insert(old_index, new_index); + assert!(prev.is_none()); + new_index += 1; + }); + + assert_eq!(graph.nodes.len(), new_index as usize); + + Ok(graph.index_remap(|old_index| *old_to_new.get(old_index).unwrap())) + } + + fn dfs_preorder_indices(&self, roots: impl IntoIterator<Item = u32>, visitor: impl FnMut(u32)) { + dfs_preorder(roots, GraphSuccessorsImpl { graph: self }, visitor) + } + + /// Remap the indices of the graph. + fn index_remap(self, remap: impl Fn(u32) -> u32) -> Self { + let node_count = self.nodes.len().try_into().unwrap(); + self.index_remap_opt(|i| Some(remap(i)), node_count) + } + + /// Remap the indices of the graph. + /// + /// The `remap` function must populate the range `0..count`, otherwise this function will panic. + fn index_remap_opt(self, remap: impl Fn(u32) -> Option<u32>, count: u32) -> Self { + let Graph { + nodes, + mut node_to_index, + } = self; + + let mut new_nodes: VecAsMap<GraphNode<T>> = VecAsMap::default(); + + for (i, mut node) in nodes.into_iter().enumerate() { + let old_id: u32 = i.try_into().unwrap(); + let Some(new_id) = remap(old_id) else { + continue; + }; + assert!(new_id < count); + + node.children.retain_mut(|node| { + if let Some(new_node) = remap(*node) { + *node = new_node; + true + } else { + false + } + }); + let prev = new_nodes.insert(new_id, node); + assert!(prev.is_none()); + } + + node_to_index.retain(|_, index| { + if let Some(new_index) = remap(*index) { + *index = new_index; + true + } else { + false + } + }); + + let new_nodes = new_nodes.vec.into_iter().map(|n| n.unwrap()).collect(); + Graph { + nodes: new_nodes, + node_to_index, + } + } + + /// Reverse the edges.
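`reverse`, implemented next, is what lets `rdeps` reuse a forward deps traversal: in adjacency-list form, reversal just redistributes each node's child list onto the children. A standalone sketch over plain `Vec<Vec<u32>>`:

```rust
fn reverse(edges: Vec<Vec<u32>>) -> Vec<Vec<u32>> {
    let mut rev: Vec<Vec<u32>> = vec![Vec::new(); edges.len()];
    for (from, children) in edges.iter().enumerate() {
        for &to in children {
            rev[to as usize].push(from as u32); // flip the edge direction
        }
    }
    rev
}

fn main() {
    // 0 -> 1, 0 -> 2, 1 -> 2
    let g = vec![vec![1, 2], vec![2], vec![]];
    assert_eq!(reverse(g), vec![vec![], vec![0], vec![0, 1]]);
}
```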
+    pub(crate) fn reverse(self) -> Graph<T> {
+        let Graph {
+            mut nodes,
+            node_to_index,
+        } = self;
+        let mut new_edges: Vec<Vec<u32>> = (0..nodes.len()).map(|_| Vec::new()).collect();
+        for node in nodes.iter().enumerate() {
+            for child in &node.1.children {
+                new_edges[*child as usize].push(node.0 as u32);
+            }
+        }
+        for (node, new_edges) in nodes.iter_mut().zip(new_edges) {
+            node.children = new_edges;
+        }
+        Graph {
+            nodes,
+            node_to_index,
+        }
+    }
+
+    pub(crate) fn depth_first_postorder_traversal<RootIter: IntoIterator<Item = T::Key>>(
+        &self,
+        root: RootIter,
+        mut visitor: impl FnMut(&T) -> anyhow::Result<()>,
+    ) -> anyhow::Result<()> {
+        dfs_postorder_impl::<_, VecAsSet>(
+            root.into_iter().map(|root| self.node_to_index[&root]),
+            GraphSuccessorsImpl { graph: self },
+            |index| visitor(&self.nodes[index as usize].node),
+        )
+    }
+
+    /// Create a graph from the given roots up to the given max depth.
+    ///
+    /// Zero depth means only the roots.
+    pub(crate) fn take_max_depth(
+        self,
+        roots: impl IntoIterator<Item = T::Key>,
+        max_depth: u32,
+    ) -> Graph<T> {
+        // Map from old index to new index.
+        let mut visited: VecAsMap<u32> = VecAsMap::default();
+        let mut ids_to_keep = Vec::new();
+        let mut edge: VecDeque<u32> = VecDeque::new();
+
+        for root in roots {
+            let root = self.node_to_index[&root];
+            if !visited.contains_key(root) {
+                let new_index = ids_to_keep.len().try_into().unwrap();
+                let prev = visited.insert(root, new_index);
+                assert!(prev.is_none());
+                ids_to_keep.push(root);
+                edge.push_back(root);
+            }
+        }
+
+        for _ in 0..max_depth {
+            for _ in 0..edge.len() {
+                let node = edge.pop_front().unwrap();
+                for &succ in &self.nodes[node as usize].children {
+                    if !visited.contains_key(succ) {
+                        let new_index = ids_to_keep.len().try_into().unwrap();
+                        let prev = visited.insert(succ, new_index);
+                        assert!(prev.is_none());
+                        ids_to_keep.push(succ);
+                        edge.push_back(succ);
+                    }
+                }
+            }
+        }
+
+        if self.nodes.len() == ids_to_keep.len() {
+            // We visited everything. Skip expensive remap.
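+            // (Otherwise `index_remap_opt` below renumbers the survivors densely,
+            // in the BFS discovery order recorded in `visited`.)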
+            return self;
+        }
+
+        self.index_remap_opt(
+            |i| visited.get(i).copied(),
+            ids_to_keep.len().try_into().unwrap(),
+        )
+    }
+}
+
+struct GraphSuccessorsImpl<'a, N: LabeledNode> {
+    graph: &'a Graph<N>,
+}
+
+impl<'a, N: LabeledNode> GraphSuccessors<u32> for GraphSuccessorsImpl<'a, N> {
+    fn for_each_successor(&self, node: &u32, mut cb: impl FnMut(&u32)) {
+        for child in &self.graph.nodes[*node as usize].children {
+            cb(child);
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use async_trait::async_trait;
+    use buck2_query::query::traversal::ChildVisitor;
+    use dupe::Dupe;
+
+    use crate::query::graph::bfs::bfs_preorder;
+    use crate::query::graph::graph::Graph;
+    use crate::query::graph::graph::GraphSuccessorsImpl;
+    use crate::query::graph::node::LabeledNode;
+    use crate::query::graph::node::NodeKey;
+    use crate::query::graph::successors::AsyncChildVisitor;
+    use crate::query::traversal::AsyncNodeLookup;
+
+    #[derive(Clone, Copy, Dupe, Eq, PartialEq, Hash, derive_more::Display, Debug)]
+    #[display("{}", _0)]
+    struct Ref(u32);
+
+    #[derive(Clone, Dupe)]
+    struct Node(Ref);
+
+    impl NodeKey for Ref {}
+
+    impl LabeledNode for Node {
+        type Key = Ref;
+
+        fn node_key(&self) -> &Self::Key {
+            &self.0
+        }
+    }
+
+    async fn build_graph(start: &[u32], edges: &[(u32, u32)]) -> Graph<Node> {
+        struct Lookup;
+
+        #[async_trait]
+        impl AsyncNodeLookup<Node> for Lookup {
+            async fn get(&self, label: &Ref) -> anyhow::Result<Node> {
+                Ok(Node(label.dupe()))
+            }
+        }
+
+        struct Successors {
+            edges: Vec<(u32, u32)>,
+        }
+
+        impl AsyncChildVisitor<Node> for Successors {
+            async fn for_each_child(
+                &self,
+                node: &Node,
+                mut children: impl ChildVisitor<Node>,
+            ) -> anyhow::Result<()> {
+                for (from, to) in &self.edges {
+                    if node.0.0 == *from {
+                        children.visit(&Ref(*to))?;
+                    }
+                }
+                Ok(())
+            }
+        }
+
+        Graph::build(
+            &Lookup,
+            start.iter().copied().map(Ref),
+            Successors {
+                edges: edges.to_vec(),
+            },
+        )
+        .await
+        .unwrap()
+    }
+
+    #[tokio::test]
+    async fn test_build_then_dfs_postorder() {
+        let graph = build_graph(&[10], &[(10, 20), (10, 30)]).await;
+
+        let mut visited = Vec::new();
+        graph
+            .depth_first_postorder_traversal([Ref(10)], |node| {
+                visited.push(node.0.0);
+                Ok(())
+            })
+            .unwrap();
+
+        // TODO(nga): should be `[30, 10, 20]`.
+        assert_eq!(vec![30, 20, 10], visited);
+    }
+
+    fn bfs(graph: &Graph<Node>, start: &[u32]) -> Vec<u32> {
+        let mut visited = Vec::new();
+        bfs_preorder(
+            start.iter().map(|i| graph.node_to_index[&Ref(*i)]),
+            GraphSuccessorsImpl { graph },
+            |node| {
+                visited.push(graph.nodes[node as usize].node.0.0);
+            },
+        );
+        visited
+    }
+
+    #[tokio::test]
+    async fn test_take_max_depth() {
+        let graph = build_graph(&[10, 30], &[(10, 20), (10, 30), (20, 30), (30, 40)]).await;
+
+        let graph0 = graph.clone().take_max_depth([Ref(10)], 0);
+        assert_eq!(vec![10], bfs(&graph0, &[10]));
+
+        let graph1 = graph.clone().take_max_depth([Ref(10)], 1);
+        assert_eq!(vec![10, 20, 30], bfs(&graph1, &[10]));
+
+        let graph2 = graph.clone().take_max_depth([Ref(10)], 2);
+        assert_eq!(vec![10, 20, 30, 40], bfs(&graph2, &[10]));
+
+        let graph3 = graph.clone().take_max_depth([Ref(10)], 3);
+        assert_eq!(vec![10, 20, 30, 40], bfs(&graph3, &[10]));
+
+        let graph4 = graph.clone().take_max_depth([Ref(10)], 4);
+        assert_eq!(vec![10, 20, 30, 40], bfs(&graph4, &[10]));
+
+        let graph_2_0 = graph.clone().take_max_depth([Ref(10), Ref(30)], 0);
+        assert_eq!(vec![10, 30], bfs(&graph_2_0, &[10, 30]));
+
+        let graph_2_1 = graph.clone().take_max_depth([Ref(10), Ref(30)], 1);
+        assert_eq!(vec![10, 30, 20, 40], bfs(&graph_2_1, &[10, 30]));
+
+        graph.take_max_depth([], 100);
+    }
+}
diff --git a/app/buck2_query/src/query/graph/node.rs b/app/buck2_query/src/query/graph/node.rs
new file mode 100644
index 0000000000000..1075a93a68960
--- /dev/null
+++ b/app/buck2_query/src/query/graph/node.rs
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::fmt::Debug;
+use std::fmt::Display;
+use std::hash::Hash;
+
+use dupe::Dupe;
+use starlark_map::Hashed;
+
+pub trait NodeKey: Clone + Hash + PartialEq + Eq + Debug + Display + Send + Sync + 'static {}
+
+pub trait LabeledNode: Dupe + Send + Sync {
+    type Key: NodeKey;
+
+    fn node_key(&self) -> &Self::Key;
+
+    fn hashed_node_key(&self) -> Hashed<&Self::Key> {
+        Hashed::new(self.node_key())
+    }
+}
diff --git a/app/buck2_query/src/query/graph/successors.rs b/app/buck2_query/src/query/graph/successors.rs
new file mode 100644
index 0000000000000..de9d5dbb1f341
--- /dev/null
+++ b/app/buck2_query/src/query/graph/successors.rs
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::future::Future;
+
+use buck2_query::query::traversal::ChildVisitor;
+
+use crate::query::graph::node::LabeledNode;
+
+/// Function to return the successors of a node.
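+///
+/// A minimal implementation over a plain adjacency list might look like this
+/// (illustrative sketch; the `AdjList` type is not part of this change):
+///
+/// ```ignore
+/// struct AdjList(Vec<Vec<u32>>);
+///
+/// impl GraphSuccessors<u32> for AdjList {
+///     fn for_each_successor(&self, node: &u32, mut cb: impl FnMut(&u32)) {
+///         for succ in &self.0[*node as usize] {
+///             cb(succ);
+///         }
+///     }
+/// }
+/// ```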
+pub trait GraphSuccessors<N> {
+    fn for_each_successor(&self, node: &N, cb: impl FnMut(&N));
+}
+
+pub trait AsyncChildVisitor<N: LabeledNode>: Send + Sync {
+    fn for_each_child(
+        &self,
+        node: &N,
+        children: impl ChildVisitor<N>,
+    ) -> impl Future<Output = anyhow::Result<()>> + Send;
+}
+
+impl<'a, N: LabeledNode, A: AsyncChildVisitor<N> + ?Sized + Send + Sync> AsyncChildVisitor<N>
+    for &'a A
+{
+    async fn for_each_child(&self, node: &N, children: impl ChildVisitor<N>) -> anyhow::Result<()> {
+        (**self).for_each_child(node, children).await
+    }
+}
diff --git a/app/buck2_query/src/query/graph/vec_as_map.rs b/app/buck2_query/src/query/graph/vec_as_map.rs
new file mode 100644
index 0000000000000..022b5908d2ea3
--- /dev/null
+++ b/app/buck2_query/src/query/graph/vec_as_map.rs
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+#![allow(dead_code)] // Used later in the stack.
+
+use std::fmt;
+use std::fmt::Debug;
+use std::mem;
+
+/// Map `u32` to `T`.
+pub(crate) struct VecAsMap<T> {
+    pub(crate) vec: Vec<Option<T>>,
+}
+
+impl<T: Debug> Debug for VecAsMap<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_map().entries(self.entries()).finish()
+    }
+}
+
+impl<T> Default for VecAsMap<T> {
+    fn default() -> Self {
+        VecAsMap { vec: Vec::new() }
+    }
+}
+
+impl<T> VecAsMap<T> {
+    pub(crate) fn get(&self, index: u32) -> Option<&T> {
+        self.vec.get(index as usize).and_then(|e| e.as_ref())
+    }
+
+    pub(crate) fn get_mut(&mut self, index: u32) -> Option<&mut T> {
+        self.vec.get_mut(index as usize).and_then(|e| e.as_mut())
+    }
+
+    pub(crate) fn contains_key(&self, index: u32) -> bool {
+        self.get(index).is_some()
+    }
+
+    fn entries(&self) -> impl Iterator<Item = (u32, &T)> {
+        self.vec
+            .iter()
+            .enumerate()
+            .filter_map(|(i, e)| e.as_ref().map(|e| (i as u32, e)))
+    }
+
+    pub(crate) fn keys(&self) -> impl Iterator<Item = u32> + '_ {
+        self.entries().map(|(k, _)| k)
+    }
+
+    pub(crate) fn insert(&mut self, index: u32, value: T) -> Option<T> {
+        if self.vec.len() <= index as usize {
+            self.vec.resize_with(index as usize + 1, || None);
+        }
+        mem::replace(&mut self.vec[index as usize], Some(value))
+    }
+}
diff --git a/app/buck2_query/src/query/graph/vec_as_set.rs b/app/buck2_query/src/query/graph/vec_as_set.rs
new file mode 100644
index 0000000000000..ecbba56f3d7d2
--- /dev/null
+++ b/app/buck2_query/src/query/graph/vec_as_set.rs
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+#![allow(dead_code)] // Used later in the stack.
+
+use std::fmt;
+use std::fmt::Debug;
+
+use crate::query::graph::vec_as_map::VecAsMap;
+
+#[derive(Default)]
+pub(crate) struct VecAsSet {
+    // Can use bitset here.
+    vec: VecAsMap<()>,
+}
+
+impl Debug for VecAsSet {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_set().entries(self.vec.keys()).finish()
+    }
+}
+
+impl VecAsSet {
+    pub(crate) fn contains(&self, index: u32) -> bool {
+        self.vec.contains_key(index)
+    }
+
+    /// Return true if the index was not already present.
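+    /// (Mirrors `HashSet::insert`: the backing `VecAsMap<()>` returns the previous
+    /// entry, and `None` means the index was newly inserted.)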
+    pub(crate) fn insert(&mut self, index: u32) -> bool {
+        self.vec.insert(index, ()).is_none()
+    }
+}
diff --git a/app/buck2_query/src/query/graph/visited.rs b/app/buck2_query/src/query/graph/visited.rs
new file mode 100644
index 0000000000000..6148c4899919d
--- /dev/null
+++ b/app/buck2_query/src/query/graph/visited.rs
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+#![allow(dead_code)] // Used later in the stack.
+
+use std::hash::Hash;
+
+use starlark_map::unordered_set;
+use starlark_map::unordered_set::UnorderedSet;
+use starlark_map::Hashed;
+use starlark_map::StarlarkHashValue;
+
+use crate::query::graph::vec_as_set::VecAsSet;
+
+/// A set to store visited nodes.
+pub(crate) trait VisitedNodes<T>: Default {
+    type Hash: Copy;
+
+    fn hash(node: &T) -> Self::Hash;
+
+    fn contains(&self, hash: Self::Hash, node: &T) -> bool;
+    fn insert_clone(&mut self, hash: Self::Hash, node: &T) -> bool
+    where
+        T: Clone;
+}
+
+impl VisitedNodes<u32> for VecAsSet {
+    type Hash = ();
+
+    fn hash(_node: &u32) -> Self::Hash {}
+
+    fn contains(&self, _hash: (), node: &u32) -> bool {
+        self.contains(*node)
+    }
+    fn insert_clone(&mut self, _hash: (), node: &u32) -> bool {
+        self.insert(*node)
+    }
+}
+
+impl<T: Eq + Hash> VisitedNodes<T> for UnorderedSet<T> {
+    type Hash = StarlarkHashValue;
+
+    fn hash(node: &T) -> Self::Hash {
+        StarlarkHashValue::new(node)
+    }
+
+    fn contains(&self, hash: Self::Hash, node: &T) -> bool {
+        self.contains_hashed(Hashed::new_unchecked(hash, node))
+    }
+
+    fn insert_clone(&mut self, hash: Self::Hash, node: &T) -> bool
+    where
+        T: Clone,
+    {
+        match self
+            .raw_entry_mut()
+            .from_entry_hashed(Hashed::new_unchecked(hash, node))
+        {
+            unordered_set::RawEntryMut::Occupied(_) => false,
+            unordered_set::RawEntryMut::Vacant(e) => {
+                e.insert_hashed(Hashed::new_unchecked(hash, node.clone()));
+                true
+            }
+        }
+    }
+}
diff --git a/app/buck2_query/src/query/mod.rs b/app/buck2_query/src/query/mod.rs
deleted file mode 100644
index 7954a7c8a1c80..0000000000000
--- a/app/buck2_query/src/query/mod.rs
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */ - -pub mod buck_types; -pub mod environment; -pub(crate) mod futures_queue_generic; -pub mod syntax; -pub mod traversal; diff --git a/app/buck2_query/src/query/syntax/mod.rs b/app/buck2_query/src/query/syntax.rs similarity index 100% rename from app/buck2_query/src/query/syntax/mod.rs rename to app/buck2_query/src/query/syntax.rs diff --git a/app/buck2_query/src/query/syntax/simple/mod.rs b/app/buck2_query/src/query/syntax/simple.rs similarity index 100% rename from app/buck2_query/src/query/syntax/simple/mod.rs rename to app/buck2_query/src/query/syntax/simple.rs diff --git a/app/buck2_query/src/query/syntax/simple/eval/mod.rs b/app/buck2_query/src/query/syntax/simple/eval.rs similarity index 100% rename from app/buck2_query/src/query/syntax/simple/eval/mod.rs rename to app/buck2_query/src/query/syntax/simple/eval.rs diff --git a/app/buck2_query/src/query/syntax/simple/eval/error.rs b/app/buck2_query/src/query/syntax/simple/eval/error.rs index 7d81c890a2687..938b7054bd5fc 100644 --- a/app/buck2_query/src/query/syntax/simple/eval/error.rs +++ b/app/buck2_query/src/query/syntax/simple/eval/error.rs @@ -11,11 +11,10 @@ use buck2_core::fs::project::ProjectRoot; use buck2_query_parser::spanned::Spanned; -use thiserror::Error; /// While this is a std Error type, we generally don't use it directly. It's instead wrapped in a Spanned and can be converted to a normal error /// with QueryError::convert_error (which will resolve the spans to context messages). -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] pub enum QueryError { #[error("unknown function `{0}`")] UnknownFunction(String), diff --git a/app/buck2_query/src/query/syntax/simple/eval/evaluator.rs b/app/buck2_query/src/query/syntax/simple/eval/evaluator.rs index 135f062ed1e55..620d5b7e1eae3 100644 --- a/app/buck2_query/src/query/syntax/simple/eval/evaluator.rs +++ b/app/buck2_query/src/query/syntax/simple/eval/evaluator.rs @@ -68,7 +68,7 @@ impl<'e, Env: QueryEnvironment> QueryEvaluator<'e, Env> { Expr::BinaryOpSequence(left, exprs) => { let (left, rights) = futures::future::try_join( self.eval(left), - futures::future::try_join_all(exprs.iter().map(|(op, expr)| async move { + buck2_util::future::try_join_all(exprs.iter().map(|(op, expr)| async move { let value = self.eval(expr).await?; Ok((op, value)) })), @@ -134,7 +134,7 @@ impl<'e, Env: QueryEnvironment> QueryEvaluator<'e, Env> { ) -> QueryResult> { self.eval(expr) .await? - .async_into_map_res(async move |value| { + .async_into_map_res(|value| async move { match value { // A top-level string we treat as a target pattern and resolve it. This allows something like // `buck2 query //lib/...` to resolve to the corresponding targets. 
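Note on the closure change in `evaluator.rs` above: `async move |value| { .. }` required the unstable `async_closure` feature at the time, while `|value| async move { .. }` is the stable spelling of a closure that returns a future. A minimal sketch of the equivalence (standalone; the `run` helper and the tokio runtime here are illustrative, not from this patch):

```rust
use std::future::Future;

// Any helper that accepts a closure returning a future, like
// `async_into_map_res` above, can take this shape.
async fn run<F, Fut>(f: F) -> i32
where
    F: FnOnce(i32) -> Fut,
    Fut: Future<Output = i32>,
{
    f(1).await
}

#[tokio::main]
async fn main() {
    // Stable Rust: a plain closure whose body is an `async move` block.
    let v = run(|value| async move { value * 2 }).await;
    assert_eq!(v, 2);
}
```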
diff --git a/app/buck2_query/src/query/syntax/simple/eval/file_set.rs b/app/buck2_query/src/query/syntax/simple/eval/file_set.rs index 188e1e251e7bd..e1c73c513afa4 100644 --- a/app/buck2_query/src/query/syntax/simple/eval/file_set.rs +++ b/app/buck2_query/src/query/syntax/simple/eval/file_set.rs @@ -76,7 +76,7 @@ impl FileSet { pub fn owner( &self, - _env: &dyn QueryEnvironment, + _env: &impl QueryEnvironment, ) -> anyhow::Result> { Err(anyhow::anyhow!(QueryError::FunctionUnimplemented( "owner()" @@ -97,6 +97,18 @@ impl FileSet { pub fn len(&self) -> usize { self.files.len() } + + pub fn contains(&self, item: &FileNode) -> bool { + self.files.contains(item) + } + + pub fn intersect(&self, right: &FileSet) -> anyhow::Result { + self.filter(|file| Ok(right.contains(file))) + } + + pub fn difference(&self, right: &FileSet) -> anyhow::Result { + self.filter(|file| Ok(!right.contains(file))) + } } impl FromIterator for FileSet { diff --git a/app/buck2_query/src/query/syntax/simple/eval/label_indexed.rs b/app/buck2_query/src/query/syntax/simple/eval/label_indexed.rs index fc28640eb843e..c1d9ecf0b6673 100644 --- a/app/buck2_query/src/query/syntax/simple/eval/label_indexed.rs +++ b/app/buck2_query/src/query/syntax/simple/eval/label_indexed.rs @@ -15,52 +15,53 @@ use dupe::Dupe; use starlark_map::ordered_set::OrderedSet; use starlark_map::small_set; use starlark_map::Equivalent; +use starlark_map::Hashed; -use crate::query::environment::LabeledNode; +use crate::query::graph::node::LabeledNode; #[derive(Debug, Clone, Dupe, Allocative)] pub struct LabelIndexed(pub T); impl PartialEq for LabelIndexed { fn eq(&self, other: &Self) -> bool { - self.0.node_ref() == other.0.node_ref() + self.0.node_key() == other.0.node_key() } } impl Eq for LabelIndexed {} impl Hash for LabelIndexed { fn hash(&self, state: &mut H) { - self.0.node_ref().hash(state) + self.0.hashed_node_key().hash().hash(state) } } impl Ord for LabelIndexed where - T::NodeRef: Ord, + T::Key: Ord, { fn cmp(&self, other: &Self) -> std::cmp::Ordering { - self.0.node_ref().cmp(other.0.node_ref()) + self.0.node_key().cmp(other.0.node_key()) } } impl PartialOrd for LabelIndexed where - T::NodeRef: PartialOrd, + T::Key: PartialOrd, { fn partial_cmp(&self, other: &Self) -> Option { - self.0.node_ref().partial_cmp(other.0.node_ref()) + self.0.node_key().partial_cmp(other.0.node_key()) } } -struct LabelIndexer<'a, T: LabeledNode>(&'a T::NodeRef); +struct LabelIndexer<'a, T: LabeledNode>(Hashed<&'a T::Key>); impl<'a, T: LabeledNode> Equivalent> for LabelIndexer<'a, T> { fn equivalent(&self, key: &LabelIndexed) -> bool { - self.0.eq(key.0.node_ref()) + *self.0.key() == key.0.node_key() } } impl<'a, T: LabeledNode> Hash for LabelIndexer<'a, T> { fn hash(&self, state: &mut H) { - self.0.hash(state) + self.0.hash().hash(state) } } @@ -88,12 +89,16 @@ impl LabelIndexedSet { self.nodes.len() } - pub fn get(&self, value: &T::NodeRef) -> Option<&T> { - self.nodes.get(&LabelIndexer(value)).map(|e| &e.0) + pub fn get(&self, value: &T::Key) -> Option<&T> { + self.nodes + .get(&LabelIndexer(Hashed::new(value))) + .map(|e| &e.0) } - pub fn take(&mut self, value: &T::NodeRef) -> Option { - self.nodes.take(&LabelIndexer(value)).map(|e| e.0) + pub fn take(&mut self, value: &T::Key) -> Option { + self.nodes + .take(&LabelIndexer(Hashed::new(value))) + .map(|e| e.0) } pub fn iter(&self) -> Iter { @@ -111,16 +116,20 @@ impl LabelIndexedSet { self.nodes.insert(LabelIndexed(value)) } - pub fn contains(&self, value: &T::NodeRef) -> bool { - 
self.nodes.contains(&LabelIndexer(value)) + pub fn insert_unique_unchecked(&mut self, value: T) { + self.nodes.insert_unique_unchecked(LabelIndexed(value)); + } + + pub fn contains(&self, value: &T::Key) -> bool { + self.nodes.contains(&LabelIndexer(Hashed::new(value))) } pub fn get_index(&self, index: usize) -> Option<&T> { self.nodes.get_index(index).map(|e| &e.0) } - pub fn get_index_of(&self, value: &T::NodeRef) -> Option { - self.nodes.get_index_of(&LabelIndexer(value)) + pub fn get_index_of(&self, value: &T::Key) -> Option { + self.nodes.get_index_of(&LabelIndexer(Hashed::new(value))) } pub fn last(&self) -> Option<&T> { diff --git a/app/buck2_query/src/query/syntax/simple/eval/literals.rs b/app/buck2_query/src/query/syntax/simple/eval/literals.rs index d8ecffeb7a5c5..f3d7756c0615c 100644 --- a/app/buck2_query/src/query/syntax/simple/eval/literals.rs +++ b/app/buck2_query/src/query/syntax/simple/eval/literals.rs @@ -23,13 +23,12 @@ use crate::query::syntax::simple::functions::QueryLiteralVisitor; pub fn extract_target_literals( functions: &F, query: &str, - result: &mut SmallSet, -) -> anyhow::Result<()> { +) -> anyhow::Result> { let parsed = parse_expr(query)?; - struct LiteralExtractor<'a> { - literals: &'a mut SmallSet, + struct LiteralExtractor { + literals: SmallSet, } - impl QueryLiteralVisitor for LiteralExtractor<'_> { + impl QueryLiteralVisitor for LiteralExtractor { fn target_pattern(&mut self, pattern: &str) -> anyhow::Result<()> { if pattern != QUERY_PERCENT_S_PLACEHOLDER { self.literals.get_or_insert_owned(pattern); @@ -37,9 +36,11 @@ pub fn extract_target_literals( Ok(()) } } - let mut visitor = LiteralExtractor { literals: result }; + let mut visitor = LiteralExtractor { + literals: SmallSet::new(), + }; functions .visit_literals(&mut visitor, &parsed) .into_anyhow(query)?; - Ok(()) + Ok(Vec::from_iter(visitor.literals)) } diff --git a/app/buck2_query/src/query/syntax/simple/eval/multi_query.rs b/app/buck2_query/src/query/syntax/simple/eval/multi_query.rs index 8b986664e9102..2dd1794c4abd3 100644 --- a/app/buck2_query/src/query/syntax/simple/eval/multi_query.rs +++ b/app/buck2_query/src/query/syntax/simple/eval/multi_query.rs @@ -8,11 +8,12 @@ */ //! Implementation of the cli and query_* attr query language. -use buck2_query_parser::placeholder::QUERY_PERCENT_S_PLACEHOLDER; -use futures::stream::FuturesOrdered; -use futures::Future; -use futures::StreamExt; + +use std::iter; + +use dupe::Dupe; use indexmap::IndexMap; +use itertools::Either; use crate::query::environment::QueryTarget; use crate::query::syntax::simple::eval::set::TargetSet; @@ -20,7 +21,7 @@ use crate::query::syntax::simple::eval::values::QueryEvaluationValue; /// Used to represent the results for a "multi-query" (one that contains a "%s" and potentially is applied against multiple literals). 
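+/// For example, `buck2 uquery "deps(%s)" //foo:a //foo:b` (illustrative invocation)
+/// evaluates `deps()` once per argument and collects one entry per substitution.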
pub struct MultiQueryResult( - pub IndexMap>>, + pub IndexMap>>, ); impl MultiQueryResult { @@ -50,31 +51,11 @@ impl MultiQueryResult { } Ok(results) } -} -pub async fn process_multi_query>( - query: &str, - query_args: &[A], - func: F, -) -> MultiQueryResult -where - T: QueryTarget, - Fut: Future>)>, - F: Fn(String, String) -> Fut, -{ - let mut queue: FuturesOrdered<_> = query_args - .iter() - .map(|input| { - let input = input.as_ref(); - let query = query.replace(QUERY_PERCENT_S_PLACEHOLDER, input); - let input = input.to_owned(); - func(input, query) + pub(crate) fn targets(&self) -> impl Iterator> { + self.0.values().flat_map(|r| match r { + Ok(v) => Either::Left(v.targets()), + Err(e) => Either::Right(iter::once(Err(e.dupe()))), }) - .collect(); - - let mut results = IndexMap::new(); - while let Some((query, result)) = queue.next().await { - results.insert(query, result); } - MultiQueryResult(results) } diff --git a/app/buck2_query/src/query/syntax/simple/eval/set.rs b/app/buck2_query/src/query/syntax/simple/eval/set.rs index 8055a97321ca2..84389139853b2 100644 --- a/app/buck2_query/src/query/syntax/simple/eval/set.rs +++ b/app/buck2_query/src/query/syntax/simple/eval/set.rs @@ -11,16 +11,13 @@ use std::fmt; use std::fmt::Display; use allocative::Allocative; -use buck2_query::query::environment::LabeledNode; use display_container::fmt_container; use dupe::IterDupedExt; use fancy_regex::Regex; use fancy_regex::RegexBuilder; use indexmap::IndexSet; -use crate::query::environment::NodeLabel; use crate::query::environment::QueryTarget; -use crate::query::syntax::simple::eval::error::QueryError; use crate::query::syntax::simple::eval::file_set::FileNode; use crate::query::syntax::simple::eval::file_set::FileSet; use crate::query::syntax::simple::eval::label_indexed; @@ -48,6 +45,10 @@ impl TargetSet { self.targets.insert(value) } + pub fn insert_unique_unchecked(&mut self, value: T) { + self.targets.insert_unique_unchecked(value) + } + pub fn is_empty(&self) -> bool { self.targets.len() == 0 } @@ -56,11 +57,14 @@ impl TargetSet { self.targets.len() } - fn filter anyhow::Result>(&self, filter: F) -> anyhow::Result> { + pub(crate) fn filter anyhow::Result>( + &self, + filter: F, + ) -> anyhow::Result> { let mut targets = LabelIndexedSet::new(); for target in self.targets.iter() { if filter(target)? { - targets.insert(target.dupe()); + targets.insert_unique_unchecked(target.dupe()); } } Ok(Self { targets }) @@ -85,14 +89,6 @@ impl TargetSet { Ok(FileSet::new(files)) } - // TODO(cjhopman): Does this even make sense? 
- // TODO(cjhopman): I think this needs a heap to allocate values - pub fn labels(&self, _attr: &str) -> anyhow::Result<()> { - Err(anyhow::anyhow!(QueryError::FunctionUnimplemented( - "labels()" - ))) - } - pub fn union(&self, right: &TargetSet) -> TargetSet { let mut targets = LabelIndexedSet::new(); for target in self.targets.iter() { @@ -104,8 +100,8 @@ impl TargetSet { Self { targets } } - pub fn iter_names(&self) -> impl Iterator + Clone { - self.targets.iter().map(|e| e.node_ref()) + pub fn iter_names(&self) -> impl Iterator + Clone { + self.targets.iter().map(|e| e.node_key()) } pub fn iter(&self) -> Iter { @@ -117,11 +113,11 @@ impl TargetSet { self.targets.into_iter() } - pub fn contains(&self, item: &T::NodeRef) -> bool { + pub fn contains(&self, item: &T::Key) -> bool { self.targets.contains(item) } - pub fn get(&self, item: &T::NodeRef) -> Option<&T> { + pub fn get(&self, item: &T::Key) -> Option<&T> { self.targets.get(item) } @@ -129,7 +125,7 @@ impl TargetSet { self.targets.get_index(index) } - pub fn get_index_of(&self, item: &T::NodeRef) -> Option { + pub fn get_index_of(&self, item: &T::Key) -> Option { self.targets.get_index_of(item) } @@ -172,78 +168,63 @@ impl FromIterator for TargetSet { /// This contains additional TargetSet functions implemented via the core /// functions on TargetSet itself. -pub trait TargetSetExt { - type T: QueryTarget; - - fn filter anyhow::Result>( - &self, - filter: F, - ) -> anyhow::Result>; - - fn attrfilter( +impl TargetSet { + pub fn attrfilter( &self, attribute: &str, filter: &dyn Fn(&str) -> anyhow::Result, - ) -> anyhow::Result> { + ) -> anyhow::Result> { self.filter(move |node| { node.map_attr(attribute, |val| match val { None => Ok(false), - Some(v) => Self::T::attr_any_matches(v, &filter), + Some(v) => T::attr_any_matches(v, &filter), }) }) } - fn nattrfilter( + pub fn nattrfilter( &self, attribute: &str, filter: &dyn Fn(&str) -> anyhow::Result, - ) -> anyhow::Result> { + ) -> anyhow::Result> { self.filter(move |node| { node.map_attr(attribute, |val| match val { None => Ok(false), - Some(v) => Ok(!Self::T::attr_any_matches(v, &filter)?), + Some(v) => Ok(!T::attr_any_matches(v, &filter)?), }) }) } - fn attrregexfilter(&self, attribute: &str, value: &str) -> anyhow::Result> { + pub fn attrregexfilter(&self, attribute: &str, value: &str) -> anyhow::Result> { let regex = Regex::new(value)?; let filter = move |s: &'_ str| -> anyhow::Result { Ok(regex.is_match(s)?) }; self.attrfilter(attribute, &filter) } /// Filter targets by fully qualified name using regex partial match. 
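+    /// E.g. `filter_name("^//foo[:/]")` keeps `//foo:bar` and `//foo/sub:lib`; the
+    /// regex is unanchored by default, so matching anywhere in the name suffices.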
- fn filter_name(&self, regex: &str) -> anyhow::Result> { + pub fn filter_name(&self, regex: &str) -> anyhow::Result> { let mut re = RegexBuilder::new(regex); re.delegate_dfa_size_limit(100 << 20); let re = re.build()?; - self.filter(|node| Ok(re.is_match(&node.node_ref().label_for_filter())?)) + self.filter(|node| Ok(re.is_match(&node.label_for_filter())?)) } - fn kind(&self, regex: &str) -> anyhow::Result> { + pub fn kind(&self, regex: &str) -> anyhow::Result> { let re = Regex::new(regex)?; self.filter(|node| Ok(re.is_match(&node.rule_type())?)) } - fn intersect(&self, right: &TargetSet) -> anyhow::Result> { - self.filter(|node| Ok(right.contains(node.node_ref()))) + pub fn intersect(&self, right: &TargetSet) -> anyhow::Result> { + self.filter(|node| Ok(right.contains(node.node_key()))) } - fn difference(&self, right: &TargetSet) -> anyhow::Result> { - self.filter(|node| Ok(!right.contains(node.node_ref()))) - } -} - -impl TargetSetExt for TargetSet { - type T = T; - - fn filter anyhow::Result>(&self, filter: F) -> anyhow::Result> { - TargetSet::filter(self, filter) + pub fn difference(&self, right: &TargetSet) -> anyhow::Result> { + self.filter(|node| Ok(!right.contains(node.node_key()))) } } impl Display for TargetSet { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt_container(f, "[", "]", self.targets.iter().map(|t| t.node_ref())) + fmt_container(f, "[", "]", self.targets.iter().map(|t| t.node_key())) } } diff --git a/app/buck2_query/src/query/syntax/simple/eval/tests.rs b/app/buck2_query/src/query/syntax/simple/eval/tests.rs index fe80fd6dba4b1..ba2bc1b88440c 100644 --- a/app/buck2_query/src/query/syntax/simple/eval/tests.rs +++ b/app/buck2_query/src/query/syntax/simple/eval/tests.rs @@ -17,38 +17,36 @@ use async_trait::async_trait; use buck2_core::build_file_path::BuildFilePath; use buck2_core::cells::cell_path::CellPath; use buck2_core::configuration::compatibility::MaybeCompatible; -use buck2_query::query::environment::LabeledNode; use buck2_query_parser::parse_expr; use derive_more::Display; use dupe::Dupe; -use serde::Serialize; -use serde::Serializer; -use crate::query::environment::NodeLabel; use crate::query::environment::QueryEnvironment; use crate::query::environment::QueryTarget; +use crate::query::graph::node::LabeledNode; +use crate::query::graph::node::NodeKey; +use crate::query::graph::successors::AsyncChildVisitor; use crate::query::syntax::simple::eval::error::QueryError; use crate::query::syntax::simple::eval::evaluator::QueryEvaluator; use crate::query::syntax::simple::eval::file_set::FileSet; use crate::query::syntax::simple::eval::set::TargetSet; use crate::query::syntax::simple::functions::DefaultQueryFunctionsModule; -use crate::query::traversal::AsyncTraversalDelegate; #[derive(Clone, Hash, PartialEq, Eq, Debug, Display)] struct TargetRef(String); -impl NodeLabel for TargetRef {} +impl NodeKey for TargetRef {} -#[derive(Debug, Display, Serialize)] +#[derive(Debug, Display)] struct TargetAttr(String); #[derive(Debug, Clone, Dupe, Eq, PartialEq)] struct Target {} impl LabeledNode for Target { - type NodeRef = TargetRef; + type Key = TargetRef; - fn node_ref(&self) -> &Self::NodeRef { + fn node_key(&self) -> &Self::Key { unimplemented!() } } @@ -64,20 +62,47 @@ impl QueryTarget for Target { unimplemented!() } - fn buildfile_path(&self) -> &BuildFilePath { + fn name(&self) -> Cow { unimplemented!() } - fn deps<'a>(&'a self) -> Box + Send + 'a> { + fn buildfile_path(&self) -> &BuildFilePath { unimplemented!() } - fn exec_deps<'a>(&'a self) -> Box + 
Send + 'a> { - unimplemented!() + fn deps<'a>(&'a self) -> impl Iterator + Send + 'a { + let _iterator: Box + Send + 'a>; + unimplemented!(); + #[allow(unreachable_code)] + _iterator } - fn target_deps<'a>(&'a self) -> Box + Send + 'a> { - unimplemented!() + fn exec_deps<'a>(&'a self) -> impl Iterator + Send + 'a { + let _iterator: Box + Send + 'a>; + unimplemented!(); + #[allow(unreachable_code)] + _iterator + } + + fn target_deps<'a>(&'a self) -> impl Iterator + Send + 'a { + let _iterator: Box + Send + 'a>; + unimplemented!(); + #[allow(unreachable_code)] + _iterator + } + + fn configuration_deps<'a>(&'a self) -> impl Iterator + Send + 'a { + let _iterator: Box + Send + 'a>; + unimplemented!(); + #[allow(unreachable_code)] + _iterator + } + + fn toolchain_deps<'a>(&'a self) -> impl Iterator + Send + 'a { + let _iterator: Box + Send + 'a>; + unimplemented!(); + #[allow(unreachable_code)] + _iterator } fn special_attrs_for_each) -> Result<(), E>>( @@ -101,24 +126,15 @@ impl QueryTarget for Target { unimplemented!() } - fn map_attr>) -> R>(&self, _key: &str, _func: F) -> R { + fn defined_attrs_for_each) -> Result<(), E>>( + &self, + _func: F, + ) -> Result<(), E> { unimplemented!() } - fn call_stack(&self) -> Option { - None - } - - fn attr_to_string_alternate(&self, _attr: &Self::Attr<'_>) -> String { - unimplemented!("not needed for tests") - } - - fn attr_serialize( - &self, - _attr: &Self::Attr<'_>, - _serializer: S, - ) -> Result { - unimplemented!("not needed for tests") + fn map_attr>) -> R>(&self, _key: &str, _func: F) -> R { + unimplemented!() } } @@ -149,7 +165,8 @@ impl QueryEnvironment for Env { async fn dfs_postorder( &self, _root: &TargetSet, - _delegate: &mut dyn AsyncTraversalDelegate, + _delegate: impl AsyncChildVisitor, + _visit: impl FnMut(Self::Target) -> anyhow::Result<()> + Send, ) -> anyhow::Result<()> { unimplemented!() } @@ -157,7 +174,8 @@ impl QueryEnvironment for Env { async fn depth_limited_traversal( &self, _root: &TargetSet, - _delegate: &mut dyn AsyncTraversalDelegate, + _delegate: impl AsyncChildVisitor, + _visit: impl FnMut(Self::Target) -> anyhow::Result<()> + Send, _depth: u32, ) -> anyhow::Result<()> { unimplemented!() @@ -166,6 +184,13 @@ impl QueryEnvironment for Env { async fn owner(&self, _paths: &FileSet) -> anyhow::Result> { unimplemented!() } + + async fn targets_in_buildfile( + &self, + _paths: &FileSet, + ) -> anyhow::Result> { + unimplemented!() + } } #[tokio::test] diff --git a/app/buck2_query/src/query/syntax/simple/eval/values.rs b/app/buck2_query/src/query/syntax/simple/eval/values.rs index 031241eb2d0c1..f8b82b2c7e877 100644 --- a/app/buck2_query/src/query/syntax/simple/eval/values.rs +++ b/app/buck2_query/src/query/syntax/simple/eval/values.rs @@ -9,8 +9,11 @@ //! Implementation of the cli and query_* attr query language. +use std::iter; + use buck2_query_parser::spanned::Spanned; use gazebo::variants::VariantName; +use itertools::Either; use crate::query::environment::QueryTarget; use crate::query::syntax::simple::eval::error::QueryError; @@ -23,6 +26,16 @@ pub enum QueryEvaluationResult { Multiple(MultiQueryResult), } +impl QueryEvaluationResult { + /// All the targets from all query results. + pub fn targets(&self) -> impl Iterator> { + match self { + QueryEvaluationResult::Single(v) => Either::Left(v.targets()), + QueryEvaluationResult::Multiple(v) => Either::Right(v.targets()), + } + } +} + /// Used as a value in query evaluation, may appear in arguments to functions, results of functions etc. 
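+/// E.g. a bare string may later be promoted to a target set or file set, as the
+/// binary-operator implementations in `functions.rs` below do.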
#[derive(Debug, VariantName, Eq, PartialEq)] pub enum QueryValue { @@ -52,13 +65,22 @@ impl QueryEvaluationValue { pub fn try_into_targets(self) -> anyhow::Result> { match self { QueryEvaluationValue::TargetSet(targets) => Ok(targets), - v => { - return Err(QueryError::InvalidType { - expected: "targets", - actual: v.variant_name(), - } - .into()); + v => Err(QueryError::InvalidType { + expected: "targets", + actual: v.variant_name(), + } + .into()), + } + } + + pub(crate) fn targets(&self) -> impl Iterator> { + match self { + QueryEvaluationValue::TargetSet(targets) => Either::Left(targets.iter().map(Ok)), + v => Either::Right(iter::once(Err(QueryError::InvalidType { + expected: "targets", + actual: v.variant_name(), } + .into()))), } } } diff --git a/app/buck2_query/src/query/syntax/simple/functions.rs b/app/buck2_query/src/query/syntax/simple/functions.rs new file mode 100644 index 0000000000000..bfef26a0152db --- /dev/null +++ b/app/buck2_query/src/query/syntax/simple/functions.rs @@ -0,0 +1,845 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::fmt; +use std::fmt::Debug; +use std::marker::PhantomData; + +use allocative::Allocative; +use async_trait::async_trait; +use buck2_core::configuration::compatibility::MaybeCompatible; +use buck2_query_derive::query_module; +use buck2_query_parser::spanned::Spanned; +use buck2_query_parser::BinaryOp; +use buck2_query_parser::Expr; +use gazebo::variants::VariantName; + +use crate::query::environment::QueryEnvironment; +use crate::query::syntax::simple::eval::error::QueryError; +use crate::query::syntax::simple::eval::evaluator::QueryEvaluator; +use crate::query::syntax::simple::eval::file_set::FileSet; +use crate::query::syntax::simple::eval::set::TargetSet; +use crate::query::syntax::simple::eval::values::QueryResult; +use crate::query::syntax::simple::eval::values::QueryValue; +use crate::query::syntax::simple::eval::values::QueryValueSet; +use crate::query::syntax::simple::functions::deps::DepsFunction; +use crate::query::syntax::simple::functions::docs::ModuleDescription; +use crate::query::syntax::simple::functions::helpers::CapturedExpr; +use crate::query::syntax::simple::functions::helpers::QueryArgType; +use crate::query::syntax::simple::functions::helpers::QueryBinaryOp; +use crate::query::syntax::simple::functions::helpers::QueryFunction; + +pub mod deps; +pub mod description; +pub mod docs; +pub mod helpers; + +pub trait QueryLiteralVisitor { + fn target_pattern(&mut self, pattern: &str) -> anyhow::Result<()>; +} + +pub trait HasModuleDescription { + fn describe() -> ModuleDescription; +} + +#[async_trait] +pub trait QueryFunctions: Debug + Send + Sync { + type Env: QueryEnvironment; + + fn get(&self, name: &str) -> Option<&dyn QueryFunction>; + + fn get_op(&self, op: BinaryOp) -> Option<&dyn QueryBinaryOp>; +} + +pub trait QueryFunctionsVisitLiterals: Debug + Send + Sync { + fn visit_literals( + &self, + visitor: &mut dyn QueryLiteralVisitor, + expr: &Spanned, + ) -> QueryResult<()>; +} + +impl QueryFunctionsVisitLiterals for F { + fn visit_literals( + &self, + visitor: &mut dyn QueryLiteralVisitor, + expr: &Spanned, + ) -> QueryResult<()> { + fn visit_literals_recurse( + this: &F, + visitor: &mut dyn QueryLiteralVisitor, + expr: &Expr, + ) -> 
Result<(), QueryError> { + match expr { + Expr::Function { + function_name, + args, + } => match this.get(function_name) { + Some(func) => { + for (i, arg) in args.iter().enumerate() { + visit_literals_item( + this, + visitor, + arg, + matches!( + func.arg_type(i)?, + QueryArgType::TargetSet + | QueryArgType::Set + | QueryArgType::Value + ), + )?; + } + Ok(()) + } + None => Err(QueryError::UnknownFunction( + (*function_name.fragment()).to_owned(), + )), + }, + Expr::BinaryOpSequence(left, exprs) => { + visit_literals_item(this, visitor, left, true)?; + // All binary ops are on targetsets currently. + for (_, right) in exprs { + visit_literals_item(this, visitor, right, true)?; + } + Ok(()) + } + Expr::Set(args) => { + for arg in args { + visitor.target_pattern(arg)?; + } + Ok(()) + } + Expr::FileSet(_args) => Ok(()), + Expr::String(..) | Expr::Integer(..) => { + panic!( + "This shouldn't be called with literals, they should be handled in the caller" + ) + } + } + } + + fn visit_literals_item( + this: &F, + visitor: &mut dyn QueryLiteralVisitor, + expr: &Spanned, + is_target_expr: bool, + ) -> QueryResult<()> { + expr.map_res(|value| -> Result<(), QueryError> { + match value { + Expr::String(val) => { + if is_target_expr { + visitor.target_pattern(val)?; + } + } + Expr::Integer(..) => { + // ignored + } + _ => visit_literals_recurse(this, visitor, value)?, + } + Ok(()) + }) + } + + visit_literals_item(self, visitor, expr, true) + } +} + +#[derive(Allocative)] +#[allocative(bound = "")] +pub struct DefaultQueryFunctionsModule { + implementation: DefaultQueryFunctions, +} + +impl Debug for DefaultQueryFunctionsModule { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("DefaultQueryFunctionsModule") + .finish_non_exhaustive() + } +} + +impl DefaultQueryFunctionsModule { + pub fn new() -> Self { + Self { + implementation: DefaultQueryFunctions::new(), + } + } +} + +type QueryFuncResult = + std::result::Result::Target>, QueryError>; + +async fn accept_target_set( + env: &Env, + val: QueryValue, +) -> Result, QueryError> { + match val { + QueryValue::TargetSet(x) => Ok(x), + QueryValue::String(literal) => Ok(env.eval_literals(&[&literal]).await?), + _ => Err(QueryError::InvalidType { + expected: "target_set", + actual: val.variant_name(), + }), + } +} + +/// Common query functions +#[query_module(Env)] +impl DefaultQueryFunctionsModule { + /// Computes all dependency paths. + /// + /// The `allpaths(from, to)` function evaluates to the graph formed by paths between the target expressions from and to, following the dependencies between nodes. For example, the value of + /// `buck query "allpaths('//foo:bar', '//foo/bar/lib:baz')"` + /// is the dependency graph rooted at the single target node `//foo:bar`, that includes all target nodes that depend (transitively) on `//foo/bar/lib:baz`. + /// + /// The two arguments to `allpaths()` can themselves be expressions. For example, the command: + /// `buck query "allpaths(kind(java_library, '//...'), '//foo:bar')"` + /// shows all the paths between any java_library in the repository and the target `//foo:bar`. + /// + /// We recommend using `allpaths()` with the `--output-format=dot` parameter to generate a graphviz file that can then be rendered as an image. For example: + /// + /// ```ignore + /// $ buck query "allpaths('//foo:bar', '//foo/bar/lib:baz')" --output-format=dot --output-file=result.dot + /// $ dot -Tpng result.dot -o image.png + /// ``` + /// + /// Graphviz is an open-source graph-visualization software tool. 
Graphviz uses the dot language to describe graphs. + async fn allpaths( + &self, + evaluator: &QueryEvaluator<'_, Env>, + from: TargetSet, + to: TargetSet, + captured_expr: Option>, + ) -> QueryFuncResult { + Ok(self + .implementation + .allpaths( + evaluator.env(), + evaluator.functions(), + &from, + &to, + captured_expr.as_ref(), + ) + .await? + .into()) + } + + async fn somepath( + &self, + evaluator: &QueryEvaluator<'_, Env>, + from: TargetSet, + to: TargetSet, + captured_expr: Option>, + ) -> QueryFuncResult { + Ok(self + .implementation + .somepath( + evaluator.env(), + evaluator.functions(), + &from, + &to, + captured_expr.as_ref(), + ) + .await? + .into()) + } + + /// The `attrfilter(attribute, value, targets)` operator evaluates the given target expression and filters the resulting build targets to those where the specified attribute contains the specified value. + /// In this context, the term attribute refers to an argument in a build rule, such as name, headers, srcs, or deps. + /// + /// - If the attribute is a single value, say `name`, it is compared to the specified value, and the target is returned if they match. + /// - If the attribute is a list, the target is returned if that list contains the specified value. + /// - If the attribute is a dictionary, the target is returned if the value exists in either the keys or the values of the dictionary. + /// + /// For example: + /// `buck2 query "attrfilter(deps, '//foo:bar', '//...')"` returns the build targets in the repository that depend on `//foo:bar`, or more precisely: those build targets that include `//foo:bar` in their deps argument list. + async fn attrfilter( + &self, + attr: String, + value: String, + targets: TargetSet, + ) -> QueryFuncResult { + Ok(self + .implementation + .attrfilter(&attr, &value, &targets)? + .into()) + } + + async fn nattrfilter( + &self, + attr: String, + value: String, + targets: TargetSet, + ) -> QueryFuncResult { + Ok(self + .implementation + .nattrfilter(&attr, &value, &targets)? + .into()) + } + + async fn attrregexfilter( + &self, + attr: String, + value: String, + targets: TargetSet, + ) -> QueryFuncResult { + Ok(self + .implementation + .attrregexfilter(&attr, &value, &targets)? + .into()) + } + + async fn buildfile(&self, targets: TargetSet) -> QueryFuncResult { + Ok(self.implementation.buildfile(&targets).into()) + } + + async fn rbuildfiles( + &self, + env: &Env, + universe: FileSet, + argset: FileSet, + ) -> QueryFuncResult { + Ok(self + .implementation + .rbuildfiles(env, &universe, &argset) + .await? + .into()) + } + + async fn allbuildfiles( + &self, + env: &Env, + universe: TargetSet, + ) -> QueryFuncResult { + Ok(self + .implementation + .allbuildfiles(env, &universe) + .await? + .into()) + } + + async fn deps( + &self, + evaluator: &QueryEvaluator<'_, Env>, + targets: TargetSet, + depth: Option, + captured_expr: Option>, + ) -> QueryFuncResult { + Ok(self + .implementation + .deps( + evaluator.env(), + evaluator.functions(), + &targets, + depth.map(|v| v as i32), + captured_expr.as_ref(), + ) + .await? + .into()) + } + + /// Filter using regex partial match. + /// Target are matched against their fully qualified name. + /// Files are matched against their repo path like `repo//foo/bar/baz.py`. + async fn filter(&self, regex: String, set: QueryValueSet) -> QueryFuncResult { + match set { + QueryValueSet::TargetSet(targets) => Ok(self + .implementation + .filter_target_set(®ex, &targets)? 
+ .into()), + QueryValueSet::FileSet(files) => { + Ok(self.implementation.filter_file_set(®ex, &files)?.into()) + } + } + } + + async fn inputs(&self, targets: TargetSet) -> QueryFuncResult { + Ok(self.implementation.inputs(&targets)?.into()) + } + + /// The `kind(regex, targets)` operator evaluates the specified target expression, `targets`, and returns the targets where the rule type matches the specified `regex`. + /// The specified pattern can be a regular expression. For example, + /// `buck2 query "kind('java.*', deps('//foo:bar'))"` returns the targets that match the rule type `java.*` (`java_library`, `java_binary`, etc.) in the transitive dependencies of `//foo:bar`. + async fn kind(&self, regex: String, targets: TargetSet) -> QueryFuncResult { + Ok(targets.kind(®ex)?.into()) + } + + /// This function is not implemented, and won't be, because buck2 query core does not support + /// returning both files and targets from a single function. + /// + /// In buck1 it returns targets and files referenced by the given attribute + /// in the given targets. + /// + /// Some discussion in T126638795. + async fn labels(&self, attr: String, targets: TargetSet) -> QueryFuncResult { + self.implementation.labels(&attr, &targets) + } + + /// The `owner(inputfile)` operator returns the targets that own the specified inputfile. + /// In this context, own means that the target has the specified file as an input. You could consider the `owner()` and `inputs()` operators to be inverses of each other. + /// + /// Example: `buck2 query "owner('examples/1.txt')"` returns the targets that owns the file `examples/1.txt`, which could be a value such as `//examples:one`. + /// + /// It is possible for the specified file to have multiple owners, in which case, owner() returns a set of targets. + /// + /// If no owner for the file is found, owner() outputs the message: `No owner was found for ` + async fn owner(&self, env: &Env, files: FileSet) -> QueryFuncResult { + Ok(self.implementation.owner(env, &files).await?.into()) + } + + async fn targets_in_buildfile(&self, env: &Env, files: FileSet) -> QueryFuncResult { + Ok(self + .implementation + .targets_in_buildfile(env, &files) + .await? + .into()) + } + + async fn rdeps( + &self, + evaluator: &QueryEvaluator<'_, Env>, + universe: TargetSet, + targets: TargetSet, + depth: Option, + captured_expr: Option>, + ) -> QueryFuncResult { + Ok(self + .implementation + .rdeps( + evaluator.env(), + evaluator.functions(), + &universe, + &targets, + depth.map(|v| v as i32), + captured_expr.as_ref(), + ) + .await? + .into()) + } + + async fn testsof(&self, env: &Env, targets: TargetSet) -> QueryFuncResult { + Ok(self.implementation.testsof(env, &targets).await?.into()) + } + + // These three functions are intentionally implemented as errors. They are only available within the context + // of a deps functions 3rd parameter expr. When used in that context, the QueryFunctions will be augmented to + // have non-erroring implementations. + + /// A filter function that can be used in the query expression of `deps` query function. + /// Returns the output of deps function for the immediate dependencies of the given targets. Output is equivalent to `deps(, 1)`. 
+    ///
+    /// Example:
+    /// `buck2 cquery "deps('//foo:bar', 1, first_order_deps())"` is equivalent to `buck2 cquery "deps('//foo:bar', 1)"`
+    async fn first_order_deps(&self) -> QueryFuncResult<Env> {
+        Err(QueryError::NotAvailableInContext("first_order_deps"))
+    }
+
+    /// A filter function that can be used in the query expression of `deps` query function.
+    /// Returns the target dependencies of each dependency of the given targets, excluding any configuration, toolchain and execution dependencies (build time dependencies)
+    /// like compiler used as a part of the build.
+    ///
+    /// Example:
+    /// `buck2 cquery "deps('//foo:bar', 1, target_deps())"`
+    async fn target_deps(&self) -> QueryFuncResult<Env> {
+        Err(QueryError::NotAvailableInContext("target_deps"))
+    }
+
+    /// A filter function that can be used in the query expression of `deps` query function.
+    /// Returns the output of deps function for execution dependencies (build time dependencies), ex. compiler used as a part of the build.
+    ///
+    /// Example:
+    /// `buck2 cquery "deps('//foo:bar', 1, exec_deps())"`
+    async fn exec_deps(&self) -> QueryFuncResult<Env> {
+        Err(QueryError::NotAvailableInContext("exec_deps"))
+    }
+
+    /// A filter function that can be used in the query expression of `deps` query function.
+    /// Returns the output of deps function for configuration dependencies (that appear as conditions in selects).
+    ///
+    /// Example:
+    /// `buck2 cquery "deps('//foo:bar', 1, configuration_deps())"`
+    async fn configuration_deps(&self) -> QueryFuncResult<Env> {
+        Err(QueryError::NotAvailableInContext("configuration_deps"))
+    }
+
+    /// A filter function that can be used in the query expression of `deps` query function.
+    /// Returns the output of deps function for toolchain dependencies.
+    ///
+    /// Example:
+    /// `buck2 cquery "deps('//foo:bar', 1, toolchain_deps())"`
+    async fn toolchain_deps(&self) -> QueryFuncResult<Env> {
+        Err(QueryError::NotAvailableInContext("toolchain_deps"))
+    }
+    /// Computes the set intersection over the given arguments.
+    /// Can be used with the `^` symbol. This operator is commutative.
+    ///
+    /// The parser treats this operator as left-associative and of equal precedence, so we recommend
+    /// that you use parentheses if you need to ensure a specific order of evaluation. A parenthesized expression
+    /// resolves to the value of the expression it encloses.
+    ///
+    /// Example:
+    /// `buck2 aquery "deps('//foo:bar') intersect deps('//baz:lib')"` is the same as
+    /// `buck2 aquery "deps('//foo:bar') ^ deps('//baz:lib')"`
+    /// Both return the targets that appear in the transitive closure of `//foo:bar` and `//baz:lib`.
+    #[binary_op(BinaryOp::Intersect)]
+    async fn intersect(
+        &self,
+        env: &Env,
+        left: QueryValue<Env::Target>,
+        right: QueryValue<Env::Target>,
+    ) -> Result<QueryValue<Env::Target>, QueryError> {
+        self.implementation.intersect(env, left, right).await
+    }
+
+    /// Computes the arguments that are in argument A but not in argument B.
+    /// Can be used with the `-` symbol. This operator is NOT commutative.
+    ///
+    /// The parser treats this operator as left-associative and of equal precedence, so we recommend
+    /// that you use parentheses if you need to ensure a specific order of evaluation. A parenthesized expression
+    /// resolves to the value of the expression it encloses.
+    ///
+    /// Example:
+    /// `buck2 aquery "deps('//foo:bar') except deps('//baz:lib')"` is the same as
+    /// `buck2 aquery "deps('//foo:bar') - deps('//baz:lib')"`
+    /// Both return the targets that `//foo:bar` depends on and that `//baz:lib` does NOT depend on.
+    #[binary_op(BinaryOp::Except)]
+    async fn except(
+        &self,
+        env: &Env,
+        left: QueryValue<Env::Target>,
+        right: QueryValue<Env::Target>,
+    ) -> Result<QueryValue<Env::Target>, QueryError> {
+        self.implementation.except(env, left, right).await
+    }
+
+    /// Computes the set union over the given arguments.
+    /// Can be used with the `+` symbol. This operator is commutative.
+    ///
+    /// The parser treats this operator as left-associative and of equal precedence, so we recommend
+    /// that you use parentheses if you need to ensure a specific order of evaluation. A parenthesized expression
+    /// resolves to the value of the expression it encloses.
+    ///
+    /// Example:
+    /// `buck2 aquery "deps('//foo:bar') union deps('//baz:lib')"` is the same as
+    /// `buck2 aquery "deps('//foo:bar') + deps('//baz:lib')"`
+    /// Both return the aggregation of the targets that `//foo:bar` and `//baz:lib` depend on.
+    #[binary_op(BinaryOp::Union)]
+    async fn union(
+        &self,
+        env: &Env,
+        left: QueryValue<Env::Target>,
+        right: QueryValue<Env::Target>,
+    ) -> Result<QueryValue<Env::Target>, QueryError> {
+        self.implementation.union(env, left, right).await
+    }
+}
+
+#[derive(Allocative)]
+#[allocative(bound = "")]
+pub struct DefaultQueryFunctions<Env: QueryEnvironment> {
+    _marker: std::marker::PhantomData<Env>,
+}
+
+impl<Env: QueryEnvironment> DefaultQueryFunctions<Env> {
+    pub fn new() -> Self {
+        Self {
+            _marker: PhantomData,
+        }
+    }
+}
+
+impl<Env: QueryEnvironment> DefaultQueryFunctions<Env> {
+    pub async fn allpaths(
+        &self,
+        env: &Env,
+        functions: &dyn QueryFunctions<Env = Env>,
+        from: &TargetSet<Env::Target>,
+        to: &TargetSet<Env::Target>,
+        captured_expr: Option<&CapturedExpr<'_>>,
+    ) -> Result<TargetSet<Env::Target>, QueryError> {
+        Ok(DepsFunction::<Env> {
+            _marker: PhantomData,
+        }
+        .invoke_allpaths(env, functions, from, to, captured_expr)
+        .await?)
+    }
+
+    /// Find the shortest path (dependency chain) from one target set to another.
+    ///
+    /// The first parameter is upstream (for example, a final binary), the second is downstream (for example, a library).
+    ///
+    /// If there are multiple paths, which one is returned is unspecified.
+    ///
+    /// Results are returned in order from up to down.
+    ///
+    /// If there's no path, an empty set is returned.
+    ///
+    /// # Example
+    ///
+    /// ```text
+    /// $ buck2 uquery 'somepath(fbcode//buck2:buck2, fbcode//buck2/app/buck2_node:buck2_node)'
+    ///
+    /// fbcode//buck2:buck2
+    /// fbcode//buck2/app/buck2:buck2-bin
+    /// fbcode//buck2/app/buck2_analysis:buck2_analysis
+    /// fbcode//buck2/app/buck2_node:buck2_node
+    /// ```
+    pub async fn somepath(
+        &self,
+        env: &Env,
+        functions: &dyn QueryFunctions<Env = Env>,
+        from: &TargetSet<Env::Target>,
+        to: &TargetSet<Env::Target>,
+        captured_expr: Option<&CapturedExpr<'_>>,
+    ) -> Result<TargetSet<Env::Target>, QueryError> {
+        Ok(DepsFunction::<Env> {
+            _marker: PhantomData,
+        }
+        .invoke_somepath(env, functions, from, to, captured_expr)
+        .await?)
+    }
+
+    pub fn attrfilter(
+        &self,
+        attr: &str,
+        value: &str,
+        targets: &TargetSet<Env::Target>,
+    ) -> anyhow::Result<TargetSet<Env::Target>> {
+        targets.attrfilter(attr, &|v| Ok(v == value))
+    }
+
+    pub fn nattrfilter(
+        &self,
+        attr: &str,
+        value: &str,
+        targets: &TargetSet<Env::Target>,
+    ) -> anyhow::Result<TargetSet<Env::Target>> {
+        targets.nattrfilter(attr, &|v| Ok(v == value))
+    }
+
+    pub fn attrregexfilter(
+        &self,
+        attr: &str,
+        value: &str,
+        targets: &TargetSet<Env::Target>,
+    ) -> anyhow::Result<TargetSet<Env::Target>> {
+        targets.attrregexfilter(attr, value)
+    }
+
+    pub fn buildfile(&self, targets: &TargetSet<Env::Target>) -> FileSet {
+        targets.buildfile()
+    }
+
+    pub async fn allbuildfiles(
+        &self,
+        env: &Env,
+        universe: &TargetSet<Env::Target>,
+    ) -> anyhow::Result<FileSet> {
+        env.allbuildfiles(universe).await
+    }
+
+    pub async fn rbuildfiles(
+        &self,
+        env: &Env,
+        universe: &FileSet,
+        argset: &FileSet,
+    ) -> anyhow::Result<FileSet> {
+        env.rbuildfiles(universe, argset).await
+    }
+
+    pub async fn deps(
+        &self,
+        env: &Env,
+        functions: &dyn QueryFunctions<Env = Env>,
+        targets: &TargetSet<Env::Target>,
+        depth: Option<i32>,
+        captured_expr: Option<&CapturedExpr<'_>>,
+    ) -> anyhow::Result<TargetSet<Env::Target>> {
+        DepsFunction::<Env> {
+            _marker: PhantomData,
+        }
+        .invoke_deps(env, functions, targets, depth, captured_expr)
+        .await
+    }
+
+    /// Filter targets by fully qualified name using regex partial match.
+    pub fn filter_target_set(
+        &self,
+        regex: &str,
+        targets: &TargetSet<Env::Target>,
+    ) -> anyhow::Result<TargetSet<Env::Target>> {
+        targets.filter_name(regex)
+    }
+
+    pub fn filter_file_set(&self, regex: &str, files: &FileSet) -> anyhow::Result<FileSet> {
+        files.filter_name(regex)
+    }
+
+    pub fn inputs(&self, targets: &TargetSet<Env::Target>) -> anyhow::Result<FileSet> {
+        targets.inputs()
+    }
+
+    pub fn labels(
+        &self,
+        _attr: &str,
+        _targets: &TargetSet<Env::Target>,
+    ) -> Result<QueryValue<Env::Target>, QueryError> {
+        Err(QueryError::FunctionUnimplemented("labels"))
+    }
+
+    pub async fn owner(
+        &self,
+        env: &Env,
+        files: &FileSet,
+    ) -> anyhow::Result<TargetSet<Env::Target>> {
+        env.owner(files).await
+    }
+
+    pub async fn targets_in_buildfile(
+        &self,
+        env: &Env,
+        paths: &FileSet,
+    ) -> anyhow::Result<TargetSet<Env::Target>> {
+        env.targets_in_buildfile(paths).await
+    }
+
+    pub async fn rdeps(
+        &self,
+        env: &Env,
+        functions: &dyn QueryFunctions<Env = Env>,
+        universe: &TargetSet<Env::Target>,
+        targets: &TargetSet<Env::Target>,
+        depth: Option<i32>,
+        captured_expr: Option<&CapturedExpr<'_>>,
+    ) -> anyhow::Result<TargetSet<Env::Target>> {
+        DepsFunction::<Env> {
+            _marker: PhantomData,
+        }
+        .invoke_rdeps(env, functions, universe, targets, depth, captured_expr)
+        .await
+    }
+
+    pub async fn testsof(
+        &self,
+        env: &Env,
+        targets: &TargetSet<Env::Target>,
+    ) -> anyhow::Result<TargetSet<Env::Target>> {
+        env.testsof(targets).await
+    }
+
+    pub async fn testsof_with_default_target_platform(
+        &self,
+        env: &Env,
+        targets: &TargetSet<Env::Target>,
+    ) -> anyhow::Result<Vec<MaybeCompatible<Env::Target>>> {
+        env.testsof_with_default_target_platform(targets).await
+    }
+
+    pub async fn intersect(
+        &self,
+        env: &Env,
+        left: QueryValue<Env::Target>,
+        right: QueryValue<Env::Target>,
+    ) -> Result<QueryValue<Env::Target>, QueryError> {
+        let left = accept_target_set(env, left).await?;
+        let right = accept_target_set(env, right).await?;
+        Ok(QueryValue::TargetSet(left.intersect(&right)?))
+    }
+
+    pub async fn except(
+        &self,
+        env: &Env,
+        left: QueryValue<Env::Target>,
+        right: QueryValue<Env::Target>,
+    ) -> Result<QueryValue<Env::Target>, QueryError> {
+        let left = accept_target_set(env, left).await?;
+        let right = accept_target_set(env, right).await?;
+        Ok(QueryValue::TargetSet(left.difference(&right)?))
+    }
+
+    pub async fn union(
+        &self,
+        env: &Env,
+        left: QueryValue<Env::Target>,
+        right: QueryValue<Env::Target>,
+    ) -> Result<QueryValue<Env::Target>, QueryError> {
+        // If the operands are of the same type, `+` simply joins them.
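+        // E.g. `deps(//foo:a) + deps(//foo:b)` unions two target sets directly.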
+        // If one is a string and the other a FileSet or TargetSet, we promote the string to match.
+        match (left, right) {
+            (QueryValue::TargetSet(l), QueryValue::TargetSet(r)) => {
+                Ok(QueryValue::TargetSet(l.union(&r)))
+            }
+            (QueryValue::String(l), QueryValue::TargetSet(r)) => {
+                let l = env.eval_literals(&[&l]).await?;
+                Ok(QueryValue::TargetSet(l.union(&r)))
+            }
+            (QueryValue::TargetSet(l), QueryValue::String(r)) => {
+                let r = env.eval_literals(&[&r]).await?;
+                Ok(QueryValue::TargetSet(l.union(&r)))
+            }
+            (QueryValue::String(l), QueryValue::String(r)) => {
+                // It's important that String + treats both sides as target literals, since
+                // that's what buck1 does; we deliberately blur the line between strings
+                // and target sets.
+                Ok(QueryValue::TargetSet(env.eval_literals(&[&l, &r]).await?))
+            }
+            (QueryValue::FileSet(l), QueryValue::FileSet(r)) => {
+                Ok(QueryValue::FileSet(l.union(&r)))
+            }
+            (QueryValue::String(l), QueryValue::FileSet(r)) => {
+                let l = env.eval_file_literal(&l).await?;
+                Ok(QueryValue::FileSet(l.union(&r)))
+            }
+            (QueryValue::FileSet(l), QueryValue::String(r)) => {
+                let r = env.eval_file_literal(&r).await?;
+                Ok(QueryValue::FileSet(l.union(&r)))
+            }
+            (left, right) => Err(QueryError::UnionIncompatibleTypes(
+                left.variant_name(),
+                right.variant_name(),
+            )),
+        }
+    }
+}
+
+pub struct AugmentedQueryFunctions<'a, Env: QueryEnvironment> {
+    inner: &'a dyn QueryFunctions<Env = Env>,
+    extra: Box<dyn QueryFunctions<Env = Env> + 'a>,
+}
+
+impl<'a, Env: QueryEnvironment> Debug for AugmentedQueryFunctions<'a, Env> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("AugmentedQueryFunctions")
+            .finish_non_exhaustive()
+    }
+}
+
+impl<'a, Env: QueryEnvironment> AugmentedQueryFunctions<'a, Env> {
+    pub fn augment(
+        inner: &'a dyn QueryFunctions<Env = Env>,
+        extra: Box<dyn QueryFunctions<Env = Env> + 'a>,
+    ) -> Self {
+        Self { inner, extra }
+    }
+}
+
+impl<'a, Env: QueryEnvironment> QueryFunctions for AugmentedQueryFunctions<'a, Env> {
+    type Env = Env;
+    fn get(&self, name: &str) -> Option<&dyn QueryFunction<Env>> {
+        match self.extra.get(name) {
+            None => self.inner.get(name),
+            Some(v) => Some(v),
+        }
+    }
+
+    fn get_op(&self, op: BinaryOp) -> Option<&dyn QueryBinaryOp<Env>> {
+        match self.extra.get_op(op) {
+            None => self.inner.get_op(op),
+            Some(v) => Some(v),
+        }
+    }
+}
diff --git a/app/buck2_query/src/query/syntax/simple/functions/deps.rs b/app/buck2_query/src/query/syntax/simple/functions/deps.rs
index ec0a12b3d14e8..4994c8889047c 100644
--- a/app/buck2_query/src/query/syntax/simple/functions/deps.rs
+++ b/app/buck2_query/src/query/syntax/simple/functions/deps.rs
@@ -63,29 +63,43 @@ impl<'a, Env: QueryEnvironment> DepsContextFunctions<'a, Env> {
         }
         Ok(QueryValue::TargetSet(deps))
     }
+
+    async fn configuration_deps(&self, env: &Env) -> Result<QueryValue<Env::Target>, QueryError> {
+        let mut deps = TargetSet::new();
+        for dep in self.target.configuration_deps() {
+            deps.insert(env.get_node(dep).await?);
+        }
+        Ok(QueryValue::TargetSet(deps))
+    }
+
+    async fn toolchain_deps(&self, env: &Env) -> Result<QueryValue<Env::Target>, QueryError> {
+        let mut deps = TargetSet::new();
+        for dep in self.target.toolchain_deps() {
+            deps.insert(env.get_node(dep).await?);
+        }
+        Ok(QueryValue::TargetSet(deps))
+    }
 }
 
 pub(crate) struct DepsFunction<Env: QueryEnvironment> {
     pub(crate) _marker: PhantomData<Env>,
 }
 
-impl<Env: QueryEnvironment> DepsFunction<Env> {
-    pub(crate) async fn invoke_deps(
-        &self,
-        env: &Env,
-        functions: &dyn QueryFunctions<Env = Env>,
-        targets: &TargetSet<Env::Target>,
-        depth: Option<i32>,
-        captured_expr: Option<&CapturedExpr<'_>>,
-    ) -> anyhow::Result<TargetSet<Env::Target>> {
-        let filter = match captured_expr {
-            Some(expr) => {
-                struct Filter<'a, Env: QueryEnvironment> {
-                    inner_env: &'a Env,
-                    functions:
&'a dyn QueryFunctions, - expr: &'a CapturedExpr<'a>, - } +struct Filter<'a, Env: QueryEnvironment> { + inner_env: &'a Env, + functions: &'a dyn QueryFunctions, + expr: &'a CapturedExpr<'a>, +} +impl<'a, Env: QueryEnvironment> DepsFunction { + fn make_filter( + &'a self, + env: &'a Env, + functions: &'a dyn QueryFunctions, + captured_expr: Option<&'a CapturedExpr>, + ) -> Option> { + match captured_expr { + Some(expr) => { #[async_trait] impl<'a, T: QueryTarget, Env: QueryEnvironment> TraversalFilter for Filter<'a, Env> { async fn get_children(&self, target: &T) -> anyhow::Result> { @@ -108,19 +122,78 @@ impl DepsFunction { } } - Some(Filter { - inner_env: env, + Some(Filter::<'a, Env> { + inner_env: &env, functions, expr, }) } None => None, - }; + } + } + pub(crate) async fn invoke_deps( + &self, + env: &Env, + functions: &dyn QueryFunctions, + targets: &TargetSet, + depth: Option, + captured_expr: Option<&CapturedExpr<'_>>, + ) -> anyhow::Result> { + let filter = self.make_filter(&env, functions, captured_expr); let filter_ref = filter .as_ref() .map(|v| v as &dyn TraversalFilter); env.deps(targets, depth, filter_ref).await } + + pub(crate) async fn invoke_rdeps( + &self, + env: &Env, + functions: &dyn QueryFunctions, + universe: &TargetSet, + from: &TargetSet, + depth: Option, + captured_expr: Option<&CapturedExpr<'_>>, + ) -> anyhow::Result> { + let filter = self.make_filter(&env, functions, captured_expr); + let filter_ref = filter + .as_ref() + .map(|v| v as &dyn TraversalFilter); + + env.rdeps(universe, from, depth, filter_ref).await + } + + pub(crate) async fn invoke_somepath( + &self, + env: &Env, + functions: &dyn QueryFunctions, + from: &TargetSet, + to: &TargetSet, + captured_expr: Option<&CapturedExpr<'_>>, + ) -> anyhow::Result> { + let filter = self.make_filter(&env, functions, captured_expr); + let filter_ref = filter + .as_ref() + .map(|v| v as &dyn TraversalFilter); + + env.somepath(from, to, filter_ref).await + } + + pub(crate) async fn invoke_allpaths( + &self, + env: &Env, + functions: &dyn QueryFunctions, + from: &TargetSet, + to: &TargetSet, + captured_expr: Option<&CapturedExpr<'_>>, + ) -> anyhow::Result> { + let filter = self.make_filter(&env, functions, captured_expr); + let filter_ref = filter + .as_ref() + .map(|v| v as &dyn TraversalFilter); + + env.allpaths(from, to, filter_ref).await + } } diff --git a/app/buck2_query/src/query/syntax/simple/functions/docs.rs b/app/buck2_query/src/query/syntax/simple/functions/docs.rs index a2ad903f75df1..0117e032ba2b2 100644 --- a/app/buck2_query/src/query/syntax/simple/functions/docs.rs +++ b/app/buck2_query/src/query/syntax/simple/functions/docs.rs @@ -135,11 +135,13 @@ impl QueryEnvironmentDescription { } fn render_arg_type_markdown(v: QueryArgType, options: &MarkdownOptions) -> String { - let mut rendered = if options.include_alt_text { - format!("*[{}]: {}\n", v.repr(), v.short_description(),) - } else { - String::new() + let mut rendered = format!("- *{}*: ", v.repr()); + match (options.include_alt_text, v.short_description()) { + (true, Some(short_description)) => { + rendered.push_str(short_description); + } + _ => {} }; - rendered.push_str(&format!("- *{}*: {}", v.repr(), v.description())); + rendered.push_str(&format!("\n\n {}", v.description())); rendered } diff --git a/app/buck2_query/src/query/syntax/simple/functions/helpers.rs b/app/buck2_query/src/query/syntax/simple/functions/helpers.rs index 3d12c2a1f9a94..290587a4a5a63 100644 --- a/app/buck2_query/src/query/syntax/simple/functions/helpers.rs +++ 
b/app/buck2_query/src/query/syntax/simple/functions/helpers.rs @@ -58,48 +58,41 @@ impl QueryArgType { } } - pub fn short_description(self) -> &'static str { + pub fn short_description(self) -> Option<&'static str> { match self { - QueryArgType::TargetSet => { - "a target expression, either a literal or the return value of a function" - } - QueryArgType::FileSet => { - "a file expression, either a literal or the return value of a function" - } - QueryArgType::Set => { - "a file or target expression, either a literal or the return value of a function" - } + QueryArgType::TargetSet => Some("either a literal or the return value of a function"), + QueryArgType::FileSet => Some("either a literal or the return value of a function"), QueryArgType::Expression => { - "a valid query expression, evaluated in a function-specific context" + Some("a valid query expression, evaluated in a function-specific context") } - _ => self.description(), + _ => None, } } pub fn description(self) -> &'static str { match self { QueryArgType::String => { - "a string. For example `non_quoted_string` or `\"quoted string\"`" + "For example, `non_quoted_string` or `\"quoted string\"`." } QueryArgType::Integer => { - "an integer. query integers must be positive and fit in a `u32`" + "Must be positive and fit in `u32`." } QueryArgType::TargetSet => { - "A target set expression. This could be a literal build target (`\"cell//some:target\"`), a literal build target pattern \ - (`\"cell//package:\"` or `\"cell//recursive/...\"`) or the result of another function that returns a target set. For \ - queries in cli commands (like `buck2 query`), literals can be relative to the current working dir (like `some:target` \ + "This could be a literal build target (`\"cell//some:target\"`) or a pattern \ + (`\"cell//package:\"` or `\"cell//recursive/...\"`) or the result of another function that returns a target expression. For \ + queries in CLI commands (like `buck2 query`), literals can be relative to the current working dir (like `some:target` \ or `...`)." } QueryArgType::FileSet => { - "A file set expression. This could be a file literal like `path/to/a.file` or the return value of a function that \ + "This could be a file literal like `path/to/a.file` or the return value of a function that \ returns files (for example, the `buildfile()` function)." } QueryArgType::Set => { - "A file set or target set expression. This could be a literal like `path/to/a.file` or `\"cell//some:target\"`, + "Either a *file expression* or *target expression*. This could be a literal like `path/to/a.file` or `\"cell//some:target\"`, or the return value of a function that returns files or targets." } QueryArgType::Expression => { - "A query expression. This is used for functions that capture an expression and evaluate it in another context. \ + "This is used for functions that capture an expression and evaluate it in another context. \ For example, the `deps()` function can accept an expression that it uses to find the children of a node to \ customize the deps traversal." } diff --git a/app/buck2_query/src/query/syntax/simple/functions/mod.rs b/app/buck2_query/src/query/syntax/simple/functions/mod.rs deleted file mode 100644 index 616d5e3f416eb..0000000000000 --- a/app/buck2_query/src/query/syntax/simple/functions/mod.rs +++ /dev/null @@ -1,679 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. 
- * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::fmt; -use std::fmt::Debug; -use std::marker::PhantomData; - -use allocative::Allocative; -use async_trait::async_trait; -use buck2_core::configuration::compatibility::MaybeCompatible; -use buck2_query_derive::query_module; -use buck2_query_parser::spanned::Spanned; -use buck2_query_parser::BinaryOp; -use buck2_query_parser::Expr; -use gazebo::variants::VariantName; - -use crate::query::environment::QueryEnvironment; -use crate::query::syntax::simple::eval::error::QueryError; -use crate::query::syntax::simple::eval::evaluator::QueryEvaluator; -use crate::query::syntax::simple::eval::file_set::FileSet; -use crate::query::syntax::simple::eval::set::TargetSet; -use crate::query::syntax::simple::eval::set::TargetSetExt; -use crate::query::syntax::simple::eval::values::QueryResult; -use crate::query::syntax::simple::eval::values::QueryValue; -use crate::query::syntax::simple::eval::values::QueryValueSet; -use crate::query::syntax::simple::functions::deps::DepsFunction; -use crate::query::syntax::simple::functions::docs::ModuleDescription; -use crate::query::syntax::simple::functions::helpers::CapturedExpr; -use crate::query::syntax::simple::functions::helpers::QueryArgType; -use crate::query::syntax::simple::functions::helpers::QueryBinaryOp; -use crate::query::syntax::simple::functions::helpers::QueryFunction; - -pub mod deps; -pub mod description; -pub mod docs; -pub mod helpers; - -pub trait QueryLiteralVisitor { - fn target_pattern(&mut self, pattern: &str) -> anyhow::Result<()>; -} - -pub trait HasModuleDescription { - fn describe() -> ModuleDescription; -} - -#[async_trait] -pub trait QueryFunctions: Debug + Send + Sync { - type Env: QueryEnvironment; - - fn get(&self, name: &str) -> Option<&dyn QueryFunction>; - - fn get_op(&self, op: BinaryOp) -> Option<&dyn QueryBinaryOp>; -} - -pub trait QueryFunctionsVisitLiterals: Debug + Send + Sync { - fn visit_literals( - &self, - visitor: &mut dyn QueryLiteralVisitor, - expr: &Spanned, - ) -> QueryResult<()>; -} - -impl QueryFunctionsVisitLiterals for F { - fn visit_literals( - &self, - visitor: &mut dyn QueryLiteralVisitor, - expr: &Spanned, - ) -> QueryResult<()> { - fn visit_literals_recurse( - this: &F, - visitor: &mut dyn QueryLiteralVisitor, - expr: &Expr, - ) -> Result<(), QueryError> { - match expr { - Expr::Function { - function_name, - args, - } => match this.get(function_name) { - Some(func) => { - for (i, arg) in args.iter().enumerate() { - visit_literals_item( - this, - visitor, - arg, - matches!( - func.arg_type(i)?, - QueryArgType::TargetSet - | QueryArgType::Set - | QueryArgType::Value - ), - )?; - } - Ok(()) - } - None => Err(QueryError::UnknownFunction( - (*function_name.fragment()).to_owned(), - )), - }, - Expr::BinaryOpSequence(left, exprs) => { - visit_literals_item(this, visitor, left, true)?; - // All binary ops are on targetsets currently. - for (_, right) in exprs { - visit_literals_item(this, visitor, right, true)?; - } - Ok(()) - } - Expr::Set(args) => { - for arg in args { - visitor.target_pattern(arg)?; - } - Ok(()) - } - Expr::FileSet(_args) => Ok(()), - Expr::String(..) | Expr::Integer(..) 
=> { - panic!( - "This shouldn't be called with literals, they should be handled in the caller" - ) - } - } - } - - fn visit_literals_item( - this: &F, - visitor: &mut dyn QueryLiteralVisitor, - expr: &Spanned, - is_target_expr: bool, - ) -> QueryResult<()> { - expr.map_res(|value| -> Result<(), QueryError> { - match value { - Expr::String(val) => { - if is_target_expr { - visitor.target_pattern(val)?; - } - } - Expr::Integer(..) => { - // ignored - } - _ => visit_literals_recurse(this, visitor, value)?, - } - Ok(()) - }) - } - - visit_literals_item(self, visitor, expr, true) - } -} - -#[derive(Allocative)] -#[allocative(bound = "")] -pub struct DefaultQueryFunctionsModule { - implementation: DefaultQueryFunctions, -} - -impl Debug for DefaultQueryFunctionsModule { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("DefaultQueryFunctionsModule") - .finish_non_exhaustive() - } -} - -impl DefaultQueryFunctionsModule { - pub fn new() -> Self { - Self { - implementation: DefaultQueryFunctions::new(), - } - } -} - -type QueryFuncResult = - std::result::Result::Target>, QueryError>; - -async fn accept_target_set( - env: &Env, - val: QueryValue, -) -> Result, QueryError> { - match val { - QueryValue::TargetSet(x) => Ok(x), - QueryValue::String(literal) => Ok(env.eval_literals(&[&literal]).await?), - _ => Err(QueryError::InvalidType { - expected: "target_set", - actual: val.variant_name(), - }), - } -} - -/// Common query functions -#[query_module(Env)] -impl DefaultQueryFunctionsModule { - /// Computes all dependency paths. - /// - /// The `allpaths(from, to)` function evaluates to the graph formed by paths between the target expressions from and to, following the dependencies between nodes. For example, the value of - /// `buck query "allpaths('//foo:bar', '//foo/bar/lib:baz')"` - /// is the dependency graph rooted at the single target node `//foo:bar`, that includes all target nodes that depend (transitively) on `//foo/bar/lib:baz`. - /// - /// The two arguments to `allpaths()` can themselves be expressions. For example, the command: - /// `buck query "allpaths(kind(java_library, '//...'), '//foo:bar')"` - /// shows all the paths between any java_library in the repository and the target `//foo:bar`. - /// - /// We recommend using `allpaths()` with the `--output-format=dot` parameter to generate a graphviz file that can then be rendered as an image. For example: - /// - /// ```ignore - /// $ buck query "allpaths('//foo:bar', '//foo/bar/lib:baz')" --output-format=dot --output-file=result.dot - /// $ dot -Tpng result.dot -o image.png - /// ``` - /// - /// Graphviz is an open-source graph-visualization software tool. Graphviz uses the dot language to describe graphs. - async fn allpaths( - &self, - env: &Env, - from: TargetSet, - to: TargetSet, - ) -> QueryFuncResult { - Ok(self.implementation.allpaths(env, &from, &to).await?.into()) - } - - async fn somepath( - &self, - env: &Env, - from: TargetSet, - to: TargetSet, - ) -> QueryFuncResult { - Ok(self.implementation.somepath(env, &from, &to).await?.into()) - } - - async fn attrfilter( - &self, - attr: String, - value: String, - targets: TargetSet, - ) -> QueryFuncResult { - Ok(self - .implementation - .attrfilter(&attr, &value, &targets)? - .into()) - } - - async fn nattrfilter( - &self, - attr: String, - value: String, - targets: TargetSet, - ) -> QueryFuncResult { - Ok(self - .implementation - .nattrfilter(&attr, &value, &targets)? 
- .into()) - } - - async fn attrregexfilter( - &self, - attr: String, - value: String, - targets: TargetSet, - ) -> QueryFuncResult { - Ok(self - .implementation - .attrregexfilter(&attr, &value, &targets)? - .into()) - } - - async fn buildfile(&self, targets: TargetSet) -> QueryFuncResult { - Ok(self.implementation.buildfile(&targets).into()) - } - - async fn rbuildfiles( - &self, - env: &Env, - universe: FileSet, - argset: FileSet, - ) -> QueryFuncResult { - Ok(self - .implementation - .rbuildfiles(env, &universe, &argset) - .await? - .into()) - } - - async fn allbuildfiles( - &self, - env: &Env, - universe: TargetSet, - ) -> QueryFuncResult { - Ok(self - .implementation - .allbuildfiles(env, &universe) - .await? - .into()) - } - - async fn deps( - &self, - evaluator: &QueryEvaluator<'_, Env>, - targets: TargetSet, - depth: Option, - captured_expr: Option>, - ) -> QueryFuncResult { - Ok(self - .implementation - .deps( - evaluator.env(), - evaluator.functions(), - &targets, - depth.map(|v| v as i32), - captured_expr.as_ref(), - ) - .await? - .into()) - } - - /// Filter using regex partial match. - /// Target are matched against their fully qualified name. - /// Files are matched against their repo path like `repo//foo/bar/baz.py`. - async fn filter(&self, regex: String, set: QueryValueSet) -> QueryFuncResult { - match set { - QueryValueSet::TargetSet(targets) => Ok(self - .implementation - .filter_target_set(®ex, &targets)? - .into()), - QueryValueSet::FileSet(files) => { - Ok(self.implementation.filter_file_set(®ex, &files)?.into()) - } - } - } - - async fn inputs(&self, targets: TargetSet) -> QueryFuncResult { - Ok(self.implementation.inputs(&targets)?.into()) - } - - async fn kind(&self, regex: String, targets: TargetSet) -> QueryFuncResult { - Ok(targets.kind(®ex)?.into()) - } - - async fn labels(&self, attr: String, targets: TargetSet) -> QueryFuncResult { - self.implementation.labels(&attr, &targets) - } - - async fn owner(&self, env: &Env, files: FileSet) -> QueryFuncResult { - Ok(self.implementation.owner(env, &files).await?.into()) - } - - async fn rdeps( - &self, - env: &Env, - universe: TargetSet, - targets: TargetSet, - depth: Option, - ) -> QueryFuncResult { - Ok(self - .implementation - .rdeps(env, &universe, &targets, depth.map(|v| v as i32)) - .await? - .into()) - } - - async fn testsof(&self, env: &Env, targets: TargetSet) -> QueryFuncResult { - Ok(self.implementation.testsof(env, &targets).await?.into()) - } - - // These three functions are intentionally implemented as errors. They are only available within the context - // of a deps functions 3rd parameter expr. When used in that context, the QueryFunctions will be augmented to - // have non-erroring implementations. 
- async fn first_order_deps(&self) -> QueryFuncResult { - self.implementation.first_order_deps() - } - async fn target_deps(&self) -> QueryFuncResult { - Err(QueryError::NotAvailableInContext("target_deps")) - } - async fn exec_deps(&self) -> QueryFuncResult { - Err(QueryError::NotAvailableInContext("exec_deps")) - } - - #[binary_op(BinaryOp::Intersect)] - async fn intersect( - &self, - env: &Env, - left: QueryValue, - right: QueryValue, - ) -> Result, QueryError> { - self.implementation.intersect(env, left, right).await - } - - #[binary_op(BinaryOp::Except)] - async fn except( - &self, - env: &Env, - left: QueryValue, - right: QueryValue, - ) -> Result, QueryError> { - self.implementation.except(env, left, right).await - } - - #[binary_op(BinaryOp::Union)] - async fn union( - &self, - env: &Env, - left: QueryValue, - right: QueryValue, - ) -> Result, QueryError> { - self.implementation.union(env, left, right).await - } -} - -#[derive(Allocative)] -#[allocative(bound = "")] -pub struct DefaultQueryFunctions { - _marker: std::marker::PhantomData, -} - -impl DefaultQueryFunctions { - pub fn new() -> Self { - Self { - _marker: PhantomData, - } - } -} - -impl DefaultQueryFunctions { - pub async fn allpaths( - &self, - env: &Env, - from: &TargetSet, - to: &TargetSet, - ) -> Result, QueryError> { - Ok(env.allpaths(from, to).await?) - } - - pub async fn somepath( - &self, - env: &Env, - from: &TargetSet, - to: &TargetSet, - ) -> Result, QueryError> { - Ok(env.somepath(from, to).await?) - } - - pub fn attrfilter( - &self, - attr: &str, - value: &str, - targets: &TargetSet, - ) -> anyhow::Result> { - targets.attrfilter(attr, &|v| Ok(v == value)) - } - - pub fn nattrfilter( - &self, - attr: &str, - value: &str, - targets: &TargetSet, - ) -> anyhow::Result> { - targets.nattrfilter(attr, &|v| Ok(v == value)) - } - - pub fn attrregexfilter( - &self, - attr: &str, - value: &str, - targets: &TargetSet, - ) -> anyhow::Result> { - targets.attrregexfilter(attr, value) - } - - pub fn buildfile(&self, targets: &TargetSet) -> FileSet { - targets.buildfile() - } - - pub async fn allbuildfiles( - &self, - env: &Env, - universe: &TargetSet, - ) -> anyhow::Result { - env.allbuildfiles(universe).await - } - - pub async fn rbuildfiles( - &self, - env: &Env, - universe: &FileSet, - argset: &FileSet, - ) -> anyhow::Result { - env.rbuildfiles(universe, argset).await - } - - pub async fn deps( - &self, - env: &Env, - functions: &dyn QueryFunctions, - targets: &TargetSet, - depth: Option, - captured_expr: Option<&CapturedExpr<'_>>, - ) -> anyhow::Result> { - DepsFunction:: { - _marker: PhantomData, - } - .invoke_deps(env, functions, targets, depth, captured_expr) - .await - } - - /// Filter targets by fully qualified name using regex partial match. 
- pub fn filter_target_set( - &self, - regex: &str, - targets: &TargetSet, - ) -> anyhow::Result> { - targets.filter_name(regex) - } - - pub fn filter_file_set(&self, regex: &str, files: &FileSet) -> anyhow::Result { - files.filter_name(regex) - } - - pub fn inputs(&self, targets: &TargetSet) -> anyhow::Result { - targets.inputs() - } - - pub fn labels( - &self, - _attr: &str, - _targets: &TargetSet, - ) -> Result, QueryError> { - Err(QueryError::FunctionUnimplemented("labels")) - } - - pub async fn owner( - &self, - env: &Env, - files: &FileSet, - ) -> anyhow::Result> { - env.owner(files).await - } - - pub async fn rdeps( - &self, - env: &Env, - universe: &TargetSet, - targets: &TargetSet, - depth: Option, - ) -> anyhow::Result> { - env.rdeps(universe, targets, depth).await - } - - pub async fn testsof( - &self, - env: &Env, - targets: &TargetSet, - ) -> anyhow::Result> { - env.testsof(targets).await - } - - pub async fn testsof_with_default_target_platform( - &self, - env: &Env, - targets: &TargetSet, - ) -> anyhow::Result>> { - env.testsof_with_default_target_platform(targets).await - } - - // These three functions are intentionally implemented as errors. They are only available within the context - // of a deps functions 3rd parameter expr. When used in that context, the QueryFunctions will be augmented to - // have non-erroring implementations. - pub fn first_order_deps(&self) -> QueryFuncResult { - Err(QueryError::NotAvailableInContext("first_order_deps")) - } - pub fn target_deps(&self) -> QueryFuncResult { - Err(QueryError::NotAvailableInContext("target_deps")) - } - pub fn exec_deps(&self) -> QueryFuncResult { - Err(QueryError::NotAvailableInContext("exec_deps")) - } - - pub async fn intersect( - &self, - env: &Env, - left: QueryValue, - right: QueryValue, - ) -> Result, QueryError> { - let left = accept_target_set(env, left).await?; - let right = accept_target_set(env, right).await?; - Ok(QueryValue::TargetSet(left.intersect(&right)?)) - } - - pub async fn except( - &self, - env: &Env, - left: QueryValue, - right: QueryValue, - ) -> Result, QueryError> { - let left = accept_target_set(env, left).await?; - let right = accept_target_set(env, right).await?; - Ok(QueryValue::TargetSet(left.difference(&right)?)) - } - - pub async fn union( - &self, - env: &Env, - left: QueryValue, - right: QueryValue, - ) -> Result, QueryError> { - // If the operations are of the same type, which + join them. 
- // If one is a string, and the other a FileSet or TargetSet, we can promote the string - match (left, right) { - (QueryValue::TargetSet(l), QueryValue::TargetSet(r)) => { - Ok(QueryValue::TargetSet(l.union(&r))) - } - (QueryValue::String(l), QueryValue::TargetSet(r)) => { - let l = env.eval_literals(&[&l]).await?; - Ok(QueryValue::TargetSet(l.union(&r))) - } - (QueryValue::TargetSet(l), QueryValue::String(r)) => { - let r = env.eval_literals(&[&r]).await?; - Ok(QueryValue::TargetSet(l.union(&r))) - } - (QueryValue::String(l), QueryValue::String(r)) => { - // Important that String + treats both as target literals, since that's what - // buck1 does - we blur the lines between string and targetset - Ok(QueryValue::TargetSet(env.eval_literals(&[&l, &r]).await?)) - } - (QueryValue::FileSet(l), QueryValue::FileSet(r)) => { - Ok(QueryValue::FileSet(l.union(&r))) - } - (QueryValue::String(l), QueryValue::FileSet(r)) => { - let l = env.eval_file_literal(&l).await?; - Ok(QueryValue::FileSet(l.union(&r))) - } - (QueryValue::FileSet(l), QueryValue::String(r)) => { - let r = env.eval_file_literal(&r).await?; - Ok(QueryValue::FileSet(l.union(&r))) - } - (left, right) => Err(QueryError::UnionIncompatibleTypes( - left.variant_name(), - right.variant_name(), - )), - } - } -} - -pub struct AugmentedQueryFunctions<'a, Env: QueryEnvironment> { - inner: &'a dyn QueryFunctions, - extra: Box + 'a>, -} - -impl<'a, Env: QueryEnvironment> Debug for AugmentedQueryFunctions<'a, Env> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("AugmentedQueryFunctions") - .finish_non_exhaustive() - } -} - -impl<'a, Env: QueryEnvironment> AugmentedQueryFunctions<'a, Env> { - pub fn augment( - inner: &'a dyn QueryFunctions, - extra: Box + 'a>, - ) -> Self { - Self { inner, extra } - } -} - -impl<'a, Env: QueryEnvironment> QueryFunctions for AugmentedQueryFunctions<'a, Env> { - type Env = Env; - fn get(&self, name: &str) -> Option<&dyn QueryFunction> { - match self.extra.get(name) { - None => self.inner.get(name), - Some(v) => Some(v), - } - } - - fn get_op(&self, op: BinaryOp) -> Option<&dyn QueryBinaryOp> { - match self.extra.get_op(op) { - None => self.inner.get_op(op), - Some(v) => Some(v), - } - } -} diff --git a/app/buck2_query/src/query/traversal.rs b/app/buck2_query/src/query/traversal.rs index bffb971c70a52..12fa5727c003c 100644 --- a/app/buck2_query/src/query/traversal.rs +++ b/app/buck2_query/src/query/traversal.rs @@ -11,66 +11,59 @@ use std::collections::HashMap; use std::collections::HashSet; use async_trait::async_trait; +use futures::stream::FuturesOrdered; use futures::StreamExt; +use starlark_map::StarlarkHasherBuilder; -use crate::query::environment::LabeledNode; -use crate::query::futures_queue_generic::FuturesQueue; -use crate::query::syntax::simple::eval::label_indexed::LabelIndexedSet; +use crate::query::graph::graph::Graph; +use crate::query::graph::node::LabeledNode; +use crate::query::graph::successors::AsyncChildVisitor; pub trait ChildVisitor: Send { - fn visit(&mut self, node: T::NodeRef) -> anyhow::Result<()>; + fn visit(&mut self, node: &T::Key) -> anyhow::Result<()>; } impl ChildVisitor for F where - F: FnMut(T::NodeRef) -> anyhow::Result<()>, + F: FnMut(&T::Key) -> anyhow::Result<()>, F: Send, { - fn visit(&mut self, node: T::NodeRef) -> anyhow::Result<()> { + fn visit(&mut self, node: &T::Key) -> anyhow::Result<()> { self(node) } } -/// The TraversalDelegate determines how to traverse the graph (via -/// for_each_child) and then handles doing the actual processing 
(via -/// visit). Different traversals may call `visit()` at different times (ex. dfs_postorder -/// calls it after all children have been visited) -#[async_trait] -pub trait AsyncTraversalDelegate: Send + Sync { - /// visit is called once for each node. When it is called is traversal-dependent. - fn visit(&mut self, target: T) -> anyhow::Result<()>; - - /// for_each_child should apply the provided function to each child of the node. This may be called multiple times in some traversals. - async fn for_each_child( - &mut self, - target: &T, - func: &mut dyn ChildVisitor, - ) -> anyhow::Result<()>; -} - pub trait NodeLookup { // TODO(cjhopman): Maybe this should be `&mut self` since we only need the one reference to it. - fn get(&self, label: &T::NodeRef) -> anyhow::Result; + fn get(&self, label: &T::Key) -> anyhow::Result; } #[async_trait] pub trait AsyncNodeLookup: Send + Sync { - async fn get(&self, label: &T::NodeRef) -> anyhow::Result; + async fn get(&self, label: &T::Key) -> anyhow::Result; } -/// Implements a completely unordered graph traversal that visits nodes in a random order. When -/// traversing the graph (potentially) requires work to produce each node (like processing build files) -/// this unordered traversal will parallelize the work efficiently. -pub async fn async_unordered_traversal< - 'a, - T: LabeledNode, - RootIter: IntoIterator, ->( - nodes: &dyn AsyncNodeLookup, - root: RootIter, - delegate: &mut dyn AsyncTraversalDelegate, -) -> anyhow::Result<()> { - async_traversal_common(nodes, root, delegate, None, false).await +/// Node lookup when node key is the same as the node. +pub struct NodeLookupId; + +impl> NodeLookup for NodeLookupId { + fn get(&self, key: &T::Key) -> anyhow::Result { + Ok(key.dupe()) + } +} + +#[async_trait] +impl> AsyncNodeLookup for NodeLookupId { + async fn get(&self, key: &T::Key) -> anyhow::Result { + Ok(key.dupe()) + } +} + +#[async_trait] +impl<'a, T: LabeledNode, A: AsyncNodeLookup> AsyncNodeLookup for &'a A { + async fn get(&self, label: &T::Key) -> anyhow::Result { + (*self).get(label).await + } } /// Implements a depth-first postorder traversal. A node will be visited only after all of its @@ -81,13 +74,13 @@ pub async fn async_unordered_traversal< // TODO(cjhopman): Figure out how to implement this traversal in a way that has good performance // in both cases. pub async fn async_fast_depth_first_postorder_traversal< - 'a, T: LabeledNode, - RootIter: IntoIterator, + RootIter: IntoIterator, >( - nodes: &(dyn NodeLookup + Send + Sync), + nodes: &impl NodeLookup, root: RootIter, - delegate: &mut dyn AsyncTraversalDelegate, + successors: impl AsyncChildVisitor, + mut visit: impl FnMut(T) -> anyhow::Result<()>, ) -> anyhow::Result<()> { // This implementation simply performs a dfs. We maintain a work stack here. // When visiting a node, we first add an item to the work stack to call @@ -97,16 +90,9 @@ pub async fn async_fast_depth_first_postorder_traversal< // it will still be added. When popping the visit, if the node had been // visited, it's ignored. This ensures that a node's children are all // visited before we do PostVisit for that node. - #[derive(Hash, Eq, PartialEq)] enum WorkItem { PostVisit(T), - Visit(T::NodeRef), - } - - #[derive(Default)] - struct State { - visited: HashSet, - work: Vec>, + Visit(T::Key), } // TODO(cjhopman): There's a couple of things that could be improved about this. @@ -114,53 +100,32 @@ pub async fn async_fast_depth_first_postorder_traversal< // couldn't figure out quite a good way to do that in rust. 
I think it would // mean changing the delegate's for_each_children to return an iterator, // but idk. - impl State { - fn new() -> Self { - Self { - visited: HashSet::new(), - work: Vec::new(), - } - } - fn push(&mut self, target: T::NodeRef) { - if self.visited.contains(&target) { - return; - } - - self.work.push(WorkItem::Visit(target)); - } - - fn pop(&mut self) -> Option> { - self.work.pop() - } - } + let mut visited: HashSet = HashSet::default(); + let mut work: Vec> = root.into_iter().map(|t| WorkItem::Visit(t)).collect(); - let mut state = State::new(); - - for target in root { - state.push(target.clone()); - } - - while let Some(curr) = state.pop() { + while let Some(curr) = work.pop() { match curr { WorkItem::Visit(target) => { - if state.visited.contains(&target) { + if visited.contains(&target) { continue; } let node = nodes.get(&target)?; - state.visited.insert(target); - state.work.push(WorkItem::PostVisit(node.dupe())); - - delegate - .for_each_child(&node, &mut |child| { - state.push(child); + visited.insert(target); + work.push(WorkItem::PostVisit(node.dupe())); + + successors + .for_each_child(&node, &mut |child: &T::Key| { + if !visited.contains(child) { + work.push(WorkItem::Visit(child.clone())); + } Ok(()) }) .await?; } WorkItem::PostVisit(target) => { - delegate.visit(target)?; + visit(target)?; } } } @@ -168,41 +133,35 @@ pub async fn async_fast_depth_first_postorder_traversal< Ok(()) } -async fn async_traversal_common< +pub async fn async_depth_limited_traversal< 'a, - T: LabeledNode, - RootIter: IntoIterator, + T: LabeledNode + 'static, + RootIter: IntoIterator, >( - nodes: &dyn AsyncNodeLookup, + nodes: &impl AsyncNodeLookup, root: RootIter, - delegate: &mut dyn AsyncTraversalDelegate, - // `None` means no max depth. - max_depth: Option, - ordered: bool, + successors: impl AsyncChildVisitor, + mut visit: impl FnMut(T) -> anyhow::Result<()>, + max_depth: u32, ) -> anyhow::Result<()> { - let mut visited = HashMap::new(); - let mut push = |queue: &mut FuturesQueue<_>, - target: T::NodeRef, - parent: Option, - depth: u32| { - if visited.contains_key(&target) { - return; - } - visited.insert(target.clone(), parent); - queue.push(async move { - let result = nodes.get(&target).await; - (target, depth, result) - }) - }; - - let mut queue = if ordered { - FuturesQueue::new_ordered() - } else { - FuturesQueue::new_unordered() - }; + let mut visited: HashMap<_, _, StarlarkHasherBuilder> = HashMap::default(); + let mut push = + |queue: &mut FuturesOrdered<_>, target: &T::Key, parent: Option, depth: u32| { + if visited.contains_key(target) { + return; + } + visited.insert(target.clone(), parent); + let target = target.clone(); + queue.push_back(async move { + let result = nodes.get(&target).await; + (target, depth, result) + }) + }; + + let mut queue = FuturesOrdered::new(); for target in root { - push(&mut queue, target.clone(), None, 0); + push(&mut queue, target, None, 0); } // TODO(cjhopman): FuturesOrdered/Unordered interacts poorly with tokio cooperative scheduling @@ -211,17 +170,17 @@ async fn async_traversal_common< while let Some((target, depth, node)) = tokio::task::unconstrained(queue.next()).await { let result: anyhow::Result<_> = try { let node = node?; - if Some(depth) != max_depth { + if depth != max_depth { let depth = depth + 1; - delegate - .for_each_child(&node, &mut |child| { + successors + .for_each_child(&node, &mut |child: &T::Key| { push(&mut queue, child, Some(target.clone()), depth); Ok(()) }) .await?; } - delegate.visit(node)?; + visit(node)?; }; if 
let Err(mut e) = result { @@ -237,108 +196,38 @@ async fn async_traversal_common< Ok(()) } -pub async fn async_depth_limited_traversal< - 'a, - T: LabeledNode, - RootIter: IntoIterator, ->( - nodes: &dyn AsyncNodeLookup, - root: RootIter, - delegate: &mut dyn AsyncTraversalDelegate, - max_depth: u32, -) -> anyhow::Result<()> { - async_traversal_common(nodes, root, delegate, Some(max_depth), true).await -} - /// Implements a depth-first postorder traversal. A node will be visited only after all of its /// dependencies have been visited. // TODO(cjhopman): Accept a generic iterator for the roots. We need to iterate over it twice and it's only used with this specific iterator so it was easier to not be generic. pub async fn async_depth_first_postorder_traversal< 'a, T: LabeledNode, - Iter: IntoIterator + Clone, + Iter: IntoIterator + Clone, >( - nodes: &dyn AsyncNodeLookup, + nodes: &impl AsyncNodeLookup, root: Iter, - delegate: &mut dyn AsyncTraversalDelegate, + successors: impl AsyncChildVisitor, + mut visit: impl FnMut(T) -> anyhow::Result<()>, ) -> anyhow::Result<()> { - // We first do an unordered graph traversal to collect all nodes. The unordered traversal efficiently - // uses resources when we need to process build files. - // We don't cache the results of the for_each_child iterators, so that is called multiple times. Potentially it would be more performant to avoid that if an expensive filter/operation is involved. - struct UnorderedDelegate<'a, T: LabeledNode> { - delegate: &'a mut dyn AsyncTraversalDelegate, - nodes: LabelIndexedSet, - } - - #[async_trait] - impl AsyncTraversalDelegate for UnorderedDelegate<'_, T> { - /// visit is called once for each node. When it is called is traversal-dependent. - fn visit(&mut self, target: T) -> anyhow::Result<()> { - self.nodes.insert(target); - Ok(()) - } - - /// for_each_child should apply the provided function to each child of the node. This may be called multiple times in some traversals. 
- async fn for_each_child( - &mut self, - target: &T, - func: &mut dyn ChildVisitor, - ) -> anyhow::Result<()> { - self.delegate.for_each_child(target, func).await - } - } - - let mut unordered_delegate = UnorderedDelegate { - delegate, - nodes: LabelIndexedSet::new(), - }; + let graph = Graph::build(nodes, root.clone().into_iter().cloned(), successors).await?; - async_unordered_traversal(nodes, root.clone(), &mut unordered_delegate).await?; - - struct Nodes { - nodes: LabelIndexedSet, - } - - impl NodeLookup for Nodes { - fn get(&self, label: &T::NodeRef) -> anyhow::Result { - Ok(self - .nodes - .get(label) - .unwrap_or_else(|| { - panic!( - "Should've failed in first traversal if there's a missing node (missing `{}`).", - label - ) - }) - .dupe()) - } - } - - let nodes = Nodes { - nodes: unordered_delegate.nodes, - }; - - async_fast_depth_first_postorder_traversal(&nodes, root, delegate).await?; - - Ok(()) + graph.depth_first_postorder_traversal(root.into_iter().cloned(), |node| visit(node.dupe())) } #[cfg(test)] mod tests { use std::borrow::Cow; - use std::collections::HashMap; use buck2_core::build_file_path::BuildFilePath; use buck2_core::cells::cell_path::CellPath; use derive_more::Display; use dupe::Dupe; + use dupe::IterDupedExt; use gazebo::prelude::*; - use serde::Serialize; - use serde::Serializer; use super::*; - use crate::query::environment::NodeLabel; use crate::query::environment::QueryTarget; + use crate::query::graph::node::NodeKey; use crate::query::syntax::simple::eval::set::TargetSet; #[derive(Debug, Clone)] @@ -350,15 +239,15 @@ mod tests { #[derive(Debug, Clone, Dupe, Hash, Display, PartialEq, Eq, PartialOrd, Ord)] struct Ref(i64); - impl NodeLabel for Ref {} + impl NodeKey for Ref {} - #[derive(Debug, Display, Serialize)] + #[derive(Debug, Display)] struct Attr(String); impl LabeledNode for Node { - type NodeRef = Ref; + type Key = Ref; - fn node_ref(&self) -> &Self::NodeRef { + fn node_key(&self) -> &Self::Key { &self.0 } } @@ -369,12 +258,15 @@ mod tests { fn rule_type(&self) -> Cow { unimplemented!() } + fn name(&self) -> Cow { + unimplemented!() + } fn buildfile_path(&self) -> &BuildFilePath { unimplemented!() } - fn deps<'a>(&'a self) -> Box + Send + 'a> { - Box::new(self.1.iter()) + fn deps<'a>(&'a self) -> impl Iterator + Send + 'a { + self.1.iter() } fn special_attrs_for_each) -> Result<(), E>>( @@ -398,6 +290,13 @@ mod tests { unimplemented!() } + fn defined_attrs_for_each) -> Result<(), E>>( + &self, + _func: F, + ) -> Result<(), E> { + unimplemented!() + } + fn map_attr>) -> R>(&self, _key: &str, _func: F) -> R { unimplemented!() } @@ -409,68 +308,73 @@ mod tests { unimplemented!() } - fn exec_deps<'a>(&'a self) -> Box + Send + 'a> { - unimplemented!() + fn exec_deps<'a>(&'a self) -> impl Iterator + Send + 'a { + let _iterator: Box + Send + 'a>; + unimplemented!(); + #[allow(unreachable_code)] + _iterator } - fn target_deps<'a>(&'a self) -> Box + Send + 'a> { - unimplemented!() + fn target_deps<'a>(&'a self) -> impl Iterator + Send + 'a { + let _iterator: Box + Send + 'a>; + unimplemented!(); + #[allow(unreachable_code)] + _iterator } - fn call_stack(&self) -> Option { - None + fn configuration_deps<'a>(&'a self) -> impl Iterator + Send + 'a { + let _iterator: Box + Send + 'a>; + unimplemented!(); + #[allow(unreachable_code)] + _iterator } - fn attr_to_string_alternate(&self, _attr: &Self::Attr<'_>) -> String { - unimplemented!("not needed for tests") - } - - fn attr_serialize( - &self, - _attr: &Self::Attr<'_>, - _serializer: S, - ) -> Result { - 
unimplemented!("not needed for tests") + fn toolchain_deps<'a>(&'a self) -> impl Iterator + Send + 'a { + let _iterator: Box + Send + 'a>; + unimplemented!(); + #[allow(unreachable_code)] + _iterator } } struct Graph(HashMap); impl Graph { - fn collecting_delegate<'a>( - &self, - results: &'a mut Vec, - ) -> impl AsyncTraversalDelegate + 'a { - struct Delegate<'a> { - results: &'a mut Vec, - } - - #[async_trait] - impl<'a> AsyncTraversalDelegate for Delegate<'a> { - fn visit(&mut self, target: Node) -> anyhow::Result<()> { - self.results.push(target.0.dupe()); - Ok(()) - } + fn child_visitor<'a>(&self) -> impl AsyncChildVisitor + 'a { + struct ChildVisitorImpl; + impl AsyncChildVisitor for ChildVisitorImpl { async fn for_each_child( - &mut self, + &self, target: &Node, - func: &mut dyn ChildVisitor, + mut func: impl ChildVisitor, ) -> anyhow::Result<()> { for child in &target.1 { - func.visit(child.dupe())?; + func.visit(child)?; } Ok(()) } } - Delegate { results } + ChildVisitorImpl + } + + fn async_node_lookup(&self) -> impl AsyncNodeLookup + '_ { + struct Lookup<'a>(&'a Graph); + + #[async_trait] + impl<'a> AsyncNodeLookup for Lookup<'a> { + async fn get(&self, label: &Ref) -> anyhow::Result { + self.0.get(label) + } + } + + Lookup(self) } } - #[async_trait] - impl AsyncNodeLookup for Graph { - async fn get(&self, label: &Ref) -> anyhow::Result { + impl NodeLookup for Graph { + fn get(&self, label: &Ref) -> anyhow::Result { self.0 .get(label) .ok_or_else(|| anyhow::anyhow!("missing node")) @@ -487,7 +391,7 @@ mod tests { } #[tokio::test] - async fn test_postorder() -> anyhow::Result<()> { + async fn test_async_depth_first_postorder_traversal() -> anyhow::Result<()> { let graph = make_graph(&[ (0, &[1, 2]), (1, &[2, 3, 4]), @@ -496,13 +400,21 @@ mod tests { (4, &[]), ])?; let mut targets = TargetSet::new(); - targets.insert(graph.get(&Ref(0)).await?); + targets.insert(graph.get(&Ref(0))?); let mut results = Vec::new(); { - let mut delegate = graph.collecting_delegate(&mut results); - async_depth_first_postorder_traversal(&graph, targets.iter_names(), &mut delegate) - .await?; + let child_visitor = graph.child_visitor(); + async_depth_first_postorder_traversal( + &graph.async_node_lookup(), + targets.iter_names(), + child_visitor, + |n| { + results.push(n.0); + Ok(()) + }, + ) + .await?; } assert_eq!(results, vec![Ref(4), Ref(3), Ref(2), Ref(1), Ref(0)]); @@ -520,22 +432,72 @@ mod tests { (4, &[]), ])?; let mut targets = TargetSet::new(); - targets.insert(graph.get(&Ref(0)).await?); + targets.insert(graph.get(&Ref(0))?); let mut results0 = Vec::new(); { - let mut delegate = graph.collecting_delegate(&mut results0); - async_depth_limited_traversal(&graph, targets.iter_names(), &mut delegate, 0).await?; + let delegate = graph.child_visitor(); + async_depth_limited_traversal( + &graph.async_node_lookup(), + targets.iter_names(), + delegate, + |n| { + results0.push(n.0); + Ok(()) + }, + 0, + ) + .await?; } assert_eq!(results0, vec![Ref(0)]); let mut results1 = Vec::new(); { - let mut delegate = graph.collecting_delegate(&mut results1); - async_depth_limited_traversal(&graph, targets.iter_names(), &mut delegate, 1).await?; + let delegate = graph.child_visitor(); + async_depth_limited_traversal( + &graph.async_node_lookup(), + targets.iter_names(), + delegate, + |n| { + results1.push(n.0); + Ok(()) + }, + 1, + ) + .await?; } assert_eq!(results1, vec![Ref(0), Ref(1), Ref(2)]); Ok(()) } + + #[tokio::test] + async fn test_async_fast_depth_first_postorder_traversal() -> anyhow::Result<()> { + let 
graph = make_graph(&[ + (0, &[1, 2]), + (1, &[2, 3, 4]), + (2, &[3, 4]), + (3, &[4]), + (4, &[]), + ])?; + let mut targets = TargetSet::new(); + targets.insert(graph.get(&Ref(0))?); + + let mut results = Vec::new(); + { + async_fast_depth_first_postorder_traversal( + &graph, + targets.iter_names().duped(), + graph.child_visitor(), + |n| { + results.push(n.0); + Ok(()) + }, + ) + .await?; + } + assert_eq!(results, vec![Ref(4), Ref(3), Ref(2), Ref(1), Ref(0)]); + + Ok(()) + } } diff --git a/app/buck2_query_derive/BUCK b/app/buck2_query_derive/BUCK index e07b0b58e3837..1e67cc70bd994 100644 --- a/app/buck2_query_derive/BUCK +++ b/app/buck2_query_derive/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") diff --git a/app/buck2_query_derive/Cargo.toml b/app/buck2_query_derive/Cargo.toml index 3b7720e274e5e..2996fd2202e2a 100644 --- a/app/buck2_query_derive/Cargo.toml +++ b/app/buck2_query_derive/Cargo.toml @@ -2,8 +2,9 @@ authors = ["Facebook"] description = "Proc macros for Buck's buck_query crate" edition = "2021" -license = "MIT OR Apache-2.0" +license = { workspace = true } name = "buck2_query_derive" +repository = { workspace = true } version = "0.4.1" [lib] @@ -12,13 +13,6 @@ proc-macro = true [dependencies] itertools = { workspace = true } proc-macro2 = { workspace = true } -syn = { workspace = true } quote = { workspace = true } +syn = { workspace = true } textwrap = { workspace = true } - -gazebo_lint.version = "0.1" -gazebo_lint.optional = true -# @oss-disable: gazebo_lint.path = "../../gazebo_lint/gazebo_lint" - -[features] -# @oss-disable: default = ["gazebo_lint"] diff --git a/app/buck2_query_derive/src/codegen.rs b/app/buck2_query_derive/src/codegen.rs index cccc59d2dc385..09e3156641763 100644 --- a/app/buck2_query_derive/src/codegen.rs +++ b/app/buck2_query_derive/src/codegen.rs @@ -206,11 +206,13 @@ fn gen_for_method(parsed: &Parsed, method: &Method) -> syn::Result Some(#func_ty::ref_cast(self) as &dyn QueryBinaryOp<#env_ident>) )); - let method_def = quote_spanned!(method.name.span() => + let struct_def: syn::ItemStruct = syn::parse_quote_spanned! { method.name.span() => #[derive(RefCast)] #[repr(transparent)] - struct #func_ty #impl_generics #where_clause(#self_ty); + struct #func_ty #impl_generics (#self_ty) #where_clause; + }; + let impl_def: syn::ItemImpl = syn::parse_quote_spanned! { method.name.span() => #[async_trait] impl #impl_generics QueryBinaryOp<#env_ident> for #func_ty #ty_generics #where_clause { fn name(&self) -> &'static str { #func_ident_str } @@ -228,6 +230,11 @@ fn gen_for_method(parsed: &Parsed, method: &Method) -> syn::Result + #struct_def + #impl_def ); Ok(MethodCodegen { @@ -244,11 +251,13 @@ fn gen_for_method(parsed: &Parsed, method: &Method) -> syn::Result Some(#func_ty::ref_cast(self) as &dyn QueryFunction<#env_ident>) )); - let method_def = quote_spanned!(method.name.span() => + let struct_def: syn::ItemStruct = syn::parse_quote_spanned! { method.name.span() => #[derive(RefCast)] #[repr(transparent)] - struct #func_ty #impl_generics #where_clause(#self_ty); + struct #func_ty #impl_generics (#self_ty) #where_clause; + }; + let impl_ref: syn::ItemImpl = syn::parse_quote_spanned! 
{ method.name.span() => #[async_trait] impl #impl_generics QueryFunction<#env_ident> for #func_ty #ty_generics #where_clause { fn name(&self) -> &'static str { stringify!(#func_ident) } @@ -275,6 +284,11 @@ fn gen_for_method(parsed: &Parsed, method: &Method) -> syn::Result + #struct_def + #impl_ref ); Ok(MethodCodegen { diff --git a/app/buck2_query_derive/src/lib.rs b/app/buck2_query_derive/src/lib.rs index 7d2da0b0cf2f7..6b0e7657e1f8d 100644 --- a/app/buck2_query_derive/src/lib.rs +++ b/app/buck2_query_derive/src/lib.rs @@ -7,9 +7,7 @@ * of this source tree. */ -#![cfg_attr(feature = "gazebo_lint", feature(plugin))] -#![cfg_attr(feature = "gazebo_lint", allow(deprecated))] // :( -#![cfg_attr(feature = "gazebo_lint", plugin(gazebo_lint))] +#![feature(error_generic_member_access)] #![feature(try_blocks)] #[allow(unused_extern_crates)] // proc_macro is very special diff --git a/app/buck2_query_derive/src/parse.rs b/app/buck2_query_derive/src/parse.rs index 9d0e9ff159b72..3bab4b4b9f77b 100644 --- a/app/buck2_query_derive/src/parse.rs +++ b/app/buck2_query_derive/src/parse.rs @@ -288,7 +288,7 @@ impl syn::parse::Parse for QueryModuleArgs { } #[cfg(test)] -mod test { +mod tests { use quote::quote; use syn::parse_quote; use syn::Generics; @@ -350,19 +350,19 @@ mod test { assert_eq!(3, module.methods.len()); - let method = module.methods.get(0).unwrap(); + let method = module.methods.first().unwrap(); assert_eq!("buildfile", method.name.to_string()); let args = &method.args; assert_eq!(1, args.len()); let expected: Type = parse_quote!(TargetSet); - assert_eq!(&expected, &args.get(0).unwrap().ty); + assert_eq!(&expected, &args.first().unwrap().ty); let method = module.methods.get(1).unwrap(); assert_eq!("deps", method.name.to_string()); let args = &method.args; assert_eq!(4, args.len()); let expected: Type = parse_quote!(&QueryEvaluator<'_, Env>); - assert_eq!(&expected, &args.get(0).unwrap().ty); + assert_eq!(&expected, &args.first().unwrap().ty); let expected: Type = parse_quote!(TargetSet); assert_eq!(&expected, &args.get(1).unwrap().ty); let expected: Type = parse_quote!(Option); @@ -375,7 +375,7 @@ mod test { let args = &method.args; assert_eq!(2, args.len()); let expected: Type = parse_quote!(String); - assert_eq!(&expected, &args.get(0).unwrap().ty); + assert_eq!(&expected, &args.first().unwrap().ty); let expected: Type = parse_quote!(TargetSet); assert_eq!(&expected, &args.get(1).unwrap().ty); } diff --git a/app/buck2_query_impls/BUCK b/app/buck2_query_impls/BUCK index a897a71b46406..32f3a29eecf83 100644 --- a/app/buck2_query_impls/BUCK +++ b/app/buck2_query_impls/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -21,7 +20,6 @@ rust_library( "fbsource//third-party/rust:indexmap", "fbsource//third-party/rust:itertools", "fbsource//third-party/rust:ref-cast", - "fbsource//third-party/rust:thiserror", "fbsource//third-party/rust:tokio", "fbsource//third-party/rust:tracing", "//buck2/allocative/allocative:allocative", @@ -29,16 +27,17 @@ rust_library( "//buck2/app/buck2_build_api:buck2_build_api", "//buck2/app/buck2_common:buck2_common", "//buck2/app/buck2_core:buck2_core", + "//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_events:buck2_events", + "//buck2/app/buck2_futures:buck2_futures", "//buck2/app/buck2_interpreter:buck2_interpreter", "//buck2/app/buck2_node:buck2_node", "//buck2/app/buck2_query:buck2_query", 
"//buck2/app/buck2_query_parser:buck2_query_parser", + "//buck2/app/buck2_util:buck2_util", "//buck2/dice/dice:dice", "//buck2/gazebo/dupe:dupe", "//buck2/gazebo/gazebo:gazebo", - "//buck2/shed/more_futures:more_futures", - "//buck2/shed/provider:provider", "//buck2/starlark-rust/starlark:starlark", ], ) diff --git a/app/buck2_query_impls/Cargo.toml b/app/buck2_query_impls/Cargo.toml index 69caa192942c6..63bcd60cd97d2 100644 --- a/app/buck2_query_impls/Cargo.toml +++ b/app/buck2_query_impls/Cargo.toml @@ -1,22 +1,22 @@ [package] authors = ["Facebook"] +description = "uquery, cquery, aquery and analysis query implementations for Buck2" edition = "2021" -license = "MIT OR Apache-2.0" +license = { workspace = true } name = "buck2_query_impls" +repository = { workspace = true } version = "0.0.0" -description = "uquery, cquery, aquery and analysis query implementations for Buck2" [dependencies] anyhow = { workspace = true } async-trait = { workspace = true } -futures = { workspace = true } dashmap = { workspace = true } derive_more = { workspace = true } either = { workspace = true } +futures = { workspace = true } indexmap = { workspace = true } itertools = { workspace = true } ref-cast = { workspace = true } -thiserror = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } @@ -24,19 +24,20 @@ allocative = { workspace = true } dice = { workspace = true } dupe = { workspace = true } gazebo = { workspace = true } -more_futures = { workspace = true } -provider = { workspace = true } starlark = { workspace = true } buck2_artifact = { workspace = true } buck2_build_api = { workspace = true } buck2_common = { workspace = true } buck2_core = { workspace = true } +buck2_error = { workspace = true } buck2_events = { workspace = true } +buck2_futures = { workspace = true } buck2_interpreter = { workspace = true } buck2_node = { workspace = true } buck2_query = { workspace = true } buck2_query_parser = { workspace = true } +buck2_util = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } diff --git a/app/buck2_query_impls/src/analysis.rs b/app/buck2_query_impls/src/analysis.rs new file mode 100644 index 0000000000000..8c38f1e0903ac --- /dev/null +++ b/app/buck2_query_impls/src/analysis.rs @@ -0,0 +1,13 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +pub(crate) mod configured_graph; +pub(crate) mod environment; +pub(crate) mod eval; +pub(crate) mod evaluator; diff --git a/app/buck2_query_impls/src/analysis/configured_graph.rs b/app/buck2_query_impls/src/analysis/configured_graph.rs index ecdd281707ad3..c2aad9e7033e9 100644 --- a/app/buck2_query_impls/src/analysis/configured_graph.rs +++ b/app/buck2_query_impls/src/analysis/configured_graph.rs @@ -14,8 +14,8 @@ use std::sync::Arc; use allocative::Allocative; use async_trait::async_trait; use buck2_artifact::artifact::artifact_type::Artifact; -use buck2_common::result::SharedResult; use buck2_core::target::configured_target_label::ConfiguredTargetLabel; +use buck2_futures::cancellation::CancellationContext; use buck2_node::nodes::configured::ConfiguredTargetNode; use buck2_node::nodes::configured_frontend::ConfiguredTargetNodeCalculation; use buck2_node::nodes::configured_ref::ConfiguredGraphNodeRef; @@ -23,33 +23,33 @@ use buck2_query::query::syntax::simple::eval::set::TargetSet; use derive_more::Display; use dice::DiceComputations; use dice::Key; +use dice::LinearRecomputeDiceComputations; use dupe::Dupe; use dupe::IterDupedExt; use dupe::OptionDupedExt; use futures::FutureExt; use indexmap::IndexMap; -use more_futures::cancellation::CancellationContext; use crate::analysis::environment::get_from_template_placeholder_info; use crate::analysis::environment::ConfiguredGraphQueryEnvironmentDelegate; -pub struct AnalysisDiceQueryDelegate<'c> { - pub ctx: &'c DiceComputations, +pub(crate) struct AnalysisDiceQueryDelegate<'c, 'd> { + pub(crate) ctx: &'c LinearRecomputeDiceComputations<'d>, } -impl<'c> AnalysisDiceQueryDelegate<'c> { - pub(crate) fn ctx(&self) -> &DiceComputations { - self.ctx +impl AnalysisDiceQueryDelegate<'_, '_> { + pub(crate) fn ctx<'d>(&'d self) -> DiceComputations<'d> { + self.ctx.get() } } -pub struct AnalysisConfiguredGraphQueryDelegate<'a> { - pub dice_query_delegate: Arc>, - pub resolved_literals: HashMap, +pub(crate) struct AnalysisConfiguredGraphQueryDelegate<'a, 'd> { + pub(crate) dice_query_delegate: Arc>, + pub(crate) resolved_literals: HashMap, } #[async_trait] -impl<'a> ConfiguredGraphQueryEnvironmentDelegate for AnalysisConfiguredGraphQueryDelegate<'a> { +impl ConfiguredGraphQueryEnvironmentDelegate for AnalysisConfiguredGraphQueryDelegate<'_, '_> { fn eval_literal(&self, literal: &str) -> anyhow::Result { self.resolved_literals .get(literal) @@ -63,7 +63,7 @@ impl<'a> ConfiguredGraphQueryEnvironmentDelegate for AnalysisConfiguredGraphQuer targets: TargetSet, ) -> anyhow::Result> { #[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)] - #[display(fmt = "template_placeholder_info_query({})", template_name)] + #[display("template_placeholder_info_query({})", template_name)] struct TemplatePlaceholderInfoQueryKey { template_name: &'static str, // Use `ConfiguredTargetLabel` instead of `ConfiguredGraphNodeRef` here because `ConfiguredGraphNodeRef` @@ -74,28 +74,48 @@ impl<'a> ConfiguredGraphQueryEnvironmentDelegate for AnalysisConfiguredGraphQuer #[async_trait] impl Key for TemplatePlaceholderInfoQueryKey { - type Value = SharedResult>>; + type Value = buck2_error::Result>>; async fn compute( &self, ctx: &mut DiceComputations, _cancellation: &CancellationContext, ) -> Self::Value { - let (targets, label_to_artifact) = futures::future::try_join( - futures::future::try_join_all(self.targets.iter().map(|target| { - ctx.get_configured_target_node(target) - .map(|res| res?.require_compatible()) - })), - get_from_template_placeholder_info( - 
ctx, - self.template_name, - self.targets.iter().duped(), - ), - ) - .await?; - - let targets: TargetSet<_> = - targets.into_iter().map(ConfiguredGraphNodeRef).collect(); + let (targets, label_to_artifact) = { + ctx.try_compute2( + |ctx| { + async move { + ctx.try_compute_join(self.targets.iter(), |ctx, target| { + async move { + ctx.get_configured_target_node(target) + .await? + .require_compatible() + } + .boxed() + }) + .await + } + .boxed() + }, + |ctx| { + async move { + get_from_template_placeholder_info( + ctx, + self.template_name, + self.targets.iter().duped(), + ) + .await + } + .boxed() + }, + ) + .await? + }; + + let targets: TargetSet<_> = targets + .into_iter() + .map(ConfiguredGraphNodeRef::new) + .collect(); let targets = find_target_nodes(targets, label_to_artifact)?; Ok(Arc::new(targets)) } @@ -147,8 +167,8 @@ fn find_target_nodes( if result.len() == label_to_artifact.len() { return Ok(result); } - for dep in target.0.target_deps() { - let dep = ConfiguredGraphNodeRef(dep.dupe()); + for dep in target.target_deps() { + let dep = ConfiguredGraphNodeRef::new(dep.dupe()); if seen.insert(dep.dupe()) { queue.push_back(dep); } diff --git a/app/buck2_query_impls/src/analysis/environment.rs b/app/buck2_query_impls/src/analysis/environment.rs index 7ce007403f124..5940a8909b5ac 100644 --- a/app/buck2_query_impls/src/analysis/environment.rs +++ b/app/buck2_query_impls/src/analysis/environment.rs @@ -19,12 +19,8 @@ use async_trait::async_trait; use buck2_artifact::artifact::artifact_type::Artifact; use buck2_artifact::artifact::artifact_type::OutputArtifact; use buck2_build_api::analysis::calculation::RuleAnalysisCalculation; -use buck2_build_api::artifact_groups::deferred::DeferredTransitiveSetData; -use buck2_build_api::artifact_groups::deferred::TransitiveSetKey; use buck2_build_api::artifact_groups::ArtifactGroup; use buck2_build_api::artifact_groups::ResolvedArtifactGroup; -use buck2_build_api::deferred::calculation::DeferredCalculation; -use buck2_build_api::deferred::types::DeferredValueReady; use buck2_build_api::interpreter::rule_defs::artifact_tagging::ArtifactTag; use buck2_build_api::interpreter::rule_defs::cmd_args::value_as::ValueAsCommandLineLike; use buck2_build_api::interpreter::rule_defs::cmd_args::CommandLineArgLike; @@ -38,11 +34,15 @@ use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_core::provider::label::ProvidersName; use buck2_core::target::configured_target_label::ConfiguredTargetLabel; use buck2_node::nodes::configured::ConfiguredTargetNode; +use buck2_node::nodes::configured_node_ref::ConfiguredTargetNodeRefNode; +use buck2_node::nodes::configured_node_ref::ConfiguredTargetNodeRefNodeDeps; use buck2_node::nodes::configured_ref::ConfiguredGraphNodeRef; -use buck2_node::nodes::configured_ref::ConfiguredGraphNodeRefLookup; use buck2_node::query::query_functions::CONFIGURED_GRAPH_QUERY_FUNCTIONS; -use buck2_query::query::environment::LabeledNode; +use buck2_query::query::environment::deps; use buck2_query::query::environment::QueryEnvironment; +use buck2_query::query::environment::TraversalFilter; +use buck2_query::query::graph::dfs::dfs_postorder; +use buck2_query::query::graph::successors::AsyncChildVisitor; use buck2_query::query::syntax::simple::eval::error::QueryError; use buck2_query::query::syntax::simple::eval::file_set::FileSet; use buck2_query::query::syntax::simple::eval::set::TargetSet; @@ -53,15 +53,17 @@ use buck2_query::query::syntax::simple::functions::DefaultQueryFunctionsModule; use 
buck2_query::query::syntax::simple::functions::QueryFunctions; use buck2_query::query::traversal::async_depth_limited_traversal; use buck2_query::query::traversal::async_fast_depth_first_postorder_traversal; -use buck2_query::query::traversal::AsyncTraversalDelegate; +use buck2_query::query::traversal::NodeLookupId; use buck2_query::query_module; use buck2_query_parser::BinaryOp; use dice::DiceComputations; use dupe::Dupe; +use dupe::IterDupedExt; +use futures::FutureExt; use indexmap::IndexMap; -use thiserror::Error; +use starlark::values::UnpackValue; -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum AnalysisQueryError { #[error("file literals aren't supported in query attributes (got `{0}`)")] FileLiteralsNotAllowed(String), @@ -72,7 +74,7 @@ enum AnalysisQueryError { } #[async_trait] -pub trait ConfiguredGraphQueryEnvironmentDelegate: Send + Sync { +pub(crate) trait ConfiguredGraphQueryEnvironmentDelegate: Send + Sync { fn eval_literal(&self, literal: &str) -> anyhow::Result; async fn get_targets_from_template_placeholder_info( @@ -82,7 +84,7 @@ pub trait ConfiguredGraphQueryEnvironmentDelegate: Send + Sync { ) -> anyhow::Result>; } -pub struct ConfiguredGraphQueryEnvironment<'a> { +pub(crate) struct ConfiguredGraphQueryEnvironment<'a> { delegate: &'a dyn ConfiguredGraphQueryEnvironmentDelegate, } @@ -119,11 +121,11 @@ impl<'a> ConfiguredGraphFunctions<'a> { } impl<'a> ConfiguredGraphQueryEnvironment<'a> { - pub fn new(delegate: &'a dyn ConfiguredGraphQueryEnvironmentDelegate) -> Self { + pub(crate) fn new(delegate: &'a dyn ConfiguredGraphQueryEnvironmentDelegate) -> Self { Self { delegate } } - pub fn functions() -> impl QueryFunctions> { + pub(crate) fn functions() -> impl QueryFunctions> { struct Functions<'a> { defaults: DefaultQueryFunctionsModule>, extra_functions: ConfiguredGraphFunctions<'a>, @@ -202,7 +204,9 @@ impl<'a> QueryEnvironment for ConfiguredGraphQueryEnvironment<'a> { async fn eval_literals(&self, literal: &[&str]) -> anyhow::Result> { let mut result = TargetSet::new(); for lit in literal { - result.insert(ConfiguredGraphNodeRef(self.delegate.eval_literal(lit)?)); + result.insert(ConfiguredGraphNodeRef::new( + self.delegate.eval_literal(lit)?, + )); } Ok(result) } @@ -214,12 +218,14 @@ impl<'a> QueryEnvironment for ConfiguredGraphQueryEnvironment<'a> { async fn dfs_postorder( &self, root: &TargetSet, - delegate: &mut dyn AsyncTraversalDelegate, + delegate: impl AsyncChildVisitor, + visit: impl FnMut(Self::Target) -> anyhow::Result<()> + Send, ) -> anyhow::Result<()> { async_fast_depth_first_postorder_traversal( - &ConfiguredGraphNodeRefLookup, - root.iter().map(LabeledNode::node_ref), + &NodeLookupId, + root.iter().duped(), delegate, + visit, ) .await } @@ -227,27 +233,50 @@ impl<'a> QueryEnvironment for ConfiguredGraphQueryEnvironment<'a> { async fn depth_limited_traversal( &self, root: &TargetSet, - delegate: &mut dyn AsyncTraversalDelegate, + delegate: impl AsyncChildVisitor, + visit: impl FnMut(Self::Target) -> anyhow::Result<()> + Send, depth: u32, ) -> anyhow::Result<()> { - async_depth_limited_traversal(&ConfiguredGraphNodeRefLookup, root.iter(), delegate, depth) - .await + async_depth_limited_traversal(&NodeLookupId, root.iter(), delegate, visit, depth).await } async fn owner(&self, _paths: &FileSet) -> anyhow::Result> { Err(QueryError::FunctionUnimplemented("owner").into()) } -} -async fn dice_lookup_transitive_set( - ctx: &DiceComputations, - key: TransitiveSetKey, -) -> anyhow::Result> { - ctx.compute_deferred_data(&key).await + async fn 
targets_in_buildfile( + &self, + _paths: &FileSet, + ) -> anyhow::Result> { + Err(QueryError::FunctionUnimplemented("targets_in_buildfile").into()) + } + + async fn deps( + &self, + targets: &TargetSet, + depth: Option, + filter: Option<&dyn TraversalFilter>, + ) -> anyhow::Result> { + if depth.is_none() && filter.is_none() { + // TODO(nga): fast lookup with depth too. + let mut deps: TargetSet = TargetSet::new(); + dfs_postorder::( + targets.iter().map(|n| ConfiguredTargetNodeRefNode::new(n)), + ConfiguredTargetNodeRefNodeDeps, + |target| { + deps.insert(ConfiguredGraphNodeRef::new(target.to_node())); + Ok(()) + }, + )?; + Ok(deps) + } else { + deps(self, targets, depth, filter).await + } + } } async fn get_template_info_provider_artifacts( - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, configured_label: &ConfiguredTargetLabel, template_name: &str, ) -> anyhow::Result> { @@ -299,7 +328,7 @@ async fn get_template_info_provider_artifacts( } pub(crate) async fn get_from_template_placeholder_info<'x>( - ctx: &'x DiceComputations, + ctx: &'x mut DiceComputations<'_>, template_name: &'static str, targets: impl IntoIterator, ) -> anyhow::Result> { @@ -322,15 +351,20 @@ pub(crate) async fn get_from_template_placeholder_info<'x>( // This will contain the ArtifactGroups we encounter during our traversal (so only artifacts and top-level tset nodes). // Artifacts are put here to keep them in the correct order in the output, tsets are top-level tset nodes that we need // to traverse. - let artifacts = futures::future::try_join_all(targets.into_iter().map(|target| async move { - let artifacts = get_template_info_provider_artifacts(ctx, &target, template_name).await?; - anyhow::Ok( - artifacts - .into_iter() - .map(move |artifact| (target.dupe(), artifact)), - ) - })) - .await?; + let artifacts = ctx + .try_compute_join(targets, |ctx, target| { + async move { + let artifacts = + get_template_info_provider_artifacts(ctx, &target, template_name).await?; + anyhow::Ok( + artifacts + .into_iter() + .map(move |artifact| (target.dupe(), artifact)), + ) + } + .boxed() + }) + .await?; let mut artifacts: VecDeque<_> = artifacts.into_iter().flatten().collect(); // This will contain the TransitiveSetProjectionKey we encounter as top-level nodes and we will also put in TransitiveSetProjectionKey @@ -357,14 +391,14 @@ pub(crate) async fn get_from_template_placeholder_info<'x>( Ok(()) }; - match artifact.resolved()? { + match artifact.resolved_artifact(ctx).await? { ResolvedArtifactGroup::Artifact(artifact) => { - handle_artifact(&mut label_to_artifact, artifact)?; + handle_artifact(&mut label_to_artifact, &artifact)?; } ResolvedArtifactGroup::TransitiveSetProjection(tset_key) => { // We've encountered a "top-level" tset node that we haven't yet seen (as either a top-level or intermediate node, doesn't matter). if seen.insert(tset_key.dupe()) { - let tset_value = dice_lookup_transitive_set(ctx, tset_key.key.dupe()).await?; + let tset_value = tset_key.key.lookup(ctx).await?; // Now we can traverse this tset from that node. This is a different traversal than our top-level one as we will // be accessing tset internals directly and so we can actually traverse the starlark objects without going back through @@ -373,7 +407,7 @@ pub(crate) async fn get_from_template_placeholder_info<'x>( // We can't use tset's normal traverse because we need to avoid retraversing parts of the tset graph that we've already // traversed (through other top-level tset nodes). 
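This traversal is the standard worklist-plus-seen-set walk over a DAG whose nodes can be shared by many parents. A minimal self-contained sketch of the same technique, with a toy integer graph standing in for tset nodes (nothing below is buck2 API):

use std::collections::{HashSet, VecDeque};

// Every node is expanded exactly once even when it is reachable along
// several paths, because membership in `seen` is checked at enqueue time.
fn visit_each_once(
    roots: impl IntoIterator<Item = u32>,
    children: impl Fn(u32) -> Vec<u32>,
    mut visit: impl FnMut(u32),
) {
    let mut queue: VecDeque<u32> = roots.into_iter().collect();
    let mut seen: HashSet<u32> = queue.iter().copied().collect();
    while let Some(node) = queue.pop_front() {
        visit(node);
        for child in children(node) {
            if seen.insert(child) {
                queue.push_back(child);
            }
        }
    }
}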
let mut queue = VecDeque::new(); - queue.push_back(tset_value.as_value()); + queue.push_back(tset_value.to_value()); while let Some(v) = queue.pop_front() { let as_tset = TransitiveSet::from_value(v).context("invalid tset structure")?; @@ -402,7 +436,8 @@ pub(crate) async fn get_from_template_placeholder_info<'x>( // ignored } } - v.as_command_line_err()? + ValueAsCommandLineLike::unpack_value_err(v)? + .0 .visit_artifacts(&mut Visitor(&mut artifacts, target.dupe()))?; } diff --git a/app/buck2_query_impls/src/analysis/eval.rs b/app/buck2_query_impls/src/analysis/eval.rs index 365a4f39175eb..ea7f108fc2539 100644 --- a/app/buck2_query_impls/src/analysis/eval.rs +++ b/app/buck2_query_impls/src/analysis/eval.rs @@ -21,27 +21,30 @@ use crate::analysis::configured_graph::AnalysisConfiguredGraphQueryDelegate; use crate::analysis::configured_graph::AnalysisDiceQueryDelegate; use crate::analysis::environment::ConfiguredGraphQueryEnvironment; -pub fn init_eval_analysis_query() { +pub(crate) fn init_eval_analysis_query() { EVAL_ANALYSIS_QUERY.init(|ctx, query, resolved_literals| { Box::pin(eval_analysis_query(ctx, query, resolved_literals)) }); } async fn eval_analysis_query( - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, query: &str, resolved_literals: HashMap, ) -> anyhow::Result> { - let dice_query_delegate = Arc::new(AnalysisDiceQueryDelegate { ctx }); - let delegate = AnalysisConfiguredGraphQueryDelegate { - dice_query_delegate, - resolved_literals, - }; + ctx.with_linear_recompute(|ctx| async move { + let dice_query_delegate = Arc::new(AnalysisDiceQueryDelegate { ctx: &ctx }); + let delegate = AnalysisConfiguredGraphQueryDelegate { + dice_query_delegate, + resolved_literals, + }; - let functions = ConfiguredGraphQueryEnvironment::functions(); - let env = ConfiguredGraphQueryEnvironment::new(&delegate); - let evaluator = QueryEvaluator::new(&env, &functions); + let functions = ConfiguredGraphQueryEnvironment::functions(); + let env = ConfiguredGraphQueryEnvironment::new(&delegate); + let evaluator = QueryEvaluator::new(&env, &functions); - let result = evaluator.eval_query(query).await?; - result.try_into_targets() + let result = evaluator.eval_query(query).await?; + result.try_into_targets() + }) + .await } diff --git a/app/buck2_query_impls/src/analysis/evaluator.rs b/app/buck2_query_impls/src/analysis/evaluator.rs index eef44edb50bf1..8567bd24206c8 100644 --- a/app/buck2_query_impls/src/analysis/evaluator.rs +++ b/app/buck2_query_impls/src/analysis/evaluator.rs @@ -9,74 +9,112 @@ //! Implementation of common cquery/uquery pieces. 
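The multi-query behavior being reorganized in this file is inherited from Buck1: a query containing `%s` is expanded once per trailing argument, and each expansion is evaluated as an independent query. A toy sketch of those semantics, mirroring the two error cases the deleted code further down enforces (the real parsing now lives in buck2_query_parser's MaybeMultiQuery):

fn expand_multi_query(query: &str, args: &[String]) -> Result<Vec<String>, String> {
    if !query.contains("%s") {
        return if args.is_empty() {
            Ok(vec![query.to_owned()]) // ordinary single query
        } else {
            Err("query args supplied without any `%s` placeholder in the query".to_owned())
        };
    }
    args.iter()
        .map(|arg| {
            if arg.contains("%s") {
                // An argument containing `%s` itself is rejected outright.
                Err(format!("placeholder `%s` in query argument `{arg}`"))
            } else {
                Ok(query.replace("%s", arg))
            }
        })
        .collect()
}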
+use anyhow::Context; +use buck2_common::scope::scope_and_collect_with_dispatcher; +use buck2_events::dispatch::EventDispatcher; use buck2_query::query::environment::QueryEnvironment; use buck2_query::query::syntax::simple::eval::evaluator::QueryEvaluator; use buck2_query::query::syntax::simple::eval::literals::extract_target_literals; -use buck2_query::query::syntax::simple::eval::multi_query::process_multi_query; +use buck2_query::query::syntax::simple::eval::multi_query::MultiQueryResult; use buck2_query::query::syntax::simple::eval::values::QueryEvaluationResult; +use buck2_query::query::syntax::simple::eval::values::QueryEvaluationValue; use buck2_query::query::syntax::simple::functions::QueryFunctions; -use buck2_query_parser::placeholder::QUERY_PERCENT_S_PLACEHOLDER; +use buck2_query_parser::multi_query::MaybeMultiQuery; +use buck2_query_parser::multi_query::MultiQueryItem; use futures::Future; -use gazebo::prelude::*; -use starlark::collections::SmallSet; -use thiserror::Error; -#[derive(Debug, Error)] -enum EvalQueryError { - #[error("Query args supplied without any `%s` placeholder in the query, got args {}", .0.map(|x| format!("`{}`", x)).join(", "))] - ArgsWithoutPlaceholder(Vec), - #[error("Placeholder `%s` in query argument `{0}`")] - PlaceholderInPattern(String), +pub(crate) async fn eval_query< + F: QueryFunctions, + Env: QueryEnvironment, + Fut: Future> + Send, +>( + dispatcher: EventDispatcher, + functions: &F, + query: &str, + query_args: &[String], + environment: impl Fn(Vec) -> Fut + Send + Sync, +) -> anyhow::Result> { + let query = MaybeMultiQuery::parse(query, query_args)?; + match query { + MaybeMultiQuery::MultiQuery(queries) => { + let results = process_multi_query(dispatcher, functions, environment, &queries).await?; + Ok(QueryEvaluationResult::Multiple(results)) + } + MaybeMultiQuery::SingleQuery(query) => { + let result = eval_single_query(functions, &query, environment).await?; + Ok(QueryEvaluationResult::Single(result)) + } + } } -pub async fn eval_query< +async fn eval_single_query< F: QueryFunctions, Env: QueryEnvironment, Fut: Future>, - A: AsRef, >( functions: &F, query: &str, - query_args: &[A], - environment: impl FnOnce(Vec) -> Fut, -) -> anyhow::Result> { - let mut literals = SmallSet::new(); - if query.contains(QUERY_PERCENT_S_PLACEHOLDER) { - // We'd really like the query args to only be literals (file or target). - // If that didn't work, we'd really like query args to be well-formed expressions. - // Unfortunately Buck1 just substitutes in arbitrarily strings, where the query - // or query_args may not form anything remotely valid. - // We have to be backwards compatible :( - for q in query_args { - let q = q.as_ref(); - if q.contains(QUERY_PERCENT_S_PLACEHOLDER) { - return Err(EvalQueryError::PlaceholderInPattern(q.to_owned()).into()); + environment: impl Fn(Vec) -> Fut, +) -> anyhow::Result::Target>> +where + F: QueryFunctions, + Env: QueryEnvironment, + Fut: Future>, +{ + let literals = extract_target_literals(functions, query)?; + let env = environment(literals).await?; + QueryEvaluator::new(&env, functions).eval_query(query).await +} + +async fn process_multi_query( + dispatcher: EventDispatcher, + functions: &Qf, + env: impl Fn(Vec) -> EnvFut + Send + Sync, + queries: &[MultiQueryItem], +) -> anyhow::Result> +where + Qf: QueryFunctions, + Env: QueryEnvironment, + EnvFut: Future> + Send, +{ + // SAFETY: it is safe as long as we don't forget the future. We don't do that. 
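The unsafe block that follows leans on the usual scoped-spawn contract: the spawned futures borrow from the enclosing scope, which is sound only if the scope future is polled to completion rather than leaked. The index bookkeeping it sets up is the other half of the story: tasks finish in arbitrary order, so each one carries its position and the results are sorted afterwards. The same ordering technique with plain tokio, standing in for scope_and_collect_with_dispatcher:

use tokio::task::JoinSet;

// Tasks complete in arbitrary order; tagging each with its index and sorting
// at the end restores submission order.
async fn run_indexed(queries: Vec<String>) -> Vec<(usize, String)> {
    let mut set = JoinSet::new();
    for (i, q) in queries.into_iter().enumerate() {
        set.spawn(async move { (i, format!("evaluated: {q}")) });
    }
    let mut results = Vec::new();
    while let Some(joined) = set.join_next().await {
        results.push(joined.expect("task panicked"));
    }
    results.sort_by_key(|(i, _)| *i);
    results
}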
+ let ((), future_results) = unsafe { + scope_and_collect_with_dispatcher(dispatcher, |scope| { + for (i, query) in queries.iter().enumerate() { + let arg: String = query.arg.clone(); + let arg_1: String = query.arg.clone(); + let env = &env; + scope.spawn_cancellable( + async move { + let result = eval_single_query(functions, &query.query, env); + let result: buck2_error::Result<_> = result.await.map_err(|e| e.into()); + (i, arg, result) + }, + move || { + ( + i, + arg_1, + Err::<_, buck2_error::Error>( + anyhow::anyhow!("future was cancelled").into(), + ), + ) + }, + ) } - extract_target_literals( - functions, - &query.replace(QUERY_PERCENT_S_PLACEHOLDER, q), - &mut literals, - )?; - } - let env = environment(literals.into_iter().collect()).await?; - let results = process_multi_query(query, query_args, |input, query| { - let evaluator = QueryEvaluator::new(&env, functions); - async move { (input, evaluator.eval_query(&query).await) } }) - .await; - Ok(QueryEvaluationResult::Multiple(results)) - } else if !query_args.is_empty() { - Err( - EvalQueryError::ArgsWithoutPlaceholder(query_args.map(|s| s.as_ref().to_owned())) - .into(), - ) - } else { - extract_target_literals(functions, query, &mut literals)?; - let env = environment(literals.into_iter().collect()).await?; - Ok(QueryEvaluationResult::Single( - QueryEvaluator::new(&env, functions) - .eval_query(query) - .await?, - )) + .await + }; + + let mut results = Vec::with_capacity(future_results.len()); + for query_result in future_results { + let (i, query, result) = query_result.context("scope_and_collect failed")?; + results.push((i, query, result)); } + results.sort_by_key(|(i, _, _)| *i); + + let map = results + .into_iter() + .map(|(_, query, result)| (query, result)) + .collect(); + Ok(MultiQueryResult(map)) } diff --git a/app/buck2_query_impls/src/analysis/mod.rs b/app/buck2_query_impls/src/analysis/mod.rs deleted file mode 100644 index cbbe08fd2354d..0000000000000 --- a/app/buck2_query_impls/src/analysis/mod.rs +++ /dev/null @@ -1,13 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -pub(crate) mod configured_graph; -pub mod environment; -pub(crate) mod eval; -pub(crate) mod evaluator; diff --git a/app/buck2_query_impls/src/aquery.rs b/app/buck2_query_impls/src/aquery.rs new file mode 100644 index 0000000000000..5dbef6c8d3604 --- /dev/null +++ b/app/buck2_query_impls/src/aquery.rs @@ -0,0 +1,14 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +pub(crate) mod bxl; +pub(crate) mod environment; +pub(crate) mod evaluator; +pub(crate) mod find_matching_action; +pub(crate) mod functions; diff --git a/app/buck2_query_impls/src/aquery/bxl.rs b/app/buck2_query_impls/src/aquery/bxl.rs index 866503be77b3f..f24950d3e140b 100644 --- a/app/buck2_query_impls/src/aquery/bxl.rs +++ b/app/buck2_query_impls/src/aquery/bxl.rs @@ -16,14 +16,13 @@ use buck2_build_api::analysis::calculation::RuleAnalysisCalculation; use buck2_build_api::query::bxl::BxlAqueryFunctions; use buck2_build_api::query::bxl::NEW_BXL_AQUERY_FUNCTIONS; use buck2_common::dice::cells::HasCellResolver; -use buck2_common::package_boundary::HasPackageBoundaryExceptions; +use buck2_common::global_cfg_options::GlobalCfgOptions; use buck2_common::target_aliases::HasTargetAliasResolver; use buck2_core::configuration::compatibility::MaybeCompatible; use buck2_core::fs::project::ProjectRoot; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_core::target::configured_target_label::ConfiguredTargetLabel; -use buck2_core::target::label::TargetLabel; use buck2_query::query::syntax::simple::eval::file_set::FileSet; use buck2_query::query::syntax::simple::eval::set::TargetSet; use buck2_query::query::syntax::simple::eval::values::QueryValue; @@ -31,7 +30,10 @@ use buck2_query::query::syntax::simple::functions::helpers::CapturedExpr; use buck2_query::query::syntax::simple::functions::DefaultQueryFunctions; use buck2_query::query::syntax::simple::functions::DefaultQueryFunctionsModule; use dice::DiceComputations; +use dice::LinearRecomputeDiceComputations; use dupe::Dupe; +use futures::future::BoxFuture; +use futures::FutureExt; use itertools::Either; use crate::aquery::environment::AqueryDelegate; @@ -50,39 +52,40 @@ fn special_aquery_functions<'v>() -> AqueryFunctions<'v> { } struct BxlAqueryFunctionsImpl { - target_platform: Option, + global_cfg_options: GlobalCfgOptions, project_root: ProjectRoot, working_dir: ProjectRelativePathBuf, } impl BxlAqueryFunctionsImpl { - async fn aquery_delegate<'c>( + async fn aquery_delegate<'c, 'd>( &self, - dice: &'c mut DiceComputations, - ) -> anyhow::Result>> { - let cell_resolver = dice.get_cell_resolver().await?; - - let package_boundary_exceptions = dice.get_package_boundary_exceptions().await?; - let target_alias_resolver = dice - .target_alias_resolver_for_working_dir(&self.working_dir) + dice: &'c LinearRecomputeDiceComputations<'d>, + ) -> anyhow::Result>> { + let cell_resolver = dice.get().get_cell_resolver().await?; + let cell_alias_resolver = dice + .get() + .get_cell_alias_resolver_for_dir(&self.working_dir) .await?; + let target_alias_resolver = dice.get().target_alias_resolver().await?; + let query_data = Arc::new(DiceQueryData::new( - self.target_platform.clone(), + self.global_cfg_options.dupe(), cell_resolver.dupe(), + cell_alias_resolver, &self.working_dir, self.project_root.dupe(), target_alias_resolver, )?); - let query_delegate = - DiceQueryDelegate::new(dice, cell_resolver, package_boundary_exceptions, query_data); + let query_delegate = DiceQueryDelegate::new(&dice, query_data); Ok(Arc::new(DiceAqueryDelegate::new(query_delegate).await?)) } - async fn aquery_env<'c>( + async fn aquery_env<'c, 'd>( &self, - delegate: &Arc>, + delegate: &Arc>, ) -> anyhow::Result> { let literals = delegate.query_data().dupe(); Ok(AqueryEnvironment::new(delegate.dupe(), literals)) @@ -93,123 +96,155 @@ impl BxlAqueryFunctionsImpl { impl BxlAqueryFunctions for 
BxlAqueryFunctionsImpl { async fn allpaths( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, from: &TargetSet, to: &TargetSet, + captured_expr: Option<&CapturedExpr>, ) -> anyhow::Result> { - Ok(aquery_functions() - .allpaths( - &self.aquery_env(&self.aquery_delegate(dice).await?).await?, - from, - to, - ) - .await?) + dice.with_linear_recompute(|dice| async move { + Ok(aquery_functions() + .allpaths( + &self.aquery_env(&self.aquery_delegate(&dice).await?).await?, + &DefaultQueryFunctionsModule::new(), + from, + to, + captured_expr, + ) + .await?) + }) + .await } async fn somepath( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, from: &TargetSet, to: &TargetSet, + captured_expr: Option<&CapturedExpr>, ) -> anyhow::Result> { - Ok(aquery_functions() - .somepath( - &self.aquery_env(&self.aquery_delegate(dice).await?).await?, - from, - to, - ) - .await?) + dice.with_linear_recompute(|dice| async move { + Ok(aquery_functions() + .somepath( + &self.aquery_env(&self.aquery_delegate(&dice).await?).await?, + &DefaultQueryFunctionsModule::new(), + from, + to, + captured_expr, + ) + .await?) + }) + .await } async fn deps( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, targets: &TargetSet, deps: Option, captured_expr: Option<&CapturedExpr>, ) -> anyhow::Result> { - Ok(aquery_functions() - .deps( - &self.aquery_env(&self.aquery_delegate(dice).await?).await?, - &DefaultQueryFunctionsModule::new(), - targets, - deps, - captured_expr, - ) - .await?) + dice.with_linear_recompute(|dice| async move { + aquery_functions() + .deps( + &self.aquery_env(&self.aquery_delegate(&dice).await?).await?, + &DefaultQueryFunctionsModule::new(), + targets, + deps, + captured_expr, + ) + .await + }) + .await } async fn rdeps( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, universe: &TargetSet, targets: &TargetSet, depth: Option, + captured_expr: Option<&CapturedExpr>, ) -> anyhow::Result> { - Ok(aquery_functions() - .rdeps( - &self.aquery_env(&self.aquery_delegate(dice).await?).await?, - universe, - targets, - depth, - ) - .await?) + dice.with_linear_recompute(|dice| async move { + aquery_functions() + .rdeps( + &self.aquery_env(&self.aquery_delegate(&dice).await?).await?, + &DefaultQueryFunctionsModule::new(), + universe, + targets, + depth, + captured_expr, + ) + .await + }) + .await } async fn testsof( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, targets: &TargetSet, ) -> anyhow::Result> { - Ok(aquery_functions() - .testsof( - &self.aquery_env(&self.aquery_delegate(dice).await?).await?, - targets, - ) - .await?) + dice.with_linear_recompute(|dice| async move { + aquery_functions() + .testsof( + &self.aquery_env(&self.aquery_delegate(&dice).await?).await?, + targets, + ) + .await + }) + .await } async fn owner( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, file_set: &FileSet, ) -> anyhow::Result> { - Ok(aquery_functions() - .owner( - &self.aquery_env(&self.aquery_delegate(dice).await?).await?, - file_set, - ) - .await?) 
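Each method in this impl is rewritten into the same shape: the exclusive &mut DiceComputations immediately enters with_linear_recompute, and the delegate and environment are built against the linear handle, which can mint any number of short-lived computation handles. A toy model of why that helps (Ctx and LinearCtx are stand-ins, not dice types):

// The exclusive context cannot be shared, so helpers that each need their
// own handle go through a linear wrapper that hands out fresh ones.
struct Ctx;
struct LinearCtx;

impl Ctx {
    // Simplified, synchronous stand-in for dice's with_linear_recompute.
    fn with_linear_recompute<R>(&mut self, f: impl FnOnce(&LinearCtx) -> R) -> R {
        f(&LinearCtx)
    }
}

impl LinearCtx {
    fn get(&self) -> Ctx {
        Ctx // mint a per-caller handle
    }
}

fn query_entry_point(ctx: &mut Ctx) -> u32 {
    ctx.with_linear_recompute(|linear| {
        // Several helpers can each take a handle without fighting over
        // a single `&mut` borrow.
        let _delegate_handle = linear.get();
        let _env_handle = linear.get();
        42
    })
}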
+ dice.with_linear_recompute(|dice| async move { + aquery_functions() + .owner( + &self.aquery_env(&self.aquery_delegate(&dice).await?).await?, + file_set, + ) + .await + }) + .await } async fn get_target_set( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, configured_labels: Vec, ) -> anyhow::Result<(Vec, TargetSet)> { - let delegate = &self.aquery_delegate(dice).await?; - let dice = delegate.ctx(); - let target_sets = futures::future::join_all(configured_labels.iter().map( - async move |label: &ConfiguredProvidersLabel| { - let maybe_result = dice.get_analysis_result(label.target()).await?; + let target_sets = dice + .try_compute_join( + configured_labels, + |ctx: &mut DiceComputations, + label: ConfiguredProvidersLabel| + -> BoxFuture< + anyhow::Result>>, + > { + async move { + let maybe_result = ctx.get_analysis_result(label.target()).await?; - match maybe_result { - MaybeCompatible::Incompatible(reason) => { - // Aquery skips incompatible targets by default on the CLI, but let's at least - // log the error messages to BXL's stderr - Ok(Either::Left(reason.target.dupe())) - } - MaybeCompatible::Compatible(result) => { - let target_set = delegate - .get_target_set_from_analysis(label, result.clone()) - .await?; - Ok(Either::Right(target_set)) + match maybe_result { + MaybeCompatible::Incompatible(reason) => { + // Aquery skips incompatible targets by default on the CLI, but let's at least + // log the error messages to BXL's stderr + Ok(Either::Left(reason.target.dupe())) + } + MaybeCompatible::Compatible(result) => { + ctx.with_linear_recompute(|ctx| async move { + let delegate = &self.aquery_delegate(&ctx).await?; + let target_set = delegate + .get_target_set_from_analysis(&label, result.clone()) + .await?; + Ok(Either::Right(target_set)) + }) + .await + } + } } - } - }, - )) - .await - .into_iter() - .map(|r| match r { - Ok(r) => Ok(r), - Err(e) => Err(e), - }) - .collect::>>()?; + .boxed() + }, + ) + .await?; let mut result = TargetSet::new(); let mut incompatible_targets = Vec::new(); @@ -223,52 +258,60 @@ impl BxlAqueryFunctions for BxlAqueryFunctionsImpl { async fn all_outputs( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, targets: &TargetSet, ) -> anyhow::Result> { - let query_val = special_aquery_functions() - .all_outputs( - &self.aquery_env(&self.aquery_delegate(dice).await?).await?, - targets.clone(), - ) - .await?; + dice.with_linear_recompute(|dice| async move { + let query_val = special_aquery_functions() + .all_outputs( + &self.aquery_env(&self.aquery_delegate(&dice).await?).await?, + targets.clone(), + ) + .await?; - match &query_val { - QueryValue::TargetSet(s) => Ok(s.clone()), - _ => unreachable!("all_outputs should always return target set"), - } + match &query_val { + QueryValue::TargetSet(s) => Ok(s.clone()), + _ => unreachable!("all_outputs should always return target set"), + } + }) + .await } async fn all_actions( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, targets: &TargetSet, ) -> anyhow::Result> { - let query_val = special_aquery_functions() - .all_actions( - &self.aquery_env(&self.aquery_delegate(dice).await?).await?, - targets.clone(), - ) - .await?; + dice.with_linear_recompute(|dice| async move { + let query_val = special_aquery_functions() + .all_actions( + &self.aquery_env(&self.aquery_delegate(&dice).await?).await?, + targets.clone(), + ) + .await?; - match &query_val { - QueryValue::TargetSet(s) => Ok(s.clone()), - _ => unreachable!("all_actions should always return 
target set"), - } + match &query_val { + QueryValue::TargetSet(s) => Ok(s.clone()), + _ => unreachable!("all_actions should always return target set"), + } + }) + .await } } pub(crate) fn init_new_bxl_aquery_functions() { - NEW_BXL_AQUERY_FUNCTIONS.init(|target_platform, project_root, cell_name, cell_resolver| { - Box::pin(async move { - let cell = cell_resolver.get(cell_name)?; - let working_dir = cell.path().as_project_relative_path().to_buf(); + NEW_BXL_AQUERY_FUNCTIONS.init( + |global_cfg_options, project_root, cell_name, cell_resolver| { + Box::pin(async move { + let cell = cell_resolver.get(cell_name)?; + let working_dir = cell.path().as_project_relative_path().to_buf(); - Result::<Box<dyn BxlAqueryFunctions>, _>::Ok(Box::new(BxlAqueryFunctionsImpl { - target_platform, - project_root, - working_dir, - })) - }) - }) + Result::<Box<dyn BxlAqueryFunctions>, _>::Ok(Box::new(BxlAqueryFunctionsImpl { + global_cfg_options: global_cfg_options.dupe(), + project_root, + working_dir, + })) + }) + }, + ) } diff --git a/app/buck2_query_impls/src/aquery/environment.rs b/app/buck2_query_impls/src/aquery/environment.rs index 47918c5052e98..3d6eb8d845548 100644 --- a/app/buck2_query_impls/src/aquery/environment.rs +++ b/app/buck2_query_impls/src/aquery/environment.rs @@ -18,6 +18,7 @@ use buck2_build_api::artifact_groups::ArtifactGroup; use buck2_core::configuration::compatibility::MaybeCompatible; use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_query::query::environment::QueryEnvironment; +use buck2_query::query::graph::successors::AsyncChildVisitor; use buck2_query::query::syntax::simple::eval::error::QueryError; use buck2_query::query::syntax::simple::eval::file_set::FileSet; use buck2_query::query::syntax::simple::eval::set::TargetSet; @@ -27,7 +28,6 @@ use buck2_query::query::syntax::simple::functions::HasModuleDescription; use buck2_query::query::traversal::async_depth_first_postorder_traversal; use buck2_query::query::traversal::async_depth_limited_traversal; use buck2_query::query::traversal::AsyncNodeLookup; -use buck2_query::query::traversal::AsyncTraversalDelegate; use dice::DiceComputations; use crate::aquery::functions::AqueryFunctions; @@ -36,10 +36,10 @@ use crate::uquery::environment::QueryLiterals; /// AqueryDelegate resolves information needed by the QueryEnvironment.
#[async_trait] -pub trait AqueryDelegate: Send + Sync { +pub(crate) trait AqueryDelegate: Send + Sync { fn cquery_delegate(&self) -> &dyn CqueryDelegate; - fn ctx(&self) -> &DiceComputations; + fn ctx<'a>(&'a self) -> DiceComputations<'a>; async fn get_node(&self, key: &ActionKey) -> anyhow::Result; @@ -55,13 +55,13 @@ pub trait AqueryDelegate: Send + Sync { ) -> anyhow::Result>; } -pub struct AqueryEnvironment<'c> { +pub(crate) struct AqueryEnvironment<'c> { pub(super) delegate: Arc, literals: Arc + 'c>, } impl<'c> AqueryEnvironment<'c> { - pub fn new( + pub(crate) fn new( delegate: Arc, literals: Arc + 'c>, ) -> Self { @@ -104,7 +104,7 @@ impl<'c> QueryEnvironment for AqueryEnvironment<'c> { async fn eval_literals(&self, literals: &[&str]) -> anyhow::Result> { self.literals - .eval_literals(literals, self.delegate.ctx()) + .eval_literals(literals, &mut self.delegate.ctx()) .await } @@ -119,7 +119,8 @@ impl<'c> QueryEnvironment for AqueryEnvironment<'c> { async fn dfs_postorder( &self, root: &TargetSet, - traversal_delegate: &mut dyn AsyncTraversalDelegate, + traversal_delegate: impl AsyncChildVisitor, + visit: impl FnMut(Self::Target) -> anyhow::Result<()> + Send, ) -> anyhow::Result<()> { // TODO(cjhopman): The query nodes deps are going to flatten the tset structure for its deps. In a typical // build graph, a traversal over just the graph of ActionQueryNode ends up being an `O(n)` operation at each @@ -134,6 +135,7 @@ impl<'c> QueryEnvironment for AqueryEnvironment<'c> { }, root.iter_names(), traversal_delegate, + visit, ) .await } @@ -141,7 +143,8 @@ impl<'c> QueryEnvironment for AqueryEnvironment<'c> { async fn depth_limited_traversal( &self, root: &TargetSet, - delegate: &mut dyn AsyncTraversalDelegate, + delegate: impl AsyncChildVisitor, + visit: impl FnMut(Self::Target) -> anyhow::Result<()> + Send, depth: u32, ) -> anyhow::Result<()> { // TODO(cjhopman): See above. 
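The signature change threaded through these traversals splits the old AsyncTraversalDelegate into two independent parameters: a child enumerator (AsyncChildVisitor) and a FnMut that visits each node. A synchronous sketch of a depth-limited traversal in that split style (toy graph, not the buck2_query implementation):

use std::collections::HashSet;

// Visits everything reachable from `roots` in at most `depth` edge hops,
// with child discovery and node visiting supplied separately.
fn depth_limited<N: Copy + Eq + std::hash::Hash>(
    roots: &[N],
    children: impl Fn(N) -> Vec<N>,
    mut visit: impl FnMut(N),
    depth: u32,
) {
    let mut seen: HashSet<N> = roots.iter().copied().collect();
    let mut frontier: Vec<N> = roots.to_vec();
    for level in 0..=depth {
        let mut next = Vec::new();
        for node in frontier {
            visit(node);
            if level < depth {
                for child in children(node) {
                    if seen.insert(child) {
                        next.push(child);
                    }
                }
            }
        }
        frontier = next;
        if frontier.is_empty() {
            break;
        }
    }
}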
@@ -152,6 +155,7 @@ impl<'c> QueryEnvironment for AqueryEnvironment<'c> { }, root.iter_names(), delegate, + visit, depth, ) .await @@ -160,6 +164,13 @@ impl<'c> QueryEnvironment for AqueryEnvironment<'c> { async fn owner(&self, _paths: &FileSet) -> anyhow::Result> { Err(QueryError::NotAvailableInContext("owner").into()) } + + async fn targets_in_buildfile( + &self, + _paths: &FileSet, + ) -> anyhow::Result> { + Err(QueryError::NotAvailableInContext("targets_in_buildfile").into()) + } } struct AqueryNodeLookup<'a, 'c> { diff --git a/app/buck2_query_impls/src/aquery/evaluator.rs b/app/buck2_query_impls/src/aquery/evaluator.rs index b6ffd30769e6b..217e83f63e82f 100644 --- a/app/buck2_query_impls/src/aquery/evaluator.rs +++ b/app/buck2_query_impls/src/aquery/evaluator.rs @@ -11,10 +11,11 @@ use std::sync::Arc; use buck2_build_api::actions::query::ActionQueryNode; +use buck2_common::events::HasEvents; +use buck2_common::global_cfg_options::GlobalCfgOptions; use buck2_core::fs::project_rel_path::ProjectRelativePath; -use buck2_core::target::label::TargetLabel; use buck2_query::query::syntax::simple::eval::values::QueryEvaluationResult; -use dice::DiceComputations; +use dice::LinearRecomputeDiceComputations; use dupe::Dupe; use crate::analysis::evaluator::eval_query; @@ -25,11 +26,11 @@ use crate::dice::aquery::DiceAqueryDelegate; use crate::dice::get_dice_query_delegate; use crate::uquery::environment::PreresolvedQueryLiterals; -pub(crate) struct AqueryEvaluator<'c> { - dice_query_delegate: Arc>, +pub(crate) struct AqueryEvaluator<'c, 'd> { + dice_query_delegate: Arc>, } -impl AqueryEvaluator<'_> { +impl AqueryEvaluator<'_, '_> { pub(crate) async fn eval_query( &self, query: &str, @@ -37,44 +38,53 @@ impl AqueryEvaluator<'_> { ) -> anyhow::Result> { let functions = aquery_functions(); - eval_query(&functions, query, query_args, async move |literals| { - let resolved_literals = PreresolvedQueryLiterals::pre_resolve( - &**self.dice_query_delegate.query_data(), - &literals, - self.dice_query_delegate.ctx(), - ) - .await; - Ok(AqueryEnvironment::new( - self.dice_query_delegate.dupe(), - Arc::new(resolved_literals), - )) - }) + eval_query( + self.dice_query_delegate + .ctx() + .per_transaction_data() + .get_dispatcher() + .dupe(), + &functions, + query, + query_args, + |literals| async move { + let resolved_literals = PreresolvedQueryLiterals::pre_resolve( + &**self.dice_query_delegate.query_data(), + &literals, + &mut self.dice_query_delegate.ctx(), + ) + .await; + Ok(AqueryEnvironment::new( + self.dice_query_delegate.dupe(), + Arc::new(resolved_literals), + )) + }, + ) .await } } /// Evaluates some query expression. TargetNodes are resolved via the interpreter from /// the provided DiceCtx. 
-pub(crate) async fn get_aquery_evaluator<'a, 'c: 'a>( - ctx: &'c DiceComputations, +pub(crate) async fn get_aquery_evaluator<'a, 'c: 'a, 'd>( + ctx: &'c LinearRecomputeDiceComputations<'d>, working_dir: &'a ProjectRelativePath, - global_target_platform: Option, -) -> anyhow::Result> { + global_cfg_options: GlobalCfgOptions, +) -> anyhow::Result> { let dice_query_delegate = - get_dice_aquery_delegate(ctx, working_dir, global_target_platform).await?; + get_dice_aquery_delegate(ctx, working_dir, global_cfg_options).await?; Ok(AqueryEvaluator { dice_query_delegate, }) } // Provides the dice query delegate for aquery evaluator -pub(crate) async fn get_dice_aquery_delegate<'a, 'c: 'a>( - ctx: &'c DiceComputations, +pub(crate) async fn get_dice_aquery_delegate<'a, 'c: 'a, 'd>( + ctx: &'c LinearRecomputeDiceComputations<'d>, working_dir: &'a ProjectRelativePath, - global_target_platform: Option, -) -> anyhow::Result>> { - let dice_query_delegate = - get_dice_query_delegate(ctx, working_dir, global_target_platform).await?; + global_cfg_options: GlobalCfgOptions, +) -> anyhow::Result>> { + let dice_query_delegate = get_dice_query_delegate(ctx, working_dir, global_cfg_options).await?; let dice_query_delegate = Arc::new(DiceAqueryDelegate::new(dice_query_delegate).await?); Ok(dice_query_delegate) } diff --git a/app/buck2_query_impls/src/aquery/find_matching_action.rs b/app/buck2_query_impls/src/aquery/find_matching_action.rs index 4775d7727a620..a17f1c776912e 100644 --- a/app/buck2_query_impls/src/aquery/find_matching_action.rs +++ b/app/buck2_query_impls/src/aquery/find_matching_action.rs @@ -7,24 +7,36 @@ * of this source tree. */ +use std::borrow::Cow; + use buck2_artifact::actions::key::ActionKey; use buck2_artifact::artifact::build_artifact::BuildArtifact; -use buck2_artifact::artifact::provide_outputs::ProvideOutputs; use buck2_build_api::actions::query::ActionQueryNode; use buck2_build_api::actions::query::FIND_MATCHING_ACTION; use buck2_build_api::analysis::AnalysisResult; +use buck2_common::global_cfg_options::GlobalCfgOptions; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; use buck2_core::fs::project_rel_path::ProjectRelativePath; -use buck2_core::target::label::TargetLabel; use dice::DiceComputations; +use dupe::Dupe; +use dupe::IterDupedExt; +use either::Either; use tracing::debug; use crate::aquery::evaluator::get_dice_aquery_delegate; +// Given the buckout path, how do we search actions? +enum ActionKeyMatch<'v> { + // This action key exactly produces the output path. + Exact(&'v ActionKey), + // Builds an output that is in the path. 
+ OutputsOf(&'v BuildArtifact), +} + fn check_output_path<'v>( build_artifact: &'v BuildArtifact, - path_to_check: &'v ForwardRelativePathBuf, -) -> anyhow::Result> { + path_to_check: &ForwardRelativePathBuf, +) -> anyhow::Result>> { let path = build_artifact.get_path().path(); debug!( @@ -32,51 +44,88 @@ fn check_output_path<'v>( path, path_to_check ); - if path_to_check.starts_with(path_to_check) { - Ok(Some(build_artifact.key())) + let key = build_artifact.key(); + + if path_to_check == path { + Ok(Some(ActionKeyMatch::Exact(key))) + } else if path_to_check.starts_with(path) { + Ok(Some(ActionKeyMatch::OutputsOf(build_artifact))) } else { Ok(None) } } async fn find_matching_action( - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, working_dir: &ProjectRelativePath, - global_target_platform: Option, + global_cfg_options: &GlobalCfgOptions, analysis: &AnalysisResult, path_after_target_name: ForwardRelativePathBuf, ) -> anyhow::Result> { - let dice_aquery_delegate = - get_dice_aquery_delegate(ctx, working_dir, global_target_platform.clone()).await?; + ctx.with_linear_recompute(|ctx| async move { + let dice_aquery_delegate = + get_dice_aquery_delegate(&ctx, working_dir, global_cfg_options.dupe()).await?; - for entry in analysis.iter_deferreds() { - match provider::request_value::(entry.as_complex()) { - Some(outputs) => { - let outputs = outputs.0?; - for build_artifact in &outputs { - match check_output_path(build_artifact, &path_after_target_name)? { - Some(action_key) => { - return Ok(Some( - dice_aquery_delegate.get_action_node(action_key).await?, - )); - } - None => (), + // Try to find exact path match first. If there are no exact matches, try to find an action + // that starts with the relevant part of the output path (this case is for targets that declare + // directories as outputs). + // + // FIXME(@wendyy): If we've iterated over all build artifacts and still haven't found an exact + // action key match, then return a possible action key with the shortest path. This can happen + // if a target declared an output directory instead of an artifact. As a best effort, we keep + // track of the possible build artifact with the shortest path to try find the action that produced + // the top-most directory. To fix this properly, we would need to let the action key or build + // artifact itself know if the output was a directory, which is nontrivial. + + // TODO(cjhopman): We should probably just support iterating over declared artifacts rather than + // this more complex approach. + let mut maybe_match: Option = None; + + for build_artifact in analysis + .analysis_values() + .iter_dynamic_lambda_outputs() + .chain(analysis.analysis_values().iter_actions().flat_map( + |v| match v.action().outputs() { + Cow::Borrowed(v) => Either::Left(v.iter().duped()), + Cow::Owned(v) => Either::Right(v.into_iter()), + }, + )) + { + match check_output_path(&build_artifact, &path_after_target_name)? 
{ + Some(action_key_match) => match action_key_match { + ActionKeyMatch::Exact(key) => { + return Ok(Some(dice_aquery_delegate.get_action_node(key).await?)); } - } + ActionKeyMatch::OutputsOf(artifact) => match &maybe_match { + Some(maybe) => { + if artifact.get_path().len() < maybe.get_path().len() { + maybe_match = Some(artifact.dupe()); + } + } + None => maybe_match = Some(artifact.dupe()), + }, + }, + None => (), } - None => debug!("Could not extract outputs from deferred table entry"), } - } - Ok(None) + + match maybe_match { + Some(maybe) => Ok(Some( + dice_aquery_delegate.get_action_node(maybe.key()).await?, + )), + None => Ok(None), + } + }) + .await } pub(crate) fn init_find_matching_action() { FIND_MATCHING_ACTION.init( - |ctx, working_dir, global_target_platform, analysis, path_after_target_name| { + |ctx, working_dir, global_cfg_options, analysis, path_after_target_name| { Box::pin(find_matching_action( ctx, working_dir, - global_target_platform, + global_cfg_options, analysis, path_after_target_name, )) diff --git a/app/buck2_query_impls/src/aquery/functions.rs b/app/buck2_query_impls/src/aquery/functions.rs index 96aef5794e1a0..19932b49589c0 100644 --- a/app/buck2_query_impls/src/aquery/functions.rs +++ b/app/buck2_query_impls/src/aquery/functions.rs @@ -11,7 +11,6 @@ use std::fmt; use std::fmt::Debug; use std::marker::PhantomData; -use buck2_artifact::artifact::provide_outputs::ProvideActionKey; use buck2_build_api::actions::query::ActionQueryNode; use buck2_build_api::actions::query::ActionQueryNodeData; use buck2_query::query::syntax::simple::eval::error::QueryError; @@ -23,10 +22,11 @@ use buck2_query::query::syntax::simple::functions::DefaultQueryFunctionsModule; use buck2_query::query::syntax::simple::functions::QueryFunctions; use buck2_query::query_module; use buck2_query_parser::BinaryOp; +use dupe::Dupe; use crate::aquery::environment::AqueryEnvironment; -pub fn aquery_functions<'a>() -> impl QueryFunctions> { +pub(crate) fn aquery_functions<'a>() -> impl QueryFunctions> { struct Functions<'a> { defaults: DefaultQueryFunctionsModule>, extra_functions: AqueryFunctions<'a>, @@ -86,11 +86,8 @@ impl<'a> AqueryFunctions<'a> { analysis .providers()? .provider_collection() - .default_info() - .for_each_output(&mut |output| { - outputs.push(output); - Ok(()) - })?; + .default_info()? + .for_each_output(&mut |output| outputs.push(output))?; } } @@ -117,17 +114,15 @@ impl<'a> AqueryFunctions<'a> { res.insert(node); } ActionQueryNodeData::Analysis(analysis) => { - for entry in analysis.analysis_result().iter_deferreds() { - action_keys.extend(provider::request_value::( - entry.as_complex(), - )); + for action in analysis.analysis_result().analysis_values().iter_actions() { + action_keys.push(action.key().dupe()); } } } } - let nodes = futures::future::try_join_all( - action_keys.iter().map(|key| env.delegate.get_node(&key.0)), + let nodes = buck2_util::future::try_join_all( + action_keys.iter().map(|key| env.delegate.get_node(&key)), ) .await?; res.extend(nodes); diff --git a/app/buck2_query_impls/src/aquery/mod.rs b/app/buck2_query_impls/src/aquery/mod.rs deleted file mode 100644 index 7ed1453b7bee2..0000000000000 --- a/app/buck2_query_impls/src/aquery/mod.rs +++ /dev/null @@ -1,14 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. 
- * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -pub mod bxl; -pub mod environment; -pub mod evaluator; -pub(crate) mod find_matching_action; -pub mod functions; diff --git a/app/buck2_query_impls/src/cquery.rs b/app/buck2_query_impls/src/cquery.rs new file mode 100644 index 0000000000000..6849c9eec9575 --- /dev/null +++ b/app/buck2_query_impls/src/cquery.rs @@ -0,0 +1,12 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub(crate) mod bxl; +pub(crate) mod environment; +pub(crate) mod evaluator; diff --git a/app/buck2_query_impls/src/cquery/bxl.rs b/app/buck2_query_impls/src/cquery/bxl.rs index f8987a94c56b7..ce0a25d7c5a8e 100644 --- a/app/buck2_query_impls/src/cquery/bxl.rs +++ b/app/buck2_query_impls/src/cquery/bxl.rs @@ -12,14 +12,12 @@ use std::sync::Arc; use async_trait::async_trait; use buck2_build_api::query::bxl::BxlCqueryFunctions; use buck2_build_api::query::bxl::NEW_BXL_CQUERY_FUNCTIONS; -use buck2_build_api::query::oneshot::CqueryOwnerBehavior; use buck2_common::dice::cells::HasCellResolver; -use buck2_common::package_boundary::HasPackageBoundaryExceptions; +use buck2_common::global_cfg_options::GlobalCfgOptions; use buck2_common::target_aliases::HasTargetAliasResolver; use buck2_core::configuration::compatibility::MaybeCompatible; use buck2_core::fs::project::ProjectRoot; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; -use buck2_core::target::label::TargetLabel; use buck2_node::configured_universe::CqueryUniverse; use buck2_node::nodes::configured::ConfiguredTargetNode; use buck2_query::query::syntax::simple::eval::file_set::FileSet; @@ -28,6 +26,7 @@ use buck2_query::query::syntax::simple::functions::helpers::CapturedExpr; use buck2_query::query::syntax::simple::functions::DefaultQueryFunctions; use buck2_query::query::syntax::simple::functions::DefaultQueryFunctionsModule; use dice::DiceComputations; +use dice::LinearRecomputeDiceComputations; use dupe::Dupe; use crate::cquery::environment::CqueryEnvironment; @@ -39,46 +38,43 @@ fn cquery_functions<'v>() -> DefaultQueryFunctions> { } struct BxlCqueryFunctionsImpl { - target_platform: Option, + global_cfg_options: GlobalCfgOptions, project_root: ProjectRoot, working_dir: ProjectRelativePathBuf, } impl BxlCqueryFunctionsImpl { - async fn setup_dice_query_delegate<'c>( + async fn setup_dice_query_delegate<'c, 'd>( &self, - dice: &'c mut DiceComputations, - ) -> anyhow::Result> { - let cell_resolver = dice.get_cell_resolver().await?; - - let package_boundary_exceptions = dice.get_package_boundary_exceptions().await?; - let target_alias_resolver = dice - .target_alias_resolver_for_working_dir(&self.working_dir) + dice: &'c LinearRecomputeDiceComputations<'d>, + ) -> anyhow::Result> { + let cell_resolver = dice.get().get_cell_resolver().await?; + let cell_alias_resolver = dice + .get() + .get_cell_alias_resolver_for_dir(&self.working_dir) .await?; + let target_alias_resolver = dice.get().target_alias_resolver().await?; + let query_data = Arc::new(DiceQueryData::new( - self.target_platform.dupe(), + 
self.global_cfg_options.dupe(), cell_resolver.dupe(), + cell_alias_resolver, &self.working_dir, self.project_root.dupe(), target_alias_resolver, )?); - Ok(DiceQueryDelegate::new( - dice, - cell_resolver, - package_boundary_exceptions, - query_data.dupe(), - )) + Ok(DiceQueryDelegate::new(dice, query_data.dupe())) } - async fn cquery_env<'c>( + async fn cquery_env<'c, 'd>( &self, - dice_query_delegate: &'c DiceQueryDelegate<'c>, + dice_query_delegate: &'c DiceQueryDelegate<'c, 'd>, universe: Option<&TargetSet>, ) -> anyhow::Result> { let universe = match universe { - Some(u) => Some(CqueryUniverse::build(u).await?), + Some(u) => Some(Arc::new(CqueryUniverse::build(u)?)), None => None, }; let literals = dice_query_delegate.query_data().dupe(); @@ -86,7 +82,6 @@ impl BxlCqueryFunctionsImpl { dice_query_delegate, literals, universe, - CqueryOwnerBehavior::Correct, )) } } @@ -95,132 +90,164 @@ impl BxlCqueryFunctionsImpl { impl BxlCqueryFunctions for BxlCqueryFunctionsImpl { async fn allpaths( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, from: &TargetSet, to: &TargetSet, + captured_expr: Option<&CapturedExpr>, ) -> anyhow::Result> { - Ok(cquery_functions() - .allpaths( - &self - .cquery_env(&self.setup_dice_query_delegate(dice).await?, None) - .await?, - from, - to, - ) - .await?) + dice.with_linear_recompute(|dice| async move { + Ok(cquery_functions() + .allpaths( + &self + .cquery_env(&self.setup_dice_query_delegate(&dice).await?, None) + .await?, + &DefaultQueryFunctionsModule::new(), + from, + to, + captured_expr, + ) + .await?) + }) + .await } async fn somepath( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, from: &TargetSet, to: &TargetSet, + captured_expr: Option<&CapturedExpr>, ) -> anyhow::Result> { - Ok(cquery_functions() - .somepath( - &self - .cquery_env(&self.setup_dice_query_delegate(dice).await?, None) - .await?, - from, - to, - ) - .await?) + dice.with_linear_recompute(|dice| async move { + Ok(cquery_functions() + .somepath( + &self + .cquery_env(&self.setup_dice_query_delegate(&dice).await?, None) + .await?, + &DefaultQueryFunctionsModule::new(), + from, + to, + captured_expr, + ) + .await?) + }) + .await } async fn owner( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, file_set: &FileSet, target_universe: Option<&TargetSet>, ) -> anyhow::Result> { - let query_delegate = self.setup_dice_query_delegate(dice).await?; - let cquery_env = self.cquery_env(&query_delegate, target_universe).await?; - Ok(cquery_functions().owner(&cquery_env, file_set).await?) + dice.with_linear_recompute(|dice| async move { + let query_delegate = self.setup_dice_query_delegate(&dice).await?; + let cquery_env = self.cquery_env(&query_delegate, target_universe).await?; + cquery_functions().owner(&cquery_env, file_set).await + }) + .await } async fn deps( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, targets: &TargetSet, deps: Option, captured_expr: Option<&CapturedExpr>, ) -> anyhow::Result> { - Ok(cquery_functions() - .deps( - &self - .cquery_env(&self.setup_dice_query_delegate(dice).await?, None) - .await?, - &DefaultQueryFunctionsModule::new(), - targets, - deps, - captured_expr, - ) - .await?) 
+ dice.with_linear_recompute(|dice| async move { + cquery_functions() + .deps( + &self + .cquery_env(&self.setup_dice_query_delegate(&dice).await?, None) + .await?, + &DefaultQueryFunctionsModule::new(), + targets, + deps, + captured_expr, + ) + .await + }) + .await } async fn rdeps( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, universe: &TargetSet, targets: &TargetSet, depth: Option, + captured_expr: Option<&CapturedExpr>, ) -> anyhow::Result> { - Ok(cquery_functions() - .rdeps( - &self - .cquery_env(&self.setup_dice_query_delegate(dice).await?, None) - .await?, - universe, - targets, - depth, - ) - .await?) + dice.with_linear_recompute(|dice| async move { + cquery_functions() + .rdeps( + &self + .cquery_env(&self.setup_dice_query_delegate(&dice).await?, None) + .await?, + &DefaultQueryFunctionsModule::new(), + universe, + targets, + depth, + captured_expr, + ) + .await + }) + .await } async fn testsof( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, targets: &TargetSet, ) -> anyhow::Result> { - Ok(cquery_functions() - .testsof( - &self - .cquery_env(&self.setup_dice_query_delegate(dice).await?, None) - .await?, - targets, - ) - .await?) + dice.with_linear_recompute(|dice| async move { + cquery_functions() + .testsof( + &self + .cquery_env(&self.setup_dice_query_delegate(&dice).await?, None) + .await?, + targets, + ) + .await + }) + .await } async fn testsof_with_default_target_platform( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, targets: &TargetSet, ) -> anyhow::Result>> { - Ok(cquery_functions() - .testsof_with_default_target_platform( - &self - .cquery_env(&self.setup_dice_query_delegate(dice).await?, None) - .await?, - targets, - ) - .await?) + dice.with_linear_recompute(|dice| async move { + cquery_functions() + .testsof_with_default_target_platform( + &self + .cquery_env(&self.setup_dice_query_delegate(&dice).await?, None) + .await?, + targets, + ) + .await + }) + .await } } pub(crate) fn init_new_bxl_cquery_functions() { - NEW_BXL_CQUERY_FUNCTIONS.init(|target_platform, project_root, cell_name, cell_resolver| { - Box::pin(async move { - let cell = cell_resolver.get(cell_name)?; - // TODO(nga): working as as cell root is not right. - // Should be either the project root or user's current working directory. - let working_dir = cell.path().as_project_relative_path().to_buf(); - - Result::, _>::Ok(Box::new(BxlCqueryFunctionsImpl { - target_platform, - project_root, - working_dir, - })) - }) - }) + NEW_BXL_CQUERY_FUNCTIONS.init( + |global_cfg_options: GlobalCfgOptions, project_root, cell_name, cell_resolver| { + Box::pin(async move { + let cell = cell_resolver.get(cell_name)?; + // TODO(nga): working as as cell root is not right. + // Should be either the project root or user's current working directory. 
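NEW_BXL_CQUERY_FUNCTIONS is one of buck2's late-binding slots: the crate that needs to call the constructor declares the slot, and this crate fills it at startup, breaking what would otherwise be a dependency cycle. The mechanism is essentially a checked write-once global; a minimal sketch with std's OnceLock standing in for the LateBinding type:

use std::sync::OnceLock;

// Declared next to the caller; filled in by the crate that owns the
// implementation. The panics are illustrative, not buck2's error handling.
static NEW_FUNCS: OnceLock<fn(&str) -> String> = OnceLock::new();

pub fn init(f: fn(&str) -> String) {
    NEW_FUNCS.set(f).expect("initialized twice");
}

pub fn call(arg: &str) -> String {
    (NEW_FUNCS.get().expect("init() was never called"))(arg)
}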
+ let working_dir = cell.path().as_project_relative_path().to_buf(); + + Result::, _>::Ok(Box::new(BxlCqueryFunctionsImpl { + global_cfg_options, + project_root, + working_dir, + })) + }) + }, + ) } diff --git a/app/buck2_query_impls/src/cquery/environment.rs b/app/buck2_query_impls/src/cquery/environment.rs index 4034037f560dd..cd6530dad25ba 100644 --- a/app/buck2_query_impls/src/cquery/environment.rs +++ b/app/buck2_query_impls/src/cquery/environment.rs @@ -9,17 +9,23 @@ use std::sync::Arc; -use anyhow::Context; use async_trait::async_trait; -use buck2_build_api::query::oneshot::CqueryOwnerBehavior; use buck2_core::cells::cell_path::CellPath; use buck2_core::configuration::compatibility::MaybeCompatible; use buck2_core::target::configured_target_label::ConfiguredTargetLabel; -use buck2_core::target::label::TargetLabel; -use buck2_events::dispatch::console_message; +use buck2_core::target::label::label::TargetLabel; +use buck2_error::BuckErrorContext; use buck2_node::configured_universe::CqueryUniverse; use buck2_node::nodes::configured::ConfiguredTargetNode; +use buck2_node::nodes::configured_node_ref::ConfiguredTargetNodeRefNode; +use buck2_node::nodes::configured_node_ref::ConfiguredTargetNodeRefNodeDeps; +use buck2_query::query::environment::deps; use buck2_query::query::environment::QueryEnvironment; +use buck2_query::query::environment::QueryEnvironmentAsNodeLookup; +use buck2_query::query::environment::TraversalFilter; +use buck2_query::query::graph::dfs::dfs_postorder; +use buck2_query::query::graph::successors::AsyncChildVisitor; +use buck2_query::query::syntax::simple::eval::error::QueryError; use buck2_query::query::syntax::simple::eval::file_set::FileSet; use buck2_query::query::syntax::simple::eval::set::TargetSet; use buck2_query::query::syntax::simple::functions::docs::QueryEnvironmentDescription; @@ -27,10 +33,7 @@ use buck2_query::query::syntax::simple::functions::DefaultQueryFunctionsModule; use buck2_query::query::syntax::simple::functions::HasModuleDescription; use buck2_query::query::traversal::async_depth_first_postorder_traversal; use buck2_query::query::traversal::async_depth_limited_traversal; -use buck2_query::query::traversal::AsyncNodeLookup; -use buck2_query::query::traversal::AsyncTraversalDelegate; use dice::DiceComputations; -use dupe::Dupe; use tracing::warn; use crate::uquery::environment::allbuildfiles; @@ -38,41 +41,25 @@ use crate::uquery::environment::rbuildfiles; use crate::uquery::environment::QueryLiterals; use crate::uquery::environment::UqueryDelegate; -#[derive(Debug, thiserror::Error)] -enum CqueryError { - #[error("Target universe not specified (internal error)")] - NoUniverse, -} - /// CqueryDelegate resolves information needed by the QueryEnvironment. 
#[async_trait] -pub trait CqueryDelegate: Send + Sync { +pub(crate) trait CqueryDelegate: Send + Sync { fn uquery_delegate(&self) -> &dyn UqueryDelegate; - async fn get_node_for_target( - &self, - target: &TargetLabel, - ) -> anyhow::Result>; - async fn get_node_for_configured_target( &self, target: &ConfiguredTargetLabel, ) -> anyhow::Result; - async fn get_configured_target( - &self, - target: &TargetLabel, - ) -> anyhow::Result; - async fn get_node_for_default_configured_target( &self, target: &TargetLabel, ) -> anyhow::Result>; - fn ctx(&self) -> &DiceComputations; + fn ctx<'a>(&'a self) -> DiceComputations<'a>; } -pub struct CqueryEnvironment<'c> { +pub(crate) struct CqueryEnvironment<'c> { delegate: &'c dyn CqueryDelegate, literals: Arc + 'c>, // TODO(nga): BXL `cquery` function does not provides us the universe. @@ -81,22 +68,19 @@ pub struct CqueryEnvironment<'c> { // ``` // buck2 cquery 'deps(//foo:bar)' // ``` - universe: Option, - owner_behavior: CqueryOwnerBehavior, + universe: Option>, } impl<'c> CqueryEnvironment<'c> { - pub fn new( + pub(crate) fn new( delegate: &'c dyn CqueryDelegate, literals: Arc + 'c>, - universe: Option, - owner_behavior: CqueryOwnerBehavior, + universe: Option>, ) -> Self { Self { delegate, literals, universe, - owner_behavior, } } @@ -123,70 +107,11 @@ impl<'c> CqueryEnvironment<'c> { .await } - /// Deprecated `owner` function implementation. - /// See [this post](https://fburl.com/0xv7u4bz) for details. - async fn owner_deprecated(&self, path: &CellPath) -> anyhow::Result> { - // need to explicitly track this rather than checking for changes to result set since the owner might - // already be in the set. - let mut owners = Vec::new(); - match self - .delegate - .uquery_delegate() - .get_enclosing_packages(path) - .await - { - Ok(packages) => { - let package_futs = packages.iter().map(|package| async move { - let mut result: Vec = Vec::new(); - - // TODO(cjhopman): We should make sure that the file exists. - let targets = self - .delegate - .uquery_delegate() - .eval_build_file(package.dupe()) - .await?; - - for node in targets.targets().values() { - match self.delegate.get_node_for_target(node.label()).await? { - MaybeCompatible::Compatible(node) => { - for input in node.inputs() { - if &input == path { - result.push(node.dupe()); - // this intentionally only breaks out of the inner loop. We don't need to look at the - // other inputs of this target, but it's possible for a single file to be owned by - // multiple targets. - break; - } - } - } - MaybeCompatible::Incompatible(reason) => { - // TODO(scottcao): Add event for incompatible target skipping - console_message(reason.skipping_message( - &self.delegate.get_configured_target(node.label()).await?, - )); - } - } - } - - anyhow::Ok(result) - }); - - for nodes in futures::future::join_all(package_futs).await.into_iter() { - for node in nodes?.into_iter() { - owners.push(node); - } - } - } - Err(_) => { - // we don't consider this an error, it's usually the case that the user - // just wants to know the target owning the file if it exists. 
- } - }; - Ok(owners) - } - fn owner_correct(&self, path: &CellPath) -> anyhow::Result> { - let universe = self.universe.as_ref().context(CqueryError::NoUniverse)?; + let universe = self + .universe + .as_ref() + .internal_error_anyhow("Target universe not specified")?; Ok(universe.owners(path)) } } @@ -208,7 +133,7 @@ impl<'c> QueryEnvironment for CqueryEnvironment<'c> { async fn eval_literals(&self, literals: &[&str]) -> anyhow::Result> { self.literals - .eval_literals(literals, self.delegate.ctx()) + .eval_literals(literals, &mut self.delegate.ctx()) .await } @@ -221,19 +146,34 @@ impl<'c> QueryEnvironment for CqueryEnvironment<'c> { async fn dfs_postorder( &self, - root: &TargetSet, - traversal_delegate: &mut dyn AsyncTraversalDelegate, + root: &TargetSet, + traversal_delegate: impl AsyncChildVisitor, + visit: impl FnMut(Self::Target) -> anyhow::Result<()> + Send, ) -> anyhow::Result<()> { - async_depth_first_postorder_traversal(self, root.iter_names(), traversal_delegate).await + async_depth_first_postorder_traversal( + &QueryEnvironmentAsNodeLookup { env: self }, + root.iter_names(), + traversal_delegate, + visit, + ) + .await } async fn depth_limited_traversal( &self, root: &TargetSet, - delegate: &mut dyn AsyncTraversalDelegate, + delegate: impl AsyncChildVisitor, + visit: impl FnMut(Self::Target) -> anyhow::Result<()> + Send, depth: u32, ) -> anyhow::Result<()> { - async_depth_limited_traversal(self, root.iter_names(), delegate, depth).await + async_depth_limited_traversal( + &QueryEnvironmentAsNodeLookup { env: self }, + root.iter_names(), + delegate, + visit, + depth, + ) + .await } async fn allbuildfiles(&self, universe: &TargetSet) -> anyhow::Result { @@ -248,10 +188,7 @@ impl<'c> QueryEnvironment for CqueryEnvironment<'c> { let mut result = TargetSet::new(); for path in paths.iter() { - let owners = match &self.owner_behavior { - CqueryOwnerBehavior::Deprecated => self.owner_deprecated(path).await?, - CqueryOwnerBehavior::Correct => self.owner_correct(path)?, - }; + let owners = self.owner_correct(path)?; if owners.is_empty() { warn!("No owner was found for {}", path); } @@ -259,11 +196,35 @@ impl<'c> QueryEnvironment for CqueryEnvironment<'c> { } Ok(result) } -} -#[async_trait] -impl<'a> AsyncNodeLookup for CqueryEnvironment<'a> { - async fn get(&self, label: &ConfiguredTargetLabel) -> anyhow::Result { - self.get_node(label).await + async fn targets_in_buildfile( + &self, + _paths: &FileSet, + ) -> anyhow::Result> { + Err(QueryError::FunctionUnimplemented("targets_in_buildfile").into()) + } + + async fn deps( + &self, + targets: &TargetSet, + depth: Option, + filter: Option<&dyn TraversalFilter>, + ) -> anyhow::Result> { + if depth.is_none() && filter.is_none() { + // TODO(nga): fast lookup with depth too. 
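+            // The fast path below is possible because `ConfiguredTargetNode`s
+            // already hold their dep edges, so a plain synchronous post-order
+            // walk over node refs suffices; no per-edge async lookups are needed.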
+ + let mut deps = TargetSet::new(); + dfs_postorder::( + targets.iter().map(ConfiguredTargetNodeRefNode::new), + ConfiguredTargetNodeRefNodeDeps, + |target| { + deps.insert_unique_unchecked(target.to_node()); + Ok(()) + }, + )?; + Ok(deps) + } else { + deps(self, targets, depth, filter).await + } } } diff --git a/app/buck2_query_impls/src/cquery/evaluator.rs b/app/buck2_query_impls/src/cquery/evaluator.rs index 6083928ed3840..e8c5bbc833459 100644 --- a/app/buck2_query_impls/src/cquery/evaluator.rs +++ b/app/buck2_query_impls/src/cquery/evaluator.rs @@ -11,10 +11,9 @@ use std::sync::Arc; -use buck2_build_api::query::oneshot::CqueryOwnerBehavior; -use buck2_common::result::ToSharedResultExt; -use buck2_core::fs::project_rel_path::ProjectRelativePath; -use buck2_core::target::label::TargetLabel; +use buck2_common::events::HasEvents; +use buck2_error::internal_error_anyhow; +use buck2_error::BuckErrorContext; use buck2_events::dispatch::console_message; use buck2_node::configured_universe::CqueryUniverse; use buck2_node::nodes::configured::ConfiguredTargetNode; @@ -28,109 +27,174 @@ use gazebo::prelude::*; use crate::analysis::evaluator::eval_query; use crate::cquery::environment::CqueryEnvironment; -use crate::dice::get_dice_query_delegate; use crate::dice::DiceQueryData; use crate::dice::DiceQueryDelegate; use crate::uquery::environment::PreresolvedQueryLiterals; use crate::uquery::environment::QueryLiterals; use crate::uquery::environment::UqueryDelegate; -pub struct CqueryEvaluator<'c> { - dice_query_delegate: DiceQueryDelegate<'c>, - functions: DefaultQueryFunctionsModule>, - owner_behavior: CqueryOwnerBehavior, -} +pub(crate) async fn eval_cquery( + dice_query_delegate: DiceQueryDelegate<'_, '_>, + query: &str, + query_args: &[String], + target_universe: Option<&[String]>, + collect_universes: bool, +) -> anyhow::Result<( + QueryEvaluationResult, + Option>>, +)> { + let dispatcher = dice_query_delegate + .ctx() + .per_transaction_data() + .get_dispatcher() + .dupe(); + let functions = DefaultQueryFunctionsModule::new(); + let dice_query_delegate = &dice_query_delegate; + + let target_universe = match target_universe { + None => None, + Some(target_universe) => Some(Arc::new( + build_cquery_universe_from_literals( + target_universe, + dice_query_delegate.query_data(), + &mut dice_query_delegate.ctx(), + ) + .await?, + )), + }; + + // Here we use queue to pass universes from `eval_query` callback to this function. + // This is ugly, but I (Stiopa) cannot figure out how to do it in a better way, + // without introducing a lot of complexity (generics, downcasting) + // through query evaluation stack. 
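+    // In outline: create the channel up front, send each universe into it as
+    // the callback builds one, drop the sender once evaluation finishes, then
+    // drain the receiver with `try_iter()` (all visible below).
+    //
+    // A minimal sketch of the same pattern, under the std `mpsc` semantics:
+    //
+    //     let (tx, rx) = std::sync::mpsc::channel::<u32>();
+    //     tx.send(1).unwrap();   // inside the evaluation callback
+    //     drop(tx);              // close the sending side
+    //     let all: Vec<u32> = rx.try_iter().collect(); // drain afterwards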
+ let (universes_tx_value, universes_rx) = if collect_universes { + let (universes_tx_value, universes_rx) = std::sync::mpsc::channel(); + (Some(universes_tx_value), Some(universes_rx)) + } else { + (None, None) + }; + + if let (Some(target_universe), Some(universes_tx_value)) = + (&target_universe, &universes_tx_value) + { + universes_tx_value + .send(target_universe.dupe()) + .internal_error_anyhow("Must be open")?; + } -impl CqueryEvaluator<'_> { - pub async fn eval_query, U: AsRef>( - &self, - query: &str, - query_args: &[A], - target_universe: Option<&[U]>, - ) -> anyhow::Result> { - eval_query(&self.functions, query, query_args, async move |literals| { - let (universe, resolved_literals) = match target_universe { + let universes_tx = universes_tx_value.as_ref(); + + let target_universe = &target_universe; + + let result = eval_query( + dispatcher, + &functions, + query, + query_args, + |literals| async move { + let (resolved_literals, universe) = match target_universe { None => { if literals.is_empty() { console_message( "Query has no target literals and `--target-universe` is not specified.\n\ - Such query is correct, but the result is always empty.\n\ - Consider specifying `--target-universe` for this query\n\ - or using `uquery` instead of `cquery`".to_owned()); + Such query is correct, but the result is always empty.\n\ + Consider specifying `--target-universe` for this query\n\ + or using `uquery` instead of `cquery`".to_owned()); } // In the absence of a user-provided target universe, we use the target // literals in the cquery as the universe. - resolve_literals_in_universe(&self.dice_query_delegate, self.dice_query_delegate.query_data().dupe(), &literals, &literals) - .await? - } - Some(universe) => { - resolve_literals_in_universe(&self.dice_query_delegate, self.dice_query_delegate.query_data().dupe(), &literals, universe) - .await? 
+ + let universe = build_cquery_universe_from_literals( + &literals, + dice_query_delegate.query_data(), + &mut dice_query_delegate.ctx(), + ) + .await?; + + let universe = Arc::new(universe); + + if let Some(universes_tx) = universes_tx { + universes_tx.send(universe.dupe()).internal_error_anyhow("Must be open")?; + } + + ( + resolve_literals_in_universe(&dice_query_delegate, &literals, &universe) + .await?, + universe, + ) } + Some(universe) => ( + resolve_literals_in_universe(&dice_query_delegate, &literals, &universe) + .await?, + universe.dupe(), + ), }; Ok(CqueryEnvironment::new( - &self.dice_query_delegate, + dice_query_delegate, Arc::new(resolved_literals), Some(universe), - self.owner_behavior, )) - }) - .await - } + }, + ) + .await?; + + drop(universes_tx_value); + + let universes = if let Some(universes_rx) = universes_rx { + let universes: Vec> = universes_rx.try_iter().collect(); + match universes_rx.try_recv() { + Ok(_) => return Err(internal_error_anyhow!("tx must be closed at this moment")), + Err(std::sync::mpsc::TryRecvError::Empty) => { + return Err(internal_error_anyhow!("tx must be closed at this moment")); + } + Err(std::sync::mpsc::TryRecvError::Disconnected) => {} + } + Some(universes) + } else { + None + }; + + Ok((result, universes)) } pub(crate) async fn preresolve_literals_and_build_universe( - dice_query_delegate: &DiceQueryDelegate<'_>, + dice_query_delegate: &DiceQueryDelegate<'_, '_>, dice_query_data: &DiceQueryData, literals: &[String], ) -> anyhow::Result<( CqueryUniverse, PreresolvedQueryLiterals, )> { - let resolved_literals = - PreresolvedQueryLiterals::pre_resolve(dice_query_data, literals, dice_query_delegate.ctx()) - .await; - let universe = CqueryUniverse::build(&resolved_literals.literals()?).await?; + let resolved_literals = PreresolvedQueryLiterals::pre_resolve( + dice_query_data, + literals, + &mut dice_query_delegate.ctx(), + ) + .await; + let universe = CqueryUniverse::build(&resolved_literals.literals()?)?; Ok((universe, resolved_literals)) } -/// Evaluates some query expression. TargetNodes are resolved via the interpreter from -/// the provided DiceCtx. -pub async fn get_cquery_evaluator<'a, 'c: 'a>( - ctx: &'c DiceComputations, - working_dir: &'a ProjectRelativePath, - global_target_platform: Option, - owner_behavior: CqueryOwnerBehavior, -) -> anyhow::Result> { - let dice_query_delegate = - get_dice_query_delegate(ctx, working_dir, global_target_platform).await?; - let functions = DefaultQueryFunctionsModule::new(); - Ok(CqueryEvaluator { - dice_query_delegate, - functions, - owner_behavior, - }) +async fn build_cquery_universe_from_literals( + universe: &[String], + query_literals: &DiceQueryData, + ctx: &mut DiceComputations<'_>, +) -> anyhow::Result { + let refs: Vec<_> = universe.map(|v| v.as_str()); + let universe_resolved = query_literals.eval_literals(&refs, ctx).await?; + + CqueryUniverse::build(&universe_resolved) } // This will first resolve the universe to configured nodes and then gather all // the deps. From there, it resolves the literals to any matching nodes in the universe deps. 
-async fn resolve_literals_in_universe, U: AsRef>( - dice_query_delegate: &DiceQueryDelegate<'_>, - query_literals: Arc, - literals: &[L], - universe: &[U], -) -> anyhow::Result<( - CqueryUniverse, - PreresolvedQueryLiterals, -)> { +async fn resolve_literals_in_universe( + dice_query_delegate: &DiceQueryDelegate<'_, '_>, + literals: &[String], + universe: &CqueryUniverse, +) -> anyhow::Result> { // TODO(cjhopman): We should probably also resolve the literals to TargetNode so that // we can get errors for packages or targets that don't exist or fail to load. - let refs: Vec<_> = universe.map(|v| v.as_ref()); - let universe_resolved = query_literals - .eval_literals(&refs, dice_query_delegate.ctx()) - .await?; - - let universe = CqueryUniverse::build(&universe_resolved).await?; // capture a reference so the ref can be moved into the future below. let universe_ref = &universe; @@ -141,16 +205,17 @@ async fn resolve_literals_in_universe, U: AsRef>( let resolution_futs: FuturesUnordered<_> = literals .iter() .map(|lit| async move { - let lit = lit.as_ref(); let result: anyhow::Result<_> = try { - let resolved_pattern = dice_query_delegate.resolve_target_patterns(&[lit]).await?; + let resolved_pattern = dice_query_delegate + .resolve_target_patterns(&[lit.as_str()]) + .await?; universe_ref.get(&resolved_pattern) }; - (lit.to_owned(), result.shared_error()) + (lit.to_owned(), result.map_err(buck2_error::Error::from)) }) .collect(); let resolved = resolution_futs.collect().await; - Ok((universe, PreresolvedQueryLiterals::new(resolved))) + Ok(PreresolvedQueryLiterals::new(resolved)) } diff --git a/app/buck2_query_impls/src/cquery/mod.rs b/app/buck2_query_impls/src/cquery/mod.rs deleted file mode 100644 index 9c43548eb0f28..0000000000000 --- a/app/buck2_query_impls/src/cquery/mod.rs +++ /dev/null @@ -1,12 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -pub(crate) mod bxl; -pub mod environment; -pub mod evaluator; diff --git a/app/buck2_query_impls/src/dice.rs b/app/buck2_query_impls/src/dice.rs new file mode 100644 index 0000000000000..9b8e92fc491f5 --- /dev/null +++ b/app/buck2_query_impls/src/dice.rs @@ -0,0 +1,361 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use std::collections::HashMap; +use std::sync::Arc; + +use async_trait::async_trait; +use buck2_build_api::configure_targets::load_compatible_patterns; +use buck2_common::dice::cells::HasCellResolver; +use buck2_common::dice::data::HasIoProvider; +use buck2_common::dice::file_ops::DiceFileComputations; +use buck2_common::global_cfg_options::GlobalCfgOptions; +use buck2_common::package_boundary::HasPackageBoundaryExceptions; +use buck2_common::package_listing::dice::DicePackageListingResolver; +use buck2_common::package_listing::resolver::PackageListingResolver; +use buck2_common::pattern::resolve::ResolveTargetPatterns; +use buck2_common::pattern::resolve::ResolvedPattern; +use buck2_common::target_aliases::BuckConfigTargetAliasResolver; +use buck2_common::target_aliases::HasTargetAliasResolver; +use buck2_core::cells::cell_path::CellPath; +use buck2_core::cells::name::CellName; +use buck2_core::cells::CellAliasResolver; +use buck2_core::cells::CellResolver; +use buck2_core::configuration::compatibility::MaybeCompatible; +use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; +use buck2_core::fs::paths::file_name::FileNameBuf; +use buck2_core::fs::project::ProjectRoot; +use buck2_core::fs::project_rel_path::ProjectRelativePath; +use buck2_core::package::PackageLabel; +use buck2_core::pattern::pattern::ParsedPattern; +use buck2_core::pattern::pattern_type::ProvidersPatternExtra; +use buck2_core::pattern::pattern_type::TargetPatternExtra; +use buck2_core::pattern::query_file_literal::parse_query_file_literal; +use buck2_core::provider::label::ProvidersName; +use buck2_core::soft_error; +use buck2_core::target::configured_target_label::ConfiguredTargetLabel; +use buck2_core::target::label::label::TargetLabel; +use buck2_node::load_patterns::load_patterns; +use buck2_node::load_patterns::MissingTargetBehavior; +use buck2_node::nodes::configured::ConfiguredTargetNode; +use buck2_node::nodes::configured_frontend::ConfiguredTargetNodeCalculation; +use buck2_node::nodes::unconfigured::TargetNode; +use buck2_node::target_calculation::ConfiguredTargetCalculation; +use buck2_query::query::syntax::simple::eval::file_set::FileNode; +use buck2_query::query::syntax::simple::eval::file_set::FileSet; +use buck2_query::query::syntax::simple::eval::set::TargetSet; +use dice::DiceComputations; +use dice::LinearRecomputeDiceComputations; +use futures::FutureExt; +use gazebo::prelude::*; +use indexmap::indexset; + +use crate::cquery::environment::CqueryDelegate; +use crate::uquery::environment::QueryLiterals; +use crate::uquery::environment::UqueryDelegate; + +pub(crate) mod aquery; + +#[derive(Debug, buck2_error::Error)] +enum LiteralParserError { + #[error("Expected a target pattern without providers, got: `{0}`")] + ExpectingTargetPatternWithoutProviders(String), +} + +pub(crate) struct LiteralParser { + // file and target literals are resolved relative to the working dir. + working_dir: CellPath, + working_dir_abs: AbsNormPathBuf, + project_root: ProjectRoot, + cell_resolver: CellResolver, + cell_alias_resolver: CellAliasResolver, + target_alias_resolver: BuckConfigTargetAliasResolver, +} + +impl LiteralParser { + // We allow provider names and flavors in the value and it gets stripped out for the result as queries operate on the target graphs. 
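+    // For example (illustrative, not from the original source): a literal such
+    // as `//foo:bar[some-provider]` parses as a providers pattern here, then the
+    // provider part is stripped (with a soft error, below), leaving the plain
+    // target pattern `//foo:bar`.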
+ fn parse_target_pattern( + &self, + value: &str, + ) -> anyhow::Result> { + let providers_pattern = self.parse_providers_pattern(value)?; + let target_pattern = match providers_pattern { + ParsedPattern::Target(package, target_name, ProvidersPatternExtra { providers }) => { + if providers != ProvidersName::Default { + // After converting this to hard error, replace this function body + // with direct call to `ParsedPattern::parse_relative`, + // as `parse_providers_pattern` does. + soft_error!( + "expecting_target_pattern_without_providers", + LiteralParserError::ExpectingTargetPatternWithoutProviders( + value.to_owned() + ) + .into() + )?; + } + ParsedPattern::Target(package, target_name, TargetPatternExtra) + } + ParsedPattern::Package(package) => ParsedPattern::Package(package), + ParsedPattern::Recursive(path) => ParsedPattern::Recursive(path), + }; + Ok(target_pattern) + } + + pub(crate) fn parse_providers_pattern( + &self, + value: &str, + ) -> anyhow::Result> { + ParsedPattern::parse_relative( + &self.target_alias_resolver, + self.working_dir.as_ref(), + value, + &self.cell_resolver, + &self.cell_alias_resolver, + ) + } + + fn parse_file_literal(&self, literal: &str) -> anyhow::Result { + parse_query_file_literal( + literal, + &self.cell_alias_resolver, + &self.cell_resolver, + &self.working_dir_abs, + &self.project_root, + ) + } +} + +/// A Uquery delegate that resolves TargetNodes with the provided +/// InterpreterCalculation. +pub(crate) struct DiceQueryDelegate<'c, 'd> { + ctx: &'c LinearRecomputeDiceComputations<'d>, + query_data: Arc, +} + +pub(crate) struct DiceQueryData { + literal_parser: LiteralParser, + global_cfg_options: GlobalCfgOptions, +} + +impl DiceQueryData { + pub(crate) fn new( + global_cfg_options: GlobalCfgOptions, + cell_resolver: CellResolver, + cell_alias_resolver: CellAliasResolver, + working_dir: &ProjectRelativePath, + project_root: ProjectRoot, + target_alias_resolver: BuckConfigTargetAliasResolver, + ) -> anyhow::Result { + let cell_path = cell_resolver.get_cell_path(working_dir)?; + + let working_dir_abs = project_root.resolve(working_dir); + + Ok(Self { + literal_parser: LiteralParser { + working_dir_abs, + working_dir: cell_path, + project_root, + cell_resolver, + cell_alias_resolver, + target_alias_resolver, + }, + global_cfg_options, + }) + } + + pub(crate) fn literal_parser(&self) -> &LiteralParser { + &self.literal_parser + } + + pub(crate) fn global_cfg_options(&self) -> &GlobalCfgOptions { + &self.global_cfg_options + } +} + +impl<'c, 'd> DiceQueryDelegate<'c, 'd> { + pub(crate) fn new( + ctx: &'c LinearRecomputeDiceComputations<'d>, + query_data: Arc, + ) -> Self { + Self { ctx, query_data } + } + + pub(crate) fn ctx<'x>(&'x self) -> DiceComputations<'x> { + self.ctx.get() + } + + pub(crate) fn query_data(&self) -> &Arc { + &self.query_data + } +} + +#[async_trait] +impl<'c, 'd> UqueryDelegate for DiceQueryDelegate<'c, 'd> { + // get the list of potential buildfile names for each cell + async fn get_buildfile_names_by_cell( + &self, + ) -> anyhow::Result>> { + let mut ctx = self.ctx.get(); + let resolver = ctx.get_cell_resolver().await?; + let buildfiles = ctx + .try_compute_join(resolver.cells(), |ctx, (name, _)| { + async move { + DiceFileComputations::buildfiles(ctx, name) + .await + .map(|x| (name, x)) + } + .boxed() + }) + .await?; + + Ok(buildfiles.into_iter().collect()) + } + + async fn resolve_target_patterns( + &self, + patterns: &[&str], + ) -> anyhow::Result> { + let parsed_patterns = + patterns.try_map(|p| 
self.query_data.literal_parser.parse_target_pattern(p))?; + ResolveTargetPatterns::resolve(&mut self.ctx.get(), &parsed_patterns).await + } + + // This returns 1 package normally but can return multiple packages if the path is covered under `self.package_boundary_exceptions`. + async fn get_enclosing_packages(&self, path: &CellPath) -> anyhow::Result> { + // Without package boundary violations, there is only 1 owning package for a path. + // However, with package boundary violations, all parent packages of the enclosing package can also be owners. + if let Some(enclosing_violation_path) = self + .ctx + .get() + .get_package_boundary_exception(path.as_ref()) + .await? + { + return Ok(DicePackageListingResolver(&mut self.ctx.get()) + .get_enclosing_packages(path.as_ref(), (*enclosing_violation_path).as_ref()) + .await? + .into_iter() + .collect()); + } + + let package = DicePackageListingResolver(&mut self.ctx.get()) + .get_enclosing_package(path.as_ref()) + .await?; + Ok(vec![package]) + } + + async fn eval_file_literal(&self, literal: &str) -> anyhow::Result { + let cell_path = self.query_data.literal_parser.parse_file_literal(literal)?; + Ok(FileSet::new(indexset![FileNode(cell_path)])) + } + + fn linear_dice_computations(&self) -> &LinearRecomputeDiceComputations<'_> { + self.ctx + } + + fn ctx<'a>(&'a self) -> DiceComputations<'a> { + self.ctx.get() + } +} + +#[async_trait] +impl<'c, 'd> CqueryDelegate for DiceQueryDelegate<'c, 'd> { + fn uquery_delegate(&self) -> &dyn UqueryDelegate { + self + } + + async fn get_node_for_configured_target( + &self, + target: &ConfiguredTargetLabel, + ) -> anyhow::Result { + Ok(self + .ctx + .get() + .get_configured_target_node(target) + .await? + .require_compatible()?) + } + + async fn get_node_for_default_configured_target( + &self, + target: &TargetLabel, + ) -> anyhow::Result> { + let target = self.ctx.get().get_default_configured_target(target).await?; + self.ctx.get().get_configured_target_node(&target).await + } + + fn ctx<'a>(&'a self) -> DiceComputations<'a> { + self.ctx.get() + } +} + +#[async_trait] +impl QueryLiterals for DiceQueryData { + async fn eval_literals( + &self, + literals: &[&str], + ctx: &mut DiceComputations<'_>, + ) -> anyhow::Result> { + let parsed_patterns = literals.try_map(|p| self.literal_parser.parse_target_pattern(p))?; + load_compatible_patterns( + ctx, + parsed_patterns, + &self.global_cfg_options, + MissingTargetBehavior::Fail, + ) + .await + } +} + +#[async_trait] +impl QueryLiterals for DiceQueryData { + async fn eval_literals( + &self, + literals: &[&str], + ctx: &mut DiceComputations<'_>, + ) -> anyhow::Result> { + let parsed_patterns = literals.try_map(|p| self.literal_parser.parse_target_pattern(p))?; + let loaded_patterns = + load_patterns(ctx, parsed_patterns, MissingTargetBehavior::Fail).await?; + let mut target_set = TargetSet::new(); + for (_package, results) in loaded_patterns.into_iter() { + target_set.extend(results?.into_values()); + } + Ok(target_set) + } +} + +pub(crate) async fn get_dice_query_delegate<'a, 'c: 'a, 'd>( + ctx: &'c LinearRecomputeDiceComputations<'d>, + working_dir: &'a ProjectRelativePath, + global_cfg_options: GlobalCfgOptions, +) -> anyhow::Result> { + let cell_resolver = ctx.get().get_cell_resolver().await?; + let cell_alias_resolver = ctx + .get() + .get_cell_alias_resolver_for_dir(working_dir) + .await?; + let target_alias_resolver = ctx.get().target_alias_resolver().await?; + let project_root = ctx + .get() + .global_data() + .get_io_provider() + .project_root() + .to_owned(); 
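+    // The resolvers gathered above are bundled into one shared `DiceQueryData`,
+    // so the delegate itself only needs to carry the linear-recompute DICE
+    // handle alongside it.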
+ Ok(DiceQueryDelegate::new( + ctx, + Arc::new(DiceQueryData::new( + global_cfg_options, + cell_resolver, + cell_alias_resolver, + working_dir, + project_root, + target_alias_resolver, + )?), + )) +} diff --git a/app/buck2_query_impls/src/dice/aquery.rs b/app/buck2_query_impls/src/dice/aquery.rs index 7607539a9a722..63840624c2f57 100644 --- a/app/buck2_query_impls/src/dice/aquery.rs +++ b/app/buck2_query_impls/src/dice/aquery.rs @@ -11,7 +11,6 @@ use std::future::Future; use std::hash::Hash; use std::sync::Arc; -use anyhow::Context; use async_trait::async_trait; use buck2_artifact::actions::key::ActionKey; use buck2_build_api::actions::artifact::get_artifact_fs::GetArtifactFs; @@ -26,11 +25,10 @@ use buck2_build_api::analysis::AnalysisResult; use buck2_build_api::artifact_groups::ArtifactGroup; use buck2_build_api::artifact_groups::ResolvedArtifactGroup; use buck2_build_api::artifact_groups::TransitiveSetProjectionKey; -use buck2_build_api::deferred::calculation::DeferredCalculation; -use buck2_common::result::SharedResult; +use buck2_build_api::keep_going::KeepGoing; use buck2_core::configuration::compatibility::MaybeCompatible; use buck2_core::fs::artifact_path_resolver::ArtifactFs; -use buck2_core::pattern::ParsedPattern; +use buck2_core::pattern::pattern::ParsedPattern; use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_node::target_calculation::ConfiguredTargetCalculation; use buck2_query::query::syntax::simple::eval::set::TargetSet; @@ -41,12 +39,9 @@ use dupe::Dupe; use futures::future::BoxFuture; use futures::future::FutureExt; use futures::future::Shared; -use futures::stream::FuturesOrdered; -use futures::StreamExt; use gazebo::prelude::*; use itertools::Either; use itertools::Itertools; -use thiserror::Error; use tokio::sync::oneshot; use crate::aquery::environment::AqueryDelegate; @@ -55,7 +50,7 @@ use crate::dice::DiceQueryData; use crate::dice::DiceQueryDelegate; use crate::uquery::environment::QueryLiterals; -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum ActionQueryError { #[error( "`aquery` currently only supports literal target patterns, not package or recursive (got `{0}`)" @@ -127,8 +122,9 @@ impl NodeCache { /// be able to iterate the tset structure synchronously. #[derive(Clone, Dupe)] struct DiceAqueryNodesCache { - action_nodes: Arc>>, - tset_nodes: Arc>>, + action_nodes: Arc>>, + tset_nodes: + Arc>>, } impl DiceAqueryNodesCache { @@ -140,8 +136,8 @@ impl DiceAqueryNodesCache { } } -pub(crate) struct DiceAqueryDelegate<'c> { - base_delegate: DiceQueryDelegate<'c>, +pub(crate) struct DiceAqueryDelegate<'c, 'd> { + base_delegate: DiceQueryDelegate<'c, 'd>, query_data: Arc, } @@ -158,50 +154,53 @@ pub(crate) struct AqueryData { // artifact side of it holds a starlark ref. That would allow someone with an ArtifactGroup to synchronously // traverse the tset graph rather than needing to asynchronously resolve a TransitiveSetKey. 
async fn convert_inputs<'c, 'a, Iter: IntoIterator>( - ctx: &'c DiceComputations, + ctx: &'c mut DiceComputations<'_>, node_cache: DiceAqueryNodesCache, inputs: Iter, ) -> anyhow::Result> { + let resolved_artifacts: Vec<_> = tokio::task::unconstrained(KeepGoing::try_compute_join_all( + ctx, + inputs, + |ctx, input| async move { input.resolved_artifact(ctx).await }.boxed(), + )) + .await?; + let (artifacts, projections): (Vec<_>, Vec<_>) = Itertools::partition_map( - inputs + resolved_artifacts .into_iter() - .filter_map(|input| match input.assert_resolved() { - ResolvedArtifactGroup::Artifact(a) => a.action_key().map(Either::Left), + .filter_map(|resolved_artifact| match resolved_artifact { + ResolvedArtifactGroup::Artifact(a) => { + a.action_key().map(|a| Either::Left(a.clone())) + } ResolvedArtifactGroup::TransitiveSetProjection(key) => Some(Either::Right(key)), }), |v| v, ); let mut deps = artifacts.into_map(|a| ActionInput::ActionKey(ActionQueryNodeRef::Action(a.dupe()))); - let mut projection_deps: FuturesOrdered<_> = projections - .into_iter() - .map(|key| { + let projection_deps = ctx + .try_compute_join(projections, |ctx, key| { let key = key.dupe(); let node_cache = node_cache.dupe(); - get_tset_node(node_cache, ctx, key) + async move { get_tset_node(node_cache, ctx, key).await }.boxed() }) - .collect(); + .await?; - while let Some(node) = tokio::task::unconstrained(projection_deps.next()).await { - deps.push(ActionInput::IndirectInputs(node?)); + for node in projection_deps { + deps.push(ActionInput::IndirectInputs(node)); } Ok(deps) } fn compute_tset_node<'c>( node_cache: DiceAqueryNodesCache, - ctx: &'c DiceComputations, + ctx: &'c mut DiceComputations<'_>, key: TransitiveSetProjectionKey, -) -> BoxFuture<'c, SharedResult> { +) -> BoxFuture<'c, buck2_error::Result> { async move { - let set = ctx - .compute_deferred_data(&key.key) - .await - .context("Failed to compute deferred")?; + let set = key.key.lookup(ctx).await?; - let sub_inputs = set - .as_transitive_set() - .get_projection_sub_inputs(key.projection)?; + let sub_inputs = set.get_projection_sub_inputs(key.projection)?; let inputs = convert_inputs(ctx, node_cache, sub_inputs.iter()).await?; @@ -217,7 +216,7 @@ fn compute_tset_node<'c>( async fn get_tset_node<'c>( node_cache: DiceAqueryNodesCache, - ctx: &'c DiceComputations, + ctx: &'c mut DiceComputations<'_>, key: TransitiveSetProjectionKey, ) -> anyhow::Result { let copied_node_cache = node_cache.dupe(); @@ -231,10 +230,10 @@ async fn get_tset_node<'c>( fn compute_action_node<'c>( node_cache: DiceAqueryNodesCache, - ctx: &'c DiceComputations, + ctx: &'c mut DiceComputations<'_>, key: ActionKey, fs: Arc, -) -> BoxFuture<'c, SharedResult> { +) -> BoxFuture<'c, buck2_error::Result> { async move { let action = ActionCalculation::get_action(ctx, &key).await?; let deps = convert_inputs(ctx, node_cache, action.inputs()?.iter()).await?; @@ -245,7 +244,7 @@ fn compute_action_node<'c>( async fn get_action_node<'c>( node_cache: DiceAqueryNodesCache, - ctx: &'c DiceComputations, + ctx: &'c mut DiceComputations<'_>, key: ActionKey, fs: Arc, ) -> anyhow::Result { @@ -258,10 +257,10 @@ async fn get_action_node<'c>( .await?) 
} -impl<'c> DiceAqueryDelegate<'c> { +impl<'c, 'd> DiceAqueryDelegate<'c, 'd> { pub(crate) async fn new( - base_delegate: DiceQueryDelegate<'c>, - ) -> anyhow::Result> { + base_delegate: DiceQueryDelegate<'c, 'd>, + ) -> anyhow::Result> { let artifact_fs = Arc::new(base_delegate.ctx().get_artifact_fs().await?); let query_data = Arc::new(AqueryData { artifact_fs, @@ -274,14 +273,14 @@ impl<'c> DiceAqueryDelegate<'c> { }) } - pub fn query_data(&self) -> &Arc { + pub(crate) fn query_data(&self) -> &Arc { &self.query_data } - pub async fn get_action_node(&self, key: &ActionKey) -> anyhow::Result { + pub(crate) async fn get_action_node(&self, key: &ActionKey) -> anyhow::Result { get_action_node( self.query_data.nodes_cache.dupe(), - self.base_delegate.ctx(), + &mut self.base_delegate.ctx(), key.dupe(), self.query_data.artifact_fs.dupe(), ) @@ -290,12 +289,12 @@ impl<'c> DiceAqueryDelegate<'c> { } #[async_trait] -impl<'c> AqueryDelegate for DiceAqueryDelegate<'c> { +impl<'c, 'd> AqueryDelegate for DiceAqueryDelegate<'c, 'd> { fn cquery_delegate(&self) -> &dyn CqueryDelegate { &self.base_delegate } - fn ctx(&self) -> &DiceComputations { + fn ctx<'a>(&'a self) -> DiceComputations<'a> { self.base_delegate.ctx() } @@ -308,7 +307,7 @@ impl<'c> AqueryDelegate for DiceAqueryDelegate<'c> { artifacts: &[ArtifactGroup], ) -> anyhow::Result> { let inputs = convert_inputs( - self.base_delegate.ctx(), + &mut self.base_delegate.ctx(), self.query_data.nodes_cache.dupe(), artifacts, ) @@ -318,7 +317,7 @@ impl<'c> AqueryDelegate for DiceAqueryDelegate<'c> { .map(|i| i.require_action()) .collect::, _>>()?; - futures::future::try_join_all(refs.iter().map(|n| self.get_node(n))).await + buck2_util::future::try_join_all(refs.iter().map(|n| self.get_node(n))).await } async fn get_target_set_from_analysis( @@ -330,7 +329,7 @@ impl<'c> AqueryDelegate for DiceAqueryDelegate<'c> { self.query_data().as_ref(), configured_label, analysis, - self.ctx(), + &mut self.ctx(), ) .await } @@ -340,7 +339,7 @@ async fn get_target_set_from_analysis_inner( query_data: &AqueryData, configured_label: &ConfiguredProvidersLabel, analysis: AnalysisResult, - dice: &DiceComputations, + dice: &mut DiceComputations<'_>, ) -> anyhow::Result> { let mut result = TargetSet::new(); @@ -348,7 +347,7 @@ async fn get_target_set_from_analysis_inner( for output in providers .provider_collection() - .default_info() + .default_info()? .default_outputs() { if let Some(action_key) = output.artifact().action_key() { @@ -365,7 +364,7 @@ async fn get_target_set_from_analysis_inner( } result.insert(ActionQueryNode::new_analysis( - configured_label.clone(), + configured_label.dupe(), analysis, )); @@ -377,7 +376,7 @@ impl QueryLiterals for AqueryData { async fn eval_literals( &self, literals: &[&str], - dice: &DiceComputations, + dice: &mut DiceComputations<'_>, ) -> anyhow::Result> { // For literal evaluation, we resolve the providers pattern to the analysis result, pull out // the default outputs and look up the corresponding actions. @@ -395,7 +394,7 @@ impl QueryLiterals for AqueryData { let configured_label = dice .get_configured_provider_label( &label, - self.delegate_query_data.global_target_platform(), + self.delegate_query_data.global_cfg_options(), ) .await?; diff --git a/app/buck2_query_impls/src/dice/mod.rs b/app/buck2_query_impls/src/dice/mod.rs deleted file mode 100644 index aae0ac9deb6ac..0000000000000 --- a/app/buck2_query_impls/src/dice/mod.rs +++ /dev/null @@ -1,394 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. 
- * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::collections::HashMap; -use std::sync::Arc; - -use async_trait::async_trait; -use buck2_build_api::configure_targets::load_compatible_patterns; -use buck2_common::dice::cells::HasCellResolver; -use buck2_common::dice::data::HasIoProvider; -use buck2_common::dice::file_ops::HasFileOps; -use buck2_common::package_boundary::HasPackageBoundaryExceptions; -use buck2_common::package_boundary::PackageBoundaryExceptions; -use buck2_common::package_listing::dice::HasPackageListingResolver; -use buck2_common::package_listing::resolver::PackageListingResolver; -use buck2_common::pattern::resolve::resolve_target_patterns; -use buck2_common::pattern::resolve::ResolvedPattern; -use buck2_common::target_aliases::BuckConfigTargetAliasResolver; -use buck2_common::target_aliases::HasTargetAliasResolver; -use buck2_core::bzl::ImportPath; -use buck2_core::cells::cell_path::CellPath; -use buck2_core::cells::name::CellName; -use buck2_core::cells::CellAliasResolver; -use buck2_core::cells::CellResolver; -use buck2_core::configuration::compatibility::MaybeCompatible; -use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; -use buck2_core::fs::paths::file_name::FileNameBuf; -use buck2_core::fs::project::ProjectRoot; -use buck2_core::fs::project_rel_path::ProjectRelativePath; -use buck2_core::package::PackageLabel; -use buck2_core::pattern::pattern_type::ProvidersPatternExtra; -use buck2_core::pattern::pattern_type::TargetPatternExtra; -use buck2_core::pattern::query_file_literal::parse_query_file_literal; -use buck2_core::pattern::ParsedPattern; -use buck2_core::provider::label::ProvidersName; -use buck2_core::soft_error; -use buck2_core::target::configured_target_label::ConfiguredTargetLabel; -use buck2_core::target::label::TargetLabel; -use buck2_interpreter::load_module::InterpreterCalculation; -use buck2_node::load_patterns::load_patterns; -use buck2_node::load_patterns::MissingTargetBehavior; -use buck2_node::nodes::configured::ConfiguredTargetNode; -use buck2_node::nodes::configured_frontend::ConfiguredTargetNodeCalculation; -use buck2_node::nodes::eval_result::EvaluationResult; -use buck2_node::nodes::frontend::TargetGraphCalculation; -use buck2_node::nodes::unconfigured::TargetNode; -use buck2_node::target_calculation::ConfiguredTargetCalculation; -use buck2_query::query::syntax::simple::eval::file_set::FileNode; -use buck2_query::query::syntax::simple::eval::file_set::FileSet; -use buck2_query::query::syntax::simple::eval::set::TargetSet; -use dice::DiceComputations; -use dupe::Dupe; -use gazebo::prelude::*; -use indexmap::indexset; - -use crate::cquery::environment::CqueryDelegate; -use crate::uquery::environment::QueryLiterals; -use crate::uquery::environment::UqueryDelegate; - -pub mod aquery; - -#[derive(Debug, thiserror::Error)] -enum LiteralParserError { - #[error("Expected a target pattern without providers, got: `{0}`")] - ExpectingTargetPatternWithoutProviders(String), -} - -pub(crate) struct LiteralParser { - // file and target literals are resolved relative to the working dir. 
- working_dir: CellPath, - working_dir_abs: AbsNormPathBuf, - project_root: ProjectRoot, - cell_resolver: CellResolver, - cell_alias_resolver: CellAliasResolver, - target_alias_resolver: BuckConfigTargetAliasResolver, -} - -impl LiteralParser { - // We allow provider names and flavors in the value and it gets stripped out for the result as queries operate on the target graphs. - fn parse_target_pattern( - &self, - value: &str, - ) -> anyhow::Result> { - let providers_pattern = self.parse_providers_pattern(value)?; - let target_pattern = match providers_pattern { - ParsedPattern::Target(package, target_name, ProvidersPatternExtra { providers }) => { - if providers != ProvidersName::Default { - // After converting this to hard error, replace this function body - // with direct call to `ParsedPattern::parse_relative`, - // as `parse_providers_pattern` does. - soft_error!( - "expecting_target_pattern_without_providers", - LiteralParserError::ExpectingTargetPatternWithoutProviders( - value.to_owned() - ) - .into() - )?; - } - ParsedPattern::Target(package, target_name, TargetPatternExtra) - } - ParsedPattern::Package(package) => ParsedPattern::Package(package), - ParsedPattern::Recursive(path) => ParsedPattern::Recursive(path), - }; - Ok(target_pattern) - } - - pub(crate) fn parse_providers_pattern( - &self, - value: &str, - ) -> anyhow::Result> { - ParsedPattern::parse_relative( - &self.target_alias_resolver, - self.working_dir.as_ref(), - value, - &self.cell_resolver, - ) - } - - fn parse_file_literal(&self, literal: &str) -> anyhow::Result { - parse_query_file_literal( - literal, - &self.cell_alias_resolver, - &self.cell_resolver, - &self.working_dir_abs, - &self.project_root, - ) - } -} - -/// A Uquery delegate that resolves TargetNodes with the provided -/// InterpreterCalculation. -pub struct DiceQueryDelegate<'c> { - ctx: &'c DiceComputations, - cell_resolver: CellResolver, - query_data: Arc, - package_boundary_exceptions: Arc, -} - -pub struct DiceQueryData { - literal_parser: LiteralParser, - global_target_platform: Option, -} - -impl DiceQueryData { - pub fn new( - global_target_platform: Option, - cell_resolver: CellResolver, - working_dir: &ProjectRelativePath, - project_root: ProjectRoot, - target_alias_resolver: BuckConfigTargetAliasResolver, - ) -> anyhow::Result { - let cell_path = cell_resolver.get_cell_path(working_dir)?; - - let cell_alias_resolver = cell_resolver - .get(cell_path.cell())? 
- .cell_alias_resolver() - .dupe(); - let working_dir_abs = project_root.resolve(working_dir); - - Ok(Self { - literal_parser: LiteralParser { - working_dir_abs, - working_dir: cell_path, - project_root, - cell_resolver, - cell_alias_resolver, - target_alias_resolver, - }, - global_target_platform, - }) - } - - pub(crate) fn literal_parser(&self) -> &LiteralParser { - &self.literal_parser - } - - pub(crate) fn global_target_platform(&self) -> Option<&TargetLabel> { - self.global_target_platform.as_ref() - } -} - -impl<'c> DiceQueryDelegate<'c> { - pub fn new( - ctx: &'c DiceComputations, - cell_resolver: CellResolver, - package_boundary_exceptions: Arc, - query_data: Arc, - ) -> Self { - Self { - ctx, - cell_resolver: cell_resolver.dupe(), - query_data, - package_boundary_exceptions, - } - } - - pub(crate) fn ctx(&self) -> &'c DiceComputations { - self.ctx - } - - pub(crate) fn query_data(&self) -> &Arc { - &self.query_data - } -} - -#[async_trait] -impl<'c> UqueryDelegate for DiceQueryDelegate<'c> { - async fn eval_build_file( - &self, - package: PackageLabel, - ) -> anyhow::Result> { - self.ctx.get_interpreter_results(package).await - } - - async fn eval_module_imports(&self, path: &ImportPath) -> anyhow::Result> { - //TODO(benfoxman): Don't need to get the whole module, just parse the imports. - let module = self.ctx.get_loaded_module_from_import_path(path).await?; - Ok(module.imports().cloned().collect()) - } - - // get the list of potential buildfile names for each cell - fn get_buildfile_names_by_cell(&self) -> anyhow::Result> { - let resolver = &self.cell_resolver; - let mut buildfile_names_by_cell = HashMap::::new(); - for (cell, _) in resolver.cells() { - let prev = buildfile_names_by_cell.insert(cell, resolver.get(cell)?.buildfiles()); - assert!(prev.is_none()); - } - Ok(buildfile_names_by_cell) - } - - async fn resolve_target_patterns( - &self, - patterns: &[&str], - ) -> anyhow::Result> { - let parsed_patterns = - patterns.try_map(|p| self.query_data.literal_parser.parse_target_pattern(p))?; - let file_ops = self.ctx.file_ops(); - resolve_target_patterns(&self.cell_resolver, &parsed_patterns, &file_ops).await - } - - // This returns 1 package normally but can return multiple packages if the path is covered under `self.package_boundary_exceptions`. - async fn get_enclosing_packages(&self, path: &CellPath) -> anyhow::Result> { - let package_listing_resolver = self.ctx.get_package_listing_resolver(); - - // Without package boundary violations, there is only 1 owning package for a path. - // However, with package boundary violations, all parent packages of the enclosing package can also be owners. - if let Some(enclosing_violation_path) = self - .package_boundary_exceptions - .get_package_boundary_exception_path(path) - { - return Ok(package_listing_resolver - .get_enclosing_packages(path.as_ref(), enclosing_violation_path.as_ref()) - .await? 
- .into_iter() - .collect()); - } - - let package = package_listing_resolver - .get_enclosing_package(path.as_ref()) - .await?; - Ok(vec![package]) - } - - async fn eval_file_literal(&self, literal: &str) -> anyhow::Result { - let cell_path = self.query_data.literal_parser.parse_file_literal(literal)?; - Ok(FileSet::new(indexset![FileNode(cell_path)])) - } - - fn ctx(&self) -> &DiceComputations { - self.ctx - } -} - -#[async_trait] -impl<'c> CqueryDelegate for DiceQueryDelegate<'c> { - fn uquery_delegate(&self) -> &dyn UqueryDelegate { - self - } - - async fn get_node_for_target( - &self, - target: &TargetLabel, - ) -> anyhow::Result> { - let target = self - .ctx - .get_configured_target(target, self.query_data.global_target_platform.as_ref()) - .await?; - Ok(self.ctx.get_configured_target_node(&target).await?) - } - - async fn get_node_for_configured_target( - &self, - target: &ConfiguredTargetLabel, - ) -> anyhow::Result { - Ok(self - .ctx - .get_configured_target_node(target) - .await? - .require_compatible()?) - } - - async fn get_node_for_default_configured_target( - &self, - target: &TargetLabel, - ) -> anyhow::Result> { - let target = self.ctx.get_default_configured_target(target).await?; - self.ctx.get_configured_target_node(&target).await - } - - async fn get_configured_target( - &self, - target: &TargetLabel, - ) -> anyhow::Result { - self.ctx - .get_configured_target(target, self.query_data.global_target_platform.as_ref()) - .await - } - - fn ctx(&self) -> &DiceComputations { - self.ctx - } -} - -#[async_trait] -impl QueryLiterals for DiceQueryData { - async fn eval_literals( - &self, - literals: &[&str], - ctx: &DiceComputations, - ) -> anyhow::Result> { - let parsed_patterns = literals.try_map(|p| self.literal_parser.parse_target_pattern(p))?; - load_compatible_patterns( - ctx, - parsed_patterns, - self.global_target_platform.dupe(), - MissingTargetBehavior::Fail, - ) - .await - } -} - -#[async_trait] -impl QueryLiterals for DiceQueryData { - async fn eval_literals( - &self, - literals: &[&str], - ctx: &DiceComputations, - ) -> anyhow::Result> { - let parsed_patterns = literals.try_map(|p| self.literal_parser.parse_target_pattern(p))?; - let loaded_patterns = - load_patterns(ctx, parsed_patterns, MissingTargetBehavior::Fail).await?; - let mut target_set = TargetSet::new(); - for (_package, results) in loaded_patterns.into_iter() { - target_set.extend(results?.into_values()); - } - Ok(target_set) - } -} - -pub(crate) async fn get_dice_query_delegate<'a, 'c: 'a>( - ctx: &'c DiceComputations, - working_dir: &'a ProjectRelativePath, - global_target_platform: Option, -) -> anyhow::Result> { - let cell_resolver = ctx.get_cell_resolver().await?; - let package_boundary_exceptions = ctx.get_package_boundary_exceptions().await?; - let target_alias_resolver = ctx - .target_alias_resolver_for_working_dir(working_dir) - .await?; - let project_root = ctx - .global_data() - .get_io_provider() - .project_root() - .to_owned(); - Ok(DiceQueryDelegate::new( - ctx, - cell_resolver.dupe(), - package_boundary_exceptions, - Arc::new(DiceQueryData::new( - global_target_platform, - cell_resolver, - working_dir, - project_root, - target_alias_resolver, - )?), - )) -} diff --git a/app/buck2_query_impls/src/frontend.rs b/app/buck2_query_impls/src/frontend.rs index e53eb37de00cf..d9be407ab5252 100644 --- a/app/buck2_query_impls/src/frontend.rs +++ b/app/buck2_query_impls/src/frontend.rs @@ -7,21 +7,23 @@ * of this source tree. 
 */
 
+use std::sync::Arc;
+
 use async_trait::async_trait;
 use buck2_build_api::actions::query::ActionQueryNode;
-use buck2_build_api::query::oneshot::CqueryOwnerBehavior;
 use buck2_build_api::query::oneshot::QueryFrontend;
 use buck2_build_api::query::oneshot::QUERY_FRONTEND;
+use buck2_common::global_cfg_options::GlobalCfgOptions;
 use buck2_core::fs::project_rel_path::ProjectRelativePath;
-use buck2_core::target::label::TargetLabel;
 use buck2_node::configured_universe::CqueryUniverse;
+use buck2_node::configured_universe::UNIVERSE_FROM_LITERALS;
 use buck2_node::nodes::configured::ConfiguredTargetNode;
 use buck2_node::nodes::unconfigured::TargetNode;
 use buck2_query::query::syntax::simple::eval::values::QueryEvaluationResult;
 use dice::DiceComputations;
 
 use crate::aquery::evaluator::get_aquery_evaluator;
-use crate::cquery::evaluator::get_cquery_evaluator;
+use crate::cquery::evaluator::eval_cquery;
 use crate::cquery::evaluator::preresolve_literals_and_build_universe;
 use crate::dice::get_dice_query_delegate;
 use crate::uquery::evaluator::get_uquery_evaluator;
@@ -36,62 +38,82 @@ pub(crate) fn init_query_frontend() {
 impl QueryFrontend for QueryFrontendImpl {
     async fn eval_uquery(
         &self,
-        ctx: &DiceComputations,
+        ctx: &mut DiceComputations<'_>,
         working_dir: &ProjectRelativePath,
         query: &str,
         query_args: &[String],
-        global_target_platform: Option<TargetLabel>,
     ) -> anyhow::Result<QueryEvaluationResult<TargetNode>> {
-        let evaluator = get_uquery_evaluator(ctx, working_dir, global_target_platform).await?;
-
-        evaluator.eval_query(query, query_args).await
+        ctx.with_linear_recompute(|ctx| async move {
+            let evaluator = get_uquery_evaluator(&ctx, working_dir).await?;
+            evaluator.eval_query(query, query_args).await
+        })
+        .await
     }
 
+    /// Evaluate a cquery query.
+    ///
+    /// Along with the query results, the function returns all the universes
+    /// that were used to resolve query literals, in non-deterministic order.
+    /// The universes are used by the Starlark profiler.
     async fn eval_cquery(
         &self,
-        ctx: &DiceComputations,
+        ctx: &mut DiceComputations<'_>,
         working_dir: &ProjectRelativePath,
-        owner_behavior: CqueryOwnerBehavior,
         query: &str,
         query_args: &[String],
-        global_target_platform: Option<TargetLabel>,
+        global_cfg_options: GlobalCfgOptions,
         target_universe: Option<&[String]>,
-    ) -> anyhow::Result<QueryEvaluationResult<ConfiguredTargetNode>> {
-        let evaluator =
-            get_cquery_evaluator(ctx, working_dir, global_target_platform, owner_behavior).await?;
+        collect_universes: bool,
+    ) -> anyhow::Result<(
+        QueryEvaluationResult<ConfiguredTargetNode>,
+        Option<Vec<Arc<CqueryUniverse>>>,
+    )> {
+        ctx.with_linear_recompute(|ctx| async move {
+            let dice_query_delegate =
+                get_dice_query_delegate(&ctx, working_dir, global_cfg_options).await?;
 
-        // TODO(nga): this should support configured target patterns
-        // similarly to what we do for `build` command.
+ // Something like this should work: + // ``` + // buck2 cquery --target-universe android//:binary 'deps("some//:lib ()")' + // ``` + eval_cquery( + dice_query_delegate, + query, + query_args, + target_universe.as_ref().map(|v| &v[..]), + collect_universes, + ) .await + }) + .await } async fn eval_aquery( &self, - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, working_dir: &ProjectRelativePath, query: &str, query_args: &[String], - global_target_platform: Option, + global_cfg_options: GlobalCfgOptions, ) -> anyhow::Result> { - let evaluator = get_aquery_evaluator(ctx, working_dir, global_target_platform).await?; - - evaluator.eval_query(query, query_args).await + ctx.with_linear_recompute(|ctx| async move { + let evaluator = get_aquery_evaluator(&ctx, working_dir, global_cfg_options).await?; + evaluator.eval_query(query, query_args).await + }) + .await } +} - async fn universe_from_literals( - &self, - ctx: &DiceComputations, - cwd: &ProjectRelativePath, - literals: &[String], - global_target_platform: Option, - ) -> anyhow::Result { - let query_delegate = get_dice_query_delegate(ctx, cwd, global_target_platform).await?; +async fn universe_from_literals( + ctx: &mut DiceComputations<'_>, + cwd: &ProjectRelativePath, + literals: &[String], + global_cfg_options: GlobalCfgOptions, +) -> anyhow::Result { + ctx.with_linear_recompute(|ctx| async move { + let query_delegate = get_dice_query_delegate(&ctx, cwd, global_cfg_options).await?; Ok(preresolve_literals_and_build_universe( &query_delegate, query_delegate.query_data(), @@ -99,5 +121,22 @@ impl QueryFrontend for QueryFrontendImpl { ) .await? .0) - } + }) + .await +} + +pub(crate) fn init_universe_from_literals() { + UNIVERSE_FROM_LITERALS.init( + |ctx: &mut DiceComputations<'_>, + cwd: &ProjectRelativePath, + literals: &[String], + global_cfg_options: GlobalCfgOptions| { + Box::pin(universe_from_literals( + ctx, + cwd, + literals, + global_cfg_options, + )) + }, + ); } diff --git a/app/buck2_query_impls/src/lib.rs b/app/buck2_query_impls/src/lib.rs index ada3ea9eda310..0211914bd6e5c 100644 --- a/app/buck2_query_impls/src/lib.rs +++ b/app/buck2_query_impls/src/lib.rs @@ -7,19 +7,18 @@ * of this source tree. */ -#![feature(async_closure)] +#![feature(error_generic_member_access)] #![feature(try_blocks)] -#![feature(provide_any)] use std::sync::Once; -pub mod analysis; -pub mod aquery; -pub mod cquery; +pub(crate) mod analysis; +pub(crate) mod aquery; +pub(crate) mod cquery; mod description; -pub mod dice; -pub mod frontend; -pub mod uquery; +pub(crate) mod dice; +pub(crate) mod frontend; +pub(crate) mod uquery; pub fn init_late_bindings() { static ONCE: Once = Once::new(); @@ -30,6 +29,7 @@ pub fn init_late_bindings() { aquery::find_matching_action::init_find_matching_action(); description::init_query_environment_description_by_type(); frontend::init_query_frontend(); + frontend::init_universe_from_literals(); cquery::bxl::init_new_bxl_cquery_functions(); aquery::bxl::init_new_bxl_aquery_functions(); uquery::bxl::init_new_bxl_uquery_functions(); diff --git a/app/buck2_query_impls/src/uquery.rs b/app/buck2_query_impls/src/uquery.rs new file mode 100644 index 0000000000000..6849c9eec9575 --- /dev/null +++ b/app/buck2_query_impls/src/uquery.rs @@ -0,0 +1,12 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub(crate) mod bxl; +pub(crate) mod environment; +pub(crate) mod evaluator; diff --git a/app/buck2_query_impls/src/uquery/bxl.rs b/app/buck2_query_impls/src/uquery/bxl.rs index 5ee8ecef249e5..ebef36c74ff04 100644 --- a/app/buck2_query_impls/src/uquery/bxl.rs +++ b/app/buck2_query_impls/src/uquery/bxl.rs @@ -13,7 +13,7 @@ use async_trait::async_trait; use buck2_build_api::query::bxl::BxlUqueryFunctions; use buck2_build_api::query::bxl::NEW_BXL_UQUERY_FUNCTIONS; use buck2_common::dice::cells::HasCellResolver; -use buck2_common::package_boundary::HasPackageBoundaryExceptions; +use buck2_common::global_cfg_options::GlobalCfgOptions; use buck2_common::target_aliases::HasTargetAliasResolver; use buck2_core::fs::project::ProjectRoot; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; @@ -24,6 +24,7 @@ use buck2_query::query::syntax::simple::functions::helpers::CapturedExpr; use buck2_query::query::syntax::simple::functions::DefaultQueryFunctions; use buck2_query::query::syntax::simple::functions::DefaultQueryFunctionsModule; use dice::DiceComputations; +use dice::LinearRecomputeDiceComputations; use dupe::Dupe; use crate::dice::DiceQueryData; @@ -40,35 +41,31 @@ struct BxlUqueryFunctionsImpl { } impl BxlUqueryFunctionsImpl { - async fn uquery_delegate<'c>( + async fn uquery_delegate<'c, 'd>( &self, - dice: &'c mut DiceComputations, - ) -> anyhow::Result> { - let cell_resolver = dice.get_cell_resolver().await?; - - let package_boundary_exceptions = dice.get_package_boundary_exceptions().await?; - let target_alias_resolver = dice - .target_alias_resolver_for_working_dir(&self.working_dir) + dice: &'c LinearRecomputeDiceComputations<'d>, + ) -> anyhow::Result> { + let cell_resolver = dice.get().get_cell_resolver().await?; + let cell_alias_resolver = dice + .get() + .get_cell_alias_resolver_for_dir(&self.working_dir) .await?; + let target_alias_resolver = dice.get().target_alias_resolver().await?; let query_data = Arc::new(DiceQueryData::new( - None, + GlobalCfgOptions::default(), cell_resolver.dupe(), + cell_alias_resolver, &self.working_dir, self.project_root.dupe(), target_alias_resolver, )?); - Ok(DiceQueryDelegate::new( - dice, - cell_resolver, - package_boundary_exceptions, - query_data, - )) + Ok(DiceQueryDelegate::new(dice, query_data)) } - async fn uquery_env<'c>( + async fn uquery_env<'c, 'd>( &self, - delegate: &'c DiceQueryDelegate<'c>, + delegate: &'c DiceQueryDelegate<'c, 'd>, ) -> anyhow::Result> { let literals = delegate.query_data().dupe(); Ok(UqueryEnvironment::new(delegate, literals)) @@ -79,88 +76,130 @@ impl BxlUqueryFunctionsImpl { impl BxlUqueryFunctions for BxlUqueryFunctionsImpl { async fn allpaths( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, from: &TargetSet, to: &TargetSet, + captured_expr: Option<&CapturedExpr>, ) -> anyhow::Result> { - Ok(uquery_functions() - .allpaths( - &self.uquery_env(&self.uquery_delegate(dice).await?).await?, - from, - to, - ) - .await?) + dice.with_linear_recompute(|dice| async move { + Ok(uquery_functions() + .allpaths( + &self.uquery_env(&self.uquery_delegate(&dice).await?).await?, + &DefaultQueryFunctionsModule::new(), + from, + to, + captured_expr, + ) + .await?) 
+ }) + .await } async fn somepath( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, from: &TargetSet, to: &TargetSet, + captured_expr: Option<&CapturedExpr>, ) -> anyhow::Result> { - Ok(uquery_functions() - .somepath( - &self.uquery_env(&self.uquery_delegate(dice).await?).await?, - from, - to, - ) - .await?) + dice.with_linear_recompute(|dice| async move { + Ok(uquery_functions() + .somepath( + &self.uquery_env(&self.uquery_delegate(&dice).await?).await?, + &DefaultQueryFunctionsModule::new(), + from, + to, + captured_expr, + ) + .await?) + }) + .await } async fn deps( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, targets: &TargetSet, deps: Option, captured_expr: Option<&CapturedExpr>, ) -> anyhow::Result> { - Ok(uquery_functions() - .deps( - &self.uquery_env(&self.uquery_delegate(dice).await?).await?, - &DefaultQueryFunctionsModule::new(), - targets, - deps, - captured_expr, - ) - .await?) + dice.with_linear_recompute(|dice| async move { + uquery_functions() + .deps( + &self.uquery_env(&self.uquery_delegate(&dice).await?).await?, + &DefaultQueryFunctionsModule::new(), + targets, + deps, + captured_expr, + ) + .await + }) + .await } async fn rdeps( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, universe: &TargetSet, targets: &TargetSet, depth: Option, + captured_expr: Option<&CapturedExpr>, ) -> anyhow::Result> { - Ok(uquery_functions() - .rdeps( - &self.uquery_env(&self.uquery_delegate(dice).await?).await?, - universe, - targets, - depth, - ) - .await?) + dice.with_linear_recompute(|dice| async move { + uquery_functions() + .rdeps( + &self.uquery_env(&self.uquery_delegate(&dice).await?).await?, + &DefaultQueryFunctionsModule::new(), + universe, + targets, + depth, + captured_expr, + ) + .await + }) + .await } async fn testsof( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, targets: &TargetSet, ) -> anyhow::Result> { - Ok(uquery_functions() - .testsof( - &self.uquery_env(&self.uquery_delegate(dice).await?).await?, - targets, - ) - .await?) + dice.with_linear_recompute(|dice| async move { + uquery_functions() + .testsof( + &self.uquery_env(&self.uquery_delegate(&dice).await?).await?, + targets, + ) + .await + }) + .await } async fn owner( &self, - dice: &mut DiceComputations, + dice: &mut DiceComputations<'_>, file_set: &FileSet, ) -> anyhow::Result> { - Ok(uquery_functions() - .owner( - &self.uquery_env(&self.uquery_delegate(dice).await?).await?, - file_set, - ) - .await?) 
+ dice.with_linear_recompute(|dice| async move { + uquery_functions() + .owner( + &self.uquery_env(&self.uquery_delegate(&dice).await?).await?, + file_set, + ) + .await + }) + .await + } + async fn targets_in_buildfile( + &self, + dice: &mut DiceComputations<'_>, + file_set: &FileSet, + ) -> anyhow::Result> { + dice.with_linear_recompute(|dice| async move { + uquery_functions() + .targets_in_buildfile( + &self.uquery_env(&self.uquery_delegate(&dice).await?).await?, + file_set, + ) + .await + }) + .await } } diff --git a/app/buck2_query_impls/src/uquery/environment.rs b/app/buck2_query_impls/src/uquery/environment.rs index dad6740f4afb8..82ab9fba1f614 100644 --- a/app/buck2_query_impls/src/uquery/environment.rs +++ b/app/buck2_query_impls/src/uquery/environment.rs @@ -12,9 +12,9 @@ use std::sync::Arc; use anyhow::Context; use async_trait::async_trait; +use buck2_common::package_listing::dice::DicePackageListingResolver; +use buck2_common::package_listing::resolver::PackageListingResolver; use buck2_common::pattern::resolve::ResolvedPattern; -use buck2_common::result::SharedResult; -use buck2_common::result::ToSharedResultExt; use buck2_core::bzl::ImportPath; use buck2_core::cells::cell_path::CellPath; use buck2_core::cells::name::CellName; @@ -23,13 +23,16 @@ use buck2_core::fs::paths::file_name::FileName; use buck2_core::fs::paths::file_name::FileNameBuf; use buck2_core::package::PackageLabel; use buck2_core::pattern::pattern_type::TargetPatternExtra; -use buck2_core::target::label::TargetLabel; -use buck2_node::nodes::eval_result::EvaluationResult; +use buck2_core::target::label::label::TargetLabel; +use buck2_interpreter::load_module::InterpreterCalculation; +use buck2_node::nodes::frontend::TargetGraphCalculation; use buck2_node::nodes::unconfigured::TargetNode; -use buck2_query::query::environment::LabeledNode; -use buck2_query::query::environment::NodeLabel; use buck2_query::query::environment::QueryEnvironment; +use buck2_query::query::environment::QueryEnvironmentAsNodeLookup; use buck2_query::query::environment::QueryTarget; +use buck2_query::query::graph::node::LabeledNode; +use buck2_query::query::graph::node::NodeKey; +use buck2_query::query::graph::successors::AsyncChildVisitor; use buck2_query::query::syntax::simple::eval::error::QueryError; use buck2_query::query::syntax::simple::eval::file_set::FileNode; use buck2_query::query::syntax::simple::eval::file_set::FileSet; @@ -40,29 +43,30 @@ use buck2_query::query::syntax::simple::functions::HasModuleDescription; use buck2_query::query::traversal::async_depth_first_postorder_traversal; use buck2_query::query::traversal::async_depth_limited_traversal; use buck2_query::query::traversal::AsyncNodeLookup; -use buck2_query::query::traversal::AsyncTraversalDelegate; use buck2_query::query::traversal::ChildVisitor; +use buck2_util::future::try_join_all; use derive_more::Display; use dice::DiceComputations; +use dice::LinearRecomputeDiceComputations; use dupe::Dupe; use futures::stream::FuturesUnordered; +use futures::FutureExt; use futures::StreamExt; use gazebo::prelude::*; use indexmap::IndexSet; use itertools::Itertools; use ref_cast::RefCast; -use thiserror::Error; use tracing::warn; type ArcCellPath = Arc; -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum QueryLiteralResolutionError { #[error("literal `{0}` missing in pre-resolved literals")] LiteralMissing(String), } -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum RBuildFilesError { #[error("no parent found for the file `{0}`")] 
ParentDoesNotExist(ArcCellPath), @@ -70,23 +74,12 @@ enum RBuildFilesError { CellMissingBuildFileNames(CellName), } -pub enum SpecialAttr { - String(String), -} - /// UqueryDelegate resolves information needed by the QueryEnvironment. #[async_trait] -pub trait UqueryDelegate: Send + Sync { - /// Returns the EvaluationResult for evaluation of the buildfile. - async fn eval_build_file( +pub(crate) trait UqueryDelegate: Send + Sync { + async fn get_buildfile_names_by_cell( &self, - packages: PackageLabel, - ) -> anyhow::Result>; - - /// Get the imports from a LoadedModule corresponding to some path. - async fn eval_module_imports(&self, path: &ImportPath) -> anyhow::Result>; - - fn get_buildfile_names_by_cell(&self) -> anyhow::Result>; + ) -> anyhow::Result>>; /// Resolves a target pattern. async fn resolve_target_patterns( @@ -101,49 +94,55 @@ pub trait UqueryDelegate: Send + Sync { // all parent packages if the package matches `project.package_boundary_exceptions` buckconfig. async fn get_enclosing_packages(&self, path: &CellPath) -> anyhow::Result>; - fn ctx(&self) -> &DiceComputations; + fn linear_dice_computations(&self) -> &LinearRecomputeDiceComputations<'_>; + + fn ctx<'a>(&'a self) -> DiceComputations<'a>; } #[async_trait] -pub trait QueryLiterals: Send + Sync { +pub(crate) trait QueryLiterals: Send + Sync { async fn eval_literals( &self, literals: &[&str], - dice: &DiceComputations, + dice: &mut DiceComputations<'_>, ) -> anyhow::Result>; } -pub struct UqueryEnvironment<'c> { +pub(crate) struct UqueryEnvironment<'c> { delegate: &'c dyn UqueryDelegate, literals: Arc + 'c>, } -pub struct PreresolvedQueryLiterals { - resolved_literals: HashMap>>, +pub(crate) struct PreresolvedQueryLiterals { + resolved_literals: HashMap>>, } impl PreresolvedQueryLiterals { - pub fn new(resolved_literals: HashMap>>) -> Self { + pub(crate) fn new( + resolved_literals: HashMap>>, + ) -> Self { Self { resolved_literals } } - pub async fn pre_resolve( + pub(crate) async fn pre_resolve( base: &dyn QueryLiterals, literals: &[String], - dice: &DiceComputations, + dice: &mut DiceComputations<'_>, ) -> Self { - let futs = literals - .iter() - .map(|lit| async move { (lit.to_owned(), base.eval_literals(&[lit], dice).await) }); + let resolved_literal_results = dice + .compute_join(literals.iter(), |ctx, lit| { + async move { (lit.to_owned(), base.eval_literals(&[lit], ctx).await) }.boxed() + }) + .await; let mut resolved_literals = HashMap::new(); - for (literal, result) in futures::future::join_all(futs).await { - resolved_literals.insert(literal, result.shared_error()); + for (literal, result) in resolved_literal_results { + resolved_literals.insert(literal, result.map_err(buck2_error::Error::from)); } Self { resolved_literals } } /// All the literals, or error if resolution of any failed. 
- pub fn literals(&self) -> anyhow::Result> { + pub(crate) fn literals(&self) -> anyhow::Result> { let mut literals = TargetSet::new(); for result in self.resolved_literals.values() { literals.extend(result.as_ref().map_err(|e| e.dupe())?); @@ -157,7 +156,7 @@ impl QueryLiterals for PreresolvedQueryLiterals { async fn eval_literals( &self, literals: &[&str], - _: &DiceComputations, + _: &mut DiceComputations<'_>, ) -> anyhow::Result> { let mut targets = TargetSet::new(); for lit in literals { @@ -176,7 +175,7 @@ impl QueryLiterals for PreresolvedQueryLiterals { } impl<'c> UqueryEnvironment<'c> { - pub fn new( + pub(crate) fn new( delegate: &'c dyn UqueryDelegate, literals: Arc + 'c>, ) -> Self { @@ -193,11 +192,12 @@ impl<'c> UqueryEnvironment<'c> { async fn get_node(&self, target: &TargetLabel) -> anyhow::Result { let package = self .delegate - .eval_build_file(target.pkg()) + .ctx() + .get_interpreter_results(target.pkg()) .await .with_context(|| format!("Error looking up `{}``", target))?; let node = package.resolve_target(target.name())?; - Ok(node.dupe()) + Ok(node.to_owned()) } } @@ -221,7 +221,7 @@ impl<'c> QueryEnvironment for UqueryEnvironment<'c> { async fn eval_literals(&self, literals: &[&str]) -> anyhow::Result> { self.literals - .eval_literals(literals, self.delegate.ctx()) + .eval_literals(literals, &mut self.delegate.ctx()) .await } @@ -232,18 +232,33 @@ impl<'c> QueryEnvironment for UqueryEnvironment<'c> { async fn dfs_postorder( &self, root: &TargetSet, - traversal_delegate: &mut dyn AsyncTraversalDelegate, + traversal_delegate: impl AsyncChildVisitor, + visit: impl FnMut(TargetNode) -> anyhow::Result<()> + Send, ) -> anyhow::Result<()> { - async_depth_first_postorder_traversal(self, root.iter_names(), traversal_delegate).await + async_depth_first_postorder_traversal( + &QueryEnvironmentAsNodeLookup { env: self }, + root.iter_names(), + traversal_delegate, + visit, + ) + .await } async fn depth_limited_traversal( &self, root: &TargetSet, - delegate: &mut dyn AsyncTraversalDelegate, + delegate: impl AsyncChildVisitor, + visit: impl FnMut(Self::Target) -> anyhow::Result<()> + Send, depth: u32, ) -> anyhow::Result<()> { - async_depth_limited_traversal(self, root.iter_names(), delegate, depth).await + async_depth_limited_traversal( + &QueryEnvironmentAsNodeLookup { env: self }, + root.iter_names(), + delegate, + visit, + depth, + ) + .await } async fn allbuildfiles(&self, universe: &TargetSet) -> anyhow::Result { @@ -264,7 +279,11 @@ impl<'c> QueryEnvironment for UqueryEnvironment<'c> { Ok(packages) => { let package_futs = packages.map(|package| async move { // TODO(cjhopman): We should make sure that the file exists. - let targets = self.delegate.eval_build_file(package.dupe()).await?; + let targets = self + .delegate + .ctx() + .get_interpreter_results(package.dupe()) + .await?; let owner_targets: Vec = targets .targets() @@ -272,7 +291,7 @@ impl<'c> QueryEnvironment for UqueryEnvironment<'c> { .filter_map(|node| { for input in node.inputs() { if &input == path { - return Some(node.dupe()); + return Some(node.to_owned()); // this intentionally breaks out of the loop. We don't need to look at the // other inputs of this target, but it's possible for a single file to be owned by // multiple targets. 
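The hunks above all wrap query evaluation in `ctx.with_linear_recompute(|ctx| async move { ... }).await`. A minimal sketch of the shape of that API, with stand-in types rather than the real DICE ones (`Ctx` here plays the role of `DiceComputations`, `LinearRecompute` of `LinearRecomputeDiceComputations`; the real method lives in the `dice` crate):

```rust
use std::future::Future;

// Stand-ins for the DICE types; illustrative only.
struct Ctx(u32);
struct LinearRecompute<'a>(&'a Ctx);

impl<'a> LinearRecompute<'a> {
    // Hands out shared access, so several helpers can hold `&ctx` at once.
    fn get(&self) -> &Ctx {
        self.0
    }
}

impl Ctx {
    // Takes `&mut self` for the duration of the closure, then gives it back.
    async fn with_linear_recompute<'a, T, Fut>(
        &'a mut self,
        f: impl FnOnce(LinearRecompute<'a>) -> Fut,
    ) -> T
    where
        Fut: Future<Output = T> + 'a,
    {
        f(LinearRecompute(self)).await
    }
}

async fn eval(ctx: &mut Ctx) -> u32 {
    ctx.with_linear_recompute(|ctx| async move {
        // Inside the closure only `&ctx` is available, mirroring how
        // `uquery_delegate(&dice)` and friends are called in the diff.
        ctx.get().0 + 1
    })
    .await
}
```

This is why the delegate methods in the hunks above switch from `&'c mut DiceComputations` to `&'c LinearRecomputeDiceComputations<'d>`: the exclusive borrow is surrendered once at the boundary, and everything inside shares it.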
@@ -302,12 +321,52 @@ impl<'c> QueryEnvironment for UqueryEnvironment<'c> { } Ok(result) } -} -#[async_trait] -impl<'a> AsyncNodeLookup for UqueryEnvironment<'a> { - async fn get(&self, label: &TargetLabel) -> anyhow::Result { - self.get_node(label).await + /// Finds all targets in some buildfiles. + /// + /// todo(dbarsky): instead of having this be a trait method, this should be implemented as a uquery-specific module + /// similar to [crate::aquery::functions::AqueryFunctions]. + async fn targets_in_buildfile( + &self, + paths: &FileSet, + ) -> anyhow::Result> { + let mut result: TargetSet = TargetSet::new(); + + let compute = &mut self.delegate.ctx(); + let mut resolver = DicePackageListingResolver(compute); + + let mut buildfiles = vec![]; + for path in paths.iter() { + let package_label = resolver.get_enclosing_package(path.as_ref()).await?; + let listing = resolver.resolve(package_label.dupe()).await?; + + // the listing's buildfile is relative to the package (the BUCK/TARGETS file). + // this makes it actually relative to the cell. + let buildfile = package_label.as_cell_path().join(listing.buildfile()); + if buildfile != *path { + // file is a not a buildfile. + continue; + } + buildfiles.push(package_label); + } + + let mut evaluations = vec![]; + for buildfile in buildfiles { + let fut = async move { self.delegate.ctx().get_interpreter_results(buildfile).await }; + evaluations.push(fut); + } + + let targets = try_join_all(evaluations).await?; + for evaluation_result in targets { + result.extend( + evaluation_result + .targets() + .values() + .map(|target_ref| target_ref.to_owned()), + ); + } + + Ok(result) } } @@ -323,13 +382,15 @@ pub(crate) async fn allbuildfiles<'c, T: QueryTarget>( paths.insert(FileNode(target.dupe().buildfile_path().path())); let eval_result = delegate - .eval_build_file(target.buildfile_path().package()) + .ctx() + .get_interpreter_results(target.buildfile_path().package()) .await?; // TODO: no longer use eval_build_file, just parse imports directly (will solve async issue too) top_level_imports.extend(eval_result.imports().iter().cloned()); } - let loads = get_transitive_loads(top_level_imports, delegate).await?; + let loads = + get_transitive_loads(top_level_imports, delegate.linear_dice_computations()).await?; let mut new_paths = IndexSet::::new(); for load in &loads { @@ -347,7 +408,7 @@ pub(crate) async fn rbuildfiles<'c>( let universe_paths: Vec = universe.iter().map(|file| Arc::new(file.clone())).collect(); // step 1: split the build files and bzl files - let (buildfiles, bzlfiles) = split_universe_files(&universe_paths, delegate)?; + let (buildfiles, bzlfiles) = split_universe_files(&universe_paths, delegate).await?; // step 2: get all top level imports accordingly let top_level_import_by_build_file = @@ -378,12 +439,12 @@ pub(crate) async fn rbuildfiles<'c>( #[repr(transparent)] struct NodeRef(ImportPath); - impl NodeLabel for NodeRef {} + impl NodeKey for NodeRef {} impl LabeledNode for Node { - type NodeRef = NodeRef; + type Key = NodeRef; - fn node_ref(&self) -> &NodeRef { + fn node_key(&self) -> &NodeRef { NodeRef::ref_cast(self.import_path()) } } @@ -397,67 +458,65 @@ pub(crate) async fn rbuildfiles<'c>( } } - struct Delegate<'c> { - output_paths: Vec, - argset: &'c FileSet, - first_order_import_map: HashMap>, + let mut output_paths: Vec = Vec::new(); + + struct Delegate<'a> { + first_order_import_map: &'a HashMap>, } - #[async_trait] - impl AsyncTraversalDelegate for Delegate<'_> { - fn visit(&mut self, node: Node) -> anyhow::Result<()> { - 
let node_import = node.import_path(); - if self.argset.iter().contains(node_import.path()) { - self.output_paths.push(node_import.clone()); - } else { - let loads = self - .first_order_import_map - .get(node_import) - .expect("import path should exist"); - for load in loads.iter() { - for arg in self.argset.iter() { - if load.path() == arg { - self.output_paths.push(node_import.clone()); - return Ok(()); - } + let visit = |node: Node| { + let node_import = node.import_path(); + if argset.iter().contains(node_import.path()) { + output_paths.push(node_import.clone()); + } else { + let loads = first_order_import_map + .get(node_import) + .expect("import path should exist"); + for load in loads.iter() { + for arg in argset.iter() { + if load.path() == arg { + output_paths.push(node_import.clone()); + return Ok(()); } } } - Ok(()) } + Ok(()) + }; + + impl AsyncChildVisitor for Delegate<'_> { async fn for_each_child( - &mut self, + &self, node: &Node, - func: &mut dyn ChildVisitor, + mut func: impl ChildVisitor, ) -> anyhow::Result<()> { for import in self .first_order_import_map .get(node.import_path()) .expect("import path should exist") { - func.visit(NodeRef(import.clone()))?; + func.visit(&NodeRef(import.clone()))?; } Ok(()) } } let lookup = Lookup {}; - let mut delegate = Delegate { - output_paths: vec![], - argset, - first_order_import_map, + let delegate = Delegate { + first_order_import_map: &first_order_import_map, }; // step 5: do traversal, get all modified imports async_depth_first_postorder_traversal( &lookup, all_top_level_imports.iter().map(NodeRef::ref_cast), - &mut delegate, + delegate, + visit, ) .await?; let mut output_files = IndexSet::::new(); - for file in &delegate.output_paths { + for file in &output_paths { output_files.insert(FileNode(file.path().clone())); } @@ -486,13 +545,13 @@ pub(crate) async fn rbuildfiles<'c>( Ok(FileSet::new(output_files)) } -fn split_universe_files<'c>( +async fn split_universe_files<'c>( universe: &[ArcCellPath], delegate: &'c dyn UqueryDelegate, ) -> anyhow::Result<(Vec, Vec)> { let mut buildfiles = Vec::::new(); let mut bzlfiles = Vec::::new(); - let buildfile_names_by_cell = delegate.get_buildfile_names_by_cell()?; + let buildfile_names_by_cell = delegate.get_buildfile_names_by_cell().await?; for file in universe { let buildfile_names_for_file = buildfile_names_by_cell.get(&file.cell()).ok_or_else(|| { @@ -534,7 +593,8 @@ async fn top_level_imports_by_build_file<'c>( ( file.dupe(), delegate - .eval_build_file(PackageLabel::from_cell_path(parent)) + .ctx() + .get_interpreter_results(PackageLabel::from_cell_path(parent)) .await, ) } else { @@ -559,11 +619,15 @@ async fn first_order_imports<'c>( all_top_level_imports: &[ImportPath], delegate: &'c dyn UqueryDelegate, ) -> anyhow::Result>> { - let all_imports = get_transitive_loads(all_top_level_imports.to_vec(), delegate).await?; + let all_imports = get_transitive_loads( + all_top_level_imports.to_vec(), + delegate.linear_dice_computations(), + ) + .await?; let mut all_first_order_futs: FuturesUnordered<_> = all_imports .iter() - .map(|node| async move { (node, delegate.eval_module_imports(node).await) }) + .map(|node| async move { (node, delegate.ctx().get_loaded_module_imports(node).await) }) .collect(); let mut first_order_import_map = HashMap::>::new(); @@ -578,9 +642,9 @@ async fn first_order_imports<'c>( } // Uquery and Cquery share ImportPath traversal logic, so we move the logic to this function. 
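The `Delegate` rewrites above all follow one refactor: the old `AsyncTraversalDelegate` bundled `visit` and `for_each_child` behind one `&mut` trait object, so traversal output had to live inside the delegate; the new code keeps child expansion in an immutable `AsyncChildVisitor` and passes visiting as a separate `FnMut` closure that owns the output. A synchronous toy version of that split (the real traits are async and live in `buck2_query`; this sketch skips visited-set bookkeeping):

```rust
/// Child expansion stays immutable (`Fn`), like `AsyncChildVisitor::for_each_child`.
/// Visiting is a separate `FnMut`, so the caller keeps the output as a plain local,
/// the way `rbuildfiles` now builds `output_paths` outside its `Delegate`.
fn traverse<N>(
    roots: Vec<N>,
    children: impl Fn(&N) -> Vec<N>,
    mut visit: impl FnMut(N) -> Result<(), String>,
) -> Result<(), String> {
    let mut stack = roots;
    while let Some(node) = stack.pop() {
        stack.extend(children(&node));
        visit(node)?;
    }
    Ok(())
}

fn main() -> Result<(), String> {
    let mut output = Vec::new();
    traverse(
        vec![1u32],
        |n| if *n < 4 { vec![n + 1] } else { vec![] },
        |n| {
            output.push(n); // mutable state lives with the caller, not the visitor
            Ok(())
        },
    )?;
    assert_eq!(output, vec![1, 2, 3, 4]);
    Ok(())
}
```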
-pub(crate) async fn get_transitive_loads<'c>( +pub(crate) async fn get_transitive_loads( top_level_imports: Vec, - delegate: &'c dyn UqueryDelegate, + ctx: &LinearRecomputeDiceComputations<'_>, ) -> anyhow::Result> { #[derive(Clone, Dupe)] struct Node(Arc); @@ -595,12 +659,12 @@ pub(crate) async fn get_transitive_loads<'c>( #[repr(transparent)] struct NodeRef(ImportPath); - impl NodeLabel for NodeRef {} + impl NodeKey for NodeRef {} impl LabeledNode for Node { - type NodeRef = NodeRef; + type Key = NodeRef; - fn node_ref(&self) -> &NodeRef { + fn node_key(&self) -> &NodeRef { NodeRef::ref_cast(self.import_path()) } } @@ -614,43 +678,41 @@ pub(crate) async fn get_transitive_loads<'c>( } } - struct Delegate<'c> { - imports: Vec, - delegate: &'c dyn UqueryDelegate, + let mut imports: Vec = Vec::new(); + + struct Delegate<'c, 'a> { + ctx: &'c LinearRecomputeDiceComputations<'a>, } - #[async_trait] - impl AsyncTraversalDelegate for Delegate<'_> { - fn visit(&mut self, target: Node) -> anyhow::Result<()> { - self.imports.push(target.import_path().clone()); - Ok(()) - } + let visit = |target: Node| { + imports.push(target.import_path().clone()); + Ok(()) + }; + impl AsyncChildVisitor for Delegate<'_, '_> { async fn for_each_child( - &mut self, + &self, target: &Node, - func: &mut dyn ChildVisitor, + mut func: impl ChildVisitor, ) -> anyhow::Result<()> { for import in self - .delegate - .eval_module_imports(target.import_path()) + .ctx + .get() + .get_loaded_module_imports(target.import_path()) .await? { - func.visit(NodeRef(import.clone()))?; + func.visit(&NodeRef(import))?; } Ok(()) } } - let mut traversal_delegate = Delegate { - imports: vec![], - delegate: delegate.dupe(), - }; + let traversal_delegate = Delegate { ctx }; let lookup = Lookup {}; let import_nodes = top_level_imports.iter().map(NodeRef::ref_cast); - async_depth_first_postorder_traversal(&lookup, import_nodes, &mut traversal_delegate).await?; + async_depth_first_postorder_traversal(&lookup, import_nodes, traversal_delegate, visit).await?; - Ok(traversal_delegate.imports) + Ok(imports) } diff --git a/app/buck2_query_impls/src/uquery/evaluator.rs b/app/buck2_query_impls/src/uquery/evaluator.rs index 855159e08d792..62fb66e42f8a6 100644 --- a/app/buck2_query_impls/src/uquery/evaluator.rs +++ b/app/buck2_query_impls/src/uquery/evaluator.rs @@ -10,12 +10,14 @@ //! Implementation of the cli and query_* attr query language. 
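The `eval_query` hunk below drops the unstable `async move |literals| { ... }` closure (matching the `#![feature(async_closure)]` removal from `lib.rs` earlier in this diff) in favor of a stable closure returning an `async move` block. The two spellings are interchangeable for this call shape; a minimal sketch under that assumption, with a hypothetical `eval_with` standing in for `eval_query`:

```rust
use std::future::Future;

// Generic caller in the shape of `eval_query`: it hands the literals to a
// user-supplied factory and awaits the environment the factory produces.
async fn eval_with<F, Fut, Env>(literals: Vec<String>, make_env: F) -> anyhow::Result<Env>
where
    F: FnOnce(Vec<String>) -> Fut,
    Fut: Future<Output = anyhow::Result<Env>>,
{
    make_env(literals).await
}

async fn demo() -> anyhow::Result<usize> {
    // Stable spelling: a plain closure returning an `async move` block.
    eval_with(vec!["//a:a".to_owned()], |literals| async move {
        Ok(literals.len())
    })
    .await
}
```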
 use std::sync::Arc;
 
+use buck2_common::events::HasEvents;
+use buck2_common::global_cfg_options::GlobalCfgOptions;
 use buck2_core::fs::project_rel_path::ProjectRelativePath;
-use buck2_core::target::label::TargetLabel;
 use buck2_node::nodes::unconfigured::TargetNode;
 use buck2_query::query::syntax::simple::eval::values::QueryEvaluationResult;
 use buck2_query::query::syntax::simple::functions::DefaultQueryFunctionsModule;
-use dice::DiceComputations;
+use dice::LinearRecomputeDiceComputations;
+use dupe::Dupe;
 
 use crate::analysis::evaluator::eval_query;
 use crate::dice::get_dice_query_delegate;
@@ -23,42 +25,51 @@ use crate::dice::DiceQueryDelegate;
 use crate::uquery::environment::PreresolvedQueryLiterals;
 use crate::uquery::environment::UqueryEnvironment;
 
-pub struct UqueryEvaluator<'c> {
-    dice_query_delegate: DiceQueryDelegate<'c>,
+pub(crate) struct UqueryEvaluator<'c, 'd> {
+    dice_query_delegate: DiceQueryDelegate<'c, 'd>,
     functions: DefaultQueryFunctionsModule<UqueryEnvironment<'c>>,
 }
 
-impl UqueryEvaluator<'_> {
-    pub async fn eval_query(
+impl UqueryEvaluator<'_, '_> {
+    pub(crate) async fn eval_query(
         &self,
         query: &str,
         query_args: &[String],
     ) -> anyhow::Result<QueryEvaluationResult<TargetNode>> {
-        eval_query(&self.functions, query, query_args, async move |literals| {
-            let resolved_literals = PreresolvedQueryLiterals::pre_resolve(
-                &**self.dice_query_delegate.query_data(),
-                &literals,
-                self.dice_query_delegate.ctx(),
-            )
-            .await;
-            Ok(UqueryEnvironment::new(
-                &self.dice_query_delegate,
-                Arc::new(resolved_literals),
-            ))
-        })
+        eval_query(
+            self.dice_query_delegate
+                .ctx()
+                .per_transaction_data()
+                .get_dispatcher()
+                .dupe(),
+            &self.functions,
+            query,
+            query_args,
+            |literals| async move {
+                let resolved_literals = PreresolvedQueryLiterals::pre_resolve(
+                    &**self.dice_query_delegate.query_data(),
+                    &literals,
+                    &mut self.dice_query_delegate.ctx(),
+                )
+                .await;
+                Ok(UqueryEnvironment::new(
+                    &self.dice_query_delegate,
+                    Arc::new(resolved_literals),
+                ))
+            },
+        )
         .await
     }
 }
 
 /// Evaluates some query expression. TargetNodes are resolved via the interpreter from
 /// the provided DiceCtx.
-pub async fn get_uquery_evaluator<'a, 'c: 'a>(
-    ctx: &'c DiceComputations,
+pub(crate) async fn get_uquery_evaluator<'a, 'c: 'a, 'd>(
+    ctx: &'c LinearRecomputeDiceComputations<'d>,
     working_dir: &'a ProjectRelativePath,
-    global_target_platform: Option<TargetLabel>,
-) -> anyhow::Result<UqueryEvaluator<'c>> {
+) -> anyhow::Result<UqueryEvaluator<'c, 'd>> {
     let dice_query_delegate =
-        get_dice_query_delegate(ctx, working_dir, global_target_platform).await?;
+        get_dice_query_delegate(ctx, working_dir, GlobalCfgOptions::default()).await?;
     let functions = DefaultQueryFunctionsModule::new();
 
     Ok(UqueryEvaluator {
         dice_query_delegate,
         functions,
     })
 }
diff --git a/app/buck2_query_impls/src/uquery/mod.rs b/app/buck2_query_impls/src/uquery/mod.rs
deleted file mode 100644
index 9c43548eb0f28..0000000000000
--- a/app/buck2_query_impls/src/uquery/mod.rs
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */ - -pub(crate) mod bxl; -pub mod environment; -pub mod evaluator; diff --git a/app/buck2_query_parser/BUCK b/app/buck2_query_parser/BUCK index aa371636ca35a..d3522f32d9d20 100644 --- a/app/buck2_query_parser/BUCK +++ b/app/buck2_query_parser/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -11,7 +10,7 @@ rust_library( "fbsource//third-party/rust:derive_more", "fbsource//third-party/rust:enum-map", "fbsource//third-party/rust:nom", - "fbsource//third-party/rust:thiserror", + "//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_util:buck2_util", "//buck2/gazebo/dupe:dupe", "//buck2/gazebo/gazebo:gazebo", diff --git a/app/buck2_query_parser/Cargo.toml b/app/buck2_query_parser/Cargo.toml index 962b1458bb369..6ac2bb0bb4a8f 100644 --- a/app/buck2_query_parser/Cargo.toml +++ b/app/buck2_query_parser/Cargo.toml @@ -1,24 +1,19 @@ [package] +description = "Buck2 query language parser" +edition = "2021" +license = { workspace = true } name = "buck2_query_parser" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Buck2 query language parser" [dependencies] anyhow = { workspace = true } derive_more = { workspace = true } enum-map = { workspace = true } nom = { workspace = true } -structopt = { workspace = true } -thiserror = { workspace = true } -gazebo = { workspace = true } -gazebo_lint.version = "0.1" -gazebo_lint.optional = true -# @oss-disable: gazebo_lint.path = "../../gazebo_lint/gazebo_lint" dupe = { workspace = true } +gazebo = { workspace = true } +buck2_error = { workspace = true } buck2_util = { workspace = true } - -[features] -# @oss-disable: default = ["gazebo_lint"] diff --git a/app/buck2_query_parser/src/lib.rs b/app/buck2_query_parser/src/lib.rs index 38806f2f2ad64..277011bf3c412 100644 --- a/app/buck2_query_parser/src/lib.rs +++ b/app/buck2_query_parser/src/lib.rs @@ -7,6 +7,8 @@ * of this source tree. */ +#![feature(error_generic_member_access)] + //! Parsing query expressions. //! //! The parser doesn't do any validation on the function names or function arguments, that is handled @@ -16,7 +18,6 @@ //! The grammar is something like: //! //! ```text -//! //! # note that set's args are space-separated, not comma-separated and so cannot be treated as a function //! EXPR ::= //! WORD @@ -41,9 +42,9 @@ //! INTEGER ::= "0" | ("1-9" "0-9"*) //! //! FUNCTION_NAME ::= "a-zA-Z_" "a-zA-Z0-9_" * -//! //! ``` +pub mod multi_query; pub mod placeholder; pub mod span; pub mod spanned; @@ -79,7 +80,6 @@ use nom::sequence::pair; use nom::sequence::preceded; use nom::sequence::terminated; use nom::IResult; -use thiserror::Error; use crate::span::Span; use crate::spanned::Spanned; @@ -102,9 +102,10 @@ impl<'a, E: nom::error::ParseError> + nom::error::ContextError { } -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum ParseError { #[error("{0}")] + #[buck2(input)] NomError(String), } diff --git a/app/buck2_query_parser/src/multi_query.rs b/app/buck2_query_parser/src/multi_query.rs new file mode 100644 index 0000000000000..e9abbb0d6f769 --- /dev/null +++ b/app/buck2_query_parser/src/multi_query.rs @@ -0,0 +1,65 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use gazebo::prelude::SliceExt;
+
+use crate::placeholder::QUERY_PERCENT_S_PLACEHOLDER;
+
+#[derive(Debug, buck2_error::Error)]
+enum EvalQueryError {
+    #[error("Query args supplied without any `%s` placeholder in the query, got args {}", .0.map(|x| format!("`{}`", x)).join(", "))]
+    ArgsWithoutPlaceholder(Vec<String>),
+    #[error("Placeholder `%s` in query argument `{0}`")]
+    PlaceholderInPattern(String),
+}
+
+pub struct MultiQueryItem {
+    pub arg: String,
+    pub query: String,
+}
+
+/// Parsed query with optional `%s` placeholder and optional query args.
+pub enum MaybeMultiQuery {
+    SingleQuery(String),
+    MultiQuery(Vec<MultiQueryItem>),
+}
+
+impl MaybeMultiQuery {
+    pub fn parse(
+        query: &str,
+        args: impl IntoIterator<Item = impl AsRef<str>>,
+    ) -> anyhow::Result<MaybeMultiQuery> {
+        if query.contains(QUERY_PERCENT_S_PLACEHOLDER) {
+            // We'd really like the query args to only be literals (file or target).
+            // If that didn't work, we'd really like query args to be well-formed expressions.
+            // Unfortunately Buck1 just substitutes in arbitrary strings, where the query
+            // or query_args may not form anything remotely valid.
+            // We have to be backwards compatible :(
+            let queries = args
+                .into_iter()
+                .map(|arg| {
+                    let arg = arg.as_ref().to_owned();
+                    if arg.contains(QUERY_PERCENT_S_PLACEHOLDER) {
+                        return Err(EvalQueryError::PlaceholderInPattern(arg).into());
+                    }
+                    let query = query.replace(QUERY_PERCENT_S_PLACEHOLDER, &arg);
+                    Ok(MultiQueryItem { arg, query })
+                })
+                .collect::<anyhow::Result<Vec<_>>>()?;
+            Ok(MaybeMultiQuery::MultiQuery(queries))
+        } else {
+            let args: Vec<String> = args.into_iter().map(|q| q.as_ref().to_owned()).collect();
+            if !args.is_empty() {
+                Err(EvalQueryError::ArgsWithoutPlaceholder(args).into())
+            } else {
+                Ok(MaybeMultiQuery::SingleQuery(query.to_owned()))
+            }
+        }
+    }
+}
diff --git a/app/buck2_query_parser/src/placeholder.rs b/app/buck2_query_parser/src/placeholder.rs
index 2dfa423e74ba9..98bd04a266855 100644
--- a/app/buck2_query_parser/src/placeholder.rs
+++ b/app/buck2_query_parser/src/placeholder.rs
@@ -7,7 +7,7 @@
  * of this source tree.
  */
 
-/// Replace `%s` with remaining command line arguments which contain query literals.
+/// Perform multiple queries with `%s` substituted with each of the arguments.
 pub const QUERY_PERCENT_S_PLACEHOLDER: &str = "%s";
 
-/// Replace `%Ss` with query literals read from files from remaining command line arguments.
+/// Perform a single query with `%Ss` substituted with `set(...)` of the arguments.
 pub const QUERY_PERCENT_SS_PLACEHOLDER: &str = "%Ss";
diff --git a/app/buck2_query_parser/src/spanned.rs b/app/buck2_query_parser/src/spanned.rs
index 1ae1695eea311..fe1ac38a34ca1 100644
--- a/app/buck2_query_parser/src/spanned.rs
+++ b/app/buck2_query_parser/src/spanned.rs
@@ -16,7 +16,7 @@ use derive_more::Display;
 /// the references into the input so that this has a 'static lifetime. To be useful, it'll need to be
 /// recombined with the input string.
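A usage sketch for `MaybeMultiQuery::parse` as added above (assuming the crate is in scope as `buck2_query_parser`): a query containing `%s` fans out into one query per argument, while arguments supplied without a placeholder are rejected.

```rust
use buck2_query_parser::multi_query::MaybeMultiQuery;

fn demo() -> anyhow::Result<()> {
    match MaybeMultiQuery::parse("deps(%s)", ["//a:a", "//b:b"])? {
        MaybeMultiQuery::MultiQuery(items) => {
            // One MultiQueryItem per argument, with `%s` substituted.
            let queries: Vec<&str> = items.iter().map(|i| i.query.as_str()).collect();
            assert_eq!(queries, ["deps(//a:a)", "deps(//b:b)"]);
        }
        MaybeMultiQuery::SingleQuery(_) => unreachable!("`%s` is present in the query"),
    }

    // No `%s` but args supplied: this is the `ArgsWithoutPlaceholder` error.
    assert!(MaybeMultiQuery::parse("deps(//a:a)", ["//b:b"]).is_err());
    Ok(())
}
```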
#[derive(Debug, Display)] -#[display(fmt = "{}", value)] +#[display("{}", value)] pub struct Spanned { pub position: Range, pub value: T, diff --git a/app/buck2_re_configuration/BUCK b/app/buck2_re_configuration/BUCK index eb2fca0b93043..12d211bda4cbd 100644 --- a/app/buck2_re_configuration/BUCK +++ b/app/buck2_re_configuration/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -10,5 +9,6 @@ rust_library( "fbsource//third-party/rust:anyhow", "//buck2/allocative/allocative:allocative", "//buck2/app/buck2_common:buck2_common", + "//buck2/app/buck2_core:buck2_core", ], ) diff --git a/app/buck2_re_configuration/Cargo.toml b/app/buck2_re_configuration/Cargo.toml index 51f4a9cb75f7f..d1e23e605b3e7 100644 --- a/app/buck2_re_configuration/Cargo.toml +++ b/app/buck2_re_configuration/Cargo.toml @@ -1,10 +1,16 @@ [package] +description = "utilities to configure RE in Buck2" +edition = "2021" +license = { workspace = true } name = "buck2_re_configuration" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "utilities to configure RE in Buck2" [dependencies] allocative = { workspace = true } anyhow = { workspace = true } buck2_common = { workspace = true } +buck2_core = { workspace = true } + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ["cfg(fbcode_build)"] } diff --git a/app/buck2_re_configuration/src/lib.rs b/app/buck2_re_configuration/src/lib.rs index 65327e12e1745..568788cf52584 100644 --- a/app/buck2_re_configuration/src/lib.rs +++ b/app/buck2_re_configuration/src/lib.rs @@ -7,10 +7,14 @@ * of this source tree. */ +#![feature(error_generic_member_access)] + use std::str::FromStr; use allocative::Allocative; -use buck2_common::legacy_configs::LegacyBuckConfig; +use buck2_common::legacy_configs::configs::LegacyBuckConfig; +use buck2_common::legacy_configs::key::BuckconfigKeyRef; +use buck2_core::rollout_percentage::RolloutPercentage; static BUCK2_RE_CLIENT_CFG_SECTION: &str = "buck2_re_client"; @@ -23,6 +27,8 @@ pub trait RemoteExecutionStaticMetadataImpl: Sized { #[allow(unused)] mod fbcode { + use buck2_common::legacy_configs::key::BuckconfigKeyRef; + use super::*; /// Metadata that doesn't change between executions @@ -62,79 +68,169 @@ mod fbcode { // ttl management pub minimal_blob_ttl_seconds: Option, + // When less than (X*100)% of TTL remains, refresh data in the store + pub remaining_ttl_fraction_refresh_threshold: Option, + // Adds a randomness to when refresh the TTL + pub remaining_ttl_random_extra_threshold: Option, + + pub disable_fallocate: bool, + pub respect_file_symlinks: bool, + pub execute_over_thrift: bool, } impl RemoteExecutionStaticMetadataImpl for RemoteExecutionStaticMetadata { fn from_legacy_config(legacy_config: &LegacyBuckConfig) -> anyhow::Result { Ok(Self { - cas_address: legacy_config.parse(BUCK2_RE_CLIENT_CFG_SECTION, "cas_address")?, + cas_address: legacy_config.parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "cas_address", + })?, cas_connection_count: legacy_config - .parse(BUCK2_RE_CLIENT_CFG_SECTION, "cas_connection_count")? + .parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "cas_connection_count", + })? 
.unwrap_or(16), - cas_shared_cache: legacy_config - .parse(BUCK2_RE_CLIENT_CFG_SECTION, "cas_shared_cache")?, - cas_shared_cache_mode: legacy_config - .parse(BUCK2_RE_CLIENT_CFG_SECTION, "cas_shared_cache_mode")?, - cas_shared_cache_port: legacy_config - .parse(BUCK2_RE_CLIENT_CFG_SECTION, "cas_shared_cache_port")?, - cas_shared_cache_tls: legacy_config - .parse(BUCK2_RE_CLIENT_CFG_SECTION, "cas_shared_cache_tls")?, - action_cache_address: legacy_config - .parse(BUCK2_RE_CLIENT_CFG_SECTION, "action_cache_address")?, + cas_shared_cache: legacy_config.parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "cas_shared_cache", + })?, + cas_shared_cache_mode: legacy_config.parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "cas_shared_cache_mode", + })?, + cas_shared_cache_port: legacy_config.parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "cas_shared_cache_port", + })?, + cas_shared_cache_tls: legacy_config.parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "cas_shared_cache_tls", + })?, + action_cache_address: legacy_config.parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "action_cache_address", + })?, action_cache_connection_count: legacy_config - .parse(BUCK2_RE_CLIENT_CFG_SECTION, "action_cache_connection_count")? + .parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "action_cache_connection_count", + })? .unwrap_or(4), - engine_address: legacy_config - .parse(BUCK2_RE_CLIENT_CFG_SECTION, "engine_address")?, + engine_address: legacy_config.parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "engine_address", + })?, engine_connection_count: legacy_config - .parse(BUCK2_RE_CLIENT_CFG_SECTION, "engine_connection_count")? + .parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "engine_connection_count", + })? .unwrap_or(4), verbose_logging: legacy_config - .parse(BUCK2_RE_CLIENT_CFG_SECTION, "verbose_logging")? + .parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "verbose_logging", + })? .unwrap_or(false), cas_thread_count: legacy_config - .parse(BUCK2_RE_CLIENT_CFG_SECTION, "cas_thread_count")? + .parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "cas_thread_count", + })? .unwrap_or(4), cas_thread_count_ratio: legacy_config - .parse(BUCK2_RE_CLIENT_CFG_SECTION, "cas_thread_count_ratio")? + .parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "cas_thread_count_ratio", + })? .unwrap_or(0.0), use_manifold_rich_client: legacy_config - .parse(BUCK2_RE_CLIENT_CFG_SECTION, "use_manifold_rich_client_new")? + .parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "use_manifold_rich_client_new", + })? .unwrap_or(true), use_zippy_rich_client: legacy_config - .parse(BUCK2_RE_CLIENT_CFG_SECTION, "use_zippy_rich_client")? + .parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "use_zippy_rich_client", + })? .unwrap_or(false), use_p2p: legacy_config - .parse(BUCK2_RE_CLIENT_CFG_SECTION, "use_p2p")? + .parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "use_p2p", + })? 
.unwrap_or(false), - rich_client_channels_per_blob: legacy_config - .parse(BUCK2_RE_CLIENT_CFG_SECTION, "rich_client_channels_per_blob")?, - rich_client_attempt_timeout_ms: legacy_config.parse( - BUCK2_RE_CLIENT_CFG_SECTION, - "rich_client_attempt_timeout_ms", - )?, - rich_client_retries_count: legacy_config - .parse(BUCK2_RE_CLIENT_CFG_SECTION, "rich_client_retries_count")?, - force_enable_deduplicate_find_missing: legacy_config.parse( - BUCK2_RE_CLIENT_CFG_SECTION, - "force_enable_deduplicate_find_missing", + rich_client_channels_per_blob: legacy_config.parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "rich_client_channels_per_blob", + })?, + rich_client_attempt_timeout_ms: legacy_config.parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "rich_client_attempt_timeout_ms", + })?, + rich_client_retries_count: legacy_config.parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "rich_client_retries_count", + })?, + force_enable_deduplicate_find_missing: legacy_config.parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "force_enable_deduplicate_find_missing", + })?, + features_config_path: legacy_config.parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "features_config_path", + })?, + curl_reactor_max_number_of_retries: legacy_config.parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "curl_reactor_max_number_of_retries", + })?, + curl_reactor_connection_timeout_ms: legacy_config.parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "curl_reactor_connection_timeout_ms", + })?, + curl_reactor_request_timeout_ms: legacy_config.parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "curl_reactor_request_timeout_ms", + })?, + minimal_blob_ttl_seconds: legacy_config.parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "minimal_blob_ttl_seconds", + })?, + disable_fallocate: legacy_config + .parse::(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "disable_fallocate", + })? + .unwrap_or(RolloutPercentage::never()) + .roll(), + remaining_ttl_fraction_refresh_threshold: legacy_config.parse( + BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "remaining_ttl_fraction_refresh_threshold", + }, )?, - features_config_path: legacy_config - .parse(BUCK2_RE_CLIENT_CFG_SECTION, "features_config_path")?, - curl_reactor_max_number_of_retries: legacy_config.parse( - BUCK2_RE_CLIENT_CFG_SECTION, - "curl_reactor_max_number_of_retries", - )?, - curl_reactor_connection_timeout_ms: legacy_config.parse( - BUCK2_RE_CLIENT_CFG_SECTION, - "curl_reactor_connection_timeout_ms", - )?, - curl_reactor_request_timeout_ms: legacy_config.parse( - BUCK2_RE_CLIENT_CFG_SECTION, - "curl_reactor_request_timeout_ms", - )?, - minimal_blob_ttl_seconds: legacy_config - .parse(BUCK2_RE_CLIENT_CFG_SECTION, "minimal_blob_ttl_seconds")?, + remaining_ttl_random_extra_threshold: legacy_config.parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "remaining_ttl_random_extra_threshold", + })?, + respect_file_symlinks: legacy_config + .parse::(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "respect_file_symlinks", + })? + .unwrap_or(RolloutPercentage::never()) + .roll(), + execute_over_thrift: legacy_config + .parse::(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "execute_over_thrift", + })? 
+ // TODO: Change to always (T203734691) + .unwrap_or(RolloutPercentage::never()) + .roll(), }) } @@ -200,6 +296,14 @@ pub struct Buck2OssReConfiguration { pub capabilities: Option, /// The instance name to use in requests. pub instance_name: Option, + /// Use the Meta version of the request metadata + pub use_fbcode_metadata: bool, + /// The max size for a GRPC message to be decoded. + pub max_decoding_message_size: Option, + /// The max cumulative blob size for `Read` and `BatchReadBlobs` methods. + pub max_total_batch_size: Option, + /// Maximum number of concurrent upload requests for each action. + pub max_concurrent_uploads_per_action: Option, } #[derive(Clone, Debug, Default, Allocative)] @@ -230,29 +334,76 @@ impl Buck2OssReConfiguration { pub fn from_legacy_config(legacy_config: &LegacyBuckConfig) -> anyhow::Result { // this is used for all three services by default, if given; if one of // them has an explicit address given as well though, use that instead - let default_address: Option = - legacy_config.parse(BUCK2_RE_CLIENT_CFG_SECTION, "address")?; + let default_address: Option = legacy_config.parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "address", + })?; Ok(Self { cas_address: legacy_config - .parse(BUCK2_RE_CLIENT_CFG_SECTION, "cas_address")? + .parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "cas_address", + })? .or(default_address.clone()), engine_address: legacy_config - .parse(BUCK2_RE_CLIENT_CFG_SECTION, "engine_address")? + .parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "engine_address", + })? .or(default_address.clone()), action_cache_address: legacy_config - .parse(BUCK2_RE_CLIENT_CFG_SECTION, "action_cache_address")? + .parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "action_cache_address", + })? .or(default_address), tls: legacy_config - .parse(BUCK2_RE_CLIENT_CFG_SECTION, "tls")? + .parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "tls", + })? .unwrap_or(true), - tls_ca_certs: legacy_config.parse(BUCK2_RE_CLIENT_CFG_SECTION, "tls_ca_certs")?, - tls_client_cert: legacy_config.parse(BUCK2_RE_CLIENT_CFG_SECTION, "tls_client_cert")?, + tls_ca_certs: legacy_config.parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "tls_ca_certs", + })?, + tls_client_cert: legacy_config.parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "tls_client_cert", + })?, http_headers: legacy_config - .parse_list(BUCK2_RE_CLIENT_CFG_SECTION, "http_headers")? + .parse_list(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "http_headers", + })? .unwrap_or_default(), // Empty list is as good None. - capabilities: legacy_config.parse(BUCK2_RE_CLIENT_CFG_SECTION, "capabilities")?, - instance_name: legacy_config.parse(BUCK2_RE_CLIENT_CFG_SECTION, "instance_name")?, + capabilities: legacy_config.parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "capabilities", + })?, + instance_name: legacy_config.parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "instance_name", + })?, + use_fbcode_metadata: legacy_config + .parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "use_fbcode_metadata", + })? 
+ .unwrap_or(true), + max_decoding_message_size: legacy_config.parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "max_decoding_message_size", + })?, + max_total_batch_size: legacy_config.parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "max_total_batch_size", + })?, + max_concurrent_uploads_per_action: legacy_config.parse(BuckconfigKeyRef { + section: BUCK2_RE_CLIENT_CFG_SECTION, + property: "max_concurrent_uploads_per_action", + })?, }) } } diff --git a/app/buck2_server/BUCK b/app/buck2_server/BUCK index c5e661bfe32dd..f2a399f7c8ff7 100644 --- a/app/buck2_server/BUCK +++ b/app/buck2_server/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -19,10 +18,16 @@ rust_library( "fbsource//third-party/rust:psutil", ], ), + ( + "windows", + [ + "fbsource//third-party/rust:winapi", + ], + ), ], test_deps = [ "fbsource//third-party/rust:assert_matches", - "fbsource//third-party/rust:maplit", + "fbsource//third-party/rust:indoc", "//buck2/app/buck2_util:buck2_util", ], deps = [ @@ -36,10 +41,8 @@ rust_library( "fbsource//third-party/rust:flate2", "fbsource//third-party/rust:futures", "fbsource//third-party/rust:inferno", - "fbsource//third-party/rust:itertools", "fbsource//third-party/rust:lsp-server", "fbsource//third-party/rust:lsp-types", - "fbsource//third-party/rust:maplit", "fbsource//third-party/rust:num_cpus", "fbsource//third-party/rust:once_cell", "fbsource//third-party/rust:parking_lot", @@ -51,28 +54,31 @@ rust_library( "fbsource//third-party/rust:shlex", "fbsource//third-party/rust:sync_wrapper", "fbsource//third-party/rust:tar", - "fbsource//third-party/rust:thiserror", "fbsource//third-party/rust:tokio", "fbsource//third-party/rust:tokio-stream", "fbsource//third-party/rust:tonic", "fbsource//third-party/rust:tracing", - # @oss-disable: "//blake3:blake3-constants-rust", + # @oss-disable: "//blake3:blake3-constants-rust-nothrift", "//buck2/allocative/allocative:allocative", "//buck2/app/buck2_analysis:buck2_analysis", "//buck2/app/buck2_build_api:buck2_build_api", "//buck2/app/buck2_build_signals:buck2_build_signals", + "//buck2/app/buck2_certs:buck2_certs", "//buck2/app/buck2_cli_proto:buck2_cli_proto", "//buck2/app/buck2_common:buck2_common", "//buck2/app/buck2_configured:buck2_configured", "//buck2/app/buck2_core:buck2_core", "//buck2/app/buck2_data:buck2_data", "//buck2/app/buck2_eden:buck2_eden", + "//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_event_observer:buck2_event_observer", "//buck2/app/buck2_events:buck2_events", "//buck2/app/buck2_execute:buck2_execute", "//buck2/app/buck2_execute_impl:buck2_execute_impl", "//buck2/app/buck2_file_watcher:buck2_file_watcher", "//buck2/app/buck2_forkserver:buck2_forkserver", + "//buck2/app/buck2_futures:buck2_futures", + "//buck2/app/buck2_http:buck2_http", "//buck2/app/buck2_interpreter:buck2_interpreter", "//buck2/app/buck2_interpreter_for_build:buck2_interpreter_for_build", "//buck2/app/buck2_node:buck2_node", @@ -81,17 +87,16 @@ rust_library( "//buck2/app/buck2_server_ctx:buck2_server_ctx", "//buck2/app/buck2_server_starlark_debug:buck2_server_starlark_debug", "//buck2/app/buck2_subscription_proto:buck2_subscription_proto", + "//buck2/app/buck2_test:buck2_test", "//buck2/app/buck2_util:buck2_util", + "//buck2/app/buck2_validation:buck2_validation", "//buck2/app/buck2_wrapper_common:buck2_wrapper_common", "//buck2/dice/dice:dice", 
"//buck2/gazebo/dupe:dupe", "//buck2/gazebo/gazebo:gazebo", "//buck2/host_sharing:host_sharing", - "//buck2/remote_execution:remote_execution", - "//buck2/shed/more_futures:more_futures", "//buck2/starlark-rust/starlark:starlark", "//buck2/starlark-rust/starlark_lsp:starlark_lsp", - "//buck2/starlark-rust/starlark_map:starlark_map", # @oss-disable: "//common/rust/shed/detect_eden:detect_eden", "//common/rust/shed/fbinit:fbinit", ], diff --git a/app/buck2_server/Cargo.toml b/app/buck2_server/Cargo.toml index d372bfec7c51a..4debd22437b33 100644 --- a/app/buck2_server/Cargo.toml +++ b/app/buck2_server/Cargo.toml @@ -1,7 +1,9 @@ [package] description = "Some parts of Buck2 server" edition = "2021" +license = { workspace = true } name = "buck2_server" +repository = { workspace = true } version = "0.1.0" [dependencies] @@ -9,18 +11,18 @@ anyhow = { workspace = true } async-recursion = { workspace = true } async-trait = { workspace = true } bincode = { workspace = true } +buck2_re_configuration = { workspace = true } chrono = { workspace = true } constant_time_eq = { workspace = true } crossbeam-channel = { workspace = true } flate2 = { workspace = true } futures = { workspace = true } inferno = { workspace = true } -itertools = { workspace = true } lsp-server = { workspace = true } lsp-types = { workspace = true } -once_cell = { workspace = true } -maplit = { workspace = true } num_cpus = { workspace = true } +once_cell = { workspace = true } +parking_lot = { workspace = true } pin-project = { workspace = true } prost = { workspace = true } prost-types = { workspace = true } @@ -29,59 +31,60 @@ serde_json = { workspace = true } shlex = { workspace = true } sync_wrapper = { workspace = true } tar = { workspace = true } -thiserror = { workspace = true } tokio = { workspace = true } tokio-stream = { workspace = true } tonic = { workspace = true } tracing = { workspace = true } -buck2_re_configuration = { workspace = true } -parking_lot = { workspace = true } allocative = { workspace = true } +dice = { workspace = true } +dupe = { workspace = true } fbinit = { workspace = true } gazebo = { workspace = true } -dupe = { workspace = true } -dice = { workspace = true } -more_futures = { workspace = true } -remote_execution = { workspace = true } starlark = { workspace = true } starlark_lsp = { workspace = true } -starlark_map = { workspace = true } buck2_analysis = { workspace = true } buck2_build_api = { workspace = true } +buck2_build_signals = { workspace = true } +buck2_certs = { workspace = true } +buck2_cli_proto = { workspace = true } buck2_common = { workspace = true } buck2_configured = { workspace = true } buck2_core = { workspace = true } buck2_data = { workspace = true } +buck2_error = { workspace = true } +buck2_event_observer = { workspace = true } +buck2_events = { workspace = true } buck2_execute = { workspace = true } buck2_execute_impl = { workspace = true } buck2_file_watcher = { workspace = true } buck2_forkserver = { workspace = true } -buck2_build_signals = { workspace = true } +buck2_futures = { workspace = true } +buck2_http = { workspace = true } buck2_interpreter = { workspace = true } buck2_interpreter_for_build = { workspace = true } buck2_node = { workspace = true } buck2_profile = { workspace = true } buck2_server_ctx = { workspace = true } -buck2_cli_proto = { workspace = true } -buck2_subscription_proto = { workspace = true } -buck2_event_observer = { workspace = true } -buck2_events = { workspace = true } buck2_server_starlark_debug = { workspace = true } 
+buck2_subscription_proto = { workspace = true } +buck2_test = { workspace = true } buck2_util = { workspace = true } +buck2_validation = { workspace = true } buck2_wrapper_common = { workspace = true } -buck2_eden = { workspace = true } host_sharing = { workspace = true } [target.'cfg(any(target_os = "linux", target_os = "macos"))'.dependencies] psutil = { workspace = true } -[target.'cfg(any(fbcode_build, cargo_internal_build))'.dependencies] -# @oss-disable: detect_eden = { path = "../../../common/rust/shed/detect_eden" } -# @oss-disable: fsinfo = { path = "../../../eden/scm/lib/fsinfo" } +[target.'cfg(windows)'.dependencies] +winapi = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } -maplit = { workspace = true } buck2_util = { workspace = true } +indoc = { workspace = true } + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ["cfg(fbcode_build)"] } diff --git a/app/buck2_server/src/active_commands.rs b/app/buck2_server/src/active_commands.rs index df4977246f126..cc5be3a6cee8d 100644 --- a/app/buck2_server/src/active_commands.rs +++ b/app/buck2_server/src/active_commands.rs @@ -11,7 +11,6 @@ use std::collections::HashMap; use std::collections::HashSet; use std::sync::Arc; -use buck2_cli_proto::ClientContext; use buck2_event_observer::dice_state::DiceState; use buck2_event_observer::pending_estimate::pending_estimate; use buck2_event_observer::span_tracker; @@ -222,17 +221,17 @@ pub struct ActiveCommand { } impl ActiveCommand { - pub fn new(event_dispatcher: &EventDispatcher, client_ctx: &ClientContext) -> Self { + pub fn new(event_dispatcher: &EventDispatcher, sanitized_argv: Vec) -> Self { let (sender, receiver) = oneshot::channel(); - let state = Arc::new(ActiveCommandState::new(client_ctx.sanitized_argv.clone())); + let state = Arc::new(ActiveCommandState::new(sanitized_argv)); let trace_id = event_dispatcher.trace_id().dupe(); let result = { // Scope the guard so it's locked as little as possible let mut active_commands = ACTIVE_COMMANDS.lock(); - let existing_active_commands = if active_commands.len() > 1 { + let existing_active_commands = if active_commands.len() > 0 { Some(active_commands.clone()) } else { None @@ -277,6 +276,10 @@ impl ActiveCommand { mod tests { use std::time::SystemTime; + use assert_matches::assert_matches; + use buck2_events::source::ChannelEventSource; + use buck2_events::Event; + use super::*; #[test] @@ -284,8 +287,8 @@ mod tests { let mut writer = ActiveCommandStateWriter::new(Arc::new(ActiveCommandState::new(Vec::new()))); - let root = SpanId::new(); - let child = SpanId::new(); + let root = SpanId::next(); + let child = SpanId::next(); let trace = TraceId::new(); writer.peek_event(&BuckEvent::new( @@ -387,6 +390,8 @@ mod tests { finished: 2, check_deps_started: 0, check_deps_finished: 0, + compute_started: 0, + compute_finished: 0, }, ); map @@ -407,4 +412,55 @@ mod tests { } ); } + + fn create_dispatcher() -> (EventDispatcher, ChannelEventSource, TraceId) { + let (daemon_dispatcher_events, daemon_dispatcher_sink) = + buck2_events::create_source_sink_pair(); + let trace_id = TraceId::new(); + let dispatcher = EventDispatcher::new(trace_id.dupe(), daemon_dispatcher_sink); + + (dispatcher, daemon_dispatcher_events, trace_id) + } + + fn check_concurrent_command_trace_ids_eq(event: Option, expected_trace_ids: &[String]) { + assert_matches!(event, Some(Event::Buck(event)) => { + assert_matches!( + event.data(), + buck2_data::buck_event::Data::Instant(buck2_data::InstantEvent { + data: 
Some(buck2_data::instant_event::Data::ConcurrentCommands( + buck2_data::ConcurrentCommands { + trace_ids, + } + )) + }) => { + // Use HashSets because trace ids may not be reported in the same order that we specified. + let trace_ids: HashSet<&String> = trace_ids.iter().collect(); + let expected_trace_ids: HashSet<&String> = expected_trace_ids.iter().collect(); + assert_eq!(trace_ids, expected_trace_ids); + } + ); + }); + } + + #[test] + fn test_multiple_active_commands() { + let (dispatcher1, mut source1, id1) = create_dispatcher(); + let _active1 = ActiveCommand::new(&dispatcher1, Vec::new()); + + let (dispatcher2, mut source2, id2) = create_dispatcher(); + let _active2 = ActiveCommand::new(&dispatcher2, Vec::new()); + + check_concurrent_command_trace_ids_eq(source1.try_receive(), &[id2.to_string()]); + check_concurrent_command_trace_ids_eq(source2.try_receive(), &[id1.to_string()]); + + let (dispatcher3, mut source3, id3) = create_dispatcher(); + let _active3 = ActiveCommand::new(&dispatcher3, Vec::new()); + + check_concurrent_command_trace_ids_eq(source1.try_receive(), &[id3.to_string()]); + check_concurrent_command_trace_ids_eq(source2.try_receive(), &[id3.to_string()]); + check_concurrent_command_trace_ids_eq( + source3.try_receive(), + &[id1.to_string(), id2.to_string()], + ); + } } diff --git a/app/buck2_server/src/builtin_docs/bxl_docs.rs b/app/buck2_server/src/builtin_docs/bxl_docs.rs deleted file mode 100644 index cd7524f6deb82..0000000000000 --- a/app/buck2_server/src/builtin_docs/bxl_docs.rs +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::collections::HashSet; -use std::sync::Arc; - -use buck2_interpreter_for_build::interpreter::global_interpreter_state::GlobalInterpreterState; -use maplit::hashset; -use once_cell::sync::Lazy; -use starlark::docs::Doc; -use starlark::docs::DocItem; -use starlark::docs::DocMember; -use starlark::docs::DocObject; -use starlark_map::smallmap; - -use crate::builtin_docs::docs::builtin_doc; - -// List of BXL global function names. -// (@wendyy) I don't think this is a nice implementation, but this offers us finer -// grain control over documentation generated by globals. Let's at least get the -// documentation out there before coming up with a cleaner solution -static BXL_GLOBAL_METHOD_NAME_SET: Lazy> = Lazy::new(|| { - hashset![ - "sub_target", - "configured_sub_target", - "target_set", - "get_path_without_materialization", - "get_paths_without_materialization" - ] -}); - -// Globals that are in BXL. For BXL, we will probably only have global functions (as opposed -// to objects and properties). -pub(crate) fn get_builtin_bxl_docs( - interpreter_state: Arc, -) -> anyhow::Result> { - let b_o = interpreter_state.bxl_file_global_env.documentation(); - let mut docs = vec![]; - for member in b_o.members { - if BXL_GLOBAL_METHOD_NAME_SET.contains(member.0.as_str()) { - match member.1 { - DocMember::Function(function) => { - // Convert the function to its own object so it gets its own markdown file name. - // Otherwise, it will be named `native.md`, which is confusing in static docs. - let doc_item = DocItem::Object(DocObject { - docs: None, - members: smallmap! 
{member.0.clone() => DocMember::Function(function)}, - }); - - docs.push(builtin_doc(member.0.as_str(), "bxl", doc_item)); - } - _ => (), - } - } - } - - Ok(docs) -} diff --git a/app/buck2_server/src/builtin_docs/docs.rs b/app/buck2_server/src/builtin_docs/docs.rs deleted file mode 100644 index 93df323c060f9..0000000000000 --- a/app/buck2_server/src/builtin_docs/docs.rs +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::collections::HashMap; -use std::collections::HashSet; -use std::path::Path; -use std::sync::Arc; - -use anyhow::Context; -use async_trait::async_trait; -use buck2_cli_proto::unstable_docs_request; -use buck2_cli_proto::UnstableDocsRequest; -use buck2_cli_proto::UnstableDocsResponse; -use buck2_common::dice::cells::HasCellResolver; -use buck2_core::bzl::ImportPath; -use buck2_core::cells::build_file_cell::BuildFileCell; -use buck2_core::cells::cell_path::CellPath; -use buck2_core::cells::CellAliasResolver; -use buck2_core::fs::paths::abs_path::AbsPath; -use buck2_interpreter::load_module::InterpreterCalculation; -use buck2_interpreter::parse_import::parse_bzl_path_with_config; -use buck2_interpreter::parse_import::ParseImportOptions; -use buck2_interpreter::parse_import::RelativeImports; -use buck2_interpreter::prelude_path::prelude_path; -use buck2_interpreter_for_build::interpreter::build_defs::starlark_library_extensions_for_buck2; -use buck2_interpreter_for_build::interpreter::global_interpreter_state::GlobalInterpreterState; -use buck2_interpreter_for_build::interpreter::global_interpreter_state::HasGlobalInterpreterState; -use buck2_server_ctx::ctx::ServerCommandContextTrait; -use buck2_server_ctx::partial_result_dispatcher::NoPartialResult; -use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; -use buck2_server_ctx::template::run_server_command; -use buck2_server_ctx::template::ServerCommandTemplate; -use dice::DiceComputations; -use dice::DiceTransaction; -use dupe::Dupe; -use starlark::collections::SmallMap; -use starlark::docs::get_registered_starlark_docs; -use starlark::docs::Doc; -use starlark::docs::DocItem; -use starlark::docs::DocMember; -use starlark::docs::DocModule; -use starlark::docs::DocProperty; -use starlark::docs::Identifier; -use starlark::docs::Location; -use starlark::environment::Globals; -use starlark::typing::Ty; - -use super::bxl_docs::get_builtin_bxl_docs; -use crate::builtin_docs::markdown::generate_markdown_files; - -#[derive(Debug, thiserror::Error)] -enum DocsError { - #[error("Unknown format requested (internal error)")] - UnknownFormat, -} - -fn parse_import_paths( - cell_resolver: &CellAliasResolver, - current_dir: &CellPath, - current_cell: BuildFileCell, - symbol_patterns: &[String], -) -> anyhow::Result<Vec<ImportPath>> { - let parse_options = ParseImportOptions { - allow_missing_at_symbol: true, - relative_import_option: RelativeImports::Allow { current_dir }, - }; - - symbol_patterns - .iter() - .map(|symbol_pattern| { - parse_bzl_path_with_config(cell_resolver, symbol_pattern, &parse_options, current_cell) - }) - .collect() -} - -pub(crate) fn builtin_doc<S: ToString>(name: S, directory: &str, item: DocItem) -> Doc { - let mut custom_attrs = HashMap::new(); - if !directory.is_empty() { -
custom_attrs.insert("directory".to_owned(), directory.to_owned()); - } - - Doc { - id: Identifier { - name: name.to_string(), - location: None, - }, - item, - custom_attrs, - } -} - -fn get_builtin_global_starlark_docs() -> Doc { - let globals = Globals::extended_by(starlark_library_extensions_for_buck2()); - builtin_doc( - "globals", - "standard", - DocItem::Module(globals.documentation()), - ) -} - -/// Globals that are in the interpreter, but none of the starlark global symbols. -fn get_builtin_build_docs(interpreter_state: Arc) -> anyhow::Result { - let mut b_o = interpreter_state.extension_file_global_env.documentation(); - let globals = Globals::extended_by(starlark_library_extensions_for_buck2()); - let global_symbols: HashSet<_> = globals.names().map(|s| s.as_str()).collect(); - b_o.members = b_o - .members - .into_iter() - .filter(|(name, _)| !global_symbols.contains(&name.as_str())) - .collect(); - Ok(builtin_doc("globals", "", DocItem::Module(b_o))) -} - -pub fn get_builtin_docs( - interpreter_state: Arc, -) -> anyhow::Result> { - let mut all_builtins = vec![ - get_builtin_global_starlark_docs(), - get_builtin_build_docs(interpreter_state.dupe())?, - ]; - - all_builtins.extend(get_builtin_bxl_docs(interpreter_state)?); - all_builtins.extend(get_registered_starlark_docs()); - - Ok(all_builtins) -} - -/// Get the documentation for exported symbols in the prelude -/// -/// Creates top level docs for member functions of "native" too, -/// presuming that those symbols don't already exist in `existing_globals` -/// (to avoid re-exporting and overriding the real builtins if there is conflict) -pub async fn get_prelude_docs( - ctx: &DiceTransaction, - existing_globals: &HashSet<&str>, -) -> anyhow::Result> { - let cell_resolver = ctx.get_cell_resolver().await?; - let prelude_path = prelude_path(&cell_resolver)?; - get_docs_from_module(ctx, prelude_path.import_path(), Some(existing_globals)).await -} - -async fn get_docs_from_module( - ctx: &DiceComputations, - import_path: &ImportPath, - // If we want to promote `native`, what should we exclude - promote_native: Option<&HashSet<&str>>, -) -> anyhow::Result> { - // Do this so that we don't get the '@' in the display if we're printing targets from a - // different cell root. i.e. `//foo:bar.bzl`, rather than `//foo:bar.bzl @ cell` - let import_path_string = format!( - "{}:{}", - import_path.path().parent().unwrap(), - import_path.path().path().file_name().unwrap() - ); - let module = ctx.get_loaded_module_from_import_path(import_path).await?; - let frozen_module = module.env(); - let mut module_docs = frozen_module.documentation(); - - // For the prelude, we want to promote `native` symbol up one level - if let Some(existing_globals) = promote_native { - for (name, value) in module.extra_globals_from_prelude_for_buck_files()? 
{ - if !existing_globals.contains(&name) && !module_docs.members.contains_key(name) { - let doc = match value.to_value().documentation() { - Some(DocItem::Function(f)) => DocMember::Function(f), - _ => DocMember::Property(DocProperty { - docs: None, - typ: Ty::any(), - }), - }; - - module_docs.members.insert(name.to_owned(), doc); - } - } - } - - let mut docs = vec![]; - - if let Some(module_doc) = module_docs.docs { - docs.push(Doc { - id: Identifier { - name: import_path_string.clone(), - location: Some(starlark::docs::Location { - path: import_path_string.clone(), - }), - }, - item: DocItem::Module(DocModule { - docs: Some(module_doc), - members: SmallMap::new(), - }), - custom_attrs: Default::default(), - }); - } - docs.extend(module_docs.members.into_iter().map(|(symbol, d)| { - Doc { - // TODO(nmj): Map this back into the codemap to get a line/column - id: Identifier { - name: symbol, - location: Some(Location { - path: import_path_string.clone(), - }), - }, - item: d.to_doc_item(), - custom_attrs: Default::default(), - } - })); - - Ok(docs) -} - -pub async fn docs_command( - context: &dyn ServerCommandContextTrait, - partial_result_dispatcher: PartialResultDispatcher<NoPartialResult>, - req: UnstableDocsRequest, -) -> anyhow::Result<UnstableDocsResponse> { - run_server_command( - DocsServerCommand { req }, - context, - partial_result_dispatcher, - ) - .await -} - -struct DocsServerCommand { - req: UnstableDocsRequest, -} - -#[async_trait] -impl ServerCommandTemplate for DocsServerCommand { - type StartEvent = buck2_data::DocsCommandStart; - type EndEvent = buck2_data::DocsCommandEnd; - type Response = UnstableDocsResponse; - type PartialResult = NoPartialResult; - - async fn command( - &self, - server_ctx: &dyn ServerCommandContextTrait, - _partial_result_dispatcher: PartialResultDispatcher<Self::PartialResult>, - ctx: DiceTransaction, - ) -> anyhow::Result<Self::Response> { - docs(server_ctx, ctx, &self.req).await - } - - fn is_success(&self, _response: &Self::Response) -> bool { - // No response if we failed. - true - } } - -enum Format { - Json, - Markdown, -} - -impl Format { - fn from_proto(request: &UnstableDocsRequest) -> anyhow::Result<Format> { - let format = unstable_docs_request::Format::from_i32(request.format) - .context("incorrect enum value")?; - match format { - unstable_docs_request::Format::Json => Ok(Format::Json), - unstable_docs_request::Format::Markdown => Ok(Format::Markdown), - unstable_docs_request::Format::Unknown => Err(DocsError::UnknownFormat.into()), - } - } -} - -async fn docs( - server_ctx: &dyn ServerCommandContextTrait, - dice_ctx: DiceTransaction, - request: &UnstableDocsRequest, -) -> anyhow::Result<UnstableDocsResponse> { - let format = Format::from_proto(request)?; - - let cell_resolver = dice_ctx.get_cell_resolver().await?; - let current_cell_path = cell_resolver.get_cell_path(server_ctx.working_dir())?; - let current_cell = BuildFileCell::new(current_cell_path.cell()); - - let cell_alias_resolver = cell_resolver - .get(current_cell_path.cell())? - .cell_alias_resolver(); - - let lookups = parse_import_paths( - cell_alias_resolver, - &current_cell_path, - current_cell, - &request.symbol_patterns, - )?; - - let mut docs = if request.retrieve_builtins { - get_builtin_docs(dice_ctx.get_global_interpreter_state().await?.dupe())?
- } else { - vec![] - }; - - if request.retrieve_prelude { - let builtin_names = docs.iter().map(|d| d.id.name.as_str()).collect(); - let prelude_docs = get_prelude_docs(&dice_ctx, &builtin_names).await?; - docs.extend(prelude_docs); - } - - let module_calcs: Vec<_> = lookups - .iter() - .map(|import_path| get_docs_from_module(&dice_ctx, import_path, None)) - .collect(); - - let modules_docs = futures::future::try_join_all(module_calcs).await?; - docs.extend(modules_docs.into_iter().flatten()); - - let json_output = match format { - Format::Json => Some(serde_json::to_string(&docs)?), - Format::Markdown => { - let path = AbsPath::new(Path::new(request.markdown_output_path.as_ref().context( - "`markdown_output_path` must be set when requesting markdown (internal error)", - )?))?; - let starlark_subdir = Path::new(&request.markdown_starlark_subdir); - let native_subdir = Path::new(&request.markdown_native_subdir); - generate_markdown_files(path, starlark_subdir, native_subdir, docs)?; - None - } - }; - - Ok(UnstableDocsResponse { json_output }) -} diff --git a/app/buck2_server/src/builtin_docs/markdown.rs b/app/buck2_server/src/builtin_docs/markdown.rs deleted file mode 100644 index 4e52b2a8f1253..0000000000000 --- a/app/buck2_server/src/builtin_docs/markdown.rs +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::cmp::Ordering; -use std::collections::HashMap; -use std::path::Path; -use std::path::PathBuf; - -use buck2_core::fs::fs_util; -use buck2_core::fs::paths::abs_path::AbsPath; -use buck2_events::dispatch::console_message; -use itertools::Itertools; -use starlark::docs::Doc; -use starlark::docs::DocItem; -use starlark::docs::MarkdownFlavor; -use starlark::docs::RenderMarkdown; - -use crate::lsp::output_subdir_for_doc; - -#[derive(Clone, Debug, Default)] -struct MarkdownOutput { - sections: Vec<String>, -} - -impl MarkdownOutput { - fn write_to_file(&self, path: &AbsPath) -> anyhow::Result<String> { - let mut contents = self.sections.join("\n\n---\n"); - if !contents.ends_with('\n') { - contents.push('\n'); - } - if let Some(p) = path.parent() { - fs_util::create_dir_all(p)?; - } - fs_util::write(path, &contents)?; - - Ok(contents) - } - - /// Convert a buck style path (foo//bar:baz.bzl) to path, failing if someone attempts to traverse upward - fn path_from_location(location: &str) -> anyhow::Result<PathBuf> { - Ok(Path::new(&location.replace("//", "/").replace(':', "/")).to_path_buf()) - } - - /// Get the output path for the markdown for a given [`Doc`], whether it's in a starlark file, or a native symbol. - fn markdown_path_for_doc( - starlark_subdir: &Path, - native_subdir: &Path, - doc: &Doc, - ) -> anyhow::Result<PathBuf> { - let subdir = output_subdir_for_doc(doc)?; - let path = match &doc.id.location { - Some(loc) => starlark_subdir - .join(subdir.as_path()) - .join(Self::path_from_location(&loc.path)?), - None => match &doc.item { - // Functions all go in one file. - // Objects get their own file (e.g.
each provider, Artifact, etc) - DocItem::Function(_) | DocItem::Property(_) => { - native_subdir.join(subdir.as_path()).join("native") - } - DocItem::Module(_) | DocItem::Object(_) => { - native_subdir.join(subdir.as_path()).join(&doc.id.name) - } - }, - }; - let path = path.with_extension(match path.extension() { - None => "md".to_owned(), - Some(e) => format!("{}.md", e.to_str().expect("path if not UTF-8")), - }); - Ok(path) - } -} - -/// Does the heavy work of processing the docs and writing them to markdown files. -pub(crate) fn generate_markdown_files( - destination_dir: &AbsPath, - starlark_subdir: &Path, - native_subdir: &Path, - docs: Vec<Doc>, -) -> anyhow::Result<()> { - let mut outputs = HashMap::new(); - - fn item_ordering(l: &Doc, r: &Doc) -> Ordering { - match (&l.item, &r.item) { - (DocItem::Module(_), DocItem::Module(_)) => l.id.name.cmp(&r.id.name), - (DocItem::Module(_), _) => Ordering::Less, - (_, DocItem::Module(_)) => Ordering::Greater, - _ => l.id.name.cmp(&r.id.name), - } - } - - for doc in docs.into_iter().sorted_by(item_ordering) { - let markdown_path = - MarkdownOutput::markdown_path_for_doc(starlark_subdir, native_subdir, &doc)?; - let markdown_file = outputs - .entry(markdown_path) - .or_insert_with(MarkdownOutput::default); - if let Some(docs) = doc.render_markdown_opt(MarkdownFlavor::DocFile) { - markdown_file.sections.push(docs); - } - } - - for (relative_path, markdown_file) in outputs.iter() { - let path = destination_dir.join(relative_path); - console_message(format!("Writing to {}", path.display())); - - markdown_file.write_to_file(&path)?; - } - - Ok(()) -} diff --git a/app/buck2_server/src/builtin_docs/mod.rs b/app/buck2_server/src/builtin_docs/mod.rs deleted file mode 100644 index 8bbb20c6497fd..0000000000000 --- a/app/buck2_server/src/builtin_docs/mod.rs +++ /dev/null @@ -1,12 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -mod bxl_docs; -pub mod docs; -pub(crate) mod markdown; diff --git a/app/buck2_server/src/clean_stale.rs b/app/buck2_server/src/clean_stale.rs index 31816e6b21562..11bf8bd34eccd 100644 --- a/app/buck2_server/src/clean_stale.rs +++ b/app/buck2_server/src/clean_stale.rs @@ -52,7 +52,7 @@ impl ServerCommandTemplate for CleanStaleServerCommand { ) -> anyhow::Result<Self::Response> { server_ctx .cancellation_context() - .critical_section(async move || { + .critical_section(|| async move { let deferred_materializer = server_ctx.materializer(); let extension = deferred_materializer @@ -77,7 +77,7 @@ impl ServerCommandTemplate for CleanStaleServerCommand { true } - fn end_event(&self, response: &anyhow::Result<Self::Response>) -> Self::EndEvent { + fn end_event(&self, response: &buck2_error::Result<Self::Response>) -> Self::EndEvent { let clean_stale_stats = if let Ok(res) = response { res.stats.clone() } else { @@ -85,8 +85,4 @@ impl ServerCommandTemplate for CleanStaleServerCommand { }; buck2_data::CleanCommandEnd { clean_stale_stats } } - - fn exclusive_command_name(&self) -> Option<String> { - Some("clean --stale".to_owned()) - } } diff --git a/app/buck2_server/src/configs.rs b/app/buck2_server/src/configs.rs deleted file mode 100644 index 3f93bcb809dca..0000000000000 --- a/app/buck2_server/src/configs.rs +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc.
- * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::collections::HashSet; - -use anyhow::Context; -use buck2_cli_proto::config_override::ConfigType; -use buck2_cli_proto::ConfigOverride; -use buck2_common::legacy_configs::cells::BuckConfigBasedCells; -use buck2_common::legacy_configs::LegacyBuckConfigs; -use buck2_common::legacy_configs::LegacyConfigCmdArg; -use buck2_core::cells::CellResolver; -use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; -use buck2_core::fs::project::ProjectRoot; -use buck2_core::fs::project_rel_path::ProjectRelativePath; - -fn config_type_from_i32(value: i32) -> anyhow::Result<ConfigType> { - ConfigType::from_i32(value).with_context(|| { - format!( - "Unknown ConfigType enum value `{}` when trying to deserialize", - value - ) - }) -} - -pub fn get_legacy_config_args<'a, Iter: IntoIterator<Item = &'a ConfigOverride>>( - config_overrides: Iter, -) -> anyhow::Result<Vec<LegacyConfigCmdArg>> { - config_overrides - .into_iter() - .map( - |config_arg| match config_type_from_i32(config_arg.config_type)? { - ConfigType::Value => LegacyConfigCmdArg::flag(&config_arg.config_override), - ConfigType::File => LegacyConfigCmdArg::file(&config_arg.config_override), - }, - ) - .collect::<anyhow::Result<Vec<LegacyConfigCmdArg>>>() -} - -/// Read the configs, returning the cell resolver and the legacy configs -pub fn parse_legacy_cells( - config_overrides: &[LegacyConfigCmdArg], - cwd: &ProjectRelativePath, - fs: &ProjectRoot, -) -> anyhow::Result<(CellResolver, LegacyBuckConfigs, HashSet<AbsNormPathBuf>)> { - // TODO: We do not need to reparse _all_ configs, instead we just need to - // overlay any custom configs for the current build command on top of - // the base configs derived from the config files. This requires us to - // store the base configs + overlaid ones separately, so we can cheaply - // recompose.
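// [Editorial sketch, not part of the patch] How the two helpers deleted in this file fit together, reconstructed only from the signatures above; `client_context`, `cwd`, and `project_root` are assumed bindings from the calling code:
//
//     let config_args = get_legacy_config_args(&client_context.config_overrides)?;
//     let (cell_resolver, configs, config_paths) =
//         parse_legacy_cells(&config_args, &cwd, &project_root)?;
//
// The replacement path (see `load_new_configs` in ctx.rs below) folds both steps into a single `BuckConfigBasedCells::parse_with_config_args` call.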
- let res = BuckConfigBasedCells::parse_with_config_args(fs, config_overrides, cwd)?; - Ok((res.cell_resolver, res.configs_by_name, res.config_paths)) -} diff --git a/app/buck2_server/src/ctx.rs b/app/buck2_server/src/ctx.rs index 0903de3d3b5cf..fe422b0dc4da1 100644 --- a/app/buck2_server/src/ctx.rs +++ b/app/buck2_server/src/ctx.rs @@ -11,12 +11,13 @@ use std::collections::HashMap; use std::collections::HashSet; use std::io::BufWriter; use std::marker::PhantomData; -use std::ops::Deref; use std::sync::Arc; use allocative::Allocative; -use anyhow::Context; use async_trait::async_trait; +use buck2_build_api::actions::execute::dice_data::set_fallback_executor_config; +use buck2_build_api::actions::execute::dice_data::SetCommandExecutor; +use buck2_build_api::actions::execute::dice_data::SetReClient; use buck2_build_api::actions::impls::run_action_knobs::HasRunActionKnobs; use buck2_build_api::actions::impls::run_action_knobs::RunActionKnobs; use buck2_build_api::build::HasCreateUnhashedSymlinkLock; @@ -25,34 +26,37 @@ use buck2_build_api::build_signals::BuildSignalsInstaller; use buck2_build_api::build_signals::SetBuildSignals; use buck2_build_api::context::SetBuildContextData; use buck2_build_api::keep_going::HasKeepGoing; +use buck2_build_api::materialize::HasMaterializationQueueTracker; use buck2_build_api::spawner::BuckSpawner; -use buck2_build_signals::CriticalPathBackendName; -use buck2_build_signals::HasCriticalPathBackend; +use buck2_build_signals::env::CriticalPathBackendName; +use buck2_build_signals::env::HasCriticalPathBackend; +use buck2_certs::validate::CertState; use buck2_cli_proto::client_context::HostArchOverride; use buck2_cli_proto::client_context::HostPlatformOverride; +use buck2_cli_proto::client_context::PreemptibleWhen; use buck2_cli_proto::common_build_options::ExecutionStrategy; +use buck2_cli_proto::config_override::ConfigType; use buck2_cli_proto::ClientContext; use buck2_cli_proto::CommonBuildOptions; +use buck2_cli_proto::ConfigOverride; use buck2_common::dice::cells::HasCellResolver; use buck2_common::dice::cycles::CycleDetectorAdapter; use buck2_common::dice::cycles::PairDiceCycleDetector; -use buck2_common::dice::data::HasIoProvider; -use buck2_common::http::HttpClient; use buck2_common::http::SetHttpClient; use buck2_common::invocation_paths::InvocationPaths; use buck2_common::io::trace::TracingIoProvider; +use buck2_common::legacy_configs::cells::BuckConfigBasedCells; +use buck2_common::legacy_configs::configs::LegacyBuckConfig; +use buck2_common::legacy_configs::dice::HasInjectedLegacyConfigs; use buck2_common::legacy_configs::dice::HasLegacyConfigs; -use buck2_common::legacy_configs::LegacyBuckConfig; -use buck2_common::legacy_configs::LegacyBuckConfigs; -use buck2_common::legacy_configs::LegacyConfigCmdArg; -use buck2_common::result::SharedError; -use buck2_common::result::SharedResult; -use buck2_common::result::ToSharedResultExt; +use buck2_common::legacy_configs::diffs::ConfigDiffTracker; +use buck2_common::legacy_configs::file_ops::ConfigPath; +use buck2_common::legacy_configs::key::BuckconfigKeyRef; use buck2_configured::calculation::ConfiguredGraphCycleDescriptor; -use buck2_core::async_once_cell::AsyncOnceCell; -use buck2_core::cells::CellResolver; use buck2_core::execution_types::executor_config::CommandExecutorConfig; +use buck2_core::execution_types::executor_config::RemoteExecutorUseCase; use buck2_core::facebook_only; +use buck2_core::fs::fs_util; use buck2_core::fs::paths::abs_norm_path::AbsNormPath; use 
buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; use buck2_core::fs::paths::file_name::FileName; @@ -61,17 +65,14 @@ use buck2_core::fs::project::ProjectRoot; use buck2_core::fs::project_rel_path::ProjectRelativePath; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; use buck2_core::fs::working_dir::WorkingDir; +use buck2_core::pattern::pattern::ParsedPattern; use buck2_core::pattern::pattern_type::ConfiguredProvidersPatternExtra; -use buck2_core::pattern::ParsedPattern; use buck2_core::rollout_percentage::RolloutPercentage; +use buck2_core::target::label::interner::ConcurrentTargetLabelInterner; use buck2_events::daemon_id; use buck2_events::dispatch::EventDispatcher; use buck2_events::metadata; -use buck2_execute::execute::blocking::BlockingExecutor; use buck2_execute::execute::blocking::SetBlockingExecutor; -use buck2_execute::execute::dice_data::set_fallback_executor_config; -use buck2_execute::execute::dice_data::SetCommandExecutor; -use buck2_execute::execute::dice_data::SetReClient; use buck2_execute::knobs::ExecutorGlobalKnobs; use buck2_execute::materialize::materializer::Materializer; use buck2_execute::materialize::materializer::SetMaterializer; @@ -80,21 +81,17 @@ use buck2_execute::re::manager::ReConnectionHandle; use buck2_execute::re::manager::ReConnectionObserver; use buck2_execute_impl::executors::worker::WorkerPool; use buck2_execute_impl::low_pass_filter::LowPassFilter; -use buck2_execute_impl::re::paranoid_download::ParanoidDownloader; -use buck2_file_watcher::file_watcher::FileWatcher; use buck2_file_watcher::mergebase::SetMergebase; -use buck2_forkserver::client::ForkserverClient; +use buck2_futures::cancellation::ExplicitCancellationContext; use buck2_interpreter::dice::starlark_debug::SetStarlarkDebugger; -use buck2_interpreter::dice::starlark_profiler::StarlarkProfilerConfiguration; use buck2_interpreter::extra::xcode::XcodeVersionInfo; use buck2_interpreter::extra::InterpreterHostArchitecture; use buck2_interpreter::extra::InterpreterHostPlatform; use buck2_interpreter::prelude_path::prelude_path; +use buck2_interpreter::starlark_profiler::config::StarlarkProfilerConfiguration; use buck2_interpreter_for_build::interpreter::configuror::BuildInterpreterConfiguror; use buck2_interpreter_for_build::interpreter::cycles::LoadCycleDescriptor; -use buck2_interpreter_for_build::interpreter::globals::register_universal_natives; use buck2_interpreter_for_build::interpreter::interpreter_setup::setup_interpreter; -use buck2_server_ctx::concurrency::DiceDataProvider; use buck2_server_ctx::concurrency::DiceUpdater; use buck2_server_ctx::ctx::DiceAccessor; use buck2_server_ctx::ctx::PrivateStruct; @@ -103,7 +100,10 @@ use buck2_server_ctx::stderr_output_guard::StderrOutputGuard; use buck2_server_ctx::stderr_output_guard::StderrOutputWriter; use buck2_server_starlark_debug::create_debugger_handle; use buck2_server_starlark_debug::BuckStarlarkDebuggerHandle; +use buck2_test::local_resource_registry::InitLocalResourceRegistry; +use buck2_util::arc_str::ArcS; use buck2_util::truncate::truncate_container; +use buck2_validation::enabled_optional_validations_key::SetEnabledOptionalValidations; use dice::DiceComputations; use dice::DiceData; use dice::DiceTransactionUpdater; @@ -113,13 +113,9 @@ use dupe::Dupe; use gazebo::prelude::SliceExt; use host_sharing::HostSharingBroker; use host_sharing::HostSharingStrategy; -use more_futures::cancellation::ExplicitCancellationContext; -use tokio::sync::Mutex; use tracing::warn; use crate::active_commands::ActiveCommandDropGuard; 
-use crate::configs::get_legacy_config_args; -use crate::configs::parse_legacy_cells; use crate::daemon::common::get_default_executor_config; use crate::daemon::common::parse_concurrency; use crate::daemon::common::CommandExecutorFactory; @@ -129,7 +125,7 @@ use crate::heartbeat_guard::HeartbeatGuard; use crate::host_info; use crate::snapshot::SnapshotCollector; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum DaemonCommunicationError { #[error("Got invalid working directory `{0}`")] InvalidWorkingDirectory(String), @@ -161,7 +157,7 @@ pub struct ServerCommandContext<'a> { /// working-dir relative way. For example, it's common to resolve target patterns relative to /// the working directory and resolving cell aliases there. This should generally only be used /// to interpret values that are in the request. We should convert to client-agnostic things early. - pub working_dir: ProjectRelativePathBuf, + pub working_dir: ArcS<ProjectRelativePath>, working_dir_abs: WorkingDir, @@ -174,6 +170,9 @@ pub struct ServerCommandContext<'a> { host_arch_override: HostArchOverride, host_xcode_version_override: Option<String>, + reuse_current_config: bool, + config_overrides: Vec<ConfigOverride>, + // This ensures that there's only one RE connection during the lifetime of this context. It's possible // that we give out other handles, but we don't depend on the lifetimes of those for this guarantee. We // also use this to send a RemoteExecutionSessionCreated if the connection is made. @@ -196,14 +195,14 @@ pub struct ServerCommandContext<'a> { /// Common build options associated with this command. build_options: Option<CommonBuildOptions>, - /// The CellResolver and Configs loader for this command - cell_configs_loader: Arc<CellConfigLoader>, - /// Keep emitting heartbeat events while the ServerCommandContext is alive We put this in an /// Option so that we can ensure heartbeat events are cancelled before everything else is /// dropped. heartbeat_guard_handle: Option<HeartbeatGuard>, + /// The current state of the certificate. This is used to detect errors due to invalid certs. + cert_state: CertState, + /// Daemon uuid passed in from the client side to detect nested invocation. pub(crate) daemon_uuid_from_client: Option<String>, @@ -216,6 +215,7 @@ pub struct ServerCommandContext<'a> { cancellations: &'a ExplicitCancellationContext, exit_when_different_state: bool, + preemptible: PreemptibleWhen, } impl<'a> ServerCommandContext<'a> { @@ -225,6 +225,7 @@ impl<'a> ServerCommandContext<'a> { starlark_profiler_instrumentation_override: StarlarkProfilerConfiguration, build_options: Option<&CommonBuildOptions>, paths: &InvocationPaths, + cert_state: CertState, snapshot_collector: SnapshotCollector, cancellations: &'a ExplicitCancellationContext, ) -> anyhow::Result<Self> { @@ -237,6 +238,8 @@ impl<'a> ServerCommandContext<'a> { client_context.working_dir.clone(), )) })?; + let working_dir_project_relative: ArcS<ProjectRelativePath> = + ArcS::from(<&ProjectRelativePath>::from(&*working_dir_project_relative)); #[derive(Allocative)] struct Observer { @@ -270,18 +273,14 @@ impl<'a> ServerCommandContext<'a> { })); // Add argfiles read by client into IO tracing state.
- if let Some(tracing_provider) = base_context - .daemon - .io - .as_any() - .downcast_ref::<TracingIoProvider>() - { - let argfiles: anyhow::Result<Vec<_>> = client_context + if let Some(tracing_provider) = TracingIoProvider::from_io(&*base_context.daemon.io) { + for p in client_context .argfiles .iter() .map(|s| AbsNormPathBuf::new(s.into())) - .collect(); - tracing_provider.add_config_paths(&base_context.project_root, argfiles?); + { + tracing_provider.add_external_path(p?); + } } let oncall = if client_context.oncall.is_empty() { @@ -299,33 +298,25 @@ impl<'a> ServerCommandContext<'a> { let heartbeat_guard_handle = HeartbeatGuard::new(base_context.events.dupe(), snapshot_collector); - let config_overrides = get_legacy_config_args(&client_context.config_overrides)?; - - let cell_configs_loader = Arc::new(CellConfigLoader { - project_root: base_context.project_root.clone(), - working_dir: working_dir_project_relative.to_buf().into(), - reuse_current_config: client_context.reuse_current_config, - config_overrides, - loaded_cell_configs: AsyncOnceCell::new(), - }); - let debugger_handle = create_debugger_handle(base_context.events.dupe()); Ok(ServerCommandContext { base_context, - working_dir: working_dir_project_relative.to_buf().into(), + working_dir: working_dir_project_relative, working_dir_abs: WorkingDir::unchecked_new(working_dir.to_buf()), host_platform_override: client_context.host_platform(), host_arch_override: client_context.host_arch(), host_xcode_version_override: client_context.host_xcode_version.clone(), + reuse_current_config: client_context.reuse_current_config, + config_overrides: client_context.config_overrides.clone(), oncall, client_id_from_client_metadata, _re_connection_handle: re_connection_handle, + cert_state, starlark_profiler_instrumentation_override, buck_out_dir: paths.buck_out_dir(), isolation_prefix: paths.isolation.clone(), build_options: build_options.cloned(), - cell_configs_loader, record_target_call_stacks: client_context.target_call_stacks, skip_targets_with_duplicate_names: client_context.skip_targets_with_duplicate_names, disable_starlark_types: client_context.disable_starlark_types, @@ -337,13 +328,14 @@ impl<'a> ServerCommandContext<'a> { debugger_handle, cancellations, exit_when_different_state: client_context.exit_when_different_state, + preemptible: client_context.preemptible(), }) } - async fn dice_data_constructor( - &self, + async fn dice_updater<'s>( + &'s self, build_signals: BuildSignalsInstaller, - ) -> DiceCommandDataProvider { + ) -> anyhow::Result<DiceCommandUpdater<'s, 'a>> { let execution_strategy = self .build_options .as_ref() @@ -370,6 +362,7 @@ impl<'a> ServerCommandContext<'a> { .base_context .daemon .use_network_action_output_cache, + new_style_scratch_path: self.base_context.daemon.new_style_scratch_path, ..Default::default() }; @@ -382,72 +375,45 @@ impl<'a> ServerCommandContext<'a> { .as_ref() .and_then(|opts| opts.concurrency.as_ref()) .map(|obj| parse_concurrency(obj.concurrency)) - .map(|v| v.map_err(SharedError::from)); + .map(|v| v.map_err(buck2_error::Error::from)); let executor_config = get_default_executor_config(self.host_platform_override); - let blocking_executor: Arc<_> = self.base_context.daemon.blocking_executor.dupe(); - let materializer = self.base_context.daemon.materializer.dupe(); let re_connection = Arc::new(self.get_re_connection()); - let forkserver = self.base_context.daemon.forkserver.dupe(); - let upload_all_actions = self .build_options .as_ref() .map_or(false, |opts| opts.upload_all_actions); - let create_unhashed_symlink_lock =
self.base_context.daemon.create_unhashed_outputs_lock.dupe(); + let (interpreter_platform, interpreter_architecture, interpreter_xcode_version) = + host_info::get_host_info( + self.host_platform_override, + self.host_arch_override, + &self.host_xcode_version_override, + )?; - DiceCommandDataProvider { - cell_configs_loader: self.cell_configs_loader.dupe(), - events: self.events().dupe(), + Ok(DiceCommandUpdater { + cmd_ctx: self, execution_strategy, run_action_knobs, concurrency, executor_config: Arc::new(executor_config), - blocking_executor, - materializer, re_connection, build_signals, - forkserver, upload_all_actions, skip_cache_read, skip_cache_write, - create_unhashed_symlink_lock, - starlark_debugger: self.debugger_handle.dupe(), keep_going: self .build_options .as_ref() .map_or(false, |opts| opts.keep_going), - http_client: self.base_context.daemon.http_client.dupe(), - paranoid: self.base_context.daemon.paranoid.dupe(), - spawner: self.base_context.spawner.dupe(), - } - } - - async fn dice_updater(&self) -> anyhow::Result<DiceCommandUpdater> { - let (interpreter_platform, interpreter_architecture, interpreter_xcode_version) = - host_info::get_host_info( - self.host_platform_override, - self.host_arch_override, - &self.host_xcode_version_override, - )?; - - Ok(DiceCommandUpdater { - file_watcher: self.base_context.daemon.file_watcher.dupe(), - cell_config_loader: self.cell_configs_loader.dupe(), - buck_out_dir: self.buck_out_dir.clone(), + materialize_failed_inputs: self + .build_options + .as_ref() + .map_or(false, |opts| opts.materialize_failed_inputs), interpreter_platform, interpreter_architecture, interpreter_xcode_version, - starlark_profiler_instrumentation_override: self - .starlark_profiler_instrumentation_override - .dupe(), - disable_starlark_types: self.disable_starlark_types, - unstable_typecheck: self.unstable_typecheck, - skip_targets_with_duplicate_names: self.skip_targets_with_duplicate_names, - record_target_call_stacks: self.record_target_call_stacks, }) } @@ -459,114 +425,230 @@ impl<'a> ServerCommandContext<'a> { } } -struct CellConfigLoader { - project_root: ProjectRoot, - working_dir: ProjectRelativePathBuf, - /// Reuses build config from the previous invocation if there is one - reuse_current_config: bool, - config_overrides: Vec<LegacyConfigCmdArg>, - loaded_cell_configs: - AsyncOnceCell<SharedResult<(CellResolver, LegacyBuckConfigs, HashSet<AbsNormPathBuf>)>>, -} - -impl CellConfigLoader { - pub async fn cells_and_configs( +impl ServerCommandContext<'_> { + async fn load_new_configs( &self, - dice_ctx: &DiceComputations, - ) -> SharedResult<(CellResolver, LegacyBuckConfigs, HashSet<AbsNormPathBuf>)> { - self.loaded_cell_configs - .get_or_init(async move { - if self.reuse_current_config { - // If there is a previous command and --reuse-current-config is set, then the old config is used, ignoring any overrides. - if dice_ctx.is_cell_resolver_key_set().await? - && dice_ctx.is_legacy_configs_key_set().await? - { - if !self.config_overrides.is_empty() { - warn!( - "Found config overrides while using --reuse-current-config flag. Ignoring overrides [{}] and using current config instead", - truncate_container(self.config_overrides.iter().map(|o| o.to_string()), 200), - ); - } - return Ok::<(CellResolver, LegacyBuckConfigs, HashSet<AbsNormPathBuf>), anyhow::Error>(( - dice_ctx.get_cell_resolver().await?, - dice_ctx.get_legacy_configs().await?, - HashSet::new(), - )).shared_error(); - } else { - // If there is no previous command but the flag was set, then the flag is ignored, the command behaves as if there isn't the reuse config flag.
- warn!( - "--reuse-current-config flag was set, but there was no previous invocation detected. Ignoring --reuse-current-config flag" - ); + dice_ctx: &mut DiceComputations<'_>, + ) -> buck2_error::Result<BuckConfigBasedCells> { + let new_configs = BuckConfigBasedCells::parse_with_config_args( + &self.base_context.project_root, + &self.config_overrides, + &self.working_dir, + ) + .await?; + + self.report_traced_config_paths(&new_configs.config_paths)?; + + if self.reuse_current_config { + if dice_ctx + .is_injected_external_buckconfig_data_key_set() + .await? + { + if !self.config_overrides.is_empty() { + let config_type_str = |c| match ConfigType::from_i32(c) { + Some(ConfigType::Value) => "--config", + Some(ConfigType::File) => "--config-file", + None => "", + }; + warn!( + "Found config overrides while using --reuse-current-config flag. Ignoring overrides [{}] and using current config instead", + truncate_container( + self.config_overrides.iter().map(|o| { + format!("{} {}", config_type_str(o.config_type), o.config_override) + }), + 200 + ), + ); + } + // If `--reuse-current-config` is set, use the external config data from the + // previous command. + Ok(BuckConfigBasedCells { + cell_resolver: new_configs.cell_resolver, + root_config: new_configs.root_config, + config_paths: HashSet::new(), + external_data: dice_ctx.get_injected_external_buckconfig_data().await?, + }) + } else { + // If there is no previous command but the flag was set, then the flag is ignored, + // the command behaves as if there isn't the reuse config flag. + warn!( + "--reuse-current-config flag was set, but there was no previous invocation detected. Ignoring --reuse-current-config flag" + ); + Ok(new_configs) + } + } else { + Ok(new_configs) + } + } + + fn report_traced_config_paths(&self, paths: &HashSet<ConfigPath>) -> anyhow::Result<()> { + if let Some(tracing_provider) = TracingIoProvider::from_io(&*self.base_context.daemon.io) { + for config_path in paths { + match config_path { + ConfigPath::Global(p) => { + // FIXME(JakobDegen): This is wrong, since we might fail to add symlinks that we depend on.
+ let p = fs_util::canonicalize(p)?; + tracing_provider.add_external_path(p) } + ConfigPath::Project(p) => tracing_provider.add_project_path(p.clone()), } - parse_legacy_cells(&self.config_overrides, &self.working_dir, &self.project_root) - .shared_error() - }) - .await - .clone() + } + } + + Ok(()) + } } -struct DiceCommandDataProvider { - cell_configs_loader: Arc<CellConfigLoader>, +struct DiceCommandUpdater<'s, 'a: 's> { + cmd_ctx: &'s ServerCommandContext<'a>, execution_strategy: ExecutionStrategy, - events: EventDispatcher, - concurrency: Option<SharedResult<usize>>, + concurrency: Option<buck2_error::Result<usize>>, executor_config: Arc<CommandExecutorConfig>, - blocking_executor: Arc<dyn BlockingExecutor>, - materializer: Arc<dyn Materializer>, re_connection: Arc<ReConnectionHandle>, build_signals: BuildSignalsInstaller, - forkserver: Option<ForkserverClient>, upload_all_actions: bool, run_action_knobs: RunActionKnobs, skip_cache_read: bool, skip_cache_write: bool, - create_unhashed_symlink_lock: Arc<Mutex<()>>, - starlark_debugger: Option<BuckStarlarkDebuggerHandle>, keep_going: bool, - http_client: HttpClient, - paranoid: Option<ParanoidDownloader>, - spawner: Arc<BuckSpawner>, + materialize_failed_inputs: bool, + interpreter_platform: InterpreterHostPlatform, + interpreter_architecture: InterpreterHostArchitecture, + interpreter_xcode_version: Option<XcodeVersionInfo>, +} + +fn create_cycle_detector() -> Arc<dyn UserCycleDetector> { + Arc::new(PairDiceCycleDetector( + CycleDetectorAdapter::<LoadCycleDescriptor>::new(), + CycleDetectorAdapter::<ConfiguredGraphCycleDescriptor>::new(), + )) } #[async_trait] -impl DiceDataProvider for DiceCommandDataProvider { - async fn provide(&self, ctx: &DiceComputations) -> anyhow::Result<UserComputationData> { - let (cell_resolver, legacy_configs, _): (CellResolver, LegacyBuckConfigs, _) = - self.cell_configs_loader.cells_and_configs(ctx).await?; +impl<'s, 'a> DiceUpdater for DiceCommandUpdater<'s, 'a> { + async fn update( + &self, + mut ctx: DiceTransactionUpdater, + ) -> anyhow::Result<(DiceTransactionUpdater, UserComputationData)> { + let existing_state = &mut ctx.existing_state().await.clone(); + let cells_and_configs = self.cmd_ctx.load_new_configs(existing_state).await?; + let cell_resolver = cells_and_configs.cell_resolver; + + let configuror = BuildInterpreterConfiguror::new( + prelude_path(&cell_resolver)?, + self.interpreter_platform, + self.interpreter_architecture, + self.interpreter_xcode_version.clone(), + self.cmd_ctx.record_target_call_stacks, + self.cmd_ctx.skip_targets_with_duplicate_names, + None, + // New interner for each transaction. + Arc::new(ConcurrentTargetLabelInterner::default()), + )?; + + ctx.set_buck_out_path(Some(self.cmd_ctx.buck_out_dir.clone()))?; + + let optional_validations = self + .cmd_ctx + .build_options + .as_ref() + .map_or(Vec::new(), |opts| opts.enable_optional_validations.clone()); + + ctx.set_enabled_optional_validations(optional_validations)?; + + setup_interpreter( + &mut ctx, + cell_resolver, + configuror, + cells_and_configs.external_data, + self.cmd_ctx + .starlark_profiler_instrumentation_override + .clone(), + self.cmd_ctx.disable_starlark_types, + self.cmd_ctx.unstable_typecheck, + )?; + + let (ctx, mergebase) = self + .cmd_ctx + .base_context + .daemon + .file_watcher + .sync(ctx) + .await?; - // TODO(cjhopman): The CellResolver and the legacy configs shouldn't be leaves on the graph. This should - // just be setting the config overrides and host platform override as leaves on the graph.
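// [Editorial sketch, not part of the patch] The config reads below migrate from positional `parse("section", "property")` calls to struct-based `BuckconfigKeyRef` keys. A minimal illustration of the new pattern, matching the calls in this hunk (the explicit `u32` annotation is assumed here only for clarity; the real code lets inference pick the type):
//
//     let threads = root_config
//         .parse::<u32>(BuckconfigKeyRef {
//             section: "build",
//             property: "threads",
//         })?
//         .unwrap_or(0);
//
// `parse` returns `Ok(None)` when the key is unset, so callers choose their own defaults via `unwrap_or`/`unwrap_or_else`.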
+ let mut user_data = self.make_user_computation_data(&cells_and_configs.root_config)?; + ConfigDiffTracker::promote_into( + existing_state, + &mut user_data, + &cells_and_configs.root_config, + ); + user_data.set_mergebase(mergebase); - let root_config = legacy_configs - .get(cell_resolver.root_cell()) - .context("No config for root cell")?; + Ok((ctx, user_data)) + } +} - let config_threads = root_config.parse("build", "threads")?.unwrap_or(0); +impl<'a, 's> DiceCommandUpdater<'a, 's> { + fn make_user_computation_data( + &self, + root_config: &LegacyBuckConfig, + ) -> anyhow::Result<UserComputationData> { + let config_threads = root_config + .parse(BuckconfigKeyRef { + section: "build", + property: "threads", + })? + .unwrap_or(0); let concurrency = match self.concurrency.as_ref() { Some(v) => v.dupe()?, None => parse_concurrency(config_threads)?, }; - if let Some(max_lines) = root_config.parse("ui", "thread_line_limit")? { - self.events + if let Some(max_lines) = root_config.parse(BuckconfigKeyRef { + section: "ui", + property: "thread_line_limit", + })? { + self.cmd_ctx + .events() .instant_event(buck2_data::ConsolePreferences { max_lines }); } let enable_miniperf = root_config - .parse::<RolloutPercentage>("buck2", "miniperf2")? + .parse::<RolloutPercentage>(BuckconfigKeyRef { + section: "buck2", + property: "miniperf2", + })? .unwrap_or_else(RolloutPercentage::always) .roll(); let log_action_keys = root_config - .parse::<RolloutPercentage>("buck2", "log_action_keys")? + .parse::<RolloutPercentage>(BuckconfigKeyRef { + section: "buck2", + property: "log_action_keys", + })? .unwrap_or_else(RolloutPercentage::always) .roll(); let log_configured_graph_size = root_config - .parse::<bool>("buck2", "log_configured_graph_size")? + .parse::<bool>(BuckconfigKeyRef { + section: "buck2", + property: "log_configured_graph_size", + })? + .unwrap_or(false); + + let persistent_worker_shutdown_timeout_s = root_config + .parse::<u64>(BuckconfigKeyRef { + section: "build", + property: "persistent_worker_shutdown_timeout_s", + })? + .or(Some(10)); + + let persistent_worker_check_child_liveness = root_config + .parse::<bool>(BuckconfigKeyRef { + section: "build", + property: "persistent_worker_check_child_liveness", + })? .unwrap_or(false); let executor_global_knobs = ExecutorGlobalKnobs { @@ -585,10 +667,13 @@ impl DiceDataProvider for DiceCommandDataProvider { let low_pass_filter = LowPassFilter::new(concurrency); let mut data = DiceData::new(); - data.set(self.events.dupe()); + data.set(self.cmd_ctx.events().dupe()); let cycle_detector = if root_config - .parse::<bool>("build", "lazy_cycle_detector")? + .parse::<bool>(BuckconfigKeyRef { + section: "build", + property: "lazy_cycle_detector", + })? .unwrap_or(true) { Some(create_cycle_detector()) @@ -599,138 +684,104 @@ impl DiceDataProvider for DiceCommandDataProvider { let mut run_action_knobs = self.run_action_knobs.dupe(); run_action_knobs.use_network_action_output_cache |= root_config - .parse::<bool>("buck2", "use_network_action_output_cache")? + .parse::<bool>(BuckconfigKeyRef { + section: "buck2", + property: "use_network_action_output_cache", + })?
.unwrap_or(false); let mut data = UserComputationData { data, - tracker: Arc::new(BuckDiceTracker::new(self.events.dupe())), + tracker: Arc::new(BuckDiceTracker::new(self.cmd_ctx.events().dupe())?), cycle_detector, activation_tracker: Some(self.build_signals.activation_tracker.dupe()), ..Default::default() }; - let worker_pool = Arc::new(WorkerPool::new()); + let worker_pool = Arc::new(WorkerPool::new( + persistent_worker_shutdown_timeout_s, + persistent_worker_check_child_liveness, + )); let critical_path_backend = root_config - .parse("buck2", "critical_path_backend2")? + .parse(BuckconfigKeyRef { + section: "buck2", + property: "critical_path_backend2", + })? .unwrap_or(CriticalPathBackendName::Default); + let override_use_case = root_config.parse::<RemoteExecutorUseCase>(BuckconfigKeyRef { + section: "buck2_re_client", + property: "override_use_case", + })?; + set_fallback_executor_config(&mut data.data, self.executor_config.dupe()); - data.set_re_client(self.re_connection.get_client()); + data.set_re_client( + self.re_connection + .get_client() + .with_re_use_case_override(override_use_case), + ); data.set_command_executor(Box::new(CommandExecutorFactory::new( self.re_connection.dupe(), host_sharing_broker, low_pass_filter, - self.materializer.dupe(), - self.blocking_executor.dupe(), + self.cmd_ctx.base_context.daemon.materializer.dupe(), + self.cmd_ctx.base_context.daemon.blocking_executor.dupe(), self.execution_strategy, executor_global_knobs, self.upload_all_actions, - self.forkserver.dupe(), + self.cmd_ctx.base_context.daemon.forkserver.dupe(), self.skip_cache_read, self.skip_cache_write, - ctx.global_data() - .get_io_provider() - .project_root() - .to_owned(), + self.cmd_ctx.base_context.daemon.io.project_root().dupe(), worker_pool, - self.paranoid.dupe(), + self.cmd_ctx.base_context.daemon.paranoid.dupe(), + self.materialize_failed_inputs, + override_use_case, ))); - data.set_blocking_executor(self.blocking_executor.dupe()); - data.set_http_client(self.http_client.dupe()); - data.set_materializer(self.materializer.dupe()); + data.set_blocking_executor(self.cmd_ctx.base_context.daemon.blocking_executor.dupe()); + data.set_http_client(self.cmd_ctx.base_context.daemon.http_client.dupe()); + data.set_materializer(self.cmd_ctx.base_context.daemon.materializer.dupe()); + data.init_materialization_queue_tracker(); data.set_build_signals(self.build_signals.build_signals.dupe()); data.set_run_action_knobs(run_action_knobs); - data.set_create_unhashed_symlink_lock(self.create_unhashed_symlink_lock.dupe()); - data.set_starlark_debugger_handle(self.starlark_debugger.clone().map(|v| Box::new(v) as _)); + data.set_create_unhashed_symlink_lock( + self.cmd_ctx + .base_context + .daemon + .create_unhashed_outputs_lock + .dupe(), + ); + data.set_starlark_debugger_handle( + self.cmd_ctx + .debugger_handle + .clone() + .map(|v| Box::new(v) as _), + ); data.set_keep_going(self.keep_going); data.set_critical_path_backend(critical_path_backend); - data.spawner = self.spawner.dupe(); + data.init_local_resource_registry(); + data.spawner = self.cmd_ctx.base_context.daemon.spawner.dupe(); let tags = vec![ format!("lazy-cycle-detector:{}", has_cycle_detector), format!("miniperf:{}", enable_miniperf), format!("log-configured-graph-size:{}", log_configured_graph_size), ]; - self.events.instant_event(buck2_data::TagEvent { tags }); - - self.events.instant_event(buck2_data::CommandOptions { - concurrency: concurrency as _, - }); + self.cmd_ctx + .events() + .instant_event(buck2_data::TagEvent { tags }); + + self.cmd_ctx + .events()
+ .instant_event(buck2_data::CommandOptions { + concurrency: concurrency as _, + }); Ok(data) } } -fn create_cycle_detector() -> Arc<dyn UserCycleDetector> { - Arc::new(PairDiceCycleDetector( - CycleDetectorAdapter::<LoadCycleDescriptor>::new(), - CycleDetectorAdapter::<ConfiguredGraphCycleDescriptor>::new(), - )) -} - -struct DiceCommandUpdater { - file_watcher: Arc<dyn FileWatcher>, - cell_config_loader: Arc<CellConfigLoader>, - buck_out_dir: ProjectRelativePathBuf, - interpreter_platform: InterpreterHostPlatform, - interpreter_architecture: InterpreterHostArchitecture, - interpreter_xcode_version: Option<XcodeVersionInfo>, - starlark_profiler_instrumentation_override: StarlarkProfilerConfiguration, - disable_starlark_types: bool, - unstable_typecheck: bool, - record_target_call_stacks: bool, - skip_targets_with_duplicate_names: bool, -} - -#[async_trait] -impl DiceUpdater for DiceCommandUpdater { - async fn update( - &self, - ctx: DiceTransactionUpdater, - user_data: &mut UserComputationData, - ) -> anyhow::Result<DiceTransactionUpdater> { - let (cell_resolver, legacy_configs, _): (CellResolver, LegacyBuckConfigs, _) = self - .cell_config_loader - .cells_and_configs(ctx.existing_state().await.deref()) - .await?; - // TODO(cjhopman): The CellResolver and the legacy configs shouldn't be leaves on the graph. This should - // just be setting the config overrides and host platform override as leaves on the graph. - - let configuror = BuildInterpreterConfiguror::new( - Some(prelude_path(&cell_resolver)?), - self.interpreter_platform, - self.interpreter_architecture, - self.interpreter_xcode_version.clone(), - self.record_target_call_stacks, - self.skip_targets_with_duplicate_names, - register_universal_natives, - register_universal_natives, - register_universal_natives, - register_universal_natives, - None, - )?; - - let (mut ctx, mergebase) = self.file_watcher.sync(ctx).await?; - user_data.set_mergebase(mergebase); - - ctx.set_buck_out_path(Some(self.buck_out_dir.clone()))?; - - setup_interpreter( - &mut ctx, - cell_resolver, - configuror, - legacy_configs, - self.starlark_profiler_instrumentation_override.dupe(), - self.disable_starlark_types, - self.unstable_typecheck, - )?; - - Ok(ctx) - } -} - impl<'a> Drop for ServerCommandContext<'a> { fn drop(&mut self) { // Ensure we cancel the heartbeat guard first. @@ -756,6 +807,10 @@ impl<'a> ServerCommandContextTrait for ServerCommandContext<'a> { &self.isolation_prefix } + fn cert_state(&self) -> CertState { + self.cert_state.dupe() + } + fn project_root(&self) -> &ProjectRoot { &self.base_context.project_root } @@ -765,7 +820,10 @@ impl<'a> ServerCommandContextTrait for ServerCommandContext<'a> { } /// Provides a DiceTransaction, initialized on first use and shared after initialization.
- async fn dice_accessor(&self, _private: PrivateStruct) -> SharedResult<DiceAccessor> { + async fn dice_accessor<'s>( + &'s self, + _private: PrivateStruct, + ) -> buck2_error::Result<DiceAccessor<'s>> { let (build_signals_installer, deferred_build_signals) = create_build_signals(); let is_nested_invocation = if let Some(uuid) = &self.daemon_uuid_from_client { @@ -776,11 +834,11 @@ impl<'a> ServerCommandContextTrait for ServerCommandContext<'a> { Ok(DiceAccessor { dice_handler: self.base_context.daemon.dice_manager.dupe(), - data: Box::new(self.dice_data_constructor(build_signals_installer).await), - setup: Box::new(self.dice_updater().await?), + setup: Box::new(self.dice_updater(build_signals_installer).await?), is_nested_invocation, sanitized_argv: self.sanitized_argv.clone(), exit_when_different_state: self.exit_when_different_state, + preemptible: self.preemptible, build_signals: deferred_build_signals, }) } @@ -834,6 +892,15 @@ impl<'a> ServerCommandContextTrait for ServerCommandContext<'a> { .to_string(), ); + metadata.insert( + "http_versions".to_owned(), + match self.base_context.daemon.http_client.http2() { + true => "1,2", + false => "1", + } + .to_owned(), + ); + Ok(metadata) } @@ -841,7 +908,7 @@ impl<'a> ServerCommandContextTrait for ServerCommandContext<'a> { /// section async fn config_metadata( &self, - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, ) -> anyhow::Result<HashMap<String, String>> { // Facebook only: metadata collection for Scribe writes facebook_only(); @@ -849,86 +916,89 @@ impl<'a> ServerCommandContextTrait for ServerCommandContext<'a> { fn add_config( map: &mut HashMap<String, String>, cfg: &LegacyBuckConfig, - section: &'static str, - key: &'static str, + key: BuckconfigKeyRef<'static>, field_name: &'static str, ) { - if let Some(value) = cfg.get(section, key) { + if let Some(value) = cfg.get(key) { map.insert(field_name.to_owned(), value.to_owned()); } } fn extract_scuba_defaults( - config: Option<&LegacyBuckConfig>, + config: &LegacyBuckConfig, ) -> Option<serde_json::Map<String, serde_json::Value>> { - let config = config?.get("scuba", "defaults")?; + let config = config.get(BuckconfigKeyRef { + section: "scuba", + property: "defaults", + })?; let unescaped_config = shlex::split(config)?.join(""); let sample_json: serde_json::Value = serde_json::from_str(&unescaped_config).ok()?; sample_json.get("normals")?.as_object().cloned() } let mut metadata = HashMap::new(); - // In the case of invalid configuration (e.g. something like buck2 build -c X), `dice_ctx_default` returns an - // error. We won't be able to get configs to log in that case, but we shouldn't crash. - let (cells, configs, paths): (CellResolver, LegacyBuckConfigs, HashSet<AbsNormPathBuf>) = - self.cell_configs_loader.cells_and_configs(ctx).await?; - // Add legacy config paths to I/O tracing (if enabled). - if let Some(tracing_provider) = self - .base_context - .daemon - .io - .as_any() - .downcast_ref::<TracingIoProvider>() - { - tracing_provider.add_config_paths(&self.base_context.project_root, paths); - } + let cells = ctx.get_cell_resolver().await?; + + let config = ctx.get_legacy_config_for_cell(cells.root_cell()).await?; + add_config( + &mut metadata, + &config, + BuckconfigKeyRef { + section: "log", + property: "repository", + }, + "repository", + ); - let root_cell_config = configs.get(cells.root_cell()); - if let Ok(config) = root_cell_config { - add_config(&mut metadata, config, "log", "repository", "repository"); - - // Buck1 honors a configuration field, `scuba.defaults`, by drawing values from the configuration value and - // inserting them verbatim into Scuba samples.
Buck2 doesn't write to Scuba in the same way that Buck1 - // does, but metadata in this function indirectly makes its way to Scuba, so it makes sense to respect at - // least some of the data within it. - // - // The configuration field is expected to be the canonical JSON representation for a Scuba sample, which is - // to say something like this: - // ``` - // { - // "normals": { "key": "value" }, - // "ints": { "key": 0 }, - // } - // ``` - // - // TODO(swgillespie) - This only covers the normals since Buck2's event protocol only allows for string - // metadata. Depending on what sort of things we're missing by dropping int default columns, we might want - // to consider adding support to the protocol for integer metadata. - - if let Ok(cwd_cell_name) = cells.find(&self.working_dir) { - let cwd_cell_config = configs.get(cwd_cell_name).ok(); - if let Some(normals_obj) = extract_scuba_defaults(cwd_cell_config) { - for (key, value) in normals_obj.iter() { - if let Some(value) = value.as_str() { - metadata.insert(key.clone(), value.to_owned()); - } + // Buck1 honors a configuration field, `scuba.defaults`, by drawing values from the configuration value and + // inserting them verbatim into Scuba samples. Buck2 doesn't write to Scuba in the same way that Buck1 + // does, but metadata in this function indirectly makes its way to Scuba, so it makes sense to respect at + // least some of the data within it. + // + // The configuration field is expected to be the canonical JSON representation for a Scuba sample, which is + // to say something like this: + // ``` + // { + // "normals": { "key": "value" }, + // "ints": { "key": 0 }, + // } + // ``` + // + // TODO(swgillespie) - This only covers the normals since Buck2's event protocol only allows for string + // metadata. Depending on what sort of things we're missing by dropping int default columns, we might want + // to consider adding support to the protocol for integer metadata. + + if let Ok(cwd_cell_name) = cells.find(&self.working_dir) { + let cwd_cell_config = ctx.get_legacy_config_for_cell(cwd_cell_name).await?; + if let Some(normals_obj) = extract_scuba_defaults(&cwd_cell_config) { + for (key, value) in normals_obj.iter() { + if let Some(value) = value.as_str() { + metadata.insert(key.clone(), value.to_owned()); } } - - // `client.id` is often set via the `-c` flag; `-c` configuration is assigned to the cwd cell and not - // the root cell. - if let Some(config) = cwd_cell_config { - add_config(&mut metadata, config, "client", "id", "client"); - add_config( - &mut metadata, - config, - "cache", - "schedule_type", - "schedule_type", - ); - } } + + // `client.id` is often set via the `-c` flag; `-c` configuration is assigned to the cwd cell and not + // the root cell. + add_config( + &mut metadata, + &config, + BuckconfigKeyRef { + section: "client", + property: "id", + }, + "client", + ); + add_config( + &mut metadata, + &config, + BuckconfigKeyRef { + section: "cache", + property: "schedule_type", + }, + "schedule_type", + ); } Ok(metadata) diff --git a/app/buck2_server/src/daemon.rs b/app/buck2_server/src/daemon.rs new file mode 100644 index 0000000000000..16646fee0058b --- /dev/null +++ b/app/buck2_server/src/daemon.rs @@ -0,0 +1,22 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub mod check_working_dir; +pub mod common; +pub mod crash; +pub mod daemon_tcp; +pub mod dice_dump; +pub mod disk_state; +pub mod forkserver; +pub(crate) mod io_provider; +mod multi_event_stream; +pub mod panic; +pub mod server; +pub(crate) mod server_allocative; +pub mod state; diff --git a/app/buck2_server/src/daemon/common.rs b/app/buck2_server/src/daemon/common.rs index ff6e1eecabe44..54b72d2194863 100644 --- a/app/buck2_server/src/daemon/common.rs +++ b/app/buck2_server/src/daemon/common.rs @@ -11,9 +11,11 @@ use std::sync::Arc; use std::sync::OnceLock; use anyhow::Context as _; +use buck2_build_api::actions::execute::dice_data::CommandExecutorResponse; +use buck2_build_api::actions::execute::dice_data::HasCommandExecutor; use buck2_cli_proto::client_context::HostPlatformOverride; use buck2_cli_proto::common_build_options::ExecutionStrategy; -use buck2_core::env_helper::EnvHelper; +use buck2_core::buck2_env_anyhow; use buck2_core::execution_types::executor_config::CacheUploadBehavior; use buck2_core::execution_types::executor_config::CommandExecutorConfig; use buck2_core::execution_types::executor_config::CommandGenerationOptions; @@ -21,37 +23,42 @@ use buck2_core::execution_types::executor_config::Executor; use buck2_core::execution_types::executor_config::HybridExecutionLevel; use buck2_core::execution_types::executor_config::LocalExecutorOptions; use buck2_core::execution_types::executor_config::PathSeparatorKind; +use buck2_core::execution_types::executor_config::RePlatformFields; use buck2_core::execution_types::executor_config::RemoteEnabledExecutor; +use buck2_core::execution_types::executor_config::RemoteEnabledExecutorOptions; +use buck2_core::execution_types::executor_config::RemoteExecutorDependency; use buck2_core::execution_types::executor_config::RemoteExecutorOptions; use buck2_core::execution_types::executor_config::RemoteExecutorUseCase; use buck2_core::fs::artifact_path_resolver::ArtifactFs; use buck2_core::fs::project::ProjectRoot; +use buck2_error::BuckErrorContext; use buck2_execute::execute::blocking::BlockingExecutor; +use buck2_execute::execute::cache_uploader::force_cache_upload; use buck2_execute::execute::cache_uploader::NoOpCacheUploader; -use buck2_execute::execute::dice_data::CommandExecutorResponse; -use buck2_execute::execute::dice_data::HasCommandExecutor; use buck2_execute::execute::prepared::NoOpCommandOptionalExecutor; use buck2_execute::execute::prepared::PreparedCommandExecutor; use buck2_execute::execute::prepared::PreparedCommandOptionalExecutor; use buck2_execute::execute::request::ExecutorPreference; use buck2_execute::knobs::ExecutorGlobalKnobs; use buck2_execute::materialize::materializer::Materializer; +use buck2_execute::re::manager::ManagedRemoteExecutionClient; use buck2_execute::re::manager::ReConnectionHandle; use buck2_execute_impl::executors::action_cache::ActionCacheChecker; use buck2_execute_impl::executors::action_cache::RemoteDepFileCacheChecker; +use buck2_execute_impl::executors::action_cache_upload_permission_checker::ActionCacheUploadPermissionChecker; use buck2_execute_impl::executors::caching::CacheUploader; +use buck2_execute_impl::executors::hybrid::FallbackTracker; use buck2_execute_impl::executors::hybrid::HybridExecutor; use 
buck2_execute_impl::executors::local::LocalExecutor; use buck2_execute_impl::executors::re::ReExecutor; use buck2_execute_impl::executors::stacked::StackedExecutor; +use buck2_execute_impl::executors::to_re_platform::RePlatformFieldsToRePlatform; use buck2_execute_impl::executors::worker::WorkerPool; use buck2_execute_impl::low_pass_filter::LowPassFilter; use buck2_execute_impl::re::paranoid_download::ParanoidDownloader; use buck2_forkserver::client::ForkserverClient; use dupe::Dupe; use host_sharing::HostSharingBroker; -use remote_execution as RE; -use starlark_map::sorted_map::SortedMap; pub fn parse_concurrency(requested: u32) -> anyhow::Result { let mut ret = requested.try_into().context("Invalid concurrency")?; @@ -66,24 +73,29 @@ pub fn parse_concurrency(requested: u32) -> anyhow::Result { /// For each buck invocation, we'll have a single CommandExecutorFactory. This contains shared /// state used by all command executor strategies. pub struct CommandExecutorFactory { - pub re_connection: Arc, + re_connection: Arc, // TODO(cjhopman): This should probably be a global limit, otherwise simultaneous commands may // use more resources than intended (this might no longer be accurate since only instances // sharing the same DICE context should be allowed to proceed concurrently, and we only have // one CommandExecutorFactory per DICE context). - pub host_sharing_broker: Arc, - pub low_pass_filter: Arc, - pub materializer: Arc, - pub blocking_executor: Arc, - pub strategy: ExecutionStrategy, - pub executor_global_knobs: ExecutorGlobalKnobs, - pub upload_all_actions: bool, - pub forkserver: Option, - pub skip_cache_read: bool, - pub skip_cache_write: bool, + host_sharing_broker: Arc, + low_pass_filter: Arc, + materializer: Arc, + blocking_executor: Arc, + strategy: ExecutionStrategy, + executor_global_knobs: ExecutorGlobalKnobs, + upload_all_actions: bool, + forkserver: Option, + skip_cache_read: bool, + skip_cache_write: bool, project_root: ProjectRoot, worker_pool: Arc, paranoid: Option, + materialize_failed_inputs: bool, + /// Cache permission checks per command.
+ cache_upload_permission_checker: Arc, + fallback_tracker: Arc, + re_use_case_override: Option, } impl CommandExecutorFactory { @@ -102,7 +114,14 @@ impl CommandExecutorFactory { project_root: ProjectRoot, worker_pool: Arc, paranoid: Option, + materialize_failed_inputs: bool, + re_use_case_override: Option, ) -> Self { + let cache_upload_permission_checker = Arc::new(ActionCacheUploadPermissionChecker::new( + re_connection + .get_client() + .with_re_use_case_override(re_use_case_override), + )); Self { re_connection, host_sharing_broker: Arc::new(host_sharing_broker), @@ -118,8 +137,25 @@ impl CommandExecutorFactory { project_root, worker_pool, paranoid, + materialize_failed_inputs, + cache_upload_permission_checker, + fallback_tracker: Arc::new(FallbackTracker::new()), + re_use_case_override, } } + + fn get_prepared_re_client(&self) -> ManagedRemoteExecutionClient { + self.re_connection + .get_client() + .with_re_use_case_override(self.re_use_case_override) + } + + fn get_prepared_re_use_case( + &self, + re_use_case: RemoteExecutorUseCase, + ) -> RemoteExecutorUseCase { + self.re_use_case_override.unwrap_or(re_use_case) + } } impl HasCommandExecutor for CommandExecutorFactory { @@ -159,7 +195,7 @@ impl HasCommandExecutor for CommandExecutorFactory { return Err(anyhow::anyhow!( "The desired execution strategy (`{:?}`) is incompatible with the local executor", self.strategy, - )); + )).input_anyhow(); } return Ok(CommandExecutorResponse { @@ -170,24 +206,29 @@ impl HasCommandExecutor for CommandExecutorFactory { }); } - let remote_executor_new = |options: &RemoteExecutorOptions, - re_use_case: &RemoteExecutorUseCase, - re_action_key: &Option, - remote_cache_enabled: bool| { - ReExecutor { - artifact_fs: artifact_fs.clone(), - project_fs: self.project_root.clone(), - materializer: self.materializer.dupe(), - re_client: self.re_connection.get_client(), - re_use_case: *re_use_case, - re_action_key: re_action_key.clone(), - re_max_queue_time_ms: options.re_max_queue_time_ms, - knobs: self.executor_global_knobs.dupe(), - skip_cache_read: self.skip_cache_read || !remote_cache_enabled, - skip_cache_write: self.skip_cache_write || !remote_cache_enabled, - paranoid: self.paranoid.dupe(), - } - }; + let remote_executor_new = + |options: &RemoteExecutorOptions, + re_use_case: &RemoteExecutorUseCase, + re_action_key: &Option, + remote_cache_enabled: bool, + dependencies: &[RemoteExecutorDependency]| { + ReExecutor { + artifact_fs: artifact_fs.clone(), + project_fs: self.project_root.clone(), + materializer: self.materializer.dupe(), + re_client: self.get_prepared_re_client(), + re_use_case: self.get_prepared_re_use_case(*re_use_case), + re_action_key: re_action_key.clone(), + re_max_queue_time_ms: options.re_max_queue_time_ms, + re_resource_units: options.re_resource_units, + knobs: self.executor_global_knobs.dupe(), + skip_cache_read: self.skip_cache_read || !remote_cache_enabled, + skip_cache_write: self.skip_cache_write || !remote_cache_enabled, + paranoid: self.paranoid.dupe(), + materialize_failed_inputs: self.materialize_failed_inputs, + dependencies: dependencies.to_vec(), + } + }; let response = match &executor_config.executor { Executor::Local(local) => { @@ -202,34 +243,25 @@ impl HasCommandExecutor for CommandExecutorFactory { }) } } - Executor::RemoteEnabled { - executor, - re_properties, - re_use_case, - re_action_key, - cache_upload_behavior, - remote_cache_enabled, - remote_dep_file_cache_enabled, - } => { + Executor::RemoteEnabled(remote_options) => { // NOTE: While we now have a legit 
flag for this, we keep the env var. This has been used // in remediating prod incidents in the past, and this is the kind of thing that can easily // become tribal knowledge. Keeping this does not hurt us. - static DISABLE_CACHING: EnvHelper = - EnvHelper::new("BUCK2_TEST_DISABLE_CACHING"); - - let disable_caching = DISABLE_CACHING - .get_copied()? - .unwrap_or(self.skip_cache_read); - let disable_caching = - disable_caching || (!remote_cache_enabled && !remote_dep_file_cache_enabled); + let disable_caching = buck2_env_anyhow!("BUCK2_TEST_DISABLE_CACHING", type=bool, applicability=testing)? + .unwrap_or(self.skip_cache_read); + + let disable_caching = disable_caching + || (!remote_options.remote_cache_enabled + && !remote_options.remote_dep_file_cache_enabled); // This is for test only as in real life, it would be silly to only use the remote dep file cache and not the regular cache // This will only do anything if cache is not disabled and remote dep file cache is enabled - static ONLY_REMOTE_DEP_FILE_CACHE: EnvHelper = - EnvHelper::new("BUCK2_TEST_ONLY_REMOTE_DEP_FILE_CACHE"); - let only_remote_dep_file_cache = - ONLY_REMOTE_DEP_FILE_CACHE.get_copied()?.unwrap_or(false); + let only_remote_dep_file_cache = buck2_env_anyhow!( + "BUCK2_TEST_ONLY_REMOTE_DEP_FILE_CACHE", + bool, + applicability = testing + )?; let cache_checker_new = || -> Arc { if disable_caching { @@ -237,13 +269,14 @@ impl HasCommandExecutor for CommandExecutorFactory { } let remote_dep_file_checker: Arc = - if *remote_dep_file_cache_enabled { + if remote_options.remote_dep_file_cache_enabled { Arc::new(RemoteDepFileCacheChecker { artifact_fs: artifact_fs.clone(), materializer: self.materializer.dupe(), - re_client: self.re_connection.get_client(), - re_use_case: *re_use_case, - re_action_key: re_action_key.clone(), + re_client: self.get_prepared_re_client(), + re_use_case: self + .get_prepared_re_use_case(remote_options.re_use_case), + re_action_key: remote_options.re_action_key.clone(), upload_all_actions: self.upload_all_actions, knobs: self.executor_global_knobs.dupe(), paranoid: self.paranoid.dupe(), @@ -258,9 +291,9 @@ impl HasCommandExecutor for CommandExecutorFactory { Arc::new(ActionCacheChecker { artifact_fs: artifact_fs.clone(), materializer: self.materializer.dupe(), - re_client: self.re_connection.get_client(), - re_use_case: *re_use_case, - re_action_key: re_action_key.clone(), + re_client: self.get_prepared_re_client(), + re_use_case: self.get_prepared_re_use_case(remote_options.re_use_case), + re_action_key: remote_options.re_action_key.clone(), upload_all_actions: self.upload_all_actions, knobs: self.executor_global_knobs.dupe(), paranoid: self.paranoid.dupe(), @@ -269,67 +302,73 @@ impl HasCommandExecutor for CommandExecutorFactory { } }; - let executor: Option> = match &executor { - RemoteEnabledExecutor::Local(local) if !self.strategy.ban_local() => { - Some(Arc::new(local_executor_new(local))) - } - RemoteEnabledExecutor::Remote(remote) if !self.strategy.ban_remote() => { - Some(Arc::new(remote_executor_new( - remote, - re_use_case, - re_action_key, - *remote_cache_enabled, - ))) - } - RemoteEnabledExecutor::Hybrid { - local, - remote, - level, - } if !self.strategy.ban_hybrid() => { - let re_max_input_files_bytes = remote - .re_max_input_files_bytes - .unwrap_or(DEFAULT_RE_MAX_INPUT_FILE_BYTES); - let local = local_executor_new(local); - let remote = remote_executor_new( + let executor: Option> = + match &remote_options.executor { + RemoteEnabledExecutor::Local(local) if !self.strategy.ban_local() => { + 
Some(Arc::new(local_executor_new(local))) + } + RemoteEnabledExecutor::Remote(remote) if !self.strategy.ban_remote() => { + Some(Arc::new(remote_executor_new( + remote, + &remote_options.re_use_case, + &remote_options.re_action_key, + remote_options.remote_cache_enabled, + &remote_options.dependencies, + ))) + } + RemoteEnabledExecutor::Hybrid { + local, remote, - re_use_case, - re_action_key, - *remote_cache_enabled, - ); - let executor_preference = self.strategy.hybrid_preference(); - let low_pass_filter = self.low_pass_filter.dupe(); - - if self.paranoid.is_some() { - let executor_preference = executor_preference - .and(ExecutorPreference::DefaultErasePreferences)?; - - Some(Arc::new(HybridExecutor { - local, - remote: StackedExecutor { - optional: cache_checker_new(), - fallback: remote, - }, - level: HybridExecutionLevel::Full { - fallback_on_failure: true, - low_pass_filter: false, - }, - executor_preference, - re_max_input_files_bytes, - low_pass_filter, - })) - } else { - Some(Arc::new(HybridExecutor { - local, + level, + } if !self.strategy.ban_hybrid() => { + let re_max_input_files_bytes = remote + .re_max_input_files_bytes + .unwrap_or(DEFAULT_RE_MAX_INPUT_FILE_BYTES); + let local = local_executor_new(local); + let remote = remote_executor_new( remote, - level: *level, - executor_preference, - re_max_input_files_bytes, - low_pass_filter, - })) + &remote_options.re_use_case, + &remote_options.re_action_key, + remote_options.remote_cache_enabled, + &remote_options.dependencies, + ); + let executor_preference = self.strategy.hybrid_preference(); + let low_pass_filter = self.low_pass_filter.dupe(); + let fallback_tracker = self.fallback_tracker.dupe(); + + if self.paranoid.is_some() { + let executor_preference = executor_preference + .and(ExecutorPreference::DefaultErasePreferences)?; + + Some(Arc::new(HybridExecutor { + local, + remote: StackedExecutor { + optional: cache_checker_new(), + fallback: remote, + }, + level: HybridExecutionLevel::Full { + fallback_on_failure: true, + low_pass_filter: false, + }, + executor_preference, + re_max_input_files_bytes, + low_pass_filter, + fallback_tracker, + })) + } else { + Some(Arc::new(HybridExecutor { + local, + remote, + level: *level, + executor_preference, + re_max_input_files_bytes, + low_pass_filter, + fallback_tracker, + })) + } } - } - _ => None, - }; + _ => None, + }; let cache_checker = if self.paranoid.is_some() { Arc::new(NoOpCommandOptionalExecutor {}) as _ @@ -337,35 +376,37 @@ impl HasCommandExecutor for CommandExecutorFactory { cache_checker_new() }; - let platform = RE::Platform { - properties: re_properties - .iter() - .map(|(k, v)| RE::Property { - name: k.clone(), - value: v.clone(), - }) - .collect(), - }; - - let cache_uploader = if disable_caching { + let cache_uploader = if force_cache_upload()? 
{ + Arc::new(CacheUploader::new( + artifact_fs.clone(), + self.materializer.dupe(), + self.get_prepared_re_client(), + remote_options.re_use_case, + remote_options.re_properties.clone(), + None, + self.cache_upload_permission_checker.dupe(), + )) as _ + } else if disable_caching { Arc::new(NoOpCacheUploader {}) as _ - } else if let CacheUploadBehavior::Enabled { max_bytes } = cache_upload_behavior { - Arc::new(CacheUploader { - artifact_fs: artifact_fs.clone(), - materializer: self.materializer.dupe(), - re_client: self.re_connection.get_client(), - re_use_case: *re_use_case, - knobs: self.executor_global_knobs.dupe(), - max_bytes: *max_bytes, - platform: platform.clone(), - }) as _ + } else if let CacheUploadBehavior::Enabled { max_bytes } = + remote_options.cache_upload_behavior + { + Arc::new(CacheUploader::new( + artifact_fs.clone(), + self.materializer.dupe(), + self.get_prepared_re_client(), + remote_options.re_use_case, + remote_options.re_properties.clone(), + max_bytes, + self.cache_upload_permission_checker.dupe(), + )) as _ } else { Arc::new(NoOpCacheUploader {}) as _ }; executor.map(|executor| CommandExecutorResponse { executor, - platform, + platform: remote_options.re_properties.to_re_platform(), cache_checker, cache_uploader, }) @@ -373,9 +414,7 @@ impl HasCommandExecutor for CommandExecutorFactory { }; let response = response - .with_context(|| format!( -"The desired execution strategy (`{:?}`) is incompatible with the executor config that was selected: {:?}", -self.strategy, executor_config))?; + .with_context(|| format!("The desired execution strategy (`{:?}`) is incompatible with the executor config that was selected: {:?}", self.strategy, executor_config)).input_anyhow()?; Ok(response) } @@ -426,7 +465,7 @@ pub fn get_default_executor_config(host_platform: HostPlatformOverride) -> Comma let executor = if buck2_core::is_open_source() { Executor::Local(LocalExecutorOptions::default()) } else { - Executor::RemoteEnabled { + Executor::RemoteEnabled(RemoteEnabledExecutorOptions { executor: RemoteEnabledExecutor::Hybrid { local: LocalExecutorOptions::default(), remote: RemoteExecutorOptions::default(), @@ -438,7 +477,8 @@ pub fn get_default_executor_config(host_platform: HostPlatformOverride) -> Comma cache_upload_behavior: CacheUploadBehavior::Disabled, remote_cache_enabled: true, remote_dep_file_cache_enabled: false, - } + dependencies: vec![], + }) }; CommandExecutorConfig { @@ -450,7 +490,7 @@ pub fn get_default_executor_config(host_platform: HostPlatformOverride) -> Comma } } -fn get_default_re_properties(host_platform: HostPlatformOverride) -> SortedMap { +fn get_default_re_properties(host_platform: HostPlatformOverride) -> RePlatformFields { let linux = &[("platform", "linux-remote-execution")]; let macos = &[("platform", "mac"), ("subplatform", "any")]; let windows = &[("platform", "windows")]; @@ -467,10 +507,14 @@ fn get_default_re_properties(host_platform: HostPlatformOverride) -> SortedMap PathSeparatorKind { diff --git a/app/buck2_server/src/daemon/crash.rs b/app/buck2_server/src/daemon/crash.rs new file mode 100644 index 0000000000000..79a355b8ecf39 --- /dev/null +++ b/app/buck2_server/src/daemon/crash.rs @@ -0,0 +1,30 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use buck2_cli_proto::unstable_crash_request::CrashType; +use buck2_cli_proto::GenericResponse; +use buck2_cli_proto::UnstableCrashRequest; + +pub(crate) fn crash(req: UnstableCrashRequest) -> anyhow::Result { + let crash_type = CrashType::from_i32(req.crash_type).ok_or(anyhow::anyhow!("bad request"))?; + match crash_type { + CrashType::Panic => { + panic!("explicitly requested panic (via unstable_crash)"); + #[allow(unreachable_code)] + Ok(GenericResponse {}) + } + CrashType::Abort => { + // Crash with SIGABRT. + // Should trigger folly signal handler to dump stack trace. + // SIGSEGV,SIGTERM,SIGBUS,SIGILL,etc. should behave similarly. + // https://fburl.com/code/ap385ats + std::process::abort(); + } + } +} diff --git a/app/buck2_server/src/daemon/disk_state.rs b/app/buck2_server/src/daemon/disk_state.rs index 815076e04ad96..99c719b359bca 100644 --- a/app/buck2_server/src/daemon/disk_state.rs +++ b/app/buck2_server/src/daemon/disk_state.rs @@ -13,11 +13,11 @@ use std::sync::Arc; use allocative::Allocative; use anyhow::Context; use buck2_common::invocation_paths::InvocationPaths; -use buck2_common::legacy_configs::LegacyBuckConfig; +use buck2_common::legacy_configs::configs::LegacyBuckConfig; +use buck2_common::legacy_configs::key::BuckconfigKeyRef; use buck2_core::fs::fs_util; use buck2_core::fs::paths::abs_norm_path::AbsNormPath; use buck2_core::fs::paths::file_name::FileName; -use buck2_core::fs::project::ProjectRoot; use buck2_core::rollout_percentage::RolloutPercentage; use buck2_execute::digest_config::DigestConfig; use buck2_execute::execute::blocking::BlockingExecutor; @@ -45,7 +45,10 @@ impl DiskStateOptions { materialization_method, MaterializationMethod::Deferred | MaterializationMethod::DeferredSkipFinalArtifacts ) && root_config - .parse::("buck2", "sqlite_materializer_state")? + .parse::(BuckconfigKeyRef { + section: "buck2", + property: "sqlite_materializer_state", + })? .unwrap_or_else(RolloutPercentage::never) .roll(); Ok(Self { @@ -60,7 +63,6 @@ pub(crate) async fn maybe_initialize_materializer_sqlite_db( io_executor: Arc, root_config: &LegacyBuckConfig, deferred_materializer_configs: &DeferredMaterializerConfigs, - fs: ProjectRoot, digest_config: DigestConfig, init_ctx: &BuckdServerInitPreferences, ) -> anyhow::Result<(Option, Option)> { @@ -68,7 +70,9 @@ pub(crate) async fn maybe_initialize_materializer_sqlite_db( // When sqlite materializer state is disabled, we should always delete the materializer state db. // Otherwise, artifacts in buck-out will diverge from the state stored in db. io_executor - .execute_io_inline(|| fs.remove_path_recursive(&paths.materializer_state_path())) + .execute_io_inline(|| { + fs_util::remove_all(&paths.materializer_state_path()).map_err(anyhow::Error::from) + }) .await?; return Ok((None, None)); } @@ -84,9 +88,10 @@ pub(crate) async fn maybe_initialize_materializer_sqlite_db( .to_string(), ), ]); - if let Some(buckconfig_version) = - root_config.parse("buck2", "sqlite_materializer_state_version")? - { + if let Some(buckconfig_version) = root_config.parse(BuckconfigKeyRef { + section: "buck2", + property: "sqlite_materializer_state_version", + })? 
{ versions.insert("buckconfig_version".to_owned(), buckconfig_version); } if let Some(hostname) = metadata.get("hostname") { @@ -136,7 +141,6 @@ pub(crate) async fn maybe_initialize_materializer_sqlite_db( pub(crate) fn delete_unknown_disk_state( cache_dir_path: &AbsNormPath, known_dir_names: &[&FileName], - fs: ProjectRoot, ) -> anyhow::Result<()> { let res: anyhow::Result<()> = try { if cache_dir_path.exists() { @@ -150,7 +154,7 @@ pub(crate) fn delete_unknown_disk_state( // known_dir_names is always small, so this contains isn't expensive if !known_dir_names.contains(&filename) || !entry.path().is_dir() { - fs.remove_path_recursive(&cache_dir_path.join(filename))?; + fs_util::remove_all(&cache_dir_path.join(filename))?; } } } @@ -169,7 +173,6 @@ mod tests { use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; use buck2_core::fs::project::ProjectRootTemp; use buck2_core::fs::project_rel_path::ProjectRelativePath; - use dupe::Dupe; use super::*; @@ -184,12 +187,14 @@ mod tests { let command_hashes_db = cache_dir_path.join(ForwardRelativePath::unchecked_new( "command_hashes/db.sqlite", )); - fs.create_file(&materializer_state_db, false).unwrap(); - fs.create_file(&command_hashes_db, false).unwrap(); + fs_util::create_dir_all(materializer_state_db.parent().unwrap()).unwrap(); + fs_util::write(&materializer_state_db, b"").unwrap(); + fs_util::create_dir_all(command_hashes_db.parent().unwrap()).unwrap(); + fs_util::write(&command_hashes_db, b"").unwrap(); assert!(materializer_state_db.exists()); assert!(command_hashes_db.exists()); - delete_unknown_disk_state(&cache_dir_path, &[], fs.dupe()).unwrap(); + delete_unknown_disk_state(&cache_dir_path, &[]).unwrap(); assert!(!materializer_state_db.exists()); assert!(!command_hashes_db.exists()); @@ -206,15 +211,16 @@ mod tests { let command_hashes_db = cache_dir_path.join(ForwardRelativePath::unchecked_new( "command_hashes/db.sqlite", )); - fs.create_file(&materializer_state_db, false).unwrap(); - fs.create_file(&command_hashes_db, false).unwrap(); + fs_util::create_dir_all(materializer_state_db.parent().unwrap()).unwrap(); + fs_util::write(&materializer_state_db, b"").unwrap(); + fs_util::create_dir_all(command_hashes_db.parent().unwrap()).unwrap(); + fs_util::write(&command_hashes_db, b"").unwrap(); assert!(materializer_state_db.exists()); assert!(command_hashes_db.exists()); delete_unknown_disk_state( &cache_dir_path, &[FileName::unchecked_new("materializer_state")], - fs.dupe(), ) .unwrap(); diff --git a/app/buck2_server/src/daemon/forkserver.rs b/app/buck2_server/src/daemon/forkserver.rs index 21d379174bfd0..de2099dcb0e19 100644 --- a/app/buck2_server/src/daemon/forkserver.rs +++ b/app/buck2_server/src/daemon/forkserver.rs @@ -7,7 +7,8 @@ * of this source tree. */ -use buck2_common::legacy_configs::LegacyBuckConfig; +use buck2_common::init::ResourceControlConfig; +use buck2_common::legacy_configs::configs::LegacyBuckConfig; use buck2_core::fs::paths::abs_norm_path::AbsNormPath; use buck2_forkserver::client::ForkserverClient; @@ -15,12 +16,17 @@ use buck2_forkserver::client::ForkserverClient; pub async fn maybe_launch_forkserver( root_config: &LegacyBuckConfig, forkserver_state_dir: &AbsNormPath, + resource_control: &ResourceControlConfig, ) -> anyhow::Result> { use anyhow::Context; + use buck2_common::legacy_configs::key::BuckconfigKeyRef; use buck2_core::rollout_percentage::RolloutPercentage; let config = root_config - .parse::("buck2", "forkserver")? 
+ .parse::(BuckconfigKeyRef { + section: "buck2", + property: "forkserver", + })? .unwrap_or_else(RolloutPercentage::always); if !config.roll() { @@ -29,7 +35,13 @@ pub async fn maybe_launch_forkserver( let exe = std::env::current_exe().context("Cannot access current_exe")?; Some( - buck2_forkserver::unix::launch_forkserver(exe, &["forkserver"], forkserver_state_dir).await, + buck2_forkserver::unix::launch_forkserver( + exe, + &["forkserver"], + forkserver_state_dir, + resource_control.serialize()?, + ) + .await, ) .transpose() } @@ -38,6 +50,7 @@ pub async fn maybe_launch_forkserver( pub async fn maybe_launch_forkserver( _root_config: &LegacyBuckConfig, _forkserver_state_dir: &AbsNormPath, + _resource_control: &ResourceControlConfig, ) -> anyhow::Result> { Ok(None) } diff --git a/app/buck2_server/src/daemon/io_provider.rs b/app/buck2_server/src/daemon/io_provider.rs index 326fb85cb95b2..c7f915d08e0af 100644 --- a/app/buck2_server/src/daemon/io_provider.rs +++ b/app/buck2_server/src/daemon/io_provider.rs @@ -13,26 +13,29 @@ use buck2_common::cas_digest::CasDigestConfig; use buck2_common::io::fs::FsIoProvider; use buck2_common::io::trace::TracingIoProvider; use buck2_common::io::IoProvider; -use buck2_common::legacy_configs::LegacyBuckConfig; +use buck2_common::legacy_configs::configs::LegacyBuckConfig; use buck2_core::fs::project::ProjectRoot; pub async fn create_io_provider( fb: fbinit::FacebookInit, project_fs: ProjectRoot, - root_config: Option<&LegacyBuckConfig>, + root_config: &LegacyBuckConfig, cas_digest_config: CasDigestConfig, trace_io: bool, ) -> anyhow::Result> { - #[cfg(any(fbcode_build, cargo_internal_build))] + #[cfg(fbcode_build)] { + use buck2_common::legacy_configs::key::BuckconfigKeyRef; use buck2_core::rollout_percentage::RolloutPercentage; let allow_eden_io_default = RolloutPercentage::from_bool(cfg!(any(target_os = "macos", target_os = "windows"))); let allow_eden_io = root_config - .and_then(|c| c.parse("buck2", "allow_eden_io").transpose()) - .transpose()? + .parse(BuckconfigKeyRef { + section: "buck2", + property: "allow_eden_io", + })? .unwrap_or(allow_eden_io_default) .roll(); diff --git a/app/buck2_server/src/daemon/mod.rs b/app/buck2_server/src/daemon/mod.rs deleted file mode 100644 index f57a01d0a3a51..0000000000000 --- a/app/buck2_server/src/daemon/mod.rs +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -pub mod check_working_dir; -pub mod common; -pub mod daemon_tcp; -pub mod dice_dump; -pub mod disk_state; -pub mod forkserver; -pub(crate) mod io_provider; -mod multi_event_stream; -pub mod panic; -pub mod server; -pub(crate) mod server_allocative; -pub mod state; diff --git a/app/buck2_server/src/daemon/multi_event_stream.rs b/app/buck2_server/src/daemon/multi_event_stream.rs index 4f3afb78bd048..3bf61f31a692b 100644 --- a/app/buck2_server/src/daemon/multi_event_stream.rs +++ b/app/buck2_server/src/daemon/multi_event_stream.rs @@ -104,7 +104,6 @@ mod tests { use assert_matches::assert_matches; use buck2_data::BuckEvent; use futures::stream::poll_fn; - use futures::stream::Stream; use futures::stream::StreamExt; use super::*; diff --git a/app/buck2_server/src/daemon/panic.rs b/app/buck2_server/src/daemon/panic.rs index a139847024fd8..b93176c82a7fc 100644 --- a/app/buck2_server/src/daemon/panic.rs +++ b/app/buck2_server/src/daemon/panic.rs @@ -22,7 +22,7 @@ use std::time::Duration; use std::time::SystemTime; use buck2_cli_proto::unstable_dice_dump_request::DiceDumpFormat; -use buck2_core::env_helper::EnvHelper; +use buck2_core::buck2_env_anyhow; use buck2_wrapper_common::invocation_id::TraceId; use crate::daemon::dice_dump::tar_dice_dump; @@ -72,14 +72,9 @@ pub fn initialize(daemon_state: Arc) { /// This cell prevents a circular set of panics if this happens. static ALREADY_DUMPED_DICE: OnceLock<()> = OnceLock::new(); -static DICE_DUMP_ON_PANIC: EnvHelper = EnvHelper::new("BUCK2_DICE_DUMP_ON_PANIC"); - fn daemon_panic_hook(daemon_state: &Arc, info: &PanicInfo) { if !buck2_core::is_open_source() - && DICE_DUMP_ON_PANIC - .get_copied() - .unwrap_or_default() - .unwrap_or_default() + && buck2_env_anyhow!("BUCK2_DICE_DUMP_ON_PANIC", bool).unwrap_or_default() && ALREADY_DUMPED_DICE.set(()).is_ok() { let panic_id = TraceId::new(); diff --git a/app/buck2_server/src/daemon/server.rs b/app/buck2_server/src/daemon/server.rs index 78cab94e06d78..ba072b34a4f90 100644 --- a/app/buck2_server/src/daemon/server.rs +++ b/app/buck2_server/src/daemon/server.rs @@ -16,7 +16,6 @@ use std::sync::atomic::Ordering; use std::sync::Arc; use std::task::Context; use std::task::Poll; -use std::thread; use std::time::Duration; use std::time::Instant; use std::time::SystemTime; @@ -26,39 +25,56 @@ use anyhow::Context as _; use async_trait::async_trait; use buck2_build_api::configure_dice::configure_dice_for_buck; use buck2_build_api::spawner::BuckSpawner; +use buck2_certs::validate::check_cert_state; +use buck2_certs::validate::validate_certs; +use buck2_certs::validate::CertState; use buck2_cli_proto::daemon_api_server::*; use buck2_cli_proto::*; use buck2_common::buckd_connection::BUCK_AUTH_TOKEN_HEADER; use buck2_common::events::HasEvents; +use buck2_common::init::DaemonStartupConfig; use buck2_common::invocation_paths::InvocationPaths; use buck2_common::io::trace::TracingIoProvider; use buck2_common::io::IoProvider; -use buck2_common::legacy_configs::init::DaemonStartupConfig; -use buck2_common::legacy_configs::LegacyBuckConfig; +use buck2_common::legacy_configs::configs::LegacyBuckConfig; use buck2_common::memory; -use buck2_core::env_helper::EnvHelper; +use buck2_core::buck2_env_anyhow; use buck2_core::error::reload_hard_error_config; use buck2_core::error::reset_soft_error_counters; use buck2_core::fs::cwd::WorkingDirectory; use buck2_core::fs::fs_util; +use buck2_core::fs::fs_util::disk_space_stats; +use buck2_core::fs::fs_util::DiskSpaceStats; use buck2_core::fs::paths::abs_path::AbsPathBuf; +use 
buck2_core::fs::project::ProjectRoot; use buck2_core::logging::LogConfigurationReloadHandle; +use buck2_core::pattern::unparsed::UnparsedPatternPredicate; +use buck2_error::BuckErrorContext; use buck2_events::dispatch::EventDispatcher; +use buck2_events::errors::create_error_report; use buck2_events::source::ChannelEventSource; use buck2_events::Event; use buck2_execute::digest_config::DigestConfig; use buck2_execute::materialize::materializer::MaterializationMethod; use buck2_execute_impl::materializers::sqlite::MaterializerStateIdentity; -use buck2_interpreter::dice::starlark_profiler::StarlarkProfilerConfiguration; +use buck2_futures::cancellation::ExplicitCancellationContext; +use buck2_futures::drop::DropTogether; +use buck2_futures::spawn::spawn_cancellable; +use buck2_interpreter::starlark_profiler::config::StarlarkProfilerConfiguration; +use buck2_profile::proto_to_profile_mode; use buck2_profile::starlark_profiler_configuration_from_request; use buck2_server_ctx::bxl::BXL_SERVER_COMMANDS; -use buck2_server_ctx::ctx::ServerCommandContextTrait; -use buck2_server_ctx::other_server_commands::OTHER_SERVER_COMMANDS; +use buck2_server_ctx::late_bindings::AUDIT_SERVER_COMMAND; +use buck2_server_ctx::late_bindings::OTHER_SERVER_COMMANDS; +use buck2_server_ctx::late_bindings::STARLARK_SERVER_COMMAND; use buck2_server_ctx::partial_result_dispatcher::NoPartialResult; use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; use buck2_server_ctx::streaming_request_handler::StreamingRequestHandler; use buck2_server_ctx::test_command::TEST_COMMAND; use buck2_server_starlark_debug::run::run_dap_server_command; +use buck2_test::executor_launcher::get_all_test_executors; +use buck2_util::system_stats::system_memory_stats; +use buck2_util::threads::thread_spawn; use dice::DetectCycles; use dice::Dice; use dice::WhichDice; @@ -73,13 +89,11 @@ use futures::FutureExt; use futures::Stream; use futures::StreamExt; use futures::TryFutureExt; -use more_futures::cancellation::ExplicitCancellationContext; -use more_futures::drop::DropTogether; -use more_futures::spawn::spawn_cancellable; use rand::RngCore; use rand::SeedableRng; use tokio::runtime::Handle; use tokio::sync::oneshot; +use tokio::time::timeout; use tonic::service::interceptor; use tonic::service::Interceptor; use tonic::transport::Server; @@ -92,16 +106,19 @@ use crate::active_commands::ActiveCommand; use crate::active_commands::ActiveCommandStateWriter; use crate::clean_stale::clean_stale_command; use crate::ctx::ServerCommandContext; +use crate::daemon::crash::crash; use crate::daemon::multi_event_stream::MultiEventStream; use crate::daemon::server_allocative::spawn_allocative; use crate::daemon::state::DaemonState; use crate::file_status::file_status_command; use crate::lsp::run_lsp_server_command; use crate::new_generic::new_generic_command; +use crate::profile::profile_command; use crate::snapshot; use crate::snapshot::SnapshotCollector; use crate::subscription::run_subscription_server_command; use crate::trace_io::trace_io_command; +use crate::version_control_revision; // TODO(cjhopman): Figure out a reasonable value for this. static DEFAULT_KILL_TIMEOUT: Duration = Duration::from_millis(500); @@ -168,35 +185,6 @@ impl BuckdServerInitPreferences { } } -/// Access to functions which live outside of `buck2_server` crate. 
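The `BuckdServerDependencies` trait deleted below previously routed these commands through injected callbacks; the new code instead resolves them through late bindings such as `AUDIT_SERVER_COMMAND.get()?`. A rough sketch of that pattern — hypothetical names, not the actual `buck2_server_ctx` types:

```rust
use std::sync::OnceLock;

// Hypothetical stand-in for the late-binding registration pattern; the real
// bindings live in buck2_server_ctx::late_bindings.
pub trait AuditCommand: Send + Sync + 'static {
    fn run(&self, args: &[String]) -> anyhow::Result<()>;
}

pub static AUDIT_COMMAND: OnceLock<&'static dyn AuditCommand> = OnceLock::new();

// The crate that implements the command registers itself once at startup, so
// the server crate can invoke it without a direct (cyclic) crate dependency.
pub fn init_audit_command(cmd: &'static dyn AuditCommand) {
    let _ = AUDIT_COMMAND.set(cmd);
}

pub fn audit_command() -> anyhow::Result<&'static dyn AuditCommand> {
    AUDIT_COMMAND
        .get()
        .copied()
        .ok_or_else(|| anyhow::anyhow!("audit command not registered"))
}
```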
-#[async_trait] -pub trait BuckdServerDependencies: Send + Sync + 'static { - async fn audit( - &self, - ctx: &dyn ServerCommandContextTrait, - partial_result_dispatcher: PartialResultDispatcher, - req: buck2_cli_proto::GenericRequest, - ) -> anyhow::Result; - async fn starlark( - &self, - ctx: &dyn ServerCommandContextTrait, - partial_result_dispatcher: PartialResultDispatcher, - req: buck2_cli_proto::GenericRequest, - ) -> anyhow::Result; - async fn profile( - &self, - ctx: &dyn ServerCommandContextTrait, - partial_result_dispatcher: PartialResultDispatcher, - req: buck2_cli_proto::ProfileRequest, - ) -> anyhow::Result; - async fn docs( - &self, - ctx: &dyn ServerCommandContextTrait, - partial_result_dispatcher: PartialResultDispatcher, - req: buck2_cli_proto::UnstableDocsRequest, - ) -> anyhow::Result; -} - #[derive(Clone)] struct BuckCheckAuthTokenInterceptor { auth_token: String, @@ -208,19 +196,11 @@ impl Interceptor for BuckCheckAuthTokenInterceptor { Some(token) => token, None => return Err(Status::unauthenticated("missing auth token")), }; - if !constant_time_eq::constant_time_eq( - token.as_bytes(), - self.auth_token.as_str().as_bytes(), - ) { + if !constant_time_eq::constant_time_eq(token.as_bytes(), self.auth_token.as_bytes()) { return Err(Status::unauthenticated("invalid auth token")); } - static FAIL_AUTH: EnvHelper = EnvHelper::new("BUCK2_TEST_FAIL_BUCKD_AUTH"); - if FAIL_AUTH - .get_copied() - .unwrap_or_default() - .unwrap_or_default() - { + if buck2_env_anyhow!("BUCK2_TEST_FAIL_BUCKD_AUTH", bool, applicability = testing).unwrap() { return Err(Status::unauthenticated("injected auth error")); } @@ -240,9 +220,9 @@ pub(crate) struct BuckdServerData { daemon_shutdown: DaemonShutdown, daemon_state: Arc, #[allocative(skip)] - command_channel: UnboundedSender<()>, + cert_state: CertState, #[allocative(skip)] - callbacks: &'static dyn BuckdServerDependencies, + command_channel: UnboundedSender<()>, #[allocative(skip)] log_reload_handle: Arc, #[allocative(skip)] @@ -266,7 +246,6 @@ impl BuckdServer { process_info: DaemonProcessInfo, base_daemon_constraints: buck2_cli_proto::DaemonConstraints, listener: Pin> + Send>>, - callbacks: &'static dyn BuckdServerDependencies, rt: Handle, ) -> anyhow::Result<()> { let now = SystemTime::now(); @@ -282,14 +261,16 @@ impl BuckdServer { // Create buck-out and potentially chdir to there. fs_util::create_dir_all(paths.buck_out_path()).context("Error creating buck_out_path")?; - let cwd = if !matches!(materializations, MaterializationMethod::Eden) { + // TODO(scottcao): make this not optional + let cwd = { let dir = WorkingDirectory::open(paths.buck_out_path())?; dir.chdir_and_promise_it_will_not_change()?; Some(dir) - } else { - None }; + let cert_state = CertState::new().await; + certs_validation_background_job(cert_state.dupe()).await; + let daemon_state = Arc::new( DaemonState::new(fb, paths, init_ctx, rt.clone(), materializations, cwd).await, ); @@ -309,8 +290,8 @@ impl BuckdServer { shutdown_channel, }, daemon_state, + cert_state, command_channel, - callbacks, log_reload_handle, rt, })); @@ -420,13 +401,41 @@ impl BuckdServer { guard, daemon_shutdown_channel, state, - } = ActiveCommand::new(&dispatch, client_ctx); + } = ActiveCommand::new(&dispatch, client_ctx.sanitized_argv.clone()); let data = daemon_state.data()?; + // Fire off a system-wide event to record the memory usage of this process. 
+ // TODO(ezgi): add it to oneshot command too + let system_warning_config = &data.system_warning_config; + dispatch.instant_event(buck2_data::SystemInfo { + system_total_memory_bytes: Some(system_memory_stats()), + memory_pressure_threshold_percent: system_warning_config + .memory_pressure_threshold_percent, + total_disk_space_bytes: disk_space_stats(daemon_state.paths.buck_out_path()) + .ok() + .map(|DiskSpaceStats { total_space, .. }| total_space), + remaining_disk_space_threshold_gb: system_warning_config + .remaining_disk_space_threshold_gb, + min_re_download_bytes_threshold: system_warning_config.min_re_download_bytes_threshold, + avg_re_download_bytes_per_sec_threshold: system_warning_config + .avg_re_download_bytes_per_sec_threshold, + min_cache_hit_threshold_percent: system_warning_config.min_cache_hit_threshold_percent, + cache_warning_min_completion_threshold_percent: system_warning_config + .cache_warning_min_completion_threshold_percent, + cache_warning_min_actions_count: system_warning_config.cache_warning_min_actions_count, + }); + // Fire off a snapshot before we start doing anything else. We use the metrics emitted here // as a baseline. - let snapshot_collector = SnapshotCollector::new(data.dupe()); + let snapshot_collector = + SnapshotCollector::new(data.dupe(), daemon_state.paths.buck_out_path()); dispatch.instant_event(Box::new(snapshot_collector.create_snapshot())); + let cert_state = self.0.cert_state.dupe(); + + // Spawn an async task to collect expensive info + // We start collecting immediately, and emit the event as soon as it is ready + let version_control_revision_collector = + version_control_revision::spawn_version_control_collector(dispatch.dupe()); let resp = streaming( req, @@ -446,13 +455,24 @@ impl BuckdServer { opts.starlark_profiler_instrumentation_override(&req)?, req.build_options(), &daemon_state.paths, + cert_state.dupe(), snapshot_collector, cancellations, )?; func(&context, PartialResultDispatcher::new(dispatch.dupe()), req).await? }; - dispatch.command_result(result_to_command_result(result)); + // Do not kill the process prematurely.
+ drop(version_control_revision_collector); + match result { + Ok(_) => dispatch.command_result(result_to_command_result(result)), + Err(e) => match check_cert_state(cert_state).await { + Some(err) => dispatch.command_result(error_to_command_result( + err.context(format!("{e:?}")), + )), + _ => dispatch.command_result(error_to_command_result(e)), + }, + } } .boxed() }, @@ -485,10 +505,13 @@ impl BuckdServer { // send signal to register new command time _ = self.0.command_channel.unbounded_send(()); - Ok(self - .run_streaming_anyhow(req, opts, func) - .await - .unwrap_or_else(error_to_response_stream)) + match self.run_streaming_anyhow(req, opts, func).await { + Ok(resp) => Ok(resp), + Err(e) => match check_cert_state(self.0.cert_state.dupe()).await { + Some(err) => Ok(error_to_response_stream(err.context(format!("{e:?}")))), + _ => Ok(error_to_response_stream(e)), + }, + } } async fn oneshot< @@ -533,10 +556,11 @@ fn convert_positive_duration(proto_duration: &prost_types::Duration) -> Result CommandResult { - let messages = vec![format!("{:?}", e)]; + let report = create_error_report(&e.into()); + let errors = vec![report]; CommandResult { - result: Some(command_result::Result::Error(CommandError { messages })), + result: Some(command_result::Result::Error(CommandError { errors })), } } @@ -668,14 +692,12 @@ where ); let (output_send, output_recv) = tokio::sync::mpsc::unbounded_channel(); - // We run the event consumer on a totally separate tokio runtime to avoid the consumer task from getting stuck behind + // We run the event consumer on a new non-tokio thread to keep the consumer task from getting stuck behind // another tokio task in its lifo task slot. See T96012305 and https://github.com/tokio-rs/tokio/issues/4323 for more // information. - let merge_task = thread::Builder::new() - .name("pump-events".to_owned()) - .spawn(move || { - pump_events(events, state, output_send); - }); + let merge_task = thread_spawn("pump-events", move || { + pump_events(events, state, output_send); + }); if let Err(e) = merge_task { return error_to_response_stream( anyhow::Error::new(e).context("failed to spawn pump-events"), @@ -729,8 +751,41 @@ where })) } +struct QueryCommandOptions { + /// `buck2_cli_proto::ProfileMode`. + profile_mode: Option, +} + +impl OneshotCommandOptions for QueryCommandOptions { + fn pre_run(&self, _server: &BuckdServer) -> Result<(), Status> { + Ok(()) + } +} + +impl StreamingCommandOptions for QueryCommandOptions { + fn starlark_profiler_instrumentation_override( + &self, + _req: &Req, + ) -> anyhow::Result { + match self.profile_mode { + None => Ok(StarlarkProfilerConfiguration::None), + Some(mode) => { + let mode = buck2_cli_proto::ProfileMode::from_i32(mode) + .internal_error_anyhow("invalid profile mode enum value")?; + Ok(StarlarkProfilerConfiguration::ProfileLoading( + proto_to_profile_mode(mode), + // We enable profiling for everything, + // but collect results only for a subset of packages.
+ UnparsedPatternPredicate::Any, + )) + } + } + } +} + type ResponseStream = Pin> + Send + Sync>>; + #[async_trait] impl DaemonApi for BuckdServer { async fn kill(&self, req: Request) -> Result, Status> { @@ -794,14 +849,20 @@ impl DaemonApi for BuckdServer { self.oneshot(req, DefaultCommandOptions, move |req| async move { let snapshot = if req.snapshot { let data = daemon_state.data()?; - Some(snapshot::SnapshotCollector::new(data.dupe()).create_snapshot()) + Some( + snapshot::SnapshotCollector::new( + data.dupe(), + daemon_state.paths.buck_out_path(), + ) + .create_snapshot(), + ) } else { None }; let extra_constraints = daemon_state.data().as_ref().ok().map(|state| { buck2_cli_proto::ExtraDaemonConstraints { - trace_io_enabled: state.io.as_any().is::(), + trace_io_enabled: TracingIoProvider::from_io(&*state.io).is_some(), materializer_state_identity: state .materializer_state_identity .as_ref() @@ -812,6 +873,15 @@ impl DaemonApi for BuckdServer { let mut daemon_constraints = self.0.base_daemon_constraints.clone(); daemon_constraints.extra = extra_constraints; + let valid_working_directory = daemon_state.validate_cwd().is_ok(); + let valid_buck_out_mount = daemon_state.validate_buck_out_mount().is_ok(); + + let io_provider = daemon_state + .data() + .as_ref() + .ok() + .map(|state| state.io.name().to_owned()); + let uptime = self.0.start_instant.elapsed(); let base = StatusResponse { process_info: Some(self.0.process_info.clone()), @@ -831,6 +901,14 @@ impl DaemonApi for BuckdServer { .as_ref() .ok() .map(|state| state.http_client.supports_vpnless()), + http2: daemon_state + .data() + .as_ref() + .ok() + .map(|state| state.http_client.http2()), + valid_working_directory: Some(valid_working_directory), + valid_buck_out_mount: Some(valid_buck_out_mount), + io_provider, ..Default::default() }; Ok(base) @@ -843,8 +921,14 @@ impl DaemonApi for BuckdServer { req: Request, ) -> Result, Status> { self.oneshot(req, DefaultCommandOptions, move |req| async move { - let FlushDepFilesRequest {} = req; - buck2_file_watcher::dep_files::flush_dep_files(); + let FlushDepFilesRequest { + retain_locally_produced_dep_files, + } = req; + if retain_locally_produced_dep_files { + buck2_file_watcher::dep_files::flush_non_local_dep_files(); + } else { + buck2_file_watcher::dep_files::flush_dep_files(); + } Ok(GenericResponse {}) }) .await @@ -956,9 +1040,10 @@ impl DaemonApi for BuckdServer { &self, req: Request, ) -> Result, Status> { + let profile_mode = req.get_ref().profile_mode; self.run_streaming( req, - DefaultCommandOptions, + QueryCommandOptions { profile_mode }, |ctx, partial_result_dispatcher, req| { Box::pin(async { OTHER_SERVER_COMMANDS @@ -1036,12 +1121,16 @@ impl DaemonApi for BuckdServer { &self, req: Request, ) -> Result, Status> { - let callbacks = self.0.callbacks; self.run_streaming( req, DefaultCommandOptions, |ctx, partial_result_dispatcher, req| { - callbacks.audit(ctx, partial_result_dispatcher, req) + Box::pin(async { + AUDIT_SERVER_COMMAND + .get()? + .audit(ctx, partial_result_dispatcher, req) + .await + }) }, ) .await @@ -1052,12 +1141,16 @@ impl DaemonApi for BuckdServer { &self, req: Request, ) -> Result, Status> { - let callbacks = self.0.callbacks; self.run_streaming( req, DefaultCommandOptions, |ctx, partial_result_dispatcher, req| { - callbacks.starlark(ctx, partial_result_dispatcher, req) + Box::pin(async { + STARLARK_SERVER_COMMAND + .get()? 
+ .starlark(ctx, partial_result_dispatcher, req) + .await + }) }, ) .await @@ -1085,20 +1178,14 @@ impl DaemonApi for BuckdServer { async fn unstable_crash( &self, - _req: Request, - ) -> Result, Status> { - panic!("explicitly requested panic (via unstable_crash)"); - } - - async fn segfault( - &self, - _req: Request, - ) -> Result, Status> { - unsafe { - std::ptr::null_mut::<&'static str>() - .write("Explicitly requested segfault (via `segfault`)") - }; - unreachable!() + req: Request, + ) -> Result, Status> { + self.oneshot( + req, + DefaultCommandOptions, + move |req| async move { crash(req) }, + ) + .await } async fn unstable_heap_dump( @@ -1106,15 +1193,28 @@ impl DaemonApi for BuckdServer { req: Request, ) -> Result, Status> { self.check_if_accepting_requests()?; + let req = req.into_inner(); - let heap_dump = memory::write_heap_to_file(&req.into_inner().destination_path); - match heap_dump { - Ok(_) => Ok(Response::new(UnstableHeapDumpResponse {})), - Err(e) => Err(Status::invalid_argument(format!( - "failed to perform heap dump: {}", - e - ))), + memory::write_heap_to_file(&req.destination_path) + .map_err(|e| Status::invalid_argument(format!("failed to perform heap dump: {}", e)))?; + if let Some(test_executor_destination_path) = req.test_executor_destination_path { + let test_executors = get_all_test_executors(); + tracing::debug!( + "currently have {} test executor(s), dumping last one to {}", + test_executors.len(), + test_executor_destination_path + ); + // TODO: Figure out a way to dump all of them and not just the last. + if let Some(test_executor) = test_executors.last() { + test_executor + .unstable_heap_dump(&test_executor_destination_path) + .await + .map_err(|e| { + Status::invalid_argument(format!("failed to perform heap dump: {}", e)) + })?; + } } + Ok(Response::new(UnstableHeapDumpResponse {})) } async fn unstable_allocator_stats( @@ -1171,7 +1271,7 @@ impl DaemonApi for BuckdServer { let client_ctx = req.get_ref().client_context()?; let trace_id = client_ctx.trace_id.parse()?; let (event_source, dispatcher) = self.0.daemon_state.prepare_events(trace_id).await?; - let active_command = ActiveCommand::new(&dispatcher, client_ctx); + let active_command = ActiveCommand::new(&dispatcher, client_ctx.sanitized_argv.clone()); (event_source, dispatcher, active_command) }; @@ -1214,28 +1314,14 @@ impl DaemonApi for BuckdServer { )) } - type UnstableDocsStream = ResponseStream; - async fn unstable_docs( - &self, - req: Request, - ) -> Result, Status> { - let callbacks = self.0.callbacks; - self.run_streaming( - req, - DefaultCommandOptions, - |ctx, partial_result_dispatcher, req| { - callbacks.docs(ctx, partial_result_dispatcher, req) - }, - ) - .await - } - type Profile2Stream = ResponseStream; async fn profile2( &self, req: Request, ) -> Result, Status> { - struct ProfileCommandOptions; + struct ProfileCommandOptions { + project_root: ProjectRoot, + } impl OneshotCommandOptions for ProfileCommandOptions {} @@ -1244,16 +1330,29 @@ impl DaemonApi for BuckdServer { &self, req: &ProfileRequest, ) -> anyhow::Result { - starlark_profiler_configuration_from_request(req) + starlark_profiler_configuration_from_request(req, &self.project_root) } } - let callbacks = self.0.callbacks; self.run_streaming( req, - ProfileCommandOptions, + ProfileCommandOptions { + project_root: self.0.daemon_state.paths.project_root().dupe(), + }, |ctx, partial_result_dispatcher, req| { - callbacks.profile(ctx, partial_result_dispatcher, req) + Box::pin(async { + match 
req.profile_opts.as_ref().expect("Missing profile opts") { + buck2_cli_proto::profile_request::ProfileOpts::TargetProfile(_) => { + profile_command(ctx, partial_result_dispatcher, req).await + } + buck2_cli_proto::profile_request::ProfileOpts::BxlProfile(_) => { + BXL_SERVER_COMMANDS + .get()? + .bxl_profile(ctx, partial_result_dispatcher, req) + .await + } + } + }) }, ) .await @@ -1267,8 +1366,8 @@ impl DaemonApi for BuckdServer { self.run_streaming( req, DefaultCommandOptions, - |context, _: PartialResultDispatcher, req| { - new_generic_command(context, req).boxed() + |context, partial: PartialResultDispatcher, req| { + new_generic_command(context, req, partial).boxed() }, ) .await @@ -1409,11 +1508,12 @@ fn server_shutdown_signal( command_receiver: UnboundedReceiver<()>, mut shutdown_receiver: UnboundedReceiver<()>, ) -> anyhow::Result> { - static TESTING_INACTIVITY_TIMEOUT: EnvHelper = - EnvHelper::new("BUCK2_TESTING_INACTIVITY_TIMEOUT"); - let mut duration = DEFAULT_INACTIVITY_TIMEOUT; - if *TESTING_INACTIVITY_TIMEOUT.get()?.unwrap_or(&false) { + if buck2_env_anyhow!( + "BUCK2_TESTING_INACTIVITY_TIMEOUT", + bool, + applicability = testing + )? { duration = Duration::from_secs(1); } @@ -1430,22 +1530,25 @@ fn server_shutdown_signal( async fn inactivity_timeout(mut command_receiver: UnboundedReceiver<()>, duration: Duration) { // this restarts the timer every time there is a new command - loop { - let command = command_receiver.next(); - let timer = tokio::time::sleep(duration); + while (timeout(duration, command_receiver.next()).await).is_ok() {} - futures::pin_mut!(command); - futures::pin_mut!(timer); +async fn certs_validation_background_job(cert_state: CertState) { + tokio::task::spawn(async move { + const CERTS_VALIDATION_INTERVAL: u64 = 60 * 60; // 1 hour + loop { + tokio::time::sleep(Duration::from_secs(CERTS_VALIDATION_INTERVAL)).await; + let result = validate_certs().await; + let mut valid = cert_state.state.lock().await; - match futures::future::select(command, timer).await { - futures::future::Either::Left(_) => continue, - futures::future::Either::Right(_) => break, - }; - } + *valid = result.is_ok(); + } + }); }
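The rewritten `inactivity_timeout` above compresses the deleted `select`/`pin_mut` loop into a single expression. A self-contained sketch of the same restart-on-activity idiom — `idle_watchdog` is an illustrative name, and a generic stream stands in for the real `UnboundedReceiver<()>`:

```rust
use std::time::Duration;

use futures::StreamExt;
use tokio::time::timeout;

// Each received command restarts the clock; the function returns once the
// stream closes or once `duration` elapses with no traffic.
async fn idle_watchdog<S>(mut commands: S, duration: Duration)
where
    S: futures::Stream<Item = ()> + Unpin,
{
    while let Ok(Some(())) = timeout(duration, commands.next()).await {}
}
```

Compared to the deleted version, each `timeout` call owns its own sleep, so there is no timer future to recreate and re-pin by hand on every iteration.

/// No-op set of command options.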
struct DefaultCommandOptions; impl OneshotCommandOptions for DefaultCommandOptions {} + impl StreamingCommandOptions for DefaultCommandOptions {} diff --git a/app/buck2_server/src/daemon/state.rs b/app/buck2_server/src/daemon/state.rs index 7717464adcfec..b0d08ed366cc7 100644 --- a/app/buck2_server/src/daemon/state.rs +++ b/app/buck2_server/src/daemon/state.rs @@ -19,25 +19,27 @@ use anyhow::Context; use buck2_build_api::spawner::BuckSpawner; use buck2_cli_proto::unstable_dice_dump_request::DiceDumpFormat; use buck2_common::cas_digest::DigestAlgorithm; -use buck2_common::cas_digest::DigestAlgorithmKind; -use buck2_common::http::HttpClient; -use buck2_common::http::HttpClientBuilder; +use buck2_common::cas_digest::DigestAlgorithmFamily; use buck2_common::ignores::ignore_set::IgnoreSet; +use buck2_common::init::DaemonStartupConfig; +use buck2_common::init::SystemWarningConfig; +use buck2_common::init::Timeout; use buck2_common::invocation_paths::InvocationPaths; use buck2_common::io::IoProvider; use buck2_common::legacy_configs::cells::BuckConfigBasedCells; -use buck2_common::result::SharedResult; -use buck2_common::result::ToSharedResultExt; +use buck2_common::legacy_configs::key::BuckconfigKeyRef; +use buck2_core::buck2_env_anyhow; use buck2_core::cells::name::CellName; -use buck2_core::env_helper::EnvHelper; use buck2_core::facebook_only; use buck2_core::fs::cwd::WorkingDirectory; use buck2_core::fs::project::ProjectRoot; +use buck2_core::fs::project_rel_path::ProjectRelativePath; use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; +use buck2_core::is_open_source; use buck2_core::rollout_percentage::RolloutPercentage; use buck2_core::tag_result; use buck2_events::dispatch::EventDispatcher; -use buck2_events::sink::scribe; +use buck2_events::sink::remote; use buck2_events::sink::tee::TeeSink; use buck2_events::source::ChannelEventSource; use buck2_events::EventSinkWithStats; @@ -47,6 +49,7 @@ use buck2_execute::execute::blocking::BuckBlockingExecutor; use buck2_execute::materialize::materializer::MaterializationMethod; use buck2_execute::materialize::materializer::Materializer; use buck2_execute::re::manager::ReConnectionManager; +use buck2_execute_impl::materializers::deferred::clean_stale::CleanStaleConfig; use buck2_execute_impl::materializers::deferred::AccessTimesUpdates; use buck2_execute_impl::materializers::deferred::DeferredMaterializer; use buck2_execute_impl::materializers::deferred::DeferredMaterializerConfigs; @@ -58,6 +61,8 @@ use buck2_execute_impl::materializers::sqlite::MaterializerStateSqliteDb; use buck2_execute_impl::re::paranoid_download::ParanoidDownloader; use buck2_file_watcher::file_watcher::FileWatcher; use buck2_forkserver::client::ForkserverClient; +use buck2_http::HttpClient; +use buck2_http::HttpClientBuilder; use buck2_re_configuration::RemoteExecutionStaticMetadata; use buck2_re_configuration::RemoteExecutionStaticMetadataImpl; use buck2_server_ctx::concurrency::ConcurrencyHandler; @@ -89,7 +94,7 @@ pub struct DaemonState { pub paths: InvocationPaths, /// This holds the main data shared across different commands. - pub(crate) data: SharedResult>, + pub(crate) data: buck2_error::Result>, #[allocative(skip)] rt: Handle, @@ -106,7 +111,7 @@ pub struct DaemonStateData { /// (or DaemonState) itself and instead they should be represented on the computation graph. 
/// /// The DICE graph is held by the concurrency handler to manage locking for concurrent commands - pub(crate) dice_manager: ConcurrencyHandler, + pub(crate) dice_manager: Arc, /// Synced every time we run a command. pub(crate) file_watcher: Arc, @@ -170,6 +175,15 @@ pub struct DaemonStateData { /// Spawner pub spawner: Arc, + + /// Tags to be logged per command. + pub tags: Vec, + + /// Config used to display system warnings + pub system_warning_config: SystemWarningConfig, + + /// TODO(cjhopman): Modifies action digest, remove after migration + pub new_style_scratch_path: bool, } impl DaemonStateData { @@ -209,7 +223,7 @@ impl DaemonState { tracing::info!("Daemon state is ready."); - let data = data.shared_error(); + let data = data.map_err(buck2_error::Error::from); DaemonState { fb, @@ -229,33 +243,77 @@ impl DaemonState { rt: Handle, materializations: MaterializationMethod, ) -> anyhow::Result> { + if buck2_env_anyhow!( + "BUCK2_TEST_INIT_DAEMON_ERROR", + bool, + applicability = testing + )? { + // TODO(minglunli): Errors here don't actually make it to invocation records which should be fixed + return Err(anyhow::anyhow!("Injected init daemon error")); + } + let daemon_state_data_rt = rt.clone(); rt.spawn(async move { let fs = paths.project_root().clone(); tracing::info!("Reading config..."); - let legacy_cells = BuckConfigBasedCells::parse(&fs)?; + let legacy_cells = BuckConfigBasedCells::parse_with_config_args( + &fs, + &[], + ProjectRelativePath::empty(), + ) + .await?; tracing::info!("Starting..."); - let (legacy_configs, cells) = - (legacy_cells.configs_by_name, legacy_cells.cell_resolver); - - let root_config = legacy_configs - .get(cells.root_cell()) - .context("No config for root cell")?; + let cells = &legacy_cells.cell_resolver; + let root_config = &legacy_cells + .parse_single_cell(cells.root_cell(), &fs) + .await?; - static DEFAULT_DIGEST_ALGORITHM: EnvHelper = - EnvHelper::new("BUCK_DEFAULT_DIGEST_ALGORITHM"); + let buffer_size = root_config + .parse(BuckconfigKeyRef { + section: "buck2", + property: "event_log_buffer_size", + })? + .unwrap_or(10000); + let retry_backoff = Duration::from_millis( + root_config + .parse(BuckconfigKeyRef { + section: "buck2", + property: "event_log_retry_backoff_duration_ms", + })? + .unwrap_or(500), + ); + let retry_attempts = root_config + .parse(BuckconfigKeyRef { + section: "buck2", + property: "event_log_retry_attempts", + })? 
+ .unwrap_or(5); + let message_batch_size = root_config.parse(BuckconfigKeyRef { + section: "buck2", + property: "event_log_message_batch_size", + })?; + let scribe_sink = Self::init_scribe_sink( + fb, + buffer_size, + retry_backoff, + retry_attempts, + message_batch_size, + ) + .context("failed to init scribe sink")?; let default_digest_algorithm = - DEFAULT_DIGEST_ALGORITHM.get_copied()?.unwrap_or_else(|| { - if buck2_core::is_open_source() { - DigestAlgorithmKind::Sha256 - } else { - DigestAlgorithmKind::Sha1 - } - }); + buck2_env_anyhow!("BUCK_DEFAULT_DIGEST_ALGORITHM", type=DigestAlgorithmFamily)?; + + let default_digest_algorithm = default_digest_algorithm.unwrap_or_else(|| { + if buck2_core::is_open_source() { + DigestAlgorithmFamily::Sha256 + } else { + DigestAlgorithmFamily::Sha1 + } + }); let digest_algorithms = init_ctx .daemon_startup_config @@ -264,7 +322,7 @@ impl DaemonState { .map(|algos| { algos .split(',') - .map(DigestAlgorithmKind::from_str) + .map(DigestAlgorithmFamily::from_str) .collect::>() }) .transpose() @@ -289,50 +347,77 @@ impl DaemonState { root_config, )?); - let ignore_specs: HashMap = legacy_configs - .iter() - .map(|(cell, config)| { - Ok(( - cell, - IgnoreSet::from_ignore_spec( - config.get("project", "ignore").unwrap_or(""), - cells.is_root_cell(cell), - )?, - )) - }) - .collect::>()?; + let mut ignore_specs: HashMap = HashMap::new(); + for (cell, _) in cells.cells() { + let config = legacy_cells.parse_single_cell(cell, &fs).await?; + ignore_specs.insert( + cell, + IgnoreSet::from_ignore_spec( + config + .get(BuckconfigKeyRef { + section: "project", + property: "ignore", + }) + .unwrap_or(""), + cells.is_root_cell(cell), + )?, + ); + } let disk_state_options = DiskStateOptions::new(root_config, materializations.dupe())?; let blocking_executor = Arc::new(BuckBlockingExecutor::default_concurrency(fs.dupe())?); let cache_dir_path = paths.cache_dir_path(); let valid_cache_dirs = paths.valid_cache_dirs(); - let fs_duped = fs.dupe(); let deferred_materializer_configs = { let defer_write_actions = root_config - .parse::("buck2", "defer_write_actions")? + .parse::(BuckconfigKeyRef { + section: "buck2", + property: "defer_write_actions", + })? .unwrap_or_else(RolloutPercentage::never) .roll(); // RE will refresh any TTL < 1 hour, so we check twice an hour and refresh any TTL // < 1 hour. let ttl_refresh_frequency = root_config - .parse("buck2", "ttl_refresh_frequency_seconds")? + .parse(BuckconfigKeyRef { + section: "buck2", + property: "ttl_refresh_frequency_seconds", + })? .unwrap_or(1800); let ttl_refresh_min_ttl = root_config - .parse("buck2", "ttl_refresh_min_ttl_seconds")? + .parse(BuckconfigKeyRef { + section: "buck2", + property: "ttl_refresh_min_ttl_seconds", + })? .unwrap_or(3600); let ttl_refresh_enabled = root_config - .parse::("buck2", "ttl_refresh_enabled")? + .parse::(BuckconfigKeyRef { + section: "buck2", + property: "ttl_refresh_enabled", + })? .unwrap_or_else(RolloutPercentage::never) .roll(); let update_access_times = AccessTimesUpdates::try_new_from_config_value( - root_config.get("buck2", "update_access_times"), + root_config.get(BuckconfigKeyRef { + section: "buck2", + property: "update_access_times", + }), )?; + let verbose_materializer_log = root_config + .parse(BuckconfigKeyRef { + section: "buck2", + property: "verbose_materializer_event_log", + })? 
+ .unwrap_or(false); + + let clean_stale_config = CleanStaleConfig::from_buck_config(root_config)?; + DeferredMaterializerConfigs { materialize_final_artifacts: matches!( materializations, @@ -345,6 +430,8 @@ impl DaemonState { enabled: ttl_refresh_enabled, }, update_access_times, + verbose_materializer_log, + clean_stale_config, } }; @@ -352,7 +439,7 @@ impl DaemonState { create_io_provider( fb, fs.dupe(), - legacy_configs.get(cells.root_cell()).ok(), + root_config, digest_config.cas_digest_config(), init_ctx.enable_trace_io, ), @@ -360,7 +447,7 @@ impl DaemonState { // Using `execute_io_inline` is just out of convenience. // It doesn't really matter what's used here since there's no IO-heavy // operations on daemon startup - delete_unknown_disk_state(&cache_dir_path, &valid_cache_dirs, fs_duped) + delete_unknown_disk_state(&cache_dir_path, &valid_cache_dirs) }), maybe_initialize_materializer_sqlite_db( &disk_state_options, @@ -368,17 +455,16 @@ impl DaemonState { blocking_executor.dupe() as Arc, root_config, &deferred_materializer_configs, - fs.clone(), digest_config, &init_ctx, ), ) .await?; - let http_client = - HttpClientBuilder::from_startup_config(&init_ctx.daemon_startup_config) - .context("Error creating HTTP client")? - .build(); + let http_client = http_client_from_startup_config(&init_ctx.daemon_startup_config) + .await + .context("Error creating HTTP client")? + .build(); let materializer_state_identity = materializer_db.as_ref().map(|d| d.identity().clone()); @@ -387,13 +473,20 @@ impl DaemonState { fb, false, 10, - static_metadata, + static_metadata.dupe(), Some(paths.re_logs_dir()), paths.buck_out_path(), init_ctx.daemon_startup_config.paranoid, )); + // Used only to dispatch events to scribe that are not associated with a specific command (ex. materializer clean up events) + let daemon_dispatcher = if let Some(sink) = scribe_sink.dupe() { + EventDispatcher::new(TraceId::null(), sink.to_event_sync()) + } else { + // If needed this could log to a sink that redirects to a daemon event log (maybe `~/.buck/buckd/repo-path/event-log`) + // but for now seems fine to drop events if scribe isn't enabled. + EventDispatcher::null() + }; let materializer = Self::create_materializer( - fb, io.project_root().dupe(), digest_config, paths.buck_out_dir(), @@ -404,12 +497,17 @@ impl DaemonState { materializer_db, materializer_state, http_client.dupe(), + daemon_dispatcher, )?; // Create this after the materializer because it'll want to write to buck-out, and an Eden // materializer would create buck-out now. - let forkserver = - maybe_launch_forkserver(root_config, &paths.forkserver_state_dir()).await?; + let forkserver = maybe_launch_forkserver( + root_config, + &paths.forkserver_state_dir(), + &init_ctx.daemon_startup_config.resource_control, + ) + .await?; let dice = init_ctx .construct_dice(io.dupe(), digest_config, root_config) @@ -433,39 +531,27 @@ impl DaemonState { })?; let hash_all_commands = root_config - .parse::("buck2", "hash_all_commands")? + .parse::(BuckconfigKeyRef { + section: "buck2", + property: "hash_all_commands", + })? .unwrap_or_else(RolloutPercentage::never) .roll(); let use_network_action_output_cache = root_config - .parse("buck2", "use_network_action_output_cache")? + .parse(BuckconfigKeyRef { + section: "buck2", + property: "use_network_action_output_cache", + })? .unwrap_or(false); let create_unhashed_outputs_lock = Arc::new(Mutex::new(())); - let buffer_size = root_config - .parse("buck2", "event_log_buffer_size")? 
-            .unwrap_or(10000);
-        let retry_backoff = Duration::from_millis(
-            root_config
-                .parse("buck2", "event_log_retry_backoff_duration_ms")?
-                .unwrap_or(500),
-        );
-        let retry_attempts = root_config
-            .parse("buck2", "event_log_retry_attempts")?
-            .unwrap_or(5);
-        let message_batch_size = root_config.parse("buck2", "event_log_message_batch_size")?;
-        let scribe_sink = Self::init_scribe_sink(
-            fb,
-            buffer_size,
-            retry_backoff,
-            retry_attempts,
-            message_batch_size,
-        )
-        .context("failed to init scribe sink")?;
-
         let enable_restarter = root_config
-            .parse::<RolloutPercentage>("buck2", "restarter")?
+            .parse::<RolloutPercentage>(BuckconfigKeyRef {
+                section: "buck2",
+                property: "restarter",
+            })?
             .unwrap_or_else(RolloutPercentage::never)
             .roll();
@@ -480,9 +566,41 @@ impl DaemonState {
             None
         };

+        let remote_dep_files_enabled = root_config
+            .parse(BuckconfigKeyRef {
+                section: "build",
+                property: "remote_dep_file_cache_enabled",
+            })?
+            .unwrap_or(false);
+
+        let tags = vec![
+            format!("dice-detect-cycles:{}", dice.detect_cycles().variant_name()),
+            format!("which-dice:{}", dice.which_dice().variant_name()),
+            format!("hash-all-commands:{}", hash_all_commands),
+            format!(
+                "sqlite-materializer-state:{}",
+                disk_state_options.sqlite_materializer_state
+            ),
+            format!("paranoid:{}", paranoid.is_some()),
+            format!("remote-dep-files:{}", remote_dep_files_enabled),
+            #[cfg(fbcode_build)]
+            format!(
+                "respect-file-symlinks:{}",
+                static_metadata.respect_file_symlinks
+            ),
+        ];
+        let system_warning_config = SystemWarningConfig::from_config(root_config)?;
         // Kick off an initial sync eagerly. This gets Watchman to start watching the path we care
         // about (potentially kicking off an initial crawl).
+        // TODO(minglunli): Modifies action digest, remove after confirming bvb works fine
+        let new_style_scratch_path = root_config
+            .parse(BuckconfigKeyRef {
+                section: "buck2",
+                property: "new_style_scratch_path",
+            })?
+            .unwrap_or(false);
+
         // disable the eager spawn for watchman until we fix dice commit to avoid a panic TODO(bobyf)
         // tokio::task::spawn(watchman_query.sync());
         Ok(Arc::new(DaemonStateData {
@@ -495,6 +613,7 @@ impl DaemonState {
             forkserver,
             scribe_sink,
             hash_all_commands,
+            new_style_scratch_path,
             use_network_action_output_cache,
             disk_state_options,
             start_time: std::time::Instant::now(),
@@ -504,13 +623,14 @@ impl DaemonState {
             http_client,
             paranoid,
             spawner: Arc::new(BuckSpawner::new(daemon_state_data_rt)),
+            tags,
+            system_warning_config,
         }))
     })
    .await?
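        // ---- Editor's illustrative sketch (not part of the original patch) ----
        // This diff migrates every positional `parse("section", "key")` call to the
        // struct-style `BuckconfigKeyRef { section, property }`, so the two strings
        // can no longer be swapped silently. The recurring read-with-default shape,
        // assuming `parse` still returns `anyhow::Result<Option<T>>` as the
        // surrounding code implies:
        //
        //     let buffer_size: usize = root_config
        //         .parse(BuckconfigKeyRef {
        //             section: "buck2",
        //             property: "event_log_buffer_size",
        //         })?
        //         .unwrap_or(10000);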
} fn create_materializer( - fb: FacebookInit, fs: ProjectRoot, digest_config: DigestConfig, buck_out_path: ProjectRelativePathBuf, @@ -521,6 +641,7 @@ impl DaemonState { materializer_db: Option, materializer_state: Option, http_client: HttpClient, + daemon_dispatcher: EventDispatcher, ) -> anyhow::Result> { match materializations { MaterializationMethod::Immediate => Ok(Arc::new(ImmediateMaterializer::new( @@ -541,50 +662,9 @@ impl DaemonState { materializer_db, materializer_state, http_client, + daemon_dispatcher, )?)) } - MaterializationMethod::Eden => { - #[cfg(any(fbcode_build, cargo_internal_build))] - { - use buck2_execute::materialize::eden_api::EdenBuckOut; - use buck2_execute_impl::materializers::eden::EdenMaterializer; - - let buck_out_mount = fs.root().join(&buck_out_path); - - if cfg!(unix) { - Ok(Arc::new( - EdenMaterializer::new( - fs, - digest_config, - re_client_manager.dupe(), - blocking_executor, - EdenBuckOut::new( - fb, - buck_out_path, - buck_out_mount, - re_client_manager, - ) - .context("Failed to create EdenFS-based buck-out")?, - http_client, - ) - .context("Failed to create Eden materializer")?, - )) - } else { - Err(anyhow::anyhow!( - "`eden` materialization method is not supported on Windows" - )) - } - } - #[cfg(not(any(fbcode_build, cargo_internal_build)))] - { - let _unused = buck_out_path; - let _unused = fs; - let _unused = fb; - Err(anyhow::anyhow!( - "`eden` materialization method is only supported in Meta internal builds" - )) - } - } } } @@ -596,7 +676,7 @@ impl DaemonState { message_batch_size: Option, ) -> anyhow::Result>> { facebook_only(); - scribe::new_thrift_scribe_sink_if_enabled( + remote::new_remote_event_sink_if_enabled( fb, buffer_size, retry_backoff, @@ -611,7 +691,7 @@ impl DaemonState { pub async fn prepare_events( &self, trace_id: TraceId, - ) -> SharedResult<(ChannelEventSource, EventDispatcher)> { + ) -> buck2_error::Result<(ChannelEventSource, EventDispatcher)> { // facebook only: logging events to Scribe. facebook_only(); let (events, sink) = buck2_events::create_source_sink_pair(); @@ -631,7 +711,7 @@ impl DaemonState { &self, dispatcher: EventDispatcher, drop_guard: ActiveCommandDropGuard, - ) -> SharedResult { + ) -> buck2_error::Result { let data = self.data(); dispatcher.instant_event(buck2_data::RestartConfiguration { @@ -640,7 +720,7 @@ impl DaemonState { tag_result!( "eden_not_connected", - check_working_dir::check_working_dir(), + check_working_dir::check_working_dir().map_err(|e| e.into()), quiet: true, daemon_in_memory_state_is_corrupted: true, task: false @@ -653,28 +733,9 @@ impl DaemonState { .context("Error validating buck-out mount")?; let data = data?; - - let tags = vec![ - format!( - "dice-detect-cycles:{}", - data.dice_manager - .unsafe_dice() - .detect_cycles() - .variant_name() - ), - format!( - "which-dice:{}", - data.dice_manager.unsafe_dice().which_dice().variant_name() - ), - format!("hash-all-commands:{}", data.hash_all_commands), - format!( - "sqlite-materializer-state:{}", - data.disk_state_options.sqlite_materializer_state - ), - format!("paranoid:{}", data.paranoid.is_some()), - ]; - - dispatcher.instant_event(buck2_data::TagEvent { tags }); + dispatcher.instant_event(buck2_data::TagEvent { + tags: data.tags.clone(), + }); // Sync any FS changes and invalidate DICE state if necessary. Get the Eden // version of the underlying system in parallel if available. @@ -697,7 +758,7 @@ impl DaemonState { Ok(self.data.dupe()?) 
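        // ---- Editor's illustrative sketch (not part of the original patch) ----
        // With this change the daemon-wide tag strings are computed once during
        // `init` and stored on `DaemonStateData`; each command only clones and
        // dispatches them via the `TagEvent` above, instead of rebuilding them from
        // `dice_manager` per command. Minimal shape, with a hypothetical tag value:
        //
        //     let tags = vec![format!("hash-all-commands:{}", true)];
        //     dispatcher.instant_event(buck2_data::TagEvent { tags });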
} - fn validate_cwd(&self) -> anyhow::Result<()> { + pub fn validate_cwd(&self) -> anyhow::Result<()> { if let Some(working_directory) = &self.working_directory { let res = working_directory.is_stale().and_then(|stale| { if stale { @@ -713,7 +774,7 @@ impl DaemonState { tag_result!( "stale_cwd", - res, + res.map_err(|e| e.into()), quiet: true, daemon_in_memory_state_is_corrupted: true, task: false @@ -723,8 +784,8 @@ impl DaemonState { Ok(()) } - fn validate_buck_out_mount(&self) -> anyhow::Result<()> { - #[cfg(any(fbcode_build, cargo_internal_build))] + pub fn validate_buck_out_mount(&self) -> anyhow::Result<()> { + #[cfg(fbcode_build)] { use buck2_core::fs::fs_util; use buck2_core::soft_error; @@ -765,6 +826,7 @@ impl DaemonState { This will likely lead to failed or slow builds. \ To remediate, run `eden redirect fixup`." ) + .into() )?; } @@ -772,19 +834,15 @@ impl DaemonState { } } -fn convert_algorithm_kind(kind: DigestAlgorithmKind) -> anyhow::Result { +fn convert_algorithm_kind(kind: DigestAlgorithmFamily) -> anyhow::Result { anyhow::Ok(match kind { - DigestAlgorithmKind::Sha1 => DigestAlgorithm::Sha1, - DigestAlgorithmKind::Sha256 => DigestAlgorithm::Sha256, - DigestAlgorithmKind::Blake3 => DigestAlgorithm::Blake3, - DigestAlgorithmKind::Blake3Keyed => { + DigestAlgorithmFamily::Sha1 => DigestAlgorithm::Sha1, + DigestAlgorithmFamily::Sha256 => DigestAlgorithm::Sha256, + DigestAlgorithmFamily::Blake3 => DigestAlgorithm::Blake3, + DigestAlgorithmFamily::Blake3Keyed => { #[cfg(fbcode_build)] { - let key = blake3_constants::BLAKE3_HASH_KEY - .as_bytes() - .try_into() - .context("BLAKE3_HASH_KEY is the wrong size")?; - + let key = blake3_constants::BLAKE3_HASH_KEY; DigestAlgorithm::Blake3Keyed { key } } @@ -800,3 +858,133 @@ fn convert_algorithm_kind(kind: DigestAlgorithmKind) -> anyhow::Result anyhow::Result { + let mut builder = if is_open_source() { + HttpClientBuilder::oss().await? + } else { + HttpClientBuilder::internal().await? 
+ }; + builder.with_max_redirects(config.http.max_redirects.unwrap_or(DEFAULT_MAX_REDIRECTS)); + builder.with_http2(config.http.http2); + match config.http.connect_timeout() { + Timeout::Value(d) => { + builder.with_connect_timeout(Some(d)); + } + Timeout::Default => { + builder.with_connect_timeout(Some(Duration::from_millis(DEFAULT_CONNECT_TIMEOUT_MS))); + } + _ => {} + } + match config.http.read_timeout() { + Timeout::Value(d) => { + builder.with_read_timeout(Some(d)); + } + Timeout::Default => { + builder.with_read_timeout(Some(Duration::from_millis(DEFAULT_READ_TIMEOUT_MS))); + } + _ => {} + } + match config.http.write_timeout() { + Timeout::Value(d) => { + builder.with_write_timeout(Some(d)); + } + _ => {} + } + + Ok(builder) +} + +#[cfg(test)] +mod tests { + + use buck2_common::legacy_configs::configs::testing::parse; + use indoc::indoc; + + use super::*; + + #[tokio::test] + async fn test_from_startup_config_defaults_internal() -> anyhow::Result<()> { + let builder = + http_client_from_startup_config(&DaemonStartupConfig::testing_empty()).await?; + assert_eq!(DEFAULT_MAX_REDIRECTS, builder.max_redirects().unwrap()); + assert_eq!( + builder.supports_vpnless(), + buck2_certs::certs::supports_vpnless() + ); + assert_eq!( + Some(Duration::from_millis(DEFAULT_CONNECT_TIMEOUT_MS)), + builder.connect_timeout() + ); + assert_eq!( + Some(Duration::from_millis(DEFAULT_READ_TIMEOUT_MS)), + builder.read_timeout() + ); + assert_eq!(None, builder.write_timeout()); + + Ok(()) + } + + #[tokio::test] + async fn test_from_startup_config_overrides() -> anyhow::Result<()> { + let config = parse( + &[( + "config", + indoc!( + r#" + [http] + max_redirects = 5 + connect_timeout_ms = 10 + write_timeout_ms = 5 + "# + ), + )], + "config", + )?; + let startup_config = DaemonStartupConfig::new(&config)?; + let builder = http_client_from_startup_config(&startup_config).await?; + assert_eq!(5, builder.max_redirects().unwrap()); + assert_eq!(Some(Duration::from_millis(10)), builder.connect_timeout()); + assert_eq!( + Some(Duration::from_millis(DEFAULT_READ_TIMEOUT_MS)), + builder.read_timeout() + ); + assert_eq!(Some(Duration::from_millis(5)), builder.write_timeout()); + + Ok(()) + } + + #[tokio::test] + async fn test_from_startup_config_zero_for_unset() -> anyhow::Result<()> { + let config = parse( + &[( + "config", + indoc!( + r#" + [http] + connect_timeout_ms = 0 + "#, + ), + )], + "config", + )?; + let startup_config = DaemonStartupConfig::new(&config)?; + let builder = http_client_from_startup_config(&startup_config).await?; + assert_eq!(None, builder.connect_timeout()); + assert_eq!( + Some(Duration::from_millis(DEFAULT_READ_TIMEOUT_MS)), + builder.read_timeout() + ); + assert_eq!(None, builder.write_timeout()); + + Ok(()) + } +} diff --git a/app/buck2_server/src/dice_tracker.rs b/app/buck2_server/src/dice_tracker.rs index 1d59961e2a27d..03cfeb1633b8d 100644 --- a/app/buck2_server/src/dice_tracker.rs +++ b/app/buck2_server/src/dice_tracker.rs @@ -11,9 +11,11 @@ use std::collections::HashMap; use std::time::Duration; use allocative::Allocative; +use buck2_core::buck2_env_anyhow; use buck2_data::*; use buck2_events::dispatch::with_dispatcher_async; use buck2_events::dispatch::EventDispatcher; +use buck2_util::threads::thread_spawn; use dice::DiceEvent; use dice::DiceEventListener; use dupe::Dupe; @@ -36,30 +38,36 @@ pub struct BuckDiceTracker { event_forwarder: UnboundedSender, } -const DICE_SNAPSHOT_INTERVAL: Duration = Duration::from_millis(500); - impl BuckDiceTracker { - pub fn new(events: 
EventDispatcher) -> Self { + pub fn new(events: EventDispatcher) -> anyhow::Result { let (event_forwarder, receiver) = mpsc::unbounded(); + let snapshot_interval = + buck2_env_anyhow!("BUCK2_DICE_SNAPSHOT_INTERVAL_MS", type=u64, default = 500) + .map(Duration::from_millis)?; - std::thread::spawn(move || { + thread_spawn("buck2-dice-tracker", move || { let runtime = tokio::runtime::Builder::new_current_thread() .enable_all() .build() .unwrap(); runtime.block_on(with_dispatcher_async( events.dupe(), - Self::run_task(events, receiver), + Self::run_task(events, receiver, snapshot_interval), )) - }); + }) + .unwrap(); - Self { event_forwarder } + Ok(Self { event_forwarder }) } - async fn run_task(events: EventDispatcher, mut receiver: UnboundedReceiver) { + async fn run_task( + events: EventDispatcher, + mut receiver: UnboundedReceiver, + snapshot_interval: Duration, + ) { let mut needs_update = false; let mut states = HashMap::new(); - let mut interval = tokio::time::interval(DICE_SNAPSHOT_INTERVAL); + let mut interval = tokio::time::interval(snapshot_interval); interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay); // This will loop until the sender side of the channel is dropped. loop { @@ -79,6 +87,12 @@ impl BuckDiceTracker { Some(DiceEvent::CheckDepsFinished{key_type}) => { states.entry(key_type).or_insert_with(DiceKeyState::default).check_deps_finished += 1; } + Some(DiceEvent::ComputeStarted{key_type}) => { + states.entry(key_type).or_insert_with(DiceKeyState::default).compute_started += 1; + } + Some(DiceEvent::ComputeFinished{key_type}) => { + states.entry(key_type).or_insert_with(DiceKeyState::default).compute_finished += 1; + } None => { // This indicates that the sender side has been dropped and we can exit. break; diff --git a/app/buck2_server/src/file_status.rs b/app/buck2_server/src/file_status.rs index 22943972ac6bf..f595c4be6906c 100644 --- a/app/buck2_server/src/file_status.rs +++ b/app/buck2_server/src/file_status.rs @@ -14,7 +14,7 @@ use std::path::Path; use async_recursion::async_recursion; use async_trait::async_trait; use buck2_common::dice::cells::HasCellResolver; -use buck2_common::dice::file_ops::HasFileOps; +use buck2_common::dice::file_ops::DiceFileOps; use buck2_common::file_ops::FileOps; use buck2_common::file_ops::RawPathMetadata; use buck2_common::file_ops::RawSymlink; @@ -60,7 +60,7 @@ struct FileStatusResult<'a> { /// Number of ones that were bad bad: usize, /// Whether to write matches - verbose: bool, + show_matches: bool, stdout: StdoutPartialOutput<'a>, } @@ -86,7 +86,7 @@ impl FileStatusResult<'_> { kind, path, fs, dice, )?; self.bad += 1; - } else if self.verbose { + } else if self.show_matches { writeln!(self.stdout, "Match: {} at {}: {}", kind, path, fs)?; } @@ -119,20 +119,19 @@ impl ServerCommandTemplate for FileStatusServerCommand { &self, server_ctx: &dyn ServerCommandContextTrait, mut stdout: PartialResultDispatcher, - ctx: DiceTransaction, + mut ctx: DiceTransaction, ) -> anyhow::Result { - let file_ops = ctx.file_ops(); - let cell_resolver = ctx.get_cell_resolver().await?; + let cell_resolver = &ctx.get_cell_resolver().await?; let project_root = server_ctx.project_root(); let digest_config = ctx.global_data().get_digest_config(); - let io = FsIoProvider::new(project_root.dupe(), digest_config.cas_digest_config()); + let io = &FsIoProvider::new(project_root.dupe(), digest_config.cas_digest_config()); let stdout = stdout.as_writer(); let mut result = FileStatusResult { checked: 0, bad: 0, - verbose: self.req.verbose, + 
show_matches: self.req.show_matches, stdout, }; @@ -141,7 +140,11 @@ impl ServerCommandTemplate for FileStatusServerCommand { for path in &self.req.paths { let path = project_root.relativize_any(AbsPath::new(Path::new(path))?)?; writeln!(&mut stderr, "Check file status: {}", path)?; - check_file_status(&file_ops, &cell_resolver, &io, &path, &mut result).await?; + let result = &mut result; + ctx.with_linear_recompute(|ctx| async move { + check_file_status(&DiceFileOps(&ctx), cell_resolver, io, &path, result).await + }) + .await?; } if result.bad != 0 { Err(anyhow::anyhow!("Failed with {} mismatches", result.bad)) @@ -165,14 +168,14 @@ impl ServerCommandTemplate for FileStatusServerCommand { async fn check_file_status( file_ops: &dyn FileOps, cell_resolver: &CellResolver, - io: &FsIoProvider, + io: &dyn IoProvider, path: &ProjectRelativePath, result: &mut FileStatusResult, ) -> anyhow::Result<()> { result.checking(); let cell_path = cell_resolver.get_cell_path(path)?; - if file_ops.is_ignored(cell_path.as_ref()).await? { + if file_ops.is_ignored(cell_path.as_ref()).await?.is_ignored() { return Ok(()); } diff --git a/app/buck2_server/src/host_info.rs b/app/buck2_server/src/host_info.rs index 3daa19ba4b066..70f7ed301841d 100644 --- a/app/buck2_server/src/host_info.rs +++ b/app/buck2_server/src/host_info.rs @@ -65,7 +65,7 @@ pub fn get_host_info( { Ok(v) => v, Err(e) => { - soft_error!("invalid_xcode_version", e)?; + soft_error!("invalid_xcode_version", e.into())?; None } } diff --git a/app/buck2_server/src/lib.rs b/app/buck2_server/src/lib.rs index e265f72b11992..b100cc8c3ebbf 100644 --- a/app/buck2_server/src/lib.rs +++ b/app/buck2_server/src/lib.rs @@ -7,14 +7,13 @@ * of this source tree. */ -#![feature(async_closure)] +#![feature(error_generic_member_access)] #![feature(try_blocks)] #![feature(once_cell_try)] +#![feature(used_with_arg)] pub mod active_commands; -pub mod builtin_docs; mod clean_stale; -mod configs; mod ctx; pub mod daemon; mod dice_tracker; @@ -30,3 +29,4 @@ pub mod profile; mod snapshot; mod subscription; mod trace_io; +mod version_control_revision; diff --git a/app/buck2_server/src/lsp.rs b/app/buck2_server/src/lsp.rs index e2e0e16975d04..beda30cc2b6b4 100644 --- a/app/buck2_server/src/lsp.rs +++ b/app/buck2_server/src/lsp.rs @@ -8,39 +8,42 @@ */ use std::collections::HashMap; +use std::collections::HashSet; use std::future::Future; use std::io; use std::io::ErrorKind; use std::path::Path; use std::thread; +use buck2_build_api::actions::artifact::get_artifact_fs::GetArtifactFs; use buck2_cli_proto::*; use buck2_common::dice::cells::HasCellResolver; -use buck2_common::dice::file_ops::HasFileOps; -use buck2_common::file_ops::FileOps; -use buck2_common::package_listing::dice::HasPackageListingResolver; -use buck2_common::result::SharedResult; +use buck2_common::dice::file_ops::DiceFileComputations; +use buck2_common::package_listing::dice::DicePackageListingResolver; use buck2_core::bzl::ImportPath; use buck2_core::cells::build_file_cell::BuildFileCell; use buck2_core::cells::cell_path::CellPathRef; use buck2_core::cells::CellResolver; use buck2_core::fs::paths::abs_path::AbsPath; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; -use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; use buck2_core::fs::project::ProjectRoot; use buck2_core::fs::project_rel_path::ProjectRelativePath; +use buck2_core::package::package_relative_path::PackageRelativePath; +use buck2_core::package::source_path::SourcePath; +use 
buck2_core::pattern::pattern::ParsedPattern; use buck2_core::pattern::pattern_type::ProvidersPatternExtra; -use buck2_core::pattern::ParsedPattern; use buck2_core::target::name::TargetName; use buck2_events::dispatch::span_async; use buck2_events::dispatch::with_dispatcher; use buck2_events::dispatch::with_dispatcher_async; +use buck2_interpreter::load_module::InterpreterCalculation; use buck2_interpreter::paths::bxl::BxlFilePath; use buck2_interpreter::paths::module::OwnedStarlarkModulePath; -use buck2_interpreter::paths::path::StarlarkPath; +use buck2_interpreter::prelude_path::prelude_path; use buck2_interpreter_for_build::interpreter::dice_calculation_delegate::HasCalculationDelegate; -use buck2_interpreter_for_build::interpreter::global_interpreter_state::HasGlobalInterpreterState; -use buck2_server_ctx::command_end::command_end; +use buck2_interpreter_for_build::interpreter::globals::base_globals; +use buck2_interpreter_for_build::interpreter::interpreter_for_cell::ParseData; +use buck2_server_ctx::commands::command_end; use buck2_server_ctx::ctx::ServerCommandContextTrait; use buck2_server_ctx::ctx::ServerCommandDiceContext; use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; @@ -53,17 +56,13 @@ use futures::FutureExt; use futures::SinkExt; use futures::StreamExt; use gazebo::prelude::StrExt; -use itertools::Itertools; use lsp_server::Connection; use lsp_server::Message; use lsp_types::Url; use starlark::analysis::find_call_name::AstModuleFindCallName; use starlark::codemap::Span; -use starlark::docs::Doc; -use starlark::docs::DocItem; +use starlark::docs::markdown::render_doc_item_no_link; use starlark::docs::DocModule; -use starlark::docs::Identifier; -use starlark::docs::Location; use starlark::errors::EvalMessage; use starlark::syntax::AstModule; use starlark_lsp::error::eval_message_to_lsp_diagnostic; @@ -76,14 +75,8 @@ use tokio::runtime::Handle; use tokio::sync::Mutex; use tokio::sync::MutexGuard; -use crate::builtin_docs::docs::get_builtin_docs; -use crate::builtin_docs::docs::get_prelude_docs; - -static DOCS_DIRECTORY_KEY: &str = "directory"; -static DOCS_BUILTIN_KEY: &str = "builtin"; - /// Errors when [`LspContext::resolve_load()`] cannot resolve a given path. -#[derive(thiserror::Error, Debug)] +#[derive(buck2_error::Error, Debug)] enum ResolveLoadError { /// The scheme provided was not correct or supported. 
#[error("Url `{}` was expected to be of type `{}`", .1, .0)] @@ -100,9 +93,9 @@ struct DocsCacheManager { } impl DocsCacheManager { - async fn new(fs: ProjectRoot, dice_ctx: DiceTransaction) -> anyhow::Result { + async fn new(fs: ProjectRoot, mut dice_ctx: DiceTransaction) -> anyhow::Result { Ok(Self { - docs_cache: Mutex::new(Self::new_docs_cache(&fs, &dice_ctx).await?), + docs_cache: Mutex::new(Self::new_docs_cache(&fs, &mut dice_ctx).await?), fs, valid_at: dice_ctx.equality_token(), }) @@ -110,7 +103,7 @@ impl DocsCacheManager { async fn get_cache( &self, - current_dice_ctx: DiceTransaction, + mut current_dice_ctx: DiceTransaction, ) -> anyhow::Result> { let mut docs_cache = self.docs_cache.lock().await; @@ -118,7 +111,7 @@ impl DocsCacheManager { match self.is_reusable(¤t_dice_ctx) { true => (), false => { - let new_docs_cache = Self::new_docs_cache(fs, ¤t_dice_ctx).await?; + let new_docs_cache = Self::new_docs_cache(fs, &mut current_dice_ctx).await?; *docs_cache = new_docs_cache } }; @@ -132,39 +125,53 @@ impl DocsCacheManager { async fn new_docs_cache<'v>( fs: &ProjectRoot, - dice_ctx: &DiceTransaction, + dice_ctx: &mut DiceTransaction, ) -> anyhow::Result { - fn flatten(docs: Vec) -> Vec { - docs.into_iter() - .flat_map(|x| match x.item { - DocItem::Module(module) => module - .members - .into_iter() - .map(|(name, v)| Doc { - id: Identifier { - name, - location: x.id.location.clone(), - }, - item: v.to_doc_item(), - custom_attrs: x.custom_attrs.clone(), - }) - .collect(), - DocItem::Object(_) => vec![], - _ => { - vec![x] - } - }) - .collect::>() - } + let mut builtin_docs = Vec::new(); let cell_resolver = dice_ctx.get_cell_resolver().await?; - let global_interpreter_state = dice_ctx.get_global_interpreter_state().await?; - let mut builtin_docs = flatten(get_builtin_docs(global_interpreter_state)?); - let builtin_names = builtin_docs.iter().map(|d| d.id.name.as_str()).collect(); - let prelude_docs = flatten(get_prelude_docs(dice_ctx, &builtin_names).await?); - builtin_docs.extend(prelude_docs); - DocsCache::new(&builtin_docs, dice_ctx, fs, &cell_resolver).await + builtin_docs.push((None, get_builtin_globals_docs())); + + let builtin_names = builtin_docs + .iter() + .flat_map(|(_, d)| d.members.keys().map(|s| s.as_str())) + .collect(); + if let Some((import_path, docs)) = get_prelude_docs(dice_ctx, &builtin_names).await? { + builtin_docs.push((Some(import_path), docs)); + } + DocsCache::new(&builtin_docs, fs, &cell_resolver).await + } +} + +fn get_builtin_globals_docs() -> DocModule { + base_globals().build().documentation() +} + +async fn get_prelude_docs( + ctx: &DiceTransaction, + existing_globals: &HashSet<&str>, +) -> anyhow::Result> { + let ctx = &mut ctx.clone(); + let cell_resolver = ctx.get_cell_resolver().await?; + let Some(prelude_path) = prelude_path(&cell_resolver)? else { + return Ok(None); + }; + let import_path = prelude_path.import_path(); + + let module = ctx.get_loaded_module_from_import_path(import_path).await?; + let frozen_module = module.env(); + let mut module_docs = frozen_module.documentation(); + + // For the prelude, we want to promote `native` symbol up one level + for (name, value) in module.extra_globals_from_prelude_for_buck_files()? 
{ + if !existing_globals.contains(&name) && !module_docs.members.contains_key(name) { + let doc = value.to_value().documentation(); + + module_docs.members.insert(name.to_owned(), doc); + } } + + Ok(Some((import_path.clone(), module_docs))) } /// Store rendered starlark representations of Doc objects for builtin symbols, @@ -177,7 +184,7 @@ struct DocsCache { native_starlark_files: HashMap, } -#[derive(thiserror::Error, Debug)] +#[derive(buck2_error::Error, Debug)] enum DocsCacheError { #[error("Duplicate global symbol `{}` detected. Existing URL was `{}`, new URL was `{}`", .name, .existing, .new)] DuplicateGlobalSymbol { @@ -189,89 +196,77 @@ enum DocsCacheError { impl DocsCache { async fn get_prelude_uri( - location: &Location, - dice_ctx: &DiceTransaction, + location: &ImportPath, fs: &ProjectRoot, cell_resolver: &CellResolver, ) -> anyhow::Result { - // We strip the "@" off of the path normally to avoid having it in the docs, - // in paths, etc, but it needs to be put back in to create an import path correctly. - let path = if location.path.contains("//") - && !location.path.starts_with('@') - && !location.path.starts_with("//") - { - format!("@{}", location.path) - } else { - location.path.to_owned() - }; - - let bfc = BuildFileCell::new(cell_resolver.root_cell()); - let calculator = dice_ctx - .get_interpreter_calculator(cell_resolver.root_cell(), bfc) - .await?; - - let root_import_path = ImportPath::new_same_cell( - cell_resolver.get_cell_path(ProjectRelativePath::new("non_existent.bzl")?)?, - )?; - let starlark_file = StarlarkPath::LoadFile(&root_import_path); - let loaded_import_path = calculator.resolve_load(starlark_file, &path).await?; - - let relative_path = - cell_resolver.resolve_path(loaded_import_path.borrow().path().as_ref())?; + let relative_path = cell_resolver.resolve_path(location.path().as_ref())?; let abs_path = fs.resolve(&relative_path); Ok(Url::from_file_path(abs_path).unwrap().try_into()?) 
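    // ---- Editor's illustrative sketch (not part of the original patch) ----
    // `get_prelude_uri` now maps an `ImportPath` straight through
    // cell-relative -> project-relative -> absolute path -> `Url` -> `LspUrl`,
    // with no interpreter round trip. The final conversion in isolation; the
    // error message is a hypothetical stand-in for the `unwrap()` used above:
    //
    //     fn to_lsp_url(abs: &std::path::Path) -> anyhow::Result<LspUrl> {
    //         let url = Url::from_file_path(abs)
    //             .map_err(|()| anyhow::anyhow!("not an absolute path: {}", abs.display()))?;
    //         Ok(LspUrl::try_from(url)?)
    //     }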
} async fn new( - builtin_symbols: &[Doc], - dice_ctx: &DiceTransaction, + builtin_symbols: &[(Option, DocModule)], fs: &ProjectRoot, cell_resolver: &CellResolver, ) -> anyhow::Result { Self::new_with_lookup(builtin_symbols, |location| async { - Self::get_prelude_uri(location, dice_ctx, fs, cell_resolver).await + Self::get_prelude_uri(location, fs, cell_resolver).await }) .await } async fn new_with_lookup< 'a, - F: Fn(&'a Location) -> Fut + 'a, + F: Fn(&'a ImportPath) -> Fut + 'a, Fut: Future>, >( - builtin_symbols: &'a [Doc], + builtin_symbols: &'a [(Option, DocModule)], location_lookup: F, ) -> anyhow::Result { let mut global_urls = HashMap::with_capacity(builtin_symbols.len()); - let mut native_starlark_files = HashMap::new(); - for doc in builtin_symbols { - let url = match &doc.id.location { - Some(l) => location_lookup(l).await?, - None => { - let filename = Path::new(&doc.id.name); - let filename = filename.with_extension(match filename.extension() { - None => "bzl".to_owned(), - Some(e) => format!("{}.bzl", e.to_str().expect("path is UTF-8")), - }); - let path = Path::new("/native") - .join(output_subdir_for_doc(doc)?.as_path()) - .join(filename); - - let url = - LspUrl::try_from(Url::parse(&format!("starlark:{}", path.display()))?)?; - - native_starlark_files.insert(url.clone(), doc.render_as_code()); - url - } - }; - if let Some(existing) = global_urls.insert(doc.id.name.clone(), url.clone()) { - return Err(DocsCacheError::DuplicateGlobalSymbol { - name: doc.id.name.clone(), + + let mut insert_global = |sym: String, url: LspUrl| { + if let Some(existing) = global_urls.insert(sym.clone(), url.clone()) { + Err(DocsCacheError::DuplicateGlobalSymbol { + name: sym, existing, new: url, } - .into()); + .into()) + } else { + anyhow::Ok(()) } + }; + + let mut native_starlark_files = HashMap::new(); + for (import_path, docs) in builtin_symbols { + match import_path { + Some(l) => { + let url = location_lookup(l).await?; + for (sym, _) in &docs.members { + insert_global(sym.clone(), url.clone())?; + } + } + None => { + for (sym, mem) in &docs.members { + let filename = Path::new(&sym); + let filename = filename.with_extension(match filename.extension() { + None => "bzl".to_owned(), + Some(e) => format!("{}.bzl", e.to_str().expect("path is UTF-8")), + }); + let path = Path::new("/native").join(filename); + + let url = + LspUrl::try_from(Url::parse(&format!("starlark:{}", path.display()))?)?; + let rendered = render_doc_item_no_link(sym, mem); + let prev = native_starlark_files.insert(url.clone(), rendered); + assert!(prev.is_none()); + + insert_global(sym.clone(), url)?; + } + } + }; } Ok(Self { global_urls, @@ -290,81 +285,6 @@ impl DocsCache { } } -#[derive(Debug, thiserror::Error)] -enum DocPathError { - #[error("Directory traversal was found in documentation path `{}` provided for `{}`", .path, .name)] - InvalidDirectory { - name: String, - path: String, - source: anyhow::Error, - }, - #[error("Invalid custom attributes were found on `{}`: {}", .name, format_custom_attr_error(.keys_and_values))] - InvalidCustomAttributes { - name: String, - keys_and_values: Vec<(String, String)>, - }, - #[error("Conflicting custom attributes were found on `{}`: {}", .name, format_custom_attr_error(.keys_and_values))] - ConflictingCustomAttributes { - name: String, - keys_and_values: Vec<(String, String)>, - }, -} - -fn format_custom_attr_error(keys_and_values: &[(String, String)]) -> String { - let mut ret = "{".to_owned(); - ret.push_str( - &keys_and_values - .iter() - .map(|(k, v)| format!("`{}` => 
`{}`", k, v)) - .join(", "), - ); - ret.push('}'); - ret -} - -/// Get the output subdirectory for a [`Doc`] based on the `directory` custom attr, if present. -pub fn output_subdir_for_doc(doc: &Doc) -> anyhow::Result { - let unknown_keys: Vec<_> = doc - .custom_attrs - .iter() - .filter(|(k, _)| *k != DOCS_DIRECTORY_KEY && *k != DOCS_BUILTIN_KEY) - .map(|(k, v)| (k.clone(), v.clone())) - .collect(); - if !unknown_keys.is_empty() { - return Err(DocPathError::InvalidCustomAttributes { - name: doc.id.name.to_owned(), - keys_and_values: unknown_keys, - } - .into()); - } - - match ( - doc.custom_attrs.get(DOCS_DIRECTORY_KEY), - doc.custom_attrs.get(DOCS_BUILTIN_KEY), - ) { - (Some(path), None) | (None, Some(path)) => { - match ForwardRelativePathBuf::new(path.to_owned()) { - Ok(fp) => Ok(fp), - Err(e) => Err(DocPathError::InvalidDirectory { - name: doc.id.name.to_owned(), - path: path.to_owned(), - source: e, - } - .into()), - } - } - (Some(dir), Some(builtin)) => Err(DocPathError::ConflictingCustomAttributes { - name: doc.id.name.to_owned(), - keys_and_values: vec![ - (DOCS_DIRECTORY_KEY.to_owned(), dir.clone()), - (DOCS_BUILTIN_KEY.to_owned(), builtin.clone()), - ], - } - .into()), - (None, None) => Ok(ForwardRelativePathBuf::new(String::new())?), - } -} - struct BuckLspContext<'a> { server_ctx: &'a dyn ServerCommandContextTrait, fs: ProjectRoot, @@ -372,7 +292,7 @@ struct BuckLspContext<'a> { runtime: Handle, } -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum BuckLspContextError { /// The scheme provided was not correct or supported. #[error("Url `{}` was expected to be of type `{}`", .1, .0)] @@ -384,7 +304,7 @@ impl<'a> BuckLspContext<'a> { server_ctx: &'a dyn ServerCommandContextTrait, ) -> anyhow::Result> { let (fs, docs_cache_manager) = server_ctx - .with_dice_ctx(async move |server_ctx, dice_ctx| { + .with_dice_ctx(|server_ctx, dice_ctx| async move { let fs = server_ctx.project_root().clone(); let docs_cache_manager = DocsCacheManager::new(fs.clone(), dice_ctx).await?; @@ -419,7 +339,7 @@ impl<'a> BuckLspContext<'a> { let abs_path = AbsPath::new(path)?; let relative_path = self.fs.relativize_any(abs_path)?; let cell_resolver = self - .with_dice_ctx(|dice_ctx| async move { dice_ctx.get_cell_resolver().await }) + .with_dice_ctx(|mut dice_ctx| async move { dice_ctx.get_cell_resolver().await }) .await?; let cell_path = cell_resolver.get_cell_path(&relative_path)?; @@ -445,7 +365,7 @@ impl<'a> BuckLspContext<'a> { // to get a ProjectRelativePath. We already guaranteed that things start with a '/' // (rooted from `starlark:`, see LspUrl), so just drop it. 
let cell_resolver = self - .with_dice_ctx(|dice_ctx| async move { dice_ctx.get_cell_resolver().await }) + .with_dice_ctx(|mut dice_ctx| async move { dice_ctx.get_cell_resolver().await }) .await?; let cell_path = cell_resolver.get_cell_path(&ProjectRelativePath::new( @@ -462,11 +382,27 @@ impl<'a> BuckLspContext<'a> { } } - async fn parse_file_with_contents( + async fn parse_file_with_contents(&self, uri: &LspUrl, content: String) -> LspEvalResult { + match self + .parse_file_from_contents_and_handle_diagnostic(uri, content) + .await + { + Ok(res) => res, + Err(e) => { + let message = EvalMessage::from_any_error(uri.path(), &e); + LspEvalResult { + diagnostics: vec![eval_message_to_lsp_diagnostic(message)], + ast: None, + } + } + } + } + + async fn parse_file_from_contents_and_handle_diagnostic( &self, uri: &LspUrl, content: String, - ) -> SharedResult { + ) -> anyhow::Result { let import_path: OwnedStarlarkModulePath = match uri { LspUrl::File(path) => self.import_path(path).await, LspUrl::Starlark(path) => self.starlark_import_path(path).await, @@ -477,24 +413,32 @@ impl<'a> BuckLspContext<'a> { .into()), }?; - Ok(self - .with_dice_ctx(|dice_ctx| async move { - let calculator = dice_ctx - .get_interpreter_calculator( - import_path.borrow().cell(), - import_path.borrow().build_file_cell(), - ) - .await?; - - let module_path = import_path.borrow(); - let path = module_path.starlark_path(); - let ast = calculator.prepare_eval_with_content(path, content)?; - Ok(LspEvalResult { - diagnostics: vec![], + self.with_dice_ctx(|mut dice_ctx| async move { + let calculator = dice_ctx + .get_interpreter_calculator( + import_path.borrow().cell(), + import_path.borrow().build_file_cell(), + ) + .await?; + + let module_path = import_path.borrow(); + let path = module_path.starlark_path(); + let parse_result = calculator.prepare_eval_with_content(path, content)?; + match parse_result { + Ok(ParseData(ast, _)) => Ok(LspEvalResult { + diagnostics: Vec::new(), ast: Some(ast), - }) - }) - .await?) 
+ }), + Err(e) => { + let message = EvalMessage::from_error(uri.path(), &e.into()); + Ok(LspEvalResult { + diagnostics: vec![eval_message_to_lsp_diagnostic(message)], + ast: None, + }) + } + } + }) + .await } async fn parse_file_from_string( @@ -505,7 +449,7 @@ impl<'a> BuckLspContext<'a> { match ForwardRelativePath::new(literal) { Ok(package_relative) => { let cell_resolver = self - .with_dice_ctx(|dice_ctx| async move { dice_ctx.get_cell_resolver().await }) + .with_dice_ctx(|mut dice_ctx| async move { dice_ctx.get_cell_resolver().await }) .await?; let relative_path = cell_resolver.resolve_path(current_package.join(package_relative).as_ref())?; @@ -527,25 +471,37 @@ impl<'a> BuckLspContext<'a> { current_package: CellPathRef<'_>, literal: &str, ) -> anyhow::Result> { - let cell_resolver = self - .with_dice_ctx(|dice_ctx| async move { dice_ctx.get_cell_resolver().await }) + let (artifact_fs, cell_alias_resolver) = self + .with_dice_ctx(|mut dice_ctx| async move { + Ok(( + dice_ctx.get_artifact_fs().await?, + dice_ctx + .get_cell_alias_resolver(current_package.cell()) + .await?, + )) + }) .await?; + let cell_resolver = artifact_fs.cell_resolver(); match ParsedPattern::::parsed_opt_absolute( literal, Some(current_package), current_package.cell(), &cell_resolver, + &cell_alias_resolver, ) { Ok(ParsedPattern::Target(package, target, _)) => { let res = self - .with_dice_ctx(async move |dice_ctx| { - Ok(dice_ctx + .with_dice_ctx(|mut dice_ctx| async move { + Ok(DicePackageListingResolver(&mut dice_ctx) .resolve_package_listing(package.dupe()) .await .and_then(|listing| { - let relative_path = cell_resolver - .resolve_package(package.dupe())? - .join(listing.buildfile()); + // In the case of external cells, we need to actually materialize + // this thing on disk, so treat it like a source artifact + let buildfile: &PackageRelativePath = listing.buildfile().as_ref(); + let source_path = SourcePath::new(package.dupe(), buildfile.into()); + let relative_path = + artifact_fs.resolve_source(source_path.as_ref())?; let path = self.fs.resolve(&relative_path); match Url::from_file_path(path).unwrap().try_into() { Ok(url) => { @@ -580,16 +536,7 @@ impl<'a> LspContext for BuckLspContext<'a> { .block_on(with_dispatcher_async(dispatcher, async { match uri { LspUrl::File(_) | LspUrl::Starlark(_) => { - match self.parse_file_with_contents(uri, content).await { - Ok(result) => result, - Err(e) => { - let message = EvalMessage::from_anyhow(uri.path(), e.inner()); - LspEvalResult { - diagnostics: vec![eval_message_to_lsp_diagnostic(message)], - ast: None, - } - } - } + self.parse_file_with_contents(uri, content).await } _ => LspEvalResult::default(), } @@ -610,7 +557,7 @@ impl<'a> LspContext for BuckLspContext<'a> { let current_import_path = self.import_path(current_file).await?; let borrowed_current_import_path = current_import_path.borrow(); let url = self - .with_dice_ctx(async move |dice_ctx| { + .with_dice_ctx(|mut dice_ctx| async move { let calculator = dice_ctx .get_interpreter_calculator( borrowed_current_import_path.cell(), @@ -690,9 +637,9 @@ impl<'a> LspContext for BuckLspContext<'a> { LspUrl::File(path) => { let path = self.import_path(path).await?; - self.with_dice_ctx(async move |dice_ctx| { - match ::read_file( - &dice_ctx.file_ops(), + self.with_dice_ctx(|mut dice_ctx| async move { + match DiceFileComputations::read_file( + &mut dice_ctx, path.borrow().path().as_ref(), ) .await @@ -710,7 +657,7 @@ impl<'a> LspContext for BuckLspContext<'a> { } LspUrl::Starlark(_) => { let docs_cache = self - 
.with_dice_ctx(async move |dice_ctx| { + .with_dice_ctx(|dice_ctx| async move { self.docs_cache_manager.get_cache(dice_ctx).await }) .await?; @@ -765,9 +712,11 @@ pub(crate) async fn run_lsp_server_command( data: Some(buck2_data::LspCommandStart {}.into()), }; span_async(start_event, async move { - let result = run_lsp_server(ctx, partial_result_dispatcher, req).await; + let result = run_lsp_server(ctx, partial_result_dispatcher, req) + .await + .map_err(Into::into); let end_event = command_end(&result, buck2_data::LspCommandEnd {}); - (result, end_event) + (result.map_err(Into::into), end_event) }) .await } @@ -908,59 +857,65 @@ fn handle_outgoing_lsp_message( } #[cfg(test)] -mod test { +mod tests { + use buck2_core::bzl::ImportPath; use lsp_types::Url; - use maplit::hashmap; - use starlark::docs::Doc; use starlark::docs::DocFunction; use starlark::docs::DocItem; - use starlark::docs::Identifier; - use starlark::docs::Location; + use starlark::docs::DocMember; + use starlark::docs::DocModule; use starlark_lsp::server::LspUrl; use crate::lsp::DocsCache; - use crate::lsp::DOCS_DIRECTORY_KEY; #[test] fn cache_builds() -> anyhow::Result<()> { + const P: &str = "cell//foo:bar.bzl"; + let docs = vec![ - Doc { - id: Identifier { - name: "native_function1".to_owned(), - location: None, + ( + None, + DocModule { + docs: None, + members: [ + ( + "native_function1".to_owned(), + DocItem::Member(DocMember::Function(DocFunction::default())), + ), + ( + "native_function2".to_owned(), + DocItem::Member(DocMember::Function(DocFunction::default())), + ), + ] + .into_iter() + .collect(), }, - item: DocItem::Function(DocFunction::default()), - custom_attrs: Default::default(), - }, - Doc { - id: Identifier { - name: "native_function2".to_owned(), - location: None, + ), + ( + Some(ImportPath::testing_new(P)), + DocModule { + docs: None, + members: [( + "prelude_function".to_owned(), + DocItem::Member(DocMember::Function(DocFunction::default())), + )] + .into_iter() + .collect(), }, - item: DocItem::Function(DocFunction::default()), - custom_attrs: hashmap! { DOCS_DIRECTORY_KEY.to_owned() => "subdir".to_owned() }, - }, - Doc { - id: Identifier { - name: "prelude_function".to_owned(), - location: Some(Location { - path: "//dir/prelude.bzl".to_owned(), - }), - }, - item: DocItem::Function(DocFunction::default()), - custom_attrs: Default::default(), - }, + ), ]; let runtime = tokio::runtime::Builder::new_current_thread() .build() .unwrap(); - async fn lookup_function(location: &Location) -> anyhow::Result { - match location.path.as_str() { - "//dir/prelude.bzl" => Ok(LspUrl::try_from(Url::parse( + + async fn lookup_function(location: &ImportPath) -> anyhow::Result { + if location == &ImportPath::testing_new(P) { + Ok(LspUrl::try_from(Url::parse( "file:///usr/local/dir/prelude.bzl", - )?)?), - p => Err(anyhow::anyhow!("Unknown path {}", p)), + )?)?) 
+ } else { + Err(anyhow::anyhow!("Unknown path {}", location)) } } @@ -972,7 +927,7 @@ mod test { cache.url_for_symbol("native_function1").unwrap() ); assert_eq!( - &LspUrl::try_from(Url::parse("starlark:/native/subdir/native_function2.bzl")?)?, + &LspUrl::try_from(Url::parse("starlark:/native/native_function2.bzl")?)?, cache.url_for_symbol("native_function2").unwrap() ); assert_eq!( diff --git a/app/buck2_server/src/materialize.rs b/app/buck2_server/src/materialize.rs index 4ecf379d332ef..dc9c0e9753556 100644 --- a/app/buck2_server/src/materialize.rs +++ b/app/buck2_server/src/materialize.rs @@ -12,7 +12,7 @@ use buck2_cli_proto::new_generic::MaterializeRequest; use buck2_cli_proto::new_generic::MaterializeResponse; use buck2_core::fs::project_rel_path::ProjectRelativePath; use buck2_events::dispatch::span_async; -use buck2_server_ctx::command_end::command_end; +use buck2_server_ctx::commands::command_end; use buck2_server_ctx::ctx::ServerCommandContextTrait; use crate::ctx::BaseServerCommandContext; @@ -30,9 +30,10 @@ pub(crate) async fn materialize_command( let result = materialize(&context.base_context, req.paths) .await .map(|()| MaterializeResponse {}) - .context("Failed to materialize paths"); + .context("Failed to materialize paths") + .map_err(Into::into); let end_event = command_end(&result, buck2_data::MaterializeCommandEnd {}); - (result, end_event) + (result.map_err(Into::into), end_event) }) .await } diff --git a/app/buck2_server/src/net_io.rs b/app/buck2_server/src/net_io.rs index c9ded95fe8a86..ff96e3aeaaf80 100644 --- a/app/buck2_server/src/net_io.rs +++ b/app/buck2_server/src/net_io.rs @@ -7,10 +7,18 @@ * of this source tree. */ +#[derive(Copy, Clone, Debug)] +pub enum NetworkKind { + WiFi, + Ethernet, + Unknown, +} + #[derive(Clone, Debug)] -pub struct Counters { +pub struct NetworkStat { pub bytes_sent: u64, pub bytes_recv: u64, + pub network_kind: NetworkKind, } #[cfg(any(target_os = "macos", target_os = "linux"))] @@ -44,18 +52,25 @@ mod collector { /// * If a new NIC appears between collection periods, we'll start keeping /// track of it. /// * If a NIC *disappears*, then we stop reporting on its stats. - pub fn collect(&self) -> anyhow::Result>> { + pub fn collect(&self) -> anyhow::Result>> { let mut collector = self.collector.lock().expect("poisoned lock"); let counters: HashMap<_, _> = collector .net_io_counters_pernic() .context("collecting old counters")? .into_iter() + .filter(|(s, _)| { + ["en", "eth", "wlan"] + .iter() + .any(|prefix| s.starts_with(prefix)) + }) .map(|(nic, counters)| { + let network_kind = NetworkKind::from_name(&nic); ( nic, - Counters { + NetworkStat { bytes_sent: counters.bytes_sent(), bytes_recv: counters.bytes_recv(), + network_kind, }, ) }) @@ -64,29 +79,156 @@ mod collector { Ok(Some(counters)) } } + + impl NetworkKind { + #[cfg(target_os = "macos")] + fn from_name(name: &str) -> NetworkKind { + match name { + // on macbook en0 is always WiFi + // TODO(yurysamkevich): properly detect device type using SCNetworkInterfaceGetInterfaceType Apple API + "en0" => NetworkKind::WiFi, + n if n.starts_with("en") => NetworkKind::Ethernet, + _ => NetworkKind::Unknown, + } + } + + #[cfg(target_os = "linux")] + fn from_name(name: &str) -> NetworkKind { + match name { + n if n.starts_with("eth") => NetworkKind::Ethernet, + n if n.starts_with("wlan") => NetworkKind::WiFi, + _ => NetworkKind::Unknown, + } + } + } } -// psutil network stats aren't implemented on windows or other unix-likes. 
-#[cfg(not(any(target_os = "macos", target_os = "linux")))]
+#[cfg(target_os = "windows")]
 mod collector {
     use std::collections::HashMap;
+    use std::io::Error;
+    use std::slice::from_raw_parts;

     use dupe::Dupe;
+    use winapi::shared::ipifcons;
+    use winapi::shared::netioapi::FreeMibTable;
+    use winapi::shared::netioapi::GetIfTable2;
+    use winapi::shared::netioapi::MIB_IF_TABLE2;
+    use winapi::shared::ntdef::FALSE;
+    use winapi::shared::winerror::NO_ERROR;

     use super::*;

     #[derive(Clone, Debug, Dupe)]
     pub struct SystemNetworkIoCollector;

+    struct TableGuard {
+        table: *mut MIB_IF_TABLE2,
+    }
+
+    impl Drop for TableGuard {
+        fn drop(&mut self) {
+            unsafe { FreeMibTable(self.table as *mut _) }
+        }
+    }
+
     impl SystemNetworkIoCollector {
         pub fn new() -> Self {
             Self
         }

-        pub fn collect(&self) -> anyhow::Result<Option<HashMap<String, Counters>>> {
+        pub fn collect(&self) -> anyhow::Result<Option<HashMap<String, NetworkStat>>> {
+            let mut counters = HashMap::new();
+            let (_guard, entries) = unsafe {
+                let mut table: *mut MIB_IF_TABLE2 = std::ptr::null_mut();
+
+                if GetIfTable2(&mut table) != NO_ERROR {
+                    return Err(anyhow::anyhow!(
+                        "Failed to retrieve MIB-II interface table: {}",
+                        Error::last_os_error()
+                    ));
+                };
+
+                // Ensure the table gets freed
+                let _guard = TableGuard { table };
+
+                let num_entries = (*table).NumEntries;
+                let table_ptr = (*table).Table.as_ptr();
+                (_guard, from_raw_parts(table_ptr, num_entries as usize))
+            };
+
+            for entry in entries {
+                if entry.InterfaceAndOperStatusFlags.HardwareInterface() == FALSE {
+                    continue;
+                }
+
+                let name_len = entry
+                    .Alias
+                    .iter()
+                    .position(|c| *c == 0)
+                    .unwrap_or(entry.Alias.len());
+                let interface_name = String::from_utf16(&entry.Alias[..name_len])
+                    .unwrap_or_else(|_| String::from(""));
+                let bytes_sent = entry.OutOctets;
+                let bytes_recv = entry.InOctets;
+                let network_kind = match entry.Type {
+                    ipifcons::IF_TYPE_ETHERNET_CSMACD => NetworkKind::Ethernet,
+                    ipifcons::IF_TYPE_IEEE80211 => NetworkKind::WiFi,
+                    _ => NetworkKind::Unknown,
+                };
+                counters.insert(
+                    interface_name,
+                    NetworkStat {
+                        bytes_sent,
+                        bytes_recv,
+                        network_kind,
+                    },
+                );
+            }
+            Ok(Some(counters))
+        }
+    }
+}
+
+// psutil network stats aren't implemented on other unix-likes.
+#[cfg(not(any(target_os = "macos", target_os = "linux", target_os = "windows")))]
+mod collector {
+    use std::collections::HashMap;
+
+    use dupe::Dupe;
+
+    use super::*;
+
+    #[derive(Clone, Debug, Dupe)]
+    pub struct SystemNetworkIoCollector;
+
+    impl SystemNetworkIoCollector {
+        pub fn new() -> Self {
+            Self
+        }
+
+        pub fn collect(&self) -> anyhow::Result<Option<HashMap<String, NetworkStat>>> {
             Ok(None)
         }
     }
 }

 pub use collector::SystemNetworkIoCollector;
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[tokio::test]
+    async fn test_network_collector() {
+        let collector = SystemNetworkIoCollector::new();
+        let stat = collector.collect().unwrap().unwrap();
+        assert!(!stat.is_empty());
+
+        let (recv, sent) = stat.iter().fold((0, 0), |acc, x| {
+            (acc.0 + x.1.bytes_recv, acc.1 + x.1.bytes_sent)
+        });
+        assert!(recv > 0);
+        assert!(sent > 0);
+    }
+}
diff --git a/app/buck2_server/src/new_generic.rs b/app/buck2_server/src/new_generic.rs
index 24120ca786d7d..c939e70b5453c 100644
--- a/app/buck2_server/src/new_generic.rs
+++ b/app/buck2_server/src/new_generic.rs
@@ -10,7 +10,10 @@
 use anyhow::Context;
 use buck2_cli_proto::new_generic::NewGenericRequest;
 use buck2_cli_proto::new_generic::NewGenericResponse;
-use buck2_server_ctx::other_server_commands::OTHER_SERVER_COMMANDS;
+use buck2_server_ctx::late_bindings::DOCS_SERVER_COMMAND;
+use buck2_server_ctx::late_bindings::OTHER_SERVER_COMMANDS;
+use buck2_server_ctx::partial_result_dispatcher::NoPartialResult;
+use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher;

 use crate::ctx::ServerCommandContext;
 use crate::materialize::materialize_command;
@@ -18,6 +21,7 @@
 pub(crate) async fn new_generic_command(
     context: &ServerCommandContext<'_>,
     req: buck2_cli_proto::NewGenericRequestMessage,
+    partial_result_dispatcher: PartialResultDispatcher<NoPartialResult>,
 ) -> anyhow::Result<buck2_cli_proto::NewGenericResponseMessage> {
     let req = req.new_generic_request;
     let req: NewGenericRequest =
@@ -26,9 +30,33 @@
         NewGenericRequest::Materialize(m) => {
             NewGenericResponse::Materialize(materialize_command(context, m).await?)
         }
+        NewGenericRequest::Complete(e) => NewGenericResponse::Complete(
+            OTHER_SERVER_COMMANDS
+                .get()?
+                .complete(context, partial_result_dispatcher, e)
+                .await?,
+        ),
         NewGenericRequest::DebugEval(e) => NewGenericResponse::DebugEval(
             OTHER_SERVER_COMMANDS.get()?.debug_eval(context, e).await?,
         ),
+        NewGenericRequest::Explain(m) => NewGenericResponse::Explain(
+            OTHER_SERVER_COMMANDS
+                .get()?
+                .explain(context, partial_result_dispatcher, m)
+                .await?,
+        ),
+        NewGenericRequest::ExpandExternalCell(e) => NewGenericResponse::ExpandExternalCell(
+            OTHER_SERVER_COMMANDS
+                .get()?
+                .expand_external_cell(context, partial_result_dispatcher, e)
+                .await?,
+        ),
+        NewGenericRequest::Docs(d) => NewGenericResponse::Docs(
+            DOCS_SERVER_COMMAND
+                .get()?
+ .docs(context, partial_result_dispatcher, d) + .await?, + ), }; let resp = serde_json::to_string(&resp).context("Could not serialize `NewGenericResponse`")?; Ok(buck2_cli_proto::NewGenericResponseMessage { diff --git a/app/buck2_server/src/profile.rs b/app/buck2_server/src/profile.rs index 24e575c14becf..4cb3e5ddf4442 100644 --- a/app/buck2_server/src/profile.rs +++ b/app/buck2_server/src/profile.rs @@ -13,101 +13,94 @@ use std::sync::Arc; use anyhow::Context as _; use async_trait::async_trait; use buck2_analysis::analysis::calculation::profile_analysis; -use buck2_analysis::analysis::calculation::profile_analysis_recursively; use buck2_cli_proto::profile_request::ProfileOpts; use buck2_cli_proto::target_profile::Action; -use buck2_cli_proto::ClientContext; -use buck2_common::dice::cells::HasCellResolver; -use buck2_common::dice::file_ops::HasFileOps; -use buck2_common::pattern::resolve::resolve_target_patterns; -use buck2_core::cells::build_file_cell::BuildFileCell; +use buck2_cli_proto::TargetCfg; +use buck2_common::pattern::parse_from_cli::parse_and_resolve_patterns_from_cli_args; use buck2_core::fs::paths::abs_path::AbsPath; use buck2_core::package::PackageLabel; +use buck2_core::pattern::pattern_type::ConfiguredProvidersPatternExtra; use buck2_core::pattern::pattern_type::TargetPatternExtra; -use buck2_core::pattern::PackageSpec; -use buck2_core::target::label::TargetLabel; -use buck2_interpreter::dice::starlark_profiler::StarlarkProfilerConfiguration; -use buck2_interpreter::starlark_profiler::StarlarkProfileDataAndStats; -use buck2_interpreter::starlark_profiler::StarlarkProfiler; -use buck2_interpreter::starlark_profiler::StarlarkProfilerOrInstrumentation; -use buck2_interpreter_for_build::interpreter::dice_calculation_delegate::HasCalculationDelegate; -use buck2_node::target_calculation::ConfiguredTargetCalculation; +use buck2_core::target::configured_target_label::ConfiguredTargetLabel; +use buck2_error::internal_error_anyhow; +use buck2_error::BuckErrorContext; +use buck2_futures::spawn::spawn_cancellable; +use buck2_interpreter::starlark_profiler::config::GetStarlarkProfilerInstrumentation; +use buck2_interpreter::starlark_profiler::config::StarlarkProfilerConfiguration; +use buck2_interpreter::starlark_profiler::data::StarlarkProfileDataAndStats; +use buck2_interpreter::starlark_profiler::mode::StarlarkProfileMode; +use buck2_node::nodes::frontend::TargetGraphCalculation; use buck2_profile::get_profile_response; use buck2_profile::starlark_profiler_configuration_from_request; use buck2_server_ctx::ctx::ServerCommandContextTrait; use buck2_server_ctx::partial_result_dispatcher::NoPartialResult; use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; -use buck2_server_ctx::pattern::parse_patterns_from_cli_args; -use buck2_server_ctx::pattern::target_platform_from_client_context; +use buck2_server_ctx::pattern_parse_and_resolve::parse_and_resolve_patterns_to_targets_from_cli_args; +use buck2_server_ctx::target_resolution_config::TargetResolutionConfig; use buck2_server_ctx::template::run_server_command; use buck2_server_ctx::template::ServerCommandTemplate; use dice::DiceTransaction; use dupe::Dupe; use futures::future::FutureExt; -use more_futures::spawn::spawn_cancellable; async fn generate_profile_analysis( - ctx: DiceTransaction, - package: PackageLabel, - spec: PackageSpec, - global_target_platform: Option, + mut ctx: DiceTransaction, + server_ctx: &dyn ServerCommandContextTrait, + target_patterns: &[String], + target_resolution_config: 
TargetResolutionConfig,
     profile_mode: &StarlarkProfilerConfiguration,
 ) -> anyhow::Result<Arc<StarlarkProfileDataAndStats>> {
-    let (target, TargetPatternExtra) = match spec {
-        PackageSpec::Targets(targets) => one(targets).context("Invalid targets"),
-        PackageSpec::All => Err(anyhow::Error::msg("Cannot use a package")),
-    }
-    .context("Did not find exactly one target")?;
-
-    let label = TargetLabel::new(package.dupe(), target.as_ref());
+    let targets = parse_and_resolve_patterns_to_targets_from_cli_args::<
+        ConfiguredProvidersPatternExtra,
+    >(&mut ctx, target_patterns, server_ctx.working_dir())
+    .await?;

-    let configured_target = ctx
-        .get_configured_target(&label, global_target_platform.as_ref())
+    let target_resolution_config = &target_resolution_config;
+    let configured_target_lists = ctx
+        .try_compute_join(targets, |ctx, label| {
+            async move {
+                target_resolution_config
+                    .get_configured_target(ctx, &label.target_label)
+                    .await
+            }
+            .boxed()
+        })
         .await?;

+    let configured_targets: Vec<ConfiguredTargetLabel> =
+        configured_target_lists.into_iter().flatten().collect();
+
     match profile_mode {
-        StarlarkProfilerConfiguration::ProfileLastAnalysis(profile_mode) => {
-            profile_analysis(&ctx, &configured_target, profile_mode)
+        StarlarkProfilerConfiguration::ProfileAnalysis(..) => {
+            profile_analysis(&mut ctx, &configured_targets)
                 .await
-                .context("Analysis failed")
-        }
-        StarlarkProfilerConfiguration::ProfileAnalysisRecursively(_) => {
-            profile_analysis_recursively(&ctx, &configured_target)
-                .await
-                .context("Analysis failed")
+                .context("Recursive profile analysis failed")
                 .map(Arc::new)
         }
-        _ => Err(anyhow::anyhow!("Incorrect profile mode (internal error)")),
+        _ => Err(internal_error_anyhow!("Incorrect profile mode")),
     }
 }

 async fn generate_profile_loading(
     ctx: &DiceTransaction,
     package: PackageLabel,
-    spec: PackageSpec<TargetPatternExtra>,
-    profile_mode: &StarlarkProfilerConfiguration,
 ) -> anyhow::Result<StarlarkProfileDataAndStats> {
-    match spec {
-        PackageSpec::Targets(..) => {
-            return Err(anyhow::Error::msg("Must use a package"));
+    // Self-check.
+ // Self-check. + let profile_mode = ctx.clone().get_profile_mode_for_loading(package).await?; + match profile_mode { + StarlarkProfileMode::None => { + return Err(internal_error_anyhow!("profile mode must be set in DICE")); } - PackageSpec::All => {} + StarlarkProfileMode::Profile(_) => {} } - let calculation = ctx - .get_interpreter_calculator(package.cell_name(), BuildFileCell::new(package.cell_name())) - .await?; - - let mut profiler = StarlarkProfiler::new(profile_mode.profile_last_loading()?.dupe(), false); + let eval_result = ctx.clone().get_interpreter_results(package).await?; - calculation - .eval_build_file( - package, - &mut StarlarkProfilerOrInstrumentation::for_profiler(&mut profiler), - ) - .await?; - - profiler.finish() + let starlark_profile = &eval_result + .starlark_profile + .as_ref() + .internal_error_anyhow("profile result must be set")?; + Ok(StarlarkProfileDataAndStats::downcast(&***starlark_profile)?.clone()) } pub async fn profile_command( @@ -137,7 +130,8 @@ impl ServerCommandTemplate for ProfileServerCommand { ) -> anyhow::Result<Self::Response> { let output = AbsPath::new(Path::new(&self.req.destination_path))?; - let profile_mode = starlark_profiler_configuration_from_request(&self.req)?; + let profile_mode = + starlark_profiler_configuration_from_request(&self.req, server_ctx.project_root())?; match self .req @@ -149,23 +143,20 @@ impl ServerCommandTemplate for ProfileServerCommand { let action = buck2_cli_proto::target_profile::Action::from_i32(opts.action) .context("Invalid action")?; - let context = self - .req - .context - .as_ref() - .context("Missing client context")?; - let profile_data = generate_profile( server_ctx, ctx, - context, &opts.target_patterns, + opts.target_cfg + .as_ref() + .internal_error_anyhow("target_cfg not set")?, + &opts.target_universe, action, &profile_mode, ) .await?; - get_profile_response(profile_data, &self.req, output) + get_profile_response(profile_data, output) } _ => { return Err(anyhow::anyhow!( @@ -184,70 +175,54 @@ impl ServerCommandTemplate for ProfileServerCommand { async fn generate_profile( server_ctx: &dyn ServerCommandContextTrait, mut ctx: DiceTransaction, - client_ctx: &ClientContext, - target_patterns: &[buck2_data::TargetPattern], + target_patterns: &[String], + target_cfg: &TargetCfg, + target_universe: &[String], action: Action, profile_mode: &StarlarkProfilerConfiguration, ) -> anyhow::Result<Arc<StarlarkProfileDataAndStats>> { - let cells = ctx.get_cell_resolver().await?; - - let global_target_platform = - target_platform_from_client_context(client_ctx, server_ctx, &mut ctx).await?; - - let parsed_patterns = parse_patterns_from_cli_args::<TargetPatternExtra>( - &mut ctx, - target_patterns, - server_ctx.working_dir(), - ) - .await?; - - let resolved = resolve_target_patterns(&cells, &parsed_patterns, &ctx.file_ops()).await?; + let target_resolution_config = + TargetResolutionConfig::from_args(&mut ctx, target_cfg, server_ctx, target_universe) + .await?; match action { Action::Analysis => { - let (package, spec) = one(resolved.specs) - .context("Error: profiling analysis requires exactly one target pattern")?; - generate_profile_analysis(ctx, package, spec, global_target_platform, profile_mode) - .await + generate_profile_analysis( + ctx, + server_ctx, + target_patterns, + target_resolution_config, + profile_mode, + ) + .await } Action::Loading => { + let resolved = parse_and_resolve_patterns_from_cli_args::<TargetPatternExtra>( + &mut ctx, + &target_patterns, + server_ctx.working_dir(), + ) + .await?; + let ctx = &ctx; let ctx_data = ctx.per_transaction_data(); - let profiles = -
futures::future::try_join_all(resolved.specs.into_iter().map(|(package, spec)| { - let profile_mode = profile_mode.dupe(); + let profiles = buck2_util::future::try_join_all(resolved.specs.into_iter().map( + |(package, _spec)| { let ctx = ctx.dupe(); spawn_cancellable( move |_cancel| { - async move { - generate_profile_loading(&ctx, package, spec, &profile_mode).await - } - .boxed() + async move { generate_profile_loading(&ctx, package).await }.boxed() }, &*ctx_data.spawner, ctx_data, ) .into_drop_cancel() - })) - .await?; - - // We expect that some profile modes cannot be merged here, so we only attempt to merge - // if > 1 profile. - if profiles.len() == 1 { - return Ok(Arc::new(profiles.into_iter().next().unwrap())); - } + }, + )) + .await?; StarlarkProfileDataAndStats::merge(profiles.iter()).map(Arc::new) } } } - -fn one<T>(it: impl IntoIterator<Item = T>) -> anyhow::Result<T> { - let mut it = it.into_iter(); - let val = it.next().context("No value found")?; - if it.next().is_some() { - return Err(anyhow::Error::msg("More than one value found")); - } - Ok(val) -} diff --git a/app/buck2_server/src/snapshot.rs b/app/buck2_server/src/snapshot.rs index 662ec8b9ed15c..9f151ac0a7d3b 100644 --- a/app/buck2_server/src/snapshot.rs +++ b/app/buck2_server/src/snapshot.rs @@ -11,7 +11,11 @@ use std::collections::HashMap; use std::sync::Arc; use anyhow::Context as _; +use buck2_core::fs::fs_util::disk_space_stats; +use buck2_core::fs::fs_util::DiskSpaceStats; +use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; use buck2_core::io_counters::IoCounterKey; +use buck2_events::EventSinkStats; use buck2_execute::re::manager::ReConnectionManager; use buck2_util::process_stats::process_stats; use buck2_util::system_stats::UnixSystemStats; @@ -19,6 +23,7 @@ use dupe::Dupe; use crate::daemon::state::DaemonStateData; use crate::jemalloc_stats::get_allocator_stats; +use crate::net_io::NetworkKind; use crate::net_io::SystemNetworkIoCollector;
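// The `one` helper deleted above, reconstructed as a standalone sketch with its
// generics spelled out and a small test of the contract it enforced: exactly
// one element, with distinct errors for "none" and "more than one".
use anyhow::Context as _;

fn one<T>(it: impl IntoIterator<Item = T>) -> anyhow::Result<T> {
    let mut it = it.into_iter();
    let val = it.next().context("No value found")?;
    if it.next().is_some() {
        return Err(anyhow::Error::msg("More than one value found"));
    }
    Ok(val)
}

#[test]
fn one_requires_exactly_one_element() {
    assert_eq!(one(vec![1]).unwrap(), 1);
    assert!(one(Vec::<i32>::new()).is_err());
    assert!(one(vec![1, 2]).is_err());
}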
/// Stores state handles necessary to produce snapshots. @@ -26,13 +31,15 @@ use crate::net_io::SystemNetworkIoCollector; pub struct SnapshotCollector { daemon: Arc<DaemonStateData>, net_io_collector: SystemNetworkIoCollector, + buck_out_path: Arc<AbsNormPathBuf>, } impl SnapshotCollector { - pub fn new(daemon: Arc<DaemonStateData>) -> SnapshotCollector { + pub fn new(daemon: Arc<DaemonStateData>, buck_out_path: AbsNormPathBuf) -> SnapshotCollector { SnapshotCollector { daemon, net_io_collector: SystemNetworkIoCollector::new(), + buck_out_path: buck_out_path.into(), } } @@ -122,6 +129,26 @@ impl SnapshotCollector { snapshot.re_get_digest_expirations_finished_with_error = stats.get_digest_expirations.finished_with_error; + snapshot.zdb_download_queries = stats.download_stats.zdb.queries; + snapshot.zdb_download_bytes = stats.download_stats.zdb.bytes; + snapshot.zdb_upload_queries = stats.upload_stats.zdb.queries; + snapshot.zdb_upload_bytes = stats.upload_stats.zdb.bytes; + + snapshot.zgateway_download_queries = stats.download_stats.zgateway.queries; + snapshot.zgateway_download_bytes = stats.download_stats.zgateway.bytes; + snapshot.zgateway_upload_queries = stats.upload_stats.zgateway.queries; + snapshot.zgateway_upload_bytes = stats.upload_stats.zgateway.bytes; + + snapshot.manifold_download_queries = stats.download_stats.manifold.queries; + snapshot.manifold_download_bytes = stats.download_stats.manifold.bytes; + snapshot.manifold_upload_queries = stats.upload_stats.manifold.queries; + snapshot.manifold_upload_bytes = stats.upload_stats.manifold.bytes; + + snapshot.hedwig_download_queries = stats.download_stats.hedwig.queries; + snapshot.hedwig_download_bytes = stats.download_stats.hedwig.bytes; + snapshot.hedwig_upload_queries = stats.upload_stats.hedwig.queries; + snapshot.hedwig_upload_bytes = stats.upload_stats.hedwig.bytes; + Ok(()) } @@ -147,23 +174,39 @@ impl SnapshotCollector { } fn add_sink_metrics(&self, snapshot: &mut buck2_data::Snapshot) { - if let Some(metrics) = self - .daemon - .scribe_sink - .as_ref() - .and_then(|sink| sink.stats()) - { - snapshot.sink_successes = Some(metrics.successes); - snapshot.sink_failures = Some(metrics.failures); - snapshot.sink_buffer_depth = Some(metrics.buffered); - snapshot.sink_dropped = Some(metrics.dropped); + if let Some(metrics) = self.daemon.scribe_sink.as_ref().map(|sink| sink.stats()) { + let EventSinkStats { + successes, + failures_invalid_request, + failures_unauthorized, + failures_rate_limited, + failures_pushed_back, + failures_enqueue_failed, + failures_internal_error, + failures_timed_out, + failures_unknown, + buffered, + dropped, + bytes_written, + } = metrics; + snapshot.sink_successes = Some(successes); + snapshot.sink_failures = Some(metrics.failures()); + snapshot.sink_failures_invalid_request = Some(failures_invalid_request); + snapshot.sink_failures_unauthorized = Some(failures_unauthorized); + snapshot.sink_failures_rate_limited = Some(failures_rate_limited); + snapshot.sink_failures_pushed_back = Some(failures_pushed_back); + snapshot.sink_failures_enqueue_failed = Some(failures_enqueue_failed); + snapshot.sink_failures_internal_error = Some(failures_internal_error); + snapshot.sink_failures_timed_out = Some(failures_timed_out); + snapshot.sink_failures_unknown = Some(failures_unknown); + snapshot.sink_buffer_depth = Some(buffered); + snapshot.sink_dropped = Some(dropped); + snapshot.sink_bytes_written = Some(bytes_written); } } fn add_net_io_metrics(&self, snapshot: &mut buck2_data::Snapshot) { - if let Ok(Some(mut net_io_counters_per_nic)) = self.net_io_collector.collect() { - net_io_counters_per_nic - .retain(|k, _| ["en",
"eth"].iter().any(|prefix| k.starts_with(prefix))); + if let Ok(Some(net_io_counters_per_nic)) = self.net_io_collector.collect() { snapshot.network_interface_stats = net_io_counters_per_nic .into_iter() .map(|(nic, counters)| { @@ -172,6 +215,13 @@ impl SnapshotCollector { buck2_data::NetworkInterfaceStats { tx_bytes: counters.bytes_sent, rx_bytes: counters.bytes_recv, + network_kind: match counters.network_kind { + NetworkKind::WiFi => buck2_data::NetworkKind::WiFi.into(), + NetworkKind::Ethernet => buck2_data::NetworkKind::Ethernet.into(), + NetworkKind::Unknown => { + buck2_data::NetworkKind::UnknownNetKind.into() + } + }, }, ) }) @@ -200,6 +250,14 @@ impl SnapshotCollector { snapshot.malloc_bytes_allocated = alloc_stats.bytes_allocated; } + if let Ok(DiskSpaceStats { + total_space, + free_space, + }) = disk_space_stats(&*self.buck_out_path) + { + snapshot.used_disk_space_bytes = Some(total_space - free_space); + } + if let Some(UnixSystemStats { load1, load5, diff --git a/app/buck2_server/src/subscription.rs b/app/buck2_server/src/subscription.rs index a34abfb6dee7f..3eac4332b7650 100644 --- a/app/buck2_server/src/subscription.rs +++ b/app/buck2_server/src/subscription.rs @@ -9,9 +9,10 @@ use std::time::Duration; -use anyhow::Context as _; +use anyhow::Context; +use buck2_error::BuckErrorContext; use buck2_events::dispatch::span_async; -use buck2_server_ctx::command_end::command_end; +use buck2_server_ctx::commands::command_end; use buck2_server_ctx::ctx::ServerCommandContextTrait; use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; use buck2_server_ctx::streaming_request_handler::StreamingRequestHandler; @@ -58,7 +59,7 @@ pub(crate) async fn run_subscription_server_command( message = req.message().fuse() => { use buck2_subscription_proto::subscription_request::Request; - match message?.request.context("Empty message")?.request.context("Empty request")? { + match message?.request.context("Empty message").input_anyhow()?.request.context("Empty request").input_anyhow()? 
{ Request::Disconnect(disconnect) => { break disconnect; } @@ -107,9 +108,10 @@ pub(crate) async fn run_subscription_server_command( buck2_cli_proto::SubscriptionCommandResponse {} }; + let result = result.map_err(Into::into); let end_event = command_end(&result, buck2_data::SubscriptionCommandEnd {}); - (result, end_event) + (result.map_err(Into::into), end_event) }) .await } diff --git a/app/buck2_server/src/trace_io.rs b/app/buck2_server/src/trace_io.rs index 0c8e792c2f173..79cd1f47dca58 100644 --- a/app/buck2_server/src/trace_io.rs +++ b/app/buck2_server/src/trace_io.rs @@ -13,7 +13,7 @@ use buck2_cli_proto::trace_io_response; use buck2_common::file_ops::RawSymlink; use buck2_common::io::trace::TracingIoProvider; use buck2_events::dispatch::span_async; -use buck2_server_ctx::command_end::command_end; +use buck2_server_ctx::commands::command_end; use buck2_server_ctx::ctx::ServerCommandContextTrait; use crate::ctx::ServerCommandContext; @@ -27,12 +27,7 @@ pub(crate) async fn trace_io_command( data: Some(buck2_data::TraceIoCommandStart {}.into()), }; span_async(start_event, async move { - let tracing_provider = &context - .base_context - .daemon - .io - .as_any() - .downcast_ref::<TracingIoProvider>(); + let tracing_provider = TracingIoProvider::from_io(&*context.base_context.daemon.io); let respond_with_trace = matches!( req.read_state, Some(trace_io_request::ReadIoTracingState { with_trace: true }) @@ -54,10 +49,11 @@ pub(crate) async fn trace_io_command( relative_symlinks: Vec::new(), external_symlinks: Vec::new(), }), - }; + } + .map_err(Into::into); let end_event = command_end(&result, buck2_data::TraceIoCommandEnd {}); - (result, end_event) + (result.map_err(Into::into), end_event) }) .await } @@ -91,7 +87,11 @@ async fn build_response_with_trace( external_symlinks.push(trace_io_response::ExternalSymlink { link: link.at.to_string(), target: external.target_str().to_owned(), - remaining_path: external.remaining_path().map(|path| path.to_string()), + remaining_path: if external.remaining_path().is_empty() { + None + } else { + Some(external.remaining_path().to_string()) + }, }); } } diff --git a/app/buck2_server/src/version_control_revision.rs b/app/buck2_server/src/version_control_revision.rs new file mode 100644 index 0000000000000..221e9de630ee4 --- /dev/null +++ b/app/buck2_server/src/version_control_revision.rs @@ -0,0 +1,225 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::mem; +use std::process::Output; +use std::process::Stdio; + +use anyhow::Context; +use buck2_error::internal_error_anyhow; +use buck2_error::BuckErrorContext; +use buck2_events::dispatch::EventDispatcher; +use buck2_util::process::async_background_command; +use tokio::io::AsyncReadExt; +use tokio::process::Child; +use tokio::sync::OnceCell; + +/// Spawn tasks to collect version control information +/// and return a droppable handle that will cancel them on drop. +pub(crate) fn spawn_version_control_collector(dispatch: EventDispatcher) -> AbortOnDropHandle { + AbortOnDropHandle { + handle: tokio::spawn(async move { + let event = create_revision_data().await; + dispatch.instant_event(event); + }), + } +}
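// A minimal sketch of the abort-on-drop idiom that `AbortOnDropHandle` below is
// built on, assuming only tokio: dropping the wrapper aborts the spawned task,
// so a daemon that shuts down mid-command cannot leak the background collector.
use std::time::Duration;

struct AbortOnDrop(tokio::task::JoinHandle<()>);

impl Drop for AbortOnDrop {
    fn drop(&mut self) {
        // JoinHandle::abort is idempotent and harmless after the task finished.
        self.0.abort();
    }
}

#[tokio::main]
async fn main() {
    let _guard = AbortOnDrop(tokio::spawn(async {
        tokio::time::sleep(Duration::from_secs(3600)).await;
        println!("never printed: the task is aborted when _guard drops");
    }));
    // `_guard` goes out of scope here, aborting the sleeping task instead of leaking it.
}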
+/// Abort the underlying task on drop. +pub(crate) struct AbortOnDropHandle { + pub handle: tokio::task::JoinHandle<()>, +} + +impl Drop for AbortOnDropHandle { + fn drop(&mut self) { + self.handle.abort(); + } +} + +#[derive(Clone, Copy, Debug)] +enum RepoVcs { + Hg, + Git, + Unknown, +} + +/// A wrapper over a child process that will reap the child process on drop. +/// On Unix platforms, a child process becomes a zombie until it is reaped by its parent. +struct ProperlyReapedChild { + child: Option<Child>, +} + +impl ProperlyReapedChild { + async fn output(mut self) -> anyhow::Result<Output> { + let mut stdout = Vec::new(); + let mut stderr = Vec::new(); + let mut child = + mem::take(&mut self.child).internal_error_anyhow("child field must be set")?; + let mut stdout_pipe = child.stdout.take().context("stdout is not piped")?; + let mut stderr_pipe = child.stderr.take().context("stderr is not piped")?; + let (stdout_error, stderr_error, status) = tokio::join!( + stdout_pipe.read_to_end(&mut stdout), + stderr_pipe.read_to_end(&mut stderr), + child.wait(), + ); + + let result = match stdout_error.is_ok() || stderr_error.is_ok() { + true => Ok(Output { + status: status?, + stdout, + stderr, + }), + false => Err(internal_error_anyhow!("Failed to read stdout and stderr")), + }; + reap_child(child); + result + } +} + +impl Drop for ProperlyReapedChild { + fn drop(&mut self) { + if let Some(child) = mem::take(&mut self.child) { + reap_child(child); + } + } +} + +fn reap_on_drop_command(command: &str, args: &[&str]) -> anyhow::Result<ProperlyReapedChild> { + async_background_command(command) + .args(args) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .kill_on_drop(true) + .spawn() + .map(|child| ProperlyReapedChild { child: Some(child) }) + .map_err(|e| e.into()) +} + +fn reap_child(mut child: Child) { + tokio::spawn(async move { + if let Some(child_id) = child.id() { + // If the child process has already exited, `child.id()` returns None, so only still-running children are logged here. + tracing::warn!("Killed child process: {:?}", child_id); + } + drop(child.kill().await); + }); +} + +async fn create_revision_data() -> buck2_data::VersionControlRevision { + let mut revision = buck2_data::VersionControlRevision::default(); + match repo_type().await { + Ok(repo_vcs) => { + match repo_vcs { + RepoVcs::Hg => { + match add_hg_data(&mut revision).await { + Err(e) => { + revision.command_error = Some(e.to_string()); + } + Ok(_) => {} + }; + } + RepoVcs::Git => { + // TODO(rajneeshl): Implement the git data + } + RepoVcs::Unknown => { + revision.command_error = Some("Unknown repository type".to_owned()); + } + } + } + Err(e) => { + revision.command_error = Some(e.to_string()); + } + } + revision +} + +async fn add_hg_data(revision: &mut buck2_data::VersionControlRevision) -> anyhow::Result<()> { + // We fire two hg commands in parallel: + // `hg whereami` returns the full hash of the current revision + // `hg status` reports whether there are any local changes + let whereami_command = reap_on_drop_command("hg", &["whereami"])?; + let status_command = reap_on_drop_command("hg", &["status"])?; + + let (whereami_output, status_output) = + tokio::join!(whereami_command.output(), status_command.output()); + + match whereami_output { + Ok(result) => { + if !result.status.success() { + revision.command_error = Some(format!( + "Command `hg whereami` failed with error code {}; stderr:\n{}", + result.status, + std::str::from_utf8(&result.stderr)?
+ )); + return Ok(()); + } + let stdout = std::str::from_utf8(&result.stdout)?.trim(); + if stdout.len() == 40 { + revision.hg_revision = Some(stdout.to_owned()); + } else { + revision.command_error = Some(format!("Unexpected revision: {}", stdout)); + } + } + Err(e) => { + revision.command_error = + Some(format!("Command `hg whereami` failed with error: {:?}", e)); + } + } + + match status_output { + Ok(result) => { + if !result.status.success() { + revision.command_error = Some(format!( + "Command `hg status` failed with error code {}; stderr:\n{}", + result.status, + std::str::from_utf8(&result.stderr)? + )); + return Ok(()); + } + revision.has_local_changes = + Some(!std::str::from_utf8(&result.stdout)?.trim().is_empty()); + return Ok(()); + } + Err(e) => { + revision.command_error = + Some(format!("Command `hg status` failed with error: {:?}", e)); + } + }; + Ok(()) +} + +async fn repo_type() -> anyhow::Result<&'static RepoVcs> { + static REPO_TYPE: OnceCell<anyhow::Result<RepoVcs>> = OnceCell::const_new(); + async fn repo_type_impl() -> anyhow::Result<RepoVcs> { + let (hg_output, git_output) = tokio::join!( + reap_on_drop_command("hg", &["root"])?.output(), + reap_on_drop_command("git", &["rev-parse", "--is-inside-work-tree"])?.output() + ); + + let is_hg = hg_output.map_or(false, |output| { + std::str::from_utf8(&output.stdout).map_or(false, |s| !s.trim().is_empty()) + }); + let is_git = git_output.map_or(false, |output| { + std::str::from_utf8(&output.stdout).map_or(false, |s| s.trim() == "true") + }); + + if is_hg { + Ok(RepoVcs::Hg) + } else if is_git { + Ok(RepoVcs::Git) + } else { + Ok(RepoVcs::Unknown) + } + } + REPO_TYPE + .get_or_init(repo_type_impl) + .await + .as_ref() + .map_err(anyhow::Error::msg) +} diff --git a/app/buck2_server_commands/BUCK b/app/buck2_server_commands/BUCK index 942838b9241db..7a6ab9b6feee8 100644 --- a/app/buck2_server_commands/BUCK +++ b/app/buck2_server_commands/BUCK @@ -1,19 +1,21 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") rust_library( name = "buck2_server_commands", - srcs = glob(["src/**/*.rs"]), + srcs = glob([ + "src/**/*.rs", + "src/commands/explain/*", + ]), deps = [ - "fbsource//third-party/blake3:blake3-rust", "fbsource//third-party/rust:anyhow", "fbsource//third-party/rust:async-recursion", "fbsource//third-party/rust:async-trait", + "fbsource//third-party/rust:blake3", "fbsource//third-party/rust:chrono", - "fbsource//third-party/rust:derivative", "fbsource//third-party/rust:derive_more", + "fbsource//third-party/rust:flate2", "fbsource//third-party/rust:futures", "fbsource//third-party/rust:indent_write", "fbsource//third-party/rust:itertools", @@ -23,30 +25,34 @@ rust_library( "fbsource//third-party/rust:serde", "fbsource//third-party/rust:serde_json", "fbsource//third-party/rust:siphasher", - "fbsource//third-party/rust:thiserror", "fbsource//third-party/rust:tokio", "fbsource//third-party/rust:tokio-stream", "fbsource//third-party/rust:tonic", "fbsource//third-party/rust:tracing", + "fbsource//third-party/rust:zstd", "//buck2/app/buck2_artifact:buck2_artifact", "//buck2/app/buck2_build_api:buck2_build_api", + "//buck2/app/buck2_certs:buck2_certs", "//buck2/app/buck2_cli_proto:buck2_cli_proto", "//buck2/app/buck2_common:buck2_common", "//buck2/app/buck2_core:buck2_core", "//buck2/app/buck2_data:buck2_data", + "//buck2/app/buck2_directory:buck2_directory", + "//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_events:buck2_events",
"//buck2/app/buck2_execute:buck2_execute", + # @oss-disable: "//buck2/app/buck2_explain:buck2_explain", + "//buck2/app/buck2_futures:buck2_futures", "//buck2/app/buck2_install_proto:buck2_install_proto", "//buck2/app/buck2_interpreter:buck2_interpreter", "//buck2/app/buck2_node:buck2_node", + "//buck2/app/buck2_profile:buck2_profile", "//buck2/app/buck2_query:buck2_query", "//buck2/app/buck2_server_ctx:buck2_server_ctx", "//buck2/app/buck2_util:buck2_util", - "//buck2/app/buck2_wrapper_common:buck2_wrapper_common", "//buck2/dice/dice:dice", "//buck2/gazebo/dupe:dupe", "//buck2/gazebo/gazebo:gazebo", - "//buck2/shed/more_futures:more_futures", "//buck2/starlark-rust/starlark_map:starlark_map", ], ) diff --git a/app/buck2_server_commands/Cargo.toml b/app/buck2_server_commands/Cargo.toml index e35428b84434e..9cf224101007f 100644 --- a/app/buck2_server_commands/Cargo.toml +++ b/app/buck2_server_commands/Cargo.toml @@ -1,8 +1,10 @@ [package] +description = "Implementation of several Buck commands" +edition = "2021" +license = { workspace = true } name = "buck2_server_commands" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Implementation of several Buck commands" [dependencies] anyhow = { workspace = true } @@ -10,8 +12,8 @@ async-recursion = { workspace = true } async-trait = { workspace = true } blake3 = { workspace = true } chrono = { workspace = true } -derivative = { workspace = true } derive_more = { workspace = true } +flate2 = { workspace = true } futures = { workspace = true } indent_write = { workspace = true } itertools = { workspace = true } @@ -21,32 +23,36 @@ regex = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } siphasher = { workspace = true } -thiserror = { workspace = true } tokio = { workspace = true } tokio-stream = { workspace = true } tonic = { workspace = true } tracing = { workspace = true } -tracing-subscriber = { workspace = true } +zstd = { workspace = true } dice = { workspace = true } dupe = { workspace = true } gazebo = { workspace = true } -allocative = { workspace = true } -more_futures = { workspace = true} starlark_map = { workspace = true } buck2_artifact = { workspace = true } buck2_build_api = { workspace = true } +buck2_certs = { workspace = true } +buck2_cli_proto = { workspace = true } buck2_common = { workspace = true } buck2_core = { workspace = true } buck2_data = { workspace = true } +buck2_directory = { workspace = true } +buck2_error = { workspace = true } buck2_events = { workspace = true } buck2_execute = { workspace = true } +buck2_futures = { workspace = true } +buck2_install_proto = { workspace = true } buck2_interpreter = { workspace = true } buck2_node = { workspace = true } +buck2_profile = { workspace = true } buck2_query = { workspace = true } buck2_server_ctx = { workspace = true } -buck2_cli_proto = { workspace = true } buck2_util = { workspace = true } -buck2_install_proto = { workspace = true } -buck2_wrapper_common = { workspace = true } + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ["cfg(fbcode_build)"] } diff --git a/app/buck2_server_commands/src/commands.rs b/app/buck2_server_commands/src/commands.rs new file mode 100644 index 0000000000000..4e2c7b191ea20 --- /dev/null +++ b/app/buck2_server_commands/src/commands.rs @@ -0,0 +1,20 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub mod build; +pub mod complete; +pub mod ctargets; +pub mod debug_eval; +pub mod expand_external_cell; +pub mod explain; +pub(crate) mod init_commands; +pub mod install; +pub mod query; +pub mod targets; +pub mod targets_show_outputs; diff --git a/app/buck2_server_commands/src/commands/build.rs b/app/buck2_server_commands/src/commands/build.rs new file mode 100644 index 0000000000000..2da9baf829595 --- /dev/null +++ b/app/buck2_server_commands/src/commands/build.rs @@ -0,0 +1,658 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::io::BufWriter; +use std::io::Write; +use std::sync::Arc; + +use anyhow::Context as _; +use async_trait::async_trait; +use buck2_artifact::artifact::artifact_dump::ArtifactInfo; +use buck2_artifact::artifact::artifact_dump::ArtifactMetadataJson; +use buck2_artifact::artifact::artifact_dump::DirectoryInfo; +use buck2_artifact::artifact::artifact_dump::ExternalSymlinkInfo; +use buck2_artifact::artifact::artifact_dump::FileInfo; +use buck2_artifact::artifact::artifact_dump::SymlinkInfo; +use buck2_build_api::actions::artifact::get_artifact_fs::GetArtifactFs; +use buck2_build_api::build; +use buck2_build_api::build::build_report::build_report_opts; +use buck2_build_api::build::build_report::generate_build_report; +use buck2_build_api::build::BuildEvent; +use buck2_build_api::build::BuildTargetResult; +use buck2_build_api::build::ConfiguredBuildEvent; +use buck2_build_api::build::HasCreateUnhashedSymlinkLock; +use buck2_build_api::build::ProviderArtifacts; +use buck2_build_api::build::ProvidersToBuild; +use buck2_build_api::materialize::MaterializationContext; +use buck2_cli_proto::build_request::build_providers::Action as BuildProviderAction; +use buck2_cli_proto::build_request::BuildProviders; +use buck2_cli_proto::build_request::Materializations; +use buck2_cli_proto::CommonBuildOptions; +use buck2_common::dice::cells::HasCellResolver; +use buck2_common::global_cfg_options::GlobalCfgOptions; +use buck2_common::legacy_configs::dice::HasLegacyConfigs; +use buck2_common::legacy_configs::key::BuckconfigKeyRef; +use buck2_common::pattern::parse_from_cli::parse_patterns_from_cli_args; +use buck2_common::pattern::resolve::ResolveTargetPatterns; +use buck2_common::pattern::resolve::ResolvedPattern; +use buck2_core::fs::artifact_path_resolver::ArtifactFs; +use buck2_core::package::PackageLabel; +use buck2_core::pattern::pattern::PackageSpec; +use buck2_core::pattern::pattern::ParsedPattern; +use buck2_core::pattern::pattern_type::ConfiguredProvidersPatternExtra; +use buck2_core::pattern::pattern_type::ProvidersPatternExtra; +use buck2_core::provider::label::ProvidersLabel; +use buck2_core::provider::label::ProvidersName; +use buck2_core::target::label::label::TargetLabel; +use buck2_directory::directory::directory::Directory; +use buck2_directory::directory::directory_iterator::DirectoryIterator; +use buck2_directory::directory::entry::DirectoryEntry; +use buck2_error::BuckErrorContext; +use 
buck2_events::dispatch::console_message; +use buck2_events::dispatch::span_async; +use buck2_events::dispatch::span_async_simple; +use buck2_events::errors::create_error_report; +use buck2_execute::directory::ActionDirectoryBuilder; +use buck2_execute::directory::ActionDirectoryMember; +use buck2_node::configured_universe::CqueryUniverse; +use buck2_node::load_patterns::MissingTargetBehavior; +use buck2_node::nodes::frontend::TargetGraphCalculation; +use buck2_node::nodes::unconfigured::TargetNode; +use buck2_node::target_calculation::ConfiguredTargetCalculation; +use buck2_server_ctx::commands::send_target_cfg_event; +use buck2_server_ctx::ctx::ServerCommandContextTrait; +use buck2_server_ctx::partial_result_dispatcher::NoPartialResult; +use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; +use buck2_server_ctx::target_resolution_config::TargetResolutionConfig; +use buck2_server_ctx::template::run_server_command; +use buck2_server_ctx::template::ServerCommandTemplate; +use dice::DiceTransaction; +use dice::LinearRecomputeDiceComputations; +use dupe::Dupe; +use futures::future::FutureExt; +use futures::stream::futures_unordered::FuturesUnordered; +use futures::stream::Stream; +use futures::stream::StreamExt; +use itertools::Either; +use itertools::Itertools; +use serde::ser::SerializeSeq; +use serde::ser::Serializer; + +use crate::commands::build::result_report::ResultReporter; +use crate::commands::build::result_report::ResultReporterOptions; +use crate::commands::build::unhashed_outputs::create_unhashed_outputs; + +#[allow(unused)] +mod result_report; +mod unhashed_outputs; + +pub(crate) async fn build_command( + ctx: &dyn ServerCommandContextTrait, + partial_result_dispatcher: PartialResultDispatcher<NoPartialResult>, + req: buck2_cli_proto::BuildRequest, +) -> anyhow::Result<buck2_cli_proto::BuildResponse> { + run_server_command(BuildServerCommand { req }, ctx, partial_result_dispatcher).await +} + +struct BuildServerCommand { + req: buck2_cli_proto::BuildRequest, +} + +#[async_trait] +impl ServerCommandTemplate for BuildServerCommand { + type StartEvent = buck2_data::BuildCommandStart; + type EndEvent = buck2_data::BuildCommandEnd; + type Response = buck2_cli_proto::BuildResponse; + type PartialResult = NoPartialResult; + + fn end_event(&self, _response: &buck2_error::Result<Self::Response>) -> Self::EndEvent { + buck2_data::BuildCommandEnd { + unresolved_target_patterns: self + .req + .target_patterns + .iter() + .map(|p| buck2_data::TargetPattern { value: p.clone() }) + .collect(), + } + } + + async fn command( + &self, + server_ctx: &dyn ServerCommandContextTrait, + _partial_result_dispatcher: PartialResultDispatcher<Self::PartialResult>, + ctx: DiceTransaction, + ) -> anyhow::Result<Self::Response> { + build(server_ctx, ctx, &self.req).await + } + + fn is_success(&self, response: &Self::Response) -> bool { + response.errors.is_empty() + } + + fn additional_telemetry_errors( + &self, + response: &Self::Response, + ) -> Vec<buck2_data::ErrorReport> { + response.errors.clone() + } +} + +fn expect_build_opts(req: &buck2_cli_proto::BuildRequest) -> &CommonBuildOptions { + req.build_opts.as_ref().expect("should have build options") +} + +async fn dump_artifacts_to_file( + path: &str, + provider_artifacts: &[ProviderArtifacts], + artifact_fs: &ArtifactFs, +) -> anyhow::Result<()> { + let file = std::fs::File::create(path).context("Failed to create output hash file")?; + let writer = BufWriter::new(file); + let mut ser = serde_json::Serializer::new(writer); + let mut seq = ser + .serialize_seq(None) + .context("Failed to write vec to output hash file")?; + + let mut dir =
ActionDirectoryBuilder::empty(); + for artifact in provider_artifacts { + artifact.values.add_to_directory(&mut dir, artifact_fs)?; + } + for (entry_path, entry) in dir.unordered_walk().with_paths() { + let info = match entry { + DirectoryEntry::Dir(_) => ArtifactInfo::Directory(DirectoryInfo {}), + DirectoryEntry::Leaf(ActionDirectoryMember::File(metadata)) => { + let cas_digest = metadata.digest.data(); + ArtifactInfo::File(FileInfo { + digest: cas_digest, + digest_kind: cas_digest.raw_digest().algorithm(), + is_exec: metadata.is_executable, + }) + } + DirectoryEntry::Leaf(ActionDirectoryMember::Symlink(symlink_target)) => { + ArtifactInfo::Symlink(SymlinkInfo { + symlink_rel_path: symlink_target.target(), + }) + } + DirectoryEntry::Leaf(ActionDirectoryMember::ExternalSymlink(external_symlink)) => { + ArtifactInfo::ExternalSymlink(ExternalSymlinkInfo { + target: external_symlink.target(), + remaining_path: if external_symlink.remaining_path().is_empty() { + None + } else { + Some(external_symlink.remaining_path()) + }, + }) + } + }; + + let artifact_meta_json = ArtifactMetadataJson { + path: &entry_path, + info, + }; + seq.serialize_element(&artifact_meta_json) + .context("Failed to write data to output hash file")?; + } + + seq.end() + .context("Failed to write vec end to output hash file")?; + ser.into_inner() + .flush() + .context("Failed to flush output hash file")?; + Ok(()) +} + +async fn build( + server_ctx: &dyn ServerCommandContextTrait, + mut ctx: DiceTransaction, + request: &buck2_cli_proto::BuildRequest, +) -> anyhow::Result<buck2_cli_proto::BuildResponse> { + let cwd = server_ctx.working_dir(); + + let build_opts = expect_build_opts(request); + + let cell_resolver = ctx.get_cell_resolver().await?; + + let parsed_patterns: Vec<ParsedPattern<ConfiguredProvidersPatternExtra>> = + parse_patterns_from_cli_args(&mut ctx, &request.target_patterns, cwd).await?; + server_ctx.log_target_pattern(&parsed_patterns); + + let resolved_pattern: ResolvedPattern<ConfiguredProvidersPatternExtra> = + ResolveTargetPatterns::resolve(&mut ctx, &parsed_patterns).await?; + + let target_resolution_config = TargetResolutionConfig::from_args( + &mut ctx, + request + .target_cfg + .as_ref() + .internal_error_anyhow("target_cfg must be set")?, + server_ctx, + &request.target_universe, + ) + .await?; + + let build_providers = Arc::new(request.build_providers.clone().unwrap()); + + let final_artifact_materializations = + Materializations::from_i32(request.final_artifact_materializations) + .with_context(|| "Invalid final_artifact_materializations") + .unwrap(); + + let want_configured_graph_size = ctx + .parse_legacy_config_property( + cell_resolver.root_cell(), + BuckconfigKeyRef { + section: "buck2", + property: "log_configured_graph_size", + }, + ) + .await?
+ .unwrap_or_default(); + + let build_result = ctx + .with_linear_recompute(|ctx| async move { + build_targets( + &ctx, + resolved_pattern, + target_resolution_config, + build_providers, + &final_artifact_materializations.into(), + build_opts.fail_fast, + MissingTargetBehavior::from_skip(build_opts.skip_missing_targets), + build_opts.skip_incompatible_targets, + want_configured_graph_size, + ) + .await + }) + .await?; + + send_target_cfg_event( + server_ctx.events(), + build_result.configured.keys(), + &request.target_cfg, + ); + + process_build_result(server_ctx, ctx, request, build_result).await +} + +async fn process_build_result( + server_ctx: &dyn ServerCommandContextTrait, + mut ctx: DiceTransaction, + request: &buck2_cli_proto::BuildRequest, + build_result: BuildTargetResult, +) -> anyhow::Result<buck2_cli_proto::BuildResponse> { + let fs = server_ctx.project_root(); + let cwd = server_ctx.working_dir(); + + let build_opts = expect_build_opts(request); + let response_options = request.response_options.clone().unwrap_or_default(); + + let cell_resolver = ctx.get_cell_resolver().await?; + let artifact_fs = ctx.get_artifact_fs().await?; + + let result_reports = ResultReporter::convert( + &artifact_fs, + server_ctx.cert_state(), + ResultReporterOptions { + return_outputs: response_options.return_outputs, + return_default_other_outputs: response_options.return_default_other_outputs, + }, + &build_result, + ) + .await; + + let serialized_build_report = if build_opts.unstable_print_build_report { + let build_report_opts = build_report_opts(&mut ctx, &cell_resolver, build_opts).await?; + + generate_build_report( + build_report_opts, + &artifact_fs, + &cell_resolver, + fs, + cwd, + server_ctx.events().trace_id(), + &build_result.configured, + &build_result.other_errors, + )? + } else { + None + }; + + let mut provider_artifacts = Vec::new(); + for v in build_result.configured.into_values() { + // We omit skipped targets here. + let Some(v) = v else { continue }; + let mut outputs = v.outputs.into_iter().filter_map(|output| match output { + Ok(output) => Some(output), + _ => None, + }); + provider_artifacts.extend(&mut outputs); + } + + if let Some(output_hashes_file) = &request.output_hashes_file { + span_async_simple( + buck2_data::CreateOutputHashesFileStart {}, + async { + dump_artifacts_to_file(output_hashes_file, &provider_artifacts, &artifact_fs) + .await + .with_context(|| { + format!("Failed to write output hashes file to {output_hashes_file}",) + }) + }, + buck2_data::CreateOutputHashesFileEnd {}, + ) + .await?; + } + + let should_create_unhashed_links = ctx + .parse_legacy_config_property( + cell_resolver.root_cell(), + BuckconfigKeyRef { + section: "buck2", + property: "create_unhashed_links", + }, + ) + .await?; + + if should_create_unhashed_links.unwrap_or(false) { + span_async(buck2_data::CreateOutputSymlinksStart {}, async { + let lock = ctx + .per_transaction_data() + .get_create_unhashed_symlink_lock(); + let _guard = lock.lock().await; + let res = create_unhashed_outputs(provider_artifacts, &artifact_fs, fs); + + let created = match res.as_ref() { + Ok(n) => *n, + Err(..)
=> 0, + }; + (res, buck2_data::CreateOutputSymlinksEnd { created }) + }) + .await?; + } + + let build_targets = result_reports.build_targets; + let errors = result_reports + .build_errors + .errors + .iter() + .map(create_error_report) + .unique_by(|e| e.message.clone()) + .collect(); + + let project_root = server_ctx.project_root().to_string(); + + Ok(buck2_cli_proto::BuildResponse { + build_targets, + project_root, + serialized_build_report, + errors, + }) +} + +async fn build_targets( + ctx: &LinearRecomputeDiceComputations<'_>, + spec: ResolvedPattern<ConfiguredProvidersPatternExtra>, + target_resolution_config: TargetResolutionConfig, + build_providers: Arc<BuildProviders>, + materialization: &MaterializationContext, + fail_fast: bool, + missing_target_behavior: MissingTargetBehavior, + skip_incompatible_targets: bool, + want_configured_graph_size: bool, +) -> anyhow::Result<BuildTargetResult> { + let stream = match target_resolution_config { + TargetResolutionConfig::Default(global_cfg_options) => { + let spec = spec.convert_pattern().context( + "Targets with explicit configuration can only be built when the `--target-universe=` flag is provided", + )?; + build_targets_with_global_target_platform( + ctx, + spec, + global_cfg_options, + build_providers, + materialization, + missing_target_behavior, + skip_incompatible_targets, + want_configured_graph_size, + ) + .left_stream() + } + TargetResolutionConfig::Universe(universe) => build_targets_in_universe( + ctx, + spec, + universe, + build_providers, + materialization, + want_configured_graph_size, + ) + .map(BuildEvent::Configured) + .right_stream(), + }; + + BuildTargetResult::collect_stream(stream, fail_fast).await +} + +fn build_targets_in_universe<'a>( + ctx: &'a LinearRecomputeDiceComputations, + spec: ResolvedPattern<ConfiguredProvidersPatternExtra>, + universe: CqueryUniverse, + build_providers: Arc<BuildProviders>, + materialization: &'a MaterializationContext, + want_configured_graph_size: bool, +) -> impl Stream<Item = ConfiguredBuildEvent> + Unpin + 'a { + let providers_to_build = build_providers_to_providers_to_build(&build_providers); + let provider_labels = universe.get_provider_labels(&spec); + provider_labels + .into_iter() + .map(|p| { + let providers_to_build = providers_to_build.clone(); + async move { + build::build_configured_label( + ctx, + materialization, + p, + &providers_to_build, + build::BuildConfiguredLabelOptions { + skippable: false, + want_configured_graph_size, + }, + ) + .await + } + }) + .collect::<FuturesUnordered<_>>() + .flatten_unordered(None) +} + +fn build_targets_with_global_target_platform<'a>( + ctx: &'a LinearRecomputeDiceComputations<'_>, + spec: ResolvedPattern<ProvidersPatternExtra>, + global_cfg_options: GlobalCfgOptions, + build_providers: Arc<BuildProviders>, + materialization: &'a MaterializationContext, + missing_target_behavior: MissingTargetBehavior, + skip_incompatible_targets: bool, + want_configured_graph_size: bool, +) -> impl Stream<Item = BuildEvent> + Unpin + 'a { + futures::stream::iter(spec.specs.into_iter().map(move |(package, spec)| { + build_targets_for_spec( + ctx, + spec, + package, + global_cfg_options.dupe(), + build_providers.dupe(), + materialization, + missing_target_behavior, + skip_incompatible_targets, + want_configured_graph_size, + ) + .boxed() + .flatten_stream() + })) + .flatten_unordered(None) +}
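// A runnable sketch, assuming only the `futures` crate and tokio, of the
// `collect::<FuturesUnordered<_>>()` / `flatten_unordered(None)` combination
// that `build_targets_in_universe` above uses: many futures each resolve to a
// stream of events, and the streams are merged into one unordered event stream.
use futures::stream::{self, FuturesUnordered, StreamExt};

#[tokio::main]
async fn main() {
    // Each "target" yields its own little stream of events.
    let per_target =
        (0..3).map(|target| async move { stream::iter(vec![target * 10, target * 10 + 1]) });

    // FuturesUnordered polls the futures concurrently; flatten_unordered then
    // interleaves the resulting streams with no concurrency limit (None).
    let mut events: Vec<i32> = per_target
        .collect::<FuturesUnordered<_>>()
        .flatten_unordered(None)
        .collect()
        .await;

    events.sort_unstable();
    assert_eq!(events, vec![0, 1, 10, 11, 20, 21]);
}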
+struct TargetBuildSpec { + target: TargetNode, + providers: ProvidersName, + global_cfg_options: GlobalCfgOptions, + // Indicates whether this target was explicitly requested or not. If it's the result + // of something like `//foo/...` we can skip it (for example if it's incompatible with + // the target platform). + skippable: bool, + want_configured_graph_size: bool, +} + +fn build_providers_to_providers_to_build(build_providers: &BuildProviders) -> ProvidersToBuild { + let mut providers_to_build = ProvidersToBuild::default(); + + if build_providers.default_info != BuildProviderAction::Skip as i32 { + providers_to_build.default = true; + providers_to_build.default_other = true; + } + + if build_providers.test_info != BuildProviderAction::Skip as i32 { + providers_to_build.tests = true; + } + + if build_providers.run_info != BuildProviderAction::Skip as i32 { + providers_to_build.run = true; + } + + providers_to_build +} + +async fn build_targets_for_spec<'a>( + ctx: &'a LinearRecomputeDiceComputations<'_>, + spec: PackageSpec<ProvidersPatternExtra>, + package: PackageLabel, + global_cfg_options: GlobalCfgOptions, + build_providers: Arc<BuildProviders>, + materialization: &'a MaterializationContext, + missing_target_behavior: MissingTargetBehavior, + skip_incompatible_targets: bool, + want_configured_graph_size: bool, +) -> impl Stream<Item = BuildEvent> + 'a { + let skippable = match spec { + PackageSpec::Targets(..) => skip_incompatible_targets, + PackageSpec::All => true, + }; + + let res = match ctx.get().get_interpreter_results(package.dupe()).await { + Ok(res) => res, + Err(e) => { + let e: buck2_error::Error = e.into(); + // Try to associate the error to concrete targets, if possible + let targets = match spec { + PackageSpec::Targets(targets) => Either::Left( + targets + .into_iter() + .map(move |(t, providers)| { + ProvidersLabel::new( + TargetLabel::new(package.dupe(), t.as_ref()), + providers.providers, + ) + }) + .map(Some), + ), + PackageSpec::All => Either::Right(std::iter::once(None)), + }; + return futures::stream::iter(targets.into_iter().map(move |t| { + BuildEvent::OtherError { + label: t, + err: e.dupe(), + } + })) + .left_stream(); + } + }; + let (targets, missing) = res.apply_spec(spec); + let missing_target_stream = match (missing, missing_target_behavior) { + (Some(missing), MissingTargetBehavior::Fail) => { + let (first, rest) = missing.into_errors(); + futures::stream::iter(std::iter::once(first).chain(rest).map(|err| { + BuildEvent::OtherError { + label: Some(ProvidersLabel::new( + TargetLabel::new(err.package.dupe(), err.target.as_ref()), + ProvidersName::Default, + )), + err: err.into(), + } + })) + .left_stream() + }
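// A small sketch of the wire-enum-to-flags translation that
// `build_providers_to_providers_to_build` above performs; `Action` and `Flags`
// are simplified stand-ins for the proto-generated types.
#[repr(i32)]
enum Action {
    Skip = 0,
    Build = 1,
}

#[derive(Debug, PartialEq)]
struct Flags {
    default: bool,
    tests: bool,
    run: bool,
}

fn to_flags(default_info: i32, test_info: i32, run_info: i32) -> Flags {
    // Anything other than an explicit Skip requests that provider group,
    // mirroring the `!= BuildProviderAction::Skip as i32` comparisons above.
    Flags {
        default: default_info != Action::Skip as i32,
        tests: test_info != Action::Skip as i32,
        run: run_info != Action::Skip as i32,
    }
}

fn main() {
    let flags = to_flags(Action::Build as i32, Action::Skip as i32, Action::Build as i32);
    assert_eq!(flags, Flags { default: true, tests: false, run: true });
}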
+ (Some(missing), MissingTargetBehavior::Warn) => { + // TODO: This should be reported in the build report eventually. + console_message(missing.missing_targets_warning()); + futures::stream::empty().right_stream() + } + (None, _) => futures::stream::empty().right_stream(), + }; + + let todo_targets: Vec<TargetBuildSpec> = targets + .into_iter() + .map(|((_target_name, extra), target)| TargetBuildSpec { + target, + providers: extra.providers, + global_cfg_options: global_cfg_options.dupe(), + skippable, + want_configured_graph_size, + }) + .collect(); + + let providers_to_build = build_providers_to_providers_to_build(&build_providers); + + todo_targets + .into_iter() + .map(|build_spec| { + let providers_to_build = providers_to_build.clone(); + async move { build_target(ctx, build_spec, &providers_to_build, materialization).await } + }) + .collect::<FuturesUnordered<_>>() + .flatten_unordered(None) + .chain(missing_target_stream) + .right_stream() +} + +async fn build_target<'a>( + ctx: &'a LinearRecomputeDiceComputations<'_>, + spec: TargetBuildSpec, + providers_to_build: &ProvidersToBuild, + materialization: &'a MaterializationContext, +) -> impl Stream<Item = BuildEvent> + 'a { + let providers_label = ProvidersLabel::new(spec.target.label().dupe(), spec.providers); + let providers_label = match ctx + .get() + .get_configured_provider_label(&providers_label, &spec.global_cfg_options) + .await + { + Ok(l) => l, + Err(e) => { + return futures::stream::once(futures::future::ready(BuildEvent::OtherError { + label: Some(providers_label), + err: e.into(), + })) + .left_stream(); + } + }; + + build::build_configured_label( + ctx, + materialization, + providers_label, + providers_to_build, + build::BuildConfiguredLabelOptions { + skippable: spec.skippable, + want_configured_graph_size: spec.want_configured_graph_size, + }, + ) + .await + .map(BuildEvent::Configured) + .right_stream() +} diff --git a/app/buck2_server_commands/src/commands/build/mod.rs b/app/buck2_server_commands/src/commands/build/mod.rs deleted file mode 100644 index 1f2ba11c8bb6a..0000000000000 --- a/app/buck2_server_commands/src/commands/build/mod.rs +++ /dev/null @@ -1,565 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree.
- */ - -use std::collections::BTreeMap; -use std::io::BufWriter; -use std::sync::Arc; - -use anyhow::Context as _; -use async_trait::async_trait; -use buck2_build_api::actions::artifact::get_artifact_fs::GetArtifactFs; -use buck2_build_api::build; -use buck2_build_api::build::BuildEvent; -use buck2_build_api::build::BuildTargetResult; -use buck2_build_api::build::ConvertMaterializationContext; -use buck2_build_api::build::HasCreateUnhashedSymlinkLock; -use buck2_build_api::build::MaterializationContext; -use buck2_build_api::build::ProvidersToBuild; -use buck2_build_api::query::oneshot::QUERY_FRONTEND; -use buck2_cli_proto::build_request::build_providers::Action as BuildProviderAction; -use buck2_cli_proto::build_request::BuildProviders; -use buck2_cli_proto::build_request::Materializations; -use buck2_cli_proto::HasClientContext; -use buck2_common::dice::cells::HasCellResolver; -use buck2_common::dice::file_ops::HasFileOps; -use buck2_common::legacy_configs::dice::HasLegacyConfigs; -use buck2_common::pattern::resolve::resolve_target_patterns; -use buck2_common::pattern::resolve::ResolvedPattern; -use buck2_core::fs::fs_util; -use buck2_core::pattern::pattern_type::ConfiguredProvidersPatternExtra; -use buck2_core::pattern::pattern_type::ProvidersPatternExtra; -use buck2_core::pattern::PackageSpec; -use buck2_core::pattern::ParsedPattern; -use buck2_core::provider::label::ConfiguredProvidersLabel; -use buck2_core::provider::label::ProvidersLabel; -use buck2_core::provider::label::ProvidersName; -use buck2_core::target::label::TargetLabel; -use buck2_events::dispatch::console_message; -use buck2_events::dispatch::span_async; -use buck2_node::configured_universe::CqueryUniverse; -use buck2_node::load_patterns::MissingTargetBehavior; -use buck2_node::nodes::eval_result::EvaluationResult; -use buck2_node::nodes::frontend::TargetGraphCalculation; -use buck2_node::nodes::unconfigured::TargetNode; -use buck2_node::target_calculation::ConfiguredTargetCalculation; -use buck2_server_ctx::ctx::ServerCommandContextTrait; -use buck2_server_ctx::partial_result_dispatcher::NoPartialResult; -use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; -use buck2_server_ctx::pattern::parse_patterns_from_cli_args; -use buck2_server_ctx::pattern::target_platform_from_client_context; -use buck2_server_ctx::template::run_server_command; -use buck2_server_ctx::template::ServerCommandTemplate; -use dice::DiceComputations; -use dice::DiceTransaction; -use dupe::Dupe; -use futures::future::FutureExt; -use futures::future::TryFutureExt; -use futures::stream::futures_unordered::FuturesUnordered; -use futures::stream::Stream; -use futures::stream::StreamExt; -use itertools::Itertools; - -use crate::commands::build::results::build_report::BuildReportCollector; -use crate::commands::build::results::result_report::ResultReporter; -use crate::commands::build::results::result_report::ResultReporterOptions; -use crate::commands::build::results::BuildOwner; -use crate::commands::build::results::BuildResultCollector; -use crate::commands::build::unhashed_outputs::create_unhashed_outputs; - -mod results; -mod unhashed_outputs; - -pub(crate) async fn build_command( - ctx: &dyn ServerCommandContextTrait, - partial_result_dispatcher: PartialResultDispatcher<NoPartialResult>, - req: buck2_cli_proto::BuildRequest, -) -> anyhow::Result<buck2_cli_proto::BuildResponse> { - run_server_command(BuildServerCommand { req }, ctx, partial_result_dispatcher).await -} - -struct BuildServerCommand { - req: buck2_cli_proto::BuildRequest, -} - -#[async_trait] -impl
ServerCommandTemplate for BuildServerCommand { - type StartEvent = buck2_data::BuildCommandStart; - type EndEvent = buck2_data::BuildCommandEnd; - type Response = buck2_cli_proto::BuildResponse; - type PartialResult = NoPartialResult; - - fn end_event(&self, _response: &anyhow::Result<Self::Response>) -> Self::EndEvent { - buck2_data::BuildCommandEnd { - unresolved_target_patterns: self.req.target_patterns.clone(), - } - } - - async fn command( - &self, - server_ctx: &dyn ServerCommandContextTrait, - _partial_result_dispatcher: PartialResultDispatcher<Self::PartialResult>, - ctx: DiceTransaction, - ) -> anyhow::Result<Self::Response> { - build(server_ctx, ctx, &self.req).await - } - - fn is_success(&self, response: &Self::Response) -> bool { - response.error_messages.is_empty() - } -} - -enum TargetResolutionConfig { - /// Resolve using target platform. - Default(Option<TargetLabel>), - /// Resolve in the universe. - Universe(CqueryUniverse), -} - -async fn build( - server_ctx: &dyn ServerCommandContextTrait, - mut ctx: DiceTransaction, - request: &buck2_cli_proto::BuildRequest, -) -> anyhow::Result<buck2_cli_proto::BuildResponse> { - // TODO(nmj): Move build report printing logic out of here. - let fs = server_ctx.project_root(); - let cwd = server_ctx.working_dir(); - - let build_opts = request - .build_opts - .as_ref() - .expect("should have build options"); - - let cell_resolver = ctx.get_cell_resolver().await?; - - let client_ctx = request.client_context()?; - let global_target_platform = - target_platform_from_client_context(client_ctx, server_ctx, &mut ctx).await?; - - let should_create_unhashed_links = ctx - .parse_legacy_config_property(cell_resolver.root_cell(), "buck2", "create_unhashed_links") - .await?; - - let want_configured_graph_size = ctx - .parse_legacy_config_property( - cell_resolver.root_cell(), - "buck2", - "log_configured_graph_size", - ) - .await? - .unwrap_or_default(); - - let parsed_patterns: Vec<ParsedPattern<ConfiguredProvidersPatternExtra>> = - parse_patterns_from_cli_args(&mut ctx, &request.target_patterns, cwd).await?; - server_ctx.log_target_pattern(&parsed_patterns); - - let resolved_pattern: ResolvedPattern<ConfiguredProvidersPatternExtra> = - resolve_target_patterns(&cell_resolver, &parsed_patterns, &ctx.file_ops()).await?; - - let target_resolution_config: TargetResolutionConfig = if request.target_universe.is_empty() { - TargetResolutionConfig::Default(global_target_platform) - } else { - TargetResolutionConfig::Universe( - QUERY_FRONTEND - .get()? - .universe_from_literals(&ctx, cwd, &request.target_universe, global_target_platform) - .await?, - ) - }; - - let artifact_fs = ctx.get_artifact_fs().await?; - let build_providers = Arc::new(request.build_providers.clone().unwrap()); - let response_options = request.response_options.clone().unwrap_or_default(); - - let mut result_collector = ResultReporter::new( - &artifact_fs, - ResultReporterOptions { - return_outputs: response_options.return_outputs, - return_default_other_outputs: response_options.return_default_other_outputs, - }, - ); - - let mut build_report_collector = if build_opts.unstable_print_build_report { - Some(BuildReportCollector::new( - server_ctx.events().trace_id(), - &artifact_fs, - server_ctx.project_root(), - ctx.parse_legacy_config_property( - cell_resolver.root_cell(), - "build_report", - "print_unconfigured_section", - ) - .await? - .unwrap_or(true), - ctx.parse_legacy_config_property( - cell_resolver.root_cell(), - "build_report", - "unstable_include_other_outputs", - ) - .await?
- .unwrap_or(false), - )) - } else { - None - }; - - let mut result_collectors = vec![ - Some(&mut result_collector as &mut dyn BuildResultCollector), - build_report_collector - .as_mut() - .map(|v| v as &mut dyn BuildResultCollector), - ] - .into_iter() - .flatten() - .collect::<Vec<_>>(); - - let final_artifact_materializations = - Materializations::from_i32(request.final_artifact_materializations) - .with_context(|| "Invalid final_artifact_materializations") - .unwrap(); - let materialization_context = - ConvertMaterializationContext::from(final_artifact_materializations); - - let mut provider_artifacts = Vec::new(); - for (k, v) in build_targets( - &ctx, - resolved_pattern, - target_resolution_config, - build_providers, - &materialization_context, - build_opts.fail_fast, - MissingTargetBehavior::from_skip(build_opts.skip_missing_targets), - build_opts.skip_incompatible_targets, - want_configured_graph_size, - ) - .await? - { - result_collectors.collect_result(&BuildOwner::Target(&k), &v); - let mut outputs = v.outputs.into_iter().filter_map(|output| match output { - Ok(output) => Some(output), - _ => None, - }); - provider_artifacts.extend(&mut outputs); - } - - if should_create_unhashed_links.unwrap_or(false) { - span_async(buck2_data::CreateOutputSymlinksStart {}, async { - let lock = ctx - .per_transaction_data() - .get_create_unhashed_symlink_lock(); - let _guard = lock.lock().await; - let res = create_unhashed_outputs(provider_artifacts, &artifact_fs, fs); - - let created = match res.as_ref() { - Ok(n) => *n, - Err(..) => 0, - }; - (res, buck2_data::CreateOutputSymlinksEnd { created }) - }) - .await?; - } - - let mut serialized_build_report = None; - if let Some(build_report_collector) = build_report_collector { - let report = build_report_collector.into_report(); - if !build_opts.unstable_build_report_filename.is_empty() { - let file = fs_util::create_file( - fs.resolve(cwd) - .as_abs_path() - .join(&build_opts.unstable_build_report_filename), - ) - .context("Error writing build report")?; - let mut file = BufWriter::new(file); - serde_json::to_writer_pretty(&mut file, &report)? - } else { - serialized_build_report = Some(serde_json::to_string(&report)?); - }; - } - - // TODO(nmj): The BuildResult / BuildResponse will eventually return all of the - // data back to the CLI client, and all build report generation will happen there. - // For now, we're going to be a little hacky to remove some stdout printing that - // used to exist here.
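// A condensed sketch of the report-writing branch in the deleted code above,
// assuming std + serde/serde_json: pretty JSON is streamed through a BufWriter
// when `unstable_build_report_filename` is set, and returned as a string
// otherwise. `Report` is a hypothetical stand-in for the real report type.
use std::fs::File;
use std::io::{BufWriter, Write};

use serde::Serialize;

#[derive(Serialize)]
struct Report<'a> {
    targets: &'a [&'a str],
}

fn emit(report: &Report<'_>, path: Option<&str>) -> anyhow::Result<Option<String>> {
    match path {
        // A filename was configured: stream pretty JSON straight to disk.
        Some(path) => {
            let mut file = BufWriter::new(File::create(path)?);
            serde_json::to_writer_pretty(&mut file, report)?;
            file.flush()?;
            Ok(None)
        }
        // No filename: hand the serialized report back in the response instead.
        None => Ok(Some(serde_json::to_string(report)?)),
    }
}

fn main() -> anyhow::Result<()> {
    let report = Report { targets: &["//demo:target"] };
    assert!(emit(&report, None)?.is_some());
    Ok(())
}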
- let (build_targets, error_messages) = match result_collector.results() { - Ok(targets) => (targets, Vec::new()), - Err(errors) => { - let error_strings = errors - .errors - .iter() - .map(|e| format!("{:#}", e)) - .unique() - .collect(); - (vec![], error_strings) - } - }; - - let project_root = server_ctx.project_root().to_string(); - - Ok(buck2_cli_proto::BuildResponse { - build_targets, - project_root, - serialized_build_report: serialized_build_report.unwrap_or_default(), - error_messages, - }) -} - -async fn build_targets( - ctx: &DiceComputations, - spec: ResolvedPattern<ConfiguredProvidersPatternExtra>, - target_resolution_config: TargetResolutionConfig, - build_providers: Arc<BuildProviders>, - materialization_context: &MaterializationContext, - fail_fast: bool, - missing_target_behavior: MissingTargetBehavior, - skip_incompatible_targets: bool, - want_configured_graph_size: bool, -) -> anyhow::Result<BTreeMap<ConfiguredProvidersLabel, BuildTargetResult>> { - let stream = match target_resolution_config { - TargetResolutionConfig::Default(global_target_platform) => { - let spec = spec.convert_pattern().context( - "Cannot build with explicit configurations when universe is not specified", - )?; - build_targets_with_global_target_platform( - ctx, - spec, - global_target_platform, - build_providers, - materialization_context, - missing_target_behavior, - skip_incompatible_targets, - want_configured_graph_size, - ) - .left_stream() - } - TargetResolutionConfig::Universe(universe) => build_targets_in_universe( - ctx, - spec, - universe, - build_providers, - materialization_context, - want_configured_graph_size, - ) - .right_stream(), - }; - - // We omit skipped targets here. - let res = BuildTargetResult::collect_stream(stream, fail_fast) - .await? - .into_iter() - .filter_map(|(k, v)| Some((k, v?))) - .collect(); - - Ok(res) -} - -fn build_targets_in_universe<'a>( - ctx: &'a DiceComputations, - spec: ResolvedPattern<ConfiguredProvidersPatternExtra>, - universe: CqueryUniverse, - build_providers: Arc<BuildProviders>, - materialization_context: &'a MaterializationContext, - want_configured_graph_size: bool, -) -> impl Stream<Item = anyhow::Result<BuildEvent>> + Unpin + 'a { - let providers_to_build = build_providers_to_providers_to_build(&build_providers); - let provider_labels = universe.get_provider_labels(&spec); - provider_labels - .into_iter() - .map(|p| { - let providers_to_build = providers_to_build.clone(); - async move { - let res = build::build_configured_label( - ctx, - materialization_context, - p, - &providers_to_build, - build::BuildConfiguredLabelOptions { - skippable: false, - want_configured_graph_size, - }, - ) - .await; - - match res { - Ok(stream) => stream.map(Ok).left_stream(), - Err(e) => futures::stream::once(futures::future::ready(Err(e))).right_stream(), - } - } - .boxed() - }) - .collect::<FuturesUnordered<_>>() - .flatten_unordered(None) -} - -fn build_targets_with_global_target_platform<'a>( - ctx: &'a DiceComputations, - spec: ResolvedPattern<ProvidersPatternExtra>, - global_target_platform: Option<TargetLabel>, - build_providers: Arc<BuildProviders>, - materialization_context: &'a MaterializationContext, - missing_target_behavior: MissingTargetBehavior, - skip_incompatible_targets: bool, - want_configured_graph_size: bool, -) -> impl Stream<Item = anyhow::Result<BuildEvent>> + Unpin + 'a { - spec.specs - .into_iter() - .map(|(package, spec)| { - let build_providers = build_providers.dupe(); - let global_target_platform = global_target_platform.dupe(); - async move { - let res = ctx.get_interpreter_results(package.dupe()).await?; - anyhow::Ok(build_targets_for_spec( - ctx, - spec, - global_target_platform, - res, - build_providers, - materialization_context, - missing_target_behavior, - skip_incompatible_targets, - want_configured_graph_size, -
)) - } - }) - .collect::>() - .map(|res| match res { - Ok(stream) => stream.left_stream(), - Err(e) => futures::stream::once(futures::future::ready(Err(e))).right_stream(), - }) - .flatten_unordered(None) -} - -struct TargetBuildSpec { - target: TargetNode, - providers: ProvidersName, - global_target_platform: Option, - // Indicates whether this target was explicitly requested or not. If it's the result - // of something like `//foo/...` we can skip it (for example if it's incompatible with - // the target platform). - skippable: bool, - want_configured_graph_size: bool, -} - -fn build_providers_to_providers_to_build(build_providers: &BuildProviders) -> ProvidersToBuild { - let mut providers_to_build = ProvidersToBuild::default(); - - if build_providers.default_info != BuildProviderAction::Skip as i32 { - providers_to_build.default = true; - providers_to_build.default_other = true; - } - - if build_providers.test_info != BuildProviderAction::Skip as i32 { - providers_to_build.tests = true; - } - - if build_providers.run_info != BuildProviderAction::Skip as i32 { - providers_to_build.run = true; - } - - providers_to_build -} - -fn build_targets_for_spec<'a>( - ctx: &'a DiceComputations, - spec: PackageSpec, - global_target_platform: Option, - res: Arc, - build_providers: Arc, - materialization_context: &'a MaterializationContext, - missing_target_behavior: MissingTargetBehavior, - skip_incompatible_targets: bool, - want_configured_graph_size: bool, -) -> impl Stream> + Unpin + 'a { - async move { - let skippable = match spec { - PackageSpec::Targets(..) => skip_incompatible_targets, - PackageSpec::All => true, - }; - - let (targets, missing) = res.apply_spec(spec); - if let Some(missing) = missing { - match missing_target_behavior { - MissingTargetBehavior::Fail => { - return Err(missing.into_error()); - } - MissingTargetBehavior::Warn => { - // TODO: This should be reported in the build report eventually. 
-                    console_message(missing.missing_targets_warning());
-                }
-            }
-        }
-
-        let todo_targets: Vec = targets
-            .into_iter()
-            .map(|((_target_name, extra), target)| TargetBuildSpec {
-                target,
-                providers: extra.providers,
-                global_target_platform: global_target_platform.dupe(),
-                skippable,
-                want_configured_graph_size,
-            })
-            .collect();
-
-        let providers_to_build = build_providers_to_providers_to_build(&build_providers);
-
-        let stream = todo_targets
-            .into_iter()
-            .map(|build_spec| {
-                let providers_to_build = providers_to_build.clone();
-                async move {
-                    build_target(
-                        ctx,
-                        build_spec,
-                        &providers_to_build,
-                        materialization_context,
-                    )
-                    .await
-                }
-                .boxed()
-            })
-            .collect::>()
-            .flatten_unordered(None);
-
-        anyhow::Ok(stream)
-    }
-    .boxed()
-    .try_flatten_stream()
-}
-
-async fn build_target<'a>(
-    ctx: &'a DiceComputations,
-    spec: TargetBuildSpec,
-    providers_to_build: &ProvidersToBuild,
-    materialization_context: &MaterializationContext,
-) -> impl Stream> + 'a {
-    let res = async {
-        let providers_label = ctx
-            .get_configured_provider_label(
-                &ProvidersLabel::new(spec.target.label().dupe(), spec.providers),
-                spec.global_target_platform.as_ref(),
-            )
-            .await?;
-
-        build::build_configured_label(
-            ctx,
-            materialization_context,
-            providers_label,
-            providers_to_build,
-            build::BuildConfiguredLabelOptions {
-                skippable: spec.skippable,
-                want_configured_graph_size: spec.want_configured_graph_size,
-            },
-        )
-        .await
-    }
-    .await;
-
-    match res {
-        Ok(stream) => stream.map(Ok).left_stream(),
-        Err(e) => futures::stream::once(futures::future::ready(Err(e))).right_stream(),
-    }
-}
diff --git a/app/buck2_server_commands/src/commands/build/result_report.rs b/app/buck2_server_commands/src/commands/build/result_report.rs
new file mode 100644
index 0000000000000..7a70ccb9dfd99
--- /dev/null
+++ b/app/buck2_server_commands/src/commands/build/result_report.rs
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+//! Processing and reporting the results of the build
+
+use buck2_build_api::build::BuildProviderType;
+use buck2_build_api::build::BuildTargetResult;
+use buck2_build_api::build::ConfiguredBuildTargetResult;
+use buck2_build_api::build::ProviderArtifacts;
+use buck2_certs::validate::check_cert_state;
+use buck2_certs::validate::CertState;
+use buck2_core::configuration::compatibility::MaybeCompatible;
+use buck2_core::fs::artifact_path_resolver::ArtifactFs;
+use buck2_core::provider::label::ConfiguredProvidersLabel;
+use buck2_execute::artifact::artifact_dyn::ArtifactDyn;
+use dupe::Dupe;
+use starlark_map::small_map::SmallMap;
+
+mod proto {
+    pub use buck2_cli_proto::build_target::build_output::BuildOutputProviders;
+    pub use buck2_cli_proto::build_target::BuildOutput;
+    pub use buck2_cli_proto::BuildTarget;
+}
+
+/// Simple container for multiple [`buck2_error::Error`]s
+pub(crate) struct BuildErrors {
+    pub(crate) errors: Vec,
+}
+
+#[derive(Copy, Clone, Dupe)]
+pub(crate) struct ResultReporterOptions {
+    pub(crate) return_outputs: bool,
+    pub(crate) return_default_other_outputs: bool,
+}
+
+/// Collects build results into a Result, buck2_error::Errors>. If any targets
+/// fail, then the error case will be returned, otherwise a vec of all the successful results.
+pub(crate) struct ResultReporter<'a> {
+    artifact_fs: &'a ArtifactFs,
+    options: ResultReporterOptions,
+    results: Vec,
+}
+
+pub(crate) struct BuildTargetsAndErrors {
+    pub(crate) build_targets: Vec,
+    pub(crate) build_errors: BuildErrors,
+}
+
+impl<'a> ResultReporter<'a> {
+    pub(crate) async fn convert(
+        artifact_fs: &'a ArtifactFs,
+        cert_state: CertState,
+        options: ResultReporterOptions,
+        build_result: &BuildTargetResult,
+    ) -> BuildTargetsAndErrors {
+        let mut out = Self {
+            artifact_fs,
+            options,
+            results: Vec::new(),
+        };
+
+        let mut non_action_errors = vec![];
+        let mut action_errors = vec![];
+        non_action_errors.extend(build_result.other_errors.values().flatten().cloned());
+
+        for (k, v) in &build_result.configured {
+            // We omit skipped targets here.
+            let Some(v) = v else { continue };
+            non_action_errors.extend(v.errors.iter().cloned());
+            action_errors.extend(v.outputs.iter().filter_map(|x| x.as_ref().err()).cloned());
+
+            out.collect_result(k, v);
+        }
+
+        let mut error_list = if let Some(e) = non_action_errors.pop() {
+            // FIXME(JakobDegen): We'd like to return more than one error here, but we have
+            // to get better at error deduplication first
+            vec![e]
+        } else {
+            // FIXME: Only one non-action error or all action errors are returned currently
+            action_errors
+        };
+
+        if !error_list.is_empty() {
+            if let Some(e) = check_cert_state(cert_state).await {
+                error_list.push(e.into());
+            }
+        }
+
+        BuildTargetsAndErrors {
+            build_targets: out.results,
+            build_errors: BuildErrors { errors: error_list },
+        }
+    }
+
+    fn collect_result(
+        &mut self,
+        label: &ConfiguredProvidersLabel,
+        result: &ConfiguredBuildTargetResult,
+    ) {
+        let outputs = result
+            .outputs
+            .iter()
+            .filter_map(|output| output.as_ref().ok());
+
+        let artifacts = if self.options.return_outputs {
+            // NOTE: We use a SmallMap here to preserve the order the rule author wrote, all
+            // the while avoiding duplicates.
+            let mut artifacts = SmallMap::new();
+
+            for output in outputs {
+                let ProviderArtifacts {
+                    values,
+                    provider_type,
+                } = output;
+
+                if !self.options.return_default_other_outputs
+                    && matches!(provider_type, BuildProviderType::DefaultOther)
+                {
+                    continue;
+                }
+
+                for (artifact, _value) in values.iter() {
+                    let entry =
+                        artifacts
+                            .entry(artifact)
+                            .or_insert_with(|| proto::BuildOutputProviders {
+                                default_info: false,
+                                run_info: false,
+                                other: false,
+                                test_info: false,
+                            });
+
+                    match provider_type {
+                        BuildProviderType::Default => {
+                            entry.default_info = true;
+                        }
+                        BuildProviderType::DefaultOther => {
+                            entry.other = true;
+                        }
+                        BuildProviderType::Run => {
+                            entry.run_info = true;
+                        }
+                        BuildProviderType::Test => {
+                            entry.test_info = true;
+                        }
+                    }
+                }
+            }
+
+            let artifact_fs = &self.artifact_fs;
+
+            // Write it this way because `.into_iter()` gets rust-analyzer confused
+            IntoIterator::into_iter(artifacts)
+                .map(|(a, providers)| proto::BuildOutput {
+                    path: a.resolve_path(artifact_fs).unwrap().to_string(),
+                    providers: Some(providers),
+                })
+                .collect()
+        } else {
+            Vec::new()
+        };
+
+        let target = label.unconfigured().to_string();
+        let configuration = label.cfg().to_string();
+
+        let configured_graph_size = match &result.configured_graph_size {
+            Some(Ok(MaybeCompatible::Compatible(v))) => Some(*v),
+            Some(Ok(MaybeCompatible::Incompatible(..))) => None,
+            Some(Err(e)) => {
+                // We don't expect an error on this unless something else on this target
+                // failed.
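+                // Log at debug level and report the graph size as unavailable,
+                // rather than surfacing a second error for this target.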
+                tracing::debug!(
+                    "Graph size calculation failed for {}: {:#}",
+                    target,
+                    e
+                );
+                None
+            }
+            None => None,
+        };
+
+        self.results.push(proto::BuildTarget {
+            target,
+            configuration,
+            run_args: result.run_args.clone().unwrap_or_default(),
+            target_rule_type_name: result.target_rule_type_name.clone(),
+            outputs: artifacts,
+            configured_graph_size,
+        })
+    }
+}
diff --git a/app/buck2_server_commands/src/commands/build/results.rs b/app/buck2_server_commands/src/commands/build/results.rs
deleted file mode 100644
index 766d04786ad0d..0000000000000
--- a/app/buck2_server_commands/src/commands/build/results.rs
+++ /dev/null
@@ -1,448 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */
-
-//! Processing and reporting the the results of the build
-
-use buck2_build_api::build::BuildTargetResult;
-use buck2_core::provider::label::ConfiguredProvidersLabel;
-
-pub(crate) enum BuildOwner<'a> {
-    Target(&'a ConfiguredProvidersLabel),
-}
-
-/// Collects the results of the build and processes it
-pub(crate) trait BuildResultCollector: Send {
-    fn collect_result(&mut self, label: &BuildOwner, result: &BuildTargetResult);
-}
-
-impl BuildResultCollector for Vec<&mut dyn BuildResultCollector> {
-    fn collect_result(&mut self, label: &BuildOwner, result: &BuildTargetResult) {
-        for collector in self {
-            collector.collect_result(label, result);
-        }
-    }
-}
-
-pub mod result_report {
-    use buck2_build_api::build::BuildProviderType;
-    use buck2_build_api::build::BuildTargetResult;
-    use buck2_build_api::build::ProviderArtifacts;
-    use buck2_cli_proto::build_target::build_output::BuildOutputProviders;
-    use buck2_cli_proto::build_target::BuildOutput;
-    use buck2_cli_proto::BuildTarget;
-    use buck2_common::result::SharedError;
-    use buck2_core::configuration::compatibility::MaybeCompatible;
-    use buck2_core::fs::artifact_path_resolver::ArtifactFs;
-    use buck2_execute::artifact::artifact_dyn::ArtifactDyn;
-    use dupe::Dupe;
-    use starlark_map::small_map::SmallMap;
-
-    use crate::commands::build::results::BuildOwner;
-    use crate::commands::build::results::BuildResultCollector;
-
-    /// Simple container for multiple [`SharedError`]s
-    pub(crate) struct SharedErrors {
-        pub errors: Vec,
-    }
-
-    #[derive(Copy, Clone, Dupe)]
-    pub(crate) struct ResultReporterOptions {
-        pub(crate) return_outputs: bool,
-        pub(crate) return_default_other_outputs: bool,
-    }
-
-    pub(crate) struct ResultReporter<'a> {
-        artifact_fs: &'a ArtifactFs,
-        options: ResultReporterOptions,
-        results: Result, SharedErrors>,
-    }
-
-    impl<'a> ResultReporter<'a> {
-        pub(crate) fn new(artifact_fs: &'a ArtifactFs, options: ResultReporterOptions) -> Self {
-            Self {
-                artifact_fs,
-                options,
-                results: Ok(Vec::new()),
-            }
-        }
-
-        pub(crate) fn results(self) -> Result, SharedErrors> {
-            self.results
-        }
-    }
-
-    impl<'a> BuildResultCollector for ResultReporter<'a> {
-        fn collect_result(&mut self, label: &BuildOwner, result: &BuildTargetResult) {
-            let outputs = result
-                .outputs
-                .iter()
-                .filter_map(|output| match output {
-                    Ok(output) => Some(output),
-                    Err(e) => {
-                        match self.results.as_mut() {
-                            Ok(..)
=> { - self.results = Err(SharedErrors { - errors: vec![e.dupe()], - }); - } - Err(errs) => errs.errors.push(e.dupe()), - }; - None - } - }) - .collect::>(); - - if let Ok(r) = &mut self.results { - let artifacts = if self.options.return_outputs { - // NOTE: We use an SmallMap here to preserve the order the rule author wrote, all - // the while avoiding duplicates. - let mut artifacts = SmallMap::new(); - - for output in outputs { - let ProviderArtifacts { - values, - provider_type, - } = output; - - if !self.options.return_default_other_outputs - && matches!(provider_type, BuildProviderType::DefaultOther) - { - continue; - } - - for (artifact, _value) in values.iter() { - let entry = - artifacts - .entry(artifact) - .or_insert_with(|| BuildOutputProviders { - default_info: false, - run_info: false, - other: false, - test_info: false, - }); - - match provider_type { - BuildProviderType::Default => { - entry.default_info = true; - } - BuildProviderType::DefaultOther => { - entry.other = true; - } - BuildProviderType::Run => { - entry.run_info = true; - } - BuildProviderType::Test => { - entry.test_info = true; - } - } - } - } - - let artifact_fs = &self.artifact_fs; - - artifacts - .into_iter() - .map(|(a, providers)| BuildOutput { - path: a.resolve_path(artifact_fs).unwrap().to_string(), - providers: Some(providers), - }) - .collect() - } else { - Vec::new() - }; - - let (target, configuration) = match label { - BuildOwner::Target(t) => (t.unconfigured().to_string(), t.cfg().to_string()), - }; - - let configured_graph_size = match &result.configured_graph_size { - Some(Ok(MaybeCompatible::Compatible(v))) => Some(*v), - Some(Ok(MaybeCompatible::Incompatible(..))) => None, - Some(Err(e)) => { - // We don't expect an error on this unless something else on this target - // failed. 
- tracing::debug!( - "Graph size calculation error failed for {}: {:#}", - target, - e - ); - None - } - None => None, - }; - - r.push(BuildTarget { - target, - configuration, - run_args: result.run_args.clone().unwrap_or_default(), - outputs: artifacts, - configured_graph_size, - }) - }; - } - } -} - -pub mod build_report { - use std::collections::HashMap; - - use buck2_build_api::build::BuildProviderType; - use buck2_core::configuration::compatibility::MaybeCompatible; - use buck2_core::configuration::data::ConfigurationData; - use buck2_core::fs::artifact_path_resolver::ArtifactFs; - use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; - use buck2_core::fs::project::ProjectRoot; - use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; - use buck2_core::provider::label::NonDefaultProvidersName; - use buck2_core::provider::label::ProvidersName; - use buck2_core::target::label::TargetLabel; - use buck2_execute::artifact::artifact_dyn::ArtifactDyn; - use buck2_wrapper_common::invocation_id::TraceId; - use derivative::Derivative; - use dupe::Dupe; - use itertools::Itertools; - use serde::Serialize; - use starlark_map::small_set::SmallSet; - - use crate::commands::build::results::BuildOwner; - use crate::commands::build::results::BuildResultCollector; - use crate::commands::build::BuildTargetResult; - - #[derive(Debug, Serialize)] - #[allow(clippy::upper_case_acronyms)] // We care about how they serialise - enum BuildOutcome { - SUCCESS, - FAIL, - #[allow(dead_code)] // Part of the spec, but not yet used - CANCELED, - } - - impl Default for BuildOutcome { - fn default() -> Self { - Self::SUCCESS - } - } - - #[derive(Debug, Serialize)] - pub(crate) struct BuildReport { - trace_id: TraceId, - success: bool, - results: HashMap, - failures: HashMap, - project_root: AbsNormPathBuf, - truncated: bool, - } - - #[derive(Default, Debug, Serialize)] - pub(crate) struct BuildReportEntry { - /// whether this particular target was successful - success: BuildOutcome, - /// a map of each subtarget of the current target (outputted as a `|` delimited list) to - /// the default exposed output of the subtarget - outputs: HashMap>, - /// a map of each subtarget of the current target (outputted as a `|` delimited list) to - /// the hidden, implicitly built outputs of the subtarget. 
There are multiple outputs - /// per subtarget - other_outputs: HashMap>, - /// The size of the graph for this target, if it was produced - configured_graph_size: Option, - } - - #[derive(Debug, Serialize)] - pub(crate) struct ConfiguredBuildReportEntry { - #[serde(flatten)] - #[serde(skip_serializing_if = "Option::is_none")] - compatible: Option, - - /// the configured entry - configured: HashMap, - } - - #[derive(Derivative, Serialize, Eq, PartialEq, Hash)] - #[derivative(Debug)] - #[serde(untagged)] - enum EntryLabel { - #[derivative(Debug = "transparent")] - Target(TargetLabel), - } - - pub(crate) struct BuildReportCollector<'a> { - trace_id: &'a TraceId, - artifact_fs: &'a ArtifactFs, - build_report_results: HashMap, - overall_success: bool, - project_root: &'a ProjectRoot, - include_unconfigured_section: bool, - include_other_outputs: bool, - } - - impl<'a> BuildReportCollector<'a> { - pub(crate) fn new( - trace_id: &'a TraceId, - artifact_fs: &'a ArtifactFs, - project_root: &'a ProjectRoot, - include_unconfigured_section: bool, - include_other_outputs: bool, - ) -> Self { - Self { - trace_id, - artifact_fs, - build_report_results: HashMap::new(), - overall_success: true, - project_root, - include_unconfigured_section, - include_other_outputs, - } - } - - pub(crate) fn into_report(self) -> BuildReport { - BuildReport { - trace_id: self.trace_id.dupe(), - success: self.overall_success, - results: self.build_report_results, - failures: HashMap::new(), - project_root: self.project_root.root().to_owned(), - // In buck1 we may truncate build report for a large number of targets. - // Setting this to false since we don't currently truncate buck2's build report. - truncated: false, - } - } - } - - impl<'a> BuildResultCollector for BuildReportCollector<'a> { - fn collect_result(&mut self, label: &BuildOwner, result: &BuildTargetResult) { - let (default_outs, other_outs, success) = { - let mut default_outs = SmallSet::new(); - let mut other_outs = SmallSet::new(); - let mut success = true; - - result.outputs.iter().for_each(|res| { - match res { - Ok(artifacts) => { - let mut is_default = false; - let mut is_other = false; - - match artifacts.provider_type { - BuildProviderType::Default => { - // as long as we have requested it as a default info, it should be - // considered a default output whether or not it also appears as an other - // non-main output - is_default = true; - } - BuildProviderType::DefaultOther - | BuildProviderType::Run - | BuildProviderType::Test => { - // as long as the output isn't the default, we add it to other outputs. - // This means that the same artifact may appear twice if its part of the - // default AND the other outputs, but this is intended as it accurately - // describes the type of the artifact - is_other = true; - } - } - - for (artifact, _value) in artifacts.values.iter() { - if is_default { - default_outs - .insert(artifact.resolve_path(self.artifact_fs).unwrap()); - } - - if is_other && self.include_other_outputs { - other_outs - .insert(artifact.resolve_path(self.artifact_fs).unwrap()); - } - } - } - Err(..) 
=> success = false, - } - }); - - (default_outs, other_outs, success) - }; - - let report_results = self - .build_report_results - .entry(match label { - BuildOwner::Target(t) => EntryLabel::Target(t.unconfigured().target().dupe()), - }) - .or_insert_with(|| ConfiguredBuildReportEntry { - compatible: if self.include_unconfigured_section { - Some(BuildReportEntry::default()) - } else { - None - }, - configured: HashMap::new(), - }); - - let unconfigured_report = &mut report_results.compatible; - let configured_report = report_results - .configured - .entry(match label { - BuildOwner::Target(t) => t.cfg().dupe(), - }) - .or_insert_with(BuildReportEntry::default); - if !default_outs.is_empty() { - if let Some(report) = unconfigured_report { - report.outputs.insert( - report_providers_name(label), - default_outs.iter().cloned().collect(), - ); - } - - configured_report.outputs.insert( - report_providers_name(label), - default_outs.into_iter().collect(), - ); - } - if !other_outs.is_empty() { - if let Some(report) = unconfigured_report { - report.other_outputs.insert( - report_providers_name(label), - other_outs.iter().cloned().collect(), - ); - } - - configured_report.other_outputs.insert( - report_providers_name(label), - other_outs.into_iter().collect(), - ); - } - - if !success { - if let Some(report) = unconfigured_report { - report.success = BuildOutcome::FAIL; - } - configured_report.success = BuildOutcome::FAIL; - self.overall_success = false; - } - - if let Some(Ok(MaybeCompatible::Compatible(configured_graph_size))) = - result.configured_graph_size - { - if let Some(report) = unconfigured_report { - report.configured_graph_size = Some(configured_graph_size); - } - configured_report.configured_graph_size = Some(configured_graph_size); - } - } - } - - fn report_providers_name(label: &BuildOwner) -> String { - match label { - BuildOwner::Target(t) => match t.name() { - ProvidersName::Default => "DEFAULT".to_owned(), - ProvidersName::NonDefault(box NonDefaultProvidersName::Named(names)) => { - names.iter().join("|") - } - ProvidersName::NonDefault(box NonDefaultProvidersName::UnrecognizedFlavor(f)) => { - format!("#{}", f) - } - }, - } - } -} diff --git a/app/buck2_server_commands/src/commands/build/unhashed_outputs.rs b/app/buck2_server_commands/src/commands/build/unhashed_outputs.rs index 19a97fb5d307d..5c0fe787e98a3 100644 --- a/app/buck2_server_commands/src/commands/build/unhashed_outputs.rs +++ b/app/buck2_server_commands/src/commands/build/unhashed_outputs.rs @@ -149,7 +149,7 @@ fn iter_reverse_ancestors<'a>( } #[cfg(test)] -mod test { +mod tests { use super::*; #[test] diff --git a/app/buck2_server_commands/src/commands/complete.rs b/app/buck2_server_commands/src/commands/complete.rs new file mode 100644 index 0000000000000..ed2e9535b0715 --- /dev/null +++ b/app/buck2_server_commands/src/commands/complete.rs @@ -0,0 +1,84 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use buck2_cli_proto::new_generic::CompleteRequest; +use buck2_cli_proto::new_generic::CompleteResponse; +use buck2_common::pattern::parse_from_cli::parse_patterns_from_cli_args; +use buck2_core::pattern::pattern_type::TargetPatternExtra; +use buck2_node::load_patterns::load_patterns; +use buck2_node::load_patterns::MissingTargetBehavior; +use buck2_server_ctx::ctx::ServerCommandContextTrait; +use buck2_server_ctx::partial_result_dispatcher::NoPartialResult; +use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; +use buck2_server_ctx::template::run_server_command; +use buck2_server_ctx::template::ServerCommandTemplate; +use dice::DiceTransaction; + +pub(crate) async fn complete_command( + ctx: &dyn ServerCommandContextTrait, + partial_result_dispatcher: PartialResultDispatcher, + req: CompleteRequest, +) -> anyhow::Result { + run_server_command( + CompleteServerCommand { req }, + ctx, + partial_result_dispatcher, + ) + .await +} + +struct CompleteServerCommand { + req: CompleteRequest, +} + +#[async_trait::async_trait] +impl ServerCommandTemplate for CompleteServerCommand { + type StartEvent = buck2_data::CompleteCommandStart; + type EndEvent = buck2_data::CompleteCommandEnd; + type Response = buck2_cli_proto::new_generic::CompleteResponse; + type PartialResult = NoPartialResult; + + async fn command( + &self, + server_ctx: &dyn ServerCommandContextTrait, + _partial_result_dispatcher: PartialResultDispatcher, + mut dice: DiceTransaction, + ) -> anyhow::Result { + let cwd = server_ctx.working_dir(); + let parsed_target_patterns = parse_patterns_from_cli_args::( + &mut dice, + &[self.req.partial_target.clone()], + cwd, + ) + .await?; + + let results = &load_patterns( + &mut dice, + parsed_target_patterns, + MissingTargetBehavior::Fail, + ) + .await?; + + let mut output: Vec = vec![]; + for node in results.iter_loaded_targets() { + output.push(format!("{}", node?.label())); + } + Ok(CompleteResponse { + completions: output, + }) + } + + fn is_success(&self, _response: &Self::Response) -> bool { + true + } + + fn exclusive_command_name(&self) -> Option { + Some("complete".to_owned()) + } +} diff --git a/app/buck2_server_commands/src/commands/ctargets.rs b/app/buck2_server_commands/src/commands/ctargets.rs new file mode 100644 index 0000000000000..65609af5bb49e --- /dev/null +++ b/app/buck2_server_commands/src/commands/ctargets.rs @@ -0,0 +1,122 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use std::fmt::Write; +use std::iter; + +use async_trait::async_trait; +use buck2_build_api::configure_targets::load_compatible_patterns; +use buck2_cli_proto::ConfiguredTargetsRequest; +use buck2_cli_proto::ConfiguredTargetsResponse; +use buck2_cli_proto::HasClientContext; +use buck2_common::pattern::parse_from_cli::parse_patterns_from_cli_args; +use buck2_core::pattern::pattern_type::TargetPatternExtra; +use buck2_error::BuckErrorContext; +use buck2_node::load_patterns::MissingTargetBehavior; +use buck2_server_ctx::ctx::ServerCommandContextTrait; +use buck2_server_ctx::global_cfg_options::global_cfg_options_from_client_context; +use buck2_server_ctx::partial_result_dispatcher::NoPartialResult; +use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; +use buck2_server_ctx::template::run_server_command; +use buck2_server_ctx::template::ServerCommandTemplate; +use dice::DiceTransaction; + +use crate::commands::targets::fmt::print_target_call_stack_after_target; + +pub(crate) async fn configured_targets_command( + server_ctx: &dyn ServerCommandContextTrait, + partial_result_dispatcher: PartialResultDispatcher, + req: ConfiguredTargetsRequest, +) -> anyhow::Result { + run_server_command( + ConfiguredTargetsServerCommand { req }, + server_ctx, + partial_result_dispatcher, + ) + .await +} + +struct ConfiguredTargetsServerCommand { + req: ConfiguredTargetsRequest, +} + +#[async_trait] +impl ServerCommandTemplate for ConfiguredTargetsServerCommand { + type StartEvent = buck2_data::ConfiguredTargetsCommandStart; + type EndEvent = buck2_data::ConfiguredTargetsCommandEnd; + type Response = ConfiguredTargetsResponse; + type PartialResult = NoPartialResult; + + fn is_success(&self, response: &ConfiguredTargetsResponse) -> bool { + let ConfiguredTargetsResponse { + serialized_targets_output, + } = response; + let _ignore = serialized_targets_output; + true + } + + async fn command( + &self, + server_ctx: &dyn ServerCommandContextTrait, + _partial_result_dispatcher: PartialResultDispatcher, + mut ctx: DiceTransaction, + ) -> anyhow::Result { + // TODO(nga): this should accept `ConfiguredTargetPatternExtra`. And handle the universe. + let parsed_patterns = parse_patterns_from_cli_args::( + &mut ctx, + &self.req.target_patterns, + server_ctx.working_dir(), + ) + .await?; + + let client_ctx = self.req.client_context()?; + + let target_call_stacks = client_ctx.target_call_stacks; + + let global_cfg_options = global_cfg_options_from_client_context( + self.req + .target_cfg + .as_ref() + .internal_error_anyhow("target_cfg must be set")?, + server_ctx, + &mut ctx, + ) + .await?; + + let skip_missing_targets = MissingTargetBehavior::from_skip(self.req.skip_missing_targets); + + let compatible_targets = load_compatible_patterns( + &mut ctx, + parsed_patterns, + &global_cfg_options, + skip_missing_targets, + ) + .await?; + + let mut serialized_targets_output = String::new(); + for node in compatible_targets.into_iter() { + // TODO(nga): we should probably get rid of forward nodes. 
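+            // `forward_target()` returns the node this one forwards to (if any),
+            // so both the forward node and its target label are printed below.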
+ let nodes = iter::once(&node).chain(node.forward_target()); + + for node in nodes { + writeln!(serialized_targets_output, "{}", node.label())?; + if target_call_stacks { + print_target_call_stack_after_target( + &mut serialized_targets_output, + node.call_stack().as_deref(), + ); + } + } + } + + Ok(ConfiguredTargetsResponse { + serialized_targets_output, + }) + } +} diff --git a/app/buck2_server_commands/src/commands/ctargets/mod.rs b/app/buck2_server_commands/src/commands/ctargets/mod.rs deleted file mode 100644 index 2d89fbc1ccb32..0000000000000 --- a/app/buck2_server_commands/src/commands/ctargets/mod.rs +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::fmt::Write; -use std::iter; - -use async_trait::async_trait; -use buck2_build_api::configure_targets::load_compatible_patterns; -use buck2_cli_proto::ConfiguredTargetsRequest; -use buck2_cli_proto::ConfiguredTargetsResponse; -use buck2_cli_proto::HasClientContext; -use buck2_core::pattern::pattern_type::TargetPatternExtra; -use buck2_node::load_patterns::MissingTargetBehavior; -use buck2_server_ctx::ctx::ServerCommandContextTrait; -use buck2_server_ctx::partial_result_dispatcher::NoPartialResult; -use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; -use buck2_server_ctx::pattern::parse_patterns_from_cli_args; -use buck2_server_ctx::pattern::target_platform_from_client_context; -use buck2_server_ctx::template::run_server_command; -use buck2_server_ctx::template::ServerCommandTemplate; -use dice::DiceTransaction; - -use crate::commands::targets::fmt::print_target_call_stack_after_target; - -pub(crate) async fn configured_targets_command( - server_ctx: &dyn ServerCommandContextTrait, - partial_result_dispatcher: PartialResultDispatcher, - req: ConfiguredTargetsRequest, -) -> anyhow::Result { - run_server_command( - ConfiguredTargetsServerCommand { req }, - server_ctx, - partial_result_dispatcher, - ) - .await -} - -struct ConfiguredTargetsServerCommand { - req: ConfiguredTargetsRequest, -} - -#[async_trait] -impl ServerCommandTemplate for ConfiguredTargetsServerCommand { - type StartEvent = buck2_data::ConfiguredTargetsCommandStart; - type EndEvent = buck2_data::ConfiguredTargetsCommandEnd; - type Response = ConfiguredTargetsResponse; - type PartialResult = NoPartialResult; - - fn is_success(&self, response: &ConfiguredTargetsResponse) -> bool { - let ConfiguredTargetsResponse { - serialized_targets_output, - } = response; - let _ignore = serialized_targets_output; - true - } - - async fn command( - &self, - server_ctx: &dyn ServerCommandContextTrait, - _partial_result_dispatcher: PartialResultDispatcher, - mut ctx: DiceTransaction, - ) -> anyhow::Result { - // TODO(nga): this should accept `ConfiguredTargetPatternExtra`. And handle the universe. 
- let parsed_patterns = parse_patterns_from_cli_args::( - &mut ctx, - &self.req.target_patterns, - server_ctx.working_dir(), - ) - .await?; - - let client_ctx = self.req.client_context()?; - - let target_call_stacks = client_ctx.target_call_stacks; - - let global_target_platform = - target_platform_from_client_context(client_ctx, server_ctx, &mut ctx).await?; - - let skip_missing_targets = MissingTargetBehavior::from_skip(self.req.skip_missing_targets); - - let compatible_targets = load_compatible_patterns( - &ctx, - parsed_patterns, - global_target_platform, - skip_missing_targets, - ) - .await?; - - let mut serialized_targets_output = String::new(); - for node in compatible_targets.into_iter() { - // TODO(nga): we should probably get rid of forward nodes. - let nodes = iter::once(&node).chain(node.forward_target()); - - for node in nodes { - writeln!(serialized_targets_output, "{}", node.label())?; - if target_call_stacks { - print_target_call_stack_after_target( - &mut serialized_targets_output, - node.call_stack().as_deref(), - ); - } - } - } - - Ok(ConfiguredTargetsResponse { - serialized_targets_output, - }) - } -} diff --git a/app/buck2_server_commands/src/commands/debug_eval.rs b/app/buck2_server_commands/src/commands/debug_eval.rs index 2cc824018624a..d93acde470706 100644 --- a/app/buck2_server_commands/src/commands/debug_eval.rs +++ b/app/buck2_server_commands/src/commands/debug_eval.rs @@ -20,9 +20,8 @@ use buck2_interpreter::paths::bxl::BxlFilePath; use buck2_interpreter::paths::module::OwnedStarlarkModulePath; use buck2_server_ctx::ctx::ServerCommandContextTrait; use buck2_server_ctx::ctx::ServerCommandDiceContext; -use futures::future; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum DebugEvalError { #[error("Can only eval `.bzl` or `.bxl`, but got `{0}`")] InvalidImportPath(CellPath), @@ -33,10 +32,12 @@ pub(crate) async fn debug_eval_command( req: DebugEvalRequest, ) -> anyhow::Result { context - .with_dice_ctx(|server_ctx, ctx| async move { + .with_dice_ctx(|server_ctx, mut ctx| async move { let cell_resolver = ctx.get_cell_resolver().await?; let current_cell_path = cell_resolver.get_cell_path(server_ctx.working_dir())?; let mut loads = Vec::new(); + + let ctx = &ctx; for path in req.paths { let path = AbsPathBuf::new(path)?; let path = fs_util::canonicalize(&path)?; @@ -52,14 +53,12 @@ pub(crate) async fn debug_eval_command( } else { return Err(DebugEvalError::InvalidImportPath(path).into()); }; - loads.push(async { - let import_path = import_path; - ctx.get_loaded_module(import_path.borrow()).await - }); + loads + .push(async move { ctx.clone().get_loaded_module(import_path.borrow()).await }); } // Catch errors, ignore results. - future::try_join_all(loads).await?; + buck2_util::future::try_join_all(loads).await?; Ok(DebugEvalResponse {}) }) diff --git a/app/buck2_server_commands/src/commands/expand_external_cell.rs b/app/buck2_server_commands/src/commands/expand_external_cell.rs new file mode 100644 index 0000000000000..a71d3f2dd280e --- /dev/null +++ b/app/buck2_server_commands/src/commands/expand_external_cell.rs @@ -0,0 +1,86 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use buck2_cli_proto::new_generic::ExpandExternalCellRequest; +use buck2_cli_proto::new_generic::ExpandExternalCellResponse; +use buck2_common::dice::cells::HasCellResolver; +use buck2_common::external_cells::EXTERNAL_CELLS_IMPL; +use buck2_core::cells::name::CellName; +use buck2_server_ctx::ctx::ServerCommandContextTrait; +use buck2_server_ctx::partial_result_dispatcher::NoPartialResult; +use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; +use buck2_server_ctx::template::run_server_command; +use buck2_server_ctx::template::ServerCommandTemplate; +use dice::DiceTransaction; +use dupe::Dupe; + +pub(crate) async fn expand_external_cell_command( + ctx: &dyn ServerCommandContextTrait, + partial_result_dispatcher: PartialResultDispatcher, + req: ExpandExternalCellRequest, +) -> anyhow::Result { + run_server_command( + ExpandExternalCellServerCommand { req }, + ctx, + partial_result_dispatcher, + ) + .await +} + +struct ExpandExternalCellServerCommand { + req: ExpandExternalCellRequest, +} + +#[derive(buck2_error::Error, Debug)] +enum ExpandExternalCellError { + #[error("Cell `{0}` is not an external cell")] + CellNotExternal(CellName), +} + +#[async_trait::async_trait] +impl ServerCommandTemplate for ExpandExternalCellServerCommand { + type StartEvent = buck2_data::ExpandExternalCellCommandStart; + type EndEvent = buck2_data::ExpandExternalCellCommandEnd; + type Response = buck2_cli_proto::new_generic::ExpandExternalCellResponse; + type PartialResult = NoPartialResult; + + async fn command( + &self, + server_ctx: &dyn ServerCommandContextTrait, + _partial_result_dispatcher: PartialResultDispatcher, + mut ctx: DiceTransaction, + ) -> anyhow::Result { + let res = ctx.get_cell_resolver().await?; + let cell = ctx + .get_cell_alias_resolver_for_dir(server_ctx.working_dir()) + .await? + .resolve(&self.req.cell_name)?; + + let instance = res.get(cell)?; + let Some(origin) = instance.external() else { + return Err(ExpandExternalCellError::CellNotExternal(cell).into()); + }; + EXTERNAL_CELLS_IMPL + .get()? + .expand(&mut ctx, cell, origin.dupe(), instance.path()) + .await?; + + Ok(ExpandExternalCellResponse { + path: instance.path().to_string(), + }) + } + + fn is_success(&self, _response: &Self::Response) -> bool { + true + } + + fn exclusive_command_name(&self) -> Option { + Some("expand-external-cell".to_owned()) + } +} diff --git a/app/buck2_server_commands/src/commands/explain.rs b/app/buck2_server_commands/src/commands/explain.rs new file mode 100644 index 0000000000000..68f69204527ff --- /dev/null +++ b/app/buck2_server_commands/src/commands/explain.rs @@ -0,0 +1,139 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use buck2_cli_proto::new_generic::ExplainRequest; +use buck2_cli_proto::new_generic::ExplainResponse; +use buck2_core::pattern::pattern_type::ConfiguredTargetPatternExtra; +use buck2_node::nodes::configured::ConfiguredTargetNode; +use buck2_node::nodes::configured_frontend::ConfiguredTargetNodeCalculation; +use buck2_query::query::syntax::simple::eval::label_indexed::LabelIndexedSet; +use buck2_server_ctx::ctx::ServerCommandContextTrait; +use buck2_server_ctx::partial_result_dispatcher::NoPartialResult; +use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; +use buck2_server_ctx::pattern_parse_and_resolve::parse_and_resolve_patterns_to_targets_from_cli_args; +use buck2_server_ctx::target_resolution_config::TargetResolutionConfig; +use buck2_server_ctx::template::run_server_command; +use buck2_server_ctx::template::ServerCommandTemplate; +use dice::DiceTransaction; +use dupe::Dupe; +use dupe::IterDupedExt; +use tonic::async_trait; + +pub(crate) async fn explain_command( + ctx: &dyn ServerCommandContextTrait, + partial_result_dispatcher: PartialResultDispatcher, + req: ExplainRequest, +) -> anyhow::Result { + run_server_command(ExplainServerCommand { req }, ctx, partial_result_dispatcher).await +} + +struct ExplainServerCommand { + req: ExplainRequest, +} + +#[async_trait] +impl ServerCommandTemplate for ExplainServerCommand { + type StartEvent = buck2_data::ExplainCommandStart; + type EndEvent = buck2_data::ExplainCommandEnd; + type Response = buck2_cli_proto::new_generic::ExplainResponse; + type PartialResult = NoPartialResult; + + async fn command( + &self, + server_ctx: &dyn ServerCommandContextTrait, + _partial_result_dispatcher: PartialResultDispatcher, + ctx: DiceTransaction, + ) -> anyhow::Result { + explain(server_ctx, ctx, &self.req).await + } + + fn is_success(&self, _response: &Self::Response) -> bool { + // No response if we failed. + true + } + + fn exclusive_command_name(&self) -> Option { + Some("explain".to_owned()) + } +} + +pub(crate) async fn explain( + server_ctx: &dyn ServerCommandContextTrait, + mut ctx: DiceTransaction, + req: &ExplainRequest, +) -> anyhow::Result { + let configured_target = { + // TODO iguridi: this is hacky + let target_pattern = parse_and_resolve_patterns_to_targets_from_cli_args::< + ConfiguredTargetPatternExtra, + >(&mut ctx, &[req.target.clone()], server_ctx.working_dir()) + .await?; + + let target_label = match target_pattern.as_slice() { + [p] => &p.target_label, + _ => { + return Err(anyhow::anyhow!( + "Expected exactly one target, got {}", + target_pattern.len() + )); + } + }; + + let target_resolution_config = TargetResolutionConfig::from_args( + &mut ctx, + &req.target_cfg, + server_ctx, + &req.target_universe, + ) + .await?; + + let configured_targets = target_resolution_config + .get_configured_target(&mut ctx, target_label) + .await?; + if configured_targets.len() != 1 { + return Err(anyhow::anyhow!( + "Expected exactly one target, got {}", + configured_targets.len() + )); + } + ctx.get_configured_target_node(&configured_targets[0]) + .await? + .require_compatible()? 
// TODO iguridi: not sure about this, make things simpler for now + }; + + let all_deps = { + let mut stack = vec![configured_target]; + let mut visited = LabelIndexedSet::new(); + while let Some(node) = stack.pop() { + if visited.insert(node.dupe()) { + stack.extend(node.deps().duped()); + } + } + visited.into_iter().collect::>() + }; + + // TODO iguridi: make it work for OSS + #[cfg(fbcode_build)] + { + buck2_explain::main( + all_deps, + req.output.as_ref(), + req.fbs_dump.as_ref(), + req.manifold_path.as_deref(), + ) + .await?; + } + #[cfg(not(fbcode_build))] + { + // just "using" unused variables + let _all_deps = all_deps; + } + + Ok(ExplainResponse {}) +} diff --git a/app/buck2_server_commands/src/commands/init_commands.rs b/app/buck2_server_commands/src/commands/init_commands.rs index 0cdf2daa2902a..23f428de1ec7a 100644 --- a/app/buck2_server_commands/src/commands/init_commands.rs +++ b/app/buck2_server_commands/src/commands/init_commands.rs @@ -8,17 +8,26 @@ */ use async_trait::async_trait; +use buck2_cli_proto::new_generic::CompleteRequest; +use buck2_cli_proto::new_generic::CompleteResponse; use buck2_cli_proto::new_generic::DebugEvalRequest; use buck2_cli_proto::new_generic::DebugEvalResponse; +use buck2_cli_proto::new_generic::ExpandExternalCellRequest; +use buck2_cli_proto::new_generic::ExpandExternalCellResponse; +use buck2_cli_proto::new_generic::ExplainRequest; +use buck2_cli_proto::new_generic::ExplainResponse; use buck2_server_ctx::ctx::ServerCommandContextTrait; -use buck2_server_ctx::other_server_commands::OtherServerCommands; -use buck2_server_ctx::other_server_commands::OTHER_SERVER_COMMANDS; +use buck2_server_ctx::late_bindings::OtherServerCommands; +use buck2_server_ctx::late_bindings::OTHER_SERVER_COMMANDS; use buck2_server_ctx::partial_result_dispatcher::NoPartialResult; use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; use crate::commands::build::build_command; +use crate::commands::complete::complete_command; use crate::commands::ctargets::configured_targets_command; use crate::commands::debug_eval::debug_eval_command; +use crate::commands::expand_external_cell::expand_external_cell_command; +use crate::commands::explain::explain_command; use crate::commands::install::install_command; use crate::commands::query::aquery::aquery_command; use crate::commands::query::cquery::cquery_command; @@ -94,6 +103,16 @@ impl OtherServerCommands for OtherServerCommandsInstance { ) -> anyhow::Result { configured_targets_command(ctx, partial_result_dispatcher, req).await } + + async fn complete( + &self, + ctx: &dyn ServerCommandContextTrait, + partial_result_dispatcher: PartialResultDispatcher, + req: CompleteRequest, + ) -> anyhow::Result { + complete_command(ctx, partial_result_dispatcher, req).await + } + async fn debug_eval( &self, ctx: &dyn ServerCommandContextTrait, @@ -101,6 +120,24 @@ impl OtherServerCommands for OtherServerCommandsInstance { ) -> anyhow::Result { debug_eval_command(ctx, req).await } + + async fn explain( + &self, + ctx: &dyn ServerCommandContextTrait, + partial_result_dispatcher: PartialResultDispatcher, + req: ExplainRequest, + ) -> anyhow::Result { + explain_command(ctx, partial_result_dispatcher, req).await + } + + async fn expand_external_cell( + &self, + ctx: &dyn ServerCommandContextTrait, + partial_result_dispatcher: PartialResultDispatcher, + req: ExpandExternalCellRequest, + ) -> anyhow::Result { + expand_external_cell_command(ctx, partial_result_dispatcher, req).await + } } pub(crate) fn init_other_server_commands() { 
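The new `complete`, `explain`, and `expand-external-cell` handlers registered above all follow the same server-side pattern: a small struct holds the request, implements `ServerCommandTemplate`, and a thin entry point hands it to `run_server_command`. A minimal sketch of that shape, where `PingRequest`, `PingResponse`, and the event types are hypothetical placeholders rather than real buck2 APIs, while the trait items mirror the implementations added in this diff:

    // Sketch only: request/response and event types below are invented for illustration.
    struct PingServerCommand {
        req: PingRequest,
    }

    // Entry point, matching the shape of `complete_command` and friends above.
    pub(crate) async fn ping_command(
        ctx: &dyn ServerCommandContextTrait,
        partial_result_dispatcher: PartialResultDispatcher<NoPartialResult>,
        req: PingRequest,
    ) -> anyhow::Result<PingResponse> {
        run_server_command(PingServerCommand { req }, ctx, partial_result_dispatcher).await
    }

    #[async_trait::async_trait]
    impl ServerCommandTemplate for PingServerCommand {
        type StartEvent = PingCommandStart; // hypothetical event structs
        type EndEvent = PingCommandEnd;
        type Response = PingResponse;
        type PartialResult = NoPartialResult;

        async fn command(
            &self,
            _server_ctx: &dyn ServerCommandContextTrait,
            _partial_result_dispatcher: PartialResultDispatcher<Self::PartialResult>,
            _dice: DiceTransaction,
        ) -> anyhow::Result<Self::Response> {
            // Real commands do their DICE work here (see `complete.rs` above).
            Ok(PingResponse {})
        }

        fn is_success(&self, _response: &Self::Response) -> bool {
            true
        }

        fn exclusive_command_name(&self) -> Option<String> {
            // Mirrors the `exclusive_command_name` overrides in the commands above.
            Some("ping".to_owned())
        }
    }

The dispatch side is the `OtherServerCommands` impl above: each new command only needs a request/response pair in `buck2_cli_proto::new_generic` plus one forwarding method there.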
diff --git a/app/buck2_server_commands/src/commands/install.rs b/app/buck2_server_commands/src/commands/install.rs index 8ec0ff47c15b0..fb240185f1cd7 100644 --- a/app/buck2_server_commands/src/commands/install.rs +++ b/app/buck2_server_commands/src/commands/install.rs @@ -15,6 +15,8 @@ use std::net::Ipv4Addr; use std::net::SocketAddr; use std::net::TcpListener; use std::process::Stdio; +use std::sync::Arc; +use std::time::Instant; use anyhow::Context; use async_trait::async_trait; @@ -22,80 +24,91 @@ use buck2_artifact::artifact::artifact_type::Artifact; use buck2_build_api::actions::artifact::get_artifact_fs::GetArtifactFs; use buck2_build_api::analysis::calculation::RuleAnalysisCalculation; use buck2_build_api::artifact_groups::ArtifactGroup; -use buck2_build_api::build::materialize_artifact_group; -use buck2_build_api::build::MaterializationContext; use buck2_build_api::context::HasBuildContextData; use buck2_build_api::interpreter::rule_defs::cmd_args::AbsCommandLineContext; use buck2_build_api::interpreter::rule_defs::cmd_args::CommandLineArgLike; use buck2_build_api::interpreter::rule_defs::cmd_args::SimpleCommandLineArtifactVisitor; use buck2_build_api::interpreter::rule_defs::provider::builtin::install_info::FrozenInstallInfo; use buck2_build_api::interpreter::rule_defs::provider::builtin::run_info::FrozenRunInfo; -use buck2_cli_proto::HasClientContext; +use buck2_build_api::materialize::materialize_artifact_group; +use buck2_build_api::materialize::MaterializationContext; +use buck2_build_api::validation::validation_impl::VALIDATION_IMPL; use buck2_cli_proto::InstallRequest; use buck2_cli_proto::InstallResponse; use buck2_common::client_utils::get_channel_tcp; -use buck2_common::dice::cells::HasCellResolver; -use buck2_common::dice::file_ops::HasFileOps; use buck2_common::file_ops::FileDigest; -use buck2_common::pattern::resolve::resolve_target_patterns; -use buck2_core::directory::DirectoryEntry; +use buck2_common::pattern::parse_from_cli::parse_patterns_from_cli_args; +use buck2_common::pattern::resolve::ResolveTargetPatterns; use buck2_core::execution_types::executor_config::PathSeparatorKind; use buck2_core::fs::artifact_path_resolver::ArtifactFs; use buck2_core::fs::fs_util; use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; use buck2_core::package::PackageLabel; +use buck2_core::pattern::pattern::PackageSpec; use buck2_core::pattern::pattern_type::ConfiguredProvidersPatternExtra; use buck2_core::pattern::pattern_type::ProvidersPatternExtra; use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_core::provider::label::ProvidersName; +use buck2_core::target::configured_target_label::ConfiguredTargetLabel; use buck2_core::target::name::TargetName; use buck2_data::InstallEventInfoEnd; use buck2_data::InstallEventInfoStart; +use buck2_directory::directory::entry::DirectoryEntry; +use buck2_error::BuckErrorContext; +use buck2_events::dispatch::get_dispatcher; use buck2_events::dispatch::span_async; +use buck2_events::dispatch::span_async_simple; use buck2_execute::artifact::artifact_dyn::ArtifactDyn; use buck2_execute::artifact::fs::ExecutorFs; use buck2_execute::artifact_value::ArtifactValue; use buck2_execute::directory::ActionDirectoryMember; use buck2_install_proto::installer_client::InstallerClient; +use buck2_install_proto::DeviceMetadata; use buck2_install_proto::FileReadyRequest; use buck2_install_proto::InstallInfoRequest; use buck2_install_proto::ShutdownRequest; use 
buck2_node::nodes::frontend::TargetGraphCalculation;
use buck2_node::target_calculation::ConfiguredTargetCalculation;
use buck2_server_ctx::ctx::ServerCommandContextTrait;
+use buck2_server_ctx::global_cfg_options::global_cfg_options_from_client_context;
use buck2_server_ctx::partial_result_dispatcher::NoPartialResult;
use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher;
-use buck2_server_ctx::pattern::parse_patterns_from_cli_args;
-use buck2_server_ctx::pattern::target_platform_from_client_context;
use buck2_server_ctx::template::run_server_command;
use buck2_server_ctx::template::ServerCommandTemplate;
+use buck2_util::future::try_join_all;
use buck2_util::process::background_command;
-use chrono::NaiveDateTime;
+use chrono::DateTime;
use chrono::Utc;
use dice::DiceComputations;
use dice::DiceTransaction;
use dupe::Dupe;
-use futures::future::try_join;
-use futures::future::try_join_all;
use futures::future::FutureExt;
use futures::stream::StreamExt;
use futures::stream::TryStreamExt;
use starlark_map::small_map::SmallMap;
use tokio::sync::mpsc;
+use tokio::sync::Mutex;
use tonic::transport::Channel;

-#[derive(Debug, thiserror::Error)]
+#[derive(Debug, buck2_error::Error)]
+#[buck2(tag = Install)]
pub enum InstallError {
+    /// Input errors from the installer definition
    #[error("Target {1}:{0} cannot be installed as it does not expose an InstallInfo provider")]
+    #[buck2(input)]
    NoInstallProvider(TargetName, PackageLabel),

    #[error("Installer target `{0}` doesn't expose RunInfo provider")]
+    #[buck2(input)]
    NoRunInfoProvider(TargetName),

+    /// Errors from the external installer process; these may represent infra errors or input errors (e.g. no device).
+    /// Tagging as input errors in the absence of a way for installers to report infra errors.
    #[error(
        "Installer failed to process file ready request for `{install_id}`. Artifact: `{artifact}` located at `{path}`. Error message: `{err}`\n. 
More details can be found at `{installer_log}`" )] + #[buck2(input)] ProcessingFileReadyFailure { install_id: String, artifact: String, @@ -105,25 +118,28 @@ pub enum InstallError { }, #[error("Installer failed for `{install_id}` with `{err}`")] + #[buck2(input)] InternalInstallerFailure { install_id: String, err: String }, + /// Infra errors #[error("Communication with the installer failed with `{err}`")] + #[buck2(tier0)] InstallerCommunicationFailure { err: String }, #[error("Incorrect seconds/nanos argument")] + #[buck2(tier0)] NativeDateTime, } async fn get_installer_log_directory( server_ctx: &dyn ServerCommandContextTrait, - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, ) -> anyhow::Result { let out_path = ctx.get_buck_out_path().await?; let filesystem = server_ctx.project_root(); - let buck_out_path = out_path + let buck_out_path = filesystem .root() - .as_forward_relative_path() - .resolve(filesystem.root()); + .join(out_path.root().as_forward_relative_path()); let install_log_dir = buck_out_path.join(ForwardRelativePathBuf::unchecked_new( "installer".to_owned(), )); @@ -150,9 +166,14 @@ impl ServerCommandTemplate for InstallServerCommand { type Response = InstallResponse; type PartialResult = NoPartialResult; - fn end_event(&self, _response: &anyhow::Result) -> Self::EndEvent { + fn end_event(&self, _response: &buck2_error::Result) -> Self::EndEvent { buck2_data::InstallCommandEnd { - unresolved_target_patterns: self.req.target_patterns.clone(), + unresolved_target_patterns: self + .req + .target_patterns + .iter() + .map(|p| buck2_data::TargetPattern { value: p.clone() }) + .collect(), } } @@ -171,33 +192,76 @@ impl ServerCommandTemplate for InstallServerCommand { } } +struct InstallRequestData<'a> { + installer_label: ConfiguredProvidersLabel, + installed_targets: Vec<(ConfiguredTargetLabel, SmallMap<&'a str, Artifact>)>, +} + +fn install_id(installed_target: &ConfiguredTargetLabel) -> String { + format!("{}", installed_target) +} + async fn install( server_ctx: &dyn ServerCommandContextTrait, mut ctx: DiceTransaction, request: &InstallRequest, ) -> anyhow::Result { - let cwd = server_ctx.working_dir(); + let install_request_data_vec = + collect_install_request_data(server_ctx, &mut ctx, request).await?; + + let install_log_dir = &get_installer_log_directory(server_ctx, &mut ctx).await?; + + let install_requests = install_request_data_vec.into_iter().map(|data| { + let installer_run_args = &request.installer_run_args; + DiceComputations::declare_closure(move |ctx| { + async move { + handle_install_request( + ctx, + install_log_dir, + &data, + installer_run_args, + request.installer_debug, + ) + .await + } + .boxed() + }) + }); + + let install_requests = ctx.compute_many(install_requests); + try_join_all(install_requests) + .await + .context("Interaction with installer failed.")?; - let cell_resolver = ctx.get_cell_resolver().await?; + Ok(InstallResponse {}) +} - let client_ctx = request.client_context()?; - let global_target_platform = - target_platform_from_client_context(client_ctx, server_ctx, &mut ctx).await?; +async fn collect_install_request_data<'a>( + server_ctx: &dyn ServerCommandContextTrait, + ctx: &mut DiceTransaction, + request: &InstallRequest, +) -> anyhow::Result>> { + let cwd = server_ctx.working_dir(); - let materializations = MaterializationContext::force_materializations(); - let materializations = &materializations; // Don't move this below. 
+ let global_cfg_options = global_cfg_options_from_client_context( + request + .target_cfg + .as_ref() + .internal_error_anyhow("target_cfg must be set")?, + server_ctx, + ctx, + ) + .await?; // Note does not return the providers let parsed_patterns = parse_patterns_from_cli_args::( - &mut ctx, + ctx, &request.target_patterns, cwd, ) .await?; server_ctx.log_target_pattern(&parsed_patterns); - - let resolved_pattern = - resolve_target_patterns(&cell_resolver, &parsed_patterns, &ctx.file_ops()).await?; + let resolved_pattern = ResolveTargetPatterns::resolve(ctx, &parsed_patterns).await?; let resolved_pattern = resolved_pattern .convert_pattern() @@ -205,10 +269,9 @@ async fn install( let mut installer_to_files_map = HashMap::new(); for (package, spec) in resolved_pattern.specs { - let ctx = &ctx; let targets: Vec<(TargetName, ProvidersPatternExtra)> = match spec { - buck2_core::pattern::PackageSpec::Targets(targets) => targets, - buck2_core::pattern::PackageSpec::All => { + PackageSpec::Targets(targets) => targets, + PackageSpec::All => { let interpreter_results = ctx.get_interpreter_results(package.dupe()).await?; interpreter_results .targets() @@ -227,7 +290,7 @@ async fn install( for (target_name, providers) in targets { let label = providers.into_providers_label(package.dupe(), target_name.as_ref()); let providers_label = ctx - .get_configured_provider_label(&label, global_target_platform.dupe().as_ref()) + .get_configured_provider_label(&label, &global_cfg_options) .await?; let frozen_providers = ctx .get_providers(&providers_label) @@ -236,12 +299,11 @@ async fn install( let providers = frozen_providers.provider_collection(); match providers.builtin_provider::() { Some(install_info) => { - let install_id = format!("{}", providers_label.target()); let installer_label = install_info.get_installer()?; installer_to_files_map .entry(installer_label) .or_insert_with(Vec::new) - .push((install_id, install_info)); + .push((providers_label.target().dupe(), install_info)); } None => { return Err(InstallError::NoInstallProvider( @@ -254,39 +316,19 @@ async fn install( } } - let install_log_dir = &get_installer_log_directory(server_ctx, &ctx).await?; - - let mut install_requests = Vec::with_capacity(installer_to_files_map.len()); - for (installer_label, install_info_vector) in &installer_to_files_map { - let ctx = &ctx; - let installer_run_args = &request.installer_run_args; - - let mut install_files_vector: Vec<(&String, SmallMap<_, _>)> = Vec::new(); - for (install_id, install_info) in install_info_vector { + let mut request_data_vec = Vec::with_capacity(installer_to_files_map.len()); + for (installer_label, install_info_vector) in installer_to_files_map { + let mut installed_targets = Vec::with_capacity(install_info_vector.len()); + for (installed_target, install_info) in install_info_vector { let install_files = install_info.get_files()?; - install_files_vector.push((install_id, install_files)); + installed_targets.push((installed_target, install_files)); } - - let handle_install_request_future = async move { - handle_install_request( - ctx, - materializations, - install_log_dir, - &install_files_vector, - installer_label, - installer_run_args, - request.installer_debug, - ) - .await - }; - install_requests.push(handle_install_request_future); + request_data_vec.push(InstallRequestData { + installer_label, + installed_targets, + }); } - - try_join_all(install_requests) - .await - .context("Interaction with installer failed.")?; - - Ok(InstallResponse {}) + Ok(request_data_vec) } fn 
 fn get_random_tcp_port() -> anyhow::Result<u16> {
@@ -297,9 +339,8 @@ fn get_random_tcp_port() -> anyhow::Result<u16> {
 }

 fn get_timestamp_as_string() -> anyhow::Result<String> {
-    let nt = NaiveDateTime::from_timestamp_opt(Utc::now().timestamp(), 0)
+    let dt = DateTime::from_timestamp(Utc::now().timestamp(), 0)
         .context(InstallError::NativeDateTime)?;
-    let dt = nt.and_utc();
     Ok(dt.format("%Y%m%d-%H%M%S").to_string())
 }

@@ -310,77 +351,116 @@ fn calculate_hash<T: Hash>(t: &T) -> u64 {
 }

 async fn handle_install_request<'a>(
-    ctx: &'a DiceComputations,
-    materializations: &'a MaterializationContext,
+    ctx: &'a mut DiceComputations<'_>,
     install_log_dir: &AbsNormPathBuf,
-    install_files_slice: &[(&String, SmallMap<&str, Artifact>)],
-    installer_label: &ConfiguredProvidersLabel,
+    install_request_data: &InstallRequestData<'_>,
     initial_installer_run_args: &[String],
     installer_debug: bool,
 ) -> anyhow::Result<()> {
     let (files_tx, files_rx) = mpsc::unbounded_channel();
-    let build_files = async move {
-        build_files(ctx, materializations, install_files_slice, files_tx).await?;
-        anyhow::Ok(())
-    };
-    let build_installer_and_connect = async move {
-        // FIXME: The random unused tcp port might be available when get_random_tcp_port() is called,
-        // but when the installer tries to bind on it, someone else might bind on it.
-        // TODO: choose unused tcp port on installer side.
-        // The way communication may happen:
-        // 1. buck2 passes a temp file for a tcp port output.
-        // 2. installer app choose unused tcp port and writes it into the passed file.
-        // 3. buck2 reads tcp port from file and use it to connect to the installer app. (`connect_to_installer` function)
-        let tcp_port = get_random_tcp_port()?;
-
-        let installer_log_filename = format!(
-            "{}/installer_{}_{}.log",
-            install_log_dir,
-            get_timestamp_as_string()?,
-            calculate_hash(&installer_label.target().name())
-        );
-
-        let mut installer_run_args: Vec<String> = initial_installer_run_args.to_vec();
-
-        installer_run_args.extend(vec![
-            "--tcp-port".to_owned(),
-            tcp_port.to_string(),
-            "--log-path".to_owned(),
-            installer_log_filename.to_owned(),
-        ]);
-
-        build_launch_installer(
-            ctx,
-            materializations,
-            installer_label,
-            &installer_run_args,
-            installer_debug,
+    let (artifacts_ready, (installer_ready, installer_finished, device_metadata)) = ctx
+        .try_compute2(
+            |ctx| {
+                async move {
+                    build_files(ctx, &install_request_data.installed_targets, files_tx).await?;
+                    anyhow::Ok(Instant::now())
+                }
+                .boxed()
+            },
+            |ctx| {
+                async move {
+                    // FIXME: The random unused tcp port might be available when get_random_tcp_port() is called,
+                    // but when the installer tries to bind on it, someone else might bind on it.
+                    // TODO: choose an unused tcp port on the installer side instead.
+                    // The way communication may happen:
+                    // 1. buck2 passes a temp file for a tcp port output.
+                    // 2. the installer app chooses an unused tcp port and writes it into the passed file.
+                    // 3. buck2 reads the tcp port from the file and uses it to connect to the installer app (`connect_to_installer` function).
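+                    // Roughly, that intended handshake would be:
+                    //   buck2:     launch installer --tcp-port-file <tmpfile>
+                    //   installer: bind an unused port, write it to <tmpfile>
+                    //   buck2:     read <tmpfile>, connect to that port
+                    // (`--tcp-port-file` is illustrative only; today buck2 picks
+                    // the port up front and passes it via `--tcp-port` below.)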
+                    let tcp_port = get_random_tcp_port()?;
+
+                    let installer_log_filename = format!(
+                        "{}/installer_{}_{}.log",
+                        install_log_dir,
+                        get_timestamp_as_string()?,
+                        calculate_hash(&install_request_data.installer_label.target().name())
+                    );
+
+                    let mut installer_run_args: Vec<String> = initial_installer_run_args.to_vec();
+
+                    installer_run_args.extend(vec![
+                        "--tcp-port".to_owned(),
+                        tcp_port.to_string(),
+                        "--log-path".to_owned(),
+                        installer_log_filename.to_owned(),
+                    ]);
+
+                    build_launch_installer(
+                        ctx,
+                        &install_request_data.installer_label,
+                        &installer_run_args,
+                        installer_debug,
+                    )
+                    .await?;
+                    let client: InstallerClient<Channel> = connect_to_installer(tcp_port).await?;
+                    let artifact_fs = ctx.get_artifact_fs().await?;
+
+                    let installer_ready = Instant::now();
+                    for (installed_target, install_files) in &install_request_data.installed_targets
+                    {
+                        send_install_info(
+                            client.clone(),
+                            &install_id(installed_target),
+                            install_files,
+                            &artifact_fs,
+                        )
+                        .await?;
+                    }
+
+                    let device_metadata = Arc::new(Mutex::new(Vec::new()));
+                    let send_files_result =
+                        tokio_stream::wrappers::UnboundedReceiverStream::new(files_rx)
+                            .map(anyhow::Ok)
+                            .try_for_each_concurrent(None, |file| {
+                                send_file(
+                                    file,
+                                    &artifact_fs,
+                                    client.clone(),
+                                    installer_log_filename.to_owned(),
+                                    device_metadata.dupe(),
+                                )
+                            })
+                            .await;
+                    let installer_finished = Instant::now();
+                    send_shutdown_command(client.clone()).await?;
+                    send_files_result.context("Failed to send artifacts to installer")?;
+                    anyhow::Ok((installer_ready, installer_finished, device_metadata))
+                }
+                .boxed()
+            },
         )
         .await?;
-        let client: InstallerClient<Channel> = connect_to_installer(tcp_port).await?;
-        let artifact_fs = ctx.get_artifact_fs().await?;
-
-        for (install_id, install_files) in install_files_slice {
-            send_install_info(client.clone(), install_id, install_files, &artifact_fs).await?;
-        }
-
-        let send_files_result = tokio_stream::wrappers::UnboundedReceiverStream::new(files_rx)
-            .map(anyhow::Ok)
-            .try_for_each_concurrent(None, |file| {
-                send_file(
-                    file,
-                    &artifact_fs,
-                    client.clone(),
-                    installer_log_filename.to_owned(),
-                )
-            })
-            .await;
-        send_shutdown_command(client.clone()).await?;
-        send_files_result.context("Failed to send artifacts to installer")?;
-        anyhow::Ok(())
-    };
-    try_join(build_installer_and_connect, build_files).await?;
+    let device_metadata: Vec<buck2_data::DeviceMetadata> = device_metadata
+        .lock()
+        .await
+        .iter()
+        .map(|metadata| buck2_data::DeviceMetadata {
+            entry: metadata
+                .entry
+                .iter()
+                .map(|e| buck2_data::device_metadata::Entry {
+                    key: e.key.clone(),
+                    value: e.value.clone(),
+                })
+                .collect(),
+        })
+        .collect();
+    let build_finished = std::cmp::max(installer_ready, artifacts_ready);
+    let install_duration = installer_finished - build_finished;
+    get_dispatcher().instant_event(buck2_data::InstallFinished {
+        duration: install_duration.try_into().ok(),
+        device_metadata,
+    });
     anyhow::Ok(())
 }

@@ -444,8 +524,7 @@ async fn send_shutdown_command(mut client: InstallerClient<Channel>) -> anyhow::
 async fn build_launch_installer<'a>(
-    ctx: &'a DiceComputations,
-    materializations: &'a MaterializationContext,
+    ctx: &'a mut DiceComputations<'_>,
     providers_label: &ConfiguredProvidersLabel,
     installer_run_args: &[String],
     installer_log_console: bool,
@@ -476,16 +555,26 @@ async fn build_launch_installer<'a>(
         (artifact_visitor.inputs, run_args)
     };
     // returns IndexMap<ArtifactGroup, ArtifactGroupValues>;
-    try_join_all(inputs.into_iter().map(|input| async move {
-        materialize_artifact_group(ctx, &input, materializations)
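+    // With the shared `MaterializationContext` parameter gone, each input is
+    // materialized with an explicit `Materialize { force: true }` instead.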
+    ctx.try_compute_join(inputs, |ctx, input| {
+        async move {
+            materialize_artifact_group(
+                ctx,
+                &input,
+                &MaterializationContext::Materialize { force: true },
+            )
             .await
             .map(|value| (input, value))
-    }))
+        }
+        .boxed()
+    })
     .await
     .context("Failed to build installer")?;
+
+    let build_id: &str = &get_dispatcher().trace_id().to_string();
     background_command(&run_args[0])
         .args(&run_args[1..])
         .args(installer_run_args)
+        .env("BUCK2_UUID", build_id)
         .stderr(get_stdio(installer_log_console)?)
         .spawn()
         .context("Failed to spawn installer")?;
@@ -513,9 +602,8 @@ pub struct FileResult {
 }

 async fn build_files(
-    ctx: &DiceComputations,
-    materializations: &MaterializationContext,
-    install_files_slice: &[(&String, SmallMap<&str, Artifact>)],
+    ctx: &mut DiceComputations<'_>,
+    install_files_slice: &[(ConfiguredTargetLabel, SmallMap<&str, Artifact>)],
     tx: mpsc::UnboundedSender<FileResult>,
 ) -> anyhow::Result<()> {
     let mut file_outputs = Vec::with_capacity(install_files_slice.len());
@@ -530,22 +618,49 @@ async fn build_files(
         }
     }

-    try_join_all(file_outputs.into_iter().map(
-        |(install_id, name, artifact, tx_clone)| async move {
-            let artifact_values =
-                materialize_artifact_group(ctx, &artifact, materializations).await?;
-            for (artifact, artifact_value) in artifact_values.iter() {
-                let file_result = FileResult {
-                    install_id: (*install_id).to_owned(),
-                    name: (*name).to_owned(),
-                    artifact: artifact.to_owned(),
-                    artifact_value: artifact_value.to_owned(),
-                };
-                tx_clone.send(file_result)?;
+    ctx.try_compute_join(
+        file_outputs,
+        |ctx, (installed_target, name, artifact, tx_clone)| {
+            async move {
+                let (_, artifact_values) = ctx
+                    .try_compute2(
+                        |ctx| {
+                            async move {
+                                VALIDATION_IMPL
+                                    .get()?
+                                    .validate_target_node_transitively(ctx, installed_target.dupe())
+                                    .await
+                            }
+                            .boxed()
+                        },
+                        |ctx| {
+                            async move {
+                                Ok(materialize_artifact_group(
+                                    ctx,
+                                    &artifact,
+                                    &MaterializationContext::Materialize { force: true },
+                                )
+                                .await?)
+                            }
+                            .boxed()
+                        },
+                    )
+                    .await?;
+                for (artifact, artifact_value) in artifact_values.iter() {
+                    let install_id = install_id(installed_target);
+                    let file_result = FileResult {
+                        install_id,
+                        name: (*name).to_owned(),
+                        artifact: artifact.to_owned(),
+                        artifact_value: artifact_value.to_owned(),
+                    };
+                    tx_clone.send(file_result)?;
+                }
+                anyhow::Ok(())
             }
-            anyhow::Ok(())
+            .boxed()
         },
-    ))
+    )
     .await?;
     Ok(())
 }
@@ -560,12 +675,12 @@ async fn connect_to_installer(tcp_port: u16) -> anyhow::Result<InstallerClient<
 async fn send_file(
     file: FileResult,
     artifact_fs: &ArtifactFs,
     mut client: InstallerClient<Channel>,
     install_log: String,
+    device_metadata: Arc<Mutex<Vec<DeviceMetadata>>>,
 ) -> anyhow::Result<()> {
@@ -637,7 +753,7 @@ async fn send_file(
     span_async(start, async {
         let mut outcome: anyhow::Result<()> = Ok(());
         let response_result = client.file_ready(request).await;
-        let response = match response_result {
+        let mut response = match response_result {
             Ok(r) => r.into_inner(),
             Err(status) => {
                 return (
@@ -667,6 +783,10 @@ async fn send_file(
             }
             .into());
         }
+        device_metadata
+            .lock()
+            .await
+            .append(&mut response.device_metadata);

         if let Some(error_detail) = response.error_detail {
             outcome = Err(InstallError::ProcessingFileReadyFailure {
diff --git a/app/buck2_server_commands/src/commands/mod.rs b/app/buck2_server_commands/src/commands/mod.rs
deleted file mode 100644
index d0c12def77a9f..0000000000000
--- a/app/buck2_server_commands/src/commands/mod.rs
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */
-
-pub mod build;
-pub mod ctargets;
-pub mod debug_eval;
-pub(crate) mod init_commands;
-pub mod install;
-pub mod query;
-pub mod targets;
-pub mod targets_show_outputs;
diff --git a/app/buck2_server_commands/src/commands/query.rs b/app/buck2_server_commands/src/commands/query.rs
new file mode 100644
index 0000000000000..62b3407d49877
--- /dev/null
+++ b/app/buck2_server_commands/src/commands/query.rs
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+pub mod aquery;
+pub mod cquery;
+pub mod printer;
+pub(crate) mod query_target_ext;
+pub(crate) mod starlark_profile;
+pub mod uquery;
+
+#[derive(Debug, buck2_error::Error)]
+enum QueryCommandError {
+    #[error(
+        "query result was a set of files and one or more --output-attribute was requested, but files have no attributes"
+    )]
+    FileSetHasNoAttributes,
+}
diff --git a/app/buck2_server_commands/src/commands/query/aquery.rs b/app/buck2_server_commands/src/commands/query/aquery.rs
index 17954373c15fe..2d42debc7d3b0 100644
--- a/app/buck2_server_commands/src/commands/query/aquery.rs
+++ b/app/buck2_server_commands/src/commands/query/aquery.rs
@@ -9,20 +9,50 @@
 use std::io::Write;

-use anyhow::Context;
 use async_trait::async_trait;
+use buck2_build_api::actions::query::ActionQueryNode;
 use buck2_build_api::query::oneshot::QUERY_FRONTEND;
 use buck2_common::dice::cells::HasCellResolver;
+use buck2_error::BuckErrorContext;
+use buck2_query::query::environment::AttrFmtOptions;
 use buck2_query::query::syntax::simple::eval::values::QueryEvaluationResult;
 use buck2_server_ctx::ctx::ServerCommandContextTrait;
+use buck2_server_ctx::global_cfg_options::global_cfg_options_from_client_context;
 use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher;
-use buck2_server_ctx::pattern::target_platform_from_client_context;
 use buck2_server_ctx::template::run_server_command;
 use buck2_server_ctx::template::ServerCommandTemplate;
 use dice::DiceTransaction;

 use crate::commands::query::printer::QueryResultPrinter;
 use crate::commands::query::printer::ShouldPrintProviders;
+use crate::commands::query::query_target_ext::QueryCommandTarget;
+
+impl QueryCommandTarget for ActionQueryNode {
+    fn call_stack(&self) -> Option<String> {
+        None
+    }
+
+    fn attr_to_string_alternate(&self, _options: AttrFmtOptions, attr: &Self::Attr<'_>) -> String {
+        format!("{:#}", attr)
+    }
+
+    fn attr_serialize<S: serde::Serializer>(
+        &self,
+        attr: &Self::Attr<'_>,
+        serializer: S,
+    ) -> Result<S::Ok, S::Error> {
+        serde::Serialize::serialize(attr, serializer)
+    }
+
+    fn attr_fmt(
+        &self,
+        fmt: &mut std::fmt::Formatter<'_>,
+        _options: AttrFmtOptions,
+        attr: &Self::Attr<'_>,
+    ) -> std::fmt::Result {
+        std::fmt::Display::fmt(attr, fmt)
+    }
+}

 pub(crate) async fn aquery_command(
     ctx: &dyn ServerCommandContextTrait,
@@ -78,26 +108,27 @@ async fn aquery(
     )?;

     let buck2_cli_proto::AqueryRequest {
-        query,
-        query_args,
-        context,
-        ..
+        query, query_args, ..
     } = request;
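+    // The client context is no longer needed here: the configuration flags
+    // travel in `request.target_cfg` and are resolved just below.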
-    let client_ctx = context
-        .as_ref()
-        .context("No client context (internal error)")?;
-    let global_target_platform =
-        target_platform_from_client_context(client_ctx, server_ctx, &mut ctx).await?;
+    let global_cfg_options = global_cfg_options_from_client_context(
+        request
+            .target_cfg
+            .as_ref()
+            .internal_error_anyhow("target_cfg must be set")?,
+        server_ctx,
+        &mut ctx,
+    )
+    .await?;

     let query_result = QUERY_FRONTEND
         .get()?
         .eval_aquery(
-            &ctx,
+            &mut ctx,
             server_ctx.working_dir(),
             query,
             query_args,
-            global_target_platform,
+            global_cfg_options,
         )
         .await?;
diff --git a/app/buck2_server_commands/src/commands/query/cquery.rs b/app/buck2_server_commands/src/commands/query/cquery.rs
index 84d9c4a4b0538..1c248ac785274 100644
--- a/app/buck2_server_commands/src/commands/query/cquery.rs
+++ b/app/buck2_server_commands/src/commands/query/cquery.rs
@@ -9,11 +9,9 @@
 use std::io::Write;

-use anyhow::Context;
 use async_trait::async_trait;
 use buck2_build_api::analysis::calculation::RuleAnalysisCalculation;
 use buck2_build_api::interpreter::rule_defs::provider::collection::FrozenProviderCollectionValue;
-use buck2_build_api::query::oneshot::CqueryOwnerBehavior;
 use buck2_build_api::query::oneshot::QUERY_FRONTEND;
 use buck2_cli_proto::CqueryRequest;
 use buck2_cli_proto::CqueryResponse;
@@ -21,21 +19,76 @@ use buck2_common::dice::cells::HasCellResolver;
 use buck2_core::configuration::compatibility::MaybeCompatible;
 use buck2_core::provider::label::ConfiguredProvidersLabel;
 use buck2_core::provider::label::ProvidersName;
+use buck2_error::internal_error_anyhow;
+use buck2_error::BuckErrorContext;
+use buck2_node::attrs::display::AttrDisplayWithContext;
+use buck2_node::attrs::display::AttrDisplayWithContextExt;
+use buck2_node::attrs::fmt_context::AttrFmtContext;
+use buck2_node::attrs::serialize::AttrSerializeWithContext;
 use buck2_node::nodes::configured::ConfiguredTargetNode;
+use buck2_query::query::environment::AttrFmtOptions;
 use buck2_query::query::syntax::simple::eval::values::QueryEvaluationResult;
 use buck2_server_ctx::ctx::ServerCommandContextTrait;
+use buck2_server_ctx::global_cfg_options::global_cfg_options_from_client_context;
 use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher;
-use buck2_server_ctx::pattern::target_platform_from_client_context;
 use buck2_server_ctx::template::run_server_command;
 use buck2_server_ctx::template::ServerCommandTemplate;
 use buck2_util::truncate::truncate;
-use dice::DiceComputations;
 use dice::DiceTransaction;
+use dice::LinearRecomputeDiceComputations;
 use dupe::Dupe;

 use crate::commands::query::printer::ProviderLookUp;
 use crate::commands::query::printer::QueryResultPrinter;
 use crate::commands::query::printer::ShouldPrintProviders;
+use crate::commands::query::query_target_ext::QueryCommandTarget;
+use crate::commands::query::starlark_profile::write_query_profile_for_targets;
+
+impl QueryCommandTarget for ConfiguredTargetNode {
+    fn call_stack(&self) -> Option<String> {
+        ConfiguredTargetNode::call_stack(self)
+    }
+
+    fn attr_to_string_alternate(&self, options: AttrFmtOptions, attr: &Self::Attr<'_>) -> String {
+        format!(
+            "{:#}",
+            attr.as_display(&AttrFmtContext {
+                package: Some(self.label().pkg().dupe()),
+                options
+            })
+        )
+    }
+
+    fn attr_serialize<S: serde::Serializer>(
+        &self,
+        attr: &Self::Attr<'_>,
+        serializer: S,
+    ) -> Result<S::Ok, S::Error> {
+        attr.serialize_with_ctx(
+            &AttrFmtContext {
+                package: Some(self.label().pkg().dupe()),
+                options: Default::default(),
+            },
+            serializer,
+        )
+    }
+
+    fn attr_fmt(
+        &self,
+        fmt: &mut std::fmt::Formatter<'_>,
+        options: AttrFmtOptions,
+        attr: &Self::Attr<'_>,
+    ) -> std::fmt::Result {
+        AttrDisplayWithContext::fmt(
+            attr,
+            &AttrFmtContext {
+                package: Some(self.label().pkg().dupe()),
+                options,
+            },
+            fmt,
+        )
+    }
+}

 pub(crate) async fn cquery_command(
     ctx: &dyn ServerCommandContextTrait,
@@ -103,7 +156,7 @@ async fn cquery(
         target_universe,
         context,
         show_providers,
-        correct_owner,
+        target_cfg,
         ..
     } = request;
     // The request will always have a universe value, an empty one indicates the user didn't provide a universe.
     let target_universe = if target_universe.is_empty() {
         None
     } else {
         Some(target_universe)
     };
     let client_ctx = context
         .as_ref()
-        .context("No client context (internal error)")?;
+        .internal_error_anyhow("No client context")?;

     let target_call_stacks = client_ctx.target_call_stacks;

-    let global_target_platform =
-        target_platform_from_client_context(client_ctx, server_ctx, &mut ctx).await?;
+    let global_cfg_options = global_cfg_options_from_client_context(
+        target_cfg
+            .as_ref()
+            .internal_error_anyhow("target_cfg must be set")?,
+        server_ctx,
+        &mut ctx,
+    )
+    .await?;

-    let owner_behavior = match correct_owner {
-        true => CqueryOwnerBehavior::Correct,
-        false => CqueryOwnerBehavior::Deprecated,
-    };
+    let profile_mode = request
+        .profile_mode
+        .map(|i| {
+            buck2_cli_proto::ProfileMode::from_i32(i).internal_error_anyhow("Invalid profile mode")
+        })
+        .transpose()?;

-    let query_result = QUERY_FRONTEND
+    let (query_result, universes) = QUERY_FRONTEND
         .get()?
         .eval_cquery(
-            &ctx,
+            &mut ctx,
             server_ctx.working_dir(),
-            owner_behavior,
             query,
             query_args,
-            global_target_platform,
+            global_cfg_options,
             target_universe,
+            profile_mode.is_some(),
         )
         .await?;

-    let should_print_providers = if *show_providers {
-        ShouldPrintProviders::Yes(&*ctx as &dyn ProviderLookUp<ConfiguredTargetNode>)
-    } else {
-        ShouldPrintProviders::No
-    };
-
-    match query_result {
-        QueryEvaluationResult::Single(targets) => {
-            output_configuration
-                .print_single_output(
-                    &mut stdout,
-                    targets,
-                    target_call_stacks,
-                    should_print_providers,
-                )
-                .await?
+    if let Some(profile_mode) = profile_mode {
+        let universes = universes.internal_error_anyhow("No universes")?;
+        if universes.is_empty() {
+            // Sanity check.
+            return Err(internal_error_anyhow!("Empty universes list"));
         }
-        QueryEvaluationResult::Multiple(results) => {
-            output_configuration
-                .print_multi_output(
-                    &mut stdout,
-                    results,
-                    target_call_stacks,
-                    should_print_providers,
-                )
-                .await?
+
+        write_query_profile_for_targets(
+            &mut ctx,
+            profile_mode,
+            request.profile_output.as_deref(),
+            universes.iter().flat_map(|u| {
+                u.iter()
+                    .map(|t| t.label().unconfigured().pkg())
+                    // `collect` should not be needed, but I was defeated by the compiler.
+                    .collect::<Vec<_>>()
+            }),
+        )
+        .await?;
+    } else {
+        if universes.is_some() {
+            return Err(internal_error_anyhow!("We did not request universes"));
         }
-    };
+    }
+
+    ctx.with_linear_recompute(|ctx| async move {
+        let should_print_providers = if *show_providers {
+            ShouldPrintProviders::Yes(&ctx as &dyn ProviderLookUp<ConfiguredTargetNode>)
+        } else {
+            ShouldPrintProviders::No
+        };
+
+        match query_result {
+            QueryEvaluationResult::Single(targets) => {
+                output_configuration
+                    .print_single_output(
+                        &mut stdout,
+                        targets,
+                        target_call_stacks,
+                        should_print_providers,
+                    )
+                    .await?
+            }
+            QueryEvaluationResult::Multiple(results) => {
+                output_configuration
+                    .print_multi_output(
+                        &mut stdout,
+                        results,
+                        target_call_stacks,
+                        should_print_providers,
+                    )
+                    .await?
+            }
+        };
+        anyhow::Ok(())
+    })
+    .await?;

     Ok(CqueryResponse {})
 }

 #[async_trait]
-impl ProviderLookUp<ConfiguredTargetNode> for DiceComputations {
+impl ProviderLookUp<ConfiguredTargetNode> for LinearRecomputeDiceComputations<'_> {
     async fn lookup(
         &self,
         t: &ConfiguredTargetNode,
     ) -> anyhow::Result<MaybeCompatible<FrozenProviderCollectionValue>> {
-        self.get_providers(&ConfiguredProvidersLabel::new(
-            t.label().dupe(),
-            ProvidersName::Default,
-        ))
-        .await
+        self.get()
+            .get_providers(&ConfiguredProvidersLabel::new(
+                t.label().dupe(),
+                ProvidersName::Default,
+            ))
+            .await
     }
 }
diff --git a/app/buck2_server_commands/src/commands/query/mod.rs b/app/buck2_server_commands/src/commands/query/mod.rs
deleted file mode 100644
index 9fe304c0f1a5d..0000000000000
--- a/app/buck2_server_commands/src/commands/query/mod.rs
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */
-
-use thiserror::Error;
-
-pub mod aquery;
-pub mod cquery;
-pub mod printer;
-pub mod uquery;
-
-#[derive(Debug, Error)]
-enum QueryCommandError {
-    #[error(
-        "query result was a set of files and one or more --output-attribute was requested, but files have not attributes"
-    )]
-    FileSetHasNoAttributes,
-}
diff --git a/app/buck2_server_commands/src/commands/query/printer.rs b/app/buck2_server_commands/src/commands/query/printer.rs
index 102f258f8c75d..d3365b1ce6e94 100644
--- a/app/buck2_server_commands/src/commands/query/printer.rs
+++ b/app/buck2_server_commands/src/commands/query/printer.rs
@@ -9,6 +9,7 @@
 #![allow(clippy::drop_non_drop)] // FIXME?

+use std::collections::BTreeMap;
 use std::fmt::Display;
 use std::fmt::Formatter;
 use std::io::Write;
@@ -20,6 +21,7 @@ use buck2_build_api::interpreter::rule_defs::provider::collection::FrozenProvide
 use buck2_cli_proto::QueryOutputFormat;
 use buck2_core::cells::CellResolver;
 use buck2_core::configuration::compatibility::MaybeCompatible;
+use buck2_query::query::environment::AttrFmtOptions;
 use buck2_query::query::environment::QueryTarget;
 use buck2_query::query::environment::QueryTargets;
 use buck2_query::query::syntax::simple::eval::file_set::FileSet;
@@ -32,12 +34,14 @@ use dupe::Copy_;
 use dupe::Dupe_;
 use gazebo::variants::UnpackVariants;
 use indent_write::fmt::IndentWriter;
+use indent_write::io::IndentWriter as IoIndentWriter;
 use regex::RegexSet;
 use serde::ser::SerializeMap;
 use serde::ser::SerializeSeq;
 use serde::Serialize;
 use serde::Serializer;

+use crate::commands::query::query_target_ext::QueryCommandTarget;
 use crate::commands::query::QueryCommandError;
 use crate::dot::targets::DotTargetGraph;
 use crate::dot::Dot;
@@ -56,7 +60,7 @@ pub trait ProviderLookUp<T: QueryTarget>: Send + Sync {
 }

 #[derive(Debug)]
-pub struct QueryResultPrinter<'a> {
+pub(crate) struct QueryResultPrinter<'a> {
     resolver: &'a CellResolver,
     attributes: Option<RegexSet>,
     output_format: QueryOutputFormat,
@@ -93,13 +97,13 @@ struct PrintableQueryTarget<'a, T: QueryTarget> {

 impl<'a, T: QueryTarget> PrintableQueryTarget<'a, T> {
     fn label(&self) -> String {
-        self.value.node_ref().to_string()
+        self.value.node_key().to_string()
     }
 }

-impl<'a, T: QueryTarget> Display for PrintableQueryTarget<'a, T> {
+impl<'a, T: QueryCommandTarget> Display for PrintableQueryTarget<'a, T> {
     fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{}", self.value.node_ref())?;
+        write!(f, "{}", self.value.node_key())?;

         if self.target_call_stacks || self.providers.is_some() {
             writeln!(f)?;
@@ -129,7 +133,7 @@ impl<'a, T: QueryTarget> Display for PrintableQueryTarget<'a, T> {
     }
 }

-impl<'a, T: QueryTarget> Serialize for PrintableQueryTarget<'a, T> {
+impl<'a, T: QueryCommandTarget> Serialize for PrintableQueryTarget<'a, T> {
     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
     where
         S: Serializer,
@@ -139,12 +143,12 @@ impl<'a, T: QueryTarget> Serialize for PrintableQueryTarget<'a, T> {
         QueryTargets::for_all_attrs(self.value, |attr_name, attr_value| {
             if let Some(attr_regex) = self.attributes {
                 if attr_regex.is_match(attr_name) {
-                    struct AttrValueSerialize<'a, 'b, T: QueryTarget> {
+                    struct AttrValueSerialize<'a, 'b, T: QueryCommandTarget> {
                         target: &'a T,
                         attr: &'a T::Attr<'b>,
                     }

-                    impl<'a, 'b, T: QueryTarget> Serialize for AttrValueSerialize<'a, 'b, T> {
+                    impl<'a, 'b, T: QueryCommandTarget> Serialize for AttrValueSerialize<'a, 'b, T> {
                         fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
                         where
                             S: Serializer,
@@ -177,7 +181,7 @@ impl<'a, T: QueryTarget> Serialize for PrintableQueryTarget<'a, T> {
     }
 }

-impl<'a, T: QueryTarget> Serialize for TargetSetJsonPrinter<'a, T> {
+impl<'a, T: QueryCommandTarget> Serialize for TargetSetJsonPrinter<'a, T> {
     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
     where
         S: Serializer,
@@ -253,7 +257,7 @@ impl<'a> QueryResultPrinter<'a> {
         })
     }

-    pub async fn print_multi_output<'b, T: QueryTarget, W: std::io::Write>(
+    pub async fn print_multi_output<'b, T: QueryCommandTarget, W: std::io::Write>(
         &self,
         mut output: W,
         multi_result: MultiQueryResult<T>,
@@ -305,7 +309,7 @@ impl<'a> QueryResultPrinter<'a> {
             std::mem::drop(ser);
             // need to add a newline to flush the output.
             writeln!(&mut output)?;
-            captured_error
+            Ok(captured_error?)
         }
         _ => {
             self.print_single_output(
@@ -319,7 +323,7 @@ impl<'a> QueryResultPrinter<'a> {
         }
     }

-    pub async fn print_single_output<'b, T: QueryTarget, W: std::io::Write>(
+    pub async fn print_single_output<'b, T: QueryCommandTarget, W: std::io::Write>(
         &self,
         mut output: W,
         result: QueryEvaluationValue<T>,
@@ -336,6 +340,42 @@ impl<'a> QueryResultPrinter<'a> {
                     writeln!(&mut output, "{}", target)?;
                 }
             }
+            QueryOutputFormat::Starlark => {
+                for (i, target) in targets.iter().enumerate() {
+                    if i > 0 {
+                        writeln!(&mut output)?;
+                        writeln!(&mut output)?;
+                    }
+                    if call_stack {
+                        match target.call_stack() {
+                            Some(call_stack) => {
+                                write!(&mut output, "{}", indent("# ", &call_stack))?;
+                            }
+                            None => {
+                                // This is `aquery`.
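+                                // Action query nodes are synthesized from the
+                                // build graph rather than defined in Starlark,
+                                // so they have no call stack to print.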
+                            }
+                        }
+                    }
+                    writeln!(&mut output, "{}(", target.rule_type())?;
+                    let mut inner_out = IoIndentWriter::new("    ", &mut output);
+                    let mut attrs = BTreeMap::new();
+
+                    target.defined_attrs_for_each(|k, v| {
+                        attrs.insert(
+                            k.to_owned(),
+                            format!("{:#}", target.attr_display(v, AttrFmtOptions::default())),
+                        );
+                        anyhow::Ok(())
+                    })?;
+                    if let Some(name) = attrs.remove("name") {
+                        writeln!(&mut inner_out, "name = {},", name)?;
+                    }
+                    for (k, v) in attrs {
+                        writeln!(&mut inner_out, "{} = {},", k, v)?;
+                    }
+                    writeln!(&mut output, ")")?;
+                }
+            }
             QueryOutputFormat::Json => {
                 let mut ser = serde_json::Serializer::pretty(&mut output);
                 TargetSetJsonPrinter::new(
@@ -374,7 +414,7 @@ impl<'a> QueryResultPrinter<'a> {
                     return Err(QueryCommandError::FileSetHasNoAttributes.into());
                 }
                 match self.output_format {
-                    QueryOutputFormat::Default => {
+                    QueryOutputFormat::Default | QueryOutputFormat::Starlark => {
                         for file in files.iter() {
                             writeln!(
                                 &mut output,
@@ -414,21 +454,18 @@ async fn printable_targets<'a, T: QueryTarget>(
     attributes: &'a Option<RegexSet>,
     target_call_stacks: bool,
 ) -> anyhow::Result<Vec<PrintableQueryTarget<'a, T>>> {
-    futures::future::join_all(targets.iter().map(|t| {
-        let print_providers = &print_providers;
-        async move {
-            Ok(PrintableQueryTarget {
-                value: t,
-                attributes,
-                target_call_stacks,
-                providers: match print_providers {
-                    ShouldPrintProviders::No => None,
-                    ShouldPrintProviders::Yes(lookup) => {
-                        Some(lookup.lookup(t).await?.require_compatible()?)
-                    }
-                },
-            })
-        }
+    futures::future::join_all(targets.iter().map(|t| async move {
+        Ok(PrintableQueryTarget {
+            value: t,
+            attributes,
+            target_call_stacks,
+            providers: match print_providers {
+                ShouldPrintProviders::No => None,
+                ShouldPrintProviders::Yes(lookup) => {
+                    Some(lookup.lookup(t).await?.require_compatible()?)
+                }
+            },
+        })
     }))
     .await
     .into_iter()
diff --git a/app/buck2_server_commands/src/commands/query/query_target_ext.rs b/app/buck2_server_commands/src/commands/query/query_target_ext.rs
new file mode 100644
index 0000000000000..f5b1094628bb9
--- /dev/null
+++ b/app/buck2_server_commands/src/commands/query/query_target_ext.rs
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::fmt::Formatter;
+
+use buck2_query::query::environment::AttrFmtOptions;
+use buck2_query::query::environment::QueryTarget;
+use dupe::Dupe;
+
+/// Extensions of `QueryTarget` needed in query commands.
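+///
+/// Each command flavor (uquery, cquery, aquery) implements this for its node
+/// type to control how attributes are formatted and serialized in output.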
+pub(crate) trait QueryCommandTarget: QueryTarget {
+    fn call_stack(&self) -> Option<String>;
+
+    #[allow(dead_code)]
+    fn attr_to_string_alternate(&self, _options: AttrFmtOptions, attr: &Self::Attr<'_>) -> String;
+
+    fn attr_serialize<S: serde::Serializer>(
+        &self,
+        attr: &Self::Attr<'_>,
+        serializer: S,
+    ) -> Result<S::Ok, S::Error>;
+
+    fn attr_fmt(
+        &self,
+        fmt: &mut std::fmt::Formatter<'_>,
+        _options: AttrFmtOptions,
+        attr: &Self::Attr<'_>,
+    ) -> std::fmt::Result;
+
+    fn attr_display<'a, 'b>(
+        &'a self,
+        attr: &'a Self::Attr<'b>,
+        options: AttrFmtOptions,
+    ) -> AttrDisplay<'a, 'b, Self> {
+        AttrDisplay(self, attr, options)
+    }
+}
+
+pub struct AttrDisplay<'a, 'b, T: QueryCommandTarget>(&'a T, &'a T::Attr<'b>, AttrFmtOptions);
+
+impl<'a, 'b, T: QueryCommandTarget> std::fmt::Display for AttrDisplay<'a, 'b, T> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        self.0.attr_fmt(f, self.2.dupe(), self.1)
+    }
+}
diff --git a/app/buck2_server_commands/src/commands/query/starlark_profile.rs b/app/buck2_server_commands/src/commands/query/starlark_profile.rs
new file mode 100644
index 0000000000000..829c5571c17cf
--- /dev/null
+++ b/app/buck2_server_commands/src/commands/query/starlark_profile.rs
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::path::Path;
+
+use buck2_core::fs::paths::abs_path::AbsPath;
+use buck2_core::package::PackageLabel;
+use buck2_error::BuckErrorContext;
+use buck2_events::dispatch::console_message;
+use buck2_interpreter::starlark_profiler::data::StarlarkProfileDataAndStats;
+use buck2_node::nodes::frontend::TargetGraphCalculation;
+use buck2_profile::write_starlark_profile;
+use dice::DiceComputations;
+use futures::FutureExt;
+
+pub(crate) async fn write_query_profile_for_targets<'a>(
+    ctx: &mut DiceComputations<'_>,
+    _profile_mode: buck2_cli_proto::ProfileMode,
+    output_path: Option<&str>,
+    targets: impl IntoIterator<Item = PackageLabel> + 'a,
+) -> anyhow::Result<()> {
+    let output_path =
+        output_path.internal_error_anyhow("Output path must be set for profile mode")?;
+    let output_path = AbsPath::new(Path::new(output_path))
+        .internal_error_anyhow("Output path must be set to absolute path by the client")?;
+    do_write_query_profile_for_targets(ctx, output_path, Vec::from_iter(targets))
+        .boxed()
+        .await
+}
+
+async fn do_write_query_profile_for_targets<'a>(
+    ctx: &mut DiceComputations<'_>,
+    output_path: &AbsPath,
+    mut targets: Vec<PackageLabel>,
+) -> anyhow::Result<()> {
+    // We want stable output.
+    targets.sort();
+    targets.dedup();
+
+    let mut profiles = Vec::new();
+    for target in targets {
+        // This should be already cached.
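+        // The query evaluation above already loaded these packages with
+        // profiling enabled, so this lookup should hit the DICE cache rather
+        // than re-evaluate the build file.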
+        let eval_results = ctx.get_interpreter_results(target).await?;
+        let profile = eval_results
+            .starlark_profile
+            .as_ref()
+            .internal_error_anyhow("Starlark profile must be set")?;
+        let profile = StarlarkProfileDataAndStats::downcast(&**profile)?;
+        profiles.push(profile.clone());
+    }
+
+    let profile = StarlarkProfileDataAndStats::merge(&profiles)?;
+
+    write_starlark_profile(&profile, output_path)?;
+
+    console_message(format!(
+        "Starlark profile data is written to {}",
+        output_path.display()
+    ));
+
+    Ok(())
+}
diff --git a/app/buck2_server_commands/src/commands/query/uquery.rs b/app/buck2_server_commands/src/commands/query/uquery.rs
index 974a6fb13ee7d..f9485619e85ee 100644
--- a/app/buck2_server_commands/src/commands/query/uquery.rs
+++ b/app/buck2_server_commands/src/commands/query/uquery.rs
@@ -9,22 +9,76 @@
 use std::io::Write;

-use anyhow::Context;
 use async_trait::async_trait;
 use buck2_build_api::query::oneshot::QUERY_FRONTEND;
 use buck2_cli_proto::UqueryRequest;
 use buck2_cli_proto::UqueryResponse;
 use buck2_common::dice::cells::HasCellResolver;
+use buck2_error::BuckErrorContext;
+use buck2_node::attrs::display::AttrDisplayWithContext;
+use buck2_node::attrs::display::AttrDisplayWithContextExt;
+use buck2_node::attrs::fmt_context::AttrFmtContext;
+use buck2_node::attrs::serialize::AttrSerializeWithContext;
+use buck2_node::nodes::unconfigured::TargetNode;
+use buck2_node::nodes::unconfigured::TargetNodeData;
+use buck2_query::query::environment::AttrFmtOptions;
 use buck2_query::query::syntax::simple::eval::values::QueryEvaluationResult;
 use buck2_server_ctx::ctx::ServerCommandContextTrait;
 use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher;
-use buck2_server_ctx::pattern::target_platform_from_client_context;
 use buck2_server_ctx::template::run_server_command;
 use buck2_server_ctx::template::ServerCommandTemplate;
 use dice::DiceTransaction;
+use dupe::Dupe;

 use crate::commands::query::printer::QueryResultPrinter;
 use crate::commands::query::printer::ShouldPrintProviders;
+use crate::commands::query::query_target_ext::QueryCommandTarget;
+
+impl QueryCommandTarget for TargetNode {
+    fn call_stack(&self) -> Option<String> {
+        TargetNodeData::call_stack(self)
+    }
+
+    fn attr_to_string_alternate(&self, options: AttrFmtOptions, attr: &Self::Attr<'_>) -> String {
+        format!(
+            "{:#}",
+            attr.as_display(&AttrFmtContext {
+                package: Some(self.label().pkg().dupe()),
+                options
+            })
+        )
+    }
+
+    fn attr_serialize<S: serde::Serializer>(
+        &self,
+        attr: &Self::Attr<'_>,
+        serializer: S,
+    ) -> Result<S::Ok, S::Error> {
+        attr.serialize_with_ctx(
+            &AttrFmtContext {
+                package: Some(self.label().pkg().dupe()),
+                options: Default::default(),
+            },
+            serializer,
+        )
+    }
+
+    fn attr_fmt(
+        &self,
+        fmt: &mut std::fmt::Formatter<'_>,
+        options: AttrFmtOptions,
+        attr: &Self::Attr<'_>,
+    ) -> std::fmt::Result {
+        AttrDisplayWithContext::fmt(
+            attr,
+            &AttrFmtContext {
+                package: Some(self.label().pkg().dupe()),
+                options,
+            },
+            fmt,
+        )
+    }
+}

 pub(crate) async fn uquery_command(
     ctx: &dyn ServerCommandContextTrait,
@@ -87,22 +141,13 @@ async fn uquery(

     let client_ctx = context
         .as_ref()
-        .context("No client context (internal error)")?;
+        .internal_error_anyhow("No client context")?;

     let target_call_stacks = client_ctx.target_call_stacks;

-    let global_target_platform =
-        target_platform_from_client_context(client_ctx, server_ctx, &mut ctx).await?;
-
     let query_result = QUERY_FRONTEND
         .get()?
-        .eval_uquery(
-            &ctx,
-            server_ctx.working_dir(),
-            query,
-            query_args,
-            global_target_platform,
-        )
+        .eval_uquery(&mut ctx, server_ctx.working_dir(), query, query_args)
         .await?;

     match query_result {
diff --git a/app/buck2_server_commands/src/commands/targets.rs b/app/buck2_server_commands/src/commands/targets.rs
new file mode 100644
index 0000000000000..bc11f2c2aa1b7
--- /dev/null
+++ b/app/buck2_server_commands/src/commands/targets.rs
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+mod default;
+pub(crate) mod fmt;
+mod resolve_alias;
+mod streaming;
+
+use std::fs::File;
+use std::io::BufWriter;
+use std::io::Write;
+
+use anyhow::Context as _;
+use async_trait::async_trait;
+use buck2_cli_proto::targets_request;
+use buck2_cli_proto::targets_request::Compression;
+use buck2_cli_proto::targets_request::TargetHashGraphType;
+use buck2_cli_proto::TargetsRequest;
+use buck2_cli_proto::TargetsResponse;
+use buck2_common::dice::cells::HasCellResolver;
+use buck2_common::pattern::parse_from_cli::parse_patterns_from_cli_args;
+use buck2_core::pattern::pattern_type::TargetPatternExtra;
+use buck2_error::internal_error_anyhow;
+use buck2_error::BuckErrorContext;
+use buck2_server_ctx::ctx::ServerCommandContextTrait;
+use buck2_server_ctx::global_cfg_options::global_cfg_options_from_client_context;
+use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher;
+use buck2_server_ctx::template::run_server_command;
+use buck2_server_ctx::template::ServerCommandTemplate;
+use dice::DiceTransaction;
+use flate2::write::GzEncoder;
+use zstd::stream::write as zstd;
+
+use crate::commands::targets::default::targets_batch;
+use crate::commands::targets::default::TargetHashOptions;
+use crate::commands::targets::fmt::create_formatter;
+use crate::commands::targets::resolve_alias::targets_resolve_aliases;
+use crate::commands::targets::streaming::targets_streaming;
+
+#[derive(PartialEq, Eq)]
+enum OutputType {
+    Stdout,
+    File,
+}
+
+trait Compressor: Write + Send {
+    fn finish(self: Box<Self>) -> anyhow::Result<()>;
+}
+
+struct UncompressedCompressor<T>(T);
+
+impl<T: Write> Write for UncompressedCompressor<T> {
+    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
+        self.0.write(buf)
+    }
+
+    fn flush(&mut self) -> std::io::Result<()> {
+        self.0.flush()
+    }
+
+    fn write_all(&mut self, buf: &[u8]) -> std::io::Result<()> {
+        self.0.write_all(buf)
+    }
+}
+
+impl<T: Write + Send> Compressor for UncompressedCompressor<T> {
+    fn finish(self: Box<Self>) -> anyhow::Result<()> {
+        Ok(())
+    }
+}
+
+impl<T: Write + Send> Compressor for GzEncoder<T> {
+    fn finish(self: Box<Self>) -> anyhow::Result<()> {
+        (*self).finish()?;
+        Ok(())
+    }
+}
+
+impl<T: Write + Send> Compressor for zstd::Encoder<'_, T> {
+    fn finish(self: Box<Self>) -> anyhow::Result<()> {
+        (*self).finish()?;
+        Ok(())
+    }
+}
+
+fn outputter<'a, W: Write + Send + 'a>(
+    request: &TargetsRequest,
+    stdout: W,
+) -> anyhow::Result<(OutputType, Box<dyn Compressor + 'a>)> {
+    let (output_type, output): (_, Box<dyn Compressor + 'a>) = match &request.output {
+        None => (OutputType::Stdout, Box::new(UncompressedCompressor(stdout))),
+        Some(file) => {
+            let file =
+                BufWriter::new(File::create(file).with_context(|| {
+                    format!("Failed to open file `{file}` for `targets` output ")
+                })?);
+            (OutputType::File, Box::new(UncompressedCompressor(file)))
+        }
+    };
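+    // The chosen writer may then be wrapped in a gzip or zstd encoder, so
+    // callers stream through a single `Compressor` regardless of compression.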
+    let compression = Compression::from_i32(request.compression)
+        .internal_error_anyhow("buck cli should send valid compression type")?;
+    let output = match compression {
+        Compression::Uncompressed => output,
+        Compression::Gzip => Box::new(GzEncoder::new(output, Default::default())),
+        Compression::Zstd => Box::new(zstd::Encoder::new(output, 0)?),
+    };
+    Ok((output_type, output))
+}
+
+pub(crate) async fn targets_command(
+    server_ctx: &dyn ServerCommandContextTrait,
+    partial_result_dispatcher: PartialResultDispatcher<buck2_cli_proto::StdoutBytes>,
+    req: TargetsRequest,
+) -> anyhow::Result<TargetsResponse> {
+    run_server_command(
+        TargetsServerCommand { req },
+        server_ctx,
+        partial_result_dispatcher,
+    )
+    .await
+}
+
+struct TargetsServerCommand {
+    req: TargetsRequest,
+}
+
+#[async_trait]
+impl ServerCommandTemplate for TargetsServerCommand {
+    type StartEvent = buck2_data::TargetsCommandStart;
+    type EndEvent = buck2_data::TargetsCommandEnd;
+    type Response = TargetsResponse;
+    type PartialResult = buck2_cli_proto::StdoutBytes;
+
+    async fn command(
+        &self,
+        server_ctx: &dyn ServerCommandContextTrait,
+        mut partial_result_dispatcher: PartialResultDispatcher<Self::PartialResult>,
+        dice: DiceTransaction,
+    ) -> anyhow::Result<Self::Response> {
+        targets(
+            server_ctx,
+            &mut partial_result_dispatcher.as_writer(),
+            dice,
+            &self.req,
+        )
+        .await
+    }
+
+    fn is_success(&self, _response: &Self::Response) -> bool {
+        // No response if we failed.
+        true
+    }
+
+    fn end_event(&self, _response: &buck2_error::Result<Self::Response>) -> Self::EndEvent {
+        buck2_data::TargetsCommandEnd {
+            unresolved_target_patterns: self
+                .req
+                .target_patterns
+                .iter()
+                .map(|p| buck2_data::TargetPattern { value: p.clone() })
+                .collect(),
+        }
+    }
+}
+
+async fn targets(
+    server_ctx: &dyn ServerCommandContextTrait,
+    stdout: &mut (impl Write + Send),
+    dice: DiceTransaction,
+    request: &TargetsRequest,
+) -> anyhow::Result<TargetsResponse> {
+    let (output_type, mut output) = outputter(request, stdout)?;
+    let mut res = targets_with_output(server_ctx, dice, request, &mut output).await;
+    match &mut res {
+        Ok(response)
+            if !response.serialized_targets_output.is_empty()
+                && output_type == OutputType::File =>
+        {
+            output.write_all(response.serialized_targets_output.as_bytes())?;
+            response.serialized_targets_output.clear();
+        }
+        _ => {}
+    }
+    output.flush()?;
+    output.finish()?;
+    res
+}
+
+async fn targets_with_output(
+    server_ctx: &dyn ServerCommandContextTrait,
+    mut dice: DiceTransaction,
+    request: &TargetsRequest,
+    output: &mut (impl Write + Send),
+) -> anyhow::Result<TargetsResponse> {
+    let cwd = server_ctx.working_dir();
+    let cell_resolver = dice.get_cell_resolver().await?;
+    let parsed_target_patterns = parse_patterns_from_cli_args::<TargetPatternExtra>(
+        &mut dice,
+        &request.target_patterns,
+        cwd,
+    )
+    .await?;
+
+    match &request.targets {
+        Some(targets_request::Targets::ResolveAlias(_)) => {
+            targets_resolve_aliases(dice, request, parsed_target_patterns).await
+        }
+        Some(targets_request::Targets::Other(other)) => {
+            if other.streaming {
+                let formatter = create_formatter(request, other)?;
+                let hashing = match TargetHashGraphType::from_i32(other.target_hash_graph_type)
+                    .expect("buck cli should send valid target hash graph type")
+                {
+                    TargetHashGraphType::None => None,
+                    _ => Some(other.target_hash_use_fast_hash),
+                };
+
+                let res = targets_streaming(
+                    server_ctx,
+                    dice,
+                    formatter,
+                    output,
+                    parsed_target_patterns,
+                    other.keep_going,
+                    other.cached,
+                    other.imports,
+                    hashing,
+                    request.concurrency.as_ref().map(|x| x.concurrency as usize),
+                )
+                .await;
+                Ok(TargetsResponse {
+                    error_count: res?.errors,
+                    serialized_targets_output: String::new(),
+                })
+            } else {
+                let formatter = create_formatter(request, other)?;
+                let global_cfg_options = global_cfg_options_from_client_context(
+                    request
+                        .target_cfg
+                        .as_ref()
+                        .internal_error_anyhow("target_cfg must be set")?,
+                    server_ctx,
+                    &mut dice,
+                )
+                .await?;
+                let fs = server_ctx.project_root();
+                targets_batch(
+                    server_ctx,
+                    dice,
+                    &*formatter,
+                    parsed_target_patterns,
+                    &global_cfg_options,
+                    TargetHashOptions::new(other, &cell_resolver, fs)?,
+                    other.keep_going,
+                )
+                .await
+            }
+        }
+        None => Err(internal_error_anyhow!("Missing field in proto request")),
+    }
+}
diff --git a/app/buck2_server_commands/src/commands/targets/default.rs b/app/buck2_server_commands/src/commands/targets/default.rs
index d39b85772d2bd..5cb38337f59bf 100644
--- a/app/buck2_server_commands/src/commands/targets/default.rs
+++ b/app/buck2_server_commands/src/commands/targets/default.rs
@@ -17,12 +17,12 @@ use buck2_cli_proto::targets_request;
 use buck2_cli_proto::targets_request::TargetHashFileMode;
 use buck2_cli_proto::targets_request::TargetHashGraphType;
 use buck2_cli_proto::TargetsResponse;
+use buck2_common::global_cfg_options::GlobalCfgOptions;
 use buck2_core::cells::CellResolver;
 use buck2_core::fs::paths::abs_path::AbsPath;
 use buck2_core::fs::project::ProjectRoot;
+use buck2_core::pattern::pattern::ParsedPattern;
 use buck2_core::pattern::pattern_type::TargetPatternExtra;
-use buck2_core::pattern::ParsedPattern;
-use buck2_core::target::label::TargetLabel;
 use buck2_node::load_patterns::load_patterns;
 use buck2_node::load_patterns::MissingTargetBehavior;
 use buck2_node::nodes::configured::ConfiguredTargetNode;
@@ -37,7 +36,6 @@ use dupe::OptionDupedExt;
 use crate::commands::targets::fmt::Stats;
 use crate::commands::targets::fmt::TargetFormatter;
 use crate::commands::targets::fmt::TargetInfo;
-use crate::commands::targets::mk_error;
 use crate::target_hash::TargetHashes;
 use crate::target_hash::TargetHashesFileMode;

@@ -84,42 +83,47 @@ impl TargetHashOptions {
 pub(crate) async fn targets_batch(
     server_ctx: &dyn ServerCommandContextTrait,
-    dice: DiceTransaction,
+    mut dice: DiceTransaction,
     formatter: &dyn TargetFormatter,
     parsed_patterns: Vec<ParsedPattern<TargetPatternExtra>>,
-    target_platform: Option<TargetLabel>,
+    global_cfg_options: &GlobalCfgOptions,
     hash_options: TargetHashOptions,
     keep_going: bool,
 ) -> anyhow::Result<TargetsResponse> {
-    let results = load_patterns(&dice, parsed_patterns, MissingTargetBehavior::Fail).await?;
+    let results = &load_patterns(&mut dice, parsed_patterns, MissingTargetBehavior::Fail).await?;

-    let target_hashes = match hash_options.graph_type {
-        TargetHashGraphType::Configured => Some(
-            TargetHashes::compute::<ConfiguredTargetNode, _>(
-                dice.dupe(),
-                ConfiguredTargetNodeLookup(&dice),
-                results.iter_loaded_targets_by_package().collect(),
-                target_platform,
-                hash_options.file_mode,
-                hash_options.fast_hash,
-                hash_options.recursive,
-            )
-            .await?,
-        ),
-        TargetHashGraphType::Unconfigured => Some(
-            TargetHashes::compute::<TargetNode, _>(
-                dice.dupe(),
-                TargetNodeLookup(&dice),
-                results.iter_loaded_targets_by_package().collect(),
-                target_platform,
-                hash_options.file_mode,
-                hash_options.fast_hash,
-                hash_options.recursive,
-            )
-            .await?,
-        ),
-        _ => None,
-    };
+    let target_hashes = dice
+        .dupe()
+        .with_linear_recompute(|linear_ctx| async move {
+            match hash_options.graph_type {
+                TargetHashGraphType::Configured => anyhow::Ok(Some(
+                    TargetHashes::compute::<ConfiguredTargetNode, _>(
+                        dice.dupe(),
+                        ConfiguredTargetNodeLookup(&linear_ctx),
+                        results.iter_loaded_targets_by_package().collect(),
+                        global_cfg_options,
+                        hash_options.file_mode,
+                        hash_options.fast_hash,
+                        hash_options.recursive,
+                    )
+                    .await?,
+                )),
+                TargetHashGraphType::Unconfigured => Ok(Some(
+                    TargetHashes::compute::<TargetNode, _>(
+                        dice.dupe(),
+                        TargetNodeLookup(&linear_ctx),
+                        results.iter_loaded_targets_by_package().collect(),
+                        global_cfg_options,
+                        hash_options.file_mode,
+                        hash_options.fast_hash,
+                        hash_options.recursive,
+                    )
+                    .await?,
+                )),
+                _ => Ok(None),
+            }
+        })
+        .await?;

     let mut buffer = String::new();
     formatter.begin(&mut buffer);
@@ -151,14 +155,14 @@ pub(crate) async fn targets_batch(
                 }
             }
             Err(e) => {
-                stats.errors += 1;
+                stats.add_error(e);
                 let mut stderr = String::new();

                 if needs_separator {
                     formatter.separator(&mut buffer);
                 }
                 needs_separator = true;
-                formatter.package_error(package.dupe(), e.inner(), &mut buffer, &mut stderr);
+                formatter.package_error(package.dupe(), e, &mut buffer, &mut stderr);

                 server_ctx.stderr()?.write_all(stderr.as_bytes())?;

@@ -169,8 +173,8 @@ pub(crate) async fn targets_batch(
         }
     }
     formatter.end(&stats, &mut buffer);
-    if !keep_going && stats.errors != 0 {
-        Err(mk_error(stats.errors))
+    if !keep_going && let Some(e) = stats.to_error() {
+        Err(e)
     } else {
         Ok(TargetsResponse {
             error_count: stats.errors,
diff --git a/app/buck2_server_commands/src/commands/targets/fmt.rs b/app/buck2_server_commands/src/commands/targets/fmt.rs
index 400ffef109d79..4c23d5b7f32b3 100644
--- a/app/buck2_server_commands/src/commands/targets/fmt.rs
+++ b/app/buck2_server_commands/src/commands/targets/fmt.rs
@@ -7,10 +7,10 @@
  * of this source tree.
  */

+use std::collections::BTreeSet;
 use std::fmt::Write;
 use std::sync::Arc;

-use anyhow::Context;
 use buck2_cli_proto::targets_request;
 use buck2_cli_proto::targets_request::OutputFormat;
 use buck2_cli_proto::targets_request::TargetHashGraphType;
@@ -19,6 +19,8 @@ use buck2_cli_proto::TargetsRequest;
 use buck2_core::bzl::ImportPath;
 use buck2_core::cells::cell_path::CellPath;
 use buck2_core::package::PackageLabel;
+use buck2_error::internal_error_anyhow;
+use buck2_error::BuckErrorContext;
 use buck2_node::attrs::hacks::value_to_json;
 use buck2_node::attrs::inspect_options::AttrInspectOptions;
 use buck2_node::nodes::attributes::DEPS;
@@ -29,7 +31,7 @@ use buck2_node::nodes::attributes::PACKAGE_VALUES;
 use buck2_node::nodes::attributes::TARGET_CALL_STACK;
 use buck2_node::nodes::attributes::TARGET_HASH;
 use buck2_node::nodes::attributes::TYPE;
-use buck2_node::nodes::unconfigured::TargetNode;
+use buck2_node::nodes::unconfigured::TargetNodeRef;
 use buck2_node::super_package::SuperPackage;
 use buck2_util::indent::indent;
 use gazebo::prelude::SliceExt;
@@ -38,21 +40,13 @@ use regex::RegexSet;
 use crate::json::QuotedJson;
 use crate::target_hash::BuckTargetHash;

-#[derive(Debug, thiserror::Error)]
-enum FormatterError {
-    #[error("Attributes can only be specified when output format is JSON (internal error)")]
-    AttrsOnlyWithJson,
-    #[error("`output_format` is not set (internal error)")]
-    OutputFormatNotSet,
-}
-
 pub(crate) struct TargetInfo<'a> {
-    pub(crate) node: &'a TargetNode,
+    pub(crate) node: TargetNodeRef<'a>,
     pub(crate) target_hash: Option<BuckTargetHash>,
     pub(crate) super_package: &'a SuperPackage,
 }

-fn package_error_to_stderr(package: &PackageLabel, error: &anyhow::Error, stderr: &mut String) {
+fn package_error_to_stderr(package: PackageLabel, error: &buck2_error::Error, stderr: &mut String) {
     writeln!(stderr, "Error parsing {package}\n{error:?}").unwrap();
 }

@@ -74,11 +68,11 @@ pub(crate) trait TargetFormatter: Send + Sync {
     fn package_error(
         &self,
         package: PackageLabel,
-        error: &anyhow::Error,
+        error: &buck2_error::Error,
         stdout: &mut String,
         stderr: &mut String,
     ) {
-        package_error_to_stderr(&package, error, stderr);
+        package_error_to_stderr(package, error, stderr);
     }
 }

@@ -294,14 +288,14 @@ impl TargetFormatter for JsonFormat {
     fn package_error(
         &self,
         package: PackageLabel,
-        error: &anyhow::Error,
+        error: &buck2_error::Error,
         stdout: &mut String,
         stderr: &mut String,
     ) {
         // When an error happens we print it to stdout (as a JSON entry) and to stderr (as a human message).
         // If the user has keep-going turned on, they'll get the JSON on stdout, but also have the error message appear on stderr.
         // If the user has keep-going turned off, they'll only see one error message and then abort.
-        package_error_to_stderr(&package, error, stderr);
+        package_error_to_stderr(package, error, stderr);
         self.writer.entry_start(stdout);
         let mut first = true;
         self.writer.entry_item(
@@ -314,7 +308,7 @@ impl TargetFormatter for JsonFormat {
             stdout,
             &mut first,
             "buck.error",
-            QuotedJson::quote_str(&format!("{:?}", error)),
+            QuotedJson::quote_str(&format!("{error:?}")),
         );
         self.writer.entry_end(stdout, first);
     }
@@ -323,6 +317,8 @@ impl TargetFormatter for JsonFormat {
 #[derive(Debug, Default)]
 pub(crate) struct Stats {
     pub(crate) errors: u64,
+    error_tags: BTreeSet<buck2_error::ErrorTag>,
+    error_category: Option<buck2_error::Tier>,
     pub(crate) success: u64,
     pub(crate) targets: u64,
 }
@@ -333,6 +329,39 @@ impl Stats {
         self.success += stats.success;
         self.targets += stats.targets;
     }
+
+    pub(crate) fn add_error(&mut self, e: &buck2_error::Error) {
+        self.error_tags.extend(e.tags());
+        if let Some(category) = e.get_tier() {
+            self.error_category = Some(category.combine(self.error_category.take()));
+        }
+        self.errors += 1;
+    }
+
+    pub(crate) fn to_error(&self) -> Option<anyhow::Error> {
+        if self.errors == 0 {
+            return None;
+        }
+        // Simpler error so that we don't print long errors twice (when exiting buck2)
+        let package_str = if self.errors == 1 {
+            "package"
+        } else {
+            "packages"
+        };
+
+        #[derive(buck2_error::Error, Debug)]
+        enum TargetsError {
+            #[error("Failed to parse {0} {1}")]
+            FailedToParse(u64, &'static str),
+        }
+
+        let mut e = buck2_error::Error::from(TargetsError::FailedToParse(self.errors, package_str));
+        e = e.tag(self.error_tags.iter().copied());
+        if let Some(category) = self.error_category {
+            e = e.context(category);
+        }
+        Some(e.into())
+    }
 }

 struct StatsFormat;
@@ -376,7 +405,7 @@ pub(crate) fn create_formatter(
     other: &targets_request::Other,
 ) -> anyhow::Result<Arc<dyn TargetFormatter>> {
     let output_format = OutputFormat::from_i32(request.output_format)
-        .context("Invalid value of `output_format` (internal error)")?;
+        .internal_error_anyhow("Invalid value of `output_format`")?;

     let target_call_stacks = request.client_context()?.target_call_stacks;

@@ -385,13 +414,15 @@ pub(crate) fn create_formatter(
         _ => {
             // Self-check.
             if !other.output_attributes.is_empty() {
-                return Err(FormatterError::AttrsOnlyWithJson.into());
+                return Err(internal_error_anyhow!(
+                    "Attributes can only be specified when output format is JSON"
+                ));
             }
         }
     }

     match output_format {
-        OutputFormat::Unknown => Err(FormatterError::OutputFormatNotSet.into()),
+        OutputFormat::Unknown => Err(internal_error_anyhow!("`output_format` is not set")),
         OutputFormat::Stats => Ok(Arc::new(StatsFormat)),
         OutputFormat::Text => Ok(Arc::new(TargetNameFormat {
             target_call_stacks,
diff --git a/app/buck2_server_commands/src/commands/targets/mod.rs b/app/buck2_server_commands/src/commands/targets/mod.rs
deleted file mode 100644
index f3f49ec287c23..0000000000000
--- a/app/buck2_server_commands/src/commands/targets/mod.rs
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */
-
-mod default;
-pub(crate) mod fmt;
-mod resolve_alias;
-mod streaming;
-
-use std::fs::File;
-use std::io::BufWriter;
-use std::io::Write;
-
-use anyhow::Context as _;
-use async_trait::async_trait;
-use buck2_cli_proto::targets_request;
-use buck2_cli_proto::targets_request::TargetHashGraphType;
-use buck2_cli_proto::HasClientContext;
-use buck2_cli_proto::TargetsRequest;
-use buck2_cli_proto::TargetsResponse;
-use buck2_common::dice::cells::HasCellResolver;
-use buck2_core::pattern::pattern_type::TargetPatternExtra;
-use buck2_server_ctx::ctx::ServerCommandContextTrait;
-use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher;
-use buck2_server_ctx::pattern::parse_patterns_from_cli_args;
-use buck2_server_ctx::pattern::target_platform_from_client_context;
-use buck2_server_ctx::template::run_server_command;
-use buck2_server_ctx::template::ServerCommandTemplate;
-use dice::DiceTransaction;
-
-use crate::commands::targets::default::targets_batch;
-use crate::commands::targets::default::TargetHashOptions;
-use crate::commands::targets::fmt::create_formatter;
-use crate::commands::targets::resolve_alias::targets_resolve_aliases;
-use crate::commands::targets::streaming::targets_streaming;
-
-#[derive(Debug, thiserror::Error)]
-enum TargetsCommandError {
-    #[error("Missing field in proto request (internal error)")]
-    MissingField,
-}
-
-pub(crate) enum Outputter {
-    Stdout,
-    File(BufWriter<File>),
-}
-
-impl Outputter {
-    fn new(request: &TargetsRequest) -> anyhow::Result<Self> {
-        match &request.output {
-            None => Ok(Self::Stdout),
-            Some(file) => Ok(Self::File(BufWriter::new(
-                File::create(file).with_context(|| {
-                    format!("Failed to open file `{}` for `targets` output ", file)
-                })?,
-            ))),
-        }
-    }
-
-    fn write1(&mut self, stdout: &mut impl Write, x: &str) -> anyhow::Result<()> {
-        match self {
-            Self::Stdout => stdout.write_all(x.as_bytes())?,
-            Self::File(f) => f.write_all(x.as_bytes())?,
-        }
-        Ok(())
-    }
-
-    fn write2(&mut self, stdout: &mut impl Write, x: &str, y: &str) -> anyhow::Result<()> {
-        match self {
-            Self::Stdout => {
-                stdout.write_all(x.as_bytes())?;
-                stdout.write_all(y.as_bytes())?;
-            }
-            Self::File(f) => {
-                f.write_all(x.as_bytes())?;
-                f.write_all(y.as_bytes())?;
-            }
-        }
-        Ok(())
-    }
-
-    /// If this outputter should write anything to a file, do so, and return whatever buffer is left over.
-    fn write_to_file(&mut self, buffer: String) -> anyhow::Result<String> {
-        match self {
-            Self::Stdout => Ok(buffer),
-            Self::File(f) => {
-                f.write_all(buffer.as_bytes())?;
-                Ok(String::new())
-            }
-        }
-    }
-
-    fn flush(&mut self) -> anyhow::Result<()> {
-        match self {
-            Self::Stdout => Ok(()),
-            Self::File(f) => Ok(f.flush()?),
-        }
-    }
-}
-
-pub(crate) async fn targets_command(
-    server_ctx: &dyn ServerCommandContextTrait,
-    partial_result_dispatcher: PartialResultDispatcher<buck2_cli_proto::StdoutBytes>,
-    req: TargetsRequest,
-) -> anyhow::Result<TargetsResponse> {
-    run_server_command(
-        TargetsServerCommand { req },
-        server_ctx,
-        partial_result_dispatcher,
-    )
-    .await
-}
-
-struct TargetsServerCommand {
-    req: TargetsRequest,
-}
-
-#[async_trait]
-impl ServerCommandTemplate for TargetsServerCommand {
-    type StartEvent = buck2_data::TargetsCommandStart;
-    type EndEvent = buck2_data::TargetsCommandEnd;
-    type Response = TargetsResponse;
-    type PartialResult = buck2_cli_proto::StdoutBytes;
-
-    async fn command(
-        &self,
-        server_ctx: &dyn ServerCommandContextTrait,
-        mut partial_result_dispatcher: PartialResultDispatcher<Self::PartialResult>,
-        dice: DiceTransaction,
-    ) -> anyhow::Result<Self::Response> {
-        targets(
-            server_ctx,
-            &mut partial_result_dispatcher.as_writer(),
-            dice,
-            &self.req,
-        )
-        .await
-    }
-
-    fn is_success(&self, _response: &Self::Response) -> bool {
-        // No response if we failed.
-        true
-    }
-
-    fn end_event(&self, _response: &anyhow::Result<Self::Response>) -> Self::EndEvent {
-        buck2_data::TargetsCommandEnd {
-            unresolved_target_patterns: self.req.target_patterns.clone(),
-        }
-    }
-}
-
-async fn targets(
-    server_ctx: &dyn ServerCommandContextTrait,
-    stdout: &mut impl Write,
-    mut dice: DiceTransaction,
-    request: &TargetsRequest,
-) -> anyhow::Result<TargetsResponse> {
-    // TODO(nmj): Rather than returning fully formatted data in the TargetsResponse, we should
-    //            instead return structured data, and return *that* to the CLI. The CLI should
-    //            then handle printing. The current approach is just a temporary hack to fix some
-    //            issues with printing to stdout.
-
-    let cwd = server_ctx.working_dir();
-    let cell_resolver = dice.get_cell_resolver().await?;
-    let parsed_target_patterns = parse_patterns_from_cli_args::<TargetPatternExtra>(
-        &mut dice,
-        &request.target_patterns,
-        cwd,
-    )
-    .await?;
-
-    let mut outputter = Outputter::new(request)?;
-
-    let response = match &request.targets {
-        Some(targets_request::Targets::ResolveAlias(_)) => {
-            targets_resolve_aliases(dice, request, parsed_target_patterns).await?
-        }
-        Some(targets_request::Targets::Other(other)) => {
-            if other.streaming {
-                let formatter = create_formatter(request, other)?;
-                let hashing = match TargetHashGraphType::from_i32(other.target_hash_graph_type)
-                    .expect("buck cli should send valid target hash graph type")
-                {
-                    TargetHashGraphType::None => None,
-                    _ => Some(other.target_hash_use_fast_hash),
-                };
-
-                let res = targets_streaming(
-                    server_ctx,
-                    stdout,
-                    dice,
-                    formatter,
-                    &mut outputter,
-                    parsed_target_patterns,
-                    other.keep_going,
-                    other.cached,
-                    other.imports,
-                    hashing,
-                    request.concurrency.as_ref().map(|x| x.concurrency as usize),
-                )
-                .await;
-                // Make sure we always flush the outputter, even on failure, as we may have partially written to it
-                outputter.flush()?;
-                res?
- } else { - let formatter = create_formatter(request, other)?; - let client_ctx = request.client_context()?; - let target_platform = - target_platform_from_client_context(client_ctx, server_ctx, &mut dice).await?; - let fs = server_ctx.project_root(); - targets_batch( - server_ctx, - dice, - &*formatter, - parsed_target_patterns, - target_platform, - TargetHashOptions::new(other, &cell_resolver, fs)?, - other.keep_going, - ) - .await? - } - } - None => return Err(TargetsCommandError::MissingField.into()), - }; - - let response = TargetsResponse { - error_count: response.error_count, - serialized_targets_output: outputter.write_to_file(response.serialized_targets_output)?, - }; - outputter.flush()?; - Ok(response) -} - -fn mk_error(errors: u64) -> anyhow::Error { - // Simpler error so that we don't print long errors twice (when exiting buck2) - let package_str = if errors == 1 { "package" } else { "packages" }; - anyhow::anyhow!("Failed to parse {} {}", errors, package_str) -} diff --git a/app/buck2_server_commands/src/commands/targets/resolve_alias.rs b/app/buck2_server_commands/src/commands/targets/resolve_alias.rs index 7b6a110c494dc..8aa601cbfcb84 100644 --- a/app/buck2_server_commands/src/commands/targets/resolve_alias.rs +++ b/app/buck2_server_commands/src/commands/targets/resolve_alias.rs @@ -9,10 +9,8 @@ //! Server-side implementation of `buck2 targets --resolve-alias` command. -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum ResolveAliasError { - #[error("`output_format` not set (internal error)")] - OutputFormatNotSet, #[error("`--stat` format is not supported by `--resolve-alias`")] StatFormatNotSupported, } @@ -20,25 +18,22 @@ enum ResolveAliasError { use std::collections::HashMap; use std::collections::HashSet; use std::fmt::Write; -use std::sync::Arc; use anyhow::Context; use buck2_cli_proto::targets_request::OutputFormat; use buck2_cli_proto::TargetsRequest; use buck2_cli_proto::TargetsResponse; -use buck2_common::result::SharedResult; -use buck2_common::result::ToSharedResultExt; -use buck2_core::package::PackageLabel; +use buck2_core::pattern::pattern::ParsedPattern; use buck2_core::pattern::pattern_type::TargetPatternExtra; -use buck2_core::pattern::ParsedPattern; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; +use buck2_error::internal_error_anyhow; +use buck2_error::AnyhowContextForError; +use buck2_error::BuckErrorContext; use buck2_node::nodes::attributes::PACKAGE; -use buck2_node::nodes::eval_result::EvaluationResult; use buck2_node::nodes::frontend::TargetGraphCalculation; use dice::DiceTransaction; use dupe::Dupe; -use futures::stream::FuturesUnordered; -use futures::StreamExt; +use futures::FutureExt; use crate::commands::targets::fmt::JsonWriter; use crate::json::QuotedJson; @@ -108,7 +103,7 @@ impl ResolveAliasFormatter for LinesWriter { } pub(crate) async fn targets_resolve_aliases( - dice: DiceTransaction, + mut dice: DiceTransaction, request: &TargetsRequest, parsed_target_patterns: Vec>, ) -> anyhow::Result { @@ -121,7 +116,7 @@ pub(crate) async fn targets_resolve_aliases( } _ => Err(anyhow::anyhow!( "Invalid alias (does not expand to a single target): `{}`", - alias.value + alias )), }) .collect::, _>>()?; @@ -131,32 +126,31 @@ pub(crate) async fn targets_resolve_aliases( .map(|(package, _name)| package.dupe()) .collect::>(); - let packages = packages - .into_iter() - .map(|package| { - let dice = &dice; + let packages: HashMap<_, _> = dice + .compute_join(packages, |ctx: &mut 
_, package| { async move { ( package.dupe(), - dice.get_interpreter_results(package.dupe()) + ctx.get_interpreter_results(package.dupe()) .await - .shared_error(), + .map_err(buck2_error::Error::from), ) } + .boxed() }) - .collect::>() - .collect::>>>() - .await; + .await + .into_iter() + .collect(); let mut buffer = String::new(); let output_format = OutputFormat::from_i32(request.output_format) - .context("Invalid value of `output_format` (internal error)")?; + .internal_error_anyhow("Invalid value of `output_format`")?; let json_writer; let formatter = match output_format { - OutputFormat::Unknown => return Err(ResolveAliasError::OutputFormatNotSet.into()), + OutputFormat::Unknown => return Err(internal_error_anyhow!("`output_format` not set")), OutputFormat::Text => &LinesWriter as &dyn ResolveAliasFormatter, OutputFormat::Json => { json_writer = JsonWriter { json_lines: false }; @@ -194,13 +188,13 @@ pub(crate) async fn targets_resolve_aliases( ) }) }) - .with_context(|| format!("Invalid alias: `{}`", alias.value))?; + .with_context(|| format!("Invalid alias: `{}`", alias))?; if needs_separator { formatter.separator(&mut buffer); } needs_separator = true; - formatter.emit(&alias.value, node.label(), &mut buffer); + formatter.emit(&alias, node.label(), &mut buffer); } formatter.end(&mut buffer); diff --git a/app/buck2_server_commands/src/commands/targets/streaming.rs b/app/buck2_server_commands/src/commands/targets/streaming.rs index b5f67ec31c296..c6cfe58aef711 100644 --- a/app/buck2_server_commands/src/commands/targets/streaming.rs +++ b/app/buck2_server_commands/src/commands/targets/streaming.rs @@ -15,16 +15,16 @@ use std::mem; use std::sync::Arc; use std::sync::Mutex; -use buck2_cli_proto::TargetsResponse; use buck2_common::pattern::package_roots::find_package_roots_stream; use buck2_common::pattern::resolve::ResolvedPattern; use buck2_core::bzl::ImportPath; use buck2_core::package::PackageLabel; +use buck2_core::pattern::pattern::PackageSpec; +use buck2_core::pattern::pattern::ParsedPattern; use buck2_core::pattern::pattern_type::PatternType; use buck2_core::pattern::pattern_type::TargetPatternExtra; -use buck2_core::pattern::PackageSpec; -use buck2_core::pattern::ParsedPattern; use buck2_core::target::name::TargetName; +use buck2_futures::spawn::spawn_cancellable; use buck2_interpreter::load_module::InterpreterCalculation; use buck2_interpreter::load_module::INTERPRETER_CALCULATION_IMPL; use buck2_interpreter::paths::package::PackageFilePath; @@ -35,38 +35,38 @@ use buck2_server_ctx::ctx::ServerCommandContextTrait; use dice::DiceComputations; use dice::DiceTransaction; use dupe::Dupe; -use dupe::IterDupedExt; use futures::future::FutureExt; use futures::Stream; use futures::StreamExt; use gazebo::prelude::VecExt; use itertools::Either; use itertools::Itertools; -use more_futures::spawn::spawn_cancellable; use starlark_map::small_set::SmallSet; -use thiserror::Error; use tokio::sync::Semaphore; use crate::commands::targets::fmt::Stats; use crate::commands::targets::fmt::TargetFormatter; use crate::commands::targets::fmt::TargetInfo; -use crate::commands::targets::mk_error; -use crate::commands::targets::Outputter; use crate::target_hash::TargetHashes; +fn write_str(outputter: &mut dyn Write, s: &mut String) -> anyhow::Result<()> { + outputter.write_all(s.as_bytes())?; + s.clear(); + Ok(()) +} + pub(crate) async fn targets_streaming( server_ctx: &dyn ServerCommandContextTrait, - stdout: &mut impl Write, - dice: DiceTransaction, + mut dice: DiceTransaction, formatter: Arc, - 
outputter: &mut Outputter, + outputter: &mut (dyn Write + Send), parsed_patterns: Vec>, keep_going: bool, cached: bool, imports: bool, fast_hash: Option, // None = no hashing threads: Option, -) -> anyhow::Result { +) -> anyhow::Result { struct Res { stats: Stats, // Stats to merge in package: PackageLabel, // The package I was operating on @@ -77,12 +77,13 @@ pub(crate) async fn targets_streaming( let imported = Arc::new(Mutex::new(SmallSet::new())); let threads = Arc::new(Semaphore::new(threads.unwrap_or(Semaphore::MAX_PERMITS))); - let mut packages = stream_packages(&dice, parsed_patterns) + let cloned_dice = dice.clone(); + let mut packages = stream_packages(&cloned_dice, parsed_patterns) .map(|x| { let formatter = formatter.dupe(); let imported = imported.dupe(); let threads = threads.dupe(); - let ctx = dice.dupe(); + let mut ctx = cloned_dice.dupe(); spawn_cancellable( |_cancellation| { @@ -98,10 +99,11 @@ pub(crate) async fn targets_streaming( let targets = { // This bit of code is the heavy CPU stuff, so guard it with the threads let _permit = threads.acquire().await.unwrap(); - load_targets(&ctx, package.dupe(), spec, cached, keep_going).await + load_targets(&mut ctx, package.dupe(), spec, cached, keep_going) + .await }; let mut show_err = |err| { - res.stats.errors += 1; + res.stats.add_error(err); let mut stderr = String::new(); formatter.package_error( package.dupe(), @@ -114,7 +116,7 @@ pub(crate) async fn targets_streaming( match targets { Ok((eval_result, targets, err)) => { if let Some(err) = err { - show_err(&err); + show_err(&err.into()); formatter.separator(&mut res.stdout); } res.stats.success += 1; @@ -138,7 +140,7 @@ pub(crate) async fn targets_streaming( } formatter.target( TargetInfo { - node, + node: node.as_ref(), target_hash: fast_hash.map(|fast| { TargetHashes::compute_immediate_one(node, fast) }), @@ -149,7 +151,7 @@ pub(crate) async fn targets_streaming( } } Err(err) => { - show_err(&err); + show_err(&err.into()); } } anyhow::Ok(res) @@ -157,8 +159,8 @@ pub(crate) async fn targets_streaming( } .boxed() }, - &*dice.per_transaction_data().spawner, - dice.per_transaction_data(), + &*cloned_dice.per_transaction_data().spawner, + cloned_dice.per_transaction_data(), ) .into_drop_cancel() }) @@ -171,12 +173,14 @@ pub(crate) async fn targets_streaming( let mut needs_separator = false; let mut package_files_seen = SmallSet::new(); while let Some(res) = packages.next().await { - let res = res?; + let mut res = res?; stats.merge(&res.stats); if let Some(stderr) = &res.stderr { server_ctx.stderr()?.write_all(stderr.as_bytes())?; if !keep_going { - return Err(mk_error(stats.errors)); + return Err(stats + .to_error() + .expect("Result only has a stderr if there were errors")); } } if !res.stdout.is_empty() { @@ -184,31 +188,35 @@ pub(crate) async fn targets_streaming( formatter.separator(&mut buffer); } needs_separator = true; - outputter.write2(stdout, &buffer, &res.stdout)?; - buffer.clear(); + write_str(outputter, &mut buffer)?; + write_str(outputter, &mut res.stdout)?; } if imports { // Need to also find imports from PACKAGE files - let mut path = Some(PackageFilePath::for_dir(res.package.as_cell_path())); + let mut path = Some(res.package); while let Some(x) = path { if package_files_seen.contains(&x) { break; } - package_files_seen.insert(x.clone()); + package_files_seen.insert(x); // These aren't cached, but the cost is relatively low (Starlark parsing), // and there aren't many, so we just do it on the main thread. 
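Each package above is loaded in its own cancellable task, but only the CPU-heavy parsing section holds a semaphore permit, sized from the optional `threads` argument (`Semaphore::MAX_PERMITS` when unset). A runnable approximation of that throttling pattern with plain tokio; the package names and the buffer width of 4 are made up:

```rust
use std::sync::Arc;

use futures::stream::{self, StreamExt};
use tokio::sync::Semaphore;

#[tokio::main]
async fn main() {
    // None = effectively unbounded, mirroring `threads.unwrap_or(Semaphore::MAX_PERMITS)`.
    let threads: Option<usize> = Some(2);
    let permits = Arc::new(Semaphore::new(threads.unwrap_or(Semaphore::MAX_PERMITS)));

    let packages = vec!["root//a", "root//b", "root//c", "root//d"];

    let results: Vec<String> = stream::iter(packages)
        .map(|pkg| {
            let permits = permits.clone();
            tokio::spawn(async move {
                // Only the expensive section holds a permit; cheap setup runs freely.
                let _permit = permits.acquire().await.unwrap();
                format!("loaded {pkg}")
            })
        })
        // Keep several spawned tasks in flight at once.
        .buffer_unordered(4)
        .map(|joined| joined.unwrap())
        .collect()
        .await;

    assert_eq!(results.len(), 4);
}
```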
// We ignore errors as these will bubble up as BUCK file errors already. - if let Ok(Some(imports)) = package_imports(&dice, &x).await { + if let Ok(Some((package_file_path, imports))) = + package_imports(&mut dice, x.dupe()).await + { if needs_separator { formatter.separator(&mut buffer); } needs_separator = true; - formatter.imports(x.path(), &imports, None, &mut buffer); - outputter.write1(stdout, &buffer)?; - buffer.clear(); + formatter.imports(package_file_path.path(), &imports, None, &mut buffer); + write_str(outputter, &mut buffer)?; imported.lock().unwrap().extend(imports.into_iter()); } - path = x.parent_package_file(); + // TODO(nga): we should cross cell boundary: + // This is what we do when we evaluate `PACKAGE` files. + // https://fburl.com/code/qxl59b64 + path = x.parent(); } } } @@ -229,23 +237,20 @@ pub(crate) async fn targets_streaming( let imports = loaded.imports().cloned().collect::>(); formatter.imports(path.path(), &imports, None, &mut buffer); todo.extend(imports); - outputter.write1(stdout, &buffer)?; - buffer.clear(); + write_str(outputter, &mut buffer)?; } } formatter.end(&stats, &mut buffer); - Ok(TargetsResponse { - error_count: stats.errors, - serialized_targets_output: buffer, - }) + write_str(outputter, &mut buffer)?; + Ok(stats) } /// Given the patterns, separate into those which have an explicit package, and those which are recursive -fn stream_packages( - dice: &DiceTransaction, +fn stream_packages<'a, T: PatternType>( + dice: &'a DiceTransaction, patterns: Vec>, -) -> impl Stream)>> { +) -> impl Stream)>> + 'a { let mut spec = ResolvedPattern::::new(); let mut recursive_paths = Vec::new(); @@ -267,7 +272,7 @@ fn stream_packages( .chain(find_package_roots_stream(dice, recursive_paths).map(|x| Ok((x?, PackageSpec::All)))) } -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] enum TargetsError { #[error( "Unknown targets {} from package `{0}`.", @@ -278,7 +283,7 @@ enum TargetsError { /// Load the targets from a package. If `keep_going` is specified then it may return a `Some` error in the triple. async fn load_targets( - dice: &DiceComputations, + dice: &mut DiceComputations<'_>, package: PackageLabel, spec: PackageSpec, cached: bool, @@ -304,7 +309,7 @@ async fn load_targets( .partition_map(|(target, TargetPatternExtra)| { match result.targets().get(target.as_ref()) { None => Either::Left(target), - Some(x) => Either::Right(x.dupe()), + Some(x) => Either::Right(x.to_owned()), } }); let err = if miss.is_empty() { @@ -315,13 +320,13 @@ async fn load_targets( Ok((result, targets, err)) } else { let targets = targets.into_try_map(|(target, TargetPatternExtra)| { - anyhow::Ok(result.resolve_target(target.as_ref())?.dupe()) + anyhow::Ok(result.resolve_target(target.as_ref())?.to_owned()) })?; Ok((result, targets, None)) } } PackageSpec::All => { - let targets = result.targets().values().duped().collect(); + let targets = result.targets().values().map(|t| t.to_owned()).collect(); Ok((result, targets, None)) } } @@ -329,9 +334,9 @@ async fn load_targets( /// Return `None` if the PACKAGE file doesn't exist async fn package_imports( - dice: &DiceComputations, - path: &PackageFilePath, -) -> anyhow::Result>> { + dice: &mut DiceComputations<'_>, + path: PackageLabel, +) -> anyhow::Result)>> { INTERPRETER_CALCULATION_IMPL .get()? 
.get_package_file_deps(dice, path) diff --git a/app/buck2_server_commands/src/commands/targets_show_outputs.rs b/app/buck2_server_commands/src/commands/targets_show_outputs.rs index 18a2bb2582511..b1f167c30ec31 100644 --- a/app/buck2_server_commands/src/commands/targets_show_outputs.rs +++ b/app/buck2_server_commands/src/commands/targets_show_outputs.rs @@ -14,39 +14,35 @@ use buck2_artifact::artifact::artifact_type::Artifact; use buck2_build_api::actions::artifact::get_artifact_fs::GetArtifactFs; use buck2_build_api::analysis::calculation::RuleAnalysisCalculation; use buck2_cli_proto::targets_show_outputs_response::TargetPaths; -use buck2_cli_proto::HasClientContext; use buck2_cli_proto::TargetsRequest; use buck2_cli_proto::TargetsShowOutputsResponse; -use buck2_common::dice::cells::HasCellResolver; -use buck2_common::dice::file_ops::HasFileOps; -use buck2_common::pattern::resolve::resolve_target_patterns; +use buck2_common::global_cfg_options::GlobalCfgOptions; +use buck2_common::pattern::parse_from_cli::parse_patterns_from_cli_args; +use buck2_common::pattern::resolve::ResolveTargetPatterns; use buck2_common::pattern::resolve::ResolvedPattern; -use buck2_core::cells::CellResolver; use buck2_core::package::PackageLabel; +use buck2_core::pattern::pattern::PackageSpec; +use buck2_core::pattern::pattern::ParsedPattern; use buck2_core::pattern::pattern_type::ProvidersPatternExtra; -use buck2_core::pattern::PackageSpec; -use buck2_core::pattern::ParsedPattern; use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_core::provider::label::ProvidersLabel; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; +use buck2_error::BuckErrorContext; use buck2_execute::artifact::artifact_dyn::ArtifactDyn; use buck2_node::nodes::eval_result::EvaluationResult; use buck2_node::nodes::frontend::TargetGraphCalculation; use buck2_node::target_calculation::ConfiguredTargetCalculation; use buck2_server_ctx::ctx::ServerCommandContextTrait; +use buck2_server_ctx::global_cfg_options::global_cfg_options_from_client_context; use buck2_server_ctx::partial_result_dispatcher::NoPartialResult; use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; -use buck2_server_ctx::pattern::parse_patterns_from_cli_args; -use buck2_server_ctx::pattern::target_platform_from_client_context; use buck2_server_ctx::template::run_server_command; use buck2_server_ctx::template::ServerCommandTemplate; use dice::DiceComputations; use dice::DiceTransaction; use dupe::Dupe; use futures::future::FutureExt; -use futures::stream::FuturesUnordered; use gazebo::prelude::VecExt; -use tokio_stream::StreamExt; struct TargetsArtifacts { providers_label: ConfiguredProvidersLabel, @@ -99,11 +95,15 @@ async fn targets_show_outputs( ) -> anyhow::Result { let cwd = server_ctx.working_dir(); - let cell_resolver = ctx.get_cell_resolver().await?; - - let client_ctx = request.client_context()?; - let target_platform = - target_platform_from_client_context(client_ctx, server_ctx, &mut ctx).await?; + let global_cfg_options = global_cfg_options_from_client_context( + request + .target_cfg + .as_ref() + .internal_error_anyhow("target_cfg must be set")?, + server_ctx, + &mut ctx, + ) + .await?; let parsed_patterns = parse_patterns_from_cli_args::( &mut ctx, @@ -116,13 +116,9 @@ async fn targets_show_outputs( let mut targets_paths = Vec::new(); - for targets_artifacts in retrieve_targets_artifacts_from_patterns( - &ctx, - &target_platform, - &parsed_patterns, - &cell_resolver, - ) - .await? 
+ for targets_artifacts in + retrieve_targets_artifacts_from_patterns(&mut ctx, &global_cfg_options, &parsed_patterns) + .await? { let mut paths = Vec::new(); for artifact in targets_artifacts.artifacts { @@ -139,70 +135,57 @@ async fn targets_show_outputs( } async fn retrieve_targets_artifacts_from_patterns( - ctx: &DiceComputations, - global_target_platform: &Option, + ctx: &mut DiceComputations<'_>, + global_cfg_options: &GlobalCfgOptions, parsed_patterns: &[ParsedPattern], - cell_resolver: &CellResolver, ) -> anyhow::Result> { - let resolved_pattern = - resolve_target_patterns(cell_resolver, parsed_patterns, &ctx.file_ops()).await?; + let resolved_pattern = ResolveTargetPatterns::resolve(ctx, parsed_patterns).await?; - retrieve_artifacts_for_targets(ctx, resolved_pattern, global_target_platform.to_owned()).await + retrieve_artifacts_for_targets(ctx, resolved_pattern, global_cfg_options).await } async fn retrieve_artifacts_for_targets( - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, spec: ResolvedPattern, - global_target_platform: Option, + global_cfg_options: &GlobalCfgOptions, ) -> anyhow::Result> { - let futs: FuturesUnordered<_> = spec - .specs - .into_iter() - .map(|(package, spec)| { - let global_target_platform = global_target_platform.dupe(); + let artifacts_for_specs = ctx + .try_compute_join(spec.specs, |ctx, (package, spec)| { async move { { let res = ctx.get_interpreter_results(package.dupe()).await?; - retrieve_artifacts_for_spec( - ctx, - package.dupe(), - spec, - global_target_platform, - res, - ) - .await + retrieve_artifacts_for_spec(ctx, package.dupe(), spec, global_cfg_options, res) + .await } } .boxed() }) - .collect(); - - futures::pin_mut!(futs); + .await?; let mut results = Vec::new(); - while let Some(mut targets_artifacts) = futs.try_next().await? 
{ - results.append(&mut targets_artifacts); + for artifacts in artifacts_for_specs { + results.extend(artifacts); } Ok(results) } async fn retrieve_artifacts_for_spec( - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, package: PackageLabel, spec: PackageSpec, - global_target_platform: Option, + global_cfg_options: &GlobalCfgOptions, res: Arc, ) -> anyhow::Result> { let available_targets = res.targets(); - let todo_targets: Vec<(ProvidersLabel, Option)> = match spec { + let todo_targets: Vec<(ProvidersLabel, &GlobalCfgOptions)> = match spec { PackageSpec::All => available_targets .keys() .map(|t| { ( ProvidersLabel::default_for(TargetLabel::new(package.dupe(), t)), - global_target_platform.dupe(), + global_cfg_options, ) }) .collect(), @@ -213,34 +196,26 @@ async fn retrieve_artifacts_for_spec( targets.into_map(|(target_name, providers)| { ( providers.into_providers_label(package.dupe(), target_name.as_ref()), - global_target_platform.dupe(), + global_cfg_options, ) }) } }; - let mut futs: FuturesUnordered<_> = todo_targets - .into_iter() - .map(|(providers_label, target_platform)| { - retrieve_artifacts_for_provider_label(ctx, providers_label, target_platform) - }) - .collect(); - - let mut outputs = Vec::new(); - while let Some(targets_artifacts) = futs.next().await { - outputs.push(targets_artifacts?); - } - + let outputs = ctx.try_compute_join(todo_targets, |ctx, (providers_label, cfg_flags)| { + async move { retrieve_artifacts_for_provider_label(ctx, providers_label, cfg_flags).await } + .boxed() + }).await?; Ok(outputs) } async fn retrieve_artifacts_for_provider_label( - ctx: &DiceComputations, + ctx: &mut DiceComputations<'_>, providers_label: ProvidersLabel, - target_platform: Option, + global_cfg_options: &GlobalCfgOptions, ) -> anyhow::Result { let providers_label = ctx - .get_configured_provider_label(&providers_label, target_platform.as_ref()) + .get_configured_provider_label(&providers_label, global_cfg_options) .await?; let providers = ctx @@ -252,11 +227,8 @@ async fn retrieve_artifacts_for_provider_label( let mut artifacts = Vec::new(); collection - .default_info() - .for_each_default_output_artifact_only(&mut |o| { - artifacts.push(o); - Ok(()) - })?; + .default_info()? + .for_each_default_output_artifact_only(&mut |o| artifacts.push(o))?; Ok(TargetsArtifacts { providers_label, diff --git a/app/buck2_server_commands/src/dot.rs b/app/buck2_server_commands/src/dot.rs new file mode 100644 index 0000000000000..614eb0fb3b9f5 --- /dev/null +++ b/app/buck2_server_commands/src/dot.rs @@ -0,0 +1,176 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! A very limited interface for writing dot files (see ) +//! +//! Has a lot less features than or , +//! but it's easier for us to match buck1's output with this simple implementation. +// TODO(cjhopman): while the `dot` crate is probably too opinionated, `tabbycat` looks nice and is +// lower level so gives a lot of control (including control over ordering of node/edge statements). +// It looks like we could use that, but it mostly would just handle the actual writing of the +// data in the right format and maybe escaping. It's not been imported to tp2 so we implement it +// ourselves for now. 
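For orientation, this is roughly the textual shape the module emits. A self-contained sketch that prints a tiny digraph in the same `id [attrs];` / `from -> to;` statement layout (the real `Dot::render` below interleaves each node's outgoing edges right after the node statement; the labels here are invented):

```rust
use std::io::Write;

/// Emit a digraph: node statements followed by edge statements.
fn render<W: Write>(
    name: &str,
    nodes: &[&str],
    edges: &[(&str, &str)],
    mut w: W,
) -> std::io::Result<()> {
    writeln!(w, "digraph {} {{", name)?;
    for node in nodes {
        writeln!(w, "  \"{}\" [style=filled];", node)?;
    }
    for (from, to) in edges {
        writeln!(w, "  \"{}\" -> \"{}\";", from, to)?;
    }
    writeln!(w, "}}")
}

fn main() -> std::io::Result<()> {
    let nodes = ["root//a:lib", "root//b:lib"];
    let edges = [("root//a:lib", "root//b:lib")];
    // digraph targets {
    //   "root//a:lib" [style=filled];
    //   ...
    // }
    render("targets", &nodes, &edges, std::io::stdout())
}
```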
+
+use std::collections::hash_map::Entry::Occupied;
+use std::collections::hash_map::Entry::Vacant;
+use std::collections::HashMap;
+use std::fmt::Display;
+use std::io::Write;
+
+use once_cell::sync::Lazy;
+use regex::Regex;
+use starlark_map::small_map::SmallMap;
+
+pub mod targets;
+
+#[derive(Default, Debug)]
+pub struct DotNodeAttrs {
+    pub style: Option<String>,
+    pub color: Option<String>,
+    pub label: Option<String>,
+    pub extra: SmallMap<String, String>,
+}
+
+impl Display for DotNodeAttrs {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let style = self.style.as_ref().map(|v| ("style", v));
+        let color = self.color.as_ref().map(|v| ("color", v));
+        let label = self.label.as_ref().map(|v| ("label", v));
+
+        for (i, (key, value)) in style
+            .into_iter()
+            .chain(color.into_iter())
+            .chain(label.into_iter())
+            .chain(self.extra.iter().map(|(l, r)| (l.as_str(), r)))
+            .enumerate()
+        {
+            if i != 0 {
+                f.write_str(",")?;
+            }
+            write!(f, "{}={}", escape_id(key), escape_id(value))?;
+        }
+        Ok(())
+    }
+}
+
+/// A node in the graph.
+pub trait DotNode {
+    fn attrs(&self) -> anyhow::Result<DotNodeAttrs>;
+    fn id(&self) -> String;
+}
+
+/// Represents a directed edge between two nodes, identified by their id.
+pub struct DotEdge<'a> {
+    from: &'a str,
+    to: &'a str,
+}
+
+pub trait DotDigraph<'a> {
+    type Node: DotNode;
+
+    fn name(&self) -> &str;
+
+    fn for_each_node<F: FnMut(&Self::Node) -> anyhow::Result<()>>(
+        &'a self,
+        f: F,
+    ) -> anyhow::Result<()>;
+    fn for_each_edge<F: FnMut(&DotEdge) -> anyhow::Result<()>>(
+        &'a self,
+        node: &Self::Node,
+        f: F,
+    ) -> anyhow::Result<()>;
+}
+
+/// ids in dot format need to have the '"' escaped.
+///
+/// From <https://graphviz.org/doc/info/lang.html>:
+/// ```ignore
+/// An ID is one of the following:
+/// - Any string of alphabetic ([a-zA-Z\200-\377]) characters, underscores ('_') or digits([0-9]), not beginning with a digit;
+/// - a numeral [-]?(.[0-9]⁺ | [0-9]⁺(.[0-9]*)? );
+/// - any double-quoted string ("...") possibly containing escaped quotes (\")¹;
+/// - an HTML string (<...>).
+/// ```
+///
+/// We support (approximately) the first two forms and then anything else gets quoted and escaped as the third form.
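Those rules boil down to: bare identifiers and numerals may pass through unquoted, and everything else must be double-quoted with embedded quotes escaped. A hedged illustration that keeps only the identifier fast path; dropping the numeral branch is safe because a quoted numeral is still a valid ID:

```rust
use regex::Regex;

/// Quote a string for use as a dot ID, per the grammar quoted above.
fn escape_id(value: &str) -> String {
    let ident = Regex::new("^[a-zA-Z_][a-zA-Z_0-9]*$").unwrap();
    if ident.is_match(value) {
        return value.to_owned();
    }
    format!("\"{}\"", value.replace('"', "\\\""))
}

fn main() {
    assert_eq!(escape_id("style"), "style");                   // bare identifier
    assert_eq!(escape_id("root//a:lib"), "\"root//a:lib\"");   // needs quoting
    assert_eq!(escape_id("say \"hi\""), "\"say \\\"hi\\\"\""); // quotes escaped
}
```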
+fn escape_id(value: &str) -> String { + static RE_STRING: Lazy = Lazy::new(|| Regex::new("^[a-zA-Z_][a-zA-Z_0-9]*$").unwrap()); + static RE_NUMBER: Lazy = + Lazy::new(|| Regex::new("^-?(.[0-9]+ | [0-9]+.[0-9]*)$").unwrap()); + + if RE_STRING.is_match(value) || RE_NUMBER.is_match(value) { + return value.to_owned(); + } + + format!("\"{}\"", value.replace('"', "\\\"")) +} + +pub struct Dot {} + +impl Dot { + pub fn render<'a, T: DotDigraph<'a>, W: Write>(graph: &'a T, mut w: W) -> anyhow::Result<()> { + writeln!(w, "digraph {} {{", graph.name())?; + graph.for_each_node(|node| { + let attrs = node.attrs()?; + writeln!(w, " {} [{}];", escape_id(&node.id()), attrs)?; + graph.for_each_edge(node, |edge| { + writeln!(w, " {} -> {};", escape_id(edge.from), escape_id(edge.to))?; + Ok(()) + })?; + Ok(()) + })?; + writeln!(w, "}}")?; + Ok(()) + } +} + +pub struct DotCompact {} + +impl DotCompact { + pub fn render<'a, T: DotDigraph<'a>, W: Write>(graph: &'a T, mut w: W) -> anyhow::Result<()> { + writeln!(w, "digraph {} {{", graph.name())?; + + let mut next_id: u32 = 0; + let mut lookup_numeric_id: HashMap = HashMap::new(); + + let mut name_to_number = |node_name: &str| -> u32 { + match lookup_numeric_id.entry(node_name.to_owned()) { + Vacant(entry) => { + next_id += 1; + entry.insert(next_id); + next_id + } + Occupied(entry) => *entry.get(), + } + }; + + graph.for_each_node(|node| { + let attrs = node.attrs()?; + let node_name = &escape_id(&node.id()); + writeln!( + w, + " {} [{},label={}];", + name_to_number(node_name), + attrs, + escape_id(&node.id()) + )?; + graph.for_each_edge(node, |edge| { + writeln!( + w, + " {} -> {};", + name_to_number(&escape_id(edge.from)), + name_to_number(&escape_id(edge.to)) + )?; + Ok(()) + })?; + Ok(()) + })?; + writeln!(w, "}}")?; + Ok(()) + } +} diff --git a/app/buck2_server_commands/src/dot/mod.rs b/app/buck2_server_commands/src/dot/mod.rs deleted file mode 100644 index c525008004c45..0000000000000 --- a/app/buck2_server_commands/src/dot/mod.rs +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! A very limited interface for writing dot files (see ) -//! -//! Has a lot less features than or , -//! but it's easier for us to match buck1's output with this simple implementation. -//! -// TODO(cjhopman): while the `dot` crate is probably too opinionated, `tabbycat` looks nice and is -// lower level so gives a lot of control (including control over ordering of node/edge statements). -// It looks like we could use that, but it mostly would just handle the actual writing of the -// data in the right format and maybe escaping. It's not been imported to tp2 so we implement it -// ourselves for now. 
- -use std::collections::hash_map::Entry::Occupied; -use std::collections::hash_map::Entry::Vacant; -use std::collections::HashMap; -use std::fmt::Display; -use std::io::Write; - -use once_cell::sync::Lazy; -use regex::Regex; -use starlark_map::small_map::SmallMap; - -pub mod targets; - -#[derive(Default, Debug)] -pub struct DotNodeAttrs { - pub style: Option, - pub color: Option, - pub label: Option, - pub extra: SmallMap, -} - -impl Display for DotNodeAttrs { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let style = self.style.as_ref().map(|v| ("style", v)); - let color = self.color.as_ref().map(|v| ("color", v)); - let label = self.label.as_ref().map(|v| ("label", v)); - - for (i, (key, value)) in style - .into_iter() - .chain(color.into_iter()) - .chain(label.into_iter()) - .chain(self.extra.iter().map(|(l, r)| (l.as_str(), r))) - .enumerate() - { - if i != 0 { - f.write_str(",")?; - } - write!(f, "{}={}", escape_id(key), escape_id(value))?; - } - Ok(()) - } -} - -/// A node in the graph. -pub trait DotNode { - fn attrs(&self) -> anyhow::Result; - fn id(&self) -> String; -} - -/// Represents a directed edge between two nodes, identified by their id. -pub struct DotEdge<'a> { - from: &'a str, - to: &'a str, -} - -pub trait DotDigraph<'a> { - type Node: DotNode; - - fn name(&self) -> &str; - - fn for_each_node anyhow::Result<()>>( - &'a self, - f: F, - ) -> anyhow::Result<()>; - fn for_each_edge anyhow::Result<()>>( - &'a self, - node: &Self::Node, - f: F, - ) -> anyhow::Result<()>; -} - -/// ids in dot format need to have the '"' escaped. -/// -/// From -/// ```ignore -/// An ID is one of the following: -/// - Any string of alphabetic ([a-zA-Z\200-\377]) characters, underscores ('_') or digits([0-9]), not beginning with a digit; -/// - a numeral [-]?(.[0-9]⁺ | [0-9]⁺(.[0-9]*)? ); -/// - any double-quoted string ("...") possibly containing escaped quotes (\")¹; -/// - an HTML string (<...>). -/// -/// We support (approximately) the first two forms and then anything else gets quoted and escaped as the third form. 
-fn escape_id(value: &str) -> String { - static RE_STRING: Lazy = Lazy::new(|| Regex::new("^[a-zA-Z_][a-zA-Z_0-9]*$").unwrap()); - static RE_NUMBER: Lazy = - Lazy::new(|| Regex::new("^-?(.[0-9]+ | [0-9]+.[0-9]*)$").unwrap()); - - if RE_STRING.is_match(value) || RE_NUMBER.is_match(value) { - return value.to_owned(); - } - - format!("\"{}\"", value.replace('"', "\\\"")) -} - -pub struct Dot {} - -impl Dot { - pub fn render<'a, T: DotDigraph<'a>, W: Write>(graph: &'a T, mut w: W) -> anyhow::Result<()> { - writeln!(w, "digraph {} {{", graph.name())?; - graph.for_each_node(|node| { - let attrs = node.attrs()?; - writeln!(w, " {} [{}];", escape_id(&node.id()), attrs)?; - graph.for_each_edge(node, |edge| { - writeln!(w, " {} -> {};", escape_id(edge.from), escape_id(edge.to))?; - Ok(()) - })?; - Ok(()) - })?; - writeln!(w, "}}")?; - Ok(()) - } -} - -pub struct DotCompact {} - -impl DotCompact { - pub fn render<'a, T: DotDigraph<'a>, W: Write>(graph: &'a T, mut w: W) -> anyhow::Result<()> { - writeln!(w, "digraph {} {{", graph.name())?; - - let mut next_id: u32 = 0; - let mut lookup_numeric_id: HashMap = HashMap::new(); - - let mut name_to_number = |node_name: &str| -> u32 { - match lookup_numeric_id.entry(node_name.to_owned()) { - Vacant(entry) => { - next_id += 1; - entry.insert(next_id); - next_id - } - Occupied(entry) => *entry.get(), - } - }; - - graph.for_each_node(|node| { - let attrs = node.attrs()?; - let node_name = &escape_id(&node.id()); - writeln!( - w, - " {} [{},label={}];", - name_to_number(node_name), - attrs, - escape_id(&node.id()) - )?; - graph.for_each_edge(node, |edge| { - writeln!( - w, - " {} -> {};", - name_to_number(&escape_id(edge.from)), - name_to_number(&escape_id(edge.to)) - )?; - Ok(()) - })?; - Ok(()) - })?; - writeln!(w, "}}")?; - Ok(()) - } -} diff --git a/app/buck2_server_commands/src/dot/targets.rs b/app/buck2_server_commands/src/dot/targets.rs index 3767e15e3b46f..1f34a26cfe0b4 100644 --- a/app/buck2_server_commands/src/dot/targets.rs +++ b/app/buck2_server_commands/src/dot/targets.rs @@ -7,12 +7,14 @@ * of this source tree. */ +use buck2_query::query::environment::AttrFmtOptions; use buck2_query::query::environment::QueryTarget; use buck2_query::query::environment::QueryTargets; use buck2_query::query::syntax::simple::eval::set::TargetSet; use regex::RegexSet; use starlark_map::small_map::SmallMap; +use crate::commands::query::query_target_ext::QueryCommandTarget; use crate::dot::DotDigraph; use crate::dot::DotEdge; use crate::dot::DotNode; @@ -26,7 +28,7 @@ pub struct DotTargetGraph { pub attributes: Option, } -impl<'a, T: QueryTarget> DotDigraph<'a> for DotTargetGraph { +impl<'a, T: QueryCommandTarget> DotDigraph<'a> for DotTargetGraph { type Node = DotTargetGraphNode<'a, T>; fn name(&self) -> &str { @@ -52,7 +54,7 @@ impl<'a, T: QueryTarget> DotDigraph<'a> for DotTargetGraph { // Only include edges to other nodes within the subgraph. 
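The edge callback below only visits dependencies that are themselves in the rendered `TargetSet`, so the output is the induced subgraph of the query result. A small sketch of that membership filter (graph contents invented):

```rust
use std::collections::{HashMap, HashSet};

fn main() {
    // Full dependency graph; the query result selects only a subset of nodes.
    let deps: HashMap<&str, Vec<&str>> = HashMap::from([
        ("root//a:bin", vec!["root//b:lib", "root//c:lib"]),
        ("root//b:lib", vec!["root//d:lib"]),
    ]);
    let subgraph: HashSet<&str> = HashSet::from(["root//a:bin", "root//b:lib"]);

    // Emit only edges with *both* endpoints in the subgraph, like the
    // `self.targets.contains(dep)` check below.
    for node in &subgraph {
        for dep in deps.get(node).into_iter().flatten() {
            if subgraph.contains(dep) {
                println!("  \"{}\" -> \"{}\";", node, dep);
            }
        }
    }
    // Prints just: "root//a:bin" -> "root//b:lib";
}
```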
if self.targets.contains(dep) { f(&DotEdge { - from: &node.0.node_ref().to_string(), + from: &node.0.node_key().to_string(), to: &dep.to_string(), })?; } @@ -61,7 +63,7 @@ impl<'a, T: QueryTarget> DotDigraph<'a> for DotTargetGraph { } } -impl<'a, T: QueryTarget> DotNode for DotTargetGraphNode<'a, T> { +impl<'a, T: QueryCommandTarget> DotNode for DotTargetGraphNode<'a, T> { fn attrs(&self) -> anyhow::Result { let extra = match &self.1.attributes { Some(attr_regex) => { @@ -72,7 +74,15 @@ impl<'a, T: QueryTarget> DotNode for DotTargetGraphNode<'a, T> { if attr_regex.is_match(attr_name) { extra.insert( format!("buck_{}", attr_name), - self.0.attr_to_string_alternate(attr_value), + format!( + "{}", + self.0.attr_display( + attr_value, + AttrFmtOptions { + exclude_quotes: true, + }, + ) + ), ); } Ok(()) @@ -91,6 +101,6 @@ impl<'a, T: QueryTarget> DotNode for DotTargetGraphNode<'a, T> { } fn id(&self) -> String { - self.0.node_ref().to_string() + self.0.node_key().to_string() } } diff --git a/app/buck2_server_commands/src/lib.rs b/app/buck2_server_commands/src/lib.rs index 4866d1fdf5499..06584f68c34ce 100644 --- a/app/buck2_server_commands/src/lib.rs +++ b/app/buck2_server_commands/src/lib.rs @@ -7,10 +7,12 @@ * of this source tree. */ +#![feature(error_generic_member_access)] + //! Implementation of several server commands. -#![feature(async_closure)] #![feature(box_patterns)] +#![feature(let_chains)] #![feature(try_blocks)] pub mod commands; diff --git a/app/buck2_server_commands/src/target_hash.rs b/app/buck2_server_commands/src/target_hash.rs index ede8ddce5b369..5aafc7052bf77 100644 --- a/app/buck2_server_commands/src/target_hash.rs +++ b/app/buck2_server_commands/src/target_hash.rs @@ -16,25 +16,24 @@ use std::sync::Arc; use async_recursion::async_recursion; use async_trait::async_trait; use buck2_build_api::configure_targets::get_compatible_targets; -use buck2_common::dice::file_ops::HasFileOps; -use buck2_common::file_ops::FileOps; +use buck2_common::dice::file_ops::DiceFileComputations; use buck2_common::file_ops::PathMetadata; use buck2_common::file_ops::PathMetadataOrRedirection; -use buck2_common::result::SharedResult; -use buck2_common::result::ToSharedResultExt; +use buck2_common::global_cfg_options::GlobalCfgOptions; use buck2_core::cells::cell_path::CellPath; use buck2_core::cells::cell_path::CellPathRef; use buck2_core::package::PackageLabel; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::configured_or_unconfigured::ConfiguredOrUnconfiguredTargetLabel; +use buck2_core::target::label::label::TargetLabel; +use buck2_futures::spawn::spawn_cancellable; +use buck2_futures::spawn::DropCancelFuture; use buck2_node::nodes::configured::ConfiguredTargetNode; use buck2_node::nodes::unconfigured::TargetNode; -use buck2_query::query::environment::ConfiguredOrUnconfiguredTargetLabel; use buck2_query::query::environment::QueryTarget; +use buck2_query::query::environment::QueryTargetDepsSuccessors; use buck2_query::query::syntax::simple::eval::set::TargetSet; use buck2_query::query::traversal::async_depth_first_postorder_traversal; use buck2_query::query::traversal::AsyncNodeLookup; -use buck2_query::query::traversal::AsyncTraversalDelegate; -use buck2_query::query::traversal::ChildVisitor; use dice::DiceComputations; use dice::DiceTransaction; use dupe::Dupe; @@ -44,14 +43,12 @@ use futures::join; use futures::stream::FuturesUnordered; use futures::FutureExt; use futures::StreamExt; -use more_futures::spawn::spawn_cancellable; -use more_futures::spawn::DropCancelFuture; 
use os_str_bytes::OsStrBytes; use siphasher::sip128::Hasher128; use siphasher::sip128::SipHasher24; #[derive(Clone, Dupe, derive_more::Display)] -#[display(fmt = "{:032x}", _0)] +#[display("{:032x}", _0)] pub struct BuckTargetHash(pub u128); trait BuckTargetHasher: Hasher + Send + 'static { @@ -129,11 +126,11 @@ impl FileHasher for PathsAndContentsHasher { async fn hash_path(&self, cell_path: &CellPath) -> anyhow::Result> { #[async_recursion] async fn hash_item( - file_ops: &dyn FileOps, + ctx: &mut DiceComputations<'_>, cell_path: CellPathRef<'async_recursion>, res: &mut Vec, ) -> anyhow::Result<()> { - let info = file_ops.read_path_metadata(cell_path.dupe()).await?; + let info = DiceFileComputations::read_path_metadata(ctx, cell_path.dupe()).await?; // Important that the different branches can never clash, so add a prefix byte to them match PathMetadataOrRedirection::from(info) { PathMetadataOrRedirection::PathMetadata(meta) => match meta { @@ -156,27 +153,28 @@ impl FileHasher for PathsAndContentsHasher { } PathMetadata::Directory => { res.push(2u8); - let files = file_ops.read_dir(cell_path.dupe()).await?.included; + let files = DiceFileComputations::read_dir(ctx, cell_path.dupe()) + .await? + .included; res.extend(files.len().to_be_bytes()); for x in &*files { let name = x.file_name.as_str(); res.extend(name.len().to_be_bytes()); res.extend(name.as_bytes()); - hash_item(file_ops, cell_path.join(&x.file_name).as_ref(), res).await?; + hash_item(ctx, cell_path.join(&x.file_name).as_ref(), res).await?; } } }, PathMetadataOrRedirection::Redirection(r) => { // TODO (T126181780): This should have a limit on recursion. - hash_item(file_ops, r.as_ref().as_ref(), res).await?; + hash_item(ctx, r.as_ref().as_ref(), res).await?; } } Ok(()) } - let file_ops = self.dice.file_ops(); let mut res = Vec::new(); - hash_item(&file_ops, cell_path.as_ref(), &mut res).await?; + hash_item(&mut self.dice.clone(), cell_path.as_ref(), &mut res).await?; Ok(res) } } @@ -193,9 +191,9 @@ pub trait TargetHashingTargetNode: QueryTarget { // Takes in Target Nodes and returns a new set of (un)Configured // Target Nodes based on type of hashing specified. async fn get_target_nodes( - dice: &DiceComputations, + dice: &mut DiceComputations, loaded_targets: Vec<(PackageLabel, anyhow::Result>)>, - global_target_platform: Option, + global_cfg_options: &GlobalCfgOptions, ) -> anyhow::Result>; } @@ -206,11 +204,11 @@ impl TargetHashingTargetNode for ConfiguredTargetNode { } async fn get_target_nodes( - dice: &DiceComputations, + dice: &mut DiceComputations, loaded_targets: Vec<(PackageLabel, anyhow::Result>)>, - global_target_platform: Option, + global_cfg_options: &GlobalCfgOptions, ) -> anyhow::Result> { - get_compatible_targets(dice, loaded_targets.into_iter(), global_target_platform).await + get_compatible_targets(dice, loaded_targets.into_iter(), global_cfg_options).await } } @@ -221,9 +219,9 @@ impl TargetHashingTargetNode for TargetNode { } async fn get_target_nodes( - _dice: &DiceComputations, + _dice: &mut DiceComputations, loaded_targets: Vec<(PackageLabel, anyhow::Result>)>, - _global_target_platform: Option, + _global_cfg_options: &GlobalCfgOptions, ) -> anyhow::Result> { let mut target_set = TargetSet::new(); for (_package, result) in loaded_targets { @@ -234,10 +232,10 @@ impl TargetHashingTargetNode for TargetNode { } pub struct TargetHashes { // key is an unconfigured target label, but the hash is generated from the configured target label. 
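`hash_item` above frames every filesystem node with a tag byte and big-endian length prefixes before any bytes reach the hasher, so two different trees can never flatten to the same byte stream. A sketch of that framing over a toy tree; tag values are illustrative, and it swaps buck2's 128-bit `SipHasher24` for std's `DefaultHasher` to stay dependency-free:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;

/// A file tree reduced to the two cases that matter for framing.
enum Node {
    File(Vec<u8>),
    Dir(Vec<(String, Node)>),
}

/// Serialize with a tag byte per kind plus length prefixes, so that
/// e.g. ["ab"] and ["a", "b"] can never produce the same byte stream.
fn hash_node(node: &Node, out: &mut Vec<u8>) {
    match node {
        Node::File(contents) => {
            out.push(1u8); // tag: file
            out.extend(contents.len().to_be_bytes());
            out.extend(contents);
        }
        Node::Dir(entries) => {
            out.push(2u8); // tag: directory
            out.extend(entries.len().to_be_bytes());
            for (name, child) in entries {
                out.extend(name.len().to_be_bytes());
                out.extend(name.as_bytes());
                hash_node(child, out);
            }
        }
    }
}

fn main() {
    let tree = Node::Dir(vec![("src".to_owned(), Node::File(b"fn main() {}".to_vec()))]);
    let mut bytes = Vec::new();
    hash_node(&tree, &mut bytes);

    let mut hasher = DefaultHasher::new();
    hasher.write(&bytes);
    println!("{:016x}", hasher.finish());
}
```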
- target_mapping: HashMap>, + target_mapping: HashMap>, } -#[derive(thiserror::Error, Debug)] +#[derive(buck2_error::Error, Debug)] enum TargetHashError { #[error( "Found a dependency `{0}` of target `{1}` which has not been hashed yet. This may indicate a dependency cycle in the unconfigured graph." @@ -246,7 +244,7 @@ enum TargetHashError { } impl TargetHashes { - pub fn get(&self, label: &TargetLabel) -> Option<&SharedResult> { + pub fn get(&self, label: &TargetLabel) -> Option<&buck2_error::Result> { self.target_mapping.get(label) } @@ -258,106 +256,87 @@ impl TargetHashes { use_fast_hash: bool, ) -> anyhow::Result where - T::NodeRef: ConfiguredOrUnconfiguredTargetLabel, + T::Key: ConfiguredOrUnconfiguredTargetLabel, { - struct Delegate { - hashes: HashMap>>>, - file_hasher: Option>, - use_fast_hash: bool, - dice: DiceTransaction, - } - - #[async_trait] - impl AsyncTraversalDelegate for Delegate { - fn visit(&mut self, target: T) -> anyhow::Result<()> { - // this is postorder, so guaranteed that all deps have futures already. - let dep_futures: Vec<_> = target - .deps() - .map(|dep| { - self.hashes.get(dep).cloned().ok_or_else(|| { - TargetHashError::DependencyCycle( - dep.clone().to_string(), - target.node_ref().to_string(), - ) - }) + let mut hashes: HashMap< + T::Key, + Shared>>, + > = HashMap::new(); + + let visit = |target: T| { + // this is postorder, so guaranteed that all deps have futures already. + let dep_futures: Vec<_> = target + .deps() + .map(|dep| { + hashes.get(dep).cloned().ok_or_else(|| { + TargetHashError::DependencyCycle( + dep.clone().to_string(), + target.node_key().to_string(), + ) }) - .collect::, TargetHashError>>()?; - - let file_hasher = self.file_hasher.dupe(); - let dice = self.dice.dupe(); - - let use_fast_hash = self.use_fast_hash; - // we spawn off the hash computation since it can't be done in visit directly. Even if it could, - // this allows us to start the computations for dependents before finishing the computation for a node. - self.hashes.insert( - target.node_ref().clone(), - spawn_cancellable( - |_| { - async move { - let mut hasher = TargetHashes::new_hasher(use_fast_hash); - TargetHashes::hash_node(&target, &mut *hasher); - - let mut input_futs = Vec::new(); - if let Some(file_hasher) = file_hasher { - target.inputs_for_each(|cell_path| { - let file_hasher = file_hasher.dupe(); - input_futs.push(async move { - let file_hash = file_hasher.hash_path(&cell_path).await; - (cell_path, file_hash) - }); - anyhow::Ok(()) - })?; - } - - let (dep_hashes, input_hashes) = - join!(join_all(dep_futures), join_all(input_futs)); - - TargetHashes::hash_deps(dep_hashes, &mut *hasher)?; - TargetHashes::hash_files(input_hashes, &mut *hasher)?; - - Ok(hasher.finish_u128()) + }) + .collect::, TargetHashError>>()?; + + let file_hasher = file_hasher.dupe(); + let dice = dice.dupe(); + + // we spawn off the hash computation since it can't be done in visit directly. Even if it could, + // this allows us to start the computations for dependents before finishing the computation for a node. 
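The refactor replaces the `AsyncTraversalDelegate` impl with a plain closure over a map of `Shared` futures: postorder guarantees a dependency's future is inserted before any dependent looks it up, and `Shared` lets many dependents await the same computation exactly once. A self-contained miniature of that scheme (toy hash fold, no spawning):

```rust
use std::collections::HashMap;

use futures::future::{join_all, BoxFuture, FutureExt, Shared};

#[tokio::main]
async fn main() {
    // Postorder over a tiny DAG: deps come first, exactly the invariant
    // the traversal provides ("all deps have futures already").
    let postorder: Vec<(&str, Vec<&str>)> =
        vec![("leaf", vec![]), ("mid", vec!["leaf"]), ("root", vec!["mid", "leaf"])];

    let mut hashes: HashMap<&str, Shared<BoxFuture<'static, u64>>> = HashMap::new();

    for (node, deps) in postorder {
        // Clone the deps' shared futures; awaiting a Shared many times is fine.
        let dep_futs: Vec<_> = deps.iter().map(|d| hashes[d].clone()).collect();
        let seed = node.len() as u64;
        let fut = async move {
            // A node's hash folds in every dep hash, each computed at most once.
            let dep_hashes = join_all(dep_futs).await;
            dep_hashes
                .into_iter()
                .fold(seed, |acc, h| acc.wrapping_mul(31).wrapping_add(h))
        }
        .boxed()
        .shared();
        hashes.insert(node, fut);
    }

    println!("root hash = {}", hashes["root"].clone().await);
}
```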
+ hashes.insert( + target.node_key().clone(), + spawn_cancellable( + |_| { + async move { + let mut hasher = TargetHashes::new_hasher(use_fast_hash); + TargetHashes::hash_node(&target, &mut *hasher); + + let mut input_futs = Vec::new(); + if let Some(file_hasher) = file_hasher { + target.inputs_for_each(|cell_path| { + let file_hasher = file_hasher.dupe(); + input_futs.push(async move { + let file_hash = file_hasher.hash_path(&cell_path).await; + (cell_path, file_hash) + }); + anyhow::Ok(()) + })?; } - .boxed() - }, - &*dice.per_transaction_data().spawner, - dice.per_transaction_data(), - ) - .into_drop_cancel() - .shared(), - ); - Ok(()) - } + let (dep_hashes, input_hashes) = + join!(join_all(dep_futures), join_all(input_futs)); - async fn for_each_child( - &mut self, - target: &T, - func: &mut dyn ChildVisitor, - ) -> anyhow::Result<()> { - for dep in target.deps() { - func.visit(dep.clone())?; - } + TargetHashes::hash_deps(dep_hashes, &mut *hasher)?; + TargetHashes::hash_files(input_hashes, &mut *hasher)?; - Ok(()) - } - } + Ok(hasher.finish_u128()) + } + .boxed() + }, + &*dice.per_transaction_data().spawner, + dice.per_transaction_data(), + ) + .into_drop_cancel() + .shared(), + ); - let mut delegate = Delegate:: { - hashes: HashMap::new(), - file_hasher, - use_fast_hash, - dice, + Ok(()) }; - async_depth_first_postorder_traversal(&lookup, targets.iter_names(), &mut delegate).await?; + async_depth_first_postorder_traversal( + &lookup, + targets.iter_names(), + QueryTargetDepsSuccessors, + visit, + ) + .await?; - let mut futures: FuturesUnordered<_> = delegate - .hashes + let mut futures: FuturesUnordered<_> = hashes .into_iter() .map(|(target, fut)| async move { (target, fut.await) }) .collect(); - let mut target_mapping: HashMap> = HashMap::new(); + let mut target_mapping: HashMap> = + HashMap::new(); // TODO(cjhopman): FuturesOrdered/Unordered interacts poorly with tokio cooperative scheduling // (see https://github.com/rust-lang/futures-rs/issues/2053). 
Clean this up once a good @@ -383,7 +362,7 @@ impl TargetHashes { use_fast_hash: bool, ) -> anyhow::Result where - T::NodeRef: ConfiguredOrUnconfiguredTargetLabel, + T::Key: ConfiguredOrUnconfiguredTargetLabel, { let hashing_futures: Vec<_> = targets .into_iter() @@ -412,8 +391,8 @@ impl TargetHashes { hasher.finish_u128() }; ( - target.node_ref().unconfigured_label().dupe(), - hash_result.shared_error(), + target.node_key().unconfigured_label().dupe(), + hash_result.map_err(buck2_error::Error::from), ) } .boxed() @@ -421,7 +400,7 @@ impl TargetHashes { }) .collect(); - let target_mapping: HashMap> = + let target_mapping: HashMap> = join_all(hashing_futures).await.into_iter().collect(); Ok(Self { target_mapping }) } @@ -433,18 +412,18 @@ impl TargetHashes { } pub async fn compute>( - dice: DiceTransaction, + mut dice: DiceTransaction, lookup: L, targets: Vec<(PackageLabel, anyhow::Result>)>, - global_target_platform: Option, + global_cfg_options: &GlobalCfgOptions, file_hash_mode: TargetHashesFileMode, use_fast_hash: bool, target_hash_recursive: bool, ) -> anyhow::Result where - T::NodeRef: ConfiguredOrUnconfiguredTargetLabel, + T::Key: ConfiguredOrUnconfiguredTargetLabel, { - let targets = T::get_target_nodes(&dice, targets, global_target_platform).await?; + let targets = T::get_target_nodes(&mut dice, targets, global_cfg_options).await?; let file_hasher = Self::new_file_hasher(dice.dupe(), file_hash_mode); if target_hash_recursive { Self::compute_recursive_target_hashes(dice, lookup, targets, file_hasher, use_fast_hash) @@ -484,9 +463,9 @@ impl TargetHashes { } fn hash_deps( - dep_hashes: Vec>, + dep_hashes: Vec>, hasher: &mut dyn BuckTargetHasher, - ) -> SharedResult<()> { + ) -> buck2_error::Result<()> { for target_hash in dep_hashes { hasher.write_u128(target_hash?.0); } @@ -496,7 +475,7 @@ impl TargetHashes { fn hash_files( file_digests: Vec<(CellPath, anyhow::Result>)>, mut hasher: &mut dyn BuckTargetHasher, - ) -> SharedResult<()> { + ) -> buck2_error::Result<()> { for (path, digest) in file_digests { path.hash(&mut hasher); let digest = digest?; diff --git a/app/buck2_server_ctx/BUCK b/app/buck2_server_ctx/BUCK index dc5cc512f0d26..e52c87a6d61a8 100644 --- a/app/buck2_server_ctx/BUCK +++ b/app/buck2_server_ctx/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -20,25 +19,26 @@ rust_library( "fbsource//third-party/rust:itertools", "fbsource//third-party/rust:parking_lot", "fbsource//third-party/rust:pin-project", - "fbsource//third-party/rust:thiserror", "fbsource//third-party/rust:tokio", "fbsource//third-party/rust:tonic", "fbsource//third-party/rust:tracing", - "fbsource//third-party/rust:tracing-subscriber", "//buck2/allocative/allocative:allocative", "//buck2/app/buck2_build_signals:buck2_build_signals", + "//buck2/app/buck2_certs:buck2_certs", "//buck2/app/buck2_cli_proto:buck2_cli_proto", "//buck2/app/buck2_common:buck2_common", "//buck2/app/buck2_core:buck2_core", "//buck2/app/buck2_data:buck2_data", + "//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_events:buck2_events", "//buck2/app/buck2_execute:buck2_execute", + "//buck2/app/buck2_futures:buck2_futures", + "//buck2/app/buck2_node:buck2_node", "//buck2/app/buck2_util:buck2_util", "//buck2/app/buck2_wrapper_common:buck2_wrapper_common", "//buck2/dice/dice:dice", "//buck2/gazebo/dupe:dupe", "//buck2/gazebo/gazebo:gazebo", - "//buck2/shed/more_futures:more_futures", 
"//buck2/starlark-rust/starlark_map:starlark_map", ], ) diff --git a/app/buck2_server_ctx/Cargo.toml b/app/buck2_server_ctx/Cargo.toml index 21d0be15f4883..6024ba84c081a 100644 --- a/app/buck2_server_ctx/Cargo.toml +++ b/app/buck2_server_ctx/Cargo.toml @@ -1,8 +1,10 @@ [package] +description = "Common parts of Buck commands" +edition = "2021" +license = { workspace = true } name = "buck2_server_ctx" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Common parts of Buck commands" [dependencies] anyhow = { workspace = true } @@ -13,31 +15,33 @@ futures = { workspace = true } itertools = { workspace = true } parking_lot = { workspace = true } pin-project = { workspace = true } -thiserror = { workspace = true } +tokio = { workspace = true } tonic = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } -more_futures = { workspace = true } -tokio = { workspace = true } allocative = { workspace = true } dice = { workspace = true } -gazebo = { workspace = true } dupe = { workspace = true } +gazebo = { workspace = true } # Please do not add dependency on `buck2_build_api`. +buck2_build_signals = { workspace = true } +buck2_certs = { workspace = true } +buck2_cli_proto = { workspace = true } buck2_common = { workspace = true } buck2_core = { workspace = true } buck2_data = { workspace = true } +buck2_error = { workspace = true } buck2_events = { workspace = true } buck2_execute = { workspace = true } -buck2_cli_proto = { workspace = true } +buck2_futures = { workspace = true } +buck2_node = { workspace = true } buck2_util = { workspace = true } buck2_wrapper_common = { workspace = true } -buck2_build_signals = { workspace = true } starlark_map = { workspace = true } [dev-dependencies] +assert_matches = { workspace = true } derivative = { workspace = true } tokio = { workspace = true } -assert_matches = { workspace = true } diff --git a/app/buck2_server_ctx/src/command_end.rs b/app/buck2_server_ctx/src/command_end.rs deleted file mode 100644 index 79c6453623b6c..0000000000000 --- a/app/buck2_server_ctx/src/command_end.rs +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -/// Common code executed in the end of command to produce `CommandEnd`. -pub fn command_end(result: &anyhow::Result, data: D) -> buck2_data::CommandEnd -where - D: Into, -{ - command_end_ext(result, data.into(), |_| true) -} - -pub fn command_end_ext( - result: &anyhow::Result, - data: D, - is_success: F, -) -> buck2_data::CommandEnd -where - F: FnOnce(&R) -> bool, - D: Into, -{ - let (is_success, error_messages) = match result { - Ok(r) => (is_success(r), Vec::new()), - Err(e) => (false, vec![format!("{:#}", e)]), - }; - buck2_data::CommandEnd { - is_success, - error_messages, - data: Some(data.into()), - } -} diff --git a/app/buck2_server_ctx/src/commands.rs b/app/buck2_server_ctx/src/commands.rs new file mode 100644 index 0000000000000..14a480ae8d11b --- /dev/null +++ b/app/buck2_server_ctx/src/commands.rs @@ -0,0 +1,71 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::collections::HashSet; + +use buck2_core::provider::label::ConfiguredProvidersLabel; +use buck2_events::dispatch::EventDispatcher; +use buck2_events::errors::create_error_report; + +/// Common code executed in the end of command to produce `CommandEnd`. +pub fn command_end(result: &buck2_error::Result, data: D) -> buck2_data::CommandEnd +where + D: Into, +{ + command_end_ext(result, data.into(), |_| true, |_| Vec::new()) +} + +pub fn command_end_ext( + result: &buck2_error::Result, + data: D, + is_success: F, + additional_telemetry_errors: G, +) -> buck2_data::CommandEnd +where + F: FnOnce(&R) -> bool, + G: FnOnce(&R) -> Vec, + D: Into, +{ + let (is_success, errors) = match result { + Ok(r) => (is_success(r), additional_telemetry_errors(r)), + Err(e) => (false, vec![create_error_report(e)]), + }; + buck2_data::CommandEnd { + is_success, + errors, + data: Some(data.into()), + } +} + +/// Common code to send TargetCfg event after command execution. +pub fn send_target_cfg_event( + event_dispatcher: &EventDispatcher, + conf_labels: impl IntoIterator, + target_cfg: &Option, +) { + let mut target_platforms = HashSet::new(); + for conf in conf_labels { + // cfg can be unbound + if let Ok(label) = conf.cfg().label() { + if !target_platforms.contains(label) { + target_platforms.insert(label.to_owned()); + } + } + } + + let cli_modifiers = target_cfg + .as_ref() + .map(|cfg| cfg.cli_modifiers.clone()) + .unwrap_or_default(); + + event_dispatcher.instant_event(buck2_data::TargetCfg { + target_platforms: target_platforms.into_iter().collect(), + cli_modifiers, + }); +} diff --git a/app/buck2_server_ctx/src/concurrency.rs b/app/buck2_server_ctx/src/concurrency.rs index c56ea36f044a5..7c7431b3f1b1a 100644 --- a/app/buck2_server_ctx/src/concurrency.rs +++ b/app/buck2_server_ctx/src/concurrency.rs @@ -15,13 +15,13 @@ use std::collections::VecDeque; use std::fmt::Debug; -use std::ops::Deref; use std::sync::Arc; use allocative::Allocative; use anyhow::Context; use async_condvar_fair::Condvar; use async_trait::async_trait; +use buck2_cli_proto::client_context::PreemptibleWhen; use buck2_core::soft_error; use buck2_data::DiceBlockConcurrentCommandEnd; use buck2_data::DiceBlockConcurrentCommandStart; @@ -30,32 +30,35 @@ use buck2_data::DiceSynchronizeSectionEnd; use buck2_data::DiceSynchronizeSectionStart; use buck2_data::ExclusiveCommandWaitEnd; use buck2_data::ExclusiveCommandWaitStart; -use buck2_data::ExitWhenDifferentState; use buck2_data::NoActiveDiceState; +use buck2_error::internal_error_anyhow; use buck2_events::dispatch::EventDispatcher; +use buck2_futures::cancellation::ExplicitCancellationContext; use buck2_util::truncate::truncate; use buck2_wrapper_common::invocation_id::TraceId; use derive_more::Display; use dice::Dice; -use dice::DiceComputations; use dice::DiceEquality; use dice::DiceTransaction; use dice::DiceTransactionUpdater; use dice::UserComputationData; use dupe::Dupe; +use futures::future; use futures::future::BoxFuture; +use futures::future::Either; use futures::future::Future; use futures::future::FutureExt; use futures::future::Shared; +use futures::pin_mut; use itertools::Itertools; -use more_futures::cancellation::ExplicitCancellationContext; use starlark_map::small_map::SmallMap; use 
starlark_map::small_set::SmallSet; -use thiserror::Error; +use tokio::sync::oneshot; +use tokio::sync::oneshot::error::RecvError; use tokio::sync::Mutex; use tokio::sync::MutexGuard; -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] enum ConcurrencyHandlerError { #[error( "Recursive invocation of Buck, which is discouraged, but will probably work (using the same state). Trace Ids: {0}. Recursive invocation command: `{1}`" @@ -64,9 +67,15 @@ enum ConcurrencyHandlerError { #[error( "Recursive invocation of Buck, with a different state. Use `--isolation-dir` on the inner invocation to fix this. Trace Ids: {0}. Recursive invocation command: `{1}`" )] + #[buck2(input)] NestedInvocationWithDifferentStates(String, String), #[error("`--exit-when-different-state` was set")] + #[buck2(tag = DaemonIsBusy)] ExitWhenDifferentState, + + #[error("`--preemptible` was set, and buck daemon preempted this command as another came in.")] + #[buck2(tag = DaemonPreempted)] + ExitOnPreemption, } #[derive(Clone, Dupe, Copy, Debug)] @@ -86,16 +95,16 @@ pub enum BypassSemaphore { /// /// Currently, we allow concurrency if two `DiceTransactions` are deemed equivalent, such that /// any computation result that occurs in one is directly reusable by another. -#[derive(Clone, Dupe, Allocative)] +#[derive(Allocative)] pub struct ConcurrencyHandler { - data: Arc>, + data: Mutex, // use an async condvar because the `wait` to `notify` spans across an async function (namely // the entire command execution). #[allocative(skip)] - cond: Arc, + cond: Condvar, dice: Arc, /// Used to prevent commands (clean --stale) from running in parallel with dice commands - exclusive_command_lock: Arc, + exclusive_command_lock: ExclusiveCommandLock, } #[derive(Allocative)] @@ -130,6 +139,9 @@ struct CommandData { trace_id: TraceId, argv: Vec, dispatcher: EventDispatcher, + preemption_setting: PreemptibleWhen, + #[allocative(skip)] + preempt: Option>, } impl CommandData { @@ -242,13 +254,7 @@ pub trait DiceUpdater: Send + Sync { async fn update( &self, mut ctx: DiceTransactionUpdater, - _user_data: &mut UserComputationData, - ) -> anyhow::Result; -} - -#[async_trait] -pub trait DiceDataProvider: Send + Sync + 'static { - async fn provide(&self, ctx: &DiceComputations) -> anyhow::Result; + ) -> anyhow::Result<(DiceTransactionUpdater, UserComputationData)>; } #[derive(Allocative)] @@ -257,6 +263,7 @@ struct ExclusiveCommandLock { owning_command: Arc>>, } +#[allow(dead_code)] // fields never read enum ExclusiveCommandLockGuard<'a> { Shared(tokio::sync::RwLockReadGuard<'a, ()>), Exclusive( @@ -306,27 +313,26 @@ impl ExclusiveCommandLock { } impl ConcurrencyHandler { - pub fn new(dice: Arc) -> Self { - ConcurrencyHandler { - data: Arc::new(Mutex::new(ConcurrencyHandlerData { + pub fn new(dice: Arc) -> Arc { + Arc::new(ConcurrencyHandler { + data: Mutex::new(ConcurrencyHandlerData { dice_status: DiceStatus::idle(), active_commands: SmallMap::new(), next_command_id: CommandId(0), cleanup_epoch: 0, previously_tainted: false, - })), - cond: Default::default(), + }), + cond: Condvar::new(), dice, - exclusive_command_lock: Arc::new(ExclusiveCommandLock::new()), - } + exclusive_command_lock: ExclusiveCommandLock::new(), + }) } /// Enters a critical section that requires concurrent command synchronization, /// and runs the given `exec` function in the critical section. 
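Inside `enter`, the new preemption support races the command body against a oneshot receiver and maps a lost race to `ExitOnPreemption`. A runnable reduction of that race using `futures::future::select`; the sleep stands in for real command work:

```rust
use futures::future::{self, Either};
use futures::pin_mut;
use tokio::sync::oneshot;

#[tokio::main]
async fn main() {
    let (preempt_tx, preempt_rx) = oneshot::channel::<()>();

    // Stand-in for the command body run inside the critical section.
    let work = async {
        tokio::time::sleep(std::time::Duration::from_millis(50)).await;
        "finished"
    };

    // Another command arriving would fire this; here we preempt immediately.
    preempt_tx.send(()).unwrap();

    pin_mut!(work);
    pin_mut!(preempt_rx);

    // Whichever side completes first wins; losing the race maps to
    // `ExitOnPreemption` in the real handler.
    match future::select(work, preempt_rx).await {
        Either::Left((result, _)) => println!("command completed: {result}"),
        Either::Right((_, _)) => println!("preempted before completion"),
    }
}
```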
     /// Enters a critical section that requires concurrent command synchronization,
     /// and runs the given `exec` function in the critical section.
     pub async fn enter<F, Fut, R>(
-        &self,
+        self: &Arc<Self>,
         event_dispatcher: EventDispatcher,
-        data: &dyn DiceDataProvider,
         updates: &dyn DiceUpdater,
         exec: F,
         is_nested_invocation: bool,
@@ -334,6 +340,7 @@ impl ConcurrencyHandler {
         exclusive_cmd: Option<String>,
         exit_when_different_state: bool,
         cancellations: &ExplicitCancellationContext,
+        preemptible: PreemptibleWhen,
     ) -> anyhow::Result<R>
     where
         F: FnOnce(DiceTransaction) -> Fut,
@@ -358,18 +365,18 @@ impl ConcurrencyHandler {
             .await;
 
         let events = event_dispatcher.dupe();
-        let (_guard, transaction) = event_dispatcher
+        let (_guard, transaction, preempt_receiver) = event_dispatcher
             .span_async(DiceSynchronizeSectionStart {}, async move {
                 (
                     cancellations
                         .critical_section(|| {
                             self.wait_for_others(
-                                data,
                                 updates,
                                 events,
                                 is_nested_invocation,
                                 sanitized_argv,
                                 exit_when_different_state,
+                                preemptible,
                             )
                         })
                         .await,
@@ -378,7 +385,16 @@ impl ConcurrencyHandler {
             })
             .await?;
 
-        Ok(exec(transaction).await)
+        let result = exec(transaction);
+        pin_mut!(result);
+        pin_mut!(preempt_receiver);
+
+        match future::select(result, preempt_receiver).await {
+            Either::Left((result, _)) => Ok(result),
+            Either::Right((_preemption, _)) => {
+                Err(ConcurrencyHandlerError::ExitOnPreemption.into())
+            }
+        }
     }
 
     // this is normally super unsafe, but because we are using an async condvar that takes care
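The preemption wiring above is the standard oneshot-plus-select pattern: the command body races against a channel that fires when the scheduler decides to preempt it. A self-contained sketch of the same pattern, with names that are illustrative rather than taken from this diff:

    use futures::future::{self, Either};
    use futures::pin_mut;
    use tokio::sync::oneshot;

    async fn run_preemptible<F, R>(work: F, preempt: oneshot::Receiver<()>) -> Result<R, &'static str>
    where
        F: std::future::Future<Output = R>,
    {
        // `select` needs Unpin futures; pinning on the stack provides that.
        pin_mut!(work);
        pin_mut!(preempt);
        match future::select(work, preempt).await {
            // The work finished first: return its result.
            Either::Left((result, _)) => Ok(result),
            // The preemption signal fired (or its sender was dropped) first.
            Either::Right((_signal, _)) => Err("preempted"),
        }
    }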
@@ -386,27 +402,40 @@ impl ConcurrencyHandler {
     // The async condvar will handle properly allowing other threads to proceed, avoiding
     // starvation.
     async fn wait_for_others(
-        &self,
-        user_data: &dyn DiceDataProvider,
+        self: &Arc<Self>,
         updates: &dyn DiceUpdater,
         event_dispatcher: EventDispatcher,
         is_nested_invocation: bool,
         sanitized_argv: Vec<String>,
         exit_when_different_state: bool,
-    ) -> anyhow::Result<(OnExecExit, DiceTransaction)> {
+        preemptible: PreemptibleWhen,
+    ) -> anyhow::Result<(
+        OnExecExit,
+        DiceTransaction,
+        impl Future<Output = Result<(), RecvError>>,
+    )> {
+        // Have to put it on the function unfortunately, https://github.com/rust-lang/rust-clippy/issues/9047
+        #![allow(clippy::await_holding_invalid_type)]
+
         let trace = event_dispatcher.trace_id().dupe();
 
         let span = tracing::span!(tracing::Level::DEBUG, "wait_for_others", trace = %trace);
+        // FIXME(JakobDegen): Clippy points out that tracing won't know when this future gets
+        // descheduled from this executor thread, so this may show up in the wrong places
         let _enter = span.enter();
 
         let mut data = self.data.lock().await;
 
         let command_id = data.next_command_id.increment();
 
+        let (preempt_sender, preempt_receiver) = oneshot::channel::<()>();
+
         let command_data = CommandData {
             trace_id: trace.dupe(),
             argv: sanitized_argv,
             dispatcher: event_dispatcher.dupe(),
+            preemption_setting: preemptible,
+            preempt: Some(preempt_sender),
         };
 
         let (transaction, tainted) = loop {
@@ -439,11 +468,8 @@ impl ConcurrencyHandler {
             let transaction = async {
                 let updater = self.dice.updater();
-                let mut user_data = user_data
-                    .provide(updater.existing_state().await.deref())
-                    .await?;
 
-                let transaction = updates.update(updater, &mut user_data).await?;
+                let (transaction, user_data) = updates.update(updater).await?;
 
                 event_dispatcher
                     .span_async(buck2_data::DiceStateUpdateStart {}, async {
@@ -467,6 +493,10 @@ impl ConcurrencyHandler {
                 // If we have a different state, attempt to transition to cleanup. This will
                 // succeed only if the current state is not in use.
                 if !is_same_state {
+                    // If the active commands are preemptible, preempt them.
+                    self.cancel_preemptible_commands(&mut data, is_same_state);
+
+                    // transition to cleanup == "wait until all other blocking commands finish"
                     if data.transition_to_cleanup(&self.dice) {
                         continue;
                     }
@@ -483,25 +513,28 @@ impl ConcurrencyHandler {
                 match bypass_semaphore {
                     BypassSemaphore::Error => {
-                        return Err(anyhow::Error::new(
+                        return Err(
                             ConcurrencyHandlerError::NestedInvocationWithDifferentStates(
                                 format_traces(&data.active_commands, &command_data),
                                 command_data.format_argv(),
-                            ),
-                        ));
+                            )
+                            .into(),
+                        );
                     }
                     BypassSemaphore::Run(state) => {
                         self.emit_logs(state, &data.active_commands, &command_data)?;
+                        self.cancel_preemptible_commands(&mut data, is_same_state);
                         break (transaction, false);
                     }
                     BypassSemaphore::Block => {
                         if exit_when_different_state {
-                            event_dispatcher.instant_event(ExitWhenDifferentState {});
-
-                            return Err(anyhow::Error::new(
-                                ConcurrencyHandlerError::ExitWhenDifferentState,
-                            ))
-                            .context(buck2_data::ErrorCause::DaemonIsBusy);
+                            let active_commands: Vec<String> = data
+                                .active_commands
+                                .values()
+                                .map(|d| TraceId::to_string(&d.trace_id))
+                                .collect();
+                            return Err(ConcurrencyHandlerError::ExitWhenDifferentState)
+                                .with_context(|| format!("Buck daemon is busy processing another command: {}", active_commands.join(", ")));
                         }
                         // We should probably show more than the first here, but for now
                         // this is what we have.
@@ -521,7 +554,7 @@ impl ConcurrencyHandler {
                             },
                             async {
                                 (
-                                    self.cond.wait((data, &*self.data)).await,
+                                    self.cond.wait((data, &self.data)).await,
                                     DiceBlockConcurrentCommandEnd {
                                         ending_active_trace_id: trace_id.to_string(),
                                     },
@@ -554,9 +587,10 @@ impl ConcurrencyHandler {
         }
 
         // create the on exit drop handler, which will take care of notifying tasks.
-        let drop_guard = OnExecExit::new(self.dupe(), command_id, command_data, data);
+        let drop_guard = OnExecExit::new(self.dupe(), command_id, command_data, data)?;
+        // This adds the task to the list of all tasks (see ::new impl)
 
-        Ok((drop_guard, transaction))
+        Ok((drop_guard, transaction, preempt_receiver))
     }
 
     /// Access dice without locking for dumps.
@@ -564,6 +598,24 @@ impl ConcurrencyHandler {
         &self.dice
     }
 
+    fn cancel_preemptible_commands(&self, data: &mut ConcurrencyHandlerData, is_same_state: bool) {
+        // If the active commands are preemptible, interrupt them.
+        for cmd in data.active_commands.values_mut() {
+            if cmd.preemption_setting == PreemptibleWhen::Never {
+                continue;
+            }
+            if is_same_state && cmd.preemption_setting == PreemptibleWhen::OnDifferentState {
+                continue;
+            }
+            match cmd.preempt.take() {
+                Some(preempt) => {
+                    let _ = preempt.send(());
+                }
+                None => {}
+            };
+        }
+    }
+
     fn determine_bypass_semaphore(
         &self,
         is_same_state: bool,
@@ -598,6 +650,7 @@ impl ConcurrencyHandler {
                     active_commands,
                     current_command.format_argv(),
                 ))
+                .into()
             )?;
             }
             _ => {}
         }
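The `OnExecExit` guard defined just below is an RAII drop guard: registration happens in `new`, deregistration in `Drop`, so cancellation anywhere in the command future still cleans up. A generic, self-contained sketch of that idiom, with illustrative names:

    // Minimal sketch of the drop-guard idiom used by OnExecExit (illustrative).
    struct Registration<'a> {
        registry: &'a std::sync::Mutex<Vec<u64>>,
        id: u64,
    }

    impl<'a> Registration<'a> {
        fn new(registry: &'a std::sync::Mutex<Vec<u64>>, id: u64) -> Self {
            registry.lock().unwrap().push(id);
            Registration { registry, id }
        }
    }

    impl Drop for Registration<'_> {
        // Runs on every exit path, including panics and future cancellation.
        fn drop(&mut self) {
            self.registry.lock().unwrap().retain(|x| *x != self.id);
        }
    }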
@@ -622,17 +675,22 @@ fn format_traces(
 /// Held to execute a command so that when the command is canceled, we properly remove its state
 /// from the handler so that it's no longer registered as an ongoing command.
-struct OnExecExit(Option<(ConcurrencyHandler, CommandId)>);
+struct OnExecExit(Option<(Arc<ConcurrencyHandler>, CommandId)>);
 
 impl OnExecExit {
     pub fn new(
-        handler: ConcurrencyHandler,
+        handler: Arc<ConcurrencyHandler>,
         command: CommandId,
         data: CommandData,
         mut guard: MutexGuard<'_, ConcurrencyHandlerData>,
-    ) -> Self {
-        guard.active_commands.insert(command, data);
-        Self(Some((handler, command)))
+    ) -> anyhow::Result<Self> {
+        let prev = guard.active_commands.insert(command, data);
+        if prev.is_some() {
+            return Err(internal_error_anyhow!(
+                "command id `{command}` is already registered"
+            ));
+        }
+        Ok(OnExecExit(Some((handler, command))))
     }
 }
 
@@ -644,7 +702,7 @@ impl Drop for OnExecExit {
         tokio::task::spawn(async move {
             let mut data = this.0.data.lock().await;
             data.active_commands
-                .remove(&this.1)
+                .shift_remove(&this.1)
                 .expect("command was active but not in active_commands");
 
             tracing::info!("Active command was removed: {}", this.1);
@@ -665,37 +723,29 @@ impl Drop for OnExecExit {
 mod tests {
     use std::sync::atomic::AtomicBool;
     use std::sync::atomic::Ordering;
-    use std::sync::Arc;
     use std::task::Poll;
     use std::time::Duration;
 
     use allocative::Allocative;
-    use anyhow::Context;
     use assert_matches::assert_matches;
     use async_trait::async_trait;
     use buck2_core::is_open_source;
     use buck2_events::create_source_sink_pair;
-    use buck2_events::dispatch::EventDispatcher;
     use buck2_events::source::ChannelEventSource;
     use buck2_events::span::SpanId;
     use buck2_events::BuckEvent;
-    use buck2_wrapper_common::invocation_id::TraceId;
+    use buck2_futures::cancellation::CancellationContext;
     use derivative::Derivative;
     use dice::DetectCycles;
-    use dice::Dice;
     use dice::DiceComputations;
-    use dice::DiceTransactionUpdater;
     use dice::InjectedKey;
     use dice::Key;
-    use dice::UserComputationData;
     use dupe::Dupe;
     use futures::pin_mut;
     use futures::poll;
-    use more_futures::cancellation::CancellationContext;
     use parking_lot::Mutex;
     use tokio::sync::Barrier;
     use tokio::sync::RwLock;
-    use tokio::sync::Semaphore;
 
     use super::*;
 
@@ -706,9 +756,8 @@ mod tests {
         async fn update(
             &self,
             ctx: DiceTransactionUpdater,
-            _user_data: &mut UserComputationData,
-        ) -> anyhow::Result<DiceTransactionUpdater> {
-            Ok(ctx)
+        ) -> anyhow::Result<(DiceTransactionUpdater, UserComputationData)> {
+            Ok((ctx, Default::default()))
         }
     }
 
@@ -719,10 +768,9 @@ mod tests {
         async fn update(
             &self,
             mut ctx: DiceTransactionUpdater,
-            _user_data: &mut UserComputationData,
-        ) -> anyhow::Result<DiceTransactionUpdater> {
+        ) -> anyhow::Result<(DiceTransactionUpdater, UserComputationData)> {
             ctx.changed_to(vec![(K, ())])?;
-            Ok(ctx)
+            Ok((ctx, Default::default()))
         }
     }
 
@@ -738,15 +786,6 @@ mod tests {
         }
     }
 
-    struct TestDiceDataProvider;
-
-    #[async_trait]
-    impl DiceDataProvider for TestDiceDataProvider {
-        async fn provide(&self, _ctx: &DiceComputations) -> anyhow::Result<UserComputationData> {
-            Ok(Default::default())
-        }
-    }
-
     #[tokio::test]
     async fn nested_invocation_same_transaction() {
         // FIXME: This times out on open source, and we don't know why
@@ -766,7 +805,6 @@ mod tests {
         let fut1 = concurrency.enter(
             EventDispatcher::null_sink_with_trace(traces1),
-            &TestDiceDataProvider,
             &NoChanges,
             |_| {
                 let b = barrier.dupe();
@@ -779,10 +817,10 @@ mod tests {
             None,
             false,
             ExplicitCancellationContext::testing(),
+            PreemptibleWhen::Never,
         );
         let fut2 = concurrency.enter(
             EventDispatcher::null_sink_with_trace(traces2),
-            &TestDiceDataProvider,
             &NoChanges,
             |_| {
                 let b = barrier.dupe();
@@ -795,10 +833,10 @@ mod tests {
             None,
             false,
             ExplicitCancellationContext::testing(),
+            PreemptibleWhen::Never,
        );
        let fut3 = concurrency.enter(
EventDispatcher::null_sink_with_trace(traces3), - &TestDiceDataProvider, &NoChanges, |_| { let b = barrier.dupe(); @@ -811,6 +849,7 @@ mod tests { None, false, ExplicitCancellationContext::testing(), + PreemptibleWhen::Never, ); let (r1, r2, r3) = futures::future::join3(fut1, fut2, fut3).await; @@ -832,7 +871,6 @@ mod tests { let fut1 = concurrency.enter( EventDispatcher::null_sink_with_trace(traces1), - &TestDiceDataProvider, &NoChanges, |_| { let b = barrier.dupe(); @@ -845,11 +883,11 @@ mod tests { None, false, ExplicitCancellationContext::testing(), + PreemptibleWhen::Never, ); let fut2 = concurrency.enter( EventDispatcher::null_sink_with_trace(traces2), - &TestDiceDataProvider, &CtxDifferent, |_| { let b = barrier.dupe(); @@ -862,6 +900,7 @@ mod tests { None, false, ExplicitCancellationContext::testing(), + PreemptibleWhen::Never, ); match futures::future::try_join(fut1, fut2).await { @@ -886,7 +925,6 @@ mod tests { let fut1 = concurrency.enter( EventDispatcher::null_sink_with_trace(traces1), - &TestDiceDataProvider, &NoChanges, |_| { let b = barrier.dupe(); @@ -899,10 +937,10 @@ mod tests { None, false, ExplicitCancellationContext::testing(), + PreemptibleWhen::Never, ); let fut2 = concurrency.enter( EventDispatcher::null_sink_with_trace(traces2), - &TestDiceDataProvider, &NoChanges, |_| { let b = barrier.dupe(); @@ -915,10 +953,10 @@ mod tests { None, false, ExplicitCancellationContext::testing(), + PreemptibleWhen::Never, ); let fut3 = concurrency.enter( EventDispatcher::null_sink_with_trace(traces3), - &TestDiceDataProvider, &NoChanges, |_| { let b = barrier.dupe(); @@ -931,6 +969,7 @@ mod tests { None, false, ExplicitCancellationContext::testing(), + PreemptibleWhen::Never, ); let (r1, r2, r3) = futures::future::join3(fut1, fut2, fut3).await; @@ -969,7 +1008,6 @@ mod tests { concurrency .enter( EventDispatcher::null_sink_with_trace(traces1), - &TestDiceDataProvider, &NoChanges, |_| async move { barrier.wait().await; @@ -980,6 +1018,7 @@ mod tests { None, false, ExplicitCancellationContext::testing(), + PreemptibleWhen::Never, ) .await } @@ -994,7 +1033,6 @@ mod tests { concurrency .enter( EventDispatcher::null_sink_with_trace(traces2), - &TestDiceDataProvider, &NoChanges, |_| async move { barrier.wait().await; @@ -1005,6 +1043,7 @@ mod tests { None, false, ExplicitCancellationContext::testing(), + PreemptibleWhen::Never, ) .await } @@ -1022,7 +1061,6 @@ mod tests { concurrency .enter( EventDispatcher::null_sink_with_trace(traces_different), - &TestDiceDataProvider, &CtxDifferent, |_| async move { arrived.store(true, Ordering::Relaxed); @@ -1032,6 +1070,7 @@ mod tests { None, false, ExplicitCancellationContext::testing(), + PreemptibleWhen::Never, ) .await } @@ -1086,7 +1125,6 @@ mod tests { concurrency .enter( EventDispatcher::null_sink_with_trace(traces1), - &TestDiceDataProvider, &NoChanges, |_| async move { barrier.wait().await; @@ -1097,6 +1135,7 @@ mod tests { None, true, ExplicitCancellationContext::testing(), + PreemptibleWhen::Never, ) .await } @@ -1111,7 +1150,6 @@ mod tests { concurrency .enter( EventDispatcher::null_sink_with_trace(traces2), - &TestDiceDataProvider, &NoChanges, |_| async move { barrier.wait().await; @@ -1122,6 +1160,7 @@ mod tests { None, true, ExplicitCancellationContext::testing(), + PreemptibleWhen::Never, ) .await } @@ -1139,7 +1178,6 @@ mod tests { concurrency .enter( EventDispatcher::null_sink_with_trace(traces_different), - &TestDiceDataProvider, &CtxDifferent, |_| async move { arrived.store(true, Ordering::Relaxed); @@ -1149,6 +1187,7 @@ mod 
tests { None, true, ExplicitCancellationContext::testing(), + PreemptibleWhen::Never, ) .await } @@ -1168,21 +1207,139 @@ mod tests { let fut3_result = fut3.await?; - assert!(fut3_result.is_err()); + let fut3_error: buck2_error::Error = fut3_result.unwrap_err().into(); assert!( - fut3_result - .err() - .unwrap() - .to_string() - .contains("daemon is busy") + fut3_error + .tags() + .contains(&buck2_error::ErrorTag::DaemonIsBusy), ); Ok(()) } + #[tokio::test] + async fn parallel_invocation_exit_when_preemptible() -> anyhow::Result<()> { + let dice = Dice::builder().build(DetectCycles::Enabled); + + let concurrency = ConcurrencyHandler::new(dice.dupe()); + + let traces1 = TraceId::new(); + let traces2 = traces1.dupe(); + let traces_different = TraceId::new(); + + let block1 = Arc::new(RwLock::new(())); + let blocked1 = block1.write().await; + + let block2 = Arc::new(RwLock::new(())); + let blocked2 = block2.write().await; + + let barrier1 = Arc::new(Barrier::new(3)); + let barrier2 = Arc::new(Barrier::new(2)); + + let arrived = Arc::new(AtomicBool::new(false)); + + let fut1 = tokio::spawn({ + let concurrency = concurrency.dupe(); + let barrier = barrier1.dupe(); + let b = block1.dupe(); + + async move { + concurrency + .enter( + EventDispatcher::null_sink_with_trace(traces1), + &NoChanges, + |_| async move { + barrier.wait().await; + let _g = b.read().await; + }, + false, + Vec::new(), + None, + false, + ExplicitCancellationContext::testing(), + PreemptibleWhen::Always, + ) + .await + } + }); + + let fut2 = tokio::spawn({ + let concurrency = concurrency.dupe(); + let barrier = barrier1.dupe(); + let b = block2.dupe(); + + async move { + concurrency + .enter( + EventDispatcher::null_sink_with_trace(traces2), + &NoChanges, + |_| async move { + barrier.wait().await; + let _g = b.read().await; + }, + false, + Vec::new(), + None, + false, + ExplicitCancellationContext::testing(), + PreemptibleWhen::Never, + ) + .await + } + }); + + barrier1.wait().await; + + let fut3 = tokio::spawn({ + let concurrency = concurrency.dupe(); + let barrier = barrier2.dupe(); + let arrived = arrived.dupe(); + + async move { + barrier.wait().await; + concurrency + .enter( + EventDispatcher::null_sink_with_trace(traces_different), + &CtxDifferent, + |_| async move { + arrived.store(true, Ordering::Relaxed); + }, + false, + Vec::new(), + None, + false, + ExplicitCancellationContext::testing(), + PreemptibleWhen::Never, + ) + .await + } + }); + + barrier2.wait().await; + + assert!(!arrived.load(Ordering::Relaxed)); + + drop(blocked1); + let fut1_result = fut1.await?; + let fut1_error: buck2_error::Error = fut1_result.unwrap_err().into(); + assert!( + fut1_error + .tags() + .contains(&buck2_error::ErrorTag::DaemonPreempted), + ); + + assert!(!arrived.load(Ordering::Relaxed)); + + drop(blocked2); + fut2.await??; + fut3.await??; + + Ok(()) + } + #[derive(Clone, Dupe, Derivative, Allocative, Display)] #[derivative(Hash, Eq, PartialEq, Debug)] - #[display(fmt = "CleanupTestKey")] + #[display("CleanupTestKey")] struct CleanupTestKey { #[derivative(Debug = "ignore", Hash = "ignore", PartialEq = "ignore")] is_executing: Arc>, @@ -1192,7 +1349,6 @@ mod tests { impl Key for CleanupTestKey { type Value = (); - #[allow(clippy::await_holding_lock)] async fn compute( &self, _ctx: &mut DiceComputations, @@ -1229,9 +1385,8 @@ mod tests { concurrency .enter( EventDispatcher::null(), - &TestDiceDataProvider, &NoChanges, - |dice| async move { + |mut dice| async move { let compute = dice.compute(key).fuse(); let started = async { @@ -1257,6 
+1412,7 @@ mod tests { None, false, ExplicitCancellationContext::testing(), + PreemptibleWhen::Never, ) .await?; @@ -1265,7 +1421,6 @@ mod tests { concurrency .enter( EventDispatcher::null(), - &TestDiceDataProvider, &NoChanges, |_dice| async move { // The key should still be evaluating by now. @@ -1276,6 +1431,7 @@ mod tests { None, false, ExplicitCancellationContext::testing(), + PreemptibleWhen::Never, ) .await?; @@ -1284,7 +1440,6 @@ mod tests { concurrency .enter( EventDispatcher::null(), - &TestDiceDataProvider, &CtxDifferent, |_dice| async move { assert!(!key.is_executing.is_locked()); @@ -1294,6 +1449,7 @@ mod tests { None, false, ExplicitCancellationContext::testing(), + PreemptibleWhen::Never, ) .await?; @@ -1387,7 +1543,6 @@ mod tests { concurrency .enter( dispatcher, - &TestDiceDataProvider, &NoChanges, |_| async move { let _guard = mutex.try_lock().expect("Not exclusive!"); @@ -1402,6 +1557,7 @@ mod tests { exclusive_cmd, false, ExplicitCancellationContext::testing(), + PreemptibleWhen::Never, ) .await } @@ -1465,9 +1621,8 @@ mod tests { concurrency .enter( EventDispatcher::null(), - &TestDiceDataProvider, &CtxDifferent, - |dice| async move { + |mut dice| async move { // NOTE: We need to actually compute something for DICE to be not-idle. dice.compute(&K).await.unwrap(); tokio::task::yield_now().await; @@ -1477,11 +1632,12 @@ mod tests { None, false, ExplicitCancellationContext::testing(), + PreemptibleWhen::Never, ) .await }); - futures::future::try_join_all(tasks).await?; + buck2_util::future::try_join_all(tasks).await?; assert!(!concurrency.data.lock().await.previously_tainted); @@ -1490,40 +1646,40 @@ mod tests { #[tokio::test] async fn test_updates_are_synchronized() -> anyhow::Result<()> { + async fn wait_on(b: &AtomicBool) { + while !b.load(Ordering::Relaxed) { + tokio::task::yield_now().await; + } + } + let dice = Dice::builder().build(DetectCycles::Enabled); let concurrency = ConcurrencyHandler::new(dice.dupe()); struct Updater { - should_be_able_to_run: AtomicBool, - arrived_update: Semaphore, + // Set when the updater enters the update function + on_enter: AtomicBool, + // Set to indicate that the updater should exit its update function + allow_exit: AtomicBool, } - #[async_trait] impl DiceUpdater for Updater { async fn update( &self, ctx: DiceTransactionUpdater, - _user_data: &mut UserComputationData, - ) -> anyhow::Result { - self.arrived_update.add_permits(1); - tokio::task::yield_now().await; - - if self.should_be_able_to_run.load(Ordering::SeqCst) { - Ok(ctx) - } else { - panic!("shouldn't be running") - } + ) -> anyhow::Result<(DiceTransactionUpdater, UserComputationData)> { + self.on_enter.store(true, Ordering::Relaxed); + wait_on(&self.allow_exit).await; + Ok((ctx, Default::default())) } } let updater1 = Updater { - should_be_able_to_run: AtomicBool::new(false), - arrived_update: Semaphore::new(0), + on_enter: AtomicBool::new(false), + allow_exit: AtomicBool::new(false), }; let fut1 = concurrency.enter( EventDispatcher::null(), - &TestDiceDataProvider, &updater1, |_dice| async move { tokio::task::yield_now().await; @@ -1533,16 +1689,18 @@ mod tests { None, false, ExplicitCancellationContext::testing(), + PreemptibleWhen::Never, ); pin_mut!(fut1); let updater2 = Updater { - should_be_able_to_run: AtomicBool::new(false), - arrived_update: Semaphore::new(0), + on_enter: AtomicBool::new(false), + // We can set this to true immediately as we don't ever need the + // second one to wait on anything + allow_exit: AtomicBool::new(true), }; let fut2 = 
concurrency.enter( EventDispatcher::null(), - &TestDiceDataProvider, &updater2, |_dice| async move { tokio::task::yield_now().await; @@ -1552,26 +1710,32 @@ mod tests { None, false, ExplicitCancellationContext::testing(), + PreemptibleWhen::Never, ); pin_mut!(fut2); - // poll once will arrive at the `yield` in updater - assert_matches!(poll!(&mut fut1), Poll::Pending); - let _g = updater1.arrived_update.acquire().await?; - - // polling multiple times on the second command will all be pending and not complete - assert_matches!(poll!(&mut fut2), Poll::Pending); - assert_matches!(poll!(&mut fut2), Poll::Pending); - assert_matches!(poll!(&mut fut2), Poll::Pending); - assert_matches!(poll!(&mut fut2), Poll::Pending); + // Wait for the first updater's update to be entered + tokio::select! { + _ = &mut fut1 => panic!("First should not be able to exit yet"), + _ = wait_on(&updater1.on_enter) => (), + } - // now make the first command runnable - updater1.should_be_able_to_run.store(true, Ordering::SeqCst); - fut1.await?; + // Now the first updater is blocked within its update function. Poll the + // second one many times so that it makes as much progress as it can + for _ in 0..100 { + assert_matches!(poll!(&mut fut2), Poll::Pending); + } + // But it should not have entered its update yet + assert!( + !updater2.on_enter.load(Ordering::Relaxed), + "Updaters are not correctly synchronized" + ); - // 1 is done and dropped, so `2` can now finish - updater2.should_be_able_to_run.store(true, Ordering::SeqCst); - fut2.await?; + // Now unblock the first one and let both finish + updater1.allow_exit.store(true, Ordering::Relaxed); + let (a, b) = tokio::join!(fut1, fut2); + a.unwrap(); + b.unwrap(); Ok(()) } diff --git a/app/buck2_server_ctx/src/ctx.rs b/app/buck2_server_ctx/src/ctx.rs index e7fb8fa0c4973..c563828674e12 100644 --- a/app/buck2_server_ctx/src/ctx.rs +++ b/app/buck2_server_ctx/src/ctx.rs @@ -12,29 +12,29 @@ use std::future::Future; use std::sync::Arc; use async_trait::async_trait; -use buck2_build_signals::BuildSignalsContext; -use buck2_build_signals::DeferredBuildSignals; -use buck2_build_signals::HasCriticalPathBackend; -use buck2_common::result::SharedResult; +use buck2_build_signals::env::BuildSignalsContext; +use buck2_build_signals::env::DeferredBuildSignals; +use buck2_build_signals::env::HasCriticalPathBackend; +use buck2_certs::validate::CertState; +use buck2_cli_proto::client_context::PreemptibleWhen; use buck2_core::fs::paths::file_name::FileName; use buck2_core::fs::project::ProjectRoot; use buck2_core::fs::project_rel_path::ProjectRelativePath; use buck2_core::fs::working_dir::WorkingDir; +use buck2_core::pattern::pattern::ParsedPattern; use buck2_core::pattern::pattern_type::ConfiguredProvidersPatternExtra; -use buck2_core::pattern::ParsedPattern; use buck2_data::CommandCriticalEnd; use buck2_data::CommandCriticalStart; use buck2_data::DiceCriticalSectionEnd; use buck2_data::DiceCriticalSectionStart; use buck2_events::dispatch::EventDispatcher; use buck2_execute::materialize::materializer::Materializer; +use buck2_futures::cancellation::ExplicitCancellationContext; use dice::DiceComputations; use dice::DiceTransaction; use dupe::Dupe; -use more_futures::cancellation::ExplicitCancellationContext; use crate::concurrency::ConcurrencyHandler; -use crate::concurrency::DiceDataProvider; use crate::concurrency::DiceUpdater; use crate::stderr_output_guard::StderrOutputGuard; @@ -50,10 +50,15 @@ pub trait ServerCommandContextTrait: Send + Sync { fn project_root(&self) -> &ProjectRoot; + 
fn cert_state(&self) -> CertState;
+
     fn materializer(&self) -> Arc<dyn Materializer>;
 
     /// exposes the dice for scoped access, but isn't intended to be callable by anyone
-    async fn dice_accessor(&self, private: PrivateStruct) -> SharedResult<DiceAccessor>;
+    async fn dice_accessor<'a>(
+        &'a self,
+        private: PrivateStruct,
+    ) -> buck2_error::Result<DiceAccessor<'a>>;
 
     fn events(&self) -> &EventDispatcher;
 
@@ -63,7 +68,7 @@ pub trait ServerCommandContextTrait: Send + Sync {
     async fn config_metadata(
         &self,
-        ctx: &DiceComputations,
+        ctx: &mut DiceComputations<'_>,
     ) -> anyhow::Result<HashMap<String, String>>;
 
     fn log_target_pattern(
         &self,
@@ -76,13 +81,13 @@ pub trait ServerCommandContextTrait: Send + Sync {
 }
 
 pub struct PrivateStruct(());
 
-pub struct DiceAccessor {
-    pub dice_handler: ConcurrencyHandler,
-    pub data: Box<dyn DiceDataProvider>,
-    pub setup: Box<dyn DiceUpdater>,
+pub struct DiceAccessor<'a> {
+    pub dice_handler: Arc<ConcurrencyHandler>,
+    pub setup: Box<dyn DiceUpdater + 'a>,
     pub is_nested_invocation: bool,
     pub sanitized_argv: Vec<String>,
     pub exit_when_different_state: bool,
+    pub preemptible: PreemptibleWhen,
     pub build_signals: Box<dyn DeferredBuildSignals>,
 }
 
@@ -129,11 +134,11 @@ impl ServerCommandDiceContext for dyn ServerCommandContextTrait + '_ {
     {
         let DiceAccessor {
             dice_handler,
-            data,
             setup,
             is_nested_invocation,
             sanitized_argv,
             exit_when_different_state,
+            preemptible,
             build_signals,
         } = self.dice_accessor(PrivateStruct(())).await?;
 
@@ -144,13 +149,12 @@ impl ServerCommandDiceContext for dyn ServerCommandContextTrait + '_ {
         dice_handler
             .enter(
                 self.events().dupe(),
-                &*data,
                 &*setup,
-                |dice| async move {
+                |mut dice| async move {
                     let events = self.events().dupe();
 
                     let request_metadata = self.request_metadata().await?;
-                    let config_metadata = self.config_metadata(&dice).await?;
+                    let config_metadata = self.config_metadata(&mut dice).await?;
 
                     events
                         .span_async(
@@ -159,7 +163,7 @@ impl ServerCommandDiceContext for dyn ServerCommandContextTrait + '_ {
                                 dice_version: dice.equality_token().to_string(),
                             },
                             async move {
-                                let res = buck2_build_signals::scope(
+                                let res = buck2_build_signals::env::scope(
                                     build_signals,
                                     self.events().dupe(),
                                     dice.per_transaction_data()
@@ -197,6 +201,7 @@ impl ServerCommandDiceContext for dyn ServerCommandContextTrait + '_ {
                 exclusive_cmd,
                 exit_when_different_state,
                 self.cancellation_context(),
+                preemptible,
             )
             .await,
         DiceCriticalSectionEnd {},
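For orientation, command implementations consume this plumbing through the `ServerCommandDiceContext` method whose internals are shown above; the sketch below shows the calling convention as I understand it from buck2, with a hypothetical command body:

    // Hypothetical command body (illustrative, not from this diff).
    async fn my_command(server_ctx: &dyn ServerCommandContextTrait) -> anyhow::Result<String> {
        server_ctx
            .with_dice_ctx(|server_ctx, mut dice| async move {
                // `dice` is a DiceTransaction obtained via ConcurrencyHandler::enter,
                // so commands with incompatible state have already been synchronized.
                let cells = dice.get_cell_resolver().await?;
                Ok(format!("root cell: {}", cells.root_cell()))
            })
            .await
    }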
diff --git a/app/buck2_server_ctx/src/global_cfg_options.rs b/app/buck2_server_ctx/src/global_cfg_options.rs
new file mode 100644
index 0000000000000..f6b0e35ff1f8f
--- /dev/null
+++ b/app/buck2_server_ctx/src/global_cfg_options.rs
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use buck2_cli_proto::TargetCfg;
+use buck2_common::dice::cells::HasCellResolver;
+use buck2_common::global_cfg_options::GlobalCfgOptions;
+use buck2_core::cells::CellResolver;
+use buck2_core::fs::project_rel_path::ProjectRelativePath;
+use buck2_core::pattern::pattern::ParsedPattern;
+use dice::DiceComputations;
+
+use crate::ctx::ServerCommandContextTrait;
+
+/// Extract target configuration components.
+pub async fn global_cfg_options_from_client_context(
+    target_cfg: &TargetCfg,
+    server_ctx: &dyn ServerCommandContextTrait,
+    dice_ctx: &mut DiceComputations<'_>,
+) -> anyhow::Result<GlobalCfgOptions> {
+    let cell_resolver: &CellResolver = &dice_ctx.get_cell_resolver().await?;
+    let working_dir: &ProjectRelativePath = server_ctx.working_dir();
+    let cwd = cell_resolver.get_cell_path(working_dir)?;
+    let cell_alias_resolver = dice_ctx.get_cell_alias_resolver(cwd.cell()).await?;
+    let target_platform = &target_cfg.target_platform;
+    let target_platform_label = if !target_platform.is_empty() {
+        Some(
+            ParsedPattern::parse_precise(
+                target_platform,
+                cwd.cell(),
+                cell_resolver,
+                &cell_alias_resolver,
+            )?
+            .as_target_label(target_platform)?,
+        )
+    } else {
+        None
+    };
+
+    Ok(GlobalCfgOptions {
+        target_platform: target_platform_label,
+        cli_modifiers: target_cfg.cli_modifiers.clone().into(),
+    })
+}
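The `LateBinding` registrations in the next file break the dependency cycle between this crate and the crates that implement the commands: the trait and a static slot live here, and an implementing crate fills the slot at startup. A hedged sketch of the pattern, assuming an `init` method with the shape used elsewhere in buck2; all names below are illustrative:

    use buck2_util::late_binding::LateBinding;

    pub trait Greeter: Send + Sync + 'static {
        fn greet(&self) -> String;
    }

    // Declared where the trait lives; nothing is registered yet.
    pub static GREETER: LateBinding<&'static dyn Greeter> = LateBinding::new("GREETER");

    // In the implementing crate, run once at process start:
    struct GreeterImpl;
    impl Greeter for GreeterImpl {
        fn greet(&self) -> String {
            "hello".to_owned()
        }
    }

    pub fn init_late_bindings() {
        GREETER.init(&GreeterImpl);
    }

    // At a call site, resolution fails only if init was never called:
    fn use_it() -> anyhow::Result<String> {
        Ok(GREETER.get()?.greet())
    }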
diff --git a/app/buck2_server_ctx/src/late_bindings.rs b/app/buck2_server_ctx/src/late_bindings.rs
new file mode 100644
index 0000000000000..ceb71e8f278b0
--- /dev/null
+++ b/app/buck2_server_ctx/src/late_bindings.rs
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use async_trait::async_trait;
+use buck2_cli_proto::new_generic::CompleteRequest;
+use buck2_cli_proto::new_generic::CompleteResponse;
+use buck2_cli_proto::new_generic::DebugEvalRequest;
+use buck2_cli_proto::new_generic::DebugEvalResponse;
+use buck2_cli_proto::new_generic::ExpandExternalCellRequest;
+use buck2_cli_proto::new_generic::ExpandExternalCellResponse;
+use buck2_cli_proto::new_generic::ExplainRequest;
+use buck2_cli_proto::new_generic::ExplainResponse;
+use buck2_util::late_binding::LateBinding;
+
+use crate::ctx::ServerCommandContextTrait;
+use crate::partial_result_dispatcher::NoPartialResult;
+use crate::partial_result_dispatcher::PartialResultDispatcher;
+
+#[async_trait]
+pub trait OtherServerCommands: Send + Sync + 'static {
+    async fn build(
+        &self,
+        ctx: &dyn ServerCommandContextTrait,
+        partial_result_dispatcher: PartialResultDispatcher<NoPartialResult>,
+        req: buck2_cli_proto::BuildRequest,
+    ) -> anyhow::Result<buck2_cli_proto::BuildResponse>;
+    async fn install(
+        &self,
+        ctx: &dyn ServerCommandContextTrait,
+        partial_result_dispatcher: PartialResultDispatcher<NoPartialResult>,
+        req: buck2_cli_proto::InstallRequest,
+    ) -> anyhow::Result<buck2_cli_proto::InstallResponse>;
+    async fn uquery(
+        &self,
+        ctx: &dyn ServerCommandContextTrait,
+        partial_result_dispatcher: PartialResultDispatcher<buck2_cli_proto::StdoutBytes>,
+        req: buck2_cli_proto::UqueryRequest,
+    ) -> anyhow::Result<buck2_cli_proto::UqueryResponse>;
+    async fn cquery(
+        &self,
+        ctx: &dyn ServerCommandContextTrait,
+        partial_result_dispatcher: PartialResultDispatcher<buck2_cli_proto::StdoutBytes>,
+        req: buck2_cli_proto::CqueryRequest,
+    ) -> anyhow::Result<buck2_cli_proto::CqueryResponse>;
+    async fn aquery(
+        &self,
+        ctx: &dyn ServerCommandContextTrait,
+        partial_result_dispatcher: PartialResultDispatcher<buck2_cli_proto::StdoutBytes>,
+        req: buck2_cli_proto::AqueryRequest,
+    ) -> anyhow::Result<buck2_cli_proto::AqueryResponse>;
+    async fn targets(
+        &self,
+        ctx: &dyn ServerCommandContextTrait,
+        partial_result_dispatcher: PartialResultDispatcher<buck2_cli_proto::StdoutBytes>,
+        req: buck2_cli_proto::TargetsRequest,
+    ) -> anyhow::Result<buck2_cli_proto::TargetsResponse>;
+    async fn targets_show_outputs(
+        &self,
+        ctx: &dyn ServerCommandContextTrait,
+        partial_result_dispatcher: PartialResultDispatcher<NoPartialResult>,
+        req: buck2_cli_proto::TargetsRequest,
+    ) -> anyhow::Result<buck2_cli_proto::TargetsShowOutputsResponse>;
+    async fn ctargets(
+        &self,
+        ctx: &dyn ServerCommandContextTrait,
+        partial_result_dispatcher: PartialResultDispatcher<NoPartialResult>,
+        req: buck2_cli_proto::ConfiguredTargetsRequest,
+    ) -> anyhow::Result<buck2_cli_proto::ConfiguredTargetsResponse>;
+    async fn complete(
+        &self,
+        ctx: &dyn ServerCommandContextTrait,
+        partial_result_dispatcher: PartialResultDispatcher<NoPartialResult>,
+        req: CompleteRequest,
+    ) -> anyhow::Result<CompleteResponse>;
+    async fn debug_eval(
+        &self,
+        ctx: &dyn ServerCommandContextTrait,
+        req: DebugEvalRequest,
+    ) -> anyhow::Result<DebugEvalResponse>;
+    async fn explain(
+        &self,
+        ctx: &dyn ServerCommandContextTrait,
+        partial_result_dispatcher: PartialResultDispatcher<NoPartialResult>,
+        req: ExplainRequest,
+    ) -> anyhow::Result<ExplainResponse>;
+    async fn expand_external_cell(
+        &self,
+        ctx: &dyn ServerCommandContextTrait,
+        partial_result_dispatcher: PartialResultDispatcher<NoPartialResult>,
+        req: ExpandExternalCellRequest,
+    ) -> anyhow::Result<ExpandExternalCellResponse>;
+}
+
+pub static OTHER_SERVER_COMMANDS: LateBinding<&'static dyn OtherServerCommands> =
+    LateBinding::new("OTHER_SERVER_COMMANDS");
+
+#[async_trait]
+pub trait DocsServerComamnd: Send + Sync + 'static {
+    async fn docs(
+        &self,
+        ctx: &dyn ServerCommandContextTrait,
+        partial_result_dispatcher: PartialResultDispatcher<NoPartialResult>,
+        req: buck2_cli_proto::new_generic::DocsRequest,
+    ) -> anyhow::Result<buck2_cli_proto::new_generic::DocsResponse>;
+}
+
+pub static DOCS_SERVER_COMMAND: LateBinding<&'static dyn DocsServerComamnd> =
+    LateBinding::new("DOCS_SERVER_COMMAND");
+
+#[async_trait]
+pub trait AuditServerCommand: Send + Sync + 'static {
+    async fn audit(
+        &self,
+        ctx: &dyn ServerCommandContextTrait,
+        partial_result_dispatcher: PartialResultDispatcher<buck2_cli_proto::StdoutBytes>,
+        req: buck2_cli_proto::GenericRequest,
+    ) -> anyhow::Result<buck2_cli_proto::GenericResponse>;
+}
+
+pub static AUDIT_SERVER_COMMAND: LateBinding<&'static dyn AuditServerCommand> =
+    LateBinding::new("AUDIT_SERVER_COMMAND");
+
+#[async_trait]
+pub trait StarlarkServerCommand: Send + Sync + 'static {
+    async fn starlark(
+        &self,
+        ctx: &dyn ServerCommandContextTrait,
+        partial_result_dispatcher: PartialResultDispatcher<buck2_cli_proto::StdoutBytes>,
+        req: buck2_cli_proto::GenericRequest,
+    ) -> anyhow::Result<buck2_cli_proto::GenericResponse>;
+}
+
+pub static STARLARK_SERVER_COMMAND: LateBinding<&'static dyn StarlarkServerCommand> =
+    LateBinding::new("STARLARK_SERVER_COMMAND");
diff --git a/app/buck2_server_ctx/src/lib.rs b/app/buck2_server_ctx/src/lib.rs
index dda22e647bed5..e3a583ca1c261 100644
--- a/app/buck2_server_ctx/src/lib.rs
+++ b/app/buck2_server_ctx/src/lib.rs
@@ -7,16 +7,22 @@
  * of this source tree.
  */
 
+#![feature(let_chains)]
+#![feature(error_generic_member_access)]
+#![feature(used_with_arg)]
+#![feature(anonymous_lifetime_in_impl_trait)]
+
 pub mod bxl;
-pub mod command_end;
+pub mod commands;
 pub mod concurrency;
 pub mod ctx;
-pub mod logging;
-pub mod other_server_commands;
+pub mod global_cfg_options;
+pub mod late_bindings;
 pub mod partial_result_dispatcher;
-pub mod pattern;
+pub mod pattern_parse_and_resolve;
 pub mod stderr_output_guard;
 pub mod stdout_partial_output;
 pub mod streaming_request_handler;
+pub mod target_resolution_config;
 pub mod template;
 pub mod test_command;
diff --git a/app/buck2_server_ctx/src/other_server_commands.rs b/app/buck2_server_ctx/src/other_server_commands.rs
deleted file mode 100644
index 1408d9de6d25b..0000000000000
--- a/app/buck2_server_ctx/src/other_server_commands.rs
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */ - -use async_trait::async_trait; -use buck2_cli_proto::new_generic::DebugEvalRequest; -use buck2_cli_proto::new_generic::DebugEvalResponse; -use buck2_util::late_binding::LateBinding; - -use crate::ctx::ServerCommandContextTrait; -use crate::partial_result_dispatcher::NoPartialResult; -use crate::partial_result_dispatcher::PartialResultDispatcher; - -#[async_trait] -pub trait OtherServerCommands: Send + Sync + 'static { - async fn build( - &self, - ctx: &dyn ServerCommandContextTrait, - partial_result_dispatcher: PartialResultDispatcher, - req: buck2_cli_proto::BuildRequest, - ) -> anyhow::Result; - async fn install( - &self, - ctx: &dyn ServerCommandContextTrait, - partial_result_dispatcher: PartialResultDispatcher, - req: buck2_cli_proto::InstallRequest, - ) -> anyhow::Result; - async fn uquery( - &self, - ctx: &dyn ServerCommandContextTrait, - partial_result_dispatcher: PartialResultDispatcher, - req: buck2_cli_proto::UqueryRequest, - ) -> anyhow::Result; - async fn cquery( - &self, - ctx: &dyn ServerCommandContextTrait, - partial_result_dispatcher: PartialResultDispatcher, - req: buck2_cli_proto::CqueryRequest, - ) -> anyhow::Result; - async fn aquery( - &self, - ctx: &dyn ServerCommandContextTrait, - partial_result_dispatcher: PartialResultDispatcher, - req: buck2_cli_proto::AqueryRequest, - ) -> anyhow::Result; - async fn targets( - &self, - ctx: &dyn ServerCommandContextTrait, - partial_result_dispatcher: PartialResultDispatcher, - req: buck2_cli_proto::TargetsRequest, - ) -> anyhow::Result; - async fn targets_show_outputs( - &self, - ctx: &dyn ServerCommandContextTrait, - partial_result_dispatcher: PartialResultDispatcher, - req: buck2_cli_proto::TargetsRequest, - ) -> anyhow::Result; - async fn ctargets( - &self, - ctx: &dyn ServerCommandContextTrait, - partial_result_dispatcher: PartialResultDispatcher, - req: buck2_cli_proto::ConfiguredTargetsRequest, - ) -> anyhow::Result; - async fn debug_eval( - &self, - ctx: &dyn ServerCommandContextTrait, - req: DebugEvalRequest, - ) -> anyhow::Result; -} - -pub static OTHER_SERVER_COMMANDS: LateBinding<&'static dyn OtherServerCommands> = - LateBinding::new("OTHER_SERVER_COMMANDS"); diff --git a/app/buck2_server_ctx/src/pattern.rs b/app/buck2_server_ctx/src/pattern.rs deleted file mode 100644 index 3a115f6aaa33d..0000000000000 --- a/app/buck2_server_ctx/src/pattern.rs +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -use buck2_cli_proto::ClientContext; -use buck2_common::dice::cells::HasCellResolver; -use buck2_common::target_aliases::BuckConfigTargetAliasResolver; -use buck2_common::target_aliases::HasTargetAliasResolver; -use buck2_core::cells::cell_path::CellPath; -use buck2_core::cells::CellResolver; -use buck2_core::fs::project_rel_path::ProjectRelativePath; -use buck2_core::pattern::pattern_type::PatternType; -use buck2_core::pattern::ParsedPattern; -use buck2_core::target::label::TargetLabel; -use dice::DiceComputations; -use gazebo::prelude::*; - -use crate::ctx::ServerCommandContextTrait; - -pub struct PatternParser { - cell_resolver: CellResolver, - cwd: CellPath, - target_alias_resolver: BuckConfigTargetAliasResolver, -} - -impl PatternParser { - pub async fn new( - ctx: &mut DiceComputations, - cwd: &ProjectRelativePath, - ) -> anyhow::Result { - let cell_resolver = ctx.get_cell_resolver().await?; - - let cwd = cell_resolver.get_cell_path(&cwd)?; - let cell_name = cwd.cell(); - - let target_alias_resolver = ctx.target_alias_resolver_for_cell(cell_name).await?; - - Ok(Self { - cell_resolver, - cwd, - target_alias_resolver, - }) - } - - pub fn parse_pattern(&self, pattern: &str) -> anyhow::Result> { - ParsedPattern::parse_relaxed( - &self.target_alias_resolver, - self.cwd.as_ref(), - pattern, - &self.cell_resolver, - ) - } -} - -/// Parse target patterns out of command line arguments. -/// -/// The format allowed here is more relaxed than in build files and elsewhere, so only use this -/// with strings passed by the user on the CLI. -/// See `ParsedPattern::parse_relaxed` for details. -pub async fn parse_patterns_from_cli_args( - ctx: &mut DiceComputations, - target_patterns: &[buck2_data::TargetPattern], - cwd: &ProjectRelativePath, -) -> anyhow::Result>> { - let parser = PatternParser::new(ctx, cwd).await?; - - target_patterns.try_map(|value| parser.parse_pattern(&value.value)) -} - -/// Extract target configuration (platform) label from [`ClientContext`]. -pub async fn target_platform_from_client_context( - client_ctx: &ClientContext, - server_ctx: &dyn ServerCommandContextTrait, - dice_ctx: &mut DiceComputations, -) -> anyhow::Result> { - target_platform_from_client_context_impl( - client_ctx, - &dice_ctx.get_cell_resolver().await?, - server_ctx.working_dir(), - ) - .await -} - -async fn target_platform_from_client_context_impl( - client_context: &ClientContext, - cell_resolver: &CellResolver, - working_dir: &ProjectRelativePath, -) -> anyhow::Result> { - let cwd = cell_resolver.get_cell_path(working_dir)?; - - let target_platform = &client_context.target_platform; - if !target_platform.is_empty() { - Ok(Some( - ParsedPattern::parse_precise(target_platform, cwd.cell(), cell_resolver)? - .as_target_label(target_platform)?, - )) - } else { - Ok(None) - } -} diff --git a/app/buck2_server_ctx/src/pattern_parse_and_resolve.rs b/app/buck2_server_ctx/src/pattern_parse_and_resolve.rs new file mode 100644 index 0000000000000..e536211d43d69 --- /dev/null +++ b/app/buck2_server_ctx/src/pattern_parse_and_resolve.rs @@ -0,0 +1,66 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */
+
+use buck2_common::pattern::parse_from_cli;
+use buck2_core::fs::project_rel_path::ProjectRelativePath;
+use buck2_core::pattern::pattern::TargetLabelWithExtra;
+use buck2_core::pattern::pattern_type::PatternType;
+use buck2_core::pattern::pattern_type::ProvidersPatternExtra;
+use buck2_core::provider::label::ProvidersLabel;
+use buck2_core::target::label::label::TargetLabel;
+use buck2_node::nodes::frontend::TargetGraphCalculation;
+use dice::DiceComputations;
+use dupe::Dupe;
+use gazebo::prelude::VecExt;
+
+pub async fn parse_and_resolve_patterns_to_targets_from_cli_args<T: PatternType>(
+    ctx: &mut DiceComputations<'_>,
+    target_patterns: &[String],
+    cwd: &ProjectRelativePath,
+) -> anyhow::Result<Vec<TargetLabelWithExtra<T>>> {
+    let resolved_pattern =
+        parse_from_cli::parse_and_resolve_patterns_from_cli_args::<T>(ctx, target_patterns, cwd)
+            .await?;
+    let mut result_targets = Vec::new();
+    for (package, spec) in resolved_pattern.specs {
+        match spec {
+            buck2_core::pattern::pattern::PackageSpec::Targets(targets) => {
+                result_targets.extend(targets.into_map(|(name, extra)| TargetLabelWithExtra {
+                    target_label: TargetLabel::new(package.dupe(), name.as_ref()),
+                    extra,
+                }))
+            }
+            buck2_core::pattern::pattern::PackageSpec::All => {
+                // Note this code is not parallel. Careful if used in performance sensitive code.
+                let interpreter_results = ctx.get_interpreter_results(package.dupe()).await?;
+                result_targets.extend(interpreter_results.targets().keys().map(|target| {
+                    TargetLabelWithExtra {
+                        target_label: TargetLabel::new(package.dupe(), target),
+                        extra: T::default(),
+                    }
+                }));
+            }
+        }
+    }
+    Ok(result_targets)
+}
+
+pub async fn parse_and_resolve_provider_labels_from_cli_args(
+    ctx: &mut DiceComputations<'_>,
+    target_patterns: &[String],
+    cwd: &ProjectRelativePath,
+) -> anyhow::Result<Vec<ProvidersLabel>> {
+    let targets = parse_and_resolve_patterns_to_targets_from_cli_args::<ProvidersPatternExtra>(
+        ctx,
+        target_patterns,
+        cwd,
+    )
+    .await?;
+    Ok(targets.into_map(|t| t.into_providers_label()))
+}
diff --git a/app/buck2_server_ctx/src/stderr_output_guard.rs b/app/buck2_server_ctx/src/stderr_output_guard.rs
index 3638f2081784e..e6222166903aa 100644
--- a/app/buck2_server_ctx/src/stderr_output_guard.rs
+++ b/app/buck2_server_ctx/src/stderr_output_guard.rs
@@ -13,7 +13,7 @@ use std::io::Write;
 use std::marker::PhantomData;
 use std::str;
 
-use buck2_core::env_helper::EnvHelper;
+use buck2_core::buck2_env_anyhow;
 use buck2_events::dispatch::EventDispatcher;
 use dupe::Dupe;
 
@@ -71,8 +71,7 @@ impl StderrOutputWriter {
     fn get_chunk_size() -> anyhow::Result<usize> {
         // protobuf recommends each message should be under 1MB
         const DEFAULT_CHUNK_SIZE: usize = 1024 * 1024;
-        static CHUNK_SIZE: EnvHelper<usize> = EnvHelper::new("BUCK2_DEBUG_RAWOUTPUT_CHUNK_SIZE");
-        Ok(CHUNK_SIZE.get_copied()?.unwrap_or(DEFAULT_CHUNK_SIZE))
+        buck2_env_anyhow!("BUCK2_DEBUG_RAWOUTPUT_CHUNK_SIZE", type=usize, default=DEFAULT_CHUNK_SIZE)
     }
 
     /// Given a complete, valid UTF-8 string, truncate it to be no longer than the given limit.
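The `buck2_env_anyhow!` call above replaces the old `EnvHelper`: it reads, parses, and caches an environment variable with a typed default. A plain-Rust approximation of what the call site computes, for illustration only (the real macro also caches the parsed value, which this sketch does not):

    // Approximation of buck2_env_anyhow!("BUCK2_DEBUG_RAWOUTPUT_CHUNK_SIZE", type=usize, default=...).
    fn chunk_size_from_env() -> anyhow::Result<usize> {
        const DEFAULT_CHUNK_SIZE: usize = 1024 * 1024; // keep each protobuf message under 1MB
        match std::env::var("BUCK2_DEBUG_RAWOUTPUT_CHUNK_SIZE") {
            Ok(s) => Ok(s.parse::<usize>()?),
            Err(std::env::VarError::NotPresent) => Ok(DEFAULT_CHUNK_SIZE),
            Err(e) => Err(e.into()),
        }
    }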
diff --git a/app/buck2_server_ctx/src/streaming_request_handler.rs b/app/buck2_server_ctx/src/streaming_request_handler.rs index 848da07a95ad0..5749d6ee0daea 100644 --- a/app/buck2_server_ctx/src/streaming_request_handler.rs +++ b/app/buck2_server_ctx/src/streaming_request_handler.rs @@ -17,7 +17,7 @@ use futures::Stream; use pin_project::pin_project; use tonic::Status; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum StreamingRequestError { #[error("Request returned error status: {0}")] GrpcStatus(Status), diff --git a/app/buck2_server_ctx/src/target_resolution_config.rs b/app/buck2_server_ctx/src/target_resolution_config.rs new file mode 100644 index 0000000000000..ca4b617432d62 --- /dev/null +++ b/app/buck2_server_ctx/src/target_resolution_config.rs @@ -0,0 +1,140 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use buck2_cli_proto::TargetCfg; +use buck2_common::global_cfg_options::GlobalCfgOptions; +use buck2_core::configuration::bound_id::BoundConfigurationId; +use buck2_core::configuration::data::ConfigurationData; +use buck2_core::pattern::pattern::TargetLabelWithExtra; +use buck2_core::pattern::pattern_type::ConfigurationPredicate; +use buck2_core::pattern::pattern_type::ConfiguredTargetPatternExtra; +use buck2_core::provider::label::ConfiguredProvidersLabel; +use buck2_core::provider::label::ProvidersLabel; +use buck2_core::target::configured_target_label::ConfiguredTargetLabel; +use buck2_core::target::label::label::TargetLabel; +use buck2_node::configured_universe::CqueryUniverse; +use buck2_node::configured_universe::UNIVERSE_FROM_LITERALS; +use buck2_node::target_calculation::ConfiguredTargetCalculation; +use dice::DiceComputations; +use gazebo::prelude::VecExt; + +use crate::ctx::ServerCommandContextTrait; +use crate::global_cfg_options::global_cfg_options_from_client_context; + +#[derive(Debug, buck2_error::Error)] +enum PatternNotSupportedError { + #[error("Builtin configurations are not supported: `{0}`")] + BuiltinConfigurationsNotSupported(String), + #[error( + "Patterns with configuration label without configuration hash are not supported: `{0}`" + )] + ConfigurationLabelWithoutHashNotSupported(String), +} + +pub enum TargetResolutionConfig { + /// Resolve using target platform. + Default(GlobalCfgOptions), + /// Resolve in the universe. 
+    Universe(CqueryUniverse),
+}
+
+impl TargetResolutionConfig {
+    pub async fn from_args(
+        ctx: &mut DiceComputations<'_>,
+        target_cfg: &TargetCfg,
+        server_ctx: &dyn ServerCommandContextTrait,
+        target_universe: &[String],
+    ) -> anyhow::Result<TargetResolutionConfig> {
+        let global_cfg_options =
+            global_cfg_options_from_client_context(target_cfg, server_ctx, ctx).await?;
+        if target_universe.is_empty() {
+            Ok(TargetResolutionConfig::Default(global_cfg_options))
+        } else {
+            Ok(TargetResolutionConfig::Universe(
+                (UNIVERSE_FROM_LITERALS.get()?)(
+                    ctx,
+                    server_ctx.working_dir(),
+                    &target_universe,
+                    global_cfg_options,
+                )
+                .await?,
+            ))
+        }
+    }
+
+    pub async fn get_configured_target(
+        &self,
+        ctx: &mut DiceComputations<'_>,
+        label: &TargetLabel,
+    ) -> anyhow::Result<Vec<ConfiguredTargetLabel>> {
+        match self {
+            TargetResolutionConfig::Default(global_cfg_options) => Ok(vec![
+                ctx.get_configured_target(label, global_cfg_options).await?,
+            ]),
+            TargetResolutionConfig::Universe(universe) => {
+                // TODO(nga): whoever called this function,
+                //   they may have resolved pattern unnecessarily.
+                Ok(universe.get_target_label(label))
+            }
+        }
+    }
+
+    pub async fn get_configured_provider_label(
+        &self,
+        ctx: &mut DiceComputations<'_>,
+        label: &ProvidersLabel,
+    ) -> anyhow::Result<Vec<ConfiguredProvidersLabel>> {
+        Ok(self
+            .get_configured_target(ctx, label.target())
+            .await?
+            .into_map(|configured_target_label| {
+                ConfiguredProvidersLabel::new(configured_target_label, label.name().clone())
+            }))
+    }
+
+    pub async fn get_configured_targets_for_configured_target_literals(
+        &self,
+        ctx: &mut DiceComputations<'_>,
+        label: &TargetLabelWithExtra<ConfiguredTargetPatternExtra>,
+    ) -> anyhow::Result<Vec<ConfiguredTargetLabel>> {
+        let TargetLabelWithExtra {
+            target_label,
+            extra,
+        } = &label;
+        match &extra.cfg {
+            ConfigurationPredicate::Any => self.get_configured_target(ctx, &target_label).await,
+            ConfigurationPredicate::Builtin(p) => Err(
+                PatternNotSupportedError::BuiltinConfigurationsNotSupported(p.to_string()).into(),
+            ),
+            ConfigurationPredicate::Bound(label, None) => Err(
+                PatternNotSupportedError::ConfigurationLabelWithoutHashNotSupported(
+                    label.to_string(),
+                )
+                .into(),
+            ),
+            ConfigurationPredicate::Bound(label, Some(hash)) => {
+                let cfg = ConfigurationData::lookup_bound(BoundConfigurationId {
+                    label: label.clone(),
+                    hash: hash.clone(),
+                })?;
+                let configured = target_label.configure(cfg);
+                match self {
+                    TargetResolutionConfig::Default(_) => Ok(vec![configured]),
+                    TargetResolutionConfig::Universe(universe) => {
+                        if universe.contains(&configured) {
+                            Ok(vec![configured])
+                        } else {
+                            Ok(Vec::new())
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
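A sketch of how a command might drive `TargetResolutionConfig` end to end, using only the signatures defined above; the surrounding command context is hypothetical:

    // Hypothetical driver code for TargetResolutionConfig (illustrative).
    async fn resolve_for_command(
        ctx: &mut DiceComputations<'_>,
        server_ctx: &dyn ServerCommandContextTrait,
        target_cfg: &TargetCfg,
        universe: &[String],
        label: &ProvidersLabel,
    ) -> anyhow::Result<Vec<ConfiguredProvidersLabel>> {
        // With an empty universe this resolves against the target platform;
        // otherwise every match must come from the cquery universe.
        let resolution =
            TargetResolutionConfig::from_args(ctx, target_cfg, server_ctx, universe).await?;
        resolution.get_configured_provider_label(ctx, label).await
    }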
diff --git a/app/buck2_server_ctx/src/template.rs b/app/buck2_server_ctx/src/template.rs
index ac0f697fba84a..4841e56f8d5ac 100644
--- a/app/buck2_server_ctx/src/template.rs
+++ b/app/buck2_server_ctx/src/template.rs
@@ -8,14 +8,14 @@
  */
 
 use async_trait::async_trait;
+use buck2_core::logging::log_file::TracingLogFile;
 use buck2_events::dispatch::span_async;
 use buck2_execute::materialize::materializer::HasMaterializer;
 use dice::DiceTransaction;
 
-use crate::command_end::command_end_ext;
+use crate::commands::command_end_ext;
 use crate::ctx::ServerCommandContextTrait;
 use crate::ctx::ServerCommandDiceContext;
-use crate::logging::TracingLogFile;
 use crate::partial_result_dispatcher::PartialResultDispatcher;
 
 /// Typical server command with DICE and span.
@@ -37,7 +37,7 @@ pub trait ServerCommandTemplate: Send + Sync {
     }
 
     /// Create end event. Called after command is invoked.
-    fn end_event(&self, _response: &anyhow::Result<Self::Response>) -> Self::EndEvent {
+    fn end_event(&self, _response: &buck2_error::Result<Self::Response>) -> Self::EndEvent {
         Self::EndEvent::default()
     }
@@ -51,6 +51,15 @@ pub trait ServerCommandTemplate: Send + Sync {
         None
     }
 
+    /// Additional errors that should be reported via the invocation record, even if the command
+    /// successfully produces a response.
+    fn additional_telemetry_errors(
+        &self,
+        _response: &Self::Response,
+    ) -> Vec<buck2_data::ErrorReport> {
+        Vec::new()
+    }
+
     /// Command implementation.
     async fn command(
         &self,
@@ -86,11 +95,15 @@ pub async fn run_server_command(
             },
             command.exclusive_command_name(),
         )
-        .await;
-        let end_event = command_end_ext(&result, command.end_event(&result), |result| {
-            command.is_success(result)
-        });
-        (result, end_event)
+        .await
+        .map_err(Into::into);
+        let end_event = command_end_ext(
+            &result,
+            command.end_event(&result),
+            |result| command.is_success(result),
+            |result| command.additional_telemetry_errors(result),
+        );
+        (result.map_err(Into::into), end_event)
     })
     .await
}
diff --git a/app/buck2_server_starlark_debug/BUCK b/app/buck2_server_starlark_debug/BUCK
index 604088640d858..fba1e394b9583 100644
--- a/app/buck2_server_starlark_debug/BUCK
+++ b/app/buck2_server_starlark_debug/BUCK
@@ -1,5 +1,4 @@
 load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library")
-load("@fbsource//tools/build_defs:glob_defs.bzl", "glob")
 
 oncall("build_infra")
 
@@ -18,18 +17,17 @@ rust_library(
         "fbsource//third-party/rust:num_cpus",
         "fbsource//third-party/rust:serde",
         "fbsource//third-party/rust:serde_json",
-        "fbsource//third-party/rust:thiserror",
         "fbsource//third-party/rust:tokio",
         "fbsource//third-party/rust:tokio-stream",
         "fbsource//third-party/rust:tracing",
         "//buck2/app/buck2_cli_proto:buck2_cli_proto",
         "//buck2/app/buck2_core:buck2_core",
         "//buck2/app/buck2_data:buck2_data",
+        "//buck2/app/buck2_error:buck2_error",
         "//buck2/app/buck2_events:buck2_events",
         "//buck2/app/buck2_interpreter:buck2_interpreter",
         "//buck2/app/buck2_server_ctx:buck2_server_ctx",
         "//buck2/gazebo/dupe:dupe",
-        "//buck2/gazebo/gazebo:gazebo",
         "//buck2/starlark-rust/starlark:starlark",
     ],
)
diff --git a/app/buck2_server_starlark_debug/Cargo.toml b/app/buck2_server_starlark_debug/Cargo.toml
index c9a847fe7ee9f..3bee74fac6cf3 100644
--- a/app/buck2_server_starlark_debug/Cargo.toml
+++ b/app/buck2_server_starlark_debug/Cargo.toml
@@ -1,7 +1,9 @@
 [package]
 description = "Server implementation of starlark debug command"
 edition = "2021"
+license = { workspace = true }
 name = "buck2_server_starlark_debug"
+repository = { workspace = true }
 version = "0.1.0"
 
 [dependencies]
@@ -14,18 +16,17 @@ itertools = { workspace = true }
 num_cpus = { workspace = true }
 serde = { workspace = true }
 serde_json = { workspace = true }
-thiserror = { workspace = true }
 tokio = { workspace = true }
 tokio-stream = { workspace = true }
 tracing = { workspace = true }
 
 dupe = { workspace = true }
-gazebo = { workspace = true }
 starlark = { workspace = true }
 
 buck2_cli_proto = { workspace = true }
 buck2_core = { workspace = true }
 buck2_data = { workspace = true }
+buck2_error = { workspace = true }
 buck2_events = { workspace = true }
 buck2_interpreter = { workspace = true }
 buck2_server_ctx = { workspace = true }
diff --git a/app/buck2_server_starlark_debug/src/error.rs b/app/buck2_server_starlark_debug/src/error.rs
index 1712dd5edab39..b6f807e3548a1 100644
--- a/app/buck2_server_starlark_debug/src/error.rs
+++ b/app/buck2_server_starlark_debug/src/error.rs
@@ -7,10 +7,8 @@
  * of this source tree.
*/ -use thiserror::Error; - /// Errors from buck's starlark debugger -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] pub(crate) enum StarlarkDebuggerError { #[error("starlark debugger has not yet implemented this functionality")] Unimplemented, @@ -19,7 +17,7 @@ pub(crate) enum StarlarkDebuggerError { } /// Internal errors from buck's starlark debugger -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] pub(crate) enum StarlarkDebuggerInternalError { #[error("Internal error: debbugger server shutdown unexpectedly")] UnexpectedDebuggerShutdown, diff --git a/app/buck2_server_starlark_debug/src/lib.rs b/app/buck2_server_starlark_debug/src/lib.rs index 4e62552437761..596bc6a12c007 100644 --- a/app/buck2_server_starlark_debug/src/lib.rs +++ b/app/buck2_server_starlark_debug/src/lib.rs @@ -7,6 +7,7 @@ * of this source tree. */ +#![feature(error_generic_member_access)] #![allow(rustdoc::private_intra_doc_links)] //! Provides the (daemon-side) support for buck2's starlark debugging. @@ -67,6 +68,7 @@ mod dap_api; mod error; pub mod run; mod server; +mod variable_known_paths; /// A handle to the debugger server. #[derive(Debug, Clone, Dupe)] diff --git a/app/buck2_server_starlark_debug/src/run.rs b/app/buck2_server_starlark_debug/src/run.rs index 020e572e7bf6f..dc370fc49c2af 100644 --- a/app/buck2_server_starlark_debug/src/run.rs +++ b/app/buck2_server_starlark_debug/src/run.rs @@ -8,7 +8,7 @@ */ use buck2_events::dispatch::span_async; -use buck2_server_ctx::command_end::command_end; +use buck2_server_ctx::commands::command_end; use buck2_server_ctx::ctx::ServerCommandContextTrait; use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; use buck2_server_ctx::streaming_request_handler::StreamingRequestHandler; @@ -51,9 +51,11 @@ pub async fn run_dap_server_command( data: Some(buck2_data::StarlarkDebugAttachCommandStart {}.into()), }; span_async(start_event, async move { - let result = run_dap_server(ctx, partial_result_dispatcher, req).await; + let result = run_dap_server(ctx, partial_result_dispatcher, req) + .await + .map_err(Into::into); let end_event = command_end(&result, buck2_data::StarlarkDebugAttachCommandEnd {}); - (result, end_event) + (result.map_err(Into::into), end_event) }) .await } diff --git a/app/buck2_server_starlark_debug/src/server.rs b/app/buck2_server_starlark_debug/src/server.rs index b4d51371db767..20210ff17dc03 100644 --- a/app/buck2_server_starlark_debug/src/server.rs +++ b/app/buck2_server_starlark_debug/src/server.rs @@ -14,6 +14,7 @@ use std::sync::atomic::Ordering; use std::sync::Arc; use std::time::Duration; +use anyhow::Context; use buck2_core::fs::fs_util; use buck2_core::fs::paths::abs_norm_path::AbsNormPath; use buck2_core::fs::project::ProjectRoot; @@ -23,7 +24,6 @@ use buck2_interpreter::starlark_debug::StarlarkDebugController; use debugserver_types as dap; use dupe::Dupe; use futures::StreamExt; -use gazebo::prelude::*; use itertools::Itertools; use starlark::debug::prepare_dap_adapter; use starlark::debug::resolve_breakpoints; @@ -32,10 +32,11 @@ use starlark::debug::DapAdapterClient; use starlark::debug::DapAdapterEvalHook; use starlark::debug::ResolvedBreakpoints; use starlark::debug::StepKind; +use starlark::debug::VariablePath; use starlark::syntax::AstModule; use starlark::syntax::Dialect; use starlark::syntax::DialectTypes; -use thiserror::Error; +use starlark::StarlarkResultExt; use tokio::select; use tokio::sync::mpsc; use tokio::sync::oneshot; @@ -52,6 +53,7 @@ use crate::dap_api::ContinueArguments; use 
crate::dap_api::DebugServer;
 use crate::error::StarlarkDebuggerError;
 use crate::run::ToClientMessage;
+use crate::variable_known_paths::VariablesKnownPaths;
 use crate::BuckStarlarkDebuggerHandle;
 use crate::HandleData;
 use crate::HandleId;
@@ -64,21 +66,22 @@ fn capabilities() -> serde_json::Value {
     // debugserver_types is out of date and missing fields on Capabilities and so we just construct
     // a little json map explicitly ourselves.
     serde_json::json!({
-        "supports_configuration_done_request": true,
-        "supports_evaluate_for_hovers": true,
-        "supports_set_variable": true,
-        "supports_step_in_targets_request": true,
-        "supports_conditional_breakpoints": true,
-        "support_terminate_debuggee": false,
-        "support_suspend_debuggee": false,
+        "supportsConfigurationDoneRequest": true,
+        "supportsEvaluateForHovers": true,
+        "supportsSetVariable": true,
+        "supportsStepInTargetsRequest": true,
+        "supportsConditionalBreakpoints": true,
+        // note that some capability names use "support" and some "supports"; this follows the spec
+        "supportTerminateDebuggee": false,
+        "supportSuspendDebuggee": false,
         // This is different from starlark's `dap_capabilities`. The buck starlark debugger treats
         // each ongoing starlark Evaluation as a separate thread and handles requests appropriately.
-        "supports_single_thread_execution_requests": true,
+        "supportsSingleThreadExecutionRequests": true,
     })
 }
 
-#[derive(Debug, Error)]
+#[derive(Debug, buck2_error::Error)]
 enum DebuggerError {
     #[error("SetBreakpointsArguments invalid: {0:?}")]
     InvalidSetBreakpoints(dap::SetBreakpointsArguments),
@@ -283,9 +286,71 @@ struct ServerState {
     /// 100s of ids)
     free_pseudo_threads: BTreeSet<u32>,
     next_pseudo_thread: u32,
+
+    /// Keeps track of destructured local variables obtained by the debugger at a breakpoint.
+    /// This is required to satisfy the incremental nature of the DAP VariablesRequest:
+    /// variables are lazily fetched from the starlark evaluator and cached by thread id.
+    variables_by_thread: HashMap<u32, VariablesKnownPaths>,
 }
 
-static TOP_FRAME_LOCALS_ID: i64 = 2000;
+/// This type uses bit masking to pack (is_top_frame, thread_id, variable_id) into a single integer.
+/// Since the value is serialized as a JSON number, it is only safe to rely on 53 bits.
+/// thread_id is u32, but we cap it at 20 bits for now to simplify the DAP implementation.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+struct VariableId(i64);
+
+impl VariableId {
+    const MASK_53_BITS: i64 = (1 << 53) - 1;
+
+    pub fn new(top_frame: bool, thread_id: u32, variable_id: u32) -> anyhow::Result<Self> {
+        if thread_id > 0xFFFFF {
+            return Err(anyhow::Error::msg(format!(
+                "Thread ID exceeds 20-bit limit: max is 0xFFFFF, received {}",
+                thread_id
+            )));
+        }
+        let top_frame_flag = (if top_frame { 1 } else { 0 }) << 52;
+        let thread_id_part = ((thread_id as i64) << 32) & 0xFFFFF00000000;
+        Ok(Self(top_frame_flag | thread_id_part | variable_id as i64))
+    }
+
+    pub fn is_top_frame(self) -> bool {
+        (self.0 >> 52) != 0
+    }
+
+    pub fn thread_id(self) -> u32 {
+        ((self.0 >> 32) & 0xFFFFF) as u32
+    }
+
+    pub fn variable_id(self) -> u32 {
+        (self.0 & 0xFFFFFFFF) as u32
+    }
+
+    pub fn as_i64(self) -> i64 {
+        self.0
+    }
+}
+
+impl TryFrom<i64> for VariableId {
+    type Error = anyhow::Error;
+
+    fn try_from(value: i64) -> Result<Self, Self::Error> {
+        if value & VariableId::MASK_53_BITS == value {
+            Ok(Self(value))
+        } else {
+            Err(anyhow::Error::msg(format!(
+                "value exceeds 53-bit limit. value: {}",
+                value
+            )))
+        }
+    }
+}
+
+impl From<VariableId> for i64 {
+    fn from(value: VariableId) -> Self {
+        value.0
+    }
+}
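Concretely, bit 52 carries the top-frame flag, bits 32..52 the thread id, and bits 0..32 the variable id, so every encoded value fits in the 53 bits a JSON double represents exactly. A small worked example of the layout, with values chosen purely for illustration:

    // Worked example of the VariableId bit layout (illustrative values).
    fn variable_id_layout() -> anyhow::Result<()> {
        let id = VariableId::new(true, 0xABC, 7)?;
        // flag:        1     << 52 = 0x0010_0000_0000_0000
        // thread id:   0xABC << 32 = 0x0000_0ABC_0000_0000
        // variable id: 7           = 0x0000_0000_0000_0007
        assert_eq!(id.as_i64(), 0x0010_0ABC_0000_0007);
        assert_eq!(
            (id.is_top_frame(), id.thread_id(), id.variable_id()),
            (true, 0xABC, 7)
        );
        Ok(())
    }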
value: {}", + value + ))) + } + } +} + +impl From for i64 { + fn from(value: VariableId) -> Self { + value.0 + } +} impl DebugServer for ServerState { fn initialize( @@ -389,7 +454,7 @@ impl DebugServer for ServerState { // rewrite variables reference to include our threadid. we don't currently send back any // variables other than locals so the TOP_FRAME_LOCALS_ID doesnt really matter (though we // do check against it below). - variables_reference: (thread_id << 16) | TOP_FRAME_LOCALS_ID, + variables_reference: VariableId::new(true, thread_id as u32, 0)?.as_i64(), expensive: false, column: None, end_column: None, @@ -405,20 +470,69 @@ impl DebugServer for ServerState { &mut self, x: dap::VariablesArguments, ) -> anyhow::Result { - let thread_id = x.variables_reference >> 16; - let variables_id = x.variables_reference & 0xFFFF; + let encoded_variable_id = VariableId::try_from(x.variables_reference)?; + let thread_id = encoded_variable_id.thread_id(); // We only understand the TOP_FRAME_LOCALS_ID id - if variables_id != TOP_FRAME_LOCALS_ID { + if !encoded_variable_id.is_top_frame() { return Ok(dap::VariablesResponseBody { variables: Vec::new(), }); } - let hook = self.find_hook_by_pseudo_thread(thread_id)?; - let vars_info = hook.adapter.variables()?; - Ok(dap::VariablesResponseBody { - variables: vars_info.locals.into_map(|var| var.to_dap()), - }) + let mut result = Vec::new(); + + if encoded_variable_id.variable_id() == 0 { + let hook = self.find_hook_by_pseudo_thread(thread_id.into())?; + let vars_info = hook.adapter.variables()?; + let known_variables = self + .variables_by_thread + .entry(thread_id) + .or_insert_with(VariablesKnownPaths::default); + + for v in vars_info.locals { + let has_children = v.has_children; + let var_path = VariablePath::new_local(&v.name.to_string()); + let mut dap_message = v.to_dap(); + if has_children { + let var_id = known_variables.insert(var_path); + dap_message.variables_reference = + VariableId::new(true, thread_id, var_id)?.into(); + } + result.push(dap_message); + } + } else { + let path = self + .variables_by_thread + .get(&thread_id) + .and_then(|x| x.get(encoded_variable_id.variable_id())) + .map(ToOwned::to_owned); + + if let Some(path) = path { + let hook = self.find_hook_by_pseudo_thread(thread_id.into())?; + let inspect_result = hook.adapter.inspect_variable(path.to_owned())?; + let current_frame_vars = self + .variables_by_thread + .get_mut(&thread_id) + .context("variables cache must exist in this codepath")?; + + for child in inspect_result.sub_values { + let child_path = path.make_child(child.name.clone()); + let has_children = child.has_children; + let mut dap_result = child.to_dap(); + if has_children { + let reference_id = VariableId::new( + true, + thread_id, + current_frame_vars.insert(child_path), + )?; + dap_result.variables_reference = reference_id.into(); + } + result.push(dap_result); + } + } + }; + + Ok(dap::VariablesResponseBody { variables: result }) } fn source(&mut self, _x: dap::SourceArguments) -> anyhow::Result { @@ -466,7 +580,48 @@ impl DebugServer for ServerState { } let hook = self.find_hook_by_pseudo_thread(thread_id)?; - hook.adapter.evaluate(&x.expression) + match hook.adapter.evaluate(&x.expression) { + Ok(v) if v.has_children => { + let mut variable_id = 0; + + self.variables_by_thread + .entry(thread_id as u32) + .and_modify(|path| { + variable_id = path.insert(VariablePath::new_expression(&x.expression)) + }) + .or_insert_with(|| { + let mut result = VariablesKnownPaths::default(); + variable_id = 
result.insert(VariablePath::new_expression(&x.expression)); + result + }); + + Ok(dap::EvaluateResponseBody { + indexed_variables: None, + named_variables: None, + presentation_hint: None, + result: v.result, + type_: Some(v.type_), + variables_reference: VariableId::new(true, thread_id as u32, variable_id)? + .as_i64() as f64, + }) + } + Ok(v) => Ok(dap::EvaluateResponseBody { + indexed_variables: None, + named_variables: None, + presentation_hint: None, + result: v.result, + type_: Some(v.type_), + variables_reference: 0.0, + }), + Err(er) => Ok(dap::EvaluateResponseBody { + indexed_variables: None, + named_variables: None, + presentation_hint: None, + result: format!("{:#}", er), + type_: None, + variables_reference: 0.0, + }), + } } fn disconnect(&mut self, _x: dap::DisconnectArguments) -> anyhow::Result<()> { @@ -485,6 +640,7 @@ impl ServerState { next_pseudo_thread: 0, next_hook_id: HookId(0), set_breakpoints: HashMap::new(), + variables_by_thread: HashMap::new(), } } @@ -677,6 +833,7 @@ impl ServerState { }; state.stopped_at = Some(description); let thread_id = state.pseudo_thread_id; + self.variables_by_thread.remove(&thread_id); let msg = dap::StoppedEventBody { reason: "breakpoint".to_owned(), @@ -710,9 +867,10 @@ impl ServerState { fn get_ast(&self, source: &ProjectRelativePath) -> anyhow::Result { debug!("tried to get ast `{}`", source); let abs_path = self.project_root.resolve(source); - let content = fs_util::read_to_string_if_exists(abs_path)?.unwrap(); + let content = fs_util::read_to_string_if_exists(abs_path)? + .ok_or_else(|| anyhow::anyhow!("file not found: {}", source))?; AstModule::parse( - &source.to_string(), + source.as_ref(), content, &Dialect { enable_def: true, @@ -725,6 +883,7 @@ impl ServerState { ..Dialect::Standard }, ) + .into_anyhow_result() } } @@ -738,8 +897,9 @@ struct BuckStarlarkDapAdapterClient { } impl DapAdapterClient for BuckStarlarkDapAdapterClient { - fn event_stopped(&self) { - self.handle.0.server.event_stopped(self.hook_id) + fn event_stopped(&self) -> starlark::Result<()> { + self.handle.0.server.event_stopped(self.hook_id); + Ok(()) } } @@ -786,3 +946,45 @@ fn describe_frame(frame: dap::StackFrame) -> String { _ => "???".to_owned(), } } + +#[cfg(test)] +mod tests { + use super::VariableId; + + fn check_variable_err(is_top_frame: bool, thread_id: u32, variable_id: u32) { + assert!( + VariableId::new(is_top_frame, thread_id, variable_id).is_err(), + "Expecting error for values ({}, {}, {})", + is_top_frame, + thread_id, + variable_id + ); + } + + fn check_variable_id(is_top_frame: bool, thread_id: u32, variable_id: u32) { + let var = VariableId::new(is_top_frame, thread_id, variable_id).unwrap(); + assert_eq!( + (var.is_top_frame(), var.thread_id(), var.variable_id()), + (is_top_frame, thread_id, variable_id) + ); + } + + #[test] + fn test_variable_id_failures() { + check_variable_err(true, u32::MAX, u32::MAX); + check_variable_err(true, 0xFFFFF + 1, u32::MAX); + } + + #[test] + fn test_variable_id() { + check_variable_id(true, 1234, 9867324); + check_variable_id(true, 0, 0); + check_variable_id(false, 0, 0); + check_variable_id(true, u16::MAX as u32, u32::MAX); + check_variable_id(false, u16::MAX as u32, u32::MAX); + check_variable_id(true, 0xFFFFF, u32::MAX); + check_variable_id(false, 0xFFFFF, u32::MAX); + check_variable_id(false, 0xFFFFF - 1, u32::MAX / 2); + check_variable_id(true, 0xFFFFF - 1, u32::MAX / 2); + } +} diff --git a/app/buck2_server_starlark_debug/src/variable_known_paths.rs 
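The `VariableId` introduced above packs three fields into one DAP `variablesReference` number: bit 52 carries the top-frame flag, bits 32..52 the pseudo thread id (20 bits), and the low 32 bits the variable id, which keeps the whole value inside the 53 bits a JSON double can represent exactly. A standalone sketch of that layout (free functions rather than the real type):

// Standalone sketch of the 53-bit packing used by `VariableId` above:
// bit 52 = top-frame flag, bits 32..52 = thread id, bits 0..32 = variable id.
fn pack(top_frame: bool, thread_id: u32, variable_id: u32) -> i64 {
    assert!(thread_id <= 0xFFFFF, "thread id must fit in 20 bits");
    ((top_frame as i64) << 52) | ((thread_id as i64) << 32) | variable_id as i64
}

fn unpack(v: i64) -> (bool, u32, u32) {
    (
        (v >> 52) != 0,
        ((v >> 32) & 0xFFFFF) as u32,
        (v & 0xFFFF_FFFF) as u32,
    )
}

fn main() {
    let packed = pack(true, 1234, 9_867_324);
    assert!(packed <= (1 << 53) - 1); // still exact as an IEEE 754 double
    assert_eq!(unpack(packed), (true, 1234, 9_867_324));
    println!("packed = {packed:#x}");
}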
b/app/buck2_server_starlark_debug/src/variable_known_paths.rs new file mode 100644 index 0000000000000..0584b8e33e558 --- /dev/null +++ b/app/buck2_server_starlark_debug/src/variable_known_paths.rs @@ -0,0 +1,40 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::collections::HashMap; + +use starlark::debug::VariablePath; + +/// Maps variable IDs to their access paths for tree-structured DAP protocol variables. +/// +/// In the DAP protocol, variables are structured as trees. When a variable with a non-zero ID is encountered, +/// it's expected to have child variables. This struct helps maintain a relationship between variable IDs and their +/// access paths to facilitate evaluation and traversal. +/// +/// # Example +/// +/// A variable with path "name.field1" that has a child "innerField" will be mapped such that +/// the child's ID corresponds to the access path "name.field1.innerField". This allows correct traversal +/// upon a variable request against the child's ID. +#[derive(Default, Debug)] +pub(crate) struct VariablesKnownPaths { + path_by_id: HashMap, +} + +impl VariablesKnownPaths { + pub fn get(&self, id: u32) -> Option<&VariablePath> { + self.path_by_id.get(&id) + } + + pub fn insert(&mut self, path: VariablePath) -> u32 { + let id = (self.path_by_id.len() + 1) as u32; + self.path_by_id.insert(id, path); + id + } +} diff --git a/app/buck2_starlark/BUCK b/app/buck2_starlark/BUCK deleted file mode 100644 index 29e139e15baa4..0000000000000 --- a/app/buck2_starlark/BUCK +++ /dev/null @@ -1,34 +0,0 @@ -load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") - -oncall("build_infra") - -rust_library( - name = "buck2_starlark", - srcs = glob(["src/**/*.rs"]), - deps = [ - "fbsource//third-party/rust:anyhow", - "fbsource//third-party/rust:async-recursion", - "fbsource//third-party/rust:async-trait", - "fbsource//third-party/rust:clap-3", - "fbsource//third-party/rust:debugserver-types", - "fbsource//third-party/rust:futures", - "fbsource//third-party/rust:once_cell", - "fbsource//third-party/rust:serde", - "fbsource//third-party/rust:serde_json", - "fbsource//third-party/rust:thiserror", - "//buck2/app/buck2_cli_proto:buck2_cli_proto", - "//buck2/app/buck2_client_ctx:buck2_client_ctx", - "//buck2/app/buck2_common:buck2_common", - "//buck2/app/buck2_core:buck2_core", - "//buck2/app/buck2_data:buck2_data", - "//buck2/app/buck2_event_observer:buck2_event_observer", - "//buck2/app/buck2_events:buck2_events", - "//buck2/app/buck2_interpreter:buck2_interpreter", - "//buck2/app/buck2_interpreter_for_build:buck2_interpreter_for_build", - "//buck2/app/buck2_server_ctx:buck2_server_ctx", - "//buck2/dice/dice:dice", - "//buck2/gazebo/dupe:dupe", - "//buck2/starlark-rust/starlark:starlark", - ], -) diff --git a/app/buck2_starlark/Cargo.toml b/app/buck2_starlark/Cargo.toml deleted file mode 100644 index 89d8c071137f4..0000000000000 --- a/app/buck2_starlark/Cargo.toml +++ /dev/null @@ -1,31 +0,0 @@ -[package] -name = "buck2_starlark" -version = "0.1.0" -edition = "2021" -description = "`buck2 starlark` command implementation, both client and server" - -[dependencies] -anyhow = { workspace = true } -async-recursion = { workspace = true } 
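The new `VariablesKnownPaths` table above hands out sequential non-zero ids and remembers the access path behind each one, so a later `variables` request for a child id can be resolved back to a concrete path. A minimal usage sketch, with a plain `String` standing in for starlark's `VariablePath` (an assumption made only for illustration):

use std::collections::HashMap;

// `String` stands in for `starlark::debug::VariablePath` in this sketch.
#[derive(Default)]
struct KnownPaths {
    path_by_id: HashMap<u32, String>,
}

impl KnownPaths {
    // Ids start at 1: a zero `variablesReference` means "no children" in DAP.
    fn insert(&mut self, path: String) -> u32 {
        let id = (self.path_by_id.len() + 1) as u32;
        self.path_by_id.insert(id, path);
        id
    }

    fn get(&self, id: u32) -> Option<&String> {
        self.path_by_id.get(&id)
    }
}

fn main() {
    let mut known = KnownPaths::default();
    // First `variables` request: register an expandable local.
    let parent = known.insert("my_struct".to_owned());
    // Client expands it: register the child under a fresh id.
    let parent_path = known.get(parent).unwrap().clone();
    let child = known.insert(format!("{parent_path}.field1"));
    assert_eq!(known.get(child).map(String::as_str), Some("my_struct.field1"));
}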
-async-trait = { workspace = true } -clap = { workspace = true } -debugserver-types = { workspace = true } -dice = { workspace = true } -dupe = { workspace = true } -futures = { workspace = true } -once_cell = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } -starlark = { workspace = true } -thiserror = { workspace = true } - -buck2_client_ctx = { workspace = true } -buck2_cli_proto = { workspace = true } -buck2_common = { workspace = true } -buck2_core = { workspace = true } -buck2_data = { workspace = true } -buck2_events = { workspace = true } -buck2_event_observer = { workspace = true } -buck2_interpreter = { workspace = true } -buck2_interpreter_for_build = { workspace = true } -buck2_server_ctx = { workspace = true } diff --git a/app/buck2_starlark/src/debug.rs b/app/buck2_starlark/src/debug.rs deleted file mode 100644 index 8f8083842653f..0000000000000 --- a/app/buck2_starlark/src/debug.rs +++ /dev/null @@ -1,229 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::io::Write; - -use async_trait::async_trait; -use buck2_cli_proto::DapRequest; -use buck2_client_ctx::client_ctx::ClientCommandContext; -use buck2_client_ctx::common::CommonBuildConfigurationOptions; -use buck2_client_ctx::common::CommonConsoleOptions; -use buck2_client_ctx::common::CommonDaemonCommandOptions; -use buck2_client_ctx::common::ConsoleType; -use buck2_client_ctx::daemon::client::BuckdClientConnector; -use buck2_client_ctx::events_ctx::PartialResultCtx; -use buck2_client_ctx::events_ctx::PartialResultHandler; -use buck2_client_ctx::exit_result::ExitResult; -use buck2_client_ctx::ide_support::ide_message_stream; -use buck2_client_ctx::stream_util::reborrow_stream_for_static; -use buck2_client_ctx::streaming::StreamingCommand; -use buck2_client_ctx::subscribers::subscriber::EventSubscriber; -use buck2_event_observer::unpack_event::unpack_event; -use buck2_event_observer::unpack_event::UnpackedBuckEvent; -use buck2_events::BuckEvent; -use futures::StreamExt; -use once_cell::sync::Lazy; - -/// Run the starlark debug adapter protocol server -/// -/// This forwards requests received on stdin to a debug server running in the -/// buck daemon. DAP events and responses are returned from the daemon and sent -/// to this command's stdout. -#[derive(Debug, clap::Parser)] -#[clap(name = "starlark-debug-attach")] -pub struct StarlarkDebugAttachCommand { - #[clap(flatten)] - config_opts: CommonBuildConfigurationOptions, - - #[clap(flatten)] - event_log_opts: CommonDaemonCommandOptions, -} - -pub fn write_dap_message(out: &mut impl Write, msg: &[u8]) -> anyhow::Result<()> { - write!(out, "Content-Length: {}\r\n\r\n", msg.len())?; - out.write_all(msg)?; - out.flush()?; - Ok(()) -} - -/// All DAP messages are written to stdout. 
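`write_dap_message` above frames each payload with the DAP base protocol's `Content-Length` header followed by a blank line. For illustration, a hypothetical counterpart that reads one framed message back out (in practice the DAP client performs this parsing, not buck2):

use std::io::{BufRead, Read};

// Hypothetical counterpart to `write_dap_message` above: read back one
// `Content-Length`-framed DAP message from a buffered reader.
fn read_dap_message(input: &mut impl BufRead) -> std::io::Result<Vec<u8>> {
    let mut len = 0usize;
    loop {
        let mut line = String::new();
        input.read_line(&mut line)?;
        let line = line.trim_end();
        if line.is_empty() {
            break; // the blank line ends the header block
        }
        if let Some(value) = line.strip_prefix("Content-Length:") {
            len = value
                .trim()
                .parse()
                .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
        }
    }
    let mut body = vec![0u8; len];
    input.read_exact(&mut body)?;
    Ok(body)
}

fn main() -> std::io::Result<()> {
    let framed: &[u8] = b"Content-Length: 2\r\n\r\n{}";
    let mut cursor = framed;
    assert_eq!(read_dap_message(&mut cursor)?, b"{}");
    Ok(())
}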
-fn send_message_to_dap_client(msg: &[u8]) -> anyhow::Result<()> { - let stdout = std::io::stdout(); - let mut stdout = stdout.lock(); - write_dap_message(&mut stdout, msg)?; - Ok(()) -} - -#[async_trait] -impl StreamingCommand for StarlarkDebugAttachCommand { - const COMMAND_NAME: &'static str = "starlark-debug-attach"; - - async fn exec_impl( - self, - buckd: &mut BuckdClientConnector, - matches: &clap::ArgMatches, - ctx: &mut ClientCommandContext<'_>, - ) -> ExitResult { - let client_context = ctx.client_context(matches, &self)?; - - let stream = ide_message_stream::<_, debugserver_types::Request>(ctx.stdin()).filter_map( - |m| async move { - match m { - Ok(dap_json) => Some(DapRequest { dap_json }), - Err(e) => { - let _ignored = buck2_client_ctx::eprintln!( - "Could not read message from stdin: `{}`", - e - ); - // TODO(cjhopman): the client just hangs at this point. We should probably error out (or - // distinguish between FramedRead errors and errors of us converting to a Request). - None - } - } - }, - ); - - let mut partial_result_handler = DapPartialResultHandler; - - reborrow_stream_for_static( - stream, - |stream| async move { - buckd - .with_flushing() - .dap(client_context, stream, &mut partial_result_handler) - .await - }, - // The DAP server side does not handle hangups. So, until it does... we never hang up: - || None, - ) - .await??; - - ExitResult::success() - } - - fn console_opts(&self) -> &CommonConsoleOptions { - // This should only be communicated with by an IDE, so disable anything other - // than the simple console - static SIMPLE_CONSOLE: Lazy = Lazy::new(|| CommonConsoleOptions { - console_type: ConsoleType::Simple, - ui: vec![], - no_interactive_console: true, - }); - &SIMPLE_CONSOLE - } - - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { - &self.event_log_opts - } - - fn common_opts(&self) -> &CommonBuildConfigurationOptions { - &self.config_opts - } - - fn should_expect_spans(&self) -> bool { - // If we're running the debugger, do not show "Waiting for daemon..." if we do not get any spans. - false - } - - fn extra_subscribers(&self) -> Vec> { - /// We add an additional subscriber that converts a handful of informative events - /// to DAP "output" events. Without this, at best these would go to stderr, but vscode's - /// executable DAP client ignores stderr, so this subscriber allows us to get that information - /// into somewhere visible to the user. - - struct ConvertToDap; - - impl ConvertToDap { - fn write_console(&self, msg: &str) -> anyhow::Result<()> { - let ev = debugserver_types::OutputEvent { - type_: "event".to_owned(), - event: "output".to_owned(), - // All other events are being sent by the debug support in the server and that's - // maintaining the sequence numbers. For us to get the correct sequence number - // here would be tricky. Instead, we just set it to 0 and hope that nobody notices/cares - // that it's out of order/invalid. The alternative would probably be to - // deserialize all events from the server and rewrite their sequence numbers (and - // potentially references to those sequence numbers coming back from the dap client). - seq: 0, - body: debugserver_types::OutputEventBody { - category: None, - column: None, - data: None, - line: None, - output: format!("{}\n", msg), - source: None, - variables_reference: None, - }, - }; - send_message_to_dap_client(&serde_json::to_vec(&ev)?) 
- } - } - - #[async_trait] - impl EventSubscriber for ConvertToDap { - async fn handle_output(&mut self, raw_output: &[u8]) -> anyhow::Result<()> { - self.write_console(&String::from_utf8_lossy(raw_output)) - } - - async fn handle_tailer_stderr(&mut self, stderr: &str) -> anyhow::Result<()> { - self.write_console(stderr) - } - - async fn handle_events( - &mut self, - events: &[std::sync::Arc], - ) -> anyhow::Result<()> { - for ev in events { - match unpack_event(ev)? { - UnpackedBuckEvent::Instant(_, _, data) => match data { - buck2_data::instant_event::Data::StructuredError(soft_error) => { - if !soft_error.quiet { - self.write_console(&format!( - "soft error: {}", - &soft_error.payload - ))?; - } - } - buck2_data::instant_event::Data::ConsoleMessage(message) => { - self.write_console(&message.message)?; - } - _ => {} - }, - _ => {} - } - } - Ok(()) - } - - async fn handle_error(&mut self, error: &anyhow::Error) -> anyhow::Result<()> { - self.write_console(&format!( - "buck2 starlark-attach debugserver error: {}", - error - )) - } - } - - vec![Box::new(ConvertToDap)] - } -} - -struct DapPartialResultHandler; - -#[async_trait] -impl PartialResultHandler for DapPartialResultHandler { - type PartialResult = buck2_cli_proto::DapMessage; - - async fn handle_partial_result( - &mut self, - mut _ctx: PartialResultCtx<'_, '_>, - partial_res: buck2_cli_proto::DapMessage, - ) -> anyhow::Result<()> { - send_message_to_dap_client(&partial_res.dap_json) - } -} diff --git a/app/buck2_starlark/src/lib.rs b/app/buck2_starlark/src/lib.rs deleted file mode 100644 index 6e03aee13b302..0000000000000 --- a/app/buck2_starlark/src/lib.rs +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -#![feature(async_closure)] -#![feature(try_blocks)] - -use async_trait::async_trait; -use buck2_cli_proto::ClientContext; -use buck2_cli_proto::GenericRequest; -use buck2_client_ctx::argv::Argv; -use buck2_client_ctx::argv::SanitizedArgv; -use buck2_client_ctx::client_ctx::ClientCommandContext; -use buck2_client_ctx::common::CommonBuildConfigurationOptions; -use buck2_client_ctx::common::CommonConsoleOptions; -use buck2_client_ctx::common::CommonDaemonCommandOptions; -use buck2_client_ctx::daemon::client::BuckdClientConnector; -use buck2_client_ctx::daemon::client::StdoutPartialResultHandler; -use buck2_client_ctx::exit_result::ExitResult; -use buck2_client_ctx::streaming::BuckSubcommand; -use buck2_client_ctx::streaming::StreamingCommand; -use buck2_server_ctx::ctx::ServerCommandContextTrait; -use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; - -use crate::debug::StarlarkDebugAttachCommand; -use crate::lint::StarlarkLintCommand; -use crate::typecheck::StarlarkTypecheckCommand; - -mod debug; -mod lint; -pub mod server; -mod typecheck; -mod util; - -#[derive(Debug, clap::Subcommand)] -#[clap(name = "starlark", about = "Run Starlark operations")] -pub enum StarlarkCommand { - #[clap(flatten)] - Opaque(StarlarkOpaqueCommand), - DebugAttach(StarlarkDebugAttachCommand), -} - -// Used for subcommands that follow `buck2 audit`'s "opaque" pattern where the command object is serialized -// to the daemon and deserialized there and has a `server_execute()` on the Command object itself (as opposed -// to using structured endpoints in the daemon protocol). -#[derive(Debug, clap::Subcommand, serde::Serialize, serde::Deserialize)] -pub enum StarlarkOpaqueCommand { - Lint(StarlarkLintCommand), - Typecheck(StarlarkTypecheckCommand), -} - -#[derive(Debug, clap::Parser, serde::Serialize, serde::Deserialize, Default)] -pub struct StarlarkCommandCommonOptions { - #[clap(flatten)] - config_opts: CommonBuildConfigurationOptions, - - #[clap(flatten)] - console_opts: CommonConsoleOptions, - - #[clap(flatten)] - event_log_opts: CommonDaemonCommandOptions, -} - -#[async_trait] -pub trait StarlarkOpaqueSubcommand: Send + Sync + 'static { - async fn server_execute( - &self, - server_ctx: &dyn ServerCommandContextTrait, - stdout: PartialResultDispatcher, - client_server_ctx: ClientContext, - ) -> anyhow::Result<()>; - - fn common_opts(&self) -> &StarlarkCommandCommonOptions; -} - -impl StarlarkOpaqueCommand { - pub async fn server_execute( - &self, - server_ctx: &dyn ServerCommandContextTrait, - stdout: PartialResultDispatcher, - client_server_ctx: ClientContext, - ) -> anyhow::Result<()> { - self.as_subcommand() - .server_execute(server_ctx, stdout, client_server_ctx) - .await - } - fn as_subcommand(&self) -> &dyn StarlarkOpaqueSubcommand { - match self { - Self::Lint(cmd) => cmd, - Self::Typecheck(cmd) => cmd, - } - } -} - -#[async_trait] -impl StreamingCommand for StarlarkOpaqueCommand { - const COMMAND_NAME: &'static str = "starlark"; - - /// Starlark subcommands are all implemented as a generic request to the buckd server that will deserialize the command object. 
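As the comment above describes, opaque subcommands are serialized on the client and deserialized in the daemon, so the parsed CLI options survive the trip through `GenericRequest.serialized_opts` unchanged. A minimal round-trip sketch with illustrative stand-in types (not the real protocol messages):

use serde::{Deserialize, Serialize};

// Illustrative stand-ins for the opaque starlark subcommands described above.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
enum OpaqueCommand {
    Lint { paths: Vec<String> },
    Typecheck { paths: Vec<String> },
}

fn main() -> serde_json::Result<()> {
    // Client side: serialize the parsed command into the request payload.
    let cmd = OpaqueCommand::Lint { paths: vec!["foo.bzl".to_owned()] };
    let serialized_opts = serde_json::to_string(&cmd)?;

    // Daemon side: recover the command object and dispatch on it.
    let received: OpaqueCommand = serde_json::from_str(&serialized_opts)?;
    assert_eq!(received, cmd);
    Ok(())
}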
- async fn exec_impl( - self, - buckd: &mut BuckdClientConnector, - matches: &clap::ArgMatches, - ctx: &mut ClientCommandContext<'_>, - ) -> ExitResult { - let serialized = serde_json::to_string(&self)?; - - let context = ctx.client_context(matches, &self)?; - - buckd - .with_flushing() - .starlark( - GenericRequest { - context: Some(context), - serialized_opts: serialized, - }, - ctx.stdin().console_interaction_stream(self.console_opts()), - &mut StdoutPartialResultHandler, - ) - .await??; - ExitResult::success() - } - - fn console_opts(&self) -> &CommonConsoleOptions { - &self.as_subcommand().common_opts().console_opts - } - - fn event_log_opts(&self) -> &CommonDaemonCommandOptions { - &self.as_subcommand().common_opts().event_log_opts - } - - fn common_opts(&self) -> &CommonBuildConfigurationOptions { - &self.as_subcommand().common_opts().config_opts - } -} - -impl StarlarkCommand { - pub fn exec(self, matches: &clap::ArgMatches, ctx: ClientCommandContext<'_>) -> ExitResult { - let matches = matches.subcommand().expect("subcommand not found").1; - match self { - StarlarkCommand::Opaque(cmd) => cmd.exec(matches, ctx), - StarlarkCommand::DebugAttach(cmd) => cmd.exec(matches, ctx), - } - } - - pub fn sanitize_argv(&self, argv: Argv) -> SanitizedArgv { - argv.no_need_to_sanitize() - } -} diff --git a/app/buck2_starlark/src/lint.rs b/app/buck2_starlark/src/lint.rs deleted file mode 100644 index a9edd887917f5..0000000000000 --- a/app/buck2_starlark/src/lint.rs +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::collections::HashMap; -use std::collections::HashSet; -use std::io::Write; -use std::sync::Arc; - -use anyhow::Context; -use async_trait::async_trait; -use buck2_cli_proto::ClientContext; -use buck2_client_ctx::path_arg::PathArg; -use buck2_common::dice::cells::HasCellResolver; -use buck2_common::dice::data::HasIoProvider; -use buck2_common::dice::file_ops::HasFileOps; -use buck2_common::io::IoProvider; -use buck2_core::cells::name::CellName; -use buck2_core::cells::CellResolver; -use buck2_interpreter::file_type::StarlarkFileType; -use buck2_interpreter::paths::path::StarlarkPath; -use buck2_server_ctx::ctx::ServerCommandContextTrait; -use buck2_server_ctx::ctx::ServerCommandDiceContext; -use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; -use dice::DiceTransaction; -use dupe::Dupe; -use starlark::analysis::AstModuleLint; -use starlark::codemap::FileSpan; -use starlark::errors::Diagnostic; -use starlark::errors::EvalSeverity; -use starlark::errors::Lint; -use starlark::syntax::AstModule; - -use crate::util::environment::Environment; -use crate::util::paths::starlark_files; -use crate::StarlarkCommandCommonOptions; -use crate::StarlarkOpaqueSubcommand; - -#[derive(Debug, clap::Parser, serde::Serialize, serde::Deserialize)] -#[clap(name = "starlark-lint", about = "Run the Starlark linter.")] -pub struct StarlarkLintCommand { - #[clap(flatten)] - common_opts: StarlarkCommandCommonOptions, - - #[clap(value_name = "PATH", required = true)] - paths: Vec, -} - -/// The cache of names for a path, keyed by its CellName and its path type. 
-struct Cache<'a> { - dice: &'a DiceTransaction, - cached: HashMap<(CellName, StarlarkFileType), Arc>>, -} - -impl<'a> Cache<'a> { - pub(crate) fn new(dice: &'a DiceTransaction) -> Cache<'a> { - Self { - dice, - cached: HashMap::new(), - } - } - - pub(crate) async fn get_names( - &mut self, - path: &StarlarkPath<'_>, - ) -> anyhow::Result>> { - let path_type = path.file_type(); - let cell = path.cell(); - if let Some(res) = self.cached.get(&(cell, path_type)) { - return Ok(res.dupe()); - } - let env: Environment = Environment::new(cell, path_type, self.dice).await?; - let res = Arc::new(env.get_names(path_type, self.dice).await?); - self.cached.insert((cell, path_type), res.dupe()); - Ok(res) - } -} - -async fn lint_file( - path: &StarlarkPath<'_>, - cell_resolver: &CellResolver, - io: &dyn IoProvider, - cache: &mut Cache<'_>, -) -> anyhow::Result> { - let dialect = path.file_type().dialect(false); - let proj_path = cell_resolver.resolve_path(path.path().as_ref().as_ref())?; - let path_str = proj_path.to_string(); - let content = io - .read_file_if_exists(proj_path) - .await? - .with_context(|| format!("File not found: `{}`", path_str))?; - match AstModule::parse(&path_str, content.clone(), &dialect) { - Ok(ast) => Ok(ast.lint(Some(&*cache.get_names(path).await?))), - Err(err) => { - // There was a parse error, so we don't want to fail, we want to give a nice error message - // Do the best we can - it is probably a `Diagnostic`, which gives us more precise info. - let (span, message) = match err.downcast::() { - Err(err) => (None, err), - Ok(diag) => (diag.span, diag.message), - }; - Ok(vec![Lint { - location: span.unwrap_or_else(|| FileSpan::new(path_str, content)), - short_name: "parse_error".to_owned(), - severity: EvalSeverity::Error, - problem: format!("{:#}", message), - original: "".to_owned(), - }]) - } - } -} - -#[async_trait] -impl StarlarkOpaqueSubcommand for StarlarkLintCommand { - async fn server_execute( - &self, - server_ctx: &dyn ServerCommandContextTrait, - mut stdout: PartialResultDispatcher, - _client_ctx: ClientContext, - ) -> anyhow::Result<()> { - server_ctx - .with_dice_ctx(async move |server_ctx, ctx| { - let cell_resolver = ctx.get_cell_resolver().await?; - let fs = ctx.file_ops(); - let io = ctx.global_data().get_io_provider(); - let mut cache = Cache::new(&ctx); - - let mut stdout = stdout.as_writer(); - let mut lint_count = 0; - let files = - starlark_files(&self.paths, server_ctx, &cell_resolver, &fs, &*io).await?; - for file in &files { - let lints = lint_file(&file.borrow(), &cell_resolver, &*io, &mut cache).await?; - lint_count += lints.len(); - for lint in lints { - writeln!(stdout, "{}", lint)?; - } - } - if lint_count > 0 { - Err(anyhow::anyhow!("Found {} lints", lint_count)) - } else { - writeln!( - server_ctx.stderr()?, - "Found no lints in {} files", - files.len() - )?; - Ok(()) - } - }) - .await - } - - fn common_opts(&self) -> &StarlarkCommandCommonOptions { - &self.common_opts - } -} diff --git a/app/buck2_starlark/src/server.rs b/app/buck2_starlark/src/server.rs deleted file mode 100644 index 1c2f9b92742ac..0000000000000 --- a/app/buck2_starlark/src/server.rs +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
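`lint_file` above parses each file with the dialect implied by its path type and runs starlark-rust's linter over the AST, synthesizing a `parse_error` lint when parsing fails. A minimal sketch against the crates.io `starlark` API; this assumes a version where `AstModule::parse` returns `anyhow::Result` (newer releases return `starlark::Result`, which is why the server.rs hunk earlier in this diff adds `into_anyhow_result()`):

use starlark::analysis::AstModuleLint;
use starlark::syntax::{AstModule, Dialect};

fn main() -> anyhow::Result<()> {
    let content = "x = [1, 2, 3]\ny = x\n".to_owned();
    let ast = AstModule::parse("example.bzl", content, &Dialect::Extended)?;
    // `None` means no extra known global names; the code above passes the
    // environment's names so unresolved identifiers can be flagged too.
    for lint in ast.lint(None) {
        println!("{lint}");
    }
    Ok(())
}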
- */ - -use buck2_events::dispatch::span_async; -use buck2_server_ctx::command_end::command_end; -use buck2_server_ctx::ctx::ServerCommandContextTrait; -use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; - -use crate::StarlarkOpaqueCommand; - -pub async fn server_starlark_command( - ctx: &dyn ServerCommandContextTrait, - partial_result_dispatcher: PartialResultDispatcher, - req: buck2_cli_proto::GenericRequest, -) -> anyhow::Result { - let start_event = buck2_data::CommandStart { - metadata: ctx.request_metadata().await?, - data: Some(buck2_data::StarlarkCommandStart {}.into()), - }; - - span_async( - start_event, - server_starlark_command_inner(ctx, partial_result_dispatcher, req), - ) - .await -} - -async fn server_starlark_command_inner( - context: &dyn ServerCommandContextTrait, - partial_result_dispatcher: PartialResultDispatcher, - req: buck2_cli_proto::GenericRequest, -) -> ( - anyhow::Result, - buck2_data::CommandEnd, -) { - let result = parse_command_and_execute(context, partial_result_dispatcher, req).await; - let end_event = command_end(&result, buck2_data::StarlarkCommandEnd {}); - - let result = result.map(|()| buck2_cli_proto::GenericResponse {}); - - (result, end_event) -} - -async fn parse_command_and_execute( - context: &dyn ServerCommandContextTrait, - partial_result_dispatcher: PartialResultDispatcher, - req: buck2_cli_proto::GenericRequest, -) -> anyhow::Result<()> { - let command: StarlarkOpaqueCommand = serde_json::from_str(&req.serialized_opts)?; - command - .server_execute( - context, - partial_result_dispatcher, - req.context.expect("buck cli always sets a client context"), - ) - .await -} diff --git a/app/buck2_starlark/src/typecheck.rs b/app/buck2_starlark/src/typecheck.rs deleted file mode 100644 index 0232ff13f9f52..0000000000000 --- a/app/buck2_starlark/src/typecheck.rs +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -use std::collections::HashMap; -use std::io::Write; - -use anyhow::Context; -use async_recursion::async_recursion; -use async_trait::async_trait; -use buck2_cli_proto::ClientContext; -use buck2_client_ctx::path_arg::PathArg; -use buck2_common::dice::cells::HasCellResolver; -use buck2_common::dice::data::HasIoProvider; -use buck2_common::dice::file_ops::HasFileOps; -use buck2_common::io::IoProvider; -use buck2_core::cells::name::CellName; -use buck2_core::cells::CellResolver; -use buck2_interpreter::file_type::StarlarkFileType; -use buck2_interpreter::paths::module::OwnedStarlarkModulePath; -use buck2_interpreter::paths::path::OwnedStarlarkPath; -use buck2_interpreter_for_build::interpreter::dice_calculation_delegate::HasCalculationDelegate; -use buck2_server_ctx::ctx::ServerCommandContextTrait; -use buck2_server_ctx::ctx::ServerCommandDiceContext; -use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher; -use dice::DiceTransaction; -use dupe::Dupe; -use starlark::environment::Globals; -use starlark::typing::AstModuleTypecheck; -use starlark::typing::Interface; - -use crate::util::environment::Environment; -use crate::util::paths::starlark_files; -use crate::StarlarkCommandCommonOptions; -use crate::StarlarkOpaqueSubcommand; - -#[derive(Debug, clap::Parser, serde::Serialize, serde::Deserialize)] -#[clap(name = "starlark-typecheck", about = "Run the Starlark typechecker.")] -pub struct StarlarkTypecheckCommand { - #[clap(flatten)] - common_opts: StarlarkCommandCommonOptions, - - #[clap(value_name = "PATH", required = true)] - paths: Vec, -} - -struct Cache<'a> { - // Things we have access to get information - dice: &'a DiceTransaction, - io: &'a dyn IoProvider, - cell_resolver: &'a CellResolver, - // Things we have access to write information - stdout: &'a mut (dyn Write + Send + Sync), - stderr: &'a mut (dyn Write + Send + Sync), - // Our accumulated state - oracle: HashMap<(CellName, StarlarkFileType), Globals>, - cache: HashMap, -} - -impl<'a> Cache<'a> { - async fn typecheck(&mut self, path: OwnedStarlarkPath) -> anyhow::Result<()> { - self.run(path).await?; - Ok(()) - } - - async fn get_oracle( - &mut self, - cell: CellName, - path_type: StarlarkFileType, - ) -> anyhow::Result { - match self.oracle.get(&(cell, path_type)) { - Some(g) => Ok(g.dupe()), - None => { - let globals = Environment::new(cell, path_type, self.dice).await?.globals; - self.oracle.insert((cell, path_type), globals.dupe()); - Ok(globals) - } - } - } - - async fn get(&mut self, path: OwnedStarlarkModulePath) -> anyhow::Result { - match self.cache.get(&path) { - Some(x) => Ok(x.dupe()), - None => { - let res = self.run(path.clone().into_starlark_path()).await?; - self.cache.insert(path, res.dupe()); - Ok(res) - } - } - } - - #[async_recursion] - async fn run(&mut self, path: OwnedStarlarkPath) -> anyhow::Result { - let path_ref = path.borrow(); - writeln!(self.stderr, "Type checking: {path_ref}")?; - let proj_path = self - .cell_resolver - .resolve_path(path_ref.path().as_ref().as_ref())?; - let path_str = proj_path.to_string(); - let src = self - .io - .read_file_if_exists(proj_path) - .await? 
- .with_context(|| format!("File not found: `{path_str}`"))?; - - let interp = self - .dice - .get_interpreter_calculator(path_ref.cell(), path_ref.build_file_cell()) - .await?; - - let ast = interp.prepare_eval_with_content(path_ref, src)?; - let mut loads = HashMap::new(); - for x in ast.loads() { - let y = interp.resolve_load(path_ref, x.module_id).await?; - let interface = self.get(y).await?; - loads.insert(x.module_id.to_owned(), interface); - } - let globals = self - .get_oracle(path_ref.cell(), path_ref.file_type()) - .await?; - let (errors, bindings, interface, approxiomations) = ast.typecheck(&globals, &loads); - - if !approxiomations.is_empty() { - writeln!(self.stderr, "\n\nAPPROXIMATIONS:")?; - for x in approxiomations { - writeln!(self.stderr, "{x}")?; - } - } - - writeln!(self.stderr, "\n\nBINDINGS:\n{bindings}")?; - - let errors_count = errors.len(); - if errors_count == 0 { - Ok(interface) - } else { - writeln!(self.stdout, "\n\nERRORS:")?; - for x in errors { - writeln!(self.stdout, "{x}")?; - } - Err(anyhow::anyhow!("Detected {errors_count} errors")) - } - } -} - -#[async_trait] -impl StarlarkOpaqueSubcommand for StarlarkTypecheckCommand { - async fn server_execute( - &self, - server_ctx: &dyn ServerCommandContextTrait, - mut stdout: PartialResultDispatcher, - _client_ctx: ClientContext, - ) -> anyhow::Result<()> { - server_ctx - .with_dice_ctx(async move |server_ctx, dice| { - let cell_resolver = dice.get_cell_resolver().await?; - let fs = dice.file_ops(); - let io = dice.global_data().get_io_provider(); - - let files = - starlark_files(&self.paths, server_ctx, &cell_resolver, &fs, &*io).await?; - let mut stdout = stdout.as_writer(); - let mut stderr = server_ctx.stderr()?; - let mut cache = Cache { - dice: &dice, - io: &*io, - cell_resolver: &cell_resolver, - stdout: &mut stdout, - stderr: &mut stderr, - oracle: HashMap::new(), - cache: HashMap::new(), - }; - for file in files { - cache.typecheck(file).await?; - } - let file_count = cache.cache.len(); - writeln!(stderr, "Found no type errors in {file_count} files")?; - Ok(()) - }) - .await - } - - fn common_opts(&self) -> &StarlarkCommandCommonOptions { - &self.common_opts - } -} diff --git a/app/buck2_starlark/src/util/environment.rs b/app/buck2_starlark/src/util/environment.rs deleted file mode 100644 index ac009cd6936b6..0000000000000 --- a/app/buck2_starlark/src/util/environment.rs +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::collections::HashSet; - -use buck2_core::bzl::ImportPath; -use buck2_core::cells::build_file_cell::BuildFileCell; -use buck2_core::cells::name::CellName; -use buck2_interpreter::file_type::StarlarkFileType; -use buck2_interpreter::import_paths::HasImportPaths; -use buck2_interpreter::load_module::InterpreterCalculation; -use buck2_interpreter::load_module::INTERPRETER_CALCULATION_IMPL; -use buck2_interpreter::prelude_path::PreludePath; -use dice::DiceTransaction; -use starlark::environment::Globals; - -/// The environment in which a Starlark file is evaluated. -pub(crate) struct Environment { - /// The globals that are driven from Rust. - pub(crate) globals: Globals, - /// The path to the prelude, if the prelude is loaded in this file. 
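The `Cache` in the deleted typecheck.rs above memoizes one `Interface` per module and recurses through `load()`s, so shared dependencies are checked once no matter how many modules import them. The same memoized-recursion shape in a self-contained sketch (toy string modules stand in for the real AST and interface types):

use std::collections::HashMap;

// Toy module graph: module name -> (list of loads, own exported symbol).
type Graph = HashMap<&'static str, (Vec<&'static str>, &'static str)>;

struct Cache<'a> {
    graph: &'a Graph,
    interfaces: HashMap<&'static str, Vec<&'static str>>, // memoized results
    runs: usize, // how many modules were actually checked
}

impl<'a> Cache<'a> {
    fn get(&mut self, name: &'static str) -> Vec<&'static str> {
        if let Some(iface) = self.interfaces.get(name) {
            return iface.clone();
        }
        let res = self.run(name);
        self.interfaces.insert(name, res.clone());
        res
    }

    // "Typecheck" one module: resolve its loads first, then export its symbol.
    fn run(&mut self, name: &'static str) -> Vec<&'static str> {
        self.runs += 1;
        let (loads, symbol) = self.graph[name].clone();
        let mut iface = Vec::new();
        for dep in loads {
            iface.extend(self.get(dep));
        }
        iface.push(symbol);
        iface
    }
}

fn main() {
    let graph: Graph = HashMap::from([
        ("util.bzl", (vec![], "helper")),
        ("a.bzl", (vec!["util.bzl"], "a")),
        ("b.bzl", (vec!["util.bzl"], "b")),
    ]);
    let mut cache = Cache { graph: &graph, interfaces: HashMap::new(), runs: 0 };
    cache.get("a.bzl");
    cache.get("b.bzl");
    assert_eq!(cache.runs, 3); // util.bzl is checked once, not twice
}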
- /// Note that in a BUCK file the `native` value is also exploded into the top-level. - prelude: Option, - /// A path that is implicitly loaded as additional globals. - preload: Option, -} - -impl Environment { - pub(crate) async fn new( - cell: CellName, - path_type: StarlarkFileType, - dice: &DiceTransaction, - ) -> anyhow::Result { - // Find the information from the globals - let globals = INTERPRETER_CALCULATION_IMPL - .get()? - .global_env_for_file_type(dice, path_type) - .await?; - - // Next grab the prelude, unless we are in the prelude cell and not a build file - let prelude = match INTERPRETER_CALCULATION_IMPL - .get()? - .prelude_import(dice) - .await? - { - Some(prelude) - if path_type == StarlarkFileType::Buck || prelude.import_path().cell() != cell => - { - Some(prelude) - } - _ => None, - }; - - // Now grab the pre-load things - let preload = dice - .import_paths_for_cell(BuildFileCell::new(cell)) - .await? - .root_import() - .cloned(); - - Ok(Environment { - globals, - prelude, - preload, - }) - } - - pub(crate) async fn get_names( - &self, - path_type: StarlarkFileType, - dice: &DiceTransaction, - ) -> anyhow::Result> { - let mut names = HashSet::new(); - - for x in self.globals.names() { - names.insert(x.as_str().to_owned()); - } - - if let Some(prelude) = &self.prelude { - let m = dice - .get_loaded_module_from_import_path(prelude.import_path()) - .await?; - for x in m.env().names() { - names.insert(x.as_str().to_owned()); - } - if path_type == StarlarkFileType::Buck { - for (name, _value) in m.extra_globals_from_prelude_for_buck_files()? { - names.insert(name.to_owned()); - } - } - } - - if let Some(preload) = &self.preload { - let m = dice.get_loaded_module_from_import_path(preload).await?; - for x in m.env().names() { - names.insert(x.as_str().to_owned()); - } - } - - Ok(names) - } -} diff --git a/app/buck2_starlark/src/util/paths.rs b/app/buck2_starlark/src/util/paths.rs deleted file mode 100644 index ffba5fa92ed3d..0000000000000 --- a/app/buck2_starlark/src/util/paths.rs +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -use std::ops::Deref; - -use async_recursion::async_recursion; -use buck2_client_ctx::path_arg::PathArg; -use buck2_common::file_ops::FileOps; -use buck2_common::file_ops::FileType; -use buck2_common::file_ops::RawPathMetadata; -use buck2_common::io::IoProvider; -use buck2_core::build_file_path::BuildFilePath; -use buck2_core::bzl::ImportPath; -use buck2_core::cells::CellResolver; -use buck2_core::fs::paths::file_name::FileName; -use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; -use buck2_core::package::PackageLabel; -use buck2_interpreter::paths::bxl::BxlFilePath; -use buck2_interpreter::paths::package::PackageFilePath; -use buck2_interpreter::paths::path::OwnedStarlarkPath; -use buck2_server_ctx::ctx::ServerCommandContextTrait; -use dupe::Dupe; -use thiserror::Error; - -#[derive(Debug, Error)] -enum StarlarkFilesError { - #[error("File not found, `{0}`")] - FileNotFound(ProjectRelativePathBuf), - #[error("Symlinks and other esoteric files are not supported, `{0}`")] - UnsupportedFileType(ProjectRelativePathBuf), -} - -#[async_recursion] -async fn starlark_file( - proj_path: ProjectRelativePathBuf, - // None = this file was given explicitly - // Some = it was a directory traversal (and we know its type) - recursive: Option, - cell_resolver: &CellResolver, - fs: &dyn FileOps, - io: &dyn IoProvider, - files: &mut Vec, -) -> anyhow::Result<()> { - let cell_path = cell_resolver.get_cell_path(&proj_path)?; - if recursive.is_some() && fs.is_ignored(cell_path.as_ref()).await? { - // File is ignored by Buck, give up on it - return Ok(()); - } - - let typ = match &recursive { - Some(typ) => typ.dupe(), - None => match io.read_path_metadata_if_exists(proj_path.clone()).await? { - None => { - return Err(StarlarkFilesError::FileNotFound(proj_path).into()); - } - Some(RawPathMetadata::Directory) => FileType::Directory, - Some(RawPathMetadata::File(_)) => { - // It's a shame we throw away the digest we calculated, but not a huge deal (its cheap compared to parsing) - FileType::File - } - Some(RawPathMetadata::Symlink { .. }) => FileType::Symlink, - }, - }; - - match typ { - FileType::Directory => { - for x in io.read_dir(proj_path.clone()).await? { - let Ok(file_name) = FileName::new(&x.file_name) else { - // Skip files which buck does not like: - // this function works with `CellPath` values, - // which cannot be constructed from paths not acceptable by buck. - continue; - }; - let mut child_path = proj_path.clone(); - child_path.push(file_name); - starlark_file(child_path, Some(x.file_type), cell_resolver, fs, io, files).await?; - } - } - FileType::File => { - // It's a shame we throw away the digest we calculated, but not a huge deal (its cheap compared to parsing) - let is_buildfile = match proj_path.file_name() { - None => false, - Some(file_name) => cell_resolver - .get(cell_path.cell())? 
- .buildfiles() - .iter() - .any(|x| (*x).deref() == file_name), - }; - - if is_buildfile { - files.push(OwnedStarlarkPath::BuildFile(BuildFilePath::new( - PackageLabel::from_cell_path(cell_path.parent().unwrap()), - proj_path.file_name().unwrap().to_owned(), - ))); - } else if proj_path.as_str().ends_with(".bxl") { - files.push(OwnedStarlarkPath::BxlFile(BxlFilePath::new(cell_path)?)); - } else if proj_path.ends_with(PackageFilePath::PACKAGE_FILE_NAME) { - // `parent` must return `Some` if we have a non-empty file, which we do because of above - files.push(OwnedStarlarkPath::PackageFile(PackageFilePath::for_dir( - cell_path.parent().unwrap(), - ))) - } else if recursive.is_none() || proj_path.as_str().ends_with(".bzl") { - // If a file was asked for explicitly, and is nothing else, treat it as .bzl file - // If it's not explicit, just ignore it (probably a source file) - files.push(OwnedStarlarkPath::LoadFile(ImportPath::new_same_cell( - cell_path, - )?)); - } - } - FileType::Symlink | FileType::Unknown => { - if recursive.is_none() { - return Err(StarlarkFilesError::UnsupportedFileType(proj_path).into()); - } - } - } - Ok(()) -} - -/// Find the paths to apply Starlark to (e.g. linter, typecheck) -pub(crate) async fn starlark_files( - paths: &[PathArg], - context: &dyn ServerCommandContextTrait, - cell_resolver: &CellResolver, - fs: &dyn FileOps, - io: &dyn IoProvider, -) -> anyhow::Result> { - let mut files = Vec::new(); - - for path in paths { - let path = path.resolve(context.working_dir_abs()); - let cell_path = cell_resolver.get_cell_path_from_abs_path(&path, context.project_root())?; - let proj_path = cell_resolver.resolve_path(cell_path.as_ref())?; - starlark_file(proj_path, None, cell_resolver, fs, io, &mut files).await?; - } - Ok(files) -} diff --git a/app/buck2_subscription_proto/BUCK b/app/buck2_subscription_proto/BUCK index 53ce9e7dfa6a7..265a21f715530 100644 --- a/app/buck2_subscription_proto/BUCK +++ b/app/buck2_subscription_proto/BUCK @@ -1,6 +1,6 @@ load("@fbcode//buck2:proto_defs.bzl", "rust_protobuf_library") load("@fbcode//grpc_fb/codegen:buck_macros.bzl", "grpc_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") +load("@fbcode_macros//build_defs:export_files.bzl", "export_file") oncall("build_infra") @@ -8,7 +8,6 @@ rust_protobuf_library( name = "buck2_subscription_proto", srcs = glob(["src/**/*.rs"]), build_script = "build.rs", - doctests = False, # FIXME protos = ["subscription.proto"], deps = [ "fbsource//third-party/rust:derive_more", @@ -27,3 +26,7 @@ grpc_library( "py", ], ) + +export_file( + name = "subscription.proto", +) diff --git a/app/buck2_subscription_proto/Cargo.toml b/app/buck2_subscription_proto/Cargo.toml index 5820126bc267a..c33930296c4c4 100644 --- a/app/buck2_subscription_proto/Cargo.toml +++ b/app/buck2_subscription_proto/Cargo.toml @@ -1,15 +1,16 @@ [package] +edition = "2021" +license = { workspace = true } name = "buck2_subscription_proto" +repository = { workspace = true } version = "0.1.0" -edition = "2021" [dependencies] +allocative = { workspace = true } +derive_more = { workspace = true } prost = { workspace = true } serde = { workspace = true } tonic = { workspace = true } -allocative = { workspace = true } -derive_more = { workspace = true } [build-dependencies] buck2_protoc_dev = { workspace = true } -tonic-build = { workspace = true } diff --git a/app/buck2_subscription_proto/src/lib.rs b/app/buck2_subscription_proto/src/lib.rs index e9ef1c63b9b12..55f35ef9e6315 100644 --- a/app/buck2_subscription_proto/src/lib.rs +++ 
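`starlark_file` above classifies each path into an `OwnedStarlarkPath` flavor: a configured buildfile name becomes a build file, `.bxl` a bxl file, `PACKAGE` a package file, and anything else is treated as a load file when named explicitly (or when it ends in `.bzl`). The decision tree as a pure function, a simplified sketch with illustrative names; the real code also consults cell paths and ignore rules:

#[derive(Debug, PartialEq)]
enum StarlarkPathKind {
    BuildFile,
    BxlFile,
    PackageFile,
    LoadFile,
    Ignored,
}

// Simplified stand-in for the classification in `starlark_file` above.
// `explicit` mirrors `recursive.is_none()`: the path was given on the CLI.
fn classify(file_name: &str, buildfiles: &[&str], explicit: bool) -> StarlarkPathKind {
    if buildfiles.contains(&file_name) {
        StarlarkPathKind::BuildFile
    } else if file_name.ends_with(".bxl") {
        StarlarkPathKind::BxlFile
    } else if file_name == "PACKAGE" {
        StarlarkPathKind::PackageFile
    } else if explicit || file_name.ends_with(".bzl") {
        StarlarkPathKind::LoadFile
    } else {
        StarlarkPathKind::Ignored // probably a source file found by traversal
    }
}

fn main() {
    let buildfiles = ["BUCK", "TARGETS"];
    assert_eq!(classify("BUCK", &buildfiles, false), StarlarkPathKind::BuildFile);
    assert_eq!(classify("check.bxl", &buildfiles, false), StarlarkPathKind::BxlFile);
    assert_eq!(classify("main.rs", &buildfiles, false), StarlarkPathKind::Ignored);
    assert_eq!(classify("main.rs", &buildfiles, true), StarlarkPathKind::LoadFile);
}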
b/app/buck2_subscription_proto/src/lib.rs @@ -7,4 +7,6 @@ * of this source tree. */ +#![feature(error_generic_member_access)] + tonic::include_proto!("buck.subscription"); diff --git a/app/buck2_test/BUCK b/app/buck2_test/BUCK index b0eebc72984f5..2106e20afbcc6 100644 --- a/app/buck2_test/BUCK +++ b/app/buck2_test/BUCK @@ -1,11 +1,24 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") rust_library( name = "buck2_test", srcs = glob(["src/**/*.rs"]), + os_deps = [ + ( + "linux", + [ + "fbsource//third-party/rust:libc", + ], + ), + ( + "macos", + [ + "fbsource//third-party/rust:libc", + ], + ), + ], test_deps = [ "fbsource//third-party/rust:maplit", ], @@ -17,31 +30,36 @@ rust_library( "fbsource//third-party/rust:derive_more", "fbsource//third-party/rust:futures", "fbsource//third-party/rust:indexmap", - "fbsource//third-party/rust:libc", + "fbsource//third-party/rust:itertools", + "fbsource//third-party/rust:once_cell", "fbsource//third-party/rust:serde", "fbsource//third-party/rust:serde_json", "fbsource//third-party/rust:tokio", "fbsource//third-party/rust:tracing", "fbsource//third-party/rust:uuid", + "//buck2/allocative/allocative:allocative", "//buck2/app/buck2_build_api:buck2_build_api", "//buck2/app/buck2_cli_proto:buck2_cli_proto", "//buck2/app/buck2_common:buck2_common", "//buck2/app/buck2_core:buck2_core", "//buck2/app/buck2_data:buck2_data", "//buck2/app/buck2_downward_api:buck2_downward_api", + "//buck2/app/buck2_error:buck2_error", + "//buck2/app/buck2_error_derive:buck2_error_derive", "//buck2/app/buck2_events:buck2_events", "//buck2/app/buck2_execute:buck2_execute", "//buck2/app/buck2_execute_impl:buck2_execute_impl", + "//buck2/app/buck2_futures:buck2_futures", "//buck2/app/buck2_grpc:buck2_grpc", "//buck2/app/buck2_node:buck2_node", "//buck2/app/buck2_server_ctx:buck2_server_ctx", "//buck2/app/buck2_test_api:buck2_test_api", "//buck2/app/buck2_util:buck2_util", "//buck2/dice/dice:dice", + "//buck2/gazebo/display_container:display_container", "//buck2/gazebo/dupe:dupe", - "//buck2/gazebo/gazebo:gazebo", "//buck2/host_sharing:host_sharing", - "//buck2/shed/more_futures:more_futures", + "//buck2/remote_execution:remote_execution", "//buck2/starlark-rust/starlark:starlark", "//common/rust/shed/sorted_vector_map:sorted_vector_map", ], diff --git a/app/buck2_test/Cargo.toml b/app/buck2_test/Cargo.toml index a70f2c533da1c..a5b78e7bb333d 100644 --- a/app/buck2_test/Cargo.toml +++ b/app/buck2_test/Cargo.toml @@ -1,46 +1,56 @@ [package] +description = "Buck V2 test runner" +edition = "2021" +license = { workspace = true } name = "buck2_test" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Buck V2 test runner" [dependencies] anyhow = { workspace = true } async-trait = { workspace = true } -dashmap = { workspace = true } -futures = { workspace = true } chrono = { workspace = true } +dashmap = { workspace = true } derive_more = { workspace = true } +display_container = { workspace = true } +futures = { workspace = true } indexmap = { workspace = true } -libc = { workspace = true } +itertools = { workspace = true } +once_cell = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } uuid = { workspace = true } +allocative = { workspace = true } dice = { workspace = true } -gazebo = { workspace = true } dupe = { workspace = true } host_sharing = { workspace = true } 
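The BUCK hunk above now pulls in `libc` only through Linux and macOS `os_deps`, and the Cargo manifest below mirrors that with a `[target.'cfg(unix)'.dependencies]` table. Code that touches the crate is gated the same way; a minimal sketch:

// Minimal sketch of code gated to match a `cfg(unix)`-only dependency.
#[cfg(unix)]
fn current_pid() -> i32 {
    // A libc call that exists on all unix platforms.
    unsafe { libc::getpid() }
}

#[cfg(not(unix))]
fn current_pid() -> i32 {
    // On non-unix targets `libc` is not even compiled in.
    std::process::id() as i32
}

fn main() {
    println!("pid = {}", current_pid());
}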
-more_futures = { workspace = true }
+remote_execution = { workspace = true }
 sorted_vector_map = { workspace = true }
 starlark = { workspace = true }
 buck2_build_api = { workspace = true }
+buck2_cli_proto = { workspace = true }
 buck2_common = { workspace = true }
 buck2_core = { workspace = true }
 buck2_data = { workspace = true }
-buck2_execute_impl = { workspace = true }
-buck2_node = { workspace = true }
-buck2_server_ctx = { workspace = true }
-buck2_cli_proto = { workspace = true }
 buck2_downward_api = { workspace = true }
+buck2_error = { workspace = true }
+buck2_error_derive = { workspace = true }
 buck2_events = { workspace = true }
 buck2_execute = { workspace = true }
-buck2_test_api = { workspace = true }
+buck2_execute_impl = { workspace = true }
+buck2_futures = { workspace = true }
 buck2_grpc = { workspace = true }
+buck2_node = { workspace = true }
+buck2_server_ctx = { workspace = true }
+buck2_test_api = { workspace = true }
 buck2_util = { workspace = true }
+[target.'cfg(unix)'.dependencies]
+libc = { workspace = true }
+
 [dev-dependencies]
 maplit = { workspace = true }
diff --git a/app/buck2_test/src/command.rs b/app/buck2_test/src/command.rs
index a69e582e0799a..4ca78746efb42 100644
--- a/app/buck2_test/src/command.rs
+++ b/app/buck2_test/src/command.rs
@@ -7,67 +7,79 @@
  * of this source tree.
  */
 
-use std::collections::HashMap;
 use std::collections::HashSet;
+use std::ops::ControlFlow;
 use std::path::PathBuf;
 use std::sync::Arc;
+use std::time::Duration;
 
 use anyhow::Context;
 use async_trait::async_trait;
+use buck2_build_api::actions::artifact::get_artifact_fs::GetArtifactFs;
 use buck2_build_api::analysis::calculation::RuleAnalysisCalculation;
-use buck2_build_api::artifact_groups::calculation::ArtifactGroupCalculation;
-use buck2_build_api::artifact_groups::ArtifactGroup;
-use buck2_build_api::interpreter::rule_defs::cmd_args::SimpleCommandLineArtifactVisitor;
-use buck2_build_api::interpreter::rule_defs::provider::collection::FrozenProviderCollection;
+use buck2_build_api::build::build_configured_label;
+use buck2_build_api::build::build_report::build_report_opts;
+use buck2_build_api::build::build_report::generate_build_report;
+use buck2_build_api::build::BuildConfiguredLabelOptions;
+use buck2_build_api::build::BuildEvent;
+use buck2_build_api::build::BuildTargetResult;
+use buck2_build_api::build::ConfiguredBuildEventVariant;
+use buck2_build_api::build::ProvidersToBuild;
+use buck2_build_api::interpreter::rule_defs::provider::collection::FrozenProviderCollectionValue;
 use buck2_build_api::interpreter::rule_defs::provider::test_provider::TestProvider;
+use buck2_build_api::materialize::MaterializationContext;
 use buck2_cli_proto::HasClientContext;
 use buck2_cli_proto::TestRequest;
 use buck2_cli_proto::TestResponse;
 use buck2_common::dice::cells::HasCellResolver;
-use buck2_common::dice::file_ops::HasFileOps;
 use buck2_common::events::HasEvents;
+use buck2_common::global_cfg_options::GlobalCfgOptions;
 use buck2_common::legacy_configs::dice::HasLegacyConfigs;
+use buck2_common::legacy_configs::key::BuckconfigKeyRef;
 use buck2_common::liveliness_observer::LivelinessGuard;
 use buck2_common::liveliness_observer::LivelinessObserver;
-use buck2_common::pattern::resolve::resolve_target_patterns;
+use buck2_common::liveliness_observer::LivelinessObserverExt;
+use buck2_common::liveliness_observer::TimeoutLivelinessObserver;
+use buck2_common::pattern::parse_from_cli::parse_patterns_from_cli_args;
+use buck2_common::pattern::resolve::ResolveTargetPatterns;
 use buck2_common::pattern::resolve::ResolvedPattern;
 use buck2_core::cells::name::CellName;
 use buck2_core::cells::CellResolver;
 use buck2_core::configuration::compatibility::MaybeCompatible;
-use buck2_core::env_helper::EnvHelper;
 use buck2_core::fs::fs_util;
-use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf;
-use buck2_core::fs::project_rel_path::ProjectRelativePathBuf;
+use buck2_core::fs::paths::abs_path::AbsPathBuf;
 use buck2_core::package::PackageLabel;
+use buck2_core::pattern::pattern::PackageSpec;
 use buck2_core::pattern::pattern_type::ConfiguredProvidersPatternExtra;
 use buck2_core::pattern::pattern_type::ProvidersPatternExtra;
-use buck2_core::pattern::PackageSpec;
 use buck2_core::provider::label::ConfiguredProvidersLabel;
 use buck2_core::provider::label::ProvidersLabel;
 use buck2_core::tag_result;
-use buck2_core::target::label::TargetLabel;
-use buck2_core::target::name::TargetName;
+use buck2_core::target::label::label::TargetLabel;
+use buck2_error::BuckErrorContext;
 use buck2_events::dispatch::console_message;
 use buck2_events::dispatch::with_dispatcher_async;
+use buck2_events::errors::create_error_report;
+use buck2_futures::cancellation::CancellationContext;
 use buck2_node::load_patterns::MissingTargetBehavior;
 use buck2_node::nodes::configured_frontend::ConfiguredTargetNodeCalculation;
-use buck2_node::nodes::eval_result::EvaluationResult;
 use buck2_node::nodes::frontend::TargetGraphCalculation;
 use buck2_node::target_calculation::ConfiguredTargetCalculation;
+use buck2_server_ctx::commands::send_target_cfg_event;
 use buck2_server_ctx::ctx::ServerCommandContextTrait;
+use buck2_server_ctx::global_cfg_options::global_cfg_options_from_client_context;
 use buck2_server_ctx::partial_result_dispatcher::NoPartialResult;
 use buck2_server_ctx::partial_result_dispatcher::PartialResultDispatcher;
-use buck2_server_ctx::pattern::parse_patterns_from_cli_args;
-use buck2_server_ctx::pattern::target_platform_from_client_context;
 use buck2_server_ctx::template::run_server_command;
 use buck2_server_ctx::template::ServerCommandTemplate;
 use buck2_server_ctx::test_command::TEST_COMMAND;
 use buck2_test_api::data::TestResult;
 use buck2_test_api::data::TestStatus;
 use buck2_test_api::protocol::TestExecutor;
-use dice::DiceComputations;
 use dice::DiceTransaction;
+use dice::LinearRecomputeDiceComputations;
 use dupe::Dupe;
+use dupe::IterDupedExt;
 use futures::channel::mpsc;
 use futures::future;
 use futures::future::BoxFuture;
@@ -75,43 +87,31 @@ use futures::future::FutureExt;
 use futures::stream::FuturesUnordered;
 use futures::stream::StreamExt;
 use futures::stream::TryStreamExt;
-use gazebo::prelude::*;
-use indexmap::indexset;
 use indexmap::IndexSet;
-use more_futures::cancellation::CancellationContext;
-use serde::Serialize;
+use itertools::Itertools;
 
 use crate::downward_api::BuckTestDownwardApi;
 use crate::executor_launcher::ExecutorLaunch;
 use crate::executor_launcher::ExecutorLauncher;
 use crate::executor_launcher::OutOfProcessTestExecutor;
-use crate::local_resource_registry::LocalResourceRegistry;
+use crate::executor_launcher::TestExecutorClientWrapper;
+use crate::local_resource_registry::HasLocalResourceRegistry;
 use crate::orchestrator::BuckTestOrchestrator;
 use crate::orchestrator::ExecutorMessage;
 use crate::session::TestSession;
 use crate::session::TestSessionOptions;
 use crate::translations::build_configured_target_handle;
 
-#[derive(Debug, Serialize)]
-pub(crate) struct TestReport {
-    project_root: AbsNormPathBuf,
-    outputs: HashMap<ConfiguredProvidersLabel, Vec<ProjectRelativePathBuf>>,
-}
-
 struct TestOutcome {
-    error_messages: Vec<String>,
+    errors: Vec<buck2_data::ErrorReport>,
     executor_report: ExecutorReport,
     executor_stdout: String,
     executor_stderr: String,
+    build_target_result: BuildTargetResult,
 }
 
 impl TestOutcome {
-    pub(crate) fn exit_code(&self) -> anyhow::Result<Option<i32>> {
-        if !self.error_messages.is_empty() {
-            // Some tests failed to build. Send `None` back to
-            // the client to delegate the exit code generation.
-            return Ok(None);
-        }
+    fn exit_code(&self) -> anyhow::Result<Option<i32>> {
         self.executor_report
             .exit_code
             .context("Test executor did not provide an exit code")
@@ -201,6 +201,11 @@ impl TestStatuses {
     }
 }
 
+#[derive(Debug, buck2_error_derive::Error)]
+#[buck2(tag = TestDeadlineExpired)]
+#[error("This test run exceeded the deadline that was provided")]
+struct DeadlineExpired;
+
 async fn test_command(
     ctx: &dyn ServerCommandContextTrait,
     partial_result_dispatcher: PartialResultDispatcher<NoPartialResult>,
@@ -227,15 +232,27 @@ impl ServerCommandTemplate for TestServerCommand {
     type PartialResult = NoPartialResult;
 
     fn is_success(&self, response: &Self::Response) -> bool {
-        matches!(response.exit_code, Some(0))
+        matches!(response.exit_code, Some(0)) && response.errors.is_empty()
     }
 
-    fn end_event(&self, _response: &anyhow::Result<Self::Response>) -> Self::EndEvent {
+    fn end_event(&self, _response: &buck2_error::Result<Self::Response>) -> Self::EndEvent {
         buck2_data::TestCommandEnd {
-            unresolved_target_patterns: self.req.target_patterns.clone(),
+            unresolved_target_patterns: self
+                .req
+                .target_patterns
+                .iter()
+                .map(|p| buck2_data::TargetPattern { value: p.clone() })
+                .collect(),
         }
     }
 
+    fn additional_telemetry_errors(
+        &self,
+        response: &Self::Response,
+    ) -> Vec<buck2_data::ErrorReport> {
+        response.errors.clone()
+    }
+
     async fn command(
         &self,
         server_ctx: &dyn ServerCommandContextTrait,
@@ -258,13 +275,26 @@ async fn test(
     let working_dir_cell = cell_resolver.find(cwd)?;
 
     let client_ctx = request.client_context()?;
-    let global_target_platform =
-        target_platform_from_client_context(client_ctx, server_ctx, &mut ctx).await?;
+    let global_cfg_options = global_cfg_options_from_client_context(
+        request
+            .target_cfg
+            .as_ref()
+            .internal_error_anyhow("target_cfg must be set")?,
+        server_ctx,
+        &mut ctx,
+    )
+    .await?;
 
     // Get the test runner from the config. Note that we use a different key from v1 since the API
     // is completely different, so there is not expectation that the same binary works for both.
     let test_executor_config = ctx
-        .get_legacy_config_property(cell_resolver.root_cell(), "test", "v2_test_executor")
+        .get_legacy_config_property(
+            cell_resolver.root_cell(),
+            BuckconfigKeyRef {
+                section: "test",
+                property: "v2_test_executor",
+            },
+        )
         .await?
         .filter(|s| !s.is_empty());
 
@@ -272,7 +302,8 @@ async fn test(
         Some(config) => {
             let test_executor = post_process_test_executor(config.as_ref())
                 .with_context(|| format!("Invalid `test.v2_test_executor`: {}", config))?;
-            let test_executor_args = Vec::new();
+            let test_executor_args =
+                vec!["--buck-trace-id".to_owned(), client_ctx.trace_id.clone()];
             (test_executor, test_executor_args)
         }
         None => {
@@ -287,8 +318,7 @@ async fn test(
         parse_patterns_from_cli_args(&mut ctx, &request.target_patterns, cwd).await?;
     server_ctx.log_target_pattern(&parsed_patterns);
 
-    let resolved_pattern =
-        resolve_target_patterns(&cell_resolver, &parsed_patterns, &ctx.file_ops()).await?;
+    let resolved_pattern = ResolveTargetPatterns::resolve(&mut ctx, &parsed_patterns).await?;
 
     let launcher: Box<dyn ExecutorLauncher> = Box::new(OutOfProcessTestExecutor {
         executable: test_executor,
@@ -311,10 +341,18 @@ async fn test(
         .build_opts
         .as_ref()
         .expect("should have build options");
+
+    let timeout = request
+        .timeout
+        .as_ref()
+        .map(|t| t.clone().try_into())
+        .transpose()
+        .context("Invalid `duration`")?;
+
     let test_outcome = test_targets(
-        ctx,
+        ctx.dupe(),
         resolved_pattern,
-        global_target_platform,
+        global_cfg_options,
         request.test_executor_args.clone(),
         Arc::new(TestLabelFiltering::new(
             request.included_labels.clone(),
@@ -324,13 +362,21 @@ async fn test(
        )),
         &*launcher,
         session,
-        cell_resolver,
+        cell_resolver.dupe(),
         working_dir_cell,
         build_opts.skip_incompatible_targets,
         MissingTargetBehavior::from_skip(build_opts.skip_missing_targets),
+        timeout,
+        request.ignore_tests_attribute,
     )
     .await?;
 
+    send_target_cfg_event(
+        server_ctx.events(),
+        test_outcome.build_target_result.configured.keys(),
+        &request.target_cfg,
+    );
+
     // TODO(bobyf) remap exit code for buck reserved exit code
     let exit_code = test_outcome.exit_code().context("No exit code available")?;
@@ -379,20 +425,39 @@ async fn test(
         ),
     };
 
+    let serialized_build_report = if build_opts.unstable_print_build_report {
+        let artifact_fs = ctx.get_artifact_fs().await?;
+        let build_report_opts = build_report_opts(&mut ctx, &cell_resolver, build_opts).await?;
+
+        generate_build_report(
+            build_report_opts,
+            &artifact_fs,
+            &cell_resolver,
+            server_ctx.project_root(),
+            cwd,
+            server_ctx.events().trace_id(),
+            &test_outcome.build_target_result.configured,
+            &test_outcome.build_target_result.other_errors,
+        )?
+    } else {
+        None
+    };
+
     Ok(TestResponse {
         exit_code,
-        error_messages: test_outcome.error_messages,
+        errors: test_outcome.errors,
         test_statuses: Some(test_statuses),
         executor_stdout: test_outcome.executor_stdout,
         executor_stderr: test_outcome.executor_stderr,
         executor_info_messages: test_outcome.executor_report.info_messages,
+        serialized_build_report,
     })
 }
 
 async fn test_targets(
     ctx: DiceTransaction,
     pattern: ResolvedPattern<ConfiguredProvidersPatternExtra>,
-    global_target_platform: Option<TargetLabel>,
+    global_cfg_options: GlobalCfgOptions,
     external_runner_args: Vec<String>,
     label_filtering: Arc<TestLabelFiltering>,
     launcher: &dyn ExecutorLauncher,
@@ -401,9 +466,18 @@ async fn test_targets(
     working_dir_cell: CellName,
     skip_incompatible_targets: bool,
     missing_target_behavior: MissingTargetBehavior,
+    timeout: Option<Duration>,
+    ignore_tests_attribute: bool,
 ) -> anyhow::Result<TestOutcome> {
     let session = Arc::new(session);
-    let (liveliness_observer, _guard) = LivelinessGuard::create();
+
+    let (mut liveliness_observer, _guard) = LivelinessGuard::create();
+    let timeout_observer = timeout.map(|timeout| {
+        Arc::new(TimeoutLivelinessObserver::new(timeout)) as Arc<dyn LivelinessObserver>
+    });
+    if let Some(timeout_observer) = &timeout_observer {
+        liveliness_observer = Arc::new(liveliness_observer.and(timeout_observer.dupe())) as _;
+    }
 
     let tpx_args = {
         let mut args = vec![
@@ -423,7 +497,7 @@ async fn test_targets(
 
     let res = tag_result!(
         "executor_launch_failed",
-        res,
+        res.map_err(|e| e.into()),
         quiet: true,
         daemon_in_memory_state_is_corrupted: true,
         task: false
@@ -436,18 +510,21 @@ async fn test_targets(
     } = res;
 
     let test_executor = Arc::new(test_executor) as Arc<dyn TestExecutor>;
+    let test_executor_wrapper = TestExecutorClientWrapper::new(test_executor.dupe());
 
     let (test_status_sender, test_status_receiver) = mpsc::unbounded();
 
     let test_server = tokio::spawn({
         let test_status_sender = test_status_sender.clone();
+        let liveliness_observer = liveliness_observer.dupe();
         with_dispatcher_async(
             ctx.per_transaction_data().get_dispatcher().dupe(),
             // NOTE: This is will cancel if the liveliness guard indicates we should.
             async move {
                 // Spawn our server to listen to the test runner's requests for execution.
-                let local_resource_registry = Arc::new(LocalResourceRegistry::new());
+                // Keep wrapper alive for the lifetime of the executor to ensure it stays registered.
+                let _test_executor_wrapper = test_executor_wrapper;
 
                 let orchestrator = BuckTestOrchestrator::new(
                     ctx.dupe(),
@@ -455,7 +532,6 @@ async fn test_targets(
                     liveliness_observer.dupe(),
                     test_status_sender,
                     CancellationContext::never_cancelled(), // sending the orchestrator directly to be spawned by make_server, which never calls it.
-                    local_resource_registry.dupe(),
                 )
                 .await
                 .context("Failed to create a BuckTestOrchestrator")?;
@@ -465,12 +541,13 @@ async fn test_targets(
                 let mut driver = TestDriver::new(TestDriverState {
                     ctx: &ctx,
                     label_filtering: &label_filtering,
-                    global_target_platform: &global_target_platform,
+                    global_cfg_options: &global_cfg_options,
                     session: &session,
                     test_executor: &test_executor,
                     cell_resolver: &cell_resolver,
                     working_dir_cell,
                     missing_target_behavior,
+                    ignore_tests_attribute,
                 });
 
                 driver.push_pattern(
@@ -499,7 +576,6 @@ async fn test_targets(
                     .context("Failed to notify test executor of end-of-tests")?;
 
                 // Wait for the tests to finish running.
-
                 let test_statuses = test_status_receiver
                     .try_fold(ExecutorReport::default(), |mut acc, result| {
                         acc.ingest(&result);
@@ -510,20 +586,28 @@ async fn test_targets(
                 // Shutdown our server. This is technically not *required* since dropping it would shut it
                 // down implicitly, but let's do it anyway so we can collect any errors.
-
                 server_handle
                     .shutdown()
                     .await
                     .context("Failed to shutdown orchestrator")?;
 
+                let local_resource_registry = ctx.get_local_resource_registry();
+
                 local_resource_registry
                     .release_all_resources()
                     .await
                     .context("Failed to release local resources")?;
 
-                // And finally return our results;
+                // Process the build errors we've collected.
+                let error_stream = futures::stream::iter(driver.error_events);
+                let error_target_result = BuildTargetResult::collect_stream(error_stream, false)
+                    .await
+                    .context("Failed to collect error events")?;
 
-                anyhow::Ok((driver.build_errors, test_statuses))
+                driver.build_target_result.extend(error_target_result);
+
+                // And finally return our results;
+                anyhow::Ok((driver.build_target_result, test_statuses))
             },
         )
     });
@@ -547,15 +631,28 @@ async fn test_targets(
     )));
 
     // TODO(bobyf, torozco) we can use cancellation handle here instead of liveliness observer
-    let (build_errors, executor_report) = test_server
+    let (build_target_result, executor_report) = test_server
         .await
         .context("Failed to collect executor report")??;
 
+    let mut errors = convert_error(&build_target_result)
+        .iter()
+        .map(create_error_report)
+        .unique_by(|e| e.message.clone())
+        .collect::<Vec<_>>();
+
+    if let Some(timeout_observer) = timeout_observer {
+        if !timeout_observer.is_alive().await {
+            errors.push(create_error_report(&DeadlineExpired.into()));
+        }
+    }
+
     Ok(TestOutcome {
-        error_messages: build_errors,
+        errors,
         executor_stdout: executor_output.stdout,
         executor_stderr: executor_output.stderr,
         executor_report,
+        build_target_result,
     })
 }
 
@@ -569,40 +666,48 @@ enum TestDriverTask {
         label: ProvidersLabel,
         skippable: bool,
     },
+    BuildTarget {
+        label: ConfiguredProvidersLabel,
+    },
     TestTarget {
         label: ConfiguredProvidersLabel,
+        providers: FrozenProviderCollectionValue,
+        build_target_result: BuildTargetResult,
     },
 }
 
 #[derive(Copy, Clone, Dupe)]
-pub(crate) struct TestDriverState<'a, 'e> {
-    ctx: &'a DiceComputations,
+struct TestDriverState<'a, 'e> {
+    ctx: &'a DiceTransaction,
     label_filtering: &'a Arc<TestLabelFiltering>,
-    global_target_platform: &'a Option<TargetLabel>,
+    global_cfg_options: &'a GlobalCfgOptions,
     session: &'a TestSession,
     test_executor: &'a Arc<dyn TestExecutor>,
     cell_resolver: &'a CellResolver,
     working_dir_cell: CellName,
     missing_target_behavior: MissingTargetBehavior,
+    ignore_tests_attribute: bool,
 }
 
 /// Maintains the state of an ongoing test execution.
 struct TestDriver<'a, 'e> {
     state: TestDriverState<'a, 'e>,
-    work: FuturesUnordered<BoxFuture<'a, anyhow::Result<Vec<TestDriverTask>>>>,
+    work: FuturesUnordered<BoxFuture<'a, ControlFlow<Vec<BuildEvent>, Vec<TestDriverTask>>>>,
     labels_configured: HashSet<(ProvidersLabel, bool)>,
     labels_tested: HashSet<ConfiguredProvidersLabel>,
-    build_errors: Vec<String>,
+    error_events: Vec<BuildEvent>,
+    build_target_result: BuildTargetResult,
 }
 
 impl<'a, 'e> TestDriver<'a, 'e> {
-    pub(crate) fn new(state: TestDriverState<'a, 'e>) -> Self {
+    fn new(state: TestDriverState<'a, 'e>) -> Self {
         Self {
             state,
             work: FuturesUnordered::new(),
             labels_configured: HashSet::new(),
             labels_tested: HashSet::new(),
-            build_errors: Vec::new(),
+            error_events: Vec::new(),
+            build_target_result: BuildTargetResult::new(),
         }
     }
 
@@ -613,11 +718,13 @@ impl<'a, 'e> TestDriver<'a, 'e> {
         skip_incompatible_targets: bool,
     ) {
         for (package, spec) in pattern.specs.into_iter() {
-            let fut = future::ready(anyhow::Ok(vec![TestDriverTask::InterpretTarget {
-                package,
-                spec,
-                skip_incompatible_targets,
-            }]))
+            let fut = future::ready(ControlFlow::Continue(vec![
+                TestDriverTask::InterpretTarget {
+                    package,
+                    spec,
+                    skip_incompatible_targets,
+                },
+            ]))
             .boxed();
 
             self.work.push(fut);
@@ -628,7 +735,7 @@ impl<'a, 'e> TestDriver<'a, 'e> {
     async fn drive_to_completion(&mut self) {
         while let Some(tasks) = self.work.next().await {
             match tasks {
-                Ok(tasks) => {
+                ControlFlow::Continue(tasks) => {
                     for task in tasks {
                         match task {
                             TestDriverTask::InterpretTarget {
@@ -641,16 +748,20 @@ impl<'a, 'e> TestDriver<'a, 'e> {
                                 package,
                                 spec,
                                 skip_incompatible_targets,
                             } => {
                                 self.interpret_targets(package, spec, skip_incompatible_targets);
                             }
                             TestDriverTask::ConfigureTarget { label, skippable } => {
                                 self.configure_target(label, skippable);
                             }
-                            TestDriverTask::TestTarget { label } => {
-                                self.test_target(label);
+                            TestDriverTask::BuildTarget { label } => {
+                                self.build_target(label);
+                            }
+                            TestDriverTask::TestTarget {
+                                label,
+                                providers,
+                                build_target_result,
+                            } => {
+                                self.test_target(label, providers, build_target_result);
                             }
                         }
                     }
                 }
-                Err(e) => {
-                    // TODO(brasselsprouts): filter out duplicate errors.
-                    self.build_errors.push(format!("{:#}", e));
-                }
+                ControlFlow::Break(events) => self.error_events.extend(events),
             }
         }
     }
@@ -665,15 +776,70 @@ impl<'a, 'e> TestDriver<'a, 'e> {
         self.work.push(
             async move {
-                let res = state.ctx.get_interpreter_results(package.dupe()).await?;
-                let SpecTargets { labels, skippable } = spec_to_targets(
-                    spec,
-                    res,
-                    skip_incompatible_targets,
-                    state.missing_target_behavior,
-                )?;
+                let res = match state
+                    .ctx
+                    .clone()
+                    .get_interpreter_results(package.dupe())
+                    .await
+                {
+                    Ok(res) => res,
+                    Err(e) => {
+                        let e: buck2_error::Error = e.into();
+                        let mut events = Vec::new();
+                        // Try to associate the error to concrete targets, if possible
+                        match spec {
+                            PackageSpec::Targets(targets) => {
+                                for (target, providers) in targets {
+                                    let label = Some(ProvidersLabel::new(
+                                        TargetLabel::new(package.dupe(), target.as_ref()),
+                                        providers.providers,
+                                    ));
+
+                                    events.push(BuildEvent::OtherError {
+                                        label,
+                                        err: e.dupe(),
+                                    });
+                                }
+                            }
+                            PackageSpec::All => events.push(BuildEvent::OtherError {
+                                label: None,
+                                err: e,
+                            }),
+                        };
+
+                        return ControlFlow::Break(events);
+                    }
+                };
+
+                // Indicates whether this should be skipped if incompatible.
+                let skippable = match spec {
+                    PackageSpec::Targets(..) => skip_incompatible_targets,
+                    PackageSpec::All => true,
+                };
+
+                let (targets, missing) = res.apply_spec(spec);
+
+                if let Some(missing) = missing {
+                    match state.missing_target_behavior {
+                        MissingTargetBehavior::Fail => {
+                            let err = missing.into_errors().0;
+                            let events = vec![BuildEvent::OtherError {
+                                label: Some(ProvidersLabel::new(
+                                    TargetLabel::new(err.package.dupe(), err.target.as_ref()),
+                                    buck2_core::provider::label::ProvidersName::Default,
+                                )),
+                                err: err.into(),
+                            }];
+
+                            return ControlFlow::Break(events);
+                        }
+                        MissingTargetBehavior::Warn => {
+                            console_message(missing.missing_targets_warning());
+                        }
+                    }
+                }
 
-                let labels = labels.into_map(|(target_name, providers_pattern)| {
+                let labels = targets.into_keys().map(|(target_name, providers_pattern)| {
                     providers_pattern.into_providers_label(package.dupe(), target_name.as_ref())
                 });
 
@@ -682,41 +848,69 @@ impl<'a, 'e> TestDriver<'a, 'e> {
                     .map(|label| TestDriverTask::ConfigureTarget { label, skippable })
                     .collect();
 
-                anyhow::Ok(work)
+                ControlFlow::Continue(work)
             }
             .boxed(),
         );
     }
 
     fn configure_target(&mut self, label: ProvidersLabel, skippable: bool) {
-        if !self.labels_configured.insert((label.clone(), skippable)) {
+        if !self.labels_configured.insert((label.dupe(), skippable)) {
             return;
         }
 
         let state = self.state;
 
         let fut = async move {
-            let label = state
+            let label = match state
                 .ctx
-                .get_configured_provider_label(&label, state.global_target_platform.as_ref())
-                .await?;
+                .clone()
+                .get_configured_provider_label(&label, state.global_cfg_options)
+                .await
+            {
+                Ok(label) => label,
+                Err(e) => {
+                    return ControlFlow::Break(vec![BuildEvent::OtherError {
+                        label: Some(label),
+                        err: e.into(),
+                    }]);
+                }
+            };
 
-            let node = state.ctx.get_configured_target_node(label.target()).await?;
+            let node = match state
+                .ctx
+                .clone()
+                .get_configured_target_node(label.target())
+                .await
+            {
+                Ok(node) => node,
+                Err(e) => {
+                    return ControlFlow::Break(vec![BuildEvent::new_configured(
+                        label,
+                        ConfiguredBuildEventVariant::Error { err: e.into() },
+                    )]);
+                }
+            };
 
             let node = match node {
                 MaybeCompatible::Incompatible(reason) => {
                     if skippable {
                         eprintln!("{}", reason.skipping_message(label.target()));
-                        return Ok(vec![]);
+                        return ControlFlow::Continue(vec![]);
                    } else {
-                        return Err(reason.to_err());
+                        return ControlFlow::Break(vec![BuildEvent::new_configured(
+                            label,
+                            ConfiguredBuildEventVariant::Error {
+                                err: reason.to_err().into(),
+                            },
+                        )]);
                    }
                }
                MaybeCompatible::Compatible(node) => node,
            };
 
-            // Test this: it's compatible.
-            let mut work = vec![TestDriverTask::TestTarget { label }];
+            // Build and then test this: it's compatible.
+            let mut work = vec![TestDriverTask::BuildTarget { label }];
 
             // If this node is a forward, it'll get flattened when we do analysis and run the
             // test later, but its `tests` attribute here will not be, and that means we'll
@@ -725,40 +919,94 @@ impl<'a, 'e> TestDriver<'a, 'e> {
             let node = node.forward_target().unwrap_or(&node);
 
             // Look up `tests` in the the target we're testing, and if we find any tests, add them to the test backlog.
-            for test in node.tests() {
-                work.push(TestDriverTask::ConfigureTarget {
-                    label: test.unconfigured(),
-                    // Historically `skippable: false` is what we enforced here, perhaps that
-                    // should change.
-                    skippable: false,
-                });
+            if !state.ignore_tests_attribute {
+                for test in node.tests() {
+                    work.push(TestDriverTask::ConfigureTarget {
+                        label: test.unconfigured(),
+                        // Historically `skippable: false` is what we enforced here, perhaps that
+                        // should change.
+                        skippable: false,
+                    });
+                }
             }
 
-            anyhow::Ok(work)
+            ControlFlow::Continue(work)
         }
         .boxed();
 
         self.work.push(fut);
     }
 
-    fn test_target(&mut self, label: ConfiguredProvidersLabel) {
-        if !self.labels_tested.insert(label.clone()) {
+    fn build_target(&mut self, label: ConfiguredProvidersLabel) {
+        if !self.labels_tested.insert(label.dupe()) {
             return;
         }
 
         let state = self.state;
+        let build_label = label.dupe();
 
         let fut = async move {
-            test_target(
-                state.ctx,
+            let ctx = &mut state.ctx.clone();
+
+            let result = match ctx
+                .with_linear_recompute(|ctx| async move {
+                    build_target_result(&ctx, &state.label_filtering, build_label).await
+                })
+                .await
+            {
+                Ok(result) => result,
+                Err(e) => {
+                    return ControlFlow::Break(vec![BuildEvent::new_configured(
+                        label,
+                        ConfiguredBuildEventVariant::Error { err: e.into() },
+                    )]);
+                }
+            };
+
+            ControlFlow::Continue(vec![TestDriverTask::TestTarget {
                 label,
+                build_target_result: result.0,
+                providers: result.1,
+            }])
+        }
+        .boxed();
+
+        self.work.push(fut);
+    }
+
+    fn test_target(
+        &mut self,
+        label: ConfiguredProvidersLabel,
+        providers: FrozenProviderCollectionValue,
+        build_target_result: BuildTargetResult,
+    ) {
+        let should_test = !build_target_result.build_failed && !build_target_result.is_empty();
+        self.build_target_result.extend(build_target_result);
+
+        // Build has failed, no need to continue with test.
+        if !should_test {
+            return;
+        }
+
+        let state = self.state;
+        let fut = async move {
+            if let Err(e) = test_target(
+                label.dupe(),
+                providers,
                 state.test_executor.dupe(),
                 state.session,
                 state.label_filtering.dupe(),
                 state.cell_resolver,
                 state.working_dir_cell,
             )
-            .await?;
-            anyhow::Ok(vec![])
+            .await
+            {
+                return ControlFlow::Break(vec![BuildEvent::new_configured(
+                    label,
+                    ConfiguredBuildEventVariant::Error { err: e.into() },
+                )]);
+            }
+
+            ControlFlow::Continue(vec![])
         }
         .boxed();
 
@@ -766,59 +1014,66 @@ impl<'a, 'e> TestDriver<'a, 'e> {
     }
 }
 
-struct SpecTargets {
-    labels: Vec<(TargetName, ProvidersPatternExtra)>,
-    /// Indicates whether this should be skipped if incompatible.
-    skippable: bool,
-}
-
-fn spec_to_targets(
-    spec: PackageSpec<ProvidersPatternExtra>,
-    res: Arc<EvaluationResult>,
-    skip_incompatible_targets: bool,
-    missing_target_behavior: MissingTargetBehavior,
-) -> anyhow::Result<SpecTargets> {
-    let skippable = match spec {
-        PackageSpec::Targets(..) => skip_incompatible_targets,
-        PackageSpec::All => true,
-    };
-
-    let (targets, missing) = res.apply_spec(spec);
+async fn build_target_result(
+    ctx: &LinearRecomputeDiceComputations<'_>,
+    label_filtering: &TestLabelFiltering,
+    label: ConfiguredProvidersLabel,
+) -> anyhow::Result<(BuildTargetResult, FrozenProviderCollectionValue)> {
+    // NOTE: We fail if we hit an incompatible target here. This can happen if we reach an
+    // incompatible target via `tests = [...]`. This should perhaps change, but that's how it works
+    // in v1: https://fb.workplace.com/groups/buckeng/posts/8520953297953210
+    let providers = ctx
+        .get()
+        .get_providers(&label)
+        .await?
+        .require_compatible()?;
+    let collections = providers.provider_collection();
 
-    if let Some(missing) = missing {
-        match missing_target_behavior {
-            MissingTargetBehavior::Fail => {
-                return Err(missing.into_error());
-            }
-            MissingTargetBehavior::Warn => {
-                console_message(missing.missing_targets_warning());
+    let build_target_result = match <dyn TestProvider>::from_collection(collections) {
+        Some(test_info) => {
+            if skip_build_based_on_labels(test_info, label_filtering) {
+                return Ok((BuildTargetResult::new(), providers));
             }
-        }
-    }
+            let stream = build_configured_label(
+                &ctx,
+                &MaterializationContext::Skip,
+                label,
+                &ProvidersToBuild {
+                    default: false,
+                    default_other: false,
+                    run: false,
+                    tests: true,
+                },
+                BuildConfiguredLabelOptions {
+                    skippable: false,
+                    want_configured_graph_size: false,
+                },
+            )
+            .await
+            .map(BuildEvent::Configured);
 
-    Ok(SpecTargets {
-        labels: targets.into_keys().collect(),
-        skippable,
-    })
+            BuildTargetResult::collect_stream(stream, false).await?
+        }
+        None => {
+            // not a test
+            BuildTargetResult::new()
+        }
+    };
+    Ok((build_target_result, providers))
 }
 
 async fn test_target(
-    ctx: &DiceComputations,
     target: ConfiguredProvidersLabel,
+    providers: FrozenProviderCollectionValue,
     test_executor: Arc<dyn TestExecutor>,
     session: &TestSession,
     label_filtering: Arc<TestLabelFiltering>,
     cell_resolver: &CellResolver,
     working_dir_cell: CellName,
 ) -> anyhow::Result<Option<()>> {
-    // NOTE: We fail if we hit an incompatible target here. This can happen if we reach an
-    // incompatible target via `tests = [...]`. This should perhaps change, but that's how it works
-    // in v1: https://fb.workplace.com/groups/buckeng/posts/8520953297953210
-    let frozen_providers = ctx.get_providers(&target).await?.require_compatible()?;
-    let providers = frozen_providers.provider_collection();
-    build_artifacts(ctx, providers, &label_filtering).await?;
+    let collection = providers.provider_collection();
 
-    let fut = match <dyn TestProvider>::from_collection(providers) {
+    let fut = match <dyn TestProvider>::from_collection(collection) {
         Some(test_info) => {
             if skip_run_based_on_labels(test_info, &label_filtering) {
                 return Ok(None);
@@ -846,6 +1101,18 @@ async fn test_target(
     fut.await
 }
 
+fn convert_error(build_result: &BuildTargetResult) -> Vec<buck2_error::Error> {
+    let mut errors = Vec::new();
+    errors.extend(build_result.other_errors.values().flatten().duped());
+
+    for v in build_result.configured.values().flatten() {
+        errors.extend(v.errors.iter().duped());
+        errors.extend(v.outputs.iter().filter_map(|x| x.as_ref().err()).duped());
+    }
+
+    errors
+}
+
 fn skip_run_based_on_labels(
     provider: &dyn TestProvider,
     label_filtering: &TestLabelFiltering,
@@ -861,41 +1128,6 @@ fn skip_build_based_on_labels(
     !label_filtering.build_filtered_targets && skip_run_based_on_labels(provider, label_filtering)
 }
 
-async fn build_artifacts(
-    ctx: &DiceComputations,
-    providers: &FrozenProviderCollection,
-    label_filtering: &TestLabelFiltering,
-) -> anyhow::Result<()> {
-    fn get_artifacts_to_build(
-        label_filtering: &TestLabelFiltering,
-        providers: &FrozenProviderCollection,
-    ) -> anyhow::Result<IndexSet<ArtifactGroup>> {
-        Ok(match <dyn TestProvider>::from_collection(providers) {
-            Some(provider) => {
-                if skip_build_based_on_labels(provider, label_filtering) {
-                    return Ok(indexset![]);
-                }
-                let mut artifact_visitor = SimpleCommandLineArtifactVisitor::new();
-                provider.visit_artifacts(&mut artifact_visitor)?;
-                artifact_visitor.inputs
-            }
-            None => {
-                // not a test
-                indexset![]
-            }
-        })
-    }
-    let artifacts_to_build = get_artifacts_to_build(label_filtering, providers)?;
-    // build the test target first
-    future::try_join_all(
-        artifacts_to_build
-            .iter()
-            .map(|input| ctx.ensure_artifact_group(input)),
-    )
-    .await?;
-    Ok(())
-}
-
 fn run_tests<'a, 'b>(
     test_executor: Arc<dyn TestExecutor>,
     providers_label: ConfiguredProvidersLabel,
@@ -905,7 +1137,7 @@ fn run_tests<'a, 'b>(
     working_dir_cell: CellName,
 ) -> BoxFuture<'a, anyhow::Result<()>> {
     let maybe_handle =
-        build_configured_target_handle(providers_label.clone(), session, cell_resolver);
+        build_configured_target_handle(providers_label.dupe(), session, cell_resolver);
 
     match maybe_handle {
         Ok(handle) => {
@@ -987,25 +1219,17 @@ impl TestLabelFiltering {
 fn post_process_test_executor(s: &str) -> anyhow::Result<PathBuf> {
     match s.split_once("$BUCK2_BINARY_DIR/") {
         Some(("", rest)) => {
-            let exe = std::env::current_exe().context("Cannot get Buck2 executable")?;
-            let exe = fs_util::canonicalize(exe)
-                .context("Failed to canonicalize path to Buck2 executable")?;
+            let exe =
+                AbsPathBuf::new(std::env::current_exe().context("Cannot get Buck2 executable")?)?;
+            let exe = fs_util::canonicalize(&exe).context(
+                "Failed to canonicalize path to Buck2 executable. Try running `buck2 kill`.",
+            )?;
+            let exe = exe.as_abs_path();
 
-            let mut exe_dir = exe
+            let exe_dir = exe
                 .parent()
                 .context("Buck2 executable directory has no parent")?;
 
-            // We allow overriding the dir here. This is used for buck2.sh
-            static BINARY_DIR_RELATIVE_OVERRIDE: EnvHelper<PathBuf> =
-                EnvHelper::new("BUCK2_BINARY_DIR_RELATIVE_TO");
-
-            let overridden;
-
-            if let Some(v) = BINARY_DIR_RELATIVE_OVERRIDE.get()? {
-                overridden = exe_dir.join(v.as_path());
-                exe_dir = &overridden;
-            }
-
             Ok(exe_dir.join(rest).to_path_buf())
         }
         Some(..) => Err(anyhow::anyhow!("Invalid value: {}", s)),
diff --git a/app/buck2_test/src/executor_launcher.rs b/app/buck2_test/src/executor_launcher.rs
index 51659a3b63a6c..29c35a008ccc9 100644
--- a/app/buck2_test/src/executor_launcher.rs
+++ b/app/buck2_test/src/executor_launcher.rs
@@ -7,24 +7,30 @@
  * of this source tree.
  */
 
+use std::collections::HashMap;
 use std::path::PathBuf;
 use std::pin::Pin;
+use std::sync::Arc;
+use std::sync::Mutex;
 use std::task::Context;
 use std::task::Poll;
 
 use anyhow::Context as _;
 use async_trait::async_trait;
+use buck2_core::buck2_env_anyhow;
 use buck2_events::dispatch::EventDispatcher;
 use buck2_grpc::DuplexChannel;
 use buck2_grpc::ServerHandle;
 use buck2_test_api::grpc::spawn_orchestrator_server;
 use buck2_test_api::grpc::TestExecutorClient;
+use buck2_test_api::protocol::TestExecutor;
 use derive_more::Display;
 use dupe::Dupe;
 use futures::future::try_join3;
 use futures::future::BoxFuture;
 use futures::future::Future;
 use futures::future::FutureExt;
+use once_cell::sync::Lazy;
 use tokio::io::AsyncRead;
 use tokio::io::AsyncWrite;
 use tokio::process::Child;
@@ -32,6 +38,35 @@ use tokio::process::Child;
 use crate::downward_api::BuckTestDownwardApi;
 use crate::orchestrator::BuckTestOrchestrator;
 
+static TEST_EXECUTOR_CLIENTS: Lazy<Mutex<HashMap<u16, Arc<dyn TestExecutor>>>> =
+    Lazy::new(|| Mutex::new(HashMap::new()));
+
+pub struct TestExecutorClientWrapper(u16);
+impl TestExecutorClientWrapper {
+    pub fn new(client: Arc<dyn TestExecutor>) -> Self {
+        let mut clients = TEST_EXECUTOR_CLIENTS.lock().unwrap();
+        let id = clients.keys().max().unwrap_or(&0) + 1;
+        tracing::debug!(id = id, "Adding test executor");
+        clients.insert(id, client);
+        Self(id)
+    }
+}
+impl Drop for TestExecutorClientWrapper {
+    fn drop(&mut self) {
+        tracing::debug!(id = self.0, "Removing test executor");
+        TEST_EXECUTOR_CLIENTS.lock().unwrap().remove(&self.0);
+    }
+}
+
+pub fn get_all_test_executors() -> Vec<Arc<dyn TestExecutor>> {
+    TEST_EXECUTOR_CLIENTS
+        .lock()
+        .unwrap()
+        .iter()
+        .map(|(_, exe)| exe.clone())
+        .collect()
+}
+
 pub struct ExecutorLaunch {
     pub handle: ExecutorFuture,
     pub client: TestExecutorClient,
@@ -76,7 +111,7 @@ impl Future for ExecutorFuture {
 
 #[derive(Debug, Display)]
 #[display(
-    fmt = "Test executor exited unexpectedly with status {}.\nStdout:\n{}\nStderr:\n{}",
+    "Test executor exited unexpectedly with status {}.\nStdout:\n{}\nStderr:\n{}",
     exit_code,
     stdout,
     stderr
@@ -101,14 +136,13 @@ pub struct OutOfProcessTestExecutor {
 #[async_trait]
 impl ExecutorLauncher for OutOfProcessTestExecutor {
     async fn launch(&self, tpx_args: Vec<String>) -> anyhow::Result<ExecutorLaunch> {
-        #[cfg(unix)]
-        {
-            use buck2_core::env_helper::EnvHelper;
-
-            static BUCK2_TEST_TPX_USE_TCP: EnvHelper<bool> =
-                EnvHelper::new("BUCK2_TEST_TPX_USE_TCP");
-            let use_tcp = BUCK2_TEST_TPX_USE_TCP.get_copied()?.unwrap_or_default();
-            if !use_tcp {
+        // Declare outside of `cfg(unix)` so `buck2 help-env` would include it on Windows
+        // even if it is no-op on Windows.
+        let use_tcp = buck2_env_anyhow!("BUCK2_TEST_TPX_USE_TCP", bool)?;
+
+        if !use_tcp {
+            #[cfg(unix)]
+            {
                 return spawn_orchestrator(
                     crate::unix::executor::spawn(
                         self.executable.as_ref(),
diff --git a/app/buck2_test/src/lib.rs b/app/buck2_test/src/lib.rs
index 453f1b4d4a82c..25dc907b01fed 100644
--- a/app/buck2_test/src/lib.rs
+++ b/app/buck2_test/src/lib.rs
@@ -7,17 +7,19 @@
  * of this source tree.
  */
 
-//! Implementation of test running.
+#![feature(error_generic_member_access)]
+#![feature(used_with_arg)]
 
-#![feature(async_closure)]
+//! Implementation of test running.
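The `TestExecutorClientWrapper` added to `executor_launcher.rs` above ties registration to a value's lifetime: an id is allocated when the wrapper is constructed, and the `Drop` impl removes the entry again, so `get_all_test_executors` can only ever observe live clients. A minimal, self-contained sketch of the same RAII-registry pattern (the `RegistrationHandle` name and the `String` stand-in for the client type are illustrative only; this assumes the `once_cell` crate, which the patch itself also uses):

```rust
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

use once_cell::sync::Lazy;

// Process-wide table of live clients, keyed by a monotonically growing id.
static CLIENTS: Lazy<Mutex<HashMap<u16, Arc<String>>>> =
    Lazy::new(|| Mutex::new(HashMap::new()));

// RAII handle: inserts on construction, removes itself again on drop.
struct RegistrationHandle(u16);

impl RegistrationHandle {
    fn new(client: Arc<String>) -> Self {
        let mut clients = CLIENTS.lock().unwrap();
        // Allocate the next id above the current maximum, as the patch does.
        let id = clients.keys().max().copied().unwrap_or(0) + 1;
        clients.insert(id, client);
        RegistrationHandle(id)
    }
}

impl Drop for RegistrationHandle {
    fn drop(&mut self) {
        CLIENTS.lock().unwrap().remove(&self.0);
    }
}

fn all_clients() -> Vec<Arc<String>> {
    CLIENTS.lock().unwrap().values().cloned().collect()
}

fn main() {
    let handle = RegistrationHandle::new(Arc::new("executor-1".to_owned()));
    assert_eq!(all_clients().len(), 1);
    drop(handle); // deregisters
    assert!(all_clients().is_empty());
}
```

This is presumably why `test_targets` moves the wrapper into the server task: dropping it early would deregister the client while the executor is still running.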
 pub mod command;
 pub mod downward_api;
 pub mod executor_launcher;
 pub(crate) mod local_resource_api;
-pub(crate) mod local_resource_registry;
+pub mod local_resource_registry;
 pub(crate) mod local_resource_setup;
 pub mod orchestrator;
+pub(crate) mod remote_storage;
 pub mod session;
 pub(crate) mod tcp;
 pub mod translations;
diff --git a/app/buck2_test/src/local_resource_registry.rs b/app/buck2_test/src/local_resource_registry.rs
index 44b055e52cb03..1e5a243a1f06b 100644
--- a/app/buck2_test/src/local_resource_registry.rs
+++ b/app/buck2_test/src/local_resource_registry.rs
@@ -7,47 +7,41 @@
  * of this source tree.
  */
 
+use std::collections::HashMap;
+use std::sync::Arc;
 use std::time::Duration;
 
 use anyhow::Context;
 use buck2_common::kill_util::try_terminate_process_gracefully;
 use buck2_common::local_resource_state::LocalResourceState;
-use buck2_common::result::SharedResult;
 use buck2_core::target::configured_target_label::ConfiguredTargetLabel;
 use buck2_data::ReleaseLocalResourcesEnd;
 use buck2_data::ReleaseLocalResourcesStart;
-use buck2_events::dispatch::span_async;
-use dashmap::DashMap;
-use futures::future::BoxFuture;
-use futures::future::Shared;
+use buck2_events::dispatch::span_async_simple;
+use dice::DiceComputations;
+use dice::UserComputationData;
+use dupe::Dupe;
+use tokio::sync::Mutex;
 
-pub struct LocalResourceRegistry<'a>(
-    pub DashMap<ConfiguredTargetLabel, Shared<BoxFuture<'a, SharedResult<Vec<LocalResourceState>>>>>,
+#[derive(Default)]
+pub struct LocalResourceRegistry(
+    pub Arc<Mutex<HashMap<ConfiguredTargetLabel, Vec<LocalResourceState>>>>,
 );
 
-impl<'a> LocalResourceRegistry<'a> {
-    pub(crate) fn new() -> Self {
-        LocalResourceRegistry(DashMap::new())
-    }
-
+impl LocalResourceRegistry {
     pub(crate) async fn release_all_resources(&self) -> anyhow::Result<()> {
-        // We setup resources prior to running tests so at this point everything should be set up, so just resolve all futures.
-        let resource_futs = self
-            .0
-            .iter()
-            .map(|entry| entry.value().clone())
-            .collect::<Vec<_>>();
-
-        if resource_futs.is_empty() {
+        let resources = {
+            let mut lock = self.0.lock().await;
+            lock.drain().flat_map(|(_, v)| v).collect::<Vec<_>>()
+        };
+
+        if resources.is_empty() {
             return Ok(());
         }
 
-        let cleanup = async move || -> anyhow::Result<()> {
-            let futs = futures::future::join_all(resource_futs)
-                .await
+        let cleanup = || async move {
+            let resource_futs = resources
                 .into_iter()
-                // Failed setup most likely means the test failed and problem will be reported in the test status.
-                .flat_map(|r| r.into_iter())
                 .filter(|s| s.owning_pid().is_some())
                 .map(|s| async move {
                     let pid = s.owning_pid().unwrap();
@@ -60,19 +54,45 @@ impl LocalResourceRegistry {
                     ))
                 });
 
-            futures::future::join_all(futs)
+            futures::future::join_all(resource_futs)
                 .await
                 .into_iter()
-                .collect::<SharedResult<()>>()?;
+                .collect::<anyhow::Result<()>>()?;
 
-            Ok(())
+            Ok::<(), anyhow::Error>(())
         };
 
         let start = ReleaseLocalResourcesStart {};
         let end = ReleaseLocalResourcesEnd {};
 
-        span_async(start, async move { (cleanup().await, end) }).await?;
+        span_async_simple(start, cleanup(), end).await?;
+
+        Ok::<(), anyhow::Error>(())
+    }
+}
+
+pub trait InitLocalResourceRegistry {
+    fn init_local_resource_registry(&mut self);
+}
+
+pub trait HasLocalResourceRegistry {
+    fn get_local_resource_registry(&self) -> Arc<LocalResourceRegistry>;
+}
+
+impl InitLocalResourceRegistry for UserComputationData {
+    fn init_local_resource_registry(&mut self) {
+        self.data.set(Arc::new(LocalResourceRegistry::default()));
+    }
+}
+
+impl HasLocalResourceRegistry for DiceComputations<'_> {
+    fn get_local_resource_registry(&self) -> Arc<LocalResourceRegistry> {
+        let data = self
+            .per_transaction_data()
+            .data
+            .get::<Arc<LocalResourceRegistry>>()
+            .expect("LocalResourceRegistry should be set");
 
-        Ok(())
+        data.dupe()
     }
 }
diff --git a/app/buck2_test/src/local_resource_setup.rs b/app/buck2_test/src/local_resource_setup.rs
index 34f277abc42aa..884c73e0352ca 100644
--- a/app/buck2_test/src/local_resource_setup.rs
+++ b/app/buck2_test/src/local_resource_setup.rs
@@ -21,9 +21,26 @@ use buck2_core::soft_error;
 use buck2_core::target::configured_target_label::ConfiguredTargetLabel;
 use buck2_execute::artifact::fs::ExecutorFs;
 use buck2_test_api::data::RequiredLocalResources;
-use dice::DiceTransaction;
+use buck2_test_api::data::TestStage;
+use dice::DiceComputations;
 use dupe::Dupe;
+use futures::FutureExt;
 use indexmap::IndexMap;
+use itertools::Itertools;
+
+pub(crate) enum TestStageSimple {
+    Listing,
+    Testing,
+}
+
+impl From<&TestStage> for TestStageSimple {
+    fn from(value: &TestStage) -> Self {
+        match value {
+            TestStage::Listing(_) => TestStageSimple::Listing,
+            TestStage::Testing { .. } => TestStageSimple::Testing,
+        }
+    }
+}
 
 /// Container for everything needed to set up a local resource.
 #[derive(Debug)]
@@ -42,12 +59,13 @@ pub(crate) struct LocalResourceSetupContext {
 }
 
 pub(crate) async fn required_local_resources_setup_contexts(
-    dice: &DiceTransaction,
+    dice: &mut DiceComputations<'_>,
     executor_fs: &ExecutorFs<'_>,
     test_info: &FrozenExternalRunnerTestInfo,
     required_local_resources: &RequiredLocalResources,
+    stage: &TestStageSimple,
 ) -> anyhow::Result<Vec<LocalResourceSetupContext>> {
-    let providers = required_providers(dice, test_info, required_local_resources).await?;
+    let providers = required_providers(dice, test_info, required_local_resources, stage).await?;
     let mut cmd_line_context = DefaultCommandLineContext::new(executor_fs);
     let mut result = vec![];
     for (source_target_label, provider) in providers {
@@ -70,9 +88,10 @@ pub(crate) async fn required_local_resources_setup_contexts(
 }
 
 async fn required_providers<'v>(
-    dice: &DiceTransaction,
+    dice: &mut DiceComputations<'_>,
     test_info: &'v FrozenExternalRunnerTestInfo,
     required_local_resources: &'v RequiredLocalResources,
+    stage: &TestStageSimple,
 ) -> anyhow::Result<Vec<(&'v ConfiguredTargetLabel, &'v FrozenLocalResourceInfo)>> {
     let available_resources = test_info.local_resources();
@@ -80,6 +99,16 @@ async fn required_providers<'v>(
         .resources
         .iter()
         .map(|resource_type| &resource_type.name as &'v str)
+        .chain(
+            test_info
+                .required_local_resources()
+                .filter_map(|r| match stage {
+                    TestStageSimple::Listing if r.listing => Some(&r.name as &str),
+                    TestStageSimple::Testing if r.execution => Some(&r.name as &str),
+                    _ => None,
+                }),
+        )
+        .unique()
         .map(|type_name| {
             available_resources.get(type_name).copied().ok_or_else(|| {
                 anyhow::Error::msg(format!(
@@ -92,22 +121,22 @@ async fn required_providers<'v>(
             Ok(Some(x)) => Some(Ok(x)),
             Ok(None) => None,
             Err(e) => {
-                let _ignore = soft_error!("missing_required_local_resource", e, quiet: true);
+                let _ignore = soft_error!("missing_required_local_resource", e.into(), quiet: true);
                 None
             }
         })
         .collect::<Result<Vec<_>, anyhow::Error>>()?;
 
-    let futs = targets.iter().map(|t| get_local_resource_info(dice, t));
-
-    futures::future::join_all(futs)
-        .await
-        .into_iter()
-        .collect::<Result<Vec<_>, _>>()
+    dice.compute_join(targets, |dice, target| {
+        async move { get_local_resource_info(dice, target).await }.boxed()
+    })
+    .await
+    .into_iter()
+    .collect::<Result<Vec<_>, _>>()
 }
 
 async fn get_local_resource_info<'v>(
-    dice: &DiceTransaction,
+    dice: &mut DiceComputations<'_>,
     target: &'v ConfiguredProvidersLabel,
 ) -> anyhow::Result<(&'v ConfiguredTargetLabel, &'v FrozenLocalResourceInfo)> {
     let providers = dice.get_providers(target).await?.require_compatible()?;
diff --git a/app/buck2_test/src/orchestrator.rs b/app/buck2_test/src/orchestrator.rs
index 22328c446a8d5..f08c6da589a82 100644
--- a/app/buck2_test/src/orchestrator.rs
+++ b/app/buck2_test/src/orchestrator.rs
@@ -7,19 +7,31 @@
  * of this source tree.
  */
 
+// https://github.com/rust-lang/rust-clippy/issues/12806
+#![allow(clippy::unnecessary_to_owned)]
+
 //! Implementation of the `TestOrchestrator` from `buck2_test_api`.
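Before the orchestrator changes below, note how `required_providers` in `local_resource_setup.rs` now selects resource types per stage: names requested directly by the runner are chained with the names the test declares for the current stage, and `Itertools::unique` deduplicates so each resource type is set up only once. A small sketch of that selection logic (the `Resource` struct and its `listing`/`execution` flags mirror the shape of the patch but are illustrative, not buck2's real types; assumes the `itertools` crate):

```rust
use itertools::Itertools;

#[derive(Clone, Copy)]
enum Stage {
    Listing,
    Testing,
}

struct Resource {
    name: &'static str,
    listing: bool,   // needed while listing tests
    execution: bool, // needed while running tests
}

// Union of explicitly requested names and stage-appropriate declared names;
// `unique` keeps the first occurrence of each name.
fn required_names<'a>(
    requested: &[&'a str],
    declared: &'a [Resource],
    stage: Stage,
) -> Vec<&'a str> {
    requested
        .iter()
        .copied()
        .chain(declared.iter().filter_map(|r| match stage {
            Stage::Listing if r.listing => Some(r.name),
            Stage::Testing if r.execution => Some(r.name),
            _ => None,
        }))
        .unique()
        .collect()
}

fn main() {
    let declared = [
        Resource { name: "db", listing: false, execution: true },
        Resource { name: "emulator", listing: true, execution: true },
    ];
    assert_eq!(required_names(&["db"], &declared, Stage::Testing), ["db", "emulator"]);
    assert_eq!(required_names(&[], &declared, Stage::Listing), ["emulator"]);
}
```

Deduplicating after the chain, rather than before, is what lets a resource be both explicitly requested and stage-declared without being provisioned twice.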
+use std::borrow::Cow;
 use std::collections::HashMap;
 use std::ffi::OsStr;
+use std::fmt::Display;
+use std::ops::ControlFlow;
+use std::ops::DerefMut;
 use std::sync::Arc;
 use std::time::Duration;
 
-use anyhow::Context as _;
+use allocative::Allocative;
+use anyhow::Context;
 use async_trait::async_trait;
 use buck2_build_api::actions::artifact::get_artifact_fs::GetArtifactFs;
+use buck2_build_api::actions::execute::dice_data::CommandExecutorResponse;
+use buck2_build_api::actions::execute::dice_data::DiceHasCommandExecutor;
+use buck2_build_api::actions::execute::dice_data::GetReClient;
 use buck2_build_api::analysis::calculation::RuleAnalysisCalculation;
 use buck2_build_api::artifact_groups::calculation::ArtifactGroupCalculation;
 use buck2_build_api::artifact_groups::ArtifactGroup;
+use buck2_build_api::context::HasBuildContextData;
 use buck2_build_api::interpreter::rule_defs::cmd_args::space_separated::SpaceSeparatedCommandLineBuilder;
 use buck2_build_api::interpreter::rule_defs::cmd_args::AbsCommandLineContext;
 use buck2_build_api::interpreter::rule_defs::cmd_args::CommandLineArgLike;
@@ -32,11 +44,11 @@ use buck2_build_api::interpreter::rule_defs::provider::builtin::external_runner_
 use buck2_build_api::interpreter::rule_defs::provider::builtin::external_runner_test_info::TestCommandMember;
 use buck2_common::dice::cells::HasCellResolver;
 use buck2_common::events::HasEvents;
+use buck2_common::legacy_configs::dice::HasLegacyConfigs;
+use buck2_common::legacy_configs::key::BuckconfigKeyRef;
+use buck2_common::legacy_configs::view::LegacyBuckConfigView;
 use buck2_common::liveliness_observer::LivelinessObserver;
 use buck2_common::local_resource_state::LocalResourceState;
-use buck2_common::result::SharedError;
-use buck2_common::result::SharedResult;
-use buck2_common::result::ToSharedResultExt;
 use buck2_core::cells::cell_root_path::CellRootPathBuf;
 use buck2_core::execution_types::executor_config::CommandExecutorConfig;
 use buck2_core::execution_types::executor_config::CommandGenerationOptions;
@@ -47,9 +59,11 @@ use buck2_core::execution_types::executor_config::CommandGenerationOptions;
 use buck2_core::fs::artifact_path_resolver::ArtifactFs;
 use buck2_core::fs::buck_out_path::BuckOutTestPath;
 use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath;
 use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf;
-use buck2_core::fs::project_rel_path::ProjectRelativePath;
 use buck2_core::fs::project_rel_path::ProjectRelativePathBuf;
+use buck2_core::pattern::pattern::ParsedPattern;
+use buck2_core::pattern::pattern_type::TargetPatternExtra;
 use buck2_core::provider::label::ConfiguredProvidersLabel;
+use buck2_core::target::configured_or_unconfigured::ConfiguredOrUnconfiguredTargetLabel;
 use buck2_core::target::configured_target_label::ConfiguredTargetLabel;
 use buck2_data::SetupLocalResourcesEnd;
 use buck2_data::SetupLocalResourcesStart;
@@ -61,16 +75,17 @@ use buck2_data::SetupLocalResourcesStart;
 use buck2_data::TestRunStart;
 use buck2_data::TestSessionInfo;
 use buck2_data::TestSuite;
 use buck2_data::ToProtoMessage;
+use buck2_error::AnyhowContextForError;
 use buck2_events::dispatch::EventDispatcher;
 use buck2_execute::artifact::fs::ExecutorFs;
+use buck2_execute::artifact_value::ArtifactValue;
 use buck2_execute::digest_config::DigestConfig;
 use buck2_execute::digest_config::HasDigestConfig;
 use buck2_execute::execute::blocking::HasBlockingExecutor;
+use buck2_execute::execute::cache_uploader::CacheUploadInfo;
 use buck2_execute::execute::cache_uploader::NoOpCacheUploader;
 use buck2_execute::execute::claim::MutexClaimManager;
 use buck2_execute::execute::command_executor::CommandExecutor;
-use buck2_execute::execute::dice_data::CommandExecutorResponse;
-use buck2_execute::execute::dice_data::HasCommandExecutor;
 use buck2_execute::execute::environment_inheritance::EnvironmentInheritance;
 use buck2_execute::execute::kind::CommandExecutionKind;
 use buck2_execute::execute::manager::CommandExecutionManager;
@@ -82,6 +97,8 @@ use buck2_execute::execute::manager::CommandExecutionManager;
 use buck2_execute::execute::request::CommandExecutionPaths;
 use buck2_execute::execute::request::CommandExecutionRequest;
 use buck2_execute::execute::request::ExecutorPreference;
 use buck2_execute::execute::request::OutputCreationBehavior;
+use buck2_execute::execute::request::WorkerId;
+use buck2_execute::execute::request::WorkerSpec;
 use buck2_execute::execute::result::CommandExecutionMetadata;
 use buck2_execute::execute::result::CommandExecutionReport;
 use buck2_execute::execute::result::CommandExecutionResult;
@@ -92,13 +109,13 @@ use buck2_execute::execute::result::CommandExecutionResult;
 use buck2_execute_impl::executors::local::apply_local_execution_environment;
 use buck2_execute_impl::executors::local::create_output_dirs;
 use buck2_execute_impl::executors::local::materialize_inputs;
 use buck2_execute_impl::executors::local::EnvironmentBuilder;
+use buck2_futures::cancellation::CancellationContext;
 use buck2_node::nodes::configured::ConfiguredTargetNode;
 use buck2_node::nodes::configured_frontend::ConfiguredTargetNodeCalculation;
 use buck2_test_api::data::ArgValue;
 use buck2_test_api::data::ArgValueContent;
 use buck2_test_api::data::ConfiguredTargetHandle;
 use buck2_test_api::data::DeclaredOutput;
-use buck2_test_api::data::DisplayMetadata;
 use buck2_test_api::data::ExecuteResponse;
 use buck2_test_api::data::ExecutionDetails;
 use buck2_test_api::data::ExecutionResult2;
@@ -106,30 +123,40 @@ use buck2_test_api::data::ExecutionResult2;
 use buck2_test_api::data::ExecutionStatus;
 use buck2_test_api::data::ExecutionStream;
 use buck2_test_api::data::ExecutorConfigOverride;
 use buck2_test_api::data::ExternalRunnerSpecValue;
+use buck2_test_api::data::LocalExecutionCommand;
 use buck2_test_api::data::Output;
 use buck2_test_api::data::PrepareForLocalExecutionResult;
 use buck2_test_api::data::RequiredLocalResources;
 use buck2_test_api::data::TestResult;
+use buck2_test_api::data::TestStage;
 use buck2_test_api::protocol::TestOrchestrator;
 use derive_more::From;
+use dice::DiceComputations;
 use dice::DiceTransaction;
+use dice::Key;
+use display_container::fmt_container;
+use display_container::fmt_keyed_container;
 use dupe::Dupe;
 use futures::channel::mpsc::UnboundedSender;
+use futures::stream::FuturesUnordered;
+use futures::stream::StreamExt;
 use futures::FutureExt;
 use host_sharing::HostSharingRequirements;
 use indexmap::indexset;
 use indexmap::IndexMap;
 use indexmap::IndexSet;
-use more_futures::cancellation::CancellationContext;
 use sorted_vector_map::SortedVectorMap;
 use starlark::values::FrozenRef;
 use uuid::Uuid;
 
 use crate::local_resource_api::LocalResourcesSetupResult;
-use crate::local_resource_registry::LocalResourceRegistry;
+use crate::local_resource_registry::HasLocalResourceRegistry;
 use crate::local_resource_setup::required_local_resources_setup_contexts;
 use crate::local_resource_setup::LocalResourceSetupContext;
+use crate::local_resource_setup::TestStageSimple;
+use crate::remote_storage;
 use crate::session::TestSession;
+use crate::session::TestSessionOptions;
 use crate::translations;
 
 const MAX_SUFFIX_LEN: usize = 1024;
@@ -141,15 +168,13 @@ pub enum ExecutorMessage {
     InfoMessage(String),
 }
 
-pub struct BuckTestOrchestrator<'a> {
+pub struct BuckTestOrchestrator<'a: 'static> {
     dice: DiceTransaction,
     session: Arc<TestSession>,
     results_channel: UnboundedSender<anyhow::Result<ExecutorMessage>>,
     events: EventDispatcher,
     liveliness_observer: Arc<dyn LivelinessObserver>,
-    digest_config: DigestConfig,
     cancellations: &'a CancellationContext<'a>,
-    local_resource_state_registry: Arc<LocalResourceRegistry<'a>>,
 }
 
 impl<'a> BuckTestOrchestrator<'a> {
@@ -159,19 +184,15 @@ impl<'a> BuckTestOrchestrator<'a> {
         liveliness_observer: Arc<dyn LivelinessObserver>,
         results_channel: UnboundedSender<anyhow::Result<ExecutorMessage>>,
         cancellations: &'a CancellationContext<'a>,
-        local_resource_state_registry: Arc<LocalResourceRegistry<'a>>,
     ) -> anyhow::Result<BuckTestOrchestrator<'a>> {
         let events = dice.per_transaction_data().get_dispatcher().dupe();
-        let digest_config = dice.global_data().get_digest_config();
 
         Ok(Self::from_parts(
             dice,
             session,
             liveliness_observer,
             results_channel,
             events,
-            digest_config,
             cancellations,
-            local_resource_state_registry,
         ))
     }
 
@@ -181,9 +202,7 @@ impl<'a> BuckTestOrchestrator<'a> {
         liveliness_observer: Arc<dyn LivelinessObserver>,
         results_channel: UnboundedSender<anyhow::Result<ExecutorMessage>>,
         events: EventDispatcher,
-        digest_config: DigestConfig,
         cancellations: &'a CancellationContext,
-        local_resource_state_registry: Arc<LocalResourceRegistry<'a>>,
     ) -> BuckTestOrchestrator<'a> {
         Self {
             dice,
@@ -191,14 +210,14 @@ impl<'a> BuckTestOrchestrator<'a> {
             results_channel,
             events,
             liveliness_observer,
-            digest_config,
             cancellations,
-            local_resource_state_registry,
         }
     }
 
-    async fn require_alive(&self) -> Result<(), Cancelled> {
-        if !self.liveliness_observer.is_alive().await {
+    async fn require_alive(
+        liveliness_observer: Arc<dyn LivelinessObserver>,
+    ) -> Result<(), Cancelled> {
+        if !liveliness_observer.is_alive().await {
             return Err(Cancelled);
         }
 
@@ -207,7 +226,7 @@ impl<'a> BuckTestOrchestrator<'a> {
 
     async fn execute2(
         &self,
-        metadata: DisplayMetadata,
+        stage: TestStage,
         test_target: ConfiguredTargetHandle,
         cmd: Vec<ArgValue>,
         env: SortedVectorMap<String, ArgValue>,
         timeout: Duration,
         host_sharing_requirements: HostSharingRequirements,
         pre_create_dirs: Vec<DeclaredOutput>,
         executor_override: Option<ExecutorConfigOverride>,
         required_local_resources: RequiredLocalResources,
     ) -> Result<ExecutionResult2, ExecuteError> {
-        self.require_alive().await?;
+        Self::require_alive(self.liveliness_observer.dupe()).await?;
 
         let test_target = self.session.get(test_target)?;
 
-        let fs = self.dice.get_artifact_fs().await?;
+        let fs = self.dice.clone().get_artifact_fs().await?;
+        let pre_create_dirs = Arc::new(pre_create_dirs);
 
-        let test_info = self.get_test_info(&test_target).await?;
-        let test_executor = self
-            .get_test_executor(&test_target, &test_info, executor_override, &fs)
-            .await?;
-        let test_executable_expanded = self
-            .expand_test_executable(
-                &test_target,
-                &test_info,
-                cmd,
-                env,
-                pre_create_dirs,
-                &test_executor.executor_fs(),
-            )
-            .await?;
+        let ExecuteData {
+            stdout,
+            stderr,
+            status,
+            timing,
+            execution_kind,
+            outputs,
+        } = prepare_and_execute(
+            self.dice.dupe().deref_mut(),
+            self.cancellations,
+            TestExecutionKey {
+                test_target,
+                cmd: Arc::new(cmd),
+                env: Arc::new(env),
+                executor_override: executor_override.map(Arc::new),
+                required_local_resources: Arc::new(required_local_resources),
+                pre_create_dirs: pre_create_dirs.dupe(),
+                prefix: TestExecutionPrefix::new(&stage, &self.session),
+                stage: Arc::new(stage),
+                options: self.session.options(),
+                timeout,
+                host_sharing_requirements: host_sharing_requirements.into(),
+            },
+            self.liveliness_observer.dupe(),
+        )
+        .await?;
+
+        Self::require_alive(self.liveliness_observer.dupe()).await?;
+
+        let mut output_map = HashMap::new();
+        let mut paths_to_materialize = vec![];
+
+        let remote_storage_config_update_futures = FuturesUnordered::new();
+
+        for (test_path, artifact) in outputs {
+            let project_relative_path = fs.buck_out_path_resolver().resolve_test(&test_path);
+            let output_name = test_path.into_path().into();
+            // It's OK to search iteratively here because there will be few entries in `pre_create_dirs`
+            let remote_storage_config = pre_create_dirs
+                .iter()
+                .find(|&x| x.name == output_name)
+                .map_or_else(Default::default, |x| x.remote_storage_config.dupe());
+            match (
+                remote_storage_config.supports_remote,
+                execution_kind.as_ref(),
+                translations::convert_artifact(output_name.clone().into_string(), &artifact),
+            ) {
+                // This condition checks that a downstream consumer supports
+                // remote outputs AND the output is actually in CAS.
+                //
+                // TODO(arr): is there a better way to check that the output is
+                // in CAS other than checking that the command was executed on
+                // RE? Alternatively, when we make buck upload local testing
+                // artifacts to CAS, we can remove this condition altogether.
+                (true, Some(CommandExecutionKind::Remote { .. }), Some(remote_object)) => {
+                    let future = async move {
+                        let _unused = remote_storage::apply_config(
+                            self.dice.per_transaction_data().get_re_client(),
+                            &artifact,
+                            &remote_storage_config,
+                        )
+                        .await;
+                        (output_name, remote_object)
+                    };
+                    remote_storage_config_update_futures.push(future);
+                }
+                _ => {
+                    paths_to_materialize.push(project_relative_path.clone());
+                    let abs_path = fs.fs().resolve(&project_relative_path);
+                    output_map.insert(output_name, Output::LocalPath(abs_path));
+                }
+            };
+        }
+        let results: Vec<_> = remote_storage_config_update_futures.collect().await;
+        for result in results {
+            output_map.insert(result.0, Output::RemoteObject(result.1));
+        }
+
+        // Request materialization in case this ran on RE. Eventually Tpx should be able to
+        // understand remote outputs but currently we don't have this.
+        self.dice
+            .per_transaction_data()
+            .get_materializer()
+            .ensure_materialized(paths_to_materialize)
+            .await
+            .context("Error materializing test outputs")?;
+        Ok(ExecutionResult2 {
+            status,
+            stdout,
+            stderr,
+            outputs: output_map,
+            start_time: timing.start_time,
+            execution_time: timing.execution_time,
+            execution_details: ExecutionDetails {
+                execution_kind: execution_kind.map(|k| k.to_proto(false)),
+            },
+        })
+    }
+
+    async fn prepare_and_execute_no_dice(
+        dice: &mut DiceComputations<'_>,
+        key: TestExecutionKey,
+        liveliness_observer: Arc<dyn LivelinessObserver>,
+        cancellation: &CancellationContext<'_>,
+    ) -> Result<ExecuteData, ExecuteError> {
+        let TestExecutionKey {
+            test_target,
+            cmd,
+            env,
+            executor_override,
+            required_local_resources,
+            pre_create_dirs,
+            stage,
+            options,
+            prefix,
+            timeout,
+            host_sharing_requirements,
+        } = key;
+        let fs = dice.get_artifact_fs().await?;
+        let test_info = Self::get_test_info(dice, &test_target).await?;
+        let test_executor = Self::get_test_executor(
+            dice,
+            &test_target,
+            &test_info,
+            executor_override,
+            &fs,
+            &stage,
+        )
+        .await?;
+        let test_executable_expanded = Self::expand_test_executable(
+            dice,
+            &test_target,
+            &test_info,
+            Cow::Borrowed(&cmd),
+            Cow::Borrowed(&env),
+            Cow::Borrowed(&pre_create_dirs),
+            &test_executor.executor_fs(),
+            prefix,
+            options,
+        )
+        .boxed()
+        .await?;
 
         let ExpandedTestExecutable {
             cwd,
             cmd: expanded_cmd,
@@ -245,88 +393,218 @@ impl<'a> BuckTestOrchestrator<'a> {
             inputs,
             supports_re,
             declared_outputs,
+            worker,
         } = test_executable_expanded;
-
-        let executor_preference = self.executor_preference(supports_re)?;
-
+        let executor_preference = Self::executor_preference(options, supports_re)?;
         let required_resources = if test_executor.is_local_execution_possible(executor_preference)
         {
-            let setup_local_resources_executor = self.get_local_executor(&fs)?;
+            let setup_local_resources_executor = Self::get_local_executor(dice, &fs).await?;
+            let simple_stage = stage.as_ref().into();
 
             let setup_contexts = {
                 let executor_fs = setup_local_resources_executor.executor_fs();
                 required_local_resources_setup_contexts(
-                    &self.dice,
+                    dice,
                     &executor_fs,
                     &test_info,
                     &required_local_resources,
+                    &simple_stage,
                 )
                 .await?
             };
             // If some timeout is neeeded, use the same value as for the test itself which is better than nothing.
-            let resources = self
-                .setup_local_resources(setup_contexts, setup_local_resources_executor, timeout)
-                .await?;
-
-            self.require_alive().await?;
-
-            resources
+            Self::setup_local_resources(
+                dice,
+                cancellation,
+                setup_contexts,
+                setup_local_resources_executor,
+                timeout,
+                liveliness_observer.dupe(),
+            )
+            .await?
         } else {
             vec![]
         };
 
+        let execution_request = Self::create_command_execution_request(
+            dice,
+            cwd,
+            expanded_cmd,
+            expanded_env,
+            inputs,
+            declared_outputs,
+            &fs,
+            Some(timeout),
+            Some(host_sharing_requirements),
+            Some(executor_preference),
+            required_resources,
+            worker,
+        )
+        .boxed()
+        .await?;
+        let result = Self::execute_request(
+            dice,
+            cancellation,
+            &test_target,
+            &stage,
+            &test_executor,
+            execution_request,
+            liveliness_observer.dupe(),
+        )
+        .boxed()
+        .await?;
+        Ok(result)
+    }
+}
 
-        let execution_request = self
-            .create_command_execution_request(
-                cwd,
-                expanded_cmd,
-                expanded_env,
-                inputs,
-                declared_outputs,
-                &fs,
-                Some(timeout),
-                Some(host_sharing_requirements),
-                Some(executor_preference),
-                required_resources,
-            )
-            .await?;
+#[derive(Clone, Dupe, Debug, Eq, Hash, PartialEq, Allocative)]
+struct TestExecutionKey {
+    test_target: ConfiguredProvidersLabel,
+    cmd: Arc<Vec<ArgValue>>,
+    env: Arc<SortedVectorMap<String, ArgValue>>,
+    executor_override: Option<Arc<ExecutorConfigOverride>>,
+    required_local_resources: Arc<RequiredLocalResources>,
+    pre_create_dirs: Arc<Vec<DeclaredOutput>>,
+    stage: Arc<TestStage>,
+    options: TestSessionOptions,
+    prefix: TestExecutionPrefix,
+    timeout: Duration,
+    host_sharing_requirements: Arc<HostSharingRequirements>,
+}
 
-        let (stdout, stderr, status, timing, execution_kind, outputs) = self
-            .execute_shared(&test_target, metadata, &test_executor, execution_request)
-            .await?;
+#[derive(Clone, Dupe, Debug, Eq, Hash, PartialEq, Allocative)]
+enum TestExecutionPrefix {
+    Listing,
+    Testing(Arc<ForwardRelativePathBuf>),
+}
 
-        self.require_alive().await?;
+impl TestExecutionPrefix {
+    fn new(stage: &TestStage, session: &TestSession) -> Self {
+        match stage {
+            TestStage::Listing(_) => TestExecutionPrefix::Listing,
+            TestStage::Testing { .. } => TestExecutionPrefix::Testing(session.prefix().dupe()),
+        }
+    }
+}
 
-        let (outputs, paths_to_materialize) = outputs
-            .into_iter()
-            .map(|test_path| {
-                let project_path = fs.buck_out_path_resolver().resolve_test(&test_path);
-                let abs_path = fs.fs().resolve(&project_path);
-                let declared_output = DeclaredOutput {
-                    name: test_path.into_path(),
-                };
-                ((declared_output, Output::LocalPath(abs_path)), project_path)
+impl Display for TestExecutionPrefix {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            TestExecutionPrefix::Listing => write!(f, "Listing"),
+            TestExecutionPrefix::Testing(prefix) => write!(f, "Testing({})", prefix),
+        }
+    }
+}
+
+#[async_trait]
+impl Key for TestExecutionKey {
+    type Value = buck2_error::Result<Arc<ExecuteData>>;
+
+    async fn compute(
+        &self,
+        ctx: &mut DiceComputations,
+        cancellations: &CancellationContext,
+    ) -> Self::Value {
+        Ok(cancellations
+            .with_structured_cancellation(|observer| {
+                async move {
+                    let result = BuckTestOrchestrator::prepare_and_execute_no_dice(
+                        ctx,
+                        self.dupe(),
+                        Arc::new(observer),
+                        cancellations,
+                    )
+                    .await;
+                    let result: anyhow::Result<Arc<ExecuteData>> = match result {
+                        Ok(ok) => Ok(Arc::new(ok)),
+                        Err(err) => match err {
+                            ExecuteError::Error(err) => Err(err)?,
+                            ExecuteError::Cancelled(_) => Err(ExecuteDiceErr::Cancelled)?,
+                        },
+                    };
+                    result
+                }
+                .boxed()
             })
-            .unzip();
+            .await?)
+    }
 
-        // Request materialization in case this ran on RE. Eventually Tpx should be able to
-        // understand remote outputs but currently we don't have this.
-        self.dice
-            .per_transaction_data()
-            .get_materializer()
-            .ensure_materialized(paths_to_materialize)
-            .await
-            .context("Error materializing test outputs")?;
+    fn equality(_x: &Self::Value, _y: &Self::Value) -> bool {
+        false
+    }
+}
 
-        Ok(ExecutionResult2 {
-            status,
-            stdout,
-            stderr,
-            outputs,
-            start_time: timing.start_time,
-            execution_time: timing.execution_time,
-            execution_details: ExecutionDetails {
-                execution_kind: execution_kind.map(|k| k.to_proto(false)),
-            },
-        })
+async fn prepare_and_execute(
+    ctx: &mut DiceComputations<'static>,
+    cancellation: &CancellationContext<'_>,
+    key: TestExecutionKey,
+    liveliness_observer: Arc<dyn LivelinessObserver>,
+) -> Result<ExecuteData, ExecuteError> {
+    let execute_on_dice = match key.stage.as_ref() {
+        TestStage::Listing(_) => check_cache_listings_experiment(ctx, &key.test_target).await?,
+        TestStage::Testing { .. } => false,
+    };
+    if execute_on_dice {
+        let result = tokio::select! {
+            _ = liveliness_observer.while_alive() => {
+                Err(ExecuteError::Cancelled(Cancelled))
+            }
+            result = prepare_and_execute_dice(ctx, &key) => {
+                result
+            }
+        }?;
+        Ok((*result).clone())
+    } else {
+        Ok(BuckTestOrchestrator::prepare_and_execute_no_dice(
+            ctx,
+            key,
+            liveliness_observer,
+            cancellation,
+        )
+        .await?)
+    }
+}
+
+async fn prepare_and_execute_dice(
+    ctx: &mut DiceComputations<'_>,
+    key: &TestExecutionKey,
+) -> Result<Arc<ExecuteData>, ExecuteError> {
+    let result = ctx.compute(key).await;
+    let result = result.map_err(anyhow::Error::from)?;
+
+    result.map_err(anyhow::Error::from).map_err(|err| {
+        if err.downcast_ref::<ExecuteDiceErr>().is_some() {
+            ExecuteError::Cancelled(Cancelled)
+        } else {
+            ExecuteError::Error(err)
+        }
+    })
+}
+
+impl Display for TestExecutionKey {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "test_target = {}, ", self.test_target)?;
+        fmt_container(f, "cmd = [", "], ", self.cmd.as_ref())?;
+        fmt_keyed_container(f, "env = {", "}, ", ",", self.env.as_ref())?;
+        fmt_container(
+            f,
+            "executor_override = [",
+            "], ",
+            self.executor_override.iter(),
+        )?;
+        write!(
+            f,
+            "required_local_resources = {}, ",
+            self.required_local_resources.as_ref(),
+        )?;
+        fmt_container(f, "pre_create_dirs = [", "], ", self.pre_create_dirs.iter())?;
+        write!(
+            f,
+            "stage = {}, options = {}, prefix = {}, timeout = {}, host_sharing_requirements = {}",
+            self.stage,
+            self.options,
+            self.prefix,
+            self.timeout.as_millis(),
+            self.host_sharing_requirements
        )
    }
}

@@ -346,11 +624,18 @@ enum ExecuteError {
     Cancelled(Cancelled),
 }
 
+#[derive(From, Debug, buck2_error::Error)]
+/// Used to support the same ExecuteError's api via dice
+enum ExecuteDiceErr {
+    #[error("Cancelled")]
+    Cancelled,
+}
+
 #[async_trait]
 impl<'a> TestOrchestrator for BuckTestOrchestrator<'a> {
     async fn execute2(
         &self,
-        metadata: DisplayMetadata,
+        stage: TestStage,
         test_target: ConfiguredTargetHandle,
         cmd: Vec<ArgValue>,
         env: SortedVectorMap<String, ArgValue>,
@@ -362,7 +647,7 @@ impl<'a> TestOrchestrator for BuckTestOrchestrator<'a> {
     ) -> anyhow::Result<ExecuteResponse> {
         let res = BuckTestOrchestrator::execute2(
             self,
-            metadata,
+            stage,
             test_target,
             cmd,
             env,
@@ -432,32 +717,70 @@ impl<'a> TestOrchestrator for BuckTestOrchestrator<'a> {
 
     async fn prepare_for_local_execution(
         &self,
-        _metadata: DisplayMetadata,
+        stage: TestStage,
         test_target: ConfiguredTargetHandle,
         cmd: Vec<ArgValue>,
         env: SortedVectorMap<String, ArgValue>,
         pre_create_dirs: Vec<DeclaredOutput>,
+        required_local_resources: RequiredLocalResources,
     ) -> anyhow::Result<PrepareForLocalExecutionResult> {
         let test_target = self.session.get(test_target)?;
 
-        let fs = self.dice.get_artifact_fs().await?;
+        let fs = self.dice.clone().get_artifact_fs().await?;
 
-        let test_info = self.get_test_info(&test_target).await?;
-        // Tests are not run, so there is no executor override.
-        let executor = self
-            .get_test_executor(&test_target, &test_info, None, &fs)
-            .await?;
-        let test_executable_expanded = self
-            .expand_test_executable(
-                &test_target,
+        let test_info = Self::get_test_info(self.dice.dupe().deref_mut(), &test_target).await?;
+
+        // In contrast from actual test execution we do not check if local execution is possible.
+        // We leave that decision to actual local execution runner that requests local execution preparation.
+        let setup_local_resources_executor =
+            Self::get_local_executor(self.dice.dupe().deref_mut(), &fs).await?;
+        let setup_contexts = {
+            let executor_fs = setup_local_resources_executor.executor_fs();
+            required_local_resources_setup_contexts(
+                self.dice.dupe().deref_mut(),
+                &executor_fs,
                 &test_info,
-                cmd,
-                env,
-                pre_create_dirs,
-                &executor.executor_fs(),
+                &required_local_resources,
+                &TestStageSimple::Testing,
             )
             .await?
+ }; + let setup_commands: Vec = self + .dice + .dupe() + .deref_mut() + .try_compute_join(setup_contexts, |dice, context| { + let fs = fs.clone(); + async move { + Self::prepare_local_resource(dice, context, &fs, Duration::default()).await + } + .boxed() + }) .await?; + // Tests are not run, so there is no executor override. + let executor = Self::get_test_executor( + self.dice.dupe().deref_mut(), + &test_target, + &test_info, + None, + &fs, + &stage, + ) + .await?; + let test_executable_expanded = Self::expand_test_executable( + self.dice.dupe().deref_mut(), + &test_target, + &test_info, + Cow::Owned(cmd), + Cow::Owned(env), + Cow::Owned(pre_create_dirs), + &executor.executor_fs(), + TestExecutionPrefix::new(&stage, &self.session), + self.session.options(), + ) + .await?; + let ExpandedTestExecutable { cwd, cmd: expanded_cmd, @@ -465,27 +788,29 @@ impl<'a> TestOrchestrator for BuckTestOrchestrator<'a> { inputs, supports_re: _, declared_outputs, + worker, } = test_executable_expanded; - let execution_request = self - .create_command_execution_request( - cwd, - expanded_cmd, - expanded_env, - inputs, - declared_outputs, - &fs, - None, - None, - None, - vec![], - ) - .await?; + let execution_request = Self::create_command_execution_request( + self.dice.dupe().deref_mut(), + cwd, + expanded_cmd, + expanded_env, + inputs, + declared_outputs, + &fs, + None, + None, + None, + vec![], + worker, + ) + .await?; let materializer = self.dice.per_transaction_data().get_materializer(); let blocking_executor = self.dice.get_blocking_executor(); - materialize_inputs(&fs, &materializer, &execution_request).await?; + materialize_inputs(&fs, materializer.as_ref(), &execution_request).await?; create_output_dirs( &fs, @@ -496,9 +821,29 @@ impl<'a> TestOrchestrator for BuckTestOrchestrator<'a> { ) .await?; + for local_resource_setup_command in setup_commands.iter() { + materialize_inputs( + &fs, + materializer.as_ref(), + &local_resource_setup_command.execution_request, + ) + .await?; + let blocking_executor = self.dice.get_blocking_executor(); + + create_output_dirs( + &fs, + &local_resource_setup_command.execution_request, + materializer.dupe(), + blocking_executor, + self.cancellations, + ) + .await?; + } + Ok(create_prepare_for_local_execution_result( &fs, execution_request, + setup_commands, )) } @@ -509,12 +854,24 @@ impl<'a> TestOrchestrator for BuckTestOrchestrator<'a> { Ok(()) } } +#[derive(Allocative, Clone)] +struct ExecuteData { + pub stdout: ExecutionStream, + pub stderr: ExecutionStream, + pub status: ExecutionStatus, + pub timing: CommandExecutionMetadata, + pub execution_kind: Option, + pub outputs: Vec<(BuckOutTestPath, ArtifactValue)>, +} impl<'b> BuckTestOrchestrator<'b> { - fn executor_preference(&self, test_supports_re: bool) -> anyhow::Result { + fn executor_preference( + opts: TestSessionOptions, + test_supports_re: bool, + ) -> anyhow::Result { let mut executor_preference = ExecutorPreference::Default; - if !self.session.options().allow_re { + if !opts.allow_re { // We don't ban RE (we only prefer not to use it) if the session doesn't allow it, so // that executor overrides or default executor can still route executions to RE. executor_preference = executor_preference.and(ExecutorPreference::LocalPreferred)?; @@ -528,32 +885,27 @@ impl<'b> BuckTestOrchestrator<'b> { Ok(executor_preference) } - async fn execute_shared( - &self, - test_target: &ConfiguredProvidersLabel, - metadata: DisplayMetadata, + /// Core request execution logic. 
+ async fn execute_request( + dice: &mut DiceComputations<'_>, + cancellation: &CancellationContext<'_>, + test_target_label: &ConfiguredProvidersLabel, + stage: &TestStage, executor: &CommandExecutor, request: CommandExecutionRequest, - ) -> Result< - ( - ExecutionStream, - ExecutionStream, - ExecutionStatus, - CommandExecutionMetadata, - Option, - Vec, - ), - ExecuteError, - > { + liveliness_observer: Arc, + ) -> Result { + let events = dice.per_transaction_data().get_dispatcher().dupe(); let manager = CommandExecutionManager::new( Box::new(MutexClaimManager::new()), - self.events.dupe(), - self.liveliness_observer.dupe(), + events.dupe(), + liveliness_observer.dupe(), ); + let digest_config = dice.global_data().get_digest_config(); - let mut action_key_suffix = match &metadata { - DisplayMetadata::Listing(_) => "listing".to_owned(), - DisplayMetadata::Testing { testcases, .. } => testcases.join(" "), + let mut action_key_suffix = match &stage { + TestStage::Listing(_) => "listing".to_owned(), + TestStage::Testing { testcases, .. } => testcases.join(" "), }; if action_key_suffix.len() > MAX_SUFFIX_LEN { let truncated = "(truncated)"; @@ -562,25 +914,108 @@ impl<'b> BuckTestOrchestrator<'b> { } let test_target = TestTarget { - target: test_target.target(), + target: test_target_label.target(), action_key_suffix, }; // For test execution, we currently do not do any cache queries - let prepared_action = executor.prepare_action(&request, self.digest_config)?; + let prepared_action = executor.prepare_action(&request, digest_config)?; let prepared_command = PreparedCommand { target: &test_target as _, request: &request, prepared_action: &prepared_action, - digest_config: self.digest_config, + digest_config, + }; + + let action_cache = match stage { + TestStage::Listing(_) => { + executor + .action_cache(manager, &prepared_command, cancellation) + .await + } + TestStage::Testing { .. } => ControlFlow::Continue(manager), }; - let command = executor.exec_cmd(manager, &prepared_command, self.cancellations); // instrument execution with a span. // TODO(brasselsprouts): migrate this into the executor to get better accuracy. + + let (command_exec_result, cached) = match action_cache { + ControlFlow::Break(result) => (result, true), + ControlFlow::Continue(manager) => { + let command = executor.exec_cmd(manager, &prepared_command, cancellation); + let result = match stage { + TestStage::Listing(listing) => { + let start = TestDiscoveryStart { + suite_name: listing.clone(), + }; + events + .span_async(start, async move { + let result = command.await; + let end = TestDiscoveryEnd { + suite_name: listing.clone(), + command_report: Some( + result + .report + .to_command_execution_proto(true, true, false) + .await, + ), + }; + (result, end) + }) + .await + } + TestStage::Testing { suite, testcases } => { + let test_suite = Some(TestSuite { + suite_name: suite.clone(), + test_names: testcases.clone(), + target_label: Some(test_target.target.as_proto()), + }); + let start = TestRunStart { + suite: test_suite.clone(), + }; + events + .span_async(start, async move { + let result = command.await; + let end = TestRunEnd { + suite: test_suite, + command_report: Some( + result + .report + .to_command_execution_proto(true, true, false) + .await, + ), + }; + (result, end) + }) + .await + } + }; + (result, false) + } + }; + + if let TestStage::Listing(_) = stage { + if !cached && check_cache_listings_experiment(dice, &test_target_label).await? 
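Editor's note: the listing-stage cache lookup above threads the execution manager through `std::ops::ControlFlow`: `Break` short-circuits with a cached result, while `Continue` hands the manager back so the command can actually run. A minimal sketch of that shape, with hypothetical stand-in types:

use std::ops::ControlFlow;

struct Manager;
struct ExecResult(&'static str);

// A cache layer either short-circuits with a ready result (`Break`) or
// returns the manager so the caller can perform the real execution.
fn check_cache(manager: Manager, cached: bool) -> ControlFlow<ExecResult, Manager> {
    if cached {
        ControlFlow::Break(ExecResult("from cache"))
    } else {
        ControlFlow::Continue(manager)
    }
}

fn run(manager: Manager, cached: bool) -> ExecResult {
    match check_cache(manager, cached) {
        ControlFlow::Break(result) => result,
        ControlFlow::Continue(_manager) => ExecResult("executed"),
    }
}

fn main() {
    assert_eq!(run(Manager, true).0, "from cache");
    assert_eq!(run(Manager, false).0, "executed");
}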
{ + let info = CacheUploadInfo { + target: &test_target as _, + digest_config, + }; + let _result = executor + .cache_upload( + &info, + &command_exec_result, + None, + None, + &prepared_action.action_and_blobs, + ) + .await?; + } + } + let CommandExecutionResult { outputs, + did_cache_upload: _, report: CommandExecutionReport { std_streams, @@ -589,63 +1024,12 @@ impl<'b> BuckTestOrchestrator<'b> { timing, .. }, - rejected_execution: _, - did_cache_upload: _, - did_dep_file_cache_upload: _, - dep_file_key: _, - eligible_for_full_hybrid: _, - dep_file_metadata: _, - } = match metadata { - DisplayMetadata::Listing(listing) => { - let start = TestDiscoveryStart { - suite_name: listing.clone(), - }; - self.events - .span_async(start, async move { - let result = command.await; - let end = TestDiscoveryEnd { - suite_name: listing, - command_report: Some( - result - .report - .to_command_execution_proto(true, true, false) - .await, - ), - }; - (result, end) - }) - .await - } - DisplayMetadata::Testing { suite, testcases } => { - let test_suite = Some(TestSuite { - suite_name: suite, - test_names: testcases, - target_label: Some(test_target.target.as_proto()), - }); - let start = TestRunStart { - suite: test_suite.clone(), - }; - self.events - .span_async(start, async move { - let result = command.await; - let end = TestRunEnd { - suite: test_suite, - command_report: Some( - result - .report - .to_command_execution_proto(true, true, false) - .await, - ), - }; - (result, end) - }) - .await - } - }; + .. + } = command_exec_result; let outputs = outputs - .into_keys() - .filter_map(|output| Some(output.into_test_path()?.0)) + .into_iter() + .filter_map(|(output, artifact)| Some((output.into_test_path()?.0, artifact))) .collect(); let std_streams = std_streams @@ -656,59 +1040,62 @@ impl<'b> BuckTestOrchestrator<'b> { let stderr = ExecutionStream::Inline(std_streams.stderr); Ok(match status { - CommandExecutionStatus::Success { execution_kind } => ( + CommandExecutionStatus::Success { execution_kind } => ExecuteData { stdout, stderr, - ExecutionStatus::Finished { + status: ExecutionStatus::Finished { exitcode: exit_code.unwrap_or(0), }, timing, - Some(execution_kind), + execution_kind: Some(execution_kind), outputs, - ), - CommandExecutionStatus::Failure { execution_kind } => ( + }, + CommandExecutionStatus::Failure { execution_kind } => ExecuteData { stdout, stderr, - ExecutionStatus::Finished { + status: ExecutionStatus::Finished { exitcode: exit_code.unwrap_or(1), }, timing, - Some(execution_kind), + execution_kind: Some(execution_kind), outputs, - ), + }, CommandExecutionStatus::TimedOut { duration, execution_kind, - } => ( + } => ExecuteData { stdout, stderr, - ExecutionStatus::TimedOut { duration }, + status: ExecutionStatus::TimedOut { duration }, timing, - Some(execution_kind), + execution_kind: Some(execution_kind), outputs, - ), - CommandExecutionStatus::Error { stage: _, error } => ( - ExecutionStream::Inline(Default::default()), - ExecutionStream::Inline(format!("{:?}", error).into_bytes()), - ExecutionStatus::Finished { + }, + CommandExecutionStatus::Error { + error, + execution_kind, + .. 
+ } => ExecuteData { + stdout: ExecutionStream::Inline(Default::default()), + stderr: ExecutionStream::Inline(format!("{:?}", error).into_bytes()), + status: ExecutionStatus::Finished { exitcode: exit_code.unwrap_or(1), }, timing, - None, + execution_kind, outputs, - ), + }, CommandExecutionStatus::Cancelled => { return Err(ExecuteError::Cancelled(Cancelled)); } }) } - fn get_command_executor( - &self, - fs: &ArtifactFs, - test_target_node: &ConfiguredTargetNode, - executor_override: Option<&CommandExecutorConfig>, - ) -> anyhow::Result { + fn executor_config_with_remote_cache_override<'a>( + test_target_node: &'a ConfiguredTargetNode, + executor_override: Option<&'a CommandExecutorConfig>, + stage: &TestStage, + ) -> anyhow::Result> { let executor_config = match executor_override { Some(o) => o, None => test_target_node @@ -717,17 +1104,57 @@ impl<'b> BuckTestOrchestrator<'b> { .context("Error accessing executor config")?, }; + if let TestStage::Listing(_) = &stage { + return Ok(Cow::Borrowed(executor_config)); + } + + match &executor_config.executor { + Executor::RemoteEnabled(options) if options.remote_cache_enabled => { + let mut exec_options = options.clone(); + exec_options.remote_cache_enabled = false; + let executor_config = CommandExecutorConfig { + executor: Executor::RemoteEnabled(exec_options), + options: executor_config.options.dupe(), + }; + Ok(Cow::Owned(executor_config)) + } + Executor::Local(_) | Executor::RemoteEnabled(_) => Ok(Cow::Borrowed(executor_config)), + } + } + + async fn get_command_executor( + dice: &mut DiceComputations<'_>, + fs: &ArtifactFs, + test_target_node: &ConfiguredTargetNode, + executor_override: Option<&CommandExecutorConfig>, + stage: &TestStage, + ) -> anyhow::Result { + let executor_config = &Self::executor_config_with_remote_cache_override( + test_target_node, + executor_override, + &stage, + )?; + let CommandExecutorResponse { executor, platform, - cache_checker: _, - cache_uploader: _, - } = self.dice.get_command_executor(fs, executor_config)?; + cache_checker, + cache_uploader, + } = dice.get_command_executor_from_dice(executor_config).await?; + + // Caching is enabled only for listings + let (cache_uploader, cache_checker) = match stage { + TestStage::Listing(_) => (cache_uploader, cache_checker), + TestStage::Testing { .. } => ( + Arc::new(NoOpCacheUploader {}) as _, + Arc::new(NoOpCommandOptionalExecutor {}) as _, + ), + }; + let executor = CommandExecutor::new( executor, - // Caching is not enabled for tests yet. 
Use the NoOp - Arc::new(NoOpCommandOptionalExecutor {}), - Arc::new(NoOpCacheUploader {}), + cache_checker, + cache_uploader, fs.clone(), executor_config.options, platform, @@ -735,7 +1162,10 @@ impl<'b> BuckTestOrchestrator<'b> { Ok(executor) } - fn get_local_executor(&self, fs: &ArtifactFs) -> anyhow::Result { + async fn get_local_executor( + dice: &mut DiceComputations<'_>, + fs: &ArtifactFs, + ) -> anyhow::Result { let executor_config = CommandExecutorConfig { executor: Executor::Local(LocalExecutorOptions::default()), options: CommandGenerationOptions { @@ -748,7 +1178,9 @@ impl<'b> BuckTestOrchestrator<'b> { platform, cache_checker: _, cache_uploader: _, - } = self.dice.get_command_executor(fs, &executor_config)?; + } = dice + .get_command_executor_from_dice(&executor_config) + .await?; let executor = CommandExecutor::new( executor, Arc::new(NoOpCommandOptionalExecutor {}), @@ -761,11 +1193,10 @@ impl<'b> BuckTestOrchestrator<'b> { } async fn get_test_info( - &self, + dice: &mut DiceComputations<'_>, test_target: &ConfiguredProvidersLabel, ) -> anyhow::Result> { - let providers = self - .dice + let providers = dice .get_providers(test_target) .await? .require_compatible()?; @@ -777,21 +1208,21 @@ impl<'b> BuckTestOrchestrator<'b> { } async fn get_test_executor( - &self, + dice: &mut DiceComputations<'_>, test_target: &ConfiguredProvidersLabel, test_info: &FrozenExternalRunnerTestInfo, - executor_override: Option, + executor_override: Option>, fs: &ArtifactFs, + stage: &TestStage, ) -> anyhow::Result { // NOTE: get_providers() implicitly calls this already but it's not the end of the world // since this will get cached in DICE. - let node = self - .dice + let node = dice .get_configured_target_node(test_target.target()) .await? .require_compatible()?; - let resolved_executor_override = match executor_override.as_ref() { + let resolved_executor_override = match executor_override { Some(executor_override) => Some( &test_info .executor_override(&executor_override.name) @@ -807,29 +1238,29 @@ impl<'b> BuckTestOrchestrator<'b> { None => test_info.default_executor().map(|o| &o.0), }; - self.get_command_executor( + Self::get_command_executor( + dice, fs, &node, resolved_executor_override.as_ref().map(|a| &***a), + stage, ) + .await .context("Error constructing CommandExecutor") } - async fn expand_test_executable( - &self, + async fn expand_test_executable<'a>( + dice: &mut DiceComputations<'_>, test_target: &ConfiguredProvidersLabel, test_info: &FrozenExternalRunnerTestInfo, - cmd: Vec, - env: SortedVectorMap, - pre_create_dirs: Vec, + cmd: Cow<'a, Vec>, + env: Cow<'a, SortedVectorMap>, + pre_create_dirs: Cow<'a, Vec>, executor_fs: &ExecutorFs<'_>, + prefix: TestExecutionPrefix, + opts: TestSessionOptions, ) -> anyhow::Result { - let output_root = self - .session - .prefix() - .join(ForwardRelativePathBuf::unchecked_new( - Uuid::new_v4().to_string(), - )); + let output_root = resolve_output_root(dice, test_target, prefix).await?; let mut declared_outputs = IndexMap::::new(); @@ -839,14 +1270,12 @@ impl<'b> BuckTestOrchestrator<'b> { let expanded; { - let opts = self.session.options(); - cwd = if test_info.run_from_project_root() || opts.force_run_from_project_root { CellRootPathBuf::new(ProjectRelativePathBuf::unchecked_new("".to_owned())) } else { supports_re = false; // For compatibility with v1, - let cell_resolver = self.dice.get_cell_resolver().await?; + let cell_resolver = dice.get_cell_resolver().await?; let cell = cell_resolver.get(test_target.target().pkg().cell_name())?; 
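Editor's note: `executor_config_with_remote_cache_override` above only pays for a clone when it actually has to flip `remote_cache_enabled` off, returning `Cow::Borrowed` otherwise. A reduced sketch of that borrow-or-own pattern, with a hypothetical config type:

use std::borrow::Cow;

#[derive(Clone)]
struct ExecConfig {
    remote_cache_enabled: bool,
}

// Borrow the config untouched in the common case; clone into an owned copy
// only when a field actually has to be overridden.
fn with_cache_disabled(config: &ExecConfig) -> Cow<'_, ExecConfig> {
    if config.remote_cache_enabled {
        let mut owned = config.clone();
        owned.remote_cache_enabled = false;
        Cow::Owned(owned)
    } else {
        Cow::Borrowed(config)
    }
}

fn main() {
    let base = ExecConfig { remote_cache_enabled: true };
    assert!(!with_cache_disabled(&base).remote_cache_enabled);
    assert!(matches!(
        with_cache_disabled(&ExecConfig { remote_cache_enabled: false }),
        Cow::Borrowed(_)
    ));
}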
cell.path().to_buf() }; @@ -870,25 +1299,26 @@ impl<'b> BuckTestOrchestrator<'b> { }?; }; - let (expanded_cmd, expanded_env, inputs) = expanded; + let (expanded_cmd, expanded_env, inputs, expanded_worker) = expanded; - for output in pre_create_dirs { - let test_path = BuckOutTestPath::new(output_root.clone(), output.name); + for output in pre_create_dirs.into_owned() { + let test_path = BuckOutTestPath::new(output_root.clone(), output.name.into()); declared_outputs.insert(test_path, OutputCreationBehavior::Create); } Ok(ExpandedTestExecutable { - cwd: cwd.project_relative_path().to_buf(), + cwd: cwd.as_project_relative_path().to_buf(), cmd: expanded_cmd, env: expanded_env, inputs, declared_outputs, supports_re, + worker: expanded_worker, }) } async fn create_command_execution_request( - &self, + dice: &mut DiceComputations<'_>, cwd: ProjectRelativePathBuf, cmd: Vec, env: SortedVectorMap, @@ -896,9 +1326,10 @@ impl<'b> BuckTestOrchestrator<'b> { declared_outputs: IndexMap, fs: &ArtifactFs, timeout: Option, - host_sharing_requirements: Option, + host_sharing_requirements: Option>, executor_preference: Option, required_local_resources: Vec, + worker: Option, ) -> anyhow::Result { let mut inputs = Vec::with_capacity(cmd_inputs.len()); for input in &cmd_inputs { @@ -906,7 +1337,7 @@ impl<'b> BuckTestOrchestrator<'b> { // hence we don't actually need to spawn these in parallel // TODO (T102328660): Does CommandExecutionRequest need this artifact? inputs.push(CommandExecutionInput::Artifact(Box::new( - self.dice.ensure_artifact_group(input).await?, + dice.ensure_artifact_group(input).await?, ))); } @@ -916,22 +1347,24 @@ impl<'b> BuckTestOrchestrator<'b> { .into_iter() .map(|(path, create)| CommandExecutionOutput::TestPath { path, create }) .collect(); + let digest_config = dice.global_data().get_digest_config(); let mut request = CommandExecutionRequest::new( vec![], cmd, - CommandExecutionPaths::new(inputs, outputs, fs, self.digest_config)?, + CommandExecutionPaths::new(inputs, outputs, fs, digest_config)?, env, ); request = request .with_working_directory(cwd) .with_local_environment_inheritance(EnvironmentInheritance::test_allowlist()) .with_disable_miniperf(true) + .with_worker(worker) .with_required_local_resources(required_local_resources)?; if let Some(timeout) = timeout { request = request.with_timeout(timeout) } if let Some(host_sharing_requirements) = host_sharing_requirements { - request = request.with_host_sharing_requirements(host_sharing_requirements); + request = request.with_host_sharing_requirements(host_sharing_requirements.dupe()); } if let Some(executor_preference) = executor_preference { request = request.with_executor_preference(executor_preference); @@ -940,71 +1373,98 @@ impl<'b> BuckTestOrchestrator<'b> { } async fn setup_local_resources( - &self, + dice: &mut DiceComputations<'_>, + cancellation: &CancellationContext<'_>, setup_contexts: Vec, executor: CommandExecutor, default_timeout: Duration, + liveliness_observer: Arc, ) -> Result, ExecuteError> { - let setup_commands = - futures::future::try_join_all(setup_contexts.into_iter().map(|context| { - self.prepare_local_resource(context, executor.fs(), default_timeout) - })) + if setup_contexts.is_empty() { + return Ok(vec![]); + } + let setup_commands = dice + .try_compute_join(setup_contexts, |dice, context| { + let fs = executor.fs(); + async move { + Self::prepare_local_resource(dice, context, &fs, default_timeout).await + } + .boxed() + }) .await?; - self.require_alive().await?; - - let resource_futs = 
setup_commands.into_iter().map(|context| { - let local_resource_target = context.target.dupe(); - self.local_resource_state_registry - .0 - .entry(local_resource_target.dupe()) - .or_insert_with(|| { - let setup = Self::start_local_resource( - self.events.dupe(), - self.liveliness_observer.dupe(), - self.digest_config.dupe(), - executor.dupe(), - context, - self.cancellations, - ); - async move { + Self::require_alive(liveliness_observer.dupe()).await?; + let events = dice.per_transaction_data().get_dispatcher().dupe(); + let digest_config = dice.global_data().get_digest_config(); + + // TODO(romanp): The code below is not optimal. We are locking the entire registry here, but we could have better concurrency. + // For example, if different suites require different local resources and can execute in parallel, this code runs sequentially but should run in parallel. + // An easy fix would be to introduce an RwLock instead of a mutex. In this case, suites that have the necessary resources and do not require write access + // can be executed in parallel. + let local_resource_state_registry = dice.get_local_resource_registry(); + let required_targets = setup_commands + .iter() + .map(|ctx| ctx.target.dupe()) + .collect::>(); + let mut lock = local_resource_state_registry.0.lock().await; + + let resource_futs = setup_commands + .into_iter() + .filter(|ctx| !lock.contains_key(&ctx.target)) + .map(|ctx| { + let missing_target = ctx.target.dupe(); + let setup = Self::start_local_resource( + events.dupe(), + liveliness_observer.dupe(), + digest_config.dupe(), + executor.dupe(), + ctx, + cancellation, + ); + async move { + ( + missing_target.dupe(), setup .await .with_context(|| { format!( "Error setting up local resource declared in `{}`", - local_resource_target + missing_target ) }) - .shared_error() - } - .boxed() - .shared() - }) - .clone() - }); + .map_err(buck2_error::Error::from), + ) + } + }); + for (target, result) in futures::future::join_all(resource_futs).await { + lock.insert(target, result); + } - Ok(futures::future::try_join_all(resource_futs) - .await - .map_err(anyhow::Error::from)?) + let result: buck2_error::Result> = required_targets + .iter() + .map(|t| lock.get(t).unwrap().clone()) + .collect(); + Ok(result.map_err(anyhow::Error::from)?) 
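Editor's note: the registry logic above takes one async lock, runs setup only for targets that are not yet present, records each outcome, and then reads back the full required set; the TODO notes that an `RwLock` would let suites whose resources already exist proceed in parallel. A compact sketch of the same memoization shape, with hypothetical types and `tokio::sync::Mutex` assumed:

use std::collections::HashMap;
use std::sync::Arc;

use tokio::sync::Mutex;

type Registry = Arc<Mutex<HashMap<String, Result<u32, String>>>>;

// Fill in missing entries while holding the lock, then read the required set.
async fn ensure_resources(
    registry: Registry,
    required: Vec<String>,
) -> Result<Vec<u32>, String> {
    let mut state = registry.lock().await;
    for target in &required {
        if !state.contains_key(target) {
            // Stand-in for actually running the setup command.
            state.insert(target.clone(), Ok(target.len() as u32));
        }
    }
    required
        .iter()
        .map(|t| state.get(t).unwrap().clone())
        .collect()
}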
} async fn prepare_local_resource( - &self, + dice: &mut DiceComputations<'_>, context: LocalResourceSetupContext, fs: &ArtifactFs, default_timeout: Duration, ) -> anyhow::Result { - let futs = context - .input_artifacts - .iter() - .map(|group| self.dice.ensure_artifact_group(group)); - let inputs = futures::future::try_join_all(futs).await?; + let digest_config = dice.global_data().get_digest_config(); + + let inputs = dice + .try_compute_join(context.input_artifacts, |dice, group| { + async move { dice.ensure_artifact_group(&group).await }.boxed() + }) + .await?; let inputs = inputs .into_iter() .map(|group_values| CommandExecutionInput::Artifact(Box::new(group_values))) .collect(); - let paths = CommandExecutionPaths::new(inputs, indexset![], fs, self.digest_config)?; + let paths = CommandExecutionPaths::new(inputs, indexset![], fs, digest_config)?; let mut execution_request = CommandExecutionRequest::new(vec![], context.cmd, paths, Default::default()); execution_request = @@ -1022,8 +1482,8 @@ impl<'b> BuckTestOrchestrator<'b> { digest_config: DigestConfig, executor: CommandExecutor, context: PreparedLocalResourceSetupContext, - cancellations: &'b CancellationContext<'b>, - ) -> SharedResult { + cancellation: &CancellationContext<'_>, + ) -> buck2_error::Result { let manager = CommandExecutionManager::new( Box::new(MutexClaimManager::new()), events.dupe(), @@ -1040,9 +1500,11 @@ impl<'b> BuckTestOrchestrator<'b> { prepared_action: &prepared_action, digest_config, }; - let command = executor.exec_cmd(manager, &prepared_command, cancellations); + let command = executor.exec_cmd(manager, &prepared_command, cancellation); - let start = SetupLocalResourcesStart {}; + let start = SetupLocalResourcesStart { + target_label: Some(context.target.as_proto()), + }; let end = SetupLocalResourcesEnd {}; let execution_result = events .span_async(start, async move { (command.await, end) }) @@ -1058,12 +1520,7 @@ impl<'b> BuckTestOrchestrator<'b> { timing: _, .. }, - rejected_execution: _, - did_cache_upload: _, - did_dep_file_cache_upload: _, - dep_file_key: _, - eligible_for_full_hybrid: _, - dep_file_metadata: _, + .. } = execution_result; let std_streams = std_streams @@ -1074,28 +1531,26 @@ impl<'b> BuckTestOrchestrator<'b> { match status { CommandExecutionStatus::Success { .. } => {} CommandExecutionStatus::Failure { .. } => { - return Err(SharedError::new(anyhow::anyhow!( + return Err(anyhow::anyhow!( "Local resource setup command failed with `{}` exit code, stdout:\n{}\nstderr:\n{}\n", exit_code.unwrap_or(1), String::from_utf8_lossy(&std_streams.stdout), String::from_utf8_lossy(&std_streams.stderr), - ))); + ).into()); } CommandExecutionStatus::TimedOut { duration, .. } => { - return Err(SharedError::new(anyhow::anyhow!( + return Err(anyhow::anyhow!( "Local resource setup command timed out after `{}s`, stdout:\n{}\nstderr:\n{}\n", duration.as_secs(), String::from_utf8_lossy(&std_streams.stdout), String::from_utf8_lossy(&std_streams.stderr), - ))); + ).into()); } - CommandExecutionStatus::Error { stage: _, error } => { - return Err(SharedError::new(error)); + CommandExecutionStatus::Error { error, .. 
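Editor's note: each failure arm above folds the exit code and the lossily decoded stdio streams into a single readable error. A minimal sketch of that reporting shape, as a hypothetical helper:

// Render a failed setup command as one self-describing error, including the
// captured streams decoded with `from_utf8_lossy` so invalid UTF-8 cannot
// make error reporting itself fail.
fn setup_failure(exit_code: i32, stdout: &[u8], stderr: &[u8]) -> anyhow::Error {
    anyhow::anyhow!(
        "Local resource setup command failed with `{}` exit code, stdout:\n{}\nstderr:\n{}\n",
        exit_code,
        String::from_utf8_lossy(stdout),
        String::from_utf8_lossy(stderr),
    )
}

fn main() {
    println!("{}", setup_failure(1, b"out", b"err"));
}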
} => { + return Err(error.into()); } CommandExecutionStatus::Cancelled => { - return Err(SharedError::new(anyhow::anyhow!( - "Local resource setup command cancelled" - ))); + return Err(anyhow::anyhow!("Local resource setup command cancelled").into()); } }; @@ -1123,8 +1578,8 @@ struct Execute2RequestExpander<'a> { output_root: &'a ForwardRelativePath, declared_outputs: &'a mut IndexMap, fs: &'a ExecutorFs<'a>, - cmd: Vec, - env: SortedVectorMap, + cmd: Cow<'a, Vec>, + env: Cow<'a, SortedVectorMap>, } impl<'a> Execute2RequestExpander<'a> { @@ -1135,12 +1590,20 @@ impl<'a> Execute2RequestExpander<'a> { Vec, SortedVectorMap, IndexSet, + Option, )> where B: CommandLineContextExt<'a>, { - let cli_args_for_interpolation = self - .test_info + let Execute2RequestExpander { + test_info, + output_root, + declared_outputs, + fs, + cmd, + env, + } = self; + let cli_args_for_interpolation = test_info .command() .filter_map(|c| match c { TestCommandMember::Literal(..) => None, @@ -1148,7 +1611,7 @@ impl<'a> Execute2RequestExpander<'a> { }) .collect::>(); - let env_for_interpolation = self.test_info.env().collect::>(); + let env_for_interpolation = test_info.env().collect::>(); let expand_arg_value = |cli: &mut dyn CommandLineBuilder, ctx: &mut dyn CommandLineContext, @@ -1164,7 +1627,7 @@ impl<'a> Execute2RequestExpander<'a> { match content { ArgValueContent::ExternalRunnerSpecValue(ExternalRunnerSpecValue::Verbatim(v)) => { - v.add_to_command_line(&mut cli, ctx)?; + v.as_str().add_to_command_line(&mut cli, ctx)?; } ArgValueContent::ExternalRunnerSpecValue(ExternalRunnerSpecValue::ArgHandle(h)) => { let arg = cli_args_for_interpolation @@ -1182,12 +1645,9 @@ impl<'a> Execute2RequestExpander<'a> { arg.add_to_command_line(&mut cli, ctx)?; } ArgValueContent::DeclaredOutput(output) => { - let test_path = BuckOutTestPath::new(self.output_root.to_owned(), output.name); - let path = self - .fs - .fs() - .buck_out_path_resolver() - .resolve_test(&test_path); + let test_path = + BuckOutTestPath::new(output_root.to_owned(), output.name.into()); + let path = fs.fs().buck_out_path_resolver().resolve_test(&test_path); let path = ctx.resolve_project_path(path)?.into_string(); cli.push_arg(path); declared_outputs.insert(test_path, OutputCreationBehavior::Parent); @@ -1201,39 +1661,73 @@ impl<'a> Execute2RequestExpander<'a> { let mut expanded_cmd = Vec::::new(); let mut ctx = B::new(self.fs); - for var in self.cmd { + for var in cmd.into_owned() { expand_arg_value( &mut expanded_cmd, &mut ctx, &mut artifact_visitor, - self.declared_outputs, + declared_outputs, var, )?; } - let expanded_env = self - .env + let expanded_env = env + .into_owned() .into_iter() .map(|(k, v)| { - let mut env = String::new(); - let mut ctx = B::new(self.fs); + let mut curr_env = String::new(); + let mut ctx = B::new(fs); expand_arg_value( - &mut SpaceSeparatedCommandLineBuilder::wrap_string(&mut env), + &mut SpaceSeparatedCommandLineBuilder::wrap_string(&mut curr_env), &mut ctx, &mut artifact_visitor, - self.declared_outputs, + declared_outputs, v, )?; - anyhow::Ok((k, env)) + anyhow::Ok((k, curr_env)) }) .collect::, _>>()?; + let expanded_worker = match test_info.worker() { + Some(worker) => { + let mut worker_rendered = Vec::::new(); + let worker_exe = worker.exe_command_line(); + worker_exe.add_to_command_line(&mut worker_rendered, &mut ctx)?; + worker_exe.visit_artifacts(&mut artifact_visitor)?; + Some(WorkerSpec { + exe: worker_rendered, + id: WorkerId(worker.id), + concurrency: worker.concurrency(), + }) + } + _ => None, + }; + let 
inputs = artifact_visitor.inputs; - Ok((expanded_cmd, expanded_env, inputs)) + Ok((expanded_cmd, expanded_env, inputs, expanded_worker)) } } +async fn resolve_output_root( + dice: &mut DiceComputations<'_>, + test_target: &ConfiguredProvidersLabel, + prefix: TestExecutionPrefix, +) -> Result { + let output_root = match prefix { + TestExecutionPrefix::Listing => { + let resolver = dice.get_buck_out_path().await?; + resolver + .resolve_test_discovery(test_target) + .into_forward_relative_path_buf() + } + TestExecutionPrefix::Testing(prefix) => prefix.join(ForwardRelativePathBuf::unchecked_new( + Uuid::new_v4().to_string(), + )), + }; + Ok(output_root) +} + trait CommandLineContextExt<'a>: CommandLineContext + 'a { fn new(fs: &'a ExecutorFs) -> Self; } @@ -1274,15 +1768,15 @@ struct ExpandedTestExecutable { inputs: IndexSet, supports_re: bool, declared_outputs: IndexMap, + worker: Option, } fn create_prepare_for_local_execution_result( fs: &ArtifactFs, request: CommandExecutionRequest, + local_resource_setup_commands: Vec, ) -> PrepareForLocalExecutionResult { - let relative_cwd = request - .working_directory() - .unwrap_or_else(|| ProjectRelativePath::empty()); + let relative_cwd = request.working_directory(); let cwd = fs.fs().resolve(relative_cwd); let cmd = request.all_args_vec(); @@ -1294,7 +1788,40 @@ fn create_prepare_for_local_execution_result( request.local_environment_inheritance(), ); + let local_resource_setup_commands = local_resource_setup_commands + .into_iter() + .map(|r| local_resource_setup_command_prepared_for_local_execution(fs, r)) + .collect::>(); + PrepareForLocalExecutionResult { + command: LocalExecutionCommand { + cmd, + env: env.into_inner(), + cwd, + }, + local_resource_setup_commands, + } +} + +fn local_resource_setup_command_prepared_for_local_execution( + fs: &ArtifactFs, + resource_setup_command: PreparedLocalResourceSetupContext, +) -> LocalExecutionCommand { + let relative_cwd = resource_setup_command.execution_request.working_directory(); + let cwd = fs.fs().resolve(relative_cwd); + let cmd = resource_setup_command.execution_request.all_args_vec(); + + let mut env = LossyEnvironment::new(); + apply_local_execution_environment( + &mut env, + &cwd, + resource_setup_command.execution_request.env(), + resource_setup_command + .execution_request + .local_environment_inheritance(), + ); + + LocalExecutionCommand { cmd, env: env.into_inner(), cwd, @@ -1374,6 +1901,74 @@ impl CommandExecutionTarget for TestTarget<'_> { } } +/// Checks if test listings cache is enabled. Needed only for safe deployment and will be removed +async fn check_cache_listings_experiment( + dice: &mut DiceComputations<'_>, + test_target: &ConfiguredProvidersLabel, +) -> anyhow::Result { + #[derive( + Clone, + Dupe, + derive_more::Display, + Debug, + Eq, + Hash, + PartialEq, + Allocative + )] + struct CheckCacheListingsConfigKey; + + #[async_trait] + impl Key for CheckCacheListingsConfigKey { + type Value = buck2_error::Result>>>; + + async fn compute( + &self, + mut dice: &mut DiceComputations, + _cancellation: &CancellationContext, + ) -> Self::Value { + let cell_resolver = dice.get_cell_resolver().await?; + let root_cell = cell_resolver.root_cell(); + let alias_resolver = dice.get_cell_alias_resolver(root_cell).await?; + let root_conf = dice.get_legacy_root_config_on_dice().await?; + let patterns: Vec = root_conf + .view(&mut dice) + .parse_list(BuckconfigKeyRef { + section: "buck2", + property: "cache_test_listings", + })? 
+ .unwrap_or_default(); + + let mut result = Vec::new(); + for pattern in patterns { + result.push(ParsedPattern::parse_precise( + pattern.trim(), + root_cell, + &cell_resolver, + &alias_resolver, + )?); + } + Ok(result.into()) + } + + fn equality(x: &Self::Value, y: &Self::Value) -> bool { + match (x, y) { + (Ok(x), Ok(y)) => x == y, + _ => false, + } + } + } + + let patterns = dice.compute(&CheckCacheListingsConfigKey).await??; + for pattern in patterns.iter() { + if pattern.matches(test_target.target().unconfigured_label()) { + return Ok(true); + } + } + + Ok(false) +} + #[derive(Debug)] struct LocalResourceTarget<'a> { target: &'a ConfiguredTargetLabel, @@ -1416,8 +2011,6 @@ mod tests { use buck2_core::cells::CellResolver; use buck2_core::configuration::data::ConfigurationData; use buck2_core::fs::project::ProjectRootTemp; - use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; - use buck2_events::dispatch::EventDispatcher; use buck2_test_api::data::TestStatus; use dice::testing::DiceBuilder; use dice::UserComputationData; @@ -1456,9 +2049,7 @@ mod tests { NoopLivelinessObserver::create(), sender, EventDispatcher::null(), - DigestConfig::testing_default(), CancellationContext::testing(), - Arc::new(LocalResourceRegistry::new()), ), receiver, )) diff --git a/app/buck2_test/src/remote_storage.rs b/app/buck2_test/src/remote_storage.rs new file mode 100644 index 0000000000000..011d99744dd31 --- /dev/null +++ b/app/buck2_test/src/remote_storage.rs @@ -0,0 +1,122 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use anyhow::Ok; +use buck2_execute::artifact_value::ArtifactValue; +use buck2_execute::digest::CasDigestToReExt; +use buck2_execute::directory::ActionDirectoryEntry; +use buck2_execute::directory::ActionDirectoryMember; +use buck2_execute::directory::ActionSharedDirectory; +use buck2_execute::re::manager::ManagedRemoteExecutionClient; +use buck2_test_api::data::RemoteStorageConfig; +use dupe::Dupe; +use remote_execution::TDigest; + +pub async fn apply_config( + client: ManagedRemoteExecutionClient, + artifact: &ArtifactValue, + config: &RemoteStorageConfig, +) -> anyhow::Result<()> { + match &config.ttl_config { + Some(ttl_config) => { + // Note that deps represent artifacts that symlinks depend on. Currently, test artifact trees + // that contain symlinks cannot be converted into remote objects. Therefore, we do not extend + // the TTL of symlinks. Additionally, it is rare for test outputs to include symlinks, but if they do, + // we are materializing them on disk. 
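Editor's note: the digest collection that follows walks the artifact tree bottom-up, gathering file digests first and appending each directory's own fingerprint last, which is exactly the ordering the unit test below asserts. A toy mirror of that recursion, with simplified types standing in for the directory and digest types:

// Post-order digest collection over a toy directory tree.
enum Entry {
    File(u64),
    Dir(u64, Vec<Entry>),
}

fn collect(entry: &Entry) -> Vec<u64> {
    match entry {
        Entry::File(digest) => vec![*digest],
        Entry::Dir(fingerprint, children) => {
            // Children first, then the directory's own fingerprint.
            let mut digests: Vec<u64> = children.iter().flat_map(collect).collect();
            digests.push(*fingerprint);
            digests
        }
    }
}

fn main() {
    let tree = Entry::Dir(99, vec![Entry::File(1), Entry::File(2)]);
    assert_eq!(collect(&tree), vec![1, 2, 99]);
}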
+ let digests = collect_digests(artifact.entry()); + client + .extend_digest_ttl(digests, ttl_config.ttl, ttl_config.use_case.dupe()) + .await + } + _ => Ok(()), + } +} + +fn collect_digests(directory_entry: &ActionDirectoryEntry) -> Vec { + match directory_entry { + ActionDirectoryEntry::Leaf(ActionDirectoryMember::File(f)) => vec![f.digest.to_re()], + ActionDirectoryEntry::Dir(dir) => { + let mut digests: Vec<_> = dir + .entries() + .into_iter() + .map(|(_, entry)| collect_digests(entry)) + .flatten() + .collect(); + digests.push(dir.fingerprint().to_re()); + digests + } + _ => vec![], // Symlink or ExternalSymlink + } +} + +#[cfg(test)] +mod tests { + use buck2_common::file_ops::FileMetadata; + use buck2_common::file_ops::TrackedFileDigest; + use buck2_core::fs::project_rel_path::ProjectRelativePath; + use buck2_execute::digest_config::DigestConfig; + use buck2_execute::directory::extract_artifact_value; + use buck2_execute::directory::insert_file; + use buck2_execute::directory::ActionDirectoryBuilder; + + use super::*; + + #[test] + fn test_collect_digests_dir() { + let digest_config = DigestConfig::testing_default(); + let mut expected: Vec = vec![]; + let mut builder = ActionDirectoryBuilder::empty(); + + // construct a directory with 3 files, f1, f2, and f3 + for file in &["d1/f1", "d1/f2", "d1/f3"] { + let empty_file = FileMetadata { + digest: TrackedFileDigest::from_content( + file.as_bytes(), + digest_config.cas_digest_config(), + ), + is_executable: false, + }; + expected.push(empty_file.digest.to_re()); + let _unused = insert_file( + &mut builder, + ProjectRelativePath::new(file).unwrap(), + empty_file, + ); + } + + let value = extract_artifact_value( + &builder, + ProjectRelativePath::new("d1").unwrap(), + digest_config, + ) + .expect("Failed to build dir") + .unwrap(); + + let digests = match value.entry() { + ActionDirectoryEntry::Dir(dir) => { + expected.push(dir.fingerprint().to_re()); + collect_digests(&value.entry()) + } + _ => vec![], + }; + assert_eq!(expected, digests); + } + + #[test] + fn test_collect_digests_leaf() { + let digest_config = DigestConfig::testing_default(); + let empty_file = digest_config.empty_file(); + let expected = vec![empty_file.digest.to_re()]; + + let leaf_entry: ActionDirectoryEntry = + ActionDirectoryEntry::Leaf(ActionDirectoryMember::File(empty_file)); + let digests = collect_digests(&leaf_entry); + assert_eq!(expected, digests); + } +} diff --git a/app/buck2_test/src/session.rs b/app/buck2_test/src/session.rs index bade1481236f1..a55186f0f6810 100644 --- a/app/buck2_test/src/session.rs +++ b/app/buck2_test/src/session.rs @@ -7,11 +7,13 @@ * of this source tree. */ +use core::fmt; use std::sync::atomic::AtomicU64; use std::sync::atomic::Ordering; +use std::sync::Arc; +use allocative::Allocative; use anyhow::Context as _; -use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_test_api::data::ConfiguredTargetHandle; @@ -19,7 +21,7 @@ use chrono::Local; use dashmap::DashMap; use dupe::Dupe; -#[derive(Debug, Clone, Copy, Dupe, Default)] +#[derive(Debug, Clone, Copy, Dupe, Default, Allocative, PartialEq, Hash, Eq)] pub struct TestSessionOptions { /// Whether this session should allow things to run on RE. 
pub allow_re: bool, @@ -27,6 +29,16 @@ pub struct TestSessionOptions { pub force_run_from_project_root: bool, } +impl fmt::Display for TestSessionOptions { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "allow_re = {}, force_use_project_relative_paths = {}, force_run_from_project_root = {}", + self.allow_re, self.force_use_project_relative_paths, self.force_run_from_project_root + ) + } +} + /// The state of a buck2 test command. pub struct TestSession { /// The next ConfiguredTargetHandle that will be assigned. @@ -37,7 +49,7 @@ pub struct TestSession { /// The prefix to assign to all paths for this test session. This isn't used to provide any /// uniqueness (at least not at this time), but it's helpful to group outputs in a way that /// more-or-less matches a given test session. - prefix: ForwardRelativePathBuf, + prefix: Arc, /// Options overriding the behavior of tests executed in this session. This is primarily /// intended for unstable or debugging features. options: TestSessionOptions, @@ -55,7 +67,7 @@ impl TestSession { Self { next_id: AtomicU64::new(0), labels: DashMap::new(), - prefix, + prefix: Arc::new(prefix), options, } } @@ -64,8 +76,8 @@ impl TestSession { self.options } - pub fn prefix(&self) -> &ForwardRelativePath { - self.prefix.as_ref() + pub fn prefix(&self) -> Arc { + self.prefix.dupe() } /// Insert a new provider and retrieve the matching handle. diff --git a/app/buck2_test/src/tcp/mod.rs b/app/buck2_test/src/tcp.rs similarity index 100% rename from app/buck2_test/src/tcp/mod.rs rename to app/buck2_test/src/tcp.rs diff --git a/app/buck2_test/src/translations.rs b/app/buck2_test/src/translations.rs index ac25d615b0870..5c2e9331030f4 100644 --- a/app/buck2_test/src/translations.rs +++ b/app/buck2_test/src/translations.rs @@ -10,14 +10,20 @@ //! Translation between buck core data and the test spec data types use anyhow::Context; +use buck2_common::file_ops::FileDigest; use buck2_core::cells::CellResolver; use buck2_core::provider::label::ConfiguredProvidersLabel; use buck2_data::ToProtoMessage; +use buck2_execute::artifact_value::ArtifactValue; +use buck2_execute::directory::ActionDirectoryEntry; +use buck2_execute::directory::ActionDirectoryMember; +use buck2_execute::directory::ActionSharedDirectory; use buck2_test_api::data::ConfiguredTarget; +use buck2_test_api::data::RemoteObject; use crate::session::TestSession; -pub fn build_configured_target_handle( +pub(crate) fn build_configured_target_handle( target: ConfiguredProvidersLabel, session: &TestSession, cell_resolver: &CellResolver, @@ -41,7 +47,7 @@ pub fn build_configured_target_handle( }) } -pub fn convert_test_result( +pub(crate) fn convert_test_result( test_result: buck2_test_api::data::TestResult, session: &TestSession, ) -> anyhow::Result { @@ -65,3 +71,48 @@ pub fn convert_test_result( target_label: Some(test_target.target().as_proto()), }) } + +/// Convert a named [`ArtifactValue`] into a test API's [`RemoteObject`]. +/// +/// Note that artifact trees containing symlinks currently can't be converted. +/// Test outputs are unlikely to contain symlinks, and if they do, we'd rather +/// fall back to materializing them on disk. +pub(crate) fn convert_artifact(name: String, artifact: &ArtifactValue) -> Option { + // deps represent artifacts that symlinks depend on. Bail when present. 
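Editor's note: the session change above replaces the owned `ForwardRelativePathBuf` prefix with an `Arc`, so `prefix()` hands out a shared pointer instead of borrowing or copying the path. A reduced sketch of the idea, with `String` standing in for the path type:

use std::sync::Arc;

struct Session {
    prefix: Arc<String>,
}

impl Session {
    // Handing out an `Arc` makes every call a refcount bump rather than a
    // path copy; buck2's `dupe()` expresses the same guaranteed-cheap clone.
    fn prefix(&self) -> Arc<String> {
        Arc::clone(&self.prefix)
    }
}

fn main() {
    let s = Session { prefix: Arc::new("buck-out/v2/test".to_owned()) };
    let p1 = s.prefix();
    let p2 = s.prefix();
    assert!(Arc::ptr_eq(&p1, &p2));
}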
+ if artifact.deps().is_some() { + return None; + } + + convert_directory_entry(name, artifact.entry()) +} + +fn convert_digest(digest: &FileDigest) -> buck2_test_api::data::CasDigest { + let hash = format!("{}", digest.raw_digest()); + buck2_test_api::data::CasDigest { + hash, + size_bytes: digest.size() as i64, + } +} + +fn convert_directory_entry( + name: String, + entry: &ActionDirectoryEntry, +) -> Option { + match entry { + ActionDirectoryEntry::Leaf( + ActionDirectoryMember::Symlink(..) | ActionDirectoryMember::ExternalSymlink(..), + ) => None, + ActionDirectoryEntry::Leaf(ActionDirectoryMember::File(f)) => { + Some(RemoteObject::file(name, convert_digest(f.digest.data()))) + } + ActionDirectoryEntry::Dir(dir) => { + let children = dir + .entries() + .into_iter() + .map(|(name, entry)| convert_directory_entry(name.to_string(), entry)) + .collect::>>()?; + let digest = convert_digest(dir.fingerprint().data()); + Some(RemoteObject::dir(name, digest, children)) + } + } +} diff --git a/app/buck2_test/src/unix/mod.rs b/app/buck2_test/src/unix.rs similarity index 100% rename from app/buck2_test/src/unix/mod.rs rename to app/buck2_test/src/unix.rs diff --git a/app/buck2_test_api/BUCK b/app/buck2_test_api/BUCK index d85c0b90203c9..b6134ba1272d1 100644 --- a/app/buck2_test_api/BUCK +++ b/app/buck2_test_api/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -14,6 +13,7 @@ rust_library( deps = [ "fbsource//third-party/rust:anyhow", "fbsource//third-party/rust:async-trait", + "fbsource//third-party/rust:derivative", "fbsource//third-party/rust:derive_more", "fbsource//third-party/rust:futures", "fbsource//third-party/rust:prost-types", @@ -22,6 +22,7 @@ rust_library( "fbsource//third-party/rust:tower-layer", "fbsource//third-party/rust:tower-service", "fbsource//third-party/rust:tracing", + "//buck2/allocative/allocative:allocative", "//buck2/app/buck2_core:buck2_core", "//buck2/app/buck2_downward_api:buck2_downward_api", "//buck2/app/buck2_downward_api_proto:buck2_downward_api_proto", diff --git a/app/buck2_test_api/Cargo.toml b/app/buck2_test_api/Cargo.toml index 135f1be39e204..4c26962098d17 100644 --- a/app/buck2_test_api/Cargo.toml +++ b/app/buck2_test_api/Cargo.toml @@ -1,37 +1,35 @@ [package] +edition = "2021" +license = { workspace = true } name = "buck2_test_api" +repository = { workspace = true } version = "0.1.0" -edition = "2021" [dependencies] +allocative = { workspace = true } anyhow = { workspace = true } async-trait = { workspace = true } +derivative = { workspace = true } derive_more = { workspace = true } futures = { workspace = true } +prost-types = { workspace = true } +tokio = { workspace = true } tonic = { workspace = true } tower-layer = { workspace = true } tower-service = { workspace = true } -tokio = { workspace = true } tracing = { workspace = true } -prost-types = { workspace = true } -gazebo = { workspace = true } dupe = { workspace = true } -gazebo_lint.version = "0.1" -gazebo_lint.optional = true -# @oss-disable: gazebo_lint.path = "../../gazebo_lint/gazebo_lint" +gazebo = { workspace = true } host_sharing = { workspace = true } sorted_vector_map = { workspace = true } buck2_core = { workspace = true } +buck2_downward_api = { workspace = true } +buck2_downward_api_proto = { workspace = true } buck2_events = { workspace = true } buck2_grpc = { workspace = true } buck2_test_proto = { workspace = true } -buck2_downward_api = { workspace = true } 
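Editor's note: `convert_directory_entry` below collects its children into an `Option<Vec<_>>`, so a single unconvertible child (a symlink) makes the whole directory conversion return `None` and the artifact falls back to on-disk materialization. The same all-or-nothing collect in miniature:

// Collecting an iterator of `Option<T>` into `Option<Vec<T>>` yields `None`
// as soon as any element is `None`.
fn convert_all(values: &[i32]) -> Option<Vec<i32>> {
    values
        .iter()
        .map(|v| if *v >= 0 { Some(*v * 2) } else { None })
        .collect()
}

fn main() {
    assert_eq!(convert_all(&[1, 2]), Some(vec![2, 4]));
    assert_eq!(convert_all(&[1, -2]), None);
}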
-buck2_downward_api_proto = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } - -[features] -# @oss-disable: default = ["gazebo_lint"] diff --git a/app/buck2_test_api/src/data.rs b/app/buck2_test_api/src/data.rs new file mode 100644 index 0000000000000..e62d8cbeaefb7 --- /dev/null +++ b/app/buck2_test_api/src/data.rs @@ -0,0 +1,444 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! Core data objects used in the protocol + +mod convert; + +use std::collections::HashMap; +use std::fmt; +use std::fmt::Debug; +use std::fmt::Formatter; +use std::time::Duration; +use std::time::SystemTime; + +use allocative::Allocative; +use buck2_core::cells::name::CellName; +use buck2_core::execution_types::executor_config::RemoteExecutorUseCase; +use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; +use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; +pub use buck2_test_proto::CasDigest; +pub use buck2_test_proto::ExecutionDetails; +use derivative::Derivative; +use derive_more::Display; +use derive_more::From; +use dupe::Dupe; +use host_sharing::HostSharingRequirements; +use sorted_vector_map::SortedVectorMap; + +/// A handle generated by the TestOrchestrator. It can be used by the TestExecutor to access this +/// target. +#[derive(Debug, Copy, Clone, Dupe, From, Hash, Eq, PartialEq)] +pub struct ConfiguredTargetHandle(u64); + +/// The Target of a test rule +#[derive(Debug, Clone, PartialEq)] +pub struct ConfiguredTarget { + pub handle: ConfiguredTargetHandle, + /// Structured data + pub cell: String, + pub package: String, + pub target: String, + pub configuration: String, + pub package_project_relative_path: ForwardRelativePathBuf, +} + +/// Metadata about the execution to display +#[derive(Debug, Clone, PartialEq, Allocative, Hash, Eq)] +pub enum TestStage { + // Listing the test binary to discover tests. The String is the name of the suite at the binary + Listing(String), + // the name of the test(s) that we are running for the suite of a target + Testing { + suite: String, + testcases: Vec, + }, +} + +impl fmt::Display for TestStage { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match &self { + TestStage::Listing(name) => write!(f, "Listing({})", name), + TestStage::Testing { suite, testcases } => { + write!(f, "Testing({}:[{}])", suite, testcases.join(", ")) + } + } + } +} + +#[derive(Clone, PartialEq, Allocative)] +pub enum ExecutionStream { + Inline(Vec), +} + +impl Debug for ExecutionStream { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + Self::Inline(d) => { + write!(f, "{}", String::from_utf8_lossy(d)) + } + } + } +} + +#[derive(Clone, Debug, Dupe, PartialEq, Allocative)] +pub enum ExecutionStatus { + Finished { exitcode: i32 }, + TimedOut { duration: Duration }, +} + +/// The result of running a test +#[derive(PartialEq, Eq, Debug, Clone)] +pub struct TestResult { + // the target the test came from + pub target: ConfiguredTargetHandle, + // the name of the test + pub name: String, + // the status of running the test + pub status: TestStatus, + // additional optional messages + pub msg: Option, + // the duration of the test run + // TODO(skcd) should this be optional? 
why doesn't everything have duration + pub duration: Option, + // the output of the test execution (combining stdout and stderr) + pub details: String, +} + +/// different possible test results +#[derive(PartialEq, Eq, Debug, Clone, Dupe)] +#[allow(non_camel_case_types)] +pub enum TestStatus { + PASS, + FAIL, + SKIP, + OMITTED, + FATAL, + TIMEOUT, + // There is something called unknown, adding it here for now, + // we can change it later on. + UNKNOWN, + // We also have re-runs + RERUN, + LISTING_SUCCESS, + LISTING_FAILED, +} + +/// The set of information about a test rule that is passed to the test executor +#[derive(Clone, Debug, PartialEq)] +pub struct ExternalRunnerSpec { + /// Target the spec belongs to + pub target: ConfiguredTarget, + /// Type of test spec + pub test_type: String, + /// Base command used for further processing. A mix of verbatim arguments and + /// opaque handles for more complex arguments. + pub command: Vec, + /// Environment variables a specified by the rule. A mapping from keys to + /// verbatim values or opaque handles for more complex values. + pub env: HashMap, + /// Labels defined on the rule. + pub labels: Vec, + /// Contacts defined on the rule. + pub contacts: Vec, + /// Oncall for the test + pub oncall: Option, + /// Cell of current working directory for test command. + pub working_dir_cell: CellName, +} + +/// Command line argument or environment variable value +/// +/// It is either a verbatim string, or a reference to a more complex value that's opaque to the +/// test run coordinator. +#[derive(Clone, Debug, PartialEq, Allocative, Hash, Eq)] +pub enum ExternalRunnerSpecValue { + Verbatim(String), + ArgHandle(ArgHandle), + EnvHandle(EnvHandle), +} + +impl std::fmt::Display for ExternalRunnerSpecValue { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + Self::Verbatim(s) => write!(f, "Verbatim({})", s), + Self::ArgHandle(h) => write!(f, "ArgHandle({})", h), + Self::EnvHandle(h) => write!(f, "EnvHandle({})", h), + } + } +} + +/// Handle referring to a complex argument defined on the test rule +#[derive(Clone, Debug, Dupe, PartialEq, From, Allocative, Hash, Eq, Display)] +#[display("{}", _0)] +pub struct ArgHandle(pub usize); + +impl TryFrom for ArgHandle { + type Error = anyhow::Error; + + fn try_from(i: i64) -> Result { + Ok(ArgHandle(i.try_into()?)) + } +} + +/// Handle referring to a complex environment value defined on the test rule +#[derive(Clone, Debug, PartialEq, From, Allocative, Hash, Eq, Display)] +#[display("{}", _0)] +pub struct EnvHandle(pub String); + +#[derive(Clone, Debug, PartialEq, Allocative, Hash, Eq, Display)] +#[display("content = {}, format = {}", "content", "format")] +pub struct ArgValue { + pub content: ArgValueContent, + pub format: Option, +} + +#[derive(Clone, Debug, PartialEq, Allocative, Hash, Eq)] +pub enum ArgValueContent { + ExternalRunnerSpecValue(ExternalRunnerSpecValue), + DeclaredOutput(OutputName), +} + +impl fmt::Display for ArgValueContent { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + Self::ExternalRunnerSpecValue(v) => write!(f, "ExternalRunnerSpecValue({})", v), + Self::DeclaredOutput(o) => write!(f, "DeclaredOutput({})", o), + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash, From, Allocative, Display)] +#[display("name = {}", "name")] +pub struct OutputName { + pub name: ForwardRelativePathBuf, +} + +impl OutputName { + pub fn unchecked_new(name: String) -> Self { + Self { + name: ForwardRelativePathBuf::unchecked_new(name), + } + } + + pub fn 
as_str(&self) -> &str {
+        self.name.as_str()
+    }
+
+    pub fn into_string(self) -> String {
+        self.name.into_string()
+    }
+}
+
+impl From<OutputName> for ForwardRelativePathBuf {
+    fn from(value: OutputName) -> Self {
+        value.name
+    }
+}
+
+#[derive(Dupe, Clone, PartialEq, Allocative, Hash, Eq)]
+pub struct TtlConfig {
+    /// Specifies a custom TTL for blobs under the output dir.
+    pub ttl: Duration,
+    /// Specifies a custom use-case to use for managing blobs' TTL.
+    pub use_case: RemoteExecutorUseCase,
+}
+
+impl fmt::Display for TtlConfig {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        write!(
+            f,
+            "ttl = {}, use_case = {}",
+            self.ttl.as_millis(),
+            self.use_case
+        )
+    }
+}
+
+// Use a custom implementation of Debug because we don't care about the
+// interning that's happening inside RemoteExecutorUseCase.
+impl Debug for TtlConfig {
+    fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+        fmt.debug_struct("TtlConfig")
+            .field("ttl", &self.ttl)
+            .field("use_case", &self.use_case.as_str())
+            .finish()
+    }
+}
+
+#[derive(Debug, Dupe, Clone, PartialEq, Default, Allocative, Hash, Eq, Display)]
+#[display("remote = {}, ttl = {}", "supports_remote", "ttl_config")]
+pub struct RemoteStorageConfig {
+    /// Signals that the output does not have to be materialized.
+    pub supports_remote: bool,
+    pub ttl_config: Option<TtlConfig>,
+}
+
+impl RemoteStorageConfig {
+    pub fn new(supports_remote: bool) -> Self {
+        Self {
+            supports_remote,
+            ..Default::default()
+        }
+    }
+}
+
+#[derive(Debug, Clone, PartialEq, Allocative, Hash, Eq, Display)]
+#[display("name = {}, config = {}", "name", "remote_storage_config")]
+pub struct DeclaredOutput {
+    pub name: OutputName,
+    pub remote_storage_config: RemoteStorageConfig,
+}
+
+impl DeclaredOutput {
+    pub fn unchecked_new(name: String, remote_storage_config: RemoteStorageConfig) -> Self {
+        Self {
+            name: OutputName::unchecked_new(name),
+            remote_storage_config,
+        }
+    }
+}
+
+#[derive(Clone, Debug, Eq, Hash, PartialEq, Allocative, Display)]
+#[display("name = {}", "name")]
+pub struct ExecutorConfigOverride {
+    pub name: String,
+}
+
+#[derive(Clone, Debug, Eq, Hash, PartialEq, Allocative, Display)]
+#[display("name = {}", "name")]
+pub struct LocalResourceType {
+    pub name: String,
+}
+
+#[derive(Clone, Debug, Eq, Hash, PartialEq, Allocative, Display)]
+#[display("resources = {}", "resources")]
+pub struct RequiredLocalResources {
+    pub resources: Vec<LocalResourceType>,
+}
+
+#[derive(Clone, Debug, PartialEq)]
+pub struct ExecuteRequest2 {
+    pub test_executable: TestExecutable,
+    pub timeout: Duration,
+    pub host_sharing_requirements: HostSharingRequirements,
+    pub executor_override: Option<ExecutorConfigOverride>,
+    pub required_local_resources: RequiredLocalResources,
+}
+
+#[derive(Debug, Clone)]
+#[derive(Derivative)]
+#[derivative(PartialEq)]
+pub struct RemoteFile {
+    pub digest: CasDigest,
+    #[derivative(PartialEq = "ignore")]
+    pub name: String,
+}
+
+/// [`RemoteDir`] carries information about the whole tree.
+#[derive(Debug, Clone)]
+#[derive(Derivative)]
+#[derivative(PartialEq)]
+pub struct RemoteDir {
+    // Digest carries all the necessary information about the contents of the
+    // directory tree. Enough for equality checks (hence the ignores on the
+    // other fields).
+    pub digest: CasDigest,
+    #[derivative(PartialEq = "ignore")]
+    pub name: String,
+    #[derivative(PartialEq = "ignore")]
+    pub children: Vec<RemoteObject>,
+}
+
+/// Files and directories stored remotely in CAS.
+#[derive(Debug, Clone, PartialEq)] +pub enum RemoteObject { + File(RemoteFile), + Dir(RemoteDir), +} + +impl RemoteObject { + pub fn file(name: String, digest: CasDigest) -> Self { + Self::File(RemoteFile { name, digest }) + } + + pub fn dir(name: String, digest: CasDigest, children: Vec) -> Self { + Self::Dir(RemoteDir { + name, + digest, + children, + }) + } +} + +#[derive(Clone, Debug, PartialEq)] +pub enum Output { + LocalPath(AbsNormPathBuf), + RemoteObject(RemoteObject), +} + +#[derive(Clone, Debug, PartialEq)] +pub struct ExecutionResult2 { + pub status: ExecutionStatus, + pub stdout: ExecutionStream, + pub stderr: ExecutionStream, + pub outputs: HashMap, + pub start_time: SystemTime, + pub execution_time: Duration, + /// We don't try to convert this field, mostly because it shares with buck2.data, and that + /// seems to have very little value. We just validate it's sent. + pub execution_details: ExecutionDetails, +} + +#[allow(clippy::large_enum_variant)] +pub enum ExecuteResponse { + /// A result is available. + Result(ExecutionResult2), + + /// The test run is being cancelled. + Cancelled, +} + +#[derive(Clone, Debug, PartialEq)] +pub struct TestExecutable { + pub stage: TestStage, + pub target: ConfiguredTargetHandle, + pub cmd: Vec, + pub env: SortedVectorMap, + pub pre_create_dirs: Vec, +} + +#[derive(Clone, Debug, PartialEq)] +pub struct PrepareForLocalExecutionResult { + pub command: LocalExecutionCommand, + pub local_resource_setup_commands: Vec, +} + +#[derive(Clone, Debug, PartialEq)] +pub struct LocalExecutionCommand { + pub cmd: Vec, + pub env: SortedVectorMap, + pub cwd: AbsNormPathBuf, +} + +pub mod testing { + use crate::data::ConfiguredTargetHandle; + + pub trait ConfiguredTargetHandleExt { + fn testing_new(id: u64) -> Self; + } + + impl ConfiguredTargetHandleExt for ConfiguredTargetHandle { + fn testing_new(id: u64) -> Self { + Self(id) + } + } +} diff --git a/app/buck2_test_api/src/data/convert.rs b/app/buck2_test_api/src/data/convert.rs index 862647d4e8f0e..14d76ad437bb6 100644 --- a/app/buck2_test_api/src/data/convert.rs +++ b/app/buck2_test_api/src/data/convert.rs @@ -7,16 +7,21 @@ * of this source tree. 
*/ +use std::time::Duration; use std::time::SystemTime; use anyhow::Context as _; use buck2_core::cells::name::CellName; +use buck2_core::execution_types::executor_config::RemoteExecutorUseCase; use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; use gazebo::prelude::*; +use super::LocalExecutionCommand; use super::LocalResourceType; use super::PrepareForLocalExecutionResult; +use super::RemoteStorageConfig; use super::RequiredLocalResources; +use super::TtlConfig; use crate::convert; use crate::data::ArgHandle; use crate::data::ArgValue; @@ -24,7 +29,6 @@ use crate::data::ArgValueContent; use crate::data::ConfiguredTarget; use crate::data::ConfiguredTargetHandle; use crate::data::DeclaredOutput; -use crate::data::DisplayMetadata; use crate::data::EnvHandle; use crate::data::ExecuteRequest2; use crate::data::ExecutionResult2; @@ -34,17 +38,22 @@ use crate::data::ExecutorConfigOverride; use crate::data::ExternalRunnerSpec; use crate::data::ExternalRunnerSpecValue; use crate::data::Output; +use crate::data::OutputName; +use crate::data::RemoteDir; +use crate::data::RemoteFile; +use crate::data::RemoteObject; use crate::data::TestExecutable; use crate::data::TestResult; +use crate::data::TestStage; use crate::data::TestStatus; use crate::protocol::convert::host_sharing_requirements_from_grpc; use crate::protocol::convert::host_sharing_requirements_to_grpc; -impl TryFrom<buck2_test_proto::DisplayMetadata> for DisplayMetadata { +impl TryFrom<buck2_test_proto::TestStage> for TestStage { type Error = anyhow::Error; - fn try_from(s: buck2_test_proto::DisplayMetadata) -> Result<Self, Self::Error> { - use buck2_test_proto::display_metadata::*; + fn try_from(s: buck2_test_proto::TestStage) -> Result<Self, Self::Error> { + use buck2_test_proto::test_stage::*; use buck2_test_proto::Testing; let res = match s.item.context("Missing `item`")? { @@ -56,11 +65,11 @@ impl TryFrom<buck2_test_proto::DisplayMetadata> for DisplayMetadata { } } -impl TryInto<buck2_test_proto::DisplayMetadata> for DisplayMetadata { +impl TryInto<buck2_test_proto::TestStage> for TestStage { type Error = anyhow::Error; - fn try_into(self) -> Result<buck2_test_proto::DisplayMetadata, Self::Error> { - use buck2_test_proto::display_metadata::*; + fn try_into(self) -> Result<buck2_test_proto::TestStage, Self::Error> { + use buck2_test_proto::test_stage::*; use buck2_test_proto::Testing; let item = match self { @@ -68,7 +77,7 @@ impl TryInto<buck2_test_proto::DisplayMetadata> for DisplayMetadata { Self::Testing { suite, testcases } => Item::Testing(Testing { suite, testcases }), }; - Ok(buck2_test_proto::DisplayMetadata { item: Some(item) }) + Ok(buck2_test_proto::TestStage { item: Some(item) }) } } @@ -386,10 +395,46 @@ impl TryInto<buck2_test_proto::ExternalRunnerSpecValue> for ExternalRunnerSpecVa } } +impl From<OutputName> for buck2_test_proto::OutputName { + fn from(o: OutputName) -> Self { + Self { + name: o.name.as_str().to_owned(), + } + } +} + +impl TryFrom<buck2_test_proto::OutputName> for OutputName { + type Error = anyhow::Error; + + fn try_from(o: buck2_test_proto::OutputName) -> Result<Self, Self::Error> { + let name = ForwardRelativePathBuf::try_from(o.name)?; + Ok(Self { name }) + } +} + +impl From<TtlConfig> for buck2_test_proto::TtlConfig { + fn from(o: TtlConfig) -> Self { + Self { + ttl_seconds: o.ttl.as_secs() as i64, + use_case: o.use_case.to_string(), + } + } +} + +impl From<buck2_test_proto::TtlConfig> for TtlConfig { + fn from(o: buck2_test_proto::TtlConfig) -> Self { + let ttl = Duration::from_secs(o.ttl_seconds as u64); + let use_case = RemoteExecutorUseCase::new(o.use_case); + Self { ttl, use_case } + } +} + impl From<DeclaredOutput> for buck2_test_proto::DeclaredOutput { fn from(o: DeclaredOutput) -> Self { Self { name: o.name.as_str().to_owned(), + supports_remote: o.remote_storage_config.supports_remote, + ttl_config: o.remote_storage_config.ttl_config.map(Into::into), } } } @@ -398,8 +443,15 @@ impl TryFrom<buck2_test_proto::DeclaredOutput> for DeclaredOutput { type Error = anyhow::Error; fn
try_from(o: buck2_test_proto::DeclaredOutput) -> Result<Self, Self::Error> { - let name = ForwardRelativePathBuf::try_from(o.name)?; - Ok(Self { name }) + let name = ForwardRelativePathBuf::try_from(o.name)?.into(); + let remote_storage_config = RemoteStorageConfig { + supports_remote: o.supports_remote, + ttl_config: o.ttl_config.map(Into::into), + }; + Ok(Self { + name, + remote_storage_config, + }) } } @@ -565,6 +617,58 @@ impl TryInto<buck2_test_proto::ExecuteRequest2> for ExecuteRequest2 { } } +impl TryInto<buck2_test_proto::RemoteObject> for RemoteObject { + type Error = anyhow::Error; + + fn try_into(self) -> Result<buck2_test_proto::RemoteObject, Self::Error> { + match self { + RemoteObject::File(RemoteFile { name, digest }) => { + let node = buck2_test_proto::RemoteFileNode { name }; + Ok(buck2_test_proto::RemoteObject { + digest: Some(digest), + node: Some(buck2_test_proto::remote_object::Node::File(node)), + }) + } + RemoteObject::Dir(RemoteDir { + name, + digest, + children, + }) => { + let children = children + .into_iter() + .map(|child| child.try_into()) + .collect::<Result<Vec<_>, _>>()?; + let node = buck2_test_proto::RemoteDirNode { name, children }; + Ok(buck2_test_proto::RemoteObject { + digest: Some(digest), + node: Some(buck2_test_proto::remote_object::Node::Dir(node)), + }) + } + } + } +} + +impl TryFrom<buck2_test_proto::RemoteObject> for RemoteObject { + type Error = anyhow::Error; + + fn try_from(value: buck2_test_proto::RemoteObject) -> Result<Self, Self::Error> { + let digest = value.digest.context("missing digest")?; + match value.node.context("missing node")? { + buck2_test_proto::remote_object::Node::File(file) => { + Ok(RemoteObject::file(file.name, digest)) + } + buck2_test_proto::remote_object::Node::Dir(dir) => { + let children = dir + .children + .into_iter() + .map(|child| child.try_into()) + .collect::<Result<Vec<_>, _>>()?; + Ok(RemoteObject::dir(dir.name, digest, children)) + } + } + } +} + impl TryInto<buck2_test_proto::Output> for Output { type Error = anyhow::Error; @@ -575,6 +679,7 @@ impl TryInto<buck2_test_proto::Output> for Output { Self::LocalPath(value) => { Value::LocalPath(value.to_str().context("Invalid local path")?.to_owned()) } + Self::RemoteObject(value) => Value::RemoteObject(value.try_into()?), }; Ok(buck2_test_proto::Output { value: Some(value) }) @@ -591,6 +696,7 @@ impl TryFrom<buck2_test_proto::Output> for Output { Value::LocalPath(value) => { Self::LocalPath(value.try_into().context("Invalid local path value.")?) } + Value::RemoteObject(value) => Self::RemoteObject(value.try_into()?), }) } } @@ -608,7 +714,7 @@ impl TryInto<buck2_test_proto::ExecutionResult2> for ExecutionResult2 { .into_iter() .map(|(k, v)| { Ok(buck2_test_proto::OutputEntry { - declared_output: Some(k.try_into().context("Invalid `declared_output`")?), + declared_output: Some(k.into()), output: Some(v.try_into().context("Invalid `output`")?), }) }) @@ -697,13 +803,13 @@ impl TryFrom<buck2_test_proto::TestExecutable> for TestExecutable { fn try_from(s: buck2_test_proto::TestExecutable) -> Result<Self, Self::Error> { let buck2_test_proto::TestExecutable { - ui_prints, + stage, target, cmd, pre_create_dirs, env, } = s; - let ui_prints = ui_prints + let ui_prints = stage .context("Missing `ui_prints`")?
.try_into() .context("Invalid `ui_prints`")?; @@ -734,7 +840,7 @@ impl TryFrom<buck2_test_proto::TestExecutable> for TestExecutable { .context("Invalid `pre_create_dirs`")?; Ok(TestExecutable { - ui_prints, + stage: ui_prints, target, cmd, env, @@ -747,7 +853,7 @@ impl TryInto<buck2_test_proto::TestExecutable> for TestExecutable { type Error = anyhow::Error; fn try_into(self) -> Result<buck2_test_proto::TestExecutable, Self::Error> { - let ui_prints = Some(self.ui_prints.try_into().context("Invalid `ui_prints`")?); + let stage = Some(self.stage.try_into().context("Invalid `ui_prints`")?); let target = Some(self.target.try_into().context("Invalid `target`")?); let cmd = self .cmd @@ -769,13 +875,10 @@ impl TryInto<buck2_test_proto::TestExecutable> for TestExecutable { }) .collect::<anyhow::Result<_>>()?; - let pre_create_dirs = self - .pre_create_dirs - .into_try_map(|i| i.try_into()) - .context("Invalid `pre_create_dirs`")?; + let pre_create_dirs = self.pre_create_dirs.into_map(|i| i.into()); Ok(buck2_test_proto::TestExecutable { - ui_prints, + stage, target, cmd, pre_create_dirs, @@ -784,44 +887,124 @@ } } -impl TryInto<buck2_test_proto::PrepareForLocalExecutionResult> for PrepareForLocalExecutionResult { +impl TryInto<buck2_test_proto::PrepareForLocalExecutionResponse> + for PrepareForLocalExecutionResult +{ type Error = anyhow::Error; - fn try_into(self) -> Result<buck2_test_proto::PrepareForLocalExecutionResult, Self::Error> { - let cwd = self.cwd.to_str().context("Invalid cwd path")?.to_owned(); + fn try_into(self) -> Result<buck2_test_proto::PrepareForLocalExecutionResponse, Self::Error> { + let cwd = self + .command + .cwd + .to_str() + .context("Invalid cwd path")? + .to_owned(); + + Ok(buck2_test_proto::PrepareForLocalExecutionResponse { + result: Some(buck2_test_proto::PrepareForLocalExecutionResult { + cmd: self.command.cmd, + cwd, + env: self + .command + .env + .into_iter() + .map( + |(key, value)| buck2_test_proto::VerbatimEnvironmentVariable { key, value }, + ) + .collect(), + }), + setup_local_resource_commands: self + .local_resource_setup_commands + .into_iter() + .map(|c| { + <LocalExecutionCommand as TryInto< + buck2_test_proto::SetupLocalResourceLocalExecutionCommand, + >>::try_into(c) + }) + .collect::<Result<Vec<_>, anyhow::Error>>()?, + }) + } +} + +impl TryInto<buck2_test_proto::SetupLocalResourceLocalExecutionCommand> for LocalExecutionCommand { + type Error = anyhow::Error; - Ok(buck2_test_proto::PrepareForLocalExecutionResult { + fn try_into( + self, + ) -> Result<buck2_test_proto::SetupLocalResourceLocalExecutionCommand, Self::Error> { + Ok(buck2_test_proto::SetupLocalResourceLocalExecutionCommand { cmd: self.cmd, - cwd, + cwd: self + .cwd + .to_str() + .context("Invalid cwd path for local resource")?
+ .to_owned(), env: self .env .into_iter() - .map(|(key, value)| buck2_test_proto::VerbatimEnvironmentVariable { key, value }) + .map(|(k, v)| buck2_test_proto::VerbatimEnvironmentVariable { key: k, value: v }) .collect(), }) } } -impl TryFrom<buck2_test_proto::PrepareForLocalExecutionResult> for PrepareForLocalExecutionResult { +impl TryFrom<buck2_test_proto::PrepareForLocalExecutionResult> for LocalExecutionCommand { type Error = anyhow::Error; fn try_from(s: buck2_test_proto::PrepareForLocalExecutionResult) -> Result<Self, Self::Error> { - let buck2_test_proto::PrepareForLocalExecutionResult { cmd, cwd, env } = s; - let cwd = cwd.try_into().context("Invalid cwd value.")?; + Ok(Self { + cmd: s.cmd, + cwd: s.cwd.try_into().context("Invalid cwd value.")?, + env: s + .env + .into_iter() + .map(|env_var| (env_var.key, env_var.value)) + .collect(), + }) + } +} - let env = env - .into_iter() - .map(|env_var| (env_var.key, env_var.value)) - .collect(); +impl TryFrom<buck2_test_proto::SetupLocalResourceLocalExecutionCommand> for LocalExecutionCommand { + type Error = anyhow::Error; + + fn try_from( + s: buck2_test_proto::SetupLocalResourceLocalExecutionCommand, + ) -> Result<Self, Self::Error> { + Ok(Self { + cmd: s.cmd, + cwd: s.cwd.try_into().context("Invalid cwd value.")?, + env: s + .env + .into_iter() + .map(|env_var| (env_var.key, env_var.value)) + .collect(), + }) + } +} + +impl TryFrom<buck2_test_proto::PrepareForLocalExecutionResponse> + for PrepareForLocalExecutionResult +{ + type Error = anyhow::Error; - Ok(PrepareForLocalExecutionResult { cmd, env, cwd }) + fn try_from( + s: buck2_test_proto::PrepareForLocalExecutionResponse, + ) -> Result<Self, Self::Error> { + let result = s.result.context("Missing `result`")?; + Ok(Self { + command: LocalExecutionCommand::try_from(result)?, + local_resource_setup_commands: s + .setup_local_resource_commands + .into_iter() + .map(LocalExecutionCommand::try_from) + .collect::<Result<Vec<_>, anyhow::Error>>()?, + }) } } #[cfg(test)] mod tests { use std::fmt::Debug; - use std::time::Duration; use host_sharing::HostSharingRequirements; use sorted_vector_map::sorted_vector_map; @@ -880,11 +1063,12 @@ mod tests { #[test] fn execute_request2_roundtrip() { let declared_output = DeclaredOutput { - name: ForwardRelativePathBuf::unchecked_new("name".to_owned()), + name: OutputName::unchecked_new("name".to_owned()), + remote_storage_config: RemoteStorageConfig::new(true), }; let test_executable = TestExecutable { - ui_prints: DisplayMetadata::Listing("name".to_owned()), + stage: TestStage::Listing("name".to_owned()), target: ConfiguredTargetHandle(42), cmd: vec![ ArgValue { @@ -894,7 +1078,7 @@ mod tests { format: None, }, ArgValue { - content: ArgValueContent::DeclaredOutput(declared_output.clone()), + content: ArgValueContent::DeclaredOutput(declared_output.name.clone()), format: Some("--output={}".to_owned()), }, ], @@ -936,9 +1120,7 @@ mod tests { stdout: ExecutionStream::Inline(vec![97, 115, 109]), stderr: ExecutionStream::Inline(vec![118, 105, 109]), outputs: [( - DeclaredOutput { - name: ForwardRelativePathBuf::unchecked_new("name".to_owned()), - }, + OutputName::unchecked_new("name".to_owned()), Output::LocalPath(String::from(local_path).try_into().expect("valid abs path")), )] .into_iter() @@ -950,8 +1132,7 @@ mod tests { assert_roundtrips::<buck2_test_proto::ExecutionResult2, ExecutionResult2>(&result); } - #[test] - fn prepare_for_local_execution_result_roundtrip() { + fn dummy_local_execution_command() -> LocalExecutionCommand { let cmd = vec![ "my_cmd".to_owned(), "--some-arg".to_owned(), ]; @@ -965,10 +1146,18 @@ mod tests { let cwd = String::from(local_path).try_into().expect("valid abs path"); let env = sorted_vector_map!
{ "some_env".to_owned() => "some_env_val".to_owned() }; - let result = PrepareForLocalExecutionResult { cmd, env, cwd }; + LocalExecutionCommand { cmd, env, cwd } + } + + #[test] + fn prepare_for_local_execution_result_roundtrip() { + let result = PrepareForLocalExecutionResult { + command: dummy_local_execution_command(), + local_resource_setup_commands: vec![dummy_local_execution_command()], + }; assert_roundtrips::< - buck2_test_proto::PrepareForLocalExecutionResult, + buck2_test_proto::PrepareForLocalExecutionResponse, PrepareForLocalExecutionResult, >(&result); } @@ -976,11 +1165,12 @@ mod tests { #[test] fn test_executable_roundtrip() { let declared_output = DeclaredOutput { - name: ForwardRelativePathBuf::unchecked_new("name".to_owned()), + name: OutputName::unchecked_new("name".to_owned()), + remote_storage_config: RemoteStorageConfig::new(false), }; let test_executable = TestExecutable { - ui_prints: DisplayMetadata::Listing("name".to_owned()), + stage: TestStage::Listing("name".to_owned()), target: ConfiguredTargetHandle(42), cmd: vec![ ArgValue { @@ -990,7 +1180,7 @@ mod tests { format: None, }, ArgValue { - content: ArgValueContent::DeclaredOutput(declared_output.clone()), + content: ArgValueContent::DeclaredOutput(declared_output.name.clone()), format: Some("--output={}".to_owned()), }, ], diff --git a/app/buck2_test_api/src/data/mod.rs b/app/buck2_test_api/src/data/mod.rs deleted file mode 100644 index 331f474fde7e6..0000000000000 --- a/app/buck2_test_api/src/data/mod.rs +++ /dev/null @@ -1,263 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! Core data objects used in the protocol - -mod convert; - -use std::collections::HashMap; -use std::fmt; -use std::fmt::Debug; -use std::fmt::Formatter; -use std::time::Duration; -use std::time::SystemTime; - -use buck2_core::cells::name::CellName; -use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; -use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; -pub use buck2_test_proto::ExecutionDetails; -use derive_more::From; -use dupe::Dupe; -use host_sharing::HostSharingRequirements; -use sorted_vector_map::SortedVectorMap; - -/// A handle generated by the TestOrchestrator. It can be used by the TestExecutor to access this -/// target. -#[derive(Debug, Copy, Clone, Dupe, From, Hash, Eq, PartialEq)] -pub struct ConfiguredTargetHandle(u64); - -/// The Target of a test rule -#[derive(Debug, Clone, PartialEq)] -pub struct ConfiguredTarget { - pub handle: ConfiguredTargetHandle, - /// Structured data - pub cell: String, - pub package: String, - pub target: String, - pub configuration: String, - pub package_project_relative_path: ForwardRelativePathBuf, -} - -/// Metadata about the execution to display -#[derive(Debug, Clone, PartialEq)] -pub enum DisplayMetadata { - // Listing the test binary to discover tests. 
The String is the name of the suite at the binary - Listing(String), - // the name of the test(s) that we are running for the suite of a target - Testing { - suite: String, - testcases: Vec<String>, - }, -} - -#[derive(Clone, PartialEq)] -pub enum ExecutionStream { - Inline(Vec<u8>), -} - -impl Debug for ExecutionStream { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - match self { - Self::Inline(d) => { - write!(f, "{}", String::from_utf8_lossy(d)) - } - } - } -} - -#[derive(Clone, Debug, Dupe, PartialEq)] -pub enum ExecutionStatus { - Finished { exitcode: i32 }, - TimedOut { duration: Duration }, -} - -/// The result of running a test -#[derive(PartialEq, Eq, Debug, Clone)] -pub struct TestResult { - // the target the test came from - pub target: ConfiguredTargetHandle, - // the name of the test - pub name: String, - // the status of running the test - pub status: TestStatus, - // additional optional messages - pub msg: Option<String>, - // the duration of the test run - // TODO(skcd) should this be optional? why doesn't everything have duration - pub duration: Option<Duration>, - // the output of the test execution (combining stdout and stderr) - pub details: String, -} - -/// different possible test results -#[derive(PartialEq, Eq, Debug, Clone, Dupe)] -#[allow(non_camel_case_types)] -pub enum TestStatus { - PASS, - FAIL, - SKIP, - OMITTED, - FATAL, - TIMEOUT, - // There is something called unknown, adding it here for now, - // we can change it later on. - UNKNOWN, - // We also have re-runs - RERUN, - LISTING_SUCCESS, - LISTING_FAILED, -} - -/// The set of information about a test rule that is passed to the test executor -#[derive(Clone, Debug, PartialEq)] -pub struct ExternalRunnerSpec { - /// Target the spec belongs to - pub target: ConfiguredTarget, - /// Type of test spec - pub test_type: String, - /// Base command used for further processing. A mix of verbatim arguments and - /// opaque handles for more complex arguments. - pub command: Vec<ExternalRunnerSpecValue>, - /// Environment variables a specified by the rule. A mapping from keys to - /// verbatim values or opaque handles for more complex values. - pub env: HashMap<String, ExternalRunnerSpecValue>, - /// Labels defined on the rule. - pub labels: Vec<String>, - /// Contacts defined on the rule. - pub contacts: Vec<String>, - /// Oncall for the test - pub oncall: Option<String>, - /// Cell of current working directory for test command. - pub working_dir_cell: CellName, -} - -/// Command line argument or environment variable value -/// -/// It is either a verbatim string, or a reference to a more complex value that's opaque to the -/// test run coordinator.
-#[derive(Clone, Debug, PartialEq)] -pub enum ExternalRunnerSpecValue { - Verbatim(String), - ArgHandle(ArgHandle), - EnvHandle(EnvHandle), -} - -/// Handle referring to a complex argument defined on the test rule -#[derive(Clone, Debug, Dupe, PartialEq, From)] -pub struct ArgHandle(pub usize); - -impl TryFrom<i64> for ArgHandle { - type Error = anyhow::Error; - - fn try_from(i: i64) -> Result<Self, Self::Error> { - Ok(ArgHandle(i.try_into()?)) - } -} - -/// Handle referring to a complex environment value defined on the test rule -#[derive(Clone, Debug, PartialEq, From)] -pub struct EnvHandle(pub String); - -#[derive(Clone, Debug, PartialEq)] -pub struct ArgValue { - pub content: ArgValueContent, - pub format: Option<String>, -} - -#[derive(Clone, Debug, PartialEq)] -pub enum ArgValueContent { - ExternalRunnerSpecValue(ExternalRunnerSpecValue), - DeclaredOutput(DeclaredOutput), -} - -#[derive(Clone, Debug, Eq, Hash, PartialEq)] -pub struct DeclaredOutput { - pub name: ForwardRelativePathBuf, -} - -#[derive(Clone, Debug, Eq, Hash, PartialEq)] -pub struct ExecutorConfigOverride { - pub name: String, -} - -#[derive(Clone, Debug, Eq, Hash, PartialEq)] -pub struct LocalResourceType { - pub name: String, -} - -#[derive(Clone, Debug, Eq, Hash, PartialEq)] -pub struct RequiredLocalResources { - pub resources: Vec<LocalResourceType>, -} - -#[derive(Clone, Debug, PartialEq)] -pub struct ExecuteRequest2 { - pub test_executable: TestExecutable, - pub timeout: Duration, - pub host_sharing_requirements: HostSharingRequirements, - pub executor_override: Option<ExecutorConfigOverride>, - pub required_local_resources: RequiredLocalResources, -} - -#[derive(Clone, Debug, PartialEq)] -pub enum Output { - LocalPath(AbsNormPathBuf), -} - -#[derive(Clone, Debug, PartialEq)] -pub struct ExecutionResult2 { - pub status: ExecutionStatus, - pub stdout: ExecutionStream, - pub stderr: ExecutionStream, - pub outputs: HashMap<DeclaredOutput, Output>, - pub start_time: SystemTime, - pub execution_time: Duration, - /// We don't try to convert this field, mostly because it shares with buck2.data, and that - /// seems to have very little value. We just validate it's sent. - pub execution_details: ExecutionDetails, -} - -#[allow(clippy::large_enum_variant)] -pub enum ExecuteResponse { - /// A result is available. - Result(ExecutionResult2), - - /// The test run is being cancelled.
- Cancelled, -} - -#[derive(Clone, Debug, PartialEq)] -pub struct TestExecutable { - pub ui_prints: DisplayMetadata, - pub target: ConfiguredTargetHandle, - pub cmd: Vec<ArgValue>, - pub env: SortedVectorMap<String, ArgValue>, - pub pre_create_dirs: Vec<DeclaredOutput>, -} - -#[derive(Clone, Debug, PartialEq)] -pub struct PrepareForLocalExecutionResult { - pub cmd: Vec<String>, - pub env: SortedVectorMap<String, String>, - pub cwd: AbsNormPathBuf, -} - -pub mod testing { - use crate::data::ConfiguredTargetHandle; - - pub trait ConfiguredTargetHandleExt { - fn testing_new(id: u64) -> Self; - } - - impl ConfiguredTargetHandleExt for ConfiguredTargetHandle { - fn testing_new(id: u64) -> Self { - Self(id) - } - } -} diff --git a/app/buck2_test_api/src/grpc/mod.rs b/app/buck2_test_api/src/grpc.rs similarity index 100% rename from app/buck2_test_api/src/grpc/mod.rs rename to app/buck2_test_api/src/grpc.rs diff --git a/app/buck2_test_api/src/grpc/executor.rs b/app/buck2_test_api/src/grpc/executor.rs index 99ba16f0bbca1..e02bd9b8532d7 100644 --- a/app/buck2_test_api/src/grpc/executor.rs +++ b/app/buck2_test_api/src/grpc/executor.rs @@ -16,6 +16,8 @@ use buck2_test_proto::test_executor_client; use buck2_test_proto::test_executor_server; use buck2_test_proto::Empty; use buck2_test_proto::ExternalRunnerSpecRequest; +use buck2_test_proto::UnstableHeapDumpRequest; +use buck2_test_proto::UnstableHeapDumpResponse; use tokio::io::AsyncRead; use tokio::io::AsyncWrite; use tonic::transport::Channel; @@ -59,6 +61,16 @@ impl TestExecutor for TestExecutorClient { self.client.clone().end_of_test_requests(Empty {}).await?; Ok(()) } + + async fn unstable_heap_dump(&self, path: &str) -> anyhow::Result<()> { + self.client + .clone() + .unstable_heap_dump(UnstableHeapDumpRequest { + destination_path: path.into(), + }) + .await?; + Ok(()) + } } pub struct Service<T> { @@ -106,6 +118,20 @@ where }) .await } + + async fn unstable_heap_dump( + &self, + req: tonic::Request<UnstableHeapDumpRequest>, + ) -> Result<tonic::Response<UnstableHeapDumpResponse>, tonic::Status> { + to_tonic(async move { + self.inner + .unstable_heap_dump(&req.into_inner().destination_path) + .await + .context("Failed to dispatch unstable_heap_dump")?; + Ok(UnstableHeapDumpResponse {}) + }) + .await + } } pub fn spawn_executor_server<I, E>(io: I, executor: E) -> ServerHandle diff --git a/app/buck2_test_api/src/grpc/orchestrator.rs b/app/buck2_test_api/src/grpc/orchestrator.rs index 820ebefa83896..9acbc39108cba 100644 --- a/app/buck2_test_api/src/grpc/orchestrator.rs +++ b/app/buck2_test_api/src/grpc/orchestrator.rs @@ -39,6 +39,7 @@ use buck2_test_proto::Testing; use dupe::Dupe; use futures::future::BoxFuture; use futures::future::FutureExt; +use gazebo::prelude::VecExt; use host_sharing::HostSharingRequirements; use sorted_vector_map::SortedVectorMap; use tokio::io::AsyncRead; @@ -50,7 +51,6 @@ use tracing::Level; use crate::data::ArgValue; use crate::data::ConfiguredTargetHandle; use crate::data::DeclaredOutput; -use crate::data::DisplayMetadata; use crate::data::ExecuteRequest2; use crate::data::ExecuteResponse; use crate::data::ExecutorConfigOverride; @@ -58,6 +58,7 @@ use crate::data::PrepareForLocalExecutionResult; use crate::data::RequiredLocalResources; use crate::data::TestExecutable; use crate::data::TestResult; +use crate::data::TestStage; use crate::protocol::TestOrchestrator; /// Test runner client to buck2 test orchestrator.
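
The executor side of the new Unstable_HeapDump RPC is deliberately optional: the trait ships a default body that simply errors. A minimal sketch of an executor that opts in — the NoopExecutor name and the method bodies are illustrative stand-ins, not code from this change:

use buck2_test_api::data::ExternalRunnerSpec;
use buck2_test_api::protocol::TestExecutor;

struct NoopExecutor;

#[async_trait::async_trait]
impl TestExecutor for NoopExecutor {
    async fn external_runner_spec(&self, spec: ExternalRunnerSpec) -> anyhow::Result<()> {
        // A real executor would queue `spec` for execution here.
        drop(spec);
        Ok(())
    }

    async fn end_of_test_requests(&self) -> anyhow::Result<()> {
        // No more specs will arrive; a real executor would drain its queue.
        Ok(())
    }

    // Overriding the trait's default, which returns an "Unimplemented!" error.
    async fn unstable_heap_dump(&self, path: &str) -> anyhow::Result<()> {
        anyhow::bail!("heap dump to {path} not supported by this executor")
    }
}
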
@@ -134,7 +135,7 @@ impl DownwardApi for TestOrchestratorClient { impl TestOrchestratorClient { pub async fn execute2( &self, - ui_prints: DisplayMetadata, + ui_prints: TestStage, target: ConfiguredTargetHandle, cmd: Vec<ArgValue>, env: SortedVectorMap<String, ArgValue>, @@ -145,7 +146,7 @@ impl TestOrchestratorClient { required_local_resources: RequiredLocalResources, ) -> anyhow::Result<ExecuteResponse> { let test_executable = TestExecutable { - ui_prints, + stage: ui_prints, target, cmd, env, @@ -237,14 +238,15 @@ impl TestOrchestratorClient { pub async fn prepare_for_local_execution( &self, - ui_prints: DisplayMetadata, + stage: TestStage, target: ConfiguredTargetHandle, cmd: Vec<ArgValue>, env: SortedVectorMap<String, ArgValue>, pre_create_dirs: Vec<DeclaredOutput>, + required_local_resources: RequiredLocalResources, ) -> anyhow::Result<PrepareForLocalExecutionResult> { let executable = TestExecutable { - ui_prints, + stage, target, cmd, env, @@ -257,20 +259,15 @@ impl TestOrchestratorClient { let request = buck2_test_proto::PrepareForLocalExecutionRequest { test_executable: Some(executable), + required_local_resources: required_local_resources.resources.into_map(|r| r.into()), }; - let PrepareForLocalExecutionResponse { result } = self - .test_orchestrator_client + self.test_orchestrator_client .clone() .prepare_for_local_execution(request) .await? - .into_inner(); - - let result = result - .context("Missing `result`")? + .into_inner() .try_into() - .context("Invalid `result`")?; - - Ok(result) + .context("Invalid `result`") } pub async fn attach_info_message(&self, message: String) -> anyhow::Result<()> { @@ -308,7 +305,7 @@ where .context("Invalid execute2 request")?; let TestExecutable { - ui_prints, + stage, target, cmd, env, @@ -318,7 +315,7 @@ where let response = self .inner .execute2( - ui_prints, + stage, target, cmd, env, @@ -436,11 +433,16 @@ where request: tonic::Request<PrepareForLocalExecutionRequest>, ) -> Result<tonic::Response<PrepareForLocalExecutionResponse>, tonic::Status> { to_tonic(async move { - let buck2_test_proto::PrepareForLocalExecutionRequest { test_executable } = - request.into_inner(); + let buck2_test_proto::PrepareForLocalExecutionRequest { + test_executable, + required_local_resources, + } = request.into_inner(); + let resources = RequiredLocalResources { + resources: required_local_resources.into_map(|r| r.into()), + }; let TestExecutable { - ui_prints, + stage, target, cmd, env, @@ -453,15 +455,11 @@ where let result = self .inner - .prepare_for_local_execution(ui_prints, target, cmd, env, pre_create_dirs) + .prepare_for_local_execution(stage, target, cmd, env, pre_create_dirs, resources) .await .context("Prepare for local execution failed")?; - let result = result.try_into().context("Failed to serialize result")?; - - Ok(PrepareForLocalExecutionResponse { - result: Some(result), - }) + result.try_into().context("Failed to serialize result") }) .await } diff --git a/app/buck2_test_api/src/lib.rs b/app/buck2_test_api/src/lib.rs index 555221b37865d..0b819babd7e4f 100644 --- a/app/buck2_test_api/src/lib.rs +++ b/app/buck2_test_api/src/lib.rs @@ -7,6 +7,8 @@ * of this source tree. */ +#![feature(error_generic_member_access)] + //! This crate defines the interactions between Buck and the test executor via a test protocol. //! //! # Test Executor Protocol //! @@ -22,10 +24,6 @@ //! External test executors are expected to implement the trait `TestExecutor`. Test executors //! will be able to interact with Buck via the `TestOrchestrator` trait.
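
For orientation, a hypothetical call site for the reworked client method above — the handle, stage, and the empty argument/env/resource values are placeholders, and SortedVectorMap is assumed to provide a Default impl. Note the two changes versus the old API: TestStage replaces DisplayMetadata, and RequiredLocalResources now travels with the prepare request:

use buck2_test_api::data::testing::ConfiguredTargetHandleExt;
use buck2_test_api::data::ConfiguredTargetHandle;
use buck2_test_api::data::PrepareForLocalExecutionResult;
use buck2_test_api::data::RequiredLocalResources;
use buck2_test_api::data::TestStage;
use buck2_test_api::grpc::TestOrchestratorClient;

async fn prepare_locally(
    client: &TestOrchestratorClient,
) -> anyhow::Result<PrepareForLocalExecutionResult> {
    client
        .prepare_for_local_execution(
            TestStage::Testing {
                suite: "suite".to_owned(),
                testcases: vec!["case".to_owned()],
            },
            ConfiguredTargetHandle::testing_new(42),
            vec![],             // cmd: Vec<ArgValue>
            Default::default(), // env: SortedVectorMap<String, ArgValue>
            vec![],             // pre_create_dirs: Vec<DeclaredOutput>
            RequiredLocalResources { resources: vec![] },
        )
        .await
}
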
-#![cfg_attr(feature = "gazebo_lint", feature(plugin))] -#![cfg_attr(feature = "gazebo_lint", allow(deprecated))] // :( -#![cfg_attr(feature = "gazebo_lint", plugin(gazebo_lint))] - pub mod convert; pub mod data; pub mod grpc; diff --git a/app/buck2_test_api/src/protocol.rs b/app/buck2_test_api/src/protocol.rs new file mode 100644 index 0000000000000..77248f4eaa5a1 --- /dev/null +++ b/app/buck2_test_api/src/protocol.rs @@ -0,0 +1,132 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! The traits that define the protocol between buck and a test executor. +//! +//! Test executors are expected to implement the trait `TestExecutor`. Buck will need to implement +//! the trait `Buck` for the test executor to interact against. + +pub mod convert; + +use std::time::Duration; + +use dupe::Dupe; +use host_sharing::HostSharingRequirements; +use sorted_vector_map::SortedVectorMap; + +use crate::data::ArgValue; +use crate::data::ConfiguredTargetHandle; +use crate::data::DeclaredOutput; +use crate::data::ExecuteResponse; +use crate::data::ExecutorConfigOverride; +use crate::data::ExternalRunnerSpec; +use crate::data::PrepareForLocalExecutionResult; +use crate::data::RequiredLocalResources; +use crate::data::TestResult; +use crate::data::TestStage; + +/// available to buck to interact with the test executor +#[async_trait::async_trait] +pub trait TestExecutor: Send + Sync { + /// sends an external runner spec to the test executor + async fn external_runner_spec(&self, s: ExternalRunnerSpec) -> anyhow::Result<()>; + + // report that there are no more test specs to send + async fn end_of_test_requests(&self) -> anyhow::Result<()>; + + /// performs a heap dump and saves the dump to a file. + async fn unstable_heap_dump(&self, _: &str) -> anyhow::Result<()> { + Err(anyhow::anyhow!("Unimplemented!")) + } +} + +// available to the test executor to interact with the orchestrator +#[async_trait::async_trait] +pub trait TestOrchestrator: Send + Sync { + /// executes the given command and returns the result of the execution + async fn execute2( + &self, + // information about this execute request for Buck's UX + stage: TestStage, + // the label of the rule being tested + target: ConfiguredTargetHandle, + // the command to run + cmd: Vec<ArgValue>, + // environment variables to set at runtime + env: SortedVectorMap<String, ArgValue>, + // timeout for command + timeout: Duration, + // parameters used to effectively share the executor host for this command. + host_sharing_requirements: HostSharingRequirements, + // outputs that need to be pre created as directories + pre_create_dirs: Vec<DeclaredOutput>, + // A specific executor to use for this. It must be declared on the underlying + // ExternalRunnerTestInfo to work.
+ executor_override: Option<ExecutorConfigOverride>, + required_local_resources: RequiredLocalResources, + ) -> anyhow::Result<ExecuteResponse>; + + /// reports a test is done + async fn report_test_result(&self, r: TestResult) -> anyhow::Result<()>; + + async fn report_tests_discovered( + &self, + target: ConfiguredTargetHandle, + suite: String, + name: Vec<String>, + ) -> anyhow::Result<()>; + + /// report a summary about the current test executor + async fn report_test_session(&self, session_info: String) -> anyhow::Result<()>; + + /// report that all tests are done and provide the exit code that this test executor wants to + /// return for the test command, no more executions + async fn end_of_test_results(&self, exit_code: i32) -> anyhow::Result<()>; + + /// prepare the given test executable to be available for local execution. + /// Return the actual command with all the args, env and cwd to be executed locally. + async fn prepare_for_local_execution( + &self, + stage: TestStage, + target: ConfiguredTargetHandle, + cmd: Vec<ArgValue>, + env: SortedVectorMap<String, ArgValue>, + pre_create_dirs: Vec<DeclaredOutput>, + required_local_resources: RequiredLocalResources, + ) -> anyhow::Result<PrepareForLocalExecutionResult>; + + /// attach a message containing information that the executor wants to be surfaced + /// to the user + async fn attach_info_message(&self, message: String) -> anyhow::Result<()>; +} + +// TODO need to figure out what this is. we can go without it for now +#[derive(Debug, Default, Clone, Dupe)] +pub struct ExecPlatformRefinement; + +/// the template name for external test executor to fill with the test execution location +pub const OUTPUT_DIR: &str = "output_dir"; + +/// the template name for buck to fill with the commands outputs +pub const OUTPUTS_TEMPLATE: &str = "outputs"; + +/// the template name for buck to fill with the run cmd based on the test's run information +/// this should be multi-arity +pub const TEST_RUN_CMD: &str = "test_run_cmd"; + +/// the template name for buck to fill with the run environment based on the test's run information +/// this should be multi-arity +pub const TEST_RUN_ENV_KEYS: &str = "test_run_env_keys"; + +/// the template name for buck to fill with the run environment based on the test's run information +/// this should be multi-arity +pub const TEST_RUN_ENV_VALUES: &str = "test_run_env_values"; + +/// the template name for the external test runner to fill with test filters +pub const TEST_FILTER_TEMPLATE: &str = "test_filter"; diff --git a/app/buck2_test_api/src/protocol/mod.rs b/app/buck2_test_api/src/protocol/mod.rs deleted file mode 100644 index 4d4e2e0c9ef52..0000000000000 --- a/app/buck2_test_api/src/protocol/mod.rs +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! The traits that defines the protocol between buck and a test executor. -//! -//! Test executors are expected to implement the trait `TestExecutor`. Buck will need to implement -//! the trait `Buck` for the test executor to interact against.
- -pub mod convert; - -use std::time::Duration; - -use dupe::Dupe; -use host_sharing::HostSharingRequirements; -use sorted_vector_map::SortedVectorMap; - -use crate::data::ArgValue; -use crate::data::ConfiguredTargetHandle; -use crate::data::DeclaredOutput; -use crate::data::DisplayMetadata; -use crate::data::ExecuteResponse; -use crate::data::ExecutorConfigOverride; -use crate::data::ExternalRunnerSpec; -use crate::data::PrepareForLocalExecutionResult; -use crate::data::RequiredLocalResources; -use crate::data::TestResult; - -/// available to buck to interact with the test executor -#[async_trait::async_trait] -pub trait TestExecutor: Send + Sync { - /// sends an external runner spec to the test executor - async fn external_runner_spec(&self, s: ExternalRunnerSpec) -> anyhow::Result<()>; - - // report that there are no more test specs to send - async fn end_of_test_requests(&self) -> anyhow::Result<()>; -} - -// available to the test executor to interact with the orchestrator -#[async_trait::async_trait] -pub trait TestOrchestrator: Send + Sync { - /// executes the given command and returns the result of the execution - async fn execute2( - &self, - // information about this execute request for Buck's UX - ui_prints: DisplayMetadata, - // the label of the rule being tested - target: ConfiguredTargetHandle, - // the command to run - cmd: Vec, - // environment variables to set at runtime - env: SortedVectorMap, - // timeout for command - timeout: Duration, - // parameters used to effectively share the executor host for this command. - host_sharing_requirements: HostSharingRequirements, - // outputs that need to be pre created as directories - pre_create_dirs: Vec, - // A specific executor to use for this. It must be declared on the underlying - // ExternalRunnerTestInfo to work. - executor_override: Option, - required_local_resources: RequiredLocalResources, - ) -> anyhow::Result; - - /// reports a test is done - async fn report_test_result(&self, r: TestResult) -> anyhow::Result<()>; - - async fn report_tests_discovered( - &self, - target: ConfiguredTargetHandle, - suite: String, - name: Vec, - ) -> anyhow::Result<()>; - - /// report a summary about the current test executor - async fn report_test_session(&self, session_info: String) -> anyhow::Result<()>; - - /// report that all tests are done and provide the exit code that this test executor wants to - /// return for the test command, no more executions - async fn end_of_test_results(&self, exit_code: i32) -> anyhow::Result<()>; - - /// prepare the given test executable to be available for local execution. - /// Return the actual command with all the args, env and cwd to be executed locally. - async fn prepare_for_local_execution( - &self, - ui_prints: DisplayMetadata, - target: ConfiguredTargetHandle, - cmd: Vec, - env: SortedVectorMap, - pre_create_dirs: Vec, - ) -> anyhow::Result; - - /// attach a message containing information that the executor wants to be surfaced - /// to the user - async fn attach_info_message(&self, message: String) -> anyhow::Result<()>; -} - -// TODO need to figure out what this is. 
we can go without it for now -#[derive(Debug, Default, Clone, Dupe)] -pub struct ExecPlatformRefinement; - -/// the template name for external test executor to fill with the test execution location -pub const OUTPUT_DIR: &str = "output_dir"; - -/// the template name for buck to fill with the commands outputs -pub const OUTPUTS_TEMPLATE: &str = "outputs"; - -/// the template name for buck to fill with the run cmd based on the test's run information -/// this should be multi-arity -pub const TEST_RUN_CMD: &str = "test_run_cmd"; - -/// the template name for buck to fill with the run environment based on the test's run information -/// this should be multi-arity -pub const TEST_RUN_ENV_KEYS: &str = "test_run_env_keys"; - -/// the template name for buck to fill with the run environment based on the test's run information -/// this should be multi-arity -pub const TEST_RUN_ENV_VALUES: &str = "test_run_env_values"; - -/// the template name for the external test runner to fill with test filters -pub const TEST_FILTER_TEMPLATE: &str = "test_filter"; diff --git a/app/buck2_test_proto/BUCK b/app/buck2_test_proto/BUCK index d7425e7b2428b..1f719c8abd56a 100644 --- a/app/buck2_test_proto/BUCK +++ b/app/buck2_test_proto/BUCK @@ -1,5 +1,5 @@ load("@fbcode//buck2:proto_defs.bzl", "rust_protobuf_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") +load("@fbcode_macros//build_defs/lib:oss.bzl", "translate_target") oncall("build_infra") @@ -7,10 +7,11 @@ rust_protobuf_library( name = "buck2_test_proto", srcs = glob(["src/**/*.rs"]), build_env = { - "BUCK_HACK_DATA_PROTOC_INCLUDE": "$(location //buck2/app/buck2_data:data.proto)", + "BUCK_HACK_DATA_PROTOC_INCLUDE": "$(location {})".format( + translate_target("//buck2/app/buck2_data:data_proto"), + ), }, build_script = "build.rs", - doctests = False, # FIXME protos = ["test.proto"], deps = [ "fbsource//third-party/rust:prost-types", diff --git a/app/buck2_test_proto/Cargo.toml b/app/buck2_test_proto/Cargo.toml index 10e935bb62439..627c827306ace 100644 --- a/app/buck2_test_proto/Cargo.toml +++ b/app/buck2_test_proto/Cargo.toml @@ -2,13 +2,15 @@ name = "buck2_test_proto" edition = "2021" +license = { workspace = true } +repository = { workspace = true } version = "0.1.0" [dependencies] +buck2_data = { workspace = true } prost = { workspace = true } prost-types = { workspace = true } tonic = { workspace = true } -buck2_data = { workspace = true } [build-dependencies] buck2_protoc_dev = { workspace = true } diff --git a/app/buck2_test_proto/src/lib.rs b/app/buck2_test_proto/src/lib.rs index 1376753e71251..049636f39a904 100644 --- a/app/buck2_test_proto/src/lib.rs +++ b/app/buck2_test_proto/src/lib.rs @@ -7,4 +7,6 @@ * of this source tree. 
*/ +#![feature(error_generic_member_access)] + tonic::include_proto!("buck.test"); diff --git a/app/buck2_test_proto/test.proto b/app/buck2_test_proto/test.proto index b92fd65513aaf..b13210fe96e4a 100644 --- a/app/buck2_test_proto/test.proto +++ b/app/buck2_test_proto/test.proto @@ -21,7 +21,7 @@ message Testing { repeated string testcases = 2; } -message DisplayMetadata { +message TestStage { message Listing { string suite = 1; } @@ -179,7 +179,7 @@ message ExternalRunnerSpecValue { message TestExecutable { reserved 4; - DisplayMetadata ui_prints = 1; + TestStage stage = 1; ConfiguredTargetHandle target = 2; repeated ArgValue cmd = 3; repeated DeclaredOutput pre_create_dirs = 5; @@ -205,6 +205,7 @@ message ExecuteRequest2 { message PrepareForLocalExecutionRequest { TestExecutable test_executable = 1; + repeated LocalResourceType required_local_resources = 2; } message ArgValue { @@ -225,22 +226,73 @@ message ArgFormat { message ArgValueContent { oneof value { ExternalRunnerSpecValue spec_value = 1; - DeclaredOutput declared_output = 2; + OutputName declared_output = 2; } } +// This message should be compatible with the prefix of DeclaredOutput for +// backwards compatibility +message OutputName { + string name = 1; + reserved 2; +} + +message TtlConfig { + // Specifies a custom TTL in seconds for blobs in the output. + int64 ttl_seconds = 1; + // Specifies a custom use-case to use for managing the blobs in CAS. + string use_case = 2; +} + message DeclaredOutput { string name = 1; + // Flags that a consumer supports *remote* output, in which case it's not + // necessary to materialize this output. + bool supports_remote = 2; + optional TtlConfig ttl_config = 3; +} + +// Copy of Digest from Remote Execution API: +// https://github.com/facebook/buck2/blob/main/remote_execution/oss/re_grpc_proto/proto/build/bazel/remote/execution/v2/remote_execution.proto#L936-L943 +message CasDigest { + // The hash. In the case of SHA-256, it will always be a lowercase hex string + // exactly 64 characters long. + string hash = 1; + + // The size of the blob, in bytes. + int64 size_bytes = 2; +} + +// TODO(arr): consider using remote_execution.proto later on. The API surface +// there is larger but consolidation may be worth it. +message RemoteFileNode { + string name = 1; +} + +message RemoteDirNode { + string name = 1; + repeated RemoteObject children = 2; +} + +message RemoteObject { + CasDigest digest = 1; + oneof node { + RemoteFileNode file = 2; + RemoteDirNode dir = 3; + } } message Output { + // TODO(arr): replace this oneof with just it's fields when we start uploading + // results of local executions to CAS oneof value { string local_path = 1; + RemoteObject remote_object = 2; } } message OutputEntry { - DeclaredOutput declared_output = 1; + OutputName declared_output = 1; Output output = 2; } @@ -274,6 +326,12 @@ message PrepareForLocalExecutionResult { repeated VerbatimEnvironmentVariable env = 4; } +message SetupLocalResourceLocalExecutionCommand { + repeated string cmd = 1; + string cwd = 2; + repeated VerbatimEnvironmentVariable env = 3; +} + message VerbatimEnvironmentVariable { string key = 1; string value = 2; @@ -281,12 +339,21 @@ message VerbatimEnvironmentVariable { message PrepareForLocalExecutionResponse { PrepareForLocalExecutionResult result = 1; + repeated SetupLocalResourceLocalExecutionCommand + setup_local_resource_commands = 2; } message AttachInfoMessageRequest { string message = 1; } +message UnstableHeapDumpRequest { + // The path to write the heap dump to. 
If this path is relative, it is made + // absolute relative to the working directory of the daemon. + string destination_path = 1; +} +message UnstableHeapDumpResponse {} + service TestOrchestrator { rpc EndOfTestResults(EndOfTestResultsRequest) returns (Empty); rpc ReportTestResult(ReportTestResultRequest) returns (Empty); @@ -301,4 +368,8 @@ service TestOrchestrator { service TestExecutor { rpc ExternalRunnerSpec(ExternalRunnerSpecRequest) returns (Empty); rpc EndOfTestRequests(Empty) returns (Empty); + // Requests the test server to perform a heap dump and save the dump to a + // file. + rpc Unstable_HeapDump(UnstableHeapDumpRequest) + returns (UnstableHeapDumpResponse); } diff --git a/app/buck2_test_runner/BUCK b/app/buck2_test_runner/BUCK index de28ca2ab443d..0998501f2f7bb 100644 --- a/app/buck2_test_runner/BUCK +++ b/app/buck2_test_runner/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -11,11 +10,11 @@ rust_library( deps = [ "fbsource//third-party/rust:anyhow", "fbsource//third-party/rust:async-trait", - "fbsource//third-party/rust:clap-3", + "fbsource//third-party/rust:clap", "fbsource//third-party/rust:futures", "fbsource//third-party/rust:parking_lot", - "fbsource//third-party/rust:thiserror", "fbsource//third-party/rust:tokio", + "//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_grpc:buck2_grpc", "//buck2/app/buck2_test_api:buck2_test_api", "//buck2/host_sharing:host_sharing", diff --git a/app/buck2_test_runner/Cargo.toml b/app/buck2_test_runner/Cargo.toml index 0a9dc2d32daa9..b360183750240 100644 --- a/app/buck2_test_runner/Cargo.toml +++ b/app/buck2_test_runner/Cargo.toml @@ -1,7 +1,9 @@ [package] +edition = "2021" +license = { workspace = true } name = "buck2_test_runner" +repository = { workspace = true } version = "0.1.0" -edition = "2021" [dependencies] anyhow = { workspace = true } @@ -9,9 +11,9 @@ async-trait = { workspace = true } clap = { workspace = true } futures = { workspace = true } parking_lot = { workspace = true } -thiserror = { workspace = true } tokio = { workspace = true } +buck2_error = { workspace = true } buck2_grpc = { workspace = true } buck2_test_api = { workspace = true } host_sharing = { workspace = true } diff --git a/app/buck2_test_runner/src/config.rs b/app/buck2_test_runner/src/config.rs index cce44edeb780f..802d99cd51604 100644 --- a/app/buck2_test_runner/src/config.rs +++ b/app/buck2_test_runner/src/config.rs @@ -20,21 +20,21 @@ pub struct Config { pub env: Vec, /// Max number of seconds allowed to run a test. - #[clap(long, default_value = "600", parse(try_from_str=try_parse_timeout_from_str))] + #[clap(long, default_value = "600", value_parser = try_parse_timeout_from_str)] pub timeout: Duration, /// Ignored arg included for backwards compatibility. - #[clap(long, hidden = true)] + #[clap(long, hide = true)] buck_test_info: String, /// Passthrough argments to test binary. /// Available as a workaround for when test features are available. - #[clap(long, multiple = true, allow_hyphen_values = true)] + #[clap(long, num_args=1.., allow_hyphen_values = true)] pub test_arg: Vec, } /// Uiltity that can be used to parse Env values from CLI arguments. 
-#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Clone)] pub struct EnvValue { pub name: String, pub value: String, @@ -60,7 +60,7 @@ impl FromStr for EnvValue { } } -#[derive(Debug, thiserror::Error, PartialEq)] +#[derive(Debug, buck2_error::Error, PartialEq)] pub enum EnvValueParseError { #[error("Incorrect syntax for env value. Please use name=value. Input: `{0}`")] IncorrectSyntax(String), diff --git a/app/buck2_test_runner/src/lib.rs b/app/buck2_test_runner/src/lib.rs index e88da68844ad1..c874c424879f3 100644 --- a/app/buck2_test_runner/src/lib.rs +++ b/app/buck2_test_runner/src/lib.rs @@ -7,7 +7,7 @@ * of this source tree. */ -#![feature(async_closure)] +#![feature(error_generic_member_access)] mod config; mod executor; diff --git a/app/buck2_test_runner/src/runner.rs b/app/buck2_test_runner/src/runner.rs index 2eb0d9c2bfb5d..0763fffc21e74 100644 --- a/app/buck2_test_runner/src/runner.rs +++ b/app/buck2_test_runner/src/runner.rs @@ -11,7 +11,6 @@ use anyhow::Context; use buck2_test_api::data::ArgValue; use buck2_test_api::data::ArgValueContent; use buck2_test_api::data::ConfiguredTargetHandle; -use buck2_test_api::data::DisplayMetadata; use buck2_test_api::data::ExecuteResponse; use buck2_test_api::data::ExecutionResult2; use buck2_test_api::data::ExecutionStatus; @@ -19,6 +18,7 @@ use buck2_test_api::data::ExternalRunnerSpec; use buck2_test_api::data::ExternalRunnerSpecValue; use buck2_test_api::data::RequiredLocalResources; use buck2_test_api::data::TestResult; +use buck2_test_api::data::TestStage; use buck2_test_api::data::TestStatus; use buck2_test_api::grpc::TestOrchestratorClient; use clap::Parser; @@ -69,7 +69,7 @@ impl Buck2TestRunner { drop(maybe_receiver); } let run_verdict = receiver - .map(async move |spec| { + .map(|spec| async move { let name = format!( "{}//{}:{}", spec.target.cell, spec.target.package, spec.target.target @@ -101,7 +101,7 @@ impl Buck2TestRunner { // If any individual test failed, consider the entire run to have failed. 
.fold( RunVerdict::Pass, - async move |mut run_verdict, test_status| { + |mut run_verdict, test_status| async move { if test_status != TestStatus::PASS { run_verdict = RunVerdict::Fail; } @@ -119,7 +119,7 @@ impl Buck2TestRunner { &self, spec: ExternalRunnerSpec, ) -> anyhow::Result { - let display_metadata = DisplayMetadata::Testing { + let stage = TestStage::Testing { suite: spec.target.target, testcases: Vec::new(), }; @@ -175,7 +175,7 @@ impl Buck2TestRunner { self.orchestrator_client .execute2( - display_metadata, + stage, target_handle, command, env, diff --git a/app/buck2_transition/BUCK b/app/buck2_transition/BUCK index 78bf82af627f8..9e110868d57b8 100644 --- a/app/buck2_transition/BUCK +++ b/app/buck2_transition/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -12,19 +11,19 @@ rust_library( "fbsource//third-party/rust:anyhow", "fbsource//third-party/rust:async-trait", "fbsource//third-party/rust:derive_more", + "fbsource//third-party/rust:either", "fbsource//third-party/rust:itertools", - "fbsource//third-party/rust:thiserror", "//buck2/allocative/allocative:allocative", "//buck2/app/buck2_build_api:buck2_build_api", - "//buck2/app/buck2_common:buck2_common", "//buck2/app/buck2_core:buck2_core", + "//buck2/app/buck2_error:buck2_error", "//buck2/app/buck2_events:buck2_events", + "//buck2/app/buck2_futures:buck2_futures", "//buck2/app/buck2_interpreter:buck2_interpreter", "//buck2/app/buck2_node:buck2_node", "//buck2/dice/dice:dice", "//buck2/gazebo/dupe:dupe", "//buck2/gazebo/gazebo:gazebo", - "//buck2/shed/more_futures:more_futures", "//buck2/starlark-rust/starlark:starlark", "//buck2/starlark-rust/starlark_map:starlark_map", ], diff --git a/app/buck2_transition/Cargo.toml b/app/buck2_transition/Cargo.toml index ca0b79aefd82e..5432df41e42a7 100644 --- a/app/buck2_transition/Cargo.toml +++ b/app/buck2_transition/Cargo.toml @@ -1,27 +1,30 @@ [package] +description = "Implementation of configuration transition" +edition = "2021" +license = { workspace = true } name = "buck2_transition" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Implementation of configuration transition" [dependencies] anyhow = { workspace = true } async-trait = { workspace = true } derive_more = { workspace = true } +either = { workspace = true } itertools = { workspace = true } -thiserror = { workspace = true } allocative = { workspace = true } dice = { workspace = true } dupe = { workspace = true } gazebo = { workspace = true } -more_futures = { workspace = true } starlark = { workspace = true } starlark_map = { workspace = true } +buck2_analysis = { workspace = true } buck2_build_api = { workspace = true } -buck2_common = { workspace = true } buck2_core = { workspace = true } +buck2_error = { workspace = true } buck2_events = { workspace = true } +buck2_futures = { workspace = true } buck2_interpreter = { workspace = true } buck2_node = { workspace = true } diff --git a/app/buck2_transition/src/coerced_attr.rs b/app/buck2_transition/src/coerced_attr.rs deleted file mode 100644 index 41d81fffdd1a2..0000000000000 --- a/app/buck2_transition/src/coerced_attr.rs +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. 
- * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use buck2_node::attrs::coerced_attr::CoercedAttr; -use buck2_node::attrs::display::AttrDisplayWithContextExt; -use starlark::values::dict::Dict; -use starlark::values::list::AllocList; -use starlark::values::tuple::AllocTuple; -use starlark::values::Heap; -use starlark::values::Value; -use starlark_map::small_map::SmallMap; - -#[derive(Debug, thiserror::Error)] -enum CoercedAttrResolveError { - #[error("Attribute cannot be converted to Starlark value: `{0}`")] - AttrCannotBeConvertedToValue(String), -} - -pub(crate) trait CoercedAttrResolveExt { - fn to_value<'v>(&self, heap: &'v Heap) -> anyhow::Result>; -} - -impl CoercedAttrResolveExt for CoercedAttr { - fn to_value<'v>(&self, heap: &'v Heap) -> anyhow::Result> { - match self { - x @ (CoercedAttr::Concat(..) | CoercedAttr::Selector(..)) => { - // It is possible to convert selects back to Starlark objects, - // but there's no need to do it for now (and probably never will). - Err(CoercedAttrResolveError::AttrCannotBeConvertedToValue( - x.as_display_no_ctx().to_string(), - ) - .into()) - } - CoercedAttr::None => Ok(Value::new_none()), - CoercedAttr::Bool(b) => Ok(Value::new_bool(b.0)), - CoercedAttr::Int(i) => Ok(heap.alloc(*i)), - CoercedAttr::String(s) | CoercedAttr::EnumVariant(s) => { - Ok(heap.alloc_str(s).to_value()) - } - CoercedAttr::List(l) => { - let mut v = Vec::with_capacity(l.len()); - for e in l.iter() { - v.push(e.to_value(heap)?); - } - Ok(heap.alloc(AllocList(v))) - } - CoercedAttr::Tuple(l) => { - let mut v = Vec::with_capacity(l.len()); - for e in l.iter() { - v.push(e.to_value(heap)?); - } - Ok(heap.alloc(AllocTuple(v))) - } - CoercedAttr::Dict(d) => { - let mut m = SmallMap::with_capacity(d.len()); - for (k, v) in d.iter() { - m.insert_hashed(k.to_value(heap)?.get_hashed()?, v.to_value(heap)?); - } - Ok(heap.alloc(Dict::new(m))) - } - x => { - // For now this function is used to convert attributes to Starlark values - // for transition rules which access attributes. - // - // For regular deps this function should fail. - // - // For configuration deps, this function should resolve attributes to providers, - // but it is not implemented yet. - Err(CoercedAttrResolveError::AttrCannotBeConvertedToValue( - x.as_display_no_ctx().to_string(), - ) - .into()) - } - } - } -} diff --git a/app/buck2_transition/src/lib.rs b/app/buck2_transition/src/lib.rs index 43adecb1185f0..29e43d6939e2f 100644 --- a/app/buck2_transition/src/lib.rs +++ b/app/buck2_transition/src/lib.rs @@ -7,12 +7,13 @@ * of this source tree. 
*/ +#![feature(error_generic_member_access)] #![feature(try_blocks)] -pub(crate) mod coerced_attr; pub(crate) mod transition; pub fn init_late_bindings() { transition::calculation_apply_transition::init_transition_calculation(); + transition::calculation_fetch_transition::init_transition_attr_provider(); transition::starlark::init_register_transition(); } diff --git a/app/buck2_transition/src/transition/mod.rs b/app/buck2_transition/src/transition.rs similarity index 100% rename from app/buck2_transition/src/transition/mod.rs rename to app/buck2_transition/src/transition.rs diff --git a/app/buck2_transition/src/transition/calculation_apply_transition.rs b/app/buck2_transition/src/transition/calculation_apply_transition.rs index 588e9c53da6f8..f227995c2a085 100644 --- a/app/buck2_transition/src/transition/calculation_apply_transition.rs +++ b/app/buck2_transition/src/transition/calculation_apply_transition.rs @@ -12,49 +12,48 @@ use std::sync::Arc; use allocative::Allocative; use anyhow::Context; use async_trait::async_trait; +use buck2_build_api::actions::query::PackageLabelOption; +use buck2_build_api::actions::query::CONFIGURED_ATTR_TO_VALUE; use buck2_build_api::analysis::calculation::RuleAnalysisCalculation; use buck2_build_api::interpreter::rule_defs::provider::builtin::platform_info::PlatformInfo; use buck2_build_api::interpreter::rule_defs::provider::collection::FrozenProviderCollectionValue; use buck2_build_api::transition::TransitionCalculation; use buck2_build_api::transition::TRANSITION_CALCULATION; -use buck2_common::result::SharedResult; -use buck2_common::result::ToSharedResultExt; -use buck2_common::result::ToUnsharedResultExt; use buck2_core::configuration::cfg_diff::cfg_diff; use buck2_core::configuration::data::ConfigurationData; use buck2_core::configuration::transition::applied::TransitionApplied; use buck2_core::configuration::transition::id::TransitionId; -use buck2_core::target::label::TargetLabel; +use buck2_core::provider::label::ProvidersLabel; +use buck2_error::starlark_error::from_starlark; +use buck2_error::AnyhowContextForError; use buck2_events::dispatch::get_dispatcher; +use buck2_futures::cancellation::CancellationContext; use buck2_interpreter::dice::starlark_provider::with_starlark_eval_provider; use buck2_interpreter::print_handler::EventDispatcherPrintHandler; -use buck2_interpreter::starlark_profiler::StarlarkProfilerOrInstrumentation; -use buck2_node::attrs::coerced_attr::CoercedAttr; +use buck2_interpreter::soft_error::Buck2StarlarkSoftErrorHandler; +use buck2_interpreter::starlark_profiler::profiler::StarlarkProfilerOpt; +use buck2_node::attrs::configured_attr::ConfiguredAttr; use buck2_node::attrs::display::AttrDisplayWithContextExt; -use buck2_node::attrs::inspect_options::AttrInspectOptions; -use buck2_node::nodes::unconfigured::TargetNode; use derive_more::Display; use dice::DiceComputations; use dice::Key; use dupe::Dupe; -use gazebo::prelude::*; +use dupe::OptionDupedExt; use itertools::Itertools; -use more_futures::cancellation::CancellationContext; use starlark::environment::Module; use starlark::eval::Evaluator; -use starlark::values::dict::DictOf; +use starlark::values::dict::UnpackDictEntries; use starlark::values::structs::AllocStruct; use starlark::values::UnpackValue; use starlark::values::Value; +use starlark::StarlarkResultExt; use starlark_map::ordered_map::OrderedMap; use starlark_map::sorted_map::SortedMap; -use thiserror::Error; -use crate::coerced_attr::CoercedAttrResolveExt; use 
crate::transition::calculation_fetch_transition::FetchTransition; use crate::transition::starlark::FrozenTransition; -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] enum ApplyTransitionError { #[error("transition function not marked as `split` must return a `PlatformInfo`")] NonSplitTransitionMustReturnPlatformInfo, @@ -78,7 +77,7 @@ fn call_transition_function<'v>( conf: &ConfigurationData, refs: Value<'v>, attrs: Option>, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { let mut args = vec![ ( @@ -91,12 +90,16 @@ fn call_transition_function<'v>( if let Some(attrs) = attrs { args.push(("attrs", attrs)); } - let new_platforms = eval.eval_function(transition.implementation.to_value(), &[], &args)?; + let new_platforms = eval + .eval_function(transition.implementation.to_value(), &[], &args) + .map_err(from_starlark)?; if transition.split { - match DictOf::<&str, &PlatformInfo>::unpack_value(new_platforms) { + match UnpackDictEntries::<&str, &PlatformInfo>::unpack_value(new_platforms) + .into_anyhow_result()? + { Some(dict) => { let mut split = OrderedMap::new(); - for (k, v) in dict.to_dict() { + for (k, v) in dict.entries { let prev = split.insert(k.to_owned(), v.to_configuration()?); assert!(prev.is_none()); } @@ -105,25 +108,30 @@ fn call_transition_function<'v>( None => Err(ApplyTransitionError::SplitTransitionMustReturnDict.into()), } } else { - match <&PlatformInfo>::unpack_value(new_platforms) { - Some(platform) => Ok(TransitionApplied::Single(platform.to_configuration()?)), - None => Err(ApplyTransitionError::NonSplitTransitionMustReturnPlatformInfo.into()), + match <&PlatformInfo>::unpack_value_err(new_platforms) { + Ok(platform) => Ok(TransitionApplied::Single(platform.to_configuration()?)), + Err(_) => Err(ApplyTransitionError::NonSplitTransitionMustReturnPlatformInfo.into()), } } } async fn do_apply_transition( - ctx: &DiceComputations, - attrs: Option<&[Option]>, + ctx: &mut DiceComputations<'_>, + attrs: Option<&[Option>]>, conf: &ConfigurationData, transition_id: &TransitionId, -) -> SharedResult { +) -> buck2_error::Result { let transition = ctx.fetch_transition(transition_id).await?; let module = Module::new(); let mut refs = Vec::with_capacity(transition.refs.len()); let mut refs_refs = Vec::new(); for (s, t) in &transition.refs { - let provider_collection_value = ctx.fetch_transition_function_reference(t).await?; + let provider_collection_value = ctx + .fetch_transition_function_reference( + // TODO(T198210718) + &ProvidersLabel::default_for(t.dupe()), + ) + .await?; refs.push(( *s, // This is safe because we store a reference to provider collection in `refs_refs`. 
@@ -134,13 +142,14 @@ async fn do_apply_transition( let print = EventDispatcherPrintHandler(get_dispatcher()); with_starlark_eval_provider( ctx, - &mut StarlarkProfilerOrInstrumentation::disabled(), + &mut StarlarkProfilerOpt::disabled(), format!("transition:{}", transition_id), move |provider, _| { - let mut eval = provider.make(&module)?; + let (mut eval, _) = provider.make(&module)?; eval.set_print_handler(&print); + eval.set_soft_error_handler(&Buck2StarlarkSoftErrorHandler); let refs = module.heap().alloc(AllocStruct(refs)); - let attrs = match (&transition.attrs, attrs) { + let attrs = match (&transition.attrs_names_starlark, attrs) { (Some(names), Some(values)) => { if names.len() != values.len() { return Err( @@ -150,7 +159,12 @@ async fn do_apply_transition( let mut attrs = Vec::with_capacity(names.len()); for (name, value) in names.iter().zip(values.iter()) { let value = match value { - Some(value) => value.to_value(module.heap()).with_context(|| { + Some(value) => (CONFIGURED_ATTR_TO_VALUE.get()?)( + &value, + PackageLabelOption::TransitionAttr, + module.heap(), + ) + .with_context(|| { format!( "Error converting attribute `{}={}` to Starlark value", name.as_str(), @@ -172,8 +186,7 @@ async fn do_apply_transition( TransitionApplied::Single(new) => { let new_2 = match call_transition_function(&transition, &new, refs, attrs, &mut eval) - .context("applying transition again on transition output") - .shared_error()? + .context("applying transition again on transition output")? { TransitionApplied::Single(new_2) => new_2, TransitionApplied::Split(_) => { @@ -200,29 +213,25 @@ async fn do_apply_transition( }, ) .await - .shared_error() + .map_err(buck2_error::Error::from) } #[async_trait] pub(crate) trait ApplyTransition { /// Resolve `refs` param of transition function. async fn fetch_transition_function_reference( - &self, - target: &TargetLabel, - ) -> SharedResult; + &mut self, + target: &ProvidersLabel, + ) -> buck2_error::Result; } #[async_trait] -impl ApplyTransition for DiceComputations { +impl ApplyTransition for DiceComputations<'_> { async fn fetch_transition_function_reference( - &self, - target: &TargetLabel, - ) -> SharedResult { - Ok(self - .get_configuration_analysis_result(target) - .await? - .providers() - .dupe()) + &mut self, + target: &ProvidersLabel, + ) -> buck2_error::Result { + Ok(self.get_configuration_analysis_result(target).await?.dupe()) } } @@ -236,13 +245,13 @@ pub(crate) fn init_transition_calculation() { impl TransitionCalculation for TransitionCalculationImpl { async fn apply_transition( &self, - ctx: &DiceComputations, - target_node: &TargetNode, + ctx: &mut DiceComputations<'_>, + configured_attrs: &OrderedMap<&str, Arc>, cfg: &ConfigurationData, transition_id: &TransitionId, ) -> anyhow::Result> { #[derive(Debug, Eq, PartialEq, Hash, Clone, Display, Allocative)] - #[display(fmt = "{} ({}){}", transition_id, cfg, "self.fmt_attrs()")] + #[display("{} ({}){}", transition_id, cfg, self.fmt_attrs())] struct TransitionKey { cfg: ConfigurationData, transition_id: TransitionId, @@ -250,7 +259,7 @@ impl TransitionCalculation for TransitionCalculationImpl { /// The attr value index is the index of attribute in transition object. /// Attributes are added here so multiple targets with the equal attributes /// (e.g. the same `java_version = 14`) share the transition computation. 
- attrs: Option>>, + attrs: Option>>>, } impl TransitionKey { @@ -277,14 +286,14 @@ impl TransitionCalculation for TransitionCalculationImpl { #[async_trait] impl Key for TransitionKey { - type Value = SharedResult>; + type Value = buck2_error::Result>; async fn compute( &self, ctx: &mut DiceComputations, _cancellation: &CancellationContext, ) -> Self::Value { - let v: SharedResult<_> = try { + let v: buck2_error::Result<_> = try { do_apply_transition(ctx, self.attrs.as_deref(), &self.cfg, &self.transition_id) .await? }; @@ -306,12 +315,13 @@ impl TransitionCalculation for TransitionCalculationImpl { let transition = ctx.fetch_transition(transition_id).await?; #[allow(clippy::manual_map)] - let attrs = if let Some(attrs) = &transition.attrs { - Some(attrs.try_map(|attr| { - target_node - .attr(attr, AttrInspectOptions::All) - .map(|o| o.cloned()) - })?) + let attrs = if let Some(attrs) = &transition.attrs_names_starlark { + Some( + attrs + .iter() + .map(|attr| configured_attrs.get(attr.as_str()).duped()) + .collect(), + ) } else { None }; @@ -322,6 +332,6 @@ impl TransitionCalculation for TransitionCalculationImpl { attrs, }; - ctx.compute(&key).await?.unshared_error() + ctx.compute(&key).await?.map_err(anyhow::Error::from) } } diff --git a/app/buck2_transition/src/transition/calculation_fetch_transition.rs b/app/buck2_transition/src/transition/calculation_fetch_transition.rs index 70a74a4c1d0f9..ce0f9f80c0d9c 100644 --- a/app/buck2_transition/src/transition/calculation_fetch_transition.rs +++ b/app/buck2_transition/src/transition/calculation_fetch_transition.rs @@ -7,15 +7,16 @@ * of this source tree. */ +use std::sync::Arc; + use async_trait::async_trait; -use buck2_common::result::SharedError; -use buck2_common::result::SharedResult; -use buck2_common::result::ToSharedResultExt; +use buck2_build_api::transition::TransitionAttrProvider; +use buck2_build_api::transition::TRANSITION_ATTRS_PROVIDER; use buck2_core::configuration::transition::id::TransitionId; use buck2_interpreter::load_module::InterpreterCalculation; use dice::DiceComputations; +use dupe::OptionDupedExt; use starlark::values::OwnedFrozenValueTyped; -use thiserror::Error; use crate::transition::starlark::FrozenTransition; @@ -24,31 +25,51 @@ use crate::transition::starlark::FrozenTransition; pub(crate) trait FetchTransition { /// Fetch transition object by id. async fn fetch_transition( - &self, + &mut self, id: &TransitionId, - ) -> SharedResult>; + ) -> buck2_error::Result>; } -#[derive(Debug, Error)] +#[derive(Debug, buck2_error::Error)] enum FetchTransitionError { #[error("Transition object not found by id {:?}", _0)] NotFound(TransitionId), } #[async_trait] -impl FetchTransition for DiceComputations { +impl FetchTransition for DiceComputations<'_> { async fn fetch_transition( - &self, + &mut self, id: &TransitionId, - ) -> SharedResult> { + ) -> buck2_error::Result> { let module = self.get_loaded_module_from_import_path(&id.path).await?; let transition = module .env() // This is a hashmap lookup, so we are not caching the result in DICE. .get_any_visibility(&id.name) - .map_err(|_| SharedError::new(FetchTransitionError::NotFound(id.clone())))? + .map_err(|_| buck2_error::Error::new(FetchTransitionError::NotFound(id.clone())))? 
.0; - transition.downcast_anyhow().shared_error() + transition + .downcast_anyhow() + .map_err(buck2_error::Error::from) } } + +struct TransitionGetAttrs; + +#[async_trait] +impl TransitionAttrProvider for TransitionGetAttrs { + async fn transition_attrs( + &self, + ctx: &mut DiceComputations<'_>, + transition_id: &TransitionId, + ) -> anyhow::Result>> { + let transition = ctx.fetch_transition(transition_id).await?; + Ok(transition.attrs_names.as_ref().duped()) + } +} + +pub(crate) fn init_transition_attr_provider() { + TRANSITION_ATTRS_PROVIDER.init(&TransitionGetAttrs); +} diff --git a/app/buck2_transition/src/transition/starlark.rs b/app/buck2_transition/src/transition/starlark.rs index be8f075ca77f7..df96e346db680 100644 --- a/app/buck2_transition/src/transition/starlark.rs +++ b/app/buck2_transition/src/transition/starlark.rs @@ -10,28 +10,41 @@ use std::cell::RefCell; use std::collections::HashSet; use std::sync::Arc; +use std::sync::LazyLock; use allocative::Allocative; +use anyhow::Context; +use buck2_build_api::interpreter::rule_defs::provider::builtin::platform_info::PlatformInfo; use buck2_core::bzl::ImportPath; use buck2_core::configuration::transition::id::TransitionId; -use buck2_core::target::label::TargetLabel; +use buck2_core::target::label::label::TargetLabel; use buck2_interpreter::build_context::starlark_path_from_build_context; -use buck2_interpreter::coerce::COERCE_TARGET_LABEL; -use buck2_interpreter::functions::transition::REGISTER_TRANSITION; +use buck2_interpreter::coerce::COERCE_TARGET_LABEL_FOR_BZL; +use buck2_interpreter::downstream_crate_starlark_defs::REGISTER_BUCK2_TRANSITION_GLOBALS; +use buck2_interpreter::late_binding_ty::TransitionReprLate; use buck2_interpreter::types::transition::TransitionValue; use derive_more::Display; use dupe::Dupe; +use either::Either; use gazebo::prelude::*; -use itertools::Itertools; use starlark::any::ProvidesStaticType; use starlark::collections::SmallMap; use starlark::environment::GlobalsBuilder; use starlark::eval::Evaluator; use starlark::starlark_complex_values; use starlark::starlark_module; -use starlark::values::dict::DictOf; +use starlark::typing::ParamIsRequired; +use starlark::typing::ParamSpec; +use starlark::typing::Ty; +use starlark::util::ArcStr; +use starlark::values::dict::DictType; +use starlark::values::dict::UnpackDictEntries; +use starlark::values::list_or_tuple::UnpackListOrTuple; use starlark::values::starlark_value; -use starlark::values::typing::StarlarkCallable; +use starlark::values::structs::StructRef; +use starlark::values::type_repr::StarlarkTypeRepr; +use starlark::values::typing::StarlarkCallableChecked; +use starlark::values::typing::StarlarkCallableParamSpec; use starlark::values::Demand; use starlark::values::Freeze; use starlark::values::Freezer; @@ -42,23 +55,14 @@ use starlark::values::StarlarkValue; use starlark::values::StringValue; use starlark::values::Trace; use starlark::values::Value; +use starlark::StarlarkResultExt; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum TransitionError { #[error("Transition must be assigned to a variable, e.g. 
`android_cpus = transition(...)`")] TransitionNotAssigned, #[error("`transition` can only be declared in .bzl files")] OnlyBzl, - #[error( - "`transition` implementation must be def with two parameters: `platform` and `refs`, \ - but it is not a def" - )] - MustBeDefNotDef, - #[error( - "`transition` implementation must be def with parameters: {}, \ - but it is a def with signature `{0}`", - _1.iter().map(|s| format!("`{}`", s)).join(", "))] - MustBeDefWrongSig(String, &'static [&'static str]), #[error("Non-unique list of attrs")] NonUniqueAttrs, } @@ -68,7 +72,7 @@ enum TransitionError { struct TargetLabelTrace(TargetLabel); #[derive(Debug, Display, Trace, ProvidesStaticType, NoSerialize, Allocative)] -#[display(fmt = "transition")] +#[display("transition")] pub(crate) struct Transition<'v> { /// The name of this transition, filled in by `export_as()`. This must be set before this /// object can be used. @@ -85,18 +89,24 @@ pub(crate) struct Transition<'v> { } #[derive(Debug, Display, ProvidesStaticType, NoSerialize, Allocative)] -#[display(fmt = "transition")] +#[display("transition")] pub(crate) struct FrozenTransition { id: Arc, pub(crate) implementation: FrozenValue, pub(crate) refs: SmallMap, - pub(crate) attrs: Option>, + pub(crate) attrs_names_starlark: Option>, + // the same as `attrs_names_starlark` but String representation + pub(crate) attrs_names: Option>, pub(crate) split: bool, } #[starlark_value(type = "transition")] impl<'v> StarlarkValue<'v> for Transition<'v> { - fn export_as(&self, variable_name: &str, _eval: &mut Evaluator<'v, '_>) -> anyhow::Result<()> { + fn export_as( + &self, + variable_name: &str, + _eval: &mut Evaluator<'v, '_, '_>, + ) -> starlark::Result<()> { let mut id = self.id.borrow_mut(); // First export wins if id.is_none() { @@ -140,12 +150,21 @@ impl<'v> Freeze for Transition<'v> { .attrs .map(|a| a.into_try_map(|a| a.freeze(freezer))) .transpose()?; + let attrs_names = attrs.as_ref().map(|attrs| { + Arc::from( + attrs + .iter() + .map(|a| a.as_str().to_owned()) + .collect::>(), + ) + }); let split = self.split; Ok(FrozenTransition { id, implementation, refs, - attrs, + attrs_names_starlark: attrs, + attrs_names, split, }) } @@ -169,21 +188,105 @@ impl TransitionValue for FrozenTransition { } } +struct ParamNameAndType { + name: &'static str, + ty: LazyLock, +} + +static IMPL_PLATFORM_PARAM: ParamNameAndType = ParamNameAndType { + name: "platform", + ty: LazyLock::new(PlatformInfo::starlark_type_repr), +}; +static IMPL_REFS_PARAM: ParamNameAndType = ParamNameAndType { + name: "refs", + ty: LazyLock::new(StructRef::starlark_type_repr), +}; +static IMPL_ATTRS_PARAM: ParamNameAndType = ParamNameAndType { + name: "attrs", + ty: LazyLock::new(StructRef::starlark_type_repr), +}; + +type ImplSingleReturnTy<'v> = PlatformInfo<'v>; +type ImplSplitReturnTy<'v> = DictType>; + +struct TransitionImplParams; + +impl StarlarkCallableParamSpec for TransitionImplParams { + fn params() -> ParamSpec { + ParamSpec::new_named_only([ + ( + ArcStr::new_static(IMPL_PLATFORM_PARAM.name), + ParamIsRequired::Yes, + IMPL_PLATFORM_PARAM.ty.dupe(), + ), + ( + ArcStr::new_static(IMPL_REFS_PARAM.name), + ParamIsRequired::Yes, + IMPL_REFS_PARAM.ty.dupe(), + ), + ( + ArcStr::new_static(IMPL_ATTRS_PARAM.name), + ParamIsRequired::No, + IMPL_ATTRS_PARAM.ty.dupe(), + ), + ]) + .unwrap() + } +} + +// This function is not optimized, but it is called like 10 times during the heavy build. 
+fn validate_transition_impl(implementation: Value, attrs: bool, split: bool) -> anyhow::Result<()> { + let expected_return_type = match split { + false => ImplSingleReturnTy::starlark_type_repr(), + true => ImplSplitReturnTy::starlark_type_repr(), + }; + + implementation + .check_callable_with( + [], + [ + (IMPL_PLATFORM_PARAM.name, &*IMPL_PLATFORM_PARAM.ty), + (IMPL_REFS_PARAM.name, &*IMPL_REFS_PARAM.ty), + ] + .into_iter() + .chain(match attrs { + true => Some((IMPL_ATTRS_PARAM.name, &*IMPL_ATTRS_PARAM.ty)), + false => None, + }), + None, + None, + &expected_return_type, + ) + .into_anyhow_result() + .context("`impl` function signature is incorrect") +} + #[starlark_module] fn register_transition_function(builder: &mut GlobalsBuilder) { fn transition<'v>( - #[starlark(require = named)] r#impl: StarlarkCallable<'v>, - #[starlark(require = named)] refs: DictOf<'v, StringValue<'v>, StringValue<'v>>, - #[starlark(require = named)] attrs: Option>>, + // Note that precise function type is not checked by static or runtime typechecker, + // and exists here only for documentation purposes. + #[starlark(require = named)] r#impl: StarlarkCallableChecked< + 'v, + TransitionImplParams, + Either, + >, + #[starlark(require = named)] refs: UnpackDictEntries, StringValue<'v>>, + #[starlark(require = named)] attrs: Option>>, #[starlark(require = named, default = false)] split: bool, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result> { let implementation = r#impl.0; let refs = refs - .collect_entries() + .entries .into_iter() - .map(|(n, r)| Ok((n, TargetLabelTrace((COERCE_TARGET_LABEL.get()?)(eval, &r)?)))) + .map(|(n, r)| { + Ok(( + n, + TargetLabelTrace((COERCE_TARGET_LABEL_FOR_BZL.get()?)(eval, &r)?), + )) + }) .collect::>()?; let path: ImportPath = (*starlark_path_from_build_context(eval)? @@ -191,38 +294,27 @@ fn register_transition_function(builder: &mut GlobalsBuilder) { .ok_or(TransitionError::OnlyBzl)?) 
.clone(); - let parameters_spec = match implementation.parameters_spec() { - Some(parameters_spec) => parameters_spec, - None => return Err(TransitionError::MustBeDefNotDef.into()), - }; - let expected_params: &[&str] = if let Some(attrs) = &attrs { - let attrs_set: HashSet = attrs.iter().copied().collect(); - if attrs_set.len() != attrs.len() { + if let Some(attrs) = &attrs { + let attrs_set: HashSet = attrs.items.iter().copied().collect(); + if attrs_set.len() != attrs.items.len() { return Err(TransitionError::NonUniqueAttrs.into()); } - &["platform", "refs", "attrs"] - } else { - &["platform", "refs"] }; - if !parameters_spec.can_fill_with_args(0, expected_params) { - return Err(TransitionError::MustBeDefWrongSig( - parameters_spec.parameters_str(), - expected_params, - ) - .into()); - } + + validate_transition_impl(implementation, attrs.is_some(), split)?; Ok(Transition { id: RefCell::new(None), path, implementation, refs, - attrs, + attrs: attrs.map(|a| a.items), split, }) } } pub(crate) fn init_register_transition() { - REGISTER_TRANSITION.init(register_transition_function); + REGISTER_BUCK2_TRANSITION_GLOBALS.init(register_transition_function); + TransitionReprLate::init(Transition::starlark_type_repr()); } diff --git a/app/buck2_util/BUCK b/app/buck2_util/BUCK index becdee87f30a3..b45b06ae0078b 100644 --- a/app/buck2_util/BUCK +++ b/app/buck2_util/BUCK @@ -1,14 +1,29 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") +# @oss-disable: _is_oss = False +_is_oss = True # @oss-enable + rust_library( name = "buck2_util", srcs = glob( ["src/**/*.rs"], ), os_deps = [ + ( + "linux", + [ + "fbsource//third-party/rust:libc", + "fbsource//third-party/rust:perf-event", + ], + ), + ( + "macos", + [ + "fbsource//third-party/rust:libc", + ], + ), ( "windows", [ @@ -16,15 +31,22 @@ rust_library( ], ), ], + rustc_flags = (select({ + "DEFAULT": [], + "ovr_config//build_mode/constraints:asan": ["--cfg=buck2_asan"], + "ovr_config//build_mode/constraints:asan-ubsan": ["--cfg=buck2_asan"], + "ovr_config//build_mode/constraints:asan-ubsan-dev": ["--cfg=buck2_asan"], + }) if not _is_oss else []), test_deps = [ + "fbcode//buck2/shed/three_billion_instructions:three_billion_instructions", "fbsource//third-party/rust:serde_json", ], deps = [ "fbsource//third-party/rust:anyhow", "fbsource//third-party/rust:futures", - "fbsource//third-party/rust:libc", "fbsource//third-party/rust:serde", "fbsource//third-party/rust:static_assertions", + "fbsource//third-party/rust:sysinfo", "fbsource//third-party/rust:tokio", "fbsource//third-party/rust:tracing", "fbsource//third-party/rust:triomphe", diff --git a/app/buck2_util/Cargo.toml b/app/buck2_util/Cargo.toml index dd9bd1806c963..9bb584d79309b 100644 --- a/app/buck2_util/Cargo.toml +++ b/app/buck2_util/Cargo.toml @@ -1,29 +1,41 @@ [package] -name = "buck2_util" -version = "0.1.0" -edition = "2021" description = """ Unsorted utilities used by buck2. Not specific to buck2, generic enough to be used by other projects. but not generic or stable enough to be published on crates.io as standalone crates. 
""" +edition = "2021" +license = { workspace = true } +name = "buck2_util" +repository = { workspace = true } +version = "0.1.0" [dependencies] allocative = { workspace = true } anyhow = { workspace = true } dupe = { workspace = true } futures = { workspace = true } -once_cell = { workspace = true } -triomphe = { workspace = true } -tracing = { workspace = true } starlark_map = { workspace = true } +sysinfo = { workspace = true } +tracing = { workspace = true } +triomphe = { workspace = true } -libc = { workspace = true } serde = { workspace = true } static_assertions = { workspace = true } tokio = { workspace = true } +[target.'cfg(target_os = "linux")'.dependencies] +perf-event = { workspace = true } + +[target.'cfg(unix)'.dependencies] +libc = { workspace = true } + [target.'cfg(windows)'.dependencies] winapi = { workspace = true } [dev-dependencies] serde_json = { workspace = true } + +three_billion_instructions = { workspace = true } + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ["cfg(debug)", "cfg(buck2_asan)"] } diff --git a/app/buck2_util/src/arc_str.rs b/app/buck2_util/src/arc_str.rs new file mode 100644 index 0000000000000..4841a0583124a --- /dev/null +++ b/app/buck2_util/src/arc_str.rs @@ -0,0 +1,24 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub(crate) mod base; +mod fat; +pub(crate) mod iterator_as_exact_size_iterator; +mod slice; +mod string_like; +mod thin; +mod thin_slice; + +pub use crate::arc_str::fat::ArcStr; +pub use crate::arc_str::slice::ArcSlice; +pub use crate::arc_str::string_like::ArcS; +pub use crate::arc_str::string_like::StringInside; +pub use crate::arc_str::string_like::ThinArcS; +pub use crate::arc_str::thin::ThinArcStr; +pub use crate::arc_str::thin_slice::ThinArcSlice; diff --git a/app/buck2_util/src/arc_str/base.rs b/app/buck2_util/src/arc_str/base.rs index 731f9fb4b1d47..8fe6ed9a23d63 100644 --- a/app/buck2_util/src/arc_str/base.rs +++ b/app/buck2_util/src/arc_str/base.rs @@ -84,7 +84,6 @@ impl ArcStrBaseInner

    { }; fn layout_for_len(len: usize) -> Layout { - #[allow(clippy::let_unit_value)] let () = Self::ASSERT; let size = Self::OFFSET_OF_DATA.checked_add(len).unwrap(); diff --git a/app/buck2_util/src/arc_str/mod.rs b/app/buck2_util/src/arc_str/mod.rs deleted file mode 100644 index f8e767d478ae8..0000000000000 --- a/app/buck2_util/src/arc_str/mod.rs +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -pub(crate) mod base; -mod fat; -pub(crate) mod iterator_as_exact_size_iterator; -mod slice; -mod string_like; -mod thin; -mod thin_slice; - -pub use crate::arc_str::fat::ArcStr; -pub use crate::arc_str::slice::ArcSlice; -pub use crate::arc_str::string_like::ArcS; -pub use crate::arc_str::string_like::StringInside; -pub use crate::arc_str::thin::ThinArcStr; -pub use crate::arc_str::thin_slice::ThinArcSlice; diff --git a/app/buck2_util/src/arc_str/slice.rs b/app/buck2_util/src/arc_str/slice.rs index dbad1a6e7bd10..06bea2f4f8948 100644 --- a/app/buck2_util/src/arc_str/slice.rs +++ b/app/buck2_util/src/arc_str/slice.rs @@ -7,8 +7,6 @@ * of this source tree. */ -#![allow(clippy::from_iter_instead_of_collect)] - use std::borrow::Borrow; use std::hash::Hash; use std::ops::Deref; @@ -87,6 +85,18 @@ impl Hash for ArcSlice { } } +impl PartialOrd for ArcSlice { + fn partial_cmp(&self, other: &ArcSlice) -> Option { + self[..].partial_cmp(&other[..]) + } +} + +impl Ord for ArcSlice { + fn cmp(&self, other: &ArcSlice) -> std::cmp::Ordering { + self[..].cmp(&other[..]) + } +} + impl Borrow<[T]> for ArcSlice { #[inline] fn borrow(&self) -> &[T] { diff --git a/app/buck2_util/src/arc_str/string_like.rs b/app/buck2_util/src/arc_str/string_like.rs index 3c662e35fb06c..0c7f156f83526 100644 --- a/app/buck2_util/src/arc_str/string_like.rs +++ b/app/buck2_util/src/arc_str/string_like.rs @@ -18,6 +18,7 @@ use dupe::Clone_; use dupe::Dupe_; use crate::arc_str::ArcStr; +use crate::arc_str::ThinArcStr; /// Unsized type which is a string inside. pub trait StringInside { @@ -70,6 +71,13 @@ impl Deref for ArcS { } } +impl AsRef for ArcS { + #[inline] + fn as_ref(&self) -> &S { + self + } +} + impl Borrow for ArcS { #[inline] fn borrow(&self) -> &S { @@ -84,6 +92,70 @@ impl Display for ArcS { } } +#[derive(Eq, PartialEq, Ord, PartialOrd, Hash, Clone_, Dupe_, Debug, Allocative)] +#[allocative(bound = "")] +pub struct ThinArcS { + s: ThinArcStr, + _marker: PhantomData<*const S>, +} + +// Copy-paste these two lines from `std::sync::Arc`. +unsafe impl Send for ThinArcS {} +unsafe impl Sync for ThinArcS {} + +impl ThinArcS { + // Cannot implement `TryFrom` trait, something about conflicting implementations. 
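+    // (Likely due to std's blanket `impl<T, U: Into<T>> TryFrom<U> for T`, which
+    // would make a dedicated `TryFrom<&str>` impl a coherence conflict, so an
+    // inherent method is used instead.)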
+ #[inline] + pub fn try_from<'a>(s: &'a str) -> anyhow::Result> + where + &'a S: TryFrom<&'a str, Error = anyhow::Error>, + S: 'a, + { + let s: &S = TryFrom::try_from(s)?; + Ok(ThinArcS::from(s)) + } +} + +impl<'a, S: StringInside + ?Sized> From<&'a S> for ThinArcS { + #[inline] + fn from(s: &'a S) -> Self { + Self { + s: ThinArcStr::from(S::as_str(s)), + _marker: PhantomData, + } + } +} + +impl Deref for ThinArcS { + type Target = S; + + #[inline] + fn deref(&self) -> &Self::Target { + S::from_str(&self.s) + } +} + +impl AsRef for ThinArcS { + #[inline] + fn as_ref(&self) -> &S { + self + } +} + +impl Borrow for ThinArcS { + #[inline] + fn borrow(&self) -> &S { + self + } +} + +impl Display for ThinArcS { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + Display::fmt(&**self, f) + } +} + #[cfg(test)] mod tests { use std::fmt; diff --git a/app/buck2_util/src/arc_str/thin_slice.rs b/app/buck2_util/src/arc_str/thin_slice.rs index 47a7ede2533a0..0ec0c4b185f65 100644 --- a/app/buck2_util/src/arc_str/thin_slice.rs +++ b/app/buck2_util/src/arc_str/thin_slice.rs @@ -109,7 +109,6 @@ impl<'a, T> IntoIterator for &'a ThinArcSlice { } impl FromIterator for ThinArcSlice { - #[allow(clippy::from_iter_instead_of_collect)] fn from_iter>(iter: I) -> Self { let iter = iter.into_iter(); let (lower, upper) = iter.size_hint(); @@ -145,7 +144,6 @@ mod tests { assert_eq!(["a".to_owned(), "b".to_owned()], *slice); } - #[allow(clippy::from_iter_instead_of_collect)] #[test] fn test_from_iter() { // Iterator without size hint. diff --git a/app/buck2_client_ctx/src/cleanup_ctx.rs b/app/buck2_util/src/cleanup_ctx.rs similarity index 100% rename from app/buck2_client_ctx/src/cleanup_ctx.rs rename to app/buck2_util/src/cleanup_ctx.rs diff --git a/app/buck2_util/src/cycle_detector.rs b/app/buck2_util/src/cycle_detector.rs index 8d7509cedabdd..f06296a82a933 100644 --- a/app/buck2_util/src/cycle_detector.rs +++ b/app/buck2_util/src/cycle_detector.rs @@ -466,6 +466,7 @@ mod tests { struct SimpleCycleDescriptor; #[derive(Clone, Debug)] + #[allow(dead_code)] // field `0` is never read struct Error(Arc>); impl std::fmt::Display for Error { diff --git a/app/buck2_util/src/future.rs b/app/buck2_util/src/future.rs new file mode 100644 index 0000000000000..fd911d40cb1ef --- /dev/null +++ b/app/buck2_util/src/future.rs @@ -0,0 +1,100 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::future::Future; +use std::iter; +use std::mem; + +use futures::stream::FuturesUnordered; +use futures::FutureExt; +use futures::StreamExt; + +// patternlint-disable-next-line buck2-no-futures-try-join-all +/// Semantically the same as `futures::future::try_join_all`, but bug free. 
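+///
+/// A minimal illustrative use (not part of this diff); results come back in
+/// input order, and the first `Err` cancels the remaining futures eagerly:
+///
+/// ```ignore
+/// let futs = (0..3).map(|i| async move { Ok::<u32, ()>(i) });
+/// let values = buck2_util::future::try_join_all(futs).await?;
+/// assert_eq!(values, vec![0, 1, 2]);
+/// ```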
+/// +/// +pub fn try_join_all(i: I) -> impl Future, E>> +where + F: Future>, + I: IntoIterator, +{ + let mut i = i.into_iter().fuse(); + let f1 = i.next(); + let f2 = i.next(); + let f3 = i.next(); + + let (f1, f2, f3) = match (f1, f2, f3) { + (None, _, _) => { + return futures::future::ready(Ok(Vec::new())) + .left_future() + .left_future(); + } + (Some(f1), None, _) => { + return async move { Ok(vec![f1.await?]) } + .left_future() + .right_future(); + } + (Some(f1), Some(f2), None) => { + return async move { + let (v1, v2) = futures::future::try_join(f1, f2).await?; + Ok(vec![v1, v2]) + } + .right_future() + .left_future(); + } + (Some(f1), Some(f2), Some(f3)) => (f1, f2, f3), + }; + + #[allow(clippy::tuple_array_conversions)] + let mut futs: FuturesUnordered<_> = [f1, f2, f3] + .into_iter() + .chain(i) + .enumerate() + .map(|(i, f)| async move { (i, f.await) }) + .collect(); + + async move { + let mut outputs: Vec> = iter::repeat_with(|| None).take(futs.len()).collect(); + while let Some((i, res)) = futs.next().await { + match res { + Ok(v) => { + let prev = mem::replace(&mut outputs[i], Some(v)); + assert!(prev.is_none()); + } + Err(e) => return Err(e), + } + } + Ok(outputs.into_iter().map(|v| v.unwrap()).collect()) + } + .right_future() + .right_future() +} + +#[cfg(test)] +mod tests { + async fn slow_ok_or_err(ok: bool) -> Result<(), ()> { + if ok { + futures::future::pending().await + } else { + Err(()) + } + } + + /// Test that the futures are eagerly cancelled when the first one fails. This test fails with + /// the `futures` version of the function. + #[tokio::test] + async fn test_returns_eagerly() { + for size in [0, 1, 2, 10, 1000] { + let futs = std::iter::repeat_with(|| slow_ok_or_err(true)) + .take(size) + .chain([slow_ok_or_err(false)]); + super::try_join_all(futs).await.unwrap_err(); + } + } +} diff --git a/app/buck2_util/src/golden_test_helper.rs b/app/buck2_util/src/golden_test_helper.rs new file mode 100644 index 0000000000000..e9ee670830a8a --- /dev/null +++ b/app/buck2_util/src/golden_test_helper.rs @@ -0,0 +1,74 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! Used by golden tests written in rust, particularly useful for starlark tests +//! +//! This is mostly a copy from starlark_syntax/src/golden_test_template.rs. +//! Copied so we wouldn't need to depend on starlark_syntax for tests. + +use std::env; +use std::fmt::Write; +use std::fs; + +use anyhow::Context; + +const REGENERATE_VAR_NAME: &str = "BUCK2_RUST_REGENERATE_GOLDEN_TESTS"; + +#[allow(clippy::write_literal)] // We mark generated files as generated, but not this file. +fn make_golden(output: &str) -> String { + let mut golden = String::new(); + writeln!(golden, "# {at}generated", at = "@").unwrap(); + writeln!( + golden, + "# To regenerate, append -- --env {REGENERATE_VAR_NAME}=1 and re-run the test" + ) + .unwrap(); + writeln!(golden).unwrap(); + writeln!(golden, "{}", output.trim_end()).unwrap(); + golden +} + +/// Common code for golden tests. 
+pub fn golden_test_template(golden_rel_path: &str, output: &str) { + assert!(golden_rel_path.contains(".golden")); + + let manifest_dir = + env::var("CARGO_MANIFEST_DIR").expect("`CARGO_MANIFEST_DIR` variable must be set"); + let golden_file_path = format!("{manifest_dir}/{golden_rel_path}"); + let output_with_prefix = make_golden(output); + + if env::var(REGENERATE_VAR_NAME).is_ok() { + fs::write(&golden_file_path, &output_with_prefix) + .with_context(|| format!("Writing `{golden_file_path}`")) + .unwrap(); + } else { + let expected = fs::read_to_string(&golden_file_path) + .with_context(|| format!("Reading `{golden_file_path}`")) + .unwrap(); + + let expected = if cfg!(windows) { + // Git may check out files on Windows with \r\n as line separator. + // We could configure git, but it's more reliable to handle it in the test. + expected.replace("\r\n", "\n") + } else { + expected + }; + assert_eq!(expected, output_with_prefix); + } +} + +/// Duplicate of `starlark::tests::util::trim_rust_backtrace` to avoid exposing test internals. +/// There's no anyhow API to print error without rust backtrace +/// ([issue](https://github.com/dtolnay/anyhow/issues/300)). +pub fn trim_rust_backtrace(error: &str) -> &str { + match error.find("\nStack backtrace:") { + Some(pos) => error[..pos].trim_end(), + None => error.trim_end(), + } +} diff --git a/app/buck2_util/src/hash.rs b/app/buck2_util/src/hash.rs new file mode 100644 index 0000000000000..3927aa6923adf --- /dev/null +++ b/app/buck2_util/src/hash.rs @@ -0,0 +1,85 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! Hasher for buck2 code. +//! +//! Default hasher used by Rust `HashMap` is performance killer, +//! so consider using custom hasher. +//! +//! If unsure which hash to use, use this one. + +use std::hash::BuildHasher; +use std::hash::Hasher; + +use dupe::Dupe; +use starlark_map::StarlarkHasher; + +#[derive(Default)] +pub struct BuckHasher(StarlarkHasher); + +impl BuckHasher { + #[inline] + pub fn new() -> Self { + BuckHasher(StarlarkHasher::new()) + } +} + +impl Hasher for BuckHasher { + #[inline] + fn finish(&self) -> u64 { + self.0.finish() + } + + #[inline] + fn write(&mut self, bytes: &[u8]) { + self.0.write(bytes) + } + + #[inline] + fn write_u8(&mut self, i: u8) { + self.0.write_u8(i) + } + + #[inline] + fn write_u16(&mut self, i: u16) { + self.0.write_u16(i) + } + + #[inline] + fn write_u32(&mut self, i: u32) { + self.0.write_u32(i) + } + + #[inline] + fn write_u64(&mut self, i: u64) { + self.0.write_u64(i) + } + + #[inline] + fn write_u128(&mut self, i: u128) { + self.0.write_u128(i) + } + + #[inline] + fn write_usize(&mut self, i: usize) { + self.0.write_usize(i) + } +} + +#[derive(Default, Debug, Clone, Copy, Dupe)] +pub struct BuckHasherBuilder; + +impl BuildHasher for BuckHasherBuilder { + type Hasher = BuckHasher; + + #[inline] + fn build_hasher(&self) -> Self::Hasher { + BuckHasher::new() + } +} diff --git a/app/buck2_util/src/late_binding.rs b/app/buck2_util/src/late_binding.rs index cbc84b45826dc..93ac8aa62c1e4 100644 --- a/app/buck2_util/src/late_binding.rs +++ b/app/buck2_util/src/late_binding.rs @@ -47,7 +47,7 @@ use anyhow::Context; /// Suppose you have a function `fn foo()` initialized with `LateBinding`. 
 /// The convention is this:
 /// * in the interface crate define a static variable
-/// `static FOO: LateBinding<fn()> = LateBinding::new("FOO");`
+///   `static FOO: LateBinding<fn()> = LateBinding::new("FOO");`
 /// ([example](https://fburl.com/code/rvxqbf4f)).
 /// * in the implementation crate define an implementation like `fn foo() { ... }`,
 /// and next to the implementation, define a function like `fn init_foo() { FOO.init(foo); }`
diff --git a/app/buck2_util/src/lib.rs b/app/buck2_util/src/lib.rs
index 2f74e7ff51a34..9314b925a0eba 100644
--- a/app/buck2_util/src/lib.rs
+++ b/app/buck2_util/src/lib.rs
@@ -7,17 +7,30 @@
  * of this source tree.
  */
 
+#![feature(error_generic_member_access)]
+#![feature(once_cell_try)]
 #![feature(round_char_boundary)]
 
 pub mod arc_str;
+pub mod cleanup_ctx;
 pub mod commas;
 pub mod cycle_detector;
+pub mod future;
+pub mod golden_test_helper;
+pub mod hash;
 pub mod indent;
 pub mod late_binding;
+pub mod network_speed_average;
+pub mod os;
+pub mod per_thread_instruction_counter;
 pub mod process;
 pub mod process_stats;
 pub mod rtabort;
+pub mod self_ref;
+pub mod sliding_window;
 pub mod system_stats;
 pub mod thin_box;
+pub mod threads;
+pub mod tokio_runtime;
 pub mod truncate;
diff --git a/app/buck2_util/src/network_speed_average.rs b/app/buck2_util/src/network_speed_average.rs
new file mode 100644
index 0000000000000..88b0a6ebff530
--- /dev/null
+++ b/app/buck2_util/src/network_speed_average.rs
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::mem;
+use std::time::Duration;
+use std::time::SystemTime;
+
+const MICROS_IN_SEC: u64 = 1_000_000;
+
+struct Snapshot {
+    timestamp: SystemTime,
+    value: u64,
+}
+
+#[derive(Default)]
+pub struct NetworkSpeedAverage {
+    total_duration: Duration,
+    total_value: u64,
+    last_snapshot: Option<Snapshot>,
+}
+
+/// Maintains an average over an ascending sequence of values, ignoring stretches where
+/// the value does not change. Averaging all bytes downloaded over the whole build time
+/// would understate the speed; instead we average bytes over the time during which
+/// artifacts were actually being downloaded.
+impl NetworkSpeedAverage {
+    /// Report the value at a new timestamp.
+    ///
+    /// Each update value must be greater than or equal to the previous one.
+    /// Equal values are ignored during average calculation.
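+    ///
+    /// A minimal sketch of the expected call pattern (`t0` is any `SystemTime`;
+    /// this example is not part of this diff, it mirrors the unit test below):
+    ///
+    /// ```ignore
+    /// let mut avg = NetworkSpeedAverage::default();
+    /// avg.update(t0, 100);
+    /// avg.update(t0 + Duration::from_secs(1), 200);
+    /// assert_eq!(avg.avg_per_second(), Some(100));
+    /// ```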
+ pub fn update(&mut self, timestamp: SystemTime, value: u64) { + let last = mem::replace(&mut self.last_snapshot, Some(Snapshot { timestamp, value })); + if let Some((duration, value)) = + NetworkSpeedAverage::elapsed_if_value_changed(&last, &Snapshot { timestamp, value }) + { + self.total_duration += duration; + self.total_value += value; + } + } + + pub fn avg_per_second(&self) -> Option { + let micros = self.total_duration.as_micros(); + if micros == 0 { + return None; + } + Some(self.total_value * MICROS_IN_SEC / micros as u64) + } + + fn elapsed_if_value_changed( + last: &Option, + current: &Snapshot, + ) -> Option<(Duration, u64)> { + let last = last.as_ref()?; + let duration = current.timestamp.duration_since(last.timestamp).ok()?; + if duration.is_zero() { + return None; + } + let diff = current.value - last.value; + if diff == 0 { + return None; + } + Some((duration, diff)) + } +} + +#[cfg(test)] +mod tests { + use std::ops::Add; + use std::time::Duration; + use std::time::SystemTime; + + use super::NetworkSpeedAverage; + + #[test] + fn test_network_speed_average() { + let t0 = SystemTime::UNIX_EPOCH.add(Duration::from_secs(100000)); + + let mut avg = NetworkSpeedAverage::default(); + assert_eq!(None, avg.avg_per_second()); + + avg.update(t0, 100); + assert_eq!(None, avg.avg_per_second()); + + avg.update(t0.add(Duration::from_secs(1)), 200); + assert_eq!(Some(100), avg.avg_per_second()); + + avg.update(t0.add(Duration::from_secs(2)), 400); + assert_eq!(Some(150), avg.avg_per_second()); + + // if value hasn't changed then update should be no op + avg.update(t0.add(Duration::from_secs(3)), 400); + assert_eq!(Some(150), avg.avg_per_second()); + + avg.update(t0.add(Duration::from_secs(4)), 700); + assert_eq!(Some(200), avg.avg_per_second()); + } +} diff --git a/app/buck2_util/src/os.rs b/app/buck2_util/src/os.rs new file mode 100644 index 0000000000000..bbd17aab1b3da --- /dev/null +++ b/app/buck2_util/src/os.rs @@ -0,0 +1,11 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub mod macos; +pub mod win; diff --git a/app/buck2_util/src/os/macos.rs b/app/buck2_util/src/os/macos.rs new file mode 100644 index 0000000000000..65b7a02f06887 --- /dev/null +++ b/app/buck2_util/src/os/macos.rs @@ -0,0 +1,12 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub mod host_cpu_load_info; +pub(crate) mod mach_error_string; +pub mod sc_clk_tck; diff --git a/app/buck2_util/src/os/macos/host_cpu_load_info.rs b/app/buck2_util/src/os/macos/host_cpu_load_info.rs new file mode 100644 index 0000000000000..c4ada397a5c5d --- /dev/null +++ b/app/buck2_util/src/os/macos/host_cpu_load_info.rs @@ -0,0 +1,94 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +#![cfg(target_os = "macos")] + +use crate::os::macos::mach_error_string::mach_error_string; + +/// CPU usage sum for all cores from the system start. +/// Field values are in ticks. Each tick is 1/100 second (but please check `sc_clk_tck`). +// Currently, on my laptop sum of all fields is 1528429426. +// Which is 1528429426 / 100 = 15284294 seconds. +// 15284294 / 10 CPU cores = 1528429 seconds per core, which is 17 days. +// Note at 10 CPUs, numbers overflow after a couple of weeks of uptime. +#[derive(Debug, Copy, Clone)] +pub struct HostCpuLoadInfo { + pub user: u32, + pub system: u32, + pub idle: u32, + pub nice: u32, +} + +/// Query `HOST_CPU_LOAD_INFO`. This is low-level API. +pub fn host_cpu_load_info() -> anyhow::Result { + unsafe { + let mut count: libc::mach_msg_type_number_t = libc::HOST_CPU_LOAD_INFO_COUNT; + + // https://github.com/rust-lang/libc/pull/3916 + #[repr(C)] + struct host_cpu_load_info { + cpu_ticks: [libc::natural_t; libc::CPU_STATE_MAX as usize], + } + + let mut host_info = host_cpu_load_info { + cpu_ticks: [0; libc::CPU_STATE_MAX as usize], + }; + + let res = libc::host_statistics64( + libc::mach_host_self(), + libc::HOST_CPU_LOAD_INFO, + &mut host_info as *mut _ as *mut libc::integer_t, + &mut count, + ); + if res != libc::KERN_SUCCESS { + return Err(anyhow::anyhow!( + "host_statistics64 failed: {}", + mach_error_string(res) + )); + } + + Ok(HostCpuLoadInfo { + user: host_info.cpu_ticks[libc::CPU_STATE_USER as usize], + system: host_info.cpu_ticks[libc::CPU_STATE_SYSTEM as usize], + idle: host_info.cpu_ticks[libc::CPU_STATE_IDLE as usize], + nice: host_info.cpu_ticks[libc::CPU_STATE_NICE as usize], + }) + } +} + +#[cfg(test)] +mod tests { + use std::thread; + use std::time::Duration; + + use crate::os::macos::host_cpu_load_info::host_cpu_load_info; + + #[test] + fn test_host_cpu_load_info() { + let x = host_cpu_load_info().unwrap(); + thread::sleep(Duration::from_millis(10)); + let y = host_cpu_load_info().unwrap(); + assert!(x.user <= y.user); + assert!(x.system <= y.system); + assert!(x.idle <= y.idle); + assert!(x.nice <= y.nice); + let sum_x = x.user as u64 + x.system as u64 + x.idle as u64 + x.nice as u64; + let sum_y = y.user as u64 + y.system as u64 + y.idle as u64 + y.nice as u64; + + let delta = sum_y.wrapping_sub(sum_x) as i64; + + // 10 CPUs for 100 seconds at 100 ticks per second. + assert!(delta < 100_000, "{:?} <=> {:?}", x, y); + + // This test fails if comparison is `>` instead of `>=` + // unless sleep time is 1s or more. + // `host_cpu_load_info` seems to be updated every 1s. + assert!(delta >= 0, "{:?} <=> {:?}", x, y); + } +} diff --git a/app/buck2_util/src/os/macos/mach_error_string.rs b/app/buck2_util/src/os/macos/mach_error_string.rs new file mode 100644 index 0000000000000..185d0cae64916 --- /dev/null +++ b/app/buck2_util/src/os/macos/mach_error_string.rs @@ -0,0 +1,37 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +#![cfg(target_os = "macos")] + +pub(crate) fn mach_error_string(err: libc::kern_return_t) -> &'static str { + extern "C" { + fn mach_error_string(err: libc::kern_return_t) -> *const libc::c_char; + } + + unsafe { + let c_str = mach_error_string(err); + let c_str = std::ffi::CStr::from_ptr(c_str); + c_str + .to_str() + .unwrap_or("mach_error_string returned invalid UTF-8") + } +} + +#[cfg(test)] +mod tests { + use crate::os::macos::mach_error_string::mach_error_string; + + #[test] + fn test_mach_error_string() { + let success = mach_error_string(libc::KERN_SUCCESS); + assert!(success.contains("success"), "{:?}", success); + let no_space = mach_error_string(libc::KERN_NO_SPACE); + assert!(no_space.contains("no space"), "{:?}", no_space); + } +} diff --git a/app/buck2_util/src/os/macos/sc_clk_tck.rs b/app/buck2_util/src/os/macos/sc_clk_tck.rs new file mode 100644 index 0000000000000..cbeea2a646510 --- /dev/null +++ b/app/buck2_util/src/os/macos/sc_clk_tck.rs @@ -0,0 +1,45 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +#![cfg(target_os = "macos")] + +use std::sync::OnceLock; + +use anyhow::Context; + +#[allow(clippy::absurd_extreme_comparisons)] +pub fn sc_clk_tck() -> anyhow::Result { + static TICKS: OnceLock = OnceLock::new(); + TICKS + .get_or_try_init(|| { + unsafe { + let rate = libc::sysconf(libc::_SC_CLK_TCK); + let rate: u32 = rate + .try_into() + .context("Integer overflow converting ticks per second")?; + // Practically it is always 100. But we have to check it. + if rate <= 0 || rate > 10_000 { + return Err(anyhow::anyhow!("Invalid ticks per second: {}", rate)); + } + Ok(rate) + } + }) + .copied() +} + +#[cfg(test)] +mod tests { + use crate::os::macos::sc_clk_tck::sc_clk_tck; + + #[test] + fn test_ticks_per_second() { + // It is always 100. + assert_eq!(100, sc_clk_tck().unwrap()); + } +} diff --git a/app/buck2_util/src/os/win.rs b/app/buck2_util/src/os/win.rs new file mode 100644 index 0000000000000..6e62b28c30f29 --- /dev/null +++ b/app/buck2_util/src/os/win.rs @@ -0,0 +1,10 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub mod os_str; diff --git a/app/buck2_util/src/os/win/os_str.rs b/app/buck2_util/src/os/win/os_str.rs new file mode 100644 index 0000000000000..92dac9b59d720 --- /dev/null +++ b/app/buck2_util/src/os/win/os_str.rs @@ -0,0 +1,15 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */
+
+#[cfg(windows)]
+pub fn os_str_to_wide_null_term(s: &std::ffi::OsStr) -> Vec<u16> {
+    use std::iter;
+    use std::os::windows::ffi::OsStrExt;
+    s.encode_wide().chain(iter::once(0)).collect()
+}
diff --git a/app/buck2_util/src/per_thread_instruction_counter.rs b/app/buck2_util/src/per_thread_instruction_counter.rs
new file mode 100644
index 0000000000000..4f5f9a19f9756
--- /dev/null
+++ b/app/buck2_util/src/per_thread_instruction_counter.rs
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+/// Simple wrapper for perf-event to measure the number of instructions
+/// executed by the current thread.
+pub struct PerThreadInstructionCounter {
+    #[cfg(target_os = "linux")]
+    counter: perf_event::Counter,
+    #[cfg(not(target_os = "linux"))]
+    non_linux: std::convert::Infallible,
+}
+
+impl PerThreadInstructionCounter {
+    /// Create a new instruction counter.
+    ///
+    /// Return `Err` if `perf_event` failed, `None` on unsupported platforms.
+    pub fn init() -> anyhow::Result<Option<PerThreadInstructionCounter>> {
+        Self::init_impl()
+    }
+
+    #[cfg(target_os = "linux")]
+    fn init_impl() -> anyhow::Result<Option<PerThreadInstructionCounter>> {
+        let mut counter = perf_event::Builder::new()
+            .observe_self()
+            .any_cpu()
+            .inherit(false)
+            .kind(perf_event::events::Hardware::INSTRUCTIONS)
+            .build()?;
+        counter.enable()?;
+        Ok(Some(PerThreadInstructionCounter { counter }))
+    }
+
+    #[cfg(not(target_os = "linux"))]
+    fn init_impl() -> anyhow::Result<Option<PerThreadInstructionCounter>> {
+        Ok(None)
+    }
+
+    /// Collect the number of instructions executed by the thread.
+    pub fn collect(self) -> anyhow::Result<u64> {
+        self.collect_impl()
+    }
+
+    #[cfg(target_os = "linux")]
+    fn collect_impl(mut self) -> anyhow::Result<u64> {
+        self.counter.disable()?;
+        let count = self.counter.read_count_and_time()?;
+        if count.time_running == 0 {
+            Err(anyhow::anyhow!("No counter data collected"))
+        } else {
+            let count =
+                (count.count as u128) * (count.time_enabled as u128) / (count.time_running as u128);
+            Ok(count as u64)
+        }
+    }
+
+    #[cfg(not(target_os = "linux"))]
+    fn collect_impl(self) -> anyhow::Result<u64> {
+        match self.non_linux {}
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::env;
+
+    use three_billion_instructions::three_billion_instructions;
+
+    use crate::per_thread_instruction_counter::PerThreadInstructionCounter;
+
+    fn is_github_actions() -> bool {
+        // Set by GitHub Actions:
+        // https://docs.github.com/en/actions/learn-github-actions/variables
+        env::var("GITHUB_ACTIONS").is_ok()
+    }
+
+    #[allow(unreachable_code)] // Compiler says it is uninhabited on non-linux platforms.
+    #[allow(unused_variables)] // This seems like a compiler bug.
+    #[test]
+    fn test_perf_thread_instruction_counter() {
+        if is_github_actions() {
+            // Fails with permission denied on GitHub Actions CI.
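+            // (Opening a perf event typically requires a permissive
+            // /proc/sys/kernel/perf_event_paranoid setting, which shared CI
+            // runners usually do not grant.)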
+ return; + } + + if !cfg!(target_os = "linux") { + assert!(PerThreadInstructionCounter::init().unwrap().is_none()); + } else { + let counter = PerThreadInstructionCounter::init().unwrap().unwrap(); + three_billion_instructions().unwrap(); + let count = counter.collect().unwrap(); + assert!((3_000_000_000..=3_100_000_000).contains(&count)); + } + } +} diff --git a/app/buck2_util/src/self_ref.rs b/app/buck2_util/src/self_ref.rs new file mode 100644 index 0000000000000..633bb4187bf7a --- /dev/null +++ b/app/buck2_util/src/self_ref.rs @@ -0,0 +1,72 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::convert::Infallible; +use std::fmt::Debug; +use std::fmt::Formatter; +use std::sync::Arc; + +use allocative::Allocative; + +/// Describing data that can be stored in `SelfRef`. +pub trait RefData: 'static { + type Data<'a>: 'a; +} + +/// Self-referential struct. +#[derive(Allocative)] +#[allocative(bound = "D: RefData")] +pub struct SelfRef { + #[allocative(skip)] // TODO(nga): do not skip. + data: D::Data<'static>, + // Owner must be placed after `data` to ensure that `data` is dropped before `owner`. + // Owner must be in `Arc` (or `Rc`) because + // - pointers stay valid when `SelfRef` is moved. + // - it cannot be `Box` because it would violate aliasing rules + owner: Arc, +} + +impl Debug for SelfRef +where + D: RefData, + for<'a> D::Data<'a>: Debug, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("SelfRef") + .field("data", self.data()) + .finish_non_exhaustive() + } +} + +impl SelfRef { + pub fn try_new( + owner: O, + data: impl for<'a> FnOnce(&'a O) -> Result, E>, + ) -> Result { + let owner: Arc = Arc::new(owner); + let data = data(&owner)?; + let data = unsafe { std::mem::transmute::, D::Data<'static>>(data) }; + Ok(SelfRef { owner, data }) + } + + pub fn new( + owner: O, + data: impl for<'a> FnOnce(&'a O) -> D::Data<'a>, + ) -> Self { + match Self::try_new(owner, |f| Ok::<_, Infallible>(data(f))) { + Ok(x) => x, + Err(e) => match e {}, + } + } + + #[inline] + pub fn data(&self) -> &D::Data<'_> { + unsafe { std::mem::transmute::<&D::Data<'static>, &D::Data<'_>>(&self.data) } + } +} diff --git a/app/buck2_util/src/sliding_window.rs b/app/buck2_util/src/sliding_window.rs new file mode 100644 index 0000000000000..87e9c990a4d02 --- /dev/null +++ b/app/buck2_util/src/sliding_window.rs @@ -0,0 +1,120 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use std::cmp::max; +use std::collections::VecDeque; +use std::time::Duration; +use std::time::SystemTime; + +/// Maintains sliding windows in an ascending sequence of values +pub struct SlidingWindow { + duration: Duration, + queue: VecDeque<(SystemTime, u64)>, + max_per_sec: Option, +} + +impl SlidingWindow { + pub fn new(duration: Duration) -> Self { + Self { + duration, + queue: VecDeque::new(), + max_per_sec: None, + } + } + + /// Report the value at a new timestamp + /// + /// Each update value must be greater than or equal to the previous one + pub fn update(&mut self, timestamp: SystemTime, value: u64) { + self.queue.push_back((timestamp, value)); + + while let Some(current_duration) = self.window_duration() { + // we want to have at least two elements in window to calculate value per second + if current_duration > self.duration && self.queue.len() > 2 { + let _ = self.queue.pop_front(); + } else { + self.max_per_sec = max(self.max_per_sec, self.current_per_second()); + break; + } + } + } + + pub fn max_per_second(&self) -> Option { + self.max_per_sec + } + + fn current_per_second(&self) -> Option { + let (_, first_val) = self.queue.front()?; + let (_, last_val) = self.queue.back()?; + let delta_value = last_val.checked_sub(*first_val)?; + let duration = self.window_duration()?; + Some(delta_value * 1_000_000 / duration.as_micros() as u64) + } + + fn window_duration(&self) -> Option { + let (first_ts, _) = self.queue.front()?; + let (last_ts, _) = self.queue.back()?; + let duration = last_ts.duration_since(*first_ts).ok()?; + if duration.is_zero() { + return None; + } + Some(duration) + } +} + +#[cfg(test)] +mod tests { + use std::ops::Add; + use std::time::Duration; + use std::time::SystemTime; + + use super::SlidingWindow; + + #[test] + fn test_sliding_window() { + let t0 = SystemTime::UNIX_EPOCH.add(Duration::from_secs(100000)); + + let mut windows = SlidingWindow::new(Duration::from_secs(2)); + assert_eq!(None, windows.current_per_second()); + assert_eq!(None, windows.max_per_second()); + + windows.update(t0, 100); + assert_eq!(None, windows.current_per_second()); + assert_eq!(None, windows.max_per_second()); + + windows.update(t0.add(Duration::from_secs(1)), 200); + assert_eq!(Some(100), windows.current_per_second()); + assert_eq!(Some(100), windows.max_per_second()); + + windows.update(t0.add(Duration::from_secs(2)), 400); + assert_eq!(Some(150), windows.current_per_second()); + assert_eq!(Some(150), windows.max_per_second()); + + windows.update(t0.add(Duration::from_secs(3)), 450); + assert_eq!(Some(125), windows.current_per_second()); + assert_eq!(Some(150), windows.max_per_second()); + } + + #[test] + fn test_over_window_size() { + let t0 = SystemTime::UNIX_EPOCH.add(Duration::from_secs(100000)); + + let mut windows = SlidingWindow::new(Duration::from_secs(1)); + assert_eq!(None, windows.current_per_second()); + assert_eq!(None, windows.max_per_second()); + + windows.update(t0, 100); + assert_eq!(None, windows.current_per_second()); + assert_eq!(None, windows.max_per_second()); + + windows.update(t0.add(Duration::from_secs(2)), 300); + assert_eq!(Some(100), windows.current_per_second()); + assert_eq!(Some(100), windows.max_per_second()); + } +} diff --git a/app/buck2_util/src/system_stats.rs b/app/buck2_util/src/system_stats.rs index 4dd04d3b6aea6..b23929acade69 100644 --- a/app/buck2_util/src/system_stats.rs +++ b/app/buck2_util/src/system_stats.rs @@ -33,3 +33,26 @@ impl UnixSystemStats { None } } + +pub fn system_memory_stats() -> u64 { + use 
+    use sysinfo::MemoryRefreshKind;
+    use sysinfo::RefreshKind;
+    use sysinfo::System;
+
+    let system = System::new_with_specifics(
+        RefreshKind::new().with_memory(MemoryRefreshKind::new().with_ram()),
+    );
+    system.total_memory()
+}
+
+#[cfg(test)]
+mod tests {
+    use super::system_memory_stats;
+
+    #[test]
+    fn get_system_memory_stats() {
+        let total_mem = system_memory_stats();
+        // sysinfo returns zero when fails to retrieve data
+        assert!(total_mem > 0);
+    }
+}
diff --git a/app/buck2_util/src/thin_box.rs b/app/buck2_util/src/thin_box.rs
index f73552d302c58..c87bd7874fa26 100644
--- a/app/buck2_util/src/thin_box.rs
+++ b/app/buck2_util/src/thin_box.rs
@@ -7,8 +7,6 @@
  * of this source tree.
  */
 
-#![allow(clippy::from_iter_instead_of_collect)]
-
 use std::alloc;
 use std::alloc::Layout;
 use std::fmt::Debug;
@@ -24,17 +22,23 @@ use std::slice;
 
 use allocative::Allocative;
 
+#[repr(C)]
+struct ThinBoxSliceLayout<T> {
+    len: usize,
+    data: [T; 0],
+}
+
+impl<T> ThinBoxSliceLayout<T> {
+    fn offset_of_data() -> usize {
+        mem::offset_of!(ThinBoxSliceLayout::<T>, data)
+    }
+}
+
 /// `Box<[T]>` but thin pointer.
 ///
 /// Statically allocated for empty slice.
 pub struct ThinBoxSlice<T> {
-    /// Pointer to the first element.
-    ///
-    /// Memory layout:
-    /// ```ignore
-    /// [len, T, T, ...]
-    ///       ^ points here
-    /// ```
+    /// Pointer to the first element, `ThinBoxSliceLayout.data`.
     ptr: NonNull<T>,
 }
@@ -42,28 +46,22 @@ unsafe impl<T: Sync> Sync for ThinBoxSlice<T> {}
 unsafe impl<T: Send> Send for ThinBoxSlice<T> {}
 
 impl<T> ThinBoxSlice<T> {
-    const _ASSERTS: () = {
-        // Otherwise empty slice is not aligned properly.
-        assert!(mem::align_of::<T>() <= mem::align_of::<usize>());
-    };
-
     #[inline]
     pub const fn empty() -> ThinBoxSlice<T> {
-        const LEN_ZERO: usize = 0;
+        let instance = &ThinBoxSliceLayout::<T> { len: 0, data: [] };
         unsafe {
-            let ptr = (&LEN_ZERO as *const usize).add(1) as *mut T;
-            let ptr = NonNull::new_unchecked(ptr);
-            ThinBoxSlice { ptr }
+            ThinBoxSlice {
+                ptr: NonNull::new_unchecked(instance.data.as_ptr() as *mut T),
+            }
         }
     }
 
     /// Allocation layout for a slice of length `len`.
     #[inline]
     fn layout_for_len(len: usize) -> Layout {
-        let (layout, offset_of_data) = Layout::new::<usize>()
+        let (layout, _offset_of_data) = Layout::new::<ThinBoxSliceLayout<T>>()
             .extend(Layout::array::<T>(len).unwrap())
             .unwrap();
-        assert_eq!(mem::size_of::<usize>(), offset_of_data);
         layout
     }
@@ -71,7 +69,15 @@
     // Not called `len` to avoid overload with `Deref::len`.
     #[inline]
     fn read_len(&self) -> usize {
-        unsafe { *self.ptr.cast::<usize>().as_ptr().sub(1) }
+        unsafe {
+            (*self
+                .ptr
+                .as_ptr()
+                .cast::<u8>()
+                .sub(ThinBoxSliceLayout::<T>::offset_of_data())
+                .cast::<ThinBoxSliceLayout<T>>())
+            .len
+        }
     }
 
     /// Allocate uninitialized memory for a slice of length `len`.
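+    ///
+    /// Illustrative picture of the allocation this implies (a sketch inferred from
+    /// `ThinBoxSliceLayout` above, not text from the commit): for `len == 3` the heap
+    /// block is `[len | T T T]`, `ptr` points at the first `T`, and `read_len` walks
+    /// back `offset_of_data()` bytes from `ptr` to reach the `len` header.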
@@ -213,22 +219,29 @@ impl<T: Allocative> Allocative for ThinBoxSlice<T> {
     fn visit<'a, 'b: 'a>(&self, visitor: &'a mut allocative::Visitor<'b>) {
         let mut visitor = visitor.enter_self_sized::<Self>();
         {
-            let mut visitor =
-                visitor.enter_unique(allocative::Key::new("ptr"), mem::size_of_val(&self.ptr));
-            {
-                let mut visitor = visitor.enter(
-                    allocative::Key::new("alloc"),
-                    Self::layout_for_len(self.len()).size(),
-                );
-                visitor.visit_simple(allocative::Key::new("len"), mem::size_of::<usize>());
+            let ptr_key = allocative::Key::new("ptr");
+            if self.len() == 0 {
+                // Statically allocated data, so just report the pointer itself
+                visitor.visit_simple(ptr_key, mem::size_of_val(&self.ptr));
+            } else {
+                let mut visitor =
+                    visitor.enter_unique(allocative::Key::new("ptr"), mem::size_of_val(&self.ptr));
                 {
-                    let mut visitor = visitor.enter(allocative::Key::new("data"), self.len());
-                    visitor.visit_slice::<T>(self);
+                    let mut visitor = visitor.enter(
+                        allocative::Key::new("alloc"),
+                        Self::layout_for_len(self.len()).size(),
+                    );
+                    visitor.visit_simple(allocative::Key::new("len"), mem::size_of::<usize>());
+                    {
+                        let mut visitor = visitor
+                            .enter(allocative::Key::new("data"), mem::size_of_val::<[_]>(self));
+                        visitor.visit_slice::<T>(self);
+                        visitor.exit();
+                    }
                     visitor.exit();
                 }
                 visitor.exit();
             }
-            visitor.exit();
         }
         visitor.exit();
     }
diff --git a/app/buck2_util/src/threads.rs b/app/buck2_util/src/threads.rs
new file mode 100644
index 0000000000000..9803e5904caed
--- /dev/null
+++ b/app/buck2_util/src/threads.rs
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::cell::Cell;
+use std::future::Future;
+use std::hint;
+use std::pin::pin;
+use std::pin::Pin;
+use std::task::Poll;
+use std::thread;
+
+use anyhow::Context;
+
+/// Default stack size for buck2.
+///
+/// We want to be independent of possible future changes to the default stack size in Rust.
+pub(crate) const THREAD_DEFAULT_STACK_SIZE: usize = {
+    if cfg!(buck2_asan) {
+        // ASAN requires much larger stack size.
+        8 << 20
+    } else if cfg!(debug_assertions) {
+        // Need 4MB for windows-debug according to D60449433.
+        4 << 20
+    } else {
+        2 << 20
+    }
+};
+
+fn thread_builder(name: &str) -> thread::Builder {
+    thread::Builder::new()
+        .stack_size(THREAD_DEFAULT_STACK_SIZE)
+        .name(name.to_owned())
+}
+
+pub fn thread_spawn<T, F>(name: &str, code: F) -> std::io::Result<thread::JoinHandle<T>>
+where
+    T: Send + 'static,
+    F: FnOnce() -> T + Send + 'static,
+{
+    thread_builder(name).spawn(move || {
+        on_thread_start();
+        let r = code();
+        on_thread_stop();
+        r
+    })
+}
+
+pub fn thread_spawn_scoped<'scope, 'env: 'scope, T, F>(
+    name: &str,
+    scope: &'scope thread::Scope<'scope, 'env>,
+    code: F,
+) -> std::io::Result<thread::ScopedJoinHandle<'scope, T>>
+where
+    T: Send + 'static,
+    F: FnOnce() -> T + Send + 'scope,
+{
+    thread_builder(name).spawn_scoped(scope, move || {
+        on_thread_start();
+        let r = code();
+        on_thread_stop();
+        r
+    })
+}
+
+pub(crate) fn stack_pointer() -> *const () {
+    let mut x: u32 = 0;
+    hint::black_box(&mut x as *const u32 as *const ())
+}
+
+#[derive(Copy, Clone)]
+struct ValidStackRange {
+    start: *const (),
+    end: *const (),
+}
+
+impl ValidStackRange {
+    fn full_range() -> ValidStackRange {
+        let start = usize::MAX as *const ();
+        let end = usize::MIN as *const ();
+        ValidStackRange { start, end }
+    }
+}
+
+thread_local! {
+    static STACK_RANGE: Cell<Option<ValidStackRange>> = const { Cell::new(None) };
+}
+
+pub(crate) fn on_thread_start() {
+    assert!(
+        STACK_RANGE.get().is_none(),
+        "stack range must not be set in a new thread"
+    );
+    let stack_pointer = stack_pointer();
+    // Stack grows downwards. So we add to the start and subtract from the end.
+    // Add a little bit to the start because we don't really know where the stack starts.
+    let start = (stack_pointer as usize).checked_add(0x1000).unwrap() as *const ();
+    // Subtract 3/4 to catch stack overflow before program crashes.
+    let end = (stack_pointer as usize)
+        .checked_sub(THREAD_DEFAULT_STACK_SIZE / 4 * 3)
+        .unwrap() as *const ();
+    let stack_range = ValidStackRange { start, end };
+    STACK_RANGE.set(Some(stack_range));
+}
+
+pub(crate) fn on_thread_stop() {
+    let range = STACK_RANGE.replace(None);
+    assert!(range.is_some(), "stack range must be set in a thread");
+}
+
+pub fn check_stack_overflow() -> anyhow::Result<()> {
+    let stack_range = STACK_RANGE
+        .get()
+        .context("stack range not set (internal error)")?;
+    let stack_pointer = stack_pointer();
+    if stack_pointer > stack_range.start {
+        return Err(anyhow::anyhow!(
+            "stack underflow, should not happen (internal error)"
+        ));
+    }
+    if stack_pointer < stack_range.end {
+        // TODO(nga): need to tag this error, but we don't have tags in `buck2_util`.
+        return Err(anyhow::anyhow!("stack overflow (internal error)"));
+    }
+    Ok(())
+}
+
+#[must_use]
+pub struct IgnoreStackOverflowChecksForCurrentThread {
+    prev: Option<ValidStackRange>,
+}
+
+impl Drop for IgnoreStackOverflowChecksForCurrentThread {
+    fn drop(&mut self) {
+        STACK_RANGE.set(self.prev.take());
+    }
+}
+
+/// For tests.
+pub fn ignore_stack_overflow_checks_for_current_thread() -> IgnoreStackOverflowChecksForCurrentThread
+{
+    let prev = STACK_RANGE.replace(Some(ValidStackRange::full_range()));
+    IgnoreStackOverflowChecksForCurrentThread { prev }
+}
+
+/// For tests.
+pub async fn ignore_stack_overflow_checks_for_future<F: Future>(f: F) -> F::Output {
+    let f = pin!(f);
+
+    struct IgnoreStackOverflowChecksForFuture<'a, F> {
+        f: Pin<&'a mut F>,
+    }
+
+    impl<'a, F: Future> Future for IgnoreStackOverflowChecksForFuture<'a, F> {
+        type Output = F::Output;
+
+        fn poll(mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll<Self::Output> {
+            let _ignore = ignore_stack_overflow_checks_for_current_thread();
+            self.f.as_mut().poll(cx)
+        }
+    }
+
+    IgnoreStackOverflowChecksForFuture { f }.await
+}
+
+#[cfg(test)]
+pub(crate) mod tests {
+    use std::hint;
+
+    use crate::threads::check_stack_overflow;
+    use crate::threads::thread_spawn;
+
+    pub(crate) fn recursive_function(frames: u32) -> anyhow::Result<()> {
+        let Some(frames) = frames.checked_sub(1) else {
+            return Ok(());
+        };
+
+        check_stack_overflow()?;
+
+        // Allocate a string on the stack so the compiler won't optimize the recursion away.
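+        // (`hint::black_box` on both sides of the call below keeps the local alive
+        // across the recursion, so every frame really occupies stack space.)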
+ let mut x = String::new(); + hint::black_box(&mut x); + recursive_function(frames)?; + hint::black_box(&mut x); + Ok(()) + } + + #[test] + fn test_catch_stack_overflow() { + let error = thread_spawn("test", || recursive_function(u32::MAX)) + .unwrap() + .join() + .unwrap() + .unwrap_err(); + assert!(error.to_string().contains("stack overflow"), "{error:?}"); + } + + #[test] + fn test_no_stack_overflow() { + let () = thread_spawn("test", || recursive_function(1000)) + .unwrap() + .join() + .unwrap() + .unwrap(); + } +} diff --git a/app/buck2_util/src/tokio_runtime.rs b/app/buck2_util/src/tokio_runtime.rs new file mode 100644 index 0000000000000..9256b15a79aff --- /dev/null +++ b/app/buck2_util/src/tokio_runtime.rs @@ -0,0 +1,54 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use tokio::runtime::Builder; + +use crate::threads::on_thread_start; +use crate::threads::on_thread_stop; +use crate::threads::THREAD_DEFAULT_STACK_SIZE; + +pub fn new_tokio_runtime(thread_name: &str) -> Builder { + let mut builder = Builder::new_multi_thread(); + builder.thread_stack_size(THREAD_DEFAULT_STACK_SIZE); + builder.thread_name(thread_name); + builder.on_thread_start(on_thread_start); + builder.on_thread_stop(on_thread_stop); + builder +} + +#[cfg(test)] +mod tests { + use crate::threads::tests::recursive_function; + use crate::tokio_runtime::new_tokio_runtime; + + #[test] + fn test_stack_overflow() { + let rt = new_tokio_runtime("test_stack_overflow").build().unwrap(); + let error = rt + .block_on(async { + tokio::spawn(async { recursive_function(u32::MAX) }) + .await + .unwrap() + }) + .unwrap_err(); + assert!(error.to_string().contains("stack overflow"), "{error:?}"); + } + + #[test] + fn test_no_stack_overflow() { + let rt = new_tokio_runtime("test_stack_overflow").build().unwrap(); + let () = rt + .block_on(async { + tokio::spawn(async { recursive_function(1000) }) + .await + .unwrap() + }) + .unwrap(); + } +} diff --git a/app/buck2_util/src/truncate.rs b/app/buck2_util/src/truncate.rs index bbfcf077cb314..fcde3089bac8d 100644 --- a/app/buck2_util/src/truncate.rs +++ b/app/buck2_util/src/truncate.rs @@ -14,10 +14,11 @@ const TRUNCATION_DELIM: &str = ", "; /// the debug message with the middle elided if it's too long. /// `max_length` is maximum length of truncated message. pub fn truncate(msg: &str, max_length: usize) -> String { - if max_length <= TRUNCATION_MSG.len() { - TRUNCATION_MSG.to_owned() - } else if msg.len() > max_length { - let max_length_without_truncation_msg = max_length.saturating_sub(TRUNCATION_MSG.len()); + if msg.len() <= max_length { + return msg.to_owned(); + } + + if let Some(max_length_without_truncation_msg) = max_length.checked_sub(TRUNCATION_MSG.len()) { // Note that for Unicode strings we might end up with less than max_length characters, // because these functions are all in terms of bytes. // Not worth the hassle to do better, given how rare that is. 
@@ -29,7 +30,7 @@ pub fn truncate(msg: &str, max_length: usize) -> String { ..msg.len()] ) } else { - msg.to_owned() + TRUNCATION_MSG.to_owned() } } diff --git a/app/buck2_validation/BUCK b/app/buck2_validation/BUCK new file mode 100644 index 0000000000000..1da1c5aa90a67 --- /dev/null +++ b/app/buck2_validation/BUCK @@ -0,0 +1,28 @@ +load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") + +oncall("build_infra") + +rust_library( + name = "buck2_validation", + srcs = glob( + ["src/**/*.rs"], + ), + deps = [ + "fbsource//third-party/rust:anyhow", + "fbsource//third-party/rust:async-trait", + "fbsource//third-party/rust:derivative", + "fbsource//third-party/rust:derive_more", + "fbsource//third-party/rust:either", + "fbsource//third-party/rust:futures", + "fbsource//third-party/rust:serde", + "fbsource//third-party/rust:serde_json", + "//buck2/allocative/allocative:allocative", + "//buck2/app/buck2_artifact:buck2_artifact", + "//buck2/app/buck2_build_api:buck2_build_api", + "//buck2/app/buck2_core:buck2_core", + "//buck2/app/buck2_error:buck2_error", + "//buck2/app/buck2_execute:buck2_execute", + "//buck2/dice/dice:dice", + "//buck2/gazebo/dupe:dupe", + ], +) diff --git a/app/buck2_validation/Cargo.toml b/app/buck2_validation/Cargo.toml new file mode 100644 index 0000000000000..adefb47cc8f0d --- /dev/null +++ b/app/buck2_validation/Cargo.toml @@ -0,0 +1,28 @@ +[package] +description = """ +This crate defines the implementation logic for validation functionality. +""" +edition = "2021" +license = { workspace = true } +name = "buck2_validation" +repository = { workspace = true } +version = "0.1.0" + +[dependencies] +allocative = { workspace = true } +anyhow = { workspace = true } +async-trait = { workspace = true } +derivative = { workspace = true } +derive_more = { workspace = true } +dice = { workspace = true } +dupe = { workspace = true } +either = { workspace = true } +futures = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } + +buck2_artifact = { workspace = true } +buck2_build_api = { workspace = true } +buck2_core = { workspace = true } +buck2_error = { workspace = true } +buck2_execute = { workspace = true } diff --git a/app/buck2_validation/src/cached_validation_result.rs b/app/buck2_validation/src/cached_validation_result.rs new file mode 100644 index 0000000000000..136f550d51495 --- /dev/null +++ b/app/buck2_validation/src/cached_validation_result.rs @@ -0,0 +1,167 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::borrow::Cow; +use std::sync::Arc; + +use allocative::Allocative; +use buck2_core::base_deferred_key::BaseDeferredKey; +use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; +use dupe::Dupe; + +use crate::validator_api::ValidationResult; +use crate::validator_api::ValidationStatus; + +/// Result of running a validation, cached in DICE. 
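+///
+/// A sketch of the two shapes this can take (illustrative, mirroring the variants
+/// defined below; `err` stands for a `ValidationFailedUserFacingError`):
+/// ```ignore
+/// let ok = CachedValidationResult(Arc::new(CachedValidationResultData::Success));
+/// let failed = CachedValidationResult(Arc::new(CachedValidationResultData::Failure(err)));
+/// ```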
+#[derive(Clone, Dupe, Allocative, PartialEq)]
+pub(crate) struct CachedValidationResult(pub(crate) Arc<CachedValidationResultData>);
+
+#[derive(Allocative, PartialEq)]
+pub(crate) enum CachedValidationResultData {
+    Success,
+    Failure(ValidationFailedUserFacingError),
+}
+
+#[derive(buck2_error::Error, Debug, PartialEq, Allocative, Clone)]
+#[buck2(input)]
+#[error(
+    "Validation for `{target}` failed:\n\n{}.\n\nFull validation result is located at: `{result_path}`", self.rendered_message()
+)]
+pub(crate) struct ValidationFailedUserFacingError {
+    target: BaseDeferredKey,
+    short_message: Option<String>,
+    result_path: AbsNormPathBuf,
+}
+
+impl ValidationFailedUserFacingError {
+    pub(crate) fn rendered_message(&self) -> Cow<'_, str> {
+        self.short_message.as_deref().map_or_else(
+            || Cow::Borrowed("Diagnostic message is missing from validation result"),
+            |x| Cow::Owned(format!("\"{}\"", x)),
+        )
+    }
+}
+
+impl CachedValidationResult {
+    pub(crate) fn new(
+        parsed_result: ValidationResult,
+        target: BaseDeferredKey,
+        validation_result_path: AbsNormPathBuf,
+    ) -> CachedValidationResult {
+        let data = match parsed_result {
+            ValidationResult {
+                status: ValidationStatus::Success,
+                ..
+            } => CachedValidationResultData::Success,
+            ValidationResult {
+                status: ValidationStatus::Failure,
+                message,
+            } => CachedValidationResultData::Failure(ValidationFailedUserFacingError::new(
+                message,
+                target,
+                validation_result_path,
+            )),
+        };
+        Self(Arc::new(data))
+    }
+}
+
+impl ValidationFailedUserFacingError {
+    pub(crate) fn new(
+        message: Option<String>,
+        target: BaseDeferredKey,
+        validation_result_path: AbsNormPathBuf,
+    ) -> Self {
+        let short_message = message.map(|x| {
+            const MAX_CACHED_LENGTH: usize = 600;
+            // Shortened message as we don't want to store too much data in DICE
+            shorten_message(x, MAX_CACHED_LENGTH)
+        });
+        Self {
+            target,
+            short_message,
+            result_path: validation_result_path,
+        }
+    }
+}
+
+/// If original message exceeds the limit, cut it and add an ellipsis.
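+///
+/// The cut lands on a UTF-8 character boundary via `floor_char_boundary`, so
+/// multi-byte text is never split mid-character. For example (taken from the
+/// tests below):
+/// ```ignore
+/// assert_eq!(shorten_message("Hello World".to_owned(), 5), "Hello...");
+/// assert_eq!(shorten_message("Привет, мир".to_owned(), 7), "При...");
+/// ```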
+fn shorten_message(mut message: String, max_bytes: usize) -> String { + if message.len() > max_bytes { + let new_length = message.floor_char_boundary(max_bytes); + message.truncate(new_length); + message.push_str("..."); + } + message +} + +#[cfg(test)] +mod tests { + #[cfg(unix)] + use buck2_core::configuration::data::ConfigurationData; + #[cfg(unix)] + use buck2_core::target::configured_target_label::ConfiguredTargetLabel; + + use super::*; + + #[test] + fn test_shorten_message() { + assert_eq!(shorten_message("Hello World".to_owned(), 5), "Hello..."); + assert_eq!(shorten_message("Hello World".to_owned(), 11), "Hello World"); + assert_eq!( + shorten_message("Hello World".to_owned(), 100), + "Hello World" + ); + assert_eq!(shorten_message("Привет, мир".to_owned(), 7), "При..."); + assert_eq!(shorten_message("Привет, мир".to_owned(), 12), "Привет..."); + assert_eq!(shorten_message("Привет, мир".to_owned(), 22), "Привет, мир"); + assert_eq!(shorten_message("你好世界".to_owned(), 5), "你..."); + assert_eq!(shorten_message("你好世界".to_owned(), 8), "你好..."); + assert_eq!(shorten_message("你好世界".to_owned(), 16), "你好世界"); + } + + #[cfg(unix)] + #[test] + fn test_error_rendering() -> anyhow::Result<()> { + let target = + ConfiguredTargetLabel::testing_parse("cell//pkg:foo", ConfigurationData::testing_new()); + let path = AbsNormPathBuf::from("/my/path/to/validation/result".to_owned())?; + assert_eq!( + format!( + "{}", + ValidationFailedUserFacingError::new( + None, + BaseDeferredKey::TargetLabel(target.dupe()), + path.clone(), + ) + ), + r#"Validation for `cell//pkg:foo (#2c29d96c65b4379a)` failed: + +Diagnostic message is missing from validation result. + +Full validation result is located at: `/my/path/to/validation/result`"# + ); + assert_eq!( + format!( + "{}", + ValidationFailedUserFacingError::new( + Some("Here is my diagnostic message".to_owned()), + BaseDeferredKey::TargetLabel(target), + path, + ) + ), + r#"Validation for `cell//pkg:foo (#2c29d96c65b4379a)` failed: + +"Here is my diagnostic message". + +Full validation result is located at: `/my/path/to/validation/result`"# + ); + Ok(()) + } +} diff --git a/app/buck2_validation/src/enabled_optional_validations_key.rs b/app/buck2_validation/src/enabled_optional_validations_key.rs new file mode 100644 index 0000000000000..650195afdf738 --- /dev/null +++ b/app/buck2_validation/src/enabled_optional_validations_key.rs @@ -0,0 +1,43 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::collections::BTreeSet; +use std::sync::Arc; + +use allocative::Allocative; +use derive_more::Display; +use dice::DiceTransactionUpdater; +use dice::InjectedKey; +use dupe::Dupe; + +#[derive(Display, Debug, Hash, Eq, Clone, Dupe, PartialEq, Allocative)] + +// DICE key that corresponds to optional validations that are enabled via command line flag. 
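+//
+// Illustrative call site (hypothetical; the real wiring lives wherever the
+// command line is parsed):
+//
+//     updater.set_enabled_optional_validations(vec!["my_check".to_owned()])?;
+//
+// injects the enabled names for the current DICE transaction.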
+pub(crate) struct EnabledOptionalValidationsKey;
+
+pub trait SetEnabledOptionalValidations {
+    fn set_enabled_optional_validations(&mut self, validations: Vec<String>) -> anyhow::Result<()>;
+}
+
+impl InjectedKey for EnabledOptionalValidationsKey {
+    type Value = Arc<BTreeSet<String>>;
+
+    fn equality(x: &Self::Value, y: &Self::Value) -> bool {
+        x == y
+    }
+}
+
+impl SetEnabledOptionalValidations for DiceTransactionUpdater {
+    fn set_enabled_optional_validations(&mut self, validations: Vec<String>) -> anyhow::Result<()> {
+        Ok(self.changed_to(vec![(
+            EnabledOptionalValidationsKey,
+            Arc::new(BTreeSet::from_iter(validations)),
+        )])?)
+    }
+}
diff --git a/app/buck2_validation/src/lib.rs b/app/buck2_validation/src/lib.rs
new file mode 100644
index 0000000000000..0f8984767caa7
--- /dev/null
+++ b/app/buck2_validation/src/lib.rs
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+#![feature(error_generic_member_access)]
+#![feature(assert_matches)]
+#![feature(round_char_boundary)]
+
+use std::sync::Once;
+
+mod cached_validation_result;
+pub mod enabled_optional_validations_key;
+mod single_validation_key;
+mod transitive_validation_key;
+mod validation;
+mod validator_api;
+
+pub fn init_late_bindings() {
+    static ONCE: Once = Once::new();
+    ONCE.call_once(|| {
+        validation::init_validation_impl();
+    });
+}
diff --git a/app/buck2_validation/src/single_validation_key.rs b/app/buck2_validation/src/single_validation_key.rs
new file mode 100644
index 0000000000000..a8391d60a7c94
--- /dev/null
+++ b/app/buck2_validation/src/single_validation_key.rs
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use allocative::Allocative;
+use anyhow::Context;
+use async_trait::async_trait;
+use buck2_artifact::actions::key::ActionKey;
+use buck2_build_api::actions::artifact::get_artifact_fs::GetArtifactFs;
+use buck2_build_api::actions::calculation::ActionCalculation;
+use buck2_core::fs::async_fs_util;
+use buck2_error::BuckErrorContext;
+use buck2_execute::materialize::materializer::HasMaterializer;
+use derive_more::Display;
+use dice::CancellationContext;
+use dice::DiceComputations;
+use dice::Key;
+use dupe::Dupe;
+
+use crate::cached_validation_result::CachedValidationResult;
+use crate::validator_api::parse_validation_result;
+
+#[derive(Debug, buck2_error::Error)]
+enum ParseValidationResultError {
+    #[error("Validation result should produce exactly one artifact")]
+    WrongNumberOfArtifacts,
+}
+
+/// DICE key that corresponds to a single validation, represented by a ValidationSpec object.
+/// Computation is the whole process of:
+/// 1) Building the validation result artifact.
+/// 2) Materializing it.
+/// 3) Reading and parsing it to produce a result that can be cached in DICE.
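+///
+/// The file read in step 3 is expected to follow the `validator_api` schema; an
+/// illustrative payload (taken from the tests in that module):
+/// ```ignore
+/// { "version": 1, "data": { "status": "failure", "message": "something somewhere" } }
+/// ```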
+#[derive(Clone, Display, Dupe, Debug, Eq, PartialEq, Hash, Allocative)]
+#[repr(transparent)]
+pub(crate) struct SingleValidationKey(pub ActionKey);
+
+#[async_trait]
+impl Key for SingleValidationKey {
+    type Value = buck2_error::Result<CachedValidationResult>;
+
+    async fn compute(
+        &self,
+        ctx: &mut DiceComputations,
+        _cancellation: &CancellationContext,
+    ) -> Self::Value {
+        let gen_path = {
+            let build_result = ActionCalculation::build_action(ctx, &self.0).await?;
+            if build_result.iter().count() != 1 {
+                return Err(buck2_error::Error::new(
+                    ParseValidationResultError::WrongNumberOfArtifacts,
+                ));
+            }
+            let (gen_path, ..) = build_result
+                .iter()
+                .next()
+                .internal_error_anyhow("Just checked single element")?;
+            gen_path.dupe()
+        };
+
+        let fs = ctx.get_artifact_fs().await?;
+        let project_relative_path = fs.buck_out_path_resolver().resolve_gen(&gen_path);
+
+        let validation_result_path = fs.fs().resolve(&project_relative_path);
+
+        // Make sure validation result is materialized before we parse it
+        ctx.per_transaction_data()
+            .get_materializer()
+            .ensure_materialized(vec![project_relative_path])
+            .await?;
+
+        let content = async_fs_util::read_to_string(&validation_result_path)
+            .await
+            .context("Reading validation result")?;
+
+        match parse_validation_result(&content) {
+            Ok(r) => Ok(CachedValidationResult::new(
+                r,
+                self.0.owner().dupe(),
+                validation_result_path,
+            )),
+            Err(e) => Err(buck2_error::Error::from(e)),
+        }
+    }
+
+    fn equality(x: &Self::Value, y: &Self::Value) -> bool {
+        match (x, y) {
+            (Ok(x), Ok(y)) => x == y,
+            _ => false,
+        }
+    }
+
+    fn validity(x: &Self::Value) -> bool {
+        x.is_ok()
+    }
+}
diff --git a/app/buck2_validation/src/transitive_validation_key.rs b/app/buck2_validation/src/transitive_validation_key.rs
new file mode 100644
index 0000000000000..ce5f3ba75f371
--- /dev/null
+++ b/app/buck2_validation/src/transitive_validation_key.rs
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::borrow::Cow;
+use std::collections::BTreeSet;
+use std::sync::Arc;
+
+use allocative::Allocative;
+use async_trait::async_trait;
+use buck2_artifact::artifact::artifact_type::Artifact;
+use buck2_build_api::analysis::calculation::RuleAnalysisCalculation;
+use buck2_build_api::validation::transitive_validations::TransitiveValidations;
+use buck2_core::target::configured_target_label::ConfiguredTargetLabel;
+use buck2_error::BuckErrorContext;
+use derivative::Derivative;
+use derive_more::Display;
+use dice::CancellationContext;
+use dice::DiceComputations;
+use dice::DiceError;
+use dice::Key;
+use dupe::Dupe;
+use dupe::IterDupedExt;
+use either::Either;
+use futures::future::FutureExt;
+
+use crate::cached_validation_result::CachedValidationResult;
+use crate::cached_validation_result::CachedValidationResultData;
+use crate::cached_validation_result::ValidationFailedUserFacingError;
+use crate::enabled_optional_validations_key::EnabledOptionalValidationsKey;
+use crate::single_validation_key::SingleValidationKey;
+
+/// DICE key that corresponds to a validation of a whole target subgraph rooted at the given node.
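+///
+/// Conceptually this recurses: the key validates the node's own `ValidationInfo`
+/// outputs and, in parallel (via `try_compute2` below), computes the same key for
+/// every child, so a failure anywhere in the subgraph surfaces at the root.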
+#[derive(
+    Clone, Display, Dupe, Allocative, Derivative, Hash, Eq, PartialEq, Debug
+)]
+#[repr(transparent)]
+pub(crate) struct TransitiveValidationKey(pub ConfiguredTargetLabel);
+
+impl TransitiveValidationKey {
+    /// Only performs validations that are described in `ValidationInfo` for the current node
+    async fn validate_current_node(
+        &self,
+        ctx: &mut DiceComputations<'_>,
+        transitive_validations: TransitiveValidations,
+    ) -> Result<(), TreatValidationFailureAsError> {
+        let info = match &transitive_validations.0.info {
+            Some(info) => info,
+            None => return Ok(()),
+        };
+
+        let enabled_optional_validations = if info.validations().any(|spec| spec.optional()) {
+            Either::Left(ctx.compute(&EnabledOptionalValidationsKey).await?)
+        } else {
+            Either::Right(Cow::Owned(BTreeSet::new()))
+        };
+
+        let enabled_optional_validations: &BTreeSet<String> =
+            AsRef::as_ref(&enabled_optional_validations);
+
+        let artifacts = info
+            .validations()
+            .filter(|spec| !spec.optional() || enabled_optional_validations.contains(spec.name()))
+            .map(|spec| spec.validation_result().get_bound_artifact())
+            .map(|r| r.map_err(buck2_error::Error::from))
+            .collect::<Result<Vec<_>, _>>()?;
+        ctx.try_compute_join(artifacts, |ctx, output| {
+            async move { compute_single_validation(ctx, output).await }.boxed()
+        })
+        .await
+        .map(|_| ())
+    }
+
+    async fn validate_children(
+        &self,
+        ctx: &mut DiceComputations<'_>,
+        transitive_validations: TransitiveValidations,
+    ) -> Result<(), TreatValidationFailureAsError> {
+        ctx.try_compute_join(
+            transitive_validations.0.children.iter().duped(),
+            |ctx, label| {
+                let key = TransitiveValidationKey(label);
+                async move {
+                    let result = ctx.compute(&key).await?;
+                    tighten_cached_validation_result(result)
+                }
+                .boxed()
+            },
+        )
+        .await
+        .map(|_| ())
+    }
+}
+
+#[async_trait]
+impl Key for TransitiveValidationKey {
+    type Value = buck2_error::Result<CachedValidationResult>;
+
+    async fn compute(
+        &self,
+        ctx: &mut DiceComputations,
+        _cancellation: &CancellationContext,
+    ) -> Self::Value {
+        let transitive_validations = ctx
+            .get_validations(&self.0)
+            .await?
+            .require_compatible()
+            .internal_error_anyhow("Incompatible node is not expected")?;
+        let transitive_validations = match transitive_validations {
+            Some(x) => x,
+            // Means there are no transitive `ValidationInfo` providers, validation is a no-op.
+            None => {
+                return Ok(CachedValidationResult(Arc::new(
+                    CachedValidationResultData::Success,
+                )));
+            }
+        };
+        let result = ctx
+            .try_compute2(
+                {
+                    let transitive_validations = transitive_validations.dupe();
+                    move |ctx| {
+                        self.validate_current_node(ctx, transitive_validations)
+                            .boxed()
+                    }
+                },
+                move |ctx| self.validate_children(ctx, transitive_validations).boxed(),
+            )
+            .await;
+        match result {
+            Ok(_) => Ok(CachedValidationResult(Arc::new(
+                CachedValidationResultData::Success,
+            ))),
+            Err(TreatValidationFailureAsError::Transient(e)) => Err(e),
+            Err(TreatValidationFailureAsError::ValidationFailed(e)) => Ok(CachedValidationResult(
+                Arc::new(CachedValidationResultData::Failure(e)),
+            )),
+        }
+    }
+
+    fn equality(x: &Self::Value, y: &Self::Value) -> bool {
+        match (x, y) {
+            (Ok(x), Ok(y)) => x == y,
+            _ => false,
+        }
+    }
+
+    fn validity(x: &Self::Value) -> bool {
+        x.is_ok()
+    }
+}
+
+/// Auxiliary error type to be able to stop running validations early during the transitive check
+/// when we first encounter a validation failure. We do not treat a validation failure as an error
+/// when checking a single node, so that the result can be cached in DICE.
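+///
+/// `Transient` wraps infrastructure errors (DICE, anyhow, buck2_error) via the
+/// `From` impls below; only `ValidationFailed` is turned back into a cacheable
+/// `CachedValidationResultData::Failure` by the key's `compute`.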
+enum TreatValidationFailureAsError {
+    ValidationFailed(ValidationFailedUserFacingError),
+    Transient(buck2_error::Error),
+}
+
+impl From<buck2_error::Error> for TreatValidationFailureAsError {
+    fn from(value: buck2_error::Error) -> Self {
+        TreatValidationFailureAsError::Transient(value)
+    }
+}
+
+impl From<anyhow::Error> for TreatValidationFailureAsError {
+    fn from(value: anyhow::Error) -> Self {
+        TreatValidationFailureAsError::Transient(buck2_error::Error::from(value))
+    }
+}
+
+impl From<DiceError> for TreatValidationFailureAsError {
+    fn from(value: DiceError) -> Self {
+        TreatValidationFailureAsError::Transient(buck2_error::Error::from(value))
+    }
+}
+
+impl From<ValidationFailedUserFacingError> for TreatValidationFailureAsError {
+    fn from(value: ValidationFailedUserFacingError) -> Self {
+        TreatValidationFailureAsError::ValidationFailed(value)
+    }
+}
+
+async fn compute_single_validation(
+    ctx: &mut DiceComputations<'_>,
+    validation_result: Artifact,
+) -> Result<(), TreatValidationFailureAsError> {
+    let action_key = validation_result
+        .action_key()
+        .internal_error_anyhow("Expected validation to be a build artifact")?;
+    let key = SingleValidationKey(action_key.dupe());
+    let result = ctx.compute(&key).await?;
+    tighten_cached_validation_result(result)
+}
+
+fn tighten_cached_validation_result(
+    result: buck2_error::Result<CachedValidationResult>,
+) -> Result<(), TreatValidationFailureAsError> {
+    match result {
+        Ok(result) => match result.0.as_ref() {
+            CachedValidationResultData::Success => Ok(()),
+            CachedValidationResultData::Failure(user_facing_error) => {
+                Err(user_facing_error.clone().into())
+            }
+        },
+        Err(e) => Err(e.into()),
+    }
+}
diff --git a/app/buck2_validation/src/validation.rs b/app/buck2_validation/src/validation.rs
new file mode 100644
index 0000000000000..224e9caed0237
--- /dev/null
+++ b/app/buck2_validation/src/validation.rs
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use async_trait::async_trait;
+use buck2_build_api::validation::validation_impl::ValidationImpl;
+use buck2_build_api::validation::validation_impl::VALIDATION_IMPL;
+use buck2_core::target::configured_target_label::ConfiguredTargetLabel;
+use dice::DiceComputations;
+
+use crate::cached_validation_result::CachedValidationResultData;
+use crate::transitive_validation_key::TransitiveValidationKey;
+
+pub(crate) fn init_validation_impl() {
+    VALIDATION_IMPL.init(&ValidationImplInstance);
+}
+
+struct ValidationImplInstance;
+
+#[async_trait]
+impl ValidationImpl for ValidationImplInstance {
+    async fn validate_target_node_transitively(
+        &self,
+        ctx: &mut DiceComputations<'_>,
+        target: ConfiguredTargetLabel,
+    ) -> Result<(), buck2_error::Error> {
+        let key = TransitiveValidationKey(target);
+        let result = ctx.compute(&key).await??;
+        match result.0.as_ref() {
+            CachedValidationResultData::Success => Ok(()),
+            CachedValidationResultData::Failure(e) => Err(buck2_error::Error::from(e.clone())),
+        }
+    }
+}
diff --git a/app/buck2_validation/src/validator_api.rs b/app/buck2_validation/src/validator_api.rs
new file mode 100644
index 0000000000000..ef98462e67ccb
--- /dev/null
+++ b/app/buck2_validation/src/validator_api.rs
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+//! This module defines the API between validators and Buck2 by specifying
+//! the schema for a JSON file that represents the result of a validator run.
+
+use serde::Deserialize;
+use serde_json::value::RawValue;
+
+const CURRENT_VERSION: i32 = 1;
+
+#[derive(Deserialize, Debug)]
+struct ValidationStaticSchema<'a> {
+    version: i32,
+    #[serde(borrow)]
+    data: &'a RawValue,
+}
+
+#[derive(Deserialize, Debug, PartialEq)]
+pub(crate) enum ValidationStatus {
+    #[serde(alias = "success")]
+    Success,
+    #[serde(alias = "failure")]
+    Failure,
+}
+
+#[derive(Deserialize, Debug, PartialEq)]
+pub(crate) struct ValidationResult {
+    pub status: ValidationStatus,
+    pub message: Option<String>,
+}
+
+#[derive(Debug, buck2_error::Error)]
+#[buck2(input)]
+enum ValidationApiError {
+    #[error("Validation result should contain valid JSON.")]
+    InvalidJson {
+        #[source]
+        error: serde_json::Error,
+    },
+    #[error("Incompatible version of validation result `{0}`, expected `{1}`.")]
+    IncompatibleVersion(i32, i32),
+    #[error("JSON content doesn't match schema.")]
+    JsonNotMatchingSchema {
+        #[source]
+        error: serde_json::Error,
+    },
+}
+
+pub(crate) fn parse_validation_result(content: &str) -> anyhow::Result<ValidationResult> {
+    let result: ValidationStaticSchema = match serde_json::from_str(content) {
+        Ok(x) => x,
+        Err(error) => return Err((ValidationApiError::InvalidJson { error }).into()),
+    };
+    if result.version != CURRENT_VERSION {
+        return Err(
+            ValidationApiError::IncompatibleVersion(result.version, CURRENT_VERSION).into(),
+        );
+    }
+    let result: ValidationResult = match serde_json::from_str(result.data.get()) {
+        Ok(x) => x,
+        Err(error) => return Err((ValidationApiError::JsonNotMatchingSchema { error }).into()),
+    };
+    Ok(result)
+}
+
+#[cfg(test)]
+mod tests {
+    use std::assert_matches::assert_matches;
+
+    use super::*;
+
+    #[test]
+    fn test_parse_broken_json() {
+        let json = r#"
+        {
+            "version": 1
+        "#;
+        let error = parse_validation_result(json).expect_err("Expected parsing to fail");
+        assert_matches!(
+            error.downcast_ref::<ValidationApiError>(),
+            Some(ValidationApiError::InvalidJson { .. })
+        );
+    }
+
+    #[test]
+    fn test_parse_wrong_schema() {
+        let json = r#"
+        {
+            "data": {
+                "foo": 1,
+                "bar": 2
+            }
+        }
+        "#;
+        let error = parse_validation_result(json).expect_err("Expected parsing to fail");
+        assert_matches!(
+            error.downcast_ref::<ValidationApiError>(),
+            Some(ValidationApiError::InvalidJson { .. })
+        );
+    }
+
+    #[test]
+    fn test_parse_valid_result() {
+        let json = r#"
+        {
+            "version": 1,
+            "data": {
+                "status": "failure",
+                "message": "something somewhere"
+            }
+        }
+        "#;
+        let expected = ValidationResult {
+            status: ValidationStatus::Failure,
+            message: Some("something somewhere".to_owned()),
+        };
+        assert_eq!(parse_validation_result(json).unwrap(), expected);
+    }
+
+    #[test]
+    fn test_parse_invalid_version() {
+        let json = r#"
+        {
+            "version": 2,
+            "data": {
+                "foo": 1,
+                "bar": 2
+            }
+        }
+        "#;
+        let error = parse_validation_result(json).expect_err("Expected parsing to fail");
+        assert_matches!(
+            error.downcast_ref::<ValidationApiError>(),
+            Some(ValidationApiError::IncompatibleVersion(..))
+        );
+    }
+
+    #[test]
+    fn test_parse_no_matching_schema_with_valid_version() {
+        let json = r#"
+        {
+            "version": 1,
+            "data": {
+                "message": "something somewhere"
+            }
+        }
+        "#;
+        let error = parse_validation_result(json).expect_err("Expected parsing to fail");
+        assert_matches!(
+            error.downcast_ref::<ValidationApiError>(),
+            Some(ValidationApiError::JsonNotMatchingSchema { .. })
+        );
+    }
+}
diff --git a/app/buck2_worker_proto/BUCK b/app/buck2_worker_proto/BUCK
index 94cb2759bc02e..bed6f19e6639e 100644
--- a/app/buck2_worker_proto/BUCK
+++ b/app/buck2_worker_proto/BUCK
@@ -1,6 +1,5 @@
 load("@fbcode//buck2:proto_defs.bzl", "rust_protobuf_library")
 load("@fbcode//grpc_fb/codegen:buck_macros.bzl", "grpc_library")
-load("@fbsource//tools/build_defs:glob_defs.bzl", "glob")
 
 oncall("build_infra")
 
@@ -8,7 +7,6 @@ rust_protobuf_library(
     name = "buck2_worker_proto",
     srcs = glob(["src/**/*.rs"]),
     build_script = "build.rs",
-    doctests = False,  # FIXME
     protos = ["worker.proto"],
     deps = [
         "fbsource//third-party/rust:tonic",
diff --git a/app/buck2_worker_proto/Cargo.toml b/app/buck2_worker_proto/Cargo.toml
index 7f73470fe6fb6..adc39f9f11abd 100644
--- a/app/buck2_worker_proto/Cargo.toml
+++ b/app/buck2_worker_proto/Cargo.toml
@@ -2,11 +2,12 @@
 name = "buck2_worker_proto"
 edition = "2021"
+license = { workspace = true }
+repository = { workspace = true }
 version = "0.1.0"
 
 [dependencies]
 prost = { workspace = true }
-prost-types = { workspace = true }
 tonic = { workspace = true }
 
 [build-dependencies]
diff --git a/app/buck2_worker_proto/src/lib.rs b/app/buck2_worker_proto/src/lib.rs
index 2416c4b210335..25e2dbc700934 100644
--- a/app/buck2_worker_proto/src/lib.rs
+++ b/app/buck2_worker_proto/src/lib.rs
@@ -7,4 +7,6 @@
  * of this source tree.
*/ +#![feature(error_generic_member_access)] + tonic::include_proto!("worker"); diff --git a/app/buck2_worker_proto/worker.proto b/app/buck2_worker_proto/worker.proto index 0fb4ced5062b4..d87eab214143b 100644 --- a/app/buck2_worker_proto/worker.proto +++ b/app/buck2_worker_proto/worker.proto @@ -23,11 +23,13 @@ message ExecuteCommand { repeated bytes argv = 1; repeated EnvironmentEntry env = 2; + optional uint64 timeout_s = 3; } message ExecuteResponse { int32 exit_code = 1; string stderr = 2; + optional uint64 timed_out_after_s = 3; } message ExecuteCancel {} diff --git a/app/buck2_wrapper_common/BUCK b/app/buck2_wrapper_common/BUCK index bb20738dc85b0..4deb113af868d 100644 --- a/app/buck2_wrapper_common/BUCK +++ b/app/buck2_wrapper_common/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -32,12 +31,13 @@ rust_library( ], deps = [ "fbsource//third-party/rust:anyhow", + "fbsource//third-party/rust:derive_more", "fbsource//third-party/rust:serde", "fbsource//third-party/rust:shlex", "fbsource//third-party/rust:sysinfo", - "fbsource//third-party/rust:thiserror", "fbsource//third-party/rust:uuid", "//buck2/allocative/allocative:allocative", + "//buck2/app/buck2_error:buck2_error", "//buck2/gazebo/dupe:dupe", ], ) diff --git a/app/buck2_wrapper_common/Cargo.toml b/app/buck2_wrapper_common/Cargo.toml index 130609c27b1d4..c654d1cf6318f 100644 --- a/app/buck2_wrapper_common/Cargo.toml +++ b/app/buck2_wrapper_common/Cargo.toml @@ -1,25 +1,28 @@ [package] -name = "buck2_wrapper_common" -version = "0.1.0" -edition = "2021" description = """ Code shared with Meta internal buck2 launcher. Careful! The wrapper is not released as part of the regular buck version bumps, meaning code changes here are not "atomically" updated. 
""" +edition = "2021" +license = { workspace = true } +name = "buck2_wrapper_common" +repository = { workspace = true } +version = "0.1.0" [dependencies] allocative = { workspace = true } -dupe = { workspace = true } anyhow = { workspace = true } -once_cell = { workspace = true } -shlex = { workspace = true } +derive_more = { workspace = true } +dupe = { workspace = true } serde = { workspace = true } +shlex = { workspace = true } sysinfo = { workspace = true } -thiserror = { workspace = true } uuid = { workspace = true } +buck2_error = { workspace = true } + [target.'cfg(unix)'.dependencies] nix = { workspace = true } diff --git a/app/buck2_wrapper_common/src/invocation_id.rs b/app/buck2_wrapper_common/src/invocation_id.rs index 62fefc3db17f3..63708df34af54 100644 --- a/app/buck2_wrapper_common/src/invocation_id.rs +++ b/app/buck2_wrapper_common/src/invocation_id.rs @@ -22,7 +22,7 @@ use uuid::Uuid; use crate::BUCK_WRAPPER_UUID_ENV_VAR; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum TraceIdError { #[error("`{}` environment variable is not UTF-8", BUCK_WRAPPER_UUID_ENV_VAR)] EnvVarNotUtf8, @@ -110,7 +110,7 @@ impl TraceId { } #[cfg(test)] -mod test { +mod tests { use super::*; #[test] diff --git a/app/buck2_wrapper_common/src/is_buck2.rs b/app/buck2_wrapper_common/src/is_buck2.rs index 65dbb4cef454b..bc6e7d6a4249c 100644 --- a/app/buck2_wrapper_common/src/is_buck2.rs +++ b/app/buck2_wrapper_common/src/is_buck2.rs @@ -25,7 +25,7 @@ pub(crate) fn is_buck2_exe(path: &Path, who_is_asking: WhoIsAsking) -> bool { let Some(file_stem) = path.file_stem() else { return false; }; - if path.file_stem() == Some(OsStr::new("buck2")) { + if file_stem == OsStr::new("buck2") || file_stem == OsStr::new("buck2-daemon") { true } else { match who_is_asking { diff --git a/app/buck2_wrapper_common/src/kill.rs b/app/buck2_wrapper_common/src/kill.rs index 9c98bd4ff3fa8..457048bb5b341 100644 --- a/app/buck2_wrapper_common/src/kill.rs +++ b/app/buck2_wrapper_common/src/kill.rs @@ -9,162 +9,58 @@ //! Cross-platform process killing. -pub fn process_exists(pid: u32) -> anyhow::Result { - os_specific::process_exists(pid) +use std::time::Duration; + +use sysinfo::Process; + +use crate::pid::Pid; +#[cfg(unix)] +use crate::unix::kill as imp; +#[cfg(windows)] +use crate::win::kill as imp; + +pub fn process_creation_time(process: &Process) -> Option { + imp::process_creation_time(process) +} + +pub fn process_exists(pid: Pid) -> anyhow::Result { + imp::process_exists(pid) } /// Send `KILL` or call `TerminateProcess` on the given process. /// /// Returns a KilledProcessHandle that can be used to observe the termination of the killed process. -pub fn kill(pid: u32) -> anyhow::Result> { - match os_specific::kill(pid)? { - Some(handle) => Ok(Box::new(handle) as _), - None => Ok(Box::new(NoProcess) as _), +pub fn kill(pid: Pid) -> anyhow::Result> { + match imp::kill(pid)? { + Some(handle) => Ok(Some(KilledProcessHandle { handle })), + None => Ok(None), } } -pub trait KilledProcessHandle { - fn has_exited(&self) -> anyhow::Result; +pub struct KilledProcessHandle { + handle: imp::KilledProcessHandleImpl, +} - fn status(&self) -> Option; +impl KilledProcessHandle { + pub fn has_exited(&self) -> anyhow::Result { + self.handle.has_exited() + } } /// Get the status of a given process according to sysinfo. 
-pub fn get_sysinfo_status(pid: impl TryInto<u32>) -> Option<String> {
-    use sysinfo::Pid;
-    use sysinfo::PidExt;
-    use sysinfo::ProcessExt;
+pub fn get_sysinfo_status(pid: Pid) -> Option<sysinfo::ProcessStatus> {
     use sysinfo::ProcessRefreshKind;
     use sysinfo::System;
-    use sysinfo::SystemExt;
 
-    let pid = pid.try_into().ok()?;
-    let pid = Pid::from_u32(pid);
+    let pid = sysinfo::Pid::from_u32(pid.to_u32());
     let mut system = System::new();
-    system.refresh_process_specifics(pid, ProcessRefreshKind::new());
+    // There is some bug in `sysinfo`, so we have to use `refresh_processes_specifics`
+    // instead of `refresh_process_specifics`; otherwise we do not always get process info.
+    system.refresh_processes_specifics(ProcessRefreshKind::new());
     let proc = system.process(pid)?;
-    Some(proc.status().to_string())
-}
-
-/// Returned when os_specific::kill reports that nothing was killed because the process wasn't even
-/// running.
-struct NoProcess;
-
-impl KilledProcessHandle for NoProcess {
-    fn has_exited(&self) -> anyhow::Result<bool> {
-        Ok(true)
-    }
-
-    fn status(&self) -> Option<String> {
-        Some("NoProcess".to_owned())
-    }
-}
-
-#[cfg(unix)]
-mod os_specific {
-    use anyhow::Context;
-    use nix::sys::signal::Signal;
-
-    use crate::kill::get_sysinfo_status;
-    use crate::kill::KilledProcessHandle;
-
-    pub(crate) fn process_exists(pid: u32) -> anyhow::Result<bool> {
-        let pid = nix::unistd::Pid::from_raw(
-            pid.try_into()
-                .with_context(|| format!("Integer overflow converting pid {} to pid_t", pid))?,
-        );
-        match nix::sys::signal::kill(pid, None) {
-            Ok(_) => Ok(true),
-            Err(nix::errno::Errno::ESRCH) => Ok(false),
-            Err(e) => Err(e)
-                .with_context(|| format!("unexpected error checking if process {} exists", pid)),
-        }
-    }
-
-    fn process_exists_impl(pid: nix::unistd::Pid) -> anyhow::Result<bool> {
-        match nix::sys::signal::kill(pid, None) {
-            Ok(_) => Ok(true),
-            Err(nix::errno::Errno::ESRCH) => Ok(false),
-            Err(e) => Err(e)
-                .with_context(|| format!("unexpected error checking if process {} exists", pid)),
-        }
-    }
-
-    pub(super) fn kill(pid: u32) -> anyhow::Result<Option<UnixKilledProcessHandle>> {
-        let pid = nix::unistd::Pid::from_raw(
-            pid.try_into()
-                .with_context(|| format!("Integer overflow converting pid {} to pid_t", pid))?,
-        );
-
-        match nix::sys::signal::kill(pid, Signal::SIGKILL) {
-            Ok(()) => Ok(Some(UnixKilledProcessHandle { pid })),
-            Err(nix::errno::Errno::ESRCH) => Ok(None),
-            Err(e) => Err(e).with_context(|| format!("Failed to kill pid {}", pid)),
-        }
-    }
-
-    struct UnixKilledProcessHandle {
-        pid: nix::unistd::Pid,
-    }
-
-    impl KilledProcessHandle for UnixKilledProcessHandle {
-        fn has_exited(&self) -> anyhow::Result<bool> {
-            Ok(!process_exists_impl(self.pid)?)
-        }
-
-        fn status(&self) -> Option<String> {
-            get_sysinfo_status(self.pid.as_raw())
-        }
-    }
-}
-
-#[cfg(windows)]
-pub mod os_specific {
-    use std::time::Duration;
-
-    use crate::kill::get_sysinfo_status;
-    use crate::kill::KilledProcessHandle;
-    use crate::winapi_process::WinapiProcessHandle;
-
-    pub(crate) fn process_exists(pid: u32) -> anyhow::Result<bool> {
-        Ok(WinapiProcessHandle::open_for_info(pid).is_some())
-    }
-
-    pub(super) fn kill(pid: u32) -> anyhow::Result<Option<WindowsKilledProcessHandle>> {
-        let handle = match WinapiProcessHandle::open_for_terminate(pid) {
-            Some(proc_handle) => proc_handle,
-            None => return Ok(None),
-        };
-
-        handle.terminate()?;
-
-        Ok(Some(WindowsKilledProcessHandle { handle }))
-    }
-
-    /// Windows reuses PIDs more aggressively than UNIX, so there we add an extra guard in the form
-    /// of the process creation time.
-    struct WindowsKilledProcessHandle {
-        handle: WinapiProcessHandle,
-    }
-
-    impl KilledProcessHandle for WindowsKilledProcessHandle {
-        fn has_exited(&self) -> anyhow::Result<bool> {
-            self.handle.has_exited()
-        }
-
-        fn status(&self) -> Option<String> {
-            // Maybe there is a better way to get this via the handle, but for now this'll do.
-            get_sysinfo_status(self.handle.pid())
-        }
-    }
-
-    /// Returns process creation time with 100 ns precision.
-    pub fn process_creation_time(pid: u32) -> Option<Duration> {
-        let proc_handle = WinapiProcessHandle::open_for_info(pid)?;
-        proc_handle.process_creation_time().ok()
-    }
+    Some(proc.status())
 }
 
 #[cfg(test)]
@@ -176,6 +72,7 @@ mod tests {
 
     use crate::kill::kill;
     use crate::kill::process_exists;
+    use crate::pid::Pid;
 
     #[test]
     fn test_process_exists_kill() {
@@ -189,12 +86,14 @@ mod tests {
             command
         };
         let mut child = command.spawn().unwrap();
-        let pid = child.id();
-        for _ in 0..5 {
-            assert!(process_exists(pid).unwrap());
-        }
+        let pid = Pid::from_u32(child.id()).unwrap();
+        // TODO T187306095: we only check for existence once, because flakiness
+        assert!(
+            process_exists(pid).unwrap(),
+            "process should exist; attempt 1; pid {pid}"
+        );
 
-        let handle = kill(pid).unwrap();
+        let handle = kill(pid).unwrap().unwrap();
         child.wait().unwrap();
 
         // Drop child to ensure the Windows handle is closed.
diff --git a/app/buck2_wrapper_common/src/lib.rs b/app/buck2_wrapper_common/src/lib.rs
index e80a509f4ca9a..2d96b6e468e8b 100644
--- a/app/buck2_wrapper_common/src/lib.rs
+++ b/app/buck2_wrapper_common/src/lib.rs
@@ -7,6 +7,8 @@
  * of this source tree.
  */
 
+#![feature(error_generic_member_access)]
+
 //! Code shared between `buck2_wrapper` and `buck2`.
 //!
 //! Careful! The wrapper is not released as part of the regular buck version bumps,
@@ -20,37 +22,73 @@ use std::time::Duration;
 use std::time::Instant;
 
 use is_buck2::WhoIsAsking;
-use sysinfo::Pid;
-use sysinfo::PidExt;
-use sysinfo::ProcessExt;
 use sysinfo::System;
-use sysinfo::SystemExt;
 
 use crate::is_buck2::is_buck2_exe;
+use crate::pid::Pid;
 
 pub mod invocation_id;
 pub mod is_buck2;
 pub mod kill;
-pub mod winapi_handle;
-pub(crate) mod winapi_process;
+pub mod pid;
+#[cfg(unix)]
+mod unix;
+#[cfg(windows)]
+pub mod win;
 
 pub const BUCK2_WRAPPER_ENV_VAR: &str = "BUCK2_WRAPPER";
 pub const BUCK_WRAPPER_UUID_ENV_VAR: &str = "BUCK_WRAPPER_UUID";
 
 /// Because `sysinfo::Process` is not `Clone`.
 struct ProcessInfo {
-    pid: u32,
+    pid: Pid,
     name: String,
     cmd: Vec<String>,
 }
 
+/// Get the list of all PIDs on Linux
+///
+/// As of sysinfo 0.30, the `processes` function returns all posix TIDs (what the kernel calls
+/// PIDs), and not just all posix PIDs (what the kernel calls TGIDs). In order to make sure that we
+/// don't kill any of the TIDs in our PID, we need to filter the list of TIDs down. This function
+/// returns the list of all PIDs on the system.
+fn get_all_tgids_linux() -> Option<HashSet<sysinfo::Pid>> {
+    if !cfg!(target_os = "linux") {
+        return None;
+    }
+
+    let Ok(entries) = std::fs::read_dir("/proc") else {
+        return Some(HashSet::new());
+    };
+
+    let mut all_tgids = HashSet::new();
+
+    for e in entries {
+        let Ok(e) = e else {
+            continue;
+        };
+        let Ok(file_type) = e.file_type() else {
+            continue;
+        };
+        if !file_type.is_dir() {
+            continue;
+        }
+        let Some(pid) = e.file_name().to_str().and_then(|s| s.parse::<u32>().ok()) else {
+            continue;
+        };
+        all_tgids.insert(sysinfo::Pid::from_u32(pid));
+    }
+
+    Some(all_tgids)
+}
+
 /// Find all buck2 processes in the system.
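+///
+/// On Linux the candidate set is first intersected with the real TGIDs from
+/// `get_all_tgids_linux` above, so thread ids reported by sysinfo 0.30 are
+/// never treated as killable processes.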
 fn find_buck2_processes(who_is_asking: WhoIsAsking) -> Vec<ProcessInfo> {
     let mut system = System::new();
     system.refresh_processes();
 
     let mut current_parents = HashSet::new();
-    let mut parent = Some(Pid::from_u32(std::process::id()));
+    let mut parent = Some(sysinfo::Pid::from_u32(std::process::id()));
     while let Some(pid) = parent {
         // There is a small chance on Windows that the PID of a dead parent
         // was reused by some of its descendants, and this can create a loop.
@@ -60,11 +98,25 @@ fn find_buck2_processes(who_is_asking: WhoIsAsking) -> Vec<ProcessInfo> {
         parent = system.process(pid).and_then(|p| p.parent());
     }
 
+    let filtered_proc_list = get_all_tgids_linux();
+
     let mut buck2_processes = Vec::new();
     for (pid, process) in system.processes() {
-        if is_buck2_exe(process.exe(), who_is_asking) && !current_parents.contains(pid) {
+        // See comment on `get_all_tgids_linux`
+        if let Some(filtered_proc_list) = filtered_proc_list.as_ref() {
+            if !filtered_proc_list.contains(&pid) {
+                continue;
+            }
+        }
+        let Some(exe) = process.exe() else {
+            continue;
+        };
+        if is_buck2_exe(exe, who_is_asking) && !current_parents.contains(pid) {
+            let Ok(pid) = Pid::from_u32(pid.as_u32()) else {
+                continue;
+            };
             buck2_processes.push(ProcessInfo {
-                pid: pid.as_u32(),
+                pid,
                 name: process.name().to_owned(),
                 cmd: process.cmd().to_vec(),
             });
@@ -92,13 +144,9 @@ pub fn killall(who_is_asking: WhoIsAsking, write: impl Fn(String)) -> bool {
 
     impl Printer {
         fn fmt_status(&mut self, process: &ProcessInfo, status: &str) -> String {
-            format!(
-                "{} {} ({}). {}",
-                status,
-                process.name,
-                process.pid,
-                shlex::join(process.cmd.iter().map(|s| s.as_str())),
-            )
+            let cmd = shlex::try_join(process.cmd.iter().map(|s| s.as_str()))
+                .expect("Null byte unexpected");
+            format!("{} {} ({}). {}", status, process.name, process.pid, cmd,)
         }
 
         fn failed_to_kill(&mut self, process: &ProcessInfo, error: anyhow::Error) {
@@ -125,7 +173,8 @@ pub fn killall(who_is_asking: WhoIsAsking, write: impl Fn(String)) -> bool {
     let mut processes_still_alive: Vec<(ProcessInfo, _)> = Vec::new();
 
     for process in buck2_processes {
         match kill::kill(process.pid) {
-            Ok(handle) => processes_still_alive.push((process, handle)),
+            Ok(Some(handle)) => processes_still_alive.push((process, handle)),
+            Ok(None) => {}
             Err(e) => printer.failed_to_kill(&process, e),
         };
     }
diff --git a/app/buck2_wrapper_common/src/pid.rs b/app/buck2_wrapper_common/src/pid.rs
new file mode 100644
index 0000000000000..09c09c48c36d1
--- /dev/null
+++ b/app/buck2_wrapper_common/src/pid.rs
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::num::NonZeroU32;
+
+use anyhow::Context;
+use dupe::Dupe;
+
+/// Process id.
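+///
+/// Wraps a `NonZeroU32`: zero is rejected at construction, so a `Pid` always
+/// names a possible process. Illustrative usage:
+/// ```ignore
+/// let pid = Pid::from_u32(1234)?;
+/// assert_eq!(pid.to_u32(), 1234);
+/// assert!(Pid::from_u32(0).is_err());
+/// ```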
+#[derive(Debug, Clone, Copy, Dupe, derive_more::Display)] +#[display("{}", pid)] +pub struct Pid { + pid: NonZeroU32, +} + +impl Pid { + pub fn from_u32(pid: u32) -> anyhow::Result { + Ok(Pid { + pid: NonZeroU32::new(pid).context("pid must be non-zero")?, + }) + } + + pub fn from_i64(pid: i64) -> anyhow::Result { + Self::from_u32( + pid.try_into() + .context("integer overflow converting pid to u32")?, + ) + } + + pub fn to_u32(self) -> u32 { + self.pid.get() + } + + #[cfg(unix)] + pub fn to_nix(self) -> anyhow::Result { + Ok(nix::unistd::Pid::from_raw( + self.pid + .get() + .try_into() + .context("Integer overflow converting pid to pid_t")?, + )) + } +} diff --git a/app/buck2_wrapper_common/src/unix.rs b/app/buck2_wrapper_common/src/unix.rs new file mode 100644 index 0000000000000..ce63e3a75088e --- /dev/null +++ b/app/buck2_wrapper_common/src/unix.rs @@ -0,0 +1,10 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub(super) mod kill; diff --git a/app/buck2_wrapper_common/src/unix/kill.rs b/app/buck2_wrapper_common/src/unix/kill.rs new file mode 100644 index 0000000000000..52669bd6a361e --- /dev/null +++ b/app/buck2_wrapper_common/src/unix/kill.rs @@ -0,0 +1,77 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::time::Duration; + +use anyhow::Context; +use nix::sys::signal::Signal; +use sysinfo::Process; + +use crate::kill::get_sysinfo_status; +use crate::pid::Pid; + +pub(crate) fn process_creation_time(process: &Process) -> Option { + // Returns process creation time with 1 second precision. + Some(Duration::from_secs(process.start_time())) +} + +pub(crate) fn process_exists(pid: Pid) -> anyhow::Result { + Ok(match get_sysinfo_status(pid) { + // It occasionally happens that systemd on a machine becomes unresponsive and stops reaping + // its children. Unfortunately, there's not really much that we can do about that, and it + // does typically eventually resolve itself. In the meantime though, the user may be waiting + // on the daemon restart to finish to continue their work, so let's not block them. + Some(sysinfo::ProcessStatus::Zombie) => false, + Some(_) => true, + None => false, + }) +} + +pub(crate) fn kill(pid: Pid) -> anyhow::Result> { + let pid_nix = pid.to_nix()?; + + match nix::sys::signal::kill(pid_nix, Signal::SIGKILL) { + Ok(()) => Ok(Some(KilledProcessHandleImpl { pid })), + Err(nix::errno::Errno::ESRCH) => Ok(None), + Err(e) => Err(e).with_context(|| format!("Failed to kill pid {}", pid)), + } +} + +pub(crate) struct KilledProcessHandleImpl { + pid: Pid, +} + +impl KilledProcessHandleImpl { + pub(crate) fn has_exited(&self) -> anyhow::Result { + Ok(!process_exists(self.pid)?) 
+ } +} + +#[cfg(test)] +mod tests { + use std::thread::sleep; + use std::time::Duration; + + use buck2_util::process::background_command; + + use crate::pid::Pid; + use crate::unix::kill::process_exists; + + #[test] + fn test_zombie_process_exist() { + let mut command = background_command("sh"); + command.args(["-c", "exit 0"]); + let child = command.spawn().unwrap(); + // sleep a bit to let the child exit + sleep(Duration::from_secs(1)); + let pid = Pid::from_u32(child.id()).unwrap(); + // we consider a zombie to be non-existent + assert!(!process_exists(pid).unwrap(), "process shouldn't exist"); + } +} diff --git a/app/buck2_wrapper_common/src/win.rs b/app/buck2_wrapper_common/src/win.rs new file mode 100644 index 0000000000000..ce96492b24454 --- /dev/null +++ b/app/buck2_wrapper_common/src/win.rs @@ -0,0 +1,12 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub(super) mod kill; +pub mod winapi_handle; +pub(crate) mod winapi_process; diff --git a/app/buck2_wrapper_common/src/win/kill.rs b/app/buck2_wrapper_common/src/win/kill.rs new file mode 100644 index 0000000000000..178389546fdfe --- /dev/null +++ b/app/buck2_wrapper_common/src/win/kill.rs @@ -0,0 +1,49 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::time::Duration; + +use sysinfo::Process; + +use crate::pid::Pid; +use crate::win::winapi_process::WinapiProcessHandle; + +pub(crate) fn process_creation_time(process: &Process) -> Option<Duration> { + let pid = Pid::from_u32(process.pid().as_u32()).ok()?; + let proc_handle = WinapiProcessHandle::open_for_info(pid)?; + // Returns process creation time with 100 ns precision. + proc_handle.process_creation_time().ok() +} + +pub(crate) fn process_exists(pid: Pid) -> anyhow::Result<bool> { + Ok(WinapiProcessHandle::open_for_info(pid).is_some()) +} + +pub(crate) fn kill(pid: Pid) -> anyhow::Result<Option<KilledProcessHandleImpl>> { + let handle = match WinapiProcessHandle::open_for_terminate(pid) { + Some(proc_handle) => proc_handle, + None => return Ok(None), + }; + + handle.terminate()?; + + Ok(Some(KilledProcessHandleImpl { handle })) +} + +/// Windows reuses PIDs more aggressively than UNIX, so there we add an extra guard in the form +/// of the process creation time. +pub(crate) struct KilledProcessHandleImpl { + handle: WinapiProcessHandle, +} + +impl KilledProcessHandleImpl { + pub(crate) fn has_exited(&self) -> anyhow::Result<bool> { + self.handle.has_exited() + } +} diff --git a/app/buck2_wrapper_common/src/win/winapi_handle.rs b/app/buck2_wrapper_common/src/win/winapi_handle.rs new file mode 100644 index 0000000000000..5d718e9864269 --- /dev/null +++ b/app/buck2_wrapper_common/src/win/winapi_handle.rs @@ -0,0 +1,56 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree.
+ */ + +#![cfg(windows)] + +use std::io; + +use winapi::um::handleapi::CloseHandle; +use winapi::um::winnt::HANDLE; + +/// Close handle on drop. +pub struct WinapiHandle { + handle: HANDLE, +} + +unsafe impl Send for WinapiHandle {} +unsafe impl Sync for WinapiHandle {} + +impl WinapiHandle { + /// Unsafe because it closes the handle on drop. + pub unsafe fn new(handle: HANDLE) -> Option<WinapiHandle> { + if handle.is_null() { + None + } else { + Some(WinapiHandle { handle }) + } + } + + /// Wrap a handle, call `last_os_error` if it's null. + pub unsafe fn new_check_last_os_error(handle: HANDLE) -> anyhow::Result<WinapiHandle> { + if let Some(handle) = WinapiHandle::new(handle) { + Ok(handle) + } else { + Err(io::Error::last_os_error().into()) + } + } + + pub fn handle(&self) -> HANDLE { + self.handle + } +} + +impl Drop for WinapiHandle { + fn drop(&mut self) { + unsafe { + let res = CloseHandle(self.handle); + assert!(res != 0, "CloseHandle failed"); + }; + } +} diff --git a/app/buck2_wrapper_common/src/winapi_process.rs b/app/buck2_wrapper_common/src/win/winapi_process.rs similarity index 90% rename from app/buck2_wrapper_common/src/winapi_process.rs rename to app/buck2_wrapper_common/src/win/winapi_process.rs index 6041da884d5a4..b131b62177b68 100644 --- a/app/buck2_wrapper_common/src/winapi_process.rs +++ b/app/buck2_wrapper_common/src/win/winapi_process.rs @@ -22,33 +22,33 @@ use winapi::um::processthreadsapi::TerminateProcess; use winapi::um::winnt::PROCESS_QUERY_INFORMATION; use winapi::um::winnt::PROCESS_TERMINATE; -use crate::winapi_handle::WinapiHandle; +use crate::pid::Pid; +use crate::win::winapi_handle::WinapiHandle; /// `HANDLE` which points to a process. pub(crate) struct WinapiProcessHandle { handle: WinapiHandle, - pid: u32, + pid: Pid, } impl WinapiProcessHandle { /// Open a process handle to query. `None` if process doesn't exist. - pub(crate) fn open_for_info(pid: u32) -> Option<WinapiProcessHandle> { + pub(crate) fn open_for_info(pid: Pid) -> Option<WinapiProcessHandle> { WinapiProcessHandle::open_impl(pid, PROCESS_QUERY_INFORMATION) } /// Open a process handle to terminate. `None` if process doesn't exist. - pub(crate) fn open_for_terminate(pid: u32) -> Option<WinapiProcessHandle> { + pub(crate) fn open_for_terminate(pid: Pid) -> Option<WinapiProcessHandle> { WinapiProcessHandle::open_impl(pid, PROCESS_TERMINATE | PROCESS_QUERY_INFORMATION) } - fn open_impl(pid: u32, desired_access: u32) -> Option<WinapiProcessHandle> { - let proc_handle = unsafe { OpenProcess(desired_access, 0, pid) }; - if proc_handle.is_null() { + fn open_impl(pid: Pid, desired_access: u32) -> Option<WinapiProcessHandle> { + let proc_handle = unsafe { OpenProcess(desired_access, 0, pid.to_u32()) }; + let Some(handle) = (unsafe { WinapiHandle::new(proc_handle) }) else { // If proc_handle is null, process died already, or other error like access denied. // TODO(nga): handle error properly. return None; - } - let handle = unsafe { WinapiHandle::new(proc_handle) }; + }; Some(WinapiProcessHandle { handle, pid }) } @@ -120,8 +120,4 @@ impl WinapiProcessHandle { pub(crate) fn has_exited(&self) -> anyhow::Result<bool> { Ok(self.exit_code()?.is_some()) } - - pub(crate) fn pid(&self) -> u32 { - self.pid - } } diff --git a/app/buck2_wrapper_common/src/winapi_handle.rs b/app/buck2_wrapper_common/src/winapi_handle.rs deleted file mode 100644 index c650c8a84d370..0000000000000 --- a/app/buck2_wrapper_common/src/winapi_handle.rs +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates.
- * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -#![cfg(windows)] - -use winapi::um::handleapi::CloseHandle; -use winapi::um::winnt::HANDLE; - -/// Close handle on drop. -pub struct WinapiHandle { - handle: HANDLE, -} - -impl WinapiHandle { - /// Unsafe because it closes the handle on drop. - pub unsafe fn new(handle: HANDLE) -> WinapiHandle { - WinapiHandle { handle } - } - - pub fn handle(&self) -> HANDLE { - self.handle - } -} - -impl Drop for WinapiHandle { - fn drop(&mut self) { - unsafe { - if !self.handle.is_null() { - let res = CloseHandle(self.handle); - assert!(res != 0, "CloseHandle failed"); - } - }; - } -} diff --git a/app/modifier.bzl b/app/modifier.bzl new file mode 100644 index 0000000000000..7a6f747b384d4 --- /dev/null +++ b/app/modifier.bzl @@ -0,0 +1,149 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@fbcode//buck2/cfg/experimental:modifiers.bzl", "modifiers") + +def buck2_modifiers(): + # **WARNING**: This is not vetted for correctness and should only be used in fbcode/buck2. + # A somewhat hacked together list of modifiers to enable mode-free builds and opt-by-default-cxx builds. + # This currently only works for linux and mac but not for cross-building (e.g. building mac from linux). + # - Mode-free builds: Users can use `-m opt` or `--modifier opt` instead of `@fbcode//mode/opt` to trigger builds. + # E2e tests build with opt buck2 by default. + # - Opt-by-default cxx: Dev mode builds of buck2 come with optimized, sanitizer-free cxx deps by default, making + # dev mode buck2 significantly more usable. + # + # This is a demo that all of this could be done with modifiers. Most of these modifiers should be set on higher-level + # PACKAGE files like fbcode/PACKAGE or fbsource/PACKAGE. + # + # Known problems: + # - We have to explicitly disable link groups at the moment because link group macros are supposed to only be turned + # on for dev mode and it checks for this by reading the dev mode buckconfig, but the opt modifier also uses dev + # mode buckconfig, and as a result the presence of a link group map breaks our opt modifier build.
+ + return [ + modifiers.conditional({ + "DEFAULT": modifiers.conditional({ + "DEFAULT": "ovr_config//build_mode/default_opt_cxx:disabled", + "ovr_config//build_mode:dev": "ovr_config//build_mode/default_opt_cxx:enabled", + "ovr_config//build_mode:opt": "ovr_config//build_mode/default_opt_cxx:enabled", + }), + # Opt by default cxx toolchain would override the thin-lto toolchain, so don't use opt by default toolchain + # if thin-lto is present + "ovr_config//build_mode/constraints:lto-thin": "ovr_config//build_mode/default_opt_cxx:disabled", + }), + modifiers.conditional({ + "DEFAULT": None, + "ovr_config//build_mode/default_opt_cxx:enabled": modifiers.conditional({ + "DEFAULT": None, + "ovr_config//os:linux": modifiers.conditional({ + "DEFAULT": modifiers.conditional({ + "DEFAULT": "ovr_config//build_mode/constraints:static", + "ovr_config//build_mode:dev": "ovr_config//build_mode/constraints:shared", + }), + "ovr_config//build_mode:asan": "ovr_config//build_mode/constraints:static_pic", + }), + }), + }), + modifiers.conditional({ + "DEFAULT": None, + "ovr_config//build_mode/default_opt_cxx:enabled": modifiers.conditional({ + "DEFAULT": None, + "ovr_config//os:linux": modifiers.conditional({ + "DEFAULT": "ovr_config//build_mode/constraints:split-dwarf", + "ovr_config//build_mode:dev": None, + }), + }), + }), + modifiers.conditional({ + "DEFAULT": None, + "ovr_config//build_mode/default_opt_cxx:enabled": ( + "ovr_config//build_mode:no-san" + ), + # Unfortunately, setting `ovr_config//build_mode:no-san` like this is a bit problematic when using sanitizer + # modefiles because the no-san value here would override the sanitizer constraint set by those modefiles + # in the target platform, meaning we would always get sanitizer disabled no matter what sanitizer modefile + # is used. To work around this, explicitly check that we are not using any sanitizer modefile by checking + # that the `fbcode.sanitizer` buckconfig is set to the default value "address-undefined-dev". We can undo this + # change in the future when our CI is updated to use sanitizer modifiers instead of sanitizer modefiles. 
+ }) if read_config("fbcode", "sanitizer") == "address-undefined-dev" else None, + modifiers.conditional({ + "DEFAULT": None, + "ovr_config//build_mode/default_opt_cxx:enabled": modifiers.conditional({ + "DEFAULT": "ovr_config//build_mode/constraints:fbcode-build-info-mode-stable", + "ovr_config//build_mode:opt": "ovr_config//build_mode/constraints:fbcode-build-info-mode-full", + }), + }), + modifiers.conditional({ + "DEFAULT": None, + "ovr_config//build_mode/default_opt_cxx:enabled": modifiers.conditional({ + "DEFAULT": "ovr_config//build_mode/constraints:python-default-package-style-inplace", + "ovr_config//build_mode:opt": "ovr_config//build_mode/constraints:python-default-package-style-standalone", + }), + }), + modifiers.conditional({ + "DEFAULT": None, + "ovr_config//build_mode/default_opt_cxx:enabled": modifiers.conditional({ + "DEFAULT": None, + "ovr_config//os:macos": "ovr_config//build_mode/constraints:fbcode-build-info-ldflags-accepted", + }), + }), + modifiers.conditional({ + "DEFAULT": None, + "ovr_config//build_mode/default_opt_cxx:enabled": modifiers.conditional({ + "DEFAULT": None, + "ovr_config//os:macos": "ovr_config//build_mode/constraints:fbcode-custom-allocators-enabled", + }), + }), + modifiers.conditional({ + "DEFAULT": None, + "ovr_config//build_mode/default_opt_cxx:enabled": modifiers.conditional({ + "DEFAULT": None, + "ovr_config//os:macos": "ovr_config//toolchain/fb/constraints:macos-minimal", + }), + }), + # TODO(scottcao): This modifier can be deleted if D61497000 lands successfully + modifiers.conditional({ + "DEFAULT": None, + "ovr_config//build_mode/default_opt_cxx:enabled": modifiers.conditional({ + "DEFAULT": None, + "ovr_config//os:macos": "ovr_config//toolchain/xcode/force_minimal_xcode:yes", + }), + }), + modifiers.conditional({ + "DEFAULT": None, + "ovr_config//build_mode/default_opt_cxx:enabled": modifiers.conditional({ + "DEFAULT": None, + "ovr_config//runtime:fbcode": modifiers.conditional({ + "ovr_config//cpu:arm64": "ovr_config//runtime/constraints:platform010-aarch64", + "ovr_config//cpu:x86_64": "ovr_config//runtime/constraints:platform010", + }), + }), + }), + modifiers.conditional({ + "DEFAULT": None, + "ovr_config//build_mode/default_opt_cxx:enabled": modifiers.conditional({ + "DEFAULT": None, + "ovr_config//runtime:fbcode": modifiers.conditional({ + "ovr_config//cpu:arm64": "ovr_config//cpu/constraints:nosve2", + "ovr_config//cpu:x86_64": "ovr_config//cpu/constraints:sve2", + }), + }), + }), + modifiers.conditional({ + "DEFAULT": None, + "ovr_config//build_mode/default_opt_cxx:enabled": modifiers.conditional({ + "DEFAULT": None, + "ovr_config//os:linux": modifiers.conditional({ + "DEFAULT": None, + "ovr_config//cpu:arm64": "ovr_config//cpu/constraints:armv8.4a", + }), + }), + }), + ] + +def disable_buck2_modifiers(): + return ["ovr_config//build_mode/default_opt_cxx:disabled"] diff --git a/app_dep_graph_rules/BUCK b/app_dep_graph_rules/BUCK new file mode 100644 index 0000000000000..1428a88ca5731 --- /dev/null +++ b/app_dep_graph_rules/BUCK @@ -0,0 +1,8 @@ +load(":test_impl.bzl", "test_buck2_dep_graph") + +oncall("build_infra") + +# Note: This test is run by building this target +test_buck2_dep_graph( + name = "test_buck2_dep_graph", +) diff --git a/app_dep_graph_rules/rules.bzl b/app_dep_graph_rules/rules.bzl new file mode 100644 index 0000000000000..c41f128b4709c --- /dev/null +++ b/app_dep_graph_rules/rules.bzl @@ -0,0 +1,29 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# Avoid some copy-paste +def _app(s): + return "//buck2/app/" + s + ":" + s + +# These crates should only implement late bindings and not be depended on +# directly +LATE_BINDING_ONLY_CRATES = [ + _app("buck2_anon_target"), + _app("buck2_audit_server"), + _app("buck2_bxl"), + _app("buck2_query_impls"), +] + +# Unordered pairs where neither crate may depend on the other +BANNED_DEP_PATHS = [ + (_app("buck2_common"), _app("buck2_directory")), + (_app("buck2_common"), "//buck2/starlark-rust/starlark:starlark"), + (_app("buck2_build_api"), _app("buck2_execute_impl")), + (_app("buck2_build_api"), _app("buck2_interpreter_for_build")), + (_app("buck2_server"), _app("buck2_server_commands")), + (_app("buck2_bxl"), _app("buck2_configured")), +] diff --git a/app_dep_graph_rules/test_impl.bzl b/app_dep_graph_rules/test_impl.bzl new file mode 100644 index 0000000000000..1cd003ebbd4d4 --- /dev/null +++ b/app_dep_graph_rules/test_impl.bzl @@ -0,0 +1,83 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# @oss-disable: load("@fbcode_macros//build_defs:platform_utils.bzl", "platform_utils") +load(":rules.bzl", "BANNED_DEP_PATHS", "LATE_BINDING_ONLY_CRATES") + +platform_utils = None # @oss-enable + +def _dtp(): + return platform_utils.get_cxx_platform_for_base_path(package_name()).target_platform if platform_utils else None + +def _check_client_to_re_path(ctx: AnalysisContext): + path = ctx.attrs.client_to_re_path + if len(path) != 0: + m = "Buck2 client binary may not have a dependency on `fbcode//remote_execution/`!" + m += "\nDependency path:" + m += "".join(["\n" + str(t) for t in path]) + fail(m) + +def _check_late_binding_only(ctx: AnalysisContext): + for all_paths in ctx.attrs.late_binding_only_paths: + all_paths = list(all_paths) + target = all_paths.pop() + all_paths.pop(0) + if len(all_paths) != 0: + m = "Late-binding-only crate `" + str(target.label) + "` may not be depended on by:" + m += "".join(["\n" + str(p.label) for p in all_paths]) + fail(m) + +def _check_banned_dep_paths(ctx: AnalysisContext): + for path in ctx.attrs.banned_dep_paths: + if len(path) > 0: + a = path[-1].label + b = path[0].label + m = str(a) + " may not depend on " + str(b) + "! 
Path:" + m += "".join(["\n" + str(p.label) for p in path]) + fail(m) + +def _impl(ctx: AnalysisContext): + _check_client_to_re_path(ctx) + _check_late_binding_only(ctx) + _check_banned_dep_paths(ctx) + return [DefaultInfo()] + +_test_buck2_dep_graph = rule( + impl = _impl, + attrs = { + "banned_dep_paths": attrs.list(attrs.query()), + "client_to_re_path": attrs.query(), + "late_binding_only_paths": attrs.list(attrs.query()), + }, +) + +_CLIENT_BIN = "fbcode//buck2/app/buck2:buck2_client-bin" +_BUCK2_BIN = "fbcode//buck2/app/buck2:buck2-bin" + +_RE_CLIENT_TARGET = "//remote_execution/client_lib/wrappers/rust:re_client_lib" + +_CLIENT_TO_RE = "somepath({}, filter(fbcode//remote_execution/, deps({})) + {})".format(_CLIENT_BIN, _CLIENT_BIN, _RE_CLIENT_TARGET) + +def test_buck2_dep_graph(name): + banned_dep_paths = [] + for a, b in BANNED_DEP_PATHS: + if a > b: + m = "`BANNED_DEP_PATHS` entries must be sorted:\n" + m += " " + str(a) + "\n" + m += " > " + str(b) + fail(m) + + banned_dep_paths.append("somepath({}, {})".format(a, b)) + banned_dep_paths.append("somepath({}, {})".format(b, a)) + + _test_buck2_dep_graph( + name = name, + client_to_re_path = _CLIENT_TO_RE, + late_binding_only_paths = ["allpaths({}, {})".format(_BUCK2_BIN, c) for c in LATE_BINDING_ONLY_CRATES], + banned_dep_paths = banned_dep_paths, + default_target_platform = _dtp(), + ) diff --git a/buck_rust_binary.bzl b/buck_rust_binary.bzl index dc6cd081549c2..d75af79cf9f3e 100644 --- a/buck_rust_binary.bzl +++ b/buck_rust_binary.bzl @@ -17,6 +17,14 @@ def buck_rust_binary(**kwargs): "ovr_config//os:macos": "static", }) + # Link group is currently used automatically for rust in dev mode. Unfortunately, it builds + # a binary that is not relocatable and it checks for dev mode by reading the build mode buckconfig. + # If we don't disable link groups, we will also end up building a non-relocatable binary when + # using opt modifier because opt modifier does not change build mode buckconfig. Work around this + # by disabling link groups for now. + # TODO(scottcao): Delete this line once link group macros are properly selectified. + kwargs["link_group_map"] = [] + # JEMalloc is not (yet!) the default on MacOS so add the allocator # explicitly on all platforms here. kwargs.setdefault("allocator", "jemalloc") diff --git a/build/buck2/README.md b/build/buck2/README.md new file mode 100644 index 0000000000000..c4237b1abab28 --- /dev/null +++ b/build/buck2/README.md @@ -0,0 +1,16 @@ +# Easy buck2 builds for Facebook projects + +This directory contains buck2 targets designed to simplify buck2 builds of +Meta open source projects. + +The most notable target is `//build/buck2/install_deps`, which will attempt to +discover and install necessary third party packages from apt / dnf / etc. +See the "repos" directory for the currently supported platforms. + +## Deployment + +This directory is copied literally into a number of different Facebook open +source repositories. Any change made to code in this directory will +automatically be replicated by our open source tooling into all GitHub hosted +repositories that use `buck2`. Typically this directory is copied +into the open source repositories as `build/buck2/`. diff --git a/build/buck2/install_deps/BUCK b/build/buck2/install_deps/BUCK new file mode 100644 index 0000000000000..99bab33a722c3 --- /dev/null +++ b/build/buck2/install_deps/BUCK @@ -0,0 +1,15 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates.
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. +load("@fbcode_macros//build_defs:native_rules.bzl", "buck_sh_binary") + +oncall("open_source") + +buck_sh_binary( + name = "install_deps", + main = "install_deps.sh", + resources = glob(["repos/*"]), +) diff --git a/build/buck2/install_deps/install_deps.sh b/build/buck2/install_deps/install_deps.sh new file mode 100755 index 0000000000000..b715acc025c27 --- /dev/null +++ b/build/buck2/install_deps/install_deps.sh @@ -0,0 +1,71 @@ +#!/bin/sh +# Copyright (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. + +if [ -z "$INSTALL_COMMAND" ]; then + if [ -f /etc/os-release ]; then + . /etc/os-release; + fi + + if command -v brew >/dev/null; then + ID="homebrew"; + fi + + if [ -f "$BUCK_DEFAULT_RUNTIME_RESOURCES/repos/$ID" ]; then + # shellcheck disable=SC1090 + . "$BUCK_DEFAULT_RUNTIME_RESOURCES/repos/$ID"; + else + echo "Unable to determine platform id / install commands"; + return 1; + fi +fi + +if [ -z "${BUCK2_COMMAND}" ]; then + if command -v buck2 >/dev/null; then + BUCK2_COMMAND="buck2" + elif command -v dotslash >/dev/null && [ -f ./buck2 ]; then + BUCK2_COMMAND="dotslash ./buck2" + else + echo "Unable to determine buck2 command"; + return 1; + fi +fi + +__confirm() { + echo "Press \"y\" to continue" + read -r REPLY + expr "X$REPLY" : '^X[Yy]$' >/dev/null +} + +PKG_FILE=$(mktemp /tmp/buck2-install-pkgs.XXXXXX) + +if ! command -v jq >/dev/null; then + echo "Failed to find jq command, attempting to install with" + echo + echo "$INSTALL_COMMAND" jq + echo + if __confirm; then + eval "$INSTALL_COMMAND jq" + else + echo "Not confirmed, exiting"; + exit 1 + fi +fi + +eval "$BUCK2_COMMAND cquery 'kind(system_packages, deps(//...))' \\ + --output-attribute=packages --modifier $ID --json 2>/dev/null \\ + | jq -r '.[].packages[]' \\ + | sort \\ + | uniq \\ + > $PKG_FILE" + +echo "About to install the project dependencies with the following command:" +echo +eval "cat $PKG_FILE | xargs echo $INSTALL_COMMAND" +echo +if __confirm; then + eval "cat $PKG_FILE | xargs -r $INSTALL_COMMAND" +else + echo "Not installing dependencies" +fi + +rm "$PKG_FILE" diff --git a/build/buck2/install_deps/repos/fedora b/build/buck2/install_deps/repos/fedora new file mode 100755 index 0000000000000..6182343bd6623 --- /dev/null +++ b/build/buck2/install_deps/repos/fedora @@ -0,0 +1 @@ +INSTALL_COMMAND="sudo -E dnf install -y" diff --git a/build/buck2/install_deps/repos/homebrew b/build/buck2/install_deps/repos/homebrew new file mode 100755 index 0000000000000..5d1dafb821511 --- /dev/null +++ b/build/buck2/install_deps/repos/homebrew @@ -0,0 +1 @@ +INSTALL_COMMAND="brew install" diff --git a/build/buck2/install_deps/repos/ubuntu b/build/buck2/install_deps/repos/ubuntu new file mode 100755 index 0000000000000..b1ce680002993 --- /dev/null +++ b/build/buck2/install_deps/repos/ubuntu @@ -0,0 +1 @@ +INSTALL_COMMAND="sudo -E apt-get install -y" diff --git a/cfg/experimental/cfg_constructor.bzl b/cfg/experimental/cfg_constructor.bzl deleted file mode 100644 index 10a876d5e5ad5..0000000000000 --- a/cfg/experimental/cfg_constructor.bzl +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. 
-# -# This source code is licensed under both the MIT license found in the -# LICENSE-MIT file in the root directory of this source tree and the Apache -# License, Version 2.0 found in the LICENSE-APACHE file in the root directory -# of this source tree. - -load(":name.bzl", "cfg_name") - -PostConstraintAnalysisParams = record( - legacy_platform = PlatformInfo | None, - cli_modifiers = list[str], -) - -def cfg_constructor_pre_constraint_analysis( - *, - legacy_platform: PlatformInfo | None, - cli_modifiers: list[str]) -> (list[str], PostConstraintAnalysisParams): - """ - First stage of cfg constructor for modifiers. - - Args: - legacy_platform: PlatformInfo from legacy target platform resolution, if one is specified - cli_modifiers: modifiers specified from `--modifier` flag, `?modifier`, or BXL - - Returns `(refs, PostConstraintAnalysisParams)`, where `refs` is a list of fully qualified configuration - targets we need providers for. - """ - - refs = cli_modifiers - return refs, PostConstraintAnalysisParams( - legacy_platform = legacy_platform, - cli_modifiers = cli_modifiers, - ) - -def cfg_constructor_post_constraint_analysis( - *, - refs: dict[str, ProviderCollection], - params: PostConstraintAnalysisParams) -> PlatformInfo: - """ - Second stage of cfg constructor for modifiers. - - Args: - refs: a dictionary of fully qualified target labels for configuration targets with their providers - params: `PostConstraintAnalysisParams` returned from first stage of cfg constructor - - Returns a PlatformInfo - """ - - modifiers = params.cli_modifiers - - if not modifiers: - # If there is no modifier and legacy platform is specified, - # then return the legacy platform as is without changing the label or - # configuration. - return params.legacy_platform or PlatformInfo( - # Empty configuration - label = "", - configuration = ConfigurationInfo( - constraints = {}, - values = {}, - ), - ) - - constraints = {} - for modifier in modifiers: - constraint_value = refs[modifier][ConstraintValueInfo] - constraints[constraint_value.setting.label] = constraint_value - - if params.legacy_platform: - # For backwards compatibility with legacy target platform, any constraint setting - # from legacy target platform not covered by modifiers will be added to the configuration - for key, value in params.legacy_platform.configuration.constraints.items(): - if key not in constraints: - constraints[key] = value - - cfg = ConfigurationInfo( - constraints = constraints, - values = {}, - ) - name = cfg_name(cfg) - return PlatformInfo( - label = name, - configuration = cfg, - ) diff --git a/cfg/experimental/modifiers.bzl b/cfg/experimental/modifiers.bzl new file mode 100644 index 0000000000000..fa2d558d5fc59 --- /dev/null +++ b/cfg/experimental/modifiers.bzl @@ -0,0 +1,79 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//cfg/modifier:asserts.bzl?v2_only", "verify_normalized_modifier", "verify_normalized_target") +load( + "@prelude//cfg/modifier:types.bzl?v2_only", + "Modifier", # @unused Used in type annotation + "ModifiersMatch", +) + +def _modifiers_match( + matcher: dict[str, Modifier]) -> ModifiersMatch: + """ + A select operator for modifiers. 
A `modifiers.match` specifies a way for a + modifier to be added based on an existing constraint in the configuration. + The `matcher` is a dictionary that maps from a set of constraints to a + modifier. + + For example, suppose `cfg//os:linux` and `cfg//os:windows` are constraint values + for OS and `cfg//compiler:clang` and `cfg//compiler:msvc` are constraint values + for compiler. The following `modifiers.match` conditionally adds the msvc constraint + if the windows constraint is matched or adds the clang constraint if the + linux constraint is matched. + ``` + modifiers.match({ + "cfg//os:windows": "cfg//compiler:msvc", + "cfg//os:linux": "cfg//compiler:clang", + "DEFAULT": None, + }) + ``` + "DEFAULT" is a special key that represents the default case. If no other keys match, + then the modifier specified by DEFAULT will be used. + If None is specified, then a modifier will not be added. + + `modifiers.match`s can be stacked. For example, + suppose this modifier is specified in fbcode/PACKAGE + ``` + modifier = modifiers.match({ + "cfg//os:windows": "cfg//compiler:msvc", + "DEFAULT": None, + }) + ``` + Suppose this modifier is specified in fbcode/project/PACKAGE + ``` + modifier = modifiers.match({ + "cfg//os:linux": "cfg//compiler:clang", + "DEFAULT": None, + }) + ``` + For any target covered by fbcode/project/PACKAGE, this is + equivalent to a single modifier that specifies + ``` + modifiers.match({ + "cfg//os:windows": "cfg//compiler:msvc", + "DEFAULT": modifiers.match({ + "DEFAULT": None, + "cfg//os:linux": "cfg//compiler:clang", + }) + }) + ``` + """ + + for key, sub_modifier in matcher.items(): + if key != "DEFAULT": + verify_normalized_target(key) + verify_normalized_modifier(sub_modifier) + + matcher["_type"] = "ModifiersMatch" + return matcher + +modifiers = struct( + # modifiers.match is deprecated in favor of modifiers.conditional + match = _modifiers_match, + conditional = _modifiers_match, +) diff --git a/cfg/experimental/name.bzl b/cfg/experimental/name.bzl deleted file mode 100644 index 7f8d2009d5ece..0000000000000 --- a/cfg/experimental/name.bzl +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under both the MIT license found in the -# LICENSE-MIT file in the root directory of this source tree and the Apache -# License, Version 2.0 found in the LICENSE-APACHE file in the root directory -# of this source tree. - -NAMED_CONSTRAINT_SETTINGS = [ - # TODO(scottcao): Add OSS constraints as well - "ovr_config//os/constraints:os", - "ovr_config//cpu/constraints:cpu", - "ovr_config//runtime/constraints:runtime", - "ovr_config//runtime/constraints:runtime_version", - "ovr_config//os/sdk/apple/constraints:_", - "ovr_config//os/sdk/android/ndk/constraints:version", - "ovr_config//build_mode/constraints:san", -] - -def cfg_name(cfg: ConfigurationInfo) -> str: - """Derives a reasonable name for a ConfigurationInfo""" - - name_list = [] - constraints = {str(key): value for key, value in cfg.constraints.items()} - for constraint_setting in NAMED_CONSTRAINT_SETTINGS: - if constraint_setting in constraints: - name_list.append(constraints[constraint_setting].label.name) - return "-".join(name_list) diff --git a/cfg/experimental/set_cfg_modifiers.bzl b/cfg/experimental/set_cfg_modifiers.bzl new file mode 100644 index 0000000000000..0a0f78cb5c7c4 --- /dev/null +++ b/cfg/experimental/set_cfg_modifiers.bzl @@ -0,0 +1,34 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates.
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//cfg/modifier:set_cfg_modifiers.bzl", _set_cfg_modifiers = "set_cfg_modifiers") +load("@prelude//cfg/modifier:types.bzl", "Modifier") # @unused Used in type annotation + +########################################################## +# NOTE: This file is now available in the buck2 prelude. # +# # +# You should prefer including / using that version. # +########################################################## + +def set_cfg_modifiers( + cfg_modifiers: list[Modifier] | None = None, + extra_cfg_modifiers_per_rule: dict[str, list[Modifier]] | None = None): + """ + Sets a configuration modifier for all targets under this PACKAGE file. This can only be called from a PACKAGE file context + (e.g. a PACKAGE file or a bzl file transitively loaded by a PACKAGE file). + + Args: + cfg_modifiers: + A list of modifiers to set. The simplest modifier is a constraint value target. + For example, to change the OS to linux in fbsource, this can be specified as `["ovr_config//os/constraints:linux"]`. + extra_cfg_modifiers_per_rule: + A dictionary of rule name to a list of modifiers to set. This is applied on top of modifiers from `cfg_modifiers` parameter + if a target's rule name matches the key, so it can override any modifier from `cfg_modifiers` parameter in the same PACKAGE. + For example, if this dictionary is `{"python_binary": ["ovr_config//os/constraints:macos"]}`, + then all python_binary targets covered will have the macos constraint added to their configurations. + """ + _set_cfg_modifiers(cfg_modifiers, extra_cfg_modifiers_per_rule) diff --git a/defs.bzl b/defs.bzl index 8cc7f373678db..5402b65b44fe3 100644 --- a/defs.bzl +++ b/defs.bzl @@ -6,34 +6,39 @@ # of this source tree. load("@fbcode_macros//build_defs:platform_utils.bzl", "platform_utils") +load("@prelude//decls:common.bzl", "buck") +load("@prelude//os_lookup:defs.bzl", "OsLookup") -def _symlinked_buck2_and_tpx_impl(ctx: AnalysisContext) -> list[Provider]: +def _buck2_bundle_impl(ctx: AnalysisContext) -> list[Provider]: """ Produce a directory layout that is similar to the one our release binary uses, this allows setting a path for Tpx relative to BUCK2_BINARY_DIR. - - We do the whole BUCK2_BINARY_DIR_RELATIVE_TO dance to support doing this - using just symlinks. If we're willing to do a copy we can just - `out.project("buck2")` and we're done. 
""" + target_is_windows = ctx.attrs._target_os_type[OsLookup].platform == "windows" + buck2 = ctx.attrs.buck2[DefaultInfo].default_outputs[0] + buck2_client = ctx.attrs.buck2_client[DefaultInfo].default_outputs[0] tpx = ctx.attrs.tpx[DefaultInfo].default_outputs[0] - out = ctx.actions.symlinked_dir("out", {"buck2": buck2, "buck2-tpx": tpx}) - - cmd = cmd_args( - "/usr/bin/env", - cmd_args(out, format = "BUCK2_BINARY_DIR_RELATIVE_TO={}").relative_to(buck2, parent = 1), - out.project("buck2"), - ).hidden(out) + binary_extension = ".exe" if target_is_windows else "" + buck2_binary = "buck2" + binary_extension + buck2_tpx_binary = "buck2-tpx" + binary_extension + buck2_daemon_binary = "buck2-daemon" + binary_extension + out = ctx.actions.copied_dir("out", {buck2_binary: buck2_client, buck2_tpx_binary: tpx, buck2_daemon_binary: buck2}) - return [DefaultInfo(out), RunInfo(cmd)] + return [DefaultInfo(out), RunInfo(cmd_args(out.project("buck2" + binary_extension)))] -_symlinked_buck2_and_tpx = rule( - impl = _symlinked_buck2_and_tpx_impl, - attrs = {"buck2": attrs.dep(), "labels": attrs.list(attrs.string(), default = []), "tpx": attrs.dep()}, +_buck2_bundle = rule( + impl = _buck2_bundle_impl, + attrs = { + "buck2": attrs.dep(), + "buck2_client": attrs.dep(), + "labels": attrs.list(attrs.string(), default = []), + "tpx": attrs.dep(), + "_target_os_type": buck.target_os_type_arg(), + }, ) -def symlinked_buck2_and_tpx(**kwargs): +def buck2_bundle(**kwargs): cxx_platform = platform_utils.get_cxx_platform_for_base_path(native.package_name()) kwargs["default_target_platform"] = cxx_platform.target_platform - _symlinked_buck2_and_tpx(**kwargs) + _buck2_bundle(**kwargs) diff --git a/dice/README.md b/dice/README.md index 41d58b1aa3179..8929e27b9ef09 100644 --- a/dice/README.md +++ b/dice/README.md @@ -1,26 +1,14 @@ # Dynamic Incremental Computation Engine, aka. DICE -[![Support Ukraine](https://img.shields.io/badge/Support-Ukraine-FFD500?style=flat&labelColor=005BBB)](https://opensource.fb.com/support-ukraine) -[![GitHub link](https://img.shields.io/badge/GitHub-facebookincubator%2Fdice-blue.svg)](https://github.com/facebookincubator/dice) -[![crates.io version](https://img.shields.io/crates/v/dice.svg)](https://crates.io/crates/dice) -[![docs.rs availability](https://img.shields.io/docsrs/dice?label=docs.rs)](https://docs.rs/dice/) -[![Build status](https://img.shields.io/github/workflow/status/facebookincubator/dice/ci.svg)](https://github.com/facebookincubator/dice/actions) - DICE is a dynamic incremental computation engine. It is an implemented incremental computation engine that supports parallel computation. -# Documentation -For detailed documentation, see the docs in [dice/docs/index.md](dice/docs/index.md) - -## Making a release +## Documentation -1. Check the [GitHub Actions](https://github.com/facebookincubator/dice/actions) are green. -2. Update `CHANGELOG.md` with the changes since the last release. [This link](https://github.com/facebookincubator/dice/compare/v0.1.0...main) can help (update to compare against the last release). -3. Update the version numbers of the two `Cargo.toml` files. Bump them by 0.0.1 if there are no incompatible changes, or 0.1.0 if there are. Bump the dependency in `dice_examples` to point at the latest `dice` version. -4. Copy the files `CHANGELOG.md`, the two `LICENSE-` files and `README.md` into `dice` subdirectory. -5. Run `cargo publish --allow-dirty --dry-run`, then without the `--dry-run` in `dice`. We do not publish `dice_examples` -6. 
Create a [GitHub release](https://github.com/facebookincubator/dice/releases/new) with `v0.X.Y`, using the `dice` version as the name. +For detailed documentation, see the docs in +[dice/docs/index.md](dice/docs/index.md) ## License -DICE is both MIT and Apache License, Version 2.0 licensed, as found in the [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) files. +DICE is both MIT and Apache License, Version 2.0 licensed, as found in the +[LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) files. diff --git a/dice/dice/BUCK b/dice/dice/BUCK index 2d3004e193aa6..fac2a8f7d772b 100644 --- a/dice/dice/BUCK +++ b/dice/dice/BUCK @@ -1,6 +1,4 @@ -load("@fbcode_macros//build_defs:rust_binary.bzl", "rust_binary") load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -10,11 +8,11 @@ rust_library( ["src/**/*.rs"], ), crate_root = "src/lib.rs", - doctests = False, # FIXME test_deps = [ "fbsource//third-party/rust:assert_matches", "fbsource//third-party/rust:bincode", "fbsource//third-party/rust:derivative", + "fbsource//third-party/rust:indoc", "fbsource//third-party/rust:tempfile", ], deps = [ @@ -24,39 +22,29 @@ rust_library( "fbsource//third-party/rust:dashmap", "fbsource//third-party/rust:derivative", "fbsource//third-party/rust:derive_more", - "fbsource//third-party/rust:fnv", "fbsource//third-party/rust:futures", + "fbsource//third-party/rust:fxhash", "fbsource//third-party/rust:indexmap", "fbsource//third-party/rust:itertools", "fbsource//third-party/rust:parking_lot", + "fbsource//third-party/rust:ref-cast", "fbsource//third-party/rust:scopeguard", "fbsource//third-party/rust:serde", "fbsource//third-party/rust:slab", + "fbsource//third-party/rust:static_assertions", "fbsource//third-party/rust:take_mut", "fbsource//third-party/rust:thiserror", "fbsource//third-party/rust:tokio", - "fbsource//third-party/rust:tokio-stream", "fbsource//third-party/rust:tracing", "fbsource//third-party/rust:triomphe", + "fbsource//third-party/rust:typed-arena", "//buck2/allocative/allocative:allocative", + "//buck2/app/buck2_futures:buck2_futures", "//buck2/gazebo/cmp_any:cmp_any", "//buck2/gazebo/dupe:dupe", "//buck2/gazebo/gazebo:gazebo", "//buck2/shed/lock_free_hashtable:lock_free_hashtable", "//buck2/shed/lock_free_vec:lock_free_vec", - "//buck2/shed/more_futures:more_futures", "//common/rust/shed/sorted_vector_map:sorted_vector_map", ], ) - -rust_binary( - name = "read_dump", - srcs = ["bin/read_dump.rs"], - deps = [ - "fbsource//third-party/rust:anyhow", - "fbsource//third-party/rust:bincode", - "fbsource//third-party/rust:clap-3", - "fbsource//third-party/rust:serde_json", - ":dice", - ], -) diff --git a/dice/dice/Cargo.toml b/dice/dice/Cargo.toml index 73cb16f0d66f2..d4bc45640a5bd 100644 --- a/dice/dice/Cargo.toml +++ b/dice/dice/Cargo.toml @@ -1,58 +1,48 @@ [package] +edition = "2021" +license = { workspace = true } name = "dice" +repository = { workspace = true } version = "0.1.0" -edition = "2021" [dependencies] +allocative = { workspace = true } +anyhow = "1.0.65" anymap = "0.12.1" async-trait = "0.1.24" -bincode = "1.3.3" -clap = { version = "3.2.24", features = ["derive"] } -dashmap = "4.0.2" -derivative = { workspace = true} -derive_more = "0.99.3" -fnv = "1.0" -futures = "0.3" -indexmap = { version = "1.9.1", features = ["serde-1"] } -itertools = "0.10.0" -more_futures = { path = "../../shed/more_futures" } -once_cell = "1.3" -sorted_vector_map.version = "0.1" -# 
@oss-disable: sorted_vector_map.path = "../../../common/rust/shed/sorted_vector_map" -tokio = { version = "1.5", features = ["full"]} -tokio-stream = { workspace = true } +buck2_futures = { path = "../../app/buck2_futures" } +cmp_any = { workspace = true } +dashmap = "5.5.3" +derivative = { workspace = true } +derive_more = { version = "1.0.0", features = ["full"] } dupe = { workspace = true } +futures = "0.3" +fxhash = "0.2.1" gazebo = { workspace = true } -gazebo_lint.version = "0.1" -gazebo_lint.optional = true -# @oss-disable: gazebo_lint.path = "../../gazebo_lint/gazebo_lint" -cmp_any = { workspace = true } -allocative = { workspace = true } +indexmap = { workspace = true } +indoc = { workspace = true } +itertools = "0.13.0" +lock_free_hashtable = { workspace = true } +lock_free_vec = { workspace = true } +parking_lot = { version = "0.11.2", features = ["send_guard"] } +ref-cast = { workspace = true } +scopeguard = { workspace = true } serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0.48" slab = "0.4.7" +# @oss-disable: sorted_vector_map.path = "../../../common/rust/shed/sorted_vector_map" +sorted_vector_map.version = "0.2" +static_assertions = { workspace = true } +take_mut = { workspace = true } thiserror = "1.0.36" +tokio = { version = "1.5", features = ["full"] } tracing = "0.1.22" -parking_lot = { version = "0.11.2", features = ["send_guard"] } -anyhow = "1.0.65" -take_mut = { workspace = true } triomphe = { workspace = true } -lock_free_hashtable = { workspace = true } -lock_free_vec = { workspace = true } -scopeguard = { workspace = true } - -[features] -# @oss-disable: default = ["gazebo_lint"] +typed-arena = { workspace = true } [dev-dependencies] -cast = "0.2.3" -derivative = "2.1.1" -tempfile = "3.1" anyhow = "1.0.65" assert_matches = "1.5" -tokio = { version = "1.5", features = ["full"]} - - -[[bin]] -name = "read_dump" -path = "bin/read_dump.rs" +bincode = { workspace = true } +derivative = "2.1.1" +tempfile = "3.1" +tokio = { version = "1.5", features = ["full"] } diff --git a/dice/dice/bin/read_dump.rs b/dice/dice/bin/read_dump.rs deleted file mode 100644 index edafb2c617065..0000000000000 --- a/dice/dice/bin/read_dump.rs +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -use std::fs::File; -use std::path::PathBuf; - -use clap::Parser; -use dice::introspection::graph::SerializedGraphNodesForKey; - -#[derive(Debug, clap::Parser)] -#[clap(name = "read_dump", about = "dice dump reader")] -pub(crate) struct Opt { - #[clap(name = "DICE_DUMP", help = "The dice dump")] - file: PathBuf, - #[clap(long = "out", help = "Copy the output to this path")] - out: Option<PathBuf>, -} - -fn main() -> anyhow::Result<()> { - let clap = Opt::clap(); - let matches = clap.get_matches_from(std::env::args().collect::<Vec<String>>()); - let opt = Opt::from_clap(&matches); - - let file = File::open(opt.file)?; - - let out: Vec<SerializedGraphNodesForKey> = bincode::deserialize_from(&file)?; - - match opt.out { - Some(path) => { - serde_json::to_writer_pretty(File::create(path)?, &out)?; - } - None => { - serde_json::to_writer_pretty(std::io::stdout(), &out)?; - } - }; - - Ok(()) -} diff --git a/dice/dice/docs/api.md b/dice/dice/docs/api.md index 1f0d7010d342f..ee3e979b3f943 100644 --- a/dice/dice/docs/api.md +++ b/dice/dice/docs/api.md @@ -2,22 +2,28 @@ Everything starts at the base struct `Dice`. -From it, you can get the `TransactionUpdater` where you can report the changes to your computation graph you expect -to see this time. You can dirty nodes to be recomputed via `TransactionUpdater::changed`, -or report new [Injected Values](writing_computations.md#injected-keys) via `TransactionUpdater::changed_to`. -These changes recorded will not be seen by anyone until you commit them via `TransactionUpdater::commit`, which returns -you an instance of `DiceTransaction` containing all changes recorded up until this moment (but not any future changes). +From it, you can get the `TransactionUpdater` where you can report the changes +to your computation graph you expect to see this time. You can dirty nodes to be +recomputed via `TransactionUpdater::changed`, or report new +[Injected Values](writing_computations.md#injected-keys) via +`TransactionUpdater::changed_to`. These changes recorded will not be seen by +anyone until you commit them via `TransactionUpdater::commit`, which returns you +an instance of `DiceTransaction` containing all changes recorded up until this +moment (but not any future changes). -`DiceTransaction` instances that have the same set of reported changes are considered to be of the same state. Computations -can be requested from them concurrently, and work will be shared. -However, concurrent requests to `DiceTransactions` with different states are NOT supported. -DICE currently does NOT enforce this rule. Please be aware. +`DiceTransaction` instances that have the same set of reported changes are +considered to be of the same state. Computations can be requested from them +concurrently, and work will be shared. However, concurrent requests to +`DiceTransactions` with different states are NOT supported. DICE currently does +NOT enforce this rule. Please be aware. +The function `DiceComputations::compute(Key)` is used to compute a specific Key, +returning a future to the result. `DiceTransaction` derefs into a +`DiceComputations` so such computations can be called directly on the transaction. -The function `DiceComputations::compute(Key)` is used to compute a specific Key, returning a future to the result. -`DiceTransaction` derefs into a `DiceComputations` such computations can be called directly on the transaction.
+To make code look natural, we often employ the pattern of writing computation +traits as follows: -To make code look natural, we often employ the pattern of writing computation traits as follows: ```rust #[async_trait] trait MyComputationTrait { @@ -38,7 +44,9 @@ impl MyComputationTrait for DiceComputations { } ``` -This will let you write the more natural code as follows instead of having to explicitly create and refer to Keys everywhere in the code base: +This will let you write the more natural code as follows instead of having to +explicitly create and refer to Keys everywhere in the code base: + ```rust use MyComputationTrait; @@ -52,6 +60,7 @@ async fn main() { } ``` -You can group computations in traits however you wish, with whatever Key types you desire. In buck2, we tend to group -related computations together (i.e all parsing computations in one trait, or all action computations in one trait) to +You can group computations in traits however you wish, with whatever Key types +you desire. In buck2, we tend to group related computations together (i.e. all +parsing computations in one trait, or all action computations in one trait) to better organize our modules and limit the number of dependencies pulled in. diff --git a/dice/dice/docs/cancellations.md b/dice/dice/docs/cancellations.md index 173759f256cb9..59d318cb0966f 100644 --- a/dice/dice/docs/cancellations.md +++ b/dice/dice/docs/cancellations.md @@ -1,22 +1,28 @@ # Cancellations -DICE supports cancellations of computations you have requested. -Since each computation is returned as a future, dropping the future will notify DICE that the computation is no longer -needed, and can be cancelled if appropriate. Note that dropping the future does not guarantee that computation is -canceled because of multi-tenancy, where if any other request depends on the same computation, the computation will -continue to run to completion for the other request. +DICE supports cancellations of computations you have requested. Since each +computation is returned as a future, dropping the future will notify DICE that +the computation is no longer needed, and can be cancelled if appropriate. Note +that dropping the future does not guarantee that the computation is canceled because +of multi-tenancy, where if any other request depends on the same computation, +the computation will continue to run to completion for the other request. ## How It Works -DICE tracks all currently running computations in a map of [`WeakShared`](https://docs.rs/futures/0.3.17/futures/future/struct.WeakShared.html) -`DiceTask`s. This core map is what allows concurrent requests share work when they request for the same computation. -When a request requires a computation that is currently running in the map, it will attempt to acquire a [`Shared`](https://docs.rs/futures/0.3.17/futures/future/struct.Shared.html) -version of the `WeakShared`. If the `WeakShared` was already dropped, acquiring a `Shared` will fail, causing the -request to spawn a new `DiceTask`, holding onto a `Shared` and inserting its corresponding `WeakShared` into the map. +DICE tracks all currently running computations in a map of +[`WeakShared`](https://docs.rs/futures/0.3.17/futures/future/struct.WeakShared.html) +`DiceTask`s. This core map is what allows concurrent requests to share work when +they request the same computation.
When a request requires a computation +that is currently running in the map, it will attempt to acquire a +[`Shared`](https://docs.rs/futures/0.3.17/futures/future/struct.Shared.html) +version of the `WeakShared`. If the `WeakShared` was already dropped, acquiring +a `Shared` will fail, causing the request to spawn a new `DiceTask`, holding +onto a `Shared` and inserting its corresponding `WeakShared` into the map. -When a request is canceled by dropping its future, the `Shared` it holds will be dropped. -When there are no strong references (i.e `Shared` versions) of the `WeakShared`, the `DiceTask` will be dropped, -which triggers the spawned task to be aborted. -By having only active requests hold onto the `Shared`, and the map itself holding only a `WeakShared`, we can guarantee -that the futures are never canceled if there is a request actively depending on it, and that the future will be canceled -once there are no active requests for it. +When a request is canceled by dropping its future, the `Shared` it holds will be +dropped. When there are no strong references (i.e. `Shared` versions) of the +`WeakShared`, the `DiceTask` will be dropped, which triggers the spawned task to +be aborted. By having only active requests hold onto the `Shared`, and the map +itself holding only a `WeakShared`, we can guarantee that the futures are never +canceled if there is a request actively depending on it, and that the future +will be canceled once there are no active requests for it. diff --git a/dice/dice/docs/incrementality.md b/dice/dice/docs/incrementality.md index 2a900ca1cbfea..281dae815648d 100644 --- a/dice/dice/docs/incrementality.md +++ b/dice/dice/docs/incrementality.md @@ -1,19 +1,31 @@ # Incrementality -Incrementality is the idea that given any changes of a collection of interdependent computations, -only the changed portions of the computation are recomputed. To record changes and to discover the changed portion, -portions of the dependency graph needs to be traversed to discover the changes. +Incrementality is the idea that given any changes of a collection of +interdependent computations, only the changed portions of the computation are +recomputed. To record changes and to discover the changed portion, portions of +the dependency graph need to be traversed to discover the changes. -DICE tracks the reverse dependencies (rdeps) of computations to achieve O(invalidated subset) traversals and O(changed subset) recomputations for a given request. -* 'invalidated subset' is the set of all possibly invalidated nodes intersected with the set of nodes that might be needed for the request regardless of whether a node is cached or not. -* 'changed subset' is the set nodes whose values changed intersected with the set of nodes that might be needed for the request regardless of whether a node is cached or not. +DICE tracks the reverse dependencies (rdeps) of computations to achieve +O(invalidated subset) traversals and O(changed subset) recomputations for a +given request. + +- 'invalidated subset' is the set of all possibly invalidated nodes intersected + with the set of nodes that might be needed for the request regardless of + whether a node is cached or not. +- 'changed subset' is the set of nodes whose values changed intersected with the + set of nodes that might be needed for the request regardless of whether a node + is cached or not. This allows DICE to minimize the amount of work performed for each new request.
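To make the O(invalidated subset) traversal concrete, here is a minimal toy sketch of an rdeps walk. The names and data structures are invented for illustration and are not DICE's internals: the point is only that the walk starts from the changed keys and visits nothing outside their transitive rdeps.

```rust
use std::collections::HashMap;
use std::collections::HashSet;

// Toy dependency graph: for each key, record which keys depend on it (its rdeps).
struct Graph {
    rdeps: HashMap<&'static str, Vec<&'static str>>,
}

impl Graph {
    /// Collect the possibly-invalidated subset: the transitive rdeps of the
    /// changed keys. Keys outside this subset are never visited.
    fn invalidated(&self, changed: &[&'static str]) -> HashSet<&'static str> {
        let mut seen = HashSet::new();
        let mut stack: Vec<&'static str> = changed.to_vec();
        while let Some(key) = stack.pop() {
            if seen.insert(key) {
                if let Some(rdeps) = self.rdeps.get(key) {
                    stack.extend(rdeps.iter().copied());
                }
            }
        }
        seen
    }
}

fn main() {
    let g = Graph {
        rdeps: HashMap::from([
            ("parse", vec!["analyze"]),
            ("analyze", vec!["build"]),
            ("unrelated", vec![]),
        ]),
    };
    // Changing `parse` touches only `parse`, `analyze`, and `build`;
    // `unrelated` is never visited.
    assert_eq!(g.invalidated(&["parse"]).len(), 3);
}
```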
# Multi-versioning -DICE supports a multi-commit transaction model, where computations/requests that is currently running do not see newly -committed changes. We do not yet support running transactions at different versions concurrently nor storing multiple versions -of nodes. + +DICE supports a multi-commit transaction model, where computations/requests that +are currently running do not see newly committed changes. We do not yet support +running transactions at different versions concurrently nor storing multiple +versions of nodes. # Details -Details of the incrementality algorithm can be found in the [PDF](DiceIncrementalityAlgorithms.pdf). + +Details of the incrementality algorithm can be found in the +[PDF](DiceIncrementalityAlgorithms.pdf). diff --git a/dice/dice/docs/index.md b/dice/dice/docs/index.md index 5d3979a464177..46897b9bd66a7 100644 --- a/dice/dice/docs/index.md +++ b/dice/dice/docs/index.md @@ -1,29 +1,38 @@ # DICE -DICE is a dynamic incremental computation engine that supports parallel computation, inspired by -[Adapton](https://docs.rs/adapton/latest/adapton/) and [Salsa](https://github.com/salsa-rs/salsa), +DICE is a dynamic incremental computation engine that supports parallel +computation, inspired by [Adapton](https://docs.rs/adapton/latest/adapton/) and +[Salsa](https://github.com/salsa-rs/salsa). -DICE is the core computation engine that powers the incremental graph transformations of [buck2](https://github.com/facebook/buck2). -It is intended to offer a generic computation API that can be used beyond just Buck, so that any kind of incremental computation can run on DICE. -All computations are executed in parallel on DICE via tokio executors. Duplicate requests to the same computations are deduplicated. +DICE is the core computation engine that powers the incremental graph +transformations of [buck2](https://github.com/facebook/buck2). It is intended to +offer a generic computation API that can be used beyond just Buck, so that any +kind of incremental computation can run on DICE. All computations are executed +in parallel on DICE via tokio executors. Duplicate requests to the same +computations are deduplicated. DICE is currently still experimental and largely being rewritten. ## Features + - [Incrementality](incrementality.md) - Incrementality behaviour of DICE - [Parallelism](parallelism.md) - Parallelism and Behaviour of Computations - [Cancellations](cancellations.md) - Cancelling of a currently running + computation - [Transient Errors](transients.md) - Transient Error Handling - [Projections](projections.md) - Projection Computations - Cycle Detection // TODO ## Using DICE + - [Basic API](api.md) - How to use DICE -- [Writing Computations](writing_computations.md) - How to write computations that are incremental +- [Writing Computations](writing_computations.md) - How to write computations + that are incremental ## Benchmarking DICE -// TODO +// TODO ## Debugging the Graph + // TODO diff --git a/dice/dice/docs/parallelism.md b/dice/dice/docs/parallelism.md index 14ad4c2af60c3..edaa269a90b35 100644 --- a/dice/dice/docs/parallelism.md +++ b/dice/dice/docs/parallelism.md @@ -1,9 +1,12 @@ # Parallelism and Computation Behaviour -Every computation in DICE is automatically spawned asynchronously in Rust via tokio to be computed in parallel.
-They behave like standard Rust futures, suspending when users `await` dependent computations and resuming when the
+Every computation in DICE is automatically spawned asynchronously in Rust via
+tokio to be computed in parallel. They behave like standard Rust futures,
+suspending when users `await` dependent computations and resuming when the
 dependent futures are ready.

-The same identical computation is always deduplicated, so concurrent requests to the same exact key will only be
-executed once if not cached. Additionally, for normal computations, we guarantee that the same instance of the computed
-value is returned to all requests if the value is considered equal based on the Key's equality.
+The same computation is always deduplicated, so concurrent requests to the
+exact same key will only be executed once if not cached. Additionally, for
+normal computations, we guarantee that the same instance of the computed value
+is returned to all requests if the value is considered equal based on the Key's
+equality.
diff --git a/dice/dice/docs/projections.md b/dice/dice/docs/projections.md
index 3837ef8764a6c..4cb175756da65 100644
--- a/dice/dice/docs/projections.md
+++ b/dice/dice/docs/projections.md
@@ -1,19 +1,25 @@
 # Projection Computations

 DICE supports a special type of synchronous computation called "Projections".
-These are synchronous computations that are derived from the result of a larger parallel async computation.
-
-This allows computations to depend on only "portions" of the result of another computation, allowing the parent computation
-to be resurrected more often and not need be recomputed if only the unused portions of the dependent result changes.
-For example, you may have a computation that retrieves and parses JSON. Now you have an expensive computation that
-requires a single value of the JSON. Rather than depending on the entirety of the JSON and having to rerun the expensive
-computation whenever any of the JSON value changes, you can write a new Projection Computation that provides access to
-specific values from the JSON. Now you can have the expensive computation depend on the projection, which will avoid needing
-to rerun the expensive computation unless that specific projected value changes.
+These are synchronous computations that are derived from the result of a larger
+parallel async computation.
+
+This allows computations to depend on only "portions" of the result of another
+computation, allowing the parent computation to be resurrected more often,
+without being recomputed, when only the unused portions of the dependent result
+change. For example, you may have a computation that retrieves and parses JSON,
+and an expensive computation that requires a single value from that JSON.
+Rather than depending on the entirety of the JSON and having to rerun the
+expensive computation whenever any part of the JSON changes, you can write a
+new Projection Computation that provides access to specific values from the
+JSON. Now you can have the expensive computation depend on the projection, which
+will avoid needing to rerun the expensive computation unless that specific
+projected value changes.

 ## API

-To create a Projection Computation, create a struct and implement `ProjectionKey`.
+To create a Projection Computation, create a struct and implement
+`ProjectionKey`.
 ```rust
 struct MyProjection;
@@ -35,13 +41,16 @@ impl dice::api::ProjectionKey for MyProjection {
     }
 }
 ```
-The `BaseComputeKey` is the async computation for which the projected values are based off of.
+
+The `BaseComputeKey` is the async computation on which the projected values are
+based.

-To request the projection, you must compute the base via `DiceComputations::compute_opaque(Key)` which returns a `OpaqueValue`.
-Then, request the projection via `OpaqueValue::projection(ProjectionKey)`.
+To request the projection, you must compute the base via
+`DiceComputations::compute_opaque(Key)`, which returns an `OpaqueValue`. Then,
+request the projection via `OpaqueValue::projection(ProjectionKey)`.

-Similar to normal keys, buck2 often hides the keys to make code look more natural via traits.
+Similar to normal keys, buck2 often hides the keys to make code look more
+natural via traits.

 ```rust
 #[async_trait]
@@ -76,7 +85,9 @@ impl SyncProjectionTrait for OpaqueValue {
     }
 }
 ```

-This will let you write the more natural code as follows instead of having to explicitly create and refer to Keys everywhere in the code base:
+This will let you write the more natural code as follows, instead of having to
+explicitly create and refer to Keys everywhere in the code base:
+
 ```rust
 use MyComputationTrait;
 use SyncProjectionTrait;
diff --git a/dice/dice/docs/transients.md b/dice/dice/docs/transients.md
index 0d6422aa0261a..ac313731aab3b 100644
--- a/dice/dice/docs/transients.md
+++ b/dice/dice/docs/transients.md
@@ -1,12 +1,15 @@
 # Transient Errors

-DICE has a concept of "transient" errors, which are errors that are non-deterministic and should be retried instead of
-cached.
-These are indicated by `Key::validity(Key::Value)` returning `false`.
+DICE has a concept of "transient" errors, which are errors that are
+non-deterministic and should be retried instead of cached. These are indicated
+by `Key::validity(Key::Value)` returning `false`.

-When DICE encounters a "transient value", the value is reused for the ongoing computation transaction. That is, all
-active requests of the same transaction will see the same instance of the transient value. However, this value will
-not be cached such that upon obtaining a new transaction with or without committing any changes to the graph, the value
-will be recomputed (once). If the recompute still results in a transient, then the value is still not cached and the
-same behaviour occurs on the next fresh transaction. If the recompute results in a non-transient value, then the value
-will be cached, and the next transaction will reuse the cached value if there are no changes that invalidate the value.
+When DICE encounters a "transient value", the value is reused for the ongoing
+computation transaction. That is, all active requests of the same transaction
+will see the same instance of the transient value. However, this value will not
+be cached, so upon obtaining a new transaction, with or without committing any
+changes to the graph, the value will be recomputed (once). If the recompute
+still results in a transient, the value is again not cached and the same
+behaviour occurs on the next fresh transaction. If the recompute results in a
+non-transient value, then the value will be cached, and the next transaction
+will reuse the cached value if there are no changes that invalidate it.
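To illustrate `Key::validity` from the transients doc just above, here is a minimal, hypothetical sketch. The `FetchKey` and `fetch` names are invented for illustration; only the `Key` trait methods shown elsewhere in this diff are assumed.

```rust
use std::sync::Arc;

use allocative::Allocative;
use async_trait::async_trait;
use buck2_futures::cancellation::CancellationContext;
use dice::{DiceComputations, Key};

// Hypothetical stand-in for a non-deterministic operation (e.g. a network
// fetch) whose failures should be retried rather than cached.
async fn fetch(url: &str) -> Result<String, std::io::Error> {
    let _ = url;
    unimplemented!("illustrative stub")
}

#[derive(Allocative, Clone, Debug, derive_more::Display, Eq, PartialEq, Hash)]
struct FetchKey(String);

#[async_trait]
impl Key for FetchKey {
    // `None` models a failed (transient) result; real code might carry a
    // shared error value instead.
    type Value = Option<Arc<String>>;

    async fn compute(
        &self,
        _ctx: &mut DiceComputations,
        _cancellations: &CancellationContext,
    ) -> Self::Value {
        fetch(&self.0).await.ok().map(Arc::new)
    }

    fn equality(x: &Self::Value, y: &Self::Value) -> bool {
        x == y
    }

    // Returning false marks the value transient: it is reused within the
    // current transaction but recomputed on the next one instead of cached.
    fn validity(x: &Self::Value) -> bool {
        x.is_some()
    }
}
```

With this, every request in one transaction observes the same failed result, but a fresh transaction retries the fetch exactly once, matching the behaviour described above.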
diff --git a/dice/dice/docs/writing_computations.md b/dice/dice/docs/writing_computations.md
index 0fbc6b3885828..61b1ffbf12d36 100644
--- a/dice/dice/docs/writing_computations.md
+++ b/dice/dice/docs/writing_computations.md
@@ -1,8 +1,10 @@
 # Writing a Computation
+
 Computations are written by declaring a struct that implements the `Key` trait.
-In this trait, you will declare a `compute` function that is the calculation to perform when not cached.
-This method will receive a context `DiceComputations`, which is where you can request for further keys' values. These
-keys will be recorded as dependencies of the current computation.
+In this trait, you will declare a `compute` function that is the calculation to
+perform when not cached. This method will receive a context `DiceComputations`,
+through which you can request further keys' values. These keys will be recorded
+as dependencies of the current computation.

 ```rust
 #[derive(Allocative, Clone, Debug, Display, Eq, PartialEq, Hash)]
@@ -24,17 +26,21 @@ impl dice::api::Key for MyKey {
     }
 }
 ```

-Additionally, there are methods `equality` and `validity` that one can implement for the `Key` trait to configure
-the behaviour of [transients](transients.md) and equals for the output of the computation. Equals allows DICE to resurrect
-nodes that depends on values that were invalidated, but end up recomputing to the "same" value.
+Additionally, there are methods `equality` and `validity` that one can implement
+for the `Key` trait to configure the behaviour of [transients](transients.md)
+and equality for the output of the computation. Equality allows DICE to
+resurrect nodes that depend on values that were invalidated, but end up
+recomputing to the "same" value.

 ## Injected Keys

-Injected Keys are a special type of Keys that are NOT computed. They must have their value explicitly set when informing
-DICE of updated values via `TransactionUpdater::changed_to`, and all future requests will yield such value until updated
-again.
-Computations are written by declaring a struct that implements the `InjectedKey` trait.
-They have no compute function, but offers `equality` as well.
+Injected Keys are a special type of Keys that are NOT computed. They must have
+their value explicitly set when informing DICE of updated values via
+`TransactionUpdater::changed_to`, and all future requests will yield that value
+until updated again.
+
+Injected Keys are written by declaring a struct that implements the
+`InjectedKey` trait. They have no compute function, but offer `equality` as
+well.

 ```rust
 struct MyInjectedKey;
diff --git a/dice/dice/src/api.rs b/dice/dice/src/api.rs
new file mode 100644
index 0000000000000..12a244fb02f3c
--- /dev/null
+++ b/dice/dice/src/api.rs
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+//! Public DICE API
+
+pub(crate) mod activation_tracker;
+pub(crate) mod computations;
+pub(crate) mod cycles;
+pub(crate) mod data;
+pub(crate) mod demand;
+pub(crate) mod dice;
+pub(crate) mod dyn_key;
+pub(crate) mod error;
+pub(crate) mod events;
+pub(crate) mod injected;
+pub(crate) mod invalidation_tracking;
+pub mod key;
+pub(crate) mod opaque;
+pub(crate) mod projection;
+pub(crate) mod storage_type;
+pub(crate) mod transaction;
+pub(crate) mod user_data;
+pub(crate) mod which;
diff --git a/dice/dice/src/api/activation_tracker.rs b/dice/dice/src/api/activation_tracker.rs
index e4fd5315109e8..64e9460a42994 100644
--- a/dice/dice/src/api/activation_tracker.rs
+++ b/dice/dice/src/api/activation_tracker.rs
@@ -9,6 +9,8 @@

 use std::any::Any;

+use crate::DynKey;
+
 /// An ActivationTracker can be used to identify which keys were either reused or computed during a
 /// transaction.
 pub trait ActivationTracker: Send + Sync + 'static {
@@ -17,8 +19,8 @@ pub trait ActivationTracker: Send + Sync + 'static {
     /// to `store_evaluation_data` (if any).
     fn key_activated(
         &self,
-        key: &dyn Any,
-        deps: &mut dyn Iterator<Item = &dyn Any>,
+        key: &DynKey,
+        deps: &mut dyn Iterator<Item = &DynKey>,
         activation_data: ActivationData,
     );
 }
diff --git a/dice/dice/src/api/computations.rs b/dice/dice/src/api/computations.rs
index bf6639cfdcef0..376fd7170bfe6 100644
--- a/dice/dice/src/api/computations.rs
+++ b/dice/dice/src/api/computations.rs
@@ -8,11 +8,11 @@
 */

 use std::future::Future;
-use std::marker::PhantomData;
-use std::ops::Deref;
-use std::ops::DerefMut;
+use std::sync::Arc;

 use allocative::Allocative;
+use async_trait::async_trait;
+use buck2_futures::cancellation::CancellationContext;
 use futures::future::BoxFuture;

 use crate::api::data::DiceData;
@@ -21,6 +21,9 @@ use crate::api::key::Key;
 use crate::api::opaque::OpaqueValue;
 use crate::api::user_data::UserComputationData;
 use crate::ctx::DiceComputationsImpl;
+use crate::ctx::LinearRecomputeDiceComputationsImpl;
+use crate::DiceKeyTrackedInvalidationPaths;
+use crate::ProjectionKey;
 use crate::UserCycleDetectorGuard;

 /// The context for computations to register themselves, and request for additional dependencies.
@@ -32,20 +35,19 @@ use crate::UserCycleDetectorGuard;
 /// The context is valid only for the duration of the computation of a single key, and cannot be
 /// owned.
 #[derive(Allocative)]
-#[repr(transparent)]
-pub struct DiceComputations(pub(crate) DiceComputationsImpl);
+pub struct DiceComputations<'a>(pub(crate) DiceComputationsImpl<'a>);

 fn _test_computations_sync_send() {
     fn _assert_sync_send<S: Sync + Send>() {}
     _assert_sync_send::<DiceComputations>();
 }

-impl DiceComputations {
-    /// Gets all the result of of the given computation key.
-    /// recorded as dependencies of the current computation for which this
+impl<'d> DiceComputations<'d> {
+    /// Gets the result of the given computation key. The result is
+    /// recorded as a dependency of the current computation for which this
     /// context is for.
     pub fn compute<'a, K>(
-        &'a self,
+        &'a mut self,
         key: &K,
     ) -> impl Future<Output = DiceResult<<K as Key>::Value>> + 'a
     where
@@ -61,30 +63,223 @@ impl DiceComputations {
     pub fn compute_opaque<'a, K>(
         &'a self,
         key: &K,
-    ) -> impl Future<Output = DiceResult<OpaqueValue<'a, K>>> + 'a
+    ) -> impl Future<Output = DiceResult<OpaqueValue<K>>> + 'a
     where
         K: Key,
     {
         self.0.compute_opaque(key)
     }

-    /// Computes all the given tasks in parallel, returning an unordered Stream
+    pub fn projection<'a, K: Key, P: ProjectionKey>(
+        &'a mut self,
+        derive_from: &OpaqueValue<K>,
+        projection_key: &P,
+    ) -> DiceResult<P::Value> {
+        self.0.projection(derive_from, projection_key)
+    }
+
+    pub fn opaque_into_value<'a, K: Key>(
+        &'a mut self,
+        derive_from: OpaqueValue<K>,
+    ) -> DiceResult<K::Value> {
+        self.0.opaque_into_value(derive_from)
+    }
+
+    /// DiceComputations' &mut-based API can make some computations much more complex to express, but without it
+    /// the data dependencies between different compute requests are impossible to track. with_linear_recompute()
+    /// is an escape hatch in the case where we are willing to sacrifice recompute performance for easier expression
+    /// of a computation. It should not be used lightly; it can be difficult to attribute performance regressions to its use.
+    ///
+    /// Within the with_linear_recompute(), all deps will be recorded as accessed sequentially, and so recomputation
+    /// will not trigger them in parallel. This will apply to calls on the DiceComputations returned by .get() and by the ones received
+    /// in closures from compute_many and friends. It will not apply to the other deps from which the linear recompute was created.
+    ///
+    /// For example:
+    ///
+    /// ```ignore
+    /// let mut ctx = something();
+    /// let keys1 = vec![Key(10), Key(11)];
+    /// let keys2 = vec![Key(20), Key(21)];
+    /// let keys3 = vec![Key(30), Key(31)];
+    /// let keys4 = vec![Key(40), Key(41)];
+    /// ctx.compute_join(keys1, |ctx, key1| ctx.compute(key1).boxed()).await;
+    /// ctx.with_linear_recompute(|mut linear| {
+    ///     linear.get().compute_join(keys2, |ctx, key2| async move {
+    ///         ctx.compute2(
+    ///             |ctx| ctx.compute(key2).boxed(),
+    ///             |ctx| ctx.compute_join(keys3, |ctx, key3| ctx.compute(key3).boxed()).boxed()
+    ///         ).await;
+    ///         }.boxed()
+    ///     ).await;
+    ///     ctx.compute_join(keys4, |ctx, key4| ctx.compute(key4).boxed()).await;
+    /// });
+    /// ```
+    ///
+    /// In this example, the recomputation of all of keys2 and keys3 would be done linearly, but keys1 and keys4 would be recomputed in parallel.
+    pub fn with_linear_recompute<'a, T, Fut: Future<Output = T> + 'a>(
+        &'a mut self,
+        func: impl FnOnce(LinearRecomputeDiceComputations<'a>) -> Fut + 'a,
+    ) -> impl Future<Output = T> + 'a {
+        self.0.with_linear_recompute(func)
+    }
+
+    /// Creates computation Futures for all the given tasks.
+    ///
+    /// ```ignore
+    /// let mut ctx: &'a mut DiceComputations = ctx();
+    /// let data: String = data();
+    /// let keys: Vec<Key> = keys();
+    /// let futs = ctx.compute_many(keys.into_iter().map(|k|
+    ///     DiceComputations::declare_closure(
+    ///         |dice: &mut DiceComputations| -> BoxFuture<String> {
+    ///             async move {
+    ///                 dice.compute(k).await + data
+    ///             }.boxed()
+    ///         }
+    ///     )
+    /// ));
+    /// futures::future::join_all(futs).await;
+    /// ```
     pub fn compute_many<'a, T: 'a>(
-        &'a self,
+        &'a mut self,
         computes: impl IntoIterator<
-            Item = impl for<'x> FnOnce(&'x mut DiceComputationsParallel<'a>) -> BoxFuture<'x, T> + Send,
+            Item = impl for<'x> FnOnce(&'x mut DiceComputations<'a>) -> BoxFuture<'x, T> + Send,
         >,
     ) -> Vec<impl Future<Output = T> + 'a> {
         self.0.compute_many(computes)
     }

-    /// Computes all the given tasks in parallel, returning an unordered Stream
+    /// Maps the items into computation futures and joins on them.
+    ///
+    /// ```ignore
+    /// let mut ctx: &'a mut DiceComputations = ctx();
+    /// let data: String = data();
+    /// let keys: Vec<Key> = keys();
+    /// // When defined in place, there's no need to use a declare helper.
+    /// ctx.compute_join(keys, |dice: &mut DiceComputations, k: &Key| {
+    ///     async move {
+    ///         dice.compute(k).await + data
+    ///     }
+    /// }).await;
+    ///
+    /// // If the closure is going to be declared outside the compute_join itself, you need to use
+    /// // declare_join_closure for it to get the right lifetime bounds.
+    /// let compute_one = DiceComputations::declare_join_closure(
+    ///     |dice: &mut DiceComputations, k: &Key| {
+    ///         async move {
+    ///             dice.compute(k).await + data
+    ///         }
+    ///     }
+    /// );
+    /// ctx.compute_join(keys, compute_one).await;
+    /// ```
+    pub fn compute_join<'a, T: Send, R: 'a>(
+        &'a mut self,
+        items: impl IntoIterator<Item = T>,
+        mapper: (
+            impl for<'x> FnOnce(&'x mut DiceComputations<'a>, T) -> BoxFuture<'x, R> + Send + Sync + Copy
+        ),
+    ) -> impl Future<Output = Vec<R>> + 'a {
+        let futs = self.compute_many(items.into_iter().map(move |v| {
+            DiceComputations::declare_closure(move |ctx: &mut DiceComputations| -> BoxFuture<R> {
+                mapper(ctx, v)
+            })
+        }));
+        futures::future::join_all(futs)
+    }
+
+    /// Maps the items into computation futures and then returns a future which represents either a
+    /// collection of the results or an error.
+    pub fn try_compute_join<'a, T: Send, R: 'a, E: 'a>(
+        &'a mut self,
+        items: impl IntoIterator<Item = T>,
+        mapper: (
+            impl for<'x> FnOnce(&'x mut DiceComputations<'a>, T) -> BoxFuture<'x, Result<R, E>>
+                + Send
+                + Sync
+                + Copy
+        ),
+    ) -> impl Future<Output = Result<Vec<R>, E>> + 'a {
+        let futs = self.compute_many(items.into_iter().map(move |v| {
+            DiceComputations::declare_closure(
+                move |ctx: &mut DiceComputations| -> BoxFuture<Result<R, E>> { mapper(ctx, v) },
+            )
+        }));
+        crate::future::try_join_all(futs)
+    }
+
+    /// Computes all the given tasks in parallel.
+    ///
+    /// If the closures are defined outside the compute2 call, you need to use declare_closure() to get the right lifetimes.
     pub fn compute2<'a, T: 'a, U: 'a>(
-        &'a self,
-        compute1: impl for<'x> FnOnce(&'x mut DiceComputationsParallel<'a>) -> BoxFuture<'x, T> + Send,
-        compute2: impl for<'x> FnOnce(&'x mut DiceComputationsParallel<'a>) -> BoxFuture<'x, U> + Send,
-    ) -> (impl Future<Output = T> + 'a, impl Future<Output = U> + 'a) {
-        self.0.compute2(compute1, compute2)
+        &'a mut self,
+        compute1: impl for<'x> FnOnce(&'x mut DiceComputations<'a>) -> BoxFuture<'x, T> + Send,
+        compute2: impl for<'x> FnOnce(&'x mut DiceComputations<'a>) -> BoxFuture<'x, U> + Send,
+    ) -> impl Future<Output = (T, U)> + 'a {
+        let (t, u) = self.0.compute2(compute1, compute2);
+        futures::future::join(t, u)
+    }
+
+    /// Compute all the given tasks in parallel.
+    pub fn try_compute2<'a, T: 'a, U: 'a, E: 'a>(
+        &'a mut self,
+        compute1: impl for<'x> FnOnce(&'x mut DiceComputations<'a>) -> BoxFuture<'x, Result<T, E>>
+            + Send,
+        compute2: impl for<'x> FnOnce(&'x mut DiceComputations<'a>) -> BoxFuture<'x, Result<U, E>>
+            + Send,
+    ) -> impl Future<Output = Result<(T, U), E>> + 'a {
+        let (t, u) = self.0.compute2(compute1, compute2);
+        futures::future::try_join(t, u)
+    }
+
+    /// Computes all the given tasks in parallel.
+    ///
+    /// If the closures are defined outside the compute3 call, you need to use declare_closure() to get the right lifetimes.
+    pub fn compute3<'a, T: 'a, U: 'a, V: 'a>(
+        &'a mut self,
+        compute1: impl for<'x> FnOnce(&'x mut DiceComputations<'a>) -> BoxFuture<'x, T> + Send,
+        compute2: impl for<'x> FnOnce(&'x mut DiceComputations<'a>) -> BoxFuture<'x, U> + Send,
+        compute3: impl for<'x> FnOnce(&'x mut DiceComputations<'a>) -> BoxFuture<'x, V> + Send,
+    ) -> impl Future<Output = (T, U, V)> + 'a {
+        let (t, u, v) = self.0.compute3(compute1, compute2, compute3);
+        futures::future::join3(t, u, v)
+    }
+
+    /// Compute all the given tasks in parallel.
+    pub fn try_compute3<'a, T: 'a, U: 'a, V: 'a, E: 'a>(
+        &'a mut self,
+        compute1: impl for<'x> FnOnce(&'x mut DiceComputations<'a>) -> BoxFuture<'x, Result<T, E>>
+            + Send,
+        compute2: impl for<'x> FnOnce(&'x mut DiceComputations<'a>) -> BoxFuture<'x, Result<U, E>>
+            + Send,
+        compute3: impl for<'x> FnOnce(&'x mut DiceComputations<'a>) -> BoxFuture<'x, Result<V, E>>
+            + Send,
+    ) -> impl Future<Output = Result<(T, U, V), E>> + 'a {
+        let (t, u, v) = self.0.compute3(compute1, compute2, compute3);
+        futures::future::try_join3(t, u, v)
+    }
+
+    /// Used to declare a higher order closure for compute_join and try_compute_join.
+    ///
+    /// We need to use BoxFuture here to express that the future captures the 'x lifetime.
+    pub fn declare_join_closure<'a, T, R, Closure>(closure: Closure) -> Closure
+    where
+        Closure: for<'x> FnOnce(&'x mut DiceComputations<'a>, T) -> BoxFuture<'x, R>
+            + Send
+            + Sync
+            + Copy,
+    {
+        closure
+    }
+
+    /// Used to declare a higher order closure for compute2 and compute_many.
+    ///
+    /// We need to use BoxFuture here to express that the future captures the 'x lifetime.
+    pub fn declare_closure<'a, R, Closure>(closure: Closure) -> Closure
+    where
+        Closure: for<'x> FnOnce(&'x mut DiceComputations<'a>) -> BoxFuture<'x, R>,
+    {
+        closure
     }

     /// Data that is static per the entire lifetime of Dice. These data are initialized at the
@@ -102,7 +297,7 @@ impl DiceComputations {
     }

     /// Gets the current cycle guard if its set. If it's set but a different type, an error will be returned.
- pub fn cycle_guard(&self) -> DiceResult> { + pub fn cycle_guard(&self) -> DiceResult>> { self.0.cycle_guard() } @@ -111,131 +306,51 @@ impl DiceComputations { pub fn store_evaluation_data(&self, value: T) -> DiceResult<()> { self.0.store_evaluation_data(value) } -} - -/// For a `compute_many` and `compute2` request, the DiceComputations provided to each lambda -/// is a reference that's only available for some specific lifetime `'x`. This is express as a -/// higher rank lifetime bound `for <'x>` in rust. However, `for <'x>` bounds do not have constraints -/// on them so rust infers them to be any lifetime, including 'static, which is wrong. So, we -/// introduce an extra lifetime here which forces rust compiler to infer additional bounds on -/// the `for <'x>` as a `&'x DiceComputationParallel<'a>` cannot live more than `'a`, so using this -/// type as the argument to the closure forces the correct lifetime bounds to be inferred by rust. -pub struct DiceComputationsParallel<'a>(pub(crate) DiceComputations, PhantomData<&'a ()>); -impl<'a> DiceComputationsParallel<'a> { - pub(crate) fn new(ctx: DiceComputations) -> Self { - Self(ctx, PhantomData) + /// Returns the current tracked invalidation paths for this computation node. + pub fn get_invalidation_paths(&mut self) -> DiceKeyTrackedInvalidationPaths { + self.0.get_invalidation_paths() } } -impl<'a> Deref for DiceComputationsParallel<'a> { - type Target = DiceComputations; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} +pub struct LinearRecomputeDiceComputations<'a>(pub(crate) LinearRecomputeDiceComputationsImpl<'a>); -impl<'a> DerefMut for DiceComputationsParallel<'a> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 +impl LinearRecomputeDiceComputations<'_> { + pub fn get(&self) -> DiceComputations<'_> { + self.0.get() } } -#[cfg(test)] -mod tests { - use std::sync::Arc; - - use allocative::Allocative; - use derive_more::Display; - use dupe::Dupe; - use indexmap::indexset; - - use crate::api::cycles::DetectCycles; - use crate::api::error::DiceErrorImpl; - use crate::api::storage_type::StorageType; - use crate::api::user_data::UserComputationData; - use crate::legacy::ctx::ComputationData; - use crate::legacy::cycles::RequestedKey; - use crate::legacy::incremental::graph::storage_properties::StorageProperties; - - #[derive(Clone, Dupe, Display, Debug, PartialEq, Eq, Hash, Allocative)] - struct K(usize); - impl StorageProperties for K { - type Key = K; - - type Value = (); - - fn key_type_name() -> &'static str { - unreachable!() - } - - fn to_key_any(key: &Self::Key) -> &dyn std::any::Any { - key - } - - fn storage_type(&self) -> StorageType { - unreachable!() - } - - fn equality(&self, _x: &Self::Value, _y: &Self::Value) -> bool { - unreachable!() - } - - fn validity(&self, _x: &Self::Value) -> bool { - unreachable!() +// This assertion assures we don't unknowingly regress the size of this critical future. +// TODO(cjhopman): We should be able to wrap this in a convenient assertion macro. 
+#[allow(unused, clippy::diverging_sub_expression)] +fn _assert_dice_compute_future_sizes() { + let ctx: DiceComputations = panic!(); + #[derive(Allocative, Debug, Clone, PartialEq, Eq, Hash)] + struct K(u64); + impl std::fmt::Display for K { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + panic!() } } - - #[test] - fn cycle_detection_when_no_cycles() -> anyhow::Result<()> { - let ctx = ComputationData::new(UserComputationData::new(), DetectCycles::Enabled); - let ctx = ctx.subrequest::(&K(1))?; - let ctx = ctx.subrequest::(&K(2))?; - let ctx = ctx.subrequest::(&K(3))?; - let _ctx = ctx.subrequest::(&K(4))?; - - Ok(()) - } - - #[test] - fn cycle_detection_when_cycles() -> anyhow::Result<()> { - let ctx = ComputationData::new(UserComputationData::new(), DetectCycles::Enabled); - let ctx = ctx.subrequest::(&K(1))?; - let ctx = ctx.subrequest::(&K(2))?; - let ctx = ctx.subrequest::(&K(3))?; - let ctx = ctx.subrequest::(&K(4))?; - match ctx.subrequest::(&K(1)) { - Ok(_) => { - panic!("should have cycle error") - } - Err(e) => match &*e.0 { - DiceErrorImpl::Cycle { - trigger, - cyclic_keys, - } => { - assert!( - (**trigger).get_key_equality() == K(1).get_key_equality(), - "expected trigger key to be `{}` but was `{}`", - K(1), - trigger - ); - assert_eq!( - cyclic_keys, - &indexset![ - Arc::new(K(1)) as Arc, - Arc::new(K(2)) as Arc, - Arc::new(K(3)) as Arc, - Arc::new(K(4)) as Arc - ] - ) - } - _ => { - panic!("wrong error type") - } - }, + #[async_trait] + impl Key for K { + type Value = Arc; + + async fn compute( + &self, + ctx: &mut DiceComputations, + cancellations: &CancellationContext, + ) -> Self::Value { + panic!() } - Ok(()) + fn equality(x: &Self::Value, y: &Self::Value) -> bool { + panic!() + } } + let k: K = panic!(); + let v = ctx.compute(&k); + let e = [0u8; 704 / 8]; + static_assertions::assert_eq_size_ptr!(&v, &e); } diff --git a/dice/dice/src/api/cycles.rs b/dice/dice/src/api/cycles.rs index b57f9966c4646..edfe05b4f58b9 100644 --- a/dice/dice/src/api/cycles.rs +++ b/dice/dice/src/api/cycles.rs @@ -31,10 +31,37 @@ impl FromStr for DetectCycles { type Err = InvalidType; fn from_str(s: &str) -> Result { - match s.to_uppercase().as_str() { - "ENABLED" => Ok(DetectCycles::Enabled), - "DISABLED" => Ok(DetectCycles::Disabled), - _ => Err(InvalidType(s.to_owned())), + if s.eq_ignore_ascii_case("ENABLED") { + Ok(DetectCycles::Enabled) + } else if s.eq_ignore_ascii_case("DISABLED") { + Ok(DetectCycles::Disabled) + } else { + Err(InvalidType(s.to_owned())) } } } + +#[cfg(test)] +mod tests { + use assert_matches::assert_matches; + + use super::*; + + #[test] + fn parse() { + assert_matches!("enabled".parse::(), Ok(DetectCycles::Enabled)); + assert_matches!("ENABLED".parse::(), Ok(DetectCycles::Enabled)); + assert_matches!( + "disabled".parse::(), + Ok(DetectCycles::Disabled) + ); + assert_matches!( + "DISABLED".parse::(), + Ok(DetectCycles::Disabled) + ); + assert_matches!( + "foo".parse::(), + Err(InvalidType(x)) if x == "foo" + ); + } +} diff --git a/dice/dice/src/api/data.rs b/dice/dice/src/api/data.rs index f00d5a3e12777..b85faaf386c4c 100644 --- a/dice/dice/src/api/data.rs +++ b/dice/dice/src/api/data.rs @@ -19,11 +19,11 @@ //! use crate::dice::DiceData; //! //! pub trait HasData { -//! fn my_data(&self) -> usize; +//! fn my_data(&self) -> usize; //! -//! fn other_data(&self) -> &String; +//! fn other_data(&self) -> &String; //! -//! fn set_multi(&mut self, i: usize, s: String); +//! fn set_multi(&mut self, i: usize, s: String); //! } //! //! 
struct HasDataContainer(usize, String); @@ -47,9 +47,7 @@ //! //! assert_eq!(data.other_data(), &"foo".to_string()); //! assert_eq!(data.my_data(), 1); -//! //! ``` -//! use std::collections::BTreeSet; diff --git a/dice/dice/src/api/demand.rs b/dice/dice/src/api/demand.rs new file mode 100644 index 0000000000000..b60615fc4eeeb --- /dev/null +++ b/dice/dice/src/api/demand.rs @@ -0,0 +1,142 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::any::TypeId; +use std::marker::PhantomData; + +pub(crate) trait DemandImpl { + fn tag() -> TypeId; +} + +pub(crate) struct DemandValue { + pub(crate) value: Option, +} + +pub(crate) struct DemandRef<'a, T: ?Sized + 'static> { + pub(crate) value: Option<&'a T>, +} + +impl DemandImpl for DemandValue { + fn tag() -> TypeId { + TypeId::of::>() + } +} + +impl<'a, T: ?Sized + 'static> DemandImpl for DemandRef<'a, T> { + fn tag() -> TypeId { + TypeId::of::>() + } +} + +/// Supplied value can be passed using this object. +pub struct Demand<'a> { + /// What we want? + demand_impl_tag: TypeId, + // SAFETY: Must be a `&'a mut` to a `DemandRef` or `DemandValue`, matching the `TypeId` above + demand_impl: *mut (), + _phantom: PhantomData<&'a mut ()>, +} + +impl<'a> Demand<'a> { + pub(crate) fn new(demand_impl: &'a mut I) -> Demand<'a> + where + I: DemandImpl, + { + Demand { + demand_impl_tag: I::tag(), + demand_impl: demand_impl as *mut _ as *mut (), + _phantom: PhantomData, + } + } + + /// Provide a value. Discard the value if the type is not matched. + pub fn provide_value(&mut self, value: T) { + self.provide_value_with(|| value); + } + + /// Provide a value. Discard the value if the type is not matched. + pub fn provide_value_with(&mut self, f: impl FnOnce() -> T) { + if self.demand_impl_tag == DemandValue::::tag() { + // SAFETY: We've checked that this is a pointer to a `DemandValue` + let demand_impl = unsafe { &mut *(self.demand_impl as *mut DemandValue) }; + demand_impl.value = Some(f()); + } + } + + /// Provide a reference. Discard the value if the type is not matched. + pub fn provide_ref(&mut self, value: &'a T) { + self.provide_ref_with(|| value); + } + + /// Provide a reference. Discard the value if the type is not matched. 
+ pub fn provide_ref_with(&mut self, f: impl FnOnce() -> &'a T) { + if self.demand_impl_tag == DemandRef::::tag() { + // SAFETY: We've checked that this is a pointer to a `DemandRef` + let demand_impl = unsafe { &mut *(self.demand_impl as *mut DemandRef) }; + demand_impl.value = Some(f()); + } + } +} + +#[cfg(test)] +mod tests { + use allocative::Allocative; + use async_trait::async_trait; + use buck2_futures::cancellation::CancellationContext; + + use crate::Demand; + use crate::DiceComputations; + use crate::DynKey; + use crate::Key; + + #[test] + fn test_request() { + #[derive(derive_more::Display, Eq, PartialEq, Hash, Allocative, Clone, Debug)] + struct MKey(u32); + + #[async_trait] + impl Key for MKey { + type Value = (); + + async fn compute( + &self, + _ctx: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> Self::Value { + panic!("not needed in tests") + } + + fn equality(_x: &Self::Value, _y: &Self::Value) -> bool { + panic!("not needed in tests") + } + + fn provide<'a>(&'a self, demand: &mut Demand<'a>) { + demand.provide_value(self.0); + demand.provide_ref::(self); + } + } + + trait Foo { + fn foo(&self) -> u32; + } + + impl Foo for MKey { + fn foo(&self) -> u32 { + self.0 + } + } + + let key = MKey(17); + let key = DynKey::from_key(key); + assert_eq!(Some(17), key.request_value::()); + assert_eq!(None, key.request_value::()); + assert_eq!(17, key.request_ref::().unwrap().foo()); + assert!(key.request_ref::().is_none()); + } +} diff --git a/dice/dice/src/api/dice.rs b/dice/dice/src/api/dice.rs index acf8a0281cd80..61e9784fe4f25 100644 --- a/dice/dice/src/api/dice.rs +++ b/dice/dice/src/api/dice.rs @@ -24,19 +24,19 @@ //! use dice::{Key, InjectedKey, DiceComputations, DiceDataBuilder, DiceData, DiceTransactionUpdater}; //! use std::sync::Arc; //! use allocative::Allocative; -//! use more_futures::cancellation::CancellationContext; +//! use buck2_futures::cancellation::CancellationContext; //! //! /// A configuration computation that consists of values that are pre-computed outside of DICE -//! pub struct InjectConfigs<'compute>(&'compute DiceComputations); +//! pub struct InjectConfigs<'compute, 'd>(&'compute mut DiceComputations<'d>); //! -//! impl<'compute> InjectConfigs<'compute> { -//! pub async fn get_config(&self) -> usize { +//! impl<'compute, 'd> InjectConfigs<'compute, 'd> { +//! pub async fn get_config(&mut self) -> usize { //! self.0.compute(&ConfigKey).await.unwrap() //! } //! } //! //! #[derive(Clone, Debug, Display, Eq, Hash, PartialEq, Allocative)] -//! #[display(fmt = "{:?}", self)] +//! #[display("{:?}", self)] //! struct ConfigKey; //! //! #[async_trait] @@ -48,13 +48,13 @@ //! } //! } //! -//! pub struct MyComputation<'compute>(&'compute DiceComputations); +//! pub struct MyComputation<'compute, 'd>(pub &'compute mut DiceComputations<'d>); //! -//! impl<'compute> MyComputation<'compute> { +//! impl<'compute, 'd> MyComputation<'compute, 'd> { //! // declaring a computation function -//! pub async fn compute_a(&self, a: usize, s: String) -> Arc { +//! pub async fn compute_a(&mut self, a: usize, s: String) -> Arc { //! #[derive(Clone, Display, Debug, Eq, Hash, PartialEq, Allocative)] -//! #[display(fmt = "{:?}", self)] +//! #[display("{:?}", self)] //! struct ComputeA(usize, String); //! //! #[async_trait] @@ -63,7 +63,7 @@ //! //! async fn compute(&self, ctx: &mut DiceComputations, _cancellations: &CancellationContext) -> Self::Value { //! // request for other computations on the self -//! 
let n = ctx.my_computation().compute_b(self.0).await; +//! let n = MyComputation(ctx).compute_b(self.0).await; //! Arc::new(self.1.repeat(n)) //! } //! @@ -76,13 +76,13 @@ //! } //! //! // second computation function -//! pub async fn compute_b(&self, a: usize) -> usize { +//! pub async fn compute_b(&mut self, a: usize) -> usize { //! self.0.compute(&ComputeB(a)).await.unwrap() //! } //! } //! //! #[derive(Clone, Display, Debug, Eq, Hash, PartialEq, Allocative)] -//! #[display(fmt = "{:?}", self)] +//! #[display("{:?}", self)] //! struct ComputeB(usize); //! //! #[async_trait] @@ -90,7 +90,7 @@ //! type Value = usize; //! //! async fn compute(&self, ctx: &mut DiceComputations, cancellations: &CancellationContext) -> Self::Value { -//! self.0 + ctx.injected_configs().get_config().await + ctx.global_data().static_data().len() +//! self.0 + InjectConfigs(ctx).get_config().await + ctx.global_data().static_data().len() //! } //! //! fn equality(x: &Self::Value,y: &Self::Value) -> bool { @@ -98,29 +98,6 @@ //! } //! } //! -//! // trait to register the computation to DICE -//! pub trait HasMyComputation { -//! fn my_computation(&self) -> MyComputation; -//! } -//! -//! // attach the declared computation to DICE via the context -//! impl HasMyComputation for DiceComputations { -//! fn my_computation(&self) -> MyComputation { -//! MyComputation(self) -//! } -//! } -//! -//! // trait to register the precomputed configs to DICE -//! pub trait HasInjectedConfig { -//! fn injected_configs(&self) -> InjectConfigs; -//! } -//! -//! impl HasInjectedConfig for DiceComputations { -//! fn injected_configs(&self) -> InjectConfigs { -//! InjectConfigs(self) -//! } -//! } -//! //! pub trait SetInjectedConfig { //! fn inject_config(&mut self, i: usize); //! } @@ -167,21 +144,21 @@ //! let mut ctx = engine.updater(); //! ctx.inject_config(0); //! -//! let ctx = rt.block_on(ctx.commit()); +//! let mut ctx = rt.block_on(ctx.commit()); //! //! // request the computation from DICE //! rt.block_on(async { -//! assert_eq!("aaaaaaaa", &*ctx.my_computation().compute_a(4, "a".into()).await); +//! assert_eq!("aaaaaaaa", &*MyComputation(&mut ctx).compute_a(4, "a".into()).await); //! }); //! //! let mut ctx = engine.updater(); //! ctx.inject_config(2); //! -//! let ctx = rt.block_on(ctx.commit()); +//! let mut ctx = rt.block_on(ctx.commit()); //! //! // request the computation from DICE //! rt.block_on(async { -//! assert_eq!("aaaaaaaaaa", &*ctx.my_computation().compute_a(4, "a".into()).await); +//! assert_eq!("aaaaaaaaaa", &*MyComputation(&mut ctx).compute_a(4, "a".into()).await); //! }); //! ``` @@ -210,7 +187,7 @@ pub struct Dice { impl Dice { pub fn builder() -> DiceDataBuilder { - DiceDataBuilder(DiceDataBuilderImpl::new_legacy()) + DiceDataBuilder(DiceDataBuilderImpl::new_modern()) } pub fn modern() -> DiceDataBuilder { @@ -255,7 +232,6 @@ impl Dice { pub fn which_dice(&self) -> WhichDice { match self.implementation { - DiceImplementation::Legacy(_) => WhichDice::Legacy, DiceImplementation::Modern(_) => WhichDice::Modern, } } diff --git a/dice/dice/src/api/dyn_key.rs b/dice/dice/src/api/dyn_key.rs new file mode 100644 index 0000000000000..a908c517eaa58 --- /dev/null +++ b/dice/dice/src/api/dyn_key.rs @@ -0,0 +1,57 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::fmt::Display; + +use ref_cast::ref_cast_custom; +use ref_cast::RefCastCustom; + +use crate::impls::key::DiceKeyErased; +use crate::Key; + +/// A type erased Key. Dice APIs that return key references will pass them as DynKey (unless they can be +/// passed as the specific Key type). +#[derive(RefCastCustom)] +#[repr(transparent)] +pub struct DynKey { + pub(crate) erased: DiceKeyErased, +} + +impl DynKey { + pub fn key_type_name(&self) -> &'static str { + self.erased.key_type_name() + } + + pub fn downcast_ref(&self) -> Option<&K> { + self.erased.as_any().downcast_ref() + } + + pub fn request_value(&self) -> Option { + self.erased.request_value() + } + + pub fn request_ref(&self) -> Option<&T> { + self.erased.request_ref() + } + + pub fn from_key(k: impl Key) -> Self { + Self { + erased: DiceKeyErased::key(k), + } + } + + #[ref_cast_custom] + pub(crate) const fn ref_cast(erased: &DiceKeyErased) -> &Self; +} + +impl Display for DynKey { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + Display::fmt(&self.erased, f) + } +} diff --git a/dice/dice/src/api/error.rs b/dice/dice/src/api/error.rs index 7b5ffc69692d9..609ed138a2cd7 100644 --- a/dice/dice/src/api/error.rs +++ b/dice/dice/src/api/error.rs @@ -16,6 +16,7 @@ use itertools::Itertools; use thiserror::Error; use crate::legacy::cycles::RequestedKey; +use crate::result::CancellationReason; #[derive(Clone, Dupe, Debug, Error, Allocative)] #[error(transparent)] @@ -40,13 +41,17 @@ impl DiceError { DiceError(Arc::new(DiceErrorImpl::DuplicateChange(key))) } - pub fn cancelled() -> Self { - DiceError(Arc::new(DiceErrorImpl::Cancelled)) + pub fn cancelled(reason: CancellationReason) -> Self { + DiceError(Arc::new(DiceErrorImpl::Cancelled(reason))) } pub fn duplicate_activation_data() -> Self { DiceError(Arc::new(DiceErrorImpl::DuplicateActivationData)) } + + pub(crate) fn injected_key_invalidated(key: Arc) -> Self { + DiceError(Arc::new(DiceErrorImpl::InjectedKeyGotInvalidation(key))) + } } #[derive(Debug, Error, Allocative)] @@ -60,10 +65,12 @@ pub(crate) enum DiceErrorImpl { DuplicateChange(Arc), #[error("Key `{0}` was reported as changed to an invalid value")] ChangedToInvalid(Arc), + #[error("Key `{0}` is an InjectedKey and received an invalidation")] + InjectedKeyGotInvalidation(Arc), /// NOTE: This isn't an error users normally see, since if the user is waiting on a result, the /// future doesn't get cancelled. - #[error("The evaluation of this key was cancelled")] - Cancelled, + #[error("The evaluation of this key was cancelled: {0}")] + Cancelled(CancellationReason), #[error( "Requested cycle_guard of type {}, but current guard has type {}", expected_type_name, diff --git a/dice/dice/src/api/events.rs b/dice/dice/src/api/events.rs index 69352dcdb7fcc..01379d1eed4ac 100644 --- a/dice/dice/src/api/events.rs +++ b/dice/dice/src/api/events.rs @@ -11,10 +11,10 @@ use allocative::Allocative; #[derive(Allocative, PartialEq, Eq, Debug)] pub enum DiceEvent { - /// Computation started. + /// Key evaluation started. Started { key_type: &'static str }, - /// Computation finished. + /// Key evaluation finished. Finished { key_type: &'static str }, /// Checking dependencies has started. 
@@ -22,6 +22,12 @@ pub enum DiceEvent {

     /// Checking dependencies has finished.
     CheckDepsFinished { key_type: &'static str },
+
+    /// Compute has started.
+    ComputeStarted { key_type: &'static str },
+
+    /// Compute has finished.
+    ComputeFinished { key_type: &'static str },
 }

 pub trait DiceEventListener: Allocative + Send + Sync + 'static {
diff --git a/dice/dice/src/api/injected.rs b/dice/dice/src/api/injected.rs
index 20ff4d2943a3b..d0c92d893a81d 100644
--- a/dice/dice/src/api/injected.rs
+++ b/dice/dice/src/api/injected.rs
@@ -13,12 +13,13 @@ use std::hash::Hash;

 use allocative::Allocative;
 use async_trait::async_trait;
+use buck2_futures::cancellation::CancellationContext;
 use dupe::Dupe;
-use more_futures::cancellation::CancellationContext;

 use crate::api::computations::DiceComputations;
 use crate::api::key::Key;
 use crate::api::storage_type::StorageType;
+use crate::InvalidationSourcePriority;

 /// Specialized version of `Key` above. This type of Key is never computed. It
 /// should always be injected onto the graph before being requested via
@@ -35,6 +36,10 @@ pub trait InjectedKey:
     type Value: Allocative + Dupe + Send + Sync + 'static;

     fn equality(x: &Self::Value, y: &Self::Value) -> bool;
+
+    fn invalidation_source_priority() -> InvalidationSourcePriority {
+        InvalidationSourcePriority::Normal
+    }
 }

 #[async_trait]
@@ -61,7 +66,10 @@ where
     }

     fn storage_type() -> StorageType {
-        // if we store more than usize max value, we are in trouble.
-        StorageType::LastN(usize::max_value())
+        StorageType::Injected
+    }
+
+    fn invalidation_source_priority() -> InvalidationSourcePriority {
+        K::invalidation_source_priority()
     }
 }
diff --git a/dice/dice/src/api/invalidation_tracking.rs b/dice/dice/src/api/invalidation_tracking.rs
new file mode 100644
index 0000000000000..5044e69e5ca0f
--- /dev/null
+++ b/dice/dice/src/api/invalidation_tracking.rs
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+//! Dice computations track "invalidation paths" for every node in the computation.
+//!
+//! Invalidation path tracking is a data flow analysis that tracks the flow of changed
+//! state through the computation graph. Directly invalidated or injected nodes
+//! are the root "invalidation sources". For every computed node, its invalidation paths
+//! will each either be "clean" (if all of its dependencies' invalidation paths are clean)
+//! or will point to one of its non-clean dependencies. When multiple deps
+//! are not clean, it will point to the one with the most recently changed invalidation
+//! source. In this way, we record an invalidation path through the graph to the invalidation
+//! source for every affected node.
+//!
+//! We track invalidations of two priorities: "normal" and "high". Each key type specifies
+//! its own invalidation source priority that is used when that node is directly invalidated or
+//! injected. Each node will record the most recent invalidation source for each priority.
+//! A high priority invalidation will also count for normal priority tracking (but not the
+//! reverse). So if a node has two deps A and B, and A has a normal priority invalidation
+//! at v1 and B has a high priority invalidation at v2, B would be recorded for both the normal
+//! and the high priority source.
If those versions were reversed (A at v2, B at v1), we'd +//! record A for the normal priority invalidation path (because its invalidation is more recent) and would +//! record B for the high priority invalidation path (because A is normal priority). + +use std::sync::Arc; + +use dupe::Dupe; +use gazebo::variants::VariantName; + +pub use crate::api::dyn_key::DynKey; +use crate::impls::dice::DiceModern; +use crate::impls::value::InvalidationPath; +use crate::impls::value::InvalidationPathNode; +use crate::versions::VersionNumber; + +/// The invalidation paths for a key. This is accessible from [`crate::DiceComputations::get_invalidation_paths()`] +#[derive(Debug)] +pub struct DiceKeyTrackedInvalidationPaths { + pub normal_priority_path: DiceTrackedInvalidationPath, + pub high_priority_path: DiceTrackedInvalidationPath, +} + +/// The invalidation path state for a computation node. +#[derive(Debug, VariantName)] +pub enum DiceTrackedInvalidationPath { + /// Indicates no (non-Ignored) invalidated source data flowed into this computation. + Clean, + /// The invalidation path state is unknown. We only track the most recent invalidation source for a node, and if we compute the value + /// at an older version we won't know the invalidation source. + Unknown, + /// Invalidated data has flowed into this computation. [DiceInvalidationPath] holds the invalidation path. + Invalidated(DiceInvalidationPath), +} + +pub struct DiceInvalidationPath { + dice: Arc, + data: crate::arc::Arc, +} + +impl std::fmt::Debug for DiceInvalidationPath { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("DiceInvalidationPath") + .field("data", &self.data) + .finish() + } +} + +/// A node in the invalidation path. +pub struct InvalidationPathEntry { + pub key: DynKey, + pub version: VersionNumber, +} + +impl DiceInvalidationPath { + /// Returns the "invalidation path". + /// + /// This will contain all nodes from the invalidation source to this node indicating how the + /// data has flowed to this node. The first entry of the result will be the invalidation source. 
+ pub fn get_invalidation_path(&self) -> Vec { + let mut res = Vec::new(); + + let mut data = &self.data; + loop { + let key = self.dice.key_index.get(data.key); + res.push(InvalidationPathEntry { + key: DynKey { erased: key.dupe() }, + version: data.version, + }); + match &data.cause { + InvalidationPath::Invalidated(cause) => { + data = cause; + } + _ => { + break; + } + } + } + + res.reverse(); + res + } +} + +impl DiceKeyTrackedInvalidationPaths { + pub(crate) fn new( + dice: Arc, + normal_priority_path: InvalidationPath, + high_priority_path: InvalidationPath, + ) -> Self { + Self { + normal_priority_path: DiceTrackedInvalidationPath::new( + dice.dupe(), + normal_priority_path, + ), + high_priority_path: DiceTrackedInvalidationPath::new(dice, high_priority_path), + } + } +} + +impl DiceTrackedInvalidationPath { + pub(crate) fn new(dice: Arc, path: InvalidationPath) -> Self { + match path { + InvalidationPath::Clean => DiceTrackedInvalidationPath::Clean, + InvalidationPath::Unknown => DiceTrackedInvalidationPath::Unknown, + InvalidationPath::Invalidated(data) => { + DiceTrackedInvalidationPath::Invalidated(DiceInvalidationPath { dice, data }) + } + } + } +} diff --git a/dice/dice/src/api/key.rs b/dice/dice/src/api/key.rs index d2f2bb1a6ef9c..86d00d1ffa96c 100644 --- a/dice/dice/src/api/key.rs +++ b/dice/dice/src/api/key.rs @@ -13,12 +13,13 @@ use std::hash::Hash; use allocative::Allocative; use async_trait::async_trait; +use buck2_futures::cancellation::CancellationContext; use dupe::Dupe; -use more_futures::cancellation::CancellationContext; use crate::api::computations::DiceComputations; use crate::api::storage_type::StorageType; use crate::introspection::graph::short_type_name; +use crate::Demand; /// The computation Key that maps to a value. The key will be used as an index /// for caching the computed values. @@ -63,6 +64,32 @@ pub trait Key: Allocative + Debug + Display + Clone + Eq + Hash + Send + Sync + } fn storage_type() -> StorageType { - StorageType::LastN(1) + StorageType::Normal } + + /// Machinery to provide. Default implementation is no-op. + /// + /// Provided value can be obtained with [`DynKey`](crate::DynKey). + fn provide<'a>(&'a self, demand: &mut Demand<'a>) { + let _ignore = demand; + } + + fn invalidation_source_priority() -> InvalidationSourcePriority { + InvalidationSourcePriority::Normal + } +} + +/// Dice tracks up to two invalidation paths for each node, a normal priority and a +/// high priority one. The high priority one considers root invalidated Keys that have a High priority, +/// the normal priority one will consider both Normal and High. The InvalidationSourcePriority +/// is used only for the priority of the Key as a source of invalidation, all nodes participate +/// in both normal and high for propagating invalidations. +#[derive(Allocative, Debug, Clone, Copy, Dupe, PartialEq, Eq)] +pub enum InvalidationSourcePriority { + /// The key will be ignored for invalidation source tracking. + Ignored, + /// The key can be an invalidation source only for the "normal" invalidation source path. + Normal, + /// The key can be an invalidation source for both the "normal" and "high" invalidation source paths. + High, } diff --git a/dice/dice/src/api/mod.rs b/dice/dice/src/api/mod.rs deleted file mode 100644 index b3c2885eb72bb..0000000000000 --- a/dice/dice/src/api/mod.rs +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. 
- * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! Public DICE API - -pub mod activation_tracker; -pub mod computations; -pub mod cycles; -pub mod data; -pub mod dice; -pub mod error; -pub mod events; -pub mod injected; -pub mod key; -pub mod opaque; -pub mod projection; -pub mod storage_type; -pub mod transaction; -pub mod user_data; -pub mod which; diff --git a/dice/dice/src/api/opaque.rs b/dice/dice/src/api/opaque.rs index d3fac1942493c..a2b70f0dc3bbf 100644 --- a/dice/dice/src/api/opaque.rs +++ b/dice/dice/src/api/opaque.rs @@ -11,9 +11,8 @@ use std::fmt; use std::fmt::Debug; use std::fmt::Formatter; -use crate::api::error::DiceResult; use crate::api::key::Key; -use crate::api::projection::ProjectionKey; +use crate::impls::opaque::OpaqueValueModern; use crate::opaque::OpaqueValueImpl; /// Computed value which is not directly visible to user. @@ -22,11 +21,11 @@ use crate::opaque::OpaqueValueImpl; /// so projection result is recorded as a dependency /// of a computation which requested the opaqued value, /// but the opaque value key is not. -pub struct OpaqueValue<'a, K: Key> { - pub(crate) implementation: OpaqueValueImpl<'a, K>, +pub struct OpaqueValue { + pub(crate) implementation: OpaqueValueImpl, } -impl<'a, K> Debug for OpaqueValue<'a, K> +impl Debug for OpaqueValue where K: Key, K::Value: Debug, @@ -36,15 +35,20 @@ where } } -impl<'a, K: Key> OpaqueValue<'a, K> { - pub(crate) fn new(implementation: OpaqueValueImpl<'a, K>) -> Self { +impl OpaqueValue { + pub(crate) fn new(implementation: OpaqueValueImpl) -> Self { Self { implementation } } - pub fn projection

    (&self, projection_key: &P) -> DiceResult - where - P: ProjectionKey, - { - self.implementation.projection(projection_key) + pub(crate) fn unpack_modern(&self) -> Option<&OpaqueValueModern> { + match &self.implementation { + OpaqueValueImpl::Modern(v) => Some(v), + } + } + + pub(crate) fn into_modern(self) -> Option> { + match self.implementation { + OpaqueValueImpl::Modern(v) => Some(v), + } } } diff --git a/dice/dice/src/api/projection.rs b/dice/dice/src/api/projection.rs index d1bcf385bacca..56912cf4568c9 100644 --- a/dice/dice/src/api/projection.rs +++ b/dice/dice/src/api/projection.rs @@ -53,7 +53,7 @@ pub trait ProjectionKey: } fn storage_type() -> StorageType { - StorageType::LastN(1) + StorageType::Normal } /// Provides a short informative name for this projection type. diff --git a/dice/dice/src/api/storage_type.rs b/dice/dice/src/api/storage_type.rs index fda7c9b7606d7..deec0299ab56b 100644 --- a/dice/dice/src/api/storage_type.rs +++ b/dice/dice/src/api/storage_type.rs @@ -12,10 +12,11 @@ use dupe::Dupe; use gazebo::variants::UnpackVariants; /// Storage type for a cached entry. -/// The oldest entry will be evicted once the cache stores more than N entries of the same key -/// request to compute them. TODO think about whether or not we can -/// optimize to delete injected keys when no more computation will request that version +/// +/// For an Injected entry, we must store all values that are still reachable because we +/// cannot recompute them. #[derive(UnpackVariants, Debug, Clone, Copy, Dupe, Allocative)] pub enum StorageType { - LastN(usize), + Normal, + Injected, } diff --git a/dice/dice/src/api/transaction.rs b/dice/dice/src/api/transaction.rs index 751c0a61e9bc8..ab4eec63014ab 100644 --- a/dice/dice/src/api/transaction.rs +++ b/dice/dice/src/api/transaction.rs @@ -141,7 +141,7 @@ impl DiceEquivalent for DiceEquality { } impl Deref for DiceTransaction { - type Target = DiceComputations; + type Target = DiceComputations<'static>; fn deref(&self) -> &Self::Target { self.0.as_computations() diff --git a/dice/dice/src/api/user_data.rs b/dice/dice/src/api/user_data.rs index 4e0258edc7d5c..1b0fd33ad3e9c 100644 --- a/dice/dice/src/api/user_data.rs +++ b/dice/dice/src/api/user_data.rs @@ -11,13 +11,14 @@ use std::any::Any; use std::sync::Arc; use allocative::Allocative; -use more_futures::spawner::Spawner; -use more_futures::spawner::TokioSpawner; +use buck2_futures::spawner::Spawner; +use buck2_futures::spawner::TokioSpawner; use crate::api::activation_tracker::ActivationTracker; use crate::api::data::DiceData; use crate::api::events::DiceEvent; use crate::api::events::DiceEventListener; +use crate::DynKey; /// Includes all user related computation-specific data. #[derive(Allocative)] @@ -44,25 +45,32 @@ pub struct UserComputationData { /// A UserCycleDetector can be used for custom cycle detection in the DICE computation. pub trait UserCycleDetector: Send + Sync + 'static { /// Called by DICE when it starts computing a key. `key` will be a user Key type (and so user can reliably downcast it to known types). - fn start_computing_key(&self, key: &dyn Any) -> Option>; + fn start_computing_key(&self, key: &DynKey) -> Option>; /// Called by DICE when the key finished computing. - fn finished_computing_key(&self, key: &dyn Any); + fn finished_computing_key(&self, key: &DynKey); } /// A UserCycleDetectorGuard is used to track the currently computing key. 
User code can access this through /// ComputationData::cycle_guard() (and then downcast it with as_any to potentially access custom cycle behavior). -pub trait UserCycleDetectorGuard: Send + Sync + 'static { +pub trait UserCycleDetectorGuard: AsAnyArc + Send + Sync + 'static { /// Called by dice when a dependency edge is encountered. - fn add_edge(&self, key: &dyn Any); - - /// This is used to allow user code to get at the concrete guard instance. - fn as_any(&self) -> &dyn Any; + fn add_edge(&self, key: &DynKey); /// Used in error messages. fn type_name(&self) -> &'static str; } +pub trait AsAnyArc { + fn as_any_arc(self: Arc) -> Arc; +} + +impl AsAnyArc for T { + fn as_any_arc(self: Arc) -> Arc { + self + } +} + #[derive(Allocative)] pub struct RequireDefault(()); diff --git a/dice/dice/src/api/which.rs b/dice/dice/src/api/which.rs index 20f8ab22675b4..6e39683f8ac7f 100644 --- a/dice/dice/src/api/which.rs +++ b/dice/dice/src/api/which.rs @@ -15,7 +15,7 @@ use gazebo::variants::VariantName; use thiserror::Error; /// which dice impl to use -#[derive(Clone, Dupe, Copy, Debug, VariantName, Allocative)] +#[derive(Clone, Dupe, Copy, Debug, VariantName, Allocative, PartialEq)] pub enum WhichDice { Legacy, Modern, diff --git a/dice/dice/src/arc.rs b/dice/dice/src/arc.rs index 2983fa96b2ff1..a7ba8e0c45941 100644 --- a/dice/dice/src/arc.rs +++ b/dice/dice/src/arc.rs @@ -30,12 +30,20 @@ impl Arc { } } +impl Arc { + #[inline] + pub(crate) fn make_mut(&mut self) -> &mut T { + triomphe::Arc::make_mut(&mut self.0) + } +} + impl Clone for Arc { #[inline] fn clone(&self) -> Self { Arc(self.0.clone()) } } + impl Dupe for Arc {} impl Deref for Arc { diff --git a/dice/dice/src/ctx.rs b/dice/dice/src/ctx.rs index c430e1c394d95..e35dc9f89b6d4 100644 --- a/dice/dice/src/ctx.rs +++ b/dice/dice/src/ctx.rs @@ -11,48 +11,48 @@ use std::future::Future; use std::sync::Arc; use allocative::Allocative; -use dupe::Dupe; use futures::future::BoxFuture; use futures::FutureExt; use gazebo::variants::UnpackVariants; -use more_futures::owning_future::OwningFuture; use crate::api::computations::DiceComputations; -use crate::api::computations::DiceComputationsParallel; use crate::api::data::DiceData; use crate::api::error::DiceResult; +use crate::api::invalidation_tracking::DiceKeyTrackedInvalidationPaths; use crate::api::key::Key; use crate::api::opaque::OpaqueValue; use crate::api::user_data::UserComputationData; use crate::api::user_data::UserCycleDetectorGuard; +use crate::impls::ctx::LinearRecomputeModern; use crate::impls::ctx::ModernComputeCtx; -use crate::legacy::ctx::DiceComputationsImplLegacy; use crate::opaque::OpaqueValueImpl; use crate::versions::VersionNumber; +use crate::LinearRecomputeDiceComputations; +use crate::ProjectionKey; +/// This is just a dispatcher to either of Legacy or Modern Dice. +/// +/// It converts their impl futures into a common impl (via left/right_future()) and does some +/// minor packing/unpacking of types (like OpaqueValue to/from OpaqueValueLegacy). Otherwise it +/// just forwards calls along. #[derive(Allocative, UnpackVariants)] -pub(crate) enum DiceComputationsImpl { - Legacy(Arc), - Modern(ModernComputeCtx), +pub(crate) enum DiceComputationsImpl<'a> { + Modern(ModernComputeCtx<'a>), } -impl DiceComputationsImpl { +impl<'d> DiceComputationsImpl<'d> { /// Gets all the result of of the given computation key. /// recorded as dependencies of the current computation for which this /// context is for. 
pub(crate) fn compute<'a, K>( - &'a self, + &'a mut self, key: &K, ) -> impl Future::Value>> + 'a where K: Key, { match self { - DiceComputationsImpl::Legacy(delegate) => delegate - .compute_opaque(key) - .map(|r| r.map(|x| x.into_value())) - .left_future(), - DiceComputationsImpl::Modern(delegate) => delegate.compute(key).right_future(), + DiceComputationsImpl::Modern(delegate) => delegate.compute(key), } } @@ -63,87 +63,92 @@ impl DiceComputationsImpl { pub(crate) fn compute_opaque<'a, K>( &'a self, key: &K, - ) -> impl Future>> + 'a + ) -> impl Future>> + 'a where K: Key, { match self { - DiceComputationsImpl::Legacy(delegate) => delegate - .compute_opaque(key) - .map(|r| r.map(|x| OpaqueValue::new(OpaqueValueImpl::Legacy(x)))) - .left_future(), DiceComputationsImpl::Modern(delegate) => delegate .compute_opaque(key) - .map(|r| r.map(|x| OpaqueValue::new(OpaqueValueImpl::Modern(x)))) - .right_future(), + .map(|r| r.map(|x| OpaqueValue::new(OpaqueValueImpl::Modern(x)))), + } + } + + pub fn projection<'a, K: Key, P: ProjectionKey>( + &'a mut self, + derive_from: &OpaqueValue, + projection_key: &P, + ) -> DiceResult { + match self { + DiceComputationsImpl::Modern(delegate) => delegate.projection( + derive_from.unpack_modern().expect("engine type mismatch"), + projection_key, + ), + } + } + + pub fn opaque_into_value<'a, K: Key>( + &'a mut self, + derive_from: OpaqueValue, + ) -> DiceResult { + match self { + DiceComputationsImpl::Modern(delegate) => Ok(delegate + .opaque_into_value(derive_from.into_modern().expect("engine type mismatch"))), } } /// Computes all the given tasks in parallel, returning an unordered Stream pub(crate) fn compute_many<'a, T: 'a>( - &'a self, + &'a mut self, computes: impl IntoIterator< - Item = impl for<'x> FnOnce(&'x mut DiceComputationsParallel<'a>) -> BoxFuture<'x, T> + Send, + Item = impl for<'x> FnOnce(&'x mut DiceComputations<'a>) -> BoxFuture<'x, T> + Send, >, ) -> Vec + 'a> { match self { - DiceComputationsImpl::Legacy(ctx) => { - // legacy dice does nothing special - computes - .into_iter() - .map(|work| { - OwningFuture::new( - DiceComputationsParallel::new(DiceComputations( - DiceComputationsImpl::Legacy(ctx.dupe()), - )), - work, - ) - .left_future() - }) - .collect() - } - DiceComputationsImpl::Modern(ctx) => ctx.compute_many(computes), + DiceComputationsImpl::Modern(delegate) => delegate.compute_many(computes), } } pub(crate) fn compute2<'a, T: 'a, U: 'a>( - &'a self, - compute1: impl for<'x> FnOnce(&'x mut DiceComputationsParallel<'a>) -> BoxFuture<'x, T> + Send, - compute2: impl for<'x> FnOnce(&'x mut DiceComputationsParallel<'a>) -> BoxFuture<'x, U> + Send, + &'a mut self, + compute1: impl for<'x> FnOnce(&'x mut DiceComputations<'a>) -> BoxFuture<'x, T> + Send, + compute2: impl for<'x> FnOnce(&'x mut DiceComputations<'a>) -> BoxFuture<'x, U> + Send, ) -> (impl Future + 'a, impl Future + 'a) { match self { - DiceComputationsImpl::Legacy(ctx) => { - // legacy dice does nothing special - ( - OwningFuture::new( - DiceComputationsParallel::new(DiceComputations( - DiceComputationsImpl::Legacy(ctx.dupe()), - )), - compute1, - ) - .left_future(), - OwningFuture::new( - DiceComputationsParallel::new(DiceComputations( - DiceComputationsImpl::Legacy(ctx.dupe()), - )), - compute2, - ) - .left_future(), - ) - } - DiceComputationsImpl::Modern(ctx) => { - let (f1, f2) = ctx.compute2(compute1, compute2); + DiceComputationsImpl::Modern(delegate) => delegate.compute2(compute1, compute2), + } + } - (f1.right_future(), f2.right_future()) + pub(crate) fn compute3<'a, 
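// (Illustrative aside, kept as a comment because it sits mid-hunk.) A sketch of
// how the parallel-compute entry points above are driven from user code,
// assuming the public `DiceComputations::compute2` mirrors the shape shown here
// and that `KeyA`/`KeyB` are some user-defined `Key` types:
//
//     let (a, b) = ctx.compute2(
//         |ctx| async move { ctx.compute(&KeyA).await }.boxed(),
//         |ctx| async move { ctx.compute(&KeyB).await }.boxed(),
//     );
//     let (a, b) = futures::join!(a, b);
//
// Each closure receives its own `&mut DiceComputations`, which is what lets the
// two computations record their dependencies independently while running in
// parallel.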
T: 'a, U: 'a, V: 'a>( + &'a mut self, + compute1: impl for<'x> FnOnce(&'x mut DiceComputations<'a>) -> BoxFuture<'x, T> + Send, + compute2: impl for<'x> FnOnce(&'x mut DiceComputations<'a>) -> BoxFuture<'x, U> + Send, + compute3: impl for<'x> FnOnce(&'x mut DiceComputations<'a>) -> BoxFuture<'x, V> + Send, + ) -> ( + impl Future + 'a, + impl Future + 'a, + impl Future + 'a, + ) { + match self { + DiceComputationsImpl::Modern(delegate) => { + delegate.compute3(compute1, compute2, compute3) } } } + pub(crate) fn with_linear_recompute<'a, T, Fut: Future + 'a>( + &'a mut self, + func: impl FnOnce(LinearRecomputeDiceComputations<'a>) -> Fut + 'a, + ) -> impl Future + 'a { + match self { + DiceComputationsImpl::Modern(delegate) => delegate.with_linear_recompute(func), + } + } + /// Data that is static per the entire lifetime of Dice. These data are initialized at the /// time that Dice is initialized via the constructor. pub(crate) fn global_data(&self) -> &DiceData { match self { - DiceComputationsImpl::Legacy(delegate) => delegate.global_data(), DiceComputationsImpl::Modern(delegate) => delegate.global_data(), } } @@ -154,29 +159,43 @@ impl DiceComputationsImpl { /// each have their own individual data. pub(crate) fn per_transaction_data(&self) -> &UserComputationData { match self { - DiceComputationsImpl::Legacy(delegate) => delegate.per_transaction_data(), DiceComputationsImpl::Modern(delegate) => delegate.per_transaction_data(), } } - pub(crate) fn cycle_guard(&self) -> DiceResult> { + pub(crate) fn cycle_guard(&self) -> DiceResult>> { match self { - DiceComputationsImpl::Legacy(delegate) => delegate.cycle_guard(), DiceComputationsImpl::Modern(delegate) => delegate.cycle_guard(), } } pub fn store_evaluation_data(&self, value: T) -> DiceResult<()> { match self { - DiceComputationsImpl::Legacy(delegate) => delegate.store_evaluation_data(value), DiceComputationsImpl::Modern(delegate) => delegate.store_evaluation_data(value), } } pub(crate) fn get_version(&self) -> VersionNumber { match self { - DiceComputationsImpl::Legacy(delegate) => delegate.get_version(), DiceComputationsImpl::Modern(delegate) => delegate.get_version(), } } + + pub fn get_invalidation_paths(&mut self) -> DiceKeyTrackedInvalidationPaths { + match self { + DiceComputationsImpl::Modern(delegate) => delegate.get_invalidation_paths(), + } + } +} + +pub(crate) enum LinearRecomputeDiceComputationsImpl<'a> { + Modern(LinearRecomputeModern<'a>), +} + +impl LinearRecomputeDiceComputationsImpl<'_> { + pub(crate) fn get(&self) -> DiceComputations<'_> { + match self { + LinearRecomputeDiceComputationsImpl::Modern(delegate) => delegate.get(), + } + } } diff --git a/dice/dice/src/future.rs b/dice/dice/src/future.rs new file mode 100644 index 0000000000000..c66ca0ca3a277 --- /dev/null +++ b/dice/dice/src/future.rs @@ -0,0 +1,102 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +// Copy-paste from `buck2_util`. + +use std::future::Future; +use std::iter; +use std::mem; + +use futures::stream::FuturesUnordered; +use futures::FutureExt; +use futures::StreamExt; + +// patternlint-disable-next-line buck2-no-futures-try-join-all +/// Semantically the same as `futures::future::try_join_all`, but bug free. 
+/// +/// +pub(crate) fn try_join_all<F, T, E, I>(i: I) -> impl Future<Output = Result<Vec<T>, E>> +where + F: Future<Output = Result<T, E>>, + I: IntoIterator<Item = F>, +{ + let mut i = i.into_iter().fuse(); + let f1 = i.next(); + let f2 = i.next(); + let f3 = i.next(); + + let (f1, f2, f3) = match (f1, f2, f3) { + (None, _, _) => { + return futures::future::ready(Ok(Vec::new())) + .left_future() + .left_future(); + } + (Some(f1), None, _) => { + return async move { Ok(vec![f1.await?]) } + .left_future() + .right_future(); + } + (Some(f1), Some(f2), None) => { + return async move { + let (v1, v2) = futures::future::try_join(f1, f2).await?; + Ok(vec![v1, v2]) + } + .right_future() + .left_future(); + } + (Some(f1), Some(f2), Some(f3)) => (f1, f2, f3), + }; + + #[allow(clippy::tuple_array_conversions)] + let mut futs: FuturesUnordered<_> = [f1, f2, f3] + .into_iter() + .chain(i) + .enumerate() + .map(|(i, f)| async move { (i, f.await) }) + .collect(); + + async move { + let mut outputs: Vec<Option<T>> = iter::repeat_with(|| None).take(futs.len()).collect(); + while let Some((i, res)) = futs.next().await { + match res { + Ok(v) => { + let prev = mem::replace(&mut outputs[i], Some(v)); + assert!(prev.is_none()); + } + Err(e) => return Err(e), + } + } + Ok(outputs.into_iter().map(|v| v.unwrap()).collect()) + } + .right_future() + .right_future() +} + +#[cfg(test)] +mod tests { + async fn slow_ok_or_err(ok: bool) -> Result<(), ()> { + if ok { + futures::future::pending().await + } else { + Err(()) + } + } + + /// Test that the futures are eagerly cancelled when the first one fails. This test fails with + /// the `futures` version of the function. + #[tokio::test] + async fn test_returns_eagerly() { + for size in [0, 1, 2, 10, 1000] { + let futs = std::iter::repeat_with(|| slow_ok_or_err(true)) + .take(size) + .chain([slow_ok_or_err(false)]); + super::try_join_all(futs).await.unwrap_err(); + } + } +} diff --git a/dice/dice/src/impls.rs b/dice/dice/src/impls.rs new file mode 100644 index 0000000000000..ed576f235abac --- /dev/null +++ b/dice/dice/src/impls.rs @@ -0,0 +1,27 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub(crate) mod cache; +pub(crate) mod core; +pub(crate) mod ctx; +mod deps; +pub(crate) mod dice; +pub(crate) mod evaluator; +pub(crate) mod events; +mod hash; +pub(crate) mod key; +mod key_index; +pub(crate) mod opaque; +pub(crate) mod task; +#[cfg(test)] +mod tests; +pub(crate) mod transaction; +pub(crate) mod user_cycle; +pub(crate) mod value; +pub(crate) mod worker; diff --git a/dice/dice/src/impls/cache.rs b/dice/dice/src/impls/cache.rs index a97a05603f2d8..c6db1bf5292b7 100644 --- a/dice/dice/src/impls/cache.rs +++ b/dice/dice/src/impls/cache.rs @@ -15,7 +15,7 @@ use std::sync::atomic::Ordering; use allocative::Allocative; use dashmap::DashMap; use dupe::Dupe; -use fnv::FnvBuildHasher; +use fxhash::FxBuildHasher; use lock_free_hashtable::sharded::ShardedLockFreeRawTable; use crate::arc::Arc; @@ -27,7 +27,7 @@ use crate::impls::value::DiceComputedValue; struct Data { completed: ShardedLockFreeRawTable, 64>, /// Completed tasks lazily moved into `completed` from this map. - storage: DashMap<DiceKey, DiceTask, FnvBuildHasher>, + storage: DashMap<DiceKey, DiceTask, FxBuildHasher>, is_cancelled: AtomicBool, } @@ -45,8 +45,8 @@ struct DiceCompletedTask { /// Reference to the task in the cache.
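Returning to `try_join_all` above: a standalone sketch (illustrative, not from the patch) of its core idea. Futures are polled through a `FuturesUnordered` tagged with their input indices so completion order can differ from input order, and the first `Err` drops the stream, eagerly cancelling every still-pending future. Only the `futures` crate is assumed.

    use futures::stream::{FuturesUnordered, StreamExt};
    use std::future::Future;

    async fn try_join_all_sketch<F, T, E>(futs: impl IntoIterator<Item = F>) -> Result<Vec<T>, E>
    where
        F: Future<Output = Result<T, E>>,
    {
        // Tag each future with its input position so results can be re-ordered.
        let mut tagged: FuturesUnordered<_> = futs
            .into_iter()
            .enumerate()
            .map(|(i, f)| async move { (i, f.await) })
            .collect();
        let mut out: Vec<Option<T>> = std::iter::repeat_with(|| None).take(tagged.len()).collect();
        while let Some((i, res)) = tagged.next().await {
            // `?` on the first error returns immediately, dropping `tagged`
            // and with it every future that has not completed yet.
            out[i] = Some(res?);
        }
        Ok(out.into_iter().map(|v| v.unwrap()).collect())
    }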
pub(crate) enum DiceTaskRef<'a> { Computed(DiceComputedValue), - Occupied(dashmap::mapref::entry::OccupiedEntry<'a, DiceKey, DiceTask, FnvBuildHasher>), - Vacant(dashmap::mapref::entry::VacantEntry<'a, DiceKey, DiceTask, FnvBuildHasher>), + Occupied(dashmap::mapref::entry::OccupiedEntry<'a, DiceKey, DiceTask, FxBuildHasher>), + Vacant(dashmap::mapref::entry::VacantEntry<'a, DiceKey, DiceTask, FxBuildHasher>), TransactionCancelled, } @@ -186,18 +186,17 @@ mod tests { use allocative::Allocative; use async_trait::async_trait; + use buck2_futures::cancellation::CancellationContext; + use buck2_futures::spawner::TokioSpawner; use derive_more::Display; use dupe::Dupe; use futures::FutureExt; - use more_futures::cancellation::CancellationContext; - use more_futures::spawner::TokioSpawner; use crate::api::computations::DiceComputations; use crate::api::key::Key; use crate::arc::Arc; use crate::impls::cache::DiceTaskRef; use crate::impls::cache::SharedCache; - use crate::impls::core::graph::history::CellHistory; use crate::impls::key::DiceKey; use crate::impls::key::ParentKey; use crate::impls::task::dice::DiceTask; @@ -206,6 +205,8 @@ mod tests { use crate::impls::value::DiceKeyValue; use crate::impls::value::DiceValidValue; use crate::impls::value::MaybeValidDiceValue; + use crate::impls::value::TrackedInvalidationPaths; + use crate::versions::VersionRanges; #[derive(Allocative, Clone, Debug, Display, Eq, PartialEq, Hash)] struct K; @@ -234,7 +235,8 @@ mod tests { MaybeValidDiceValue::valid(DiceValidValue::testing_new( DiceKeyValue::::new(val), )), - Arc::new(CellHistory::empty()), + Arc::new(VersionRanges::new()), + TrackedInvalidationPaths::clean(), )); Box::new(()) as Box diff --git a/dice/dice/src/impls/core/mod.rs b/dice/dice/src/impls/core.rs similarity index 100% rename from dice/dice/src/impls/core/mod.rs rename to dice/dice/src/impls/core.rs diff --git a/dice/dice/src/impls/core/graph.rs b/dice/dice/src/impls/core/graph.rs new file mode 100644 index 0000000000000..6346b679e3df5 --- /dev/null +++ b/dice/dice/src/impls/core/graph.rs @@ -0,0 +1,16 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! The versioned dice graph of dependencies +#[allow(unused)] +pub(crate) mod introspection; +mod lazy_deps; +pub(crate) mod nodes; +pub(crate) mod storage; +pub(crate) mod types; diff --git a/dice/dice/src/impls/core/graph/dependencies.rs b/dice/dice/src/impls/core/graph/dependencies.rs deleted file mode 100644 index 228d9f34ccdac..0000000000000 --- a/dice/dice/src/impls/core/graph/dependencies.rs +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! 
Represents the forward and backward dependencies of the computation graph -use std::collections::hash_map::Entry; - -use allocative::Allocative; -use dupe::Dupe; - -use crate::arc::Arc; -use crate::impls::key::DiceKey; -use crate::versions::VersionNumber; -use crate::HashMap; - -#[derive(Allocative, Clone)] -pub(crate) struct VersionedDependencies { - /// once the deps at a particular version are written, they are final and never modified - /// We only store the dependencies relevant to the most recent result - recorded_at: VersionNumber, - deps: Arc<Vec<DiceKey>>, -} - -impl Dupe for VersionedDependencies { - // triomphe is dupe -} - -impl VersionedDependencies { - pub(crate) fn new(recorded_at: VersionNumber, deps: Arc<Vec<DiceKey>>) -> Self { - Self { recorded_at, deps } - } - - pub(crate) fn deps(&self) -> Arc<Vec<DiceKey>> { - self.deps.dupe() - } - - pub(crate) fn replace_deps(&mut self, v: VersionNumber, deps: Arc<Vec<DiceKey>>) { - if self.recorded_at < v { - // we only ever write the newest version of the dependencies of this node for simplicity - // That way, if we are ever dirtied, we just check if the latest version of the deps - // have changed at the dirtied version which only requires spawning one set of deps. - // It might cause us to falsely fail to reuse some nodes, but this is less memory - // and less work per node when in incremental cases. - self.deps = deps; - self.recorded_at = v; - } - } -} - -// the set of reverse dependencies of a node -#[derive(Allocative, Clone)] // TODO(bobyf) remove need to clone -pub(crate) struct VersionedRevDependencies { - rdeps: HashMap<DiceKey, VersionNumber>, -} - -impl VersionedRevDependencies { - pub(crate) fn new() -> Self { - Self { - rdeps: Default::default(), - } - } - - pub(crate) fn add_rdep(&mut self, dependent: DiceKey, current_version: VersionNumber) { - match self.rdeps.entry(dependent) { - Entry::Occupied(entry) => { - if *entry.get() < current_version { - entry.replace_entry(current_version); - } - } - Entry::Vacant(v) => { - v.insert(current_version); - } - } - } - - pub(crate) fn rdeps(&self) -> &HashMap<DiceKey, VersionNumber> { - &self.rdeps - } -} diff --git a/dice/dice/src/impls/core/graph/history.rs b/dice/dice/src/impls/core/graph/history.rs deleted file mode 100644 index 61d79c1f3d8db..0000000000000 --- a/dice/dice/src/impls/core/graph/history.rs +++ /dev/null @@ -1,918 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::borrow::Borrow; -use std::cmp; -use std::collections::Bound; -use std::fmt::Debug; - -use allocative::Allocative; -use dupe::IterDupedExt; -use gazebo::variants::VariantName; -use sorted_vector_map::sorted_vector_set; -use sorted_vector_map::SortedVectorMap; -use sorted_vector_map::SortedVectorSet; - -use crate::versions::VersionNumber; -use crate::versions::VersionRange; -use crate::versions::VersionRanges; - -/// The history of one computation unit. -/// The history is one of the `HistoryState`s. -/// -/// The semantics of `CellHistory` is such that the state is Unknown for all versions until a -/// particular version `v0` is verified, upon which for all versions `v1` where `v1 > v0`, `v1 < d` -/// where `d` is the minimum dirtied version larger than `v0`, the state is Verified.
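To make the Verified/Unknown semantics just described concrete, here is a small sketch (illustrative, using the `testing` helpers defined further down in this removed file): with `verified = {v0}` and `dirtied = {v2}`, every version from v0 up to, but excluding, v2 is Verified, and v2 onwards is Unknown.

    let h = CellHistory::testing_new(&[VersionNumber::new(0)], &[VersionNumber::new(2)]);
    h.get_history(&VersionNumber::new(1)).assert_verified(); // verified carries forward from v0
    h.get_history(&VersionNumber::new(2)).assert_unknown(); // dirtied at v2, so unknown from here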
-/// This is technically better represented as a single vec of history states, but we'll change the -/// actual representation later. -// TODO(bobyf): this data structure can probably be way better optimized -#[derive(Debug, Allocative, Clone)] -pub(crate) struct CellHistory { - verified: SortedVectorSet<VersionNumber>, - /// versions of dirty, mapping to whether or not it's a forced dirty (which means recompute - /// regardless of whether the node changed) - dirtied: SortedVectorMap<VersionNumber, bool>, -} - -impl CellHistory { - pub(crate) fn verified(verified: VersionNumber) -> Self { - Self { - verified: sorted_vector_set![verified], - dirtied: SortedVectorMap::new(), - } - } - - #[cfg(test)] - pub(crate) fn dirtied(dirty: VersionNumber, force: bool) -> Self { - use sorted_vector_map::sorted_vector_map; - - Self { - verified: SortedVectorSet::new(), - dirtied: sorted_vector_map![dirty => force], - } - } - - pub(crate) fn empty() -> Self { - Self { - verified: SortedVectorSet::new(), - dirtied: SortedVectorMap::new(), - } - } - - /// Makes a duplicate version of the history, and marks the given version as where a new value - /// was verified. If applicable, another version is returned, representing the earliest version - /// at which the original history is newer than this newly recorded history. - /// - /// |earliest_valid| is the earliest possible validated version for the new history. - /// This is usually determined by the latest verified version that is relevant to - /// the current verified_at amongst all the dependencies. - /// - /// That is, given a history with state `[verified, dirty, unknown, dirty, verified, dirty]`, - /// and `recorded_at = 2`, we would create a new history that is - /// `[unknown, verified, verified, dirty, dirty]`. The portion of original history that's - /// newer would be `[unknown, unknown, unknown, unknown, verified, dirty]`. - /// Returns the version `d2`, the version that is newer if any, and the new history. - /// - /// This function also accounts for propagating dirtiness based on history from dependencies. - /// See `propagate_dirty_deps`. - pub(crate) fn make_new_verified_history( - &self, - verified_at: VersionNumber, - earliest_valid: Option<VersionNumber>, - ) -> (VersionNumber, Option<VersionNumber>, Self) { - let mut verified = self.verified.clone(); - let mut dirtied = self.dirtied.clone(); - - // If we don't have any bounds on the earliest this version can be verified, - // we assume that it is being set at just the current version. - let since = self.min_validatable_version(verified_at, earliest_valid); - { - for vt in verified - .range((Bound::Unbounded, Bound::Excluded(since))) - .duped() - .collect::<Vec<_>>() - { - verified.remove(&vt); - } - - for vt in dirtied - .range((Bound::Unbounded, Bound::Excluded(since))) - .map(|e| *e.0) - .collect::<Vec<_>>() - { - dirtied.remove(&vt); - } - } - - let up_to = verified - .range((Bound::Excluded(verified_at), Bound::Unbounded)) - .min() - .copied(); - - if let Some(v) = up_to.as_ref() { - for vt in verified - .range((Bound::Included(*v), Bound::Unbounded)) - .copied() - .collect::<Vec<_>>() - { - verified.remove(&vt); - } - }; - - verified.insert(since); - dirtied.remove(&since); - - let new = CellHistory { verified, dirtied }; - - (since, up_to, new) - } - - /// Marks the given version as verified on the history, returning the oldest version that - /// became verified due to marking this node as verified.
- /// For example, assuming no deps, if dirtied at v2, and marking v4, the oldest version that - /// became verified would be v2, since marking v4 with no changes from v2 to v4 implies that - /// all of v2, v3, v4 are verified. - /// But if instead there was a dep v3, v2 could not be marked verified, but v3 & v4 would be. - /// - /// Dependencies history are accounted for by propagating their dirtied versions to the - /// verified history through 'propagate_dirty_from_deps' - pub(crate) fn mark_verified(&mut self, v: VersionNumber, deps: I) -> VersionNumber - where - I: IntoIterator, - I::IntoIter: Clone, - H: Borrow, - { - let deps_iter = deps.into_iter(); - // We can't be verified before any of our deps were most-recently verified. - let all_deps_unchanged_since = deps_iter - .clone() - .filter_map(|dep| dep.borrow().latest_verified_before(v)) - .max(); - - // We only need to propagate the earliest “dirty” from any of its deps, as we can rely on - // the recomputation from that dirty version to re-propagate any newer “dirty” as needed. - let mut min_dirty = None; - for dep in deps_iter { - if let Some(dirty) = dep - .borrow() - .dirtied - .range((Bound::Excluded(v), Bound::Unbounded)) - .next() - .map(|e| *e.0) - { - min_dirty = min_dirty.map_or(Some(dirty), |old| Some(cmp::min(old, dirty))) - } - } - - self.mark_verified_modern(v, all_deps_unchanged_since, min_dirty) - } - - /// Marks the given version as verified on the history, returning the oldest version that - /// became verified due to marking this node as verified. - /// For example, assuming no deps, if dirtied at v2, and marking v4, the oldest version that - /// became verified would be v2, since marking v4 with no changes from v2 to v4 implies that - /// all of v2, v3, v4 are verified. - /// But if instead there was a dep v3, v2 could not be marked verified, but v3 & v4 would be. - /// - /// Dependencies history are accounted for by propagating their dirtied versions to the - /// verified history through 'propagate_dirty_from_deps' - pub(crate) fn mark_verified_modern( - &mut self, - v: VersionNumber, - all_deps_unchanged_since: Option, - first_dep_dirtied: Option, - ) -> VersionNumber -where { - // We can't be verified before any of our deps were most-recently verified. - let min_validated = self.min_validatable_version(v, all_deps_unchanged_since); - let changed_since = if let Some(prev_verified) = self - .verified - .range((Bound::Excluded(min_validated), Bound::Included(v))) - .next() - { - *prev_verified - } else { - if self.dirtied.remove(&min_validated).is_some() { - if let Some(prev_valid) = self - .verified - .range((Bound::Unbounded, Bound::Excluded(v))) - .max() - { - if self - .dirtied - .range((Bound::Included(*prev_valid), Bound::Included(min_validated))) - .next() - .is_some() - { - self.verified.insert(min_validated); - } - } - } else { - self.verified.insert(min_validated); - } - min_validated - }; - - self.propagate_from_deps_version(changed_since, first_dep_dirtied); - - changed_since - } - - /// return true if the history was changed, else false. - pub(crate) fn mark_invalidated(&mut self, v: VersionNumber) -> bool { - if self.dirtied.contains_key(&v) { - return false; - } - - self.dirty(v, false); - true - } - - /// Return true if the history was changed, else false. 
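A quick sketch (illustrative) of the `mark_verified` example described above: dirtied at v2, then verified at v4 with no deps, so everything from v2 through v4 becomes verified.

    let mut h = CellHistory::verified(VersionNumber::new(0));
    assert!(h.mark_invalidated(VersionNumber::new(2)));
    let since = h.mark_verified(VersionNumber::new(4), std::iter::empty::<CellHistory>());
    assert_eq!(since, VersionNumber::new(2)); // v2, v3, v4 are now all verified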
- /// This forces this node to be invalidated and history to be "dirty" so that it's recomputed - /// regardless of whether dependencies changed - pub(crate) fn force_dirty(&mut self, v: VersionNumber) -> bool { - if self.dirtied.get(&v).map_or(false, |d| *d) { - // only noop if this was already force dirtied, otherwise, we override the dirty with - // force dirty - false - } else { - self.dirty(v, true); - true - } - } - - /// Returns the ranges of verified versions. This is returned as a 'VersionRanges' - pub(crate) fn get_verified_ranges(&self) -> VersionRanges { - let mut verified = self.verified.iter().peekable(); - let mut dirtied = self.dirtied.iter().peekable(); - - let mut out = VersionRanges::new(); - let mut last_verified = None; - loop { - match (verified.peek(), dirtied.peek()) { - (Some(v), Some(d)) => { - if v < &d.0 { - last_verified.get_or_insert(*verified.next().expect("we just peeked it")); - } else if let Some(begin) = last_verified.take() { - out.insert(VersionRange::bounded( - begin, - *dirtied.next().expect("we just peeked it").0, - )); - } else { - dirtied.next(); - } - } - (Some(_), None) => { - out.insert(VersionRange::begins_with( - last_verified.unwrap_or(*verified.next().expect("we just peeked it")), - )); - return out; - } - (None, Some(_)) => { - if let Some(begin) = last_verified.take() { - out.insert(VersionRange::bounded( - begin, - *dirtied.next().expect("we just peeked it").0, - )); - } else { - dirtied.next(); - } - return out; - } - (None, None) => { - if let Some(begin) = last_verified { - out.insert(VersionRange::begins_with(begin)) - } - return out; - } - } - } - } - - pub(crate) fn get_history(&self, v: &VersionNumber) -> HistoryState { - if let Some(last_verified) = self - .verified - .range((Bound::Unbounded, Bound::Included(*v))) - .max() - .copied() - { - let mut is_dirty = false; - let mut is_force_dirty = false; - - // We need to look for force-dirtied versions across all dirtied versions, since we - // guarantee that when force-dirty is called there will be a recomputation. - for (_version, force_dirty) in self - .dirtied - .range((Bound::Included(last_verified), Bound::Included(*v))) - { - is_dirty = true; - if *force_dirty { - is_force_dirty = true; - } - } - - if is_force_dirty { - return HistoryState::Dirty; - } - - if is_dirty { - return HistoryState::Unknown(self.get_verified_ranges()); - } - - HistoryState::Verified - } else { - HistoryState::Unknown(self.get_verified_ranges()) - } - } - - pub(crate) fn latest_dirtied(&self) -> Option<VersionNumber> { - self.dirtied.iter().max().map(|d| *d.0) - } - - pub(crate) fn latest_verified_before(&self, v: VersionNumber) -> Option<VersionNumber> { - self.verified - .range((Bound::Unbounded, Bound::Included(v))) - .next_back() - .copied() - } - - pub(crate) fn first_dirty_after(&self, v: VersionNumber) -> Option<VersionNumber> { - self.dirtied - .range((Bound::Excluded(v), Bound::Unbounded)) - .next() - .map(|(v, _)| *v) - } - - pub(crate) fn first_verified_after(&self, v: VersionNumber) -> Option<VersionNumber> { - self.verified - .range((Bound::Excluded(v), Bound::Unbounded)) - .next() - .copied() - } - - /// When a node is recomputed to the same value as its existing history, but with a new set of - /// dependencies, that node needs to know when it will next be dirtied due to changes in its - /// new dependencies. - /// This will make the current history propagate any dirty versions necessary from the given - /// set of dependencies at a version 'v'.
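A sketch (illustrative, mirroring the `cell_history_propagates` test below) of the propagation just described: a node verified at v0 whose new dep is dirtied at v1 becomes Unknown from v1.

    let mut h = CellHistory::verified(VersionNumber::new(0));
    h.propagate_from_deps(
        VersionNumber::new(0),
        &[CellHistory::dirtied(VersionNumber::new(1), false)],
    );
    h.get_history(&VersionNumber::new(1)).assert_unknown(); // the dep changes at v1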
- pub(crate) fn propagate_from_deps( - &mut self, - v: VersionNumber, - deps: impl IntoIterator, - ) where - H: Borrow, - { - // We only need to propagate the earliest “dirty” from any of its deps, as we can rely on - // the recomputation from that dirty version to re-propagate any newer “dirty” as needed. - let mut min_dirty = None; - for dep in deps { - if let Some(dirty) = dep - .borrow() - .dirtied - .range((Bound::Excluded(v), Bound::Unbounded)) - .next() - .map(|e| *e.0) - { - min_dirty = min_dirty.map_or(Some(dirty), |old| Some(cmp::min(old, dirty))) - } - } - - self.propagate_from_deps_version(v, min_dirty) - } - - /// When a node is recomputed to the same value as its existing history, but with a new set of - /// dependencies, that node needs to know when itself will next be dirtied due to changes in its - /// new dependencies. - /// This will make the current history propagate any dirty versions necessary from the given - /// set of dependencies at a version 'v'. - pub(crate) fn propagate_from_deps_version( - &mut self, - v: VersionNumber, - deps_min_version: Option, - ) { - if let Some(min_dirty) = deps_min_version { - // By verifying the given version, we only need to fill in the history up to the next - // smallest verified and dirtied version. Any dirties beyond that would be irrelevant - // as either we would already be dirtied, or some newer version have already verified us - // and these propagated deps are irrelevant. - let relevant_hist_up_to = { - let nearest_verified = self - .verified - .range((Bound::Excluded(v), Bound::Unbounded)) - .next(); - let nearest_dirted = self - .dirtied - .range((Bound::Excluded(v), Bound::Unbounded)) - .next() - .map(|e| e.0); - - match (nearest_verified, nearest_dirted) { - (Some(v), Some(d)) => Some(*cmp::min(v, d)), - (Some(v), None) => Some(*v), - (None, Some(d)) => Some(*d), - (None, None) => None, - } - }; - - if relevant_hist_up_to.map_or(true, |rel_v| min_dirty < rel_v) { - self.dirtied.insert(min_dirty, false); - } - } - } - - fn min_validatable_version( - &self, - verified_at: VersionNumber, - earliest_valid: Option, - ) -> VersionNumber { - let last_dirtied = self - .dirtied - .range((Bound::Unbounded, Bound::Included(verified_at))) - .next_back() - .map(|r| *r.0); - // If we don't have any bounds on the earliest this version can be verified, - // we assume that it is being set at just the current version. - [last_dirtied, earliest_valid] - .into_iter() - .flatten() - .max() - .unwrap_or(verified_at) - } -} - -impl CellHistory { - fn dirty(&mut self, v: VersionNumber, force: bool) { - assert!( - !self - .verified - .range((Bound::Included(v), Bound::Unbounded)) - .any(|_| true), - "should never get into state where we are dirtying a version `{:?}` that was explicitly marked as verified. 
Verified versions `{:?}`", - v, - self.verified - ); - - self.dirtied.insert(v, force); - } -} - -/// The various different states that a particular 'VersionNumber' can be in with respect to a -/// 'CellHistory' -#[derive(VariantName)] -pub(crate) enum HistoryState { - /// known to be verified - Verified, - /// version is in unknown state, where the last known verified version is returned - Unknown(VersionRanges), - /// version is known to be dirty - Dirty, -} - -#[cfg(test)] -pub(crate) mod testing { - use gazebo::variants::VariantName; - - use crate::impls::core::graph::history::CellHistory; - use crate::impls::core::graph::history::HistoryState; - use crate::versions::VersionNumber; - use crate::versions::VersionRanges; - - pub(crate) trait CellHistoryExt { - fn testing_new(verified: &[VersionNumber], dirtied: &[VersionNumber]) -> Self; - - fn get_verified(&self) -> Vec; - } - - impl CellHistoryExt for CellHistory { - fn testing_new(verified: &[VersionNumber], dirtied: &[VersionNumber]) -> Self { - CellHistory { - verified: verified.iter().copied().collect(), - dirtied: dirtied.iter().map(|v| (*v, false)).collect(), - } - } - - fn get_verified(&self) -> Vec { - self.verified.iter().copied().collect() - } - } - - pub(crate) trait HistoryExt { - fn assert_verified(&self); - - fn assert_unknown(&self) -> &VersionRanges; - - fn assert_dirty(&self); - } - - impl HistoryExt for HistoryState { - fn assert_verified(&self) { - match self { - HistoryState::Verified => {} - x => panic!("expected Verified but was {}", x.variant_name()), - } - } - - fn assert_unknown(&self) -> &VersionRanges { - match self { - HistoryState::Unknown(v) => v, - x => panic!("expected Unknown but was {}", x.variant_name()), - } - } - - fn assert_dirty(&self) { - match self { - HistoryState::Dirty => {} - x => panic!("expected Dirty but was {}", x.variant_name()), - } - } - } -} - -mod introspection { - use crate::impls::core::graph::history::CellHistory; - use crate::versions::VersionNumber; - - impl CellHistory { - pub fn to_introspectable(&self) -> crate::introspection::graph::CellHistory { - crate::introspection::graph::CellHistory::new( - self.verified - .iter() - .map(VersionNumber::to_introspectable) - .collect(), - self.dirtied - .iter() - .map(|(k, v)| (k.to_introspectable(), *v)) - .collect(), - ) - } - } -} - -#[cfg(test)] -mod tests { - use sorted_vector_map::sorted_vector_map; - use sorted_vector_map::sorted_vector_set; - use sorted_vector_map::SortedVectorSet; - - use crate::impls::core::graph::history::testing::CellHistoryExt; - use crate::impls::core::graph::history::testing::HistoryExt; - use crate::impls::core::graph::history::CellHistory; - use crate::versions::testing::VersionRangesExt; - use crate::versions::VersionNumber; - use crate::versions::VersionRange; - use crate::versions::VersionRanges; - - #[test] - fn cell_history_propagates() { - let mut hist = CellHistory::verified(VersionNumber::new(0)); - hist.propagate_from_deps( - VersionNumber::new(0), - &[ - CellHistory::dirtied(VersionNumber::new(1), false), - CellHistory::dirtied(VersionNumber::new(4), true), - ], - ); - assert_eq!( - hist.get_history(&VersionNumber::new(1)).assert_unknown(), - &VersionRanges::testing_new(sorted_vector_set![VersionRange::bounded( - VersionNumber::new(0), - VersionNumber::new(1) - )]) - ); - - // now verify it if the history was already verified at some future versions. 
- let mut hist = CellHistory { - verified: sorted_vector_set![VersionNumber::new(0), VersionNumber::new(2)], - dirtied: Default::default(), - }; - // we should ignore dirties that occur after the known version - hist.propagate_from_deps( - VersionNumber::new(0), - &[CellHistory::dirtied(VersionNumber::new(4), false)], - ); - hist.get_history(&VersionNumber::new(2)).assert_verified(); - hist.get_history(&VersionNumber::new(4)).assert_verified(); - - // we should propagate dirties that occur before the known version - hist.propagate_from_deps( - VersionNumber::new(0), - &[CellHistory::dirtied(VersionNumber::new(1), false)], - ); - assert_eq!( - hist.get_history(&VersionNumber::new(1)).assert_unknown(), - &VersionRanges::testing_new(sorted_vector_set![ - VersionRange::bounded(VersionNumber::new(0), VersionNumber::new(1)), - VersionRange::begins_with(VersionNumber::new(2)) - ]) - ); - - // now verify it if the history was already dirted at some future versions. - let mut hist = CellHistory { - verified: sorted_vector_set![VersionNumber::new(0), VersionNumber::new(2)], - dirtied: Default::default(), - }; - // we should ignore dirties that occur after the known version - hist.propagate_from_deps( - VersionNumber::new(0), - &[CellHistory::dirtied(VersionNumber::new(4), false)], - ); - hist.get_history(&VersionNumber::new(2)).assert_verified(); - hist.get_history(&VersionNumber::new(4)).assert_verified(); - - // we should propagate dirties that occur before the known version - hist.propagate_from_deps( - VersionNumber::new(0), - &[CellHistory::dirtied(VersionNumber::new(1), false)], - ); - assert_eq!( - hist.get_history(&VersionNumber::new(1)).assert_unknown(), - &VersionRanges::testing_new(sorted_vector_set![ - VersionRange::bounded(VersionNumber::new(0), VersionNumber::new(1)), - VersionRange::begins_with(VersionNumber::new(2)) - ]) - ); - } - - #[test] - fn cell_history_tracks_correctly() { - let mut history = CellHistory::verified(VersionNumber::new(0)); - history - .get_history(&VersionNumber::new(0)) - .assert_verified(); - history - .get_history(&VersionNumber::new(1)) - .assert_verified(); - - assert_eq!(history.mark_invalidated(VersionNumber::new(2)), true); - assert_eq!(history.mark_invalidated(VersionNumber::new(2)), false); - assert_eq!( - history.get_history(&VersionNumber::new(2)).assert_unknown(), - &VersionRanges::testing_new(sorted_vector_set![VersionRange::bounded( - VersionNumber::new(0), - VersionNumber::new(2) - )]) - ); - - assert_eq!( - history.mark_verified(VersionNumber::new(0), std::iter::empty::()), - VersionNumber::new(0) - ); - history - .get_history(&VersionNumber::new(0)) - .assert_verified(); - history - .get_history(&VersionNumber::new(1)) - .assert_verified(); - assert_eq!( - history.get_history(&VersionNumber::new(2)).assert_unknown(), - &VersionRanges::testing_new(sorted_vector_set![VersionRange::bounded( - VersionNumber::new(0), - VersionNumber::new(2), - )]) - ); - - assert_eq!( - history.mark_verified(VersionNumber::new(2), std::iter::empty::()), - VersionNumber::new(2) - ); - history - .get_history(&VersionNumber::new(0)) - .assert_verified(); - history - .get_history(&VersionNumber::new(2)) - .assert_verified(); - history - .get_history(&VersionNumber::new(1)) - .assert_verified(); - // assert that all we did was delete the dirty, so that we have one continuous history - // usually we don't want to assert implementation, but this is important internal state - assert_eq!(history.verified.len(), 1); - assert!(history.dirtied.is_empty()); - - 
assert_eq!(history.mark_invalidated(VersionNumber::new(3)), true); - history - .get_history(&VersionNumber::new(0)) - .assert_verified(); - history - .get_history(&VersionNumber::new(2)) - .assert_verified(); - history - .get_history(&VersionNumber::new(1)) - .assert_verified(); - assert_eq!( - history.get_history(&VersionNumber::new(3)).assert_unknown(), - &VersionRanges::testing_new(sorted_vector_set![VersionRange::bounded( - VersionNumber::new(0), - VersionNumber::new(3) - )]) - ); - assert_eq!( - history.get_history(&VersionNumber::new(5)).assert_unknown(), - &VersionRanges::testing_new(sorted_vector_set![VersionRange::bounded( - VersionNumber::new(0), - VersionNumber::new(3) - )]) - ); - // assert that all we did was add one item to dirty - // usually we don't want to assert implementation, but this is important internal state - assert_eq!(history.verified.len(), 1); - assert_eq!(history.dirtied.len(), 1); - - let up_to = history.mark_verified( - VersionNumber::new(4), - [VersionNumber::new(5), VersionNumber::new(8)] - .into_iter() - .map(|v| CellHistory::dirtied(v, false)), - ); - assert_eq!(up_to, VersionNumber::new(3)); - history - .get_history(&VersionNumber::new(0)) - .assert_verified(); - history - .get_history(&VersionNumber::new(2)) - .assert_verified(); - history - .get_history(&VersionNumber::new(3)) - .assert_verified(); - assert_eq!( - history.get_history(&VersionNumber::new(5)).assert_unknown(), - &VersionRanges::testing_new(sorted_vector_set![VersionRange::bounded( - VersionNumber::new(0), - VersionNumber::new(5) - )]) - ); - // assert that all we did was delete from dirty, and carry over one dirty from deps - // usually we don't want to assert implementation, but this is important internal state - assert_eq!(history.verified.len(), 1); - assert_eq!(history.dirtied.len(), 1); - - assert_eq!(history.mark_invalidated(VersionNumber::new(6)), true); - // assert that all we did add one entry to dirty - // usually we don't want to assert implementation, but this is important internal state - assert_eq!(history.verified.len(), 1); - assert_eq!(history.dirtied.len(), 2); - let up_to = history.mark_verified(VersionNumber::new(7), std::iter::empty::()); - assert_eq!(up_to, VersionNumber::new(6)); - history - .get_history(&VersionNumber::new(0)) - .assert_verified(); - history - .get_history(&VersionNumber::new(2)) - .assert_verified(); - history - .get_history(&VersionNumber::new(3)) - .assert_verified(); - history - .get_history(&VersionNumber::new(6)) - .assert_verified(); - history - .get_history(&VersionNumber::new(7)) - .assert_verified(); - assert_eq!( - history.get_history(&VersionNumber::new(5)).assert_unknown(), - &VersionRanges::testing_new(sorted_vector_set![ - VersionRange::bounded(VersionNumber::new(0), VersionNumber::new(5)), - VersionRange::begins_with(VersionNumber::new(6)) - ]) - ); - // assert that we removed one dirty - // usually we don't want to assert implementation, but this is important internal state - assert_eq!(history.verified.len(), 2); - assert_eq!(history.dirtied.len(), 1); - - assert!(history.mark_invalidated(VersionNumber::new(9))); - assert!(!history.dirtied.get(&VersionNumber::new(9)).unwrap()); - assert!(history.force_dirty(VersionNumber::new(9))); - assert!(history.dirtied.get(&VersionNumber::new(9)).unwrap()); - assert!(!history.force_dirty(VersionNumber::new(9))); - history - .get_history(&VersionNumber::new(7)) - .assert_verified(); - history.get_history(&VersionNumber::new(9)).assert_dirty(); - 
history.get_history(&VersionNumber::new(10)).assert_dirty(); - - // assert that all we did add one entry to dirty - // usually we don't want to assert implementation, but this is important internal state - assert_eq!(history.verified.len(), 2); - assert_eq!(history.dirtied.len(), 2); - - // Here, since one dep is only verified at v11, we can't mark v10 as verified. - let up_to = history.mark_verified( - VersionNumber::new(11), - [VersionNumber::new(10), VersionNumber::new(11)] - .into_iter() - .map(CellHistory::verified), - ); - assert_eq!(up_to, VersionNumber::new(11)); - - history.get_history(&VersionNumber::new(9)).assert_dirty(); - history.get_history(&VersionNumber::new(10)).assert_dirty(); - history - .get_history(&VersionNumber::new(11)) - .assert_verified(); - - // assert that we added one verified, but kept all dirties since nothing overlapped. - // usually we don't want to assert implementation, but this is important internal state - assert_eq!(history.verified.len(), 3); - assert_eq!(history.dirtied.len(), 2); - } - - #[test] - fn cell_history_makes_new_history_correctly() { - let hist = CellHistory::testing_new( - &[VersionNumber::new(0), VersionNumber::new(3)], - &[VersionNumber::new(1), VersionNumber::new(4)], - ); - let (v, end, hist) = hist.make_new_verified_history(VersionNumber::new(2), None); - - assert_eq!(end, Some(VersionNumber::new(3))); - assert_eq!(v, VersionNumber::new(1)); - assert_eq!( - hist.dirtied, - sorted_vector_map![VersionNumber::new(4) => false] - ); - assert_eq!(hist.verified, sorted_vector_set![VersionNumber::new(1)]); - } - - #[test] - fn cell_history_verified_ranges() { - let hist = CellHistory::testing_new(&[], &[]); - assert_eq!(hist.get_verified_ranges().ranges(), &SortedVectorSet::new()); - - let hist = CellHistory::testing_new(&[VersionNumber::new(1)], &[]); - assert_eq!( - hist.get_verified_ranges().ranges(), - &sorted_vector_set![VersionRange::begins_with(VersionNumber::new(1))] - ); - - let hist = CellHistory::testing_new(&[VersionNumber::new(1), VersionNumber::new(3)], &[]); - assert_eq!( - hist.get_verified_ranges().ranges(), - &sorted_vector_set![VersionRange::begins_with(VersionNumber::new(1))] - ); - - let hist = CellHistory::testing_new( - &[VersionNumber::new(1), VersionNumber::new(3)], - &[VersionNumber::new(2)], - ); - assert_eq!( - hist.get_verified_ranges().ranges(), - &sorted_vector_set![ - VersionRange::bounded(VersionNumber::new(1), VersionNumber::new(2)), - VersionRange::begins_with(VersionNumber::new(3)) - ] - ); - - let hist = CellHistory::testing_new(&[VersionNumber::new(1)], &[VersionNumber::new(3)]); - assert_eq!( - hist.get_verified_ranges().ranges(), - &sorted_vector_set![VersionRange::bounded( - VersionNumber::new(1), - VersionNumber::new(3) - ),] - ); - - let hist = CellHistory::testing_new( - &[ - VersionNumber::new(1), - VersionNumber::new(2), - VersionNumber::new(4), - VersionNumber::new(7), - VersionNumber::new(9), - ], - &[ - VersionNumber::new(3), - VersionNumber::new(5), - VersionNumber::new(6), - ], - ); - assert_eq!( - hist.get_verified_ranges().ranges(), - &sorted_vector_set![ - VersionRange::bounded(VersionNumber::new(1), VersionNumber::new(3)), - VersionRange::bounded(VersionNumber::new(4), VersionNumber::new(5)), - VersionRange::begins_with(VersionNumber::new(7)) - ] - ); - } - - #[test] - fn test_force_dirty_has_precedence() { - let mut h1 = CellHistory::verified(VersionNumber::new(0)); - assert!(h1.mark_invalidated(VersionNumber::new(1))); - assert!(h1.force_dirty(VersionNumber::new(2))); - 
h1.get_history(&VersionNumber::new(2)).assert_dirty(); - - let mut h2 = CellHistory::verified(VersionNumber::new(0)); - assert!(h2.force_dirty(VersionNumber::new(1))); - assert!(h2.mark_invalidated(VersionNumber::new(2))); - h2.get_history(&VersionNumber::new(2)).assert_dirty(); - } -} diff --git a/dice/dice/src/impls/core/graph/introspection.rs b/dice/dice/src/impls/core/graph/introspection.rs index fde0b0ac6a072..523511ebd8f17 100644 --- a/dice/dice/src/impls/core/graph/introspection.rs +++ b/dice/dice/src/impls/core/graph/introspection.rs @@ -13,10 +13,12 @@ use dupe::Dupe; use gazebo::prelude::SliceExt; use crate::arc::Arc; +use crate::impls::core::graph::nodes::ForceDirtyHistory; use crate::impls::core::graph::nodes::VersionedGraphNode; use crate::impls::core::graph::storage::VersionedGraph; use crate::impls::key::DiceKey; use crate::introspection::graph::AnyKey; +use crate::introspection::graph::CellHistory; use crate::introspection::graph::EngineForIntrospection; use crate::introspection::graph::GraphNodeKind; use crate::introspection::graph::KeyID; @@ -29,15 +31,10 @@ use crate::HashMap; use crate::HashSet; pub struct VersionedGraphIntrospectable { - nodes: HashMap, + nodes: HashMap, edges: HashMap>>, } -pub(crate) struct GraphNodesForKey { - pub k: DiceKey, - pub nodes: BTreeMap>, -} - impl VersionedGraphIntrospectable { pub(crate) fn keys<'a>(&'a self) -> impl Iterator + 'a { self.nodes.keys() @@ -47,8 +44,10 @@ impl VersionedGraphIntrospectable { ) -> impl Iterator>)> + 'a { self.edges.iter() } - pub(crate) fn nodes<'a>(&'a self) -> impl Iterator + 'a { - self.nodes.values() + pub(crate) fn nodes<'a>( + &'a self, + ) -> impl Iterator + 'a { + self.nodes.iter() } pub(crate) fn len_for_introspection(&self) -> usize { self.nodes.len() @@ -61,60 +60,23 @@ impl VersionedGraph { let mut nodes = HashMap::default(); fn visit_node(key: DiceKey, node: &VersionedGraphNode) -> Option { - match node { - VersionedGraphNode::Occupied(o) => Some(SerializedGraphNode { - node_id: NodeID(key.index as usize), - kind: GraphNodeKind::Occupied, - history: o.metadata().hist.to_introspectable(), - deps: Some(visit_deps(o.metadata().deps.deps())), - rdeps: Some(visit_rdeps(o.metadata().rdeps.rdeps())), - }), - VersionedGraphNode::Vacant(_) => { - // TODO(bobyf) should probably write the metadata of vacant - None - } - } + node.to_introspectable() } - for (k, versioned_nodes) in self.last_n.iter() { - nodes.insert( - *k, - GraphNodesForKey { - k: *k, - nodes: versioned_nodes - .iter() - .map(|(v, node)| (v.to_introspectable(), visit_node(*k, node))) - .collect(), - }, - ); - - if let Some(last) = versioned_nodes.iter().last() { - edges.insert( - *k, - last.1 - .unpack_occupied() - .map_or_else(|| Arc::new(Vec::new()), |node| node.metadata().deps.deps()), - ); + for (k, versioned_node) in self.nodes.iter() { + if let Some(serialized) = visit_node(*k, versioned_node) { + nodes.insert(*k, serialized); } - } - fn visit_deps(deps: Arc>) -> HashSet { - deps.iter().map(|d| d.introspect()).collect() + edges.insert( + *k, + versioned_node.unpack_occupied().map_or_else( + || Arc::new(Vec::new()), + |node| Arc::new(node.deps().iter_keys().collect()), + ), + ); } - fn visit_rdeps( - rdeps: &HashMap, - ) -> BTreeMap> { - let mut res = BTreeMap::>::new(); - - for (rdep, v) in rdeps { - res.entry(v.to_introspectable()) - .or_default() - .push(NodeID(rdep.index as usize)) - } - - res - } VersionedGraphIntrospectable { nodes, edges } } } diff --git a/dice/dice/src/impls/core/graph/lazy_deps.rs 
b/dice/dice/src/impls/core/graph/lazy_deps.rs new file mode 100644 index 0000000000000..2ed02f84f732a --- /dev/null +++ b/dice/dice/src/impls/core/graph/lazy_deps.rs @@ -0,0 +1,113 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use allocative::Allocative; + +use crate::impls::key::DiceKey; +use crate::versions::VersionNumber; + +/// Provides a set that lazily dedupes entries. This is efficient for common patterns that we see for rdeps tracking. +/// +/// For rdeps, in the normal flow (where computations happen at increasing versions without overlap) the sequence of things we'll see is: +/// +/// 1. the first version will add some set of unique rdeps +/// 2. subsequent versions will add more rdeps, a dep will only be added once at each version, but can be repeated across versions +/// 3. eventually the node itself will be invalidated and the rdeps will be drained +/// +/// This means that the inserts will be a sequence of sets of unique deps. Deduping multiple times within one such block is wasteful, and +/// deduping on every block may also be wasteful (a typical flow adds a lot of rdeps in the first block, and then only a small number in each +/// subsequent block). +/// +/// So, we track the version so we can identify the transitions between those blocks, and only dedupe (1) when starting a new block and (2) +/// if our list is sufficiently large (we know the minimum size is the largest of our previous deduped size or the size of any block we've added). +/// +/// One common pattern that is close to our worst case is that a node itself is never invalidated but that all of its rdeps are. In that case +/// at every version we'll be adding the same N rdeps to the list. In that case, we'll do the sort+dedupe on every version. Overall, this +/// ends up being better in practice than using HashSet. +#[derive(Allocative, Debug)] +pub(crate) struct LazyDepsSet { + data: Vec<DiceKey>, + state: State, +} + +#[derive(Allocative, Debug, Clone, Copy)] +enum State { + New, + Growing { + /// The last version that we've seen. We use this to track the size of "blocks" of unique deps + latest_version: VersionNumber, + /// How many deps have been added at `latest_version` + this_version_count: u32, + /// The maximum of our previous deduped size and the size of any block of deps we've seen since then + min_size: u32, + }, +} + +impl LazyDepsSet { + pub(crate) fn new() -> LazyDepsSet { + Self { + data: Vec::new(), + state: State::New, + } + } + + pub(crate) fn insert(&mut self, v: VersionNumber, k: DiceKey) { + match self.state { + State::New => { + self.state = State::Growing { + latest_version: v, + min_size: 0, + this_version_count: 1, + } + } + State::Growing { + latest_version, + min_size, + ref mut this_version_count, + } => { + if latest_version == v { + *this_version_count += 1; + } else if latest_version < v { + // This indicates that we've started a new "block". + let mut min_size = std::cmp::max(min_size, *this_version_count); + + // need to decide if we should clean up duplicates + let resize_threshold = std::cmp::max(10, (min_size as usize) * 3 / 2); + if self.data.len() > resize_threshold { + // stable sort is going to best handle the fact that the initial run is completely sorted.
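// (Illustrative aside, kept in comment form because it sits inside `insert`.)
// A sketch of the block behavior described in the type-level docs, where
// `k1`/`k2` stand for arbitrary DiceKeys:
//
//     let mut set = LazyDepsSet::new();
//     set.insert(VersionNumber::new(0), k1); // first block begins
//     set.insert(VersionNumber::new(0), k2); // same version: appended, never deduped here
//     set.insert(VersionNumber::new(1), k1); // new block: may sort+dedup if the vec grew enough
//     let deps: Vec<_> = set.iter().collect(); // iter()/drain() may still yield duplicates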
+ self.data.sort(); + self.data.dedup(); + min_size = self.data.len() as u32; + } + + self.state = State::Growing { + latest_version: v, + min_size, + this_version_count: 1, + } + } else { + // an older version... just ignore? + // TODO(cjhopman): We should revisit this behavior when we more widely allow concurrent computations. + } + } + } + self.data.push(k); + } + + /// Clears the set and returns all currently stored deps. The returned iterator might contain duplicates. + pub(crate) fn drain(&mut self) -> std::vec::Drain<'_, DiceKey> { + self.state = State::New; + self.data.drain(..) + } + + /// Iterates over all currently stored deps. The returned iterator might contain duplicates. + pub(crate) fn iter(&self) -> impl Iterator + '_ { + self.data.iter().copied() + } +} diff --git a/dice/dice/src/impls/core/graph/mod.rs b/dice/dice/src/impls/core/graph/mod.rs deleted file mode 100644 index 7f9fee1b3afb1..0000000000000 --- a/dice/dice/src/impls/core/graph/mod.rs +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! The versioned dice graph of dependencies -mod dependencies; -pub(crate) mod history; -#[allow(unused)] -pub(crate) mod introspection; -mod nodes; -pub(crate) mod storage; -pub(crate) mod types; diff --git a/dice/dice/src/impls/core/graph/nodes.rs b/dice/dice/src/impls/core/graph/nodes.rs index 7426fbde55c60..bbf399e4aa8e0 100644 --- a/dice/dice/src/impls/core/graph/nodes.rs +++ b/dice/dice/src/impls/core/graph/nodes.rs @@ -16,130 +16,570 @@ //! The 'VersionedCache' will track dependency edges and use computed version //! number for each cache entry and a global version counter to determine //! up-to-date-ness of cache entries. -//! + +use std::ops::Bound; +use std::ops::RangeBounds; use allocative::Allocative; use dupe::Dupe; use gazebo::variants::UnpackVariants; +use itertools::Itertools; +use sorted_vector_map::SortedVectorMap; +use super::types::VersionedGraphResult; +use crate::api::key::InvalidationSourcePriority; use crate::arc::Arc; -use crate::impls::core::graph::dependencies::VersionedDependencies; -use crate::impls::core::graph::dependencies::VersionedRevDependencies; -use crate::impls::core::graph::history::CellHistory; +use crate::impls::core::graph::lazy_deps::LazyDepsSet; +use crate::impls::core::graph::types::VersionedGraphResultMismatch; +use crate::impls::deps::graph::SeriesParallelDeps; use crate::impls::key::DiceKey; use crate::impls::value::DiceComputedValue; use crate::impls::value::DiceValidValue; use crate::impls::value::MaybeValidDiceValue; +use crate::impls::value::TrackedInvalidationPaths; +use crate::introspection::graph::GraphNodeKind; +use crate::introspection::graph::KeyID; +use crate::introspection::graph::NodeID; +use crate::introspection::graph::SerializedGraphNode; use crate::versions::VersionNumber; +use crate::versions::VersionRange; +use crate::versions::VersionRanges; +use crate::HashSet; -/// actual entries as seen when querying the cache -/// The placeholder will be used to indicate known dirty entries. -#[derive(UnpackVariants, Allocative)] +/// Actual entries as seen when querying the VersionedGraph. +/// +/// This is responsible for tracking the information related to a single +/// node (i.e. 
a DiceKey) required for incremental computations. +#[derive(UnpackVariants, Allocative, Debug)] pub(crate) enum VersionedGraphNode { Occupied(OccupiedGraphNode), + Injected(InjectedGraphNode), Vacant(VacantGraphNode), } impl VersionedGraphNode { - pub(crate) fn force_dirty(&mut self, v: VersionNumber) -> bool { + pub(crate) fn force_dirty( + &mut self, + v: VersionNumber, + invalidation_priority: InvalidationSourcePriority, + ) -> InvalidateResult { + match self { + VersionedGraphNode::Occupied(e) => e.force_dirty(v, invalidation_priority), + VersionedGraphNode::Vacant(e) => { + if e.force_dirty(v, invalidation_priority) { + InvalidateResult::Changed(None) + } else { + InvalidateResult::NoChange + } + } + VersionedGraphNode::Injected(e) => { + panic!("injected keys don't get invalidated (`{:?}`)", e) + } + } + } + + pub(crate) fn mark_invalidated( + &mut self, + v: VersionNumber, + invalidation_priority: Option, + ) -> InvalidateResult { match self { - VersionedGraphNode::Occupied(e) => e.metadata.hist.force_dirty(v), - VersionedGraphNode::Vacant(e) => e.hist.force_dirty(v), + VersionedGraphNode::Occupied(e) => { + if e.mark_invalidated(v, invalidation_priority) { + InvalidateResult::Changed(Some(e.metadata.rdeps.drain())) + } else { + InvalidateResult::NoChange + } + } + VersionedGraphNode::Vacant(e) => { + panic!("vacant nodes shouldn't get invalidated (`{:?}`)", e) + } + VersionedGraphNode::Injected(e) => { + panic!("injected keys don't get invalidated (`{:?}`)", e) + } } } - pub(crate) fn mark_invalidated(&mut self, v: VersionNumber) -> bool { + /// Returns the VersionedGraphResult for the entry at the provided version. + pub(crate) fn at_version(&self, v: VersionNumber) -> VersionedGraphResult { + match self { + VersionedGraphNode::Occupied(entry) => entry.at_version(v), + VersionedGraphNode::Vacant(_) => VersionedGraphResult::Compute, + VersionedGraphNode::Injected(entry) => entry.at_version(v), + } + } + + pub(crate) fn add_rdep_at(&mut self, v: VersionNumber, k: DiceKey) { + match self { + VersionedGraphNode::Occupied(occ) => occ.add_rdep_at(v, k), + VersionedGraphNode::Injected(inj) => inj.add_rdep_at(v, k), + VersionedGraphNode::Vacant(_) => { + unreachable!("we can't have an rdep on something that has never seen a value") + } + } + } + + pub(crate) fn on_injected( + &mut self, + version: VersionNumber, + value: DiceValidValue, + invalidation_priority: InvalidationSourcePriority, + ) -> InvalidateResult { match self { - VersionedGraphNode::Occupied(e) => e.metadata.hist.mark_invalidated(v), - VersionedGraphNode::Vacant(e) => e.hist.mark_invalidated(v), + VersionedGraphNode::Occupied(occ) => { + occ.on_injected(version, value, invalidation_priority) + } + VersionedGraphNode::Injected(inj) => { + inj.on_injected(version, value, invalidation_priority) + } + VersionedGraphNode::Vacant(vac) => { + let entry = OccupiedGraphNode::new( + vac.key, + value, + Arc::new(SeriesParallelDeps::None), + VersionRange::begins_with(version).into_ranges(), + vac.dirtied_history.clone(), + TrackedInvalidationPaths::new(invalidation_priority, vac.key, version), + ); + *self = Self::Occupied(entry); + InvalidateResult::Changed(None) + } } } - pub(crate) fn history(&self) -> &CellHistory { + /// Returns the newly updated value for the key, and whether or not any state changed. 
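// (Illustrative aside.) How the invalidation entry points above fit together,
// as a sketch; `queue` is a hypothetical worklist owned by the caller:
//
//     match node.mark_invalidated(v, None) {
//         // The node transitioned to dirty: its reverse deps must be
//         // re-examined too. The drained iterator may contain duplicates.
//         InvalidateResult::Changed(Some(rdeps)) => queue.extend(rdeps),
//         InvalidateResult::Changed(None) | InvalidateResult::NoChange => {}
//     }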
+    #[cfg_attr(debug_assertions, instrument(level = "debug", skip(self, value, deps, reusable), fields(key = ?key, valid_deps_versions = ?valid_deps_versions)))]
+    pub(crate) fn on_computed(
+        &mut self,
+        key: super::types::VersionedGraphKey,
+        value: DiceValidValue,
+        mut valid_deps_versions: VersionRanges,
+        reusable: super::storage::ValueReusable,
+        deps: Arc<SeriesParallelDeps>,
+        mut invalidation_paths: TrackedInvalidationPaths,
+    ) -> (DiceComputedValue, bool) {
+        let (dirtied_history, overwrite_entry) = match self {
+            VersionedGraphNode::Occupied(entry) if reusable.is_reusable(&value, &deps, entry) => {
+                debug!("marking graph entry as unchanged");
+                entry.mark_unchanged(key.v, valid_deps_versions, invalidation_paths);
+                let ret = entry.computed_val(key.v);
+                return (ret, false);
+            }
+            VersionedGraphNode::Occupied(entry) => {
+                // TODO(cjhopman): Should this consider the max version in valid_deps_version rather than just key.v?
+                (
+                    &entry.metadata.dirtied_history,
+                    !entry.metadata.ever_valid_after(key.v),
+                )
+            }
+            VersionedGraphNode::Vacant(entry) => (&entry.dirtied_history, true),
+            _ => unreachable!("injected nodes are never computed"),
+        };
+
+        let (force_dirty_restricted_range, invalidation_priority) = dirtied_history.get_x(key.v);
+        if force_dirty_restricted_range.begin() > VersionNumber(0) {
+            invalidation_paths.update(TrackedInvalidationPaths::new(
+                invalidation_priority,
+                key.k,
+                force_dirty_restricted_range.begin(),
+            ))
+        }
+
+        valid_deps_versions.intersect_range(force_dirty_restricted_range);
+        let computed_version = VersionRange::bounded(key.v, VersionNumber::new(key.v.0 + 1));
+        valid_deps_versions.insert(computed_version);
+
+        if !overwrite_entry {
+            // TODO(cjhopman): Returning `true` here matches previous behavior, but it seems odd
+            // that we claim something changed when we don't change anything. It's likely that
+            // the return value actually is used to mean something different than that we changed
+            // something.
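+            // (Note: the freshly computed value is still returned to the caller below; it just
+            // is not stored in the graph, because the existing entry was verified at a later
+            // version than the one being computed here.)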
+ debug!("skipping new graph entry because value is older than current entry"); + return ( + DiceComputedValue::new( + MaybeValidDiceValue::valid(value), + Arc::new(valid_deps_versions), + invalidation_paths, + ), + true, + ); + } + + debug!("making new graph entry because value not reusable"); + let new = OccupiedGraphNode::new( + key.k, + value, + deps, + valid_deps_versions, + dirtied_history.clone(), + invalidation_paths, + ); + let ret = new.computed_val(key.v); + *self = VersionedGraphNode::Occupied(new); + + (ret, true) + } + + pub(crate) fn to_introspectable(&self) -> Option { + fn visit_deps<'a>(deps: impl Iterator + 'a) -> HashSet { + deps.map(|d| d.introspect()).collect() + } + + fn visit_rdeps(rdeps: impl Iterator) -> Vec { + rdeps.unique().map(|d| NodeID(d.index as usize)).collect() + } + match self { - VersionedGraphNode::Occupied(o) => &o.metadata().hist, - VersionedGraphNode::Vacant(v) => &v.hist, + VersionedGraphNode::Occupied(o) => Some(SerializedGraphNode { + node_id: NodeID(o.key.index as usize), + kind: GraphNodeKind::Occupied, + history: crate::introspection::graph::CellHistory { + valid_ranges: o.metadata.verified_ranges.to_introspectable(), + force_dirtied_at: o.metadata.dirtied_history.to_introspectable(), + }, + deps: Some(visit_deps(o.deps().iter_keys())), + rdeps: Some(visit_rdeps(o.rdeps())), + }), + VersionedGraphNode::Vacant(_) => { + // TODO(bobyf) should probably write the metadata of vacant + None + } + VersionedGraphNode::Injected(inj) => { + let latest = inj.latest(); + Some(SerializedGraphNode { + node_id: NodeID(inj.key.index as usize), + kind: GraphNodeKind::Occupied, + history: crate::introspection::graph::CellHistory { + valid_ranges: latest.valid_versions.to_introspectable(), + force_dirtied_at: Vec::new(), + }, + deps: None, + rdeps: Some(visit_rdeps(inj.rdeps.iter())), + }) + } + } + } + + pub(crate) fn intersect_valid_versions_at( + &self, + v: VersionNumber, + deps_verified_ranges: &mut VersionRanges, + ) { + match self.valid_versions_at(v) { + None => { + deps_verified_ranges.clear(); + } + Some(valid_ranges) => { + deps_verified_ranges.intersect_in_place(valid_ranges); + } } } + + fn valid_versions_at(&self, v: VersionNumber) -> Option<&VersionRanges> { + match self { + VersionedGraphNode::Occupied(occ) => { + if occ.metadata.verified_ranges.contains(v) { + Some(&occ.metadata.verified_ranges) + } else { + None + } + } + VersionedGraphNode::Injected(inj) => Some(&inj.data_at(v).unwrap().1.valid_versions), + VersionedGraphNode::Vacant(_) => { + unreachable!() + } + } + } +} + +pub(crate) enum InvalidateResult<'a> { + NoChange, + /// Returns the rdeps of the node (that must also be invalidated). There can be duplicates in this list. 
+    Changed(Option<std::vec::Drain<'a, DiceKey>>),
+}
 
 /// The stored entry of the cache
-#[derive(Allocative, Clone)] // TODO(bobyf) remove need to clone
+#[derive(Allocative, Debug)]
 pub(crate) struct OccupiedGraphNode {
     key: DiceKey,
     res: DiceValidValue,
     metadata: NodeMetadata,
+    invalidation_paths: TrackedInvalidationPaths,
 }
 
 /// Meta data about a DICE node, which are its edges and history information
-#[derive(Allocative, Clone)] // TODO(bobyf) remove need to clone
+#[derive(Allocative, Debug)]
 pub(crate) struct NodeMetadata {
-    pub(crate) deps: VersionedDependencies,
-    pub(crate) rdeps: VersionedRevDependencies,
-    pub(crate) hist: CellHistory,
+    deps: Arc<SeriesParallelDeps>,
+    rdeps: LazyDepsSet,
+    verified_ranges: Arc<VersionRanges>,
+    dirtied_history: ForceDirtyHistory,
+}
+
+impl NodeMetadata {
+    fn should_add_rdep_at(&self, v: VersionNumber) -> bool {
+        match self.verified_ranges.last() {
+            Some(last) => match (last.begin(), last.end()) {
+                (begin, _) if begin > v => false,
+                (_, Some(_end)) => false,
+                _ => true,
+            },
+            None => true,
+        }
+    }
+
+    fn ever_valid_after(&self, v: VersionNumber) -> bool {
+        match self.verified_ranges.last() {
+            Some(last) => match last.end() {
+                Some(end) => end > v,
+                _ => true,
+            },
+            _ => false,
+        }
+    }
+}
+
+/// For a node, keeps a history of every version at which the node has been force-dirtied.
+///
+/// Across a force-dirtied version, we cannot ever reuse a node's value based on its deps' values not changing.
+#[derive(Allocative, Clone, Debug)]
+pub(crate) struct ForceDirtyHistory {
+    #[allow(clippy::box_collection)]
+    versions: Option<Box<(Vec<VersionNumber>, InvalidationSourcePriority)>>,
+}
+
+// The vast majority of nodes are never force-dirtied, so we want to make sure that we optimize for that.
+static_assertions::assert_eq_size!(ForceDirtyHistory, [usize; 1]);
+
+impl ForceDirtyHistory {
+    pub(crate) fn new() -> Self {
+        Self { versions: None }
+    }
+
+    /// Marks a version as force-dirtied. Returns true if the version was not already marked.
+    ///
+    /// Should only ever be called at increasing version numbers.
+    pub(crate) fn force_dirty(
+        &mut self,
+        v: VersionNumber,
+        invalidation_priority: InvalidationSourcePriority,
+    ) -> bool {
+        match &mut self.versions {
+            Some(data) => {
+                if *data.0.last().unwrap() == v {
+                    false
+                } else {
+                    data.0.push(v);
+                    true
+                }
+            }
+            None => {
+                self.versions = Some(Box::new((vec![v], invalidation_priority)));
+                true
+            }
+        }
+    }
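+
+    // Illustrative example (hypothetical versions): if a node was force-dirtied at v3 and
+    // again at v7, then restricted_range(v5) is [v3, v7) -- a deps-based reuse check for a
+    // value computed at v5 may only draw on versions inside that window.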
+
+    /// Returns the force-dirtied bounds around the provided version.
+    fn restricted_range(&self, version: VersionNumber) -> VersionRange {
+        self.get_x(version).0
+    }
+
+    fn get_x(&self, version: VersionNumber) -> (VersionRange, InvalidationSourcePriority) {
+        match &self.versions {
+            None => (
+                VersionRange::begins_with(VersionNumber::ZERO),
+                InvalidationSourcePriority::Normal,
+            ),
+            Some(data) => {
+                let (dirties, invalidation_priority) = &**data;
+                let mut end = None;
+                let mut begin = None;
+                for dirty_v in dirties.iter().rev() {
+                    if *dirty_v <= version {
+                        begin = Some(*dirty_v);
+                        break;
+                    } else {
+                        end = Some(*dirty_v);
+                    }
+                }
+
+                (
+                    match (begin, end) {
+                        (Some(begin), Some(end)) => VersionRange::bounded(begin, end),
+                        (Some(begin), None) => VersionRange::begins_with(begin),
+                        (None, Some(end)) => VersionRange::bounded(VersionNumber::new(0), end),
+                        (None, None) => VersionRange::begins_with(VersionNumber::ZERO),
+                    },
+                    *invalidation_priority,
+                )
+            }
+        }
+    }
+
+    pub(crate) fn to_introspectable(&self) -> Vec<crate::introspection::graph::VersionNumber> {
+        match &self.versions {
+            Some(data) => data.0.iter().map(|v| v.to_introspectable()).collect(),
+            None => Vec::new(),
+        }
+    }
+}
 
 impl OccupiedGraphNode {
     pub(crate) fn new(
         key: DiceKey,
         res: DiceValidValue,
-        deps: VersionedDependencies,
-        hist: CellHistory,
+        deps: Arc<SeriesParallelDeps>,
+        verified_ranges: VersionRanges,
+        dirtied_history: ForceDirtyHistory,
+        invalidation_paths: TrackedInvalidationPaths,
     ) -> Self {
         Self {
             key,
             res,
             metadata: NodeMetadata {
-                hist,
                 deps,
-                rdeps: VersionedRevDependencies::new(),
+                rdeps: LazyDepsSet::new(),
+                verified_ranges: Arc::new(verified_ranges),
+                dirtied_history,
             },
+            invalidation_paths,
         }
     }
 
-    pub(crate) fn metadata(&self) -> &NodeMetadata {
-        &self.metadata
-    }
-
-    pub(crate) fn metadata_mut(&mut self) -> &mut NodeMetadata {
-        &mut self.metadata
-    }
-
     pub(crate) fn mark_unchanged(
         &mut self,
-        v: VersionNumber,
-        latest_dep_verified: Option<VersionNumber>,
-        first_dep_dirtied: Option<VersionNumber>,
-        deps: Arc<Vec<DiceKey>>,
-    ) -> VersionNumber {
-        // Marking a node as unchanged ALWAYS requires the dependencies for which we used to deem
-        // that the node is unchanged.
-        //
-        // Consider a node n2 that depends on n1 at version v0.
-        // We then dirty versions v1, v2, v3 at n1. We'd defer dirtying v2, v3 on n2 due
-        // to the fact that it's possible that at v2 and v3, n2 no longer depends on n1
-        // and we rely on deferred propagation of dirtiness. However, if at v2, we recompute
-        // and find that the values are equal to that at v0, then we can resurrect v0's n2.
-        // However, at this point, we will need to deferred propagate the dirty at v3.
-        let changed_since =
-            self.metadata
-                .hist
-                .mark_verified_modern(v, latest_dep_verified, first_dep_dirtied);
-        self.metadata.deps.replace_deps(changed_since, deps);
-
-        changed_since
+        version: VersionNumber,
+        mut valid_deps_versions: VersionRanges,
+        new_invalidation_paths: TrackedInvalidationPaths,
+    ) {
+        valid_deps_versions
+            .intersect_range(self.metadata.dirtied_history.restricted_range(version));
+        valid_deps_versions.insert(VersionRange::bounded(
+            version,
+            VersionNumber::new(version.0 + 1),
+        ));
+
+        if !valid_deps_versions.is_empty() {
+            Arc::make_mut(&mut self.metadata.verified_ranges).union_in_place(&valid_deps_versions);
+        }
+
+        self.invalidation_paths.update(new_invalidation_paths)
     }
 
     pub(crate) fn val(&self) -> &DiceValidValue {
         &self.res
     }
 
-    pub(crate) fn computed_val(&self) -> DiceComputedValue {
+    pub(crate) fn computed_val(&self, for_version: VersionNumber) -> DiceComputedValue {
         DiceComputedValue::new(
             MaybeValidDiceValue::valid(self.res.dupe()),
-            Arc::new(self.metadata.hist.clone()),
+            self.metadata.verified_ranges.dupe(),
+            self.invalidation_paths.at_version(for_version),
         )
     }
+
+    pub(crate) fn on_injected(
+        &mut self,
+        version: VersionNumber,
+        value: DiceValidValue,
+        invalidation_priority: InvalidationSourcePriority,
+    ) -> InvalidateResult {
+        // TODO(cjhopman): accepting injections only for InjectedKey would make the VersionedGraph simpler. Currently, this is used
+        // for "mocking" dice keys in tests via DiceBuilder::mock_and_return().
+        if self.val().equality(&value) {
+            // TODO(cjhopman): This is wrong. The node could currently be in a dirtied state and we
+            // aren't recording that the value is verified at this version.
+            return InvalidateResult::NoChange;
+        }
+
+        self.res = value;
+        self.metadata.deps = Arc::new(SeriesParallelDeps::None);
+        self.metadata.verified_ranges = Arc::new(VersionRange::begins_with(version).into_ranges());
+        self.invalidation_paths
+            .update(TrackedInvalidationPaths::new(
+                invalidation_priority,
+                self.key,
+                version,
+            ));
+
+        InvalidateResult::Changed(Some(self.metadata.rdeps.drain()))
+    }
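+
+    // Illustrative example (hypothetical versions): suppose this node was verified at
+    // [v1, v2) and force-dirtied at v4. Then:
+    //   at_version(v1) => Match (v1 is within the verified ranges)
+    //   at_version(v3) => CheckDeps with prev_verified_version = v1 (v1 falls inside the
+    //                     restricted range [v0, v4), so a deps check may bridge the gap)
+    //   at_version(v5) => Compute (the force-dirty at v4 separates v5 from v1)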
+    fn at_version(&self, v: VersionNumber) -> VersionedGraphResult {
+        match self.metadata.verified_ranges.find_value_upper_bound(v) {
+            Some(found) if found == v => VersionedGraphResult::Match(self.computed_val(v)),
+            Some(prev_verified_version) => {
+                if self
+                    .metadata
+                    .dirtied_history
+                    .restricted_range(v)
+                    .contains(&prev_verified_version)
+                {
+                    VersionedGraphResult::CheckDeps(VersionedGraphResultMismatch {
+                        entry: self.val().dupe(),
+                        prev_verified_version,
+                        deps_to_validate: self.metadata.deps.dupe(),
+                    })
+                } else {
+                    VersionedGraphResult::Compute
+                }
+            }
+            None => VersionedGraphResult::Compute,
+        }
+    }
+
+    fn add_rdep_at(&mut self, v: VersionNumber, k: DiceKey) {
+        if self.metadata.should_add_rdep_at(v) {
+            self.metadata.rdeps.insert(v, k);
+        }
+    }
+
+    fn force_dirty(
+        &mut self,
+        v: VersionNumber,
+        invalidation_priority: InvalidationSourcePriority,
+    ) -> InvalidateResult {
+        self.mark_invalidated(v, Some(invalidation_priority));
+        if self
+            .metadata
+            .dirtied_history
+            .force_dirty(v, invalidation_priority)
+        {
+            InvalidateResult::Changed(Some(self.metadata.rdeps.drain()))
+        } else {
+            InvalidateResult::NoChange
+        }
+    }
+
+    fn mark_invalidated(
+        &mut self,
+        v: VersionNumber,
+        invalidation_priority: Option<InvalidationSourcePriority>,
+    ) -> bool {
+        if let Some(invalidation_priority) = invalidation_priority {
+            self.invalidation_paths
+                .update(TrackedInvalidationPaths::new(
+                    invalidation_priority,
+                    self.key,
+                    v,
+                ));
+        }
+
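+        // Truncate the verified ranges to [v0, v): everything from `v` onwards becomes
+        // unverified. The returned bool reports whether the ranges actually changed.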
+        Arc::make_mut(&mut self.metadata.verified_ranges)
+            .intersect_range(VersionRange::bounded(VersionNumber::ZERO, v))
+    }
+
+    fn rdeps(&self) -> impl Iterator<Item = DiceKey> + '_ {
+        self.metadata.rdeps.iter()
+    }
+
+    pub(crate) fn deps(&self) -> &Arc<SeriesParallelDeps> {
+        &self.metadata.deps
+    }
+
+    pub(crate) fn is_verified_at(&self, version: VersionNumber) -> bool {
+        self.metadata.verified_ranges.contains(version)
+    }
 }
 
 /// An entry in the graph that has no computation value associated. This is used to store the
@@ -147,32 +587,156 @@ impl OccupiedGraphNode {
 /// This will be replaced by `OccupiedGraphNode` when a computed value is associated with
 /// this node. There is no guarantees of when, or even if that will occur since users may never
 /// need the associated value at this node.
-#[derive(Allocative)]
+#[derive(Allocative, Debug)]
 pub(crate) struct VacantGraphNode {
-    pub(crate) key: DiceKey,
-    pub(crate) hist: CellHistory,
+    key: DiceKey,
+    dirtied_history: ForceDirtyHistory,
+    invalidation_priority: InvalidationSourcePriority,
+}
+
+impl VacantGraphNode {
+    pub(crate) fn new(key: DiceKey, invalidation_priority: InvalidationSourcePriority) -> Self {
+        Self {
+            key,
+            dirtied_history: ForceDirtyHistory::new(),
+            invalidation_priority,
+        }
+    }
+
+    pub(crate) fn force_dirty(
+        &mut self,
+        v: VersionNumber,
+        invalidation_priority: InvalidationSourcePriority,
+    ) -> bool {
+        assert!(self.invalidation_priority == invalidation_priority);
+        self.dirtied_history.force_dirty(v, invalidation_priority)
+    }
+}
+
+/// An entry in the graph for an InjectedKey. This will store all injected values it ever sees because
+/// we cannot recompute them if they are dropped.
+#[derive(Allocative, Debug)]
+pub(crate) struct InjectedGraphNode {
+    key: DiceKey,
+    values: SortedVectorMap<VersionNumber, InjectedNodeData>,
+    rdeps: LazyDepsSet,
+    invalidation_paths: TrackedInvalidationPaths,
+}
+
+#[derive(Allocative, Debug)]
+pub(crate) struct InjectedNodeData {
+    value: DiceValidValue,
+    first_valid_version: VersionNumber,
+    // Used to cache the version ranges for `at_version`. This is a single VersionRange.
+    valid_versions: Arc<VersionRanges>,
+}
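+
+// Illustrative example (hypothetical versions): if a key is injected with value 10 at v1
+// and value 20 at v4, `values` holds {v1 -> (10, valid [v1, v4)), v4 -> (20, valid [v4, inf))},
+// so `at_version(v2)` still returns a Match for 10 even though a newer value exists.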
+
+impl InjectedGraphNode {
+    /// Records a newly injected value, returning an InvalidateResult that carries the rdeps to
+    /// invalidate if the value changed. Should only ever be called at increasing version numbers.
+    pub(crate) fn on_injected(
+        &mut self,
+        version: VersionNumber,
+        value: DiceValidValue,
+        invalidation_priority: InvalidationSourcePriority,
+    ) -> InvalidateResult {
+        match self.values.values_mut().next_back() {
+            Some(v) if v.value.equality(&value) => {
+                return InvalidateResult::NoChange;
+            }
+            Some(v) => {
+                v.valid_versions =
+                    Arc::new(VersionRange::bounded(v.first_valid_version, version).into_ranges());
+            }
+            None => {}
+        };
+
+        self.values
+            .insert(version, Self::new_node_data(value, version));
+        self.invalidation_paths
+            .update(TrackedInvalidationPaths::new(
+                invalidation_priority,
+                self.key,
+                version,
+            ));
+
+        InvalidateResult::Changed(Some(self.rdeps.drain()))
+    }
+
+    pub(crate) fn at_version(&self, v: VersionNumber) -> VersionedGraphResult {
+        match self.data_at(v) {
+            Some((_, data)) => VersionedGraphResult::Match(DiceComputedValue::new(
+                MaybeValidDiceValue::valid(data.value.dupe()),
+                data.valid_versions.dupe(),
+                self.invalidation_paths.at_version(v),
+            )),
+            None => VersionedGraphResult::Compute,
+        }
+    }
+
+    pub(crate) fn add_rdep_at(&mut self, v: VersionNumber, k: DiceKey) {
+        for version in self.values.keys().rev() {
+            if *version <= v {
+                self.rdeps.insert(v, k);
+                return;
+            }
+        }
+        unreachable!()
+    }
+
+    pub(crate) fn new(
+        k: DiceKey,
+        v: VersionNumber,
+        value: DiceValidValue,
+        invalidation_priority: InvalidationSourcePriority,
+    ) -> InjectedGraphNode {
+        InjectedGraphNode {
+            key: k,
+            values: [(v, Self::new_node_data(value, v))].into_iter().collect(),
+            rdeps: LazyDepsSet::new(),
+            invalidation_paths: TrackedInvalidationPaths::new(invalidation_priority, k, v),
+        }
+    }
+
+    pub(crate) fn latest(&self) -> &InjectedNodeData {
+        // We don't ever create an empty values map
+        self.values.values().next_back().unwrap()
+    }
+
+    fn new_node_data(value: DiceValidValue, version: VersionNumber) -> InjectedNodeData {
+        InjectedNodeData {
+            value,
+            first_valid_version: version,
+            valid_versions: Arc::new(VersionRange::begins_with(version).into_ranges()),
+        }
+    }
+
+    fn data_at(&self, v: VersionNumber) -> Option<(&VersionNumber, &InjectedNodeData)> {
+        self.values
+            .range((Bound::Unbounded, Bound::Included(v)))
+            .last()
+    }
+}
 
 #[cfg(test)]
 mod tests {
     use allocative::Allocative;
     use async_trait::async_trait;
+    use buck2_futures::cancellation::CancellationContext;
     use derive_more::Display;
     use dupe::Dupe;
-    use more_futures::cancellation::CancellationContext;
 
     use crate::api::computations::DiceComputations;
     use crate::api::key::Key;
     use crate::arc::Arc;
-    use crate::impls::core::graph::dependencies::VersionedDependencies;
-    use crate::impls::core::graph::history::testing::CellHistoryExt;
-    use crate::impls::core::graph::history::testing::HistoryExt;
-    use crate::impls::core::graph::history::CellHistory;
+    use crate::impls::core::graph::nodes::ForceDirtyHistory;
     use crate::impls::core::graph::nodes::OccupiedGraphNode;
+    use crate::impls::deps::graph::SeriesParallelDeps;
     use crate::impls::key::DiceKey;
     use crate::impls::value::DiceKeyValue;
     use crate::impls::value::DiceValidValue;
+    use crate::impls::value::TrackedInvalidationPaths;
     use crate::versions::VersionNumber;
+    use crate::versions::VersionRange;
+    use crate::versions::VersionRanges;
 
     #[derive(Allocative, Clone, Dupe, Debug, Display, PartialEq, Eq, Hash)]
     struct K;
 
@@ -195,62 +759,37 @@ mod tests {
     }
 
     #[test]
-    fn update_versioned_graph_entry_tracks_versions_and_deps() {
-        let deps0: Arc<Vec<DiceKey>> = Arc::new(vec![DiceKey { index: 5 }]);
+    fn update_versioned_graph_entry_tracks_versions() {
+        let deps0: Arc<SeriesParallelDeps> =
+            Arc::new(SeriesParallelDeps::serial_from_vec(vec![DiceKey {
+                index: 5,
+            }]));
         let mut entry = OccupiedGraphNode::new(
             DiceKey { index: 1335 },
             DiceValidValue::testing_new(DiceKeyValue::<K>::new(1)),
-            VersionedDependencies::new(VersionNumber::new(0), deps0.dupe()),
-            CellHistory::testing_new(
-                &[VersionNumber::new(0)],
-                &[VersionNumber::new(1), VersionNumber::new(2)],
+            deps0.dupe(),
+            VersionRanges::testing_new(
+                vec![VersionRange::bounded(
+                    VersionNumber::new(0),
+                    VersionNumber::new(1),
+                )]
+                .into_iter()
+                .collect(),
             ),
+            ForceDirtyHistory::new(),
+            TrackedInvalidationPaths::clean(),
         );
 
-        entry
-            .metadata()
-            .hist
-            .get_history(&VersionNumber::new(0))
-            .assert_verified();
-        assert_eq!(entry.metadata().deps.deps(), deps0);
-
-        entry.mark_unchanged(VersionNumber::new(1), None, None, Arc::new(vec![]));
-        entry
-            .metadata()
-            .hist
-            .get_history(&VersionNumber::new(0))
-            .assert_verified();
-        entry
-            .metadata()
-            .hist
-            .get_history(&VersionNumber::new(1))
-            .assert_verified();
-        assert_eq!(entry.metadata().deps.deps(), Arc::new(Vec::new()));
-
-        let deps1 = Arc::new(vec![DiceKey { index: 7 }]);
+        assert!(entry.is_verified_at(VersionNumber::new(0)));
+        assert_eq!(*entry.deps(), deps0);
+
         entry.mark_unchanged(
-            VersionNumber::new(2),
-            Some(VersionNumber::new(1)),
-            None,
-            deps1.dupe(),
+            VersionNumber::new(1),
+            VersionRanges::new(),
+            TrackedInvalidationPaths::clean(),
         );
 
-        entry
-            .metadata()
-            .hist
-            .get_history(&VersionNumber::new(0))
-            .assert_verified();
-        entry
-            .metadata()
-            .hist
-            .get_history(&VersionNumber::new(1))
-            .assert_verified();
-        entry
-            .metadata()
-            .hist
-            .get_history(&VersionNumber::new(2))
-            .assert_verified();
-
-        assert_eq!(entry.metadata().deps.deps(), deps1);
+        assert!(entry.is_verified_at(VersionNumber::new(0)));
+        assert!(entry.is_verified_at(VersionNumber::new(1)));
     }
 }
diff --git a/dice/dice/src/impls/core/graph/storage.rs b/dice/dice/src/impls/core/graph/storage.rs
index a53f9b0efb677..2bdc62661e57e 100644
--- a/dice/dice/src/impls/core/graph/storage.rs
+++ b/dice/dice/src/impls/core/graph/storage.rs
@@ -7,48 +7,192 @@
  * of this source tree.
  */
 
-//!
 //! A cache that deals with versions
 //!
 //! This is responsible for performing incremental caching and invalidations
 //! with multiple versions in-flight at the same time.
 //!
-//! The 'VersionedCache' will track dependency edges and use computed version
+//! The 'VersionedGraph' will track dependency edges and use computed version
 //! number for each cache entry and a global version counter to determine
 //! up-to-date-ness of cache entries.
 //!
-
-use std::cmp;
-use std::ops::Bound;
+//! TODO(cjhopman): Some of the documentation below indicates intended or future behavior. The intent
+//! is that this documentation describes a "correct" implementation of VersionedGraph as we want it
+//! to be; the implementation is still being updated to match it. This TODO should be removed when
+//! we've matched this behavior.
+//!
+//! Behavior:
+//!
+//! There are two main operations that the storage needs to support:
+//!
+//! - (Op. 1) getting the value of a key (X) at a particular version; this happens in one of three ways:
+//!   - (1.1): X has no previous value present: the new value is computed
+//!   - (1.2): X has a previous value present and it is known to be valid at that version: use the existing value
+//!   - (1.3): X has a previous value present and it is invalidated at that version
+//!     - (1.3a): if X's deps' values have not changed since some version where X's stored value was present, reuse the existing value
+//!     - (1.3b): if any of X's deps' values have changed, recompute the value
+//! - (Op. 2) processing invalidations being received (only at the most recent version)
+//!
+//! To support these operations, nodes store
+//!
+//! - (i) computed values
+//! - (ii) the seriesparalleldeps for that computed value
+//! - (iii) a cellhistory indicating at what versions both (i)+(ii) are known to be valid
+//! - (iv) a list of versions where the node is "force-dirtied"
+//! - (v) the non-invalidated most recent reverse dependencies.
+//!
+//! A node may store multiple computed values (and so also multiple (ii) and (iii)) at different versions. Nodes for InjectedKeys, for
+//! example, will store all values that they ever see (as we cannot recompute ones that we drop).
+//!
+//! A node will not know about invalidations outside of its valid cell history.
+//!
+//! For example, consider a scenario where a node A depends on B, with this sequence:
+//!
+//! - at v1, A is computed (and so B is as well)
+//! - at v2, B is invalidated, A will also be invalidated
+//! - at v2, B is then computed (but not A)
+//! - at v3, B is invalidated.
+//!
+//! After this sequence, A will not have been informed of the invalidation at v3.
+//! If A is then computed at v2, we will do "deferred dirty propagation" to
+//! inform it of the dirty at v3 (if appropriate).
+//!
+//! For (Op. 2) invalidations: Invalidations can start at both leaf and non-leaf nodes. An invalidation only happens at
+//! the maximum version. Invalidation of a node does roughly:
+//!
+//! ```ignore
+//! // returns a set of keys that also need to be invalidated
+//! invalidate(node, version) -> Vec<Key> {
+//!     if already dirtied? { return vec![] }
+//!     mark_dirty(node, version);
+//!     return take(&mut node.rdeps);
+//! }
+//! ```
+//!
+//! rdeps are stored only for invalidation. Together, this means that a node only needs to store
+//! rdeps for its latest version and only until being invalidated.
+//!
+//! For (Op. 1) get/compute, there are a couple of non-trivial steps:
+//!
+//! Consider `get(key=K, version=V)`:
+//!
+//! 1. node lookup: will look up the node for key K; if it's not present, it needs to be computed (so skip (2));
+//! if it is present and valid at V it can be directly reused (and we skip everything else, including 4, as
+//! there's no state to update).
+//!
+//! 2. deps check: this will try to determine if we can reuse the cached (but dirtied) value for a node. First,
+//! we have the node for key K with a potential value+deps and cell-history H. We will check the latest version
+//! in H less than V (call this VP). For each dep Dn, we will get its history at V and check if it also includes
+//! the version VP (i.e. Dn had the same value at VP as it does at V). If all deps pass that check, we can reuse
+//! K's value from version VP (sketched in the example after this list).
+//!
+//! 3. re-compute: the VersionedGraph doesn't care much about this, it's just a normal non-cached compute
+//!
+//! 4. update state: for a value+deps we are storing we have two associated things to make sure are
+//! up-to-date: (i) the corresponding cell-history and (ii) the rdeps of the node's dependencies.
+//! (i) has two parts: first, determine the initial cell history from the node; there are two cases:
+//!
+//! a. value is valid from checkdeps: in this case, we'll get a version VP (from (2) above) that indicates
+//! the version at which we've checked the deps didn't change. We can reuse the cell history for the node
+//! at version VP.
+//!
+//! b. value has been recomputed. We can reuse the cell-history for the node if both its value and its deps
+//! are equal to the new value+deps.
+//!
+//! If there's no cell-history to reuse (maybe because a newer computation has evicted the associated data), we only
+//! know that the value+deps are valid at exactly `[V, V+1)` (i.e. just at version V).
+//! Second, we determine the set of versions at which we know the deps have the same value as at the version
+//! we are computing (V). This is just an intersection of their cell-histories at that version. The valid deps versions
+//! are further restricted to ensure they don't cross any force-dirtied versions of the node we are computing.
+//! The final valid cell-history is the union of these two.
+//! (ii) is easier, we just tell each dep node to record the rdep at the version we are computing. If the rdep has already seen a
+//! dirty at a later version, it does not need to record the rdep (and our computing of (i) will reflect that dirty).
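+//!
+//! As a concrete illustration of the deps check in (2), a sketch in pseudocode (illustrative
+//! names, not the real API):
+//!
+//! ```ignore
+//! // try to find a version VP whose cached value can be reused at V
+//! deps_check(node, V) -> Option<Value> {
+//!     let VP = latest_version_in(node.history, before: V)?;
+//!     if node.deps.iter().all(|dep| dep.history_at(V).contains(VP)) {
+//!         Some(node.value_at(VP)) // every dep is unchanged between VP and V
+//!     } else {
+//!         None // some dep changed; fall through to re-compute (3)
+//!     }
+//! }
+//! ```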
+//!
+//! "forced-dirty":
+//! The non-leaf nodes that are directly invalidated are going to be marked as "force-dirtied" at that
+//! version, while rdeps of those just get marked as invalidated. If a dep is marked as "force-dirtied"
+//! at a version, we will never reuse its value across that version based on its deps not changing.
+//!
+//! This means that we need to store these markers forever and that a deps check cannot cross these markers
+//! and that when computing a cell-history after recomputing we must ensure that the deps-based part of that
+//! history does not cross the markers (it's okay for value-based equality to make the history cross those
+//! values).
+//!
+//! Code structure:
+//!
+//! This is structured so that VersionedGraphNode is mostly responsible for the parts of these operations
+//! that are specific to a single node and that VersionedGraph is responsible for the rest (so things
+//! crossing multiple nodes, managing the map of nodes, dealing with nodes not yet being in the map, etc.).
+//!
+//! Potential improvements:
+//!
+//! 1. We could decouple the cell history for (i) and (ii). Right now, if
+//! the list of deps changes but the computed value stays the same, we don't record that dependents could
+//! reuse the value across the two versions.
+//!
+//! 2. For the deps check, instead of checking only against the most recent previous version, we could
+//! check against the entire cell-history for our potential reused value. The idea of the deps check is that
+//! if all of the deps are in a matching state for any version within our cell-history, we can reuse that
+//! cell. We currently check only against the most recent version because it is significantly simpler and
+//! in practice is almost as good as checking against the whole history.
+//!
+//! FAQ:
+//!
+//! Q: CellHistory is complex, couldn't we just operate on a single VersionRange?
+//!
+//! A: Consider the case where key A depends on B, which depends on C, and the following sequence of operations:
+//!
+//! - At V1, compute A (and so B and C).
+//! - Change C to a new value (computation is now at V2).
+//! - At V2, compute C.
+//! - A and B will now be dirty at V2
+//! - Change C back to its initial (V1) value (computation is now at V3)
+//! - At V3, compute A
+//!
+//! When B is recomputed at V3, we will see a history like: `[[V1, V2), [V3, inf)]` and see that we can reuse
+//! the A that was computed at V1. If nodes only stored a valid VersionRange, B would have lost the information
+//! that it has the same value at V1 and V3.
+//!
+//! Q: Why use CellHistory-based dependency checks at all? Could we store the values that a node's deps
+//! were seen to have along with the deps, and do value-based dependency checks?
+//!
+//! A: This could be an interesting avenue to explore. It has several potential drawbacks: (1) significantly increased
+//! memory use to store pointers (probably Arc) to the values in each dependent (2) value equality is potentially
+//! expensive and this approach may require additional caching to avoid that cost (for example, at least a
+//! transaction-level cache of such comparisons, otherwise we'd need to do O(E) equality checks).
+//!
+//! Q: Could we do strong-hash-based equality checks instead?
+//!
+//! A: This could also be interesting to explore. It's possible that this could resolve all the issues with doing
+//! value-based dep checks.
 
 use allocative::Allocative;
-use dupe::Dupe;
-use sorted_vector_map::SortedVectorMap;
 
+use crate::api::key::InvalidationSourcePriority;
 use crate::api::storage_type::StorageType;
 use crate::arc::Arc;
-use crate::impls::core::graph::dependencies::VersionedDependencies;
-use crate::impls::core::graph::history::CellHistory;
-use crate::impls::core::graph::history::HistoryState;
+use crate::impls::core::graph::nodes::ForceDirtyHistory;
+use crate::impls::core::graph::nodes::InjectedGraphNode;
+use crate::impls::core::graph::nodes::InvalidateResult;
 use crate::impls::core::graph::nodes::OccupiedGraphNode;
 use crate::impls::core::graph::nodes::VacantGraphNode;
 use crate::impls::core::graph::nodes::VersionedGraphNode;
 use crate::impls::core::graph::types::VersionedGraphKey;
 use crate::impls::core::graph::types::VersionedGraphResult;
-use crate::impls::core::graph::types::VersionedGraphResultMismatch;
+use crate::impls::deps::graph::SeriesParallelDeps;
 use crate::impls::key::DiceKey;
 use crate::impls::value::DiceComputedValue;
 use crate::impls::value::DiceValidValue;
+use crate::impls::value::TrackedInvalidationPaths;
 use crate::versions::VersionNumber;
+use crate::versions::VersionRange;
 use crate::versions::VersionRanges;
 use crate::HashMap;
+use crate::HashSet;
 
 /// The actual incremental cache that checks versions and dependency's versions
 /// to maintain correct caching based on versions and the versions of its
 /// dependencies.
-///
-/// TODO refactor this so that the storage doesn't handle multi version, instead the nodes do, and
-/// support different node types of different storage persistency.
 #[derive(Allocative)]
 pub(crate) struct VersionedGraph {
     /// storage that stores every version forever
@@ -56,470 +200,186 @@ pub(crate) struct VersionedGraph {
     /// the node changes. Corresponding to each key is a node storing the values and the history.
     /// VacantGraphEntries can only be present when no other entries are present for the key at
     /// any version.
-    pub(crate) last_n: HashMap<DiceKey, SortedVectorMap<VersionNumber, VersionedGraphNode>>,
+    pub(crate) nodes: HashMap<DiceKey, VersionedGraphNode>,
 }
 
 impl VersionedGraph {
     pub(crate) fn new() -> Self {
         Self {
-            last_n: Default::default(),
+            nodes: Default::default(),
        }
    }
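+
+    // Typical flow (illustrative): a caller first does `get(key@v)`; on `Match` it reuses the
+    // value, on `CheckDeps` it validates the returned deps and then calls `update` with
+    // `ValueReusable::VersionBased(prev_verified_version)`, and on `Compute` it recomputes and
+    // calls `update` with the new value.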
 
-    /// gets the cache entry corresponding to the cache entry if up to date.
-    /// returns 'None' if entry is missing or versions are out of date.
+    /// Gets the entry for the given key if it is up to date.
     pub(crate) fn get(&self, key: VersionedGraphKey) -> VersionedGraphResult {
-        fn handle_occupied(
-            key: VersionedGraphKey,
-            entry: &OccupiedGraphNode,
-        ) -> VersionedGraphResult {
-            match entry.metadata().hist.get_history(&key.v) {
-                HistoryState::Verified => VersionedGraphResult::Match(entry.computed_val()),
-                HistoryState::Unknown(verified_versions) => {
-                    VersionedGraphResult::CheckDeps(VersionedGraphResultMismatch {
-                        entry: entry.val().dupe(),
-                        verified_versions,
-                        deps_to_validate: entry.metadata().deps.deps(),
-                    })
-                }
-                HistoryState::Dirty => VersionedGraphResult::Compute,
-            }
-        }
-
-        fn handle_vacant() -> VersionedGraphResult {
-            // vacant entries only occur if no other graph entries are
-            // present, so we know this has to be dirty
-            VersionedGraphResult::Compute
-        }
-
-        if let Some(versioned) = self.last_n.get(&key.k) {
-            let mut potential = versioned.range((
-                Bound::Included(VersionNumber::new(0)),
-                Bound::Included(key.v),
-            ));
-            if let Some(found) = potential.next_back().map(|e| match e.1 {
-                VersionedGraphNode::Occupied(entry) => handle_occupied(key, entry),
-                VersionedGraphNode::Vacant(_) => handle_vacant(),
-            }) {
-                found
-            } else {
-                // this branch takes care of an ongoing computation that is operating on an older
-                // version than anything stored currently. However, it has a problem where it's nodes
-                // would fail to share work due to nothing going into the cache if its evaluating
-                // to a different result. TODO add some per ctx result caching for old versions
-                versioned
-                    .range((Bound::Included(key.v), Bound::Unbounded))
-                    .find_map(|(v, e)| match e {
-                        VersionedGraphNode::Occupied(e) => Some((v, e)),
-                        VersionedGraphNode::Vacant(_) => None,
-                    })
-                    .map_or_else(
-                        || VersionedGraphResult::Compute,
-                        |(_, entry)| {
-                            VersionedGraphResult::CheckDeps(VersionedGraphResultMismatch {
-                                entry: entry.val().dupe(),
-                                verified_versions: entry.metadata().hist.get_verified_ranges(),
-                                deps_to_validate: entry.metadata().deps.deps(),
-                            })
-                        },
-                    )
-            }
+        if let Some(entry) = self.nodes.get(&key.k) {
+            entry.at_version(key.v)
         } else {
             VersionedGraphResult::Compute
         }
     }
 
-    /// gets the cache entry corresponding to the cache entry if up to date.
-    /// returns 'None' if entry is missing or versions are out of date.
-    fn get_internal<'a>(
-        &'a mut self,
-        key: VersionedGraphKey,
-    ) -> Option<&'a mut VersionedGraphNode> {
-        if let Some(versioned) = self.last_n.get_mut(&key.k) {
-            let v = versioned
-                .range((
-                    Bound::Included(VersionNumber::new(0)),
-                    Bound::Included(key.v),
-                ))
-                .next_back()
-                .map(|e| *e.0);
-
-            match v {
-                None => {
-                    // this branch takes care of an ongoing computation that is operating on an older
-                    // version than anything stored currently. However, it has a problem where it's nodes
-                    // would fail to share work due to nothing going into the cache if its evaluating
-                    // to a different result. TODO add some per ctx result caching for old versions
-                    versioned
-                        .range_mut((Bound::Included(key.v), Bound::Unbounded))
-                        .map(|(_v, e)| e)
-                        .next()
-                }
-                Some(v) => versioned.get_mut(&v),
-            }
-        } else {
-            None
-        }
-    }
-
     /// updates the cached value based on the given key and versions. The value
-    /// is only updated if the version of the new value is of a newer
-    /// version than what is stored.
+    /// is only stored if its version is newer than the version already stored.
     /// Returns the new entry, and an optional old entry that was invalidated due to this update
-    #[instrument(level = "debug", skip(self, value, storage_type, deps, reusable), fields(key = ?key))]
+    #[cfg_attr(debug_assertions, instrument(level = "debug", skip(self, value, storage_type, deps, reusable), fields(key = ?key)))]
     pub(crate) fn update(
         &mut self,
         key: VersionedGraphKey,
         value: DiceValidValue,
         reusable: ValueReusable,
-        deps: Arc<Vec<DiceKey>>,
+        deps: Arc<SeriesParallelDeps>,
         storage_type: StorageType,
+        invalidation_paths: TrackedInvalidationPaths,
     ) -> (DiceComputedValue, bool) {
-        let StorageType::LastN(num_to_keep) = storage_type;
-        // persistent keys, if any changes, are committed at the moment when the version
-        // is increased. therefore, it must be the case that the current update for the
-        // persistent key is the largest/newest version. it's also the case that they are
-        // never updated to the cache more than once per version.
-        // TODO refactor this to be less error prone.
-
-        // we pick the nearest entry because the closest version number to the current key would
-        // have the least number of changes recorded in dice, which we assume naively to mean
-        // most likely to reuse a node. We could implement this to check for reuse against both
-        // the previous and the next version, but that complexity is likely not worth the benefit
-        // of trying to reuse a node. Maybe this is worth revisiting at some point.
-        let nearest = {
-            let versioned_map = self.last_n.entry(key.k).or_default();
-            Self::nearest_entry(&key, versioned_map)
+        if let StorageType::Injected = storage_type {
+            unreachable!(
+                "Injected keys should not receive update calls, as those are only from a compute() finishing and InjectedKeys have no compute()"
+            );
        };
 
-        let mut latest_dep_verified = None;
-        let mut first_dep_dirtied = None;
-        for dep in deps.iter() {
-            match self.get_internal(VersionedGraphKey::new(key.v, dep.dupe())) {
-                None => {
-                    unreachable!("dependency should exist")
-                }
-                Some(node) => match node {
-                    VersionedGraphNode::Occupied(occ) => {
-                        if let Some(dep_v) = occ.metadata().hist.latest_verified_before(key.v) {
-                            latest_dep_verified = cmp::max(latest_dep_verified, Some(dep_v));
-
-                            let dep_d_v = occ.metadata().hist.first_dirty_after(key.v);
-                            first_dep_dirtied = cmp::min(first_dep_dirtied.or(dep_d_v), dep_d_v);
-
-                            occ.metadata_mut().rdeps.add_rdep(key.k, key.v);
-                        } else {
-                            let dep_d_v = occ.metadata().hist.first_verified_after(key.v);
-                            first_dep_dirtied = cmp::min(first_dep_dirtied.or(dep_d_v), dep_d_v);
-                        }
-                    }
-                    VersionedGraphNode::Vacant(_) => {
-                        unreachable!("dependency should exist")
-                    }
-                },
-            }
+        let mut valid_deps_versions = VersionRange::begins_with(VersionNumber::ZERO).into_ranges();
+
+        // Add rdeps.
+        for dep in deps.iter_keys() {
+            let node = self.nodes.get_mut(&dep).expect("dependency should exist");
+            node.add_rdep_at(key.v, key.k);
+            node.intersect_valid_versions_at(key.v, &mut valid_deps_versions);
         }
 
-        if let Some(key_of_e) = nearest {
-            self.update_entry(
-                key_of_e,
+        let invalidation_paths = invalidation_paths.for_dependent(key.k);
+
+        // Update entry.
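+        // If a node already exists for this key, `on_computed` (in nodes.rs) decides whether the
+        // existing entry can be reused or must be overwritten; otherwise `update_empty` below
+        // creates a fresh occupied node.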
+        match self.nodes.get_mut(&key.k) {
+            Some(entry) => entry.on_computed(
                 key,
                 value,
-                first_dep_dirtied,
-                latest_dep_verified,
+                valid_deps_versions,
                 reusable,
                 deps,
-                num_to_keep,
-            )
-        } else {
-            (
+                invalidation_paths,
+            ),
+
+            None => (
                 self.update_empty(
                     key.k,
                     key.v,
                     value,
-                    first_dep_dirtied,
-                    latest_dep_verified,
+                    valid_deps_versions,
                     deps,
+                    invalidation_paths,
                 ),
                 true,
-            )
+            ),
         }
     }
 
-    /// find the nearest entry to the given key, preferring the smaller version number when tied
-    fn nearest_entry<'a>(
-        key: &VersionedGraphKey,
-        versioned_map: &'a SortedVectorMap<VersionNumber, VersionedGraphNode>,
-    ) -> Option<VersionNumber> {
-        let newest_previous = versioned_map
-            .range((
-                Bound::Included(VersionNumber::new(0)),
-                Bound::Included(key.v),
-            ))
-            .next_back()
-            .map(|(v, _e)| *v);
-        let oldest_newer = versioned_map
-            .range((Bound::Included(key.v), Bound::Unbounded))
-            .next()
-            .map(|(v, _e)| *v);
-
-        match (newest_previous, oldest_newer) {
-            (Some(prev_v), Some(next_v)) => {
-                if next_v - key.v < prev_v - key.v {
-                    Some(next_v)
-                } else {
-                    Some(prev_v)
-                }
-            }
-            (Some(x), None) => Some(x),
-            (None, Some(x)) => Some(x),
-            (None, None) => None,
-        }
-    }
-
-    #[instrument(level = "debug", skip(self, value, deps), fields(key = ?key, v = %v, first_dep_dirtied = ?first_dep_dirtied, latest_dep_verified = ?latest_dep_verified))]
-    fn update_empty(
-        &mut self,
-        key: DiceKey,
-        v: VersionNumber,
-        value: DiceValidValue,
-        first_dep_dirtied: Option<VersionNumber>,
-        latest_dep_verified: Option<VersionNumber>,
-        deps: Arc<Vec<DiceKey>>,
-    ) -> DiceComputedValue {
-        debug!("making new graph entry because previously empty");
-
-        let since = latest_dep_verified.unwrap_or(v);
-        let mut hist = CellHistory::verified(since);
-        hist.propagate_from_deps_version(since, first_dep_dirtied);
-        let entry =
-            OccupiedGraphNode::new(key, value, VersionedDependencies::new(since, deps), hist);
-
-        let res = entry.computed_val();
-
-        self.last_n
-            .get_mut(&key)
-            .unwrap()
-            .insert(v, VersionedGraphNode::Occupied(entry));
-
-        res
-    }
-
-    /// Returns the newly updated value for the key, and whether or not any state changed.
- #[instrument(level = "debug", skip(self, value, deps, num_to_keep, reusable), fields(key = ?key, key_of_e = %key_of_e, first_dep_dirtied = ?first_dep_dirtied, latest_dep_verified = ?latest_dep_verified))] - fn update_entry( - &mut self, - key_of_e: VersionNumber, - key: VersionedGraphKey, - value: DiceValidValue, - first_dep_dirtied: Option, - latest_dep_verified: Option, - reusable: ValueReusable, - deps: Arc>, - num_to_keep: usize, - ) -> (DiceComputedValue, bool) { - let versioned_map = self.last_n.get_mut(&key.k).unwrap(); - let (ret, map_fixup) = match versioned_map.get_mut(&key_of_e).unwrap() { - VersionedGraphNode::Occupied(entry) if reusable.is_reusable(&value, entry) => { - debug!("marking graph entry as unchanged"); - let since = - entry.mark_unchanged(key.v, latest_dep_verified, first_dep_dirtied, deps); - - let ret = entry.computed_val(); - - (ret, MapFixup::Reused { since, key_of_e }) - } - entry => { - debug!("making new graph entry because value not reusable"); - - let (since, end, mut hist) = entry - .history() - .make_new_verified_history(key.v, latest_dep_verified); - - hist.propagate_from_deps_version(key.v, first_dep_dirtied); - - let new = OccupiedGraphNode::new( - key.k, - value, - VersionedDependencies::new(since, deps), - hist, - ); - - let ret = new.computed_val(); - - ( - ret, - MapFixup::NewEntry { - since, - end, - new, - key_of_e, - num_to_keep, - }, - ) - } - }; - - let any_invalidated = map_fixup.fixup(versioned_map); - - (ret, any_invalidated) - } - /// Invalidates an entry and its transitive rdeps. Returning true if this caused any type of /// change pub(crate) fn invalidate( &mut self, key: VersionedGraphKey, invalidate: InvalidateKind, + invalidation_priority: InvalidationSourcePriority, ) -> bool { - let rdeps = { - match invalidate { - invalidate @ (InvalidateKind::ForceDirty | InvalidateKind::Invalidate) => { - let versioned_map = self - .last_n - .entry(key.k) - .or_insert_with(SortedVectorMap::new); - if let Some(e) = versioned_map - .range_mut((Bound::Unbounded, Bound::Included(key.v))) - .next_back() - .map(|(_, e)| e) - { - let dirtied = match invalidate { - InvalidateKind::ForceDirty => e.force_dirty(key.v), - InvalidateKind::Invalidate => e.mark_invalidated(key.v), - _ => unreachable!("handled elsewhere"), - }; - - if dirtied { - if let Some(e) = e.unpack_occupied() { - let queue = { - let metadata = e.metadata(); - let rdeps = metadata.rdeps.rdeps(); - - rdeps - .iter() - .map(|(r, v)| (r.dupe(), *v)) - .collect::>() - }; - - queue - } else { - return true; - } - } else { - return false; - } - } else { - let mut entry = VersionedGraphNode::Vacant(VacantGraphNode { - key: key.k, - hist: CellHistory::empty(), - }); - - entry.mark_invalidated(key.v); - - versioned_map.insert(key.v, entry); - - return true; - } - } - InvalidateKind::Update(value, StorageType::LastN(num_to_keep)) => { - let rdeps = { - let entry = self.last_n.get(&key.k).and_then(|versioned_map| { - versioned_map - .range((Bound::Unbounded, Bound::Included(key.v))) - .next_back() - .map(|(_, e)| e) - }); - - match entry { - Some(VersionedGraphNode::Occupied(occ)) => { - if !occ.val().equality(&value) { - occ.metadata() - .rdeps - .rdeps() - .iter() - .map(|(r, v)| (r.dupe(), *v)) - .collect::>() - } else { - return false; - } - } - _ => vec![], - } - }; - - let versioned_map = self.last_n.entry(key.k).or_default(); - let fixup = if let Some((key_of_e, entry)) = versioned_map - .range_mut((Bound::Unbounded, Bound::Included(key.v))) - .next_back() - { - let (since, end, mut hist) = 
- entry.history().make_new_verified_history(key.v, None); - - hist.propagate_from_deps_version(key.v, None); - - let new = OccupiedGraphNode::new( + let entry = match self.nodes.get_mut(&key.k) { + Some(entry) => entry, + _ => { + let new_entry = match invalidate { + InvalidateKind::Update(value, StorageType::Injected) => { + VersionedGraphNode::Injected(InjectedGraphNode::new( key.k, + key.v, value, - VersionedDependencies::new(since, Arc::new(vec![])), - hist, - ); - - MapFixup::NewEntry { - since, - end, - new, - key_of_e: *key_of_e, - num_to_keep, - } - } else { - let entry = VersionedGraphNode::Occupied(OccupiedGraphNode::new( + invalidation_priority, + )) + } + InvalidateKind::Update(value, StorageType::Normal) => { + VersionedGraphNode::Occupied(OccupiedGraphNode::new( key.k, value, - VersionedDependencies::new(key.v, Arc::new(vec![])), - CellHistory::verified(key.v), - )); - - versioned_map.insert(key.v, entry); - - return true; - }; + Arc::new(SeriesParallelDeps::None), + VersionRange::begins_with(key.v).into_ranges(), + ForceDirtyHistory::new(), + TrackedInvalidationPaths::new(invalidation_priority, key.k, key.v), + )) + } + _ => { + let mut node = VacantGraphNode::new(key.k, invalidation_priority); + // invalidated and force_dirty for a vacant node are going to have the same behavior in practice. + node.force_dirty(key.v, invalidation_priority); + VersionedGraphNode::Vacant(node) + } + }; - fixup.fixup(versioned_map); + self.nodes.insert(key.k, new_entry); + return true; + } + }; - rdeps + let queue = { + let res = match invalidate { + InvalidateKind::Invalidate => { + entry.mark_invalidated(key.v, Some(invalidation_priority)) } + InvalidateKind::ForceDirty => entry.force_dirty(key.v, invalidation_priority), + InvalidateKind::Update(value, _) => { + entry.on_injected(key.v, value, invalidation_priority) + } + }; + + if let InvalidateResult::Changed(rdeps) = res { + rdeps.into_iter().flatten().collect() + } else { + return false; } }; - self.invalidate_rdeps(key.v, rdeps); + self.invalidate_rdeps(key.v, queue); true } - fn invalidate_rdeps( + // ----------------------------------------------------------------------------- + // ------------------------- Implementation functions below -------------------- + // ----------------------------------------------------------------------------- + + #[cfg_attr(debug_assertions, instrument(level = "debug", skip(self, value, deps), fields(key = ?key, v = %v, valid_deps_versions = ?valid_deps_versions)))] + fn update_empty( &mut self, - version: VersionNumber, - mut queue: Vec<(DiceKey, VersionNumber)>, - ) { - while let Some((rdep, relevant_version)) = queue.pop() { - if let Some(node) = self.get_internal(VersionedGraphKey::new(relevant_version, rdep)) { - if node.mark_invalidated(version) { - // since dirty always occurs in increasing order, it must be the case that if - // the history was already dirtied, it was by a version number less than the - // current version number. 
-                    // furthermore, if the rdep was dirtied, at any future versions larger than
-                    // the version it was dirtied at, it may no longer depend on the current node
-                    // so we skip marking it as dirty, and rely on delayed propagation of dirty
-
-                    if let Some(node) = node.unpack_occupied() {
-                        queue.extend({
-                            let rdeps = node.metadata().rdeps.rdeps();
-
-                            rdeps
-                                .iter()
-                                .map(|(r, v)| (r.dupe(), *v))
-                                .collect::<Vec<_>>()
-                        })
+        key: DiceKey,
+        v: VersionNumber,
+        value: DiceValidValue,
+        mut valid_deps_versions: VersionRanges,
+        deps: Arc<SeriesParallelDeps>,
+        invalidation_paths: TrackedInvalidationPaths,
+    ) -> DiceComputedValue {
+        debug!("making new graph entry because previously empty");
+        valid_deps_versions.insert(VersionRange::bounded(v, VersionNumber::new(v.0 + 1)));
+        let entry = OccupiedGraphNode::new(
+            key,
+            value,
+            deps,
+            valid_deps_versions,
+            ForceDirtyHistory::new(),
+            invalidation_paths,
+        );
+
+        let res = entry.computed_val(v);
+        self.nodes.insert(key, VersionedGraphNode::Occupied(entry));
+        res
+    }
+
+    fn invalidate_rdeps(&mut self, version: VersionNumber, mut queued: HashSet<DiceKey>) {
+        let mut queue: Vec<_> = queued.iter().copied().collect();
+
+        while let Some(rdep) = queue.pop() {
+            if let Some(node) = self.nodes.get_mut(&rdep) {
+                if let InvalidateResult::Changed(Some(rdeps)) = node.mark_invalidated(version, None)
+                {
+                    for dep in rdeps.into_iter() {
+                        if queued.insert(dep) {
+                            queue.push(dep);
+                        }
                     }
                 }
             }
@@ -531,16 +391,25 @@ pub(crate) enum ValueReusable {
     /// Directly compare the values for equality to determine if the node can be reused
     EqualityBased,
     /// Compare the value's version history to determine if the node can be reused
-    VersionBased(VersionRanges),
+    VersionBased(VersionNumber),
 }
 
 impl ValueReusable {
-    fn is_reusable(&self, new_value: &DiceValidValue, value: &OccupiedGraphNode) -> bool {
+    pub(crate) fn is_reusable(
+        &self,
+        new_value: &DiceValidValue,
+        new_deps: &SeriesParallelDeps,
+        value: &OccupiedGraphNode,
+    ) -> bool {
         match self {
-            ValueReusable::EqualityBased => new_value.equality(value.val()),
-            ValueReusable::VersionBased(hist) => !hist
-                .intersect(&value.metadata().hist.get_verified_ranges())
-                .is_empty(),
+            ValueReusable::EqualityBased => {
+                if new_deps != &***value.deps() {
+                    return false;
+                }
+                new_value.equality(value.val())
+            }
+            // For version-based, the deps are guaranteed to match if `version` is in the node's verified versions.
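+            // (e.g. if the node's verified ranges include [v1, v3), then VersionBased(v2)
+            // permits reuse while VersionBased(v3) does not.)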
+            ValueReusable::VersionBased(version) => value.is_verified_at(*version),
        }
    }
}
@@ -552,95 +421,13 @@ pub(crate) enum InvalidateKind {
     Update(DiceValidValue, StorageType),
 }
 
-// because of rust mut lifetimes, we have to fixup the entries in our map after we drop the
-// references to the nodes we modify. This forces us to express things via these enums
-// that approximately acts as lambdas
-enum MapFixup {
-    Reused {
-        since: VersionNumber,
-        key_of_e: VersionNumber,
-    },
-    NewEntry {
-        since: VersionNumber,
-        end: Option<VersionNumber>,
-        new: OccupiedGraphNode,
-        key_of_e: VersionNumber,
-        num_to_keep: usize,
-    },
-}
-
-impl MapFixup {
-    fn fixup(self, versioned_map: &mut SortedVectorMap<VersionNumber, VersionedGraphNode>) -> bool {
-        match self {
-            MapFixup::Reused { since, key_of_e } => {
-                if since < key_of_e {
-                    let old = versioned_map.remove(&key_of_e).unwrap();
-                    versioned_map.insert(since, old);
-                }
-                false
-            }
-            MapFixup::NewEntry {
-                since,
-                end,
-                new,
-                key_of_e,
-                num_to_keep,
-            } => {
-                match versioned_map.get(&key_of_e).unwrap() {
-                    VersionedGraphNode::Occupied(occ) => {
-                        if let Some(end) = end {
-                            // if there is newer data, we also need to store that at a newer
-                            // key to make it reachable.
-                            // TODO(bobyf): we probably want a custom versioned map here to
-                            // better represent this and reduce complexity
-
-                            if versioned_map.len() == num_to_keep {
-                                // if we are already at max entries to store, then we should
-                                // just skip doing this entirely, as the most up to date
-                                // entry we will store will be the entry at "end", which
-                                // is no different than the original entry.
-                                // We also don't need to store rdeps since this node will be discarded
-                                return true;
-                            }
-
-                            // TODO change storage so that we don't clone
-                            versioned_map.insert(end, VersionedGraphNode::Occupied(occ.clone()));
-                        }
-
-                        if versioned_map.len() == num_to_keep {
-                            let min_version_stored = *versioned_map.iter().next().expect("should be at least one entry if there is more entries than what we want to keep").0;
-
-                            if since < min_version_stored {
-                                return true;
-                            }
-
-                            versioned_map.remove(&min_version_stored);
-                        }
-
-                        versioned_map.insert(since, VersionedGraphNode::Occupied(new));
-
-                        true
-                    }
-                    VersionedGraphNode::Vacant(_) => {
-                        // remove the vacant entry since we now have an actual entry
-                        versioned_map.remove(&key_of_e);
-                        versioned_map.insert(since, VersionedGraphNode::Occupied(new));
-
-                        true
-                    }
-                }
-            }
-        }
-    }
-}
-
 #[cfg(test)]
 pub(crate) mod testing {
     use gazebo::variants::VariantName;
 
     use crate::impls::core::graph::storage::VersionedGraphResult;
-    use crate::impls::core::graph::storage::VersionedGraphResultMismatch;
+    use crate::impls::core::graph::types::VersionedGraphResultMismatch;
     use crate::impls::value::DiceComputedValue;
 
     pub(crate) trait VersionedCacheResultAssertsExt {
@@ -652,19 +439,40 @@ pub(crate) mod testing {
     }
 
     impl VersionedCacheResultAssertsExt for VersionedGraphResult {
+        #[track_caller]
         fn assert_compute(&self) {
-            self.unpack_compute()
-                .unwrap_or_else(|| panic!("expected Compute, but was {}", self.variant_name()))
+            match self.unpack_compute() {
+                Some(v) => v,
+                None => panic!(
+                    "expected Compute, but was {} ({:?})",
+                    self.variant_name(),
+                    self
+                ),
+            }
         }
+        #[track_caller]
         fn assert_match(&self) -> &DiceComputedValue {
-            self.unpack_match()
-                .unwrap_or_else(|| panic!("expected Match, but was {}", self.variant_name()))
+            match self.unpack_match() {
+                Some(v) => v,
+                None => panic!(
+                    "expected Match, but was {} ({:?})",
+                    self.variant_name(),
+                    self
+                ),
+            }
         }
 
+        #[track_caller]
         fn assert_check_deps(&self) -> &VersionedGraphResultMismatch {
-            self.unpack_check_deps()
-                .unwrap_or_else(|| panic!("expected Mismatch, but was {}", self.variant_name()))
+            match self.unpack_check_deps() {
+                Some(v) => v,
+                None => panic!(
+                    "expected CheckDeps, but was {} ({:?})",
+                    self.variant_name(),
+                    self
+                ),
+            }
         }
     }
 }
@@ -676,12 +484,12 @@ mod tests {
     use allocative::Allocative;
     use async_trait::async_trait;
+    use buck2_futures::cancellation::CancellationContext;
     use derive_more::Display;
     use dupe::Dupe;
-    use more_futures::cancellation::CancellationContext;
-    use sorted_vector_map::sorted_vector_set;
 
     use crate::api::computations::DiceComputations;
+    use crate::api::key::InvalidationSourcePriority;
     use crate::api::key::Key;
     use crate::arc::Arc;
     use crate::impls::core::graph::storage::testing::VersionedCacheResultAssertsExt;
@@ -690,13 +498,12 @@ mod tests {
     use crate::impls::core::graph::storage::ValueReusable;
     use crate::impls::core::graph::storage::VersionedGraph;
     use crate::impls::core::graph::types::VersionedGraphKey;
+    use crate::impls::deps::graph::SeriesParallelDeps;
     use crate::impls::key::DiceKey;
     use crate::impls::value::DiceKeyValue;
     use crate::impls::value::DiceValidValue;
-    use crate::versions::testing::VersionRangesExt;
+    use crate::impls::value::TrackedInvalidationPaths;
     use crate::versions::VersionNumber;
-    use crate::versions::VersionRange;
-    use crate::versions::VersionRanges;
 
     #[derive(Allocative, Clone, Dupe, Debug, Display, PartialEq, Eq, Hash)]
     struct K;
@@ -718,12 +525,26 @@ mod tests {
         }
     }
 
+    fn inject(graph: &mut VersionedGraph, v: usize, key: DiceKey, value: usize) {
+        graph.invalidate(
+            VersionedGraphKey::new(VersionNumber::new(v), key),
+            InvalidateKind::Update(
+                DiceValidValue::testing_new(DiceKeyValue::<K>::new(value)),
+                StorageType::Injected,
+            ),
+            InvalidationSourcePriority::Normal,
+        );
+    }
+
     #[test]
     fn latest_only_stores_latest_only() {
         let mut cache = VersionedGraph::new();
         let res = DiceValidValue::testing_new(DiceKeyValue::<K>::new(100));
-        let key = VersionedGraphKey::new(VersionNumber::new(1), DiceKey { index: 0 });
+        let dep_key = DiceKey { index: 1 };
+        let key_at = |v| VersionedGraphKey::new(VersionNumber::new(v), DiceKey { index: 0 });
+        let key = key_at(0);
+
+        inject(&mut cache, 0, dep_key, 100);
 
         // first, empty cache gives none
         cache.get(key).assert_compute();
@@ -733,8 +554,9 @@ mod tests {
                     key.dupe(),
                     res.dupe(),
                     ValueReusable::EqualityBased,
-                    Arc::new(vec![]),
-                    StorageType::LastN(1)
+                    Arc::new(SeriesParallelDeps::serial_from_vec(vec![dep_key])),
+                    StorageType::Normal,
+                    TrackedInvalidationPaths::clean(),
                 )
                 .1
         );
@@ -743,16 +565,17 @@ mod tests {
 
         let res2 = DiceValidValue::testing_new(DiceKeyValue::<K>::new(200));
 
-        let key2 = VersionedGraphKey::new(VersionNumber::new(2), DiceKey { index: 0 });
-        assert!(cache.invalidate(key2.dupe(), InvalidateKind::Invalidate));
+        let key2 = key_at(2);
+        inject(&mut cache, 2, dep_key, 200);
         assert!(
             cache
                 .update(
                     key2.dupe(),
                     res2.dupe(),
                     ValueReusable::EqualityBased,
-                    Arc::new(vec![]),
-                    StorageType::LastN(1)
+                    Arc::new(SeriesParallelDeps::serial_from_vec(vec![dep_key])),
+                    StorageType::Normal,
+                    TrackedInvalidationPaths::clean(),
                )
                .1
        );
@@ -766,36 +589,39 @@ mod tests {
         );
         // old version is gone
         let entry = cache.get(key.dupe());
+
+        entry.assert_compute();
+
+        inject(&mut cache, 3, dep_key, 300);
+
+        let entry = cache.get(key_at(3));
         let mismatch = entry.assert_check_deps();
         assert!(mismatch.entry.equality(&res2));
-        assert_eq!(
-            mismatch.verified_versions,
-            VersionRanges::testing_new(sorted_vector_set![VersionRange::begins_with(
-                VersionNumber::new(2),
-            )])
-        );
+        assert_eq!(mismatch.prev_verified_version, VersionNumber::new(2));
 
         // if the value is the same, then versions are shared
         let res3 = DiceValidValue::testing_new(DiceKeyValue::<K>::new(200));
-        let key3 = VersionedGraphKey::new(VersionNumber::new(5), DiceKey { index: 0 });
key4 = VersionedGraphKey::new(VersionNumber::new(4), DiceKey { index: 0 }); - assert!(cache.invalidate(key4.dupe(), InvalidateKind::Invalidate)); - assert!(cache.invalidate(key3.dupe(), InvalidateKind::Invalidate)); + let key4 = key_at(4); + let key5 = key_at(5); + + inject(&mut cache, 4, dep_key, 400); + inject(&mut cache, 5, dep_key, 500); assert!( !cache .update( - key3.dupe(), + key5.dupe(), res3, ValueReusable::EqualityBased, - Arc::new(vec![]), - StorageType::LastN(1) + Arc::new(SeriesParallelDeps::serial_from_vec(vec![dep_key])), + StorageType::Normal, + TrackedInvalidationPaths::clean(), ) .1 ); assert!( cache - .get(key3.dupe(),) + .get(key5.dupe(),) .assert_match() .value() .equality(&res2) @@ -809,15 +635,12 @@ mod tests { ); // the first result is gone still let entry = cache.get(key.dupe()); + entry.assert_compute(); + + let entry = cache.get(key_at(3)); let mismatch = entry.assert_check_deps(); assert!(mismatch.entry.equality(&res2)); - assert_eq!( - mismatch.verified_versions, - VersionRanges::testing_new(sorted_vector_set![ - VersionRange::bounded(VersionNumber::new(2), VersionNumber::new(4)), - VersionRange::begins_with(VersionNumber::new(5)) - ]) - ); + assert_eq!(mismatch.prev_verified_version, VersionNumber::new(2)); // smaller version numbers don't get cached let res4 = DiceValidValue::testing_new(DiceKeyValue::::new(400)); @@ -827,25 +650,20 @@ mod tests { key4.dupe(), res4, ValueReusable::EqualityBased, - Arc::new(vec![]), - StorageType::LastN(1), + Arc::new(SeriesParallelDeps::serial_from_vec(vec![dep_key])), + StorageType::Normal, + TrackedInvalidationPaths::clean(), ) .1 ); let entry = cache.get(key4.dupe()); let mismatch = entry.assert_check_deps(); assert!(mismatch.entry.equality(&res2)); - assert_eq!( - mismatch.verified_versions, - VersionRanges::testing_new(sorted_vector_set![ - VersionRange::bounded(VersionNumber::new(2), VersionNumber::new(4)), - VersionRange::begins_with(VersionNumber::new(5)) - ]) - ); + assert_eq!(mismatch.prev_verified_version, VersionNumber::new(2)); assert!( cache - .get(key3.dupe()) + .get(key5.dupe()) .assert_match() .value() .equality(&res2) @@ -859,15 +677,14 @@ mod tests { ); // the first result is gone still let entry = cache.get(key.dupe()); + entry.assert_compute(); + + // @3 still needs deps check + let entry = cache.get(key_at(3)); let mismatch = entry.assert_check_deps(); assert!(mismatch.entry.equality(&res2)); - assert_eq!( - mismatch.verified_versions, - VersionRanges::testing_new(sorted_vector_set![ - VersionRange::bounded(VersionNumber::new(2), VersionNumber::new(4)), - VersionRange::begins_with(VersionNumber::new(5)) - ]) - ); + assert_eq!(mismatch.prev_verified_version, VersionNumber::new(2)); + // different key is miss cache .get(VersionedGraphKey::new( @@ -876,45 +693,38 @@ mod tests { )) .assert_compute(); - let key5 = VersionedGraphKey::new(VersionNumber::new(7), DiceKey { index: 0 }); - assert!(cache.invalidate(key5, InvalidateKind::ForceDirty)); - cache.get(key5.dupe()).assert_compute() + let key7 = VersionedGraphKey::new(VersionNumber::new(7), DiceKey { index: 0 }); + + cache.invalidate( + key7, + InvalidateKind::ForceDirty, + InvalidationSourcePriority::Normal, + ); + cache.get(key7.dupe()).assert_compute() } #[test] - fn last_n_max_usize_stores_everything() { + fn injected_keys_are_stored_indefinitely() { let mut cache = VersionedGraph::new(); let res = DiceValidValue::testing_new(DiceKeyValue::::new(100)); let key = VersionedGraphKey::new(VersionNumber::new(0), DiceKey { index: 0 }); - assert!( - cache - 
.update( - key, - res.dupe(), - ValueReusable::EqualityBased, - Arc::new(vec![]), - StorageType::LastN(usize::MAX) - ) - .1 - ); + assert!(cache.invalidate( + key, + InvalidateKind::Update(res.dupe(), StorageType::Injected), + InvalidationSourcePriority::Normal, + )); assert!(cache.get(key.dupe()).assert_match().value().equality(&res)); let res2 = DiceValidValue::testing_new(DiceKeyValue::::new(200)); let key2 = VersionedGraphKey::new(VersionNumber::new(1), DiceKey { index: 0 }); - assert!(cache.invalidate(key2.dupe(), InvalidateKind::Invalidate)); - assert!( - cache - .update( - key2.dupe(), - res2.dupe(), - ValueReusable::EqualityBased, - Arc::new(vec![]), - StorageType::LastN(usize::MAX) - ) - .1 - ); + + assert!(cache.invalidate( + key2, + InvalidateKind::Update(res2.dupe(), StorageType::Injected), + InvalidationSourcePriority::Normal, + )); assert!( cache @@ -929,18 +739,11 @@ mod tests { let res3 = DiceValidValue::testing_new(DiceKeyValue::::new(300)); let key3 = VersionedGraphKey::new(VersionNumber::new(5), DiceKey { index: 0 }); let key2 = VersionedGraphKey::new(VersionNumber::new(1), DiceKey { index: 0 }); - assert!(cache.invalidate(key3.dupe(), InvalidateKind::Invalidate)); - assert!( - cache - .update( - key3, - res3.dupe(), - ValueReusable::EqualityBased, - Arc::new(vec![]), - StorageType::LastN(usize::MAX) - ) - .1 - ); + assert!(cache.invalidate( + key3, + InvalidateKind::Update(res3.dupe(), StorageType::Injected), + InvalidationSourcePriority::Normal, + )); assert!( cache @@ -980,243 +783,121 @@ mod tests { // different key is none let key6 = VersionedGraphKey::new(VersionNumber::new(6), DiceKey { index: 100 }); cache.get(key6.dupe()).assert_compute(); - - let key7 = VersionedGraphKey::new(VersionNumber::new(7), DiceKey { index: 0 }); - assert!(cache.invalidate(key7.dupe(), InvalidateKind::ForceDirty)); - cache.get(key7.dupe()).assert_compute() - } - - #[tokio::test] - async fn last_2_stores_last_2() { - let mut cache = VersionedGraph::new(); - let res = DiceValidValue::testing_new(DiceKeyValue::::new(100)); - let key = VersionedGraphKey::new(VersionNumber::new(0), DiceKey { index: 0 }); - - assert!( - cache - .update( - key.dupe(), - res.dupe(), - ValueReusable::EqualityBased, - Arc::new(vec![]), - StorageType::LastN(2) - ) - .1 - ); - - assert!(cache.get(key.dupe()).assert_match().value().equality(&res)); - - let res2 = DiceValidValue::testing_new(DiceKeyValue::::new(200)); - let key2 = VersionedGraphKey::new(VersionNumber::new(1), DiceKey { index: 0 }); - assert!(cache.invalidate(key2.dupe(), InvalidateKind::Invalidate)); - assert!( - cache - .update( - key2.dupe(), - res2.dupe(), - ValueReusable::EqualityBased, - Arc::new(vec![]), - StorageType::LastN(2) - ) - .1 - ); - - assert!( - cache - .get(key2.dupe()) - .assert_match() - .value() - .equality(&res2) - ); - assert!(cache.get(key.dupe()).assert_match().value().equality(&res)); - - // skip a few versions - let res3 = DiceValidValue::testing_new(DiceKeyValue::::new(300)); - let key3 = VersionedGraphKey::new(VersionNumber::new(5), DiceKey { index: 0 }); - let key2 = VersionedGraphKey::new(VersionNumber::new(1), DiceKey { index: 0 }); - assert!(cache.invalidate(key3.dupe(), InvalidateKind::Invalidate)); - assert!( - cache - .update( - key3.dupe(), - res3.dupe(), - ValueReusable::EqualityBased, - Arc::new(vec![]), - StorageType::LastN(2) - ) - .1 - ); - - assert!( - cache - .get(key3.dupe()) - .assert_match() - .value() - .equality(&res3) - ); - assert!( - cache - .get(key2.dupe()) - .assert_match() - .value() - 
.equality(&res2) - ); - // the oldest entry should be evicted because we don't store more than 2 - let entry = cache.get(key.dupe()); - let mismatch = entry.assert_check_deps(); - assert!(mismatch.entry.equality(&res2)); - assert_eq!( - mismatch.verified_versions, - VersionRanges::testing_new(sorted_vector_set![VersionRange::bounded( - VersionNumber::new(1), - VersionNumber::new(5) - )]) - ) - } - - #[test] - fn test_dirty_for_persistent_storage() { - fn key(v: usize) -> VersionedGraphKey { - VersionedGraphKey::new(VersionNumber::new(v), DiceKey { index: 0 }) - } - - let mut cache = VersionedGraph::new(); - let res = DiceValidValue::testing_new(DiceKeyValue::::new(100)); - - let existing = cache.invalidate(key(0), InvalidateKind::Invalidate); - assert!(existing); - - cache.get(key(0).dupe()).assert_compute(); - cache.get(key(1).dupe()).assert_compute(); - - let existing = cache.invalidate(key(2), InvalidateKind::Invalidate); - assert!(existing); - - cache.get(key(0).dupe()).assert_compute(); - cache.get(key(1).dupe()).assert_compute(); - cache.get(key(2).dupe()).assert_compute(); - - cache.update( - key(0), - res.dupe(), - ValueReusable::EqualityBased, - Arc::new(vec![]), - StorageType::LastN(usize::MAX), - ); - assert!( - cache - .get(key(0).dupe()) - .assert_match() - .value() - .equality(&res) - ); - assert!( - cache - .get(key(1).dupe()) - .assert_match() - .value() - .equality(&res) - ); - cache.get(key(2)).assert_check_deps(); } #[test] fn test_dirty_for_nonpersistent_storage() { - fn key(v: usize) -> VersionedGraphKey { + fn key_a(v: usize) -> VersionedGraphKey { VersionedGraphKey::new(VersionNumber::new(v), DiceKey { index: 1 }) } let mut cache = VersionedGraph::new(); let res = DiceValidValue::testing_new(DiceKeyValue::::new(100)); - let existing = cache.invalidate(key(0), InvalidateKind::Invalidate); + let existing = cache.invalidate( + key_a(0), + InvalidateKind::ForceDirty, + InvalidationSourcePriority::Normal, + ); assert!(existing); - cache.get(key(0).dupe()).assert_compute(); - cache.get(key(1).dupe()).assert_compute(); + cache.get(key_a(0).dupe()).assert_compute(); + cache.get(key_a(1).dupe()).assert_compute(); - let existing = cache.invalidate(key(2), InvalidateKind::Invalidate); + let existing = cache.invalidate( + key_a(2), + InvalidateKind::ForceDirty, + InvalidationSourcePriority::Normal, + ); assert!(existing); - cache.get(key(0).dupe()).assert_compute(); - cache.get(key(1).dupe()).assert_compute(); - cache.get(key(2).dupe()).assert_compute(); + cache.get(key_a(0).dupe()).assert_compute(); + cache.get(key_a(1).dupe()).assert_compute(); + cache.get(key_a(2).dupe()).assert_compute(); cache.update( - key(0), + key_a(0), res.dupe(), ValueReusable::EqualityBased, - Arc::new(vec![]), - StorageType::LastN(1), + Arc::new(SeriesParallelDeps::None), + StorageType::Normal, + TrackedInvalidationPaths::clean(), ); assert!( cache - .get(key(0).dupe()) + .get(key_a(0).dupe()) .assert_match() .value() .equality(&res) ); assert!( cache - .get(key(1).dupe()) + .get(key_a(1).dupe()) .assert_match() .value() .equality(&res) ); - cache.get(key(2).dupe()).assert_check_deps(); + cache.get(key_a(2).dupe()).assert_compute(); } #[test] fn reuse_inserts_into_cache() { - // This tests a very specific condition of resurrecting a value. - // Consider a node n at version v0 that was dirtied at v1, v2. - // It was evaluated at v1, resulting in a different value, but at v2, it results in the same - // value as v0. 
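For context on what "resurrecting" buys here: below is a minimal, std-only sketch of equality-based reuse, outside of dice. Recomputing a key at a later version and producing a value equal to the stored one keeps the old allocation alive rather than inserting a fresh entry. `SimpleVersionedSlot` and `insert_or_reuse` are invented names for this sketch, not dice's API; the real `VersionedGraph::update` additionally tracks deps, storage type, and invalidation paths.

```rust
use std::sync::Arc;

// One key's storage slot. `verified_at` is the most recent version at which
// the stored value was computed or verified.
struct Slot<V> {
    value: Arc<V>,
    verified_at: u64,
}

struct SimpleVersionedSlot<V> {
    current: Option<Slot<V>>,
}

impl<V: PartialEq> SimpleVersionedSlot<V> {
    fn new() -> Self {
        Self { current: None }
    }

    /// Store `value` computed at `version`. If it compares equal to what is
    /// already stored, reuse the old allocation and just bump the verified
    /// version; the bool reports whether the value actually changed
    /// (mirroring the `.1` of the `update` calls in the tests above).
    fn insert_or_reuse(&mut self, version: u64, value: V) -> (Arc<V>, bool) {
        match &mut self.current {
            Some(slot) if *slot.value == value => {
                slot.verified_at = version;
                (slot.value.clone(), false)
            }
            _ => {
                let arc = Arc::new(value);
                self.current = Some(Slot {
                    value: arc.clone(),
                    verified_at: version,
                });
                (arc, true)
            }
        }
    }
}

fn main() {
    let mut slot = SimpleVersionedSlot::new();
    let (v0, changed0) = slot.insert_or_reuse(0, 100);
    // Recomputed later to an equal value: the old allocation is resurrected.
    let (v2, changed2) = slot.insert_or_reuse(2, 100);
    assert!(changed0 && !changed2);
    assert!(Arc::ptr_eq(&v0, &v2));
    assert_eq!(slot.current.as_ref().unwrap().verified_at, 2);
}
```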
- // It is possible that we attempt to resurrect the entry from v0 and v2, which actually - requires actually requires insertion of a new entry at v2, rather than simply marking - v0 as reusable. + // This tests a very specific condition that is mostly irrelevant because + // the intent is to deal with storage having a transient value at v2, but + // we don't store transients in storage anymore. + // TODO(cjhopman): Does this test cover any important behavior that's not otherwise covered? let mut cache = VersionedGraph::new(); - let key1 = VersionedGraphKey::new(VersionNumber::new(0), DiceKey { index: 0 }); + let key1 = VersionedGraphKey::new(VersionNumber::new(1), DiceKey { index: 0 }); let res = DiceValidValue::testing_new(DiceKeyValue::<K>::new(1)); + let dep_key = DiceKey { index: 1 }; + inject(&mut cache, 1, dep_key, 100); + let value = cache.update( key1, res.dupe(), ValueReusable::EqualityBased, - Arc::new(vec![]), - StorageType::LastN(1), + Arc::new(SeriesParallelDeps::serial_from_vec(vec![dep_key])), + StorageType::Normal, + TrackedInvalidationPaths::clean(), ); - let key2 = VersionedGraphKey::new(VersionNumber::new(1), DiceKey { index: 0 }); + inject(&mut cache, 2, dep_key, 200); + + let key2 = VersionedGraphKey::new(VersionNumber::new(2), DiceKey { index: 0 }); let res2 = DiceValidValue::testing_new(DiceKeyValue::<K>::new(2)); cache.update( key2, res2.dupe(), ValueReusable::EqualityBased, - Arc::new(vec![]), - StorageType::LastN(1), + Arc::new(SeriesParallelDeps::serial_from_vec(vec![dep_key])), + StorageType::Normal, + TrackedInvalidationPaths::clean(), ); - let key3 = VersionedGraphKey::new(VersionNumber::new(2), DiceKey { index: 0 }); + inject(&mut cache, 3, dep_key, 300); + + let key3 = VersionedGraphKey::new(VersionNumber::new(3), DiceKey { index: 0 }); let res3 = DiceValidValue::testing_new(DiceKeyValue::<K>::new(1)); let value3 = cache.update( key3.dupe(), res3.dupe(), ValueReusable::EqualityBased, - Arc::new(vec![]), - StorageType::LastN(1), + Arc::new(SeriesParallelDeps::serial_from_vec(vec![dep_key])), + StorageType::Normal, + TrackedInvalidationPaths::clean(), ); // should have created a new entry because of key2 - #[allow(clippy::vtable_address_comparisons)] // this should be same exact ptr copy - let is_same_ptr = !std::sync::Arc::ptr_eq( + #[allow(ambiguous_wide_pointer_comparisons)] // deliberately comparing exact pointer identity; a new entry is expected + let is_same_ptr = std::sync::Arc::ptr_eq( value.0.value().testing_value(), value3.0.value().testing_value(), ); - assert!(is_same_ptr); + assert!(!is_same_ptr); // should actually be cached though cache.get(key3).assert_match(); } @@ -1226,26 +907,32 @@ mod tests { let mut cache = VersionedGraph::new(); let res = DiceValidValue::testing_new(DiceKeyValue::<K>::new(100)); - let key = VersionedGraphKey::new(VersionNumber::new(5), DiceKey { index: 0 }); + let dep_key = DiceKey { index: 1 }; + let key5 = VersionedGraphKey::new(VersionNumber::new(5), DiceKey { index: 0 }); + + for v in 0..10 { + inject(&mut cache, v, dep_key, v * 100); + } // first, empty cache gives none - cache.get(key.dupe()).assert_compute(); + cache.get(key5.dupe()).assert_compute(); assert!( cache .update( - key.dupe(), + key5.dupe(), res.dupe(), ValueReusable::EqualityBased, - Arc::new(vec![]), - StorageType::LastN(1) + Arc::new(SeriesParallelDeps::serial_from_vec(vec![dep_key])), + StorageType::Normal, + TrackedInvalidationPaths::clean(), ) .1 ); assert!( cache - .get(key.dupe()) + .get(key5.dupe()) .assert_match() .value() .instance_equal(&res) @@ -1253,29 +940,29 @@ //
now insert a new value of a older version, this shouldn't evict anything. let res2 = DiceValidValue::testing_new(DiceKeyValue::::new(200)); - let key2 = VersionedGraphKey::new(VersionNumber::new(4), DiceKey { index: 0 }); + let key4 = VersionedGraphKey::new(VersionNumber::new(4), DiceKey { index: 0 }); assert!( cache .update( - key2.dupe(), + key4.dupe(), res2.dupe(), ValueReusable::EqualityBased, - Arc::new(vec![]), - StorageType::LastN(1) + Arc::new(SeriesParallelDeps::serial_from_vec(vec![dep_key])), + StorageType::Normal, + TrackedInvalidationPaths::clean(), ) .1 ); - cache.get(key2.dupe()).assert_check_deps(); + cache.get(key4.dupe()).assert_compute(); // the newer version should still be there assert!( cache - .get(key.dupe()) + .get(key5.dupe()) .assert_match() .value() .instance_equal(&res) ); - // there should be size 1 - assert_eq!(cache.last_n.get(&DiceKey { index: 0 }).unwrap().len(), 1); + assert!(cache.nodes.contains_key(&DiceKey { index: 0 })); // now insert the same value of a older version, this shouldn't evict anything but reuses // the existing node. @@ -1286,15 +973,16 @@ mod tests { key3.dupe(), res.dupe(), ValueReusable::EqualityBased, - Arc::new(vec![]), - StorageType::LastN(1) + Arc::new(SeriesParallelDeps::serial_from_vec(vec![dep_key])), + StorageType::Normal, + TrackedInvalidationPaths::clean(), ) .1 ); assert!( cache - .get(key.dupe()) + .get(key5.dupe()) .assert_match() .value() .instance_equal(&res) @@ -1309,29 +997,30 @@ mod tests { // now insert the same value of a newer version, this shouldn't evict anything but reuses // the existing node. - let key4 = VersionedGraphKey::new(VersionNumber::new(6), DiceKey { index: 0 }); + let key6 = VersionedGraphKey::new(VersionNumber::new(6), DiceKey { index: 0 }); assert!( !cache .update( - key4.dupe(), + key6.dupe(), res.dupe(), ValueReusable::EqualityBased, - Arc::new(vec![]), - StorageType::LastN(1) + Arc::new(SeriesParallelDeps::serial_from_vec(vec![dep_key])), + StorageType::Normal, + TrackedInvalidationPaths::clean(), ) .1 ); assert!( cache - .get(key.dupe()) + .get(key5.dupe()) .assert_match() .value() .instance_equal(&res) ); assert!( cache - .get(key4.dupe()) + .get(key6.dupe()) .assert_match() .value() .instance_equal(&res) @@ -1342,82 +1031,90 @@ mod tests { fn update_prior_version_reuses_nodes_when_history_based() { let mut cache = VersionedGraph::new(); let res = DiceValidValue::testing_new(DiceKeyValue::::new(100)); + // We use a different value here because if something looks at equality we + // want it to look not equal, we want reusability to come entirely from VersionBased checks. + // This means that if we were to inspect the cache, the values might not make sense, but + // that's okay. let res_fake = DiceValidValue::testing_new(DiceKeyValue::::new(99999)); - let key = VersionedGraphKey::new(VersionNumber::new(5), DiceKey { index: 0 }); + let dep_key = DiceKey { index: 1 }; + for v in 0..10 { + inject(&mut cache, v, dep_key, v * 100); + } + + let key5 = VersionedGraphKey::new(VersionNumber::new(5), DiceKey { index: 0 }); // first, empty cache gives none - cache.get(key.dupe()).assert_compute(); + cache.get(key5.dupe()).assert_compute(); assert!( cache .update( - key.dupe(), + key5.dupe(), res.dupe(), - ValueReusable::VersionBased(VersionRanges::testing_new(sorted_vector_set![ - VersionRange::bounded(VersionNumber::new(5), VersionNumber::new(6)) - ])), - Arc::new(vec![]), - StorageType::LastN(1) + // there's nothing in the cache to be reused. 
+ ValueReusable::EqualityBased, + Arc::new(SeriesParallelDeps::serial_from_vec(vec![dep_key])), + StorageType::Normal, + TrackedInvalidationPaths::clean(), ) .1 ); assert!( cache - .get(key.dupe()) + .get(key5.dupe()) .assert_match() .value() .instance_equal(&res) ); - // now insert a new value of a older version, this shouldn't evict anything. - let key2 = VersionedGraphKey::new(VersionNumber::new(4), DiceKey { index: 0 }); + // now insert a new value of an older version, this shouldn't evict anything + // because Normal stores the most recent N by version number. + let key4 = VersionedGraphKey::new(VersionNumber::new(4), DiceKey { index: 0 }); assert!( cache .update( - key2.dupe(), + key4.dupe(), res_fake.dupe(), - ValueReusable::VersionBased(VersionRanges::testing_new(sorted_vector_set![ - VersionRange::bounded(VersionNumber::new(1), VersionNumber::new(5)) - ])), - Arc::new(vec![]), - StorageType::LastN(1) + ValueReusable::VersionBased(VersionNumber(1)), + Arc::new(SeriesParallelDeps::serial_from_vec(vec![dep_key])), + StorageType::Normal, + TrackedInvalidationPaths::clean(), ) .1 ); - cache.get(key2.dupe()).assert_check_deps(); + cache.get(key4.dupe()).assert_compute(); // the newer version should still be there assert!( cache - .get(key.dupe()) + .get(key5.dupe()) .assert_match() .value() .instance_equal(&res) ); // there should be size 1 - assert_eq!(cache.last_n.get(&DiceKey { index: 0 }).unwrap().len(), 1); + assert!(cache.nodes.contains_key(&DiceKey { index: 0 })); // now insert the same value of an older version, this shouldn't evict anything but reuses - // the existing node. + // the existing node and drops the res_fake value. let key3 = VersionedGraphKey::new(VersionNumber::new(3), DiceKey { index: 0 }); assert!( !cache .update( key3.dupe(), res_fake.dupe(), - ValueReusable::VersionBased(VersionRanges::testing_new(sorted_vector_set![ - VersionRange::bounded(VersionNumber::new(5), VersionNumber::new(6)) - ])), - Arc::new(vec![]), - StorageType::LastN(1) + ValueReusable::VersionBased(VersionNumber::new(5)), + Arc::new(SeriesParallelDeps::serial_from_vec(vec![dep_key])), + StorageType::Normal, + TrackedInvalidationPaths::clean(), ) .1 ); assert!( cache - .get(key.dupe()) + .get(key5.dupe()) .assert_match() .value() .instance_equal(&res) @@ -1430,37 +1127,54 @@ .instance_equal(&res) ); - // now insert the different value of a newer version, this shouldn't evict anything but reuses - // the existing node. + // now insert the different value at a newer version, but with VersionBased reusability. + // this shouldn't evict anything and should drop the res_fake value. + let key6 = VersionedGraphKey::new(VersionNumber::new(6), DiceKey { index: 0 }); assert!( !cache .update( - key4.dupe(), + key6.dupe(), res_fake.dupe(), - ValueReusable::VersionBased(VersionRanges::testing_new(sorted_vector_set![ - VersionRange::bounded(VersionNumber::new(5), VersionNumber::new(6)) - ])), - Arc::new(vec![]), - StorageType::LastN(1) + ValueReusable::VersionBased(VersionNumber::new(5)), + Arc::new(SeriesParallelDeps::serial_from_vec(vec![dep_key])), + StorageType::Normal, + TrackedInvalidationPaths::clean(), ) .1 ); assert!( cache - .get(key.dupe()) + .get(key5.dupe()) .assert_match() .value() .instance_equal(&res) ); assert!( cache - .get(key4.dupe()) + .get(key6.dupe()) .assert_match() .value() .instance_equal(&res) ); + + // now insert a different value at a newer version, with Equality reusability.
+ // this should evict the old cached values. + let key7 = VersionedGraphKey::new(VersionNumber::new(7), DiceKey { index: 0 }); + assert!( + cache + .update( + key7.dupe(), + res_fake.dupe(), + ValueReusable::EqualityBased, + Arc::new(SeriesParallelDeps::serial_from_vec(vec![dep_key])), + StorageType::Normal, + TrackedInvalidationPaths::clean(), + ) + .1 + ); + + cache.get(key5.dupe()).assert_compute(); } #[test] @@ -1473,8 +1187,9 @@ mod tests { key, res.dupe(), ValueReusable::EqualityBased, - Arc::new(vec![]), - StorageType::LastN(1), + Arc::new(SeriesParallelDeps::None), + StorageType::Normal, + TrackedInvalidationPaths::clean(), ); let key1 = VersionedGraphKey::new(VersionNumber::new(0), DiceKey { index: 1 }); @@ -1482,8 +1197,11 @@ mod tests { key1, res.dupe(), ValueReusable::EqualityBased, - Arc::new(vec![DiceKey { index: 0 }]), - StorageType::LastN(1), + Arc::new(SeriesParallelDeps::serial_from_vec(vec![DiceKey { + index: 0, + }])), + StorageType::Normal, + TrackedInvalidationPaths::clean(), ); let key2 = VersionedGraphKey::new(VersionNumber::new(0), DiceKey { index: 2 }); @@ -1491,13 +1209,17 @@ mod tests { key2, res.dupe(), ValueReusable::EqualityBased, - Arc::new(vec![DiceKey { index: 0 }]), - StorageType::LastN(1), + Arc::new(SeriesParallelDeps::serial_from_vec(vec![DiceKey { + index: 0, + }])), + StorageType::Normal, + TrackedInvalidationPaths::clean(), ); assert!(cache.invalidate( VersionedGraphKey::new(VersionNumber::new(1), DiceKey { index: 0 }), - InvalidateKind::ForceDirty + InvalidateKind::ForceDirty, + InvalidationSourcePriority::Normal, )); assert_eq!( @@ -1508,7 +1230,9 @@ mod tests { )) .assert_check_deps() .deps_to_validate, - Arc::new(vec![DiceKey { index: 0 }]) + Arc::new(SeriesParallelDeps::serial_from_vec(vec![DiceKey { + index: 0 + }])) ); assert_eq!( cache @@ -1518,7 +1242,9 @@ mod tests { )) .assert_check_deps() .deps_to_validate, - Arc::new(vec![DiceKey { index: 0 }]) + Arc::new(SeriesParallelDeps::serial_from_vec(vec![DiceKey { + index: 0 + }])) ); Ok(()) @@ -1534,13 +1260,15 @@ mod tests { key, res.dupe(), ValueReusable::EqualityBased, - Arc::new(vec![]), - StorageType::LastN(1), + Arc::new(SeriesParallelDeps::None), + StorageType::Normal, + TrackedInvalidationPaths::clean(), ); assert!(!cache.invalidate( VersionedGraphKey::new(VersionNumber::new(1), DiceKey { index: 0 }), - InvalidateKind::Update(res.dupe(), StorageType::LastN(1)) + InvalidateKind::Update(res.dupe(), StorageType::Normal), + InvalidationSourcePriority::Normal, )); let key = VersionedGraphKey::new(VersionNumber::new(2), DiceKey { index: 0 }); @@ -1548,18 +1276,306 @@ mod tests { key, res.dupe(), ValueReusable::EqualityBased, - Arc::new(vec![]), - StorageType::LastN(1), + Arc::new(SeriesParallelDeps::None), + StorageType::Normal, + TrackedInvalidationPaths::clean(), ); assert!(cache.invalidate( VersionedGraphKey::new(VersionNumber::new(1), DiceKey { index: 0 }), InvalidateKind::Update( DiceValidValue::testing_new(DiceKeyValue::::new(30)), - StorageType::LastN(1) - ) + StorageType::Normal + ), + InvalidationSourcePriority::Normal, )); Ok(()) } + + #[test] + fn check_that_we_handle_noncomputed_version_in_history_correctly() { + fn do_test() -> anyhow::Result<()> { + let mut cache = VersionedGraph::new(); + let res = DiceValidValue::testing_new(DiceKeyValue::::new(100)); + let res2 = DiceValidValue::testing_new(DiceKeyValue::::new(101)); + + let key_a = DiceKey { index: 0 }; + let key_b = DiceKey { index: 1 }; + + let key_a0 = VersionedGraphKey::new(VersionNumber::new(0), key_a); + let key_a1 = 
VersionedGraphKey::new(VersionNumber::new(1), key_a); + let key_a2 = VersionedGraphKey::new(VersionNumber::new(2), key_a); + + let key_b0 = VersionedGraphKey::new(VersionNumber::new(0), key_b); + let key_b1 = VersionedGraphKey::new(VersionNumber::new(1), key_b); + let key_b2 = VersionedGraphKey::new(VersionNumber::new(2), key_b); + + cache.invalidate( + key_a0, + InvalidateKind::Update(res.dupe(), StorageType::Injected), + InvalidationSourcePriority::Normal, + ); + cache.invalidate( + key_a1, + InvalidateKind::Update(res2.dupe(), StorageType::Injected), + InvalidationSourcePriority::Normal, + ); + cache.invalidate( + key_a2, + InvalidateKind::Update(res.dupe(), StorageType::Injected), + InvalidationSourcePriority::Normal, + ); + + cache.update( + key_b0, + res.dupe(), + ValueReusable::EqualityBased, + Arc::new(SeriesParallelDeps::serial_from_vec(vec![key_a])), + StorageType::Normal, + TrackedInvalidationPaths::clean(), + ); + + // deferred dirty propagation should have b invalidated at v1. + cache.get(key_b1).assert_check_deps(); + + cache.update( + key_b2, + res.dupe(), + ValueReusable::EqualityBased, + Arc::new(SeriesParallelDeps::serial_from_vec(vec![key_a])), + StorageType::Normal, + TrackedInvalidationPaths::clean(), + ); + + cache.get(key_b0).assert_match(); + cache.get(key_b1).assert_check_deps(); + cache.get(key_b2).assert_match(); + + // this last bit checks a specific optimization. we know that b is valid at v0 and v2, if + // we compute something at v0 that depends only on b, we should be able to reuse the computed value + // at v2 + let key_c = DiceKey { index: 2 }; + let key_c0 = VersionedGraphKey::new(VersionNumber::new(0), key_c); + let key_c1 = VersionedGraphKey::new(VersionNumber::new(1), key_c); + let key_c2 = VersionedGraphKey::new(VersionNumber::new(2), key_c); + + cache.update( + key_c0, + res.dupe(), + ValueReusable::EqualityBased, + Arc::new(SeriesParallelDeps::serial_from_vec(vec![key_b])), + StorageType::Normal, + TrackedInvalidationPaths::clean(), + ); + + cache.get(key_c0).assert_match(); + cache.get(key_c1).assert_check_deps(); + cache.get(key_c2).assert_match(); + + Ok(()) + } + do_test().unwrap() + } + + #[test] + fn check_that_force_dirty_cannot_be_used_for_deps_check_forward() -> anyhow::Result<()> { + let mut cache = VersionedGraph::new(); + let res = DiceValidValue::testing_new(DiceKeyValue::::new(100)); + + let key_a = DiceKey { index: 0 }; + let key_b = DiceKey { index: 1 }; + + let key_a0 = VersionedGraphKey::new(VersionNumber::new(0), key_a); + let key_a1 = VersionedGraphKey::new(VersionNumber::new(1), key_a); + let key_a2 = VersionedGraphKey::new(VersionNumber::new(2), key_a); + + let key_b0 = VersionedGraphKey::new(VersionNumber::new(0), key_b); + + // b is valid from 0->inf + cache.invalidate( + key_b0, + InvalidateKind::Update(res.dupe(), StorageType::Injected), + InvalidationSourcePriority::Normal, + ); + + cache.update( + key_a0, + res.dupe(), + ValueReusable::EqualityBased, + Arc::new(SeriesParallelDeps::serial_from_vec(vec![key_b])), + StorageType::Normal, + TrackedInvalidationPaths::clean(), + ); + + cache.invalidate( + key_a1, + InvalidateKind::ForceDirty, + InvalidationSourcePriority::Normal, + ); + + cache.get(key_a1).assert_compute(); + cache.get(key_a2).assert_compute(); + + Ok(()) + } + + #[test] + fn check_that_force_dirty_cannot_be_used_for_deps_check_backward() -> anyhow::Result<()> { + let mut cache = VersionedGraph::new(); + let res = DiceValidValue::testing_new(DiceKeyValue::::new(100)); + + let key_a = DiceKey { index: 0 }; + let 
key_b = DiceKey { index: 1 }; + + let key_a0 = VersionedGraphKey::new(VersionNumber::new(0), key_a); + let key_a1 = VersionedGraphKey::new(VersionNumber::new(1), key_a); + let key_a2 = VersionedGraphKey::new(VersionNumber::new(2), key_a); + let key_a3 = VersionedGraphKey::new(VersionNumber::new(3), key_a); + + let key_b0 = VersionedGraphKey::new(VersionNumber::new(0), key_b); + + // b is valid from 0->inf + cache.invalidate( + key_b0, + InvalidateKind::Update(res.dupe(), StorageType::Injected), + InvalidationSourcePriority::Normal, + ); + + cache.invalidate( + key_a2, + InvalidateKind::ForceDirty, + InvalidationSourcePriority::Normal, + ); + cache.update( + key_a3, + res.dupe(), + ValueReusable::EqualityBased, + Arc::new(SeriesParallelDeps::serial_from_vec(vec![key_b])), + StorageType::Normal, + TrackedInvalidationPaths::clean(), + ); + + cache.get(key_a0).assert_compute(); + cache.get(key_a1).assert_compute(); + + Ok(()) + } + + #[test] + fn check_that_valid_deps_across_force_dirty_dont_extend_valid_range_past_dirty() + -> anyhow::Result<()> { + let mut cache = VersionedGraph::new(); + let res = DiceValidValue::testing_new(DiceKeyValue::::new(100)); + + let key_a = DiceKey { index: 0 }; + let key_b = DiceKey { index: 1 }; + + let key_a0 = VersionedGraphKey::new(VersionNumber::new(0), key_a); + let key_a1 = VersionedGraphKey::new(VersionNumber::new(1), key_a); + let key_a2 = VersionedGraphKey::new(VersionNumber::new(2), key_a); + let key_a3 = VersionedGraphKey::new(VersionNumber::new(3), key_a); + + let key_b0 = VersionedGraphKey::new(VersionNumber::new(0), key_b); + + // b is valid from 0->inf + cache.invalidate( + key_b0, + InvalidateKind::Update(res.dupe(), StorageType::Injected), + InvalidationSourcePriority::Normal, + ); + + // a force-dirtied at v1 + cache.invalidate( + key_a1, + InvalidateKind::ForceDirty, + InvalidationSourcePriority::Normal, + ); + + // a computed at v2, since deps haven't changed it should be valid at v1 but due to force dirty not at v0 + cache.update( + key_a2, + res.dupe(), + ValueReusable::EqualityBased, + Arc::new(SeriesParallelDeps::serial_from_vec(vec![key_b])), + StorageType::Normal, + TrackedInvalidationPaths::clean(), + ); + + cache.get(key_a0).assert_compute(); + cache.get(key_a1).assert_match(); + + cache.update( + key_a3, + res.dupe(), + ValueReusable::EqualityBased, + Arc::new(SeriesParallelDeps::serial_from_vec(vec![key_b])), + StorageType::Normal, + TrackedInvalidationPaths::clean(), + ); + + cache.get(key_a0).assert_compute(); + cache.get(key_a1).assert_match(); + + Ok(()) + } + + #[test] + fn check_that_force_dirty_does_not_get_forgotten_after_later_computes() -> anyhow::Result<()> { + let mut cache = VersionedGraph::new(); + let res = DiceValidValue::testing_new(DiceKeyValue::::new(100)); + + let key_a = DiceKey { index: 0 }; + let key_b = DiceKey { index: 1 }; + + let key_a0 = VersionedGraphKey::new(VersionNumber::new(0), key_a); + let key_a1 = VersionedGraphKey::new(VersionNumber::new(1), key_a); + let key_a2 = VersionedGraphKey::new(VersionNumber::new(2), key_a); + + let key_b0 = VersionedGraphKey::new(VersionNumber::new(0), key_b); + + // b is valid from 0->inf + cache.invalidate( + key_b0, + InvalidateKind::Update(res.dupe(), StorageType::Injected), + InvalidationSourcePriority::Normal, + ); + + let key_a100 = VersionedGraphKey::new(VersionNumber::new(100), key_a); + + for i in 1..100 { + cache.invalidate( + VersionedGraphKey::new(VersionNumber(i), key_a), + InvalidateKind::ForceDirty, + InvalidationSourcePriority::Normal, + ); + } + + 
cache.update( + key_a100, + res.dupe(), + ValueReusable::EqualityBased, + Arc::new(SeriesParallelDeps::serial_from_vec(vec![key_b])), + StorageType::Normal, + TrackedInvalidationPaths::clean(), + ); + + cache.update( + key_a0, + res.dupe(), + ValueReusable::EqualityBased, + Arc::new(SeriesParallelDeps::serial_from_vec(vec![key_b])), + StorageType::Normal, + TrackedInvalidationPaths::clean(), + ); + + // There was a force-dirty at v1 (and v2, v3, ...), we should not be able to reuse the + // value at v0 regardless of deps. + cache.get(key_a0).assert_match(); + cache.get(key_a1).assert_compute(); + cache.get(key_a2).assert_compute(); + cache.get(key_a2).assert_compute(); + + Ok(()) + } } diff --git a/dice/dice/src/impls/core/graph/types.rs b/dice/dice/src/impls/core/graph/types.rs index 446286206a005..cd3b106960842 100644 --- a/dice/dice/src/impls/core/graph/types.rs +++ b/dice/dice/src/impls/core/graph/types.rs @@ -14,11 +14,11 @@ use gazebo::variants::UnpackVariants; use gazebo::variants::VariantName; use crate::arc::Arc; +use crate::impls::deps::graph::SeriesParallelDeps; use crate::impls::key::DiceKey; use crate::impls::value::DiceComputedValue; use crate::impls::value::DiceValidValue; use crate::versions::VersionNumber; -use crate::versions::VersionRanges; /// The Key for a Versioned, incremental computation #[derive(Copy, Clone, Dupe, Debug)] @@ -37,21 +37,27 @@ impl VersionedGraphKey { pub(crate) struct VersionedGraphResultMismatch { /// Last known value for the key. pub(crate) entry: DiceValidValue, - /// Versions at which the value for given key is valid. - pub(crate) verified_versions: VersionRanges, - pub(crate) deps_to_validate: Arc>, + /// Most recent previous version at which the last known value was valid. + pub(crate) prev_verified_version: VersionNumber, + pub(crate) deps_to_validate: Arc, } #[derive(Debug, VariantName, UnpackVariants)] pub(crate) enum VersionedGraphResult { - /// when the version cache has the exact matching entry via versions + /// the entry is present and valid at the requested version Match(DiceComputedValue), - /// when the version cache found an entry, but the versions were mismatching. The existing entry - /// is returned, along with the last known version + /// the entry at the requested version has been invalidated and + /// we have a previous value with deps to possibly resurrect CheckDeps(VersionedGraphResultMismatch), - /// An entry that is known to require re-evaluation because it was marked as dirty at the - /// requested version or that it was missing + /// the entry is missing or there's no previously valid value to check Compute, + /// the storage has rejected the request + Rejected(RejectedReason), +} + +#[derive(Debug)] +pub(crate) enum RejectedReason { + RejectedDueToGraphClear, } #[cfg(test)] @@ -61,6 +67,7 @@ pub(crate) mod testing { use crate::impls::core::graph::types::VersionedGraphResultMismatch; use crate::impls::value::DiceComputedValue; + #[allow(dead_code)] pub(crate) trait VersionedCacheResultAssertsExt { fn assert_compute(&self); diff --git a/dice/dice/src/impls/core/internals.rs b/dice/dice/src/impls/core/internals.rs index 35712bcca1e75..cc5e83431445e 100644 --- a/dice/dice/src/impls/core/internals.rs +++ b/dice/dice/src/impls/core/internals.rs @@ -7,8 +7,12 @@ * of this source tree. 
*/ +use std::thread; + use gazebo::prelude::SliceExt; +use super::graph::types::RejectedReason; +use crate::api::key::InvalidationSourcePriority; use crate::api::storage_type::StorageType; use crate::arc::Arc; use crate::impls::cache::SharedCache; @@ -21,18 +25,21 @@ use crate::impls::core::graph::types::VersionedGraphResult; use crate::impls::core::versions::introspection::VersionIntrospectable; use crate::impls::core::versions::VersionEpoch; use crate::impls::core::versions::VersionTracker; +use crate::impls::deps::graph::SeriesParallelDeps; use crate::impls::key::DiceKey; use crate::impls::task::dice::DiceTask; use crate::impls::task::dice::TerminationObserver; use crate::impls::transaction::ChangeType; use crate::impls::value::DiceComputedValue; use crate::impls::value::DiceValidValue; +use crate::impls::value::TrackedInvalidationPaths; use crate::metrics::Metrics; use crate::result::CancellableResult; -use crate::result::Cancelled; +use crate::result::CancellationReason; use crate::versions::VersionNumber; /// Core state of DICE, holding the actual graph and version information +#[derive(allocative::Allocative)] pub(super) struct CoreState { version_tracker: VersionTracker, graph: VersionedGraph, @@ -50,13 +57,13 @@ impl CoreState { pub(super) fn update_state( &mut self, - updates: impl IntoIterator, + updates: impl IntoIterator, ) -> VersionNumber { let version_update = self.version_tracker.write(); let v = version_update.version(); let mut changes_recorded = false; - for (key, change) in updates { + for (key, change, invalidation_priority) in updates { changes_recorded |= self.graph.invalidate( VersionedGraphKey::new(v, key), match change { @@ -65,6 +72,7 @@ impl CoreState { #[cfg(test)] ChangeType::TestingSoftDirty => InvalidateKind::Invalidate, }, + invalidation_priority, ); } if changes_recorded { @@ -92,7 +100,11 @@ impl CoreState { } pub(super) fn lookup_key(&mut self, key: VersionedGraphKey) -> VersionedGraphResult { - self.graph.get(key) + if self.version_tracker.should_reject(key.v) { + VersionedGraphResult::Rejected(RejectedReason::RejectedDueToGraphClear) + } else { + self.graph.get(key) + } } pub(super) fn update_computed( @@ -102,16 +114,18 @@ impl CoreState { storage: StorageType, value: DiceValidValue, reusability: ValueReusable, - deps: Arc>, + deps: Arc, + invalidation_paths: TrackedInvalidationPaths, ) -> CancellableResult { if self.version_tracker.is_relevant(key.v, epoch) { debug!(msg = "update graph entry", k = ?key.k, v = %key.v, v_epoch = %epoch); - - Ok(self.graph.update(key, value, reusability, deps, storage).0) + Ok(self + .graph + .update(key, value, reusability, deps, storage, invalidation_paths) + .0) } else { debug!(msg = "update is rejected due to outdated epoch", k = ?key.k, v = %key.v, v_epoch = %epoch); - - Err(Cancelled) + Err(CancellationReason::OutdatedEpoch) } } @@ -124,12 +138,15 @@ impl CoreState { } pub(super) fn unstable_drop_everything(&mut self) { - self.version_tracker.write().commit(); + self.version_tracker.clear(); // Do the actual drop on a different thread because we may have to drop a lot of stuff // here. 
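The comment above names a pattern worth seeing on its own: hand a large structure to a dedicated thread so a potentially slow `Drop` stays off the hot path. Below is a self-contained sketch, with a plain `HashMap` standing in for the dice node map; only the thread name is taken from the code here, everything else is illustrative.

```rust
use std::collections::HashMap;
use std::thread;

// Move a large structure onto a named thread so its Drop runs there; the
// JoinHandle is deliberately not joined, matching the fire-and-forget spawn
// in the code above.
fn drop_on_background_thread(map: HashMap<u64, Vec<u8>>) {
    thread::Builder::new()
        .name("dice-drop-everything".to_owned())
        .spawn(move || drop(map))
        .expect("failed to spawn drop thread");
}

fn main() {
    let mut graph: HashMap<u64, Vec<u8>> = HashMap::new();
    for i in 0..1_000 {
        graph.insert(i, vec![0u8; 1024]);
    }
    // `mem::take` leaves a fresh empty map in place, so the owner keeps a
    // usable (empty) graph while the old contents are dropped elsewhere.
    let old = std::mem::take(&mut graph);
    drop_on_background_thread(old);
    assert!(graph.is_empty());
}
```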
- let map = std::mem::take(&mut self.graph.last_n); - std::thread::spawn(move || drop(map)); + let map = std::mem::take(&mut self.graph.nodes); + thread::Builder::new() + .name("dice-drop-everything".to_owned()) + .spawn(move || drop(map)) + .expect("failed to spawn thread"); } pub(super) fn metrics(&self) -> Metrics { @@ -143,7 +160,7 @@ impl CoreState { } Metrics { - key_count: self.graph.last_n.len(), + key_count: self.graph.nodes.len(), currently_active_key_count: currently_running_key_count, active_transaction_count: active_transaction_count as u32, // probably won't support more than u32 transactions } @@ -163,18 +180,18 @@ mod tests { use allocative::Allocative; use async_trait::async_trait; + use buck2_futures::cancellation::CancellationContext; + use buck2_futures::spawner::TokioSpawner; use derive_more::Display; use dupe::Dupe; use futures::FutureExt; - use more_futures::cancellation::CancellationContext; - use more_futures::spawner::TokioSpawner; use tokio::sync::Semaphore; use crate::api::computations::DiceComputations; + use crate::api::key::InvalidationSourcePriority; use crate::api::key::Key; use crate::arc::Arc; use crate::impls::cache::DiceTaskRef; - use crate::impls::core::graph::history::CellHistory; use crate::impls::core::internals::CoreState; use crate::impls::key::DiceKey; use crate::impls::key::ParentKey; @@ -185,19 +202,29 @@ mod tests { use crate::impls::value::DiceKeyValue; use crate::impls::value::DiceValidValue; use crate::impls::value::MaybeValidDiceValue; + use crate::impls::value::TrackedInvalidationPaths; use crate::versions::VersionNumber; + use crate::versions::VersionRanges; #[test] fn update_state_gets_next_version() { let mut core = CoreState::new(); assert_eq!( - core.update_state([(DiceKey { index: 0 }, ChangeType::Invalidate)]), + core.update_state([( + DiceKey { index: 0 }, + ChangeType::Invalidate, + InvalidationSourcePriority::Normal + )]), VersionNumber::new(1) ); assert_eq!( - core.update_state([(DiceKey { index: 1 }, ChangeType::Invalidate)]), + core.update_state([( + DiceKey { index: 1 }, + ChangeType::Invalidate, + InvalidationSourcePriority::Normal + )]), VersionNumber::new(2) ); } @@ -235,7 +262,8 @@ mod tests { MaybeValidDiceValue::valid(DiceValidValue::testing_new( DiceKeyValue::::new(val), )), - Arc::new(CellHistory::empty()), + Arc::new(VersionRanges::new()), + TrackedInvalidationPaths::clean(), )); Box::new(()) as Box diff --git a/dice/dice/src/impls/core/processor.rs b/dice/dice/src/impls/core/processor.rs index 2e3927257c1e3..62546f2f9ced0 100644 --- a/dice/dice/src/impls/core/processor.rs +++ b/dice/dice/src/impls/core/processor.rs @@ -48,7 +48,7 @@ impl StateProcessor { debug!("Processor terminated"); } - #[instrument(skip_all, fields(kind = %message.variant_name()))] + #[cfg_attr(debug_assertions, instrument(skip_all, fields(kind = %message.variant_name())))] fn iteration(&mut self, message: StateRequest) { match message { StateRequest::UpdateState { changes, resp } => { @@ -77,6 +77,7 @@ impl StateProcessor { storage, value, deps, + invalidation_paths, resp, .. } => { @@ -88,6 +89,7 @@ impl StateProcessor { value, ValueReusable::EqualityBased, deps, + invalidation_paths, ))); } StateRequest::UpdateMismatchAsUnchanged { @@ -95,6 +97,7 @@ impl StateProcessor { epoch, storage, previous, + invalidation_paths, resp, .. 
} => { @@ -104,8 +107,9 @@ impl StateProcessor { epoch, storage, previous.entry, - ValueReusable::VersionBased(previous.verified_versions), + ValueReusable::VersionBased(previous.prev_verified_version), previous.deps_to_validate, + invalidation_paths, ))); } StateRequest::GetTasksPendingCancellation { resp } => { @@ -118,6 +122,19 @@ impl StateProcessor { StateRequest::Introspection { resp } => { let _ignored = resp.send(self.state.introspection()); } + StateRequest::MakeAvailableForAllocative { resp } => { + use std::sync::Arc; + + let (complete_tx, complete_rx) = tokio::sync::oneshot::channel(); + let state = std::mem::replace(&mut self.state, CoreState::new()); + let arc_state = Arc::new(state); + drop(resp.send((Arc::clone(&arc_state), complete_tx))); + drop(complete_rx.blocking_recv()); + // Correctness: Contract on `MakeAvailableForAllocative` + let state = + Arc::into_inner(arc_state).expect("Other references to have been dropped"); + self.state = state; + } } } } diff --git a/dice/dice/src/impls/core/state.rs b/dice/dice/src/impls/core/state.rs index f7d37b3983316..e9547ffb3135c 100644 --- a/dice/dice/src/impls/core/state.rs +++ b/dice/dice/src/impls/core/state.rs @@ -10,37 +10,220 @@ use allocative::Allocative; use derivative::Derivative; use dupe::Dupe; +use futures::Future; use gazebo::variants::VariantName; +use tokio::sync::mpsc::UnboundedSender; +use tokio::sync::oneshot::Receiver; use tokio::sync::oneshot::Sender; +use tokio::sync::oneshot::{self}; +use crate::api::key::InvalidationSourcePriority; use crate::api::storage_type::StorageType; use crate::arc::Arc; use crate::impls::core::graph::introspection::VersionedGraphIntrospectable; use crate::impls::core::graph::types::VersionedGraphKey; use crate::impls::core::graph::types::VersionedGraphResult; use crate::impls::core::graph::types::VersionedGraphResultMismatch; +use crate::impls::core::internals::CoreState; use crate::impls::core::processor::StateProcessor; use crate::impls::core::versions::introspection::VersionIntrospectable; use crate::impls::core::versions::VersionEpoch; use crate::impls::ctx::SharedLiveTransactionCtx; +use crate::impls::deps::graph::SeriesParallelDeps; use crate::impls::key::DiceKey; use crate::impls::task::dice::TerminationObserver; use crate::impls::transaction::ActiveTransactionGuard; use crate::impls::transaction::ChangeType; use crate::impls::value::DiceComputedValue; use crate::impls::value::DiceValidValue; +use crate::impls::value::TrackedInvalidationPaths; use crate::metrics::Metrics; use crate::result::CancellableResult; use crate::versions::VersionNumber; +/// A handle to the core state that allows sending requests +#[derive(Clone)] +pub(crate) struct CoreStateHandle { + tx: UnboundedSender, + // should this handle hold onto the thread and terminate it when all of Dice is dropped? +} + +impl CoreStateHandle { + pub(super) fn new(tx: UnboundedSender) -> Self { + Self { tx } + } + + fn request(&self, message: StateRequest) { + self.tx.send(message).expect("dice runner died"); + } + + fn call(&self, message: StateRequest, recv: Receiver) -> impl Future { + self.request(message); + futures::FutureExt::map(recv, |v| v.unwrap()) + } + + /// Updates the core state with the given set of changes. 
The new VersionNumber is returned + pub(crate) fn update_state( + &self, + changes: Vec<(DiceKey, ChangeType, InvalidationSourcePriority)>, + ) -> impl Future { + let (resp, recv) = oneshot::channel(); + self.call(StateRequest::UpdateState { changes, resp }, recv) + } + + /// Gets the current version number + pub(crate) fn current_version(&self) -> impl Future { + let (resp, recv) = oneshot::channel(); + self.call(StateRequest::CurrentVersion { resp }, recv) + } + + /// Obtains the shared state ctx at the given version + pub(crate) fn ctx_at_version( + &self, + version: VersionNumber, + guard: ActiveTransactionGuard, + ) -> impl Future { + let (resp, recv) = oneshot::channel(); + self.call( + StateRequest::CtxAtVersion { + version, + guard, + resp, + }, + recv, + ) + } + + /// Report that a computation context at a version has been dropped + pub(crate) fn drop_ctx_at_version(&self, version: VersionNumber) { + self.request(StateRequest::DropCtxAtVersion { version }) + } + + /// Lookup the state of a key + pub(crate) fn lookup_key( + &self, + key: VersionedGraphKey, + ) -> impl Future { + let (resp, recv) = oneshot::channel(); + self.call(StateRequest::LookupKey { key, resp }, recv) + } + + /// Report that a value has been computed + pub(crate) fn update_computed( + &self, + key: VersionedGraphKey, + epoch: VersionEpoch, + storage: StorageType, + value: DiceValidValue, + deps: Arc, + invalidation_paths: TrackedInvalidationPaths, + ) -> impl Future> { + let (resp, recv) = oneshot::channel(); + self.call( + StateRequest::UpdateComputed { + key, + epoch, + storage, + value, + deps, + invalidation_paths, + resp, + }, + recv, + ) + } + + /// Report that a value has been verified to be unchanged due to its deps + pub(crate) fn update_mismatch_as_unchanged( + &self, + key: VersionedGraphKey, + epoch: VersionEpoch, + storage: StorageType, + previous: VersionedGraphResultMismatch, + invalidation_paths: TrackedInvalidationPaths, + ) -> impl Future> { + let (resp, recv) = oneshot::channel(); + self.call( + StateRequest::UpdateMismatchAsUnchanged { + key, + epoch, + storage, + previous, + resp, + invalidation_paths, + }, + recv, + ) + } + + /// Get all the tasks pending cancellation + pub(crate) fn get_tasks_pending_cancellation( + &self, + ) -> impl Future> { + let (resp, recv) = oneshot::channel(); + self.call(StateRequest::GetTasksPendingCancellation { resp }, recv) + } + + /// For unstable take + pub(crate) fn unstable_drop_everything(&self) { + self.request(StateRequest::UnstableDropEverything) + } + + /// Collect metrics + pub(crate) fn metrics(&self) -> Metrics { + let (resp, recv) = oneshot::channel(); + self.request(StateRequest::Metrics { resp }); + + // Modern dice can just run on a blocking runtime and block waiting for the channel. + // This is safe since the processing dice thread is dedicated, and never awaits any other tasks. + tokio::task::block_in_place(|| recv.blocking_recv().unwrap()) + } + + /// Collects the introspectable dice state + pub(crate) fn introspection(&self) -> (VersionedGraphIntrospectable, VersionIntrospectable) { + let (resp, recv) = oneshot::channel(); + + self.request(StateRequest::Introspection { resp }); + + // Modern dice can just run on a blocking runtime and block waiting for the channel. + // This is safe since the processing dice thread is dedicated, and never awaits any other tasks. 
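Each of these handle methods follows the same request/reply shape: push a message onto an unbounded mpsc channel owned by a dedicated state thread, and read the answer from a per-request oneshot channel; synchronous callers such as `metrics` then block via `block_in_place`, as the code just below does. A runnable sketch of that shape under simplified assumptions (`Request` and `spawn_state_thread` are invented stand-ins, not dice's types):

```rust
use std::collections::HashMap;

use tokio::sync::{mpsc, oneshot};

enum Request {
    Metrics { resp: oneshot::Sender<usize> },
}

fn spawn_state_thread() -> mpsc::UnboundedSender<Request> {
    let (tx, mut rx) = mpsc::unbounded_channel();
    std::thread::spawn(move || {
        // The single-threaded owner of all state; it never awaits anything.
        let nodes: HashMap<u64, u64> = (0..3).map(|i| (i, i * 100)).collect();
        while let Some(msg) = rx.blocking_recv() {
            match msg {
                Request::Metrics { resp } => {
                    let _ = resp.send(nodes.len());
                }
            }
        }
    });
    tx
}

#[tokio::main(flavor = "multi_thread")]
async fn main() {
    let handle = spawn_state_thread();
    let (resp, recv) = oneshot::channel();
    handle.send(Request::Metrics { resp }).expect("state thread died");
    // Like `metrics()` above: it is ok to block this worker thread, because
    // the dedicated state thread always replies without awaiting anything.
    let key_count = tokio::task::block_in_place(|| recv.blocking_recv().unwrap());
    assert_eq!(key_count, 3);
}
```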
+ tokio::task::block_in_place(|| recv.blocking_recv().unwrap()) + } +} + +impl Allocative for CoreStateHandle { + fn visit<'a, 'b: 'a>(&self, visitor: &'a mut allocative::Visitor<'b>) { + let mut visitor = visitor.enter_self_sized::(); + + let (resp, recv) = oneshot::channel(); + self.request(StateRequest::MakeAvailableForAllocative { resp }); + + let (state, complete_tx) = tokio::task::block_in_place(|| recv.blocking_recv().unwrap()); + + // FIXME(JakobDegen): Ideally we'd correctly report the fact that this is shared, but there's + // no easy identifier to use + Allocative::visit(&state, &mut visitor); + + drop(state); + drop(complete_tx); + } +} + +impl Dupe for CoreStateHandle {} + +/// Start processing state +pub(crate) fn init_state() -> CoreStateHandle { + StateProcessor::spawn() +} + /// Core state is accessed via message passing to a single threaded processor #[derive(Derivative, VariantName)] #[derivative(Debug)] -pub(crate) enum StateRequest { +pub(super) enum StateRequest { /// Updates the core state with the given set of changes. The new VersionNumber that should be /// used is sent back via the channel provided UpdateState { - changes: Vec<(DiceKey, ChangeType)>, + changes: Vec<(DiceKey, ChangeType, InvalidationSourcePriority)>, resp: Sender, }, /// Gets the current version number @@ -67,7 +250,8 @@ pub(crate) enum StateRequest { /// The newly computed value value: DiceValidValue, /// The deps accessed during the computation of newly computed value - deps: Arc>, + deps: Arc, + invalidation_paths: TrackedInvalidationPaths, /// Response of the new value to use. This could be a different instance that is `Eq` to the /// given computed value if the state already stores an instance of value that is equal. resp: Sender>, @@ -80,6 +264,7 @@ pub(crate) enum StateRequest { storage: StorageType, /// The previous value sent for verification previous: VersionedGraphResultMismatch, + invalidation_paths: TrackedInvalidationPaths, /// Response of the new value to use. This could be a different instance that is `Eq` to the /// given computed value if the state already stores an instance of value that is equal. resp: Sender>, @@ -98,29 +283,14 @@ pub(crate) enum StateRequest { #[derivative(Debug = "ignore")] resp: Sender<(VersionedGraphIntrospectable, VersionIntrospectable)>, }, -} - -/// A handle to the core state that allows sending requests -#[derive(Allocative, Clone)] -pub(crate) struct CoreStateHandle { - #[allocative(skip)] - tx: tokio::sync::mpsc::UnboundedSender, - // should this handle hold onto the thread and terminate it when all of Dice is dropped? -} - -impl CoreStateHandle { - pub(crate) fn new(tx: tokio::sync::mpsc::UnboundedSender) -> Self { - Self { tx } - } - - pub(crate) fn request(&self, message: StateRequest) { - self.tx.send(message).expect("dice runner died"); - } -} - -impl Dupe for CoreStateHandle {} - -/// Start processing state -pub(crate) fn init_state() -> CoreStateHandle { - StateProcessor::spawn() + /// Makes the dice state available temporarily to be able to run allocative + /// + /// Although the `CoreState` is in an `Arc`, this is only to convince the compiler that this is + /// safe, and it should actually be understood as being a `&'a CoreState`, where `'a` is the + /// lifetime that starts when the response is sent, and ends when the provided sender is + /// dropped. Failing to drop all references to the `Arc` by then will cause a panic. 
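The contract this doc comment describes, an `Arc` used as a scoped borrow, can be demonstrated in isolation: the owner lends out an `Arc` plus a completion channel, the borrower drops its clone and then the channel, and the owner reclaims unique ownership with `Arc::into_inner`. A minimal sketch using std channels in place of tokio's (all names here are illustrative):

```rust
use std::sync::mpsc;
use std::sync::Arc;
use std::thread;

struct State(Vec<u64>);

fn main() {
    let arc_state = Arc::new(State((0..10).collect()));

    let (complete_tx, complete_rx) = mpsc::channel::<()>();
    let borrowed = Arc::clone(&arc_state);

    let borrower = thread::spawn(move || {
        // "Visit" the state; allocative would walk it here.
        let sum: u64 = borrowed.0.iter().sum();
        assert_eq!(sum, 45);
        drop(borrowed); // contract: drop the Arc first...
        drop(complete_tx); // ...then signal completion by dropping the sender
    });

    // The owner blocks until the sender is dropped (recv returns Err then).
    let _ = complete_rx.recv();
    borrower.join().unwrap();

    // Panics if the borrower leaked a reference, just like the code above.
    let state = Arc::into_inner(arc_state).expect("other references to have been dropped");
    assert_eq!(state.0.len(), 10);
}
```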
+ MakeAvailableForAllocative { + #[derivative(Debug = "ignore")] + resp: Sender<(std::sync::Arc, Sender)>, + }, } diff --git a/dice/dice/src/impls/core/versions.rs b/dice/dice/src/impls/core/versions.rs index c88a81771da8a..cad9d43c22ce0 100644 --- a/dice/dice/src/impls/core/versions.rs +++ b/dice/dice/src/impls/core/versions.rs @@ -21,6 +21,7 @@ use crate::HashMap; #[derive(Allocative)] pub(crate) struct VersionTracker { current: VersionNumber, + invalid_before: VersionNumber, /// Tracks the currently active versions and how many contexts are holding each of them. active_versions: HashMap, epoch_tracker: VersionEpochTracker, @@ -45,7 +46,7 @@ impl VersionEpochTracker { } #[derive(Copy, Clone, Eq, Debug, Display, Dupe, PartialEq, Allocative)] -#[display(fmt = "v{}", "_0")] +#[display("v{}", _0)] pub(crate) struct VersionEpoch(usize); impl VersionEpoch { @@ -68,6 +69,7 @@ impl VersionTracker { pub(crate) fn new() -> Self { VersionTracker { current: VersionNumber::ZERO, + invalid_before: VersionNumber::ZERO, active_versions: HashMap::default(), epoch_tracker: VersionEpochTracker::new(), } @@ -110,9 +112,15 @@ impl VersionTracker { /// check if the given version and epoch are "relevant", that is the current active version's /// epoch matches the given epoch pub(crate) fn is_relevant(&self, v: VersionNumber, epoch: VersionEpoch) -> bool { - self.active_versions - .get(&v) - .map_or(false, |active| active.version_epoch == epoch) + v >= self.invalid_before + && self + .active_versions + .get(&v) + .map_or(false, |active| active.version_epoch == epoch) + } + + pub(crate) fn should_reject(&self, v: VersionNumber) -> bool { + v < self.invalid_before } /// Drops reference to a VersionNumber given the token @@ -146,6 +154,11 @@ impl VersionTracker { pub(crate) fn write(&mut self) -> VersionForWrites { VersionForWrites { tracker: self } } + + pub(crate) fn clear(&mut self) { + self.current.inc(); + self.invalid_before = self.current; + } } pub(crate) struct VersionForWrites<'a> { @@ -186,6 +199,7 @@ pub(crate) mod introspection { ); impl VersionIntrospectable { + #[allow(dead_code)] pub(crate) fn versions_currently_running(&self) -> Vec { self.0.iter().map(|(v, _)| VersionNumber(*v)).collect() } @@ -207,19 +221,6 @@ pub(crate) mod introspection { }) .collect() } - - pub(crate) fn currently_running_key_count(&self) -> usize { - self.0 - .iter() - .flat_map(|(_, cache)| { - cache.iter().filter(|(_, state)| match state { - DiceTaskStateForDebugging::AsyncInProgress => true, - DiceTaskStateForDebugging::SyncInProgress => true, - _ => false, - }) - }) - .count() - } } impl VersionTracker { diff --git a/dice/dice/src/impls/ctx.rs b/dice/dice/src/impls/ctx.rs index 528f3665438dc..d807a933cb9d5 100644 --- a/dice/dice/src/impls/ctx.rs +++ b/dice/dice/src/impls/ctx.rs @@ -10,40 +10,40 @@ use std::any::Any; use std::future::Future; use std::ops::Deref; -use std::pin::Pin; +use std::ops::DerefMut; use std::sync::Arc; use allocative::Allocative; +use buck2_futures::owning_future::OwningFuture; use derivative::Derivative; use dupe::Dupe; use futures::future::BoxFuture; -use futures::future::Either; use futures::FutureExt; use futures::TryFutureExt; -use gazebo::variants::UnpackVariants; -use more_futures::owning_future::OwningFuture; +use itertools::Either; use parking_lot::Mutex; -use parking_lot::MutexGuard; +use typed_arena::Arena; use crate::api::activation_tracker::ActivationData; use crate::api::computations::DiceComputations; -use crate::api::computations::DiceComputationsParallel; use 
crate::api::data::DiceData; use crate::api::error::DiceResult; +use crate::api::invalidation_tracking::DiceKeyTrackedInvalidationPaths; use crate::api::key::Key; use crate::api::projection::ProjectionKey; use crate::api::user_data::UserComputationData; use crate::ctx::DiceComputationsImpl; +use crate::ctx::LinearRecomputeDiceComputationsImpl; use crate::impls::cache::DiceTaskRef; use crate::impls::cache::SharedCache; use crate::impls::core::state::CoreStateHandle; use crate::impls::core::versions::VersionEpoch; -use crate::impls::dep_trackers::RecordingDepsTracker; +use crate::impls::deps::RecordedDeps; +use crate::impls::deps::RecordingDepsTracker; use crate::impls::dice::DiceModern; use crate::impls::evaluator::AsyncEvaluator; use crate::impls::evaluator::SyncEvaluator; use crate::impls::events::DiceEventDispatcher; -use crate::impls::incremental::IncrementalEngine; use crate::impls::key::CowDiceKeyHashed; use crate::impls::key::DiceKey; use crate::impls::key::ParentKey; @@ -57,17 +57,16 @@ use crate::impls::transaction::TransactionUpdater; use crate::impls::user_cycle::KeyComputingUserCycleDetectorData; use crate::impls::user_cycle::UserCycleDetectorData; use crate::impls::value::DiceComputedValue; -use crate::impls::value::DiceValidity; -use crate::impls::value::MaybeValidDiceValue; -use crate::owned::Owned; -use crate::owned::Ref; +use crate::impls::value::TrackedInvalidationPaths; +use crate::impls::worker::project_for_key; +use crate::impls::worker::DiceTaskWorker; use crate::result::CancellableResult; -use crate::result::Cancelled; +use crate::result::CancellationReason; use crate::transaction_update::DiceTransactionUpdaterImpl; use crate::versions::VersionNumber; use crate::DiceError; use crate::DiceTransactionUpdater; -use crate::HashSet; +use crate::LinearRecomputeDiceComputations; use crate::UserCycleDetectorGuard; /// Context that is the base for which all requests start from @@ -75,22 +74,16 @@ use crate::UserCycleDetectorGuard; pub(crate) struct BaseComputeCtx { // we need to give off references of `DiceComputation` so hold this for now, but really once we // get rid of the enum, we just hold onto the base data directly and do some ref casts - data: DiceComputations, + data: DiceComputations<'static>, live_version_guard: ActiveTransactionGuard, } impl Clone for BaseComputeCtx { fn clone(&self) -> Self { - Self { - data: match &self.data.0 { - DiceComputationsImpl::Legacy(_) => { - unreachable!("wrong dice") - } - DiceComputationsImpl::Modern(ctx) => { - DiceComputations(DiceComputationsImpl::Modern(ctx.clone_for_base())) - } - }, - live_version_guard: self.live_version_guard.dupe(), + match &self.data.0 { + DiceComputationsImpl::Modern(modern) => { + BaseComputeCtx::clone_for(modern, self.live_version_guard.dupe()) + } } } } @@ -105,14 +98,28 @@ impl BaseComputeCtx { live_version_guard: ActiveTransactionGuard, ) -> Self { Self { - data: DiceComputations(DiceComputationsImpl::Modern(ModernComputeCtx::Regular( - PerComputeCtx::new( - ParentKey::None, + data: DiceComputations(DiceComputationsImpl::Modern(ModernComputeCtx::new( + ParentKey::None, + KeyComputingUserCycleDetectorData::Untracked, + AsyncEvaluator { per_live_version_ctx, user_data, dice, - KeyComputingUserCycleDetectorData::Untracked, - ), + }, + ))), + live_version_guard, + } + } + + fn clone_for( + modern: &ModernComputeCtx<'_>, + live_version_guard: ActiveTransactionGuard, + ) -> BaseComputeCtx { + Self { + data: DiceComputations(DiceComputationsImpl::Modern(ModernComputeCtx::new( + ParentKey::None, + 
KeyComputingUserCycleDetectorData::Untracked, + modern.ctx_data().async_evaluator.clone(), ))), live_version_guard, } @@ -124,62 +131,54 @@ impl BaseComputeCtx { pub(crate) fn into_updater(self) -> DiceTransactionUpdater { DiceTransactionUpdater(match self.data.0 { - DiceComputationsImpl::Legacy(_) => unreachable!("modern dice"), DiceComputationsImpl::Modern(delegate) => { - DiceTransactionUpdaterImpl::Modern(match delegate { - ModernComputeCtx::Regular(ctx) => ctx.into_updater(), - ModernComputeCtx::Parallel(_) => { - unreachable!("base context can never hold any but the regular context") - } - }) + DiceTransactionUpdaterImpl::Modern(delegate.into_updater()) } }) } - pub(crate) fn as_computations(&self) -> &DiceComputations { + pub(crate) fn as_computations(&self) -> &DiceComputations<'static> { &self.data } - pub(crate) fn as_computations_mut(&mut self) -> &mut DiceComputations { + pub(crate) fn as_computations_mut(&mut self) -> &mut DiceComputations<'static> { &mut self.data } } impl Deref for BaseComputeCtx { - type Target = ModernComputeCtx; + type Target = ModernComputeCtx<'static>; fn deref(&self) -> &Self::Target { match &self.data.0 { - DiceComputationsImpl::Legacy(_) => { - unreachable!("legacy dice instead of modern") - } DiceComputationsImpl::Modern(ctx) => ctx, } } } -/// Context that is available from the `DiceComputation`s for modern dice calculations -#[derive(Allocative, UnpackVariants)] -pub(crate) enum ModernComputeCtx { - /// The standard context given to a Key - Regular(PerComputeCtx), - /// The context when we are in the lambdas of a `compute_many` of a Key - Parallel(PerParallelComputeCtx), +impl DerefMut for BaseComputeCtx { + fn deref_mut(&mut self) -> &mut Self::Target { + match &mut self.data.0 { + DiceComputationsImpl::Modern(ctx) => ctx, + } + } } -impl ModernComputeCtx { +impl<'d> ModernComputeCtx<'d> { /// Gets all the result of of the given computation key. /// recorded as dependencies of the current computation for which this /// context is for. pub(crate) fn compute<'a, K>( - &'a self, + &'a mut self, key: &K, ) -> impl Future::Value>> + 'a where K: Key, + Self: 'a, { - self.compute_opaque(key) - .map(|r| r.map(|opaque| opaque.into_value())) + let (ctx_data, dep_trackers) = self.unpack(); + Self::compute_opaque_impl(ctx_data, key) + .map(move |r| r.map(|opaque| Self::opaque_into_value_impl(dep_trackers, opaque))) } /// Compute "opaque" value where the value is only accessible via projections. 
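Editor's note on the new `compute` shape just above: the ctx is unpacked into its core data and its deps tracker, the opaque result is computed against the core data alone, and the dependency edge is only recorded when the opaque value is resolved into a plain value. A minimal sketch of that deferred-recording idea follows; `KeyId`, `Opaque`, and `DepsTracker` are illustrative toy types, not the real dice ones.

```rust
use std::collections::HashSet;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct KeyId(u32);

// An opaque handle: the caller holds the value but has not yet "used" it.
struct Opaque<V> {
    key: KeyId,
    value: V,
}

struct DepsTracker {
    deps: HashSet<KeyId>,
}

impl DepsTracker {
    fn record(&mut self, k: KeyId) {
        self.deps.insert(k);
    }
}

// Resolving the handle is the point where the dep is recorded; a caller that
// only projects out of the handle never records a dep on the full value.
fn opaque_into_value<V>(tracker: &mut DepsTracker, opaque: Opaque<V>) -> V {
    tracker.record(opaque.key);
    opaque.value
}

fn main() {
    let mut tracker = DepsTracker { deps: HashSet::new() };
    let opaque = Opaque { key: KeyId(7), value: "v" };
    let v = opaque_into_value(&mut tracker, opaque);
    assert_eq!(v, "v");
    assert!(tracker.deps.contains(&KeyId(7)));
}
```

Keeping the recording on the resolution side is what lets a projection depend on a narrower slice of the result than the whole opaque value.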
@@ -193,155 +192,245 @@ impl ModernComputeCtx { where K: Key, { - match self { - ModernComputeCtx::Regular(ctx) => ctx.compute_opaque(key).left_future(), - ModernComputeCtx::Parallel(ctx) => ctx.compute_opaque(key).right_future(), - } - .map(move |cancellable_result| { + Self::compute_opaque_impl(self.ctx_data(), key) + } + + fn compute_opaque_impl<'a, K>( + ctx_data: &CoreCtx, + key: &K, + ) -> impl Future>> + 'a + where + K: Key, + { + ctx_data.compute_opaque(key).map(move |cancellable_result| { let cancellable = cancellable_result.map(move |(dice_key, dice_value)| { - OpaqueValueModern::new(self, dice_key, dice_value.value().dupe()) + let (value, invalidation_paths) = dice_value.into_parts(); + OpaqueValueModern::new(dice_key, value, invalidation_paths) }); - cancellable.map_err(|_| DiceError::cancelled()) + cancellable.map_err(DiceError::cancelled) }) } - /// Compute many tasks that can be ran in parallel without depending on each other + /// Computes all the given tasks in parallel, returning an unordered Stream pub(crate) fn compute_many<'a, T: 'a>( - &'a self, + &'a mut self, computes: impl IntoIterator< - Item = impl for<'x> FnOnce(&'x mut DiceComputationsParallel<'a>) -> BoxFuture<'x, T> + Send, + Item = impl for<'x> FnOnce(&'x mut DiceComputations<'a>) -> BoxFuture<'x, T> + Send, >, - ) -> Vec< - Either< - Pin>>>, - impl Future + 'a, - >, - > { - match self { - ModernComputeCtx::Regular(ctx) => ctx - .compute_many(computes) - .map(|f| f.left_future().right_future()) - .collect(), - ModernComputeCtx::Parallel(ctx) => ctx - .compute_many(computes) - .map(|f| f.right_future().right_future()) - .collect(), - } + ) -> Vec + 'a> { + let iter = computes.into_iter(); + let parallel = self.parallel_builder(iter.size_hint().0); + iter.map(|func| parallel.compute(func)).collect() } pub(crate) fn compute2<'a, T: 'a, U: 'a>( - &'a self, - compute1: impl for<'x> FnOnce(&'x mut DiceComputationsParallel<'a>) -> BoxFuture<'x, T> + Send, - compute2: impl for<'x> FnOnce(&'x mut DiceComputationsParallel<'a>) -> BoxFuture<'x, U> + Send, + &'a mut self, + compute1: impl for<'x> FnOnce(&'x mut DiceComputations<'a>) -> BoxFuture<'x, T> + Send, + compute2: impl for<'x> FnOnce(&'x mut DiceComputations<'a>) -> BoxFuture<'x, U> + Send, ) -> (impl Future + 'a, impl Future + 'a) { - match self { - ModernComputeCtx::Regular(ctx) => { - let (f1, f2) = ctx.compute2(compute1, compute2); - - (f1.left_future(), f2.left_future()) - } - ModernComputeCtx::Parallel(ctx) => { - let (f1, f2) = ctx.compute2(compute1, compute2); - (f1.right_future(), f2.right_future()) - } - } + let parallel = self.parallel_builder(2); + (parallel.compute(compute1), parallel.compute(compute2)) } - /// Compute "projection" based on deriving value - pub(crate) fn project( - &self, - key: &K, - base_key: DiceKey, - base: MaybeValidDiceValue, - ) -> DiceResult - where - K: ProjectionKey, - { - match self { - ModernComputeCtx::Regular(ctx) => ctx.project(key, base_key, base), - ModernComputeCtx::Parallel(ctx) => ctx.project(key, base_key, base), - } + pub(crate) fn compute3<'a, T: 'a, U: 'a, V: 'a>( + &'a mut self, + compute1: impl for<'x> FnOnce(&'x mut DiceComputations<'a>) -> BoxFuture<'x, T> + Send, + compute2: impl for<'x> FnOnce(&'x mut DiceComputations<'a>) -> BoxFuture<'x, U> + Send, + compute3: impl for<'x> FnOnce(&'x mut DiceComputations<'a>) -> BoxFuture<'x, V> + Send, + ) -> ( + impl Future + 'a, + impl Future + 'a, + impl Future + 'a, + ) { + let parallel = self.parallel_builder(3); + + ( + parallel.compute(compute1), + 
parallel.compute(compute2), + parallel.compute(compute3), + ) } - /// Data that is static per the entire lifetime of Dice. These data are initialized at the - /// time that Dice is initialized via the constructor. - pub(crate) fn global_data(&self) -> &DiceData { - match self { - ModernComputeCtx::Regular(ctx) => ctx.global_data(), - ModernComputeCtx::Parallel(ctx) => ctx.global_data(), - } + pub(crate) fn with_linear_recompute<'a, T, Fut: Future + 'a>( + &'a mut self, + func: impl FnOnce(LinearRecomputeDiceComputations<'a>) -> Fut + 'a, + ) -> impl Future + 'a { + let (ctx_data, self_dep_trackers) = self.unpack(); + let dep_trackers = Arc::new(Mutex::new(RecordingDepsTracker::new( + // TODO(cjhopman): if inspected during the with_linear_recompute, this will be missing some invalidation paths. + TrackedInvalidationPaths::clean(), + ))); + let fut = func(LinearRecomputeDiceComputations( + LinearRecomputeDiceComputationsImpl::Modern(LinearRecomputeModern { + ctx_data, + dep_trackers: dep_trackers.dupe(), + }), + )); + + fut.map(move |v| { + let mut self_dep_trackers = self_dep_trackers.lock(); + let dep_trackers = Arc::into_inner(dep_trackers) + .unwrap() + .into_inner() + .collect_deps(); + let validity = dep_trackers.deps_validity; + for k in dep_trackers.deps.iter_keys() { + self_dep_trackers.record(k, validity, TrackedInvalidationPaths::clean()) + } + self_dep_trackers.update_invalidation_paths(dep_trackers.invalidation_paths.dupe()); + v + }) } - /// Data that is static for the lifetime of the current request context. This lifetime is - /// the lifetime of the top-level `DiceComputation` used for all requests. - /// The data is also specific to each request context, so multiple concurrent requests can - /// each have their own individual data. - pub(crate) fn per_transaction_data(&self) -> &UserComputationData { - match self { - ModernComputeCtx::Regular(ctx) => ctx.per_transaction_data(), - ModernComputeCtx::Parallel(ctx) => ctx.per_transaction_data(), - } + pub(crate) fn opaque_into_value(&mut self, opaque: OpaqueValueModern) -> K::Value { + Self::opaque_into_value_impl(self.unpack().1, opaque) } - pub(crate) fn get_version(&self) -> VersionNumber { - match self { - ModernComputeCtx::Regular(ctx) => ctx.get_version(), - ModernComputeCtx::Parallel(ctx) => ctx.get_version(), - } + fn opaque_into_value_impl( + deps: DepsTrackerHolder, + opaque: OpaqueValueModern, + ) -> K::Value { + let OpaqueValueModern { + derive_from_key, + derive_from, + invalidation_paths, + .. 
+ } = opaque; + + deps.lock() + .record(derive_from_key, derive_from.validity(), invalidation_paths); + + derive_from + .downcast_maybe_transient::() + .expect("type mismatch") + .dupe() } - pub(super) fn dep_trackers(&self) -> MutexGuard<'_, RecordingDepsTracker> { - match self { - ModernComputeCtx::Regular(ctx) => ctx.dep_trackers(), - ModernComputeCtx::Parallel(ctx) => ctx.dep_trackers(), - } + pub(crate) fn get_invalidation_paths(&mut self) -> DiceKeyTrackedInvalidationPaths { + let (normal, high) = { + let mut dep_trackers = self.dep_trackers(); + let paths = dep_trackers.invalidation_paths(); + (paths.get_normal(), paths.get_high()) + }; + DiceKeyTrackedInvalidationPaths::new( + self.ctx_data().async_evaluator.dice.dupe(), + normal, + high, + ) } +} - pub(crate) fn store_evaluation_data( - &self, - value: T, - ) -> DiceResult<()> { - match self { - ModernComputeCtx::Regular(ctx) => ctx.store_evaluation_data(value), - ModernComputeCtx::Parallel(ctx) => ctx.store_evaluation_data(value), - } +impl<'a> From> for DiceComputations<'a> { + fn from(value: ModernComputeCtx<'a>) -> Self { + DiceComputations(DiceComputationsImpl::Modern(value)) } +} - pub(crate) fn cycle_guard(&self) -> DiceResult> { - match self { - ModernComputeCtx::Regular(ctx) => ctx.cycle_guard(), - ModernComputeCtx::Parallel(ctx) => ctx.cycle_guard(), +pub(crate) struct LinearRecomputeModern<'a> { + ctx_data: &'a CoreCtx, + dep_trackers: Arc>, +} + +impl LinearRecomputeModern<'_> { + pub(crate) fn get(&self) -> DiceComputations<'_> { + ModernComputeCtx::Linear { + ctx_data: self.ctx_data, + dep_trackers: &self.dep_trackers, } + .into() } } -impl ModernComputeCtx { - pub(crate) fn clone_for_base(&self) -> ModernComputeCtx { +/// This is used to create the ctx for each individual parallel compute (from compute_many/compute_join/compute2/etc). +/// +/// For the Normal case, each parallel ctx will be expected to record its deps into a RecordedDeps allocated in the arena. +/// +/// For the Linear case, each parallel ctx will record deps into the shared RecordingDepsTracker. +pub(crate) enum ModernComputeCtxParallelBuilder<'a> { + Normal { + ctx_data: &'a CoreCtx, + tracker_arena: &'a Arena, + invalidation_paths: &'a TrackedInvalidationPaths, + }, + Linear { + ctx_data: &'a CoreCtx, + dep_trackers: &'a Mutex, + }, +} +impl<'a> ModernComputeCtxParallelBuilder<'a> { + fn compute( + &self, + func: impl for<'x> FnOnce(&'x mut DiceComputations<'a>) -> BoxFuture<'x, T> + Send, + ) -> impl Future + 'a { match self { - ModernComputeCtx::Regular(ctx) => ModernComputeCtx::Regular(PerComputeCtx::new( - ParentKey::None, - ctx.ctx_data.async_evaluator.per_live_version_ctx.dupe(), - ctx.ctx_data.async_evaluator.user_data.dupe(), - ctx.ctx_data.async_evaluator.dice.dupe(), - KeyComputingUserCycleDetectorData::Untracked, - )), - ModernComputeCtx::Parallel(_) => { - unreachable!("parallel context should never be held by the base ctx") - } + ModernComputeCtxParallelBuilder::Normal { + ctx_data, + tracker_arena, + invalidation_paths, + } => OwningFuture::new( + ( + tracker_arena.alloc(RecordedDeps::new()), + ModernComputeCtx::Parallel { + ctx_data, + dep_trackers: RecordingDepsTracker::new((*invalidation_paths).dupe()), + } + .into(), + ), + |(_, ctx)| func(ctx), + ) + .map_taking_data(|v, (this_deps, ctx)| match ctx.0 { + DiceComputationsImpl::Modern(ModernComputeCtx::Parallel { + dep_trackers, .. 
+ }) => { + *this_deps = dep_trackers.collect_deps(); + v + } + _ => unreachable!(), + }) + .left_future(), + ModernComputeCtxParallelBuilder::Linear { + ctx_data, + dep_trackers, + } => OwningFuture::new( + ModernComputeCtx::Linear { + ctx_data, + dep_trackers, + } + .into(), + func, + ) + .right_future(), } } } /// Context given to the `compute` function of a `Key`. #[derive(Allocative)] -pub(crate) struct PerComputeCtx { - dep_trackers: Mutex, // If we make PerComputeCtx &mut, we can get rid of this mutex after some refactoring - ctx_data: Owned, +pub(crate) enum ModernComputeCtx<'a> { + /// The initial ctx for a key computation. + Owned { + ctx_data: CoreCtx, + dep_trackers: RecordingDepsTracker, + }, + /// The ctx within a compute_many/compute_join/try_compute_join. + Parallel { + #[allocative(skip)] + ctx_data: &'a CoreCtx, + #[allocative(skip)] + dep_trackers: RecordingDepsTracker, + }, + /// The ctx within a with_linear_recompute. + Linear { + #[allocative(skip)] + ctx_data: &'a CoreCtx, + #[allocative(skip)] + dep_trackers: &'a Mutex, + }, } #[derive(Allocative)] -struct CoreCtx { +pub(crate) struct CoreCtx { async_evaluator: AsyncEvaluator, parent_key: ParentKey, #[allocative(skip)] @@ -351,288 +440,136 @@ struct CoreCtx { evaluation_data: Mutex, } -impl PerComputeCtx { - pub(crate) fn new( - parent_key: ParentKey, - per_live_version_ctx: SharedLiveTransactionCtx, - user_data: Arc, - dice: Arc, - cycles: KeyComputingUserCycleDetectorData, - ) -> Self { - Self { - dep_trackers: Mutex::new(RecordingDepsTracker::new()), - ctx_data: Owned::new(CoreCtx { - async_evaluator: AsyncEvaluator { - per_live_version_ctx, - user_data, - dice, - }, - parent_key, - cycles, - evaluation_data: Mutex::new(EvaluationData::none()), - }), +impl ModernComputeCtx<'static> { + fn into_owned(self) -> (CoreCtx, RecordingDepsTracker) { + match self { + ModernComputeCtx::Owned { + ctx_data, + dep_trackers, + } => (ctx_data, dep_trackers), + _ => unreachable!(), } } - - /// Compute "opaque" value where the value is only accessible via projections. - /// Projections allow accessing derived results from the "opaque" value, - /// where the dependency of reading a projection is the projection value rather - /// than the entire opaque value. 
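Editor's note: the `ModernComputeCtx<'a>` enum defined above replaces the old `Regular`/`Parallel` split with three variants: `Owned` (the root ctx for a key, owning both the core data and its tracker), `Parallel` (borrows the core data, owns a private tracker whose deps are folded into the parent's arena slot on completion), and `Linear` (shares one tracker behind a `Mutex`). A sketch of the variant shape with toy types, assuming `parking_lot`:

```rust
use parking_lot::Mutex;

struct Tracker(Vec<u32>);

enum Ctx<'a> {
    Owned { tracker: Tracker },
    Parallel { tracker: Tracker },
    Linear { tracker: &'a Mutex<Tracker> },
}

impl Ctx<'_> {
    // With `&mut self`, Owned/Parallel hand out a plain mutable borrow;
    // only the shared Linear variant ever takes a lock.
    fn record(&mut self, dep: u32) {
        match self {
            Ctx::Owned { tracker } | Ctx::Parallel { tracker } => tracker.0.push(dep),
            Ctx::Linear { tracker } => tracker.lock().0.push(dep),
        }
    }
}

fn main() {
    let shared = Mutex::new(Tracker(Vec::new()));
    let mut owned = Ctx::Owned { tracker: Tracker(Vec::new()) };
    let mut parallel = Ctx::Parallel { tracker: Tracker(Vec::new()) };
    let mut linear = Ctx::Linear { tracker: &shared };
    owned.record(1);
    parallel.record(2);
    linear.record(3);
    assert_eq!(shared.lock().0, vec![3]);
}
```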
- pub(crate) fn compute_opaque<'a, K>( - &'a self, - key: &K, - ) -> impl Future> + 'a - where - K: Key, - { - self.ctx_data.compute_opaque(key) - } - - /// Compute many tasks that can be ran in parallel without depending on each other - pub(crate) fn compute_many<'a, T: 'a>( - &'a self, - computes: impl IntoIterator< - Item = impl for<'x> FnOnce(&'x mut DiceComputationsParallel<'a>) -> BoxFuture<'x, T> + Send, - >, - ) -> impl Iterator + 'a> { - computes.into_iter().map(|work| { - OwningFuture::new( - DiceComputationsParallel::new(DiceComputations(DiceComputationsImpl::Modern( - ModernComputeCtx::Parallel(PerParallelComputeCtx::new(self.ctx_data.as_ref())), - ))), - |ctx| work(ctx), - ) - .map_taking_data(|res, ctx| { - // TODO record structured dependencies instead of flat list - self.dep_trackers.lock().record_parallel_ctx_deps( - ctx.0 - .0 - .into_modern() - .expect("modern dice") - .into_parallel() - .expect("parallel ctx") - .dep_trackers - .into_inner(), - ); - - res - }) - }) - } - - pub(crate) fn compute2<'a, T: 'a, U: 'a>( - &'a self, - compute1: impl for<'x> FnOnce(&'x mut DiceComputationsParallel<'a>) -> BoxFuture<'x, T> + Send, - compute2: impl for<'x> FnOnce(&'x mut DiceComputationsParallel<'a>) -> BoxFuture<'x, U> + Send, - ) -> (impl Future + 'a, impl Future + 'a) { - ( - OwningFuture::new( - DiceComputationsParallel::new(DiceComputations(DiceComputationsImpl::Modern( - ModernComputeCtx::Parallel(PerParallelComputeCtx::new(self.ctx_data.as_ref())), - ))), - compute1, - ) - .map_taking_data(|res, ctx| { - // TODO record structured dependencies instead of flat list - self.dep_trackers.lock().record_parallel_ctx_deps( - ctx.0 - .0 - .into_modern() - .expect("modern dice") - .into_parallel() - .expect("parallel ctx") - .dep_trackers - .into_inner(), - ); - - res - }), - OwningFuture::new( - DiceComputationsParallel::new(DiceComputations(DiceComputationsImpl::Modern( - ModernComputeCtx::Parallel(PerParallelComputeCtx::new(self.ctx_data.as_ref())), - ))), - compute2, - ) - .map_taking_data(|res, ctx| { - // TODO record structured dependencies instead of flat list - self.dep_trackers.lock().record_parallel_ctx_deps( - ctx.0 - .0 - .into_modern() - .expect("modern dice") - .into_parallel() - .expect("parallel ctx") - .dep_trackers - .into_inner(), - ); - - res - }), - ) - } - - /// Compute "projection" based on deriving value - pub(crate) fn project( - &self, - key: &K, - base_key: DiceKey, - base: MaybeValidDiceValue, - ) -> DiceResult - where - K: ProjectionKey, - { - self.ctx_data - .project(key, base_key, base, &self.dep_trackers) - } - - /// Data that is static per the entire lifetime of Dice. These data are initialized at the - /// time that Dice is initialized via the constructor. - pub(crate) fn global_data(&self) -> &DiceData { - self.ctx_data.global_data() - } - - /// Data that is static for the lifetime of the current request context. This lifetime is - /// the lifetime of the top-level `DiceComputation` used for all requests. - /// The data is also specific to each request context, so multiple concurrent requests can - /// each have their own individual data. 
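Editor's note: the deleted code above carried the comment "If we make PerComputeCtx &mut, we can get rid of this mutex after some refactoring", and this diff is that refactoring: dep recording now flows through `&mut self`, so the per-record lock disappears. A before/after sketch with illustrative names:

```rust
use parking_lot::Mutex;

struct OldCtx {
    deps: Mutex<Vec<u32>>, // interior mutability, reachable through &self
}

impl OldCtx {
    fn record(&self, dep: u32) {
        self.deps.lock().push(dep); // every record pays for a lock
    }
}

struct NewCtx {
    deps: Vec<u32>, // plain data, mutated through &mut self
}

impl NewCtx {
    fn record(&mut self, dep: u32) {
        self.deps.push(dep); // no lock: exclusivity is proven statically
    }
}

fn main() {
    let old = OldCtx { deps: Mutex::new(Vec::new()) };
    old.record(1);
    let mut new = NewCtx { deps: Vec::new() };
    new.record(1);
    assert_eq!(*old.deps.lock(), new.deps);
}
```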
- pub(crate) fn per_transaction_data(&self) -> &UserComputationData { - self.ctx_data.per_transaction_data() - } - - pub(crate) fn get_version(&self) -> VersionNumber { - self.ctx_data.get_version() - } - - pub(crate) fn into_updater(self) -> TransactionUpdater { - self.ctx_data.unwrap_inner().unwrap().into_updater() - } - - pub(super) fn dep_trackers(&self) -> MutexGuard<'_, RecordingDepsTracker> { - self.dep_trackers.lock() - } - - pub(crate) fn store_evaluation_data( - &self, - value: T, - ) -> DiceResult<()> { - self.ctx_data.store_evaluation_data(value) - } - pub(crate) fn finalize( self, ) -> ( - (HashSet, DiceValidity), + RecordedDeps, EvaluationData, KeyComputingUserCycleDetectorData, ) { - let data = self.ctx_data.unwrap_inner().unwrap(); + let (data, dep_trackers) = self.into_owned(); ( - self.dep_trackers.into_inner().collect_deps(), + dep_trackers.collect_deps(), data.evaluation_data.into_inner(), data.cycles, ) } - pub(crate) fn cycle_guard(&self) -> DiceResult> { - self.ctx_data.cycle_guard() + pub(crate) fn into_updater(self) -> TransactionUpdater { + self.into_owned().0.into_updater() } } -/// Context given to the lambdas of the `compute_many` function of a `Key`. -#[derive(Allocative)] -pub(crate) struct PerParallelComputeCtx { - dep_trackers: Mutex, // If we make PerComputeCtx &mut, we can get rid of this mutex after some refactoring - ctx_data: Ref, // this ref is alive while the main context is alive, which should be the case +struct DepsTrackerHolder<'a>(Either<&'a mut RecordingDepsTracker, &'a Mutex>); +impl<'a> DepsTrackerHolder<'a> { + fn lock(self) -> impl DerefMut + 'a { + self.0.map_right(|v| v.lock()) + } } -impl PerParallelComputeCtx { - fn new(ctx_data: Ref) -> Self { - Self { - dep_trackers: Mutex::new(RecordingDepsTracker::new()), - ctx_data, +impl ModernComputeCtx<'_> { + pub(crate) fn new( + parent_key: ParentKey, + cycles: KeyComputingUserCycleDetectorData, + async_evaluator: AsyncEvaluator, + ) -> ModernComputeCtx<'static> { + ModernComputeCtx::Owned { + dep_trackers: RecordingDepsTracker::new(TrackedInvalidationPaths::clean()), + ctx_data: CoreCtx { + async_evaluator, + parent_key, + cycles, + evaluation_data: Mutex::new(EvaluationData::none()), + }, } } - /// Compute "opaque" value where the value is only accessible via projections. - /// Projections allow accessing derived results from the "opaque" value, - /// where the dependency of reading a projection is the projection value rather - /// than the entire opaque value. 
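Editor's note on `DepsTrackerHolder` above: it unifies the exclusive-borrow case (`Owned`/`Parallel`) and the mutex case (`Linear`) behind a single `impl DerefMut`, using the fact that `Either` (re-exported by itertools from the `either` crate) derefs to the common target of both arms. A self-contained sketch of the trick with a toy tracker:

```rust
use std::ops::DerefMut;

use itertools::Either;
use parking_lot::Mutex;

struct Tracker(Vec<u32>);

struct Holder<'a>(Either<&'a mut Tracker, &'a Mutex<Tracker>>);

impl<'a> Holder<'a> {
    // The left arm is already a mutable borrow; only the right arm locks.
    // Either<L, R> implements DerefMut when both arms deref to one target,
    // so no trait object is needed.
    fn lock(self) -> impl DerefMut<Target = Tracker> + 'a {
        self.0.map_right(|m| m.lock())
    }
}

fn main() {
    let mut direct = Tracker(Vec::new());
    Holder(Either::Left(&mut direct)).lock().0.push(1);

    let shared = Mutex::new(Tracker(Vec::new()));
    Holder(Either::Right(&shared)).lock().0.push(2);

    assert_eq!(direct.0, vec![1]);
    assert_eq!(shared.lock().0, vec![2]);
}
```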
- pub(crate) fn compute_opaque<'a, K>( - &'a self, - key: &K, - ) -> impl Future> + 'a - where - K: Key, - { - self.ctx_data - .maybe_access(|ctx| ctx.compute_opaque(key)) - .expect("only alive while main PerComputeCtx is alive") + fn parallel_builder<'a>(&'a mut self, size_hint: usize) -> ModernComputeCtxParallelBuilder<'a> { + match self { + ModernComputeCtx::Owned { + ctx_data, + dep_trackers, + } => { + let (tracker_arena, invalidation_paths) = dep_trackers.push_parallel(size_hint); + ModernComputeCtxParallelBuilder::Normal { + ctx_data, + tracker_arena, + invalidation_paths, + } + } + ModernComputeCtx::Parallel { + ctx_data, + dep_trackers, + } => { + let (tracker_arena, invalidation_paths) = dep_trackers.push_parallel(size_hint); + ModernComputeCtxParallelBuilder::Normal { + ctx_data, + tracker_arena, + invalidation_paths, + } + } + ModernComputeCtx::Linear { + ctx_data, + dep_trackers, + } => ModernComputeCtxParallelBuilder::Linear { + ctx_data, + dep_trackers, + }, + } } - /// Compute many tasks that can be ran in parallel without depending on each other - pub(crate) fn compute_many<'a: 'i, 'i, T: 'a>( - &'a self, - computes: impl IntoIterator< - Item = impl for<'x> FnOnce(&'x mut DiceComputationsParallel<'a>) -> BoxFuture<'x, T> + Send, - > + 'i, - ) -> impl Iterator + 'a> + 'i { - computes.into_iter().map(|work| { - OwningFuture::new( - DiceComputationsParallel::new(DiceComputations(DiceComputationsImpl::Modern( - ModernComputeCtx::Parallel(PerParallelComputeCtx::new(self.ctx_data.dupe())), - ))), - |ctx| work(ctx), - ) - }) + fn ctx_data(&self) -> &CoreCtx { + match self { + ModernComputeCtx::Owned { ctx_data, .. } => ctx_data, + ModernComputeCtx::Parallel { ctx_data, .. } => ctx_data, + ModernComputeCtx::Linear { ctx_data, .. } => ctx_data, + } } - pub(crate) fn compute2<'a, T: 'a, U: 'a>( - &'a self, - compute1: impl for<'x> FnOnce(&'x mut DiceComputationsParallel<'a>) -> BoxFuture<'x, T> + Send, - compute2: impl for<'x> FnOnce(&'x mut DiceComputationsParallel<'a>) -> BoxFuture<'x, U> + Send, - ) -> (impl Future + 'a, impl Future + 'a) { - ( - OwningFuture::new( - DiceComputationsParallel::new(DiceComputations(DiceComputationsImpl::Modern( - ModernComputeCtx::Parallel(PerParallelComputeCtx::new(self.ctx_data.dupe())), - ))), - compute1, - ), - OwningFuture::new( - DiceComputationsParallel::new(DiceComputations(DiceComputationsImpl::Modern( - ModernComputeCtx::Parallel(PerParallelComputeCtx::new(self.ctx_data.dupe())), - ))), - compute2, + fn unpack(&mut self) -> (&CoreCtx, DepsTrackerHolder) { + match self { + ModernComputeCtx::Owned { + ctx_data, + dep_trackers, + } => (ctx_data, DepsTrackerHolder(Either::Left(dep_trackers))), + ModernComputeCtx::Parallel { + ctx_data, + dep_trackers, + } => ( + ctx_data, + DepsTrackerHolder(Either::Left(&mut *dep_trackers)), ), - ) + ModernComputeCtx::Linear { + ctx_data, + dep_trackers, + } => (ctx_data, DepsTrackerHolder(Either::Right(dep_trackers))), + } } /// Compute "projection" based on deriving value - pub(crate) fn project( - &self, - key: &K, - base_key: DiceKey, - base: MaybeValidDiceValue, - ) -> DiceResult - where - K: ProjectionKey, - { - self.ctx_data - .maybe_access(|ctx| ctx.project(key, base_key, base, &self.dep_trackers)) - .expect("only alive while main PerComputeCtx is alive") + pub(crate) fn projection>( + &mut self, + derive_from: &OpaqueValueModern, + key: &P, + ) -> DiceResult { + let (ctx_data, dep_trackers) = self.unpack(); + ctx_data.project(key, derive_from, dep_trackers) } /// Data that is static per the entire 
lifetime of Dice. These data are initialized at the /// time that Dice is initialized via the constructor. pub(crate) fn global_data(&self) -> &DiceData { - unsafe { - // SAFETY: lifetime of the parallel context ensures we hold it less than the main - // PerComputeCtx keeping the data alive - self.ctx_data - .deref() - .expect("only alive while main PerComputeCtx is alive") - } - .global_data() + self.ctx_data().global_data() } /// Data that is static for the lifetime of the current request context. This lifetime is @@ -640,44 +577,27 @@ impl PerParallelComputeCtx { /// The data is also specific to each request context, so multiple concurrent requests can /// each have their own individual data. pub(crate) fn per_transaction_data(&self) -> &UserComputationData { - unsafe { - // SAFETY: lifetime of the parallel context ensures we hold it less than the main - // PerComputeCtx keeping the data alive - self.ctx_data - .deref() - .expect("only alive while main PerComputeCtx is alive") - } - .per_transaction_data() + self.ctx_data().per_transaction_data() } pub(crate) fn get_version(&self) -> VersionNumber { - self.ctx_data - .maybe_access(|ctx| ctx.get_version()) - .expect("only alive while main PerComputeCtx is alive") + self.ctx_data().get_version() } - pub(super) fn dep_trackers(&self) -> MutexGuard<'_, RecordingDepsTracker> { - self.dep_trackers.lock() + #[allow(unused)] // used in test + pub(super) fn dep_trackers(&mut self) -> impl DerefMut + '_ { + self.unpack().1.lock() } pub(crate) fn store_evaluation_data( &self, value: T, ) -> DiceResult<()> { - self.ctx_data - .maybe_access(|ctx| ctx.store_evaluation_data(value)) - .expect("only alive while main PerComputeCtx is alive") + self.ctx_data().store_evaluation_data(value) } - pub(crate) fn cycle_guard(&self) -> DiceResult> { - unsafe { - // SAFETY: lifetime of the parallel context ensures we hold it less than the main - // PerComputeCtx keeping the data alive - self.ctx_data - .deref() - .expect("only alive while main PerComputeCtx is alive") - } - .cycle_guard() + pub(crate) fn cycle_guard(&self) -> DiceResult>> { + self.ctx_data().cycle_guard() } } @@ -712,21 +632,17 @@ impl CoreCtx { } /// Compute "projection" based on deriving value - pub(crate) fn project( + fn project>( &self, key: &K, - base_key: DiceKey, - base: MaybeValidDiceValue, - dep_trackers: &Mutex, - ) -> DiceResult - where - K: ProjectionKey, - { + base: &OpaqueValueModern, + dep_trackers: DepsTrackerHolder, + ) -> DiceResult { let dice_key = self .async_evaluator .dice .key_index - .index(CowDiceKeyHashed::proj_ref(base_key, key)); + .index(CowDiceKeyHashed::proj_ref(base.derive_from_key, key)); let r = self .async_evaluator @@ -738,7 +654,8 @@ impl CoreCtx { SyncEvaluator::new( self.async_evaluator.user_data.dupe(), self.async_evaluator.dice.dupe(), - base, + base.derive_from.dupe(), + base.invalidation_paths.dupe(), ), DiceEventDispatcher::new( self.async_evaluator.user_data.tracker.dupe(), @@ -748,10 +665,14 @@ impl CoreCtx { let r = match r { Ok(r) => r, - Err(_cancelled) => return Err(DiceError::cancelled()), + Err(reason) => return Err(DiceError::cancelled(reason)), }; - dep_trackers.lock().record(dice_key, r.value().validity()); + dep_trackers.lock().record( + dice_key, + r.value().validity(), + r.invalidation_paths().dupe(), + ); Ok(r.value() .downcast_maybe_transient::() @@ -796,7 +717,7 @@ impl CoreCtx { Ok(()) } - pub(crate) fn cycle_guard(&self) -> DiceResult> { + pub(crate) fn cycle_guard(&self) -> DiceResult>> { self.cycles.cycle_guard() } } @@ -853,7 
+774,7 @@ impl SharedLiveTransactionCtx { ); take_mut::take(occupied.get_mut(), |previous| { - IncrementalEngine::spawn_for_key( + DiceTaskWorker::spawn( key, self.version_epoch, eval, @@ -881,7 +802,7 @@ impl SharedLiveTransactionCtx { let events = DiceEventDispatcher::new(eval.user_data.tracker.dupe(), eval.dice.dupe()); - let task = IncrementalEngine::spawn_for_key( + let task = DiceTaskWorker::spawn( key, self.version_epoch, eval, @@ -906,7 +827,7 @@ impl SharedLiveTransactionCtx { debug!(msg = "computing shared state is cancelled", k = ?key, v = ?v, v_epoch = ?v_epoch); tokio::task::yield_now().await; - Err(Cancelled) + Err(CancellationReason::TransactionCancelled) } .right_future() }, @@ -970,7 +891,7 @@ impl SharedLiveTransactionCtx { } }; - IncrementalEngine::project_for_key( + project_for_key( state, promise, key, @@ -1012,6 +933,11 @@ pub(crate) mod testing { impl SharedLiveTransactionCtx { pub(crate) fn inject(&self, k: DiceKey, v: DiceComputedValue) { + // TODO(cjhopman): We should delete this. tests using it are doing weird things and + // causing the transaction cache to be out of sync with what is possible in real + // execution and it makes things really difficult to reason about. These tests + // should be constructing the states they want to test via valid interactions + // with things. let task = unsafe { // SAFETY: completed immediately below sync_dice_task(k) diff --git a/dice/dice/src/impls/dep_trackers.rs b/dice/dice/src/impls/dep_trackers.rs deleted file mode 100644 index 4234362c5d96f..0000000000000 --- a/dice/dice/src/impls/dep_trackers.rs +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! Trackers that records dependencies and reverse dependencies during execution of requested nodes - -use allocative::Allocative; - -use crate::impls::key::DiceKey; -use crate::impls::value::DiceValidity; -use crate::HashSet; - -/// The 'DepsTracker' is used to record dependencies of a particular compute node by calling -/// 'record' for each dependency, and then getting a list of 'Dependency's at the end by calling -/// 'collect_deps'. 
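Editor's note: for contrast with the new series-parallel tracker, here is a minimal sketch of what the deleted `dep_trackers.rs` implemented: an unordered dep set plus a validity flag that latches to transient once any recorded dep was transient. `Validity` and `u32` keys stand in for dice's `DiceValidity` and `DiceKey`.

```rust
use std::collections::HashSet;

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Validity {
    Valid,
    Transient,
}

struct FlatDepsTracker {
    deps: HashSet<u32>,
    validity: Validity,
}

impl FlatDepsTracker {
    fn new() -> Self {
        Self { deps: HashSet::new(), validity: Validity::Valid }
    }

    fn record(&mut self, dep: u32, validity: Validity) {
        self.deps.insert(dep);
        if validity == Validity::Transient {
            self.validity = Validity::Transient; // latches, never resets
        }
    }

    fn collect(self) -> (HashSet<u32>, Validity) {
        (self.deps, self.validity)
    }
}

fn main() {
    let mut t = FlatDepsTracker::new();
    t.record(2, Validity::Valid);
    t.record(3, Validity::Transient);
    let (deps, validity) = t.collect();
    assert_eq!(deps.len(), 2);
    assert_eq!(validity, Validity::Transient);
}
```

The flat set discards ordering and structure entirely, which is exactly what the replacement module below keeps so that recomputation can re-check deps in parallel without requesting keys out of order.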
-#[derive(Allocative)] -pub(crate) struct RecordingDepsTracker { - deps: HashSet, - deps_validity: DiceValidity, -} - -impl RecordingDepsTracker { - pub(crate) fn new() -> Self { - Self { - deps: HashSet::default(), - deps_validity: DiceValidity::Valid, - } - } - - pub(crate) fn record(&mut self, k: DiceKey, validity: DiceValidity) { - self.deps.insert(k); - match validity { - DiceValidity::Transient => self.deps_validity = validity, - _ => {} - } - } - - // TODO record structured dependencies instead of flat list - pub(crate) fn record_parallel_ctx_deps(&mut self, other: RecordingDepsTracker) { - for dep in other.deps { - self.deps.insert(dep); - } - - match other.deps_validity { - DiceValidity::Transient => self.deps_validity = other.deps_validity, - _ => {} - } - } - - pub(crate) fn collect_deps(self) -> (HashSet, DiceValidity) { - (self.deps, self.deps_validity) - } -} - -#[cfg(test)] -pub(crate) mod testing { - - use crate::impls::dep_trackers::RecordingDepsTracker; - use crate::impls::key::DiceKey; - use crate::HashSet; - - pub(crate) trait RecordingDepsTrackersExt { - fn recorded_deps(&self) -> &HashSet; - } - - impl RecordingDepsTrackersExt for RecordingDepsTracker { - fn recorded_deps(&self) -> &HashSet { - &self.deps - } - } -} - -#[cfg(test)] -mod tests { - - use crate::impls::dep_trackers::RecordingDepsTracker; - use crate::impls::key::DiceKey; - use crate::impls::value::DiceValidity; - use crate::HashSet; - - #[tokio::test] - async fn recording_deps_tracker_tracks_deps() -> anyhow::Result<()> { - let mut deps_tracker = RecordingDepsTracker::new(); - - deps_tracker.record(DiceKey { index: 2 }, DiceValidity::Valid); - deps_tracker.record(DiceKey { index: 3 }, DiceValidity::Valid); - - let (deps, validity) = deps_tracker.collect_deps(); - let expected = HashSet::from_iter([DiceKey { index: 2 }, DiceKey { index: 3 }]); - assert_eq!(deps, expected); - assert_eq!(validity, DiceValidity::Valid); - - Ok(()) - } - - #[test] - fn recording_deps_tracker_tracks_deps_invalid() -> anyhow::Result<()> { - let mut deps_tracker = RecordingDepsTracker::new(); - - deps_tracker.record(DiceKey { index: 2 }, DiceValidity::Valid); - deps_tracker.record(DiceKey { index: 3 }, DiceValidity::Transient); - - let (deps, validity) = deps_tracker.collect_deps(); - let expected = HashSet::from_iter([DiceKey { index: 2 }, DiceKey { index: 3 }]); - assert_eq!(deps, expected); - assert_eq!(validity, DiceValidity::Transient); - - Ok(()) - } -} diff --git a/dice/dice/src/impls/deps.rs b/dice/dice/src/impls/deps.rs new file mode 100644 index 0000000000000..42768b644b908 --- /dev/null +++ b/dice/dice/src/impls/deps.rs @@ -0,0 +1,615 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! 
Trackers that records dependencies and reverse dependencies during execution of requested nodes + +use allocative::Allocative; +use dupe::Dupe; +use static_assertions::assert_eq_size; +use typed_arena::Arena; + +use crate::impls::deps::graph::SeriesParallelDeps; +use crate::impls::key::DiceKey; +use crate::impls::value::DiceValidity; +use crate::impls::value::TrackedInvalidationPaths; + +pub(crate) mod encoding; +pub(crate) mod graph; +pub(crate) mod iterator; + +/// The 'DepsTracker' is used to record dependencies of a particular compute node by calling +/// 'record' for each dependency, and then getting a list of 'Dependency's at the end by calling +/// 'collect_deps'. +#[derive(Allocative)] +pub(crate) struct RecordingDepsTracker { + deps: RecordedDeps, + + /// While a parallel computation is happening (from ctx.compute_many()/etc), we'll have an Arena here + /// where each parallel ctx gets a slot for its deps. After the parallel computation is finished, we'll + /// then record this into deps above. + #[allocative(skip)] // TODO(cjhopman): Fix this. + curr_parallel: Option>>, +} + +#[derive(Allocative)] +pub(crate) struct RecordedDeps { + pub(crate) deps: SeriesParallelDeps, + pub(crate) deps_validity: DiceValidity, + pub(crate) invalidation_paths: TrackedInvalidationPaths, +} + +impl RecordedDeps { + fn record( + &mut self, + k: DiceKey, + validity: DiceValidity, + invalidation_paths: TrackedInvalidationPaths, + ) { + self.deps.insert(k); + self.deps_validity.and(validity); + self.update_invalidation_paths(invalidation_paths) + } + + fn update_invalidation_paths(&mut self, paths: TrackedInvalidationPaths) { + self.invalidation_paths.update(paths) + } + + pub(crate) fn new() -> Self { + RecordedDeps { + deps: SeriesParallelDeps::None, + deps_validity: DiceValidity::Valid, + invalidation_paths: TrackedInvalidationPaths::clean(), + } + } + + #[allow(clippy::boxed_local)] + fn insert_parallel(&mut self, mut parallel: Arena) { + let mut new_keys = 0; + let mut new_specs = 0; + + for dep in parallel.iter_mut() { + self.deps_validity.and(dep.deps_validity); + self.invalidation_paths + .update(dep.invalidation_paths.dupe()); + let header = dep.deps.header(); + new_keys += header.keys_len(); + new_specs += header.encoded_len(); + } + if new_keys == 0 { + return; + } + + self.deps.insert_parallel( + parallel + .iter_mut() + .map(|v| std::mem::replace(&mut v.deps, SeriesParallelDeps::None)), + new_keys, + new_specs, + ); + } + + #[cfg(test)] + pub(crate) fn iter_keys(&self) -> impl Iterator + '_ { + self.deps.iter_keys() + } +} + +assert_eq_size!(RecordingDepsTracker, [usize; 8]); + +fn _check_deps_trackers_send_and_sync() { + fn _assert_send_sync() {} + _assert_send_sync::(); +} + +impl RecordingDepsTracker { + pub(crate) fn new(invalidation_paths: TrackedInvalidationPaths) -> Self { + let mut deps = RecordedDeps::new(); + deps.invalidation_paths = invalidation_paths; + Self { + deps, + curr_parallel: None, + } + } + + pub(crate) fn record( + &mut self, + k: DiceKey, + validity: DiceValidity, + invalidation_paths: TrackedInvalidationPaths, + ) { + self.flatten_parallel(); + self.deps.record(k, validity, invalidation_paths); + } + + pub(crate) fn update_invalidation_paths( + &mut self, + invalidation_paths: TrackedInvalidationPaths, + ) { + self.deps.update_invalidation_paths(invalidation_paths); + } + + /// Used to start a new parallel computation. Returns the Arena that each parallel ctx should record its deps to. 
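Editor's note: `push_parallel`/`flatten_parallel` below follow an arena handshake: the parent tracker hands out an arena, each parallel branch allocates its own slot, and the next serial access folds the arena's contents back into the parent's dep list. A sketch with toy types (plain `Vec<u32>` branches rather than `RecordedDeps`, and no `SyncArena` wrapper; the real code wraps the arena so that `&self` allocation is sound across threads):

```rust
use typed_arena::Arena;

struct ParallelCapableTracker {
    deps: Vec<u32>,
    curr_parallel: Option<Box<Arena<Vec<u32>>>>,
}

impl ParallelCapableTracker {
    fn new() -> Self {
        Self { deps: Vec::new(), curr_parallel: None }
    }

    // Start a parallel region; each branch allocates a slot in the arena.
    fn push_parallel(&mut self) -> &Arena<Vec<u32>> {
        self.flatten_parallel();
        self.curr_parallel.insert(Box::new(Arena::new()))
    }

    // A serial record implicitly closes any open parallel region.
    fn record(&mut self, dep: u32) {
        self.flatten_parallel();
        self.deps.push(dep);
    }

    // Fold the finished parallel branches back into the serial dep list.
    fn flatten_parallel(&mut self) {
        if let Some(arena) = self.curr_parallel.take() {
            for branch in arena.into_vec() {
                self.deps.extend(branch);
            }
        }
    }
}

fn main() {
    let mut t = ParallelCapableTracker::new();
    t.record(1);
    {
        let arena = t.push_parallel();
        arena.alloc(vec![10, 11]);
        arena.alloc(vec![20]);
    }
    t.record(2);
    assert_eq!(t.deps, vec![1, 10, 11, 20, 2]);
}
```

The `SyncArena` wrapper defined further down justifies its `unsafe impl Sync` by exposing only `&mut self` and by-value APIs, so no shared-reference mutation can ever be observed.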
+ pub(crate) fn push_parallel( + &mut self, + size_hint: usize, + ) -> (&Arena, &TrackedInvalidationPaths) { + self.flatten_parallel(); + assert!(self.curr_parallel.is_none()); + + let Self { + curr_parallel, + deps, + } = self; + ( + curr_parallel + .insert(Box::new(SyncArena::with_capacity(size_hint))) + .inner(), + &deps.invalidation_paths, + ) + } + + pub(crate) fn collect_deps(mut self) -> RecordedDeps { + self.flatten_parallel(); + self.deps + } + + /// "Flattens" the previous parallel computation into deps if there is one. This should be called on any function + /// that accesses/writes to deps. + fn flatten_parallel(&mut self) { + if let Some(parallel) = self.curr_parallel.take() { + self.deps.insert_parallel(parallel.into_inner()) + } + } + + pub(crate) fn invalidation_paths(&mut self) -> &TrackedInvalidationPaths { + self.flatten_parallel(); + &self.deps.invalidation_paths + } +} + +mod sync_arena { + // We put SyncArena in its own mod to make the inner Arena truly private. + use typed_arena::Arena; + + pub(super) struct SyncArena(Arena); + + /// Safety: SyncArena only exposes apis taking `&mut self` and `self`. + unsafe impl Sync for SyncArena {} + + impl SyncArena { + pub(super) fn with_capacity(s: usize) -> Self { + Self(Arena::with_capacity(s)) + } + + pub(super) fn inner(&mut self) -> &mut Arena { + &mut self.0 + } + + pub(super) fn into_inner(self) -> Arena { + self.0 + } + } +} +use sync_arena::SyncArena; + +#[cfg(test)] +pub(crate) mod testing { + use crate::impls::deps::RecordingDepsTracker; + use crate::impls::key::DiceKey; + use crate::HashSet; + + pub(crate) trait RecordingDepsTrackersExt { + fn recorded_deps(&self) -> HashSet; + } + + impl RecordingDepsTrackersExt for RecordingDepsTracker { + fn recorded_deps(&self) -> HashSet { + self.deps.iter_keys().collect() + } + } +} + +#[cfg(test)] +mod tests { + + use itertools::Itertools; + use typed_arena::Arena; + + use crate::impls::deps::iterator::ParallelNodeIterator; + use crate::impls::deps::iterator::SeriesParallelDepsIteratorItem; + use crate::impls::deps::RecordedDeps; + use crate::impls::deps::RecordingDepsTracker; + use crate::impls::key::DiceKey; + use crate::impls::value::testing::MakeInvalidationPaths; + use crate::impls::value::DiceValidity; + use crate::impls::value::TrackedInvalidationPaths; + use crate::HashSet; + + struct DisplaySPDeps<'a, T: Iterator>>(T); + impl<'a, T: Iterator>> DisplaySPDeps<'a, T> { + fn debug_string(self) -> String { + SeriesNodeDisplay(self.0) + .to_lines() + .into_iter() + .map(|v| v.trim().to_owned()) + .join("\n") + } + } + + struct SeriesNodeDisplay<'a, T: Iterator>>(T); + impl<'a, T: Iterator>> SeriesNodeDisplay<'a, T> { + fn to_lines(&mut self) -> Vec { + let mut lines = Vec::new(); + lines.push("S".to_owned()); + for item in self.0.by_ref() { + lines.push("|".to_owned()); + match item { + SeriesParallelDepsIteratorItem::Key(k) => lines.push(format!("K({})", k.index)), + SeriesParallelDepsIteratorItem::Parallel(p) => { + lines.extend(ParallelNodeDisplay(p).to_lines()) + } + } + } + lines.push("|".to_owned()); + lines.push("E".to_owned()); + lines + } + } + + struct ParallelNodeDisplay<'a>(ParallelNodeIterator<'a>); + impl ParallelNodeDisplay<'_> { + fn to_lines(&mut self) -> Vec { + let mut inner_lines = Vec::new(); + let mut inner_widths = Vec::new(); + let mut longest = 0; + for item in self.0.by_ref() { + let lines = SeriesNodeDisplay(item).to_lines(); + let width = lines.iter().map(|v| v.len()).max().unwrap_or(1); + longest = std::cmp::max(longest, lines.len()); + 
inner_lines.push(lines); + inner_widths.push(width); + } + + let mut lines = Vec::new(); + lines.push("P".to_owned()); + + let mut prefix_line = String::new(); + let mut suffix_line = String::new(); + let mut total_width = 0; + for (i, width) in inner_widths.iter().enumerate() { + total_width += width + 1; + if i == 0 { + prefix_line += "|"; + suffix_line += "|"; + } else { + prefix_line += "\\"; + suffix_line += "/"; + } + // 1 short because we want to offset the \ and / + prefix_line += &" ".repeat(total_width - prefix_line.len() - 1); + suffix_line += &" ".repeat(total_width - suffix_line.len() - 1); + } + + lines.push(prefix_line); + for i in 0..longest { + let mut line = String::new(); + let mut total_width = 0; + for j in 0..inner_lines.len() { + total_width += inner_widths[j] + 1; + match inner_lines[j].get(i) { + Some(v) => line += v, + None => line += "|", + } + line += &" ".repeat(total_width - line.len()); + } + lines.push(line); + } + lines.push(suffix_line); + lines.push("J".to_owned()); + lines + } + } + + #[tokio::test] + async fn recording_deps_tracker_tracks_deps() -> anyhow::Result<()> { + let mut deps_tracker = RecordingDepsTracker::new(TrackedInvalidationPaths::clean()); + + deps_tracker.record( + DiceKey { index: 2 }, + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + deps_tracker.record( + DiceKey { index: 3 }, + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + + let recorded_deps = deps_tracker.collect_deps(); + let expected = HashSet::from_iter([DiceKey { index: 2 }, DiceKey { index: 3 }]); + let actual: HashSet<_> = recorded_deps.iter_keys().collect(); + assert_eq!(actual, expected); + assert_eq!(recorded_deps.deps_validity, DiceValidity::Valid); + + Ok(()) + } + + #[tokio::test] + async fn recording_deps_tracker_tracks_invalidations() -> anyhow::Result<()> { + let mut deps_tracker = RecordingDepsTracker::new(TrackedInvalidationPaths::clean()); + + deps_tracker.record( + DiceKey { index: 2 }, + DiceValidity::Valid, + MakeInvalidationPaths { + normal: (DiceKey { index: 101 }, 8), + high: None, + } + .into(), + ); + + assert_eq!( + deps_tracker.invalidation_paths(), + &MakeInvalidationPaths { + normal: (DiceKey { index: 101 }, 8), + high: None, + } + .into() + ); + + { + let p1 = deps_tracker.push_parallel(0).0; + { + let s1 = p1.alloc(RecordedDeps::new()); + s1.record( + DiceKey { index: 11 }, + DiceValidity::Valid, + MakeInvalidationPaths { + normal: (DiceKey { index: 102 }, 6), + high: Some((DiceKey { index: 102 }, 6)), + } + .into(), + ); + } + } + assert_eq!( + deps_tracker.invalidation_paths(), + &MakeInvalidationPaths { + normal: (DiceKey { index: 101 }, 8), + high: Some((DiceKey { index: 102 }, 6)), + } + .into() + ); + + Ok(()) + } + + #[test] + fn recording_deps_tracker_tracks_deps_invalid() -> anyhow::Result<()> { + let mut deps_tracker = RecordingDepsTracker::new(TrackedInvalidationPaths::clean()); + + deps_tracker.record( + DiceKey { index: 2 }, + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + deps_tracker.record( + DiceKey { index: 3 }, + DiceValidity::Transient, + TrackedInvalidationPaths::clean(), + ); + + let recorded_deps = deps_tracker.collect_deps(); + let expected = HashSet::from_iter([DiceKey { index: 2 }, DiceKey { index: 3 }]); + let actual: HashSet<_> = recorded_deps.iter_keys().collect(); + assert_eq!(actual, expected); + assert_eq!(recorded_deps.deps_validity, DiceValidity::Transient); + + Ok(()) + } + + #[test] + fn test_series_parallel_record_and_iter() -> anyhow::Result<()> { + let mut 
tracker = RecordingDepsTracker::new(TrackedInvalidationPaths::clean()); + + { + let p1 = tracker.push_parallel(0).0; + { + let s1 = p1.alloc(RecordedDeps::new()); + s1.record( + DiceKey { index: 11 }, + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + s1.record( + DiceKey { index: 12 }, + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + s1.record( + DiceKey { index: 13 }, + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + s1.record( + DiceKey { index: 14 }, + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + s1.record( + DiceKey { index: 15 }, + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + s1.record( + DiceKey { index: 16 }, + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + s1.record( + DiceKey { index: 17 }, + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + s1.record( + DiceKey { index: 18 }, + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + s1.record( + DiceKey { index: 19 }, + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + } + { + let s2 = p1.alloc(RecordedDeps::new()); + s2.record( + DiceKey { index: 21 }, + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + { + let p2 = Box::new(Arena::new()); + { + let s3 = p2.alloc(RecordedDeps::new()); + s3.record( + DiceKey { index: 22 }, + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + s3.record( + DiceKey { index: 23 }, + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + } + { + let s3 = p2.alloc(RecordedDeps::new()); + s3.record( + DiceKey { index: 24 }, + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + s3.record( + DiceKey { index: 25 }, + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + } + s2.insert_parallel(*p2); + } + } + } + + tracker.record( + DiceKey { index: 91 }, + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + tracker.record( + DiceKey { index: 92 }, + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + tracker.record( + DiceKey { index: 93 }, + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + + { + let p2 = tracker.push_parallel(3).0; + for i in 0..5 { + let s = p2.alloc(RecordedDeps::new()); + s.record( + DiceKey { index: 32 + i }, + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + } + } + tracker.record( + DiceKey { index: 94 }, + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + tracker.record( + DiceKey { index: 95 }, + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + + let deps = tracker.collect_deps(); + + eprintln!("{:?}", &deps.deps); + + let rendered = DisplaySPDeps(deps.deps.iter()).debug_string(); + + let expected = indoc::indoc! 
{r" + S + | + P + | \ + S S + | | + K(11) K(21) + | | + K(12) P + | | \ + K(13) S S + | | | + K(14) K(22) K(24) + | | | + K(15) K(23) K(25) + | | | + K(16) E E + | | / + K(17) J + | | + K(18) E + | | + K(19) | + | | + E | + | / + J + | + K(91) + | + K(92) + | + K(93) + | + P + | \ \ \ \ + S S S S S + | | | | | + K(32) K(33) K(34) K(35) K(36) + | | | | | + E E E E E + | / / / / + J + | + K(94) + | + K(95) + | + E + "} + .trim(); + assert_eq!( + rendered, expected, + "rendered:\n{}\nexpected:\n{}\n\n{:?}", + rendered, expected, deps.deps + ); + + Ok(()) + } +} diff --git a/dice/dice/src/impls/deps/encoding.rs b/dice/dice/src/impls/deps/encoding.rs new file mode 100644 index 0000000000000..0d4d94c90c135 --- /dev/null +++ b/dice/dice/src/impls/deps/encoding.rs @@ -0,0 +1,310 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! We pack the SeriesParallelDeps graph descriptors (`SPItem` and `SPSeriesHeader`) into +//! a `Vec` to minimize the cost of storing them. +//! +//! This module contains the utilities for encoding and decoding those. +//! +//! The basic structure is +//! ```ignore +//! <4 bit tag> +//! ``` + +use std::fmt::Debug; +use std::fmt::Display; + +use dupe::Dupe; +use gazebo::prelude::OptionExt; +use gazebo::variants::VariantName; +use itertools::Itertools; +use thiserror::Error; + +use crate::impls::deps::graph::SPItem; +use crate::impls::deps::graph::SPSeriesHeader; + +pub(crate) trait SPEncoder { + fn write_series_header(&mut self, header: SPSeriesHeader); + fn write_item(&mut self, item: SPItem); +} + +#[derive(VariantName, Eq, PartialEq, Clone, Copy, Dupe)] +pub(crate) enum SPTag { + HeaderSimple, + HeaderComplexKeys, + HeaderComplexSpecs, + ItemKeys, + ItemParallelKeys, + ItemParallelSpecs, +} + +impl SPTag { + fn encode(self) -> u32 { + match self { + SPTag::HeaderSimple => 0, + SPTag::HeaderComplexKeys => 1, + SPTag::HeaderComplexSpecs => 2, + SPTag::ItemKeys => 3, + SPTag::ItemParallelKeys => 4, + SPTag::ItemParallelSpecs => 5, + } + } + + fn decode(v: u32) -> Result { + let r = match v { + 0 => SPTag::HeaderSimple, + 1 => SPTag::HeaderComplexKeys, + 2 => SPTag::HeaderComplexSpecs, + 3 => SPTag::ItemKeys, + 4 => SPTag::ItemParallelKeys, + 5 => SPTag::ItemParallelSpecs, + v => { + return Err(SPDecoderError::InvalidTag(v)); + } + }; + assert!(r.encode() == v); + Ok(r) + } +} + +impl Display for SPTag { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(self.variant_name()) + } +} + +impl Debug for SPTag { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + Display::fmt(self, f) + } +} + +fn push(vec: &mut Vec, tag: SPTag, value: u32) { + assert!(value < (1 << 28)); + vec.push((tag.encode() << 28) | value); +} + +impl SPEncoder for Vec { + fn write_series_header(&mut self, header: SPSeriesHeader) { + match header { + SPSeriesHeader::Simple { key_count } => { + push(self, SPTag::HeaderSimple, key_count); + } + SPSeriesHeader::Complex { + key_count, + spec_count, + } => { + push(self, SPTag::HeaderComplexKeys, key_count); + push(self, SPTag::HeaderComplexSpecs, spec_count); + } + } + } + + fn write_item(&mut self, item: SPItem) { + match item { + SPItem::Keys { key_count } => { + push(self, SPTag::ItemKeys, key_count); + } + SPItem::Parallel { + 
key_count, + spec_count, + } => { + push(self, SPTag::ItemParallelKeys, key_count); + push(self, SPTag::ItemParallelSpecs, spec_count); + } + } + } +} + +fn debug_string<'a>(iter: impl Iterator) -> String { + iter.map(|v| SPDecoder::split(*v)) + .map(|(x, y)| format!("({:#04b}, {})", x, y)) + .join(",") +} + +impl SPSeriesHeader { + pub(crate) fn encoded_len(&self) -> u32 { + match self { + SPSeriesHeader::Simple { .. } => 1, + SPSeriesHeader::Complex { spec_count, .. } => 2 + spec_count, + } + } +} + +impl SPItem { + pub(crate) fn encoded_len(&self) -> u32 { + match self { + SPItem::Keys { .. } => 1, + SPItem::Parallel { .. } => 2, + } + } +} + +pub(crate) struct SPDecoder<'a>(pub(crate) std::slice::Iter<'a, u32>); + +impl SPDecoder<'_> { + pub(crate) fn read_series_header(&mut self) -> Result, SPDecoderError> { + self.read()?.try_map(|v| match v { + (SPTag::HeaderSimple, key_count) => Ok(SPSeriesHeader::Simple { key_count }), + (SPTag::HeaderComplexKeys, key_count) => match self.read()? { + Some((SPTag::HeaderComplexSpecs, spec_count)) => Ok(SPSeriesHeader::Complex { + key_count, + spec_count, + }), + v => Err(SPDecoderError::InvalidItem { + expected: vec![SPTag::HeaderComplexSpecs], + actual: v, + remaining: debug_string(&mut self.0), + }), + }, + v => Err(SPDecoderError::InvalidItem { + expected: vec![SPTag::HeaderSimple, SPTag::HeaderComplexKeys], + actual: Some(v), + remaining: debug_string(&mut self.0), + }), + }) + } + + pub(crate) fn read_item(&mut self) -> Result, SPDecoderError> { + self.read()?.try_map(|v| match v { + (SPTag::ItemKeys, key_count) => Ok(SPItem::Keys { key_count }), + (SPTag::ItemParallelKeys, key_count) => match self.read()? { + Some((SPTag::ItemParallelSpecs, spec_count)) => Ok(SPItem::Parallel { + key_count, + spec_count, + }), + v => Err(SPDecoderError::InvalidItem { + expected: vec![SPTag::ItemParallelSpecs], + actual: v, + remaining: debug_string(&mut self.0), + }), + }, + v => Err(SPDecoderError::InvalidItem { + expected: vec![SPTag::ItemKeys, SPTag::ItemParallelKeys], + actual: Some(v), + remaining: debug_string(&mut self.0), + }), + }) + } + + fn read(&mut self) -> Result, SPDecoderError> { + self.0.next().try_map(|v| { + let (tag, val) = Self::split(*v); + Ok((SPTag::decode(tag)?, val)) + }) + } + + fn split(v: u32) -> (u32, u32) { + (v >> 28, v & 0x0FFFFFFF) + } + + pub(crate) fn split_at(&mut self, specs: u32) -> Self { + let remaining_specs = self.0.as_slice(); + let (split_specs, remaining_specs) = remaining_specs.split_at(specs as usize); + self.0 = remaining_specs.iter(); + SPDecoder(split_specs.iter()) + } +} + +#[derive(Debug, Error, Eq, PartialEq)] +pub(crate) enum SPDecoderError { + #[error( + "error decoding series-parallel graph, expected one of <{}> tags, got {:?}, remaining <{}>", + .expected.iter().join(","), + .actual, + .remaining + )] + InvalidItem { + expected: Vec, + actual: Option<(SPTag, u32)>, + remaining: String, + }, + #[error("error decoding series-parallel graph, got invalid tag `{0}`")] + InvalidTag(u32), +} + +#[cfg(test)] +mod test { + use crate::impls::deps::encoding::SPDecoder; + use crate::impls::deps::encoding::SPEncoder; + use crate::impls::deps::graph::SPItem; + use crate::impls::deps::graph::SPSeriesHeader; + + #[test] + fn encoding_and_decoding_works() -> anyhow::Result<()> { + let mut encoded = Vec::new(); + + let item1 = SPItem::Keys { key_count: 3 }; + let item2 = SPItem::Keys { key_count: 2 }; + let item3 = SPItem::Parallel { + key_count: 100, + spec_count: 101, + }; + + let header1 = SPSeriesHeader::Simple { 
key_count: 2 }; + let header2 = SPSeriesHeader::Complex { + key_count: 200, + spec_count: 201, + }; + let header3 = SPSeriesHeader::Simple { key_count: 4 }; + + encoded.write_item(item1); + encoded.write_item(item2); + + encoded.write_series_header(header1); + encoded.write_series_header(header2); + + encoded.write_item(item3); + encoded.write_series_header(header3); + + let mut decoder = SPDecoder(encoded.iter()); + + assert_eq!(decoder.read_item(), Ok(Some(item1))); + assert_eq!(decoder.read_item(), Ok(Some(item2))); + assert_eq!(decoder.read_series_header(), Ok(Some(header1))); + assert_eq!(decoder.read_series_header(), Ok(Some(header2))); + assert_eq!(decoder.read_item(), Ok(Some(item3))); + assert_eq!(decoder.read_series_header(), Ok(Some(header3))); + assert_eq!(decoder.read_item(), Ok(None)); + + Ok(()) + } + + #[test] + fn decoder_returns_errors_on_wrong_type() -> anyhow::Result<()> { + { + let mut encoded = Vec::new(); + encoded.write_item(SPItem::Keys { key_count: 1 }); + assert!(SPDecoder(encoded.iter()).read_series_header().is_err()); + } + { + let mut encoded = Vec::new(); + encoded.write_item(SPItem::Parallel { + key_count: 100, + spec_count: 101, + }); + assert!(SPDecoder(encoded.iter()).read_series_header().is_err()); + } + { + let mut encoded = Vec::new(); + encoded.write_series_header(SPSeriesHeader::Simple { key_count: 1 }); + assert!(SPDecoder(encoded.iter()).read_item().is_err()); + } + { + let mut encoded = Vec::new(); + encoded.write_series_header(SPSeriesHeader::Complex { + key_count: 100, + spec_count: 101, + }); + assert!(SPDecoder(encoded.iter()).read_item().is_err()); + } + + Ok(()) + } +} diff --git a/dice/dice/src/impls/deps/graph.rs b/dice/dice/src/impls/deps/graph.rs new file mode 100644 index 0000000000000..28c19dd6372d7 --- /dev/null +++ b/dice/dice/src/impls/deps/graph.rs @@ -0,0 +1,297 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::fmt::Debug; + +use allocative::Allocative; +use dupe::Dupe; +use gazebo::variants::VariantName; +use itertools::Either; +use itertools::Itertools; + +use crate::impls::deps::encoding::SPEncoder; +use crate::impls::deps::iterator::SeriesNodeIterator; +use crate::impls::deps::iterator::SeriesParallelDepsIteratorItem; +use crate::impls::key::DiceKey; + +/// The DiceComputations compute apis are designed so that in normal usage the graph of +/// inter-dep data dependencies within a compute node form a series-parallel graph. +/// +/// The [SeriesParallelDeps] records the deps and the structure of that graph so that +/// when we recompute we can check keys in parallel but avoid requesting a key that +/// would not be requested by calling the compute directly in that state. +/// +/// For non-trivial graphs, we will encode the graph as a flat list of keys and an +/// encoding of the description of the graph. +/// +/// The `SeriesNodeIterator` and `ParallelNodeIterator` provide a fairly readable +/// implementation of decoding and traversing the graph. +/// +/// A Series node is a sequence of keys and parallel nodes. 
It's encoding is a sequence of two items: +/// SPItem::Keys(v): Indicates that the next v keys are part of this series +/// SPItem::Parallel{keys: x, specs: y}: Indicates that the next `x` keys and `y` specs form a parallel node +/// +/// If a Series node has remaining keys not covered by its spec, the remaining keys are all part of the series. +/// +/// A Parallel node is a set of Series nodes. A parallel node of series nodes S1, S2, and S3 will have: +/// +/// ```ignore +/// keys = keys(S1) + keys(S2) + keys(S3) +/// spec = header(S1) + spec(S1) + header(S2) + spec(S2) + header(S3) + spec(S3) +/// ``` +/// +/// Where `spec(S1)` is the encoding of a series Node as described above and `header(S1)` is a SPSeriesHeader, which +/// is one of two items: +/// SPSeriesHeader::Simple(n): Indicates the series has no nested parallel nodes and is just a series of n keys (and 0 specs). +/// SPSeriesHeader::Complex{keys: x, specs: y}: Indicates a complex series that covers the next x keys and y specs. +/// +/// For both SPItem::Parallel and SPSeriesHeader::Complex, the specs value is the size of the encoded specs. +#[derive(Allocative, Debug, Eq, PartialEq)] +pub(crate) enum SeriesParallelDeps { + None, + /// It's very common for a parallel compute to record only a single dep and so we have an optimized case for that. + One(DiceKey), + /// Once a set of deps becomes non-trivial, it's represented by a SPDepsMany. + Many(Box), +} + +impl SeriesParallelDeps { + pub(crate) fn insert(&mut self, k: DiceKey) { + match self { + SeriesParallelDeps::None => *self = SeriesParallelDeps::One(k), + SeriesParallelDeps::One(_) => self.upgrade_to_many().push(k), + SeriesParallelDeps::Many(v) => v.push(k), + } + } + + fn upgrade_to_many(&mut self) -> &mut SPDepsMany { + match self { + SeriesParallelDeps::None => { + *self = SeriesParallelDeps::Many(Box::new(SPDepsMany::new())); + self.unwrap_many_mut() + } + SeriesParallelDeps::One(..) 
+#[derive(Allocative, Debug, Eq, PartialEq)]
+pub(crate) enum SeriesParallelDeps {
+    None,
+    /// It's very common for a parallel compute to record only a single dep and so we have an optimized case for that.
+    One(DiceKey),
+    /// Once a set of deps becomes non-trivial, it's represented by an SPDepsMany.
+    Many(Box<SPDepsMany>),
+}
+
+impl SeriesParallelDeps {
+    pub(crate) fn insert(&mut self, k: DiceKey) {
+        match self {
+            SeriesParallelDeps::None => *self = SeriesParallelDeps::One(k),
+            SeriesParallelDeps::One(_) => self.upgrade_to_many().push(k),
+            SeriesParallelDeps::Many(v) => v.push(k),
+        }
+    }
+
+    fn upgrade_to_many(&mut self) -> &mut SPDepsMany {
+        match self {
+            SeriesParallelDeps::None => {
+                *self = SeriesParallelDeps::Many(Box::new(SPDepsMany::new()));
+                self.unwrap_many_mut()
+            }
+            SeriesParallelDeps::One(..) => {
+                let v =
+                    std::mem::replace(self, SeriesParallelDeps::Many(Box::new(SPDepsMany::new())));
+                let v = match v {
+                    SeriesParallelDeps::One(v) => v,
+                    _ => unreachable!(),
+                };
+                let many = self.unwrap_many_mut();
+                many.push(v);
+                many
+            }
+            SeriesParallelDeps::Many(v) => &mut *v,
+        }
+    }
+
+    pub(crate) fn header(&self) -> SPSeriesHeader {
+        match self {
+            SeriesParallelDeps::None => SPSeriesHeader::Simple { key_count: 0 },
+            SeriesParallelDeps::One(_) => SPSeriesHeader::Simple { key_count: 1 },
+            SeriesParallelDeps::Many(many) => {
+                if many.spec.is_empty() {
+                    SPSeriesHeader::Simple {
+                        key_count: many.deps.len().try_into().unwrap(),
+                    }
+                } else {
+                    SPSeriesHeader::Complex {
+                        key_count: many.deps.len().try_into().unwrap(),
+                        spec_count: many.spec.len().try_into().unwrap(),
+                    }
+                }
+            }
+        }
+    }
+
+    fn unwrap_many_mut(&mut self) -> &mut SPDepsMany {
+        match self {
+            SeriesParallelDeps::Many(v) => &mut *v,
+            _ => panic!(),
+        }
+    }
+
+    pub(crate) fn serial_from_vec(mut vec: Vec<DiceKey>) -> SeriesParallelDeps {
+        match vec.len() {
+            0 => SeriesParallelDeps::None,
+            1 => SeriesParallelDeps::One(vec.pop().unwrap()),
+            _ => SeriesParallelDeps::Many(Box::new(SPDepsMany::serial_from_vec(vec))),
+        }
+    }
+
+    pub(crate) fn iter_keys(&self) -> impl Iterator<Item = DiceKey> + '_ {
+        match self {
+            SeriesParallelDeps::None => Either::Left(Option::<DiceKey>::None.into_iter()),
+            SeriesParallelDeps::One(v) => Either::Left(Option::<DiceKey>::Some(*v).into_iter()),
+            SeriesParallelDeps::Many(m) => Either::Right(m.deps.iter().copied()),
+        }
+    }
+
+    pub(crate) fn is_empty(&self) -> bool {
+        match self {
+            SeriesParallelDeps::None => true,
+            SeriesParallelDeps::One(_) => false,
+            SeriesParallelDeps::Many(many) => many.deps.is_empty(),
+        }
+    }
+
+    #[allow(unused)] // TODO(cjhopman): delete this once it's used outside tests
+    pub(crate) fn iter(&self) -> impl Iterator<Item = SeriesParallelDepsIteratorItem<'_>> {
+        match self {
+            SeriesParallelDeps::None => {
+                Either::Left(Option::<SeriesParallelDepsIteratorItem>::None.into_iter())
+            }
+            SeriesParallelDeps::One(k) => {
+                Either::Left(Some(SeriesParallelDepsIteratorItem::Key(k)).into_iter())
+            }
+            SeriesParallelDeps::Many(v) => Either::Right(v.iter()),
+        }
+    }
+
+    pub(crate) fn insert_parallel(
+        &mut self,
+        parallel: impl Iterator<Item = SeriesParallelDeps>,
+        new_keys: u32,
+        new_specs: u32,
+    ) {
+        self.upgrade_to_many()
+            .insert_parallel(parallel, new_keys, new_specs);
+    }
+}
+
+#[derive(Allocative, Eq, PartialEq)]
+pub(crate) struct SPDepsMany {
+    deps: Vec<DiceKey>,
+    /// This holds the encoded series-parallel graph structure, i.e. it tells how to read the deps list as a series-parallel graph.
+    spec: Vec<u32>,
+    trailing_deps_start: u32,
+}
+
+impl Debug for SPDepsMany {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("SeriesParallelDeps")
+            .field(
+                "deps",
+                &format!("[{}]", self.deps.iter().map(|v| v.index).join(",")),
+            )
+            .field("spec", &format!("[{:?}]", &self.spec))
+            .field("trailing_deps_start", &self.trailing_deps_start)
+            .finish()
+    }
+}
+
+impl SPDepsMany {
+    fn new() -> SPDepsMany {
+        Self {
+            deps: Vec::new(),
+            spec: Vec::new(),
+            trailing_deps_start: 0,
+        }
+    }
+
+    fn push(&mut self, k: DiceKey) {
+        self.deps.push(k);
+    }
+
+    pub(crate) fn iter(&self) -> SeriesNodeIterator<'_> {
+        SeriesNodeIterator::new(self.deps.iter(), self.spec.iter())
+    }
+
+    fn serial_from_vec(vec: Vec<DiceKey>) -> SPDepsMany {
+        Self {
+            deps: vec,
+            spec: Vec::new(),
+            trailing_deps_start: 0,
+        }
+    }
+
+    fn insert_parallel(
+        &mut self,
+        parallel: impl Iterator<Item = SeriesParallelDeps>,
+        new_keys: u32,
+        new_specs: u32,
+    ) {
+        let trailing_keys = (self.deps.len() - (self.trailing_deps_start as usize))
+            .try_into()
+            .unwrap();
+        let trailing_keys = match trailing_keys {
+            0 => None,
+            n => Some(SPItem::Keys { key_count: n }),
+        };
+        let parallel_item = SPItem::Parallel {
+            key_count: new_keys.try_into().unwrap(),
+            spec_count: new_specs.try_into().unwrap(),
+        };
+
+        let mut total_new_specs = new_specs;
+        if let Some(v) = &trailing_keys {
+            total_new_specs += v.encoded_len();
+        }
+        total_new_specs += parallel_item.encoded_len();
+        let total_new_specs = total_new_specs as usize;
+
+        self.deps.reserve(new_keys as usize);
+        let expected_total_specs = self.spec.len() + total_new_specs;
+        self.spec.reserve(total_new_specs);
+
+        if let Some(v) = trailing_keys {
+            self.spec.write_item(v)
+        }
+        self.spec.write_item(parallel_item);
+
+        for dep in parallel {
+            self.spec.write_series_header(dep.header());
+            match dep {
+                SeriesParallelDeps::None => {}
+                SeriesParallelDeps::One(v) => {
+                    self.deps.push(v);
+                }
+                SeriesParallelDeps::Many(other) => {
+                    self.spec.extend(other.spec);
+                    self.deps.extend(other.deps);
+                }
+            }
+        }
+
+        assert_eq!(self.spec.len(), expected_total_specs);
+        self.trailing_deps_start = self.deps.len().try_into().unwrap();
+    }
+}
+
+/// SPSeriesHeader describes the total encoded size of a series node.
+#[derive(Debug, Copy, Clone, Dupe, VariantName, Eq, PartialEq)]
+pub(crate) enum SPSeriesHeader {
+    /// A series node with no parallel children.
+    Simple { key_count: u32 },
+    /// A series node with parallel children. key_count and spec_count here indicate the full size of this encoded series node and all of its (transitive) children.
+    Complex { key_count: u32, spec_count: u32 },
+}
+
+impl SPSeriesHeader {
+    pub(crate) fn keys_len(&self) -> u32 {
+        match self {
+            SPSeriesHeader::Simple { key_count } => *key_count,
+            SPSeriesHeader::Complex { key_count, .. } => *key_count,
+        }
+    }
+}
+
+/// SPItem represents part of a series node. A series node consists of a sequence of keys and parallel nodes, and a "part" is
+/// either a set of sequential keys or a single parallel node.
+#[derive(Debug, Copy, Clone, Dupe, VariantName, Eq, PartialEq)]
+pub(crate) enum SPItem {
+    Keys {
+        key_count: u32,
+    },
+    /// The key_count and spec_count here are the full size of the encoded parallel node and all of its (transitive) children.
+    Parallel {
+        key_count: u32,
+        spec_count: u32,
+    },
+}
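The `None`/`One`/`Many(Box<..>)` layout above is a small-size optimization: zero or one dep stays inline and pointer-sized, and only larger sets pay for the boxed structure. A self-contained sketch of the same upgrade-on-insert pattern, with simplified types rather than DICE's:

```rust
// Sketch of the small-size optimization used by SeriesParallelDeps:
// zero and one element are stored inline; more elements spill to a boxed Vec.
#[derive(Debug)]
enum Deps {
    None,
    One(u32),
    Many(Box<Vec<u32>>),
}

impl Deps {
    fn insert(&mut self, k: u32) {
        // Take ownership of the current state, then rebuild it one size up.
        let old = std::mem::replace(self, Deps::None);
        *self = match old {
            Deps::None => Deps::One(k),
            Deps::One(prev) => Deps::Many(Box::new(vec![prev, k])),
            Deps::Many(mut v) => {
                v.push(k);
                Deps::Many(v)
            }
        };
    }
}

fn main() {
    let mut deps = Deps::None;
    deps.insert(1); // Deps::One(1)
    deps.insert(2); // upgrades to Deps::Many([1, 2])
    println!("{deps:?}");
}
```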
diff --git a/dice/dice/src/impls/deps/iterator.rs b/dice/dice/src/impls/deps/iterator.rs
new file mode 100644
index 0000000000000..6345bfb91a868
--- /dev/null
+++ b/dice/dice/src/impls/deps/iterator.rs
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use crate::impls::deps::encoding::SPDecoder;
+use crate::impls::deps::graph::SPItem;
+use crate::impls::deps::graph::SPSeriesHeader;
+use crate::impls::key::DiceKey;
+
+#[allow(unused)]
+pub(crate) enum SeriesParallelDepsIteratorItem<'a> {
+    Key(&'a DiceKey),
+    Parallel(ParallelNodeIterator<'a>),
+}
+
+/// Data and some utility functions used by both a SeriesNodeIterator and a ParallelNodeIterator
+struct IteratorData<'a> {
+    deps: std::slice::Iter<'a, DiceKey>,
+    specs: SPDecoder<'a>,
+}
+
+pub(crate) struct SeriesNodeIterator<'a> {
+    data: IteratorData<'a>,
+    keys_to_next_spec: u32,
+}
+
+impl SeriesNodeIterator<'_> {
+    pub(crate) fn new<'a>(
+        deps: std::slice::Iter<'a, DiceKey>,
+        specs: std::slice::Iter<'a, u32>,
+    ) -> SeriesNodeIterator<'a> {
+        SeriesNodeIterator {
+            data: IteratorData {
+                deps,
+                specs: SPDecoder(specs),
+            },
+            keys_to_next_spec: 0,
+        }
+    }
+}
+
+impl<'a> IteratorData<'a> {
+    fn split_at(&mut self, keys: u32, specs: u32) -> IteratorData<'a> {
+        let remaining_keys = self.deps.as_slice();
+        let (split_keys, remaining_keys) = remaining_keys.split_at(keys as usize);
+        self.deps = remaining_keys.iter();
+
+        let split_specs = self.specs.split_at(specs);
+        IteratorData {
+            deps: split_keys.iter(),
+            specs: split_specs,
+        }
+    }
+}
+
+impl<'a> Iterator for SeriesNodeIterator<'a> {
+    type Item = SeriesParallelDepsIteratorItem<'a>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.keys_to_next_spec == 0 {
+            match self.data.specs.read_item().unwrap() {
+                Some(SPItem::Keys { key_count }) => {
+                    self.keys_to_next_spec = key_count;
+                }
+                Some(SPItem::Parallel {
+                    key_count,
+                    spec_count,
+                }) => {
+                    let parallel_data = self.data.split_at(key_count, spec_count);
+                    return Some(SeriesParallelDepsIteratorItem::Parallel(
+                        ParallelNodeIterator {
+                            data: parallel_data,
+                        },
+                    ));
+                }
+                None => {
+                    // drain the trailing keys
+                    self.keys_to_next_spec = u32::MAX;
+                }
+            }
+        }
+        self.keys_to_next_spec -= 1;
+        self.data
+            .deps
+            .next()
+            .map(SeriesParallelDepsIteratorItem::Key)
+    }
+}
+
+pub(crate) struct ParallelNodeIterator<'a> {
+    data: IteratorData<'a>,
+}
+
+impl<'a> Iterator for ParallelNodeIterator<'a> {
+    type Item = SeriesNodeIterator<'a>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        let (next_series_keys, next_series_specs) =
+            match self.data.specs.read_series_header().unwrap() {
+                None => {
+                    return None;
+                }
+                Some(SPSeriesHeader::Complex {
+                    key_count,
+                    spec_count,
+                }) => (key_count, spec_count),
+                Some(SPSeriesHeader::Simple { key_count }) => (key_count, 0),
+            };
+
+        let next_series_data = self.data.split_at(next_series_keys, next_series_specs);
+        Some(SeriesNodeIterator {
+            data: next_series_data,
+            keys_to_next_spec: 0,
+        })
+    }
+}
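The nested iteration above works by carving a child's range out of the parent's flat arrays. The same `as_slice`/`split_at` trick on `std::slice::Iter` can be shown standalone:

```rust
// Sketch: hand the next `n` elements of a flat array to a nested iterator,
// the way IteratorData::split_at carves out a parallel node's keys.
fn split_prefix<'a, T>(iter: &mut std::slice::Iter<'a, T>, n: usize) -> std::slice::Iter<'a, T> {
    let rest = iter.as_slice();
    let (head, tail) = rest.split_at(n);
    *iter = tail.iter(); // parent resumes after the child's range
    head.iter()
}

fn main() {
    let deps = [1, 2, 3, 4, 5, 6];
    let mut parent = deps.iter();
    parent.next(); // consume 1
    let child = split_prefix(&mut parent, 3); // child sees 2, 3, 4
    assert_eq!(child.copied().collect::<Vec<_>>(), vec![2, 3, 4]);
    assert_eq!(parent.copied().collect::<Vec<_>>(), vec![5, 6]);
}
```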
diff --git a/dice/dice/src/impls/dice.rs b/dice/dice/src/impls/dice.rs
index 94d81ae8e9d90..f9c357fc2e90f 100644
--- a/dice/dice/src/impls/dice.rs
+++ b/dice/dice/src/impls/dice.rs
@@ -19,7 +19,6 @@ use crate::api::data::DiceData;
 use crate::api::user_data::UserComputationData;
 use crate::impls::core::state::init_state;
 use crate::impls::core::state::CoreStateHandle;
-use crate::impls::core::state::StateRequest;
 use crate::impls::key_index::DiceKeyIndex;
 use crate::impls::transaction::TransactionUpdater;
 use crate::introspection::graph::GraphIntrospectable;
@@ -80,23 +79,11 @@ impl DiceModern {
     }
 
     pub fn metrics(&self) -> Metrics {
-        let (tx, rx) = tokio::sync::oneshot::channel();
-
-        self.state_handle
-            .request(StateRequest::Metrics { resp: tx });
-
-        // Modern dice can just run on a blocking runtime and block waiting for the channel.
-        // This is safe since the processing dice thread is dedicated, and never awaits any other tasks.
-        tokio::task::block_in_place(|| rx.blocking_recv().unwrap())
+        self.state_handle.metrics()
     }
 
     pub fn to_introspectable(&self) -> GraphIntrospectable {
-        let (tx, rx) = tokio::sync::oneshot::channel();
-
-        self.state_handle
-            .request(StateRequest::Introspection { resp: tx });
-
-        let (graph_introspectable, version_introspectable) = rx.blocking_recv().unwrap();
+        let (graph_introspectable, version_introspectable) = self.state_handle.introspection();
 
         // a bit subtle, but make sure we introspect the key_index after we get the graphs as
         // there may still be new keys added and running. A snapshot of `key_index` prior to
         // snapshotting the graphs will result in missing keys
@@ -120,25 +107,16 @@ impl DiceModern {
 
     /// Wait until all active versions have exited.
     pub fn wait_for_idle(&self) -> impl Future<Output = ()> + 'static {
-        let (tx, rx) = tokio::sync::oneshot::channel();
-
-        self.state_handle
-            .request(StateRequest::GetTasksPendingCancellation { resp: tx });
-
+        let rx = self.state_handle.get_tasks_pending_cancellation();
         async move {
-            let tasks = rx.await.unwrap();
+            let tasks = rx.await;
             futures::future::join_all(tasks).await;
         }
     }
 
     /// true when there are no tasks pending cancellation
     pub async fn is_idle(&self) -> bool {
-        let (tx, rx) = tokio::sync::oneshot::channel();
-
-        self.state_handle
-            .request(StateRequest::GetTasksPendingCancellation { resp: tx });
-
-        let tasks = rx.await.unwrap();
+        let tasks = self.state_handle.get_tasks_pending_cancellation().await;
         tasks.iter().all(|task| task.is_terminated())
     }
 
@@ -148,7 +126,6 @@ impl DiceModern {
 pub(crate) mod testing {
     use dupe::Dupe;
 
-    use crate::impls::core::state::StateRequest;
     use crate::impls::ctx::SharedLiveTransactionCtx;
     use crate::impls::dice::DiceModern;
     use crate::impls::transaction::ActiveTransactionGuard;
@@ -159,15 +136,8 @@ pub(crate) mod testing {
         &self,
         v: VersionNumber,
     ) -> (SharedLiveTransactionCtx, ActiveTransactionGuard) {
-        let (tx, rx) = tokio::sync::oneshot::channel();
-
         let guard = ActiveTransactionGuard::new(v, self.state_handle.dupe());
-        self.state_handle.request(StateRequest::CtxAtVersion {
-            version: v,
-            guard,
-            resp: tx,
-        });
-        rx.await.unwrap()
+        self.state_handle.ctx_at_version(v, guard).await
    }
 }
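These dice.rs changes replace hand-rolled `StateRequest` + oneshot plumbing at every call site with typed methods on the state handle. A minimal sketch of that wrapper pattern under assumed types (`StateRequest`, `StateHandle`, and `metrics` here are illustrative, not the actual DICE API; tokio's `sync` feature is assumed):

```rust
use std::sync::mpsc;

use tokio::sync::oneshot;

// Hypothetical request enum, mirroring the shape of the refactor.
enum StateRequest {
    Metrics { resp: oneshot::Sender<u64> },
}

// The handle owns the channel boilerplate behind typed methods,
// so call sites shrink to `handle.metrics()`.
struct StateHandle {
    tx: mpsc::Sender<StateRequest>,
}

impl StateHandle {
    fn metrics(&self) -> u64 {
        let (tx, rx) = oneshot::channel();
        self.tx.send(StateRequest::Metrics { resp: tx }).unwrap();
        // The dedicated state thread never awaits anything else,
        // so blocking on the reply cannot deadlock.
        rx.blocking_recv().unwrap()
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    std::thread::spawn(move || {
        for req in rx {
            match req {
                StateRequest::Metrics { resp } => drop(resp.send(42)),
            }
        }
    });
    let handle = StateHandle { tx };
    assert_eq!(handle.metrics(), 42);
}
```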
diff --git a/dice/dice/src/impls/evaluator.rs b/dice/dice/src/impls/evaluator.rs
index 8170a550c0816..d1ca8161013f3 100644
--- a/dice/dice/src/impls/evaluator.rs
+++ b/dice/dice/src/impls/evaluator.rs
@@ -18,19 +18,19 @@ use crate::api::storage_type::StorageType;
 use crate::api::user_data::UserComputationData;
 use crate::ctx::DiceComputationsImpl;
 use crate::impls::ctx::ModernComputeCtx;
-use crate::impls::ctx::PerComputeCtx;
 use crate::impls::ctx::SharedLiveTransactionCtx;
+use crate::impls::deps::graph::SeriesParallelDeps;
 use crate::impls::dice::DiceModern;
 use crate::impls::key::DiceKey;
 use crate::impls::key::DiceKeyErased;
 use crate::impls::key::ParentKey;
+use crate::impls::user_cycle::KeyComputingUserCycleDetectorData;
 use crate::impls::value::MaybeValidDiceValue;
-use crate::impls::worker::state::ActivationInfo;
-use crate::impls::worker::state::DiceWorkerStateComputing;
+use crate::impls::value::TrackedInvalidationPaths;
+use crate::impls::worker::state::DiceWorkerStateEvaluating;
 use crate::impls::worker::state::DiceWorkerStateFinishedEvaluating;
 use crate::result::CancellableResult;
 use crate::ActivationData;
-use crate::HashSet;
 
 /// Evaluates Keys
 #[derive(Clone, Dupe, Allocative)]
@@ -52,53 +52,36 @@ impl AsyncEvaluator {
     pub(crate) async fn evaluate<'a, 'b>(
         &self,
         key: DiceKey,
-        state: DiceWorkerStateComputing<'a, 'b>,
+        state: DiceWorkerStateEvaluating<'a, 'b>,
+        cycles: KeyComputingUserCycleDetectorData,
     ) -> CancellableResult<DiceWorkerStateFinishedEvaluating<'a, 'b>> {
         let key_erased = self.dice.key_index.get(key);
 
-        let (cycles, state) = state.evaluating();
-
         match key_erased {
             DiceKeyErased::Key(key_dyn) => {
-                let mut new_ctx = DiceComputations(DiceComputationsImpl::Modern(
-                    ModernComputeCtx::Regular(PerComputeCtx::new(
+                let mut new_ctx =
+                    DiceComputations(DiceComputationsImpl::Modern(ModernComputeCtx::new(
                         ParentKey::Some(key), // within this key's compute, this key is the parent
-                        self.per_live_version_ctx.dupe(),
-                        self.user_data.dupe(),
-                        self.dice.dupe(),
                         cycles,
-                    )),
-                ));
+                        self.dupe(),
+                    )));
 
                 let value = key_dyn
                     .compute(&mut new_ctx, &state.cancellation_ctx().into_compatible())
                     .await;
 
-                let ((deps, dep_validity), evaluation_data, cycles) = match new_ctx.0 {
-                    DiceComputationsImpl::Legacy(_) => {
-                        unreachable!("modern dice created above")
-                    }
-                    DiceComputationsImpl::Modern(new_ctx) => new_ctx
-                        .into_regular()
-                        .expect("created regular above")
-                        .finalize(),
+                let (recorded_deps, evaluation_data, cycles) = match new_ctx.0 {
+                    DiceComputationsImpl::Modern(new_ctx) => new_ctx.finalize(),
                 };
 
-                let activation = ActivationInfo::new(
-                    &self.dice.key_index,
-                    &self.user_data.activation_tracker,
-                    key,
-                    deps.iter(),
-                    evaluation_data.into_activation_data(),
-                );
-
                 state.finished(
                     cycles,
                     KeyEvaluationResult {
-                        value: MaybeValidDiceValue::new(value, dep_validity),
-                        deps,
+                        value: MaybeValidDiceValue::new(value, recorded_deps.deps_validity),
+                        deps: recorded_deps.deps,
                         storage: key_dyn.storage_type(),
+                        invalidation_paths: recorded_deps.invalidation_paths,
                     },
-                    activation,
+                    evaluation_data.into_activation_data(),
                 )
             }
             DiceKeyErased::Projection(proj) => {
@@ -119,22 +102,15 @@ impl AsyncEvaluator {
 
                 let value = proj.proj().compute(base.value(), &ctx);
 
-                let activation = ActivationInfo::new(
-                    &self.dice.key_index,
-                    &self.user_data.activation_tracker,
-                    key,
-                    [proj.base()].iter(),
-                    ActivationData::Evaluated(None), // Projection keys can't set this.
-                );
-
                 state.finished(
                     cycles,
                     KeyEvaluationResult {
                         value: MaybeValidDiceValue::new(value, base.value().validity()),
-                        deps: [proj.base()].into_iter().collect(),
+                        deps: SeriesParallelDeps::serial_from_vec(vec![proj.base()]),
                         storage: proj.proj().storage_type(),
+                        invalidation_paths: base.invalidation_paths().for_dependent(key),
                    },
-                    activation,
+                    ActivationData::Evaluated(None), // Projection keys can't set this.
                 )
             }
         }
@@ -147,6 +123,7 @@ pub(crate) struct SyncEvaluator {
     user_data: Arc<UserComputationData>,
     dice: Arc<DiceModern>,
     base: MaybeValidDiceValue,
+    base_invalidation_paths: TrackedInvalidationPaths,
 }
 
 impl SyncEvaluator {
@@ -154,11 +131,13 @@ impl SyncEvaluator {
         user_data: Arc<UserComputationData>,
         dice: Arc<DiceModern>,
         base: MaybeValidDiceValue,
+        base_invalidation_paths: TrackedInvalidationPaths,
     ) -> Self {
         Self {
             user_data,
             dice,
             base,
+            base_invalidation_paths,
         }
     }
 
@@ -178,8 +157,9 @@ impl SyncEvaluator {
 
         KeyEvaluationResult {
             value: MaybeValidDiceValue::new(value, self.base.validity()),
-            deps: [proj.base()].into_iter().collect(),
+            deps: SeriesParallelDeps::serial_from_vec(vec![proj.base()]),
             storage: proj.proj().storage_type(),
+            invalidation_paths: self.base_invalidation_paths.for_dependent(key),
         }
     }
 }
@@ -188,6 +168,7 @@ impl SyncEvaluator {
 pub(crate) struct KeyEvaluationResult {
     pub(crate) value: MaybeValidDiceValue,
-    pub(crate) deps: HashSet<DiceKey>,
+    pub(crate) deps: SeriesParallelDeps,
     pub(crate) storage: StorageType,
+    pub(crate) invalidation_paths: TrackedInvalidationPaths,
 }
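A projection key's only dep is its base key, so the diff builds the dep set with `serial_from_vec` instead of collecting into a `HashSet`. A sketch of how such a constructor picks the smallest representation (mirror types, not DICE's):

```rust
#[derive(Debug, PartialEq)]
enum Deps {
    None,
    One(u32),
    Many(Vec<u32>),
}

// Mirrors SeriesParallelDeps::serial_from_vec: a plain series of keys,
// stored in the smallest matching variant.
fn serial_from_vec(mut v: Vec<u32>) -> Deps {
    match v.len() {
        0 => Deps::None,
        1 => Deps::One(v.pop().unwrap()),
        _ => Deps::Many(v),
    }
}

fn main() {
    // A projection key depends only on its base key: no allocation-heavy set needed.
    assert_eq!(serial_from_vec(vec![100]), Deps::One(100));
    assert_eq!(serial_from_vec(vec![]), Deps::None);
}
```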
) } } @@ -147,6 +123,7 @@ pub(crate) struct SyncEvaluator { user_data: Arc, dice: Arc, base: MaybeValidDiceValue, + base_invalidation_paths: TrackedInvalidationPaths, } impl SyncEvaluator { @@ -154,11 +131,13 @@ impl SyncEvaluator { user_data: Arc, dice: Arc, base: MaybeValidDiceValue, + base_invalidation_paths: TrackedInvalidationPaths, ) -> Self { Self { user_data, dice, base, + base_invalidation_paths, } } @@ -178,8 +157,9 @@ impl SyncEvaluator { KeyEvaluationResult { value: MaybeValidDiceValue::new(value, self.base.validity()), - deps: [proj.base()].into_iter().collect(), + deps: SeriesParallelDeps::serial_from_vec(vec![proj.base()]), storage: proj.proj().storage_type(), + invalidation_paths: self.base_invalidation_paths.for_dependent(key), } } } @@ -188,6 +168,7 @@ impl SyncEvaluator { pub(crate) struct KeyEvaluationResult { pub(crate) value: MaybeValidDiceValue, - pub(crate) deps: HashSet, + pub(crate) deps: SeriesParallelDeps, pub(crate) storage: StorageType, + pub(crate) invalidation_paths: TrackedInvalidationPaths, } diff --git a/dice/dice/src/impls/events.rs b/dice/dice/src/impls/events.rs index 2ba0861665223..d9554b7bbd269 100644 --- a/dice/dice/src/impls/events.rs +++ b/dice/dice/src/impls/events.rs @@ -52,4 +52,18 @@ impl DiceEventDispatcher { self.tracker .event(DiceEvent::CheckDepsFinished { key_type: desc }) } + + pub(crate) fn compute_started(&self, k: DiceKey) { + let desc = self.dice.key_index.get(k).key_type_name(); + + self.tracker + .event(DiceEvent::ComputeStarted { key_type: desc }) + } + + pub(crate) fn compute_finished(&self, k: DiceKey) { + let desc = self.dice.key_index.get(k).key_type_name(); + + self.tracker + .event(DiceEvent::ComputeFinished { key_type: desc }) + } } diff --git a/dice/dice/src/impls/hash.rs b/dice/dice/src/impls/hash.rs index 0fedb2457d27e..70b0deb44b2e2 100644 --- a/dice/dice/src/impls/hash.rs +++ b/dice/dice/src/impls/hash.rs @@ -12,10 +12,10 @@ use std::hash::Hash; use std::hash::Hasher; use std::mem; -use fnv::FnvHasher; +use fxhash::FxHasher; pub(crate) fn key_hash(key: &K) -> u64 { - let mut hasher = FnvHasher::default(); + let mut hasher = FxHasher::default(); if mem::size_of::() == 0 { // Hashing `TypeId` unconditionally measurably slows down hashing. TypeId::of::().hash(&mut hasher); diff --git a/dice/dice/src/impls/incremental.rs b/dice/dice/src/impls/incremental.rs new file mode 100644 index 0000000000000..534968763a920 --- /dev/null +++ b/dice/dice/src/impls/incremental.rs @@ -0,0 +1,371 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! +//! The incrementality module of BUCK +//! +//! This is responsible for performing incremental caching and invalidations +//! with multiple versions in-flight at the same time. 
diff --git a/dice/dice/src/impls/incremental.rs b/dice/dice/src/impls/incremental.rs
new file mode 100644
index 0000000000000..534968763a920
--- /dev/null
+++ b/dice/dice/src/impls/incremental.rs
@@ -0,0 +1,371 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+//!
+//! The incrementality module of BUCK
+//!
+//! This is responsible for performing incremental caching and invalidations
+//! with multiple versions in-flight at the same time.
+
+use std::borrow::Cow;
+use std::fmt::Debug;
+use std::future;
+
+use allocative::Allocative;
+use dupe::Dupe;
+use futures::stream::FuturesUnordered;
+use futures::FutureExt;
+use futures::StreamExt;
+use tokio::sync::oneshot;
+
+use crate::api::activation_tracker::ActivationData;
+use crate::arc::Arc;
+use crate::impls::core::graph::history::CellHistory;
+use crate::impls::core::graph::types::VersionedGraphKey;
+use crate::impls::core::graph::types::VersionedGraphResult;
+use crate::impls::core::state::CoreStateHandle;
+use crate::impls::core::state::StateRequest;
+use crate::impls::core::versions::VersionEpoch;
+use crate::impls::deps::graph::SeriesParallelDeps;
+use crate::impls::evaluator::AsyncEvaluator;
+use crate::impls::evaluator::SyncEvaluator;
+use crate::impls::events::DiceEventDispatcher;
+use crate::impls::key::DiceKey;
+use crate::impls::key::ParentKey;
+use crate::impls::task::dice::DiceTask;
+use crate::impls::task::promise::DicePromise;
+use crate::impls::task::promise::DiceSyncResult;
+use crate::impls::task::PreviouslyCancelledTask;
+use crate::impls::user_cycle::UserCycleDetectorData;
+use crate::impls::value::DiceComputedValue;
+use crate::impls::worker::state::ActivationInfo;
+use crate::impls::worker::state::DiceWorkerStateCheckingDeps;
+use crate::impls::worker::state::DiceWorkerStateComputing;
+use crate::impls::worker::state::DiceWorkerStateFinishedAndCached;
+use crate::impls::worker::state::DiceWorkerStateLookupNode;
+use crate::impls::worker::DiceTaskWorker;
+use crate::result::CancellableResult;
+use crate::result::Cancelled;
+use crate::versions::VersionNumber;
+use crate::versions::VersionRanges;
+
+#[cfg(test)]
+mod tests;
+
+/// The incremental engine that manages all the handling of the results of a
+/// specific key, performing the recomputation if necessary
+///
+/// The computation of an identical request (same key and version) is
+/// automatically deduplicated, so that identical requests share the same set of
+/// work. It is guaranteed that there is at most one computation in flight at a
+/// time if they share the same key and version.
+#[derive(Allocative)]
+pub(crate) struct IncrementalEngine {
+    state: CoreStateHandle,
+    pub(crate) version_epoch: VersionEpoch,
+}
+
+impl Debug for IncrementalEngine {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("IncrementalEngine").finish_non_exhaustive()
+    }
+}
+
+impl IncrementalEngine {
+    fn new(state: CoreStateHandle, version_epoch: VersionEpoch) -> Self {
+        Self {
+            state,
+            version_epoch,
+        }
+    }
+
+    pub(crate) fn spawn_for_key(
+        k: DiceKey,
+        version_epoch: VersionEpoch,
+        eval: AsyncEvaluator,
+        cycles: UserCycleDetectorData,
+        events_dispatcher: DiceEventDispatcher,
+        previously_cancelled_task: Option<PreviouslyCancelledTask>,
+    ) -> DiceTask {
+        let state_handle = eval.dice.state_handle.dupe();
+        DiceTaskWorker::spawn(
+            k,
+            eval,
+            cycles,
+            events_dispatcher,
+            previously_cancelled_task,
+            IncrementalEngine::new(state_handle, version_epoch),
+        )
+    }
+
+    #[cfg_attr(debug_assertions, instrument(
+        level = "debug",
+        skip(state, promise, eval, event_dispatcher),
+        fields(k = ?k, version = %v),
+    ))]
+    pub(crate) fn project_for_key(
+        state: CoreStateHandle,
+        promise: DicePromise,
+        k: DiceKey,
+        v: VersionNumber,
+        version_epoch: VersionEpoch,
+        eval: SyncEvaluator,
+        event_dispatcher: DiceEventDispatcher,
+    ) -> CancellableResult<DiceComputedValue> {
+        promise.sync_get_or_complete(|| {
+            event_dispatcher.started(k);
+
+            debug!(msg = "running projection");
+
+            let eval_result = eval.evaluate(k);
+
+            debug!(msg = "projection finished. updating caches");
+
+            let (res, future) = {
+                // send the update but don't wait for it
+                let state_future = match eval_result.value.dupe().into_valid_value() {
+                    Ok(value) => {
+                        let (tx, rx) = oneshot::channel();
+                        state.request(StateRequest::UpdateComputed {
+                            key: VersionedGraphKey::new(v, k),
+                            epoch: version_epoch,
+                            storage: eval_result.storage,
+                            value,
+                            deps: Arc::new(eval_result.deps),
+                            resp: tx,
+                        });
+
+                        Some(
+                            rx.map(|res| res.map_err(|_channel_drop| Cancelled).flatten())
+                                .boxed(),
+                        )
+                    }
+                    Err(_transient_result) => {
+                        // transients are never stored in the state, but the result should be shared
+                        // with async computations as if it were.
+                        None
+                    }
+                };
+
+                (eval_result.value, state_future)
+            };
+
+            debug!(msg = "update future completed");
+            event_dispatcher.finished(k);
+
+            let computed_value = DiceComputedValue::new(res, Arc::new(CellHistory::verified(v)));
+            let state_future =
+                future.unwrap_or_else(|| future::ready(Ok(computed_value.dupe())).boxed());
+
+            DiceSyncResult {
+                sync_result: computed_value,
+                state_future,
+            }
+        })
+    }
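Note the shape of `project_for_key`: it returns the computed value immediately while the graph update is only sent, not awaited, and `DiceSyncResult` pairs the value with a future for the pending write. A self-contained sketch of that pattern (all names here are illustrative, not the DICE API; assumes the tokio runtime):

```rust
use tokio::sync::oneshot;

// The caller gets the value now, plus a future that resolves once the
// dedicated state thread has persisted it.
struct SyncResult {
    sync_result: u64,
    state_future: oneshot::Receiver<()>,
}

fn compute_then_persist(
    value: u64,
    state_tx: std::sync::mpsc::Sender<(u64, oneshot::Sender<()>)>,
) -> SyncResult {
    let (ack_tx, ack_rx) = oneshot::channel();
    // Send the update but don't wait for it.
    state_tx.send((value, ack_tx)).unwrap();
    SyncResult {
        sync_result: value,
        state_future: ack_rx,
    }
}

#[tokio::main]
async fn main() {
    let (state_tx, state_rx) = std::sync::mpsc::channel();
    std::thread::spawn(move || {
        for (_value, ack) in state_rx {
            // ... persist _value to the graph, then acknowledge ...
            let _ = ack.send(());
        }
    });

    let res = compute_then_persist(7, state_tx);
    assert_eq!(res.sync_result, 7); // available without awaiting the write
    res.state_future.await.unwrap(); // later: the write has landed
}
```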
+
+    pub(crate) async fn eval_entry_versioned(
+        &self,
+        k: DiceKey,
+        eval: &AsyncEvaluator,
+        events_dispatcher: DiceEventDispatcher,
+        task_state: DiceWorkerStateLookupNode<'_, '_>,
+    ) -> CancellableResult<DiceWorkerStateFinishedAndCached> {
+        let v = eval.per_live_version_ctx.get_version();
+        let (tx, rx) = oneshot::channel();
+        self.state.request(StateRequest::LookupKey {
+            key: VersionedGraphKey::new(v, k),
+            resp: tx,
+        });
+
+        let state_result = rx.await.unwrap();
+
+        match state_result {
+            VersionedGraphResult::Match(entry) => task_state.lookup_matches(entry),
+            VersionedGraphResult::Compute => {
+                self.compute(k, eval, &events_dispatcher, task_state.lookup_dirtied(eval))
+                    .await
+            }
+
+            VersionedGraphResult::CheckDeps(mismatch) => {
+                let task_state = task_state.checking_deps(eval);
+
+                let deps_changed = {
+                    events_dispatcher.check_deps_started(k);
+                    scopeguard::defer! {
+                        events_dispatcher.check_deps_finished(k);
+                    }
+
+                    self.compute_whether_dependencies_changed(
+                        ParentKey::Some(k), // the computing of deps is triggered by this key as the parent
+                        eval.dupe(),
+                        &mismatch.verified_versions,
+                        &mismatch.deps_to_validate,
+                        &task_state,
+                    )
+                    .await?
+                };
+
+                match deps_changed {
+                    DidDepsChange::Changed | DidDepsChange::NoDeps => {
+                        self.compute(k, eval, &events_dispatcher, task_state.deps_not_match())
+                            .await
+                    }
+                    DidDepsChange::NoChange => {
+                        let task_state = task_state.deps_match(ActivationInfo::new(
+                            &eval.dice.key_index,
+                            &eval.user_data.activation_tracker,
+                            k,
+                            mismatch.deps_to_validate.iter_keys(),
+                            ActivationData::Reused,
+                        ))?;
+
+                        // report reuse
+                        let (tx, rx) = oneshot::channel();
+                        self.state.request(StateRequest::UpdateMismatchAsUnchanged {
+                            key: VersionedGraphKey::new(v, k),
+                            epoch: self.version_epoch,
+                            storage: eval.storage_type(k),
+                            previous: mismatch,
+                            resp: tx,
+                        });
+
+                        rx.await.unwrap().map(|r| task_state.cached(r))
+                    }
+                }
+            }
+        }
+    }
+
+    async fn compute(
+        &self,
+        k: DiceKey,
+        eval: &AsyncEvaluator,
+        event_dispatcher: &DiceEventDispatcher,
+        task_state: DiceWorkerStateComputing<'_, '_>,
+    ) -> CancellableResult<DiceWorkerStateFinishedAndCached> {
+        event_dispatcher.started(k);
+        scopeguard::defer! {
+            event_dispatcher.finished(k);
+        };
+
+        let v = eval.per_live_version_ctx.get_version();
+
+        // TODO(bobyf) these also make good locations where we want to perform instrumentation
+        debug!(msg = "running evaluator");
+
+        let eval_result_state = eval.evaluate(k, task_state).await?;
+        let eval_result = eval_result_state.result;
+
+        let res = {
+            match eval_result.value.into_valid_value() {
+                Ok(value) => {
+                    let (tx, rx) = oneshot::channel();
+                    self.state.request(StateRequest::UpdateComputed {
+                        key: VersionedGraphKey::new(v, k),
+                        epoch: self.version_epoch,
+                        storage: eval_result.storage,
+                        value,
+                        deps: Arc::new(eval_result.deps),
+                        resp: tx,
+                    });
+
+                    rx.await.unwrap()
+                }
+                Err(value) => Ok(DiceComputedValue::new(
+                    value,
+                    Arc::new(CellHistory::verified(v)),
+                )),
+            }
+        };
+
+        res.map(|res| eval_result_state.state.cached(res))
+    }
+
+    /// determines if the given 'Dependency' has changed between versions 'last_version' and
+    /// 'target_version'
+    #[cfg_attr(debug_assertions, instrument(
+        level = "debug",
+        skip(self, eval, deps, check_deps_state),
+        fields(version = %eval.per_live_version_ctx.get_version(), verified_versions = %verified_versions)
+    ))]
+    async fn compute_whether_dependencies_changed(
+        &self,
+        parent_key: ParentKey,
+        eval: AsyncEvaluator,
+        verified_versions: &VersionRanges,
+        deps: &SeriesParallelDeps,
+        check_deps_state: &DiceWorkerStateCheckingDeps<'_, '_>,
+    ) -> CancellableResult<DidDepsChange> {
+        trace!(deps = ?deps);
+
+        if deps.is_empty() {
+            return Ok(DidDepsChange::NoDeps);
+        }
+
+        let mut fs: FuturesUnordered<_> = deps
+            .iter_keys()
+            .map(|dep| {
+                eval.per_live_version_ctx
+                    .compute_opaque(
+                        dep,
+                        parent_key,
+                        &eval,
+                        check_deps_state.cycles_for_dep(dep, &eval),
+                    )
+                    .map(|r| r.map(|v| v.history().get_verified_ranges()))
+            })
+            .collect();
+
+        let mut verified_versions = Cow::Borrowed(verified_versions);
+
+        while let Some(dep_result) = fs.next().await {
+            match dep_result {
+                Ok(dep_version_ranges) => {
+                    verified_versions =
+                        Cow::Owned(verified_versions.intersect(&dep_version_ranges));
+                    if verified_versions.is_empty() {
+                        return Ok(DidDepsChange::Changed);
+                    }
+                }
+                Err(Cancelled) => {
+                    return Err(Cancelled);
+                }
+            }
+        }
+
+        Ok(DidDepsChange::NoChange)
+    }
+}
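`compute_whether_dependencies_changed` only needs an early exit: once the running intersection of verified version ranges becomes empty, some dep must have changed. The core of that check, sketched standalone over simplified half-open ranges:

```rust
// Half-open version ranges [start, end); a simplified stand-in for VersionRanges.
type Ranges = Vec<(u64, u64)>;

fn intersect(a: &Ranges, b: &Ranges) -> Ranges {
    let mut out = Vec::new();
    for &(s1, e1) in a {
        for &(s2, e2) in b {
            let (s, e) = (s1.max(s2), e1.min(e2));
            if s < e {
                out.push((s, e));
            }
        }
    }
    out
}

fn main() {
    // Parent verified on [1, 3); deps verified on [0, 5) and [2, 4).
    let mut verified = vec![(1, 3)];
    for dep in [vec![(0, 5)], vec![(2, 4)]] {
        verified = intersect(&verified, &dep);
        if verified.is_empty() {
            println!("deps changed");
            return; // early exit, as in the real loop
        }
    }
    println!("no change; still verified on {verified:?}"); // [(2, 3)]
}
```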
+
+enum DidDepsChange {
+    Changed,
+    NoChange,
+    NoDeps,
+}
+
+#[cfg(test)]
+pub(crate) mod testing {
+
+    use crate::impls::incremental::DidDepsChange;
+
+    pub(crate) trait DidDepsChangeExt {
+        fn is_changed(&self) -> bool;
+    }
+
+    impl DidDepsChangeExt for DidDepsChange {
+        fn is_changed(&self) -> bool {
+            match self {
+                DidDepsChange::Changed => true,
+                DidDepsChange::NoChange => false,
+                DidDepsChange::NoDeps => false,
+            }
+        }
+    }
+}
diff --git a/dice/dice/src/impls/incremental/mod.rs b/dice/dice/src/impls/incremental/mod.rs
deleted file mode 100644
index d75958898654b..0000000000000
--- a/dice/dice/src/impls/incremental/mod.rs
+++ /dev/null
@@ -1,371 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */
-
-//!
-//! The incrementality module of BUCK
-//!
-//! This is responsible for performing incremental caching and invalidations
-//! with multiple versions in-flight at the same time.
-//!
-
-use std::borrow::Cow;
-use std::fmt::Debug;
-use std::future;
-
-use allocative::Allocative;
-use dupe::Dupe;
-use futures::stream::FuturesUnordered;
-use futures::FutureExt;
-use futures::StreamExt;
-use tokio::sync::oneshot;
-
-use crate::api::activation_tracker::ActivationData;
-use crate::arc::Arc;
-use crate::impls::core::graph::history::CellHistory;
-use crate::impls::core::graph::types::VersionedGraphKey;
-use crate::impls::core::graph::types::VersionedGraphResult;
-use crate::impls::core::state::CoreStateHandle;
-use crate::impls::core::state::StateRequest;
-use crate::impls::core::versions::VersionEpoch;
-use crate::impls::evaluator::AsyncEvaluator;
-use crate::impls::evaluator::SyncEvaluator;
-use crate::impls::events::DiceEventDispatcher;
-use crate::impls::key::DiceKey;
-use crate::impls::key::ParentKey;
-use crate::impls::task::dice::DiceTask;
-use crate::impls::task::promise::DicePromise;
-use crate::impls::task::promise::DiceSyncResult;
-use crate::impls::task::PreviouslyCancelledTask;
-use crate::impls::user_cycle::UserCycleDetectorData;
-use crate::impls::value::DiceComputedValue;
-use crate::impls::worker::state::ActivationInfo;
-use crate::impls::worker::state::DiceWorkerStateCheckingDeps;
-use crate::impls::worker::state::DiceWorkerStateComputing;
-use crate::impls::worker::state::DiceWorkerStateFinishedAndCached;
-use crate::impls::worker::state::DiceWorkerStateLookupNode;
-use crate::impls::worker::DiceTaskWorker;
-use crate::result::CancellableResult;
-use crate::result::Cancelled;
-use crate::versions::VersionNumber;
-use crate::versions::VersionRanges;
-
-#[cfg(test)]
-mod tests;
-
-/// The incremental engine that manages all the handling of the results of a
-/// specific key, performing the recomputation if necessary
-///
-/// The computation of an identical request (same key and version) is
-/// automatically deduplicated, so that identical requests share the same set of
-/// work. It is guaranteed that there is at most one computation in flight at a
-/// time if they share the same key and version.
-#[derive(Allocative)]
-pub(crate) struct IncrementalEngine {
-    state: CoreStateHandle,
-    pub(crate) version_epoch: VersionEpoch,
-}
-
-impl Debug for IncrementalEngine {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.debug_struct("IncrementalEngine").finish_non_exhaustive()
-    }
-}
-
-impl IncrementalEngine {
-    fn new(state: CoreStateHandle, version_epoch: VersionEpoch) -> Self {
-        Self {
-            state,
-            version_epoch,
-        }
-    }
-
-    pub(crate) fn spawn_for_key(
-        k: DiceKey,
-        version_epoch: VersionEpoch,
-        eval: AsyncEvaluator,
-        cycles: UserCycleDetectorData,
-        events_dispatcher: DiceEventDispatcher,
-        previously_cancelled_task: Option<PreviouslyCancelledTask>,
-    ) -> DiceTask {
-        let state_handle = eval.dice.state_handle.dupe();
-        DiceTaskWorker::spawn(
-            k,
-            eval,
-            cycles,
-            events_dispatcher,
-            previously_cancelled_task,
-            IncrementalEngine::new(state_handle, version_epoch),
-        )
-    }
-
-    #[instrument(
-        level = "debug",
-        skip(state, promise, eval, event_dispatcher),
-        fields(k = ?k, version = %v),
-    )]
-    pub(crate) fn project_for_key(
-        state: CoreStateHandle,
-        promise: DicePromise,
-        k: DiceKey,
-        v: VersionNumber,
-        version_epoch: VersionEpoch,
-        eval: SyncEvaluator,
-        event_dispatcher: DiceEventDispatcher,
-    ) -> CancellableResult<DiceComputedValue> {
-        promise.sync_get_or_complete(|| {
-            event_dispatcher.started(k);
-
-            debug!(msg = "running projection");
-
-            let eval_result = eval.evaluate(k);
-
-            debug!(msg = "projection finished. updating caches");
-
-            let (res, future) = {
-                // send the update but don't wait for it
-                let state_future = match eval_result.value.dupe().into_valid_value() {
-                    Ok(value) => {
-                        let (tx, rx) = oneshot::channel();
-                        state.request(StateRequest::UpdateComputed {
-                            key: VersionedGraphKey::new(v, k),
-                            epoch: version_epoch,
-                            storage: eval_result.storage,
-                            value,
-                            deps: Arc::new(eval_result.deps.into_iter().collect()),
-                            resp: tx,
-                        });
-
-                        Some(
-                            rx.map(|res| res.map_err(|_channel_drop| Cancelled).flatten())
-                                .boxed(),
-                        )
-                    }
-                    Err(_transient_result) => {
-                        // transients are never stored in the state, but the result should be shared
-                        // with async computations as if it were.
-                        None
-                    }
-                };
-
-                (eval_result.value, state_future)
-            };
-
-            debug!(msg = "update future completed");
-            event_dispatcher.finished(k);
-
-            let computed_value = DiceComputedValue::new(res, Arc::new(CellHistory::verified(v)));
-            let state_future =
-                future.unwrap_or_else(|| future::ready(Ok(computed_value.dupe())).boxed());
-
-            DiceSyncResult {
-                sync_result: computed_value,
-                state_future,
-            }
-        })
-    }
-
-    pub(crate) async fn eval_entry_versioned(
-        &self,
-        k: DiceKey,
-        eval: &AsyncEvaluator,
-        events_dispatcher: DiceEventDispatcher,
-        task_state: DiceWorkerStateLookupNode<'_, '_>,
-    ) -> CancellableResult<DiceWorkerStateFinishedAndCached> {
-        let v = eval.per_live_version_ctx.get_version();
-        let (tx, rx) = oneshot::channel();
-        self.state.request(StateRequest::LookupKey {
-            key: VersionedGraphKey::new(v, k),
-            resp: tx,
-        });
-
-        let state_result = rx.await.unwrap();
-
-        match state_result {
-            VersionedGraphResult::Match(entry) => task_state.lookup_matches(entry),
-            VersionedGraphResult::Compute => {
-                self.compute(k, eval, &events_dispatcher, task_state.lookup_dirtied(eval))
-                    .await
-            }
-
-            VersionedGraphResult::CheckDeps(mismatch) => {
-                let task_state = task_state.checking_deps(eval);
-
-                let deps_changed = {
-                    events_dispatcher.check_deps_started(k);
-                    scopeguard::defer! {
-                        events_dispatcher.check_deps_finished(k);
-                    }
-
-                    self.compute_whether_dependencies_changed(
-                        ParentKey::Some(k), // the computing of deps is triggered by this key as the parent
-                        eval.dupe(),
-                        &mismatch.verified_versions,
-                        &mismatch.deps_to_validate,
-                        &task_state,
-                    )
-                    .await?
-                };
-
-                match deps_changed {
-                    DidDepsChange::Changed | DidDepsChange::NoDeps => {
-                        self.compute(k, eval, &events_dispatcher, task_state.deps_not_match())
-                            .await
-                    }
-                    DidDepsChange::NoChange => {
-                        let task_state = task_state.deps_match(ActivationInfo::new(
-                            &eval.dice.key_index,
-                            &eval.user_data.activation_tracker,
-                            k,
-                            mismatch.deps_to_validate.iter(),
-                            ActivationData::Reused,
-                        ))?;
-
-                        // report reuse
-                        let (tx, rx) = oneshot::channel();
-                        self.state.request(StateRequest::UpdateMismatchAsUnchanged {
-                            key: VersionedGraphKey::new(v, k),
-                            epoch: self.version_epoch,
-                            storage: eval.storage_type(k),
-                            previous: mismatch,
-                            resp: tx,
-                        });
-
-                        rx.await.unwrap().map(|r| task_state.cached(r))
-                    }
-                }
-            }
-        }
-    }
-
-    async fn compute(
-        &self,
-        k: DiceKey,
-        eval: &AsyncEvaluator,
-        event_dispatcher: &DiceEventDispatcher,
-        task_state: DiceWorkerStateComputing<'_, '_>,
-    ) -> CancellableResult<DiceWorkerStateFinishedAndCached> {
-        event_dispatcher.started(k);
-        scopeguard::defer! {
-            event_dispatcher.finished(k);
-        };
-
-        let v = eval.per_live_version_ctx.get_version();
-
-        // TODO(bobyf) these also make good locations where we want to perform instrumentation
-        debug!(msg = "running evaluator");
-
-        let eval_result_state = eval.evaluate(k, task_state).await?;
-        let eval_result = eval_result_state.result;
-
-        let res = {
-            match eval_result.value.into_valid_value() {
-                Ok(value) => {
-                    let (tx, rx) = oneshot::channel();
-                    self.state.request(StateRequest::UpdateComputed {
-                        key: VersionedGraphKey::new(v, k),
-                        epoch: self.version_epoch,
-                        storage: eval_result.storage,
-                        value,
-                        deps: Arc::new(eval_result.deps.into_iter().collect()),
-                        resp: tx,
-                    });
-
-                    rx.await.unwrap()
-                }
-                Err(value) => Ok(DiceComputedValue::new(
-                    value,
-                    Arc::new(CellHistory::verified(v)),
-                )),
-            }
-        };
-
-        res.map(|res| eval_result_state.state.cached(res))
-    }
-
-    /// determines if the given 'Dependency' has changed between versions 'last_version' and
-    /// 'target_version'
-    #[instrument(
-        level = "debug",
-        skip(self, eval, deps, check_deps_state),
-        fields(version = %eval.per_live_version_ctx.get_version(), verified_versions = %verified_versions)
-    )]
-    async fn compute_whether_dependencies_changed(
-        &self,
-        parent_key: ParentKey,
-        eval: AsyncEvaluator,
-        verified_versions: &VersionRanges,
-        deps: &[DiceKey],
-        check_deps_state: &DiceWorkerStateCheckingDeps<'_, '_>,
-    ) -> CancellableResult<DidDepsChange> {
-        trace!(deps = ?deps);
-
-        if deps.is_empty() {
-            return Ok(DidDepsChange::NoDeps);
-        }
-
-        let mut fs: FuturesUnordered<_> = deps
-            .iter()
-            .map(|dep| {
-                eval.per_live_version_ctx
-                    .compute_opaque(
-                        dep.dupe(),
-                        parent_key,
-                        &eval,
-                        check_deps_state.cycles_for_dep(*dep, &eval),
-                    )
-                    .map(|r| r.map(|v| v.history().get_verified_ranges()))
-            })
-            .collect();
-
-        let mut verified_versions = Cow::Borrowed(verified_versions);
-
-        while let Some(dep_result) = fs.next().await {
-            match dep_result {
-                Ok(dep_version_ranges) => {
-                    verified_versions =
-                        Cow::Owned(verified_versions.intersect(&dep_version_ranges));
-                    if verified_versions.is_empty() {
-                        return Ok(DidDepsChange::Changed);
-                    }
-                }
-                Err(Cancelled) => {
-                    return Err(Cancelled);
-                }
-            }
-        }
-
-        Ok(DidDepsChange::NoChange)
-    }
-}
-
-enum DidDepsChange {
-    Changed,
-    NoChange,
-    NoDeps,
-}
-
-#[cfg(test)]
-pub(crate) mod testing {
-
-    use crate::impls::incremental::DidDepsChange;
-
-    pub(crate) trait DidDepsChangeExt {
-        fn is_changed(&self) -> bool;
-    }
-
-    impl DidDepsChangeExt for DidDepsChange {
-        fn is_changed(&self) -> bool {
-            match self {
-                DidDepsChange::Changed => true,
-                DidDepsChange::NoChange => false,
-                DidDepsChange::NoDeps => false,
-            }
-        }
-    }
-}
diff --git a/dice/dice/src/impls/incremental/tests.rs b/dice/dice/src/impls/incremental/tests.rs
deleted file mode 100644
index f818b91ade3c8..0000000000000
--- a/dice/dice/src/impls/incremental/tests.rs
+++ /dev/null
@@ -1,1142 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */
-
-//!
-//! The incrementality module of BUCK
-//!
-//! This is responsible for performing incremental caching and invalidations
-//! with multiple versions in-flight at the same time.
-//!
-
-use std::fmt::Debug;
-use std::hash::Hash;
-use std::hash::Hasher;
-use std::sync::atomic::AtomicBool;
-use std::sync::atomic::AtomicUsize;
-use std::sync::atomic::Ordering;
-use std::time::Duration;
-
-use allocative::Allocative;
-use assert_matches::assert_matches;
-use async_trait::async_trait;
-use derive_more::Display;
-use dupe::Dupe;
-use futures::pin_mut;
-use more_futures::cancellation::CancellationContext;
-use sorted_vector_map::sorted_vector_set;
-use tokio::sync::Mutex;
-use tokio::sync::Notify;
-
-use crate::api::computations::DiceComputations;
-use crate::api::data::DiceData;
-use crate::api::key::Key;
-use crate::api::storage_type::StorageType;
-use crate::api::user_data::NoOpTracker;
-use crate::api::user_data::UserComputationData;
-use crate::arc::Arc;
-use crate::impls::core::graph::history::testing::CellHistoryExt;
-use crate::impls::core::graph::history::CellHistory;
-use crate::impls::core::graph::types::VersionedGraphKey;
-use crate::impls::core::state::StateRequest;
-use crate::impls::core::versions::VersionEpoch;
-use crate::impls::ctx::SharedLiveTransactionCtx;
-use crate::impls::dice::DiceModern;
-use crate::impls::evaluator::AsyncEvaluator;
-use crate::impls::events::DiceEventDispatcher;
-use crate::impls::incremental::testing::DidDepsChangeExt;
-use crate::impls::incremental::IncrementalEngine;
-use crate::impls::key::DiceKey;
-use crate::impls::key::ParentKey;
-use crate::impls::task::handle::DiceTaskHandle;
-use crate::impls::task::PreviouslyCancelledTask;
-use crate::impls::transaction::ActiveTransactionGuard;
-use crate::impls::transaction::ChangeType;
-use crate::impls::user_cycle::UserCycleDetectorData;
-use crate::impls::value::DiceComputedValue;
-use crate::impls::value::DiceKeyValue;
-use crate::impls::value::DiceValidValue;
-use crate::impls::value::MaybeValidDiceValue;
-use crate::impls::worker::state::DiceWorkerStateCheckingDeps;
-use crate::result::CancellableResult;
-use crate::versions::testing::VersionRangesExt;
-use crate::versions::VersionNumber;
-use crate::versions::VersionRange;
-use crate::versions::VersionRanges;
-
-#[derive(Allocative, Clone, Dupe, Debug, Display, PartialEq, Eq, Hash)]
-struct K;
-
-#[async_trait]
-impl Key for K {
-    type Value = usize;
-
-    async fn compute(
-        &self,
-        _ctx: &mut DiceComputations,
-        _cancellations: &CancellationContext,
-    ) -> Self::Value {
-        unimplemented!("test")
-    }
-
-    fn equality(x: &Self::Value, y: &Self::Value) -> bool {
-        x == y
-    }
-}
-
-#[derive(Allocative, Clone, Debug, Display)]
-#[display(fmt = "{:?}", self)]
-struct IsRan(Arc<AtomicBool>);
-
-#[async_trait]
-impl Key for IsRan {
-    type Value = ();
-
-    async fn compute(
-        &self,
-        _ctx: &mut DiceComputations,
-        _cancellations: &CancellationContext,
-    ) -> Self::Value {
-        self.0.store(true, Ordering::SeqCst);
-    }
-
-    fn equality(_x: &Self::Value, _y: &Self::Value) -> bool {
-        false
-    }
-}
-
-impl PartialEq for IsRan {
-    fn eq(&self, _other: &Self) -> bool {
-        true
-    }
-}
-impl Eq for IsRan {}
-impl Hash for IsRan {
-    fn hash<H: Hasher>(&self, _state: &mut H) {}
-}
-
-#[derive(Allocative, Clone, Dupe, Debug, Display, PartialEq, Eq, Hash)]
-struct Finish;
-
-#[async_trait]
-impl Key for Finish {
-    type Value = ();
-
-    async fn compute(
-        &self,
-        _ctx: &mut DiceComputations,
-        _cancellations: &CancellationContext,
-    ) -> Self::Value {
-    }
-
-    fn equality(_: &Self::Value, _: &Self::Value) -> bool {
-        true
-    }
-}
-
-#[tokio::test]
-async fn test_detecting_changed_dependencies() -> anyhow::Result<()> {
-    let dice = DiceModern::new(DiceData::new());
-    let engine = IncrementalEngine::new(dice.state_handle.dupe(), VersionEpoch::testing_new(0));
-
-    let user_data = std::sync::Arc::new(UserComputationData::new());
-
-    let (ctx, _guard) = dice.testing_shared_ctx(VersionNumber::new(1)).await;
-    ctx.inject(
-        DiceKey { index: 100 },
-        DiceComputedValue::new(
-            MaybeValidDiceValue::valid(DiceValidValue::testing_new(DiceKeyValue::<K>::new(1))),
-            Arc::new(CellHistory::testing_new(&[VersionNumber::new(1)], &[])),
-        ),
-    );
-    let eval = AsyncEvaluator {
-        per_live_version_ctx: ctx.dupe(),
-        user_data: user_data.dupe(),
-        dice: dice.dupe(),
-    };
-
-    let mut task_handle = DiceTaskHandle::testing_new();
-
-    assert!(
-        engine
-            .compute_whether_dependencies_changed(
-                ParentKey::None,
-                eval.dupe(),
-                &VersionRanges::testing_new(sorted_vector_set![VersionRange::bounded(
-                    VersionNumber::new(0),
-                    VersionNumber::new(1)
-                )]),
-                &[DiceKey { index: 100 }],
-                &DiceWorkerStateCheckingDeps::testing(&mut task_handle)
-            )
-            .await?
-            .is_changed()
-    );
-
-    let (ctx, _guard) = dice.testing_shared_ctx(VersionNumber::new(2)).await;
-    ctx.inject(
-        DiceKey { index: 100 },
-        DiceComputedValue::new(
-            MaybeValidDiceValue::valid(DiceValidValue::testing_new(DiceKeyValue::<K>::new(1))),
-            Arc::new(CellHistory::testing_new(&[VersionNumber::new(1)], &[])),
-        ),
-    );
-    let eval = AsyncEvaluator {
-        per_live_version_ctx: ctx.dupe(),
-        user_data: user_data.dupe(),
-        dice: dice.dupe(),
-    };
-
-    assert!(
-        !engine
-            .compute_whether_dependencies_changed(
-                ParentKey::None,
-                eval.dupe(),
-                &VersionRanges::testing_new(sorted_vector_set![VersionRange::bounded(
-                    VersionNumber::new(1),
-                    VersionNumber::new(2)
-                )]),
-                &[DiceKey { index: 100 }],
-                &DiceWorkerStateCheckingDeps::testing(&mut task_handle)
-            )
-            .await?
-            .is_changed()
-    );
-
-    // Now we also check that when deps have transients and such.
-    // for legacy, this would deal with cycles, but modern dice will detect cycles through post
-    // processing and rely on the user cycle detector for now (which returns errors via the result.
-    let (ctx, _guard) = dice.testing_shared_ctx(VersionNumber::new(2)).await;
-    ctx.inject(
-        DiceKey { index: 200 },
-        DiceComputedValue::new(
-            MaybeValidDiceValue::transient(std::sync::Arc::new(DiceKeyValue::<K>::new(1))),
-            Arc::new(CellHistory::testing_new(&[VersionNumber::new(2)], &[])),
-        ),
-    );
-    let eval = AsyncEvaluator {
-        per_live_version_ctx: ctx.dupe(),
-        user_data: user_data.dupe(),
-        dice: dice.dupe(),
-    };
-
-    assert!(
-        engine
-            .compute_whether_dependencies_changed(
-                ParentKey::None,
-                eval.dupe(),
-                &VersionRanges::testing_new(sorted_vector_set![VersionRange::bounded(
-                    VersionNumber::new(1),
-                    VersionNumber::new(2)
-                )]),
-                &[DiceKey { index: 200 }],
-                &DiceWorkerStateCheckingDeps::testing(&mut task_handle)
-            )
-            .await?
-            .is_changed()
-    );
-
-    Ok(())
-}
-
-#[tokio::test]
-async fn test_values_gets_reevaluated_when_deps_change() -> anyhow::Result<()> {
-    let dice = DiceModern::new(DiceData::new());
-
-    let user_data = std::sync::Arc::new(UserComputationData::new());
-    let events = DiceEventDispatcher::new(user_data.tracker.dupe(), dice.dupe());
-
-    let is_ran = Arc::new(AtomicBool::new(false));
-    let key = dice.key_index.index_key(IsRan(is_ran.dupe()));
-
-    let (tx, rx) = tokio::sync::oneshot::channel();
-    dice.state_handle.request(StateRequest::CtxAtVersion {
-        version: VersionNumber::new(0),
-        guard: ActiveTransactionGuard::new(VersionNumber::new(0), dice.state_handle.dupe()),
-        resp: tx,
-    });
-    let (ctx, guard) = rx.await.unwrap();
-
-    // set the initial state
-    let (tx, _rx) = tokio::sync::oneshot::channel();
-    dice.state_handle.request(StateRequest::UpdateComputed {
-        key: VersionedGraphKey::new(VersionNumber::new(0), DiceKey { index: 100 }),
-        epoch: ctx.testing_get_epoch(),
-        storage: StorageType::LastN(1),
-        value: DiceValidValue::testing_new(DiceKeyValue::<K>::new(1)),
-        deps: Arc::new(vec![]),
-        resp: tx,
-    });
-    let (tx, _rx) = tokio::sync::oneshot::channel();
-    dice.state_handle.request(StateRequest::UpdateComputed {
-        key: VersionedGraphKey::new(VersionNumber::new(0), key.dupe()),
-        epoch: ctx.testing_get_epoch(),
-        storage: StorageType::LastN(1),
-        value: DiceValidValue::testing_new(DiceKeyValue::<IsRan>::new(())),
-        deps: Arc::new(vec![DiceKey { index: 100 }]),
-        resp: tx,
-    });
-
-    let (tx, rx) = tokio::sync::oneshot::channel();
-    dice.state_handle.request(StateRequest::UpdateState {
-        changes: vec![(key.dupe(), ChangeType::TestingSoftDirty)],
-        resp: tx,
-    });
-    let v = rx.await.unwrap();
-    drop(guard);
-    drop(ctx);
-
-    let (ctx, _guard) = dice.testing_shared_ctx(v).await;
-    ctx.inject(
-        DiceKey { index: 100 },
-        DiceComputedValue::new(
-            MaybeValidDiceValue::valid(DiceValidValue::testing_new(DiceKeyValue::<K>::new(1))),
-            Arc::new(CellHistory::verified(VersionNumber::new(0))),
-        ),
-    );
-    let eval = AsyncEvaluator {
-        per_live_version_ctx: ctx.dupe(),
-        user_data: user_data.dupe(),
-        dice: dice.dupe(),
-    };
-
-    let task = IncrementalEngine::spawn_for_key(
-        key.dupe(),
-        ctx.testing_get_epoch(),
-        eval.dupe(),
-        UserCycleDetectorData::testing_new(),
-        events.dupe(),
-        None,
-    );
-    let res = task
-        .depended_on_by(ParentKey::None)
-        .not_cancelled()
-        .unwrap()
-        .await?;
-    assert_eq!(
-        res.history().get_verified_ranges(),
-        VersionRanges::testing_new(sorted_vector_set![VersionRange::begins_with(
-            VersionNumber::new(0)
-        )])
-    );
-    assert!(!is_ran.load(Ordering::SeqCst));
-
-    // next version
-    let (tx, rx) = tokio::sync::oneshot::channel();
-    dice.state_handle.request(StateRequest::UpdateState {
-        changes: vec![(key.dupe(), ChangeType::TestingSoftDirty)],
-        resp: tx,
-    });
-    let v = rx.await.unwrap();
-
-    let (ctx, _guard) = dice.testing_shared_ctx(v).await;
-    ctx.inject(
-        DiceKey { index: 100 },
-        DiceComputedValue::new(
-            MaybeValidDiceValue::valid(DiceValidValue::testing_new(DiceKeyValue::<K>::new(1))),
-            Arc::new(CellHistory::verified(v)),
-        ),
-    );
-    let eval = AsyncEvaluator {
-        per_live_version_ctx: ctx.dupe(),
-        user_data: user_data.dupe(),
-        dice: dice.dupe(),
-    };
-
-    let task = IncrementalEngine::spawn_for_key(
-        key.dupe(),
-        ctx.testing_get_epoch(),
-        eval.dupe(),
-        UserCycleDetectorData::testing_new(),
-        events.dupe(),
-        None,
-    );
-    let res = task
-        .depended_on_by(ParentKey::None)
-        .not_cancelled()
-        .unwrap()
-        .await?;
-    assert_eq!(
-        res.history().get_verified_ranges(),
-        VersionRanges::testing_new(sorted_vector_set![VersionRange::begins_with(v)])
-    );
-    assert_eq!(is_ran.load(Ordering::SeqCst), true);
-    is_ran.store(false, Ordering::SeqCst);
-
-    // now force the dependency to have version numbers [1, 2]
-    // also force dirty the root node so we actually check its deps since the above would
-    // short circuit dirtying due to the dep value actually being equal.
-
-    let (tx, rx) = tokio::sync::oneshot::channel();
-    dice.state_handle.request(StateRequest::UpdateState {
-        changes: vec![(key.dupe(), ChangeType::TestingSoftDirty)],
-        resp: tx,
-    });
-    let new_v = rx.await.unwrap();
-
-    let (ctx, _guard) = dice.testing_shared_ctx(v).await;
-
-    // TODO(nga): `inject` violates `SharedCache` invariant:
-    // value computed should not be downgraded to not computed.
-    if true {
-        return Ok(());
-    }
-
-    ctx.inject(
-        DiceKey { index: 100 },
-        DiceComputedValue::new(
-            MaybeValidDiceValue::valid(DiceValidValue::testing_new(DiceKeyValue::<K>::new(1))),
-            Arc::new(CellHistory::testing_new(&[v, new_v], &[])),
-        ),
-    );
-    let eval = AsyncEvaluator {
-        per_live_version_ctx: ctx.dupe(),
-        user_data: user_data.dupe(),
-        dice: dice.dupe(),
-    };
-
-    let task = IncrementalEngine::spawn_for_key(
-        key.dupe(),
-        ctx.testing_get_epoch(),
-        eval.dupe(),
-        UserCycleDetectorData::testing_new(),
-        events.dupe(),
-        None,
-    );
-    let res = task
-        .depended_on_by(ParentKey::None)
-        .not_cancelled()
-        .unwrap()
-        .await?;
-    assert_eq!(
-        res.history().get_verified_ranges(),
-        VersionRanges::testing_new(sorted_vector_set![VersionRange::bounded(v, new_v)])
-    );
-    assert_eq!(is_ran.load(Ordering::SeqCst), false);
-
-    Ok(())
-}
-
-#[tokio::test]
-async fn when_equal_return_same_instance() -> anyhow::Result<()> {
-    let dice = DiceModern::new(DiceData::new());
-
-    let user_data = std::sync::Arc::new(UserComputationData::new());
-    let events = DiceEventDispatcher::new(user_data.tracker.dupe(), dice.dupe());
-
-    let instance = Arc::new(AtomicUsize::new(0));
-
-    #[derive(Clone, Dupe, Allocative)]
-    struct InstanceEqual {
-        instance_count: usize,
-    }
-
-    impl PartialEq for InstanceEqual {
-        fn eq(&self, _other: &Self) -> bool {
-            true
-        }
-    }
-
-    #[derive(Allocative, Clone, Debug, Display)]
-    #[display(fmt = "{:?}", self)]
-    struct InstanceEqualKey(Arc<AtomicUsize>);
-
-    #[async_trait]
-    impl Key for InstanceEqualKey {
-        type Value = InstanceEqual;
-
-        async fn compute(
-            &self,
-            _ctx: &mut DiceComputations,
-            _cancellations: &CancellationContext,
-        ) -> Self::Value {
-            InstanceEqual {
-                instance_count: self.0.fetch_add(1, Ordering::SeqCst),
-            }
-        }
-
-        fn equality(x: &Self::Value, y: &Self::Value) -> bool {
-            x == y
-        }
-    }
-    impl PartialEq for InstanceEqualKey {
-        fn eq(&self, _other: &Self) -> bool {
-            true
-        }
-    }
-    impl Eq for InstanceEqualKey {}
-    impl Hash for InstanceEqualKey {
-        fn hash<H: Hasher>(&self, _state: &mut H) {}
-    }
-
-    let key = dice.key_index.index_key(InstanceEqualKey(instance.dupe()));
-
-    let (tx, rx) = tokio::sync::oneshot::channel();
-    dice.state_handle.request(StateRequest::UpdateState {
-        changes: vec![],
-        resp: tx,
-    });
-    let v = rx.await.unwrap();
-
-    let (ctx, _guard) = dice.testing_shared_ctx(v).await;
-    let eval = AsyncEvaluator {
-        per_live_version_ctx: ctx.dupe(),
-        user_data: user_data.dupe(),
-        dice: dice.dupe(),
-    };
-
-    let task = IncrementalEngine::spawn_for_key(
-        key.dupe(),
-        ctx.testing_get_epoch(),
-        eval.dupe(),
-        UserCycleDetectorData::testing_new(),
-        events.dupe(),
-        None,
-    );
-    let res = task
-        .depended_on_by(ParentKey::None)
-        .not_cancelled()
-        .unwrap()
-        .await?;
-
-    let (tx, rx) = tokio::sync::oneshot::channel();
-    dice.state_handle.request(StateRequest::UpdateState {
-        changes: vec![(key.dupe(), ChangeType::Invalidate)],
-        resp: tx,
-    });
-    let v = rx.await.unwrap();
-
-    let (ctx, _guard) = dice.testing_shared_ctx(v).await;
-    let eval = AsyncEvaluator {
-        per_live_version_ctx: ctx.dupe(),
-        user_data: user_data.dupe(),
-        dice: dice.dupe(),
-    };
-
-    let task = IncrementalEngine::spawn_for_key(
-        key.dupe(),
-        ctx.testing_get_epoch(),
-        eval.dupe(),
-        UserCycleDetectorData::testing_new(),
-        events.dupe(),
-        None,
-    );
-    let res2 = task
-        .depended_on_by(ParentKey::None)
-        .not_cancelled()
-        .unwrap()
-        .await?;
-
-    // verify that we incremented the total instance counter
-    assert_eq!(instance.load(Ordering::SeqCst), 2);
-
-    assert_eq!(
-        res.history().get_verified_ranges(),
-        VersionRanges::testing_new(
-            sorted_vector_set! { VersionRange::begins_with(VersionNumber::new(0))}
-        )
-    );
-
-    // verify that the instance we return and store is the same as the original instance
-    assert_eq!(
-        res.value()
-            .downcast_maybe_transient::<InstanceEqual>()
-            .unwrap()
-            .instance_count,
-        res2.value()
-            .downcast_maybe_transient::<InstanceEqual>()
-            .unwrap()
-            .instance_count
-    );
-
-    Ok(())
-}
-
-#[tokio::test]
-async fn spawn_with_no_previously_cancelled_task() {
-    let dice = DiceModern::new(DiceData::new());
-
-    let (shared_ctx, _guard) = dice.testing_shared_ctx(VersionNumber::new(0)).await;
-
-    let is_ran = Arc::new(AtomicBool::new(false));
-    let k = dice.key_index.index_key(IsRan(is_ran.dupe()));
-
-    let extra = std::sync::Arc::new(UserComputationData::new());
-    let eval = AsyncEvaluator {
-        per_live_version_ctx: shared_ctx.dupe(),
-        user_data: extra.dupe(),
-        dice: dice.dupe(),
-    };
-    let cycles = UserCycleDetectorData::testing_new();
-    let events_dispatcher = DiceEventDispatcher::new(std::sync::Arc::new(NoOpTracker), dice.dupe());
-    let previously_cancelled_task = None;
-
-    let task = IncrementalEngine::spawn_for_key(
-        k,
-        VersionEpoch::testing_new(0),
-        eval,
-        cycles,
-        events_dispatcher,
-        previously_cancelled_task,
-    );
-
-    assert!(
-        task.depended_on_by(ParentKey::None)
-            .not_cancelled()
-            .unwrap()
-            .await
-            .is_ok()
-    );
-
-    assert!(is_ran.load(Ordering::SeqCst));
-}
-
-#[tokio::test]
-async fn spawn_with_previously_cancelled_task_that_cancelled() {
-    let dice = DiceModern::new(DiceData::new());
-
-    let (shared_ctx, _guard) = dice.testing_shared_ctx(VersionNumber::new(0)).await;
-
-    let extra = std::sync::Arc::new(UserComputationData::new());
-    let eval = AsyncEvaluator {
-        per_live_version_ctx: shared_ctx.dupe(),
-        user_data: extra.dupe(),
-        dice: dice.dupe(),
-    };
-    let cycles = UserCycleDetectorData::testing_new();
-    let events_dispatcher = DiceEventDispatcher::new(std::sync::Arc::new(NoOpTracker), dice.dupe());
-
-    #[derive(Allocative, Clone, Dupe, Debug, Display, PartialEq, Eq, Hash)]
-    struct CancellableNeverFinish;
-
-    #[async_trait]
-    impl Key for CancellableNeverFinish {
-        type Value = ();
-
-        async fn compute(
-            &self,
-            _ctx: &mut DiceComputations,
-            _cancellations: &CancellationContext,
-        ) -> Self::Value {
-            futures::future::pending().await
-        }
-
-        fn equality(_: &Self::Value, _: &Self::Value) -> bool {
-            unreachable!("test")
-        }
-    }
-
-    let k = dice.key_index.index_key(CancellableNeverFinish);
-    let previous_task = IncrementalEngine::spawn_for_key(
-        k,
-        VersionEpoch::testing_new(0),
-        eval.dupe(),
-        cycles,
-        events_dispatcher.dupe(),
-        None,
-    );
-
-    previous_task.cancel();
-
-    let previously_cancelled_task = Some(PreviouslyCancelledTask {
-        previous: previous_task,
-    });
-
-    let is_ran = Arc::new(AtomicBool::new(false));
-    let k = dice.key_index.index_key(IsRan(is_ran.dupe()));
-    let cycles = UserCycleDetectorData::testing_new();
-    let task = IncrementalEngine::spawn_for_key(
-        k,
-        VersionEpoch::testing_new(0),
-        eval,
-        cycles,
-        events_dispatcher,
-        previously_cancelled_task,
-    );
-
-    assert!(
-        task.depended_on_by(ParentKey::None)
-            .not_cancelled()
-            .unwrap()
-            .await
-            .is_ok()
-    );
-
-    assert!(is_ran.load(Ordering::SeqCst));
-}
-
-#[tokio::test]
-async fn spawn_with_previously_cancelled_task_that_finished() {
-    let dice = DiceModern::new(DiceData::new());
-
-    let (shared_ctx, _guard) = dice.testing_shared_ctx(VersionNumber::new(0)).await;
-
-    let extra = std::sync::Arc::new(UserComputationData::new());
-    let eval = AsyncEvaluator {
-        per_live_version_ctx: shared_ctx.dupe(),
-        user_data: extra.dupe(),
-        dice: dice.dupe(),
-    };
-    let cycles = UserCycleDetectorData::testing_new();
-    let events_dispatcher = DiceEventDispatcher::new(std::sync::Arc::new(NoOpTracker), dice.dupe());
-
-    #[derive(Allocative, Clone, Dupe, Debug, Display, PartialEq, Eq, Hash)]
-    struct Finish;
-
-    #[async_trait]
-    impl Key for Finish {
-        type Value = ();
-
-        async fn compute(
-            &self,
-            _ctx: &mut DiceComputations,
-            _cancellations: &CancellationContext,
-        ) -> Self::Value {
-        }
-
-        fn equality(_: &Self::Value, _: &Self::Value) -> bool {
-            true
-        }
-    }
-
-    let k = dice.key_index.index_key(Finish);
-    let previous_task = IncrementalEngine::spawn_for_key(
-        k,
-        VersionEpoch::testing_new(0),
-        eval.dupe(),
-        cycles,
-        events_dispatcher.dupe(),
-        None,
-    );
-    // wait for it to finish then trigger cancel
-    previous_task
-        .depended_on_by(ParentKey::None)
-        .not_cancelled()
-        .unwrap()
-        .await
-        .unwrap();
-    previous_task.cancel();
-
-    let previously_cancelled_task = Some(PreviouslyCancelledTask {
-        previous: previous_task,
-    });
-
-    let is_ran = Arc::new(AtomicBool::new(false));
-    let k = dice.key_index.index_key(IsRan(is_ran.dupe()));
-    let cycles = UserCycleDetectorData::testing_new();
-    let task = IncrementalEngine::spawn_for_key(
-        k,
-        VersionEpoch::testing_new(0),
-        eval,
-        cycles,
-        events_dispatcher,
-        previously_cancelled_task,
-    );
-
-    assert!(
-        task.depended_on_by(ParentKey::None)
-            .not_cancelled()
-            .unwrap()
-            .await
-            .is_ok()
-    );
-
-    assert!(!is_ran.load(Ordering::SeqCst));
-}
-
-#[tokio::test]
-async fn mismatch_epoch_results_in_cancelled_result() {
-    let dice = DiceModern::new(DiceData::new());
-
-    let (shared_ctx, guard) = dice.testing_shared_ctx(VersionNumber::new(0)).await;
-
-    let extra = std::sync::Arc::new(UserComputationData::new());
-    let eval = AsyncEvaluator {
-        per_live_version_ctx: shared_ctx.dupe(),
-        user_data: extra.dupe(),
-        dice: dice.dupe(),
-    };
-    let cycles = UserCycleDetectorData::testing_new();
-    let events_dispatcher = DiceEventDispatcher::new(std::sync::Arc::new(NoOpTracker), dice.dupe());
-
-    // trigger dice to delete and update the epoch
-    drop(guard);
-
-    let k = dice.key_index.index_key(Finish);
-    let task = IncrementalEngine::spawn_for_key(
-        k,
-        shared_ctx.testing_get_epoch(),
-        eval.dupe(),
-        cycles,
-        events_dispatcher.dupe(),
-        None,
-    );
-    // wait for it to finish then trigger cancel
-    assert_matches!(
-        task.depended_on_by(ParentKey::None)
-            .not_cancelled()
-            .unwrap()
-            .await,
-        Err(_) => {}
-    );
-}
-
-#[tokio::test]
-async fn spawn_with_previously_cancelled_task_nested_cancelled() -> anyhow::Result<()> {
-    #[derive(Allocative, Clone, Debug, Display)]
-    #[display(fmt = "{:?}", self)]
-    #[allocative(skip)]
-    struct DontRunTwice {
-        is_started: Arc<Notify>,
-        exclusive: Arc<Mutex<bool>>,
-        prevent_cancel: Arc<Notify>,
-    }
-
-    impl PartialEq for DontRunTwice {
-        fn eq(&self, _other: &Self) -> bool {
-            true
-        }
-    }
-    impl Eq for DontRunTwice {}
-    impl Hash for DontRunTwice {
-        fn hash<H: Hasher>(&self, _state: &mut H) {}
-    }
-
-    #[async_trait]
-    impl Key for DontRunTwice {
-        type Value = ();
-
-        async fn compute(
-            &self,
-            _ctx: &mut DiceComputations,
-            cancellations: &CancellationContext,
-        ) -> Self::Value {
-            let mut guard = self
-                .exclusive
-                .try_lock()
-                .expect("Can only have one concurrent execution");
-
-            if *guard {
-                // Last attempt, return.
-            } else {
-                // Note that we did our first execution. Keep the lock held. The point of the
-                // test is to prove that nobody will get to run before we exit and drop it.
-                *guard = true;
-
-                cancellations
-                    .with_structured_cancellation(|obs| async move {
-                        // Resume the rest of the code.
-                        self.is_started.notify_one();
-                        // Wait for our cancellation.
-                        obs.await;
-
-                        // Yield. If the final evaluation is ready (that would be a bug!), it will
-                        // run now.
-                        tokio::task::yield_now().await;
-                        self.prevent_cancel.notified().await;
-                    })
-                    .await;
-
-                // Never return, but this bit will be the one that's cancelled.
-                futures::future::pending().await
-            }
-        }
-
-        fn equality(x: &Self::Value, y: &Self::Value) -> bool {
-            x == y
-        }
-    }
-
-    let dice = DiceModern::new(DiceData::new());
-
-    let exclusive = Arc::new(Mutex::new(false));
-    let is_started = Arc::new(Notify::new());
-    let prevent_cancel = Arc::new(Notify::new());
-
-    let key = DontRunTwice {
-        exclusive,
-        is_started: is_started.dupe(),
-        prevent_cancel: prevent_cancel.dupe(),
-    };
-
-    let (shared_ctx, _guard) = dice.testing_shared_ctx(VersionNumber::new(0)).await;
-
-    let k = dice.key_index.index_key(key);
-
-    let extra = std::sync::Arc::new(UserComputationData::new());
-    let eval = AsyncEvaluator {
-        per_live_version_ctx: shared_ctx.dupe(),
-        user_data: extra.dupe(),
-        dice: dice.dupe(),
-    };
-    let cycles = UserCycleDetectorData::testing_new();
-    let events_dispatcher = DiceEventDispatcher::new(std::sync::Arc::new(NoOpTracker), dice.dupe());
-
-    let first_task = IncrementalEngine::spawn_for_key(
-        k,
-        VersionEpoch::testing_new(0),
-        eval.dupe(),
-        cycles,
-        events_dispatcher.dupe(),
-        None,
-    );
-    is_started.notified().await;
-    first_task.cancel();
-
-    let cycles = UserCycleDetectorData::testing_new();
-    let second_task = IncrementalEngine::spawn_for_key(
-        k,
-        VersionEpoch::testing_new(0),
-        eval.dupe(),
-        cycles,
-        events_dispatcher.dupe(),
-        Some(PreviouslyCancelledTask {
-            previous: first_task,
-        }),
-    );
-
-    second_task.cancel();
-
-    let cycles = UserCycleDetectorData::testing_new();
-    let third_task = IncrementalEngine::spawn_for_key(
-        k,
-        VersionEpoch::testing_new(0),
-        eval,
-        cycles,
-        events_dispatcher,
-        Some(PreviouslyCancelledTask {
-            previous: second_task,
-        }),
-    );
-
-    let promise = third_task
-        .depended_on_by(ParentKey::None)
-        .not_cancelled()
-        .unwrap();
-
-    pin_mut!(promise);
-
-    // if we poll before we allow cancellation, we shouldn't complete
-    // tokio doesn't always guarantee the yields switches between tasks so this makes the test
-    // slightly more resilient to scheduling
-    let res = tokio::time::timeout(Duration::from_secs(5), &mut promise).await;
-
-    assert!(res.is_err());
-
-    prevent_cancel.notify_one();
-    let _ignored = promise.await?;
-
-    Ok(())
-}
-
-#[tokio::test]
-async fn test_values_gets_resurrect_if_deps_dont_change_regardless_of_equality()
--> anyhow::Result<()> {
-    #[derive(Allocative, Clone, Debug, Display)]
-    #[display(fmt = "{:?}", self)]
-    struct NeverEqual;
-
-    #[async_trait]
-    impl Key for NeverEqual {
-        type Value = Arc<()>;
-
-        async fn compute(
-            &self,
-            _ctx: &mut DiceComputations,
-            _cancellations: &CancellationContext,
-        ) -> Self::Value {
-            panic!("never ran as deps equal")
-        }
-
-        fn equality(_x: &Self::Value, _y: &Self::Value) -> bool {
-            false
-        }
-    }
-
-    impl PartialEq for NeverEqual {
-        fn eq(&self, _other: &Self) -> bool {
-            true
-        }
-    }
-    impl Eq for NeverEqual {}
-    impl Hash for NeverEqual {
-        fn hash<H: Hasher>(&self, _state: &mut H) {}
-    }
-
-    /// creates the initial test graph with a single key that depends on a value
-    async fn populate_initial_graph(
-        dice: &std::sync::Arc<DiceModern>,
-        compute_key: DiceKey,
-        compute_res: DiceValidValue,
-    ) {
-        let (ctx, _guard) = get_ctx_at_version(dice, VersionNumber::new(0)).await;
-
-        // set the initial state
-        let _ignore = update_computed_value(
-            dice,
-            &ctx,
-            DiceKey { index: 100 },
-            VersionNumber::new(0),
-            DiceValidValue::testing_new(DiceKeyValue::<K>::new(1)),
-            Arc::new(vec![]),
-        );
-        let _ignore = update_computed_value(
-            dice,
-            &ctx,
-            compute_key.dupe(),
-            VersionNumber::new(0),
-            compute_res.dupe(),
-            Arc::new(vec![DiceKey { index: 100
}]), - ); - } - - /// gets a new context where the parent is dirtied such that it needs to check its deps, and the - /// dep has a history as provided - async fn ctx_with_dep_having_history( - dice: &std::sync::Arc, - parent_key: DiceKey, - dep_history: CellHistory, - ) -> (SharedLiveTransactionCtx, ActiveTransactionGuard) { - let v = soft_dirty(dice, parent_key.dupe()).await; - let (ctx, guard) = dice.testing_shared_ctx(v).await; - ctx.inject( - DiceKey { index: 100 }, - DiceComputedValue::new( - MaybeValidDiceValue::valid(DiceValidValue::testing_new(DiceKeyValue::::new(1))), - Arc::new(dep_history), - ), - ); - - (ctx, guard) - } - - let dice = DiceModern::new(DiceData::new()); - - let user_data = std::sync::Arc::new(UserComputationData::new()); - let events = DiceEventDispatcher::new(user_data.tracker.dupe(), dice.dupe()); - - let res = DiceValidValue::testing_new(DiceKeyValue::::new(Arc::new(()))); - let key = dice.key_index.index_key(NeverEqual); - - populate_initial_graph(&dice, key.dupe(), res.dupe()).await; - - let (ctx, _guard) = ctx_with_dep_having_history( - &dice, - key.dupe(), - CellHistory::verified(VersionNumber::new(0)), - ) - .await; - - let eval = AsyncEvaluator { - per_live_version_ctx: ctx.dupe(), - user_data: user_data.dupe(), - dice: dice.dupe(), - }; - - let task = IncrementalEngine::spawn_for_key( - key.dupe(), - ctx.testing_get_epoch(), - eval.dupe(), - UserCycleDetectorData::testing_new(), - events.dupe(), - None, - ); - let computed_res = task - .depended_on_by(ParentKey::None) - .not_cancelled() - .unwrap() - .await?; - assert_eq!( - computed_res.history().get_verified_ranges(), - VersionRanges::testing_new(sorted_vector_set![VersionRange::begins_with( - VersionNumber::new(0) - )]) - ); - assert!(computed_res.value().instance_equal(&res)); - - // next version - let (ctx, _guard) = ctx_with_dep_having_history( - &dice, - key.dupe(), - CellHistory::verified(VersionNumber::new(0)), - ) - .await; - - let eval = AsyncEvaluator { - per_live_version_ctx: ctx.dupe(), - user_data: user_data.dupe(), - dice: dice.dupe(), - }; - - let task = IncrementalEngine::spawn_for_key( - key.dupe(), - ctx.testing_get_epoch(), - eval.dupe(), - UserCycleDetectorData::testing_new(), - events.dupe(), - None, - ); - let computed_res = task - .depended_on_by(ParentKey::None) - .not_cancelled() - .unwrap() - .await?; - assert_eq!( - computed_res.history().get_verified_ranges(), - VersionRanges::testing_new(sorted_vector_set![VersionRange::begins_with( - VersionNumber::new(0) - )]) - ); - assert!(computed_res.value().instance_equal(&res)); - - Ok(()) -} - -async fn soft_dirty(dice: &std::sync::Arc, key: DiceKey) -> VersionNumber { - let (tx, rx) = tokio::sync::oneshot::channel(); - dice.state_handle.request(StateRequest::UpdateState { - changes: vec![(key.dupe(), ChangeType::TestingSoftDirty)], - resp: tx, - }); - rx.await.unwrap() -} - -fn update_computed_value( - dice: &std::sync::Arc, - ctx: &SharedLiveTransactionCtx, - k: DiceKey, - v: VersionNumber, - value: DiceValidValue, - deps: Arc>, -) -> tokio::sync::oneshot::Receiver> { - let (tx, rx) = tokio::sync::oneshot::channel(); - dice.state_handle.request(StateRequest::UpdateComputed { - key: VersionedGraphKey::new(v, k), - epoch: ctx.testing_get_epoch(), - storage: StorageType::LastN(1), - value, - deps, - resp: tx, - }); - - rx -} - -async fn get_ctx_at_version( - dice: &std::sync::Arc, - v: VersionNumber, -) -> (SharedLiveTransactionCtx, ActiveTransactionGuard) { - let (tx, rx) = tokio::sync::oneshot::channel(); - 
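// These helpers all talk to the core state actor through the same oneshot
// request/response convention: build a channel, ship the Sender inside the
// StateRequest, await the Receiver for the reply. The transaction.rs hunk
// later in this diff replaces this dance with typed async helpers. A minimal
// sketch of both shapes, condensed from code elsewhere in this diff (error
// handling elided):
async fn current_version_sketch(dice: &std::sync::Arc<DiceModern>) -> VersionNumber {
    // old convention: the reply channel travels inside the request enum
    let (tx, rx) = tokio::sync::oneshot::channel();
    dice.state_handle.request(StateRequest::CurrentVersion { resp: tx });
    rx.await.unwrap()
    // after this diff, the same exchange is a typed helper:
    //     dice.state_handle.current_version().await
}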
dice.state_handle.request(StateRequest::CtxAtVersion { - version: VersionNumber::new(0), - guard: ActiveTransactionGuard::new(v, dice.state_handle.dupe()), - resp: tx, - }); - let (ctx, guard) = rx.await.unwrap(); - (ctx, guard) -} diff --git a/dice/dice/src/impls/key.rs b/dice/dice/src/impls/key.rs index 0eae0759ee172..f96827205aea7 100644 --- a/dice/dice/src/impls/key.rs +++ b/dice/dice/src/impls/key.rs @@ -8,7 +8,6 @@ */ use std::any::Any; -use std::fmt::Display; use std::fmt::Formatter; use std::hash::Hash; use std::hash::Hasher; @@ -16,13 +15,15 @@ use std::sync::Arc; use allocative::Allocative; use async_trait::async_trait; +use buck2_futures::cancellation::CancellationContext; use cmp_any::PartialEqAny; use derive_more::Display; use dupe::Dupe; -use fnv::FnvHasher; -use more_futures::cancellation::CancellationContext; +use fxhash::FxHasher; use crate::api::computations::DiceComputations; +use crate::api::demand::DemandRef; +use crate::api::demand::DemandValue; use crate::api::key::Key; use crate::api::projection::DiceProjectionComputations; use crate::api::projection::ProjectionKey; @@ -33,6 +34,7 @@ use crate::impls::value::DiceKeyValue; use crate::impls::value::DiceProjectValue; use crate::impls::value::DiceValueDyn; use crate::impls::value::MaybeValidDiceValue; +use crate::Demand; /// Type erased internal dice key #[derive( @@ -92,6 +94,26 @@ impl DiceKeyErased { } } + pub(crate) fn request_value(&self) -> Option { + let mut demand_impl = DemandValue { value: None }; + let mut demand = Demand::new(&mut demand_impl); + match self { + DiceKeyErased::Key(k) => k.provide(&mut demand), + DiceKeyErased::Projection(..) => {} + } + demand_impl.value + } + + pub(crate) fn request_ref(&self) -> Option<&T> { + let mut demand_impl = DemandRef { value: None }; + let mut demand = Demand::new(&mut demand_impl); + match self { + DiceKeyErased::Key(k) => k.provide(&mut demand), + DiceKeyErased::Projection(..) 
=> {} + } + demand_impl.value + } + pub(crate) fn downcast(self) -> Option> { match self { DiceKeyErased::Key(k) => { @@ -111,7 +133,7 @@ impl DiceKeyErased { } } - pub(crate) fn as_ref<'a>(&'a self) -> DiceKeyErasedRef<'a> { + pub(crate) fn as_ref(&self) -> DiceKeyErasedRef<'_> { match self { DiceKeyErased::Key(k) => DiceKeyErasedRef::Key(&**k), DiceKeyErased::Projection(proj) => { @@ -271,6 +293,8 @@ pub(crate) trait DiceKeyDyn: Allocative + Display + Send + Sync + 'static { fn key_type_name(&self) -> &'static str; fn storage_type(&self) -> StorageType; + + fn provide<'a>(&'a self, demand: &mut Demand<'a>); } #[async_trait] @@ -310,6 +334,10 @@ where fn storage_type(&self) -> StorageType { K::storage_type() } + + fn provide<'a>(&'a self, demand: &mut Demand<'a>) { + K::provide(self, demand) + } } pub(crate) trait DiceProjectionDyn: Allocative + Display + Send + Sync + 'static { @@ -391,7 +419,7 @@ impl ProjectionWithBase { } fn hash(&self) -> u64 { - let mut hasher = FnvHasher::default(); + let mut hasher = FxHasher::default(); self.base.hash(&mut hasher); self.proj.hash().hash(&mut hasher); @@ -429,7 +457,7 @@ impl<'a> ProjectionWithBaseRef<'a> { } fn hash(&self) -> u64 { - let mut hasher = FnvHasher::default(); + let mut hasher = FxHasher::default(); self.base.hash(&mut hasher); self.proj.hash().hash(&mut hasher); @@ -530,9 +558,9 @@ mod tests { use std::sync::Arc; use allocative::Allocative; + use buck2_futures::cancellation::CancellationContext; use derive_more::Display; use dupe::Dupe; - use more_futures::cancellation::CancellationContext; use crate::api::computations::DiceComputations; use crate::api::key::Key; diff --git a/dice/dice/src/impls/key_index.rs b/dice/dice/src/impls/key_index.rs index fc70b60631e27..89c1334719dae 100644 --- a/dice/dice/src/impls/key_index.rs +++ b/dice/dice/src/impls/key_index.rs @@ -215,9 +215,9 @@ mod tests { use allocative::Allocative; use async_trait::async_trait; + use buck2_futures::cancellation::CancellationContext; use derive_more::Display; use dupe::Dupe; - use more_futures::cancellation::CancellationContext; use crate::api::computations::DiceComputations; use crate::api::key::Key; diff --git a/dice/dice/src/impls/mod.rs b/dice/dice/src/impls/mod.rs deleted file mode 100644 index 2fd9fa4b07db0..0000000000000 --- a/dice/dice/src/impls/mod.rs +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
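// `request_value`/`request_ref` above follow a std-Provider-style pattern: a
// key advertises data in `Key::provide`, and type-erased callers pull it back
// out through a `Demand`. A hedged sketch of the key side -- it assumes
// `Demand` exposes a `provide_value` method, which is not shown in this hunk:
#[derive(Allocative, Clone, Dupe, Debug, Display, PartialEq, Eq, Hash)]
#[display("{:?}", self)]
struct ProvidingKey(u32);

#[async_trait]
impl Key for ProvidingKey {
    type Value = u32;

    async fn compute(
        &self,
        _ctx: &mut DiceComputations,
        _cancellations: &CancellationContext,
    ) -> Self::Value {
        self.0
    }

    fn equality(x: &Self::Value, y: &Self::Value) -> bool {
        x == y
    }

    fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
        // assumed API: makes DiceKeyErased::request_value return Some(self.0)
        // for this key
        demand.provide_value(self.0);
    }
}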
- */ - -pub(crate) mod cache; -pub(crate) mod core; -pub(crate) mod ctx; -mod dep_trackers; -pub(crate) mod dice; -pub(crate) mod evaluator; -pub(crate) mod events; -mod hash; -pub(crate) mod incremental; -pub(crate) mod key; -mod key_index; -pub(crate) mod opaque; -pub(crate) mod task; -#[cfg(test)] -mod tests; -pub(crate) mod transaction; -pub(crate) mod user_cycle; -pub(crate) mod value; -pub(crate) mod worker; diff --git a/dice/dice/src/impls/opaque.rs b/dice/dice/src/impls/opaque.rs index 0e7437aa916c6..237273d5957bc 100644 --- a/dice/dice/src/impls/opaque.rs +++ b/dice/dice/src/impls/opaque.rs @@ -10,65 +10,38 @@ use std::marker::PhantomData; use derivative::Derivative; -use dupe::Dupe; -use crate::api::error::DiceResult; use crate::api::key::Key; -use crate::api::projection::ProjectionKey; -use crate::impls::ctx::ModernComputeCtx; use crate::impls::key::DiceKey; use crate::impls::value::MaybeValidDiceValue; +use crate::impls::value::TrackedInvalidationPaths; #[derive(Derivative)] #[derivative(Debug)] -pub(crate) struct OpaqueValueModern<'a, K: Key> { - derive_from_key: DiceKey, +pub(crate) struct OpaqueValueModern { + pub(crate) derive_from_key: DiceKey, #[derivative(Debug = "ignore")] - derive_from: MaybeValidDiceValue, - #[derivative(Debug = "ignore")] - parent_computation: &'a ModernComputeCtx, + pub(crate) derive_from: MaybeValidDiceValue, + pub(crate) invalidation_paths: TrackedInvalidationPaths, ty: PhantomData, } -impl<'a, K> OpaqueValueModern<'a, K> +impl OpaqueValueModern where K: Key, { pub(crate) fn new( - parent_computation: &'a ModernComputeCtx, derive_from_key: DiceKey, derive_from: MaybeValidDiceValue, + invalidation_paths: TrackedInvalidationPaths, ) -> Self { Self { derive_from_key, derive_from, - parent_computation, + invalidation_paths, ty: Default::default(), } } - - pub(crate) fn projection
    (&self, projection_key: &P) -> DiceResult - where - P: ProjectionKey, - { - self.parent_computation.project( - projection_key, - self.derive_from_key, - self.derive_from.dupe(), - ) - } - - /// Get a value and record parent computation dependency on `K`. - pub(crate) fn into_value(self) -> K::Value { - self.parent_computation - .dep_trackers() - .record(self.derive_from_key, self.derive_from.validity()); - - self.derive_from - .downcast_maybe_transient::() - .expect("type mismatch") - .dupe() - } } #[cfg(test)] @@ -77,18 +50,19 @@ mod tests { use allocative::Allocative; use async_trait::async_trait; + use buck2_futures::cancellation::CancellationContext; use derive_more::Display; - use more_futures::cancellation::CancellationContext; use crate::api::data::DiceData; use crate::api::key::Key; - use crate::impls::dep_trackers::testing::RecordingDepsTrackersExt; + use crate::impls::deps::testing::RecordingDepsTrackersExt; use crate::impls::dice::DiceModern; use crate::impls::key::DiceKey; use crate::impls::opaque::OpaqueValueModern; use crate::impls::value::DiceKeyValue; use crate::impls::value::DiceValidity; use crate::impls::value::MaybeValidDiceValue; + use crate::impls::value::TrackedInvalidationPaths; use crate::DiceComputations; use crate::HashSet; @@ -116,21 +90,21 @@ mod tests { async fn opaque_records_deps_when_used() { let dice = DiceModern::new(DiceData::new()); - let ctx = dice.updater().commit().await; + let mut ctx = dice.updater().commit().await; let opaque = OpaqueValueModern::::new( - &ctx, DiceKey { index: 0 }, MaybeValidDiceValue::new(Arc::new(DiceKeyValue::::new(1)), DiceValidity::Valid), + TrackedInvalidationPaths::clean(), ); - assert_eq!(ctx.dep_trackers().recorded_deps(), &HashSet::default()); + assert_eq!(ctx.dep_trackers().recorded_deps(), HashSet::default()); - assert_eq!(opaque.into_value(), 1); + assert_eq!(ctx.opaque_into_value(opaque), 1); assert_eq!( ctx.dep_trackers().recorded_deps(), - &[DiceKey { index: 0 }].into_iter().collect() + [DiceKey { index: 0 }].into_iter().collect() ); } } diff --git a/dice/dice/src/impls/task.rs b/dice/dice/src/impls/task.rs new file mode 100644 index 0000000000000..857fe1b7985bd --- /dev/null +++ b/dice/dice/src/impls/task.rs @@ -0,0 +1,78 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use std::any::Any; + +use buck2_futures::owning_future::OwningFuture; +use buck2_futures::spawn::spawn_cancellable; +use buck2_futures::spawn::FutureAndCancellationHandle; +use buck2_futures::spawner::Spawner; +use dupe::Dupe; +use futures::future::BoxFuture; +use futures::FutureExt; + +use crate::impls::key::DiceKey; +use crate::impls::task::dice::Cancellations; +use crate::impls::task::dice::DiceTask; +use crate::impls::task::dice::DiceTaskInternal; +use crate::impls::task::handle::DiceTaskHandle; + +pub(crate) mod dice; +pub(crate) mod handle; +pub(crate) mod promise; +mod state; + +#[cfg(test)] +mod tests; + +pub(crate) fn spawn_dice_task( + key: DiceKey, + spawner: &dyn Spawner, + ctx: &S, + f: impl for<'a, 'b> FnOnce(&'a mut DiceTaskHandle<'b>) -> BoxFuture<'a, Box> + Send, +) -> DiceTask { + let internal = DiceTaskInternal::new(key); + + // since the spawn is alive until cancelled via the handle, we can drop the spawn future itself + let FutureAndCancellationHandle { + cancellation_handle, + .. + } = spawn_cancellable( + { + let internal = internal.dupe(); + |cancellations| { + let handle = DiceTaskHandle::new(internal, cancellations); + + OwningFuture::new(handle, f).boxed() + } + }, + spawner, + ctx, + ); + + DiceTask { + internal, + cancellations: Cancellations::new(cancellation_handle), + } +} + +/// Unsafe as this creates a Task that must be completed explicitly otherwise polling will never +/// complete. +pub(crate) unsafe fn sync_dice_task(key: DiceKey) -> DiceTask { + let internal = DiceTaskInternal::new(key); + + DiceTask { + internal, + cancellations: Cancellations::not_cancellable(), + } +} + +pub(crate) struct PreviouslyCancelledTask { + pub(crate) previous: DiceTask, +} diff --git a/dice/dice/src/impls/task/dice.rs b/dice/dice/src/impls/task/dice.rs index d09567906d167..98809b8f75774 100644 --- a/dice/dice/src/impls/task/dice.rs +++ b/dice/dice/src/impls/task/dice.rs @@ -17,11 +17,11 @@ use std::task::Poll; use allocative::Allocative; use allocative::Visitor; +use buck2_futures::cancellation::future::CancellationHandle; use dupe::Dupe; use dupe::OptionDupedExt; use futures::task::AtomicWaker; use futures::FutureExt; -use more_futures::cancellation::future::CancellationHandle; use parking_lot::Mutex; use parking_lot::MutexGuard; use parking_lot::RwLock; @@ -35,7 +35,8 @@ use crate::impls::task::promise::DicePromise; use crate::impls::task::state::AtomicDiceTaskState; use crate::impls::value::DiceComputedValue; use crate::result::CancellableResult; -use crate::result::Cancelled; +use crate::result::CancellationReason; +use crate::GlobalStats; /// /// 'DiceTask' is approximately a copy of Shared and Weak from std, but with some custom special @@ -353,7 +354,7 @@ impl DiceTaskInternal { /// report the task as terminated. This should only be called once. 
No effect if called after /// task is already ready - pub(super) fn report_terminated(&self) { + pub(super) fn report_terminated(&self, reason: CancellationReason) { match self.state.sync() { TaskState::Continue => {} TaskState::Finished => { @@ -365,7 +366,7 @@ impl DiceTaskInternal { // SAFETY: no tasks read the value unless state is converted to `READY` &mut *self.maybe_value.get() } - .replace(Err(Cancelled)) + .replace(Err(reason)) .is_some(); assert!( !prev_exist, @@ -415,6 +416,7 @@ impl Cancellations { } pub(super) fn cancel(&self, _lock: &MutexGuard) { + GlobalStats::record_cancellation(); if let Some(internal) = self.internal.as_ref() { take_mut::take( unsafe { diff --git a/dice/dice/src/impls/task/handle.rs b/dice/dice/src/impls/task/handle.rs index 25c5f15ebde00..212ae9eb4dbf4 100644 --- a/dice/dice/src/impls/task/handle.rs +++ b/dice/dice/src/impls/task/handle.rs @@ -9,12 +9,13 @@ //! Handle to the DiceTask as seen by the thread responsible for completing the task +use buck2_futures::cancellation::ExplicitCancellationContext; use dupe::Dupe; -use more_futures::cancellation::ExplicitCancellationContext; use crate::arc::Arc; use crate::impls::task::dice::DiceTaskInternal; use crate::impls::value::DiceComputedValue; +use crate::result::CancellationReason; /// The handle to the 'DiceTask' owned by the spawned thread that is responsible for completing /// the task. @@ -74,19 +75,6 @@ impl<'a> DiceTaskHandle<'a> { pub(crate) fn cancellation_ctx(&self) -> &'a ExplicitCancellationContext { self.cancellations } - - #[cfg(test)] - pub(crate) fn testing_new() -> DiceTaskHandle<'static> { - let internal = DiceTaskInternal::new(crate::impls::key::DiceKey { index: 99999 }); - DiceTaskHandle::<'static> { - internal: internal.dupe(), - cancellations: ExplicitCancellationContext::testing(), - completion_handle: TaskCompletionHandle { - internal, - result: None, - }, - } - } } impl TaskCompletionHandle { @@ -107,7 +95,8 @@ impl Drop for TaskCompletionHandle { // This is only owned by the main worker task. If this was dropped, and no result was // ever recorded, then we must have been terminated. - self.internal.report_terminated() + self.internal + .report_terminated(CancellationReason::NoResult) } } } diff --git a/dice/dice/src/impls/task/mod.rs b/dice/dice/src/impls/task/mod.rs deleted file mode 100644 index 9e6972c024422..0000000000000 --- a/dice/dice/src/impls/task/mod.rs +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree.
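// Lifecycle sketch for the task API above: the spawned closure drives the
// task through its handle and must eventually call `finished(..)`; if the
// completion handle is dropped without a result, the Drop impl above now
// reports CancellationReason::NoResult. Condensed from the `simple_task`
// test later in this diff (`K` is that test's key type):
async fn spawn_lifecycle_sketch() {
    let task = spawn_dice_task(DiceKey { index: 10 }, &TokioSpawner, &(), |handle| {
        async move {
            handle.finished(DiceComputedValue::new(
                MaybeValidDiceValue::valid(DiceValidValue::testing_new(DiceKeyValue::<K>::new(2))),
                Arc::new(VersionRanges::new()),
                TrackedInvalidationPaths::clean(),
            ));
            Box::new(()) as Box<dyn Any>
        }
        .boxed()
    });

    // The promise resolves only once `finished` has run (or the task is
    // cancelled, in which case it reports the cancellation).
    let value = task
        .depended_on_by(ParentKey::None)
        .not_cancelled()
        .unwrap()
        .await;
    assert!(value.is_ok());
}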
- */ - -use std::any::Any; - -use dupe::Dupe; -use futures::future::BoxFuture; -use futures::FutureExt; -use more_futures::owning_future::OwningFuture; -use more_futures::spawn::spawn_cancellable; -use more_futures::spawn::FutureAndCancellationHandle; -use more_futures::spawner::Spawner; - -use crate::impls::key::DiceKey; -use crate::impls::task::dice::Cancellations; -use crate::impls::task::dice::DiceTask; -use crate::impls::task::dice::DiceTaskInternal; -use crate::impls::task::handle::DiceTaskHandle; - -pub(crate) mod dice; -pub(crate) mod handle; -pub(crate) mod promise; -mod state; - -#[cfg(test)] -mod tests; - -pub(crate) fn spawn_dice_task( - key: DiceKey, - spawner: &dyn Spawner, - ctx: &S, - f: impl for<'a, 'b> FnOnce(&'a mut DiceTaskHandle<'b>) -> BoxFuture<'a, Box> + Send, -) -> DiceTask { - let internal = DiceTaskInternal::new(key); - - // since the spawn is alive until cancelled via the handle, we can drop the spawn future itself - let FutureAndCancellationHandle { - cancellation_handle, - .. - } = spawn_cancellable( - { - let internal = internal.dupe(); - |cancellations| { - let handle = DiceTaskHandle::new(internal, cancellations); - - OwningFuture::new(handle, f).boxed() - } - }, - spawner, - ctx, - ); - - DiceTask { - internal, - cancellations: Cancellations::new(cancellation_handle), - } -} - -/// Unsafe as this creates a Task that must be completed explicitly otherwise polling will never -/// complete. -pub(crate) unsafe fn sync_dice_task(key: DiceKey) -> DiceTask { - let internal = DiceTaskInternal::new(key); - - DiceTask { - internal, - cancellations: Cancellations::not_cancellable(), - } -} - -pub(crate) struct PreviouslyCancelledTask { - pub(crate) previous: DiceTask, -} diff --git a/dice/dice/src/impls/task/promise.rs b/dice/dice/src/impls/task/promise.rs index a33a2fdd9fbe7..b5b005f5a6e6a 100644 --- a/dice/dice/src/impls/task/promise.rs +++ b/dice/dice/src/impls/task/promise.rs @@ -26,7 +26,6 @@ use crate::impls::task::dice::SlabId; use crate::impls::task::handle::TaskState; use crate::impls::value::DiceComputedValue; use crate::result::CancellableResult; -use crate::result::Cancelled; /// A strong reference to a 'DiceTask' that is pollable as a future.
/// This is only awoken when the result is ready, as none of the pollers are responsible for @@ -157,9 +156,9 @@ impl DicePromise { // setting the result let _ignore = internals.set_value(result); } - Err(Cancelled) => { + Err(reason) => { // if its cancelled, report cancelled - internals.report_terminated(); + internals.report_terminated(reason); } } diff --git a/dice/dice/src/impls/task/tests.rs b/dice/dice/src/impls/task/tests.rs index a3b707d2e9480..e0c99d2ff1855 100644 --- a/dice/dice/src/impls/task/tests.rs +++ b/dice/dice/src/impls/task/tests.rs @@ -8,19 +8,20 @@ */ use std::any::Any; +use std::hash::Hash; use std::sync::atomic::Ordering; use std::task::Poll; use allocative::Allocative; use assert_matches::assert_matches; use async_trait::async_trait; +use buck2_futures::cancellation::CancellationContext; +use buck2_futures::spawner::TokioSpawner; use derive_more::Display; use dupe::Dupe; use futures::pin_mut; use futures::poll; use futures::FutureExt; -use more_futures::cancellation::CancellationContext; -use more_futures::spawner::TokioSpawner; use tokio::sync::oneshot; use tokio::sync::Barrier; use tokio::sync::Mutex; @@ -30,7 +31,6 @@ use tokio::sync::Semaphore; use crate::api::computations::DiceComputations; use crate::api::key::Key; use crate::arc::Arc; -use crate::impls::core::graph::history::CellHistory; use crate::impls::key::DiceKey; use crate::impls::key::ParentKey; use crate::impls::task::dice::MaybeCancelled; @@ -41,7 +41,9 @@ use crate::impls::value::DiceComputedValue; use crate::impls::value::DiceKeyValue; use crate::impls::value::DiceValidValue; use crate::impls::value::MaybeValidDiceValue; -use crate::result::Cancelled; +use crate::impls::value::TrackedInvalidationPaths; +use crate::result::CancellationReason; +use crate::versions::VersionRanges; #[derive(Allocative, Clone, Debug, Display, Eq, PartialEq, Hash)] struct K; @@ -77,7 +79,8 @@ async fn simple_task() -> anyhow::Result<()> { handle.finished(DiceComputedValue::new( MaybeValidDiceValue::valid(DiceValidValue::testing_new(DiceKeyValue::::new(2))), - Arc::new(CellHistory::empty()), + Arc::new(VersionRanges::new()), + TrackedInvalidationPaths::clean(), )); Box::new(()) as Box @@ -137,7 +140,8 @@ async fn not_ready_until_dropped() -> anyhow::Result<()> { // wait for the lock too handle.finished(DiceComputedValue::new( MaybeValidDiceValue::valid(DiceValidValue::testing_new(DiceKeyValue::::new(1))), - Arc::new(CellHistory::empty()), + Arc::new(VersionRanges::new()), + TrackedInvalidationPaths::clean(), )); sent_finish.notify_one(); @@ -227,7 +231,8 @@ async fn multiple_promises_all_completes() -> anyhow::Result<()> { // wait for the lock too handle.finished(DiceComputedValue::new( MaybeValidDiceValue::valid(DiceValidValue::testing_new(DiceKeyValue::::new(2))), - Arc::new(CellHistory::empty()), + Arc::new(VersionRanges::new()), + TrackedInvalidationPaths::clean(), )); Box::new(()) as Box @@ -313,7 +318,8 @@ async fn sync_complete_task_completes_promises() -> anyhow::Result<()> { .unwrap() .sync_get_or_complete(|| DiceSyncResult::testing(DiceComputedValue::new( MaybeValidDiceValue::valid(DiceValidValue::testing_new(DiceKeyValue::::new(2))), - Arc::new(CellHistory::empty()) + Arc::new(VersionRanges::new()), + TrackedInvalidationPaths::clean(), )))? 
.value() .equality(&DiceValidValue::testing_new(DiceKeyValue::::new(2))) @@ -357,11 +363,13 @@ async fn sync_complete_task_with_future() -> anyhow::Result<()> { let v_sync = DiceComputedValue::new( MaybeValidDiceValue::valid(DiceValidValue::testing_new(DiceKeyValue::::new(2))), - Arc::new(CellHistory::empty()), + Arc::new(VersionRanges::new()), + TrackedInvalidationPaths::clean(), ); let v_async = DiceComputedValue::new( MaybeValidDiceValue::valid(DiceValidValue::testing_new(DiceKeyValue::::new(99))), - Arc::new(CellHistory::empty()), + Arc::new(VersionRanges::new()), + TrackedInvalidationPaths::clean(), ); let (tx, rx) = oneshot::channel(); @@ -372,7 +380,10 @@ async fn sync_complete_task_with_future() -> anyhow::Result<()> { .sync_get_or_complete(|| DiceSyncResult { sync_result: v_sync, state_future: rx - .map(|res| { res.map_err(|_| Cancelled).flatten() }) + .map(|res| { + res.map_err(|_| CancellationReason::TransactionCancelled) + .flatten() + }) .boxed(), })? .value() @@ -473,7 +484,8 @@ async fn sync_complete_task_wakes_waiters() -> anyhow::Result<()> { .unwrap() .sync_get_or_complete(|| DiceSyncResult::testing(DiceComputedValue::new( MaybeValidDiceValue::valid(DiceValidValue::testing_new(DiceKeyValue::::new(1))), - Arc::new(CellHistory::empty()) + Arc::new(VersionRanges::new()), + TrackedInvalidationPaths::clean(), )))? .value() .equality(&DiceValidValue::testing_new(DiceKeyValue::::new(1))) @@ -512,7 +524,8 @@ async fn sync_complete_unfinished_spawned_task() -> anyhow::Result<()> { MaybeValidDiceValue::valid(DiceValidValue::testing_new( DiceKeyValue::::new(2), )), - Arc::new(CellHistory::empty()), + Arc::new(VersionRanges::new()), + TrackedInvalidationPaths::clean(), )); Box::new(()) as Box @@ -532,7 +545,8 @@ async fn sync_complete_unfinished_spawned_task() -> anyhow::Result<()> { .unwrap() .sync_get_or_complete(|| DiceSyncResult::testing(DiceComputedValue::new( MaybeValidDiceValue::valid(DiceValidValue::testing_new(DiceKeyValue::::new(1))), - Arc::new(CellHistory::empty()) + Arc::new(VersionRanges::new()), + TrackedInvalidationPaths::clean(), )))? .value() .equality(&DiceValidValue::testing_new(DiceKeyValue::::new(1))) @@ -575,7 +589,8 @@ async fn sync_complete_finished_spawned_task() -> anyhow::Result<()> { MaybeValidDiceValue::valid(DiceValidValue::testing_new( DiceKeyValue::::new(2), )), - Arc::new(CellHistory::empty()), + Arc::new(VersionRanges::new()), + TrackedInvalidationPaths::clean(), )); sem.add_permits(1); @@ -600,7 +615,8 @@ async fn sync_complete_finished_spawned_task() -> anyhow::Result<()> { .unwrap() .sync_get_or_complete(|| DiceSyncResult::testing(DiceComputedValue::new( MaybeValidDiceValue::valid(DiceValidValue::testing_new(DiceKeyValue::::new(1))), - Arc::new(CellHistory::empty()) + Arc::new(VersionRanges::new()), + TrackedInvalidationPaths::clean(), )))? .value() .equality(&DiceValidValue::testing_new(DiceKeyValue::::new(2))) diff --git a/dice/dice/src/impls/tests/mod.rs b/dice/dice/src/impls/tests.rs similarity index 100% rename from dice/dice/src/impls/tests/mod.rs rename to dice/dice/src/impls/tests.rs diff --git a/dice/dice/src/impls/tests/activation_tracker.rs b/dice/dice/src/impls/tests/activation_tracker.rs index 744f3b87fec11..30c3cc071f301 100644 --- a/dice/dice/src/impls/tests/activation_tracker.rs +++ b/dice/dice/src/impls/tests/activation_tracker.rs @@ -7,15 +7,14 @@ * of this source tree. 
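// The activation-tracker hunk below migrates key callbacks from `&dyn Any`
// to the new `DynKey` wrapper: downcasting goes through
// `DynKey::downcast_ref` and keys render via Display. A minimal sketch of a
// tracker against the new signature (the struct and field names are
// illustrative; it assumes the same Allocative bound the test tracker
// carries):
#[derive(Allocative)]
struct NameTracker {
    seen: std::sync::Mutex<Vec<String>>,
}

impl ActivationTracker for NameTracker {
    fn key_activated(
        &self,
        key: &DynKey,
        deps: &mut dyn Iterator<Item = &DynKey>,
        _activation_data: ActivationData,
    ) {
        let mut seen = self.seen.lock().unwrap();
        // DynKey is Display, so keys can be recorded without downcasting
        seen.push(key.to_string());
        for dep in deps {
            seen.push(format!("  dep: {}", dep));
        }
    }
}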
*/ -use std::any::Any; use std::sync::Arc; use std::sync::Mutex; use allocative::Allocative; use async_trait::async_trait; +use buck2_futures::cancellation::CancellationContext; use derive_more::Display; use dupe::Dupe; -use more_futures::cancellation::CancellationContext; use crate::api::computations::DiceComputations; use crate::api::cycles::DetectCycles; @@ -25,6 +24,7 @@ use crate::api::user_data::UserComputationData; use crate::ActivationData; use crate::ActivationTracker; use crate::DiceDataBuilder; +use crate::DynKey; use crate::InjectedKey; #[derive(Default, Allocative)] @@ -44,8 +44,8 @@ impl Tracker { impl ActivationTracker for Tracker { fn key_activated( &self, - key: &dyn Any, - deps: &mut dyn Iterator, + key: &DynKey, + deps: &mut dyn Iterator, activation_data: ActivationData, ) { let (data, reused) = match activation_data { @@ -54,8 +54,8 @@ impl ActivationTracker for Tracker { }; self.state.lock().unwrap().push(( - Kind::from_any(key), - deps.into_iter().map(Kind::from_any).collect(), + Kind::from_dyn_key(key), + deps.into_iter().map(Kind::from_dyn_key).collect(), data, reused, )); @@ -70,20 +70,20 @@ enum Kind { } impl Kind { - fn from_any(key: &dyn Any) -> Self { - if key.is::() { + fn from_dyn_key(key: &DynKey) -> Self { + if key.downcast_ref::().is_some() { return Self::Injected; } - if key.is::() { + if key.downcast_ref::().is_some() { return Self::Stage0; } - if key.is::() { + if key.downcast_ref::().is_some() { return Self::Stage1; } - panic!("Unexpected key: {:?}", key) + panic!("Unexpected key: {}", key) } } @@ -91,7 +91,7 @@ impl Kind { struct Data; #[derive(Clone, Dupe, Debug, Display, Eq, Hash, PartialEq, Allocative)] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] struct Injected; #[async_trait] @@ -104,7 +104,7 @@ impl InjectedKey for Injected { } #[derive(Clone, Dupe, Debug, Display, PartialEq, Eq, Hash, Allocative)] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] struct Stage0; #[async_trait] @@ -126,7 +126,7 @@ impl Key for Stage0 { } #[derive(Clone, Dupe, Debug, Display, PartialEq, Eq, Hash, Allocative)] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] struct Stage1; #[async_trait] @@ -161,7 +161,7 @@ async fn test_events_impl(builder: DiceDataBuilder) -> anyhow::Result<()> { let mut updater = dice.updater_with_data(data); updater.changed_to(vec![(Injected, 123)])?; - let transaction = updater.commit().await; + let mut transaction = updater.commit().await; transaction.compute(&Stage1).await?; assert_eq!( @@ -185,7 +185,7 @@ async fn test_events_impl(builder: DiceDataBuilder) -> anyhow::Result<()> { let mut updater = dice.updater_with_data(data); updater.changed_to(vec![(Injected, 456)])?; - let transaction = updater.commit().await; + let mut transaction = updater.commit().await; transaction.compute(&Stage1).await?; assert_eq!( diff --git a/dice/dice/src/impls/tests/demo.rs b/dice/dice/src/impls/tests/demo.rs index 6e0ba8463e0cd..5fd38d7176e06 100644 --- a/dice/dice/src/impls/tests/demo.rs +++ b/dice/dice/src/impls/tests/demo.rs @@ -18,9 +18,9 @@ use std::sync::Arc; use allocative::Allocative; use async_trait::async_trait; +use buck2_futures::cancellation::CancellationContext; use derive_more::Display; use dupe::Dupe; -use more_futures::cancellation::CancellationContext; use tempfile::NamedTempFile; use crate::api::computations::DiceComputations; @@ -37,7 +37,7 @@ enum Encoding { } #[derive(Clone, Dupe, Debug, Display, Eq, Hash, PartialEq, Allocative)] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] struct 
EncodingConfig(); impl InjectedKey for EncodingConfig { @@ -48,9 +48,9 @@ impl InjectedKey for EncodingConfig { } } -struct Encodings<'c>(&'c mut DiceComputations); +struct Encodings<'c, 'd>(&'c mut DiceComputations<'d>); -impl<'c> Encodings<'c> { +impl<'c, 'd> Encodings<'c, 'd> { async fn get(&mut self) -> Result> { self.0 .compute(&EncodingConfig()) @@ -59,12 +59,12 @@ impl<'c> Encodings<'c> { } } -trait HasEncodings { - fn encodings(&mut self) -> Encodings; +trait HasEncodings<'d> { + fn encodings<'c>(&'c mut self) -> Encodings<'c, 'd>; } -impl HasEncodings for DiceComputations { - fn encodings(&mut self) -> Encodings { +impl<'d> HasEncodings<'d> for DiceComputations<'d> { + fn encodings<'c>(&'c mut self) -> Encodings<'c, 'd> { Encodings(self) } } @@ -79,40 +79,40 @@ impl SetEncodings for DiceTransactionUpdater { } } -struct Filesystem<'c>(&'c mut DiceComputations); +struct Filesystem<'c, 'd>(&'c mut DiceComputations<'d>); #[derive(Clone, Display, Debug, Eq, Hash, PartialEq, Allocative)] -#[display(fmt = "File({})", "_0.display()")] +#[display("File({})", _0.display())] struct File(PathBuf); -impl<'c> Filesystem<'c> { - async fn read_file(&mut self, file: &Path) -> Result, Arc> { - #[async_trait] - impl Key for File { - type Value = Result, Arc>; - async fn compute( - &self, - ctx: &mut DiceComputations, - _cancellations: &CancellationContext, - ) -> Self::Value { - let encoding = ctx.encodings().get().await?; - - let s = fs::read_to_string(&self.0).unwrap(); - - Ok(Arc::new(match encoding { - Encoding::Utf8 => s, - Encoding::Ascii => s.replace(":-)", "smile"), - })) - } - - fn equality(x: &Self::Value, y: &Self::Value) -> bool { - match (x, y) { - (Ok(x), Ok(y)) => x == y, - _ => false, - } - } +#[async_trait] +impl Key for File { + type Value = Result, Arc>; + async fn compute( + &self, + ctx: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> Self::Value { + let encoding = ctx.encodings().get().await?; + + let s = fs::read_to_string(&self.0).unwrap(); + + Ok(Arc::new(match encoding { + Encoding::Utf8 => s, + Encoding::Ascii => s.replace(":-)", "smile"), + })) + } + + fn equality(x: &Self::Value, y: &Self::Value) -> bool { + match (x, y) { + (Ok(x), Ok(y)) => x == y, + _ => false, } + } +} +impl<'c, 'd> Filesystem<'c, 'd> { + async fn read_file(&mut self, file: &Path) -> Result, Arc> { self.0 .compute(&File(file.to_path_buf())) .await @@ -120,12 +120,12 @@ impl<'c> Filesystem<'c> { } } -trait HasFilesystem<'c> { - fn filesystem(&'c mut self) -> Filesystem<'c>; +trait HasFilesystem<'c, 'd> { + fn filesystem(&'c mut self) -> Filesystem<'c, 'd>; } -impl<'c> HasFilesystem<'c> for DiceComputations { - fn filesystem(&'c mut self) -> Filesystem<'c> { +impl<'c, 'd> HasFilesystem<'c, 'd> for DiceComputations<'d> { + fn filesystem(&'c mut self) -> Filesystem<'c, 'd> { Filesystem(self) } } diff --git a/dice/dice/src/impls/tests/events.rs b/dice/dice/src/impls/tests/events.rs index 2ef2af3429f7e..4e4e73efaa35a 100644 --- a/dice/dice/src/impls/tests/events.rs +++ b/dice/dice/src/impls/tests/events.rs @@ -12,9 +12,9 @@ use std::sync::Mutex; use allocative::Allocative; use async_trait::async_trait; +use buck2_futures::cancellation::CancellationContext; use derive_more::Display; use dupe::Dupe; -use more_futures::cancellation::CancellationContext; use crate::api::computations::DiceComputations; use crate::api::cycles::DetectCycles; @@ -38,7 +38,7 @@ impl DiceEventListener for Tracker { } #[derive(Clone, Dupe, Debug, Display, Eq, Hash, PartialEq, Allocative)] -#[display(fmt = 
"{:?}", self)] +#[display("{:?}", self)] struct Injected; #[async_trait] @@ -51,7 +51,7 @@ impl InjectedKey for Injected { } #[derive(Clone, Dupe, Debug, Display, PartialEq, Eq, Hash, Allocative)] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] struct Stage0; #[async_trait] @@ -72,7 +72,7 @@ impl Key for Stage0 { } #[derive(Clone, Dupe, Debug, Display, PartialEq, Eq, Hash, Allocative)] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] struct Stage1; #[async_trait] @@ -106,15 +106,19 @@ async fn test_events_impl(builder: DiceDataBuilder) -> anyhow::Result<()> { let mut updater = dice.updater_with_data(data); updater.changed_to(vec![(Injected, 123)])?; - let transaction = updater.commit().await; + let mut transaction = updater.commit().await; transaction.compute(&Stage1).await?; assert_eq!( &*tracker.state.lock().unwrap(), &[ DiceEvent::Started { key_type: "Stage1" }, + DiceEvent::ComputeStarted { key_type: "Stage1" }, DiceEvent::Started { key_type: "Stage0" }, + DiceEvent::ComputeStarted { key_type: "Stage0" }, + DiceEvent::ComputeFinished { key_type: "Stage0" }, DiceEvent::Finished { key_type: "Stage0" }, + DiceEvent::ComputeFinished { key_type: "Stage1" }, DiceEvent::Finished { key_type: "Stage1" }, ] ); @@ -132,18 +136,22 @@ async fn test_events_impl(builder: DiceDataBuilder) -> anyhow::Result<()> { let mut updater = dice.updater_with_data(data); updater.changed_to(vec![(Injected, 456)])?; - let transaction = updater.commit().await; + let mut transaction = updater.commit().await; transaction.compute(&Stage1).await?; assert_eq!( &*tracker.state.lock().unwrap(), &[ + DiceEvent::Started { key_type: "Stage1" }, DiceEvent::CheckDepsStarted { key_type: "Stage1" }, + DiceEvent::Started { key_type: "Stage0" }, DiceEvent::CheckDepsStarted { key_type: "Stage0" }, DiceEvent::CheckDepsFinished { key_type: "Stage0" }, - DiceEvent::Started { key_type: "Stage0" }, + DiceEvent::ComputeStarted { key_type: "Stage0" }, + DiceEvent::ComputeFinished { key_type: "Stage0" }, DiceEvent::Finished { key_type: "Stage0" }, - DiceEvent::CheckDepsFinished { key_type: "Stage1" } + DiceEvent::CheckDepsFinished { key_type: "Stage1" }, + DiceEvent::Finished { key_type: "Stage1" }, ] ); } diff --git a/dice/dice/src/impls/tests/general.rs b/dice/dice/src/impls/tests/general.rs index da2f261eabc40..b85b35ecd88f9 100644 --- a/dice/dice/src/impls/tests/general.rs +++ b/dice/dice/src/impls/tests/general.rs @@ -16,11 +16,11 @@ use std::sync::Mutex; use allocative::Allocative; use assert_matches::assert_matches; use async_trait::async_trait; +use buck2_futures::cancellation::CancellationContext; use derivative::Derivative; use derive_more::Display; use dupe::Dupe; use futures::FutureExt; -use more_futures::cancellation::CancellationContext; use tokio::sync::oneshot; use crate::api::computations::DiceComputations; @@ -33,11 +33,12 @@ use crate::impls::dice::DiceModern; use crate::versions::VersionNumber; use crate::Dice; use crate::DiceData; +use crate::DynKey; use crate::UserCycleDetector; use crate::UserCycleDetectorGuard; #[derive(Clone, Dupe, Debug, Display, Eq, Hash, PartialEq, Allocative)] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] struct Foo(i32); #[async_trait] @@ -51,7 +52,7 @@ impl InjectedKey for Foo { #[derive(Clone, Dupe, Debug, Derivative, Allocative, Display)] #[derivative(PartialEq, Eq, Hash)] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] #[allocative(skip)] struct KeyThatRuns { #[derivative(Hash = "ignore", PartialEq = "ignore")] @@ -90,7 +91,7 @@ async fn 
set_injected_multiple_times_per_commit() -> anyhow::Result<()> { ctx.changed_to(vec![(Foo(0), 0)])?; ctx.changed_to(vec![(Foo(1), 1)])?; - let ctx = ctx.commit().await; + let mut ctx = ctx.commit().await; assert_eq!(ctx.compute(&Foo(0)).await?, 0); assert_eq!(ctx.compute(&Foo(1)).await?, 1); } @@ -133,7 +134,7 @@ async fn set_injected_with_no_change_no_new_ctx() -> anyhow::Result<()> { } #[derive(Clone, Dupe, Display, Debug, Eq, PartialEq, Hash, Allocative)] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] struct K(i32); #[async_trait] @@ -176,7 +177,7 @@ fn dice_computations_are_parallel() { #[derive(Clone, Debug, Display, Derivative, Allocative)] #[derivative(Hash, PartialEq, Eq)] - #[display(fmt = "{:?}", self)] + #[display("{:?}", self)] struct Blocking { index: usize, #[derivative(PartialEq = "ignore", Hash = "ignore")] @@ -237,7 +238,7 @@ async fn different_data_per_compute_ctx() { struct U(usize); #[derive(Clone, Dupe, Debug, Display, PartialEq, Eq, Hash, Allocative)] - #[display(fmt = "{:?}", self)] + #[display("{:?}", self)] struct DataRequest(u8); #[async_trait] impl Key for DataRequest { @@ -268,9 +269,9 @@ async fn different_data_per_compute_ctx() { d }; - let ctx0 = dice.updater_with_data(per_cmd_data0).commit().await; + let mut ctx0 = dice.updater_with_data(per_cmd_data0).commit().await; - let ctx1 = dice.updater_with_data(per_cmd_data1).commit().await; + let mut ctx1 = dice.updater_with_data(per_cmd_data1).commit().await; let request0 = ctx0.compute(&DataRequest(0)); let request1 = ctx1.compute(&DataRequest(1)); @@ -312,7 +313,7 @@ fn invalid_update() { } #[derive(Clone, Copy, Dupe, Display, Debug, Eq, PartialEq, Hash, Allocative)] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] struct Fib(u8); #[async_trait] @@ -331,11 +332,11 @@ impl Key for Fib { return Ok(self.0 as u64); } let (a, b) = { - let (a, b) = ctx.compute2( + ctx.compute2( |ctx| ctx.compute(&Fib(self.0 - 2)).boxed(), |ctx| ctx.compute(&Fib(self.0 - 1)).boxed(), - ); - futures::future::join(a, b).await + ) + .await }; match (a, b) { (Ok(a), Ok(b)) => Ok(a? 
+ b?), @@ -369,22 +370,19 @@ struct CycleDetectorGuard { } impl UserCycleDetector for CycleDetector { - fn start_computing_key( - &self, - key: &dyn std::any::Any, - ) -> Option> { + fn start_computing_key(&self, key: &DynKey) -> Option> { let f = key.downcast_ref::().unwrap(); self.events .lock() .unwrap() .push(CycleDetectorEvents::Start(*f)); - Some(Box::new(CycleDetectorGuard { + Some(Arc::new(CycleDetectorGuard { key: *f, events: self.events.dupe(), })) } - fn finished_computing_key(&self, key: &dyn std::any::Any) { + fn finished_computing_key(&self, key: &DynKey) { let f = key.downcast_ref::().unwrap(); self.events .lock() @@ -394,7 +392,7 @@ impl UserCycleDetector for CycleDetector { } impl UserCycleDetectorGuard for CycleDetectorGuard { - fn add_edge(&self, key: &dyn std::any::Any) { + fn add_edge(&self, key: &DynKey) { let f = key.downcast_ref::().unwrap(); self.events .lock() @@ -402,10 +400,6 @@ impl UserCycleDetectorGuard for CycleDetectorGuard { .push(CycleDetectorEvents::Edge(self.key, *f)) } - fn as_any(&self) -> &dyn std::any::Any { - self - } - fn type_name(&self) -> &'static str { std::any::type_name::() } @@ -424,7 +418,7 @@ fn user_cycle_detector_receives_events() -> anyhow::Result<()> { })), ..Default::default() }; - let ctx = dice.updater_with_data(user_data).commit().await; + let mut ctx = dice.updater_with_data(user_data).commit().await; let res = ctx.compute(&Fib(20)).await?.expect("should succeed"); assert_eq!(res, 6765); @@ -490,7 +484,7 @@ async fn dropping_request_future_cancels_execution() { #[derive(Clone, Dupe, Debug, Derivative, Allocative, Display)] #[derivative(PartialEq, Eq, Hash)] - #[display(fmt = "{:?}", self)] + #[display("{:?}", self)] #[allocative(skip)] struct KeyThatShouldntRun { #[derivative(Hash = "ignore", PartialEq = "ignore")] @@ -531,7 +525,7 @@ async fn dropping_request_future_cancels_execution() { let dice = DiceModern::builder().build(DetectCycles::Disabled); - let ctx = dice.updater().commit().await; + let mut ctx = dice.updater().commit().await; let key = KeyThatShouldntRun { barrier1: barrier1.dupe(), @@ -565,7 +559,7 @@ async fn dropping_request_future_doesnt_cancel_if_multiple_requests_active() { let barrier2 = Arc::new(tokio::sync::Semaphore::new(0)); let is_ran = Arc::new(AtomicBool::new(false)); - let key = KeyThatRuns { + let key = &KeyThatRuns { barrier1: barrier1.dupe(), barrier2: barrier2.dupe(), is_ran: is_ran.dupe(), @@ -573,9 +567,11 @@ async fn dropping_request_future_doesnt_cancel_if_multiple_requests_active() { let dice = DiceModern::builder().build(DetectCycles::Disabled); - let ctx = dice.updater().commit().await; - let req1 = ctx.compute(&key); - let req2 = ctx.compute(&key); + let mut ctx = dice.updater().commit().await; + let (req1, req2) = ctx.compute2( + |ctx| ctx.compute(key).boxed(), + |ctx| ctx.compute(key).boxed(), + ); // ensure that the key starts computing let _b = barrier1.acquire().await; @@ -602,7 +598,7 @@ async fn user_cycle_detector_is_present_modern() -> anyhow::Result<()> { async fn user_cycle_detector_is_present(dice: Arc) -> anyhow::Result<()> { #[derive(Clone, Copy, Dupe, Display, Debug, Eq, PartialEq, Hash, Allocative)] - #[display(fmt = "{:?}", self)] + #[display("{:?}", self)] struct AccessCycleGuardKey; #[async_trait] @@ -632,22 +628,15 @@ async fn user_cycle_detector_is_present(dice: Arc) -> anyhow::Result<()> { struct AccessCycleDetectorGuard; impl UserCycleDetector for AccessCycleDetector { - fn start_computing_key( - &self, - _key: &dyn std::any::Any, - ) -> Option> { - 
Some(Box::new(AccessCycleDetectorGuard)) + fn start_computing_key(&self, _key: &DynKey) -> Option> { + Some(Arc::new(AccessCycleDetectorGuard)) } - fn finished_computing_key(&self, _key: &dyn std::any::Any) {} + fn finished_computing_key(&self, _key: &DynKey) {} } impl UserCycleDetectorGuard for AccessCycleDetectorGuard { - fn add_edge(&self, _key: &dyn std::any::Any) {} - - fn as_any(&self) -> &dyn std::any::Any { - self - } + fn add_edge(&self, _key: &DynKey) {} fn type_name(&self) -> &'static str { std::any::type_name::() @@ -658,7 +647,7 @@ async fn user_cycle_detector_is_present(dice: Arc) -> anyhow::Result<()> { cycle_detector: Some(Arc::new(AccessCycleDetector)), ..Default::default() }; - let ctx = dice.updater_with_data(user_data).commit().await; + let mut ctx = dice.updater_with_data(user_data).commit().await; Ok(ctx.compute(&AccessCycleGuardKey).await?) } @@ -666,7 +655,7 @@ async fn user_cycle_detector_is_present(dice: Arc) -> anyhow::Result<()> { async fn test_dice_usable_after_cancellations() { let dice = DiceModern::builder().build(DetectCycles::Disabled); - let ctx = dice.updater().commit().await; + let mut ctx = dice.updater().commit().await; let barrier1 = Arc::new(tokio::sync::Semaphore::new(0)); let barrier2 = Arc::new(tokio::sync::Semaphore::new(0)); @@ -691,7 +680,7 @@ async fn test_dice_usable_after_cancellations() { assert!(!is_ran.load(Ordering::Acquire)); - let ctx = dice.updater().commit().await; + let mut ctx = dice.updater().commit().await; // req2 still succeed. Note that due to dice caching, even if we make a new key, the same // instance would be used, so just use the same one. diff --git a/dice/dice/src/impls/tests/keys.rs b/dice/dice/src/impls/tests/keys.rs index 2a2614cb0a9c9..a842af304f618 100644 --- a/dice/dice/src/impls/tests/keys.rs +++ b/dice/dice/src/impls/tests/keys.rs @@ -13,10 +13,11 @@ use std::sync::Arc; use allocative::Allocative; use async_trait::async_trait; +use buck2_futures::cancellation::CancellationContext; use derive_more::Display; use dupe::Dupe; use futures::future::join3; -use more_futures::cancellation::CancellationContext; +use futures::FutureExt; use tokio::sync::Mutex; use crate::api::computations::DiceComputations; @@ -27,7 +28,7 @@ use crate::impls::dice::DiceModern; #[tokio::test] async fn concurrent_identical_requests_are_deduped() -> anyhow::Result<()> { #[derive(Allocative, Clone, Debug, Display)] - #[display(fmt = "{:?}", self)] + #[display("{:?}", self)] struct ComputeOnce(#[allocative(skip)] Arc>); impl PartialEq for ComputeOnce { @@ -65,13 +66,15 @@ async fn concurrent_identical_requests_are_deduped() -> anyhow::Result<()> { let guard = Arc::new(Mutex::new(0)); let _g = guard.lock().await; - let ctx = dice.updater().commit().await; + let mut ctx = dice.updater().commit().await; - let k = ComputeOnce(guard.dupe()); + let k = &ComputeOnce(guard.dupe()); - let compute1 = ctx.compute(&k); - let compute2 = ctx.compute(&k); - let compute3 = ctx.compute(&k); + let (compute1, compute2, compute3) = ctx.compute3( + |ctx| ctx.compute(k).boxed(), + |ctx| ctx.compute(k).boxed(), + |ctx| ctx.compute(k).boxed(), + ); drop(_g); @@ -90,7 +93,7 @@ fn different_requests_are_spawned_in_parallel() -> anyhow::Result<()> { let n_thread = 10usize; #[derive(Allocative, Clone, Debug, Display)] - #[display(fmt = "{:?}", self)] + #[display("{:?}", self)] // purposely use a sync barrier to see that our compute is spawned struct ComputeParallel(#[allocative(skip)] Arc); @@ -136,10 +139,12 @@ fn different_requests_are_spawned_in_parallel() -> 
anyhow::Result<()> { rt.block_on(async move { let dice = DiceModern::new(DiceData::new()); - let ctx = dice.updater().commit().await; - let k = ComputeParallel(barrier.dupe()); + let ctx = &dice.updater().commit().await; + let k = &ComputeParallel(barrier.dupe()); - let futs = (0..n_thread).map(|_| ctx.compute(&k)).collect::>(); + let futs = (0..n_thread) + .map(|_| async move { ctx.clone().compute(k).await }) + .collect::>(); let mut sum = 0; futures::future::join_all(futs) diff --git a/dice/dice/src/impls/tests/spawner.rs b/dice/dice/src/impls/tests/spawner.rs index 1a85ee403254f..bd72b977313d3 100644 --- a/dice/dice/src/impls/tests/spawner.rs +++ b/dice/dice/src/impls/tests/spawner.rs @@ -14,11 +14,11 @@ use std::sync::Arc; use allocative::Allocative; use async_trait::async_trait; +use buck2_futures::cancellation::CancellationContext; +use buck2_futures::spawner::Spawner; use derive_more::Display; use dupe::Dupe; use futures::future::BoxFuture; -use more_futures::cancellation::CancellationContext; -use more_futures::spawner::Spawner; use tokio::task::JoinHandle; use crate::api::computations::DiceComputations; @@ -69,7 +69,7 @@ async fn uses_custom_spawner() { data.spawner = spawner.dupe(); let updater = dice.updater_with_data(data); - let ctx = updater.commit().await; + let mut ctx = updater.commit().await; ctx.compute(&K).await.unwrap(); diff --git a/dice/dice/src/impls/tests/transients.rs b/dice/dice/src/impls/tests/transients.rs index 1c2fdbe439b92..734a95d93caaa 100644 --- a/dice/dice/src/impls/tests/transients.rs +++ b/dice/dice/src/impls/tests/transients.rs @@ -13,10 +13,10 @@ use std::sync::Arc; use allocative::Allocative; use async_trait::async_trait; +use buck2_futures::cancellation::CancellationContext; use derivative::Derivative; use derive_more::Display; use dupe::Dupe; -use more_futures::cancellation::CancellationContext; use crate::api::computations::DiceComputations; use crate::api::cycles::DetectCycles; @@ -27,7 +27,7 @@ use crate::impls::dice::DiceModern; async fn invalid_results_are_not_cached() -> anyhow::Result<()> { #[derive(Clone, Dupe, Debug, Display, Derivative, Allocative)] #[derivative(Hash, PartialEq, Eq)] - #[display(fmt = "{:?}", self)] + #[display("{:?}", self)] struct AlwaysTransient(#[derivative(PartialEq = "ignore", Hash = "ignore")] Arc); #[async_trait] @@ -55,7 +55,7 @@ async fn invalid_results_are_not_cached() -> anyhow::Result<()> { let dice = DiceModern::builder().build(DetectCycles::Enabled); let is_ran = Arc::new(AtomicBool::new(false)); { - let ctx = dice.updater().commit().await; + let mut ctx = dice.updater().commit().await; ctx.compute(&AlwaysTransient(is_ran.dupe())).await?; assert!(is_ran.load(Ordering::SeqCst)); @@ -65,7 +65,7 @@ async fn invalid_results_are_not_cached() -> anyhow::Result<()> { assert!(!is_ran.load(Ordering::SeqCst)); // simultaneously ctx should also re-use the result - let ctx1 = dice.updater().commit().await; + let mut ctx1 = dice.updater().commit().await; is_ran.store(false, Ordering::SeqCst); ctx1.compute(&AlwaysTransient(is_ran.dupe())).await?; assert!(!is_ran.load(Ordering::SeqCst)); @@ -73,7 +73,7 @@ async fn invalid_results_are_not_cached() -> anyhow::Result<()> { { // new context should re-run - let ctx = dice.updater().commit().await; + let mut ctx = dice.updater().commit().await; is_ran.store(false, Ordering::SeqCst); ctx.compute(&AlwaysTransient(is_ran.dupe())).await?; assert!(is_ran.load(Ordering::SeqCst)); @@ -91,7 +91,7 @@ async fn invalid_results_are_not_cached() -> anyhow::Result<()> { async fn 
demo_with_transient() -> anyhow::Result<()> { #[derive(Clone, Dupe, Debug, Display, Derivative, Allocative)] #[derivative(Hash, PartialEq, Eq)] - #[display(fmt = "{:?}", self)] + #[display("{:?}", self)] struct MaybeTransient( usize, #[derivative(PartialEq = "ignore", Hash = "ignore")] Arc, @@ -142,7 +142,7 @@ async fn demo_with_transient() -> anyhow::Result<()> { let dice = DiceModern::builder().build(DetectCycles::Enabled); - let ctx = dice.updater().commit().await; + let mut ctx = dice.updater().commit().await; let validity = Arc::new(AtomicBool::new(false)); assert!( @@ -160,7 +160,7 @@ async fn demo_with_transient() -> anyhow::Result<()> { drop(ctx); - let ctx = dice.updater().commit().await; + let mut ctx = dice.updater().commit().await; assert_eq!( ctx.compute(&MaybeTransient(10, validity.dupe())).await?, Ok(512) diff --git a/dice/dice/src/impls/tests/user_data.rs b/dice/dice/src/impls/tests/user_data.rs index a5b4e3bd49004..46cdb69920718 100644 --- a/dice/dice/src/impls/tests/user_data.rs +++ b/dice/dice/src/impls/tests/user_data.rs @@ -9,9 +9,9 @@ use allocative::Allocative; use async_trait::async_trait; +use buck2_futures::cancellation::CancellationContext; use derive_more::Display; use dupe::Dupe; -use more_futures::cancellation::CancellationContext; use crate::api::computations::DiceComputations; use crate::api::cycles::DetectCycles; @@ -24,7 +24,7 @@ async fn different_data_per_compute_ctx() { struct U(usize); #[derive(Clone, Dupe, Debug, Display, PartialEq, Eq, Hash, Allocative)] - #[display(fmt = "{:?}", self)] + #[display("{:?}", self)] struct DataRequest(u8); #[async_trait] impl Key for DataRequest { @@ -55,9 +55,9 @@ async fn different_data_per_compute_ctx() { d }; - let ctx0 = dice.updater_with_data(per_cmd_data0).commit().await; + let mut ctx0 = dice.updater_with_data(per_cmd_data0).commit().await; - let ctx1 = dice.updater_with_data(per_cmd_data1).commit().await; + let mut ctx1 = dice.updater_with_data(per_cmd_data1).commit().await; let request0 = ctx0.compute(&DataRequest(0)); let request1 = ctx1.compute(&DataRequest(1)); diff --git a/dice/dice/src/impls/transaction.rs b/dice/dice/src/impls/transaction.rs index c04a8ca2b655d..03aa9c65b7bf0 100644 --- a/dice/dice/src/impls/transaction.rs +++ b/dice/dice/src/impls/transaction.rs @@ -12,15 +12,14 @@ use std::sync::Arc; use allocative::Allocative; use derivative::Derivative; use dupe::Dupe; -use tokio::sync::oneshot; use crate::api::error::DiceError; use crate::api::error::DiceResult; +use crate::api::key::InvalidationSourcePriority; use crate::api::key::Key; use crate::api::storage_type::StorageType; use crate::api::user_data::UserComputationData; use crate::impls::core::state::CoreStateHandle; -use crate::impls::core::state::StateRequest; use crate::impls::ctx::BaseComputeCtx; use crate::impls::ctx::SharedLiveTransactionCtx; use crate::impls::key::DiceKey; @@ -90,7 +89,7 @@ impl TransactionUpdater { } /// Commit the changes registered via 'changed' and 'changed_to' to the current newest version. 
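// Usage shape for the commit flow documented here, as exercised throughout
// this diff's tests. Note that `commit` now yields a context that must be
// bound mutably to call `compute`. A minimal sketch; `Injected` stands in
// for any InjectedKey from the tests above:
async fn commit_flow_sketch(dice: std::sync::Arc<DiceModern>) -> anyhow::Result<()> {
    let mut updater = dice.updater();
    updater.changed_to(vec![(Injected, 123)])?;

    let mut ctx = updater.commit().await;
    assert_eq!(ctx.compute(&Injected).await?, 123);
    Ok(())
}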
- pub(crate) async fn commit(self) -> BaseComputeCtx { + pub(crate) async fn commit<'a>(self) -> BaseComputeCtx { let user_data = self.user_data.dupe(); let dice = self.dice.dupe(); @@ -110,47 +109,32 @@ impl TransactionUpdater { } pub(crate) async fn existing_state(&self) -> BaseComputeCtx { - let (tx, rx) = oneshot::channel(); - self.dice - .state_handle - .request(StateRequest::CurrentVersion { resp: tx }); - - let v = rx.await.unwrap(); + let v = self.dice.state_handle.current_version().await; let guard = ActiveTransactionGuard::new(v, self.dice.state_handle.dupe()); - let (tx, rx) = oneshot::channel(); - self.dice.state_handle.request(StateRequest::CtxAtVersion { - version: v, - guard, - resp: tx, - }); - - let (transaction, guard) = rx.await.unwrap(); + let (transaction, guard) = self.dice.state_handle.ctx_at_version(v, guard).await; BaseComputeCtx::new(transaction, self.user_data.dupe(), self.dice.dupe(), guard) } pub(crate) fn unstable_take(&self) { - self.dice - .state_handle - .request(StateRequest::UnstableDropEverything) + self.dice.state_handle.unstable_drop_everything() } async fn commit_to_state(self) -> (SharedLiveTransactionCtx, ActiveTransactionGuard) { - let (tx, rx) = oneshot::channel(); - self.dice.state_handle.request(StateRequest::UpdateState { - changes: self.scheduled_changes.changes.into_iter().collect(), - resp: tx, - }); + let v = self + .dice + .state_handle + .update_state( + self.scheduled_changes + .changes + .into_iter() + .map(|(k, (t, p))| (k, t, p)) + .collect(), + ) + .await; - let v = rx.await.unwrap(); let guard = ActiveTransactionGuard::new(v, self.dice.state_handle.dupe()); - let (tx, rx) = oneshot::channel(); - self.dice.state_handle.request(StateRequest::CtxAtVersion { - guard, - version: v, - resp: tx, - }); - - rx.await.unwrap() + + self.dice.state_handle.ctx_at_version(v, guard).await } } @@ -174,14 +158,13 @@ pub(crate) struct ActiveTransactionGuardInner { impl Drop for ActiveTransactionGuardInner { fn drop(&mut self) { - self.state_handle - .request(StateRequest::DropCtxAtVersion { version: self.v }) + self.state_handle.drop_ctx_at_version(self.v) } } #[derive(Allocative)] struct Changes { - changes: HashMap<DiceKey, ChangeType>, + changes: HashMap<DiceKey, (ChangeType, InvalidationSourcePriority)>, dice: Arc<DiceModern>, } @@ -194,13 +177,24 @@ impl Changes { } pub(crate) fn change<K: Key>(&mut self, key: K, change: ChangeType) -> DiceResult<()> { - let key = self.dice.key_index.index_key(key); - if self.changes.insert(key, change).is_some() { - Err(DiceError::duplicate( - self.dice.key_index.get(key).dupe().downcast::<K>().unwrap(), - )) - } else { - Ok(()) + match (change, K::storage_type()) { + (ChangeType::Invalidate, StorageType::Injected) => { + Err(DiceError::injected_key_invalidated(Arc::new(key))) + } + (change, _) => { + let key = self.dice.key_index.index_key(key); + if self + .changes + .insert(key, (change, K::invalidation_source_priority())) + .is_some() + { + Err(DiceError::duplicate( + self.dice.key_index.get(key).dupe().downcast::<K>().unwrap(), + )) + } else { + Ok(()) + } + } } } } @@ -213,6 +207,11 @@ pub(crate) enum ChangeType { UpdateValue(DiceValidValue, StorageType), #[cfg(test)] /// testing only, set as recheck but not required to rerun + /// TODO(cjhopman): Delete this, it's really hard to use correctly and + /// it causes VersionedGraph to need to deal with flows of invalidations + /// that it otherwise wouldn't. + /// The right way to get a "soft-dirty" would be to have a dep and do a + /// normal ChangeType::Invalidate on the dep.
TestingSoftDirty, } @@ -222,11 +221,12 @@ mod tests { use allocative::Allocative; use assert_matches::assert_matches; use async_trait::async_trait; + use buck2_futures::cancellation::CancellationContext; use derive_more::Display; - use more_futures::cancellation::CancellationContext; use crate::api::computations::DiceComputations; use crate::api::data::DiceData; + use crate::api::key::InvalidationSourcePriority; use crate::api::key::Key; use crate::impls::dice::DiceModern; use crate::impls::key::CowDiceKeyHashed; @@ -267,14 +267,14 @@ mod tests { .scheduled_changes .changes .get(&dice.key_index.index(CowDiceKeyHashed::key(K(1)))), - Some(ChangeType::Invalidate) + Some((ChangeType::Invalidate, InvalidationSourcePriority::Normal)) ); assert_matches!( updater .scheduled_changes .changes .get(&dice.key_index.index(CowDiceKeyHashed::key(K(2)))), - Some(ChangeType::Invalidate) + Some((ChangeType::Invalidate, InvalidationSourcePriority::Normal)) ); assert_matches!( @@ -282,7 +282,7 @@ .scheduled_changes .changes .get(&dice.key_index.index(CowDiceKeyHashed::key(K(3)))), - Some(ChangeType::UpdateValue(x, _)) if *x.downcast_ref::<usize>().unwrap() == 3 + Some((ChangeType::UpdateValue(x, _), _)) if *x.downcast_ref::<usize>().unwrap() == 3 ); assert_matches!( @@ -290,7 +290,7 @@ .scheduled_changes .changes .get(&dice.key_index.index(CowDiceKeyHashed::key(K(4)))), - Some(ChangeType::UpdateValue(x, _)) if *x.downcast_ref::<usize>().unwrap() == 4 + Some((ChangeType::UpdateValue(x, _), _)) if *x.downcast_ref::<usize>().unwrap() == 4 ); assert!(updater.changed(vec![K(1)]).is_err()); diff --git a/dice/dice/src/impls/user_cycle.rs b/dice/dice/src/impls/user_cycle.rs index 5ad59362b94b2..2e84dbcda1c5d 100644 --- a/dice/dice/src/impls/user_cycle.rs +++ b/dice/dice/src/impls/user_cycle.rs @@ -19,6 +19,7 @@ use crate::api::user_data::UserCycleDetectorGuard; use crate::impls::key::DiceKey; use crate::impls::key::DiceKeyErased; use crate::impls::key_index::DiceKeyIndex; +use crate::DynKey; pub(crate) struct UserCycleDetectorData(()); @@ -31,14 +32,14 @@ impl UserCycleDetectorData { ) -> KeyComputingUserCycleDetectorData { if let Some(detector) = detector { let k_erased = key_index.get(k); - if let Some(guard) = detector.start_computing_key(k_erased.as_any()) { + if let Some(guard) = detector.start_computing_key(DynKey::ref_cast(k_erased)) { debug!("cycles start key {:?}", k); - return KeyComputingUserCycleDetectorData::Detecting { + return KeyComputingUserCycleDetectorData::Detecting(Arc::new(DetectingData { k_erased: k_erased.dupe(), k, guard, detector: detector.dupe(), - }; + })); } } KeyComputingUserCycleDetectorData::Untracked @@ -51,21 +52,24 @@ } /// User supplied cycle detector +#[derive(Clone)] pub(crate) enum KeyComputingUserCycleDetectorData { - Detecting { - k_erased: DiceKeyErased, - k: DiceKey, - guard: Box<dyn UserCycleDetectorGuard>, - detector: Arc<dyn UserCycleDetector>, - }, + Detecting(Arc<DetectingData>), Untracked, } +pub(crate) struct DetectingData { + k_erased: DiceKeyErased, + k: DiceKey, + guard: Arc<dyn UserCycleDetectorGuard>, + detector: Arc<dyn UserCycleDetector>, +} + impl KeyComputingUserCycleDetectorData { pub(crate) fn subrequest(&self, k: DiceKey, key_index: &DiceKeyIndex) -> UserCycleDetectorData { match self { - KeyComputingUserCycleDetectorData::Detecting { guard, ..
} => { - guard.add_edge(key_index.get(k).as_any()); + KeyComputingUserCycleDetectorData::Detecting(data) => { + data.guard.add_edge(DynKey::ref_cast(key_index.get(k))); } KeyComputingUserCycleDetectorData::Untracked => {} } @@ -73,15 +77,15 @@ UserCycleDetectorData(()) } - pub(crate) fn cycle_guard<T: UserCycleDetectorGuard>(&self) -> DiceResult<Option<&T>> { + pub(crate) fn cycle_guard<T: UserCycleDetectorGuard>(&self) -> DiceResult<Option<Arc<T>>> { match self { - KeyComputingUserCycleDetectorData::Detecting { guard, .. } => { - match guard.as_any().downcast_ref() { - Some(guard) => Ok(Some(guard)), - None => Err(DiceError(Arc::new( + KeyComputingUserCycleDetectorData::Detecting(data) => { + match data.guard.dupe().as_any_arc().downcast::<T>() { + Ok(guard) => Ok(Some(guard)), + Err(_) => Err(DiceError(Arc::new( DiceErrorImpl::UnexpectedCycleGuardType { expected_type_name: std::any::type_name::<T>().to_owned(), - actual_type_name: guard.type_name().to_owned(), + actual_type_name: data.guard.type_name().to_owned(), }, ))), } @@ -91,19 +95,10 @@ } } -impl Drop for KeyComputingUserCycleDetectorData { +impl Drop for DetectingData { fn drop(&mut self) { - match self { - KeyComputingUserCycleDetectorData::Detecting { - k_erased, - k, - detector, - .. - } => { - debug!("cycles finish key {:?}", k); - detector.finished_computing_key(k_erased.as_any()) - } - KeyComputingUserCycleDetectorData::Untracked => {} - } + debug!("cycles finish key {:?}", self.k); + self.detector + .finished_computing_key(DynKey::ref_cast(&self.k_erased)) } } diff --git a/dice/dice/src/impls/value.rs b/dice/dice/src/impls/value.rs index 46eef5953909b..31d18633464dc 100644 --- a/dice/dice/src/impls/value.rs +++ b/dice/dice/src/impls/value.rs @@ -14,8 +14,11 @@ use std::fmt::Formatter; use allocative::Allocative; use dupe::Dupe; +use crate::api::key::InvalidationSourcePriority; use crate::arc::Arc; -use crate::impls::core::graph::history::CellHistory; +use crate::impls::key::DiceKey; +use crate::versions::VersionNumber; +use crate::versions::VersionRanges; use crate::Key; use crate::ProjectionKey; @@ -94,7 +97,7 @@ impl MaybeValidDiceValue { #[cfg(test)] pub(crate) fn instance_equal(&self, other: &DiceValidValue) -> bool { - #[allow(clippy::vtable_address_comparisons)] + #[allow(ambiguous_wide_pointer_comparisons)] // we literally just want to compare the exact pointer std::sync::Arc::ptr_eq(&self.value, &other.0) } @@ -115,33 +118,172 @@ pub(crate) enum DiceValidity { Transient, } -#[derive(Allocative, Clone)] +impl DiceValidity { + pub(crate) fn and(&mut self, other: Self) { + if other == DiceValidity::Transient { + *self = DiceValidity::Transient; + } + } +} + +#[derive(Allocative, Clone, Dupe)] pub(crate) struct DiceComputedValue { value: MaybeValidDiceValue, - valid: Arc<CellHistory>, + valid: Arc<VersionRanges>, + invalidation_paths: TrackedInvalidationPaths, +} + +#[derive(Allocative, Debug, Clone, Dupe, PartialEq, Eq)] +pub(crate) enum InvalidationPath { + Clean, + Unknown, + Invalidated(Arc<InvalidationPathNode>), } +impl InvalidationPath { + fn for_dependent(&self, key: DiceKey) -> InvalidationPath { + match self { + InvalidationPath::Clean => InvalidationPath::Clean, + InvalidationPath::Unknown => InvalidationPath::Unknown, + InvalidationPath::Invalidated(v) => { + InvalidationPath::Invalidated(Arc::new(InvalidationPathNode { + key, + version: v.version, + cause: self.dupe(), + })) + } + } + } + + fn at_version(&self, v: VersionNumber) -> InvalidationPath { + match self { + InvalidationPath::Invalidated(t) if t.version > v => InvalidationPath::Unknown, + _ =>
self.dupe(), + } + } + + fn update(&mut self, other: InvalidationPath) { + if let InvalidationPath::Invalidated(other) = other { + match self { + InvalidationPath::Invalidated(this) if this.version > other.version => {} + InvalidationPath::Unknown => {} + InvalidationPath::Clean | InvalidationPath::Invalidated(..) => { + *self = InvalidationPath::Invalidated(other); + } + } + } + } +} + +#[derive(Allocative, Debug, Clone, Dupe, PartialEq, Eq)] +pub(crate) struct InvalidationPathNode { + /// The key at this node in the path. + pub(crate) key: DiceKey, + pub(crate) version: VersionNumber, + pub(crate) cause: InvalidationPath, +} + +#[derive(Allocative, Debug, Clone, Dupe, PartialEq, Eq)] +pub(crate) struct TrackedInvalidationPaths { + /// The path to a normal or high priority invalidation source. + normal: InvalidationPath, + /// The path to a high priority invalidation source. + high: InvalidationPath, +} + +impl TrackedInvalidationPaths { + pub(crate) fn for_dependent(&self, key: DiceKey) -> TrackedInvalidationPaths { + TrackedInvalidationPaths { + normal: self.normal.for_dependent(key), + high: self.high.for_dependent(key), + } + } + + pub(crate) fn clean() -> TrackedInvalidationPaths { + TrackedInvalidationPaths { + normal: InvalidationPath::Clean, + high: InvalidationPath::Clean, + } + } + + pub(crate) fn at_version(&self, v: VersionNumber) -> TrackedInvalidationPaths { + TrackedInvalidationPaths { + normal: self.normal.at_version(v), + high: self.high.at_version(v), + } + } -impl Dupe for DiceComputedValue { - // triomphe Arc is dupe + pub(crate) fn new( + priority: InvalidationSourcePriority, + key: DiceKey, + version: VersionNumber, + ) -> Self { + let path = InvalidationPath::Invalidated(Arc::new(InvalidationPathNode { + key, + version, + cause: InvalidationPath::Clean, + })); + match priority { + InvalidationSourcePriority::Ignored => Self::clean(), + InvalidationSourcePriority::Normal => Self { + normal: path, + high: InvalidationPath::Clean, + }, + InvalidationSourcePriority::High => Self { + normal: path.dupe(), + high: path, + }, + } + } + + pub(crate) fn update(&mut self, new_paths: TrackedInvalidationPaths) { + self.normal.update(new_paths.normal); + self.high.update(new_paths.high); + } + + pub(crate) fn get_normal(&self) -> InvalidationPath { + self.normal.dupe() + } + + pub(crate) fn get_high(&self) -> InvalidationPath { + self.high.dupe() + } } impl DiceComputedValue { - pub(crate) fn new(value: MaybeValidDiceValue, valid: Arc) -> Self { - Self { value, valid } + pub(crate) fn new( + value: MaybeValidDiceValue, + valid: Arc, + invalidation_paths: TrackedInvalidationPaths, + ) -> Self { + Self { + value, + valid, + invalidation_paths, + } } pub(crate) fn value(&self) -> &MaybeValidDiceValue { &self.value } - pub(crate) fn history(&self) -> &CellHistory { + pub(crate) fn versions(&self) -> &VersionRanges { &self.valid } + + pub(crate) fn invalidation_paths(&self) -> &TrackedInvalidationPaths { + &self.invalidation_paths + } + + pub(crate) fn into_parts(self) -> (MaybeValidDiceValue, TrackedInvalidationPaths) { + (self.value, self.invalidation_paths) + } } impl Debug for DiceComputedValue { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - f.debug_struct("DiceComputedValue").finish_non_exhaustive() + f.debug_struct("DiceComputedValue") + .field("valid", &self.valid) + .finish_non_exhaustive() } } @@ -221,12 +363,16 @@ where } #[cfg(test)] -mod testing { - use std::sync::Arc; - +pub mod testing { + use crate::arc::Arc; + use crate::impls::key::DiceKey; use 
crate::impls::value::DiceValidValue; use crate::impls::value::DiceValueDyn; + use crate::impls::value::InvalidationPath; + use crate::impls::value::InvalidationPathNode; use crate::impls::value::MaybeValidDiceValue; + use crate::impls::value::TrackedInvalidationPaths; + use crate::versions::VersionNumber; impl DiceValidValue { pub(crate) fn testing_new(value: V) -> Self { @@ -235,8 +381,100 @@ mod testing { } impl MaybeValidDiceValue { - pub(crate) fn testing_value(&self) -> &Arc { + pub(crate) fn testing_value(&self) -> &std::sync::Arc { &self.value } } + + pub struct MakeInvalidationPaths { + pub normal: (DiceKey, usize), + pub high: Option<(DiceKey, usize)>, + } + + impl MakeInvalidationPaths { + pub fn into(self) -> TrackedInvalidationPaths { + TrackedInvalidationPaths { + normal: InvalidationPath::Invalidated(Arc::new(InvalidationPathNode { + key: self.normal.0, + version: VersionNumber(self.normal.1), + cause: InvalidationPath::Clean, + })), + high: self.high.map_or(InvalidationPath::Clean, |(k, v)| { + InvalidationPath::Invalidated(Arc::new(InvalidationPathNode { + key: k, + version: VersionNumber(v), + cause: InvalidationPath::Clean, + })) + }), + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::impls::value::testing::MakeInvalidationPaths; + + #[test] + + fn test_invalidation_paths() -> anyhow::Result<()> { + let key0 = DiceKey { index: 0 }; + let key1 = DiceKey { index: 1 }; + let key2 = DiceKey { index: 2 }; + let mut paths = TrackedInvalidationPaths::clean(); + + paths.update( + MakeInvalidationPaths { + normal: (key0, 2), + high: None, + } + .into(), + ); + + assert_eq!( + paths, + MakeInvalidationPaths { + normal: (key0, 2), + high: None, + } + .into() + ); + + paths.update( + MakeInvalidationPaths { + normal: (key1, 3), + high: Some((key1, 3)), + } + .into(), + ); + + assert_eq!( + paths, + MakeInvalidationPaths { + normal: (key1, 3), + high: Some((key1, 3)), + } + .into() + ); + + paths.update( + MakeInvalidationPaths { + normal: (key2, 4), + high: None, + } + .into(), + ); + + assert_eq!( + paths, + MakeInvalidationPaths { + normal: (key2, 4), + high: Some((key1, 3)) + } + .into() + ); + + Ok(()) + } } diff --git a/dice/dice/src/impls/worker.rs b/dice/dice/src/impls/worker.rs new file mode 100644 index 0000000000000..29d817c333b74 --- /dev/null +++ b/dice/dice/src/impls/worker.rs @@ -0,0 +1,544 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! 
The main worker thread for the dice task + +use std::any::Any; +use std::future; + +use dupe::Dupe; +use futures::future::BoxFuture; +use futures::pin_mut; +use futures::stream; +use futures::stream::FuturesUnordered; +use futures::Future; +use futures::FutureExt; +use futures::StreamExt; +use gazebo::variants::VariantName; +use itertools::Either; +use tracing::Instrument; + +use crate::api::activation_tracker::ActivationData; +use crate::arc::Arc; +use crate::impls::core::graph::types::VersionedGraphKey; +use crate::impls::core::graph::types::VersionedGraphResult; +use crate::impls::core::state::CoreStateHandle; +use crate::impls::core::versions::VersionEpoch; +use crate::impls::deps::graph::SeriesParallelDeps; +use crate::impls::deps::iterator::SeriesParallelDepsIteratorItem; +use crate::impls::evaluator::AsyncEvaluator; +use crate::impls::evaluator::KeyEvaluationResult; +use crate::impls::evaluator::SyncEvaluator; +use crate::impls::events::DiceEventDispatcher; +use crate::impls::key::DiceKey; +use crate::impls::key::ParentKey; +use crate::impls::task::dice::DiceTask; +use crate::impls::task::promise::DicePromise; +use crate::impls::task::promise::DiceSyncResult; +use crate::impls::task::spawn_dice_task; +use crate::impls::task::PreviouslyCancelledTask; +use crate::impls::user_cycle::KeyComputingUserCycleDetectorData; +use crate::impls::user_cycle::UserCycleDetectorData; +use crate::impls::value::DiceComputedValue; +use crate::impls::value::TrackedInvalidationPaths; +use crate::impls::worker::state::ActivationInfo; +use crate::impls::worker::state::DiceWorkerStateAwaitingPrevious; +use crate::impls::worker::state::DiceWorkerStateEvaluating; +use crate::impls::worker::state::DiceWorkerStateFinishedAndCached; +use crate::impls::worker::state::DiceWorkerStateFinishedEvaluating; +use crate::impls::worker::state::DiceWorkerStateLookupNode; +use crate::result::CancellableResult; +use crate::result::CancellationReason; +use crate::versions::VersionNumber; +use crate::versions::VersionRange; + +pub(crate) mod state; + +#[cfg(test)] +mod tests; + +/// The worker on the spawned dice task +/// +/// Manages all the handling of the results of a specific key, performing the recomputation +/// if necessary +/// +/// The computation of an identical request (same key and version) is +/// automatically deduplicated, so that identical requests share the same set of +/// work. It is guaranteed that there is at most one computation in flight at a +/// time if they share the same key and version. 
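To make the deduplication guarantee above concrete, here is a minimal standalone sketch. It is illustrative only, not DICE's actual task machinery (which uses spawned DiceTasks owned by the core state): the KeyId, Version, and Value aliases are made up, but the shape is the same, since every caller requesting the same (key, version) polls one shared future, so the computation itself runs at most once.

use std::collections::HashMap;
use std::sync::{Arc, Mutex};

use futures::future::{BoxFuture, FutureExt, Shared};

// Hypothetical simplified identifiers; the real code uses erased
// DiceKey/VersionNumber pairs and DiceComputedValue results.
type KeyId = u64;
type Version = u64;
type Value = u64;

#[derive(Default, Clone)]
struct TaskMap {
    // At most one in-flight computation per (key, version).
    tasks: Arc<Mutex<HashMap<(KeyId, Version), Shared<BoxFuture<'static, Value>>>>>,
}

impl TaskMap {
    fn compute(
        &self,
        key: KeyId,
        version: Version,
        do_compute: impl FnOnce() -> BoxFuture<'static, Value>,
    ) -> Shared<BoxFuture<'static, Value>> {
        let mut tasks = self.tasks.lock().unwrap();
        tasks
            .entry((key, version))
            // The first requester kicks off the computation; everyone else
            // clones and polls the same Shared future, so it runs once.
            .or_insert_with(|| do_compute().shared())
            .clone()
    }
}

A caller would write map.compute(k, v, || async { 42u64 }.boxed()) and await the returned future; concurrent callers passing the same (k, v) share the same underlying computation.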
+ +pub(crate) struct DiceTaskWorker { + k: DiceKey, + eval: AsyncEvaluator, + event_dispatcher: DiceEventDispatcher, + version_epoch: VersionEpoch, +} + +impl DiceTaskWorker { + pub(crate) fn spawn( + k: DiceKey, + version_epoch: VersionEpoch, + eval: AsyncEvaluator, + cycles: UserCycleDetectorData, + event_dispatcher: DiceEventDispatcher, + previously_cancelled_task: Option<PreviouslyCancelledTask>, + ) -> DiceTask { + let span = debug_span!(parent: None, "spawned_dice_task", k = ?k, v = %eval.per_live_version_ctx.get_version(), v_epoch = %version_epoch); + + let spawner = eval.user_data.spawner.dupe(); + let spawner_ctx = eval.user_data.dupe(); + let state_handle = eval.dice.state_handle.dupe(); + + let worker = DiceTaskWorker { + k, + eval, + event_dispatcher, + version_epoch, + }; + + spawn_dice_task(k, &*spawner, &spawner_ctx, move |handle| { + // NOTE: important to run prevent cancellation eagerly in the sync scope to prevent + // cancellations so that we don't cancel the current task before we finish waiting + // for the previously cancelled task + let prevent_cancellation = handle.cancellation_ctx().begin_ignore_cancellation(); + let state = + DiceWorkerStateAwaitingPrevious::new(k, cycles, handle, prevent_cancellation); + + async move { + let previous_result = match previously_cancelled_task { + Some(v) => state.await_previous(v).await, + None => Either::Right(state.no_previous_task().await), + }; + + match previous_result { + Either::Left(_) => { + // previous result actually finished + } + Either::Right(state) => { + let _ignore = worker.do_work(state_handle, state).await; + } + } + + Box::new(()) as Box<dyn Any + Send + 'static> + } + .instrument(span) + .boxed() + }) + } + + /// This is the primary flow of how a key is computed or re-computed. + pub(crate) async fn do_work( + &self, + state_handle: CoreStateHandle, + task_state: DiceWorkerStateLookupNode<'_, '_>, + ) -> CancellableResult<DiceWorkerStateFinishedAndCached> { + let v = self.eval.per_live_version_ctx.get_version(); + + let state_result = state_handle + .lookup_key(VersionedGraphKey::new(v, self.k)) + .await; + + // handle cancelled/cache hits before sending started events + let deps_to_check = match state_result { + VersionedGraphResult::Match(entry) => { + return task_state.lookup_matches(entry); + } + VersionedGraphResult::CheckDeps(mismatch2) => Some(mismatch2), + VersionedGraphResult::Compute => None, + VersionedGraphResult::Rejected(..) => { + return Err(CancellationReason::Rejected); + } + }; + + self.event_dispatcher.started(self.k); + scopeguard::defer! { + self.event_dispatcher.finished(self.k); + }; + + // deps_check_continuables needs to capture these and so they need to outlive it. + let cycles; + let mismatch; + let (task_state, deps_check_continuables) = match deps_to_check { + Some(mismatch2) => { + let (task_state, cycles2) = task_state.checking_deps(&self.eval); + cycles = cycles2; + mismatch = mismatch2; + + self.event_dispatcher.check_deps_started(self.k); + + let check_deps_result = { + scopeguard::defer! { + self.event_dispatcher.check_deps_finished(self.k); + } + check_dependencies( + &self.eval, + ParentKey::Some(self.k), + &mismatch.deps_to_validate, + mismatch.prev_verified_version, + &cycles, + ) + .await? + }; + + match check_deps_result { + CheckDependenciesResult::NoChange { ..
} => { + let invalidation_paths = + check_deps_result.unwrap_no_change_invalidation_paths(); + + let task_state = task_state.deps_match()?; + + let activation_info = self.activation_info( + mismatch.deps_to_validate.iter_keys(), + ActivationData::Reused, + ); + + let response = state_handle + .update_mismatch_as_unchanged( + VersionedGraphKey::new(v, self.k), + self.version_epoch, + self.eval.storage_type(self.k), + mismatch, + invalidation_paths, + ) + .await; + + return response.map(|r| task_state.cached(r, activation_info)); + } + CheckDependenciesResult::NoDeps => { + // TODO(cjhopman): Why do we treat nodeps as deps not matching? There seems to be some + // implicit meaning to a node having no deps at this point, but it's unclear what that is. + (task_state.deps_not_match(), None) + } + CheckDependenciesResult::Changed { continuables } => { + (task_state.deps_not_match(), Some(continuables)) + } + } + } + None => { + let (task_state, cycles2) = task_state.lookup_dirtied(&self.eval); + cycles = cycles2; + (task_state, None) + } + }; + + let DiceWorkerStateFinishedEvaluating { + state, + activation_data, + result, + } = self.compute(task_state, &cycles).await?; + + // explicitly drop this here to make it clear that its important that we hold onto it, it + // otherwise appears unused, but we don't want to cancel anything that it has started requesting + // before compute finishes. + // TODO(cjhopman): we could be polling this future, it might eagerly request deps more quickly than + // the compute would. + drop(deps_check_continuables); + + let activation_info = self.activation_info(result.deps.iter_keys(), activation_data); + + let res = { + match result.value.into_valid_value() { + Ok(value) => { + let v = self.eval.per_live_version_ctx.get_version(); + state_handle + .update_computed( + VersionedGraphKey::new(v, self.k), + self.version_epoch, + result.storage, + value, + Arc::new(result.deps), + result.invalidation_paths, + ) + .await + } + Err(value) => Ok(DiceComputedValue::new( + value, + Arc::new(VersionRange::begins_with(v).into_ranges()), + result.invalidation_paths, + )), + } + }; + + res.map(|res| state.cached(res, activation_info)) + } + + async fn compute<'a, 'b>( + &self, + task_state: DiceWorkerStateEvaluating<'a, 'b>, + cycles: &KeyComputingUserCycleDetectorData, + ) -> CancellableResult> { + self.event_dispatcher.compute_started(self.k); + scopeguard::defer! { + self.event_dispatcher.compute_finished(self.k); + }; + + // TODO(bobyf) these also make good locations where we want to perform instrumentation + debug!(msg = "running evaluator"); + + self.eval.evaluate(self.k, task_state, cycles.clone()).await + } + + fn activation_info<'a>( + &self, + deps: impl Iterator + 'a, + data: ActivationData, + ) -> Option { + ActivationInfo::new( + &self.eval.dice.key_index, + &self.eval.user_data.activation_tracker, + self.k, + deps, + data, + ) + } +} + +/// Used for checking if dependencies have changed since the previously checked version. 
+#[cfg_attr(debug_assertions, instrument( + level = "debug", + skip(eval, cycles), + fields(version = %eval.per_live_version_ctx.get_version(), version = %version) +))] +pub(crate) async fn check_dependencies<'a>( + eval: &'a AsyncEvaluator, + parent_key: ParentKey, + deps: &'a SeriesParallelDeps, + version: VersionNumber, + cycles: &'a KeyComputingUserCycleDetectorData, +) -> CancellableResult<CheckDependenciesResult<'a>> { + async fn drain_continuables< + 'a, + Fut: Future<Output = CancellableResult<CheckDependenciesResult<'a>>>, + >( + inner: BoxFuture<'a, CancellableResult<()>>, + parallel: FuturesUnordered<Fut>, + ) -> CancellableResult<()> { + let parallel = parallel.map(|v| v.map(|_| ())); + let combined = stream::select(inner.into_stream(), parallel); + pin_mut!(combined); + while let Some(v) = combined.next().await { + if let Err(cancelled) = v { + return Err(cancelled); + } + } + Ok(()) + } + + fn check_dependencies_series<'a>( + eval: &'a AsyncEvaluator, + parent_key: ParentKey, + deps: impl Iterator<Item = SeriesParallelDepsIteratorItem<'a>> + Send + 'a, + version: VersionNumber, + cycles: &'a KeyComputingUserCycleDetectorData, + ) -> BoxFuture<'a, CancellableResult<CheckDependenciesResult<'a>>> { + let mut invalidation_paths = TrackedInvalidationPaths::clean(); + async move { + for v in deps { + match v { + SeriesParallelDepsIteratorItem::Key(k) => { + match check_dependency(eval, parent_key, *k, cycles, version).await { + Ok(CheckDependencyResult::NoChange(dep_paths)) => { + invalidation_paths.update(dep_paths); + } + Ok(CheckDependencyResult::Changed) => { + return Ok(CheckDependenciesResult::Changed { + continuables: std::future::ready(Ok(())).boxed(), + }); + } + Err(cancelled) => { + return Err(cancelled); + } + } + } + SeriesParallelDepsIteratorItem::Parallel(p) => { + let mut futures: FuturesUnordered<_> = p + .map(|deps| { + check_dependencies_series(eval, parent_key, deps, version, cycles) + .boxed() + }) + .collect(); + + while let Some(v) = futures.next().await { + match v? { + CheckDependenciesResult::NoChange { + invalidation_paths: deps_paths, + } => { + invalidation_paths.update(deps_paths); + } + CheckDependenciesResult::NoDeps => {} + CheckDependenciesResult::Changed { continuables } => { + return Ok(CheckDependenciesResult::Changed { + continuables: drain_continuables(continuables, futures) + .boxed(), + }); + } + } + } + } + } + } + Ok(CheckDependenciesResult::NoChange { invalidation_paths }) + } + .boxed() + } + + if deps.is_empty() { + return Ok(CheckDependenciesResult::NoDeps); + } + + trace!(deps = ?deps); + + check_dependencies_series(eval, parent_key, deps.iter(), version, cycles).await +} + +enum CheckDependencyResult { + Changed, + NoChange(TrackedInvalidationPaths), +} + +async fn check_dependency( + eval: &AsyncEvaluator, + parent_key: ParentKey, + dep: DiceKey, + cycles: &KeyComputingUserCycleDetectorData, + version: VersionNumber, +) -> CancellableResult<CheckDependencyResult> { + let dep_result = eval + .per_live_version_ctx + .compute_opaque( + dep, + parent_key, + &eval, + cycles.subrequest(dep, &eval.dice.key_index), + ) + .await?; + + if dep_result.versions().contains(version) { + Ok(CheckDependencyResult::NoChange(dep_result.into_parts().1)) + } else { + Ok(CheckDependencyResult::Changed) + } +} + +#[derive(VariantName)] +enum CheckDependenciesResult<'a> { + NoDeps, + NoChange { + invalidation_paths: TrackedInvalidationPaths, + }, + Changed { + /// If any dep has changed, the deps checking doesn't need to be stopped: when something has + /// changed in a dep in a parallel series, we can continue to request and compute the other + /// paths in that parallel series (and so potentially continue to request new deps).
+ /// + /// Those other checks won't be dropped/cancelled until the continuables future is dropped, + /// and polling it will continue that deps check process. + continuables: BoxFuture<'a, CancellableResult<()>>, + }, +} +impl CheckDependenciesResult<'_> { + fn unwrap_no_change_invalidation_paths(self) -> TrackedInvalidationPaths { + match self { + Self::NoChange { invalidation_paths } => invalidation_paths, + _ => panic!(), + } + } +} + +#[cfg_attr(debug_assertions, instrument( + level = "debug", + skip(state, promise, eval, event_dispatcher), + fields(k = ?k, version = %v), +))] +pub(crate) fn project_for_key( + state: CoreStateHandle, + promise: DicePromise, + k: DiceKey, + v: VersionNumber, + version_epoch: VersionEpoch, + eval: SyncEvaluator, + event_dispatcher: DiceEventDispatcher, +) -> CancellableResult { + promise.sync_get_or_complete(|| { + event_dispatcher.started(k); + + debug!(msg = "running projection"); + + let eval_result = eval.evaluate(k); + + debug!(msg = "projection finished. updating caches"); + + let (res, invalidation_paths, future) = { + let KeyEvaluationResult { + value, + deps, + storage, + invalidation_paths, + } = eval_result; + // send the update but don't wait for it + let state_future = match value.dupe().into_valid_value() { + Ok(value) => { + let rx = state.update_computed( + VersionedGraphKey::new(v, k), + version_epoch, + storage, + value, + Arc::new(deps), + invalidation_paths.dupe(), + ); + + Some(rx.map(|res| res).boxed()) + } + Err(_transient_result) => { + // transients are never stored in the state, but the result should be shared + // with async computations as if it were. + None + } + }; + + (value, invalidation_paths, state_future) + }; + + debug!(msg = "update future completed"); + event_dispatcher.finished(k); + + let computed_value = DiceComputedValue::new( + res, + Arc::new(VersionRange::begins_with(v).into_ranges()), + invalidation_paths, + ); + let state_future = + future.unwrap_or_else(|| future::ready(Ok(computed_value.dupe())).boxed()); + + DiceSyncResult { + sync_result: computed_value, + state_future, + } + }) +} + +#[cfg(test)] +pub(crate) mod testing { + + use crate::impls::worker::CheckDependenciesResult; + + pub(crate) trait CheckDependenciesResultExt { + fn is_changed(&self) -> bool; + } + + impl CheckDependenciesResultExt for CheckDependenciesResult<'_> { + fn is_changed(&self) -> bool { + match self { + CheckDependenciesResult::Changed { .. } => true, + CheckDependenciesResult::NoChange { .. } => false, + CheckDependenciesResult::NoDeps => false, + } + } + } +} diff --git a/dice/dice/src/impls/worker/mod.rs b/dice/dice/src/impls/worker/mod.rs deleted file mode 100644 index 11ce31a9a6666..0000000000000 --- a/dice/dice/src/impls/worker/mod.rs +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! 
The main worker thread for the dice task - -use std::any::Any; - -use dupe::Dupe; -use futures::FutureExt; -use itertools::Either; -use tracing::Instrument; - -use crate::impls::evaluator::AsyncEvaluator; -use crate::impls::events::DiceEventDispatcher; -use crate::impls::incremental::IncrementalEngine; -use crate::impls::key::DiceKey; -use crate::impls::task::dice::DiceTask; -use crate::impls::task::spawn_dice_task; -use crate::impls::task::PreviouslyCancelledTask; -use crate::impls::user_cycle::UserCycleDetectorData; -use crate::impls::worker::state::DiceWorkerStateAwaitingPrevious; -use crate::impls::worker::state::DiceWorkerStateFinishedAndCached; -use crate::impls::worker::state::DiceWorkerStateLookupNode; -use crate::result::CancellableResult; -use crate::result::Cancelled; - -pub(crate) mod state; - -/// The worker on the spawned dice task -pub(crate) struct DiceTaskWorker { - k: DiceKey, - eval: AsyncEvaluator, - events_dispatcher: DiceEventDispatcher, - incremental: IncrementalEngine, -} - -impl DiceTaskWorker { - pub(crate) fn spawn( - k: DiceKey, - eval: AsyncEvaluator, - cycles: UserCycleDetectorData, - events_dispatcher: DiceEventDispatcher, - previously_cancelled_task: Option, - incremental: IncrementalEngine, - ) -> DiceTask { - let span = debug_span!(parent: None, "spawned_dice_task", k = ?k, v = %eval.per_live_version_ctx.get_version(), v_epoch = %incremental.version_epoch); - - let spawner = eval.user_data.spawner.dupe(); - let spawner_ctx = eval.user_data.dupe(); - - let worker = DiceTaskWorker::new(k, eval, events_dispatcher, incremental); - - spawn_dice_task(k, &*spawner, &spawner_ctx, move |handle| { - // NOTE: important to run prevent cancellation eagerly in the sync function to prevent - // cancellations so that we don't cancel the current task before we finish waiting - // for the previously cancelled task - let prevent_cancellation = handle.cancellation_ctx().begin_ignore_cancellation(); - let state = - DiceWorkerStateAwaitingPrevious::new(k, cycles, handle, prevent_cancellation); - - // we hold onto the handle and drop it last after consuming the `worker`. This - // ensures any data being held for the actual evaluation is dropped before we - // notify the future as done. 
- async move { - match worker - .await_previous(previously_cancelled_task, state) - .await - { - Either::Left(_) => { - // done - } - Either::Right(state) => { - let _ignore = worker.do_work(state).await; - } - } - - Box::new(()) as Box<dyn Any + Send + 'static> - } - .instrument(span) - .boxed() - }) - } - - fn new( - k: DiceKey, - eval: AsyncEvaluator, - events_dispatcher: DiceEventDispatcher, - incremental: IncrementalEngine, - ) -> Self { - Self { - k, - eval, - events_dispatcher, - incremental, - } - } - - async fn await_previous<'a, 'b>( - &self, - previously_cancelled_task: Option<PreviouslyCancelledTask>, - state: DiceWorkerStateAwaitingPrevious<'a, 'b>, - ) -> Either< - CancellableResult<DiceWorkerStateFinishedAndCached>, - DiceWorkerStateLookupNode<'a, 'b>, - > { - Either::Right(if let Some(previous) = previously_cancelled_task { - previous.previous.await_termination().await; - - // old task actually finished, so just use that result if it wasn't - // cancelled - - match previous - .previous - .get_finished_value() - .expect("Terminated task must have finished value") - { - Ok(res) => { - return Either::Left(state.previously_finished(res)); - } - Err(Cancelled) => { - // actually was cancelled, so just continue re-evaluating - } - } - - state.previously_cancelled().await - } else { - state.no_previous_task().await - }) - } - - pub(crate) async fn do_work( - self, - state: DiceWorkerStateLookupNode<'_, '_>, - ) -> CancellableResult<DiceWorkerStateFinishedAndCached> { - self.incremental - .eval_entry_versioned(self.k, &self.eval, self.events_dispatcher, state) - .await - } -} diff --git a/dice/dice/src/impls/worker/state.rs b/dice/dice/src/impls/worker/state.rs index 276a74fc35f60..9bc1085471071 100644 --- a/dice/dice/src/impls/worker/state.rs +++ b/dice/dice/src/impls/worker/state.rs @@ -11,10 +11,11 @@ use std::sync::Arc; +use buck2_futures::cancellable_future::DisableCancellationGuard; +use buck2_futures::cancellation::ExplicitCancellationContext; +use buck2_futures::cancellation::IgnoreCancellationGuard; use dupe::Dupe; -use more_futures::cancellable_future::DisableCancellationGuard; -use more_futures::cancellation::ExplicitCancellationContext; -use more_futures::cancellation::IgnoreCancellationGuard; +use itertools::Either; use crate::impls::evaluator::AsyncEvaluator; use crate::impls::evaluator::KeyEvaluationResult; @@ -22,13 +23,15 @@ use crate::impls::key::DiceKey; use crate::impls::key::DiceKeyErased; use crate::impls::key_index::DiceKeyIndex; use crate::impls::task::handle::DiceTaskHandle; +use crate::impls::task::PreviouslyCancelledTask; use crate::impls::user_cycle::KeyComputingUserCycleDetectorData; use crate::impls::user_cycle::UserCycleDetectorData; use crate::impls::value::DiceComputedValue; use crate::result::CancellableResult; -use crate::result::Cancelled; +use crate::result::CancellationReason; use crate::ActivationData; use crate::ActivationTracker; +use crate::DynKey; /// Represents when we are in a spawned dice task worker and are currently waiting for the previous /// cancelled instance of this task to finish cancelling.
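The await-previous handshake this state encodes reduces to: wait for the cancelled predecessor to fully terminate; if it raced to completion and actually finished, reuse its result; otherwise re-evaluate. A minimal sketch, with a hypothetical Outcome enum and a oneshot channel standing in for DiceTask's termination observer (this is not the DiceWorkerStateAwaitingPrevious API itself):

use futures::channel::oneshot;

// Hypothetical stand-ins for a task's terminal states.
enum Outcome<T> {
    Finished(T),
    Cancelled,
}

async fn await_previous_then_compute<T>(
    // Resolves once the predecessor task has fully wound down.
    previous: oneshot::Receiver<Outcome<T>>,
    recompute: impl std::future::Future<Output = T>,
) -> T {
    match previous.await {
        // The "cancelled" task finished anyway; reuse its value.
        Ok(Outcome::Finished(value)) => value,
        // It really was cancelled (or the sender vanished); start over.
        Ok(Outcome::Cancelled) | Err(_) => recompute.await,
    }
}

Not starting the recompute future until the predecessor has terminated is what preserves the at-most-one-computation-in-flight invariant across a cancel/respawn of the same key.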
@@ -95,6 +98,34 @@ impl<'a, 'b> DiceWorkerStateAwaitingPrevious<'a, 'b> { cycles: self.cycles, } } + + pub(crate) async fn await_previous( + self, + previous: PreviouslyCancelledTask, + ) -> Either< + CancellableResult, + DiceWorkerStateLookupNode<'a, 'b>, + > { + previous.previous.await_termination().await; + + // old task actually finished, so just use that result if it wasn't + // cancelled + + match previous + .previous + .get_finished_value() + .expect("Terminated task must have finished value") + { + Ok(res) => { + return Either::Left(self.previously_finished(res)); + } + Err(_cancelled) => { + // actually was cancelled, so just continue re-evaluating + } + } + + Either::Right(self.previously_cancelled().await) + } } fn finish_with_cached_value( @@ -103,7 +134,7 @@ fn finish_with_cached_value( internals: &mut DiceTaskHandle<'_>, ) -> CancellableResult { match disable_cancellation { - None => Err(Cancelled), + None => Err(CancellationReason::Cached), Some(g) => { internals.finished(value); @@ -126,7 +157,10 @@ impl<'a, 'b> DiceWorkerStateLookupNode<'a, 'b> { pub(crate) fn checking_deps( self, eval: &AsyncEvaluator, - ) -> DiceWorkerStateCheckingDeps<'a, 'b> { + ) -> ( + DiceWorkerStateCheckingDeps<'a, 'b>, + KeyComputingUserCycleDetectorData, + ) { debug!(msg = "found existing entry with mismatching version. checking if deps changed."); self.internals.checking_deps(); @@ -137,13 +171,21 @@ impl<'a, 'b> DiceWorkerStateLookupNode<'a, 'b> { eval.user_data.cycle_detector.as_ref(), ); - DiceWorkerStateCheckingDeps { + ( + DiceWorkerStateCheckingDeps { + internals: self.internals, + }, cycles, - internals: self.internals, - } + ) } - pub(crate) fn lookup_dirtied(self, eval: &AsyncEvaluator) -> DiceWorkerStateComputing<'a, 'b> { + pub(crate) fn lookup_dirtied( + self, + eval: &AsyncEvaluator, + ) -> ( + DiceWorkerStateEvaluating<'a, 'b>, + KeyComputingUserCycleDetectorData, + ) { debug!(msg = "lookup requires recompute."); self.internals.computing(); @@ -154,10 +196,12 @@ impl<'a, 'b> DiceWorkerStateLookupNode<'a, 'b> { eval.user_data.cycle_detector.as_ref(), ); - DiceWorkerStateComputing { + ( + DiceWorkerStateEvaluating { + internals: self.internals, + }, cycles, - internals: self.internals, - } + ) } pub(crate) fn lookup_matches( @@ -179,33 +223,20 @@ impl<'a, 'b> DiceWorkerStateLookupNode<'a, 'b> { /// When the spawned dice task worker is checking if the dependencies have changed since the last /// time this node was verified, and are waiting for the results of the dependency re-computation. pub(crate) struct DiceWorkerStateCheckingDeps<'a, 'b> { - cycles: KeyComputingUserCycleDetectorData, internals: &'a mut DiceTaskHandle<'b>, } impl<'a, 'b> DiceWorkerStateCheckingDeps<'a, 'b> { - pub(crate) fn cycles_for_dep( - &self, - dep: DiceKey, - eval: &AsyncEvaluator, - ) -> UserCycleDetectorData { - self.cycles.subrequest(dep, &eval.dice.key_index) - } - - pub(crate) fn deps_not_match(self) -> DiceWorkerStateComputing<'a, 'b> { + pub(crate) fn deps_not_match(self) -> DiceWorkerStateEvaluating<'a, 'b> { debug!(msg = "deps changed"); self.internals.computing(); - DiceWorkerStateComputing { - cycles: self.cycles, + DiceWorkerStateEvaluating { internals: self.internals, } } - pub(crate) fn deps_match( - self, - activation_info: Option, - ) -> CancellableResult> { + pub(crate) fn deps_match(self) -> CancellableResult> { debug!(msg = "reusing previous value because deps didn't change. 
Updating caches"); let guard = match self @@ -216,46 +247,15 @@ impl<'a, 'b> DiceWorkerStateCheckingDeps<'a, 'b> { Some(g) => g, None => { debug!("evaluation cancelled, skipping cache updates"); - return Err(Cancelled); + return Err(CancellationReason::DepsMatch); } }; Ok(DiceWorkerStateFinished { _prevent_cancellation: guard, internals: self.internals, - activation_info, }) } - - #[cfg(test)] - pub(crate) fn testing(task_handle: &'a mut DiceTaskHandle<'b>) -> Self { - DiceWorkerStateCheckingDeps { - cycles: KeyComputingUserCycleDetectorData::Untracked, - internals: task_handle, - } - } -} - -/// When the spawned dice worker is currently computing the requested Key. -pub(crate) struct DiceWorkerStateComputing<'a, 'b> { - cycles: KeyComputingUserCycleDetectorData, - internals: &'a mut DiceTaskHandle<'b>, -} - -impl<'a, 'b> DiceWorkerStateComputing<'a, 'b> { - pub(crate) fn evaluating( - self, - ) -> ( - KeyComputingUserCycleDetectorData, - DiceWorkerStateEvaluating<'a, 'b>, - ) { - ( - self.cycles, - DiceWorkerStateEvaluating { - internals: self.internals, - }, - ) - } } /// When the spawned dice worker is currently actively evaluating the `Key::compute` function @@ -272,7 +272,7 @@ impl<'a, 'b> DiceWorkerStateEvaluating<'a, 'b> { self, cycles: KeyComputingUserCycleDetectorData, result: KeyEvaluationResult, - activation_info: Option, + activation_data: ActivationData, ) -> CancellableResult> { debug!(msg = "evaluation finished. updating caches"); @@ -284,7 +284,7 @@ impl<'a, 'b> DiceWorkerStateEvaluating<'a, 'b> { Some(g) => g, None => { debug!("evaluation cancelled, skipping cache updates"); - return Err(Cancelled); + return Err(CancellationReason::WorkerFinished); } }; @@ -294,8 +294,8 @@ impl<'a, 'b> DiceWorkerStateEvaluating<'a, 'b> { state: DiceWorkerStateFinished { _prevent_cancellation: guard, internals: self.internals, - activation_info, }, + activation_data, result, }) } @@ -304,6 +304,7 @@ impl<'a, 'b> DiceWorkerStateEvaluating<'a, 'b> { /// When the spawned dice worker has just finished evaluating the `Key::compute` function pub(crate) struct DiceWorkerStateFinishedEvaluating<'a, 'b> { pub(crate) state: DiceWorkerStateFinished<'a, 'b>, + pub(crate) activation_data: ActivationData, pub(crate) result: KeyEvaluationResult, } @@ -313,17 +314,20 @@ pub(crate) struct DiceWorkerStateFinishedEvaluating<'a, 'b> { pub(crate) struct DiceWorkerStateFinished<'a, 'b> { _prevent_cancellation: DisableCancellationGuard, internals: &'a mut DiceTaskHandle<'b>, - activation_info: Option, } impl<'a, 'b> DiceWorkerStateFinished<'a, 'b> { - pub(crate) fn cached(mut self, value: DiceComputedValue) -> DiceWorkerStateFinishedAndCached { + pub(crate) fn cached( + self, + value: DiceComputedValue, + activation_info: Option, + ) -> DiceWorkerStateFinishedAndCached { debug!(msg = "Update caches complete"); - if let Some(activation_info) = self.activation_info.take() { + if let Some(activation_info) = activation_info { activation_info.activation_tracker.key_activated( - activation_info.key.as_any(), - &mut activation_info.deps.iter().map(|k| k.as_any()), + DynKey::ref_cast(&activation_info.key), + &mut activation_info.deps.iter().map(DynKey::ref_cast), activation_info.activation_data, ) } @@ -347,12 +351,12 @@ impl ActivationInfo { key_index: &DiceKeyIndex, activation_tracker: &Option>, key: DiceKey, - deps: impl Iterator, + deps: impl Iterator + 'a, activation_data: ActivationData, ) -> Option { if let Some(activation_tracker) = activation_tracker { let key = key_index.get(key).dupe(); - let deps = 
deps.map(|dep| key_index.get(*dep).dupe()).collect(); + let deps = deps.map(|dep| key_index.get(dep).dupe()).collect(); Some(ActivationInfo { activation_tracker: activation_tracker.dupe(), diff --git a/dice/dice/src/impls/worker/tests.rs b/dice/dice/src/impls/worker/tests.rs new file mode 100644 index 0000000000000..d06462b61ea5f --- /dev/null +++ b/dice/dice/src/impls/worker/tests.rs @@ -0,0 +1,1243 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::hash::Hash; +use std::hash::Hasher; +use std::sync::atomic::AtomicBool; +use std::sync::atomic::AtomicU32; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering; +use std::time::Duration; + +use allocative::Allocative; +use assert_matches::assert_matches; +use async_trait::async_trait; +use buck2_futures::cancellation::CancellationContext; +use derive_more::Display; +use dupe::Dupe; +use dupe::IterDupedExt; +use futures::pin_mut; +use futures::Future; +use gazebo::prelude::SliceExt; +use gazebo::variants::VariantName; +use tokio::sync::Mutex; +use tokio::sync::Notify; +use tokio::sync::Semaphore; + +use crate::api::computations::DiceComputations; +use crate::api::data::DiceData; +use crate::api::key::InvalidationSourcePriority; +use crate::api::key::Key; +use crate::api::storage_type::StorageType; +use crate::api::user_data::NoOpTracker; +use crate::api::user_data::UserComputationData; +use crate::arc::Arc; +use crate::impls::core::graph::types::VersionedGraphKey; +use crate::impls::core::versions::VersionEpoch; +use crate::impls::ctx::SharedLiveTransactionCtx; +use crate::impls::deps::graph::SeriesParallelDeps; +use crate::impls::deps::RecordingDepsTracker; +use crate::impls::dice::DiceModern; +use crate::impls::evaluator::AsyncEvaluator; +use crate::impls::events::DiceEventDispatcher; +use crate::impls::key::DiceKey; +use crate::impls::key::ParentKey; +use crate::impls::task::PreviouslyCancelledTask; +use crate::impls::transaction::ActiveTransactionGuard; +use crate::impls::transaction::ChangeType; +use crate::impls::user_cycle::KeyComputingUserCycleDetectorData; +use crate::impls::user_cycle::UserCycleDetectorData; +use crate::impls::value::DiceComputedValue; +use crate::impls::value::DiceKeyValue; +use crate::impls::value::DiceValidValue; +use crate::impls::value::DiceValidity; +use crate::impls::value::MaybeValidDiceValue; +use crate::impls::value::TrackedInvalidationPaths; +use crate::impls::worker::check_dependencies; +use crate::impls::worker::testing::CheckDependenciesResultExt; +use crate::impls::worker::CheckDependenciesResult; +use crate::impls::worker::DiceTaskWorker; +use crate::result::CancellableResult; +use crate::versions::VersionNumber; +use crate::versions::VersionRange; +use crate::versions::VersionRanges; + +#[derive(Allocative, Clone, Dupe, Debug, Display, PartialEq, Eq, Hash)] +struct K; + +#[async_trait] +impl Key for K { + type Value = usize; + + async fn compute( + &self, + _ctx: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> Self::Value { + unimplemented!("test") + } + + fn equality(x: &Self::Value, y: &Self::Value) -> bool { + x == y + } +} + +#[derive(Allocative, Clone, Debug, Display)] +#[display("{:?}", self)] +struct IsRan(Arc); + +#[async_trait] +impl Key for IsRan { + type Value = (); + + 
async fn compute( + &self, + _ctx: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> Self::Value { + self.0.store(true, Ordering::SeqCst); + } + + fn equality(_x: &Self::Value, _y: &Self::Value) -> bool { + false + } +} + +impl PartialEq for IsRan { + fn eq(&self, _other: &Self) -> bool { + true + } +} +impl Eq for IsRan {} +impl Hash for IsRan { + fn hash<H: Hasher>(&self, _state: &mut H) {} +} + +#[derive(Allocative, Clone, Dupe, Debug, Display, PartialEq, Eq, Hash)] +struct Finish; + +#[async_trait] +impl Key for Finish { + type Value = (); + + async fn compute( + &self, + _ctx: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> Self::Value { + } + + fn equality(_: &Self::Value, _: &Self::Value) -> bool { + true + } +} + +#[tokio::test] +async fn test_detecting_changed_dependencies() -> anyhow::Result<()> { + let dice = DiceModern::new(DiceData::new()); + + let user_data = std::sync::Arc::new(UserComputationData::new()); + + let (ctx, _guard) = dice.testing_shared_ctx(VersionNumber::new(1)).await; + ctx.inject( + DiceKey { index: 100 }, + DiceComputedValue::new( + MaybeValidDiceValue::valid(DiceValidValue::testing_new(DiceKeyValue::<K>::new(1))), + Arc::new(VersionRange::begins_with(VersionNumber::new(1)).into_ranges()), + TrackedInvalidationPaths::clean(), + ), + ); + let eval = AsyncEvaluator { + per_live_version_ctx: ctx.dupe(), + user_data: user_data.dupe(), + dice: dice.dupe(), + }; + + assert!( + check_dependencies( + &eval, + ParentKey::None, + &SeriesParallelDeps::serial_from_vec(vec![DiceKey { index: 100 }]), + VersionNumber::new(0), + &KeyComputingUserCycleDetectorData::Untracked, + ) + .await? + .is_changed() + ); + + let (ctx, _guard) = dice.testing_shared_ctx(VersionNumber::new(2)).await; + ctx.inject( + DiceKey { index: 100 }, + DiceComputedValue::new( + MaybeValidDiceValue::valid(DiceValidValue::testing_new(DiceKeyValue::<K>::new(1))), + Arc::new(VersionRange::begins_with(VersionNumber::new(1)).into_ranges()), + TrackedInvalidationPaths::clean(), + ), + ); + + let eval = AsyncEvaluator { + per_live_version_ctx: ctx.dupe(), + user_data: user_data.dupe(), + dice: dice.dupe(), + }; + + assert!( + !check_dependencies( + &eval, + ParentKey::None, + &SeriesParallelDeps::serial_from_vec(vec![DiceKey { index: 100 }]), + VersionNumber::new(1), + &KeyComputingUserCycleDetectorData::Untracked, + ) + .await? + .is_changed() + ); + + // Now we also check the case where deps contain transients. + // For legacy DICE this path would also handle cycles, but modern DICE detects cycles + // through post-processing and relies on the user cycle detector for now (which returns + // errors via the result). + let (ctx, _guard) = dice.testing_shared_ctx(VersionNumber::new(2)).await; + ctx.inject( + DiceKey { index: 200 }, + DiceComputedValue::new( + MaybeValidDiceValue::transient(std::sync::Arc::new(DiceKeyValue::<K>::new(1))), + Arc::new(VersionRange::begins_with(VersionNumber::new(2)).into_ranges()), + TrackedInvalidationPaths::clean(), + ), + ); + + let eval = AsyncEvaluator { + per_live_version_ctx: ctx.dupe(), + user_data: user_data.dupe(), + dice: dice.dupe(), + }; + + assert!( + check_dependencies( + &eval, + ParentKey::None, + &SeriesParallelDeps::serial_from_vec(vec![DiceKey { index: 200 }]), + VersionNumber::new(1), + &KeyComputingUserCycleDetectorData::Untracked, + ) + .await?
+ .is_changed() + ); + + Ok(()) +} + +#[tokio::test] +async fn when_equal_return_same_instance() -> anyhow::Result<()> { + let dice = DiceModern::new(DiceData::new()); + + let user_data = std::sync::Arc::new(UserComputationData::new()); + let events = DiceEventDispatcher::new(user_data.tracker.dupe(), dice.dupe()); + + let instance = Arc::new(AtomicUsize::new(0)); + + #[derive(Clone, Dupe, Allocative)] + struct InstanceEqual { + instance_count: usize, + } + + impl PartialEq for InstanceEqual { + fn eq(&self, _other: &Self) -> bool { + true + } + } + + #[derive(Allocative, Clone, Debug, Display)] + #[display("{:?}", self)] + struct InstanceEqualKey(Arc); + + #[async_trait] + impl Key for InstanceEqualKey { + type Value = InstanceEqual; + + async fn compute( + &self, + _ctx: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> Self::Value { + InstanceEqual { + instance_count: self.0.fetch_add(1, Ordering::SeqCst), + } + } + + fn equality(x: &Self::Value, y: &Self::Value) -> bool { + x == y + } + } + impl PartialEq for InstanceEqualKey { + fn eq(&self, _other: &Self) -> bool { + true + } + } + impl Eq for InstanceEqualKey {} + impl Hash for InstanceEqualKey { + fn hash(&self, _state: &mut H) {} + } + + let key = dice.key_index.index_key(InstanceEqualKey(instance.dupe())); + + let v = dice.state_handle.update_state(vec![]).await; + + let (ctx, _guard) = dice.testing_shared_ctx(v).await; + let eval = AsyncEvaluator { + per_live_version_ctx: ctx.dupe(), + user_data: user_data.dupe(), + dice: dice.dupe(), + }; + + let task = DiceTaskWorker::spawn( + key.dupe(), + ctx.testing_get_epoch(), + eval.dupe(), + UserCycleDetectorData::testing_new(), + events.dupe(), + None, + ); + let res = task + .depended_on_by(ParentKey::None) + .not_cancelled() + .unwrap() + .await?; + + let v = dice + .state_handle + .update_state(vec![( + key.dupe(), + ChangeType::Invalidate, + InvalidationSourcePriority::Normal, + )]) + .await; + + let (ctx, _guard) = dice.testing_shared_ctx(v).await; + let eval = AsyncEvaluator { + per_live_version_ctx: ctx.dupe(), + user_data: user_data.dupe(), + dice: dice.dupe(), + }; + + let task = DiceTaskWorker::spawn( + key.dupe(), + ctx.testing_get_epoch(), + eval.dupe(), + UserCycleDetectorData::testing_new(), + events.dupe(), + None, + ); + let res2 = task + .depended_on_by(ParentKey::None) + .not_cancelled() + .unwrap() + .await?; + + // verify that we incremented the total instance counter + assert_eq!(instance.load(Ordering::SeqCst), 2); + + assert_eq!( + res.versions(), + &VersionRanges::testing_new(vec![VersionRange::begins_with(VersionNumber::new(0))]) + ); + + // verify that the instance we return and store is the same as the original instance + assert_eq!( + res.value() + .downcast_maybe_transient::() + .unwrap() + .instance_count, + res2.value() + .downcast_maybe_transient::() + .unwrap() + .instance_count + ); + + Ok(()) +} + +#[tokio::test] +async fn spawn_with_no_previously_cancelled_task() { + let dice = DiceModern::new(DiceData::new()); + + let (shared_ctx, _guard) = dice.testing_shared_ctx(VersionNumber::new(0)).await; + + let is_ran = Arc::new(AtomicBool::new(false)); + let k = dice.key_index.index_key(IsRan(is_ran.dupe())); + + let extra = std::sync::Arc::new(UserComputationData::new()); + let eval = AsyncEvaluator { + per_live_version_ctx: shared_ctx.dupe(), + user_data: extra.dupe(), + dice: dice.dupe(), + }; + let cycles = UserCycleDetectorData::testing_new(); + let events_dispatcher = DiceEventDispatcher::new(std::sync::Arc::new(NoOpTracker), 
dice.dupe()); + let previously_cancelled_task = None; + + let task = DiceTaskWorker::spawn( + k, + VersionEpoch::testing_new(0), + eval, + cycles, + events_dispatcher, + previously_cancelled_task, + ); + + assert!( + task.depended_on_by(ParentKey::None) + .not_cancelled() + .unwrap() + .await + .is_ok() + ); + + assert!(is_ran.load(Ordering::SeqCst)); +} + +#[tokio::test] +async fn spawn_with_previously_cancelled_task_that_cancelled() { + let dice = DiceModern::new(DiceData::new()); + + let (shared_ctx, _guard) = dice.testing_shared_ctx(VersionNumber::new(0)).await; + + let extra = std::sync::Arc::new(UserComputationData::new()); + let eval = AsyncEvaluator { + per_live_version_ctx: shared_ctx.dupe(), + user_data: extra.dupe(), + dice: dice.dupe(), + }; + let cycles = UserCycleDetectorData::testing_new(); + let events_dispatcher = DiceEventDispatcher::new(std::sync::Arc::new(NoOpTracker), dice.dupe()); + + #[derive(Allocative, Clone, Dupe, Debug, Display, PartialEq, Eq, Hash)] + struct CancellableNeverFinish; + + #[async_trait] + impl Key for CancellableNeverFinish { + type Value = (); + + async fn compute( + &self, + _ctx: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> Self::Value { + futures::future::pending().await + } + + fn equality(_: &Self::Value, _: &Self::Value) -> bool { + unreachable!("test") + } + } + + let k = dice.key_index.index_key(CancellableNeverFinish); + let previous_task = DiceTaskWorker::spawn( + k, + VersionEpoch::testing_new(0), + eval.dupe(), + cycles, + events_dispatcher.dupe(), + None, + ); + + previous_task.cancel(); + + let previously_cancelled_task = Some(PreviouslyCancelledTask { + previous: previous_task, + }); + + let is_ran = Arc::new(AtomicBool::new(false)); + let k = dice.key_index.index_key(IsRan(is_ran.dupe())); + let cycles = UserCycleDetectorData::testing_new(); + let task = DiceTaskWorker::spawn( + k, + VersionEpoch::testing_new(0), + eval, + cycles, + events_dispatcher, + previously_cancelled_task, + ); + + assert!( + task.depended_on_by(ParentKey::None) + .not_cancelled() + .unwrap() + .await + .is_ok() + ); + + assert!(is_ran.load(Ordering::SeqCst)); +} + +#[tokio::test] +async fn spawn_with_previously_cancelled_task_that_finished() { + let dice = DiceModern::new(DiceData::new()); + + let (shared_ctx, _guard) = dice.testing_shared_ctx(VersionNumber::new(0)).await; + + let extra = std::sync::Arc::new(UserComputationData::new()); + let eval = AsyncEvaluator { + per_live_version_ctx: shared_ctx.dupe(), + user_data: extra.dupe(), + dice: dice.dupe(), + }; + let cycles = UserCycleDetectorData::testing_new(); + let events_dispatcher = DiceEventDispatcher::new(std::sync::Arc::new(NoOpTracker), dice.dupe()); + + #[derive(Allocative, Clone, Dupe, Debug, Display, PartialEq, Eq, Hash)] + struct Finish; + + #[async_trait] + impl Key for Finish { + type Value = (); + + async fn compute( + &self, + _ctx: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> Self::Value { + } + + fn equality(_: &Self::Value, _: &Self::Value) -> bool { + true + } + } + + let k = dice.key_index.index_key(Finish); + let previous_task = DiceTaskWorker::spawn( + k, + VersionEpoch::testing_new(0), + eval.dupe(), + cycles, + events_dispatcher.dupe(), + None, + ); + // wait for it to finish then trigger cancel + previous_task + .depended_on_by(ParentKey::None) + .not_cancelled() + .unwrap() + .await + .unwrap(); + previous_task.cancel(); + + let previously_cancelled_task = Some(PreviouslyCancelledTask { + previous: previous_task, + }); + + let 
is_ran = Arc::new(AtomicBool::new(false));
+    let k = dice.key_index.index_key(IsRan(is_ran.dupe()));
+    let cycles = UserCycleDetectorData::testing_new();
+    let task = DiceTaskWorker::spawn(
+        k,
+        VersionEpoch::testing_new(0),
+        eval,
+        cycles,
+        events_dispatcher,
+        previously_cancelled_task,
+    );
+
+    assert!(
+        task.depended_on_by(ParentKey::None)
+            .not_cancelled()
+            .unwrap()
+            .await
+            .is_ok()
+    );
+
+    assert!(!is_ran.load(Ordering::SeqCst));
+}
+
+#[tokio::test]
+async fn mismatch_epoch_results_in_cancelled_result() {
+    let dice = DiceModern::new(DiceData::new());
+
+    let (shared_ctx, guard) = dice.testing_shared_ctx(VersionNumber::new(0)).await;
+
+    let extra = std::sync::Arc::new(UserComputationData::new());
+    let eval = AsyncEvaluator {
+        per_live_version_ctx: shared_ctx.dupe(),
+        user_data: extra.dupe(),
+        dice: dice.dupe(),
+    };
+    let cycles = UserCycleDetectorData::testing_new();
+    let events_dispatcher = DiceEventDispatcher::new(std::sync::Arc::new(NoOpTracker), dice.dupe());
+
+    // trigger dice to delete and update the epoch
+    drop(guard);
+
+    let k = dice.key_index.index_key(Finish);
+    let task = DiceTaskWorker::spawn(
+        k,
+        shared_ctx.testing_get_epoch(),
+        eval.dupe(),
+        cycles,
+        events_dispatcher.dupe(),
+        None,
+    );
+    // wait for it to finish then trigger cancel
+    assert_matches!(
+        task.depended_on_by(ParentKey::None)
+            .not_cancelled()
+            .unwrap()
+            .await,
+        Err(_) => {}
+    );
+}
+
+#[tokio::test]
+async fn spawn_with_previously_cancelled_task_nested_cancelled() -> anyhow::Result<()> {
+    #[derive(Allocative, Clone, Debug, Display)]
+    #[display("{:?}", self)]
+    #[allocative(skip)]
+    struct DontRunTwice {
+        is_started: Arc<Notify>,
+        exclusive: Arc<Mutex<bool>>,
+        prevent_cancel: Arc<Notify>,
+    }
+
+    impl PartialEq for DontRunTwice {
+        fn eq(&self, _other: &Self) -> bool {
+            true
+        }
+    }
+    impl Eq for DontRunTwice {}
+    impl Hash for DontRunTwice {
+        fn hash<H: Hasher>(&self, _state: &mut H) {}
+    }
+
+    #[async_trait]
+    impl Key for DontRunTwice {
+        type Value = ();
+
+        async fn compute(
+            &self,
+            _ctx: &mut DiceComputations,
+            cancellations: &CancellationContext,
+        ) -> Self::Value {
+            let mut guard = self
+                .exclusive
+                .try_lock()
+                .expect("Can only have one concurrent execution");
+
+            if *guard {
+                // Last attempt, return.
+            } else {
+                // Note that we did our first execution. Keep the lock held. The point of the
+                // test is to prove that nobody will get to run before we exit and drop it.
+                *guard = true;
+
+                cancellations
+                    .with_structured_cancellation(|obs| async move {
+                        // Resume the rest of the code.
+                        self.is_started.notify_one();
+                        // Wait for our cancellation.
+                        obs.await;
+
+                        // Yield. If the final evaluation is ready (that would be a bug!), it will
+                        // run now.
+                        tokio::task::yield_now().await;
+                        self.prevent_cancel.notified().await;
+                    })
+                    .await;
+
+                // Never return, but this bit will be the one that's cancelled.
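                // (annotation, not part of the patch: pending() never resolves,
                // so from here the computation can only end by the requested
                // cancellation actually dropping this future)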
+ futures::future::pending().await + } + } + + fn equality(x: &Self::Value, y: &Self::Value) -> bool { + x == y + } + } + + let dice = DiceModern::new(DiceData::new()); + + let exclusive = Arc::new(Mutex::new(false)); + let is_started = Arc::new(Notify::new()); + let prevent_cancel = Arc::new(Notify::new()); + + let key = DontRunTwice { + exclusive, + is_started: is_started.dupe(), + prevent_cancel: prevent_cancel.dupe(), + }; + + let (shared_ctx, _guard) = dice.testing_shared_ctx(VersionNumber::new(0)).await; + + let k = dice.key_index.index_key(key); + + let extra = std::sync::Arc::new(UserComputationData::new()); + let eval = AsyncEvaluator { + per_live_version_ctx: shared_ctx.dupe(), + user_data: extra.dupe(), + dice: dice.dupe(), + }; + let cycles = UserCycleDetectorData::testing_new(); + let events_dispatcher = DiceEventDispatcher::new(std::sync::Arc::new(NoOpTracker), dice.dupe()); + + let first_task = DiceTaskWorker::spawn( + k, + VersionEpoch::testing_new(0), + eval.dupe(), + cycles, + events_dispatcher.dupe(), + None, + ); + is_started.notified().await; + first_task.cancel(); + + let cycles = UserCycleDetectorData::testing_new(); + let second_task = DiceTaskWorker::spawn( + k, + VersionEpoch::testing_new(0), + eval.dupe(), + cycles, + events_dispatcher.dupe(), + Some(PreviouslyCancelledTask { + previous: first_task, + }), + ); + + second_task.cancel(); + + let cycles = UserCycleDetectorData::testing_new(); + let third_task = DiceTaskWorker::spawn( + k, + VersionEpoch::testing_new(0), + eval, + cycles, + events_dispatcher, + Some(PreviouslyCancelledTask { + previous: second_task, + }), + ); + + let promise = third_task + .depended_on_by(ParentKey::None) + .not_cancelled() + .unwrap(); + + pin_mut!(promise); + + // if we poll before we allow cancellation, we shouldn't complete + // tokio doesn't always guarantee the yields switches between tasks so this makes the test + // slightly more resilient to scheduling + let res = tokio::time::timeout(Duration::from_secs(5), &mut promise).await; + + assert!(res.is_err()); + + prevent_cancel.notify_one(); + let _ignored = promise.await?; + + Ok(()) +} + +#[tokio::test] +async fn test_values_gets_resurrect_if_deps_dont_change_regardless_of_equality() +-> anyhow::Result<()> { + #[derive(Allocative, Clone, Debug, Display)] + #[display("{:?}", self)] + struct NeverEqual; + + #[async_trait] + impl Key for NeverEqual { + type Value = Arc<()>; + + async fn compute( + &self, + _ctx: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> Self::Value { + panic!("never ran as deps equal") + } + + fn equality(_x: &Self::Value, _y: &Self::Value) -> bool { + false + } + } + + impl PartialEq for NeverEqual { + fn eq(&self, _other: &Self) -> bool { + true + } + } + impl Eq for NeverEqual {} + impl Hash for NeverEqual { + fn hash(&self, _state: &mut H) {} + } + + /// creates the initial test graph with a single key that depends on a value + async fn populate_initial_graph( + dice: &std::sync::Arc, + compute_key: DiceKey, + compute_res: DiceValidValue, + ) { + let (ctx, _guard) = get_ctx_at_version(dice, VersionNumber::new(0)).await; + + // set the initial state + let _ignore = update_computed_value( + dice, + &ctx, + DiceKey { index: 100 }, + VersionNumber::new(0), + DiceValidValue::testing_new(DiceKeyValue::::new(1)), + Arc::new(SeriesParallelDeps::None), + ); + let _ignore = update_computed_value( + dice, + &ctx, + compute_key.dupe(), + VersionNumber::new(0), + compute_res.dupe(), + Arc::new(SeriesParallelDeps::serial_from_vec(vec![DiceKey { 
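                // (annotation, not part of the patch: this is the parent's sole
                // recorded dependency, the key injected as index 100 above)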
+ index: 100, + }])), + ); + } + + /// gets a new context where the parent is dirtied such that it needs to check its deps, and the + /// dep has a history as provided + async fn ctx_with_dep_having_history( + dice: &std::sync::Arc, + parent_key: DiceKey, + dep_history: VersionRanges, + ) -> (SharedLiveTransactionCtx, ActiveTransactionGuard) { + let v = soft_dirty(dice, parent_key.dupe()).await; + let (ctx, guard) = dice.testing_shared_ctx(v).await; + ctx.inject( + DiceKey { index: 100 }, + DiceComputedValue::new( + MaybeValidDiceValue::valid(DiceValidValue::testing_new(DiceKeyValue::::new(1))), + Arc::new(dep_history), + TrackedInvalidationPaths::clean(), + ), + ); + + (ctx, guard) + } + + let dice = DiceModern::new(DiceData::new()); + + let user_data = std::sync::Arc::new(UserComputationData::new()); + let events = DiceEventDispatcher::new(user_data.tracker.dupe(), dice.dupe()); + + let res = DiceValidValue::testing_new(DiceKeyValue::::new(Arc::new(()))); + let key = dice.key_index.index_key(NeverEqual); + + populate_initial_graph(&dice, key.dupe(), res.dupe()).await; + + let (ctx, _guard) = ctx_with_dep_having_history( + &dice, + key.dupe(), + VersionRanges::testing_new(vec![VersionRange::begins_with(VersionNumber::new(0))]), + ) + .await; + + let eval = AsyncEvaluator { + per_live_version_ctx: ctx.dupe(), + user_data: user_data.dupe(), + dice: dice.dupe(), + }; + + let task = DiceTaskWorker::spawn( + key.dupe(), + ctx.testing_get_epoch(), + eval.dupe(), + UserCycleDetectorData::testing_new(), + events.dupe(), + None, + ); + let computed_res = task + .depended_on_by(ParentKey::None) + .not_cancelled() + .unwrap() + .await?; + assert_eq!( + computed_res.versions(), + &VersionRanges::testing_new(vec![VersionRange::begins_with(VersionNumber::new(0))]) + ); + assert!(computed_res.value().instance_equal(&res)); + + // next version + let (ctx, _guard) = ctx_with_dep_having_history( + &dice, + key.dupe(), + VersionRanges::testing_new(vec![VersionRange::begins_with(VersionNumber::new(0))]), + ) + .await; + + let eval = AsyncEvaluator { + per_live_version_ctx: ctx.dupe(), + user_data: user_data.dupe(), + dice: dice.dupe(), + }; + + let task = DiceTaskWorker::spawn( + key.dupe(), + ctx.testing_get_epoch(), + eval.dupe(), + UserCycleDetectorData::testing_new(), + events.dupe(), + None, + ); + let computed_res = task + .depended_on_by(ParentKey::None) + .not_cancelled() + .unwrap() + .await?; + assert_eq!( + computed_res.versions(), + &VersionRanges::testing_new(vec![VersionRange::begins_with(VersionNumber::new(0))]) + ); + assert!(computed_res.value().instance_equal(&res)); + + Ok(()) +} + +async fn soft_dirty(dice: &std::sync::Arc, key: DiceKey) -> VersionNumber { + dice.state_handle + .update_state(vec![( + key.dupe(), + ChangeType::TestingSoftDirty, + InvalidationSourcePriority::Normal, + )]) + .await +} + +fn update_computed_value( + dice: &std::sync::Arc, + ctx: &SharedLiveTransactionCtx, + k: DiceKey, + v: VersionNumber, + value: DiceValidValue, + deps: Arc, +) -> impl Future> { + dice.state_handle.update_computed( + VersionedGraphKey::new(v, k), + ctx.testing_get_epoch(), + StorageType::Normal, + value, + deps, + TrackedInvalidationPaths::clean(), + ) +} + +async fn get_ctx_at_version( + dice: &std::sync::Arc, + v: VersionNumber, +) -> (SharedLiveTransactionCtx, ActiveTransactionGuard) { + dice.state_handle + .ctx_at_version( + VersionNumber::new(0), + ActiveTransactionGuard::new(v, dice.state_handle.dupe()), + ) + .await +} + +// tests that dependency checking stops at the first changed 
dep in a series node (see SeriesParallelDeps). +// +// it's tough to directly test that, instead we cause the one node we expect to be computed to wait for a +// short period and then check the total number of nodes that get computed. +#[tokio::test] +async fn test_check_dependencies_stops_at_changed() -> anyhow::Result<()> { + let dice = DiceModern::new(DiceData::new()); + + let compute_behavior = (0..20) + .map(|_v| std::sync::Mutex::new(ComputeBehavior::Immediate)) + .collect(); + + let data = Arc::new(Data { + total_computed: AtomicU32::new(0), + compute_behavior, + }); + + let spkeys: Vec<_> = (0..20) + .map(|v| SPKey { + data: data.clone(), + idx: v, + }) + .collect(); + let keys = spkeys.map(|v| dice.key_index.index_key(v.dupe())); + + // mark all keys as having a value at v0, invalidated at v1 + // for all keys, the real value is different (so if recomputed will be seen as changed) + let mut updater = dice.updater(); + updater + .changed_to(spkeys.iter().map(|k| (k.dupe(), 100)).collect::>()) + .unwrap(); + let prev_version = updater.commit().await.get_version(); + + let mut updater = dice.updater(); + updater + .changed(spkeys.iter().duped().collect::>()) + .unwrap(); + let version = updater.commit().await.get_version(); + + let user_data = std::sync::Arc::new(UserComputationData::new()); + let (ctx, _guard) = dice.testing_shared_ctx(version).await; + + let eval = AsyncEvaluator { + per_live_version_ctx: ctx.dupe(), + user_data: user_data.dupe(), + dice: dice.dupe(), + }; + + *data.compute_behavior[0].lock().unwrap() = ComputeBehavior::Sleep(Duration::from_millis(20)); + + let deps = SeriesParallelDeps::serial_from_vec(keys); + let cycles = KeyComputingUserCycleDetectorData::Untracked; + let check_deps_result = + check_dependencies(&eval, ParentKey::None, &deps, prev_version, &cycles).await?; + + match check_deps_result { + CheckDependenciesResult::Changed { continuables } => { + continuables.await?; + } + v => { + panic!("unexpected checkdeps result {}", v.variant_name()) + } + } + + assert_eq!(data.total_computed.load(Ordering::SeqCst), 1); + + Ok(()) +} + +/// tests that dependency checking can continue and fully finish parallel nodes +/// +/// we build a series-parallel graph like this: +/// +/// ```ignore +/// 0 +/// 1 +/// 2 +/// +/// 3 | 8 | 13 +/// 4 | 9 | 14 +/// 5 6 | 10 11 | 15 16 +/// 7 | 12 | 17 +/// +/// 18 +/// 19 +/// ``` +/// +/// where key 9's value has changed. This should trigger recomputation via check_dependencies in +/// everything except for 10, 11, 12, 18, and 19 (i.e. the 3 and 13 branches should be fully +/// re-computed). +/// +/// we block 3 and 13's computation on getting the first result (the DidDepsChange::Changed) back +/// from check_dependencies. 
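// A standalone sketch (not DICE's actual types) of the series/parallel rule the
// two tests around here rely on: a series stops checking at its first changed
// dep, while parallel branches are each checked to completion. `SpDeps` and
// `check` below are hypothetical stand-ins for SeriesParallelDeps and
// check_dependencies.
#[derive(Debug)]
enum SpDeps {
    Leaf { changed: bool },
    Series(Vec<SpDeps>),
    Parallel(Vec<SpDeps>),
}

// Returns whether anything changed; `checked` counts the leaves actually visited.
fn check(deps: &SpDeps, checked: &mut usize) -> bool {
    match deps {
        SpDeps::Leaf { changed } => {
            *checked += 1;
            *changed
        }
        // series: stop at the first changed entry, later entries stay unvisited
        SpDeps::Series(items) => items.iter().any(|item| check(item, checked)),
        // parallel: every branch is checked even after a change is found
        SpDeps::Parallel(branches) => branches
            .iter()
            .map(|b| check(b, checked))
            .fold(false, |acc, changed| acc || changed),
    }
}

#[test]
fn series_short_circuits_parallel_does_not() {
    let mut checked = 0;
    let deps = SpDeps::Series(vec![
        SpDeps::Parallel(vec![
            SpDeps::Leaf { changed: true },
            SpDeps::Leaf { changed: false },
        ]),
        SpDeps::Leaf { changed: false }, // never visited: the series saw a change
    ]);
    assert!(check(&deps, &mut checked));
    assert_eq!(checked, 2); // both parallel branches, trailing leaf skipped
}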
+#[tokio::test] +async fn test_check_dependencies_can_eagerly_check_all_parallel_deps() -> anyhow::Result<()> { + let dice = DiceModern::new(DiceData::new()); + + let compute_behavior = (0..20) + .map(|_v| std::sync::Mutex::new(ComputeBehavior::Immediate)) + .collect(); + + let data = Arc::new(Data { + total_computed: AtomicU32::new(0), + compute_behavior, + }); + + let spkeys: Vec<_> = (0..20) + .map(|v| SPKey { + data: data.clone(), + idx: v, + }) + .collect(); + let keys = spkeys.map(|v| dice.key_index.index_key(v.dupe())); + + // mark all keys as having a value at v0, invalidated at v1 + // for key 9, the value will be different, but for the rest it will be the same + let mut updater = dice.updater(); + updater + .changed_to( + spkeys + .iter() + .map(|k| (k.dupe(), if k.idx == 9 { 100 } else { k.idx })) + .collect::>(), + ) + .unwrap(); + let prev_version = updater.commit().await.get_version(); + + let mut updater = dice.updater(); + updater + .changed(spkeys.iter().duped().collect::>()) + .unwrap(); + let version = updater.commit().await.get_version(); + + let user_data = std::sync::Arc::new(UserComputationData::new()); + let (ctx, _guard) = dice.testing_shared_ctx(version).await; + + let eval = AsyncEvaluator { + per_live_version_ctx: ctx.dupe(), + user_data: user_data.dupe(), + dice: dice.dupe(), + }; + + let semaphore = Arc::new(Semaphore::new(0)); + + *data.compute_behavior[3].lock().unwrap() = ComputeBehavior::WaitFor(semaphore.dupe()); + *data.compute_behavior[13].lock().unwrap() = ComputeBehavior::WaitFor(semaphore.dupe()); + + let mut deps = RecordingDepsTracker::new(TrackedInvalidationPaths::clean()); + deps.record( + keys[0], + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + deps.record( + keys[1], + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + deps.record( + keys[2], + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + { + let parallel = deps.push_parallel(0).0; + for i in 0..3 { + let offset = i * 5; + let mut deps = RecordingDepsTracker::new(TrackedInvalidationPaths::clean()); + deps.record( + keys[3 + offset], + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + deps.record( + keys[4 + offset], + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + { + let parallel = deps.push_parallel(2).0; + let mut deps = RecordingDepsTracker::new(TrackedInvalidationPaths::clean()); + deps.record( + keys[5 + offset], + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + parallel.alloc(deps.collect_deps()); + let mut deps = RecordingDepsTracker::new(TrackedInvalidationPaths::clean()); + deps.record( + keys[6 + offset], + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + parallel.alloc(deps.collect_deps()); + } + deps.record( + keys[7 + offset], + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + parallel.alloc(deps.collect_deps()); + } + } + deps.record( + keys[18], + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + deps.record( + keys[19], + DiceValidity::Valid, + TrackedInvalidationPaths::clean(), + ); + + let deps = deps.collect_deps(); + let cycles = KeyComputingUserCycleDetectorData::Untracked; + + let check_deps_result = + check_dependencies(&eval, ParentKey::None, &deps.deps, prev_version, &cycles).await?; + + match check_deps_result { + CheckDependenciesResult::Changed { continuables } => { + semaphore.add_permits(100); + continuables.await?; + } + v => { + panic!("unexpected checkdeps result {}", v.variant_name()) + } + } + + 
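    // 20 keys were dirtied; key 9's change cuts off its series successors
    // 10-12 and the trailing serial keys 18/19, so 20 - 5 = 15 recomputes.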
assert_eq!(data.total_computed.load(Ordering::SeqCst), 15);
+
+    Ok(())
+}
+
+#[derive(Clone, Debug)]
+enum ComputeBehavior {
+    Sleep(Duration),
+    WaitFor(Arc<Semaphore>),
+    Immediate,
+}
+
+#[derive(Debug)]
+struct Data {
+    total_computed: AtomicU32,
+    compute_behavior: Vec<std::sync::Mutex<ComputeBehavior>>,
+}
+
+#[derive(Allocative, Clone, Dupe, Debug)]
+struct SPKey {
+    #[allocative(skip)]
+    data: Arc<Data>,
+    idx: usize,
+}
+
+impl Display for SPKey {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "SPKey({})", self.idx)
+    }
+}
+impl Hash for SPKey {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        self.idx.hash(state);
+    }
+}
+
+impl Eq for SPKey {}
+impl PartialEq for SPKey {
+    fn eq(&self, other: &Self) -> bool {
+        self.idx == other.idx
+    }
+}
+
+#[async_trait]
+impl Key for SPKey {
+    type Value = usize;
+
+    async fn compute(
+        &self,
+        _ctx: &mut DiceComputations,
+        _cancellations: &CancellationContext,
+    ) -> Self::Value {
+        let behavior = self.data.compute_behavior[self.idx].lock().unwrap().clone();
+        match behavior {
+            ComputeBehavior::Sleep(duration) => tokio::time::sleep(duration).await,
+            ComputeBehavior::WaitFor(semaphore) => {
+                drop(semaphore.acquire().await);
+            }
+            ComputeBehavior::Immediate => {}
+        }
+
+        self.data.total_computed.fetch_add(1, Ordering::SeqCst);
+        self.idx
+    }
+
+    fn equality(x: &Self::Value, y: &Self::Value) -> bool {
+        x == y
+    }
+}
diff --git a/dice/dice/src/introspection.rs b/dice/dice/src/introspection.rs
new file mode 100644
index 0000000000000..1045bded4b044
--- /dev/null
+++ b/dice/dice/src/introspection.rs
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+//!
+//!
Interfaces for introspection of the DICE graph + +use crate::introspection::graph::AnyKey; +use crate::introspection::graph::GraphIntrospectable; +use crate::Dice; +use crate::DiceImplementation; + +pub mod graph; +pub(crate) mod introspect; + +pub use crate::introspection::introspect::serialize_dense_graph; +pub use crate::introspection::introspect::serialize_graph; + +impl Dice { + pub fn to_introspectable(&self) -> GraphIntrospectable { + match &self.implementation { + DiceImplementation::Modern(_) => { + unimplemented!("todo") + } + } + } +} + +#[cfg(test)] +mod tests { + use allocative::Allocative; + use anyhow::Context as _; + use async_trait::async_trait; + use buck2_futures::cancellation::CancellationContext; + use derive_more::Display; + use dupe::Dupe; + + use crate::api::computations::DiceComputations; + use crate::api::cycles::DetectCycles; + use crate::api::key::Key; + use crate::impls::dice::DiceModern; + use crate::introspection::graph::SerializedGraphNodesForKey; + use crate::introspection::serialize_graph; + use crate::HashMap; + + #[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)] + #[display("{:?}", self)] + struct KeyA(usize); + + #[async_trait] + impl Key for KeyA { + type Value = (); + + async fn compute( + &self, + ctx: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> Self::Value { + if self.0 > 0 { + ctx.compute(&KeyA(self.0 - 1)).await.unwrap(); + } else { + ctx.compute(&KeyB).await.unwrap(); + } + } + + fn equality(_: &Self::Value, _: &Self::Value) -> bool { + unimplemented!() + } + } + + #[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)] + #[display("{:?}", self)] + struct KeyB; + + #[async_trait] + impl Key for KeyB { + type Value = (); + + async fn compute( + &self, + _: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> Self::Value { + // Noop + } + + fn equality(_: &Self::Value, _: &Self::Value) -> bool { + unimplemented!() + } + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + async fn test_serialization() -> anyhow::Result<()> { + let dice = DiceModern::builder().build(DetectCycles::Disabled); + let mut ctx = dice.updater().commit().await; + ctx.compute(&KeyA(3)).await?; + + let mut nodes = Vec::new(); + let mut edges = Vec::new(); + let mut nodes_currently_running = Vec::new(); + + serialize_graph( + &dice.to_introspectable(), + &mut nodes, + &mut edges, + &mut nodes_currently_running, + ) + .unwrap(); + let nodes = String::from_utf8(nodes)?; + let edges = String::from_utf8(edges)?; + + let mut node_map = HashMap::::default(); + let mut edge_list = Vec::<(u64, u64)>::new(); + + for line in nodes.lines() { + let mut it = line.trim().split('\t'); + let idx = it.next().context("No idx")?.parse()?; + let _key_type = it.next().context("No key type")?; + let key = it.next().context("No key")?; + node_map.insert(key.into(), idx); + } + + for line in edges.lines() { + let mut it = line.trim().split('\t'); + let from = it.next().context("No idx")?.parse()?; + let to = it.next().context("No key")?.parse()?; + edge_list.push((from, to)); + } + + let a3 = *node_map.get("KeyA(3)").context("Missing key")?; + let a2 = *node_map.get("KeyA(2)").context("Missing key")?; + let a1 = *node_map.get("KeyA(1)").context("Missing key")?; + let a0 = *node_map.get("KeyA(0)").context("Missing key")?; + let b = *node_map.get("KeyB").context("Missing key")?; + + let mut expected_edge_list = vec![(a3, a2), (a2, a1), (a1, a0), (a0, b)]; + expected_edge_list.sort_unstable(); + 
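        // sort both edge lists so the comparison is independent of traversal order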
edge_list.sort_unstable(); + assert_eq!(expected_edge_list, edge_list); + + // TODO(cjhopman): fix this + // assert!(nodes_currently_running.is_empty()); + + Ok(()) + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + async fn test_serialization_dense() -> anyhow::Result<()> { + let dice = DiceModern::builder().build(DetectCycles::Disabled); + let mut ctx = dice.updater().commit().await; + ctx.compute(&KeyA(3)).await?; + + let node = bincode::serialize(&dice.to_introspectable())?; + + let _out: Vec = bincode::deserialize(&node)?; + Ok(()) + } +} diff --git a/dice/dice/src/introspection/graph.rs b/dice/dice/src/introspection/graph.rs index 64bc28fa1b9f0..e9494f1bea478 100644 --- a/dice/dice/src/introspection/graph.rs +++ b/dice/dice/src/introspection/graph.rs @@ -7,20 +7,16 @@ * of this source tree. */ -use std::collections::BTreeMap; -use std::collections::BTreeSet; use std::fmt; use std::fmt::Display; use std::fmt::Formatter; use std::hash::Hash; use std::hash::Hasher; use std::iter; -use std::sync::Arc; use cmp_any::PartialEqAny; use derivative::Derivative; use dupe::Dupe; -use itertools::Either; use serde::de::Error; use serde::de::Unexpected; use serde::de::Visitor; @@ -34,34 +30,22 @@ use crate::impls::core::versions::introspection::VersionIntrospectable; use crate::impls::key::DiceKey; use crate::introspection::serialize_dense_graph; use crate::legacy::dice_futures::dice_task::DiceTaskStateForDebugging; -use crate::legacy::incremental::ErasedEngine; use crate::HashMap; use crate::HashSet; #[derive(Derivative)] #[derivative(Debug)] pub enum GraphIntrospectable { - Legacy { - #[derivative(Debug = "ignore")] - introspectables: LegacyIntrospectable, - }, Modern { #[derivative(Debug = "ignore")] introspection: ModernIntrospectable, }, } -pub struct LegacyIntrospectable(pub(crate) Vec>); - impl GraphIntrospectable { pub(crate) fn introspectables(&self) -> impl Iterator { match self { - GraphIntrospectable::Legacy { introspectables } => { - Either::Left(introspectables.0.iter().map(|e| e.introspect())) - } - GraphIntrospectable::Modern { introspection } => { - Either::Right(iter::once(introspection as _)) - } + GraphIntrospectable::Modern { introspection } => iter::once(introspection as _), } } } @@ -106,13 +90,13 @@ impl EngineForIntrospection for ModernIntrospectable { &'a self, _keys: &'a mut HashMap, ) -> Box + 'a> { - Box::new(self.graph.nodes().map(|node| { - let any_k = self.key_map.get(&node.k).expect("key should be present"); + Box::new(self.graph.nodes().map(|(key, node)| { + let any_k = self.key_map.get(&key).expect("key should be present"); SerializedGraphNodesForKey { - id: KeyID(node.k.index as usize), + id: KeyID(node.node_id.0), key: any_k.to_string(), type_name: any_k.type_name().to_owned(), - nodes: node.nodes.clone(), + nodes: Some(node.clone()), } })) } @@ -120,10 +104,6 @@ impl EngineForIntrospection for ModernIntrospectable { fn len_for_introspection(&self) -> usize { self.graph.len_for_introspection() } - - fn currently_running_key_count(&self) -> usize { - self.version_data.currently_running_key_count() - } } impl Serialize for GraphIntrospectable { @@ -210,28 +190,8 @@ pub enum GraphNodeKind { #[derive(Clone, Serialize, Deserialize)] pub struct CellHistory { - pub history: BTreeMap, -} - -impl CellHistory { - pub fn new(verified: BTreeSet, dirtied: BTreeMap) -> Self { - Self { - history: verified - .into_iter() - .map(|v| (v, HistoryState::Verified)) - .chain(dirtied.into_iter().map(|(v, f)| { - ( - v, - if f { - HistoryState::ForceDirty - } else { 
- HistoryState::Dirty - }, - ) - })) - .collect(), - } - } + pub valid_ranges: Vec<(VersionNumber, Option)>, + pub force_dirtied_at: Vec, } #[derive(Clone, Serialize, Deserialize)] @@ -250,7 +210,7 @@ pub struct SerializedGraphNode { /// it's theoretically possible for those locks to be poisoned. /// Therefore, they're optional. pub deps: Option>, - pub rdeps: Option>>, + pub rdeps: Option>, } #[derive(Clone, Serialize, Deserialize)] @@ -258,22 +218,23 @@ pub struct SerializedGraphNodesForKey { pub id: KeyID, pub key: String, pub type_name: String, - pub nodes: BTreeMap>, + pub nodes: Option, } pub(crate) trait EngineForIntrospection { + #[allow(dead_code)] fn keys<'a>(&'a self) -> Box + 'a>; fn edges<'a>(&'a self) -> Box)> + 'a>; fn keys_currently_running<'a>( &'a self, ) -> Vec<(AnyKey, VersionNumber, DiceTaskStateForDebugging)>; + #[allow(dead_code)] fn versions_currently_running<'a>(&'a self) -> Vec; fn nodes<'a>( &'a self, keys: &'a mut HashMap, ) -> Box + 'a>; fn len_for_introspection(&self) -> usize; - fn currently_running_key_count(&self) -> usize; } pub(crate) trait KeyForIntrospection: Display + Send + 'static { diff --git a/dice/dice/src/introspection/mod.rs b/dice/dice/src/introspection/mod.rs deleted file mode 100644 index 4e23fb4e72e8f..0000000000000 --- a/dice/dice/src/introspection/mod.rs +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! -//! Interfaces for introspection of the DICE graph - -use crate::introspection::graph::AnyKey; -use crate::introspection::graph::GraphIntrospectable; -use crate::introspection::graph::LegacyIntrospectable; -use crate::Dice; -use crate::DiceImplementation; - -pub mod graph; -pub(crate) mod introspect; - -pub use crate::introspection::introspect::serialize_dense_graph; -pub use crate::introspection::introspect::serialize_graph; -use crate::legacy::DiceLegacy; - -impl Dice { - pub fn to_introspectable(&self) -> GraphIntrospectable { - match &self.implementation { - DiceImplementation::Legacy(dice) => dice.to_introspectable(), - DiceImplementation::Modern(_) => { - unimplemented!("todo") - } - } - } -} - -impl DiceLegacy { - pub fn to_introspectable(&self) -> GraphIntrospectable { - GraphIntrospectable::Legacy { - introspectables: LegacyIntrospectable(self.map.read().engines().to_vec()), - } - } -} - -#[cfg(test)] -mod tests { - use allocative::Allocative; - use anyhow::Context as _; - use async_trait::async_trait; - use derive_more::Display; - use dupe::Dupe; - use more_futures::cancellation::CancellationContext; - - use crate::api::computations::DiceComputations; - use crate::api::cycles::DetectCycles; - use crate::api::key::Key; - use crate::introspection::graph::SerializedGraphNodesForKey; - use crate::introspection::serialize_graph; - use crate::DiceLegacy; - use crate::HashMap; - - #[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)] - #[display(fmt = "{:?}", self)] - struct KeyA(usize); - - #[async_trait] - impl Key for KeyA { - type Value = (); - - async fn compute( - &self, - ctx: &mut DiceComputations, - _cancellations: &CancellationContext, - ) -> Self::Value { - if self.0 > 0 { - ctx.compute(&KeyA(self.0 - 1)).await.unwrap(); - } else { - ctx.compute(&KeyB).await.unwrap(); - } - } - - fn equality(_: 
&Self::Value, _: &Self::Value) -> bool { - unimplemented!() - } - } - - #[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)] - #[display(fmt = "{:?}", self)] - struct KeyB; - - #[async_trait] - impl Key for KeyB { - type Value = (); - - async fn compute( - &self, - _: &mut DiceComputations, - _cancellations: &CancellationContext, - ) -> Self::Value { - // Noop - } - - fn equality(_: &Self::Value, _: &Self::Value) -> bool { - unimplemented!() - } - } - - #[tokio::test] - async fn test_serialization() -> anyhow::Result<()> { - let dice = DiceLegacy::builder().build(DetectCycles::Disabled); - let ctx = dice.updater().commit().await; - ctx.compute(&KeyA(3)).await?; - - let mut nodes = Vec::new(); - let mut edges = Vec::new(); - let mut nodes_currently_running = Vec::new(); - - serialize_graph( - &dice.to_introspectable(), - &mut nodes, - &mut edges, - &mut nodes_currently_running, - ) - .unwrap(); - let nodes = String::from_utf8(nodes)?; - let edges = String::from_utf8(edges)?; - - let mut node_map = HashMap::::default(); - let mut edge_list = Vec::<(u64, u64)>::new(); - - for line in nodes.lines() { - let mut it = line.trim().split('\t'); - let idx = it.next().context("No idx")?.parse()?; - let _key_type = it.next().context("No key type")?; - let key = it.next().context("No key")?; - node_map.insert(key.into(), idx); - } - - for line in edges.lines() { - let mut it = line.trim().split('\t'); - let from = it.next().context("No idx")?.parse()?; - let to = it.next().context("No key")?.parse()?; - edge_list.push((from, to)); - } - - let a3 = *node_map.get("KeyA(3)").context("Missing key")?; - let a2 = *node_map.get("KeyA(2)").context("Missing key")?; - let a1 = *node_map.get("KeyA(1)").context("Missing key")?; - let a0 = *node_map.get("KeyA(0)").context("Missing key")?; - let b = *node_map.get("KeyB").context("Missing key")?; - - let mut expected_edge_list = vec![(a3, a2), (a2, a1), (a1, a0), (a0, b)]; - expected_edge_list.sort_unstable(); - edge_list.sort_unstable(); - assert_eq!(expected_edge_list, edge_list); - - assert!(nodes_currently_running.is_empty()); - - Ok(()) - } - - #[tokio::test] - async fn test_serialization_dense() -> anyhow::Result<()> { - let dice = DiceLegacy::builder().build(DetectCycles::Disabled); - let ctx = dice.updater().commit().await; - ctx.compute(&KeyA(3)).await?; - - let node = bincode::serialize(&dice.to_introspectable())?; - - let _out: Vec = bincode::deserialize(&node)?; - Ok(()) - } -} diff --git a/dice/dice/src/legacy.rs b/dice/dice/src/legacy.rs new file mode 100644 index 0000000000000..d4ed7137210ed --- /dev/null +++ b/dice/dice/src/legacy.rs @@ -0,0 +1,11 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub(crate) mod cycles; +pub(crate) mod dice_futures; diff --git a/dice/dice/src/legacy/ctx.rs b/dice/dice/src/legacy/ctx.rs deleted file mode 100644 index 89b203efaa056..0000000000000 --- a/dice/dice/src/legacy/ctx.rs +++ /dev/null @@ -1,413 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -use std::any::Any; -use std::future::Future; -use std::sync::Arc; - -use allocative::Allocative; -use dupe::Dupe; -use futures::FutureExt; -use parking_lot::Mutex; - -use crate::api::activation_tracker::ActivationData; -use crate::api::cycles::DetectCycles; -use crate::api::data::DiceData; -use crate::api::error::DiceErrorImpl; -use crate::api::error::DiceResult; -use crate::api::key::Key; -use crate::api::projection::ProjectionKey; -use crate::api::user_data::UserComputationData; -use crate::api::user_data::UserCycleDetectorGuard; -use crate::legacy::cycles::CycleDetector; -use crate::legacy::incremental::dep_trackers::BothDepTrackers; -use crate::legacy::incremental::dep_trackers::BothDeps; -use crate::legacy::incremental::graph::storage_properties::StorageProperties; -use crate::legacy::incremental::transaction_ctx::ActiveTransactionCountGuard; -use crate::legacy::incremental::transaction_ctx::Changes; -use crate::legacy::incremental::transaction_ctx::TransactionCtx; -use crate::legacy::incremental::versions::VersionForWrites; -use crate::legacy::incremental::versions::VersionGuard; -use crate::legacy::key::StoragePropertiesForKey; -use crate::legacy::map::DiceMap; -use crate::legacy::opaque::OpaqueValueImplLegacy; -use crate::legacy::projection::ProjectionKeyAsKey; -use crate::legacy::projection::ProjectionKeyProperties; -use crate::legacy::DiceLegacy; -use crate::versions::VersionNumber; -use crate::DiceError; - -/// A context for the duration of a top-level compute request. -/// -/// This contains both user-visible and dice-internal computation-specific data. -#[derive(Allocative)] -pub(crate) struct ComputationData { - pub(crate) user_data: Arc, - cycle_detector: Option>, - // TODO(bobyf): this seems a natural place to gather some stats about the compute too - #[allocative(skip)] - pub(crate) user_cycle_detector_guard: Option>, - /// Store extra data from provided by the key's evaluation, which will be passed to the - /// user_data's ActivationTracker when the key evaluation finishes. - #[allocative(skip)] - pub(crate) evaluation_data: Mutex>>, -} - -impl ComputationData { - pub(crate) fn new(data: UserComputationData, detect_cycles: DetectCycles) -> Self { - Self { - user_data: Arc::new(data), - cycle_detector: match detect_cycles { - DetectCycles::Enabled => Some(Box::new(CycleDetector::new())), - DetectCycles::Disabled => None, - }, - user_cycle_detector_guard: None, - evaluation_data: Mutex::new(None), - } - } - - /// records that we are entering the computation of another key as part of this main request - /// i.e. computing key a, which during its evaluation requests key b, enters a new subrequest. 
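// A minimal sketch of the per-request cycle detection that `subrequest` below
// threads through each computation: every visit clones the parent's stack, so
// sibling requests don't see each other's keys. `StackDetector` and the String
// keys are simplifications of CycleDetector / Arc<dyn RequestedKey>.
use indexmap::IndexSet;

#[derive(Clone, Default)]
struct StackDetector {
    stack: IndexSet<String>,
}

impl StackDetector {
    fn visit(&self, key: &str) -> Result<StackDetector, String> {
        // clone-on-visit keeps the stack scoped to this request chain
        let mut stack = self.stack.clone();
        if !stack.insert(key.to_owned()) {
            Err(format!("cycle while requesting {key}: {stack:?}"))
        } else {
            Ok(StackDetector { stack })
        }
    }
}

#[test]
fn cycle_detection_is_per_chain() {
    let root = StackDetector::default();
    let a = root.visit("a").unwrap();
    let ab = a.visit("b").unwrap();
    assert!(ab.visit("a").is_err()); // a -> b -> a is a cycle
    assert!(a.visit("c").is_ok()); // but a -> c is an independent chain
}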
- pub(crate) fn subrequest(&self, key: &K::Key) -> DiceResult - where - K: StorageProperties, - { - if let Some(v) = &self.user_cycle_detector_guard { - v.add_edge(K::to_key_any(key)); - } - Ok(Self { - user_data: self.user_data.dupe(), - cycle_detector: self - .cycle_detector - .as_ref() - .map(|detector| Ok(Box::new(CycleDetector::visit(detector, key)?))) - .transpose()?, - user_cycle_detector_guard: None, - evaluation_data: Mutex::new(None), - }) - } - - pub(crate) fn start_computing_key(&mut self, k: &K::Key) { - assert!(self.user_cycle_detector_guard.is_none()); - self.user_cycle_detector_guard = self - .user_data - .cycle_detector - .as_ref() - .and_then(|v| v.start_computing_key(K::to_key_any(k))); - } - - pub(crate) fn finished_computing_key( - self, - k: &K::Key, - deps: &BothDeps, - reused: bool, - ) { - if let Some(v) = &self.user_data.cycle_detector { - v.finished_computing_key(K::to_key_any(k)) - } - - if let Some(v) = &self.user_data.activation_tracker { - let mut iter = deps.deps.iter().map(|d| d.to_key_any()); - - let activation_data = if reused { - ActivationData::Reused - } else { - ActivationData::Evaluated(self.evaluation_data.lock().take()) - }; - - v.key_activated(K::to_key_any(k), &mut iter, activation_data); - } - } -} - -/// A context for computations to request for additional dependencies. The -/// dependencies accessed are tracked for caching, if enabled based on -/// 'Strategy'. -/// -/// The context is valid only for the duration of the computation of a single -/// key. -/// -/// When marking values as changed on the ctx, the changes are part of the next -/// version. The next version is only committed when the current context is -/// dropped. context, which means that the "current" context will not see -/// updated values. -#[derive(Allocative)] -pub(crate) struct DiceComputationsImplLegacy { - pub(crate) transaction_ctx: Arc, - pub(crate) dice: Arc, - pub(crate) dep_trackers: BothDepTrackers, - pub(crate) extra: ComputationData, -} - -impl DiceComputationsImplLegacy { - pub(crate) fn new_transaction( - dice: Arc, - version: VersionGuard, - version_for_writes: VersionForWrites, - extra: ComputationData, - ) -> Self { - Self { - transaction_ctx: Arc::new(TransactionCtx::new( - version, - version_for_writes, - Changes::new(), - ActiveTransactionCountGuard::new(&dice), - )), - dep_trackers: BothDepTrackers::noop(), - dice: dice.dupe(), - extra, - } - } - - pub(crate) fn new_for_key_evaluation( - dice: Arc, - transaction_ctx: Arc, - extra: ComputationData, - ) -> Arc { - // TODO(bobyf): for memory, handle cases where we don't want explicit tracking - Arc::new(Self { - transaction_ctx, - dice: dice.dupe(), - dep_trackers: BothDepTrackers::recording(), - extra, - }) - } - - pub(crate) fn finalize(self: Arc) -> (BothDeps, ComputationData) { - // TODO express this via lifetimes - let this = Arc::try_unwrap(self).map_err(|_| "The computation lifetime of the `ctx` has ended and there should be no further references to the `Arc`").unwrap(); - - (this.dep_trackers.collect_deps(), this.extra) - } - - pub(crate) fn compute_opaque<'a, K>( - self: &'a Arc, - key: &K, - ) -> impl Future>> + 'a - where - K: Key, - { - // This would be simpler with an `async fn/async move {}`, but we create these for every edge in the computation - // and many of those may be live at a time, and so we need to take more care and ensure this is fairly small. 
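        // (annotation, not part of the patch: left_future/right_future wrap the
        // branches in futures::future::Either, keeping this hot-path future unboxed)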
- let cache = self.dice.find_cache::(); - let extra = self.extra.subrequest::>(key); - match extra { - Ok(extra) => cache - .eval_for_opaque(key, &self.transaction_ctx, extra) - .map(move |value| Ok(OpaqueValueImplLegacy::new(value, self, cache))) - .left_future(), - Err(e) => futures::future::ready(Err(e)).right_future(), - } - } - - pub(crate) fn compute_projection_sync
<P>(
-        self: &Arc<Self>,
-        derive_from: &OpaqueValueImplLegacy<P::DeriveFromKey>,
-        projection_key: &P,
-    ) -> DiceResult<P::Value>
-    where
-        P: ProjectionKey,
-    {
-        assert!(Arc::ptr_eq(self, derive_from.parent_computations));
-
-        let cache = self.dice.find_projection_cache::<P>();
-
-        let projection_key_as_key = ProjectionKeyAsKey {
-            derive_from_key: derive_from.key().clone(),
-            k: projection_key.clone(),
-        };
-
-        let extra = self
-            .extra
-            .subrequest::<ProjectionKeyProperties<P>>(&projection_key_as_key)?;
-
-        Ok(cache.eval_projection(
-            &projection_key_as_key,
-            derive_from,
-            &self.transaction_ctx,
-            &extra,
-        ))
-    }
-
-    pub(crate) fn changed<K, I>(&self, changed: I) -> DiceResult<()>
-    where
-        K: Key,
-        I: IntoIterator<Item = K> + Send + Sync + 'static,
-    {
-        let mut changes = self.transaction_ctx.changes();
-
-        changed.into_iter().try_for_each(|k| {
-            let dice = self.dice.dupe();
-            changes.change(
-                k.clone(),
-                Box::new(move |version| {
-                    debug!(msg = "marking value as changed", version = %version, key = %k);
-                    let cache = dice.find_cache::<K>();
-                    cache.dirty(k, version, true);
-
-                    true
-                }),
-            )
-        })
-    }
-
-    pub(crate) fn changed_to<K, I>(&self, changed: I) -> DiceResult<()>
-    where
-        K: Key,
-        I: IntoIterator<Item = (K, K::Value)> + Send + Sync + 'static,
-    {
-        let mut changes = self.transaction_ctx.changes();
-
-        changed.into_iter().try_for_each(|(k, v)| {
-            if !K::validity(&v) {
-                return Err(DiceError::invalid_change(Arc::new(k)));
-            }
-            let dice = self.dice.dupe();
-            changes.change(
-                k.clone(),
-                Box::new(move |version| {
-                    let cache = dice.find_cache::<K>();
-                    debug!(msg = "marking value as updated", version = %version, key = %k);
-                    cache.update_injected_value(k, version, v)
-                }),
-            )
-        })
-    }
-
-    /// Commit the changes registered via 'changed' and 'changed_to' to the current newest version.
-    /// This can only be called when this is the only node remaining in the computation graph.
-    pub(crate) fn commit(self: Arc<Self>) -> Arc<DiceComputationsImplLegacy> {
-        // TODO need to clean up these ctxs so we have less runtime errors from Arc references
-        let this = Arc::try_unwrap(self)
-            .map_err(|_| "Error: tried to commit when there are more references")
-            .unwrap();
-        let eval = Arc::try_unwrap(this.transaction_ctx)
-            .map_err(|_| "Error: tried to commit when there are more references")
-            .unwrap();
-
-        // hold onto the prev version until we get the new one below so we don't increment minor
-        // version needlessly.
- let _prev_v = eval.commit(); - - this.dice.make_ctx(ComputationData { - user_data: Arc::new(extra), - cycle_detector: this.extra.cycle_detector.take(), - user_cycle_detector_guard: None, - evaluation_data: Mutex::new(None), - }) - } - - pub(crate) fn get_version(&self) -> VersionNumber { - self.transaction_ctx.get_version() - } - - pub(crate) fn unstable_take(self: &Arc) -> DiceMap { - self.dice.unstable_take() - } - - pub(crate) fn global_data(&self) -> &DiceData { - &self.dice.data - } - - pub(crate) fn per_transaction_data(&self) -> &UserComputationData { - &self.extra.user_data - } - - pub(crate) fn cycle_guard(&self) -> DiceResult> { - match &self.extra.user_cycle_detector_guard { - None => Ok(None), - Some(guard) => match guard.as_any().downcast_ref() { - Some(guard) => Ok(Some(guard)), - None => Err(DiceError(Arc::new( - DiceErrorImpl::UnexpectedCycleGuardType { - expected_type_name: std::any::type_name::().to_owned(), - actual_type_name: guard.type_name().to_owned(), - }, - ))), - }, - } - } - - pub fn store_evaluation_data(&self, value: T) -> DiceResult<()> { - let mut evaluation_data = self.extra.evaluation_data.lock(); - if evaluation_data.is_some() { - return Err(DiceError::duplicate_activation_data()); - } - *evaluation_data = Some(Box::new(value) as _); - Ok(()) - } -} - -#[cfg(test)] -pub(crate) mod testing { - use crate::api::cycles::DetectCycles; - use crate::api::user_data::UserComputationData; - use crate::ctx::DiceComputationsImpl; - use crate::legacy::ctx::ComputationData; - use crate::legacy::incremental::versions::MinorVersion; - use crate::transaction::DiceTransactionImpl; - - pub(crate) trait DiceCtxExt { - fn get_minor_version(&self) -> MinorVersion; - } - - impl DiceCtxExt for DiceComputationsImpl { - fn get_minor_version(&self) -> MinorVersion { - match self { - DiceComputationsImpl::Legacy(delegate) => { - delegate.transaction_ctx.get_minor_version() - } - DiceComputationsImpl::Modern(_delegate) => { - unimplemented!("todo") - } - } - } - } - - impl DiceCtxExt for DiceTransactionImpl { - fn get_minor_version(&self) -> MinorVersion { - match self { - DiceTransactionImpl::Legacy(delegate) => delegate.0.get_minor_version(), - DiceTransactionImpl::Modern(_delegate) => { - unimplemented!("todo") - } - } - } - } - - pub(crate) trait ComputationDataExt { - fn testing_new() -> Self; - } - - impl ComputationDataExt for ComputationData { - fn testing_new() -> Self { - Self::new(UserComputationData::new(), DetectCycles::Enabled) - } - } -} diff --git a/dice/dice/src/legacy/cycles.rs b/dice/dice/src/legacy/cycles.rs index 5c1519d6e8c00..c71deaee183b0 100644 --- a/dice/dice/src/legacy/cycles.rs +++ b/dice/dice/src/legacy/cycles.rs @@ -14,15 +14,10 @@ use std::fmt::Debug; use std::fmt::Display; use std::hash::Hash; use std::hash::Hasher; -use std::sync::Arc; use allocative::Allocative; use cmp_any::PartialEqAny; -use dupe::IterDupedExt; -use indexmap::set::IndexSet; -use crate::api::error::DiceError; -use crate::api::error::DiceResult; use crate::api::key::Key; /// A `Key` that has been requested within Dice. @@ -68,106 +63,3 @@ impl PartialEq for dyn RequestedKey { } impl Eq for dyn RequestedKey {} - -#[derive(Allocative)] -pub(crate) struct CycleDetector { - stack: IndexSet>, -} - -impl CycleDetector { - pub(crate) fn new() -> Self { - Self { - stack: IndexSet::new(), - } - } - - pub(crate) fn visit(&self, key: &K) -> DiceResult - where - K: Allocative + Clone + Debug + Display + Eq + Hash + Send + Sync + 'static, - { - // quick and dirty cycle detection. 
we will have to make this more efficient - // TODO(bobyf) - let mut stack = self.stack.clone(); - if !stack.insert(Arc::new(key.clone())) { - Err(DiceError::cycle( - Arc::new(key.clone()), - stack.iter().duped().collect(), - )) - } else { - Ok(Self { stack }) - } - } -} - -#[cfg(test)] -mod tests { - use std::sync::Arc; - - use allocative::Allocative; - use derive_more::Display; - use dupe::Dupe; - use indexmap::indexset; - - use crate::api::error::DiceErrorImpl; - use crate::legacy::cycles::CycleDetector; - use crate::legacy::cycles::RequestedKey; - - #[derive(Clone, Dupe, Display, Debug, PartialEq, Eq, Hash, Allocative)] - struct K(usize); - - #[test] - fn cycle_detection_when_no_cycles() -> anyhow::Result<()> { - let detector = CycleDetector::new(); - let detector1 = detector.visit(&K(1))?; - let detector12 = detector1.visit(&K(2))?; - let detector123 = detector12.visit(&K(3))?; - let _detector1234 = detector123.visit(&K(4))?; - - let detector13 = detector1.visit(&K(3))?; - let _detector132 = detector13.visit(&K(2))?; - - Ok(()) - } - - #[test] - fn cycle_detection_when_cycles() -> anyhow::Result<()> { - let detector = CycleDetector::new(); - let detector = detector.visit(&K(1))?; - let detector = detector.visit(&K(2))?; - let detector = detector.visit(&K(3))?; - let detector = detector.visit(&K(4))?; - - match detector.visit(&K(1)) { - Ok(_) => { - panic!("should have cycle error") - } - Err(e) => match &*e.0 { - DiceErrorImpl::Cycle { - cyclic_keys, - trigger, - } => { - assert!( - (**trigger).get_key_equality() == K(1).get_key_equality(), - "expected trigger key to be `{}` but was `{}`", - K(1), - trigger - ); - assert_eq!( - cyclic_keys, - &indexset![ - Arc::new(K(1)) as Arc, - Arc::new(K(2)) as Arc, - Arc::new(K(3)) as Arc, - Arc::new(K(4)) as Arc - ] - ) - } - _ => { - panic!("wrong error type") - } - }, - } - - Ok(()) - } -} diff --git a/dice/dice/src/legacy/dice_futures.rs b/dice/dice/src/legacy/dice_futures.rs new file mode 100644 index 0000000000000..250d167c999a6 --- /dev/null +++ b/dice/dice/src/legacy/dice_futures.rs @@ -0,0 +1,10 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +pub mod dice_task; diff --git a/dice/dice/src/legacy/dice_futures/dice_future.rs b/dice/dice/src/legacy/dice_futures/dice_future.rs deleted file mode 100644 index 7e3ddd7afaeef..0000000000000 --- a/dice/dice/src/legacy/dice_futures/dice_future.rs +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -use std::future::Future; -use std::pin::Pin; -use std::task::Poll; - -use futures::future::BoxFuture; -use futures::FutureExt; -use more_futures::instrumented_shared::SharedEventsFuture; -use more_futures::spawn::StrongJoinHandle; -use more_futures::spawn::WeakFutureError; - -use crate::legacy::incremental::graph::storage_properties::StorageProperties; -use crate::result::CancellableResult; -use crate::GraphNode; - -type DiceJoinHandle = StrongJoinHandle< - SharedEventsFuture< - BoxFuture<'static, Result>, WeakFutureError>>, - >, ->; - -pub(crate) enum DiceFuture { - /// Earlier computed value. - Ready(Option>), - /// Current computation spawned the task. - AsyncCancellableSpawned(DiceJoinHandle), - /// Other computation for current key spawned the task. - AsyncCancellableJoining(DiceJoinHandle), -} - -impl Future for DiceFuture -where - S: StorageProperties, -{ - type Output = GraphNode; - - fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { - match self.get_mut() { - DiceFuture::Ready(value) => Poll::Ready(value.take().expect("polled after ready")), - DiceFuture::AsyncCancellableSpawned(fut) | DiceFuture::AsyncCancellableJoining(fut) => { - Pin::new(&mut fut.map(|cancellable| match cancellable { - Ok(res) => res, - Err(_) => { - unreachable!("Strong Join Handle was cancelled while still polled") - } - })) - .poll(cx) - } - } - } -} diff --git a/dice/dice/src/legacy/dice_futures/dice_task.rs b/dice/dice/src/legacy/dice_futures/dice_task.rs index 11d0d62048f7d..2c2c4789b6ba4 100644 --- a/dice/dice/src/legacy/dice_futures/dice_task.rs +++ b/dice/dice/src/legacy/dice_futures/dice_task.rs @@ -7,7 +7,6 @@ * of this source tree. */ -use allocative::Allocative; use dupe::Dupe; #[derive(Debug, Clone, Copy, Dupe)] @@ -18,13 +17,6 @@ pub(crate) enum DiceTaskStateForDebugging { AsyncInProgress, /// Weak handle is empty. AsyncDropped, - /// Sync task finished. - SyncReady, /// Sync in progress. SyncInProgress, } - -/// Marker trait for a task currently executed in `IncrementalEngine`. -pub(crate) trait DiceTask: Allocative + Send + Sync + 'static { - fn state_for_debugging(&self) -> DiceTaskStateForDebugging; -} diff --git a/dice/dice/src/legacy/dice_futures/future_handle.rs b/dice/dice/src/legacy/dice_futures/future_handle.rs deleted file mode 100644 index ada8667fc86be..0000000000000 --- a/dice/dice/src/legacy/dice_futures/future_handle.rs +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! The future that is spawned and managed by DICE. This is a single computation unit that is -//! shareable across different computation units. -//! -use allocative::Allocative; -use more_futures::spawn::CompletionObserver; -use more_futures::spawn::WeakFutureError; -use more_futures::spawn::WeakJoinHandle; - -use crate::legacy::dice_futures::dice_future::DiceFuture; -use crate::legacy::dice_futures::dice_task::DiceTask; -use crate::legacy::dice_futures::dice_task::DiceTaskStateForDebugging; -use crate::legacy::incremental::graph::storage_properties::StorageProperties; -use crate::result::CancellableResult; -use crate::GraphNode; - -#[derive(Allocative)] -pub(crate) struct WeakDiceFutureHandle { - #[allocative(skip)] // TODO(nga): value may be hiding in there. 
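    // (annotation, not part of the patch: a weak handle does not keep the spawned
    // computation alive; pollable() only yields a joinable future while the
    // underlying task still exists)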
- handle: WeakJoinHandle>, WeakFutureError>>, -} - -impl DiceTask for WeakDiceFutureHandle { - fn state_for_debugging(&self) -> DiceTaskStateForDebugging { - match self.handle.pollable() { - Some(p) => { - if p.inner().inner().peek().is_some() { - DiceTaskStateForDebugging::AsyncReady - } else { - DiceTaskStateForDebugging::AsyncInProgress - } - } - None => DiceTaskStateForDebugging::AsyncDropped, - } - } -} - -impl WeakDiceFutureHandle { - pub(crate) fn async_cancellable( - handle: WeakJoinHandle>, WeakFutureError>>, - ) -> WeakDiceFutureHandle { - WeakDiceFutureHandle { handle } - } - - pub(crate) fn pollable(&self) -> Option> { - self.handle - .pollable() - .map(DiceFuture::AsyncCancellableJoining) - } - - /// Turn this into a JoinHandle. The output is erased. This is used to observe this future - /// exiting, but that's it. - pub fn into_completion_observer(self) -> CompletionObserver>> { - self.handle.into_completion_observer() - } -} diff --git a/dice/dice/src/legacy/dice_futures/mod.rs b/dice/dice/src/legacy/dice_futures/mod.rs deleted file mode 100644 index 802f2b91e2ec5..0000000000000 --- a/dice/dice/src/legacy/dice_futures/mod.rs +++ /dev/null @@ -1,13 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -pub mod dice_future; -pub mod dice_task; -pub mod future_handle; -pub mod sync_handle; diff --git a/dice/dice/src/legacy/dice_futures/sync_handle.rs b/dice/dice/src/legacy/dice_futures/sync_handle.rs deleted file mode 100644 index d81861628ec9e..0000000000000 --- a/dice/dice/src/legacy/dice_futures/sync_handle.rs +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use allocative::Allocative; -use dupe::Clone_; -use dupe::Dupe; -use futures::future::Shared; - -use crate::legacy::dice_futures::dice_task::DiceTask; -use crate::legacy::dice_futures::dice_task::DiceTaskStateForDebugging; -use crate::legacy::incremental::graph::storage_properties::StorageProperties; -use crate::GraphNode; - -/// `IncrementalEngine` task type for projection key. -#[derive(Clone_, Allocative)] -pub(crate) struct SyncDiceTaskHandle { - // Doesn't have peek, so we cannot check if the value is there. - // Could measure the size of the channel though. - #[allocative(skip)] - pub(crate) rx: Shared>>, -} - -impl Dupe for SyncDiceTaskHandle {} - -impl DiceTask for SyncDiceTaskHandle { - fn state_for_debugging(&self) -> DiceTaskStateForDebugging { - if self.rx.peek().is_some() { - DiceTaskStateForDebugging::SyncReady - } else { - DiceTaskStateForDebugging::SyncInProgress - } - } -} diff --git a/dice/dice/src/legacy/incremental/dep_trackers.rs b/dice/dice/src/legacy/incremental/dep_trackers.rs deleted file mode 100644 index 4caedbc29df7b..0000000000000 --- a/dice/dice/src/legacy/incremental/dep_trackers.rs +++ /dev/null @@ -1,484 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. 
- * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! Trackers that records dependencies and reverse dependencies during execution of requested nodes - -use std::sync::Arc; - -use allocative::Allocative; -use dupe::Dupe; -use parking_lot::Mutex; - -use crate::legacy::incremental::dep_trackers::internals::ComputedDep; -use crate::legacy::incremental::graph::dependencies::ComputedDependency; -use crate::legacy::incremental::graph::GraphNode; -use crate::legacy::incremental::graph::GraphNodeDyn; -use crate::legacy::incremental::IncrementalComputeProperties; -use crate::legacy::incremental::IncrementalEngine; -use crate::versions::VersionNumber; -use crate::HashSet; - -/// The 'DepsTracker' is used to record dependencies of a particular compute node by calling -/// 'record' for each dependency, and then getting a list of 'Dependency's at the end by calling -/// 'collect_deps'. -#[derive(Allocative)] -struct RecordingDepsTracker { - deps: HashSet>, -} - -impl RecordingDepsTracker { - fn new() -> Self { - Self { - deps: HashSet::default(), - } - } - - /// records k with the given evaluator and engine - fn record(&mut self, v: VersionNumber, engine: Arc>, node: GraphNode) - where - K: IncrementalComputeProperties, - { - self.deps.insert(Box::new(ComputedDep { - engine: Arc::downgrade(&engine), - version: v, - node, - })); - } - - fn collect_deps(self) -> HashSet> { - self.deps - } -} - -#[derive(Allocative)] -struct RecordingRdepsTracker { - rdeps: Vec>, -} - -impl RecordingRdepsTracker { - fn new() -> Self { - Self { rdeps: Vec::new() } - } - - fn record(&mut self, dep: Arc) { - self.rdeps.push(dep) - } - - fn collect_rdeps(self) -> Vec> { - self.rdeps - } -} - -#[derive(Allocative)] -struct BothRecordingDepTrackers { - deps: RecordingDepsTracker, - rdeps: RecordingRdepsTracker, -} - -#[derive(Default)] -pub(crate) struct BothDeps { - pub(crate) deps: HashSet>, - pub(crate) rdeps: Vec>, -} - -impl BothDeps { - pub(crate) fn only_one_dep( - version: VersionNumber, - node: GraphNode, - incremental_engine: &Arc>, - ) -> BothDeps { - let dep: Box = Box::new(ComputedDep:: { - engine: Arc::downgrade(incremental_engine), - version, - node: node.dupe(), - }); - BothDeps { - deps: HashSet::from_iter([dep]), - rdeps: Vec::from_iter([node.into_dyn()]), - } - } -} - -#[derive(Allocative)] -enum BothDepTrackersImpl { - Noop, - Recording(Mutex), -} - -#[derive(Allocative)] -pub(crate) struct BothDepTrackers(BothDepTrackersImpl); - -/// There are two variants, a 'Recording' tracker and a 'Noop' tracker. The 'Noop' tracker never -/// tracks any dependencies such that 'collect_deps' is always empty. The 'Recording' tracker will -/// actually track the dependencies. 
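// A toy model of the Noop/Recording split documented above; `u64` stands in
// for the boxed ComputedDependency objects the real tracker stores.
use std::collections::HashSet;

enum DepsTracker {
    // top-level requests: recording is a no-op, collect_deps is always empty
    Noop,
    // per-key evaluations: remember every dependency touched
    Recording(HashSet<u64>),
}

impl DepsTracker {
    fn record(&mut self, key: u64) {
        if let DepsTracker::Recording(deps) = self {
            deps.insert(key);
        }
    }

    fn collect_deps(self) -> HashSet<u64> {
        match self {
            DepsTracker::Noop => HashSet::new(),
            DepsTracker::Recording(deps) => deps,
        }
    }
}

#[test]
fn noop_never_records() {
    let mut noop = DepsTracker::Noop;
    let mut recording = DepsTracker::Recording(HashSet::new());
    noop.record(1);
    recording.record(1);
    assert!(noop.collect_deps().is_empty());
    assert_eq!(recording.collect_deps().len(), 1);
}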
-impl BothDepTrackers { - pub(crate) fn noop() -> BothDepTrackers { - BothDepTrackers(BothDepTrackersImpl::Noop) - } - - pub(crate) fn recording() -> BothDepTrackers { - BothDepTrackers(BothDepTrackersImpl::Recording(Mutex::new( - BothRecordingDepTrackers { - deps: RecordingDepsTracker::new(), - rdeps: RecordingRdepsTracker::new(), - }, - ))) - } - - /// records k with the given evaluator and engine - pub(crate) fn record( - &self, - v: VersionNumber, - engine: Arc>, - node: GraphNode, - ) where - K: IncrementalComputeProperties, - { - match &self.0 { - BothDepTrackersImpl::Noop => {} - BothDepTrackersImpl::Recording(recording) => { - let mut recording = recording.lock(); - let BothRecordingDepTrackers { deps, rdeps } = &mut *recording; - deps.record(v, engine, node.dupe()); - rdeps.record(node.into_dyn()); - } - } - } - - pub(crate) fn collect_deps(self) -> BothDeps { - match self.0 { - BothDepTrackersImpl::Noop => BothDeps::default(), - BothDepTrackersImpl::Recording(recording) => { - let BothRecordingDepTrackers { deps, rdeps } = recording.into_inner(); - let deps = deps.collect_deps(); - let rdeps = rdeps.collect_rdeps(); - BothDeps { deps, rdeps } - } - } - } -} - -mod internals { - use std::any::type_name; - use std::any::Any; - use std::fmt; - use std::fmt::Debug; - use std::fmt::Display; - use std::fmt::Formatter; - use std::hash::Hash; - use std::hash::Hasher; - use std::sync::Arc; - use std::sync::Weak; - - use allocative::Allocative; - use async_trait::async_trait; - use cmp_any::PartialEqAny; - use dupe::Dupe; - - use crate::api::error::DiceResult; - use crate::introspection::graph::AnyKey; - use crate::legacy::ctx::ComputationData; - use crate::legacy::incremental::graph::GraphNode; - use crate::legacy::incremental::graph::GraphNodeDyn; - use crate::legacy::incremental::graph::ReadOnlyHistory; - use crate::legacy::incremental::graph::VersionedGraphKeyRef; - use crate::legacy::incremental::transaction_ctx::TransactionCtx; - use crate::legacy::incremental::versions::MinorVersion; - use crate::legacy::incremental::ComputedDependency; - use crate::legacy::incremental::Dependency; - use crate::legacy::incremental::IncrementalComputeProperties; - use crate::legacy::incremental::IncrementalEngine; - use crate::versions::VersionNumber; - - #[derive(Allocative)] - pub(crate) struct ComputedDep { - pub(crate) engine: Weak>, - pub(crate) version: VersionNumber, - pub(crate) node: GraphNode, - } - - impl ComputedDependency for ComputedDep - where - K: IncrementalComputeProperties, - { - fn get_history(&self) -> ReadOnlyHistory { - self.node.get_history() - } - - fn into_dependency(self: Box) -> Box { - Box::new(Dep { - engine: self.engine, - k: self.node.key().clone(), - }) - } - - fn get_key_equality(&self) -> (PartialEqAny, VersionNumber) { - (PartialEqAny::new(self.node.key()), self.version) - } - - fn to_key_any(&self) -> &dyn Any { - K::to_key_any(self.node.key()) - } - - fn hash(&self, mut state: &mut dyn Hasher) { - self.node.key().hash(&mut state); - self.version.hash(&mut state); - } - - fn is_valid(&self) -> bool { - self.node.is_valid() - } - } - - impl Debug for ComputedDep - where - K: IncrementalComputeProperties, - { - fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { - write!( - f, - "ComputedDependency(({:?}={:?}) -> {:?}, version={:?})", - type_name::(), - self.node.key(), - type_name::(), - self.version, - ) - } - } - - #[derive(Allocative)] - pub(crate) struct Dep - where - K: IncrementalComputeProperties, - { - pub(crate) engine: Weak>, - 
pub(crate) k: K::Key, - } - - impl Dep - where - K: IncrementalComputeProperties, - { - pub(crate) fn engine(&self) -> Arc> { - self.engine.upgrade().expect( - "IncrementalEngine should not be destroyed because IncrementalEngine owns Dep", - ) - } - } - - #[async_trait] - impl Dependency for Dep - where - K: IncrementalComputeProperties, - { - #[instrument(level = "info", skip(self, transaction_ctx, extra), fields(k = %self.k, version = %transaction_ctx.get_version()))] - async fn recompute( - &self, - transaction_ctx: &Arc, - extra: &ComputationData, - ) -> DiceResult<(Box, Arc)> { - let res = K::recompute(&self.k, &self.engine(), transaction_ctx, extra).await?; - - Ok(( - Box::new(ComputedDep { - engine: self.engine.dupe(), - version: transaction_ctx.get_version(), - node: res.dupe(), - }), - res.into_dyn(), - )) - } - - fn lookup_node(&self, v: VersionNumber, mv: MinorVersion) -> Option> { - if let Some(node) = self - .engine() - .versioned_cache - .get(VersionedGraphKeyRef::new(v, &self.k), mv) - .unpack_match() - { - Some(node.dupe().into_dyn()) - } else { - None - } - } - - fn dirty(&self, v: VersionNumber) { - self.engine().dirty(self.k.clone(), v, false) - } - - fn get_key_equality(&self) -> PartialEqAny { - PartialEqAny::new(&self.k) - } - - fn hash(&self, mut state: &mut dyn Hasher) { - self.k.hash(&mut state) - } - - fn introspect(&self) -> AnyKey { - AnyKey::new(self.k.clone()) - } - - fn to_key_any(&self) -> &dyn std::any::Any { - K::to_key_any(&self.k) - } - } - - impl Debug for Dep - where - K: IncrementalComputeProperties, - { - fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { - write!( - f, - "Dependency(({:?}={:?}) -> {:?})", - type_name::(), - self.k, - type_name::() - ) - } - } - - impl Display for Dep - where - K: IncrementalComputeProperties, - { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "Dependency({})", self.k) - } - } -} - -#[cfg(test)] -pub(crate) mod testing { - use std::sync::Arc; - use std::sync::Weak; - - pub(crate) use crate::legacy::incremental::dep_trackers::internals::ComputedDep; - pub(crate) use crate::legacy::incremental::dep_trackers::internals::Dep; - use crate::legacy::incremental::graph::GraphNode; - use crate::legacy::incremental::graph::OccupiedGraphNode; - use crate::legacy::incremental::IncrementalComputeProperties; - use crate::legacy::incremental::IncrementalEngine; - use crate::versions::VersionNumber; - - pub(crate) trait DepExt { - fn testing_new(engine: Weak>, k: K::Key) -> Self; - } - - impl DepExt for Dep - where - K: IncrementalComputeProperties, - { - fn testing_new(engine: Weak>, k: K::Key) -> Self { - Dep { engine, k } - } - } - - pub(crate) trait ComputedDepExt { - fn testing_new( - engine: Weak>, - version: VersionNumber, - node: Arc>, - ) -> Self; - } - - impl ComputedDepExt for ComputedDep - where - K: IncrementalComputeProperties, - { - fn testing_new( - engine: Weak>, - version: VersionNumber, - node: Arc>, - ) -> Self { - ComputedDep { - engine, - version, - node: GraphNode::occupied(node), - } - } - } -} - -#[cfg(test)] -mod tests { - use std::sync::Arc; - - use dupe::Dupe; - use futures::FutureExt; - - use crate::impls::core::graph::history::CellHistory; - use crate::legacy::ctx::testing::ComputationDataExt; - use crate::legacy::ctx::ComputationData; - use crate::legacy::incremental::dep_trackers::BothDeps; - use crate::legacy::incremental::dep_trackers::RecordingDepsTracker; - use crate::legacy::incremental::dep_trackers::RecordingRdepsTracker; - use 
crate::legacy::incremental::evaluator::testing::EvaluatorFn; - use crate::legacy::incremental::evaluator::testing::EvaluatorUnreachable; - use crate::legacy::incremental::graph::OccupiedGraphNode; - use crate::legacy::incremental::testing::ComputedDependencyExt; - use crate::legacy::incremental::IncrementalEngine; - use crate::legacy::incremental::TransactionCtx; - use crate::versions::VersionNumber; - use crate::HashSet; - - #[test] - fn recording_rdeps_tracker_tracks_rdeps() { - let mut rdeps_tracker = RecordingRdepsTracker::new(); - - let node = Arc::new(OccupiedGraphNode::>::new( - 1337, - 2, - CellHistory::verified(VersionNumber::new(0)), - )); - rdeps_tracker.record(node.dupe()); - let tracked = rdeps_tracker.collect_rdeps(); - - assert_eq!(tracked.len(), 1); - } - - #[tokio::test] - async fn recording_deps_tracker_tracks_deps() -> anyhow::Result<()> { - let mut deps_tracker = RecordingDepsTracker::new(); - // set up so that we have keys 2 and 3 with a history of VersionNumber(1) - let fn_for_2_and_3 = |k| async move { (k, BothDeps::default()) }.boxed(); - - let engine = IncrementalEngine::new(EvaluatorFn::new(move |k, _| fn_for_2_and_3(k))); - - let ctx = Arc::new(TransactionCtx::testing_new(VersionNumber::new(1))); - - let node1 = engine - .eval_entry_versioned(&2, &ctx, ComputationData::testing_new()) - .await; - let node2 = engine - .eval_entry_versioned(&3, &ctx, ComputationData::testing_new()) - .await; - - deps_tracker.record(VersionNumber::new(1), engine.dupe(), node1); - deps_tracker.record(VersionNumber::new(1), engine.dupe(), node2); - - let deps = deps_tracker.collect_deps(); - - let expected = HashSet::from_iter([ - ComputedDependencyExt::>::testing_raw( - 2, - VersionNumber::new(1), - true, - ), - ComputedDependencyExt::>::testing_raw( - 3, - VersionNumber::new(1), - true, - ), - ]); - assert_eq!(deps, expected); - - Ok(()) - } -} diff --git a/dice/dice/src/legacy/incremental/evaluator.rs b/dice/dice/src/legacy/incremental/evaluator.rs deleted file mode 100644 index 7031e9dd5a31b..0000000000000 --- a/dice/dice/src/legacy/incremental/evaluator.rs +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::sync::Arc; - -use async_trait::async_trait; -use more_futures::cancellation::CancellationContext; - -use crate::legacy::ctx::ComputationData; -use crate::legacy::dice_futures::dice_task::DiceTask; -use crate::legacy::incremental::graph::GraphNode; -use crate::legacy::incremental::IncrementalComputeProperties; -use crate::legacy::incremental::IncrementalEngine; -use crate::legacy::EvaluationResult; -use crate::TransactionCtx; -use crate::WeakDiceFutureHandle; - -#[async_trait] -pub(crate) trait Recompute: IncrementalComputeProperties { - /// Dice task executed in the `IncrementalEngine` for compute or recompute. 
- type DiceTask: DiceTask; - - async fn recompute( - key: &Self::Key, - engine: &Arc>, - transaction_ctx: &Arc, - extra: &ComputationData, - ) -> GraphNode; -} - -#[async_trait] -pub(crate) trait Evaluator: - IncrementalComputeProperties> -{ - async fn eval( - &self, - k: &Self::Key, - transaction_ctx: Arc, - cancellations: &CancellationContext, - extra: ComputationData, - ) -> EvaluationResult; -} - -#[cfg(test)] -pub(crate) mod testing { - use std::fmt; - use std::fmt::Debug; - use std::fmt::Display; - use std::hash::Hash; - use std::sync::Arc; - - use allocative::Allocative; - use async_trait::async_trait; - use dupe::Dupe; - use futures::future::BoxFuture; - use gazebo::prelude::*; - use more_futures::cancellation::CancellationContext; - - use crate::api::error::DiceResult; - use crate::api::storage_type::StorageType; - use crate::legacy::ctx::ComputationData; - use crate::legacy::incremental::evaluator::Evaluator; - use crate::legacy::incremental::graph::storage_properties::StorageProperties; - use crate::legacy::incremental::graph::GraphNode; - use crate::legacy::incremental::Computable; - use crate::legacy::incremental::IncrementalComputeProperties; - use crate::legacy::incremental::IncrementalEngine; - use crate::legacy::BothDeps; - use crate::legacy::EvaluationResult; - use crate::TransactionCtx; - use crate::WeakDiceFutureHandle; - - /// Evaluator which panics on attempt to evaluate. - #[derive(Default_, Allocative)] - #[allocative(bound = "")] - pub(crate) struct EvaluatorUnreachable { - _kv: std::marker::PhantomData V>, - } - - impl fmt::Debug for EvaluatorUnreachable { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("EvaluatorUnreachable") - .finish_non_exhaustive() - } - } - - impl< - K: Send + Sync + Hash + Eq + Display + Debug + Clone + Allocative + 'static, - V: Send + Sync + PartialEq + Dupe + Allocative + 'static, - > StorageProperties for EvaluatorUnreachable - { - type Key = K; - type Value = V; - - fn key_type_name() -> &'static str { - "TestingUnreachable" - } - - fn storage_type(&self) -> StorageType { - unreachable!() - } - - fn equality(&self, x: &Self::Value, y: &Self::Value) -> bool { - x == y - } - - fn validity(&self, _x: &Self::Value) -> bool { - true - } - - fn to_key_any(_key: &Self::Key) -> &dyn std::any::Any { - unreachable!() - } - } - - #[async_trait] - impl< - K: Send + Sync + Hash + Eq + Display + Debug + Clone + Allocative + 'static, - V: Send + Sync + PartialEq + Dupe + Allocative + 'static, - > IncrementalComputeProperties for EvaluatorUnreachable - { - type DiceTask = WeakDiceFutureHandle; - - async fn recompute( - _key: &Self::Key, - _engine: &Arc>, - _transaction_ctx: &Arc, - _extra: &ComputationData, - ) -> DiceResult> { - unreachable!() - } - } - - #[async_trait] - impl< - K: Send + Sync + Hash + Eq + Display + Debug + Clone + Allocative + 'static, - V: Send + Sync + PartialEq + Dupe + Allocative + 'static, - > Evaluator for EvaluatorUnreachable - { - async fn eval( - &self, - _k: &Self::Key, - _transaction_ctx: Arc, - _cancellations: &CancellationContext, - _extra: ComputationData, - ) -> EvaluationResult { - unreachable!() - } - } - - /// Evaluator which invokes boxed closure on evaluation. 
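The `EvaluatorFn` test helper deleted just below stores a cloneable `FnOnce` behind a `Box<dyn Fn>` by cloning the closure on every invocation. A minimal sketch of that adaptation, assuming the `futures` and `tokio` crates and simplified types (no `CancellationContext` or `BothDeps`):

use futures::future::BoxFuture;
use futures::FutureExt;

// Stores a cloneable FnOnce behind `Box<dyn Fn>`: each call clones the
// original closure and consumes the clone, so the wrapper itself is `Fn`.
struct EvaluatorFn<K, V> {
    f: Box<dyn Fn(K) -> BoxFuture<'static, V> + Send + Sync>,
}

impl<K: 'static, V: 'static> EvaluatorFn<K, V> {
    fn new<F>(f: F) -> Self
    where
        F: FnOnce(K) -> BoxFuture<'static, V> + Clone + Send + Sync + 'static,
    {
        Self {
            f: Box::new(move |k| {
                let f = f.clone(); // clone per call, as the original does
                f(k)
            }),
        }
    }

    async fn eval(&self, k: K) -> V {
        (self.f)(k).await
    }
}

#[tokio::main]
async fn main() {
    let eval = EvaluatorFn::new(|k: i32| async move { k * 2 }.boxed());
    assert_eq!(eval.eval(21).await, 42);
}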
- #[derive(Allocative)] - #[allocative(bound = "")] - pub(crate) struct EvaluatorFn { - #[allocative(skip)] - f: Box< - dyn for<'a> Fn(K, &'a CancellationContext) -> BoxFuture<'a, (V, BothDeps)> - + Send - + Sync - + 'static, - >, - } - - impl EvaluatorFn { - pub(crate) fn new(f: F) -> Self - where - F: for<'a> FnOnce(K, &'a CancellationContext) -> BoxFuture<'a, (V, BothDeps)> - + Clone - + Sync - + Send - + 'static, - { - Self { - f: Box::new(move |k, cancellations| { - let f = f.clone(); - f(k, cancellations) - }), - } - } - } - - impl Debug for EvaluatorFn { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("EvaluatorFn").finish_non_exhaustive() - } - } - - impl< - K: Clone + Eq + Hash + Display + Debug + Send + Sync + Allocative + 'static, - V: Dupe + PartialEq + Send + Sync + Allocative + 'static, - > StorageProperties for EvaluatorFn - { - type Key = K; - type Value = V; - - fn key_type_name() -> &'static str { - "EvaluatorFn" - } - - fn storage_type(&self) -> StorageType { - StorageType::LastN(1) - } - - fn equality(&self, x: &Self::Value, y: &Self::Value) -> bool { - x == y - } - - fn validity(&self, _x: &Self::Value) -> bool { - true - } - - fn to_key_any(key: &Self::Key) -> &dyn std::any::Any { - key - } - } - - #[async_trait] - impl IncrementalComputeProperties for EvaluatorFn - where - K: Computable + 'static, - V: Dupe + PartialEq + Allocative + Send + Sync + 'static, - { - type DiceTask = WeakDiceFutureHandle; - - async fn recompute( - key: &K, - engine: &Arc>, - transaction_ctx: &Arc, - extra: &ComputationData, - ) -> DiceResult> { - Ok(engine - .eval_entry_versioned(key, transaction_ctx, extra.subrequest::(key)?) - .await) - } - } - - #[async_trait] - impl Evaluator for EvaluatorFn - where - K: Clone + Eq + Hash + Display + Debug + Allocative + Send + Sync + 'static, - V: Dupe + PartialEq + Allocative + Send + Sync + 'static, - { - async fn eval( - &self, - k: &K, - _: Arc, - cancellations: &CancellationContext, - extra: ComputationData, - ) -> EvaluationResult { - let (value, both_deps) = (self.f)(k.clone(), cancellations).await; - EvaluationResult { - value, - both_deps, - extra, - } - } - } -} diff --git a/dice/dice/src/legacy/incremental/graph/dependencies.rs b/dice/dice/src/legacy/incremental/graph/dependencies.rs deleted file mode 100644 index 043f386baaa7d..0000000000000 --- a/dice/dice/src/legacy/incremental/graph/dependencies.rs +++ /dev/null @@ -1,209 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! 
Represents the forward and backward dependencies of the computation graph
-
-use std::any::Any;
-use std::collections::hash_map::Entry;
-use std::fmt::Debug;
-use std::fmt::Display;
-use std::hash::Hash;
-use std::hash::Hasher;
-use std::sync::Arc;
-use std::sync::Weak;
-
-use allocative::Allocative;
-use async_trait::async_trait;
-use cmp_any::PartialEqAny;
-use dupe::Dupe;
-use parking_lot::RwLock;
-use parking_lot::RwLockReadGuard;
-
-use crate::api::error::DiceResult;
-use crate::introspection::graph::AnyKey;
-use crate::legacy::ctx::ComputationData;
-use crate::legacy::incremental::graph::GraphNodeDyn;
-use crate::legacy::incremental::graph::ReadOnlyHistory;
-use crate::legacy::incremental::transaction_ctx::TransactionCtx;
-use crate::legacy::incremental::versions::MinorVersion;
-use crate::versions::VersionNumber;
-use crate::HashMap;
-
-/// The dependency information stored by the core engine
-#[async_trait]
-pub(crate) trait Dependency: Allocative + Debug + Display + Send + Sync {
-    async fn recompute(
-        &self,
-        transaction_ctx: &Arc<TransactionCtx>,
-        extra: &ComputationData,
-    ) -> DiceResult<(Box<dyn ComputedDependency>, Arc<dyn GraphNodeDyn>)>;
-
-    /// looks up the stored node of this dependency. This can return `None` if this entry
-    /// was evicted from the storage.
-    fn lookup_node(&self, v: VersionNumber, mv: MinorVersion) -> Option<Arc<dyn GraphNodeDyn>>;
-
-    fn dirty(&self, v: VersionNumber);
-
-    fn get_key_equality(&self) -> PartialEqAny;
-
-    fn to_key_any(&self) -> &dyn Any;
-
-    fn hash(&self, state: &mut dyn Hasher);
-
-    /// Provide a type-erased AnyKey representing this Dependency. This is used when traversing
-    /// DICE to dump its state.
-    fn introspect(&self) -> AnyKey;
-}
-
-impl PartialEq for dyn Dependency {
-    fn eq(&self, other: &Self) -> bool {
-        self.get_key_equality() == other.get_key_equality()
-    }
-}
-
-impl Eq for dyn Dependency {}
-
-impl Hash for dyn Dependency {
-    fn hash<H: Hasher>(&self, state: &mut H) {
-        self.hash(state)
-    }
-}
-
-/// The dependency information right after it was requested. This allows us to look up
-/// information from the dependency without further computation.
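The `PartialEq`/`Hash` impls for `dyn Dependency` above rely on type-erased equality and hashing: the trait exposes object-safe hooks (`get_key_equality`, `hash(&mut dyn Hasher)`), and the trait-object impls delegate to them. A self-contained sketch of the same trick, using a hypothetical `ErasedKey` trait in place of `Dependency`/`PartialEqAny`:

use std::any::Any;
use std::collections::HashSet;
use std::hash::{Hash, Hasher};

trait ErasedKey: Any {
    fn dyn_eq(&self, other: &dyn ErasedKey) -> bool;
    fn dyn_hash(&self, state: &mut dyn Hasher);
    fn as_any(&self) -> &dyn Any;
}

impl<T: Eq + Hash + Any> ErasedKey for T {
    fn dyn_eq(&self, other: &dyn ErasedKey) -> bool {
        // Equal only if the concrete types match and the values compare equal.
        other
            .as_any()
            .downcast_ref::<T>()
            .map_or(false, |o| self == o)
    }

    fn dyn_hash(&self, mut state: &mut dyn Hasher) {
        // `&mut dyn Hasher` itself implements `Hasher`, so we can hash into it.
        self.hash(&mut state)
    }

    fn as_any(&self) -> &dyn Any {
        self
    }
}

impl PartialEq for dyn ErasedKey {
    fn eq(&self, other: &Self) -> bool {
        self.dyn_eq(other)
    }
}

impl Eq for dyn ErasedKey {}

impl Hash for dyn ErasedKey {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.dyn_hash(state)
    }
}

fn main() {
    // Heterogeneous keys can now live in one set, as DICE's dep sets do.
    let mut set: HashSet<Box<dyn ErasedKey>> = HashSet::new();
    set.insert(Box::new(1u32));
    set.insert(Box::new("a"));
    set.insert(Box::new(1u32)); // duplicate, not re-inserted
    assert_eq!(set.len(), 2);
}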
-pub(crate) trait ComputedDependency: Allocative + Debug + Send + Sync { - fn get_history(&self) -> ReadOnlyHistory; - - /// converts itself into the data to be stored in deps and rdeps - fn into_dependency(self: Box) -> Box; - - fn get_key_equality(&self) -> (PartialEqAny, VersionNumber); - - fn to_key_any(&self) -> &dyn Any; - - fn hash(&self, state: &mut dyn Hasher); - - fn is_valid(&self) -> bool; -} - -impl PartialEq for dyn ComputedDependency { - fn eq(&self, other: &Self) -> bool { - self.get_key_equality() == other.get_key_equality() - } -} - -impl Eq for dyn ComputedDependency {} - -impl Hash for dyn ComputedDependency { - fn hash(&self, state: &mut H) { - self.hash(state) - } -} - -#[derive(Allocative)] -pub(crate) struct VersionedDependencies { - /// once the deps at a particular version is written, it is final and never modified - /// We only store the dependencies relevant to the most recent result - deps: RwLock>>)>>, -} - -impl VersionedDependencies { - pub(crate) fn new() -> Self { - Self { - deps: RwLock::new(None), - } - } - - pub(crate) fn deps(&self) -> Option>>> { - self.deps.read().as_ref().map(|d| d.1.dupe()) - } - - pub(crate) fn add_deps(&self, v: VersionNumber, deps: Arc>>) { - let mut this_deps = self.deps.write(); - if this_deps.as_ref().map_or(true, |d| v > d.0) { - // we only ever write the newest version of the dependencies of this node for simplicity - // That way, if we are ever dirtied, we just check if the latest version of the deps - // have changed at the dirtied version which only requires spawning one set of deps. - // It might cause us to falsely fail to reuse some nodes, but this is less memory - // and less work per node when in incremental cases. - *this_deps = Some((v, deps)); - } - } - - pub(crate) fn debug_deps( - &self, - ) -> &RwLock>>)>> { - &self.deps - } -} - -/// Eq and Hash for an rdep is related to the address of the node it points to, since in a dice -/// session, the node stored is always kept alive via an `Arc`, node equality is the ptr address -#[derive(Clone, Dupe, Allocative)] -#[repr(transparent)] -pub(crate) struct Rdep(pub(crate) Weak); - -impl PartialEq for Rdep { - fn eq(&self, other: &Self) -> bool { - Weak::ptr_eq(&self.0, &other.0) - } -} - -impl Eq for Rdep {} - -impl Hash for Rdep { - fn hash(&self, state: &mut H) { - self.0.upgrade().map(|p| Arc::as_ptr(&p)).hash(state) - } -} - -// the set of reverse dependencies of a node -#[derive(Clone, Dupe, Allocative)] -pub(crate) struct VersionedRevDependencies { - data: Arc>, -} - -#[derive(Allocative)] -pub(crate) struct VersionedRevDependenciesData { - // TODO(bobyf) do we need something special for quick lookup per version or is this fine - pub(crate) rdeps: HashMap, -} - -impl VersionedRevDependencies { - pub(crate) fn new() -> Self { - Self { - data: Arc::new(RwLock::new(VersionedRevDependenciesData { - rdeps: Default::default(), - })), - } - } - - pub(crate) fn add_rdep( - &self, - dependent: Weak, - current_version: VersionNumber, - ) { - let mut data = self.data.write(); - - match data.rdeps.entry(Rdep(dependent)) { - Entry::Occupied(entry) => { - if *entry.get() < current_version { - entry.replace_entry(current_version); - } - } - Entry::Vacant(v) => { - v.insert(current_version); - } - } - } - - pub(crate) fn rdeps(&self) -> RwLockReadGuard { - self.data.read() - } -} diff --git a/dice/dice/src/legacy/incremental/graph/mod.rs b/dice/dice/src/legacy/incremental/graph/mod.rs deleted file mode 100644 index e4554147a9474..0000000000000 --- 
a/dice/dice/src/legacy/incremental/graph/mod.rs +++ /dev/null @@ -1,2259 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! -//! A cache that deals with versions -//! -//! This is responsible for performing incremental caching and invalidations -//! with multiple versions in-flight at the same time. -//! -//! The 'VersionedCache' will track dependency edges and use computed version -//! number for each cache entry and a global version counter to determine -//! up-to-date-ness of cache entries. -//! - -pub(crate) mod dependencies; -pub(crate) mod storage_properties; - -use std::borrow::Borrow; -use std::collections::Bound; -use std::fmt::Debug; -use std::ops::Bound::Included; -use std::ops::Bound::Unbounded; -use std::ops::Deref; -use std::ops::DerefMut; -use std::sync::Arc; -use std::sync::Weak; - -use allocative::Allocative; -use dashmap::mapref::one::RefMut; -use dashmap::DashMap; -use dupe::Clone_; -use dupe::Copy_; -use dupe::Dupe; -use dupe::Dupe_; -use gazebo::variants::UnpackVariants; -use gazebo::variants::VariantName; -use parking_lot::MappedRwLockReadGuard; -use parking_lot::RwLock; -use parking_lot::RwLockReadGuard; -use parking_lot::RwLockWriteGuard; -use sorted_vector_map::SortedVectorMap; - -use crate::api::storage_type::StorageType; -use crate::impls::core::graph::history::HistoryState; -use crate::introspection::graph::AnyKey; -use crate::legacy::incremental::dep_trackers::BothDeps; -use crate::legacy::incremental::graph::dependencies::ComputedDependency; -use crate::legacy::incremental::graph::dependencies::VersionedDependencies; -use crate::legacy::incremental::graph::dependencies::VersionedRevDependencies; -use crate::legacy::incremental::graph::storage_properties::StorageProperties; -use crate::legacy::incremental::versions::MinorVersion; -use crate::legacy::incremental::CellHistory; -use crate::legacy::incremental::Dependency; -use crate::versions::VersionNumber; -use crate::versions::VersionRanges; -use crate::HashSet; - -/// The Key for a Versioned, incremental computation -#[derive(Clone, Debug)] -pub(crate) struct VersionedGraphKey { - v: VersionNumber, - k: K, -} - -impl VersionedGraphKey { - pub(crate) fn new(v: VersionNumber, k: K) -> Self { - VersionedGraphKey { v, k } - } - - #[cfg(test)] - pub(crate) fn as_ref(&self) -> VersionedGraphKeyRef { - VersionedGraphKeyRef { - v: self.v, - k: &self.k, - } - } -} - -#[derive(Debug, Copy_, Clone_, Dupe_)] -pub(crate) struct VersionedGraphKeyRef<'k, K> { - v: VersionNumber, - k: &'k K, -} - -impl<'k, K> VersionedGraphKeyRef<'k, K> { - pub(crate) fn new(v: VersionNumber, k: &'k K) -> Self { - VersionedGraphKeyRef { v, k } - } -} - -/// actual entries as seen when querying the cache -/// The placeholder will be used to indicate known dirty entries. 
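The versioned storage this module implemented boils down to: key each entry by the version at which its value changed, and serve a read at version v from the newest entry at or below v. A minimal sketch of that lookup, assuming a plain `BTreeMap` in place of DICE's `SortedVectorMap` and a `u64` stand-in for `VersionNumber`:

use std::collections::BTreeMap;
use std::ops::Bound::{Included, Unbounded};

type VersionNumber = u64;

struct VersionedCell<V> {
    // One entry per version at which the value changed.
    entries: BTreeMap<VersionNumber, V>,
}

impl<V> VersionedCell<V> {
    fn new() -> Self {
        Self { entries: BTreeMap::new() }
    }

    fn insert(&mut self, v: VersionNumber, value: V) {
        self.entries.insert(v, value);
    }

    /// Newest entry whose version is <= the requested version, if any.
    fn get_at(&self, v: VersionNumber) -> Option<(&VersionNumber, &V)> {
        self.entries.range((Unbounded, Included(v))).next_back()
    }
}

fn main() {
    let mut cell = VersionedCell::new();
    cell.insert(1, "computed at v1");
    cell.insert(4, "recomputed at v4");

    // A read at v3 still sees the v1 value; a read at v5 sees the v4 value.
    assert_eq!(cell.get_at(3).map(|(_, v)| *v), Some("computed at v1"));
    assert_eq!(cell.get_at(5).map(|(_, v)| *v), Some("recomputed at v4"));
    assert_eq!(cell.get_at(0), None);
}

The real graph layers history and dirtiness checks on top of this range query, but the at-or-below lookup is the core of how multiple in-flight versions share one cache.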
-#[derive(Clone_, Dupe_, UnpackVariants, Allocative)] -pub(crate) enum VersionedGraphNode { - Occupied(Arc>), - Vacant(Arc>), -} - -impl VersionedGraphNode -where - K: StorageProperties, -{ - pub(crate) fn key(&self) -> &K::Key { - match &self { - VersionedGraphNode::Occupied(o) => &o.key, - VersionedGraphNode::Vacant(v) => &v.key, - } - } -} - -#[derive(Clone_, Dupe_, UnpackVariants, Allocative)] -pub(crate) enum VersionedGraphNodeInternal { - Occupied(Arc>), - Transient(Arc>), - Vacant(Arc>), -} - -impl VersionedGraphNodeInternal -where - K: StorageProperties, -{ - pub(crate) fn unpack_graph_value(&self) -> Option> { - match self { - VersionedGraphNodeInternal::Occupied(e) => Some(GraphNode::occupied(e.dupe())), - VersionedGraphNodeInternal::Transient(e) => Some(GraphNode::transient(e.dupe())), - VersionedGraphNodeInternal::Vacant(_) => None, - } - } - - #[allow(dead_code)] - pub(crate) fn key(&self) -> &K::Key { - match self { - VersionedGraphNodeInternal::Occupied(o) => &o.key, - VersionedGraphNodeInternal::Transient(t) => &t.key, - VersionedGraphNodeInternal::Vacant(v) => &v.key, - } - } -} - -impl VersionedGraphNodeInternal { - pub(crate) fn force_dirty(&self, v: VersionNumber) -> bool { - match self { - VersionedGraphNodeInternal::Occupied(e) => e.metadata.write().hist.force_dirty(v), - VersionedGraphNodeInternal::Vacant(e) => e.hist.write().force_dirty(v), - VersionedGraphNodeInternal::Transient(transient) => match &transient.last_valid.1 { - // for transient entries, we mark the previous entry as invalidated, since when we - // replace the transient entry with a new valid one, we do the history using the - // previous entry - VersionedGraphNode::Occupied(e) => e.metadata.write().hist.force_dirty(v), - VersionedGraphNode::Vacant(e) => e.hist.write().force_dirty(v), - }, - } - } - - pub(crate) fn mark_invalidated(&self, v: VersionNumber) -> bool { - match self { - VersionedGraphNodeInternal::Occupied(e) => e.metadata.write().hist.mark_invalidated(v), - VersionedGraphNodeInternal::Vacant(e) => e.hist.write().mark_invalidated(v), - VersionedGraphNodeInternal::Transient(transient) => { - // for transient entries, we mark the previous entry as invalidated, since when we - // replace the transient entry with a new valid one, we do the history using the - // previous entry - match &transient.last_valid.1 { - VersionedGraphNode::Occupied(e) => e.metadata.write().hist.mark_invalidated(v), - VersionedGraphNode::Vacant(e) => e.hist.write().mark_invalidated(v), - } - } - } - } -} - -/// The stored entry of the cache -#[derive(Allocative)] -pub(crate) struct OccupiedGraphNode { - key: K::Key, - res: K::Value, - metadata: RwLock, -} - -/// Represents a node currently in the DICE graph, along with its typed value. -/// The only operations on this are operations that are race condition free. So, one can only read -/// the metadata, or set `rdeps`, which is safe as it will not result in any other thread that -/// holds this particular node from seeing any value that may become outdated. 
-#[derive(Clone_, Dupe_, Allocative)] -pub(crate) struct GraphNode(GraphNodeInner); - -#[derive(Clone_, Dupe_, UnpackVariants, Allocative)] -enum GraphNodeInner { - Occupied(Arc>), - Transient(Arc>), -} - -impl GraphNode { - pub(crate) fn occupied(e: Arc>) -> Self { - Self(GraphNodeInner::Occupied(e)) - } - - pub(crate) fn transient(e: Arc>) -> Self { - Self(GraphNodeInner::Transient(e)) - } -} - -impl GraphNode -where - K: StorageProperties, -{ - pub(crate) fn key(&self) -> &K::Key { - match &self.0 { - GraphNodeInner::Occupied(o) => &o.key, - GraphNodeInner::Transient(t) => &t.key, - } - } - - pub(crate) fn val(&self) -> &K::Value { - match &self.0 { - GraphNodeInner::Occupied(o) => &o.res, - GraphNodeInner::Transient(t) => &t.res, - } - } - - pub(crate) fn read_meta(&self) -> RwLockReadGuard<'_, NodeMetadata> { - match &self.0 { - GraphNodeInner::Occupied(o) => o.read_meta(), - GraphNodeInner::Transient(t) => t.read_meta(), - } - } - - pub(crate) fn try_read_meta(&self) -> Option> { - match &self.0 { - GraphNodeInner::Occupied(o) => o.try_read_meta(), - GraphNodeInner::Transient(t) => t.try_read_meta(), - } - } - - pub(crate) fn is_valid(&self) -> bool { - match &self.0 { - GraphNodeInner::Occupied(o) => o.is_valid(), - GraphNodeInner::Transient(t) => t.is_valid(), - } - } - - pub(crate) fn get_history(&self) -> ReadOnlyHistory { - match &self.0 { - GraphNodeInner::Occupied(o) => o.get_history(), - GraphNodeInner::Transient(t) => t.get_history(), - } - } - - pub(crate) fn into_dyn(self) -> Arc { - match self.0 { - GraphNodeInner::Occupied(o) => o, - GraphNodeInner::Transient(t) => t, - } - } -} - -/// Represents a node currently in the DICE graph. The only operations on this are operations that -/// are race condition free. So, one can only read the metadata, or set `rdeps`, which is safe -/// as it will not result in any other thread that holds this particular node from seeing any -/// value that may become outdated. 
-pub(crate) trait GraphNodeDyn: Allocative + Send + Sync + 'static { - fn get_history(&self) -> ReadOnlyHistory; - - fn read_rdeps(&self) -> VersionedRevDependencies; - - fn add_rdep(&self, dependent: Weak, v: VersionNumber); - - fn writable(&self) -> WritableMetadata; - - /// whether this is a valid entry or a transient entry - fn is_valid(&self) -> bool; - - fn key(&self) -> AnyKey; - - fn id(&self) -> usize; -} - -impl GraphNodeDyn for OccupiedGraphNode -where - K: StorageProperties, -{ - fn get_history(&self) -> ReadOnlyHistory { - ReadOnlyHistory::from(self.metadata.read()) - } - - fn read_rdeps(&self) -> VersionedRevDependencies { - self.metadata.read().rdeps.dupe() - } - - fn add_rdep(&self, dependent: Weak, v: VersionNumber) { - // we only need to hold a read lock on `metadata` since adding `rdep` does not affect - // the versioning/history of this node at all, which means that any other threads holding - // onto this node will see this operation as side-effect free - self.metadata.read().rdeps.add_rdep(dependent, v) - } - - fn writable(&self) -> WritableMetadata { - WritableMetadata::from(self.metadata.write()) - } - - fn is_valid(&self) -> bool { - true - } - - fn key(&self) -> AnyKey { - AnyKey::new(self.key.clone()) - } - - fn id(&self) -> usize { - self as *const Self as usize - } -} - -/// Meta data about a DICE node, which are its edges and history information -#[derive(Allocative)] -pub(crate) struct NodeMetadata { - pub(crate) deps: VersionedDependencies, - pub(crate) rdeps: VersionedRevDependencies, - pub(crate) hist: CellHistory, -} - -pub(crate) enum ReadOnlyHistory<'a> { - FromHistInfo(MappedRwLockReadGuard<'a, CellHistory>), - FromCell(RwLockReadGuard<'a, CellHistory>), -} - -impl<'a> Borrow for ReadOnlyHistory<'a> { - fn borrow(&self) -> &CellHistory { - match self { - ReadOnlyHistory::FromHistInfo(hist) => hist, - ReadOnlyHistory::FromCell(hist) => hist, - } - } -} - -impl<'a> From> for ReadOnlyHistory<'a> { - fn from(lock: RwLockReadGuard<'a, NodeMetadata>) -> Self { - Self::FromHistInfo(RwLockReadGuard::map(lock, |h| &h.hist)) - } -} - -impl<'a> From> for ReadOnlyHistory<'a> { - fn from(lock: RwLockReadGuard<'a, CellHistory>) -> Self { - Self::FromCell(lock) - } -} - -impl<'a> Deref for ReadOnlyHistory<'a> { - type Target = CellHistory; - - fn deref(&self) -> &Self::Target { - match self { - ReadOnlyHistory::FromHistInfo(hist) => hist, - ReadOnlyHistory::FromCell(hist) => hist, - } - } -} - -pub(crate) struct WritableMetadata<'a>(RwLockWriteGuard<'a, NodeMetadata>); - -impl<'a> From> for WritableMetadata<'a> { - fn from(lock: RwLockWriteGuard<'a, NodeMetadata>) -> Self { - Self(lock) - } -} - -impl<'a> Deref for WritableMetadata<'a> { - type Target = NodeMetadata; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl<'a> DerefMut for WritableMetadata<'a> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -impl OccupiedGraphNode { - pub(crate) fn new(key: K::Key, res: K::Value, hist: CellHistory) -> Self { - Self { - key, - res, - metadata: RwLock::new(NodeMetadata { - hist, - deps: VersionedDependencies::new(), - rdeps: VersionedRevDependencies::new(), - }), - } - } - - #[cfg(test)] - pub(crate) fn testing_new( - key: K::Key, - res: K::Value, - hist: CellHistory, - deps: VersionedDependencies, - rdeps: VersionedRevDependencies, - ) -> Self { - Self { - key, - res, - metadata: RwLock::new(NodeMetadata { deps, rdeps, hist }), - } - } - - pub(crate) fn read_meta(&self) -> RwLockReadGuard<'_, NodeMetadata> { - self.metadata.read() - } 
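`ReadOnlyHistory` above unifies two `parking_lot` guard types behind one `Deref` target, so callers can borrow the history whether it came from a field of the locked metadata or from a lock around the history itself. A minimal sketch of that guard-wrapper pattern, assuming the `parking_lot` crate and simplified types:

use std::ops::Deref;

use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard};

struct History(Vec<u64>);

struct Metadata {
    history: History,
}

enum ReadOnlyHistory<'a> {
    // Guard narrowed down from a lock over the whole metadata struct.
    FromMetadata(MappedRwLockReadGuard<'a, History>),
    // Guard over a lock that holds the history directly.
    FromCell(RwLockReadGuard<'a, History>),
}

impl<'a> From<RwLockReadGuard<'a, Metadata>> for ReadOnlyHistory<'a> {
    fn from(lock: RwLockReadGuard<'a, Metadata>) -> Self {
        // Narrow the whole-struct guard down to just the history field.
        Self::FromMetadata(RwLockReadGuard::map(lock, |m| &m.history))
    }
}

impl<'a> Deref for ReadOnlyHistory<'a> {
    type Target = History;

    fn deref(&self) -> &History {
        match self {
            ReadOnlyHistory::FromMetadata(h) => h,
            ReadOnlyHistory::FromCell(h) => h,
        }
    }
}

fn main() {
    let meta = RwLock::new(Metadata { history: History(vec![1, 2, 3]) });
    let hist: ReadOnlyHistory = meta.read().into();
    assert_eq!(hist.0.len(), 3);

    let cell = RwLock::new(History(vec![4]));
    let hist = ReadOnlyHistory::FromCell(cell.read());
    assert_eq!(hist.0, vec![4]);
}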
- - fn try_read_meta(&self) -> Option> { - self.metadata.try_read() - } - - pub(crate) fn mark_unchanged( - &self, - v: VersionNumber, - deps: HashSet>, - ) -> VersionNumber { - // Marking a node as unchanged ALWAYS requires the dependencies for which we used to deem - // that the node is unchanged. - // - // Consider a node n2 that depends on n1 at version v0. - // We then dirty versions v1, v2, v3 at n1. We'd defer dirtying v2, v3 on n2 due - // to the fact that it's possible that at v2 and v3, n2 no longer depends on n1 - // and we rely on deferred propagation of dirtiness. However, if at v2, we recompute - // and find that the values are equal to that at v0, then we can resurrect v0's n2. - // However, at this point, we will need to deferred propagate the dirty at v3. - let mut histinfo = self.metadata.write(); - let changed_since = histinfo - .hist - .mark_verified(v, deps.iter().map(|d| d.get_history())); - histinfo.deps.add_deps( - changed_since, - Arc::new( - deps.into_iter() - .map(ComputedDependency::into_dependency) - .collect(), - ), - ); - - changed_since - } -} - -/// An entry in the graph that has no computation value associated. This is used to store the -/// history information that is known. -/// This will be replaced by `OccupiedGraphNode` when a computed value is associated with -/// this node. There is no guarantees of when, or even if that will occur since users may never -/// need the associated value at this node. -#[derive(Allocative)] -pub(crate) struct VacantGraphNode { - key: K::Key, - hist: RwLock, -} - -impl VacantGraphNode { - pub(crate) fn get_history(&self) -> RwLockReadGuard<'_, CellHistory> { - self.hist.read() - } -} - -/// An entry in the graph for which a computation returned a transient result. -#[derive(Allocative)] -pub(crate) struct TransientGraphNode { - key: K::Key, - res: K::Value, - meta: RwLock, - /// The largest minor version encountered so far - m_version: MinorVersion, - /// the last time we saw a valid result - last_valid: (VersionNumber, VersionedGraphNode), -} - -impl TransientGraphNode { - fn new( - key: K::Key, - res: K::Value, - hist: CellHistory, - m_version: MinorVersion, - last_valid: (VersionNumber, VersionedGraphNode), - ) -> Self { - Self { - key, - res, - meta: RwLock::new(NodeMetadata { - deps: VersionedDependencies::new(), - rdeps: VersionedRevDependencies::new(), - hist, - }), - m_version, - last_valid, - } - } -} - -impl TransientGraphNode { - fn read_meta(&self) -> RwLockReadGuard<'_, NodeMetadata> { - self.meta.read() - } - - fn try_read_meta(&self) -> Option> { - self.meta.try_read() - } - - fn mark_unchanged(&self, v: VersionNumber) -> VersionNumber { - self.meta - .write() - .hist - .mark_verified(v, std::iter::empty::()) - } -} - -impl GraphNodeDyn for TransientGraphNode -where - K: StorageProperties, -{ - fn get_history(&self) -> ReadOnlyHistory { - ReadOnlyHistory::from(self.meta.read()) - } - - fn read_rdeps(&self) -> VersionedRevDependencies { - self.meta.read().rdeps.dupe() - } - - fn add_rdep(&self, _dependent: Weak, _v: VersionNumber) { - // do nothing, since this is an transient entry, the nodes that depend on this must also be - // transient, therefore, does not need rdeps invalidation - } - - fn writable(&self) -> WritableMetadata { - WritableMetadata::from(self.meta.write()) - } - - fn is_valid(&self) -> bool { - false - } - - fn key(&self) -> AnyKey { - AnyKey::new(self.key.clone()) - } - - fn id(&self) -> usize { - self as *const Self as usize - } -} - -/// The actual incremental cache that checks 
versions and dependencies' versions
-/// to maintain correct caching based on versions and the versions of its
-/// dependencies.
-///
-/// TODO consolidate the two maps where possible. This will depend on whether we
-/// offer persistent storage to users, and how injected keys will work.
-#[derive(Allocative)]
-pub(crate) struct VersionedGraph<K: StorageProperties> {
-    /// storage that stores every version forever
-    /// This storage is implemented so that the map keys are composed of the versions for which
-    /// the node changes. Corresponding to each key is a node storing the values and the history.
-    /// VacantGraphEntries can only be present when no other entries are present for the key at
-    /// any version.
-    pub(crate) last_n:
-        DashMap<K::Key, SortedVectorMap<VersionNumber, VersionedGraphNodeInternal<K>>>,
-    pub(crate) storage_properties: K,
-}
-
-#[derive(Clone_)]
-pub(crate) struct VersionedGraphResultMismatch<K: StorageProperties> {
-    /// Last known value for the key.
-    pub(crate) entry: GraphNode<K>,
-    /// Versions at which the value for the given key is valid.
-    pub(crate) verified_versions: VersionRanges,
-}
-
-impl<K> VersionedGraphResultMismatch<K>
-where
-    K: StorageProperties,
-{
-    pub(crate) fn deps_at_last_version(
-        &self,
-    ) -> (&VersionRanges, Option<Arc<HashSet<Box<dyn Dependency>>>>) {
-        (&self.verified_versions, self.entry.read_meta().deps.deps())
-    }
-}
-
-#[derive(VariantName, UnpackVariants)]
-pub(crate) enum VersionedGraphResult<K: StorageProperties> {
-    /// when the version cache has the exact matching entry via versions
-    Match(GraphNode<K>),
-    /// when the version cache found an entry, but the versions were mismatching. The existing
-    /// entry is returned, along with the last known version
-    Mismatch(VersionedGraphResultMismatch<K>),
-    /// An entry that is known to require re-evaluation because it was marked as dirty at the
-    /// requested version
-    Dirty,
-    /// when no entry is found in the cache
-    None,
-}
-
-impl<K> VersionedGraph<K>
-where
-    K: StorageProperties,
-{
-    pub(crate) fn new(storage_properties: K) -> Self {
-        Self {
-            last_n: Default::default(),
-            storage_properties,
-        }
-    }
-
-    /// gets the cache entry corresponding to the given key if it is up to date.
-    /// returns 'None' if the entry is missing or the versions are out of date.
- pub(crate) fn get( - &self, - key: VersionedGraphKeyRef, - mv: MinorVersion, - ) -> VersionedGraphResult { - fn handle_occupied( - key: VersionedGraphKeyRef, - entry: &Arc>, - ) -> VersionedGraphResult - where - K: StorageProperties, - { - match entry.read_meta().hist.get_history(&key.v) { - HistoryState::Verified => { - VersionedGraphResult::Match(GraphNode::occupied((*entry).dupe())) - } - HistoryState::Unknown(verified_versions) => { - VersionedGraphResult::Mismatch(VersionedGraphResultMismatch { - entry: GraphNode::occupied((*entry).dupe()), - verified_versions, - }) - } - HistoryState::Dirty => VersionedGraphResult::Dirty, - } - } - - fn handle_vacant() -> VersionedGraphResult - where - K: StorageProperties, - { - // vacant entries only occur if no other graph entries are - // present, so we know this has to be dirty - VersionedGraphResult::Dirty - } - - if let Some(versioned) = self.last_n.get(key.k) { - let mut potential = versioned.range((Included(VersionNumber::new(0)), Included(key.v))); - if let Some(found) = potential.next_back().map(|e| match e.1 { - VersionedGraphNodeInternal::Occupied(entry) => handle_occupied(key, entry), - VersionedGraphNodeInternal::Vacant(_) => handle_vacant(), - VersionedGraphNodeInternal::Transient(entry) => { - if mv > entry.m_version || e.0 < &key.v { - match &entry.last_valid { - (_, VersionedGraphNode::Occupied(prev_entry)) => { - match handle_occupied(key, prev_entry) { - VersionedGraphResult::Match(_) => unreachable!("previous value shouldn't be verified if the currently matching entry was transient"), - x => x, - } - } - (_, VersionedGraphNode::Vacant(_)) => handle_vacant(), - } - } else { - VersionedGraphResult::Match(GraphNode::transient(entry.dupe())) - } - } - }) { - found - } else { - // this branch takes care of an ongoing computation that is operating on an older - // version than anything stored currently. However, it has a problem where it's nodes - // would fail to share work due to nothing going into the cache if its evaluating - // to a different result. TODO add some per ctx result caching for old versions - versioned - .range((Bound::Included(key.v), Bound::Unbounded)) - .find_map(|(v, e)| match e { - VersionedGraphNodeInternal::Occupied(e) => Some((v, e)), - VersionedGraphNodeInternal::Vacant(_) => None, - VersionedGraphNodeInternal::Transient(_) => None, - }) - .map_or_else( - || VersionedGraphResult::None, - |(_, entry)| VersionedGraphResult::Mismatch(VersionedGraphResultMismatch { - entry: GraphNode::occupied((*entry).dupe()), - verified_versions: entry.read_meta().hist.get_verified_ranges(), - }), - ) - } - } else { - VersionedGraphResult::None - } - } - - /// Marks an existing entry as reusable at the given key version. - pub(crate) fn mark_unchanged( - &self, - key: VersionedGraphKey, - m_v: MinorVersion, - value_unchanged: GraphNode, - both_deps: BothDeps, - ) -> GraphNode { - // Consider a node n at version v0 that was dirtied at v1, v2. - // It was evaluated at v1, resulting in a different value, but at v2, it results in the same - // value as v0. - // It is possible that we attempt to resurrect the entry from v0 and v2, which actually - // requires actually requires insertion of a new entry at v2, rather than simply marking - // v0 as reusable. 
So, we delegate to a specialization of `update` with a special - // EntryUpdater - match value_unchanged.0 { - GraphNodeInner::Occupied(o) => { - self.update( - key, - EntryUpdater { - storage_properties: &self.storage_properties, - kind: EntryUpdaterKind::Reuse { e: o, both_deps }, - }, - ) - .0 - } - GraphNodeInner::Transient(t) => { - self.update( - key, - EntryUpdater { - storage_properties: &self.storage_properties, - kind: EntryUpdaterKind::ReuseTransient { e: t, m_v }, - }, - ) - .0 - } - } - } - - pub(crate) fn update_computed_value( - &self, - key: VersionedGraphKey, - m_v: MinorVersion, - res: K::Value, - both_deps: BothDeps, - ) -> (GraphNode, Option>) { - let entry_updater: EntryUpdater = EntryUpdater { - storage_properties: &self.storage_properties, - kind: EntryUpdaterKind::Computed { - res, - m_v, - both_deps, - }, - }; - - self.update(key, entry_updater) - } - - /// updates a node to the given value as a user injected value. The user injected value MUST - /// be a valid value - pub(crate) fn update_injected_value( - &self, - key: VersionedGraphKey, - res: K::Value, - ) -> (GraphNode, Option>) { - let entry_updater = EntryUpdater { - storage_properties: &self.storage_properties, - kind: EntryUpdaterKind::ValidOnly { res }, - }; - - self.update(key, entry_updater) - } - - /// updates the cached value based on the given key and versions. The value - /// is only updated if the version of the new value is of a newer - /// version than what is stored. - /// Returns the new entry, and an optional old entry that was invalidated due to this update - fn update( - &self, - key: VersionedGraphKey, - entry_updater: EntryUpdater, - ) -> (GraphNode, Option>) { - let StorageType::LastN(num_to_keep) = self.storage_properties.storage_type(); - // persistent keys, if any changes, are committed at the moment when the version - // is increased. therefore, it must be the case that the current update for the - // persistent key is the largest/newest version. it's also the case that they are - // never updated to the cache more than once per version. - // TODO refactor this to be less error prone. - let mut versioned_map = self.last_n.entry(key.k.clone()).or_default(); - - // we pick the nearest entry because the closest version number to the current key would - // have the least number of changes recorded in dice, which we assume naively to mean - // most likely to reuse a node. We could implement this to check for reuse against both - // the previous and the next version, but that complexity is likely not worth the benefit - // of trying to reuse a node. Maybe this is worth revisiting at some point. 
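The nearest-entry heuristic described in the comment above picks whichever existing version is closest to the requested one, preferring the older entry on a tie. A standalone sketch of that selection over a `BTreeMap` (a `u64` stands in for `VersionNumber`; the real code runs this against its per-key `SortedVectorMap`):

use std::collections::BTreeMap;
use std::ops::Bound::{Included, Unbounded};

type VersionNumber = u64;

// Look at the newest entry at or below the requested version and the oldest
// entry at or above it, and pick whichever version is closer; ties go to the
// previous (smaller) version, matching the deleted `nearest_entry`.
fn nearest_entry<V>(
    entries: &BTreeMap<VersionNumber, V>,
    v: VersionNumber,
) -> Option<(VersionNumber, &V)> {
    let newest_previous = entries
        .range((Unbounded, Included(v)))
        .next_back()
        .map(|(ver, e)| (*ver, e));
    let oldest_newer = entries
        .range((Included(v), Unbounded))
        .next()
        .map(|(ver, e)| (*ver, e));

    match (newest_previous, oldest_newer) {
        (Some((prev_v, prev_e)), Some((next_v, next_e))) => {
            if next_v - v < v - prev_v {
                Some((next_v, next_e))
            } else {
                Some((prev_v, prev_e))
            }
        }
        (Some(x), None) | (None, Some(x)) => Some(x),
        (None, None) => None,
    }
}

fn main() {
    let mut entries = BTreeMap::new();
    entries.insert(2, "v2");
    entries.insert(10, "v10");

    // v=3 is distance 1 from v2 and distance 7 from v10: pick v2.
    assert_eq!(nearest_entry(&entries, 3), Some((2, &"v2")));
    // v=9 is distance 7 from v2 and distance 1 from v10: pick v10.
    assert_eq!(nearest_entry(&entries, 9), Some((10, &"v10")));
}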
- let nearest = Self::nearest_entry(&key, &mut versioned_map); - - if let Some((key_of_e, e)) = nearest { - match e { - VersionedGraphNodeInternal::Occupied(e) => self.update_existing( - key.v, - entry_updater, - num_to_keep, - &mut versioned_map, - key_of_e, - e, - ), - VersionedGraphNodeInternal::Vacant(e) => { - self.update_vacant(key.v, entry_updater, &mut versioned_map, key_of_e, e) - } - VersionedGraphNodeInternal::Transient(e) => self.update_transient( - key.v, - entry_updater, - num_to_keep, - &mut versioned_map, - key_of_e, - e, - ), - } - } else { - self.update_empty(key.k, key.v, entry_updater, &mut versioned_map) - } - } - - /// find the nearest entry to the given key, preferring the smaller version number when tied - fn nearest_entry( - key: &VersionedGraphKey<::Key>, - versioned_map: &mut RefMut< - ::Key, - SortedVectorMap>, - >, - ) -> Option<(VersionNumber, VersionedGraphNodeInternal)> { - let newest_previous = versioned_map - .range((Included(VersionNumber::new(0)), Included(key.v))) - .next_back() - .map(|(v, e)| (*v, e.dupe())); - let oldest_newer = versioned_map - .range((Included(key.v), Unbounded)) - .next() - .map(|(v, e)| (*v, e.dupe())); - - match (newest_previous, oldest_newer) { - (Some((prev_v, prev_e)), Some((next_v, next_e))) => { - if next_v - key.v < prev_v - key.v { - Some((next_v, next_e)) - } else { - Some((prev_v, prev_e)) - } - } - (Some(x), None) => Some(x), - (None, Some(x)) => Some(x), - (None, None) => None, - } - } - - fn update_empty( - &self, - key: K::Key, - v: VersionNumber, - entry_creator: EntryUpdater, - versioned_map: &mut RefMut< - K::Key, - SortedVectorMap>, - >, - ) -> (GraphNode, Option>) { - let (v, entry) = entry_creator.build( - v, - v, - VersionedGraphNode::Vacant(Arc::new(VacantGraphNode { - key, - hist: RwLock::new(CellHistory::empty()), - })), - v, - CellHistory::verified(v), - ); - versioned_map.insert(v, entry.dupe()); - (entry.unpack_graph_value().unwrap(), None) - } - - fn update_existing( - &self, - v: VersionNumber, - // creates the entry, handling transient vs valid results and attaching the new node as rdeps - entry_creator: EntryUpdater, - num_to_keep: usize, - versioned_map: &mut RefMut< - K::Key, - SortedVectorMap>, - >, - version_of_e: VersionNumber, - e: Arc>, - ) -> (GraphNode, Option>) { - match entry_creator.try_reuse_occupied_entry(v, e.dupe()) { - EntryReused::Reused(reused) => { - if v < version_of_e { - assert!( - versioned_map - .insert(v, VersionedGraphNodeInternal::Occupied(reused.dupe())) - .is_none() - ); - assert!(versioned_map.remove(&version_of_e).is_some()); - } - (GraphNode::occupied(reused), None) - } - EntryReused::NotReusable(entry_creator) => { - let latest_dep_verified = entry_creator.both_deps().and_then(|deps| { - deps.deps - .iter() - .filter_map(|dep| dep.get_history().latest_verified_before(v)) - .max() - }); - let (since, end, hist) = e - .read_meta() - .hist - .make_new_verified_history(v, latest_dep_verified); - let (v_new, new) = entry_creator.build( - v, - version_of_e, - VersionedGraphNode::Occupied(e.dupe()), - since, - hist, - ); - - if let Some(end) = end { - // if there is newer data, we also need to store that at a newer - // key to make it reachable. 
- // TODO(bobyf): we probably want a custom versioned map here to - // better represent this and reduce complexity - - if versioned_map.len() == num_to_keep { - // if we are already at max entries to store, then we should - // just skip doing this entirely, as the most up to date - // entry we will store will be the entry at "end", which - // is no different than the original entry. - // We also don't need to store rdeps since this node will be discarded - return (new.unpack_graph_value().unwrap(), None); - } - - let prev = e.dupe(); - versioned_map.insert(end, VersionedGraphNodeInternal::Occupied(prev)); - } - - if versioned_map.len() == num_to_keep { - let min_version_stored = *versioned_map.iter().next().expect("should be at least one entry if there is more entries than what we want to keep").0; - - if since < min_version_stored { - return (new.unpack_graph_value().unwrap(), None); - } - - versioned_map.remove(&min_version_stored); - } - - versioned_map.insert(v_new, new.dupe()); - - ( - new.unpack_graph_value().unwrap(), - Some(GraphNode::occupied(e.dupe())), - ) - } - } - } - - fn update_vacant( - &self, - v: VersionNumber, - entry_creator: EntryUpdater, - versioned_map: &mut RefMut< - K::Key, - SortedVectorMap>, - >, - version_of_vacant: VersionNumber, - vacant_entry: Arc>, - ) -> (GraphNode, Option>) { - let (since, _, hist) = vacant_entry - .get_history() - .make_new_verified_history(v, None); - let (v_new, new) = entry_creator.build( - v, - version_of_vacant, - VersionedGraphNode::Vacant(vacant_entry), - since, - hist, - ); - - // remove the vacant entry since we now have an actual entry - versioned_map.remove(&version_of_vacant); - versioned_map.insert(v_new, new.dupe()); - - (new.unpack_graph_value().unwrap(), None) - } - - fn update_transient( - &self, - v: VersionNumber, - entry_creator: EntryUpdater, - num_to_keep: usize, - versioned_map: &mut RefMut< - K::Key, - SortedVectorMap>, - >, - version_of_transient: VersionNumber, - transient_entry: Arc>, - ) -> (GraphNode, Option>) { - if entry_creator.can_reuse_transient(&transient_entry) { - transient_entry.mark_unchanged(v); - - if v < version_of_transient { - assert!( - versioned_map - .insert( - v, - VersionedGraphNodeInternal::Transient(transient_entry.dupe()) - ) - .is_none() - ); - assert!(versioned_map.remove(&version_of_transient).is_some()); - } - - return (GraphNode::transient(transient_entry), None); - } - - // perform the update as if this transient entry didn't exist - let did_remove = versioned_map.remove(&version_of_transient).is_some(); - match &transient_entry.last_valid { - (prev_version, VersionedGraphNode::Occupied(occupied)) => { - if did_remove { - // need to put the previous value back first. 
- // since we did a removal, this is guaranteed to not exceed the entry limit - versioned_map.insert( - *prev_version, - VersionedGraphNodeInternal::Occupied(occupied.dupe()), - ); - } - self.update_existing( - v, - entry_creator, - num_to_keep, - versioned_map, - *prev_version, - occupied.dupe(), - ) - } - (prev_version, VersionedGraphNode::Vacant(vacant)) => self.update_vacant( - v, - entry_creator, - versioned_map, - *prev_version, - vacant.dupe(), - ), - } - } - - /// Obtains an entry at the given key, creating a Vacant entry if none-exists - pub(crate) fn entry(&self, key: VersionedGraphKey) -> VersionedGraphNodeInternal { - let mut versioned_map = self - .last_n - .entry(key.k) - .or_insert_with(SortedVectorMap::new); - if let Some(e) = versioned_map - .range((Bound::Unbounded, Bound::Included(key.v))) - .next_back() - .map(|(_, e)| e) - { - e.dupe() - } else { - let entry = VersionedGraphNodeInternal::Vacant(Arc::new(VacantGraphNode { - key: versioned_map.key().clone(), - hist: RwLock::new(CellHistory::empty()), - })); - versioned_map.insert(key.v, entry.dupe()); - - entry - } - } - - pub(crate) fn iter( - &self, - ) -> impl Iterator< - Item = dashmap::mapref::multiple::RefMulti< - K::Key, - SortedVectorMap>, - >, - > { - self.last_n.iter() - } - - pub(crate) fn len(&self) -> usize { - self.last_n.len() - } -} - -struct EntryUpdater<'a, K: StorageProperties> { - kind: EntryUpdaterKind, - storage_properties: &'a K, -} - -enum EntryUpdaterKind { - ValidOnly { - res: K::Value, - }, - Computed { - res: K::Value, - m_v: MinorVersion, - both_deps: BothDeps, - }, - Reuse { - e: Arc>, - both_deps: BothDeps, - }, - ReuseTransient { - e: Arc>, - m_v: MinorVersion, - }, -} - -enum EntryReused<'a, K: StorageProperties> { - Reused(Arc>), - NotReusable(EntryUpdater<'a, K>), -} - -impl<'a, K: StorageProperties> EntryUpdater<'a, K> { - fn both_deps(&self) -> Option<&BothDeps> { - match &self.kind { - EntryUpdaterKind::ValidOnly { .. } => None, - EntryUpdaterKind::Computed { both_deps, .. } => Some(both_deps), - EntryUpdaterKind::Reuse { both_deps, .. } => Some(both_deps), - EntryUpdaterKind::ReuseTransient { .. 
} => None, - } - } - fn try_reuse_occupied_entry( - self, - v: VersionNumber, - old: Arc>, - ) -> EntryReused<'a, K> { - fn reuse_node( - v: VersionNumber, - e: &Arc>, - both_deps: BothDeps, - ) { - let since = e.mark_unchanged(v, both_deps.deps); - - for rdep in both_deps.rdeps { - let node: Arc = e.dupe(); - rdep.add_rdep(Arc::downgrade(&node), since); - } - } - - match self.kind { - EntryUpdaterKind::ValidOnly { res } => { - if self.storage_properties.equality(&old.res, &res) { - old.mark_unchanged(v, HashSet::default()); - EntryReused::Reused(old) - } else { - EntryReused::NotReusable(EntryUpdater { - storage_properties: self.storage_properties, - kind: EntryUpdaterKind::ValidOnly { res }, - }) - } - } - EntryUpdaterKind::Computed { - res, - m_v, - both_deps, - } => { - if self.storage_properties.equality(&old.res, &res) { - reuse_node(v, &old, both_deps); - EntryReused::Reused(old) - } else { - EntryReused::NotReusable(EntryUpdater { - storage_properties: self.storage_properties, - kind: EntryUpdaterKind::Computed { - res, - m_v, - both_deps, - }, - }) - } - } - EntryUpdaterKind::Reuse { e, both_deps } => { - if Arc::ptr_eq(&old, &e) || self.storage_properties.equality(&old.res, &e.res) { - reuse_node(v, &old, both_deps); - EntryReused::Reused(old) - } else { - EntryReused::NotReusable(EntryUpdater { - storage_properties: self.storage_properties, - kind: EntryUpdaterKind::Reuse { e, both_deps }, - }) - } - } - EntryUpdaterKind::ReuseTransient { e, m_v } => EntryReused::NotReusable(EntryUpdater { - storage_properties: self.storage_properties, - kind: EntryUpdaterKind::ReuseTransient { e, m_v }, - }), - } - } - - fn can_reuse_transient(&self, old: &Arc>) -> bool { - match &self.kind { - EntryUpdaterKind::ReuseTransient { e, .. } => { - Arc::ptr_eq(old, e) || self.storage_properties.equality(&old.res, &e.res) - } - _ => false, - } - } - - fn build( - self, - v_computed: VersionNumber, - existing_version: VersionNumber, - existing_entry: VersionedGraphNode, - since: VersionNumber, - hist: CellHistory, - ) -> (VersionNumber, VersionedGraphNodeInternal) { - match self.kind { - EntryUpdaterKind::ValidOnly { res, .. } => ( - since, - VersionedGraphNodeInternal::Occupied(Self::make_entry( - existing_entry.key().clone(), - res, - BothDeps::default(), - since, - hist, - )), - ), - EntryUpdaterKind::Reuse { e, both_deps, .. } => ( - since, - VersionedGraphNodeInternal::Occupied(Self::make_entry( - existing_entry.key().clone(), - e.res.dupe(), - both_deps, - since, - hist, - )), - ), - EntryUpdaterKind::Computed { - res, - m_v, - both_deps, - .. - } => { - // Explicit check that the dependencies are valid (i.e not transient). This allows - // each computation to focus on determining whether it's own result was transient - // without having to be aware of what is transient for its dependencies. - // Any transient value will automatically make all values that depend on it - // transient, so the next request will recompute all these values. - if self.storage_properties.validity(&res) - && both_deps.deps.iter().all(|d| d.is_valid()) - { - ( - since, - VersionedGraphNodeInternal::Occupied(Self::make_entry( - existing_entry.key().clone(), - res, - both_deps, - since, - hist, - )), - ) - } else { - ( - v_computed, - VersionedGraphNodeInternal::Transient(Arc::new(TransientGraphNode::new( - existing_entry.key().clone(), - res, - hist, - m_v, - (existing_version, existing_entry), - ))), - ) - } - } - EntryUpdaterKind::ReuseTransient { e, m_v, .. 
} => ( - since, - VersionedGraphNodeInternal::Transient(Arc::new(TransientGraphNode::new( - existing_entry.key().clone(), - e.res.dupe(), - hist, - m_v, - (existing_version, existing_entry), - ))), - ), - } - } - - /// makes a new occupied entry to be added onto the graph, satisfying its invariants around - /// deps, rdeps, and history - fn make_entry( - key: K::Key, - res: K::Value, - both_deps: BothDeps, - // the version for which this entry becomes valid - since: VersionNumber, - // the full history - hist: CellHistory, - ) -> Arc> { - let new = Arc::new(OccupiedGraphNode::new(key, res, hist)); - - // register the existing node's deps with reverse edges first before creating the history - // of this node and putting it on the cache. - // The ordering is crucial for history and deps tracking because we must either inherit - // our deps' dirty history, or rely on our deps to invalidate this node via rdeps. - // We always dirty by dirtying the node itself first, and then traversing the rdeps. - // Therefore, we require that we first add rdeps to our deps, and then create our history - // so that if they are currently being dirtied, they will either mark us as dirtied at the - // appropriate version, or we will read their most up-to-date history and inherit - // the deps' dirtiness. - for rdep in both_deps.rdeps { - let node: Weak<_> = Arc::downgrade(&new); - rdep.add_rdep(node, since); - } - - { - let mut writable = new.writable(); - writable - .hist - .propagate_from_deps(since, both_deps.deps.iter().map(|d| d.get_history())); - writable.deps.add_deps( - since, - Arc::new( - both_deps - .deps - .into_iter() - .map(ComputedDependency::into_dependency) - .collect(), - ), - ); - } - new - } -} - -mod introspection { - use crate::introspection::graph::GraphNodeKind; - use crate::legacy::incremental::graph::storage_properties::StorageProperties; - use crate::legacy::incremental::graph::VersionedGraphNodeInternal; - - impl From<&VersionedGraphNodeInternal> for GraphNodeKind { - fn from(n: &VersionedGraphNodeInternal) -> Self { - match n { - VersionedGraphNodeInternal::Occupied(_) => GraphNodeKind::Occupied, - VersionedGraphNodeInternal::Transient(_) => GraphNodeKind::Transient, - VersionedGraphNodeInternal::Vacant(_) => GraphNodeKind::Vacant, - } - } - } -} - -#[cfg(test)] -pub(crate) mod testing { - - use dupe::Dupe; - use gazebo::variants::VariantName; - - use crate::legacy::incremental::graph::storage_properties::StorageProperties; - use crate::legacy::incremental::graph::GraphNode; - use crate::legacy::incremental::graph::VersionedGraphResult; - use crate::legacy::incremental::graph::VersionedGraphResultMismatch; - - pub(crate) trait VersionedCacheResultAssertsExt { - fn assert_none(&self); - - fn assert_dirty(&self); - - fn assert_match(&self) -> GraphNode; - - fn assert_mismatch(&self) -> VersionedGraphResultMismatch; - } - - impl VersionedCacheResultAssertsExt for VersionedGraphResult { - fn assert_none(&self) { - self.unpack_none() - .unwrap_or_else(|| panic!("expected None, but was {}", self.variant_name())) - } - - fn assert_dirty(&self) { - self.unpack_dirty() - .unwrap_or_else(|| panic!("expected Dirty, but was {}", self.variant_name())) - } - - fn assert_match(&self) -> GraphNode { - self.unpack_match() - .unwrap_or_else(|| panic!("expected Match, but was {}", self.variant_name())) - .dupe() - } - - fn assert_mismatch(&self) -> VersionedGraphResultMismatch { - self.unpack_mismatch() - .unwrap_or_else(|| panic!("expected Mismatch, but was {}", self.variant_name())) - .clone() - } - 
} -} - -#[cfg(test)] -mod tests { - use std::fmt; - use std::fmt::Debug; - use std::fmt::Formatter; - use std::hash::Hash; - use std::marker::PhantomData; - use std::sync::atomic; - use std::sync::Arc; - - use allocative::Allocative; - use async_trait::async_trait; - use derive_more::Display; - use dupe::Dupe; - use more_futures::cancellation::CancellationContext; - use sorted_vector_map::sorted_vector_set; - - use crate::api::computations::DiceComputations; - use crate::api::injected::InjectedKey; - use crate::api::key::Key; - use crate::api::storage_type::StorageType; - use crate::impls::core::graph::history::testing::CellHistoryExt; - use crate::impls::core::graph::history::testing::HistoryExt; - use crate::impls::core::graph::history::CellHistory; - use crate::legacy::incremental::dep_trackers::BothDeps; - use crate::legacy::incremental::evaluator::testing::EvaluatorUnreachable; - use crate::legacy::incremental::graph::dependencies::Dependency; - use crate::legacy::incremental::graph::storage_properties::testing::StoragePropertiesLastN; - use crate::legacy::incremental::graph::storage_properties::StorageProperties; - use crate::legacy::incremental::graph::testing::VersionedCacheResultAssertsExt; - use crate::legacy::incremental::graph::GraphNodeDyn; - use crate::legacy::incremental::graph::OccupiedGraphNode; - use crate::legacy::incremental::graph::VersionedGraph; - use crate::legacy::incremental::graph::VersionedGraphKey; - use crate::legacy::incremental::graph::VersionedGraphKeyRef; - use crate::legacy::incremental::testing::ComputedDependencyExt; - use crate::legacy::incremental::testing::DependencyExt; - use crate::legacy::incremental::versions::MinorVersion; - use crate::legacy::incremental::Computable; - use crate::versions::testing::VersionRangesExt; - use crate::versions::VersionNumber; - use crate::versions::VersionRange; - use crate::versions::VersionRanges; - use crate::HashSet; - - #[derive(Clone, Dupe, Display, Debug, Eq, PartialEq, Hash, Allocative)] - #[display(fmt = "{:?}", self)] - struct NonPersistent(i32); - - #[async_trait] - impl Key for NonPersistent { - type Value = i32; - - async fn compute( - &self, - _ctx: &mut DiceComputations, - _cancellations: &CancellationContext, - ) -> Self::Value { - unimplemented!() - } - - fn equality(x: &Self::Value, y: &Self::Value) -> bool { - x == y - } - } - - #[derive(Clone, Dupe, Display, Debug, Eq, PartialEq, Hash, Allocative)] - #[display(fmt = "{:?}", self)] - struct Persistent(i32); - - #[async_trait] - impl InjectedKey for Persistent { - type Value = i32; - - fn equality(x: &Self::Value, y: &Self::Value) -> bool { - x == y - } - } - - #[derive( - Clone, - Dupe, - Debug, - Eq, - PartialEq, - Hash, - derive_more::Display, - Allocative - )] - #[display(fmt = "{:?}", self)] - struct Last2(i32); - - #[test] - fn latest_only_stores_latest_only() { - let cache = VersionedGraph::new(StoragePropertiesLastN::<_, i32>::new(1)); - let res = 100; - let key = VersionedGraphKey::new(VersionNumber::new(1), NonPersistent(0)); - let mv = MinorVersion::testing_new(0); - - // first, empty cache gives none - cache.get(key.as_ref(), mv).assert_none(); - - assert_eq!( - cache - .update_computed_value(key.clone(), mv, res, BothDeps::default()) - .1 - .is_none(), - true - ); - - assert_eq!(*cache.get(key.as_ref(), mv).assert_match().val(), res); - - let res2 = 200; - let key2 = VersionedGraphKey::new(VersionNumber::new(2), NonPersistent(0)); - assert!( - cache - .entry(key2.clone()) - .mark_invalidated(VersionNumber::new(2)) - ); - assert_eq!( - 
*cache - .update_computed_value(key2.clone(), mv, res2, BothDeps::default()) - .1 - .expect("should have an old entry that is evicted") - .val(), - res - ); - - assert_eq!(*cache.get(key2.as_ref(), mv).assert_match().val(), res2); - // old version is gone - let mismatch = cache - .get(key.as_ref(), MinorVersion::testing_new(0)) - .assert_mismatch(); - assert_eq!(*mismatch.entry.val(), res2); - assert_eq!( - mismatch.verified_versions, - VersionRanges::testing_new(sorted_vector_set![VersionRange::begins_with( - VersionNumber::new(2), - )]) - ); - - // if the value is the same, then versions are shared - let res3 = 200; - let key3 = VersionedGraphKey::new(VersionNumber::new(5), NonPersistent(0)); - let key4 = VersionedGraphKey::new(VersionNumber::new(4), NonPersistent(0)); - assert!( - cache - .entry(key4.clone()) - .mark_invalidated(VersionNumber::new(4)) - ); - assert!( - cache - .entry(key3.clone()) - .mark_invalidated(VersionNumber::new(5)) - ); - assert_eq!( - cache - .update_computed_value(key3.clone(), mv, res3, BothDeps::default()) - .1 - .is_none(), - true - ); - - assert_eq!(cache.get(key3.as_ref(), mv).assert_match().val(), &res2); - assert_eq!(cache.get(key2.as_ref(), mv).assert_match().val(), &res2); - // the first result is gone still - let mismatch = cache.get(key.as_ref(), mv).assert_mismatch(); - assert_eq!(mismatch.entry.val(), &res2); - assert_eq!( - mismatch.verified_versions, - VersionRanges::testing_new(sorted_vector_set![ - VersionRange::bounded(VersionNumber::new(2), VersionNumber::new(4)), - VersionRange::begins_with(VersionNumber::new(5)) - ]) - ); - - // smaller version numbers don't get cached - let res4 = 400; - assert_eq!( - cache - .update_computed_value(key4.clone(), mv, res4, BothDeps::default()) - .1 - .is_none(), - true - ); - let mismatch = cache.get(key4.as_ref(), mv).assert_mismatch(); - assert_eq!(mismatch.entry.val(), &res2); - assert_eq!( - mismatch.verified_versions, - VersionRanges::testing_new(sorted_vector_set![ - VersionRange::bounded(VersionNumber::new(2), VersionNumber::new(4)), - VersionRange::begins_with(VersionNumber::new(5)) - ]) - ); - - assert_eq!(cache.get(key3.as_ref(), mv).assert_match().val(), &res2); - assert_eq!(cache.get(key2.as_ref(), mv).assert_match().val(), &res2); - // the first result is gone still - let mismatch = cache.get(key.as_ref(), mv).assert_mismatch(); - assert_eq!(mismatch.entry.val(), &res2); - assert_eq!( - mismatch.verified_versions, - VersionRanges::testing_new(sorted_vector_set![ - VersionRange::bounded(VersionNumber::new(2), VersionNumber::new(4)), - VersionRange::begins_with(VersionNumber::new(5)) - ]) - ); - // different key is miss - cache - .get( - VersionedGraphKeyRef::new(VersionNumber::new(5), &NonPersistent(2)), - MinorVersion::testing_new(0), - ) - .assert_none(); - - let key5 = VersionedGraphKey::new(VersionNumber::new(7), NonPersistent(0)); - assert!( - cache - .entry(key5.clone(),) - .force_dirty(VersionNumber::new(7)) - ); - cache.get(key5.as_ref(), mv).assert_dirty() - } - - #[test] - fn last_n_max_usize_stores_everything() { - let cache = VersionedGraph::new(StoragePropertiesLastN::<_, i32>::new(usize::MAX)); - let res = 100; - let key = VersionedGraphKey::new(VersionNumber::new(0), Persistent(0)); - let mv = MinorVersion::testing_new(0); - - assert_eq!( - cache - .update_computed_value(key.clone(), mv, res, BothDeps::default()) - .1 - .is_none(), - true - ); - - assert_eq!(cache.get(key.as_ref(), mv).assert_match().val(), &res); - - let res2 = 200; - let key2 = 
VersionedGraphKey::new(VersionNumber::new(1), Persistent(0)); - assert!( - cache - .entry(key2.clone()) - .mark_invalidated(VersionNumber::new(1)) - ); - assert_eq!( - *cache - .update_computed_value(key2.clone(), mv, res2, BothDeps::default()) - .1 - .expect("should have an old entry that is evicted") - .val(), - res - ); - - assert_eq!(cache.get(key2.as_ref(), mv).assert_match().val(), &res2); - assert_eq!(cache.get(key.as_ref(), mv).assert_match().val(), &res); - - // skip a few versions - let res3 = 300; - let key3 = VersionedGraphKey::new(VersionNumber::new(5), Persistent(0)); - let key2 = VersionedGraphKey::new(VersionNumber::new(1), Persistent(0)); - assert!( - cache - .entry(key3.clone(),) - .mark_invalidated(VersionNumber::new(5)) - ); - assert_eq!( - *cache - .update_computed_value(key3.clone(), mv, res3, BothDeps::default()) - .1 - .expect("should have an old entry that is evicted") - .val(), - res2 - ); - - assert_eq!(cache.get(key3.as_ref(), mv).assert_match().val(), &res3); - assert_eq!(cache.get(key2.as_ref(), mv).assert_match().val(), &res2); - assert_eq!(cache.get(key.as_ref(), mv).assert_match().val(), &res); - - // keys goes to the largest version that's smaller than it - let key4 = VersionedGraphKey::new(VersionNumber::new(4), Persistent(0)); - assert_eq!(cache.get(key4.as_ref(), mv).assert_match().val(), &res2); - - let key5 = VersionedGraphKey::new(VersionNumber::new(6), Persistent(0)); - assert_eq!(cache.get(key5.as_ref(), mv).assert_match().val(), &res3); - - // different key is none - let key6 = VersionedGraphKey::new(VersionNumber::new(6), Persistent(2)); - cache.get(key6.as_ref(), mv).assert_none(); - - let key7 = VersionedGraphKey::new(VersionNumber::new(7), Persistent(0)); - assert!( - cache - .entry(key7.clone(),) - .force_dirty(VersionNumber::new(7)) - ); - cache.get(key7.as_ref(), mv).assert_dirty() - } - - #[test] - fn last_2_stores_last_2() { - let cache = VersionedGraph::new(StoragePropertiesLastN::<_, i32>::new(2)); - let res = 100; - let key = VersionedGraphKey::new(VersionNumber::new(0), Last2(0)); - let mv = MinorVersion::testing_new(0); - - assert_eq!( - cache - .update_computed_value(key.clone(), mv, res, BothDeps::default()) - .1 - .is_none(), - true - ); - - assert_eq!(cache.get(key.as_ref(), mv).assert_match().val(), &res); - - let res2 = 200; - let key2 = VersionedGraphKey::new(VersionNumber::new(1), Last2(0)); - assert!( - cache - .entry(key2.clone(),) - .mark_invalidated(VersionNumber::new(1)) - ); - assert_eq!( - *cache - .update_computed_value(key2.clone(), mv, res2, BothDeps::default()) - .1 - .expect("should have an old entry that is evicted") - .val(), - res - ); - - assert_eq!(cache.get(key2.as_ref(), mv).assert_match().val(), &res2); - assert_eq!(cache.get(key.as_ref(), mv).assert_match().val(), &res); - - // skip a few versions - let res3 = 300; - let key3 = VersionedGraphKey::new(VersionNumber::new(5), Last2(0)); - let key2 = VersionedGraphKey::new(VersionNumber::new(1), Last2(0)); - assert!( - cache - .entry(key3.clone()) - .mark_invalidated(VersionNumber::new(5)) - ); - assert_eq!( - *cache - .update_computed_value(key3.clone(), mv, res3, BothDeps::default()) - .1 - .expect("should have an old entry that is evicted") - .val(), - res2 - ); - - assert_eq!(cache.get(key3.as_ref(), mv).assert_match().val(), &res3); - assert_eq!(cache.get(key2.as_ref(), mv).assert_match().val(), &res2); - // the oldest entry should be evicted because we don't store more than 2 - let mismatch = cache.get(key.as_ref(), mv).assert_mismatch(); - 
assert_eq!(mismatch.entry.val(), &res2); - assert_eq!( - mismatch.verified_versions, - VersionRanges::testing_new(sorted_vector_set![VersionRange::bounded( - VersionNumber::new(1), - VersionNumber::new(5) - )]) - ) - } - - #[derive(Allocative)] - #[allocative(bound = "")] - struct StoragePropertiesForTransientTests<K, V> { - storage_type: StorageType, - validity: Arc<atomic::AtomicBool>, - _marker: PhantomData<fn(K) -> V>, - } - - impl<K, V> Debug for StoragePropertiesForTransientTests<K, V> { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - f.debug_struct("StorageKeyForTransientTests") - .finish_non_exhaustive() - } - } - - impl<K: Computable, V: PartialEq + Dupe + Allocative + Send + Sync + 'static> StorageProperties - for StoragePropertiesForTransientTests<K, V> - { - type Key = K; - type Value = V; - - fn key_type_name() -> &'static str { - "SPForTransientTests" - } - - fn storage_type(&self) -> StorageType { - self.storage_type - } - - fn equality(&self, x: &Self::Value, y: &Self::Value) -> bool { - x == y - } - - fn validity(&self, x: &Self::Value) -> bool { - let _ = x; - self.validity.load(atomic::Ordering::SeqCst) - } - - fn to_key_any(key: &Self::Key) -> &dyn std::any::Any { - key - } - } - - #[test] - fn transient_entry_return_only_for_same_minor_version() { - let validity = Arc::new(atomic::AtomicBool::new(false)); - let cache = VersionedGraph::new(StoragePropertiesForTransientTests { - storage_type: StorageType::LastN(2), - validity: validity.dupe(), - _marker: PhantomData, - }); - let res = Arc::new(100); - let key = VersionedGraphKey::new(VersionNumber::new(0), Last2(0)); - let mv = MinorVersion::testing_new(0); - - validity.store(false, atomic::Ordering::SeqCst); - - assert!( - cache - .update_computed_value(key.clone(), mv, res.dupe(), BothDeps::default()) - .1 - .is_none() - ); - - assert_eq!(cache.get(key.as_ref(), mv).assert_match().val(), &res); - - cache - .get(key.as_ref(), MinorVersion::testing_new(1)) - .assert_dirty(); - // a newer version should always be invalid regardless of minor version - cache - .get( - VersionedGraphKeyRef::new(VersionNumber::new(1), &Last2(0)), - MinorVersion::testing_new(0), - ) - .assert_dirty(); - - validity.store(true, atomic::Ordering::SeqCst); - - assert!( - cache - .update_computed_value( - key.clone(), - MinorVersion::testing_new(1), - res.dupe(), - BothDeps::default() - ) - .1 - .is_none() - ); - - cache - .get(key.as_ref(), MinorVersion::testing_new(1)) - .assert_match(); - } - - #[test] - fn transient_entry_gets_removed_on_update() { - let validity = Arc::new(atomic::AtomicBool::new(false)); - let cache = VersionedGraph::new(StoragePropertiesForTransientTests { - storage_type: StorageType::LastN(1), - validity: validity.dupe(), - _marker: PhantomData, - }); - let key = VersionedGraphKey::new(VersionNumber::new(0), NonPersistent(0)); - let mv = MinorVersion::testing_new(0); - - validity.store(true, atomic::Ordering::SeqCst); - - // first put a valid value - assert!( - cache - .update_computed_value(key, mv, 100, BothDeps::default()) - .1 - .is_none() - ); - - validity.store(false, atomic::Ordering::SeqCst); - - // now put a value that is invalid - let key = VersionedGraphKey::new(VersionNumber::new(1), NonPersistent(0)); - assert!( - cache - .update_computed_value(key.clone(), mv, 1, BothDeps::default()) - .1 - .is_some() - ); - - assert_eq!(cache.get(key.as_ref(), mv).assert_match().val(), &1); - - validity.store(true, atomic::Ordering::SeqCst); - - // now put the original value at a new version - let key = VersionedGraphKey::new(VersionNumber::new(2), NonPersistent(0)); - assert!( - cache - .update_computed_value(key.clone(), mv, 100, 
BothDeps::default()) - .1 - .is_none() - ); - - assert_eq!(cache.get(key.as_ref(), mv).assert_match().val(), &100); - } - - #[test] - fn update_versioned_graph_entry_tracks_versions_and_deps() { - let deps0: Arc>> = Arc::new(vec![DependencyExt::< - EvaluatorUnreachable<_, usize>, - >::testing_raw(5)]); - let entry = OccupiedGraphNode::>::new( - 1337, - 1, - CellHistory::testing_new( - &[VersionNumber::new(0)], - &[VersionNumber::new(1), VersionNumber::new(2)], - ), - ); - entry - .writable() - .deps - .add_deps(VersionNumber::new(0), deps0.dupe()); - - entry - .read_meta() - .hist - .get_history(&VersionNumber::new(0)) - .assert_verified(); - assert_eq!( - *entry.read_meta().deps.debug_deps().read(), - Some((VersionNumber::new(0), deps0.dupe())) - ); - - entry.mark_unchanged(VersionNumber::new(1), HashSet::default()); - entry - .read_meta() - .hist - .get_history(&VersionNumber::new(0)) - .assert_verified(); - entry - .read_meta() - .hist - .get_history(&VersionNumber::new(1)) - .assert_verified(); - assert_eq!( - *entry.read_meta().deps.debug_deps().read(), - Some((VersionNumber::new(1), Arc::new(Vec::new()))) - ); - - let deps1 = HashSet::from_iter([ - ComputedDependencyExt::>::testing_raw( - 7, - VersionNumber::new(1), - true, - ), - ]); - entry.mark_unchanged(VersionNumber::new(2), deps1); - let deps1: Arc>> = Arc::new(vec![DependencyExt::< - EvaluatorUnreachable<_, usize>, - >::testing_raw(7)]); - - entry - .read_meta() - .hist - .get_history(&VersionNumber::new(0)) - .assert_verified(); - entry - .read_meta() - .hist - .get_history(&VersionNumber::new(1)) - .assert_verified(); - entry - .read_meta() - .hist - .get_history(&VersionNumber::new(2)) - .assert_verified(); - - assert_eq!( - *entry.read_meta().deps.debug_deps().read(), - Some((VersionNumber::new(2), deps1)) - ); - } - - #[test] - fn test_dirty_for_persistent_storage() { - fn key(v: usize) -> VersionedGraphKey { - VersionedGraphKey::new(VersionNumber::new(v), Persistent(0)) - } - let mv = MinorVersion::testing_new(0); - - let cache = VersionedGraph::new(StoragePropertiesLastN::<_, i32>::new(usize::MAX)); - let res = 100; - - let existing = cache.entry(key(0)); - assert!(existing.unpack_vacant().is_some()); - existing.mark_invalidated(VersionNumber::new(0)); - - cache.get(key(0).as_ref(), mv).assert_dirty(); - cache.get(key(1).as_ref(), mv).assert_dirty(); - - let existing = cache.entry(key(2)); - assert!(existing.unpack_vacant().is_some()); - existing.mark_invalidated(VersionNumber::new(2)); - - cache.get(key(0).as_ref(), mv).assert_dirty(); - cache.get(key(1).as_ref(), mv).assert_dirty(); - cache.get(key(2).as_ref(), mv).assert_dirty(); - - cache.update_computed_value(key(0), mv, res, BothDeps::default()); - assert_eq!( - cache - .get(key(0).as_ref(), mv) - .assert_match() - .read_meta() - .hist - .get_verified(), - vec![VersionNumber::new(0)] - ); - assert_eq!( - cache - .get(key(1).as_ref(), mv) - .assert_match() - .read_meta() - .hist - .get_verified(), - vec![VersionNumber::new(0)] - ); - cache.get(key(2).as_ref(), mv).assert_mismatch(); - } - - #[test] - fn test_dirty_for_nonpersistent_storage() { - fn key(v: usize) -> VersionedGraphKey { - VersionedGraphKey::new(VersionNumber::new(v), NonPersistent(0)) - } - let mv = MinorVersion::testing_new(0); - - let cache = VersionedGraph::new(StoragePropertiesLastN::<_, i32>::default()); - let res = 100; - - let existing = cache.entry(key(0)); - assert!(existing.unpack_vacant().is_some()); - assert!(existing.mark_invalidated(VersionNumber::new(0))); - - 
cache.get(key(0).as_ref(), mv).assert_dirty(); - cache.get(key(1).as_ref(), mv).assert_dirty(); - - let existing = cache.entry(key(2)); - assert!(existing.unpack_vacant().is_some()); - assert!(existing.mark_invalidated(VersionNumber::new(2))); - - cache.get(key(0).as_ref(), mv).assert_dirty(); - cache.get(key(1).as_ref(), mv).assert_dirty(); - cache.get(key(2).as_ref(), mv).assert_dirty(); - - cache.update_computed_value(key(0), mv, res, BothDeps::default()); - assert_eq!( - cache - .get(key(0).as_ref(), mv) - .assert_match() - .read_meta() - .hist - .get_verified(), - vec![VersionNumber::new(0)] - ); - assert_eq!( - cache - .get(key(1).as_ref(), mv) - .assert_match() - .read_meta() - .hist - .get_verified(), - vec![VersionNumber::new(0)] - ); - cache.get(key(2).as_ref(), mv).assert_mismatch(); - } - - #[test] - fn invalid_deps_makes_parent_invalid() { - let cache = VersionedGraph::new(StoragePropertiesLastN::<_, u32>::new(2)); - let res = 10; - let key = VersionedGraphKey::new(VersionNumber::new(0), Last2(0)); - let mv = MinorVersion::testing_new(0); - - let (node, _) = cache.update_computed_value( - key, - mv, - res, - BothDeps { - deps: HashSet::from_iter([ - ComputedDependencyExt::>::testing_raw( - 1, - VersionNumber::new(0), - true, - ), - ComputedDependencyExt::>::testing_raw( - 2, - VersionNumber::new(0), - false, - ), - ]), - rdeps: Vec::new(), - }, - ); - - assert!(!node.into_dyn().is_valid()); - } - - #[test] - fn transient_errors_reused_for_same_version() { - #[derive(Debug, Allocative)] - struct StoragePropertiesForTest; - - impl StorageProperties for StoragePropertiesForTest { - type Key = NonPersistent; - type Value = u32; - - fn key_type_name() -> &'static str { - "" - } - - fn storage_type(&self) -> StorageType { - StorageType::LastN(1) - } - - fn equality(&self, x: &Self::Value, y: &Self::Value) -> bool { - x == y - } - - fn validity(&self, x: &Self::Value) -> bool { - let _ = x; - false - } - - fn to_key_any(key: &Self::Key) -> &dyn std::any::Any { - key - } - } - - let cache = VersionedGraph::new(StoragePropertiesForTest); - let res = 100; - let key1 = VersionedGraphKey::new(VersionNumber::new(1), NonPersistent(0)); - let mv = MinorVersion::testing_new(3); - - cache.update_computed_value(key1.clone(), mv, res, BothDeps::default()); - - cache.get(key1.as_ref(), mv).assert_match(); - - let key2 = VersionedGraphKey::new(VersionNumber::new(3), NonPersistent(0)); - let (node, _) = cache.update_computed_value(key2.clone(), mv, res, BothDeps::default()); - - cache.get(key2.as_ref(), mv).assert_match(); - cache - .get(key1.as_ref(), MinorVersion::testing_new(4)) - .assert_none(); - cache.get(key1.as_ref(), mv).assert_none(); - - node.into_dyn() - .writable() - .hist - .mark_invalidated(VersionNumber::new(5)); - - let key3 = VersionedGraphKey::new(VersionNumber::new(6), NonPersistent(0)); - cache.update_computed_value(key3.clone(), mv, res, BothDeps::default()); - - cache.get(key3.as_ref(), mv).assert_match(); - cache.get(key2.as_ref(), mv).assert_none(); - cache.get(key1.as_ref(), mv).assert_none(); - } - - #[test] - fn reuse_inserts_into_cache() { - // This tests a very specific condition of resurrecting a value. - // Consider a node n at version v0 that was dirtied at v1, v2. - // It was evaluated at v1, resulting in a different value, but at v2, it results in the same - // value as v0. 
- // It is possible that we attempt to resurrect the entry from v0 and v2, which actually - // requires insertion of a new entry at v2, rather than simply marking - // v0 as reusable. - - let cache = VersionedGraph::new(StoragePropertiesLastN::<_, i32>::default()); - - let key1 = VersionedGraphKey::new(VersionNumber::new(0), NonPersistent(0)); - let mv = MinorVersion::testing_new(0); - - let (node, _) = cache.update_computed_value(key1, mv, 1, BothDeps::default()); - - let key2 = VersionedGraphKey::new(VersionNumber::new(1), NonPersistent(0)); - cache.update_computed_value(key2, mv, 2, BothDeps::default()); - - let key3 = VersionedGraphKey::new(VersionNumber::new(2), NonPersistent(0)); - - let reused = cache.mark_unchanged( - key3.clone(), - MinorVersion::testing_new(0), - node.dupe(), - BothDeps::default(), - ); - - // should have created a new entry because of key2 - assert!(!Arc::ptr_eq( - reused.0.unpack_occupied().unwrap(), - node.0.unpack_occupied().unwrap() - )); - // should actually be cached though - cache.get(key3.as_ref(), mv).assert_match(); - } - - #[test] - fn update_prior_version_reuses_nodes_correctly() { - let cache = VersionedGraph::new(StoragePropertiesLastN::<_, i32>::new(1)); - let res = 100; - let key = VersionedGraphKey::new(VersionNumber::new(5), NonPersistent(0)); - let mv = MinorVersion::testing_new(0); - - // first, empty cache gives none - cache.get(key.as_ref(), mv).assert_none(); - - assert!( - cache - .update_computed_value(key.clone(), mv, res, BothDeps::default()) - .1 - .is_none(), - ); - - assert_eq!(*cache.get(key.as_ref(), mv).assert_match().val(), res); - - // now insert a new value at an older version; this shouldn't evict anything. - let res2 = 200; - let key2 = VersionedGraphKey::new(VersionNumber::new(4), NonPersistent(0)); - assert!( - cache - .update_computed_value(key2.clone(), mv, res2, BothDeps::default()) - .1 - .is_none() - ); - cache.get(key2.as_ref(), mv).assert_mismatch(); - // the newer version should still be there - assert_eq!(*cache.get(key.as_ref(), mv).assert_match().val(), res); - // there should be exactly one entry - assert_eq!(cache.last_n.get(&NonPersistent(0)).unwrap().len(), 1); - - // now insert the same value at an older version; this shouldn't evict anything but should reuse - // the existing node. - let key3 = VersionedGraphKey::new(VersionNumber::new(3), NonPersistent(0)); - assert!( - cache - .update_computed_value(key3.clone(), mv, res, BothDeps::default()) - .1 - .is_none() - ); - - assert_eq!(*cache.get(key.as_ref(), mv).assert_match().val(), res); - assert_eq!(*cache.get(key3.as_ref(), mv).assert_match().val(), res); - } -} diff --git a/dice/dice/src/legacy/incremental/graph/storage_properties.rs b/dice/dice/src/legacy/incremental/graph/storage_properties.rs deleted file mode 100644 index c38f2a2b672d7..0000000000000 --- a/dice/dice/src/legacy/incremental/graph/storage_properties.rs +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::any::Any; -use std::fmt::Debug; - -use allocative::Allocative; -use dupe::Dupe; - -use crate::api::storage_type::StorageType; -use crate::legacy::incremental::Computable; - -/// Defines key/value operations in the versioned graph storage. 
-pub(crate) trait StorageProperties: - Allocative + Debug + Send + Sync + Sized + 'static -{ - /// Type of the key. - type Key: Computable + 'static; - /// Type of the value. - type Value: Allocative + Dupe + Send + Sync + 'static; - fn key_type_name() -> &'static str; - - /// Returns the Key as an Any. This should be an Any for the user-provided - /// type (or ProjectionKeyAsKey for projections) so that users can potentially - /// inspect its concrete type. - fn to_key_any(key: &Self::Key) -> &dyn Any; - - /// How long the value should be stored. - fn storage_type(&self) -> StorageType; - /// Are values equal? - fn equality(&self, x: &Self::Value, y: &Self::Value) -> bool; - /// Is computed value valid (or transient)? - fn validity(&self, x: &Self::Value) -> bool; -} - -#[cfg(test)] -pub(crate) mod testing { - use std::fmt; - use std::sync::Arc; - - use allocative::Allocative; - use async_trait::async_trait; - use dupe::Dupe; - - use crate::api::error::DiceResult; - use crate::api::storage_type::StorageType; - use crate::legacy::ctx::ComputationData; - use crate::legacy::incremental::graph::storage_properties::StorageProperties; - use crate::legacy::incremental::Computable; - use crate::legacy::incremental::IncrementalComputeProperties; - use crate::legacy::incremental::IncrementalEngine; - use crate::GraphNode; - use crate::TransactionCtx; - use crate::WeakDiceFutureHandle; - - /// Simple storage key for tests. - /// - /// * `PartialEq` is used for value equality - /// * values are always considered valid - /// * storage type is configured - #[derive(Allocative)] - #[allocative(bound = "")] - pub(crate) struct StoragePropertiesLastN { - n: usize, - _t: std::marker::PhantomData, - _v: std::marker::PhantomData, - } - - #[async_trait] - impl - IncrementalComputeProperties for StoragePropertiesLastN - { - type DiceTask = WeakDiceFutureHandle; - - async fn recompute( - _key: &Self::Key, - _engine: &Arc>, - _transaction_ctx: &Arc, - _extra: &ComputationData, - ) -> DiceResult> { - unimplemented!("not needed for test") - } - } - - impl fmt::Debug for StoragePropertiesLastN { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("StorageKeyLastN") - .field("n", &self.n) - .finish_non_exhaustive() - } - } - - impl Default for StoragePropertiesLastN { - fn default() -> Self { - StoragePropertiesLastN::new(1) - } - } - - impl StoragePropertiesLastN { - pub(crate) fn new(n: usize) -> Self { - Self { - n, - _t: std::marker::PhantomData, - _v: std::marker::PhantomData, - } - } - } - - impl StorageProperties - for StoragePropertiesLastN - { - type Key = T; - type Value = V; - - fn key_type_name() -> &'static str { - "TestingLastN" - } - - fn storage_type(&self) -> StorageType { - StorageType::LastN(self.n) - } - - fn equality(&self, x: &Self::Value, y: &Self::Value) -> bool { - x == y - } - - fn validity(&self, x: &Self::Value) -> bool { - let _ = x; - true - } - - fn to_key_any(key: &Self::Key) -> &dyn std::any::Any { - key - } - } -} diff --git a/dice/dice/src/legacy/incremental/introspection.rs b/dice/dice/src/legacy/incremental/introspection.rs deleted file mode 100644 index cf538c21401ea..0000000000000 --- a/dice/dice/src/legacy/incremental/introspection.rs +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. 
- * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::collections::BTreeMap; -use std::sync::Arc; - -use dupe::Dupe; -use sorted_vector_map::SortedVectorMap; - -use crate::introspection::graph::AnyKey; -use crate::introspection::graph::EngineForIntrospection; -use crate::introspection::graph::GraphNodeKind; -use crate::introspection::graph::KeyID; -use crate::introspection::graph::NodeID; -use crate::introspection::graph::SerializedGraphNode; -use crate::introspection::graph::SerializedGraphNodesForKey; -use crate::legacy::dice_futures::dice_task::DiceTask; -use crate::legacy::dice_futures::dice_task::DiceTaskStateForDebugging; -use crate::legacy::incremental::graph::dependencies::VersionedDependencies; -use crate::legacy::incremental::graph::dependencies::VersionedRevDependencies; -use crate::legacy::incremental::graph::storage_properties::StorageProperties; -use crate::legacy::incremental::graph::GraphNodeDyn; -use crate::legacy::incremental::graph::VersionedGraphNodeInternal; -use crate::legacy::incremental::Dependency; -use crate::legacy::incremental::IncrementalComputeProperties; -use crate::legacy::incremental::IncrementalEngine; -use crate::versions::VersionNumber; -use crate::HashMap; -use crate::HashSet; - -impl EngineForIntrospection for IncrementalEngine -where - K: IncrementalComputeProperties + 'static, - T: Dupe + Send + Sync + 'static, -{ - fn keys<'a>(&'a self) -> Box + 'a> { - Box::new( - self.versioned_cache - .iter() - .map(|e| AnyKey::new(e.key().clone())), - ) - } - - fn edges<'a>(&'a self) -> Box)> + 'a> { - fn extract_deps( - e: &SortedVectorMap>, - ) -> Option>>> { - e.iter() - .last()? - .1 - .unpack_occupied()? 
- .read_meta() - .deps - .deps() - } - - Box::new(self.versioned_cache.iter().map(|e| { - let k = AnyKey::new(e.key().clone()); - let deps = match extract_deps(e.value()) { - Some(deps) => deps.iter().map(|d| d.introspect()).collect(), - None => Vec::new(), - }; - (k, deps) - })) - } - - fn keys_currently_running<'a>( - &'a self, - ) -> Vec<( - AnyKey, - crate::introspection::graph::VersionNumber, - DiceTaskStateForDebugging, - )> { - self.currently_running - .read() - .iter() - .flat_map(|(v, es)| { - es.iter() - .map(move |entry| { - let k = entry.key(); - let e = entry.value(); - ( - AnyKey::new(k.clone()), - crate::introspection::graph::VersionNumber(v.0), - e.task.state_for_debugging(), - ) - }) - .collect::>() - }) - .collect() - } - - fn versions_currently_running<'a>(&'a self) -> Vec { - self.currently_running - .read() - .iter() - .map(|e| e.0.to_introspectable()) - .collect() - } - - fn nodes<'a>( - &'a self, - keys: &'a mut HashMap, - ) -> Box + 'a> { - let mut map_id = move |key: AnyKey| -> KeyID { - let num_keys = keys.len(); - *keys.entry(key).or_insert_with(|| KeyID(num_keys)) - }; - fn visit_deps KeyID>( - deps: &VersionedDependencies, - map_id: &mut M, - ) -> Option> { - deps.debug_deps().try_read().and_then(|deps| { - deps.as_ref() - .map(|deps| deps.1.iter().map(|d| map_id(d.introspect())).collect()) - }) - } - fn visit_rdeps( - rdeps: &VersionedRevDependencies, - ) -> BTreeMap> { - let mut res = BTreeMap::new(); - - let rdeps = rdeps.rdeps(); - for rdep in rdeps.rdeps.iter() { - if let Some(node) = rdep.0.0.upgrade() { - res.entry(rdep.1.to_introspectable()) - .or_insert_with(Vec::new) - .push(NodeID(node.id())); - } - } - - res - } - fn visit_node KeyID>( - node: &VersionedGraphNodeInternal, - map_id: &mut M, - ) -> Option { - node.unpack_graph_value().map(|graph_value| { - let m = graph_value.try_read_meta(); - SerializedGraphNode { - node_id: match node { - VersionedGraphNodeInternal::Occupied(o) => NodeID(o.id()), - VersionedGraphNodeInternal::Transient(t) => NodeID(t.id()), - VersionedGraphNodeInternal::Vacant(_) => { - unreachable!("node was unpacked, can't be vacant") - } - }, - kind: GraphNodeKind::from(node), - history: (*graph_value.get_history()).to_introspectable(), - deps: m.as_ref().and_then(|meta| visit_deps(&meta.deps, map_id)), - rdeps: m.map(|meta| visit_rdeps(&meta.rdeps)), - } - }) - } - Box::new(self.versioned_cache.iter().map(move |e| { - let k = AnyKey::new(e.key().clone()); - SerializedGraphNodesForKey { - id: map_id(k.clone()), - key: k.to_string(), - type_name: k.short_type_name().to_owned(), - nodes: e - .value() - .iter() - .map(|(v, node)| (v.to_introspectable(), visit_node(node, &mut map_id))) - .collect(), - } - })) - } - - fn len_for_introspection(&self) -> usize { - self.versioned_cache.len() - } - - fn currently_running_key_count(&self) -> usize { - self.currently_running - .read() - .iter() - .map(|(_, e)| e.len()) - .sum() - } -} diff --git a/dice/dice/src/legacy/incremental/mod.rs b/dice/dice/src/legacy/incremental/mod.rs deleted file mode 100644 index 41c92067dc0af..0000000000000 --- a/dice/dice/src/legacy/incremental/mod.rs +++ /dev/null @@ -1,2610 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! -//! The incrementality module of BUCK -//! -//! 
This is responsible for performing incremental caching and invalidations -//! with multiple versions in-flight at the same time. -//! - -pub(crate) mod dep_trackers; -pub(crate) mod evaluator; -pub(crate) mod graph; -pub(crate) mod introspection; -pub(crate) mod transaction_ctx; -pub(crate) mod versions; - -use std::borrow::Cow; -use std::fmt::Debug; -use std::fmt::Display; -use std::future::Future; -use std::hash::Hash; -use std::sync::atomic::AtomicU64; -use std::sync::atomic::Ordering; -use std::sync::Arc; - -use allocative::Allocative; -use async_trait::async_trait; -use dashmap::mapref::entry::Entry; -use dashmap::DashMap; -use dupe::Dupe; -use futures::stream::FuturesUnordered; -use futures::FutureExt; -use futures::StreamExt; -use more_futures::cancellation::CancellationContext; -use more_futures::spawn::spawn_dropcancel_with_preamble; -use more_futures::spawn::CompletionObserver; -use parking_lot::MappedRwLockReadGuard; -use parking_lot::RwLock; -use parking_lot::RwLockReadGuard; -use parking_lot::RwLockWriteGuard; -use tracing::Span; - -use crate::api::error::DiceResult; -use crate::api::events::DiceEvent; -use crate::api::key::Key; -use crate::api::projection::DiceProjectionComputations; -use crate::api::projection::ProjectionKey; -use crate::api::user_data::UserComputationData; -use crate::impls::core::graph::history::CellHistory; -use crate::introspection::graph::EngineForIntrospection; -use crate::legacy::ctx::ComputationData; -use crate::legacy::dice_futures::dice_future::DiceFuture; -use crate::legacy::dice_futures::dice_task::DiceTask; -use crate::legacy::dice_futures::future_handle::WeakDiceFutureHandle; -use crate::legacy::dice_futures::sync_handle::SyncDiceTaskHandle; -use crate::legacy::incremental::dep_trackers::BothDeps; -use crate::legacy::incremental::evaluator::Evaluator; -pub(crate) use crate::legacy::incremental::graph::dependencies::ComputedDependency; -pub(crate) use crate::legacy::incremental::graph::dependencies::Dependency; -use crate::legacy::incremental::graph::storage_properties::StorageProperties; -use crate::legacy::incremental::graph::GraphNode; -use crate::legacy::incremental::graph::VersionedGraph; -use crate::legacy::incremental::graph::VersionedGraphKey; -use crate::legacy::incremental::graph::VersionedGraphKeyRef; -use crate::legacy::incremental::graph::VersionedGraphResult; -use crate::legacy::incremental::graph::VersionedGraphResultMismatch; -use crate::legacy::incremental::transaction_ctx::TransactionCtx; -use crate::legacy::opaque::OpaqueValueImplLegacy; -use crate::legacy::projection::ProjectionKeyAsKey; -use crate::legacy::projection::ProjectionKeyProperties; -use crate::legacy::EvaluationResult; -use crate::result::CancellableResult; -use crate::result::Cancelled; -use crate::versions::VersionNumber; -use crate::versions::VersionRanges; -use crate::HashMap; -use crate::HashSet; -use crate::StoragePropertiesForKey; - -#[async_trait] -pub(crate) trait IncrementalComputeProperties: StorageProperties { - /// Dice task executed in the `IncrementalEngine` for compute or recompute. - type DiceTask: DiceTask; - - /// Recompute previously computed value. 
- async fn recompute( - key: &Self::Key, - engine: &Arc>, - transaction_ctx: &Arc, - extra: &ComputationData, - ) -> DiceResult>; -} - -#[derive(Allocative)] -struct RunningEntry { - task: ::DiceTask, - epoch: Epoch, -} - -#[derive(Allocative, Copy, Clone, Dupe, Eq, PartialEq, derive_more::Display)] -struct Epoch(u64); - -/// The incremental engine that manages all the handling of the results of a -/// specific key, performing the recomputation if necessary -/// -/// The computation of an identical request (same key and version) is -/// automatically deduplicated, so that identical requests share the same set of -/// work. It is guaranteed that there is at most one computation in flight at a -/// time if they share the same key and version. -#[derive(Allocative)] -pub(crate) struct IncrementalEngine { - versioned_cache: VersionedGraph, - /// tracks the currently running computations. This is evicted upon - /// completion of the computation - currently_running: RwLock>>>, - /// Tracks the last scheduled task. We use this when deleting from the currently_running map, - /// since it's possible to overwrite an existing entry while both futures are running. - epoch: AtomicU64, -} - -impl Debug for IncrementalEngine { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("IncrementalEngine").finish_non_exhaustive() - } -} - -pub(crate) trait ErasedEngine: Allocative { - fn introspect(&self) -> &dyn EngineForIntrospection; - - fn gc_version(&self, v: VersionNumber); -} - -impl ErasedEngine for IncrementalEngine -where - K: IncrementalComputeProperties, -{ - fn introspect(&self) -> &dyn EngineForIntrospection { - self - } - - fn gc_version(&self, v: VersionNumber) { - let mut running_map = self.currently_running.write(); - running_map.remove(&v); - running_map.shrink_to_fit(); - } -} - -pub trait Computable: - Allocative + Clone + Display + Debug + Eq + Hash + Send + Sync + 'static -{ -} - -impl Computable for T where - T: Allocative + Clone + Display + Debug + Eq + Hash + Send + Sync + 'static -{ -} - -impl IncrementalEngine -where - K: IncrementalComputeProperties, -{ - pub(crate) fn new(evaluator: K) -> Arc { - Arc::new(Self { - versioned_cache: VersionedGraph::new(evaluator), - currently_running: RwLock::new(HashMap::default()), - epoch: AtomicU64::new(0), - }) - } - - fn next_epoch(&self) -> Epoch { - Epoch(self.epoch.fetch_add(1, Ordering::Relaxed)) - } - - /// Dirties the value at K - #[instrument(level = "info", skip(self), fields(k = %k, version = %version))] - pub(crate) fn dirty(&self, k: K::Key, version: VersionNumber, force_dirty: bool) { - // It is crucial that we dirty first before updating the rdeps. - // This is related to the race condition where we invalidate while nodes are being inserted - // into the graph at the same time: - // When a new node is written to the graph, for the node to maintain current dirty versions - // the node must either be reachable via `rdeps`, or propagate it's deps dirtiness. - // Currently, per `update_impl`, the new node will add rdeps to itself first, and then - // inherit its deps dirtiness. 
- // So, marking the invalidated node as dirty first ensures that any new nodes will - // either read the invalidated node's history, or be invalidated via the rdeps traversal, or - // both. - let node = self - .versioned_cache - .entry(VersionedGraphKey::new(version, k)); - - let hist_changed = if force_dirty { - node.force_dirty(version) - } else { - node.mark_invalidated(version) - }; - - if hist_changed { - // if we actually did something, invalidate the rdeps of occupied entries - if let Some(node) = node.unpack_occupied() { - debug!("dirtying rdeps"); - Self::invalidate_rdeps(version, GraphNode::occupied(node.dupe())) - } - } - } - - fn invalidate_rdeps(version: VersionNumber, invalidated: GraphNode<K>) { - let mut queue = { - let metadata = invalidated.read_meta(); - let rdeps = metadata.rdeps.rdeps(); - - rdeps - .rdeps - .iter() - .map(|(r, v)| (r.dupe(), *v)) - .collect::<Vec<_>>() - }; - - while let Some((rdep, relevant_version)) = queue.pop() { - if let Some(node) = rdep.0.upgrade() { - let mut metadata = node.writable(); - - if metadata - .hist - .latest_dirtied() - .map_or(true, |d| d < relevant_version) - { - // since dirty always occurs in increasing order, it must be the case that if - // the history was already dirtied, it was by a version number less than the - // current version number. - // furthermore, if the rdep was dirtied, at any future versions larger than - // the version it was dirtied at, it may no longer depend on the current node - // so we skip marking it as dirty, and rely on delayed propagation of dirty - if metadata.hist.mark_invalidated(version) { - queue.extend({ - let rdeps = metadata.rdeps.rdeps(); - - rdeps - .rdeps - .iter() - .map(|(r, v)| (r.dupe(), *v)) - .collect::<Vec<_>>() - }) - } - } - } - } - } -} - -impl<K> IncrementalEngine<K> -where - K: IncrementalComputeProperties + Evaluator + 'static, -{ - /// Like `eval` but without recording dependencies. - pub(crate) fn eval_for_opaque( - self: &Arc<Self>, - k: &K::Key, - transaction_ctx: &Arc<TransactionCtx>, - extra: ComputationData, - ) -> DiceFuture<K> { - self.eval_entry_versioned(k, transaction_ctx, extra) - } - - /// Updates the value at K. Returns whether this injected value actually causes a change. - #[instrument(level = "info", skip(self, res), fields(k = %k, version = %version))] - pub(crate) fn update_injected_value( - self: &Arc<Self>, - k: K::Key, - version: VersionNumber, - res: K::Value, - ) -> bool { - // It is crucial that we `dirty` before updating the `rdeps`. - // See `IncrementalEngine::dirty` below for details. - let node = self - .versioned_cache - .entry(VersionedGraphKey::new(version, k.clone())); - - node.mark_invalidated(version); - - let (new, invalidated) = self - .versioned_cache - .update_injected_value(VersionedGraphKey::new(version, k), res); - - if let Some(invalidated) = invalidated { - debug!("dirtying rdeps"); - Self::invalidate_rdeps(version, invalidated) - } - - let is_changed = new.get_history().latest_verified_before(version) == Some(version); - is_changed - } - - // NOTE: Avoid making this an `async fn`. This function uses a bit of stack space, and it's - // important to ensure none of it is held across await points, because it's called by `eval()`, - // which itself models an edge in DICE and might be kept alive for a long time. 
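The NOTE above relies on a general Rust property: the locals of an `async fn` are stored in the returned future's state machine and live for as long as the future is held, whereas a plain `fn` that returns a future drops its temporaries before returning. A minimal sketch of the pattern, with invented names:

use std::future::Future;

// Inside an `async fn`, `scratch` would live in the future's state and be
// held for as long as a caller keeps the future around. Here it is dropped
// before the function returns, and only `value` is captured.
fn lookup_or_compute(cached: Option<u32>) -> impl Future<Output = u32> {
    let scratch = [0u8; 4096]; // temporary used only for the synchronous probe
    let value = cached.unwrap_or(scratch.len() as u32);
    async move { value }
}

fn main() {
    let fut = lookup_or_compute(Some(7));
    // The 4 KiB probe buffer is already gone by the time we hold `fut`;
    // polling it to completion would yield 7.
    let _ = fut;
}

Because the returned future models a DICE edge and may be held for the lifetime of a computation, keeping the synchronous cache probe out of its captured state avoids pinning that stack space.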
- pub(crate) fn eval_entry_versioned( - self: &Arc, - k: &K::Key, - transaction_ctx: &Arc, - extra: ComputationData, - ) -> DiceFuture { - if let VersionedGraphResult::Match(entry) = self.versioned_cache.get( - VersionedGraphKeyRef::new(transaction_ctx.get_version(), k), - transaction_ctx.get_minor_version(), - ) { - debug!(k = %k, msg = "found existing entry with matching version in cache. reusing result."); - DiceFuture::Ready(Some(entry)) - } else { - let this = self.dupe(); - - let running_map = self.get_running_map(transaction_ctx); - - let res = match running_map.entry(k.clone()) { - Entry::Occupied(mut occupied) => { - if let Some(existing) = occupied.get().task.pollable() { - debug!(k=%k, msg = "found a task that is currently running. polling on existing task"); - existing - } else { - let mut fut = None; - - take_mut::take(occupied.get_mut(), |entry| { - let (task, new_fut) = this.new_dice_task( - k.clone(), - transaction_ctx, - extra, - Some((entry.epoch, entry.task.into_completion_observer())), - ); - fut = Some(new_fut); - task - }); - - debug!(k=%k, epoch=%occupied.get().epoch, msg = "new task inserted into running map"); - - fut.unwrap() - } - } - Entry::Vacant(vacant) => { - let (task, fut) = this.new_dice_task(k.clone(), transaction_ctx, extra, None); - let entry = vacant.insert(task); - - debug!(k=%k, epoch=%entry.epoch, msg = "new task inserted into running map"); - - fut - } - }; - - res - } - } - - #[instrument( - level = "debug", - skip(self, transaction_ctx, extra, cancelled_instance), - fields(k = %k), - )] - fn new_dice_task( - self: Arc>, - k: K::Key, - transaction_ctx: &Arc, - extra: ComputationData, - cancelled_instance: Option<(Epoch, CompletionObserver>>)>, - ) -> (RunningEntry, DiceFuture) { - debug!( - "no matching entry in cache, and no tasks currently running. spawning a new task..." - ); - - let eval_ctx = transaction_ctx.dupe(); - let key = k.clone(); - let v = eval_ctx.get_version(); - let epoch = self.next_epoch(); - - let user_data = extra.user_data.dupe(); - - struct Evaluation { - engine: Arc>, - epoch: Epoch, - k: K::Key, - v: VersionNumber, - } - - impl Drop for Evaluation { - fn drop(&mut self) { - let span = tracing::span!( - tracing::Level::DEBUG, - "Evaluation::drop", - k = %self.k, - v = %self.v, - epoch = %self.epoch - ); - let _guard = span.enter(); - - debug!("exiting"); - match self.engine.currently_running.read().get(&self.v) { - None => {} - Some(map) => { - debug!("awaiting lock"); - match map.entry(self.k.clone()) { - Entry::Occupied(entry) => { - if entry.get().epoch == self.epoch { - entry.remove(); - debug!("future removed"); - } - } - Entry::Vacant(_) => {} - } - debug!("complete"); - } - } - } - } - - let ev = Evaluation { - engine: self, - epoch, - k, - v, - }; - - let future = async move { - let cancellation = CancellationContext::todo(); - - // check again since another thread could have inserted into the versioned - // cache before we entered the index. - let res = match ev.engine.versioned_cache.get( - VersionedGraphKeyRef::new(eval_ctx.get_version(), &ev.k), - eval_ctx.get_minor_version(), - ) { - VersionedGraphResult::Match(entry) => { - debug!("found existing entry with matching version in cache. reusing result."); - CancellableResult::Ok(entry) - } - VersionedGraphResult::Mismatch(mismatch) => { - let mut extra = extra; - debug!("no matching entry in cache. 
checking for dependency changes"); - extra.start_computing_key::(&ev.k); - - let deps_changed = { - extra.user_data.tracker.event(DiceEvent::CheckDepsStarted { - key_type: K::key_type_name(), - }); - - scopeguard::defer! { - extra - .user_data - .tracker - .event(DiceEvent::CheckDepsFinished { key_type: K::key_type_name() }); - } - - Self::compute_whether_versioned_dependencies_changed( - &ev.k, &eval_ctx, &extra, &mismatch, - ) - .await - }; - - match deps_changed { - DidDepsChange::Changed | DidDepsChange::NoDeps => { - debug!("dependencies changed. recomputing..."); - ev.engine - .compute(&ev.k, eval_ctx, extra, &cancellation) - .await - } - DidDepsChange::NoChange(unchanged_both_deps) => { - debug!("dependencies are unchanged, reusing entry"); - extra.finished_computing_key::(&ev.k, &unchanged_both_deps, true); - CancellableResult::Ok(ev.engine.reuse( - ev.k.clone(), - &eval_ctx, - mismatch.entry, - unchanged_both_deps, - )) - } - } - } - VersionedGraphResult::Dirty | VersionedGraphResult::None => { - let mut extra = extra; - extra.start_computing_key::(&ev.k); - - debug!("dirtied. recomputing..."); - ev.engine - .compute(&ev.k, eval_ctx, extra, &cancellation) - .await - } - }; - - debug!("finished. returning result"); - res - }; - - let span = debug_span!( - parent: None, - "spawned_dice_task", - key = % key, - version = % v, - epoch = % epoch, - ); - - // If a task is being cancelled, then we need to wait for it to finish first. This wait - // should normally be fairly short. It goes into a non-cancellable preamble because we hold - // the only reference to this cancelled instance, so if we were to get cancelled too we - // would remove ourselves from the running map and lose it! - let (task, handle) = match cancelled_instance { - Some((instance_epoch, instance)) => Self::spawn_task( - future, - async move { - debug!(msg = "awaiting cancelled future", epoch = %instance_epoch); - instance.await - }, - &user_data, - span, - ), - None => Self::spawn_task(future, futures::future::ready(()), &user_data, span), - }; - - (RunningEntry { task, epoch }, handle) - } - - fn spawn_task( - future: impl Future>> + Send + 'static, - preamble: impl Future + Send + 'static, - spawner_ctx: &UserComputationData, - span: Span, - ) -> (WeakDiceFutureHandle, DiceFuture) { - let (task, fut) = spawn_dropcancel_with_preamble( - future, - preamble, - spawner_ctx.spawner.as_ref(), - spawner_ctx, - span, - ); - let task = WeakDiceFutureHandle::async_cancellable(task); - let fut = DiceFuture::AsyncCancellableSpawned(fut); - (task, fut) - } - - #[instrument( - level = "debug", - skip(self, transaction_ctx, extra, cancellation), - fields(k = %k, version = %transaction_ctx.get_version()), - )] - async fn compute( - self: &Arc, - k: &K::Key, - transaction_ctx: Arc, - extra: ComputationData, - cancellation: &CancellationContext<'_>, - ) -> CancellableResult> { - let desc = K::key_type_name(); - extra - .user_data - .tracker - .event(DiceEvent::Started { key_type: desc }); - let tracker = extra.user_data.tracker.dupe(); - - scopeguard::defer! 
{ - tracker.event(DiceEvent::Finished { key_type: desc }); - }; - - let v = transaction_ctx.get_version(); - let m_v = transaction_ctx.get_minor_version(); - - // TODO(bobyf) these also make good locations where we want to perform instrumentation - debug!(msg = "running evaluator"); - - let EvaluationResult { - value, - both_deps, - extra, - } = self - .versioned_cache - .storage_properties - .eval(k, transaction_ctx, cancellation, extra) - .await; - - let _guard = match cancellation.try_to_disable_cancellation() { - Some(g) => g, - None => { - debug!("evaluation cancelled, skipping cache updates"); - return Err(Cancelled); - } - }; - - debug!(msg = "evaluation finished. updating caches"); - extra.finished_computing_key::(k, &both_deps, false); - - let (entry, _old) = self.versioned_cache.update_computed_value( - VersionedGraphKey::new(v, k.clone()), - m_v, - value, - both_deps, - ); - - // This feels like it should move in the scopeguard above to notify the cycle detector on - // cancellation, but our cycle detector does not currently support being notified multiple - // times about the same key, so we don't. - debug!(msg = "cache updates completed"); - - CancellableResult::Ok(entry) - } -} - -impl IncrementalEngine> { - /// Synchronously evaluate projection key given previously computed derive from key. - pub(crate) fn eval_projection( - self: &Arc, - k: &ProjectionKeyAsKey
<P>
    , - derive_from: &OpaqueValueImplLegacy, - transaction_ctx: &Arc, - extra: &ComputationData, - ) -> P::Value { - let node = self.eval_projection_versioned(k, derive_from, transaction_ctx, extra); - - let value = node.val().dupe(); - - // Update dependencies. - derive_from - .parent_computations - .dep_trackers - .record::>(transaction_ctx.get_version(), self.dupe(), node); - - value - } - - /// Synchronously evaluate projection key without recording dependencies. - #[instrument( - level = "debug", - skip(self, transaction_ctx, extra, derive_from), - fields(k = %k, v = %transaction_ctx.get_version()), - )] - fn eval_projection_versioned( - self: &Arc, - k: &ProjectionKeyAsKey
<P>
    , - derive_from: &OpaqueValueImplLegacy, - transaction_ctx: &Arc, - extra: &ComputationData, - ) -> GraphNode> { - if let VersionedGraphResult::Match(entry) = self.versioned_cache.get( - VersionedGraphKeyRef::new(transaction_ctx.get_version(), k), - transaction_ctx.get_minor_version(), - ) { - entry.dupe() - } else { - enum Val { - Occupied(SyncDiceTaskHandle>), - Vacant(tokio::sync::oneshot::Sender>>), - } - - let val = { - let running_map = self.get_running_map(transaction_ctx); - - let val = match running_map.entry(k.clone()) { - Entry::Occupied(occupied) => Val::Occupied(occupied.get().task.dupe()), - Entry::Vacant(vacant) => { - let (tx, rx) = tokio::sync::oneshot::channel(); - vacant.insert(RunningEntry { - task: SyncDiceTaskHandle { rx: rx.shared() }, - epoch: self.next_epoch(), - }); - Val::Vacant(tx) - } - }; - - val - }; - - match val { - Val::Occupied(o) => { - // It is safe to block here because projection computation is synchronous. - // Here is some explanation why we need unconstrained: - // https://gist.github.com/stepancheg/0c1e6ed4b45a334a9a222e7db38537f2 - debug!(msg = "polling an existing sync projection task"); - futures::executor::block_on(tokio::task::unconstrained(o.rx)) - .expect("sync task don't fail") - .dupe() - } - Val::Vacant(v) => self.eval_projection_task( - k, - &derive_from.value, - derive_from.as_both_deps(), - transaction_ctx, - extra, - v, - ), - } - } - } - - /// Evaluate projection key after acquiring the task lock. - fn eval_projection_task( - self: &Arc, - k: &ProjectionKeyAsKey
<P>
    , - // FIXME? - derive_from: &GraphNode>, - derive_from_as_deps: BothDeps, - transaction_ctx: &Arc, - extra: &ComputationData, - tx: tokio::sync::oneshot::Sender>>, - ) -> GraphNode> { - debug!(msg = "evaluating sync projection task"); - - let node = match self.versioned_cache.get( - VersionedGraphKeyRef::new(transaction_ctx.get_version(), k), - transaction_ctx.get_minor_version(), - ) { - VersionedGraphResult::Match(entry) => entry.dupe(), - VersionedGraphResult::Mismatch(mismatch) => { - // Async key evaluation calls `compute_whether_dependencies_changed`, - // but we cannot do that because the function is synchronous. - // So we do simpler check here: if `derive_from` versions are compatible with - // cached node versions, we reuse the cached node and recompute otherwise. - if !Self::check_whether_opaque_value_changed(derive_from, &mismatch) { - self.reuse( - k.clone(), - transaction_ctx, - mismatch.entry, - derive_from_as_deps, - ) - } else { - self.do_compute_projection( - k, - derive_from.val(), - derive_from_as_deps, - transaction_ctx, - extra, - ) - } - } - VersionedGraphResult::Dirty | VersionedGraphResult::None => self.do_compute_projection( - k, - derive_from.val(), - derive_from_as_deps, - transaction_ctx, - extra, - ), - }; - - let sent = tx.send(node.dupe()); - assert!(sent.is_ok(), "receiver is still alive"); - - debug!(msg = "projection task completed"); - - if let Some(running_map) = self - .currently_running - .read() - .get(&transaction_ctx.get_version()) - { - let removed = running_map.remove(k); - assert!(removed.is_some()); - } - - debug!(msg = "currently_running cleared"); - - node - } - - /// Projection key is found in cache, but version mismatches. - /// Can we reuse that value? - fn check_whether_opaque_value_changed( - derive_from: &GraphNode>, - projection_mismatch: &VersionedGraphResultMismatch>, - ) -> bool { - let derive_from_versions = derive_from.get_history().get_verified_ranges(); - projection_mismatch - .verified_versions - .intersect(&derive_from_versions) - .is_empty() - } - - /// Invoke projection computation function and update the cache. - fn do_compute_projection( - &self, - key: &ProjectionKeyAsKey
<P>
    , - derive_from: &::Value, - derive_from_as_deps: BothDeps, - transaction_ctx: &Arc, - extra: &ComputationData, - ) -> GraphNode> { - let dice = self - .versioned_cache - .storage_properties - .dice - .upgrade() - .unwrap(); - let ctx = DiceProjectionComputations { - user_data: &extra.user_data, - data: &dice.data, - }; - - let value = key.k.compute(derive_from, &ctx); - - let (entry, _old) = self.versioned_cache.update_computed_value( - VersionedGraphKey::new(transaction_ctx.get_version(), key.clone()), - transaction_ctx.get_minor_version(), - value, - derive_from_as_deps, - ); - - entry - } - - /// Asynchronously recompute projection key. - pub(crate) async fn recompute_projection( - self: &Arc, - k: &ProjectionKeyAsKey
<P>
    , - transaction_ctx: &Arc, - extra: ComputationData, - ) -> DiceResult>> { - match self.versioned_cache.get( - VersionedGraphKeyRef::new(transaction_ctx.get_version(), k), - transaction_ctx.get_minor_version(), - ) { - VersionedGraphResult::Match(entry) => { - debug!("found existing entry with matching version in cache. reusing result."); - Ok(entry) - } - VersionedGraphResult::Mismatch(mismatch) => { - let eval_ctx = transaction_ctx.dupe(); - let key = k.clone(); - - // Unlike synchronous projection computation, on recompute - // we can perform full dependencies changes check. - // Unlike asynchronous key computation, we do not hold task lock here. - match Self::compute_whether_versioned_dependencies_changed( - k, &eval_ctx, &extra, &mismatch, - ) - .await - { - DidDepsChange::Changed | DidDepsChange::NoDeps => { - debug!("dependencies changed. recomputing..."); - - self.do_recompute_projection(k, transaction_ctx, &extra) - .await - } - DidDepsChange::NoChange(unchanged_both_deps) => { - debug!("dependencies are unchanged, reusing entry"); - - Ok(self.reuse(key, &eval_ctx, mismatch.entry, unchanged_both_deps)) - } - } - } - VersionedGraphResult::Dirty => { - self.do_recompute_projection(k, transaction_ctx, &extra) - .await - } - VersionedGraphResult::None => { - unreachable!("on recompute, dependency key should always be present") - } - } - } - - async fn do_recompute_projection( - self: &Arc, - k: &ProjectionKeyAsKey

    , - transaction_ctx: &Arc, - extra: &ComputationData, - ) -> DiceResult>> { - let cache = self - .versioned_cache - .storage_properties - .dice - .upgrade() - .unwrap() - .find_cache(); - - // We must compute derive value before acquiring the task lock - // because projection computation must not block. - // This is different from regular key evaluation. - let value = cache - .eval_for_opaque( - &k.derive_from_key, - transaction_ctx, - extra - .subrequest::>(&k.derive_from_key)?, - ) - .await; - - let derive_from_both_deps = - BothDeps::only_one_dep(transaction_ctx.get_version(), value.dupe(), &cache); - - enum Val { - Occupied(SyncDiceTaskHandle>), - Vacant(tokio::sync::oneshot::Sender>>), - } - - let val = { - let running_map = self.get_running_map(transaction_ctx); - - let val = match running_map.entry(k.clone()) { - Entry::Occupied(occupied) => Val::Occupied(occupied.get().task.dupe()), - Entry::Vacant(vacant) => { - let (tx, rx) = tokio::sync::oneshot::channel(); - vacant.insert(RunningEntry { - task: SyncDiceTaskHandle { rx: rx.shared() }, - epoch: self.next_epoch(), - }); - Val::Vacant(tx) - } - }; - - val - }; - - Ok(match val { - Val::Occupied(o) => o.rx.await.expect("sync task cannot fail"), - Val::Vacant(v) => self.eval_projection_task( - k, - &value, - derive_from_both_deps, - transaction_ctx, - extra, - v, - ), - }) - } -} - -impl IncrementalEngine { - #[instrument( - level = "debug", - skip(transaction_ctx, mismatch, extra), - fields(version = %transaction_ctx.get_version()), - )] - async fn compute_whether_versioned_dependencies_changed( - key: &K::Key, - transaction_ctx: &Arc, - extra: &ComputationData, - mismatch: &VersionedGraphResultMismatch, - ) -> DidDepsChange { - // we know that the result is last computed at 'last_verified_version', which means that - // its dependencies must have also been verified at 'last_verified_version'. - // So to determine if this result is reusable, we check whether any of the dependencies - // have changed between 'last_verified_version' and the currently requested version. - - match mismatch.deps_at_last_version() { - (versions, Some(deps)) => { - // TODO(bobyf) spawn everything for now, but we really should be smarter here - Self::compute_whether_dependencies_changed( - key, - transaction_ctx, - extra, - versions, - &deps, - ) - // boxed to segment this more expensive bit out of the main new_dice_task future (held - // by all active computations). 
-                .boxed()
-                .await
-            }
-            _ => DidDepsChange::Changed,
-        }
-    }
-
-    #[instrument(
-        level = "debug",
-        skip(self, transaction_ctx, value_to_reuse, both_deps),
-        fields(k = %k, version = %transaction_ctx.get_version(), m_version = %transaction_ctx.get_minor_version()),
-    )]
-    fn reuse(
-        self: &Arc<Self>,
-        k: K::Key,
-        transaction_ctx: &Arc<TransactionCtx>,
-        value_to_reuse: GraphNode,
-        both_deps: BothDeps,
-    ) -> GraphNode {
-        let v = transaction_ctx.get_version();
-
-        debug!(msg = "reusing entry");
-
-        self.versioned_cache.mark_unchanged(
-            VersionedGraphKey::new(v, k),
-            transaction_ctx.get_minor_version(),
-            value_to_reuse,
-            both_deps,
-        )
-    }
-
-    /// determines if the given 'Dependency' has changed between versions 'last_version' and
-    /// 'target_version'
-    #[instrument(
-        level = "debug",
-        skip(transaction_ctx, extra, deps),
-        fields(version = %transaction_ctx.get_version(), verified_versions = %verified_versions)
-    )]
-    async fn compute_whether_dependencies_changed(
-        key: &K::Key,
-        transaction_ctx: &Arc<TransactionCtx>,
-        extra: &ComputationData,
-        verified_versions: &VersionRanges,
-        deps: &Arc>>,
-    ) -> DidDepsChange {
-        if deps.is_empty() {
-            return DidDepsChange::NoDeps;
-        }
-
-        let mut fs: FuturesUnordered<_> =
-            (deps.iter().map(|dep| dep.recompute(transaction_ctx, extra))).collect();
-
-        let mut verified_versions = Cow::Borrowed(verified_versions);
-
-        let mut computed_deps = HashSet::default();
-        let mut computed_nodes = Vec::new();
-        while let Some(dep_res) = fs.next().await {
-            match dep_res {
-                Ok((dep, dep_node)) => {
-                    verified_versions = Cow::Owned(
-                        verified_versions.intersect(&dep.get_history().get_verified_ranges()),
-                    );
-                    if verified_versions.is_empty() {
-                        debug!(msg = "deps changed");
-                        return DidDepsChange::Changed;
-                    }
-                    computed_deps.insert(dep);
-                    computed_nodes.push(dep_node);
-                }
-                Err(_dice_err) => {
-                    // we don't cache DiceErrors, so this must be because the dependency changed.
-                    // If the cycle/DiceError is real, we'll hit and propagate it when we recompute
-                    // the parent key.
-                    return DidDepsChange::Changed;
-                }
-            }
-        }
-
-        debug!(msg = "deps did not change");
-
-        DidDepsChange::NoChange(BothDeps {
-            deps: computed_deps,
-            rdeps: computed_nodes,
-        })
-    }
-
-    fn get_running_map<'a>(
-        self: &'a Arc<Self>,
-        transaction_ctx: &Arc<TransactionCtx>,
-    ) -> MappedRwLockReadGuard<'a, DashMap>> {
-        let locked = self.currently_running.read();
-
-        if locked.get(&transaction_ctx.get_version()).is_some() {
-            RwLockReadGuard::map(locked, |locked| {
-                locked
-                    .get(&transaction_ctx.get_version())
-                    .expect("just checked")
-            })
-        } else {
-            drop(locked);
-            let mut map = self.currently_running.write();
-            map.entry(transaction_ctx.get_version()).or_default();
-
-            let locked = RwLockWriteGuard::downgrade(map);
-            RwLockReadGuard::map(locked, |locked| {
-                locked
-                    .get(&transaction_ctx.get_version())
-                    .expect("just inserted")
-            })
-        }
-    }
-}
-
-enum DidDepsChange {
-    Changed,
-    NoChange(BothDeps),
-    NoDeps,
-}
-
-#[cfg(test)]
-pub(crate) mod testing {
-    use std::any::Any;
-    use std::hash::Hash;
-    use std::hash::Hasher;
-    use std::marker::PhantomData;
-    use std::sync::Arc;
-
-    use allocative::Allocative;
-    use async_trait::async_trait;
-    use cmp_any::PartialEqAny;
-    use derivative::Derivative;
-    use parking_lot::RwLock;
-
-    use crate::legacy::incremental::dep_trackers::testing::Dep;
-    use crate::legacy::incremental::dep_trackers::testing::DepExt;
-    use crate::legacy::incremental::graph::storage_properties::StorageProperties;
-    // re-export the cache assertion utility
-    pub(crate) use crate::legacy::incremental::graph::testing::VersionedCacheResultAssertsExt;
-    use crate::legacy::incremental::graph::GraphNode;
-    use crate::legacy::incremental::graph::ReadOnlyHistory;
-    use crate::legacy::incremental::graph::VersionedGraphKeyRef;
-    use crate::legacy::incremental::graph::VersionedGraphResult;
-    use crate::legacy::incremental::versions::MinorVersion;
-    use crate::legacy::incremental::CellHistory;
-    use crate::legacy::incremental::ComputedDependency;
-    use crate::legacy::incremental::Dependency;
-    use crate::legacy::incremental::DidDepsChange;
-    use crate::legacy::incremental::Evaluator;
-    use crate::legacy::incremental::IncrementalComputeProperties;
-    use crate::legacy::incremental::IncrementalEngine;
-    use crate::legacy::incremental::VersionNumber;
-
-    pub(crate) struct DependencyExt(PhantomData);
-
-    impl DependencyExt
-    where
-        K: Evaluator + Default,
-    {
-        /// Creates a raw `Dependency` for testing equality comparison; it isn't
-        /// usable for anything else.
-        pub(crate) fn testing_raw(k: K::Key) -> Box
-        where
-            K: Default,
-        {
-            Box::new(Dep::<K>::testing_new(
-                // we'll never reach the code below, since all we do with this is
-                // test equality
-                Arc::downgrade(&IncrementalEngine::new(K::default())),
-                k,
-            ))
-        }
-    }
-
-    pub(crate) struct ComputedDependencyExt(PhantomData);
-
-    impl ComputedDependencyExt
-    where
-        K: Evaluator + Default,
-    {
-        /// Creates a raw `Dependency` for testing equality comparison; it isn't
-        /// usable for anything else.
-        pub(crate) fn testing_raw(
-            k: K::Key,
-            v: VersionNumber,
-            is_valid: bool,
-        ) -> Box {
-            #[derive(Clone, Derivative, Allocative)]
-            #[derivative(Debug)]
-            #[allocative(bound = "")]
-            struct Fake(
-                (K::Key, VersionNumber),
-                #[derivative(Hash = "ignore", PartialEq = "ignore")] Arc<RwLock<CellHistory>>,
-                #[derivative(Hash = "ignore", PartialEq = "ignore")] bool,
-            );
-            impl ComputedDependency for Fake
-            where
-                K: Evaluator + Default,
-            {
-                fn get_history(&self) -> ReadOnlyHistory {
-                    ReadOnlyHistory::from(self.1.read())
-                }
-
-                fn into_dependency(self: Box<Self>) -> Box {
-                    DependencyExt::<K>::testing_raw(self.0.0)
-                }
-
-                fn get_key_equality(&self) -> (PartialEqAny, VersionNumber) {
-                    (PartialEqAny::new(&self.0.0), self.0.1)
-                }
-
-                fn to_key_any(&self) -> &dyn Any {
-                    K::to_key_any(&self.0.0)
-                }
-
-                fn hash(&self, mut state: &mut dyn Hasher) {
-                    self.0.hash(&mut state);
-                }
-
-                fn is_valid(&self) -> bool {
-                    self.2
-                }
-            }
-
-            Box::new(Fake::<K>(
-                (k, v),
-                Arc::new(RwLock::new(CellHistory::verified(v))),
-                is_valid,
-            ))
-        }
-    }
-
-    #[async_trait]
-    pub(crate) trait IncrementalEngineExt
-    where
-        K: StorageProperties + 'static,
-    {
-        fn get_cached(
-            self: &Arc<Self>,
-            k: K::Key,
-            version: VersionNumber,
-            m_version: MinorVersion,
-        ) -> GraphNode;
-
-        fn get_maybe_cached(
-            self: &Arc<Self>,
-            k: K::Key,
-            version: VersionNumber,
-            m_version: MinorVersion,
-        ) -> VersionedGraphResult;
-    }
-
-    #[async_trait]
-    impl IncrementalEngineExt for IncrementalEngine
-    where
-        K: IncrementalComputeProperties,
-    {
-        fn get_cached(
-            self: &Arc<Self>,
-            k: K::Key,
-            version: VersionNumber,
-            m_version: MinorVersion,
-        ) -> GraphNode {
-            self.get_maybe_cached(k, version, m_version).assert_match()
-        }
-
-        fn get_maybe_cached(
-            self: &Arc<Self>,
-            k: K::Key,
-            version: VersionNumber,
-            m_version: MinorVersion,
-        ) -> VersionedGraphResult {
-            self.versioned_cache
-                .get(VersionedGraphKeyRef::new(version, &k), m_version)
-        }
-    }
-
-    pub(crate) trait DidDepsChangeExt {
-        fn is_changed(&self) -> bool;
-    }
-
-    impl DidDepsChangeExt for DidDepsChange {
-        fn is_changed(&self) -> bool {
-            match self {
-                DidDepsChange::Changed => true,
-                DidDepsChange::NoChange(..) => false,
-                DidDepsChange::NoDeps => false,
-            }
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use std::fmt;
-    use std::fmt::Debug;
-    use std::fmt::Formatter;
-    use std::hash::Hash;
-    use std::hash::Hasher;
-    use std::sync::atomic::AtomicBool;
-    use std::sync::atomic::AtomicU16;
-    use std::sync::atomic::AtomicUsize;
-    use std::sync::atomic::Ordering;
-    use std::sync::Arc;
-    use std::sync::Barrier;
-    use std::sync::Weak;
-    use std::time::Duration;
-
-    use allocative::Allocative;
-    use async_trait::async_trait;
-    use cmp_any::PartialEqAny;
-    use derive_more::Display;
-    use dupe::Dupe;
-    use futures::FutureExt;
-    use gazebo::prelude::*;
-    use indexmap::indexset;
-    use more_futures::cancellation::CancellationContext;
-    use parking_lot::Mutex;
-    use parking_lot::RwLock;
-    use sorted_vector_map::sorted_vector_set;
-    use tokio::sync::Barrier as AsyncBarrier;
-    use tokio::sync::Mutex as AsyncMutex;
-    use tokio::sync::Notify;
-    use tokio::sync::RwLock as AsyncRwLock;
-
-    use crate::api::error::DiceError;
-    use crate::api::error::DiceResult;
-    use crate::api::storage_type::StorageType;
-    use crate::impls::core::graph::history::testing::CellHistoryExt;
-    use crate::introspection::graph::AnyKey;
-    use crate::legacy::ctx::testing::ComputationDataExt;
-    use crate::legacy::ctx::ComputationData;
-    use crate::legacy::incremental::dep_trackers::testing::ComputedDep;
-    use crate::legacy::incremental::dep_trackers::testing::ComputedDepExt;
-    use crate::legacy::incremental::dep_trackers::BothDeps;
-    use crate::legacy::incremental::evaluator::testing::EvaluatorFn;
-    use crate::legacy::incremental::evaluator::testing::EvaluatorUnreachable;
-    use crate::legacy::incremental::graph::dependencies::VersionedDependencies;
-    use crate::legacy::incremental::graph::dependencies::VersionedRevDependencies;
-    use crate::legacy::incremental::graph::storage_properties::testing::StoragePropertiesLastN;
-    use crate::legacy::incremental::graph::storage_properties::StorageProperties;
-    use crate::legacy::incremental::graph::GraphNode;
-    use crate::legacy::incremental::graph::GraphNodeDyn;
-    use crate::legacy::incremental::graph::NodeMetadata;
-    use crate::legacy::incremental::graph::OccupiedGraphNode;
-    use crate::legacy::incremental::graph::ReadOnlyHistory;
-    use crate::legacy::incremental::graph::VersionedGraphKeyRef;
-    use crate::legacy::incremental::graph::WritableMetadata;
-    use crate::legacy::incremental::testing::DependencyExt;
-    use crate::legacy::incremental::testing::DidDepsChangeExt;
-    use crate::legacy::incremental::testing::IncrementalEngineExt;
-    use crate::legacy::incremental::testing::VersionedCacheResultAssertsExt;
-    use crate::legacy::incremental::transaction_ctx::ActiveTransactionCountGuard;
-    use crate::legacy::incremental::transaction_ctx::Changes;
-    use crate::legacy::incremental::versions::MinorVersion;
-    use crate::legacy::incremental::versions::MinorVersionGuard;
-    use crate::legacy::incremental::versions::VersionForWrites;
-    use crate::legacy::incremental::versions::VersionGuard;
-    use crate::legacy::incremental::versions::VersionTracker;
-    use crate::legacy::incremental::CellHistory;
-    use crate::legacy::incremental::ComputedDependency;
-    use crate::legacy::incremental::Dependency;
-    use crate::legacy::incremental::Evaluator;
-    use crate::legacy::incremental::IncrementalComputeProperties;
-    use crate::legacy::incremental::IncrementalEngine;
-    use crate::legacy::incremental::TransactionCtx;
-    use crate::legacy::incremental::VersionedGraphResultMismatch;
-    use crate::legacy::EvaluationResult;
-    use crate::versions::testing::VersionRangesExt;
-    use crate::versions::VersionNumber;
-    use crate::versions::VersionRange;
-    use crate::versions::VersionRanges;
-    use crate::HashSet;
-    use crate::WeakDiceFutureHandle;
-
-    #[tokio::test]
-    async fn evaluation_tracks_rdeps() -> anyhow::Result<()> {
-        let node = Arc::new(OccupiedGraphNode::>::new(
-            1337,
-            1,
-            CellHistory::verified(VersionNumber::new(1)),
-        ));
-        let graph_node: Arc<dyn GraphNodeDyn> = node.dupe();
-
-        // set up so that we have keys 2 and 3 with a history of VersionNumber(1)
-        let eval_fn = move |k| {
-            async move {
-                (
-                    k,
-                    BothDeps {
-                        deps: HashSet::default(),
-                        rdeps: vec![graph_node.dupe()],
-                    },
-                )
-            }
-            .boxed()
-        };
-        let engine = IncrementalEngine::new(EvaluatorFn::new(|k, _| eval_fn(k)));
-
-        let vt = VersionTracker::new(Box::new(|_| {}));
-
-        let eval_ctx = Arc::new(TransactionCtx::new(
-            VersionGuard::testing_new(
-                vt.dupe(),
-                VersionNumber::new(1),
-                MinorVersionGuard::testing_new(0),
-            ),
-            VersionForWrites::testing_new(VersionNumber::new(2)),
-            Changes::new(),
-            ActiveTransactionCountGuard::testing_new(),
-        ));
-
-        let t = *(engine
-            .eval_entry_versioned(&2, &eval_ctx, ComputationData::testing_new())
-            .await
-            .val());
-        assert_eq!(t, 2);
-
-        let t = *(engine
-            .eval_entry_versioned(&3, &eval_ctx, ComputationData::testing_new())
-            .await
-            .val());
-        assert_eq!(t, 3);
-
-        let mut expected = HashSet::from_iter([
-            Arc::as_ptr(
-                &engine
-                    .get_cached(2, VersionNumber::new(1), MinorVersion::testing_new(0))
-                    .into_dyn(),
-            ),
-            Arc::as_ptr(
-                &engine
-                    .get_cached(3, VersionNumber::new(1), MinorVersion::testing_new(0))
-                    .into_dyn(),
-            ),
-        ]);
-        for rdep in node.read_meta().rdeps.rdeps().rdeps.iter() {
-            assert!(
-                expected.remove(&Arc::as_ptr(&rdep.0.0.upgrade().unwrap())),
-                "Extra rdeps"
-            );
-        }
-        assert!(expected.is_empty(), "Missing {} rdeps", expected.len());
-
-        Ok(())
-    }
-
-    #[test]
-    fn concurrent_identical_requests_are_deduped() {
-        let n_thread = 10;
-
-        // use counters to verify that only one eval of each key has ever run
-        let counter0 = Arc::new(AtomicU16::new(0));
-        let counter1 = Arc::new(AtomicU16::new(0));
-
-        let evaluator = {
-            let counter0 = counter0.dupe();
-            let counter1 = counter1.dupe();
-            |k| {
-                async move {
-                    {
-                        if k == 0 {
-                            counter0.fetch_add(1, Ordering::SeqCst);
-                            (1i32, BothDeps::default())
-                        } else {
-                            counter1.fetch_add(1, Ordering::SeqCst);
-                            (2i32, BothDeps::default())
-                        }
-                    }
-                }
-                .boxed()
-            }
-        };
-
-        let engine = IncrementalEngine::new(EvaluatorFn::new(|k, _| evaluator(k)));
-
-        let rt = tokio::runtime::Builder::new_multi_thread()
-            .worker_threads(n_thread)
-            .max_blocking_threads(n_thread)
-            .build()
-            .unwrap();
-        let barrier = Arc::new(Barrier::new(n_thread - 1));
-
-        rt.block_on(async {
-            let mut futs = Vec::new();
-
-            (0..n_thread / 2)
-                .map(|_| {
-                    let e = engine.dupe();
-                    let b = barrier.dupe();
-                    tokio::spawn(async move {
-                        b.wait();
-                        // this isn't guaranteed to hit `eval` all at the same time, but it gives a
-                        // decent chance of doing so
-                        let ctx = Arc::new(TransactionCtx::testing_new(VersionNumber::new(0)));
-                        anyhow::Ok(
-                            *(e.eval_entry_versioned(&0, &ctx, ComputationData::testing_new())
-                                .await
-                                .val()),
-                        )
-                    })
-                })
-                .for_each(|f| futs.push(f));
-
-            (n_thread / 2..n_thread - 1)
-                .map(|_| {
-                    let e = engine.dupe();
-                    let b = barrier.dupe();
-                    tokio::spawn(async move {
-                        b.wait();
-                        // this isn't guaranteed to hit `eval` all at the same time, but it gives a
-                        // decent chance of doing so
-                        let ctx = Arc::new(TransactionCtx::testing_new(VersionNumber::new(0)));
-                        anyhow::Ok(
-                            *(e.eval_entry_versioned(&1, &ctx, ComputationData::testing_new())
-                                .await
-                                .val()),
-                        )
-                    })
-                })
-                .for_each(|f| futs.push(f));
-
-            let res = futures::future::join_all(futs)
-                .await
-                .into_map(|r| r.unwrap().unwrap());
-
-            assert_eq!(res.iter().any(|x| x == &1), true);
-            assert_eq!(res.iter().any(|x| x == &2), true);
-
-            assert_eq!(counter0.load(Ordering::SeqCst), 1);
-            assert_eq!(counter1.load(Ordering::SeqCst), 1);
-        })
-    }
-
-    #[test]
-    fn different_requests_are_spawned_in_parallel() {
-        let n_thread = 10usize;
-
-        let rt = tokio::runtime::Builder::new_multi_thread()
-            .worker_threads(n_thread + 1)
-            .max_blocking_threads(n_thread + 1)
-            .build()
-            .unwrap();
-
-        // use barrier to ensure that n_threads in parallel are spawned by the engine
-        let barrier = Arc::new(Barrier::new(n_thread));
-
-        rt.block_on(async move {
-            let engine = IncrementalEngine::new(EvaluatorFn::new(|_k, _| {
-                async move {
-                    let b = barrier.dupe();
-                    // spawned tasks that can only proceed if all are
-                    // concurrently running
-                    b.wait();
-                    (1usize, BothDeps::default())
-                }
-                .boxed()
-            }));
-            let engine = &engine;
-
-            let mut sum = 0;
-            let v = VersionNumber::new(0);
-            let futs = (0..n_thread)
-                .map(|i| async move {
-                    let ctx = Arc::new(TransactionCtx::testing_new(v));
-                    *(engine
-                        .eval_entry_versioned(&i, &ctx, ComputationData::testing_new())
-                        .await
-                        .val())
-                })
-                .collect::<Vec<_>>();
-
-            futures::future::join_all(futs)
-                .await
-                .iter()
-                .for_each(|res| sum += res);
-
-            assert_eq!(sum, n_thread);
-        })
-    }
-
-    #[tokio::test]
-    async fn test_detecting_changed_dependencies() -> anyhow::Result<()> {
-        #[derive(Clone, Dupe, Debug, Display, Allocative)]
-        #[display(fmt = "FakeDep({})", _0)]
-        struct FakeDep(usize, Arc<FakeNode>);
-
-        impl FakeDep {
-            fn new(hash: usize, hist: CellHistory) -> Self {
-                Self(
-                    hash,
-                    Arc::new(FakeNode(RwLock::new(NodeMetadata {
-                        deps: VersionedDependencies::new(),
-                        rdeps: VersionedRevDependencies::new(),
-                        hist,
-                    }))),
-                )
-            }
-        }
-
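-        // NOTE(editor): illustrative sketch, not part of the original source; the
-        // names below are invented for illustration. The dependency-change rule
-        // this test exercises reduces to interval intersection: a cached result
-        // stays reusable only while the verified version ranges of all its deps
-        // still overlap the range being requested. A minimal std-only model over
-        // half-open `u64` ranges:
-        fn still_reusable(parent: (u64, u64), deps: &[(u64, u64)]) -> bool {
-            // Fold each dep's verified range into the parent's; an empty
-            // intersection at any point means some dep changed in between.
-            deps.iter()
-                .try_fold(parent, |(lo, hi), &(dlo, dhi)| {
-                    let (lo, hi) = (lo.max(dlo), hi.min(dhi));
-                    if lo < hi { Some((lo, hi)) } else { None }
-                })
-                .is_some()
-        }
-        // Mirrors the two assertions further down: a dep verified at [1, 2)
-        // cannot support an entry verified at [0, 1), but does support one
-        // verified at [1, 2).
-        assert!(!still_reusable((0, 1), &[(1, 2)]));
-        assert!(still_reusable((1, 2), &[(1, 2)]));
-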
-        #[derive(Allocative)]
-        struct FakeNode(RwLock<NodeMetadata>);
-
-        impl Debug for FakeNode {
-            fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
-                write!(f, "{:?}", self.0.read().hist)
-            }
-        }
-
-        impl GraphNodeDyn for FakeNode {
-            fn get_history(&self) -> ReadOnlyHistory {
-                ReadOnlyHistory::from(self.0.read())
-            }
-
-            fn read_rdeps(&self) -> VersionedRevDependencies {
-                self.0.read().rdeps.dupe()
-            }
-
-            fn add_rdep(&self, dependent: Weak<dyn GraphNodeDyn>, v: VersionNumber) {
-                self.0.read().rdeps.add_rdep(dependent, v)
-            }
-
-            fn writable(&self) -> WritableMetadata {
-                WritableMetadata::from(self.0.write())
-            }
-
-            fn is_valid(&self) -> bool {
-                true
-            }
-
-            fn key(&self) -> AnyKey {
-                unimplemented!()
-            }
-
-            fn id(&self) -> usize {
-                self as *const Self as usize
-            }
-        }
-
-        impl ComputedDependency for FakeDep {
-            fn get_history(&self) -> ReadOnlyHistory {
-                self.1.get_history()
-            }
-
-            fn into_dependency(self: Box<Self>) -> Box {
-                Box::new(*self)
-            }
-
-            fn get_key_equality(&self) -> (PartialEqAny, VersionNumber) {
-                (PartialEqAny::new(&self.0), VersionNumber(0))
-            }
-
-            fn to_key_any(&self) -> &dyn std::any::Any {
-                unimplemented!()
-            }
-
-            fn hash(&self, mut state: &mut dyn Hasher) {
-                self.0.hash(&mut state)
-            }
-
-            fn is_valid(&self) -> bool {
-                self.1.is_valid()
-            }
-        }
-
-        #[async_trait]
-        impl Dependency for FakeDep {
-            async fn recompute(
-                &self,
-                _transaction_ctx: &Arc<TransactionCtx>,
-                _: &ComputationData,
-            ) -> DiceResult<(Box, Arc)> {
-                Ok((Box::new(self.dupe()), self.1.dupe()))
-            }
-
-            fn lookup_node(
-                &self,
-                _v: VersionNumber,
-                _mv: MinorVersion,
-            ) -> Option> {
-                Some(self.1.dupe())
-            }
-
-            fn dirty(&self, v: VersionNumber) {
-                self.1.0.write().hist.mark_invalidated(v);
-            }
-
-            fn get_key_equality(&self) -> PartialEqAny {
-                PartialEqAny::new(&self.0)
-            }
-
-            fn hash(&self, mut state: &mut dyn Hasher) {
-                self.0.hash(&mut state)
-            }
-
-            fn introspect<'a>(&'a self) -> AnyKey {
-                AnyKey::new(self.0)
-            }
-
-            fn to_key_any(&self) -> &dyn std::any::Any {
-                self
-            }
-        }
-
-        let dep = FakeDep::new(1, CellHistory::testing_new(&[VersionNumber::new(1)], &[]));
-
-        let entry = Arc::new(
-            OccupiedGraphNode::>::new(
-                1337,
-                1,
-                CellHistory::testing_new(&[VersionNumber::new(0)], &[VersionNumber::new(1)]),
-            ),
-        );
-        entry.writable().deps.add_deps(
-            VersionNumber::new(0),
-            Arc::new(vec![Box::new(dep.dupe()) as _]),
-        );
-
-        // new version to trigger re-evaluation of dep
-        let eval_ctx = Arc::new(TransactionCtx::testing_new(VersionNumber::new(1)));
-        assert!(
-            IncrementalEngine::>::compute_whether_versioned_dependencies_changed(
-                &1337,
-                &eval_ctx,
-                &ComputationData::testing_new(),
-                &VersionedGraphResultMismatch {
-                    entry: GraphNode::occupied(entry),
-                    verified_versions: VersionRanges::testing_new(sorted_vector_set![VersionRange::bounded(
-                        VersionNumber::new(0),
-                        VersionNumber::new(1)
-                    )])
-                }
-            )
-            .await
-            .is_changed(),
-        );
-
-        // with an entry that has been verified at the previous dep calculated version
-        let entry = Arc::new(OccupiedGraphNode::new(
-            1337,
-            1,
-            CellHistory::verified(VersionNumber::new(1)),
-        ));
-        entry.writable().deps.add_deps(
-            VersionNumber::new(1),
-            Arc::new(vec![Box::new(dep.dupe()) as _]),
-        );
-        // new version to trigger re-evaluation of dep
-        let eval_ctx = Arc::new(TransactionCtx::testing_new(VersionNumber::new(2)));
-        assert!(
-            !IncrementalEngine::>::compute_whether_versioned_dependencies_changed(
-                &1337,
-                &eval_ctx,
-                &ComputationData::testing_new(),
-                &VersionedGraphResultMismatch {
-                    entry: GraphNode::occupied(entry),
-                    verified_versions: VersionRanges::testing_new(sorted_vector_set![VersionRange::bounded(
-                        VersionNumber::new(1),
-                        VersionNumber::new(2)
-                    )]),
-                }
-            )
-            .await
-            .is_changed(),
-        );
-
-        // Now we also check that when deps have cycles, we ignore it, since it's possible the
-        // cycle is no longer valid
-        #[derive(Clone, Dupe, Debug, Display, Allocative, PartialEq, Hash)]
-        #[display(fmt = "FakeCycleDep")]
-        struct FakeCycleDep;
-
-        #[async_trait]
-        impl Dependency for FakeCycleDep {
-            async fn recompute(
-                &self,
-                _transaction_ctx: &Arc<TransactionCtx>,
-                _: &ComputationData,
-            ) -> DiceResult<(Box, Arc)> {
-                Err(DiceError::cycle(Arc::new(2123), indexset![]))
-            }
-
-            fn lookup_node(
-                &self,
-                _v: VersionNumber,
-                _mv: MinorVersion,
-            ) -> Option> {
-                None
-            }
-
-            fn dirty(&self, _v: VersionNumber) {}
-
-            fn get_key_equality(&self) -> PartialEqAny {
-                PartialEqAny::new(self)
-            }
-
-            fn hash(&self, mut state: &mut dyn Hasher) {
-                Hash::hash(self, &mut state)
-            }
-
-            fn introspect<'a>(&'a self) -> AnyKey {
-                AnyKey::new(2123)
-            }
-
-            fn to_key_any(&self) -> &dyn std::any::Any {
-                self
-            }
-        }
-
-        let dep = FakeCycleDep;
-
-        // with an entry that has been verified at the previous dep calculated version
-        let entry = Arc::new(OccupiedGraphNode::new(
-            1338,
-            1,
-            CellHistory::verified(VersionNumber::new(1)),
-        ));
-        entry.writable().deps.add_deps(
-            VersionNumber::new(1),
-            Arc::new(vec![Box::new(dep.dupe()) as _]),
-        );
-        // new version to trigger re-evaluation of dep
-        let eval_ctx = Arc::new(TransactionCtx::testing_new(VersionNumber::new(2)));
-        assert!(
-            IncrementalEngine::>::compute_whether_versioned_dependencies_changed(
-                &1338,
-                &eval_ctx,
-                &ComputationData::testing_new(),
-                &VersionedGraphResultMismatch {
-                    entry: GraphNode::occupied(entry),
-                    verified_versions: VersionRanges::testing_new(sorted_vector_set![VersionRange::bounded(
-                        VersionNumber::new(1),
-                        VersionNumber::new(2)
-                    )]),
-                }
-            )
-            .await
-            .is_changed(),
-        );
-
-        Ok(())
-    }
-
-    #[tokio::test]
-    async fn test_values_gets_reevaluated_when_deps_change() -> anyhow::Result<()> {
-        let is_ran = Arc::new(AtomicBool::new(false));
-        let eval_result = Arc::new(AtomicUsize::new(0));
-        let dep: Arc>>>> =
-            Arc::new(Default::default());
-
-        let evaluator = {
-            let is_ran = is_ran.dupe();
-            let dep = dep.dupe();
-            let eval_result = eval_result.dupe();
-            move |k| {
-                async move {
-                    if k == 10 && !is_ran.load(Ordering::SeqCst) {
-                        is_ran.store(true, Ordering::SeqCst);
-                        let value = eval_result.load(Ordering::SeqCst);
-                        let both_deps = BothDeps {
-                            deps: HashSet::from_iter([
-                                Box::new(dep.lock().take().unwrap()) as Box
-                            ]),
-                            rdeps: Vec::new(),
-                        };
-                        return (value, both_deps);
-                    }
-                    panic!("never called. should be cached not evaluated")
-                }
-                .boxed()
-            }
-        };
-
-        let engine =
-            IncrementalEngine::>::new(EvaluatorFn::new(|k, _| {
-                evaluator(k)
-            }));
-        *dep.lock() = Some(ComputedDep::testing_new(
-            Arc::downgrade(&engine.dupe()),
-            VersionNumber::new(0),
-            Arc::new(OccupiedGraphNode::testing_new(
-                1,
-                1,
-                CellHistory::verified(VersionNumber::new(0)),
-                VersionedDependencies::new(),
-                VersionedRevDependencies::new(),
-            )),
-        ));
-
-        eval_result.store(10, Ordering::SeqCst);
-        assert!(engine.update_injected_value(1, VersionNumber::new(1), 100));
-        *dep.lock() = Some(ComputedDep::testing_new(
-            Arc::downgrade(&engine.dupe()),
-            VersionNumber::new(1),
-            Arc::new(OccupiedGraphNode::testing_new(
-                1,
-                100,
-                CellHistory::verified(VersionNumber::new(1)),
-                VersionedDependencies::new(),
-                VersionedRevDependencies::new(),
-            )),
-        ));
-
-        let entry = {
-            let eval_ctx = Arc::new(TransactionCtx::testing_new(VersionNumber::new(1)));
-            let node = engine
-                .eval_entry_versioned(&10, &eval_ctx, ComputationData::testing_new())
-                .await;
-            engine
-                .versioned_cache
-                .get(
-                    VersionedGraphKeyRef::new(VersionNumber::new(1), &1),
-                    MinorVersion::testing_new(0),
-                )
-                .assert_match()
-                .read_meta()
-                .rdeps
-                .add_rdep(
-                    Arc::downgrade(&node.dupe().into_dyn()),
-                    VersionNumber::new(1),
-                );
-
-            assert!(
-                engine
-                    .currently_running
-                    .read()
-                    .iter()
-                    .all(|(_v, e)| e.is_empty())
-            );
-
-            node
-        };
-
-        assert_eq!(is_ran.load(Ordering::SeqCst), true);
-        assert_eq!(*entry.val(), 10);
-        assert_eq!(
-            entry.read_meta().deps.deps(),
-            Some(Arc::new(vec![DependencyExt::<
-                EvaluatorUnreachable,
-            >::testing_raw(1)]))
-        );
-        assert_eq!(
-            entry.read_meta().hist.get_verified(),
-            vec![VersionNumber::new(1)]
-        );
-
-        // now force the dependency to have version numbers [1, 2]
-        assert!(!engine.update_injected_value(1, VersionNumber::new(2), 100));
-        // also force dirty the root node so we actually check its deps, since the above would
-        // short-circuit dirtying due to the dep value actually being equal.
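-        // NOTE(editor): clarifying comments, not in the original source. The
-        // `update_injected_value` call above returns false because the newly
-        // injected value (100) equals the value already stored for key 1, so the
-        // dep's history is extended rather than invalidated. The `dirty` call
-        // below passes `false` as its final argument, i.e. a soft invalidation
-        // that may still be verified away by the dep check; the assertions that
-        // follow rely on exactly that (`is_ran` stays false and the value 10 is
-        // reused).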
-        engine.dirty(10, VersionNumber::new(2), false);
-        is_ran.store(false, Ordering::SeqCst);
-        *dep.lock() = Some(ComputedDep::testing_new(
-            Arc::downgrade(&engine.dupe()),
-            VersionNumber::new(2),
-            Arc::new(OccupiedGraphNode::testing_new(
-                1,
-                100,
-                CellHistory::testing_new(&[VersionNumber::new(1), VersionNumber::new(2)], &[]),
-                VersionedDependencies::new(),
-                VersionedRevDependencies::new(),
-            )),
-        ));
-
-        let eval_ctx = Arc::new(TransactionCtx::testing_new(VersionNumber::new(2)));
-        let entry = engine
-            .eval_entry_versioned(&10, &eval_ctx, ComputationData::testing_new())
-            .await;
-        assert_eq!(is_ran.load(Ordering::SeqCst), false);
-        assert_eq!(*entry.val(), 10);
-        assert_eq!(
-            entry.read_meta().deps.deps(),
-            Some(Arc::new(vec![DependencyExt::<
-                EvaluatorUnreachable,
-            >::testing_raw(1)]))
-        );
-        assert_eq!(
-            entry.read_meta().hist.get_verified_ranges(),
-            VersionRanges::testing_new(sorted_vector_set![VersionRange::begins_with(
-                VersionNumber::new(1),
-            )])
-        );
-        assert!(
-            engine
-                .currently_running
-                .read()
-                .iter()
-                .all(|(_v, e)| e.is_empty())
-        );
-
-        // now force the dependency to be different and have versions [3]
-        assert!(engine.update_injected_value(1, VersionNumber::new(3), 200));
-        eval_result.store(20, Ordering::SeqCst);
-        *dep.lock() = Some(ComputedDep::testing_new(
-            Arc::downgrade(&engine.dupe()),
-            VersionNumber::new(3),
-            Arc::new(OccupiedGraphNode::testing_new(
-                1,
-                200,
-                CellHistory::testing_new(&[VersionNumber::new(3)], &[VersionNumber::new(5)]),
-                VersionedDependencies::new(),
-                VersionedRevDependencies::new(),
-            )),
-        ));
-
-        let entry = {
-            let eval_ctx = Arc::new(TransactionCtx::testing_new(VersionNumber::new(3)));
-            let node = engine
-                .eval_entry_versioned(&10, &eval_ctx, ComputationData::testing_new())
-                .await;
-            engine
-                .versioned_cache
-                .get(
-                    VersionedGraphKeyRef::new(VersionNumber::new(3), &1),
-                    MinorVersion::testing_new(0),
-                )
-                .assert_match()
-                .read_meta()
-                .rdeps
-                .add_rdep(
-                    Arc::downgrade(&node.dupe().into_dyn()),
-                    VersionNumber::new(3),
-                );
-
-            assert!(
-                engine
-                    .currently_running
-                    .read()
-                    .iter()
-                    .all(|(_v, e)| e.is_empty())
-            );
-
-            node
-        };
-
-        assert_eq!(is_ran.load(Ordering::SeqCst), true);
-        assert_eq!(*entry.val(), 20);
-        assert_eq!(
-            entry.read_meta().deps.deps(),
-            Some(Arc::new(vec![DependencyExt::<
-                EvaluatorUnreachable,
-            >::testing_raw(1)]))
-        );
-        assert_eq!(
-            entry.read_meta().hist.get_verified_ranges(),
-            VersionRanges::testing_new(sorted_vector_set![VersionRange::bounded(
-                VersionNumber::new(3),
-                VersionNumber::new(5)
-            )])
-        );
-
-        Ok(())
-    }
-
-    #[tokio::test]
-    async fn dirty_invalidates_rdeps_across_engines() -> anyhow::Result<()> {
-        let vt = VersionTracker::new(Box::new(|_| {}));
-        let ctx = Arc::new(TransactionCtx::new(
-            VersionGuard::testing_new(
-                vt.dupe(),
-                VersionNumber::new(0),
-                MinorVersionGuard::testing_new(0),
-            ),
-            VersionForWrites::testing_new(VersionNumber::new(1)),
-            Changes::new(),
-            ActiveTransactionCountGuard::testing_new(),
-        ));
-
-        let engine0 = IncrementalEngine::new(EvaluatorFn::new(|k, _| {
-            async move { (k, BothDeps::default()) }.boxed()
-        }));
-        let node0 = engine0
-            .eval_entry_versioned(&0, &ctx, ComputationData::testing_new())
-            .await;
-
-        let engine1 = IncrementalEngine::new(EvaluatorFn::new(|k, _| {
-            async move {
-                (
-                    k,
-                    BothDeps {
-                        deps: HashSet::default(),
-                        rdeps: vec![node0.into_dyn()],
-                    },
-                )
-            }
-            .boxed()
-        }));
-        let node1 = engine1
-            .eval_entry_versioned(&1, &ctx, ComputationData::testing_new())
-            .await;
-
-        let engine2 = IncrementalEngine::new(EvaluatorFn::new(|k, _| {
-            async move {
-                (
-                    k,
-                    BothDeps {
-                        deps: HashSet::default(),
-                        rdeps: vec![node1.into_dyn()],
-                    },
-                )
-            }
-            .boxed()
-        }));
-        let node2 = engine2
-            .eval_entry_versioned(&2, &ctx, ComputationData::testing_new())
-            .await;
-
-        let engine3 = IncrementalEngine::new(EvaluatorFn::new(|k, _| {
-            async move {
-                (
-                    k,
-                    BothDeps {
-                        deps: HashSet::default(),
-                        rdeps: vec![node2.into_dyn()],
-                    },
-                )
-            }
-            .boxed()
-        }));
-        let _node3 = engine3
-            .eval_entry_versioned(&3, &ctx, ComputationData::testing_new())
-            .await;
-        engine0.dirty(0, VersionNumber::new(2), false);
-
-        engine0
-            .versioned_cache
-            .get(
-                VersionedGraphKeyRef::new(VersionNumber::new(2), &0),
-                MinorVersion::testing_new(0),
-            )
-            .assert_mismatch();
-        engine1
-            .versioned_cache
-            .get(
-                VersionedGraphKeyRef::new(VersionNumber::new(2), &1),
-                MinorVersion::testing_new(0),
-            )
-            .assert_mismatch();
-        engine2
-            .versioned_cache
-            .get(
-                VersionedGraphKeyRef::new(VersionNumber::new(2), &2),
-                MinorVersion::testing_new(0),
-            )
-            .assert_mismatch();
-        engine3
-            .versioned_cache
-            .get(
-                VersionedGraphKeyRef::new(VersionNumber::new(2), &3),
-                MinorVersion::testing_new(0),
-            )
-            .assert_mismatch();
-
-        Ok(())
-    }
-
-    #[test]
-    fn dropping_future_cancels_execution() {
-        let n_thread = 10usize;
-
-        let rt = tokio::runtime::Builder::new_multi_thread()
-            .worker_threads(n_thread + 1)
-            .max_blocking_threads(n_thread + 1)
-            .enable_time()
-            .build()
-            .unwrap();
-
-        // use barrier to ensure that n_threads in parallel are spawned by the engine, so that the
-        // main thread knows all requests have been spawned
-        let task_started = Arc::new(AsyncBarrier::new(n_thread + 1));
-        // prevent the started computations from finishing until we drop them. Also use the async
-        // mutex here so the futures are at an await point and can be dropped
-        let guard = Arc::new(AsyncMutex::new(()));
-        // prevent the main thread from checking the ran counter too early. This enforces either
-        // the computations have been dropped or have set the ran counter
-        let check_guard = Arc::new(AsyncRwLock::new(()));
-        // counts how many of the tasks we spawn actually got past the guard above
-        let ran_counter = Arc::new(AtomicBool::new(false));
-
-        rt.block_on(async move {
-            let g = guard.dupe();
-            let guard_locked = guard.lock().await;
-            let s = task_started.dupe();
-            let cg = check_guard.dupe();
-            let c = ran_counter.dupe();
-
-            let engine = IncrementalEngine::new(EvaluatorFn::new(|_k, _| {
-                async move {
-                    // spawned tasks that can only proceed if all are
-                    // concurrently running
-                    let _c_guard = cg.read().await;
-                    s.wait().await;
-                    let _g_guard = g.lock().await;
-
-                    // DICE only can guarantee cancels at await points, so add an await point to
-                    // ensure that the task has been canceled before hitting the code below.
-                    tokio::task::yield_now().await;
-
-                    c.store(true, Ordering::SeqCst);
-
-                    (
-                        1usize,
-                        BothDeps {
-                            deps: HashSet::default(),
-                            rdeps: Vec::new(),
-                        },
-                    )
-                }
-                .boxed()
-            }));
-
-            let v = VersionNumber::new(0);
-            let futs = futures::future::join_all(
-                (0..n_thread)
-                    .map(|i| {
-                        let engine = engine.dupe();
-                        async move {
-                            let ctx = Arc::new(TransactionCtx::testing_new(v));
-                            *(engine
-                                .eval_entry_versioned(&i, &ctx, ComputationData::testing_new())
-                                .await
-                                .val())
-                        }
-                    })
-                    .collect::<Vec<_>>(),
-            );
-
-            #[allow(clippy::mut_mut)]
-            {
-                // wait for all futures to start and get to the guard
-                futures::select! {
-                    _ = futs.fuse() => {
-                        // use this select to drive the computations
-                        unreachable!("futures shouldn't ever finish")
-                    },
-                    _ = task_started.wait().fuse() => {
-                        // continue to cancel the tasks
-                    }
-                }
-            }
-
-            // futs was consumed and dropped after the select above.
-
-            // drop the guard
-            drop(guard_locked);
-
-            let _c_guard = check_guard.write().await;
-
-            // Verify that no futures made progress past an await point after being cancelled.
-            assert!(!ran_counter.load(Ordering::SeqCst));
-
-            // Currently running should get cleared out, but that happens asynchronously as the
-            // futures get dropped.
-            tokio::time::timeout(Duration::from_secs(1), async move {
-                loop {
-                    let empty = engine
-                        .currently_running
-                        .read()
-                        .iter()
-                        .all(|(_v, e)| e.is_empty());
-
-                    if empty {
-                        break;
-                    }
-
-                    tokio::task::yield_now().await;
-                }
-            })
-            .await
-            .unwrap();
-
-            // Check again that no progress was made until shutdown.
-            assert!(!ran_counter.load(Ordering::SeqCst));
-        })
-    }
-
-    #[tokio::test]
-    async fn mark_unchanged_propagates_dirty_from_deps() -> anyhow::Result<()> {
-        // This tests a very specific condition of resurrecting a value.
-        // Consider a node n2 that depends on n1 at version v0.
-        // We then dirty versions v1, v2, v3 at n1. We'd defer dirtying v2, v3 on n2 due
-        // to the fact that it's possible that at v2 and v3, n2 no longer depends on n1
-        // and we rely on deferred propagation of dirtiness. However, if at v2, we recompute
-        // and find that the values are equal to that at v0, then we can resurrect v0's n2.
-        // It is important to verify that we still defer-propagate the dirty at v3.
-        use crate::legacy::incremental::dep_trackers::testing::ComputedDep;
-
-        #[derive(Debug, Default, Allocative)]
-        struct EvalEvenOdd;
-
-        impl StorageProperties for EvalEvenOdd {
-            type Key = usize;
-            type Value = usize;
-
-            fn key_type_name() -> &'static str {
-                "EvalEvenOdd"
-            }
-
-            fn storage_type(&self) -> StorageType {
-                StorageType::LastN(1)
-            }
-
-            fn equality(&self, x: &Self::Value, y: &Self::Value) -> bool {
-                x == y
-            }
-
-            fn validity(&self, _x: &Self::Value) -> bool {
-                true
-            }
-
-            fn to_key_any(key: &Self::Key) -> &dyn std::any::Any {
-                key
-            }
-        }
-
-        #[async_trait]
-        impl IncrementalComputeProperties for EvalEvenOdd {
-            type DiceTask = WeakDiceFutureHandle;
-
-            async fn recompute(
-                key: &Self::Key,
-                engine: &Arc>,
-                transaction_ctx: &Arc<TransactionCtx>,
-                extra: &ComputationData,
-            ) -> DiceResult> {
-                Ok(engine
-                    .eval_entry_versioned(key, transaction_ctx, extra.subrequest::(key)?)
-                    .await)
-            }
-        }
-
-        #[async_trait]
-        impl Evaluator for EvalEvenOdd {
-            async fn eval(
-                &self,
-                _k: &usize,
-                transaction_ctx: Arc<TransactionCtx>,
-                _cancellations: &CancellationContext,
-                extra: ComputationData,
-            ) -> EvaluationResult {
-                EvaluationResult {
-                    value: transaction_ctx.get_version().to_string()[1..]
-                        .parse::<usize>()
-                        .unwrap()
-                        % 2,
-                    both_deps: BothDeps {
-                        deps: HashSet::default(),
-                        rdeps: Vec::new(),
-                    },
-                    extra,
-                }
-            }
-        }
-
-        // there's a situation where
-        let engine = IncrementalEngine::new(EvalEvenOdd);
-
-        #[derive(Debug, Allocative)]
-        struct Eval(Arc>);
-
-        impl StorageProperties for Eval {
-            type Key = usize;
-            type Value = usize;
-
-            fn key_type_name() -> &'static str {
-                "Eval"
-            }
-
-            fn storage_type(&self) -> StorageType {
-                StorageType::LastN(1)
-            }
-
-            fn equality(&self, x: &Self::Value, y: &Self::Value) -> bool {
-                x == y
-            }
-
-            fn validity(&self, _x: &Self::Value) -> bool {
-                true
-            }
-
-            fn to_key_any(key: &Self::Key) -> &dyn std::any::Any {
-                key
-            }
-        }
-
-        #[async_trait]
-        impl IncrementalComputeProperties for Eval {
-            type DiceTask = WeakDiceFutureHandle;
-
-            async fn recompute(
-                _key: &Self::Key,
-                _engine: &Arc>,
-                _transaction_ctx: &Arc<TransactionCtx>,
-                _extra: &ComputationData,
-            ) -> DiceResult> {
-                unimplemented!("not needed in test")
-            }
-        }
-
-        #[async_trait]
-        impl Evaluator for Eval {
-            async fn eval(
-                &self,
-                _k: &usize,
-                transaction_ctx: Arc<TransactionCtx>,
-                _cancellations: &CancellationContext,
-                extra: ComputationData,
-            ) -> EvaluationResult {
-                let sub_extra = extra.subrequest::(&1).unwrap();
-                let node = self
-                    .0
-                    .eval_entry_versioned(&1, &transaction_ctx, sub_extra)
-                    .await;
-                EvaluationResult {
-                    value: *node.val(),
-                    both_deps: BothDeps {
-                        deps: HashSet::from_iter([Box::new(ComputedDep {
-                            engine: Arc::downgrade(&self.0),
-                            version: transaction_ctx.get_version(),
-                            node: node.dupe(),
-                        }) as Box]),
-                        rdeps: vec![node.into_dyn()],
-                    },
-                    extra,
-                }
-            }
-        }
-        let engine1 = IncrementalEngine::new(Eval(engine.dupe()));
-        let ctx = Arc::new(TransactionCtx::testing_new(VersionNumber::new(0)));
-        assert_eq!(
-            engine1
-                .eval_entry_versioned(&2, &ctx, ComputationData::testing_new())
-                .await
-                .val(),
-            &0
-        );
-
-        engine.dirty(1, VersionNumber::new(1), true);
-        engine.dirty(1, VersionNumber::new(2), true);
-        engine.dirty(1, VersionNumber::new(3), true);
-
-        let ctx = Arc::new(TransactionCtx::testing_new(VersionNumber::new(2)));
-        assert_eq!(
-            engine1
-                .eval_entry_versioned(&2, &ctx, ComputationData::testing_new())
-                .await
-                .val(),
-            &0
-        );
-
-        let ctx = Arc::new(TransactionCtx::testing_new(VersionNumber::new(3)));
-        assert_eq!(
-            engine1
-                .eval_entry_versioned(&2, &ctx, ComputationData::testing_new())
-                .await
-                .val(),
-            &1
-        );
-
-        Ok(())
-    }
-
-    #[tokio::test]
-    async fn when_equal_return_same_instance() {
-        let instance = Arc::new(AtomicUsize::new(0));
-
-        #[derive(Clone, Dupe, Allocative)]
-        struct InstanceEqual {
-            instance_count: usize,
-        }
-
-        impl PartialEq for InstanceEqual {
-            fn eq(&self, _other: &Self) -> bool {
-                true
-            }
-        }
-
-        let engine = {
-            let instance_count = instance.dupe();
-            IncrementalEngine::new(EvaluatorFn::new(move |_k, _| {
-                async move {
-                    let value = InstanceEqual {
-                        instance_count: instance_count.fetch_add(1, Ordering::SeqCst),
-                    };
-
-                    (value, BothDeps::default())
-                }
-                .boxed()
-            }))
-        };
-
-        let ctx = Arc::new(TransactionCtx::testing_new(VersionNumber::new(0)));
-
-        let first_instance = engine
-            .eval_entry_versioned(&1, &ctx, ComputationData::testing_new())
-            .await
-            .val()
-            .dupe();
-
-        engine.dirty(1, VersionNumber::new(1), false);
-
-        let ctx = Arc::new(TransactionCtx::testing_new(VersionNumber::new(1)));
-        let second_node = engine
-            .eval_entry_versioned(&1, &ctx, ComputationData::testing_new())
-            .await;
-
-        // verify that we incremented the total instance counter
-        assert_eq!(instance.load(Ordering::SeqCst), 2);
-
-        assert_eq!(
-            second_node.get_history().get_verified_ranges(),
-            VersionRanges::testing_new(
-                sorted_vector_set! { VersionRange::begins_with(VersionNumber::new(0))}
-            )
-        );
-
-        // verify that the instance we return and store is the same as the original instance
-        assert_eq!(
-            first_instance.instance_count,
-            second_node.val().instance_count
-        );
-    }
-
-    #[tokio::test]
-    async fn test_async_cancellation() {
-        // Does a double-duty of keeping track of how many executions we did + whether they happen
-        // concurrently.
-        let exclusive = Arc::new(Mutex::new(false));
-        let notify = Arc::new(Notify::new());
-
-        let engine = IncrementalEngine::new({
-            let notify = notify.dupe();
-            EvaluatorFn::new(move |_k, cancellations| {
-                async move {
-                    let mut guard = exclusive
-                        .try_lock()
-                        .expect("Can only have one concurrent execution");
-
-                    if *guard {
-                        // Last attempt, return.
-                        ((), Default::default())
-                    } else {
-                        // Note that we did our first execution. Keep the lock held. The point of the
-                        // test is to prove that nobody will get to run before we exit and drop it.
-                        *guard = true;
-
-                        cancellations
-                            .with_structured_cancellation(|obs| async move {
-                                // Resume the rest of the code.
-                                notify.notify_one();
-                                // Wait for our cancellation.
-                                obs.await;
-                                // Yield. If the final evaluation is ready (that would be a bug!), it will
-                                // run now.
-                                tokio::task::yield_now().await;
-                            })
-                            .await;
-
-                        // Never return, but this bit will be the one that's cancelled.
-                        futures::future::pending().await
-                    }
-                }
-                .boxed()
-            })
-        });
-
-        let ctx = Arc::new(TransactionCtx::testing_new(VersionNumber::new(0)));
-
-        // Start & cancel once we enter the structured cancellation section.
-        let fut = engine.eval_entry_versioned(&1, &ctx, ComputationData::testing_new());
-        notify.notified().await;
-        drop(fut);
-
-        // Spawn a future and immediately cancel it. That future will not actually run, because we
-        // are on a single-threaded runtime. However, it will take the place of the cancelled task
-        // in the currently_running map.
-        let fut = engine.eval_entry_versioned(&1, &ctx, ComputationData::testing_new());
-        drop(fut);
-
-        // This time, wait until execution finishes. The mutex within the evaluation proves that we
-        // don't execute concurrently.
-        tokio::time::timeout(
-            Duration::from_secs(1),
-            engine.eval_entry_versioned(&1, &ctx, ComputationData::testing_new()),
-        )
-        .await
-        .unwrap();
-    }
-
-    #[tokio::test]
-    async fn test_cancelled_tasks_do_not_write_to_cache() {
-        let entered_critical_section = Arc::new(Notify::new());
-        let external_guard_dropped = Arc::new(Notify::new());
-
-        let first_call = Arc::new(Mutex::new(true));
-
-        let engine = IncrementalEngine::new({
-            let entered_critical_section = entered_critical_section.dupe();
-            let external_guard_dropped = external_guard_dropped.dupe();
-
-            EvaluatorFn::new(move |_k, cancellations| {
-                let was_first_call;
-
-                {
-                    let mut first_call = first_call.lock();
-                    was_first_call = *first_call;
-                    *first_call = false;
-                }
-
-                async move {
-                    if was_first_call {
-                        cancellations
-                            .critical_section(|| async move {
-                                entered_critical_section.notify_one();
-                                tokio::time::timeout(
-                                    Duration::from_secs(1),
-                                    external_guard_dropped.notified(),
-                                )
-                                .await
-                                .unwrap();
-                            })
-                            .await;
-                    }
-
-                    (was_first_call, BothDeps::default())
-                }
-                .boxed()
-            })
-        });
-
-        let ctx = Arc::new(TransactionCtx::testing_new(VersionNumber::new(0)));
-
-        // Start & cancel once we enter the critical section.
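-        // NOTE(editor): clarifying comments, not in the original source. The
-        // sequence below is: (1) start eval #1, which enters `critical_section`
-        // and blocks; (2) drop its future, requesting cancellation, which must
-        // not interrupt the critical section; (3) release the section and yield
-        // so the cancelled task can unwind; (4) re-request the key. The final
-        // assertion then checks that eval #1 (was_first_call == true) never
-        // populated the cache, so the second evaluation supplies the value.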
-        let fut = engine.eval_entry_versioned(&1, &ctx, ComputationData::testing_new());
-        tokio::time::timeout(Duration::from_secs(1), entered_critical_section.notified())
-            .await
-            .unwrap();
-        drop(fut);
-
-        // Notify the future that we've now dropped our guard. The future is in a critical section
-        // so it does own a guard.
-        external_guard_dropped.notify_one();
-
-        // Now, give the future we spawned a chance to run.
-        tokio::task::yield_now().await;
-
-        let val = tokio::time::timeout(
-            Duration::from_secs(1),
-            engine.eval_entry_versioned(&1, &ctx, ComputationData::testing_new()),
-        )
-        .await
-        .unwrap();
-
-        // Expect to get the output of the second call, since the first one was not allowed to
-        // populate the cache.
-        assert!(!val.val(), "got the value from the first call");
-    }
-}
diff --git a/dice/dice/src/legacy/incremental/transaction_ctx.rs b/dice/dice/src/legacy/incremental/transaction_ctx.rs
deleted file mode 100644
index cf50349a94494..0000000000000
--- a/dice/dice/src/legacy/incremental/transaction_ctx.rs
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */
-
-use std::sync::Arc;
-use std::sync::Weak;
-
-use allocative::Allocative;
-use anymap::any::Any;
-use anymap::Map;
-use parking_lot::Mutex;
-use parking_lot::MutexGuard;
-
-use crate::api::error::DiceError;
-use crate::api::error::DiceResult;
-use crate::api::key::Key;
-use crate::legacy::incremental::versions::MinorVersion;
-use crate::legacy::incremental::versions::VersionForWrites;
-use crate::legacy::incremental::versions::VersionGuard;
-use crate::legacy::DiceLegacy;
-use crate::versions::VersionNumber;
-use crate::HashSet;
-
-/// Increment/decrement the number of active transactions.
-#[derive(Allocative)]
-pub(crate) struct ActiveTransactionCountGuard {
-    dice: Weak<DiceLegacy>,
-}
-
-impl ActiveTransactionCountGuard {
-    pub(crate) fn new(dice: &Arc<DiceLegacy>) -> ActiveTransactionCountGuard {
-        dice.active_transaction_count
-            .fetch_add(1, std::sync::atomic::Ordering::SeqCst);
-        ActiveTransactionCountGuard {
-            dice: Arc::downgrade(dice),
-        }
-    }
-
-    #[cfg(test)]
-    pub(crate) fn testing_new() -> ActiveTransactionCountGuard {
-        ActiveTransactionCountGuard { dice: Weak::new() }
-    }
-}
-
-impl Drop for ActiveTransactionCountGuard {
-    fn drop(&mut self) {
-        if let Some(dice) = self.dice.upgrade() {
-            dice.active_transaction_count
-                .fetch_sub(1, std::sync::atomic::Ordering::SeqCst);
-        }
-    }
-}
-
-/// A context for evaluating in the Engine.
-/// The context is valid for computing the entire subgraph of a particular key
-/// and contains all the dependency and version tracking information.
-///
-/// TODO express validity with lifetimes
-#[derive(Allocative)]
-pub(crate) struct TransactionCtx {
-    version_guard: VersionGuard,
-    version_for_writes: VersionForWrites,
-    changes: Mutex<Changes>,
-    _active_transaction_count_guard: ActiveTransactionCountGuard,
-}
-
-impl TransactionCtx {
-    pub(crate) fn new(
-        version_guard: VersionGuard,
-        version_for_writes: VersionForWrites,
-        changes: Changes,
-        active_transaction_count_guard: ActiveTransactionCountGuard,
-    ) -> Self {
-        Self {
-            version_guard,
-            version_for_writes,
-            changes: Mutex::new(changes),
-            _active_transaction_count_guard: active_transaction_count_guard,
-        }
-    }
-
-    pub(crate) fn get_version(&self) -> VersionNumber {
-        self.version_guard.version
-    }
-
-    pub(crate) fn get_minor_version(&self) -> MinorVersion {
-        *self.version_guard.minor_version_guard
-    }
-
-    pub(crate) fn get_version_for_writes(&self) -> VersionNumber {
-        self.version_for_writes.get()
-    }
-
-    pub(crate) fn changes(&self) -> MutexGuard<'_, Changes> {
-        self.changes.lock()
-    }
-
-    #[cfg(test)]
-    pub(crate) fn testing_new(v: VersionNumber) -> Self {
-        Self {
-            version_guard: VersionGuard::testing_new(
-                crate::legacy::incremental::versions::VersionTracker::new(Box::new(|_| {})),
-                v,
-                crate::legacy::incremental::versions::MinorVersionGuard::testing_new(0),
-            ),
-            version_for_writes: VersionForWrites::testing_new(v),
-            changes: Mutex::new(Changes::new()),
-            _active_transaction_count_guard: ActiveTransactionCountGuard::testing_new(),
-        }
-    }
-
-    pub(crate) fn commit(self) -> VersionGuard {
-        let is_changed = {
-            let mut changed = self.changes();
-            let version_for_writes = self.get_version_for_writes();
-            let num_changes = changed.ops().len();
-            debug!(
-                old_version = ?self.version_guard.version,
-                version_for_writes = ?version_for_writes,
-                msg = "committing new changes",
-                num_changes = num_changes
-            );
-
-            changed.ops().drain(..).fold(false, |has_change, change| {
-                change(version_for_writes) || has_change
-            })
-        };
-
-        if is_changed {
-            debug!(
-                old_version = %self.version_guard.version,
-                version_for_writes = %self.get_version_for_writes(),
-                msg = "committed new changes",
-            );
-        } else {
-            debug!(version = %self.version_guard.version, msg = "no changes to commit");
-            self.version_for_writes.rollback()
-        }
-
-        self.version_guard
-    }
-}
-
-#[derive(Allocative)]
-pub(crate) struct Changes {
-    #[allocative(skip)] // TODO(nga): measure.
-    keys: Map,
-    #[allocative(skip)] // TODO(nga): measure.
-    changes: Vec<Box<dyn FnOnce(VersionNumber) -> bool + Send>>,
-}
-
-impl Changes {
-    pub(crate) fn new() -> Self {
-        Self {
-            keys: Map::new(),
-            changes: vec![],
-        }
-    }
-
-    pub(crate) fn change<K: Key>(
-        &mut self,
-        key: K,
-        change: Box<dyn FnOnce(VersionNumber) -> bool + Send>,
-    ) -> DiceResult<()> {
-        let map = self
-            .keys
-            .entry::<HashSet<K>>()
-            .or_insert_with(HashSet::default);
-        if !map.insert(key.clone()) {
-            Err(DiceError::duplicate(Arc::new(key)))
-        } else {
-            self.changes.push(change);
-            Ok(())
-        }
-    }
-
-    pub fn ops(&mut self) -> &mut Vec<Box<dyn FnOnce(VersionNumber) -> bool + Send>> {
-        &mut self.changes
-    }
-}
diff --git a/dice/dice/src/legacy/incremental/versions.rs b/dice/dice/src/legacy/incremental/versions.rs
deleted file mode 100644
index ebc7b23fc467f..0000000000000
--- a/dice/dice/src/legacy/incremental/versions.rs
+++ /dev/null
@@ -1,638 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */
-
-use std::cell::UnsafeCell;
-use std::collections::hash_map::Entry;
-use std::collections::HashMap;
-use std::fmt;
-use std::fmt::Debug;
-use std::fmt::Formatter;
-use std::ops::Deref;
-use std::sync::Arc;
-use std::sync::OnceLock;
-use std::sync::Weak;
-
-use allocative::Allocative;
-use derive_more::Display;
-use dupe::Dupe;
-use parking_lot::lock_api::RawMutex as RawMutexApi;
-use parking_lot::Mutex;
-use parking_lot::RawMutex;
-use parking_lot::RwLock;
-
-use crate::versions::VersionNumber;
-
-/// The minor version associated with the major `VersionNumber`. The minor version is an
-/// identifier for all currently active computations of a particular `VersionNumber`.
-/// So, for all computations currently active at a particular `VersionNumber`, they all share the
-/// same `MinorVersion`. Furthermore, each time a computation occurs at a `VersionNumber`, if there
-/// are no currently active computations at that `VersionNumber`, the `MinorVersion` is increased.
-#[derive(Copy, Eq, Debug, Display, Dupe)]
-// split this due to formatters not agreeing
-#[derive(PartialEq, Hash, Clone, Ord, PartialOrd, Allocative)]
-#[display(fmt = "m{}", "_0")]
-pub(crate) struct MinorVersion(usize);
-
-impl MinorVersion {
-    const ZERO: MinorVersion = MinorVersion(0);
-
-    pub(crate) fn next(&self) -> MinorVersion {
-        MinorVersion(self.0 + 1)
-    }
-}
-
-#[cfg(test)]
-impl MinorVersion {
-    pub(crate) fn testing_new(num: usize) -> Self {
-        MinorVersion(num)
-    }
-}
-
-// A bit of a weird type to put the MinorVersion in Arc, but we do it to
-// have weak guards.
-#[cfg_attr(feature = "gazebo_lint", allow(gazebo_lint_arc_on_dupe))]
-#[derive(Allocative)]
-pub(crate) struct MinorVersionGuard(Arc<MinorVersion>);
-#[derive(Allocative)]
-pub(crate) struct MinorVersionWeak(Weak<MinorVersion>);
-
-impl MinorVersionGuard {
-    #[cfg(test)]
-    pub(crate) fn testing_new(m_v: usize) -> Self {
-        Self(Arc::new(MinorVersion(m_v)))
-    }
-
-    pub(crate) fn downgrade(&self) -> MinorVersionWeak {
-        MinorVersionWeak(Arc::downgrade(&self.0))
-    }
-}
-
-impl Deref for MinorVersionGuard {
-    type Target = MinorVersion;
-
-    fn deref(&self) -> &Self::Target {
-        &self.0
-    }
-}
-
-impl MinorVersionWeak {
-    pub(crate) fn new() -> MinorVersionWeak {
-        MinorVersionWeak(Weak::new())
-    }
-
-    pub(crate) fn upgrade(&self) -> Option<MinorVersionGuard> {
-        self.0.upgrade().map(MinorVersionGuard)
-    }
-}
-
-/// The version to write to if updating the computations. This version is only
-/// queried from the 'VersionTracker' when the underlying version is requested,
-/// and only committed when this value is dropped.
-#[derive(Allocative)]
-pub(crate) struct VersionForWrites {
-    v: OnceLock<VersionWriteGuard>,
-    version_tracker: Arc<VersionTracker>,
-}
-
-impl VersionForWrites {
-    fn new(version_tracker: Arc<VersionTracker>) -> Self {
-        Self {
-            v: OnceLock::new(),
-            version_tracker,
-        }
-    }
-
-    /// actually gets the version number to use for updates.
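-    // NOTE(editor): clarifying comment, not in the original source. `get` is
-    // where a write version is actually allocated: the `OnceLock` above stays
-    // empty until the first call, so merely constructing a `VersionForWrites`
-    // never bumps the global counter, and `rollback` can hand an unused number
-    // back. The `write_version_is_lazy` test at the bottom of this file
-    // exercises exactly this behaviour.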
-    pub(crate) fn get(&self) -> VersionNumber {
-        let v = self.v.get_or_init(|| self.version_tracker.next());
-        v.v
-    }
-
-    /// records no writes, so undo the write version increase
-    pub(crate) fn rollback(mut self) {
-        if let Some(guard) = self.v.take() {
-            self.version_tracker.prev(guard);
-        }
-    }
-
-    #[cfg(test)]
-    pub(crate) fn testing_new(v: VersionNumber) -> Self {
-        let lock = Arc::new(RawMutex::INIT);
-        lock.lock();
-
-        Self {
-            v: OnceLock::from(VersionWriteGuard { lock, v }),
-            version_tracker: VersionTracker::new(Box::new(|_| {})),
-        }
-    }
-}
-
-#[derive(Allocative)]
-struct VersionWriteGuard {
-    lock: Arc<RawMutex>,
-    v: VersionNumber,
-}
-
-impl Drop for VersionWriteGuard {
-    fn drop(&mut self) {
-        unsafe { self.lock.unlock() };
-    }
-}
-
-unsafe impl Send for VersionWriteGuard {}
-unsafe impl Sync for VersionWriteGuard {}
-
-impl Drop for VersionForWrites {
-    fn drop(&mut self) {
-        self.v
-            .get()
-            .iter()
-            .for_each(|v| self.version_tracker.update(&v.v))
-    }
-}
-
-/// Tracks the currently in-flight versions for updates and reads to ensure
-/// values are up to date.
-#[derive(Allocative)]
-pub(crate) struct VersionTracker {
-    /// Run when versions update. If the version number is present, that was a version that was
-    /// just deleted.
-    #[allocative(skip)]
-    on_update: Box<dyn Fn(VersionTrackerUpdateNotification)>,
-    current: RwLock<VersionToMinor>,
-    /// Tracks the currently active versions and how many contexts are holding each of them.
-    active_versions: Mutex<HashMap<VersionNumber, usize>>,
-    /// use a RawMutex here so that we can lock and unlock using our custom `VersionWriteGuard`
-    /// that is Send and Sync, so that the write version can be sent across multiple threads for
-    /// Dice updates, while guaranteeing that the lock is held so that updates to dice are mutually
-    /// exclusive.
-    write_lock: Arc<RawMutex>,
-    /// locked by write_lock above
-    #[allocative(skip)]
-    write_version: UnsafeCell<VersionNumber>,
-}
-
-enum VersionTrackerUpdate {
-    Added(VersionNumber),
-    Deleted(VersionNumber),
-}
-
-pub(crate) struct VersionTrackerUpdateNotification<'a> {
-    update: VersionTrackerUpdate,
-    active_versions: &'a HashMap<VersionNumber, usize>,
-}
-
-impl fmt::Debug for VersionTrackerUpdateNotification<'_> {
-    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
-        write!(f, "VersionTrackerUpdateNotification {{ ")?;
-        match self.update {
-            VersionTrackerUpdate::Added(v) => write!(f, "added = {}, ", v)?,
-            VersionTrackerUpdate::Deleted(v) => write!(f, "deleted = {}, ", v)?,
-        };
-        write!(f, "active = {:?}, ", self.active_versions)?;
-        write!(f, "}}")?;
-        Ok(())
-    }
}
-
-impl VersionTrackerUpdateNotification<'_> {
-    /// If any version was just deleted, which version that is.
-    pub(crate) fn deleted_version(&self) -> Option<VersionNumber> {
-        match self.update {
-            VersionTrackerUpdate::Added(..) => None,
-            VersionTrackerUpdate::Deleted(v) => Some(v),
-        }
-    }
-
-    /// The number of active versions
-    pub(crate) fn active_version_count(&self) -> usize {
-        self.active_versions.len()
-    }
-}
-
-unsafe impl Send for VersionTracker {}
-unsafe impl Sync for VersionTracker {}
-
-/// Provides the current version for reading, tracking the number of active references
-#[derive(Allocative)]
-pub(crate) struct VersionGuard {
-    tracker: Arc<VersionTracker>,
-    pub(crate) version: VersionNumber,
-    pub(crate) minor_version_guard: MinorVersionGuard,
-}
-
-impl VersionGuard {
-    #[cfg(test)]
-    pub(crate) fn testing_new(
-        tracker: Arc<VersionTracker>,
-        version: VersionNumber,
-        minor_version_guard: MinorVersionGuard,
-    ) -> Self {
-        *tracker.active_versions.lock().entry(version).or_default() += 1;
-
-        Self {
-            tracker,
-            version,
-            minor_version_guard,
-        }
-    }
-}
-
-impl Drop for VersionGuard {
-    fn drop(&mut self) {
-        let mut active_versions = self.tracker.active_versions.lock();
-        let entry = active_versions.entry(self.version);
-
-        let cleanup = match entry {
-            Entry::Occupied(mut entry) => {
-                *entry.get_mut() -= 1;
-                if *entry.get() == 0 {
-                    entry.remove();
-                    true
-                } else {
-                    false
-                }
-            }
-            Entry::Vacant(_) => {
-                unreachable!("this version is active so it should exist in the map");
-            }
-        };
-
-        if cleanup {
-            (self.tracker.on_update)(VersionTrackerUpdateNotification {
-                update: VersionTrackerUpdate::Deleted(self.version),
-                active_versions: &active_versions,
-            });
-        }
-    }
-}
-
-#[derive(Allocative)]
-struct VersionToMinor {
-    version: VersionNumber,
-    /// index of the vec is the version, which indexes into the corresponding minor version tracker
-    minor_version_tracker: Vec<MinorVersionTracker>,
-}
-
-impl VersionTracker {
-    pub(crate) fn new(on_update: Box<dyn Fn(VersionTrackerUpdateNotification)>) -> Arc<VersionTracker> {
-        Arc::new(VersionTracker {
-            on_update,
-            current: RwLock::new(VersionToMinor {
-                version: VersionNumber::ZERO,
-                minor_version_tracker: vec![MinorVersionTracker::new()],
-            }),
-            active_versions: Mutex::new(HashMap::new()),
-            write_lock: Arc::new(RawMutex::INIT),
-            write_version: UnsafeCell::new(VersionNumber::ZERO),
-        })
-    }
-
-    /// request an increase in the global version number. This returns a
-    /// 'VersionNumber' that holds the next available version number. Note
-    /// that the new version isn't committed to be the new global current
-    /// version until the 'VersionNumber' is dropped.
-    fn next(&self) -> VersionWriteGuard {
-        self.write_lock.lock();
-        let v = unsafe { &mut *self.write_version.get() };
-        v.inc();
-
-        VersionWriteGuard {
-            lock: self.write_lock.dupe(),
-            v: *v,
-        }
-    }
-
-    /// request a decrease in the global version number. This will make the next available
-    /// version number one lower.
-    fn prev(&self, _guard: VersionWriteGuard) {
-        // lock is held by the guard
-        let v = unsafe { &mut *self.write_version.get() };
-        debug_assert!(&_guard.v == v);
-        v.dec();
-    }
-
-    /// hands out the current "latest" committed version and its corresponding
-    /// minor version. The "latest" version is the most recent version number that was given
-    /// via `next`, not the most recently committed version.
-    ///
-    /// the minor version is updated such that it is incremented per major version, only when
-    /// there are no active owners of the minor version.
- pub(crate) fn current(self: &Arc) -> VersionGuard { - let cur = self.current.read(); - let v = cur.version; - let m = cur.minor_version_tracker[v.0].acquire(); - - let mut active_versions = self.active_versions.lock(); - *active_versions.entry(v).or_default() += 1; - - (self.on_update)(VersionTrackerUpdateNotification { - update: VersionTrackerUpdate::Added(v), - active_versions: &active_versions, - }); - - VersionGuard { - tracker: self.dupe(), - version: v, - minor_version_guard: m, - } - } - - /// Requests the 'WriteVersion' that is intended to be used for updates to - /// the incremental computations - pub(crate) fn write(self: &Arc) -> VersionForWrites { - VersionForWrites::new(self.dupe()) - } - - /// updates the current version to the latest of the currently stored - /// version and the given - fn update(&self, v: &VersionNumber) { - let mut cur = self.current.write(); - - if cur.version < *v { - cur.minor_version_tracker - .resize_with(v.0 + 1, MinorVersionTracker::new); - cur.version = *v; - } - } -} - -#[derive(Allocative)] -struct MinorVersionTracker { - m_v: RwLock<(MinorVersionWeak, MinorVersion)>, -} - -impl MinorVersionTracker { - fn new() -> Self { - Self { - m_v: RwLock::new((MinorVersionWeak::new(), MinorVersion::ZERO)), - } - } - - fn acquire(&self) -> MinorVersionGuard { - if let Some(m_v) = self.m_v.read().0.upgrade() { - return m_v; - } - let mut v = self.m_v.write(); - if let Some(m_v) = v.0.upgrade() { - m_v - } else { - let new_mv = MinorVersionGuard(Arc::new(v.1)); - let new_v = v.1.next(); - *v = (new_mv.downgrade(), new_v); - new_mv - } - } -} - -#[cfg(test)] -mod tests { - use std::mem; - use std::sync::atomic::AtomicBool; - use std::sync::atomic::Ordering; - use std::sync::Arc; - use std::sync::Barrier; - use std::sync::Mutex; - use std::thread; - use std::time::Duration; - - use dupe::Dupe; - - use crate::legacy::incremental::versions::MinorVersion; - use crate::legacy::incremental::versions::MinorVersionTracker; - use crate::legacy::incremental::versions::VersionNumber; - use crate::legacy::incremental::versions::VersionTracker; - - #[test] - fn simple_version_increases() { - let vt = VersionTracker::new(Box::new(|_| {})); - let vg = vt.current(); - assert_eq!( - (vg.version, *vg.minor_version_guard), - (VersionNumber::new(0), MinorVersion::testing_new(0)) - ); - - vt.update(&vt.next().v); - - let vg = vt.current(); - assert_eq!( - (vg.version, *vg.minor_version_guard), - (VersionNumber::new(1), MinorVersion::testing_new(0)) - ); - } - - #[test] - fn active_versions_are_tracked() { - let cleaned = Arc::new(Mutex::new(None)); - - let vt = VersionTracker::new({ - let c = cleaned.dupe(); - Box::new(move |v| { - *c.lock().unwrap() = v.deleted_version(); - }) - }); - let vg1 = vt.current(); - assert_eq!(vg1.version, VersionNumber::new(0)); - assert_eq!(*vt.active_versions.lock().get(&vg1.version).unwrap(), 1); - - let vg2 = vt.current(); - assert_eq!(vg2.version, VersionNumber::new(0)); - assert_eq!(*vt.active_versions.lock().get(&vg2.version).unwrap(), 2); - - drop(vg2); - - assert!(cleaned.lock().unwrap().is_none()); - assert_eq!(*vt.active_versions.lock().get(&vg1.version).unwrap(), 1); - - { - let w = vt.write(); - w.get(); - } - - let vg3 = vt.current(); - assert_eq!(vg3.version, VersionNumber::new(1)); - assert_eq!(*vt.active_versions.lock().get(&vg3.version).unwrap(), 1); - - assert_eq!(*vt.active_versions.lock().get(&vg1.version).unwrap(), 1); - - drop(vg3); - - assert!( - vt.active_versions - .lock() - .get(&VersionNumber::new(1)) - .is_none() - ); - 
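The `MinorVersionTracker::acquire` path above is a double-checked weak-handle cache: all concurrent readers share one `Arc` per minor version, and the value only advances once every holder has dropped. A minimal, self-contained sketch of that pattern follows; the `Slot` type, its field layout, and the use of a plain `u64` are illustrative stand-ins, not DICE's types:

```rust
use std::sync::{Arc, RwLock, Weak};

// Hypothetical stand-in for the minor version tracker: a counter whose
// current value is handed out behind a shared Arc.
struct Slot {
    // (weak handle to the live value, next value to hand out)
    state: RwLock<(Weak<u64>, u64)>,
}

impl Slot {
    fn new() -> Self {
        Slot {
            state: RwLock::new((Weak::new(), 0)),
        }
    }

    // Clone the live handle if any holder is still alive; otherwise install
    // a fresh one and advance the value, like `acquire` above.
    fn acquire(&self) -> Arc<u64> {
        if let Some(live) = self.state.read().unwrap().0.upgrade() {
            return live;
        }
        let mut st = self.state.write().unwrap();
        // Re-check under the write lock: another thread may have won the race.
        if let Some(live) = st.0.upgrade() {
            return live;
        }
        let fresh = Arc::new(st.1);
        st.1 += 1;
        st.0 = Arc::downgrade(&fresh);
        fresh
    }
}

fn main() {
    let slot = Slot::new();
    let a = slot.acquire();
    let b = slot.acquire();
    assert_eq!((*a, *b), (0, 0)); // same generation while any handle is held
    drop(a);
    drop(b);
    assert_eq!(*slot.acquire(), 1); // advances only once all handles drop
}
```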
assert_eq!(*cleaned.lock().unwrap(), Some(VersionNumber::new(1))); - - assert_eq!(*vt.active_versions.lock().get(&vg1.version).unwrap(), 1); - - drop(vg1); - - assert_eq!(*cleaned.lock().unwrap(), Some(VersionNumber::new(0))); - assert!( - vt.active_versions - .lock() - .get(&VersionNumber::new(0)) - .is_none() - ); - } - - #[test] - fn write_version_commits_on_drop() { - let vt = VersionTracker::new(Box::new(|_| {})); - { - let vg = vt.current(); - assert_eq!( - (vg.version, *vg.minor_version_guard), - (VersionNumber::new(0), MinorVersion::testing_new(0)) - ); - } - - { - let v1 = vt.write(); - assert_eq!(v1.get(), VersionNumber::new(1)); - let vg = vt.current(); - assert_eq!( - (vg.version, *vg.minor_version_guard), - (VersionNumber::new(0), MinorVersion::testing_new(1)) - ); - - std::mem::drop(vg); - - let vg = vt.current(); - assert_eq!( - (vg.version, *vg.minor_version_guard), - (VersionNumber::new(0), MinorVersion::testing_new(2)) - ); - - std::mem::drop(v1); - - let v2 = vt.write(); - assert_eq!(v2.get(), VersionNumber::new(2)); - let vg = vt.current(); - assert_eq!( - (vg.version, *vg.minor_version_guard), - (VersionNumber::new(1), MinorVersion::testing_new(0)) - ); - std::mem::drop(v2); - - let vg = vt.current(); - assert_eq!( - (vg.version, *vg.minor_version_guard), - (VersionNumber::new(2), MinorVersion::testing_new(0)) - ); - } - { - let vg = vt.current(); - assert_eq!( - (vg.version, *vg.minor_version_guard), - (VersionNumber::new(2), MinorVersion::testing_new(1)) - ); - } - } - - #[test] - fn write_version_is_lazy() { - let vt = VersionTracker::new(Box::new(|_| {})); - - let write1 = vt.write(); - let write2 = vt.write(); - - assert_eq!(write1.v.get().is_some(), false); - assert_eq!(write2.v.get().is_some(), false); - - // getting write2 first gives it the lower number - assert_eq!(write2.get(), VersionNumber::new(1)); - mem::drop(write2); - - assert_eq!(write1.get(), VersionNumber::new(2)); - } - - #[test] - fn write_version_rollbacks() { - let vt = VersionTracker::new(Box::new(|_| {})); - - let write1 = vt.write(); - let write2 = vt.write(); - let write3 = vt.write(); - let write4 = vt.write(); - - assert!(write1.v.get().is_none()); - assert!(write2.v.get().is_none()); - assert!(write3.v.get().is_none()); - assert!(write4.v.get().is_none()); - - assert_eq!(write2.get(), VersionNumber::new(1)); - write2.rollback(); - - assert_eq!(write1.get(), VersionNumber::new(1)); - drop(write1); - - // never attempted to get a version can still rollback properly - write3.rollback(); - assert_eq!(write4.get(), VersionNumber::new(2)); - } - - #[test] - fn minor_version_updates_only_when_no_refs() { - let vt = MinorVersionTracker::new(); - - { - let m1 = vt.acquire(); - assert_eq!(*m1, MinorVersion::testing_new(0)); - - let m2 = vt.acquire(); - assert_eq!(*m2, MinorVersion::testing_new(0)); - - mem::drop(m1); - let m3 = vt.acquire(); - assert_eq!(*m3, MinorVersion::testing_new(0)); - } - - let m = vt.acquire(); - assert_eq!(*m, MinorVersion::testing_new(1)); - } - - #[test] - fn version_write_is_exclusive() { - let tracker = VersionTracker::new(Box::new(|_| {})); - let write_v = tracker.write(); - assert_eq!(write_v.get(), VersionNumber::new(1)); - - let barrier = Arc::new(Barrier::new(2)); - - let is_ran = Arc::new(AtomicBool::new(false)); - - let handle = thread::spawn({ - let tracker = tracker.dupe(); - let is_ran = is_ran.dupe(); - let barrier = barrier.dupe(); - move || { - barrier.wait(); - - let write_v = tracker.write(); - assert_eq!(write_v.get(), VersionNumber::new(2)); - - 
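The `write_version_is_lazy` and `write_version_rollbacks` tests above pin down the other half of the design: a writer only claims a number from the shared counter on first use, and an unclaimed or rolled-back write hands its number back. A toy, single-threaded model of that contract, with hypothetical names; the real tracker additionally holds `write_lock` for the whole exchange, which this sketch does not attempt to reproduce:

```rust
use std::sync::OnceLock;
use std::sync::atomic::{AtomicU64, Ordering};

// Toy model of VersionForWrites: the number is taken from the shared
// counter only on first use, so untouched writers cost nothing.
struct LazyWrite<'a> {
    counter: &'a AtomicU64,
    v: OnceLock<u64>,
}

impl<'a> LazyWrite<'a> {
    fn new(counter: &'a AtomicU64) -> Self {
        LazyWrite { counter, v: OnceLock::new() }
    }

    fn get(&self) -> u64 {
        // fetch_add returns the previous value, so the claimed version is +1.
        *self.v.get_or_init(|| self.counter.fetch_add(1, Ordering::SeqCst) + 1)
    }

    fn rollback(self) {
        // Only a writer that actually claimed a number gives one back.
        // (Safe here only because this toy is single-threaded; the real
        // implementation relies on the held write lock.)
        if self.v.get().is_some() {
            self.counter.fetch_sub(1, Ordering::SeqCst);
        }
    }
}

fn main() {
    let counter = AtomicU64::new(0);
    let w1 = LazyWrite::new(&counter);
    let w2 = LazyWrite::new(&counter);
    assert_eq!(w2.get(), 1); // first to ask gets the lower number
    w2.rollback();           // undone: the next claim reuses 1
    assert_eq!(w1.get(), 1);
}
```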
is_ran.store(true, Ordering::SeqCst); - } - }); - - barrier.wait(); - // sadly there's still a race that the spawned thread might not execute the call to `write()` - // so we just sleep here briefly and hope. - thread::sleep(Duration::from_secs(1)); - - assert_eq!(is_ran.load(Ordering::SeqCst), false); - - mem::drop(write_v); - - handle.join().unwrap(); - } -} diff --git a/dice/dice/src/legacy/key.rs b/dice/dice/src/legacy/key.rs deleted file mode 100644 index 24f8e3a04bf30..0000000000000 --- a/dice/dice/src/legacy/key.rs +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::fmt::Debug; -use std::sync::Arc; -use std::sync::Weak; - -use allocative::Allocative; - -use crate::api::key::Key; -use crate::api::storage_type::StorageType; -use crate::legacy::incremental::graph::storage_properties::StorageProperties; -use crate::legacy::DiceLegacy; - -#[derive(Debug, Allocative)] -#[allocative(bound = "")] -pub(crate) struct StoragePropertiesForKey { - _k: std::marker::PhantomData, - pub(crate) dice: Weak, -} - -impl StoragePropertiesForKey { - pub(crate) fn new(dice: &Arc) -> StoragePropertiesForKey { - StoragePropertiesForKey { - _k: std::marker::PhantomData, - dice: Arc::downgrade(dice), - } - } -} - -impl StorageProperties for StoragePropertiesForKey -where - K: Key, -{ - type Key = K; - type Value = K::Value; - - fn storage_type(&self) -> StorageType { - K::storage_type() - } - - fn equality(&self, x: &Self::Value, y: &Self::Value) -> bool { - K::equality(x, y) - } - - fn validity(&self, x: &Self::Value) -> bool { - K::validity(x) - } - - fn key_type_name() -> &'static str { - K::key_type_name() - } - - fn to_key_any(key: &Self::Key) -> &dyn std::any::Any { - key - } -} diff --git a/dice/dice/src/legacy/map.rs b/dice/dice/src/legacy/map.rs deleted file mode 100644 index e54100b481710..0000000000000 --- a/dice/dice/src/legacy/map.rs +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::sync::Arc; - -use allocative::Allocative; -use anymap::any::Any; -use anymap::Map; -use dupe::Dupe; -use dupe::OptionDupedExt; - -use crate::legacy::incremental::ErasedEngine; -use crate::legacy::incremental::IncrementalComputeProperties; -use crate::legacy::incremental::IncrementalEngine; - -/// A dynamically typed Map for DICE to map computations to their key, value -/// cache maps. 
-#[derive(Allocative)] -pub(crate) struct DiceMap { - #[allocative(skip)] - typed: Map, - erased: Vec>, -} - -impl DiceMap { - pub(crate) fn new() -> Self { - Self { - typed: Map::new(), - erased: Vec::new(), - } - } - - pub(crate) fn find_cache_opt(&self) -> Option>> - where - S: IncrementalComputeProperties, - { - self.typed.get::>>().duped() - } - - /// finds the computation cache for the given computation type - pub(crate) fn find_cache( - &mut self, - new: impl FnOnce() -> Arc>, - ) -> Arc> - where - S: IncrementalComputeProperties, - { - if let Some(cache) = self.typed.get::>>() { - cache.dupe() - } else { - let cache = new(); - self.typed.insert::>>(cache.dupe()); - self.erased - .push(cache.dupe() as Arc); - cache - } - } - - pub(crate) fn engines(&self) -> &[Arc] { - self.erased.as_slice() - } - - pub(crate) fn key_count(&self) -> usize { - self.erased - .iter() - .map(|e| e.introspect().len_for_introspection()) - .sum() - } - - pub(crate) fn currently_running_key_count(&self) -> usize { - self.erased - .iter() - .map(|e| e.introspect().currently_running_key_count()) - .sum() - } -} - -#[cfg(test)] -mod tests { - use allocative::Allocative; - use async_trait::async_trait; - use derive_more::Display; - use dupe::Dupe; - use more_futures::cancellation::CancellationContext; - - use crate::api::computations::DiceComputations; - use crate::api::cycles::DetectCycles; - use crate::api::key::Key; - use crate::legacy::incremental::testing::IncrementalEngineExt; - use crate::legacy::incremental::versions::MinorVersion; - use crate::legacy::incremental::IncrementalEngine; - use crate::legacy::map::DiceMap; - use crate::legacy::DiceLegacy; - use crate::versions::VersionNumber; - use crate::StoragePropertiesForKey; - - #[tokio::test] - async fn test_find_caches() { - #[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)] - #[display(fmt = "{:?}", self)] - struct MyKey; - #[derive(Clone, Dupe, Display, Debug, Eq, PartialEq, Allocative)] - #[display(fmt = "{:?}", self)] - struct Bar; - - #[async_trait] - impl Key for MyKey { - type Value = Bar; - - async fn compute( - &self, - _ctx: &mut DiceComputations, - _cancellations: &CancellationContext, - ) -> Self::Value { - panic!("value should be cached, not evaluated") - } - - fn equality(x: &Self::Value, y: &Self::Value) -> bool { - x == y - } - } - - let mut map = DiceMap::new(); - let dice = DiceLegacy::builder().build(DetectCycles::Enabled); - let cache = map.find_cache(|| IncrementalEngine::new(StoragePropertiesForKey::new(&dice))); - { - cache.update_injected_value(MyKey, VersionNumber::new(0), Bar); - assert_eq!( - cache - .get_cached(MyKey, VersionNumber::new(0), MinorVersion::testing_new(0)) - .val(), - &Bar - ) - } - } -} diff --git a/dice/dice/src/legacy/mod.rs b/dice/dice/src/legacy/mod.rs deleted file mode 100644 index 2aa0960a69302..0000000000000 --- a/dice/dice/src/legacy/mod.rs +++ /dev/null @@ -1,297 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -use std::fmt::Debug; -use std::sync::atomic::AtomicU32; -use std::sync::Arc; -use std::sync::Weak; - -use allocative::Allocative; -use async_trait::async_trait; -use dice_futures::future_handle::WeakDiceFutureHandle; -use dupe::Dupe; -use futures::future::Future; -use futures::StreamExt; -use gazebo::prelude::*; -use incremental::evaluator::Evaluator; -use incremental::graph::GraphNode; -use incremental::transaction_ctx::TransactionCtx; -use incremental::versions::VersionTracker; -use incremental::IncrementalComputeProperties; -use incremental::IncrementalEngine; -use key::StoragePropertiesForKey; -use map::DiceMap; -use more_futures::cancellation::CancellationContext; -use parking_lot::RwLock; -use projection::ProjectionKeyProperties; -use tokio::sync::watch; - -use crate::api::computations::DiceComputations; -use crate::api::cycles::DetectCycles; -use crate::api::data::DiceData; -use crate::api::error::DiceResult; -use crate::api::key::Key; -use crate::api::projection::ProjectionKey; -use crate::api::transaction::DiceTransactionUpdater; -use crate::api::user_data::UserComputationData; -use crate::ctx::DiceComputationsImpl; -use crate::legacy::ctx::ComputationData; -use crate::legacy::ctx::DiceComputationsImplLegacy; -use crate::legacy::incremental::dep_trackers::BothDeps; -use crate::metrics::Metrics; -use crate::transaction_update::DiceTransactionUpdaterImpl; - -pub(crate) mod ctx; -pub(crate) mod cycles; -pub(crate) mod dice_futures; -pub(crate) mod key; -pub(crate) mod map; -pub(crate) mod opaque; -pub(crate) mod projection; - -pub mod incremental; -#[cfg(test)] -mod tests; - -/// An incremental computation engine that executes arbitrary computations that -/// maps `Key`s to values. -#[derive(Allocative)] -pub(crate) struct DiceLegacy { - pub(crate) data: DiceData, - pub(crate) map: Arc>, - pub(crate) global_versions: Arc, - detect_cycles: DetectCycles, - /// Number of active transactions. - /// Or more precisely, the number of alive transaction context objects. - pub(crate) active_transaction_count: AtomicU32, - #[allocative(skip)] - active_versions_observer: watch::Receiver, -} - -impl Debug for DiceLegacy { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("Dice") - .field("detect_cycles", &self.detect_cycles) - .finish_non_exhaustive() - } -} - -pub(crate) struct DiceLegacyDataBuilder(DiceData); - -impl DiceLegacyDataBuilder { - pub(crate) fn new() -> Self { - Self(DiceData::new()) - } - - pub fn set(&mut self, val: K) { - self.0.set(val); - } - - pub fn build(self, detect_cycles: DetectCycles) -> Arc { - DiceLegacy::new(self.0, detect_cycles) - } -} - -impl DiceLegacy { - #[cfg(test)] - pub(crate) fn builder() -> DiceLegacyDataBuilder { - DiceLegacyDataBuilder::new() - } - - pub(crate) fn new(data: DiceData, detect_cycles: DetectCycles) -> Arc { - let map = Arc::new(RwLock::new(DiceMap::new())); - let weak_map = Arc::downgrade(&map); - let (active_versions_sender, active_versions_observer) = watch::channel(0); - - Arc::new(DiceLegacy { - data, - map, - global_versions: VersionTracker::new(Box::new(move |update| { - tracing::debug!("VersionTracker update: {:?}", update); - - if let Some(deleted) = update.deleted_version() { - if let Some(engines) = weak_map.upgrade() { - engines - .read() - .engines() - .map(|engine| engine.gc_version(deleted)); - } - } - - // If the corresponding Dice has been dropped, then so be it, ignore the error. 
- active_versions_sender.send_replace(update.active_version_count()); - })), - detect_cycles, - active_transaction_count: AtomicU32::new(0), - active_versions_observer, - }) - } - - pub fn updater(self: &Arc) -> DiceTransactionUpdater { - self.updater_with_data(UserComputationData::new()) - } - - pub fn updater_with_data( - self: &Arc, - extra: UserComputationData, - ) -> DiceTransactionUpdater { - let ctx = self.make_ctx(ComputationData::new(extra, self.detect_cycles)); - DiceTransactionUpdater(DiceTransactionUpdaterImpl::Legacy(ctx)) - } - - pub(crate) fn make_ctx( - self: &Arc, - extra: ComputationData, - ) -> Arc { - Arc::new(DiceComputationsImplLegacy::new_transaction( - self.dupe(), - self.global_versions.current(), - self.global_versions.write(), - extra, - )) - } - - /// finds the computation index for the given key - pub(crate) fn find_cache( - self: &Arc, - ) -> Arc>> - where - K: Key, - { - if let Some(cache) = self - .map - .read() - .find_cache_opt::>() - { - return cache; - } - - self.map - .write() - .find_cache(|| IncrementalEngine::new(StoragePropertiesForKey::::new(self))) - } - - pub(crate) fn find_projection_cache( - self: &Arc, - ) -> Arc>> - where - P: ProjectionKey, - { - if let Some(cache) = self - .map - .read() - .find_cache_opt::>() - { - return cache; - } - - self.map - .write() - .find_cache(|| IncrementalEngine::new(ProjectionKeyProperties::
<P>
    ::new(self))) - } - - pub(crate) fn unstable_take(self: &Arc) -> DiceMap { - debug!(msg = "clearing all Dice state"); - let mut map = self.map.write(); - std::mem::replace(&mut map, DiceMap::new()) - } - - pub fn detect_cycles(&self) -> &DetectCycles { - &self.detect_cycles - } - - pub fn metrics(&self) -> Metrics { - let dice_map = self.map.read(); - Metrics { - key_count: dice_map.key_count(), - currently_active_key_count: dice_map.currently_running_key_count(), - active_transaction_count: self - .active_transaction_count - .load(std::sync::atomic::Ordering::SeqCst), - } - } - - /// Wait until all active versions have exited. - pub fn wait_for_idle(&self) -> impl Future + 'static { - let obs = self.active_versions_observer.clone(); - let mut obs = tokio_stream::wrappers::WatchStream::new(obs); - - async move { - while let Some(v) = obs.next().await { - if v == 0 { - break; - } - } - } - } - - pub fn is_idle(&self) -> bool { - *self.active_versions_observer.borrow() == 0 - } -} - -#[derive(Clone, Dupe)] -struct Eval(Weak); - -#[async_trait] -impl IncrementalComputeProperties for StoragePropertiesForKey { - type DiceTask = WeakDiceFutureHandle; - - async fn recompute( - key: &Self::Key, - engine: &Arc>, - transaction_ctx: &Arc, - extra: &ComputationData, - ) -> DiceResult>> { - Ok(engine - .eval_entry_versioned(key, transaction_ctx, extra.subrequest::(key)?) - .await) - } -} - -#[async_trait] -impl Evaluator for StoragePropertiesForKey { - async fn eval( - &self, - k: &K, - transaction_ctx: Arc, - cancellations: &CancellationContext, - extra: ComputationData, - ) -> EvaluationResult { - let ctx = DiceComputationsImplLegacy::new_for_key_evaluation( - self.dice - .upgrade() - .expect("Dice holds DiceMap so it should still be alive here"), - transaction_ctx, - extra, - ); - - let value = k - .compute( - &mut DiceComputations(DiceComputationsImpl::Legacy(ctx.dupe())), - cancellations, - ) - .await; - - let (both_deps, extra) = ctx.finalize(); - - EvaluationResult { - value, - both_deps, - extra, - } - } -} - -/// Result of evaluation computation. -pub(crate) struct EvaluationResult { - pub(crate) value: T, - pub(crate) both_deps: BothDeps, - pub(crate) extra: ComputationData, -} diff --git a/dice/dice/src/legacy/opaque.rs b/dice/dice/src/legacy/opaque.rs deleted file mode 100644 index 0a76977f0b95d..0000000000000 --- a/dice/dice/src/legacy/opaque.rs +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::fmt; -use std::fmt::Debug; -use std::fmt::Formatter; -use std::sync::Arc; - -use dupe::Dupe; - -use crate::api::error::DiceResult; -use crate::api::key::Key; -use crate::api::projection::ProjectionKey; -use crate::legacy::ctx::DiceComputationsImplLegacy; -use crate::legacy::incremental::dep_trackers::BothDeps; -use crate::legacy::incremental::graph::GraphNode; -use crate::legacy::incremental::IncrementalEngine; -use crate::StoragePropertiesForKey; - -/// Computed value which is not directly visible to user. -/// -/// The value can be accessed only via "projection" operation, -/// so projection result is recorded as a dependency -/// of a computation which requested the opaqued value, -/// but the opaque value key is not. 
-pub(crate) struct OpaqueValueImplLegacy<'a, K: Key> {
-    /// Computed value.
-    pub(crate) value: GraphNode<StoragePropertiesForKey<K>>,
-    /// Computations which requested this value, parent of K.
-    pub(crate) parent_computations: &'a Arc<DiceComputationsImplLegacy>,
-    incremental_engine: Arc<IncrementalEngine<StoragePropertiesForKey<K>>>,
-}
-
-impl<'a, K> Debug for OpaqueValueImplLegacy<'a, K>
-where
-    K: Key,
-    K::Value: Debug,
-{
-    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
-        f.debug_struct("OpaqueValue")
-            .field("key", self.value.key())
-            .field("value", self.value.val())
-            .finish_non_exhaustive()
-    }
-}
-
-impl<'a, K: Key> OpaqueValueImplLegacy<'a, K> {
-    pub(crate) fn new(
-        value: GraphNode<StoragePropertiesForKey<K>>,
-        parent_computations: &'a Arc<DiceComputationsImplLegacy>,
-        incremental_engine: Arc<IncrementalEngine<StoragePropertiesForKey<K>>>,
-    ) -> OpaqueValueImplLegacy<'a, K> {
-        OpaqueValueImplLegacy {
-            value,
-            parent_computations,
-            incremental_engine,
-        }
-    }
-
-    pub(crate) fn key(&self) -> &K {
-        self.value.key()
-    }
-
-    pub(crate) fn as_both_deps(&self) -> BothDeps {
-        BothDeps::only_one_dep(
-            self.parent_computations.transaction_ctx.get_version(),
-            self.value.dupe(),
-            &self.incremental_engine,
-        )
-    }
-
-    /// Get a value and record parent computation dependency on `K`.
-    pub(crate) fn into_value(self) -> K::Value {
-        let value = self.value.val().dupe();
-
-        // Track dependencies.
-        self.parent_computations.dep_trackers.record(
-            self.parent_computations.transaction_ctx.get_version(),
-            self.incremental_engine,
-            self.value,
-        );
-
-        value
-    }
-
-    pub(crate) fn projection<P>(&self, projection_key: &P) -> DiceResult<P::Value>
-    where
-        P: ProjectionKey<DeriveFromKey = K>,
-    {
-        self.parent_computations
-            .compute_projection_sync(self, projection_key)
-    }
-}
diff --git a/dice/dice/src/legacy/projection.rs b/dice/dice/src/legacy/projection.rs
deleted file mode 100644
index d49f5fc96c9e9..0000000000000
--- a/dice/dice/src/legacy/projection.rs
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */
-
-//! "Projection" keys: synchronously computed keys from "opaque" values.
-
-use std::fmt::Debug;
-use std::hash::Hash;
-use std::sync::Arc;
-use std::sync::Weak;
-
-use allocative::Allocative;
-use async_trait::async_trait;
-
-use crate::api::error::DiceResult;
-use crate::api::projection::ProjectionKey;
-use crate::api::storage_type::StorageType;
-use crate::introspection::graph::short_type_name;
-use crate::legacy::ctx::ComputationData;
-use crate::legacy::dice_futures::sync_handle::SyncDiceTaskHandle;
-use crate::legacy::incremental::graph::storage_properties::StorageProperties;
-use crate::legacy::incremental::IncrementalComputeProperties;
-use crate::legacy::incremental::IncrementalEngine;
-use crate::legacy::DiceLegacy;
-use crate::GraphNode;
-use crate::TransactionCtx;
-
-/// Actual key in the graph (well, actual key is `StorageKeyForKey<ProjectionKeyAsKey<P>>`).
-/// `ProjectionKey` alone does not have information about what it is derived from.
-/// So we record dependency on projection key, we depend on this key.
-#[derive(Debug, derive_more::Display, Clone, PartialEq, Eq, Hash, Allocative)]
-#[display(fmt = "({}, {})", derive_from_key, k)]
-pub(crate) struct ProjectionKeyAsKey<P: ProjectionKey> {
-    pub(crate) derive_from_key: P::DeriveFromKey,
-    pub(crate) k: P,
-}
-
-#[derive(Debug, Allocative)]
-#[allocative(bound = "")]
-pub(crate) struct ProjectionKeyProperties<P: ProjectionKey> {
-    _marker: std::marker::PhantomData<P>,
-    pub(crate) dice: Weak<DiceLegacy>,
-}
-
-impl<P: ProjectionKey> ProjectionKeyProperties<P> {
-    pub(crate) fn new(dice: &Arc<DiceLegacy>) -> Self {
-        ProjectionKeyProperties {
-            _marker: std::marker::PhantomData,
-            dice: Arc::downgrade(dice),
-        }
-    }
-}
-
-impl<P: ProjectionKey> StorageProperties for ProjectionKeyProperties<P> {
-    type Key = ProjectionKeyAsKey<P>;
-    type Value = P::Value;
-
-    fn storage_type(&self) -> StorageType {
-        StorageType::LastN(1)
-    }
-
-    fn equality(&self, x: &Self::Value, y: &Self::Value) -> bool {
-        P::equality(x, y)
-    }
-
-    fn validity(&self, x: &Self::Value) -> bool {
-        P::validity(x)
-    }
-
-    /// Provides a short informative name for this projection type.
-    fn key_type_name() -> &'static str {
-        short_type_name(std::any::type_name::<P>())
-    }
-
-    fn to_key_any(key: &Self::Key) -> &dyn std::any::Any {
-        &key.k
-    }
-}
-
-#[async_trait]
-impl<P: ProjectionKey> IncrementalComputeProperties for ProjectionKeyProperties<P>
    { - type DiceTask = SyncDiceTaskHandle; - - async fn recompute( - key: &Self::Key, - engine: &Arc>, - transaction_ctx: &Arc, - extra: &ComputationData, - ) -> DiceResult> { - engine - .recompute_projection(key, transaction_ctx, extra.subrequest::(key)?) - .await - } -} diff --git a/dice/dice/src/legacy/tests/mod.rs b/dice/dice/src/legacy/tests/mod.rs deleted file mode 100644 index 8dab1549249f3..0000000000000 --- a/dice/dice/src/legacy/tests/mod.rs +++ /dev/null @@ -1,995 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::sync::atomic::AtomicBool; -use std::sync::atomic::Ordering; -use std::sync::Barrier; -use std::sync::Mutex; -use std::time::Duration; - -use assert_matches::assert_matches; -use async_trait::async_trait; -use derivative::Derivative; -use derive_more::Display; -use dupe::Dupe; -use futures::future::FutureExt; -use futures::future::Shared; -use more_futures::cancellation::CancellationContext; -use tokio::sync::oneshot; -use tokio::time::timeout; - -use super::*; -use crate::api::computations::DiceComputations; -use crate::api::cycles::DetectCycles; -use crate::api::error::DiceErrorImpl; -use crate::api::injected::InjectedKey; -use crate::api::key::Key; -use crate::api::user_data::UserComputationData; -use crate::legacy::ctx::testing::DiceCtxExt; -use crate::legacy::incremental::evaluator::testing::EvaluatorUnreachable; -use crate::legacy::incremental::testing::DependencyExt; -use crate::legacy::incremental::testing::IncrementalEngineExt; -use crate::legacy::incremental::testing::VersionedCacheResultAssertsExt; -use crate::legacy::incremental::versions::MinorVersion; -use crate::versions::VersionNumber; -use crate::HashSet; -use crate::UserCycleDetector; -use crate::UserCycleDetectorGuard; - -#[derive(Clone, Dupe, Debug, Display, Eq, Hash, PartialEq, Allocative)] -#[display(fmt = "{:?}", self)] -struct Foo(i32); - -#[async_trait] -impl InjectedKey for Foo { - type Value = i32; - - fn equality(x: &Self::Value, y: &Self::Value) -> bool { - x == y - } -} - -#[tokio::test] -async fn set_injected_multiple_times_per_commit() -> anyhow::Result<()> { - let dice = DiceLegacy::builder().build(DetectCycles::Enabled); - - { - let mut ctx = dice.updater(); - ctx.changed_to(vec![(Foo(0), 0)])?; - ctx.changed_to(vec![(Foo(1), 1)])?; - - let ctx = ctx.commit().await; - assert_eq!(ctx.compute(&Foo(0)).await?, 0); - assert_eq!(ctx.compute(&Foo(1)).await?, 1); - } - - { - let mut ctx = dice.updater(); - ctx.changed_to(vec![(Foo(0), 0)])?; - - assert_matches!( - ctx.changed_to(vec![(Foo(0), 1)]), - Err(err) => assert_matches!(&*err.0, DiceErrorImpl::DuplicateChange(_)) - ); - } - - Ok(()) -} - -#[tokio::test] -async fn set_injected_with_no_change_no_new_ctx() -> anyhow::Result<()> { - let dice = DiceLegacy::builder().build(DetectCycles::Enabled); - - { - let mut ctx = dice.updater(); - ctx.changed_to(vec![(Foo(0), 0)])?; - - let ctx = ctx.commit().await; - - assert_eq!(ctx.0.get_version(), VersionNumber::new(1)); - } - - { - let mut ctx = dice.updater(); - ctx.changed_to(vec![(Foo(0), 0)])?; - - let ctx = ctx.commit().await; - assert_eq!(ctx.0.get_version(), VersionNumber::new(1)); - } - - Ok(()) -} - -#[tokio::test] -async fn updates_caches_only_on_ctx_finalize_in_order() -> anyhow::Result<()> { - let 
dice = DiceLegacy::builder().build(DetectCycles::Enabled); - - { - let mut ctx = dice.updater(); - - // now we write something and commit - ctx.changed_to(vec![(Foo(1), 1)])?; - let vg = dice.global_versions.current(); - dice.find_cache::() - .get_maybe_cached(Foo(1), vg.version, *vg.minor_version_guard) - .assert_none(); - - ctx.commit().await; - - // committing the context commits the value - let vg = dice.global_versions.current(); - assert_eq!( - *dice - .find_cache::() - .get_maybe_cached(Foo(1), vg.version, *vg.minor_version_guard) - .assert_match() - .val(), - 1 - ); - } - - { - let mut ctx = dice.updater(); - let mut ctx1 = dice.updater(); - // even if we do a change on this ctx first. - ctx.changed_to(vec![(Foo(2), 2)])?; - ctx1.changed_to(vec![(Foo(3), 3)])?; - - // as long as we commit ctx1 first, it's values are committed first, in linear - // history - ctx1.commit().await; - - let vg = dice.global_versions.current(); - assert_eq!( - *dice - .find_cache::() - .get_maybe_cached(Foo(3), vg.version, *vg.minor_version_guard) - .assert_match() - .val(), - 3 - ); - - dice.find_cache::() - .get_maybe_cached(Foo(2), vg.version, *vg.minor_version_guard) - .assert_none(); - - ctx.commit().await; - - // only now is 'ctx' committed - let vg = dice.global_versions.current(); - assert_eq!( - *dice - .find_cache::() - .get_maybe_cached(Foo(2), vg.version, *vg.minor_version_guard) - .assert_match() - .val(), - 2 - ); - } - - Ok(()) -} - -#[derive(Clone, Dupe, Display, Debug, Eq, PartialEq, Hash, Allocative)] -#[display(fmt = "{:?}", self)] -struct K(i32); - -#[async_trait] -impl Key for K { - type Value = Result>; - - async fn compute( - &self, - ctx: &mut DiceComputations, - _cancellations: &CancellationContext, - ) -> Self::Value { - let mut sum = self.0; - for i in 0..self.0 { - sum += ctx - .compute(&K(i)) - .await - .map_err(|e| Arc::new(anyhow::anyhow!(e)))?? - .0; - } - Ok(K(sum)) - } - - fn equality(x: &Self::Value, y: &Self::Value) -> bool { - match (x, y) { - (Ok(x), Ok(y)) => x == y, - _ => false, - } - } -} - -#[test] -fn ctx_tracks_deps_properly() -> anyhow::Result<()> { - let dice = DiceLegacy::builder().build(DetectCycles::Enabled); - - let rt = tokio::runtime::Runtime::new().unwrap(); - rt.block_on(async { - let ctx = dice.updater().commit().await; - let res = ctx - .compute(&K(5)) - .await? 
- .map_err(|e| anyhow::anyhow!(format!("{:#}", e)))?; - assert_eq!(res, K(31)); - - // introspect the caches for dependency info - fn assert_cached_deps(dice: &Arc, k: i32) { - let vg = dice.global_versions.current(); - let cached = dice - .find_cache::() - .get_cached(K(k), vg.version, *vg.minor_version_guard) - .dupe(); - - let expected_deps = (0..k) - .map(K) - .map(DependencyExt::>::testing_raw) - .collect::>(); - - // TODO(bobyf) better assert the versions stored in deps - let meta = cached.read_meta(); - let deps = meta.deps.debug_deps().read(); - let (version, deps) = deps.as_ref().expect("No deps"); - - assert_eq!(*version, VersionNumber::new(0)); - - let deps = deps.iter().collect::>(); - let expected_deps = expected_deps.iter().collect::>(); - - assert_eq!(deps, expected_deps); - } - - assert_cached_deps(&dice, 5); - assert_cached_deps(&dice, 4); - assert_cached_deps(&dice, 3); - assert_cached_deps(&dice, 2); - assert_cached_deps(&dice, 1); - assert_cached_deps(&dice, 0); - - Ok(()) - }) -} - -#[test] -fn ctx_tracks_rdeps_properly() -> anyhow::Result<()> { - let dice = DiceLegacy::builder().build(DetectCycles::Enabled); - - let rt = tokio::runtime::Builder::new_multi_thread() - .worker_threads(1) - .max_blocking_threads(1) - .build() - .unwrap(); - rt.block_on(async { - let ctx = dice.updater().commit().await; - let res = ctx - .compute(&K(5)) - .await? - .map_err(|e| anyhow::anyhow!(format!("{:#}", e)))?; - assert_eq!(res, K(31)); - - // introspect the caches for dependency info - fn assert_cached_rdeps(dice: &Arc, k: i32) { - let vg = dice.global_versions.current(); - - let cached = dice - .find_cache::() - .get_cached(K(k), vg.version, *vg.minor_version_guard) - .dupe(); - - let mut expected_deps = ((k + 1)..6) - .map(K) - .map(|k| { - Arc::as_ptr( - &dice - .find_cache::() - .get_cached(k, VersionNumber::new(0), *vg.minor_version_guard) - .into_dyn(), - ) - }) - .collect::>(); - - for rdep in cached.read_meta().rdeps.rdeps().rdeps.iter() { - assert!( - expected_deps.remove(&Arc::as_ptr(&rdep.0.0.upgrade().unwrap())), - "Extra rdeps" - ) - } - assert!( - expected_deps.is_empty(), - "Missing {} rdeps", - expected_deps.len() - ) - } - - assert_cached_rdeps(&dice, 0); - assert_cached_rdeps(&dice, 4); - assert_cached_rdeps(&dice, 3); - assert_cached_rdeps(&dice, 2); - assert_cached_rdeps(&dice, 1); - assert_cached_rdeps(&dice, 0); - - Ok(()) - }) -} - -// ignore this for now. 
Need to change ctx to better represent lifetimes and ownership -// to support this -// -// #[test] -// fn compute_that_requests_changes() { -// let dice = Dice::new(); -// -// #[derive(Clone, Debug, Eq, PartialEq, Hash)] -// struct InvalidatingKey; -// -// #[async_trait] -// impl Key for InvalidatingKey { -// type Value = i32; -// -// async fn compute(&self, ctx: &Arc) -> Self::Value { -// ctx.changed_to(vec![(InvalidatingKey, 1)]); -// ctx.commit(); -// 0 -// } -// } -// -// let mut rt = tokio::runtime::Runtime::new().unwrap(); -// rt.block_on(async { -// assert_eq!(dice.ctx().compute(&InvalidatingKey).await, 0); -// assert_eq!(dice.ctx().compute(&InvalidatingKey).await, 1); -// }); -// } - -#[test] -fn dice_computations_are_parallel() { - let n_thread = 10; - - let rt = tokio::runtime::Builder::new_multi_thread() - .worker_threads(n_thread) - .max_blocking_threads(n_thread) - .build() - .unwrap(); - let barrier = Arc::new(Barrier::new(n_thread)); - - #[derive(Clone, Debug, Display, Derivative, Allocative)] - #[derivative(Hash, PartialEq, Eq)] - #[display(fmt = "{:?}", self)] - struct Blocking { - index: usize, - #[derivative(PartialEq = "ignore", Hash = "ignore")] - #[allocative(skip)] - barrier: Arc, - } - - #[async_trait] - impl Key for Blocking { - type Value = usize; - - async fn compute( - &self, - _ctx: &mut DiceComputations, - _cancellations: &CancellationContext, - ) -> Self::Value { - self.barrier.wait(); - 1 - } - - fn equality(x: &Self::Value, y: &Self::Value) -> bool { - x == y - } - } - - rt.block_on(async move { - let dice = DiceLegacy::builder().build(DetectCycles::Enabled); - let mut sum = 0; - - let dice = &dice; - let barrier = &barrier; - - let futs = (0..n_thread) - .map(|i| async move { - dice.updater() - .commit() - .await - .compute(&Blocking { - index: i, - barrier: barrier.dupe(), - }) - .await - .unwrap() - }) - .collect::>(); - - futures::future::join_all(futs) - .await - .iter() - .for_each(|res| sum += res); - - assert_eq!(sum, n_thread); - }) -} - -#[tokio::test] -async fn different_data_per_compute_ctx() { - struct U(usize); - - #[derive(Clone, Dupe, Debug, Display, PartialEq, Eq, Hash, Allocative)] - #[display(fmt = "{:?}", self)] - struct DataRequest(u8); - #[async_trait] - impl Key for DataRequest { - type Value = usize; - - async fn compute( - &self, - ctx: &mut DiceComputations, - _cancellations: &CancellationContext, - ) -> Self::Value { - ctx.per_transaction_data().data.get::().unwrap().0 - } - - fn equality(x: &Self::Value, y: &Self::Value) -> bool { - x == y - } - } - - let dice = DiceLegacy::builder().build(DetectCycles::Enabled); - let per_cmd_data0 = { - let mut d = UserComputationData::new(); - d.data.set(U(0)); - d - }; - let per_cmd_data1 = { - let mut d = UserComputationData::new(); - d.data.set(U(1)); - d - }; - - let ctx0 = dice.updater_with_data(per_cmd_data0).commit().await; - - let ctx1 = dice.updater_with_data(per_cmd_data1).commit().await; - - let request0 = ctx0.compute(&DataRequest(0)); - let request1 = ctx1.compute(&DataRequest(1)); - - assert_eq!(request0.await.unwrap(), 0); - assert_eq!(request1.await.unwrap(), 1); -} - -#[tokio::test] -async fn invalid_results_are_not_cached() -> anyhow::Result<()> { - #[derive(Clone, Dupe, Debug, Display, Derivative, Allocative)] - #[derivative(Hash, PartialEq, Eq)] - #[display(fmt = "{:?}", self)] - struct AlwaysTransient(#[derivative(PartialEq = "ignore", Hash = "ignore")] Arc); - - #[async_trait] - impl Key for AlwaysTransient { - type Value = usize; - - async fn compute( - &self, - _ctx: 
&mut DiceComputations, - _cancellations: &CancellationContext, - ) -> Self::Value { - self.0.store(true, Ordering::SeqCst); - 1 - } - - fn equality(x: &Self::Value, y: &Self::Value) -> bool { - x == y - } - - fn validity(_x: &Self::Value) -> bool { - false - } - } - - let dice = DiceLegacy::builder().build(DetectCycles::Enabled); - let is_ran = Arc::new(AtomicBool::new(false)); - { - let ctx = dice.updater().commit().await; - ctx.compute(&AlwaysTransient(is_ran.dupe())).await?; - assert!(is_ran.load(Ordering::SeqCst)); - - // same ctx, so should reuse the result and - is_ran.store(false, Ordering::SeqCst); - ctx.compute(&AlwaysTransient(is_ran.dupe())).await?; - assert!(!is_ran.load(Ordering::SeqCst)); - - // simultaneously ctx should also re-use the result - let ctx1 = dice.updater().commit().await; - is_ran.store(false, Ordering::SeqCst); - ctx1.compute(&AlwaysTransient(is_ran.dupe())).await?; - assert!(!is_ran.load(Ordering::SeqCst)); - } - - { - // new context should re-run - let ctx = dice.updater().commit().await; - is_ran.store(false, Ordering::SeqCst); - ctx.compute(&AlwaysTransient(is_ran.dupe())).await?; - assert!(is_ran.load(Ordering::SeqCst)); - - // same ctx, so should reuse the result and - is_ran.store(false, Ordering::SeqCst); - ctx.compute(&AlwaysTransient(is_ran.dupe())).await?; - assert!(!is_ran.load(Ordering::SeqCst)); - } - - Ok(()) -} - -#[tokio::test] -async fn demo_with_transient() -> anyhow::Result<()> { - #[derive(Clone, Dupe, Debug, Display, Derivative, Allocative)] - #[derivative(Hash, PartialEq, Eq)] - #[display(fmt = "{:?}", self)] - struct MaybeTransient( - usize, - #[derivative(PartialEq = "ignore", Hash = "ignore")] Arc, - ); - - #[async_trait] - impl Key for MaybeTransient { - type Value = Result; - - async fn compute( - &self, - ctx: &mut DiceComputations, - _cancellations: &CancellationContext, - ) -> Self::Value { - if self.0 == 0 { - if !self.1.load(Ordering::SeqCst) { - Err(true) - } else { - Ok(1) - } - } else { - let mut sum = 0; - for i in 0..self.0 { - if let Ok(v) = ctx - .compute(&MaybeTransient(i, self.1.dupe())) - .await - .unwrap() - { - sum += v; - } else { - return Err(false); - } - } - Ok(sum) - } - } - - fn equality(x: &Self::Value, y: &Self::Value) -> bool { - x == y - } - - fn validity(x: &Self::Value) -> bool { - // intermediate nodes won't be directly invalid, but rely on the children to - // propagate transient-ness - if let Err(x) = x { !*x } else { true } - } - } - - let dice = DiceLegacy::builder().build(DetectCycles::Enabled); - - let ctx = dice.updater().commit().await; - let validity = Arc::new(AtomicBool::new(false)); - - assert!( - ctx.compute(&MaybeTransient(10, validity.dupe())) - .await? - .is_err(), - ); - - validity.store(true, Ordering::SeqCst); - assert!( - ctx.compute(&MaybeTransient(10, validity.dupe())) - .await? 
- .is_err(), - ); - - drop(ctx); - - let ctx = dice.updater().commit().await; - assert_eq!( - ctx.compute(&MaybeTransient(10, validity.dupe())).await?, - Ok(512) - ); - - Ok(()) -} - -#[tokio::test] -async fn test_wait_for_idle() -> anyhow::Result<()> { - #[derive(Clone, Debug, Display, Derivative, Allocative)] - #[derivative(Hash, PartialEq, Eq)] - #[display(fmt = "{:?}", self)] - struct TestKey { - id: usize, - - #[allocative(skip)] - #[derivative(PartialEq = "ignore", Hash = "ignore")] - channel: Shared>, - } - - impl Dupe for TestKey {} - - #[async_trait] - impl Key for TestKey { - type Value = (); - - async fn compute( - &self, - _ctx: &mut DiceComputations, - cancellations: &CancellationContext, - ) -> Self::Value { - cancellations - .critical_section(|| self.channel.clone()) - .await - .unwrap() - } - - fn equality(_x: &Self::Value, _y: &Self::Value) -> bool { - false - } - } - - let dice = DiceLegacy::builder().build(DetectCycles::Enabled); - - let ctx = dice.updater().commit().await; - - let (tx, rx) = oneshot::channel(); - let rx = rx.shared(); - - let key = TestKey { id: 1, channel: rx }; - let handle = ctx.compute(&key); - - let idle = dice.wait_for_idle(); - futures::pin_mut!(idle); - - assert_matches!(timeout(Duration::from_secs(1), &mut idle).await, Err(..)); - assert!(!dice.is_idle()); - - drop(handle); - drop(ctx); - assert_matches!(timeout(Duration::from_secs(1), &mut idle).await, Err(..)); - assert!(!dice.is_idle()); - - tx.send(()).unwrap(); - assert_matches!(timeout(Duration::from_secs(1), &mut idle).await, Ok(..)); - assert!(dice.is_idle()); - - // Still idle. - let stays_idle = async { - dice.wait_for_idle().await; - dice.wait_for_idle().await; - }; - assert_matches!(timeout(Duration::from_secs(1), stays_idle).await, Ok(..)); - - Ok(()) -} - -#[derive(Clone, Copy, Dupe, Display, Debug, Eq, PartialEq, Hash, Allocative)] -#[display(fmt = "{:?}", self)] -struct Fib(u8); - -#[async_trait] -impl Key for Fib { - type Value = Result>; - - async fn compute( - &self, - ctx: &mut DiceComputations, - _cancellations: &CancellationContext, - ) -> Self::Value { - if self.0 > 93 { - return Err(Arc::new(anyhow::anyhow!("that's too big"))); - } - if self.0 < 2 { - return Ok(self.0 as u64); - } - let (a, b) = { - let (a, b) = ctx.compute2( - |ctx| ctx.compute(&Fib(self.0 - 2)).boxed(), - |ctx| ctx.compute(&Fib(self.0 - 1)).boxed(), - ); - futures::future::join(a, b).await - }; - match (a, b) { - (Ok(a), Ok(b)) => Ok(a? 
+ b?), - _ => Err(Arc::new(anyhow::anyhow!("some dice error"))), - } - } - - fn equality(x: &Self::Value, y: &Self::Value) -> bool { - match (x, y) { - (Ok(x), Ok(y)) => x == y, - _ => false, - } - } -} - -#[derive(Debug, Eq, PartialEq)] -enum CycleDetectorEvents { - Start(Fib), - Finish(Fib), - Edge(Fib, Fib), -} - -#[derive(Debug, Clone)] -struct CycleDetector { - events: Arc>>, -} - -struct CycleDetectorGuard { - key: Fib, - events: Arc>>, -} - -impl UserCycleDetector for CycleDetector { - fn start_computing_key( - &self, - key: &dyn std::any::Any, - ) -> Option> { - let f = key.downcast_ref::().unwrap(); - self.events - .lock() - .unwrap() - .push(CycleDetectorEvents::Start(*f)); - Some(Box::new(CycleDetectorGuard { - key: *f, - events: self.events.clone(), - })) - } - - fn finished_computing_key(&self, key: &dyn std::any::Any) { - let f = key.downcast_ref::().unwrap(); - self.events - .lock() - .unwrap() - .push(CycleDetectorEvents::Finish(*f)); - } -} - -impl UserCycleDetectorGuard for CycleDetectorGuard { - fn add_edge(&self, key: &dyn std::any::Any) { - let f = key.downcast_ref::().unwrap(); - self.events - .lock() - .unwrap() - .push(CycleDetectorEvents::Edge(self.key, *f)) - } - - fn as_any(&self) -> &dyn std::any::Any { - self - } - - fn type_name(&self) -> &'static str { - std::any::type_name::() - } -} - -#[test] -fn user_cycle_detector_receives_events() -> anyhow::Result<()> { - let dice = DiceLegacy::builder().build(DetectCycles::Disabled); - - let rt = tokio::runtime::Runtime::new().unwrap(); - rt.block_on(async { - let events = Arc::new(Mutex::new(Vec::new())); - let user_data = UserComputationData { - cycle_detector: Some(Arc::new(CycleDetector { - events: events.clone(), - })), - ..Default::default() - }; - let ctx = dice.updater_with_data(user_data).commit().await; - let res = ctx.compute(&Fib(20)).await?.expect("should succeed"); - assert_eq!(res, 6765); - - let check_events = move |i, expected_edges| { - let mut started = false; - let mut finished = false; - let mut edges = Vec::new(); - for ev in events.lock().unwrap().iter() { - match ev { - CycleDetectorEvents::Start(Fib(v)) if i == *v => { - started = true; - assert!(!finished); - } - CycleDetectorEvents::Finish(Fib(v)) if i == *v => { - assert!(!finished); - finished = true; - assert!(started); - } - CycleDetectorEvents::Edge(Fib(v), Fib(j)) if i == *v => { - assert!(started); - assert!(!finished); - edges.push(*j) - } - _ => { - // ignore - } - } - } - assert!(finished); - edges.sort(); - assert_eq!(edges, expected_edges); - }; - - check_events(0, vec![]); - check_events(1, vec![]); - for i in 2..=20 { - check_events(i, vec![i - 2, i - 1]); - } - - Ok(()) - }) -} - -#[tokio::test] -async fn compute_and_update_uses_proper_version_numbers() -> anyhow::Result<()> { - let dice = DiceLegacy::builder().build(DetectCycles::Enabled); - - { - let ctx = dice.updater().commit().await; - assert_eq!(ctx.0.get_version(), VersionNumber::new(0)); - assert_eq!(ctx.0.get_minor_version(), MinorVersion::testing_new(0)); - } - - { - // second context that didn't have any writes should still be the same version - let ctx = dice.updater().commit().await; - assert_eq!(ctx.0.get_version(), VersionNumber::new(0)); - assert_eq!(ctx.0.get_minor_version(), MinorVersion::testing_new(1)); - - // now we write something and commit - let mut ctx = dice.updater(); - ctx.changed_to(vec![(Foo(1), 1)])?; - // current version shouldn't be updated - assert_eq!( - ctx.existing_state().await.0.get_version(), - VersionNumber::new(0) - ); - assert_eq!( - 
ctx.existing_state().await.0.get_minor_version(), - MinorVersion::testing_new(1) - ); - - let mut ctx1 = dice.updater(); - // previous ctx isn't dropped, so versions shouldn't be committed yet. - assert_eq!( - ctx1.existing_state().await.0.get_version(), - VersionNumber::new(0) - ); - assert_eq!( - ctx1.existing_state().await.0.get_minor_version(), - MinorVersion::testing_new(1) - ); - - // if we update on the new context, nothing committed - ctx1.changed_to(vec![(Foo(2), 2)])?; - assert_eq!( - ctx1.existing_state().await.0.get_version(), - VersionNumber::new(0) - ); - assert_eq!( - ctx1.existing_state().await.0.get_minor_version(), - MinorVersion::testing_new(1) - ); - - // drop a context - ctx1.commit().await; - // we should only have committed once, and in increasing order - let vg = dice.global_versions.current(); - assert_eq!( - (vg.version, *vg.minor_version_guard), - (VersionNumber::new(1), MinorVersion::testing_new(1)) - ); - - ctx.commit().await; - // both versions finalized. - let vg = dice.global_versions.current(); - assert_eq!( - (vg.version, *vg.minor_version_guard), - (VersionNumber::new(2), MinorVersion::testing_new(1)) - ); - assert!(dice.map.read().engines().iter().all(|engine| { - engine - .introspect() - .versions_currently_running() - .first() - .is_none() - })); - } - - { - let mut ctx = dice.updater(); - assert_eq!( - ctx.existing_state().await.0.get_version(), - VersionNumber::new(2) - ); - assert_eq!( - ctx.existing_state().await.0.get_minor_version(), - MinorVersion::testing_new(2) - ); - - ctx.changed_to(vec![(Foo(3), 3)])?; - assert_eq!( - ctx.existing_state().await.0.get_version(), - VersionNumber::new(2) - ); - assert_eq!( - ctx.existing_state().await.0.get_minor_version(), - MinorVersion::testing_new(2) - ); - - ctx.commit().await; - let vg = dice.global_versions.current(); - assert_eq!( - (vg.version, *vg.minor_version_guard), - (VersionNumber::new(3), MinorVersion::testing_new(1)) - ); - assert!(dice.map.read().engines().iter().all(|engine| { - engine - .introspect() - .versions_currently_running() - .first() - .is_none() - })); - } - - Ok(()) -} - -#[test] -fn test_active_transaction_count() { - let dice = Arc::new(DiceLegacy::new(DiceData::new(), DetectCycles::Enabled)); - assert_eq!(0, dice.metrics().active_transaction_count); - let ctx = dice.updater().commit(); - assert_eq!(1, dice.metrics().active_transaction_count); - drop(ctx); - assert_eq!(0, dice.metrics().active_transaction_count); -} - -#[test] -fn invalid_update() { - #[derive(Clone, Dupe, Debug, Display, PartialEq, Eq, Hash, Allocative)] - struct Invalid; - - #[async_trait] - impl Key for Invalid { - type Value = (); - - async fn compute( - &self, - _ctx: &mut DiceComputations, - _cancellations: &CancellationContext, - ) -> Self::Value { - unimplemented!("not needed for test") - } - - fn equality(_x: &Self::Value, _y: &Self::Value) -> bool { - unimplemented!("not needed for test") - } - - fn validity(_x: &Self::Value) -> bool { - false - } - } - - let dice = DiceLegacy::builder().build(DetectCycles::Enabled); - let mut updater = dice.updater(); - - assert!(updater.changed_to([(Invalid, ())]).is_err()); -} diff --git a/dice/dice/src/lib.rs b/dice/dice/src/lib.rs index fd1bf2121c78d..a5f71bc3002fb 100644 --- a/dice/dice/src/lib.rs +++ b/dice/dice/src/lib.rs @@ -24,19 +24,19 @@ //! use dice::{Key, InjectedKey, DiceComputations, DiceDataBuilder, DiceData, DiceTransactionUpdater}; //! use std::sync::Arc; //! use allocative::Allocative; -//! use more_futures::cancellation::CancellationContext; +//! 
use buck2_futures::cancellation::CancellationContext; //! //! /// A configuration computation that consists of values that are pre-computed outside of DICE -//! pub struct InjectConfigs<'compute>(&'compute DiceComputations); +//! pub struct InjectConfigs<'compute, 'd>(&'compute mut DiceComputations<'d>); //! -//! impl<'compute> InjectConfigs<'compute> { -//! pub async fn get_config(&self) -> usize { +//! impl<'compute, 'd> InjectConfigs<'compute, 'd> { +//! pub async fn get_config(&mut self) -> usize { //! self.0.compute(&ConfigKey).await.unwrap() //! } //! } //! //! #[derive(Clone, Debug, Display, Eq, Hash, PartialEq, Allocative)] -//! #[display(fmt = "{:?}", self)] +//! #[display("{:?}", self)] //! struct ConfigKey; //! //! #[async_trait] @@ -48,13 +48,13 @@ //! } //! } //! -//! pub struct MyComputation<'compute>(&'compute DiceComputations); +//! pub struct MyComputation<'compute, 'd>(pub &'compute mut DiceComputations<'d>); //! -//! impl<'compute> MyComputation<'compute> { +//! impl<'compute, 'd> MyComputation<'compute, 'd> { //! // declaring a computation function -//! pub async fn compute_a(&self, a: usize, s: String) -> Arc { +//! pub async fn compute_a(&mut self, a: usize, s: String) -> Arc { //! #[derive(Clone, Display, Debug, Eq, Hash, PartialEq, Allocative)] -//! #[display(fmt = "{:?}", self)] +//! #[display("{:?}", self)] //! struct ComputeA(usize, String); //! //! #[async_trait] @@ -63,7 +63,7 @@ //! //! async fn compute(&self, ctx: &mut DiceComputations, _cancellations: &CancellationContext) -> Self::Value { //! // request for other computations on the self -//! let n = ctx.my_computation().compute_b(self.0).await; +//! let n = MyComputation(ctx).compute_b(self.0).await; //! Arc::new(self.1.repeat(n)) //! } //! @@ -76,21 +76,21 @@ //! } //! //! // second computation function -//! pub async fn compute_b(&self, a: usize) -> usize { +//! pub async fn compute_b(&mut self, a: usize) -> usize { //! self.0.compute(&ComputeB(a)).await.unwrap() //! } //! } //! //! #[derive(Clone, Display, Debug, Eq, Hash, PartialEq, Allocative)] -//! #[display(fmt = "{:?}", self)] +//! #[display("{:?}", self)] //! struct ComputeB(usize); //! //! #[async_trait] //! impl Key for ComputeB { //! type Value = usize; //! -//! async fn compute(&self, ctx: &mut DiceComputations, _cancellations: &CancellationContext) -> Self::Value { -//! self.0 + ctx.injected_configs().get_config().await + ctx.global_data().static_data().len() +//! async fn compute(&self, ctx: &mut DiceComputations, cancellations: &CancellationContext) -> Self::Value { +//! self.0 + InjectConfigs(ctx).get_config().await + ctx.global_data().static_data().len() //! } //! //! fn equality(x: &Self::Value,y: &Self::Value) -> bool { @@ -98,29 +98,6 @@ //! } //! } //! -//! // trait to register the computation to DICE -//! pub trait HasMyComputation { -//! fn my_computation(&self) -> MyComputation; -//! } -//! -//! // attach the declared computation to DICE via the context -//! impl HasMyComputation for DiceComputations { -//! fn my_computation(&self) -> MyComputation { -//! MyComputation(self) -//! } -//! } -//! -//! // trait to register the precomputed configs to DICE -//! pub trait HasInjectedConfig { -//! fn injected_configs(&self) -> InjectConfigs; -//! } -//! -//! impl HasInjectedConfig for DiceComputations { -//! fn injected_configs(&self) -> InjectConfigs { -//! InjectConfigs(self) -//! } -//! } -//! //! pub trait SetInjectedConfig { //! fn inject_config(&mut self, i: usize); //! } @@ -167,35 +144,30 @@ //! let mut ctx = engine.updater(); //! 
ctx.inject_config(0); //! -//! let ctx = rt.block_on(ctx.commit()); +//! let mut ctx = rt.block_on(ctx.commit()); //! //! // request the computation from DICE //! rt.block_on(async { -//! assert_eq!("aaaaaaaa", &*ctx.my_computation().compute_a(4, "a".into()).await); +//! assert_eq!("aaaaaaaa", &*MyComputation(&mut ctx).compute_a(4, "a".into()).await); //! }); //! //! let mut ctx = engine.updater(); //! ctx.inject_config(2); //! -//! let ctx = rt.block_on(ctx.commit()); +//! let mut ctx = rt.block_on(ctx.commit()); //! //! // request the computation from DICE //! rt.block_on(async { -//! assert_eq!("aaaaaaaaaa", &*ctx.my_computation().compute_a(4, "a".into()).await); +//! assert_eq!("aaaaaaaaaa", &*MyComputation(&mut ctx).compute_a(4, "a".into()).await); //! }); //! ``` -#![feature(async_closure)] #![feature(entry_insert)] #![feature(fn_traits)] #![feature(test)] #![feature(map_try_insert)] #![feature(map_entry_replace)] #![feature(result_flattening)] -// Plugins -#![cfg_attr(feature = "gazebo_lint", feature(plugin))] -#![cfg_attr(feature = "gazebo_lint", allow(deprecated))] // :( -#![cfg_attr(feature = "gazebo_lint", plugin(gazebo_lint))] // This sometimes flag false positives where proc-macros expand pass by value into pass by refs #![allow(clippy::trivially_copy_pass_by_ref)] @@ -208,6 +180,7 @@ extern crate tracing; mod api; pub(crate) mod arc; mod ctx; +pub(crate) mod future; mod impls; pub mod introspection; mod legacy; @@ -215,6 +188,7 @@ pub(crate) mod metrics; mod opaque; pub(crate) mod owned; pub(crate) mod result; +pub(crate) mod stats; mod transaction; mod transaction_update; mod versions; @@ -224,35 +198,37 @@ use std::io::Write; use std::sync::Arc; use allocative::Allocative; -pub(crate) use fnv::FnvHashMap as HashMap; -pub(crate) use fnv::FnvHashSet as HashSet; +pub use buck2_futures::cancellation::future::CancellationHandle; // expose cancellation handle as api +pub use buck2_futures::cancellation::CancellationContext; // expose cancellation context as api +pub use buck2_futures::spawn::CancellableJoinHandle; // expose cancellation context as api +pub use buck2_futures::spawn::FutureAndCancellationHandle; +pub use buck2_futures::spawn::WeakFutureError; // expose future errors as api +pub(crate) type HashMap = std::collections::HashMap; +pub(crate) type HashSet = std::collections::HashSet; use futures::future::Future; -use futures::FutureExt; -use legacy::dice_futures::future_handle::WeakDiceFutureHandle; -use legacy::incremental::graph::GraphNode; -use legacy::incremental::transaction_ctx::TransactionCtx; -use legacy::key::StoragePropertiesForKey; use metrics::Metrics; -pub use more_futures::cancellation::future::CancellationHandle; // expose cancellation handle as api -pub use more_futures::cancellation::CancellationContext; // expose cancellation context as api -pub use more_futures::spawn::CancellableJoinHandle; // expose cancellation context as api -pub use more_futures::spawn::FutureAndCancellationHandle; -pub use more_futures::spawn::WeakFutureError; // expose future errors as api use serde::Serializer; pub use crate::api::activation_tracker::ActivationData; pub use crate::api::activation_tracker::ActivationTracker; pub use crate::api::computations::DiceComputations; -pub use crate::api::computations::DiceComputationsParallel; +pub use crate::api::computations::LinearRecomputeDiceComputations; pub use crate::api::cycles::DetectCycles; pub use crate::api::data::DiceData; +pub use crate::api::demand::Demand; pub use crate::api::dice::Dice; pub use 
crate::api::dice::DiceDataBuilder; +pub use crate::api::dyn_key::DynKey; pub use crate::api::error::DiceError; pub use crate::api::error::DiceResult; pub use crate::api::events::DiceEvent; pub use crate::api::events::DiceEventListener; pub use crate::api::injected::InjectedKey; +pub use crate::api::invalidation_tracking::DiceInvalidationPath; +pub use crate::api::invalidation_tracking::DiceKeyTrackedInvalidationPaths; +pub use crate::api::invalidation_tracking::DiceTrackedInvalidationPath; +pub use crate::api::invalidation_tracking::InvalidationPathEntry; +pub use crate::api::key::InvalidationSourcePriority; pub use crate::api::key::Key; pub use crate::api::opaque::OpaqueValue; pub use crate::api::projection::DiceProjectionComputations; @@ -269,20 +245,17 @@ use crate::impls::dice::DiceModernDataBuilder; use crate::introspection::graph::GraphIntrospectable; use crate::introspection::serialize_dense_graph; use crate::introspection::serialize_graph; -use crate::legacy::DiceLegacy; -use crate::legacy::DiceLegacyDataBuilder; +pub use crate::stats::GlobalStats; use crate::transaction_update::DiceTransactionUpdaterImpl; #[derive(Allocative, Debug)] pub(crate) enum DiceImplementation { - Legacy(Arc), Modern(Arc), } impl DiceImplementation { pub fn updater(&self) -> DiceTransactionUpdater { match self { - DiceImplementation::Legacy(dice) => dice.updater(), DiceImplementation::Modern(dice) => { DiceTransactionUpdater(DiceTransactionUpdaterImpl::Modern(dice.updater())) } @@ -291,7 +264,6 @@ impl DiceImplementation { pub fn updater_with_data(&self, extra: UserComputationData) -> DiceTransactionUpdater { match self { - DiceImplementation::Legacy(dice) => dice.updater_with_data(extra), DiceImplementation::Modern(dice) => DiceTransactionUpdater( DiceTransactionUpdaterImpl::Modern(dice.updater_with_data(extra)), ), @@ -322,21 +294,18 @@ impl DiceImplementation { fn to_introspectable(&self) -> GraphIntrospectable { match self { - DiceImplementation::Legacy(dice) => dice.to_introspectable(), DiceImplementation::Modern(dice) => dice.to_introspectable(), } } pub fn detect_cycles(&self) -> &DetectCycles { match self { - DiceImplementation::Legacy(dice) => dice.detect_cycles(), DiceImplementation::Modern(dice) => dice.detect_cycles(), } } pub fn metrics(&self) -> Metrics { match self { - DiceImplementation::Legacy(dice) => dice.metrics(), DiceImplementation::Modern(dice) => dice.metrics(), } } @@ -344,43 +313,34 @@ impl DiceImplementation { /// Wait until all active versions have exited. 
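Aside on the `wait_for_idle` hunk just below: with the Legacy variant removed, the match has a single arm and can return the Modern future directly. The deleted `left_future`/`right_future` calls are the `futures::FutureExt` combinators that wrap two different future types into `futures::future::Either` so both match arms share one concrete return type. A minimal self-contained sketch of that idiom, with hypothetical names:

    use futures::future::{ready, Future};
    use futures::FutureExt;

    enum Source {
        Fast,
        Slow,
    }

    impl Source {
        // The two arms produce different future types; left_future() and
        // right_future() wrap them in Either, which itself implements Future,
        // giving the function a single concrete return type.
        fn value(&self) -> impl Future<Output = u32> {
            match self {
                Source::Fast => ready(1u32).left_future(),
                Source::Slow => (async { 2u32 }).right_future(),
            }
        }
    }

With only one variant left, the wrapper is unnecessary and the future can be returned as-is.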
pub fn wait_for_idle(&self) -> impl Future + 'static { match self { - DiceImplementation::Legacy(dice) => dice.wait_for_idle().left_future(), - DiceImplementation::Modern(dice) => dice.wait_for_idle().right_future(), + DiceImplementation::Modern(dice) => dice.wait_for_idle(), } } pub async fn is_idle(&self) -> bool { match self { - DiceImplementation::Legacy(dice) => dice.is_idle(), DiceImplementation::Modern(dice) => dice.is_idle().await, } } } pub(crate) enum DiceDataBuilderImpl { - Legacy(DiceLegacyDataBuilder), Modern(DiceModernDataBuilder), } impl DiceDataBuilderImpl { - pub(crate) fn new_legacy() -> Self { - Self::Legacy(DiceLegacyDataBuilder::new()) - } - pub(crate) fn new_modern() -> Self { Self::Modern(DiceModernDataBuilder::new()) } pub fn set(&mut self, val: K) { match self { - DiceDataBuilderImpl::Legacy(d) => d.set(val), DiceDataBuilderImpl::Modern(d) => d.set(val), } } pub fn build(self, detect_cycles: DetectCycles) -> Arc { Dice::new(match self { - DiceDataBuilderImpl::Legacy(d) => DiceImplementation::Legacy(d.build(detect_cycles)), DiceDataBuilderImpl::Modern(d) => DiceImplementation::Modern(d.build(detect_cycles)), }) } diff --git a/dice/dice/src/opaque.rs b/dice/dice/src/opaque.rs index 5c31ee00a975b..df6a0d080d15a 100644 --- a/dice/dice/src/opaque.rs +++ b/dice/dice/src/opaque.rs @@ -11,11 +11,8 @@ use std::fmt; use std::fmt::Debug; use std::fmt::Formatter; -use crate::api::error::DiceResult; use crate::api::key::Key; -use crate::api::projection::ProjectionKey; use crate::impls::opaque::OpaqueValueModern; -use crate::legacy::opaque::OpaqueValueImplLegacy; /// Computed value which is not directly visible to user. /// @@ -23,32 +20,18 @@ use crate::legacy::opaque::OpaqueValueImplLegacy; /// so projection result is recorded as a dependency /// of a computation which requested the opaqued value, /// but the opaque value key is not. -pub(crate) enum OpaqueValueImpl<'a, K: Key> { - Legacy(OpaqueValueImplLegacy<'a, K>), - Modern(OpaqueValueModern<'a, K>), +pub(crate) enum OpaqueValueImpl { + Modern(OpaqueValueModern), } -impl<'a, K> Debug for OpaqueValueImpl<'a, K> +impl Debug for OpaqueValueImpl where K: Key, K::Value: Debug, { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { - OpaqueValueImpl::Legacy(delegate) => delegate.fmt(f), OpaqueValueImpl::Modern(delegate) => delegate.fmt(f), } } } - -impl<'a, K: Key> OpaqueValueImpl<'a, K> { - pub(crate) fn projection
    (&self, projection_key: &P) -> DiceResult - where - P: ProjectionKey, - { - match self { - OpaqueValueImpl::Legacy(delegate) => delegate.projection(projection_key), - OpaqueValueImpl::Modern(delegate) => delegate.projection(projection_key), - } - } -} diff --git a/dice/dice/src/owned.rs b/dice/dice/src/owned.rs index 5437a9da3e1a4..b2162c2c8ca78 100644 --- a/dice/dice/src/owned.rs +++ b/dice/dice/src/owned.rs @@ -11,6 +11,9 @@ //! The reference can only be accessed via a lambda providing an optional access based on whether //! the reference is still valid. +// Intend to use this again when parallel computations have correct dep recording. +#![allow(unused)] + use std::ops::Deref; use std::sync::Arc; use std::sync::Weak; diff --git a/dice/dice/src/result.rs b/dice/dice/src/result.rs index a526bc5aea9f9..7940d20c35341 100644 --- a/dice/dice/src/result.rs +++ b/dice/dice/src/result.rs @@ -15,9 +15,18 @@ use dupe::Dupe; use thiserror::Error; #[allow(unused)] // TODO temporary -pub(crate) type CancellableResult = Result; +pub(crate) type CancellableResult = Result; #[allow(unused)] // TODO temporary #[derive(Clone, Dupe, Display, Debug, Error, Allocative)] -#[display(fmt = "{:?}", self)] -pub(crate) struct Cancelled; +#[display("{:?}", self)] +pub enum CancellationReason { + OutdatedEpoch, + TransactionCancelled, + Terminated, + NoResult, + Rejected, + DepsMatch, + WorkerFinished, + Cached, +} diff --git a/dice/dice/src/stats.rs b/dice/dice/src/stats.rs new file mode 100644 index 0000000000000..bec6c5f1ee194 --- /dev/null +++ b/dice/dice/src/stats.rs @@ -0,0 +1,31 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::sync::atomic::AtomicU64; +use std::sync::atomic::Ordering; + +/// Provides informative stats over all dice instances. +pub struct GlobalStats { + /// A count of how many computations have been cancelled. 
+ pub cancellations: u64, +} + +pub static CANCEL_COUNT: AtomicU64 = AtomicU64::new(0); + +impl GlobalStats { + pub fn get() -> Self { + Self { + cancellations: CANCEL_COUNT.load(Ordering::Relaxed), + } + } + + pub(crate) fn record_cancellation() { + CANCEL_COUNT.fetch_add(1, Ordering::Relaxed); + } +} diff --git a/dice/dice/src/transaction.rs b/dice/dice/src/transaction.rs index 4dfaca0934f48..cf5a33805a968 100644 --- a/dice/dice/src/transaction.rs +++ b/dice/dice/src/transaction.rs @@ -10,32 +10,19 @@ use allocative::Allocative; use dupe::Dupe; -use crate::ctx::DiceComputationsImpl; use crate::impls::ctx::BaseComputeCtx; -use crate::transaction_update::DiceTransactionUpdaterImpl; use crate::versions::VersionNumber; use crate::DiceComputations; use crate::DiceTransactionUpdater; #[derive(Allocative)] pub(crate) enum DiceTransactionImpl { - Legacy(DiceComputations), Modern(BaseComputeCtx), } impl Clone for DiceTransactionImpl { fn clone(&self) -> Self { match self { - DiceTransactionImpl::Legacy(ctx) => match &ctx.0 { - // since 'DiceComputations' should not be clone or dupe, we manually implement - // clone and dupe for the 'DiceTransaction' - DiceComputationsImpl::Legacy(ctx) => DiceTransactionImpl::Legacy(DiceComputations( - DiceComputationsImpl::Legacy(ctx.dupe()), - )), - DiceComputationsImpl::Modern(_) => { - unreachable!("wrong dice") - } - }, DiceTransactionImpl::Modern(ctx) => DiceTransactionImpl::Modern(ctx.clone()), } } @@ -46,35 +33,24 @@ impl Dupe for DiceTransactionImpl {} impl DiceTransactionImpl { pub(crate) fn get_version(&self) -> VersionNumber { match self { - DiceTransactionImpl::Legacy(ctx) => ctx.0.get_version(), DiceTransactionImpl::Modern(ctx) => ctx.get_version(), } } pub(crate) fn into_updater(self) -> DiceTransactionUpdater { match self { - DiceTransactionImpl::Legacy(delegate) => match delegate.0 { - DiceComputationsImpl::Legacy(ctx) => { - DiceTransactionUpdater(DiceTransactionUpdaterImpl::Legacy(ctx)) - } - DiceComputationsImpl::Modern(_) => { - unreachable!("legacy dice") - } - }, DiceTransactionImpl::Modern(delegate) => delegate.into_updater(), } } - pub(crate) fn as_computations(&self) -> &DiceComputations { + pub(crate) fn as_computations(&self) -> &DiceComputations<'static> { match self { - DiceTransactionImpl::Legacy(ctx) => ctx, DiceTransactionImpl::Modern(ctx) => ctx.as_computations(), } } - pub(crate) fn as_computations_mut(&mut self) -> &mut DiceComputations { + pub(crate) fn as_computations_mut(&mut self) -> &mut DiceComputations<'static> { match self { - DiceTransactionImpl::Legacy(ctx) => ctx, DiceTransactionImpl::Modern(ctx) => ctx.as_computations_mut(), } } diff --git a/dice/dice/src/transaction_update.rs b/dice/dice/src/transaction_update.rs index 54b94a53ca503..4925adeb36eaf 100644 --- a/dice/dice/src/transaction_update.rs +++ b/dice/dice/src/transaction_update.rs @@ -8,20 +8,14 @@ */ use std::future::Future; -use std::sync::Arc; -use std::thread; use allocative::Allocative; -use dupe::Dupe; use futures::FutureExt; -use crate::api::computations::DiceComputations; use crate::api::error::DiceResult; use crate::api::key::Key; use crate::api::user_data::UserComputationData; -use crate::ctx::DiceComputationsImpl; use crate::impls::transaction::TransactionUpdater; -use crate::legacy::ctx::DiceComputationsImplLegacy; use crate::transaction::DiceTransactionImpl; use crate::DiceTransaction; @@ -29,24 +23,15 @@ use crate::DiceTransaction; /// to DICE, which returns the Transaction where we spawn computations. 
#[derive(Allocative)] pub(crate) enum DiceTransactionUpdaterImpl { - Legacy(Arc), - #[allow(unused)] Modern(TransactionUpdater), } impl DiceTransactionUpdaterImpl { pub(crate) fn existing_state(&self) -> impl Future + '_ { match self { - DiceTransactionUpdaterImpl::Legacy(ctx) => { - futures::future::ready(DiceTransaction(DiceTransactionImpl::Legacy( - DiceComputations(DiceComputationsImpl::Legacy(ctx.dupe())), - ))) - .left_future() - } DiceTransactionUpdaterImpl::Modern(delegate) => delegate .existing_state() - .map(|d| DiceTransaction(DiceTransactionImpl::Modern(d))) - .right_future(), + .map(|d| DiceTransaction(DiceTransactionImpl::Modern(d))), } } @@ -58,7 +43,6 @@ impl DiceTransactionUpdaterImpl { I: IntoIterator + Send + Sync + 'static, { match self { - DiceTransactionUpdaterImpl::Legacy(ctx) => ctx.changed(changed), DiceTransactionUpdaterImpl::Modern(delegate) => delegate.changed(changed), } } @@ -76,7 +60,6 @@ impl DiceTransactionUpdaterImpl { I: IntoIterator + Send + Sync + 'static, { match self { - DiceTransactionUpdaterImpl::Legacy(ctx) => ctx.changed_to(changed), DiceTransactionUpdaterImpl::Modern(delegate) => delegate.changed_to(changed), } } @@ -84,16 +67,9 @@ impl DiceTransactionUpdaterImpl { /// Commit the changes registered via 'changed' and 'changed_to' to the current newest version. pub(crate) fn commit(self) -> impl Future { match self { - DiceTransactionUpdaterImpl::Legacy(ctx) => { - futures::future::ready(DiceTransaction(DiceTransactionImpl::Legacy( - DiceComputations(DiceComputationsImpl::Legacy(ctx.commit())), - ))) - .left_future() - } DiceTransactionUpdaterImpl::Modern(delegate) => delegate .commit() - .map(|x| DiceTransaction(DiceTransactionImpl::Modern(x))) - .right_future(), + .map(|x| DiceTransaction(DiceTransactionImpl::Modern(x))), } } @@ -104,32 +80,21 @@ impl DiceTransactionUpdaterImpl { extra: UserComputationData, ) -> impl Future { match self { - DiceTransactionUpdaterImpl::Legacy(ctx) => { - futures::future::ready(DiceTransaction(DiceTransactionImpl::Legacy( - DiceComputations(DiceComputationsImpl::Legacy(ctx.commit_with_data(extra))), - ))) - .left_future() - } DiceTransactionUpdaterImpl::Modern(delegate) => delegate .commit_with_data(extra) - .map(|x| DiceTransaction(DiceTransactionImpl::Modern(x))) - .right_future(), + .map(|x| DiceTransaction(DiceTransactionImpl::Modern(x))), } } /// Clears the entire DICE state. The dropping of values from memory happens asynchronously. + /// + /// Any currently running computations may receive cancellations as we may have dropped data + /// needed to make progress. + // TODO(cjhopman): Why is this named take when it doesn't return the taken data? It should be named clear. pub fn unstable_take(self) -> Self { match self { - DiceTransactionUpdaterImpl::Legacy(ctx) => { - let map = ctx.unstable_take(); - // Destructors can be slow, so we do this in a separate thread. 
- thread::spawn(|| drop(map)); - - DiceTransactionUpdaterImpl::Legacy(ctx) - } DiceTransactionUpdaterImpl::Modern(delegate) => { delegate.unstable_take(); - DiceTransactionUpdaterImpl::Modern(delegate) } } diff --git a/dice/dice/src/versions.rs b/dice/dice/src/versions.rs index 2060b6627acb8..3a83691eba306 100644 --- a/dice/dice/src/versions.rs +++ b/dice/dice/src/versions.rs @@ -16,7 +16,6 @@ use std::cmp; use std::fmt; use std::fmt::Debug; -use std::fmt::Display; use std::fmt::Formatter; use std::ops::Bound; use std::ops::RangeBounds; @@ -25,13 +24,12 @@ use std::ops::Sub; use allocative::Allocative; use derive_more::Display; use dupe::Dupe; -use sorted_vector_map::SortedVectorSet; /// The incrementing Version number associated with all the cache entries #[derive(Copy, Eq, Debug, Display, Dupe)] // split this due to formatters not agreeing #[derive(PartialEq, Hash, Clone, Ord, PartialOrd, Allocative)] -#[display(fmt = "v{}", "_0")] +#[display("v{}", _0)] pub struct VersionNumber(pub(crate) usize); impl VersionNumber { @@ -49,6 +47,10 @@ impl VersionNumber { pub(crate) fn dec(&mut self) { self.0 = self.0.checked_sub(1).expect("shouldn't underflow"); } + + pub fn value(&self) -> usize { + self.0 + } } impl Sub for VersionNumber { @@ -71,7 +73,7 @@ mod introspection { /// Represents a range of versions. This range must have a start that is inclusive, but may be /// unbounded towards the end. The end, if present, is exclusive. -#[derive(Allocative, Eq, Debug, Dupe, PartialEq, Hash, Clone)] +#[derive(Allocative, Eq, Debug, Dupe, PartialEq, Hash, Clone, Copy)] pub(crate) struct VersionRange { begin: VersionNumber, end: Option, @@ -93,7 +95,7 @@ impl Display for VersionRange { } impl VersionRange { - fn new(begin: VersionNumber, end: Option) -> Self { + pub(crate) fn new(begin: VersionNumber, end: Option) -> Self { if let Some(end) = end { assert!(begin < end); } @@ -108,6 +110,11 @@ impl VersionRange { VersionRange::new(begin, None) } + #[allow(unused)] // TODO(cjhopman): This will be used. 
+ pub(crate) fn into_ranges(self) -> VersionRanges { + VersionRanges(vec![self]) + } + pub(crate) fn intersect(&self, other: &VersionRange) -> Option { // we exclude the end bound, because intervals [1,2) and [2,3) do not intersect fn contains_end_exclusive( @@ -160,8 +167,13 @@ impl VersionRange { } #[allow(unused)] // useful function - pub(crate) fn begin(&self) -> &VersionNumber { - &self.begin + pub(crate) fn begin(&self) -> VersionNumber { + self.begin + } + + #[allow(unused)] // useful function + pub(crate) fn end(&self) -> Option { + self.end } /// Merges this range with the given range if they overlap, otherwise return `None` @@ -179,27 +191,34 @@ impl VersionRange { if is_between_end_inclusive(self.begin, other.begin, other.end) || is_between_end_inclusive(other.begin, self.begin, self.end) { - Some(VersionRange::new( - cmp::min(self.begin, other.begin), - match (self.end, other.end) { - (None, _) => None, - (_, None) => None, - (Some(e1), Some(e2)) => Some(cmp::max(e1, e2)), - }, - )) + Some(self.merge_unchecked(other)) } else { None } } + + /// Merges this range with the given range assuming that they overlap + fn merge_unchecked(&self, other: &VersionRange) -> VersionRange { + VersionRange::new( + cmp::min(self.begin, other.begin), + match (self.end, other.end) { + (None, _) => None, + (_, None) => None, + (Some(e1), Some(e2)) => Some(cmp::max(e1, e2)), + }, + ) + } } +// TODO(cjhopman): While implementing RangeBounds gives access to a bunch of apis, they are all kinda deceptive +// because VersionRange bounds are more restricted than RangeBounds are and so using many of the apis is kinda awkward. impl RangeBounds for VersionRange { fn start_bound(&self) -> Bound<&VersionNumber> { Bound::Included(&self.begin) } fn end_bound(&self) -> Bound<&VersionNumber> { - self.end.as_ref().map_or(Bound::Unbounded, Bound::Included) + self.end.as_ref().map_or(Bound::Unbounded, Bound::Excluded) } } @@ -233,7 +252,7 @@ impl Ord for VersionRange { /// 3, and 4 would not be in the sequence of ranges, but 1, 2, 5, would be. This is essentially /// a list of numerical end-exclusive intervals. #[derive(Allocative, Eq, Debug, PartialEq, Hash, Clone, PartialOrd, Ord)] -pub(crate) struct VersionRanges(SortedVectorSet); +pub(crate) struct VersionRanges(Vec); impl Display for VersionRanges { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { @@ -249,40 +268,81 @@ impl Display for VersionRanges { } impl VersionRanges { - pub(crate) fn new() -> Self { - Self(Default::default()) + /// Returns the last range if this is non-empty. + pub(crate) fn last(&self) -> Option { + self.0.last().copied() + } + + pub(crate) fn find_value_upper_bound(&self, v: VersionNumber) -> Option { + // we generally expect queries at later versions so just look through the list from the + // end. potentially this should be changed if that expectation is no longer true. 
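+        // Worked examples for the scan below (derived from this implementation):
+        // given the ranges {[1,3), [6,8)}:
+        //   find_value_upper_bound(7) == Some(7)  -- 7 lies inside [6,8)
+        //   find_value_upper_bound(5) == Some(2)  -- 2 is the last covered version below 5
+        //   find_value_upper_bound(0) == None     -- no range begins at or before 0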
+ for range in self.0.iter().rev() { + if range.begin <= v { + if range.contains(&v) { + return Some(v); + } else { + let mut end = range.end.unwrap(); + end.dec(); + assert!(end < v); + return Some(end); + } + } + } + None } - /// inserts a single range, merging different ranges if necessary - pub(crate) fn insert(&mut self, mut range: VersionRange) { - if let Some(smaller) = self - .0 - .range((Bound::Unbounded, Bound::Included(range.dupe()))) - .max() - { - if let Some(merged) = smaller.merge(&range) { - let to_remove = smaller.dupe(); - let removed = self.0.remove(&to_remove); - assert!(removed); - range = merged; - } + /// same as union_range + pub(crate) fn insert(&mut self, range: VersionRange) { + self.union_range(range) + } + + /// unions with a single range, merging different ranges if necessary + pub(crate) fn union_range(&mut self, range: VersionRange) { + let len = self.0.len(); + + // this works by finding a position to insert the new range, and then unions it with any overlapping ranges after its insertion point. + + let idx = self.0.partition_point(|r| match r.end { + Some(end) if end < range.begin => true, + _ => false, + }); + + // idx now points to the first range with end >= range.begin, which is the position where we will "insert" the new range. + // there's three cases: + // 1. idx = len, we just append range at the end: there's nothing to merge + // 2. self.0[idx].begin > range.end: there's no overlapping ranges, just insert range at idx + // 3. self.0[idx] and range have overlap: merge them and then need to scan for and merge any additional overlap after idx + + // handle case 1 + if idx == len { + self.0.push(range); + return; } - if let Some(larger) = self - .0 - .range((Bound::Included(range.dupe()), Bound::Unbounded)) - .min() - { - if let Some(merged) = larger.merge(&range) { - let to_remove = larger.dupe(); - let removed = self.0.remove(&to_remove); - assert!(removed); - range = merged; + let mut merged = match self.0[idx].merge(&range) { + Some(merged) => merged, + None => { + // no overlap, handle case 2 + self.0.insert(idx, range); + return; } + }; + + let first_non_overlap = match merged.end { + None => len, + Some(end) => (idx + 1) + self.0[(idx + 1)..].partition_point(|r| r.begin <= end), + }; + + let last_overlap = first_non_overlap - 1; + if last_overlap > idx { + // the inserted range overlaps multiple entries, we need to use the largest end value of all the overlapped + // ranges (which is either the end of the last one or the end of the one we're inserting). + merged = merged.merge_unchecked(&self.0[last_overlap]); } - let inserted = self.0.insert(range); - assert!(inserted); + self.0[idx] = merged; + // Vec::drain is the most efficient way to remove a range. + self.0.drain((idx + 1)..first_non_overlap); } /// Computes the union of this set of ranges and another @@ -291,7 +351,7 @@ impl VersionRanges { let mut this = self.0.iter().peekable(); let mut other = other.0.iter().peekable(); - let mut out = SortedVectorSet::new(); + let mut out = Vec::new(); let mut pending: Option = None; loop { let smaller = match (this.peek(), other.peek()) { @@ -313,7 +373,7 @@ impl VersionRanges { if let Some(merged) = last.merge(smaller) { merged } else { - out.insert(last); + out.push(last); smaller.dupe() } }, @@ -321,18 +381,24 @@ impl VersionRanges { } if let Some(last) = pending { - out.insert(last); + out.push(last); } VersionRanges(out) } + #[allow(unused)] // TODO(cjhopman): This will be used. 
+ pub(crate) fn union_in_place(&mut self, other: &VersionRanges) { + // TODO(cjhopman): implement this efficiently. + *self = self.union(other); + } + /// Computes the intersection of this set of ranges and another pub(crate) fn intersect(&self, other: &VersionRanges) -> VersionRanges { let mut this = self.0.iter().peekable(); let mut other = other.0.iter().peekable(); - let mut out = SortedVectorSet::new(); + let mut out = Vec::new(); // Pending is the last range we saw that has the largest end point, which is not the // standard sorting of intervals. // We want the largest end point interval to handle cases where there is one large interval @@ -358,7 +424,7 @@ impl VersionRanges { // we know that within an VersionRange, there are no overlaps, so as soon as we // have an intersection, it can be pushed to the result and no other ranges // will overlap with the intersection - out.insert(intersect); + out.push(intersect); // get the largest ending range pending = Some(cmp::max_by(r, smaller.dupe(), |r1, r2| { @@ -382,50 +448,163 @@ impl VersionRanges { VersionRanges(out) } - /// Computes the intersection of this set of ranges and a range. - #[allow(unused)] // useful function - pub(crate) fn intersect_range(&self, range: &VersionRange) -> VersionRanges { - let mut ranges = VersionRanges::new(); - ranges.insert(range.dupe()); - self.intersect(&ranges) + #[allow(unused)] // TODO(cjhopman): This will be used. + pub(crate) fn intersect_in_place(&mut self, other: &VersionRanges) { + if self != other { + *self = self.intersect(other) + } } - pub(crate) fn ranges(&self) -> &SortedVectorSet { - &self.0 + /// Computes the intersection of this set of ranges and a range. + #[allow(unused)] // TODO(cjhopman): This will be used. + pub(crate) fn intersect_range(&mut self, range: VersionRange) -> bool { + if self.is_empty() { + return false; + } + + let self_begin = self.0.first().unwrap().begin; + let self_end = self.0.last().unwrap().end; + + if range.begin <= self_begin { + match (self_end, range.end) { + (Some(_), None) => { + return false; + } + (Some(self_end), Some(range_end)) if self_end <= range_end => { + return false; + } + _ => {} + } + } + + if let Some(end) = range.end { + for j in (0..self.0.len()).rev() { + let v = &mut self.0[j]; + if v.begin < end { + match v.end { + Some(this_end) if this_end < end => {} + _ => v.end = Some(end), + } + break; + } else { + self.0.pop(); + } + } + } + + let begin = range.begin; + let mut i = 0; + while i < self.0.len() { + let v = &mut self.0[i]; + + match v.end { + Some(e) if e <= begin => { + i += 1; + continue; + } + _ => {} + }; + + if v.begin < begin { + v.begin = begin; + } + break; + } + if i < self.0.len() { + self.0.drain(0..i); + } else { + self.clear() + } + + true } pub(crate) fn is_empty(&self) -> bool { // Ranges in the set are not empty, so self is not empty if the ranges set is not empty. 
- self.ranges().is_empty() + self.0.is_empty() } -} -#[cfg(test)] -pub(crate) mod testing { - use sorted_vector_map::SortedVectorSet; + pub(crate) fn contains(&self, version: VersionNumber) -> bool { + self.find_value_upper_bound(version) == Some(version) + } - use crate::versions::VersionRange; - use crate::versions::VersionRanges; + pub(crate) fn to_introspectable( + &self, + ) -> Vec<( + crate::introspection::graph::VersionNumber, + Option, + )> { + self.0 + .iter() + .map(|v| { + ( + v.begin().to_introspectable(), + v.end().map(|v| v.to_introspectable()), + ) + }) + .collect() + } - pub(crate) trait VersionRangesExt { - fn testing_new(ranges: SortedVectorSet) -> Self; + pub(crate) fn clear(&mut self) { + self.0.clear() } +} - impl VersionRangesExt for VersionRanges { - fn testing_new(ranges: SortedVectorSet) -> Self { - Self(ranges) - } +#[cfg(test)] +impl VersionRanges { + pub(crate) fn new() -> Self { + Self(Default::default()) + } + + pub(crate) fn testing_new(ranges: Vec) -> Self { + Self(ranges) } } #[cfg(test)] mod tests { - use sorted_vector_map::sorted_vector_set; + use std::ops::RangeBounds; use crate::versions::VersionNumber; use crate::versions::VersionRange; use crate::versions::VersionRanges; + #[track_caller] + fn into_range(range: (i32, i32)) -> VersionRange { + let (b, e) = range; + match e { + -1 => VersionRange::begins_with(VersionNumber::new(b.try_into().unwrap())), + e => VersionRange::bounded( + VersionNumber::new(b.try_into().unwrap()), + VersionNumber::new(e.try_into().unwrap()), + ), + } + } + + #[track_caller] + fn into_ranges(ranges: [(i32, i32); N]) -> VersionRanges { + VersionRanges(ranges.iter().copied().map(into_range).collect()) + } + + #[test] + fn version_range_contains() { + let r1 = VersionRange::bounded(VersionNumber::new(3), VersionNumber::new(6)); + assert_eq!(r1.contains(&VersionNumber::new(1)), false); + assert_eq!(r1.contains(&VersionNumber::new(2)), false); + assert_eq!(r1.contains(&VersionNumber::new(3)), true); + assert_eq!(r1.contains(&VersionNumber::new(4)), true); + assert_eq!(r1.contains(&VersionNumber::new(5)), true); + assert_eq!(r1.contains(&VersionNumber::new(6)), false); + assert_eq!(r1.contains(&VersionNumber::new(7)), false); + assert_eq!(r1.contains(&VersionNumber::new(8)), false); + + let r1 = VersionRange::begins_with(VersionNumber::new(3)); + assert_eq!(r1.contains(&VersionNumber::new(2)), false); + assert_eq!(r1.contains(&VersionNumber::new(3)), true); + assert_eq!(r1.contains(&VersionNumber::new(4)), true); + assert_eq!(r1.contains(&VersionNumber::new(5000)), true); + } + #[test] fn version_range_intersects() { let r1 = VersionRange::bounded(VersionNumber::new(0), VersionNumber::new(4)); @@ -614,138 +793,222 @@ mod tests { #[test] fn version_ranges_union() { - let r1 = VersionRanges(sorted_vector_set![ - VersionRange::bounded(VersionNumber::new(1), VersionNumber::new(3)), - VersionRange::bounded(VersionNumber::new(6), VersionNumber::new(8)), - VersionRange::bounded(VersionNumber::new(10), VersionNumber::new(11)) - ]); - - let r2 = VersionRanges(sorted_vector_set![ - VersionRange::bounded(VersionNumber::new(0), VersionNumber::new(1)), - VersionRange::bounded(VersionNumber::new(4), VersionNumber::new(5)), - VersionRange::bounded(VersionNumber::new(8), VersionNumber::new(10)), - VersionRange::bounded(VersionNumber::new(11), VersionNumber::new(12)), - VersionRange::begins_with(VersionNumber::new(13)), - ]); + let r1 = into_ranges([(1, 3), (6, 8), (10, 11)]); + let r2 = into_ranges([(0, 1), (4, 5), (8, 10), (11, 12), (13, -1)]); 
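+        // Note the adjacency merging in the expected result below: [0,1) touches
+        // [1,3) and becomes [0,3), and the chain [6,8), [8,10), [10,11), [11,12)
+        // collapses into [6,12), because merge() treats a range whose exclusive
+        // end equals the next range's begin as overlapping.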
assert_eq!( r1.union(&r2), - VersionRanges(sorted_vector_set![ - VersionRange::bounded(VersionNumber::new(0), VersionNumber::new(3)), - VersionRange::bounded(VersionNumber::new(4), VersionNumber::new(5)), - VersionRange::bounded(VersionNumber::new(6), VersionNumber::new(12)), - VersionRange::begins_with(VersionNumber::new(13)), - ]) + into_ranges([(0, 3), (4, 5), (6, 12), (13, -1)]) ); } #[test] fn version_ranges_intersect() { - let r1 = VersionRanges(sorted_vector_set![VersionRange::bounded( - VersionNumber::new(1), - VersionNumber::new(3) - ),]); - - let r2 = VersionRanges(sorted_vector_set![ - VersionRange::bounded(VersionNumber::new(4), VersionNumber::new(5)), - VersionRange::bounded(VersionNumber::new(11), VersionNumber::new(12)), - VersionRange::begins_with(VersionNumber::new(13)), - ]); + let r1 = into_ranges([(1, 3)]); + let r2 = into_ranges([(4, 5), (11, 12), (13, -1)]); assert_eq!(r1.intersect(&r2), VersionRanges::new()); - let r1 = VersionRanges(sorted_vector_set![ - VersionRange::bounded(VersionNumber::new(1), VersionNumber::new(3)), - VersionRange::bounded(VersionNumber::new(4), VersionNumber::new(5)), - VersionRange::bounded(VersionNumber::new(6), VersionNumber::new(9)), - VersionRange::bounded(VersionNumber::new(10), VersionNumber::new(14)), - VersionRange::begins_with(VersionNumber::new(15)), - ]); - - let r2 = VersionRanges(sorted_vector_set![ - VersionRange::bounded(VersionNumber::new(0), VersionNumber::new(1)), - VersionRange::bounded(VersionNumber::new(4), VersionNumber::new(5)), - VersionRange::bounded(VersionNumber::new(8), VersionNumber::new(10)), - VersionRange::bounded(VersionNumber::new(11), VersionNumber::new(12)), - VersionRange::begins_with(VersionNumber::new(13)), - ]); + let r1 = into_ranges([(1, 3), (4, 5), (6, 9), (10, 14), (15, -1)]); + let r2 = into_ranges([(0, 1), (4, 5), (8, 10), (11, 12), (13, -1)]); assert_eq!( r1.intersect(&r2), - VersionRanges(sorted_vector_set![ - VersionRange::bounded(VersionNumber::new(4), VersionNumber::new(5)), - VersionRange::bounded(VersionNumber::new(8), VersionNumber::new(9)), - VersionRange::bounded(VersionNumber::new(11), VersionNumber::new(12)), - VersionRange::bounded(VersionNumber::new(13), VersionNumber::new(14)), - VersionRange::begins_with(VersionNumber::new(15)), - ]) + into_ranges([(4, 5), (8, 9), (11, 12), (13, 14), (15, -1)]) ); } #[test] fn version_ranges_intersects_range() { - let r1 = VersionRanges(sorted_vector_set![ - VersionRange::bounded(VersionNumber(10), VersionNumber(20)), - VersionRange::bounded(VersionNumber(30), VersionNumber(40)), - ]); - let r2 = VersionRange::bounded(VersionNumber(15), VersionNumber(35)); - let expected = VersionRanges(sorted_vector_set![ - VersionRange::bounded(VersionNumber(15), VersionNumber(20)), - VersionRange::bounded(VersionNumber(30), VersionNumber(35)), - ]); - assert_eq!(expected, r1.intersect_range(&r2)); + #[track_caller] + fn assert_intersect_range( + initial: [(i32, i32); N], + intersect_with: (i32, i32), + expected: [(i32, i32); M], + ) { + let initial = into_ranges(initial); + let mut as_ranges = initial.clone(); + let intersect_with = into_range(intersect_with); + as_ranges.intersect_range(intersect_with); + let expected_ranges = into_ranges(expected); + + assert_eq!( + as_ranges, expected_ranges, + "in assert_intersect_range(\n {:?},\n {:?},\n {:?}\n)", + initial, intersect_with, expected + ) + } + + assert_intersect_range([], (0, -1), []); + + assert_intersect_range( + [(10, 20), (30, 40), (50, 60)], + (35, -1), + [(35, 40), (50, 60)], + ); + + // check cases 
for begin (before all ranges, between ranges, at beginning, within, at end, after all) + assert_intersect_range( + [(10, 20), (30, 40), (50, 60)], + (5, 55), + [(10, 20), (30, 40), (50, 55)], + ); + + assert_intersect_range( + [(10, 20), (30, 40), (50, 60)], + (25, 55), + [(30, 40), (50, 55)], + ); + + assert_intersect_range( + [(10, 20), (30, 40), (50, 60)], + (30, 55), + [(30, 40), (50, 55)], + ); + + assert_intersect_range( + [(10, 20), (30, 40), (50, 60)], + (35, 55), + [(35, 40), (50, 55)], + ); + + assert_intersect_range([(10, 20), (30, 40), (50, 60)], (40, 55), [(50, 55)]); + + assert_intersect_range([(10, 20), (30, 40), (50, 60)], (65, 75), []); + + // And check similar cases for end + assert_intersect_range([(10, 20), (30, 40), (50, 60)], (0, 5), []); + + assert_intersect_range([(10, 20), (30, 40), (50, 60)], (35, 45), [(35, 40)]); + + assert_intersect_range([(10, 20), (30, 40), (50, 60)], (35, 50), [(35, 40)]); + + assert_intersect_range( + [(10, 20), (30, 40), (50, 60)], + (35, 55), + [(35, 40), (50, 55)], + ); + + assert_intersect_range( + [(10, 20), (30, 40), (50, 60)], + (35, 60), + [(35, 40), (50, 60)], + ); + + assert_intersect_range( + [(10, 20), (30, 40), (50, 60)], + (35, 65), + [(35, 40), (50, 60)], + ); } #[test] fn version_ranges_insert() { - let mut r = VersionRanges::new(); + #[track_caller] + fn test_insert( + initial: [(i32, i32); N], + range: (i32, i32), + expected: [(i32, i32); M], + ) { + let initial = into_ranges(initial); + let mut r = initial.clone(); + let range = into_range(range); + r.insert(range); + let expected = into_ranges(expected); + assert!( + r == expected, + "test_insert assertion failed\n initial: {}\n range: {}\n expected: {}\n actual: {}", + initial, + range, + expected, + r + ); + } - r.insert(VersionRange::bounded( - VersionNumber::new(1), - VersionNumber::new(3), - )); - assert_eq!( - r.ranges(), - &sorted_vector_set![VersionRange::bounded( - VersionNumber::new(1), - VersionNumber::new(3) - )] + test_insert([], (1, 3), [(1, 3)]); + + test_insert([(1, 3)], (4, 6), [(1, 3), (4, 6)]); + + // Before: |...) |...) + // Insert: |...) + test_insert([(1, 3), (4, 6)], (5, 7), [(1, 3), (4, 7)]); + + // Before: |...) |...) + // Insert: |...) + test_insert([(1, 3), (4, 7)], (0, 1), [(0, 3), (4, 7)]); + + // Before: |...) |...) |...) + // Insert: |..) + test_insert( + [(20, 25), (30, 35), (40, 45)], + (22, 27), + [(20, 27), (30, 35), (40, 45)], ); - r.insert(VersionRange::bounded( - VersionNumber::new(4), - VersionNumber::new(6), - )); - assert_eq!( - r.ranges(), - &sorted_vector_set![ - VersionRange::bounded(VersionNumber::new(1), VersionNumber::new(3)), - VersionRange::bounded(VersionNumber::new(4), VersionNumber::new(6)) - ] + // Before: |...) |...) |...) + // Insert: |....) + test_insert( + [(20, 25), (30, 35), (40, 45)], + (22, 30), + [(20, 35), (40, 45)], ); - r.insert(VersionRange::bounded( - VersionNumber::new(5), - VersionNumber::new(7), - )); - assert_eq!( - r.ranges(), - &sorted_vector_set![ - VersionRange::bounded(VersionNumber::new(1), VersionNumber::new(3)), - VersionRange::bounded(VersionNumber::new(4), VersionNumber::new(7)) - ] + // Before: |...) |...) |...) + // Insert: |...) 
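+        // After:  |........)      |...)  i.e. [(20, 35), (40, 45)], per the expected value below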
+ test_insert( + [(20, 25), (30, 35), (40, 45)], + (25, 30), + [(20, 35), (40, 45)], ); - r.insert(VersionRange::bounded( - VersionNumber::new(0), - VersionNumber::new(1), - )); - assert_eq!( - r.ranges(), - &sorted_vector_set![ - VersionRange::bounded(VersionNumber::new(0), VersionNumber::new(3)), - VersionRange::bounded(VersionNumber::new(4), VersionNumber::new(7)) - ] + // Before: |...) |...) |...) + // Insert: |......) + test_insert( + [(20, 25), (30, 35), (40, 45)], + (22, 33), + [(20, 35), (40, 45)], + ); + + // Before: |...) |...) |...) + // Insert: |...........) + test_insert([(20, 25), (30, 35), (40, 45)], (25, 40), [(20, 45)]); + + // Before: |...) |...) |...) + // Insert: |..........) + test_insert( + [(20, 25), (30, 35), (40, 45)], + (22, 37), + [(20, 37), (40, 45)], + ); + + // Before: |...) |...) |...) + // Insert: |...................) + test_insert([(20, 25), (30, 35), (40, 45)], (22, 47), [(20, 47)]); + + // Before: |...) |...) |...) + // Insert: |...................inf + test_insert([(20, 25), (30, 35), (40, 45)], (22, -1), [(20, -1)]); + + // Before: |...) |...) |...) |...inf + // Insert: |.....................) + test_insert( + [(20, 25), (30, 35), (40, 45), (50, -1)], + (22, 50), + [(20, -1)], + ); + + test_insert( + [ + (20, 25), + (30, 35), + (40, 45), + (50, 55), + (60, 65), + (70, 75), + (80, 85), + ], + (22, 42), + [(20, 45), (50, 55), (60, 65), (70, 75), (80, 85)], ); } } diff --git a/dice/dice_examples/BUCK b/dice/dice_examples/BUCK index 149414d2fd65e..02afe36bcd9d7 100644 --- a/dice/dice_examples/BUCK +++ b/dice/dice_examples/BUCK @@ -1,5 +1,5 @@ +load("@fbcode_macros//build_defs:rust_binary.bzl", "rust_binary") load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -13,12 +13,27 @@ rust_library( "fbsource//third-party/rust:async-trait", "fbsource//third-party/rust:derive_more", "fbsource//third-party/rust:futures", - "fbsource//third-party/rust:higher-order-closure", "fbsource//third-party/rust:ref-cast", "//buck2/allocative/allocative:allocative", + "//buck2/app/buck2_futures:buck2_futures", "//buck2/dice/dice:dice", "//buck2/gazebo/dupe:dupe", "//buck2/gazebo/gazebo:gazebo", - "//buck2/shed/more_futures:more_futures", + ], +) + +rust_binary( + name = "many_cancel", + srcs = ["bin/many_cancel.rs"], + deps = [ + "fbsource//third-party/rust:async-trait", + "fbsource//third-party/rust:clap", + "fbsource//third-party/rust:derive_more", + "fbsource//third-party/rust:futures", + "fbsource//third-party/rust:tokio", + "//buck2/allocative/allocative:allocative", + "//buck2/app/buck2_futures:buck2_futures", + "//buck2/dice/dice:dice", + "//buck2/gazebo/dupe:dupe", ], ) diff --git a/dice/dice_examples/Cargo.toml b/dice/dice_examples/Cargo.toml index 7aa58ebbb78e5..70848c63244a6 100644 --- a/dice/dice_examples/Cargo.toml +++ b/dice/dice_examples/Cargo.toml @@ -1,31 +1,26 @@ [package] +edition = "2021" +license = { workspace = true } name = "dice_examples" +repository = { workspace = true } version = "0.0.0" -edition = "2021" [dependencies] +allocative = { workspace = true } anyhow = "1.0.65" async-trait = "0.1.24" -derive_more = "0.99.3" -futures = "0.3" -higher-order-closure = { workspace = true } -ref-cast = "1.0.0" +buck2_futures = { path = "../../app/buck2_futures" } +derive_more = { version = "1.0.0", features = ["full"] } dupe = { workspace = true } +futures = "0.3" gazebo = { workspace = true } -gazebo_lint.version = "0.1" -gazebo_lint.optional = true -# @oss-disable: 
gazebo_lint.path = "../../gazebo_lint/gazebo_lint" -more_futures = { path = "../../shed/more_futures" } -allocative = { workspace = true } +ref-cast = "1.0.0" dice = { path = "../dice" } [dev-dependencies] +anyhow = "1.0.65" cast = "0.2.3" futures = "0.3" -anyhow = "1.0.65" paste = "1.0" # ! -tokio = { version = "1.5", features = ["full"]} - -[features] -# @oss-disable: default = ["gazebo_lint"] +tokio = { version = "1.5", features = ["full"] } diff --git a/dice/dice_examples/bin/many_cancel.rs b/dice/dice_examples/bin/many_cancel.rs new file mode 100644 index 0000000000000..0be6fba4bacc4 --- /dev/null +++ b/dice/dice_examples/bin/many_cancel.rs @@ -0,0 +1,334 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! Creates a computation that consists of a top-chain, a bottom-chain, and a dense middle. +//! +//! The dependency structure is something like: +//! T0 -> T1 -> T2 -> ... -> TN -> DN <...> D0 +//! | | | | | +//! v v v v | +//! B0 <- B1 <- B2 <- ... <- BN <------------/ +//! | +//! v +//! Leaf +//! +//! We use this computation to explore the costs of computing and especially recomputing +//! different graph structures and to understand how dice is cancelling nodes during +//! recomputation. +//! +//! The process is that first we compute T0 at v0. +//! +//! We will then invalidate Leaf and recompute T0, but with additional side-channel enforced ordering. +//! +//! Specifically, we add waits in the computes so that: +//! 1. for all K > 0, the compute call for BK+1 pauses until the compute call for TK has started +//! 2. for all K, the compute call for DK pauses until the compute call for TN has started (the last node in the top chain) +//! +//! This causes dice to go through a specific controlled flow: +//! +//! 1. T0 is requested and is dirtied, so request recompute of all deps of T0 +//! 2. this potentially starts recompute of the entire graph, but only B0 can actually proceed +//! 3. B0 is recomputed and has changed, still nothing can progress +//! 4. A dep of T0 has changed and so it must be actually recomputed +//! 5. Potentially dice cancels the other in-progress computations of T0 deps (and so recursively the whole graph) +//! 6. start recomputing T0, this requests T1 +//! 7. T1 is requested and is dirtied, so request recompute of all deps of T1 +//! 8. basically repeat (1) - (6) for T1 +//! 9. repeat (7) - (8) for T2, ..., TN +//! 10. finally, once TN has started recomputing it will allow all the dense nodes to recompute once it requests them. +//! +//! So simplified it kinda looks like in sequence: +//! +//! check_deps(T0), compute(B0), compute(T0), check_deps(T1), compute(B1), ..., compute(D0..DN) +//! +//! If at (5) dice actually cancels the in-progress computations, you can actually end up with many times more +//! cancellations than there are nodes in the graph. 
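The new binary coordinates its compute ordering with Tokio semaphores used as one-shot latches (the `Latches` struct just below). A minimal self-contained sketch of that idiom, with hypothetical names:

    use tokio::sync::Semaphore;

    /// One-shot latch: starts closed (zero permits). open() adds a large
    /// permit count so every current and future waiter can proceed.
    struct Latch(Semaphore);

    impl Latch {
        fn new() -> Self {
            Latch(Semaphore::new(0))
        }

        async fn wait(&self) {
            // forget() consumes the permit rather than returning it to the
            // semaphore; the large count added by open() keeps the latch
            // open for all other waiters anyway.
            self.0.acquire().await.unwrap().forget();
        }

        fn open(&self) {
            self.0.add_permits(Semaphore::MAX_PERMITS / 2);
        }
    }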
+use std::time::Duration; +use std::time::Instant; + +use allocative::Allocative; +use async_trait::async_trait; +use buck2_futures::cancellation::CancellationContext; +use clap::Parser; +use derive_more::Display; +use dice::DetectCycles; +use dice::Dice; +use dice::DiceComputations; +use dice::DiceData; +use dice::GlobalStats; +use dice::InjectedKey; +use dice::Key; +use dice::UserComputationData; +use dupe::Dupe; +use futures::FutureExt; +use tokio::sync::Semaphore; + +struct Latches { + /// One latch for each Top/Bottom pair. Bottom(K + 1) will wait for latch K to be released (by Top(K)). + chain_latches: Vec, + /// One latch for all dense nodes, this will be released by the last Top node in the chain. + dense_latch: Semaphore, +} + +impl Latches { + fn new(max_chain: u32) -> Self { + Self { + chain_latches: (0..(max_chain + 1)) // chain goes to max_chain + .map(|_| Semaphore::new(0)) + .collect(), + dense_latch: Semaphore::new(0), + } + } + + async fn chain_latch_wait(&self, i: usize) { + if let Some(v) = self.chain_latches.get(i) { + v.acquire().await.unwrap().forget() + } + } + + fn chain_latch_release(&self, i: usize) { + self.chain_latches + .get(i) + .unwrap() + .add_permits(Semaphore::MAX_PERMITS / 10); + } + + async fn dense_latch_wait(&self) { + self.dense_latch.acquire().await.unwrap().forget(); + } + + fn dense_latch_release(&self) { + self.dense_latch + .add_permits(Semaphore::MAX_PERMITS - self.dense_latch.available_permits()); + } + + /// Releases all latches, we use this for the first computation. + fn release_all(&self) { + self.dense_latch_release(); + for i in 0..self.chain_latches.len() { + self.chain_latch_release(i) + } + } +} + +#[derive(Clone, Display, Debug, Dupe, Eq, Hash, PartialEq, Allocative)] +#[display("TopKey({})", _0)] +pub struct TopKey(u32); + +#[async_trait] +impl Key for TopKey { + type Value = u32; + + async fn compute( + &self, + ctx: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> u32 { + let config = ctx.compute(&ConfigKey).await.unwrap(); + let latches = ctx.per_transaction_data().data.get::().unwrap(); + if self.0 == config.chain_count { + latches.dense_latch_release(); + ctx.compute2( + |ctx| async move { drop(ctx.compute(&DenseKey(config.dense_count)).await) }.boxed(), + |ctx| async move { drop(ctx.compute(&BottomKey(self.0)).await) }.boxed(), + ) + .await; + } else { + latches.chain_latch_release(self.0 as usize + 1); + ctx.compute2( + |ctx| { + async move { + // This allows time to drop the graph of things below this before we re-request them. 
+ std::thread::sleep(Duration::from_millis(config.wait_millis)); + drop(ctx.compute(&TopKey(self.0 + 1)).await) + } + .boxed() + }, + |ctx| async move { drop(ctx.compute(&BottomKey(self.0)).await) }.boxed(), + ) + .await; + } + self.0 + } + + fn equality(_x: &Self::Value, _y: &Self::Value) -> bool { + false + } +} + +#[derive(Clone, Display, Debug, Dupe, Eq, Hash, PartialEq, Allocative)] +#[display("BottomKey({})", _0)] +pub struct BottomKey(u32); + +#[async_trait] +impl Key for BottomKey { + type Value = u32; + + async fn compute( + &self, + ctx: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> u32 { + let config = ctx.compute(&ConfigKey).await.unwrap(); + let latches = ctx.per_transaction_data().data.get::().unwrap(); + latches.chain_latch_wait(self.0 as usize).await; + // this gives time for the graph of deps to be all requested before returning a changed value and cancelling them + std::thread::sleep(Duration::from_millis(config.wait_millis)); + if self.0 == 0 { + drop(ctx.compute(&Leaf).await); + } else { + drop(ctx.compute(&BottomKey(self.0 - 1)).await); + } + self.0 + } + + fn equality(_x: &Self::Value, _y: &Self::Value) -> bool { + false + } +} + +#[derive(Clone, Display, Debug, Dupe, Eq, Hash, PartialEq, Allocative)] +#[display("DenseKey({})", _0)] +pub struct DenseKey(u32); + +#[async_trait] +impl Key for DenseKey { + type Value = u32; + + async fn compute( + &self, + ctx: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> u32 { + let config = ctx.compute(&ConfigKey).await.unwrap(); + let latches = ctx.per_transaction_data().data.get::().unwrap(); + latches.dense_latch_wait().await; + std::thread::sleep(Duration::from_millis(config.wait_millis)); + if self.0 == 0 { + drop(ctx.compute(&BottomKey(config.chain_count)).await) + } else { + drop( + ctx.compute_join(0..self.0, |ctx, v| { + async move { drop(ctx.compute(&DenseKey(v)).await) }.boxed() + }) + .await, + ); + } + self.0 + } + + fn equality(_x: &Self::Value, _y: &Self::Value) -> bool { + false + } +} + +#[derive(Clone, Display, Debug, Dupe, Eq, Hash, PartialEq, Allocative)] +#[display("Leaf")] +pub struct Leaf; + +impl InjectedKey for Leaf { + type Value = u32; + + fn equality(_x: &Self::Value, _y: &Self::Value) -> bool { + false + } +} + +#[derive(Clone, Display, Debug, Dupe, Eq, Hash, PartialEq, Allocative)] +#[display("ConfigKey")] +pub struct ConfigKey; + +impl InjectedKey for ConfigKey { + type Value = Config; + + fn equality(_x: &Self::Value, _y: &Self::Value) -> bool { + false + } +} + +#[derive(Parser, Debug, Clone, Copy, Dupe, Allocative)] +pub struct Config { + #[arg(long, default_value_t = 100)] + chain_count: u32, + #[arg(long, default_value_t = 1000)] + dense_count: u32, + #[arg(long, default_value_t = 5)] + wait_millis: u64, + #[arg(long, default_value_t = false)] + modern: bool, + #[arg(long, default_value_t = false)] + detect_cycles: bool, +} + +#[tokio::main] +async fn main() { + let config = Config::parse(); + + eprintln!("Using config {:?}", &config); + + let builder = if config.modern { + Dice::modern() + } else { + Dice::builder() + }; + let dice = if config.detect_cycles { + builder.build(DetectCycles::Enabled) + } else { + builder.build(DetectCycles::Disabled) + }; + + let start = Instant::now(); + + { + let mut ctx = dice.updater(); + ctx.changed_to(vec![(ConfigKey, config)]).unwrap(); + ctx.commit().await; + } + + eprintln!("starting first computation"); + + { + let mut dice_data = DiceData::new(); + let latches = Latches::new(config.chain_count); + 
latches.release_all(); + dice_data.set(latches); + let mut ctx = dice.updater_with_data(UserComputationData { + data: dice_data, + ..Default::default() + }); + + ctx.changed_to(vec![(Leaf, 0)]).unwrap(); + + let mut ctx = ctx.commit().await; + drop(ctx.compute(&TopKey(0)).await); + } + + let first_done = Instant::now(); + + eprintln!("first computation took {}s", start.elapsed().as_secs_f32()); + + let latches = Latches::new(config.chain_count); + latches.chain_latch_release(0); + + { + let mut dice_data = DiceData::new(); + dice_data.set(latches); + let mut ctx = dice.updater_with_data(UserComputationData { + data: dice_data, + ..Default::default() + }); + ctx.changed_to(vec![(Leaf, 1)]).unwrap(); + let mut ctx = ctx.commit().await; + drop(ctx.compute(&TopKey(0)).await); + } + + eprintln!("recompute took {}s", first_done.elapsed().as_secs_f32()); + + eprintln!("cancellation count {}", GlobalStats::get().cancellations); +} diff --git a/dice/dice_examples/src/lib.rs b/dice/dice_examples/src/lib.rs index 9b152cfb497fa..65e40dd3956c7 100644 --- a/dice/dice_examples/src/lib.rs +++ b/dice/dice_examples/src/lib.rs @@ -7,8 +7,5 @@ * of this source tree. */ -#[macro_use] -extern crate higher_order_closure; - pub mod math_computation; pub mod supply_chain; diff --git a/dice/dice_examples/src/math_computation.rs b/dice/dice_examples/src/math_computation.rs index 9d21e95006231..9434f8be94212 100644 --- a/dice/dice_examples/src/math_computation.rs +++ b/dice/dice_examples/src/math_computation.rs @@ -18,9 +18,9 @@ use std::sync::Arc; use allocative::Allocative; use async_trait::async_trait; +use buck2_futures::cancellation::CancellationContext; use derive_more::Display; use dice::DiceComputations; -use dice::DiceComputationsParallel; use dice::DiceTransactionUpdater; use dice::InjectedKey; use dice::Key; @@ -29,10 +29,9 @@ use futures::future; use futures::future::BoxFuture; use futures::FutureExt; use gazebo::prelude::*; -use more_futures::cancellation::CancellationContext; #[derive(Clone, Dupe, PartialEq, Eq, Hash, Display, Debug, Allocative)] -#[display(fmt = "Var({})", _0)] +#[display("Var({})", _0)] pub struct Var(pub Arc); #[derive(Clone, PartialEq, Eq, Hash, Debug, Allocative)] @@ -126,7 +125,7 @@ impl MathEquations for DiceTransactionUpdater { } #[derive(Clone, Display, Debug, Dupe, Eq, Hash, PartialEq, Allocative)] -#[display(fmt = "Eval({})", _0)] +#[display("Eval({})", _0)] pub struct EvalVar(pub Var); #[async_trait] impl Key for EvalVar { @@ -153,7 +152,7 @@ impl Key for EvalVar { } #[async_trait] -impl Math for DiceComputations { +impl Math for DiceComputations<'_> { async fn eval(&mut self, var: Var) -> Result> { Ok(self .compute(&EvalVar(var)) @@ -162,20 +161,20 @@ impl Math for DiceComputations { } } -async fn resolve_units( - ctx: &DiceComputations, +async fn resolve_units<'a>( + ctx: &mut DiceComputations<'a>, units: &[Unit], ) -> Result, Arc> { - let futs = ctx.compute_many(units.iter().map(|unit| - higher_order_closure! 
{ - for<'x> move |ctx: &'x mut DiceComputationsParallel<'_>| -> BoxFuture<'x, Result>> { + let futs = ctx.compute_many(units.iter().map(|unit| { + DiceComputations::declare_closure( + move |ctx: &mut DiceComputations| -> BoxFuture>> { match unit { Unit::Var(var) => ctx.eval(var.clone()).boxed(), Unit::Literal(lit) => futures::future::ready(Ok(*lit)).boxed(), } - } - } - )); + }, + ) + })); future::join_all(futs) .await @@ -183,12 +182,12 @@ async fn resolve_units( .collect::>() } -async fn lookup_unit(ctx: &DiceComputations, var: &Var) -> anyhow::Result> { +async fn lookup_unit(ctx: &mut DiceComputations<'_>, var: &Var) -> anyhow::Result> { Ok(ctx.compute(&LookupVar(var.clone())).await?) } #[derive(Clone, Dupe, Display, Debug, Eq, Hash, PartialEq, Allocative)] -#[display(fmt = "Lookup({})", _0)] +#[display("Lookup({})", _0)] struct LookupVar(Var); impl InjectedKey for LookupVar { type Value = Arc; diff --git a/dice/dice_examples/src/supply_chain.rs b/dice/dice_examples/src/supply_chain.rs index c7cc8a30cac7d..b48fe26345d4f 100644 --- a/dice/dice_examples/src/supply_chain.rs +++ b/dice/dice_examples/src/supply_chain.rs @@ -20,14 +20,13 @@ //! The cost of a resource is the sum of each item cost + a company specific flat fee. use std::collections::HashMap; -use std::future::Future; use std::sync::Arc; use allocative::Allocative; use async_trait::async_trait; +use buck2_futures::cancellation::CancellationContext; use derive_more::Display; use dice::DiceComputations; -use dice::DiceComputationsParallel; use dice::DiceResult; use dice::DiceTransactionUpdater; use dice::InjectedKey; @@ -39,7 +38,6 @@ use futures::stream::FuturesUnordered; use futures::FutureExt; use futures::StreamExt; use gazebo::prelude::*; -use more_futures::cancellation::CancellationContext; use ref_cast::RefCast; #[derive(Display, Debug, Hash, Eq, Clone, PartialEq, Dupe, Allocative)] @@ -162,16 +160,15 @@ impl Setup for DiceTransactionUpdater { .unzip(); // get the remote resources => company mapping - let state = self.existing_state().await; - let remote_resources = join_all(state - .compute_many(resources.iter().map(|res| { - higher_order_closure! { - for <'x> move |ctx: &'x mut DiceComputationsParallel<'_>| -> BoxFuture<'x, DiceResult>>> { - ctx.compute(res).boxed() - } - } - }))) - .await; + let mut state = self.existing_state().await; + let remote_resources = join_all(state.compute_many(resources.iter().map(|res| { + DiceComputations::declare_closure( + |ctx: &mut DiceComputations<'_>| -> BoxFuture>>> { + ctx.compute(res).boxed() + }, + ) + }))) + .await; // combine remote company list with local company list for reach resource let joined: Vec<_> = resources @@ -211,13 +208,13 @@ pub trait CostUpdater { ) -> anyhow::Result<()>; } -fn lookup_company_resource_cost<'a>( - ctx: &'a DiceComputations, +async fn lookup_company_resource_cost( + ctx: &mut DiceComputations<'_>, company: &LookupCompany, resource: &Resource, -) -> impl Future, Arc>> + 'a { +) -> Result, Arc> { #[derive(Display, Debug, Hash, Eq, Clone, Dupe, PartialEq, Allocative)] - #[display(fmt = "{:?}", self)] + #[display("{:?}", self)] struct LookupCompanyResourceCost(LookupCompany, Resource); #[async_trait] impl Key for LookupCompanyResourceCost { @@ -242,13 +239,11 @@ fn lookup_company_resource_cost<'a>( // get the unit cost for each resource needed to make item let mut futs : FuturesUnordered<_> = ctx.compute_many(recipe.ingredients.iter().map(|(required, resource)| { - higher_order_closure! 
{ - for <'x> move |ctx: &'x mut DiceComputationsParallel<'_>| -> BoxFuture<'x, Result, Arc>> { + DiceComputations::declare_closure(|ctx: &mut DiceComputations<'_>| -> BoxFuture, Arc>> { ctx.resource_cost(resource).map(|res| { Ok::<_, Arc>(res?.map(|x| x * *required as u16)) }).boxed() - } - } + }) })).into_iter().collect(); let mut sum = 0; @@ -270,13 +265,15 @@ fn lookup_company_resource_cost<'a>( } } } + let key = LookupCompanyResourceCost(company.clone(), resource.dupe()); - ctx.compute(&LookupCompanyResourceCost(company.clone(), resource.dupe())) - .map(|r| r.map_err(|e| Arc::new(anyhow::anyhow!(e)))?) + ctx.compute(&key) + .await + .map_err(|e| Arc::new(anyhow::anyhow!(e)))? } #[async_trait] -impl Cost for DiceComputations { +impl Cost for DiceComputations<'_> { async fn resource_cost( &mut self, resource: &Resource, @@ -300,11 +297,11 @@ impl Cost for DiceComputations { let costs = join_all(ctx .compute_many(companies.iter().map(|company| { - higher_order_closure! { - for <'x> move |ctx: &'x mut DiceComputationsParallel<'_>| -> BoxFuture<'x, Result, Arc>> { + DiceComputations::declare_closure( + |ctx: &mut DiceComputations<'_>| -> BoxFuture, Arc>> { lookup_company_resource_cost(ctx, company, &self.0).boxed() } - } + ) }))) .await; diff --git a/dice/dice_examples/tests/common/main.rs b/dice/dice_examples/tests/common/main.rs index 23d4e0dd67576..65e40dd3956c7 100644 --- a/dice/dice_examples/tests/common/main.rs +++ b/dice/dice_examples/tests/common/main.rs @@ -7,9 +7,5 @@ * of this source tree. */ -#![cfg_attr(feature = "gazebo_lint", feature(plugin))] -#![cfg_attr(feature = "gazebo_lint", allow(deprecated))] -#![cfg_attr(feature = "gazebo_lint", plugin(gazebo_lint))] - pub mod math_computation; pub mod supply_chain; diff --git a/dice/dice_tests/BUCK b/dice/dice_tests/BUCK index 14fb00acdaf5a..69f9e1e2b511c 100644 --- a/dice/dice_tests/BUCK +++ b/dice/dice_tests/BUCK @@ -13,12 +13,14 @@ rust_library( "fbsource//third-party/rust:anyhow", "fbsource//third-party/rust:async-trait", "fbsource//third-party/rust:derive_more", + "fbsource//third-party/rust:futures", "fbsource//third-party/rust:parking_lot", "fbsource//third-party/rust:tempfile", "fbsource//third-party/rust:tokio", "//buck2/allocative/allocative:allocative", + "//buck2/app/buck2_futures:buck2_futures", "//buck2/dice/dice:dice", "//buck2/gazebo/dupe:dupe", - "//buck2/shed/more_futures:more_futures", + "//buck2/gazebo/gazebo:gazebo", ], ) diff --git a/dice/dice_tests/Cargo.toml b/dice/dice_tests/Cargo.toml index a25e9690ff12c..31d72a311139d 100644 --- a/dice/dice_tests/Cargo.toml +++ b/dice/dice_tests/Cargo.toml @@ -1,20 +1,21 @@ [package] -name = "dice_tests" -version = "0.0.0" +description = "e2e tests for DICE. Live in a separate crate for faster development." edition = "2021" +name = "dice_tests" publish = false -description = "e2e tests for DICE. Live in a separate crate for faster development." 
+version = "0.0.0" [dev-dependencies] allocative = { workspace = true } +buck2_futures = { workspace = true } dupe = { workspace = true } -more_futures = { workspace = true } anyhow = { workspace = true } async-trait = { workspace = true } derive_more = { workspace = true } +dice = { workspace = true } +futures = { workspace = true } +gazebo = { workspace = true } parking_lot = { workspace = true } tempfile = { workspace = true } tokio = { workspace = true } - -dice = { workspace = true } diff --git a/dice/dice_tests/src/general.rs b/dice/dice_tests/src/general.rs new file mode 100644 index 0000000000000..46233598bb4b4 --- /dev/null +++ b/dice/dice_tests/src/general.rs @@ -0,0 +1,183 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use allocative::Allocative; +use async_trait::async_trait; +use buck2_futures::cancellation::CancellationContext; +use derive_more::Display; +use dice::DetectCycles; +use dice::Dice; +use dice::DiceComputations; +use dice::InjectedKey; +use dice::Key; +use dupe::Dupe; + +// dice graph storage needs to not reuse deps just because the value hasn't changed +#[tokio::test] +async fn test_dice_recompute_doesnt_reuse_wrong_deps() -> anyhow::Result<()> { + #[derive(Clone, Copy, Dupe, Display, Debug, Eq, PartialEq, Hash, Allocative)] + #[display("{:?}", self)] + struct Leaf(u32); + + impl InjectedKey for Leaf { + type Value = u32; + fn equality(x: &Self::Value, y: &Self::Value) -> bool { + *x == *y + } + } + + #[derive(Clone, Copy, Dupe, Display, Debug, Eq, PartialEq, Hash, Allocative)] + #[display("{:?}", self)] + struct Derived; + + #[async_trait] + impl Key for Derived { + type Value = u32; + + async fn compute( + &self, + ctx: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> u32 { + let x = ctx.compute(&Leaf(0)).await.unwrap(); + ctx.compute(&Leaf(x)).await.unwrap() + } + + fn equality(x: &Self::Value, y: &Self::Value) -> bool { + *x == *y + } + } + + let dice = Dice::modern().build(DetectCycles::Enabled); + + let mut updater = dice.updater(); + updater.changed_to([(Leaf(0), 1), (Leaf(1), 100), (Leaf(2), 200)])?; + let mut ctx1 = updater.commit().await; + + let mut updater = dice.updater(); + updater.changed_to([(Leaf(0), 1), (Leaf(1), 300), (Leaf(2), 200)])?; + let mut ctx2 = updater.commit().await; + + let mut updater = dice.updater(); + updater.changed_to([(Leaf(0), 2), (Leaf(1), 400), (Leaf(2), 100)])?; + let mut ctx3 = updater.commit().await; + + assert_eq!(ctx1.compute(&Derived).await.unwrap(), 100); + assert_eq!(ctx3.compute(&Derived).await.unwrap(), 100); + assert_eq!(ctx2.compute(&Derived).await.unwrap(), 300); + + Ok(()) +} + +#[tokio::test] +async fn test_dice_clear_doesnt_break_ongoing_computation() -> anyhow::Result<()> { + #[derive(Clone, Copy, Dupe, Display, Debug, Eq, PartialEq, Hash, Allocative)] + #[display("{:?}", self)] + struct Fib(u32); + + #[async_trait] + impl Key for Fib { + type Value = Option; + + async fn compute( + &self, + ctx: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> Option { + Some(match self.0 { + 0 => 1, + 1 => 1, + n => { + ctx.compute(&Fib(n - 1)).await.ok()?? + + ctx.compute(&Fib(n - 2)).await.ok()?? 
+ } + }) + } + + fn equality(_x: &Self::Value, _y: &Self::Value) -> bool { + false + } + } + + let dice = Dice::modern().build(DetectCycles::Enabled); + let updater = dice.updater(); + let mut ctx1 = updater.commit().await; + + ctx1.compute(&Fib(3)).await?; + + let updater = dice.updater(); + updater.unstable_take(); + + let res = ctx1.compute(&Fib(10)).await; + + assert!(res.is_err(), "Expected `Err(_)`, got `{:?}`", res); + + Ok(()) +} + +#[test] +fn test_dice_clear_doesnt_cause_inject_compute() { + // Detecting that a dice compute panicked is actually kinda tricky, in normal flow + // that is a hard error but in tests it instead just looks to dice like the node is cancelled. + // We detect it by configuring the runtime to shutdown and panic itself if any task panics, but + // that only works right now with the current_thread runtime. + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .unhandled_panic(tokio::runtime::UnhandledPanic::ShutdownRuntime) + .build() + .unwrap(); + + // Spawn the root task + rt.block_on(async { + #[derive(Clone, Copy, Dupe, Display, Debug, Eq, PartialEq, Hash, Allocative)] + #[display("{:?}", self)] + struct Node; + + #[async_trait] + impl Key for Node { + type Value = u32; + + async fn compute( + &self, + ctx: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> u32 { + drop(ctx.compute(&Leaf).await); + 1 + } + + fn equality(_x: &Self::Value, _y: &Self::Value) -> bool { + false + } + } + + #[derive(Clone, Copy, Dupe, Display, Debug, Eq, PartialEq, Hash, Allocative)] + #[display("{:?}", self)] + struct Leaf; + + impl InjectedKey for Leaf { + type Value = u32; + + fn equality(_x: &Self::Value, _y: &Self::Value) -> bool { + false + } + } + + let dice = Dice::modern().build(DetectCycles::Enabled); + let mut updater = dice.updater(); + drop(updater.changed_to([(Leaf, 1)])); + let mut ctx1 = updater.commit().await; + let fut = ctx1.compute(&Node); + + let updater = dice.updater(); + updater.unstable_take(); + + drop(fut.await); + }); +} diff --git a/dice/dice_tests/src/invalidation_tracking.rs b/dice/dice_tests/src/invalidation_tracking.rs new file mode 100644 index 0000000000000..846aa1dba3711 --- /dev/null +++ b/dice/dice_tests/src/invalidation_tracking.rs @@ -0,0 +1,395 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use std::hash::Hash; +use std::sync::Arc; + +use allocative::Allocative; +use async_trait::async_trait; +use buck2_futures::cancellation::CancellationContext; +use derive_more::Display; +use dice::DetectCycles; +use dice::Dice; +use dice::DiceComputations; +use dice::DiceKeyTrackedInvalidationPaths; +use dice::DiceTrackedInvalidationPath; +use dice::InjectedKey; +use dice::InvalidationSourcePriority; +use dice::Key; +use dupe::Dupe; +use futures::future::FutureExt; +use gazebo::prelude::*; +use gazebo::variants::VariantName; + +#[derive(Allocative, Clone, Copy, Debug, Display, Eq, PartialEq, Hash)] +struct NormalInjected(u32); + +#[derive(Allocative, Clone, Copy, Debug, Display, Eq, PartialEq, Hash)] +struct HighInjected(u32); + +#[derive(Allocative, Clone, Copy, Debug, Display, Eq, PartialEq, Hash)] +struct NormalChanged(u32); + +#[derive(Allocative, Clone, Copy, Debug, Display, Eq, PartialEq, Hash)] +struct HighChanged(u32); + +impl InjectedKey for NormalInjected { + type Value = u32; + fn equality(_x: &Self::Value, _y: &Self::Value) -> bool { + false + } +} + +impl InjectedKey for HighInjected { + type Value = u32; + fn equality(_x: &Self::Value, _y: &Self::Value) -> bool { + false + } + + fn invalidation_source_priority() -> InvalidationSourcePriority { + InvalidationSourcePriority::High + } +} + +#[async_trait] +impl Key for NormalChanged { + type Value = u32; + async fn compute( + &self, + ctx: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> Self::Value { + ctx.compute(&NormalInjected(self.0)).await.unwrap() + } + + fn equality(_x: &Self::Value, _y: &Self::Value) -> bool { + false + } +} + +#[async_trait] +impl Key for HighChanged { + type Value = u32; + async fn compute( + &self, + ctx: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> Self::Value { + ctx.compute(&HighInjected(self.0)).await.unwrap() + } + + fn equality(_x: &Self::Value, _y: &Self::Value) -> bool { + false + } + + fn invalidation_source_priority() -> InvalidationSourcePriority { + InvalidationSourcePriority::High + } +} + +#[derive(Allocative, Debug, Clone, Dupe)] +struct CapturedInvalidationPaths(#[allocative(skip)] Arc); + +impl CapturedInvalidationPaths { + fn new(paths: DiceKeyTrackedInvalidationPaths) -> Self { + Self(Arc::new(paths)) + } +} + +enum ExpectedInvalidation { + Clean, + #[allow(unused)] + Unknown, + Invalidated(Vec<(String, usize)>), +} + +impl ExpectedInvalidation { + fn item(k: K, v: usize) -> (String, usize) { + (format!("{}({})", K::key_type_name(), k), v) + } +} + +#[track_caller] +fn assert_invalidations( + normal: ExpectedInvalidation, + high: ExpectedInvalidation, + actual: CapturedInvalidationPaths, +) { + assert_single_invalidation(normal, &actual.0.normal_priority_path, "normal"); + assert_single_invalidation(high, &actual.0.high_priority_path, "high"); +} + +#[track_caller] +fn assert_single_invalidation( + expected: ExpectedInvalidation, + actual: &DiceTrackedInvalidationPath, + priority: &str, +) { + match (expected, actual) { + (ExpectedInvalidation::Clean, DiceTrackedInvalidationPath::Clean) => {} + (ExpectedInvalidation::Clean, _) => { + panic!( + "{} invalidation mismatch. Expected DiceTrackedInvalidationPath::Clean, got DiceTrackedInvalidationPath::{}", + priority, + actual.variant_name() + ) + } + (ExpectedInvalidation::Unknown, DiceTrackedInvalidationPath::Unknown) => {} + (ExpectedInvalidation::Unknown, _) => { + panic!( + "{} invalidation mismatch. 
Expected DiceTrackedInvalidationPath::Unknown, got DiceTrackedInvalidationPath::{}", + priority, + actual.variant_name() + ) + } + ( + ExpectedInvalidation::Invalidated(expected), + DiceTrackedInvalidationPath::Invalidated(actual), + ) => { + assert_eq!( + expected, + actual.get_invalidation_path().map(|v| ( + format!("{}({})", v.key.key_type_name(), v.key), + v.version.value() + )) + ) + } + (ExpectedInvalidation::Invalidated(..), _) => { + panic!( + "{} invalidation mismatch. Expected DiceTrackedInvalidationPath::Invalidated, got DiceTrackedInvalidationPath::{}", + priority, + actual.variant_name() + ) + } + } +} + +#[test] +fn test_compute_tracks_invalidations() -> anyhow::Result<()> { + // Detecting that a dice compute panicked is actually kinda tricky, in normal flow + // that is a hard error but in tests it instead just looks to dice like the node is cancelled. + // We detect it by configuring the runtime to shutdown and panic itself if any task panics, but + // that only works right now with the current_thread runtime. + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .unhandled_panic(tokio::runtime::UnhandledPanic::ShutdownRuntime) + .build() + .unwrap(); + + // Spawn the root task + rt.block_on(async { + let dice = { + let builder = Dice::modern(); + builder.build(DetectCycles::Enabled) + }; + + // We want to set up the invalidation sources such that the FooInjected are all invalidated at `2 * self.0` (i.e NormalInjected(4) + // is invalidated at 40) and the FooChanged forms are at `2 * self.0 + 1` + + for i in 0..10 { + let mut updater = dice.updater(); + updater.changed_to([(NormalInjected(i), i)])?; + updater.changed_to([(HighInjected(i), i)])?; + updater.commit().await; + + let mut updater = dice.updater(); + updater.changed([NormalChanged(i)])?; + updater.changed([HighChanged(i)])?; + updater.commit().await; + } + + #[derive(Allocative, Clone, Copy, Debug, Display, Eq, PartialEq, Hash)] + struct Top(u32); + + #[async_trait] + impl Key for Top { + type Value = CapturedInvalidationPaths; + async fn compute( + &self, + ctx: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> Self::Value { + match self.0 { + 0 => {} + 1 => { + ctx.compute(&HighInjected(0)).await.unwrap(); + ctx.compute(&NormalInjected(1)).await.unwrap(); + } + 2 => { + ctx.compute(&HighChanged(1)).await.unwrap(); + ctx.compute(&NormalChanged(0)).await.unwrap(); + } + 3 => { + ctx.compute(&HighChanged(0)).await.unwrap(); + ctx.compute2( + |ctx| { + async move { ctx.compute(&NormalInjected(1)).await.unwrap() } + .boxed() + }, + |ctx| { + async move { ctx.compute(&NormalChanged(1)).await.unwrap() }.boxed() + }, + ) + .await; + } + n if n < 8 => { + ctx.compute(&Top(n - 1)).await.unwrap(); + } + k => { + panic!("don't request such a big key {}", k) + } + } + + CapturedInvalidationPaths::new(ctx.get_invalidation_paths()) + } + + fn equality(_x: &Self::Value, _y: &Self::Value) -> bool { + false + } + } + + let mut ctx = dice.updater().commit().await; + + assert_invalidations( + ExpectedInvalidation::Clean, + ExpectedInvalidation::Clean, + ctx.compute(&Top(0)).await?, + ); + assert_invalidations( + ExpectedInvalidation::Invalidated(vec![ExpectedInvalidation::item( + NormalInjected(1), + 3, + )]), + ExpectedInvalidation::Invalidated(vec![ExpectedInvalidation::item(HighInjected(0), 1)]), + ctx.compute(&Top(1)).await?, + ); + assert_invalidations( + ExpectedInvalidation::Invalidated(vec![ExpectedInvalidation::item(HighChanged(1), 4)]), + 
ExpectedInvalidation::Invalidated(vec![ExpectedInvalidation::item(HighChanged(1), 4)]), + ctx.compute(&Top(2)).await?, + ); + assert_invalidations( + ExpectedInvalidation::Invalidated(vec![ExpectedInvalidation::item( + NormalChanged(1), + 4, + )]), + ExpectedInvalidation::Invalidated(vec![ExpectedInvalidation::item(HighChanged(0), 2)]), + ctx.compute(&Top(3)).await?, + ); + + assert_invalidations( + ExpectedInvalidation::Invalidated(vec![ + ExpectedInvalidation::item(NormalChanged(1), 4), + ExpectedInvalidation::item(Top(3), 4), + ExpectedInvalidation::item(Top(4), 4), + ExpectedInvalidation::item(Top(5), 4), + ExpectedInvalidation::item(Top(6), 4), + ]), + ExpectedInvalidation::Invalidated(vec![ + ExpectedInvalidation::item(HighChanged(0), 2), + ExpectedInvalidation::item(Top(3), 2), + ExpectedInvalidation::item(Top(4), 2), + ExpectedInvalidation::item(Top(5), 2), + ExpectedInvalidation::item(Top(6), 2), + ]), + ctx.compute(&Top(7)).await?, + ); + + Ok(()) + }) +} + +#[test] +fn test_compute_tracks_invalidations_over_versions() -> anyhow::Result<()> { + // Detecting that a dice compute panicked is actually kinda tricky, in normal flow + // that is a hard error but in tests it instead just looks to dice like the node is cancelled. + // We detect it by configuring the runtime to shutdown and panic itself if any task panics, but + // that only works right now with the current_thread runtime. + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .unhandled_panic(tokio::runtime::UnhandledPanic::ShutdownRuntime) + .build() + .unwrap(); + + // Spawn the root task + rt.block_on(async { + let dice = { + let builder = Dice::modern(); + builder.build(DetectCycles::Enabled) + }; + + #[derive(Allocative, Clone, Copy, Debug, Display, Eq, PartialEq, Hash)] + struct Top(u32); + + #[async_trait] + impl Key for Top { + type Value = CapturedInvalidationPaths; + async fn compute( + &self, + ctx: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> Self::Value { + ctx.compute(&HighChanged(self.0)).await.unwrap(); + + CapturedInvalidationPaths::new(ctx.get_invalidation_paths()) + } + + fn equality(_x: &Self::Value, _y: &Self::Value) -> bool { + false + } + } + + let mut updater = dice.updater(); + updater.changed_to([(HighInjected(0), 0)])?; + updater.commit().await; + + let mut ctx = dice.updater().commit().await; + assert_invalidations( + ExpectedInvalidation::Invalidated(vec![ + ExpectedInvalidation::item(HighInjected(0), 1), + ExpectedInvalidation::item(HighChanged(0), 1), + ]), + ExpectedInvalidation::Invalidated(vec![ + ExpectedInvalidation::item(HighInjected(0), 1), + ExpectedInvalidation::item(HighChanged(0), 1), + ]), + ctx.compute(&Top(0)).await?, + ); + + let mut updater = dice.updater(); + updater.changed_to([(HighInjected(0), 1)])?; + updater.commit().await; + + let mut ctx = dice.updater().commit().await; + assert_invalidations( + ExpectedInvalidation::Invalidated(vec![ + ExpectedInvalidation::item(HighInjected(0), 2), + ExpectedInvalidation::item(HighChanged(0), 2), + ]), + ExpectedInvalidation::Invalidated(vec![ + ExpectedInvalidation::item(HighInjected(0), 2), + ExpectedInvalidation::item(HighChanged(0), 2), + ]), + ctx.compute(&Top(0)).await?, + ); + + let mut updater = dice.updater(); + updater.changed([HighChanged(0)])?; + updater.commit().await; + + let mut ctx = dice.updater().commit().await; + assert_invalidations( + ExpectedInvalidation::Invalidated(vec![ExpectedInvalidation::item(HighChanged(0), 3)]), + 
ExpectedInvalidation::Invalidated(vec![ExpectedInvalidation::item(HighChanged(0), 3)]), + ctx.compute(&Top(0)).await?, + ); + + Ok(()) + }) +} diff --git a/dice/dice_tests/src/legacy/mod.rs b/dice/dice_tests/src/legacy.rs similarity index 100% rename from dice/dice_tests/src/legacy/mod.rs rename to dice/dice_tests/src/legacy.rs diff --git a/dice/dice_tests/src/legacy/demo.rs b/dice/dice_tests/src/legacy/demo.rs index 9a09fb25993f2..69c2d67be22d4 100644 --- a/dice/dice_tests/src/legacy/demo.rs +++ b/dice/dice_tests/src/legacy/demo.rs @@ -18,6 +18,7 @@ use std::sync::Arc; use allocative::Allocative; use async_trait::async_trait; +use buck2_futures::cancellation::CancellationContext; use derive_more::Display; use dice::DetectCycles; use dice::Dice; @@ -26,7 +27,6 @@ use dice::DiceTransactionUpdater; use dice::InjectedKey; use dice::Key; use dupe::Dupe; -use more_futures::cancellation::CancellationContext; use tempfile::NamedTempFile; #[derive(Debug, Clone, Dupe, PartialEq, Allocative)] @@ -36,7 +36,7 @@ enum Encoding { } #[derive(Clone, Dupe, Debug, Display, Eq, Hash, PartialEq, Allocative)] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] struct EncodingConfig(); impl InjectedKey for EncodingConfig { @@ -47,9 +47,9 @@ impl InjectedKey for EncodingConfig { } } -struct Encodings<'c>(&'c mut DiceComputations); +struct Encodings<'c, 'd>(&'c mut DiceComputations<'d>); -impl<'c> Encodings<'c> { +impl<'c, 'd> Encodings<'c, 'd> { async fn get(&mut self) -> Result> { self.0 .compute(&EncodingConfig()) @@ -58,12 +58,12 @@ impl<'c> Encodings<'c> { } } -trait HasEncodings { - fn encodings(&mut self) -> Encodings; +trait HasEncodings<'d> { + fn encodings<'c>(&'c mut self) -> Encodings<'c, 'd>; } -impl HasEncodings for DiceComputations { - fn encodings(&mut self) -> Encodings { +impl<'d> HasEncodings<'d> for DiceComputations<'d> { + fn encodings<'c>(&'c mut self) -> Encodings<'c, 'd> { Encodings(self) } } @@ -78,40 +78,40 @@ impl SetEncodings for DiceTransactionUpdater { } } -struct Filesystem<'c>(&'c mut DiceComputations); +struct Filesystem<'c, 'd>(&'c mut DiceComputations<'d>); #[derive(Clone, Display, Debug, Eq, Hash, PartialEq, Allocative)] -#[display(fmt = "File({})", "_0.display()")] +#[display("File({})", _0.display())] struct File(PathBuf); -impl<'c> Filesystem<'c> { - async fn read_file(&mut self, file: &Path) -> Result, Arc> { - #[async_trait] - impl Key for File { - type Value = Result, Arc>; - async fn compute( - &self, - ctx: &mut DiceComputations, - _cancellations: &CancellationContext, - ) -> Self::Value { - let encoding = ctx.encodings().get().await?; - - let s = fs::read_to_string(&self.0).unwrap(); - - Ok(Arc::new(match encoding { - Encoding::Utf8 => s, - Encoding::Ascii => s.replace(":-)", "smile"), - })) - } - - fn equality(x: &Self::Value, y: &Self::Value) -> bool { - match (x, y) { - (Ok(x), Ok(y)) => x == y, - _ => false, - } - } +#[async_trait] +impl Key for File { + type Value = Result, Arc>; + async fn compute( + &self, + ctx: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> Self::Value { + let encoding = ctx.encodings().get().await?; + + let s = fs::read_to_string(&self.0).unwrap(); + + Ok(Arc::new(match encoding { + Encoding::Utf8 => s, + Encoding::Ascii => s.replace(":-)", "smile"), + })) + } + + fn equality(x: &Self::Value, y: &Self::Value) -> bool { + match (x, y) { + (Ok(x), Ok(y)) => x == y, + _ => false, } + } +} +impl<'c, 'd> Filesystem<'c, 'd> { + async fn read_file(&mut self, file: &Path) -> Result, Arc> { self.0 
.compute(&File(file.to_path_buf())) .await @@ -119,12 +119,12 @@ impl<'c> Filesystem<'c> { } } -trait HasFilesystem<'c> { - fn filesystem(&'c mut self) -> Filesystem<'c>; +trait HasFilesystem<'d> { + fn filesystem<'c>(&'c mut self) -> Filesystem<'c, 'd>; } -impl<'c> HasFilesystem<'c> for DiceComputations { - fn filesystem(&'c mut self) -> Filesystem<'c> { +impl<'d> HasFilesystem<'d> for DiceComputations<'d> { + fn filesystem<'c>(&'c mut self) -> Filesystem<'c, 'd> { Filesystem(self) } } diff --git a/dice/dice_tests/src/legacy/projection.rs b/dice/dice_tests/src/legacy/projection.rs index ccac3d1939166..b58b24c8d3019 100644 --- a/dice/dice_tests/src/legacy/projection.rs +++ b/dice/dice_tests/src/legacy/projection.rs @@ -14,6 +14,7 @@ use std::sync::Arc; use allocative::Allocative; use async_trait::async_trait; +use buck2_futures::cancellation::CancellationContext; use dice::DetectCycles; use dice::Dice; use dice::DiceComputations; @@ -23,7 +24,6 @@ use dice::Key; use dice::ProjectionKey; use dice::UserComputationData; use dupe::Dupe; -use more_futures::cancellation::CancellationContext; use parking_lot::Mutex; /// We have three keys in this test: @@ -52,7 +52,7 @@ struct GlobalConfig { /// "Evaluate" a file. #[derive(Debug, derive_more::Display, Clone, Hash, PartialEq, Eq, Allocative)] -#[display(fmt = "{}", name)] +#[display("{}", name)] struct FileKey { name: String, } @@ -75,10 +75,13 @@ impl Key for FileKey { // which is the result of file evaluation. // We are testing that file evaluation is not invalidated // if unrelated configurations changed. - let value = config - .projection(&ConfigPropertyKey { - key: "x".to_owned(), - }) + let value = ctx + .projection( + &config, + &ConfigPropertyKey { + key: "x".to_owned(), + }, + ) .map_err(|e| Arc::new(anyhow::anyhow!(e)))?; // Record we executed this computation. ctx.global_data() @@ -106,7 +109,7 @@ impl Key for FileKey { Eq, Allocative )] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] struct ConfigKey; #[async_trait] @@ -143,7 +146,7 @@ impl Key for ConfigKey { /// One "property" of the "configuration". 
#[derive(Debug, derive_more::Display, Clone, Hash, PartialEq, Eq, Allocative)] -#[display(fmt = "{}", key)] +#[display("{}", key)] struct ConfigPropertyKey { key: String, } @@ -200,7 +203,7 @@ async fn smoke() -> anyhow::Result<()> { ..Default::default() }); - let ctx = ctx.commit().await; + let mut ctx = ctx.commit().await; let file = ctx .compute(&FileKey { @@ -233,7 +236,7 @@ async fn smoke() -> anyhow::Result<()> { data.data.set(GlobalConfig { config: HashMap::from_iter([("x".to_owned(), "X".to_owned())]), }); - let ctx = dice.updater_with_data(data).commit().await; + let mut ctx = dice.updater_with_data(data).commit().await; let file = ctx .compute(&FileKey { @@ -265,7 +268,7 @@ async fn smoke() -> anyhow::Result<()> { ("y".to_owned(), "Y".to_owned()), ]), }); - let ctx = dice.updater_with_data(data).commit().await; + let mut ctx = dice.updater_with_data(data).commit().await; let file = ctx .compute(&FileKey { diff --git a/dice/dice_tests/src/lib.rs b/dice/dice_tests/src/lib.rs index c792f7e7f0e5a..e673078fae3f7 100644 --- a/dice/dice_tests/src/lib.rs +++ b/dice/dice_tests/src/lib.rs @@ -9,6 +9,10 @@ #![cfg(test)] +mod general; +mod invalidation_tracking; mod legacy; +mod linear_recompute; +mod multiversion; mod opaque; mod projection; diff --git a/dice/dice_tests/src/linear_recompute.rs b/dice/dice_tests/src/linear_recompute.rs new file mode 100644 index 0000000000000..d104104c0f57b --- /dev/null +++ b/dice/dice_tests/src/linear_recompute.rs @@ -0,0 +1,81 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::hash::Hash; + +use allocative::Allocative; +use async_trait::async_trait; +use buck2_futures::cancellation::CancellationContext; +use derive_more::Display; +use dice::DetectCycles; +use dice::Dice; +use dice::DiceComputations; +use dice::Key; +use futures::future::FutureExt; + +#[tokio::test] +async fn test_linear_recompute_tracks_deps() { + #[derive(Allocative, Clone, Copy, Debug, Display, Eq, PartialEq, Hash)] + enum K { + #[display("K::Top")] + Top, + #[display("K::Mid({})", _0)] + Mid(u32), + } + + #[async_trait] + impl Key for K { + type Value = u32; + + async fn compute( + &self, + ctx: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> Self::Value { + match self { + K::Top => { + ctx.with_linear_recompute(|linear| { + async move { + let mut v = 0; + for i in 0..100 { + v += linear.get().compute(&K::Mid(i)).await.unwrap(); + } + v + } + .boxed() + }) + .await + } + K::Mid(v) => *v, + } + } + + fn equality(x: &Self::Value, y: &Self::Value) -> bool { + x == y + } + } + + let dice = { + let builder = Dice::modern(); + builder.build(DetectCycles::Enabled) + }; + + let mut ctx = dice.updater().commit().await; + + assert_eq!(ctx.compute(&K::Top).await.unwrap(), 4950); + + let mut ctx = { + let mut updater = dice.updater(); + updater.changed_to(vec![(K::Mid(50), 0)]).unwrap(); + updater.commit().await + }; + + // should be 50 less. + assert_eq!(ctx.compute(&K::Top).await.unwrap(), 4900); +} diff --git a/dice/dice_tests/src/multiversion.rs b/dice/dice_tests/src/multiversion.rs new file mode 100644 index 0000000000000..19fde2268c973 --- /dev/null +++ b/dice/dice_tests/src/multiversion.rs @@ -0,0 +1,96 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::hash::Hash; + +use allocative::Allocative; +use async_trait::async_trait; +use buck2_futures::cancellation::CancellationContext; +use derive_more::Display; +use dice::DetectCycles; +use dice::Dice; +use dice::DiceComputations; +use dice::InjectedKey; +use dice::Key; + +#[tokio::test] +async fn test_a_multiversion_bug() { + #[derive(Allocative, Clone, Debug, Display, Eq, PartialEq, Hash)] + #[display("{:?}", self)] + struct Leaf; + + #[async_trait] + impl InjectedKey for Leaf { + type Value = u32; + + fn equality(x: &Self::Value, y: &Self::Value) -> bool { + x == y + } + } + + #[derive(Allocative, Clone, Copy, Debug, Display, Eq, PartialEq, Hash)] + enum Derived { + #[display("Derived::Top")] + Top, + #[display("Derived::Mid")] + Mid, + } + + #[async_trait] + impl Key for Derived { + type Value = u32; + + async fn compute( + &self, + ctx: &mut DiceComputations, + _cancellations: &CancellationContext, + ) -> Self::Value { + match self { + Derived::Top => ctx.compute(&Derived::Mid).await.unwrap(), + Derived::Mid => ctx.compute(&Leaf).await.unwrap(), + } + } + + fn equality(x: &Self::Value, y: &Self::Value) -> bool { + x == y + } + } + + let dice = { + let builder = Dice::modern(); + builder.build(DetectCycles::Enabled) + }; + + let mut ctx1 = { + let mut updater = dice.updater(); + updater.changed_to(vec![(Leaf, 1)]).unwrap(); + updater.commit().await + }; + + let mut ctx2 = { + let mut updater = dice.updater(); + updater.changed_to(vec![(Leaf, 2)]).unwrap(); + updater.commit().await + }; + + let mut ctx3 = { + let mut updater = dice.updater(); + updater.changed_to(vec![(Leaf, 1)]).unwrap(); + updater.commit().await + }; + + assert_eq!(ctx1.compute(&Derived::Mid).await.unwrap(), 1); + assert_eq!(ctx3.compute(&Derived::Mid).await.unwrap(), 1); + + assert_eq!(ctx2.compute(&Derived::Mid).await.unwrap(), 2); + assert_eq!(ctx2.compute(&Derived::Top).await.unwrap(), 2); + + assert_eq!(ctx1.compute(&Derived::Top).await.unwrap(), 1); + assert_eq!(ctx3.compute(&Derived::Top).await.unwrap(), 1); +} diff --git a/dice/dice_tests/src/opaque.rs b/dice/dice_tests/src/opaque.rs index 995c2b5e1d750..e640d32800139 100644 --- a/dice/dice_tests/src/opaque.rs +++ b/dice/dice_tests/src/opaque.rs @@ -11,6 +11,7 @@ use std::sync::Arc; use allocative::Allocative; use async_trait::async_trait; +use buck2_futures::cancellation::CancellationContext; use dice::DetectCycles; use dice::Dice; use dice::DiceComputations; @@ -18,7 +19,6 @@ use dice::DiceData; use dice::Key; use dice::UserComputationData; use dupe::Dupe; -use more_futures::cancellation::CancellationContext; use parking_lot::Mutex; #[derive(Debug, PartialEq)] @@ -150,7 +150,7 @@ async fn key_does_not_read_opaque() -> anyhow::Result<()> { // Part 2: invalidate opaque key, and "compute" the key again. 
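    // NB: `commit()` results are bound as `mut` in these tests because
    // `compute` now takes `&mut self` on `DiceComputations<'_>`.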
ctx.changed([IsOpaque])?; - let ctx = ctx.commit().await; + let mut ctx = ctx.commit().await; tracker.lock().computations.clear(); diff --git a/dice/dice_tests/src/projection/mod.rs b/dice/dice_tests/src/projection.rs similarity index 100% rename from dice/dice_tests/src/projection/mod.rs rename to dice/dice_tests/src/projection.rs diff --git a/dice/dice_tests/src/projection/concurrent.rs b/dice/dice_tests/src/projection/concurrent.rs index 2670a052fb538..fc67a8f264574 100644 --- a/dice/dice_tests/src/projection/concurrent.rs +++ b/dice/dice_tests/src/projection/concurrent.rs @@ -15,6 +15,7 @@ use std::sync::Arc; use allocative::Allocative; use async_trait::async_trait; +use buck2_futures::cancellation::CancellationContext; use derive_more::Display; use dice::DetectCycles; use dice::Dice; @@ -23,7 +24,6 @@ use dice::DiceProjectionComputations; use dice::Key; use dice::ProjectionKey; use dupe::Dupe; -use more_futures::cancellation::CancellationContext; #[derive(Allocative, Clone, Debug, Display, Eq, PartialEq, Hash)] struct BaseK; @@ -47,7 +47,7 @@ impl Key for BaseK { #[tokio::test] async fn concurrent_identical_requests_are_reused() -> anyhow::Result<()> { #[derive(Allocative, Clone, Debug, Display)] - #[display(fmt = "{:?}", self)] + #[display("{:?}", self)] struct ComputeOnce(#[allocative(skip)] Arc); impl PartialEq for ComputeOnce { @@ -83,22 +83,22 @@ async fn concurrent_identical_requests_are_reused() -> anyhow::Result<()> { let count = Arc::new(AtomicU8::new(0)); - let ctx = dice.updater().commit().await; + let mut ctx = dice.updater().commit().await; let k = ComputeOnce(count.dupe()); let base = ctx.compute_opaque(&BaseK).await?; - base.projection(&k)?; + ctx.projection(&base, &k)?; assert_eq!(count.load(Ordering::SeqCst), 1); - base.projection(&k)?; + ctx.projection(&base, &k)?; assert_eq!(count.load(Ordering::SeqCst), 1); // call base again but technically same key let base = ctx.compute_opaque(&BaseK).await?; - base.projection(&k)?; + ctx.projection(&base, &k)?; assert_eq!(count.load(Ordering::SeqCst), 1); Ok(()) diff --git a/dice/dice_tests/src/projection/smoke.rs b/dice/dice_tests/src/projection/smoke.rs index 5bebd87e10854..f389c353b9435 100644 --- a/dice/dice_tests/src/projection/smoke.rs +++ b/dice/dice_tests/src/projection/smoke.rs @@ -16,6 +16,7 @@ use std::sync::Arc; use allocative::Allocative; use async_trait::async_trait; +use buck2_futures::cancellation::CancellationContext; use derive_more::Display; use dice::DetectCycles; use dice::Dice; @@ -27,7 +28,6 @@ use dice::Key; use dice::ProjectionKey; use dice::UserComputationData; use dupe::Dupe; -use more_futures::cancellation::CancellationContext; use parking_lot::Mutex; /// We have three keys in this test: @@ -56,7 +56,7 @@ struct GlobalConfig { /// "Evaluate" a file. #[derive(Debug, derive_more::Display, Clone, Hash, PartialEq, Eq, Allocative)] -#[display(fmt = "{}", name)] +#[display("{}", name)] struct FileKey { name: String, } @@ -79,10 +79,13 @@ impl Key for FileKey { // which is the result of file evaluation. // We are testing that file evaluation is not invalidated // if unrelated configurations changed. - let value = config - .projection(&ConfigPropertyKey { - key: "x".to_owned(), - }) + let value = ctx + .projection( + &config, + &ConfigPropertyKey { + key: "x".to_owned(), + }, + ) .map_err(|e| Arc::new(anyhow::anyhow!(e)))?; // Record we executed this computation. 
ctx.global_data() @@ -113,7 +116,7 @@ impl Key for FileKey { Eq, Allocative )] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] struct ConfigKey; #[async_trait] @@ -150,7 +153,7 @@ impl Key for ConfigKey { /// One "property" of the "configuration". #[derive(Debug, derive_more::Display, Clone, Hash, PartialEq, Eq, Allocative)] -#[display(fmt = "{}", key)] +#[display("{}", key)] struct ConfigPropertyKey { key: String, } @@ -192,6 +195,7 @@ async fn smoke() -> anyhow::Result<()> { })); let mut dice = Dice::modern(); + dice.set(tracker.dupe()); let dice = dice.build(DetectCycles::Enabled); @@ -207,7 +211,7 @@ async fn smoke() -> anyhow::Result<()> { ..Default::default() }); - let ctx = ctx.commit().await; + let mut ctx = ctx.commit().await; let file = ctx .compute(&FileKey { @@ -240,7 +244,7 @@ async fn smoke() -> anyhow::Result<()> { data.data.set(GlobalConfig { config: HashMap::from_iter([("x".to_owned(), "X".to_owned())]), }); - let ctx = dice.updater_with_data(data).commit().await; + let mut ctx = dice.updater_with_data(data).commit().await; let file = ctx .compute(&FileKey { @@ -272,7 +276,7 @@ async fn smoke() -> anyhow::Result<()> { ("y".to_owned(), "Y".to_owned()), ]), }); - let ctx = dice.updater_with_data(data).commit().await; + let mut ctx = dice.updater_with_data(data).commit().await; let file = ctx .compute(&FileKey { @@ -347,7 +351,7 @@ async fn projection_sync_and_then_recompute_incremental_reuses_key() -> anyhow:: } #[derive(Allocative, Clone, Debug, Display)] - #[display(fmt = "{:?}", self)] + #[display("{:?}", self)] struct DependsOnProjection(Arc); #[async_trait] @@ -360,11 +364,11 @@ async fn projection_sync_and_then_recompute_incremental_reuses_key() -> anyhow:: _cancellations: &CancellationContext, ) -> Self::Value { self.0.store(true, Ordering::SeqCst); - ctx.compute_opaque(&BaseKey) - .await - .unwrap() - .projection(&ProjectionEqualKey) - .unwrap() + ctx.projection( + &ctx.compute_opaque(&BaseKey).await.unwrap(), + &ProjectionEqualKey, + ) + .unwrap() } fn equality(x: &Self::Value, y: &Self::Value) -> bool { @@ -385,7 +389,7 @@ async fn projection_sync_and_then_recompute_incremental_reuses_key() -> anyhow:: let mut updater = dice.updater(); updater.changed_to([(BaseKey, 1)])?; - let ctx = updater.commit().await; + let mut ctx = updater.commit().await; assert_eq!(ctx.compute(&DependsOnProjection(is_ran.dupe())).await?, 1); assert!(is_ran.load(Ordering::SeqCst)); @@ -394,13 +398,11 @@ async fn projection_sync_and_then_recompute_incremental_reuses_key() -> anyhow:: // introduce a change let mut updater = dice.updater(); updater.changed_to([(BaseKey, 9999)])?; - let ctx = updater.commit().await; + let mut ctx = updater.commit().await; // if we run the sync first - let projected = ctx - .compute_opaque(&BaseKey) - .await? 
- .projection(&ProjectionEqualKey)?; + let derive_from = ctx.compute_opaque(&BaseKey).await?; + let projected = ctx.projection(&derive_from, &ProjectionEqualKey)?; assert_eq!(projected, 1); // should not be ran diff --git a/dice/fuzzy_dice/BUCK b/dice/fuzzy_dice/BUCK index ee25dcb94e4b7..92d88bc716b3c 100644 --- a/dice/fuzzy_dice/BUCK +++ b/dice/fuzzy_dice/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_binary.bzl", "rust_binary") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -11,12 +10,11 @@ rust_binary( deps = [ "fbsource//third-party/rust:anyhow", "fbsource//third-party/rust:async-trait", - "fbsource//third-party/rust:clap-3", + "fbsource//third-party/rust:clap", "fbsource//third-party/rust:crossbeam", "fbsource//third-party/rust:derivative", "fbsource//third-party/rust:derive_more", "fbsource//third-party/rust:futures", - "fbsource//third-party/rust:higher-order-closure", "fbsource//third-party/rust:itertools", "fbsource//third-party/rust:parking_lot", "fbsource//third-party/rust:quickcheck", @@ -27,8 +25,8 @@ rust_binary( "fbsource//third-party/rust:tracing-subscriber", "fbsource//third-party/rust:uuid", "//buck2/allocative/allocative:allocative", + "//buck2/app/buck2_futures:buck2_futures", "//buck2/dice/dice:dice", "//buck2/gazebo/dupe:dupe", - "//buck2/shed/more_futures:more_futures", ], ) diff --git a/dice/fuzzy_dice/Cargo.toml b/dice/fuzzy_dice/Cargo.toml index a1c251224c529..6a8b7b720d2ba 100644 --- a/dice/fuzzy_dice/Cargo.toml +++ b/dice/fuzzy_dice/Cargo.toml @@ -1,7 +1,9 @@ [package] +edition = "2021" +license = { workspace = true } name = "fuzzy_dice" +repository = { workspace = true } version = "0.1.0" -edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -10,36 +12,27 @@ anyhow = "1.0.65" async-trait = "0.1.24" clap = { version = "3.2.24", features = ["derive"] } crossbeam = "0.8" -derive_more = "0.99.3" derivative = "2.1.1" +derive_more = { version = "1.0.0", features = ["full"] } futures = "0.3" -higher-order-closure = { workspace = true } -itertools = "0.10.3" +itertools = "0.13.0" parking_lot = { version = "0.11.2", features = ["send_guard"] } quickcheck = "1.0" -quickcheck_async = "0.1.1" -ref-cast = "1.0.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0.48" thiserror = "1.0.36" -tokio = { version = "1.5", features = ["full"]} +tokio = { version = "1.5", features = ["full"] } tracing-subscriber = { version = "0.3", features = ["env-filter"] } uuid = { version = "1.2", features = ["v4"] } -dupe = { workspace = true } -gazebo_lint.version = "0.1" -gazebo_lint.optional = true -# @oss-disable: gazebo_lint.path = "../../gazebo_lint/gazebo_lint" -more_futures = { path = "../../shed/more_futures" } allocative = { workspace = true } +buck2_futures = { path = "../../app/buck2_futures" } +dupe = { workspace = true } dice = { path = "../dice" } [dev-dependencies] # Newer cast pull in semver 1.0, which fails with # https://github.com/dtolnay/semver/issues/251 -futures = "0.3" anyhow = "1.0.65" - -[features] -# @oss-disable: default = ["gazebo_lint"] +futures = "0.3" diff --git a/dice/fuzzy_dice/src/computation.rs b/dice/fuzzy_dice/src/computation.rs index 1ee21bf4136f8..f692472ca4766 100644 --- a/dice/fuzzy_dice/src/computation.rs +++ b/dice/fuzzy_dice/src/computation.rs @@ -12,11 +12,11 @@ use std::sync::Arc; use allocative::Allocative; use async_trait::async_trait; +use buck2_futures::cancellation::CancellationContext; use 
crossbeam::queue::SegQueue; use derivative::Derivative; use derive_more::Display; use dice::DiceComputations; -use dice::DiceComputationsParallel; use dice::DiceTransactionUpdater; use dice::InjectedKey; use dice::Key; @@ -24,13 +24,12 @@ use dupe::Dupe; use futures::future; use futures::future::BoxFuture; use futures::FutureExt; -use more_futures::cancellation::CancellationContext; use serde::Deserialize; use serde::Serialize; #[derive(Clone, Copy, PartialEq, Eq, Hash, Display, Debug, Allocative)] #[derive(Serialize, Deserialize)] -#[display(fmt = "key{}", _0)] +#[display("key{}", _0)] #[serde(transparent)] pub struct Var(pub usize); @@ -40,21 +39,21 @@ pub enum Unit { Literal(bool), } -async fn resolve_units( - ctx: &mut DiceComputations, +async fn resolve_units<'a>( + ctx: &'a mut DiceComputations<'_>, units: &[Unit], state: Arc, ) -> anyhow::Result> { let futs = ctx.compute_many(units.iter().map(|unit| { let state = state.dupe(); - higher_order_closure! { - for<'x> move |ctx: &'x mut DiceComputationsParallel<'_>| -> BoxFuture<'x, Result> { + DiceComputations::declare_closure( + move |ctx: &mut DiceComputations| -> BoxFuture> { match unit { Unit::Variable(var) => ctx.eval(state, *var).boxed(), Unit::Literal(lit) => futures::future::ready(Ok(*lit)).boxed(), } - } - } + }, + ) })); future::join_all(futs).await.into_iter().collect() } @@ -70,12 +69,12 @@ pub enum Expr { Xor(Vec), } -async fn lookup_unit(ctx: &mut DiceComputations, var: Var) -> anyhow::Result> { +async fn lookup_unit(ctx: &mut DiceComputations<'_>, var: Var) -> anyhow::Result> { Ok(ctx.compute(&LookupVar(var)).await?) } #[derive(Clone, Display, Debug, Eq, Hash, PartialEq, Allocative)] -#[display(fmt = "Lookup({})", _0)] +#[display("Lookup({})", _0)] struct LookupVar(Var); impl InjectedKey for LookupVar { type Value = Arc; @@ -87,6 +86,7 @@ impl InjectedKey for LookupVar { pub trait FuzzEquations { fn set_equation(&mut self, var: Var, expr: Expr) -> anyhow::Result<()>; + #[allow(dead_code)] fn set_equations(&mut self, expr: impl IntoIterator) -> anyhow::Result<()>; } @@ -113,7 +113,7 @@ pub trait FuzzMath { } #[async_trait] -impl FuzzMath for DiceComputations { +impl FuzzMath for DiceComputations<'_> { async fn eval(&mut self, state: Arc, var: Var) -> anyhow::Result { Ok(*self .compute(&state.eval_var(var)) @@ -159,7 +159,7 @@ impl FuzzState { #[derive(Derivative, Clone, Display, Allocative)] #[derivative(Hash, Debug)] -#[display(fmt = "Eval({})", key)] +#[display("Eval({})", key)] pub struct EvalVar { key: Var, #[derivative(Debug = "ignore", Hash = "ignore")] diff --git a/dice/fuzzy_dice/src/execution.rs b/dice/fuzzy_dice/src/execution.rs index 61697b0052df6..25db897bfea54 100644 --- a/dice/fuzzy_dice/src/execution.rs +++ b/dice/fuzzy_dice/src/execution.rs @@ -536,8 +536,8 @@ impl Arbitrary for DiceExecutionOrder { } // Semi-randomly select the expr type. match usize::arbitrary(g) % 100 { - 0..33 => Expr::Unit(Unit::Literal(bool::arbitrary(g))), - 34..66 => Expr::Xor({ + 0..=33 => Expr::Unit(Unit::Literal(bool::arbitrary(g))), + 34..=66 => Expr::Xor({ let mut vec = Vec::new(); for _ in 0..Self::VARS_PER_XOR { vec.push(select_var(g, vars)) @@ -564,7 +564,7 @@ impl Arbitrary for DiceExecutionOrder { let i = usize::arbitrary(g) % active_vars.len(); // Semi-randomly select a next op. 
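        // The `0..=n` arms here use stable inclusive range patterns; the unstable
        // exclusive form (`0..n`, gated by `exclusive_range_pattern`) is dropped
        // along with the feature attribute in main.rs. Boundary values such as 40
        // now match their arm instead of falling through to the default case.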
timeline.push(match usize::arbitrary(g) % 100 { - 0..40 => Operation::Query { + 0..=40 => Operation::Query { ctx_id: { if gen_out_of_order { *g.choose(&ctx_ids).unwrap() @@ -574,7 +574,7 @@ impl Arbitrary for DiceExecutionOrder { }, var: active_vars[i], }, - 41..50 if gen_transients => Operation::EnqueueStep( + 41..=50 if gen_transients => Operation::EnqueueStep( *g.choose(&active_vars).unwrap(), vec![ComputationStep::ReturnTransient], ), diff --git a/dice/fuzzy_dice/src/main.rs b/dice/fuzzy_dice/src/main.rs index 9c080b77d74ec..258c92514bc3c 100644 --- a/dice/fuzzy_dice/src/main.rs +++ b/dice/fuzzy_dice/src/main.rs @@ -7,12 +7,6 @@ * of this source tree. */ -#![feature(exclusive_range_pattern)] -#![feature(async_closure)] - -#[macro_use] -extern crate higher_order_closure; - use std::fs; use std::fs::File; use std::path::Path; diff --git a/dice/oss/CHANGELOG.md b/dice/oss/CHANGELOG.md index 8f51278a58e3e..1834602de1536 100644 --- a/dice/oss/CHANGELOG.md +++ b/dice/oss/CHANGELOG.md @@ -1,3 +1,3 @@ # DICE -* Initial version. +- Initial version. diff --git a/dice/oss/CONTRIBUTING.md b/dice/oss/CONTRIBUTING.md index c7042cbe51c3b..3726e31cc8fcc 100644 --- a/dice/oss/CONTRIBUTING.md +++ b/dice/oss/CONTRIBUTING.md @@ -1,12 +1,13 @@ # Contributing to DICE -We want to make contributing to this project as easy and transparent as possible. +We want to make contributing to this project as easy and transparent as +possible. ## Our Development Process -DICE is currently developed in Facebook's internal repositories and then exported -out to GitHub by a Facebook team member; however, we invite you to submit pull -requests as described below. +DICE is currently developed in Facebook's internal repositories and then +exported out to GitHub by a Facebook team member; however, we invite you to +submit pull requests as described below. ## Pull Requests @@ -41,6 +42,6 @@ Follow the automatic `rust fmt` configuration. ## License -By contributing to DICE, you agree that your contributions will be -licensed under both the [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) +By contributing to DICE, you agree that your contributions will be licensed +under both the [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) files in the root directory of this source tree. 
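Two mechanical migrations recur throughout the hunks above: unstable exclusive range patterns (`0..33`) become stable inclusive ones (`0..=33`), matching the removal of `#![feature(exclusive_range_pattern)]` from `main.rs`, and `#[display(fmt = "...")]` becomes `#[display("...")]`, tracking the `derive_more` 0.99 → 1.0 bump in `fuzzy_dice/Cargo.toml`. Below is a minimal sketch of the new `Display` attribute form; it assumes a crate depending on `derive_more = { version = "1.0", features = ["display"] }`, and the `LookupVar`/`main` harness is illustrative only, not part of the diff:

```rust
use derive_more::Display;

// derive_more 1.0 drops the `fmt =` key that 0.99 required:
//   0.99: #[display(fmt = "Lookup({})", _0)]
//   1.0:  #[display("Lookup({})", _0)]
#[derive(Display)]
#[display("Lookup({})", _0)]
struct LookupVar(usize);

fn main() {
    // The interpolation arguments are unchanged; `_0` still names the first field.
    assert_eq!(LookupVar(7).to_string(), "Lookup(7)");
}
```

The same rewrite applies uniformly to every `#[display(fmt = ...)]` site in the diff, which is why it shows up in otherwise unrelated hunks.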
diff --git a/dice/oss/Cargo.toml b/dice/oss/Cargo.toml index b13f0591349fc..a53dc38382d76 100644 --- a/dice/oss/Cargo.toml +++ b/dice/oss/Cargo.toml @@ -1,12 +1,17 @@ [workspace] -resolver = "2" members = [ "gazebo/gazebo", "gazebo/gazebo_derive", "dice", "dice_examples", ] +resolver = "2" + +[workspace.package] +license = "MIT OR Apache-2.0" +repository = "https://github.com/facebook/buck2" + [workspace.dependencies] -allocative = { version = "0.3" } +allocative = { version = "0.3.3" } gazebo = { version = "0.8.1", path = "gazebo/gazebo" } gazebo_derive = { version = "0.8.1", path = "gazebo/gazebo_derive" } diff --git a/dice/read_dump/BUCK b/dice/read_dump/BUCK new file mode 100644 index 0000000000000..c765392c50f04 --- /dev/null +++ b/dice/read_dump/BUCK @@ -0,0 +1,15 @@ +load("@fbcode_macros//build_defs:rust_binary.bzl", "rust_binary") + +oncall("build_infra") + +rust_binary( + name = "read_dump", + srcs = ["src/main.rs"], + deps = [ + "fbsource//third-party/rust:anyhow", + "fbsource//third-party/rust:bincode", + "fbsource//third-party/rust:clap", + "fbsource//third-party/rust:serde_json", + "//buck2/dice/dice:dice", + ], +) diff --git a/dice/read_dump/Cargo.toml b/dice/read_dump/Cargo.toml new file mode 100644 index 0000000000000..a06ab3e7e90fe --- /dev/null +++ b/dice/read_dump/Cargo.toml @@ -0,0 +1,14 @@ +[package] +edition = "2021" +license = { workspace = true } +name = "read_dump" +repository = { workspace = true } +version = "0.1.0" + +[dependencies] +anyhow = { workspace = true } +bincode = { workspace = true } +clap = { workspace = true } +serde_json = { workspace = true } + +dice = { path = "../dice" } diff --git a/dice/read_dump/src/main.rs b/dice/read_dump/src/main.rs new file mode 100644 index 0000000000000..797c2ee8f3156 --- /dev/null +++ b/dice/read_dump/src/main.rs @@ -0,0 +1,45 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::fs::File; +use std::path::PathBuf; + +use clap::CommandFactory; +use clap::FromArgMatches; +use dice::introspection::graph::SerializedGraphNodesForKey; + +#[derive(Debug, clap::Parser)] +#[clap(name = "read_dump", about = "dice dump reader")] +pub(crate) struct Opt { + #[clap(name = "DICE_DUMP", help = "The dice dump")] + file: PathBuf, + #[clap(long = "out", help = "Copy the output to this path")] + out: Option<PathBuf>, +} + +fn main() -> anyhow::Result<()> { + let clap = <Opt as CommandFactory>::command(); + let matches = clap.get_matches_from(std::env::args().collect::<Vec<String>>()); + let opt = Opt::from_arg_matches(&matches)?; + + let file = File::open(opt.file)?; + + let out: Vec<SerializedGraphNodesForKey> = bincode::deserialize_from(&file)?; + + match opt.out { + Some(path) => { + serde_json::to_writer_pretty(File::create(path)?, &out)?; + } + None => { + serde_json::to_writer_pretty(std::io::stdout(), &out)?; + } + }; + + Ok(()) +} diff --git a/docs.py b/docs.py deleted file mode 100755 index 16b4a2fc1933e..0000000000000 --- a/docs.py +++ /dev/null @@ -1,130 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under both the MIT license found in the -# LICENSE-MIT file in the root directory of this source tree and the Apache -# License, Version 2.0 found in the LICENSE-APACHE file in the root directory -# of this source tree.
- -""" -Generate API documentation for the website. -""" - -import argparse -import os -import subprocess -import tempfile -from pathlib import Path - - -def read_file(path): - with open(path, "r") as f: - return f.read() - - -def write_file(path, contents): - with open(path, "w") as f: - f.write(contents) - - -def buck_command(args): - if args.buck2: - return args.buck2 - elif args.prod: - return "buck2" - elif args.cargo: - return "cargo run --bin=buck2 --" - else: - return "./buck2.sh" - - -# Given the path to the documentation, e.g. native/bxl/analysis_result -# produce a new name which is the destination, e.g. bxl/analysis_result -def doc_name(x): - if x.startswith("native/bxl/"): - return x[7:] # drop the native - elif x.endswith("/rules.bzl"): - return "rules" - elif x.endswith("/function"): - # Uninteresting docs we'd rather not have generated - return None - elif x.startswith("native/standard/") or x.startswith("native/extension/"): - return "starlark/" + x.split("/")[-1] - elif x.startswith("native/"): - return "build/" + x[7:] - else: - raise RuntimeError("Unknown name: " + x) - - -def main() -> None: - parser = argparse.ArgumentParser() - parser.add_argument( - "--prod", - action="store_true", - default=False, - help="Whether to use the production `buck2` binary", - ) - parser.add_argument( - "--cargo", - action="store_true", - default=False, - help="Whether to use a `cargo` built binary.", - ) - parser.add_argument( - "--buck2", - nargs="?", - help="Whether to use provided binary.", - ) - args = parser.parse_args() - - # Change to buck2 directory - buck2_dir = Path(__file__).parent.absolute() - os.chdir(str(buck2_dir)) - - # Clear the docs folder first so that if we change the names of any - # objects, we'll remove old docs - for x in Path("docs").rglob("*.generated.md"): - os.remove(x) - - # Copy the starlark docs over. docusaurus does not handle upward path traversal very well. 
- for x in Path("starlark-rust/docs").glob("*.md"): - name = Path(x).stem - prefix = "---\nid: " + name + "\n---\n" - write_file( - "docs/developers/starlark/" + name + ".generated.md", prefix + read_file(x) - ) - - with tempfile.TemporaryDirectory() as tmp: - # Actually generate the docs - print("Running Buck...") - subprocess.run( - buck_command(args) - + " docs starlark --format=markdown_files --markdown-files-destination-dir=" - + tmp - + " --builtins prelude//docs:rules.bzl", - shell=True, - check=True, - ) - - for orig in Path(tmp).rglob("*.md"): - src = read_file(orig) - path = os.path.relpath(orig, tmp) - if path.endswith(".md"): - path = path[:-3] - - name = doc_name(path) - if name is None: - continue - - prefix = "---\nid: " + name.rsplit("/")[-1] + "\n---\n" - if name == "rules": - prefix += "# Rules\n\nThese rules are available as standard in Buck2.\n" - src = "\n".join(src.splitlines()[1:]) - - dest = "docs/api/" + name + ".generated.md" - os.makedirs(Path(dest).parent, exist_ok=True) - write_file(dest, prefix + src) - - -if __name__ == "__main__": - main() diff --git a/docs/.gitignore b/docs/.gitignore index 0aebda0a49b72..db80a172650f8 100644 --- a/docs/.gitignore +++ b/docs/.gitignore @@ -1,2 +1,5 @@ -/generated/ -*.generated.md +/api/ +/developers/starlark/ +/prelude/ +/users/commands/ +/users/query/ diff --git a/docs/about/benefits/compared_to_buck1.md b/docs/about/benefits/compared_to_buck1.md new file mode 100644 index 0000000000000..1728c210f1865 --- /dev/null +++ b/docs/about/benefits/compared_to_buck1.md @@ -0,0 +1,151 @@ +--- +id: compared_to_buck1 +title: Benefits When Compared to Buck1 +--- + +import { FbInternalOnly } from 'docusaurus-plugin-internaldocs-fb/internal'; + + + +For reports from real users, see the [Testimonials](testimonials.fb.md), which +include Workplace posts and their full context. + + + +## Benefits for end users + +> _"`buck2 build SOME_TARGET_I_ALREADY_BUILT_BEFORE` is basically instantaneous +> and is a super delightful experience. 🙂"_ - End user experience +> ([source](https://fb.prod.workplace.com/groups/buck2users/posts/3030704467185914)) + +> _"Buck2 is largely faster and more memory efficient than buck1, and where I’ve +> seen counter-examples, the buck2 team quickly optimizes and fixes that.🙂"_ - +> Software Engineer feedback +> ([source](https://fb.prod.workplace.com/groups/devx.ci.bffs/posts/616830502778501)) + +For people who use Buck on a daily basis (such as using Buck build as part of +their development inner loop), switching to Buck2 provides the following +benefits: + +- **Performance** - the performance of Buck2 is better in four ways: + - **_Fast things are fast_** - in Buck1, simply typing `buck build` when there + is nothing to do can be expensive (23 seconds in some benchmarks). In Buck2, + the same build action takes 0.1 seconds. Actions that should be fast are + fast, which enables developers to use Buck more freely, without trying to + work around the build system. + - **_Slow things are faster_** - when there is real work to do, Buck2 is + significantly closer to the critical path. Benchmarks range from 5%/10s + faster for changing a header file (lots of parallel C++ computations, Buck1 + already nearly at the critical path) to 42%/145s faster (changing a Thrift + file in a large project). + - **_Users contribute to the shared cache_** - with Buck1, only trusted CI + builds write to the network cache, while with Buck2 everyone writes to the + cache through sandboxed remote execution. 
This increases the chance of cache + hits, saving capacity and time. + - **_CI builds go faster_** - these numbers vary day by day, but most projects + are 2-4x faster. This means spending less time waiting for CI and saving + some capacity at the same time. +- **Correctness** - in Buck2, rules are hermetic by default. Missing + dependencies are errors. These restrictions apply to both the user-written + `BUCK` files and the language rules. + - During the process of migrating to Buck2, a huge number of missing + dependencies have been fixed. However, during the same process, several + Buck1 issues were identified that are not going to be fixed in Buck1 (such + as missing headers, genrules without dependencies, and OCaml rules don’t + track all deps). The end result is that Buck2 gives the right answer more + often, cutting down on user surprises. +- **Rule features** - the rules in Buck2, especially for less commonly used + languages (such as Haskell, OCaml, and Rust), support features above and beyond + those in Buck1. + - Examples: dependencies can be given as arguments to + `prebuilt_ocaml_library`, Haskell enables the use of stub headers from C++, + and Rust has experimental pipelining support. +- **Actively developed** - the Meta build team is putting all its efforts behind + Buck2; it's vastly easier to develop than Buck1. While Buck2 is already ahead + of Buck1 in many important aspects, the difference is only going to grow with + several improvements in the pipeline. +- **Support** - Meta can provide much better support to those having + difficulties with Buck2 than to those using Buck1. + +## Benefits for Rule Authors + +If you write language-specific rules, then Buck2 is in a different league to +Buck1. + +> _"This is all rather fun! Buck2 rules are so much more hackable than +> Buck1."_ - Software Engineer feedback +> ([source](https://fb.prod.workplace.com/groups/333784157210625/posts/928214407767594)) + +There are a number of reasons why Buck2 excels for Rule Authors: + +- **Faster developer cycle** - in Buck1, the time from changing a rule to seeing + the impact is many minutes: you first have to compile Buck1, invalidate the + dependency cache (and so on), and perhaps work between multiple OSs. With + Buck2, it takes seconds; you don’t even need to restart the daemon. +- **Simple API** - Buck2 rules use a small and documented Starlark API, which is + dependency-correct by construction. In Buck1, the rules must obey a lot of + subtle side conditions with a much larger API. +- **Easier deployment** - for Buck2, deployment is just checking the rules in, + with an atomic commit changing associated macros (when required). For Buck1, + you have to make the repo work with the old and new rules and wait for a Buck + version bump to ship your changes, perhaps a few days later. +- **Low barrier to entry** - writing rules in Buck2 is vastly easier than Buck1, + significantly increasing the developer pool. This means that writing rules is + now accessible to language experts, not just Buck experts. + +## Benefits for Integrators + +For those people who integrate Buck2 into larger systems, many of +the above benefits apply; in addition, Buck2 provides the following benefits: + +- **Faster queries** - many integrators make extensive use of `buck uquery` and + `cquery`. In Buck2, these commands are **faster** and use **less memory**.
+ - For example, on CI target determination (a bunch of targets/queries), Buck2 + is 25% faster at P50 (moving to 40% faster at P95) with 25% less memory + (saving over 20Gb, and crossing below the 64Gb threshold). +- **Profiling** - Buck2 already ships with five types of profiling for both + loading and analysis (flame graphs, statement breakdown, heap profiles, etc.). + With Buck2, these tools are much more easily accessible to people not on the + Build Infra team. + + + +- **Eden friendly** - Buck2 is tuned for the Eden architecture, performing fewer + disk operations with greater parallelism. + - For example, the slowdown caused by using Eden for `targets` on `fbandroid` + is + [reduced from 300s to 80s](https://fb.workplace.com/groups/132499338763090/posts/132580122088345). +- **Better observability** - Buck2 populates many Scuba tables with information + about + [loading](https://www.internalfb.com/intern/scuba/query/?dataset=buck2_loads), + [analysis](https://www.internalfb.com/intern/scuba/query/?dataset=buck2_analyses), + [builds](https://www.internalfb.com/intern/scuba/query/?dataset=buck2_builds) + and + [errors](https://www.internalfb.com/intern/scuba/query/?dataset=buck2_action_errors), + and more. The architecture of Buck2 ensures that all important information can + be recorded in a uniform manner, enabling sensible trade-offs to be made about + what to store vs. for how long. + + + +## The downside + +While there are many benefits, it would be remiss not to include a small list of +temporary issues: + +- **Stability** - Buck2 is under active development, which means the risk of + regression is correspondingly higher. There may be issues, but they will be + fixed as quickly as possible (and lessons learned) through Meta's + SEV-review process. +- **Corner cases** - Buck1 has been battle-tested for nearly a decade, which has + included attention to details such as error messages in unlikely corner cases. + Only time and user feedback will enable Meta to bring Buck2 to the same level. + Please share all such feedback! + + + +- **Buck2 Web UI** - there isn’t yet a working Web UI equivalent to the one + provided by Buck1. But we’re working on it and hope to share an initial + version shortly. + + diff --git a/docs/about/bootstrapping.md b/docs/about/bootstrapping.md new file mode 100644 index 0000000000000..17192e3b49ce5 --- /dev/null +++ b/docs/about/bootstrapping.md @@ -0,0 +1,32 @@ +--- +id: bootstrapping +title: Bootstrapping Buck2 +--- + +# Bootstrapping Buck2 + +To generate `BUCK` files for `buck2`'s dependencies, we use +[reindeer](https://github.com/facebookincubator/reindeer). + +Note that the resulting binary will be compiled without optimisations or +[jemalloc](https://github.com/jemalloc/jemalloc), so we recommend using the +Cargo-produced binary in further development.
+
+First, install `reindeer` with `Cargo`:
+
+```sh
+cargo install --locked --git https://github.com/facebookincubator/reindeer reindeer
+```
+
+Next, run the following to buckify dependencies:
+
+```sh
+cd buck2/
+reindeer --third-party-dir shim/third-party/rust buckify
+```
+
+Build `buck2` with `buck2`:
+
+```sh
+buck2 build //:buck2
+```
diff --git a/docs/about/getting_started.md b/docs/about/getting_started.md
new file mode 100644
index 0000000000000..8cf378c03a15a
--- /dev/null
+++ b/docs/about/getting_started.md
@@ -0,0 +1,252 @@
+---
+id: getting_started
+title: Getting Started
+---
+
+import { FbInternalOnly } from 'docusaurus-plugin-internaldocs-fb/internal';
+
+## Installing Buck2
+
+The latest set of `buck2` executables can be found under the
+[`latest` release page](https://github.com/facebook/buck2/releases/tag/latest).
+
+Additionally, for each bi-monthly release there is a
+[dotslash](https://dotslash-cli.com/) file that is appropriate for check-in to a
+repository. This will automatically fetch the correct version and architecture
+for each user, and ensure a consistent build environment for each commit in the
+repo.
+
+To get started, first install [rustup](https://rustup.rs/), then compile the
+`buck2` executable:
+
+```bash
+rustup install nightly-2024-07-21
+cargo +nightly-2024-07-21 install --git https://github.com/facebook/buck2.git buck2
+```
+
+The above commands install `buck2` into a suitable directory, such as
+`$HOME/.cargo/bin`, which you should then add to your `$PATH`:
+
+Linux / macOS
+
+```sh
+export PATH=$HOME/.cargo/bin:$PATH
+```
+
+Windows PowerShell
+
+```powershell
+$Env:PATH += ";$HOME\.cargo\bin"
+```
+
+With Buck2 installed, you can build projects with `buck2`!
+
+### Windows configuration
+
+Some of our rules use symlinks, which are disabled by default for non-admin
+Windows users. You can fix that by
+[enabling Developer Mode](https://pureinfotech.com/enable-developer-mode-windows-11/).
+
+## Compiling your first project
+
+This section covers the building of a
+['hello_world' example project](https://github.com/facebook/buck2/tree/main/examples/hello_world)
+that contains a simple C++ binary. If you are interested in seeing how other
+languages can be built, take a look at the
+[prelude example project](https://github.com/facebook/buck2/tree/main/examples/with_prelude),
+which contains Rust, C++, Python, and OCaml targets.
+
+First, clone the buck2 repository and cd into the 'hello_world' project:
+
+```bash
+git clone https://github.com/facebook/buck2.git
+cd buck2/examples/hello_world
+```
+
+`buck2 init --git` is all the setup you need to start building. This will use a
+git submodule to pull [buck2-prelude](https://github.com/facebook/buck2-prelude)
+into your project:
+
+```sh
+buck2 init --git
+```
+
+To use another version control system, run `buck2 init` and manually download
+[buck2-prelude](https://github.com/facebook/buck2-prelude) into `prelude` at the
+root of your project.
+
+```sh
+buck2 init
+```
+
+To build the entire project, run:
+
+Note: _Requires clang and lld to be in the path_
+
+```sh
+buck2 build //...
+```
+
+Note that this uses a
+[simple C++ toolchain](https://github.com/facebook/buck2/blob/main/prelude/toolchains/cxx.bzl)
+that requires a recent version of `clang` to be installed on your system. This
+can be installed with any package manager (e.g., `apt install clang`,
+`xcode-select --install` on macOS, `choco install llvm`). After installing any
+external tools or changing your `PATH`, run `buck2 kill` before running a build.
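+On a Debian-based Linux machine, for example, that full sequence might look
+like the following (a sketch; the package manager and package names are
+assumptions that vary by platform):
+
+```sh
+# Install a C++ compiler and linker for the simple toolchain.
+sudo apt install clang lld
+
+# Restart the Buck2 daemon so it picks up the newly installed tools.
+buck2 kill
+
+# Build all targets in the example project.
+buck2 build //...
+```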
+
+To list all targets available in the project, run:
+
+```sh
+buck2 targets //...
+```
+
+To run the main C++ binary, run:
+
+```sh
+buck2 run //:main
+```
+
+The newly built binary can be found with the `--show-output` flag:
+
+```sh
+buck2 build //:main --show-output
+```
+
+Output:
+
+```sh
+Build ID: 0e890477-5b7f-4829-9ffe-662e572320a0
+Jobs completed: 3. Time elapsed: 0.0s.
+BUILD SUCCEEDED
+root//:main buck-out/v2/gen/root/9f4d83578bb24895/__main__/main
+```
+
+## Creating your first hello_world project
+
+This section demonstrates how to create a simple C++ 'hello_world' project.
+
+To get started, make a new folder for your project and cd into it.
+
+```sh
+mkdir hello_world
+cd hello_world
+```
+
+Next, run `buck2 init --git` to initialize the project. This command will set up
+your project with `git` and pull in
+[buck2-prelude](https://github.com/facebook/buck2-prelude) as a submodule.
+Additionally, it will generate multiple files with default values.
+
+```sh
+buck2 init --git
+```
+
+Next, add the source code in `main.cpp`:
+
+```c++
+#include <iostream>
+int main() {
+    std::cout << "Hello from a C++ Buck2 program!" << std::endl;
+}
+```
+
+Then, define a `cxx_binary` in the root `BUCK` file:
+
+```Python
+# BUCK
+cxx_binary(
+    name = "main",
+    srcs = ["main.cpp"],
+    link_style = "static",
+)
+```
+
+If you try to build `//:main` at this point, you'll see an error about `buck2`
+not being able to find `toolchains//:cxx`.
+
+The final step is to define the necessary toolchain targets. For this project,
+you need `system_cxx_toolchain` and `system_python_bootstrap_toolchain`, which
+will pick up the necessary tools (clang++, python, and so on) from the system.
+
+```Python
+# toolchains/BUCK
+load("@prelude//toolchains:cxx.bzl", "system_cxx_toolchain")
+load("@prelude//toolchains:python.bzl", "system_python_bootstrap_toolchain")
+
+system_cxx_toolchain(
+    name = "cxx",
+    visibility = ["PUBLIC"],
+)
+
+system_python_bootstrap_toolchain(
+    name = "python_bootstrap",
+    visibility = ["PUBLIC"],
+)
+```
+
+At this point, your project should have the following files:
+
+```bash
+$ tree -a -I "buck-out|prelude|.git"
+|-- .buckconfig
+|-- .gitmodules
+|-- BUCK
+|-- main.cpp
+`-- toolchains
+    `-- BUCK
+```
+
+Now, you're ready to see the build in action.
+
+To build the main C++ target, run:
+
+```sh
+buck2 build //:main
+```
+
+To run the main C++ target, run:
+
+```sh
+buck2 run //:main
+```
+
+In summary, a `buck2` project requires:
+
+1. A `.buckconfig` file in the root which has a `[cells]` section listing out
+   [cells](https://buck2.build/docs/concepts/glossary/#cell)
+2. A `prelude` directory, which contains a collection of
+   [rules](https://buck2.build/docs/concepts/glossary/#rule) of your choice.
+   `buck2 init` will pull in the
+   [buck2-prelude](https://github.com/facebook/buck2-prelude.git) as a git
+   submodule by default
+3. If using the [buck2-prelude](https://github.com/facebook/buck2-prelude.git),
+   a `toolchains` directory that declares relevant toolchains. We provide some
+   basic toolchains in
+   [prelude/toolchains](https://github.com/facebook/buck2/tree/main/prelude/toolchains)
+4. `BUCK` files that specify targets for your project
+
+`buck2 init --git` will generate all of these with reasonable default values.
+
+## Learning More
+
+You should now be ready to explore Buck2 for use in your own projects. You can
+explore the [examples](https://github.com/facebook/buck2/tree/main/examples)
+folder. Look out for more tutorials in the future.
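+As a reference for item 1 in the summary above, the `.buckconfig` that
+`buck2 init` generates looks roughly like the following sketch (the exact
+contents are an assumption and may differ between Buck2 versions):
+
+```ini
+[cells]
+root = .
+prelude = prelude
+toolchains = toolchains
+none = none
+
+[cell_aliases]
+config = prelude
+ovr_config = prelude
+fbcode = none
+fbsource = none
+buck = none
+
+[parser]
+target_platform_detector_spec = target:root//...->prelude//platforms:default
+```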
+
+
+
+## Communication channels
+
+The following channels provide insight into Buck2:
+
+- [Buck2 Engineering](https://fb.workplace.com/groups/buck2prototyping) -
+  Workplace group for discussions about what features Buck2 should have, how
+  it's going, status updates, and much more.
+- [Buck2 Users](https://fb.workplace.com/groups/buck2users) - Workplace group
+  featuring questions from users and reports of bugs.
+- [Buck2 Rule Authors](https://fb.workplace.com/groups/347532827186692) -
+  Workplace group for discussions about language-specific rules.
+- [Buck2 Oncall Hub](https://www.internalfb.com/intern/monitor/oncall_profile?oncall=buck2) -
+  urgent tasks and escalation.
+
+
diff --git a/docs/about/why.md b/docs/about/why.md
new file mode 100644
index 0000000000000..d0fabb03c6ee0
--- /dev/null
+++ b/docs/about/why.md
@@ -0,0 +1,148 @@
+---
+id: why
+title: Why Buck2
+---
+
+Buck2 is a build system from Meta. This page answers the questions:
+[why does Buck2 exist](#why-does-buck2-exist),
+[what's different about Buck2](#whats-different-about-buck2), and
+[why use Buck2](#why-use-buck2).
+
+## Why does Buck2 exist?
+
+Meta employs a very large monorepo, consisting of a variety of programming
+languages, including C++, Python, Rust, Kotlin, Swift, Objective-C, Haskell,
+OCaml, and more. Google employs a different but functionally similar monorepo.
+
+These large-scale, multi-language repositories are generally beyond the
+capabilities of traditional build systems like `make`. To optimize the build and
+performance of these large systems, Facebook and Google developed their own
+build systems, respectively Buck and Bazel. While the internal version of Bazel
+(known as Blaze) was started first, Buck was open sourced first (back in
+March 2013), followed by Bazel a few years later (March 2015).
+
+The retroactively named Buck1 was a capable build system and is still in use
+today at Meta (although many users have migrated). Buck2 is a rewrite that aims
+to keep the best bits of Buck1 (with a high degree of target compatibility) but
+also borrows ideas from [academic](https://ndmitchell.com/#shake_10_sep_2012)
+[research](https://ndmitchell.com/#shake_21_apr_2020) and build systems,
+including [Bazel](https://bazel.build/), [Pants](https://www.pantsbuild.org/),
+[Shake](https://shakebuild.com/), [Tup](https://gittup.org/tup/), and more.
+
+Following are aspects common to Buck1 and Buck2 (and in most cases, Bazel):
+
+- **Targets that can be queried** - the build is defined as a series of targets,
+  specified in `BUCK` files, that depend on other targets. This graph of targets
+  can be queried to understand how they relate to each other and what the
+  potential impact of a change might be.
+- **Remote execution** - the build can send actions to a set of remote servers
+  to be executed, increasing the parallelism significantly.
+- **Multi-language composability** - there can be lots of different languages in
+  a single build, and they can be put together. For example, you could have a
+  Python library that depends on a Rust library, which, in turn, depends on a C
+  library.
+- **File watching** - at large enough scale, simply looking for changed files is
+  prohibitively expensive. Buck can integrate with
+  [Watchman](https://facebook.github.io/watchman/) to discover which files have
+  changed efficiently. However, for simplicity of setup, the open-source version
+  defaults to using `inotify` or similar functionality.
+- **Uses Starlark** - Starlark is a deterministic Python-like language used to
+  specify the targets, enabling the definition of targets as literals and more
+  advanced manipulation/sharing.
+
+## What's different about Buck2?
+
+Buck2 has many minor differences from Buck1, but there are a number that give
+new efficiency or expressiveness that are of note (most of these are also
+different from Bazel).
+
+- **Buck2 is written in Rust** - Buck1 was written in Java. One of the
+  advantages of using Rust is the absence of GC pauses. However, Java also has
+  advantages, such as better memory profiling tools.
+- **Buck2 is remote execution first** - local execution is considered a special
+  case of remote execution, in contrast to Buck1 where it was added after. That
+  means that things such as directory hashes can be pre-computed ready to send
+  to remote execution, giving efficiency benefits.
+- **All Buck2 rules are written in Starlark** - whereas, in Buck1, they were
+  written in Java as part of the binary. Keeping the rules outside the binary
+  makes iteration on them much faster.
+- **The Buck2 binary is entirely language agnostic** - as a consequence of
+  having all the rules external to the binary, the most important and complex
+  rules (such as those for C++) don't have access to magic internal features. As
+  a result, features have been made available to all rules, including:
+  - [Dep files](../rule_authors/dep_files.md) - the ability to declare that a
+    subset of the files weren't actually used, and thus not be sensitive to
+    changes within them.
+  - [Incremental actions](../rule_authors/incremental_actions.md) - the ability
+    to have the action short-circuit some subset of the work if run again.
+- **Buck2 uses a dynamic (aka monadic) graph as its underlying computation
+  engine** - while most dependencies are specified statically, there are two
+  particular features that expose dynamic power to rule authors:
+  - [Dynamic dependencies](../rule_authors/dynamic_dependencies.md) - enable
+    rules to build a file and then look at its contents before specifying the
+    dependencies and steps in future actions. Common uses are languages where
+    the dependency structure within a project must follow imports (e.g. Haskell,
+    OCaml) and distributed ThinLTO (where the best optimization plan is
+    generated from summaries).
+  - [Anonymous targets](../rule_authors/anon_targets.md) - enable rules to
+    create a graph that has more sharing than the original user graph. As a
+    result, two unrelated binaries can compile shared code only once, despite
+    the shared code not knowing about this commonality. This feature is useful
+    for rules like Swift feature resolution.
+- **[Transitive-sets](../rule_authors/transitive_sets.md)** - similar in purpose
+  to Bazel's [depset](https://bazel.build/rules/lib/depset). But, instead of
+  being just a memory optimization, they are also wired into the dependency
+  graph, reducing its size.
+- **Buck2 is not phased** - there are no target graph/action graph phases, just
+  a series of dependencies in a
+  [single graph on DICE](https://github.com/facebook/buck2/blob/main/dice/dice/docs/index.md)
+  that result in whatever the user requested. That means that Buck2 can
+  sometimes parallelise different phases and track changes very precisely.
+- **Buck2 can integrate with the virtual filesystem
+  [Eden](https://github.com/facebook/sapling)** - this provides good
+  performance, even when the file system is backed by source control fetches.
+  However, Eden is not required, and a normal file system will also work well.
+- **The Buck2 Starlark implementation is available
+  [as a standalone library](https://developers.facebook.com/blog/post/2021/04/08/rust-starlark-library/)** -
+  this provides features such as IDE integration (both LSP and DAP bindings),
+  linters, typecheckers, and more. These features are integrated into Buck2 to
+  give a better developer experience (which is still evolving).
+- **Buck2 supports configurations** - constructs such as `select` provide
+  multi-platform/architecture builds; these are heavily inspired by Bazel.
+  Within that space, there are a number of small differences, such as
+  `toolchain_deps`.
+- **Buck2 is fast** - in our internal tests, we observed that Buck2 completed
+  builds 2x as fast as Buck1.
+
+For a comprehensive list of benefits, see
+[Benefits Compared to Buck1](benefits/compared_to_buck1.md).
+
+## Why use Buck2?
+
+It would be delightful if you tried out Buck2! But it is early-stage software,
+so users may run into unexpected issues. If you encounter an issue, please
+report it via [GitHub issues](https://github.com/facebook/buck2/issues).
+
+Buck2 is used internally within Meta and has been available as open source
+since 2023.
+
+There are several differences between the internal and open-source versions:
+
+- Meta uses an internal version of remote execution with builds always hooked up
+  to remote execution. The open-source binding, which uses Buck2 without remote
+  execution, may be less polished.
+- There are some configuration differences between the open source and internal
+  versions. For example, file changes default to `inotify` in open-source, and
+  to Watchman internally.
+- The prelude (containing all the rules) is the same for open-source as
+  internal, but toolchains are not open-sourced. The required custom toolchains
+  may not work as well.
+
+There are also some things that aren't quite yet finished:
+
+- There are not yet mechanisms to build in release mode (that should be achieved
+  by modifying the toolchain).
+- Windows/macOS builds are still in progress; open-source code is mostly tested
+  on Linux.
+
+If none of that puts you off, [give Buck2 a go](getting_started.md)!
diff --git a/docs/api.md b/docs/api.md
index 8c32943e904b0..dbfe71ee1204d 100644
--- a/docs/api.md
+++ b/docs/api.md
@@ -1,7 +1,16 @@
 # APIs
 
-A lot of Buck2 is driven by Starlark APIs. While there is a [Starlark specification](https://github.com/bazelbuild/starlark/blob/master/spec.md), for most purposes it can be considered a subset of Python. There are three main places you can write Starlark in Buck2:
+A lot of Buck2 is driven by Starlark APIs. While there is a
+[Starlark specification](https://github.com/bazelbuild/starlark/blob/master/spec.md),
+for most purposes it can be considered a subset of Python. There are three main
+places you can write Starlark in Buck2:
 
-* In `BUCK` files, where you can define the rules. The most interesting functions are [the rules themselves](rules), but you will often use the [builtin Starlark functions](starlark/globals) (most of which are the same as in Python), and a few of the [build functions](build/globals) (e.g. `glob`).
-* In rule definitions, where you can use the same Starlark standard functions, but will heavily be using the [build functions](build/globals) (e.g. `rule` and `attrs`).
-* In [BXL](../developers/bxl), where the [context type](bxl/bxl_ctx) is one of the more important ones.
+- In `BUCK` files, where you can define the rules.
The most interesting + functions are [the rules themselves](../prelude/globals), but you will often + use the [builtin Starlark functions](starlark) (most of which are the same as + in Python), and a few of the [build functions](build) (e.g. `glob`). +- In rule definitions, where you can use the same Starlark standard functions, + but will heavily be using the [build functions](build) (e.g. `rule` and + `attrs`). +- In [BXL](../developers/bxl), where the [context type](bxl/bxl/Context) is one + of the more important ones. diff --git a/docs/benefits.md b/docs/benefits.md deleted file mode 100644 index b5377e840ae86..0000000000000 --- a/docs/benefits.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -id: benefits -title: Benefits When Compared to Buck1 ---- - - - -For reports from real users, see the [Testimonials](testimonials.fb.md), which include Workplace posts and their full context. - - - -## Benefits for end users - -> *"`buck2 build SOME_TARGET_I_ALREADY_BUILT_BEFORE` is basically instantaneous and is a super delightful experience. 🙂" - End user experience* ([source](https://fb.prod.workplace.com/groups/buck2users/posts/3030704467185914)) - -> *"Buck2 is largely faster and more memory efficient than buck1, and where I’ve seen counter-examples, the buck2 team quickly optimizes and fixes that.🙂"* - Software Engineer feedback ([source](https://fb.prod.workplace.com/groups/devx.ci.bffs/posts/616830502778501)) - -For people who use Buck on a daily basis (such as using Buck build as part of their development inner loop), switching to Buck2 provides the following benefits: - -* **Performance** - the performance of Buck2 is better in four ways: - * ***Fast things are fast*** - in Buck1, simply typing `buck build` when there is nothing to do can be expensive (23 seconds in some benchmarks). In Buck2, the same build action takes 0.1 seconds. Actions that should be fast are fast, which enables developers to use Buck more freely, without trying to work around the build system. - * ***Slow things are faster*** - when there is real work to do, Buck2 is significantly closer to the critical path. Benchmarks range from 5%/10s faster for changing a header file (lots of parallel C++ computations, Buck1 already nearly at the critical path) to 42%/145s faster (changing a Thrift file in a large project). - * ***Users contribute to the shared cache*** - with Buck1, only trusted CI builds write to the network cache, while with Buck2 everyone writes to the cache through sandboxed remote execution. This increases the chance of cache hits, saving capacity and time. - * ***CI builds go faster*** - these numbers vary day by day, but most projects are 2-4x faster. This means spending less time waiting for CI and saving some capacity at the same time. -* **Correctness** - in Buck2, rules are hermetic by default. Missing dependencies are errors. These restrictions apply to both the user-written `BUCK` files and the language rules. - * During the process of migrating to Buck2, a huge number of missing dependencies have been fixed. However, during the same process, several Buck1 issues were identified that are not going to be fixed in Buck1 (such as missing headers, genrules without dependencies, and OCaml rules don’t track all deps). The end result is that Buck2 gives the right answer more often, cutting down on user surprises. -* **Rule features** - the rules in Buck2, especially for less commonly used languages (such as Haskell, OCaml, and Rust) support features above and beyond those in Buck1. 
- * Examples: dependencies can be given as arguments to `prebuilt_ocaml_library`, Haskell enables the use of stub headers from C++, and Rust has experimental pipelining support. -* **Actively developed** - the Meta build team is putting all its efforts behind Buck2; it's vastly easier to develop than Buck1. While Buck2 is already ahead of Buck1 in many important aspects, the difference is only going to grow with several improvements in the pipeline. -* **Support** - Meta can provide much better support to those having difficulties with Buck2 than to those using Buck1. - -## Benefits for Rule Authors - -If you write language-specific rules, then Buck2 is in a different league to Buck1. - -> *"This is all rather fun! Buck2 rules are so much more hackable than Buck1."* - Software Engineer feedback -([source](https://fb.prod.workplace.com/groups/333784157210625/posts/928214407767594)) - -There are a number of reasons why Buck2 excels for Rule Authors: - -* **Faster developer cycle** - in Buck1, the time from changing a rule to seeing the impact is many minutes: you first have to compile Buck1, invalidate the dependency cache (and so on), and perhaps work between multiple OSs. With Buck2, it takes seconds, you don’t even need to restart the daemon. -* **Simple API** - Buck2 rules use a small and documented Starlark API, which is dependency-correct by construction. In Buck1, the rules must obey a lot of subtle side conditions with a much larger API. -* **Easier deployment** - for Buck2, deployment is just checking the rules in, with an atomic commit changing associated macros (when required). For Buck1, you have to make the repo work with the old and new rules and wait for a Buck version bump to ship your changes, perhaps a few days later. -* **Low barrier to entry** - writing rules in Buck2 is vastly easier than Buck1, significantly increasing the developer pool. This means that writing rules is now accessible to language experts, not just Buck experts. - -## Benefits for Integrators - -For those people who integrate Buck2 into larger systems, in addition to many of the above benefits apply, Buck2 provides the following benefits: - -* **Faster queries** - many integrators make extensive use of `buck uquery` and `cquery`. In Buck2, these commands are **faster** and use **less memory**. - * For example, on CI target determination (a bunch of targets/queries), Buck2 is 25% faster at P50 (moving to 40% faster at P95) with 25% less memory (saving over 20Gb, and crossing below the 64Gb threshold). -* **Profiling** - Buck2 already ships with five types of profiling for both loading and analysis (flame graphs, statement breakdown, heap profiles etc). With Buck2, these tools are much more easily accessible to people not on the Build Infra team. - - - -* **Eden friendly** - Buck2 is tuned for the Eden architecture, performing fewer disk operations with greater parallelism. - * For example, the slowdown caused by using Eden for `targets` on `fbandroid` is [reduced from 300s to 80s](https://fb.workplace.com/groups/132499338763090/posts/132580122088345). -* **Better observability** - Buck2 populates many Scuba tables with information about [loading](https://www.internalfb.com/intern/scuba/query/?dataset=buck2_loads), [analysis](https://www.internalfb.com/intern/scuba/query/?dataset=buck2_analyses), [builds](https://www.internalfb.com/intern/scuba/query/?dataset=buck2_builds) and [errors](https://www.internalfb.com/intern/scuba/query/?dataset=buck2_action_errors), and more. 
The architecture of Buck2 ensures that all important information can be recorded in a uniform manner, enabling sensible trade-offs to be made about what to store vs for how long. - - - -## The downside - -While there are many benefits, it would be remiss not to include a small list of temporary issues: - -* **Stability** - Buck2 is under active development, which means the risk of regression is correspondingly higher. There may be issues, but they will be fixed as quickly as possible (and lessons learned) through the through Meta's SEV-review process. -* **Corner cases** - Buck1 has been battle-tested for nearly a decade, which has included attention to events such as error messages in unlikely corner cases. Only time and user feedback will enable Meta to bring Buck2 to the same level. Please share all such feedback! - - - -* **Buck2 Web UI** - there isn’t yet a working Web UI equivalent to the one provided by Buck1. But we’re working on it and hope to share an initial version shortly. - - diff --git a/docs/bootstrapping.md b/docs/bootstrapping.md deleted file mode 100644 index dec06df3b4590..0000000000000 --- a/docs/bootstrapping.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -id: bootstrapping -title: Bootstrapping Buck2 ---- - -# Bootstrapping Buck2 - -To generate `BUCK` files for `buck2`'s dependencies, we use [reindeer](https://github.com/facebookincubator/reindeer). - -Note that the resulting binary will be compiled without optimisations or [jemalloc](https://github.com/jemalloc/jemalloc), so we recommend using the Cargo-produced binary in further development. - -First, install `reindeer` with `Cargo`: -```sh -cargo install --locked --git https://github.com/facebookincubator/reindeer reindeer -``` - -Next, run the following to buckify dependencies: -```sh -cd buck2/ -reindeer --third-party-dir shim/third-party/rust buckify -``` - -Build `buck2` with `buck2`: -```sh -buck2 build //:buck2 -``` diff --git a/docs/concepts/buck_out.md b/docs/concepts/buck_out.md new file mode 100644 index 0000000000000..5a76dd36f044d --- /dev/null +++ b/docs/concepts/buck_out.md @@ -0,0 +1,22 @@ +--- +id: buck_out +title: buck-out +--- + +# buck-out + +Buck2 stores build artifacts in a directory named `buck-out` in the root of your +[project](glossary.md#project). You should not make assumptions about where +Buck2 places your build artifacts within the directory structure beneath +`buck-out` as these locations depend on Buck2's implementation and could +potentially change over time. Instead, to obtain the location of the build +artifact for a particular target, you can use one of the `--show-*-output` +options with the [`buck2 build`](../../users/commands/build) or +[`buck2 targets`](../../users/commands/targets) commands, most commonly +`--show-output`. For the full list of ways to show the output location, you can +run `buck2 build --help` or `buck2 targets --help`. + +``` +buck2 targets --show-output +buck2 build --show-output +``` diff --git a/docs/concepts/buckconfig.md b/docs/concepts/buckconfig.md index 5685aa479bb7d..dbbcb3e4a2deb 100644 --- a/docs/concepts/buckconfig.md +++ b/docs/concepts/buckconfig.md @@ -3,66 +3,93 @@ id: buckconfig title: .buckconfig --- -The root of your [project](glossary.md#project) must contain a configuration file named `.buckconfig`. Before executing, Buck2 reads this file to incorporate any customizations it specifies. +The root of your [project](glossary.md#project) must contain a configuration +file named `.buckconfig`. 
Before executing, Buck2 reads this file to incorporate +any customizations it specifies. ## Performance impact of Buck2 configuration changes -Because configuration settings are sometimes included in the cache keys that Buck2 uses in its caching system, changes to Buck's configuration can invalidate previously-built artifacts in Buck's caches. If this occurs, Buck2 rebuilds those artifacts, which can impact your build time. +Because configuration settings are sometimes included in the cache keys that +Buck2 uses in its caching system, changes to Buck's configuration can invalidate +previously-built artifacts in Buck's caches. If this occurs, Buck2 rebuilds +those artifacts, which can impact your build time. ## The .buckconfig file uses the INI file format -The `.buckconfig` file uses the [INI file format](http://en.wikipedia.org/wiki/INI_file). That is, it is divided into *sections* where each section contains a collection of key *names* and key *values*. The `.buckconfig` implementation supports some modifications to the INI file format; these are discussed below. +The `.buckconfig` file uses the +[INI file format](http://en.wikipedia.org/wiki/INI_file). That is, it is divided +into _sections_ where each section contains a collection of key _names_ and key +_values_. The `.buckconfig` implementation supports some modifications to the +INI file format; these are discussed below. ### Other INI file parsers -As mentioned previously, we have extended the INI file parser that Buck2 uses to parse configuration files. As a result, *INI file parsers provided by other languages or libraries are often not able to parse Buck's configuration files successfully*. +As mentioned previously, we have extended the INI file parser that Buck2 uses to +parse configuration files. As a result, _INI file parsers provided by other +languages or libraries are often not able to parse Buck's configuration files +successfully_. ### Dot character not supported in section names -We do not support the use of the *dot* character (`.`) in section names within Buck2 configuration files. For example, the following is **not** supported—*although Buck2 does not issue a warning or error*. +We do not support the use of the _dot_ character (`.`) in section names within +Buck2 configuration files. For example, the following is **not** +supported—_although Buck2 does not issue a warning or error_. ```ini [foo.bar] baz=1 ``` -Note that sometimes you might need to define your own custom sections, such as for platform flavors for C++ or Python. These scenarios are examples of when you should be careful not to introduce the dot character in section names. -This constraint is because Buck2 uses the dot character to delimit section names and key names in other contexts such as the `--config` command-line parameter. +Note that sometimes you might need to define your own custom sections, such as +for platform flavors for C++ or Python. These scenarios are examples of when you +should be careful not to introduce the dot character in section names. This +constraint is because Buck2 uses the dot character to delimit section names and +key names in other contexts such as the `--config` command-line parameter. ## Character encoding -To ensure that any character can be encoded in a `.buckconfig` key value, you can use escape sequences to encode characters that would otherwise be problematic. -The following escape sequences are supported. 
+To ensure that any character can be encoded in a `.buckconfig` key value, you
+can use escape sequences to encode characters that would otherwise be
+problematic. The following escape sequences are supported.
 
-|`\\` |backslash |
-|--- |--- |
-|`\"` |double quote |
-|`\n` |newline |
-|`\r` |carriage return |
-|`\t` |tab |
-|`\x##` |Unicode character with code point ## (in hex) |
-|`\u####` |Unicode character with code point #### (in hex) |
-|`\U########` |Unicode character with code point ######## (in hex) |
+| `\\`         | backslash                                            |
+| ------------ | ---------------------------------------------------- |
+| `\"`         | double quote                                         |
+| `\n`         | newline                                              |
+| `\r`         | carriage return                                      |
+| `\t`         | tab                                                  |
+| `\x##`       | Unicode character with code point ## (in hex)        |
+| `\u####`     | Unicode character with code point #### (in hex)      |
+| `\U########` | Unicode character with code point ######## (in hex)  |
 
 ## Key values as lists
 
-Although the standard INI format supports only key values that represent a single item, Buck2 supports key values that represent a list of items. The syntax is to separate the items in the list using the space (`0x20`) character. For example, a key value for the list of command-line flags to be passed to a compiler could be represented as a list of the flags separated by spaces:
+Although the standard INI format supports only key values that represent a
+single item, Buck2 supports key values that represent a list of items. The
+syntax is to separate the items in the list using the space (`0x20`) character.
+For example, a key value for the list of command-line flags to be passed to a
+compiler could be represented as a list of the flags separated by spaces:
 
 ```
 flags = -foo -bar -baz -qux
 ```
 
-When a key value is parsed as a list instead of a single item, the separator character is interpreted as a separator only when it occurs *outside of double quotes*. For example, if `flags` is a key value interpreted as a list of items separated by spaces, then
+When a key value is parsed as a list instead of a single item, the separator
+character is interpreted as a separator only when it occurs _outside of double
+quotes_. For example, if `flags` is a key value interpreted as a list of items
+separated by spaces, then
 
 ```
 flags = -foo "-bar \u0429"
 ```
 
-results in the two strings: `foo` and `-bar Щ`; the space character between `-bar` and `\u0429` is not interpreted as a separator.
+results in the two strings: `-foo` and `-bar Щ`; the space character between
+`-bar` and `\u0429` is not interpreted as a separator.
 
 ## Transclusion of values from one key to another
 
-Values from other keys can be transcluded into the current key using the following syntax inside the current key value.
+Values from other keys can be transcluded into the current key using the
+following syntax inside the current key value.
 
 ```
 $(config <section>.<field>)
@@ -76,35 +103,64 @@ For example, to use the `[go].vendor_path` in a custom setting:
 
 ## Comments
 
-In addition to the semicolon (`;`), you can use the pound sign (`#`), as a comment character in `.buckconfig`.
+In addition to the semicolon (`;`), you can use the pound sign (`#`) as a
+comment character in `.buckconfig`.
 
 ## .buckconfig.local
 
-The root of your [project](glossary.md#project) may contain a second configuration file named `.buckconfig.local`. Its format is the same as that of `.buckconfig`, but settings in `.buckconfig.local` override those in `.buckconfig`. In practice, `.buckconfig` is a version-controlled file that contains settings that are applicable to all team members, whereas `.buckconfig.local` is excluded from version control to allow users to define personal settings, such as personal aliases.
+The root of your [project](glossary.md#project) may contain a second
+configuration file named `.buckconfig.local`. Its format is the same as that of
+`.buckconfig`, but settings in `.buckconfig.local` override those in
+`.buckconfig`. In practice, `.buckconfig` is a version-controlled file that
+contains settings that are applicable to all team members, whereas
+`.buckconfig.local` is excluded from version control to allow users to define
+personal settings, such as personal aliases.
 
 ## Other initialization files
 
-In addition to the `.buckconfig` and `.buckconfig.local` files in the project root, Buck2 reads configuration settings from the following additional locations, some of which are actually directories:
+In addition to the `.buckconfig` and `.buckconfig.local` files in the project
+root, Buck2 reads configuration settings from the following additional
+locations, some of which are actually directories:
 
 1. Directory `.buckconfig.d` located in the project root directory.
-2. File `.buckconfig` and directory `.buckconfig.d` located in the current user's home directory which, on Unix-like systems, is available from the `HOME` environment variable or through the `~` symbol.
-3. File `buckconfig` and directory `buckconfig.d` located in system directory `/etc/`.
-
-Buck2 treats *any* file—irrespective of name—in a `.buckconfig.d`(`buckconfig.d`) directory (excluding files found in subdirectories) as a Buck2 configuration file, provided that it adheres to `.buckconfig` syntax.
-Note that a `.buckconfig.d` directory is distinct from the similarly-named `.buckd` directory which is used by the [Buck2 Daemon (`buckd`)](daemon.md) .
-For a description of how Buck2 resolves collisions between settings in these configuration files, see the section [**Precedence of Buck2 configuration specifications**](#precedence-of-buck2-configuration-specifications)
+2. File `.buckconfig` and directory `.buckconfig.d` located in the current
+   user's home directory which, on Unix-like systems, is available from the
+   `HOME` environment variable or through the `~` symbol.
+3. File `buckconfig` and directory `buckconfig.d` located in system directory
+   `/etc/`.
+
+Buck2 treats _any_ file—irrespective of name—in a
+`.buckconfig.d` (`buckconfig.d`) directory (excluding files found in
+subdirectories) as a Buck2 configuration file, provided that it adheres to
+`.buckconfig` syntax. Note that a `.buckconfig.d` directory is distinct from the
+similarly-named `.buckd` directory which is used by the
+[Buck2 Daemon (`buckd`)](daemon.md).
For a description of how Buck2 resolves +collisions between settings in these configuration files, see the section +[**Precedence of Buck2 configuration specifications**](#precedence-of-buck2-configuration-specifications) below. ## Command-line control of configuration -In addition to the above configuration files, Buck2 supports specifying additional configuration files from the Buck2 command line using the `--config-file` parameter. -You can also specify configuration settings *individually* on the Buck2 command line using the `--config` (`-c`) parameter. Furthermore, you can aggregate these settings into *flag files* using the `--flagfile` parameter. A flag file provides similar functionality to a configuration file but uses a different syntax. Flag files are sometimes called *mode files* or *at* (`@`) files. +In addition to the above configuration files, Buck2 supports specifying +additional configuration files from the Buck2 command line using the +`--config-file` parameter. You can also specify configuration settings +_individually_ on the Buck2 command line using the `--config` (`-c`) parameter. +Furthermore, you can aggregate these settings into _flag files_ using the +`--flagfile` parameter. A flag file provides similar functionality to a +configuration file but uses a different syntax. Flag files are sometimes called +_mode files_ or _at_ (`@`) files. ## Precedence of Buck2 configuration specifications -The following list shows the order of precedence for how Buck2 interprets its configuration specifications. Settings specified using a method closer to the top of the list have higher precedence and will override those lower on the list. For example, the `.buckconfig` file in the repo overrides a `.buckconfig` file in the user's `HOME` directory. +The following list shows the order of precedence for how Buck2 interprets its +configuration specifications. Settings specified using a method closer to the +top of the list have higher precedence and will override those lower on the +list. For example, the `.buckconfig` file in the repo overrides a `.buckconfig` +file in the user's `HOME` directory. -1. Configuration specified on the command line using `--config` (`-c`), `--config-file` and `--flagfile`. Configuration specified later on the command line overrides configuration specified earlier. +1. Configuration specified on the command line using `--config` (`-c`), + `--config-file` and `--flagfile`. Configuration specified later on the + command line overrides configuration specified earlier. 1. `.buckconfig.local` in the repo. 1. `.buckconfig` in the repo. 1. Files in a `.buckconfig.d` folder of the repo. @@ -113,27 +169,44 @@ The following list shows the order of precedence for how Buck2 interprets its co 1. The global file `/etc/buckconfig` 1. Files in the global directory `/etc/buckconfig.d` -Files in a `.buckconfig.d` (`buckconfig.d`) directory have precedence according to the lexicographical order of their file names. Files *later* in the lexicographical order have precedence over files earlier in that order. +Files in a `.buckconfig.d` (`buckconfig.d`) directory have precedence according +to the lexicographical order of their file names. Files _later_ in the +lexicographical order have precedence over files earlier in that order. ## Configuration files can include other files -Any of the configuration files that we've discussed so far can also include by reference other files that contain configuration information. 
These included
+files can contain complete `.buckconfig` sections or they can contain a group of
+key name/value pairs that constitute part of a section. In this second use case,
+you'll need to ensure that the _included_ file is referenced beneath the
+appropriate section in the _including_ file. Because of this additional
+complexity, we recommend that you include only files that contain complete
+sections. **Note:** Inclusion of files is a Buck-specific extension to the INI
+file parser that Buck2 uses. Therefore, if you use this feature, your Buck2
+configuration files will probably not be parsable by other more-generic INI file
+parsers. The syntax to include a file is
+
+```
+<file:path-to-included-file>
+```
+
-where *path-to-included-file* is either a relative path from the including file (recommended) or an absolute path from the root of the file system.
-You can also specify that the file should be included only if it exists by prefixing with a question mark (`?`).
+where _path-to-included-file_ is either a relative path from the including file
+(recommended) or an absolute path from the root of the file system. You can also
+specify that the file should be included only if it exists by prefixing with a
+question mark (`?`).
+
+```
+<?file:path-to-included-file>
+```
+
-If you use this prefix, it is not an error condition if the file does not exist; Buck2 just silently continues to process the rest of the configuration file.
-In the following example, the `.buckconfig` file includes the file `cxx-other-platform.include` which exists in the subdirectory `cxx-other-platform`. The `.buckconfig` file will also include the file `future-platform` from the directory `future-platform.include` if that file exists.
+If you use this prefix, it is not an error condition if the file does not exist;
+Buck2 just silently continues to process the rest of the configuration file. In
+the following example, the `.buckconfig` file includes the file
+`cxx-other-platform.include` which exists in the subdirectory
+`cxx-other-platform`. The `.buckconfig` file will also include the file
+`future-platform` from the directory `future-platform.include` if that file
+exists.
 
 ```
 #
+<file:cxx-other-platform/cxx-other-platform.include>
+#
+<?file:future-platform/future-platform.include>
@@ -151,3 +224,51 @@ In the following example, the `.buckconfig` file includes the file `cxx-other-pl
 [cxx#other_platform]
 cxxppflags="-D MYMACRO=\"Watchman\""
 ```
+
+## Sections
+
+Below is an incomplete list of supported buckconfigs.
+
+## [alias]
+
+This section contains definitions of [build target](build_target.md) aliases.
+
+```
+[alias]
+  app = //apps/myapp:app
+  apptest = //apps/myapp:test
+```
+
+These aliases can then be used from the command line:
+
+```
+$ buck2 build app
+$ buck2 test apptest
+```
+
+## [cells]
+
+Lists the cells that constitute the Buck2 project.
Buck2 builds that are part of
+this project—that is, which use this `.buckconfig`—can access the cells
+specified in this section.
+
+```
+[cells]
+  buck = .
+  bazel_skylib = ./third-party/skylark/bazel-skylib
+```
+
+The string on the left-hand side of the equals sign is the _alias_ for the cell.
+The string on the right-hand side of the equals sign is the path to the cell
+from the directory that contains this `.buckconfig` file. It is not necessary to
+include the current cell in this section, but we consider it a best practice to
+do so:
+
+```
+buck = .
+```
+
+You can view the contents of this section using the `buck2 audit cell` command.
+
+`[repositories]` is additionally supported as a deprecated alternative name for
+this section.
diff --git a/docs/concepts/build_file.md b/docs/concepts/build_file.md
new file mode 100644
index 0000000000000..8a4279bdde20f
--- /dev/null
+++ b/docs/concepts/build_file.md
@@ -0,0 +1,63 @@
+---
+id: build_file
+title: Build File
+---
+
+# Build File
+
+A _build file_ is a file, typically named `BUCK`, that defines one or more
+[build rule](build_rule.md)s. Note that you can change the name that Buck2 uses
+for the build file in the `buildfile` section of `.buckconfig`. A source file in
+your project can only be referenced by rules in its "nearest" build file, where
+"nearest" means its closest direct ancestor in your project's file tree. (If a
+source file has a build file as a sibling, then that is its nearest ancestor.)
+For example, if your project had the following `BUCK` files:
+
+```
+java/com/facebook/base/BUCK
+java/com/facebook/common/BUCK
+java/com/facebook/common/collect/BUCK
+```
+
+Then your build rules would have the following constraints:
+
+- Rules in `java/com/facebook/base/BUCK` can reference any file under
+  `java/com/facebook/base/`.
+- Rules in `java/com/facebook/common/BUCK` can reference any files under that
+  directory, except for those under `java/com/facebook/common/collect/`, as
+  those "belong" to the `BUCK` file in the `collect` directory.
+
+The set of source files accessible to a build file is also known as its _build
+package_. The way to refer to code across build packages is to create build
+rules and use `deps` to refer to that code. Going back to the previous example,
+suppose code in `java/com/facebook/common/concurrent/` wants to depend on code
+in `java/com/facebook/common/collect/`. Presumably
+`java/com/facebook/common/collect/BUCK` has a build rule like:
+
+```
+java_library(
+    name = 'collect',
+    srcs = glob(['*.java']),
+    deps = [
+        '//java/com/facebook/base:base',
+    ],
+)
+```
+
+Then `java/com/facebook/common/BUCK` could have a rule like:
+
+```
+java_library(
+    name = 'concurrent',
+    srcs = glob(['concurrent/*.java']),
+    deps = [
+        '//java/com/facebook/base:base',
+        '//java/com/facebook/common/collect:collect',
+    ],
+)
+```
+
+whereas the following **would be invalid** because
+`java/com/facebook/common/collect/` has its own build file, so
+`//java/com/facebook/common:concurrent` cannot list
+`java/com/facebook/common/collect/*.java` in its `srcs`.
+
+```
+java_library(
+    name = 'concurrent',
+    srcs = glob(['collect/*.java', 'concurrent/*.java']),
+    deps = [
+        '//java/com/facebook/base:base',
+    ],
+)
+```
diff --git a/docs/concepts/build_rule.md b/docs/concepts/build_rule.md
new file mode 100644
index 0000000000000..7149ad60fcd2a
--- /dev/null
+++ b/docs/concepts/build_rule.md
@@ -0,0 +1,166 @@
+---
+id: build_rule
+title: Build Rule
+---
+
+# Build Rule
+
+A _build rule_ is a procedure for producing output files from a set of input
+files in the context of a specified build configuration. Build rules are
+specified in [build file](build_file.md)s—typically named BUCK. **Note:** A
+build rule must explicitly specify, in its arguments, all of its required inputs
+in order for Buck2 to be able to build the rule's output in a way that is
+deterministic and reproducible.
+
+## Buck2's collection of build rules
+
+Buck2 comes with a collection of built-in build rules for many common build
+procedures. For example, compiling Java code against the Android SDK is a common
+procedure, so Buck2 provides the build rule
+[`android_library`](../../prelude/globals#android_library) to do that.
+Similarly, the final product of most Android development is an APK, so you can
+use the build rule [`android_binary`](../../prelude/globals#android_binary) to
+create an APK.
+
+## Source files as inputs to build rules
+
+Most build rules specify source files as inputs. For example, a
+[`cxx_library`](../../prelude/globals#cxx_library) rule would specify `.cpp`
+files as inputs. To support specifying these files, a `cxx_library` rule
+provides the `srcs` argument. Some languages, such as C++, use header files as
+well. To specify these, `cxx_library` provides a `headers` argument. In addition
+to `srcs` and `headers`, some rules provide variants of these arguments, such as
+`platform_srcs` and `platform_headers`. These arguments support groups of source
+files that should be used as inputs only when building for specific platforms.
+
+### Package boundaries and access to source files
+
+In Buck2, a BUCK file defines a _package_, which corresponds _roughly_ to the
+directory that contains the BUCK file and those subdirectories that do not
+themselves contain BUCK files. (To learn more, see the
+[Key Concepts](key_concepts.md) topic.) A rule in a BUCK file cannot specify a
+source file as an input unless that source file is in that BUCK file's package.
+An exception to this restriction exists for header files, but only if a rule in
+the package that contains the header file _exports_ that header file using the
+`exported_headers` argument. For more details, see the description for
+`exported_headers` in, for example, the
+[`cxx_library`](../../prelude/globals#cxx_library) topic. More commonly though,
+the package for a BUCK file contains all the source files required for the rules
+defined in that BUCK file. Functionality in source files from other packages is
+made available through the artifacts produced by the rules in the BUCK files for
+those packages. For example, a [`cxx_binary`](../../prelude/globals/#cxx_binary)
+might use the functionality in a `cxx_library` that is defined in another
+package. To access that functionality, the `cxx_binary` would take that
+`cxx_library` as a _dependency_.
+
+##### Symlinks: Use with caution if at all
+
+We recommend that you do _not_ use symlinks—either absolute or relative—to
+specify input files to build rules. Although using symlinks in this context does
+sometimes work, it can lead to unexpected behavior and errors.
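+To make the package-boundary and header-export rules above concrete, here is a
+sketch of two `BUCK` files (the paths and target names are hypothetical):
+
+```python
+# foo/BUCK -- this package owns foo.h and exports it to dependent packages.
+cxx_library(
+    name = "foo",
+    srcs = ["foo.cpp"],
+    exported_headers = ["foo.h"],
+    visibility = ["PUBLIC"],
+)
+
+# bar/BUCK -- rules here cannot list foo/foo.h as an input directly; instead
+# they depend on //foo:foo and use the exported header via that dependency.
+cxx_binary(
+    name = "bar",
+    srcs = ["bar.cpp"],
+    deps = ["//foo:foo"],
+)
+```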
+
+## Dependencies: Output from one rule as input to another rule
+
+A build rule can use the output from another build rule as one of its inputs by
+specifying that rule as a _dependency_. Typically, a build rule specifies its
+dependencies as a list of [build target](build_target.md)s in its `deps`
+argument. However, the rule can also specify dependencies—as build targets—in
+other arguments, such as `srcs`. **Example:** The output of a
+[`java_library`](../../prelude/globals/#java_library) rule is a JAR file. If a
+`java_library` rule specifies another `java_library` rule as a dependency, the
+JAR file produced by the specified rule is added to the classpath for the
+`java_library` that depends on it. **Example:** If a
+[`java_binary`](../../prelude/globals/#java_binary) rule specifies a
+`java_library` rule as a dependency, the JAR file for the specified
+`java_library` is available on the classpath for the `java_binary`. In addition,
+in the case of `java_binary`, the JAR files for any dependencies of the
+`java_library` rule _are also_ made available to the `java_binary` rule—and if
+those dependencies have dependencies of their own, they are added as well. This
+exhaustive cascade of dependencies is referred to as the rule's _transitive
+closure_.
+
+### Required dependencies are always built first
+
+Buck2 guarantees that any dependencies that a rule lists that are required in
+order to build that rule are built successfully _before_ Buck2 builds the rule
+itself. Note though that there can be special cases—such as
+[`apple_bundle`](../../prelude/globals/#apple_bundle)—where a rule's listed
+dependencies do not actually need to be built before the rule.
+
+### Visibility
+
+In order for a build rule to take a dependency on another build rule, the build
+rule on which the dependency is taken must be _visible_ to the build rule taking
+the dependency. A build rule's `visibility` argument is a list of
+[build target pattern](target_pattern.md)s that specify the rules that can take
+that rule as a dependency. For more information about the concept of visibility
+in Buck2, see the [Visibility](visibility.md) topic.
+
+### Dependencies define a graph
+
+Build rules and their dependencies define a directed acyclic graph (DAG). Buck2
+requires this graph to be acyclic to make it possible to build independent
+subgraphs in parallel.
+
+## How to handle special cases: genrules and macros
+
+Although Buck2 provides a rich set of built-in build rules for developers, it is
+not able to address all possible needs. As an "escape hatch," Buck2 provides a
+category of generic build rules called _genrules_. With genrules, you can
+perform arbitrary operations using shell scripts. The genrules supported by
+Buck2 are:
+
+- [`genrule`](../../prelude/globals/#genrule)
+- [`apk_genrule`](../../prelude/globals/#apk_genrule)
+- [`cxx_genrule`](../../prelude/globals/#cxx_genrule)
+
+### Multiple output files with genrules
+
+In most cases, a build rule produces exactly one output file. However, with
+genrules, you can specify an output _directory_ and write arbitrary files to
+that directory.
+
+### Macros
+
+Finally, note that you can define functions that generate build rules. In
+general, this should not be something that you need to do, but taking advantage
+of this option might help you add needed functionality to Buck2 without
+editing its source code.
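+As an illustration, such a macro is just a Starlark function that calls an
+existing rule with some defaults filled in. A minimal sketch (the file name,
+function name, and command are hypothetical):
+
+```python
+# macros.bzl -- a function that generates a genrule with house defaults.
+def checksum_genrule(name, src):
+    # Every caller gets the same checksum command and output naming scheme.
+    native.genrule(
+        name = name,
+        srcs = [src],
+        out = name + ".sha256",
+        cmd = "sha256sum $SRCS > $OUT",
+    )
+```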
+
+## String parameter macros
+
+It is also possible to expand references to other rules within the `cmd` of a
+`genrule()`, using builtin `string parameter macros`. All build rules expanded
+in the command are automatically considered to be dependencies of the
+`genrule()`.
+
+Note that the paths returned by these macros are _relative_ paths. Using
+relative paths ensures that your builds are _hermetic_, that is, they are
+reproducible across different machine environments.
+
+`$(classpath //path/to:target)`
+
+Expands to the transitive classpath of the specified build rule, provided that
+the rule has a Java classpath. If the rule does not have (or contribute to) a
+classpath, then an exception is thrown and the build breaks.
+
+`$(exe //path/to:target)`
+
+Expands a build rule that results in an executable to the commands necessary to
+run that executable. For example, a `java_binary()` might expand to a call to
+`java -jar path/to/target.jar`. Files that are executable (perhaps generated by
+a `genrule()`) are also expanded. If the build rule does not generate an
+executable output, then an exception is thrown and the build breaks.
+
+If the `$(exe my_dependency)` dependency should actually be built with the
+target platform, use `$(exe_target my_dependency)` instead, which will stick to
+the same platform as the target.
+
+`$(location //path/to:target)`
+
+Expands to the location of the output of the specified build rule. This means
+that you can refer to the output without needing to be aware of how Buck2 is
+storing data on the disk mid-build.
diff --git a/docs/concepts/build_target.md b/docs/concepts/build_target.md
new file mode 100644
index 0000000000000..0191f29bc7301
--- /dev/null
+++ b/docs/concepts/build_target.md
@@ -0,0 +1,135 @@
+---
+id: build_target
+title: Build Target
+---
+
+# Build Target
+
+A _build target_ is a string that identifies a build rule in your project.
+Build targets are used as arguments to Buck2 commands, such as
+[`buck2 build`](../../users/commands/build) and
+[`buck2 run`](../../users/commands/run). Build targets are also used as
+arguments to [build rules](build_rule.md) to enable one target to reference
+another. For example, a build rule might use a build target to reference another
+target in order to specify that target as a _dependency_.
+
+#### Fully-qualified build targets
+
+Here is an example of a _fully-qualified_ build target:
+
+```
+//java/com/facebook/share:ui
+```
+
+A fully-qualified build target has three components:
+
+1. The `//` prefix indicates that the subsequent path is from the _root_ of your
+   project. You can use the `buck2 root` command to identify the root of your
+   project.
+2. The `java/com/facebook/share` between the `//` prefix and the colon (`:`)
+   indicates that the [build file](build_file.md) (usually named `BUCK`) is
+   located in the directory `java/com/facebook/share`.
+3. The `ui` after the colon (`:`) indicates the name of the build target within
+   the build file. Build target names must be unique within a build file. By
+   _name_ we mean, more formally, the value of the `name` argument to the build
+   rule.
+
+Note that the name of the build file itself—usually BUCK—does _not_ occur in the
+build target. All build files within a given Buck2 project must have the same
+name—defined in the `[buildfile].name` entry of `.buckconfig`. Therefore, it is
+unnecessary to include the name in the target.
The full regular expression for a
+fully-qualified build target is as follows:
+
+```
+[A-Za-z0-9._-]*//[A-Za-z0-9/._-]*:[A-Za-z0-9_/.=,@~+-]+
+|- cell name -| | package path | |--- target name ----|
+```
+
+In Buck2, a _cell_ defines a directory tree of one or more Buck2 packages. For
+more information about Buck2 cells and their relationship to packages and
+projects, see the [Key Concepts](key_concepts.md) topic. **NOTE:** All target
+paths are assumed to start from the root of the Buck2 project. Buck2 does not
+support specifying a target path that starts from a directory below the root.
+Although the double forward slash (`//`) that prefixes target paths can be
+omitted when specifying a target from the command line (see **Pro Tips** below),
+Buck2 still assumes that the path is from the root. Buck2 does support
+_relative_ build paths, but in Buck2, that concept refers to specifying build
+targets _from within_ a build file. See **Relative build targets** below for
+more details.
+
+#### Relative build targets
+
+A _relative_ build target can be used to reference a build target _within the
+same_ [_build file_](build_file.md). A relative build target starts with a colon
+(`:`) and is followed by only the third component (or _short name_) of the
+fully-qualified build target. The following snippet from a build file shows an
+example of using a relative path.
+
+```
+# Assume this target is in //java/com/facebook/share/BUCK
+java_binary(
+  name = 'ui_jar',
+  deps = [
+    # The following target path
+    #   //java/com/facebook/share:ui
+    # is the same as using the following relative path.
+    ':ui',
+  ],
+)
+```
+
+## Command-line Pro Tips
+
+Here are some ways that you can reduce your typing when you specify build
+targets as command-line arguments to the `buck2 build` or `buck2 run` commands.
+Consider the following example of a fully-qualified build target used with the
+`buck2 build` command:
+
+```
+buck2 build //java/com/facebook/share:share
+```
+
+Although Buck2 is always strict when parsing build targets in build files, Buck2
+is flexible when parsing build targets on the command-line. Specifically, the
+leading `//` is optional on the command line, so the above could be:
+
+```
+buck2 build java/com/facebook/share:share
+```
+
+Also, if there is a forward slash before the colon, it is ignored, so this could
+also be written as:
+
+```
+buck2 build java/com/facebook/share/:share
+```
+
+which enables you to complete the directory path using tab-completion, which
+dramatically reduces how much you need to type.
+
+Finally, if the final path element matches the value specified after the colon,
+it can be omitted:
+
+```
+# This is treated as //java/com/facebook/share:share.
+buck2 build java/com/facebook/share/
+```
+
+which makes the build target even easier to tab-complete. For this reason, the
+build target for the primary deliverable in a build file is often named the same
+as the parent directory. That way, it can be built from the command-line with
+less typing.
+
+## See also
+
+Buck2 supports the ability to define **_aliases_ for build targets**; using
+aliases can improve brevity when specifying targets on the Buck2 command line.
+For more information, see the [`[alias]`](buckconfig.md#alias) section in the
+documentation for [`.buckconfig`](buckconfig.md). A
+[**build target pattern**](target_pattern.md) is a string that describes a set
+of one or more build targets.
For example, the pattern `//...` is used to build
+an entire project. For more information, see the **Build Target Pattern** topic.
 diff --git a/docs/concepts/concept_map.md b/docs/concepts/concept_map.md
 index 3054bca4c06f8..1c7b2e952eb06 100644
 --- a/docs/concepts/concept_map.md
 +++ b/docs/concepts/concept_map.md
 @@ -5,16 +5,25 @@ title: Concept Map
 
 import useBaseUrl from '@docusaurus/useBaseUrl';
 
-The Concept Map provides an at-a-glance overview of the relationships between widely used Buck2 concepts. It is meant to be a tool to help those onboarding to Buck2 to quickly gain an understanding of the Buck2 environment.
+import { FbInternalOnly } from 'docusaurus-plugin-internaldocs-fb/internal';
+
+The Concept Map provides an at-a-glance overview of the relationships between
+widely used Buck2 concepts. It is meant to be a tool to help those onboarding to
+Buck2 to quickly gain an understanding of the Buck2 environment.
+
 :::note
-The Concept Map is for reference only and is not intended to be 100% accurate nor complete.
+The Concept Map is for reference only and is not intended to be 100%
+accurate nor complete.
 
-The version above was created in LucidChart and is located in the [Buck2 team folder](https://lucid.app/folder/invitations/accept/inv_c5c89718-b1cd-4b22-ae76-a47616719948). To login into Lucidcharts, do `bunnylol lucidchart`
+The version above was created in LucidChart and is located in the
+[Buck2 team folder](https://lucid.app/folder/invitations/accept/inv_c5c89718-b1cd-4b22-ae76-a47616719948).
+To log in to LucidChart, use `bunnylol lucidchart`.
+
 :::
 diff --git a/docs/concepts/configurations.md b/docs/concepts/configurations.md
 new file mode 100644
 index 0000000000000..fd19e41914fab
 --- /dev/null
 +++ b/docs/concepts/configurations.md
 @@ -0,0 +1,91 @@
+---
+id: configurations
+title: Configurations
+---
+
+For rule authors see also: [Configurations](../rule_authors/configurations.md)
+
+When building a target, Buck2 always builds it in a particular "configuration."
+The configuration typically includes information like the target OS, target
+architecture, sanitizers, optimization level, and so on. One way to understand
+the effect that a configuration has is via the `cquery` and `uquery` commands.
+The `cquery` command will compute the appropriate configuration for a target and
+display a version of that target's attributes with the configuration applied.
+The `uquery` command will not apply a configuration.
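+
+To make that concrete, here is a hand-written sketch of roughly what such a
+target's `BUCK` file might contain. It is abbreviated and illustrative: the real
+target has many more attributes and dependencies, and the `srcs` glob is an
+assumption.
+
+```python
+rust_library(
+    name = "buck2_core",
+    srcs = glob(["src/**/*.rs"]),
+    deps = [
+        "fbsource//third-party/rust:anyhow",
+    ] + select({
+        # Only depend on common-path when the target configuration is Windows.
+        "DEFAULT": [],
+        "ovr_config//os:windows": ["fbsource//third-party/rust:common-path"],
+    }) + select({
+        # Only depend on nix when the target configuration is Linux.
+        "DEFAULT": [],
+        "ovr_config//os:linux": ["fbsource//third-party/rust:nix"],
+    }),
+)
+```
+
+Here is a heavily trimmed version of the outputs of invoking `uquery` and
+`cquery` on `//buck2/app/buck2_core:buck2_core`.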
+
+```
+> buck2 uquery -A '"//buck2/app/buck2_core:buck2_core"'
+{
+  "fbcode//buck2/app/buck2_core:buck2_core": {
+    "buck.type": "rust_library",
+    "buck.package": "fbcode//buck2/app/buck2_core:TARGETS",
+    "name": "buck2_core",
+    "visibility": [
+      "PUBLIC"
+    ],
+    "deps": [
+      "fbsource//third-party/rust:anyhow",
+      "fbsource//third-party/rust:arc-swap",
+      "fbsource//third-party/rust:blake3",
+      "fbsource//third-party/rust:compact_str",
+      "fbsource//third-party/rust:dashmap",
+      {
+        "__type": "selector",
+        "entries": {
+          "DEFAULT": [],
+          "ovr_config//os:windows": [
+            "fbsource//third-party/rust:common-path"
+          ]
+        }
+      },
+      {
+        "__type": "selector",
+        "entries": {
+          "DEFAULT": [],
+          "ovr_config//os:linux": [
+            "fbsource//third-party/rust:nix"
+          ]
+        }
+      }
+    ]
+  }
+}
+```
+
+```
+> buck2 cquery -A '"//buck2/app/buck2_core:buck2_core"'
+{
+  "fbcode//buck2/app/buck2_core:buck2_core (ovr_config//platform/linux:)": {
+    "buck.type": "rust_library",
+    "buck.package": "fbcode//buck2/app/buck2_core:TARGETS",
+    "buck.target_configuration": "ovr_config//platform/linux:",
+    "buck.execution_platform": "fbcode//buck2/platform/",
+    "name": "buck2_core",
+    "visibility": [
+      "PUBLIC"
+    ],
+    "deps": [
+      "fbsource//third-party/rust:anyhow (ovr_config//platform/linux:)",
+      "fbsource//third-party/rust:arc-swap (ovr_config//platform/linux:)",
+      "fbsource//third-party/rust:blake3 (ovr_config//platform/linux:)",
+      "fbsource//third-party/rust:compact_str (ovr_config//platform/linux:)",
+      "fbsource//third-party/rust:dashmap (ovr_config//platform/linux:)",
+      "fbsource//third-party/rust:nix (ovr_config//platform/linux:)"
+    ]
+  }
+}
+```
+
+The `cquery` output has additional `buck.target_configuration` and
+`buck.execution_platform` attributes which tell you what the target is being
+built for and what it's being built on, respectively. `uquery` doesn't have
+those.
+
+The deps in `uquery` also have a number of selects; these indicate that the
+`common-path` dependency should only be included when building for Windows,
+while the `nix` dependency is needed only for Linux. In `cquery` that
+distinction has been resolved; because the target has been configured for Linux,
+the `nix` dependency is present and indistinguishable from any other, while the
+`common-path` dependency is gone.
 diff --git a/docs/concepts/daemon.md b/docs/concepts/daemon.md
 index 19127f6993344..dcd344f48abb4 100644
 --- a/docs/concepts/daemon.md
 +++ b/docs/concepts/daemon.md
 @@ -3,21 +3,38 @@ id: daemon
 title: Daemon (buckd)
 ---
 
-The first time that a Buck2 command is run, Buck2 starts a daemon process for the current project. For subsequent commands, Buck2 checks for the running daemon process and, if found, uses the daemon to execute the command. Using the Buck2 daemon can save significant time as it enables Buck to share cache between Buck2 invocations.
+import { FbInternalOnly } from 'docusaurus-plugin-internaldocs-fb/internal';
+
+The first time that a Buck2 command is run, Buck2 starts a daemon process for
+the current project. For subsequent commands, Buck2 checks for the running
+daemon process and, if found, uses the daemon to execute the command. Using the
+Buck2 daemon can save significant time as it enables Buck2 to share a cache
+between Buck2 invocations.
 
-By default, there is 1 daemon per [project](./glossary.md#project) root, you can run multiple daemons in the same project by specifying an [isolation dir](./glossary.md#isolation-dir).
 
-While it runs, the Buck daemon process monitors the project's file system for changes.
The Buck daemon excludes from monitoring any subtrees of the project file system that are specified in the `[project].ignore` setting of `.buckconfig` (for details, see the still-relevant [[project].ignore](../legacy/files-and-directories/dot-buckconfig.md#ignore) section of the '.buckconfig' legacy document).
+By default, there is one daemon per [project](./glossary.md#project) root; you
+can run multiple daemons in the same project by specifying an
+[isolation dir](./glossary.md#isolation-dir).
+
+While it runs, the Buck2 daemon process monitors the project's file system for
+changes. The Buck2 daemon excludes from monitoring any subtrees of the project
+file system that are specified in the `[project].ignore` setting of
+`.buckconfig`.
+
+You can see detailed information about the status of the daemon by running
+`buck2 status`.
 
 ## Killing or disabling the Buck daemon
 
-The Buck daemon process is killed if `buck2 clean` or `buck2 kill` commands are run. Note that they won't kill the daemon associated with custom isolation dirs. To do that, run using the `--isolation-dir` option (`buck2 --isolation-dir `)
+The Buck daemon process is killed if the `buck2 clean` or `buck2 kill` commands
+are run. Note that they won't kill the daemon associated with custom isolation
+dirs. To do that, run Buck2 with the `--isolation-dir` option
+(`buck2 --isolation-dir `).
 
 The Daemon is also killed when:
 
-* The `buck2 killall` command is run.
-* A new buck2 version is available.
+- The `buck2 killall` command is run.
+- A new buck2 version is available.
 diff --git a/docs/concepts/glossary.md b/docs/concepts/glossary.md
 index 5564d0c2f7479..e29b7561659d0 100644
 --- a/docs/concepts/glossary.md
 +++ b/docs/concepts/glossary.md
 @@ -6,138 +6,290 @@ toc_max_heading_level: 4
 
 #### .buckconfig
 
-The root of your [project](#project) must contain a configuration file named `.buckconfig`. Before executing, Buck2 reads this file to incorporate specified customizations. See [.buckconfig](buckconfig.md) for more info.
+The root of your [project](#project) must contain a configuration file named
+`.buckconfig`. Before executing, Buck2 reads this file to incorporate specified
+customizations. See [.buckconfig](buckconfig.md) for more info.
 
 #### Action
 
-An individual, cacheable, ideally hermetic command that's run during the [build](#buck-file). It takes [artifacts](#artifact) as inputs and produces other artifacts as outputs. An example command could be `gcc -o main main.c`, which takes the artifact `main.c` (a source file) and produces the artifact called `main` (the compiled binary).
+An individual, cacheable, ideally hermetic command that's run during the
+[build](#buck-file). It takes [artifacts](#artifact) as inputs and produces
+other artifacts as outputs. An example command could be `gcc -o main main.c`,
+which takes the artifact `main.c` (a source file) and produces the artifact
+called `main` (the compiled binary).
+
+#### Action digest
+
+Encoded [action](#action) representation. It is sent to
+[remote execution](#remote-execution-re). Used among other things to retrieve
+action inputs and to check for cache hits.
 
 #### Action graph
 
-The dependency graph of all [actions](#action) belonging to a target: it can be queried with `buck2 aquery`.
+The dependency graph of all [actions](#action) belonging to a target: it can be
+queried with `buck2 aquery`.
 
 #### Artifact
 
-A single input or output of an [action](#action). These are files that participate as inputs or outputs of a build and can be source files or build outputs.
For more information, see the [Artifact API](https://buck2.build/docs/api/build/Artifact/).
+A single input or output of an [action](#action). These are files that
+participate as inputs or outputs of a build and can be source files or build
+outputs. For more information, see the
+[Artifact API](https://buck2.build/docs/api/build/Artifact/).
 
 #### Attribute
 
-Declared by a [rule](#rule) and used to express the properties of a particular instance of a rule to create a [target](#target). For example, srcs, deps and copts, which declare a target's source files, dependencies, and custom compiler options, respectively. The available attributes for a target depend on its rule type.
+Declared by a [rule](#rule) and used to express the properties of a particular
+instance of a rule to create a [target](#target). For example, `srcs`, `deps`,
+and `copts`, which declare a target's source files, dependencies, and custom
+compiler options, respectively. The available attributes for a target depend on
+its rule type.
 
 #### BUCK file
 
-A `BUCK` file (the name is configurable, some projects use `TARGETS`) is the main configuration file that tells Buck2 what to build, what their dependencies are, and how to build them. Buck2 takes a `BUCK` file as input and evaluates the file to declare [targets](#target), which are then used to create a graph of dependencies and to derive the [actions](#action) that must be completed to build intermediate and final software outputs. A `BUCK` file marks a directory and any sub-directories not containing a `BUCK` file as a [package](#package).
+A `BUCK` file (the name is configurable; some projects use `TARGETS`) is the
+main configuration file that tells Buck2 which targets to build, what their
+dependencies are, and how to build them. Buck2 takes a `BUCK` file as input and
+evaluates the file to declare [targets](#target), which are then used to create
+a graph of dependencies and to derive the [actions](#action) that must be
+completed to build intermediate and final software outputs. A `BUCK` file marks
+a directory and any sub-directories not containing a `BUCK` file as a
+[package](#package).
 
 #### BXL
 
-BXL ([Buck eXtension Language](https://buck2.build/docs/developers/bxl)) scripts are written in [Starlark](#starlark) (a restricted subset of Python) and give integrators the ability to inspect and interact directly with the buck2 graph.
+BXL ([Buck eXtension Language](https://buck2.build/docs/developers/bxl)) scripts
+are written in [Starlark](#starlark) (a restricted subset of Python) and give
+integrators the ability to inspect and interact directly with the buck2 graph.
 
-BXL scripts can query the [action graph](#action-graph), [configured graph](#configured-graph), and [unconfigured graph](#unconfigured-graph). They can also create [actions](#action) and trigger builds.
+BXL scripts can query the [action graph](#action-graph),
+[configured graph](#configured-graph), and
+[unconfigured graph](#unconfigured-graph). They can also create
+[actions](#action) and trigger builds.
 
 #### Cell
 
-The directory tree of one or more Buck2 [packages](#package). A Buck2 build can involve multiple cells.
The cell root always contains a +[.buckconfig](#buckconfig), although the presence of a .buckconfig file doesn't +in itself define a cell. Rather, the cells involved in a build are defined at +the time Buck2 is invoked; they are specified in the .buckconfig for the Buck +[project](#project). #### Configuration -Configurations consist of a set of 'constraint values' that are used to resolve `select` [attributes](#attribute) prior to evaluating [rule](#rule) implementations: the attribute takes the value of the first branch in the `select` that matches the configuration. +Configurations consist of a set of 'constraint values' that are used to resolve +`select` [attributes](#attribute) prior to evaluating [rule](#rule) +implementations: the attribute takes the value of the first branch in the +`select` that matches the configuration. -Configurations are instantiated by rules that produce a `PlatformInfo` [provider](#provider). Once created, targets can receive their configuration through a variety of mechanisms, such as: +Configurations are instantiated by rules that produce a `PlatformInfo` +[provider](#provider). Once created, targets can receive their configuration +through a variety of mechanisms, such as: -* Inheritance - by default, when following a dependency edge A -> B, B inherits A's configuration. -* The `default_target_platform` attribute and `--target-platforms` command line flag. -* [Transitions](#transition) (see below). +- Inheritance - by default, when following a dependency edge A -> B, B inherits + A's configuration. +- The `default_target_platform` attribute and `--target-platforms` command line + flag. +- [Transitions](#transition) (see below). -Configurations allow a single target to exist in multiple variants in the configured graph (for example, to build a given binary at differing optimization levels or targeting different CPU architectures). +Configurations allow a single target to exist in multiple variants in the +configured graph (for example, to build a given binary at differing optimization +levels or targeting different CPU architectures). #### Configured graph -The configured target graph is generated by configuring target nodes in the [unconfigured target graph](#unconfigured-graph). That is, `selects` are fully resolved and configurations applied. The configured graph includes information about the [configurations](#configuration) and [transitions](#transition) involved in building targets. The same target may appear in multiple different configurations (when printed, the configuration is after the target in parentheses). +The configured target graph is generated by configuring target nodes in the +[unconfigured target graph](#unconfigured-graph). That is, `selects` are fully +resolved and configurations applied. The configured graph includes information +about the [configurations](#configuration) and [transitions](#transition) +involved in building targets. The same target may appear in multiple different +configurations (when printed, the configuration is after the target in +parentheses). + +#### Constraint + +A constraint represents a property that may differ across different +[target](#target) or build contexts, such as CPU architecture, the version of a +system-installed compiler, optimization level, which version of a particular +library to use, etc. #### Daemon -The Daemon process lives between invocations and is designed to allow for cache reuse between Buck2 invocations, which can considerably speed up builds. 
For more information, see [Daemon (buckd)](daemon.md).
+The Daemon process lives between invocations and is designed to allow for cache
+reuse between Buck2 invocations, which can considerably speed up builds. For
+more information, see [Daemon (buckd)](daemon.md).
 
 #### Dependency
 
-A directed edge between two [targets](#target). A target `A` can have a dependency on target `B`, for example, if any `dep` attribute of `A` mentions `B`. A target's dependence on another target is determined by the [visibility](#visibility) of the latter.
+A directed edge between two [targets](#target). A target `A` can have a
+dependency on target `B`, for example, if any `dep` attribute of `A` mentions
+`B`. A target's dependence on another target is determined by the
+[visibility](#visibility) of the latter.
 
 #### Execution platform
 
-A type of [rule](#rule) that includes information such as what execution types a [target](#target) supports, which can be [remote](#remote-execution-re), local, and [hybrid](#hybrid-execution) execution. Also, whether it supports cache uploads, which allows users to get cache hits for things that executed locally.
+A type of [rule](#rule) that includes information such as which execution types
+a [target](#target) supports ([remote](#remote-execution-re), local, or
+[hybrid](#hybrid-execution) execution) and whether it supports cache uploads,
+which allows users to get cache hits for actions that executed locally.
 
 #### Hybrid execution
 
-Enables shifting work to the local host when available parallelism in the build is low. This enables users to save on [remote execution](#remote-execution-re) roundtrips to enable faster builds.
+Allows Buck2 to race local and remote execution and get whichever finishes first
+(unless there's a cache hit, in which case it gets the output from the cache).
+This can provide a substantial speedup by eliminating the overhead of going to
+[remote execution](#remote-execution-re) when there is enough capacity to
+service the build locally.
 
 #### Isolation dir
 
-Instances of Buck2 share a [daemon](#daemon) if and only if their isolation directory is identical. The isolation directory also influences the output paths provided by Buck2.
+Instances of Buck2 share a [daemon](#daemon) if and only if their isolation
+directory is identical. The isolation directory also influences the output paths
+provided by Buck2.
+
+#### Modifiers
+
+A modifier is a modification of a constraint in the existing
+[configuration](#configuration) used to obtain a new configuration. Modifiers
+provide a unified way to specify build settings on a [project](#project),
+[target](#target), and command-line level. They are intended to replace
+[target platforms](#target-platform) and most use cases of
+[.buckconfigs](#buckconfig).
 
 #### Package
 
-A directory that contains a Buck2 [BUCK file](#buck-file) and all source files belonging to the same directory as the BUCK file, or any of its subdirectories that do not contain a BUCK file themselves.
+A directory that contains a Buck2 [BUCK file](#buck-file) and all source files
+belonging to the same directory as the BUCK file, or any of its subdirectories
+that do not contain a BUCK file themselves.
 
 #### Prelude
 
-The prelude is a unique `.bzl` file located at `prelude//prelude.bzl`.
Buck2
+implicitly loads all the symbols defined in the prelude whenever it loads a
+[`BUCK`](#buck-file) file. Symbols defined outside the prelude can be imported
+via a `load()` statement.
 
-When you create a Buck2 project using `buck2 init --git`, it will contain the same prelude used internally at Meta by Buck2 users. It is viewable at https://github.com/facebook/buck2/tree/main/prelude.
+When you create a Buck2 project using `buck2 init --git`, it will contain the
+same prelude used internally at Meta by Buck2 users. It is viewable at
+https://github.com/facebook/buck2/tree/main/prelude.
 
 #### Project
 
-The Outermost directory where there is a [.buckconfig](#buckconfig): also known as the [root cell](#cell). The .buckconfig for the project specifies the [cells](#cell) that constitute the Buck2 project. Specifically, these cells are specified in the '[repositories]' section of the `.buckconfig`. All command invocations are executed from the project root.
+The outermost directory where there is a [.buckconfig](#buckconfig): also known
+as the [root cell](#cell). The .buckconfig for the project specifies the
+[cells](#cell) that constitute the Buck2 project. Specifically, these cells are
+specified in the '[cells]' section of the `.buckconfig`. All command invocations
+are executed from the project root.
 
 #### Provider
 
-Data returned from a [rule](#rule) function. It's the only way that information from this rule is available to other rules that depend on it (see [dependency](#dependency)). Every rule must return at least the `DefaultInfo` provider. A common case is to also return either `RunInfo` (because they are executable) or custom providers that the dependents rule can use. For more information, see [Providers](https://buck2.build/docs/rule_authors/writing_rules/#providers).
+Data returned from a [rule](#rule) function. It's the only way that information
+from this rule is available to other rules that depend on it (see
+[dependency](#dependency)). For more information, see
+[Providers](https://buck2.build/docs/rule_authors/writing_rules/#providers).
+
+#### Platform
+
+A named set of [constraints](#constraint), defining a specific runtime
+environment; e.g., `cpu=x86_64, os=windows`.
 
 #### Remote execution (RE)
 
-Distributed execution of [actions](#action) on remote workers. It can speed up builds significantly by scaling the nodes available for parallel actions, and by caching action outputs across Buck2 users.
+Distributed execution of [actions](#action) on remote workers. It can speed up
+builds significantly by scaling the nodes available for parallel actions, and by
+caching action outputs across Buck2 users.
 
 #### Rule
 
-A rule consists of an attribute spec and an implementation, which is a [Starlark](#starlark) function.
+A rule consists of an attribute spec and an implementation, which is a
+[Starlark](#starlark) function.
 
-The attribute spec declares what attributes the rule expects to receive. The rule implementation receives the [attributes](#attribute) of a [target](#target) and the [providers](#provider) of its [dependencies](#dependency).
+The attribute spec declares what attributes the rule expects to receive. The
+rule implementation receives the [attributes](#attribute) of a [target](#target)
+and the [providers](#provider) of its [dependencies](#dependency).
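+
+As a minimal sketch of that shape (the rule name, attributes, and command are
+hypothetical, not taken from the real prelude):
+
+```python
+# Hypothetical rule: copies its single source file to its declared output.
+def _copy_impl(ctx):
+    out = ctx.actions.declare_output(ctx.attrs.out_name)
+    # Declare an action that produces `out` from the source artifact.
+    ctx.actions.run(
+        ["cp", ctx.attrs.src, out.as_output()],
+        category = "copy",
+    )
+    # Every rule must return at least DefaultInfo.
+    return [DefaultInfo(default_output = out)]
+
+copy_file = rule(
+    impl = _copy_impl,
+    attrs = {
+        "src": attrs.source(),
+        "out_name": attrs.string(),
+    },
+)
+```
+
+A `BUCK` file could then instantiate it with, say,
+`copy_file(name = "cfg", src = "config.ini", out_name = "config.copy")`.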
The rule implementation can
+declare new [actions](#action) and [artifacts](#artifact) and must return
+[providers](#provider) that can be used to pass data to its dependents or to
+Buck2 itself.
 
-Rules are instantiated in [BUCK files](#buck-file) to declare targets and set their attributes. The rule implementation is called when Buck2 needs its providers, which can happen when the target is built, or when one of its dependents is.
+Rules are instantiated in [BUCK files](#buck-file) to declare targets and set
+their attributes. The rule implementation is called when Buck2 needs its
+providers, which can happen when the target is built, or when one of its
+dependents is.
 
-As an example, the `cxx_binary` rule could be used to create a C++ binary, but `android_binary` rule would be used to create an Android APK
+As an example, the `cxx_binary` rule could be used to create a C++ binary, but
+the `android_binary` rule would be used to create an Android APK.
 
 #### Starlark
 
-Starlark is a dialect of Python originally developed by Google for the [Bazel build tool](https://bazel.build/rules/language). It is the configuration language of the Buck2 build system and the language you use in `.bzl` and [`BUCK` files](#buck-file) to define and instantiate [rules](#rule).
+Starlark is a dialect of Python originally developed by Google for the
+[Bazel build tool](https://bazel.build/rules/language). It is the configuration
+language of the Buck2 build system and the language you use in `.bzl` and
+[`BUCK` files](#buck-file) to define and instantiate [rules](#rule).
+
+There are many reasons why Meta has chosen Starlark, as detailed in
+[The Rust Starlark library](https://developers.facebook.com/blog/post/2021/04/08/rust-starlark-library/)
+article.
 
-There are many reasons why Meta has chosen Starlark, as detailed in [The Rust Starlark library](https://developers.facebook.com/blog/post/2021/04/08/rust-starlark-library/) article.
 
-The Buck2 project maintains and uses an open source [Starlark interpreter in Rust](https://github.com/facebookexperimental/starlark-rust).
+The Buck2 project maintains and uses an open source
+[Starlark interpreter in Rust](https://github.com/facebook/starlark-rust).
+
+#### Subtarget
+
+A collection of [providers](#provider) that can be accessed by name. Subtargets
+can have their own subtargets as well, which can be accessed by chaining them,
+e.g.: `buck2 build cell//foo:bar[baz][qux]`.
 
 #### Target
 
-An object that is defined in a [BUCK file](#buck-file). Targets represent the buildable units of a build from the perspective of the end user. Declared by instantiating a [rule](#rule) with attributes. A target has [dependencies](#dependency), which are references to other targets.
+An object that is defined in a [BUCK file](#buck-file). Targets represent the
+buildable units of a build from the perspective of the end user. Declared by
+instantiating a [rule](#rule) with attributes. A target has
+[dependencies](#dependency), which are references to other targets.
 
 #### Target label
 
-The identifier for a [target](#target).
Structured as
+`cell_alias//path/to/package:target`, where `cell_alias//` maps to a
+[cell root](#cell) path (as defined in the [.buckconfig](#buckconfig) of the
+cell this target belongs to), `path/to/package` is the [package](#package)
+directory that contains the [BUCK file](#buck-file) declaring the target
+(relative to the mapped cell alias), and `:target` is the target's name.
 
 #### Target pattern
 
-A string that resolves to a set of [targets](#target). They can be used as arguments to commands such as `buck2 build` and `buck2 uquery`. They can also be used in the [visibility](#visibility) argument of a [rule](#rule). For more information, see [Target pattern](./target_pattern.md).
+A string that resolves to a set of [targets](#target). Target patterns can be
+used as arguments to commands such as `buck2 build` and `buck2 uquery`. They can
+also be used in the [visibility](#visibility) argument of a [rule](#rule). For
+more information, see [Target pattern](./target_pattern.md).
+
+#### Target platform
+
+Represents the [platform](#platform) that the final output is built to reside
+and execute on. If buck2 is a chef and the output is the meal, the target
+platform would be the people who eat the meal.
 
 #### Target universe
 
-A set of configured targets and their transitive deps. In the context of cquery and build in the Buck2 CLI, any literals are resolved to all matching targets within the universe. Target universe can be passed explicitly on the Buck2 CLI via `--target-universe`. If omitted, the target universe will be inferred by constructing a universe using all the target literals (and their transitive deps) within the query string for cquery.
+A set of configured targets and their transitive deps. In the context of cquery
+and build in the Buck2 CLI, any literals are resolved to all matching targets
+within the universe. The target universe can be passed explicitly on the Buck2
+CLI via `--target-universe`. If omitted, the target universe will be inferred by
+constructing a universe using all the target literals (and their transitive
+deps) within the query string for cquery.
 
 #### Transition
 
-Allows the [configuration](#configuration) to change across a [dependency](#dependency) edge. That is, normally, if [target](#target) A depends on target B, then if the configuration for A is X, then B is configured using X too. By using a transition, you can produce X to configure B instead.
+Allows the [configuration](#configuration) to change across a
+[dependency](#dependency) edge. That is, normally, if [target](#target) A
+depends on target B, then if the configuration for A is X, then B is configured
+using X too. By using a transition, you can produce a new configuration Y to
+configure B instead.
 
 #### Unconfigured graph
 
-A graph of [targets](#target) before [configurations](#configuration) are applied. Can be queried via `buck2 uquery`.
+A graph of [targets](#target) before [configurations](#configuration) are
+applied. Can be queried via `buck2 uquery`.
 
 #### Visibility
 
-Determines whether a [target](#target) can include another [target](#target) as its [dependency](#dependency). For more information, see [Visibility](./visibility.md).
+Determines whether a [target](#target) can include another [target](#target) as
+its [dependency](#dependency). For more information, see
+[Visibility](./visibility.md).
diff --git a/docs/concepts/key_concepts.md b/docs/concepts/key_concepts.md
 new file mode 100644
 index 0000000000000..fd09c2fc73a52
 --- /dev/null
 +++ b/docs/concepts/key_concepts.md
 @@ -0,0 +1,86 @@
+---
+id: key_concepts
+title: Key Concepts
+---
+
+import useBaseUrl from '@docusaurus/useBaseUrl';
+
+# Key concepts
+
+Buck2 has a number of fundamental concepts:
+
+- A [**_build rule_**](build_rule.md) describes how to produce an output file
+  from a set of input files. Most build rules are specific to a particular
+  language or platform. For example, you would use the
+  [`cxx_binary`](../../prelude/globals/#cxx_binary) rule to create a C++ binary,
+  but you would use the
+  [`android_binary`](../../prelude/globals/#android_binary) rule to create an
+  Android APK.
+- A [**_build target_**](build_target.md) is a string that uniquely identifies a
+  build rule. It can be thought of as a URI for the build rule within the Buck2
+  project.
+- A [**_build file_**](build_rule.md) defines one or more build rules. In Buck2,
+  build files are typically named `BUCK`. A `BUCK` file is analogous to the
+  `Makefile` used by the Make utility. In your project, you will usually have a
+  separate `BUCK` file for each buildable unit of software—such as a binary or
+  library. For large projects, you could have hundreds of `BUCK` files.
+
+A Buck2 **_package_** comprises a Buck2 build file (a `BUCK` file) and all
+files—such as source files and headers—in the same directory as the `BUCK` file
+or in subdirectories, provided those subdirectories do not themselves contain a
+`BUCK` file. To say it another way, a `BUCK` file defines the root of a package,
+but Buck2 packages might not include all their subdirectories because Buck2
+packages do not overlap or contain other Buck2 packages. For example, in the
+following diagram, the BUCK file in directory `app-dir-1` defines that directory
+as the root of a package—which is labeled **Package A** in the diagram. The
+directory `app-dir-2` is part of Package A because it is a subdirectory of
+`app-dir-1`, but does not itself contain a BUCK file. Now, consider directory
+`app-dir-3`. Because `app-dir-3` contains a BUCK file it is the root of a new
+package (**Package B**). Although `app-dir-3` is a subdirectory of `app-dir-1`,
+it is _not_ part of Package A.
+
+Buck2 has the concept of a **_cell_**, which
+defines a directory tree of one or more Buck2 packages. A Buck2 build could
+involve multiple cells. Cells often correspond to repositories, but this isn't
+required. The root of a Buck2 cell contains a global configuration file called
+[**`.buckconfig`**](buckconfig.md). Note that although the cell root should
+contain a `.buckconfig`, the presence of a `.buckconfig` file doesn't in itself
+define a cell. Rather, _the cells involved in a build are defined at the time
+Buck2 is invoked_; they are specified in the `.buckconfig` for the Buck2
+_project_ (see below).
+
+A Buck2 **_project_** is defined by the `.buckconfig`
+where Buck2 is invoked, or if that directory doesn't contain a `.buckconfig`,
+the project is defined by the `.buckconfig` in the nearest ancestor directory.
+The `.buckconfig` for the project specifies the cells that constitute the Buck2
+project. Specifically, these cells are specified in the
+[cells](buckconfig.md#cells) section of the `.buckconfig`.
Note that the
+directory tree rooted at this `.buckconfig` is automatically considered a cell
+by Buck2; in other words, the project's `.buckconfig` doesn't need to specify
+the project cell explicitly—although it is a good practice to do so.
+
+### Buck2's dependency graph
+
+Every build rule can have zero or more dependencies. You can specify these
+dependencies using, for example, the `deps` argument to the build rule. For more
+information about specifying dependencies, consult the reference page for the
+build rule you are using. These dependencies form a directed graph, called the
+_target graph_. Buck2 requires the graph to be acyclic. When building the output
+of a build rule, all of the rule's transitive dependencies are built first. This
+means that the graph is built in a "bottom-up" fashion. A build rule knows only
+which rules it depends on, not which rules depend on it. This makes the graph
+easier to reason about and enables Buck2 to identify independent subgraphs that
+can be built in parallel. It also enables Buck2 to determine the minimal set of
+build targets that need to be rebuilt.
+
+### Multiple Buck2 projects in a single repository
+
+Buck2 is designed to build multiple deliverables from a single repository—that
+is, a _monorepo_—rather than from multiple repositories. Support for the
+monorepo design motivated Buck2's support for cells and projects. It is
+Facebook's experience that maintaining all dependencies in the same repository
+makes it easier to ensure that all developers have the correct version of the
+code and simplifies the process of making atomic commits.
+
+### See also
+
+Take a look at the [Concept Map](concept_map.md) for a visualization of how
+Buck2 concepts interact with each other. Also see the [Glossary](glossary.md).
 diff --git a/docs/concepts/target_pattern.md b/docs/concepts/target_pattern.md
 index af4bc6f444f4f..59fa79665b2cb 100644
 --- a/docs/concepts/target_pattern.md
 +++ b/docs/concepts/target_pattern.md
 @@ -3,7 +3,11 @@ id: target_pattern
 title: Target Pattern
 ---
 
-A *target pattern* is a string that resolves to a set of [targets](./glossary.md#target). A target pattern can be used as arguments to commands, such as `buck2 build` and `buck uquery`. You can also use build target patterns in the [visibility](./glossary.md#visibility)) argument of your build [rules](./glossary.md#rule).
+A _target pattern_ is a string that resolves to a set of
+[targets](./glossary.md#target). A target pattern can be used as an argument to
+commands, such as `buck2 build` and `buck2 uquery`. You can also use build
+target patterns in the [visibility](./glossary.md#visibility) argument of your
+build [rules](./glossary.md#rule).
 
 The simplest build target pattern matches the build target of the same name:
 
 @@ -14,13 +18,16 @@ The simplest build target pattern matches the build target of the same name:
 //apps/myapp:app
 ```
 
-A build target pattern that ends with a colon matches all build targets in the build file at the preceding directory path. For example, suppose that the build file:
+A build target pattern that ends with a colon matches all build targets in the
+build file at the preceding directory path.
For example, suppose that the build
+file:
 
 ```sh
 apps/myapp/BUCK
 ```
 
-defines the rules: `app_v1` and `app_v2`, then the following build target pattern would match both of those rules:
+defines the rules `app_v1` and `app_v2`, then the following build target
+pattern would match both of those rules:
 
 ```bash
 #
 @@ -29,7 +36,10 @@ defines the rules: `app_v1` and `app_v2`, then the following build target patter
 //apps/myapp:
 ```
 
-A build target pattern that ends with an ellipsis (`/...`) matches all build targets in the build file in the directory that precedes the ellipsis and also *all build targets in build files in subdirectories*. For example, suppose that you have the following build files:
+A build target pattern that ends with an ellipsis (`/...`) matches all build
+targets in the build file in the directory that precedes the ellipsis and also
+_all build targets in build files in subdirectories_. For example, suppose that
+you have the following build files:
 
 ```bash
 apps/BUCK
 @@ -45,7 +55,8 @@ then the following pattern would match all build targets in both of those files:
 //apps/...
 ```
 
-A target pattern that does not include a `:` separator matches the target with the same name as the last element of the path:
+A target pattern that does not include a `:` separator matches the target with
+the same name as the last element of the path:
 
 ```bash
 #
 @@ -65,10 +76,13 @@ myapp:myapp
 ### Build target patterns are not allowed in the deps argument
 
-Build target patterns cannot be used with the `deps` argument of a build rule. Buck requires that you specify all dependencies explicitly as either fully-qualified or relative build targets.
+Build target patterns cannot be used with the `deps` argument of a build rule.
+Buck2 requires that you specify all dependencies explicitly as either
+fully-qualified or relative build targets.
 
 ### Target aliases
 
-Buck supports the ability to define *aliases* for build targets; using aliases can improve brevity when specifying targets on the Buck command line.
+Buck2 supports the ability to define _aliases_ for build targets; using aliases
+can improve brevity when specifying targets on the Buck2 command line.
 
 To see which aliases exist, use `buck2 audit config alias`.
 diff --git a/docs/concepts/visibility.md b/docs/concepts/visibility.md
 index 4a74ed3554380..70e3bcc284144 100644
 --- a/docs/concepts/visibility.md
 +++ b/docs/concepts/visibility.md
 @@ -3,18 +3,31 @@ id: visibility
 title: Visibility
 ---
 
-Visibility determines whether a [target](./glossary.md#target) can reference another target in its [attributes](./glossary.md#attribute). In a large project, you may want to prevent developers from 'reaching across' the project and pulling in additional code. Reducing the visibility of targets can help prevent that type of behavior.
+Visibility determines whether a [target](./glossary.md#target) can reference
+another target in its [attributes](./glossary.md#attribute). In a large project,
+you may want to prevent developers from 'reaching across' the project and
+pulling in additional code. Reducing the visibility of targets can help prevent
+that type of behavior.
 
-There are two types of visibility attributes available (each of which takes a list of [target patterns](./glossary.md#target-pattern)):
+There are two types of visibility attributes available (each of which takes a
+list of [target patterns](./glossary.md#target-pattern)):
 
-* `visibility` - determines which other targets can depend on a target.
-* `within_view` - determines which other targets a target can depend on.
+- `visibility` - determines which other targets can depend on a target.
+- `within_view` - determines which other targets a target can depend on.
 
-Both attributes act as allowlists, with some exceptions. In general, if a target is not listed, there may be no dependency relationship. If the `within_view` list is empty or unset, however, its check is bypassed. Similarly, targets defined in the same [BUCK file](./glossary.md#buck-file) always act as if they were members of their siblings' `visibility` lists.
+Both attributes act as allowlists, with some exceptions. In general, if a target
+is not listed, there may be no dependency relationship. If the `within_view`
+list is empty or unset, however, its check is bypassed. Similarly, targets
+defined in the same [BUCK file](./glossary.md#buck-file) always act as if they
+were members of their siblings' `visibility` lists.
 
-There is also a special value for `visibility` attribute: `'PUBLIC'`, which makes a build rule visible to all targets.
+There is also a special value for the `visibility` attribute: `'PUBLIC'`, which
+makes a build rule visible to all targets.
 
-In case of logically-conflicting lists, `within_view` takes precedence over `visibility`. If `//foo:bar` defines `//hello:world` in its `visibility` list, but `//hello:world` does not define `//foo:bar` in its `within_view` list, then `//hello:world` may not depend on `//foo:bar`.
+In the case of logically-conflicting lists, `within_view` takes precedence over
+`visibility`. If `//foo:bar` defines `//hello:world` in its `visibility` list,
+but `//hello:world` does not define `//foo:bar` in its `within_view` list, then
+`//hello:world` may not depend on `//foo:bar`.
 
 ## Examples
 
 @@ -28,7 +41,8 @@
 prebuilt_jar(
 )
 ```
 
-It is common to restrict the visibility of Android resources to the Java code that uses it:
+It is common to restrict the visibility of Android resources to the Java code
+that uses it:
 
 ```java
 android_resource(
 @@ -39,7 +53,8 @@
 )
 ```
 
-Or it may be simpler to make it visible to the entire directory in case additional build rules are added to `java/com/example/ui/BUCK`:
+Or it may be simpler to make it visible to the entire directory in case
+additional build rules are added to `java/com/example/ui/BUCK`:
 
 ```java
 android_resource(
 @@ -50,7 +65,10 @@
 )
 ```
 
-Also, it is common to limit code for testing to be visible only to tests.
If you
+define all of your Java unit tests in a folder named `javatests/` in the root of
+your project, then you could define the following rule to ensure that only build
+rules under `javatests/` can depend on JUnit:
 
 ```java
 prebuilt_jar(
 @@ -60,7 +78,8 @@
 )
 ```
 
-Finally, restricting the view of a target can be useful for preventing dependency creep:
+Finally, restricting the view of a target can be useful for preventing
+dependency creep:
 
 ```java
 java_library(
 diff --git a/docs/developers/architecture/buck1_vs_buck2.md b/docs/developers/architecture/buck1_vs_buck2.md
 index d2250b7c1688b..0be074ccf367a 100644
 --- a/docs/developers/architecture/buck1_vs_buck2.md
 +++ b/docs/developers/architecture/buck1_vs_buck2.md
 @@ -7,116 +7,169 @@ title: Buck1 vs Buck2
 
 The following table provides an at-a-glance comparison of Buck1 and Buck2.
 
-|Buck1 | Buck2 |
-|:--|:--|
-| Build files in Starlark | Build files in Starlark |
-| Macros in Starlark | Macros in Starlark |
-| Rules in Java | Rules in Starlark |
-| Rules and Macros are logically similar | Rules and Macros are logically similar |
-| Rules and Core are not well abstracted | Rules and Core are strongly separated |
-| Core in Java | Core in Rust |
-| Remote Execution (RE) not well supported | All rules support remote execution by default |
-| Varying degrees of incrementality / parallelism | Unified incrementality / parallelism |
+| Buck1                                           | Buck2                                         |
+| :---------------------------------------------- | :-------------------------------------------- |
+| Build files in Starlark                         | Build files in Starlark                       |
+| Macros in Starlark                              | Macros in Starlark                            |
+| Rules in Java                                   | Rules in Starlark                             |
+| Rules and Macros are logically similar          | Rules and Macros are logically similar        |
+| Rules and Core are not well abstracted          | Rules and Core are strongly separated         |
+| Core in Java                                    | Core in Rust                                  |
+| Remote Execution (RE) not well supported        | All rules support remote execution by default |
+| Varying degrees of incrementality / parallelism | Unified incrementality / parallelism          |
 
 ## Top-down vs Bottom-up - understanding the implications of the difference in execution models between Buck1 and Buck2
 
-It is often said that Buck1 does 'top down' and Buck2 does 'bottom up' building. This results in cases where some topics that seem conceptually trivial in Buck1 are hard problems in Buck2, or vice versa.
+It is often said that Buck1 does 'top down' and Buck2 does 'bottom up' building.
+This results in cases where some topics that seem conceptually trivial in Buck1
+are hard problems in Buck2, or vice versa.
 
 ### What are the differences?
 
-**Scenario**: Imagine you are building A, which depends on both B and C, but where neither B nor C have any dependencies.
+**Scenario**: Imagine you are building A, which depends on both B and C, but
+where neither B nor C has any dependencies.
 
-For the sake of simplicity, imagine B and C are C++ compilations (that produce object files), and A is a link (that consumes them and produces a shared library).
+For the sake of simplicity, imagine B and C are C++ compilations (that produce
+object files), and A is a link (that consumes them and produces a shared
+library).
 
 #### Building A with Buck1
 
 Following is an oversimplified view of what happens:
 
-* Buck1 computes the 'rulekey' for B.
 - * This consists of mixing together the hashes of the C++ file being compiled, as well as all C++ compiler flags, and so on.
-* Buck1 then does the same for C.
-* Buck1 then computes the rulekey for A.
- * This consist of mixing together the rulekeys of B and C, as well as linker flags used by A. for example.
-* Buck1 then looks up the rulekey for A in the cache.
 - * If there's a hit, then Buck1 downloads the output and its job done.
 - * If there's a cache miss, continue.
-* Buck1 then queries the cache for the rulekeys of B and C:
 - * If there's a hit, then the output is downloaded.
 - * If there's a miss, then Buck1 runs the commands needed to produce the object file that was missed. Regardless of whether those commands run locally or on RE, Buck1 downloads the output of B and C.
-* Buck1 then runs the command for A to produce the shared library.
 - * At this point, Buck1 may actually do another cache lookup with a different rulekey, which is called an *input based rulekey*. This rulekey is derived from the inputs of the action that needs executing, which at this point of the build are known (since they were just built)!
+- Buck1 computes the 'rulekey' for B.
+  - This consists of mixing together the hashes of the C++ file being compiled,
+    as well as all C++ compiler flags, and so on.
+- Buck1 then does the same for C.
+- Buck1 then computes the rulekey for A.
+  - This consists of mixing together the rulekeys of B and C, as well as linker
+    flags used by A, for example.
+- Buck1 then looks up the rulekey for A in the cache.
+  - If there's a hit, then Buck1 downloads the output and its job is done.
+  - If there's a cache miss, continue.
+- Buck1 then queries the cache for the rulekeys of B and C:
+  - If there's a hit, then the output is downloaded.
+  - If there's a miss, then Buck1 runs the commands needed to produce the object
+    file that was missed. Regardless of whether those commands run locally or on
+    RE, Buck1 downloads the output of B and C.
+- Buck1 then runs the command for A to produce the shared library.
+  - At this point, Buck1 may actually do another cache lookup with a different
+    rulekey, which is called an _input based rulekey_. This rulekey is derived
+    from the inputs of the action that needs executing, which at this point of
+    the build are known (since they were just built)!
 
 #### Building A with Buck2
 
 In contrast, if you ask Buck2 to build A, here is what happens:
 
-* Buck2 produce the action to compile B and computes the hash of the action.
 - * This is the 'action digest', which consists of mixing the hashes of all the inputs (such as the C++ file), as well as the command line (so, implicitly, the compiler flags).
-* Buck2 queries the action cache for the action digest hash.
 - * If there's a hit, Buck2 obtains the hash of the resulting object file (that is, the output of B).
 - * If there's a miss, Buck2 runs the action on RE (or potentially locally) and obtains the hash of the object file. If the action runs remotely, Buck2 will not download the output.
-* Buck2 does the same thing for C.
-* Buck2 produces the action to link A.
 - * This consists of mixing together all the hashes of the input files (which were retrieved earlier) and the command line to produce an action digest, then querying the cache and potentially running the action.
-* Once Buck2 produces A (again, on RE), then, since this output was requested by the user (unlike the intermediary outputs B and C), Buck2 downloads A.
+- Buck2 produces the action to compile B and computes the hash of the action.
+  - This is the 'action digest', which consists of mixing the hashes of all the
+    inputs (such as the C++ file), as well as the command line (so, implicitly,
+    the compiler flags).
+- Buck2 queries the action cache for the action digest hash.
+  - If there's a hit, Buck2 obtains the hash of the resulting object file (that
+    is, the output of B).
+  - If there's a miss, Buck2 runs the action on RE (or potentially locally) and
+    obtains the hash of the object file. If the action runs remotely, Buck2 will
+    not download the output.
+- Buck2 does the same thing for C.
+- Buck2 produces the action to link A.
+  - This consists of mixing together all the hashes of the input files (which
+    were retrieved earlier) and the command line to produce an action digest,
+    then querying the cache and potentially running the action.
+- Once Buck2 produces A (again, on RE), then, since this output was requested by
+  the user (unlike the intermediary outputs B and C), Buck2 downloads A.
 
 ### Some implications
 
 #### Rulekeys vs Action digests
 
-The closest thing to Buck1’s rulekey in Buck2 is the action digest, but they are very different!
+The closest thing to Buck1’s rulekey in Buck2 is the action digest, but they are
+very different!
 
-Since it’s a product of the (transitive) inputs of an action, the (default) rulekey can be computed without running anything or querying any caches. However, the action digest cannot: it requires the actual inputs of an action, which means you need to build all the dependencies first.
+Since it’s a product of the (transitive) inputs of an action, the (default)
+rulekey can be computed without running anything or querying any caches.
+However, the action digest cannot: it requires the actual inputs of an action,
+which means you need to build all the dependencies first.
 
 This means that:
 
-* In Buck1, you can ask for rulekeys for a target.
-* In Buck2, you’d have to run the build first then ask for the action digests (this is what the `buck2 log what-ran` would show you).
+- In Buck1, you can ask for rulekeys for a target.
+- In Buck2, you’d have to run the build first and then ask for the action
+  digests (this is what `buck2 log what-ran` would show you).
 
 #### Buck2 queries many more caches
 
-* Buck1 will not descend further down a tree of dependency when it gets a cache hit.
-* Buck2 will always walk up all your dependencies, regardless of whether you get cache hits or not.
+- Buck1 will not descend further down a dependency tree when it gets a cache
+  hit.
+- Buck2 will always walk up all your dependencies, regardless of whether you get
+  cache hits or not.
 
 #### Materialization
 
-* When Buck1 gets a cache miss, it downloads the outputs.
-* Buck2, by contract, does not download outputs as part of a build (this is called 'deferred materialization').
 - * Note that Buck2 does download the outputs if the user asked for them (that is, they were the targets the user put on the command line).
+- When Buck1 gets a cache miss, it downloads the outputs.
+- Buck2, by contrast, does not download outputs as part of a build (this is
+  called 'deferred materialization').
+  - Note that Buck2 does download the outputs if the user asked for them (that
+    is, they were the targets the user put on the command line).
 
 ### Second-order implications
 
 #### Non-determinism
 
-Non-determinism in a build affects Buck2 and Buck1 differently. One scenario that often works fine in Buck1 but can work catastrophically bad in Buck2 is a codegen step, driven by a Python binary.
+Non-determinism in a build affects Buck2 and Buck1 differently. One scenario
+that often works fine in Buck1 but can go catastrophically wrong in Buck2 is a
+codegen step, driven by a Python binary.
-In certain configurations/modes, Python binaries are non-deterministic, because they are (XARs)[https://engineering.fb.com/2018/07/13/data-infrastructure/xars-a-more-efficient-open-source-system-for-self-contained-executables/] (eXecutable ARchives) and that is always non-deterministic, which is bad! +In certain configurations/modes, Python binaries are non-deterministic, because +they are +[XARs](https://engineering.fb.com/2018/07/13/data-infrastructure/xars-a-more-efficient-open-source-system-for-self-contained-executables/) +(eXecutable ARchives) and that is always non-deterministic, which is bad! -* In Buck1, that doesn’t really matter, because you can get a cache hit on the codegen output without ever visiting the XAR (as long as the input files haven’t changed). -* In Buck2, you need the XAR to check the action cache for the codegen step. - * However, binaries are often not cached in certain configurations/modes, so your XAR isn’t cached. - * Therefore, since your XAR build is non-deterministic, you’ll always miss in the action cache and the codegen step will always have to run in every build. +- In Buck1, that doesn’t really matter, because you can get a cache hit on the + codegen output without ever visiting the XAR (as long as the input files + haven’t changed). +- In Buck2, you need the XAR to check the action cache for the codegen step. + - However, binaries are often not cached in certain configurations/modes, so + your XAR isn’t cached. + - Therefore, since your XAR build is non-deterministic, you’ll always miss in + the action cache and the codegen step will always have to run in every + build. -It can get worse! If the Python binary produces non-deterministic codegen, then the entire build might become uncacheable. +It can get worse! If the Python binary produces non-deterministic codegen, then +the entire build might become uncacheable. #### Cache misses don’t necessarily propagate -Say that, in Buck2, you’re trying to build a chain of actions like codegen -> compile -> link. +Say that, in Buck2, you’re trying to build a chain of actions like codegen -> +compile -> link. -Even if your codegen step isn’t cached (say, because its action inputs are non-deterministic as mentioned above), as long as the codegen output is deterministic, you can still get cache hits from compile and link steps. +Even if your codegen step isn’t cached (say, because its action inputs are +non-deterministic as mentioned above), as long as the codegen output is +deterministic, you can still get cache hits from compile and link steps. #### Hybrid execution -If you squint, you’ll note that Buck1’s build could be viewed as 'local first', whereas Buck2’s would be better viewed as 'remote first': +If you squint, you’ll note that Buck1’s build could be viewed as 'local first', +whereas Buck2’s would be better viewed as 'remote first': -* When Buck1 builds something remotely or gets a cache hit, the outputs are always downloaded. -* When Buck2 builds something remotely or gets a cache hit, the outputs are never downloaded. +- When Buck1 builds something remotely or gets a cache hit, the outputs are + always downloaded. +- When Buck2 builds something remotely or gets a cache hit, the outputs are + never downloaded. In turn, this has some important implications: -* When Buck1 builds something locally, the inputs are always already present.
-* When Buck2 builds something locally, the inputs have to be downloaded, unless they were built locally (which if you’re doing any RE, is usually not the case), or if another command caused them to be downloaded. +- When Buck1 builds something locally, the inputs are always already present. +- When Buck2 builds something locally, the inputs have to be downloaded, unless + they were built locally (which, if you’re doing any RE, is usually not the + case), or if another command caused them to be downloaded. -This means that, in Buck1, running something locally when you have spare resources is usually a no-brainer, because it’s always ready to go, and you’ll save on not having to download the output from RE (though you might have to upload the output if you need to run actions depending on it later). +This means that, in Buck1, running something locally when you have spare +resources is usually a no-brainer, because it’s always ready to go, and you’ll +save on not having to download the output from RE (though you might have to +upload the output if you need to run actions depending on it later). -On the flip side, with Buck2, that’s not necessarily the case. To run an action locally, you need to download inputs that you might otherwise not have needed, which will tax your network connection. +On the flip side, with Buck2, that’s not necessarily the case. To run an action +locally, you need to download inputs that you might otherwise not have needed, +which will tax your network connection. diff --git a/docs/developers/architecture/buck2.md b/docs/developers/architecture/buck2.md index 7b1b5e9b1e2b9..90438a76590ef 100644 --- a/docs/developers/architecture/buck2.md +++ b/docs/developers/architecture/buck2.md @@ -7,94 +7,184 @@ import useBaseUrl from '@docusaurus/useBaseUrl'; ## High-level Overview -Buck2 is a build system whose core is written in Rust. Starlark, which is a deterministic, immutable version of Python, is used to extend the Buck2 build system, enabling Buck2 to be language-agnostic. - -The high-level flow starts with a user creating a build file (a `BUCK` file) containing one or more targets, which is specified by the target label, its inputs (sources, attributes, configurations, and dependencies), and the type of macro or rule to use. - -Briefly, a macro is a wrapper around a rule, which runs necessary commands to generate what’s needed for a target (for example, for a `cxx_binary` target, generate the header map and run necessary `clang` commands). Macros can be used to reduce boilerplate code for users (such as to supply the same set of attributes for a rule for all targets). Macros and rules are both written in Starlark and are specified by input sources, attributes, and the implementation function. - -If the target type is a macro, then the macro will fill in some details (for example, for a `cxx_binary` target, these are the compilation, debug flags to use, this is the `clang` to use). If the target type is a rule, then the macro layer is skipped altogether. - -This is all orchestrated by the core, which performs operations such as executing Buck2 CLI args, generating/updating the dependency graph (which contains the configured target nodes, unconfigured target nodes, action nodes, among other types of nodes that all allow for incrementality and execution), and materializing the artifacts. The core is written in Rust. +Buck2 is a build system whose core is written in Rust.
Starlark, which is a +deterministic, immutable version of Python, is used to extend the Buck2 build +system, enabling Buck2 to be language-agnostic. + +The high-level flow starts with a user creating a build file (a `BUCK` file) +containing one or more targets, each specified by a target label, its +inputs (sources, attributes, configurations, and dependencies), and the type of +macro or rule to use. + +Briefly, a macro is a wrapper around a rule, which runs necessary commands to +generate what’s needed for a target (for example, for a `cxx_binary` target, +generate the header map and run necessary `clang` commands). Macros can be used +to reduce boilerplate code for users (such as to supply the same set of +attributes for a rule for all targets). Macros and rules are both written in +Starlark and are specified by input sources, attributes, and the implementation +function. + +If the target type is a macro, then the macro will fill in some details (for +example, for a `cxx_binary` target, the compilation and debug flags to +use, and which `clang` to use). If the target type is a rule, then the macro +layer is skipped altogether. + +This is all orchestrated by the core, which performs operations such as +executing Buck2 CLI args, generating/updating the dependency graph (which +contains the configured target nodes, unconfigured target nodes, action nodes, +among other types of nodes that all allow for incrementality and execution), and +materializing the artifacts. The core is written in Rust. The following diagram shows the high-level overview. justifyContent -The Buck2 CLI runs in a client process, which sends commands to the Buck2 daemon via gRPC. The daemon goes through several phases after receiving a request from the client: **evaluation, configuration, analysis, execution, and materialization** (see [Execution Model](#execution-model), below). When using `buck2 test`, there is a final stage for **testing**. Note that these are the phases that a build goes through, but they are not always sequential. +The Buck2 CLI runs in a client process, which sends commands to the Buck2 daemon +via gRPC. The daemon goes through several phases after receiving a request from +the client: **evaluation, configuration, analysis, execution, and +materialization** (see [Execution Model](#execution-model), below). When using +`buck2 test`, there is a final stage for **testing**. Note that these are the +phases that a build goes through, but they are not always sequential. -After finishing all phases, the daemon will send the response back to the client via gRPC. +After finishing all phases, the daemon will send the response back to the client +via gRPC. ## Execution Model -The following diagram shows the Execution Model, which consists of 5 phases and states. +The following diagram shows the Execution Model, which consists of 5 phases and +states. justifyContent -Each of the phases and states shown in the Execution Model, are detailed in the following sub-sections. +Each of the phases and states shown in the Execution Model are detailed in the +following sub-sections. ### State 0 - Build Files -Build files (commonly referred to as `BUCK` files, their default name) are the main input to Buck2 and are syntactically Python. +Build files (commonly referred to as `BUCK` files, their default name) are the +main input to Buck2 and are syntactically Python. -Each build file is uniquely identified by the directory in which it's located.
Since all build files have the same name, there cannot be two build files in the same directory. This is usually represented as the relative path from the root of the project (the directory where the .buckconfig file is). +Each build file is uniquely identified by the directory in which it's located. +Since all build files have the same name, there cannot be two build files in the +same directory. This is usually represented as the relative path from the root +of the project (the directory where the .buckconfig file is). -Each build file has a set of targets. These describe the things the user wants Buck2 to know about. Each target has a type and a set of named attributes, including at least a name (also known as the label) identifying it. Additional attributes depend on the type of the target. +Each build file has a set of targets. These describe the things the user wants +Buck2 to know about. Each target has a type and a set of named attributes, +including at least a name (also known as the label) identifying it. Additional +attributes depend on the type of the target. ### Phase A: Evaluation -First, Buck2 evaluates a build file, and then constructs an unconfigured target graph. +First, Buck2 evaluates a build file, and then constructs an unconfigured target +graph. -Buck2 performs directory listings to discover packages, then evaluates the build files that were found, expands any macros detected into their underlying rules, and then will take rule attributes and convert them from Starlark to Rust types to construct a target node, and insert it into the unconfigured target graph, which is a smaller portion of Buck2’s larger dependency graph. The target node consists of a reference to rule implementation, and the set of attributes and sources. +Buck2 performs directory listings to discover packages, then evaluates the build +files that were found, expands any macros detected into their underlying rules, +and then takes the rule attributes and converts them from Starlark to Rust types +to construct a target node, which it inserts into the unconfigured target graph, +which is a smaller portion of Buck2’s larger dependency graph. The target node +consists of a reference to the rule implementation and the set of attributes and +sources. -The result of evaluation is a list of targets read from the build file mapped to a target node in Buck2 unconfigured target graph. +The result of evaluation is a list of targets read from the build file mapped to +a target node in Buck2’s unconfigured target graph. ### State 1 - Unconfigured Target Graph is generated -At this point, the unconfigured target graph is available for the next stage of transformation, which is to configure the target nodes within the graph. +At this point, the unconfigured target graph is available for the next stage of +transformation, which is to configure the target nodes within the graph. ### Phase B: Configuration -At the end of evaluation, the target nodes are not yet configured. Configuration means applying a list of constraints (such as resolving selects to specify the right CPU) to make sure the target can be run where it needs to. This is also known as target platform resolution, and can be configured within the target, the buckconfig, propagated from dependencies, or passed into the CLI. After applying configurations, the target nodes are transformed into configured target nodes within the Buck2 configured target graph, which is a smaller portion of Buck2’s larger dependency graph.
+At the end of evaluation, the target nodes are not yet configured. Configuration +means applying a list of constraints (such as resolving selects to specify the +right CPU) to make sure the target can be run where it needs to. This is also +known as target platform resolution, and can be configured within the target, +in the buckconfig, propagated from dependencies, or passed via the CLI. After +applying configurations, the target nodes are transformed into configured target +nodes within the Buck2 configured target graph, which is a smaller portion of +Buck2’s larger dependency graph. ### State 2 - Configured Target Graph is generated -At this point, the configured target graph is available for the analysis stage to generate the action graph. +At this point, the configured target graph is available for the analysis stage +to generate the action graph. ### Phase C: Analysis -In the analysis phase, Buck2 constructs a context object (ctx) which contains relevant information (such as attributes pulled from the configuration stage), all converted into Starlark types and made available to the rule. For example, the target’s dependencies are turned into a `ProviderCollection`, source files are converted into `StarlarkArtifacts`, and String attributes are turned into a `StarlarkString`. This ctx object is backed by Buck2’s dependency graph for computation and rules use it to tell Buck2 to run actions, create dynamic actions, or create new files. - -The rule will return a list of providers, which is data that the rule wants to expose to its dependents (that is, can flow through the dependency graph), such as output artifact information (such as file paths and file hashes). Providers could be actions, source files, or attributes. Within the returned list, DefaultInfo always needs to be returned, which indicates what the default outputs are. Some other common built-in providers include RunInfo, TestInfo, and InstallInfo. - -The end result is a list of providers and actions (inserted into the action graph) that Buck2 needs to execute to produce the desired outputs, known as 'bound artifacts'. +In the analysis phase, Buck2 constructs a context object (ctx) which contains +relevant information (such as attributes pulled from the configuration stage), +all converted into Starlark types and made available to the rule. For example, +the target’s dependencies are turned into a `ProviderCollection`, source files +are converted into `StarlarkArtifacts`, and String attributes are turned into a +`StarlarkString`. This ctx object is backed by Buck2’s dependency graph for +computation, and rules use it to tell Buck2 to run actions, create dynamic +actions, or create new files. + +The rule will return a list of providers, which is data that the rule wants to +expose to its dependents (that is, can flow through the dependency graph), such +as output artifact information (for example, file paths and file hashes). Providers +could be actions, source files, or attributes. Within the returned list, +DefaultInfo always needs to be returned, which indicates what the default +outputs are. Some other common built-in providers include RunInfo, TestInfo, and +InstallInfo. + +The end result is a list of providers and actions (inserted into the action +graph) that Buck2 needs to execute to produce the desired outputs, known as +'bound artifacts'. ### State 3 - Action Graph and Providers are generated -At this point, the action graph and providers are available to be processed by the execution stage.
+At this point, the action graph and providers are available to be processed by +the execution stage. ### Phase D: Execute -Execution is where Buck2 takes all the providers (input files from the targets, args from the command line), runs the actions, and then outputs the computed results. The critical path is the theoretical lower bound for the duration of a build, which are the slowest set of actions. +Execution is where Buck2 takes all the providers (input files from the targets, +args from the command line), runs the actions, and then outputs the computed +results. The critical path is the theoretical lower bound for the duration of a +build, which is the slowest set of actions. Buck2 can be run locally or on remote execution, or in a hybrid manner. -For each action, an input action digest is created from the action (hash of command line and all of the action’s inputs), uploaded, and cached within RE. This is known as the **RE action cache**. - -If there is a cache hit, then Buck2 does not need to run the command for the action, and RE returns the output action digest. This is known as **remote execution**. - -If there is not a cache hit, then local execution has to be done, where all the action’s input files are retrieved from the filesystem (most likely from EdenFS), computation is run on these source files, and then outputted to buck-out using I/O operations in the filesystem. - -Hybrid execution allows Buck2 to race local and remote execution and return the returns of whichever finishes first for a performance speedup. - -These action digests are how Buck2 communicates with RE. The action outputs, including final/build artifacts, intermediaries, file, directories, and symlinks related to the build, are then materialized (downloaded to disk), and can be found in the buck-out path. There are different configurations that a user can set to control how materialization is handled. +For each action, a digest is created which is a hash of an action's command and +all its inputs. Buck2 then checks if there is a result cached within RE for an +action with a given digest. + +If there is a cache hit, Buck2 does not need to run the command for the action. +Instead, RE returns the output action digest. This digest can be used to +download the actual output artifacts at a later time. This is known as the **RE +action cache**. + +If there is a cache miss, the action needs to be run either remotely or locally. +If Buck2 decides to run the action remotely, it will first upload all of the +action's inputs that are missing from RE's content addressable storage. If +Buck2 decides to run the action locally, it will first download and materialize +in `buck-out` all of the action's inputs. These inputs might be outputs of other +actions and are stored in RE's content addressable storage but are missing on +the local machine. Only after those steps will Buck2 schedule the action for +actual execution. + +Buck2 can also decide to run local and remote execution simultaneously (a +process known as racing), and use the result of whichever action finishes first +to speed up performance. This strategy is known as **hybrid execution**. + +Materialization of action outputs (which involves downloading and placing them +in the correct location in `buck-out`) can be done immediately after the action +has finished executing. Alternatively, it can be deferred until it is actually +needed for the local execution of another action.
There are various +configurations that a user can set to control how this materialization is +handled. ### State 4 - Build outputs are generated At this point, the build is complete. -If a user ran `buck2 test`, then there is a final transformation for Buck2 to construct a command for TPX to execute the actual test. +If a user ran `buck2 test`, then there is a final transformation for Buck2 to +construct a command for TPX to execute the actual test. ### Phase E: Execute tests -For more detail on testing, review [Test Execution](/docs/rule_authors/test_execution). +For more detail on testing, review +[Test Execution](/docs/rule_authors/test_execution). diff --git a/docs/developers/architecture/buck2_telemetry.md b/docs/developers/architecture/buck2_telemetry.md index fcc429d5fd460..9cb33beaff202 100644 --- a/docs/developers/architecture/buck2_telemetry.md +++ b/docs/developers/architecture/buck2_telemetry.md @@ -3,6 +3,9 @@ id: buck2_telemetry title: Buck2 Telemetry --- + :::note 🚧   THIS PAGE IS UNDER CONSTRUCTION + + ::: diff --git a/docs/developers/bxl.md b/docs/developers/bxl.md index d2a15fde125ab..0b21f9daa2a43 100644 --- a/docs/developers/bxl.md +++ b/docs/developers/bxl.md @@ -5,71 +5,113 @@ title: Why BXL ## Buck2 Extension Language (BXL) -BXL is a Starlark-based script that enables integrators to inspect and interact with the Buck2 graph. +BXL is a Starlark-based script that enables integrators to inspect and interact +with the Buck2 graph. Integrators are able to: -* Write Starlark code that queries, analyzes, and builds on the Buck2 graph. -* Introspect and interact with the Buck2 graph structures natively, via Starlark, in a safe, controlled manner. +- Write Starlark code that queries, analyzes, and builds on the Buck2 graph. +- Introspect and interact with the Buck2 graph structures natively, via + Starlark, in a safe, controlled manner. -Introspection of the Buck2 graph can occur at the unconfigured, configured, providers, and action stages. There are also APIs offered to allow BXL to accept custom command line argument, output artifacts, and print results to stdout. +Introspection of the Buck2 graph can occur at the unconfigured, configured, +providers, and action stages. There are also APIs offered to allow BXL to accept +custom command line arguments, output artifacts, and print results to stdout. -BXL leverages Buck2 core's incremental [caching](bxl_faq.md#when-is-my-bxl-script-cached). It also has support for [running actions](bxl_common_how_tos.md#running-actions), [dynamic outputs](bxl_dynamic_output.md), and [anonymous targets](bxl_anon_target.md). In addition, BXL has [profiling](bxl_common_how_tos.md#profiling-testing-and-debugging-a-bxl-script) capabilities, and allows users to add their own [telemetry](bxl_telemetry.md) directly within the BXL scripts. +BXL leverages Buck2 core's incremental +[caching](bxl_faq.md#when-is-my-bxl-script-cached). It also has support for +[running actions](bxl_common_how_tos.md#running-actions), +[dynamic outputs](bxl_dynamic_output.md), and +[anonymous targets](bxl_anon_target.md). In addition, BXL has +[profiling](bxl_common_how_tos.md#profiling-testing-and-debugging-a-bxl-script) +capabilities, and allows users to add their own [telemetry](bxl_telemetry.md) +directly within the BXL scripts. -BXL is considered to be mostly stable, with a bit more active development here and there. +BXL is considered to be mostly stable, with a bit more active development here +and there. ## When should I use BXL over Buck2 API/CLI?
-There are many overlaps between BXL and Buck2 (for example, both can run cquery and both can build targets). It’s possible that one use case could be handled by both BXL and Buck2. - -Following are some specific recommendations to help decide when to use BXL over regular Buck2: - -* **Use/inspect resolved attributes that are not exposed/accessible to users via normal Buck2 operations.** - * This includes introspecting the Starlark object of providers, analyzing the Starlark object of a rule’s attr before and after coercing and resolution, and introspecting intermediate query results. -* **Reduce/eliminate the need to make several Buck2 calls within your program, such as running several subprocesses to call `cquery` several times.** - * With BXL, you can just call the BXL script once in a subprocess, potentially reducing the amount of code you need to write in your program. For example, if you need to call cquery and build several times, you can put that all within a single BXL script and run `buck2 bxl` once, rather than running `buck2 cquery` and `buck2 build` several times. -* **Reduce/eliminate the need to manually parse Buck2 output format within your program, and any bugs that may come with manual parsing**. - * Some languages are more verbose than others when it comes to string parsing. - * BXL scripts are written in Starlark, which is basically a deterministic, immutable Python, and are able to directly introspect Starlark objects (such as rules and target nodes, and so on) and call methods on these objects instead of parsing them over Buck2’s output. +There are many overlaps between BXL and Buck2 (for example, both can run cquery +and both can build targets). It’s possible that one use case could be handled by +both BXL and Buck2. + +Following are some specific recommendations to help decide when to use BXL over +regular Buck2: + +- **Use/inspect resolved attributes that are not exposed/accessible to users via + normal Buck2 operations.** + - This includes introspecting the Starlark object of providers, analyzing the + Starlark object of a rule’s attr before and after coercion and resolution, + and introspecting intermediate query results. +- **Reduce/eliminate the need to make several Buck2 calls within your program, + such as running several subprocesses to call `cquery` several times.** + - With BXL, you can just call the BXL script once in a subprocess, potentially + reducing the amount of code you need to write in your program. For example, + if you need to call cquery and build several times, you can put that all + within a single BXL script and run `buck2 bxl` once, rather than running + `buck2 cquery` and `buck2 build` several times. +- **Reduce/eliminate the need to manually parse Buck2’s output format within your + program, and the bugs that may come with manual parsing**. + - Some languages are more verbose than others when it comes to string parsing. + - BXL scripts are written in Starlark, which is basically a deterministic, + immutable Python. BXL is able to directly introspect Starlark objects (such + as rules and target nodes) and call methods on these objects + instead of parsing them out of Buck2’s output. ## Example Use Cases ### Generate a project for IDE IDE project generation is roughly as follows: -* Form the target graph for the project target -* Perform some filtering on the graph targets if needed. This depends on the target's configuration.
-* For each target, generate the project target metadata, including: - * compiler flags - * linker flags - * paths to generated files - * inputs and outputs for each targets - * the paths relative to some `PATH` -* Write a single file translating this metadata into a format understood by the IDE + +- Form the target graph for the project target +- Perform some filtering on the graph targets if needed. This depends on the + target's configuration. +- For each target, generate the project target metadata, including: + - compiler flags + - linker flags + - paths to generated files + - inputs and outputs for each target + - the paths relative to some `PATH` +- Write a single file translating this metadata into a format understood by the + IDE An example BXL flow for generating a project for IDE might be: -* Add some command line arguments to accept a target (or subtarget) to generate the project -* Run analysis on the project target with a specific configuration to filter the graph targets -* For each resulting target, inspect the providers and attributes to extract the required metadata information. BXL has filesystem operations handle paths within the project -* Run actions based on the linker/compiler flags, and build artifacts as needed to generate a project -* Write a single file containing the metadata obtained from previous steps +- Add some command line arguments to accept a target (or subtarget) to generate + the project +- Run analysis on the project target with a specific configuration to filter the + graph targets +- For each resulting target, inspect the providers and attributes to extract the + required metadata information. BXL uses filesystem operations to handle paths + within the project +- Run actions based on the linker/compiler flags, and build artifacts as needed + to generate a project +- Write a single file containing the metadata obtained from previous steps ### Build an LSP -A compilation database is a database containing information about which compile options are used to build the files in a project. Language Server Protocols (LSPs) uses the compilation database to provide language features like auto complete, go to definition, and find all references for the user within an IDE/editor. +A compilation database is a database containing information about which compile +options are used to build the files in a project. Language Server Protocols +(LSPs) use the compilation database to provide language features like +autocomplete, go to definition, and find all references for the user within an +IDE/editor.
An example BXL flow for building a C++ LSP might be: -* Add a command line argument to accept a file -* Run owners cquery in BXL to get the owning target of the file -* Run analysis on the owning target to get the desired clang flags -* Use BXL to write the clang flags to the disk in compilation database format +- Add a command line argument to accept a file +- Run owners cquery in BXL to get the owning target of the file +- Run analysis on the owning target to get the desired clang flags +- Use BXL to write the clang flags to the disk in compilation database format ### Perform graph analysis Some example graph analysis functionalities might be: -* Run an analysis in BXL on a set of targets, and then inspect their providers, and build some subtargets -* Run a uquery on some set of targets, and inspect the resulting nodes' coerced attributes -* Run a cquery on some set of targets with a specific configuration, and inspect the resulting nodes' attributes before and after resolution +- Run an analysis in BXL on a set of targets, and then inspect their providers, + and build some subtargets +- Run a uquery on some set of targets, and inspect the resulting nodes' coerced + attributes +- Run a cquery on some set of targets with a specific configuration, and inspect + the resulting nodes' attributes before and after resolution diff --git a/docs/developers/bxl_anon_target.md b/docs/developers/bxl_anon_target.md index 90b641291094b..4e0757099abc0 100644 --- a/docs/developers/bxl_anon_target.md +++ b/docs/developers/bxl_anon_target.md @@ -5,30 +5,49 @@ title: BXL and Anonymous Targets ## Anonymous targets -[Anonymous targets](../rule_authors/anon_targets.md) are supported in BXL. Anonymous targets are keyed by the attributes, and allow you to share/cache work more effectively. +[Anonymous targets](../rule_authors/anon_targets.md) are supported in BXL. +Anonymous targets are keyed by their attributes, and allow you to share/cache work +more effectively. -You might want to use anonymous targets if there is some heavy Starlark evaluation which can be cached, or if you want to cache local actions. +You might want to use anonymous targets if there is some heavy Starlark +evaluation which can be cached, or if you want to cache local actions. -**Note**: The context object within the anon target rule is **not** a BXL context, but a normal rule analysis context. +**Note**: The context object within the anon target rule is **not** a BXL +context, but a normal rule analysis context. ### APIs -The `actions` object returned from `ctx.bxl_actions().actions` (equivalent of `ctx.actions` in normal rules) has the following functions for anonymous targets: - -* `anon_target(rule: "rule", attrs: Dict[str, Any]) -> "promise"`: generates a single anonymous target. Return type is an unresolved `promise`. -* `anon_targets(rules: [("rule", Dict[str, Any])]) -> "promise"`: generates a list of anonymous targets. Return type is an unresolved `promise` representing the list of anonymous targets. -* `artifact_promise(promise: "promise") -> "promise_artifact"`: turns an unresolved promise into a kind of artifact. See [Convert promise to artifact](../rule_authors/anon_targets.md#convert-promise-to-artifact) for more info on why you might want to use this. - -The resulting promise also has `map()` and `join()` functions. `map()` applies a function to the promise's results, and `join()` turns multiple promises into a single promise.
- -To resolve promises in BXL, `bxl_ctx` has a `resolve()` function, which takes in the analysis actions instance (`actions` object returned from `ctx.bxl_actions().actions`) and a single promise and returns an optional promise value, if there is one. If you intend to create multiple promises, using `join()` to produce a single promise will allow you to resolve them concurently with a single `resolve()` call. +The `actions` object returned from `ctx.bxl_actions().actions` (equivalent of +`ctx.actions` in normal rules) has the following functions for anonymous +targets: + +- `anon_target(rule: "rule", attrs: Dict[str, Any]) -> "promise"`: generates a + single anonymous target. Return type is an unresolved `promise`. +- `anon_targets(rules: [("rule", Dict[str, Any])]) -> "promise"`: generates a + list of anonymous targets. Return type is an unresolved `promise` representing + the list of anonymous targets. +- `artifact_promise(promise: "promise") -> "promise_artifact"`: turns an + unresolved promise into a kind of artifact. See + [Convert promise to artifact](../rule_authors/anon_targets.md#convert-promise-to-artifact) + for more info on why you might want to use this. + +The resulting promise also has `map()` and `join()` functions. `map()` applies a +function to the promise's results, and `join()` turns multiple promises into a +single promise. + +To resolve promises in BXL, `bxl_ctx` has a `resolve()` function, which takes in +the analysis actions instance (`actions` object returned from +`ctx.bxl_actions().actions`) and a single promise and returns an optional +promise value, if there is one. If you intend to create multiple promises, using +`join()` to produce a single promise will allow you to resolve them concurrently +with a single `resolve()` call. Small example: ```python def _my_impl(ctx): bxl_actions = ctx.bxl_actions() # pass in relevant params to configure the execution platform resolution - actions = ctx.bxl_actions().actions + actions = bxl_actions.actions promise1 = actions.anon_target(my_anon_rule1, my_attrs1).promise promise2 = actions.anon_target(my_anon_rule2, my_attrs2).promise.map(my_map_function) diff --git a/docs/developers/bxl_basics.md b/docs/developers/bxl_basics.md new file mode 100644 index 0000000000000..17939749b850e --- /dev/null +++ b/docs/developers/bxl_basics.md @@ -0,0 +1,108 @@ +--- +id: bxl_basics +title: BXL Basics +--- + +This page is a primer on common BXL functionalities and data types. Ramping up +in BXL may be challenging without much prior knowledge of Buck2 building blocks +(ex: targets, configurations, queries), so please take a look at the +[Concepts](../concepts/concept_map.md) documentation before reading on. + +## Common BXL functionalities + +### Build + +You can build targets within BXL with +[`ctx.build()`](../../api/bxl/Context/#contextbuild). The result is a +[`bxl.BuildResult`](../../api/bxl/BuildResult), which has `artifacts()` and +`failures()` functions that provide iterators to the artifacts or failures, +respectively. You can pass in a single target or target pattern to build. + +### Analysis + +You can run analysis on targets within BXL via +[`ctx.analysis()`](../../api/bxl/Context/#contextanalysis). Analysis means to +evaluate the underlying rule implementation for the inputted targets, and +produce the providers that the rule defined for the target.
A common workflow is +to inspect the resulting providers, and perhaps ensure parts of these providers +or run actions using information from the providers (see [Actions](#actions) +below). + +### Query + +Buck2 supports a couple different query types: querying the unconfigured graph +(`buck2 uquery`), the configured graph (`buck2 cquery`), or the action graph +(`buck2 aquery`). These queries are all available in BXL as well: + +- `ctx.uquery()` returns a [`bxl.UqueryContext`](../../api/bxl/UqueryContext) +- `ctx.cquery()` returns a [`bxl.CqueryContext`](../../api/bxl/CqueryContext) +- `ctx.aquery()` returns a [`bxl.AqueryContext`](../../api/bxl/AqueryContext) + +You can read more about the individual queries in the API docs. There are many +queries that are common between uquery, cquery, and aquery, but cquery and +aquery will have extra queries unique to the configured graph or the action +graph. One more thing to call out is the `eval()` query, which is a special +query that takes in the entire query as a string literal. A common use for +`eval()` is to migrate a complex query from Buck2 CLI to BXL by dropping the +entire query string directly into `eval()`. + +The query results are target sets (iterable container) of +[`bxl.UnconfiguredTargetNode`s](../../api/bxl/UnconfiguredTargetNode) for +uquery, [`bxl.ConfiguredTargetNode`s](../../api/bxl/ConfiguredTargetNode) for +cquery, and [`bxl.ActionQueryNode`s](../../api/bxl/ActionQueryNode) for aquery. +Each of these node types has accessors on their attributes. A common workflow +is to run some query in BXL, iterate through the resulting nodes to inspect +their attributes, and use those attributes to inform further computations in +BXL. + +#### Uquery + +Querying the unconfigured graph means that no configurations (such as platforms +and transitions) have been applied to the target graph yet. This means that it's +very possible that some parts of the target graph are broken due to lack of +configurations. Generally to avoid this problem, cquery may be preferred +instead. + +#### Cquery + +Querying the configured graph means that configurations have been applied to the +target graph. For cquery, we require that users use a +[target universe](../developers/bxl_target_universe.md) for their query inputs. + +#### Aquery + +Aquery is quite different from uquery and cquery. It is used to query the +action graph, which is constructed after Buck2 runs analysis on the targets and +produces the list of providers and actions needed to build the target. + +### Actions + +You can create actions directly within the BXL API. The available action APIs +are equivalent to the ones found on the +[`AnalysisActions`](../../api/build/AnalysisActions) type for normal rules, with +the caveat that [dynamic actions](./bxl_dynamic_output.md) use the +[`bxl.Context`](../../api/bxl/Context) (which provides richer functionalities). + +A common workflow would be to run analysis on a target, and use some interesting +bits found in the analysis result to construct an augmented +[`cmd_args`](../../api/build#cmd_args) to run, and then ensure the action's +output (see below for ensuring). Also see +[Running actions](./bxl_common_how_tos.md#running-actions). + +### Ensure + +Ensuring an artifact means that you want the artifact to be materialized +(meaning, downloaded to your machine) at the end of the BXL execution. There are +two APIs for ensuring: `ctx.output.ensure()` and `ctx.output.ensure_multiple()` +(see [`bxl.OutputStream`](../../api/bxl/OutputStream)).
As the naming indicates, +the former is for ensuring a single artifact, and the latter is for ensuring +multiple artifact-like inputs. Artifact-like inputs include +[`cmd_args`](../../api/build#cmd_args) (can be found when inspecting providers), +[`bxl.BuildResult`](../../api/bxl/BuildResult) (produced when building something +in BXL), or [`artifact`](../../api/build/artifact) (can be found when inspecting +providers, or creating your own actions). + +A common workflow is to ensure an artifact that you created via some custom +actions defined in your script, or to ensure some artifacts found in the +providers after running analysis. Also see +[What do I need to know about ensured artifacts](./bxl_faq.md#what-do-i-need-to-know-about-ensured-artifacts). diff --git a/docs/developers/bxl_common_how_tos.md b/docs/developers/bxl_common_how_tos.md index e569f346ef73a..c7c198f16d0af 100644 --- a/docs/developers/bxl_common_how_tos.md +++ b/docs/developers/bxl_common_how_tos.md @@ -1,11 +1,14 @@ --- id: bxl_how_tos -title: Common How-Tos +title: Common How-Tos --- +import { FbInternalOnly } from 'docusaurus-plugin-internaldocs-fb/internal'; + ## Passing in and using CLI args -A BXL function can accept a `cli_args` attribute where args names and types are specified to use within your script, as shown in the following example: +A BXL function can accept a `cli_args` attribute where arg names and types are +specified to use within your script, as shown in the following example: Example: @@ -32,7 +35,8 @@ On the command line, you can invoke the arguments as follows: buck2 bxl //myscript.bxl:example -- --bool_arg true --list_type 1 --list_type 2 --target //foo:bar ``` -For BXL functions, to read the arguments, use them as attributes from the `cli_args` attribute on the BXL `ctx` object, as follows: +For BXL functions, to read the arguments, use them as attributes from the +`cli_args` attribute on the BXL `ctx` object, as follows: ```python def _impl_example(ctx): @@ -41,7 +45,8 @@ def _impl_example(ctx): ## Running actions -You can create actions within BXL via the `actions_factory`. This is called once globally then used on demand: +You can create actions within BXL via the `actions_factory`. This is called once +globally then used on demand: ```python def _impl_example(ctx): @@ -49,15 +54,36 @@ def _impl_example(ctx): output = actions.write("my_output", "out") ``` -You can specify the execution platform resolution by setting named parameters when instantiating `bxl_actions`: -* `exec_deps` - These are dependencies you wish to access as executables for creating the action. This is usually the same set of targets one would pass to rule's `attr.exec_dep`. Accepts a list of strings, subtarget labels, target labels, or target nodes. -* `toolchains` - The set of toolchains needed for the actions you intend to create. Accepts a list of strings, subtarget labels, target labels, or target nodes. -* `target_platform` - The intended target platform for your toolchains. Accepts a string or target label. -* `exec_compatible_with` - Explicit list of configuration nodes (like platforms or constraints) that these actions are compatible with. This is the `exec_compatible_with` attribute of a target. Accepts a list of strings, target labels, or target nodes. - -If you specify `exec_deps` or `toolchains`, you can access the resolved `dependency` objects on the `bxl_actions` object.
The `bxl_actions` object will have `exec_deps` and `toolchains` attributes, which are `dict`s where the keys are the unconfigured subtarget labels, and the values are the configured/resolved `dependency` objects. - -Note that the keys of `exec_deps` and `toolchains` must be unconfigured subtarget labels (`StarlarkProvidersLabel`), and not unconfigured target labels. You can use `ctx.unconfigured_sub_targets(...)` or `with_sub_target()` on `target_label` to create the label. +You will need to have +[execution platforms](../rule_authors/configurations.md#execution-platforms) +enabled for your project, or else you will get an error. You can specify the +execution platform resolution by setting named parameters when instantiating +`bxl_actions`: + +- `exec_deps` - These are dependencies you wish to access as executables for + creating the action. This is usually the same set of targets one would pass to + a rule's `attr.exec_dep`. Accepts a list of strings, subtarget labels, target + labels, or target nodes. +- `toolchains` - The set of toolchains needed for the actions you intend to + create. Accepts a list of strings, subtarget labels, target labels, or target + nodes. +- `target_platform` - The intended target platform for your toolchains. Accepts + a string or target label. +- `exec_compatible_with` - Explicit list of configuration nodes (like platforms + or constraints) that these actions are compatible with. This is the + `exec_compatible_with` attribute of a target. Accepts a list of strings, + target labels, or target nodes. + +If you specify `exec_deps` or `toolchains`, you can access the resolved +`dependency` objects on the `bxl_actions` object. The `bxl_actions` object will +have `exec_deps` and `toolchains` attributes, which are `dict`s where the keys +are the unconfigured subtarget labels, and the values are the +configured/resolved `dependency` objects. + +Note that the keys of `exec_deps` and `toolchains` must be unconfigured +subtarget labels (`StarlarkProvidersLabel`), and not unconfigured target labels. +You can use `ctx.unconfigured_sub_targets(...)` or `with_sub_target()` on +`target_label` to create the label.
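+For illustration, a hedged sketch of creating such a key and looking up the
+resolved dependency under it (the target `cell//tools:my_tool` is a made-up
+name):
+
+```python
+def _impl_sub_target_key(ctx):
+    # Build an unconfigured subtarget label (a `StarlarkProvidersLabel`);
+    # the target string here is hypothetical.
+    tool_key = ctx.unconfigured_sub_targets("cell//tools:my_tool")
+
+    bxl_actions = ctx.bxl_actions(exec_deps = [tool_key])
+
+    # The resolved `dependency` object is keyed by the same unconfigured
+    # subtarget label.
+    tool_dep = bxl_actions.exec_deps[tool_key]
+    ctx.output.print(tool_dep)
+```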
```python def _impl_example(ctx): @@ -77,7 +103,8 @@ def _impl_example(ctx): ## Getting providers from an analysis -After calling `analysis()`, you can get the providers collection from `providers()`: +After calling `analysis()`, you can get the providers collection from +`providers()`: ```python def _impl_example(ctx): @@ -86,7 +113,8 @@ def _impl_example(ctx): ## Get a specific provider from an analysis -After calling `analysis()`, you can also get the providers collection from `providers()` then grab whatever specific provider you need: +After calling `analysis()`, you can also get the providers collection from +`providers()` then grab whatever specific provider you need: ```python def _impl_example(ctx): @@ -96,49 +124,89 @@ def _impl_example(ctx): ## Get a specific subtarget from an analysis -Once you have a provider, you can get its subtargets by using the `sub_targets` attribute on the struct to get a dict of provider labels to provider collections: +Once you have a provider, you can get its subtargets by using the `sub_targets` +attribute on the struct to get a dict of provider labels to provider +collections: ```python def _impl_example(ctx): - subtarget = ctx.analysis(my_target).providers()[DefaultInfo].sub_targets[“my_subtarget”] + subtarget = ctx.analysis(my_target).providers()[DefaultInfo].sub_targets["my_subtarget"] ctx.output.print(subtarget) ``` -## Building a subtarget +## Building a target/subtarget without blocking -You can use `analysis()` to get a specific subtarget from an analysis, or you can pass in the subtarget literal directly into `ctx.build()`: +`ctx.build` is synchronous and should only be used when the result of the build +is needed inline during the bxl execution. To execute builds without blocking +the script, retrieve the `DefaultInfo` from the target's providers and use the +`ctx.output.ensure_multiple` API. + +Example: ```python -def _impl_example(ctx): - outputs = ctx.build("cell//path/to/my:target[my_subtarget]") - ctx.output.ensure_multiple(outputs) +ctx.output.ensure_multiple(ctx.analysis(label).providers()[DefaultInfo]) ``` -## Getting attributes or resolved attributes efficiently +## Getting attributes or resolved attributes efficiently on a configured target node -If you need to use all of the attrs/resolved_attrs, then initializing the eager variant once would be best. If you only need a few of the attrs, then initializing the lazy variant is better. There’s not really a hard line, it depends on the target node, and which attrs you are looking for. If performance is key to your BXL script, the best way to determine this is to use the BXL profiler. +If you need to use all of the attrs/resolved_attrs, then initializing the eager +variant once would be best. If you only need a few of the attrs, then +initializing the lazy variant is better. There’s not really a hard line; it +depends on the target node, and which attrs you are looking for. If performance +is key to your BXL script, the best way to determine this is to use the BXL +profiler.
-Regardless, if you use eager or lazy versions of getting attributes, you should cache the attrs object: +Regardless, if you use eager or lazy versions of getting attributes, you should +cache the attrs object: ```python def _impl_example(ctx): - lazy = ctx.attrs_lazy() # call once and reuse wherever is necessary - eager = ctx.attrs_eager() # call once and reuse wherever is necessary + my_configured_node = ctx.configured_targets(":foo") + + # call once and reuse, ideally when you need most/all attrs + eager = my_configured_node.attrs_eager() + + # call once and reuse, ideally when you only need a few attrs + lazy = my_configured_node.attrs_lazy() + + # call once and reuse, ideally when you need most/all resolved attrs + resolved_eager = my_configured_node.resolved_attrs_eager(ctx) + + # call once and reuse, ideally when you only need a few resolved attrs + resolved_lazy = my_configured_node.resolved_attrs_lazy(ctx) ``` ## Inspecting a struct -You can use `dir(my_struct)` to inspect a struct. You can also use `getattr(my_struct, “my_attr”)` to grab individual attributes, which is equivalent to `my_struct.my_attr`. +You can use `dir(my_struct)` to inspect a struct. You can also use +`getattr(my_struct, "my_attr")` to grab individual attributes, which is +equivalent to `my_struct.my_attr`. -These are available as part of the [Starlark language spec](https://github.com/google/skylark/blob/a0e5de7e63b47e716cca7226662a4c95d47bf873/doc/spec.md#dir). +These are available as part of the +[Starlark language spec](https://github.com/bazelbuild/starlark/blob/master/spec.md#dir). ## Set addition/subtraction on a `target_set` -There are a few BXL actions that return a `target_set` (such as a cquery `eval()`). The `target_set` supports set subtraction and addition (you can use `-` and `+` directly in Starlark). +There are a few BXL actions that return a `target_set` (such as a cquery +`eval()`). The `target_set` supports set subtraction and addition (you can use +`-` and `+` directly in Starlark). + +## Initializing configured/unconfigured `target_set` + +You can use the following APIs to initialize a `target_set` (see the combined +usage sketch after the profiling section below): + +```python +def bxl.utarget_set(nodes: None | list[bxl.UnconfiguredTargetNode]) -> bxl.UnconfiguredTargetSet +``` + +```python +def bxl.ctarget_set(nodes: None | list[bxl.ConfiguredTargetNode]) -> bxl.ConfiguredTargetSet +``` ## Profiling, Testing, and Debugging a BXL script -You can use `buck2 bxl profiler`, with various measurements, to determine where the script is least efficient. +You can use `buck2 bxl profiler`, with various measurements, to determine where +the script is least efficient. To time individual pieces of the script, you can use BXL’s timestamp methods: @@ -151,7 +219,42 @@ def _impl_example(_ctx): end2 = start.elapsed_millis() ``` -BXL does not have a debugger available nor a robust testing framework for mocking. +- **Debug** - the common way to debug a BXL script is with print statements + (`print()`, `pprint()` and `ctx.output.print()`). + + + +- **Debugger** - to use the debugger you can follow the instructions + [here](https://fb.workplace.com/groups/buck2eng/permalink/3562907607330619/). + + 1. `fdb --starlark-kill-buck attach buck` + 2. place a breakpoint in the bxl file + 3. run the buck2 bxl command + + + +- **Test** - BXL does not have a robust testing framework for mocking. The main + method to test a BXL script is to actually invoke it with required inputs then + verify the outputs.
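+As promised above, a combined usage sketch of `target_set` initialization and
+set addition/subtraction, assuming only the APIs documented on this page (the
+query strings are hypothetical, and `ctarget_set` is assumed callable with no
+arguments per the signature above):
+
+```python
+def _impl_target_set_example(ctx):
+    # cquery eval() returns a target_set of configured target nodes.
+    all_nodes = ctx.cquery().eval("root//libs/...")
+    test_nodes = ctx.cquery().eval("kind('.*_test', root//libs/...)")
+
+    # target_set supports `-` and `+` directly in Starlark.
+    non_tests = all_nodes - test_nodes
+    merged = non_tests + test_nodes
+
+    # Initialize an empty configured target_set (assuming nodes defaults to None).
+    empty = ctarget_set()
+
+    ctx.output.print(non_tests)
+```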
+ +## Getting the path of an artifact as a string + +The starlark `artifact` type encapsulates source artifacts, declared artifacts, +and build artifacts. It can be dangerous to access paths and use them in further +BXL computations. For example, if you are trying to use absolute paths for +something and end up passing it into a remotely executed action, the absolute +path may not exist on the remote machine. Or, if you are working with paths and +expecting the artifact to already have been materialized in further BXL +computations, that would also result in errors. + +However, if you are not making any assumptions about the existence of these +artifacts, you can use +[`get_path_without_materialization()`](../../api/bxl#get_path_without_materialization), +which accepts source, declared, or build artifacts. It does _not_ accept ensured +artifacts (also see +[What do I need to know about ensured artifacts](./bxl_faq.md#what-do-i-need-to-know-about-ensured-artifacts)). -* **Debug** - the main method to debug a BXL script is with print statements (`print()` and `ctx.output.print()`). -* **Test** - the main method to test a BXL script is to actually invoke it with required inputs then verify the outputs. +For getting paths of `cmd_args()` inputs, you can use +[`get_paths_without_materialization()`](../../api/bxl#get_paths_without_materialization), +but note this is risky because the inputs could contain tsets, which, when +expanded, could be very large. Use these methods at your own risk. diff --git a/docs/developers/bxl_dynamic_output.md b/docs/developers/bxl_dynamic_output.md index a3f977ac00f63..979b01c685525 100644 --- a/docs/developers/bxl_dynamic_output.md +++ b/docs/developers/bxl_dynamic_output.md @@ -5,20 +5,36 @@ title: BXL and Dynamic Outputs ## Dynamic output -When declaring [dynamic outputs](../rule_authors/dynamic_dependencies.md) within a BXL script, the dynamic lambda for is created with a `bxl_ctx`, which means that you can do things like run analysis or queries to inspect the build graph from within the dynamic lambda. +When declaring [dynamic outputs](../rule_authors/dynamic_dependencies.md) within +a BXL script, the dynamic lambda is created with a `bxl_ctx`, which means +that you can do things like run analysis or queries to inspect the build graph +from within the dynamic lambda. -You may declare multiple dynamic outputs within a single BXL script, or declare nested dynamic outputs. Dynamic outputs are run asynchronously after the BXL evaluation. +You may declare multiple dynamic outputs within a single BXL script, or declare +nested dynamic outputs. Dynamic outputs are run asynchronously after the BXL +evaluation. ### Limitations -- `ctx.output` is not available from a dynamic lambda. This means you can’t ensure artifacts or print cached outputs within a dynamic lambda. -- Error messages from skipping incompatible targets are only emitted to the console, and not cached in the stderr + +- `ctx.output` is not available from a dynamic lambda. This means you can’t + ensure artifacts or print cached outputs within a dynamic lambda. +- Error messages from skipping incompatible targets are only emitted to the + console, and not cached in stderr - `build()` is not available from a dynamic lambda -- `bxl_actions` in a dynamic lambda always inherits the execution platform resolution of the root/parent BXL.
- - The expected usage of `bxl_actions` from within a dynamic lambda is to instantiate it without any named parameters, but the `exec_deps` and `toolchains` of the execution platform resolution are accessible, and return the same values as the root/parent BXL +- `bxl_actions` in a dynamic lambda always inherits the execution platform + resolution of the root/parent BXL. + - The expected usage of `bxl_actions` from within a dynamic lambda is to + instantiate it without any named parameters, but the `exec_deps` and + `toolchains` of the execution platform resolution are accessible, and return + the same values as the root/parent BXL - Profiling is not hooked up to the dynamic BXL context ### Silly example + +This is a silly example of creating a dynamic output which reads some +`query_params` input, calls some BXL functions like `uquery`, +`configured_targets` to get the resolved attributes of a target node, and then +writes the attributes to an output file. ```python def _impl_dynamic_output(ctx): diff --git a/docs/developers/bxl_faq.md b/docs/developers/bxl_faq.md index 00b48172439ec..15f1d40bec2ac 100644 --- a/docs/developers/bxl_faq.md +++ b/docs/developers/bxl_faq.md @@ -5,27 +5,74 @@ title: FAQs ## When is my BXL script cached? -The entire BXL script is represented as a single node on the DICE graph (Buck2’s internal dependency graph). When the script’s input changes, the entire node is invalidated and needs to be recomputed. For example, if a BXL function calls uquery, then uses the result to do a cquery and then a build, if Buck2 detects that any of the recorded calls to uquery, cquery, and build changes, then the entire BXL script will be reran. The computations themselves (uquery, cquery, and build) will still be incrementally evaluated via DICE, so we are not rerunning _every_ computation entirely within the BXL. +The entire BXL script is represented as a single node on the DICE graph (Buck2’s +internal dependency graph). When the script’s input changes, the entire node is +invalidated and needs to be recomputed. For example, if a BXL function calls +uquery, then uses the result to do a cquery and then a build, if Buck2 detects +that any of the recorded calls to uquery, cquery, and build changes, then the +entire BXL script will be rerun. The computations themselves (uquery, cquery, +and build) will still be incrementally evaluated via DICE, so we are not +rerunning _every_ computation entirely within the BXL. -When the BXL script creates artifacts and ensures them, those artifacts are cached separately in an action outside of the BXL execution. This means that the artifacts produced by BXL are cached separately from the BXL script itself, much like the computations within a BXL. +When the BXL script creates artifacts and ensures them, those artifacts are +cached separately in an action outside of the BXL execution. This means that the +artifacts produced by BXL are cached separately from the BXL script itself, much +like the computations within a BXL. -During 2023, there is a plan to add finer grain incrementality to make better use of DICE’s existing incrementality support. +During 2023, there is a plan to add finer-grained incrementality to make better +use of DICE’s existing incrementality support.
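+As an illustration of this caching model, here is a hedged sketch whose two
+recorded computations (a uquery, then builds) both become inputs of the single
+BXL node (the file path is hypothetical):
+
+```python
+def _impl_cache_example(ctx):
+    # Recorded call 1: if the owning targets change, the whole script re-runs.
+    owners = ctx.uquery().owner("libs/foo.cpp")
+    for node in owners:
+        # Recorded call 2: the build itself stays incrementally cached in
+        # DICE even when the script is re-run.
+        result = ctx.build(node.label)
+        ctx.output.print(result)
+```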
## What’s the difference between `ctx.output.print()` and `print()`?

-* `ctx.output.print()` writes items to stdout by buck2 even when the script is cached. Items written to the output stream are considered to be the results of a BXL script, which will be displayed to stdout by buck2 even when the script is cached.
-* `print()` is offered by Starlark via the stdlib. This prints anything you want but won’t be provided to stdout at the end of a BXL script. These can be used to print to stderr. NOTE: `print()` statements don't show up if the script has been cached.
+- `ctx.output.print()` writes items to the output stream. Items written to the
+  output stream are considered to be the results of a BXL script, which will be
+  displayed to stdout by buck2 even when the script is cached.
+- `print()` is offered by Starlark via the stdlib. This prints anything you want
+  but won’t be provided to stdout at the end of a BXL script. It can be used to
+  print to stderr. NOTE: `print()` statements don't show up if the script has
+  been cached.

## What do I need to know about ensured artifacts

-An `ensured_artifact` prints out the relative or absolute path via `ctx.output.print()`, depending on if called with `abs_path()` or `rel_path`(), but will print out `>` via `print()`.
+An `ensured_artifact` prints out the relative or absolute path via
+`ctx.output.print()`, depending on whether it is called with `abs_path()` or
+`rel_path()`, but will print out an opaque placeholder, rather than the path,
+via `print()`.

-This is intentional because when the ensured artifact is created within BXL, it has not been materialized yet. It will be materialized after the BXL script finishes executing, and Buck2 core performs some additional actions after the BXL script.
+This is intentional because when the ensured artifact is created within BXL, it
+has not been materialized yet. It will be materialized after the BXL script
+finishes executing, and Buck2 core performs some additional actions after the
+BXL script.

-This is a safeguard to prevent people from misusing the artifact path and passing it into an action without the artifact having been materialized or passing an absolute path into RE, which can actually mess up RE and render the action not shareable across users. In addition, it makes these actions separately cacheable from the BXL execution.
+This is a safeguard to prevent people from misusing the artifact path and
+passing it into an action without the artifact having been materialized, or
+passing an absolute path into RE, which can actually mess up RE and render the
+action not shareable across users. In addition, it makes these actions
+separately cacheable from the BXL execution.

-### What is the difference between dynamic outputs and anon targets?
+## What is the difference between dynamic outputs and anon targets?

-Dynamic outputs are meant for [dynamic dependencies](../rule_authors/dynamic_dependencies.md). The context type is a `bxl_ctx`. Dynamic outputs are ran asynchronously outside of the BXL execution.
+Dynamic outputs are meant for
+[dynamic dependencies](../rule_authors/dynamic_dependencies.md). The context
+type is a `bxl_ctx`. Dynamic outputs are run asynchronously outside of the BXL
+execution.

-Anon targets are meant for sharing work betwen multiple BXLs. The context type is a normal rule analysis `context`. Anon targets are `await`-ed inline with your BXL function.
+Anon targets are meant for sharing work between multiple BXLs. The context type
+is a normal rule analysis `context`. Anon targets are `await`-ed inline with
+your BXL function.
+
+## Can I mutate types returned by BXL APIs?
+
+The data types produced by BXL API calls are always immutable.
+
+## What is run synchronously vs asynchronously?
+
+Starlark itself is run synchronously. However, certain BXL APIs are evaluated
+asynchronously.
+
+If you pass in multiple inputs to builds, queries, or analyses, the execution of
+these API calls will be blocking, but the inputs themselves will be evaluated in
+parallel within the execution.
+
+Ensuring artifacts, dynamic outputs, anon targets, and resolving promises will
+happen _after_ the Starlark script is executed.
diff --git a/docs/developers/bxl_getting_started.md b/docs/developers/bxl_getting_started.md
index 43150df236d6b..87419a128b75b 100644
--- a/docs/developers/bxl_getting_started.md
+++ b/docs/developers/bxl_getting_started.md
@@ -3,9 +3,25 @@ id: bxl_getting_started
title: Getting Started
---

+If you are mostly unfamiliar with Buck2, take a look at
+[BXL Basics](./bxl_basics.md).
+
+## Navigating the docs
+
+All BXL APIs can be found [here](../../api/bxl). A good place to start would be
+the [`bxl.Context`](../../api/bxl/Context), which contains all available BXL
+functionalities.
+
+The [Build APIs](../../api/build) are available and often useful in BXL as well.
+
+The [Starlark spec](https://github.com/bazelbuild/starlark/blob/master/spec.md)
+is also a good resource for general Starlark APIs.
+
## Writing a BXL

-To create a BXL, first, create a script somewhere in the repository ending in `.bxl`. (Note that you can define a single bxl per file, or multiple BXLs per file like in Starlark rules).
+To create a BXL, first create a script somewhere in the repository ending in
+`.bxl`. (Note that you can define a single BXL per file, or multiple BXLs per
+file, like in Starlark rules.)

In it, define a BXL function as follows:

@@ -19,20 +35,23 @@ your_function_name = bxl_main(
    cli_args = {
        # cli args that you want to receive from the command line
        "bool_arg": cli_args.bool(),
-        "list_type": cli_args.list(cli_args.int()),
+        # cli arg names are converted to snake case, e.g. this one is passed as --list-type and accessed via ctx.cli_args.list_type
+        "list-type": cli_args.list(cli_args.int()),
        "optional": cli_args.option(cli_args.string()),
        "target": cli_args.target_label(),
    },
)

-This exposes `your_function_name` as a function, with whatever arguments you defined it, so that on the command line you can invoke:
+This exposes `your_function_name` as a function, with whatever arguments you
+defined for it, so that on the command line you can invoke:

```text
-buck2 bxl //myscript.bxl:your_function_name -- --bool_arg true --list_type 1 --list_type 2 --target //foo:bar`
+buck2 bxl //myscript.bxl:your_function_name -- --bool_arg true --list-type 1 --list-type 2 --target //foo:bar
```

-You can also add helpdocs to the cli args and get them to show up in cli via `--help`:
+You can also add helpdocs to the cli args and get them to show up in the CLI via
+`--help`:

```python
def _your_implementation(ctx):
@@ -47,11 +66,19 @@ your_function_name = bxl_main(
)
```

-The implementation function takes a single context as parameter (see the documentation for [`BxlContext`](https://buck2.build/docs/api/bxl/bxl_ctx/)). Using it, you'll be able to access functions that enable you to perform queries, analysis, builds, and even create your own actions within BXL to build artifacts as part of a BXL function.
+The implementation function takes a single context as a parameter (see the
+documentation for [`bxl.Context`](../../api/bxl/Context)). Using it, you'll be
+able to access functions that enable you to perform queries, analysis, builds,
+and even create your own actions within BXL to build artifacts as part of a BXL
+function.

-The primary method to return information from BXL is to either print them, or build some artifact (for details, see the [`OutputStream`](https://buck2.build/docs/api/bxl/bxl_output_stream/) documentation, available as part of `ctx.output`).
-At high level, `ctx.output.print(..)` prints results to stdout, and `ctx.output.ensure(artifact)` marks artifacts as to be materialized into buck-out by the end of the BXL
-function, returning an object that lets you print the output path via `ctx.output.print(ensured)`.
+The primary method to return information from BXL is to either print it, or
+build some artifact (for details, see the
+[`bxl.OutputStream`](../../api/bxl/OutputStream) documentation, available as
+part of `ctx.output`). At a high level, `ctx.output.print(..)` prints results to
+stdout, and `ctx.output.ensure(artifact)` marks artifacts as to be materialized
+into buck-out by the end of the BXL function, returning an object that lets you
+print the output path via `ctx.output.print(ensured)`.

## Running a BXL

@@ -61,12 +88,15 @@ To run a BXL function, invoke the buck2 command:

```text
buck2 bxl <bxl function> -- <function args>
```

- Where `` is of the form `:`, and `` are the arguments that the function accepts from the command line.
+Where `<bxl function>` is of the form `<path to .bxl file>:<function name>`,
+and `<function args>` are the arguments that the function accepts from the
+command line.

The documentation for a BXL function can be seen by running:

```text
- buck2 bxl -- --help`
- ```
+ buck2 bxl <bxl function> -- --help
+```

- Note that this is different from `buck2 bxl --help`, which generates the help for the buck2 command instead of the function.
+Note that this is different from `buck2 bxl --help`, which generates the help
+for the buck2 command instead of the function.
diff --git a/docs/developers/bxl_target_universe.md b/docs/developers/bxl_target_universe.md
index 23b1c03f72bbf..81d2b077d7bc8 100644
--- a/docs/developers/bxl_target_universe.md
+++ b/docs/developers/bxl_target_universe.md
@@ -5,13 +5,23 @@ title: Target Universe in BXL

## BXL cquery and target universe

-BXL cannot infer the [target universe](../concepts/glossary.md#target-universe) like in the CLI (in most cases). BXL splits up cquery functions per function (ex: `ctx.cquery().kind(...)`), with the exception of `ctx.cquery.eval(...)`, which accepts literals exactly like in the CLI. For the `eval` query, target universe is inferred exactly like the CLI.
+BXL cannot infer the [target universe](../concepts/glossary.md#target-universe)
+like in the CLI (in most cases). BXL splits up cquery functions per function
+(ex: `ctx.cquery().kind(...)`), with the exception of `ctx.cquery.eval(...)`,
+which accepts literals exactly like in the CLI. For the `eval` query, the target
+universe is inferred exactly like in the CLI.

For all other cases, take the following query as an example:

`buck2 cquery "rdeps(deps(//example:foo), deps(//example:bar))"`

-The target universe here should be constructed from the all the target literals and their transitive deps, which is to say `deps(//example:foo, //example:bar)`. When you run the query, the evaluation of `deps(//example:foo)` and `deps(//example:bar)` nested in the `rdeps` query will happen inside the universe resulting from `deps(//example:foo, //example:bar)`.
Translating it to BXL's individual cquery functions, and let’s say we also try to use the target literals to construct the universe as the CLI target inference does:
+The target universe here should be constructed from all the target literals
+and their transitive deps, which is to say `deps(//example:foo, //example:bar)`.
+When you run the query, the evaluation of `deps(//example:foo)` and
+`deps(//example:bar)` nested in the `rdeps` query will happen inside the
+universe resulting from `deps(//example:foo, //example:bar)`. Translating it to
+BXL's individual cquery functions, and let’s say we also try to use the target
+literals to construct the universe as the CLI target inference does:

```python
from_node = ctx.cquery().deps("//example:foo") # universe would be //example:foo
@@ -21,54 +31,91 @@ to_node = ctx.cquery().deps("//example:bar") # universe would be //example:bar
rdeps = ctx.cquery().rdeps(from_node, to_node) # what is the universe here?
```

-Here, the `from_node` query is actually evaluated in the wrong target universe because we have broken up the query steps in BXL. Instead of `deps(//example:foo)` being evaluated in `deps(//example:foo, //example:bar)`, it’s evaluated with only `deps(//example:foo)`. It’s impossible to know that there’s going to be an rdeps query later on that expects a different target universe.
+Here, the `from_node` query is actually evaluated in the wrong target universe
+because we have broken up the query steps in BXL. Instead of
+`deps(//example:foo)` being evaluated in `deps(//example:foo, //example:bar)`,
+it’s evaluated with only `deps(//example:foo)`. It’s impossible to know that
+there’s going to be an rdeps query later on that expects a different target
+universe.

### Specifying target universe in BXL cquery

-BXL cquery functions should only accept configured targets as inputs, with the exception of `eval` and `testsof_with_default_platform`.
+BXL cquery functions should only accept configured targets as inputs, with the
+exception of `eval` and `testsof_with_default_platform`.

-BXL has a `ctx.target_universe()` function to construct a `target_universe` object, which has a `lookup()` function to lookup the configured targets within the target universe and return the target set. The lookup functionality is useful because sometimes a single target can appear multiple times within a target universe. For example, if you specify a cxx toolchain using its unconfigured target label, it will always match against all cxx toolchains in the target universe (so at least once for target deps and once for exec deps), since cxx toolchains may have multiple configurations.
+BXL has a `ctx.target_universe()` function to construct a `target_universe`
+object, which has a `lookup()` function to look up the configured targets within
+the target universe and return the target set. The lookup functionality is
+useful because sometimes a single target can appear multiple times within a
+target universe. For example, if you specify a cxx toolchain using its
+unconfigured target label, it will always match against all cxx toolchains in
+the target universe (so at least once for target deps and once for exec deps),
+since cxx toolchains may have multiple configurations.
Example:

```python
-def _impl:
+def _impl():
    target_universe = ctx.target_universe(["//example:foo", "//example:bar"])
    to_node = target_universe.lookup("//example:foo")
    from_node = target_universe.lookup("//example:bar")
    rdeps = ctx.cquery().rdeps(to_node, from_node)
```

-However, sometimes you might want a specific configuration instead of using all configurations found within a target universe, in which case you could use `ctx.configured_targets(...)` to specify the configuration. Or, sometimes you may want to use the specific configured target nodes resulting from other BXL calls. In these cases, you can pass the configured targets directly into cquery functions, instead of going through target universe lookup.
+However, sometimes you might want a specific configuration instead of using all
+configurations found within a target universe, in which case you could use
+`ctx.configured_targets(...)` to specify the configuration. Or, sometimes you
+may want to use the specific configured target nodes resulting from other BXL
+calls. In these cases, you can pass the configured targets directly into cquery
+functions, instead of going through target universe lookup.

### What does the target universe tend to be in practice?

-For `owner` query, the universe would be constructed with the unconfigured target nodes returned from `ctx.uquery().owner(...)`. Example:
+For `owner` query, the universe would be constructed with the unconfigured
+target nodes returned from `ctx.uquery().owner(...)`. Example:

```python
-def _impl:
+def _impl():
    unconfigured_owners = ctx.uquery().owner("foobar")
    target_universe = ctx.target_universe(unconfigured_owners).target_set()
    owners = ctx.cquery().owner("foobar", target_universe)
```

-For everything else, the universe would usually be constructed using all target literals found in your query. Example:
+For everything else, the universe would usually be constructed using all target
+literals found in your query. Example:

```python
-def _impl:
+def _impl():
    target_universe = ctx.target_universe("//example:foo")
    inputs = target_universe.target_set()
    deps = ctx.cquery().deps(inputs)
```

-While the above guideline should work for `rdeps` as well, for `rdeps` the universe would usually be narrowed down to the "to"/"destination" target set argument. (This is a subset of the target universe suggested for non-`owner` query cases). Updating the example from above:
+While the above guideline should work for `rdeps` as well, for `rdeps` the
+universe would usually be narrowed down to the "to"/"destination" target set
+argument. (This is a subset of the target universe suggested for non-`owner`
+query cases.) Updating the example from above:

```python
-def _impl:
+def _impl():
    target_universe = ctx.target_universe("//example:foo")
    # narrowed down to the "to" literals in rdeps
    universe_node = target_universe.target_set()
    from_node = target_universe.lookup("//example:bar")
    rdeps = ctx.cquery().rdeps(universe_node, from_node)
```

+### `keep_going`
+
+The configured graph can be broken for various reasons: incompatible targets
+(BXL skips these automatically), visibility issues, nonexistent targets, etc.
+For issues that are not incompatible targets, the `target_universe` can be
+constructed with the `keep_going` flag set to `True` to skip any other errors,
+and your cquery will not error out. Note that `keep_going` is only compatible
+with a single string literal target or target pattern at the moment.
+ +```python +ctx.target_universe("//foo/...", keep_going = True) +``` + ## BXL build and target universe -Note that BXL builds currently do not support target universe, but we intend to add this. +Note that BXL builds currently do not support target universe, but we intend to +add this. diff --git a/docs/developers/bxl_telemetry.md b/docs/developers/bxl_telemetry.md index 9c67503884d89..0a4cb999ca981 100644 --- a/docs/developers/bxl_telemetry.md +++ b/docs/developers/bxl_telemetry.md @@ -1,15 +1,20 @@ --- id: bxl_telemetry -title: BXL Telemetry +title: BXL Telemetry --- ## Telemetry ### Emitting events from your BXL script -In BXL, you can emit custom events via `ctx.instant_event()`, which takes in two named parameters: -* `id`: string, identifies your event. Helpful to identify your event when looking through event logs. Ids do not have to be unique in a single BXL script. -* `metadata`: dict, where keys are strings, and values are strings, bools, ints, or lists/dicts of the mentioned types. You can put any metadata you wish here. +In BXL, you can emit custom events via `ctx.instant_event()`, which takes in two +named parameters: + +- `id`: string, identifies your event. Helpful to identify your event when + looking through event logs. Ids do not have to be unique in a single BXL + script. +- `metadata`: dict, where keys are strings, and values are strings, bools, ints, + or lists/dicts of the mentioned types. You can put any metadata you wish here. Example: @@ -23,7 +28,12 @@ my_script = bxl_main( ) ``` -Only instant events can be manually created within BXL at this time, which means that the event represents a single point in time. If you need something similar to spans (start and end events which encompass a range of time) for measuring the duration of a particular section (excluding actions - see below for more information), you could couple instant events with the global `now()` function to measure the duration yourself: +Only instant events can be manually created within BXL at this time, which means +that the event represents a single point in time. If you need something similar +to spans (start and end events which encompass a range of time) for measuring +the duration of a particular section (excluding actions - see below for more +information), you could couple instant events with the global `now()` function +to measure the duration yourself: ```python def _impl(ctx): @@ -45,11 +55,20 @@ my_script = bxl_main( **Measuring time for actions and ensuring artifacts** -You cannot use `now()` to measure the time it takes to run actions and ensure artifacts because these processes occur asynchronously outside of the BXL script execution. For BXL user telemetry, we emit action events via the buck2 core automatically. Events around ensuring the artifacts are not emitted currently, but will be added soon. +You cannot use `now()` to measure the time it takes to run actions and ensure +artifacts because these processes occur asynchronously outside of the BXL script +execution. For BXL user telemetry, we emit action events via the buck2 core +automatically. Events around ensuring the artifacts are not emitted currently, +but will be added soon. ### User event log -To write to your own event log when running BXL, you can run your BXL command with the `--user-event-log` flag to tell buck2 where to write the events to. Buck2 is aware of the following file extensions: `.json-lines`, `json-lines.zst`, `.json-lines.gz`, and will compress the files automatically for you depending on the extension. 
If the extension is not one of these, the logs will always be written in JSONL format, uncompressed.
+To write to your own event log when running BXL, you can run your BXL command
+with the `--user-event-log` flag to tell buck2 where to write the events to.
+Buck2 is aware of the following file extensions: `.json-lines`,
+`.json-lines.zst`, `.json-lines.gz`, and will compress the files automatically
+for you depending on the extension. If the extension is not one of these, the
+logs will always be written in JSONL format, uncompressed.

Example:

@@ -57,19 +76,31 @@ Example:
buck2 bxl path//to/my_script/script.bxl:my_script --user-event-log my_file.json-lines.gz
```

-When using this flag to write to a custom event log, it is up to you to clean up these log files. In addition, if the same filename is used with subsequent BXL invocations, events are always appended to the existing file contents, which is the same behavior as `buck2 --event-log `. If you tell buck2 to write to a compressed file, you are responsible for decompressing them.
+When using this flag to write to a custom event log, it is up to you to clean up
+these log files. In addition, if the same filename is used with subsequent BXL
+invocations, events are always appended to the existing file contents, which is
+the same behavior as `buck2 --event-log <path>`. If you tell buck2
+to write to a compressed file, you are responsible for decompressing them.

### Getting a user event log from a normal event log

-`buck2 log show-user` can be used to convert a normal event log (regardless of encoding/compression) to a user event. Similar to `buck2 log show`, you can choose the most recent invocation, or the nth invocation, or provide a path to the normal user event log. Note that user event logs are not able to be passed into `buck2 log show` or `buck2 log show-user`.
+`buck2 log show-user` can be used to convert a normal event log (regardless of
+encoding/compression) to a user event log. Similar to `buck2 log show`, you can
+choose the most recent invocation, or the nth invocation, or provide a path to
+the normal event log. Note that user event logs cannot be passed back into
+`buck2 log show` or `buck2 log show-user`.

### Event log output

-The first line of your event log will always be the invocation record, which contains useful things like command line args used, working directory, etc. The subsequent lines are either instant events and/or action events, depending on your BXL script's contents.
+The first line of your event log will always be the invocation record, which
+contains useful things like command line args used, working directory, etc. The
+subsequent lines are either instant events and/or action events, depending on
+your BXL script's contents.

**Instant event**

Sample:
+
```python
{
    "StarlarkUserEvent": {
@@ -93,6 +124,7 @@ Sample:
```

**Action event**
+
```python
{
    "ActionExecutionEvent": {
@@ -112,17 +144,20 @@ Sample:
```

`execution_kind` includes:
-* Local: action was executed locally
-* Remote: action was executed via a remote executor
-* ActionCache: action was served by the action cache and not executed
-* Simple: action is simple and executed inline within buck2 (ex: write, symlink_dir)
-* Skipped: action was not executed at all
-* Deferred: action logically executed, but didn't do all the work
-* LocalDepFile: action was served by the local dep file cache and not executed.
-* LocalWorker: action was executed via a local worker -* NotSet: action execution kind was not set + +- Local: action was executed locally +- Remote: action was executed via a remote executor +- ActionCache: action was served by the action cache and not executed +- Simple: action is simple and executed inline within buck2 (ex: write, + symlink_dir) +- Skipped: action was not executed at all +- Deferred: action logically executed, but didn't do all the work +- LocalDepFile: action was served by the local dep file cache and not executed. +- LocalWorker: action was executed via a local worker +- NotSet: action execution kind was not set **Ensure artifact event** + ```python { "BxlEnsureArtifactsEvent": { diff --git a/docs/developers/options.md b/docs/developers/options.md index c63aa7d7320de..e8eada0f95efb 100644 --- a/docs/developers/options.md +++ b/docs/developers/options.md @@ -4,7 +4,7 @@ Buck 2 introduces some options that don't exist in v1 and are accessed in the root cell: - `project.watchman_merge_base`: defines the merge base to use for SCM-aware - queries to Watchman. This is read when the daemon starts and cannot be - changed later without a restart. -- `test.v2_test_executor`: defines the program to invoke as the test executor - in `buck test`. This is read every time a test command executes. + queries to Watchman. This is read when the daemon starts and cannot be changed + later without a restart. +- `test.v2_test_executor`: defines the program to invoke as the test executor in + `buck test`. This is read every time a test command executes. diff --git a/docs/developers/parity_script.md b/docs/developers/parity_script.md deleted file mode 100644 index 313e31c9dc38a..0000000000000 --- a/docs/developers/parity_script.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -id: parity_script -title: Parity Testing ---- - -This page covers the parity testing/command replay script found in `scripts/buck_replay/main.py`. - -## Overview - -The `buck_replay` script is meant to test parity between v1 and v2 implementations of commands by querying for logs of the repo and execution state (args, directory) of v2 command invocations, reproducing it locally, making the necessary conversions from v2 to v1 args, and then running both versions of the command so output can be checked/compared. - -When output differs/parity testing fails, the results are logged into a Scuba table for future reference/analysis. - -## Flags - -The following is a list of arguments/flags currently supported by the list: - -* `--verbose` - supplying this turns on debug logging. By default, the replay script logs updates on script progress and any errors that happen. When `--verbose` is given, debug logging will also provide updates on commit and directory changes while parity testing. -* `--dry-run` - toggles logging to a test Scuba table instead of the production one. Useful if you're making edits/testing the script itself. -* `--epoch` - the time after which to query Scuba for logs of commands for, as a Unix timestamp. If not supplied, it defaults to the last 24 hours. -* `--limit` - limits the number of rows queried from Scuba. The default limit is 100000 rows. - -## Running the script - -The script can be run with buck: - -```shell -buck run //buck2/scripts/buck_replay:buck_replay -``` - -Example with flags: - -```shell -buck run //buck2/scripts/buck_replay:buck_replay -- --verbose --dry-run --epoch 1626739329 --limit 100000 -``` - -## Development - -The script does not yet support commands beyond `audit config`. 
Because of differences in flags (different names, new/dropped flags, and so on) in v1 and v2 implementations of commands, there needs to be some conversion when going from one set of arguments to the other. As such, support for a command requires the implementation of a `Command` class for that command, and with it, several methods: - -* `format_common_args`, `format_args_v1`, `format_args_v2` - to format the flags/arguments in common between the v1 and v2 versions of a command as well as the ones specific to v1 and v2, respectively. -* `run_v1` and `run_v2` - meant to run the v1 and v2 commands and capture the relevant output. -* `test_parity` - meant to compare the output the v1 and v2 outputs (note that the standard for what's "equal" may change between commands) and log whatever is necessary. - -You can also work on features surrounding the replay script; specifically, adding logging to more commands (since in v2 only `audit config` logging is supported) and Ingress tailer support (currently command logging is handled by `CommandReporterProcessor`). diff --git a/docs/developers/request_for_comments.md b/docs/developers/request_for_comments.md index dd22a0f82c2c4..e707bfb292c16 100644 --- a/docs/developers/request_for_comments.md +++ b/docs/developers/request_for_comments.md @@ -7,18 +7,18 @@ Following are Request for Comments (RFCs) at specific stages of the lifecycle. ### Drafts -* [@configuration syntax](rfcs/drafts/configuration-at-syntax.md) -* [bxl actions and Build API](rfcs/drafts/bxl-actions.md) -* [Digest Kinds](rfcs/drafts/digest-kinds.md) -* [labels -> metadata attribute](rfcs/attr-metadata.md) +- [@configuration syntax](rfcs/drafts/configuration-at-syntax.md) +- [bxl actions and Build API](rfcs/drafts/bxl-actions.md) +- [Digest Kinds](rfcs/drafts/digest-kinds.md) +- [labels -> metadata attribute](rfcs/attr-metadata.md) ### Accepted -* [configured_alias](rfcs/configured-alias.md) -* [Buck Extension Language (BXL)](rfcs/bxl.md) -* [Bxl Support for performing analysis on targets](rfcs/bxl-analysis.md) -* [Package-local values](rfcs/package-local-values.md) +- [configured_alias](rfcs/configured-alias.md) +- [Buck Extension Language (BXL)](rfcs/bxl.md) +- [Bxl Support for performing analysis on targets](rfcs/bxl-analysis.md) +- [Package-local values](rfcs/package-local-values.md) ### Implemented -* [ProviderCollection[]](rfcs/implemented/provider-collection-at.md) +- [ProviderCollection[]](rfcs/implemented/provider-collection-at.md) diff --git a/docs/developers/starlark/spec.md b/docs/developers/starlark/spec.md deleted file mode 100644 index 31160e90e9f9b..0000000000000 --- a/docs/developers/starlark/spec.md +++ /dev/null @@ -1,3 +0,0 @@ -# Starlark Language Specification - -The Starlark language spec can be found in the [Bazel GitHub repository](https://github.com/bazelbuild/starlark/blob/master/spec.md). diff --git a/docs/developers/what-ran.md b/docs/developers/what-ran.md index d7cb0848d238d..154fbcbb58ea3 100644 --- a/docs/developers/what-ran.md +++ b/docs/developers/what-ran.md @@ -3,13 +3,18 @@ id: what-ran title: Finding Commands That Buck2 Ran --- -Buck2 logs all the commands it runs. So, after you've run a build, you can query Buck2 to get access to the exact command it used. +import { FbInternalOnly, OssOnly } from +'docusaurus-plugin-internaldocs-fb/internal'; + +Buck2 logs all the commands it runs. So, after you've run a build, you can query +Buck2 to get access to the exact command it used. To do so, do your build as normal, then run `buck2 log what-ran`. 
## What Ran output format

-This will output a table showing all the commands that were executed, and how they were executed.
+This will output a table showing all the commands that were executed, and how
+they were executed.

The structure is as follows:

@@ -19,22 +24,30 @@ REASON TARGET IDENTIFIER EXECUTOR REPRODUCER

Which should be used as follows:

-* REASON - value is either `build` (for building a thing) or `test` (for running a test).
-* TARGET - the name of the build target that declared an action.
-* IDENTIFIER - depends on the target but will usually be something like a file name or a module.
-* EXECUTOR - value is either `cache`, `re` or `local`.
-* REPRODUCER - how you can re-run this yourself.
+- REASON - value is either `build` (for building a thing) or `test` (for running
+  a test).
+- TARGET - the name of the build target that declared an action.
+- IDENTIFIER - depends on the target but will usually be something like a file
+  name or a module.
+- EXECUTOR - value is either `cache`, `re` or `local`.
+- REPRODUCER - how you can re-run this yourself.

## Using the What Ran output

Use What Ran as follows:

-* Start by identifying the command you're looking for:
-  * You can grep the output for a given target.
-  * You can then grep by identifier if necessary. For example, if you're after C++ compilation, try grepping for the basename of your file (for example, for `fbcode/my/stuff.cpp`, grep for `stuff.cpp`).
-* Once you found it, reproduce as follows:
-  * If the executor was `local`, the command is in the output, so just run it. It's expected that you'll do this from the root of your project (use `buck2 root --kind project` to find where that is).
-  * If the executor was `re` or `cache`, you're provided a RE digest of the form `HASH:SIZE`. Run `frecli cas download-action HASH:SIZE` to retrieve the action, then follow the instructions to run it.
+- Start by identifying the command you're looking for:
+  - You can grep the output for a given target.
+  - You can then grep by identifier if necessary. For example, if you're after
+    C++ compilation, try grepping for the basename of your file (for example,
+    for `fbcode/my/stuff.cpp`, grep for `stuff.cpp`).
+- Once you've found it, reproduce as follows:
+  - If the executor was `local`, the command is in the output, so just run it.
+    It's expected that you'll do this from the root of your project (use
+    `buck2 root --kind project` to find where that is).
+  - If the executor was `re` or `cache`, you're provided a RE digest of the form
+    `HASH:SIZE`. Run `frecli cas download-action HASH:SIZE` to retrieve the
+    action, then follow the instructions to run it.

## Examples

@@ -56,14 +69,22 @@ The following ran on RE:
build fbcode//common/init:kill (cxx_compile Kill.cpp (pic)) re 97feca9d014155a80ec55fe27e6bb17f9d2f8574:94
```

+<FbInternalOnly>
+
To repro, you'd run:

```bash
frecli cas download-action 97feca9d014155a80ec55fe27e6bb17f9d2f8574:94
```

+</FbInternalOnly>
+
+<OssOnly>
+
+Reproducing this command will depend on the particular RE implementation you
+use.
+
+</OssOnly>

## Expired Digests

-Note that if the action was a cache hit on RE, you might get an error when downloading it, indicating that it's not found. If that happens, it's because the cache entry is there but the inputs have expired.
+Note that if the action was a cache hit on RE, you might get an error when
+downloading it, indicating that it's not found. If that happens, it's because
+the cache entry is there but the inputs have expired.

If this happens to you, run your build with `--upload-all-actions`.
diff --git a/docs/developers/windows_cheat_sheet.md b/docs/developers/windows_cheat_sheet.md
index 6796fe6f879eb..13eca58d8e4c9 100644
--- a/docs/developers/windows_cheat_sheet.md
+++ b/docs/developers/windows_cheat_sheet.md
@@ -3,33 +3,36 @@ id: windows_cheat_sheet
title: Windows Cheat Sheet
---

-This page contains notes and tips to assist you in understanding the different tools used when migrating Buck2 to Windows.
+This page contains notes and tips to assist you in understanding the different
+tools used when migrating Buck2 to Windows.

## CMD, Powershell, Bash Command Comparison

-| Bash               | Powershell         | CMD              | What does it do                   |
-|--------------------|--------------------|------------------|-----------------------------------|
-| cd                 | cd                 | cd               | Change the current directory      |
-| mkdir              | mkdir              | mkdir / md       | Create a directory                |
-| ls                 | ls                 | dir              | List contents of a directory      |
-| export var="value" | $env:var="value"   | set var=value    | To set environment variables      |
-| $ENV_VAR           | $env:ENV_VAR       | %ENV_VAR%        | Read environment variable         |
-| echo "Hello world" | echo "Hello world" | echo Hello world | To print something on the screen  |
-| rm                 | rm                 | del              | Delete a file                     |
-| rm -rf             | rmdir              | rmdir            | Delete a directory                |
-| cat                | cat                | type             | Print file content to console     |
+| Bash               | Powershell         | CMD              | What does it do                  |
+| ------------------ | ------------------ | ---------------- | -------------------------------- |
+| cd                 | cd                 | cd               | Change the current directory     |
+| mkdir              | mkdir              | mkdir / md       | Create a directory               |
+| ls                 | ls                 | dir              | List contents of a directory     |
+| export var="value" | $env:var="value"   | set var=value    | To set environment variables     |
+| $ENV_VAR           | $env:ENV_VAR       | %ENV_VAR%        | Read environment variable        |
+| echo "Hello world" | echo "Hello world" | echo Hello world | To print something on the screen |
+| rm                 | rm                 | del              | Delete a file                    |
+| rm -rf             | rmdir              | rmdir            | Delete a directory               |
+| cat                | cat                | type             | Print file content to console    |

## Symlinks

In Windows, there are two types of symlinks: file and directory.

-You can find out which type of symlink is being created using: `dir /AL /S `.
+You can find out which type of symlink is being created using:
+`dir /AL /S <path>`.

The command lists all of the symbolic links in the `<path>` directory:

-* `^` is a Directory SymLink
-* `^` is a File SymLink
+- `<SYMLINKD>` is a Directory SymLink
+- `<SYMLINK>` is a File SymLink

## Target names

-Escaping the '=' symbol on Windows is quite complicated: make sure none of the targets being built contain this symbol as it could cause build breakages.
+Escaping the '=' symbol on Windows is quite complicated: make sure none of the
+targets being built contain this symbol as it could cause build breakages.
diff --git a/docs/getting_started.md b/docs/getting_started.md
deleted file mode 100644
index 0f4bf3df7c222..0000000000000
--- a/docs/getting_started.md
+++ /dev/null
@@ -1,199 +0,0 @@
----
-id: getting_started
-title: Getting Started
----
-
-## Installing Buck2
-
-The latest set of `buck2` executables can be found under the [`latest` release page](https://github.com/facebook/buck2/releases/tag/latest).
- -To get started, first install [rustup](https://rustup.rs/), then compile the `buck2` executable: - -```bash -rustup install nightly-2023-07-10 -cargo +nightly-2023-07-10 install --git https://github.com/facebook/buck2.git buck2 -``` - -The above commands install `buck2` into a suitable directory, such as `$HOME/.cargo/bin`, which you should then add to your `$PATH`: - -Linux / macOS -```sh -export PATH=$HOME/.cargo/bin:$PATH -``` -Windows Powershell -```powershell -$Env:PATH += ";$HOME\.cargo\bin" -``` -With Buck2 installed, you can build projects with `buck2`! - -### Windows configuration - -Some of our rules use symlinks, which are disabled by default for non-admin Windows users. You can fix that by [enabling Developer Mode](https://pureinfotech.com/enable-developer-mode-windows-11/). - -## Compiling your first project - -This section covers the building of a ['hello_world' example project](https://github.com/facebook/buck2/tree/main/examples/hello_world) that contains a simple C++ binary. If you are interested in seeing how other languages can be built, take a look at the [prelude example project](https://github.com/facebook/buck2/tree/main/examples/with_prelude), which contains Rust, C++, Python, and OCaml targets. - -First, clone the buck2 repository and cd into the 'hello_world' project: - -```bash -git clone https://github.com/facebookincubator/buck2.git -cd buck2/examples/hello_world -``` - - `buck2 init --git` is all the setup you need to start building. This will use git submodule to pull [buck2-prelude](https://github.com/facebook/buck2-prelude) into your project: - -```sh -buck2 init --git -``` - -To use another version control system, run `buck2 init` and manually download [buck2-prelude](https://github.com/facebookincubator/buck2-prelude) into `prelude` at root. -```sh -buck2 init -``` - -To build the entire project, run: - -Note: _Requires clang and lld to be in the path_ -```sh -buck2 build //... -``` - -Note that this uses a [simple C++ toolchain](https://github.com/facebook/buck2/blob/main/prelude/toolchains/cxx.bzl) that requires a recent version of `clang` to be installed on your system. This can be installed with any package manager (ex. `apt install clang`, `xcode-select --install` on macOS, `choco install llvm`). -After installing any external tools or changing your `PATH`, run `buck2 kill` before running a build. - - -To list all targets available in the project, run: - -```sh -buck2 targets //... -``` - -To run the main C++ binary, run: - -```sh -buck2 run //:main -``` - -The newly built binary can be found with the `--show-output` flag: - -```sh -buck2 build //:main --show-output -``` - -Output: - -```sh -Build ID: 0e890477-5b7f-4829-9ffe-662e572320a0 -Jobs completed: 3. Time elapsed: 0.0s. -BUILD SUCCEEDED -root//:main buck-out/v2/gen/root/9f4d83578bb24895/__main__/main -``` - -## Creating your first hello_world project - -This section demonstrates how to create a simple C++ 'hello_world' project. - -To get started, make a new folder for your project and cd into it. - -```sh -mkdir hello_world -cd hello_world -``` - -Next, run `buck2 init --git` to initialize the project. This command will set up your project with `git` and pull in [buck2-prelude](https://github.com/facebook/buck2-prelude) as a submodule. Additionally, it will generate multiple files with default values. - -```sh -buck2 init --git -``` - -Next, add the source code `main.cpp` , - -```c++ -#include -int main() { - std::cout << "Hello from a C++ Buck2 program!" 
<< std::endl; -} -``` - -Then, define a `cxx_binary` in the root `BUCK` file: - -```Python -# BUCK -cxx_binary( - name = "main", - srcs = ["main.cpp"], - link_style = "static", -) -``` - -If you try to build `//:main` at this point, you'll see an error about `buck2` not being able to find `toolchains//:cxx`. - -The final step is to define the necessary toolchain targets. For that project, you need `system_cxx_toolchain` and `system_python_bootstrap_toolchain`, which will pick up the necessary tools (clang++, python, and so on) from the system. - -```Python -# toolchains/BUCK -load("@prelude//toolchains:cxx.bzl", "system_cxx_toolchain") -load("@prelude//toolchains:python.bzl", "system_python_bootstrap_toolchain") - -system_cxx_toolchain( - name = "cxx", - visibility = ["PUBLIC"], -) - -system_python_bootstrap_toolchain( - name = "python_bootstrap", - visibility = ["PUBLIC"], -) -``` - -At this point, your project should have the following files: - -```bash -$ tree -a -I "buck-out|prelude|.git" -|-- .buckconfig -|-- .gitmodules -|-- BUCK -|-- main.cpp -`-- toolchains - `-- BUCK -``` - -Now, you're ready to see the build in action. - -To build the main C++ target, run: - -```sh -buck2 build //:main -``` - -To run the main C++ target, run: - -```sh -buck2 run //:main -``` - -In summary, a `buck2` project requires: -1. A `.buckconfig` file in the root which has a `[repositories]` section listing out [cells](https://buck2.build/docs/concepts/glossary/#cell) -2. A `prelude` directory, which contains a collection of [rules](https://buck2.build/docs/concepts/glossary/#rule) of your choice. `buck2 init` will pull in the [buck2-prelude](https://github.com/facebook/buck2-prelude.git) as a git submodule by default -3. If using the [buck2-prelude](https://github.com/facebook/buck2-prelude.git), a `toolchains` directory that declares relevant toolchains. We provide some basic toolchains in [prelude/toolchains](https://github.com/facebook/buck2/tree/main/prelude/toolchains) -4. `BUCK` files that specify targets for your project - -`buck2 init --git` will generate all of these with reasonable default values. - -## Learning More - -You should now be ready to explore Buck2 for use in your own projects. You can explore the [examples](https://github.com/facebook/buck2/tree/main/examples) folder. Look out for more tutorials in the future. - - - -## Communication channels - -The following channels provide an insight into Buck2: - -* [Buck2 Engineering](https://fb.workplace.com/groups/buck2prototyping) - Workplace group for discussions about what features Buck2 should have, how it's going, status updates, and much more. -* [Buck2 Users](https://fb.workplace.com/groups/buck2users) - Workplace group featuring questions from users and reports of bugs. -* [Buck2 Rule Authors](https://fb.workplace.com/groups/347532827186692) - Workplace group for discussions about language-specific rules. -* [Buck2 Oncall Hub](https://www.internalfb.com/intern/monitor/oncall_profile?oncall=buck2) - urgent tasks and escalation. - - diff --git a/docs/index.md b/docs/index.md index 1df72991d62f6..313caba7d31e1 100644 --- a/docs/index.md +++ b/docs/index.md @@ -3,104 +3,141 @@ id: index title: Introduction --- -Welcome to Buck2, a large scale, fast, reliable, and extensible build tool developed and used by Meta. Buck2 supports a variety of languages on many platforms. +import { FbInternalOnly } from 'docusaurus-plugin-internaldocs-fb/internal'; -Buck2's core is written in [Rust](https://www.rust-lang.org/). 
[Starlark](https://github.com/bazelbuild/starlark), which is a deterministic, immutable dialect of Python, is used to extend the Buck2 build system, enabling Buck2 to be language-agnostic. With Starlark, users can define their own custom rules.
+Welcome to Buck2, a large-scale, fast, reliable, and extensible build tool
+developed and used by Meta. Buck2 supports a variety of languages on many
+platforms.

-Buck2 leverages the Bazel spec of [Remote Build Execution](https://bazel.build/remote/rbe) as the primary means of parallelization and caching, which increases the importance of idempotency (no matter how many times an operation is performed, it yields the same result) and hermeticity (code is sealed off from the world), giving the right results, reliably.
+Buck2's core is written in [Rust](https://www.rust-lang.org/).
+[Starlark](https://github.com/bazelbuild/starlark), which is a deterministic,
+immutable dialect of Python, is used to extend the Buck2 build system, enabling
+Buck2 to be language-agnostic. With Starlark, users can define their own custom
+rules.

-Buck2 multi-language support includes C++, Python, Java, Go, Rust, Erlang, OCaml, and more.
+Buck2 leverages the Bazel spec of
+[Remote Build Execution](https://bazel.build/remote/rbe) as the primary means of
+parallelization and caching, which increases the importance of idempotency (no
+matter how many times an operation is performed, it yields the same result) and
+hermeticity (code is sealed off from the world), giving the right results,
+reliably.

-The following sub-sections contain a list of links to key points in the Buck2 Documentation website that explain the advantages of using Buck2 for you and your team.
+Buck2's multi-language support includes C++, Python, Java, Go, Rust, Erlang,
+OCaml, and more.
+
+The following sub-sections contain a list of links to key points in the Buck2
+Documentation website that explain the advantages of using Buck2 for you and
+your team.

## Buck2 Documentation Website Links

### For end users

-* [Getting Started](getting_started.md) - how to get started with using Buck2.
-* [Benefits](benefits.md) - the benefits of using Buck2.
+- [Getting Started](about/getting_started.md) - how to get started with using
+  Buck2.
+- [Benefits](about/benefits/compared_to_buck1.md) - the benefits of using Buck2.

-* [Migration Guide](users/migration_guide.fb.md) - how to port projects from Buck to Buck2, including the issues you might face and notable differences.
-* [Buck2 and Build Observability](users/build_observability/observability.fb.md) - how to use Buck2's datasets to analyze specific invocations or classes of invocations.
-* [Migrating builds to work VPNless](users/advanced/vpnless.fb.md) - how to migrate builds to work without VPN or lighthouse access.
+- [Migration Guide](users/migration_guide.fb.md) - how to port projects from
+  Buck to Buck2, including the issues you might face and notable differences.
+- [Buck2 and Build Observability](users/build_observability/observability.fb.md) -
+  how to use Buck2's datasets to analyze specific invocations or classes of
+  invocations.
+- [Migrating builds to work VPNless](users/advanced/vpnless.fb.md) - how to
+  migrate builds to work without VPN or lighthouse access.

### For people writing rules

-* [Writing Rules](rule_authors/writing_rules.md) - how to write rules to support new languages.
-* [Rule APIs](rule_authors/rule_api.md) - gives the API available when writing rules.
-* [Starlark Types](https://github.com/facebookexperimental/starlark-rust/blob/main/docs/types.md) - rules are written in Starlark (which is approximately Python), but our version adds types. +- [Writing Rules](rule_authors/writing_rules.md) - how to write rules to support + new languages. +- [Build APIs](api/build) - documentation for the APIs available when writing + rules. +- [Starlark Types](https://github.com/facebook/starlark-rust/blob/main/docs/types.md) - + rules are written in Starlark (which is approximately Python), but our version + adds types. -* [Rule Writing Tips](rule_authors/rule_writing_tips.fb.md) - tips for migrating rules from Buck1 to Buck2. +- [Rule Writing Tips](rule_authors/rule_writing_tips.fb.md) - tips for migrating + rules from Buck1 to Buck2. ### For people integrating with Buck2 -* [Extending Buck via BXL](developers/bxl.md) - powerful Starlark scripts for introspection of Buck2's graphs. -* [Reindeer](https://github.com/facebookincubator/reindeer) - a set of tools for importing Rust crates from crates.io, git repos etc and generating a BUCK file for using them. -* [ocaml-scripts](https://github.com/facebook/ocaml-scripts) - scripts to generate a BUCK file enabling the use of OCaml packages from an OPAM switch. +- [Extending Buck via BXL](developers/bxl.md) - powerful Starlark scripts for + introspection of Buck2's graphs. +- [Buck2 change detector](https://github.com/facebookincubator/buck2-change-detector) - + tools for building a CI that only builds/tests what has changed in diff/PR. +- [Buck2 GitHub actions installer](https://github.com/dtolnay/install-buck2) - + script to make GitHub CI with Buck2 easier. +- [Reindeer](https://github.com/facebookincubator/reindeer) - a set of tools for + importing Rust crates from crates.io, git repos etc and generating a BUCK file + for using them. +- [ocaml-scripts](https://github.com/facebook/ocaml-scripts) - scripts to + generate a BUCK file enabling the use of OCaml packages from an OPAM switch. +- [Buckle](https://github.com/benbrittain/buckle) - a launcher for Buck2 on a + per-project basis. Enables a project or team to do seamless upgrades of their + build system tooling. ### External articles about Buck2 -* [Introducing Buck2](https://engineering.fb.com/2023/04/06/open-source/buck2-open-source-large-scale-build-system/) - our initial introduction when we open sourced Buck2. -* [Reddit AMA](https://old.reddit.com/r/rust/comments/136qs44/hello_rrust_we_are_meta_engineers_who_created_the/) where the Buck2 team answered a number of questions. -* [Using buck to build Rust projects](https://steveklabnik.com/writing/using-buck-to-build-rust-projects) - working through an initial small Rust project, by [Steve Klabnik](https://steveklabnik.com/). Followed up by [building from crates.io](https://steveklabnik.com/writing/using-cratesio-with-buck) and [updating Buck2](https://steveklabnik.com/writing/updating-buck). -* [Awesome Buck2](https://github.com/sluongng/awesome-buck2) is a collection of resources about Buck2. -* [Buck2 Unboxing](https://www.buildbuddy.io/blog/buck2-review/) is a general review of Buck2 by [Son Luong Ngoc](https://github.com/sluongng/). -* [A tour around Buck2](https://www.tweag.io/blog/2023-07-06-buck2/) gives an overview of Buck2 and how it differs from Bazel. +- [Introducing Buck2](https://engineering.fb.com/2023/04/06/open-source/buck2-open-source-large-scale-build-system/) - + our initial introduction when we open sourced Buck2. 
+- [Reddit AMA](https://old.reddit.com/r/rust/comments/136qs44/hello_rrust_we_are_meta_engineers_who_created_the/)
+  where the Buck2 team answered a number of questions.
+- [Using buck to build Rust projects](https://steveklabnik.com/writing/using-buck-to-build-rust-projects) -
+  working through an initial small Rust project, by
+  [Steve Klabnik](https://steveklabnik.com/). Followed up by
+  [building from crates.io](https://steveklabnik.com/writing/using-cratesio-with-buck)
+  and [updating Buck2](https://steveklabnik.com/writing/updating-buck).
+- [Awesome Buck2](https://github.com/sluongng/awesome-buck2) is a collection of
+  resources about Buck2.
+- [Buck2 Unboxing](https://www.buildbuddy.io/blog/buck2-review/) is a general
+  review of Buck2 by [Son Luong Ngoc](https://github.com/sluongng/).
+- [A tour around Buck2](https://www.tweag.io/blog/2023-07-06-buck2/) gives an
+  overview of Buck2 and how it differs from Bazel.
+
+### External videos about Buck2
+
+- [Accelerating builds with Buck2](https://www.youtube.com/watch?v=oMIzKVxUNAE)
+  Neil talks about why Buck2 is fast.
+- [Buck2: optimizations & dynamic dependencies](https://www.youtube.com/watch?v=EQfVu42KwDs)
+  Neil and Chris talk about why Buck2 is fast and some of the advanced
+  dependency features.
+- [Building Erlang with Buck2](https://www.youtube.com/watch?v=4ALgsBqNBhQ)
+  Andreas talks about building WhatsApp with Buck2.
+- [antlir2: Deterministic image builds with Buck2](https://www.youtube.com/watch?v=Wv-ilbckSx4)
+  talks about layering a packaging system over Buck2.

### External projects using Buck2

-* [System Initiative](https://www.systeminit.com/) build their DevOps product [using Buck2](https://nickgerace.dev/post/system-initiative-the-second-wave-of-devops/#under-the-hood), with their own custom prelude.
-* [Rust `cxx` library](https://github.com/dtolnay/cxx) has examples and tests with a wide variety of build systems, including Buck2.
-* [`ocamlrep` library](https://github.com/facebook/ocamlrep) allows for interop between OCaml and Rust code, and can be [built with Buck2](https://github.com/facebook/ocamlrep/blob/main/README-BUCK.md).
-* [`buck2-nix`](https://github.com/thoughtpolice/buck2-nix) is an experiment to integrate Buck2, [Sapling](https://sapling-scm.com) and [Nix](https://nixos.org) together in a harmonious way.
-
-Feel free to [send a PR](https://github.com/facebook/buck2/edit/main/docs/index.md) adding your project.
+- [System Initiative](https://www.systeminit.com/) build their DevOps product
+  [using Buck2](https://nickgerace.dev/post/system-initiative-the-second-wave-of-devops/#under-the-hood),
+  with their own custom prelude.
+- [Rust `cxx` library](https://github.com/dtolnay/cxx) has examples and tests
+  with a wide variety of build systems, including Buck2.
+- [`ocamlrep` library](https://github.com/facebook/ocamlrep) allows for interop
+  between OCaml and Rust code, and can be
+  [built with Buck2](https://github.com/facebook/ocamlrep/blob/main/README-BUCK.md).
+- [`buck2-nix`](https://github.com/thoughtpolice/buck2-nix) is an experiment to
+  integrate Buck2, [Sapling](https://sapling-scm.com) and
+  [Nix](https://nixos.org) together in a harmonious way.
+
+Feel free to
+[send a PR](https://github.com/facebook/buck2/edit/main/docs/index.md) adding
+your project.

### For people developing Buck2

-* [Basic README](https://www.internalfb.com/code/fbsource/fbcode/buck2/README.md) - how to get started, compile Buck2 and the basic workflows.
-* [Notes for Developers](developers/developers.fb.md) - more advanced workflows and notes around debugging, profiling etc. - -## Specialised groups - -We have Workplace groups and task tags for various projects. Most task folders are *not monitored*, so post all questions and bug reports to a Workplace group. - -### Workplace groups - -* [Admarket](https://fb.workplace.com/groups/2011248092366093) - collaboration between Admarket, DevX and Build Infra teams in their effort to migrate Admarket to Buck2. -* [Android](https://fb.workplace.com/groups/4318511658259181) - discussions on anything related to the migration of fbandroid to Buck2. -* [Apple](https://fb.workplace.com/groups/305599448025888/) - discussions related to the migration of fbobjc to Buck2. -* [Fbcode TD](https://fb.workplace.com/groups/603286664133355/) - migrations for TDs, including fbcode, mobile, and rl TDs, as well as UTD. -* [Fbcode](https://fb.workplace.com/groups/1080276222750085) - collaboration between fbcode teams, DevX and Build Infra in their effort to migrate fbcode services to Buck2. -* [Hack](https://fb.workplace.com/groups/496546384752884) - discussions, ideas, updates, and more as we move Hack to Buck2. -* [Haskell](https://fb.workplace.com/groups/202582585277200/) - discussions, ideas, updates, and more as we move Haskell to Buck2. -* [Infer](https://fb.workplace.com/groups/601798364244831/) - discussions related to ideas, bugs, jobs, and feedback on Infer. -* [Open source](https://fb.workplace.com/groups/3434452653448246) - people particularly enthusiastic about open sourcing Buck2. -* [Reality labs](https://fb.workplace.com/groups/930797200910874/) - unmoderated non-support group for talking about arvr's integration and onboarding to Buck2. -* [Shots](https://fb.workplace.com/groups/4899204743424118) - Shots engineers who are experimenting with Buck2. -* [Tpx](https://fb.workplace.com/groups/900436963938958/) - Buck2/Tpx coordination group. -* [Unicorn](https://fb.workplace.com/groups/503973410692177) - collaboration between Unicorn, DevX and Build Infra teams in their effort to migrate Unicorn to Buck2. -* [WhatsApp](https://fb.workplace.com/groups/whatsapp.buck2) - Buck2 in the WhatsApp server. -* [Windows](https://fb.workplace.com/groups/580747310463852/) - discussions related to Buck2 on Windows. - -### Task folders - -* [Admarket on Buck V2](https://www.internalfb.com/tasks?q=163089765955500) -* [Apple Build Infra](https://www.internalfb.com/tasks?q=1710478139132259) -* [Buck2](https://www.internalfb.com/tasks?q=446583836738538) -* [DICE - BuckV2](https://www.internalfb.com/tasks?q=413466250534831) -* [Eden on Buck V2](https://www.internalfb.com/tasks?q=406698320868619) -* [FbCode TD on Buck2](https://www.internalfb.com/tasks?q=980682532796984) -* [Unicorn on Buck V2](https://www.internalfb.com/tasks?q=262220628906648) +- [Basic README](https://www.internalfb.com/code/fbsource/fbcode/buck2/README.md) - + how to get started, compile Buck2 and the basic workflows. +- [Notes for Developers](developers/developers.fb.md) - more advanced workflows + and notes around debugging, profiling etc. diff --git a/docs/legacy/README.md b/docs/legacy/README.md deleted file mode 100644 index ca5109e4f5588..0000000000000 --- a/docs/legacy/README.md +++ /dev/null @@ -1,31 +0,0 @@ -# buck 1 Documentation Import -This folder contains documentation pulled from the Buck 1 website at https://buck.build/. 
-
-The folders include the following:
-```
-├── README.md
-├── about
-│   ├── faq.md
-│   ├── fast.md
-│   ├── performance.md
-│   └── troubleshooting.md
-├── basics
-│   ├── cheatsheet.md
-│   ├── getting-started.md
-│   ├── key-concepts
-│   └── tutorials.md
-├── concepts
-│   ├── buck-daemon.md
-│   ├── build-file.md
-│   ├── build-rule.md
-│   ├── build-target-pattern.md
-│   ├── build-target.md
-│   ├── skylark.md
-│   └── visibility.md
-└── files-and-directories
-    ├── buck-out.md
-    └── dot-buckconfig.md
-
-```
-Feel free to contact Brian Johnson (brianjo) with any questions
-for further cleanup of this documentation.
diff --git a/docs/legacy/about/faq.md b/docs/legacy/about/faq.md
deleted file mode 100644
index bdf68a1b0e732..0000000000000
--- a/docs/legacy/about/faq.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# FAQ
-
-#### **Q: Why is it called Buck?**
-
-A: The word "buck" is similar to the word "build" and is quick to type. It also has awesome mascot potential.
diff --git a/docs/legacy/about/fast.md b/docs/legacy/about/fast.md
deleted file mode 100644
index 038e0e007633b..0000000000000
--- a/docs/legacy/about/fast.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# What Makes Buck so Fast?
-
-Buck exploits a number of strategies to reduce build times.
-
-## Buck builds dependencies in parallel
-
-Buck is designed so that any input files required by a [build target](https://buck.build/concept/build_target.html) must be specified in the [build rule](https://buck.build/concept/build_rule.html) for that target. Therefore, we know that the directed acyclic graph [(DAG)](http://en.wikipedia.org/wiki/Directed_acyclic_graph) that Buck constructs from the build rule is an accurate reflection of the build's dependencies, and that once a rule's dependencies are satisfied, the target for that rule can be built.
-Having a DAG makes it straightforward for rules to be built in parallel, which can dramatically reduce build times. Buck starts with the leaf nodes of the graph, that is, targets that have no dependencies. Buck adds these to a queue of targets to build. When a thread is available, Buck removes a target from the front of the queue and builds it. Assuming the target builds successfully, Buck notifies all of the rules that depend on that target. When all of a rule's dependencies have been satisfied, Buck adds that rule's target to the build queue. Computation proceeds in this manner until all of the nodes in the graph have been built. This execution model means that breaking modules into finer dependencies creates opportunities for increased parallelism, which improves throughput.
-
-## Buck uses only first-order dependencies for Java
-
-When compiling Java, Buck uses first-order dependencies only, that is, dependencies that you specify explicitly in the `deps` argument of your build rule. This means that the compilation step in your build sees only explicitly-declared dependencies, not other libraries that those dependencies themselves depend on.
-Using only first-order dependencies dramatically shrinks the set of APIs that your Java code is exposed to, which in turn reduces the scope of changes that will trigger a rebuild.
-**NOTE:** If your rule does, in fact, depend on a dependency of one of your explicitly-specified dependencies—that is, on a *second-order* dependency—you can make that dependency available to your rule by specifying it in an `exported_deps` argument in the rule of the explicitly-specified dependency, as in the sketch below.
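-
-As a hedged sketch (the rule and path names are purely illustrative, not from a real project), a library can re-export one of its own dependencies so that rules depending on it also compile against that second-order dependency:
-
-```
-java_library(
-  name = 'util',
-  srcs = glob(['*.java']),
-  # Rules that list ':util' in their deps also see ':base' at
-  # compile time, even though ':base' is a second-order
-  # dependency from their point of view.
-  exported_deps = ['//java/com/example/base:base'],
-)
-```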
-
-## Buck uses dependency files to trim over-specified inputs
-
-Buck's low-level build rules specify all inputs—such as source files or the outputs from other build rules—that might contribute to the output when the build rule is executed. Normally, changes to any of these inputs result in a new RuleKey and therefore trigger a rebuild. However, in practice, it's not uncommon for these build rules to *over-specify* their inputs. A good example is Buck's C/C++ compilation rules. C/C++ compilation rules specify as inputs all headers found from the transitive closure of C/C++ library dependencies, even though in many cases only a small subset of these headers is actually used. For example, a C/C++ source file might use only one of many headers exported by a C/C++ library dependency. However, there's not enough information available before running the build to know whether any given input is used, and so all inputs must be considered, which can lead to unnecessary rebuilding.
-In some cases, after the build completes, Buck can figure out the exact subset of the listed inputs that were actually used. In C/C++, compilers such as `gcc` provide a `-M` option which produces a dependency file. This file identifies the exact headers that were used during compilation. For supported rules, Buck uses this dependency file before the build to try to avoid unnecessary rebuilding:
-
-* If the dependency file is available before the build, Buck reads the file and uses it to filter out unused inputs when constructing the RuleKey.
-* If no dependency file is available before the build, Buck runs the build as normal and produces a dependency file. The dependency file is then available for subsequent builds.
-
-Note that dependency files are used only if the standard RuleKey—which considers all inputs—doesn't match. In cases where the RuleKey matches, the output from the rule can be fetched from the cache.
diff --git a/docs/legacy/about/performance.md b/docs/legacy/about/performance.md
deleted file mode 100644
index 342fafc9f079c..0000000000000
--- a/docs/legacy/about/performance.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# Performance Tuning
-
-## Performance Tuning Your Builds
-
-Buck [does a lot of work](https://buck.build/concept/what_makes_buck_so_fast.html) to make builds as fast as possible, and we also give developers tools to figure out where the time is being spent inside of their builds.
-
-## Super Console
-
-When running Buck in an [ANSI](http://en.wikipedia.org/wiki/ANSI_escape_code)-compliant terminal, Buck displays a breakdown of what each thread is doing, updated every 100ms, in what we affectionately call "SuperConsole." While a build is running, this gives developers a good idea of what Buck is spending its time doing, and can often help people spot issues in their builds. If you want to see what happened after the fact or to have a trace you can send around your team, use Chrome Tracing.
-
-## Chrome Tracing
-
-The Chrome team has built an awesome framework for viewing performance traces right inside of [Chrome](http://www.chromium.org/developers/how-tos/trace-event-profiling-tool). You can access this by going to `chrome://tracing` in your browser. Consult the trace viewer's [project page](https://github.com/google/trace-viewer/) for more information on the trace viewer and the file format.
-After Buck is done with each build, it will produce, in the directory `buck-out/log/traces/`, a Chrome Trace file that can be loaded up in `chrome://tracing`.
Buck will save a file in the format `build.[timestamp].trace`, and then create a symlink from the most recent trace to `build.trace`.
-To load up this trace, visit `chrome://tracing` inside of Chrome, and hit "Load". Load the trace file of interest, and look around to see where time was spent. Each row represents a different thread, and all of the steps taken for a given rule are logged underneath that rule. Additionally, we log information about how the rule was built and the rule key for each artifact fetch. Press *?* to get the help menu for the Chrome Trace Viewer.
diff --git a/docs/legacy/about/troubleshooting.md b/docs/legacy/about/troubleshooting.md
deleted file mode 100644
index d224ece68d230..0000000000000
--- a/docs/legacy/about/troubleshooting.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# Troubleshooting
-
-If Buck stops working, then there are several things that you can try to do to fix it.
-
-## Run `buck clean`
-
-Ideally, this solution will never work. Seriously. If Buck is working correctly, then it should know which files have been modified and which files need to be rebuilt.
-That said, Buck is not perfect, so it is possible that you have found a defect. In this case, give `buck clean` a shot, and file a bug if the problem turns out to be reproducible.
-
-## Delete all generated files in your project.
-
-Buck is designed so that all generated files are written to the `buck-out` directory, which makes `buck clean` trivial to implement. However, you may use additional tools (such as an IDE) that generate files in other parts of the tree. Such files may inadvertently get included via [`glob()`](https://buck.build/function/glob.html) rules, which would interfere with Buck.
-For example, if you are using Git, then you can run:
-
-```
-git clean -xfdn
-```
-
-to get a list of files in your project that are not under version control. The `-n` switch is for "dry run," which means that Git will not delete any files when you run `git clean`. If you want to use Git to remove the generated files while preserving some non-versioned files (such as `.buckconfig.local`), then use it with the `-e` switch:
-
-```
-git clean -xfd -e .buckconfig.local
-```
-
-Note that `-e` can be specified multiple times.
diff --git a/docs/legacy/basics/cheatsheet.md b/docs/legacy/basics/cheatsheet.md
deleted file mode 100644
index 5f04839a0fb5c..0000000000000
--- a/docs/legacy/basics/cheatsheet.md
+++ /dev/null
@@ -1,118 +0,0 @@
-# Buck Cheat Sheet
-
-This section provides example command lines that you can use to obtain information about Buck and about your build. These techniques can help you to understand how your build works and to troubleshoot issues with your build.
-Most of these examples use the [`buck query`](https://buck.build/command/query.html), [`buck targets`](https://buck.build/command/targets.html), and [`buck audit`](https://buck.build/command/audit.html) commands. For more information and examples, see the reference pages for those commands.
-* * *
-
-* How do I get a list of all the rules that Buck supports from the command line?
-* How do I see the arguments for a given rule from the command line?
-* How do I find all the targets for a package?
-* How do I specify more than one target to buck query?
-* How do I get the attribute names and values for the targets that result from a query?
-* How do I perform a query inside of a rule?
-* How do I find the dependencies for a target, that is, the targets on which a specified target depends?
-* How do I find the reverse-dependencies for a target, that is, the targets that depend on a specified target?
-* How do I find the build file that contains the target that owns a source file?
-
-* * *
-
-### How do I get a list of all the rules that Buck supports, *from the command line*, so that I can process them with `grep`, `sed`, etc.?
-
-Use [`buck audit`](https://buck.build/command/audit.html) with the `ruletypes` subcommand, which returns an alphabetized list of all the rules that Buck supports.
-The following command line uses `buck audit ruletypes` with the `grep` command to print all the build rules that have the string `android` in their names.
-
-```
-buck audit ruletypes | grep android
-```
-
-Note that these are not all the rules that Buck provides for Android development. For example, the rules `apk_genrule` and `ndk_library` support Android development, but do not themselves contain the string `android` in their names.
-
-### How do I see the arguments for a given rule from the command line?
-
-Use [`buck audit`](https://buck.build/command/audit.html) with the `ruletype` (singular) subcommand followed by the name of the rule.
-The following command line uses `buck audit ruletype` to view the arguments supported by the [`remote_file`](https://buck.build/rule/remote_file.html) rule.
-
-```
-buck audit ruletype remote_file
-def remote_file (
-    name,
-    sha1,
-    url,
-    labels = None,
-    licenses = None,
-    out = None,
-    type = None,
-):
-    ...
-```
-
-### How do I find all the targets for a package?
-
-Specify a *build target pattern* that represents the targets in the package.
-
-```
-buck query //path/to/dir/...
-```
-
-The `buck query` command can accept a [build target pattern](https://buck.build/concept/build_target_pattern.html) as a parameter. If you specify a build target pattern, Buck evaluates this pattern and shows all the build targets that match it.
-
-### How do I specify more than one target to `buck query`?
-
-Use the [`buck query set()`](https://buck.build/command/query.html#set) operator.
-The following command line returns the target `main` in the build file in the root of the Buck project and all the targets from the build file in the `myclass` subdirectory of the root.
-
-```
-buck query "set( '//:main' '//myclass:' )"
-```
-
-### How do I get the attribute names and values for the targets returned by a query?
-
-Add the `--output-attributes` option to the command line, followed by regular expressions that represent the attributes of interest.
-
-```
-buck query "deps(//foo:bar)" --output-attributes 'name' 'exported_headers'
-```
-
-The `--output-attributes` option enables you to specify which attributes Buck should return. Instead of returning the names of the targets that match the query expression, Buck returns the names and values of the specified attributes for those targets in JSON format. Attributes are specified as regular expressions. For example, `'.*'` matches all attributes. See the [buck query page](https://buck.build/command/query.html#output-attributes) for more details. The output for the example query above might look something like the following.
-
-```
-{
-  "//foo/bar/lib:lib" : {
-    "exported_headers" : [ "App/util.h" ],
-    "name" : "lib"
-  },
-  "//foo/bar:app" : {
-    "exported_headers" : [ "App/lib.h" ],
-    "name" : "app"
-  }
-}
-```
-
-### How do I perform a query *inside* of a rule?
-
-Use [**string parameter macros**](https://buck.build/function/string_parameter_macros.html), specifically, the *query* macros:
-
-```
-$(query_targets "queryfunction(//:foo)")
-$(query_outputs "queryfunction(//:foo)")
-$(query_targets_and_outputs [SEPARATOR] "queryfunction(//:foo)")
-```
-
-Note, however, that the query macros are supported only for [`genrule`](https://buck.build/rule/genrule.html) and [`apk_genrule`](https://buck.build/rule/apk_genrule.html).
-
-### How do I find the dependencies for a target?
-
-Use the `deps()` operator.
-
-```
-buck query "deps('//foo:bar')"
-buck query "deps('//foo:bar', 1, first_order_deps())"
-buck query "deps(set('//foo:bar' '//foo:lib' '//foo/baz:util'))"
-```
-
-The [deps](https://buck.build/command/query.html#deps) operator finds the dependencies of the specified targets. The first argument represents the targets of interest. This can be a single [build target](https://buck.build/concept/build_target.html) or [build target pattern](https://buck.build/concept/build_target_pattern.html), or a set of these.
-The optional second argument is the *depth* of the search for dependencies from the specified targets. For example, `1`, as shown in the example above, returns only the direct dependencies. If you do not provide this argument, the output is the complete set of transitive dependencies.
-
-### How do I find the reverse-dependencies for a target, that is, the targets that *depend on* a specified target?
-
-Use the `buck query` [`rdeps`](https://buck.build/command/query.html#rdeps) (reverse dependencies) operator.
-The following example returns the targets in the [transitive closure](https://en.wikipedia.org/wiki/Transitive_closure) of `//foo:bar` that depend directly on `//example:baz`.
-
-```
-buck query "rdeps('//foo:bar', '//example:baz', 1)"
-```
-
-### How do I find the build file that contains the target that owns a source file?
-
-In order to find the build file associated with a source file, combine the `owner` operator with `buildfile`. For example,
-
-```
-buck query "buildfile(owner('foo/bar/main.cpp'))"
-```
-
-first finds the targets that *own* `foo/bar/main.cpp` and then returns the build files, such as `foo/bar/BUCK`, that define those targets.
diff --git a/docs/legacy/basics/getting-started.md b/docs/legacy/basics/getting-started.md
deleted file mode 100644
index 5c0f25467fb1d..0000000000000
--- a/docs/legacy/basics/getting-started.md
+++ /dev/null
@@ -1,244 +0,0 @@
-# Getting Started
-
-**Note:** If you are a member of an organization that is using Buck, please consult with your colleagues to see if your organization has *site-specific documentation* for Buck. Buck is flexible and configurable, and many organizations have created their own Buck documentation, which is specific to their use cases—in addition to the documentation here.
-
-## Quick Starts for various target platforms
-
-
-
-|**Platform:** |Android, iOS, Java, Other |
-|--- |--- |
-|**Development OS:** |macOS, Linux, Windows |
-
->While not a prerequisite for installing Buck itself, to build Android applications, you will also need at least the [Android SDK](https://developer.android.com/studio/index.html) and the [Android NDK](https://developer.android.com/ndk/index.html), which can be installed via [Homebrew](http://brewformulas.org/Android-sdk) or manually downloaded and installed.
-
-The commands in this guide are designed to be copy-pasteable, idempotent, and usable on the representative operating systems (macOS, Linux, Windows).
Sometimes this results in some unusual constructions (such as using `echo` instead of `vi` or `Emacs` to create a file). Bear in mind that this is a *quick* start guide, and few things are quicker than copy-and-paste!
-
-## Install with Homebrew
-
-Buck is available as a bottle on [Homebrew](http://brew.sh/).
-
-### Prerequisites
-
-* [Command Line Tools](https://developer.apple.com/xcode/features/)
-* [Java Runtime Environment version 11](https://java.com/en/download/) (support for future versions is in the works) - If you have multiple installations of Java on your development computer, you might get warnings from Buck that you are using an unsupported version of Java. To resolve this issue, set the `JAVA_HOME` environment variable to the directory of a **supported version** of the Java Development Kit (JDK). Note that the directory that `JAVA_HOME` points to should contain a `bin` subdirectory which in turn contains binaries for the Java compiler (`javac`) and Java runtime (`java`).
-
-```
-# Install command line tools. NOTE: If you have Xcode installed, these may
-# already be installed.
-xcode-select --install
-# Download and Install Java SE 8 from:
-# https://www.oracle.com/technetwork/java/javase/downloads/index.html.
-# This installs the JDK 8, a superset of the JRE.
-# Alternatively, install AdoptOpenJDK 8 with Homebrew:
-brew tap AdoptOpenJDK/openjdk
-brew install --cask adoptopenjdk8
-```
-
-### Brew install
-
-You have two choices when using Homebrew. You can choose to get the latest binary [release](https://github.com/facebook/buck/releases/latest):
-
-```
-brew tap facebook/fb
-brew install buck
-```
-
-Or, you can get the latest code and build it locally:
-
-```
-brew update
-brew tap facebook/fb
-brew install --HEAD buck
-```
-
-## Build from Source
-
-### Prerequisites
-
-To manually build Buck, download and install the following prerequisites:
-
-* [Command Line Tools](https://developer.apple.com/xcode/features/)
-* [Oracle Java Development Kit version 8](http://www.oracle.com/technetwork/java/javase/downloads/index.html) (support for future versions is in the works)
-* [Apache Ant 1.9 (or newer)](http://ant.apache.org/)
-* [Python 2.7](https://www.python.org/downloads/)
-* [Git](http://git-scm.com/download)
-* [Watchman](https://facebook.github.io/watchman/docs/install)
-
->We strongly recommend that you install Watchman. With Watchman, Buck uses a daemon ([buckd](https://buck.build/command/buckd.html)) which prevents Buck from needing to parse all of your [build files](https://buck.build/concept/build_file.html) every time you build—and it caches some other components of your build as well.
-
-You can use [Homebrew](http://homebrew.sh/) to install many of the prerequisites on a Mac.
-
-```
-# Install Command Line tools first. NOTE: If you have Xcode installed, these may
-# already be installed.
-xcode-select --install
-# Then the JDK (superset of the JRE)
-brew update
-brew tap caskroom/cask
-brew tap caskroom/versions
-brew cask install java8
-# Then...
-brew install ant python git watchman -``` - -### Build - -Once you have the above tools installed, you can build Buck as follows: - -``` -git clone https://github.com/facebook/buck.git -cd buck -ant -./bin/buck build --show-output buck -buck-out/gen/programs/buck.pex --help -``` - -If everything worked correctly, you should see something like: - -``` -buck build tool -usage: - buck [options] - buck command --help - buck command [command-options] -available commands: - audit lists the inputs for the specified target - build builds the specified target - cache makes calls to the artifact cache - clean deletes any generated files - fetch downloads remote resources to your local machine - install builds and installs an application - kill kill buckd for the current project - killall kill all buckd processes - project generates project configuration files for an IDE - query provides facilities to query information about the configured target nodes graph - root prints the absolute path to the root of the current buck project - run runs a target as a command - server query and control the http server - targets prints the list of buildable targets - test builds and runs the tests for the specified target - uninstall uninstalls an APK - uquery provides facilities to query information about the unconfigured target nodes graph -options: - --help : Shows this screen and exits. - --version (-V) : Show version number. -``` - -Because you will likely be running `./bin/buck` often, you should add it to your path so that you can simply run `buck` from the command line. - -### Set Location of Android SDK and NDK - -You will need to tell Buck where to find the Android SDK and NDK. -To find the location of the **Android SDK**, Buck looks at the following values *in the following order*: - -* `ANDROID_SDK` environment variable -* `ANDROID_HOME` environment variable -* `ANDROID_SDK_ROOT` environment variable -* The value of the [`[android].sdk_path`](https://buck.build/files-and-dirs/buckconfig.html#android.sdk_path) property in `.buckconfig`. - -To find the location of a specific **NDK**, Buck looks at the following values *in the following order*: - -* `ANDROID_NDK` environment variable. -* `NDK_HOME` environment variable. -* The value of the [`[ndk].ndk_path`](https://buck.build/files-and-dirs/buckconfig.html#ndk.ndk_path) property in `.buckconfig`. - -If you have **multiple NDKs** installed into a single enclosing directory, you can specify this directory to Buck using either of the following values: - -* `ANDROID_NDK_REPOSITORY` environment variable. -* The [`[ndk].ndk_repository_path`](https://buck.build/files-and-dirs/buckconfig.html#ndk.ndk_repository_path) property in `.buckconfig`. - -If you specify *both* the environment variable and the `.buckconfig` setting, the environment variable takes precedence. -If you specify an NDK repository, Buck selects the NDK based on the version that you specify in the [`[ndk].ndk_version`](https://buck.build/files-and-dirs/buckconfig.html#ndk.ndk_version) property of `.buckconfig`. - -## Trying Buck - -Now that Buck is installed, it is time to use Buck in a sample project. - -### Clone [Buck samples repo](https://github.com/fbsamples/bucksamples/) - -``` -git clone https://github.com/fbsamples/bucksamples.git -cd bucksamples/cross-platform-scale-2015-demo/ -``` - -### Key Android Files - -This sample app has all the files necessary to use Buck to build an Android project. 
From the root directory, you will find:
-
-* `android/java/com/facebook/buck/demo/Hello.java`: The main Java file supported by other associated resources.
-* `android/BUCK`: The [build file](https://buck.build/concept/build_file.html) is what makes Buck work. It defines all the [build rule](https://buck.build/concept/build_rule.html)s for your source code. A [build rule](https://buck.build/concept/build_rule.html) can also include dependencies (generally via `deps`), which may be from other [build file](https://buck.build/concept/build_file.html)s, as in the case of this app.
-* `.buckconfig`: A [`.buckconfig`](https://buck.build/files-and-dirs/buckconfig.html) file allows for various flag and alias settings for any project (even beyond Android) within the root directory.
-
-### Configure the environment
-
-Before building an app, you need to configure environment variables to let Buck know the locations of the Android SDK and Android NDK.
-First of all, check for existing variables:
-
-```
-$ env | grep ANDROID_
-ANDROID_HOME=
-ANDROID_NDK_REPOSITORY=
-ANDROID_SDK=
-ANDROID_SDK_ROOT=
-```
-
-Set the missing variables to the locations of the Android SDK and Android NDK, or set the paths in your [`.buckconfig`](https://buck.build/files-and-dirs/buckconfig.html) file.
-Before building, make sure that you have installed the correct build tools and a target in the Android SDK and the correct version of the Android NDK. You can find the required versions of these tools in [`.buckconfig`](https://buck.build/files-and-dirs/buckconfig.html):
-
-* See [`[android].build_tools_version`](https://buck.build/files-and-dirs/buckconfig.html#android.build_tools_version) to get the version of build tools in Android SDK.
-* [`[android].compile_sdk_version`](https://buck.build/files-and-dirs/buckconfig.html#android.compile_sdk_version) points to the Android SDK to build against.
-* [`[ndk].ndk_version`](https://buck.build/files-and-dirs/buckconfig.html#ndk.ndk_version) points to the version of Android NDK.
-
-Optionally:
-
-* [`[android].sdk_path`](https://buck.build/files-and-dirs/buckconfig.html#android.sdk_path) is an absolute path to the Android SDK.
-* [`[ndk].ndk_path`](https://buck.build/files-and-dirs/buckconfig.html#ndk.ndk_path) is an absolute path to the Android NDK.
-* [`[ndk].ndk_repository_path`](https://buck.build/files-and-dirs/buckconfig.html#ndk.ndk_repository_path) is an absolute path to a directory that contains multiple Android NDKs in subdirectories. Buck selects which NDK to use based on the value of the [`[ndk].ndk_version`](https://buck.build/files-and-dirs/buckconfig.html#ndk.ndk_version) property in `.buckconfig`.
-
-### Build the Android sample
-
-In order to build the app, you use the [`buck build`](https://buck.build/command/build.html) command, specifying your app as the target. The target may be defined in the [`[alias]`](https://buck.build/files-and-dirs/buckconfig.html#alias) section in the [`.buckconfig`](https://buck.build/files-and-dirs/buckconfig.html) file, or it can be the name of your Android project prefixed with `//[the directory where your project is located]:` (e.g., `//android:demo-app`).
-
-```
-# From the root `cross-platform-scale-2015-demo/` directory
-# demo_app_android is an alias in .buckconfig for //android:demo-app. Either works.
-buck build demo_app_android
-```
-
-You should see output similar to:
-
-```
-export ANDROID_NDK=$HOME/android-sdk
-buck build demo_app_android
-[-] PROCESSING BUCK FILES...FINISHED 0.0s [100%]
-[-] DOWNLOADING...
(0.00 B/S AVG, TOTAL: 0.00 B, 0 Artifacts)
-[-] BUILDING...FINISHED 0.7s [100%] (1/1 JOBS, 0 UPDATED, 0 [0.0%] CACHE MISS)
-```
-
->The first time you build, you will most likely see a longer build time and cache misses. Subsequent builds should be much faster, with minimal cache misses.
-
-Buck outputs its results in the `buck-out/` directory.
-
-### Run the built Android App
-
-Now that you know your app has built successfully, you can install and run the app with [`buck install`](https://buck.build/command/install.html). This command both compiles and installs the application on the Android emulator. Using the `--run` flag will launch the emulator as well.
-
-```
-buck install --run demo_app_android
-Installing apk on emulator-5554 (android-emulator).
-[-] PROCESSING BUCK FILES...FINISHED 0.1s [100%]
-[-] DOWNLOADING... (0.00 B/S AVG, TOTAL: 0.00 B, 0 Artifacts)
-[-] BUILDING...FINISHED 0.8s [100%] (1/1 JOBS, 0 UPDATED, 0 [0.0%] CACHE MISS)
-[+] INSTALLING...0.9s
-Successfully ran install apk //android:demo-app on 1 device(s)
-Starting activity com.facebook.buck.demo/.App...
-Successfully ran start activity on 1 device(s)
-```
-
->If you get an error either that you do not have certain Android add-ons (e.g., Google APIs) or that there is no emulator to run, you should launch the Android SDK Manager (e.g., `android sdk`) and install the appropriate packages and/or run your emulator (usually found under `Tools | Manage AVDs`).
-
-### Success!
-
-If all goes well, the demo app should now be running on your emulator or device.
diff --git a/docs/legacy/basics/key-concepts.md b/docs/legacy/basics/key-concepts.md
deleted file mode 100644
index 786ed581ae471..0000000000000
--- a/docs/legacy/basics/key-concepts.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# Key concepts
-
-Buck has a number of fundamental concepts:
-
-* A [***build rule***](https://buck.build/concept/build_rule.html) describes how to produce an output file from a set of input files. Most build rules are specific to a particular language or platform. For example, you would use the [`cxx_binary`](https://buck.build/rule/cxx_binary.html) rule to create a C++ binary, but you would use the [`android_binary`](https://buck.build/rule/android_binary.html) rule to create an Android APK.
-* A [***build target***](https://buck.build/concept/build_target.html) is a string that uniquely identifies a build rule. It can be thought of as a URI for the build rule within the Buck project.
-* A [***build file***](https://buck.build/concept/build_file.html) defines one or more build rules. In Buck, build files are typically named `BUCK`. A `BUCK` file is analogous to the `Makefile` used by the Make utility. In your project, you will usually have a separate `BUCK` file for each buildable unit of software—such as a binary or library. For large projects, you could have hundreds of `BUCK` files.
-
-A Buck ***package*** comprises a Buck build file (a `BUCK` file) and all files—such as source files and headers—in the same directory as the `BUCK` file or in its subdirectories, provided those subdirectories do not themselves contain a `BUCK` file. To say it another way, a `BUCK` file defines the root of a package, but Buck packages might not include all their subdirectories, because Buck packages do not overlap or contain other Buck packages.
-For example, suppose a directory `app-dir-1` contains a BUCK file; that BUCK file defines `app-dir-1` as the root of a package—call it **Package A**.
The directory `app-dir-2` is part of Package A because it is a subdirectory of `app-dir-1`, but does not itself contain a BUCK file.
-Now, consider directory `app-dir-3`. Because `app-dir-3` contains a BUCK file, it is the root of a new package (**Package B**). Although `app-dir-3` is a subdirectory of `app-dir-1`, it is *not* part of Package A.
-Buck has the concept of a ***cell***, which defines a directory tree of one or more Buck packages. A Buck build could involve multiple cells. Cells often correspond to repositories, but this isn't required.
-The root of a Buck cell contains a global configuration file called [**`.buckconfig`**](https://buck.build/files-and-dirs/buckconfig.html). Note that although the cell root should contain a `.buckconfig`, the presence of a `.buckconfig` file doesn't in itself define a cell. Rather, *the cells involved in a build are defined at the time Buck is invoked*; they are specified in the `.buckconfig` for the Buck *project* (see below).
-A Buck ***project*** is defined by the `.buckconfig` in the directory where Buck is invoked, or, if that directory doesn't contain a `.buckconfig`, by the `.buckconfig` in the nearest ancestor directory.
-The `.buckconfig` for the project specifies the cells that constitute the Buck project. Specifically, these cells are specified in the [`[repositories]`](https://buck.build/files-and-dirs/buckconfig.html#repositories) section of the `.buckconfig`; a minimal sketch of such a section appears at the end of this topic. Note that the directory tree rooted at this `.buckconfig` is automatically considered a cell by Buck; in other words, the project's `.buckconfig` doesn't need to specify the project cell explicitly—although it is a good practice to do so.
-
-### Buck's dependency graph
-
-Every build rule can have zero or more dependencies. You can specify these dependencies using, for example, the `deps` argument to the build rule. For more information about specifying dependencies, consult the reference page for the build rule you are using.
-These dependencies form a directed graph, called the *target graph*. Buck requires the graph to be acyclic. When building the output of a build rule, all of the rule's transitive dependencies are built first. This means that the graph is built in a "bottom-up" fashion. A build rule knows only which rules it depends on, not which rules depend on it. This makes the graph easier to reason about and enables Buck to identify independent subgraphs that can be built in parallel. It also enables Buck to determine the minimal set of build targets that need to be rebuilt.
-For more information about how Buck leverages the graph of build dependencies, see [What Makes Buck so Fast](https://buck.build/concept/what_makes_buck_so_fast.html).
-
-### Multiple Buck projects in a single repository
-
-Buck is designed to build multiple deliverables from a single repository—that is, a *monorepo*—rather than from multiple repositories. Support for the monorepo design motivated Buck's support for cells and projects.
-It is Facebook's experience that maintaining all dependencies in the same repository makes it easier to ensure that all developers have the correct version of the code and simplifies the process of making atomic commits.
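-
-As promised above, here is a minimal sketch of the `[repositories]` section of a project's `.buckconfig`; the cell names and paths are hypothetical:
-
-```
-[repositories]
-  # The project's own directory tree, declared explicitly as a cell.
-  root = .
-  # A second cell that lives in a sibling directory.
-  thirdparty = ../thirdparty-repo
-```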
diff --git a/docs/legacy/basics/tutorials.md b/docs/legacy/basics/tutorials.md
deleted file mode 100644
index dc4921ec5b814..0000000000000
--- a/docs/legacy/basics/tutorials.md
+++ /dev/null
@@ -1,235 +0,0 @@
-# Tutorials
-
-This expanded tutorial covers using Buck to build a project after you have installed Buck, including creating, building, and packaging a project.
-
->**Currently this tutorial is Android specific for either Mac or Linux. We will be adding iOS, Java and Windows specific tutorial information in the near future.**
-
-
-
-|**Platform:** |Android, MacOS, Linux |
-|--- |--- |
-|**Development OS:** |macOS, Linux |
-|**Language:** |Java, Kotlin, Rust |
-
-## Path Setup
-
-Add Buck to your `$PATH` and set up [`buckd`](https://buck.build/concept/buckd.html):
-
-```
-sudo ln -s ${PWD}/bin/buck /usr/bin/buck
-sudo ln -s ${PWD}/bin/buckd /usr/bin/buckd
-```
-
-## Create Project
-
-We are going to build a sample application. We should start our project in an empty directory, so create a new one and navigate to it:
-
-```
-mkdir -p ~/my-first-buck-project/
-cd ~/my-first-buck-project/
-```
-
->**Note: the following instructions will now assume that all commands are run from your `~/my-first-buck-project` directory.**
-
-## Compile Your Code
-
-Android applications are typically written in Java and Kotlin, so the first thing we will do is configure Buck to compile code against the Android API. To do so, Buck needs to know where your Android SDK is. Assuming that your Android SDK is installed in `~/android-sdk`, run the following command to set an `ANDROID_SDK` environment variable that tells Buck where to find your Android SDK:
-
-```
-export ANDROID_SDK=$HOME/android-sdk
-```
-
-Now that Buck can locate your Android SDK, it is time to compile some Java code. First, we create a simple `Activity` at `java/com/example/activity/MyFirstActivity.java`:
-
-```
-mkdir -p java/com/example/activity/
-echo "package com.example.activity;
-
-import android.app.Activity;
-import android.os.Bundle;
-
-public class MyFirstActivity extends Activity {
-
-  @Override
-  public void onCreate(Bundle savedInstanceState) {
-    super.onCreate(savedInstanceState);
-  }
-}" > java/com/example/activity/MyFirstActivity.java
-```
-
-Now we need a build file that defines a build rule to compile this Java code, so we create an [`android_library()`](https://buck.build/learning/rule/android_library.html) rule in `java/com/example/activity/BUCK`:
-
-```
-echo "android_library(
-  name = 'activity',
-  srcs = glob(['*.java']),
-  visibility = [ 'PUBLIC' ],
-)" > java/com/example/activity/BUCK
-```
-
-Now we can compile our Java code using Buck:
-
-```
-buck build //java/com/example/activity:activity
-```
-
->Buck generates its output in the `buck-out` directory, so this is a good time to specify `buck-out` as something that should be ignored by your version control system.
-
-## Package Resources
-
-Android applications frequently contain resources, such as strings and images.
For this example, we will create a trivial Android resource bundle that contains a single string:
-
-```
-mkdir -p res/com/example/activity/res/values/
-echo "<?xml version='1.0' encoding='utf-8' ?>
-<resources>
-  <string name='app_name'>Hello World</string>
-</resources>" > res/com/example/activity/res/values/strings.xml
-```
-
-Buck needs a way to reference this collection of resources, so we need to create a build file that defines an [`android_resource`](https://buck.build/rule/android_resource.html) rule:
-
-```
-echo "android_resource(
-  name = 'res',
-  res = subdir_glob([('res', '**')]),
-  package = 'com.example',
-  visibility = [
-    '//apps/myapp:',
-  ],
-)" > res/com/example/activity/BUCK
-```
-
-## Create a Keystore
-
-In practice, you will want to be able to test your Android app on a physical Android device, which means that it needs to be signed. We will keep app-specific information, such as the key and manifest, in its own directory to keep things tidy:
-
-```
-mkdir -p apps/myapp/
-```
-
-To keep things simple, we will create a self-signed certificate for debugging.
-
->Unfortunately, this is not a one-liner because there are a number of prompts from the `keytool` command.
-
-```
-keytool -genkey -keystore apps/myapp/debug.keystore -alias my_alias \
-    -keyalg RSA -keysize 2048 -validity 10000
-```
-
-When prompted for a keystore password, just use `android` (and then type it again to confirm it), and hit `Enter` to accept the default values for name, organizational unit, etc.
-Then create a `.properties` file that stores all of this information:
-
-```
-echo "key.alias=my_alias
-key.store.password=android
-key.alias.password=android" > apps/myapp/debug.keystore.properties
-```
-
-## Build an APK
-
-An Android application needs a manifest named `AndroidManifest.xml`, so we must create such a file:
-
-```
-echo "<?xml version='1.0' encoding='utf-8' ?>
-<manifest xmlns:android='http://schemas.android.com/apk/res/android'
-          package='com.example'>
-
-  <application
-      android:label='Hello World'>
-    <activity android:name='com.example.activity.MyFirstActivity'>
-      <intent-filter>
-        <action android:name='android.intent.action.MAIN' />
-        <category android:name='android.intent.category.LAUNCHER' />
-      </intent-filter>
-    </activity>
-  </application>
-
-</manifest>" > apps/myapp/AndroidManifest.xml
-```
-
-Now we define an [`android_binary`](https://buck.build/rule/android_binary.html) and [`keystore`](https://buck.build/rule/keystore.html) rule in our build file:
-
-```
-echo "android_binary(
-  name = 'app',
-  manifest = 'AndroidManifest.xml',
-  manifest_entries = {
-    'version_code': 1,
-    'version_name': '1.0',
-    'min_sdk_version': 26,
-    'target_sdk_version': 29
-  },
-  keystore = ':debug_keystore',
-  deps = [
-    '//java/com/example/activity:activity',
-    '//res/com/example/activity:res',
-  ],
-)
-
-keystore(
-  name = 'debug_keystore',
-  store = 'debug.keystore',
-  properties = 'debug.keystore.properties',
-)" > apps/myapp/BUCK
-```
-
-Building an [`android_binary`](https://buck.build/rule/android_binary.html) rule will produce an APK:
-
-```
-buck build //apps/myapp:app
-```
-
-Alternatively, if you have an Android device connected to your computer, you can build and install the APK in one step with [`buck install`](https://buck.build/command/install.html):
-
-```
-buck install //apps/myapp:app
-```
-
-## Create an Alias
-
-Typing `buck build //apps/myapp:app` every time you want to rebuild your APK can be tedious. Fortunately, Buck makes it possible to define an *alias* for a build target. An alias can always be used in place of a build target when using Buck's command-line interface.
-Aliases must be defined in the [`[alias]`](https://buck.build/files-and-dirs/buckconfig.html#alias) section of the `.buckconfig` file in the root of the project:
-
-```
-echo "[alias]
-    app = //apps/myapp:app" > .buckconfig
-```
-
-With this alias in place, the command to build and install the APK is much shorter and easier to remember:
-
-```
-buck install app
-```
-
-## Create an IntelliJ Project
-
-You likely want to develop your Android app using an IDE. Fortunately, Buck can generate an IntelliJ project from the build rules you defined in your build files.
-In order to ensure that IntelliJ recognizes where your Java folders are, you need to specify the [`[java].src_roots`](https://buck.build/files-and-dirs/buckconfig.html#java.src_roots) in your [`.buckconfig`](https://buck.build/files-and-dirs/buckconfig.html) file:
-
-```
-echo "[java]
-    src_roots = /java/" >> .buckconfig
-```
-
-Now you can create the IntelliJ project by running [`buck project`](https://buck.build/command/project.html):
-
-```
-buck project --ide intellij
-```
-
-Note that you will likely want to exclude these generated files from version control, so add the following to your `.gitignore` file (or `.hgignore` if you are using Mercurial) along with the files generated by [`buckd`](https://buck.build/concept/buckd.html):
-
-```
-echo "/.buckd
-/buck-out
-*.iml
-/.idea/compiler.xml
-/.idea/libraries/*.xml
-/.idea/modules.xml
-/.idea/runConfigurations/Debug_Buck_test.xml" > .gitignore
-```
-
-Now you can build your Android application from either IntelliJ or the command line.
diff --git a/docs/legacy/concepts/buck-daemon.md b/docs/legacy/concepts/buck-daemon.md
deleted file mode 100644
index 570f60823ee47..0000000000000
--- a/docs/legacy/concepts/buck-daemon.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# Buck Daemon (buckd)
-
-The first time that you run a Buck command, Buck starts a daemon process for the current project in the current working directory. For subsequent commands, Buck checks for the running daemon process and, if found, uses the daemon to execute the command. Using the Buck daemon can save significant time as it enables Buck to take advantage of caches for build-file parsing, and for Buck's target graph and action graph.
-
-It is safe to run multiple Buck daemons started from different project directories as they do not interfere with each other, making `buckd` suitable for use in shared-server environments or where several projects are being worked on concurrently.
-
-While it runs, the Buck daemon process monitors the project's file system and invalidates cached build rules if any build input files change. The Buck daemon excludes from monitoring any subtrees of the project file system that are specified in the [`[project].ignore`](https://buck.build/files-and-dirs/buckconfig.html#project.ignore) setting of `.buckconfig`. By adding project-specific output directories and source-control directories, such as `.git`, to this setting, you can significantly improve performance; this might be necessary to avoid file-change overflows when using Buck daemons to build large projects.
-
-By default, Buck daemon processes ignore changes to temporary files created by text editors.
-
-## Killing or disabling the Buck daemon
-
-The Buck daemon process is killed if
-
-* the [`buck clean`](https://buck.build/command/clean.html) command is run.
-
-You can also kill the Buck daemon explicitly by running [`buck kill`](https://buck.build/command/kill.html) in the directory tree for your project.
Note that if—for some reason—multiple instances of the daemon are running, the `buck kill` command kills only one of them.
-*If the daemon is killed, you might experience a significant delay the next time that you invoke a Buck command as the daemon restarts.*
diff --git a/docs/legacy/concepts/build-file.md b/docs/legacy/concepts/build-file.md
deleted file mode 100644
index 9b22e9d14bcf0..0000000000000
--- a/docs/legacy/concepts/build-file.md
+++ /dev/null
@@ -1,44 +0,0 @@
-# Build File
-
-A *build file* is a file, typically named `BUCK`, that defines one or more [build rule](https://buck.build/concept/build_rule.html)s.
-Note that you can change the name that Buck uses for the build file in the `buildfile` section of [`.buckconfig`](https://buck.build/files-and-dirs/buckconfig.html).
-A source file in your project can only be referenced by rules in its "nearest" build file, where "nearest" means its closest direct ancestor in your project's file tree. (If a source file has a build file as a sibling, then that is its nearest ancestor.) For example, if your project had the following `BUCK` files:
-
-```
-java/com/facebook/base/BUCK
-java/com/facebook/common/BUCK
-java/com/facebook/common/collect/BUCK
-```
-
-Then your build rules would have the following constraints:
-
-* Rules in `java/com/facebook/base/BUCK` can reference any file under `java/com/facebook/base/`.
-* Rules in `java/com/facebook/common/BUCK` can reference any files under that directory, except for those under `java/com/facebook/common/collect/`, as those "belong" to the `BUCK` file in the `collect` directory.
-
-The set of source files accessible to a build file is also known as its *build package*.
-The way to refer to code across build packages is to create build rules and use `deps` to refer to that code. Going back to the previous example, suppose code in `java/com/facebook/common/concurrent/` wants to depend on code in `java/com/facebook/common/collect/`. Presumably `java/com/facebook/common/collect/BUCK` has a build rule like:
-
-```
-java_library(
-  name = 'collect',
-  srcs = glob(['*.java']),
-  deps = [
-    '//java/com/facebook/base:base',
-  ],
-)
-```
-
-Then `java/com/facebook/common/BUCK` could have a rule like:
-
-```
-java_library(
-  name = 'concurrent',
-  srcs = glob(['concurrent/*.java']),
-  deps = [
-    '//java/com/facebook/base:base',
-    '//java/com/facebook/common/collect:collect',
-  ],
-)
-```
-
-whereas the following **would be invalid** because `java/com/facebook/common/collect/` has its own build file, so `//java/com/facebook/common:concurrent` cannot list `java/com/facebook/common/collect/*.java` in its `srcs`.
-
-```
-java_library(
-  name = 'concurrent',
-  srcs = glob(['collect/*.java', 'concurrent/*.java']),
-  deps = [
-    '//java/com/facebook/base:base',
-  ],
-)
-```
diff --git a/docs/legacy/concepts/build-rule.md b/docs/legacy/concepts/build-rule.md
deleted file mode 100644
index c09f23b02d9c9..0000000000000
--- a/docs/legacy/concepts/build-rule.md
+++ /dev/null
@@ -1,71 +0,0 @@
-# Build Rule
-
-A *build rule* is a procedure for producing output files from a set of input files in the context of a specified build configuration. Build rules are specified in [build file](https://buck.build/concept/build_file.html)s—typically named BUCK.
-**Note:** A build rule must explicitly specify, in its arguments, all of its required inputs in order for Buck to be able to build the rule's output in a way that is deterministic and reproducible.
-
-## Buck's collection of build rules
-
-Buck comes with a collection of built-in build rules for many common build procedures. For example, compiling Java code against the Android SDK is a common procedure, so Buck provides the build rule [`android_library`](https://buck.build/rule/android_library.html) to do that. Similarly, the final product of most Android development is an APK, so you can use the build rule [`android_binary`](https://buck.build/rule/android_binary.html) to create an APK.
-This documentation organizes Buck's build rules by development language and by target platform. Examples are: **C++**, **Java**, **Python** (development languages) and **Android**, **iOS**, **.NET** (target platforms). Consult the table of contents to locate the build rules that are appropriate for your development project.
-You can view a list of Buck's build rules from the command line with the command:
-
-```
-buck audit ruletypes
-```
-
-You can view the arguments supported by a particular rule by passing its name to the command:
-
-```
-buck audit ruletype <rule>
-```
-
-Note that the first of these commands uses the *plural* `ruletypes`, and the second uses the *singular* `ruletype`. For more information, see the [`buck audit`](https://buck.build/command/audit.html) documentation.
-
-## Source files as inputs to build rules
-
-Most build rules specify source files as inputs. For example, a [`cxx_library`](https://buck.build/rule/cxx_library.html) rule would specify `.cpp` files as inputs. To support specifying these files, a `cxx_library` rule provides the `srcs` argument. Some languages, such as C++, use header files as well. To specify these, `cxx_library` provides a `headers` argument.
-In addition to `srcs` and `headers`, some rules provide variants of these arguments, such as `platform_srcs` and `platform_headers`. These arguments support groups of source files that should be used as inputs only when building for specific platforms. For more information, see the descriptions for `platform_srcs` and `platform_headers` in, for example, the [`cxx_library`](https://buck.build/rule/cxx_library.html) topic.
-
-### Package boundaries and access to source files
-
-In Buck, a BUCK file defines a *package*, which corresponds *roughly* to the directory that contains the BUCK file and those subdirectories that do not themselves contain BUCK files. (To learn more, see the [Key Concepts](https://buck.build/about/overview.html) topic.)
-A rule in a BUCK file cannot specify a source file as an input unless that source file is in that BUCK file's package. An exception to this restriction exists for header files, but only if a rule in the package that contains the header file *exports* that header file using the `exported_headers` argument. For more details, see the description for `exported_headers` in, for example, the [`cxx_library`](https://buck.build/rule/cxx_library.html) topic.
-More commonly though, the package for a BUCK file contains all the source files required for the rules defined in that BUCK file. Functionality in source files from other packages is made available through the artifacts produced by the rules in the BUCK files for those packages. For example, a [`cxx_binary`](https://buck.build/rule/cxx_binary.html) might use the functionality in a `cxx_library` that is defined in another package. To access that functionality, the `cxx_binary` would take that `cxx_library` as a *dependency*, as in the sketch below.
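-
-Here is a hedged sketch of that cross-package pattern (the paths and rule names are illustrative, not from a real project); the binary reaches the other package's functionality through its rule, never through its source files:
-
-```
-# libs/greet/BUCK
-cxx_library(
-  name = 'greet',
-  srcs = ['greet.cpp'],
-  # Export the header so that depending rules can include it.
-  exported_headers = ['greet.h'],
-  visibility = ['//apps/...'],
-)
-
-# apps/hello/BUCK
-cxx_binary(
-  name = 'hello',
-  srcs = ['main.cpp'],
-  # Depend on the library's rule; do not glob its sources.
-  deps = ['//libs/greet:greet'],
-)
-```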
- -##### Symlinks: Use with caution if at all - -We recommend that you do *not* use symlinks—either absolute or relative—to specify input files to build rules. Although using symlinks in this context does sometimes work, it can lead to unexpected behavior and errors. - -## Dependencies: Output from one rule as input to another rule - -A build rule can use the output from another build rule as one of its inputs by specifying that rule as a *dependency*. Typically, a build rule specifies its dependencies as a list of [build target](https://buck.build/concept/build_target.html)s in its `deps` argument. However, the rule can also specify dependencies—as build targets—in other arguments, such as `srcs`. -**Example:** The output of a [`java_library`](https://buck.build/rule/java_library.html) rule is a JAR file. If a `java_library` rule specifies another `java_library` rule as a dependency, the JAR file produced by the specified rule is added to the classpath for the `java_library` that depends on it. -**Example:** If a [`java_binary`](https://buck.build/rule/java_binary.html) rule specifies a `java_library` rule as a dependency, the JAR file for the specified `java_library` is available on the classpath for the `java_binary`. In addition, in the case of `java_binary`, the JAR files for any dependencies of the `java_library` rule *are also* made available to the `java_binary` rule—and if those dependencies have dependencies of their own, they are added as well. This exhaustive cascade of dependencies is referred to as the rule's *transitive closure*. - -### Required dependencies are always built first - -Buck guarantees that any dependencies that a rule lists that are required in order to build that rule are built successfully *before* Buck builds the rule itself. Note though that there can be special cases—such as [`apple_bundle`](https://buck.build/rule/apple_bundle.html)—where a rule's listed dependencies do not actually need to be built before the rule. - -### Visibility - -In order for a build rule to take a dependency on another build rule, the build rule on which the dependency is taken must be *visible* to the build rule taking the dependency. A build rule's `visibility` argument is a list of [build target pattern](https://buck.build/concept/build_target_pattern.html)s that specify the rules that can take that rule as a dependency. For more information about the concept of visibility in Buck, see the [Visibility](https://buck.build/concept/visibility.html) topic. - -### Dependencies define a graph - -Build rules and their dependencies define a directed acyclic graph (DAG). Buck requires this graph to be acyclic to make it possible to build independent subgraphs in parallel. - -## How to handle special cases: genrules and macros - -Although Buck provides a rich set of built-in build rules for developers, it is not able to address all possible needs. As an "escape hatch," Buck provides a category of generic build rules called *genrules*. With genrules, you can perform arbitrary operations using shell scripts. The genrules supported by Buck are: - -* [`genrule`](https://buck.build/rule/genrule.html) -* [`apk_genrule`](https://buck.build/rule/apk_genrule.html) -* [`cxx_genrule`](https://buck.build/rule/cxx_genrule.html) - -### Multiple output files with genrules - -In most cases, a build rule produces exactly one output file. However, with genrules, you can specify an output *directory* and write arbitrary files to that directory. 
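-
-For instance, here is a hedged sketch of a genrule that declares a directory as its output and writes several files into it (the rule and file names are illustrative):
-
-```
-genrule(
-  name = 'generate_docs',
-  out = 'docs',
-  # $OUT points at the declared output; treat it as a directory
-  # and write multiple files into it.
-  cmd = 'mkdir -p $OUT && echo overview > $OUT/overview.txt && echo api > $OUT/api.txt',
-)
-```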
-
-### Macros
-
-Finally, note that you can define functions that generate build rules. In general, this should not be something that you need to do, but taking advantage of this option might help you add needed functionality to Buck without editing its source code. For more details, see the [Custom Macros](https://buck.build/extending/macros.html) topic.
diff --git a/docs/legacy/concepts/build-target-pattern.md b/docs/legacy/concepts/build-target-pattern.md
deleted file mode 100644
index a05ad492be049..0000000000000
--- a/docs/legacy/concepts/build-target-pattern.md
+++ /dev/null
@@ -1,51 +0,0 @@
-# Build Target Pattern
-
-A *build target pattern* is a string that describes a set of one or more [build target](https://buck.build/concept/build_target.html)s. You can use build target patterns as arguments to commands, such as [`buck build`](https://buck.build/command/build.html) and [`buck query`](https://buck.build/command/query.html). You can also use build target patterns in the [visibility](https://buck.build/concept/visibility.html) argument of your build rules.
-The simplest build target pattern matches the build target of the same name:
-
-```
-#
-# Matches //apps/myapp:app
-#
-//apps/myapp:app
-```
-
-A build target pattern that ends with a colon matches all build targets in the build file at the preceding directory path. For example, suppose that the build file
-
-```
-apps/myapp/BUCK
-```
-
-defines the rules `app_debug` and `app_release`. The following build target pattern would then match both of those rules:
-
-```
-#
-# Matches //apps/myapp:app_debug and //apps/myapp:app_release
-#
-//apps/myapp:
-```
-
-A build target pattern that ends with an ellipsis (`/...`) matches all build targets in the build file in the directory that precedes the ellipsis and also *all build targets in build files in subdirectories*. For example, suppose that you have the following build files:
-
-```
-apps/BUCK
-apps/myapp/BUCK
-```
-
-then the following pattern would match all build targets in both of those files:
-
-```
-#
-# Matches (for example) //apps:common and //apps/myapp:app
-#
-//apps/...
-```
-
-### Build target patterns are not allowed in the deps argument
-
-Build target patterns cannot be used with the `deps` argument of a build rule. Buck requires that you specify all dependencies explicitly as either fully-qualified or relative build targets.
-By making dependencies explicit, Buck prevents build rules from *inadvertently* adding new dependencies, which can result in non-reproducible builds. In addition, if the added dependencies are not actually required, they can unnecessarily drive up the computational cost of the build.
-
-### Target aliases
-
-Buck supports the ability to define *aliases* for build targets; using aliases can improve brevity when specifying targets on the Buck command line. For more information, see the [`[alias]`](https://buck.build/files-and-dirs/buckconfig.html#alias) section in the documentation for [`.buckconfig`](https://buck.build/files-and-dirs/buckconfig.html).
diff --git a/docs/legacy/concepts/build-target.md b/docs/legacy/concepts/build-target.md
deleted file mode 100644
index cce3dd79a09d8..0000000000000
--- a/docs/legacy/concepts/build-target.md
+++ /dev/null
@@ -1,81 +0,0 @@
-# Build Target
-
-A *build target* is a string that identifies a build rule in your project. Build targets are used as arguments to Buck commands, such as [`buck build`](https://buck.build/command/build.html) and [`buck run`](https://buck.build/command/run.html).
diff --git a/docs/legacy/concepts/build-target.md b/docs/legacy/concepts/build-target.md
deleted file mode 100644
index cce3dd79a09d8..0000000000000
--- a/docs/legacy/concepts/build-target.md
+++ /dev/null
@@ -1,81 +0,0 @@
-# Build Target
-
-A *build target* is a string that identifies a build rule in your project. Build targets are used as arguments to Buck commands, such as [`buck build`](https://buck.build/command/build.html) and [`buck run`](https://buck.build/command/run.html). Build targets are also used as arguments to [build rules](https://buck.build/concept/build_rule.html) to enable one build rule to reference another. For example, a build rule might use a build target to reference another rule in order to specify that rule as a *dependency*.
-
-#### Fully-qualified build targets
-
-Here is an example of a *fully-qualified* build target:
-
-```
-//java/com/facebook/share:ui
-```
-
-A fully-qualified build target has three components:
-
-1. The `//` prefix indicates that the subsequent path is from the *root* of your project. You can use the [`buck root`](https://buck.build/command/root.html) command to identify the root of your project.
-2. The `java/com/facebook/share` between the `//` prefix and the colon (`:`) indicates that the [build file](https://buck.build/concept/build_file.html) (usually named `BUCK`) is located in the directory `java/com/facebook/share`.
-3. The `ui` after the colon (`:`) indicates the name of the build rule within the build file. Build rule names must be unique within a build file. By *name* we mean, more formally, the value of the `name` argument to the build rule.
-
-Note that the name of the build file itself—usually BUCK—does *not* occur in the build target. All build files within a given Buck project must have the same name—defined in the [`[buildfile].name`](https://buck.build/files-and-dirs/buckconfig.html#buildfile.name) entry of `.buckconfig`. Therefore, it is unnecessary to include the name in the target.
-The full regular expression for a fully-qualified build target is as follows:
-
-```
-[A-Za-z0-9._-]*//[A-Za-z0-9/._-]*:[A-Za-z0-9_/.=,@~+-]+
-|- cell name -|  |-- package path --|  |-- rule name --|
-```
-
-In Buck, a *cell* defines a directory tree of one or more Buck packages. For more information about Buck cells and their relationship to packages and projects, see the [Key Concepts](https://buck.build/about/overview.html) topic.
-**NOTE:** All target paths are assumed to start from the root of the Buck project. Buck does not support specifying a target path that starts from a directory below the root. Although the double forward slash (`//`) that prefixes target paths can be omitted when specifying a target from the command line (see **Pro Tips** below), Buck still assumes that the path is from the root. Buck does support *relative* build paths, but in Buck, that concept refers to specifying build targets *from within* a build file. See **Relative build targets** below for more details.
-
-#### Relative build targets
-
-A *relative* build target can be used to reference a [build rule](https://buck.build/concept/build_rule.html) *within the same* [build file](https://buck.build/concept/build_file.html). A relative build target starts with a colon (`:`) and is followed by only the third component (or *short name*) of the fully-qualified build target.
-The following snippet from a build file shows an example of using a relative build target.
-
-```
-#
-# Assume this rule is in //java/com/facebook/share/BUCK
-#
-java_binary(
-  name = 'ui_jar',
-  deps = [
-    #
-    # The following target path
-    #   //java/com/facebook/share:ui
-    # is the same as using the following relative path.
-    #
-    ':ui',
-  ],
-)
-```
-
-## Command-line Pro Tips
-
-Here are some ways that you can reduce your typing when you specify build targets as command-line arguments to the [`buck build`](https://buck.build/command/build.html) or [`buck run`](https://buck.build/command/run.html) commands.
-Consider the following example of a fully-qualified build target used with the `buck build` command:
-
-```
-buck build //java/com/facebook/share:share
-```
-
-Although Buck is always strict when parsing build targets in build files, Buck is flexible when parsing build targets on the command line. Specifically, the leading `//` is optional on the command line, so the above could be:
-
-```
-buck build java/com/facebook/share:share
-```
-
-Also, if there is a forward slash before the colon, it is ignored, so this could also be written as:
-
-```
-buck build java/com/facebook/share/:share
-```
-
-This form enables you to use tab-completion on the directory path, which dramatically reduces how much you need to type.
-Finally, if the final path element matches the value specified after the colon, it can be omitted:
-
-```
-# This is treated as //java/com/facebook/share:share.
-buck build java/com/facebook/share/
-```
-
-which makes the build target even easier to tab-complete. For this reason, the build rule for the primary deliverable in a build file is often given the same name as the parent directory. That way, it can be built from the command line with less typing.
-
-## See also
-
-Buck supports the ability to define **aliases** for build targets; using aliases can improve brevity when specifying targets on the Buck command line. For more information, see the [`[alias]`](https://buck.build/files-and-dirs/buckconfig.html#alias) section in the documentation for [`.buckconfig`](https://buck.build/files-and-dirs/buckconfig.html).
-A [**build target pattern**](https://buck.build/concept/build_target_pattern.html) is a string that describes a set of one or more build targets. For example, the pattern `//...` is used to build an entire project. For more information, see the **Build Target Pattern** topic.
diff --git a/docs/legacy/concepts/visibility.md b/docs/legacy/concepts/visibility.md
deleted file mode 100644
index b60b12ba521bc..0000000000000
--- a/docs/legacy/concepts/visibility.md
+++ /dev/null
@@ -1,56 +0,0 @@
-# Visibility
-
-Visibility determines whether a build rule can include a build target in its list of `deps`. In a large project, you may want to prevent developers from "reaching across" the project and pulling in additional code. Reducing the visibility of build rules can help prevent that type of behavior.
-There are two types of visibility attributes available, each of which takes a list of [build target patterns](https://buck.build/concept/build_target_pattern.html): `visibility`, which determines what other targets can depend on a target, and `within_view`, which determines what other targets a target can depend on.
-Both attributes act as allowlists, with some exceptions. In general, if a target is not listed, there may be no dependency relationship. If the `within_view` list is empty or unset, however, its check is bypassed. Similarly, targets defined in the same build file always act as if they were members of their siblings' `visibility` lists.
-There is also a special value, `'PUBLIC'`, which makes a build rule visible to all other rules. `'PUBLIC'` is valid in `visibility` but not `within_view`.
-In case of logically-conflicting lists, `within_view` takes precedence over `visibility`. If `//foo:bar` defines `//hello:world` in its `visibility` list, but `//hello:world` does not define `//foo:bar` in its `within_view` list, then `//hello:world` may not depend on `//foo:bar`, as the sketch below illustrates.
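-The following is a minimal sketch of that conflicting case; the rule bodies and the `//some/other:lib` entry are hypothetical:
-
-```
-# foo/BUCK
-java_library(
-  name = 'bar',
-  # //hello:world is granted visibility...
-  visibility = ['//hello:world'],
-)
-
-# hello/BUCK
-java_library(
-  name = 'world',
-  # ...but //foo:bar is absent from within_view,
-  # so this dependency is rejected.
-  within_view = ['//some/other:lib'],
-  deps = ['//foo:bar'],
-)
-```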
-
-## Examples
-
-A common library like Guava should be able to be included by any build rule:
-
-```
-prebuilt_jar(
-  name = 'guava',
-  binary_jar = 'guava-14.0.1.jar',
-  visibility = ['PUBLIC'],
-)
-```
-
-It is common to restrict the visibility of Android resources to the Java code that uses them:
-
-```
-android_resource(
-  name = 'ui_res',
-  res = 'res',
-  package = 'com.example',
-  visibility = ['//java/com/example/ui:ui'],
-)
-```
-
-Or it may be simpler to make it visible to the entire directory in case additional build rules are added to `java/com/example/ui/BUCK`:
-
-```
-android_resource(
-  name = 'ui_res',
-  res = 'res',
-  package = 'com.example',
-  visibility = ['//java/com/example/ui:'],
-)
-```
-
-Also, it is common to limit code for testing to be visible only to tests. If you define all of your Java unit tests in a folder named `javatests/` in the root of your project, then you could define the following rule to ensure that only build rules under `javatests/` can depend on JUnit:
-
-```
-prebuilt_jar(
-  name = 'junit',
-  binary_jar = 'junit-4.11.jar',
-  visibility = ['//javatests/...'],
-)
-```
-
-Finally, restricting the view of a target can be useful for preventing dependency creep:
-
-```
-java_library(
-  name = 'example',
-  visibility = ['PUBLIC'],
-  within_view = ['//foo:bar', '//hello:world'],
-)
-```
diff --git a/docs/legacy/files-and-directories/buck-out.md b/docs/legacy/files-and-directories/buck-out.md
deleted file mode 100644
index 4f0cfb2791f67..0000000000000
--- a/docs/legacy/files-and-directories/buck-out.md
+++ /dev/null
@@ -1,19 +0,0 @@
-# buck-out
-
-Buck stores build artifacts in a directory named `buck-out` in the root of your [project](https://buck.build/about/overview.html).
-You should not make assumptions about where Buck places your build artifacts within the directory structure beneath `buck-out` as these locations depend on Buck's implementation and could potentially change over time. Instead, to obtain the location of the build artifact for a particular target, use the `--show-output` option with the [`buck build`](https://buck.build/command/build.html) or the [`buck targets`](https://buck.build/command/targets.html) command.
-
-```
-buck targets --show-output
-buck build --show-output
-```
-
-You can also obtain the locations of your build artifacts by specifying either the `--build-report` or `--keep-going` options with `buck build`.
-Note that `--show-output` is going to be deprecated soon for `buck build` and replaced with `--show-outputs`. `--show-outputs` may print more than one build artifact per build target.
-
-```
-buck build --build-report
-buck build --keep-going
-```
-
-For more information about these options, see the topics for the [`buck build`](https://buck.build/command/build.html) and [`buck targets`](https://buck.build/command/targets.html) commands.
diff --git a/docs/legacy/files-and-directories/dot-buckconfig.md b/docs/legacy/files-and-directories/dot-buckconfig.md
deleted file mode 100644
index 277999eeb2ac5..0000000000000
--- a/docs/legacy/files-and-directories/dot-buckconfig.md
+++ /dev/null
@@ -1,3156 +0,0 @@
-# .buckconfig
-
-The root of your [project](https://buck.build/about/overview.html) must contain a configuration file named `.buckconfig`. Before executing, Buck reads this file to incorporate any customizations it specifies.
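-For orientation, here is a minimal sketch of a `.buckconfig`, using two settings that are documented later on this page:
-
-```
-# .buckconfig
-[buildfile]
-name = BUCK
-
-[color]
-ui = true
-```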
-
-## Performance impact of Buck configuration changes
-
-Because configuration settings are sometimes included in the cache keys that Buck uses in its caching system, changes to Buck's configuration can invalidate previously-built artifacts in Buck's caches. If this occurs, Buck rebuilds those artifacts, which can impact your build time.
-
-## The .buckconfig file uses the INI file format
-
-The `.buckconfig` file uses the [INI file format](http://en.wikipedia.org/wiki/INI_file). That is, it is divided into *sections* where each section contains a collection of key *names* and key *values*. The `.buckconfig` implementation supports some modifications to the INI file format; these are discussed below.
-
-### Other INI file parsers
-
-As mentioned previously, we have extended the INI file parser that Buck uses to parse configuration files. As a result, *INI file parsers provided by other languages or libraries are often not able to parse Buck's configuration files successfully*.
-
-### Dot character not supported in section names
-
-We do not support the use of the *dot* character (`.`) in section names within Buck configuration files. For example, the following is **not** supported—*although Buck does not issue a warning or error*.
-
-```ini
-[foo.bar]
-  baz=1
-```
-
-Note that sometimes you might need to define your own custom sections, such as for platform flavors for [C++](https://buck.build/files-and-dirs/buckconfig.html#cxx) or [Python](https://buck.build/files-and-dirs/buckconfig.html#python). These scenarios are examples of when you should be careful not to introduce the dot character in section names.
-This constraint exists because Buck uses the dot character to delimit section names and key names in other contexts, such as the `--config` command-line parameter. For information about `--config`, see the [**Common Parameters**](https://buck.build/command/common_parameters.html) topic.
-
-## Character encoding
-
-To ensure that any character can be encoded in a `.buckconfig` key value, you can use escape sequences to encode characters that would otherwise be problematic.
-The following escape sequences are supported.
-
-|Escape sequence |Character |
-|--- |--- |
-|`\\` |backslash |
-|`\"` |double quote |
-|`\n` |newline |
-|`\r` |carriage return |
-|`\t` |tab |
-|`\x##` |Unicode character with code point ## (in hex) |
-|`\u####` |Unicode character with code point #### (in hex) |
-|`\U########` |Unicode character with code point ######## (in hex) |
-
-## Key values as lists
-
-Although the standard INI format supports only key values that represent a single item, Buck supports key values that represent a list of items. The syntax is to separate the items in the list using the space (`0x20`) character. For example, a key value for the list of command-line flags to be passed to a compiler could be represented as a list of the flags separated by spaces:
-
-```
-flags = -foo -bar -baz -qux
-```
-
-When a key value is parsed as a list instead of a single item, the separator character is interpreted as a separator only when it occurs *outside of double quotes*. For example, if `flags` is a key value interpreted as a list of items separated by spaces, then
-
-```
-flags = -foo "-bar \u0429"
-```
-
-results in the two strings: `-foo` and `-bar Щ`; the space character between `-bar` and `\u0429` is not interpreted as a separator.
-
-## Transclusion of values from one key to another
-
-Values from other keys can be transcluded into the current key using the following syntax inside the current key value.
-
-```
-$(config <section>.<field>)
-```
-
-For example, to use the [`[go].vendor_path`](https://buck.build/files-and-dirs/buckconfig.html#go.vendor_path) in a custom setting:
-
-```
-[custom_section]
-custom_value = $(config go.vendor_path)
-```
-
-## Comments
-
-In addition to the semicolon (`;`), you can use the pound sign (`#`) as a comment character in `.buckconfig`.
-
-## .buckconfig.local
-
-The root of your [project](https://buck.build/about/overview.html) may contain a second configuration file named `.buckconfig.local`. Its format is the same as that of `.buckconfig`, but settings in `.buckconfig.local` override those in `.buckconfig`. In practice, `.buckconfig` is a version-controlled file that contains settings that are applicable to all team members, whereas `.buckconfig.local` is excluded from version control to allow users to define personal settings, such as personal aliases.
-
-## Other initialization files
-
-In addition to the `.buckconfig` and `.buckconfig.local` files in the project root, Buck reads configuration settings from the following additional locations, some of which are actually directories:
-
-1. Directory `.buckconfig.d` located in the project root directory.
-2. File `.buckconfig` and directory `.buckconfig.d` located in the current user's home directory which, on Unix-like systems, is available from the `HOME` environment variable or through the `~` symbol.
-3. File `buckconfig` and directory `buckconfig.d` located in system directory `/etc/`.
-
-Buck treats *any* file—irrespective of name—in a `.buckconfig.d` (`buckconfig.d`) directory (excluding files found in subdirectories) as a Buck configuration file, provided that it adheres to `.buckconfig` syntax.
-Note that a `.buckconfig.d` directory is distinct from the similarly-named `.buckd` directory which is used by the [Buck Daemon (`buckd`)](https://buck.build/concept/buckd.html).
-For a description of how Buck resolves collisions between settings in these configuration files, see the section [**Precedence of Buck configuration specifications**](https://buck.build/files-and-dirs/buckconfig.html#config-precedence) below.
-
-## Command-line control of configuration
-
-In addition to the above configuration files, Buck supports specifying additional configuration files from the Buck command line using the `--config-file` parameter.
-You can also specify configuration settings *individually* on the Buck command line using the `--config` (`-c`) parameter. Furthermore, you can aggregate these settings into *flag files* using the `--flagfile` parameter. A flag file provides similar functionality to a configuration file but uses a different syntax. Flag files are sometimes called *mode files* or *at* (`@`) files.
-For more information about the `--config-file` and `--flagfile` parameters, see the [**Common Parameters**](https://buck.build/command/common_parameters.html) topic.
-
-## Precedence of Buck configuration specifications
-
-The following list shows the order of precedence for how Buck interprets its configuration specifications. Settings specified using a method closer to the top of the list have higher precedence and will override those lower on the list. For example, the `.buckconfig` file in the project directory overrides a `.buckconfig` file in the user's `HOME` directory.
-
-1. Configuration specified on the command line using `--config` (`-c`), `--config-file` and `--flagfile`. Configuration specified later on the command line overrides configuration specified earlier.
-2. `.buckconfig.local` in the project directory.
-3. `.buckconfig` in the project directory.
-4. `.buckconfig` in the `HOME` directory.
-5. Files in a `.buckconfig.d` subdirectory of the project directory, irrespective of filename.
-6. Files in a `.buckconfig.d` subdirectory of the `HOME` directory, irrespective of filename.
-7. `buckconfig` in the `/etc/` directory.
-8. Files in a `buckconfig.d` subdirectory of the `/etc/` directory, irrespective of filename.
-
-Files in a `.buckconfig.d` (`buckconfig.d`) directory have precedence according to the lexicographical order of their file names. Files *later* in the lexicographical order have precedence over files earlier in that order.
-
-## Configuration files can include other files
-
-Any of the configuration files that we've discussed so far can also include, by reference, other files that contain configuration information. These included files can contain complete `.buckconfig` sections or they can contain a group of key name/value pairs that constitute part of a section. In this second use case, you'll need to ensure that the *included* file is referenced beneath the appropriate section in the *including* file. Because of this additional complexity, we recommend that you include only files that contain complete sections.
-**Note:** Inclusion of files is a Buck-specific extension to the INI file parser that Buck uses. Therefore, if you use this feature, your Buck configuration files will probably not be parsable by other more-generic INI file parsers.
-The syntax to include a file is
-
-```
-<file:path-to-included-file>
-```
-
-where *path-to-included-file* is either a relative path from the including file (recommended) or an absolute path from the root of the file system.
-You can also specify that the file should be included only if it exists by prefixing with a question mark (`?`).
-
-```
-<?file:path-to-included-file>
-```
-
-If you use this prefix, it is not an error condition if the file does not exist; Buck just silently continues to process the rest of the configuration file.
-In the following example, the `.buckconfig` file includes the file `cxx-other-platform.include` which exists in the subdirectory `cxx-other-platform`. The `.buckconfig` file will also include the file `future-platform` from the directory `future-platform.include` if that file exists.
-
-```
-#
-# .buckconfig
-#
-[cxx]
-  cxxppflags="-D MYMACRO=\"Buck\""
-
-<file:cxx-other-platform/cxx-other-platform.include>
-
-<?file:future-platform.include/future-platform>
-
-#
-# cxx-other-platform.include
-#
-[cxx#other_platform]
-  cxxppflags="-D MYMACRO=\"Watchman\""
-```
-
-## Sections
-
-The following sections are recognized by Buck:
-[`[adb]`](https://buck.build/files-and-dirs/buckconfig.html#adb)
-[`[alias]`](https://buck.build/files-and-dirs/buckconfig.html#alias)
-[`[android]`](https://buck.build/files-and-dirs/buckconfig.html#android)
-[`[apple]`](https://buck.build/files-and-dirs/buckconfig.html#apple)
-[`[build]`](https://buck.build/files-and-dirs/buckconfig.html#build)
-[`[buildfile]`](https://buck.build/files-and-dirs/buckconfig.html#buildfile)
-[`[cache]`](https://buck.build/files-and-dirs/buckconfig.html#cache)
-[`[client]`](https://buck.build/files-and-dirs/buckconfig.html#client)
-[`[color]`](https://buck.build/files-and-dirs/buckconfig.html#color)
-[`[credentials]`](https://buck.build/files-and-dirs/buckconfig.html#credentials)
-[`[cxx]`](https://buck.build/files-and-dirs/buckconfig.html#cxx)
-[`[d]`](https://buck.build/files-and-dirs/buckconfig.html#d)
-[`[doctor]`](https://buck.build/files-and-dirs/buckconfig.html#doctor)
-[`[download]`](https://buck.build/files-and-dirs/buckconfig.html#download)
-[`[dx]`](https://buck.build/files-and-dirs/buckconfig.html#dx)
-[`[export_file]`](https://buck.build/files-and-dirs/buckconfig.html#export_file)
-[`[go]`](https://buck.build/files-and-dirs/buckconfig.html#go)
-[`[groovy]`](https://buck.build/files-and-dirs/buckconfig.html#groovy)
-[`[halide]`](https://buck.build/files-and-dirs/buckconfig.html#halide)
-[`[httpserver]`](https://buck.build/files-and-dirs/buckconfig.html#httpserver)
-[`[incompatible]`](https://buck.build/files-and-dirs/buckconfig.html#incompatible)
-[`[intellij]`](https://buck.build/files-and-dirs/buckconfig.html#intellij)
-[`[java]`](https://buck.build/files-and-dirs/buckconfig.html#java)
-[`[kotlin]`](https://buck.build/files-and-dirs/buckconfig.html#kotlin)
-[`[log]`](https://buck.build/files-and-dirs/buckconfig.html#log)
-[`[lua]`](https://buck.build/files-and-dirs/buckconfig.html#lua)
-[`[maven_repositories]`](https://buck.build/files-and-dirs/buckconfig.html#maven_repositories)
-[`[ndk]`](https://buck.build/files-and-dirs/buckconfig.html#ndk)
-[`[ocaml]`](https://buck.build/files-and-dirs/buckconfig.html#ocaml)
-[`[parser]`](https://buck.build/files-and-dirs/buckconfig.html#parser)
-[`[project]`](https://buck.build/files-and-dirs/buckconfig.html#project)
-[`[python]`](https://buck.build/files-and-dirs/buckconfig.html#python)
-[`[repositories]`](https://buck.build/files-and-dirs/buckconfig.html#repositories)
-[`[resources]`](https://buck.build/files-and-dirs/buckconfig.html#resources)
-[`[resources_per_rule]`](https://buck.build/files-and-dirs/buckconfig.html#resources_per_rule)
-[`[rust]`](https://buck.build/files-and-dirs/buckconfig.html#rust)
-[`[sandbox]`](https://buck.build/files-and-dirs/buckconfig.html#sandbox)
-[`[test]`](https://buck.build/files-and-dirs/buckconfig.html#test)
-[`[thrift]`](https://buck.build/files-and-dirs/buckconfig.html#thrift)
-[`[tools]`](https://buck.build/files-and-dirs/buckconfig.html#tools)
-[`[ui]`](https://buck.build/files-and-dirs/buckconfig.html#ui)
-[`[worker]`](https://buck.build/files-and-dirs/buckconfig.html#worker)
-
-## [adb]
-
-This section configures adb behavior.
-
-### adb_restart_on_failure
-
-This specifies whether to restart adb on failure or not.
-
-```
-[adb]
-adb_restart_on_failure = true
-```
-
-### multi_install_mode
-
-This specifies whether multi-install mode is enabled or disabled by default.
-
-```
-[adb]
-multi_install_mode = false
-```
-
-### staged_install_mode
-
-This specifies whether staged install mode is enabled or disabled by default.
-
-```
-[adb]
-staged_install_mode = false
-```
-
-## [alias]
-
-This section contains definitions of [build target](https://buck.build/concept/build_target.html) aliases.
-
-```
-[alias]
-app = //apps/myapp:app
-apptest = //apps/myapp:test
-```
-
-These aliases can then be used from the command line:
-
-```
-$ buck build app
-$ buck test apptest
-```
-
-You can also suffix aliases with flavors:
-
-```
-$ buck build app#src_jar
-
-# This will expand the alias and effectively build
-# the target returned by:
-$ buck targets --resolve-alias app#src_jar
-//apps/myapp:app#src_jar
-```
-
-## [android]
-
-This section configures android-specific build behavior.
-
-### build_tools_version
-
-This specifies the version of the Android SDK Build-tools that all Android code in the project should be built against. By default, Buck will select the newest version found on the system.
-
-```
-[android]
-build_tools_version = 23.0.1
-```
-
-### compile_sdk_version
-
-This specifies the version of the Android SDK that all Android code in the project should be built against. Even if not specified, the version that Buck chose to use will be printed to the console during the build. A list of valid values on your system can be found by running `android list target --compact`.
-
-```
-[android]
-compile_sdk_version = Google Inc.:Google APIs:21
-```
-
-### sdk_path
-
-This specifies the absolute path to the Android SDK that all Android code in the project should be built against. The default is empty.
-Setting this property has the same effect as if you had set any of the following environment variables to the same value:
-
-* `ANDROID_SDK`
-* `ANDROID_SDK_ROOT`
-* `ANDROID_HOME`
-
-Note that Buck gives precedence to the values of these environment variables—in the order in which they are listed above—over the value of this property in `.buckconfig`.
-
-```
-[android]
-sdk_path = /Library/Android/sdk
-```
-
-## [apple]
-
-This section includes settings that are specific to Apple platform rules.
-
-### asset_catalog_validation
-
-Buck can check `.xcassets` contents for errors that can later cause silent failures, like having multiple images with the same name or missing `Contents.json` files. To add extra validation above what Xcode does, set this option to `STRICT`.
-
-```
-[apple]
-asset_catalog_validation = XCODE
-```
-
-### codesign
-
-To override the default path to `codesign`, set this setting to either a file path or a Buck target.
-
-```
-[apple]
-codesign = //path/to/target/that/creates:codesign
-```
-
-### codesign_timeout
-
-The timeout of the code-signing step in seconds. The value is set to 300 seconds by default if not specified explicitly.
-
-```
-[apple]
-codesign_timeout = 600
-```
-
-### code_sign_identities_command
-
-Specifies a command, with any optional arguments, that Buck will use to get the current key fingerprints available for code signing. This command should output a list of hashes and common names to standard output in the same format as `security find-identity -v -p codesigning`. If unspecified, Buck will use `security find-identity -v -p codesigning`.
-
-```
-[apple]
-code_sign_identities_command = path/to/command --arg1 --arg2
-```
-
-### default_debug_info_format_for_binaries
-
-The `default_debug_info_format_for_binaries` setting controls the default debug info format that is used when building binary targets. If you don't specify it, `DWARF_AND_DSYM` will be used. You can disable debug data by specifying `NONE`. You can produce an unstripped binary by specifying `DWARF`.
-
-```
-[apple]
-default_debug_info_format_for_binaries = NONE
-```
-
-### default_debug_info_format_for_libraries
-
-The `default_debug_info_format_for_libraries` setting controls the default debug info format that is used when building dynamic library targets. If you don't specify it, `DWARF` will be used. You can disable debug data by specifying `NONE`. You can produce a dSYM file for the library by specifying `DWARF_AND_DSYM`.
-
-```
-[apple]
-default_debug_info_format_for_libraries = DWARF
-```
-
-### default_debug_info_format_for_tests
-
-The `default_debug_info_format_for_tests` setting controls the default debug info format that is used when building test targets. If you don't specify it, `DWARF` will be used. You can disable debug data by specifying `NONE`. You can produce a dSYM file by specifying `DWARF_AND_DSYM`.
-
-```
-[apple]
-default_debug_info_format_for_tests = DWARF_AND_DSYM
-```
-
-### device_helper_path
-
-If you want Buck to be able to install to devices, you need to provide the path to the [`fbsimctl`](https://github.com/facebook/FBSimulatorControl/) binary.
-
-```
-[apple]
-device_helper_path = third-party/fbsimctl/fbsimctl
-```
-
-### ipa_compression_level
-
-Specify a compression level used when creating the ipa. The possible values are:
-
-* `none`: Do not compress the ipa.
-* `min`: Use minimum compression level.
-* `default` (default): Use medium compression level.
-* `max`: Use maximum compression level.
-
-If omitted, the `default` value will be used.
-
-```
-[apple]
-ipa_compression_level = min
-```
-
-### provisioning_profile_read_command
-
-Specifies a command, with any optional arguments, that Buck will use to decode Apple's provisioning profiles for iOS builds. The full path of the provisioning profile will be appended after the command and any arguments specified here. If unspecified, Buck will use `openssl smime -inform der -verify -noverify -in`.
-
-```
-[apple]
-provisioning_profile_read_command = path/to/command --arg1 --arg2
-```
-
-### provisioning_profile_search_path
-
-Specifies a path where Buck will look for provisioning profiles (files with extension `.mobileprovision`) that it can use to provision the application to be used on a device. You can specify either an absolute path or one relative to the project root. If unspecified, Buck will look in `~/Library/MobileDevice/Provisioning Profiles`.
-
-```
-[apple]
-provisioning_profile_search_path = path/to/provisioning/profiles
-```
-
-### target_sdk_version
-
-For each platform, you can specify the target SDK version to use. The format is `{platform}_target_sdk_version`.
-
-```
-[apple]
-iphonesimulator_target_sdk_version = 7.0
-iphoneos_target_sdk_version = 7.0
-macosx_target_sdk_version = 10.9
-```
-
-### test_log
-
-When running Apple tests via `xctool`, Buck can set environment variables to tell the tests where to write debug logs and what log level to use.
-By default, Buck tells `xctool` to set two environment variables named `FB_LOG_DIRECTORY` and `FB_LOG_LEVEL` when running tests, which you can read from your test environment:
-
-```
-FB_LOG_DIRECTORY=buck-out/gen/path/to/logs
-FB_LOG_LEVEL=debug
-```
-
-You can override the default names for these environment variables and the value for the debug log level via the following config settings:
-
-```
-[apple]
-test_log_directory_environment_variable = MY_LOG_DIRECTORY
-test_log_level_environment_variable = MY_LOG_LEVEL
-test_log_level = verbose
-```
-
-### use_flavored_cxx_sections
-
-By default, Buck uses the C/C++ toolchain and flag settings in the `cxx` section to extend the Apple C/C++ platform. With this parameter set, Buck will instead use settings in flavored `cxx#` sections (e.g. `cxx#macosx-x86_64.cxx_flags = -foo`).
-
-```
-[apple]
-use_flavored_cxx_sections = true
-```
-
-### use_header_maps_in_xcode
-
-Xcode projects generated by Buck by default use header maps for header search paths. This speeds up builds for large projects over using regular directory header search paths, but breaks some Xcode features, like header file name autocompletion. If that is an issue, use the following option to disable the use of header maps.
-
-```
-[apple]
-use_header_maps_in_xcode = false
-```
-
-### xcode_developer_dir
-
-By default, Buck will use the output of `xcode-select --print-path` to determine where Xcode's developer directory is. However, you can specify a directory in the config to override whatever value that command would return.
-
-```
-[apple]
-xcode_developer_dir = path/to/developer/directory
-```
-
-### xcode_developer_dir_for_tests
-
-Optionally override the Xcode developer directory for running tests, if you want them to be run with a different Xcode version than the version used for building. If absent, falls back to `xcode_developer_dir` and finally `xcode-select --print-path`.
-
-```
-[apple]
-xcode_developer_dir_for_tests = path/to/developer/directory/for_tests
-```
-
-### xctool_default_destination_specifier
-
-This setting is passed directly to `xctool`, and then to `xcodebuild` as the `-destination` argument.
-
-```
-[apple]
-xctool_default_destination_specifier = platform=iOS Simulator
-```
-
-For more detail, see the man page for `xcodebuild`. To access the man page, type the following from your Terminal prompt:
-
-```
-man xcodebuild
-```
-
-and then use `/` to search for the string `Destinations`.
-
-### xctool_path
-
-If you want to run tests with Buck, you will need to get [`xctool`](https://github.com/facebook/xctool) and tell Buck where to find it. This setting lets you specify a path to a binary. You should use either this setting or [`[apple].xctool_zip_target`](https://buck.build/files-and-dirs/buckconfig.html#apple.xctool_zip_target).
-
-```
-[apple]
-xctool_path = path/to/binary/of/xctool
-```
-
-### xctool_zip_target
-
-If you want to run tests with Buck, you will need to get [`xctool`](https://github.com/facebook/xctool) and tell Buck where to find it. This setting lets you specify a [build target](https://buck.build/concept/build_target.html). You should use either this setting or [`[apple].xctool_path`](https://buck.build/files-and-dirs/buckconfig.html#apple.xctool_path).
-
-```
-[apple]
-xctool_zip_target = //path/to/target/that/creates:xctool-zip
-```
-
-### *_package_command
-
-Specify a custom command to run for `apple_package()` rules.
-The syntax of this field is similar to the `cmd` field of [`genrule`](https://buck.build/rule/genrule.html), and supports some expansions:
-`SRCS`
-Expands to the absolute path of the output of the `bundle` argument to the `apple_package()` rule.
-`OUT`
-Expands to the output file for the `apple_package()` rule. The file specified by this variable must always be written by this command.
-`SDKROOT`
-Expands to the SDK root directory for the requested SDK. For example, `/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS9.2.sdk/`.
-Note that since strings in the config can be quoted, literal quotes can only be written by quoting the string and using escaped quotes. If omitted, this will revert to the built-in behavior. When this option is specified, `*_package_extension` must also be specified.
-
-```
-[apple]
-iphoneos_package_command = "\"$PLATFORM_DIR/Developer/usr/bin/PackageApplication\" \"$SRCS\" \"$OUT\""
-iphoneos_package_extension = zip
-```
-
-### *_package_extension
-
-Specify the output extension for custom `apple_package` rules configured with `*_package_command`. Either specify both this config option and `*_package_command`, or omit both.
-
-### *_replacement
-
-Replace Xcode-provided tools from imported SDKs and toolchains. The input path must point to a valid executable file. This takes precedence over `apple.*_xcode_tool_name_override`, which only searches for a replacement within the workspace.
-
-```
-[apple]
-*_replacement = /usr/bin/true
-```
-
-### *_toolchains_override
-
-Specify a comma-delimited custom list of toolchains to use when building with a particular SDK. This is the Buck equivalent of the `TOOLCHAINS` environment variable when building with Xcode. If omitted, this will revert to the built-in behavior.
-
-```
-osx_toolchains_override = tools.stable,tools.swift40,tools.common
-```
-
-### *_version_override
-
-Specify the version string to use for an Xcode tool. By default, an Xcode tool's version value is calculated automatically from its containing SDK and toolchain. But in some cases (e.g. when tools are overridden by `apple.*_replacement`), it needs to be manually overridden in order to prevent rule key collisions.
-
-```
-[apple]
-actool_replacement=/some/path/to/custom/actool
-actool_version_override=custom_actool_1.0
-```
-
-### *_xcode_tool_name_override
-
-Specify a custom Xcode tool name to use in place of an existing one. When set, Buck will look up Xcode search paths to locate the tool, and use it for tool invocations. This value is ignored when `apple.*_replacement` for the same tool is set.
-
-```
-[apple]
-# Use (my_clang|my_actool) executable which exists in one of the
-# imported SDKs and toolchains, instead of the defaults.
-clang_xcode_tool_name_override=my_clang
-actool_xcode_tool_name_override=my_actool
-```
-
-## [build]
-
-This section includes settings that control build engine behavior.
-
-### artifact_cache_size_limit
-
-Specifies the maximum size, in bytes, of a build artifact (output file) that Buck caches.
-
-```
-#
-# Use a limit of 50 MB.
-#
-artifact_cache_size_limit = 52428800
-```
-
-This value is optional. If you do not specify a value, then it sets no limit to the size of an artifact that Buck caches—but see the note below regarding distributed caches.
-**Note:** This value sets an upper bound on artifact size for all values of [`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode).
-The parameter [`[cache].http_max_store_size`](https://buck.build/files-and-dirs/buckconfig.html#cache.http_max_store_size) sets an artifact size limit *only* for distributed cache modes (`http` and `thrift_over_http`). Therefore, it is not meaningful to set a value for `http_max_store_size` that is larger than the value of `artifact_cache_size_limit`.
-
-### delete_temporaries
-
-If true, Buck deletes some temporary files immediately after executing a build rule. This is useful for conserving disk space when performing large builds. By default, temporary files are not deleted.
-
-```
-[build]
-delete_temporaries = false
-```
-
-### depfiles
-
-Configures the use of dependency files for rules that support them. This is an optimization that is useful when dependencies are over-specified and the rule can dynamically determine the subset of dependencies it actually needs. The possible values are:
-
-* `enabled`: Use dependency files to avoid unnecessary rebuilds.
-* `cache` (default): Use dependency files to avoid unnecessary rebuilds and to store/fetch artifacts to/from the cache.
-* `disabled`: Do not use dependency files for rebuild detection.
-
-```
-[build]
-depfiles = cache
-```
-
-### engine
-
-This has two possible values that change the behavior of how Buck operates when building a [build target](https://buck.build/concept/build_target.html):
-
-* `shallow` (default): only the required transitive dependencies of a [build target](https://buck.build/concept/build_target.html) are materialized locally. Cache hits can result in missing transitive dependencies that are not needed for the final output.
-* `deep`: ensure that all transitive dependencies of a [build target](https://buck.build/concept/build_target.html) are materialized locally.
-
-```
-[build]
-engine = shallow
-```
-
-### max_depfile_cache_entries
-
-Sets the maximum size of the depfile cache for each input source file. This is only used when setting [`[build].depfiles`](https://buck.build/files-and-dirs/buckconfig.html#build.depfiles) to `cache`. An ideal setting for this should be big enough for the working set of all possible header states that a given unchanged source file uses.
-
-```
-[build]
-max_depfile_cache_entries = 256
-```
-
-### network_threads
-
-The number of threads to be used for network I/O. The default value is the number of cores on the machine.
-
-```
-[build]
-network_threads = 8
-```
-
-### rule_key_caching
-
-Enables caching of rule key calculations between builds when using the Buck daemon.
-
-```
-[build]
-rule_key_caching = true
-```
-
-### threads
-
-Sets the maximum number of threads to use for building. By default, Buck uses the number of available cores multiplied by `1.25`.
-
-```
-[build]
-threads = 4
-```
-
-### thread_core_ratio
-
-Sets the maximum number of threads to use for building as a ratio of the number of available cores (e.g. `0.75` on a 4 core machine would limit building to 3 threads, and a value of `1.25` on the same machine would attempt to use 5 threads).
-
-```
-[build]
-thread_core_ratio = 0.75
-```
-
-### thread_core_ratio_max_threads
-
-The maximum number of threads to use when calculating the number of build threads from thread_core_ratio (e.g. a value of 2 on a 4 core machine would ensure that, at most, 2 threads were used, and a value of 10 on a 40 core machine would ensure that, at most, 10 threads were used).
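-As a sketch of how the ratio and the cap interact (the 8-core machine here is a hypothetical example): with the settings below, `0.75 * 8` yields 6 threads, which the cap then reduces to 4.
-
-```
-[build]
-thread_core_ratio = 0.75
-thread_core_ratio_max_threads = 4
-```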
-
-```
-[build]
-thread_core_ratio_max_threads = 10
-```
-
-### thread_core_ratio_min_threads
-
-The minimum number of threads to use when calculating the number of build threads from thread_core_ratio (e.g. a value of 1 on a 4 core machine would ensure that, at least, 1 thread was used, and a value of 4 on a 40 core machine would ensure that, at least, 4 threads were used).
-
-```
-[build]
-thread_core_ratio_min_threads = 1
-```
-
-### thread_core_ratio_reserved_cores
-
-Limit the maximum number of build threads to be the number of detected cores minus this value (e.g. a value of 1 on a 4 core machine would ensure that, at most, 3 cores were used, and a value of 2 on a 40 core machine would ensure that, at most, 38 cores were used).
-
-```
-[build]
-thread_core_ratio_reserved_cores = 1
-```
-
-### type
-
-Sets the type of the build that Buck itself has been built with. This allows Buck to distinguish different kinds of builds. When you run `ant` locally, this will be automatically set to `LOCAL_ANT`. When you build buck using buck locally, e.g. `buck build buck`, this will be automatically set to `LOCAL_PEX`. If you are deploying buck through a central deployment system, you may want to set the build type to `RELEASE_PEX`:
-
-```
-buck build buck --config build.type=RELEASE_PEX
-```
-
-**Note:** this setting does not affect how buck builds other rules. It only affects how *buck will build buck*.
-
-```
-[build]
-type = RELEASE_PEX
-```
-
-## [buildfile]
-
-This section includes settings that control build file behavior.
-
-### includes
-
-This sets a list of paths to files that will be automatically included by every build file. This is equivalent to calling [`include_defs()`](https://buck.build/function/include_defs.html) in every build file.
-**NOTE:** We recommend that you do not use this property. This property can make your builds difficult to maintain and debug, and it will be deprecated in a future release of Buck.
-
-```
-[buildfile]
-includes = //core/DEFS
-```
-
-### name
-
-The name of [build file](https://buck.build/concept/build_file.html)s within a project. This defaults to `BUCK`. We recommend that you use the default name. However, you could specify a different name—such as `TARGETS` shown below—in order to support, for example, a legacy project that used different buildfile naming conventions.
-
-```
-[buildfile]
-name = TARGETS
-```
-
-## [cache]
-
-This section configures build artifact caching. Caching can be configured to use the local filesystem, an SQLite database, or a remote distributed cache that can be shared among developers. Caching is disabled by default. The [`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) setting—described below—determines which properties are relevant to the caching configuration; other properties are ignored by Buck.
-
-### mode
-
-A comma-separated list of caching policies to use. Valid values are:
-
-* `dir` (default): Use a directory-based cache on the local filesystem.
-* `http`: Use an http-based cache. See [Binary HTTP Cache API](https://buck.build/concept/http_cache_api.html#binary_http).
-* `thrift_over_http`: Use an http-based cache that uses thrift for object metadata. See [Thrift over HTTP Cache API](https://buck.build/concept/http_cache_api.html#thrift_http).
-* `sqlite`: Use a SQLite-based cache that inlines small artifacts in the database and stores large artifacts on the local filesystem.
-
-```
-[cache]
-mode = dir, http, sqlite
-```
-
-### dir
-
-The path to use for directory-based caching.
-The path can be:
-
-* An absolute path in your local file system, such as `/Volumes/mySSD/cache`.
-* A path relative to your home directory that uses [tilde (`~`) expansion](https://www.gnu.org/software/bash/manual/html_node/Tilde-Expansion.html), such as `~/local/cache`.
-* A path relative to the root of your Buck project, such as [**`buck-out`**](https://buck.build/files-and-dirs/buck-out.html)**`/cache`**, which is the **default**.
-
-[`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) must contain `dir`.
-
-```
-[cache]
-dir = buck-out/cache
-```
-
-### dir_cache_names
-
-A comma-separated list of names used to configure multiple dir caches. The caches will be used **serially** in the order in which their names are specified here. If an artifact is found further along in the list, an attempt to store it in the caches earlier in the list will be made. In the following example, if the artifact is found in the `warm` cache, it will not be stored in the `local` cache. Note: if `[cache] dir` or `[cache] dir_mode` are found, then Buck will fall back to single dir cache mode and `[cache] dir_cache_names` will be completely ignored.
-[`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) must contain `dir`.
-
-```
-[cache]
-mode = dir
-dir_cache_names = warm, local
-
-[cache#warm]
-dir = ~/prefetched_cache
-dir_mode = readonly
-
-[cache#local]
-dir = ~/buck_cache
-dir_mode = readwrite
-```
-
-### dir_max_size
-
-The maximum cache size for directory-based caching. The default size is unlimited.
-[`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) must contain `dir`.
-
-```
-[cache]
-dir_max_size = 10GB
-```
-
-### dir_mode
-
-Dictates if the cache is `readonly`, `passthrough`, or `readwrite` (default) when using directory-based caching.
-[`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) must contain `dir`.
-
-```
-[cache]
-dir_mode = readwrite
-```
-
-### serve_local_cache
-
-Make the directory-based cache available to other hosts on the network via Buck's HTTP server (enabled under [`[httpserver]`](https://buck.build/files-and-dirs/buckconfig.html#httpserver)).
-[`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) must contain `dir`.
-
-```
-[cache]
-serve_local_cache = false
-```
-
-### served_local_cache_mode
-
-Dictates if the cache is `readonly` (default) or `readwrite` when [`[cache].serve_local_cache`](https://buck.build/files-and-dirs/buckconfig.html#cache.serve_local_cache) is enabled.
-[`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) must contain `dir`.
-
-```
-[cache]
-served_local_cache_mode = readwrite
-```
-
-### http_url
-
-The URL to use to contact the cache when using http-based caching. Buck communicates with the server using a [simple API](https://buck.build/concept/http_cache_api.html).
-[`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) must contain `http`.
-
-```
-[cache]
-http_url = http://localhost:8080
-```
-
-### http_mode
-
-Dictates if the cache is `readonly` or `readwrite` (default) when using http-based caching.
-[`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) must contain `http` or `thrift_over_http`.
-
-```
-[cache]
-http_mode = readwrite
-```
-
-### http_read_headers
-
-A semicolon-separated set of HTTP headers to use when reading from the cache when using http-based caching. The default is no headers.
-[`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) must contain `http`.
-
-```
-[cache]
-http_read_headers = User-Agent: buck
-```
-
-### http_write_headers
-
-A semicolon-separated set of HTTP headers to use when writing to the cache when using http-based caching. The default is no headers.
-[`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) must contain `http`.
-
-```
-[cache]
-http_write_headers = Authorization: XXXXXXX; User-Agent: buck
-```
-
-### http_timeout_seconds
-
-Dictates the timeout per connection when using http-based caching. It is used as the default value for http_connect_timeout_seconds, http_read_timeout_seconds, and http_write_timeout_seconds if they are not set. The default is `3`.
-[`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) must contain `http`.
-
-```
-[cache]
-http_timeout_seconds = 3
-```
-
-### http_connect_timeout_seconds
-
-Dictates the timeout on http connects when using http-based caching. If the value is not set, Buck tries the value set for http_timeout_seconds and then falls back to the default value `3`.
-[`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) must contain `http`.
-
-```
-[cache]
-http_connect_timeout_seconds = 3
-```
-
-### http_read_timeout_seconds
-
-Dictates the timeout on http reads when using http-based caching. If the value is not set, Buck tries the value set for http_timeout_seconds and then falls back to the default value `3`.
-[`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) must contain `http`.
-
-```
-[cache]
-http_read_timeout_seconds = 3
-```
-
-### http_write_timeout_seconds
-
-Dictates the timeout on http writes when using http-based caching. If the value is not set, Buck tries the value set for http_timeout_seconds and then falls back to the default value `3`.
-[`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) must contain `http`.
-
-```
-[cache]
-http_write_timeout_seconds = 3
-```
-
-### http_max_concurrent_writes
-
-The number of writer threads to use to upload to the http cache when using http-based caching. The default is `1`. Note that when using multiple http caches (see below), the writer thread pool is shared between them all.
-[`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) must contain `http`.
-
-```
-[cache]
-http_max_concurrent_writes = 1
-```
-
-### http_writer_shutdown_timeout_seconds
-
-The length of time to wait after the build completes for any remaining http cache uploads to complete before forcefully shutting down the writer thread pool when using http-based caching. The default is `1800` (30 minutes).
-[`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) must contain `http`.
-
-```
-[cache]
-http_writer_shutdown_timeout_seconds = 1800
-```
-
-### http_error_message_format
-
-This setting allows for the customization of how http cache errors appear to the user. If the text `{cache_name}` is present, it will be replaced with the name of the cache. If the text `{error_message}` is present, it will be replaced with the error message.
-[`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) must contain `http` or `thrift_over_http`.
-
-```
-[cache]
-http_error_message_format = The cache named {cache_name} encountered an error: {error_message}
-```
-
-### http_error_message_limit
-
-This setting determines after how many errors Buck prints the `http_error_message_format` message.
-Each time the message is printed, the counter resets to 0 to avoid spamming the console.
-[`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) must contain `http` or `thrift_over_http`.
-
-```
-[cache]
-http_error_message_limit = 100
-```
-
-### http_max_store_attempts
-
-Maximum number of times to attempt to store an item in the cache before giving up.
-[`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) must contain `http` or `thrift_over_http`.
-
-```
-[cache]
-http_max_store_attempts = 1
-```
-
-### http_store_retry_interval_millis
-
-Interval to wait if the previous cache store request failed.
-[`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) must contain `http` or `thrift_over_http`.
-
-```
-[cache]
-http_store_retry_interval_millis = 1000
-```
-
-### http_max_store_size
-
-The maximum size, in bytes, that an artifact can be for it to get pushed to an http cache.
-[`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) must contain `http` or `thrift_over_http`.
-
-```
-[cache]
-http_max_store_size = 5000000
-```
-
-### http_client_tls_cert
-
-The path to a PEM encoded client X.509 TLS certificate that should be used for any HTTP requests to a remote cache. This operates on both read and write connections.
-This can be useful within a server to restrict access to a write path, log which users are writing which artifacts, and generally authenticate cache clients.
-**Note:** `http_client_tls_key` must be set for this setting to be used.
-[`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) must contain `http` or `thrift_over_http`.
-
-```
-[cache]
-http_client_tls_cert = /etc/pki/client.crt
-```
-
-### http_client_tls_key
-
-The path to a PEM encoded PKCS#8 key that should be used for any HTTP requests to a remote cache. This operates on both read and write connections.
-This can be useful within a server to restrict access to a write path, log which users are writing which artifacts, and generally authenticate cache clients.
-**Note:** `http_client_tls_cert` must be set for this setting to be used.
-[`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) must contain `http` or `thrift_over_http`.
-
-```
-[cache]
-http_client_tls_key = /etc/pki/client.key
-```
-
-### hybrid_thrift_endpoint
-
-The HTTP endpoint to call if using the [Thrift over HTTP Cache API](https://buck.build/concept/http_cache_api.html#thrift_http).
-[`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) must contain `thrift_over_http`.
-
-```
-[cache]
-hybrid_thrift_endpoint = /hybrid_endpoint
-```
-
-### sqlite_inlined_size
-
-The maximum size for artifacts to be inlined. The default size is 40B.
-[`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) must contain `sqlite`.
-
-```
-[cache]
-sqlite_inlined_size = 10kB
-```
-
-### sqlite_max_size
-
-The maximum cache size for SQLite-based caching. The default size is unlimited.
-[`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) must contain `sqlite`.
-
-```
-[cache]
-sqlite_max_size = 10GB
-```
-
-### sqlite_mode
-
-Dictates if the cache is `readonly`, `passthrough`, or `readwrite` (default) when using SQLite-based caching.
-[`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) must contain `sqlite`.
-
-```
-[cache]
-sqlite_mode = readwrite
-```
-
-### sqlite_cache_names
-
-A comma-separated list of names used to configure multiple SQLite caches.
-The caches will be used **serially** in the order in which their names are specified here. If an artifact is found further along in the list, an attempt to store it in the caches earlier in the list will be made. In the following example, if the artifact is found in the `warm` cache, it will not be stored in the `local` cache.
-[`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode) must contain `sqlite`.
-
-```
-[cache]
-mode = sqlite
-sqlite_cache_names = warm, local
-
-[cache#warm]
-sqlite_mode = readonly
-
-[cache#local]
-sqlite_mode = readwrite
-```
-
-### two_level_cache_enabled
-
-Have the Buck client perform 2-level stores and lookups on the artifacts. Every cache operation consists of 2 steps: content hash-based and RuleKey-based. This makes it easier to reuse locally cached artifacts across different buck versions, at the expense of higher latencies in the case where artifacts are not present in the local cache.
-
-```
-[cache]
-two_level_cache_enabled = false
-```
-
-### two_level_cache_minimum_size
-
-When performing a store, artifacts smaller than this size will be stored directly, without the content hash redirection.
-
-```
-[cache]
-two_level_cache_minimum_size = 1024
-```
-
-### two_level_cache_maximum_size
-
-When performing a store, artifacts bigger than this size will be stored directly, without the content hash redirection.
-
-```
-[cache]
-two_level_cache_maximum_size = 1024
-```
-
-### action_graph_cache_check_enabled
-
-Enables an integrity checking mechanism in the action graph cache that compares a newly generated action graph with the one already in the cache in the case of a cache hit. If the graphs do not match, the build is stopped and the mismatching rules are printed and logged.
-
-```
-[cache]
-action_graph_cache_check_enabled = false
-```
-
-### max_action_graph_cache_entries
-
-Sets the maximum number of action graphs to cache. After this number, the least-recently-used graph will be evicted. Defaults to 1.
-
-```
-[cache]
-max_action_graph_cache_entries = 3
-```
-
-### load_balancing_type
-
-Decides whether the distributed cache connects to a single URL or has a pool of servers and chooses which one to use based on client-side load balancing. NOTE: `slb_*` configs only apply when `CLIENT_SLB` is enabled.
-
-```
-[cache]
-load_balancing_type = SINGLE_SERVER, CLIENT_SLB
-```
-
-### slb_server_pool
-
-A comma-separated list of valid server URLs. The client-side load balancer will try to pick the best server to connect to for every single connection.
-
-```
-[cache]
-slb_server_pool = http://my.server.one/,http://my.server.two
-```
-
-### slb_ping_endpoint
-
-The client-side load balancer will use this endpoint to check whether a server is in a healthy state or not. It will also be used to measure request latency.
-
-```
-[cache]
-slb_ping_endpoint = /ping.php
-```
-
-### slb_health_check_internal_millis
-
-The interval in milliseconds between two consecutive client-side load balancer health checks against the slb_server_pool.
-
-```
-[cache]
-slb_health_check_internal_millis = 1000
-```
-
-### slb_timeout_millis
-
-The connection timeout per health request made to each of the slb_server_pool servers. Any server that fails to respond within this period will be deemed unhealthy and not be used for cache requests.
-
-```
-[cache]
-slb_timeout_millis = 1000
-```
-
-### slb_error_check_time_range_millis
-
-The error rate to each individual server taking part in the slb_server_pool will be measured over the time range/window specified by this config.
-In other words, 'errors per second' is computed only for the last slb_error_check_time_range_millis.
-
-```
-[cache]slb_error_check_time_range_millis = 300000
-```
-
-### slb_max_error_percentage
-
-The maximum error percentage within the last slb_error_check_time_range_millis that is acceptable to keep a particular server marked as healthy and usable by the load balancer. Expects a float value in the interval [0, 1].
-
-```
-[cache]slb_max_error_percentage = 0.1
-```
-
-### slb_latency_check_time_range_millis
-
-The latency to each individual server taking part in the slb_server_pool will be measured in the time range/window specified by this config. In other words, 'server latency' is computed only for the last slb_latency_check_time_range_millis.
-
-```
-[cache]slb_latency_check_time_range_millis = 300000
-```
-
-### slb_max_acceptable_latency_millis
-
-If the latency of a ping request to a server in slb_server_pool is higher than this, the server is deemed unhealthy and not used for cache operations.
-
-```
-[cache]slb_max_acceptable_latency_millis = 1000
-```
-
-## [client]
-
-This section includes settings that provide information about the caller. Although these can be specified in `.buckconfig`, in practice, they are specified exclusively on the command line:
-
-```
-$ buck --config client.id=tool-making-this-buck-invocation build buck
-```
-
-### id
-
-It is good practice for tools that call Buck to identify themselves via `--config client.id=`. This makes it easier for developers to audit the source of Buck invocations that they did not make directly.
-Note that the value of `client.id` is not factored into a build rule's cache key. It is purely for auditing purposes.
-
-### skip-action-graph-cache
-
-When Buck is run as a daemon, it caches the last Action Graph it used for a build so that if the next build identifies the same set of targets, the [possibly expensive] Action Graph construction step can be avoided. Because only the last Action Graph is cached, it may be costly to interleave a small build job among a series of incremental builds of an expensive rule:
-
-```
-$ buck build //big:expensive-rule            # Initial Action Graph.
-$ buck build //big:expensive-rule            # Action Graph is reused.
-$ buck build //library#compilation-database  # Evicts costly Action Graph.
-$ buck build //big:expensive-rule            # Action Graph is rebuilt.
-```
-
-Although this scenario may sound contrived, it is very common when other tools may also be running `buck build` in the background. Work done by IDEs and linters frequently falls into this category. In this case, the best practice is to add `--config client.skip-action-graph-cache=true` for any sort of "one-off" build for which the cost of caching the Action Graph for the new build likely outweighs the benefit of evicting the Action Graph from the previous build. As this is commonly the case for tools, this flag is frequently used in concert with `--config client.id`:
-
-```
-$ buck build //big:expensive-rule            # Initial Action Graph.
-$ buck build //big:expensive-rule            # Action Graph is reused.
-# Cached Graph is unaffected by the following one-off build:
-$ buck build \
-    --config client.skip-action-graph-cache=true \
-    --config client.id=nuclide \
-    //library#compilation-database
-$ buck build //big:expensive-rule            # Action Graph is reused.
-```
-
-## [color]
-
-This section configures colored output of Buck.
-
-### ui
-
-Enables (default) or disables colorized output in the terminal.
-
-```
-[color]ui = true
-```
-
-## [credentials]
-
-This section configures credentials to be used when fetching from authenticated Maven repositories via HTTPS.
-For a repository `repo` appearing in [`[maven_repositories]`](https://buck.build/files-and-dirs/buckconfig.html#maven_repositories), Buck reads the values of `repo_user` and `repo_pass` in this section (if present), and passes them to the server using [basic access authentication](https://en.wikipedia.org/wiki/Basic_access_authentication#Client_side) when fetching.
-Note that authenticating in this way over plain HTTP connections is disallowed and will result in an error.
-
-```
-[maven_repositories]repo = https://example.com/repo
-[credentials]repo_user = joeuser
- repo_pass = hunter2
-```
-
-## [cxx]
-
-This section configures the paths to the C and C++ toolchains' binaries and the default flags to pass to all invocations of them.
-
-#### C/C++ platform flavors in `.buckconfig`
-
-Buck enables you to create additional platform *flavors* for C/C++ in `.buckconfig`. A platform flavor groups together a set of configuration parameters, which you can then reference at build time.
-To create a new C/C++ platform flavor, add a section with the header
-
-```
-[cxx#**flavor**]
-```
-
-to `.buckconfig`.
-If you invoke Buck with the specified *flavor* appended to the [build target](https://buck.build/concept/build_target.html), Buck uses the values in this section instead of those in `[cxx]`. For example, to build with the values in `[cxx#my-custom-flavor]` instead of `[cxx]`, you could invoke Buck using the following command:
-
-```
-$ buck build app#my-custom-flavor
-```
-
-You can also use these platform flavors in the `platform_*` arguments of the [`cxx_binary`](https://buck.build/rule/cxx_binary.html) and [`cxx_library`](https://buck.build/rule/cxx_library.html) rules.
-The [Buck sample for C++](https://github.com/fbsamples/bucksamples/tree/master/hello-buck-cxx) demonstrates how to use a custom platform flavor.
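-
-As a sketch of what such a flavor section might contain (the flavor name, compiler path, and flags here are hypothetical), it can override any of the `[cxx]` settings documented below:
-
-```
-[cxx#my-custom-flavor]cxx = /usr/local/bin/clang++
- cxxflags = -O2 -std=c++14
-```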
-
-### cpp
-
-The path to the C preprocessor.
-
-```
-[cxx]cpp = /usr/bin/gcc
-```
-
-### cc
-
-The path to the C compiler.
-
-```
-[cxx]cc = /usr/bin/gcc
-```
-
-### ld
-
-The path to the C/C++ linker driver.
-
-```
-[cxx]ld = /usr/bin/g++
-```
-
-### linker_platform
-
-The platform for the linker. Normally this is autodetected based on the system, but it is useful to set when cross-compiling. Valid values are:
-
-* `DARWIN`
-* `GNU`
-* `WINDOWS`
-
-```
-[cxx]linker_platform = DARWIN
-```
-
-### cxxpp
-
-The path to the C++ preprocessor.
-
-```
-[cxx]cxxpp = /usr/bin/g++
-```
-
-### cxx
-
-The path to the C++ compiler.
-
-```
-[cxx]cxx = /usr/bin/g++
-```
-
-### aspp
-
-The path to the assembly preprocessor.
-
-```
-[cxx]aspp = /usr/bin/gcc
-```
-
-### as
-
-The path to the assembler.
-
-```
-[cxx]as = /usr/bin/as
-```
-
-### ar
-
-The path to the archiver.
-
-```
-[cxx]ar = /usr/bin/ar
-```
-
-### archiver_platform
-
-The platform for the archiver. Normally this is autodetected based on the system, but it is useful to set when cross-compiling. Valid values are:
-
-* `LINUX`
-* `MACOS`
-* `FREEBSD`
-* `WINDOWS`
-
-```
-[cxx]archiver_platform = MACOS
-```
-
-### cppflags
-
-The flags to pass to the C preprocessor.
-
-```
-[cxx]cppflags = -Wall
-```
-
-### cflags
-
-The flags to pass to the C compiler and preprocessor.
-
-```
-[cxx]cflags = -Wall
-```
-
-### ldflags
-
-The flags to pass to the linker.
-
-```
-[cxx]ldflags = --strip-all
-```
-
-### cxxppflags
-
-The flags to pass to the C++ preprocessor.
-
-```
-[cxx]cxxppflags = -Wall
-```
-
-### cxxflags
-
-The flags to pass to the C++ compiler and preprocessor.
-
-```
-[cxx]cxxflags = -Wall
-```
-
-### asppflags
-
-The flags to pass to the assembly preprocessor.
-
-```
-[cxx]asppflags = -W
-```
-
-### asflags
-
-The flags to pass to the assembler and assembly preprocessor.
-
-```
-[cxx]asflags = -W
-```
-
-### arflags
-
-The flags to pass to the archiver.
-
-```
-[cxx]arflags = -X32_64
-```
-
-### ranlibflags
-
-The flags to pass to the archive indexer.
-
-```
-[cxx]ranlibflags = --plugin someplugin
-```
-
-### gtest_dep
-
-The [build rule](https://buck.build/concept/build_rule.html) to compile the [Google Test](https://github.com/google/googletest) framework.
-
-```
-[cxx]gtest_dep = //third-party/gtest:gtest
-```
-
-If you had your Google Test code in `third-party/gtest/`, the [build file](https://buck.build/concept/build_file.html) in that directory would look something like this:
-
-```
-cxx_library(
-    name = 'gtest',
-    srcs = [
-        'googletest/src/gtest-all.cc',
-        'googlemock/src/gmock-all.cc',
-        'googlemock/src/gmock_main.cc',
-    ],
-    header_namespace = '',
-    exported_headers = subdir_glob([
-        ('googletest/include', '**/*.h'),
-        ('googlemock/include', '**/*.h'),
-    ]),
-    headers = subdir_glob([
-        ('googletest', 'src/*.cc'),
-        ('googletest', 'src/*.h'),
-        ('googlemock', 'src/*.cc'),
-        ('googlemock', 'src/*.h'),
-    ]),
-    platform_linker_flags = [
-        ('android', []),
-        ('', ['-lpthread']),
-    ],
-    visibility = [
-        '//test/...',
-    ],
-)
-```
-
-### untracked_headers
-
-How to handle header files that get included in a preprocessing step, but which aren't explicitly owned by any dependencies. By default, Buck sandboxes headers into symlink trees, but file-relative inclusion and explicit preprocessor flags can still cause untracked headers to get pulled into the build, which can break caching.
-
-* `ignore` (default): Untracked headers are allowed in the build.
-* `warn`: Print a warning to the console when an untracked header is used.
-* `error`: Fail the build when an untracked header is used.
-
-```
-[cxx]untracked_headers = error
-```
-
-### untracked_headers_whitelist
-
-A list of regexes which match headers to exempt from untracked header verification.
-
-```
-[cxx]untracked_headers_whitelist = /usr/include/.*, /usr/local/include/.*
-```
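-
-For instance, the two settings above can be combined so that untracked headers fail the build while system headers stay exempt (regexes repeated from the examples above):
-
-```
-[cxx]untracked_headers = error
- untracked_headers_whitelist = /usr/include/.*, /usr/local/include/.*
-```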
-
-### should_remap_host_platform
-
-Specifies whether the `default` flavor should be remapped to the value of the [`[cxx].host_platform`](https://buck.build/files-and-dirs/buckconfig.html#cxx.host_platform) configuration parameter.
-
-```
-[cxx]
- should_remap_host_platform = true
-```
-
-Default is `false`.
-Because Buck is designed for cross-platform development, Buck normally ignores the host platform when building a target. For example, Buck normally builds the same Linux target irrespective of whether Buck itself is running on, say, Linux or macOS. The `should_remap_host_platform` configuration parameter enables you to change Buck's behavior so that Buck's target platform is the host platform on which Buck is running.
-
-### host_platform
-
-Specifies the host platform to use if [`[cxx].should_remap_host_platform`](https://buck.build/files-and-dirs/buckconfig.html#cxx.should_remap_host_platform) is `true`.
-The value that you specify could be one of Buck's internal platform flavors, such as `linux-x86_64` or `macosx-x86_64`:
-
-```
-[cxx]
- host_platform = linux-x86_64
-[cxx]
- host_platform = macosx-x86_64
-```
-
-or the value could be a custom platform flavor:
-
-```
-[cxx]
- host_platform = my-custom-flavor
-```
-
-If `[cxx].should_remap_host_platform` is `true`, but `host_platform` is unspecified, then Buck infers the host platform from the local computer to be one of the following values:
-
-* `linux-x86_64` (Linux)
-* `macosx-x86_64` (macOS)
-* `freebsd-x86_64` (FreeBSD)
-* `windows-x86_64` (Windows)
-
-If `[cxx].should_remap_host_platform` is unset, or explicitly set to `false`, then Buck ignores the value of `host_platform`.
-
-### default_platform
-
-Override the default platform for build rules.
-
-```
-[cxx]default_platform = iphonesimulator-x86_64
-```
-
-### pch_enabled
-
-Whether prefix headers used by a [`cxx_library`](https://buck.build/rule/cxx_library.html) or other such build rule's `prefix_header` parameter should be separately precompiled and used in that rule's build.
-If this is disabled, the prefix header is included as-is, without precompilation.
-Default is `true`.
-
-```
-[cxx]pch_enabled = false
-```
-
-### link_weight
-
-The number of jobs that each C/C++ link rule consumes when running. By default, this is `1`, but this can be overridden to change how many link rules can execute in parallel for a given `-j` value. This is useful for builds with large I/O-intensive static links where using a lower `-j` value is undesirable (since it reduces the parallelism for other build rule types).
-
-```
-[cxx]link_weight = 3
-```
-
-### cache_links
-
-C/C++ link rules are cached by default. However, static C/C++ link jobs can take up lots of cache space and also get relatively low hit rates, so this config option provides a way to disable caching of all C/C++ link rules in the build.
-
-```
-[cxx]cache_links = false
-```
-
-### default_reexport_all_header_dependencies
-
-Default value used for [`reexport_all_header_dependencies`](https://buck.build/rule/cxx_library.html#reexport_all_header_dependencies) when it's undefined on the build rule.
-
-```
-[cxx]default_reexport_all_header_dependencies = true
-```
-
-### shlib_interfaces
-
-When linking an executable or shared library, any dependencies that build shared libraries are normally added to the link line. If this option is set, Buck will use shared library interfaces for these dependencies instead of full shared libraries. Shared library interfaces are a subset of the original shared library, removing parts of the shared library (e.g. the `.text` segment for ELF) which are typically unused when this library is being linked against. Using shared library interfaces can allow Buck's input-based rule keys to avoid potentially unnecessary re-links (see `CxxSharedLibraryInterfaceIntegrationTest` for examples).
-
-```
-[cxx]shlib_interfaces = enabled
-```
-
-### independent_shlib_interfaces
-
-Normally, a shared library interface for a rule is generated using its shared library. Since linking a rule's shared library requires the shared library interfaces for all dependencies to be built, this means that dynamic linking has inherent non-parallelism, due to this build dependency tree. When this option is set, Buck will build shared library interfaces independent of the rule's shared library
-(e.g. by linking its own shared library without any dependency shared libraries), allowing all shared library interfaces to be built in parallel, and therefore also allowing subsequent shared libraries to be built in parallel.
-
-```
-[cxx]independent_shlib_interfaces = true
-```
-
-## [d]
-
-This section configures how code written in D is compiled.
-
-### base_compiler_flags
-
-Flags to pass to every invocation of the D compiler. This is a space-separated list. It defaults to an empty list.
-
-```
-[d]base_compiler_flags = -I/some/path -g -O3
-```
-
-### compiler
-
-Path to the D compiler. If this parameter is not specified, Buck attempts to find the D compiler automatically.
-
-```
-[d]compiler = /opt/dmd/bin/dmd
-```
-
-### library_path
-
-Directories to be searched for the D runtime libraries. This is a colon-separated list. If this parameter is not specified, Buck attempts to detect the location of the libraries automatically.
-
-```
-[d]library_path = /usr/local/lib:/opt/dmd/lib
-```
-
-### linker_flags
-
-Flags to pass to the linker when linking D code into an executable. This is a space-separated list. If omitted, this value is constructed from d.library_path.
-
-```
-[d]linker_flags = "-L/path to phobos" -lphobos2
-```
-
-## [doctor]
-
-This section defines variables that are associated with the command `doctor`.
-
-### protocol
-
-The protocol of communication; it can be either `simple` or `json`.
-
-```
-[doctor]protocol = json
-```
-
-### endpoint_url
-
-The address of the remote endpoint that the request will go to. This needs to be defined in order for the command to work.
-
-```
-[doctor]endpoint_url = http://localhost:4545
-```
-
-### endpoint_timeout_ms
-
-The timeout in milliseconds before giving up contacting the analysis endpoint.
-
-```
-[doctor]endpoint_timeout_ms = 15
-```
-
-### endpoint_extra_request_args
-
-This section of keys and values is added as parameters to the POST request sent to the doctor remote endpoint.
-
-```
-[doctor]endpoint_extra_request_args = ref=>1245,token=>42
-```
-
-### report_upload_path
-
-The address of the remote endpoint the report will be uploaded to.
-
-```
-[doctor]report_upload_path = http://localhost:4546
-```
-
-### report_max_size
-
-The maximum size that the report endpoint can handle before giving up and storing it only locally.
-
-```
-[doctor]report_max_size = 512MB
-```
-
-### report_timeout_ms
-
-The timeout in milliseconds before giving up contacting the report endpoint.
-
-```
-[doctor]report_timeout_ms = 15
-```
-
-### report_max_upload_retries
-
-The number of times to try uploading to the report endpoint.
-
-```
-[doctor]report_max_upload_retries = 2
-```
-
-### report_extra_info_command
-
-An extra command that the report should run, attaching the resulting information to the uploaded report.
-
-```
-[doctor]report_extra_info_command = /custom/script/to/run.sh
-```
-
-## [download]
-
-This section configures downloading from the network during [`buck fetch`](https://buck.build/command/fetch.html).
-
-### proxy
-
-Buck will attempt to fetch files from the network; however, if you happen to be behind a firewall, this may not work correctly. You can supply a proxy when downloading from HTTP[S] servers with these three settings. Valid types for `proxy_type` are `HTTP` (default) and `SOCKS`. These values correspond to [Java's Proxy.Type](http://docs.oracle.com/javase/8/docs/api/java/net/Proxy.Type.html).
-
-```
-[download]proxy_host=proxy.example.com
- proxy_port=8080
- proxy_type=HTTP
-```
-
-### maven_repo
-
-If a remote file's URL starts with `mvn:`, that file (usually a jar) is supposed to come from a maven repo. You can specify the repo to download from here, or by setting one or more repositories in [`[maven_repositories]`](https://buck.build/files-and-dirs/buckconfig.html#maven_repositories).
-
-```
-[download]maven_repo = https://repo1.maven.org/maven2
-```
-
-### max_number_of_retries
-
-If Buck is unable to download a file, it will retry the specified number of times before giving up. By default this is not set, so Buck will not retry failed downloads.
-
-```
-[download]max_number_of_retries = 3
-```
-
-### in_build
-
-If true, allow downloads to be part of the build process. If false, buck build / run / test will require the user to run 'buck fetch' first. This generally should not be changed, to avoid surprising users with unexpected build times when the cause is mostly download times. By default this is set to false.
-
-```
-[download]in_build = true
-```
-
-## [dx]
-
-This section controls how Buck invokes the dx tool.
-
-### threads
-
-The fixed number of threads to run the dexing steps with. If not specified, the optimal number is inferred from the hardware specification of the running machine.
-
-```
-[dx]threads = 4
-```
-
-### max_threads
-
-The maximum number of threads allowed to run the dexing steps. Since the dexing steps can use a lot of memory, it might be useful to set this to a lower value to avoid out-of-memory errors on systems that have a lot of CPU cores. This parameter is mostly useful when [`[dx].threads`](https://buck.build/files-and-dirs/buckconfig.html#dx.threads) is not specified and the number of threads is obtained based on hardware.
-
-```
-[dx]max_threads = 8
-```
-
-### max_heap_size
-
-This option specifies how much memory is available when running dx out of process.
-
-```
-[dx]max_heap_size = 2g
-```
-
-## [export_file]
-
-This section configures the behavior of the `export_file` build rule.
-
-### input_directory_action
-
-Defines the behavior of `export_file` when the input of a build rule is a directory. Support for directories will be removed soon, and this option provides a way to migrate a project to a state where none of the `export_file` rules use directories as inputs.
-The valid values are:
-
-* `allow` (default): directories are allowed and no action is taken,
-* `warn`: emit a warning to the console,
-* `fail`: fail the build.
-
-```
-[export_file]input_directory_action = fail
-```
-
-## [go]
-
-This section defines the Go toolchain. By default Buck will try to discover the Go compiler and linker from the `go` tool found in your `PATH`.
-
-### root
-
-If you have a non-standard Go install, you will need to set the Go root. The root should contain `pkg` and `bin` directories.
-
-```
-[go]root = /opt/golang/libexec
-```
-
-### prefix
-
-For interoperability with the go tool, you may specify a prefix for your default package names.
-
-```
-[go]prefix = github.com/facebook/buck
-```
-
-### tool
-
-You can specify the path to find the `go` tool. This in turn will allow Buck to discover the compiler/linker by default. This defaults to `${go.root}/bin/go`.
-
-```
-[go]tool = /usr/local/bin/go
-```
-
-### compiler
-
-The full path to the Go compiler. This is normally automatically discovered.
-
-```
-[go]compiler = /usr/local/libexec/go/pkg/tool/darwin_amd64/compile
-```
-
-### assembler
-
-The full path to the Go assembler. This is normally automatically discovered.
-
-```
-[go]assembler = /usr/local/libexec/go/pkg/tool/darwin_amd64/asm
-```
-
-### packer
-
-The full path to the Go packer. This is normally automatically discovered.
-
-```
-[go]packer = /usr/local/libexec/go/pkg/tool/darwin_amd64/pack
-```
-
-### linker
-
-The full path to the Go linker. This is normally automatically discovered.
-
-```
-[go]linker = /usr/local/libexec/go/pkg/tool/darwin_amd64/link
-```
-
-### vendor_path
-
-A colon (:) separated list of directories to include in the importmap for Go dependencies. Packages in these directories are allowed to be imported given just the relative path to the package. This is similar to how 'vendor' directories work, e.g. you can use `import golang.org/x/net` for a package that lives in `/golang.org/x/net`.
-
-```
-[go]vendor_path = third-party/go
-```
-
-### project_path
-
-You can specify the path where `buck project` will store dynamically generated files (e.g. genrule outputs). This is an extension to `$GOPATH`, particularly useful while working with the native Go toolchain or IDEs.
-
-```
-[go]project_path = third-party/go
-```
-
-## [groovy]
-
-This section configures the [Groovy](http://groovy-lang.org/) toolchain.
-
-### groovy_home
-
-This defines the value of `GROOVY_HOME` that Buck should use. If it is not provided, Buck will use the system's `GROOVY_HOME` by default.
-
-```
-[groovy]groovy_home = /path/to/groovy_home
-```
-
-## [halide]
-
-This section configures the [Halide](http://halide-lang.org/) platform mappings and toolchain.
-
-### target
-
-This defines the C++ platform flavor to Halide target mapping. Each key should begin with the prefix `target_`, followed by the flavor name. The corresponding value should be the Halide target string to use when building for that flavor.
-
-```
-[halide]target_iphonesimulator-x86_64 = x86-64-osx
- target_iphoneos-arm64 = arm-64-ios
-```
-
-### xcode_compile_script
-
-The optional path to a shell script which should be used for invoking the Halide AOT "compiler" when building projects that include Halide targets in Xcode.
-
-```
-[halide]xcode_compile_script = //path/to/script.sh
-```
-
-## [httpserver]
-
-Option to enable an experimental web server that presents a UI to explore build data. Note that Buck must be run as a daemon in order for the web server to be available.
-
-### port
-
-This sets the port to use for the web server. There are three possible values:
-
-* `n > 0`: For any positive integer, Buck will attempt to make the server available on that port.
-* `0`: Buck will find a free port for the server to use and print it out on the command line.
-* `-1`: Explicitly disables the server.
-
-```
-[httpserver]port = 8080
-```
-
-## [incompatible]
-
-This section controls features of Buck that are in the process of being deprecated.
-
-## [intellij]
-
-This section configures a project generated for IntelliJ IDEA by the `buck project` command.
-
-### default_android_manifest_path
-
-The default manifest file that should be used in Android IntelliJ modules when Buck cannot detect the correct manifest to use.
-
-```
-[intellij]default_android_manifest_path = shared/AndroidManifest.xml
-```
-
-### jdk_name
-
-IntelliJ project SDK name.
-
-```
-[intellij]jdk_name = Java SDK 1.6
-```
-
-### jdk_type
-
-IntelliJ project SDK type.
-
-```
-[intellij]jdk_type = Android SDK or JavaSDK
-```
-
-### android_module_sdk_type
-
-Default Android SDK type for android modules.
-
-```
-[intellij]android_module_sdk_type = Android SDK
-```
-
-### android_module_sdk_name
-
-Default Android SDK name for android modules.
-
-```
-[intellij]android_module_sdk_name = Android API 23 Platform
-```
-
-### java_module_sdk_type
-
-SDK type for Java modules.
-
-```
-[intellij]java_module_sdk_type = JavaSDK
-```
-
-### java_module_sdk_name
-
-SDK name for Java modules.
-
-```
-[intellij]java_module_sdk_name = 1.8
-```
-
-### default_min_android_sdk_version
-
-Default minimum Android SDK version supported for this project. Overridden by the min SDK version if specified in a target's AndroidManifest.xml.
-
-```
-[intellij]default_min_android_sdk_version = 9
-```
-
-### generated_sources_label_map
-
-Allows adding folders with generated source code to the IntelliJ project. These folders are added when a target has a label specified in this option. In the example below, if target `//app/target` has the label `generated_code_1`, the folder `buck-out/gen/app/lib/__lib_target1__` will be added to the IntelliJ project.
-
-```
-[intellij]generated_sources_label_map = generated_code_1 => __%name%_target1__,
- generated_code2 => __%name%_target2__
-```
-
-### include_transitive_dependencies
-
-Add transitive dependencies as RUNTIME libraries.
-
-```
-[intellij]include_transitive_dependencies = false
-```
-
-### module_group_name
-
-Specify the module group name when grouping modules. If it is set to '', modules are not grouped.
-
-```
-[intellij]module_group_name = modules
-```
-
-### remove_unused_libraries
-
-Removes unused libraries from .idea/libraries.
-
-```
-[intellij]remove_unused_libraries = true
-```
-
-### aggregate_android_resource_modules
-
-Forces `buck project` to aggregate modules with Android resources. This aggregation is performed only if the aggregation mode is not `none`.
-**Note:** using this type of aggregation disables the Android layout editor provided by the Android plugin. The layout files can still be edited using the XML editor.
-
-```
-[intellij]aggregate_android_resource_modules = true
-```
-
-### android_resource_module_aggregation_limit
-
-The maximum number of targets that can be aggregated into one module with Android resources. This limit is a workaround to avoid a problem where the Android plugin cannot operate on modules with a large number of resource folders.
-
-```
-[intellij]android_resource_module_aggregation_limit = 1000
-```
-
-### project_compiler_output_url
-
-The output directory for IntelliJ's builds.
-
-```
-[intellij]project_compiler_output_url = intellij-out/classes
-```
-
-### extra_compiler_output_modules_path
-
-This option specifies the location of additional modules for code generated outside of the buck graph. For example, it can be used to specify the location of R.java classes generated for the Android plugin to help Layout Preview with resolving references to resources.
-
-```
-[intellij]extra_compiler_output_modules_path = buck-out/extra-intellij-output
-```
-
-## [java]
-
-This section configures the Java toolchain.
-
-### src_roots
-
-The paths to roots of Java code (where a root contains a tree of Java folders where the folder structure mirrors the package structure). This list of paths is comma-delimited. Paths that start with a slash are relative to the root of the project, and all other paths can match a folder anywhere in the tree. In the example below, we match all folders named `src`, and `java` and `javatests` at the root of the project.
-
-```
-[java]src_roots = src, /java/, /javatests/
-```
-
-### extra_arguments
-
-A comma-delimited list of flags to pass to the Java compiler.
-
-```
-[java]extra_arguments = -g
-```
-
-### source_level
-
-The default version of Java for source files. Also defines the project language level in IntelliJ.
-
-```
-[java]source_level = 7
-```
-
-### target_level
-
-The default version of Java for generated code.
-
-```
-[java]target_level = 7
-```
-
-### skip_checking_missing_deps
-
-Buck will attempt to analyze build failures and suggest dependencies that might not be declared in order to fix the failure. On large projects, this can be slow. This setting disables the check.
-
-```
-[java]skip_checking_missing_deps = false
-```
-
-### jar_spool_mode
-
-Specifies how the compiler output to the `.jar` file should be spooled. The valid modes are:
-
-* `intermediate_to_disk` (default): writes the intermediate `.class` files from the compiler output to disk. They are then packed into a `.jar`.
-* `direct_to_jar`: compiler output will be directly written to a `.jar` file, with the intermediate `.class` files held in memory. The compiler output will still be written to disk if there are any post-processing commands specified during the build.
-
-```
-[java]jar_spool_mode = intermediate_to_disk
-```
-
-### abi_generation_mode
-
-Specifies how Buck should create ABI jars when computing [ABI rule keys](https://buck.build/concept/rule_keys.html#abi_rule_keys). Values other than `class` may not be suitable for all rules; this setting may be overridden on a per-rule basis using the `abi_generation_mode` parameter on each rule.
-The valid modes are:
-
-* `class` (default): creates an ABI jar for each library by first building the library and then stripping out any information that is not part of the interface (such as method bodies and private members).
-* `source`: creates an ABI jar for each library in the process of building the library, via a plugin to the Java compiler. This improves build times by allowing each library's dependents to start building before the library is done building. Implies `jar_spool_mode = direct_to_jar`.
-* `source_only`: creates an ABI jar for each library by referencing only the source code of the library, without considering (most of) its dependencies. This can drastically improve build times, especially in larger apps or in build environments with a large number of cores, by allowing all ABI jars to be built in parallel, and then all library jars to be built in parallel (up to the available parallelism in the build environment). Additionally, in environments with network-based caches it can reduce the number of calls to the cache required for each build. Requires some changes to how Java code is written. To migrate, first do some builds in `migrating_to_source_only` mode, using [`buck fix`](https://buck.build/command/fix.html) to fix any issues encountered. Once migrated, errors will still be encountered from time to time when new code does not meet the requirements of this mode; [`buck fix`](https://buck.build/command/fix.html) can be used to address these. When building with `source_only`, using [`buck build`](https://buck.build/command/build.html)` --keep-going` is recommended, since some errors that occur when building an ABI jar will actually have their root cause in another rule that builds later. Read more about source-only ABIs [here](https://buck.build/concept/java_abis.html).
-* `migrating_to_source_only`: used when migrating from `source` to `source_only`.
-Acts like `source`, but issues warnings (in `buck.log`, not to the console) for any Java code that would cause errors under `source_only`. [`buck fix`](https://buck.build/command/fix.html) can be used to fix most such warnings.
-
-```
-[java]abi_generation_mode = source
-```
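-
-A sketch of the migration flow described above (the `//...` target pattern is illustrative, and the mode could equally be set in `.buckconfig` rather than on the command line):
-
-```
-$ buck build --config java.abi_generation_mode=migrating_to_source_only --keep-going //...
-$ buck fix
-```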
-
-### unused_dependencies_action
-
-Action performed when Buck detects that some dependencies are not used during Java compilation.
-Note that this feature is experimental and does not handle run-time dependencies.
-The valid values are:
-
-* `ignore` (default): ignore unused dependencies,
-* `warn`: emit a warning to the console,
-* `fail`: fail the compilation.
-
-```
-[java]unused_dependencies_action = warn
-```
-
-### duplicates_log_level
-
-Verbosity of logs emitted on duplicates when building a binary.
-The valid values are:
-
-* `info` (default): emit an info message to the console,
-* `warn`: emit a warning to the console,
-* `fine`: emit fine-grained info to the console, visible only at high verbosity levels.
-
-```
-[java]duplicates_log_level = info
-```
-
-## [kotlin]
-
-This section configures various aspects of the [Kotlin](https://kotlinlang.org/) toolchain.
-
-### kotlinc
-
-The path to the `kotlinc` compiler executable to use when external compilation is forced. This setting has no effect by itself and must be paired with the [`[kotlin].external`](https://buck.build/files-and-dirs/buckconfig.html#kotlin.external) setting.
-
-```
-[kotlin]kotlinc = /usr/local/bin/kotlinc
-```
-
-### external
-
-Forces external compilation via `kotlinc`. When external compilation is forced, the following heuristics are used to locate the `kotlinc` executable:
-
-* If the [`[kotlin].kotlinc`](https://buck.build/files-and-dirs/buckconfig.html#kotlin.kotlinc) setting is specified, the executable specified by that path will be used.
-* If the [`[kotlin].kotlin_home`](https://buck.build/files-and-dirs/buckconfig.html#kotlin.kotlin_home) path setting is specified, Buck will look for a `bin` directory under that path for an executable named `kotlinc`.
-* If a `KOTLIN_HOME` environment variable is present, Buck will look for a `bin` directory under that path for an executable named `kotlinc`.
-* Lastly, if none of the above are specified, Buck will look for the `kotlinc` executable in the paths listed in the `PATH` environment variable.
-
-Defaults to `false`.
-
-```
-[kotlin]external = true
-```
-
-### kotlin_home
-
-The path to the Kotlin root folder, typically the installation folder, where various Kotlin assets (executables and JAR files) can be found. This path is used in the following ways:
-
-* When in-memory compilation is used, the `kotlin-compiler.jar` and other related Kotlin JARs required for compilation are located via this path using the following heuristics:
- * The root of the directory specified by this path is searched.
- * If there is a `lib` directory under this path, it is searched.
- * If there is a `libexec` directory under this path, it is searched.
-* If external compilation is called for (see [`[kotlin].external`](https://buck.build/files-and-dirs/buckconfig.html#kotlin.external)), a `bin` directory under this directory will be searched to locate the `kotlinc` executable.
-
-If this setting is not specified, the location of the Kotlin home directory can be specified via the `KOTLIN_HOME` environment variable.
-If neither the [`[kotlin].kotlin_home`](https://buck.build/files-and-dirs/buckconfig.html#kotlin.kotlin_home) setting nor the `KOTLIN_HOME` environment variable is specified, Buck will attempt to locate the home directory by searching for the `kotlinc` executable in the paths specified by the `PATH` environment variable. If the `kotlinc` executable is found, Buck assumes that the *parent directory* of that executable is the Kotlin home.
-
-```
-[kotlin]kotlin_home = /usr/local/Cellar/kotlin/1.1.1
-```
-
-## [log]
-
-This section controls how Buck will log information about builds for later inspection. Settings in this section will appear as features are in the process of being deprecated, and will be removed after those features are removed from Buck.
-
-### max_traces
-
-Sets the maximum number of [Chrome Traces](https://buck.build/about/performance_tuning.html) that Buck will create.
-
-```
-[log]max_traces = 25
-```
-
-### compress_traces
-
-`true` if Buck should GZIP the traces, `false` otherwise.
-
-```
-[log]compress_traces = true
-```
-
-### machine_readable_logger_enabled
-
-`true` if Buck should output to a machine-readable log file under the name `buck-machine-log`. Log entries are formatted one per line like `< Event type >< space >< JSON >`.
-
-```
-[log]machine_readable_logger_enabled = true
-```
-
-### build_details_template
-
-If provided, Buck prints the specified string at the end of each build. The string `{build_id}` is replaced with the current build ID. This can be helpful to link to external systems that may have more details about the build.
-
-```
-[log]build_details_template = "Details at https://example.com/builds/{build_id}"
-```
-
-### build_details_commands
-
-If [`build_details_template`](https://buck.build/files-and-dirs/buckconfig.html#log.build_details_template) is provided, Buck prints the specified string to the console for each of the specified list of commands. This can be useful for ensuring that users are not given too much information, while allowing log-heavy environments like CI systems to output more information for commands like 'query'. The default value is build, test, install.
-
-```
-[log]build_details_commands = build, test, install, query, targets
-```
-
-## [lua]
-
-This section defines settings relevant to `lua_*` rules.
-
-### lua
-
-The path to the Lua interpreter. By default, Buck will search for the binary `lua` in your `PATH`.
-
-```
-[lua]lua = /usr/bin/lua
-```
-
-### cxx_library
-
-The build target of the Lua C library to use to link a standalone interpreter. By default, Buck will use `-llua` from the C/C++ linker's default library search path.
-
-```
-[lua]cxx_library = //third-party/lua:lua
-```
-
-### starter_type
-
-The method for bootstrapping Lua binaries. By default, `native` is chosen if the binary contains native libraries and `pure` is chosen otherwise.
-
-* `pure`: The binary bootstrap process uses pure Lua code. This method cannot be used if the binary includes native code.
-* `native`: The binary bootstrap process links in the Lua C library (specified in [`[lua].cxx_library`](https://buck.build/files-and-dirs/buckconfig.html#lua.cxx_library)) to form a standalone native interpreter.
-
-```
-[lua]starter_type = pure
-```
-
-### native_starter_library
-
-A C/C++ library to use as a custom starter for Lua binaries which use the `native` bootstrap method.
-The library is expected to define the following function:
-
-```
-#ifdef __cplusplus
-extern "C"
-#endif
-int run_starter(
-    int argc,
-    const char **argv,
-    const char *main_module,
-    const char *modules_dir,
-    const char *extension_suffix);
-```
-
-Where the arguments are as follows:
-
-* `argc`: The number of command-line arguments.
-* `argv`: The array of command-line arguments.
-* `main_module`: The name of the binary's main module.
-* `modules_dir`: The path, relative to the binary, to the modules directory.
-* `extension_suffix`: The suffix used for native libraries (e.g. `.so`).
-
-```
-[lua]native_starter_library = //third-party/lua:starter
-```
-
-### extension
-
-The extension to use for Lua binaries. Defaults to `.lex`.
-
-```
-[lua]extension = .lex
-```
-
-## [maven_repositories]
-
-This section defines the set of maven repositories that Buck can use when attempting to resolve maven artifacts. It takes the form of key-value pairs of a short name for the repo and the URL. The URL may either be an HTTP(S) URL, or point to a directory on your local disk.
-
-```
-[maven_repositories]central = https://repo1.maven.org/maven2
- m2 = ~/.m2/repository
-```
-
-Note that if you are using Buck to talk to Maven and you are using IPv6, you might need to [add the following option to your `.buckjavaargs` file](https://buck.build/files-and-dirs/buckjavaargs.html):
-
-```
--Djava.net.preferIPv6Addresses=true
-```
-
-## [ndk]
-
-This section defines properties to configure building native code against the Android NDK.
-
-### ndk_version
-
-The version of the NDK that Buck should use to build native code. Buck searches for this version in the subdirectories beneath the directory specified by either the `ANDROID_NDK_REPOSITORY` environment variable or the value of the [`[ndk].ndk_repository_path`](https://buck.build/files-and-dirs/buckconfig.html#ndk.ndk_repository_path) property. Buck prefers an exact match, and otherwise accepts a prefix match.
-NDKs with a version prior to `r11` store their version in the file `RELEASE.TXT`. For example, in version r10c this file contains `r10c (64-bit)`. In this case, you would use `r10c` for the value of `ndk_version`.
-
-```
-[ndk]
- ndk_version = r10c
-```
-
-NDKs with a version after `r11` use a different format for their version and store their version in the `Pkg.Revision` property of the file `source.properties`. For example, this is the content of that file for version r13b:
-
-```
-Pkg.Desc = Android NDK
-Pkg.Revision = 13.1.3345770
-```
-
-In this case, you would use `13.1.3345770` for the value of `ndk_version`.
-
-```
-[ndk]
- ndk_version = 13.1.3345770
-```
-
-### ndk_path
-
-This specifies an absolute path to the Android NDK. The default is empty.
-Setting this property has the same effect as if you had set either of the following environment variables to the same value:
-
-* `ANDROID_NDK`
-* `NDK_HOME`
-
-Note that Buck gives precedence to the values of these environment variables, in the order in which they are listed above, over the value of this property in `.buckconfig`.
-
-```
-[ndk]ndk_path = /Library/Android/ndk/r10c
-```
-
-### ndk_repository_path
-
-This specifies the absolute path to a directory that contains multiple versions of the Android NDK in subdirectories. The default is empty.
-Setting this property has the same effect as if you had set the `ANDROID_NDK_REPOSITORY` environment variable to the same value. However, Buck gives precedence to the value of this environment variable over the value of this property in `.buckconfig`.
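-
-For illustration, such a repository directory might be laid out as follows (the directory names are hypothetical; Buck identifies each NDK by its version file rather than by the directory name):
-
-```
-/Library/Android/ndk/r10c/           # RELEASE.TXT contains "r10c (64-bit)"
-/Library/Android/ndk/13.1.3345770/   # source.properties: Pkg.Revision = 13.1.3345770
-```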
-
-Buck selects which NDK to use based on the value of the [`[ndk].ndk_version`](https://buck.build/files-and-dirs/buckconfig.html#ndk.ndk_version) property. Currently, if you do not specify a value for `ndk.ndk_version`, Buck selects the most-recent NDK. However, you should not rely on this behavior, as it could change in a future release.
-
-```
-[ndk]ndk_repository_path = /Library/Android/ndk
-```
-
-### app_platform
-
-The android platform libraries that the code is targeting. This is equivalent to `APP_PLATFORM` in the NDK build system. The default is `android-16`.
-
-```
-[ndk]app_platform = android-21
-```
-
-### app_platform_per_cpu_abi
-
-The android platform libraries that the code is targeting, set on a per-CPU-ABI basis. This is equivalent to `APP_PLATFORM` in the NDK build system.
-If no value is set for a particular CPU ABI, the value from [`app_platform`](https://buck.build/files-and-dirs/buckconfig.html#ndk.app_platform) is used as a fallback.
-
-```
-[ndk]app_platform_per_cpu_abi = arm => android-19, arm64 => android-22
-```
-
-### cpu_abis
-
-A comma-separated list of the CPU ABIs that this repo supports. Buck will only build NDK code for these ABIs.
-
-```
-[ndk]cpu_abis = armv7, x86
-```
-
-### compiler
-
-When compiling [`cxx_library`](https://buck.build/rule/cxx_library.html) rules, this specifies the compiler family to use from the NDK. The possible values are:
-
-* `gcc` (default): Use the GCC family of compilation tools.
-* `clang`: Use the Clang family of compilation tools.
-
-```
-[ndk]compiler = gcc
-```
-
-### gcc_version
-
-When compiling [`cxx_library`](https://buck.build/rule/cxx_library.html) rules, this specifies the version of GCC to use. This will be used regardless of the value in [`[ndk].compiler`](https://buck.build/files-and-dirs/buckconfig.html#ndk.compiler), as other compiler families still use tools from the GCC toolchain (such as `ar`). The default value is `4.8`.
-
-```
-[ndk]gcc_version = 4.8
-```
-
-### clang_version
-
-When compiling [`cxx_library`](https://buck.build/rule/cxx_library.html) rules, this specifies the version of Clang to use. The default value is `3.4`.
-
-```
-[ndk]clang_version = 3.4
-```
-
-### cxx_runtime
-
-When compiling [`cxx_library`](https://buck.build/rule/cxx_library.html) rules, this specifies the variant of the [C/C++ runtime](http://www.kandroid.org/ndk/docs/CPLUSPLUS-SUPPORT.html) to use. Possible values are:
-
-* `gabixx`
-* `gnustl` (default)
-* `libcxx`
-* `stlport`
-* `system`
-
-```
-[ndk]cxx_runtime = gnustl
-```
-
-### cxx_runtime_type
-
-When compiling [`cxx_library`](https://buck.build/rule/cxx_library.html) rules, this specifies how libraries are intended to be linked with the runtime. If this is `static`, then the C/C++ runtime library will not be packaged in the APK. Possible values are:
-
-* `dynamic` (default)
-* `static`
-
-```
-[ndk]cxx_runtime_type = dynamic
-```
-
-## [ocaml]
-
-This section configures the paths to the OCaml toolchain's binaries.
-
-### ocaml.bytecode.compiler
-
-The path to the [OCaml bytecode compiler (ocamlc)](https://caml.inria.fr/pub/docs/manual-ocaml/native.html).
-
-```
-[ocaml]ocaml.bytecode.compiler = /usr/local/bin/ocamlc.opt
-```
-
-### ocaml.compiler
-
-The path to the [OCaml native-code compiler (ocamlopt)](https://caml.inria.fr/pub/docs/manual-ocaml/comp.html).
-
-```
-[ocaml]ocaml.compiler = /usr/local/bin/ocamlopt.opt
-```
-
-### dep.tool
-
-The path to the [OCaml dependency generator (ocamldep)](https://caml.inria.fr/pub/docs/manual-ocaml/depend.html).
-
-```
-[ocaml]dep.tool = /usr/local/bin/ocamldep.opt
-```
-
-### lex.compiler
-
-The path to the [OCaml lexer generator (ocamllex)](https://caml.inria.fr/pub/docs/manual-ocaml/lexyacc.html#sec296).
-
-```
-[ocaml]lex.compiler = /usr/local/bin/ocamllex.opt
-```
-
-### yacc.compiler
-
-The path to the [OCaml parser generator (ocamlyacc)](https://caml.inria.fr/pub/docs/manual-ocaml/lexyacc.html#sec307).
-
-```
-[ocaml]yacc.compiler = /usr/local/bin/ocamlyacc
-```
-
-### debug
-
-The path to the [OCaml debugger (ocamldebug)](https://caml.inria.fr/pub/docs/manual-ocaml/debugger.html).
-
-```
-[ocaml]debug = /usr/local/bin/ocamldebug
-```
-
-### interop.includes
-
-The path to the OCaml standard library directory (see [Interfacing C with OCaml](https://caml.inria.fr/pub/docs/manual-ocaml/intfc.html)).
-
-```
-[ocaml]interop.includes = /usr/local/lib/ocaml
-```
-
-## [parser]
-
-This section defines settings for the BUCK parser.
-
-### python_interpreter
-
-The path to the python interpreter to use for parsing. If not specified, the [`[python].interpreter`](https://buck.build/files-and-dirs/buckconfig.html#python.interpreter) setting is used.
-
-```
-[parser]python_interpreter = /usr/bin/python
-```
-
-### python_path
-
-The `PYTHONPATH` environment variable set for the python interpreter used by the parser. By default, this is unset.
-
-```
-[parser]python_path = /path1:/path2
-```
-
-### polyglot_parsing_enabled
-
-Indicates whether support for experimental polyglot parsing should be enabled. When enabled, a build file can have a `# BUILD FILE SYNTAX: ` marker followed by one of the supported syntax names, which include `PYTHON_DSL` and an experimental `SKYLARK`. This flag is disabled by default.
-
-```
-[parser]polyglot_parsing_enabled = true
-```
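-
-For example, a build file written in the experimental Skylark syntax might begin with the marker line shown in this sketch:
-
-```
-# BUILD FILE SYNTAX: SKYLARK
-```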
-
-### default_build_file_syntax
-
-Specifies the default syntax assumed when parsing build files without an explicit build syntax marker (`# BUILD FILE SYNTAX: `). This flag is only applicable when the `parser.polyglot_parsing_enabled` configuration is set to `true`. By default its value is set to `PYTHON_DSL`.
-
-```
-[parser]default_build_file_syntax = SKYLARK
-```
-
-### disable_implicit_native_rules
-
-If set, native rules ([`cxx_library`](https://buck.build/rule/cxx_library.html), [`android_library`](https://buck.build/rule/android_library.html), etc.) cannot be used in BUCK files. This can be useful if your team has a common set of macros that should be loaded, and one desires a fast-feedback way to make sure that Buck's native rules are not inadvertently used. If set, native rules can only be accessed via the 'native' object within an extension file that is evaluated with [`load()`](https://buck.build/function/load.html) or [`include_defs()`](https://buck.build/function/include_defs.html). This flag is disabled by default (native rules can be used in build files).
-
-```
-[parser]disable_implicit_native_rules = true
-```
-
-### warn_about_deprecated_syntax
-
-If set, warnings about deprecated syntax in BUCK files will be issued. This flag is enabled by default.
-
-```
-[parser]warn_about_deprecated_syntax = false
-```
-
-## [project]
-
-This section defines project-level settings.
-
-### generate_android_manifest
-
-Forces Buck to generate "AndroidManifest.xml" files for Android IntelliJ modules. The generated manifests contain the package name only, to allow the Android IntelliJ plugin to resolve references to resources correctly.
-Manifests are generated for modules that have information about the package name and have either no targets or more than one target with an Android manifest. When a module has exactly one target with an Android manifest, that manifest is used as the module's manifest.
-
-```
-[project]generate_android_manifest = true
-```
-
-### ide
-
-Buck attempts to figure out the correct IDE to use based on the type of rule (e.g. for [`apple_library`](https://buck.build/rule/apple_library.html) it will generate an Xcode workspace), but for cross-platform libraries (like [`cxx_library`](https://buck.build/rule/cxx_library.html)) this is not possible. This setting lets you specify the default IDE that [`buck project`](https://buck.build/command/project.html) generates. Possible values are:
-
-* `intellij`
-* `xcode`
-
-```
-[project]ide = xcode
-```
-
-### glob_handler
-
-The [`glob()`](https://buck.build/function/glob.html) handler that Buck will use. The possible values are:
-
-* `python` (default): evaluates globs in the Python interpreter while parsing [build file](https://buck.build/concept/build_file.html)s.
-* `watchman`: evaluates the globs with [Watchman](https://facebook.github.io/watchman/), which is generally much faster.
-
-```
-[project]glob_handler = python
-```
-
-### allow_symlinks
-
-If set to `forbid`, Buck will disallow symbolic links to source and `BUCK` files. This allows Buck to enable a number of performance improvements. If set to `allow`, Buck will silently ignore symlinks.
-The default value is `warn`.
-
-```
-[project]allow_symlinks = forbid
-```
-
-### build_file_search_method
-
-How Buck finds `BUCK` files. This is used when a [build target pattern contains `/...`](https://buck.build/concept/build_target_pattern.html) and for commands like [`buck project`](https://buck.build/command/project.html). Possible values are:
-
-* `filesystem_crawl` (default): walk the file system recursively using APIs provided by the operating system.
-* `watchman`: query [Watchman](https://facebook.github.io/watchman/) with a glob query like `**/BUCK`. For file systems such as EdenFS, `watchman` can be faster than `filesystem_crawl`.
-
-This setting is independent of [`[project].glob_handler`](https://buck.build/files-and-dirs/buckconfig.html#project.glob_handler).
-
-```
-[project]build_file_search_method = filesystem_crawl
-```
-
-### watchman_query_timeout_ms
-
-When communicating with [Watchman](https://facebook.github.io/watchman/), Buck will wait this long for a response. The default is `60000` ms.
-
-```
-[project]watchman_query_timeout_ms = 60000
-```
-
-### ide_force_kill
-
-Configures how the `buck project` command responds if an instance of Apple's Xcode IDE is running.
-
-```
-[project]ide_force_kill = never
-```
-
-Possible values are:
-
-* `always`: Always terminate Xcode. Do not ask first.
-* `never`: Never terminate Xcode.
-* `prompt`: Ask the user whether to terminate Xcode. This is the default.
-
-To specify that Buck should respond in a way that is different than the `.buckconfig` setting, use the `--config` command-line option.
-
-```
-buck project --config project.ide_force_kill=always
-```
-
-For more information about the `--config` option, see the [**Common Parameters**](https://buck.build/command/common_parameters.html) topic.
-
-### initial_targets
-
-A space-separated list of [build target](https://buck.build/concept/build_target.html)s to run when [`buck project`](https://buck.build/command/project.html) is executed.
-This is often a list of [`genrule`](https://buck.build/rule/genrule.html)s whose outputs need to exist in order for an IDE to be able to build a project without the help of Buck.
-
-```
-[project]initial_targets = //java/com/facebook/schema:generate_thrift_jar
-```
-
-### ignore
-
-A comma-separated list of subtrees within the project root which are ignored in the following contexts:
-
-* Buck daemon filesystem monitoring.
-* Filesystem traversal when searching for tests and BUCK files.
-* IntelliJ project indexing.
-
-Buck automatically excludes its own output, e.g. `buck-out`, `.buckd`, and `.idea`, as well as the cache directory (see [`[cache].mode`](https://buck.build/files-and-dirs/buckconfig.html#cache.mode)), but it makes no assumptions about source control systems.
-
-```
-[project]ignore = .git
-```
-
-### pre_process
-
-A script that should be executed before the project files are generated. This should only be used to do some project-specific actions that are reasonably fast.
-The environment of this script contains the following variables:
-
-* `BUCK_PROJECT_TARGETS` - whitespace-separated list of input targets.
-* `BUCK_PROJECT_TYPE` - the type of a project, can be "xcode" or "intellij".
-
-```
-[project]pre_process = scripts/pre_process_buck_project.py
-```
-
-### post_process
-
-A script that should be executed after the project files are generated. This should only be used to do some project-specific actions that are reasonably fast.
-The environment of this script contains the following variables:
-
-* `BUCK_PROJECT_TARGETS` - whitespace-separated list of input targets.
-* `BUCK_PROJECT_TYPE` - the type of a project, can be "xcode" or "intellij".
-
-```
-[project]post_process = scripts/post_process_buck_project.py
-```
-
-### parallel_parsing
-
-When set to `true`, Buck will parse your [build file](https://buck.build/concept/build_file.html)s in parallel.
-
-```
-[project]parallel_parsing = false
-```
-
-### parsing_threads
-
-When [`[project].parallel_parsing`](https://buck.build/files-and-dirs/buckconfig.html#project.parallel_parsing) is enabled, this specifies the number of threads Buck uses to parse. By default, this is equal to the number of threads Buck uses to build, and will be the minimum of this setting and [`[build].threads`](https://buck.build/files-and-dirs/buckconfig.html#build.threads).
-
-```
-[project]parsing_threads = 2
-```
-
-### build_file_import_whitelist
-
-A comma-separated list that configures which Python modules can be imported in build files.
-
-```
-[project]build_file_import_whitelist = math, Foo
-```
-
-### shared_libraries_in_bundles
-
-When generating an Xcode project, if this is set to `true` and a project depends on a library that is the 'binary' of a bundle, the bundle will replace the library in the Xcode linking phase.
-
-```
-[project]shared_libraries_in_bundles = false
-```
-
-### motd
-
-A plain text message that will be printed first when a user interacts with buck. This supports simple special characters like newlines (\n).
-
-```
-[project]motd = "DO NOT BREAK THE BUILD"
-```
-
-## [python]
-
-This section defines settings relevant to `python_*` rules.
-
-#### Python platform flavors in `.buckconfig`
-
-Buck enables you to create additional platform *flavors* for Python in `.buckconfig`. A platform flavor groups together a set of configuration parameters, which you can then reference at build time.
-To create a new Python platform flavor, add a section with the header
-
-```
-[python#**flavor**]
-```
-
-to `.buckconfig`.
-If you invoke Buck with the specified *flavor* appended to the [build target](https://buck.build/concept/build_target.html), Buck uses the values in this section instead of those in `[python]`. For example, to build with the values in `[python#py3]` instead of `[python]`, you could invoke Buck using the following command:
-
-```
-$ buck build app#py3
-```
-
-This is useful if, for example, you have both Python 2 and Python 3 code in your project and need to differentiate between them by changing the value of the [`[python].interpreter`](https://buck.build/files-and-dirs/buckconfig.html#python.interpreter).
-You can also use these platform flavors in the `platform` argument of the [`python_binary`](https://buck.build/rule/python_binary.html) rule, and in the `platform_sources` and `platform_resources` arguments of the [`python_library`](https://buck.build/rule/python_library.html) rule.
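-
-A minimal sketch of such a flavor section (the interpreter path here is hypothetical) could look like this:
-
-```
-[python#py3]interpreter = /usr/bin/python3
-```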
-### version
-
-The implementation and version of the Python interpreter. The syntax is:
-
-```
-[python]version = <implementation> <version>
-```
-
-The implementation and version should be separated by a space.
-The version should comprise only numerals and periods; it should not contain characters such as `+`, although some Python versions use such characters.
-To obtain the implementation, you can use the following command, invoked using the relevant Python interpreter:
-
-```
-python -c "import platform; print(platform.python_implementation())"
-```
-
-Similarly, to obtain the version, use:
-
-```
-python -c "import platform; print(platform.python_version())"
-```
-
-Example:
-
-```
-[python]version = CPython 2.7
-```
-
-## [repositories]
-
-Lists the cells that constitute the Buck project. Buck builds that are part of this project—that is, which use this `.buckconfig`—can access the cells specified in this section.
-
-```
-[repositories]
-  buck = .
-  bazel_skylib = ./third-party/skylark/bazel-skylib
-```
-
-The string on the left-hand side of the equals sign is the *alias* for the cell. The string on the right-hand side of the equals sign is the path to the cell from the directory that contains this `.buckconfig` file.
-It is not necessary to include the current cell in this section, but we consider it a best practice to do so:
-
-```
-buck = .
-```
-
-You can view the contents of this section using the [`buck audit cell`](https://buck.build/command/audit.html) command.
-Although the name of the section is *repositories*, the section actually lists *cells*. In practice, Buck cells often correspond to repositories, but this is not a requirement.
-For more information about the relationship between Buck projects, cells, and repositories, see the [Key Concepts](https://buck.build/about/overview.html) topic.
-
-## [resources]
-
-The settings that control how Buck uses resources to schedule its work. When the resource-aware scheduler is enabled, Buck creates more threads in an attempt to run resource-independent work in parallel. The number of build threads is still controlled by the `num_threads` option. Buck also creates a number of additional threads for tasks that don't require CPU: network fetches, disk operations, and so on. The total number of threads that Buck operates is controlled by the `managed_thread_count` option; that is, it includes both build threads and additional threads.
-
-### resource_aware_scheduling_enabled
-
-When set to `true`, Buck attempts to use the resource-aware scheduler.
-
-```
-[resources]resource_aware_scheduling_enabled = true
-```
-
-### managed_thread_count
-
-Buck uses `num_threads` threads for CPU-intensive tasks (e.g. local building) and `managed_thread_count - num_threads` threads for other purposes. Thus, the `managed_thread_count` value must be greater than or equal to the `num_threads` value. If you don't specify this value, Buck creates a number of additional threads equal to the number of CPU cores on the machine. These additional threads are used for non-CPU work such as networking and disk I/O, although if one of the `num_threads` threads is free, Buck may use it for non-CPU work as well.
-
-```
-[resources]managed_thread_count = 40
-```
-
-### default_cpu_amount
-
-The amount of CPU resource required by any job that has no specific setting for its resource amounts. The default is 1: a single CPU is required for the job to complete.
-
-```
-[resources]default_cpu_amount = 1
-```
-
-### default_memory_amount
-
-The amount of memory resource required by any job that has no specific setting for its resource amounts. The default is 1: a single memory resource is required for the job to complete. A single memory resource is an abstract value; currently it equals 100 MB.
-
-```
-[resources]default_memory_amount = 1
-```
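-
-To make the thread arithmetic above concrete, a worked example (the numbers are illustrative, not defaults):
-
-```
-num_threads = 12            # CPU-bound build threads ([build].threads)
-managed_thread_count = 40   # total threads Buck manages
-
-# Threads left over for non-CPU work (network fetches, disk I/O, ...):
-non_cpu_threads = managed_thread_count - num_threads  # 28
-
-# Resource amounts cap concurrency separately. For example, with
-# max_disk_io_resource = 50, at most ten jobs that each declare a
-# disk I/O amount of 5 can run at once:
-max_parallel_disk_jobs = 50 // 5  # 10
-```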
-### default_disk_io_amount
-
-The amount of disk I/O resource required by any job that has no specific setting for its resource amounts. A single disk resource is an abstract value: think of it as an SSD being able to handle 50 parallel disk jobs of weight 1, while an HDD can handle only 20. Thus, if a job needs to read or write a lot of data, it is better to assign a higher value to its disk I/O amount. This reduces the risk of several such jobs running concurrently and performing huge disk I/O operations, slowing down both the build and overall system performance.
-
-```
-[resources]default_disk_io_amount = 1
-```
-
-### default_network_io_amount
-
-The amount of network I/O resource required by any job that has no specific setting for its resource amounts. A single network resource is an abstract value: think of it as Ethernet being able to handle 50 parallel network jobs of weight 1, while slower network interfaces can handle fewer. If a job needs to send or receive a lot of data, it is better to assign a higher value to its network I/O amount.
-
-```
-[resources]default_network_io_amount = 1
-```
-
-### max_memory_resource
-
-The maximum memory resource available to Buck. By default, it is the size of the Java heap divided by 100 MB. A single memory resource is an abstract value; currently it equals 100 MB.
-
-```
-[resources]max_memory_resource = 30
-```
-
-### max_disk_io_resource
-
-The maximum disk I/O resource available to Buck. By default, the value is 50. Think of it as an SSD being able to handle 50 parallel disk jobs of weight 1, while an HDD can handle only 20. Thus, if a job needs to read or write a lot of data, it should require a higher disk I/O resource.
-
-```
-[resources]max_disk_io_resource = 30
-```
-
-### max_network_io_resource
-
-The maximum network I/O resource available to Buck. By default, the value is 30. Think of it as Ethernet being able to handle 50 parallel network jobs of weight 1, while slower network interfaces can handle fewer. Thus, if a job needs to send or receive a lot of data, it should require a higher network I/O resource.
-
-```
-[resources]max_network_io_resource = 30
-```
-
-## [resources_per_rule]
-
-This section contains the required resource amounts for various build rules. If the amounts for a build rule are not specified in this section, then amounts of 1 (CPU), 1 (memory), 0 (disk I/O), and 0 (network I/O) are used. Amounts are used during local building, so in most cases a build rule will require 0 for network I/O unless it fetches data from the network. A rule's name is constructed by converting the camel-case class name of the `BuildRule` in Buck's source code (e.g. `MyBuildRule`) into its lower snake-case equivalent (e.g. `my_build_rule`), as in the sketch below.
-
-```
-[resources_per_rule]cxx_link = 1, 1, 5, 0
-  android_binary = 8, 30, 30, 0
-```
-
-Buck uses the defined resource amounts during the build process in an attempt to make full use of all available resources.
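-
-The name conversion described above can be expressed as follows (a sketch; Buck's actual implementation may differ):
-
-```
-import re
-
-def rule_config_name(class_name):
-    """Convert a BuildRule class name like 'CxxLink' to 'cxx_link'."""
-    return re.sub(r"(?<!^)(?=[A-Z])", "_", class_name).lower()
-
-assert rule_config_name("CxxLink") == "cxx_link"
-assert rule_config_name("AndroidBinary") == "android_binary"
-assert rule_config_name("MyBuildRule") == "my_build_rule"
-```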
-## [rust]
-
-The settings that control how Buck builds `rust_*` rules.
-
-### compiler
-
-The path that Buck should use to compile Rust files. By default, it checks your `PATH`.
-
-```
-[rust]compiler = /usr/local/bin/rustc
-```
-
-### rustc_flags
-
-Default command-line flags passed to all invocations of the Rust compiler.
-
-```
-[rust]rustc_flags = -g
-```
-
-### rustc_binary_flags
-
-Default command-line flags passed to invocations of the Rust compiler in `rust_binary` rules, in addition to the options set in `rustc_flags`.
-
-```
-[rust]rustc_binary_flags = -C lto
-```
-
-### rustc_library_flags
-
-Default command-line flags passed to invocations of the Rust compiler in `rust_library` rules, in addition to the options set in `rustc_flags`.
-
-```
-[rust]rustc_library_flags = --cfg=debug
-```
-
-### unflavored_binaries
-
-Controls whether the output paths from `rust_binary` or `rust_test` rules include a flavor from the platform. Even unflavored, the path includes `#binary`.
-
-```
-[rust]unflavored_binaries = true
-```
-
-### remap_src_paths
-
-Controls whether `rustc` remaps the source paths in its output. Buck always constructs a link tree with the sources required for a given rule, which means the paths passed to `rustc` are not the original source paths. This option remaps those paths in compiler output, debug info, `file!()`, and elsewhere to match the original source names. The options are "no" (don't remap) and "yes" (remap).
-
-```
-[rust]remap_src_paths = no
-```
-
-### force_rlib
-
-When `force_rlib` is true, Buck always compiles static (rlib) libraries, even when the final target (binary or unit test) is linked with a shared link style. Rust code is typically statically linked anyway, and a lot of surrounding tooling doesn't cope well with dynamically linked Rust crates. Linking with a shared link style will still dynamically link against C/C++ shared objects.
-
-```
-[rust]force_rlib = false
-```
-
-### prefer_static_libs
-
-When `prefer_static_libs` is true, Buck prefers to link with the static versions of a library when building a shared target. In practice, this only affects linking with the standard library crates.
-
-```
-[rust]prefer_static_libs = false
-```
-
-### incremental
-
-When set, enables rustc's incremental build option.
-Rust's incremental compilation mode operates transparently to the build system: it is guaranteed to produce bit-for-bit identical output to non-incremental builds. To do this, it maintains a separate incremental database on the side. The only requirement is that there is only ever one instance of `rustc` for a given crate at a time. Buck guarantees this by keeping a separate incremental database for each flavor (since builds for different flavors of the same target can run concurrently).
-The value of this option is an additional path fragment used for the incremental database path. This allows the user to keep separate databases for optimized, debug, etc. command lines. If this is not required, it can be any valid pathname fragment.
-
-```
-[rust]incremental = opt
-```
-
-### default_edition
-
-Sets the default edition for Rust rules. The edition can be specified on a per-rule basis, but this sets the default when nothing is specified. The default is "2015".
-
-```
-[rust]default_edition = 2018
-```
-
-## [sandbox]
-
-This section controls sandboxing. Sandboxed execution provides better guarantees about the resources accessible to processes by using system-provided capabilities to restrict certain operations (for example, restricting the set of files that may be read or written).
-
-### darwin_sandbox_enabled
-
-This option specifies whether sandboxing is enabled on OS X.
-
-```
-[sandbox]darwin_sandbox_enabled = true
-```
-
-### genrule_sandbox_enabled
-
-Enables sandboxing for `genrule`.
-
-```
-[sandbox]genrule_sandbox_enabled = true
-```
-
-## [test]
-
-The settings that control how Buck runs tests.
-
-### incl_no_location_classes
-
-This specifies whether JaCoCo code coverage is enabled for classes without a source location. The default is false. Set it to true to enable code coverage for Robolectric tests. Note that setting it to true will include dynamically created sources in code coverage, such as those created by mocking (e.g. jmockit) or persistence frameworks.
-
-```
-[test]incl_no_location_classes = true
-```
-
-### timeout
-
-The number of milliseconds each test is allowed to run before Buck stops it and reports a failure. The default is no timeout. Not all `*_test` rules honor this value. A JUnit test can override it via the `@Test` annotation.
-
-```
-[test]timeout = 300000
-```
-
-### rule_timeout
-
-The number of milliseconds each `*_test` rule is allowed to run before Buck stops it and reports a failure. The default is no timeout.
-
-```
-[test]rule_timeout = 1200000
-```
-
-### external_runner
-
-This specifies an external test runner command to use instead of Buck's built-in test runner. The external test runner is invoked by Buck after it has built all the test rules. It passes the test runner the path to a file that contains a JSON-encoded list of test infos via the `--buck-test-info [path]` command-line option.
-Additionally, if [`buck test`](https://buck.build/command/test.html) is invoked with `-- [extra-runner-args]`, these are passed to the external runner before `--buck-test-info`.
-The JSON-encoded test file contains an array of infos with the following fields (a sketch of a runner that consumes this file appears at the end of this section):
-
-* `target`: The [build target](https://buck.build/concept/build_target.html) of the test rule.
-* `type`: A string describing the type of the test.
-* `command`: An array of command-line arguments the test runner should invoke to run the test.
-* `env`: A map of environment variables that should be defined by the test runner when running the test.
-* `labels`: An array of labels that are defined on the test rule.
-* `contacts`: An array of contacts that are defined on the test rule. These are typically user names or email addresses.
-
-```
-[test]external_runner = command args...
-```
-
-### thread_utilization_ratio
-
-Sets the maximum number of threads to use for testing, as a ratio of the number of threads used for building. By default (`1.0`), Buck runs tests on all threads that were used for building.
-
-```
-[test]thread_utilization_ratio = 0.5
-```
-
-### parallel_external_test_spec_computation_enabled
-
-Whether external test spec computation is allowed to happen in parallel. Enabling this option can significantly speed up test execution when many test targets are requested. It is disabled by default.
-
-```
-[test]parallel_external_test_spec_computation_enabled = false
-```
-
-### threads
-
-Specifies the number of threads used when running tests.
-
-```
-[test]threads = 5
-```
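-
-A minimal sketch of an external runner that consumes the `--buck-test-info` file described above (error handling, labels, and contacts are omitted):
-
-```
-#!/usr/bin/env python3
-import argparse
-import json
-import os
-import subprocess
-
-parser = argparse.ArgumentParser()
-parser.add_argument("--buck-test-info", dest="buck_test_info", required=True)
-args, extra_runner_args = parser.parse_known_args()
-
-with open(args.buck_test_info) as f:
-    tests = json.load(f)  # JSON-encoded array of test infos
-
-for info in tests:
-    env = dict(os.environ, **info.get("env", {}))
-    result = subprocess.run(info["command"], env=env)
-    print("%s (%s): exit %d" % (info["target"], info["type"], result.returncode))
-```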
-
-## [thrift]
-
-This section provides settings to locate the required thrift components.
-
-### compiler
-
-The path or [build target](https://buck.build/concept/build_target.html) that builds the [thrift](https://thrift.apache.org/) compiler that Buck should use.
-
-```
-[thrift]compiler = /usr/local/bin/thrift
-```
-
-### compiler2
-
-The path or [build target](https://buck.build/concept/build_target.html) that builds the [thrift2](https://github.com/facebook/fbthrift) compiler that Buck should use. If this is unset, it defaults to the value of [`[thrift].compiler`](https://buck.build/files-and-dirs/buckconfig.html#thrift.compiler).
-
-```
-[thrift]compiler2 = /usr/local/bin/thrift2
-```
-
-## [tools]
-
-This section tells Buck how to find certain tools, e.g. how Java compilation occurs, and how auxiliary tools are used, e.g. the [ProGuard](http://proguard.sourceforge.net/) Java class file optimizer, which is used as part of the Android build process.
-
-### javac
-
-The `javac` option is a path to a program that acts like Java's `javac`. When set, Buck uses this program instead of the system Java compiler. When neither this nor [`[tools].javac_jar`](https://buck.build/files-and-dirs/buckconfig.html#tools.javac_jar) is set, Buck defaults to using the system compiler in-memory.
-
-### javac_jar
-
-When this option is set to a JAR file, Buck loads the referenced compiler in-memory. When neither this nor [`[tools].javac`](https://buck.build/files-and-dirs/buckconfig.html#tools.javac) is set, Buck defaults to using the system compiler in-memory.
-
-### java_for_tests
-
-The `java_for_tests` option is a path to a `java` binary. When set, Buck uses that binary to execute Java tests—when using either the internal or external test runners—instead of the `java` binary used to run Buck itself. When this option is not set, Buck executes Java tests using the same binary used to run Buck.
-
-### compiler_class_name
-
-When `javac_jar` is set, Buck loads the compiler class with this name from that JAR. When this option is not set but `javac_jar` is, Buck uses the default compiler class.
-
-### proguard
-
-This option specifies the location of the JAR file to be used to invoke ProGuard. This overrides the default ProGuard JAR file that would have been picked up from the Android SDK. Here is an example setting:
-
-```
-[tools]proguard = proguard/proguard-fork.jar
-```
-
-### proguard-max-heap-size
-
-This option specifies how much memory is used when running ProGuard. Defaults to `1024M`. You may want to give ProGuard more memory to try to improve performance.
-
-```
-[tools]proguard-max-heap-size = 4096M
-```
-
-### proguard-agentpath
-
-This option specifies a Java profiling agent, which is set with the `-agentpath` argument when the ProGuard JAR file is executed. Typically this would be set in a `.buckconfig.local` configuration file when you want to profile a build running on your local machine. Set this to the actual path of the installed agent on the machine where ProGuard will run.
-
-```
-[tools]proguard-agentpath = /Applications/YourKit_Java_Profiler_2015_build_15084.app/Contents/Resources/bin/mac/libyjpagent.jnilib
-```
-
-## [ui]
-
-This section configures the appearance of Buck's command-line interface.
-
-### always_sort_threads_by_time
-
-Specifies whether the lines with information about building and testing threads should always be sorted by the time spent running the rules they are currently executing. When set to false, threads are only sorted if there are more threads than available lines (see [`[ui].thread_line_limit`](https://buck.build/files-and-dirs/buckconfig.html#ui.thread_line_limit) for an option to configure this limit). Only effective when the super console is used. The default value is false.
-
-```
-[ui]always_sort_threads_by_time = true
-```
-
-### error_message_augmentations
-
-This setting is preliminary and is likely to change.
-Specifies a comma-separated list of mappings from regular expressions (regexes) to message strings.
-If the text of a Buck parser error matches one of the specified regexes, the corresponding message string is appended to the error. You can use the message string to provide additional helpful information to the user.
-If the regex contains unescaped parentheses, `()`, the text that the parentheses enclose is captured. You can then insert this captured text into the appended string by using `$1` for the first captured text string, `$2` for the second, and so on. This works exactly like Java regex replacement strings.
-
-```
-[ui]error_message_augmentations = "The rule (//\S+)-cxx could not be found." => "Please make sure that $1 is a cxx library."
-```
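-
-To illustrate the capture-and-append behavior (sketched in Python, so the replacement uses `\1` where Buck's Java-style syntax uses `$1`):
-
-```
-import re
-
-# Mirrors the mapping in the example above.
-pattern = r"The rule (//\S+)-cxx could not be found\."
-augmentation = r"Please make sure that \1 is a cxx library."
-
-error = "The rule //foo/bar:baz-cxx could not be found."
-m = re.search(pattern, error)
-if m:
-    # Append the expanded message string to the original error text.
-    error += " " + m.expand(augmentation)
-print(error)
-```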
-
-### relativize_targets_to_working_directory
-
-Determines whether [build target pattern](https://buck.build/concept/build_target_pattern.html)s provided on the command line are relativized to the current working directory. For example, if `buck build bar/...` is run from the `foo` subdirectory of the project, the pattern `//foo/bar/...` is built. If set to `false`, `//bar/...` would be built instead. This defaults to true.
-
-```
-[ui]relativize_targets_to_working_directory = false
-```
-
-### enable_show_output_warning
-
-Determines whether a deprecation warning for `--show-output` is shown. The warning also informs users that they should use `--show-outputs` instead. This defaults to false.
-
-```
-[ui]enable_show_output_warning = false
-```
-
-### thread_line_limit
-
-Specifies how many lines are used by default to show the status of running threads during building and testing. Only effective when the super console is used. The value must be a positive number. The default value is 10.
-
-```
-[ui]thread_line_limit = 10
-```
-
-### thread_line_limit_on_warning
-
-Specifies how many lines are used to show the status of running threads during building and testing after a warning is reported. Only effective when the super console is used. The value must be a positive number. Defaults to the value of [`[ui].thread_line_limit`](https://buck.build/files-and-dirs/buckconfig.html#ui.thread_line_limit).
-
-```
-[ui]thread_line_limit_on_warning = 10
-```
-
-### thread_line_limit_on_error
-
-Specifies how many lines are used to show the status of running threads during building and testing after an error is reported. Only effective when the super console is used. The value must be a positive number. Defaults to the value of [`[ui].thread_line_limit`](https://buck.build/files-and-dirs/buckconfig.html#ui.thread_line_limit).
-
-```
-[ui]thread_line_limit_on_error = 10
-```
-
-### truncate_failing_command
-
-Determines whether a failing executed command is truncated in error messages. This defaults to true.
-
-```
-[ui]truncate_failing_command = true
-```
-
-### superconsole
-
-Whether the super console is enabled; if so, a more reactive UI is shown. Valid values are ENABLED, DISABLED, and AUTO. By default, this is set to AUTO, which takes the OS, terminal settings, and other factors into account. In most interactive cases, it will be enabled.
-
-```
-[ui]superconsole = ENABLED
-```
-
-### warn_on_config_file_overrides
-
-Whether to display a warning when configuration overrides from `.buckconfig.local` or any of the files mentioned in [**Precedence of Buck configuration specifications**](https://buck.build/files-and-dirs/buckconfig.html#config-precedence) are in use.
-This is true by default.
-
-```
-[ui]warn_on_config_file_overrides = false
-```
-
-### warn_on_config_file_overrides_ignored_files
-
-A comma-separated list of names of configuration files that should be ignored.
-By default, Buck prints a warning if settings are in use from any of the files in [**Precedence of Buck configuration specifications**](https://buck.build/files-and-dirs/buckconfig.html#config-precedence).
-Sometimes, however, a user should not be alerted about specific files. For example, there may be global Buck settings in `/etc/buckconfig.d/system` that are managed by an IT organization, not the user, so the warning would just be noise. In this case, this setting could be set to `system` so that the presence of `/etc/buckconfig.d/system` does not elicit a warning.
-
-```
-[ui]warn_on_config_file_overrides_ignored_files = experiments,system
-```
-
-## [worker]
-
-This section configures how Buck's workers (`worker_tool`s and similar) work.
-
-### persistent
-
-Specifies whether workers run in persistent mode by default (reusing the worker process across builds). The `persistent` option of `worker_tool` overrides this default. The default value is false. Be careful when switching this to true, since the workers will not shut down after Buck commands complete and will continue consuming system resources.
-
-```
-[worker]persistent = false
-```
diff --git a/docs/rfcs/attr-metadata.md b/docs/rfcs/attr-metadata.md
index d1950fc1ba717..d25b47f3b58fd 100644
--- a/docs/rfcs/attr-metadata.md
+++ b/docs/rfcs/attr-metadata.md
@@ -1,44 +1,41 @@
 # RFC: labels -> metadata attribute

-This RFC proposes to add new builtin per target attribute: `metadata`,
-as replacement for `labels`.
+This RFC proposes to add new builtin per target attribute: `metadata`, as
+replacement for `labels`.

 ## Context: labels

-In buck1 we have `labels` builtin rule attribute, which is a list of
-strings.
+In buck1 we have `labels` builtin rule attribute, which is a list of strings.

-In buck2 we have `labels` attribute which is configured in prelude, it
-does not have special meaning.
+In buck2 we have `labels` attribute which is configured in prelude, it does not
+have special meaning.

 ## Context: package values

-`PACKAGE` files have a function: `write_package_value(key, value)`,
-where a key is a word-dot-word string, and value is arbitrary starlark
-value which should be serializable as JSON.
+`PACKAGE` files have a function: `write_package_value(key, value)`, where a key
+is a word-dot-word string, and value is arbitrary starlark value which should be
+serializable as JSON.

 ## Context: metadata we use or we need

-There are several spaces where we use or need metadata to be stored in
-buck2 target graph.
+There are several spaces where we use or need metadata to be stored in buck2
+target graph.

-* fbcode uses per-package values to switch code to new
-  clang ([example](https://www.internalfb.com/code/fbsource/[ef740e6f2610c64621f7547a3b46d54d32af8600]/fbcode/ownership/code_metadata/PACKAGE?lines=3))
-* testinfra wants to use `PACKAGE` values to mark a set of folders to a
-  logical larger project
-* it is likely that per-target `metadata` attribute should be used
-  in [configuration factory function](drafts/cfg-modifiers/api.md).
-* TD wants to declare CI trigger jobs per-target or per-package, and
-  this logic is to be specified in `BUCK` or `PACKAGE` files — as
-  metadata
+- fbcode uses per-package values to switch code to new clang
  ([example](https://www.internalfb.com/code/fbsource/[ef740e6f2610c64621f7547a3b46d54d32af8600]/fbcode/ownership/code_metadata/PACKAGE?lines=3))
+- testinfra wants to use `PACKAGE` values to mark a set of folders to a logical
+  larger project
+- it is likely that per-target `metadata` attribute should be used in
+  configuration factory function.
+- TD wants to declare CI trigger jobs per-target or per-package, and this logic + is to be specified in `BUCK` or `PACKAGE` files — as metadata ## Proposal: metadata attribute Add builtin `metadata` attribute to all the targets. -`metadata` has the same structure as package values: word-dot-word to -arbitrary value serializable to JSON. +`metadata` has the same structure as package values: word-dot-word to arbitrary +value serializable to JSON. For example: diff --git a/docs/rfcs/audit_visibility.md b/docs/rfcs/audit_visibility.md index a1c81800cf23a..deeaf033e0f29 100644 --- a/docs/rfcs/audit_visibility.md +++ b/docs/rfcs/audit_visibility.md @@ -1,54 +1,89 @@ # `buck2 audit visibility` command ## Context -Buck has a concept of [Visibility](../legacy/concepts/visibility.md) for every target. -It allows users to define, for each target, the targets it can depend on and targets that can depend on it. -Visibility is specified as an allowlist of targets/target patterns, and any target used that falls outside of the allowlist fails visibility checking. -Visibility pattern can be specified on `visibility` and `within_view` attributes in buildfiles and [PACKAGE files](https://www.internalfb.com/intern/wiki/Buck-users/Key_Concepts/Package_Files/). - -Visibility is important to lots of codebase maintainers because it can be used to keep projects from pulling in unwanted dependencies. -As some examples, App Core teams are using Buck visibility as a [replacement to current supermodules for protecting app modularity](https://fb.prod.workplace.com/groups/2292177024436518/permalink/3112235492430663/). - Instagram's using visibility to [protect modularity and define Link Groups used for build speed optimizations](https://fb.prod.workplace.com/groups/devx.build.bffs/posts/5169450219756775/?comment_id=5169500636418400). - There's interest from various DevX teams in using Buck visibility on [PACKAGE files](https://www.internalfb.com/intern/wiki/Buck-users/Key_Concepts/Package_Files/) to [enforce repo boundaries, which will allow target determinators to migrate off of sparse profiles and onto Eden](https://fb.prod.workplace.com/groups/devx.build.bffs/posts/5169450219756775/), although visibility in its current form is likely not fit for enforcing such repo boundaries. - Visibility has also been used to enforce [requirements that only certain targets are allowed to depend on targets in fbcode/scripts](https://fb.workplace.com/groups/buckeng/permalink/4392940254087889/). - -For perf reasons, buck2 doesn't always enforce visibility. -Instead, it only enforces visibility on construction of the configured target graph. -Visibility checking is expensive memory-wise because it requires tracking all deps at each node. -When constructing configured target graph, this cost is already paid for when buck2 checks transitive target compatibility. -When constructing the unconfigured target graph, however, this is costly, so we avoid checking visibility there. -(Note that buck does not allow you to specify selects in visibility attributes.) - -In practice, this means that commands like `cquery` and `build` can enforce visibility whereas commands like `uquery` and `targets` cannot. -Having visibility checked only on the configured target graph is problematic for 2 reasons: - -1) Visibility is only checked on configured deps after selects are resolved, so it's possible for a target to pass visibility checking in one configuration but fail visibility checking in another. 
-For example, a target may pass visibility checking on a linux configuration but fail visibility checking on mac configuration if it has a bad mac-only dependency. -This makes visibility enforcement more difficult because now you have to query the same graph in both linux and mac configuration before you know that visibility is always valid. - -2) Uquery (querying the unconfigured target graph) has better performance than cquery (querying the configured target graph). -Big-O wise, uquery scales with O(# of targets) whereas cquery scales with O((# number of configurations) x (# of targets)). -Having a way to check visibility on unconfigured target graph can be much cheaper than doing so on configured target graph. + +Buck has a concept of Visibility for every target. It allows users to define, +for each target, the targets it can depend on and targets that can depend on it. +Visibility is specified as an allowlist of targets/target patterns, and any +target used that falls outside of the allowlist fails visibility checking. +Visibility pattern can be specified on `visibility` and `within_view` attributes +in buildfiles and +[PACKAGE files](https://www.internalfb.com/intern/wiki/Buck-users/Key_Concepts/Package_Files/). + +Visibility is important to lots of codebase maintainers because it can be used +to keep projects from pulling in unwanted dependencies. As some examples, App +Core teams are using Buck visibility as a +[replacement to current supermodules for protecting app modularity](https://fb.prod.workplace.com/groups/2292177024436518/permalink/3112235492430663/). +Instagram's using visibility to +[protect modularity and define Link Groups used for build speed optimizations](https://fb.prod.workplace.com/groups/devx.build.bffs/posts/5169450219756775/?comment_id=5169500636418400). +There's interest from various DevX teams in using Buck visibility on +[PACKAGE files](https://www.internalfb.com/intern/wiki/Buck-users/Key_Concepts/Package_Files/) +to +[enforce repo boundaries, which will allow target determinators to migrate off of sparse profiles and onto Eden](https://fb.prod.workplace.com/groups/devx.build.bffs/posts/5169450219756775/), +although visibility in its current form is likely not fit for enforcing such +repo boundaries. Visibility has also been used to enforce +[requirements that only certain targets are allowed to depend on targets in fbcode/scripts](https://fb.workplace.com/groups/buckeng/permalink/4392940254087889/). + +For perf reasons, buck2 doesn't always enforce visibility. Instead, it only +enforces visibility on construction of the configured target graph. Visibility +checking is expensive memory-wise because it requires tracking all deps at each +node. When constructing configured target graph, this cost is already paid for +when buck2 checks transitive target compatibility. When constructing the +unconfigured target graph, however, this is costly, so we avoid checking +visibility there. (Note that buck does not allow you to specify selects in +visibility attributes.) + +In practice, this means that commands like `cquery` and `build` can enforce +visibility whereas commands like `uquery` and `targets` cannot. Having +visibility checked only on the configured target graph is problematic for 2 +reasons: + +1. Visibility is only checked on configured deps after selects are resolved, so + it's possible for a target to pass visibility checking in one configuration + but fail visibility checking in another. 
For example, a target may pass + visibility checking on a linux configuration but fail visibility checking on + mac configuration if it has a bad mac-only dependency. This makes visibility + enforcement more difficult because now you have to query the same graph in + both linux and mac configuration before you know that visibility is always + valid. + +2. Uquery (querying the unconfigured target graph) has better performance than + cquery (querying the configured target graph). Big-O wise, uquery scales with + O(# of targets) whereas cquery scales with O((# number of configurations) x + (# of targets)). Having a way to check visibility on unconfigured target + graph can be much cheaper than doing so on configured target graph. ## Proposed Solution: `audit visibility` command -It's clear that we need a way to check visibility on the unconfigured target graph, but we don't want `buck2 uquery` and `buck2 targets` to regress in memory use. -To get the best of both worlds, I propose adding a separate command to buck2, `buck2 audit visibility`, that will check visibility on the unconfigured target graph. -Instead of checking on construction of the unconfigured target graph, this command will check after construction, which will avoid any memory regression. -The tradeoff is that the visibility checking won't be cached, and rerunning `audit visibility` will rerun visibility checking on each invocation. +It's clear that we need a way to check visibility on the unconfigured target +graph, but we don't want `buck2 uquery` and `buck2 targets` to regress in memory +use. To get the best of both worlds, I propose adding a separate command to +buck2, `buck2 audit visibility`, that will check visibility on the unconfigured +target graph. Instead of checking on construction of the unconfigured target +graph, this command will check after construction, which will avoid any memory +regression. The tradeoff is that the visibility checking won't be cached, and +rerunning `audit visibility` will rerun visibility checking on each invocation. ## Usage and Invocation -`buck2 audit visibility` command will take in a list of target patterns as well as common build args like config flags and mode files as args. -It will construct the unconfigured target graph based on the **transitive deps** of those targets and check that this graph has valid visibility. -Checking transitive deps matches the behavior of visibility checking on cquery, but we may revisit this decision in the future if there is a need for just verifying the immediate dependencies. +`buck2 audit visibility` command will take in a list of target patterns as well +as common build args like config flags and mode files as args. It will construct +the unconfigured target graph based on the **transitive deps** of those targets +and check that this graph has valid visibility. Checking transitive deps matches +the behavior of visibility checking on cquery, but we may revisit this decision +in the future if there is a need for just verifying the immediate dependencies. + +For example, an invocation to check visibility on the transitive closure of +fbobjc can be -For example, an invocation to check visibility on the transitive closure of fbobjc can be ```shell buck2 audit visibility fbsource//fbobjc/... ``` -It cannot be used to check that a target has a valid visibility with respect to targets outside of the transitive closure of its deps. 
-For example, `buck2 audit visibility fbcode//buck2/starlark-rust/starlark:starlark` will just check that all transitive deps of `starlark` target (including `starlark` target) have valid visibility - with respect to each other. -It will not check that any targets that depend on `starlark` respect `starlark` target's visibility attribute. +It cannot be used to check that a target has a valid visibility with respect to +targets outside of the transitive closure of its deps. For example, +`buck2 audit visibility fbcode//buck2/starlark-rust/starlark:starlark` will just +check that all transitive deps of `starlark` target (including `starlark` +target) have valid visibility with respect to each other. It will not check that +any targets that depend on `starlark` respect `starlark` target's visibility +attribute. diff --git a/docs/rfcs/bxl-analysis.md b/docs/rfcs/bxl-analysis.md index 5357d0b209b85..66f989b3631e2 100644 --- a/docs/rfcs/bxl-analysis.md +++ b/docs/rfcs/bxl-analysis.md @@ -2,23 +2,45 @@ ## Intro -As Bob and I continue to build out `bxl` we want users to be able to inspect the providers and actions for a given target label. In order to support this, we need to be able to provide access to `AnalysisResult` via `starlark`, obtained via a call to `RuleAnalysisCalculation::get_analysis_result`. +As Bob and I continue to build out `bxl` we want users to be able to inspect the +providers and actions for a given target label. In order to support this, we +need to be able to provide access to `AnalysisResult` via `starlark`, obtained +via a call to `RuleAnalysisCalculation::get_analysis_result`. ## How to implement it? Our three principle options are as follows: -1. `BxlContext::analyze(targetlabel: ConfiguredTargetLabelLike)`, where `ConfiguredTargetLabelLike` accepts `ConfiguredTargetLabel`, `ConfiguredTargetNode`, or sets and lists of these things + acceptable strings. - -In this scenario, we attach the analysis method onto the bxl context itself, and require that users pass in the target label-ish thing when they want to construct an analysis result. It's a little awkward in some ways because the analysis is more naturally a method on the argument being passed in and the `BxlContext` is a context that is needed to perform the calculation. On the other hand, this allows us to construct a type analogous to `TargetExpr` which can parse from a wide variety of different `ConfiguredTarget` like things (strings, nodes, labels, sets, ...). It also is a bit nice from an implementational standpoint since we don't have to pass the context around everywhere. This isn't a huge pro though, since we can stick it in the global eval field. +1. `BxlContext::analyze(targetlabel: ConfiguredTargetLabelLike)`, where + `ConfiguredTargetLabelLike` accepts `ConfiguredTargetLabel`, + `ConfiguredTargetNode`, or sets and lists of these things + acceptable + strings. + +In this scenario, we attach the analysis method onto the bxl context itself, and +require that users pass in the target label-ish thing when they want to +construct an analysis result. It's a little awkward in some ways because the +analysis is more naturally a method on the argument being passed in and the +`BxlContext` is a context that is needed to perform the calculation. On the +other hand, this allows us to construct a type analogous to `TargetExpr` which +can parse from a wide variety of different `ConfiguredTarget` like things +(strings, nodes, labels, sets, ...). 
It also is a bit nice from an +implementational standpoint since we don't have to pass the context around +everywhere. This isn't a huge pro though, since we can stick it in the global +eval field. ```python result = bxl.analyze(bxl.cquery.deps("foo")) ``` -2. `ConfiguredTargetLabel::analyze()`, `ConfiguredTargetNode::analyze()`, ... where we carry around the `BxlContext` in the `eval` global field and implement analysis on each type that is target label like. +2. `ConfiguredTargetLabel::analyze()`, `ConfiguredTargetNode::analyze()`, ... + where we carry around the `BxlContext` in the `eval` global field and + implement analysis on each type that is target label like. -The pro of this one is that it's quite natural - you can take a `ConfiguredStarlarkTargetLabel` and then just ... call `analyze()` on it like you might expect to. The two downsides are that we have to propagate the context around behind the scenes, and we'll have to provide an implementation of `analyze` on everything that we'd like to have be able to be `analyzable`. +The pro of this one is that it's quite natural - you can take a +`ConfiguredStarlarkTargetLabel` and then just ... call `analyze()` on it like +you might expect to. The two downsides are that we have to propagate the context +around behind the scenes, and we'll have to provide an implementation of +`analyze` on everything that we'd like to have be able to be `analyzable`. ```python result = "root//bin:the_binary".analyze() @@ -37,12 +59,18 @@ for n in nodes: nodes.analysis ``` -3. `BxlContext::analysis(): AnalysisContext` where `AnalysisContext` exposes `AnalysisContext::analyze(targetlabel: ConfiguredTargetLabelLike)`. +3. `BxlContext::analysis(): AnalysisContext` where `AnalysisContext` exposes + `AnalysisContext::analyze(targetlabel: ConfiguredTargetLabelLike)`. -There's not really any pros of this approach except that it's similar to the flow for `cquery` where we return a `cqueryctx` object to call `cquery` methods through. +There's not really any pros of this approach except that it's similar to the +flow for `cquery` where we return a `cqueryctx` object to call `cquery` methods +through. ```python result = ctx.analysis().analyze("//lib:file1") ``` -We can also restrict the API to require that users go through `cquery` to obtain a `ConfiguredTargetNode` prior to calling `analysis`, although we don't *have to*. I say that we don't have to because the `get_analysis_result` method mentioned above is configured to accept a label anyway. +We can also restrict the API to require that users go through `cquery` to obtain +a `ConfiguredTargetNode` prior to calling `analysis`, although we don't _have +to_. I say that we don't have to because the `get_analysis_result` method +mentioned above is configured to accept a label anyway. diff --git a/docs/rfcs/bxl.md b/docs/rfcs/bxl.md index 147cef7581007..2ba6e640887d9 100644 --- a/docs/rfcs/bxl.md +++ b/docs/rfcs/bxl.md @@ -1,9 +1,11 @@ +import { FbInternalOnly } from 'docusaurus-plugin-internaldocs-fb/internal'; + # Buck Extension Language (BXL) -Buck2 will allow more complex introspection and interaction with its graphs via the `bxl` feature. -BXL will be a starlark script that allows integrators to interact with `buck` commands like -build and query within starlark, creating a sequence of operations that introspect, build, -and extend the build graph. +Buck2 will allow more complex introspection and interaction with its graphs via +the `bxl` feature. 
BXL will be a starlark script that allows integrators to +interact with `buck` commands like build and query within starlark, creating a +sequence of operations that introspect, build, and extend the build graph. @@ -11,12 +13,15 @@ https://fb.workplace.com/groups/buck2prototyping/permalink/2404233936540759/. -These are essentially custom buck operations, defined in Starlark, -that still follow the constraints of Buck2, which will enable -the same level of incrementality and caching as native buck2 operations. Furthermore, bxl will have subscriptions enabled in the future, where based on the incrementality tracking, buck2 can provide “updated” bxl executions when its known that its dependencies change, and even when generated sources need to be regenerated. +These are essentially custom buck operations, defined in Starlark, that still +follow the constraints of Buck2, which will enable the same level of +incrementality and caching as native buck2 operations. Furthermore, bxl will +have subscriptions enabled in the future, where based on the incrementality +tracking, buck2 can provide "updated" bxl executions when its known that its +dependencies change, and even when generated sources need to be regenerated. -The following proposes a basic set of bxl api and building blocks -that are targeted at solving key issues for IDE integration. +The following proposes a basic set of bxl api and building blocks that are +targeted at solving key issues for IDE integration. ## Use Cases @@ -29,107 +34,109 @@ I’ve previously defined some proposed integrations -Lsp prefers to have a single buck command that given a file, -returns the corresponding compilation database. This requires a single command, i.e a bxl, -that accepts a file as input, performs `owners` queries, -and uses the owning target plus the desired file to get the clang flags, -and then writes it to disk in comp db format. -It’s possible to write the same features using buck calls to cquery, -and build using subtargets to generate compilation database per file. -However, this requires lsp owners to maintain code in several locations and languages, -and parse and reserialize data. -It also does not provide the same incrementality and subscription -update features of the resulting comp db that writing this in bxl would have. -Furthermore, we may explore the idea of trimming the compilation command -to only dependencies required per the file requested. -Bxl actions provides a straightforward api for adding this when writing -the actual comp db file. +Lsp prefers to have a single buck command that given a file, returns the +corresponding compilation database. This requires a single command, i.e a bxl, +that accepts a file as input, performs `owners` queries, and uses the owning +target plus the desired file to get the clang flags, and then writes it to disk +in comp db format. It’s possible to write the same features using buck calls to +cquery, and build using subtargets to generate compilation database per file. +However, this requires lsp owners to maintain code in several locations and +languages, and parse and reserialize data. It also does not provide the same +incrementality and subscription update features of the resulting comp db that +writing this in bxl would have. Furthermore, we may explore the idea of trimming +the compilation command to only dependencies required per the file requested. +Bxl actions provides a straightforward api for adding this when writing the +actual comp db file. 
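+
+To make the comp db flow concrete, a hypothetical bxl might look like the
+following (this leans on the API proposed later in this doc; the names here are
+illustrative, not a committed API):
+
+```python
+# compdb.bxl (hypothetical sketch)
+def gen_compdb(ctx):
+    # The requested source file would arrive as a declared bxl argument.
+    file = ctx.args.file
+    # Find the owning target(s), as in the owners query described above.
+    for t in ctx.cquery("owner(%s)" % file):
+        action_ctx = ctx.analysis(t).actions
+        # Persist the comp db entry via an action so the result is cached
+        # (comp_db_output and make_entry are placeholders, not real API).
+        action_ctx.registry.write(comp_db_output, make_entry(t, file))
+```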
### Android LSP Android project requires traversing the target graph to find and java libraries, -grouping and converting them between modules or project libraries -depending on the number of references, and restructuring the graph as directory based. -Android LSP is able to take advantage of subscriptions in the future when available, -allowing developers to keep their IDE up-to-date automatically without needing -to manually regenerate the project. - -With bxl, the graph traversals can be written in starlark, -allowing propagation of information down the graph, -accessing targets’ attributes to analyze dependencies, -and access providers for artifacts and action information needed to output the project file. -Project generation also performs directory listings -that buck2’s dice already performs and caches (I think, need to confirm). -Bxl poses the interesting possibility that we can expose a limited set of IO operations -that are tracked by dice so bxl can access the same cached file operations as rest of buck2. -Android project generation currently doesn’t write project files to buck-out, -which prevents it from using buck2 actions. It will have to rely on an external script -to process the graph information printed by buck and write the actual project files. -If it moves to `buck-out` based, then it can take advantage of creating actions directly -using the graph information processed, and potentially take advantage of incremental actions api -to avoid writing the entire graph on each subsequent update. +grouping and converting them between modules or project libraries depending on +the number of references, and restructuring the graph as directory based. +Android LSP is able to take advantage of subscriptions in the future when +available, allowing developers to keep their IDE up-to-date automatically +without needing to manually regenerate the project. + +With bxl, the graph traversals can be written in starlark, allowing propagation +of information down the graph, accessing targets’ attributes to analyze +dependencies, and access providers for artifacts and action information needed +to output the project file. Project generation also performs directory listings +that buck2’s dice already performs and caches (I think, need to confirm). Bxl +poses the interesting possibility that we can expose a limited set of IO +operations that are tracked by dice so bxl can access the same cached file +operations as rest of buck2. Android project generation currently doesn’t write +project files to buck-out, which prevents it from using buck2 actions. It will +have to rely on an external script to process the graph information printed by +buck and write the actual project files. If it moves to `buck-out` based, then +it can take advantage of creating actions directly using the graph information +processed, and potentially take advantage of incremental actions api to avoid +writing the entire graph on each subsequent update. ### iOS Project -iOS is currently being implemented as a series of queries that are aggregated -by an external python script, that then invokes builds of subtargets. -The same can be achieved in bxl, but with the entire sequence being cacheable -and subscribable so that when the graph is updated, or even when generated files -need updating, buck2 can automatically push the updates. -However, it is uncertain whether xcode itself can make use of push updates. 
+iOS is currently being implemented as a series of queries that are aggregated by +an external python script, that then invokes builds of subtargets. The same can +be achieved in bxl, but with the entire sequence being cacheable and +subscribable so that when the graph is updated, or even when generated files +need updating, buck2 can automatically push the updates. However, it is +uncertain whether xcode itself can make use of push updates. -In https://docs.google.com/document/d/1USZ_ZYxq45DHUFF-BAYo6zS4lAHlpvNk9uM5SBL9e-w/edit?disco=AAAAQv4gLQ0, +In +https://docs.google.com/document/d/1USZ_ZYxq45DHUFF-BAYo6zS4lAHlpvNk9uM5SBL9e-w/edit?disco=AAAAQv4gLQ0, it was also proposed that project generation may need information to flow down -as part of the generation, which is only possible via bxl defining its own actions. -(Although, there may have been a workaround per Chatura). +as part of the generation, which is only possible via bxl defining its own +actions. (Although, there may have been a workaround per Chatura). ### Rust LSP -(note from dbarsky@: I’m adding this at Bob’s request. Can be removed as needed.) +(note from dbarsky@: I’m adding this at Bob’s request. Can be removed as +needed.) ### Visual Studio Project (vsgo) -Vsgo is a pile of python that converts buck query/buck targets output -via a variety of heuristics into inputs to a custom fork gyp which is then invoked -to generate visual studio projects for a given buck target. -Having direct access to the internals of buck would allow us to remove -the heuristics and possibly even move project generation directly into bxl. +Vsgo is a pile of python that converts buck query/buck targets output via a +variety of heuristics into inputs to a custom fork gyp which is then invoked to +generate visual studio projects for a given buck target. Having direct access to +the internals of buck would allow us to remove the heuristics and possibly even +move project generation directly into bxl. ## Goals -From the above use cases, BXL should offer a simple Starlark API -that allows easy introspection of the buck2 graph -at unconfigured, configured, providers, and actions stage, -maintaining incremental behaviour of the BXL evaluation itself. +From the above use cases, BXL should offer a simple Starlark API that allows +easy introspection of the buck2 graph at unconfigured, configured, providers, +and actions stage, maintaining incremental behaviour of the BXL evaluation +itself. Some minimal API should be offered to allow BXL to provide additional behaviour such as output artifacts, and print results. -Most use cases from LSP desire to also propagate information via the command line -for these operations, so BXL should support command line arguments as inputs. +Most use cases from LSP desire to also propagate information via the command +line for these operations, so BXL should support command line arguments as +inputs. ## API ### Defining a bxl function -There are multiple models possible. We can have each file be its own bxl, -or have each file declare multiple bxl like rules. +There are multiple models possible. We can have each file be its own bxl, or +have each file declare multiple bxl like rules. -There are multiple advantages to allowing declaration of multiple bxls, -such as grouping similar bxls in the same file, allowing them to “invoke” each other. -It doesn’t necessarily add much more complexity for the author, -as even with one bxl per file, the author still has to have some declaration for the bxls arguments. 
+There are multiple advantages to allowing declaration of multiple bxls, such as +grouping similar bxls in the same file, allowing them to "invoke" each other. It +doesn’t necessarily add much more complexity for the author, as even with one +bxl per file, the author still has to have some declaration for the bxls +arguments. ```python # sample.bxl func1 = bxl_main( impl = my_func1, args = { - “arg1”: arg.list(arg.str()), + "arg1": arg.list(arg.str()), } ) @@ -153,13 +160,12 @@ args = ctx.args.args_for_bxl Args defined like attrs when declaring the bxl function above - ### Accessing target nodes -All standard query functions will be enabled in bxl, allowing users to run query operations, -storing them in variables and interacting with them. -These allow introspection of the unconfigured targets, -or the configured targets based on api +All standard query functions will be enabled in bxl, allowing users to run query +operations, storing them in variables and interacting with them. These allow +introspection of the unconfigured targets, or the configured targets based on +api ```python # some.bxl @@ -172,7 +178,7 @@ for target in targets: # also inspect the target like below ctx.print(target.label) -target = ctx.cquery(“//foo”, “//x86”).attributes # cquery has selects resolved +target = ctx.cquery("//foo", "//x86").attributes # cquery has selects resolved ``` ### Inspect providers @@ -187,24 +193,26 @@ ctx.analysis(target).providers # access the providers ### Actions -For IDEs, to generate compilation databases, or generate project files, -writing them in bxl will entail creating actions, and executing them. -As such, bxl will also be given the rules api to register actions, -including dynamic outputs for the rule in the current bxl invocation to build artifacts -as part of a bxl function. +For IDEs, to generate compilation databases, or generate project files, writing +them in bxl will entail creating actions, and executing them. As such, bxl will +also be given the rules api to register actions, including dynamic outputs for +the rule in the current bxl invocation to build artifacts as part of a bxl +function. BXL has the ability to create actions with some constraints: + 1. Action is tied to a particular target -2. It’s output location is determined in the same pattern as regular actions defined via rules +2. It’s output location is determined in the same pattern as regular actions + defined via rules ```python -targets = ctx.cquery(‘deps(“//foo:rule”)’) +targets = ctx.cquery(‘deps("//foo:rule")’) for t in targets: action_ctx = ctx.analysis(t).actions # the action context here is tied to the configured target `t` # actions registered by bxl will be attached with bxl prefix key - action_ctx.registry.write(some_output, “foo”) + action_ctx.registry.write(some_output, "foo") ``` @@ -212,41 +220,41 @@ BXL can also interact with the existing actions on an action via the action_ctx, such as iterating through it, analyzing its outputs, or requesting it to be ran. ```python -targets = deps(“foo:rule”) +targets = deps("foo:rule") for t in targets: action_ctx = ctx.analysis(t).actions for action in action_ctx.iter(): - if “foo/path” in action.output: + if "foo/path" in action.output: ctx.build(action) ``` ### What is cached? -All computations requested by a bxl function will be treated as inputs. 
-So if a bxl function calls uquery, then uses the result to do a cquery, and then a build, -if buck2 detects that any of the recorded calls to uquery, cquery, and build changes, -the entire bxl will be reran, with no early cutoff. -The computations itself will still be cached via DICE, -so no major performance issues are expected. -However, in the event that a bxl function is computationally heavy, -the recommendation would be to move that to an action, -or split up the bxl and use inter-bxl caching described below. +All computations requested by a bxl function will be treated as inputs. So if a +bxl function calls uquery, then uses the result to do a cquery, and then a +build, if buck2 detects that any of the recorded calls to uquery, cquery, and +build changes, the entire bxl will be reran, with no early cutoff. The +computations itself will still be cached via DICE, so no major performance +issues are expected. However, in the event that a bxl function is +computationally heavy, the recommendation would be to move that to an action, or +split up the bxl and use inter-bxl caching described below. ### Inter-bxl caching? -Different bxl can be cacheable between each other if structured as “outputs”/artifacts. -This is essentially the same behaviour as a bxl requesting `ctx.build`, which is cached. -Since we have those as hashes on RE, we can track properly and not require -storing the values in dice. +Different bxl can be cacheable between each other if structured as +"outputs"/artifacts. This is essentially the same behaviour as a bxl requesting +`ctx.build`, which is cached. Since we have those as hashes on RE, we can track +properly and not require storing the values in dice. i.e. + ```python # caching_sample.bxl func1 = bxl_main( impl = my_func1, args = { - “arg1”: arg.list(arg.str()), + "arg1": arg.list(arg.str()), } ) @@ -262,7 +270,7 @@ func2 = bxl_main( ) my_func2(ctx): - artifact = ctx.bxl(“:func1”) + artifact = ctx.bxl(":func1") # now read artifact value # everything below will only be reran if the artifact content changes … diff --git a/docs/rfcs/cfg-modifiers/modifiers.pdf b/docs/rfcs/cfg-modifiers/modifiers.pdf new file mode 100644 index 0000000000000..2f737ce6a1f50 Binary files /dev/null and b/docs/rfcs/cfg-modifiers/modifiers.pdf differ diff --git a/docs/rfcs/configured-alias.md b/docs/rfcs/configured-alias.md index cd7b457a83e17..dd5f35179ba00 100644 --- a/docs/rfcs/configured-alias.md +++ b/docs/rfcs/configured-alias.md @@ -21,25 +21,27 @@ configured_alias( ) ``` -When this rule is built, it ignores "current" target configuration, -and builds the "actual" target with the configuration specified as "platform" argument. +When this rule is built, it ignores "current" target configuration, and builds +the "actual" target with the configuration specified as "platform" argument. ## How to implement it in buck v2? ### New rule attribute type: `configured_dep` Currently, we have several dependency attributes: -* `attrs.dep` -* `attrs.exec_dep` -* `attrs.transition_dep` -* `attrs.split_transition_dep` + +- `attrs.dep` +- `attrs.exec_dep` +- `attrs.transition_dep` +- `attrs.split_transition_dep` This RFC proposes adding another attribute: -* `attrs.configured_dep` +- `attrs.configured_dep` -`configured_dep` is an attribute which accepts a pair of strings: target and configuration. -During analysis, configured attr deps are resolved to providers resolved using given configuration. +`configured_dep` is an attribute which accepts a pair of strings: target and +configuration. 

 ### `configured_alias_impl` user defined rule

@@ -69,8 +71,8 @@ def configured_alias(name, actual, platform):

 ### No `configured_alias`

-Each specific case where `configured_alias` is used, it can be done with defining
-custom transition, and using custom transition rule.
+Each specific case where `configured_alias` is used can be handled by defining a
+custom transition and using a custom transition rule.

 But having `configured_alias` is a convenient stopgap to unblock people.

@@ -81,8 +83,9 @@ Instead of passing `configured_target_label(x, y)` pass `x + "@" + y`.

 ### Accept `configured_target_label` in `dep` attribute

 `dep` attribute could support all of:
-* regular target label as string
-* configured target label (as either `configured_target_label` or `x@y`
-I don't know practical applications for this magic,
-and unless there are uses for it, better keep API simple and explicit.
+- regular target label as string
+- configured target label (as either `configured_target_label` or `x@y`)
+
+I don't know practical applications for this magic, and unless there are uses
+for it, better to keep the API simple and explicit.
diff --git a/docs/rfcs/drafts/bxl-actions.md b/docs/rfcs/drafts/bxl-actions.md
index b71cb06fe6301..ead5acdadad66 100644
--- a/docs/rfcs/drafts/bxl-actions.md
+++ b/docs/rfcs/drafts/bxl-actions.md
@@ -1,36 +1,36 @@
 # Bxl Actions and Build API

-Bxl allows integrators to write Starlark snippets that introspect the buck2 graph,
-and perform various operations on them within Starlark to accomplish complex
-operations, as previously proposed in [bxl RFC](../bxl.md))
+Bxl allows integrators to write Starlark snippets that introspect the buck2
+graph, and perform various operations on them within Starlark to accomplish
+complex operations, as previously proposed in the [bxl RFC](../bxl.md).

-
-This document is intended at discussing the aspects of build and actions declaration
-of a bxl function in more details, and proposed changes to deferred framework to
-support bxl actions.
+This document is intended to discuss the build and actions declaration aspects
+of a bxl function in more detail, and proposes changes to the deferred framework
+to support bxl actions.

 ## Actions API

-The actions API should be the same as rules' actions API. That is, it has the same
-`ctx.actions` that allows registering of artifacts, creating actions, dynamic
-actions via the same api.
+The actions API should be the same as rules' actions API. That is, it has the
+same `ctx.actions` that allows registering artifacts, creating actions, and
+creating dynamic actions, all via the same api.

 ## Creating and Building the Actions

-Bxl allows users to build targets and actions. However, when creating actions, they
-are not bound/buildable until the artifact/action factories are finalized.
-As such, we will introduce the limitation that bxl cannot build artifacts that they
-themselves declared within the bxl. Instead, they will return a set of artifacts
-to expose to users, which buck2 will automatically build after finalizing the
-action factory.
-For dynamic-ness, bxl users will use the standard dynamic output api.
-There is an issue that during the dynamic output api's lambda, bxl functions will not
-be able to access the regular bxl functions for queries, etc. However, this is likely
-not important as most use cases should reasonably query bxl data before the dynamic
-outputs, and have limited power in dynamic-ness.
We can also always replace the -ctx of the dynamic to be the bxl context in the future, as we see fit. +Bxl allows users to build targets and actions. However, when creating actions, +they are not bound/buildable until the artifact/action factories are finalized. +As such, we will introduce the limitation that bxl cannot build artifacts that +they themselves declared within the bxl. Instead, they will return a set of +artifacts to expose to users, which buck2 will automatically build after +finalizing the action factory. For dynamic-ness, bxl users will use the standard +dynamic output api. There is an issue that during the dynamic output api's +lambda, bxl functions will not be able to access the regular bxl functions for +queries, etc. However, this is likely not important as most use cases should +reasonably query bxl data before the dynamic outputs, and have limited power in +dynamic-ness. We can also always replace the ctx of the dynamic to be the bxl +context in the future, as we see fit. Sample: + ```python def my_bxl(ctx): actions_factory = ctx.bxl_actions.factory() @@ -43,30 +43,29 @@ def my_bxl(ctx): return [artifact] # exposes the declared artifact to users ``` - ## Internal Representation (Deferred Framework) -The existing actions framework attaches all actions to a deferred, which is based -off a `ConfiguredLabel`, which also corresponds to the output path prefix. -bxl actions should also have a unique output path prefix, and follow the same system -of having a base deferred key to reuse the action implementation. - -We should extend the `BaseKey` of a `DeferredKey` to support beyond a `ConfiguredLabel`, -so that we can use a `BxlFunctionLabel` in its place. -This would allow `owner` of these actions to point to the correct creator. The output -path would be determined by using the `BxlFunctionLabel` as prefix similar to a label. -While this means that not all outputs are associated with an actual rule, this is -arguably more correct as bxl that creates outputs that doesn't fit the target graph -structure (i.e android project generation follows directory structure rather than the -packages defined by targets) to not have to conform the attaching their actions to -existing rules. bxl functions can examine multiple rules and create a single action, -attached only to their function label. - -The ActionRegistry will be attached to the evaluation result of `bxl`. Since we do not -allow bxl to explicitly request build of the actions itself declares, we can wait until -the end of the bxl function to finalize the actions. Then, the action lookup can simply -refer to the result of the `bxl`. - -With the above changes, the rest of the actions framework does not need changed to support -the proposed API. -DICE caching will work as today. +The existing actions framework attaches all actions to a deferred, which is +based off a `ConfiguredLabel`, which also corresponds to the output path prefix. +bxl actions should also have a unique output path prefix, and follow the same +system of having a base deferred key to reuse the action implementation. + +We should extend the `BaseKey` of a `DeferredKey` to support beyond a +`ConfiguredLabel`, so that we can use a `BxlFunctionLabel` in its place. This +would allow `owner` of these actions to point to the correct creator. The output +path would be determined by using the `BxlFunctionLabel` as prefix similar to a +label. 
While this means that not all outputs are associated with an actual rule,
+this is arguably more correct: a bxl that creates outputs that don't fit the
+target graph structure (i.e. android project generation follows directory
+structure rather than the packages defined by targets) does not have to conform
+to attaching its actions to existing rules. bxl functions can examine multiple
+rules and create a single action, attached only to their function label.
+
+The ActionRegistry will be attached to the evaluation result of `bxl`. Since we
+do not allow bxl to explicitly request a build of the actions it declares, we
+can wait until the end of the bxl function to finalize the actions. Then, the
+action lookup can simply refer to the result of the `bxl`.
+
+With the above changes, the rest of the actions framework does not need to be
+changed to support the proposed API. DICE caching will work as today.
diff --git a/docs/rfcs/drafts/cfg-modifiers/api.md b/docs/rfcs/drafts/cfg-modifiers/api.md
deleted file mode 100644
index d0ccc882aa558..0000000000000
--- a/docs/rfcs/drafts/cfg-modifiers/api.md
+++ /dev/null
@@ -1,454 +0,0 @@
-# [RFC] Configuration Modifiers
-
-## Why do we need new configuration setup?
-
-A target usually needs to be built in multiple build settings.
-For example, there may be different OS (ex. linux, mac, windows),
-architectures (ex. x86, arm), and sanitizers
-(ex. asan, tsan, ubsan) to use for a single target. Buck has 2 main ways of supporting customizations today:
-1. Buckconfigs specified through `--config` or `-c` flags. They are global flags and are often aggregated in modefiles (`@` on the command line).
-2. Target platforms specified through `default_target_platform` attribute or `--target-platforms` flag), which become a target's "configuration". `--target-platforms` flags are also commonly specified via modefiles.
-
-These methods are problematic for the following reasons.
-
-1. *We have too many modefiles*. A project that needs customizations often ends
-up adding its own set of modefiles, causing a continued rise in number of
-custom modefiles in the repo. Internally, the number of modefiles in our
-monorepo is currently on the order of **10,000s**.
-
-1. *Changing buckconfigs invalidates Buck's state*. Changing buckconfigs or
-modefiles of buckconfigs invalidates global state, which adds non-trivial Buck
-overhead on every incremental build that changes state. This does not affect
-target platforms.
-
-1. *Different modefiles of buckconfigs cannot be used in same build*.
-Users that need to run multi-configuration builds today often work around this
-by writing scripts that wraps multiple buck build invocations of different
-modes. This is slow because Buck state keeps getting repeatedly invalidated.
-There is also no way to build a target in different modes (ex. dev and opt) at
-the same time, so users that need to do this always have to do this
-sequentially. This does not affect target platforms.
-
-1. *Target platform generation is exponential in number of build settings*.
-Suppose I want to customize targets based on 3 OSes, 2 architectures, and 3
-compilers. With target platforms, I need to first generate all 18 permutations
-of these settings as platform targets before using them. This is not scalable.
-
-1. *Target platform does not compose well on command line*. Suppose I want to
-use ASAN on top of some existing platform. It's not possible to say specify
-ASAN on top of an existing platform on the command line.
Instead, I must create -a new platform target with ASAN added to the existing platform before I can use -it. - -1. *Poor user Experience*. When every project needs its own set of modes, it's -onerous for users to track what modes are needed to build what targets. Users -often don't realize when they are using the wrong or unnecessary command line -flags. - -1. *Poor tooling integration*. Similar to user, it's just onerous for tooling -to keep track of what modes are needed to build a target with. Buckconfigs are -also bad for performance for tools like language servers because it's impossible -to request the builds of two modes in parallel when two targets needs different -modes. - -1. *Antithetical to Buck's principles*. Buck's main strength is the ability to abstract away builds of different languages and libraries under one common syntax for the user. The need for project-custom flags goes against this principle. - -The Modifier API introduces a unified way to specify build settings on a -project, target, and command line level. Like target platforms, it constructs Buck configurations so it supports multi-configuration builds. It -avoids modefile proliferation by allowing users to easily -set project-specific build settings like compiler and toolchain versions in -the repo rather than on the command line. It avoids scalability problems of -platform generation by being composition-first. The goals of this project is to: - -1. *Make `buck build` work on any platform without the use of special flags*. -Today, building a mac target on mac often requires a mac mode, -and likewise for windows. Instead, `buck build` should always work -out of the box on any platform so that there's no need to specify mac mode on -macs or windows mode on windows. -1. *Define a small constrained set of common modifiers that can be used to build any target -in the repo*. This will include common options like mode (ex. dev, opt, release), OS (ex. linux, mac, iphoneos), and architecture (ex. x86, arm). -1. *Unblock cross-building for the majority of targets*. `host_info()` is -a hack to obtain information about the host machine that is the main blocker to -Buck2 cross-building (ex. building a mac or windows -target from linux) working everywhere. As an extension of "making `buck build` -work on any platform", modifiers should make it possible to kill off most use cases of `host_info` in the repo. -1. *Simplify building build tooling*. Because `buck build` works out of -the box, tools like language servers can build targets they need without using -project-specific modefiles or flags. -1. *Delete most modefiles from the repo*. -1. *Deprecate target platforms for modifiers as the sole way of configuring top-level -targets in Buck*. - -## Configuration Background - -*Feel free to skip this if you already understand Buck configurations.* - -A configuration is a collection of `constraint_value` targets -(commonly referred to as constraints). -It defines the build settings used by a target. -A constraint value is keyed by a `constraint_setting`, so there can only -be one `constraint_value` of a `constraint_setting` in a configuration. - -For example, suppose `cfg//os:_` is a constraint setting with constraint -values `cfg//os:linux`, `cfg//os:macos`, and `cfg//os:windows`. Then -a configuration may contain either `cfg//os:linux`, `cfg//os:macos`, -or `cfg//os:windows` to indicate which OS a target is built for. - -A constraint or a set of constraints can be selected on via `select()` to -customize a target's behavior. 
For example, the following adds a linux only -dep to a target. - -```python -deps = select({ - "cfg//os:linux": [":linux_only_dep"], - "DEFAULT": [], -}) -``` - -Before building a target on the command line (known as the top-level target), -Buck needs to know its configuration in order to resolve selects. Modifiers -are a new way to resolve a target's configuration for every top-level target. - -## API - -Every top-level target starts with an empty configuration, and Buck will apply -a list of "modifiers" to obtain a configuration. A modifier is a modification -of a constraint from the existing configuration to obtain a new - configuration. - -The simplest modifier is a constraint value, which inserts -that value into the configuration for its respective constraint setting, -replacing any existing constraint value for that setting. -For example, specifying `cfg//os:linux` as a modifier will -insert `cfg//os:linux` into the configuration, -overriding any existing constraint value for the -`cfg//os:_` constraint setting. - -Another type of modifier is a `modifier_select()` of a constraint value. -This can change the constraint value inserted based on the existing -configuration. For example, a modifier like -```python -modifier_select({ - "cfg//os:windows": "cfg//compiler:msvc", - "DEFAULT": "cfg//compiler:clang", -}) -``` -will insert msvc constraint into the configuration if OS is windows or clang -constraint otherwise. -A `modifier_select()` behaves similarly to Buck's `select()` but can only -be used in a modifier. -A `modifier_select()` can only be used to modify a single constraint setting, -so the following example is not valid. -```python -# This fails because a modifier cannot modify both compiler and OS. -modifier_select({ - "cfg//os:windows": "cfg//compiler:msvc", - "DEFAULT": "cfg//os:linux", -}) -``` - -A modifier can be specified in a PACKAGE file, on a target, or on the command -line. This provides the flexibility needed to customize targets on a project, -target, or cli level. - -### PACKAGE Modifier - -In a PACKAGE file, modifiers can be specified using the `cfg_modifiers` -function and would apply to all targets covered under that PACKAGE. For -example, modifiers specified in `repo/PACKAGE` would apply to any target under -`repo//...`. Modifiers specified in `repo/foo/PACKAGE` would apply to any target under `repo//foo/...` (For resolution order, see "Modifier -Resolution" section). - -The `cfg_modifiers` function takes as input -a dictionary of constraint setting to modifier for that setting. -For example, the following is an example that sets modifiers for OS and compiler settings in the repo's top PACKAGE file for all targets in repo. - -```python -# repo/PACKAGE - -cfg_modifiers({ - "cfg//os:_": "cfg//:linux", - "cfg//compiler:_": modifier_select({ - "DEFAULT": "cfg//compiler:clang", - "cfg//os:windows": "cfg//compiler:msvc", - }) -}) -``` - -To make constraints easier to type, you can specify aliases for modifier targets -via Buck's target aliases. - -For example, suppose the following aliases exist in `repo/.buckconfig`. - -```ini -[alias] - os = cfg//os:_ - linux = cfg//os:linux - macos = cfg//os:macos - windows = cfg//os:windows - compiler = cfg//compiler:_ - clang = cfg//compiler:clang - msvc = cfg//compiler:msvc -``` -Then the same PACKAGE modifiers can be specified as follows. 
- -```python -# repo/PACKAGE - -cfg_modifiers({ - "os": "linux", - "compiler": modifier_select({ - "DEFAULT": "clang", - "windows": "msvc", - }) -}) -``` - -### Target Modifier - -On a target, modifiers can be specified on the `cfg_modifiers` attribute. -For example, the following specifies modifiers for `repo//foo:bar`. -```python -# repo/foo/BUCK - -python_binary( - name = "bar", - # ... - cfg_modifiers = { - "cfg//os:_": "cfg//os:windows", - # Target modifiers can also use aliases - "compiler": "clang", - }, -) -``` - -### CLI Modifier - -On the command line, modifiers are specified as -`buck2 build ?`. - -For example, -`buck2 build repo//foo:bar?cfg//sanitizer:asan` applies asan -modifier on the command line. -`buck2 build repo//foo:bar?cfg//os:linux,cfg//sanitizer:asan` -will apply linux and asan modifiers. -Aliases can also be used on command line, so -`buck2 build repo//foo:bar?asan` is valid. - -Command line modifiers cannot be selects, although this may -be revisited if necessary. - -Modifiers can be specified for any target pattern, so -`buck2 build repo//foo/...?asan` and -`buck2 build repo//foo:?asan` are also valid. -When specifying a subtarget and modifier with `?`, -subtarget should go before modifier, -ex. `buck2 build repo//foo:bar[comp-db]?asan`. - -To specify modifiers to a list of target patterns on the command line, -you can use the `--modifier` or `-m` flag. -For example, `buck2 build repo//foo:bar repo//foo:baz -m release` -is equivalent to `buck2 build repo//foo:bar?release //foo:baz?release`. - -`--modifier` flag can be specified multiple times to add multiple modifier, so -`buck2 build --modifier=linux --modifier=release repo//foo:bar` -is equivalent to `buck2 build repo//foo:bar?linux,release`. - -It is prohibited to specify both `--modifier` flag and `?` in target pattern. -This restriction can be lifted in the future if there is a need. - -When two modifiers of the same constraint setting are specified, then the later one overrides the earlier one. For example, -`buck2 build repo//foo:bar?dev,release` is equivalent to -`buck2 build repo//foo:bar?release`. - -On command line, a `config_setting` target can be specified as a collection of -modifiers after `--modifier` or `?`. This will be equivalent to specifying each -constraint inside the `config_setting` as a separate modifier. - -### Modifier Resolution - -Modifiers are resolved in order of constraint setting, and for each constraint -setting, modifiers for that setting are resolved in order of PACKAGE, target, -and command line, with modifier from parent PACKAGE applied before child -PACKAGE. The end of this section will describe how Buck determines the order -of constraint setting to resolve. - -Suppose modifiers for `repo//foo:bar` are specified as follows. - -```python -# repo/PACKAGE - -cfg_modifiers({ - "cfg//os:_": "cfg//:linux", - "cfg//compiler:_": modifier_select({ - "DEFAULT": "cfg//compiler:clang", - "cfg//os:windows": "cfg//compiler:msvc", - }) -}) - -# repo/foo/PACKAGE - -cfg_modifiers({ - "cfg//os:_": "cfg//os:macos", -}) - -# repo/foo/BUCK - -python_binary( - name = "bar", - # ... - cfg_modifiers = { - "cfg//os:_": "cfg//os:windows", - }, -) -``` - -At the beginning, the configuration will be empty. -When resolving modifiers, Buck will first resolve all modifiers for -`cfg//os:_` before resolving all modifiers for `cfg//compiler:_`. 
- -For OS, the linux modifier from `repo/PACKAGE` will apply first, followed by -macos modifier from `repo/foo/PACKAGE` and windows modifier from -`repo//foo:bar`'s target modifiers, so `repo//foo:bar` will end up with -`cfg//os:windows` for `cfg//os:_` in its configuration. Next, to resolve -compiler modifier, the `modifier_select` from `repo/PACKAGE` will resolve to -`cfg//compiler:msvc` since existing configuration is windows and apply that as -the modifier. The target configuration for `repo//foo:bar` ends up with windows -and msvc. - -However, suppose user invokes `repo//foo:bar?linux` on the command line. When -resolving OS modifier, the linux modifier from cli will override any existing -OS constraint and insert linux into the configuraiton. Then, when resolving the -compiler modifier, the `modifier_select` will resolve to `cfg//compiler:clang`, -giving clang and linux as the final configuration. - -Because command line modifiers will apply at the end, they -are also known as required modifiers. Any modifier specified on the command line -will always override any modifier for the same constraint setting specified in -the repo. - -The ordering of constraint setting to resolve modifiers is determined based on -dependency order of constraints specified in the keys of the `modifier_select` -specified. Because some modifiers select on other constraints, modifiers for -those constraints must be resolved first. In the previous example, because -compiler modifier selects on OS constraints, Buck will resolve all -OS modifiers before resolving compiler modifiers. -`modifier_select` that ends up with a cycle of selected constraints -(ex. compiler modifier selects on sanitizer but sanitizer modifier also selects -on compiler) will be an error. - -### Modifier-Specific Selects - -Modifiers have 3 types of select operators that allow for powerful compositions. -Each operation is a function that accepts a dictionary where the keys are -conditionals and values are modifiers. - -1. `modifier_select`. Introduced in the previous sections, this is capable of -inserting constraints based on constraints in the existing configuration. - -2. `rule_select`. This is capable of selecting based on the rule name (also -known as rule type). The keys are regex patterns to match against the rule -name or "DEFAULT". Partial matches are allowed. - -3. `host_select`. This selects based on the host configuration, -whereas `modifier_select` selects based on the target configuration. This -host configuration is constructed when resolving modifiers. `host_select` is -important to making `buck build` work anywhere on any platform. For example, -when the OS to configure is not specified, it's best to assume that the user -wants to target the same OS as the host machine. - -An example using `rule_select` and `host_select` is as follows. - -```python -# root/PACKAGE - -# We want OS to target the host machine by default. -# Ex. build linux on linux machine, build windows on windows machine, -# and build mac on mac machine. -# However, if the rule is apple or android specific, then we should -# always be building for apple/android as OS, no matter the host -# configuration. 
- -cfg_modifiers({ - "cfg//os:_": rule_select({ - "apple_.*": "cfg//os:iphone", - "android_.*": "cfg//os:android", - "DEFAULT": host_select({ - "cfg//os:linux": "cfg//os:linux", - "cfg//os:macos": "cfg//os:macos", - "cfg//os:windows": "cfg//os:windows", - }) - }) -}) -``` - -On select resolution, Buck's `select` currently requires unambiguous -keys in the dictionary and resolves to the key with the most refined match. -The select operators used in modifiers will diverge from this and implement -a "first-match" behavior, where select resolves to the first condition that evalutes to true in the dictionary. - -### Legacy Target platform - -Target platform (`--target-platforms` flag or `default_target_platform` -attribute) will be a deprecated way of specifying configuration and will be -killed once all use cases migrate to modifiers. To maintain backwards compatibility -with target platforms during the migration process, modifier resolution -will take into account the target platform specified. This allows for an easy -migration where modifiers can be introduced one at a time without reaching -feature parity of target platform. - -If a target's modifiers resolve to an empty configuration, then Buck will reuse -the target platform as the configuration. If modifiers resolve to a non-empty -configuration, then Buck look for any constraint in the target platform not -covered by a constraint setting from the modifier configuration and add those -to the configuration. -For example, suppose in the previous example, the target platform for `repo// -foo:bar` includes `cfg//sanitizer:asan`, then this constraint will be inserted -into the configuration since no modifier covered the sanitizer constraint -setting. - -## Debugging modifiers - -Because many layers of modifiers can be applied before obtaining -a final configuration, it is important that modifier resolution is easy -to debug and understand. Here are some ways that modifier resolution -can be interpreted. - -1. *`buck2 audit modifiers` command*. There will be a `buck2 audit modifiers` -command to show all PACKAGE, target, and required modifiers for a target. It -can also show configuration changes from modifier resolution process if -requested by the user. - -2. *Starlark print statements or debugger*. -Modifier resolution process will be implemented in Starlark in prelude. -This means that any user can use any of the existing way to debug starlark -(ex. print statements, Starlark debugger in VSCode) to debug the resolution -process. - -## How configuration modifiers differ from transitions - -Modifiers are largely inspired by configuration transitions. -The difference between modifier and transition is that a transition can change -the configuration of any target in the graph, but a modifier can only change -the configuration of a top-level target. In other words, if you have target A -that depends on target B and you request a build of A, then A's target -configuration would be resolved via modifiers and propagated down to B, but dep -B would not do its own modifier resolution. When a top-level target goes through -a per-rule transition, that transition is applied after modifiers are resolved. - -Below are some examples that show when to use modifier and when to use -transition. - -1. *Python version* should be modeled as a transition and not modifier. - Suppose we have `python_binary` A nested as a resource of - another `python_binary` B. 
A should not inherit the python version - from B, so a transition is needed to change A's python version - when depended on by B. -2. *Library target* should use modifiers and not transitions. - A C++ library target should always inherit the configuration - of its parent C++ binary when it is used as a dep, - but a top-level C++ library target can still have its configuration - changed via modifiers when requested from command line. - -In the future, we may add support for modifier transition, which can -transition via modifiers, but that is out of the scope of this RFC. diff --git a/docs/rfcs/drafts/configuration-at-syntax.md b/docs/rfcs/drafts/configuration-at-syntax.md index 532ed337a5fa8..5d44dd79e4dc5 100644 --- a/docs/rfcs/drafts/configuration-at-syntax.md +++ b/docs/rfcs/drafts/configuration-at-syntax.md @@ -16,8 +16,7 @@ buck2 build //foo:bar --target-platforms=//platform:linux-x86_64 ## Why -Might be convenient if we define global -(or per-target, as proposed in +Might be convenient if we define global (or per-target, as proposed in [target configuration discovery RFC](https://www.internalfb.com/diff/D35135886)) alias. For example, if there's an alias @@ -32,8 +31,8 @@ buck2 build //foo:bar@release ``` Additionally, if we have -[configuration expression RFC](https://www.internalfb.com/diff/D35135496) implemented, -we can do something like: +[configuration expression RFC](https://www.internalfb.com/diff/D35135496) +implemented, we can do something like: ```shell buck2 build //foo:bar@release+gcc @@ -42,8 +41,10 @@ buck2 build //foo:bar@release+gcc ## Possible future extensions For now, at-syntax only applies to command line arguments -* of `build`/`targets`/`run`/`test` commands -* probably `cquery` query -It would be reasonable to expect that this syntax should be allowed anywhere we need -a target (e.g. in `deps` attribute), but this is out of scope of this proposal. +- of `build`/`targets`/`run`/`test` commands +- probably `cquery` query + +It would be reasonable to expect that this syntax should be allowed anywhere we +need a target (e.g. in `deps` attribute), but this is out of scope of this +proposal. diff --git a/docs/rfcs/drafts/digest-kinds.md b/docs/rfcs/drafts/digest-kinds.md index 33337f174994e..73e8e73cbea0d 100644 --- a/docs/rfcs/drafts/digest-kinds.md +++ b/docs/rfcs/drafts/digest-kinds.md @@ -2,41 +2,57 @@ ## Use cases: -- Buck2 needs to support more than just SHA1 for open-sourcing, since publicly available RE providers use SHA256. -- Internally, we want to migrate to (potentially keyed) Blake3, and there will be a transition period where we need to support both Blake3 and SHA1. - +- Buck2 needs to support more than just SHA1 for open-sourcing, since publicly + available RE providers use SHA256. +- Internally, we want to migrate to (potentially keyed) Blake3, and there will + be a transition period where we need to support both Blake3 and SHA1. ## Proposed plan -Make all the ways in which Buck2 *ingests* digests either configurable or explicit about the type of digest they expect. - -Internally, we may keep track of digest types for debugging purposes, but we will never compute more than one digest. It follows that we won't expose configuration for the digests we *output* (namely: to use on RE): if we only have one digest for each blob, making it configurable has no utility since you never have a choice about the hash to use. +Make all the ways in which Buck2 _ingests_ digests either configurable or +explicit about the type of digest they expect. 
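+
+For instance, the file-hashing side might end up driven by a single buckconfig
+key along these lines (the key name and accepted values are hypothetical; this
+RFC does not fix them):
+
+```ini
+[buck2]
+# preferred algorithm first, then the other digests Buck2 will accept
+digest_algorithms = BLAKE3,SHA1
+```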
+Internally, we may keep track of digest types for debugging purposes, but we +will never compute more than one digest. It follows that we won't expose +configuration for the digests we _output_ (namely: to use on RE): if we only +have one digest for each blob, making it configurable has no utility since you +never have a choice about the hash to use. ## Implementation ### Hashes received from RE -For interactions with RE, we'll expose two configurations (this can be on the CommandExecutorConfig): +For interactions with RE, we'll expose two configurations (this can be on the +CommandExecutorConfig): -- Preferred hash to use when Buck2 is doing the hashing (e.g. hashing directories). +- Preferred hash to use when Buck2 is doing the hashing (e.g. hashing + directories). - Accepted hashes. -We'll use the format of the digests we receive from RE (in particular their size) to infer what algorithm they used (remember: the RE API provides no way of knowing the format of a digest, it's just a string). - +We'll use the format of the digests we receive from RE (in particular their +size) to infer what algorithm they used (remember: the RE API provides no way of +knowing the format of a digest, it's just a string). ### Hashes of files -We'll expose the hash to use via a buckconfig. Our things-that-produce-hashes-of-files should either use the config to choose how they hash, or fail if they cannot provide the right hash format (e.g. that'll be true of Eden I/O). - +We'll expose the hash to use via a buckconfig. Our +things-that-produce-hashes-of-files should either use the config to choose how +they hash, or fail if they cannot provide the right hash format (e.g. that'll be +true of Eden I/O). ### Hashes of directories -This one gets a little tricky. Our directories currently have an implementation of fingerprinting that receives only the directory as input, so some refactoring is in order. +This one gets a little tricky. Our directories currently have an implementation +of fingerprinting that receives only the directory as input, so some refactoring +is in order. We have two options: -- Pick the hashing algorithm based on the contents of the directory (pick one that's already used). Dealing with empty directories is a bit annoying. -- Refactor the directory implementation and have directories parameterized over their fingerprints, not their hasher. +- Pick the hashing algorithm based on the contents of the directory (pick one + that's already used). Dealing with empty directories is a bit annoying. +- Refactor the directory implementation and have directories parameterized over + their fingerprints, not their hasher. -The first one is easier but has the downside of not working with keyed Blake3 (because you don't have a way to bring in the key), so I'm aiming for the second implementation for now. +The first one is easier but has the downside of not working with keyed Blake3 +(because you don't have a way to bring in the key), so I'm aiming for the second +implementation for now. diff --git a/docs/rfcs/drafts/plugin-deps.md b/docs/rfcs/drafts/plugin-deps.md index dc6cebfb4cdf3..3ef52e2c2535e 100644 --- a/docs/rfcs/drafts/plugin-deps.md +++ b/docs/rfcs/drafts/plugin-deps.md @@ -2,76 +2,82 @@ ### Background on Rust proc macros -Rust proc macros are compiler plugins. They are a special kind of crate that is compiled to a dylib, -which is then loaded by the compiler when another crate depends on the proc macro. Notably, like all -Rust crates, proc macros may also be re-exported. 
This means that if there is a dependency chain
-like `bin -> lib -> proc_macro`, the proc macro must be made available when compiling the binary,
-even though it does not appear directly in the dependencies.
+Rust proc macros are compiler plugins. They are a special kind of crate that is
+compiled to a dylib, which is then loaded by the compiler when another crate
+depends on the proc macro. Notably, like all Rust crates, proc macros may also
+be re-exported. This means that if there is a dependency chain like
+`bin -> lib -> proc_macro`, the proc macro must be made available when compiling
+the binary, even though it does not appear directly in the dependencies.

 Proc macros have posed a challenge to buck2, for two reasons:
- 1. Rust users generally expect to not have to distinguish between proc macros and normal crates
-    when specifying their dependencies. This means it is not easily possible to make the `lib ->
-    proc_macro` edge an `exec_dep`.
- 2. `bin` and `lib` might end up with different exec platforms. This means that even if `proc_macro`
-    were to be correctly configured as an exec dep of `lib`, that configuration might be wrong for
-    `bin`.
+
+1. Rust users generally expect to not have to distinguish between proc macros
+   and normal crates when specifying their dependencies. This means it is not
+   easily possible to make the `lib -> proc_macro` edge an `exec_dep`.
+2. `bin` and `lib` might end up with different exec platforms. This means that
+   even if `proc_macro` were to be correctly configured as an exec dep of
+   `lib`, that configuration might be wrong for `bin`.

 FIXME: Other use cases for this feature

 ### Plugins deps

-This RFC proposes introducing a concept of "plugin deps" to solve this problem. Plugin deps are deps
-that can be propagated up the build graph at configuration time, instead of at analysis time. Here's
-what this looks like:
+This RFC proposes introducing a concept of "plugin deps" to solve this problem.
+Plugin deps are deps that can be propagated up the build graph at configuration
+time, instead of at analysis time. Here's what this looks like:

-First, plugin deps come in "kinds." Plugin kinds can be created like `MyKind = plugins.kind()`. These
-act as identifiers that can be used to divide all the possible plugin deps up however users need to.
+First, plugin deps come in "kinds." Plugin kinds can be created like
+`MyKind = plugins.kind()`. These act as identifiers that can be used to divide
+all the possible plugin deps up however users need to.

-Each configured target has plugin lists: There is one list for each plugin kind. The elements of
-these list are an *unconfigured* target, together with a `should_propagate` bool. The same
-unconfigured target cannot appear more than once. In other words, this is a `HashMap>`. We need to describe two things: How to *use* these list, and how to
-*create* them.
+Each configured target has plugin lists: There is one list for each plugin kind.
+Each element of these lists is an _unconfigured_ target, together with a
+`should_propagate` bool. The same unconfigured target cannot appear more than
+once. In other words, this is a `HashMap<PluginKind, Map<TargetLabel, bool>>`.
+We need to describe two things: How to _use_ these lists, and how to _create_
+them.

 ### Using a target's plugin lists

-Using plugin lists is very simple: The rule sets `uses_plugins = [MyKind]` when declared. Setting
-this make the elements of the plugin list for the given kind appear as exec deps on the configured
-nodes for this rule. This also means that all
-other exec deps.
+Using plugin lists is very simple: The rule sets `uses_plugins = [MyKind]` when
+declared. Setting this makes the elements of the plugin list for the given kind
+appear as exec deps on the configured nodes for this rule. This also means that
+the plugins participate in exec dep resolution like all other exec deps.

-Analysis will then be able to access a list of the providers for each of the plugins via
-`ctx.plugins[MyKind]`.
+Analysis will then be able to access a list of the providers for each of the
+plugins via `ctx.plugins[MyKind]`.

-The `should_propagate` bool that is associated with each element of the list is ignored at this
-stage.
+The `should_propagate` bool that is associated with each element of the list is
+ignored at this stage.
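+
+A minimal sketch of the consuming side (rule and callback names are made up;
+`plugins.kind()`, `uses_plugins`, and `ctx.plugins` are the API proposed here):
+
+```python
+MyKind = plugins.kind()
+
+def _impl(ctx):
+    # each entry is the provider collection of one plugin dep,
+    # configured for this node's execution platform
+    plugin_infos = [p[DefaultInfo] for p in ctx.plugins[MyKind]]
+    # ... use plugin_infos when registering actions ...
+    return [DefaultInfo()]
+
+my_rule = rule(
+    impl = _impl,
+    attrs = {},
+    uses_plugins = [MyKind],
+)
+```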
This also means that the plugins participate in exec dep resolution like all -other exec deps. +Using plugin lists is very simple: The rule sets `uses_plugins = [MyKind]` when +declared. Setting this make the elements of the plugin list for the given kind +appear as exec deps on the configured nodes for this rule. This also means that +the plugins participate in exec dep resolution like all other exec deps. -Analysis will then be able to access a list of the providers for each of the plugins via -`ctx.plugins[MyKind]`. +Analysis will then be able to access a list of the providers for each of the +plugins via `ctx.plugins[MyKind]`. -The `should_propagate` bool that is associated with each element of the list is ignored at this -stage. +The `should_propagate` bool that is associated with each element of the list is +ignored at this stage. ### Creating a target's plugin lists Plugin lists are created by accumulating from two sources: -The first of these is direct plugin deps. They are defined via a new `attrs.plugin_dep(kind = -"foo")`. This attribute (like other deps), is set to a label when the target is declared. It then -resolves as follows: +The first of these is direct plugin deps. They are defined via a new +`attrs.plugin_dep(kind = "foo")`. This attribute (like other deps), is set to a +label when the target is declared. It then resolves as follows: - * In the unconfigured graph: To the appropriate unconfigured target - * In the configured graph: To the label of the unconfigured target. In other words, this will still - be displayed in `buck2 cquery -A`, but will not appear in the deps. - * During analysis: Also to the unconfigured target label. +- In the unconfigured graph: To the appropriate unconfigured target +- In the configured graph: To the label of the unconfigured target. In other + words, this will still be displayed in `buck2 cquery -A`, but will not appear + in the deps. +- During analysis: Also to the unconfigured target label. -The target that appears in the `plugin_dep` is added to the `MyKind` plugin list with -`should_propagate` set. +The target that appears in the `plugin_dep` is added to the `MyKind` plugin list +with `should_propagate` set. -The second way to add to the plugin list is by inheriting from regular deps. This works as follows: -Elements of the plugin lists for which the `should_propagate` value is true are made available to -the immediate rdeps of a configured target. The rdep can use them by setting `pulls_plugins = -[MyKind]` in the appropriate `attrs.dep()` invocation. This will make the targets appear in the -plugin list for the rdep with `should_propagate` unset. Alternatively, the rdep can set -`pulls_and_pushes_plugins = [MyKind]` to add the targets to the plugin lists with `should_propagate` -set to true. This enables transitive propagation further up the configured graph. +The second way to add to the plugin list is by inheriting from regular deps. +This works as follows: Elements of the plugin lists for which the +`should_propagate` value is true are made available to the immediate rdeps of a +configured target. The rdep can use them by setting `pulls_plugins = [MyKind]` +in the appropriate `attrs.dep()` invocation. This will make the targets appear +in the plugin list for the rdep with `should_propagate` unset. Alternatively, +the rdep can set `pulls_and_pushes_plugins = [MyKind]` to add the targets to the +plugin lists with `should_propagate` set to true. This enables transitive +propagation further up the configured graph. 
-To decide later: Should we allow plugin rules to appear in regular/exec deps, with no special -behavior? I don't see why not. +To decide later: Should we allow plugin rules to appear in regular/exec deps, +with no special behavior? I don't see why not. ### Example: Proc macros @@ -159,28 +165,34 @@ rust_binary( ``` Analysis for `:l` will see: - 1. `deps` which contains only the `RustProcMacroMarker("p")` - 2. `doc_deps` which contains only the `RustProcMacroMarker("p2")` - 3. `ctx.plugins[RustProcMacro]` which contains the providers of `:p1_REAL` and `:p2_REAL`, - correctly configured for the execution platform of `:l`. + +1. `deps` which contains only the `RustProcMacroMarker("p")` +2. `doc_deps` which contains only the `RustProcMacroMarker("p2")` +3. `ctx.plugins[RustProcMacro]` which contains the providers of `:p1_REAL` and + `:p2_REAL`, correctly configured for the execution platform of `:l`. Analysis for `:b` will see: - 1. `deps` which contain the providers of `l` - 2. `ctx.plugins[RustProcMacro]` which contain the providers of `:p1_REAL`, also correctly - configured for its own execution platform (which may be different from `:l`'s). - Note that because `rust_library` does not re-push doc deps, `:b` will not see `:p2_REAL`. +1. `deps` which contain the providers of `l` +2. `ctx.plugins[RustProcMacro]` which contain the providers of `:p1_REAL`, also + correctly configured for its own execution platform (which may be different + from `:l`'s). + + Note that because `rust_library` does not re-push doc deps, `:b` will not + see `:p2_REAL`. -As a result, the implementation of the `rust_library` rule should not propagate the providers of its -proc macro deps (unlike its regular deps). +As a result, the implementation of the `rust_library` rule should not propagate +the providers of its proc macro deps (unlike its regular deps). -There is one downside to this solution: `buck2 build :p` does absolutely none of the things that the -user is probably expecting. They need `buck2 build :p_REAL`. That's a bit sad. Thankfully directly -building proc macros is not that important a use case? +There is one downside to this solution: `buck2 build :p` does absolutely none of +the things that the user is probably expecting. They need `buck2 build :p_REAL`. +That's a bit sad. Thankfully directly building proc macros is not that important +a use case? #### Alias -It is already the case today that we can't use the normal `alias` rule on toolchains. A similar -situation crops up here, where aliasing a target that pushes plugins causes the plugins to "get -lost." The right solution to this is to probably allow `plugins.ALL` as a special value on -`pulls_plugins` and `pulls_and_pushes_plugins`, and then set that for the alias rule. +It is already the case today that we can't use the normal `alias` rule on +toolchains. A similar situation crops up here, where aliasing a target that +pushes plugins causes the plugins to "get lost." The right solution to this is +to probably allow `plugins.ALL` as a special value on `pulls_plugins` and +`pulls_and_pushes_plugins`, and then set that for the alias rule. diff --git a/docs/rfcs/drafts/test-info-v2.md b/docs/rfcs/drafts/test-info-v2.md index 9a34f3b21247d..25fc51f745984 100644 --- a/docs/rfcs/drafts/test-info-v2.md +++ b/docs/rfcs/drafts/test-info-v2.md @@ -1,5 +1,5 @@ # RFC: TestInfo v2 -A stub RFC for TestInfo v2 to track lessons learned about TestInfo v1. The -stack starting D36339960 contains the original code for the TestInfo and -templated test API experiment. 
+A stub RFC for TestInfo v2 to track lessons learned about TestInfo v1. The stack
+starting D36339960 contains the original code for the TestInfo and templated
+test API experiment.
diff --git a/docs/rfcs/drafts/universal-cfg-naming.md b/docs/rfcs/drafts/universal-cfg-naming.md
new file mode 100644
index 0000000000000..8295bf2189d5b
--- /dev/null
+++ b/docs/rfcs/drafts/universal-cfg-naming.md
@@ -0,0 +1,61 @@
+# Universal Configuration Naming Function
+
+_tl;dr:_ This RFC proposes using a single naming function to generate names for
+all configurations.
+
+## Context
+
+NOTE: The configuration name consists of a readable string followed by the hash
+of the configuration. The readable string is technically the `PlatformInfo`
+name. For ease of writing, this doc uses configuration name and platform name
+interchangeably to describe this concept.
+
+Currently, there are 3 ways to create and name a configuration.
+
+1. A `platform` target defines a configuration, and the platform target label
+   becomes the platform name.
+2. A transition function defines the configuration and generates a name for the
+   configuration.
+3. When a modifier is used, the cfg constructor function for modifiers defines
+   the configuration and its name. There is currently a single naming function
+   that generates all modifier-based configuration names.
+
+Modifiers are intended to replace platforms, so in the future all configuration
+names will be generated. Unfortunately, most of the names generated today in
+transitions are not very good. Problems that I've seen in practice include:
+
+1. Configuration names barely contain any useful information about the
+   configuration. This happens a lot in transitions. For example, the android
+   split CPU architecture transition names the generated configurations "x86_64"
+   and "arm64", which tells very little about the configuration beyond the CPU
+   architectures it splits on.
+2. A transition function incorrectly retains the old configuration name that is
+   no longer relevant, misleading the user about what this configuration
+   actually does. I've seen this happen where a configuration has py3.8 in its
+   name but the python version constraint stored is actually py3.10.
+
+## Proposal
+
+Register a single Starlark function to define all configuration names. This
+Starlark function would accept a `ConfigurationInfo` and return a string for the
+name of the `ConfigurationInfo`.
+
+```python
+# Example
+def name(cfg: ConfigurationInfo) -> str:
+    # ...
+```
+
+`PlatformInfo` is no longer available in Starlark. Any place that previously
+uses a `PlatformInfo` will now use `ConfigurationInfo` instead. Buck2 will
+invoke this function each time it encounters a new `ConfigurationInfo` to define
+its name.
+
+This function will attempt to provide a useful name based on the constraints in
+the configuration, which mitigates the issue of short or misleading
+configuration names. There is some risk of a high amount of code complexity if a
+single function must name every configuration.
+
+This function will most likely be registered via a `set_cfg_name` function or
+something similar, callable from the root PACKAGE file or potentially the
+prelude.
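+
+A minimal sketch of such a function (assuming, purely for illustration, that
+`ConfigurationInfo` exposes its constraints as a dict from constraint-setting
+labels to constraint-value labels):
+
+```python
+def _cfg_name(cfg: ConfigurationInfo) -> str:
+    # surface a few well-known settings in the name; everything else is
+    # still captured by the configuration hash that buck2 appends
+    parts = []
+    for setting in ["cfg//os:_", "cfg//cpu:_", "cfg//build_mode:_"]:
+        value = cfg.constraints.get(setting)
+        if value != None:
+            # keep only the short name, e.g. "linux" out of "cfg//os:linux"
+            parts.append(str(value).split(":")[-1])
+    return "-".join(parts) if parts else "<empty>"
+
+# hypothetical registration, as suggested above
+set_cfg_name(_cfg_name)
+```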
diff --git a/docs/rfcs/implemented/provider-collection-at.md b/docs/rfcs/implemented/provider-collection-at.md index 34ecdf657af7b..a2cbc61fde5cc 100644 --- a/docs/rfcs/implemented/provider-collection-at.md +++ b/docs/rfcs/implemented/provider-collection-at.md @@ -1,12 +1,13 @@ # Return error in `ProviderCollection[]` on undeclared provider -Currently, `ctx.attrs.foo[UnknownInfo]` returns `None` if `foo` is -a provider collection. +Currently, `ctx.attrs.foo[UnknownInfo]` returns `None` if `foo` is a provider +collection. This RFC proposes these changes: -* `ctx.attrs.foo[UnknownInfo]` is an error -* `UnknownInfo in ctx.attrs.foo` is `False` -* `ctx.attrs.foo.get(UnknownInfo)` returns `None` + +- `ctx.attrs.foo[UnknownInfo]` is an error +- `UnknownInfo in ctx.attrs.foo` is `False` +- `ctx.attrs.foo.get(UnknownInfo)` returns `None` ## Why @@ -23,6 +24,7 @@ Object of type `NoneType` has no attribute `bar` ``` Instead, the error will be something like: + ``` provider collection does not contain `UnknownInfo`, defined providers are `FooInfo`, `BarInfo`. diff --git a/docs/rfcs/package-local-values.md b/docs/rfcs/package-local-values.md index 98bddd4b87634..f5092b4f22902 100644 --- a/docs/rfcs/package-local-values.md +++ b/docs/rfcs/package-local-values.md @@ -4,12 +4,12 @@ This RFC proposes to extend buck2 Starlark with package-local values. ## Why -DevX people want to have some per-directory configuration files, -accessible from Starlark macros. +DevX people want to have some per-directory configuration files, accessible from +Starlark macros. -For example, a project NNN may want to switch to building using LLVM 15 by default. -End users would want to have an easy instruction how to do that, -after DevX people provided instructions and infrastructure for that. +For example, a project NNN may want to switch to building using LLVM 15 by +default. End users would want to have an easy instruction how to do that, after +DevX people provided instructions and infrastructure for that. ## What we have now @@ -21,8 +21,8 @@ Currently, in fbcode, we have `get_modes` mechanism. This symbol can be accessed from macros using [implicit_package_symbol](https://fburl.com/code/u5coj9s7) function. -`get_modes` functions are package-local, but all `BUILD_MODE.bzl` -files need to be registered in global buckconfig, which is not ideal. +`get_modes` functions are package-local, but all `BUILD_MODE.bzl` files need to +be registered in global buckconfig, which is not ideal. Proposed per-package properties can replace `get_modes` mechanism. @@ -30,24 +30,22 @@ Proposed per-package properties can replace `get_modes` mechanism. ### `PACKAGE` files -Before evaluating `BUCK` file, buck2 will evaluate all `PACKAGE` files -in the same directory and all parent directories. -Absent `PACKAGE` files are treated as empty files. +Before evaluating `BUCK` file, buck2 will evaluate all `PACKAGE` files in the +same directory and all parent directories. Absent `PACKAGE` files are treated as +empty files. -All relevant `PACKAGE` files are executed sequentially -from the root directory to the current directory -(but unrelated `PACKAGE` files can be executed in parallel). -Evaluating `PACKAGE` files sequentially provides additional guarantees, -for example, attempt to override a property (unless explicitly requested) -should fail with Starlark call stack. +All relevant `PACKAGE` files are executed sequentially from the root directory +to the current directory (but unrelated `PACKAGE` files can be executed in +parallel). 
Evaluating `PACKAGE` files sequentially provides additional
+guarantees, for example, an attempt to override a property (unless explicitly
+requested) should fail with a Starlark call stack.

 Each `PACKAGE` file is evaluated at most once (like `bzl` file).

-`PACKAGE` files may load arbitrary `bzl` files.
-`BUCK`-specific functions called in `bzl` files (like rule functions)
-are available, but calling functions from `PACKAGE` files is an error.
-This way, `bzl` files are evaluated only once regardless of whether
-they are loaded from `PACKAGE` or `BUCK` file.
+`PACKAGE` files may load arbitrary `bzl` files. `BUCK`-specific functions called
+in `bzl` files (like rule functions) are available, but calling those functions
+from `PACKAGE` files is an error. This way, `bzl` files are evaluated only once
+regardless of whether they are loaded from a `PACKAGE` or a `BUCK` file.

 ### API

@@ -63,30 +61,31 @@ def write_package_value(
 ): ...
 ```

-Name is a string which must contain exactly one dot symbol (just to enforce code style).
+Name is a string which must contain exactly one dot symbol (just to enforce code
+style).

-Value is an arbitrary Starlark value, for example, an integer, a list of integer,
-a struct or a function.
+Value is an arbitrary Starlark value, for example, an integer, a list of
+integers, a struct, or a function.

-When `overwrite` is `False` (default), attempt to overwrite per-package
-value defined in parent `PACKAGE` file will fail.
+When `overwrite` is `False` (the default), an attempt to overwrite a per-package
+value defined in a parent `PACKAGE` file will fail.

 Written values are frozen when `PACKAGE` file evaluation is finished.

-Note `write_package_value` symbol exists in `bzl` globals,
-and it can be called from `bzl` file in context of `PACKAGE` evaluation,
-but calling `write_package_file` is an error on context of `BUCK` evaluation.
+Note the `write_package_value` symbol exists in `bzl` globals, and it can be
+called from a `bzl` file in the context of `PACKAGE` evaluation, but calling
+`write_package_value` is an error in the context of `BUCK` evaluation.

 Modifying `PACKAGE` file logically invalidates the `BUCK` file of this package,
-and all `PACKAGE` and `BUCK` files of subpackages.
-However, `BUCK` file evaluation may track which package-local values were
-accessed and only invalidate `BUCK` files which were potentially affected
-(similarly to how we do it with buckconfigs, with individual properties
-being projection keys).
+and all `PACKAGE` and `BUCK` files of subpackages. However, `BUCK` file
+evaluation may track which package-local values were accessed and only
+invalidate `BUCK` files which were potentially affected (similarly to how we do
+it with buckconfigs, with individual properties being projection keys).

 #### `BUCK` file API

-`BUCK` files (and `bzl` files included from `BUCK` files) have a global function:
+`BUCK` files (and `bzl` files included from `BUCK` files) have a global
+function:

 ```python
 def read_package_value(
@@ -94,15 +93,15 @@ def read_package_value(
 ): ...
 ```

-This function returns the nearest value registered per package,
-or `None` is such value does not exist.
+This function returns the nearest value registered per package, or `None` if
+such a value does not exist.

-This function is available in `bzl` files, but attempt to call this
-function in context of `PACKAGE` file evaluation results in an error.
-This restriction can be lifted in the future.
+This function is available in `bzl` files, but an attempt to call this function
+in the context of `PACKAGE` file evaluation results in an error. This
+restriction can be lifted in the future.
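+
+Put together, a minimal sketch of the proposed API (the value name and the LLVM
+example are made up):
+
+```python
+# PACKAGE
+write_package_value("llvm.version", 15)
+
+# BUCK (or a bzl macro loaded from it)
+llvm = read_package_value("llvm.version")  # 15 here; None if never written
+```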

-Per-package values are **not** accessible as global symbols in `BUCK` files.
-We may reconsider it in the future.
+Per-package values are **not** accessible as global symbols in `BUCK` files. We
+may reconsider this in the future.

 ### `read_config`
diff --git a/docs/rfcs/selectable.pdf b/docs/rfcs/selectable.pdf
new file mode 100644
index 0000000000000..511a35257ad7b
Binary files /dev/null and b/docs/rfcs/selectable.pdf differ
diff --git a/docs/rfcs/unified_constraint_rule.pdf b/docs/rfcs/unified_constraint_rule.pdf
new file mode 100644
index 0000000000000..e0d5356becade
Binary files /dev/null and b/docs/rfcs/unified_constraint_rule.pdf differ
diff --git a/docs/rule_authors/alias.md b/docs/rule_authors/alias.md
index 5f22b8ff2f45f..4be02fbed3ca1 100644
--- a/docs/rule_authors/alias.md
+++ b/docs/rule_authors/alias.md
@@ -3,15 +3,17 @@ id: alias
 title: Alias
 ---

-The `alias` rule creates another name by which an existing rule can be referred to. There two variants: [versioned_alias](#versionedalias) and [configured_alias](#configuredalias), which are detailed below.
+The `alias` rule creates another name by which an existing rule can be referred
+to. There are two variants: [versioned_alias](#versionedalias) and
+[configured_alias](#configuredalias), which are detailed below.

 ## alias

 The `alias` rule has the following relevant attributes:

-* `name` - (required) what the `actual`'s label should be aliased as.
-* `actual` - (required) a target label.
-* `default_host_platform` - default host platform to use for the aliased target.
+- `name` - (required) what the `actual`'s label should be aliased as.
+- `actual` - (required) a target label.
+- `default_host_platform` - default host platform to use for the aliased target.

 **Example**

@@ -31,11 +33,13 @@ alias(

 The `versioned_alias` rule has the following relevant attributes:

-* `name` - (required) what the `actual`'s label should be aliased as.
-* `actual` - (required) a target label.
-* `versions` - (required) a map of versions to their respective versioned target labels.
+- `name` - (required) what the `actual`'s label should be aliased as.
+- `versions` - (required) a map of versions to their respective versioned target
+  labels.

-Under the hood, any versioned parameters from the `versioned_alias`'s underlying `actual` are translated into their `select`-based equivalents, which rely on constraint settings added to the target platform.
+Under the hood, any versioned parameters from the `versioned_alias`'s underlying
+`actual` are translated into their `select`-based equivalents, which rely on
+constraint settings added to the target platform.

 **Example**

@@ -57,21 +61,34 @@ versioned_alias(

 The `configured_alias` rule has the following relevant attributes:

-* `name` - (required) what the `actual`'s label should be aliased as.
-* `configured_actual` - a configured label (mapped to a configured dep under the hood so the providers can be simply forwarded).
-* `fallback_actual` - if `configured_actual` is not set, then fallback to this value, which is an unconfigured dep. If `configured_actual` is not set, then `fallback_actual` must be set.
-* `platform` - the platform to build the aliased target with.
+- `name` - (required) what the `actual`'s label should be aliased as.
+- `configured_actual` - a configured label (mapped to a configured dep under the
+  hood so the providers can be simply forwarded).
+- `fallback_actual` - if `configured_actual` is not set, then fall back to this
+  value, which is an unconfigured dep. If `configured_actual` is not set, then
+  `fallback_actual` must be set.
+- `platform` - the platform to build the aliased target with.
+
:::note
The `actual` field is available for `configured_alias` but it is not used under the hood (to keep compatibility of output format with Buck1 queries).
+
+
:::

-Outside of simply pointing at another target, this target has one other useful feature - it contains a platform argument.
+Outside of simply pointing at another target, this target has one other useful
+feature - it contains a platform argument.

This makes the alias rule useful for two distinct scenarios:

-* **Configuration switching during the build**. For example, there is an iOS target that needs to build a dependency for WatchOS so it can include it in the bundle. This can be represented by the iOS target having a dependency on an alias of the Watch app with `platform = "//the/desired/watchos:platform"`.
-* **Using a target to refer to another in a non-standard configuration**. For example, if you want to have an experimental version of an app, you could represent that as an alias with an 'experimental' configuration pointing to the original target.
+- **Configuration switching during the build**. For example, there is an iOS
+  target that needs to build a dependency for WatchOS so it can include it in
+  the bundle. This can be represented by the iOS target having a dependency on
+  an alias of the Watch app with `platform = "//the/desired/watchos:platform"`.
+- **Using a target to refer to another in a non-standard configuration**. For
+  example, if you want to have an experimental version of an app, you could
+  represent that as an alias with an 'experimental' configuration pointing to
+  the original target.

**Example**

diff --git a/docs/rule_authors/anon_targets.md b/docs/rule_authors/anon_targets.md
index e9e3fdab64c4c..1ba5e96b275fc 100644
--- a/docs/rule_authors/anon_targets.md
+++ b/docs/rule_authors/anon_targets.md
@@ -3,18 +3,270 @@ id: anon_targets
title: Anonymous Targets
---

-An anonymous target is defined by the hash of its attributes, rather than its name. During analysis, rules can define and access the providers of anonymous targets before producing their own providers. Two distinct rules might ask for the same anonymous target, sharing the work it performs.
+An anonymous target is defined by the hash of its attributes, rather than its
+name. During analysis, rules can define and access the providers of anonymous
+targets before producing their own providers. Two distinct rules might ask for
+the same anonymous target, sharing the work it performs.

This solves two distinct problems:

-* **The sharing problem** - if you have two processes that want to share some work, you can create an anon target that does that work once, which is then reused by the two processes. Without such a mechanism, all sharing must be present in the target graph: you can't create any new sharing.
-* **The overlay problem** - this is the idea that you want to have a shadow-graph, similar in structure to the normal graph, but with additional information attached. Bazel accomplishes this with [Aspects](https://bazel.build/extending/aspects).
With Anonymous (anon) targets, you can create a shadow-graph by convention, just by using the target name you wish to shadow as the attribute.
+- **The sharing problem** - if you have two processes that want to share some
+  work, you can create an anon target that does that work once, which is then
+  reused by the two processes. Without such a mechanism, all sharing must be
+  present in the target graph: you can't create any new sharing.
+- **The overlay problem** - this is the idea that you want to have a
+  shadow-graph, similar in structure to the normal graph, but with additional
+  information attached. Bazel accomplishes this with
+  [Aspects](https://bazel.build/extending/aspects). With Anonymous (anon)
+  targets, you can create a shadow-graph by convention, just by using the target
+  name you wish to shadow as the attribute.

-Dynamic dependencies, in their full generality, enable users to do a thing, look at the result, then ask for fresh things. However, this full generality is not provided as it breaks processes, like query, that power the Target Determinator.
+Dynamic dependencies, in their full generality, enable users to do a thing, look
+at the result, then ask for fresh things. However, this full generality is not
+provided as it breaks processes, like query, that power the Target Determinator.

-In Buck2, dynamic dependencies are implemented using `dynamic_output`, which provides users with the ability to create new actions, after running actions, then look at the result. `dynamic_output` is restricted in its power when compared to fully generic dynamic dependencies, as detailed in the [Dynamic Dependencies](dynamic_dependencies.md) page.
+In Buck2, dynamic dependencies are implemented using `dynamic_output`, which
+provides users with the ability to create new actions, after running actions,
+then look at the result. `dynamic_output` is restricted in its power when
+compared to fully generic dynamic dependencies, as detailed in the
+[Dynamic Dependencies](dynamic_dependencies.md) page.

-Anon targets enable users to create a new analysis (that is, call an anon target that may not have existed before) after looking at the result of a previous analysis (which is passed in, or after looking at an anon target). In many ways, anon target is the version of `dynamic_output` at analysis time, rather than action time.
+Anon targets enable users to create a new analysis (that is, call an anon target
+that may not have existed before) after looking at the result of a previous
+analysis (which is passed in, or after looking at an anon target). In many ways,
+an anon target is the version of `dynamic_output` at analysis time, rather than
+action time.
+
+The execution platform for an anon target is inherited from the calling target,
+and is part of the hash. If that is too restrictive, you could use execution
+groups, where an anon target gets told which execution group to use.
+
+# Creating anon targets
+
+## Anon rule
+
+An anonymous rule is defined using `rule` or `anon_rule`.
+
+Example:
+
+```python
+my_anon_rule = rule(
+    impl = _anon_impl,
+    attrs = {},
+)
+
+# Or:
+
+my_anon_rule = anon_rule(
+    impl = _anon_impl,
+    attrs = {},
+    artifact_promise_mappings = {} # only available for anon_rule
+)
+```
+
+For `rule`, these are normal rules, with the difference that they are not in a
+configuration, so `ctx.actions.label` won't show configuration information, but
+just `unspecified`.
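+
+For illustration, a minimal implementation function for the definitions above
+might look like the following (a sketch: the body of `_anon_impl` is
+hypothetical, not prescribed by the API):
+
+```python
+def _anon_impl(ctx: AnalysisContext) -> list[Provider]:
+    # Anon impls look like normal rule impls: create actions, return providers.
+    out = ctx.actions.write("out.txt", "hello")
+    return [DefaultInfo(default_output = out)]
+```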
+
+For `anon_rule`, the configuration restrictions also apply, and there is an
+`artifact_promise_mappings` field in which you can specify a dict of artifact
+promise names to the map function, which would be applied to the anon target's
+promise during rule resolution.
+
+## Anon target
+
+An anonymous rule is used via `ctx.actions.anon_target` or
+`ctx.actions.anon_targets`, passing in the rule and the attributes for the rule.
+
+The return values of those functions are an `AnonTarget` and an `AnonTargets`
+type, respectively.
+
+Example:
+
+```python
+my_anon_rule1 = anon_rule(
+    impl = _anon_impl,
+    attrs = {},
+    artifact_promise_mappings = {}
+)
+
+my_anon_rule2 = anon_rule(
+    impl = _anon_impl,
+    attrs = {},
+    artifact_promise_mappings = {}
+)
+
+# Within a rule implementation:
+anon_target = ctx.actions.anon_target(my_anon_rule1, {})
+
+anon_targets = ctx.actions.anon_targets([(my_anon_rule1, {}), (my_anon_rule2, {})])
+```
+
+### `AnonTarget` and `AnonTargets`
+
+`AnonTarget` has a `promise` attribute, and `artifact()` and `artifacts()`
+functions. `AnonTargets` has a `promise` attribute and `anon_targets` attribute.
+
+The `promise` attribute for both types returns the anon target's promise (type
+is `promise`), which when evaluated returns the providers of the anonymous
+target. The `promise` type has a few special behaviors.
+
+- It has a `map` function, which takes a function and applies it to the future,
+  returning a new future.
+- All promises will eventually resolve to a list of providers.
+
+For `AnonTarget`, the `artifact()` and `artifacts()` functions only return
+something if using `anon_rule`. `artifact()` takes in an artifact name, which
+should be found in the `artifact_promise_mappings` dict, and returns the
+artifact promise. `artifacts()` returns the dict of all promise artifact names
+to the artifact promise itself, as defined in `artifact_promise_mappings`. See
+[Convert promise to artifact](#convert-promise-to-artifact) below for more
+information about artifact promises.
+
+Example:
+
+```python
+HelloInfo = provider(fields = ["output"])
+
+my_anon_rule = anon_rule(
+    impl = _anon_impl,
+    attrs = {},
+    artifact_promise_mappings = {
+        "hello": lambda x: x[HelloInfo].output,
+    }
+)
+
+# Within a rule implementation:
+anon_target = ctx.actions.anon_target(my_anon_rule, {})
+artifact = anon_target.artifact("hello")
+artifact_from_dict = anon_target.artifacts()["hello"]
+```
+
+For `AnonTargets`, the `anon_targets` attribute returns a list of the underlying
+`AnonTarget`s.
+
+Example:
+
+```python
+HelloInfo = provider(fields = ["output"])
+GoodbyeInfo = provider(fields = ["output"])
+
+my_anon_rule1 = anon_rule(
+    impl = _anon_impl,
+    attrs = {},
+    artifact_promise_mappings = {
+        "hello": lambda x: x[HelloInfo].output,
+    }
+)
+
+my_anon_rule2 = anon_rule(
+    impl = _anon_impl,
+    attrs = {},
+    artifact_promise_mappings = {
+        "goodbye": lambda x: x[GoodbyeInfo].output,
+    }
+)
+
+# Within a rule implementation:
+all_targets = ctx.actions.anon_targets([(my_anon_rule1, {}), (my_anon_rule2, {})])
+hello = all_targets.anon_targets[0].artifact("hello")
+goodbye = all_targets.anon_targets[1].artifact("goodbye")
+```
+
+# Attributes
+
+Anon targets only support a subset of attributes that normal rules support.
+
+Supported attributes:
+
+- `bool`
+- `int`
+- `str`
+- `enum`
+- `dep`
+  - `deps` attributes do not take strings, but dependencies, already in a
+    configuration
+  - `exec_deps` are available if the passed in `dep`'s execution platform
+    matches that of the anon target's caller
+  - Default `attr.deps` (as used for toolchains) are not permitted, as the
+    default can't express a dependency.
They must be passed forward from the
+    caller.
+- `source`
+  - Accepts bound artifacts or promise artifacts
+- `arg`
+  - Can only be used if `anon_target_compatible` is `True` when declaring
+    `attrs.arg` (ex: `attrs.arg(anon_target_compatible = True)`)
+- `label`
+- `list`
+- `tuple`
+- `dict`
+- `one_of`
+- `option`
+
+You can use these attributes like you would in normal rules:
+
+```python
+my_anon_rule = anon_rule(
+    impl = _my_anon_impl,
+    attrs = {
+        "my_int": attrs.int(),
+        "my_string_with_default": attrs.string(default = "foo"),
+        "my_optional_source": attrs.option(attrs.source()),
+        "my_list_of_labels": attrs.list(attrs.label()),
+    },
+    artifact_promise_mappings = {}
+)
+
+def _my_anon_impl(ctx: AnalysisContext) -> list[Provider]:
+    my_int = ctx.attrs.my_int
+    my_string_with_default = ctx.attrs.my_string_with_default
+    my_optional_source = ctx.attrs.my_optional_source
+    my_list_of_labels = ctx.attrs.my_list_of_labels
+
+    # do something with the attributes...
+
+    return [DefaultInfo()]
+```
+
+## Attribute resolution
+
+Attribute resolution is handled differently from normal code:
+
+- Transitions and more complex forms of attributes are banned.
+- The `name` attribute is a reserved attribute. It is an implicit attribute when
+  defining a rule for an anon target, but can be optionally set when creating an
+  anon target. If present, it must be a syntactically valid target, but could
+  refer to a cell/package that does not exist. If not present, buck2 will
+  generate a name for the target automatically.
+
+### `name` attribute example
+
+```python
+# Rule definition for anon target
+my_rule = rule(
+    impl = _my_impl,
+    attrs = {
+        # `name` is already implicitly defined as an attribute, and will error
+        # out if you try to define it again during rule declaration
+    },
+)
+
+# Anon target instantiation, elsewhere
+ctx.actions.anon_target(
+    my_rule,
+    {
+        # you can optionally pass `name` into the attributes even though it's
+        # not explicitly defined in the `attrs` field for `my_rule`
+        "name": "foo//bar:baz"
+    },
+)
+```
+
+To access the `name` attribute from an analysis context, you can use
+`ctx.label.name`.
+
+# Examples

## Simple Example

@@ -44,27 +296,12 @@ def impl(ctx):
        .map(k)
```

-Notes:
-
-* An anonymous rule is defined using `rule` or `anon_rule`. For `rule`, these are normal rules, with the difference that they are not in a configuration, so `ctx.actions.label` won't show configuration information, but just `unspecified`. For `anon_rule`, the configuration restrictions also apply, and there is an `artifact_promise_mappings` field which you can specify a dict of artifact promise names to the map function, which would be applied to the anon target's promise during rule resolution.
-* An anonymous rule is used via `ctx.actions.anon_target`, passing in the rule and the attributes for the rule.
-* The return value is a `AnonTarget` type, which has a `promise` attribute, and `artifact()` and `artifacts()` functions.
-* The `promise` attribute returns the anon target's promise (type is `promise`), which when evaluated returns the providers of the anonymous target. The `promise` type has a few special behaviors.
- * It has a `map` function, which takes a function and applies it to the future, returning a new future.
- * If analysis returns a `promise` type, the outer Rust layer invokes the future to get at the analysis result. If that future then returns another future, Rust keeps going until it has a final result.
It must eventually get to a list of providers.
-* Attribute resolution is handled differently from normal code:
- * String/Int/Bool happen as normal.
- * The name attribute is optional, but, if present, must be a syntactically valid target, but can refer to a cell/package that does not exist.
- * Deps attributes do not take strings, but dependencies, already in a configuration.
- * Exec_deps are available if the passed in dep's execution platform matches that of the anon target's caller.
- * Transitions and more complex forms of attributes are banned.
- * Default `attr.deps` (as used for toolchains) are not permitted, as the default can't express a dependency. They must be passed forward from the caller.
-* The `artifact()` and `artifacts()` functions only return something if using `anon_rule`. `artifact()` takes in an artifact name, which should be found in the `artifact_promise_mappings` dict, and returns the artifact promise. `artifacts()` returns the dict of all promise artifact names to the artifact promise itself, as defined in `artifact_promise_mappings`. See [Convert promise to artifact](#convert-promise-to-artifact) below for more information about artifact promises.
-* The execution platform for an anon target is that of the inherited from the calling target, which is part of the hash. If that is too restrictive, you could use execution groups, where an anon target gets told which execution group to use.
-
## Longer example

-The following code represents a scenario for a compile-and-link language where, if two targets end up compiling the same file (for example, they are in the same package and both list it, or it gets export_file'd), then that file is compiled just once:
+The following code represents a scenario for a compile-and-link language where,
+if two targets end up compiling the same file (for example, they are in the same
+package and both list it, or it gets export_file'd), then that file is compiled
+just once:

```python
## BUCK ##############
@@ -125,7 +362,6 @@ silly_binary = rule(
    impl = _silly_binary_impl,
    attrs = {
        "srcs": attr.list(attr.src()),
-        "link_flags": attr.args(),
        "_silly_toolchain": attr.dep(default = "toolchains//:silly"),
    },
)
@@ -133,7 +369,15 @@ silly_binary = rule(

## Convert promise to artifact

-It can be challenging to pass around the promises from anon_target and structure functions to support that. If you only need an artifact (or multiple artifacts) from an anon_target, you can use `artifacts()` function on the anon target to convert a promise to an artifact. This artifact can be passed to most things that expect artifacts, but until it is resolved (at the end of the current analysis) it can't be inspected with artifact functions like `.extension`, etc. `.short_path` is supported if `ctx.actions.assert_short_path()` was called, which produces an artifact type. The promise must resolve to a build (not source) artifact with no associated artifacts.
+It can be challenging to pass around the promises from anon_target and structure
+functions to support that. If you only need an artifact (or multiple artifacts)
+from an anon_target, you can use the `artifact()` function on the anon target to
+convert a promise to an artifact. This artifact can be passed to most things
+that expect artifacts, but until it is resolved (at the end of the current
+analysis) it can't be inspected with artifact functions like `.extension`, etc.
+`.short_path` is supported if `ctx.actions.assert_short_path()` was called,
+which produces an artifact type.
The promise must resolve to a build (not
+source) artifact with no associated artifacts.

Example:

@@ -171,5 +415,4 @@ def _use_impl(ctx: AnalysisContext) -> ["provider"]:
use_promise_artifact = rule(impl = _use_impl, attrs = {
    "some_tool": attr.exec_dep(),
})
-
```
diff --git a/docs/rule_authors/configuration_transitions.md b/docs/rule_authors/configuration_transitions.md
index 86b4fd5de1426..b0cdbdf886a8a 100644
--- a/docs/rule_authors/configuration_transitions.md
+++ b/docs/rule_authors/configuration_transitions.md
@@ -3,33 +3,39 @@ id: configuration_transitions
title: Configuration Transitions
---

-Configuration transition is a mechanism for changing the configuration when depending on a target.
+Configuration transition is a mechanism for changing the configuration when
+depending on a target.

Currently, Buck2 has incoming and outgoing transitions:

-* **Incoming** - (or per-rule transitions) declared on the rule.
-* **Outgoing** - (or per-attribute transitions) declared on the attribute.
+- **Incoming** - (or per-rule transitions) declared on the rule.
+- **Outgoing** - (or per-attribute transitions) declared on the attribute.

## Transition rule

Transition rules are defined in `.bzl` files using the `transition` built-in.

-The `transition` function creates a configuration-related object.
-The `transition` object is opaque, it does not have any operations, and can only be used as an argument to `rule` function or attribute constructor.
-The `transition` function call must be assigned to a global variable (this is similar to user-defined provider declarations).
+The `transition` function creates a configuration-related object. The
+`transition` object is opaque: it does not have any operations, and can only be
+used as an argument to the `rule` function or an attribute constructor. The
+`transition` function call must be assigned to a global variable (this is
+similar to user-defined provider declarations).

The `transition` function takes three arguments:

-* `implementation` - a function.
-* `refs` - references to configuration rules to be resolved and passed to the implementation function.
-* `split` - (optional) `bool` flag (default `False`) to indicate whether transition is a split transition (used in per attribute transitions).
+- `implementation` - a function.
+- `refs` - references to configuration rules to be resolved and passed to the
+  implementation function.
+- `split` - (optional) `bool` flag (default `False`) to indicate whether the
+  transition is a split transition (used in per attribute transitions).

The `implementation` function takes two arguments:

-* `platform` - a configuration to transition.
-* `refs` - resolved references as a struct.
+- `platform` - a configuration to transition.
+- `refs` - resolved references as a struct.

-Example transition from ios to watchos (for example, to build a watchOS bundle as part of an iOS build):
+Example transition from ios to watchos (for example, to build a watchOS bundle
+as part of an iOS build):

```python
def _impl(platform: PlatformInfo.type, refs: struct.type) -> PlatformInfo.type:
@@ -66,21 +72,27 @@ iphone_to_watch_transition = transition(_impl, refs = {
})
```

-A transition function applied twice must produce the configuration identical to the configuration produced after applying transition once.
+A transition function applied twice must produce a configuration identical to
+the configuration produced after applying the transition once.
```python
assert tr(tr(platform=platform, refs=refs), refs=refs) == tr(platform=platform, refs=refs)
```

-If this invariant is not held, certain operations produce incorrect and possibly infinite graphs. This is not yet enforced.
+If this invariant does not hold, certain operations produce incorrect and
+possibly infinite graphs. This is not yet enforced.

## Per rule transition

-The `rule` function has an optional `cfg` attribute, which takes a reference to the `transition` object (created with the `transition` function; not a string).
+The `rule` function has an optional `cfg` attribute, which takes a reference to
+the `transition` object (created with the `transition` function; not a string).

-When such a rule is called, it is instantiated, not with the requested configuration, but with the requested configuration transformed with a given rule transition.
+When such a rule is called, it is instantiated, not with the requested
+configuration, but with the requested configuration transformed with a given
+rule transition.

-For example, the transition for watchos when the iOS target depends on watchos resource:
+For example, the transition for watchos when the iOS target depends on watchos
+resource:

```python
watchos_resource = rule(
@@ -93,13 +105,16 @@ watchos_resource = rule(

The `attrs` object has two attribute constructors:

-* `attrs.transition_dep(cfg)`
-* `attrs.split_transition_dep(cfg)`
+- `attrs.transition_dep(cfg)`
+- `attrs.split_transition_dep(cfg)`

-These attributes are similar to the `dep` attribute. When dependencies are resolved for the rule instance, then they are resolved not with the rule instance configuration,
-but with the configuration transformed with the given transition.
+These attributes are similar to the `dep` attribute. When dependencies are
+resolved for the rule instance, then they are resolved not with the rule
+instance configuration, but with the configuration transformed with the given
+transition.

-For split transition, each dependency is resolved into a dict of marker to providers.
+For split transition, each dependency is resolved into a dict of markers to
+providers.

For example:

@@ -120,7 +135,8 @@ android_binary(
)
```

-Then the rule implementation gets something like the following in the `deps` attribute:
+Then the rule implementation gets something like the following in the `deps`
+attribute:

```python
{
@@ -138,16 +154,20 @@ Then the rule implementation gets something like the following in the `deps` att
}
```

+
:::note
It is an error to pass a split transition object to `attrs.transition_dep` and a
non-split transition to `attrs.split_transition_dep`.
+
+
:::

## Per target transition

-The Buck2 team is considering the implementation of per target transitions (that is, transitions referenced at a rule instantiation site as opposed to rule declaration site).
-No specific plans or APIs exists at the moment.
+The Buck2 team is considering the implementation of per target transitions (that
+is, transitions referenced at a rule instantiation site as opposed to the rule
+declaration site). No specific plans or APIs exist at the moment.

-It *could* be something like the following:
+It _could_ be something like the following:

```python
cxx_binary(
@@ -163,9 +183,14 @@ For information, see [RFC](../rfcs/drafts/configuration-at-syntax.md).
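+Returning to the split-transition example above, a rule implementation could
+consume the resolved `deps` dict along these lines (a sketch only: the dict
+shape mirrors the example above, while `_android_binary_impl` and the marker
+string are hypothetical, not the real Android rules):
+
+```python
+def _android_binary_impl(ctx: AnalysisContext) -> list[Provider]:
+    outputs = []
+    for dep in ctx.attrs.deps:
+        # With attrs.split_transition_dep, each dep resolves to a dict of
+        # marker (for example, "cpu-arm-64") to providers built in that
+        # configuration.
+        for marker, providers in dep.items():
+            outputs.extend(providers[DefaultInfo].default_outputs)
+    return [DefaultInfo(default_outputs = outputs)]
+```
+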
## Access rule attributes in transition function implementation -It might be useful for the transition function to be able to query rule attributes (for example, to perform transition to different configurations depending on `java_version` attribute). +It might be useful for the transition function to be able to query rule +attributes (for example, to perform transition to different configurations +depending on `java_version` attribute). -Both incoming (per rule) and outgoing (per dependency) transitions can access rule attributes. For outgoing transitions, transition rule implementation accesses the attributes of the target that has dependencies with transitions, not attributes of dependency targets. +Both incoming (per rule) and outgoing (per dependency) transitions can access +rule attributes. For outgoing transitions, transition rule implementation +accesses the attributes of the target that has dependencies with transitions, +not attributes of dependency targets. ```python def _tr(platform, refs, attrs): diff --git a/docs/rule_authors/configurations.md b/docs/rule_authors/configurations.md index 0139384e6ae7e..ec62da6f708b9 100644 --- a/docs/rule_authors/configurations.md +++ b/docs/rule_authors/configurations.md @@ -3,34 +3,57 @@ id: configurations title: Configurations --- -This page mostly focuses on how configurations and related features are implemented. +This page mostly focuses on how configurations and related features are +implemented. ## Context -Buck configurations provide an API to express the different ways in which projects and targets can be built. +Buck configurations provide an API to express the different ways in which +projects and targets can be built. -A configuration consists of a set of constraints and config settings (values from buckconfig). These are determined by a base platform that sets the initial values and then a series of transitions that may change them. +A configuration consists of a set of constraints and config settings (values +from buckconfig). These are determined by a base platform that sets the initial +values and then a series of transitions that may change them. -The common way that users are exposed to configurations is in `select()` invocations where the resolution is based on the configuration. +The common way that users are exposed to configurations is in `select()` +invocations where the resolution is based on the configuration. -A build may involve many configurations. A particular target label (`//:foo`) may end up with multiple instances in the configured graph with different configurations. +A build may involve many configurations. A particular target label (`//:foo`) +may end up with multiple instances in the configured graph with different +configurations. ## Selectable attributes -Almost all rule attributes can be set to a `select()` value; such an attribute is 'selectable'. These attributes' final resolved values will depend on the configuration. +Almost all rule attributes can be set to a `select()` value; such an attribute +is 'selectable'. These attributes' final resolved values will depend on the +configuration. -There are some attributes that cannot use a `select()`; such attributes are termed 'not selectable'. Examples include attributes that buck needs to read from the unconfigured node (such as `name` and `default_target_platform`) and attributes that are used by `platform()` rules and their dependencies (see below). +There are some attributes that cannot use a `select()`; such attributes are +termed 'not selectable'. 
Examples include attributes that buck needs to read
+from the unconfigured node (such as `name` and `default_target_platform`) and
+attributes that are used by `platform()` rules and their dependencies (see
+below).

## Selectable resolution

-Resolving selectable attributes is pretty straightforward, it happens when constructing the 'configured target node'. At that point, the full configuration is available so Buck can lookup whether each constraint in the select is satisfied or not.
+Resolving selectable attributes is pretty straightforward: it happens when
+constructing the 'configured target node'. At that point, the full configuration
+is available so Buck can look up whether each constraint in the select is
+satisfied or not.

-If multiple conditions of the select() match, then the select will be resolved to the 'most refined' of the conditions that match. A set of constraints (as in
-a `config_setting`) is said to 'refine' another if it is a superset of that other's constraints. The 'most refined' of a set is then the condition that refines all the others. If there is no 'most refined' condition of the matching ones, it is an error.
+If multiple conditions of the select() match, then the select will be resolved
+to the 'most refined' of the conditions that match. A set of constraints (as in
+a `config_setting`) is said to 'refine' another if it is a superset of that
+other's constraints. The 'most refined' of a set is then the condition that
+refines all the others. If there is no 'most refined' condition of the matching
+ones, it is an error.

## Target Platform Resolution

-In the event that targets are provided on the command line, or when there is no indication of what configuration the target will be built in, configurations are determined by performing 'target platform resolution' on the unconfigured target labels.
+In the event that targets are provided on the command line, or when there is no
+indication of what configuration the target will be built in, configurations are
+determined by performing 'target platform resolution' on the unconfigured target
+labels.

The target platform resolution for a target `//:foo` works as follows:

@@ -39,84 +62,137 @@ The target platform resolution for a target `//:foo` works as follows:
1. If there's a `default_target_platform` attribute, use that.
1. Else, use the cell's default platform.

-This is performed independently for any targets that need a platform. Since this resolution is done without a configuration, it means that the `default_target_platform` attribute **is not selectable**.
+This is performed independently for any targets that need a platform. Since this
+resolution is done without a configuration, it means that the
+`default_target_platform` attribute **is not selectable**.

This target platform will form the initial configuration for the node.

## Configuration propagation

-Once the top-level nodes have been configured via the target platform resolution, the configuration is propagated to dependencies (possibly altered by transitions).
+Once the top-level nodes have been configured via the target platform
+resolution, the configuration is propagated to dependencies (possibly altered by
+transitions).
+
:::note
The target platform resolution is not applied to all nodes in the graph.
+
+
:::

## Transitions

-A transition transforms a configuration by adding or changing constraint values and config settings or by setting an entirely new underlying target platform.
+A transition transforms a configuration by adding or changing constraint values +and config settings or by setting an entirely new underlying target platform. For more details, see [Configuration transitions](configuration_transitions.md). ## `ConfigurationInfo`, `platform()` analysis, and more -The definition of a platform (either execution or target) is done with a `platform` rule instance. The configuration is actually part of the analysis result of the platform target (the `ConfigurationInfo` provider instance). This is convenient from -an implementation standpoint, but it leads to a situation where some nodes are analyzed with an 'unbound' Configuration. +The definition of a platform (either execution or target) is done with a +`platform` rule instance. The configuration is actually part of the analysis +result of the platform target (the `ConfigurationInfo` provider instance). This +is convenient from an implementation standpoint, but it leads to a situation +where some nodes are analyzed with an 'unbound' Configuration. -All the rule types involved in defining a platform may be analyzed with an unbound configuration (`platform()`, `config_setting()`, `constraint_setting()`, and so on). These are sometimes called 'configuration rules'. This means that all the attributes of these rules are not selectable. +All the rule types involved in defining a platform may be analyzed with an +unbound configuration (`platform()`, `config_setting()`, `constraint_setting()`, +and so on). These are sometimes called 'configuration rules'. This means that +all the attributes of these rules are not selectable. -Configurations also reference a few other provider instances such as `ConstraintSettingInfo`. All of these end up being potentially produced in a context with an unbound configuration. +Configurations also reference a few other provider instances such as +`ConstraintSettingInfo`. All of these end up being potentially produced in a +context with an unbound configuration. -Using analysis for this also means that 'configuration' and 'analysis' are not distinct phases within a build (although they are still distinct for a node and are still conceptually useful). +Using analysis for this also means that 'configuration' and 'analysis' are not +distinct phases within a build (although they are still distinct for a node and +are still conceptually useful). ## Configurations and output paths -Since a target may appear within a build in multiple different configurations, output paths cannot be derived based on just targets (as multiple actions would map to the same outputs). For this reason, the target and the configuration are encoded into output paths. The configuration is currently represented as a hash of its values (a 'hashed buck-out'). +Since a target may appear within a build in multiple different configurations, +output paths cannot be derived based on just targets (as multiple actions would +map to the same outputs). For this reason, the target and the configuration are +encoded into output paths. The configuration is currently represented as a hash +of its values (a 'hashed buck-out'). ## Target platform compatibility -All (non-configuration) rules support a `target_compatible_with` attribute. In addition, the rule itself can define `target_compatible_with` constraints that affect all instances. The `target_compatible_with` attribute is a list of constraints/config settings and it **is selectable**. +All (non-configuration) rules support a `target_compatible_with` attribute. 
In
+addition, the rule itself can define `target_compatible_with` constraints that
+affect all instances. The `target_compatible_with` attribute is a list of
+constraints/config settings and it **is selectable**.

-Target platform compatibility is transitive, all *dependents* of an incompatible target are incompatible. In other words, a node is compatible if and only if the node itself and all of its transitive dependencies are compatible.
+Target platform compatibility is transitive: all _dependents_ of an incompatible
+target are incompatible. In other words, a node is compatible if and only if the
+node itself and all of its transitive dependencies are compatible.

-In buck, this is implemented by graph configuration returning either a configured target node or an indicator that the node is incompatible with the target platform.
+In buck, this is implemented by graph configuration returning either a
+configured target node or an indicator that the node is incompatible with the
+target platform.

### Buck v1 compatibility

-Buck2 also supports the Buck v1 legacy `compatible_with` field on nodes but it has different behavior.
+Buck2 also supports the Buck v1 legacy `compatible_with` field on nodes but it
+has different behavior.

In summary:

-* `compatible_with`: List of constraints, where *any* of them must match the configuration to be compatible.
-* `target_compatible_with`: List of constraints, where *all* of them must match the configuration to be compatible.
+- `compatible_with`: List of constraints, where _any_ of them must match the
+  configuration to be compatible.
+- `target_compatible_with`: List of constraints, where _all_ of them must match
+  the configuration to be compatible.

## Incompatible target skipping

-In a build-like command where a non-literal target pattern is provided (for example, `buck build //:` or `buck build //foo/...`), the target pattern will be resolved to a set of unconfigured targets. Those targets will then go through [target platform resolution](#target-platform-resolution). If any of those targets resolve to a platform where they are incompatible, building them will be skipped. Users generally expect and prefer this behavior to needing to explicitly specify only the targets that can build in their current context.
+In a build-like command where a non-literal target pattern is provided (for
+example, `buck build //:` or `buck build //foo/...`), the target pattern will be
+resolved to a set of unconfigured targets. Those targets will then go through
+[target platform resolution](#target-platform-resolution). If any of those
+targets resolve to a platform where they are incompatible, building them will be
+skipped. Users generally expect and prefer this behavior to needing to
+explicitly specify only the targets that can build in their current context.

If an explicitly specified literal is incompatible, it is an error.

-The implementation checks compatibility when looking up the analysis results for configured nodes requested (in the non-ignored flow, it uses
-that analysis result to lookup the default outputs and build them).
+The implementation checks compatibility when looking up the analysis results for
+configured nodes requested (in the non-ignored flow, it uses that analysis
+result to look up the default outputs and build them).

## Execution platforms

-Execution platforms/configurations are used to represent the platforms where build execution happens. These are defined in a similar manner to target platforms.
-These may or may not be what one would logically consider different 'platforms'. For example, there could be multiple different execution platforms that all execute things similarly on the local machine.
+Execution platforms/configurations are used to represent the platforms where
+build execution happens. These are defined in a similar manner to target
+platforms. These may or may not be what one would logically consider different
+'platforms'. For example, there could be multiple different execution platforms
+that all execute things similarly on the local machine.

A build configures a fixed list of one or more execution platforms.

## Execution deps

-Some target deps are 'execution deps'. These are the dependencies of the target that should be built for the execution platform. For example, a compiler or other build tool would be an execution dep. This includes all exe macro deps (for example, `$(exe //:tool)`) and includes all `attrs.exec_dep()` deps.
+Some target deps are 'execution deps'. These are the dependencies of the target
+that should be built for the execution platform. For example, a compiler or
+other build tool would be an execution dep. This includes all exe macro deps
+(for example, `$(exe //:tool)`) and includes all `attrs.exec_dep()` deps.

## Toolchain deps

-In addition to `attrs.exec_dep()`, there are `attrs.toolchain_dep()`, which are similar but differ in an important way. These nodes don't select their execution platform, but instead have it forced on them by whatever includes them; hence, it must be recorded in the configured target label. The execution platform resolution sees through them.
-
-In other words, `attrs.toolchain_dep()` is like a mix of `attrs.dep()` and `attrs.exec_dep()`: it inherits target platform like `attrs.dep()` (so any
-`select()`s on the target of the `attrs.toolchain_dep()` will evaluate as if they were on the target containing the `attrs.toolchain_dep()` - the target
-platform gets inherited as normal) and any `attrs.exec_dep()`s of the `attrs.toolchain_dep()` target become `attrs.exec_deps()` on the dependent of
-target the `attrs.toolchain_dep()` (they get passed up the dep tree, so participate in exec platform resolution).
+In addition to `attrs.exec_dep()`, there are `attrs.toolchain_dep()`, which are
+similar but differ in an important way. These nodes don't select their execution
+platform, but instead have it forced on them by whatever includes them; hence,
+it must be recorded in the configured target label. The execution platform
+resolution sees through them.
+
+In other words, `attrs.toolchain_dep()` is like a mix of `attrs.dep()` and
+`attrs.exec_dep()`: it inherits the target platform like `attrs.dep()` (so any
+`select()`s on the target of the `attrs.toolchain_dep()` will evaluate as if
+they were on the target containing the `attrs.toolchain_dep()` - the target
+platform gets inherited as normal) and any `attrs.exec_dep()`s of the
+`attrs.toolchain_dep()` target become `attrs.exec_dep()`s on the dependent of
+the `attrs.toolchain_dep()` target (they get passed up the dep tree, so they
+participate in exec platform resolution).

This is illustrated in the following example:

@@ -131,16 +207,35 @@ target(
)
```

-The above means that `:C` will be an execution dependency of `:A` and any `select()`s defined in `:B` would be evaluated against the same target platform as `:A` (as target platform gets inherited by `attrs.toolchain_dep()`s).
+The above means that `:C` will be an execution dependency of `:A` and any +`select()`s defined in `:B` would be evaluated against the same target platform +as `:A` (as target platform gets inherited by `attrs.toolchain_dep()`s). ## Running non-execution deps -If you have a binary that you want to run, but it isn't a build tool, then you should use `$(exe_target //:binary)` rather than `$(exe //:binary)`. That will run the same binary that you'd get from `buck2 build`, rather than one that is built for the execution platform. +If you have a binary that you want to run, but it isn't a build tool, then you +should use `$(exe_target //:binary)` rather than `$(exe //:binary)`. That will +run the same binary that you'd get from `buck2 build`, rather than one that is +built for the execution platform. + +The path macros vary along two axes: + +- **Path Source**: either `DefaultInfo` or `RunInfo` providers +- **Configuration**: inherits the configuration or transitions to an execution + platform configuration + +Specifically: + +- `$location`: `DefaultInfo` path source, inherits configuration +- `$exe`: `RunInfo` path source, exec platform configuration +- `$exe_target`: `RunInfo` path source, inherits configuration ## Execution platform resolution -During analysis, unlike target platform resolution, every configured node undergoes execution platform resolution independently (see exception below). This -means that even for a specific target platform, different nodes in the graph can be built on different execution platforms. +During analysis, unlike target platform resolution, every configured node +undergoes execution platform resolution independently (see exception below). +This means that even for a specific target platform, different nodes in the +graph can be built on different execution platforms. This works roughly as follows: @@ -154,12 +249,17 @@ next: for platform in execution_platforms: return err ``` -One important note here is that until the execution platform has been resolved, **the configuration for execution deps is not known**. Only after -execution platform has been resolved can the execution deps be configured (also, analysis for them can only be performed at that point). +One important note here is that until the execution platform has been resolved, +**the configuration for execution deps is not known**. Only after execution +platform has been resolved can the execution deps be configured (also, analysis +for them can only be performed at that point). -For the normal use case, a particular configured target node performs execution platform resolution a single time. The execution platform **is not** encoded in output paths. +For the normal use case, a particular configured target node performs execution +platform resolution a single time. The execution platform **is not** encoded in +output paths. -Regarding target compatibility, imagine the following pseudo-code for the `target_compatible_with()` function above: +Regarding target compatibility, imagine the following pseudo-code for the +`target_compatible_with()` function above: ```python def target_compatible_with(target, cfg): @@ -186,5 +286,6 @@ def target_compatible_with(target, cfg): ## Execution groups -Execution groups are a future feature that will allow a rule to perform execution platform resolution multiple times and then specify in which of the resolved -platforms each action runs in. 
+Execution groups are a future feature that will allow a rule to perform +execution platform resolution multiple times and then specify in which of the +resolved platforms each action runs in. diff --git a/docs/rule_authors/dep_files.md b/docs/rule_authors/dep_files.md index 1558873573c6f..089b5e368009f 100644 --- a/docs/rule_authors/dep_files.md +++ b/docs/rule_authors/dep_files.md @@ -3,31 +3,46 @@ id: dep_files title: Dep Files --- -Dep files allow commands to declare which subset of their inputs were used when the command executed. +Dep files allow commands to declare which subset of their inputs were used when +the command executed. -When a command produces a dep file and is later invalidated due to an inputs change, Buck2 uses the dep file to check whether the inputs that changed were in the set that the command reported as having used. If none of the inputs that changed were in that set, Buck2 omits re-running the command and reuses the previous result. +When a command produces a dep file and is later invalidated due to an inputs +change, Buck2 uses the dep file to check whether the inputs that changed were in +the set that the command reported as having used. If none of the inputs that +changed were in that set, Buck2 omits re-running the command and reuses the +previous result. ## Use Cases -Dep files are used to make dependencies finer grained than what exists in the target graph, but they're not a substitute for avoiding unused dependencies. They're often useful when targets export many outputs (such as C++ headers) that aren't all used by all their dependents. +Dep files are used to make dependencies finer grained than what exists in the +target graph, but they're not a substitute for avoiding unused dependencies. +They're often useful when targets export many outputs (such as C++ headers) that +aren't all used by all their dependents. -Dep files are currently used to skip recompilation steps in C++ when an unused header changed. They're also used in Java to skip recompilation when an unused class changed. +Dep files are currently used to skip recompilation steps in C++ when an unused +header changed. They're also used in Java to skip recompilation when an unused +class changed. ## Using dep files To use dep files, you need to do the following: -* Declare what output is a dep file and associate it with your command. -* Declare which inputs are covered by the dep file (this can be a subset of your inputs). -* Have your command produce the dep file in a format Buck2 can use. +- Declare what output is a dep file and associate it with your command. +- Declare which inputs are covered by the dep file (this can be a subset of your + inputs). +- Have your command produce the dep file in a format Buck2 can use. -You must also enable [Deferred Materialization](../users/advanced/deferred_materialization.md) to use dep files. +You must also enable +[Deferred Materialization](../users/advanced/deferred_materialization.md) to use +dep files. ## Declaring the dep files and associating inputs -To declare a dep file and associate it with your command, you need to tag your artifacts. +To declare a dep file and associate it with your command, you need to tag your +artifacts. 
-Specifically, you'll tag the output (the dep file) and the inputs it covers, as shown in the following code: +Specifically, you'll tag the output (the dep file) and the inputs it covers, as +shown in the following code: ```python # First, create a tag @@ -57,40 +72,64 @@ ctx.actions.run( ## Producing the dep file -Your command must produce dep files in the format Buck2 expects, which is simply a list of all the inputs that were used, one per line. +Your command must produce dep files in the format Buck2 expects, which is simply +a list of all the inputs that were used, one per line. -The paths must be the paths Buck2 would use for your inputs, which means paths relative to the project root. +The paths must be the paths Buck2 would use for your inputs, which means paths +relative to the project root. -If this is not the format your tool produces, use a wrapper to take whatever output your command produces and rewrite it in the format Buck2 expects. +If this is not the format your tool produces, use a wrapper to take whatever +output your command produces and rewrite it in the format Buck2 expects. ## Testing dep files -When writing a command that produces a dep file, you should test it! At a minimum, check that the inputs you expect are tagged properly. +When writing a command that produces a dep file, you should test it! At a +minimum, check that the inputs you expect are tagged properly. -To do so, build your target, then use `buck2 audit dep-files TARGET CATEGORY IDENTIFIER`, which will show you the set of inputs your command used and how they're tagged. +To do so, build your target, then use +`buck2 audit dep-files TARGET CATEGORY IDENTIFIER`, which will show you the set +of inputs your command used and how they're tagged. ## Extra notes to the implementer ### Limitations -Dep files only work if a previous invocation of the command is known to your Buck2 daemon. Dep files are dropped when the daemon restarts or when you run `buck2 debug flush-dep-files`. +Dep files only work if a previous invocation of the command is known to your +Buck2 daemon. Dep files are dropped when the daemon restarts or when you run +`buck2 debug flush-dep-files`. -This means that, for example, if you change an unused header, then run a build on a fresh daemon, Buck2 will still need to execute this command in order to identify that the header was in fact unused. In contrast, if you did the build (and got a remote cache hit on the command), then applied your change and re-built, Buck2 would use the dep file on the second execution, and you wouldn't need to execute anything. +This means that, for example, if you change an unused header, then run a build +on a fresh daemon, Buck2 will still need to execute this command in order to +identify that the header was in fact unused. In contrast, if you did the build +(and got a remote cache hit on the command), then applied your change and +re-built, Buck2 would use the dep file on the second execution, and you wouldn't +need to execute anything. ### Dep files don't need to be covering -It's OK for the dep file to only cover a subset of the inputs of your action. However, within that subset, the dep file must declare all the inputs that were used. +It's OK for the dep file to only cover a subset of the inputs of your action. +However, within that subset, the dep file must declare all the inputs that were +used. -If you fail to report some inputs you used, then your command will not re-run when they change, and you'll get stale output. 
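+
+For example, if only the headers consumed by a compile action are tagged with a
+dep file, a valid dep file could list just the headers that were actually read
+(the paths below are hypothetical, relative to the project root):
+
+```
+my_app/include/used_header_a.h
+my_app/include/used_header_b.h
+```
+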
+If you fail to report some inputs you used, then your command will not re-run +when they change, and you'll get stale output. ### Dep files are lazy -Dep files aren't parsed by Buck2 unless the command needs to re-run. If the command ran on RE, they aren't even downloaded until then. This ensures dep files don't cause a performance hit unless they are used, at which point they stand a chance of giving a performance boost instead. +Dep files aren't parsed by Buck2 unless the command needs to re-run. If the +command ran on RE, they aren't even downloaded until then. This ensures dep +files don't cause a performance hit unless they are used, at which point they +stand a chance of giving a performance boost instead. -This means that if you produce an invalid dep file, Buck2 will not report this until your command runs again, at which point Buck2 will report that the dep file is invalid and refuse to proceed (note: you can unblock yourself using `buck2 debug flush-dep-files`). +This means that if you produce an invalid dep file, Buck2 will not report this +until your command runs again, at which point Buck2 will report that the dep +file is invalid and refuse to proceed (note: you can unblock yourself using +`buck2 debug flush-dep-files`). -To flush out issues during development, you can pass `--eager-dep-files` to Buck2 to force Buck2 to parse your dep files as they are produced. +To flush out issues during development, you can pass `--eager-dep-files` to +Buck2 to force Buck2 to parse your dep files as they are produced. ## Dep files will traverse symlinks -If your dep file reports that a symlink was used, Buck2 will track the symlink's target as covered by this dep file. +If your dep file reports that a symlink was used, Buck2 will track the symlink's +target as covered by this dep file. diff --git a/docs/rule_authors/dynamic_dependencies.md b/docs/rule_authors/dynamic_dependencies.md index bbca8804e5b0f..14abf381a28d6 100644 --- a/docs/rule_authors/dynamic_dependencies.md +++ b/docs/rule_authors/dynamic_dependencies.md @@ -3,20 +3,35 @@ id: dynamic_dependencies title: Dynamic Dependencies --- -Dynamic dependencies allow a rule to use information that was not available when the rule was first run at analysis time. Dynamic dependencies in Buck2 are implemented using `dynamic_output` and are restricted in their power compared to fully generic dynamic dependencies. +import { FbInternalOnly } from 'docusaurus-plugin-internaldocs-fb/internal'; -A rule for a target is run with the attributes of the target, plus the providers of its attribute dependencies, which contain artifacts. Those values (but not the artifact contents) are all available directly and immediately when running the rule. The rule generates providers containing artifacts. Using `dynamic_output`, a rule can read the contents of an artifact to produce new artifacts and bind existing artifacts, which were already returned in providers. +Dynamic dependencies allow a rule to use information that was not available when +the rule was first run at analysis time. Dynamic dependencies in Buck2 are +implemented using `dynamic_output` and are restricted in their power compared to +fully generic dynamic dependencies. + +A rule for a target is run with the attributes of the target, plus the providers +of its attribute dependencies, which contain artifacts. Those values (but not +the artifact contents) are all available directly and immediately when running +the rule. The rule generates providers containing artifacts. 
Using
+`dynamic_output`, a rule can read the contents of an artifact to produce new
+artifacts and bind existing artifacts, which were already returned in providers.

Examples of rules requiring dynamic dependencies include:

-* Distributed ThinLTO, where the index file says what the dependencies are.
-* OCaml builds, where the dependencies between source files can only be obtained from running `ocamldeps`.
-* Erlang header files, where only a subset of the available headers are accessed, which can be determined by reading the source file.
-* Erlang BEAM files, where some subset of BEAM files must be compiled in a given order, as they provide features like compiler plugins, but most can be compiled in parallel.
+- Distributed ThinLTO, where the index file says what the dependencies are.
+- OCaml builds, where the dependencies between source files can only be obtained
+  from running `ocamldep`.
+- Erlang header files, where only a subset of the available headers are
+  accessed, which can be determined by reading the source file.
+- Erlang BEAM files, where some subset of BEAM files must be compiled in a given
+  order, as they provide features like compiler plugins, but most can be
+  compiled in parallel.

-The original design document with discussion is available [here](https://docs.google.com/document/d/1K8RgvDMvdDFsLWAu0cehauJstHZaFe-7NeaAqWe4-L4/edit).
+The original design document with discussion is available
+[here](https://docs.google.com/document/d/1K8RgvDMvdDFsLWAu0cehauJstHZaFe-7NeaAqWe4-L4/edit).
@@ -30,20 +45,42 @@ ctx.actions.dynamic_output(dynamic, inputs, outputs, lambda ctx: …)

The arguments are:

-* `dynamic` - a list of artifacts whose values will be available in the function. These will be built before the function is run.
-* `inputs` - a container of artifacts (`cmd_args`, list of artifacts, and so on).
- * These inputs must include all the inputs that are referenced by the body of the function argument, apart from those listed in `dynamic` and `outputs`: extra inputs may be passed that are not used.
- * The inputs are used for `buck2 aquery` functionality, but do not cause speculative building. In fact, these inputs may form a cycle with other `dynamic_output` actions if they were all required.
- * In the future, it may be possible to not pass all the inputs if the repo is set to permissive mode, allowing a more powerful form of dynamic dependencies.
-* `outputs` - a list of unbound artifacts (created with `declare_artifact`) which will be bound by the function.
-* The function argument is given 3 arguments:
- * `ctx` (context) - which is the same as that passed to the initial rule analysis.
- * `outputs` - using one of the artifacts from the `dynamic_output`'s `outputs` (example usage: `outputs[artifact_from_dynamic_output_outputs]`) gives an unbounded artifact. The function argument must use its `outputs` argument to bind output artifacts, rather than reusing artifacts from the outputs passed into `dynamic_output` directly.
- * `artifacts` - using one of the artifacts from `dynamic` (example usage: `artifacts[artifact_from_dynamic])` gives an artifact value containing the methods `read_string`, `read_lines`, and `read_json` to obtain the values from the disk in various formats. Anything too complex should be piped through a Python script for transformation to JSON.
-* The function must call `ctx.actions` (probably `ctx.actions.run`) to bind all outputs. It can examine the values of the dynamic variables and depends on the inputs.
-  * The function will usually be a `def`, as `lambda` in Starlark does not allow statements, making it quite underpowered.
-
-Following is an example of using the function to determine Erlang BEAM dependencies:
+- `dynamic` - a list of artifacts whose values will be available in the
+  function. These will be built before the function is run.
+- `inputs` - a container of artifacts (`cmd_args`, list of artifacts, and so
+  on).
+  - These inputs must include all the inputs that are referenced by the body of
+    the function argument, apart from those listed in `dynamic` and `outputs`:
+    extra inputs may be passed that are not used.
+  - The inputs are used for `buck2 aquery` functionality, but do not cause
+    speculative building. In fact, these inputs may form a cycle with other
+    `dynamic_output` actions if they were all required.
+  - In the future, it may be possible to not pass all the inputs if the repo is
+    set to permissive mode, allowing a more powerful form of dynamic
+    dependencies.
+- `outputs` - a list of unbound artifacts (created with `declare_output`) which
+  will be bound by the function.
+- The function argument is given 3 arguments:
+  - `ctx` (context) - which is the same as that passed to the initial rule
+    analysis.
+  - `outputs` - using one of the artifacts from the `dynamic_output`'s `outputs`
+    (example usage: `outputs[artifact_from_dynamic_output_outputs]`) gives an
+    unbound artifact. The function argument must use its `outputs` argument to
+    bind output artifacts, rather than reusing artifacts from the outputs passed
+    into `dynamic_output` directly.
+  - `artifacts` - using one of the artifacts from `dynamic` (example usage:
+    `artifacts[artifact_from_dynamic]`) gives an artifact value containing the
+    methods `read_string`, `read_lines`, and `read_json` to obtain the values
+    from the disk in various formats. Anything too complex should be piped
+    through a Python script for transformation to JSON.
+- The function must call `ctx.actions` (probably `ctx.actions.run`) to bind all
+  outputs. It can examine the values of the dynamic variables, and it depends
+  on the inputs.
+  - The function will usually be a `def`, as `lambda` in Starlark does not allow
+    statements, making it quite underpowered.
+
+Following is an example of using the function to determine Erlang BEAM
+dependencies:

```python
def erlang(ctx):
@@ -64,4 +101,5 @@ def erlang(ctx):
    return [ErlangInfo(objects = beams.values())]
```

-The above code uses `declare_output` for the `beam_file` then binds it within the function `f`, after having read the `dep_file` with `read_lines`.
+The above code uses `declare_output` for the `beam_file` then binds it within
+the function `f`, after having read the `dep_file` with `read_lines`.
diff --git a/docs/rule_authors/incremental_actions.md b/docs/rule_authors/incremental_actions.md
index 869095cb32bb4..f0a251aba6420 100644
--- a/docs/rule_authors/incremental_actions.md
+++ b/docs/rule_authors/incremental_actions.md
@@ -3,18 +3,34 @@ id: incremental_actions
title: Incremental Actions
---

-It's possible to make certain Buck2 actions behave incrementally, that is, to produce results for a current invocation based on the result from the previous run. Incrementality could significantly improve performance of some actions such as packaging (such as Apple App Bundles) or linking (MSVC incremental linking).
+It's possible to make certain Buck2 actions behave incrementally, that is, to
+produce results for a current invocation based on the result from the previous
+run.
+Incrementality can significantly improve the performance of some actions, such
+as packaging (such as Apple App Bundles) or linking (MSVC incremental linking).

There are two essential requirements to make an action incremental:

-* The result from the previous run should be accessible.
-* An understanding of which parts of the result need to be updated; it should be easy to compare inputs from a previous run with inputs from the current run and detect those changed.
+- The result from the previous run should be accessible.
+- An understanding of which parts of the result need to be updated; it should be
+  easy to compare inputs from a previous run with inputs from the current run
+  and detect those that changed.

-The only way to run user-defined commands in Buck2 is with `ctx.actions.run`. Both of the above requirements are met via its `metadata_env_var`, `metadata_path` and `no_outputs_cleanup` parameters.
+The only way to run user-defined commands in Buck2 is with `ctx.actions.run`.
+Both of the above requirements are met via its `metadata_env_var`,
+`metadata_path` and `no_outputs_cleanup` parameters.

-When the `no_outputs_cleanup` flag is turned on, Buck2 won't perform any deletion of old outputs for the action. That means the result from the previous run will be accessible, but the user script has to detect which parts of it should be deleted and perform a manual cleanup.
+When the `no_outputs_cleanup` flag is turned on, Buck2 won't perform any
+deletion of old outputs for the action. That means the result from the previous
+run will be accessible, but the user script has to detect which parts of it
+should be deleted and perform a manual cleanup.

-When the `metadata_env_var` and `metadata_path` parameters are present, Buck2 will create a JSON file on a disk before actually executing the command. The file will contain a list of paths and hash digests for every command action input. All paths in the file are relative to the Buck2 project root. Symlinks are not included in metadata because it is possible for the user script to resolve symlink and use a resolved path to get the destination hash digest from action metadata if it's needed, as shown in the following JSON example:
+When the `metadata_env_var` and `metadata_path` parameters are present, Buck2
+will create a JSON file on disk before actually executing the command. The file
+will contain a list of paths and hash digests for every command action input.
+All paths in the file are relative to the Buck2 project root. Symlinks are not
+included in metadata because it is possible for the user script to resolve a
+symlink and use the resolved path to get the destination hash digest from
+action metadata if it's needed, as shown in the following JSON example:

```json
{
@@ -29,11 +45,17 @@ When the `metadata_env_var` and `metadata_path` parameters are present, Buck2 wi
}
```

-A user script that is run as a part of an action execution is responsible for parsing the JSON file.
+A user script that is run as part of an action execution is responsible for
+parsing the JSON file.

-The `version` field is bumped every time there is a non-backwards compatible change to the format of the file. The user script should verify that the provided data is of a supported version and should be updated accordingly when the current version is newer than the supported one.
+The `version` field is bumped every time there is a non-backwards-compatible
+change to the format of the file.
+The user script should verify that the provided data is of a supported version
+and should be updated accordingly when the current version is newer than the
+supported one.

-The path of the JSON file is provided to the user script via an environment variable with a key equal to `metadata_env_var`. The user is able to specify the part of the path relative to the result directory via `metadata_path`.
+The path of the JSON file is provided to the user script via an environment
+variable with a key equal to `metadata_env_var`. The user is able to specify the
+part of the path relative to the result directory via `metadata_path`.

For example, if some rule implementation has the following code:

@@ -55,9 +77,16 @@ Then `my_script.py` will be executed as:

ACTION_METADATA=project/relative/path/to/target/action_metadata.json my_script.py --output resolved/path/to/result
```

-`my_script.py` is responsible for reading the `ACTION_METADATA` environment variable and parsing a JSON file with the action metadata.
+`my_script.py` is responsible for reading the `ACTION_METADATA` environment
+variable and parsing the JSON file with the action metadata.

-Parsed metadata provides information about inputs for the current run, but the script needs somehow to obtain similar information about inputs from the previous run. Such information could just be another output of the user script (as with the previous result, it won't be deleted when `no_outputs_cleanup = True`). The Format of such a file is an implementation detail of the user script, but at the very least it should contain a list of every source that was used to form the result and hash digests for such sources.
+Parsed metadata provides information about inputs for the current run, but the
+script somehow needs to obtain similar information about inputs from the
+previous run. Such information could just be another output of the user script
+(as with the previous result, it won't be deleted when
+`no_outputs_cleanup = True`). The format of such a file is an implementation
+detail of the user script, but at the very least it should contain a list of
+every source that was used to form the result and hash digests for such sources.

The rule implementation would look something like the following:

@@ -76,6 +105,11 @@ ctx.actions.run(

The user script would then:

-1. Parse `incremental_state.json` and delete it. Deletion prior to amending the result is important so it doesn't result in a situation where an incremental state file is out of sync with the result when the user script fails while changing the result. Such a corrupted state might lead to subsequent incorrect builds reported as "successful".
-2. Parse action metadata file, compute what is needed to update the result, and amend it accordingly.
+1. Parse `incremental_state.json` and delete it. Deleting it before amending
+   the result is important, so that a failure while changing the result does
+   not leave an incremental state file that is out of sync with the result.
+   Such a corrupted state might lead to subsequent incorrect builds being
+   reported as "successful".
+2. Parse the action metadata file, compute what is needed to update the result,
+   and amend it accordingly.
3. Calculate the new state and write it into the new `incremental_state.json`.
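+
+To make those steps concrete, the following is a minimal sketch of such a user
+script. It assumes the version-1 metadata layout (a `digests` list of
+`path`/`digest` entries, as in the example above), the `ACTION_METADATA`
+environment variable from the earlier example, and a state file named
+`incremental_state.json` kept next to the result; `amend_result` stands in for
+the action-specific update logic:
+
+```python
+import json
+import os
+import sys
+
+STATE_FILE = "incremental_state.json"
+
+def main():
+    # Locate and parse the action metadata that Buck2 wrote before running us.
+    with open(os.environ["ACTION_METADATA"]) as f:
+        metadata = json.load(f)
+    if metadata["version"] != 1:
+        sys.exit("unsupported action metadata version: %s" % metadata["version"])
+    current = {e["path"]: e["digest"] for e in metadata["digests"]}
+
+    # Step 1: parse the previous state, then delete it before amending anything.
+    previous = {}
+    if os.path.exists(STATE_FILE):
+        with open(STATE_FILE) as f:
+            previous = json.load(f)
+        os.remove(STATE_FILE)
+
+    # Step 2: work out which inputs changed or disappeared and amend the result.
+    changed = [p for p, digest in current.items() if previous.get(p) != digest]
+    removed = [p for p in previous if p not in current]
+    amend_result(changed, removed)
+
+    # Step 3: write the new state for the next run.
+    with open(STATE_FILE, "w") as f:
+        json.dump(current, f)
+
+def amend_result(changed, removed):
+    pass  # action-specific incremental update goes here
+
+if __name__ == "__main__":
+    main()
+```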
diff --git a/docs/rule_authors/local_resources.md b/docs/rule_authors/local_resources.md
index 5ccb686e197bd..7da168d3dc309 100644
--- a/docs/rule_authors/local_resources.md
+++ b/docs/rule_authors/local_resources.md
@@ -3,15 +3,29 @@ id: local_resources
title: Local Resources For Tests Execution
---

-Executing a test might require an external resource which is expensive to create. For example running an iOS UI test requires an iOS simulator and it takes relatively long time to setup it prior to test execution. When tests are executed remotely resources initialization and allocation could be preemptively managed by remote execution tier which is not the case for local execution. To effectively manage such resources needed for local execution of tests there is a separate Buck2 feature backed by `LocalResourceInfo` provider.
+Executing a test might require an external resource which is expensive to
+create. For example, running an iOS UI test requires an iOS simulator, and it
+takes a relatively long time to set one up prior to test execution. When tests
+are executed remotely, resource initialization and allocation can be
+preemptively managed by the remote execution tier, which is not the case for
+local execution. To effectively manage such resources needed for local
+execution of tests, there is a separate Buck2 feature backed by the
+`LocalResourceInfo` provider.

## `LocalResourceInfo` provider

-This provider describes how to initialize and clean up a pool of homogeneous local resources. Management of initialized resources is done by Buck2 itself when it executes tests requiring such resources.
+This provider describes how to initialize and clean up a pool of homogeneous
+local resources. Management of initialized resources is done by Buck2 itself
+when it executes tests requiring such resources.

Fields:
-* `setup` — command represented by `cmd_args` object which is executed to initialize a local resource. Running this command should write a JSON to stdout. This JSON represents a pool of local resources which are ready to be used.
-* `resource_env_vars` — key-value mapping `{str: str}` from environment variable (appended to an execution command for test which is dependent on this local resource) to keys in JSON output of `setup` command.
+
+- `setup` — a command, represented by a `cmd_args` object, which is executed to
+  initialize a local resource. Running this command should write JSON to
+  stdout. This JSON represents a pool of local resources which are ready to be
+  used.
+- `resource_env_vars` — a key-value mapping `{str: str}` from an environment
+  variable (appended to the execution command for a test which depends on this
+  local resource) to a key in the JSON output of the `setup` command.

Example JSON output of `setup` command:

@@ -26,33 +40,63 @@ Example JSON output of `setup` command:
```

JSON keys:

-* `pid` — an optional attribute which maps to a PID of a process that holds initialized local resources. If present, on non-Windows platforms the process will be sent `SIGTERM` when those resources are no longer needed. Signal should be handled to release any system resources related to local resources.
-* `resources` — a list of resource instances, each is a mapping from a string alias (e.g. `socket_address`) to a value which represents resource. The number of concurrently running tests that require resources of the same type is limited by how many instances are in a list.
String alias is mapped to an environment variable key (which will be added to a command requiring such resource) using a `resource_env_vars` field in `LocalResourceInfo` provider (see [example](#example-usage) below).
-
-## Test Execution
-
-For a general context on how tests are executed, see [Test Execution](test_execution.md).
-
-A decision whether certain local resource is required for specific test is made by a test runner. List of required resources is then passed to Buck2 in `required_local_resources` field of `ExecuteRequest2` test API protobuf message.
-If resource is required for a certain test execution and test could potentially be executed locally, `local_resources` field in test's `ExternalRunnerTestInfo` provider is used to select appropriate `LocalResourceInfo` provider.
+- `pid` — an optional attribute which maps to a PID of a process that holds
+  initialized local resources. If present, on non-Windows platforms the process
+  will be sent `SIGTERM` when those resources are no longer needed. The signal
+  should be handled to release any system resources related to the local
+  resources.
+- `resources` — a list of resource instances, each of which is a mapping from a
+  string alias (e.g. `socket_address`) to a value which represents the
+  resource. The number of concurrently running tests that require resources of
+  the same type is limited by how many instances are in the list. The string
+  alias is mapped to an environment variable key (which will be added to a
+  command requiring such a resource) using the `resource_env_vars` field in the
+  `LocalResourceInfo` provider (see [example](#example-usage) below).

-`ExternalRunnerTestInfo.local_resources` is a key-value mapping `{str: ["label", None]}`. Keys represent resource types that match the values passed from the test runner, and values are labels that should point to a target exposing the `LocalResourceInfo` provider to be used for the initialization of the resource of that type. If the value is `None`, it indicates that a resource of that type will not be provided, even if the test runner requests it.
+## Test Execution
+
-Before running a test, `setup` command from selected provider is executed and its output is used to create a pool of resource instances. This pool is shared across all tests pointing to the same configured target label containing `LocalResourceInfo` provider (normally that means pool is shared for tests requiring same resource type). A resource is acquired (with potential queuing) from that pool prior single test is executed and is returned back to the pool when test finished execution. After `buck2 test` command is finished, cleanup is performed when SIGTERM is sent to each process holding a pool of resources.
+For a general context on how tests are executed, see
+[Test Execution](test_execution.md).
+
+A decision about whether a certain local resource is required for a specific
+test is made by the test runner. The list of required resources is then passed
+to Buck2 in the `required_local_resources` field of the `ExecuteRequest2` test
+API protobuf message.
+
+If a resource is required for a certain test execution and the test could
+potentially be executed locally, the `local_resources` field in the test's
+`ExternalRunnerTestInfo` provider is used to select the appropriate
+`LocalResourceInfo` provider.
+
+`ExternalRunnerTestInfo.local_resources` is a key-value mapping
+`{str: ["label", None]}`. Keys represent resource types that match the values
+passed from the test runner, and values are labels that should point to a target
+exposing the `LocalResourceInfo` provider to be used for the initialization of
+the resource of that type. If the value is `None`, it indicates that a resource
+of that type will not be provided, even if the test runner requests it.
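+
+As a sketch, a test rule might populate this mapping as follows (the attribute
+and resource-type names here are illustrative only):
+
+```python
+ExternalRunnerTestInfo(
+    ...,
+    local_resources = {
+        # Label of a dependency exposing LocalResourceInfo (hypothetical attr).
+        "ios_simulator": ctx.attrs._simulator_resource.label,
+        # Never provide this resource type, even if the test runner asks for it.
+        "android_emulator": None,
+    },
+)
+```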
+
+Before running a test, the `setup` command from the selected provider is
+executed and its output is used to create a pool of resource instances. This
+pool is shared across all tests pointing to the same configured target label
+containing the `LocalResourceInfo` provider (normally that means the pool is
+shared for tests requiring the same resource type). A resource is acquired
+(with potential queuing) from that pool before a single test is executed and is
+returned to the pool when the test finishes execution. After the `buck2 test`
+command is finished, cleanup is performed by sending SIGTERM to each process
+holding a pool of resources.

## Example Usage

Define a target which has `LocalResourceInfo` provider:
+
```
simulator(
    name = "my_resource",
    broker = ":broker",
)
```
+
where `broker` points to a runnable handling actual simulators.

Implementation of `simulator` rule would be:
+
```
def _impl(ctx: AnalysisContext) -> ["provider"]:
    return [
@@ -65,6 +109,7 @@ def _impl(ctx: AnalysisContext) -> ["provider"]:
```

Running a `:broker` via `setup` command produces the following JSON:
+
```
{
  "pid": 42,
@@ -75,9 +120,16 @@ Running a `:broker` via `setup` command produces the following JSON:
}
```

-When Buck2 locally executes a test which requires this particular type of local resource, it reserves one resource from the pool (e.g. `{"socket_address": "bar:2"}`) and add environment variable representing this resource to execution command (e.g. `IDB_COMPANION=bar:2`). In our examples `"socket_address"` alias was substituted by ``"IDB_COMPANION"`` based on `LocalResourceInfo.resource_env_vars` field.
+When Buck2 locally executes a test which requires this particular type of local
+resource, it reserves one resource from the pool (e.g.
+`{"socket_address": "bar:2"}`) and adds an environment variable representing
+this resource to the execution command (e.g. `IDB_COMPANION=bar:2`). In our
+example, the `"socket_address"` alias was substituted with `"IDB_COMPANION"`
+based on the `LocalResourceInfo.resource_env_vars` field.

-The last part is to map a resource type to desired `LocalResourceInfo` provider. Let's assume a test runner requires a resource of type "ios_simulator" for every `apple_test` rule.
+The last part is to map a resource type to the desired `LocalResourceInfo`
+provider. Let's assume a test runner requires a resource of type
+"ios_simulator" for every `apple_test` rule.
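+
+For illustration, the `setup` end of such a broker could be sketched as follows
+(hypothetical: a real broker would boot actual simulators and hand them off to
+a long-lived holder process, rather than raising):
+
+```python
+import json
+import sys
+
+def spawn_simulator_holder():
+    # Boot the simulators, daemonize a process that keeps them alive, and
+    # return that process's PID. Implementation intentionally elided.
+    raise NotImplementedError
+
+def main():
+    holder_pid = spawn_simulator_holder()
+    pool = {
+        # Buck2 sends SIGTERM to this PID once the pool is no longer needed.
+        "pid": holder_pid,
+        # One entry per resource instance; at most this many tests needing the
+        # resource type run concurrently.
+        "resources": [
+            {"socket_address": "bar:1"},
+            {"socket_address": "bar:2"},
+        ],
+    }
+    json.dump(pool, sys.stdout)
+
+if __name__ == "__main__":
+    main()
+```
+
+With a broker along those lines in place, the remaining wiring is as follows.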
Pass `:my_resource` target as a dependency into `apple_test` rule:

@@ -92,7 +144,8 @@ apple_test = rule(
)
```

-Actually map "ios_simulator" resource type to `:broker` target containing `LocalResourceInfo` provider:
+Actually map the "ios_simulator" resource type to the `:broker` target
+containing the `LocalResourceInfo` provider:

```
def apple_test_impl(ctx: AnalysisContext) -> ["provider"]:
diff --git a/docs/rule_authors/optimization.md b/docs/rule_authors/optimization.md
index 74c690f05aebc..2197174b791ab 100644
--- a/docs/rule_authors/optimization.md
+++ b/docs/rule_authors/optimization.md
@@ -1,32 +1,62 @@
---
id: optimization
-title: Optimization
+title: Observability and Optimization
---

-Optimization involves the use of techniques for determining and improving the performance of Buck2 and specific actions performed by Buck2. This page covers the internals for developers of Buck2 and provides details of Starlark that are likely to be relevant to end users.
+import { FbInternalOnly } from 'docusaurus-plugin-internaldocs-fb/internal';
+
+Optimization involves the use of techniques for determining and improving the
+performance of Buck2 and specific actions performed by Buck2. This page covers
+the internals for developers of Buck2 and provides details of Starlark that are
+likely to be relevant to end users.

## Starlark profiling

-`buck2` supports profiling of the evaluation of specific `BUCK` files and profiling of the analysis of specific targets.
+`buck2` supports profiling of the evaluation of specific `BUCK` files and
+profiling of the analysis of specific targets.

There are three `buck2` profiling commands:

-* `buck2 profile loading`
-* `buck2 profile analysis`
-* `buck2 profile bxl`
+- `buck2 profile loading`
+- `buck2 profile analysis`
+- `buck2 profile bxl`

For example:

```shell
-buck2 profile loading --mode=heap-summary -o heap-summary.csv //some/package:
-buck2 profile analysis --mode=heap-summary -o heap-summary.csv //some/package:target
+buck2 profile loading --mode=heap-summary-allocated -o heap-summary.csv //some/package:
+buck2 profile analysis --mode=heap-summary-allocated -o heap-summary.csv //some/package:target
```

+Possible values for profiling modes are as follows:
+
+- [heap-summary-allocated](#summary-profiling): The heap profile mode provides
+  information about the time spent in each function and allocations performed by
+  each function. Enabling this mode has the side effect of disabling garbage
+  collection. This profiling mode is the recommended one.
+- heap-summary-retained: Like heap summary, but reports retained memory after
+  the module is frozen.
+- time-flame: Provides output compatible with
+  [flamegraph.pl](https://github.com/brendangregg/FlameGraph/blob/master/flamegraph.pl).
+- heap-flame-allocated: Like heap profile, but writes output compatible with
+  [flamegraph.pl](https://github.com/brendangregg/FlameGraph/blob/master/flamegraph.pl).
+- heap-flame-retained: Like heap flame, but reports retained memory after the
+  module is frozen.
+- [statement](#statement-profiling): The statement profile mode provides
+  information about time spent in each statement.
+- bytecode: The bytecode profile mode provides information about bytecode
+  instructions.
+- bytecode-pairs: The bytecode-pairs profile mode provides information about
+  bytecode instruction pairs.
+- typecheck: Profile runtime typechecking.
+
### Summary profiling

-The first profiling mode provides the time spent within a function and the allocations that are performed.
+The first profiling mode (`heap-summary-allocated`) provides the time spent +within a function and the allocations that are performed. -As an example, running over a folly BUCK file, produces a CSV file whose top-left corner is: +As an example, running over a folly BUCK file, produces a CSV file whose +top-left corner is: ```text Function Time(s) TimeRec(s) Calls Allocs @@ -40,16 +70,28 @@ type 0.435 0.435 2053296 0 This reveals the following: -* Total execution was 10.455s, which will be a bit slower than normal, because profiling is on. -* 1.163s was spent in `fbchain_configs` itself and 2.514s in that function and the things it calls. -* A disturbing 1.5M calls and 1.028s is spent testing if things are strings, which is almost certainly responsible for half the type calls. -* Happily, `is_string` doesn't allocate, but `fbchain_configs` does. Scrolling to the right, on the full CSV file (not shown), reveals it allocates 1 tuple and 2 dict per call. It can also be seen that `fbchain_configs` is mostly called by `_add_code_coverage_configs`. - -This profiling mode is implemented by turning off garbage collection, so the heap retains everything, and pushing function entry/exit entries on to the heap with the time they happen. After execution, the heap can be scanned in order to reconstruct the call tree and allocation patterns. As a result, this profile mode may consume significantly more memory. +- Total execution was 10.455s, which will be a bit slower than normal, because + profiling is on. +- 1.163s was spent in `fbchain_configs` itself and 2.514s in that function and + the things it calls. +- A disturbing 1.5M calls and 1.028s is spent testing if things are strings, + which is almost certainly responsible for half the type calls. +- Happily, `is_string` doesn't allocate, but `fbchain_configs` does. Scrolling + to the right, on the full CSV file (not shown), reveals it allocates 1 tuple + and 2 dict per call. It can also be seen that `fbchain_configs` is mostly + called by `_add_code_coverage_configs`. + +This profiling mode is implemented by turning off garbage collection, so the +heap retains everything, and pushing function entry/exit entries on to the heap +with the time they happen. After execution, the heap can be scanned in order to +reconstruct the call tree and allocation patterns. As a result, this profile +mode may consume significantly more memory. ### Statement profiling -The second profiling mode tells us which statements spent most time executing. Running it over a structured-logger `BUCK` file gives us a CSV file starting with: +The second profiling mode tells us which statements spent most time executing. +Running it over a structured-logger `BUCK` file gives us a CSV file starting +with: ```text File Span Duration(s) Count @@ -61,7 +103,8 @@ prelude.bzl 28:9-29:20 0.07 1004 ... ``` -This profile shows how much time is spent in each statement. Looking at the relevant portion of `fbode_allowed_list.bzl`: +This profile shows how much time is spent in each statement. Looking at the +relevant portion of `fbode_allowed_list.bzl`: ```python for _package in _recursive_allowlist: @@ -69,30 +112,53 @@ for _package in _recursive_allowlist: return True ``` -The `if` statement is at location 420:9-423:1 and takes 0.27s. The `if` statement runs approximately 456K times. While looking at the outer statement in the profile (not shown), it can be seen that the `for` loop is only called 3188 times, implying an average of 143 iterations per call. 
It's possible that this loop could be rewritten as some clever dictionary lookup, perhaps iterating over the path components of `_package`.
+The `if` statement is at location 420:9-423:1 and takes 0.27s. The `if`
+statement runs approximately 456K times. While looking at the outer statement in
+the profile (not shown), it can be seen that the `for` loop is only called 3188
+times, implying an average of 143 iterations per call. It's possible that this
+loop could be rewritten as some clever dictionary lookup, perhaps iterating over
+the path components of `_package`.

-Line profiling builds on top of the `before_stmt` hook that is used for debugging. It records the time each statement is entered then blames that statement for all time until the next statement. That means that sometimes, due to statements making function calls, the `return` of the function call may be 'blamed' until the next statement executes. As a result, treat the results with slight caution.
+Line profiling builds on top of the `before_stmt` hook that is used for
+debugging. It records the time each statement is entered then blames that
+statement for all time until the next statement. That means that sometimes, due
+to statements making function calls, the `return` of the function call may be
+'blamed' until the next statement executes. As a result, treat the results with
+slight caution.

### Flame profiling

-The flame profiling modes produces a `.svg` flamegraph showing either time spent or allocations.
+The flame profiling modes produce a `.svg` flamegraph showing either time spent
+or allocations. You can open it in Google Chrome and inspect the resulting
+flame graph.

-The flame profile provides a list of how time is used based on call stacks (you can download an example [here](https://www.internalfb.com/intern/px/p/1Mz2W)).
+The flame profile provides a list of how time is used based on call stacks (you
+can download an example [here](https://www.internalfb.com/intern/px/p/1Mz2W)).

## Native profiling

-* Profiling on Linux can be done with `perf record -g --call-graph=dwarf,20000 ...` and `perf report --call-graph`
-  * Don't profile the `buck2` process directly unless you are interested in profiling the CLI; you likely want to profile the `buck2` daemon process. You can find the pid with `buck2 status` and attach `perf` to that PID.
-* Profiling on Mac can be done with `Instruments` (for details, see the Wiki article [Running and Testing Builds](https://www.internalfb.com/intern/wiki/GraphQL/Build_Infra/Running_and_Testing_Builds/#profiling-the-rust-code)).
+- Profiling on Linux can be done with
+  `perf record -g --call-graph=dwarf,20000 ...` and `perf report --call-graph`
+  - Don't profile the `buck2` process directly unless you are interested in
+    profiling the CLI; you likely want to profile the `buck2` daemon process.
+    You can find the pid with `buck2 status` and attach `perf` to that PID.
+- Profiling on Mac can be done with `Instruments` (for details,
+  see the Wiki article
+  [Running and Testing Builds](https://www.internalfb.com/intern/wiki/GraphQL/Build_Infra/Running_and_Testing_Builds/#profiling-the-rust-code)).

## Benchmarking

-* If you want to do proper statistically relevant A/B testing, use `absh -a testa -b testb` (see [absh](https://github.com/stepancheg/absh) in the GitHub repository).
-* To measure the number of instructions:
-  * On Linux, use `perf stat foo`
-  * On Mac, use `/usr/bin/time -lp foo`
-* On Mac, to run something with the time profiler on the command line, use `xcrun xctrace record --template 'Time Profiler' --launch -- foo`, then `open Foo.trace` for the name of the trace file it spits out (or pass `--output` to control the output filename).
+- If you want to do proper statistically relevant A/B testing, use
+  `absh -a testa -b testb` (see [absh](https://github.com/stepancheg/absh) in
+  the GitHub repository).
+- To measure the number of instructions:
+  - On Linux, use `perf stat foo`
+  - On Mac, use `/usr/bin/time -lp foo`
+- On Mac, to run something with the time profiler on the command line, use
+  `xcrun xctrace record --template 'Time Profiler' --launch -- foo`, then
+  `open Foo.trace` for the name of the trace file it spits out (or pass
+  `--output` to control the output filename).
diff --git a/docs/rule_authors/package.md b/docs/rule_authors/package.md
new file mode 100644
index 0000000000000..cf12e1ec76ba3
--- /dev/null
+++ b/docs/rule_authors/package.md
@@ -0,0 +1,129 @@
+---
+id: package_files
+title: PACKAGE Files
+---
+
+`PACKAGE` files are per-directory configuration files which are accessible from
+Starlark rules/macros. They support things like per-directory properties,
+reading parent `PACKAGE` values (`read_parent_package_value()`), writing
+`PACKAGE` values (`write_package_value()`), and loading helper `bzl` files; you
+can also inspect `PACKAGE` values via `buck2 audit package-values`.
+
+Before evaluating a `BUCK` file, buck2 will evaluate all `PACKAGE` files in the
+same directory and all parent directories. Absent `PACKAGE` files are treated as
+empty files.
+
+All relevant `PACKAGE` files are executed sequentially from the root directory
+to the current directory (but unrelated `PACKAGE` files can be executed in
+parallel). Evaluating `PACKAGE` files sequentially provides additional
+guarantees; for example, an attempt to override a property (unless explicitly
+requested) should fail with a Starlark call stack.
+
+Each `PACKAGE` file is evaluated at most once (like `bzl` files).
+
+`PACKAGE` files may load arbitrary `bzl` files. `BUCK`-specific functions called
+in `bzl` files (like rule functions) are available, but calling them from
+`PACKAGE` files is an error. This way, `bzl` files are evaluated only once
+regardless of whether they are loaded from a `PACKAGE` or `BUCK` file.
+
+## APIs
+
+### `PACKAGE` APIs
+
+#### [`write_package_value`](../../api/build#write_package_value)
+
+```python
+def write_package_value(
+    name: str,
+    value: "",
+    overwrite: bool = False,
+): ...
+```
+
+This global API is only available in `PACKAGE` files, or `bzl` files included in
+`PACKAGE` files.
+
+`name` is a string which must contain exactly one dot symbol (just to enforce
+code style).
+
+`value` is an arbitrary Starlark value, for example, an integer, a list of
+integers, a struct or a function. The value must be serializable into JSON.
+
+When `overwrite` is `False` (default), an attempt to overwrite a per-`PACKAGE`
+value defined in a parent `PACKAGE` file will fail.
+
+Written values are frozen when `PACKAGE` file evaluation is finished.
+
+Note that the `write_package_value` symbol exists in `bzl` globals, and it can
+be called from a `bzl` file in the context of `PACKAGE` evaluation, but calling
+`write_package_value` in the context of `BUCK` evaluation is an error.
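+
+As a quick sketch, a `PACKAGE` file might record a per-directory default that
+`BUCK` files in the same package later read back via `read_package_value`
+(described below); the property name `ci.default_os` is illustrative:
+
+```python
+# some/dir/PACKAGE
+write_package_value("ci.default_os", "linux")
+```
+
+```python
+# some/dir/BUCK (or a bzl file it loads)
+default_os = read_package_value("ci.default_os")  # "linux", or None if unset
+```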
+
+Modifying a `PACKAGE` file logically invalidates the `BUCK` file of this
+directory, and all `PACKAGE` and `BUCK` files of sub-`PACKAGE`s. However, `BUCK`
+file evaluation may track which `PACKAGE`-local values were accessed and only
+invalidate `BUCK` files which were potentially affected (similarly to how we do
+it with buckconfigs).
+
+#### [`read_parent_package_value`](../../api/build#read_parent_package_value)
+
+```python
+def read_parent_package_value(
+    key: str,
+): ...
+```
+
+This global API is only available in `PACKAGE` files, or `bzl` files included in
+`PACKAGE` files.
+
+This function returns the `PACKAGE` value defined in a parent `PACKAGE` file, or
+`None` if such a value does not exist.
+
+This function is available in `PACKAGE` files, but an attempt to call it in the
+context of `bzl` file evaluation results in an error.
+
+#### [`package`](../../api/build#package)
+
+```python
+def package(
+    inherit: bool = False,
+    visibility: list[str] | tuple[str, ...] = [],
+    within_view: list[str] | tuple[str, ...] = []
+) -> None
+```
+
+This global API is only available in `PACKAGE` files, or `bzl` files included in
+`PACKAGE` files.
+
+`visibility` is a list of visibility patterns to apply to all targets contained
+within the directory, unless the target defines its own visibility patterns.
+
+`within_view` is a list of visibility patterns restricting what all targets
+contained within the `PACKAGE` directory can depend on. Applies to first-order
+deps, and not transitive deps.
+
+If `inherit` is `True`, then the `visibility` and `within_view` will be
+inherited from the nearest parent `PACKAGE`.
+
+#### [`read_config`](../../api/build#read_config)
+
+`PACKAGE` files are able to call `read_config` to read buckconfigs.
+
+### `BUCK`-specific API
+
+#### [`read_package_value`](../../api/build#read_package_value)
+
+```python
+def read_package_value(
+    name: str,
+): ...
+```
+
+This global API is only available in `BUCK` files, or `bzl` files included in
+`BUCK` files.
+
+This function returns the nearest `name` value registered per `PACKAGE`, or
+`None` if such a value does not exist.
+
+This function is available in `bzl` files, but an attempt to call it in the
+context of `PACKAGE` file evaluation results in an error. This restriction can
+be lifted in the future.
diff --git a/docs/rule_authors/rule_api.md b/docs/rule_authors/rule_api.md
deleted file mode 100644
index 2351ccb0c0287..0000000000000
--- a/docs/rule_authors/rule_api.md
+++ /dev/null
@@ -1,142 +0,0 @@
----
-id: rule_api
-title: Rule APIs
----
-
-When implementing a rule, you are given a value of type `context` and are expected to produce providers. This page details those providers and the attributes and APIs that they offer.
-
-## Providers
-
-* `DefaultInfo(default_outputs : ["artifact"], other_outputs : [["artifact", "cmd_args"]] = [], sub_targets : {str: ["provider"]} = {})` - the provider that is used for:
-  * `buck2 build` - builds everything in `default_outputs` and `other_outputs`.
-  * `$(location)` - uses the `default_outputs`.
-  * `buck2 build my_target[foo]` - selects the `foo` value from `sub_targets`.
-  * **Note**: if you use `cmd_args` in `other_outputs`, then it will expand to all the inputs referenced by the `cmd_args` you provide.
-* `RunInfo(args)` - used for `buck2 run`, where `args` is anything that can be converted into `cmd_args`, including a command line itself.
-* `ExternalRunnerTestInfo(...)` - for details, see [Test Execution](test_execution.md).
- -## Type `context` - -The starting type, usually bound as `ctx`. - -* `ctx.attrs` - returns the attributes of the target as a Starlark struct with a field for each attribute, which varies per rule. -* `ctx.actions` - returns `actions` allowing you to define actions. -* `ctx.label` - returns a `label` representing the target. - -## Type `actions` - -Most output filenames can either be artifacts created with `declare_output` or strings that are implicitly converted to output artifacts. - -* `ctx.actions.declare_output([prefix], filename, dir = False)` - returns an `artifact` with the name `filename`, which when asked for its name, will return `filename` (which may include a directory portion). - * `prefix` (optional) - provides a silent part of the filename, which can be used to disambiguate but whose presence will not be visible to anyone using the `artifact`. By default, outputs are considered files; pass `dir = True` to indicate it is a directory. - * `declare_output` - mainly used to produce an unbound artifact for passing to `ctx.actions.run`. - -* `ctx.actions.write(filename, content, is_executable : bool = false, allow_args : bool = false)` - returns an `artifact` whose contents are `content`. - * `filename` - can be a string or an existing artifact created with `declare_output`. - * `is_executable` (optional) - indicates whether the resulting file should be marked with executable permissions. - * `allow_args` (optional) - must be set to `True` if you want to write parameter arguments to the file (in particular, macros that write to file). - * If it is true, the result will be a pair of the `artifact` containing `content` and a list of `artifact` values that were written by macros, which should be used in hidden fields or similar. - -* `ctx.actions.write_json(filename, content, with_inputs = False)` - returns an `artifact` whose contents are `content` written as a JSON value. - * `filename` - can be a string, or an existing artifact created with `declare_output`. - * `content` - must be composed of the basic json types (Boolean, number, string, list/tuple, dictionary) plus artifacts and command lines. - * An artifact will be written as a string containing the path. - * A command line will be written as a list of strings, unless `joined=True` is set, in which case it will be a string. - * If you pass `with_inputs = True`, you'll get back a `cmd_args` that expands to the JSON file but carries all the underlying inputs as dependencies (so you don't have to use, for example, `hidden` for them to be added to an action that already receives the JSON file). - -* `ctx.actions.copy_file(dest, src)` - copies the source `artifact` to the destination (which can be a string representing a filename or an output `artifact`) and returns the output `artifact`. The copy works for files or directories. - -* `ctx.actions.symlink_file(dest, src)` - creates a symlink to the source `artifact` at the destination (which can be a string representing a filename or an output `artifact`) and returns the output `artifact`. The symlink works for files or directories. - -* `ctx.actions.symlinked_dir(output, srcs : {str: "artifact"})` - returns an artifact that is a directory containing symlinks. The `srcs` must be a dictionary of path (as string, relative to the result directory) to bound `artifact,` which will be laid out in the directory. - -* `ctx.actions.copied_dir(output, srcs : {str: "artifact"}, copy : bool = false)` - returns an artifact which is a directory containing copied files. 
The `srcs` must be a dictionary of path (as string, relative to the result directory) to the bound `artifact`, which will be laid out in the directory. - -* `ctx.actions.download_file(output, url : str, sha1: str, is_executable : bool = false)` - downloads a URL to an output (filename as string or output `artifact`). The file at the URL must have the given `sha1` or the command will fail. The optional parameter `is_executable` indicates whether the resulting file should be marked with executable permissions. - -* `ctx.actions.run(arguments, category : str, identifier : str = "", env : {str: str} = {}, local_only : bool = false, always_print_stderr : bool = false, weight : int = 1, metadata_env_var: str = None, metadata_path: str = None, no_outputs_cleanup: bool = false)` - runs a command. - * `arguments` - must be of type `cmd_args`, or a type convertible to such (such as a list of strings and artifacts) and must contain at least one `.as_output()` artifact. - * `category` and `identifier` - when used together, identify the action in Buck2's event stream, and must be unique for a given target. - * `weight` is used to note how heavy the command is and will typically be set to a higher value to indicate that less such commands should be run in parallel (if running locally). - * `no_outputs_cleanup` - if this flag is set then Buck2 won't clean the outputs of a previous build that might be present on a disk; in which case, command from `arguments` should be responsible for the cleanup (that is useful, for example, when an action is supporting incremental mode and its outputs are based on result from a previous build). - * `metadata_env_var` and `metadata_path` - both should either be set or unset. - * `metadata_path` defines a path relative to the result directory for a file with action metadata, which will be created right before the command will be run. - * Metadata contains the path relative to the Buck2 project root and hash digest for every action input (this excludes symlinks as they could be resolved by a user script if needed). The resolved path relative to the Buck2 project for the metadata file will be passed to command from `arguments`, via the environment variable, with its name set by `metadata_env_var`. - * Both `metadata_env_var` and `metadata_path` are useful when making actions behave in an incremental manner (for details, see [Incremental Actions](./incremental_actions.md)) - -* `ctx.actions.tset(type, value = None, children = None)` - creates a new transitive set (for details, see [Transitive Sets](./transitive_sets.md)). - -* `ctx.actions.cas_artifact(output, digest : str, use_case: str, expires_after_timestamp: int, is_executable : bool = false)` - downloads a CAS artifact to an output. - * `digest` - must look like `SHA1:SIZE`. - * `use_case` - your RE use case. - * `expires_after_timestamp` - must be a UNIX timestamp. Your digest's TTL must exceed this timestamp. Your build *will* break once the digest expires, so make sure the expiry is long enough (preferably, in years). - * `is_executable` (optional) - indicates the resulting file should be marked with executable permissions. - -## Type `cmd_args` - -The `cmd_args` type is created by `cmd_args` and is consumed by `ctx.actions.run`. The type is a mutable collection of strings and `artifact` values. In general, command lines, artifacts, strings, `RunInfo` and lists thereof can be added to or used to construct a `cmd_args` value. All these methods operate mutably on `cmd` and return that value too. 
- -* `cmd_args(*args, format: str = "", delimiter: str = None, prepend: str = None, quote: str = None)` - creates and returns a `cmd_args` type. - * `*args` - a list of things to add to the command line, each of which must be coercible to a command line. Further items can be added with `cmd.add`. - * `format` (optional) - a string that provides a format to apply to the argument. for example, `cmd_args(x, format="--args={}")` would prepend `--args=` before `x`, or if `x` was a list, before each element in `x`. - * `delimiter` (optional) - added between arguments to join them together. For example, `cmd_args(["--args=",x], delimiter="")` would produce a single argument to the underlying tool. - * `prepend` (optional) - added as a separate argument before each argument. - * `quote` (optional) - indicates whether quoting is to be applied to each argument. Note: the only current valid value is `"shell"`. - -* `cmd.add(*args)` - a list of arguments to be added to the command line, as per `cmd_args`. - -* `cmd.hidden(*args)` - things to add to the command line which do not show up but are added as dependencies. - -* `cmd.ignore_artifacts()` - conceptually the opposite of `hidden()`. It causes none of the arguments of the command line to be added as dependencies. - * Use this if you need the path to an artifact but *not* the artifact itself. - * Note: if you do find yourself needing any of the inputs referenced by this command, you will hit build errors due to missing dependencies. - -* `cmd.relative_to(directory, parent : int = 0)` - complex magic. Before using this, please contact Meta's Buck2 team. - -* `cmd.absolute_prefix(prefix : str)`- adds a prefix to the front of every artifact. - -* `cmd.absolute_suffix(suffix : str)` - adds a suffix to the end of every artifact. - -* `cmd.parent(count : int = 1)` - uses the parent of all given artifacts. Often used as `cmd_args(artifact, format="-L{}").parent()`. - -* `cmd.replace_regex(pattern : str, replacement : str)` - replaces all parts matching `pattern` regular expression in each argument with `replacement` string. Several replacements can be added by multiple `replace_regex` calls. - -* `cmd.copy()` - returns a copy of the `cmd_args` such that any modifications to the original or the returned value will not impact each other. - -* `cmd.inputs` - returns a list of the artifacts that are inputs to this command line. - -* `cmd.outputs` - returns a list of the artifacts that are outputs of this command line. - -## Type `label` - -A label represents a configured target. - -For example, the label `fbcode//buck2/hello:world (ovr_config//platform/linux:x86_64-fbcode-46b26edb4b80a905)` has the following attributes: - -* `package` gives back `buck2/hello` -* `name` gives back `world` -* `sub_target` gives back `None` -* `path` gives back `fbcode/buck2/hello` -* `cell` gives back `fbcode` -* `raw_target()` gives back `fbcode//buck2/hello:world` without the configuration - -## Type `artifact` - -An artifact, which has a location on disk. Some of that location is considered private, and some (the suffix) is available for use. - -The examples below assume an artifact such as one created with `ctx.actions.declare_output("hello/world.txt")`. It has the following attributes: - -* `basename` gives back `world.txt` -* `extension` gives back `.txt` -* `is_source` - `True` if the artifact is a source, otherwise `False`. -* `owner` gives back a `label` representing the rule that created it (if it is a build output) or `None` (if it is a source). 
-* `as_output()` gives a value suitable for setting as an output to `ctx.actions.run`.
-* `short_path` gives back `hello/world.txt`
-
-### Projected artifacts
-
-Artifacts can be *projected* via the `project()` method. Projecting an artifact yields a path within it.
-
-For example, if artifact `foo` is a directory containing a file `bar`, then `foo.project("bar")` yields the file `bar`.
-
-It is possible for projected artifacts to hide the prefix in order to have the short name of the resulting artifact only contain the projected path, by passing `hide_prefix = True` to `project()`.
diff --git a/docs/rule_authors/test_execution.md b/docs/rule_authors/test_execution.md
index 0c308e10716b3..f36e24adaf468 100644
--- a/docs/rule_authors/test_execution.md
+++ b/docs/rule_authors/test_execution.md
@@ -3,87 +3,132 @@ id: test_execution
title: Test Execution
---

+import { FbInternalOnly, OssOnly } from
+'docusaurus-plugin-internaldocs-fb/internal';
+
Test execution in Buck2 is a collaboration with a separate test runner process.

In its open-source build, Buck2 ships with a built-in simplistic test runner.

-This test runner receives the commands defined by `ExternalRunnerTestInfo` and simply executes them. Exit code zero means the test passed, and one means it failed.
+This test runner receives the commands defined by `ExternalRunnerTestInfo` and
+simply executes them. Exit code zero means the test passed, and one means it
+failed.

-Users can of course develop their own test runners. Look at `fbcode/buck2/app/buck2_test_runner` as a sample. For comparison, here's how it's used at Meta:
+Users can of course develop their own test runners. Look at
+`fbcode/buck2/app/buck2_test_runner` as a sample. For comparison, here's how
+it's used at Meta:

-At Meta, this test runner is Tpx [Tpx](https://www.internalfb.com/intern/wiki/TAE/tpx/).
-
-Tpx has a large number of responsibilities when used with Buck2, which can be grouped as follows:
-
-* **Translation**:
-  * Understands the output formats of various supported test frameworks. This is used to identify test cases and collect test results.
-  * Understands, to an extent, the input formats. For example, given a test case, Tpx can identify what command needs to run to execute just that test.
-* **Orchestration**:
-  * Interacts with Test Infra to discover what tests should run, under a number of configurations.
-  * Separates listing of tests (identifying what tests exists in a test target) and execution (running specific tests within that target).
-  * Coordinates the execution of tests. For example, it may request retries, or choose to bundle multiple tests in a single execution (or not).
-  * Reports test results to Test Infra as well.
-
-In Buck2, rules interact with the test runner via a provider called `ExternalRunnerTestInfo`.
+At Meta, this test runner is
+[Tpx](https://www.internalfb.com/intern/wiki/TAE/tpx/).
+
+Tpx has a large number of responsibilities when used with Buck2, which can be
+grouped as follows:
+
+- **Translation**:
+  - Understands the output formats of various supported test frameworks. This is
+    used to identify test cases and collect test results.
+  - Understands, to an extent, the input formats. For example, given a test
+    case, Tpx can identify what command needs to run to execute just that test.
+- **Orchestration**:
+  - Interacts with Test Infra to discover what tests should run, under a number
+    of configurations.
+  - Separates listing of tests (identifying what tests exist in a test target)
+    and execution (running specific tests within that target).
+  - Coordinates the execution of tests. For example, it may request retries, or
+    choose to bundle multiple tests in a single execution (or not).
+  - Reports test results to Test Infra as well.
+
+In Buck2, rules interact with the test runner via a provider called
+`ExternalRunnerTestInfo`.

## Anatomy of a test run

When a user runs `buck2 test $targets`:

-* Buck2 identifies all matching targets that have an `ExternalRunnerTestInfo`.
-* Buck2 builds all the artifacts referenced by those targets (this will likely change eventually to build them only if they are used).
-* Buck2 then notifies the test runner that those tests exist. Currently, the test runner receives a subset of `ExternalRunnerTestInfo`.
-* The test runner can request command execution from Buck2 to list and execute tests.
-* When it receives command results from Buck2, the test runner may fire off events that the end-user will see (such as test results), upload logs externally, request further executions, and so on.
-
+- Buck2 identifies all matching targets that have an `ExternalRunnerTestInfo`.
+- Buck2 builds all the artifacts referenced by those targets (this will likely
+  change eventually to build them only if they are used).
+- Buck2 then notifies the test runner that those tests exist. Currently, the
+  test runner receives a subset of `ExternalRunnerTestInfo`.
+- The test runner can request command execution from Buck2 to list and execute
+  tests.
+- When it receives command results from Buck2, the test runner may fire off
+  events that the end-user will see (such as test results), upload logs
+  externally, request further executions, and so on.
+
+:::note
+
If more than one target is being built, test building and execution will proceed concurrently.
+
+:::

## Information available on `ExternalRunnerTestInfo`

-As noted, rules communicate their testing capabilities via `ExternalRunnerTestInfo`. There are a number of fields available on `ExternalRunnerTestInfo` to control how a given target is tested, as detailed in the following sub-sections.
+As noted, rules communicate their testing capabilities via
+`ExternalRunnerTestInfo`. There are a number of fields available on
+`ExternalRunnerTestInfo` to control how a given target is tested, as detailed in
+the following sub-sections.

### Fields exposed to the test runner

-The following list shows what is available in `ExternalRunnerTestInfo`, with which the test runner can interact:
-
-* `type` - a string key that defines the type of test this is.
-
-  Tpx uses this internally to choose a translator. Examples include `gtest`, `apple_test`, `custom`.
-  Note that Tpx also allows labels to influence the orchestrator selection.
-
-* `command` and `env` - respectively, a list and a key-value mapping of arguments.
-  These are the inputs to translation in Tpx.
-  They are not always visible to the test runner (for more details, see [Verbatim arguments and handles](#verbatim-arguments-and-handles), below).
-* `labels` - a set of string labels to pass to the test runner.
-
-  They have no meaning to Buck2, but some labels have impact on translation in Tpx.
-
-* `contacts` - a list of contacts for the tests; usually oncalls.
-* `executor_overrides` - a key-value mapping of executor configurations that the test runner can use when requesting execution from Buck2.
-* `local_resources` - a key-value mapping from resource type to optional `LocalResourceInfo` provider. Provider is used for initialization of that resource type. If the value is `None` resource type is ignored even though test runner required it. For context see [Local Resources For Tests Execution](local_resources.md).
+The following list shows what is available in `ExternalRunnerTestInfo`, with
+which the test runner can interact:
+
+- `type` - a string key that defines the type of test this is.
+  Tpx uses this internally to choose a translator. Examples include `gtest`,
+  `apple_test`, `custom`. Note that Tpx also allows labels to influence the
+  orchestrator selection.
+- `command` and `env` - respectively, a list and a key-value mapping of
+  arguments. These are the inputs to translation in Tpx. They are not always
+  visible to the test runner (for more details, see
+  [Verbatim arguments and handles](#verbatim-arguments-and-handles), below).
+- `labels` - a set of string labels to pass to the test runner.
+  They have no meaning to Buck2, but some labels have impact on translation in
+  Tpx.
+- `contacts` - a list of contacts for the tests; usually oncalls.
+- `executor_overrides` - a key-value mapping of executor configurations that the
+  test runner can use when requesting execution from Buck2.
+- `local_resources` - a key-value mapping from resource type to an optional
+  `LocalResourceInfo` provider. The provider is used for initialization of that
+  resource type. If the value is `None`, the resource type is ignored even if
+  the test runner requires it. For context, see
+  [Local Resources For Tests Execution](local_resources.md).

### Fields pertinent for Remote Execution

-For compatibility with Remote Execution (RE), there are two fields that rules should set in their `ExternalRunnerTestInfo` if they should be run on RE:
+For compatibility with Remote Execution (RE), there are two fields that rules
+should set in their `ExternalRunnerTestInfo` if they should be run on RE:

-* `use_project_relative_paths` - if `true` (the default is `false` `true`),
-  Buck2 will produce relative paths. If not, it'll produce absolute paths.
-* `run_from_project_root` - if `true` (the default is `false` `true`),
-  tests will run from the project root (their `cwd` will be the project root, which is the same as all build commands). If `false`, it'll be the cell root.
+- `use_project_relative_paths` - if `true` (the default is
+  <FbInternalOnly>`false`</FbInternalOnly><OssOnly>`true`</OssOnly>), Buck2
+  will produce relative paths. If not, it'll produce absolute paths.
+- `run_from_project_root` - if `true` (the default is
+  <FbInternalOnly>`false`</FbInternalOnly><OssOnly>`true`</OssOnly>), tests
+  will run from the project root (their `cwd` will be the project root, which is
+  the same as all build commands). If `false`, it'll be the cell root.

-Note that passing `--unstable-allow-all-tests-on-re` to `buck2 test` will override those fields and set them to `true`, since they are a pre-requisite to run on RE. In contrast, passing `--unstable-allow-compatible-tests-on-re` will only allow tests that already set both those fields to `true` to execute on RE.
+Note that passing `--unstable-allow-all-tests-on-re` to `buck2 test` will
+override those fields and set them to `true`, since they are a pre-requisite to
+run on RE. In contrast, passing `--unstable-allow-compatible-tests-on-re` will
+only allow tests that already set both those fields to `true` to execute on RE.

-Also note that when `executor_overrides` are set, if an executor override is used and results in execution on RE, it'll happen on RE unconditionally.
### Fields pertinent for Remote Execution

-For compatibility with Remote Execution (RE), there are two fields that rules should set in their `ExternalRunnerTestInfo` if they should be run on RE:
+For compatibility with Remote Execution (RE), there are two fields that rules
+should set in their `ExternalRunnerTestInfo` if they should be run on RE:

-* `use_project_relative_paths` - if `true` (the default is `false` in open source and `true` internally), Buck2 will produce relative paths. If not, it'll produce absolute paths.
-* `run_from_project_root` - if `true` (the default is `false` in open source and `true` internally), tests will run from the project root (their `cwd` will be the project root, which is the same as all build commands). If `false`, it'll be the cell root.
+- `use_project_relative_paths` - if `true` (the default is `false` in open
+  source and `true` internally), Buck2 will produce relative paths. If not,
+  it'll produce absolute paths.
+- `run_from_project_root` - if `true` (the default is `false` in open source
+  and `true` internally), tests will run from the project root (their `cwd`
+  will be the project root, which is the same as all build commands). If
+  `false`, it'll be the cell root.

-Note that passing `--unstable-allow-all-tests-on-re` to `buck2 test` will override those fields and set them to `true`, since they are a pre-requisite to run on RE. In contrast, passing `--unstable-allow-compatible-tests-on-re` will only allow tests that already set both those fields to `true` to execute on RE.
+Note that passing `--unstable-allow-all-tests-on-re` to `buck2 test` will
+override those fields and set them to `true`, since they are a pre-requisite to
+run on RE. In contrast, passing `--unstable-allow-compatible-tests-on-re` will
+only allow tests that already set both those fields to `true` to execute on RE.

-Also note that when `executor_overrides` are set, if an executor override is used and results in execution on RE, it'll happen on RE unconditionally. Therefore, it's a good idea to set those fields if RE-only executor overrides are provided.
+Also note that when `executor_overrides` are set, if an executor override is
+used and results in execution on RE, it'll happen on RE unconditionally.
+Therefore, it's a good idea to set those fields if RE-only executor overrides
+are provided.

## Verbatim arguments and handles

-As noted above, the test runner only interacts with a subset of arguments provided by rules in `ExternalRunnerTestInfo`. The reason for this is that the test runner doesn't get to access, for example, artifacts, that Buck2 knows about.
+As noted above, the test runner only interacts with a subset of arguments
+provided by rules in `ExternalRunnerTestInfo`. The reason for this is that the
+test runner doesn't get to access, for example, artifacts that Buck2 knows
+about.

Consider the following example:

@@ -92,9 +137,14 @@ binary = ctx.attrs.dep[RunInfo]
test_info = ExternalRunnerTestInfo(command = [binary, "run-tests"], ...)
```

-When Buck2 actually runs this command, `binary` is expanded to a path (and possibly to more args). Buck2 would also account for any hidden arguments and make those available where the command is executed. It is important for Buck2 to retain this capability when running with the test runner.
+When Buck2 actually runs this command, `binary` is expanded to a path (and
+possibly to more args). Buck2 would also account for any hidden arguments and
+make those available where the command is executed. It is important for Buck2 to
+retain this capability when running with the test runner.

-To that end, all non-trivial arguments present in `command` (and in the values of `env`), such as `cmd_args` or `RunInfo`, are exposed to the test runner as opaque handles, and simple string arguments are passed as-is to the test runner.
+To that end, all non-trivial arguments present in `command` (and in the values
+of `env`), such as `cmd_args` or `RunInfo`, are exposed to the test runner as
+opaque handles, and simple string arguments are passed as-is to the test runner.

This means that the test runner would see the command described above as:

@@ -102,13 +152,21 @@ This means that the test runner would see the command described above as:
[ArgHandle(index = 0), Verbatim("foobar")]
```

-When requesting execution from Buck2, the test runner can use the `ArgHandle` and Buck2 will swap it back for the underlying value that was set on the provider.
+When requesting execution from Buck2, the test runner can use the `ArgHandle`
+and Buck2 will swap it back for the underlying value that was set on the
+provider.

-This allows the test runner to introspect and modify parts of the command lines it receives, as long as it doesn't need to access the actual text value of non-verbatim arguments. Usually, this works out to be sufficient (or can be made sufficient with a bit of refactoring in the test runner).
+This allows the test runner to introspect and modify parts of the command lines
+it receives, as long as it doesn't need to access the actual text value of
+non-verbatim arguments. Usually, this works out to be sufficient (or can be made
+sufficient with a bit of refactoring in the test runner).

## Execution Configurations

-By default, tests execute using the execution configuration of the associated target. This is the execution configuration that would be used for run actions (`ctx.actions.run`) declared in the same target.
This is a default that actually makes little sense but works out as long as cross-compiling is not the norm. +By default, tests execute using the execution configuration of the associated +target. This is the execution configuration that would be used for run actions +(`ctx.actions.run`) declared in the same target. This is a default that actually +makes little sense but works out as long as cross-compiling is not the norm. @@ -116,13 +174,18 @@ That said, it's easy to see where this breaks down. For example: -* For iOS tests, the execution platform for builds needs to be XCode (local or RE Mac). -* For test listing, XCode is not needed (it's preferable to do it on RE Linux where capacity is cheaper). -* To run the tests, a simulator is required. +- For iOS tests, the execution platform for builds needs to be Xcode (local or + RE Mac). +- For test listing, Xcode is not needed (it's preferable to do it on RE Linux + where capacity is cheaper). +- To run the tests, a simulator is required. -To support this, `ExternalRunnerTestInfo` allows specifying override platforms, which are given a name. The test runner can request execution on them by passing their name when it sends execution requests to Buck2, as shown in the following code: +To support this, `ExternalRunnerTestInfo` allows specifying override platforms, +which are given a name. The test runner can request execution on them by passing +their name when it sends execution requests to Buck2, as shown in the following +code: ```python ExternalRunnerTestInfo( @@ -171,4 +234,5 @@ Tests can be run from the cell root by setting `run_from_project_root = False`. As noted above, tests run from the cell root unless `run_from_project_root` is set. -To produce paths relative to the cell root for use by tests, use `relative_to(ctx.label.cell_root)` on `cmd_args`. +To produce paths relative to the cell root for use by tests, use +`relative_to(ctx.label.cell_root)` on `cmd_args`. diff --git a/docs/rule_authors/transitive_sets.md b/docs/rule_authors/transitive_sets.md index 60cb2c6875ec9..658127983d749 100644 --- a/docs/rule_authors/transitive_sets.md +++ b/docs/rule_authors/transitive_sets.md @@ -3,21 +3,23 @@ id: transitive_sets title: Transitive Sets --- -Transitive sets enable the propagation of data up dependency trees in a -manner that is both efficient in Starlark code (low cost of creation, -low memory usage) and efficient for execution by Buck (edges can be -shared instead of having each action depend directly on all its inputs). +import { FbInternalOnly } from 'docusaurus-plugin-internaldocs-fb/internal'; + +Transitive sets enable the propagation of data up dependency trees in a manner +that is both efficient in Starlark code (low cost of creation, low memory usage) +and efficient for execution by Buck (edges can be shared instead of having each +action depend directly on all its inputs). Examples of where transitive sets are useful include: -* Propagating transitive link-time dependencies of a library all the way - to a binary to build. -* Propagating transitive compile-time headers. +- Propagating transitive link-time dependencies of a library all the way to a + binary to build. +- Propagating transitive compile-time headers. 
## Rule API -First, you need to declare your transitive set type, then you can use -it, as follows: +First, you need to declare your transitive set type, then you can use it, as +follows: ```starlark # This is the type @@ -28,25 +30,25 @@ set1 = ctx.actions.tset(MySet, value = "foo") set2 = ctx.actions.tset(MySet, value = "bar", children = [set1]) ``` -Values are optional, and so are children. This means you can have a set -with no value and sets with no children. +Values are optional, and so are children. This means you can have a set with no +value and sets with no children. ## Projections: using transitive sets in command lines Sets aren't useful unless you can use their contents! -To use a set in a command line, you use a concept called a 'projection', -which defines how to turn individual values found in the set into -command line arguments. +To use a set in a command line, you use a concept called a 'projection', which +defines how to turn individual values found in the set into command line +arguments. -To define a projection, you write a function that takes a value of your -set and returns a command-line like -object (`cmd_args`, `string`, `attr.arg()` attributes, `artifact`, and -so on) or a list of them in whichever way makes sense for your use case. +To define a projection, you write a function that takes a value of your set and +returns a command-line like object (`cmd_args`, `string`, `attr.arg()` +attributes, `artifact`, and so on) or a list of them in whichever way makes +sense for your use case. Then, you call `project_as_args` to turn a set into a value suitable for -inclusion in a command line. When expanded, this projection will expand -like a list of all the node's individual projected values. +inclusion in a command line. When expanded, this projection will expand like a +list of all the node's individual projected values. Following is an example: @@ -69,20 +71,19 @@ args = set2.project_as_args("define") When you use `args` in a command line, it will expand to `-Dbar -Dfoo`. -Note that creating projections is very cheap. Notably, it is independent -of the size of the set. +Note that creating projections is very cheap. Notably, it is independent of the +size of the set. ## Projections: using transitive sets in write_json() -As with command lines, sets can form json projections to be used in -write_json. +As with command lines, sets can form json projections to be used in write_json. -A json projection is defined in the same way as an arg projection. The -function should return a value that `write_json` otherwise supports. -Then, you call `project_as_json` to turn a set into a value that can be -passed to `write_json` (or can appear within the value passed to it, it -doesn't need to be the top-level value). When expanded, the projection -will expand like a list of all the node's individual projected values. +A json projection is defined in the same way as an arg projection. The function +should return a value that `write_json` otherwise supports. Then, you call +`project_as_json` to turn a set into a value that can be passed to `write_json` +(or can appear within the value passed to it, it doesn't need to be the +top-level value). When expanded, the projection will expand like a list of all +the node's individual projected values. 
Following is an example: @@ -103,18 +104,17 @@ set2 = ctx.actions.tset(MySet, value = "bar", children = [set1]) args = set2.project_as_json("define") ``` -Note that if your projected values include (or may include) artifacts, -you will likely want to use `write_json(with_inputs=True)` to get back a -cmd_args that has all the artifacts in the json structure already in -its `.hidden`. +Note that if your projected values include (or may include) artifacts, you will +likely want to use `write_json(with_inputs=True)` to get back a cmd_args that +has all the artifacts in the json structure already in its `.hidden`. ### Traversals in depth -Transitive sets form DAGs. Notably, this means individual nodes can -exist more than once in a given transitive set. +Transitive sets form DAGs. Notably, this means individual nodes can exist more +than once in a given transitive set. -When a transitive set is traversed, nodes that have already been visited -are skipped. This means their arguments will only be emitted once. +When a transitive set is traversed, nodes that have already been visited are +skipped. This means their arguments will only be emitted once. For example: @@ -136,25 +136,25 @@ set3 = ctx.actions.tset(MySet, value = "qux", children = [set1, set2]) args = set3.project_as_args("define") ``` -This will expand to `-Dqux -Dfoo -Dbar`, even though `set1` (`"foo"`) -shows up twice in the DAG. +This will expand to `-Dqux -Dfoo -Dbar`, even though `set1` (`"foo"`) shows up +twice in the DAG. ## Other APIs ### Transitive set reductions -You can aggregate values of a transitive set via a reduction. This can -be helpful for tasks such as propagating Boolean flags up the tree. +You can aggregate values of a transitive set via a reduction. This can be +helpful for tasks such as propagating Boolean flags up the tree. Following is a real-world example. -When defining a reduction, you receive the reduced values of all your -children, and an optional value for the current node (the value will -be `None` when you create a set and you don't pass a `value`), and you -need to merge them together to produce this node's value: +When defining a reduction, you receive the reduced values of all your children, +and an optional value for the current node (the value will be `None` when you +create a set and you don't pass a `value`), and you need to merge them together +to produce this node's value: ```starlark -def link_info_has_default_filelist(children: ["bool"], infos: ["LinkInfos", None]): +def link_info_has_default_filelist(children: list[bool], infos: LinkInfos | None): if infos: info = infos.default if info.filelist: @@ -171,16 +171,15 @@ LinkInfosTSet = transitive_set( ### Transitive set iteration -You *can* iterate over a transitive set. This will yield each value -once. You can also iterate over projections. +You _can_ iterate over a transitive set. This will yield each value once. You +can also iterate over projections. -However, note that this is generally not recommended, since unlike -creating and using a projection, this operation is `O(set)`. +However, note that this is generally not recommended, since unlike creating and +using a projection, this operation is `O(set)`. -You should use this as an escape hatch if and only if you need to -implement something transitive sets don't support via projections or -reductions, because in doing so you'll lose a lot of the performance -benefits. 
+You should use this as an escape hatch if and only if you need to implement +something transitive sets don't support via projections or reductions, because +in doing so you'll lose a lot of the performance benefits. For example: @@ -196,15 +195,14 @@ This will yield `["qux", "foo", "bar"]`. ### Ordering -Transitive set iteration uses a left-to-right, pre-order traversal by -default, and ignores nodes that have already been visited. This order is -reflected in projections as well. +Transitive set iteration uses a left-to-right, pre-order traversal by default, +and ignores nodes that have already been visited. This order is reflected in +projections as well. -A few different traversal orders are supported with the `ordering` -attribute: +A few different traversal orders are supported with the `ordering` attribute: | Ordering | Description | -|----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `preorder` (default) | Traverses using a depth-first-search, visiting nodes left-to-right. | | `postorder` | Traverses children left-to-right, and then visits the current node. | | `topological` | A Topological sort, such that nodes are listed after all nodes that have them as descendants. This is similar to a pre-order traversal, except that when nodes are shared with more than one parent it is returned in the order of its last occurrence. | @@ -236,7 +234,7 @@ flowchart TD ``` | Ordering | Result | -|---------------|-------------------------| +| ------------- | ----------------------- | | `preorder` | `["qux", "foo", "bar"]` | | `postorder` | `["foo", "bar", "qux"]` | | `topological` | `["qux", "bar", "foo"]` | @@ -259,8 +257,15 @@ assert_eq(list(set3.traverse(ordering = "bfs")), ["qux", "foo", "bar"]) ## Implementation details -### Projection evaluation +### Performance + +The performance benefits of tsets arise due to: + +- **Caching**: projections and reductions are cached. +- **Lazy Evaluation**: projection traversals are evaluated lazily. + +### Evaluation -Projections are evaluated eagerly for each node of your transitive set. -This means that if your projection throws an error, you'll find out when -creating a set via `ctx.actions.tset`. +Projections and reductions are evaluated eagerly for each node of your +transitive set. This means that if your projection throws an error, you'll find +out when creating a set via `ctx.actions.tset`. diff --git a/docs/rule_authors/writing_rules.md b/docs/rule_authors/writing_rules.md index 52d64d1a0c9e2..d9bc92f8a5021 100644 --- a/docs/rule_authors/writing_rules.md +++ b/docs/rule_authors/writing_rules.md @@ -3,42 +3,61 @@ id: writing_rules title: Writing Rules --- -This page describes how to write rules for Buck2 and explains the flow for implementing rules that are already defined in Buck1. +import { FbInternalOnly } from 'docusaurus-plugin-internaldocs-fb/internal'; -For a list of the API functions available, see [Rule APIs](rule_api.md). 
+This page describes how to write rules for Buck2 and explains the flow for
+implementing rules that are already defined in Buck1.

+For a list of the API functions available, see the
+[Build APIs](../../api/build).
+
+:::note
+
+Rules such as `@fbcode_macros//build_defs:native_rules.bzl buck_genrule` are
+not actually rules; they are _macros_ (Starlark functions that eventually call
+out to the underlying `genrule` _rule_). Macros in Buck2 are mostly compatible
+with Buck1 and should be written in the same way.
+
+:::

## Workflow by example

-The built-in Buck2 rules are stored in `fbsource` in `fbcode/buck2/prelude`. To add a rule for a language, say `pascal`:
-
-1. Look at [prelude/decls](https://github.com/facebook/buck2/blob/main/prelude/decls/) to see the attributes that are supported in Buck1 and are mirrored into Buck2. If `pascal` was an existing rule, you would see what attributes it takes (often it will be `pascal_library` and `pascal_binary`).
+The built-in Buck2 rules are stored in the `prelude` folder in the buck2 repo.
+To add a rule for a language, say `pascal`:

-2. Create a file at `prelude/pascal.bzl` that will contain your rule implementations. The details are explained later, but a dummy rule looks like the following:
+1. Look at
+   [prelude/decls](https://github.com/facebook/buck2/blob/main/prelude/decls/)
+   to see the attributes that are supported in Buck1 and are mirrored into
+   Buck2. If `pascal` was an existing rule, you would see what attributes it
+   takes (often it will be `pascal_library` and `pascal_binary`).

-   ```python
-   def pascal_binary_impl(_ctx: AnalysisContext) -> list[Provider]:
-       return [DefaultInfo()]
-   ```
+2. Create a file `pascal.bzl` that will contain your rule implementations. The
+   details are explained later, but a dummy rule looks like the following:

-3. Register that rule in [`prelude/rules_impl.bzl`](https://github.com/facebook/buck2/blob/main/prelude/rules_impl.bzl), which involves adding a `load(":pascal.bzl", "pascal_binary_impl")` at the top and an additional entry in `implemented_rules` section to wire up `pascal_binary = pascal_binary_impl`.
+   ```python
+   def pascal_binary_impl(_ctx: AnalysisContext) -> list[Provider]:
+       return [DefaultInfo()]
+   ```

-4. Create a directory in `fbcode/buck2/tests/targets/rules/pascal` with `TARGETS` and whatever source files and test targets you need to test your project. Note, Apple tests are currently located at `xplat/buck2/tests/apple/...`.
+3. Create a directory in `fbcode/buck2/tests/targets/rules/pascal` with
+   `TARGETS` and whatever source files and test targets you need to test your
+   project. Note, Apple tests are currently located at
+   `xplat/buck2/tests/apple/...`.

-5. Test your code with `buck2 build fbcode//buck2/tests/targets/rules/pascal:`. They should succeed with no actual output produced.
+4. Test your code with `buck2 build fbcode//buck2/tests/targets/rules/pascal:`.
+   They should succeed with no actual output produced.

-6. Now implement the rules (see the rest of this page).
+5. Now implement the rules (see the rest of this page).

+:::note
+
+Before merging a diff, it's important that all your Starlark is warning free
+(if you don't want to set up Buck2 for local development, test it in CI). If
+you do set it up locally, see the `README.md` in the root of `fbcode/buck2`.
+Running `./test.py --lint-only` will confirm your Starlark code is warning
+free.
+ + ::: ## Concepts and design -A *rule* for a *target* uses *attributes* to declare *actions*, which produce *artifacts* that get included in *providers*. +A _rule_ for a _target_ uses _attributes_ to declare _actions_, which produce +_artifacts_ that get included in _providers_. For example, given: @@ -59,33 +78,69 @@ pascal_binary = rule(impl = pascal_binary_impl, attrs = { In the above snippet: -* **Rule** is `pascal_binary`, which is implemented by `pascal_binary_impl`. The rule says how to build things. -* **Target** will be something like `fbcode//buck2/tests/targets/rules/pascal:my_binary`. The rule implementation `pascal_binary_impl` will be called once per target. -* **Attributes** are the fields on the target (for example, you might have `out`, which can be accessed via `ctx.attrs.out`). -* **Actions** are declared by the rule with things like `ctx.actions.run`, which takes a command line. Note that the actions are not run by the rule, but declared, so that Buck2 can run them later. -* **Artifacts** represent files on disk, which could be source or build outputs (`binary` in the above example). - * For build outputs, the artifact is produced by an action, and the existence of the artifact does not imply the build has been run: the artifact 'remembers' what should be run if it is required. -* **Providers** are returned, which is information that other rules get to use. These will often contain artifacts. - -The rule implementation takes in a `ctx`, which is the rule context. The two most important fields are `ctx.attrs`, which picks up the attributes declared by the rule, and `ctx.actions`, which lets you create new actions to actually do something. - -The output of any actions performed will be materialized in `buck-out`. However, only the defined outputs of providers are available for dependent rules to consume and only the actions necessary to produce those outputs being consumed will be run. By default, the `default_output` of the `DefaultInfo` provider is built and output during a `buck build`. +- **Rule** is `pascal_binary`, which is implemented by `pascal_binary_impl`. The + rule says how to build things. +- **Target** will be something like + `fbcode//buck2/tests/targets/rules/pascal:my_binary`. The rule implementation + `pascal_binary_impl` will be called once per target. +- **Attributes** are the fields on the target (for example, you might have + `out`, which can be accessed via `ctx.attrs.out`). +- **Actions** are declared by the rule with things like `ctx.actions.run`, which + takes a command line. Note that the actions are not run by the rule, but + declared, so that Buck2 can run them later. +- **Artifacts** represent files on disk, which could be source or build outputs + (`binary` in the above example). + - For build outputs, the artifact is produced by an action, and the existence + of the artifact does not imply the build has been run: the artifact + 'remembers' what should be run if it is required. +- **Providers** are returned, which is information that other rules get to use. + These will often contain artifacts. + +The rule implementation takes in a `ctx`, which is the rule context. The two +most important fields are `ctx.attrs`, which picks up the attributes declared by +the rule, and `ctx.actions`, which lets you create new actions to actually do +something. + +The output of any actions performed will be materialized in `buck-out`. 
However,
+only the defined outputs of providers are available for dependent rules to
+consume and only the actions necessary to produce those outputs being consumed
+will be run. By default, the `default_output` of the `DefaultInfo` provider is
+built and output during a `buck build`.

### Providers

-Providers are the data returned from a rule and are the only way that information from this rule is available to rules that depend on it. Every rule must return at least the `DefaultInfo` provider, but most will also return either `RunInfo` (because they are executable) or some custom provider (because they are incorporated into something that is ultimately executable).
+Providers are the data returned from a rule and are the only way that
+information from this rule is available to rules that depend on it. Every rule
+must return at least the `DefaultInfo` provider, but most will also return
+either `RunInfo` (because they are executable) or some custom provider (because
+they are incorporated into something that is ultimately executable).

-The `DefaultInfo` provider has a field `default_output`, which is the file that will be built when someone executes a `buck2 build` on this particular target, and the file that will be used when someone runs `$(location target)` or uses it as a source file (such as `srcs = [":my_target"]`.)
+The `DefaultInfo` provider has a field `default_output`, which is the file that
+will be built when someone executes a `buck2 build` on this particular target,
+and the file that will be used when someone runs `$(location target)` or uses it
+as a source file (such as `srcs = [":my_target"]`).

-The current rule of thumb is that if you can build the `default_output`, the rule must 'work', and, if usable, should be 'ready'. For example, for a binary, the executable and runtime libraries it depends on might be returned. For a library, because neither the static or dynamic library is the 'default', you merely have to do enough work to ensure that the static and dynamic library probably work.
+The current rule of thumb is that if you can build the `default_output`, the
+rule must 'work', and, if usable, should be 'ready'. For example, for a binary,
+the executable and runtime libraries it depends on might be returned. For a
+library, because neither the static nor the dynamic library is the 'default',
+you merely have to do enough work to ensure that the static and dynamic library
+probably work.

-Similar to how `DefaultInfo` wraps a list of artifacts and `$(location)` selects from `DefaultInfo`, `RunInfo` wraps a command line and `$(exe)` selects from `RunInfo`.
+Similar to how `DefaultInfo` wraps a list of artifacts and `$(location)` selects
+from `DefaultInfo`, `RunInfo` wraps a command line and `$(exe)` selects from
+`RunInfo`.
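+
+For illustration, a minimal sketch (reusing this page's `pascal_binary`
+running example) of a rule implementation that returns both providers:
+
+```python
+def pascal_binary_impl(ctx: AnalysisContext) -> list[Provider]:
+    out = ctx.actions.declare_output(ctx.attrs.out)
+    # ... declare the actions that produce `out` here ...
+    return [
+        # Built by `buck2 build`; selected by `$(location ...)`.
+        DefaultInfo(default_output = out),
+        # Run by `buck2 run`; selected by `$(exe ...)`.
+        RunInfo(args = cmd_args(out)),
+    ]
+```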
For more information about command lines, see [Run action](#run-action), below.

-For libraries, usually you need to pass some information about the library up to the binary. The *only* information that dependents on the library get are the providers, so designing the information that flows around the provider is critical to designing good rules.
+For libraries, usually you need to pass some information about the library up to
+the binary. The _only_ information that dependents of the library get is the
+providers, so designing the information that flows around the provider is
+critical to designing good rules.

-For a hypothetical rule, you may decide you want the name of the library and the artifact that represents the `.so` file, for which you could define the following provider:
+For a hypothetical rule, you may decide you want the name of the library and the
+artifact that represents the `.so` file, for which you could define the
+following provider:

```python
PascalLibraryInfo = provider(fields=[
@@ -101,7 +156,12 @@ Often, you'll grab your dependencies from all your providers:
my_deps = [x[PascalLibraryInfo] for x in ctx.attrs.deps]
```

-In many cases, it becomes apparent you need the transitive closure of all libraries (for example, the libraries and everything they depend upon), in which case, the standard pattern is to move to a provider of a list of `record` (see the [types.md](https://github.com/facebookexperimental/starlark-rust/blob/main/docs/types.md) document in GitHub) and the `flatten/dedupe` functions, defining it as:
+In many cases, it becomes apparent you need the transitive closure of all
+libraries (for example, the libraries and everything they depend upon), in which
+case, the standard pattern is to move to a provider of a list of `record` (see
+the
+[types.md](https://github.com/facebook/starlark-rust/blob/main/docs/types.md)
+document in GitHub) and the `flatten/dedupe` functions, defining it as:

```python
PascalLibraryInfo = provider(fields=["links"]) # a list of LinkData
@@ -116,7 +176,9 @@ my_links = dedupe(flatten([x[PascalLibraryInfo].links for x in ctx.attrs.deps]))
my_info = PascalLibraryInfo(links = my_links)
```

-However, this `flatten`/`dupe` pattern can get expensive, especially when you have a deep dependency graph. To fix that it's recommended to use [transitive sets](transitive_sets.md).
+However, this `flatten`/`dedupe` pattern can get expensive, especially when you
+have a deep dependency graph. To fix that, it's recommended to use
+[transitive sets](transitive_sets.md).

### Actions

@@ -124,9 +186,12 @@ There are several actions you can use to create symlink trees, and so on.

#### Run action

-Of the various actions, the `run` action is by far the most important: it's the one that invokes a command line.
+Of the various actions, the `run` action is by far the most important: it's the
+one that invokes a command line.

-A command line is both a list of string arguments and a list of artifacts they depend on; with syntactic niceties for adding artifacts to command lines in a way that ensures the dependencies are usually correct.
+A command line is both a list of string arguments and a list of artifacts they
+depend on; with syntactic niceties for adding artifacts to command lines in a
+way that ensures the dependencies are usually correct.

Following are examples of command line manipulations:

@@ -139,9 +204,16 @@ cmd.add(out.as_output())
ctx.actions.run(cmd)
```

-The action `declare_output` creates a new artifact which is not bound to anything. You can call `.as_output()` on it when adding it to a command line to say that this command line doesn't take the artifact as an input but produces it as an output.
+The action `declare_output` creates a new artifact which is not bound to
+anything. You can call `.as_output()` on it when adding it to a command line to
+say that this command line doesn't take the artifact as an input but produces it
+as an output.

-From now on, if `out` is used as a dependency (either to another command line, or in `DefaultInfo`) then the action will be run to produce that artifact.
Typically, these outputs are declared (`declare_output`), bound in a `ctx.actions.run` call with `.as_output()`, then either used locally as the input to another action or returned in a provider.
+From now on, if `out` is used as a dependency (either to another command line,
+or in `DefaultInfo`) then the action will be run to produce that artifact.
+Typically, these outputs are declared (`declare_output`), bound in a
+`ctx.actions.run` call with `.as_output()`, then either used locally as the
+input to another action or returned in a provider.

As another example:

@@ -150,36 +222,56 @@ cmd = cmd_args(["cp", input, output.as_output()])
ctx.actions.run(cmd)
```

-A command provides both a string (what to write when used) and a list of artifacts (what must be available when used). Normally, as in the case above, the artifacts that are used correspond to those on the command line. But imagine the rule is changed to write the command to a shell script first:
+A command provides both a string (what to write when used) and a list of
+artifacts (what must be available when used). Normally, as in the case above,
+the artifacts that are used correspond to those on the command line. But imagine
+the rule is changed to write the command to a shell script first:

```python
sh = ctx.actions.write("test.sh", ["cp", input, output])
-cmd = cmd_args(["sh",sh])
-cmd.hidden([input, output.as_output()])
+cmd = cmd_args(["sh", sh], hidden = [input, output.as_output()])
ctx.actions.run(cmd)
```

-The command has been written to a shell script, which is now run. Beforehand, all the artifacts used by the command appeared on the command line. Now they don't. However, the shell script still accesses input and output. To inform the run command, use the hidden field of the command line to declare the dependency.
+The command has been written to a shell script, which is now run. Beforehand,
+all the artifacts used by the command appeared on the command line. Now they
+don't. However, the shell script still accesses input and output. To inform the
+run command, use the `hidden` field of the command line to declare the
+dependency.

-For more complicated actions, which perform meaningful logic beyond invoking a simple command, the tendency is to write custom Python scripts. Python scripts are used instead of shell scripts as they have better cross-platform compatibility and fewer hidden corners (especially in error paths).
+For more complicated actions, which perform meaningful logic beyond invoking a
+simple command, the tendency is to write custom Python scripts. Python scripts
+are used instead of shell scripts as they have better cross-platform
+compatibility and fewer hidden corners (especially in error paths).

-As an example of a Python helper, see [make_comp_db.py](https://github.com/facebook/buck2/blob/main/prelude/cxx/tools/make_comp_db.py).
+As an example of a Python helper, see
+[make_comp_db.py](https://github.com/facebook/buck2/blob/main/prelude/cxx/tools/make_comp_db.py).

-A further advantage of using Python is that these commands can be tested in isolation, outside of Buck2.
+A further advantage of using Python is that these commands can be tested in
+isolation, outside of Buck2.
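+
+As a flavor of what such a helper looks like (a hypothetical sketch, not an
+actual prelude script), consider a small copy tool that can be exercised
+directly from the command line:
+
+```python
+#!/usr/bin/env python3
+"""Copy an input file to an output path, creating parent directories."""
+
+import argparse
+import pathlib
+import shutil
+
+
+def main() -> None:
+    parser = argparse.ArgumentParser()
+    parser.add_argument("input", type=pathlib.Path)
+    parser.add_argument("output", type=pathlib.Path)
+    args = parser.parse_args()
+
+    # Creating parent directories explicitly gives a clearer error path than
+    # relying on shell behavior.
+    args.output.parent.mkdir(parents=True, exist_ok=True)
+    shutil.copy(args.input, args.output)
+
+
+if __name__ == "__main__":
+    main()
+```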
## Debugging

-The functions `fail`, `print` and `pprint` are your friends. To get started, a `buck2 build fbcode//buck2/tests/targets/rules/pascal:` builds everything or `buck2 run fbcode//buck2/tests/targets/rules/pascal:my_binary` runs a specific binary that returns a `RunInfo`.
+The functions `fail`, `print` and `pprint` are your friends. To get started, a
+`buck2 build fbcode//buck2/tests/targets/rules/pascal:` builds everything or
+`buck2 run fbcode//buck2/tests/targets/rules/pascal:my_binary` runs a specific
+binary that returns a `RunInfo`.

## Testing Rules

-A common way to test is to use `genrule` to cause the produced binary to run and assert some properties from it. If your rule is in Buck1 and Buck2, use a `TARGETS` file so you can test with both. If your tests are incompatible with Buck1 (such as if it is a new rule), use `TARGETS.v2`, which will only be seen by Buck2 and won't cause errors with Buck1.
+A common way to test is to use `genrule` to cause the produced binary to run and
+assert some properties from it. If your rule is in Buck1 and Buck2, use a
+`TARGETS` file so you can test with both. If your tests are incompatible with
+Buck1 (such as if it is a new rule), use `TARGETS.v2`, which will only be seen
+by Buck2 and won't cause errors with Buck1.

## New rules

-If your rule is **not** already in Buck1, then you can define it wherever you like, with a preference for it not being in `fbcode/buck2/prelude`.
+If your rule is **not** already in Buck1, then you can define it wherever you
+like, with a preference for it not being in `fbcode/buck2/prelude`.

-The only advantage of the `prelude` is that rules can be used without a corresponding `load`, which is generally considered a misfeature. The attributes should usually be placed adjacent to the rule itself.
+The only advantage of the `prelude` is that rules can be used without a
+corresponding `load`, which is generally considered a misfeature. The attributes
+should usually be placed adjacent to the rule itself.

As an example, just below the `pascal_binary_impl` function, you could write:

diff --git a/docs/users/advanced/deferred_materialization.md b/docs/users/advanced/deferred_materialization.md
index fdd8fb1f092eb..00d73ff622316 100644
--- a/docs/users/advanced/deferred_materialization.md
+++ b/docs/users/advanced/deferred_materialization.md
@@ -3,41 +3,55 @@ id: deferred_materialization
title: Deferred Materialization
---

-When using [Remote Execution](../remote_execution.md), Buck2 can optionally operate with Deferred Materialization, which means that Buck2 will avoid downloading outputs until they are required by a local action.
+import { OssOnly } from 'docusaurus-plugin-internaldocs-fb/internal';

-This can provide very substantial performance savings on builds that execute primarily on Remote Execution, since those builds become able to proceed without ever downloading any intermediary outputs.
+When using [Remote Execution](../remote_execution.md), Buck2 can optionally
+operate with Deferred Materialization, which means that Buck2 will avoid
+downloading outputs until they are required by a local action.

-At Meta, despite very fast networks being used internally, this was was observed to make real-world builds finish approximately 2.5 times faster.
+This can provide very substantial performance savings on builds that execute
+primarily on Remote Execution, since those builds become able to proceed without
+ever downloading any intermediary outputs.

+At Meta, despite very fast networks being used internally, this was observed
+to make real-world builds finish approximately 2.5 times faster.

## Pitfalls

-Buck2's deferred materialization makes assumptions about your Remote Execution backend.
In particular, it expects that the TTL returned from action cache entries by your Remote Execution backend always exceeds the TTL of all output artifacts it references.
+Buck2's deferred materialization makes assumptions about your Remote Execution
+backend. In particular, it expects that the TTL returned from action cache
+entries by your Remote Execution backend always exceeds the TTL of all output
+artifacts it references.

-Nonetheless, artifacts may also eventually expire from your Remote Execution backend. When that happens, builds using Deferred Materialization may fail if those artifacts are needed locally.
+Nonetheless, artifacts may also eventually expire from your Remote Execution
+backend. When that happens, builds using Deferred Materialization may fail if
+those artifacts are needed locally.

-A kill is necessary to recover from those builds. However, the [Restarter](restarter.md) can be used to mitigate this issue by restarting Buck when it encounters an expired artifact.
+A kill is necessary to recover from those builds. However, the
+[Restarter](restarter.md) can be used to mitigate this issue by restarting the
+Buck2 daemon when it encounters an expired artifact.

At Meta, artifacts get periodically refreshed, but open source RE backends do
not expose the TTL of artifacts, so this feature does not work outside of Meta.

-
## Enabling Deferred Materialization

-To enable deferred materialization, add this to your Buckconfig:
+To enable deferred materialization, add this to your
+[Buckconfig](../../concepts/buckconfig.md):

```
[buck2]
materializations = deferred
```

-
## On-disk state

-Buck2 can also optionally track its state on disk in a SQLite database. This allows Buck2 to remember what files are on disk across restarts.
+Buck2 can also optionally track its state on disk in a SQLite database. This
+allows Buck2 to remember what files are on disk across restarts.

-This can allow Buck2 to avoid re-downloading outputs from your Remote Execution backend if they are already on disk.
+This can allow Buck2 to avoid re-downloading outputs from your Remote Execution
+backend if they are already on disk.

To enable, add this to your Buckconfig:

@@ -46,10 +60,10 @@ To enable, add this to your Buckconfig:
sqlite_materializer_state = true
```

-
## Deferring Write Actions

-To further speedup builds, Buck2 can also be instructed to not execute any writes on the critical path for a build.
+To further speed up builds, Buck2 can also be instructed to not execute any
+writes on the critical path for a build.

To enable, add this to your Buckconfig:

@@ -58,11 +72,47 @@ To enable, add this to your Buckconfig:
defer_write_actions = true
```

-This mechanism is recommended if you're using the On-disk State, since it means Buck can omit writes entirely if the same content is already on disk.
-
+This mechanism is recommended if you're using the On-disk State, since it means
+Buck can omit writes entirely if the same content is already on disk.

## `buck2 clean --stale`

-When enabling the on-disk state, Buck2 can also optionally delete only artifacts that were not used recently. This also requires enabling deferred write actions.
+The deferred materializer can be configured to continuously delete stale
+artifacts, which haven't been recently accessed, or untracked artifacts, which
+exist in buck-out but not in the materializer state.
+
+Unlike `buck2 clean`, this does not fully wipe buck-out, but it should not
+negatively impact build performance if you are building and rebasing regularly.
+
+Enabling this requires enabling [on-disk state](#on-disk-state) and
+[deferred write actions](#deferring-write-actions), and adding this to your
+Buckconfig:
+
+```
+[buck2]
+clean_stale_enabled = true
+```
+
+It can be further configured by changing these default values:
+
+```
+[buck2]
+# one week
+clean_stale_artifact_ttl_hours = 24 * 7
+clean_stale_period_hours = 24
+clean_stale_start_offset_hours = 12
+```
+
+- `clean_stale_start_offset_hours` determines the time following daemon startup
+  before the first clean will be scheduled.
+- `clean_stale_period_hours` determines how frequently to schedule recurring
+  clean events.
+- `clean_stale_artifact_ttl_hours` determines how long artifacts should be kept
+  in buck-out before cleaning them.
+
+If clean stale is running in the background at the same time that a build begins
+to materialize artifacts, the clean will be interrupted and not run again until
+after the next scheduled period, but it should be able to make gradual progress
+and prevent long-term accumulation of artifacts.

-You can use this mechanism via `buck2 clean --stale`.
+If needed, a clean can be manually triggered by calling `buck2 clean --stale`.

diff --git a/docs/users/advanced/external_cells.md b/docs/users/advanced/external_cells.md
new file mode 100644
index 0000000000000..b6d863c9ae182
--- /dev/null
+++ b/docs/users/advanced/external_cells.md
@@ -0,0 +1,100 @@
+---
+id: external_cells
+title: External Cells
+---
+
+Normally, buck2 requires source files to be checked into the repo. However, this
+is sometimes inconvenient. It makes distribution of the prelude hard, and users
+may want to pull in third-party dependencies without vendoring them or using
+source control tricks.
+
+To help support these use cases, buck2 has a concept of "external cells."
+External cells act much like [normal cells], except that instead of having their
+source files checked into the repo, the source files have some alternative
+origin.
+
+[normal cells]: ../../concepts/buckconfig.md/#cells
+
+## Setting up an external cell
+
+Configuring an external cell looks much like configuring a regular cell. First,
+add the cell to the `cells` section of your `.buckconfig` like normal:
+
+```
+[cells]
+  prelude = some/path
+```
+
+The external cell's files won't actually be generated in the repo. However, you
+still need to provide a path for it - this path influences the handling of tree
+files, since those cross cell boundaries. It's also used for
+`expand-external-cell`, more on that below.
+
+Next, add an entry to the `external_cells` buckconfig section that specifies the
+"origin" of the external cell for a given alias. This tells buck2 where you want
+to get the cell from, if not files in the source repo.
+
+```
+[external_cells]
+  prelude = bundled
+```
+
+For the `bundled` origin, that's it. Other origins may require additional
+configuration.
+
+## Origins
+
+Buck2 currently supports two external cell origins, `bundled` and `git`.
+
+### The `bundled` origin
+
+The bundled origin can only be used with the `prelude` cell, and provides access
+to a copy of the prelude that is bundled as part of the buck2 binary. This is
+useful as an easier-to-install alternative to vendoring or submoduling the
+prelude.
+
+### The `git` origin
+
+The `git` origin indicates that an external cell's content should be loaded from
+some git repo. It accepts two additional configuration parameters, `git_origin`
+and `commit_hash`, like this:
+
+```
+[cells]
+  root = .
+  libfoo = libfoo
+
+[external_cells]
+  libfoo = git
+
+[external_cell_libfoo]
+  git_origin = https://github.com/facebook/foo
+  commit_hash =
+```
+
+The `commit_hash` value must be a SHA-1; it cannot be, for example, a branch
+name.
+
+## Expanding external cells
+
+Because external cells only represent a different way to access source files,
+buck2 provides an `expand-external-cell` command. This command will make a copy
+of the external cell into the path in the repo you specified for your cell.
+After commenting out the `external_cells` buckconfig entry, you can then make
+direct edits to the cell's files in your repo.
+
+## Details & Limitations
+
+- External cells can only be configured in the project root's `.buckconfig`.
+  This also means that there is no support for "transitive" external cells,
+  i.e., an external cell cannot specify additional external cells to pull in.
+- External cells cannot have nested cells inside them.
+- The `cells` buckconfig section of external cells is ignored. This is done to
+  ensure that when using an external cell to access some dependency in a git
+  repo, that git repo can still be an independently building project that
+  specifies its own toolchain and prelude configuration.
+
+  Because of this difference between external and non-external cells, it's
+  possible that running `buck2 expand-external-cell` may not produce a working
+  cell immediately, but instead require you to delete the `cells` section first.
+
+  `cell_aliases` still work just like with regular cells.

diff --git a/docs/users/advanced/in_memory_cache.md b/docs/users/advanced/in_memory_cache.md
index 27a86d61545ff..123073e6bd5ee 100644
--- a/docs/users/advanced/in_memory_cache.md
+++ b/docs/users/advanced/in_memory_cache.md
@@ -3,11 +3,16 @@ id: in_memory_cache
title: In Memory Cache
---

-Buck2 can maintain an in-memory cache of actions it executed. This allows actions to skip re-running even when they are (transitively) affected by file changes.
+Buck2 can maintain an in-memory cache of actions it executed. This allows
+actions to skip re-running even when they are (transitively) affected by file
+changes.

## Enabling the in-memory cache

-This feature requires enabling [Deferred Materialization](deferred_materialization.md) first. This is necessary so that Buck2 knows what's on disk. This requirement might go away once we decouple keeping track of what's on disk and deferred materialization.
+This feature requires enabling
+[Deferred Materialization](deferred_materialization.md) first. This is necessary
+so that Buck2 knows what's on disk. This requirement might go away once we
+decouple keeping track of what's on disk and deferred materialization.

Once done, to enable, add this to your Buckconfig:

diff --git a/docs/users/advanced/restarter.md b/docs/users/advanced/restarter.md
index 1876141e2a423..e0b9c57e746b2 100644
--- a/docs/users/advanced/restarter.md
+++ b/docs/users/advanced/restarter.md
@@ -3,9 +3,13 @@ id: restarter
title: Restarter
---

-The Restarter can automatically restart Buck2 when Buck2 detects that it hit a condition that may be recovered by restarting the Buck2 daemon.
+The Restarter can automatically restart Buck2 when Buck2 detects that it hit a
+condition that may be recovered by restarting the Buck2 daemon.

-This is particularly useful with [Deferred Materialization](deferred_materialization.md), which may require a daemon restart if your daemon holds references to artifacts that have expired in your Remote Execution backend.
+This is particularly useful with
+[Deferred Materialization](deferred_materialization.md), which may require a
+daemon restart if your daemon holds references to artifacts that have expired in
+your Remote Execution backend.

## Enabling the Restarter

diff --git a/docs/users/build_observability/build_report.md b/docs/users/build_observability/build_report.md
new file mode 100644
index 0000000000000..b8b34a3c59b06
--- /dev/null
+++ b/docs/users/build_observability/build_report.md
@@ -0,0 +1,218 @@
+---
+id: build_report
+title: Build Report
+---
+
+The build report is a JSON file that you can ask buck to output, which contains
+structured information about the result of your build. It is particularly
+valuable for its reporting of _unsuccessful_ outcomes in addition to
+_successful_ ones; usually, most use cases that only need to care about
+successful outcomes are well served by direct usage of the CLI.
+
+To request a build report, pass `--build-report <path>` to `buck2 build` on the
+CLI.
+
+At a high level, the build report outputs information for each of the targets
+that you requested to have built on the CLI. As a result, it may report
+information for more than one configuration or subtarget of a target. For
+example, this can happen if you passed `--target-platforms` or built `:target`
+and `:target[sub]`.
+
+## Schema
+
+```
+BuildReport {
+    # A unique ID identifying this buck invocation. Currently a UUID, however
+    # that may change in the future.
+    trace_id: str,
+
+    # True if all requested targets built successfully
+    success: bool,
+
+    # The absolute path to the project root
+    project_root: Path,
+
+    # The results of the build, categorized by unconfigured target
+    results: dict[TargetLabel, BuildReportEntry],
+
+    # A cache for error message lookup. This is meant for deduplicating strings
+    # that might otherwise appear many times in the build report and cause an
+    # unnecessary size increase. The keys are used in other fields in the build
+    # report in reference to these strings.
+    strings: dict[str, str],
+
+    # BUCK1 BACKCOMPAT ONLY!
+    #
+    # Currently always empty. Will be filled in if a flag is passed in the future.
+    #
+    # A map from targets that failed to build to error messages describing the
+    # failure.
+    failures: dict[TargetLabel, str],
+}
+
+BuildReportEntry {
+    # The results of building the target in the given configurations
+    configured: dict[Configuration, ConfiguredBuildReportEntry],
+
+    # Errors encountered while building this target.
+    #
+    # Note that this does not include the errors that are found within the
+    # `ConfiguredBuildReportEntry`s. Instead, it includes additional errors
+    # which could not be associated with a specific configuration of the
+    # target, typically because they occurred before the target could be
+    # configured.
+    errors: list[Error],
+
+    # BUCK1 BACKCOMPAT ONLY!
+    #
+    # The two fields below are included for buck1 backwards compatibility only.
+    # They are both computed by aggregating across all the configured targets in
+    # the way you might expect.
+    success: "FAIL" | "SUCCESS",
+    outputs: dict[str, list[Path]],
+
+    # The path to the package containing this target, relative to the project
+    # root. This is the source code location for this target.
+    package_project_relative_path: Optional[str]
+}
+
+ConfiguredBuildReportEntry {
+    # Did this target build successfully or not?
+    success: "FAIL" | "SUCCESS",
+
+    # A map of subtargets that were built to a list of the successfully built
+    # outputs for that subtarget.
+    #
+    # The keys are generated by joining the subtargets with a `|`. For example,
+    # if you request to have `:target` and `:target[foo][bar]` built on the CLI,
+    # this list will contain one entry for `""` and one for `"foo|bar"`.
+    outputs: dict[str, list[Path]],
+
+    # The number of targets in the configured dependency graph of this target.
+    #
+    # This is only included if `-c buck2.log_configured_graph_size=true` is set.
+    # Otherwise, it is left as None.
+    configured_graph_size: Optional[uint],
+}
+
+Error {
+    # The stringified hash of the same stringified error message that is shown
+    # to the user on the console. The hash is stored as the key in the `strings`
+    # cache of the `BuildReport`.
+    message_content: str,
+
+    # Structured action error. Present only if the error was actually an action
+    # error.
+    action_error: Optional[ActionError],
+
+    # An index that can be used to detect duplicate errors. Two errors with the
+    # same cause index have the same cause. Note that that does not mean that
+    # they have the same error message.
+    cause_index: uint,
+}
+
+ActionError {
+    # The action key
+    key: ActionKey,
+
+    # The action name
+    name: ActionName,
+
+    # Digest of the action
+    digest: str,
+
+    # Stringified hash of the stderr of the action
+    stderr: str,
+
+    # Stringified hash of the stdout of the action
+    stdout: str,
+
+    # Stringified hash of the same stringified error message that is provided
+    # by the action
+    error: str,
+
+    # Optional list of error categorizations provided by an error handler which
+    # is invoked in the event of a failed action, or an error message if the
+    # error handler failed.
+    error_diagnostics: Optional[ActionErrorDiagnostics],
+}
+
+ActionKey {
+    # The configured target, anon target, or bxl function which owns this action
+    owner: str,
+}
+
+ActionName {
+    # The category of the action
+    category: str,
+
+    # The optional identifier of the action
+    identifier: Optional[str],
+}
+
+enum ActionErrorDiagnostics {
+    # The list of sub errors if the error handler succeeded
+    sub_errors: list[ActionSubError],
+
+    # The stringified hash of the error message if the error handler failed
+    handler_invocation_error: String,
+}
+
+ActionSubError {
+    # Name of the error category. The category should be a finer-grained error
+    # categorization provided by the rule authors, and tends to be language
+    # specific. These should not be any kind of shared concepts among all errors
+    # for all languages/rules. For example, timeouts and infra errors should not
+    # go here - buck2 tries to categorize these types of errors automatically.
+    # An example of a finer-grained error category may be the error code for
+    # rustc outputs.
+    category: str,
+
+    # The stringified hash of the extra message provided for the specific
+    # sub-error category.
+    message_content: str,
+
+    # List of error locations, if any
+    locations: Optional[list[ActionErrorLocation]],
+}
+
+ActionErrorLocation {
+    # File path where the error appeared, preferably either project-relative or
+    # absolute.
+    file: str,
+
+    # Optional line number
+    line: Optional[u64]
+}
+```
+
+### On Compatibility
+
+The format of the build report is generally stable. However, note that new
+fields may be added at any time, and you should ensure this does not cause your
+parsing to fail.
+
+A number of fields above are marked as being for buck1 backwards compatibility
+only. These fields all have superior alternatives available in the build report
+already.
We would strongly prefer that new code neither use nor parse them, as
+this increases the likelihood that they can be removed one day.
+
+The build report additionally outputs a few fields that are intentionally not
+documented here. Those fields are even less useful than ones documented as being
+for backwards compatibility only, and even closer to removal. **Please** avoid
+using or parsing these if at all possible.
+
+### Limitations
+
+The build report currently has at least the following limitations:
+
+1. It includes only one action error per failed target. This is the expected
+   behavior when `--keep-going` is not passed, but when `--keep-going` is
+   passed, this is a bug.
+1. It is currently not generated when a non-existent package is specified on
+   the command line. This is also a bug.
+1. It cannot be requested for any buck2 command other than `build`.
+1. Errors do not contain any additional metadata outside of the error message.
+   This will be made available as such metadata is available in buck2.
+1. The "failures" field is always empty. This will be changed under a
+   backcompat opt-in flag in the future.
+
+Finally, it's worth raising that the concept of error deduplication has some
+fundamental limitations; if two targets both refer to the same non-existent
+dependency, do those errors have the same cause (the dependency doesn't exist)
+or different causes (each target is individually broken)? As a result, the exact
+details of when two errors are considered to have the same cause are not
+generally stable, and may not always be what you expect.
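+
+As an illustration, a consumer might walk the report like this (a sketch in
+Python; `report.json` is an assumed `--build-report` output path):
+
+```python
+import json
+
+with open("report.json") as f:
+    report = json.load(f)
+
+# Walk each requested target, then each configuration it was built in.
+for label, entry in report["results"].items():
+    for cfg, configured in entry.get("configured", {}).items():
+        if configured["success"] == "SUCCESS":
+            for subtarget, paths in configured["outputs"].items():
+                print(label, cfg, subtarget or "<default>", paths)
+        else:
+            print(label, cfg, "FAILED")
+```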
+
+The simple and superconsole will also print metadata about the build itself,
+such as the Buck2 UUID, the percentage of cache hits, and the number of action
+commands that ran. In addition, they will print the event spans detected within
+the build.
+
+## Simpleconsole
+
+The simpleconsole prints the stdout/stderr messages and event spans, line by
+line. There is no resource usage telemetry emitted.
+
+### Demo
+
+![Simpleconsole running a build](simpleconsole.gif)
+
+## Superconsole
+
+The superconsole uses the
+[superconsole](https://github.com/facebookincubator/superconsole) library to
+provide an interactive console which shows the event spans going on within
+Buck2.
+
+### Demo
+
+![Superconsole running a build](superconsole.gif)
+
+### Toggles
+
+The superconsole also provides several toggles to inspect ongoing Buck2
+telemetry.
+
+To see what's available you can press `?` or `h`. This will work as long as
+stdin is a TTY, which will be true most of the time if you're not piping
+anything into Buck2. To disable the interactive console (to allow alternate use
+of stdin, or so that pasted follow-up commands do not get swallowed), you can
+set the `BUCK_NO_INTERACTIVE_CONSOLE` environment variable, or use the
+`--no-interactive-console` flag.
+
+We support the following toggles:
+
+- `c` - toggle commands (shown in superconsole by default)
+- `d` - toggle DICE key states
+- `e` - toggle debugging events, such as spans and instant event counts
+- `2` - toggle two lines mode when showing events
+- `r` - toggle detailed remote execution info, such as uploads, downloads, and
+  action cache calls
+- `i` - toggle I/O counters
+- `p` - display target configurations
+- `+` - show more lines
+- `-` - show fewer lines
+- `h` - show help
+
+Note: the toggles are not available yet on Windows.
+
+## No console
+
+When specifying the `none` console type, Buck2 will only print whether the
+build succeeded, or the error if the build failed.
diff --git a/docs/users/build_observability/logging.md b/docs/users/build_observability/logging.md
index de69ba856dc58..d4b651827c10b 100644
--- a/docs/users/build_observability/logging.md
+++ b/docs/users/build_observability/logging.md
@@ -3,6 +3,158 @@ id: logging
title: Logging
---

-Buck2 produces detailed event logs for each invocation. They follow a schema outlined in `data.proto`.
+import { FbInternalOnly } from 'docusaurus-plugin-internaldocs-fb/internal';

-Those logs can be accessed using commands under `buck2 log`.
+Buck2 produces detailed event logs for each invocation, which follow a schema
+outlined in `app/buck2_data/data.proto` under the buck2 project root. The event
+logs that Buck2 produces automatically are always in protobuf zstd-compressed
+format (see [Viewing the event log](#viewing-the-event-log) for more details).
+
+## Event log format
+
+Warning: the schemas are all subject to change, so we do not recommend relying
+on the format. For the source of truth, take a look at `data.proto`.
+
+### Invocation header
+
+The first line of the event log is always the `Invocation` header:
+
+```python
+Invocation {
+    # CLI args split into a list of strings
+    command_line_args: List[str],
+    # Expanded CLI args, which expand any argsfiles
+    expanded_command_line_args: List[str],
+    # Absolute path of the current working directory of the Buck2 command
+    working_dir: str,
+    # UUID of the Buck2 command
+    trace_id: str,
+}
+```
+
+### Command result footer
+
+The last line is always the `CommandResult`:
+
+```python
+Result {
+    # One of the result types of CommandResult protobuf type in data.proto
+    result: BuildResponse | CqueryResponse | BxlResponse | ...,
+}
+```
+
+### Buck events
+
+The rest of the event log contains `BuckEvent`s, which are either
+`SpanStartEvent`s, `SpanEndEvent`s, or `InstantEvent`s.
+
+The `BuckEvent` format is roughly as follows:
+
+```python
+Event {
+    # When the event was fired. This is always a 2-item list, where the first
+    # value is millis, second value is micros
+    timestamp: List[u64],
+    # UUID of the Buck2 command, same one as the invocation header
+    trace_id: str,
+    # A trace-unique 64-bit integer identifying this event's span ID,
+    # if this event begins a new span or belongs to one.
+    span_id: u64,
+    # A trace-unique 64-bit integer identifying the span that this event is
+    # logically parented to.
+    parent_id: u64,
+    # See sections below for more details
+    data: SpanStart | SpanEnd | Instant,
+}
+```
+
+#### Span starts
+
+The `SpanStartEvent` indicates that a span of work is starting:
+
+```python
+SpanStart {
+    # One of the data types of SpanStartEvent protobuf type in data.proto
+    data: AnalysisStart | ActionExecutionStart | ...,
+}
+```
+
+#### Span ends
+
+The `SpanEndEvent` indicates that a span of work has finished:
+
+```python
+SpanEnd {
+    # Duration of the span
+    duration_us: u64,
+    # CPU poll times for this span
+    stats: SpanStats,
+    # One of the data types of SpanEndEvent protobuf type in data.proto
+    data: AnalysisEnd | ActionExecutionEnd | ...,
+}
+
+# CPU poll times for this span
+SpanStats {
+    max_poll_time_us: u64,
+    total_poll_time_us: u64,
+}
+```
+
+#### Instant events
+
+The `InstantEvent` represents a single point in time:
+
+```python
+InstantEvent {
+    # One of the data types of InstantEvent protobuf type in data.proto
+    data: ConsoleMessage | ActionError | ...,
+}
+```
+
+One specific instant event type that may be of interest is the `Snapshot` event,
+which includes some interesting details like RSS, CPU, I/O, remote execution,
+and DICE metrics.
+
+## Viewing the event log
+
+Event logs can be accessed using `buck2 log show`, which outputs the event logs
+in JSONL format. You can run `buck2 log show --help` to see all available
+options. Some useful commands:
+
+- Show the logs for the most recent Buck2 command:
+
+```sh
+buck2 log show
+```
+
+- Show the logs for a specific Buck2 command, given the command's UUID:
+
+```sh
+buck2 log show --trace-id <UUID>
+```
+
+- Show the logs for a recent Buck2 command:
+
+```sh
+buck2 log show --recent <number>
+```
+
+<FbInternalOnly>
+
+You can also download the logs locally from Buck2 UI. The logs will be
+downloaded from Manifold in protobuf zstd-compressed format, and you can view
+them in JSONL format by passing the path into `buck2 log show`.
+
+</FbInternalOnly>
+
+The JSON schema is derived from the protobuf types, and the log itself could be
+quite large. [jq](https://jqlang.github.io/jq/) can be useful to find specific
+things.
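+
+As a sketch of consuming the log programmatically (the helper below is
+hypothetical, and assumes you have saved `buck2 log show` output to a file),
+tallying event kinds might look like this:
+
+```python
+import json
+
+def summarize(log_path):
+    """Tally BuckEvent kinds in `buck2 log show` JSONL output (sketch)."""
+    counts = {"SpanStart": 0, "SpanEnd": 0, "Instant": 0}
+    with open(log_path) as f:
+        header = json.loads(f.readline())  # the Invocation header
+        print("trace_id:", header.get("trace_id"))
+        for line in f:
+            # The CommandResult footer has no "Event" key and is skipped.
+            data = json.loads(line).get("Event", {}).get("data", {})
+            for kind in counts:
+                if kind in data:
+                    counts[kind] += 1
+    print(counts)
+```
+
+For quick one-off digging, though, `jq` alone is usually enough.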
+For example, this jq script shows the max event delay between when a snapshot
+event is created on the daemon side and when the client receives it:
+
+```sh
+buck2 log show | jq -s '
+  map(
+    .Event.data.Instant.data.Snapshot.this_event_client_delay_ms
+    | select(. != null)
+  ) | max'
+```
diff --git a/docs/users/build_observability/simpleconsole.gif b/docs/users/build_observability/simpleconsole.gif
new file mode 100644
index 0000000000000..079ae7038d5a5
Binary files /dev/null and b/docs/users/build_observability/simpleconsole.gif differ
diff --git a/docs/users/build_observability/superconsole.gif b/docs/users/build_observability/superconsole.gif
new file mode 100644
index 0000000000000..f04a970e67d0e
Binary files /dev/null and b/docs/users/build_observability/superconsole.gif differ
diff --git a/docs/users/cheatsheet.md b/docs/users/cheatsheet.md
new file mode 100644
index 0000000000000..05d911ea7f7c5
--- /dev/null
+++ b/docs/users/cheatsheet.md
@@ -0,0 +1,129 @@
+---
+id: cheat_sheet
+title: Cheat Sheet
+---
+
+# Buck2 Cheat Sheet
+
+This section provides example command lines that you can use to obtain
+information about Buck2 and about your build. These techniques can help you to
+understand how your build works and to troubleshoot issues with your build.
+These examples use the [`buck2 cquery`](../query/cquery) command. We recommend
+cquery over uquery in most cases because cquery operates on the configured
+graph, which means that targets have had the expected configurations applied to
+them.
+
+---
+
+- How do I find all the targets for a package?
+- How do I specify more than one target to `buck2 cquery`?
+- How do I get the attribute names and values for the targets that result from a
+  query?
+- How do I perform a query inside of a rule?
+- How do I find the dependencies for a target, that is, the targets on which a
+  specified target depends?
+- How do I find the reverse-dependencies for a target, that is, the targets that
+  depend on a specified target?
+- How do I find the build file that contains the target that owns a source file?
+
+---
+
+### How do I find all the targets for a package?
+
+Specify a _build target pattern_ that represents the targets in the package.
+
+```
+buck2 cquery //path/to/dir/...
+```
+
+The `buck2 cquery` command can accept a
+[build target pattern](../../concepts/target_pattern) as a parameter. If you
+specify a build target pattern, Buck2 evaluates this pattern and shows all the
+build targets that match it.
+
+### How do I specify more than one target to `buck2 cquery`?
+
+Use the `buck2 cquery set()` operator. The following command line returns the
+target `main` in the build file in the root of the Buck2 project and all the
+targets from the build file in the `myclass` subdirectory of the root.
+
+```
+buck2 cquery "set( '//:main' '//myclass:' )"
+```
+
+### How do I get the attribute names and values for the targets returned by a query?
+
+Add the `--output-attribute <attribute>` or `--output-all-attributes` option to
+the command line, followed by regular expressions that represent the attributes
+of interest.
+
+```
+buck2 cquery "deps(//foo:bar)" --output-attribute 'name' 'exported_headers'
+```
+
+The `--output-attribute` option enables you to specify which attributes Buck2
+should return. Instead of returning the names of the targets that match the
+query expression, Buck2 returns the names and values of the specified attributes
+for those targets in JSON format. Attributes are specified as regular
+expressions.
+For example, `'.*'` matches all attributes. See the
+[`buck2 cquery` docs](../query/cquery) for more details. The output for the
+example query above might look something like the following.
+
+```
+{
+  "//foo/bar/lib:lib": {
+    "exported_headers": ["App/util.h"],
+    "name": "lib"
+  },
+  "//foo/bar:app": {
+    "exported_headers": ["App/lib.h"],
+    "name": "app"
+  }
+}
+```
+
+### How do I perform a query _inside_ of a rule?
+
+Buck2 supports certain string parameter macros to be used when defining a
+target. You can use the query macros as such:
+
+```
+$(query_targets "queryfunction(//:foo)")
+$(query_outputs "queryfunction(//:foo)")
+$(query_targets_and_outputs [SEPARATOR] "queryfunction(//:foo)")
+```
+
+Note, however, that the query macros are supported only for
+[`genrule`](../../prelude/globals/#genrule) and
+[`apk_genrule`](../../prelude/globals/#apk_genrule).
+
+### How do I find the dependencies for a target?
+
+Use the `deps()` operator.
+
+```
+buck2 cquery "deps('//foo:bar')"
+buck2 cquery "deps('//foo:bar', 1, first_order_deps())"
+buck2 cquery "deps(set('//foo:bar' '//foo:lib' '//foo/baz:util'))"
+```
+
+The `deps` operator finds the dependencies of the specified targets. The first
+argument represents the targets of interest. This can be a single
+[build target](../../concepts/build_target) or
+[build target pattern](../../concepts/target_pattern), or a set of these. The
+optional second argument is the _depth_ of the search for dependencies from the
+specified targets. For example, `1`, as shown in the example above, returns only
+the direct dependencies. If you do not provide this argument, the output is the
+complete set of transitive dependencies.
+
+### How do I find the reverse-dependencies for a target, that is, the targets that _depend on_ a specified target?
+
+Use the `buck2 cquery rdeps()` (reverse dependencies) operator. The following
+example returns the targets in the
+[transitive closure](https://en.wikipedia.org/wiki/Transitive_closure) of
+`//foo:bar` that depend directly on `//example:baz`.
+
+```
+buck2 cquery "rdeps('//foo:bar', '//example:baz', 1)"
+```
+
+### How do I find the build file that contains the target that owns a source file?
+
+In order to find the build file associated with a source file, combine the
+`owner` operator with `buildfile`. For example,
+
+```
+buck2 cquery "buildfile(owner('foo/bar/main.cpp'))"
+```
+
+first finds the targets that _own_ `foo/bar/main.cpp` and then returns the build
+files, such as `foo/bar/BUCK`, that define those targets.
diff --git a/docs/users/commands.md b/docs/users/commands.md
deleted file mode 100644
index f93957cffed8f..0000000000000
--- a/docs/users/commands.md
+++ /dev/null
@@ -1,6 +0,0 @@
----
-id: commands
-title: Commands
----
-
-To get help for a given buck2 subcommand, use `buck2 $SUBCOMMAND --help`, e.g. `buck2 build --help`.
diff --git a/docs/users/faq/buck_hanging.md b/docs/users/faq/buck_hanging.md
new file mode 100644
index 0000000000000..255d8db8c2da0
--- /dev/null
+++ b/docs/users/faq/buck_hanging.md
@@ -0,0 +1,86 @@
+---
+id: buck_hanging
+title: Why is Buck2 hanging?
+---
+
+import { FbInternalOnly, OssOnly } from
+'docusaurus-plugin-internaldocs-fb/internal';
+
+Let's look at how to troubleshoot when buck2 hangs, i.e. it just sits there
+saying "Jobs: In progress: 0, ..." but it’s not finishing...
+
+When buck2 hangs, there are two possibilities: It’s either hanging doing
+_something_, or it’s hanging doing _nothing_. The first thing you should do is
+figure out which of those is happening. That’s because the tools to debug
+either of those are _very_ different! We will mainly focus on the first in this
+case.
+
+To figure out which hang you have on your hands, just look at how much CPU
+buck2 is using when the hang occurs using your favorite activity monitor (e.g.
+`top`, `htop`). Remember that you can find the buck2 daemon’s PID using
+`buck2 status`. Ideally, break the utilization down by threads (in top, that’s
+`top -Hp $PID`).
+
+If any thread is using 100% CPU for some period of time, then you probably have
+a busy hang (buck2 is doing “something”), which is usually easier to debug.
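+
+If you would rather script that check than eyeball `top`, a small sketch using
+the third-party `psutil` package (an assumption on our part, not something
+buck2 ships) samples per-thread CPU times twice and reports the busiest thread:
+
+```python
+import sys
+import time
+
+import psutil  # third-party: pip install psutil
+
+def busiest_thread(pid, interval=1.0):
+    """Report the thread that burned the most CPU over `interval` seconds."""
+    proc = psutil.Process(pid)
+    before = {t.id: t.user_time + t.system_time for t in proc.threads()}
+    time.sleep(interval)
+    after = {t.id: t.user_time + t.system_time for t in proc.threads()}
+    tid, busy = max(
+        ((tid, cpu - before.get(tid, 0.0)) for tid, cpu in after.items()),
+        key=lambda pair: pair[1],
+    )
+    print(f"thread {tid}: ~{100 * busy / interval:.0f}% CPU")
+
+if __name__ == "__main__":
+    busiest_thread(int(sys.argv[1]))  # PID from `buck2 status`
+```
+
+A thread pinned near 100% points to a busy hang; every thread sitting near 0%
+points to the “doing nothing” case.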
+
+## How to debug a “busy” hang
+
+### Getting a stack trace
+
+When debugging a busy hang, the first thing to do is to work out what the
+process is doing. There are many tools you can use for this (like a profiler),
+but the absolute simplest one is quickstack: just run `quickstack -p $PID`, and
+it’ll show you a stack dump for all the threads in your process. If you prefer
+`gdb`, you can use `gdb -p $PID`, then `thread apply all bt`, and that’s the
+same thing.
+
+Note that a stack trace tells you what the process is doing at a point in time,
+so don’t just look at the very last frame and call it the culprit. Instead, look
+at the stack as a whole. If you need more perspective, use a sampling profiler
+(`strobeclient run --pid $PID`). You can also
+just grab stack traces at a few points in time and see if they look similar:
+this is exactly what a sampling profiler does, albeit at a higher frequency.
+
+### Interpreting the stack trace
+
+Let's consider an example user report (see
+[here](https://fb.workplace.com/groups/buck2users/permalink/3232782826978076/))
+with the following stack trace:
+
+```
+#01 0x0000000005b1ec26 in as core::iter::traits::iterator::Iterator>::next () from ...
+#02 0x0000000005b23998 in as itertools::Itertools>::exactly_one () from ...
+#03 0x00000000059dbb2c in buck2_server_commands::commands::build::create_unhashed_outputs () from ...
+#04 0x0000000005c3c677 in ::command::{closure#0}> as core::future::future::Future>::poll () from ...
+#05 0x00000000054c58a3 in as buck2_server_ctx::ctx::ServerCommandDiceContext>::with_dice_ctx::{closure#0}::{closure#0}::{closure#0}, core::pin::Pin> + core::marker::Send>>, cli_proto::BuildResponse>::{closure#0}> as core::future::future::Future>::poll () from ...
+#06 0x00000000054c7ae3 in ::{closure#0}::{closure#0}> as core::future::future::Future>::poll () from ...
+#07 0x0000000005370df8 in ::call_in_span::, buck2_data::CommandEnd)>, ::span_async::{closure#0}::{closure#0}>, core::result::Result>::{closure#0}::{closure#0}::{closure#0}> () from ...
+#08 0x00000000054f7288 in ::build::{closure#0}> as core::future::future::Future>::poll () from...
+ ...
+```
+
+At this point, you can look at the code, and note that there is no span around
+the output symlink creation function (`create_unhashed_outputs`). This suggests
+you’ve found your culprit: there is indeed a buck2 bug and we’re spending ages
+creating unhashed output symlinks, and since you need a span to get any console
+feedback, the console says nothing is happening.
+
+**An easy fix**: In this particular instance, Thomas spotted
+[an easy optimization](https://github.com/facebook/buck2/commit/d677e41253b73a31aafa1255a532c38992482efd)
+which resolved the issue. But, of course, that’s not always possible. If the
+easy fix hadn't been available, we’d be at a dead end, so what do we do next?
+
+**A harder fix**: If it is not clear what the root-cause is, you can
+[bisect](users/faq/how_to_bisect.fb.md):
+i.e. do a binary search across commits for the commit that introduced a given
+breakage/perf degradation. Thanks to the fact that we enforce a
+linear history, bisecting is pretty straightforward in
+`fbsource`. Then, once you identify the commit that caused the
+breakage, investigate what caused the issue.
+
+## How to debug a “doing nothing” hang
+
+**Cycle in dependencies**: If buck2 seems to be doing nothing (e.g. CPU usage is
+0%), one of the reasons could be a cycle in your dependencies, which may cause
+buck2 to hang (buck2 does implement a form of cycle detection, but it
+unfortunately has false negatives). You can confirm this by running buck1, which
+will report cycles properly.
diff --git a/docs/users/faq/common_issues.md b/docs/users/faq/common_issues.md
index af755991a2bd7..95cec828c5369 100644
--- a/docs/users/faq/common_issues.md
+++ b/docs/users/faq/common_issues.md
@@ -3,21 +3,30 @@ id: common_issues
title: Common Issues
---

+import { FbInternalOnly } from 'docusaurus-plugin-internaldocs-fb/internal';
+
## Why is stdin being swallowed?

Buck2 offers an interactive console by default.

-To disable either use an env var: `BUCK_NO_INTERACTIVE_CONSOLE` or a flag: `--no-interactive-console`
+To disable either use an env var: `BUCK_NO_INTERACTIVE_CONSOLE` or a flag:
+`--no-interactive-console`

## Where is my output file?

-To find the location of output for a target, use `buck2 build //foo:bar --show-output`. This will print the output corresponding to each built target, in this case `//foo:bar output_path`.
+To find the location of output for a target, use
+`buck2 build //foo:bar --show-output`. This will print the output corresponding
+to each built target, in this case `//foo:bar output_path`.

-To only get the output path (without the target behorehand) you want to use `buck2 build //foo:bar --show-simple-output`.
+To only get the output path (without the target beforehand), use
+`buck2 build //foo:bar --show-simple-output`.

-The resultant path is relative to the root of the repo (such as `~/repo_root/...`). For the full path use `--show-full-output` or `--show-full-simple-output`.
+The resultant path is relative to the root of the repo (such as
+`~/repo_root/...`). For the full path use `--show-full-output` or
+`--show-full-simple-output`.

-Note: in Buck1, the path is relative to the enclosing cell (such as `~/repo_root/cell/...`).
+Note: in Buck1, the path is relative to the enclosing cell (such as
+`~/repo_root/cell/...`).

For Meta, repo_root = fbsource, cell = fbcode/fbobjc/...

@@ -25,30 +34,66 @@ For Meta, repo_root = fbsource, cell = fbcode/fbobjc/...

## Why is Buck2 hanging?

-If Buck2 seems to be doing nothing, it could be caused be a cycle in your dependencies, which may cause Buck2 to hang (Buck2 does implement a form of cycle detection, but it unfortunately has false negatives). You can confirm this by running Buck1, which will report cycles properly.
+If Buck2 seems to be doing nothing, it could be caused by a cycle in your
+dependencies, which may cause Buck2 to hang (Buck2 does implement a form of
+cycle detection, but it unfortunately has false negatives). You can confirm this
+by running Buck1, which will report cycles properly.

## How do I get the commands Buck2 executed so I can reproduce them in isolation?

-For information, see [Finding Commands that Buck2 Ran](../../developers/what-ran.md).
+For information, see
+[Finding Commands that Buck2 Ran](../../developers/what-ran.md).

## Are multiple concurrent commands supported?

-Yes, they are supported. There are 2 types of concurrent commands: 1) parallel invocations, and 2) recursive invocations.
+Yes, they are supported. There are 2 types of concurrent commands: 1) parallel
+invocations, and 2) recursive invocations.

**Parallel invocations:**

-If the state of all the commands are the same, then they will run at the same time. "State" is referring to the same configs and source files. If the state is different amongst the commands, then buck2 will block the commands properly such that the states do not interfere with each other. Different states are caused by source file changes or config changes (ex: using a different mode).
+If the state of all the commands is the same, then they will run at the same
+time. "State" refers to the same configs and source files. If the state is
+different amongst the commands, then buck2 will block the commands properly such
+that the states do not interfere with each other. Different states are caused by
+source file changes or config changes (ex: using a different mode).

**Recursive invocations:**

-A recursive invocation is when an outer buck2 command ends up calling another buck2 command as it's running. Recursive invocations are most commonly seen with genrules and tests. For example:
-* If you have a `genrule` where the command contains a `buck2 cquery`, and you build the genrule with `buck2 build`, you have a recursive invocation where the outer command is `buck2 build` and the inner command is `buck2 cquery`
-* If you have a test which contains `buck2 build`, and you run your test with `buck2 test`, you have a recursive invocation where the outer command is `buck2 test` and the inner command is `buck2 build`
+A recursive invocation is when an outer buck2 command ends up calling another
+buck2 command as it's running. Recursive invocations are most commonly seen with
+genrules and tests. For example:
+
+- If you have a `genrule` where the command contains a `buck2 cquery`, and you
+  build the genrule with `buck2 build`, you have a recursive invocation where
+  the outer command is `buck2 build` and the inner command is `buck2 cquery`
+- If you have a test which contains `buck2 build`, and you run your test with
+  `buck2 test`, you have a recursive invocation where the outer command is
+  `buck2 test` and the inner command is `buck2 build`

-Recursive invocations should specify an `--isolation-dir`, or else buck2 will return an error.
+Recursive invocations should specify an `--isolation-dir`, or else buck2 will
+return an error.

## Why did my build OOM?

-If your build OOMs, you can check the last actions running by using `buck2 log whatup`. This will print the superconsole state at the moment the event log ended, which will indicate what actions were being run (and consuming memory) when your machine ran out of memory.
+If your build OOMs, you can check the last actions running by using
+`buck2 log whatup`. This will print the superconsole state at the moment the
+event log ended, which will indicate what actions were being run (and consuming
+memory) when your machine ran out of memory.
+
+You can also use the `--after <timestamp>` option to see all open spans at a
+certain point in time of the build.
+
+## Why does my target not have any outputs?
+
+If you see that your build succeeded, but the console message stated that your
+target did not have any outputs, this means that the underlying rule did not
+declare any output artifacts, defined as outputs declared in:
+
+- `default_outputs` and/or `other_outputs` in `DefaultInfo`
+- `cmd_args` in `RunInfo`
+- `cmd_args` inside the `command` in `ExternalRunnerTestInfo`

-You can also use the `--after ` option to see all open spans at a certain point in time of the build.
+For example, building a target which uses a `python_library` rule merely groups
+source files together and does not generate any output artifacts such as a
+Python executable. You would need to build a `python_binary` which uses that
+library in order to get an output.
diff --git a/docs/users/faq/starlark_peak_mem.md b/docs/users/faq/starlark_peak_mem.md
new file mode 100644
index 0000000000000..f2f29b0a61366
--- /dev/null
+++ b/docs/users/faq/starlark_peak_mem.md
@@ -0,0 +1,171 @@
+---
+id: starlark_peak_mem
+title: Debugging Excess Starlark Peak Memory
+---
+
+import { FbInternalOnly, OssOnly } from
+'docusaurus-plugin-internaldocs-fb/internal';
+
+## Wut memory?
+
+Peak memory is the maximum amount of memory used during evaluation of a
+particular Starlark file. The memory is usually released after we finish the
+evaluation of the file. Starlark is only garbage collected between top-level
+statements in a BUCK file, not inside function calls/macros, so on large
+servers with 64 hardware threads (or more) memory usage might accumulate,
+causing slowdowns, OOMs, or even SEVs (e.g.
+S372092). See
+[this post](https://fb.workplace.com/groups/1267349253953900/permalink/1312921066063385/)
+for more details on how Starlark's current GC works.
+
+To prevent such issues until proper GC is implemented, we have set a hard `2GB`
+memory limit for Starlark's evaluation of build files. This is a per-file limit.
+
+Note that this is different from the actual process memory, which might include
+other things apart from Starlark's evaluation.
+
+## How do I see my build file's peak memory usage?
+
+To see the Starlark peak memory usage of a build file, you can inspect the event
+log for your build file. Here is an example entry from the event log of
+`buck2 uquery target`, showing that it uses 1.5GB:
+
+```
+{"Event":{..."data":{"Load":{"module_id":"target:BUCK","cell":"...","error":null,"starlark_peak_allocated_bytes":1610608640}}}}}}
+```
+
+## Profiler to the rescue!
+
+If you want to see a more detailed breakdown of where the memory is used, you
+should profile Starlark's evaluation of build files. See
+[this page](../../rule_authors/optimization.md/#starlark-profiling) for details
+of profiling in the loading stage. This is a great starting point for
+troubleshooting.
+
+## How do I reduce memory footprint?
+
+There are many reasons why Starlark's evaluation of your build file might use a
+lot of memory. We list a few common cases below, but there are others. See
+[this post](https://fb.workplace.com/groups/buck2eng/permalink/3309329642697846/)
+for a few real world examples of debugging Starlark peak memory usage of core
+Android macros that have saved over 5.7GB peak memory!
+
+High level guidance is to pay attention to loops as a starting point. Are there
+any unnecessary computations? Can you shave them off?
+
+### Repeatedly allocating memory unnecessarily in a loop
+
+A common case where memory usage might accumulate is repeatedly allocating
+memory in a loop.
+For instance, below we call a memory-intensive function in a loop
+unnecessarily:
+
+```
+for target in huge_target_list:
+    memory_intensive_fun(x, y)
+    ...
+```
+
+Instead, if we know that arguments `x` and `y` don't change, we could hoist the
+call to `memory_intensive_fun` outside of the loop as follows:
+
+```
+memory_intensive_fun(x, y)
+for target in huge_target_list:
+    ...
+```
+
+### Simply allocating very big data structures!
+
+Another reason why Starlark uses a lot of memory could simply be because the
+build file allocates a very big data structure. For instance, below we allocate
+a list with 1 billion integers!
+
+```
+million_list = [1 for i in range(1 << 20)]
+billion_list = million_list * (1 << 10)
+
+```
+
+As a workaround, consider splitting the list.
+
+### Algorithmically inefficient code
+
+Another reason could be that the memory efficiency of your code is bad, i.e.
+you are unnecessarily allocating a lot of memory. Let's look at an example where
+we try to process a bunch of targets inefficiently:
+
+```
+targets = generate_targets(n)
+for target in targets:
+    process(target)
+
+```
+
+If the `targets` list is big **and** each target takes a lot of space in memory,
+memory usage might exceed the limit. Instead, a more efficient version might be
+to process each target as you generate it:
+
+```
+for i in range(n):
+    target = generate_target(i)
+    process(target)
+```
+
+In this version, each target is processed as it is generated, so we never need
+to store more than one target in memory.
+
+### Usage of inefficient library calls
+
+A more subtle reason could be unknowingly invoking library calls that allocate
+each time they are called. A well-known case of this is the `dict.items()` call.
+
+```
+for project, version in constraints.items():
+    # process each project ....
+```
+
+We do an allocation on every call to `constraints.items()`. Especially if this
+is hot code in Starlark, this could cause an OOM. Instead, a potential fix is
+to hoist the call out:
+
+```
+constraints = constraints.items()
+for project, version in constraints:
+    # process each project ....
+```
+
+However, you need to ensure that the dictionary is not mutated inside, otherwise
+you would get functionally different code. A similar case occurs for
+`dict.keys()`, where it returns a new list containing the keys.
+
+### Allocating for rare cases
+
+Finally, another pattern is allocating memory for the rare cases. For instance,
+consider the following example:
+
+```
+for target in huge_target_list:
+    if memory_intensive_condition([target]):
+        fail(...)
+```
+
+The above program could be optimized as follows:
+
+```
+if memory_intensive_condition(huge_target_list):
+    for target in huge_target_list:
+        if memory_intensive_condition([target]):
+            fail(...)
+```
+
+so that in the common non-failure case, we don't end up allocating excessive
+memory.
+
+## I still need more help!
+
+If you still cannot figure out how to reduce the Starlark memory footprint of
+your build files, please post in
+[Buck2 Users](https://fb.workplace.com/groups/buck2users) or raise
+[an issue](https://github.com/facebook/buck2/issues) in our GitHub
+project.
diff --git a/docs/users/remote_execution.md b/docs/users/remote_execution.md
index 9da8f1e2186b3..1d72fa14d91a0 100644
--- a/docs/users/remote_execution.md
+++ b/docs/users/remote_execution.md
@@ -3,25 +3,44 @@ id: remote_execution
title: Remote Execution
---

-Buck2 can use services that expose [Bazel's remote execution API](https://github.com/bazelbuild/remote-apis) in order to run actions remotely.
+Buck2 can use services that expose
+[Bazel's remote execution API](https://github.com/bazelbuild/remote-apis) in
+order to run actions remotely.

-Buck2 projects have been successfully tested for remote execution against [EngFlow](https://www.engflow.com/), [BuildBarn](https://github.com/buildbarn/bb-remote-execution) and [BuildBuddy](https://www.buildbuddy.io). Sample project configurations for those providers are available under [examples/remote_execution](https://github.com/facebook/buck2/tree/main/examples/remote_execution).
+Buck2 projects have been successfully tested for remote execution against
+[EngFlow](https://www.engflow.com/),
+[BuildBarn](https://github.com/buildbarn/bb-remote-execution) and
+[BuildBuddy](https://www.buildbuddy.io). Sample project configurations for those
+providers are available under
+[examples/remote_execution](https://github.com/facebook/buck2/tree/main/examples/remote_execution).

## RE configuration in `.buckconfig`

-Configuration for remote execution can be found under `[buck2_re_client]` in `.buckconfig`.
+Configuration for remote execution can be found under `[buck2_re_client]` in
+`.buckconfig`.

Keys supported include:

-* `engine_address` - address to your RE's engine.
-* `action_cache_address` - address to your action cache endpoint.
-* `cas_address` - address to your content-addressable storage (CAS) endpoint.
-* `tls_ca_certs` - path to a CA certificates bundle. This must be PEM-encoded. If none is set, a default bundle will be used. This path contains environment variables using shell interpolation syntax (i.e. $VAR). They will be substituted before reading the file.
-* `tls_client_cert` - path to a client certificate (and intermediate chain), as well as its associated private key. This must be PEM-encoded. This path can contain environment variables using shell interpolation syntax (i.e. $VAR). They will be substituted before reading the file.
-* `http_headers` - HTTP headers to inject in all requests to RE. This is a comma-separated list of `Header: Value` pairs. Minimal validation of those headers is done here. This can contain environment variables using shell interpolation syntax ($VAR). They will be substituted before reading the file.
-* `instance_name` - an instance name to pass on execution, action cache, and CAS requests.
+- `engine_address` - address to your RE's engine.
+- `action_cache_address` - address to your action cache endpoint.
+- `cas_address` - address to your content-addressable storage (CAS) endpoint.
+- `tls_ca_certs` - path to a CA certificates bundle. This must be PEM-encoded.
+  If none is set, a default bundle will be used. This path can contain
+  environment variables using shell interpolation syntax (i.e. $VAR). They will
+  be substituted before reading the file.
+- `tls_client_cert` - path to a client certificate (and intermediate chain), as
+  well as its associated private key. This must be PEM-encoded. This path can
+  contain environment variables using shell interpolation syntax (i.e. $VAR).
+  They will be substituted before reading the file.
+- `http_headers` - HTTP headers to inject in all requests to RE. This is a
+  comma-separated list of `Header: Value` pairs. Minimal validation of those
+  headers is done here. This can contain environment variables using shell
+  interpolation syntax ($VAR). They will be substituted before reading the file.
+- `instance_name` - an instance name to pass on execution, action cache, and CAS
+  requests.

-Buck2 uses `SHA256` for all its hashing by default. If your RE engine requires something else, this can be configured in `.buckconfig` as follows:
+Buck2 uses `SHA256` for all its hashing by default. If your RE engine requires
+something else, this can be configured in `.buckconfig` as follows:

```ini
[buck2]
@@ -31,12 +50,23 @@ digest_algorithms = BLAKE3

## RE platform configuration

-Next, your build will need an [execution platform](https://buck2.build/docs/concepts/glossary/#execution-platform) that specifies how and where actions should be executed. For a sample platform definition that sets up an execution platform to utilize RE, take a look at the [EngFlow example](https://github.com/facebook/buck2/blob/main/examples/remote_execution/engflow/platforms/defs.bzl), [BuildBarn example](https://github.com/facebook/buck2/blob/main/examples/remote_execution/buildbarn/platforms/defs.bzl), or the [BuildBuddy example](https://github.com/facebook/buck2/blob/main/examples/remote_execution/buildbuddy/platforms/defs.bzl).
+Next, your build will need an
+[execution platform](https://buck2.build/docs/concepts/glossary/#execution-platform)
+that specifies how and where actions should be executed. For a sample platform
+definition that sets up an execution platform to utilize RE, take a look at the
+[EngFlow example](https://github.com/facebook/buck2/blob/main/examples/remote_execution/engflow/platforms/defs.bzl),
+[BuildBarn example](https://github.com/facebook/buck2/blob/main/examples/remote_execution/buildbarn/platforms/defs.bzl),
+or the
+[BuildBuddy example](https://github.com/facebook/buck2/blob/main/examples/remote_execution/buildbuddy/platforms/defs.bzl).

-To enable remote execution, configure the following fields in [CommandExecutorConfig](https://buck2.build/docs/api/build/globals/#commandexecutorconfig) as follows:
+To enable remote execution, configure the following fields in
+[CommandExecutorConfig](https://buck2.build/docs/api/build/globals/#commandexecutorconfig)
+as follows:

-* `remote_enabled` - set to `True`.
-* `local_enabled` - set to `True` if you also want to run actions locally.
-* `use_limited_hybrid` - set to `False` unless you want to exclusively run remotely when possible.
-* `remote_execution_properties` - other additional properties.
-  * If the RE engine requires a container image, this can be done by setting `container-image` to an image URL, as is done in the example above.
+- `remote_enabled` - set to `True`.
+- `local_enabled` - set to `True` if you also want to run actions locally.
+- `use_limited_hybrid` - set to `False` unless you want to exclusively run
+  remotely when possible.
+- `remote_execution_properties` - other additional properties.
+  - If the RE engine requires a container image, this can be done by setting
+    `container-image` to an image URL, as is done in the example above.
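+
+Putting those fields together, a condensed sketch in the spirit of the linked
+examples (the container image URL and use case string below are placeholders):
+
+```python
+def _platforms(ctx):
+    # A single execution platform that sends actions to RE.
+    platform = ExecutionPlatformInfo(
+        label = ctx.label.raw_target(),
+        configuration = ConfigurationInfo(constraints = {}, values = {}),
+        executor_config = CommandExecutorConfig(
+            local_enabled = False,
+            remote_enabled = True,
+            use_limited_hybrid = False,
+            remote_execution_properties = {
+                # Only needed if your RE engine requires a container image.
+                "container-image": "docker://example-image:latest",
+            },
+            remote_execution_use_case = "buck2-default",
+        ),
+    )
+    return [
+        DefaultInfo(),
+        ExecutionPlatformRegistrationInfo(platforms = [platform]),
+    ]
+
+platforms = rule(attrs = {}, impl = _platforms)
+```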
diff --git a/docs/why.md b/docs/why.md
deleted file mode 100644
index fd86722e39bb7..0000000000000
--- a/docs/why.md
+++ /dev/null
@@ -1,63 +0,0 @@
----
-id: why
-title: Why Buck2
----
-
-Buck2 is a build system from Meta. This page answers the questions: [why does Buck2 exist](#why-does-buck2-exist), [what's different about Buck2](#whats-different-about-buck2), and [why use Buck2](#why-use-buck2).
-
-## Why does Buck2 exist?
-
-Meta employs a very large monorepo, consisting of a variety of programming languages, including C++, Python, Rust, Kotlin, Swift, Objective-C, Haskell, OCaml, and more. Google employs a different but functionally similar monorepo.
-
-These large scale and multi-language repositories are generally beyond the capabilities of traditional build systems like `make`. To optimize the build and performance of these large systems, Facebook and Google developed their own build systems, respectively Buck and Bazel. While the internal version of Bazel was started first (also known as Blaze), Buck was open sourced first (back in March 2013), followed by Bazel a few years later (March 2015).
-
-The retroactively named Buck1 was a capable build system and is still in use today at Meta (although many users have migrated). Buck2 is a rewrite that aims to keep the best bits of Buck1 (with a high degree of target compatibility) but also borrows ideas from [academic](https://ndmitchell.com/#shake_10_sep_2012) [research](https://ndmitchell.com/#shake_21_apr_2020) and build systems, including [Bazel](https://bazel.build/), [Pants](https://www.pantsbuild.org/), [Shake](https://shakebuild.com/), [Tup](https://gittup.org/tup/), and more.
-
-Following are aspects common to Buck1 and Buck2 (and in most cases, Bazel):
-
-* **Targets that can be queried** - the build is defined as a series of targets, specified in `BUCK` files, that depend on other targets. This graph of targets can be queried to understand how they relate to each other and what the potential impact of a change might be.
-* **Remote execution** - the build can send actions to a set of remote servers to be executed, increasing the parallelism significantly.
-* **Multi-language composability** - there can be lots of different languages in a single build, and they can be put together. For example, you could have a Python library that depends on a Rust library, which, in turn depends on a C library.
-* **File watching** - at large enough scale, simply looking for changed files is prohibitively expensive. Buck can integrate with [Watchman](https://facebook.github.io/watchman/) to discover which files have changed efficiently. However, for simplicity of setup, the open-source version defaults to using `inotify` or similar functionality.
-* **Uses Starlark** - Starlark is a deterministic Python-like language used to specify the targets, enabling the definition of targets as literals and more advanced manipulation/sharing.
-
-## What's different about Buck2?
-
-Buck2 has many minor differences from Buck1, but there are a number that give new efficiency or expressiveness that are of note (most of these are also differences from Bazel).
-
-* **Buck2 is written in Rust** - Buck1 was written in Java. One of the advantages of using Rust is the absence of GC pauses, However, Java also has advantages, such as better memory profiling tools.
-* **Buck2 is remote execution first** - local execution is considered a special case of remote execution, in contrast to Buck1 where it was added after. That means that things such as directory hashes can be pre-computed ready to send to remote execution, giving efficiency benefits.
-* **All Buck2 rules are written in Starlark** - whereas, in Buck1, they were written in Java as part of the binary, which makes iteration on rules much faster. -* **The Buck2 binary is entirely language agnostic** - as a consequence of having all the rules external to the binary, the most important and complex rule (such as in C++), don't have access to magic internal features. As a result, features have been made available to all rules, including: - * [Dep files](rule_authors/dep_files.md) - the ability to declare that a subset of the files weren't actually used, and thus not be sensitive to changes within them. - * [Incremental actions](rule_authors/incremental_actions.md) - the ability to have the action short-circuit some subset of the work if run again. -* **Buck2 uses a dynamic (aka monadic) graph as its underlying computation engine** - while most dependencies are specified statically, there are two particular features that expose dynamic power to rule authors: - * [Dynamic dependencies](rule_authors/dynamic_dependencies.md) - enable rules to build a file then look at its contents before specifying the dependencies and steps in future actions. Common uses are languages where the dependency structure within a project must follow imports (e.g. Haskell, OCaml) and distributed ThinLTO (where the best optimization plan is generated from summaries). - * [Anonymous targets](rule_authors/anon_targets.md) - enable rules to create a graph that has more sharing than the original user graph. As a result, two unrelated binaries can compile shared code only once, despite the shared code not knowing about this commonality. This feature is useful for rules like Swift feature resolution. -* **[Transitive-sets](rule_authors/transitive_sets.md)** - similar in purpose to Bazel's [depset](https://bazel.build/rules/lib/depset). But, instead of being just a memory optimization, are also wired into the dependency graph, providing a reduction in the size of the dependency graph. -* **Buck2 is not phased** - there are no target graph/action graph phases, just a series of dependencies in a [single graph on DICE](https://github.com/facebook/buck2/blob/main/dice/dice/docs/index.md) that result in whatever the user requested. That means that Buck2 can sometimes parallelise different phases and track changes very precisely. -* **Buck2 can integrate with the virtual filesystem [Eden](https://github.com/facebook/sapling)** - this provides good performance, even when the file system is backed by source control fetches. However, Eden is not required, and a normal file system will also work well. -* **The Buck2 Starlark implementation is available [as a standalone library](https://developers.facebook.com/blog/post/2021/04/08/rust-starlark-library/)** - this provides features such as IDE integration (both LSP and DAP bindings), linters, typecheckers, and more. These features are integrated into Buck2 to give a better developer experience (which is still evolving). -* **Buck2 supports configurations** - (such as `select`) to provide multi-platform/architecture builds, which are heavily inspired by Bazel. Within that space, there is a number of small differences, such as `toolchain_deps`. -* **Buck2 is fast** - in our internal tests, we observed that Buck2 completed builds 2x as fast as Buck1. - -For a comprehensive list of benefits, see [Benefits Compared to Buck1](benefits.md). - -## Why use Buck2? - -It would be delightful if you tried out Buck2! 
But it is early-stage software, so users may run into unexpected issues. If you encounter an issue, please report it via [Github issues](https://github.com/facebook/buck2/issues).
-
-Buck2 is being used internally within Meta and is available as open-source from 2023.
-
-The are several differences between the internal and open-source versions:
-
-* Meta uses an internal version of remote execution with builds always hooked up to remote execution. The open-source binding, which uses Buck2 without remote execution, may be less polished.
-* There are some configuration differences between the open source and internal versions. For example, file changes default to `inotify` in open-source, and to Watchman internally.
-* The prelude (containing all the rules) is the same for open-source as internal, but toolchains are not open-sourced. The required custom toolchains may not work as well.
-
-There are also some things that aren't quite yet finished:
-
-* There are not yet mechanisms to build in release mode (that should be achieved by modifying the toolchain).
-* Windows/Mac builds are still in progress; open-source code is mostly tested on Linux.
-
-If none of that puts you off, [give Buck2 a go](getting_started.md)!
diff --git a/examples/.gitignore b/examples/.gitignore
new file mode 100644
index 0000000000000..ce07be6844203
--- /dev/null
+++ b/examples/.gitignore
@@ -0,0 +1 @@
+buck-out/
diff --git a/examples/BUCK b/examples/BUCK
new file mode 100644
index 0000000000000..0e89a27dddfe4
--- /dev/null
+++ b/examples/BUCK
@@ -0,0 +1,4 @@
+# These files aren't part of the normal build graph,
+# just examples for our open source project.
+
+oncall("build_infra")
diff --git a/examples/README.md b/examples/README.md
index e0c6614d5f5b3..79bbce77e3c27 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -1,23 +1,22 @@
# buck2 examples

-In these folders are some examples on how to get buck2 working with
-your favorite languages and tools.
+In these folders are some examples on how to get buck2 working with your
+favorite languages and tools.

## with_prelude

-Examples taking advantage of the prelude to create toolchain-independent
-build definitions in cpp and python. Includes as an example a usecase
-for building and using c-extension-backed python libraries.
+Examples taking advantage of the prelude to create toolchain-independent build
+definitions in cpp and python. Includes as an example a use case for building
+and using c-extension-backed python libraries.

-Note: to take advantage of these examples you must symlink the prelude
-into this folder.
+Note: to take advantage of these examples you must symlink the prelude into this
+folder.

## no_prelude

-Preludeless examples for those wanting to use buck2 with their own
-rules and toolchains. In here you can learn about how BUILD
-files interact with rules, and how the provider abstraction can be
-used to encapsulate build logic.
+Preludeless examples for those wanting to use buck2 with their own rules and
+toolchains. In here you can learn about how BUILD files interact with rules, and
+how the provider abstraction can be used to encapsulate build logic.

## toolchains
diff --git a/examples/bootstrap/.buckconfig b/examples/bootstrap/.buckconfig
index 4c3602c9f5be7..d6834f5db46ef 100644
--- a/examples/bootstrap/.buckconfig
+++ b/examples/bootstrap/.buckconfig
@@ -1,15 +1,18 @@
-[repositories]
+[cells]
root = .
prelude = prelude
toolchains = toolchains
bootstrap = bootstrap
none = none

-[repository_aliases]
+[cell_aliases]
config = prelude
buck = none
fbcode = none
fbsource = none

+[external_cells]
+  prelude = bundled
+
[parser]
target_platform_detector_spec = target:root//...->bootstrap//platform:default
diff --git a/examples/bootstrap/README.md b/examples/bootstrap/README.md
index aa60fe87849db..8dbb02ae09dca 100644
--- a/examples/bootstrap/README.md
+++ b/examples/bootstrap/README.md
@@ -1,11 +1,14 @@
# Configuring a bootstrapping toolchain setup

-This project provides an example of what it might look like to configure a bootstrapping toolchain and construct a different toolchain using an artifact built with the former.
+This project provides an example of what it might look like to configure a
+bootstrapping toolchain and construct a different toolchain using an artifact
+built with the former.

## How to build

1. Build or install `buck2` with Cargo
-2. This project assumes Rust, Clang, and Python to be present. See `toolchains/BUCK` for how we pull those in from the system.
+2. This project assumes Rust, Clang, and Python to be present. See
+   `toolchains/BUCK` for how we pull those in from the system.
3. Run `buck2 init --git`
4. Run commands: e.g. `buck2 run :hello_world`, `buck2 build //...`

@@ -13,7 +16,9 @@ This project provides an example of what it might look like to configure a boots

### Bootstrap constraint

-In order to differentiate between a regular toolchain and a bootstrap toolchain, we introduce a new constraint setting `bootstrap//:bootstrap` and a corresponding constraint value `bootstrap//:use_bootstrap`.
+In order to differentiate between a regular toolchain and a bootstrap toolchain,
+we introduce a new constraint setting `bootstrap//:bootstrap` and a
+corresponding constraint value `bootstrap//:use_bootstrap`.

```python
constraint_setting(
@@ -28,7 +33,9 @@ constraint_value(

### Bootstrap platform

-We then define a new platform `bootstrap//platform:bootstrap`, which inherits everything from the default platform `bootstrap//platform:default` and adds the extra `bootstrap` constraint defined above.
+We then define a new platform `bootstrap//platform:bootstrap`, which inherits
+everything from the default platform `bootstrap//platform:default` and adds the
+extra `bootstrap` constraint defined above.

```python
platform(
@@ -40,12 +47,15 @@ platform(

### Bootstrap toolchain

-We are using Rust for this example, but the concept is not specific to Rust. Our goal is to
-build a Rust compiler with the bootstrap toolchain, construct a new toolchain with the compiler,
-then build a Rust binary with the newly built Rust compiler. For simplicity, we are not building
-an actual Rust compiler, but using a small wrapper Rust binary that execs into the compiler picked from the system.
+We are using Rust for this example, but the concept is not specific to Rust. Our
+goal is to build a Rust compiler with the bootstrap toolchain, construct a new
+toolchain with the compiler, then build a Rust binary with the newly built Rust
+compiler. For simplicity, we are not building an actual Rust compiler, but using
+a small wrapper Rust binary that execs into the compiler picked from the system.
+
+First, we set up a bootstrap toolchain using the `system_rust_toolchain` provided
+in the prelude.
-First, we setup a bootstrap toolchain using the `system_rust_toolchain` provided in the prelude.
```python system_rust_toolchain( name = "rust_bootstrap_toolchain", @@ -53,6 +63,7 @@ system_rust_toolchain( ``` Then, we configure a build for our "rustc". + ```python rust_binary( name = "rustc_wrapper", @@ -60,7 +71,9 @@ rust_binary( ) ``` -To construct a new toolchain that uses the new "rustc", we use `configured_alias` to tack on the `bootstrap` to the binary. +To construct a new toolchain that uses the new "rustc", we use +`configured_alias` to tack on the `bootstrap` to the binary. + ```python rust_toolchain( name = "rust_toolchain_with_compiled_rustc", @@ -74,7 +87,9 @@ configured_alias( ) ``` -Now that we have both toolchains constructed, we can create our final Rust toolchain that switches between the two based on the `use_bootstrap` constraint. +Now that we have both toolchains constructed, we can create our final Rust +toolchain that switches between the two based on the `use_bootstrap` constraint. + ```python toolchain_alias( name = "rust", diff --git a/examples/bootstrap/toolchains/BUCK b/examples/bootstrap/toolchains/BUCK index 399d18c30c305..bf8e20de39113 100644 --- a/examples/bootstrap/toolchains/BUCK +++ b/examples/bootstrap/toolchains/BUCK @@ -2,7 +2,6 @@ load("@prelude//toolchains:cxx.bzl", "system_cxx_toolchain") load("@prelude//toolchains:python.bzl", "system_python_bootstrap_toolchain", "system_python_toolchain") load("@prelude//toolchains:rust.bzl", "system_rust_toolchain") load(":rust_toolchain.bzl", "rust_toolchain") -load(":toolchain.bzl", "toolchain_alias") ### Toolchains for Cxx/Python/etc. ### system_cxx_toolchain( diff --git a/examples/bootstrap/toolchains/rust_toolchain.bzl b/examples/bootstrap/toolchains/rust_toolchain.bzl index 92e08a103bf65..ff43d5a264873 100644 --- a/examples/bootstrap/toolchains/rust_toolchain.bzl +++ b/examples/bootstrap/toolchains/rust_toolchain.bzl @@ -5,7 +5,7 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
-load("@prelude//rust:rust_toolchain.bzl", "RustToolchainInfo") +load("@prelude//rust:rust_toolchain.bzl", "PanicRuntime", "RustToolchainInfo") # almost identical to the system_rust_toolchain implementation, with the only # the difference being the ability to specify rustc @@ -29,15 +29,15 @@ def _rust_toolchain_impl(ctx): return [ DefaultInfo(), RustToolchainInfo( - clippy_driver = "clippy-driver", + clippy_driver = RunInfo(args = ["clippy-driver"]), compiler = ctx.attrs.compiler[RunInfo], default_edition = ctx.attrs.default_edition, - extern_html_root_url_prefix = ctx.attrs.extern_html_root_url_prefix, failure_filter_action = ctx.attrs.failure_filter_action[RunInfo], + panic_runtime = PanicRuntime("unwind"), rustc_action = ctx.attrs.rustc_action[RunInfo], rustc_flags = ctx.attrs.rustc_flags, rustc_target_triple = ctx.attrs.rustc_target_triple, - rustdoc = "rustdoc", + rustdoc = RunInfo(args = ["rustdoc"]), rustdoc_flags = ctx.attrs.rustdoc_flags, rustdoc_test_with_resources = ctx.attrs.rustdoc_test_with_resources[RunInfo], ), @@ -48,7 +48,6 @@ rust_toolchain = rule( attrs = { "compiler": attrs.exec_dep(providers = [RunInfo]), "default_edition": attrs.option(attrs.string(), default = None), - "extern_html_root_url_prefix": attrs.option(attrs.string(), default = None), "failure_filter_action": attrs.default_only(attrs.exec_dep(providers = [RunInfo], default = "prelude//rust/tools:failure_filter_action")), "rustc_action": attrs.default_only(attrs.exec_dep(providers = [RunInfo], default = "prelude//rust/tools:rustc_action")), "rustc_flags": attrs.list(attrs.string(), default = []), diff --git a/examples/bootstrap/toolchains/toolchain.bzl b/examples/bootstrap/toolchains/toolchain.bzl deleted file mode 100644 index 4c7fe07282b68..0000000000000 --- a/examples/bootstrap/toolchains/toolchain.bzl +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under both the MIT license found in the -# LICENSE-MIT file in the root directory of this source tree and the Apache -# License, Version 2.0 found in the LICENSE-APACHE file in the root directory -# of this source tree. - -def _toolchain_impl(ctx: AnalysisContext) -> list[Provider]: - if ctx.attrs.dep != None: - return ctx.attrs.dep.providers - return [DefaultInfo()] - -toolchain = rule( - attrs = {"dep": attrs.option(attrs.exec_dep(), default = None)}, - impl = _toolchain_impl, - is_toolchain_rule = True, -) - -def _toolchain_alias_impl(ctx: AnalysisContext) -> list[Provider]: - return ctx.attrs.actual.providers - -toolchain_alias = rule( - doc = """ -toolchain_alias acts like alias but for toolchain rules. - -The toolchain_alias itself is a toolchain rule and the `actual` argument is -expected to be a toolchain_rule as well. - """, - attrs = {"actual": attrs.toolchain_dep(doc = "The actual toolchain that is being aliased. This should be a toolchain rule.")}, - impl = _toolchain_alias_impl, - is_toolchain_rule = True, -) diff --git a/examples/hello_world/.buckconfig b/examples/hello_world/.buckconfig index b80bc62d8e5ed..91bf5092cb6eb 100644 --- a/examples/hello_world/.buckconfig +++ b/examples/hello_world/.buckconfig @@ -1,14 +1,18 @@ -[repositories] +[cells] root = . 
prelude = prelude
toolchains = toolchains
none = none

-[repository_aliases]
+[cell_aliases]
config = prelude
fbcode = none
fbsource = none
buck = none

+[external_cells]
+  prelude = bundled
+
+
[parser]
target_platform_detector_spec = target:root//...->prelude//platforms:default
diff --git a/examples/hello_world/BUCK b/examples/hello_world/BUCK
index ebd90692f7e65..b0e97423fd55c 100644
--- a/examples/hello_world/BUCK
+++ b/examples/hello_world/BUCK
@@ -7,7 +7,7 @@ cxx_binary(

cxx_library(
    name = "print",
-    srcs = glob(["**/*.cpp"]),
+    srcs = ["library.cpp"],
    exported_headers = glob(["**/*.hpp"]),
    visibility = ["PUBLIC"],
)
diff --git a/examples/hello_world/README.md b/examples/hello_world/README.md
index 2f346e313642f..7ca7d66cbdb7d 100644
--- a/examples/hello_world/README.md
+++ b/examples/hello_world/README.md
@@ -1,10 +1,14 @@
## A simple Hello World project using the buck2-prelude

-This example demonstrates how a simple C++ project might be built with Buck2 using the prelude.
+This example demonstrates how a simple C++ project might be built with Buck2
+using the prelude.
+
+In the `toolchains` cell, we define two toolchains needed:
+`system_cxx_toolchain` and `system_python_bootstrap_toolchain`, both pulled in
+from the prelude. The `BUCK` file at the project root contains a `cxx_binary`
+target and its `cxx_library` dependency. `.buckconfig` contains the
+configuration to set the target platform for the project:

-In the `toolchains` cell, we define two toolchains needed: `system_cxx_toolchain` and `system_python_bootstrap_toolchain`, both pulled in from the prelude.
-The `BUCK` file at the project root contain a `cxx_binary` target and its `cxx_library` dependency.
-`.buckconfig` contains the configuration to set the target platform for the project:
```
[parser]
target_platform_detector_spec = target:root//...->prelude//platforms:default
diff --git a/examples/no_prelude/.buckconfig b/examples/no_prelude/.buckconfig
index 2703f42e9deb3..1b25bee1e15a1 100644
--- a/examples/no_prelude/.buckconfig
+++ b/examples/no_prelude/.buckconfig
@@ -1,4 +1,3 @@
-[repositories]
+[cells]
root = .
-prelude = prelude
toolchains = toolchains
diff --git a/examples/no_prelude/README.md b/examples/no_prelude/README.md
index 21dbf878ceb4b..d49dfd2df4ec7 100644
--- a/examples/no_prelude/README.md
+++ b/examples/no_prelude/README.md
@@ -1,16 +1,23 @@
## No-prelude example

-This is an example project that does not rely on https://github.com/facebook/buck2-prelude. Instead the prelude cell points to a `prelude` directory with an empty `prelude.bzl` file, like so:
+
+This is an example project that does not rely on
+https://github.com/facebook/buck2-prelude. Instead the prelude cell points to a
+`prelude` directory with an empty `prelude.bzl` file, like so:
+
```
#.buckconfig
-[repositories]
+[cells]
root = .
prelude = prelude
```

-All rules and toolchains are defined manually within each of the subdirectories. (e.g. `cpp/rules.bzl`, `cpp/toolchain.bzl`)
+All rules and toolchains are defined manually within each of the subdirectories.
+(e.g. `cpp/rules.bzl`, `cpp/toolchain.bzl`)

## Sample commands
+
Install Buck2, cd into a project, and run
+
```bash
# List all targets
buck2 targets //...
diff --git a/examples/no_prelude/cpp/rules.bzl b/examples/no_prelude/cpp/rules.bzl index ef06c40352077..6e39a68bac7ee 100644 --- a/examples/no_prelude/cpp/rules.bzl +++ b/examples/no_prelude/cpp/rules.bzl @@ -14,7 +14,10 @@ def _cpp_binary_impl(ctx: AnalysisContext) -> list[Provider]: extension = ".exe" if host_info().os.is_windows else "" out = ctx.actions.declare_output("main" + extension) - cmd = cmd_args([ctx.attrs.toolchain[CxxCompilerInfo].compiler_path, "-o", out.as_output()] + sources).hidden(ctx.attrs.headers) + cmd = cmd_args( + [ctx.attrs.toolchain[CxxCompilerInfo].compiler_path, "-o", out.as_output()] + sources, + hidden = ctx.attrs.headers, + ) ctx.actions.run(cmd, category = "compile") @@ -39,7 +42,10 @@ def _cpp_library_impl(ctx: AnalysisContext) -> list[Provider]: extension = ".dll" if host_info().os.is_windows else ".so" out = ctx.actions.declare_output("lib" + extension) - cmd = cmd_args([ctx.attrs.toolchain[CxxCompilerInfo].compiler_path, "-shared", "-undefined", "dynamic_lookup", "-o", out.as_output()] + sources).hidden(ctx.attrs.headers) + cmd = cmd_args( + [ctx.attrs.toolchain[CxxCompilerInfo].compiler_path, "-shared", "-undefined", "dynamic_lookup", "-o", out.as_output()] + sources, + hidden = ctx.attrs.headers, + ) ctx.actions.run(cmd, category = "compile") diff --git a/examples/no_prelude/toolchains/go_toolchain.bzl b/examples/no_prelude/toolchains/go_toolchain.bzl index 5a0ea76ae1ed9..6d921aed9d9f9 100644 --- a/examples/no_prelude/toolchains/go_toolchain.bzl +++ b/examples/no_prelude/toolchains/go_toolchain.bzl @@ -17,11 +17,11 @@ def _go_toolchain_impl(ctx): cmd = cmd_args() if host_info().os.is_windows: - compiler_src = cmd_args(download, format = "{}\\go\\bin\\go.exe") - cmd.add([ctx.attrs._symlink_bat, compiler_dst.as_output(), compiler_src.relative_to(compiler_dst, parent = 1)]) + compiler_src = cmd_args(download, format = "{}\\go\\bin\\go.exe", relative_to = (compiler_dst, 1)) + cmd.add([ctx.attrs._symlink_bat, compiler_dst.as_output(), compiler_src]) else: - compiler_src = cmd_args(download, format = "{}/go/bin/go") - cmd.add(["ln", "-sf", compiler_src.relative_to(compiler_dst, parent = 1), compiler_dst.as_output()]) + compiler_src = cmd_args(download, format = "{}/go/bin/go", relative_to = (compiler_dst, 1)) + cmd.add(["ln", "-sf", compiler_src, compiler_dst.as_output()]) ctx.actions.run(cmd, category = "cp_compiler") return [DefaultInfo(default_output = download), GoCompilerInfo(compiler_path = compiler_dst, GOROOT = "")] @@ -57,10 +57,16 @@ def _download_toolchain(ctx: AnalysisContext): script_content.append(cmd_args(output, format = "mkdir {}")) else: script_content.append(cmd_args(output, format = "mkdir -p {}")) - script_content.extend([ - cmd_args(output, format = "cd {}"), - cmd_args(["tar", compress_flag, "-x", "-f", archive], delimiter = " ").relative_to(output), - ]) + if host_info().os.is_windows: + script_content.extend([ + cmd_args(output, format = "cd {}"), + cmd_args(["unzip", archive], delimiter = " ", relative_to = output), + ]) + else: + script_content.extend([ + cmd_args(output, format = "cd {}"), + cmd_args(["tar", compress_flag, "-x", "-f", archive], delimiter = " ", relative_to = output), + ]) script, _ = ctx.actions.write( script_name, script_content, @@ -68,18 +74,19 @@ def _download_toolchain(ctx: AnalysisContext): allow_args = True, ) - if host_info().os.is_windows: - cmd = cmd_args([script]) - else: - cmd = cmd_args(["/bin/sh", script]) + cmd = cmd_args( + ([] if host_info().os.is_windows else ["/bin/sh"]) + [script], + hidden = 
[archive, output.as_output()], + ) - ctx.actions.run(cmd.hidden([archive, output.as_output()]), category = "extract_go_toolchain") + ctx.actions.run(cmd, category = "extract_go_toolchain") return output def _toolchain_config(): version = "1.20.7" os = host_info().os + arch = host_info().arch if os.is_windows: return struct( sha256 = "736dc6c7fcab1c96b682c8c93e38d7e371e62a17d34cb2c37d451a1147f66af9", platform = "windows-amd64", archive_extension = "zip", version = version, ) if os.is_macos: - return struct( - sha256 = "eea1e7e4c2f75c72629050e6a6c7c46c446d64056732a7787fb3ba16ace1982e", - platform = "darwin-arm64", - archive_extension = "tar.gz", - version = version, - ) + if arch.is_aarch64: + return struct( + sha256 = "eea1e7e4c2f75c72629050e6a6c7c46c446d64056732a7787fb3ba16ace1982e", + platform = "darwin-arm64", + archive_extension = "tar.gz", + version = version, + ) + elif arch.is_x86_64: + return struct( + sha256 = "785170eab380a8985d53896808b0a71336d0ea60e0a26099b4ccec77798b1cf4", + platform = "darwin-amd64", + archive_extension = "tar.gz", + version = version, + ) + else: + fail("unrecognized architecture: couldn't select macOS go toolchain") # Default linux return struct( diff --git a/examples/remote_execution/buildbarn/.buckconfig b/examples/remote_execution/buildbarn/.buckconfig index 26aad816249e4..cd5023bc576bf 100644 --- a/examples/remote_execution/buildbarn/.buckconfig +++ b/examples/remote_execution/buildbarn/.buckconfig @@ -1,6 +1,5 @@ -[repositories] +[cells] root = . -prelude = prelude [buck2_re_client] action_cache_address = grpc://localhost:8980 diff --git a/examples/remote_execution/buildbarn/README.md b/examples/remote_execution/buildbarn/README.md index 2dbe380234618..be2506315fee3 100644 --- a/examples/remote_execution/buildbarn/README.md +++ b/examples/remote_execution/buildbarn/README.md @@ -1,12 +1,46 @@ -## Remote execution integration with Buildbarn +# Remote execution integration with Buildbarn -This project provides a small example of what a project that utilizes [Buildbarn](https://github.com/buildbarn). +This project provides a small example of what a project that utilizes +[Buildbarn](https://github.com/buildbarn) might look like. -In this document, we will go over the key configs used in this setup. -Using a local docker-compose deployment from the [example deployment repo](https://github.com/buildbarn/bb-deployments). -If you already have a Buildbarn deployment you can skip that. +In this document, we will go over the key configs used in this setup, using a +local docker-compose deployment from the +[example deployment repo](https://github.com/buildbarn/bb-deployments). If you +already have a `Buildbarn` deployment you can use that instead. -### Deploy a local Buildbarn +## Deploy a local Buildbarn + +### Set up a basic PATH + +`Buildbarn` runs all actions in a completely bare environment by default, and +relies on the build client to specify the `$PATH` and other environment +variables in the REAPI action message. As `buck2` does not send a default +`$PATH`, builds will fail with errors about missing executables. + +Add the environment variables to the worker config you want - normally either +`docker-compose/config/worker-fuse-ubuntu22-04.jsonnet` or +`docker-compose/config/worker-hardlinking-ubuntu22-04.jsonnet` - after the +`workerId` field like this: + +``` + { ... + buildDirectories: [{ ... + runners: [{ + endpoint: { address: 'unix:///worker/runner' }, + platform: { + properties: [ ... ], + }, + workerId: { ... 
}, ++ environment_variables: { ++ PATH: '/bin:/usr/bin:/usr/local/bin', ++ }, + }], + }], + filePool: { ... }, + } +``` + +### Deploy ``` ... $ git clone https://github.com/buildbarn/bb-deployments @@ -15,14 +49,14 @@ If you already have a Buildbarn deployment you can skip that. .../bb-deployments/docker-compose $ ./run.sh ``` -This uses `docker-compose` to spin up the required Buildbarn services. -Using FUSE based workers, those are generally the fastest as they can load action files on demand -and avoids the overhead of setting up the full input root up front. -In practice many actions do not read all the files in the input root. +This uses `docker-compose` to spin up the required `Buildbarn` services with +FUSE-based workers, which are generally the fastest as they can load action +files on demand and avoid the overhead of setting up the full input root up +front. In practice many actions do not read all the files in the input root. If you do not want FUSE workers, you can instead switch to hardlinking workers. -The example deployments have two worker kinds "fuse", and "hardlinking", -you can see the queues in the Buildbarn scheduler, http://localhost:7982. +The example deployments have two worker kinds, "fuse" and "hardlinking"; you can +see the queues in the `Buildbarn` scheduler at http://localhost:7982. ``` Buildbarn Scheduler @@ -38,11 +72,12 @@ Instance name Platform properties ubuntu:act-22.04@sha256:5f9c35c25db1d51a8ddaae5c0ba8d3c163c5e9a4a6cc97acd409ac7eae239448" ``` -More information is available in the [repo](https://github.com/buildbarn/bb-deployments). +More information is available in the +[repo](https://github.com/buildbarn/bb-deployments). -### Relevant configs in .buckconfig +## Relevant configs in .buckconfig -First, the Buildbarn endpoint should be configured as the following: +Configure the `Buildbarn` endpoint as follows: ```ini [buck2_re_client] @@ -57,7 +92,7 @@ instance_name = fuse TLS is not used in this example. -### Relevant configs in `ExecutionPlatformInfo` +## Relevant configs in `ExecutionPlatformInfo` -Buildbarn takes in a Docker image and `OSFamily` in its RE properties to select a worker. -This is configured in `root//platforms:platforms`. +`Buildbarn` takes in a Docker image and `OSFamily` in its RE properties to +select a worker. This is configured in `root//platforms:platforms`. diff --git a/examples/remote_execution/buildbuddy/.buckconfig b/examples/remote_execution/buildbuddy/.buckconfig index d0e7ba1f32676..eab391bc083e0 100644 --- a/examples/remote_execution/buildbuddy/.buckconfig +++ b/examples/remote_execution/buildbuddy/.buckconfig @@ -1,6 +1,5 @@ -[repositories] +[cells] root = . -prelude = prelude [buck2] digest_algorithms = SHA256 diff --git a/examples/remote_execution/buildbuddy/README.md b/examples/remote_execution/buildbuddy/README.md index 011a9398b3ed1..f50c9cbb5cc8e 100644 --- a/examples/remote_execution/buildbuddy/README.md +++ b/examples/remote_execution/buildbuddy/README.md @@ -1,12 +1,14 @@ ## Remote execution integration with BuildBuddy -This project provides a small example of what a project that utilizies [BuildBuddy](https://www.buildbuddy.io/)'s RE might look like. +This project provides a small example of what a project that utilizes +[BuildBuddy](https://www.buildbuddy.io/)'s RE might look like. In this document, we will go over the key configs used in this setup. 
### Relevant configs in .buckconfig -First, the BuildBuddy endpoint and api key should be configured as the following: +First, the BuildBuddy endpoint and API key should be configured as +follows: ```ini [buck2_re_client] @@ -18,5 +20,7 @@ http_headers = x-buildbuddy-api-key:$BUILDBUDDY_API_KEY ### Relevant configs in `ExecutionPlatformInfo` -BuildBuddy takes in a Docker image and OSFamily in its execution platform's execution properties(`exec_properties`) to select an executor. -The execution platform used in this project `root//platforms:platforms` uses the `container-image` key to set this up. +BuildBuddy takes in a Docker image and OSFamily in its execution platform's +execution properties (`exec_properties`) to select an executor. The execution +platform used in this project, `root//platforms:platforms`, uses the +`container-image` key to set this up. diff --git a/examples/remote_execution/engflow/.buckconfig b/examples/remote_execution/engflow/.buckconfig index 00ec74a6c1ac8..ddfd96236566a 100644 --- a/examples/remote_execution/engflow/.buckconfig +++ b/examples/remote_execution/engflow/.buckconfig @@ -1,6 +1,5 @@ -[repositories] +[cells] root = . -prelude = prelude [buck2] # Adjust as needed. diff --git a/examples/remote_execution/engflow/README.md b/examples/remote_execution/engflow/README.md index c84964edfbd94..383f629c0121a 100644 --- a/examples/remote_execution/engflow/README.md +++ b/examples/remote_execution/engflow/README.md @@ -1,12 +1,14 @@ ## Remote execution integration with EngFlow -This project provides a small example of what a project that utilizes [EngFlow](https://www.engflow.com/)'s RE offering might look like. +This project provides a small example of what a project that utilizes +[EngFlow](https://www.engflow.com/)'s RE offering might look like. In this document, we will go over the key configs used in this setup. ### Relevant configs in .buckconfig -First, the EngFlow endpoint and certificate should be configured as the following: +First, the EngFlow endpoint and certificate should be configured as +follows: ```ini [buck2_re_client] @@ -17,6 +19,7 @@ tls_client_cert = $ENGFLOW_CERTIFICATE ``` Additionally, set the `digest_algorithms` config to `SHA256`. + ```ini [buck2] digest_algorithms = SHA256 @@ -24,5 +27,6 @@ digest_algorithms = SHA256 ### Relevant configs in `ExecutionPlatformInfo` -EngFlow takes in a Docker image as its execution platform. -The execution platform used in this project `root//platforms:platforms` uses the `container-image` key to set this up. +EngFlow takes in a Docker image as its execution platform. The execution +platform used in this project, `root//platforms:platforms`, uses the +`container-image` key to set this up. diff --git a/examples/remote_execution/internal/.buckconfig b/examples/remote_execution/internal/.buckconfig index be4fa31baf262..eaa191790cb4a 100644 --- a/examples/remote_execution/internal/.buckconfig +++ b/examples/remote_execution/internal/.buckconfig @@ -1,6 +1,5 @@ -[repositories] +[cells] root = . -prelude = prelude [buck2] # Adjust as needed. diff --git a/examples/remote_execution/nativelink/.buckconfig b/examples/remote_execution/nativelink/.buckconfig new file mode 100644 index 0000000000000..1ade56ec8fe78 --- /dev/null +++ b/examples/remote_execution/nativelink/.buckconfig @@ -0,0 +1,12 @@ +[cells] +root = . 
+ +[buck2_re_client] +action_cache_address = grpc://localhost:50051 +engine_address = grpc://localhost:50051 +cas_address = grpc://localhost:50051 +tls = false +instance_name = main + +[build] + execution_platforms = root//platforms:platforms diff --git a/examples/remote_execution/nativelink/.buckroot b/examples/remote_execution/nativelink/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/examples/remote_execution/nativelink/.gitignore b/examples/remote_execution/nativelink/.gitignore new file mode 100644 index 0000000000000..d60c5d299b29c --- /dev/null +++ b/examples/remote_execution/nativelink/.gitignore @@ -0,0 +1 @@ +buck-out diff --git a/examples/remote_execution/nativelink/README.md b/examples/remote_execution/nativelink/README.md new file mode 100644 index 0000000000000..87a1224651adf --- /dev/null +++ b/examples/remote_execution/nativelink/README.md @@ -0,0 +1,62 @@ +# Remote execution integration with NativeLink + +This project provides a small example of what a project that utilizes +[NativeLink](https://github.com/Tracemachina/nativelink) might look like. + +In this document, we will go over the key configs used in this setup. If you +already have a `NativeLink` deployment you can use that instead. + +## Deploy a local NativeLink + +### 📦 Installing with Cargo + +1. First, install Rust, but skip to step 2 if you have it already. + +```bash +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +``` + +2. Install NativeLink with Cargo. + +```bash +cargo install --git https://github.com/TraceMachina/nativelink --tag v0.4.0 +``` + +### ⚙️ Configure and 🦾 Start NativeLink + +The `nativelink` executable reads a JSON file as its only parameter, +`--config`. See +[`nativelink-config`](https://github.com/TraceMachina/nativelink/tree/main/nativelink-config/examples/basic_cas.json) +for more details and examples. + +To grab the example in your current working directory, run: + +```bash +curl -O https://raw.githubusercontent.com/TraceMachina/nativelink/main/nativelink-config/examples/basic_cas.json + +### You can modify the example above to replace the filesystem store with the memory store if you favor speed over data durability. +nativelink basic_cas.json +``` + +More information is available in the +[repo](https://github.com/Tracemachina/nativelink). + +## Relevant configs in .buckconfig + +Configure the `NativeLink` endpoint as follows: + +```ini +[buck2_re_client] +action_cache_address = grpc://localhost:50051 +engine_address = grpc://localhost:50051 +cas_address = grpc://localhost:50051 +tls = false +instance_name = main +``` + +TLS is not used in this example. + +## Relevant configs in `ExecutionPlatformInfo` + +`NativeLink` takes in a Docker image and `OSFamily` in its RE properties to +select a worker. This is configured in `root//platforms:platforms`. diff --git a/examples/remote_execution/nativelink/platforms/BUCK b/examples/remote_execution/nativelink/platforms/BUCK new file mode 100644 index 0000000000000..63f852afecbda --- /dev/null +++ b/examples/remote_execution/nativelink/platforms/BUCK @@ -0,0 +1,3 @@ +load(":defs.bzl", "platforms") + +platforms(name = "platforms") diff --git a/examples/remote_execution/nativelink/platforms/defs.bzl b/examples/remote_execution/nativelink/platforms/defs.bzl new file mode 100644 index 0000000000000..754201cbbeea8 --- /dev/null +++ b/examples/remote_execution/nativelink/platforms/defs.bzl @@ -0,0 +1,33 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _platforms(ctx): + configuration = ConfigurationInfo( + constraints = {}, + values = {}, + ) + + platform = ExecutionPlatformInfo( + label = ctx.label.raw_target(), + configuration = configuration, + executor_config = CommandExecutorConfig( + local_enabled = True, + remote_enabled = True, + use_limited_hybrid = True, + # Set those up based on what workers you've registered with NativeLink. + remote_execution_properties = { + "OSFamily": "linux", + "container-image": "docker://ghcr.io/catthehacker/ubuntu:act-22.04@sha256:5f9c35c25db1d51a8ddaae5c0ba8d3c163c5e9a4a6cc97acd409ac7eae239448", + }, + remote_execution_use_case = "buck2-default", + remote_output_paths = "output_paths", + ), + ) + + return [DefaultInfo(), ExecutionPlatformRegistrationInfo(platforms = [platform])] + +platforms = rule(attrs = {}, impl = _platforms) diff --git a/examples/remote_execution/nativelink/tests/BUCK b/examples/remote_execution/nativelink/tests/BUCK new file mode 100644 index 0000000000000..8200f35ccd441 --- /dev/null +++ b/examples/remote_execution/nativelink/tests/BUCK @@ -0,0 +1,3 @@ +load(":defs.bzl", "tests") + +tests(name = "tests") diff --git a/examples/remote_execution/nativelink/tests/defs.bzl b/examples/remote_execution/nativelink/tests/defs.bzl new file mode 100644 index 0000000000000..3533c234c001c --- /dev/null +++ b/examples/remote_execution/nativelink/tests/defs.bzl @@ -0,0 +1,50 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _tests(ctx): + # Create locally + stage0 = ctx.actions.declare_output("stage0") + ctx.actions.run( + ["sh", "-c", 'head -c 10 /dev/urandom > "$1"', "--", stage0.as_output()], + category = "stage0", + local_only = True, + ) + + # Use on RE + stage1 = ctx.actions.declare_output("stage1") + ctx.actions.run(["sh", "-c", 'cat "$1" "$1" > "$2"', "--", stage0, stage1.as_output()], category = "stage1") + + # Reuse on RE + stage2 = ctx.actions.declare_output("stage2") + ctx.actions.run(["sh", "-c", 'cat "$1" "$1" > "$2"', "--", stage1, stage2.as_output()], category = "stage2") + + # Reuse locally + stage3 = ctx.actions.declare_output("stage3") + ctx.actions.run( + ["sh", "-c", 'cat "$1" "$1" > "$2"', "--", stage2, stage3.as_output()], + category = "stage3", + local_only = True, + ) + + # Verify + stage4 = ctx.actions.declare_output("stage4") + ctx.actions.run( + [ + "sh", + "-c", + 'cat "$1" "$1" "$1" "$1" "$1" "$1" "$1" "$1" > "$3" && diff "$2" "$3"', + "--", + stage0, + stage3, + stage4.as_output(), + ], + category = "stage4", + ) + + return [DefaultInfo(stage4)] + +tests = rule(attrs = {}, impl = _tests) diff --git a/examples/toolchains/conan_toolchain/.buckconfig b/examples/toolchains/conan_toolchain/.buckconfig new file mode 100644 index 0000000000000..d1340351d85d6 --- /dev/null +++ b/examples/toolchains/conan_toolchain/.buckconfig @@ -0,0 +1,18 @@ +[cells] +self = . 
+prelude = prelude +toolchains = toolchains +none = none + +[cell_aliases] +config = prelude +buck = none +fbcode = none +fbcode_macros = none +fbsource = none + +[external_cells] + prelude = bundled + +[parser] +target_platform_detector_spec = target://...->prelude//platforms:default diff --git a/examples/toolchains/conan_toolchain/.buckroot b/examples/toolchains/conan_toolchain/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/examples/toolchains/conan_toolchain/cpp/conan/BUCK b/examples/toolchains/conan_toolchain/cpp/conan/BUCK new file mode 100644 index 0000000000000..08aacddc8b78c --- /dev/null +++ b/examples/toolchains/conan_toolchain/cpp/conan/BUCK @@ -0,0 +1,54 @@ +load( + "@prelude//toolchains/conan:defs.bzl", + "conan_generate", + "conan_lock", + "conan_update", + "lock_generate", +) +load("//test_utils.bzl", "assert_output") + +conan_lock( + name = "lock", + conanfile = "conanfile.txt", + visibility = ["//cpp/conan/import:"], +) + +lock_generate( + name = "lock-generate", + lockfile = ":lock", +) + +# TODO[AH] Prevent double build of packages. +# This rule builds or fetches all transitive dependencies defined by the +# conanfile in order to generate the import targets. These packages will +# later be built or fetched again, one by one, as their corresponding +# conan_package targets. Avoid this double building. Mark this target manual, +# or don't tie the generation to its default output. +conan_generate( + name = "conan-generate", + conanfile = "conanfile.txt", + lockfile = ":lock", +) + +conan_update( + name = "update", + conan_generate = ":conan-generate", + conanfile = "conanfile.txt", + lock_generate = ":lock-generate", + lockfile = ":lock", + lockfile_name = "conan.lock", + targets_name = "import/BUCK", +) + +cxx_binary( + name = "main", + srcs = ["main.cpp"], + link_style = "static", + deps = ["//cpp/conan/import:zlib"], +) + +assert_output( + name = "check_main", + command = "$(exe_target :main)", + output = "395248644", +) diff --git a/examples/toolchains/conan_toolchain/cpp/conan/conanfile.txt b/examples/toolchains/conan_toolchain/cpp/conan/conanfile.txt new file mode 100644 index 0000000000000..25ce9313e7027 --- /dev/null +++ b/examples/toolchains/conan_toolchain/cpp/conan/conanfile.txt @@ -0,0 +1,2 @@ +[requires] +zlib/1.2.13 diff --git a/examples/toolchains/conan_toolchain/cpp/conan/import/BUCK b/examples/toolchains/conan_toolchain/cpp/conan/import/BUCK new file mode 100644 index 0000000000000..fbfc1f3b088b2 --- /dev/null +++ b/examples/toolchains/conan_toolchain/cpp/conan/import/BUCK @@ -0,0 +1,38 @@ +# @generated +# Update using `buck2 run self//cpp/conan:update` + +load( + "@prelude//toolchains/conan:defs.bzl", + "conan_component", + "conan_dep", + "conan_package", +) + +conan_package( + name = '_package_zlib', + lockfile = 'self//cpp/conan:lock', + reference = 'zlib/1.2.13', + package_id = 'c10a1a185befd155ccf9af892387d3946f445cf6', + deps = [], + build_deps = [], +) + +conan_dep( + name = 'zlib', + components = {'zlib': ':_component_zlib_zlib'}, + visibility = ['PUBLIC'], +) + +conan_component( + name = '_component_zlib_zlib', + defines = [], + cflags = [], + cppflags = [], + include_paths = ['include'], + libs = ['z'], + static_libs = {'z': ['lib/libz.a']}, + shared_libs = {}, + system_libs = [], + deps = [], + package = ':_package_zlib', +) diff --git a/examples/toolchains/conan_toolchain/cpp/conan/main.cpp b/examples/toolchains/conan_toolchain/cpp/conan/main.cpp new file mode 100644 index 0000000000000..879e29e1f56c4 --- 
/dev/null +++ b/examples/toolchains/conan_toolchain/cpp/conan/main.cpp @@ -0,0 +1,21 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +#include <cstring> +#include <iostream> +#include <zlib.h> + +int main() { + uLong adler = adler32(0L, Z_NULL, 0); + + const char msg[] = "Hello Conan"; + adler = adler32(adler, reinterpret_cast<const Bytef *>(msg), strlen(msg)); + + std::cout << adler << std::endl; +} diff --git a/examples/toolchains/conan_toolchain/test_utils.bzl b/examples/toolchains/conan_toolchain/test_utils.bzl new file mode 100644 index 0000000000000..139272e38fc4d --- /dev/null +++ b/examples/toolchains/conan_toolchain/test_utils.bzl @@ -0,0 +1,14 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def assert_output(name, command, output): + return native.genrule( + name = name, + bash = command + " | grep \"" + output + "\" && touch \"$OUT\"", + cmd_exe = command + " | findstr \"" + output + "\" && type nul > \"$OUT\"", + out = "out.txt", + ) diff --git a/examples/toolchains/conan_toolchain/toolchains/BUCK b/examples/toolchains/conan_toolchain/toolchains/BUCK new file mode 100644 index 0000000000000..58aea38a67e62 --- /dev/null +++ b/examples/toolchains/conan_toolchain/toolchains/BUCK @@ -0,0 +1,70 @@ +load("@prelude//toolchains:demo.bzl", "system_demo_toolchains") +load("@prelude//toolchains/conan:defs.bzl", "conan_init", "conan_profile", "system_conan_toolchain") + +system_conan_toolchain( + name = "conan", + conan_path = "conan", + visibility = ["PUBLIC"], +) + +conan_profile( + name = "conan-profile-linux", + arch = "x86_64", + build_type = "Release", + compiler = "gcc", + compiler_libcxx = "libstdc++", + compiler_version = "11.3", + os = "Linux", +) + +conan_profile( + name = "conan-profile-macos-x86_64", + arch = "x86_64", + build_type = "Release", + compiler = "clang", + compiler_libcxx = "libc++", + compiler_version = "15", + os = "Macos", +) + +conan_profile( + name = "conan-profile-macos-arm64", + arch = "armv8", + build_type = "Release", + compiler = "clang", + compiler_libcxx = "libc++", + compiler_version = "15", + os = "Macos", +) + +conan_profile( + name = "conan-profile-windows", + arch = "x86_64", + build_type = "Release", + compiler = "gcc", + compiler_libcxx = "libstdc++", + compiler_version = "11.3", + os = "Windows", +) + +alias( + name = "conan-profile", + actual = select({ + "prelude//os:linux": ":conan-profile-linux", + "prelude//os:macos": select({ + "prelude//cpu:arm64": ":conan-profile-macos-arm64", + "prelude//cpu:x86_64": ":conan-profile-macos-x86_64", + }), + "prelude//os:windows": ":conan-profile-windows", + }), +) + +conan_init( + name = "conan-init", + profile = ":conan-profile", + visibility = ["PUBLIC"], +) + +# All the default toolchains, suitable for a quick demo or early prototyping. +# Most real projects should copy/paste the implementation to configure them. 
+system_demo_toolchains() diff --git a/examples/toolchains/cxx_zig_toolchain/.buckconfig b/examples/toolchains/cxx_zig_toolchain/.buckconfig index 2d4cffb711d5a..d1340351d85d6 100644 --- a/examples/toolchains/cxx_zig_toolchain/.buckconfig +++ b/examples/toolchains/cxx_zig_toolchain/.buckconfig @@ -1,15 +1,18 @@ -[repositories] +[cells] self = . prelude = prelude toolchains = toolchains none = none -[repository_aliases] +[cell_aliases] config = prelude buck = none fbcode = none fbcode_macros = none fbsource = none +[external_cells] + prelude = bundled + [parser] target_platform_detector_spec = target://...->prelude//platforms:default diff --git a/examples/toolchains/cxx_zig_toolchain/.buckroot b/examples/toolchains/cxx_zig_toolchain/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/examples/toolchains/cxx_zig_toolchain/toolchains/.buckconfig b/examples/toolchains/cxx_zig_toolchain/toolchains/.buckconfig index 2b6746c61de68..8c348f4d3d90d 100644 --- a/examples/toolchains/cxx_zig_toolchain/toolchains/.buckconfig +++ b/examples/toolchains/cxx_zig_toolchain/toolchains/.buckconfig @@ -1,6 +1,3 @@ -[repositories] +[cells] toolchains = . prelude = ../prelude - -[buildfile] -name = BUILD diff --git a/examples/toolchains/cxx_zig_toolchain/toolchains/BUCK b/examples/toolchains/cxx_zig_toolchain/toolchains/BUCK index 1e8ae34452b88..44f99d1e68f7c 100644 --- a/examples/toolchains/cxx_zig_toolchain/toolchains/BUCK +++ b/examples/toolchains/cxx_zig_toolchain/toolchains/BUCK @@ -1,11 +1,9 @@ +load("@prelude//toolchains:python.bzl", "system_python_bootstrap_toolchain") load("@prelude//toolchains/cxx/zig:defs.bzl", "cxx_zig_toolchain", "download_zig_distribution") -# TODO Replace by prelude toolchain once available -load("@toolchains//python:defs.bzl", "system_python_bootstrap_toolchain") - download_zig_distribution( name = "zig", - version = "0.9.1", + version = "0.11.0", ) cxx_zig_toolchain( diff --git a/examples/toolchains/cxx_zig_toolchain/toolchains/README.md b/examples/toolchains/cxx_zig_toolchain/toolchains/README.md index 990a4e6ff7952..0280fabf27649 100644 --- a/examples/toolchains/cxx_zig_toolchain/toolchains/README.md +++ b/examples/toolchains/cxx_zig_toolchain/toolchains/README.md @@ -1,15 +1,17 @@ This example tests the `zig cc` based self-contained C/C++ toolchain. To build it within the open source tree of buck2 you need to -* Create a symlink for the prelude + +- Create a symlink for the prelude ``` ln -s ../../../prelude prelude ``` -* Remove the top-level `.buckconfig` +- Remove the top-level `.buckconfig` ``` rm ../../../.buckconfig ``` -* Apply the following patch to the prelude +- Apply the following patch to the prelude + ``` diff --git a/prelude/cxx/tools/TARGETS.v2 b/prelude/cxx/tools/TARGETS.v2 index 2030d2f..5db1689 100644 diff --git a/examples/toolchains/cxx_zig_toolchain/toolchains/python/defs.bzl b/examples/toolchains/cxx_zig_toolchain/toolchains/python/defs.bzl deleted file mode 100644 index 2a02f927997d5..0000000000000 --- a/examples/toolchains/cxx_zig_toolchain/toolchains/python/defs.bzl +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under both the MIT license found in the -# LICENSE-MIT file in the root directory of this source tree and the Apache -# License, Version 2.0 found in the LICENSE-APACHE file in the root directory -# of this source tree. 
- -load( - "@prelude//python_bootstrap:python_bootstrap.bzl", - "PythonBootstrapToolchainInfo", -) - -def _system_python_bootstrap_toolchain(_ctx): - return [ - DefaultInfo(), - PythonBootstrapToolchainInfo( - interpreter = RunInfo(args = ["python3"]), - ), - ] - -system_python_bootstrap_toolchain = rule( - impl = _system_python_bootstrap_toolchain, - attrs = { - }, - is_toolchain_rule = True, -) diff --git a/examples/visual_studio/.buckconfig b/examples/visual_studio/.buckconfig new file mode 100644 index 0000000000000..a2b715d465306 --- /dev/null +++ b/examples/visual_studio/.buckconfig @@ -0,0 +1,28 @@ +[cells] +root = . +prelude = prelude +toolchains = toolchains +none = none + +[cell_aliases] +config = prelude +fbcode = none +fbsource = none +buck = none + +[external_cells] + prelude = bundled + + +[parser] +target_platform_detector_spec = target:root//...->root//buck2_utils/platforms:windows_debug + +[build] +execution_platforms = root//buck2_utils/platforms:default + +[buck2_re_client] +action_cache_address = grpc://localhost:8980 +engine_address = grpc://localhost:8980 +cas_address = grpc://localhost:8980 +tls = false +instance_name = fuse diff --git a/examples/visual_studio/.buckroot b/examples/visual_studio/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/examples/visual_studio/.gitignore b/examples/visual_studio/.gitignore new file mode 100644 index 0000000000000..714e9028dc05f --- /dev/null +++ b/examples/visual_studio/.gitignore @@ -0,0 +1,4 @@ +/buck-out +/compile_commands.json +/install +/.cache diff --git a/examples/visual_studio/.vscode/extensions.json b/examples/visual_studio/.vscode/extensions.json new file mode 100644 index 0000000000000..b8158f146e2e5 --- /dev/null +++ b/examples/visual_studio/.vscode/extensions.json @@ -0,0 +1,3 @@ +{ + "recommendations": ["llvm-vs-code-extensions.vscode-clangd", "vadimcn.vscode-lldb"] +} diff --git a/examples/visual_studio/.vscode/launch.json b/examples/visual_studio/.vscode/launch.json new file mode 100644 index 0000000000000..e7ea8093a2fb3 --- /dev/null +++ b/examples/visual_studio/.vscode/launch.json @@ -0,0 +1,13 @@ +{ + "version": "0.2.0", + "configurations": [ + { + "type": "lldb", + "request": "launch", + "name": "Debug main.exe", + "preLaunchTask": "Build and create compilation database", + "program": "${workspaceFolder}/install/main.exe", + "cwd": "${workspaceFolder}/install" + } + ] +} \ No newline at end of file diff --git a/examples/visual_studio/.vscode/settings.json b/examples/visual_studio/.vscode/settings.json new file mode 100644 index 0000000000000..3868e33919f46 --- /dev/null +++ b/examples/visual_studio/.vscode/settings.json @@ -0,0 +1,4 @@ +{ + "C_Cpp.intelliSenseEngine": "disabled", + "search.followSymlinks": false +} \ No newline at end of file diff --git a/examples/visual_studio/.vscode/tasks.json b/examples/visual_studio/.vscode/tasks.json new file mode 100644 index 0000000000000..37e2fd4d8b7e7 --- /dev/null +++ b/examples/visual_studio/.vscode/tasks.json @@ -0,0 +1,55 @@ +{ + "version": "2.0.0", + "tasks": [ + { + "label": "Build Main", + "type": "shell", + "command": "buck2", + "args": [ + "build", + "--console", + "simple", + "--no-interactive-console", + "--fail-fast", + "--show-full-output", + ":main", + "|", + "powershell", + "-ExecutionPolicy", + "Bypass", + "-File", + "buck2_utils/install.ps1", + "install" + ] + }, + { + "label": "Create compilation database", + "type": "shell", + "command": "buck2", + "args": [ + "bxl", + 
"buck2_utils/create_compile_commands.bxl:gen_compilation_database", + "--", + "--directory", + "${workspaceFolder}", + "|", + "powershell", + "-ExecutionPolicy", + "Bypass", + "-File", + "buck2_utils/copy.ps1" + ] + }, + { + "label": "Build and create compilation database", + "dependsOn": [ + "Create compilation database", + "Build Main" + ], + "group": { + "kind": "build", + "isDefault": true + } + } + ] +} \ No newline at end of file diff --git a/examples/visual_studio/BUCK b/examples/visual_studio/BUCK new file mode 100644 index 0000000000000..96db3aca90ae6 --- /dev/null +++ b/examples/visual_studio/BUCK @@ -0,0 +1,15 @@ +cxx_binary( + name = "main", + srcs = ["main.c"], + link_style = "shared", + resources = [":print[shared]"], + deps = [":print"], +) + +cxx_library( + name = "print", + srcs = ["library.cpp"], + exported_headers = glob(["**/*.hpp"]), + preprocessor_flags = ["-DLIBRARY_EXPORT"], + visibility = ["PUBLIC"], +) diff --git a/examples/visual_studio/README.md b/examples/visual_studio/README.md new file mode 100644 index 0000000000000..d4251f9afccd5 --- /dev/null +++ b/examples/visual_studio/README.md @@ -0,0 +1,11 @@ +## An example showing how Buck2 can be used in Visual Studio Code on Windows + +After completing the setup, below, click F5 to run with the debugger attached. +You can also use Ctrl + Shift + B to build. By default it compiles with the +"debug" configuration. To compile in release pass "-c release" to Buck2's build +command. + +## Setup + +Run `buck2 init --git`. Open this folder in Visual Studio Code and install the +recommended extensions. diff --git a/examples/visual_studio/buck2_utils/configuration/BUCK b/examples/visual_studio/buck2_utils/configuration/BUCK new file mode 100644 index 0000000000000..8c4dcb243e5a8 --- /dev/null +++ b/examples/visual_studio/buck2_utils/configuration/BUCK @@ -0,0 +1,16 @@ +constraint_setting( + name = "configuration", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "debug", + constraint_setting = ":configuration", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "release", + constraint_setting = ":configuration", + visibility = ["PUBLIC"], +) diff --git a/examples/visual_studio/buck2_utils/copy.ps1 b/examples/visual_studio/buck2_utils/copy.ps1 new file mode 100644 index 0000000000000..3045cb30837b8 --- /dev/null +++ b/examples/visual_studio/buck2_utils/copy.ps1 @@ -0,0 +1,12 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +param( + [parameter(Mandatory=$true)] [String] $FilePath +) + +New-Item -ItemType HardLink -Force -Path compile_commands.json -Target $FilePath.Trim() | Out-Null diff --git a/examples/visual_studio/buck2_utils/create_compile_commands.bxl b/examples/visual_studio/buck2_utils/create_compile_commands.bxl new file mode 100644 index 0000000000000..ba3e7cd9318c5 --- /dev/null +++ b/examples/visual_studio/buck2_utils/create_compile_commands.bxl @@ -0,0 +1,140 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +load("@prelude//:paths.bzl", "paths") + +def group_by_category(actions): + groups = {} + for a in actions: + category = a.attrs.category.value() + if category in groups: + groups[category].append(a) + else: + groups[category] = [a] + return groups + +def create_compilation_database_entry(directory, buildfile_folder, argsfiles, action): + # the "cmd" attribute looks like a list. For example, it might be something like: + # cmd = [cl.exe, -c, foo.cpp] + # but in reality this is quite literally the string "[cl.exe, -c, foo.cpp]". We need + # it as an actual list, so we have to employ this unfortunate hack of stripping the brackets, + # splitting on comma operator, and then stripping spaces from each entry to make an actual + # list. Hopefully this doesn't fail in any weird edge cases, but it seems to be the best we + # can do until buck2 upstream actually stores a Starlark list as the value of this attribute. + cmd = action.attrs.cmd.value() + cmd = cmd.strip("[]").split(",") + cmd = list(map(lambda x: x.strip(), cmd)) + + compiler = cmd[0].replace(".bat", ".exe") + + # Find the argsfile. On Windows it might contain backslashes, so convert those to forward + # slashes. Hopefully we don't have any filenames with actual backslashes in them. Then + # strip off the directory name so we just have the filename of the argsfile. + argsfile = filter(lambda x: x.startswith("@"), cmd)[0][1:] + identifier = paths.basename(argsfile.replace("\\", "/")) + + # Get the path of the source file relative to the project root by appending the identifier + # to the directory of the BUCK file. + project_rel_path = paths.join(buildfile_folder, action.attrs.identifier.value()) + + # Using the previously computed list of argfiles referenced by this target, find the one + # that this particular action references. + arguments = [] + if identifier in argsfiles: + arguments = argsfiles[identifier] + + # Now build the compilation database entry + entry = { + "arguments": [compiler] + arguments, + "directory": directory, + "file": project_rel_path, + } + return entry + +def gen_compilation_database_impl(ctx): + # Generate compilation database for all targets unless user requests a more narrow set + target_filter = ctx.cli_args.targets or "..." + targets = ctx.configured_targets(target_filter, target_platform = ctx.cli_args.platform) + aquery = ctx.aquery() + + entries = {} + for target in targets: + target_actions = list(aquery.all_actions(target.label)) + + # The individual compilation actions only identify the source files relative to where + # the BUCK file is. So to build a path relative to the root of the project, we need + # to get the BUCK file path and append the source file path to it. + buildfile_path = ctx.fs.project_rel_path(target.buildfile_path) + buildfile_folder = paths.dirname(buildfile_path) + + # In order to generate a compilation database entry for a file, we need this target + # to satisfy two conditions: + # 1. It has compilations + # 2. It writes an argsfile. + # + # The second requirement is necessary because the argsfile is where buck2 includes + # important command line arguments such as preprocessor definitions and include + # paths. If both conditions are not satisfied, we should skip this target because + # either it doesn't have source files anyway, or we don't understand how to write + # compilation database entries for them. 
+ actions_by_category = group_by_category(target_actions) + + if not "write" in actions_by_category: + continue + if not "cxx_compile" in actions_by_category and not "c_compile" in actions_by_category: + continue + + argsfiles = {} + + # There are lots of kinds of "write" actions, but we are interested specifically in argsfile + # write actions. This is because the source file compilation actions will reference them, + # so for each source file we need to map it back to the argsfile that its command line references + # so we can write those arguments to the compilation database entry. Build this mapping here. + for write_action in actions_by_category["write"]: + identifier = write_action.attrs.identifier.value() + if identifier.endswith(".argsfile"): + # The content of the argsfile is a newline separated list. We need a Starlark list. So + # split on newline. + arguments = write_action.attrs.contents.value() + arguments = arguments.split("\n") + + # For whatever reason, there are a lot of unnecessary quotes and double slashes. Probably + # something to do with the nature of the tools requiring shell-escaped values. Since we're + # storing this in a JSON array and the tool that processes the compilation database does its + # own escaping of these values, we can undo all of this. Convert double backslash to + # single forward slash, and remove quotes at the beginning and end of entries. + arguments = list(map(lambda x: x.strip('"').replace("\\\\", "/"), arguments)) + argsfiles[identifier] = arguments + + # Now walk each compilation action and generate a compilation database entry for it. + for action in actions_by_category.get("cxx_compile", []) + actions_by_category.get("c_compile", []): + entry = create_compilation_database_entry(ctx.cli_args.directory, buildfile_folder, argsfiles, action) + is_pic = entry["file"].endswith(" (pic)") + file_name = entry["file"].removesuffix(" (pic)") + entry["file"] = file_name + entry["is_pic"] = is_pic + existing_entry = entries.get(file_name) + if existing_entry == None or existing_entry["is_pic"]: + entries[file_name] = entry + + for entry in entries.values(): + entry.pop("is_pic") + + actions = ctx.bxl_actions(target_platform = ctx.cli_args.platform).actions + db_artifact = actions.write_json("compile_commands.json", entries.values()) + db_artifact_ensured = ctx.output.ensure(db_artifact) + + ctx.output.print(db_artifact_ensured) + +gen_compilation_database = bxl_main( + impl = gen_compilation_database_impl, + cli_args = { + "directory": cli_args.string(), + "platform": cli_args.option(cli_args.target_label()), + "targets": cli_args.option(cli_args.target_expr()), + }, +) diff --git a/examples/visual_studio/buck2_utils/install.ps1 b/examples/visual_studio/buck2_utils/install.ps1 new file mode 100644 index 0000000000000..dc45e1cf2cff9 --- /dev/null +++ b/examples/visual_studio/buck2_utils/install.ps1 @@ -0,0 +1,77 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
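+
+# Consumes `buck2 build --show-full-output` lines from the pipeline (each a
+# "<target> <output-path>" pair), copies the built executable into
+# $InstallDirectory, and copies any files or directories listed in the
+# executable's companion *.resources.json next to it.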
+ +param( + [parameter(Mandatory=$true)] [String] $InstallDirectory, + [parameter(Mandatory=$true,ValueFromPipeline=$true)] [String] $Buck2Output +) + +begin +{ + $ErrorActionPreference = "Stop" + + function Copy-Directory + { + param( + [parameter(Mandatory=$true)] [String] $SourcePath, + [parameter(Mandatory=$true)] [String] $TargetPath + ) + robocopy /MIR "$SourcePath" "$TargetPath" | Out-Null + } + + function Copy-File + { + param( + [parameter(Mandatory=$true)] [String] $SourcePath, + [parameter(Mandatory=$true)] [String] $TargetPath + ) + New-Item -ItemType File -Path $TargetPath -Force | Out-Null + Copy-Item $SourcePath -Destination $TargetPath | Out-Null + } +} +process +{ + $OutputSplit = $input.Split(" ") + $TargetPath = $OutputSplit[0].Trim() + $ExecutablePath = $OutputSplit[1].Trim() + $ResourcesPath = $ExecutablePath + ".resources.json" + + $CopySource = $ExecutablePath + $CopyTarget = Join-Path -Path $InstallDirectory -ChildPath (Split-Path -Leaf $ExecutablePath) + Copy-File $CopySource $CopyTarget + + try + { + $ResourcesContent = Get-Content $ResourcesPath -ErrorAction Stop | ConvertFrom-Json + $HasResources = $true + } + catch [System.Management.Automation.ItemNotFoundException] + { + $ResourcesContent = @() + $HasResources = $false + } + if($HasResources) + { + foreach($ResourceProperty in $ResourcesContent.PSObject.Properties) + { + $ResourceTargetPath = $ResourceProperty.Name + $ResourceSourcePath = $ResourceProperty.Value + $CopySource = Join-Path -Path (Split-Path -Parent $ExecutablePath) -ChildPath $ResourceSourcePath + $CopyTarget = Join-Path -Path $InstallDirectory -ChildPath $ResourceTargetPath + + $GetItemResult = Get-Item $CopySource + if($GetItemResult.PSIsContainer) + { + Copy-Directory $CopySource $CopyTarget + } + else + { + Copy-File $CopySource $CopyTarget + } + } + } +} diff --git a/examples/visual_studio/buck2_utils/platforms/BUCK b/examples/visual_studio/buck2_utils/platforms/BUCK new file mode 100644 index 0000000000000..934ffbdb5ef42 --- /dev/null +++ b/examples/visual_studio/buck2_utils/platforms/BUCK @@ -0,0 +1,40 @@ +load(":defs.bzl", "execution_platforms") + +oncall("build_infra") + +execution_platforms( + name = "default", + visibility = ["PUBLIC"], +) + +platform( + name = "windows_debug", + constraint_values = [ + "config//os/constraints:windows", + "root//buck2_utils/configuration:debug", + ], +) + +platform( + name = "windows_release", + constraint_values = [ + "config//os/constraints:windows", + "root//buck2_utils/configuration:release", + ], +) + +platform( + name = "linux_debug", + constraint_values = [ + "config//os/constraints:linux", + "root//buck2_utils/configuration:debug", + ], +) + +platform( + name = "linux_release", + constraint_values = [ + "config//os/constraints:linux", + "root//buck2_utils/configuration:release", + ], +) diff --git a/examples/visual_studio/buck2_utils/platforms/defs.bzl b/examples/visual_studio/buck2_utils/platforms/defs.bzl new file mode 100644 index 0000000000000..930456ba54809 --- /dev/null +++ b/examples/visual_studio/buck2_utils/platforms/defs.bzl @@ -0,0 +1,68 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
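+
+# Registers one execution platform per supported OS. Local execution is only
+# enabled on a matching host OS; remote execution is enabled when
+# `[buck2_re_client] enabled = true` is set in .buckconfig (see
+# is_remote_enabled below).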
+ +def is_remote_enabled() -> bool: + re_enabled = read_config("buck2_re_client", "enabled", "false") + return re_enabled == "true" + +def _execution_platforms_impl(ctx: AnalysisContext) -> list[Provider]: + is_re_enabled = is_remote_enabled() + + name = ctx.label.raw_target() + + constraints_windows = dict() + constraints_windows.update(ctx.attrs.os_configuration_windows[ConfigurationInfo].constraints) + constraints_linux = dict() + constraints_linux.update(ctx.attrs.os_configuration_linux[ConfigurationInfo].constraints) + + platforms_details = [ + ("windows", host_info().os.is_windows, constraints_windows), + ("linux", host_info().os.is_linux, constraints_linux), + ] + + platforms = [] + for platform_name, is_local_enabled, constraints in platforms_details: + if is_re_enabled or is_local_enabled: + platforms.append( + ExecutionPlatformInfo( + label = name, + configuration = ConfigurationInfo( + constraints = constraints, + values = {}, + ), + executor_config = CommandExecutorConfig( + local_enabled = is_local_enabled, + remote_enabled = is_re_enabled, + use_limited_hybrid = True, + remote_execution_properties = { + "OSFamily": platform_name, + "container-image": "docker://" + platform_name + "_build", + }, + remote_execution_use_case = "buck2-default", + use_windows_path_separators = platform_name == "windows", + ), + ), + ) + + return [ + DefaultInfo(), + ExecutionPlatformRegistrationInfo(platforms = platforms), + ] + +execution_platforms = rule( + impl = _execution_platforms_impl, + attrs = { + "os_configuration_linux": attrs.dep( + providers = [ConfigurationInfo], + default = "config//os:linux", + ), + "os_configuration_windows": attrs.dep( + providers = [ConfigurationInfo], + default = "config//os:windows", + ), + }, +) diff --git a/examples/visual_studio/library.cpp b/examples/visual_studio/library.cpp new file mode 100644 index 0000000000000..c833fce36ea7e --- /dev/null +++ b/examples/visual_studio/library.cpp @@ -0,0 +1,16 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +#include "library.hpp" + +#include <iostream> + +void print_hello() { + std::cout << "Hello from a C++ Buck2 program!" << std::endl; +} diff --git a/examples/visual_studio/library.hpp b/examples/visual_studio/library.hpp new file mode 100644 index 0000000000000..9850d4ee6dd22 --- /dev/null +++ b/examples/visual_studio/library.hpp @@ -0,0 +1,26 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +#ifdef _WIN32 +#ifdef LIBRARY_EXPORT +#define DLL_API __declspec(dllexport) +#else +#define DLL_API __declspec(dllimport) +#endif +#else +#define DLL_API +#endif + +#ifdef __cplusplus +extern "C" { +#endif +DLL_API void print_hello(); +#ifdef __cplusplus +} +#endif diff --git a/examples/visual_studio/main.c b/examples/visual_studio/main.c new file mode 100644 index 0000000000000..05915f3aa13cd --- /dev/null +++ b/examples/visual_studio/main.c @@ -0,0 +1,14 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +#include "library.hpp" + +int main() { + print_hello(); +} diff --git a/examples/visual_studio/toolchains/BUCK b/examples/visual_studio/toolchains/BUCK new file mode 100644 index 0000000000000..2b1937d2730d5 --- /dev/null +++ b/examples/visual_studio/toolchains/BUCK @@ -0,0 +1,136 @@ +load("@prelude//toolchains:cxx.bzl", "cxx_tools_info_toolchain") +load("@prelude//toolchains:python.bzl", "system_python_bootstrap_toolchain") +load("@prelude//toolchains/cxx/clang:tools.bzl", "path_clang_tools") +load("@prelude//toolchains/msvc:tools.bzl", "find_msvc_tools") +load("@root//buck2_utils/platforms:defs.bzl", "is_remote_enabled") + +oncall("build_infra") + +find_msvc_tools( + name = "msvc_tools", + target_compatible_with = ["config//os:windows"], + use_path_compilers = is_remote_enabled(), + use_path_linkers = is_remote_enabled() and not host_info().os.is_windows, + visibility = ["PUBLIC"], +) + +path_clang_tools( + name = "clang_tools", + target_compatible_with = ["config//os:linux"], + visibility = ["PUBLIC"], +) + +cxx_tools_info_toolchain( + name = "cxx", + #The flags in the below attributes' Windows config are copied from Visual Studio's project "Console App", with some changes listed in each attribute + c_flags = select({ + "config//os:linux": [], + #Copied from the C++ flags + "config//os:windows": [ + "/c", + "/Z7", + "/nologo", + "/W3", + "/WX-", + "/diagnostics:column", + "/sdl", + "/D_CONSOLE", + "/D_UNICODE", + "/DUNICODE", + "/EHsc", + "/Zc:forScope", + "/Zc:inline", + "/permissive-", + "/TC", + ] + select({ + "root//buck2_utils/configuration:debug": [ + "/Od", + "/D_DEBUG", + "/RTC1", + "/MDd", + ], + "root//buck2_utils/configuration:release": [ + "/O2", + "/Oi", + "/GL", + "/DNDEBUG", + "/MD", + "/Gy", + ], + }), + }), + cxx_flags = select({ + "config//os:linux": [], + #Added C++ 20 version flag + #Removed flags that are already the default: /GS, /fp:precise /Gd /Zc:wchar_t /Gm- + #Removed deprecated /errorReport:prompt + #Converted /ZI to /Z7 because I'm guessing /ZI won't play very well with remote execution + #Removed /FC because it probably doesn't work with remote execution + #Removed /external:W3 because it's redundant + #Removed /JMC because it doesn't work with lldb + "config//os:windows": [ + "/std:c++20", + "/c", + "/Z7", + "/nologo", + "/W3", + "/WX-", + "/diagnostics:column", + "/sdl", + "/D_CONSOLE", + "/D_UNICODE", + "/DUNICODE", + "/EHsc", + "/Zc:forScope", + "/Zc:inline", + "/permissive-", + "/TP", + ] + select({ + "root//buck2_utils/configuration:debug": [ + "/Od", + "/D_DEBUG", + "/RTC1", + "/MDd", + ], + "root//buck2_utils/configuration:release": [ + "/O2", + "/Oi", + "/GL", + "/DNDEBUG", + "/MD", + "/Gy", + ], + }), + }), + cxx_tools_info = select({ + "config//os:linux": ":clang_tools", + "config//os:windows": ":msvc_tools", + }), + link_flags = select({ + "config//os:linux": [], + #Removed flags that are already the default: /TLBID:1 /DYNAMICBASE /NXCOMPAT /ERRORREPORT:PROMPT /MANIFEST /MANIFESTUAC:"level='asInvoker' uiAccess='false'" /SUBSYSTEM:CONSOLE + #Removed /manifest:embed because I don't know what it does + #Removed default extra libs that are unnecessary most of the time: kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib 
odbc32.lib odbccp32.lib + #Added /INCREMENTAL:NO to force non-incremental mode + "config//os:windows": [ + "/NOLOGO", + "/DEBUG", + "/INCREMENTAL:NO", + ] + select({ + "root//buck2_utils/configuration:debug": [], + #Converted /LTCG:incremental to /LTCG because I'm guessing incremental actions won't work well with Buck2 + "root//buck2_utils/configuration:release": [ + "/OPT:REF", + "/OPT:ICF", + "/LTCG", + ], + }), + }), + link_style = "static", + visibility = ["PUBLIC"], +) + +system_python_bootstrap_toolchain( + name = "python_bootstrap", + visibility = ["PUBLIC"], +) diff --git a/examples/with_prelude/.buckconfig b/examples/with_prelude/.buckconfig index b80bc62d8e5ed..49063dec2535a 100644 --- a/examples/with_prelude/.buckconfig +++ b/examples/with_prelude/.buckconfig @@ -1,14 +1,17 @@ -[repositories] +[cells] root = . prelude = prelude toolchains = toolchains none = none -[repository_aliases] +[cell_aliases] config = prelude fbcode = none fbsource = none buck = none +[external_cells] + prelude = bundled + [parser] target_platform_detector_spec = target:root//...->prelude//platforms:default diff --git a/examples/with_prelude/README.md b/examples/with_prelude/README.md index 3eb4951dcaa6b..801800359ee67 100644 --- a/examples/with_prelude/README.md +++ b/examples/with_prelude/README.md @@ -17,7 +17,11 @@ Now all targets aside from OCaml related ones are ready to be built. The information in this section is (at this time) Linux and macOS specific. -The commands in `setup.sh` assume an activated [opam](https://opam.ocaml.org/) installation. Their effect is to create symlinks in the 'third-party/opam' directory. These symlinks support building the example OCaml targets. If any of the symlinks are found to already exist, they will not be overwritten. +The commands in `ocaml-setup.sh` assume an activated +[opam](https://opam.ocaml.org/) installation. Their effect is to create a +symlink in the 'third-party/opam' directory. This symlink supports building the +example OCaml targets. If the symlink is found to already exist, it will not be +overwritten. ## Sample commands diff --git a/examples/with_prelude/go/hello/BUCK b/examples/with_prelude/go/hello/BUCK new file mode 100644 index 0000000000000..3e50514847edc --- /dev/null +++ b/examples/with_prelude/go/hello/BUCK @@ -0,0 +1,10 @@ +_SUPPORTED = not host_info().os.is_windows + +# buildifier: disable=no-effect +go_binary( + name = "hello", + srcs = glob(["*.go"]), + deps = [ + "//go/hello/greeting:greeting", + ], +) if _SUPPORTED else None diff --git a/examples/with_prelude/go/hello/greeting/BUCK b/examples/with_prelude/go/hello/greeting/BUCK new file mode 100644 index 0000000000000..569a0beddaf99 --- /dev/null +++ b/examples/with_prelude/go/hello/greeting/BUCK @@ -0,0 +1,14 @@ +_SUPPORTED = not host_info().os.is_windows + +# buildifier: disable=no-effect +go_library( + name = "greeting", + srcs = glob(["*.go"]), + visibility = ["PUBLIC"], +) if _SUPPORTED else None + +# buildifier: disable=no-effect +go_test( + name = "greeting_test", + srcs = glob(["*.go"]), +) if _SUPPORTED else None diff --git a/examples/with_prelude/go/hello/greeting/greeting.go b/examples/with_prelude/go/hello/greeting/greeting.go new file mode 100644 index 0000000000000..0ca84dd23064e --- /dev/null +++ b/examples/with_prelude/go/hello/greeting/greeting.go @@ -0,0 +1,15 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +package greeting + +// Greeting returns a greeting message +func Greeting() string { + return "Hello, world!" +} diff --git a/examples/with_prelude/go/hello/greeting/greeting_test.go b/examples/with_prelude/go/hello/greeting/greeting_test.go new file mode 100644 index 0000000000000..be1fa5343d6f1 --- /dev/null +++ b/examples/with_prelude/go/hello/greeting/greeting_test.go @@ -0,0 +1,18 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +package greeting + +import "testing" + +func TestHello(t *testing.T) { + if Greeting() != "Hello, world!" { + t.Errorf("Greeting() = %v, want \"Hello, world!\"", Greeting()) + } +} diff --git a/examples/with_prelude/go/hello/main.go b/examples/with_prelude/go/hello/main.go new file mode 100644 index 0000000000000..454103ee5e361 --- /dev/null +++ b/examples/with_prelude/go/hello/main.go @@ -0,0 +1,20 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +package main + +import ( + "fmt" + + "go/hello/greeting" +) + +func main() { + fmt.Println(greeting.Greeting()) +} diff --git a/examples/with_prelude/haskell-setup.sh b/examples/with_prelude/haskell-setup.sh new file mode 100755 index 0000000000000..4b711a686e489 --- /dev/null +++ b/examples/with_prelude/haskell-setup.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +set -euo pipefail + +# Check for ghc-9.10.1. +if [ ! -e "$HOME/.ghcup/ghc/9.10.1/lib/ghc-9.10.1" ]; then + echo "$HOME/.ghcup/ghc/9.10.1/lib/ghc-9.10.1 does not exist. First \"ghcup install ghc 9.10.1\" and then try running $0 again." + exit 3 +fi + +# Link 'third-party/haskell/ghc'. +if [ ! -L third-party/haskell/ghc ]; then + (cd third-party/haskell && ln -s "$HOME/.ghcup/ghc/9.10.1/lib/ghc-9.10.1" ghc) +else + echo "Link 'third-party/haskell/ghc' exists. 
To overwrite it, first remove it and run $0 again" + exit 2 +fi diff --git a/examples/with_prelude/haskell/BUCK b/examples/with_prelude/haskell/BUCK new file mode 100644 index 0000000000000..eecf6c5e17f42 --- /dev/null +++ b/examples/with_prelude/haskell/BUCK @@ -0,0 +1,25 @@ +load("//test_utils.bzl", "assert_output", "haskell_binary", "haskell_library") + +_SUPPORTED = host_info().os.is_macos or host_info().os.is_linux + +# buildifier: disable=no-effect +haskell_library( + name = "library", + srcs = ["Library.hs"], +) if _SUPPORTED else None + +# buildifier: disable=no-effect +haskell_binary( + name = "main", + srcs = ["Main.hs"], + deps = [ + ":library", + ], +) if _SUPPORTED else None + +# buildifier: disable=no-effect +assert_output( + name = "hello-world-check", + command = "$(exe_target :main)", + output = "Hello World!", +) if _SUPPORTED else None diff --git a/examples/with_prelude/haskell/Library.hs b/examples/with_prelude/haskell/Library.hs new file mode 100644 index 0000000000000..dec76b1eba470 --- /dev/null +++ b/examples/with_prelude/haskell/Library.hs @@ -0,0 +1,13 @@ +{- + Copyright (c) Meta Platforms, Inc. and affiliates. + + This source code is licensed under both the MIT license found in the + LICENSE-MIT file in the root directory of this source tree and the Apache + License, Version 2.0 found in the LICENSE-APACHE file in the root directory + of this source tree. +-} + +module Library(helloWorld) where + +helloWorld :: String +helloWorld = "Hello World!" diff --git a/examples/with_prelude/haskell/Main.hs b/examples/with_prelude/haskell/Main.hs new file mode 100644 index 0000000000000..4f6c4a9a6aa69 --- /dev/null +++ b/examples/with_prelude/haskell/Main.hs @@ -0,0 +1,15 @@ +{- + Copyright (c) Meta Platforms, Inc. and affiliates. + + This source code is licensed under both the MIT license found in the + LICENSE-MIT file in the root directory of this source tree and the Apache + License, Version 2.0 found in the LICENSE-APACHE file in the root directory + of this source tree. +-} + +module Main(main) where + +import Library qualified + +main :: IO () +main = putStrLn Library.helloWorld diff --git a/examples/with_prelude/ocaml-setup.sh b/examples/with_prelude/ocaml-setup.sh index 57c0658c36ade..d81858fcec44a 100755 --- a/examples/with_prelude/ocaml-setup.sh +++ b/examples/with_prelude/ocaml-setup.sh @@ -27,21 +27,6 @@ if [ -z "$OPAM_SWITCH_PREFIX" ]; then fi set -u -# Check for ocamlopt.opt. -if ! command -v ocamlopt.opt &> /dev/null -then - echo "Failed to run 'ocamlopt.opt'." - exit 1 -fi - -# Link 'third-party/ocaml/standard_library'. -if [ ! -L third-party/ocaml/standard_library ]; then - (cd third-party/ocaml && ln -s "$(ocamlopt.opt -config | grep standard_library: | awk '{ print $2 }' )" standard_library) -else - echo "Link 'third-party/ocaml/standard_library' exists. To overwrite it, first remove it and run $0 again" - exit 2 -fi - # Link 'third-party/ocaml/opam'. if [ ! 
-L third-party/ocaml/opam ]; then (cd third-party/ocaml && ln -s "$OPAM_SWITCH_PREFIX" opam) diff --git a/examples/with_prelude/ocaml/calc/BUCK b/examples/with_prelude/ocaml/calc/BUCK index 1641dd9e40710..8b3a310c261c8 100644 --- a/examples/with_prelude/ocaml/calc/BUCK +++ b/examples/with_prelude/ocaml/calc/BUCK @@ -1,3 +1,5 @@ +_SUPPORTED = not host_info().os.is_windows + # buildifier: disable=no-effect ocaml_binary( name = "calc", @@ -6,4 +8,4 @@ ocaml_binary( "lexer.mll", "parser.mly", ], -) if not host_info().os.is_windows else None +) if _SUPPORTED else None diff --git a/examples/with_prelude/ocaml/embed/BUCK b/examples/with_prelude/ocaml/embed/BUCK index 113e594918ed9..cb2262fa2d493 100644 --- a/examples/with_prelude/ocaml/embed/BUCK +++ b/examples/with_prelude/ocaml/embed/BUCK @@ -1,10 +1,12 @@ load("//test_utils.bzl", "assert_output") +_SUPPORTED = not host_info().os.is_windows + # buildifier: disable=no-effect ocaml_object( name = "fib-ml", srcs = ["fib.ml"], -) if not host_info().os.is_windows else None +) if _SUPPORTED else None # buildifier: disable=no-effect cxx_binary( @@ -14,7 +16,7 @@ cxx_binary( ":fib-ml", "//third-party/ocaml:ocaml-dev", ], -) if not host_info().os.is_windows else None +) if _SUPPORTED else None # buildifier: disable=no-effect rust_binary( @@ -23,18 +25,18 @@ rust_binary( crate_root = "fib.rs", link_style = "static", deps = [":fib-ml"], -) if not host_info().os.is_windows else None +) if _SUPPORTED else None # buildifier: disable=no-effect assert_output( name = "check-fib-cpp", command = "$(exe_target :fib-cpp)", output = "fib(10) = Result is: 89", -) if not host_info().os.is_windows else None +) if _SUPPORTED else None # buildifier: disable=no-effect assert_output( name = "check-fib-rs", command = "$(exe_target :fib-rs)", output = "fib(10) = Result is: 89", -) if not host_info().os.is_windows else None +) if _SUPPORTED else None diff --git a/examples/with_prelude/ocaml/extend/BUCK b/examples/with_prelude/ocaml/extend/BUCK index 99f2e1e0cce6b..c13643d3fbe69 100644 --- a/examples/with_prelude/ocaml/extend/BUCK +++ b/examples/with_prelude/ocaml/extend/BUCK @@ -1,5 +1,7 @@ load("//test_utils.bzl", "assert_output") +_SUPPORTED = not host_info().os.is_windows + # buildifier: disable=no-effect ocaml_binary( name = "hello-c", @@ -7,7 +9,7 @@ ocaml_binary( "hello.ml", ], deps = [":hello-stubs-c"], -) if not host_info().os.is_windows else None +) if _SUPPORTED else None # buildifier: disable=no-effect cxx_library( @@ -16,7 +18,7 @@ cxx_library( "hello_stubs.c", ], deps = ["//third-party/ocaml:ocaml-dev"], -) if not host_info().os.is_windows else None +) if _SUPPORTED else None # buildifier: disable=no-effect ocaml_binary( @@ -25,7 +27,7 @@ ocaml_binary( "hello.ml", ], deps = [":hello-stubs-rs"], -) if not host_info().os.is_windows else None +) if _SUPPORTED else None # buildifier: disable=no-effect rust_library( @@ -34,18 +36,18 @@ rust_library( "hello_stubs.rs", ], crate_root = "hello_stubs.rs", -) if not host_info().os.is_windows else None +) if _SUPPORTED else None # buildifier: disable=no-effect assert_output( name = "check-hello-c", command = "$(exe_target :hello-c)", output = "Hello C", -) if not host_info().os.is_windows else None +) if _SUPPORTED else None # buildifier: disable=no-effect assert_output( name = "check-hello-rs", command = "$(exe_target :hello-rs)", output = "Hello Rust", -) if not host_info().os.is_windows else None +) if _SUPPORTED else None diff --git a/examples/with_prelude/ocaml/hello_world/BUCK 
b/examples/with_prelude/ocaml/hello_world/BUCK index 63fd74496fc94..46e2e71801226 100644 --- a/examples/with_prelude/ocaml/hello_world/BUCK +++ b/examples/with_prelude/ocaml/hello_world/BUCK @@ -1,22 +1,24 @@ load("//test_utils.bzl", "assert_output") +_SUPPORTED = not host_info().os.is_windows + # buildifier: disable=no-effect ocaml_binary( name = "hello-world", srcs = ["hello_world.ml"], deps = [":hello-world-lib"], -) if not host_info().os.is_windows else None +) if _SUPPORTED else None # buildifier: disable=no-effect ocaml_library( name = "hello-world-lib", srcs = ["hello_world_lib.ml"], visibility = ["PUBLIC"], -) if not host_info().os.is_windows else None +) if _SUPPORTED else None # buildifier: disable=no-effect assert_output( name = "hello-world-check", command = "$(exe_target :hello-world)", output = "Hello world!", -) if not host_info().os.is_windows else None +) if _SUPPORTED else None diff --git a/examples/with_prelude/ocaml/ppx/BUCK b/examples/with_prelude/ocaml/ppx/BUCK index 000b749610e4c..64a021e9035d3 100644 --- a/examples/with_prelude/ocaml/ppx/BUCK +++ b/examples/with_prelude/ocaml/ppx/BUCK @@ -1,5 +1,7 @@ load("//test_utils.bzl", "assert_output") +_SUPPORTED = not host_info().os.is_windows + # buildifier: disable=no-effect ocaml_library( name = "ppx-record-selectors", @@ -7,7 +9,7 @@ ocaml_library( deps = [ "//third-party/ocaml:ppxlib", ], -) if not host_info().os.is_windows else None +) if _SUPPORTED else None # buildifier: disable=no-effect ocaml_binary( @@ -19,7 +21,7 @@ ocaml_binary( deps = [ ":ppx-record-selectors", ], -) if not host_info().os.is_windows else None +) if _SUPPORTED else None # buildifier: disable=no-effect ocaml_binary( @@ -29,7 +31,7 @@ ocaml_binary( "-ppx", "$(exe_target :ppx) --as-ppx", ], -) if not host_info().os.is_windows else None +) if _SUPPORTED else None # [Note: Use the 'expand' sub-target to see the effects of # preprocessor expansion] @@ -53,4 +55,4 @@ assert_output( name = "ppx-record-selectors-test-check", command = "$(exe_target :ppx-record-selectors-test)", output = "4 quux", -) if not host_info().os.is_windows else None +) if _SUPPORTED else None diff --git a/examples/with_prelude/ocaml/wrap/BUCK b/examples/with_prelude/ocaml/wrap/BUCK index 33eb0626429f8..b04902388d874 100644 --- a/examples/with_prelude/ocaml/wrap/BUCK +++ b/examples/with_prelude/ocaml/wrap/BUCK @@ -1,5 +1,7 @@ load("//test_utils.bzl", "assert_output") +_SUPPORTED = not host_info().os.is_windows + export_file( name = "mylib.mli", src = "mylib.mli", @@ -22,7 +24,7 @@ ocaml_library( "-49", ], visibility = [":mylib"], -) if not host_info().os.is_windows else None +) if _SUPPORTED else None # buildifier: disable=no-effect ocaml_library( @@ -46,7 +48,7 @@ ocaml_library( ], visibility = ["PUBLIC"], deps = [":mylib__"], -) if not host_info().os.is_windows else None +) if _SUPPORTED else None # buildifier: disable=no-effect ocaml_binary( @@ -54,11 +56,11 @@ ocaml_binary( srcs = ["test_Mylib.ml"], visibility = ["PUBLIC"], deps = [":mylib"], -) if not host_info().os.is_windows else None +) if _SUPPORTED else None # buildifier: disable=no-effect assert_output( name = "test-Mylib-check", command = "$(exe_target :test-Mylib)", output = "Hello world!", -) if not host_info().os.is_windows else None +) if _SUPPORTED else None diff --git a/examples/with_prelude/ocaml/wrap/with-masking/BUCK b/examples/with_prelude/ocaml/wrap/with-masking/BUCK index f9f71aa9e3899..ef252822d453b 100644 --- a/examples/with_prelude/ocaml/wrap/with-masking/BUCK +++ 
b/examples/with_prelude/ocaml/wrap/with-masking/BUCK @@ -1,5 +1,7 @@ load("//test_utils.bzl", "assert_output") +_SUPPORTED = not host_info().os.is_windows + # buildifier: disable=no-effect export_file( name = "al__.mli", @@ -7,7 +9,7 @@ export_file( ":al__", ":al__imp", ], -) if not host_info().os.is_windows else None +) if _SUPPORTED else None # buildifier: disable=no-effect ocaml_library( @@ -18,7 +20,7 @@ ocaml_library( ], compiler_flags = ["-no-alias-deps"], visibility = [":al__imp"], -) if not host_info().os.is_windows else None +) if _SUPPORTED else None # buildifier: disable=no-effect ocaml_library( @@ -39,7 +41,7 @@ ocaml_library( ], visibility = [":al"], deps = [":al__"], -) if not host_info().os.is_windows else None +) if _SUPPORTED else None # buildifier: disable=no-effect ocaml_library( @@ -51,7 +53,7 @@ ocaml_library( ], visibility = ["PUBLIC"], deps = [":al__imp"], -) if not host_info().os.is_windows else None +) if _SUPPORTED else None # buildifier: disable=no-effect ocaml_binary( @@ -59,11 +61,11 @@ ocaml_binary( srcs = ["test_Al.ml"], visibility = ["PUBLIC"], deps = [":al"], -) if not host_info().os.is_windows else None +) if _SUPPORTED else None # buildifier: disable=no-effect assert_output( name = "test-Al-check", command = "$(exe_target :test-Al)", output = "Hello world!", -) if not host_info().os.is_windows else None +) if _SUPPORTED else None diff --git a/examples/with_prelude/test_utils.bzl b/examples/with_prelude/test_utils.bzl index 139272e38fc4d..087f826d1c15b 100644 --- a/examples/with_prelude/test_utils.bzl +++ b/examples/with_prelude/test_utils.bzl @@ -12,3 +12,17 @@ def assert_output(name, command, output): cmd_exe = command + " | findstr \"" + output + "\" && type nul > \"$OUT\"", out = "out.txt", ) + +def haskell_library(deps = [], **kwargs): + native.haskell_library( + deps = deps + ["//third-party/haskell:base"], + **kwargs + ) + +def haskell_binary(linker_flags = [], deps = [], **kwargs): + native.haskell_binary( + # Workaround for as yet not triaged runtime segfault. 
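+        # (Editor's note, assumption: GHC's "-dynamic" flag links the binary against the shared Haskell libraries instead of the static ones, which is what sidesteps the crash on macOS.)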
+ linker_flags = linker_flags + ["-dynamic"] if host_info().os.is_macos else linker_flags, + deps = deps + ["//third-party/haskell:base"], + **kwargs + ) diff --git a/examples/with_prelude/third-party/haskell/BUCK b/examples/with_prelude/third-party/haskell/BUCK new file mode 100644 index 0000000000000..20b18d638501a --- /dev/null +++ b/examples/with_prelude/third-party/haskell/BUCK @@ -0,0 +1,674 @@ +oncall("build_infra") + +_SUPPORTED = host_info().os.is_linux or host_info().os.is_macos + +config_setting( + name = "linux-x86_64", + constraint_values = [ + "config//cpu/constraints:x86_64", + "config//os/constraints:linux", + ], + visibility = ["PUBLIC"], +) + +config_setting( + name = "macos-arm64", + constraint_values = [ + "config//cpu/constraints:arm64", + "config//os/constraints:macos", + ], + visibility = ["PUBLIC"], +) + +config_setting( + name = "macos-x86_64", + constraint_values = [ + "config//cpu/constraints:x86_64", + "config//os/constraints:macos", + ], + visibility = ["PUBLIC"], +) + +# buildifier: disable=no-effect +haskell_prebuilt_library( + name = "rts", + cxx_header_dirs = select({ + ":linux-x86_64": ["ghc/lib/x86_64-linux-ghc-9.10.1/rts-1.0.2/include"], + ":macos-arm64": ["ghc/lib/aarch64-osx-ghc-9.10.1/rts-1.0.2/include"], + ":macos-x86_64": ["ghc/lib/x86_64-osx-ghc-9.10.1/rts-1.0.2/include"], + }), + db = "ghc/lib/package.conf.d", + exported_linker_flags = select({ + "config//os:linux": [ + "-Xlinker", + "-u", + "ghczminternal_GHCziInternalziTopHandler_runIO_closure", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziTopHandler_runNonIO_closure", + "-Xlinker", + "-u", + "-Xlinker", + "ghczmprim_GHCziTuple_Z0T_closure", + "-Xlinker", + "-u", + "-Xlinker", + "ghczmprim_GHCziTypes_True_closure", + "-Xlinker", + "-u", + "-Xlinker", + "ghczmprim_GHCziTypes_False_closure", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziPack_unpackCString_closure", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziWeakziFinalizze_runFinalizzerBatch_closure", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziIOziException_stackOverflow_closure", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziIOziException_heapOverflow_closure", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziIOziException_allocationLimitExceeded_closure", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziIOziException_blockedIndefinitelyOnMVar_closure", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziIOziException_blockedIndefinitelyOnSTM_closure", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziIOziException_cannotCompactFunction_closure", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziIOziException_cannotCompactPinned_closure", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziIOziException_cannotCompactMutable_closure", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziIOPort_doubleReadException_closure", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziControlziExceptionziBase_nonTermination_closure", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziControlziExceptionziBase_nestedAtomically_closure", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziEventziThread_blockedOnBadFD_closure", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziConcziSync_runSparks_closure", + "-Xlinker", + "-u", + "-Xlinker", + 
"ghczminternal_GHCziInternalziConcziIO_ensureIOManagerIsRunning_closure", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziConcziIO_interruptIOManager_closure", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziConcziIO_ioManagerCapabilitiesChanged_closure", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziConcziSignal_runHandlersPtr_closure", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziTopHandler_flushStdHandles_closure", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziTopHandler_runMainIO_closure", + "-Xlinker", + "-u", + "-Xlinker", + "ghczmprim_GHCziTypes_Czh_con_info", + "-Xlinker", + "-u", + "-Xlinker", + "ghczmprim_GHCziTypes_Izh_con_info", + "-Xlinker", + "-u", + "-Xlinker", + "ghczmprim_GHCziTypes_Fzh_con_info", + "-Xlinker", + "-u", + "-Xlinker", + "ghczmprim_GHCziTypes_Dzh_con_info", + "-Xlinker", + "-u", + "-Xlinker", + "ghczmprim_GHCziTypes_Wzh_con_info", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziPtr_Ptr_con_info", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziPtr_FunPtr_con_info", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziInt_I8zh_con_info", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziInt_I16zh_con_info", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziInt_I32zh_con_info", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziInt_I64zh_con_info", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziWord_W8zh_con_info", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziWord_W16zh_con_info", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziWord_W32zh_con_info", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziWord_W64zh_con_info", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziStable_StablePtr_con_info", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomic_add8", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomic_add16", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomic_add32", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomic_add64", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomic_sub8", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomic_sub16", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomic_sub32", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomic_sub64", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomic_and8", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomic_and16", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomic_and32", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomic_and64", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomic_nand8", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomic_nand16", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomic_nand32", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomic_nand64", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomic_or8", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomic_or16", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomic_or32", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomic_or64", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomic_xor8", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomic_xor16", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomic_xor32", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomic_xor64", + "-Xlinker", + "-u", + "-Xlinker", + "hs_cmpxchg8", + "-Xlinker", + "-u", + "-Xlinker", + "hs_cmpxchg16", + "-Xlinker", + "-u", + "-Xlinker", + "hs_cmpxchg32", + "-Xlinker", + "-u", + "-Xlinker", + "hs_cmpxchg64", + 
"-Xlinker", + "-u", + "-Xlinker", + "hs_xchg8", + "-Xlinker", + "-u", + "-Xlinker", + "hs_xchg16", + "-Xlinker", + "-u", + "-Xlinker", + "hs_xchg32", + "-Xlinker", + "-u", + "-Xlinker", + "hs_xchg64", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomicread8", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomicread16", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomicread32", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomicread64", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomicwrite8", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomicwrite16", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomicwrite32", + "-Xlinker", + "-u", + "-Xlinker", + "hs_atomicwrite64", + "-Xlinker", + "-u", + "-Xlinker", + "ghczminternal_GHCziInternalziStackziCloneStack_StackSnapshot_closure", + ], + "config//os:macos": [ + "-u", + "ghczminternal_GHCziInternalziTopHandler_runIO_closure", + "-u", + "ghczminternal_GHCziInternalziTopHandler_runNonIO_closure", + "-u", + "ghczmprim_GHCziTuple_Z0T_closure", + "-u", + "ghczmprim_GHCziTypes_True_closure", + "-u", + "ghczmprim_GHCziTypes_False_closure", + "-u", + "ghczminternal_GHCziInternalziPack_unpackCString_closure", + "-u", + "ghczminternal_GHCziInternalziWeakziFinalizze_runFinalizzerBatch_closure", + "-u", + "ghczminternal_GHCziInternalziIOziException_stackOverflow_closure", + "-u", + "ghczminternal_GHCziInternalziIOziException_heapOverflow_closure", + "-u", + "ghczminternal_GHCziInternalziIOziException_allocationLimitExceeded_closure", + "-u", + "ghczminternal_GHCziInternalziIOziException_blockedIndefinitelyOnMVar_closure", + "-u", + "ghczminternal_GHCziInternalziIOziException_blockedIndefinitelyOnSTM_closure", + "-u", + "ghczminternal_GHCziInternalziIOziException_cannotCompactFunction_closure", + "-u", + "ghczminternal_GHCziInternalziIOziException_cannotCompactPinned_closure", + "-u", + "ghczminternal_GHCziInternalziIOziException_cannotCompactMutable_closure", + "-u", + "ghczminternal_GHCziInternalziIOPort_doubleReadException_closure", + "-u", + "ghczminternal_GHCziInternalziControlziExceptionziBase_nonTermination_closure", + "-u", + "ghczminternal_GHCziInternalziControlziExceptionziBase_nestedAtomically_closure", + "-u", + "ghczminternal_GHCziInternalziEventziThread_blockedOnBadFD_closure", + "-u", + "ghczminternal_GHCziInternalziConcziSync_runSparks_closure", + "-u", + "ghczminternal_GHCziInternalziConcziIO_ensureIOManagerIsRunning_closure", + "-u", + "ghczminternal_GHCziInternalziConcziIO_interruptIOManager_closure", + "-u", + "ghczminternal_GHCziInternalziConcziIO_ioManagerCapabilitiesChanged_closure", + "-u", + "ghczminternal_GHCziInternalziConcziSignal_runHandlersPtr_closure", + "-u", + "ghczminternal_GHCziInternalziTopHandler_flushStdHandles_closure", + "-u", + "ghczminternal_GHCziInternalziTopHandler_runMainIO_closure", + "-u", + "ghczmprim_GHCziTypes_Czh_con_info", + "-u", + "ghczmprim_GHCziTypes_Izh_con_info", + "-u", + "ghczmprim_GHCziTypes_Fzh_con_info", + "-u", + "ghczmprim_GHCziTypes_Dzh_con_info", + "-u", + "ghczmprim_GHCziTypes_Wzh_con_info", + "-u", + "ghczminternal_GHCziInternalziPtr_Ptr_con_info", + "-u", + "ghczminternal_GHCziInternalziPtr_FunPtr_con_info", + "-u", + "ghczminternal_GHCziInternalziInt_I8zh_con_info", + "-u", + "ghczminternal_GHCziInternalziInt_I16zh_con_info", + "-u", + "ghczminternal_GHCziInternalziInt_I32zh_con_info", + "-u", + "ghczminternal_GHCziInternalziInt_I64zh_con_info", + "-u", + "ghczminternal_GHCziInternalziWord_W8zh_con_info", + "-u", + "ghczminternal_GHCziInternalziWord_W16zh_con_info", + "-u", + 
"ghczminternal_GHCziInternalziWord_W32zh_con_info", + "-u", + "ghczminternal_GHCziInternalziWord_W64zh_con_info", + "-u", + "ghczminternal_GHCziInternalziStable_StablePtr_con_info", + "-u", + "hs_atomic_add8", + "-u", + "hs_atomic_add16", + "-u", + "hs_atomic_add32", + "-u", + "hs_atomic_add64", + "-u", + "hs_atomic_sub8", + "-u", + "hs_atomic_sub16", + "-u", + "hs_atomic_sub32", + "-u", + "hs_atomic_sub64", + "-u", + "hs_atomic_and8", + "-u", + "hs_atomic_and16", + "-u", + "hs_atomic_and32", + "-u", + "hs_atomic_and64", + "-u", + "hs_atomic_nand8", + "-u", + "hs_atomic_nand16", + "-u", + "hs_atomic_nand32", + "-u", + "hs_atomic_nand64", + "-u", + "hs_atomic_or8", + "-u", + "hs_atomic_or16", + "-u", + "hs_atomic_or32", + "-u", + "hs_atomic_or64", + "-u", + "hs_atomic_xor8", + "-u", + "hs_atomic_xor16", + "-u", + "hs_atomic_xor32", + "-u", + "hs_atomic_xor64", + "-u", + "hs_cmpxchg8", + "-u", + "hs_cmpxchg16", + "-u", + "hs_cmpxchg32", + "-u", + "hs_cmpxchg64", + "-u", + "hs_xchg8", + "-u", + "hs_xchg16", + "-u", + "hs_xchg32", + "-u", + "hs_xchg64", + "-u", + "hs_atomicread8", + "-u", + "hs_atomicread16", + "-u", + "hs_atomicread32", + "-u", + "hs_atomicread64", + "-u", + "hs_atomicwrite8", + "-u", + "hs_atomicwrite16", + "-u", + "hs_atomicwrite32", + "-u", + "hs_atomicwrite64", + "-u", + "ghczminternal_GHCziInternalziStackziCloneStack_StackSnapshot_closure", + ], + }), + id = "rts-1.0.2", + shared_libs = select({ + ":linux-x86_64": {"libHSrts-1.0.2_thr-ghc9.10.1.so": "ghc/lib/x86_64-linux-ghc-9.10.1/libHSrts-1.0.2_thr-ghc9.10.1.so"}, + ":macos-arm64": {"libHSrts-ghc9.10.1.dylib": "ghc/lib/aarch64-osx-ghc-9.10.1/libHSrts-ghc9.10.1.dylib"}, + ":macos-x86_64": {"libHSrts-1.0.2_thr-ghc9.10.1.dylib": "ghc/lib/x86_64-osx-ghc-9.10.1/libHSrts-1.0.2_thr-ghc9.10.1.dylib"}, + }), + static_libs = select({ + ":linux-x86_64": ["ghc/lib/x86_64-linux-ghc-9.10.1/rts-1.0.2/libHSrts-1.0.2_thr.a"], + ":macos-arm64": ["ghc/lib/aarch64-osx-ghc-9.10.1/rts-1.0.2/libHSrts-1.0.2_thr.a"], + ":macos-x86_64": ["ghc/lib/x86_64-osx-ghc-9.10.1/rts-1.0.2/libHSrts-1.0.2_thr.a"], + }), + version = "1.0.2", + visibility = ["PUBLIC"], +) if _SUPPORTED else None + +# buildifier: disable=no-effect +haskell_prebuilt_library( + name = "ghc-prim", + db = "ghc/lib/package.conf.d", + id = select({ + ":linux-x86_64": "ghc-prim-0.11.0-633d", + ":macos-arm64": "ghc-prim-0.11.0-e020", + ":macos-x86_64": "ghc-prim-0.11.0-2e13", + }), + shared_libs = select({ + ":linux-x86_64": {"libHSghc-prim-0.11.0-633d-ghc9.10.1.so": "ghc/lib/x86_64-linux-ghc-9.10.1/libHSghc-prim-0.11.0-633d-ghc9.10.1.so"}, + ":macos-arm64": {"libHSghc-prim-0.11.0-e020-ghc9.10.1.dylib": "ghc/lib/aarch64-osx-ghc-9.10.1/libHSghc-prim-0.11.0-e020-ghc9.10.1.dylib"}, + ":macos-x86_64": {"libHSghc-prim-0.11.0-2e13-ghc9.10.1.dylib": "ghc/lib/x86_64-osx-ghc-9.10.1/libHSghc-prim-0.11.0-2e13-ghc9.10.1.dylib"}, + }), + static_libs = select({ + ":linux-x86_64": ["ghc/lib/x86_64-linux-ghc-9.10.1/ghc-prim-0.11.0-633d/libHSghc-prim-0.11.0-633d.a"], + ":macos-arm64": ["ghc/lib/aarch64-osx-ghc-9.10.1/ghc-prim-0.11.0-e020/libHSghc-prim-0.11.0-e020.a"], + ":macos-x86_64": ["ghc/lib/x86_64-osx-ghc-9.10.1/ghc-prim-0.11.0-2e13/libHSghc-prim-0.11.0-2e13.a"], + }), + version = "0.11.0", + visibility = ["PUBLIC"], + deps = [ + ":rts", + ], +) if _SUPPORTED else None + +# buildifier: disable=no-effect +haskell_prebuilt_library( + name = "ghc-bignum", + cxx_header_dirs = select({ + ":linux-x86_64": ["ghc/lib/x86_64-linux-ghc-9.10.1/ghc-bignum-1.3-5dfa/include"], + ":macos-arm64": 
["ghc/lib/aarch64-osx-ghc-9.10.1/ghc-bignum-1.3-09c4/include"], + ":macos-x86_64": ["ghc/lib/x86_64-osx-ghc-9.10.1/ghc-bignum-1.3-5e05/include"], + }), + db = "ghc/lib/package.conf.d", + id = select({ + ":linux-x86_64": "ghc-bignum-1.3-5dfa", + ":macos-arm64": "ghc-bignum-1.3-09c4", + ":macos-x86_64": "ghc-bignum-1.3-5e05", + }), + shared_libs = select({ + ":linux-x86_64": {"libHSghc-bignum-1.3-5dfa-ghc9.10.1.so": "ghc/lib/x86_64-linux-ghc-9.10.1/libHSghc-bignum-1.3-5dfa-ghc9.10.1.so"}, + ":macos-arm64": {"libHSghc-bignum-1.3-09c4-ghc9.10.1.dylib": "ghc/lib/aarch64-osx-ghc-9.10.1/libHSghc-bignum-1.3-09c4-ghc9.10.1.dylib"}, + ":macos-x86_64": {"libHSghc-bignum-1.3-5e05-ghc9.10.1.dylib": "ghc/lib/x86_64-osx-ghc-9.10.1/libHSghc-bignum-1.3-5e05-ghc9.10.1.dylib"}, + }), + static_libs = select({ + ":linux-x86_64": ["ghc/lib/x86_64-linux-ghc-9.10.1/ghc-bignum-1.3-5dfa/libHSghc-bignum-1.3-5dfa.a"], + ":macos-arm64": ["ghc/lib/aarch64-osx-ghc-9.10.1/ghc-bignum-1.3-09c4/libHSghc-bignum-1.3-09c4.a"], + ":macos-x86_64": ["ghc/lib/x86_64-osx-ghc-9.10.1/ghc-bignum-1.3-5e05/libHSghc-bignum-1.3-5e05.a"], + }), + version = "1.3", + visibility = ["PUBLIC"], + deps = [ + ":ghc-prim", + ], +) if _SUPPORTED else None + +# buildifier: disable=no-effect +haskell_prebuilt_library( + name = "ghc-internal", + cxx_header_dirs = select({ + ":linux-x86_64": ["ghc/lib/x86_64-linux-ghc-9.10.1/ghc-internal-9.1001.0-79a4/include"], + ":macos-arm64": ["ghc/lib/aarch64-osx-ghc-9.10.1/ghc-internal-9.1001.0-4685/include"], + ":macos-x86_64": ["ghc/lib/x86_64-osx-ghc-9.10.1/ghc-internal-9.1001.0-e204/include"], + }), + db = "ghc/lib/package.conf.d", + id = select({ + ":linux-x86_64": "ghc-internal-9.1001.0-79a4", + ":macos-arm64": "ghc-internal-9.1001.0-4685", + ":macos-x86_64": "ghc-internal-9.1001.0-e204", + }), + shared_libs = select({ + ":linux-x86_64": {"libHSghc-internal-9.1001.0-79a4-ghc9.10.1.so": "ghc/lib/x86_64-linux-ghc-9.10.1/libHSghc-internal-9.1001.0-79a4-ghc9.10.1.so"}, + ":macos-arm64": {"libHSghc-internal-9.1001.0-4685-ghc9.10.1.dylib": "ghc/lib/aarch64-osx-ghc-9.10.1/libHSghc-internal-9.1001.0-4685-ghc9.10.1.dylib"}, + ":macos-x86_64": {"libHSghc-internal-9.1001.0-e204-ghc9.10.1.dylib": "ghc/lib/x86_64-osx-ghc-9.10.1/libHSghc-internal-9.1001.0-e204-ghc9.10.1.dylib"}, + }), + static_libs = select({ + ":linux-x86_64": ["ghc/lib/x86_64-linux-ghc-9.10.1/ghc-internal-9.1001.0-79a4/libHSghc-internal-9.1001.0-79a4.a"], + ":macos-arm64": ["ghc/lib/aarch64-osx-ghc-9.10.1/ghc-internal-9.1001.0-4685/libHSghc-internal-9.1001.0-4685.a"], + ":macos-x86_64": ["ghc/lib/x86_64-osx-ghc-9.10.1/ghc-internal-9.1001.0-e204/libHSghc-internal-9.1001.0-e204.a"], + }), + version = "9.1001.0", + visibility = ["PUBLIC"], + deps = [ + ":ghc-bignum", + ":ghc-prim", + ":rts", + ], +) if _SUPPORTED else None + +# buildifier: disable=no-effect +haskell_prebuilt_library( + name = "base", + db = "ghc/lib/package.conf.d", + id = select({ + ":linux-x86_64": "base-4.20.0.0-4014", + ":macos-arm64": "base-4.20.0.0-380b", + ":macos-x86_64": "base-4.20.0.0-8a80", + }), + shared_libs = select({ + ":linux-x86_64": {"libHSbase-4.20.0.0-4014-ghc9.10.1.so": "ghc/lib/x86_64-linux-ghc-9.10.1/libHSbase-4.20.0.0-4014-ghc9.10.1.so"}, + ":macos-arm64": {"libHSbase-4.20.0.0-380b-ghc9.10.1.dylib": "ghc/lib/aarch64-osx-ghc-9.10.1/libHSbase-4.20.0.0-380b-ghc9.10.1.dylib"}, + ":macos-x86_64": {"libHSbase-4.20.0.0-8a80-ghc9.10.1.dylib": "ghc/lib/x86_64-osx-ghc-9.10.1/libHSbase-4.20.0.0-8a80-ghc9.10.1.dylib"}, + }), + static_libs = select({ + ":linux-x86_64": 
["ghc/lib/x86_64-linux-ghc-9.10.1/base-4.20.0.0-4014/libHSbase-4.20.0.0-4014.a"], + ":macos-arm64": ["ghc/lib/aarch64-osx-ghc-9.10.1/base-4.20.0.0-380b/libHSbase-4.20.0.0-380b.a"], + ":macos-x86_64": ["ghc/lib/x86_64-osx-ghc-9.10.1/base-4.20.0.0-8a80/libHSbase-4.20.0.0-8a80.a"], + }), + version = "4.20.0.0", + visibility = ["PUBLIC"], + deps = [ + ":ghc-internal", + ":ghc-prim", + ], +) if _SUPPORTED else None diff --git a/examples/with_prelude/third-party/ocaml/BUCK b/examples/with_prelude/third-party/ocaml/BUCK index 52a6f54d224a8..aa206426f0be0 100644 --- a/examples/with_prelude/third-party/ocaml/BUCK +++ b/examples/with_prelude/third-party/ocaml/BUCK @@ -1,7 +1,7 @@ # buildifier: disable=no-effect prebuilt_cxx_library( name = "ocaml-dev", - header_dirs = ["standard_library"], + header_dirs = ["opam/lib/ocaml"], header_only = True, visibility = ["PUBLIC"], ) if not host_info().os.is_windows else None @@ -27,9 +27,9 @@ prebuilt_ocaml_library( # buildifier: disable=no-effect prebuilt_ocaml_library( name = "ocaml.compiler-libs.common", - bytecode_lib = "standard_library/compiler-libs/ocamlcommon.cma", - include_dir = "standard_library/compiler-libs", - native_lib = "standard_library/compiler-libs/ocamlcommon.cmxa", + bytecode_lib = "opam/lib/ocaml/compiler-libs/ocamlcommon.cma", + include_dir = "opam/lib/ocaml/compiler-libs", + native_lib = "opam/lib/ocaml/compiler-libs/ocamlcommon.cmxa", visibility = ["PUBLIC"], deps = [], ) if not host_info().os.is_windows else None diff --git a/flake.lock b/flake.lock index e9be09d39e53a..c209307df300e 100644 --- a/flake.lock +++ b/flake.lock @@ -5,11 +5,11 @@ "systems": "systems" }, "locked": { - "lastModified": 1689068808, - "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=", + "lastModified": 1726560853, + "narHash": "sha256-X6rJYSESBVr3hBoH0WbKE5KvhPU5bloyZ2L4K60/fPQ=", "owner": "numtide", "repo": "flake-utils", - "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4", + "rev": "c1dfcf08411b08f6b8615f7d8971a2bfa81d5e8a", "type": "github" }, "original": { @@ -20,11 +20,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1689352711, - "narHash": "sha256-xWYFt8vWnstDIVsZ26y9mf6h3714lVmXd6l+hTQz6tw=", + "lastModified": 1728979988, + "narHash": "sha256-GBJRnbFLDg0y7ridWJHAP4Nn7oss50/VNgqoXaf/RVk=", "owner": "nixos", "repo": "nixpkgs", - "rev": "2047c642ce0f75307e8a0f2ec94715218c481184", + "rev": "7881fbfd2e3ed1dfa315fca889b2cfd94be39337", "type": "github" }, "original": { @@ -43,19 +43,16 @@ }, "rust-overlay": { "inputs": { - "flake-utils": [ - "flake-utils" - ], "nixpkgs": [ "nixpkgs" ] }, "locked": { - "lastModified": 1689302058, - "narHash": "sha256-yD74lcHTrw4niXcE9goJLbzsgyce48rQQoy5jK5ZK40=", + "lastModified": 1729184663, + "narHash": "sha256-uNyi5vQrzaLkt4jj6ZEOs4+4UqOAwP6jFG2s7LIDwIk=", "owner": "oxalica", "repo": "rust-overlay", - "rev": "7b8dbbf4c67ed05a9bf3d9e658c12d4108bc24c8", + "rev": "16fb78d443c1970dda9a0bbb93070c9d8598a925", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 0792fbcc00cce..10e37494f61f3 100644 --- a/flake.nix +++ b/flake.nix @@ -13,7 +13,6 @@ url = "github:oxalica/rust-overlay"; inputs = { nixpkgs.follows = "nixpkgs"; - flake-utils.follows = "flake-utils"; }; }; }; @@ -38,7 +37,7 @@ IOKit Security ]); - packages = [ pkgs.cargo-bloat my-rust-bin pkgs.mold pkgs.reindeer pkgs.lld_16 pkgs.clang_16 ]; + packages = [ pkgs.cargo-bloat my-rust-bin pkgs.mold-wrapped pkgs.reindeer pkgs.lld_16 pkgs.clang_16 ]; shellHook = '' export BUCK2_BUILD_PROTOC=${pkgs.protobuf}/bin/protoc diff --git 
a/gazebo/README.md b/gazebo/README.md index a41cf04d5b0b4..23da12907b59d 100644 --- a/gazebo/README.md +++ b/gazebo/README.md @@ -6,41 +6,75 @@ [![docs.rs availability](https://img.shields.io/docsrs/gazebo?label=docs.rs)](https://docs.rs/gazebo/) [![Build status](https://img.shields.io/github/workflow/status/facebookincubator/gazebo/ci.svg)](https://github.com/facebookincubator/gazebo/actions) -This library contains a collection of well-tested utilities. Most modules stand alone, but taking a few representative examples: +This library contains a collection of well-tested utilities. Most modules stand +alone, but taking a few representative examples: -* `gazebo::prelude::*` is intended to be imported as such, and provides extension traits to common types. For example, it provides `Vec::map` which is equivalent to `iter().map(f).collect::<Vec<_>>()`, and `str::split1` like `split` but which only splits once. We hope some of these functions one day make it into the Rust standard library. -* `gazebo::dupe` provides the trait `Dupe` with the member `dupe`, all of which are exactly like `Clone`. The difference is that `Dupe` should not be implemented for types that reallocate or have expensive `clone` operations - e.g. there is `Dupe` for `Arc` and `usize`, but not for `String` and `Vec`. By using `dupe` it is easy to focus on the `clone` calls (which should be rare) and ignore things whose cost is minimal. -* `gazebo::cell::ARef` provides a type which is either a `Ref` or a direct reference `&T`, with operations that make it look like `Ref` -- allowing you to uniformly convert a reference into something like a `Ref`. +- `gazebo::prelude::*` is intended to be imported as such, and provides + extension traits to common types. For example, it provides `Vec::map` which is + equivalent to `iter().map(f).collect::<Vec<_>>()`, and `str::split1` like + `split` but which only splits once. We hope some of these functions one day + make it into the Rust standard library. +- `gazebo::dupe` provides the trait `Dupe` with the member `dupe`, all of which + are exactly like `Clone`. The difference is that `Dupe` should not be + implemented for types that reallocate or have expensive `clone` operations - + e.g. there is `Dupe` for `Arc` and `usize`, but not for `String` and `Vec`. By + using `dupe` it is easy to focus on the `clone` calls (which should be rare) + and ignore things whose cost is minimal. +- `gazebo::cell::ARef` provides a type which is either a `Ref` or a direct + reference `&T`, with operations that make it look like `Ref` -- allowing you + to uniformly convert a reference into something like a `Ref`. -The functionality provided by Gazebo is not stable, and continues to evolve with both additions (as we find new useful features) and removals (as we find better patterns or libraries encapsulating the ideas better). While the code varies in usefulness and design quality, it is all well tested and documented. +The functionality provided by Gazebo is not stable, and continues to evolve with +both additions (as we find new useful features) and removals (as we find better +patterns or libraries encapsulating the ideas better). While the code varies in +usefulness and design quality, it is all well tested and documented. ## Using Gazebo -Gazebo can be depended upon by adding `gazebo` to your `[dependencies]`, using the standard [Cargo patterns](https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html).
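(Editor's note: as a concrete illustration of the utilities this README describes -- a minimal sketch only, not part of the patch; it assumes the published `gazebo` and `dupe` crates and made-up values:)

```rust
use std::sync::Arc;

use dupe::Dupe;
use gazebo::prelude::*;

fn main() {
    // The prelude's slice/Vec map: like iter().map(f).collect::<Vec<_>>().
    let squares = vec![1, 2, 3].map(|x| x * x);
    assert_eq!(squares, vec![1, 4, 9]);

    // dupe() behaves exactly like clone(), but is only offered where cloning
    // is cheap - here an Arc refcount bump, never a deep copy.
    let shared = Arc::new("hello".to_owned());
    let also_shared = shared.dupe();
    assert_eq!(*shared, *also_shared);
}
```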
+Gazebo can be depended upon by adding `gazebo` to your `[dependencies]`, using +the standard +[Cargo patterns](https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html). -The two interesting directories in this repo are `gazebo` (which contains the source to Gazebo itself) and `gazebo_derive` (which contains support for `#[derive(Dupe)]` and other Gazebo traits). Usually you will directly import `gazebo`, but `gazebo_derive` is a required transitive dependency if you are sourcing the library from GitHub. +The two interesting directories in this repo are `gazebo` (which contains the +source to Gazebo itself) and `gazebo_derive` (which contains support for +`#[derive(Dupe)]` and other Gazebo traits). Usually you will directly import +`gazebo`, but `gazebo_derive` is a required transitive dependency if you are +sourcing the library from GitHub. ## Learn More -You can learn more about Gazebo in [this introductory video](https://www.youtube.com/watch?v=pQJkx9HL_04), or from the following blog posts: +You can learn more about Gazebo in +[this introductory video](https://www.youtube.com/watch?v=pQJkx9HL_04), or from +the following blog posts: -* [Rust Nibbles - Gazebo: Prelude](https://developers.facebook.com/blog/post/2021/06/29/rust-nibbles-gazebo-prelude/) -* [Rust Nibbles - Gazebo: Dupe](https://developers.facebook.com/blog/post/2021/07/06/rust-nibbles-gazebo-dupe/) -* [Rust Nibbles - Gazebo: Variants](https://developers.facebook.com/blog/post/2021/07/13/rust-nibbles-gazebo-variants) -* [Rust Nibbles - Gazebo: AnyLifetime](https://developers.facebook.com/blog/post/2021/07/20/rust-nibbles-gazebo-any-lifetime/) -* [Rust Nibbles - Gazebo: Comparisons](https://developers.facebook.com/blog/post/2021/07/27/rust-nibbles-gazebo-comparisons/) -* [Rust Nibbles - Gazebo: Casts and Transmute](https://developers.facebook.com/blog/post/2021/08/03/rust-nibbles-gazebo-casts-transmute/) -* [Rust Nibbles - Gazebo: The rest of the tent](https://developers.facebook.com/blog/post/2021/08/10/rust-nibbles-gazebo-rest-of-tent/) +- [Rust Nibbles - Gazebo: Prelude](https://developers.facebook.com/blog/post/2021/06/29/rust-nibbles-gazebo-prelude/) +- [Rust Nibbles - Gazebo: Dupe](https://developers.facebook.com/blog/post/2021/07/06/rust-nibbles-gazebo-dupe/) +- [Rust Nibbles - Gazebo: Variants](https://developers.facebook.com/blog/post/2021/07/13/rust-nibbles-gazebo-variants) +- [Rust Nibbles - Gazebo: AnyLifetime](https://developers.facebook.com/blog/post/2021/07/20/rust-nibbles-gazebo-any-lifetime/) +- [Rust Nibbles - Gazebo: Comparisons](https://developers.facebook.com/blog/post/2021/07/27/rust-nibbles-gazebo-comparisons/) +- [Rust Nibbles - Gazebo: Casts and Transmute](https://developers.facebook.com/blog/post/2021/08/03/rust-nibbles-gazebo-casts-transmute/) +- [Rust Nibbles - Gazebo: The rest of the tent](https://developers.facebook.com/blog/post/2021/08/10/rust-nibbles-gazebo-rest-of-tent/) ## Making a release -1. Check the [GitHub Actions](https://github.com/facebookincubator/gazebo/actions) are green. -2. Update `CHANGELOG.md` with the changes since the last release. [This link](https://github.com/facebookincubator/gazebo/compare/v0.1.0...main) can help (update to compare against the last release). -3. Update the version numbers of the two `Cargo.toml` files. Bump them by 0.0.1 if there are no incompatible changes, or 0.1.0 if there are. Bump the dependency in `gazebo` to point at the latest `gazebo_derive` version. -4. 
Copy the files `CHANGELOG.md`, the two `LICENSE-` files and `README.md` into each `gazebo` and `gazebo_derive` subdirectory. -5. Run `cargo publish --allow-dirty --dry-run`, then without the `--dry-run`, first in `gazebo_derive` and then `gazebo` directories. -6. Create a [GitHub release](https://github.com/facebookincubator/gazebo/releases/new) with `v0.X.Y`, using the `gazebo` version as the name. +1. Check the + [GitHub Actions](https://github.com/facebookincubator/gazebo/actions) are + green. +2. Update `CHANGELOG.md` with the changes since the last release. + [This link](https://github.com/facebookincubator/gazebo/compare/v0.1.0...main) + can help (update to compare against the last release). +3. Update the version numbers of the two `Cargo.toml` files. Bump them by 0.0.1 + if there are no incompatible changes, or 0.1.0 if there are. Bump the + dependency in `gazebo` to point at the latest `gazebo_derive` version. +4. Copy the files `CHANGELOG.md`, the two `LICENSE-` files and `README.md` into + each `gazebo` and `gazebo_derive` subdirectory. +5. Run `cargo publish --allow-dirty --dry-run`, then without the `--dry-run`, + first in `gazebo_derive` and then `gazebo` directories. +6. Create a + [GitHub release](https://github.com/facebookincubator/gazebo/releases/new) + with `v0.X.Y`, using the `gazebo` version as the name. ## License -Gazebo is both MIT and Apache License, Version 2.0 licensed, as found in the [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) files. +Gazebo is both MIT and Apache License, Version 2.0 licensed, as found in the +[LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) files. diff --git a/gazebo/cmp_any/BUCK b/gazebo/cmp_any/BUCK index c9b8323530757..eb283582e48e2 100644 --- a/gazebo/cmp_any/BUCK +++ b/gazebo/cmp_any/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") diff --git a/gazebo/cmp_any/Cargo.toml b/gazebo/cmp_any/Cargo.toml index 19739a0d2f0f3..6114b3a198dc5 100644 --- a/gazebo/cmp_any/Cargo.toml +++ b/gazebo/cmp_any/Cargo.toml @@ -1,13 +1,10 @@ [package] -name = "cmp_any" -version = "0.8.1" -license = "MIT OR Apache-2.0" authors = ["Facebook"] -edition = "2021" -repository = "https://github.com/facebookincubator/gazebo" -documentation = "https://docs.rs/cmp_any" categories = ["rust-patterns"] description = "Comparison for &dyn types" - -[features] -str_pattern_extensions = [] +documentation = "https://docs.rs/cmp_any" +edition = "2021" +license = { workspace = true } +name = "cmp_any" +repository = "https://github.com/facebookincubator/gazebo" +version = "0.8.1" diff --git a/gazebo/display_container/BUCK b/gazebo/display_container/BUCK index 54f583a859ee7..fdc6d6a461742 100644 --- a/gazebo/display_container/BUCK +++ b/gazebo/display_container/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") diff --git a/gazebo/display_container/Cargo.toml b/gazebo/display_container/Cargo.toml index 0cfb3121e951a..b75d6b974776a 100644 --- a/gazebo/display_container/Cargo.toml +++ b/gazebo/display_container/Cargo.toml @@ -1,13 +1,13 @@ [package] -name = "display_container" -version = "0.9.0" -license = "MIT OR Apache-2.0" authors = ["Facebook"] -edition = "2021" +categories = ["rust-patterns"] description = "Utilities to implement Display" -repository = "https://github.com/facebookincubator/gazebo" 
documentation = "https://docs.rs/display_container" -categories = ["rust-patterns"] +edition = "2021" +license = { workspace = true } +name = "display_container" +repository = "https://github.com/facebookincubator/gazebo" +version = "0.9.0" [dependencies] either = { workspace = true } diff --git a/gazebo/display_container/src/lib.rs b/gazebo/display_container/src/lib.rs index d6a81993093dd..97223f18cc118 100644 --- a/gazebo/display_container/src/lib.rs +++ b/gazebo/display_container/src/lib.rs @@ -13,17 +13,21 @@ //! //! ``` //! use std::fmt; +//! //! use display_container::*; //! //! struct MyItems(Vec<(String, i32)>); //! //! impl fmt::Display for MyItems { //! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { -//! fmt_container(f, "{", "}", +//! fmt_container( +//! f, +//! "{", +//! "}", //! iter_display_chain( //! &["magic"], -//! self.0.iter().map(|(k, v)| display_pair(k, "=", v)) -//! ) +//! self.0.iter().map(|(k, v)| display_pair(k, "=", v)), +//! ), //! ) //! } //! } @@ -195,6 +199,33 @@ pub fn fmt_container>( helper.end(suffix) } +/// Helper for display implementation of container-y types (like list, tuple). +pub fn display_container<'a, C: 'a>(prefix: &'a str, suffix: &'a str, items: C) -> impl Display + 'a +where + C: Copy + IntoIterator, + ::Item: Display, +{ + struct Impl<'a, C> { + prefix: &'a str, + suffix: &'a str, + items: C, + } + impl<'a, C> Display for Impl<'a, C> + where + C: Copy + IntoIterator, + ::Item: Display, + { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt_container(f, self.prefix, self.suffix, self.items) + } + } + Impl { + prefix, + suffix, + items, + } +} + /// Helper for display implementation of container-y types (like dict, struct). /// /// Equivalent to [`fmt_container`] where the items have [`display_pair`] applied to them. @@ -231,8 +262,6 @@ where #[cfg(test)] mod tests { - use std::fmt; - use super::*; #[test] @@ -324,4 +353,9 @@ mod tests { "{magic, hello=1, world=2}" ); } + + #[test] + fn test_display_container() { + assert_eq!("[1]", display_container("[", "]", &vec![1]).to_string()); + } } diff --git a/gazebo/dupe/BUCK b/gazebo/dupe/BUCK index 68bf8792693e3..17424340e4c79 100644 --- a/gazebo/dupe/BUCK +++ b/gazebo/dupe/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") diff --git a/gazebo/dupe/Cargo.toml b/gazebo/dupe/Cargo.toml index 0ea0049481818..5b4b7d7732c40 100644 --- a/gazebo/dupe/Cargo.toml +++ b/gazebo/dupe/Cargo.toml @@ -1,13 +1,13 @@ [package] -name = "dupe" -version = "0.9.0" -license = "MIT OR Apache-2.0" authors = ["Facebook"] -edition = "2021" +categories = ["rust-patterns"] description = "Marker for types which are cheap to clone" -repository = "https://github.com/facebookincubator/gazebo" documentation = "https://docs.rs/dupe" -categories = ["rust-patterns"] +edition = "2021" +license = { workspace = true } +name = "dupe" +repository = "https://github.com/facebookincubator/gazebo" +version = "0.9.0" [dependencies] dupe_derive = { version = "=0.9.0", path = "../dupe_derive" } diff --git a/gazebo/dupe/src/__macro_refs.rs b/gazebo/dupe/src/__macro_refs.rs new file mode 100644 index 0000000000000..8fe16b63ee325 --- /dev/null +++ b/gazebo/dupe/src/__macro_refs.rs @@ -0,0 +1,15 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +#![doc(hidden)] + +use crate::Dupe; + +#[inline] +pub const fn assert_dupe<T: Dupe>() {} diff --git a/gazebo/dupe/src/iter.rs b/gazebo/dupe/src/iter.rs index 82ff76ea6b38c..9cd6431871db5 100644 --- a/gazebo/dupe/src/iter.rs +++ b/gazebo/dupe/src/iter.rs @@ -19,6 +19,7 @@ pub trait IterDupedExt: Sized { /// /// ``` /// use std::rc::Rc; + /// /// use dupe::IterDupedExt; /// let inputs = vec![Rc::new("Hello"), Rc::new("World")]; /// let outputs = inputs.iter().duped().collect::<Vec<_>>(); diff --git a/gazebo/dupe/src/lib.rs b/gazebo/dupe/src/lib.rs index afd04b344ac1f..1eb1dd559abef 100644 --- a/gazebo/dupe/src/lib.rs +++ b/gazebo/dupe/src/lib.rs @@ -7,12 +7,14 @@ * of this source tree. */ -//! A cheap version of [`Clone`](Clone). +//! A cheap version of [`Clone`]. +pub mod __macro_refs; pub(crate) mod iter; pub(crate) mod option; use std::cell::Cell; +use std::mem::ManuallyDrop; use std::num::*; use std::rc::Rc; use std::sync::Arc; @@ -25,8 +27,8 @@ pub use dupe_derive::Dupe_; pub use crate::iter::IterDupedExt; pub use crate::option::OptionDupedExt; -/// Like [`Clone`](Clone), but should only be available if [`Clone`](Clone) is -/// constant time and zero allocation (e.g. a few [`Arc`](Arc) bumps). +/// Like [`Clone`], but should only be available if [`Clone`] is +/// constant time and zero allocation (e.g. a few [`Arc`] bumps). /// The implementation of `dupe` should _always_ call `clone`. pub trait Dupe: Clone { #[inline] @@ -44,6 +46,7 @@ impl<A: ?Sized> Dupe for std::sync::Weak<A> {} impl<A: ?Sized> Dupe for Rc<A> {} impl<A: ?Sized> Dupe for std::rc::Weak<A> {} impl<A: Copy> Dupe for Cell<A> {} +impl<A: Dupe> Dupe for ManuallyDrop<A> {} // Small containers impl<A: Dupe> Dupe for Option<A> {} @@ -52,12 +55,30 @@ impl<A: Dupe> Dupe for std::ops::Bound<A> {} impl<A: Dupe> Dupe for std::pin::Pin<A> {} impl<A> Dupe for std::ptr::NonNull<A> {} impl<A: Dupe> Dupe for std::task::Poll<A> {} +impl Dupe for () {} impl<A: Dupe> Dupe for (A,) {} -// Not clear if Dupe should be implemented for pairs or not. -// Concern is deeply nested pairs could be exponentially more expensive than their inner dupes.
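+// Duplicating a tuple just dupes each component, so the cost is linear in the
+// number of leaf values rather than exponential in nesting depth.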
+impl<A: Dupe, B: Dupe> Dupe for (A, B) {} +impl<A: Dupe, B: Dupe, C: Dupe> Dupe for (A, B, C) {} +impl<A: Dupe, B: Dupe, C: Dupe, D: Dupe> Dupe for (A, B, C, D) {} +impl<A: Dupe, B: Dupe, C: Dupe, D: Dupe, E: Dupe> Dupe for (A, B, C, D, E) {} +impl<A: Dupe, B: Dupe, C: Dupe, D: Dupe, E: Dupe, F: Dupe> Dupe for (A, B, C, D, E, F) {} +impl<A: Dupe, B: Dupe, C: Dupe, D: Dupe, E: Dupe, F: Dupe, G: Dupe> Dupe for (A, B, C, D, E, F, G) {} +impl<A: Dupe, B: Dupe, C: Dupe, D: Dupe, E: Dupe, F: Dupe, G: Dupe, H: Dupe> Dupe + for (A, B, C, D, E, F, G, H) +{ +} +impl<A: Dupe, B: Dupe, C: Dupe, D: Dupe, E: Dupe, F: Dupe, G: Dupe, H: Dupe, I: Dupe> Dupe + for (A, B, C, D, E, F, G, H, I) +{ +} +impl<A: Dupe, B: Dupe, C: Dupe, D: Dupe, E: Dupe, F: Dupe, G: Dupe, H: Dupe, I: Dupe, J: Dupe> Dupe + for (A, B, C, D, E, F, G, H, I, J) +{ +} + +impl<A: Dupe, const N: usize> Dupe for [A; N] {} // Atomic types -impl Dupe for () {} impl Dupe for bool {} impl Dupe for char {} impl Dupe for u8 {} diff --git a/gazebo/dupe/src/option.rs b/gazebo/dupe/src/option.rs index e1ac3db80c31b..899d8b42cd6ba 100644 --- a/gazebo/dupe/src/option.rs +++ b/gazebo/dupe/src/option.rs @@ -17,6 +17,7 @@ pub trait OptionDupedExt { /// /// ``` /// use std::rc::Rc; + /// /// use dupe::OptionDupedExt; /// let rc = Rc::new("test"); /// assert_eq!(Some(&rc).duped(), Some(rc)); diff --git a/gazebo/dupe_derive/BUCK b/gazebo/dupe_derive/BUCK index 28f890fd25db8..1d55f1649fd50 100644 --- a/gazebo/dupe_derive/BUCK +++ b/gazebo/dupe_derive/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") diff --git a/gazebo/dupe_derive/Cargo.toml b/gazebo/dupe_derive/Cargo.toml index 72ace3acb2a05..a1e4c764e2ee5 100644 --- a/gazebo/dupe_derive/Cargo.toml +++ b/gazebo/dupe_derive/Cargo.toml @@ -1,18 +1,18 @@ [package] -name = "dupe_derive" -version = "0.9.0" -license = "MIT OR Apache-2.0" authors = ["Facebook"] -edition = "2021" +categories = ["rust-patterns"] description = "Derive Dupe" -repository = "https://github.com/facebookincubator/gazebo" documentation = "https://docs.rs/dupe" -categories = ["rust-patterns"] +edition = "2021" +license = { workspace = true } +name = "dupe_derive" +repository = "https://github.com/facebookincubator/gazebo" +version = "0.9.0" [lib] proc-macro = true [dependencies] proc-macro2 = "1.0" -syn = { version = "2", features = ["extra-traits"] } quote = "1.0.3" +syn = { version = "2", features = ["extra-traits"] } diff --git a/gazebo/dupe_derive/src/dupe.rs b/gazebo/dupe_derive/src/dupe.rs index 73d3ba297901b..6a6fd5b94cff3 100644 --- a/gazebo/dupe_derive/src/dupe.rs +++ b/gazebo/dupe_derive/src/dupe.rs @@ -7,15 +7,16 @@ * of this source tree. */ +use proc_macro2::TokenStream; use quote::quote; use syn::parse_macro_input; use syn::parse_quote; use syn::DeriveInput; use syn::Ident; +use syn::Type; use syn::TypeParamBound; use crate::util::add_trait_bounds; -use crate::util::check_each_field_impls; use crate::util::extract_all_field_tys; pub fn derive_dupe(input: proc_macro::TokenStream) -> proc_macro::TokenStream { @@ -49,7 +50,7 @@ fn derive_dupe_explicit( return e.into_compile_error().into(); } }; - let check_each_field_dupe = check_each_field_impls(all_fields, parse_quote!(dupe::Dupe)); + let check_each_field_dupe = check_each_field_dupe(all_fields); let check_func_name = Ident::new( &format!("__implicit_dupe_check_for_fields_of_{}", name), @@ -69,3 +70,12 @@ fn derive_dupe_explicit( gen.into() } + +fn check_each_field_dupe<'a>(tys: impl IntoIterator<Item = &'a Type>) -> TokenStream { + let tys = tys.into_iter(); + quote!
{ + #( + dupe::__macro_refs::assert_dupe::<#tys>(); + )* + } +} diff --git a/gazebo/dupe_derive/src/lib.rs b/gazebo/dupe_derive/src/lib.rs index 8f668ecf11d0c..dff79bc0a340f 100644 --- a/gazebo/dupe_derive/src/lib.rs +++ b/gazebo/dupe_derive/src/lib.rs @@ -24,13 +24,13 @@ pub fn derive_dupe_(input: proc_macro::TokenStream) -> proc_macro::TokenStream { dupe::derive_dupe_(input) } -/// Derive the [`Clone` trait](Clone), but without requiring all type arguments to implement [`Clone`](Clone). +/// Derive the [`Clone` trait](Clone), but without requiring all type arguments to implement [`Clone`]. #[proc_macro_derive(Clone_)] pub fn derive_clone_(input: proc_macro::TokenStream) -> proc_macro::TokenStream { clone::derive_clone_(input) } -/// Derive the [`Copy` trait](Copy), but without requiring all type arguments to implement [`Copy`](Copy). +/// Derive the [`Copy` trait](Copy), but without requiring all type arguments to implement [`Copy`]. #[proc_macro_derive(Copy_)] pub fn derive_copy_(input: proc_macro::TokenStream) -> proc_macro::TokenStream { copy::derive_copy_(input) } diff --git a/gazebo/dupe_derive/src/util.rs b/gazebo/dupe_derive/src/util.rs index 79eea6e3f4531..7944ff236229d 100644 --- a/gazebo/dupe_derive/src/util.rs +++ b/gazebo/dupe_derive/src/util.rs @@ -174,20 +174,3 @@ fn extract_all_field_tys_variant<'a>(data: &'a Variant) -> Box<dyn Iterator<Item = &'a Type> + 'a> pub(crate) fn extract_all_field_tys_enum<'a>(data: &'a DataEnum) -> Box<dyn Iterator<Item = &'a Type> + 'a> { Box::new(data.variants.iter().flat_map(extract_all_field_tys_variant)) } - -pub(crate) fn check_each_field_impls<'a>( - iter: impl IntoIterator<Item = &'a Type>, - trait_required: Type, -) -> TokenStream { - let checks = iter.into_iter().map(|ty| { - quote! { - assert_impl_all::<#ty>(); - } - }); - - quote! { - fn assert_impl_all<T: #trait_required>() {} - - #(#checks)* - } -} diff --git a/gazebo/gazebo/BUCK b/gazebo/gazebo/BUCK index f5c1d3fac3340..7eb275e0ce68d 100644 --- a/gazebo/gazebo/BUCK +++ b/gazebo/gazebo/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") diff --git a/gazebo/gazebo/Cargo.toml b/gazebo/gazebo/Cargo.toml index 932758366d90d..080352762daf6 100644 --- a/gazebo/gazebo/Cargo.toml +++ b/gazebo/gazebo/Cargo.toml @@ -1,17 +1,17 @@ [package] -name = "gazebo" -version = "0.8.1" -license = "MIT OR Apache-2.0" authors = ["Facebook"] -edition = "2021" +categories = ["rust-patterns"] description = "A collection of well-tested utilities" -repository = "https://github.com/facebookincubator/gazebo" documentation = "https://docs.rs/gazebo" -categories = ["rust-patterns"] +edition = "2021" +license = { workspace = true } +name = "gazebo" +repository = "https://github.com/facebookincubator/gazebo" +version = "0.8.1" [features] str_pattern_extensions = [] [dependencies] -gazebo_derive = { version = "0.8.0", path = "../gazebo_derive" } dupe = { version = "0.9.0", path = "../dupe" } +gazebo_derive = { version = "0.8.0", path = "../gazebo_derive" } diff --git a/gazebo/gazebo/src/cell.rs b/gazebo/gazebo/src/cell.rs index 39e2ea4597a96..630b262ef0011 100644 --- a/gazebo/gazebo/src/cell.rs +++ b/gazebo/gazebo/src/cell.rs @@ -7,7 +7,7 @@ * of this source tree. */ -//! Additions to the [`Ref`](Ref) mechanism. +//! Additions to the [`Ref`] mechanism. // We used to implement `ARef` as an enum of `{Ptr(&'a T), Ref(Ref<'a, T>)}`. // That works, but consumes 3 words and requires a branch on every access of the underlying @@ -50,9 +50,9 @@ enum ARefImpl<'a, T: ?Sized + 'a> { Ref(Ref<'a, T>), } -/// A [`Ref`](Ref) that might not actually be borrowed.
+/// A [`Ref`] that might not actually be borrowed. /// Either a `Ptr` (a normal & style reference), or a `Ref` (like from -/// [`RefCell`](std::cell::RefCell)), but exposes all the methods available on [`Ref`](Ref). +/// [`RefCell`]), but exposes all the methods available on [`Ref`]. #[derive(Debug)] pub struct ARef<'a, T: ?Sized + 'a>(ARefImpl<'a, T>); @@ -78,7 +78,7 @@ impl<'a, T: ?Sized + 'a> ARef<'a, T> { ARef(ARefImpl::Ref(x)) } - /// See [`Ref.clone`](Ref::clone). Not a self method since that interferes with the [`Deref`](Deref). + /// See [`Ref.clone`](Ref::clone). Not a self method since that interferes with the [`Deref`]. #[allow(clippy::should_implement_trait)] pub fn clone(orig: &Self) -> Self { match &orig.0 { @@ -87,7 +87,7 @@ impl<'a, T: ?Sized + 'a> ARef<'a, T> { } } - /// See [`Ref.map`](Ref::map). Not a self method since that interferes with the [`Deref`](Deref). + /// See [`Ref.map`](Ref::map). Not a self method since that interferes with the [`Deref`]. pub fn map(orig: ARef<'a, T>, f: F) -> ARef<'a, U> where F: FnOnce(&T) -> &U, @@ -99,7 +99,7 @@ impl<'a, T: ?Sized + 'a> ARef<'a, T> { } /// See [`Ref.map_split`](Ref::map_split). Not a self method since that interferes with the - /// [`Deref`](Deref). + /// [`Deref`]. pub fn map_split(orig: ARef<'a, T>, f: F) -> (ARef<'a, U>, ARef<'a, V>) where F: FnOnce(&T) -> (&U, &V), @@ -117,7 +117,7 @@ impl<'a, T: ?Sized + 'a> ARef<'a, T> { } /// See [`Ref.filter_map`](Ref::filter_map). Not a self method since that interferes with the - /// [`Deref`](Deref). + /// [`Deref`]. pub fn filter_map(orig: ARef<'a, T>, f: F) -> Result, Self> where F: FnOnce(&T) -> Option<&U>, @@ -171,7 +171,7 @@ impl Ord for ARef<'_, A> { } } -/// Obtain an [`ARef`] from either a normal pointer or a [`RefCell`](std::cell::RefCell). +/// Obtain an [`ARef`] from either a normal pointer or a [`RefCell`]. pub trait AsARef { /// Get an [`ARef`] pointing at this type. 
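A minimal sketch of how `ARef` behaves in practice, assuming the crate's existing `ARef::new_ptr` and `ARef::new_ref` constructors (not shown in this hunk) and the `Deref` impl described here:

```rust
use std::cell::RefCell;

use gazebo::cell::ARef;

fn main() {
    // A plain reference: no RefCell borrow flag involved.
    let s = String::from("hello");
    let a: ARef<String> = ARef::new_ptr(&s);
    assert_eq!(&*a, "hello");

    // A RefCell borrow: the Ref is carried inside the ARef and the
    // dynamic borrow is released when the ARef is dropped.
    let cell = RefCell::new(String::from("world"));
    let b: ARef<String> = ARef::new_ref(cell.borrow());

    // Both flavors expose the Ref-style associated functions, e.g. map.
    let prefix: ARef<str> = ARef::map(b, |s| &s[..2]);
    assert_eq!(&*prefix, "wo");
}
```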
fn as_aref(this: &Self) -> ARef; @@ -208,11 +208,9 @@ impl AsARef for RefCell { #[cfg(test)] mod tests { - use std::cell::RefCell; use std::mem; use super::*; - use crate::cast; #[test] fn test_from_ref_docs() { @@ -276,11 +274,11 @@ mod tests { let orig = RefCell::new("test".to_owned()); let p = orig.borrow(); let p2 = Ref::clone(&p); - let (pointer, cell): (usize, usize) = unsafe { mem::transmute(p) }; + let (pointer, cell): (*const String, *const ()) = unsafe { mem::transmute(p) }; // We expect the first to be a pointer to the underlying string - assert_eq!(pointer, cast::ptr_to_usize(Ref::deref(&p2))); + assert_eq!(pointer, Ref::deref(&p2) as *const String); // We want to make sure the second is never zero - assert_ne!(cell, 0); + assert!(!cell.is_null()); // Put it back as it was, to make sure our test doesn't leak memory let _ignore: Ref = unsafe { mem::transmute((pointer, cell)) }; diff --git a/gazebo/gazebo/src/cmp.rs b/gazebo/gazebo/src/cmp.rs index 2e459ead9318a..b405d269a2064 100644 --- a/gazebo/gazebo/src/cmp.rs +++ b/gazebo/gazebo/src/cmp.rs @@ -18,6 +18,7 @@ /// /// ``` /// use std::cmp::Ordering; +/// /// use gazebo::cmp_chain; /// /// assert_eq!( diff --git a/gazebo/gazebo/src/ext/mod.rs b/gazebo/gazebo/src/ext.rs similarity index 100% rename from gazebo/gazebo/src/ext/mod.rs rename to gazebo/gazebo/src/ext.rs diff --git a/gazebo/gazebo/src/ext/iter.rs b/gazebo/gazebo/src/ext/iter.rs index 07afe98345fe0..2aa80b6eef5e7 100644 --- a/gazebo/gazebo/src/ext/iter.rs +++ b/gazebo/gazebo/src/ext/iter.rs @@ -20,11 +20,7 @@ pub trait IterExt { /// use gazebo::prelude::*; /// /// fn true_if_even_throw_on_zero(x: &usize) -> Result { - /// if *x == 0 { - /// Err(()) - /// } else { - /// Ok(x % 2 == 0) - /// } + /// if *x == 0 { Err(()) } else { Ok(x % 2 == 0) } /// } /// /// let x = [1, 3, 2]; @@ -35,7 +31,6 @@ pub trait IterExt { /// /// let x = [1, 0, 2]; /// assert_eq!(x.iter().try_any(true_if_even_throw_on_zero), Err(())); - /// /// ``` fn try_any(self, any: F) -> Result where @@ -49,11 +44,7 @@ pub trait IterExt { /// use gazebo::prelude::*; /// /// fn true_if_even_throw_on_zero(x: &usize) -> Result { - /// if *x == 0 { - /// Err(()) - /// } else { - /// Ok(x % 2 == 0) - /// } + /// if *x == 0 { Err(()) } else { Ok(x % 2 == 0) } /// } /// /// let x = [2, 4, 2]; @@ -64,7 +55,6 @@ pub trait IterExt { /// /// let x = [2, 0, 2]; /// assert_eq!(x.iter().try_all(true_if_even_throw_on_zero), Err(())); - /// /// ``` fn try_all(self, any: F) -> Result where @@ -105,9 +95,10 @@ pub trait IterExt { /// on the first encounter of `Err`. 
/// /// ``` - /// use gazebo::prelude::*; /// use std::cmp::Ordering; /// + /// use gazebo::prelude::*; + /// /// fn double_cmp_throw_on_zero(x: &usize, y: &usize) -> Result { /// if *x == 0 || *y == 0 { /// Err(()) @@ -119,27 +110,42 @@ pub trait IterExt { /// let x = [1, 4, 2]; /// let y = [2, 8, 4]; /// - /// assert_eq!(x.iter().try_cmp_by(&y, double_cmp_throw_on_zero), Ok(Ordering::Equal)); + /// assert_eq!( + /// x.iter().try_cmp_by(&y, double_cmp_throw_on_zero), + /// Ok(Ordering::Equal) + /// ); /// /// let x = [1, 2, 2]; /// let y = [2, 8, 4]; /// - /// assert_eq!(x.iter().try_cmp_by(&y, double_cmp_throw_on_zero), Ok(Ordering::Less)); + /// assert_eq!( + /// x.iter().try_cmp_by(&y, double_cmp_throw_on_zero), + /// Ok(Ordering::Less) + /// ); /// /// let x = [1, 4]; /// let y = [2, 8, 4]; /// - /// assert_eq!(x.iter().try_cmp_by(&y, double_cmp_throw_on_zero), Ok(Ordering::Less)); + /// assert_eq!( + /// x.iter().try_cmp_by(&y, double_cmp_throw_on_zero), + /// Ok(Ordering::Less) + /// ); /// /// let x = [1, 4, 4]; /// let y = [2, 8, 4]; /// - /// assert_eq!(x.iter().try_cmp_by(&y, double_cmp_throw_on_zero), Ok(Ordering::Greater)); + /// assert_eq!( + /// x.iter().try_cmp_by(&y, double_cmp_throw_on_zero), + /// Ok(Ordering::Greater) + /// ); /// /// let x = [1, 4, 2, 3]; /// let y = [2, 8, 4]; /// - /// assert_eq!(x.iter().try_cmp_by(&y, double_cmp_throw_on_zero), Ok(Ordering::Greater)); + /// assert_eq!( + /// x.iter().try_cmp_by(&y, double_cmp_throw_on_zero), + /// Ok(Ordering::Greater) + /// ); /// /// let x = [1, 4, 2]; /// let y = [2, 0, 4]; @@ -164,7 +170,10 @@ pub trait IterExt { /// /// let i = vec![Ok((1, "a")), Err(()), Ok((2, "b"))]; /// - /// assert_eq!(i.into_iter().try_unzip::<_, _, Vec<_>, Vec<_>, _>(), Err(())); + /// assert_eq!( + /// i.into_iter().try_unzip::<_, _, Vec<_>, Vec<_>, _>(), + /// Err(()) + /// ); /// ``` fn try_unzip(self) -> Result<(FromA, FromB), E> where @@ -199,7 +208,10 @@ pub trait IterOwned: Sized { /// /// let inputs = vec!["a", "b", "c"]; /// let outputs = inputs.into_iter().owned().collect::>(); - /// assert_eq!(outputs, vec!["a".to_owned(), "b".to_owned(), "c".to_owned()]) + /// assert_eq!( + /// outputs, + /// vec!["a".to_owned(), "b".to_owned(), "c".to_owned()] + /// ) /// ``` fn owned(self) -> Owned; } diff --git a/gazebo/gazebo/src/ext/str.rs b/gazebo/gazebo/src/ext/str.rs index f58ccdac657a8..1408830eefdf4 100644 --- a/gazebo/gazebo/src/ext/str.rs +++ b/gazebo/gazebo/src/ext/str.rs @@ -10,11 +10,11 @@ #[cfg(feature = "str_pattern_extensions")] use std::str::pattern::*; -/// Extension traits on [`str`](str). +/// Extension traits on [`str`]. /// /// Set the configuration option `str_pattern_extensions` to enable the associated methods. -/// The setting `str_pattern_extensions` requires the unstable features -/// `pattern` and `associated_type_bounds`, so only works with Rust nightly. +/// The setting `str_pattern_extensions` requires the unstable feature `pattern`, +/// so only works with Rust nightly. pub trait StrExt { /// Like `split`, but only separates off the first element. For example: /// diff --git a/gazebo/gazebo/src/ext/vec.rs b/gazebo/gazebo/src/ext/vec.rs index 49eb5f4a715f2..b70b79ee6166d 100644 --- a/gazebo/gazebo/src/ext/vec.rs +++ b/gazebo/gazebo/src/ext/vec.rs @@ -36,7 +36,7 @@ fn collect_result(mut it: impl ExactSizeIterator>) -> } } -/// Extension traits on slices/[`Vec`](Vec). +/// Extension traits on slices/[`Vec`]. 
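Outside the doctests, the fallible combinators reformatted above compose as follows; a small sketch assuming the `gazebo` prelude from this crate:

```rust
use std::cmp::Ordering;

use gazebo::prelude::*;

fn checked(x: &usize) -> Result<bool, ()> {
    // Fail loudly on zero, otherwise report evenness.
    if *x == 0 { Err(()) } else { Ok(x % 2 == 0) }
}

fn main() {
    // try_all short-circuits on the first Err it encounters.
    assert_eq!([2usize, 4, 6].iter().try_all(checked), Ok(true));
    assert_eq!([2usize, 0, 6].iter().try_all(checked), Err(()));

    // try_cmp_by compares element-wise and propagates errors the same way.
    let a = [1usize, 4, 2];
    let b = [2usize, 8, 4];
    let cmp = a.iter().try_cmp_by(&b, |x, y| Ok::<_, ()>((x * 2).cmp(y)));
    assert_eq!(cmp, Ok(Ordering::Equal));
}
```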
pub trait SliceExt { type Item; @@ -44,8 +44,8 @@ pub trait SliceExt { /// /// ``` /// use gazebo::prelude::*; - /// assert_eq!([1,2,3][..].map(|x| x*x), vec![1,4,9]); - /// assert_eq!(vec![1,2,3].map(|x| x*x), vec![1,4,9]); + /// assert_eq!([1, 2, 3][..].map(|x| x * x), vec![1, 4, 9]); + /// assert_eq!(vec![1, 2, 3].map(|x| x * x), vec![1, 4, 9]); /// ``` /// /// Note that from Rust 1.55.0 there is a `map` method on @@ -59,8 +59,14 @@ pub trait SliceExt { /// /// ``` /// use gazebo::prelude::*; - /// assert_eq!([1,2,3].try_map(|x| Ok(x*x)), Ok::<_, bool>(vec![1,4,9])); - /// assert_eq!([1,2,-3].try_map(|x| if *x > 0 { Ok(x*x) } else { Err(false) }), Err(false)); + /// assert_eq!( + /// [1, 2, 3].try_map(|x| Ok(x * x)), + /// Ok::<_, bool>(vec![1, 4, 9]) + /// ); + /// assert_eq!( + /// [1, 2, -3].try_map(|x| if *x > 0 { Ok(x * x) } else { Err(false) }), + /// Err(false) + /// ); /// ``` /// /// This function will be generalised to [`Try`](std::ops::Try) once it has been @@ -162,12 +168,12 @@ impl SliceExt for [T] { /// struct X; /// /// let x = [&X]; -/// let y : Vec = x.cloned(); +/// let y: Vec = x.cloned(); /// /// assert_eq!(y, vec![X]); /// /// let x = vec![&X]; -/// let y : Vec = x.cloned(); +/// let y: Vec = x.cloned(); /// /// assert_eq!(y, vec![X]); /// ``` @@ -198,12 +204,12 @@ where /// struct X; /// /// let x = [&X]; -/// let y : Vec = x.duped(); +/// let y: Vec = x.duped(); /// /// assert_eq!(y, vec![X]); /// /// let x = vec![&X]; -/// let y : Vec = x.duped(); +/// let y: Vec = x.duped(); /// /// assert_eq!(y, vec![X]); /// ``` @@ -233,12 +239,12 @@ where /// struct X; /// /// let x = [&X]; -/// let y : Vec = x.copied(); +/// let y: Vec = x.copied(); /// /// assert_eq!(y, vec![X]); /// /// let x = vec![&X]; -/// let y : Vec = x.copied(); +/// let y: Vec = x.copied(); /// /// assert_eq!(y, vec![X]); /// ``` @@ -259,7 +265,7 @@ where } } -/// Extension traits on [`Vec`](Vec). +/// Extension traits on [`Vec`]. pub trait VecExt { type Item; @@ -267,7 +273,7 @@ pub trait VecExt { /// /// ``` /// use gazebo::prelude::*; - /// assert_eq!(vec![1,2,3].into_map(|x| x*x), vec![1,4,9]); + /// assert_eq!(vec![1, 2, 3].into_map(|x| x * x), vec![1, 4, 9]); /// ``` fn into_map(self, f: F) -> Vec where @@ -277,8 +283,14 @@ pub trait VecExt { /// /// ``` /// use gazebo::prelude::*; - /// assert_eq!(vec![1,2,3].into_try_map(|x| Ok(x*x)), Ok::<_, bool>(vec![1,4,9])); - /// assert_eq!(vec![1,2,-3].into_try_map(|x| if x > 0 { Ok(x*x) } else { Err(false) }), Err(false)); + /// assert_eq!( + /// vec![1, 2, 3].into_try_map(|x| Ok(x * x)), + /// Ok::<_, bool>(vec![1, 4, 9]) + /// ); + /// assert_eq!( + /// vec![1, 2, -3].into_try_map(|x| if x > 0 { Ok(x * x) } else { Err(false) }), + /// Err(false) + /// ); /// ``` /// /// This function will be generalised to [`Try`](std::ops::Try) once it has been diff --git a/gazebo/gazebo/src/hash.rs b/gazebo/gazebo/src/hash.rs index ef76b8cce2a2a..f924a5ae24d01 100644 --- a/gazebo/gazebo/src/hash.rs +++ b/gazebo/gazebo/src/hash.rs @@ -56,12 +56,6 @@ impl AsRef for Hashed { } } -impl AsMut for Hashed { - fn as_mut(&mut self) -> &mut T { - &mut self.value - } -} - impl From for Hashed { fn from(value: T) -> Self { Self::new(value) diff --git a/gazebo/gazebo/src/lib.rs b/gazebo/gazebo/src/lib.rs index ecb478fce782e..46368d40ccfaa 100644 --- a/gazebo/gazebo/src/lib.rs +++ b/gazebo/gazebo/src/lib.rs @@ -8,7 +8,6 @@ */ #![cfg_attr(feature = "str_pattern_extensions", feature(pattern))] -#![cfg_attr(feature = "str_pattern_extensions", feature(associated_type_bounds))] //! 
A collection of well-tested primitives that have been useful. Most modules stand alone. diff --git a/gazebo/gazebo/src/phantom.rs b/gazebo/gazebo/src/phantom.rs index e996898ba8410..efb42ad6696d4 100644 --- a/gazebo/gazebo/src/phantom.rs +++ b/gazebo/gazebo/src/phantom.rs @@ -7,7 +7,7 @@ * of this source tree. */ -//! Additional [`PhantomData`](PhantomData) related types. +//! Additional [`PhantomData`] related types. use std::cell::Cell; use std::fmt; @@ -18,7 +18,7 @@ use std::marker::PhantomData; use dupe::Dupe; -/// A type like [`PhantomData`](PhantomData), but where the contained `T` is invariant +/// A type like [`PhantomData`], but where the contained `T` is invariant /// in both lifetimes and types. See [variance on the Nomicon](https://doc.rust-lang.org/nomicon/subtyping.html#variance) for an /// explanation of these terms. pub struct PhantomDataInvariant(PhantomData>); diff --git a/gazebo/gazebo/src/prelude.rs b/gazebo/gazebo/src/prelude.rs index 2808351e10206..88a577c25fa5e 100644 --- a/gazebo/gazebo/src/prelude.rs +++ b/gazebo/gazebo/src/prelude.rs @@ -11,7 +11,7 @@ //! //! Contains: //! -//! * Extension methods for [`str`](str) and slice/[`Vec`](Vec). +//! * Extension methods for [`str`] and slice/[`Vec`]. //! * Defines [`Default_`] macro. //! //! The derivation macros appended with underscore are like the normal diff --git a/gazebo/gazebo/src/types.rs b/gazebo/gazebo/src/types.rs index b45c86faed15a..55bac92a7b627 100644 --- a/gazebo/gazebo/src/types.rs +++ b/gazebo/gazebo/src/types.rs @@ -18,8 +18,8 @@ /// /// ``` /// use gazebo::types::TEq; -/// fn foo>(x: A) -> String { -/// x.teq() +/// fn foo>(x: A) -> String { +/// x.teq() /// } /// ``` /// diff --git a/gazebo/gazebo/src/variants.rs b/gazebo/gazebo/src/variants.rs index b22f070536d90..96118d9f4db34 100644 --- a/gazebo/gazebo/src/variants.rs +++ b/gazebo/gazebo/src/variants.rs @@ -88,11 +88,12 @@ pub use gazebo_derive::UnpackVariants; /// assert_eq!(Foo::Baz(1).variant_name(), "Baz"); /// assert_eq!(Foo::Qux { i: 1 }.variant_name(), "Qux"); /// ``` -/// pub use gazebo_derive::VariantName; pub trait VariantName { fn variant_name(&self) -> &'static str; + + fn variant_name_lowercase(&self) -> &'static str; } impl VariantName for Option { @@ -102,6 +103,13 @@ impl VariantName for Option { None => "None", } } + + fn variant_name_lowercase(&self) -> &'static str { + match self { + Self::Some(_) => "some", + None => "none", + } + } } impl VariantName for Result { @@ -111,6 +119,13 @@ impl VariantName for Result { Self::Err(_) => "Err", } } + + fn variant_name_lowercase(&self) -> &'static str { + match self { + Self::Ok(_) => "ok", + Self::Err(_) => "err", + } + } } #[cfg(test)] @@ -126,16 +141,19 @@ mod tests { enum MyEnum { Foo, Bar(usize), - Baz { field: usize }, + FooBaz { field: usize }, } let x = MyEnum::Foo; assert_eq!(x.variant_name(), "Foo"); + assert_eq!(x.variant_name_lowercase(), "foo"); let x = MyEnum::Bar(1); assert_eq!(x.variant_name(), "Bar"); + assert_eq!(x.variant_name_lowercase(), "bar"); - let x = MyEnum::Baz { field: 1 }; - assert_eq!(x.variant_name(), "Baz"); + let x = MyEnum::FooBaz { field: 1 }; + assert_eq!(x.variant_name(), "FooBaz"); + assert_eq!(x.variant_name_lowercase(), "foo_baz"); } } diff --git a/gazebo/gazebo_derive/BUCK b/gazebo/gazebo_derive/BUCK index 5ce93a0068911..132d292e66ccb 100644 --- a/gazebo/gazebo_derive/BUCK +++ b/gazebo/gazebo_derive/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") 
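The new `variant_name_lowercase` method snake_cases multi-word variants (`FooBaz` becomes `foo_baz`, per the updated test above). A hand-written sketch of what the derive is expected to generate for a user enum; the enum here is illustrative:

```rust
use gazebo::variants::VariantName;

enum Job {
    Pending,
    InFlight { attempts: usize },
}

// What `#[derive(VariantName)]` is expected to generate, written by hand.
impl VariantName for Job {
    fn variant_name(&self) -> &'static str {
        match self {
            Job::Pending => "Pending",
            Job::InFlight { .. } => "InFlight",
        }
    }

    fn variant_name_lowercase(&self) -> &'static str {
        match self {
            Job::Pending => "pending",
            Job::InFlight { .. } => "in_flight", // snake_case, per the test
        }
    }
}

fn main() {
    assert_eq!(Job::Pending.variant_name_lowercase(), "pending");
    let j = Job::InFlight { attempts: 1 };
    assert_eq!(j.variant_name(), "InFlight");
    assert_eq!(j.variant_name_lowercase(), "in_flight");
}
```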
oncall("build_infra") diff --git a/gazebo/gazebo_derive/Cargo.toml b/gazebo/gazebo_derive/Cargo.toml index c8df8d55453ad..f7943505b255f 100644 --- a/gazebo/gazebo_derive/Cargo.toml +++ b/gazebo/gazebo_derive/Cargo.toml @@ -1,18 +1,18 @@ [package] -name = "gazebo_derive" -version = "0.8.0" -license = "MIT OR Apache-2.0" authors = ["Facebook"] -edition = "2021" +categories = ["rust-patterns"] description = "Derive macros for the Gazebo library" -repository = "https://github.com/facebookincubator/gazebo" documentation = "https://docs.rs/gazebo_derive" -categories = ["rust-patterns"] +edition = "2021" +license = { workspace = true } +name = "gazebo_derive" +repository = "https://github.com/facebookincubator/gazebo" +version = "0.8.0" [lib] proc-macro = true [dependencies] proc-macro2 = "1.0" -syn = { version = "2", features = ["extra-traits"] } quote = "1.0.3" +syn = { version = "2", features = ["extra-traits"] } diff --git a/gazebo/gazebo_derive/src/lib.rs b/gazebo/gazebo_derive/src/lib.rs index 6eee7f0939374..2a1ad8d6ffe1e 100644 --- a/gazebo/gazebo_derive/src/lib.rs +++ b/gazebo/gazebo_derive/src/lib.rs @@ -19,7 +19,7 @@ use syn::DeriveInput; mod default; mod variant; -/// Derive the [`Default` trait](Default), but without requiring all type arguments to implement [`Default`](Default). +/// Derive the [`Default` trait](Default), but without requiring all type arguments to implement [`Default`]. #[proc_macro_derive(Default_)] pub fn derive_default_(input: proc_macro::TokenStream) -> proc_macro::TokenStream { let input = parse_macro_input!(input as DeriveInput); diff --git a/gazebo/gazebo_derive/src/variant.rs b/gazebo/gazebo_derive/src/variant.rs index 2bcc447685be8..288ba472ea26a 100644 --- a/gazebo/gazebo_derive/src/variant.rs +++ b/gazebo/gazebo_derive/src/variant.rs @@ -18,6 +18,7 @@ use syn::Ident; pub(crate) fn derive_variant_names(input: DeriveInput) -> syn::Result { if let Data::Enum(data_enum) = input.data { let mut variant_body = Vec::new(); + let mut variant_lowercase_body = Vec::new(); for variant in data_enum.variants { let variant_name = &variant.ident; let patterns = match variant.fields { @@ -26,9 +27,13 @@ pub(crate) fn derive_variant_names(input: DeriveInput) -> syn::Result quote! { (..) }, }; let variant_name_str = variant_name.to_string(); + let variant_name_lowercase_str = to_snake_case(&variant_name_str); variant_body.push(quote! { Self::#variant_name #patterns => #variant_name_str }); + variant_lowercase_body.push(quote! 
{ + Self::#variant_name #patterns => #variant_name_lowercase_str + }); } let name = &input.ident; @@ -41,6 +46,12 @@ pub(crate) fn derive_variant_names(input: DeriveInput) -> syn::Result<TokenStream> + fn variant_name_lowercase(&self) -> &'static str { + match self { + #(#variant_lowercase_body,)* + } + } } }; diff --git a/host_sharing/BUCK b/host_sharing/BUCK index bf10921bfb7f4..8a154cd5539cf 100644 --- a/host_sharing/BUCK +++ b/host_sharing/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") diff --git a/host_sharing/Cargo.toml b/host_sharing/Cargo.toml index df4e670b36794..a9f5dd04192a2 100644 --- a/host_sharing/Cargo.toml +++ b/host_sharing/Cargo.toml @@ -1,8 +1,10 @@ [package] -name = "host_sharing" -version = "0.1.0" authors = ["Logan Wendholt "] edition = "2021" +license = { workspace = true } +name = "host_sharing" +repository = { workspace = true } +version = "0.1.0" [dependencies] allocative = { workspace = true } diff --git a/host_sharing/src/host_sharing.rs b/host_sharing/src/host_sharing.rs index b52a268b2646a..e52368471967e 100644 --- a/host_sharing/src/host_sharing.rs +++ b/host_sharing/src/host_sharing.rs @@ -27,7 +27,7 @@ const SINGLE_RUN: usize = 1; /// on Sandcastle machines with 56 cores so we want to move away from the core-analogy and instead use /// the term "permits" to describe the limited resources available on each machine. /// More long term we want to improve this to also take into account memory usage, cpu usage, etc. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Allocative)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Allocative, Hash)] pub enum WeightClass { /// Tests can require any number of permits and this can be used to mimic resource utilization like /// memory or cpu. For now, we map the Testpilot behaviour as Normal->Permits(1) and Heavy->Permits(4). @@ -45,7 +45,7 @@ impl fmt::Display for WeightClass { } } -#[derive(Debug, Copy, Clone, PartialEq, Eq, Allocative)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Allocative, Hash)] pub struct WeightPercentage { value: u8, // Between 0 and 100 } @@ -79,7 +79,7 @@ impl WeightPercentage { /// to check for other instances of the same binary. /// Some commands require the full host to run, others just don't care. /// This enum encapsulates all the different scenarios. -#[derive(Debug, Clone, PartialEq, Eq, Allocative, Hash)] +#[derive(Debug, Clone, PartialEq, Eq, Allocative, Hash)] pub enum HostSharingRequirements { /// Needs exclusive access to the host. No other processes should run.
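With `Hash` now derived and a `Display` impl added below, `HostSharingRequirements` values can key hash maps and render readably. A hedged sketch, assuming the types are reachable from the `host_sharing` crate root and that `WeightClass::Permits(n)` displays as `Permits(n)`:

```rust
use std::collections::HashMap;

use host_sharing::{HostSharingRequirements, WeightClass};

fn main() {
    // Hash + Eq let requirements act as map keys, e.g. per-class counters.
    let mut running: HashMap<HostSharingRequirements, usize> = HashMap::new();
    *running
        .entry(HostSharingRequirements::Shared(WeightClass::Permits(4)))
        .or_default() += 1;
    assert_eq!(running.len(), 1);

    // Display mirrors the fmt::Display impl in this patch; the exact
    // WeightClass rendering is assumed here.
    let req = HostSharingRequirements::Shared(WeightClass::Permits(4));
    assert_eq!(req.to_string(), "Shared(Permits(4))");
}
```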
ExclusiveAccess, @@ -89,6 +89,18 @@ pub enum HostSharingRequirements { Shared(WeightClass), } +impl fmt::Display for HostSharingRequirements { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self { + HostSharingRequirements::ExclusiveAccess => write!(f, "ExclusiveAccess"), + HostSharingRequirements::OnePerToken(name, class) => { + write!(f, "OnePerToken({},{})", name, class) + } + HostSharingRequirements::Shared(class) => write!(f, "Shared({})", class), + } + } +} + impl Default for HostSharingRequirements { fn default() -> HostSharingRequirements { HostSharingRequirements::Shared(WeightClass::Permits(1)) diff --git a/integrations/resources/rust/BUCK b/integrations/resources/rust/BUCK new file mode 100644 index 0000000000000..f8e5ccc96db56 --- /dev/null +++ b/integrations/resources/rust/BUCK @@ -0,0 +1,39 @@ +load("@fbcode_macros//build_defs:rust_binary.bzl", "rust_binary") +load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") + +oncall("rust_libraries") + +rust_library( + name = "buck_resources", + srcs = [ + "src/lib.rs", + "src/manifest.rs", + ], + # Prefer to use the published version of this library through crates.io, + # instead of this target. fbsource//third-party/rust:buck-resources + visibility = [], + deps = [ + "fbsource//third-party/rust:dunce", + "fbsource//third-party/rust:once_cell", + "fbsource//third-party/rust:serde", + "fbsource//third-party/rust:serde_json", + "fbsource//third-party/rust:thiserror", + ], +) + +rust_binary( + name = "hello", + srcs = ["tests/src/hello.rs"], + unittests = False, +) + +rust_binary( + name = "buck_resources_test", + srcs = ["tests/src/main.rs"], + resources = { + "hello_binary": ":hello", + }, + deps = [ + ":buck_resources", + ], +) diff --git a/integrations/resources/rust/Cargo.toml b/integrations/resources/rust/Cargo.toml new file mode 100644 index 0000000000000..186ed8f4e5990 --- /dev/null +++ b/integrations/resources/rust/Cargo.toml @@ -0,0 +1,15 @@ +[package] +autotests = false +description = "Load resource paths from a resources.json produced by Buck" +edition = "2021" +license = { workspace = true } +name = "buck-resources" +repository = { workspace = true } +version = "1.0.0" + +[dependencies] +dunce = { workspace = true } +once_cell = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +thiserror = { workspace = true } diff --git a/integrations/resources/rust/README.md b/integrations/resources/rust/README.md new file mode 100644 index 0000000000000..6c8ecfcc51574 --- /dev/null +++ b/integrations/resources/rust/README.md @@ -0,0 +1,3 @@ +This is a simple library for looking up paths generated by +`rust_binary.resources` in Buck 2 under `buck2 run`. See tests in this folder +for a minimal example. diff --git a/integrations/resources/rust/src/lib.rs b/integrations/resources/rust/src/lib.rs new file mode 100644 index 0000000000000..4af8335998887 --- /dev/null +++ b/integrations/resources/rust/src/lib.rs @@ -0,0 +1,136 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +mod manifest; + +use std::collections::HashMap; +use std::env; +use std::fs; +use std::io; +use std::path::PathBuf; + +use once_cell::sync::OnceCell; +use serde::de::DeserializeSeed as _; +use thiserror::Error; + +use crate::manifest::ResourcesMap; + +#[derive(Debug, Error)] +pub enum BuckResourcesError { + #[error("Failed to look up our own executable path")] + NoCurrentExe { source: io::Error }, + + #[error( + "Failed to read manifest file: `{manifest_path}`. \ + Are you maybe running `buck1`? `rust_binary` only supports `resources` under `buck2`!" + )] + ReadFailed { + manifest_path: PathBuf, + source: io::Error, + }, + + #[error("Failed to parse manifest file: `{manifest_path}`")] + ParsingFailed { + manifest_path: PathBuf, + source: serde_json::Error, + }, + + #[error("No resource named `{name}` found in manifest file: `{manifest_path}`")] + NoSuchResource { + name: String, + manifest_path: PathBuf, + }, + + #[error( + "Resource `{name}` points to invalid path `{resource_path}` in manifest `{manifest_path}`" + )] + BadResourcePath { + name: String, + resource_path: PathBuf, + manifest_path: PathBuf, + source: io::Error, + }, +} + +/// Look up a resource based on a manifest file. Built to work seamlessly +/// with `resources` defined in a `rust_binary` target, but in principle +/// it would work with any correct manifest file. +/// +/// Resources follow the naming format: +/// +/// ```text +/// {PATH_TO_TARGETS_FOLDER}/{TARGET_NAME} +/// ``` +/// +/// So for `//path/to:target`, the resource is named `path/to/target`. +/// +/// Still unsure about a resource path? Inspect the JSON manifest file +/// found in the `BuckResourcesError`. +/// +/// * Manifest location: `$CUR_EXE.resources.json`, where `$CUR_EXE` is +/// the absolute path of the currently executing binary. +/// * Relative paths in the manifest are resolved relative to the location +/// of the currently executing binary. 
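To make the naming rule concrete: for `//path/to:target`, the lookup key is `path/to/target`. A hedged usage sketch (the key below is hypothetical):

```rust
fn main() {
    // Hypothetical key: a `rust_binary` at //path/to:target with a
    // `resources` entry would be looked up as "path/to/target".
    match buck_resources::get("path/to/target") {
        Ok(path) => println!("resource at {}", path.display()),
        // Every error variant carries the manifest path, which makes
        // misnamed resources (or accidental buck1 runs) easy to spot.
        Err(err) => eprintln!("lookup failed: {err}"),
    }
}
```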
+pub fn get(name: S) -> Result +where + S: AsRef, +{ + static MANIFEST: OnceCell<(PathBuf, HashMap)> = OnceCell::new(); + + let (manifest_path, manifest) = MANIFEST.get_or_try_init(|| { + let manifest_path = match env::current_exe() { + Ok(mut value) => { + value.as_mut_os_string().push(".resources.json"); + value + } + Err(source) => { + return Err(BuckResourcesError::NoCurrentExe { source }); + } + }; + + let data = match fs::read(&manifest_path) { + Ok(x) => x, + Err(source) => { + return Err(BuckResourcesError::ReadFailed { + manifest_path, + source, + }); + } + }; + + let base_dir = manifest_path.parent().unwrap_or(&manifest_path); + + let deserializer = &mut serde_json::Deserializer::from_slice(&data); + let manifest = match ResourcesMap::new(base_dir).deserialize(deserializer) { + Ok(x) => x, + Err(source) => { + return Err(BuckResourcesError::ParsingFailed { + manifest_path, + source, + }); + } + }; + + Ok((manifest_path, manifest)) + })?; + + if let Some(resource_path) = manifest.get(name.as_ref()) { + dunce::canonicalize(resource_path).map_err(|source| BuckResourcesError::BadResourcePath { + name: name.as_ref().to_owned(), + resource_path: resource_path.clone(), + manifest_path: manifest_path.clone(), + source, + }) + } else { + Err(BuckResourcesError::NoSuchResource { + name: name.as_ref().to_owned(), + manifest_path: manifest_path.clone(), + }) + } +} diff --git a/integrations/resources/rust/src/manifest.rs b/integrations/resources/rust/src/manifest.rs new file mode 100644 index 0000000000000..7f8d88a750a3d --- /dev/null +++ b/integrations/resources/rust/src/manifest.rs @@ -0,0 +1,112 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::collections::HashMap; +use std::fmt; +use std::path::Path; +use std::path::PathBuf; + +use serde::de; + +struct ResourcePath<'a> { + base_dir: &'a Path, +} + +impl<'de> de::Visitor<'de> for ResourcePath<'_> { + type Value = PathBuf; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("string path") + } + + fn visit_str(self, v: &str) -> Result + where + E: de::Error, + { + Ok(self.base_dir.join(v)) + } +} + +impl<'de> de::DeserializeSeed<'de> for ResourcePath<'_> { + type Value = PathBuf; + + fn deserialize(self, deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + deserializer.deserialize_str(self) + } +} + +pub struct ResourcesMap<'a> { + base_dir: &'a Path, +} + +impl<'a> ResourcesMap<'a> { + pub fn new(base_dir: &'a Path) -> Self { + ResourcesMap { base_dir } + } +} + +impl<'de> de::Visitor<'de> for ResourcesMap<'_> { + type Value = HashMap; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("map of string to path") + } + + fn visit_map(self, mut map: M) -> Result + where + M: de::MapAccess<'de>, + { + let mut manifest = HashMap::new(); + while let Some(key) = map.next_key::()? 
{ + let seed = ResourcePath { + base_dir: self.base_dir, + }; + let value = map.next_value_seed(seed)?; + manifest.insert(key, value); + } + Ok(manifest) + } +} + +impl<'de> de::DeserializeSeed<'de> for ResourcesMap<'_> { + type Value = HashMap; + + fn deserialize(self, deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + deserializer.deserialize_map(self) + } +} + +#[cfg(test)] +mod tests { + use serde::de::DeserializeSeed as _; + + use super::*; + + #[test] + fn test_resources_map_deserialize() { + let deserializer = + &mut serde_json::Deserializer::from_str(r#" {"a": "x.exe", "b": "y\\z.exe"} "#); + let manifest = ResourcesMap::new(Path::new("/tmp")) + .deserialize(deserializer) + .unwrap(); + assert_eq!( + manifest, + HashMap::from([ + ("a".into(), "/tmp/x.exe".into()), + ("b".into(), "/tmp/y\\z.exe".into()), + ]), + ); + } +} diff --git a/integrations/resources/rust/tests/src/hello.rs b/integrations/resources/rust/tests/src/hello.rs new file mode 100644 index 0000000000000..84dead74ca310 --- /dev/null +++ b/integrations/resources/rust/tests/src/hello.rs @@ -0,0 +1,12 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +fn main() { + println!("Hello World!"); +} diff --git a/integrations/resources/rust/tests/src/main.rs b/integrations/resources/rust/tests/src/main.rs new file mode 100644 index 0000000000000..0f1785315af84 --- /dev/null +++ b/integrations/resources/rust/tests/src/main.rs @@ -0,0 +1,23 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::process::Command; + +fn main() { + println!("Hello from resources.rs. 
Will now execute the `hello_binary` resource."); + let hello_path = buck_resources::get("buck2/integrations/resources/rust/hello_binary").unwrap(); + if let Err(err) = Command::new(&hello_path).status() { + panic!("Failed to execute {}: {}", hello_path.display(), err); + } +} + +#[test] +fn resource_exists_in_unittest() { + buck_resources::get("buck2/integrations/resources/rust/hello_binary").unwrap(); +} diff --git a/integrations/rust-project/BUCK b/integrations/rust-project/BUCK index 9f0040e424aa8..101aea5d729ca 100644 --- a/integrations/rust-project/BUCK +++ b/integrations/rust-project/BUCK @@ -1,7 +1,6 @@ load("@fbcode_macros//build_defs:rust_binary.bzl", "rust_binary") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") -oncall("cpplangex") +oncall("rust_devx") rust_binary( name = "rust-project", @@ -9,18 +8,21 @@ rust_binary( "src/**/*.rs", "templates/*", ]), - contacts = ["oncall+cpplangex@xmail.facebook.com"], + contacts = ["oncall+rust_devx@xmail.facebook.com"], crate_root = "src/main.rs", link_style = "static", deps = [ "fbsource//third-party/rust:anyhow", - "fbsource//third-party/rust:clap-3", - "fbsource//third-party/rust:crossbeam", - "fbsource//third-party/rust:lsp-server", - "fbsource//third-party/rust:lsp-types", + "fbsource//third-party/rust:clap", + "fbsource//third-party/rust:dunce", + "fbsource//third-party/rust:elf", + "fbsource//third-party/rust:rustc-hash", "fbsource//third-party/rust:serde", "fbsource//third-party/rust:serde_json", "fbsource//third-party/rust:tracing", "fbsource//third-party/rust:tracing-subscriber", + "fbsource//third-party/rust:whoami", + # @oss-disable: "//common/rust/shed/fbinit:fbinit", + # @oss-disable: "//common/rust/shed/scuba_sample:scuba_sample", ], ) diff --git a/integrations/rust-project/Cargo.toml b/integrations/rust-project/Cargo.toml index b60439cdb2dcf..c720165cdef9d 100644 --- a/integrations/rust-project/Cargo.toml +++ b/integrations/rust-project/Cargo.toml @@ -1,17 +1,27 @@ [package] -name = "rust-project" -version = "0.0.0" -edition = "2021" description = "A Rust-centric companion tool to Buck for usage in IDEs." +edition = "2021" +license = { workspace = true } +name = "rust-project" readme = "README.md" +repository = { workspace = true } +version = "0.0.0" [dependencies] anyhow = { workspace = true } clap = { workspace = true } crossbeam = { workspace = true } +dunce = { workspace = true } +elf = "0.7.0" lsp-server = { workspace = true } lsp-types = { workspace = true } +rustc-hash = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } tracing = { workspace = true } +tracing-core = "0.1.32" tracing-subscriber = { workspace = true } +whoami = "1.4.0" + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ["cfg(fbcode_build)"] } diff --git a/integrations/rust-project/src/buck.rs b/integrations/rust-project/src/buck.rs index ee8dfb5576031..9edcc6120cb28 100644 --- a/integrations/rust-project/src/buck.rs +++ b/integrations/rust-project/src/buck.rs @@ -7,8 +7,8 @@ * of this source tree. 
*/ -use std::collections::BTreeMap; -use std::collections::HashMap; +use std::ffi::OsStr; +use std::fs; use std::io; use std::path::Path; use std::path::PathBuf; @@ -18,6 +18,8 @@ use std::process::Output; use std::process::Stdio; use anyhow::Context; +use rustc_hash::FxHashMap; +use rustc_hash::FxHashSet; use serde::Deserialize; use tracing::enabled; use tracing::info; @@ -26,8 +28,13 @@ use tracing::trace; use tracing::warn; use tracing::Level; +use crate::cli::Input; +use crate::json_project::Build; use crate::json_project::Edition; use crate::json_project::JsonProject; +use crate::json_project::Runnable; +use crate::json_project::RunnableKind; +use crate::json_project::Source; use crate::json_project::Sysroot; use crate::target::AliasedTargetInfo; use crate::target::ExpandedAndResolved; @@ -35,18 +42,19 @@ use crate::target::Kind; use crate::target::MacroOutput; use crate::target::Target; use crate::target::TargetInfo; -use crate::target::TargetInfoEntry; use crate::Crate; use crate::Dep; -pub fn to_json_project( +pub(crate) fn to_json_project( sysroot: Sysroot, expanded_and_resolved: ExpandedAndResolved, - aliases: BTreeMap, + aliases: FxHashMap, relative_paths: bool, + check_cycles: bool, ) -> Result { let mode = select_mode(None); let buck = Buck::new(mode); + let project_root = buck.resolve_project_root()?; let ExpandedAndResolved { expanded_targets: _, @@ -55,36 +63,48 @@ pub fn to_json_project( } = expanded_and_resolved; let target_index = merge_unit_test_targets(target_map); - let project_root = buck.resolve_project_root()?; - let mut crates: Vec = Vec::with_capacity(target_index.len()); - for (target, TargetInfoEntry { info, index: _ }) in &target_index { - let mut deps = resolve_dependencies_aliases(info, &target_index, &aliases, &proc_macros); - resolve_renamed_dependencies(info, &target_index, &mut deps); + // A rust-project.json uses file indexes to associate dependencies with the + // relevant crate. + // + // "crates": [ + // { + // "display_name": "my-project", + // "deps": [ + // { + // "crate": 1, + // "name": "my-lib" + // } + // ] + // }, + // { + // "display_name": "my-lib", + // "deps": [] + // } + // ] + // + // This means that we must iterate over targets in a consistent order, so + // the indexes are correct. Build an ordered Vec and a corresponding HashMap + // from target name to index. + let targets_vec = target_index.keys().cloned().collect::>(); + + let mut targets_to_ids: FxHashMap<&Target, usize> = FxHashMap::default(); + for (index, target) in targets_vec.iter().enumerate() { + targets_to_ids.insert(target, index); + } + + let mut crates: Vec = Vec::with_capacity(targets_vec.len()); + for target in &targets_vec { + let info = target_index.get(&target).unwrap(); + + let dep_targets = resolve_aliases(&info.deps, &aliases, &proc_macros); + let deps = as_deps(&dep_targets, info, &targets_to_ids, &target_index); let edition = match &info.edition { Some(edition) => edition.clone(), None => Edition::Edition2021, }; - // we need to take the existing features and prefix `feature=` - // before passing it to rust-analyzer via `rust-project.json`. - let mut cfg = info - .clone() - .features - .into_iter() - .map(|f| format!("feature=\"{f}\"")) - .collect::>(); - - // Include "test" cfg so rust-analyzer picks up #[cfg(test)] code. - cfg.push("test".to_owned()); - - #[cfg(fbcode_build)] - { - // FIXME(JakobDegen): This should be set via a configuration mechanism of some kind. 
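The index scheme described in the comment above is easy to get wrong, so here is a self-contained sketch (field names mirror the `Crate` and `Dep` types in this file) of why target order must be fixed before deps are emitted:

```rust
struct Dep {
    crate_index: usize,
    name: String,
}

struct Crate {
    display_name: Option<String>,
    deps: Vec<Dep>,
}

fn main() {
    // Mirrors the JSON in the comment: crates[0] depends on crates[1].
    let crates = vec![
        Crate {
            display_name: Some("my-project".into()),
            deps: vec![Dep { crate_index: 1, name: "my_lib".into() }],
        },
        Crate {
            display_name: Some("my-lib".into()),
            deps: vec![],
        },
    ];

    // Deps are resolved purely by position, so reordering `crates` after
    // the indexes are assigned would silently corrupt the graph.
    let dep = &crates[0].deps[0];
    assert_eq!(dep.name, "my_lib");
    assert_eq!(
        crates[dep.crate_index].display_name.as_deref(),
        Some("my-lib")
    );
}
```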
- cfg.push("fbcode_build".to_owned()); - } - // the mapping here is inverted, which means we need to search through the keys for the Target. // thankfully, most projects don't have to many proc macros, which means the size of this list // remains in the two digit space. @@ -96,16 +116,36 @@ pub fn to_json_project( trace!(?target, ?dylib, "target is a proc macro"); } + // corresponds to the BUCK/TARGETS file of a target. + let mut build_file = info.project_relative_buildfile.clone(); + // We don't need to push the source folder as rust-analyzer by default will use the root-module parent(). // info.root_module() will output either the fbcode source file or the symlinked one based on if it's a mapped source or not let mut root_module = info.root_module(); if relative_paths { proc_macro_dylib_path = proc_macro_dylib_path.map(|p| relative_to(&p, &project_root)); - root_module = relative_to(&root_module, &project_root); + root_module = root_module.map(|x| relative_to(&x, &project_root)); + } else { + let path = project_root.join(build_file); + build_file = path; } - let mut env: BTreeMap = BTreeMap::new(); + let root_module = match root_module { + Err(e) => { + warn!(?target, "root module does not exist: {}", e); + continue; + } + Ok(x) => x, + }; + + let mut env = FxHashMap::default(); + + // Populate the environment variables the target configuration's environment variables, + // but ignore OUT_DIR as we handle that later. + env.extend(info.env.clone().into_iter()); + + // If $CARGO_MANIFEST_DIR is set, resolve it to an absolute path. if let Some(rel_cargo_manifest_dir) = info.env.get("CARGO_MANIFEST_DIR") { let cargo_manifest_dir = info.source_folder.join(rel_cargo_manifest_dir); env.insert( @@ -114,25 +154,82 @@ pub fn to_json_project( ); } + let mut include_dirs = FxHashSet::default(); + if let Some(out_dir) = info.env.get("OUT_DIR") { + // to ensure that the `OUT_DIR` is included as part of the `PackageRoot` in rust-analyzer, + // manually insert the parent of the `out_dir` into `include_dirs`. 
+ if let Some(parent) = Path::new(out_dir).parent() { + include_dirs.insert(parent.to_owned()); + } + } + + if let Some(parent) = root_module.parent() { + include_dirs.insert(parent.to_owned()); + } + + let build = if info.in_workspace { + let build = Build { + label: target.clone(), + build_file: build_file.to_owned(), + target_kind: info.kind.clone().into(), + }; + Some(build) + } else { + None + }; + let crate_info = Crate { - display_name: Some(info.name.clone()), + display_name: Some(info.display_name()), root_module, edition, deps, is_workspace_member: info.in_workspace, - source: None, - cfg, - target: None, + source: Some(Source { + include_dirs, + exclude_dirs: FxHashSet::default(), + }), + cfg: info.cfg(), env, + build, is_proc_macro: info.proc_macro.unwrap_or(false), proc_macro_dylib_path, + target: None, }; crates.push(crate_info); } + if check_cycles { + check_cycles_in_crate_graph(&crates); + } + let jp = JsonProject { sysroot, crates, + runnables: vec![ + Runnable { + program: "buck".to_owned(), + args: vec![ + "build".to_owned(), + "-c=client.id=rust-project".to_owned(), + "{label}".to_owned(), + ], + cwd: project_root.to_owned(), + kind: RunnableKind::Check, + }, + Runnable { + program: "buck".to_owned(), + args: vec![ + "test".to_owned(), + "-c=client.id=rust-project".to_owned(), + "{label}".to_owned(), + "--".to_owned(), + "{test_id}".to_owned(), + "--print-passing-details".to_owned(), + ], + cwd: project_root.to_owned(), + kind: RunnableKind::TestOne, + }, + ], // needed to ignore the generated `rust-project.json` in diffs, but including the actual // string will mark this file as generated generated: String::from("\x40generated"), @@ -141,8 +238,82 @@ pub fn to_json_project( Ok(jp) } +/// Check that there are no cycles in the crate dependency graph: a +/// crate should never transitively depend on itself. +/// +/// If a cycle is found, print the offending crate and terminate. +fn check_cycles_in_crate_graph(crates: &[Crate]) { + // From a start crate ID, each ID we can reach, along with an example route. + let mut reachable: FxHashMap>> = FxHashMap::default(); + + // Initialize the reachable crates from immediate dependencies. + for (idx, krate) in crates.iter().enumerate() { + let mut routes: FxHashMap> = FxHashMap::default(); + for dep in &krate.deps { + routes.insert(dep.crate_index, vec![idx, dep.crate_index]); + } + + reachable.insert(idx, routes); + } + + let mut changed = true; + while changed { + changed = false; + + let mut new_reachable = reachable.clone(); + + // Iterate all the dependencies, and add any transitive + // dependencies that weren't already in `reachable`. 
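The loop below grows each crate's reachable set to a fixed point. The same technique in a self-contained form (simplified to plain `HashMap`s, without the example routes):

```rust
use std::collections::HashMap;
use std::collections::HashSet;

/// Returns a node that transitively depends on itself, if any.
fn find_cycle(deps: &HashMap<usize, Vec<usize>>) -> Option<usize> {
    // Start from the immediate dependencies.
    let mut reachable: HashMap<usize, HashSet<usize>> = deps
        .iter()
        .map(|(&n, ds)| (n, ds.iter().copied().collect()))
        .collect();

    let mut changed = true;
    while changed {
        changed = false;
        for &n in deps.keys() {
            // One expansion step: everything reachable via a known edge.
            let step: Vec<usize> = reachable[&n]
                .iter()
                .flat_map(|d| reachable.get(d).into_iter().flatten().copied())
                .collect();
            for t in step {
                if t == n {
                    return Some(n); // n reaches itself: a cycle.
                }
                if reachable.get_mut(&n).unwrap().insert(t) {
                    changed = true;
                }
            }
        }
    }
    None
}

fn main() {
    let deps = HashMap::from([(0, vec![1]), (1, vec![2]), (2, vec![0])]);
    assert!(find_cycle(&deps).is_some());
}
```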
+ for (crate_idx, deps_idxs) in reachable.iter() { + for (dep_idx, route) in deps_idxs.iter() { + for transitive_dep_idx in reachable[dep_idx].keys() { + if transitive_dep_idx == crate_idx { + let mut cycle_route = route.clone(); + cycle_route.push(*transitive_dep_idx); + + tracing::error!( + crate = crates[*crate_idx].display_name, + route = format_route(&cycle_route, crates), + "Found a cycle", + ); + std::process::exit(2); + } + + if !deps_idxs.contains_key(transitive_dep_idx) { + let mut new_route = route.clone(); + new_route.push(*transitive_dep_idx); + + new_reachable + .get_mut(crate_idx) + .expect("We should always have initialized the dependencies for each crate.") + .insert(*transitive_dep_idx, new_route); + changed = true; + } + } + } + } + + reachable = new_reachable; + } +} + +fn format_route(route: &[usize], crates: &[Crate]) -> String { + let mut formatted_crates = vec![]; + for idx in route { + formatted_crates.push(format!( + "{} ({idx})", + crates[*idx] + .display_name + .clone() + .unwrap_or("".to_owned()), + )); + } + + formatted_crates.join(" -> ") +} + /// If `path` starts with `base`, drop the prefix. -pub fn relative_to(path: &Path, base: &Path) -> PathBuf { +pub(crate) fn relative_to(path: &Path, base: &Path) -> PathBuf { match path.strip_prefix(base) { Ok(rel_path) => rel_path, Err(_) => path, @@ -150,98 +321,102 @@ pub fn relative_to(path: &Path, base: &Path) -> PathBuf { .to_owned() } -fn resolve_dependencies_aliases( - info: &TargetInfo, - target_index: &BTreeMap, - aliases: &BTreeMap, - proc_macros: &BTreeMap, -) -> Vec { - let mut deps = vec![]; - for dependency_target in &info.deps { - let dependency_target = match aliases.get(dependency_target) { +/// If any target in `targets` is an alias, resolve it to the actual target. +fn resolve_aliases( + targets: &[Target], + aliases: &FxHashMap, + proc_macros: &FxHashMap, +) -> Vec { + let mut seen = FxHashSet::default(); + let mut resolved_targets = vec![]; + + for target in targets { + let destination_target = match aliases.get(target) { Some(actual) => &actual.actual, None => { // we fall back to check the proc macros for aliases // (these should exist in the aliases map, but they don't. yolo.) - match proc_macros.get(dependency_target) { + match proc_macros.get(target) { Some(MacroOutput { actual, .. }) => actual, - None => dependency_target, + None => target, } } }; - if let Some(entry) = target_index.get(dependency_target) { - trace!(?dependency_target, "present in target_index"); - let dep = Dep { - crate_index: entry.index, - name: entry.info.crate_name(), - }; - deps.push(dep); - } else { - trace!(?dependency_target, "not present in target_index"); + if !seen.contains(destination_target) { + resolved_targets.push(destination_target.to_owned()); + seen.insert(destination_target); } } - deps + resolved_targets } -fn resolve_renamed_dependencies( +/// Convert `dep_targets` to `Dep` values. +fn as_deps( + dep_targets: &[Target], info: &TargetInfo, - target_index: &BTreeMap, - deps: &mut Vec, -) { - // we handled named_deps when constructing the dependency, as rust-analyzer cares about the - // the crate name for correct resolution. `named_deps` are distinct from `deps` in buck2 and - // are not currently unified into a `buck.direct_dependencies` or `$deps` in buck2. - // TODO: once https://fb.workplace.com/groups/buck2users/posts/3137264549863238/?comment_id=3137265756529784 - // is resolved, switch to `$deps`. 
- for (renamed_crate, dependency_target) in &info.named_deps { - if let Some(entry) = target_index.get(dependency_target) { - trace!(old_name = ?entry.info.crate_name(), new_name = ?renamed_crate, "renamed crate"); - // if the renamed dependency was encountered before, rename the existing `Dep` rather - // than create a new one with a new name but the same index. While this duplication doesn't - // seem to have any noticeable impact in limited testing, the behavior will be closer to - // that of Rusty and Cargo. - // - // However, if the renamed dependency wasn't encountered before, we create a new `Dep` with - // the new name. - // - // The primary invariant that is being upheld is that each index should - // have one associated name. - match deps.iter_mut().find(|dep| dep.crate_index == entry.index) { - Some(dep) => dep.name = renamed_crate.to_string(), - None => { - let dep = Dep { - crate_index: entry.index, - name: renamed_crate.to_string(), - }; - deps.push(dep); - } + target_to_ids: &FxHashMap<&Target, usize>, + target_index: &FxHashMap, +) -> Vec { + let overridden_names = info.overridden_dep_names(); + + let mut seen_targets = FxHashSet::default(); + + let mut deps = vec![]; + for dep_target in dep_targets { + seen_targets.insert(dep_target); + + let Some(info) = target_index.get(dep_target) else { + trace!(?dep_target, "not present in target_index"); + continue; + }; + + let crate_index = *target_to_ids.get(dep_target).unwrap(); + let name = match overridden_names.get(dep_target) { + Some(n) => n.replace('-', "_"), + None => info.crate_name(), + }; + + deps.push(Dep { crate_index, name }); + } + + for (target, name) in overridden_names.into_iter() { + if !seen_targets.contains(&target) { + let Some(crate_index) = target_to_ids.get(&target) else { + continue; }; + deps.push(Dep { + crate_index: *crate_index, + name: name.replace('-', "_"), + }); } } + + deps } /// For every test target, drop it from `target_map` and include test /// target's dependencies in the target that references the test /// target. fn merge_unit_test_targets( - target_map: BTreeMap, -) -> BTreeMap { - let mut target_index = BTreeMap::new(); + target_map: FxHashMap, +) -> FxHashMap { + let mut target_index = FxHashMap::default(); - let (tests, mut targets): (BTreeMap, BTreeMap) = + let (tests, mut targets): (FxHashMap, FxHashMap) = target_map .into_iter() .partition(|(_, info)| info.kind == Kind::Test); let (generated_unit_tests, standalone_tests): ( - BTreeMap, - BTreeMap, - ) = tests.into_iter().partition(|(target, _)| { - targets - .iter() - .any(|(_, value)| value.tests.contains(target)) + FxHashMap, + FxHashMap, + ) = tests.into_iter().partition(|(test_target, _)| { + test_target.ends_with("-unittest") + && targets + .iter() + .any(|(_, value)| value.test_deps.contains(test_target)) }); targets.extend(standalone_tests); @@ -251,40 +426,80 @@ fn merge_unit_test_targets( // Merge the `-unittest` target with the parent target. 
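A miniature of the `-unittest` merge performed below, using plain strings in place of `Target`/`TargetInfo` (the `merge` helper is purely illustrative):

```rust
use std::collections::HashMap;

/// Fold each `<name>-unittest` target's deps into `<name>`, then drop it.
fn merge(targets: HashMap<String, Vec<String>>) -> HashMap<String, Vec<String>> {
    let (tests, mut rest): (HashMap<_, _>, HashMap<_, _>) = targets
        .into_iter()
        .partition(|(name, _)| name.ends_with("-unittest"));

    for (name, deps) in rest.iter_mut() {
        if let Some(test_deps) = tests.get(&format!("{name}-unittest")) {
            for d in test_deps {
                // Never add a self-edge: the unittest depends on its parent.
                if d != name && !deps.contains(d) {
                    deps.push(d.clone());
                }
            }
        }
    }
    rest
}

fn main() {
    let targets = HashMap::from([
        ("//foo".to_owned(), vec![]),
        (
            "//foo-unittest".to_owned(),
            vec!["//foo".to_owned(), "//mockall".to_owned()],
        ),
    ]);
    let merged = merge(targets);
    assert_eq!(merged["//foo"], vec!["//mockall".to_owned()]);
}
```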
let unittest_target = Target::new(format!("{target}-unittest")); - if info.tests.contains(&unittest_target) { + if info.test_deps.contains(&unittest_target) { if let Some(test_info) = generated_unit_tests.get(&unittest_target) { for test_dep in &test_info.deps { if !info.deps.contains(test_dep) && *test_dep != target { info.deps.push(test_dep.clone()) } } + + info.in_workspace |= test_info.in_workspace; } } - target_index.insert(target.to_owned(), TargetInfoEntry { index, info }); + target_index.insert(target.to_owned(), info); } target_index } #[derive(Debug, Default)] -pub struct Buck { +pub(crate) struct Buck { mode: Option<String>, } impl Buck { - pub fn new(mode: Option<String>) -> Self { + pub(crate) fn new(mode: Option<String>) -> Self { Buck { mode } } - pub fn command(&self) -> Command { - Command::new("buck2") + /// Invoke `buck2` with the given subcommands. + /// + /// Care should be taken to ensure that buck is invoked with the same set + /// of options and configuration to avoid invalidating caches. + fn command<I, S>(&self, subcommands: I) -> Command + where + I: IntoIterator<Item = S>, + S: AsRef<OsStr>, + { + let mut cmd = self.command_without_config(subcommands); + cmd.args([ + "-c=client.id=rust-project", + "-c=xplat.available_platforms=CXX,FBCODE", + "-c=rust.rust_project_build=true", + ]); + cmd } - /// Return the absolute path of the current Buck project root. - pub fn resolve_project_root(&self) -> Result<PathBuf, anyhow::Error> { - let mut command = self.command(); + /// Invoke `buck` with the given subcommands. + /// + /// This method should only be used with buck commands that do not accept + /// configuration options, such as `root`. [`Buck::command`] should be preferred. + fn command_without_config<I, S>(&self, subcommands: I) -> Command + where + I: IntoIterator<Item = S>, + S: AsRef<OsStr>, + { + let mut cmd = Command::new("buck2"); + + // rust-analyzer invokes the check-on-save command with `RUST_BACKTRACE=short` + // set. Unfortunately, buck2 doesn't handle that well and becomes extremely + // slow when the daemon is started with backtrace variables set. Until that is + // fixed, just unset them here. + cmd.env_remove("RUST_BACKTRACE") + .env_remove("RUST_LIB_BACKTRACE"); + + cmd.args(["--isolation-dir", ".rust-analyzer"]); + cmd.args(subcommands); + cmd.args(["--oncall", "rust_devx"]); + + cmd + } - command.args(["root", "--kind=project"]); + /// Return the absolute path of the current Buck project root. + pub(crate) fn resolve_project_root(&self) -> Result<PathBuf, anyhow::Error> { + let mut command = self.command_without_config(["root"]); + command.arg("--kind=project"); let mut stdout = utf8_output(command.output(), &command)?; truncate_line_ending(&mut stdout); @@ -296,9 +511,9 @@ impl Buck { Ok(stdout.into()) } - pub fn resolve_sysroot_src(&self) -> Result<PathBuf, anyhow::Error> { - let mut command = self.command(); - command.args(["audit", "config", "--json", "--", "rust.sysroot_src_path"]); + pub(crate) fn resolve_sysroot_src(&self) -> Result<PathBuf, anyhow::Error> { + let mut command = self.command(["audit", "config"]); + command.args(["--json", "--", "rust.sysroot_src_path"]); command .stderr(Stdio::null()) .stdout(Stdio::piped()) @@ -322,76 +537,77 @@ impl Buck { /// Determines the owning target(s) of the saved file and builds them.
#[instrument] - pub fn check_saved_file( + pub(crate) fn check_saved_file( &self, use_clippy: bool, - saved_filed: &Path, - ) -> Result, anyhow::Error> { - let mut command = self.command(); - - command.args(["--isolation-dir", ".rust-analyzer"]); - - command.arg("bxl"); - command.args(["--oncall", "rust_devx", "-c", "client.id=rust-project"]); + saved_file: &Path, + ) -> Result { + let mut command = self.command(["bxl"]); if let Some(mode) = &self.mode { command.arg(mode); } - command.args([ - "prelude//rust/rust-analyzer/check.bxl:check", - "-c=rust.failure_filter=true", - "-c=rust.incremental=true", - ]); + command.arg("prelude//rust/rust-analyzer/check.bxl:check"); + + let mut file_path = saved_file.to_owned(); + if !file_path.is_absolute() { + if let Ok(cwd) = std::env::current_dir() { + file_path = cwd.join(saved_file); + } + } // apply BXL scripts-specific arguments: command.args(["--", "--file"]); - command.arg(saved_filed.as_os_str()); + command.arg(file_path.as_os_str()); command.args(["--use-clippy", &use_clippy.to_string()]); - let output = command.output(); - if let Ok(output) = &output { - if output.stdout.is_empty() { - return Ok(vec![]); - } + // Set working directory to the containing directory of the target file. + // This fixes cases where the working directory happens to be an + // unrelated buck project (e.g. www). + if let Some(parent_dir) = saved_file.parent() { + command.current_dir(parent_dir); } + let output = command.output(); + let files = deserialize_output(output, &command)?; Ok(files) } #[instrument(skip_all)] - pub fn expand_and_resolve(&self, targets: &[Target]) -> anyhow::Result { + pub(crate) fn expand_and_resolve( + &self, + targets: &[Target], + exclude_workspaces: bool, + ) -> anyhow::Result { if targets.is_empty() { return Ok(ExpandedAndResolved::default()); } - let mut command = self.command(); - command.args(["--isolation-dir", ".rust-analyzer"]); - command.arg("bxl"); - command.args(["--oncall", "rust_devx", "-c", "client.id=rust-project"]); + let mut command = self.command(["bxl"]); if let Some(mode) = &self.mode { command.arg(mode); } command.args([ - "prelude//rust/rust-analyzer/resolve_deps.bxl:expand_and_resolve", - "-c=rust.failure_filter=true", - "-c=rust.incremental=true", + "prelude//rust/rust-analyzer/resolve_deps.bxl:resolve_targets", "--", + "--exclude_workspaces", + exclude_workspaces.to_string().as_str(), "--targets", ]); command.args(targets); - deserialize_output(command.output(), &command) + deserialize_file_output(command.output(), &command) } #[instrument(skip_all)] - pub fn query_aliased_libraries( + pub(crate) fn query_aliased_libraries( &self, targets: &[Target], - ) -> Result, anyhow::Error> { + ) -> Result, anyhow::Error> { // FIXME: Do this in bxl as well instead of manually writing a separate query - let mut command = self.command(); + let mut command = self.command(["cquery"]); // Fetch all aliases used by transitive deps. This is so we // can translate an apparent dependency of e.g. @@ -399,7 +615,6 @@ impl Buck { // name of fbsource//third-party/rust:once_cell-1.15. This // query also fetches non-Rust aliases, but they shouldn't // hurt anything. 
- command.arg("cquery"); if let Some(mode) = &self.mode { command.arg(mode); } @@ -407,7 +622,7 @@ impl Buck { command.args(targets); info!("resolving aliased libraries"); - let raw: BTreeMap = + let raw: FxHashMap = deserialize_output(command.output(), &command)?; if enabled!(Level::TRACE) { @@ -418,32 +633,59 @@ impl Buck { Ok(raw) } + /// Find the buildfile that owns each file specified, and return the path to + /// each buildfile along with all the targets it contains. #[instrument(skip_all)] - pub fn query_owner( + pub(crate) fn query_owners( &self, - files: Vec, - ) -> Result>, anyhow::Error> { - let mut command = self.command(); + input: Input, + max_extra_targets: usize, + ) -> Result>, anyhow::Error> { + let mut command = self.command(["bxl"]); command.args([ - "uquery", - // Limit fb_xplat to just generate CXX targets (unsuffixed) - // so that we don't end up with a bunch of duplicate targets - // pointing to the same crate - "-c=xplat.available_platforms=CXX", - "--json", - "owner(\"%s\")", + "prelude//rust/rust-analyzer/resolve_deps.bxl:resolve_owning_buildfile", "--", ]); - command.args(&files); - info!(?files, "querying buck to determine owner"); + info!( + ?input, + "querying buck to determine owning buildfile and its targets" + ); + + match input { + Input::Targets(targets) => { + command.arg("--targets"); + command.args(targets); + } + Input::Files(files) => { + command.arg("--files"); + command.args(files); + } + Input::Buildfile(files) => { + command.arg("--buildfiles"); + command.args(files); + } + }; + + command.arg("--max_extra_targets"); + command.arg(max_extra_targets.to_string()); + let out = deserialize_output(command.output(), &command)?; Ok(out) } } -pub fn utf8_output(output: io::Result, command: &Command) -> Result { +#[derive(Debug, Deserialize)] +pub(crate) struct CheckOutput { + pub(crate) diagnostic_paths: Vec, + pub(crate) project_root: PathBuf, +} + +pub(crate) fn utf8_output( + output: io::Result, + command: &Command, +) -> Result { match output { Ok(Output { stdout, @@ -467,7 +709,28 @@ pub fn utf8_output(output: io::Result, command: &Command) -> Result( +fn deserialize_output(output: io::Result, command: &Command) -> Result +where + T: for<'a> Deserialize<'a>, +{ + match output { + Ok(Output { + stdout, + stderr, + status, + }) => { + tracing::debug!(?command, "parsing command output"); + serde_json::from_slice(&stdout) + .with_context(|| cmd_err(command, status, &stderr)) + .context("failed to deserialize command output") + } + Err(err) => Err(err) + .with_context(|| format!("command `{:?}`", command)) + .context("failed to execute command"), + } +} + +fn deserialize_file_output( output: io::Result, command: &Command, ) -> Result @@ -480,8 +743,8 @@ where stderr, status, }) => { - tracing::debug!(?command, "parsing command output"); - serde_json::from_slice(&stdout) + tracing::debug!(?command, "parsing file output"); + serde_json_from_stdout_path(&stdout) .with_context(|| cmd_err(command, status, &stderr)) .context("failed to deserialize command output") } @@ -491,6 +754,17 @@ where } } +fn serde_json_from_stdout_path(stdout: &[u8]) -> Result +where + T: for<'a> Deserialize<'a>, +{ + let file_path = std::str::from_utf8(stdout)?; + let file_path = Path::new(file_path.lines().next().context("no file path in output")?); + let contents = + fs::read_to_string(file_path).with_context(|| format!("failed to read {:?}", file_path))?; + serde_json::from_str(&contents).context("failed to deserialize file") +} + fn cmd_err(command: &Command, status: ExitStatus, 
stderr: &[u8]) -> anyhow::Error { anyhow::anyhow!( "command `{:?}` (exit code: {})\nstderr:\n{}", @@ -502,18 +776,18 @@ fn cmd_err(command: &Command, status: ExitStatus, stderr: &[u8]) -> anyhow::Erro /// Trim a trailing new line from `String`. /// Useful when trimming command output. -pub fn truncate_line_ending(s: &mut String) { +pub(crate) fn truncate_line_ending(s: &mut String) { if let Some(x) = s.strip_suffix("\r\n").or_else(|| s.strip_suffix('\n')) { s.truncate(x.len()); } } -pub fn select_mode(mode: Option) -> Option { +pub(crate) fn select_mode(mode: Option<&str>) -> Option { if let Some(mode) = mode { - Some(mode) - } else if cfg!(target_os = "macos") { + Some(mode.to_owned()) + } else if cfg!(all(fbcode_build, target_os = "macos")) { Some("@fbcode//mode/mac".to_owned()) - } else if cfg!(target_os = "windows") { + } else if cfg!(all(fbcode_build, target_os = "windows")) { Some("@fbcode//mode/win".to_owned()) } else { // fallback to the platform default mode. This is likely slower than optimal, but @@ -526,7 +800,7 @@ pub fn select_mode(mode: Option) -> Option { /// with a target that depends on itself. #[test] fn merge_tests_no_cycles() { - let mut targets = BTreeMap::new(); + let mut targets = FxHashMap::default(); targets.insert( Target::new("//foo"), @@ -536,17 +810,20 @@ fn merge_tests_no_cycles() { kind: Kind::Library, edition: None, srcs: vec![], - mapped_srcs: BTreeMap::new(), + mapped_srcs: FxHashMap::default(), crate_name: None, - crate_root: None, + crate_dynamic: None, + crate_root: PathBuf::default(), deps: vec![], - tests: vec![Target::new("//foo-unittest")], - named_deps: BTreeMap::new(), + test_deps: vec![Target::new("//foo-unittest")], + named_deps: FxHashMap::default(), proc_macro: None, features: vec![], - env: BTreeMap::new(), + env: FxHashMap::default(), source_folder: PathBuf::from("/tmp"), + project_relative_buildfile: PathBuf::from("foo/BUCK"), in_workspace: false, + rustc_flags: vec![], }, ); @@ -558,28 +835,31 @@ fn merge_tests_no_cycles() { kind: Kind::Test, edition: None, srcs: vec![], - mapped_srcs: BTreeMap::new(), + mapped_srcs: FxHashMap::default(), crate_name: None, - crate_root: None, + crate_dynamic: None, + crate_root: PathBuf::default(), deps: vec![Target::new("//foo")], - tests: vec![], - named_deps: BTreeMap::new(), + test_deps: vec![], + named_deps: FxHashMap::default(), proc_macro: None, features: vec![], - env: BTreeMap::new(), + env: FxHashMap::default(), source_folder: PathBuf::from("/tmp"), + project_relative_buildfile: PathBuf::from("foo-unittest/BUCK"), in_workspace: false, + rustc_flags: vec![], }, ); let res = merge_unit_test_targets(targets.clone()); let merged_target = res.get(&Target::new("//foo")).unwrap(); - assert_eq!(*merged_target.info.deps, vec![]); + assert_eq!(*merged_target.deps, vec![]); } #[test] fn merge_target_multiple_tests_no_cycles() { - let mut targets = BTreeMap::new(); + let mut targets = FxHashMap::default(); targets.insert( Target::new("//foo"), @@ -589,20 +869,23 @@ fn merge_target_multiple_tests_no_cycles() { kind: Kind::Library, edition: None, srcs: vec![], - mapped_srcs: BTreeMap::new(), + mapped_srcs: FxHashMap::default(), crate_name: None, - crate_root: None, + crate_dynamic: None, + crate_root: PathBuf::default(), deps: vec![Target::new("//foo@rust")], - tests: vec![ + test_deps: vec![ Target::new("//foo_test"), Target::new("//foo@rust-unittest"), ], - named_deps: BTreeMap::new(), + named_deps: FxHashMap::default(), proc_macro: None, features: vec![], - env: BTreeMap::new(), + env: 
FxHashMap::default(), source_folder: PathBuf::from("/tmp"), + project_relative_buildfile: PathBuf::from("foo/BUCK"), in_workspace: false, + rustc_flags: vec![], }, ); @@ -614,20 +897,23 @@ fn merge_target_multiple_tests_no_cycles() { kind: Kind::Library, edition: None, srcs: vec![], - mapped_srcs: BTreeMap::new(), + mapped_srcs: FxHashMap::default(), crate_name: None, - crate_root: None, + crate_dynamic: None, + crate_root: PathBuf::default(), deps: vec![], - tests: vec![ + test_deps: vec![ Target::new("//foo_test"), Target::new("//foo@rust-unittest"), ], - named_deps: BTreeMap::new(), + named_deps: FxHashMap::default(), proc_macro: None, features: vec![], - env: BTreeMap::new(), + env: FxHashMap::default(), source_folder: PathBuf::from("/tmp"), + project_relative_buildfile: PathBuf::from("foo/BUCK"), in_workspace: false, + rustc_flags: vec![], }, ); @@ -639,20 +925,23 @@ fn merge_target_multiple_tests_no_cycles() { kind: Kind::Test, edition: None, srcs: vec![], - mapped_srcs: BTreeMap::new(), + mapped_srcs: FxHashMap::default(), crate_name: None, - crate_root: None, + crate_dynamic: None, + crate_root: PathBuf::default(), // foo_test depends on foo, which is reasonable, but // we need to be careful when merging test // dependencies of foo@rust to avoid creating cycles. deps: vec![Target::new("//foo"), Target::new("//bar")], - tests: vec![], - named_deps: BTreeMap::new(), + test_deps: vec![], + named_deps: FxHashMap::default(), proc_macro: None, features: vec![], - env: BTreeMap::new(), + env: FxHashMap::default(), source_folder: PathBuf::from("/tmp"), + project_relative_buildfile: PathBuf::from("foo_test/BUCK"), in_workspace: false, + rustc_flags: vec![], }, ); @@ -664,32 +953,226 @@ fn merge_target_multiple_tests_no_cycles() { kind: Kind::Test, edition: None, srcs: vec![], - mapped_srcs: BTreeMap::new(), + mapped_srcs: FxHashMap::default(), crate_name: None, - crate_root: None, + crate_dynamic: None, + crate_root: PathBuf::default(), deps: vec![Target::new("//test-framework")], - tests: vec![], - named_deps: BTreeMap::new(), + test_deps: vec![], + named_deps: FxHashMap::default(), proc_macro: None, features: vec![], - env: BTreeMap::new(), + env: FxHashMap::default(), source_folder: PathBuf::from("/tmp"), + project_relative_buildfile: PathBuf::from("foo/BUCK"), in_workspace: false, + rustc_flags: vec![], }, ); let res = merge_unit_test_targets(targets.clone()); let merged_foo_target = res.get(&Target::new("//foo")).unwrap(); assert_eq!( - *merged_foo_target.info.deps, + *merged_foo_target.deps, vec![Target::new("//foo@rust")], "Additional dependencies should only come from the foo-unittest crate" ); let merged_foo_rust_target = res.get(&Target::new("//foo@rust")).unwrap(); assert_eq!( - *merged_foo_rust_target.info.deps, + *merged_foo_rust_target.deps, vec![Target::new("//test-framework")], "Test dependencies should only come from the foo@rust-unittest crate" ); } + +#[test] +fn integration_tests_preserved() { + let mut targets = FxHashMap::default(); + + targets.insert( + Target::new("//foo"), + TargetInfo { + name: "foo".to_owned(), + label: "foo".to_owned(), + kind: Kind::Library, + edition: None, + srcs: vec![], + mapped_srcs: FxHashMap::default(), + crate_name: None, + crate_dynamic: None, + crate_root: PathBuf::default(), + deps: vec![], + test_deps: vec![Target::new("//foo-integration-test")], + named_deps: FxHashMap::default(), + proc_macro: None, + features: vec![], + env: FxHashMap::default(), + source_folder: PathBuf::from("/tmp"), + project_relative_buildfile: 
PathBuf::from("foo/BUCK"), + in_workspace: false, + rustc_flags: vec![], + }, + ); + + targets.insert( + Target::new("//foo-integration-test"), + TargetInfo { + name: "foo-integration-test".to_owned(), + label: "foo-integration-test".to_owned(), + kind: Kind::Test, + edition: None, + srcs: vec![], + mapped_srcs: FxHashMap::default(), + crate_name: None, + crate_dynamic: None, + crate_root: PathBuf::default(), + deps: vec![Target::new("//foo")], + test_deps: vec![], + named_deps: FxHashMap::default(), + proc_macro: None, + features: vec![], + env: FxHashMap::default(), + source_folder: PathBuf::from("/tmp"), + project_relative_buildfile: PathBuf::from("foo/BUCK"), + in_workspace: false, + rustc_flags: vec![], + }, + ); + + let res = merge_unit_test_targets(targets.clone()); + assert!(res.contains_key(&Target::new("//foo-integration-test"))); +} + +#[test] +fn named_deps_underscores() { + let mut target_index = FxHashMap::default(); + target_index.insert( + Target::new("//bar"), + TargetInfo { + name: "bar".to_owned(), + label: "bar".to_owned(), + kind: Kind::Library, + edition: None, + srcs: vec![], + mapped_srcs: FxHashMap::default(), + crate_name: None, + crate_dynamic: None, + crate_root: PathBuf::default(), + deps: vec![], + test_deps: vec![], + named_deps: FxHashMap::default(), + proc_macro: None, + features: vec![], + env: FxHashMap::default(), + source_folder: PathBuf::from("/tmp"), + project_relative_buildfile: PathBuf::from("bar/BUCK"), + in_workspace: false, + rustc_flags: vec![], + }, + ); + + let mut named_deps = FxHashMap::default(); + named_deps.insert("bar-baz".to_owned(), Target::new("//bar")); + + let info = TargetInfo { + name: "foo".to_owned(), + label: "foo".to_owned(), + kind: Kind::Library, + edition: None, + srcs: vec![], + mapped_srcs: FxHashMap::default(), + crate_name: None, + crate_dynamic: None, + crate_root: PathBuf::default(), + deps: vec![], + test_deps: vec![], + named_deps, + proc_macro: None, + features: vec![], + env: FxHashMap::default(), + source_folder: PathBuf::from("/tmp"), + project_relative_buildfile: PathBuf::from("foo/BUCK"), + in_workspace: false, + rustc_flags: vec![], + }; + + let mut targets_to_ids = FxHashMap::default(); + let bar_target = Target::new("//bar"); + targets_to_ids.insert(&bar_target, 0); + + let dep_targets = resolve_aliases(&info.deps, &FxHashMap::default(), &FxHashMap::default()); + let deps = as_deps(&dep_targets, &info, &targets_to_ids, &target_index); + + assert_eq!( + deps, + vec![Dep { + crate_index: 0, + name: "bar_baz".to_owned() + }] + ); +} + +#[test] +fn alias_of_existing_target() { + let targets = vec![ + Target::new("//foo"), + Target::new("//foo-alias"), + Target::new("//bar"), + ]; + + let mut aliases = FxHashMap::default(); + aliases.insert( + Target::new("//foo-alias"), + AliasedTargetInfo { + actual: Target::new("//foo"), + }, + ); + + let dep_targets = resolve_aliases(&targets, &aliases, &FxHashMap::default()); + + assert_eq!( + dep_targets, + vec![Target::new("//foo"), Target::new("//bar"),] + ); +} + +#[test] +fn test_select_mode() { + // Test default behavior without the fbcode_build cfg + if cfg!(not(fbcode_build)) { + assert_eq!(select_mode(None), None); + assert_eq!( + select_mode(Some("custom-mode")), + Some("custom-mode".to_owned()) + ); + } + + // Test behavior with the fbcode_build cfg enabled + if cfg!(all(fbcode_build, target_os = "macos")) { + assert_eq!(select_mode(None), Some("@fbcode//mode/mac".to_owned())); + assert_eq!( + select_mode(Some("custom-mode")), + Some("custom-mode".to_owned()) + 
); + } + + if cfg!(all(fbcode_build, target_os = "windows")) { + assert_eq!(select_mode(None), Some("@fbcode//mode/win".to_owned())); + assert_eq!( + select_mode(Some("custom-mode")), + Some("custom-mode".to_owned()) + ); + } + + if cfg!(all( + fbcode_build, + not(any(target_os = "macos", target_os = "windows")) + )) { + assert_eq!(select_mode(None), None); + assert_eq!( + select_mode(Some("custom-mode")), + Some("custom-mode".to_owned()) + ); + } +} diff --git a/integrations/rust-project/src/cli.rs b/integrations/rust-project/src/cli.rs new file mode 100644 index 0000000000000..3643c5b21a472 --- /dev/null +++ b/integrations/rust-project/src/cli.rs @@ -0,0 +1,28 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +mod check; +mod develop; +mod new; + +#[derive(Debug, Clone)] +pub(crate) enum Input { + Targets(Vec<Target>), + Files(Vec<PathBuf>), + Buildfile(Vec<PathBuf>), +} + +use std::path::PathBuf; + +pub(crate) use check::Check; +pub(crate) use develop::Develop; +pub(crate) use new::New; +pub(crate) use new::ProjectKind; + +use crate::target::Target; diff --git a/integrations/rust-project/src/cli/check.rs b/integrations/rust-project/src/cli/check.rs index 026927c05856d..d581ade25a888 100644 --- a/integrations/rust-project/src/cli/check.rs +++ b/integrations/rust-project/src/cli/check.rs @@ -7,22 +7,26 @@ * of this source tree. */ +use std::path::Path; use std::path::PathBuf; use std::str::FromStr; use crate::buck; use crate::buck::select_mode; use crate::diagnostics; +use crate::path::canonicalize; -pub struct Check { - pub buck: buck::Buck, - pub use_clippy: bool, - pub saved_file: PathBuf, +pub(crate) struct Check { + pub(crate) buck: buck::Buck, + pub(crate) use_clippy: bool, + pub(crate) saved_file: PathBuf, } impl Check { - pub fn new(mode: Option<String>, use_clippy: bool, saved_file: PathBuf) -> Self { - let mode = select_mode(mode); + pub(crate) fn new(mode: Option<String>, use_clippy: bool, saved_file: PathBuf) -> Self { + let saved_file = canonicalize(&saved_file).unwrap_or(saved_file); + + let mode = select_mode(mode.as_deref()); let buck = buck::Buck::new(mode); Self { buck, @@ -31,13 +35,14 @@ impl Check { } } - pub fn run(&self) -> Result<(), anyhow::Error> { + pub(crate) fn run(&self) -> Result<(), anyhow::Error> { + let start = std::time::Instant::now(); let buck = &self.buck; - let cell_root = buck.resolve_project_root()?; - let diagnostic_files = buck.check_saved_file(self.use_clippy, &self.saved_file)?; + + let check_output = buck.check_saved_file(self.use_clippy, &self.saved_file)?; let mut diagnostics = vec![]; - for path in diagnostic_files { + for path in check_output.diagnostic_paths { let contents = std::fs::read_to_string(path)?; for l in contents.lines() { // rustc (and with greater relevance, the underlying build.bxl script) emits diagnostics as newline-delimited JSON. @@ -52,16 +57,8 @@ impl Check { // the file paths in those diagnostics are relative to the buck2 project root, so we rewrite them by joining them onto the // project root, resulting in fully absolute paths.
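// As a purely illustrative sketch (paths hypothetical), the rewrite turns a
// project-relative span path into an absolute one by joining it onto the
// project root:
//
//     let project_root = std::path::Path::new("/home/user/repo");
//     let absolute = project_root.join("fbcode/foo/src/lib.rs");
//     assert_eq!(absolute, std::path::PathBuf::from("/home/user/repo/fbcode/foo/src/lib.rs"));
//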
if let Ok(mut message) = serde_json::from_str::<diagnostics::Message>(l) { - for span in message.spans.iter_mut() { - span.file_name = cell_root.join(&span.file_name); - } - for span in message - .children - .iter_mut() - .flat_map(|child| child.spans.iter_mut()) - { - span.file_name = cell_root.join(&span.file_name); - } + make_message_absolute(&mut message, &check_output.project_root); + let span = serde_json::to_value(message)?; // this is done under the assumption that the number of diagnostics inside the vector // is small (e.g., 32 or 64), so a linear search of a vector will be faster than hashing each element. @@ -80,6 +77,30 @@ impl Check { println!("{}", out); } + crate::scuba::log_check(start.elapsed(), &self.saved_file, self.use_clippy); + Ok(()) } } + +fn make_message_absolute(message: &mut diagnostics::Message, base_dir: &Path) { + for span in message.spans.iter_mut() { + make_span_absolute(span, base_dir); + } + + for message in message.children.iter_mut() { + make_message_absolute(message, base_dir); + } +} + +fn make_span_absolute(span: &mut diagnostics::Span, base_dir: &Path) { + span.file_name = base_dir.join(&span.file_name); + + if let Some(expansion) = &mut span.expansion { + if let Some(def_site_span) = &mut expansion.def_site_span { + make_span_absolute(def_site_span, base_dir); + } + + make_span_absolute(&mut expansion.span, base_dir); + } +} diff --git a/integrations/rust-project/src/cli/develop.rs b/integrations/rust-project/src/cli/develop.rs index 1ddef0aa21bc1..cd929bc9d00cd 100644 --- a/integrations/rust-project/src/cli/develop.rs +++ b/integrations/rust-project/src/cli/develop.rs @@ -12,46 +12,50 @@ use std::io::Write; use std::path::Path; use std::path::PathBuf; +use rustc_hash::FxHashMap; +use serde::Deserialize; +use serde::Serialize; use tracing::info; -use tracing::warn; +use super::Input; use crate::buck; use crate::buck::relative_to; use crate::buck::select_mode; use crate::buck::to_json_project; use crate::json_project::JsonProject; use crate::json_project::Sysroot; +use crate::path::canonicalize; use crate::sysroot::resolve_buckconfig_sysroot; use crate::sysroot::resolve_rustup_sysroot; use crate::sysroot::SysrootConfig; use crate::target::Target; +use crate::Command; -pub struct Develop { - pub input: Input, - pub sysroot: SysrootConfig, - pub relative_paths: bool, - pub mode: Option<String>, +#[derive(Debug)] +pub(crate) struct Develop { + pub(crate) sysroot: SysrootConfig, + pub(crate) relative_paths: bool, + pub(crate) buck: buck::Buck, + pub(crate) check_cycles: bool, + pub(crate) invoked_by_ra: bool, } -pub struct OutputCfg { +pub(crate) struct OutputCfg { out: Output, pretty: bool, } -impl Develop { - pub fn new(paths: Vec<PathBuf>) -> Self { - Self { - input: Input::Files(paths), - sysroot: SysrootConfig::BuckConfig, - mode: None, - relative_paths: false, - } - } +#[derive(Debug)] +pub(crate) enum Output { + Path(PathBuf), + Stdout, +} - pub fn from_command(command: crate::Command) -> (Develop, OutputCfg) { +impl Develop { + pub(crate) fn from_command(command: Command) -> (Develop, Input, OutputCfg) { if let crate::Command::Develop { - targets, files, + targets, out, stdout, prefer_rustup_managed_toolchain, @@ -59,14 +63,10 @@ impl Develop { pretty, relative_paths, mode, + check_cycles, + ..
} = command { - let input = if !targets.is_empty() { - Input::Targets(targets) - } else { - Input::Files(files) - }; - let out = if stdout { Output::Stdout } else { @@ -81,64 +81,177 @@ impl Develop { SysrootConfig::BuckConfig }; + let mode = select_mode(mode.as_deref()); + let buck = buck::Buck::new(mode); + let develop = Develop { - input, sysroot, relative_paths, - mode, + buck, + check_cycles, + invoked_by_ra: false, }; let out = OutputCfg { out, pretty }; - return (develop, out); + let input = if !targets.is_empty() { + let targets = targets.into_iter().map(Target::new).collect(); + Input::Targets(targets) + } else { + Input::Files(files) + }; + + return (develop, input, out); + } + + if let crate::Command::DevelopJson { + sysroot_mode, args, .. + } = command + { + let out = Output::Stdout; + let mode = select_mode(None); + + let sysroot = match sysroot_mode { + crate::SysrootMode::BuckConfig => SysrootConfig::BuckConfig, + crate::SysrootMode::Rustc => SysrootConfig::Rustup, + crate::SysrootMode::FullPath(path) => SysrootConfig::Sysroot(path), + crate::SysrootMode::Command(cmd_args) => { + let cmd = cmd_args[0].clone(); + let args = cmd_args[1..].to_vec(); + let output = std::process::Command::new(cmd).args(args).output().unwrap(); + let path = String::from_utf8(output.stdout).unwrap(); + SysrootConfig::Sysroot(PathBuf::from(path.trim())) + } + }; + + let buck = buck::Buck::new(mode); + + let develop = Develop { + sysroot, + relative_paths: false, + buck, + check_cycles: false, + invoked_by_ra: true, + }; + let out = OutputCfg { out, pretty: false }; + + let input = match args { + crate::JsonArguments::Path(path) => Input::Files(vec![path]), + crate::JsonArguments::Buildfile(path) => Input::Buildfile(vec![path]), + crate::JsonArguments::Label(target) => Input::Targets(vec![Target::new(target)]), + }; + + return (develop, input, out); } unreachable!("No other subcommand is supported.") } } -pub enum Input { - Targets(Vec), - Files(Vec), -} +const DEFAULT_EXTRA_TARGETS: usize = 50; -#[derive(Debug)] -pub enum Output { - Path(PathBuf), - Stdout, +#[derive(Serialize, Deserialize)] +pub(crate) struct OutputData { + pub(crate) buildfile: PathBuf, + pub(crate) project: JsonProject, + pub(crate) kind: String, } impl Develop { - pub fn run(self) -> Result { - let Develop { - input, - sysroot, - relative_paths, - mode, - } = self; - let mode = select_mode(mode); - let buck = buck::Buck::new(mode); - let project_root = buck.resolve_project_root()?; + pub(crate) fn run(self, input: Input, cfg: OutputCfg) -> Result<(), anyhow::Error> { + let start = std::time::Instant::now(); + let input = match input { + Input::Targets(targets) => Input::Targets(targets), + Input::Files(files) => { + let canonical_files = files + .into_iter() + .map(|p| match canonicalize(&p) { + Ok(path) => path, + Err(_) => p, + }) + .collect::>(); - let targets = match input { - Input::Targets(targets) => targets.iter().map(Target::new).collect::>(), - // the owners query returns a `HashMap>` - Input::Files(files) => buck.query_owner(files)?.into_values().flatten().collect(), + Input::Files(canonical_files) + } + Input::Buildfile(buildfiles) => Input::Buildfile(buildfiles), + }; + let mut writer: BufWriter> = match cfg.out { + Output::Path(ref p) => { + let out = std::fs::File::create(p)?; + BufWriter::new(Box::new(out)) + } + Output::Stdout => BufWriter::new(Box::new(std::io::stdout())), }; + let targets = self.related_targets(input.clone())?; if targets.is_empty() { - warn!("Could not find any targets associated with the 
files specified."); + let err = anyhow::anyhow!("No owning target found") + .context(format!("Could not find owning target for {:?}", input)); + return Err(err); + } + + if self.invoked_by_ra { + for (buildfile, targets) in targets { + let project = self.run_inner(targets)?; + + // we have to log before we write the output, because rust-analyzer will kill us after the write + crate::scuba::log_develop(start.elapsed(), input.clone(), self.invoked_by_ra); + + let out = OutputData { + buildfile, + project, + kind: "finished".to_owned(), + }; + let out = serde_json::to_string(&out)?; + println!("{}", out); + } + } else { + let mut targets = targets.into_values().flatten().collect::>(); + targets.sort(); + targets.dedup(); + + let project = self.run_inner(targets)?; + crate::scuba::log_develop(start.elapsed(), input, self.invoked_by_ra); + + if cfg.pretty { + serde_json::to_writer_pretty(&mut writer, &project)?; + } else { + serde_json::to_writer(&mut writer, &project)?; + } + writeln!(writer)?; + match &cfg.out { + Output::Path(p) => info!(file = ?p, "wrote rust-project.json"), + Output::Stdout => info!("wrote rust-project.json to stdout"), + } } + Ok(()) + } + + pub(crate) fn run_inner(&self, targets: Vec) -> Result { + let Develop { + sysroot, + relative_paths, + buck, + check_cycles, + .. + } = self; + + let project_root = buck.resolve_project_root()?; + info!("building generated code"); - let expanded_and_resolved = buck.expand_and_resolve(&targets)?; + let exclude_workspaces = + std::env::var("RUST_PROJECT_EXCLUDE_WORKSPACES").is_ok_and(|it| it != "0"); + let expanded_and_resolved = buck.expand_and_resolve(&targets, exclude_workspaces)?; + + info!("resolving aliased libraries"); let aliased_libraries = buck.query_aliased_libraries(&expanded_and_resolved.expanded_targets)?; info!("fetching sysroot"); let sysroot = match &sysroot { SysrootConfig::Sysroot(path) => { - let mut sysroot_path = expand_tilde(path)?.canonicalize()?; - if relative_paths { + let mut sysroot_path = canonicalize(expand_tilde(path)?)?; + if *relative_paths { sysroot_path = relative_to(&sysroot_path, &project_root); } @@ -147,7 +260,9 @@ impl Develop { sysroot_src: None, } } - SysrootConfig::BuckConfig => resolve_buckconfig_sysroot(&project_root, relative_paths)?, + SysrootConfig::BuckConfig => { + resolve_buckconfig_sysroot(&project_root, *relative_paths)? + } SysrootConfig::Rustup => resolve_rustup_sysroot()?, }; info!("converting buck info to rust-project.json"); @@ -155,35 +270,28 @@ impl Develop { sysroot, expanded_and_resolved, aliased_libraries, - relative_paths, + *relative_paths, + *check_cycles, )?; + Ok(rust_project) } - pub fn run_as_cli(self, cfg: OutputCfg) -> Result<(), anyhow::Error> { - let rust_project = self.run()?; - - let mut writer: BufWriter> = match cfg.out { - Output::Path(ref p) => { - let out = std::fs::File::create(p)?; - BufWriter::new(Box::new(out)) - } - Output::Stdout => BufWriter::new(Box::new(std::io::stdout())), + /// For every Rust file, return the relevant buck targets that should be used to configure rust-analyzer. + pub(crate) fn related_targets( + &self, + input: Input, + ) -> Result>, anyhow::Error> { + // We want to load additional targets from the enclosing buildfile, to help users + // who have a bunch of small targets in their buildfile. However, we want to set a limit + // so we don't try to load everything in very large generated buildfiles. 
+ let max_extra_targets: usize = match std::env::var("RUST_PROJECT_EXTRA_TARGETS") { + Ok(s) => s.parse::().unwrap_or(DEFAULT_EXTRA_TARGETS), + Err(_) => DEFAULT_EXTRA_TARGETS, }; - if cfg.pretty { - serde_json::to_writer_pretty(&mut writer, &rust_project)?; - } else { - serde_json::to_writer(&mut writer, &rust_project)?; - } - writeln!(writer)?; - - match cfg.out { - Output::Path(p) => info!(file = ?p, "wrote rust-project.json"), - Output::Stdout => info!("wrote rust-project.json to stdout"), - } - - Ok(()) + // We always want the targets that directly own these Rust files. + self.buck.query_owners(input, max_extra_targets) } } diff --git a/integrations/rust-project/src/cli/lsp.rs b/integrations/rust-project/src/cli/lsp.rs deleted file mode 100644 index f44fe9319101e..0000000000000 --- a/integrations/rust-project/src/cli/lsp.rs +++ /dev/null @@ -1,204 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -use std::path::PathBuf; -use std::time::Instant; - -use crossbeam::channel::Sender; -use lsp_server::Connection; -use lsp_server::ExtractError; -use lsp_server::ReqQueue; -use lsp_server::Request; -use lsp_server::RequestId; -use lsp_server::Response; -use lsp_types::OneOf; -use lsp_types::ServerCapabilities; -use serde::Deserialize; -use serde::Serialize; -use tracing::info; -use tracing_subscriber::reload::Handle; -use tracing_subscriber::Layer; -use tracing_subscriber::Registry; - -use crate::cli::Develop; -use crate::json_project::JsonProject; -use crate::progress::ProgressLayer; - -pub struct Lsp; - -impl Lsp { - pub fn start( - handle: Handle + Send + Sync + 'static>>, Registry>, - ) -> Result<(), anyhow::Error> { - let (connection, io_threads) = Connection::stdio(); - - // Run the server and wait for the two threads to end (typically by trigger LSP Exit event). - let capabilities = ServerCapabilities { - definition_provider: Some(OneOf::Left(true)), - ..Default::default() - }; - - let capabilities = serde_json::to_value(capabilities).unwrap(); - let _initialization_params = connection.initialize(capabilities)?; - - let (sender, receiver) = (connection.sender, connection.receiver); - let mut server = Server { - sender: sender.clone(), - req_queue: ReqQueue::default(), - }; - - handle - .modify(|layers| { - let progress = ProgressLayer::new(sender); - layers.push(progress.boxed()) - }) - .expect("Unable to update subscriber"); - - info!("waiting for incoming messages"); - - for msg in &receiver { - if let lsp_server::Message::Request(req) = &msg { - match cast::(req.clone()) { - Ok(_) => break, - Err(err @ ExtractError::JsonError { .. 
}) => { - tracing::error!(?err, "unable to deserialize message"); - } - Err(ExtractError::MethodMismatch(req)) => { - tracing::error!(?req, "method was mismatched"); - } - } - }; - - handle_server(&mut server, msg)?; - } - - io_threads.join()?; - Ok(()) - } -} - -fn handle_server(server: &mut Server, msg: lsp_server::Message) -> Result<(), anyhow::Error> { - let token: lsp_types::NumberOrString = - lsp_types::ProgressToken::String("rust-project/discoverBuckTargets".to_owned()); - - match msg { - lsp_server::Message::Request(req) => match cast::(req.clone()) { - Ok((id, params)) => { - info!(?id, ?params, "received request"); - server.register_request(&req, std::time::Instant::now()); - - // this request is load-bearing: it is necessary in order to start showing in-editor progress. - server.send_request::( - lsp_types::WorkDoneProgressCreateParams { token }, - |_, _| (), - ); - let _guard = tracing::span!(target: "lsp_progress", tracing::Level::INFO, "resolving targets").entered(); - - let project = Develop::new(params.text_documents).run()?; - - let result = serde_json::to_value(&project)?; - let resp = Response { - id, - result: Some(result), - error: None, - }; - - tracing::info!(crate_len = project.crates.len(), "created index"); - - server.respond(resp); - } - Err(err @ ExtractError::JsonError { .. }) => { - tracing::error!(?err, "unable to deserialize message"); - } - Err(ExtractError::MethodMismatch(req)) => { - tracing::error!(?req, "method was mismatched"); - } - }, - lsp_server::Message::Response(resp) => { - tracing::info!(resp = ?resp, "notification"); - } - lsp_server::Message::Notification(notification) => { - tracing::info!(notification = ?notification, "notification"); - } - } - Ok(()) -} - -type ReqHandler = fn(&mut Server, lsp_server::Response); - -pub(crate) struct Server { - sender: Sender, - req_queue: ReqQueue<(String, Instant), ReqHandler>, -} - -impl Server { - pub(crate) fn register_request( - &mut self, - request: &lsp_server::Request, - request_received: Instant, - ) { - self.req_queue.incoming.register( - request.id.clone(), - (request.method.clone(), request_received), - ); - } - - pub(crate) fn send_request(&mut self, params: R::Params, handler: ReqHandler) - where - R: lsp_types::request::Request, - { - let request = self - .req_queue - .outgoing - .register(R::METHOD.to_owned(), params, handler); - self.send(request.into()); - } - - pub(crate) fn respond(&mut self, response: lsp_server::Response) { - if let Some((method, start)) = self.req_queue.incoming.complete(response.id.clone()) { - let duration = start.elapsed(); - tracing::info!( - "handled {} - ({}) in {:0.2?}", - method, - response.id, - duration - ); - self.send(response.into()); - } else { - tracing::error!("Unable to complete response"); - } - } - - pub(crate) fn send(&self, message: lsp_server::Message) { - self.sender.send(message).expect("unable to send message"); - } -} - -#[derive(Debug)] -pub enum DiscoverBuckTargets {} - -impl lsp_types::request::Request for DiscoverBuckTargets { - type Params = DiscoverBuckTargetParams; - type Result = JsonProject; - const METHOD: &'static str = "rust-project/discoverBuckTargets"; -} - -#[derive(Debug, Eq, PartialEq, Clone, Default, Deserialize, Serialize)] -#[serde(rename_all = "camelCase")] -pub struct DiscoverBuckTargetParams { - text_documents: Vec, -} - -fn cast(req: Request) -> Result<(RequestId, R::Params), ExtractError> -where - R: lsp_types::request::Request, - R::Params: serde::de::DeserializeOwned, -{ - req.extract(R::METHOD) -} diff --git 
a/integrations/rust-project/src/cli/mod.rs b/integrations/rust-project/src/cli/mod.rs deleted file mode 100644 index 73fe3d05c0207..0000000000000 --- a/integrations/rust-project/src/cli/mod.rs +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -mod check; -mod develop; -mod lsp; -mod new; - -pub use check::Check; -pub use develop::Develop; -pub use lsp::Lsp; -pub use new::New; -pub use new::ProjectKind; diff --git a/integrations/rust-project/src/cli/new.rs b/integrations/rust-project/src/cli/new.rs index 9ad0dec8fae51..f34d5e0513d77 100644 --- a/integrations/rust-project/src/cli/new.rs +++ b/integrations/rust-project/src/cli/new.rs @@ -16,20 +16,22 @@ use anyhow::Context; use clap::ValueEnum; use tracing::info; +use crate::path::canonicalize; + #[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, ValueEnum)] -pub enum ProjectKind { +pub(crate) enum ProjectKind { Binary, Library, } -pub struct New { - pub name: String, - pub kind: ProjectKind, - pub path: Option, +pub(crate) struct New { + pub(crate) name: String, + pub(crate) kind: ProjectKind, + pub(crate) path: Option, } impl New { - pub fn run(self) -> Result<(), anyhow::Error> { + pub(crate) fn run(self) -> Result<(), anyhow::Error> { let name = self.name; let (target, kind) = match self.kind { @@ -50,9 +52,7 @@ impl New { Some(path) => path, None => std::env::current_dir()?, }; - let path = path - .canonicalize() - .context("Unable to canonicalize current directory")?; + let path = canonicalize(path).context("Unable to canonicalize current directory")?; let project_dir = Path::new(&name); let path = path.join(project_dir); @@ -96,15 +96,15 @@ impl New { } } -pub enum EntryFile { +enum EntryFile { Main(MainFile), Lib(LibFile), } -pub struct MainFile; +struct MainFile; impl MainFile { - pub fn render(&self) -> String { + fn render(&self) -> String { "fn main() { println!(\"Hello from Rust at Meta!\"); } @@ -113,10 +113,10 @@ impl MainFile { } } -pub struct LibFile; +struct LibFile; impl LibFile { - pub fn render(&self) -> String { + fn render(&self) -> String { "pub fn add_numbers(left: usize, right: usize) -> usize { left + right } @@ -137,13 +137,13 @@ mod tests { } #[derive(Debug, PartialEq, Eq)] -pub enum Target { +enum Target { Library { name: String }, Binary { name: String }, } impl Target { - pub fn render(&self) -> String { + fn render(&self) -> String { match self { Target::Library { name } => { let template = include_str!("../../templates/TARGETS_LIB"); diff --git a/integrations/rust-project/src/diagnostics.rs b/integrations/rust-project/src/diagnostics.rs index 30c410f6b7cc0..a832fc8be2ec6 100644 --- a/integrations/rust-project/src/diagnostics.rs +++ b/integrations/rust-project/src/diagnostics.rs @@ -14,41 +14,41 @@ use serde::Serialize; /// This is the same as rustfix::Diagnostic, but with a more complete schema. 
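/// An abridged, illustrative example of one such diagnostic, as emitted by
/// rustc's JSON error format (values hypothetical):
///
/// ```json
/// {
///     "message": "unused variable: `x`",
///     "code": { "code": "unused_variables", "explanation": null },
///     "level": "warning",
///     "spans": [{ "file_name": "foo/src/lib.rs", "line_start": 3, "is_primary": true }],
///     "children": [],
///     "rendered": "warning: unused variable: `x`"
/// }
/// ```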
#[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq, Hash)] -pub struct Message { - pub message: String, - pub code: Option<Code>, - pub level: String, - pub spans: Vec<Span>, - pub children: Vec<Message>, - pub rendered: Option<String>, +pub(crate) struct Message { + pub(crate) message: String, + pub(crate) code: Option<Code>, + pub(crate) level: String, + pub(crate) spans: Vec<Span>, + pub(crate) children: Vec<Message>, + pub(crate) rendered: Option<String>, } #[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq, Hash)] -pub struct Span { - pub file_name: PathBuf, - pub byte_start: usize, - pub byte_end: usize, - pub line_start: usize, - pub line_end: usize, - pub column_start: usize, - pub column_end: usize, - pub is_primary: bool, - pub text: Vec<Text>, - pub label: Option<String>, - pub suggested_replacement: Option<String>, - pub suggestion_applicability: Option<Applicability>, - pub expansion: Option<Expansion>, +pub(crate) struct Span { + pub(crate) file_name: PathBuf, + pub(crate) byte_start: usize, + pub(crate) byte_end: usize, + pub(crate) line_start: usize, + pub(crate) line_end: usize, + pub(crate) column_start: usize, + pub(crate) column_end: usize, + pub(crate) is_primary: bool, + pub(crate) text: Vec<Text>, + pub(crate) label: Option<String>, + pub(crate) suggested_replacement: Option<String>, + pub(crate) suggestion_applicability: Option<Applicability>, + pub(crate) expansion: Option<Expansion>, } #[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq, Hash)] -pub struct Expansion { - pub span: Box<Span>, - pub macro_decl_name: String, - pub def_site_span: Option<Box<Span>>, +pub(crate) struct Expansion { + pub(crate) span: Box<Span>, + pub(crate) macro_decl_name: String, + pub(crate) def_site_span: Option<Box<Span>>, } #[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq, Hash)] -pub enum Applicability { +pub(crate) enum Applicability { MachineApplicable, HasPlaceholders, MaybeIncorrect, @@ -56,14 +56,14 @@ pub enum Applicability { } #[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq, Hash)] -pub struct Code { - pub code: String, - pub explanation: Option<String>, +pub(crate) struct Code { + pub(crate) code: String, + pub(crate) explanation: Option<String>, } #[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq, Hash)] -pub struct Text { - pub text: String, - pub highlight_start: usize, - pub highlight_end: usize, +pub(crate) struct Text { + pub(crate) text: String, + pub(crate) highlight_start: usize, + pub(crate) highlight_end: usize, } diff --git a/integrations/rust-project/src/json_project.rs b/integrations/rust-project/src/json_project.rs index 38848b2ac796c..9836c6aa4d761 100644 --- a/integrations/rust-project/src/json_project.rs +++ b/integrations/rust-project/src/json_project.rs @@ -15,34 +15,37 @@ //! //! [documentation]: https://rust-analyzer.github.io/manual.html#non-cargo-based-projects -use std::collections::BTreeMap; -use std::collections::BTreeSet; use std::path::PathBuf; +use rustc_hash::FxHashMap; +use rustc_hash::FxHashSet; use serde::Deserialize; use serde::Serialize; -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] -pub struct JsonProject { +use crate::target::Target; + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] +pub(crate) struct JsonProject { #[serde(flatten)] - pub sysroot: Sysroot, + pub(crate) sysroot: Sysroot, /// The set of crates comprising the project. /// /// Must include all transitive dependencies as well as the sysroot crates (libstd, /// libcore, etc.).
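/// An abridged, illustrative two-crate example; note that `deps` refers to
/// other crates by their index in this array:
///
/// ```json
/// "crates": [
///     { "display_name": "foo", "root_module": "foo/src/lib.rs", "deps": [] },
///     {
///         "display_name": "bar",
///         "root_module": "bar/src/lib.rs",
///         "deps": [{ "crate": 0, "name": "foo" }]
///     }
/// ]
/// ```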
- pub crates: Vec<Crate>, - pub generated: String, + pub(crate) crates: Vec<Crate>, + pub(crate) runnables: Vec<Runnable>, + pub(crate) generated: String, } -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] -pub struct Crate { +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq)] +pub(crate) struct Crate { /// Optional crate name used for display purposes; has no semantic significance. - pub display_name: Option<String>, + pub(crate) display_name: Option<String>, /// The path to the root module of the crate. - pub root_module: PathBuf, - pub edition: Edition, - pub deps: Vec<Dep>, + pub(crate) root_module: PathBuf, + pub(crate) edition: Edition, + pub(crate) deps: Vec<Dep>, /// Should this crate be treated as a member of /// current "workspace". /// @@ -54,7 +57,7 @@ pub struct Crate { /// library and 3rd party crates to enable /// performance optimizations (rust-analyzer /// assumes that non-member crates don't change). - pub is_workspace_member: bool, + pub(crate) is_workspace_member: bool, /// Optionally specify the (super)set of `.rs` /// files comprising this crate. /// @@ -71,32 +74,123 @@ pub struct Crate { /// rust-analyzer assumes that files from one /// source can't refer to files in another source. #[serde(skip_serializing_if = "Option::is_none")] - pub source: Option<Source>, + pub(crate) source: Option<Source>, /// The set of cfgs activated for a given crate. /// /// With how fb imports crates into fbsource/third-party, /// the answer is "all of them". - pub cfg: Vec<String>, + pub(crate) cfg: Vec<String>, /// The target triple for a given crate. #[serde(skip_serializing_if = "Option::is_none")] - pub target: Option<String>, + #[serde(skip_serializing_if = "Option::is_none")] + pub(crate) build: Option<Build>, /// Environment for the crate, often used by `env!`. - pub env: BTreeMap<String, String>, + pub(crate) env: FxHashMap<String, String>, /// Whether the crate is a proc-macro crate. - pub is_proc_macro: bool, + pub(crate) is_proc_macro: bool, /// For proc-macro crates, path to the compiled /// proc-macro (.so, .dylib, or .dll, depending on the platform). #[serde(skip_serializing_if = "Option::is_none")] - pub proc_macro_dylib_path: Option<PathBuf>, + pub(crate) proc_macro_dylib_path: Option<PathBuf>, +} + +/// Build system-specific additions to `rust-project.json`. +/// +/// rust-analyzer encodes Cargo-specific knowledge in features +/// such as flycheck or runnables and constructs Cargo-specific commands +/// on the fly. This is a reasonable decision on its part, as most people +/// use Cargo. However, to support equivalent functionality with non-Cargo +/// build systems in rust-analyzer, this struct encodes pre-defined runnables +/// and other bits of metadata.
Below is an example of `TargetSpec` in JSON: +/// +/// ```json +/// "target_spec": { +/// "manifest_file": "/Users/dbarsky/fbsource/fbcode/buck2/integrations/rust-project/TARGETS", +/// "target_label": "fbcode//buck2/integrations/rust-project:rust-project", +/// "target_kind": "bin", +/// "runnables": { +/// "check": [ +/// "build", +/// "fbcode//buck2/integrations/rust-project:rust-project" +/// ], +/// "run": [ +/// "run", +/// "fbcode//buck2/integrations/rust-project:rust-project" +/// ], +/// "test": [ +/// "test", +/// "fbcode//buck2/integrations/rust-project:rust-project", +/// "--", +/// "{test_id}", +/// "--print-passing-details" +/// ] +/// }, +/// "flycheck_command": [ +/// "build", +/// "fbcode//buck2/integrations/rust-project:rust-project" +/// ] +/// } +/// ``` +#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq, Eq)] +pub(crate) struct Build { + pub(crate) label: Target, + /// `build_file` corresponds to the `BUCK`/`TARGETS` file. + pub(crate) build_file: PathBuf, + pub(crate) target_kind: TargetKind, +} + +#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq, Eq)] +#[serde(rename_all = "camelCase")] +pub(crate) enum TargetKind { + #[default] + Bin, + /// Any kind of Cargo lib crate-type (dylib, rlib, proc-macro, ...). + Lib, + Example, + Test, + Bench, + BuildScript, + Other, +} + +impl From for TargetKind { + fn from(value: crate::target::Kind) -> Self { + use crate::target::Kind::*; + match value { + Binary => TargetKind::Bin, + Library => TargetKind::Lib, + Test => TargetKind::Test, + } + } +} + +#[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Eq)] +#[serde(rename_all = "camelCase")] +pub struct Runnable { + pub program: String, + pub args: Vec, + pub cwd: PathBuf, + pub kind: RunnableKind, +} + +#[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Eq)] +#[serde(rename_all = "camelCase")] +pub enum RunnableKind { + Check, + Flycheck, + Run, + TestOne, } -#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq, Eq)] #[serde(rename = "edition")] -pub enum Edition { +pub(crate) enum Edition { #[serde(rename = "2015")] Edition2015, #[serde(rename = "2018")] Edition2018, + #[default] #[serde(rename = "2021")] Edition2021, } @@ -107,17 +201,17 @@ pub enum Edition { /// `Crate::root_module` can belong to a crate. `include_dirs` /// are included recursively, unless a subdirectory is /// specified in `include_dirs`. -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Default)] -pub struct Source { - pub include_dirs: BTreeSet, - pub exclude_dirs: BTreeSet, +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Default)] +pub(crate) struct Source { + pub(crate) include_dirs: FxHashSet, + pub(crate) exclude_dirs: FxHashSet, } -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] -pub struct Dep { +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] +pub(crate) struct Dep { #[serde(rename = "crate")] - pub crate_index: usize, - pub name: String, + pub(crate) crate_index: usize, + pub(crate) name: String, } /// Sysroot paths. These are documented in the rust-analyzer manual: @@ -125,8 +219,8 @@ pub struct Dep { /// /// /// rust-analyzer treats both paths as optional, but we always provide sysroot. -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] -pub struct Sysroot { +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] +pub(crate) struct Sysroot { /// Path to the directory of the sysroot; this is a superset of `sysroot_src`. 
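/// For example (hypothetical path), a rustup-managed sysroot might be
/// `~/.rustup/toolchains/stable-x86_64-unknown-linux-gnu`.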
/// /// This path provides rust-analyzer both the *source code* of libraries @@ -139,12 +233,12 @@ pub struct Sysroot { /// macros and the source code location can be predictably inferred. /// Assuming the example sysroot above, the source code would be located in /// `/lib/rustlib/src/rust/`. - pub sysroot: PathBuf, + pub(crate) sysroot: PathBuf, /// Legacy sysroot config containing only the source code of libraries such /// as `std` and `core`. /// /// Inside Meta, this is necessary on non-Linux platforms since the sources /// are packaged separately from binaries such as `rust-analyzer-proc-macro-srv`. #[serde(skip_serializing_if = "Option::is_none")] - pub sysroot_src: Option<PathBuf>, + pub(crate) sysroot_src: Option<PathBuf>, } diff --git a/integrations/rust-project/src/main.rs b/integrations/rust-project/src/main.rs index bbf08b37cc80f..c42a5ebb0ebf0 100644 --- a/integrations/rust-project/src/main.rs +++ b/integrations/rust-project/src/main.rs @@ -11,19 +11,23 @@ mod buck; mod cli; mod diagnostics; mod json_project; +mod path; mod progress; +mod scuba; mod sysroot; mod target; use std::io; use std::io::IsTerminal as _; use std::path::PathBuf; +use std::str::FromStr; +use clap::ArgAction; use clap::Parser; use clap::Subcommand; +use serde::Deserialize; use tracing_subscriber::filter::LevelFilter; use tracing_subscriber::layer::SubscriberExt; -use tracing_subscriber::reload; use tracing_subscriber::EnvFilter; use tracing_subscriber::Layer; @@ -34,11 +38,14 @@ use crate::json_project::Dep; #[derive(Parser, Debug, PartialEq)] struct Opt { #[clap(subcommand)] - command: Command, + command: Option<Command>, + /// Print the current version. + #[arg(short = 'V', long)] + version: bool, } #[derive(Subcommand, Debug, PartialEq)] -pub enum Command { +enum Command { /// Create a new Rust project New { /// Name of the project being created. @@ -54,13 +61,13 @@ pub enum Command { /// Convert buck's build to a format that rust-analyzer can consume. Develop { /// Buck targets to include in rust-project.json. - #[clap(required = true, conflicts_with = "files", multiple_values = true)] + #[clap(required = true, conflicts_with = "files", num_args=1..)] targets: Vec<String>, /// Path of the file being developed. /// /// Used to discover the owning set of targets. - #[clap(required = true, last = true, multiple_values = true)] + #[clap(required = true, last = true, num_args=1..)] files: Vec<PathBuf>, /// Where to write the generated `rust-project.json`. @@ -89,56 +96,323 @@ pub enum Command { #[clap(short, long)] pretty: bool, + /// Check that there are no cycles in the generated crate graph. + #[clap(long)] + check_cycles: bool, + /// Use paths relative to the project root in `rust-project.json`. #[clap(long, hide = true)] relative_paths: bool, + /// The name of the client invoking rust-project, such as 'vscode'. + #[clap(long)] + client: Option<String>, + /// Optional argument specifying build mode. #[clap(short = 'm', long)] mode: Option<String>, }, + /// `DevelopJson` is a more limited, stripped-down [`Command::Develop`]. + /// + /// This is meant to be called by rust-analyzer directly. + DevelopJson { + // FIXME XXX: remove this after everything in fbcode is migrated off + // of buckconfig implicitly. + #[cfg(fbcode_build)] + #[clap(long, default_value = "buckconfig")] + sysroot_mode: SysrootMode, + + #[cfg(not(fbcode_build))] + #[clap(long, default_value = "rustc")] + sysroot_mode: SysrootMode, + + /// The name of the client invoking rust-project, such as 'vscode'.
+ #[clap(long)] + client: Option<String>, + + args: JsonArguments, + }, /// Build the saved file's owning target. This is meant to be used by IDEs to provide diagnostics on save. Check { /// Optional argument specifying build mode. #[clap(short = 'm', long)] mode: Option<String>, - #[clap(short = 'c', long, default_value = "true")] + + #[clap(short = 'c', long, default_value = "true", action = ArgAction::Set)] use_clippy: bool, + + /// The name of the client invoking rust-project, such as 'vscode'. + #[clap(long)] + client: Option<String>, + /// The file saved by the user. `rust-project` will infer the owning target(s) of the saved file and build them. saved_file: PathBuf, }, - /// Start an LSP server whose functionality is similar to [Command::Develop]. - #[clap(hide = true)] - LspServer, +} + +/// The 'develop-json' command needs to have four modes: +/// 1. Static `.buckconfig` setting +/// 2. Absolute path setting +/// 3. Use `rustc --print=sysroot` ("rustup mode") +/// 4. Run a command and take the output from stdout +#[derive(PartialEq, Clone, Debug, Deserialize)] +enum SysrootMode { + Rustc, + Command(Vec<String>), + FullPath(PathBuf), + BuckConfig, +} + +impl FromStr for SysrootMode { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + if s == "rustc" { + Ok(SysrootMode::Rustc) + } else if s == "buckconfig" { + Ok(SysrootMode::BuckConfig) + } else if s.starts_with("path:") { + let s = s.trim_start_matches("path:"); + Ok(SysrootMode::FullPath(PathBuf::from(s))) + } else if s.starts_with("cmd:") { + let s = s.trim_start_matches("cmd:"); + Ok(SysrootMode::Command( + s.split_whitespace() + .map(|s| s.to_owned()) + .collect::<Vec<String>>(), + )) + } else { + Err(anyhow::anyhow!("Invalid mode: {}", s)) + } + } +} + +#[derive(PartialEq, Clone, Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +enum JsonArguments { + Path(PathBuf), + Buildfile(PathBuf), + Label(String), +} + +impl FromStr for JsonArguments { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + serde_json::from_str(s).map_err(|e| anyhow::anyhow!("Error parsing my struct: {}", e)) + } } fn main() -> Result<(), anyhow::Error> { + #[cfg(fbcode_build)] + { + // SAFETY: This is as safe as using fbinit::main but with slightly less conditional compilation. + unsafe { fbinit::perform_init() }; + } + + let opt = Opt::parse(); + let filter = EnvFilter::builder() .with_default_directive(LevelFilter::INFO.into()) .from_env()?; + if opt.version { + println!("{}", build_info()); + return Ok(()); + } + + let Some(command) = opt.command else { + eprintln!("Expected a subcommand, see --help for more information."); + return Ok(()); + }; + let fmt = tracing_subscriber::fmt::layer() .with_ansi(io::stderr().is_terminal()) .with_writer(io::stderr); - let (layer, reload_handle) = reload::Layer::new(vec![fmt.with_filter(filter).boxed()]); + match command { + c @ Command::Develop { .. } => { + let subscriber = tracing_subscriber::registry().with(fmt.with_filter(filter)); + tracing::subscriber::set_global_default(subscriber)?; - let subscriber = tracing_subscriber::registry().with(layer); - tracing::subscriber::set_global_default(subscriber)?; + let (develop, input, out) = cli::Develop::from_command(c); + match develop.run(input.clone(), out) { + Ok(_) => Ok(()), + Err(e) => { + crate::scuba::log_develop_error(&e, input, false); + tracing::error!( + error = <anyhow::Error as AsRef<dyn std::error::Error>>::as_ref(&e), + source = e.source(), + kind = "error", + ); + Ok(()) + } + } + } + c @ Command::DevelopJson { ..
} => { + let subscriber = tracing_subscriber::registry() + .with(progress::ProgressLayer::new(std::io::stdout).with_filter(filter)); + tracing::subscriber::set_global_default(subscriber)?; - let cli = Opt::parse(); + let (develop, input, out) = cli::Develop::from_command(c); + match develop.run(input.clone(), out) { + Ok(_) => Ok(()), + Err(e) => { + crate::scuba::log_develop_error(&e, input, true); + tracing::error!( + error = <anyhow::Error as AsRef<dyn std::error::Error>>::as_ref(&e), + source = e.source(), + kind = "error", + ); + Ok(()) + } + } + } + Command::New { name, kind, path } => { + let subscriber = tracing_subscriber::registry().with(fmt.with_filter(filter)); + tracing::subscriber::set_global_default(subscriber)?; - match cli.command { - Command::New { name, kind, path } => cli::New { name, kind, path }.run(), + cli::New { name, kind, path }.run() + } Command::Check { mode, use_clippy, saved_file, - } => cli::Check::new(mode, use_clippy, saved_file).run(), - c @ Command::Develop { .. } => { - let (develop, out) = cli::Develop::from_command(c); - develop.run_as_cli(out) + .. + } => { + let subscriber = tracing_subscriber::registry().with(fmt.with_filter(filter)); + tracing::subscriber::set_global_default(subscriber)?; + + cli::Check::new(mode, use_clippy, saved_file.clone()) + .run() + .inspect_err(|e| crate::scuba::log_check_error(&e, &saved_file, use_clippy)) } - Command::LspServer => cli::Lsp::start(reload_handle), } } + +#[cfg(not(unix))] +fn build_info() -> String { + "No build info available.".to_owned() +} + +#[cfg(unix)] +fn build_info() -> String { + match fb_build_info_from_elf() { + Ok(s) => s, + Err(_) => "No build info available.".to_owned(), + } +} + +#[cfg(unix)] +fn fb_build_info_from_elf() -> Result<String, anyhow::Error> { + let bin_path = std::env::current_exe()?; + let bin_bytes = std::fs::read(&bin_path)?; + + let elf_file = elf::ElfBytes::<elf::endian::AnyEndian>::minimal_parse(&bin_bytes)?; + let elf_section = elf_file + .section_header_by_name("fb_build_info")? + .ok_or(anyhow::anyhow!("no header"))?; + + let (section_bytes, _) = elf_file.section_data(&elf_section)?; + let section_cstr = std::ffi::CStr::from_bytes_with_nul(section_bytes)?; + + let build_info: serde_json::Value = serde_json::from_str(&section_cstr.to_str()?)?; + let revision = build_info["revision"].as_str().unwrap_or("(unknown)"); + let build_time = build_info["time"].as_str().unwrap_or("(unknown)"); + + Ok(format!("revision: {revision}, build time: {build_time}")) +} + +#[test] +fn test_parse_use_clippy() { + assert!(matches!( + Opt::try_parse_from([ + "rust-project", + "check", + "--use-clippy=true", + "fbcode/foo.rs", + ]), + Ok(Opt { + command: Some(Command::Check { + use_clippy: true, + .. + }), + .. + }) + )); + + assert!(matches!( + Opt::try_parse_from([ + "rust-project", + "check", + "--use-clippy=false", + "fbcode/foo.rs", + ]), + Ok(Opt { + command: Some(Command::Check { + use_clippy: false, ..
+ }) + )); +} + +#[test] +#[ignore] +fn json_args_pass() { + let args = JsonArguments::Path(PathBuf::from("buck2/integrations/rust-project/src/main.rs")); + let expected = Opt { + command: Some(Command::DevelopJson { + args, + sysroot_mode: SysrootMode::Rustc, + client: None, + }), + version: false, + }; + let actual = Opt::try_parse_from([ + "rust-project", + "develop-json", + "{\"path\":\"buck2/integrations/rust-project/src/main.rs\"}", + ]) + .expect("Unable to parse args"); + assert_eq!(actual, expected); + + let args = JsonArguments::Label("//buck2/integrations/rust-project:rust-project".to_owned()); + let expected = Opt { + command: Some(Command::DevelopJson { + args, + sysroot_mode: SysrootMode::Rustc, + client: None, + }), + version: false, + }; + let actual = Opt::try_parse_from([ + "rust-project", + "develop-json", + "{\"label\":\"//buck2/integrations/rust-project:rust-project\"}", + ]) + .expect("Unable to parse args"); + assert_eq!(actual, expected); + + let args = JsonArguments::Buildfile(PathBuf::from("buck2/integrations/rust-project/BUCK")); + let expected = Opt { + command: Some(Command::DevelopJson { + args, + sysroot_mode: SysrootMode::Rustc, + client: None, + }), + version: false, + }; + let actual = Opt::try_parse_from([ + "rust-project", + "develop-json", + "{\"buildfile\":\"buck2/integrations/rust-project/BUCK\"}", + ]) + .expect("Unable to parse args"); + assert_eq!(actual, expected); +} diff --git a/integrations/rust-project/src/path.rs b/integrations/rust-project/src/path.rs new file mode 100644 index 0000000000000..47cd224d0cbd8 --- /dev/null +++ b/integrations/rust-project/src/path.rs @@ -0,0 +1,25 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::io; +use std::path::Path; +use std::path::PathBuf; + +pub(crate) fn canonicalize>(path: P) -> io::Result { + let canonical_path = dunce::canonicalize(&path)?; + + if cfg!(windows) && path.as_ref().starts_with("\\\\?\\") { + tracing::warn!( + path = path.as_ref().display().to_string(), + "Couldn't strip UNC prefix from path. Using it as-is." + ); + } + + Ok(canonical_path) +} diff --git a/integrations/rust-project/src/progress.rs b/integrations/rust-project/src/progress.rs index 3297befb59e5d..b7d4cde533041 100644 --- a/integrations/rust-project/src/progress.rs +++ b/integrations/rust-project/src/progress.rs @@ -7,34 +7,46 @@ * of this source tree. 
*/ -use std::collections::BTreeMap; +use std::io::Write; -use crossbeam::channel::Sender; -use lsp_types::notification::Notification; -use lsp_types::WorkDoneProgress; +use rustc_hash::FxHashMap; +use serde::Serialize; +use serde_json::Value; use tracing::span; +use tracing_subscriber::fmt::MakeWriter; use tracing_subscriber::Layer; -pub(crate) struct ProgressLayer { +pub(crate) struct ProgressLayer { _s: std::marker::PhantomData, - sender: Sender, + writer: W, } -impl ProgressLayer { - pub(crate) fn new(sender: Sender) -> Self { - ProgressLayer { +impl ProgressLayer { + pub(crate) fn new(writer: W) -> Self { + Self { _s: std::marker::PhantomData, - sender, + writer, } } } +#[derive(Serialize, Debug, Clone, PartialEq)] +struct Out<'a> { + #[serde(flatten)] + event_fields: &'a FxHashMap, + #[serde(flatten)] + span_fields: &'a FxHashMap, +} + #[derive(Debug, Clone, PartialEq)] -struct ProgressStorage; +struct ProgressStorage { + data: FxHashMap, +} -impl Layer for ProgressLayer +impl Layer for ProgressLayer where S: tracing::Subscriber + for<'a> tracing_subscriber::registry::LookupSpan<'a>, + W: for<'a> MakeWriter<'a> + 'static, { fn on_new_span( &self, @@ -42,106 +54,64 @@ where id: &span::Id, ctx: tracing_subscriber::layer::Context<'_, S>, ) { - if attrs.metadata().target() != "lsp_progress" { - return; - } - let span = ctx.span(id).unwrap(); let mut extensions = span.extensions_mut(); - extensions.insert(ProgressStorage); - let mut fields = BTreeMap::new(); - let mut visitor = StringVisitor(&mut fields); + let mut fields = FxHashMap::default(); + let mut visitor = JsonVisitor(&mut fields); attrs.record(&mut visitor); - let begin = lsp_types::WorkDoneProgressBegin { - title: String::from(attrs.metadata().name()), - cancellable: Some(true), - message: Some(String::from("resolving targets")), - percentage: None, - }; - - let token: lsp_types::NumberOrString = - lsp_types::ProgressToken::String("rust-project/discoverBuckTargets".to_owned()); - - let notification = lsp_server::Notification::new( - lsp_types::notification::Progress::METHOD.to_owned(), - lsp_types::ProgressParams { - token, - value: lsp_types::ProgressParamsValue::WorkDone(WorkDoneProgress::Begin(begin)), - }, - ); - - let _err = self.sender.send(notification.into()); + extensions.insert(ProgressStorage { data: fields }); } - fn on_event(&self, event: &tracing::Event<'_>, _: tracing_subscriber::layer::Context<'_, S>) { - let mut fields = BTreeMap::new(); - let mut visitor = StringVisitor(&mut fields); + fn on_event(&self, event: &tracing::Event<'_>, ctx: tracing_subscriber::layer::Context<'_, S>) { + let mut event_fields = FxHashMap::default(); + let mut visitor = JsonVisitor(&mut event_fields); event.record(&mut visitor); - let message = fields.get("message").map(|value| value.to_owned()); - - let token: lsp_types::NumberOrString = - lsp_types::ProgressToken::String("rust-project/discoverBuckTargets".to_owned()); - - let report = lsp_types::WorkDoneProgressReport { - message, - cancellable: Some(true), - percentage: None, + let span_fields = match ctx.lookup_current() { + Some(span) => { + let ext = span.extensions(); + if let Some(storage) = ext.get::() { + storage.data.clone() + } else { + FxHashMap::default() + } + } + _ => FxHashMap::default(), }; - let report: lsp_types::ProgressParamsValue = - lsp_types::ProgressParamsValue::WorkDone(WorkDoneProgress::Report(report)); - let notification = lsp_server::Notification::new( - lsp_types::notification::Progress::METHOD.to_owned(), - lsp_types::ProgressParams { - token, - value: 
-    fn on_event(&self, event: &tracing::Event<'_>, _: tracing_subscriber::layer::Context<'_, S>) {
-        let mut fields = BTreeMap::new();
-        let mut visitor = StringVisitor(&mut fields);
+    fn on_event(&self, event: &tracing::Event<'_>, ctx: tracing_subscriber::layer::Context<'_, S>) {
+        let mut event_fields = FxHashMap::default();
+        let mut visitor = JsonVisitor(&mut event_fields);
         event.record(&mut visitor);

-        let message = fields.get("message").map(|value| value.to_owned());
-
-        let token: lsp_types::NumberOrString =
-            lsp_types::ProgressToken::String("rust-project/discoverBuckTargets".to_owned());
-
-        let report = lsp_types::WorkDoneProgressReport {
-            message,
-            cancellable: Some(true),
-            percentage: None,
+        let span_fields = match ctx.lookup_current() {
+            Some(span) => {
+                let ext = span.extensions();
+                if let Some(storage) = ext.get::<ProgressStorage>() {
+                    storage.data.clone()
+                } else {
+                    FxHashMap::default()
+                }
+            }
+            _ => FxHashMap::default(),
         };

-        let report: lsp_types::ProgressParamsValue =
-            lsp_types::ProgressParamsValue::WorkDone(WorkDoneProgress::Report(report));
-        let notification = lsp_server::Notification::new(
-            lsp_types::notification::Progress::METHOD.to_owned(),
-            lsp_types::ProgressParams {
-                token,
-                value: report,
-            },
-        );
-
-        let _err = self.sender.send(notification.into());
-    }
-
-    fn on_close(&self, id: span::Id, ctx: tracing_subscriber::layer::Context<'_, S>) {
-        let span = ctx.span(&id).unwrap();
-        let extensions = span.extensions();
-        if extensions.get::<ProgressStorage>().is_none() {
-            return;
+        if !event_fields.contains_key("kind") {
+            event_fields.insert("kind".to_owned(), Value::String("progress".to_owned()));
         }

-        let token: lsp_types::NumberOrString =
-            lsp_types::ProgressToken::String("rust-project/discoverBuckTargets".to_owned());
-
-        let end = lsp_types::WorkDoneProgressEnd {
-            message: Some(String::from("resolving targets")),
+        let out = Out {
+            event_fields: &event_fields,
+            span_fields: &span_fields,
         };
-
-        let report: lsp_types::ProgressParamsValue =
-            lsp_types::ProgressParamsValue::WorkDone(WorkDoneProgress::End(end));
-
-        let notification = lsp_server::Notification::new(
-            lsp_types::notification::Progress::METHOD.to_owned(),
-            lsp_types::ProgressParams {
-                token,
-                value: report,
-            },
-        );
-
-        let _err = self.sender.send(notification.into());
+        let out = serde_json::to_string(&out).unwrap();
+        let mut writer = self.writer.make_writer();
+        writeln!(writer, "{}", out).expect("unable to write");
     }
 }

-struct StringVisitor<'a>(&'a mut BTreeMap<String, String>);
+struct JsonVisitor<'a>(&'a mut FxHashMap<String, Value>);

-impl<'a> tracing::field::Visit for StringVisitor<'a> {
+impl<'a> tracing::field::Visit for JsonVisitor<'a> {
     fn record_str(&mut self, field: &tracing::field::Field, value: &str) {
-        self.0.insert(field.name().to_owned(), String::from(value));
+        let value: String = if field.name() == "project" {
+            serde_json::from_str(value).unwrap()
+        } else {
+            String::from(value)
+        };
+        self.0
+            .insert(field.name().to_owned(), serde_json::Value::from(value));
     }

     fn record_debug(&mut self, field: &tracing::field::Field, value: &dyn std::fmt::Debug) {
-        self.0
-            .insert(field.name().to_owned(), format!("{:?}", value));
+        self.0.insert(
+            field.name().to_owned(),
+            serde_json::Value::from(format!("{:?}", value)),
+        );
     }
 }
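Before the new scuba.rs below, a note on how the rewritten layer is consumed: because it is generic over `MakeWriter`, the same code can log to stderr, a file, or an in-memory buffer in tests. A minimal sketch of plausible wiring (this patch does not show rust-project's actual subscriber setup, so treat the registration below as an assumption):

use tracing_subscriber::prelude::*;

fn init_progress_logging() {
    // `std::io::stderr` satisfies `for<'a> MakeWriter<'a>`, filling the `W`
    // bound on the layer; the `S` parameter is inferred as the registry.
    let layer = ProgressLayer::new(std::io::stderr);
    tracing_subscriber::registry().with(layer).init();
}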
diff --git a/integrations/rust-project/src/scuba.rs b/integrations/rust-project/src/scuba.rs
new file mode 100644
index 0000000000000..9ef13e4d51f86
--- /dev/null
+++ b/integrations/rust-project/src/scuba.rs
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+#![allow(deprecated)] // whoami::hostname is deprecated
+
+use std::path::Path;
+use std::time::Duration;
+
+use crate::cli::Input;
+
+#[cfg(fbcode_build)]
+pub(crate) fn log_develop(duration: Duration, input: Input, invoked_by_ra: bool) {
+    let mut sample = new_sample("develop");
+    sample.add("duration_ms", duration.as_millis() as i64);
+    sample.add("input", format!("{:?}", input));
+    sample.add("revision", get_sl_revision());
+    sample.add("invoked_by_ra", invoked_by_ra);
+    emit_log(sample);
+}
+
+#[cfg(not(fbcode_build))]
+pub(crate) fn log_develop(_duration: Duration, _input: Input, _invoked_by_ra: bool) {}
+
+#[cfg(fbcode_build)]
+pub(crate) fn log_develop_error(error: &anyhow::Error, input: Input, invoked_by_ra: bool) {
+    let mut sample = new_sample("develop");
+    sample.add("error", format!("{:#?}", error));
+    sample.add("input", format!("{:?}", input));
+    sample.add("revision", get_sl_revision());
+    sample.add("invoked_by_ra", invoked_by_ra);
+    emit_log(sample);
+}
+
+#[cfg(not(fbcode_build))]
+pub(crate) fn log_develop_error(_error: &anyhow::Error, _input: Input, _invoked_by_ra: bool) {}
+
+#[cfg(fbcode_build)]
+fn get_sl_revision() -> String {
+    std::process::Command::new("sl")
+        .arg("id")
+        .output()
+        .ok()
+        .and_then(|output| String::from_utf8(output.stdout).ok())
+        .unwrap_or("unknown".to_owned())
+}
+
+#[cfg(fbcode_build)]
+pub(crate) fn log_check(duration: Duration, saved_file: &Path, use_clippy: bool) {
+    let mut sample = new_sample("check");
+    sample.add("duration_ms", duration.as_millis() as i64);
+    sample.add("saved_file", saved_file.display().to_string());
+    sample.add("use_clippy", use_clippy.to_string());
+    emit_log(sample);
+}
+
+#[cfg(not(fbcode_build))]
+pub(crate) fn log_check(_duration: Duration, _saved_file: &Path, _use_clippy: bool) {}
+
+#[cfg(fbcode_build)]
+pub(crate) fn log_check_error(error: &anyhow::Error, saved_file: &Path, use_clippy: bool) {
+    let mut sample = new_sample("check");
+    sample.add("error", format!("{:#?}", error));
+    sample.add("saved_file", saved_file.display().to_string());
+    sample.add("use_clippy", use_clippy.to_string());
+    emit_log(sample);
+}
+
+#[cfg(not(fbcode_build))]
+pub(crate) fn log_check_error(_error: &anyhow::Error, _saved_file: &Path, _use_clippy: bool) {}
+
+#[cfg(fbcode_build)]
+fn new_sample(kind: &str) -> scuba_sample::ScubaSampleBuilder {
+    let fb = fbinit::expect_init();
+    let mut sample = scuba_sample::ScubaSampleBuilder::new(fb, "rust_project");
+    sample.add("root_span", kind);
+    sample.add("unixname", whoami::username());
+    sample.add("hostname", whoami::hostname());
+
+    // RA_PROXY_SESSION_ID is an environment variable set by the VS Code extension when it starts
+    // rust-analyzer-proxy. rust-analyzer-proxy then starts rust-analyzer with the same
+    // environment, and rust-analyzer invokes rust-project with the inherited environment.
+    if let Ok(session_id) = std::env::var("RA_PROXY_SESSION_ID") {
+        sample.add("session_id", session_id);
+    }
+    sample
+}
+
+#[cfg(fbcode_build)]
+fn emit_log(message: scuba_sample::ScubaSampleBuilder) {
+    use std::io::Write;
+    use std::process::Child;
+    use std::process::Command;
+    use std::process::Stdio;
+
+    let message = message.to_json().unwrap().to_string();
+    let mut child: Child = match Command::new("scribe_cat")
+        .stdin(Stdio::piped())
+        .stdout(Stdio::null())
+        .stderr(Stdio::null())
+        .args(["perfpipe_rust_project"])
+        .spawn()
+    {
+        Ok(child) => child,
+        Err(_err) => {
+            eprintln!("Error spawning scribe_cat child process");
+            return;
+        }
+    };
+
+    if child
+        .stdin
+        .as_mut()
+        .unwrap()
+        .write_all(message.as_bytes())
+        .is_err()
+    {
+        eprintln!("Could not write to scribe_cat stdin");
+    };
+}
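The RA_PROXY_SESSION_ID comment in `new_sample` above describes nothing more exotic than ordinary environment inheritance. A toy sketch of that chain (the binary name and the variable value here are invented for illustration):

use std::process::Command;

// The proxy sets the variable once when spawning rust-analyzer; rust-analyzer
// later spawns rust-project without touching it, so `new_sample` reads the
// same value back via std::env::var.
fn spawn_rust_analyzer(session_id: &str) -> std::io::Result<std::process::Child> {
    Command::new("rust-analyzer")
        .env("RA_PROXY_SESSION_ID", session_id)
        .spawn()
}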
- let mut cmd = Command::new("arch"); - cmd.arg("-x86_64").arg(fbsource_rustc); - cmd - } else { - Command::new(fbsource_rustc) - }; + let mut sysroot_cmd = Command::new(fbsource_rustc); sysroot_cmd .arg("--print=sysroot") .stdin(Stdio::null()) @@ -109,7 +109,7 @@ pub fn resolve_buckconfig_sysroot( } #[instrument(ret)] -pub fn resolve_rustup_sysroot() -> Result { +pub(crate) fn resolve_rustup_sysroot() -> Result { let mut cmd = Command::new("rustc"); cmd.arg("--print=sysroot") .stdin(Stdio::null()) @@ -119,10 +119,16 @@ pub fn resolve_rustup_sysroot() -> Result { let mut output = utf8_output(cmd.output(), &cmd)?; truncate_line_ending(&mut output); let sysroot = PathBuf::from(output); + let sysroot_src = sysroot + .join("lib") + .join("rustlib") + .join("src") + .join("rust") + .join("library"); let sysroot = Sysroot { sysroot, - sysroot_src: None, + sysroot_src: Some(sysroot_src), }; Ok(sysroot) } diff --git a/integrations/rust-project/src/target.rs b/integrations/rust-project/src/target.rs index 35539776f81e2..d0df456d4979b 100644 --- a/integrations/rust-project/src/target.rs +++ b/integrations/rust-project/src/target.rs @@ -7,23 +7,31 @@ * of this source tree. */ -use std::collections::BTreeMap; use std::ffi::OsStr; use std::fmt; +use std::fs; use std::ops::Deref; use std::path::Path; use std::path::PathBuf; +use anyhow::Context; +use rustc_hash::FxHashMap; +use serde::de::Error as _; +use serde::de::MapAccess; +use serde::de::SeqAccess; +use serde::de::Visitor; use serde::Deserialize; use serde::Deserializer; +use serde::Serialize; use crate::json_project::Edition; +use crate::path::canonicalize; -#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)] -pub struct Target(String); +#[derive(Serialize, Debug, Default, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)] +pub(crate) struct Target(String); impl Target { - pub fn new(target: T) -> Target + pub(crate) fn new(target: T) -> Target where T: Into, { @@ -77,12 +85,12 @@ impl AsRef<[u8]> for Target { } #[derive(Debug, Clone, Deserialize, PartialEq, Eq)] -pub struct MacroOutput { - pub actual: Target, - pub dylib: PathBuf, +pub(crate) struct MacroOutput { + pub(crate) actual: Target, + pub(crate) dylib: PathBuf, } #[derive(Debug, Clone, Deserialize, PartialEq, Eq)] -pub enum Kind { +pub(crate) enum Kind { #[serde(rename = "prelude//rules.bzl:rust_binary")] Binary, #[serde(rename = "prelude//rules.bzl:rust_library")] @@ -92,116 +100,196 @@ pub enum Kind { } #[derive(Debug, Clone, Deserialize, PartialEq, Eq)] -pub struct TargetInfo { - pub name: String, - pub label: String, - pub kind: Kind, - pub edition: Option, - pub srcs: Vec, +pub(crate) struct TargetInfo { + pub(crate) name: String, + pub(crate) label: String, + pub(crate) kind: Kind, + pub(crate) edition: Option, + pub(crate) srcs: Vec, /// Mapped srcs are effectively aliases. The key is a buck target /// of some kind, and the value is a path/filename that can be /// referred to in the rest of the rule. /// /// Asking buck to build the targets and tell us the output path /// is how we are able to support generated sources. 
diff --git a/integrations/rust-project/src/target.rs b/integrations/rust-project/src/target.rs
index 35539776f81e2..d0df456d4979b 100644
--- a/integrations/rust-project/src/target.rs
+++ b/integrations/rust-project/src/target.rs
@@ -7,23 +7,31 @@
  * of this source tree.
  */

-use std::collections::BTreeMap;
 use std::ffi::OsStr;
 use std::fmt;
+use std::fs;
 use std::ops::Deref;
 use std::path::Path;
 use std::path::PathBuf;

+use anyhow::Context;
+use rustc_hash::FxHashMap;
+use serde::de::Error as _;
+use serde::de::MapAccess;
+use serde::de::SeqAccess;
+use serde::de::Visitor;
 use serde::Deserialize;
 use serde::Deserializer;
+use serde::Serialize;

 use crate::json_project::Edition;
+use crate::path::canonicalize;

-#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
-pub struct Target(String);
+#[derive(Serialize, Debug, Default, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
+pub(crate) struct Target(String);

 impl Target {
-    pub fn new<T>(target: T) -> Target
+    pub(crate) fn new<T>(target: T) -> Target
     where
         T: Into<String>,
     {
@@ -77,12 +85,12 @@ impl AsRef<[u8]> for Target {
 }

 #[derive(Debug, Clone, Deserialize, PartialEq, Eq)]
-pub struct MacroOutput {
-    pub actual: Target,
-    pub dylib: PathBuf,
+pub(crate) struct MacroOutput {
+    pub(crate) actual: Target,
+    pub(crate) dylib: PathBuf,
 }

 #[derive(Debug, Clone, Deserialize, PartialEq, Eq)]
-pub enum Kind {
+pub(crate) enum Kind {
     #[serde(rename = "prelude//rules.bzl:rust_binary")]
     Binary,
     #[serde(rename = "prelude//rules.bzl:rust_library")]
@@ -92,116 +100,196 @@ pub enum Kind {
 }

 #[derive(Debug, Clone, Deserialize, PartialEq, Eq)]
-pub struct TargetInfo {
-    pub name: String,
-    pub label: String,
-    pub kind: Kind,
-    pub edition: Option<Edition>,
-    pub srcs: Vec<PathBuf>,
+pub(crate) struct TargetInfo {
+    pub(crate) name: String,
+    pub(crate) label: String,
+    pub(crate) kind: Kind,
+    pub(crate) edition: Option<Edition>,
+    pub(crate) srcs: Vec<PathBuf>,
     /// Mapped srcs are effectively aliases. The key is a buck target
     /// of some kind, and the value is a path/filename that can be
     /// referred to in the rest of the rule.
     ///
     /// Asking buck to build the targets and tell us the output path
     /// is how we are able to support generated sources.
-    pub mapped_srcs: BTreeMap<PathBuf, PathBuf>,
+    pub(crate) mapped_srcs: FxHashMap<PathBuf, PathBuf>,
     #[serde(rename = "crate")]
-    pub crate_name: Option<String>,
-    pub crate_root: Option<PathBuf>,
-    #[serde(rename = "deps", alias = "buck.direct_dependencies", default)]
-    pub deps: Vec<Target>,
-    pub tests: Vec<Target>,
+    pub(crate) crate_name: Option<String>,
+    pub(crate) crate_dynamic: Option<PathBuf>,
+    pub(crate) crate_root: PathBuf,
+    pub(crate) deps: Vec<Target>,
+    #[serde(rename = "tests")]
+    pub(crate) test_deps: Vec<Target>,
     // Optional set of renamed crates. in buck2, these are not unified with
     // `buck.direct_dependencies` and are instead a separate entry.
-    pub named_deps: BTreeMap<String, Target>,
-    pub proc_macro: Option<bool>,
+    #[serde(deserialize_with = "deserialize_named_deps")]
+    pub(crate) named_deps: FxHashMap<String, Target>,
+    pub(crate) proc_macro: Option<bool>,
     // Set of features enabled for this crate.
-    pub features: Vec<String>,
-    pub env: BTreeMap<String, String>,
+    pub(crate) features: Vec<String>,
+    pub(crate) env: FxHashMap<String, String>,
     // The ensured folder containing symlinks to all sources
-    pub source_folder: PathBuf,
-    pub in_workspace: bool,
+    pub(crate) source_folder: PathBuf,
+    pub(crate) project_relative_buildfile: PathBuf,
+    pub(crate) in_workspace: bool,
+    pub(crate) rustc_flags: Vec<String>,
 }

 #[derive(Debug, Clone, Deserialize, PartialEq, Eq)]
-pub struct AliasedTargetInfo {
-    pub actual: Target,
-}
-
-#[derive(Debug)]
-pub struct TargetInfoEntry {
-    pub index: usize,
-    pub info: TargetInfo,
+pub(crate) struct AliasedTargetInfo {
+    pub(crate) actual: Target,
 }

 impl TargetInfo {
-    pub fn crate_name(&self) -> String {
+    pub(crate) fn crate_name(&self) -> String {
+        if let Some(crate_dynamic) = &self.crate_dynamic {
+            if let Ok(contents) = fs::read_to_string(crate_dynamic) {
+                return contents.trim().to_owned();
+            }
+        }
         self.crate_name.as_deref().map_or_else(
             || self.name.as_str().replace('-', "_"),
             |crate_name| crate_name.to_owned(),
         )
     }

-    pub fn root_module(&self) -> PathBuf {
-        if let Some(crate_root) = &self.crate_root {
-            // If provided with a crate_root directly, and it's valid, use it.
-            if let Ok(path) = self.source_folder.join(crate_root).canonicalize() {
-                return path;
-            }
+    pub(crate) fn display_name(&self) -> String {
+        let name = self.name.strip_suffix("-unittest").unwrap_or(&self.name);
+        name.to_owned()
+    }
+
+    pub(crate) fn root_module(&self) -> anyhow::Result<PathBuf> {
+        let p = self.source_folder.join(&self.crate_root);
+        canonicalize(&p).with_context(|| format!("path={}", p.display()))
+    }
+
+    pub(crate) fn overridden_dep_names(&self) -> FxHashMap<Target, String> {
+        let mut overridden = FxHashMap::default();
+        for (name, target) in &self.named_deps {
+            overridden.insert(target.clone(), name.to_owned());
         }

-        // Matches buck crate_root fetching logic
-        let root_candidates =
-            // Use buck rust build.bxl fallback logic
-            vec![
-                PathBuf::from("lib.rs"),
-                PathBuf::from("main.rs"),
-                PathBuf::from(&self.name.replace('-', "_")),
-            ];
-
-        tracing::trace!(
-            ?self,
-            ?root_candidates,
-            "trying to discover a good root module"
-        );
-        // for all normal sources, we need to reference the file on the fbcode tree so navigation works
-        match self.srcs.iter().find(|src| {
-            root_candidates
-                .iter()
-                .any(|candidate| src.ends_with(candidate))
-        }) {
-            // If a real source is provided, returns its absolute path.
-            // This will not work with crate using more than one target as a direct src.
-            // Fortunately this is not used at the moment. Likely to be fixed in BXL instead
-            Some(path) => return path.to_path_buf(),
-            None => tracing::debug!(?self, "unable to find root for crate"),
-        };
-
-        for (dest, _) in self.mapped_srcs.iter() {
-            if root_candidates.iter().any(|c| dest.ends_with(c)) {
-                // Returns the files as seen in the materialized source
-                return self.source_folder.join(dest);
-            }
+        overridden
+    }
+
+    pub(crate) fn cfg(&self) -> Vec<String> {
+        // we need to take the existing features and prefix `feature=`
+        let feature_cfgs = self.features.iter().map(|f| format!("feature=\"{f}\""));
+
+        // parse out rustc --cfg= flags
+        let rustc_flags_cfgs = self
+            .rustc_flags
+            .iter()
+            .filter_map(|flag| flag.strip_prefix("--cfg=").map(str::to_string));
+
+        let mut cfg = feature_cfgs
+            .chain(rustc_flags_cfgs)
+            .collect::<Vec<String>>();
+
+        // Include "test" cfg so rust-analyzer picks up #[cfg(test)] code.
+        cfg.push("test".to_owned());
+
+        #[cfg(fbcode_build)]
+        {
+            // FIXME(JakobDegen): This should be set via a configuration mechanism of some kind.
+            cfg.push("fbcode_build".to_owned());
         }

-        if let Some(fallback_path) = self.srcs.first().cloned().or_else(|| {
-            self.mapped_srcs
-                .keys()
-                .next()
-                .map(|mapped_path| self.source_folder.join(mapped_path))
-        }) {
-            return fallback_path;
+        cfg
+    }
+}
+
+#[derive(Debug, Clone, Deserialize, PartialEq, Eq, Default)]
+pub(crate) struct ExpandedAndResolved {
+    pub(crate) expanded_targets: Vec<Target>,
+    pub(crate) queried_proc_macros: FxHashMap<Target, MacroOutput>,
+    pub(crate) resolved_deps: FxHashMap<Target, TargetInfo>,
+}
+
+fn deserialize_named_deps<'de, D>(deserializer: D) -> Result<FxHashMap<String, Target>, D::Error>
+where
+    D: Deserializer<'de>,
+{
+    struct NamedDepsVisitor;
+
+    impl<'de> Visitor<'de> for NamedDepsVisitor {
+        type Value = FxHashMap<String, Target>;
+
+        fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+            formatter.write_str("dict or list")
         }

-        tracing::error!(?self, "no crate root can be found");
+        fn visit_map<M>(self, map: M) -> Result<Self::Value, M::Error>
+        where
+            M: MapAccess<'de>,
+        {
+            FxHashMap::deserialize(serde::de::value::MapAccessDeserializer::new(map))
+        }
+
+        fn visit_seq<S>(self, mut seq: S) -> Result<Self::Value, S::Error>
+        where
+            S: SeqAccess<'de>,
+        {
+            let names: PathBuf = seq
+                .next_element()?
+                .ok_or_else(|| S::Error::invalid_length(0, &self))?;
+            let content = fs::read_to_string(&names).map_err(|e| {
+                S::Error::custom(format!("failed to read {}: {}", names.display(), e))
+            })?;
+            let mut lines = content.lines();

-        panic!("Invariant broken: rust-project is unable to determine a root module")
+            let mut named_deps = FxHashMap::default();
+            while let Some(target) = seq.next_element()? {
+                let name = lines.next().ok_or_else(|| {
+                    S::Error::custom(format!("not enough lines in {}", names.display()))
+                })?;
+                named_deps.insert(name.to_owned(), target);
+            }
+            Ok(named_deps)
+        }
+    }
+
+    deserializer.deserialize_any(NamedDepsVisitor)
+}
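To make the two encodings `deserialize_named_deps` accepts concrete: the dict form maps alias names to targets directly, while the list form names a file whose lines carry the aliases, one per subsequent target. A hypothetical probe (the labels are invented, and `Target` deserializing from a plain JSON string is assumed here):

#[derive(serde::Deserialize)]
struct Probe {
    #[serde(deserialize_with = "deserialize_named_deps")]
    named_deps: FxHashMap<String, Target>,
}

fn demo() -> serde_json::Result<()> {
    // Dict form: handled by `visit_map`, deserialized straight into the map.
    let probe: Probe = serde_json::from_str(
        r#"{"named_deps": {"futures_alias": "fbcode//third-party:futures"}}"#,
    )?;
    assert_eq!(probe.named_deps.len(), 1);

    // List form, handled by `visit_seq`:
    //   {"named_deps": ["out/names.txt", "fbcode//a:a", "fbcode//b:b"]}
    // where out/names.txt contains one alias per line:
    //   alias_for_a
    //   alias_for_b
    Ok(())
}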
-#[derive(Debug, Clone, Deserialize, PartialEq, Eq, Default)]
-pub struct ExpandedAndResolved {
-    pub expanded_targets: Vec<Target>,
-    pub queried_proc_macros: BTreeMap<Target, MacroOutput>,
-    pub resolved_deps: BTreeMap<Target, TargetInfo>,
+#[test]
+fn test_cfg() {
+    let info = TargetInfo {
+        name: "bar".to_owned(),
+        label: "bar".to_owned(),
+        kind: Kind::Library,
+        edition: None,
+        srcs: vec![],
+        mapped_srcs: FxHashMap::default(),
+        crate_name: None,
+        crate_dynamic: None,
+        crate_root: PathBuf::default(),
+        deps: vec![],
+        test_deps: vec![],
+        named_deps: FxHashMap::default(),
+        proc_macro: None,
+        features: vec!["foo_feature".to_owned()],
+        env: FxHashMap::default(),
+        source_folder: PathBuf::from("/tmp"),
+        project_relative_buildfile: PathBuf::from("bar/BUCK"),
+        in_workspace: false,
+        rustc_flags: vec!["--cfg=foo_cfg".to_owned(), "--other".to_owned()],
+    };
+
+    let expected = if cfg!(fbcode_build) {
+        vec![
+            "feature=\"foo_feature\"".to_owned(),
+            "foo_cfg".to_owned(),
+            "test".to_owned(),
+            "fbcode_build".to_owned(),
+        ]
+    } else {
+        vec![
+            "feature=\"foo_feature\"".to_owned(),
+            "foo_cfg".to_owned(),
+            "test".to_owned(),
+        ]
+    };
+
+    assert_eq!(info.cfg(), expected);
 }
diff --git a/integrations/rust-project/tests/BUCK b/integrations/rust-project/tests/BUCK
deleted file mode 100644
index 0c6440194f1cc..0000000000000
--- a/integrations/rust-project/tests/BUCK
+++ /dev/null
@@ -1,13 +0,0 @@
-load("@fbcode_macros//build_defs:buck_e2e.bzl", "buck2_e2e_test")
-
-buck2_e2e_test(
-    name = "test_rust_project",
-    srcs = [
-        "test_rust_project.py",
-    ],
-    test_with_compiled_buck2 = False,
-    test_with_deployed_buck2 = True,
-    deps = [
-        "//buck2/tests/e2e_util:utils",
-    ],
-)
diff --git a/integrations/rust-project/tests/BUCK.v2 b/integrations/rust-project/tests/BUCK.v2
new file mode 100644
index 0000000000000..d8c75b2842885
--- /dev/null
+++ b/integrations/rust-project/tests/BUCK.v2
@@ -0,0 +1,13 @@
+load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test")
+
+buck2_e2e_test(
+    name = "test_rust_project",
+    srcs = [
+        "test_rust_project.py",
+    ],
+    test_with_compiled_buck2 = False,
+    test_with_deployed_buck2 = True,
+    deps = [
+        "//buck2/tests/e2e_util:utils",
+    ],
+)
diff --git a/integrations/rust-project/tests/targets/alias/BUCK b/integrations/rust-project/tests/targets/alias/BUCK.v2
similarity index 100%
rename from integrations/rust-project/tests/targets/alias/BUCK
rename to integrations/rust-project/tests/targets/alias/BUCK.v2
diff --git a/integrations/rust-project/tests/targets/bar/BUCK b/integrations/rust-project/tests/targets/bar/BUCK.v2
similarity index 100%
rename from integrations/rust-project/tests/targets/bar/BUCK
rename to integrations/rust-project/tests/targets/bar/BUCK.v2
diff --git a/integrations/rust-project/tests/targets/foo/BUCK b/integrations/rust-project/tests/targets/foo/BUCK
deleted file mode 100644
index edcc78f9389c9..0000000000000
--- a/integrations/rust-project/tests/targets/foo/BUCK
+++ /dev/null
@@ -1,26 +0,0 @@
-load("@fbcode_macros//build_defs/rust_library.bzl", "rust_library")
-
-rust_library(
-    name = "a",
-    srcs = ["lib_a.rs"],
-    crate_root = "lib_a.rs",
-    deps = ["//buck2/integrations/rust-project/tests/targets/bar:c"],
-)
-
-rust_library(
-    name = "b",
-    srcs = ["lib_b.rs"],
-    crate_root = "lib_b.rs",
-    deps = 
["//buck2/integrations/rust-project/tests/targets/bar:d"], -) - -rust_library( - name = "e", - srcs = ["lib.rs"], - deps = [":f"], -) - -rust_library( - name = "f", - srcs = ["lib.rs"], -) diff --git a/integrations/rust-project/tests/targets/foo/BUCK.v2 b/integrations/rust-project/tests/targets/foo/BUCK.v2 new file mode 100644 index 0000000000000..78d364dffba86 --- /dev/null +++ b/integrations/rust-project/tests/targets/foo/BUCK.v2 @@ -0,0 +1,28 @@ +load("@fbcode_macros//build_defs/rust_library.bzl", "rust_library") + +rust_library( + name = "a", + srcs = ["lib_a.rs"], + crate_root = "lib_a.rs", + deps = ["//buck2/integrations/rust-project/tests/targets/bar:c"], +) + +rust_library( + name = "b", + srcs = ["lib_b.rs"], + crate_root = "lib_b.rs", + deps = ["//buck2/integrations/rust-project/tests/targets/bar:d"], +) + +rust_library( + name = "e", + srcs = ["lib_e.rs"], + crate_root = "lib_e.rs", + deps = [":f"], +) + +rust_library( + name = "f", + srcs = ["lib_f.rs"], + crate_root = "lib_f.rs", +) diff --git a/integrations/rust-project/tests/targets/foo/lib_e.rs b/integrations/rust-project/tests/targets/foo/lib_e.rs new file mode 100644 index 0000000000000..f0b07150ba668 --- /dev/null +++ b/integrations/rust-project/tests/targets/foo/lib_e.rs @@ -0,0 +1,10 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use f as _; diff --git a/integrations/rust-project/tests/targets/foo/lib.rs b/integrations/rust-project/tests/targets/foo/lib_f.rs similarity index 100% rename from integrations/rust-project/tests/targets/foo/lib.rs rename to integrations/rust-project/tests/targets/foo/lib_f.rs diff --git a/integrations/rust-project/tests/test_rust_project.py b/integrations/rust-project/tests/test_rust_project.py index 8d4b8cf2f2f7f..178aa9e411573 100644 --- a/integrations/rust-project/tests/test_rust_project.py +++ b/integrations/rust-project/tests/test_rust_project.py @@ -6,10 +6,12 @@ # of this source tree. 
import json +from pathlib import Path from buck2.tests.e2e_util.api.buck import Buck from buck2.tests.e2e_util.buck_workspace import buck_test + # Uses a dependency graph like this: # # foo:a foo:b @@ -27,12 +29,12 @@ @buck_test(inplace=True, skip_for_os=["darwin", "windows"]) async def test_workspaces(buck: Buck) -> None: result = await buck.bxl( - "prelude//rust/rust-analyzer/resolve_deps.bxl:expand_and_resolve", + "prelude//rust/rust-analyzer/resolve_deps.bxl:resolve_targets", "--", "--targets", "//buck2/integrations/rust-project/tests/targets/foo:e", ) - result = json.loads(result.stdout) + result = json.load(open(result.stdout.rstrip())) assert result["expanded_targets"] == [ "fbcode//buck2/integrations/rust-project/tests/targets/bar:d", "fbcode//buck2/integrations/rust-project/tests/targets/foo:a", @@ -40,50 +42,93 @@ async def test_workspaces(buck: Buck) -> None: target_and_in_workspace = { t: v["in_workspace"] for t, v in result["resolved_deps"].items() } - assert target_and_in_workspace == { + expected_subset = { "fbcode//buck2/integrations/rust-project/tests/targets/foo:a": True, "fbcode//buck2/integrations/rust-project/tests/targets/bar:c": False, "fbcode//buck2/integrations/rust-project/tests/targets/bar:d": True, "fbcode//buck2/integrations/rust-project/tests/targets/foo:e": True, "fbcode//buck2/integrations/rust-project/tests/targets/foo:f": True, } + assert expected_subset.items() <= target_and_in_workspace.items() # The target being edited is not in any workspaces result = await buck.bxl( - "prelude//rust/rust-analyzer/resolve_deps.bxl:expand_and_resolve", + "prelude//rust/rust-analyzer/resolve_deps.bxl:resolve_targets", "--", "--targets", "//buck2/integrations/rust-project/tests/targets/bar:c", ) - result = json.loads(result.stdout) + result = json.load(open(result.stdout.rstrip())) assert result["expanded_targets"] == [ "fbcode//buck2/integrations/rust-project/tests/targets/bar:c" ] target_and_in_workspace = { t: v["in_workspace"] for t, v in result["resolved_deps"].items() } - assert target_and_in_workspace == { + + expected_subset = { "fbcode//buck2/integrations/rust-project/tests/targets/bar:c": True, "fbcode//buck2/integrations/rust-project/tests/targets/foo:e": False, "fbcode//buck2/integrations/rust-project/tests/targets/foo:f": False, } + assert expected_subset.items() <= target_and_in_workspace.items() + @buck_test(inplace=True, skip_for_os=["darwin", "windows"]) async def test_alias(buck: Buck) -> None: result = await buck.bxl( - "prelude//rust/rust-analyzer/resolve_deps.bxl:expand_and_resolve", + "prelude//rust/rust-analyzer/resolve_deps.bxl:resolve_targets", "--", "--targets", "fbcode//buck2/integrations/rust-project/tests/targets/alias/...", ) - result = json.loads(result.stdout) + result = json.load(open(result.stdout.rstrip())) assert result["expanded_targets"] == [ "fbcode//buck2/integrations/rust-project/tests/targets/alias:l", "fbcode//buck2/integrations/rust-project/tests/targets/alias:l_alias", ] +@buck_test(inplace=True, skip_for_os=["darwin", "windows"]) +async def test_resolve_owning_buildfile_no_extra_targets(buck: Buck) -> None: + result = await buck.bxl( + "prelude//rust/rust-analyzer/resolve_deps.bxl:resolve_owning_buildfile", + "--", + "--max_extra_targets=0", + "--files", + str( + Path("buck2/integrations/rust-project/tests/targets/foo/lib_f.rs").resolve() + ), + ) + result = json.loads(result.stdout) + assert len(result) == 1 + buildfile_path, owners = result.popitem() + assert buildfile_path.endswith( + 
"buck2/integrations/rust-project/tests/targets/foo/TARGETS.v2" + ) + owners.sort() + assert owners == [ + "fbcode//buck2/integrations/rust-project/tests/targets/foo:f", + "fbcode//buck2/integrations/rust-project/tests/targets/foo:f-unittest", + ] + + +@buck_test(inplace=True, skip_for_os=["darwin", "windows"]) +async def test_exclude_workspaces(buck: Buck) -> None: + result = await buck.bxl( + "prelude//rust/rust-analyzer/resolve_deps.bxl:resolve_targets", + "--", + "--targets", + "//buck2/integrations/rust-project/tests/targets/foo:e", + "--exclude_workspaces=true", + ) + result = json.load(open(result.stdout.rstrip())) + assert result["expanded_targets"] == [ + "fbcode//buck2/integrations/rust-project/tests/targets/foo:e", + ] + + # FIXME: Remove once actual tests work on mac and windows @buck_test(inplace=True) async def test_noop(buck: Buck) -> None: diff --git a/lint_levels.bzl b/lint_levels.bzl new file mode 100644 index 0000000000000..31a36942256ff --- /dev/null +++ b/lint_levels.bzl @@ -0,0 +1,107 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# buildifier: keep sorted +CLIPPY_ALLOW = [ + "clippy::arc-with-non-send-sync", # Needs triage, see 'dashmap_directory_interner.rs:39:20' (`DashMap` is not `Send` or `Sync`) + "clippy::assigning-clones", # FIXME new in Rust 1.78.0 + "clippy::await_holding_lock", # FIXME new in Rust 1.74 + "clippy::blocks-in-conditions", # FIXME new in Rust 1.77.1 + "clippy::bool-assert-comparison", # Sometimes more clear to write it this way + "clippy::bool-to-int-with-if", # Using if branches to return 1 or 0 is valid, but this complains that we should use `int::from`, which is arguably less clear + "clippy::cognitive_complexity", # This is an arbitrary linter + "clippy::collapsible-else-if", # Sometimes nesting better expresses intent + "clippy::collapsible-if", # Sometimes nesting better expresses intent + "clippy::comparison_chain", # Generates worse code and harder to read + "clippy::comparison_to_empty", # x == "" is clearer than x.is_empty() + "clippy::derive_partial_eq_without_eq", # In generated protobuf code + "clippy::disallowed_names", # Not using foo, bar, baz in test data is silly + "clippy::enum-variant-names", # Sometimes you do want the same prefixes + "clippy::from_iter_instead_of_collect", # https://fb.workplace.com/groups/buck2core/posts/835300915330313 + "clippy::implicit-hasher", # Makes code more complex for little benefit + "clippy::len-without-is-empty", # len() == 0 is perfectly clear + "clippy::manual-range-contains", # a <= b && b <= c is way clearer than (a..=c).contains(&b) + "clippy::many_single_char_names", # match(a,b,c,d,e) sometimes makes sense + "clippy::match-like-matches-macro", # Using matches! is sometimes clearer, sometimes not + "clippy::match-wild-err-arm", # Seems reasonable to panic on Err(_) + "clippy::missing-safety-doc", # Documentation should be tailored to the reader, not the linter + "clippy::module_inception", # Unnecessary restriction. + "clippy::multiple-bound-locations", # FIXME New in 1.78.0 + "clippy::mut_from_ref", # Tries to check soundness, which Rust already does + "clippy::mutable_key_type", # FIXME new in Rust 1.80 + "clippy::naive-bytecount", # Requires an extra dependency for marginal gains. 
+ "clippy::needless_borrows_for_generic_args", # FIXME new in Rust 1.74 + "clippy::needless_collect", # False positives: doesn't understand lifetimes, or e.g. DoubleEndedIterator. + "clippy::needless_lifetimes", # This is throwing false positives + "clippy::needless_pass_by_ref_mut", # Mostly identifies cases where we are accepting `&mut T` because we logically accept a mut reference but don't technically require it (i.e. we want the api to enforce the caller has a mut ref, but we don't technically need it). + "clippy::needless_raw_string_hashes", # False positives + "clippy::needless_update", # Our RE structs have slightly different definitions in internal and OSS. + "clippy::new_without_default", # Default is not always useful + "clippy::non_canonical_partial_ord_impl", # Almost exclusively identifies cases where a type delegates ord/partial ord to something else (including Derivative-derived PartialOrd) and in that case being explicit about that delegation is better than following some canonical partialord impl. + "clippy::question_mark", + "clippy::single_match", # Sometimes a single match looks good + "clippy::too_many_arguments", # This is an arbitrary limit set on number of arguments and not always useful + "clippy::type_complexity", # This is an arbitrary limit set on number of type parameterizations and not always useful + "clippy::unconditional_recursion", # FIXME new in Rust 1.77.1 + "clippy::unnecessary-wraps", # Sometimes unnecessary wraps provide the right API + "clippy::unwrap-or-default", # Defaults aren't always more clear as it removes the type information when reading code + "clippy::useless_conversion", # Removed all obvious but there are some reports I'm unclear how to fix + "clippy::wrong_self_convention", # These rules are useless pedantry +] + +# buildifier: keep sorted +CLIPPY_DENY = [ + "clippy::all", + "clippy::await_holding_lock", + "clippy::await_holding_refcell_ref", + "clippy::dbg_macro", + "clippy::debug_assert_with_mut_call", + "clippy::empty_enum", + "clippy::filter_map_next", + "clippy::flat_map_option", + "clippy::large_stack_arrays", + "clippy::linkedlist", + "clippy::macro_use_imports", + "clippy::maybe_infinite_iter", + "clippy::mut_mut", + "clippy::needless_borrow", + "clippy::needless_continue", + "clippy::needless_range_loop", + "clippy::nonstandard_macro_braces", + "clippy::rc_mutex", + "clippy::ref_option_ref", + "clippy::rest_pat_in_fully_bound_structs", + "clippy::same_functions_in_if_condition", + "clippy::str_to_string", + "clippy::string_to_string", + "clippy::todo", + "clippy::trivially_copy_pass_by_ref", + "clippy::tuple_array_conversions", + "clippy::unnecessary-literal-unwrap", # TBD if this should be CLIPPY_ALLOW + "clippy::useless-vec", # TBD if this should be CLIPPY_ALLOW + "clippy::useless_transmute", + "clippy::verbose_file_reads", + "let_underscore_drop", + "unused_extern_crates", +] + +# buildifier: keep sorted +CLIPPY_AUTOFIX = [ + # Only add machine-fixable warnings in this list, or we'll see them all + # the time in CI. 
+ "clippy::cloned_instead_of_copied", + "clippy::inconsistent_struct_constructor", + "clippy::inefficient_to_string", + "clippy::let_unit_value", + "clippy::map_flatten", + "clippy::map_unwrap_or", + "clippy::needless_bitwise_bool", + "clippy::needless_borrow", + "clippy::range_minus_one", + "clippy::unwrap_or_default", + "clippy::useless-conversion", +] diff --git a/prelude/.buckconfig b/prelude/.buckconfig index ca580463d8865..1fe72c2033860 100644 --- a/prelude/.buckconfig +++ b/prelude/.buckconfig @@ -5,17 +5,6 @@ prelude = . # but our custom config format (yuk) doesn't accept inline comments. # Therefore, we hide the name of the group when not open source. -[not_repositories] # @oss-enable -fbcode = ../.. -fbsource = ../../.. -ovr_config = ../../../arvr/tools/build_defs/config -bazel_skylib = ../../../third-party/bazel-skylib -fbcode_macros = ../../../tools/build_defs/fbcode_macros -fbobjc_dylibs = ../../../xplat/configurations/buck/apple/dylibs -buck = ../../../xplat/build_infra/buck_client -buck_bazel_skylib = ../../../xplat/build_infra/buck_client/third-party/skylark/bazel-skylib -toolchains = ../toolchains - [repository_aliases] [not_repository_aliases] # @oss-enable config = ovr_config diff --git a/prelude/BUCK b/prelude/BUCK index f12663a051707..afce0b50f28e6 100644 --- a/prelude/BUCK +++ b/prelude/BUCK @@ -1,14 +1,27 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") load(":native.bzl", prelude = "native") oncall("build_infra") +source_listing(exclude = [ + # Exclude PACKAGE file using modifiers since those are not enabled everywhere yet. + "PACKAGE", +]) + # Done to avoid triggering a lint rule that replaces glob with an fbcode macro globby = glob srcs = globby( ["**"], # Context: https://fb.workplace.com/groups/buck2users/posts/3121903854732641/ - exclude = ["**/.pyre_configuration.local"], + exclude = [ + "**/.pyre_configuration.local", + # Unfortunately, using modifiers require loading bzl files in outside of prelude, + # and that currently breaks isolated tests that attempt to grab a best-effort prelude + # from the filegroup below. + # TODO: Switch these tests to use the bundled prelude instead. 
+ "PACKAGE", + ], ) # Re-export filegroups that are behind package boundary violations for diff --git a/prelude/abi/BUCK b/prelude/abi/BUCK deleted file mode 100644 index bb72595e77f46..0000000000000 --- a/prelude/abi/BUCK +++ /dev/null @@ -1,23 +0,0 @@ -config_setting( - name = "gnu", - constraint_values = [ - "prelude//abi/constraints:gnu", - ], - visibility = ["PUBLIC"], -) - -config_setting( - name = "msvc", - constraint_values = [ - "prelude//abi/constraints:msvc", - ], - visibility = ["PUBLIC"], -) - -config_setting( - name = "musl", - constraint_values = [ - "prelude//abi/constraints:musl", - ], - visibility = ["PUBLIC"], -) diff --git a/prelude/abi/BUCK.v2 b/prelude/abi/BUCK.v2 new file mode 100644 index 0000000000000..aa06c41471d53 --- /dev/null +++ b/prelude/abi/BUCK.v2 @@ -0,0 +1,29 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +config_setting( + name = "gnu", + constraint_values = [ + "prelude//abi/constraints:gnu", + ], + visibility = ["PUBLIC"], +) + +config_setting( + name = "msvc", + constraint_values = [ + "prelude//abi/constraints:msvc", + ], + visibility = ["PUBLIC"], +) + +config_setting( + name = "musl", + constraint_values = [ + "prelude//abi/constraints:musl", + ], + visibility = ["PUBLIC"], +) diff --git a/prelude/abi/constraints/BUCK b/prelude/abi/constraints/BUCK deleted file mode 100644 index 9b5673523b716..0000000000000 --- a/prelude/abi/constraints/BUCK +++ /dev/null @@ -1,24 +0,0 @@ -# Used by open source projects to support `prelude//` - -constraint_setting( - name = "abi", - visibility = ["PUBLIC"], -) - -constraint_value( - name = "gnu", - constraint_setting = ":abi", - visibility = ["PUBLIC"], -) - -constraint_value( - name = "msvc", - constraint_setting = ":abi", - visibility = ["PUBLIC"], -) - -constraint_value( - name = "musl", - constraint_setting = ":abi", - visibility = ["PUBLIC"], -) diff --git a/prelude/abi/constraints/BUCK.v2 b/prelude/abi/constraints/BUCK.v2 new file mode 100644 index 0000000000000..7448fa7c4a71b --- /dev/null +++ b/prelude/abi/constraints/BUCK.v2 @@ -0,0 +1,30 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +# Used by open source projects to support `prelude//` + +constraint_setting( + name = "abi", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "gnu", + constraint_setting = ":abi", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "msvc", + constraint_setting = ":abi", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "musl", + constraint_setting = ":abi", + visibility = ["PUBLIC"], +) diff --git a/prelude/alias.bzl b/prelude/alias.bzl index 0593f3a841d89..d11ef4f884f70 100644 --- a/prelude/alias.bzl +++ b/prelude/alias.bzl @@ -19,6 +19,9 @@ def configured_alias_impl(ctx: AnalysisContext) -> list[Provider]: return ctx.attrs.fallback_actual.providers fail("must set one of `configured_actual` or `fallback_actual`") +def toolchain_alias_impl(ctx: AnalysisContext) -> list[Provider]: + return ctx.attrs.actual.providers + def versioned_alias_impl(_ctx: AnalysisContext) -> list[Provider]: # Should be intercepted in macro stub and converted to `alias`. 
fail("unsupported") diff --git a/prelude/android/aapt2_link.bzl b/prelude/android/aapt2_link.bzl index 84f2797bf3b23..46d4fd8cd5a88 100644 --- a/prelude/android/aapt2_link.bzl +++ b/prelude/android/aapt2_link.bzl @@ -9,13 +9,13 @@ load("@prelude//android:android_providers.bzl", "Aapt2LinkInfo", "AndroidResourc load("@prelude//android:android_toolchain.bzl", "AndroidToolchainInfo") BASE_PACKAGE_ID = 0x7f -ZIP_NOTHING_TO_DO_EXIT_CODE = 12 def get_aapt2_link( ctx: AnalysisContext, android_toolchain: AndroidToolchainInfo, resource_infos: list[AndroidResourceInfo], android_manifest: Artifact, + manifest_entries: dict, includes_vector_drawables: bool, no_auto_version: bool, no_version_transitions: bool, @@ -23,9 +23,8 @@ def get_aapt2_link( no_resource_removal: bool, should_keep_raw_values: bool, package_id_offset: int, - resource_stable_ids: [Artifact, None], + resource_stable_ids: Artifact | None, preferred_density: [str, None], - min_sdk: [str, None], filter_locales: bool, locales: list[str], compiled_resource_apks: list[Artifact], @@ -34,9 +33,9 @@ def get_aapt2_link( link_infos = [] for use_proto_format in [False, True]: if use_proto_format: - identifier = "use_proto_format" + identifier = "use_proto" else: - identifier = "not_proto_format" + identifier = "not_proto" aapt2_command = cmd_args(android_toolchain.aapt2) aapt2_command.add("link") @@ -49,8 +48,13 @@ def get_aapt2_link( aapt2_command.add(["--proguard", proguard_config.as_output()]) # We don't need the R.java output, but aapt2 won't output R.txt unless we also request R.java. - r_dot_java = ctx.actions.declare_output("{}/initial-rdotjava".format(identifier), dir = True) + # A drawback of this is that the directory structure for the R.java output is deep, resulting + # in long path issues on Windows. 
+        # We can declare a custom dummy package to drastically shorten <package>, which is sketchy, but effective
+        r_dot_java = ctx.actions.declare_output("{}/unused-rjava".format(identifier), dir = True)
         aapt2_command.add(["--java", r_dot_java.as_output()])
+        aapt2_command.add(["--custom-package", "dummy.package"])
+
         r_dot_txt = ctx.actions.declare_output("{}/R.txt".format(identifier))
         aapt2_command.add(["--output-text-symbols", r_dot_txt.as_output()])
@@ -77,8 +81,23 @@ def get_aapt2_link(
             aapt2_command.add(["--stable-ids", resource_stable_ids])
         if preferred_density != None:
             aapt2_command.add(["--preferred-density", preferred_density])
-        if min_sdk != None:
-            aapt2_command.add(["--min-sdk-version", min_sdk])
+
+        manifest_entries_min_sdk = manifest_entries.get("min_sdk_version", None)
+        if manifest_entries_min_sdk != None:
+            aapt2_command.add(["--min-sdk-version", str(manifest_entries_min_sdk)])
+        manifest_entries_target_sdk = manifest_entries.get("target_sdk_version", None)
+        if manifest_entries_target_sdk != None:
+            aapt2_command.add(["--target-sdk-version", str(manifest_entries_target_sdk)])
+        manifest_entries_version_code = manifest_entries.get("version_code", None)
+        if manifest_entries_version_code != None:
+            aapt2_command.add(["--version-code", manifest_entries_version_code])
+        manifest_entries_version_name = manifest_entries.get("version_name", None)
+        if manifest_entries_version_name != None:
+            aapt2_command.add(["--version-name", manifest_entries_version_name])
+        manifest_entries_debug_mode = str(manifest_entries.get("debug_mode", "False")).lower() == "true"
+        if manifest_entries_debug_mode:
+            aapt2_command.add(["--debug-mode"])
+
         if filter_locales and len(locales) > 0:
             aapt2_command.add("-c")
@@ -98,8 +117,11 @@ def get_aapt2_link(
         aapt2_compile_rules_args_file = ctx.actions.write("{}/aapt2_compile_rules_args_file".format(identifier), cmd_args(aapt2_compile_rules, delimiter = " "))
         aapt2_command.add("-R")
-        aapt2_command.add(cmd_args(aapt2_compile_rules_args_file, format = "@{}"))
-        aapt2_command.hidden(aapt2_compile_rules)
+        aapt2_command.add(cmd_args(
+            aapt2_compile_rules_args_file,
+            format = "@{}",
+            hidden = aapt2_compile_rules,
+        ))

         aapt2_command.add(additional_aapt2_params)
@@ -114,17 +136,11 @@ def get_aapt2_link(
         # If zip -d returns that there was nothing to do, then we don't fail.
         if len(extra_filtered_resources) > 0:
             filtered_resources_apk = ctx.actions.declare_output("{}/filtered-resource-apk.ap_".format(identifier))
-            filter_resources_sh_cmd = cmd_args([
-                "sh",
-                "-c",
-                'cp "$1" "$2" && chmod 644 "$2"; zip -d "$2" "$3"; if [$? 
-eq $4]; then\nexit 0\nfi\nexit $?;', - "--", - resources_apk, - filtered_resources_apk.as_output(), - extra_filtered_resources, - str(ZIP_NOTHING_TO_DO_EXIT_CODE), - ]) - ctx.actions.run(filter_resources_sh_cmd, category = "aapt2_filter_resources", identifier = identifier) + filter_resources_cmd = cmd_args(ctx.attrs._android_toolchain[AndroidToolchainInfo].aapt2_filter_resources) + filter_resources_cmd.add(cmd_args(resources_apk, format = "--input-apk={}")) + filter_resources_cmd.add(cmd_args(filtered_resources_apk.as_output(), format = "--output-apk={}")) + filter_resources_cmd.add(cmd_args(extra_filtered_resources, format = "--extra-filtered-resources={}")) + ctx.actions.run(filter_resources_cmd, category = "aapt2_filter_resources", identifier = identifier) primary_resources_apk = filtered_resources_apk else: primary_resources_apk = resources_apk diff --git a/prelude/android/android.bzl b/prelude/android/android.bzl index e9f56ca8a8ede..b8cad162dc361 100644 --- a/prelude/android/android.bzl +++ b/prelude/android/android.bzl @@ -5,12 +5,18 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +load( + "@prelude//:validation_deps.bzl", + "VALIDATION_DEPS_ATTR_NAME", +) load("@prelude//android:cpu_filters.bzl", "ALL_CPU_FILTERS") load("@prelude//java:java.bzl", "AbiGenerationMode", "dex_min_sdk_version") -load("@prelude//decls/android_rules.bzl", "AaptMode", "DuplicateResourceBehaviour", "TargetCpuType") +load("@prelude//decls/android_rules.bzl", "AaptMode", "DuplicateResourceBehaviour") load("@prelude//decls/common.bzl", "buck") +load("@prelude//decls/core_rules.bzl", "TargetCpuType") load("@prelude//decls/toolchains_common.bzl", "toolchains_common") load("@prelude//genrule.bzl", "genrule_attributes") +load("@prelude//transitions/constraint_overrides.bzl", "constraint_overrides") load(":android_aar.bzl", "android_aar_impl") load(":android_apk.bzl", "android_apk_impl") load(":android_build_config.bzl", "android_build_config_impl") @@ -50,6 +56,27 @@ implemented_rules = { # Can't load `read_bool` here because it will cause circular load. 
 FORCE_SINGLE_CPU = read_root_config("buck2", "android_force_single_cpu") in ("True", "true")
 FORCE_SINGLE_DEFAULT_CPU = read_root_config("buck2", "android_force_single_default_cpu") in ("True", "true")
+DISABLE_STRIPPING = read_root_config("android", "disable_stripping") in ("True", "true")
+
+# Format is {"override_name": {"re_cap_key": "re_cap_value"}}; for example:
+# {
+#     "dynamic-listing": {
+#         "platform": "riot",
+#         "pool": "EUREKA_POOL",
+#     },
+#     "test-execution": {
+#         "platform": "riot",
+#         "pool": "EUREKA_POOL",
+#     },
+# }
+_RE_CAPS = attrs.option(attrs.dict(key = attrs.string(), value = attrs.dict(key = attrs.string(), value = attrs.string())), default = None)
+
+# Format is {"override_name": "re_use_case"}; for example:
+# {
+#     "dynamic-listing": "riot",
+#     "test-execution": "riot",
+# }
+_RE_USE_CASE = attrs.option(attrs.dict(key = attrs.string(), value = attrs.string()), default = None)

 extra_attributes = {
     "android_aar": {
@@ -59,8 +86,12 @@ extra_attributes = {
         "deps": attrs.list(attrs.split_transition_dep(cfg = cpu_split_transition), default = []),
         "min_sdk_version": attrs.option(attrs.int(), default = None),
         "native_library_merge_glue": attrs.option(attrs.split_transition_dep(cfg = cpu_split_transition), default = None),
-        "package_asset_libraries": attrs.default_only(attrs.bool(default = True)),
+        "native_library_merge_linker_args": attrs.option(attrs.dict(key = attrs.string(), value = attrs.list(attrs.arg())), default = None),
+        "package_asset_libraries": attrs.bool(default = True),
+        "package_resources": attrs.bool(default = True),
+        "relinker_extra_deps": attrs.list(attrs.split_transition_dep(cfg = cpu_split_transition), default = []),
         "resources_root": attrs.option(attrs.string(), default = None),
+        "strip_libraries": attrs.default_only(attrs.bool(default = not DISABLE_STRIPPING)),
         "_android_toolchain": toolchains_common.android(),
         "_cxx_toolchain": attrs.split_transition_dep(cfg = cpu_split_transition, default = "toolchains//:android-hack"),
         "_is_building_android_binary": attrs.default_only(attrs.bool(default = True)),
@@ -74,9 +105,9 @@ extra_attributes = {
     },
     "android_binary": {
         "aapt_mode": attrs.enum(AaptMode, default = "aapt1"),  # Match default in V1
+        "application_module_blacklist": attrs.option(attrs.list(attrs.transition_dep(cfg = cpu_transition)), default = None),
         "application_module_configs": attrs.dict(key = attrs.string(), value = attrs.list(attrs.transition_dep(cfg = cpu_transition)), sorted = False, default = {}),
         "build_config_values_file": attrs.option(attrs.one_of(attrs.transition_dep(cfg = cpu_transition), attrs.source()), default = None),
-        "constraint_overrides": attrs.list(attrs.string(), default = []),
         "deps": attrs.list(attrs.split_transition_dep(cfg = cpu_split_transition), default = []),
         "dex_tool": attrs.string(default = "d8"),  # Match default in V1
         "duplicate_resource_behavior": attrs.enum(DuplicateResourceBehaviour, default = "allow_by_default"),  # Match default in V1
@@ -86,6 +117,9 @@ extra_attributes = {
         "module_manifest_skeleton": attrs.option(attrs.one_of(attrs.transition_dep(cfg = cpu_transition), attrs.source()), default = None),
         "native_library_merge_code_generator": attrs.option(attrs.exec_dep(), default = None),
         "native_library_merge_glue": attrs.option(attrs.split_transition_dep(cfg = cpu_split_transition), default = None),
+        "native_library_merge_linker_args": attrs.option(attrs.dict(key = attrs.string(), value = attrs.list(attrs.arg())), default = None),
+        "relinker_extra_deps": attrs.list(attrs.split_transition_dep(cfg = 
cpu_split_transition), default = []), + "strip_libraries": attrs.bool(default = not DISABLE_STRIPPING), "_android_toolchain": toolchains_common.android(), "_cxx_toolchain": attrs.split_transition_dep(cfg = cpu_split_transition, default = "toolchains//:android-hack"), "_dex_toolchain": toolchains_common.dex(), @@ -94,7 +128,8 @@ extra_attributes = { "_is_force_single_cpu": attrs.default_only(attrs.bool(default = FORCE_SINGLE_CPU)), "_is_force_single_default_cpu": attrs.default_only(attrs.bool(default = FORCE_SINGLE_DEFAULT_CPU)), "_java_toolchain": toolchains_common.java_for_android(), - }, + VALIDATION_DEPS_ATTR_NAME: attrs.set(attrs.transition_dep(cfg = cpu_transition), sorted = True, default = []), + } | constraint_overrides.attributes, "android_build_config": { "_android_toolchain": toolchains_common.android(), "_build_only_native_code": attrs.default_only(attrs.bool(default = is_build_only_native_code())), @@ -103,6 +138,7 @@ extra_attributes = { }, "android_bundle": { "aapt_mode": attrs.enum(AaptMode, default = "aapt1"), # Match default in V1 + "application_module_blacklist": attrs.option(attrs.list(attrs.transition_dep(cfg = cpu_transition)), default = None), "application_module_configs": attrs.dict(key = attrs.string(), value = attrs.list(attrs.transition_dep(cfg = cpu_transition)), sorted = False, default = {}), "build_config_values_file": attrs.option(attrs.one_of(attrs.transition_dep(cfg = cpu_transition), attrs.source()), default = None), "deps": attrs.list(attrs.split_transition_dep(cfg = cpu_split_transition), default = []), @@ -112,13 +148,20 @@ extra_attributes = { "manifest_skeleton": attrs.option(attrs.one_of(attrs.transition_dep(cfg = cpu_transition), attrs.source()), default = None), "min_sdk_version": attrs.option(attrs.int(), default = None), "module_manifest_skeleton": attrs.option(attrs.one_of(attrs.transition_dep(cfg = cpu_transition), attrs.source()), default = None), + "native_library_merge_code_generator": attrs.option(attrs.exec_dep(), default = None), + "native_library_merge_glue": attrs.option(attrs.split_transition_dep(cfg = cpu_split_transition), default = None), + "native_library_merge_linker_args": attrs.option(attrs.dict(key = attrs.string(), value = attrs.list(attrs.arg())), default = None), + "relinker_extra_deps": attrs.list(attrs.split_transition_dep(cfg = cpu_split_transition), default = []), + "use_derived_apk": attrs.bool(default = False), "_android_toolchain": toolchains_common.android(), + "_cxx_toolchain": attrs.split_transition_dep(cfg = cpu_split_transition, default = "toolchains//:android-hack"), "_dex_toolchain": toolchains_common.dex(), "_exec_os_type": buck.exec_os_type_arg(), "_is_building_android_binary": attrs.default_only(attrs.bool(default = True)), "_is_force_single_cpu": attrs.default_only(attrs.bool(default = FORCE_SINGLE_CPU)), "_is_force_single_default_cpu": attrs.default_only(attrs.bool(default = FORCE_SINGLE_DEFAULT_CPU)), "_java_toolchain": toolchains_common.java_for_android(), + VALIDATION_DEPS_ATTR_NAME: attrs.set(attrs.transition_dep(cfg = cpu_transition), sorted = True, default = []), }, "android_instrumentation_apk": { "aapt_mode": attrs.enum(AaptMode, default = "aapt1"), # Match default in V1 @@ -126,26 +169,43 @@ extra_attributes = { "cpu_filters": attrs.list(attrs.enum(TargetCpuType), default = []), "deps": attrs.list(attrs.split_transition_dep(cfg = cpu_split_transition), default = []), "dex_tool": attrs.string(default = "d8"), # Match default in V1 + "is_self_instrumenting": attrs.bool(default = False), "manifest": 
attrs.option(attrs.one_of(attrs.transition_dep(cfg = cpu_transition), attrs.source()), default = None), "manifest_skeleton": attrs.option(attrs.one_of(attrs.transition_dep(cfg = cpu_transition), attrs.source()), default = None), "min_sdk_version": attrs.option(attrs.int(), default = None), + "native_library_merge_map": attrs.option(attrs.dict(key = attrs.string(), value = attrs.list(attrs.regex()), sorted = False), default = None), + "native_library_merge_sequence": attrs.option(attrs.list(attrs.any()), default = None), "_android_toolchain": toolchains_common.android(), "_dex_toolchain": toolchains_common.dex(), + "_exec_os_type": buck.exec_os_type_arg(), "_is_building_android_binary": attrs.default_only(attrs.bool(default = True)), "_is_force_single_cpu": attrs.default_only(attrs.bool(default = FORCE_SINGLE_CPU)), "_is_force_single_default_cpu": attrs.default_only(attrs.bool(default = FORCE_SINGLE_DEFAULT_CPU)), "_java_toolchain": toolchains_common.java_for_android(), }, "android_instrumentation_test": { + "extra_instrumentation_args": attrs.option(attrs.dict(key = attrs.string(), value = attrs.arg()), default = None), + "instrumentation_test_listener": attrs.option(attrs.exec_dep(), default = None), + "instrumentation_test_listener_class": attrs.option(attrs.string(), default = None), + "is_self_instrumenting": attrs.bool(default = False), + "re_caps": _RE_CAPS, + "re_use_case": _RE_USE_CASE, "_android_toolchain": toolchains_common.android(), "_exec_os_type": buck.exec_os_type_arg(), + "_java_test_toolchain": toolchains_common.java_for_host_test(), "_java_toolchain": toolchains_common.java_for_android(), }, "android_library": { "abi_generation_mode": attrs.option(attrs.enum(AbiGenerationMode), default = None), + "android_optional_jars": attrs.option(attrs.list(attrs.source()), default = None), "resources_root": attrs.option(attrs.string(), default = None), + VALIDATION_DEPS_ATTR_NAME: attrs.set(attrs.dep(), sorted = True, default = []), "_android_toolchain": toolchains_common.android(), "_build_only_native_code": attrs.default_only(attrs.bool(default = is_build_only_native_code())), + "_compose_stability_config": attrs.option(attrs.source(), default = select({ + "DEFAULT": None, + "fbsource//tools/build_defs/android/compose:enable-compose-stability-config": "fbsource//tools/build_defs/android/compose:stability_config", + })), "_dex_min_sdk_version": attrs.default_only(attrs.option(attrs.int(), default = dex_min_sdk_version())), "_dex_toolchain": toolchains_common.dex(), "_exec_os_type": buck.exec_os_type_arg(), @@ -175,14 +235,21 @@ extra_attributes = { "res": attrs.option(attrs.one_of(attrs.source(allow_directory = True), attrs.dict(key = attrs.string(), value = attrs.source(), sorted = True)), default = None), "_android_toolchain": toolchains_common.android(), "_build_only_native_code": attrs.default_only(attrs.bool(default = is_build_only_native_code())), + "_java_toolchain": toolchains_common.java_for_android(), }, "apk_genrule": genrule_attributes() | { + "default_outs": attrs.option(attrs.set(attrs.string(), sorted = False), default = None), + "outs": attrs.option(attrs.dict(key = attrs.string(), value = attrs.set(attrs.string(), sorted = False), sorted = False), default = None), "type": attrs.string(default = "apk"), + "use_derived_apk": attrs.bool(default = False), "_android_toolchain": toolchains_common.android(), + "_exec_os_type": buck.exec_os_type_arg(), + "_java_toolchain": toolchains_common.java_for_android(), }, "gen_aidl": { "import_paths": attrs.list(attrs.arg(), default 
= []), "_android_toolchain": toolchains_common.android(), + "_exec_os_type": buck.exec_os_type_arg(), "_java_toolchain": toolchains_common.java_for_android(), }, "prebuilt_native_library": { @@ -190,8 +257,11 @@ extra_attributes = { }, "robolectric_test": { "abi_generation_mode": attrs.option(attrs.enum(AbiGenerationMode), default = None), + "android_optional_jars": attrs.option(attrs.list(attrs.source()), default = None), + "java_agents": attrs.list(attrs.source(), default = []), "resources_root": attrs.option(attrs.string(), default = None), "robolectric_runtime_dependencies": attrs.list(attrs.source(), default = []), + "test_class_names_file": attrs.option(attrs.source(), default = None), "unbundled_resources_root": attrs.option(attrs.source(allow_directory = True), default = None), "_android_toolchain": toolchains_common.android(), "_build_only_native_code": attrs.default_only(attrs.bool(default = is_build_only_native_code())), diff --git a/prelude/android/android_aar.bzl b/prelude/android/android_aar.bzl index 759aba86beb77..5944f0db24801 100644 --- a/prelude/android/android_aar.bzl +++ b/prelude/android/android_aar.bzl @@ -14,18 +14,22 @@ load("@prelude//android:android_toolchain.bzl", "AndroidToolchainInfo") load("@prelude//android:configuration.bzl", "get_deps_by_platform") load("@prelude//android:cpu_filters.bzl", "CPU_FILTER_FOR_DEFAULT_PLATFORM", "CPU_FILTER_FOR_PRIMARY_PLATFORM") load("@prelude//android:util.bzl", "create_enhancement_context") -load("@prelude//java:java_providers.bzl", "get_all_java_packaging_deps", "get_all_java_packaging_deps_from_packaging_infos") +load("@prelude//java:java_providers.bzl", "create_java_packaging_dep", "get_all_java_packaging_deps", "get_all_java_packaging_deps_from_packaging_infos") load("@prelude//java:java_toolchain.bzl", "JavaToolchainInfo") +load("@prelude//utils:argfile.bzl", "argfile") +load("@prelude//utils:set.bzl", "set") def android_aar_impl(ctx: AnalysisContext) -> list[Provider]: deps_by_platform = get_deps_by_platform(ctx) primary_platform = CPU_FILTER_FOR_PRIMARY_PLATFORM if CPU_FILTER_FOR_PRIMARY_PLATFORM in deps_by_platform else CPU_FILTER_FOR_DEFAULT_PLATFORM deps = deps_by_platform[primary_platform] - java_packaging_deps = [packaging_dep for packaging_dep in get_all_java_packaging_deps(ctx, deps)] + excluded_java_packaging_deps = get_all_java_packaging_deps(ctx, ctx.attrs.excluded_java_deps) + excluded_java_packaging_deps_targets = set([excluded_dep.label.raw_target() for excluded_dep in excluded_java_packaging_deps]) + java_packaging_deps = [packaging_dep for packaging_dep in get_all_java_packaging_deps(ctx, deps) if not excluded_java_packaging_deps_targets.contains(packaging_dep.label.raw_target())] android_packageable_info = merge_android_packageable_info(ctx.label, ctx.actions, deps) - android_manifest = get_manifest(ctx, android_packageable_info, manifest_entries = {}) + android_manifest = get_manifest(ctx, android_packageable_info, ctx.attrs.manifest_entries, should_replace_application_id_placeholders = False) if ctx.attrs.include_build_config_class: build_config_infos = list(android_packageable_info.build_config_infos.traverse()) if android_packageable_info.build_config_infos else [] @@ -34,16 +38,23 @@ def android_aar_impl(ctx: AnalysisContext) -> list[Provider]: get_build_config_java_libraries(ctx, build_config_infos, package_type = "release", exopackage_modes = []), )) + enhancement_ctx = create_enhancement_context(ctx) + android_binary_native_library_info = get_android_binary_native_library_info(enhancement_ctx, 
android_packageable_info, deps_by_platform) + java_packaging_deps.extend([create_java_packaging_dep( + ctx, + lib.library_output.full_library, + ) for lib in android_binary_native_library_info.generated_java_code]) + jars = [dep.jar for dep in java_packaging_deps if dep.jar] classes_jar = ctx.actions.declare_output("classes.jar") java_toolchain = ctx.attrs._java_toolchain[JavaToolchainInfo] classes_jar_cmd = cmd_args([ java_toolchain.jar_builder, "--entries-to-jar", - ctx.actions.write("classes_jar_entries.txt", jars), + argfile(actions = ctx.actions, name = "classes_jar_entries.txt", args = jars), "--output", classes_jar.as_output(), - ]).hidden(jars) + ]) if ctx.attrs.remove_classes: remove_classes_file = ctx.actions.write("remove_classes.txt", ctx.attrs.remove_classes) @@ -56,6 +67,31 @@ def android_aar_impl(ctx: AnalysisContext) -> list[Provider]: ctx.actions.run(classes_jar_cmd, category = "create_classes_jar") + sub_targets = {} + dependency_sources_jars = [dep.sources_jar for dep in java_packaging_deps if dep.sources_jar] + if dependency_sources_jars: + combined_sources_jar = ctx.actions.declare_output("sources.jar") + java_toolchain = ctx.attrs._java_toolchain[JavaToolchainInfo] + combined_sources_jar_cmd = cmd_args([ + java_toolchain.jar_builder, + "--entries-to-jar", + argfile(actions = ctx.actions, name = "combined_sources_jar_entries.txt", args = dependency_sources_jars), + "--output", + combined_sources_jar.as_output(), + ]) + + if ctx.attrs.remove_classes: + remove_classes_file = ctx.actions.write("sources_remove_classes.txt", ctx.attrs.remove_classes) + combined_sources_jar_cmd.add([ + "--blocklist-patterns", + remove_classes_file, + "--blocklist-patterns-matcher", + "remove_classes_patterns_matcher", + ]) + + ctx.actions.run(combined_sources_jar_cmd, category = "create_sources_jar") + sub_targets["sources.jar"] = [DefaultInfo(default_output = combined_sources_jar)] + entries = [android_manifest, classes_jar] resource_infos = list(android_packageable_info.resource_infos.traverse()) if android_packageable_info.resource_infos else [] @@ -63,19 +99,20 @@ def android_aar_impl(ctx: AnalysisContext) -> list[Provider]: android_toolchain = ctx.attrs._android_toolchain[AndroidToolchainInfo] if resource_infos: res_dirs = [resource_info.res for resource_info in resource_infos if resource_info.res] - merged_resource_sources_dir = ctx.actions.declare_output("merged_resource_sources_dir/res", dir = True) - merge_resource_sources_cmd = cmd_args([ - android_toolchain.merge_android_resource_sources[RunInfo], - "--resource-paths", - ctx.actions.write("resource_paths.txt", res_dirs), - "--output", - merged_resource_sources_dir.as_output(), - ]).hidden(res_dirs) + if ctx.attrs.package_resources and res_dirs: + merged_resource_sources_dir = ctx.actions.declare_output("merged_resource_sources_dir/res", dir = True) + merge_resource_sources_cmd = cmd_args([ + android_toolchain.merge_android_resource_sources[RunInfo], + "--resource-paths", + argfile(actions = ctx.actions, name = "resource_paths.txt", args = res_dirs), + "--output", + merged_resource_sources_dir.as_output(), + ]) - ctx.actions.run(merge_resource_sources_cmd, category = "merge_android_resource_sources") + ctx.actions.run(merge_resource_sources_cmd, category = "merge_android_resource_sources") - r_dot_txt = get_text_symbols(ctx, merged_resource_sources_dir, [dep for dep in deps if AndroidResourceInfo in dep or ExportedAndroidResourceInfo in dep]) - entries.extend([merged_resource_sources_dir, r_dot_txt]) + r_dot_txt = 
get_text_symbols(ctx, merged_resource_sources_dir, [dep for dep in deps if AndroidResourceInfo in dep or ExportedAndroidResourceInfo in dep]) + entries.extend([merged_resource_sources_dir, r_dot_txt]) assets_dirs = [resource_infos.assets for resource_infos in resource_infos if resource_infos.assets] entries.extend(assets_dirs) @@ -84,28 +121,31 @@ def android_aar_impl(ctx: AnalysisContext) -> list[Provider]: if cxx_resources: entries.append(cxx_resources) - enhancement_ctx = create_enhancement_context(ctx) - android_binary_native_library_info = get_android_binary_native_library_info(enhancement_ctx, android_packageable_info, deps_by_platform) - native_libs_file = ctx.actions.write("native_libs_entries.txt", android_binary_native_library_info.native_libs_for_primary_apk) - native_libs_assets_file = ctx.actions.write("native_libs_assets_entries.txt", android_binary_native_library_info.root_module_native_lib_assets) + native_libs_file = argfile(actions = ctx.actions, name = "native_libs_entries.txt", args = android_binary_native_library_info.native_libs_for_primary_apk) + native_libs_assets_file = argfile(actions = ctx.actions, name = "native_libs_assets_entries.txt", args = android_binary_native_library_info.root_module_native_lib_assets) entries_file = ctx.actions.write("entries.txt", entries) aar = ctx.actions.declare_output("{}.aar".format(ctx.label.name)) - create_aar_cmd = cmd_args([ - android_toolchain.aar_builder, - "--output_path", - aar.as_output(), - "--entries_file", - entries_file, - "--on_duplicate_entry", - "fail", - "--native_libs_file", - native_libs_file, - "--native_libs_assets_file", - native_libs_assets_file, - ]).hidden(entries, android_binary_native_library_info.native_libs_for_primary_apk, android_binary_native_library_info.root_module_native_lib_assets) + create_aar_cmd = cmd_args( + [ + android_toolchain.aar_builder, + "--output_path", + aar.as_output(), + "--entries_file", + entries_file, + "--on_duplicate_entry", + "fail", + "--native_libs_file", + native_libs_file, + "--native_libs_assets_file", + native_libs_assets_file, + ], + hidden = [ + entries, + ], + ) ctx.actions.run(create_aar_cmd, category = "create_aar") - return [DefaultInfo(default_outputs = [aar], sub_targets = enhancement_ctx.get_sub_targets())] + return [DefaultInfo(default_outputs = [aar], sub_targets = enhancement_ctx.get_sub_targets() | sub_targets)] diff --git a/prelude/android/android_apk.bzl b/prelude/android/android_apk.bzl index c084d4e258ffd..66bf15c405f24 100644 --- a/prelude/android/android_apk.bzl +++ b/prelude/android/android_apk.bzl @@ -5,11 +5,16 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
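# A minimal sketch of the argfile migration running through this patch,
# assuming only what the call sites here show: `argfile` (loaded just below
# from @prelude//utils:argfile.bzl) returns a value usable as an @-file that
# also carries its args as hidden inputs. Names below are hypothetical.
load("@prelude//utils:argfile.bzl", "argfile")

def _entries_flag(ctx, jars):
    # before: ctx.actions.write("entries.txt", jars) plus an explicit
    # .hidden(jars) on the cmd_args; after: one value covers both
    at_file = argfile(actions = ctx.actions, name = "entries.txt", args = jars)
    return cmd_args(["--entries-to-jar", at_file])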
+load("@prelude//:validation_deps.bzl", "get_validation_deps_outputs") load("@prelude//android:android_binary.bzl", "get_binary_info") load("@prelude//android:android_providers.bzl", "AndroidApkInfo", "AndroidApkUnderTestInfo", "AndroidBinaryNativeLibsInfo", "AndroidBinaryResourcesInfo", "DexFilesInfo", "ExopackageInfo") load("@prelude//android:android_toolchain.bzl", "AndroidToolchainInfo") +load("@prelude//java:class_to_srcs.bzl", "merge_class_to_source_map_from_jar") load("@prelude//java:java_providers.bzl", "KeystoreInfo") -load("@prelude//java/utils:java_utils.bzl", "get_class_to_source_map_info", "get_path_separator_for_exec_os") +load("@prelude//java:java_toolchain.bzl", "JavaToolchainInfo") +load("@prelude//java/utils:java_more_utils.bzl", "get_path_separator_for_exec_os") +load("@prelude//java/utils:java_utils.bzl", "get_class_to_source_map_info") +load("@prelude//utils:argfile.bzl", "argfile") load("@prelude//utils:set.bzl", "set") def android_apk_impl(ctx: AnalysisContext) -> list[Provider]: @@ -30,35 +35,64 @@ def android_apk_impl(ctx: AnalysisContext) -> list[Provider]: native_library_info = native_library_info, resources_info = resources_info, compress_resources_dot_arsc = ctx.attrs.resource_compression == "enabled" or ctx.attrs.resource_compression == "enabled_with_strings_as_assets", + validation_deps_outputs = get_validation_deps_outputs(ctx), + packaging_options = ctx.attrs.packaging_options, ) - exopackage_info = ExopackageInfo( - secondary_dex_info = dex_files_info.secondary_dex_exopackage_info, - native_library_info = native_library_info.exopackage_info, - resources_info = resources_info.exopackage_info, - ) + if dex_files_info.secondary_dex_exopackage_info or native_library_info.exopackage_info or resources_info.exopackage_info: + exopackage_info = ExopackageInfo( + secondary_dex_info = dex_files_info.secondary_dex_exopackage_info, + native_library_info = native_library_info.exopackage_info, + resources_info = resources_info.exopackage_info, + ) + default_output = ctx.actions.write( + "{}_exopackage_apk_warning".format(ctx.label.name), + "exopackage apks should not be used externally, try buck install or building with exopackage disabled\n", + ) + sub_targets["exo_apk"] = [DefaultInfo(default_output = output_apk)] # Used by tests + else: + exopackage_info = None + default_output = output_apk - class_to_srcs, class_to_srcs_subtargets = get_class_to_source_map_info( + class_to_srcs, _, class_to_srcs_subtargets = get_class_to_source_map_info( ctx, outputs = None, deps = android_binary_info.deps_by_platform[android_binary_info.primary_platform], ) + transitive_class_to_src_map = merge_class_to_source_map_from_jar( + actions = ctx.actions, + name = ctx.label.name + ".transitive_class_to_src.json", + java_toolchain = ctx.attrs._java_toolchain[JavaToolchainInfo], + relative_to = None, + deps = [class_to_srcs], + ) + sub_targets["transitive_class_to_src_map"] = [DefaultInfo(default_output = transitive_class_to_src_map)] + + # We can only be sure that an APK has native libs if it has any shared libraries. Prebuilt native libraries dirs can exist but be empty. 
+ definitely_has_native_libs = bool(native_library_info.shared_libraries) + + install_info = get_install_info(ctx, output_apk = output_apk, manifest = resources_info.manifest, exopackage_info = exopackage_info, definitely_has_native_libs = definitely_has_native_libs) return [ - AndroidApkInfo(apk = output_apk, manifest = resources_info.manifest), + AndroidApkInfo( + apk = output_apk, + manifest = resources_info.manifest, + materialized_artifacts = android_binary_info.materialized_artifacts, + unstripped_shared_libraries = native_library_info.unstripped_shared_libraries, + ), AndroidApkUnderTestInfo( java_packaging_deps = set([dep.label.raw_target() for dep in java_packaging_deps]), keystore = keystore, manifest_entries = ctx.attrs.manifest_entries, - prebuilt_native_library_dirs = set([native_lib.raw_target for native_lib in native_library_info.apk_under_test_prebuilt_native_library_dirs]), + prebuilt_native_library_dirs = set([native_lib.raw_target for native_lib in native_library_info.prebuilt_native_library_dirs]), platforms = android_binary_info.deps_by_platform.keys(), primary_platform = android_binary_info.primary_platform, resource_infos = set([info.raw_target for info in resources_info.unfiltered_resource_infos]), r_dot_java_packages = set([info.specified_r_dot_java_package for info in resources_info.unfiltered_resource_infos if info.specified_r_dot_java_package]), - shared_libraries = set(native_library_info.apk_under_test_shared_libraries), + shared_libraries = set(native_library_info.shared_libraries), ), - DefaultInfo(default_output = output_apk, other_outputs = _get_exopackage_outputs(exopackage_info), sub_targets = sub_targets | class_to_srcs_subtargets), - get_install_info(ctx, output_apk = output_apk, manifest = resources_info.manifest, exopackage_info = exopackage_info), + DefaultInfo(default_output = default_output, other_outputs = install_info.files.values() + android_binary_info.materialized_artifacts, sub_targets = sub_targets | class_to_srcs_subtargets), + install_info, TemplatePlaceholderInfo( keyed_variables = { "classpath": cmd_args([dep.jar for dep in java_packaging_deps if dep.jar], delimiter = get_path_separator_for_exec_os(ctx)), @@ -76,10 +110,12 @@ def build_apk( dex_files_info: DexFilesInfo, native_library_info: AndroidBinaryNativeLibsInfo, resources_info: AndroidBinaryResourcesInfo, - compress_resources_dot_arsc: bool = False) -> Artifact: + compress_resources_dot_arsc: bool = False, + validation_deps_outputs: [list[Artifact], None] = None, + packaging_options: dict | None = None) -> Artifact: output_apk = actions.declare_output("{}.apk".format(label.name)) - apk_builder_args = cmd_args([ + apk_builder_args = cmd_args( android_toolchain.apk_builder[RunInfo], "--output-apk", output_apk.as_output(), @@ -93,12 +129,12 @@ def build_apk( keystore.properties, "--zipalign_tool", android_toolchain.zipalign[RunInfo], - ]) - - if android_toolchain.package_meta_inf_version_files: - apk_builder_args.add("--package-meta-inf-version-files") - if compress_resources_dot_arsc: - apk_builder_args.add("--compress-resources-dot-arsc") + "--package-meta-inf-version-files" if android_toolchain.package_meta_inf_version_files else [], + "--compress-resources-dot-arsc" if compress_resources_dot_arsc else [], + # The outputs of validation_deps need to be added as hidden arguments + # to an action for the validation_deps targets to be built and enforced. 
+ hidden = validation_deps_outputs or [], + ) asset_directories = ( native_library_info.root_module_native_lib_assets + @@ -107,15 +143,11 @@ def build_apk( dex_files_info.non_root_module_secondary_dex_dirs + resources_info.module_manifests ) - asset_directories_file = actions.write("asset_directories.txt", asset_directories) - apk_builder_args.hidden(asset_directories) - native_library_directories = actions.write("native_library_directories", native_library_info.native_libs_for_primary_apk) - apk_builder_args.hidden(native_library_info.native_libs_for_primary_apk) + asset_directories_file = argfile(actions = actions, name = "asset_directories.txt", args = asset_directories) + native_library_directories = argfile(actions = actions, name = "native_library_directories", args = native_library_info.native_libs_for_primary_apk) all_zip_files = [resources_info.packaged_string_assets] if resources_info.packaged_string_assets else [] - zip_files = actions.write("zip_files", all_zip_files) - apk_builder_args.hidden(all_zip_files) - jar_files_that_may_contain_resources = actions.write("jar_files_that_may_contain_resources", resources_info.jar_files_that_may_contain_resources) - apk_builder_args.hidden(resources_info.jar_files_that_may_contain_resources) + zip_files = argfile(actions = actions, name = "zip_files", args = all_zip_files) + jar_files_that_may_contain_resources = argfile(actions = actions, name = "jar_files_that_may_contain_resources", args = resources_info.jar_files_that_may_contain_resources) apk_builder_args.add([ "--asset-directories-list", @@ -128,15 +160,28 @@ def build_apk( jar_files_that_may_contain_resources, ]) + if packaging_options: + for key, value in packaging_options.items(): + if key != "excluded_resources": + fail("Only 'excluded_resources' is supported in packaging_options right now!") + else: + apk_builder_args.add("--excluded-resources", actions.write("excluded_resources.txt", value)) + actions.run(apk_builder_args, category = "apk_build") return output_apk -def get_install_info(ctx: AnalysisContext, output_apk: Artifact, manifest: Artifact, exopackage_info: [ExopackageInfo, None]) -> InstallInfo: +def get_install_info( + ctx: AnalysisContext, + output_apk: Artifact, + manifest: Artifact, + exopackage_info: [ExopackageInfo, None], + definitely_has_native_libs: bool = True, + apex_mode: bool = False) -> InstallInfo: files = { ctx.attrs.name: output_apk, "manifest": manifest, - "options": generate_install_config(ctx), + "options": generate_install_config(ctx, apex_mode), } if exopackage_info: @@ -167,7 +212,7 @@ def get_install_info(ctx: AnalysisContext, output_apk: Artifact, manifest: Artif if secondary_dex_exopackage_info or native_library_exopackage_info or resources_info: files["exopackage_agent_apk"] = ctx.attrs._android_toolchain[AndroidToolchainInfo].exopackage_agent_apk - if hasattr(ctx.attrs, "cpu_filters"): + if definitely_has_native_libs and hasattr(ctx.attrs, "cpu_filters"): files["cpu_filters"] = ctx.actions.write("cpu_filters.txt", ctx.attrs.cpu_filters) return InstallInfo( @@ -175,45 +220,23 @@ def get_install_info(ctx: AnalysisContext, output_apk: Artifact, manifest: Artif files = files, ) -def _get_exopackage_outputs(exopackage_info: ExopackageInfo) -> list[Artifact]: - outputs = [] - secondary_dex_exopackage_info = exopackage_info.secondary_dex_info - if secondary_dex_exopackage_info: - outputs.append(secondary_dex_exopackage_info.metadata) - outputs.append(secondary_dex_exopackage_info.directory) - - native_library_exopackage_info = 
exopackage_info.native_library_info - if native_library_exopackage_info: - outputs.append(native_library_exopackage_info.metadata) - outputs.append(native_library_exopackage_info.directory) - - resources_info = exopackage_info.resources_info - if resources_info: - outputs.append(resources_info.res) - outputs.append(resources_info.res_hash) - - if resources_info.assets: - outputs.append(resources_info.assets) - outputs.append(resources_info.assets_hash) - - return outputs - -def generate_install_config(ctx: AnalysisContext) -> Artifact: - data = get_install_config() +def generate_install_config(ctx: AnalysisContext, apex_mode: bool) -> Artifact: + data = get_install_config(apex_mode) return ctx.actions.write_json("install_android_options.json", data) -def get_install_config() -> dict[str, typing.Any]: +def get_install_config(apex_mode: bool) -> dict[str, typing.Any]: # TODO: read from toolchains install_config = { "adb_restart_on_failure": read_root_config("adb", "adb_restart_on_failure", "true"), "agent_port_base": read_root_config("adb", "agent_port_base", "2828"), "always_use_java_agent": read_root_config("adb", "always_use_java_agent", "false"), + "apex_mode": apex_mode, "is_zstd_compression_enabled": read_root_config("adb", "is_zstd_compression_enabled", "false"), "max_retries": read_root_config("adb", "retries", "5"), "multi_install_mode": read_root_config("adb", "multi_install_mode", "false"), "retry_delay_millis": read_root_config("adb", "retry_delay_millis", "500"), "skip_install_metadata": read_root_config("adb", "skip_install_metadata", "false"), - "staged_install_mode": read_root_config("adb", "staged_install_mode", "false"), + "staged_install_mode": read_root_config("adb", "staged_install_mode", None), } adb_executable = read_root_config("android", "adb", None) diff --git a/prelude/android/android_binary.bzl b/prelude/android/android_binary.bzl index 5f823c6e79008..f1f2908f3bdc3 100644 --- a/prelude/android/android_binary.bzl +++ b/prelude/android/android_binary.bzl @@ -10,6 +10,8 @@ load("@prelude//android:android_binary_resources_rules.bzl", "get_android_binary load("@prelude//android:android_build_config.bzl", "generate_android_build_config", "get_build_config_fields") load( "@prelude//android:android_providers.bzl", + "AndroidBinaryNativeLibsInfo", # @unused Used as type + "AndroidBinaryResourcesInfo", # @unused Used as type "AndroidBuildConfigInfo", # @unused Used as type "BuildConfigField", "DexFilesInfo", @@ -24,21 +26,30 @@ load("@prelude//android:preprocess_java_classes.bzl", "get_preprocessed_java_cla load("@prelude//android:proguard.bzl", "get_proguard_output") load("@prelude//android:util.bzl", "create_enhancement_context") load("@prelude//android:voltron.bzl", "get_target_to_module_mapping") -load("@prelude//java:java_providers.bzl", "JavaPackagingInfo", "create_java_packaging_dep", "get_all_java_packaging_deps", "get_all_java_packaging_deps_from_packaging_infos") -load("@prelude//utils:utils.bzl", "expect") +load( + "@prelude//java:java_providers.bzl", + "JavaPackagingDep", # @unused Used as type + "JavaPackagingInfo", + "create_java_packaging_dep", + "get_all_java_packaging_deps", + "get_all_java_packaging_deps_from_packaging_infos", +) +load("@prelude//utils:expect.bzl", "expect") AndroidBinaryInfo = record( sub_targets = dict, - java_packaging_deps = list["JavaPackagingDep"], + java_packaging_deps = list[JavaPackagingDep], deps_by_platform = dict, primary_platform = str, dex_files_info = DexFilesInfo, - native_library_info = "AndroidBinaryNativeLibsInfo", - 
resources_info = "AndroidBinaryResourcesInfo", + native_library_info = AndroidBinaryNativeLibsInfo, + resources_info = AndroidBinaryResourcesInfo, + materialized_artifacts = list[Artifact], ) def get_binary_info(ctx: AnalysisContext, use_proto_format: bool) -> AndroidBinaryInfo: sub_targets = {} + materialized_artifacts = [] _verify_params(ctx) @@ -80,17 +91,34 @@ def get_binary_info(ctx: AnalysisContext, use_proto_format: bool) -> AndroidBina use_proto_format = use_proto_format, referenced_resources_lists = referenced_resources_lists, manifest_entries = ctx.attrs.manifest_entries, + generate_strings_and_ids_separately = should_pre_dex, aapt2_preferred_density = ctx.attrs.aapt2_preferred_density, ) + sub_targets["manifest"] = [DefaultInfo(default_output = resources_info.manifest)] android_toolchain = ctx.attrs._android_toolchain[AndroidToolchainInfo] - java_packaging_deps += [ + compiled_r_dot_java_deps = [ create_java_packaging_dep( ctx, - r_dot_java.library_output.full_library, + r_dot_java.library_info.library_output.full_library, dex_weight_factor = android_toolchain.r_dot_java_weight_factor, ) - for r_dot_java in resources_info.r_dot_javas + for r_dot_java in resources_info.r_dot_java_infos ] + java_packaging_deps += compiled_r_dot_java_deps + sub_targets["compiled_r_dot_java"] = [ + DefaultInfo( + default_outputs = [ + compiled_r_dot_java_dep.jar + for compiled_r_dot_java_dep in compiled_r_dot_java_deps + ], + ), + ] + for r_dot_java_info in resources_info.r_dot_java_infos: + sub_targets[r_dot_java_info.identifier + "_src"] = [ + DefaultInfo( + default_output = r_dot_java_info.source_zipped, + ), + ] dex_java_packaging_deps = [packaging_dep for packaging_dep in java_packaging_deps if packaging_dep.dex and packaging_dep.dex.dex.owner.raw_target() not in no_dx_target_labels] if should_pre_dex: @@ -108,7 +136,9 @@ def get_binary_info(ctx: AnalysisContext, use_proto_format: bool) -> AndroidBina else: jars_to_owners = {packaging_dep.jar: packaging_dep.jar.owner.raw_target() for packaging_dep in dex_java_packaging_deps} if ctx.attrs.preprocess_java_classes_bash: - jars_to_owners = get_preprocessed_java_classes(ctx, jars_to_owners) + jars_to_owners, materialized_artifacts_dir = get_preprocessed_java_classes(enhancement_ctx, jars_to_owners) + if materialized_artifacts_dir: + materialized_artifacts.append(materialized_artifacts_dir) if has_proguard_config: proguard_output = get_proguard_output( ctx, @@ -117,6 +147,7 @@ def get_binary_info(ctx: AnalysisContext, use_proto_format: bool) -> AndroidBina resources_info.proguard_config_file, [no_dx[DefaultInfo].default_outputs[0] for no_dx in ctx.attrs.no_dx if len(no_dx[DefaultInfo].default_outputs) == 1], ) + materialized_artifacts.extend(proguard_output.proguard_artifacts) jars_to_owners = proguard_output.jars_to_owners dir_srcs = {artifact.basename: artifact for artifact in proguard_output.proguard_artifacts} for i, hidden_artifact in enumerate(proguard_output.proguard_hidden_artifacts): @@ -169,6 +200,7 @@ def get_binary_info(ctx: AnalysisContext, use_proto_format: bool) -> AndroidBina dex_files_info = dex_files_info, native_library_info = native_library_info, resources_info = resources_info, + materialized_artifacts = materialized_artifacts, ) def get_build_config_java_libraries( @@ -189,6 +221,8 @@ def get_build_config_java_libraries( default_build_config_fields = get_build_config_fields(ctx.attrs.build_config_values) + android_binary_values_file = ctx.attrs.build_config_values_file[DefaultInfo].default_outputs[0] if 
isinstance(ctx.attrs.build_config_values_file, Dependency) else ctx.attrs.build_config_values_file + java_libraries = [] java_packages_seen = [] for build_config_info in build_config_infos: @@ -200,13 +234,14 @@ def get_build_config_java_libraries( for build_config_field in build_config_info.build_config_fields + default_build_config_fields + build_config_constants: all_build_config_values[build_config_field.name] = build_config_field + values_file = android_binary_values_file if android_binary_values_file else build_config_info.values_file java_libraries.append(generate_android_build_config( ctx, java_package, java_package, True, # use_constant_expressions all_build_config_values.values(), - ctx.attrs.build_config_values_file[DefaultInfo].default_outputs[0] if isinstance(ctx.attrs.build_config_values_file, Dependency) else ctx.attrs.build_config_values_file, + values_file, )[1]) return java_libraries diff --git a/prelude/android/android_binary_native_library_rules.bzl b/prelude/android/android_binary_native_library_rules.bzl index 7e7f12ea33b77..834b4834f27eb 100644 --- a/prelude/android/android_binary_native_library_rules.bzl +++ b/prelude/android/android_binary_native_library_rules.bzl @@ -17,7 +17,7 @@ load("@prelude//android:android_toolchain.bzl", "AndroidToolchainInfo") load("@prelude//android:cpu_filters.bzl", "CPU_FILTER_FOR_PRIMARY_PLATFORM", "CPU_FILTER_TO_ABI_DIRECTORY") load("@prelude//android:util.bzl", "EnhancementContext") load("@prelude//android:voltron.bzl", "ROOT_MODULE", "all_targets_in_root_module", "get_apk_module_graph_info", "is_root_module") -load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxToolchainInfo") +load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxToolchainInfo", "PicBehavior") load( "@prelude//cxx:link.bzl", "cxx_link_shared_library", @@ -25,7 +25,7 @@ load( load("@prelude//cxx:link_types.bzl", "link_options") load( "@prelude//cxx:symbols.bzl", - "extract_global_syms", + "extract_defined_syms", "extract_undefined_syms", ) load("@prelude//java:java_library.bzl", "compile_to_jar") # @unused @@ -36,9 +36,11 @@ load( "LibOutputStyle", "LinkArgs", "LinkInfo", - "Linkage", + "LinkOrdering", "SharedLibLinkable", + "get_lib_output_style", "set_link_info_link_whole", + "unpack_link_args", "wrap_link_info", ) load( @@ -52,14 +54,19 @@ load( "@prelude//linking:shared_libraries.bzl", "SharedLibrary", # @unused Used as a type "SharedLibraryInfo", # @unused Used as a type + "create_shlib", "get_strip_non_global_flags", "merge_shared_libraries", "traverse_shared_library_info", + "with_unique_str_sonames", ) load("@prelude//linking:strip.bzl", "strip_object") -load("@prelude//utils:graph_utils.bzl", "breadth_first_traversal_by", "post_order_traversal", "topo_sort", "topo_sort_by") -load("@prelude//utils:set.bzl", "set_type") # @unused Used as a type -load("@prelude//utils:utils.bzl", "dedupe_by_value", "expect") +load("@prelude//linking:types.bzl", "Linkage") +load("@prelude//utils:argfile.bzl", "argfile") +load("@prelude//utils:expect.bzl", "expect") +load("@prelude//utils:graph_utils.bzl", "GraphTraversal", "depth_first_traversal_by", "post_order_traversal", "pre_order_traversal") +load("@prelude//utils:set.bzl", "set", "set_type") # @unused Used as a type +load("@prelude//utils:utils.bzl", "dedupe_by_value") # Native libraries on Android are built for a particular Application Binary Interface (ABI). We # package native libraries for one (or more, for multi-arch builds) ABIs into an Android APK. 
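# An illustrative sketch of the two packaging locations the comment in the
# hunk below describes (the helper and its parameters are hypothetical;
# `paths` is the prelude path utility used elsewhere in this file):
load("@prelude//:paths.bzl", "paths")

def _packaged_path(abi: str, soname: str, as_asset: bool) -> str:
    if as_asset:
        # asset-eligible root-module libraries land under assets/lib/
        return paths.join("assets", "lib", abi, soname)

    # everything else is packaged as a regular APK library under lib/
    return paths.join("lib", abi, soname)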
@@ -76,22 +83,19 @@ load("@prelude//utils:utils.bzl", "dedupe_by_value", "expect") # 2. As assets. These are passed to the APK build as assets, and are stored at # `assets/lib//library.so` In the root module, we only package a native library as an # asset if it is eligible to be an asset (e.g. `can_be_asset` on a `cxx_library`), and -# `package_asset_libraries` is set to True for the APK. We will additionally compress all the -# assets into a single `assets/lib/libs.xz` (or `assets/libs/libs.zstd` for `zstd` compression) -# if `compress_asset_libraries` is set to True for the APK. Regardless of whether we compress -# the assets or not, we create a metadata file at `assets/libs/metadata.txt` that has a single -# line entry for each packaged asset consisting of ' '. +# `package_asset_libraries` is set to True for the APK. We create a metadata file at +# `assets/libs/metadata.txt` that has a single line entry for each packaged asset consisting of +# ' '. # # Any native library that is not part of the root module (i.e. it is part of some other Voltron -# module) is automatically packaged as an asset, and the assets for each module are compressed -# to a single `assets//libs.xz`. Similarly, the metadata for each module is stored +# module) is automatically packaged as an asset. Similarly, the metadata for each module is stored # at `assets//libs.txt`. def get_android_binary_native_library_info( enhance_ctx: EnhancementContext, android_packageable_info: AndroidPackageableInfo, deps_by_platform: dict[str, list[Dependency]], - apk_module_graph_file: [Artifact, None] = None, + apk_module_graph_file: Artifact | None = None, prebuilt_native_library_dirs_to_exclude: [set_type, None] = None, shared_libraries_to_exclude: [set_type, None] = None) -> AndroidBinaryNativeLibsInfo: ctx = enhance_ctx.ctx @@ -104,14 +108,28 @@ def get_android_binary_native_library_info( ] included_shared_lib_targets = [] - platform_to_original_native_linkables = {} + original_shared_libs_by_platform = {} # dict[str, dict[str (soname), list[SharedLibrary]]] for platform, deps in deps_by_platform.items(): - if platform == CPU_FILTER_FOR_PRIMARY_PLATFORM and platform not in ctx.attrs.cpu_filters: + if platform == CPU_FILTER_FOR_PRIMARY_PLATFORM and platform not in ctx.attrs.cpu_filters and len(ctx.attrs.cpu_filters) != 0: continue - native_linkables = get_native_linkables_by_default(ctx, platform, deps, shared_libraries_to_exclude) - included_shared_lib_targets.extend([lib.label.raw_target() for lib in native_linkables.values()]) - platform_to_original_native_linkables[platform] = native_linkables + shared_libs = get_default_shared_libs(ctx, deps, shared_libraries_to_exclude) + included_shared_lib_targets.extend([lib.label.raw_target() for lib in shared_libs.values()]) + original_shared_libs_by_platform[platform] = shared_libs + + if not all_prebuilt_native_library_dirs and not included_shared_lib_targets: + enhance_ctx.debug_output("unstripped_native_libraries", ctx.actions.write("unstripped_native_libraries", [])) + enhance_ctx.debug_output("unstripped_native_libraries_json", ctx.actions.write_json("unstripped_native_libraries_json", {})) + return AndroidBinaryNativeLibsInfo( + prebuilt_native_library_dirs = [], + shared_libraries = [], + native_libs_for_primary_apk = [], + exopackage_info = None, + root_module_native_lib_assets = [], + non_root_module_native_lib_assets = [], + generated_java_code = [], + unstripped_shared_libraries = None, + ) native_libs = ctx.actions.declare_output("native_libs_symlink") native_libs_metadata 
= ctx.actions.declare_output("native_libs_metadata_symlink") @@ -119,9 +137,8 @@ def get_android_binary_native_library_info( native_lib_assets_for_primary_apk = ctx.actions.declare_output("native_lib_assets_for_primary_apk_symlink") stripped_native_linkable_assets_for_primary_apk = ctx.actions.declare_output("stripped_native_linkable_assets_for_primary_apk_symlink") root_module_metadata_assets = ctx.actions.declare_output("root_module_metadata_assets_symlink") - root_module_compressed_lib_assets = ctx.actions.declare_output("root_module_compressed_lib_assets_symlink") non_root_module_metadata_assets = ctx.actions.declare_output("non_root_module_metadata_assets_symlink") - non_root_module_compressed_lib_assets = ctx.actions.declare_output("non_root_module_compressed_lib_assets_symlink") + non_root_module_lib_assets = ctx.actions.declare_output("non_root_module_lib_assets_symlink") unstripped_native_libraries = ctx.actions.declare_output("unstripped_native_libraries") unstripped_native_libraries_json = ctx.actions.declare_output("unstripped_native_libraries_json") @@ -137,9 +154,8 @@ def get_android_binary_native_library_info( unstripped_native_libraries_files, stripped_native_linkable_assets_for_primary_apk, root_module_metadata_assets, - root_module_compressed_lib_assets, non_root_module_metadata_assets, - non_root_module_compressed_lib_assets, + non_root_module_lib_assets, ] fake_input = ctx.actions.write("dynamic.trigger", "") @@ -148,7 +164,7 @@ def get_android_binary_native_library_info( dynamic_inputs = [fake_input] if apk_module_graph_file: dynamic_inputs.append(apk_module_graph_file) - native_library_merge_map = None + split_groups_map = None native_library_merge_dir = None native_merge_debug = None generated_java_code = [] @@ -164,15 +180,17 @@ def get_android_binary_native_library_info( expect(glue_linkable.preferred_linkage == Linkage("static"), "buck2 currently only supports preferred_linkage='static' native_library_merge_glue") glue_linkables[platform] = (glue.label, glue_linkable.link_infos[LibOutputStyle("pic_archive")].default) - flattened_linkable_graphs_by_platform = {} + linkable_nodes_by_platform = {} native_library_merge_sequence = getattr(ctx.attrs, "native_library_merge_sequence", None) - has_native_merging = native_library_merge_sequence or getattr(ctx.attrs, "native_library_merge_map", None) + native_library_merge_map = getattr(ctx.attrs, "native_library_merge_map", None) + native_library_merge_non_asset_libs = getattr(ctx.attrs, "native_library_merge_non_asset_libs", False) + has_native_merging = native_library_merge_sequence or native_library_merge_map + enable_relinker = getattr(ctx.attrs, "enable_relinker", False) - if has_native_merging: - native_merge_debug = ctx.actions.declare_output("native_merge.debug") + if has_native_merging or enable_relinker: + native_merge_debug = ctx.actions.declare_output("native_merge_debug", dir = True) dynamic_outputs.append(native_merge_debug) - if native_library_merge_sequence: # We serialize info about the linkable graph and the apk module mapping and pass that to an # external subcommand to apply a merge sequence algorithm and return us the merge mapping. for platform, deps in deps_by_platform.items(): @@ -180,26 +198,31 @@ def get_android_binary_native_library_info( graph_node_map = get_linkable_graph_node_map_func(linkable_graph)() linkables_debug = ctx.actions.write("linkables." + platform, list(graph_node_map.keys())) enhance_ctx.debug_output("linkables." 
+ platform, linkables_debug) + linkable_nodes_by_platform[platform] = graph_node_map + + lib_outputs_by_platform = _declare_library_subtargets(ctx, dynamic_outputs, original_shared_libs_by_platform, native_library_merge_map, native_library_merge_sequence, enable_relinker) - flattened_linkable_graphs_by_platform[platform] = graph_node_map # _get_flattened_linkable_graph(ctx, graph_node_map) + if native_library_merge_sequence: native_library_merge_input_file = ctx.actions.write_json("mergemap.input", { - "linkable_graphs_by_platform": encode_linkable_graph_for_mergemap(flattened_linkable_graphs_by_platform), + "linkable_graphs_by_platform": encode_linkable_graph_for_mergemap(linkable_nodes_by_platform), "native_library_merge_sequence": ctx.attrs.native_library_merge_sequence, - "native_library_merge_sequence_blocklist": ctx.attrs.native_library_merge_sequence_blocklist, + "native_library_merge_sequence_blocklist": ctx.attrs.native_library_merge_sequence_blocklist or [], }) mergemap_cmd = cmd_args(ctx.attrs._android_toolchain[AndroidToolchainInfo].mergemap_tool) mergemap_cmd.add(cmd_args(native_library_merge_input_file, format = "--mergemap-input={}")) if apk_module_graph_file: mergemap_cmd.add(cmd_args(apk_module_graph_file, format = "--apk-module-graph={}")) + if native_library_merge_non_asset_libs: + mergemap_cmd.add(cmd_args("--merge-non-asset-libs")) native_library_merge_dir = ctx.actions.declare_output("merge_sequence_output") native_library_merge_map = native_library_merge_dir.project("merge.map") + split_groups_map = native_library_merge_dir.project("split_groups.map") mergemap_cmd.add(cmd_args(native_library_merge_dir.as_output(), format = "--output={}")) ctx.actions.run(mergemap_cmd, category = "compute_mergemap") enhance_ctx.debug_output("compute_merge_sequence", native_library_merge_dir) dynamic_inputs.append(native_library_merge_map) - elif has_native_merging: - flattened_linkable_graphs_by_platform = {platform: {} for platform in platform_to_original_native_linkables.keys()} + dynamic_inputs.append(split_groups_map) mergemap_gencode_jar = None if has_native_merging and ctx.attrs.native_library_merge_code_generator: @@ -210,6 +233,7 @@ def get_android_binary_native_library_info( abi = mergemap_gencode_jar, abi_as_dir = None, required_for_source_only_abi = False, + abi_jar_snapshot = None, ) generated_java_code.append( JavaLibraryInfo( @@ -221,9 +245,18 @@ def get_android_binary_native_library_info( def dynamic_native_libs_info(ctx: AnalysisContext, artifacts, outputs): get_module_from_target = all_targets_in_root_module + get_module_tdeps = all_targets_in_root_module + get_calculated_module_deps = all_targets_in_root_module + get_deps_debug_data = None if apk_module_graph_file: - get_module_from_target = get_apk_module_graph_info(ctx, apk_module_graph_file, artifacts).target_to_module_mapping_function - + apk_module_graph = get_apk_module_graph_info(ctx, apk_module_graph_file, artifacts) + get_module_from_target = apk_module_graph.target_to_module_mapping_function + get_module_tdeps = apk_module_graph.transitive_module_deps_function + get_calculated_module_deps = apk_module_graph.calculated_deps_function + get_deps_debug_data = apk_module_graph.get_deps_debug_data + + split_groups = None + merged_shared_lib_targets_by_platform = {} # dict[str, dict[Label, str]] if has_native_merging: native_library_merge_debug_outputs = {} @@ -231,45 +264,69 @@ def get_android_binary_native_library_info( # then set it as the binary's precomputed_apk_module_graph attr. 
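# Sketch of the first-match merge-map lookup performed a little further
# below, assuming buck2's Starlark `regex()` exactly as this hunk uses it
# (patterns are pre-compiled once per merge lib); inputs are illustrative.
def _merge_lib_for_target(raw_target: str, merge_lib_to_regexes: dict) -> [str, None]:
    for merge_lib, regexes in merge_lib_to_regexes.items():
        for r in regexes:
            if r.match(raw_target):
                # the first merge lib whose pattern matches wins
                return merge_lib
    return None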
if ctx.attrs.native_library_merge_sequence: merge_map_by_platform = artifacts[native_library_merge_map].read_json() + split_groups = artifacts[split_groups_map].read_json() native_library_merge_debug_outputs["merge_sequence_output"] = native_library_merge_dir elif ctx.attrs.native_library_merge_map: merge_map_by_platform = {} - for platform, linkable_nodes in flattened_linkable_graphs_by_platform.items(): + for platform, linkable_nodes in linkable_nodes_by_platform.items(): merge_map = merge_map_by_platform.setdefault(platform, {}) + merge_lib_to_fancy_regexes = { + merge_lib: [regex(pattern, fancy = True) for pattern in patterns] + for merge_lib, patterns in ctx.attrs.native_library_merge_map.items() + } for target, _node in linkable_nodes.items(): raw_target = str(target.raw_target()) merge_result = None - for merge_lib, patterns in ctx.attrs.native_library_merge_map.items(): - if merge_result: - break - for pattern in patterns: - if pattern.match(raw_target): + for merge_lib, fancy_regexes in merge_lib_to_fancy_regexes.items(): + for fancy_regex in fancy_regexes: + if fancy_regex.match(raw_target): merge_result = merge_lib break + if merge_result: + break if merge_result: merge_map[str(target)] = merge_result - merge_map = ctx.actions.write_json("merge.map", merge_map_by_platform) + merge_map = ctx.actions.write_json("merge.map", merge_map_by_platform, pretty = True) native_library_merge_debug_outputs["merge_map_output"] = merge_map else: fail("unreachable") - merged_linkables = _get_merged_linkables( - ctx, - { - platform: LinkableMergeData( - glue_linkable = glue_linkables[platform], - default_shared_libs = platform_to_original_native_linkables[platform], - linkable_nodes = flattened_linkable_graphs_by_platform[platform], - merge_map = merge_map_by_platform[platform], - apk_module_graph = get_module_from_target, - ) - for platform in platform_to_original_native_linkables - }, - ) - debug_data_json = ctx.actions.write_json("native_merge_debug.json", merged_linkables.debug_info) + shared_object_targets = {} + debug_info_by_platform = {} # dict[str, MergedLinkablesDebugInfo] + merged_shared_libs_by_platform = {} # dict[str, dict[str, MergedSharedLibrary]] + for platform in original_shared_libs_by_platform: + merged_shared_libs, debug_info = _get_merged_linkables_for_platform( + ctx, + ctx.attrs._cxx_toolchain[platform][CxxToolchainInfo], + platform if len(original_shared_libs_by_platform) > 1 else None, + glue_linkable = glue_linkables[platform] if glue_linkables else None, + default_shared_libs = original_shared_libs_by_platform[platform], + linkable_nodes = linkable_nodes_by_platform[platform], + merge_map = merge_map_by_platform[platform], + merge_linker_args = ctx.attrs.native_library_merge_linker_args or {}, + apk_module_graph = get_module_from_target, + ) + debug_info_by_platform[platform] = debug_info + merged_shared_libs_by_platform[platform] = merged_shared_libs + merged_shared_lib_targets = {} + for soname, lib in merged_shared_libs.items(): + shared_object_targets[soname] = [str(target.raw_target()) for target in lib.primary_constituents] + + for target in lib.primary_constituents: + merged_shared_lib_targets[target] = soname + merged_shared_lib_targets_by_platform[platform] = merged_shared_lib_targets + + debug_data_json = ctx.actions.write_json("native_merge_debug.json", debug_info_by_platform, pretty = True) native_library_merge_debug_outputs["native_merge_debug.json"] = debug_data_json + + shared_object_targets_lines = "" + for soname, targets in 
shared_object_targets.items(): + shared_object_targets_lines += soname + " " + " ".join(targets) + "\n" + shared_object_targets_txt = ctx.actions.write("shared_object_targets.txt", shared_object_targets_lines) + native_library_merge_debug_outputs["shared_object_targets.txt"] = shared_object_targets_txt + if mergemap_gencode_jar: - merged_library_map = write_merged_library_map(ctx, merged_linkables) + merged_library_map = write_merged_library_map(ctx, merged_shared_libs_by_platform) mergemap_gencode = run_mergemap_codegen(ctx, merged_library_map) compile_to_jar(ctx, [mergemap_gencode], output = outputs[mergemap_gencode_jar]) native_library_merge_debug_outputs["NativeLibraryMergeGeneratedCode.java"] = mergemap_gencode @@ -278,18 +335,30 @@ def get_android_binary_native_library_info( ctx.actions.symlinked_dir(outputs[native_merge_debug], native_library_merge_debug_outputs) - final_platform_to_native_linkables = { + final_shared_libs_by_platform = { platform: {soname: d.lib for soname, d in merged_shared_libs.items()} - for platform, merged_shared_libs in merged_linkables.shared_libs_by_platform.items() + for platform, merged_shared_libs in merged_shared_libs_by_platform.items() } + elif enable_relinker: + final_shared_libs_by_platform, native_library_merge_debug_outputs = _create_all_relinkable_links( + ctx, + original_shared_libs_by_platform, + linkable_nodes_by_platform, + ) + ctx.actions.symlinked_dir(outputs[native_merge_debug], native_library_merge_debug_outputs) + else: - final_platform_to_native_linkables = platform_to_original_native_linkables + final_shared_libs_by_platform = original_shared_libs_by_platform - if getattr(ctx.attrs, "enable_relinker", False): - final_platform_to_native_linkables = relink_libraries(ctx, final_platform_to_native_linkables) + if enable_relinker: + unrelinked_shared_libs_by_platform = final_shared_libs_by_platform + final_shared_libs_by_platform = relink_libraries(ctx, final_shared_libs_by_platform) + _link_library_subtargets(ctx, outputs, lib_outputs_by_platform, original_shared_libs_by_platform, unrelinked_shared_libs_by_platform, merged_shared_lib_targets_by_platform, split_groups, native_merge_debug, unrelinked = True) + + _link_library_subtargets(ctx, outputs, lib_outputs_by_platform, original_shared_libs_by_platform, final_shared_libs_by_platform, merged_shared_lib_targets_by_platform, split_groups, native_merge_debug) unstripped_libs = {} - for platform, libs in final_platform_to_native_linkables.items(): + for platform, libs in final_shared_libs_by_platform.items(): for lib in libs.values(): unstripped_libs[lib.lib.output] = platform ctx.actions.write(outputs[unstripped_native_libraries], unstripped_libs.keys()) @@ -299,11 +368,28 @@ def get_android_binary_native_library_info( for lib, platform in unstripped_libs.items() }) + if ctx.attrs._android_toolchain[AndroidToolchainInfo].cross_module_native_deps_check: + # note: can only detect these if linkable_nodes_by_platform is created, ie. 
if using relinker or merging + cross_module_link_errors = [] + for linkable_nodes in linkable_nodes_by_platform.values(): + for target, node in linkable_nodes.items(): + node_target = str(target.raw_target()) + node_module = get_module_from_target(node_target) + for dep in node.deps: + dep_target = str(dep.raw_target()) + dep_module = get_module_from_target(dep_target) + if not is_root_module(dep_module) and node_module != dep_module and dep_module not in get_module_tdeps(node_module) and dep_module not in get_calculated_module_deps(node_module): + cross_module_link_errors.append("{} (module: {}) -> {} (module: {}) ".format(node_target, node_module, dep_target, dep_module)) + + if cross_module_link_errors: + cross_module_link_errors.append(get_deps_debug_data()) + fail("Native libraries in modules should only depend on libraries in the same module or the root. Remove these deps:\n" + "\n".join(cross_module_link_errors)) + dynamic_info = _get_native_libs_and_assets( ctx, get_module_from_target, all_prebuilt_native_library_dirs, - final_platform_to_native_linkables, + final_shared_libs_by_platform, ) # Since we are using a dynamic action, we need to declare the outputs in advance. @@ -315,14 +401,14 @@ def get_android_binary_native_library_info( ctx.actions.symlink_file(outputs[native_lib_assets_for_primary_apk], dynamic_info.native_lib_assets_for_primary_apk if dynamic_info.native_lib_assets_for_primary_apk else ctx.actions.symlinked_dir("empty_native_lib_assets", {})) ctx.actions.symlink_file(outputs[stripped_native_linkable_assets_for_primary_apk], dynamic_info.stripped_native_linkable_assets_for_primary_apk if dynamic_info.stripped_native_linkable_assets_for_primary_apk else ctx.actions.symlinked_dir("empty_stripped_native_linkable_assets", {})) ctx.actions.symlink_file(outputs[root_module_metadata_assets], dynamic_info.root_module_metadata_assets) - ctx.actions.symlink_file(outputs[root_module_compressed_lib_assets], dynamic_info.root_module_compressed_lib_assets) ctx.actions.symlink_file(outputs[non_root_module_metadata_assets], dynamic_info.non_root_module_metadata_assets) - ctx.actions.symlink_file(outputs[non_root_module_compressed_lib_assets], dynamic_info.non_root_module_compressed_lib_assets) + ctx.actions.symlink_file(outputs[non_root_module_lib_assets], dynamic_info.non_root_module_lib_assets if dynamic_info.non_root_module_lib_assets else ctx.actions.symlinked_dir("empty_non_root_module_lib_assets", {})) - ctx.actions.dynamic_output(dynamic = dynamic_inputs, inputs = [], outputs = dynamic_outputs, f = dynamic_native_libs_info) + ctx.actions.dynamic_output(dynamic = dynamic_inputs, inputs = [], outputs = [o.as_output() for o in dynamic_outputs], f = dynamic_native_libs_info) all_native_libs = ctx.actions.symlinked_dir("debug_all_native_libs", {"others": native_libs, "primary": native_libs_always_in_primary_apk}) - enhance_ctx.debug_output("debug_native_libs", all_native_libs) + lib_subtargets = _create_library_subtargets(lib_outputs_by_platform, native_libs) + enhance_ctx.debug_output("native_libs", all_native_libs, sub_targets = lib_subtargets) if native_merge_debug: enhance_ctx.debug_output("native_merge_debug", native_merge_debug) @@ -331,15 +417,134 @@ def get_android_binary_native_library_info( native_libs_for_primary_apk, exopackage_info = _get_exopackage_info(ctx, native_libs_always_in_primary_apk, native_libs, native_libs_metadata) return AndroidBinaryNativeLibsInfo( - apk_under_test_prebuilt_native_library_dirs = all_prebuilt_native_library_dirs, - 
apk_under_test_shared_libraries = included_shared_lib_targets, + prebuilt_native_library_dirs = all_prebuilt_native_library_dirs, + shared_libraries = included_shared_lib_targets, native_libs_for_primary_apk = native_libs_for_primary_apk, exopackage_info = exopackage_info, - root_module_native_lib_assets = [native_lib_assets_for_primary_apk, stripped_native_linkable_assets_for_primary_apk, root_module_metadata_assets, root_module_compressed_lib_assets], - non_root_module_native_lib_assets = [non_root_module_metadata_assets, non_root_module_compressed_lib_assets], + root_module_native_lib_assets = [native_lib_assets_for_primary_apk, stripped_native_linkable_assets_for_primary_apk, root_module_metadata_assets], + non_root_module_native_lib_assets = [non_root_module_metadata_assets, non_root_module_lib_assets], generated_java_code = generated_java_code, + unstripped_shared_libraries = unstripped_native_libraries_files, ) +_NativeLibSubtargetArtifacts = record( + default = Artifact, + unrelinked = Artifact | None, +) + +# Merged libraries are dynamic dependencies, but outputs need to be declared in advance to be used by subtargets. +# This means we have to declare outputs for all possible merged libs (every merged name and every unmerged library name). +def _declare_library_subtargets( + ctx: AnalysisContext, + dynamic_outputs: list[Artifact], + original_shared_libs_by_platform: dict[str, dict[str, SharedLibrary]], + native_library_merge_map, + native_library_merge_sequence, + enable_relinker: bool) -> dict[str, dict[str, _NativeLibSubtargetArtifacts]]: + lib_outputs_by_platform = {} + for platform, original_shared_libs in original_shared_libs_by_platform.items(): + sonames = set() + sonames.update(original_shared_libs.keys()) + if native_library_merge_map: + sonames.update(native_library_merge_map.keys()) + elif native_library_merge_sequence: + for entry in native_library_merge_sequence: + if type(entry) == "list": + sonames.update([soname for (soname, _) in entry]) + else: + (soname, _) = entry + sonames.add(soname) + + lib_outputs = {} + for soname in sonames.list(): + output_path = _platform_output_path(soname, platform if len(original_shared_libs_by_platform) > 1 else None) + lib_output = ctx.actions.declare_output(output_path, dir = True) + dynamic_outputs.append(lib_output) + if enable_relinker: + output_path = output_path + ".unrelinked" + unrelinked_lib_output = ctx.actions.declare_output(output_path, dir = True) + dynamic_outputs.append(unrelinked_lib_output) + lib_outputs[soname] = _NativeLibSubtargetArtifacts( + default = lib_output, + unrelinked = unrelinked_lib_output, + ) + else: + lib_outputs[soname] = _NativeLibSubtargetArtifacts( + default = lib_output, + unrelinked = None, + ) + + lib_outputs_by_platform[platform] = lib_outputs + return lib_outputs_by_platform + +# Bind debug library subtarget outputs to actual outputs. +# For individual libraries, link to either the unmerged or merged output. +# For merged libraries, link to either the merged output, or a symlinked dir of all merged split group outputs. 
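# Condensed sketch of the split-group resolution `_link_library_subtargets`
# below relies on (shapes inferred from its signature; the helper itself is
# hypothetical): a split library is keyed back to the merged group it was
# produced from, so its subtarget can symlink the whole group.
def _base_soname(soname: str, split_groups: [dict, None]) -> str:
    if split_groups and soname in split_groups:
        return split_groups[soname]
    return soname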
+def _link_library_subtargets( + ctx: AnalysisContext, + outputs, # IndexSet[OutputArtifact] + lib_outputs_by_platform: dict[str, dict[str, _NativeLibSubtargetArtifacts]], # dict[platform, dict[soname, _NativeLibSubtargetArtifacts]] + original_shared_libs_by_platform: dict[str, dict[str, SharedLibrary]], + final_shared_libs_by_platform: dict[str, dict[str, SharedLibrary]], + merged_shared_lib_targets_by_platform: dict[str, dict[Label, str]], + split_groups: dict[str, str] | None, + native_merge_debug, + unrelinked: bool = False): + for platform, final_shared_libs in final_shared_libs_by_platform.items(): + merged_lib_outputs = {} + for soname, lib in final_shared_libs.items(): + base_soname = soname + if split_groups and soname in split_groups: + base_soname = split_groups[soname] + + group_outputs = merged_lib_outputs.setdefault(base_soname, {}) + group_outputs[soname] = lib.lib.output + + for soname, lib_outputs in lib_outputs_by_platform[platform].items(): + if soname in merged_lib_outputs: + group_outputs = merged_lib_outputs[soname] + elif soname in original_shared_libs_by_platform[platform]: + # link unmerged soname to merged output + original_shared_lib = original_shared_libs_by_platform[platform][soname] + merged_soname = merged_shared_lib_targets_by_platform[platform][original_shared_lib.label] + if split_groups and merged_soname in split_groups: + merged_soname = split_groups[merged_soname] + group_outputs = merged_lib_outputs[merged_soname] + else: + # merged group name has no constituents, link to debug output + group_outputs = {soname: native_merge_debug} + + output = lib_outputs.default + if unrelinked: + output = lib_outputs.unrelinked + ctx.actions.symlinked_dir(outputs[output], group_outputs) + +def _create_library_subtargets(lib_outputs_by_platform: dict[str, dict[str, _NativeLibSubtargetArtifacts]], native_libs: Artifact): + def create_library_subtarget(output: _NativeLibSubtargetArtifacts): + if output.unrelinked: + sub_targets = {"unrelinked": [DefaultInfo(default_outputs = [output.unrelinked])]} + return [DefaultInfo(default_outputs = [output.default], sub_targets = sub_targets)] + return [DefaultInfo(default_outputs = [output.default])] + + if len(lib_outputs_by_platform) > 1: + return { + platform: [DefaultInfo(default_outputs = [native_libs], sub_targets = { + soname: create_library_subtarget(output) + for soname, output in lib_outputs.items() + })] + for platform, lib_outputs in lib_outputs_by_platform.items() + } + elif len(lib_outputs_by_platform) == 1: + lib_outputs = list(lib_outputs_by_platform.values())[0] + return { + soname: create_library_subtarget(output) + for soname, output in lib_outputs.items() + } + else: + # TODO(ctolliday) at this point we should have thrown an error earlier if no libraries matched cpu_filters + # (or returned earlier if there are no native library deps) + return {} + # We could just return two artifacts of libs (one for the primary APK, one which can go # either into the primary APK or be exopackaged), and one artifact of assets, # but we'd need an extra action in order to combine them (we can't use `symlinked_dir` since @@ -348,12 +553,11 @@ _NativeLibsAndAssetsInfo = record( native_libs = Artifact, native_libs_metadata = Artifact, native_libs_always_in_primary_apk = Artifact, - native_lib_assets_for_primary_apk = [Artifact, None], - stripped_native_linkable_assets_for_primary_apk = [Artifact, None], + native_lib_assets_for_primary_apk = Artifact | None, + stripped_native_linkable_assets_for_primary_apk = Artifact | None, 
root_module_metadata_assets = Artifact, - root_module_compressed_lib_assets = Artifact, non_root_module_metadata_assets = Artifact, - non_root_module_compressed_lib_assets = Artifact, + non_root_module_lib_assets = [Artifact, None], ) def _get_exopackage_info( @@ -378,6 +582,7 @@ def _get_native_libs_and_assets( prebuilt_native_library_dirs_always_in_primary_apk = [] prebuilt_native_library_dir_assets_for_primary_apk = [] prebuilt_native_library_dir_module_assets_map = {} + prebuilt_native_library_dir_module_libs_map = {} for native_lib in all_prebuilt_native_library_dirs: native_lib_target = str(native_lib.raw_target) module = get_module_from_target(native_lib_target) @@ -393,7 +598,7 @@ def _get_native_libs_and_assets( if native_lib.is_asset: prebuilt_native_library_dir_module_assets_map.setdefault(module, []).append(native_lib) else: - prebuilt_native_library_dirs.append(native_lib) + prebuilt_native_library_dir_module_libs_map.setdefault(module, []).append(native_lib) elif native_lib.is_asset and is_packaging_native_libs_as_assets_supported: expect(not native_lib.for_primary_apk, "{} which is marked as needing to be in the primary APK cannot be an asset".format(native_lib_target)) prebuilt_native_library_dir_assets_for_primary_apk.append(native_lib) @@ -421,40 +626,53 @@ def _get_native_libs_and_assets( ) if prebuilt_native_library_dir_assets_for_primary_apk else None native_lib_module_assets_map = {} for module, native_lib_dir in prebuilt_native_library_dir_module_assets_map.items(): - native_lib_module_assets_map[module] = [_filter_prebuilt_native_library_dir( + native_lib_module_assets_map.setdefault(module, []).append(_filter_prebuilt_native_library_dir( ctx, native_lib_dir, "native_lib_assets_for_module_{}".format(module), package_as_assets = True, module = module, - )] + )) + for module, native_lib_dir in prebuilt_native_library_dir_module_libs_map.items(): + native_lib_module_assets_map.setdefault(module, []).append(_filter_prebuilt_native_library_dir( + ctx, + native_lib_dir, + "native_lib_libs_for_module_{}".format(module), + package_as_assets = False, + module = module, + )) stripped_linkables = _get_native_linkables(ctx, platform_to_native_linkables, get_module_from_target, is_packaging_native_libs_as_assets_supported) for module, native_linkable_assets in stripped_linkables.linkable_module_assets_map.items(): native_lib_module_assets_map.setdefault(module, []).append(native_linkable_assets) root_module_metadata_srcs = {} - root_module_compressed_lib_srcs = {} non_root_module_metadata_srcs = {} - non_root_module_compressed_lib_srcs = {} + non_root_module_libs_srcs = [] assets_for_primary_apk = filter(None, [native_lib_assets_for_primary_apk, stripped_linkables.linkable_assets_for_primary_apk]) stripped_linkable_assets_for_primary_apk = stripped_linkables.linkable_assets_for_primary_apk if assets_for_primary_apk: - metadata_file, native_library_paths = _get_native_libs_as_assets_metadata(ctx, assets_for_primary_apk, ROOT_MODULE) + metadata_file = _get_native_libs_as_assets_metadata(ctx, assets_for_primary_apk, ROOT_MODULE) root_module_metadata_srcs[paths.join(_get_native_libs_as_assets_dir(ROOT_MODULE), "metadata.txt")] = metadata_file - if ctx.attrs.compress_asset_libraries: - compressed_lib_dir = _get_compressed_native_libs_as_assets(ctx, assets_for_primary_apk, native_library_paths, ROOT_MODULE) - root_module_compressed_lib_srcs[_get_native_libs_as_assets_dir(ROOT_MODULE)] = compressed_lib_dir - - # Since we're storing these as compressed assets, we need to ignore the 
uncompressed libs. - native_lib_assets_for_primary_apk = None - stripped_linkable_assets_for_primary_apk = None for module, native_lib_assets in native_lib_module_assets_map.items(): - metadata_file, native_library_paths = _get_native_libs_as_assets_metadata(ctx, native_lib_assets, module) - non_root_module_metadata_srcs[paths.join(_get_native_libs_as_assets_dir(module), "libs.txt")] = metadata_file - compressed_lib_dir = _get_compressed_native_libs_as_assets(ctx, native_lib_assets, native_library_paths, module) - non_root_module_compressed_lib_srcs[_get_native_libs_as_assets_dir(module)] = compressed_lib_dir + metadata_file = _get_native_libs_as_assets_metadata(ctx, native_lib_assets, module) + libs_metadata_path = paths.join("assets", "libs.txt") + non_root_module_metadata_srcs[paths.join(_get_native_libs_as_assets_dir(module), libs_metadata_path)] = metadata_file + non_root_module_libs_srcs.extend(native_lib_assets) + + non_root_module_libs = None + if non_root_module_libs_srcs: + non_root_module_libs = ctx.actions.declare_output("non_root_module_libs") + ctx.actions.run( + cmd_args([ + ctx.attrs._android_toolchain[AndroidToolchainInfo].combine_native_library_dirs[RunInfo], + "--output-dir", + non_root_module_libs.as_output(), + "--library-dirs", + ] + non_root_module_libs_srcs), + category = "combine_non_root_module_native_libs", + ) combined_native_libs = ctx.actions.declare_output("combined_native_libs", dir = True) native_libs_metadata = ctx.actions.declare_output("native_libs_metadata.txt") @@ -486,9 +704,8 @@ def _get_native_libs_and_assets( native_lib_assets_for_primary_apk = native_lib_assets_for_primary_apk, stripped_native_linkable_assets_for_primary_apk = stripped_linkable_assets_for_primary_apk, root_module_metadata_assets = ctx.actions.symlinked_dir("root_module_metadata_assets", root_module_metadata_srcs), - root_module_compressed_lib_assets = ctx.actions.symlinked_dir("root_module_compressed_lib_assets", root_module_compressed_lib_srcs), non_root_module_metadata_assets = ctx.actions.symlinked_dir("non_root_module_metadata_assets", non_root_module_metadata_srcs), - non_root_module_compressed_lib_assets = ctx.actions.symlinked_dir("non_root_module_compressed_lib_assets", non_root_module_compressed_lib_srcs), + non_root_module_lib_assets = non_root_module_libs, ) def _filter_prebuilt_native_library_dir( @@ -500,12 +717,16 @@ def _filter_prebuilt_native_library_dir( cpu_filters = ctx.attrs.cpu_filters or CPU_FILTER_TO_ABI_DIRECTORY.keys() abis = [CPU_FILTER_TO_ABI_DIRECTORY[cpu] for cpu in cpu_filters] filter_tool = ctx.attrs._android_toolchain[AndroidToolchainInfo].filter_prebuilt_native_library_dir[RunInfo] - native_libs_dirs = [native_lib.dir for native_lib in native_libs] - native_libs_dirs_file = ctx.actions.write("{}_list.txt".format(identifier), native_libs_dirs) + native_libs_dirs_file = argfile(actions = ctx.actions, name = "{}_list.txt".format(identifier), args = [native_lib.dir for native_lib in native_libs]) base_output_dir = ctx.actions.declare_output(identifier, dir = True) - output_dir = base_output_dir.project(_get_native_libs_as_assets_dir(module)) if package_as_assets else base_output_dir + if module == ROOT_MODULE: + output_dir = base_output_dir.project(_get_native_libs_as_assets_dir(module)) if package_as_assets else base_output_dir + elif package_as_assets: + output_dir = base_output_dir.project(paths.join(_get_native_libs_as_assets_dir(module), "assets")) + else: + output_dir = base_output_dir.project(paths.join(_get_native_libs_as_assets_dir(module), 
"lib")) ctx.actions.run( - cmd_args([filter_tool, native_libs_dirs_file, output_dir.as_output(), "--abis"] + abis).hidden(native_libs_dirs), + cmd_args([filter_tool, native_libs_dirs_file, output_dir.as_output(), "--abis"] + abis), category = "filter_prebuilt_native_library_dir", identifier = identifier, ) @@ -515,7 +736,7 @@ def _filter_prebuilt_native_library_dir( _StrippedNativeLinkables = record( linkables = Artifact, linkables_always_in_primary_apk = Artifact, - linkable_assets_for_primary_apk = [Artifact, None], + linkable_assets_for_primary_apk = Artifact | None, linkable_module_assets_map = dict[str, Artifact], ) @@ -528,6 +749,7 @@ def _get_native_linkables( stripped_native_linkables_always_in_primary_apk_srcs = {} stripped_native_linkable_assets_for_primary_apk_srcs = {} stripped_native_linkable_module_assets_srcs = {} + strip_libraries = getattr(ctx.attrs, "strip_libraries", True) cpu_filters = ctx.attrs.cpu_filters for platform, native_linkables in platform_to_native_linkables.items(): @@ -538,6 +760,7 @@ def _get_native_linkables( for so_name, native_linkable in native_linkables.items(): native_linkable_target = str(native_linkable.label.raw_target()) module = get_module_from_target(native_linkable_target) + lib = native_linkable.stripped_lib if strip_libraries else native_linkable.lib.output expect( not native_linkable.for_primary_apk or is_root_module(module), @@ -547,18 +770,22 @@ def _get_native_linkables( not native_linkable.for_primary_apk or not native_linkable.can_be_asset, "{} which is marked as needing to be in the primary APK cannot be an asset".format(native_linkable_target), ) - if native_linkable.can_be_asset and not is_root_module(module): - so_name_path = paths.join(_get_native_libs_as_assets_dir(module), abi_directory, so_name) - stripped_native_linkable_module_assets_srcs.setdefault(module, {})[so_name_path] = native_linkable.stripped_lib - elif native_linkable.can_be_asset and package_native_libs_as_assets_enabled: - so_name_path = paths.join(_get_native_libs_as_assets_dir(module), abi_directory, so_name) - stripped_native_linkable_assets_for_primary_apk_srcs[so_name_path] = native_linkable.stripped_lib - else: - so_name_path = paths.join(abi_directory, so_name) - if native_linkable.for_primary_apk: - stripped_native_linkables_always_in_primary_apk_srcs[so_name_path] = native_linkable.stripped_lib + + if is_root_module(module): + if native_linkable.can_be_asset and package_native_libs_as_assets_enabled: + native_libs_assets_dir = paths.join(_get_native_libs_as_assets_dir(module)) + so_name_path = paths.join(native_libs_assets_dir, abi_directory, so_name) + stripped_native_linkable_assets_for_primary_apk_srcs[so_name_path] = lib else: - stripped_native_linkables_srcs[so_name_path] = native_linkable.stripped_lib + so_name_path = paths.join(abi_directory, so_name) + if native_linkable.for_primary_apk: + stripped_native_linkables_always_in_primary_apk_srcs[so_name_path] = lib + else: + stripped_native_linkables_srcs[so_name_path] = lib + else: + module_dir = "assets" if native_linkable.can_be_asset else "lib" + so_name_path = paths.join(_get_native_libs_as_assets_dir(module), module_dir, abi_directory, so_name) + stripped_native_linkable_module_assets_srcs.setdefault(module, {})[so_name_path] = lib stripped_native_linkables = ctx.actions.symlinked_dir( "stripped_native_linkables", @@ -589,53 +816,34 @@ def _get_native_linkables( def _get_native_libs_as_assets_metadata( ctx: AnalysisContext, native_lib_assets: list[Artifact], - module: str) -> (Artifact, 
Artifact): - native_lib_assets_file = ctx.actions.write("{}/native_lib_assets".format(module), [cmd_args([native_lib_asset, _get_native_libs_as_assets_dir(module)], delimiter = "/") for native_lib_asset in native_lib_assets]) + module: str) -> Artifact: + native_lib_assets_file = argfile( + actions = ctx.actions, + name = "{}/native_lib_assets".format(module), + args = [cmd_args([native_lib_asset, _get_native_libs_as_assets_dir(module)], delimiter = "/") for native_lib_asset in native_lib_assets], + ) metadata_output = ctx.actions.declare_output("{}/native_libs_as_assets_metadata.txt".format(module)) - native_library_paths = ctx.actions.declare_output("{}/native_libs_as_assets_paths.txt".format(module)) metadata_cmd = cmd_args([ ctx.attrs._android_toolchain[AndroidToolchainInfo].native_libs_as_assets_metadata[RunInfo], "--native-library-dirs", native_lib_assets_file, "--metadata-output", metadata_output.as_output(), - "--native-library-paths-output", - native_library_paths.as_output(), - ]).hidden(native_lib_assets) + ]) ctx.actions.run(metadata_cmd, category = "get_native_libs_as_assets_metadata", identifier = module) - return metadata_output, native_library_paths - -def _get_compressed_native_libs_as_assets( - ctx: AnalysisContext, - native_lib_assets: list[Artifact], - native_library_paths: Artifact, - module: str) -> Artifact: - output_dir = ctx.actions.declare_output("{}/compressed_native_libs_as_assets_dir".format(module)) - compressed_libraries_cmd = cmd_args([ - ctx.attrs._android_toolchain[AndroidToolchainInfo].compress_libraries[RunInfo], - "--libraries", - native_library_paths, - "--output-dir", - output_dir.as_output(), - "--compression-type", - ctx.attrs.asset_compression_algorithm or "xz", - "--xz-compression-level", - str(ctx.attrs.xz_compression_level), - ]).hidden(native_lib_assets) - ctx.actions.run(compressed_libraries_cmd, category = "compress_native_libs_as_assets", identifier = module) - return output_dir + return metadata_output def _get_native_libs_as_assets_dir(module: str) -> str: return "assets/{}".format("lib" if is_root_module(module) else module) -def get_native_linkables_by_default(ctx: AnalysisContext, _platform: str, deps: list[Dependency], shared_libraries_to_exclude) -> dict[str, SharedLibrary]: +def get_default_shared_libs(ctx: AnalysisContext, deps: list[Dependency], shared_libraries_to_exclude) -> dict[str, SharedLibrary]: shared_library_info = merge_shared_libraries( ctx.actions, deps = filter(None, [x.get(SharedLibraryInfo) for x in deps]), ) return { - so_name: shared_lib - for so_name, shared_lib in traverse_shared_library_info(shared_library_info).items() + soname: shared_lib + for soname, shared_lib in with_unique_str_sonames(traverse_shared_library_info(shared_library_info)).items() if not (shared_libraries_to_exclude and shared_libraries_to_exclude.contains(shared_lib.label.raw_target())) } @@ -653,6 +861,9 @@ def encode_linkable_graph_for_mergemap(graph_node_map_by_platform: dict[str, dic platform: { target: _LinkableSharedNode( raw_target = str(target.raw_target()), + # FIXME(JakobDegen): The definition of `LinkableNode` claims that it's ok for this + # to be `None` (I assume in the case of static preferred linkage), so either that is + # wrong or this is. 
See the diff that added this FIXME for how to reproduce soname = node.default_soname, labels = node.labels, deps = node.deps + node.exported_deps, @@ -681,23 +892,7 @@ MergedSharedLibrary = record( # this only includes solib constituents that are included in the android merge map solib_constituents = list[str], is_actually_merged = bool, -) - -# Output of the linkables merge process, the list of shared libs for each platform and -# debug information about the merge process itself. -MergedLinkables = record( - # dict[platform, dict[final_soname, MergedSharedLibrary]] - shared_libs_by_platform = dict[str, dict[str, MergedSharedLibrary]], - debug_info = dict[str, MergedLinkablesDebugInfo], -) - -# Input data to the linkables merge process -LinkableMergeData = record( - glue_linkable = [(Label, LinkInfo), None], - default_shared_libs = dict[str, SharedLibrary], - linkable_nodes = dict[Label, LinkableNode], - merge_map = dict[str, [str, None]], - apk_module_graph = typing.Callable, + primary_constituents = list[Label], ) # information about a link group derived from the merge mapping @@ -707,22 +902,31 @@ LinkGroupData = record( apk_module = str, ) +# Lookup key for somerge groups, either the soname for shared libraries or the target name for unmerged statics +GroupLabel = str + +# Represents the primary constituents and deps of primary constituents used to create a LinkGroupLinkableNode for a non-prebuilt shared library. +LinkGroupMergeInfo = record( + label = GroupLabel, + deps = list[GroupLabel], + exported_deps = list[GroupLabel], + constituent_link_infos = list[LinkInfo], +) + # Represents a node in the final merged linkable map. Most of these will be shared libraries, either prebuilt shared libs or -# libraries that are created below for a node in the link_groups_graph. The exception is for non-merged static-only nodes, in -# that case this +# libraries that are created below for a node in the link_groups_graph. The exception is for non-merged static-only nodes. LinkGroupLinkableNode = record( # The LinkInfo to add to the link line for a node that links against this. link = LinkInfo, - deps = list[str], - exported_deps = list[str], + deps = list[GroupLabel], + exported_deps = list[GroupLabel], shared_lib = [SharedLibrary, None], - # linker flags to be exported by any node that links against this. This can only be non-None for non-merged static only nodes (as we don't # propagate exported linker flags through transitive shared lib deps). exported_linker_flags = [(list[typing.Any], list[typing.Any]), None], ) -def write_merged_library_map(ctx: AnalysisContext, merged_linkables: MergedLinkables) -> Artifact: +def write_merged_library_map(ctx: AnalysisContext, shared_libs_by_platform: dict[str, dict[str, MergedSharedLibrary]]) -> Artifact: """ Writes the "merged library map". This is a map of original soname to final soname of the form: @@ -733,24 +937,16 @@ def write_merged_library_map(ctx: AnalysisContext, merged_linkables: MergedLinka ... 
``` """ - solib_map = None - for _, shared_libs in merged_linkables.shared_libs_by_platform.items(): - platform_solib_map = {} - - # we sort these just so that they will be deterministic when we compare across different platforms - for soname in sorted(shared_libs.keys()): + solib_map = {} # dict[final_soname, set[original_soname]] + for _, shared_libs in shared_libs_by_platform.items(): + for soname in shared_libs.keys(): merged_shared_lib = shared_libs[soname] if merged_shared_lib.is_actually_merged: - platform_solib_map[soname] = sorted(merged_shared_lib.solib_constituents) - if solib_map: - if solib_map != platform_solib_map: - fail("DO NOT COMMIT") - else: - solib_map = platform_solib_map + solib_map.setdefault(soname, set()).update(merged_shared_lib.solib_constituents) lines = [] - for final_soname, sonames in solib_map.items(): - for original_soname in sonames: + for final_soname in sorted(solib_map.keys()): + for original_soname in solib_map[final_soname].list(): lines.append("{} {}".format(original_soname, final_soname)) # we wanted it sorted by original_soname @@ -763,15 +959,100 @@ def run_mergemap_codegen(ctx: AnalysisContext, merged_library_map: Artifact) -> ctx.actions.run(args, category = "mergemap_codegen") return mapping_java -def expect_dedupe(v): - # asserts that the input list is unique - o = dedupe_by_value(v) - expect(len(o) == len(v), "expected `{}` to be a list of unique items, but it wasn't. deduped list was `{}`.", v, o) - return v +# We can't merge a prebuilt shared (that has no archive) and must use it's original info. +# Ideally this would probably be structured info on the linkablenode. +def _is_prebuilt_shared(node_data: LinkableNode) -> bool: + shared_link_info = node_data.link_infos.get(LibOutputStyle("shared_lib"), None) + if not shared_link_info or not shared_link_info.default.linkables: + return False + pic_archive_info = node_data.link_infos.get(LibOutputStyle("pic_archive"), None) + if not pic_archive_info or not pic_archive_info.default.linkables: + return True + return False + +def _has_linkable(node_data: LinkableNode) -> bool: + for _, output in node_data.link_infos.items(): + if output.default.linkables: + return True + return False + +def _platform_output_path(path: str, platform: [str, None] = None): + if platform: + return platform + "/" + path + return path + +def _transitive_has_linkable( + target: Label, + linkable_nodes: dict[Label, LinkableNode], + transitive_linkable_cache: dict[Label, bool]) -> bool: + if target in transitive_linkable_cache: + return transitive_linkable_cache[target] + + target_node = linkable_nodes.get(target) + for dep in target_node.deps: + if _has_linkable(linkable_nodes.get(dep)) or _transitive_has_linkable(dep, linkable_nodes, transitive_linkable_cache): + transitive_linkable_cache[target] = True + return True + for dep in target_node.exported_deps: + if _has_linkable(linkable_nodes.get(dep)) or _transitive_has_linkable(dep, linkable_nodes, transitive_linkable_cache): + transitive_linkable_cache[target] = True + return True + + transitive_linkable_cache[target] = False + return False + +def _shared_lib_for_prebuilt_shared( + ctx: AnalysisContext, + cxx_toolchain: CxxToolchainInfo, + target: Label, + node_data: LinkableNode, + linkable_nodes: dict[Label, LinkableNode], + transitive_linkable_cache: dict[Label, bool], + platform: [str, None] = None) -> SharedLibrary: + expect( + len(node_data.shared_libs.libraries) == 1, + "unexpected shared_libs length for somerge of {} ({})".format(target, node_data.shared_libs), + 
) + + # TODO(cjhopman): We don't currently support prebuilt shared libs with deps on other libs because + # we don't compute the shared lib deps of prebuilt shared libs here. That + # shouldn't be too hard, but we haven't needed it. + for dep in node_data.deps: + expect( + not _transitive_has_linkable(dep, linkable_nodes, transitive_linkable_cache), + "prebuilt shared library `{}` with deps not supported by somerge".format(target), + ) + for dep in node_data.exported_deps: + expect( + not _transitive_has_linkable(dep, linkable_nodes, transitive_linkable_cache), + "prebuilt shared library `{}` with exported_deps not supported by somerge".format(target), + ) + + shlib = node_data.shared_libs.libraries[0] + soname = shlib.soname.ensure_str() + shlib = shlib.lib + output_path = _platform_output_path(soname, platform) + return create_shlib( + lib = shlib, + stripped_lib = strip_lib(ctx, cxx_toolchain, shlib.output, output_path), + link_args = None, + shlib_deps = None, + can_be_asset = node_data.can_be_asset, + for_primary_apk = False, + soname = soname, + label = target, + ) -def _get_merged_linkables( +def _get_merged_linkables_for_platform( ctx: AnalysisContext, - merged_data_by_platform: dict[str, LinkableMergeData]) -> MergedLinkables: + cxx_toolchain: CxxToolchainInfo, + platform: str | None, + glue_linkable: [(Label, LinkInfo), None], + default_shared_libs: dict[str, SharedLibrary], + linkable_nodes: dict[Label, LinkableNode], + merge_map: dict[str, [str, None]], + merge_linker_args: dict[str, typing.Any], + apk_module_graph: typing.Callable) -> (dict[str, MergedSharedLibrary], MergedLinkablesDebugInfo): """ This takes the merge mapping and constructs the resulting merged shared libraries. @@ -813,335 +1094,514 @@ def _get_merged_linkables( of a primary constituent. A public node is linked via "link whole". 2. linker_flags of primary constituents are included in the link, for non primary they are not """ - debug_info_by_platform = {} - shared_libs_by_platform = {} - for platform, merge_data in merged_data_by_platform.items(): - debug_info = debug_info_by_platform.setdefault(platform, MergedLinkablesDebugInfo( - unmerged_statics = [], - group_debug = {}, - with_default_soname = [], - missing_default_solibs = [], - )) - linkable_nodes = merge_data.linkable_nodes - - linkable_nodes_graph = {k: dedupe(v.deps + v.exported_deps) for k, v in linkable_nodes.items()} - topo_sorted_targets = topo_sort(linkable_nodes_graph) - - # first we collect basic information about each link group, this will populate the fields in LinkGroupData and - # map target labels to their link group name. 
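# Illustrative plain-Python sketch (not part of the diff; assign_link_groups and the target
# names are hypothetical) of the first step described in the docstring above: each target
# mapped in the merge map joins that named group, and every unmapped target becomes a
# singleton group keyed by its own label. The hunk below implements this over LinkableNodes.
def assign_link_groups(targets, merge_map):
    link_groups = {}           # group key -> list of constituent targets
    target_to_link_group = {}  # target -> group key
    for target in targets:
        group = merge_map.get(target) or target  # missing/None mapping -> singleton group
        link_groups.setdefault(group, []).append(target)
        target_to_link_group[target] = group
    return link_groups, target_to_link_group

# Two targets merged into libmerged.so; //c:c stays a singleton group.
groups, owner = assign_link_groups(
    ["//a:a", "//b:b", "//c:c"],
    {"//a:a": "libmerged.so", "//b:b": "libmerged.so"},
)
assert groups == {"libmerged.so": ["//a:a", "//b:b"], "//c:c": ["//c:c"]}
assert owner["//b:b"] == "libmerged.so"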
-        link_groups = {}
-        target_to_link_group = {}
-
-        for target in topo_sorted_targets:
-            expect(target not in target_to_link_group, "prelude internal error, target seen twice?")
-            target_apk_module = merge_data.apk_module_graph(str(target.raw_target()))
-
-            link_group = merge_data.merge_map[str(target)]
-            if not link_group:
-                link_group = str(target)
-                link_groups[link_group] = LinkGroupData(
-                    group_name = target,
-                    constituents = [target],
-                    apk_module = target_apk_module,
+    debug_info = MergedLinkablesDebugInfo(
+        unmerged_statics = [],
+        group_debug = {},
+        with_default_soname = [],
+        missing_default_solibs = [],
+    )
+
+    linkable_nodes_graph = {k: dedupe(v.deps + v.exported_deps) for k, v in linkable_nodes.items()}
+    topo_sorted_targets = pre_order_traversal(linkable_nodes_graph)
+
+    # First we collect basic information about each link group; this will populate the fields in LinkGroupData and
+    # map target labels to their link group name.
+    link_groups = {}
+    target_to_link_group = {}
+
+    # Because we cannot attach this to the LinkableNode after the fact, declare a cache for each platform
+    transitive_linkable_cache = {}
+
+    for target in topo_sorted_targets:
+        expect(target not in target_to_link_group, "prelude internal error, target seen twice?")
+        target_apk_module = apk_module_graph(str(target.raw_target()))
+
+        link_group = merge_map.get(str(target), None)
+        if not link_group:
+            link_group = str(target)
+            link_groups[link_group] = LinkGroupData(
+                group_name = target,
+                constituents = [target],
+                apk_module = target_apk_module,
+            )
+        elif link_group in link_groups:
+            link_group_data = link_groups[link_group]
+
+            # TODO(cjhopman): buck1 provides a more useful error here in that it lists the module mappings for all
+            # constituents of the merge group (rather than just one conflict). That allows users to resolve all the
+            # issues at once. With merge sequence merging (the replacement for merge map), this error shouldn't ever be hit
+            # and so maybe it's not necessary to improve it.
+            expect(
+                link_group_data.apk_module == target_apk_module,
+                "Native library merge of {} has inconsistent application module mappings:\n{} is in module {}\n{} is in module {}",
+                link_group_data.group_name,
+                target,
+                target_apk_module,
+                link_group_data.constituents[0],
+                link_group_data.apk_module,
+            )
+            link_groups[link_group].constituents.append(target)
+        else:
+            link_groups[link_group] = LinkGroupData(
+                group_name = link_group,
+                constituents = [target],
+                apk_module = target_apk_module,
+            )
+
+        target_to_link_group[target] = link_group
+
+    # Now that all targets are assigned to a link group, build up the link group graph.
+ link_groups_graph_builder = {} + for target in topo_sorted_targets: + target_group = target_to_link_group[target] + group_deps = link_groups_graph_builder.setdefault(target_group, {}) + for dep in linkable_nodes_graph[target]: + dep_group = target_to_link_group[dep] + if target_group != dep_group and dep_group not in group_deps: + # Store one example of why target_group depends on dep_group + group_deps[dep_group] = (target, dep) + link_groups_graph = {k: list(v.keys()) for k, v in link_groups_graph_builder.items()} + + archive_output_style = LibOutputStyle("pic_archive") + shlib_output_style = LibOutputStyle("shared_lib") + + link_group_linkable_nodes = {} + group_shared_libs = {} + included_default_solibs = {} + + def edge_explainer(src_group, dest_group): + """Explains in an error why src_group has a dependency on dest_group""" + if src_group not in link_groups_graph_builder or dest_group not in link_groups_graph_builder[src_group]: + return ["Unknown"] + + src_target, dest_target = link_groups_graph_builder[src_group][dest_group] + return [" " + str(src_target), "-> " + str(dest_target)] + + # Now we will traverse from the leaves up the graph (the link groups graph). As we traverse, we will produce + # a link group linkablenode for each group. + for group in post_order_traversal(link_groups_graph, edge_explainer = edge_explainer): + group_data = link_groups[group] + is_actually_merged = len(group_data.constituents) > 1 + + can_be_asset = True + for target in group_data.constituents: + if not linkable_nodes[target].can_be_asset: + can_be_asset = False + break + + if not is_actually_merged: + target = group_data.constituents[0] + node_data = linkable_nodes[target] + + if node_data.preferred_linkage == Linkage("static") or not _has_linkable(node_data): + debug_info.unmerged_statics.append(target) + link_group_linkable_nodes[group] = LinkGroupLinkableNode( + link = node_data.link_infos[archive_output_style].default, + deps = dedupe_by_value([target_to_link_group[t] for t in node_data.deps]), + exported_deps = dedupe_by_value([target_to_link_group[t] for t in node_data.exported_deps]), + shared_lib = None, + exported_linker_flags = (node_data.linker_flags.exported_flags, node_data.linker_flags.exported_post_flags), ) - elif link_group in link_groups: - link_group_data = link_groups[link_group] - - # TODO(cjhopman): buck1 provides a more useful error here in that it lists the module mappings for all - # constituents of the merge group (rather than just one conflict). That allows users to resolve all the - # issues at once. With merge sequence merging (the replacement for merge map), this error shouldn't ever be hit - # and so maybe it's not necessary to improve it. 
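# Illustrative plain-Python sketch (not part of the diff; all names hypothetical) of the
# link-group graph condensation built above: every cross-group target edge collapses to a
# single group edge, keeping one (src_target, dep_target) witness so edge_explainer can
# report why the group dependency exists.
def build_group_graph(target_deps, target_to_group):
    group_edges = {}  # group -> {dep_group: (src_target, dep_target) witness}
    for target, deps in target_deps.items():
        src_group = target_to_group[target]
        edges = group_edges.setdefault(src_group, {})
        for dep in deps:
            dep_group = target_to_group[dep]
            if dep_group != src_group and dep_group not in edges:
                edges[dep_group] = (target, dep)
    return {group: list(deps) for group, deps in group_edges.items()}

# Two merged targets depending on one unmerged target yield a single group edge.
deps = {"//a:a": ["//c:c"], "//b:b": ["//c:c"], "//c:c": []}
owner = {"//a:a": "libm.so", "//b:b": "libm.so", "//c:c": "//c:c"}
assert build_group_graph(deps, owner) == {"libm.so": ["//c:c"], "//c:c": []}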
- expect( - link_group_data.apk_module == target_apk_module, - "Native library merge of {} has inconsistent application module mappings:\n{} is in module {}\n{} is in module {}", - link_group_data.group_name, + continue + + if _is_prebuilt_shared(node_data): + shared_lib = _shared_lib_for_prebuilt_shared( + ctx, + cxx_toolchain, target, - target_apk_module, - link_group_data.constituents[0], - link_group_data.apk_module, + node_data, + linkable_nodes, + transitive_linkable_cache, + platform, ) - link_groups[link_group].constituents.append(target) - else: - link_groups[link_group] = LinkGroupData( - group_name = link_group, - constituents = [target], - apk_module = target_apk_module, + link_group_linkable_nodes[group] = LinkGroupLinkableNode( + link = node_data.link_infos[shlib_output_style].default, + deps = [], + exported_deps = [], + shared_lib = shared_lib, + # exported linker flags for shared libs are in their linkinfo itself and are not exported from dependents + exported_linker_flags = None, + ) + group_shared_libs[shared_lib.soname.ensure_str()] = MergedSharedLibrary( + soname = shared_lib.soname.ensure_str(), + lib = shared_lib, + apk_module = group_data.apk_module, + solib_constituents = [], + is_actually_merged = False, + primary_constituents = [target], ) + continue + + exported_linker_flags = [] + exported_linker_post_flags = [] + links = [] + + if is_actually_merged and glue_linkable: + links.append(set_link_info_link_whole(glue_linkable[1])) + + solib_constituents = [] + group_deps = [] + group_exported_deps = [] + for key in group_data.constituents: + expect(target_to_link_group[key] == group) + node = linkable_nodes[key] + + default_solibs = list([shlib.soname.ensure_str() for shlib in node.shared_libs.libraries]) + if not default_solibs and node.preferred_linkage == Linkage("static"): + default_solibs = [node.default_soname] + + for soname in default_solibs: + included_default_solibs[soname] = True + if node.include_in_android_mergemap: + solib_constituents.append(soname) + + node = linkable_nodes[key] + link_info = node.link_infos[archive_output_style].default + + # the propagated link info should already be wrapped with exported flags. + link_info = wrap_link_info( + link_info, + pre_flags = node.linker_flags.flags, + post_flags = node.linker_flags.post_flags, + ) + exported_linker_flags.extend(node.linker_flags.exported_flags) + exported_linker_post_flags.extend(node.linker_flags.exported_post_flags) + links.append(set_link_info_link_whole(link_info)) - target_to_link_group[target] = link_group + dep_groups = [target_to_link_group[dep] for dep in node.deps] + group_deps.extend([dep_group for dep_group in dep_groups if dep_group != group]) - # Now that all targets are assigned to a link group, build up the link group graph. 
- link_groups_graph_builder = {} - for target in topo_sorted_targets: - target_group = target_to_link_group[target] - group_deps = link_groups_graph_builder.setdefault(target_group, {}) - for dep in linkable_nodes_graph[target]: - dep_group = target_to_link_group[dep] - if target_group != dep_group: - group_deps[dep_group] = True - link_groups_graph = {k: list(v.keys()) for k, v in link_groups_graph_builder.items()} + exported_dep_groups = [target_to_link_group[dep] for dep in node.exported_deps] + group_exported_deps.extend([dep_group for dep_group in exported_dep_groups if dep_group != group]) - archive_output_style = LibOutputStyle("pic_archive") - shlib_output_style = LibOutputStyle("shared_lib") + soname = group + if not is_actually_merged: + soname = linkable_nodes[group_data.constituents[0]].default_soname + debug_info.with_default_soname.append((soname, group_data.constituents[0])) - cxx_toolchain = ctx.attrs._cxx_toolchain[platform][CxxToolchainInfo] + output_path = _platform_output_path(soname, platform) - link_group_linkable_nodes = {} - group_shared_libs = {} - included_default_solibs = {} - - # Now we will traverse from the leaves up the graph (the link groups graph). As we traverse, we will produce - # a link group linkablenode for each group. - for group in post_order_traversal(link_groups_graph): - group_data = link_groups[group] - is_actually_merged = len(group_data.constituents) > 1 - can_be_asset = True - - if not is_actually_merged: - target = group_data.constituents[0] - node_data = linkable_nodes[target] - can_be_asset = node_data.can_be_asset - - def has_linkable(node_data: LinkableNode) -> bool: - for _, output in node_data.link_infos.items(): - if output.default.linkables: - return True - return False - - if node_data.preferred_linkage == Linkage("static") or not has_linkable(node_data): - debug_info.unmerged_statics.append(target) - link_group_linkable_nodes[group] = LinkGroupLinkableNode( - link = node_data.link_infos[archive_output_style].default, - deps = dedupe_by_value([target_to_link_group[t] for t in node_data.deps]), - exported_deps = dedupe_by_value([target_to_link_group[t] for t in node_data.exported_deps]), - shared_lib = None, - exported_linker_flags = (node_data.linker_flags.exported_flags, node_data.linker_flags.exported_post_flags), - ) - continue - - # We can't merge a prebuilt shared (that has no archive) and must use it's original info. - # Ideally this would probably be structured info on the linkablenode. 
- def is_prebuilt_shared(node_data: LinkableNode) -> bool: - shared_link_info = node_data.link_infos.get(shlib_output_style, None) - if not shared_link_info or not shared_link_info.default.linkables: - return False - pic_archive_info = node_data.link_infos.get(archive_output_style, None) - if not pic_archive_info or not pic_archive_info.default.linkables: - return True - return False - - if is_prebuilt_shared(node_data): - expect( - len(node_data.shared_libs) == 1, - "unexpected shared_libs length for somerge of {} ({})".format(target, node_data.shared_libs), - ) - expect(not node_data.deps, "prebuilt shared libs with deps not supported by somerge") - expect(not node_data.exported_deps, "prebuilt shared libs with exported_deps not supported by somerge") - soname, shlib = node_data.shared_libs.items()[0] - shared_lib = SharedLibrary( - lib = shlib, - stripped_lib = strip_lib(ctx, cxx_toolchain, shlib.output), - link_args = None, - shlib_deps = None, - can_be_asset = can_be_asset, - for_primary_apk = False, - soname = soname, - label = target, - ) - - link_group_linkable_nodes[group] = LinkGroupLinkableNode( - link = node_data.link_infos[shlib_output_style].default, - deps = [], - exported_deps = [], - shared_lib = shared_lib, - # exported linker flags for shared libs are in their linkinfo itself and are not exported from dependents - exported_linker_flags = None, - ) - group_shared_libs[soname] = MergedSharedLibrary( - soname = soname, - lib = shared_lib, - apk_module = group_data.apk_module, - solib_constituents = [], - is_actually_merged = False, - ) - continue - - # Keys in the current group stay as a Label, deps get converted to the group key. - def convert_to_merged_graph_deps(deps: list[Label], curr_group: str) -> list[[Label, str]]: - converted = [] - for dep in deps: - dep_group = target_to_link_group[dep] - if dep_group == curr_group: - converted.append(dep) - elif dep_group: - converted.append(dep_group) - return dedupe_by_value(converted) - - # For the current group, this will traverse the original linkable graph to find the LinkableNodes for - # the constituents of the group and traverses the link_group graph for non-constituent deps. - def get_merged_graph_traversal(curr_group: str, exported_only: bool) -> typing.Callable: - def traversal(key: [Label, str]) -> list[[Label, str]]: - if eval_type(Label).matches(key): - expect(target_to_link_group[key] == curr_group) - node = linkable_nodes[key] - if exported_only: - return convert_to_merged_graph_deps(node.exported_deps, curr_group) - return convert_to_merged_graph_deps(node.deps + node.exported_deps, curr_group) - else: - link_group_node = link_group_linkable_nodes[key] - if exported_only: - return link_group_node.exported_deps - return dedupe_by_value(link_group_node.deps + link_group_node.exported_deps) - - # It's easy for us to accidentally get this merged traversal wrong, so this provides one guardrail - def checked_traversal(key: [Label, str]) -> list[[Label, str]]: - return expect_dedupe(traversal(key)) - - return checked_traversal - - # note that this will possibly contain shared lib dependencies which aren't really public. that's handled below. - public_node_roots = group_data.constituents - - # this is a hybrid of buck1 somerge behavior and what we do for link groups. - # like link groups, we expose link group by setting link_whole on its link infos (this matches buck1 for - # primary constituents, but not for other constituents). 
- # like buck1, we treat all primary constituents as public node roots (as opposed to link groups that only treats - # preferred_linkage=shared and edges with an outbound dep as public roots), and then traverse exported deps from - # those roots to find all public nodes. - # the main thing to note from this is that for non-primary constituents that are identified as public, we will - # use link_whole whereas buck1 will make dependents link against them directly - exported_public_nodes = { - d: True - for d in breadth_first_traversal_by( - None, - public_node_roots, - get_merged_graph_traversal(group, True), - ) - } + link_merge_info = LinkGroupMergeInfo( + label = group, + deps = dedupe_by_value(group_deps), + exported_deps = dedupe_by_value(group_exported_deps), + constituent_link_infos = links, + ) + link_args, shlib_deps, link_deps_graph = _create_merged_link_args( + root_target = link_merge_info, + linkable_nodes = link_group_linkable_nodes, + cxx_toolchain = cxx_toolchain, + ) + link_args = [link_args] + if soname in merge_linker_args: + link_args += [LinkArgs(infos = [LinkInfo(pre_flags = merge_linker_args[soname])])] - exported_linker_flags = [] - exported_linker_post_flags = [] - links = [] - shared_lib_deps = [] - real_constituents = [] - - if is_actually_merged and merge_data.glue_linkable: - real_constituents.append(merge_data.glue_linkable[0]) - links.append(set_link_info_link_whole(merge_data.glue_linkable[1])) - - solib_constituents = [] - link_group_deps = [] - ordered_group_constituents = topo_sort_by(group_data.constituents, get_merged_graph_traversal(group, False)) - representative_label = ordered_group_constituents[0] - for key in ordered_group_constituents: - real_constituents.append(key) - if eval_type(Label).matches(key): - # This is handling targets within this link group - expect(target_to_link_group[key] == group) - node = linkable_nodes[key] - - default_solibs = list(node.shared_libs.keys()) - if not default_solibs and node.preferred_linkage == Linkage("static"): - default_solibs = [node.default_soname] - - for soname in default_solibs: - included_default_solibs[soname] = True - if node.include_in_android_mergemap: - solib_constituents.append(soname) - - node = linkable_nodes[key] - link_info = node.link_infos[archive_output_style].default - - # the propagated link info should already be wrapped with exported flags. - link_info = wrap_link_info( - link_info, - pre_flags = node.linker_flags.flags, - post_flags = node.linker_flags.post_flags, - ) - exported_linker_flags.extend(node.linker_flags.exported_flags) - exported_linker_post_flags.extend(node.linker_flags.exported_post_flags) - if key in exported_public_nodes: - link_info = set_link_info_link_whole(link_info) - else: - # This is cross-link-group deps. We add information to the link line from the LinkGroupLinkableNode of the dep. 
- link_group_node = link_group_linkable_nodes[key] - link_info = link_group_node.link - if link_group_node.shared_lib: - shared_lib_deps.append(link_group_node.shared_lib.soname) - link_group_deps.append(key) - elif key in exported_public_nodes: - link_info = set_link_info_link_whole(link_info) - - if link_group_node.exported_linker_flags: - exported_linker_flags.extend(link_group_node.exported_linker_flags[0]) - exported_linker_post_flags.extend(link_group_node.exported_linker_flags[1]) - - links.append(link_info) - - soname = group - if not is_actually_merged: - soname = linkable_nodes[group_data.constituents[0]].default_soname - debug_info.with_default_soname.append((soname, group_data.constituents[0])) - - debug_info.group_debug.setdefault( - group, - struct( - soname = soname, - merged = is_actually_merged, - constituents = real_constituents, - shlib_deps = shared_lib_deps, - exported_public_nodes = exported_public_nodes, - exported_linker_flags = exported_linker_flags, - exported_linker_post_flags = exported_linker_post_flags, - ), + shared_lib = create_shared_lib( + ctx, + output_path = output_path, + soname = soname, + link_args = link_args, + cxx_toolchain = cxx_toolchain, + shared_lib_deps = [link_group_linkable_nodes[label].shared_lib.soname.ensure_str() for label in shlib_deps], + label = group_data.constituents[0], + can_be_asset = can_be_asset, + ) + + link_group_linkable_nodes[group] = LinkGroupLinkableNode( + link = LinkInfo( + name = soname, + pre_flags = exported_linker_flags, + linkables = [SharedLibLinkable( + lib = shared_lib.lib.output, + )], + post_flags = exported_linker_post_flags, + ), + deps = link_merge_info.deps, + exported_deps = link_merge_info.exported_deps, + shared_lib = shared_lib, + # exported linker flags for shared libs are in their linkinfo itself and are not exported from dependents + exported_linker_flags = None, + ) + group_shared_libs[soname] = MergedSharedLibrary( + soname = soname, + lib = shared_lib, + apk_module = group_data.apk_module, + solib_constituents = solib_constituents, + is_actually_merged = is_actually_merged, + primary_constituents = group_data.constituents, + ) + + debug_info.group_debug.setdefault( + group, + struct( + soname = soname, + merged = is_actually_merged, + primary_constituents = group_data.constituents, + real_constituents = link_deps_graph.keys(), + shlib_deps = shlib_deps, + exported_linker_flags = exported_linker_flags, + exported_linker_post_flags = exported_linker_post_flags, + ), + ) + + debug_info.missing_default_solibs.extend([d for d in default_shared_libs if d not in included_default_solibs]) + + return group_shared_libs, debug_info + +# The current default link strategies don't produce enough information in the +# SharedLibrary objects to perform relinking. To do that best, linking should be based on +# the LinkableGraph rather than the current approach with MergedLinkInfo. +# The overall plan for linking is to move to linkable graph-based linking, but for now +# we can do it just for the case that we need it. 
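# The relinkable-links flow introduced below cross-checks its results against standard
# linking. As a minimal plain-Python illustration (hypothetical data; the real check is an
# expect over the platform maps), the shape of that sanity check is:
def check_same_outputs(original, final):
    original_keys = sorted(original.keys())
    final_keys = sorted(final.keys())
    if original_keys != final_keys:
        raise AssertionError("Unexpected differences! {} {}".format(original_keys, final_keys))

# The same sonames in a different order pass; a missing or extra soname fails loudly.
check_same_outputs(
    {"libfoo.so": "standard-foo", "libbar.so": "standard-bar"},
    {"libbar.so": "relinkable-bar", "libfoo.so": "relinkable-foo"},
)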
+def _create_all_relinkable_links( + ctx: AnalysisContext, + platform_to_original_native_linkables: dict[str, dict[str, SharedLibrary]], + graph_node_map_by_platform: dict[str, dict[Label, LinkableNode]]) -> (dict[str, dict[str, SharedLibrary]], dict[str, typing.Any]): + final_platform_to_native_linkables = {} + link_graphs_by_platform = {} + for platform in platform_to_original_native_linkables: + linkables, link_graphs = _create_relinkable_links( + ctx, + cxx_toolchain = ctx.attrs._cxx_toolchain[platform][CxxToolchainInfo], + linkable_nodes = graph_node_map_by_platform[platform], + platform = platform, + ) + link_graphs_by_platform[platform] = link_graphs + final_platform_to_native_linkables[platform] = linkables + + # sanity check that we produce the same list of linkables that are produced by standard linking. + original_sonames = sorted(platform_to_original_native_linkables.keys()) + final_sonames = sorted(final_platform_to_native_linkables.keys()) + expect(original_sonames == final_sonames, "Unexpected differences in final sonames! {} {}".format(original_sonames, final_sonames)) + + debug_outputs = {} + + # The biggest issue we could run into here is that we produce different link args than the original, so let's make that easy to debug. + for platform in platform_to_original_native_linkables: + for soname, lib in platform_to_original_native_linkables[platform].items(): + final = final_platform_to_native_linkables[platform][soname] + original_args, _ = ctx.actions.write( + "{}/{}/original.args".format(platform, soname), + [unpack_link_args(args, LinkOrdering("topological")) for args in lib.link_args] if lib.link_args else "", + allow_args = True, + ) + final_args, _ = ctx.actions.write( + "{}/{}/final.args".format(platform, soname), + [unpack_link_args(args, LinkOrdering("topological")) for args in final.link_args] if final.link_args else "", + allow_args = True, ) + debug_outputs["{}/{}/original.args".format(platform, soname)] = original_args + debug_outputs["{}/{}/final.args".format(platform, soname)] = final_args + + if lib.label in link_graphs_by_platform[platform]: + link_graph = ctx.actions.write_json( + "{}/{}/link.graph".format(platform, soname), + link_graphs_by_platform[platform][lib.label], + pretty = True, + ) + debug_outputs["{}/{}/link.graph".format(platform, soname)] = link_graph + + # TODO(cjhopman): should we also just produce a diff here? We could also consider creating sort of a merged diff or a list + # of the differing argsfiles. + # We can't compare them eagerly because the link args have large tsets that we don't want to traverse at analysis time. 
+ + return final_platform_to_native_linkables, debug_outputs - output_path = soname - if len(merged_data_by_platform) > 1: - output_path = platform + "/" + output_path - link_args = [LinkArgs(infos = links)] +def _create_relinkable_links( + ctx: AnalysisContext, + *, + cxx_toolchain: CxxToolchainInfo, + linkable_nodes: dict[Label, LinkableNode], + platform: str) -> (dict[str, SharedLibrary], dict[Label, dict[Label, list[Label]]]): + linkable_nodes_graph = {target: value.deps + value.exported_deps for target, value in linkable_nodes.items()} + shared_libs = {} + shared_lib_overrides = {} + transitive_linkable_cache = {} + debug_link_deps = {} + for target in post_order_traversal(linkable_nodes_graph): + node = linkable_nodes[target] + if node.preferred_linkage == Linkage("static") or not _has_linkable(node): + continue + if _is_prebuilt_shared(node): + shared_lib = _shared_lib_for_prebuilt_shared(ctx, cxx_toolchain, target, node, linkable_nodes, transitive_linkable_cache, platform) + else: + soname = node.default_soname + output_path = "relinkable-libs/{}/{}".format(platform, soname) + link_args, shlib_deps, link_deps_graph = _create_link_args( + cxx_toolchain = cxx_toolchain, + root_target = target, + node = node, + graph = linkable_nodes, + shared_lib_overrides = shared_lib_overrides, + ) + debug_link_deps[target] = link_deps_graph shared_lib = create_shared_lib( ctx, output_path = output_path, soname = soname, - link_args = link_args, + link_args = [link_args], cxx_toolchain = cxx_toolchain, - shared_lib_deps = shared_lib_deps, - label = representative_label, - can_be_asset = can_be_asset, + shared_lib_deps = [shared_lib_overrides[lib].name for lib in shlib_deps if lib in shared_lib_overrides], + label = target, + can_be_asset = node.can_be_asset, ) + shared_lib_overrides[target] = LinkInfo( + name = shared_lib.soname.ensure_str(), + pre_flags = node.linker_flags.exported_flags, + linkables = [SharedLibLinkable( + lib = shared_lib.lib.output, + )], + post_flags = node.linker_flags.exported_post_flags, + ) + shared_libs[shared_lib.soname.ensure_str()] = shared_lib - link_group_linkable_nodes[group] = LinkGroupLinkableNode( - link = LinkInfo( - name = soname, - pre_flags = exported_linker_flags, - linkables = [SharedLibLinkable( - lib = shared_lib.lib.output, - )], - post_flags = exported_linker_post_flags, - ), - deps = link_group_deps, - exported_deps = [], - shared_lib = shared_lib, - # exported linker flags for shared libs are in their linkinfo itself and are not exported from dependents - exported_linker_flags = None, - ) - group_shared_libs[soname] = MergedSharedLibrary( - soname = soname, - lib = shared_lib, - apk_module = group_data.apk_module, - solib_constituents = solib_constituents, - is_actually_merged = is_actually_merged, + return {lib.soname.ensure_str(): lib for lib in shared_libs.values()}, debug_link_deps + +# To support migration from a tset-based link strategy, we are trying to match buck's internal tset +# traversal logic here. 
Look for implementation of TopologicalTransitiveSetIteratorGen
+def _rust_matching_topological_traversal(
+        roots: list[typing.Any],
+        get_nodes_to_traverse_func: typing.Callable) -> list[typing.Any]:
+    counts = {}
+
+    for label in depth_first_traversal_by(None, roots, get_nodes_to_traverse_func, GraphTraversal("preorder-right-to-left")):
+        for dep in get_nodes_to_traverse_func(label):
+            if dep in counts:
+                counts[dep] += 1
+            else:
+                counts[dep] = 1
+
+    # Some of the targets in roots might be transitive deps of others; we only put those that are true roots
+    # on the stack at this point
+    stack = [root_target for root_target in roots if root_target not in counts]
+    true_roots = len(stack)
+
+    result = []
+    for _ in range(2000000000):  # Starlark has no while loop; this acts as `while stack:` with a generous bound
+        if not stack:
+            break
+        next = stack.pop()
+        result.append(next)
+        deps = get_nodes_to_traverse_func(next)
+        for child in deps[::-1]:  # reverse order ensures we put things on the stack in the same order as rust's tset traversal
+            counts[child] -= 1
+            if counts[child] == 0:
+                stack.append(child)
+
+    if len(result) != true_roots + len(counts):
+        fail()  # fail_cycle
+
+    return result
+
+# For example, with roots = [A] and edges A -> [B, C], B -> [D], C -> [D], the preorder pass
+# counts D twice, so the loop above emits A, B, C, D, appending D only after both B and C
+# have decremented its count, matching the Rust tset's topological iteration order.
+
+def _create_link_args(
+        *,
+        cxx_toolchain: CxxToolchainInfo,
+        root_target: Label,
+        node: LinkableNode,
+        graph: dict[Label, LinkableNode],
+        shared_lib_overrides: dict[Label, LinkInfo] | None = None) -> (LinkArgs, list[Label], dict[Label, list[Label]]):
+    if LinkOrdering(cxx_toolchain.linker_info.link_ordering) != LinkOrdering("topological"):
+        fail("don't yet support link ordering {}".format(cxx_toolchain.linker_info.link_ordering))
+
+    # TODO(cjhopman): verify picbehavior == pic
+    link_strategy = node.default_link_strategy
+    if not shared_lib_overrides:
+        shared_lib_overrides = {}
+
+    link_traversal_cache = {}
+
+    def link_traversal(label: Label) -> list[Label]:
+        def link_traversal_deps(label):
+            node = graph[label]
+            if label == root_target:
+                return node.deps + node.exported_deps
+            actual_linkable_type = get_lib_output_style(link_strategy, node.preferred_linkage, PicBehavior("supported"))
+            if actual_linkable_type == LibOutputStyle("shared_lib"):
+                return node.exported_deps
+            else:
+                return node.deps + node.exported_deps
+
+        res = link_traversal_cache.get(label, None)
+        if res:
+            return res
+        res = link_traversal_deps(label)
+        link_traversal_cache[label] = res
+        return res
+
+    links = []
+    shlib_deps = []
+    for target in _rust_matching_topological_traversal([root_target], link_traversal):
+        is_root = target == root_target
+        node = graph[target]
+        preferred_linkable_type = get_lib_output_style(link_strategy, node.preferred_linkage, PicBehavior("supported"))
+
+        if is_root:
+            link_info = node.link_infos[LibOutputStyle("pic_archive")].default
+            link_info = wrap_link_info(
+                link_info,
+                pre_flags = node.linker_flags.flags,
+                post_flags = node.linker_flags.post_flags,
+            )
+            links.append(set_link_info_link_whole(link_info))
+        elif preferred_linkable_type == LibOutputStyle("shared_lib"):
+            if target in shared_lib_overrides:
+                links.append(shared_lib_overrides[target])
+            else:
+                links.append(node.link_infos[LibOutputStyle("shared_lib")].default)
+            shlib_deps.append(target)
+        else:
+            links.append(node.link_infos[preferred_linkable_type].default)
-        shared_libs_by_platform[platform] = group_shared_libs
-        debug_info.missing_default_solibs.extend([d for d in merge_data.default_shared_libs if d not in included_default_solibs])
+    extra_runtime_flags = cxx_toolchain.linker_info.shared_dep_runtime_ld_flags or []
+    if extra_runtime_flags:
+        links.append(LinkInfo(pre_flags = 
extra_runtime_flags)) + return LinkArgs(infos = links), shlib_deps, link_traversal_cache - return MergedLinkables( - shared_libs_by_platform = shared_libs_by_platform, - debug_info = debug_info_by_platform, - ) +# Equivalent to _create_link_args but for somerge +def _create_merged_link_args( + *, + cxx_toolchain: CxxToolchainInfo, + root_target: LinkGroupMergeInfo, + linkable_nodes: dict[GroupLabel, LinkGroupLinkableNode]) -> (LinkArgs, list[GroupLabel], dict[GroupLabel, list[GroupLabel]]): + if LinkOrdering(cxx_toolchain.linker_info.link_ordering) != LinkOrdering("topological"): + fail("don't yet support link ordering {}".format(cxx_toolchain.linker_info.link_ordering)) + + link_traversal_cache = {} + + def link_traversal(label: GroupLabel) -> list[GroupLabel]: + def link_traversal_deps(label: GroupLabel): + if label == root_target.label: + return root_target.deps + root_target.exported_deps + + linkable_node = linkable_nodes[label] + if linkable_node.shared_lib: + return linkable_node.exported_deps + else: + return linkable_node.deps + linkable_node.exported_deps + + res = link_traversal_cache.get(label, None) + if res: + return res + res = link_traversal_deps(label) + link_traversal_cache[label] = res + return res + + links = [] + shlib_deps = [] + for label in _rust_matching_topological_traversal([root_target.label], link_traversal): + if label == root_target.label: + links.extend(root_target.constituent_link_infos) + else: + linkable_node = linkable_nodes[label] + links.append(linkable_node.link) + if linkable_node.shared_lib: + shlib_deps.append(label) + + extra_runtime_flags = cxx_toolchain.linker_info.shared_dep_runtime_ld_flags or [] + if extra_runtime_flags: + links.append(LinkInfo(pre_flags = extra_runtime_flags)) + return LinkArgs(infos = links), shlib_deps, link_traversal_cache # When linking shared libraries, by default, all symbols are exported from the library. In a # particular application, though, many of those symbols may never be used. Ideally, in each apk, @@ -1166,6 +1626,20 @@ def _get_merged_linkables( # 5. extract the list of undefined symbols in the relinked libs (i.e. those symbols needed from dependencies and what had been # used in (1) above from higher nodes). 
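# A plain-Python sketch of the symbol-keeping decision described in the comment above
# (illustrative only; the real logic lives in create_relinker_version_script further below,
# and make_version_script plus its inputs are hypothetical). A defined symbol stays global
# if a dependent needs it, if it looks like a JNI entry point, or if it matches the
# relinker allowlist; everything else is made local via a linker version script.
import re

def make_version_script(provided_symbols, needed_symbols, allowlist_patterns):
    keep = sorted([
        s
        for s in provided_symbols
        if s in needed_symbols or "Java_" in s or any(re.match(p, s) for p in allowlist_patterns)
    ])
    lines = ["{"]
    if keep:
        lines.append("global:")
        lines.extend("  {};".format(s) for s in keep)
    lines.append("local: *;")
    lines.append("};")
    return "\n".join(lines)

# JNI entry points survive even when nothing links against them; `helper` survives because
# a dependent listed it as undefined; `internal_fn` is hidden from the dynamic symbol table.
script = make_version_script(
    provided_symbols = ["Java_com_example_Foo_bar", "helper", "internal_fn"],
    needed_symbols = {"helper"},
    allowlist_patterns = [],
)
assert "internal_fn" not in script and "local: *;" in script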
def relink_libraries(ctx: AnalysisContext, libraries_by_platform: dict[str, dict[str, SharedLibrary]]) -> dict[str, dict[str, SharedLibrary]]: + relinker_extra_deps = getattr(ctx.attrs, "relinker_extra_deps", None) + red_linkables = {} + if relinker_extra_deps: + for red_elem in relinker_extra_deps: + for platform, red in red_elem.items(): + red_link_graph = red.get(LinkableGraph) + expect(red_link_graph != None, "relinker_extra_deps (`{}`) should be a linkable target", red.label) + red_linkable = red_link_graph.nodes.value.linkable + expect(red_linkable != None, "relinker_extra_deps (`{}`) should be a linkable target", red.label) + expect(red_linkable.preferred_linkage == Linkage("static"), "buck2 currently only supports preferred_linkage='static' relinker_extra_deps") + if platform not in red_linkables: + red_linkables[platform] = [] + red_linkables[platform].append((red.label, red_linkable.link_infos[LibOutputStyle("pic_archive")].default)) + relinked_libraries_by_platform = {} for platform, shared_libraries in libraries_by_platform.items(): cxx_toolchain = ctx.attrs._cxx_toolchain[platform][CxxToolchainInfo] @@ -1184,7 +1658,7 @@ def relink_libraries(ctx: AnalysisContext, libraries_by_platform: dict[str, dict shlib_graph[soname].append(dep) rev_shlib_graph.setdefault(dep, []).append(soname) needed_symbols_files = {} - for soname in topo_sort(shlib_graph): + for soname in pre_order_traversal(shlib_graph): if soname in unsupported_libs: relinked_libraries[soname] = shared_libraries[soname] continue @@ -1198,11 +1672,15 @@ def relink_libraries(ctx: AnalysisContext, libraries_by_platform: dict[str, dict create_relinker_version_script( ctx.actions, output = relinker_version_script, - relinker_blocklist = [regex(s) for s in ctx.attrs.relinker_whitelist], + relinker_allowlist = [regex(s) for s in ctx.attrs.relinker_whitelist], provided_symbols = provided_symbols_file, needed_symbols = needed_symbols_for_this, ) - relinker_link_args = original_shared_library.link_args + [LinkArgs(flags = [cmd_args(relinker_version_script, format = "-Wl,--version-script={}")])] + relinker_link_args = ( + original_shared_library.link_args + + [LinkArgs(flags = [cmd_args(relinker_version_script, format = "-Wl,--version-script={}")])] + + ([LinkArgs(infos = [set_link_info_link_whole(red_linkable[1]) for red_linkable in red_linkables[platform]])] if len(red_linkables) > 0 else []) + ) shared_lib = create_shared_lib( ctx, @@ -1224,9 +1702,9 @@ def relink_libraries(ctx: AnalysisContext, libraries_by_platform: dict[str, dict return relinked_libraries_by_platform def extract_provided_symbols(ctx: AnalysisContext, toolchain: CxxToolchainInfo, lib: Artifact) -> Artifact: - return extract_global_syms(ctx, toolchain, lib, "relinker_extract_provided_symbols") + return extract_defined_syms(ctx, toolchain, lib, "relinker_extract_provided_symbols") -def create_relinker_version_script(actions: AnalysisActions, relinker_blocklist: list[regex], output: Artifact, provided_symbols: Artifact, needed_symbols: list[Artifact]): +def create_relinker_version_script(actions: AnalysisActions, relinker_allowlist: list[regex], output: Artifact, provided_symbols: Artifact, needed_symbols: list[Artifact]): def create_version_script(ctx, artifacts, outputs): all_needed_symbols = {} for symbols_file in needed_symbols: @@ -1243,7 +1721,7 @@ def create_relinker_version_script(actions: AnalysisActions, relinker_blocklist: elif "Java_" in symbol: keep_symbol = True else: - for pattern in relinker_blocklist: + for pattern in relinker_allowlist: if 
pattern.match(symbol): keep_symbol = True break @@ -1260,7 +1738,7 @@ def create_relinker_version_script(actions: AnalysisActions, relinker_blocklist: version_script += "};\n" ctx.actions.write(outputs[output], version_script) - actions.dynamic_output(dynamic = needed_symbols + [provided_symbols], inputs = [], outputs = [output], f = create_version_script) + actions.dynamic_output(dynamic = needed_symbols + [provided_symbols], inputs = [], outputs = [output.as_output()], f = create_version_script) def extract_undefined_symbols(ctx: AnalysisContext, toolchain: CxxToolchainInfo, lib: Artifact) -> Artifact: return extract_undefined_syms(ctx, toolchain, lib, "relinker_extract_undefined_symbols") @@ -1274,15 +1752,16 @@ def union_needed_symbols(actions: AnalysisActions, output: Artifact, needed_symb symbols = sorted(unioned_symbols.keys()) ctx.actions.write(outputs[output], symbols) - actions.dynamic_output(dynamic = needed_symbols, inputs = [], outputs = [output], f = compute_union) + actions.dynamic_output(dynamic = needed_symbols, inputs = [], outputs = [output.as_output()], f = compute_union) -def strip_lib(ctx: AnalysisContext, cxx_toolchain: CxxToolchainInfo, shlib: Artifact): +def strip_lib(ctx: AnalysisContext, cxx_toolchain: CxxToolchainInfo, shlib: Artifact, output_path: [str, None] = None): strip_flags = cmd_args(get_strip_non_global_flags(cxx_toolchain)) return strip_object( ctx, cxx_toolchain, shlib, strip_flags, + output_path = output_path, ) def create_shared_lib( @@ -1295,6 +1774,15 @@ def create_shared_lib( shared_lib_deps: list[str], label: Label, can_be_asset: bool) -> SharedLibrary: + for link_arg in link_args: + flags = link_arg.flags or [] + for info in link_arg.infos or []: + flags += info.pre_flags or [] + flags += info.post_flags or [] + for flag in flags: + flag = str(flag) + if flag.endswith("--exclude-libs,ALL") or flag.endswith("--exclude-libs=ALL"): + fail("The behavior of --exclude-libs,ALL is not predictable when building Android binaries and may cause runtime crashes, remove it from {} (or its merged constituents)".format(label)) link_result = cxx_link_shared_library( ctx = ctx, output = output_path, @@ -1309,7 +1797,7 @@ def create_shared_lib( ) shlib = link_result.linked_object - return SharedLibrary( + return create_shlib( lib = shlib, stripped_lib = strip_lib(ctx, cxx_toolchain, shlib.output), shlib_deps = shared_lib_deps, diff --git a/prelude/android/android_binary_resources_rules.bzl b/prelude/android/android_binary_resources_rules.bzl index 7766a4a450a72..b12240075c546 100644 --- a/prelude/android/android_binary_resources_rules.bzl +++ b/prelude/android/android_binary_resources_rules.bzl @@ -17,10 +17,19 @@ load( "@prelude//java:java_providers.bzl", "JavaPackagingDep", # @unused Used as type ) +load("@prelude//utils:expect.bzl", "expect") load("@prelude//utils:set.bzl", "set_type") # @unused Used as a type -load("@prelude//utils:utils.bzl", "expect") +load("@prelude//utils:utils.bzl", "flatten") load("@prelude//decls/android_rules.bzl", "RType") +_FilteredResourcesOutput = record( + resource_infos = list[AndroidResourceInfo], + voltron_res = list[Artifact], + override_symbols = Artifact | None, + string_files_list = Artifact | None, + string_files_res_dirs = list[Artifact], +) + def get_android_binary_resources_info( ctx: AnalysisContext, deps: list[Dependency], @@ -28,32 +37,35 @@ def get_android_binary_resources_info( java_packaging_deps: list[JavaPackagingDep], use_proto_format: bool, referenced_resources_lists: list[Artifact], - apk_module_graph_file: 
[Artifact, None] = None, + apk_module_graph_file: Artifact | None = None, manifest_entries: dict = {}, resource_infos_to_exclude: [set_type, None] = None, r_dot_java_packages_to_exclude: [list[str], None] = [], generate_strings_and_ids_separately: [bool, None] = True, - aapt2_min_sdk: [str, None] = None, aapt2_preferred_density: [str, None] = None) -> AndroidBinaryResourcesInfo: android_toolchain = ctx.attrs._android_toolchain[AndroidToolchainInfo] - unfiltered_resource_infos = [ + + # Use reverse topological sort in resource merging to make sure a resource target will overwrite its dependencies. + unfiltered_resource_infos = reversed([ resource_info - for resource_info in list(android_packageable_info.resource_infos.traverse() if android_packageable_info.resource_infos else []) + for resource_info in list(android_packageable_info.resource_infos.traverse(ordering = "topological") if android_packageable_info.resource_infos else []) if not (resource_infos_to_exclude and resource_infos_to_exclude.contains(resource_info.raw_target)) - ] - resource_infos, override_symbols, string_files_list, string_files_res_dirs = _maybe_filter_resources( + ]) + filtered_resources_output = _maybe_filter_resources( ctx, unfiltered_resource_infos, android_toolchain, ) + resource_infos = filtered_resources_output.resource_infos - android_manifest = get_manifest(ctx, android_packageable_info, manifest_entries) + android_manifest = get_manifest(ctx, android_packageable_info, manifest_entries, should_replace_application_id_placeholders = True) non_proto_format_aapt2_link_info, proto_format_aapt2_link_info = get_aapt2_link( ctx, ctx.attrs._android_toolchain[AndroidToolchainInfo], resource_infos, android_manifest, + manifest_entries = getattr(ctx.attrs, "manifest_entries", {}), includes_vector_drawables = getattr(ctx.attrs, "includes_vector_drawables", False), no_auto_version = getattr(ctx.attrs, "no_auto_version_resources", False), no_version_transitions = getattr(ctx.attrs, "no_version_transitions_resources", False), @@ -67,13 +79,11 @@ def get_android_binary_resources_info( extra_filtered_resources = getattr(ctx.attrs, "extra_filtered_resources", []), locales = getattr(ctx.attrs, "locales", []) or getattr(ctx.attrs, "locales_for_binary_resources", []), filter_locales = getattr(ctx.attrs, "aapt2_locale_filtering", False) or bool(getattr(ctx.attrs, "locales_for_binary_resources", [])), - min_sdk = aapt2_min_sdk, preferred_density = aapt2_preferred_density, ) module_manifests = _get_module_manifests( ctx, - android_packageable_info, manifest_entries, apk_module_graph_file, use_proto_format, @@ -86,12 +96,14 @@ def get_android_binary_resources_info( cxx_resources = get_cxx_resources(ctx, deps) is_exopackaged_enabled_for_resources = "resources" in getattr(ctx.attrs, "exopackage_modes", []) - primary_resources_apk, exopackaged_assets, exopackaged_assets_hash = _merge_assets( + primary_resources_apk, exopackaged_assets, exopackaged_assets_hash, module_assets_apks_dir = _merge_assets( ctx, is_exopackaged_enabled_for_resources, aapt2_link_info.primary_resources_apk, resource_infos, cxx_resources, + use_proto_format, # indicates that this is a .aab build + apk_module_graph_file, ) if is_exopackaged_enabled_for_resources: @@ -127,9 +139,9 @@ def get_android_binary_resources_info( exopackage_info = None r_dot_txt = aapt2_link_info.r_dot_txt - override_symbols_paths = [override_symbols] if override_symbols else [] + override_symbols_paths = [filtered_resources_output.override_symbols] if 
filtered_resources_output.override_symbols else [] resources = [resource for resource in resource_infos if resource.res != None] - r_dot_javas = [] if len(resources) == 0 else generate_r_dot_javas( + r_dot_java_infos = generate_r_dot_javas( ctx, ctx.attrs._android_toolchain[AndroidToolchainInfo].merge_android_resources[RunInfo], resources, @@ -149,21 +161,21 @@ def get_android_binary_resources_info( string_source_map = _maybe_generate_string_source_map( ctx.actions, getattr(ctx.attrs, "build_string_source_map", False), - resources, + [resource.res for resource in resources if resource.res != None], android_toolchain, ) voltron_string_source_map = _maybe_generate_string_source_map( ctx.actions, getattr(ctx.attrs, "is_voltron_language_pack_enabled", False), - resources, + filtered_resources_output.voltron_res, android_toolchain, is_voltron_string_source_map = True, ) packaged_string_assets = _maybe_package_strings_as_assets( ctx, - string_files_list, - string_files_res_dirs, + filtered_resources_output.string_files_list, + filtered_resources_output.string_files_res_dirs, r_dot_txt, android_toolchain, ) @@ -172,10 +184,11 @@ def get_android_binary_resources_info( exopackage_info = exopackage_info, manifest = android_manifest, module_manifests = module_manifests, + module_assets = module_assets_apks_dir, packaged_string_assets = packaged_string_assets, primary_resources_apk = primary_resources_apk, proguard_config_file = aapt2_link_info.proguard_config_file, - r_dot_javas = r_dot_javas, + r_dot_java_infos = r_dot_java_infos, string_source_map = string_source_map, voltron_string_source_map = voltron_string_source_map, jar_files_that_may_contain_resources = prebuilt_jars, @@ -185,7 +198,7 @@ def get_android_binary_resources_info( def _maybe_filter_resources( ctx: AnalysisContext, resources: list[AndroidResourceInfo], - android_toolchain: AndroidToolchainInfo) -> (list[AndroidResourceInfo], [Artifact, None], [Artifact, None], list[Artifact]): + android_toolchain: AndroidToolchainInfo) -> _FilteredResourcesOutput: resources_filter_strings = getattr(ctx.attrs, "resource_filter", []) resources_filter = _get_resources_filter(resources_filter_strings) resource_compression_mode = getattr(ctx.attrs, "resource_compression", "disabled") @@ -203,10 +216,16 @@ def _maybe_filter_resources( ) if not needs_resource_filtering: - return resources, None, None, [] + return _FilteredResourcesOutput( + resource_infos = resources, + voltron_res = [resource.res for resource in resources if resource.res != None], + override_symbols = None, + string_files_list = None, + string_files_res_dirs = [], + ) - res_info_to_out_res_dir = {} - voltron_res_info_to_out_res_dir = {} + res_to_out_res_dir = {} + voltron_res_to_out_res_dir = {} res_infos_with_no_res = [] skip_crunch_pngs = getattr(ctx.attrs, "skip_crunch_pngs", None) or False is_voltron_language_pack_enabled = getattr(ctx.attrs, "is_voltron_language_pack_enabled", False) @@ -215,36 +234,28 @@ def _maybe_filter_resources( res_infos_with_no_res.append(resource) else: filtered_res = ctx.actions.declare_output("filtered_res_{}".format(i), dir = True) - res_info_to_out_res_dir[resource] = filtered_res + res_to_out_res_dir[resource.res] = filtered_res if is_voltron_language_pack_enabled: filtered_res_for_voltron = ctx.actions.declare_output("filtered_res_for_voltron_{}".format(i), dir = True) - voltron_res_info_to_out_res_dir[resource] = filtered_res_for_voltron + voltron_res_to_out_res_dir[resource.res] = filtered_res_for_voltron filter_resources_cmd = 
cmd_args(android_toolchain.filter_resources[RunInfo]) - in_res_dir_to_out_res_dir_dict = { - in_res.res: out_res - for in_res, out_res in res_info_to_out_res_dir.items() - } - in_res_dir_to_out_res_dir_map = ctx.actions.write_json("in_res_dir_to_out_res_dir_map", {"res_dir_map": in_res_dir_to_out_res_dir_dict}) - in_res_dirs = [in_res.res for in_res in res_info_to_out_res_dir.keys()] - filter_resources_cmd.hidden(in_res_dirs) - filter_resources_cmd.hidden([out_res.as_output() for out_res in res_info_to_out_res_dir.values()]) + in_res_dirs = res_to_out_res_dir.keys() + filter_resources_cmd.add(cmd_args( + hidden = + in_res_dirs + [out_res.as_output() for out_res in res_to_out_res_dir.values()], + )) filter_resources_cmd.add([ "--in-res-dir-to-out-res-dir-map", - in_res_dir_to_out_res_dir_map, + ctx.actions.write_json("in_res_dir_to_out_res_dir_map", {"res_dir_map": res_to_out_res_dir}), ]) if is_voltron_language_pack_enabled: - voltron_in_res_dir_to_out_res_dir_dict = { - in_res.res: out_res - for in_res, out_res in voltron_res_info_to_out_res_dir.items() - } - voltron_in_res_dir_to_out_res_dir_map = ctx.actions.write_json("voltron_in_res_dir_to_out_res_dir_map", {"res_dir_map": voltron_in_res_dir_to_out_res_dir_dict}) - filter_resources_cmd.hidden([out_res.as_output() for out_res in voltron_res_info_to_out_res_dir.values()]) + filter_resources_cmd.add(cmd_args(hidden = [out_res.as_output() for out_res in voltron_res_to_out_res_dir.values()])) filter_resources_cmd.add([ "--voltron-in-res-dir-to-out-res-dir-map", - voltron_in_res_dir_to_out_res_dir_map, + ctx.actions.write_json("voltron_in_res_dir_to_out_res_dir_map", {"res_dir_map": voltron_res_to_out_res_dir}), ]) if resources_filter: @@ -278,6 +289,12 @@ def _maybe_filter_resources( ctx.actions.write("not_filtered_string_dirs", not_filtered_string_dirs), ]) + allowlisted_locales = {resource.res: resource.allowlisted_locales for resource in resources if resource.allowlisted_locales} + filter_resources_cmd.add([ + "--allowlisted-locales", + ctx.actions.write_json("allowlisted_locales", allowlisted_locales), + ]) + if needs_resource_filtering_for_locales: filter_resources_cmd.add([ "--locales", @@ -305,7 +322,7 @@ def _maybe_filter_resources( if resource.res == None: continue - filtered_res = res_info_to_out_res_dir[resource] + filtered_res = res_to_out_res_dir[resource.res] filtered_aapt2_compile_output = aapt2_compile( ctx, filtered_res, @@ -324,11 +341,12 @@ def _maybe_filter_resources( ) filtered_resource_infos.append(filtered_resource) - return ( - res_infos_with_no_res + filtered_resource_infos, - override_symbols_artifact, - all_strings_files_list, - all_strings_files_res_dirs, + return _FilteredResourcesOutput( + resource_infos = res_infos_with_no_res + filtered_resource_infos, + voltron_res = voltron_res_to_out_res_dir.values(), + override_symbols = override_symbols_artifact, + string_files_list = all_strings_files_list, + string_files_res_dirs = all_strings_files_res_dirs, ) ResourcesFilter = record( @@ -350,14 +368,13 @@ def _get_resources_filter(resources_filter_strings: list[str]) -> [ResourcesFilt def _maybe_generate_string_source_map( actions: AnalysisActions, should_build_source_string_map: bool, - resource_infos: list[AndroidResourceInfo], + res_dirs: list[Artifact], android_toolchain: AndroidToolchainInfo, - is_voltron_string_source_map: bool = False) -> [Artifact, None]: - if not should_build_source_string_map or len(resource_infos) == 0: + is_voltron_string_source_map: bool = False) -> Artifact | None: + if not 
should_build_source_string_map or len(res_dirs) == 0: return None prefix = "voltron_" if is_voltron_string_source_map else "" - res_dirs = [resource_info.res for resource_info in resource_infos] output = actions.declare_output("{}string_source_map".format(prefix), dir = True) res_dirs_file = actions.write("resource_dirs_for_{}string_source_map".format(prefix), res_dirs) generate_string_source_map_cmd = cmd_args([ @@ -366,7 +383,7 @@ def _maybe_generate_string_source_map( res_dirs_file, "--output", output.as_output(), - ]).hidden(res_dirs) + ], hidden = res_dirs) if is_voltron_string_source_map: generate_string_source_map_cmd.add("--is-voltron") @@ -377,10 +394,10 @@ def _maybe_generate_string_source_map( def _maybe_package_strings_as_assets( ctx: AnalysisContext, - string_files_list: [Artifact, None], + string_files_list: Artifact | None, string_files_res_dirs: list[Artifact], r_dot_txt: Artifact, - android_toolchain: AndroidToolchainInfo) -> [Artifact, None]: + android_toolchain: AndroidToolchainInfo) -> Artifact | None: resource_compression_mode = getattr(ctx.attrs, "resource_compression", "disabled") is_store_strings_as_assets = _is_store_strings_as_assets(resource_compression_mode) expect(is_store_strings_as_assets == (string_files_list != None)) @@ -406,7 +423,7 @@ def _maybe_package_strings_as_assets( string_assets_zip.as_output(), "--all-locales-string-assets-zip", all_locales_string_assets_zip.as_output(), - ]).hidden(string_files_res_dirs) + ], hidden = string_files_res_dirs) if locales: package_strings_as_assets_cmd.add("--locales", ",".join(locales)) @@ -418,7 +435,8 @@ def _maybe_package_strings_as_assets( def get_manifest( ctx: AnalysisContext, android_packageable_info: AndroidPackageableInfo, - manifest_entries: dict) -> Artifact: + manifest_entries: dict, + should_replace_application_id_placeholders: bool) -> Artifact: robolectric_manifest = getattr(ctx.attrs, "robolectric_manifest", None) if robolectric_manifest: return robolectric_manifest @@ -446,7 +464,7 @@ def get_manifest( manifest_entries.get("placeholders", {}), ) - if android_toolchain.set_application_id_to_specified_package: + if android_toolchain.set_application_id_to_specified_package and should_replace_application_id_placeholders: android_manifest_with_replaced_application_id = ctx.actions.declare_output("android_manifest_with_replaced_application_id/AndroidManifest.xml") replace_application_id_placeholders_cmd = cmd_args([ ctx.attrs._android_toolchain[AndroidToolchainInfo].replace_application_id_placeholders[RunInfo], @@ -465,9 +483,8 @@ def get_manifest( def _get_module_manifests( ctx: AnalysisContext, - android_packageable_info: AndroidPackageableInfo, manifest_entries: dict, - apk_module_graph_file: [Artifact, None], + apk_module_graph_file: Artifact | None, use_proto_format: bool, primary_resources_apk: Artifact) -> list[Artifact]: if not apk_module_graph_file: @@ -484,16 +501,9 @@ def _get_module_manifests( android_toolchain = ctx.attrs._android_toolchain[AndroidToolchainInfo] module_manifests_dir = ctx.actions.declare_output("module_manifests_dir", dir = True) - android_manifests = list(android_packageable_info.manifests.traverse()) if android_packageable_info.manifests else [] def get_manifests_modular(ctx: AnalysisContext, artifacts, outputs): apk_module_graph_info = get_apk_module_graph_info(ctx, apk_module_graph_file, artifacts) - get_module_from_target = apk_module_graph_info.target_to_module_mapping_function - module_to_manifests = {} - for android_manifest in android_manifests: - module_name = 
get_module_from_target(str(android_manifest.target_label)) - if not is_root_module(module_name): - module_to_manifests.setdefault(module_name, []).append(android_manifest.manifest) merged_module_manifests = {} for module_name in apk_module_graph_info.module_list: @@ -505,7 +515,8 @@ def _get_module_manifests( android_toolchain.generate_manifest[RunInfo], module_manifest_skeleton, module_name, - module_to_manifests.get(module_name, []), + # Note - the expectation of voltron modules is that the AndroidManifest entries are merged into the base APK's manifest. + None, manifest_entries.get("placeholders", {}), ) @@ -525,7 +536,7 @@ def _get_module_manifests( ctx.actions.dynamic_output( dynamic = [apk_module_graph_file], inputs = [], - outputs = [module_manifests_dir], + outputs = [module_manifests_dir.as_output()], f = get_manifests_modular, ) @@ -538,35 +549,99 @@ def _merge_assets( is_exopackaged_enabled_for_resources: bool, base_apk: Artifact, resource_infos: list[AndroidResourceInfo], - cxx_resources: [Artifact, None]) -> (Artifact, [Artifact, None], [Artifact, None]): - assets_dirs = [resource_info.assets for resource_info in resource_infos if resource_info.assets] - if cxx_resources != None: - assets_dirs.extend([cxx_resources]) - if len(assets_dirs) == 0: - return base_apk, None, None - - merge_assets_cmd = cmd_args(ctx.attrs._android_toolchain[AndroidToolchainInfo].merge_assets[RunInfo]) + cxx_resources: Artifact | None, + is_bundle_build: bool, + apk_module_graph_file: Artifact | None) -> (Artifact, Artifact | None, Artifact | None, Artifact | None): + expect( + not (is_exopackaged_enabled_for_resources and is_bundle_build), + "Cannot use exopackage-for-resources with AAB builds.", + ) + expect( + not (is_exopackaged_enabled_for_resources and apk_module_graph_file), + "Cannot use exopackage-for-resources with Voltron builds.", + ) + asset_resource_infos = [resource_info for resource_info in resource_infos if resource_info.assets] + if not asset_resource_infos and not cxx_resources: + return base_apk, None, None, None merged_assets_output = ctx.actions.declare_output("merged_assets.ap_") - merge_assets_cmd.add(["--output-apk", merged_assets_output.as_output()]) - if is_exopackaged_enabled_for_resources: - merged_assets_output_hash = ctx.actions.declare_output("merged_assets.ap_.hash") - merge_assets_cmd.add(["--output-apk-hash", merged_assets_output_hash.as_output()]) - else: - merge_assets_cmd.add(["--base-apk", base_apk]) - merged_assets_output_hash = None + def get_common_merge_assets_cmd( + ctx: AnalysisContext, + output_apk: Artifact) -> (cmd_args, Artifact | None): + merge_assets_cmd = cmd_args(ctx.attrs._android_toolchain[AndroidToolchainInfo].merge_assets[RunInfo]) + merge_assets_cmd.add(["--output-apk", output_apk.as_output()]) - assets_dirs_file = ctx.actions.write("assets_dirs", assets_dirs) - merge_assets_cmd.add(["--assets-dirs", assets_dirs_file]) - merge_assets_cmd.hidden(assets_dirs) + if getattr(ctx.attrs, "extra_no_compress_asset_extensions", None): + merge_assets_cmd.add("--extra-no-compress-asset-extensions") + merge_assets_cmd.add(ctx.attrs.extra_no_compress_asset_extensions) - ctx.actions.run(merge_assets_cmd, category = "merge_assets") + if is_exopackaged_enabled_for_resources: + merged_assets_output_hash = ctx.actions.declare_output("merged_assets.ap_.hash") + merge_assets_cmd.add(["--output-apk-hash", merged_assets_output_hash.as_output()]) + else: + merge_assets_cmd.add(["--base-apk", base_apk]) + merged_assets_output_hash = None + + 
merge_assets_cmd.add("--binary-type", "aab" if is_bundle_build else "apk") + + return merge_assets_cmd, merged_assets_output_hash + + if apk_module_graph_file: + declared_outputs = [merged_assets_output] + if is_bundle_build: + # For Voltron AAB builds, we need to put assets into a separate "APK" for each module. + module_assets_apks_dir = ctx.actions.declare_output("module_assets_apks") + declared_outputs.append(module_assets_apks_dir) + else: + module_assets_apks_dir = None + + def merge_assets_modular(ctx: AnalysisContext, artifacts, outputs): + apk_module_graph_info = get_apk_module_graph_info(ctx, apk_module_graph_file, artifacts) + + module_to_assets_dirs = {} + if cxx_resources != None: + module_to_assets_dirs.setdefault(ROOT_MODULE, []).extend([cxx_resources]) + for asset_resource_info in asset_resource_infos: + module_name = apk_module_graph_info.target_to_module_mapping_function(str(asset_resource_info.raw_target)) + module_to_assets_dirs.setdefault(module_name, []).append(asset_resource_info.assets) + + merge_assets_cmd, _ = get_common_merge_assets_cmd(ctx, outputs[merged_assets_output]) + + if is_bundle_build: + merge_assets_cmd.add(["--module-assets-apks-dir", outputs[module_assets_apks_dir].as_output()]) + + assets_dirs_file = ctx.actions.write_json("assets_dirs.json", module_to_assets_dirs) + merge_assets_cmd.add(["--assets-dirs", assets_dirs_file]) + merge_assets_cmd.add(cmd_args(hidden = flatten(module_to_assets_dirs.values()))) + + ctx.actions.run(merge_assets_cmd, category = "merge_assets") + + ctx.actions.dynamic_output( + dynamic = [apk_module_graph_file], + inputs = [], + outputs = [o.as_output() for o in declared_outputs], + f = merge_assets_modular, + ) + + return merged_assets_output, None, None, module_assets_apks_dir - if is_exopackaged_enabled_for_resources: - return base_apk, merged_assets_output, merged_assets_output_hash else: - return merged_assets_output, None, None + merge_assets_cmd, merged_assets_output_hash = get_common_merge_assets_cmd(ctx, merged_assets_output) + + assets_dirs = [resource_info.assets for resource_info in asset_resource_infos] + if cxx_resources: + assets_dirs.extend([cxx_resources]) + assets_dirs_file = ctx.actions.write_json("assets_dirs.json", {ROOT_MODULE: assets_dirs}) + merge_assets_cmd.add(["--assets-dirs", assets_dirs_file]) + merge_assets_cmd.add(cmd_args(hidden = assets_dirs)) + + ctx.actions.run(merge_assets_cmd, category = "merge_assets") + + if is_exopackaged_enabled_for_resources: + return base_apk, merged_assets_output, merged_assets_output_hash, None + else: + return merged_assets_output, None, None, None def get_effective_banned_duplicate_resource_types( duplicate_resource_behavior: str, @@ -587,7 +662,7 @@ def get_effective_banned_duplicate_resource_types( else: fail("Unrecognized duplicate_resource_behavior: {}".format(duplicate_resource_behavior)) -def get_cxx_resources(ctx: AnalysisContext, deps: list[Dependency], dir_name: str = "cxx_resources_dir") -> [Artifact, None]: +def get_cxx_resources(ctx: AnalysisContext, deps: list[Dependency], dir_name: str = "cxx_resources_dir") -> Artifact | None: cxx_resources = gather_resources( label = ctx.label, resources = {}, @@ -597,8 +672,8 @@ def get_cxx_resources(ctx: AnalysisContext, deps: list[Dependency], dir_name: st symlink_tree_dict = {} resource_maps = cxx_resources.values() for resource_map in resource_maps: - for name, (resource, _other) in resource_map.items(): - symlink_tree_dict["cxx-resources/{}".format(name)] = resource + for name, resource in 
resource_map.items(): + symlink_tree_dict["cxx-resources/{}".format(name)] = resource.default_output return ctx.actions.symlinked_dir(dir_name, symlink_tree_dict) if symlink_tree_dict else None diff --git a/prelude/android/android_build_config.bzl b/prelude/android/android_build_config.bzl index b1a32b8a3fa54..95ab62c55c748 100644 --- a/prelude/android/android_build_config.bzl +++ b/prelude/android/android_build_config.bzl @@ -22,11 +22,11 @@ def android_build_config_impl(ctx: AnalysisContext) -> list[Provider]: providers = [] default_build_config_fields = get_build_config_fields(ctx.attrs.values) - android_build_config_info = AndroidBuildConfigInfo(package = ctx.attrs.package, build_config_fields = default_build_config_fields) + android_build_config_info = AndroidBuildConfigInfo(package = ctx.attrs.package, build_config_fields = default_build_config_fields, values_file = ctx.attrs.values_file) providers.append(android_build_config_info) providers.append(merge_android_packageable_info(ctx.label, ctx.actions, deps = [], build_config_info = android_build_config_info)) - build_config_dot_java_library, java_packaging_info = generate_android_build_config( + build_config_dot_java_library, java_packaging_info, build_config_dot_java = generate_android_build_config( ctx, ctx.attrs.name, ctx.attrs.package, @@ -38,7 +38,14 @@ def android_build_config_impl(ctx: AnalysisContext) -> list[Provider]: providers.append(java_packaging_info) providers.append(build_config_dot_java_library) - providers.append(DefaultInfo(default_output = build_config_dot_java_library.library_output.full_library)) + providers.append( + DefaultInfo( + default_output = build_config_dot_java_library.library_output.full_library, + sub_targets = { + "build_config_dot_java": [DefaultInfo(default_output = build_config_dot_java)], + }, + ), + ) return providers def generate_android_build_config( @@ -47,7 +54,7 @@ def generate_android_build_config( java_package: str, use_constant_expressions: bool, default_values: list[BuildConfigField], - values_file: [Artifact, None]) -> (JavaLibraryInfo, JavaPackagingInfo): + values_file: Artifact | None) -> (JavaLibraryInfo, JavaPackagingInfo, Artifact): build_config_dot_java = _generate_build_config_dot_java(ctx, source, java_package, use_constant_expressions, default_values, values_file) compiled_build_config_dot_java = _compile_and_package_build_config_dot_java(ctx, java_package, build_config_dot_java) @@ -61,7 +68,7 @@ def generate_android_build_config( output_for_classpath_macro = library_output.full_library, ), JavaPackagingInfo( packaging_deps = packaging_deps, - )) + ), build_config_dot_java) def _generate_build_config_dot_java( ctx: AnalysisContext, @@ -69,7 +76,7 @@ def _generate_build_config_dot_java( java_package: str, use_constant_expressions: bool, default_values: list[BuildConfigField], - values_file: [Artifact, None]) -> Artifact: + values_file: Artifact | None) -> Artifact: generate_build_config_cmd = cmd_args(ctx.attrs._android_toolchain[AndroidToolchainInfo].generate_build_config[RunInfo]) generate_build_config_cmd.add([ "--source", diff --git a/prelude/android/android_bundle.bzl b/prelude/android/android_bundle.bzl index 1dc9b3ca9a2a8..3168922a1177b 100644 --- a/prelude/android/android_bundle.bzl +++ b/prelude/android/android_bundle.bzl @@ -5,10 +5,14 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
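# A minimal sketch, hedged: the android_build_config.bzl hunk above exposes the
# generated BuildConfig.java source as a new `build_config_dot_java` sub-target,
# e.g. `buck2 build //app:config[build_config_dot_java]` (the target name is a
# hypothetical placeholder). From another rule's analysis, assuming a
# hypothetical `build_config` dependency attribute, it could be read like this:
def _get_build_config_source(ctx: AnalysisContext) -> Artifact:
    # sub_targets maps each sub-target name to its provider collection.
    sub_targets = ctx.attrs.build_config[DefaultInfo].sub_targets
    return sub_targets["build_config_dot_java"][DefaultInfo].default_outputs[0]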
+load("@prelude//:validation_deps.bzl", "get_validation_deps_outputs") load("@prelude//android:android_binary.bzl", "get_binary_info") load("@prelude//android:android_providers.bzl", "AndroidAabInfo", "AndroidBinaryNativeLibsInfo", "AndroidBinaryResourcesInfo", "DexFilesInfo") load("@prelude//android:android_toolchain.bzl", "AndroidToolchainInfo") -load("@prelude//java/utils:java_utils.bzl", "get_path_separator_for_exec_os") +load("@prelude//android:bundletool_util.bzl", "derive_universal_apk") +load("@prelude//java:java_providers.bzl", "KeystoreInfo") +load("@prelude//java/utils:java_more_utils.bzl", "get_path_separator_for_exec_os") +load("@prelude//utils:argfile.bzl", "argfile") def android_bundle_impl(ctx: AnalysisContext) -> list[Provider]: android_binary_info = get_binary_info(ctx, use_proto_format = True) @@ -20,12 +24,31 @@ def android_bundle_impl(ctx: AnalysisContext) -> list[Provider]: dex_files_info = android_binary_info.dex_files_info, native_library_info = android_binary_info.native_library_info, resources_info = android_binary_info.resources_info, + bundle_config = ctx.attrs.bundle_config_file, + validation_deps_outputs = get_validation_deps_outputs(ctx), + packaging_options = ctx.attrs.packaging_options, ) + sub_targets = {} + sub_targets.update(android_binary_info.sub_targets) + if ctx.attrs.use_derived_apk: + keystore = ctx.attrs.keystore[KeystoreInfo] + default_output = derive_universal_apk( + ctx, + android_toolchain = ctx.attrs._android_toolchain[AndroidToolchainInfo], + app_bundle = output_bundle, + keystore = keystore, + ) + sub_targets["aab"] = [DefaultInfo( + default_outputs = [output_bundle], + )] + else: + default_output = output_bundle + java_packaging_deps = android_binary_info.java_packaging_deps return [ - DefaultInfo(default_output = output_bundle, sub_targets = android_binary_info.sub_targets), - AndroidAabInfo(aab = output_bundle, manifest = android_binary_info.resources_info.manifest), + DefaultInfo(default_output = default_output, other_outputs = android_binary_info.materialized_artifacts, sub_targets = sub_targets), + AndroidAabInfo(aab = output_bundle, manifest = android_binary_info.resources_info.manifest, materialized_artifacts = android_binary_info.materialized_artifacts), TemplatePlaceholderInfo( keyed_variables = { "classpath": cmd_args([dep.jar for dep in java_packaging_deps if dep.jar], delimiter = get_path_separator_for_exec_os(ctx)), @@ -40,10 +63,13 @@ def build_bundle( android_toolchain: AndroidToolchainInfo, dex_files_info: DexFilesInfo, native_library_info: AndroidBinaryNativeLibsInfo, - resources_info: AndroidBinaryResourcesInfo) -> Artifact: + resources_info: AndroidBinaryResourcesInfo, + bundle_config: Artifact | None, + validation_deps_outputs: [list[Artifact], None] = None, + packaging_options: dict | None = None) -> Artifact: output_bundle = actions.declare_output("{}.aab".format(label.name)) - bundle_builder_args = cmd_args([ + bundle_builder_args = cmd_args( android_toolchain.bundle_builder[RunInfo], "--output-bundle", output_bundle.as_output(), @@ -51,30 +77,39 @@ def build_bundle( resources_info.primary_resources_apk, "--dex-file", dex_files_info.primary_dex, - ]) + # The outputs of validation_deps need to be added as hidden arguments + # to an action for the validation_deps targets to be built and enforced. 
+ hidden = validation_deps_outputs or [], + ) + + if bundle_config: + bundle_builder_args.add(["--path-to-bundle-config-file", bundle_config]) if android_toolchain.package_meta_inf_version_files: bundle_builder_args.add("--package-meta-inf-version-files") root_module_asset_directories = native_library_info.root_module_native_lib_assets + dex_files_info.root_module_secondary_dex_dirs - root_module_asset_directories_file = actions.write("root_module_asset_directories.txt", root_module_asset_directories) - bundle_builder_args.hidden(root_module_asset_directories) - non_root_module_asset_directories = resources_info.module_manifests + native_library_info.non_root_module_native_lib_assets + dex_files_info.non_root_module_secondary_dex_dirs - non_root_module_asset_directories_file = actions.write("non_root_module_asset_directories.txt", non_root_module_asset_directories) - bundle_builder_args.hidden(non_root_module_asset_directories) - native_library_directories = actions.write("native_library_directories", native_library_info.native_libs_for_primary_apk) - bundle_builder_args.hidden(native_library_info.native_libs_for_primary_apk) + root_module_asset_directories_file = argfile(actions = actions, name = "root_module_asset_directories.txt", args = root_module_asset_directories) + + non_root_module_asset_directories = resources_info.module_manifests + dex_files_info.non_root_module_secondary_dex_dirs + non_root_module_asset_directories_file = argfile(actions = actions, name = "non_root_module_asset_directories.txt", args = non_root_module_asset_directories) + non_root_module_asset_native_lib_directories = argfile(actions = actions, name = "non_root_module_asset_native_lib_directories.txt", args = native_library_info.non_root_module_native_lib_assets) + + native_library_directories = argfile(actions = actions, name = "native_library_directories", args = native_library_info.native_libs_for_primary_apk) all_zip_files = [resources_info.packaged_string_assets] if resources_info.packaged_string_assets else [] - zip_files = actions.write("zip_files", all_zip_files) - bundle_builder_args.hidden(all_zip_files) - jar_files_that_may_contain_resources = actions.write("jar_files_that_may_contain_resources", resources_info.jar_files_that_may_contain_resources) - bundle_builder_args.hidden(resources_info.jar_files_that_may_contain_resources) + zip_files = argfile(actions = actions, name = "zip_files", args = all_zip_files) + jar_files_that_may_contain_resources = argfile(actions = actions, name = "jar_files_that_may_contain_resources", args = resources_info.jar_files_that_may_contain_resources) + + if resources_info.module_assets: + bundle_builder_args.add(["--module-assets-dir", resources_info.module_assets]) bundle_builder_args.add([ "--root-module-asset-directories-list", root_module_asset_directories_file, "--non-root-module-asset-directories-list", non_root_module_asset_directories_file, + "--non-root-module-asset-native-lib-directories-list", + non_root_module_asset_native_lib_directories, "--native-libraries-directories-list", native_library_directories, "--zip-files-list", @@ -85,6 +120,13 @@ def build_bundle( android_toolchain.zipalign[RunInfo], ]) + if packaging_options: + for key, value in packaging_options.items(): + if key != "excluded_resources": + fail("Only 'excluded_resources' is supported in packaging_options right now!") + else: + bundle_builder_args.add("--excluded-resources", actions.write("excluded_resources.txt", value)) + actions.run(bundle_builder_args, category = "bundle_build") return 
output_bundle diff --git a/prelude/android/android_instrumentation_apk.bzl b/prelude/android/android_instrumentation_apk.bzl index d330c06c8563d..f7eb09a8d3140 100644 --- a/prelude/android/android_instrumentation_apk.bzl +++ b/prelude/android/android_instrumentation_apk.bzl @@ -12,14 +12,20 @@ load("@prelude//android:android_providers.bzl", "AndroidApkInfo", "AndroidApkUnd load("@prelude//android:android_toolchain.bzl", "AndroidToolchainInfo") load("@prelude//android:configuration.bzl", "get_deps_by_platform") load("@prelude//android:dex_rules.bzl", "get_multi_dex", "get_single_primary_dex", "get_split_dex_merge_config", "merge_to_single_dex", "merge_to_split_dex") +load("@prelude//android:preprocess_java_classes.bzl", "get_preprocessed_java_classes") load("@prelude//android:util.bzl", "create_enhancement_context") +load("@prelude//java:class_to_srcs.bzl", "merge_class_to_source_map_from_jar") load("@prelude//java:java_providers.bzl", "create_java_packaging_dep", "get_all_java_packaging_deps") +load("@prelude//java:java_toolchain.bzl", "JavaToolchainInfo") load("@prelude//java/utils:java_utils.bzl", "get_class_to_source_map_info") -load("@prelude//utils:utils.bzl", "expect") +load("@prelude//utils:expect.bzl", "expect") def android_instrumentation_apk_impl(ctx: AnalysisContext): _verify_params(ctx) + # jar preprocessing cannot be used when the jars were dexed already, so we have to disable predex when we want to preprocess the jars. + disable_pre_dex = ctx.attrs.disable_pre_dex or ctx.attrs.preprocess_java_classes_bash + apk_under_test_info = ctx.attrs.apk[AndroidApkUnderTestInfo] # android_instrumentation_apk uses the same platforms as the APK-under-test @@ -34,10 +40,12 @@ def android_instrumentation_apk_impl(ctx: AnalysisContext): # We use the deps that don't have _build_only_native_code = True deps = unfiltered_deps_by_platform.values()[0] + is_self_instrumenting = ctx.attrs.is_self_instrumenting + java_packaging_deps = [ packaging_dep for packaging_dep in get_all_java_packaging_deps(ctx, deps) - if packaging_dep.dex and not apk_under_test_info.java_packaging_deps.contains(packaging_dep.label.raw_target()) + if packaging_dep.dex and (is_self_instrumenting or not apk_under_test_info.java_packaging_deps.contains(packaging_dep.label.raw_target())) ] android_packageable_info = merge_android_packageable_info(ctx.label, ctx.actions, deps) @@ -50,20 +58,23 @@ def android_instrumentation_apk_impl(ctx: AnalysisContext): use_proto_format = False, referenced_resources_lists = [], manifest_entries = apk_under_test_info.manifest_entries, - resource_infos_to_exclude = apk_under_test_info.resource_infos, - r_dot_java_packages_to_exclude = apk_under_test_info.r_dot_java_packages.list(), + resource_infos_to_exclude = apk_under_test_info.resource_infos if not is_self_instrumenting else None, + r_dot_java_packages_to_exclude = apk_under_test_info.r_dot_java_packages.list() if not is_self_instrumenting else [], ) android_toolchain = ctx.attrs._android_toolchain[AndroidToolchainInfo] java_packaging_deps += [ create_java_packaging_dep( ctx, - r_dot_java.library_output.full_library, + r_dot_java.library_info.library_output.full_library, dex_weight_factor = android_toolchain.r_dot_java_weight_factor, ) - for r_dot_java in resources_info.r_dot_javas + for r_dot_java in resources_info.r_dot_java_infos ] - if not ctx.attrs.disable_pre_dex: + enhance_ctx = create_enhancement_context(ctx) + sub_targets = enhance_ctx.get_sub_targets() + materialized_artifacts = [] + if not disable_pre_dex: pre_dexed_libs = 
[java_packaging_dep.dex for java_packaging_dep in java_packaging_deps] if ctx.attrs.use_split_dex: dex_merge_config = get_split_dex_merge_config(ctx, android_toolchain) @@ -77,6 +88,10 @@ def android_instrumentation_apk_impl(ctx: AnalysisContext): dex_files_info = merge_to_single_dex(ctx, android_toolchain, pre_dexed_libs) else: jars_to_owners = {packaging_dep.jar: packaging_dep.jar.owner.raw_target() for packaging_dep in java_packaging_deps} + if ctx.attrs.preprocess_java_classes_bash: + jars_to_owners, materialized_artifacts_dir = get_preprocessed_java_classes(enhance_ctx, jars_to_owners) + if materialized_artifacts_dir: + materialized_artifacts.append(materialized_artifacts_dir) if ctx.attrs.use_split_dex: dex_files_info = get_multi_dex( ctx, @@ -91,13 +106,12 @@ def android_instrumentation_apk_impl(ctx: AnalysisContext): jars_to_owners.keys(), ) - enhance_ctx = create_enhancement_context(ctx) native_library_info = get_android_binary_native_library_info( enhance_ctx, android_packageable_info, filtered_deps_by_platform, - prebuilt_native_library_dirs_to_exclude = apk_under_test_info.prebuilt_native_library_dirs, - shared_libraries_to_exclude = apk_under_test_info.shared_libraries, + prebuilt_native_library_dirs_to_exclude = apk_under_test_info.prebuilt_native_library_dirs if not is_self_instrumenting else None, + shared_libraries_to_exclude = apk_under_test_info.shared_libraries if not is_self_instrumenting else None, ) output_apk = build_apk( @@ -110,16 +124,24 @@ def android_instrumentation_apk_impl(ctx: AnalysisContext): resources_info = resources_info, ) - class_to_srcs, _ = get_class_to_source_map_info( + class_to_srcs, _, class_to_srcs_subtargets = get_class_to_source_map_info( ctx, outputs = None, deps = deps, ) + transitive_class_to_src_map = merge_class_to_source_map_from_jar( + actions = ctx.actions, + name = ctx.label.name + ".transitive_class_to_src.json", + java_toolchain = ctx.attrs._java_toolchain[JavaToolchainInfo], + relative_to = None, + deps = [class_to_srcs], + ) + sub_targets["transitive_class_to_src_map"] = [DefaultInfo(default_output = transitive_class_to_src_map)] return [ - AndroidApkInfo(apk = output_apk, manifest = resources_info.manifest), + AndroidApkInfo(apk = output_apk, materialized_artifacts = materialized_artifacts, manifest = resources_info.manifest), AndroidInstrumentationApkInfo(apk_under_test = ctx.attrs.apk[AndroidApkInfo].apk), - DefaultInfo(default_output = output_apk, sub_targets = enhance_ctx.get_sub_targets()), + DefaultInfo(default_output = output_apk, other_outputs = materialized_artifacts, sub_targets = sub_targets | class_to_srcs_subtargets), class_to_srcs, ] diff --git a/prelude/android/android_instrumentation_test.bzl b/prelude/android/android_instrumentation_test.bzl index efa4ab61dc0db..3c128d5a4b046 100644 --- a/prelude/android/android_instrumentation_test.bzl +++ b/prelude/android/android_instrumentation_test.bzl @@ -8,25 +8,60 @@ load("@prelude//android:android_providers.bzl", "AndroidApkInfo", "AndroidInstrumentationApkInfo") load("@prelude//android:android_toolchain.bzl", "AndroidToolchainInfo") load("@prelude//java:class_to_srcs.bzl", "JavaClassToSourceMapInfo") +load("@prelude//java:java_providers.bzl", "JavaPackagingInfo", "get_all_java_packaging_deps_tset") load("@prelude//java:java_toolchain.bzl", "JavaToolchainInfo") -load("@prelude//java/utils:java_utils.bzl", "get_path_separator_for_exec_os") -load("@prelude//utils:utils.bzl", "expect") +load("@prelude//java/utils:java_more_utils.bzl", "get_path_separator_for_exec_os") 
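# A minimal sketch, hedged, of the argfile pattern the hunks above migrate to,
# assuming only the `argfile`/`at_argfile` signatures visible in this diff: the
# helper writes the args to an @-file and carries the referenced artifacts as
# hidden inputs, replacing the old `actions.write` plus `.hidden(...)` pairing.
# The tool and names below are hypothetical placeholders.
load("@prelude//utils:argfile.bzl", "at_argfile")

def _run_tool_with_argfile(ctx: AnalysisContext, tool: RunInfo, inputs: list[Artifact]) -> Artifact:
    out = ctx.actions.declare_output("out")
    cmd = cmd_args(tool, "--output", out.as_output())
    # Expands to a single "@inputs.txt" argument; `inputs` remain hidden deps.
    cmd.add(at_argfile(actions = ctx.actions, name = "inputs.txt", args = inputs))
    ctx.actions.run(cmd, category = "example_argfile")
    return out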
+load( + "@prelude//linking:shared_libraries.bzl", + "SharedLibraryInfo", + "create_shlib_symlink_tree", + "merge_shared_libraries", + "traverse_shared_library_info", +) +load("@prelude//utils:argfile.bzl", "at_argfile") +load("@prelude//utils:expect.bzl", "expect") load("@prelude//test/inject_test_run_info.bzl", "inject_test_run_info") +ANDROID_EMULATOR_ABI_LABEL_PREFIX = "tpx-re-config::" DEFAULT_ANDROID_SUBPLATFORM = "android-30" +DEFAULT_ANDROID_PLATFORM = "android-emulator" +DEFAULT_ANDROID_INSTRUMENTATION_TESTS_USE_CASE = "instrumentation-tests" +RIOT_USE_CASE = "riot" +SUPPORTED_POOLS = ["EUREKA_POOL", "HOLLYWOOD_POOL", "STAGE_DELPHI_POOL", "PANTHER_POOL", "SEACLIFF_POOL"] +SUPPORTED_PLATFORMS = ["riot", "android-emulator"] +SUPPORTED_USE_CASES = [RIOT_USE_CASE, DEFAULT_ANDROID_INSTRUMENTATION_TESTS_USE_CASE] def android_instrumentation_test_impl(ctx: AnalysisContext): android_toolchain = ctx.attrs._android_toolchain[AndroidToolchainInfo] - cmd = [ctx.attrs._java_toolchain[JavaToolchainInfo].java_for_tests] + cmd = [ctx.attrs._java_test_toolchain[JavaToolchainInfo].java_for_tests] classpath = android_toolchain.instrumentation_test_runner_classpath classpath_args = cmd_args() classpath_args.add("-classpath") - classpath_args.add(cmd_args(classpath, delimiter = get_path_separator_for_exec_os(ctx))) - classpath_args_file = ctx.actions.write("classpath_args_file", classpath_args) - cmd.append(cmd_args(classpath_args_file, format = "@{}").hidden(classpath_args)) + env = ctx.attrs.env or {} + extra_classpath = [] + if ctx.attrs.instrumentation_test_listener != None: + extra_classpath.extend([ + get_all_java_packaging_deps_tset(ctx, java_packaging_infos = [ctx.attrs.instrumentation_test_listener[JavaPackagingInfo]]) + .project_as_args("full_jar_args", ordering = "bfs"), + ]) + + shared_library_info = merge_shared_libraries( + ctx.actions, + deps = [ctx.attrs.instrumentation_test_listener[SharedLibraryInfo]], + ) + + cxx_library_symlink_tree = create_shlib_symlink_tree( + actions = ctx.actions, + out = "cxx_library_symlink_tree", + shared_libs = traverse_shared_library_info(shared_library_info), + ) + + env["BUCK_LD_SYMLINK_TREE"] = cxx_library_symlink_tree + classpath_args.add(cmd_args(extra_classpath + classpath, delimiter = get_path_separator_for_exec_os(ctx))) + cmd.append(at_argfile(actions = ctx.actions, name = "classpath_args_file", args = classpath_args)) cmd.append(android_toolchain.instrumentation_test_runner_main_class) @@ -36,6 +71,17 @@ def android_instrumentation_test_impl(ctx: AnalysisContext): instrumentation_apk_info = ctx.attrs.apk.get(AndroidInstrumentationApkInfo) if instrumentation_apk_info != None: cmd.extend(["--apk-under-test-path", instrumentation_apk_info.apk_under_test]) + if ctx.attrs.is_self_instrumenting: + cmd.extend(["--is-self-instrumenting"]) + extra_instrumentation_args = ctx.attrs.extra_instrumentation_args + if extra_instrumentation_args: + for arg_name, arg_value in extra_instrumentation_args.items(): + cmd.extend( + [ + "--extra-instrumentation-argument", + cmd_args([arg_name, arg_value], delimiter = "="), + ], + ) target_package_file = ctx.actions.declare_output("target_package_file") package_file = ctx.actions.declare_output("package_file") @@ -52,7 +98,6 @@ def android_instrumentation_test_impl(ctx: AnalysisContext): test_runner_file.as_output(), ]) ctx.actions.run(manifest_utils_cmd, category = "get_manifest_info") - cmd.extend( [ "--test-package-name", @@ -64,6 +109,28 @@ def android_instrumentation_test_impl(ctx: AnalysisContext): ], ) + if 
ctx.attrs.instrumentation_test_listener_class != None: + cmd.extend(["--extra-instrumentation-test-listener", ctx.attrs.instrumentation_test_listener_class]) + + if ctx.attrs.clear_package_data: + cmd.append("--clear-package-data") + + if ctx.attrs.disable_animations: + cmd.append("--disable-animations") + + if ctx.attrs.collect_tombstones: + cmd.append("--collect-tombstones") + if ctx.attrs.record_video: + cmd.append("--record-video") + if ctx.attrs.log_extractors: + for arg_name, arg_value in ctx.attrs.log_extractors.items(): + cmd.extend( + [ + "--log-extractor", + cmd_args([arg_name, arg_value], delimiter = "="), + ], + ) + cmd.extend( [ "--adb-executable-path", @@ -76,46 +143,117 @@ def android_instrumentation_test_impl(ctx: AnalysisContext): test_info = ExternalRunnerTestInfo( type = "android_instrumentation", command = cmd, - env = ctx.attrs.env, + env = env, labels = ctx.attrs.labels, contacts = ctx.attrs.contacts, run_from_project_root = True, use_project_relative_paths = True, - executor_overrides = { - "android-emulator": CommandExecutorConfig( - local_enabled = android_toolchain.instrumentation_test_can_run_locally, - remote_enabled = True, - remote_execution_properties = { - "platform": "android-emulator", - "subplatform": _compute_emulator_target(ctx.attrs.labels or []), - }, - remote_execution_use_case = "instrumentation-tests", - ), - "static-listing": CommandExecutorConfig( - local_enabled = True, - remote_enabled = True, - remote_execution_properties = { - "platform": "linux-remote-execution", - }, - remote_execution_use_case = "buck2-default", - ), - }, + executor_overrides = _compute_executor_overrides(ctx, android_toolchain.instrumentation_test_can_run_locally), local_resources = { - "android_emulator": None, + "android_emulator": None if ctx.attrs._android_emulators == None else ctx.attrs._android_emulators.label, }, + required_local_resources = [RequiredTestLocalResource("android_emulator", listing = True, execution = True)], ) classmap_source_info = [ctx.attrs.apk[JavaClassToSourceMapInfo]] if JavaClassToSourceMapInfo in ctx.attrs.apk else [] - return inject_test_run_info(ctx, test_info) + [ + test_info, run_info = inject_test_run_info(ctx, test_info) + + # We append additional args so that "buck2 run" will work with sane defaults + run_info.args.add(cmd_args(["--auto-run-on-connected-device", "--output", ".", "--adb-executable-path", "adb"])) + return [ + test_info, + run_info, DefaultInfo(), ] + classmap_source_info +def _compute_executor_overrides(ctx: AnalysisContext, instrumentation_test_can_run_locally: bool) -> dict[str, CommandExecutorConfig]: + remote_execution_properties = { + "platform": _compute_emulator_platform(ctx.attrs.labels or []), + "subplatform": _compute_emulator_subplatform(ctx.attrs.labels or []), + } + + re_emulator_abi = _compute_emulator_abi(ctx.attrs.labels or []) + if re_emulator_abi != None: + remote_execution_properties["abi"] = re_emulator_abi + + default_executor_override = CommandExecutorConfig( + local_enabled = instrumentation_test_can_run_locally, + remote_enabled = True, + remote_execution_properties = remote_execution_properties, + remote_execution_use_case = _compute_re_use_case(ctx.attrs.labels or []), + ) + dynamic_listing_executor_override = default_executor_override + test_execution_executor_override = default_executor_override + + if ctx.attrs.re_caps and ctx.attrs.re_use_case: + if "dynamic-listing" in ctx.attrs.re_caps and "dynamic-listing" in ctx.attrs.re_use_case: + 
_validate_executor_override_re_config(ctx.attrs.re_caps["dynamic-listing"], ctx.attrs.re_use_case["dynamic-listing"]) + dynamic_listing_executor_override = CommandExecutorConfig( + local_enabled = instrumentation_test_can_run_locally, + remote_enabled = True, + remote_execution_properties = ctx.attrs.re_caps["dynamic-listing"], + remote_execution_use_case = ctx.attrs.re_use_case["dynamic-listing"], + ) + if "test-execution" in ctx.attrs.re_caps and "test-execution" in ctx.attrs.re_use_case: + _validate_executor_override_re_config(ctx.attrs.re_caps["test-execution"], ctx.attrs.re_use_case["test-execution"]) + test_execution_executor_override = CommandExecutorConfig( + local_enabled = instrumentation_test_can_run_locally, + remote_enabled = True, + remote_execution_properties = ctx.attrs.re_caps["test-execution"], + remote_execution_use_case = ctx.attrs.re_use_case["test-execution"], + ) + + return { + "android-emulator": default_executor_override, + "dynamic-listing": dynamic_listing_executor_override, + "static-listing": CommandExecutorConfig( + local_enabled = True, + remote_enabled = True, + remote_execution_properties = { + "platform": "linux-remote-execution", + }, + remote_execution_use_case = "buck2-default", + ), + "test-execution": test_execution_executor_override, + } + +def _compute_emulator_abi(labels: list[str]): + emulator_abi_labels = [label for label in labels if label.startswith(ANDROID_EMULATOR_ABI_LABEL_PREFIX)] + expect(len(emulator_abi_labels) <= 1, "multiple '{}' labels were found:[{}], there must be only one!".format(ANDROID_EMULATOR_ABI_LABEL_PREFIX, ", ".join(emulator_abi_labels))) + if len(emulator_abi_labels) == 0: + return None + else: # len(emulator_abi_labels) == 1: + return emulator_abi_labels[0].replace(ANDROID_EMULATOR_ABI_LABEL_PREFIX, "") + # replicating the logic in https://fburl.com/code/1fqowxu4 to match buck1's behavior -def _compute_emulator_target(labels: list[str]) -> str: - emulator_target_labels = [label for label in labels if label.startswith("re_emulator_")] - expect(len(emulator_target_labels) <= 1, "multiple 're_emulator_' labels were found:[{}], there must be only one!".format(", ".join(emulator_target_labels))) - if len(emulator_target_labels) == 0: +def _compute_emulator_subplatform(labels: list[str]) -> str: + emulator_subplatform_labels = [label for label in labels if label.startswith("re_emulator_")] + expect(len(emulator_subplatform_labels) <= 1, "multiple 're_emulator_' labels were found:[{}], there must be only one!".format(", ".join(emulator_subplatform_labels))) + if len(emulator_subplatform_labels) == 0: return DEFAULT_ANDROID_SUBPLATFORM - else: # len(emulator_target_labels) == 1: - return emulator_target_labels[0].replace("re_emulator_", "") + else: # len(emulator_subplatform_labels) == 1: + return emulator_subplatform_labels[0].replace("re_emulator_", "") + +def _compute_emulator_platform(labels: list[str]) -> str: + emulator_platform_labels = [label for label in labels if label.startswith("re_platform_")] + expect(len(emulator_platform_labels) <= 1, "multiple 're_platform_' labels were found:[{}], there must be only one!".format(", ".join(emulator_platform_labels))) + if len(emulator_platform_labels) == 0: + return DEFAULT_ANDROID_PLATFORM + else: # len(emulator_platform_labels) == 1: + return emulator_platform_labels[0].replace("re_platform_", "") + +def _compute_re_use_case(labels: list[str]) -> str: + re_use_case_labels = [label for label in labels if label.startswith("re_opts_use_case=")] + expect(len(re_use_case_labels) <= 
1, "multiple 're_opts_use_case' labels were found:[{}], there must be only one!".format(", ".join(re_use_case_labels))) + if len(re_use_case_labels) == 0: + return DEFAULT_ANDROID_INSTRUMENTATION_TESTS_USE_CASE + else: # len(re_use_case_labels) == 1: + return re_use_case_labels[0].replace("re_opts_use_case=", "") + +def _validate_executor_override_re_config(re_caps: dict[str, str], re_use_case: str): + expect(re_use_case in SUPPORTED_USE_CASES, "Unexpected {} use case found, value is expected to be on of the following: {}", re_use_case, ", ".join(SUPPORTED_USE_CASES)) + if "pool" in re_caps: + expect(re_caps["pool"] in SUPPORTED_POOLS, "Unexpected {} pool found, value is expected to be on of the following: {}", re_caps["pool"], ", ".join(SUPPORTED_POOLS)) + if "platform" in re_caps: + expect(re_caps["platform"] in SUPPORTED_PLATFORMS, "Unexpected {} platform found, value is expected to be on of the following: {}", re_caps["platform"], ", ".join(SUPPORTED_PLATFORMS)) diff --git a/prelude/android/android_library.bzl b/prelude/android/android_library.bzl index cf4ba80334fc8..40f562904a51b 100644 --- a/prelude/android/android_library.bzl +++ b/prelude/android/android_library.bzl @@ -5,6 +5,7 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +load("@prelude//:validation_deps.bzl", "get_validation_deps_outputs") load( "@prelude//android:android_providers.bzl", "AndroidLibraryIntellijInfo", @@ -24,7 +25,7 @@ load( load("@prelude//kotlin:kotlin_library.bzl", "build_kotlin_library") def android_library_impl(ctx: AnalysisContext) -> list[Provider]: - packaging_deps = ctx.attrs.deps + (ctx.attrs.deps_query or []) + ctx.attrs.exported_deps + ctx.attrs.runtime_deps + packaging_deps = ctx.attrs.deps + ctx.attrs.exported_deps + ctx.attrs.runtime_deps if ctx.attrs._build_only_native_code: shared_library_info, cxx_resource_info, linkable_graph = create_native_providers(ctx, ctx.label, packaging_deps) return [ @@ -34,9 +35,15 @@ def android_library_impl(ctx: AnalysisContext) -> list[Provider]: linkable_graph, # Add an unused default output in case this target is used as an attr.source() anywhere. 
DefaultInfo(default_output = ctx.actions.write("{}/unused.jar".format(ctx.label.name), [])), + TemplatePlaceholderInfo(keyed_variables = { + "classpath": "unused_but_needed_for_analysis", + }), ] - java_providers, android_library_intellij_info = build_android_library(ctx) + java_providers, android_library_intellij_info = build_android_library( + ctx = ctx, + validation_deps_outputs = get_validation_deps_outputs(ctx), + ) android_providers = [android_library_intellij_info] if android_library_intellij_info else [] return to_list(java_providers) + [ @@ -49,14 +56,20 @@ def android_library_impl(ctx: AnalysisContext) -> list[Provider]: merge_exported_android_resource_info(ctx.attrs.exported_deps), ] + android_providers +def optional_jars(ctx: AnalysisContext) -> list[Artifact]: + return ctx.attrs.android_optional_jars or [] + def build_android_library( ctx: AnalysisContext, - r_dot_java: [Artifact, None] = None) -> (JavaProviders, [AndroidLibraryIntellijInfo, None]): - bootclasspath_entries = [] + ctx.attrs._android_toolchain[AndroidToolchainInfo].android_bootclasspath - additional_classpath_entries = [] + r_dot_java: Artifact | None = None, + extra_sub_targets = {}, + validation_deps_outputs: [list[Artifact], None] = None, + classpath_entries: [list[Artifact], None] = None) -> (JavaProviders, [AndroidLibraryIntellijInfo, None]): + bootclasspath_entries = [] + ctx.attrs._android_toolchain[AndroidToolchainInfo].android_bootclasspath + optional_jars(ctx) + additional_classpath_entries = list(classpath_entries) if classpath_entries != None else [] dummy_r_dot_java, android_library_intellij_info = _get_dummy_r_dot_java(ctx) - extra_sub_targets = {} + extra_sub_targets = dict(extra_sub_targets) if r_dot_java: additional_classpath_entries.append(r_dot_java) @@ -70,6 +83,7 @@ def build_android_library( additional_classpath_entries = additional_classpath_entries, bootclasspath_entries = bootclasspath_entries, extra_sub_targets = extra_sub_targets, + validation_deps_outputs = validation_deps_outputs, ), android_library_intellij_info else: return build_java_library( @@ -78,25 +92,26 @@ def build_android_library( additional_classpath_entries = additional_classpath_entries, bootclasspath_entries = bootclasspath_entries, extra_sub_targets = extra_sub_targets, + validation_deps_outputs = validation_deps_outputs, ), android_library_intellij_info def _get_dummy_r_dot_java( - ctx: AnalysisContext) -> ([Artifact, None], [AndroidLibraryIntellijInfo, None]): + ctx: AnalysisContext) -> (Artifact | None, [AndroidLibraryIntellijInfo, None]): android_resources = dedupe([resource for resource in filter(None, [ x.get(AndroidResourceInfo) - for x in ctx.attrs.deps + (ctx.attrs.deps_query or []) + ctx.attrs.provided_deps + (getattr(ctx.attrs, "provided_deps_query", []) or []) + for x in ctx.attrs.deps + ctx.attrs.provided_deps + (getattr(ctx.attrs, "provided_deps_query", []) or []) ]) if resource.res != None]) if len(android_resources) == 0: return (None, None) - dummy_r_dot_java_library_info = get_dummy_r_dot_java( + dummy_r_dot_java_info = get_dummy_r_dot_java( ctx, ctx.attrs._android_toolchain[AndroidToolchainInfo].merge_android_resources[RunInfo], android_resources, ctx.attrs.resource_union_package, ) - dummy_r_dot_java = dummy_r_dot_java_library_info.library_output.abi + dummy_r_dot_java = dummy_r_dot_java_info.library_output.abi return (dummy_r_dot_java, AndroidLibraryIntellijInfo( dummy_r_dot_java = dummy_r_dot_java, android_resource_deps = android_resources, diff --git a/prelude/android/android_manifest.bzl 
b/prelude/android/android_manifest.bzl index 22ff17e37f449..af7e06791d800 100644 --- a/prelude/android/android_manifest.bzl +++ b/prelude/android/android_manifest.bzl @@ -13,6 +13,7 @@ load( ) load("@prelude//android:android_toolchain.bzl", "AndroidToolchainInfo") load("@prelude//android:voltron.bzl", "ROOT_MODULE") +load("@prelude//utils:argfile.bzl", "argfile") def android_manifest_impl(ctx: AnalysisContext) -> list[Provider]: output, merge_report = generate_android_manifest( @@ -49,14 +50,13 @@ def generate_android_manifest( elif type(manifests) == "transitive_set": manifests = manifests.project_as_args("artifacts", ordering = "bfs") - library_manifest_paths_file = ctx.actions.write("{}/library_manifest_paths_file".format(module_name), manifests) + library_manifest_paths_file = argfile(actions = ctx.actions, name = "{}/library_manifest_paths_file".format(module_name), args = manifests) generate_manifest_cmd.add(["--library-manifests-list", library_manifest_paths_file]) - generate_manifest_cmd.hidden(manifests) placeholder_entries_args = cmd_args() for key, val in placeholder_entries.items(): - placeholder_entries_args.add(cmd_args(key, val, delimiter = " ")) + placeholder_entries_args.add(cmd_args(str(key), str(val), delimiter = " ")) placeholder_entries_file = ctx.actions.write("{}/placeholder_entries_file".format(module_name), placeholder_entries_args) generate_manifest_cmd.add(["--placeholder-entries-list", placeholder_entries_file]) diff --git a/prelude/android/android_prebuilt_aar.bzl b/prelude/android/android_prebuilt_aar.bzl index eda9485c4f940..0392c71fb41fb 100644 --- a/prelude/android/android_prebuilt_aar.bzl +++ b/prelude/android/android_prebuilt_aar.bzl @@ -13,6 +13,7 @@ load( "JavaClasspathEntry", "create_abi", "create_java_library_providers", + "generate_java_classpath_snapshot", ) load("@prelude//java:java_toolchain.bzl", "JavaToolchainInfo") @@ -25,6 +26,7 @@ def android_prebuilt_aar_impl(ctx: AnalysisContext) -> list[Provider]: jni = ctx.actions.declare_output("jni", dir = True) annotation_jars_dir = ctx.actions.declare_output("annotation_jars", dir = True) proguard_config = ctx.actions.declare_output("proguard.txt") + lint_jar = ctx.actions.declare_output("lint.jar") android_toolchain = ctx.attrs._android_toolchain[AndroidToolchainInfo] unpack_aar_tool = android_toolchain.unpack_aar[RunInfo] @@ -53,6 +55,8 @@ def android_prebuilt_aar_impl(ctx: AnalysisContext) -> list[Provider]: proguard_config.as_output(), "--jar-builder-tool", jar_builder_tool, + "--lint-jar-path", + lint_jar.as_output(), ] ctx.actions.run(unpack_aar_cmd, category = "android_unpack_aar") @@ -70,22 +74,28 @@ def android_prebuilt_aar_impl(ctx: AnalysisContext) -> list[Provider]: ) abi = None if java_toolchain.is_bootstrap_toolchain else create_abi(ctx.actions, java_toolchain.class_abi_generator, all_classes_jar) + abi_jar_snapshot = generate_java_classpath_snapshot(ctx.actions, java_toolchain.cp_snapshot_generator, abi or all_classes_jar, "") library_output_classpath_entry = JavaClasspathEntry( full_library = all_classes_jar, abi = abi or all_classes_jar, abi_as_dir = None, required_for_source_only_abi = ctx.attrs.required_for_source_only_abi, + abi_jar_snapshot = abi_jar_snapshot, ) - java_library_info, java_packaging_info, shared_library_info, linkable_graph, cxx_resource_info, template_placeholder_info, java_library_intellij_info = create_java_library_providers( + java_library_info, java_packaging_info, global_code_info, shared_library_info, linkable_graph, cxx_resource_info, 
template_placeholder_info, java_library_intellij_info = create_java_library_providers( ctx = ctx, library_output = library_output_classpath_entry, + global_code_config = java_toolchain.global_code_config, exported_deps = ctx.attrs.deps, + provided_deps = ctx.attrs.desugar_deps, needs_desugar = True, is_prebuilt_jar = True, annotation_jars_dir = annotation_jars_dir, proguard_config = proguard_config, + lint_jar = lint_jar, + sources_jar = ctx.attrs.source_jar, ) native_library = PrebuiltNativeLibraryDir( @@ -98,12 +108,21 @@ def android_prebuilt_aar_impl(ctx: AnalysisContext) -> list[Provider]: return [ java_library_info, java_packaging_info, + global_code_info, shared_library_info, cxx_resource_info, linkable_graph, template_placeholder_info, java_library_intellij_info, - merge_android_packageable_info(ctx.label, ctx.actions, ctx.attrs.deps, manifest = manifest, prebuilt_native_library_dir = native_library, resource_info = resource_info), + merge_android_packageable_info( + ctx.label, + ctx.actions, + ctx.attrs.deps, + manifest = manifest, + prebuilt_native_library_dir = native_library, + resource_info = resource_info, + for_primary_apk = ctx.attrs.for_primary_apk, + ), resource_info, DefaultInfo(default_output = all_classes_jar, other_outputs = [ manifest, diff --git a/prelude/android/android_providers.bzl b/prelude/android/android_providers.bzl index 8839bcab66e9f..b47bb853b27ab 100644 --- a/prelude/android/android_providers.bzl +++ b/prelude/android/android_providers.bzl @@ -34,22 +34,29 @@ ExopackageNativeInfo = record( ) ExopackageResourcesInfo = record( - assets = [Artifact, None], - assets_hash = [Artifact, None], + assets = Artifact | None, + assets_hash = Artifact | None, res = Artifact, res_hash = Artifact, ) +RDotJavaInfo = record( + identifier = str, + library_info = JavaLibraryInfo, + source_zipped = Artifact, +) + AndroidBinaryNativeLibsInfo = record( - apk_under_test_prebuilt_native_library_dirs = list[PrebuiltNativeLibraryDir], + prebuilt_native_library_dirs = list[PrebuiltNativeLibraryDir], # Indicates which shared lib producing targets are included in the binary. Used by instrumentation tests # to exclude those from the test apk. 
- apk_under_test_shared_libraries = list[TargetLabel], - exopackage_info = ["ExopackageNativeInfo", None], + shared_libraries = list[TargetLabel], + exopackage_info = [ExopackageNativeInfo, None], root_module_native_lib_assets = list[Artifact], non_root_module_native_lib_assets = list[Artifact], native_libs_for_primary_apk = list[Artifact], generated_java_code = list[JavaLibraryInfo], + unstripped_shared_libraries = [Artifact, None], ) AndroidBinaryResourcesInfo = record( @@ -59,22 +66,24 @@ AndroidBinaryResourcesInfo = record( manifest = Artifact, # per-module manifests (packaged as assets) module_manifests = list[Artifact], + # per-module assets APKs (for .aabs only) + module_assets = Artifact | None, # zip containing any strings packaged as assets - packaged_string_assets = [Artifact, None], + packaged_string_assets = Artifact | None, # "APK" containing resources to be used by the Android binary primary_resources_apk = Artifact, # proguard config needed to retain used resources proguard_config_file = Artifact, # R.java jars containing all the linked resources - r_dot_javas = list[JavaLibraryInfo], + r_dot_java_infos = list[RDotJavaInfo], # directory containing filtered string resources files - string_source_map = [Artifact, None], + string_source_map = Artifact | None, # directory containing filtered string resources files for Voltron language packs - voltron_string_source_map = [Artifact, None], + voltron_string_source_map = Artifact | None, # list of jars that could contain resources that should be packaged into the APK jar_files_that_may_contain_resources = list[Artifact], # The resource infos that are used in this APK - unfiltered_resource_infos = list["AndroidResourceInfo"], + unfiltered_resource_infos = list, # list[AndroidResourceInfo] ) # Information about an `android_build_config` @@ -89,6 +98,7 @@ AndroidBuildConfigInfo = provider( fields = { "package": str, "build_config_fields": list[BuildConfigField], + "values_file": provider_field(typing.Any, default = None), }, ) @@ -104,6 +114,8 @@ AndroidApkInfo = provider( fields = { "apk": provider_field(typing.Any, default = None), "manifest": provider_field(typing.Any, default = None), + "materialized_artifacts": provider_field(typing.Any, default = None), + "unstripped_shared_libraries": provider_field(typing.Any, default = None), # artifact }, ) @@ -111,6 +123,7 @@ AndroidAabInfo = provider( fields = { "aab": provider_field(typing.Any, default = None), "manifest": provider_field(typing.Any, default = None), + "materialized_artifacts": provider_field(typing.Any, default = None), }, ) @@ -152,6 +165,7 @@ ResourceInfoTSet = transitive_set() DepsInfo = record( name = TargetLabel, deps = list[TargetLabel], + for_primary_apk = bool, ) AndroidPackageableInfo = provider( @@ -177,6 +191,8 @@ AndroidResourceInfo = provider( "raw_target": provider_field(typing.Any, default = None), # TargetLabel # output of running `aapt2_compile` on the resources, if resources are present "aapt2_compile_output": provider_field(typing.Any, default = None), # Artifact | None + # locales that should always be included in the APK for this resource + "allowlisted_locales": provider_field(typing.Any, default = []), # List # if False, then the "res" are not affected by the strings-as-assets resource filter "allow_strings_as_assets_resource_filtering": provider_field(typing.Any, default = None), # bool # assets defined by this rule. 
May be empty @@ -205,11 +221,11 @@ ExportedAndroidResourceInfo = provider( DexFilesInfo = record( primary_dex = Artifact, - primary_dex_class_names = [Artifact, None], + primary_dex_class_names = Artifact | None, root_module_secondary_dex_dirs = list[Artifact], non_root_module_secondary_dex_dirs = list[Artifact], secondary_dex_exopackage_info = [ExopackageDexInfo, None], - proguard_text_files_path = [Artifact, None], + proguard_text_files_path = Artifact | None, ) ExopackageInfo = record( @@ -232,9 +248,10 @@ def merge_android_packageable_info( actions: AnalysisActions, deps: list[Dependency], build_config_info: [AndroidBuildConfigInfo, None] = None, - manifest: [Artifact, None] = None, + manifest: Artifact | None = None, prebuilt_native_library_dir: [PrebuiltNativeLibraryDir, None] = None, - resource_info: [AndroidResourceInfo, None] = None) -> AndroidPackageableInfo: + resource_info: [AndroidResourceInfo, None] = None, + for_primary_apk: bool = False) -> AndroidPackageableInfo: android_packageable_deps = filter(None, [x.get(AndroidPackageableInfo) for x in deps]) build_config_infos = _get_transitive_set( @@ -250,6 +267,7 @@ def merge_android_packageable_info( DepsInfo( name = label.raw_target(), deps = [dep.target_label for dep in android_packageable_deps], + for_primary_apk = for_primary_apk, ), AndroidDepsTSet, ) diff --git a/prelude/android/android_resource.bzl b/prelude/android/android_resource.bzl index 19221cd546cf4..e544acf8ccee8 100644 --- a/prelude/android/android_resource.bzl +++ b/prelude/android/android_resource.bzl @@ -5,14 +5,16 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -load("@prelude//java:java_providers.bzl", "get_java_packaging_info") -load("@prelude//utils:utils.bzl", "expect") +load("@prelude//java:java_providers.bzl", "derive_compiling_deps", "get_global_code_info", "get_java_packaging_info") +load("@prelude//java:java_toolchain.bzl", "JavaToolchainInfo") +load("@prelude//utils:argfile.bzl", "argfile") +load("@prelude//utils:expect.bzl", "expect") load(":android_providers.bzl", "AndroidResourceInfo", "ExportedAndroidResourceInfo", "RESOURCE_PRIORITY_NORMAL", "merge_android_packageable_info") load(":android_toolchain.bzl", "AndroidToolchainInfo") JAVA_PACKAGE_FILENAME = "java_package.txt" -def _convert_to_artifact_dir(ctx: AnalysisContext, attr: [Dependency, dict, Artifact, None], attr_name: str) -> [Artifact, None]: +def _convert_to_artifact_dir(ctx: AnalysisContext, attr: [Dependency, dict, Artifact, None], attr_name: str) -> Artifact | None: if isinstance(attr, Dependency): expect(len(attr[DefaultInfo].default_outputs) == 1, "Expect one default output from build dep of attr {}!".format(attr_name)) return attr[DefaultInfo].default_outputs[0] @@ -45,6 +47,7 @@ def android_resource_impl(ctx: AnalysisContext) -> list[Provider]: resource_info = AndroidResourceInfo( raw_target = ctx.label.raw_target(), aapt2_compile_output = aapt2_compile_output, + allowlisted_locales = ctx.attrs.allowlisted_locales, allow_strings_as_assets_resource_filtering = not ctx.attrs.has_whitelisted_strings, assets = assets, manifest_file = ctx.attrs.manifest, @@ -58,6 +61,7 @@ def android_resource_impl(ctx: AnalysisContext) -> list[Provider]: resource_info = AndroidResourceInfo( raw_target = ctx.label.raw_target(), aapt2_compile_output = None, + allowlisted_locales = ctx.attrs.allowlisted_locales, allow_strings_as_assets_resource_filtering = not ctx.attrs.has_whitelisted_strings, assets = assets, manifest_file = ctx.attrs.manifest, @@ 
-71,6 +75,8 @@ def android_resource_impl(ctx: AnalysisContext) -> list[Provider]: providers.append(merge_android_packageable_info(ctx.label, ctx.actions, ctx.attrs.deps, manifest = ctx.attrs.manifest, resource_info = resource_info)) providers.append(get_java_packaging_info(ctx, ctx.attrs.deps)) providers.append(DefaultInfo(default_output = default_output, sub_targets = sub_targets)) + compiling_deps = derive_compiling_deps(ctx.actions, None, ctx.attrs.deps) + providers.append(get_global_code_info(ctx, ctx.attrs.deps, ctx.attrs.deps, derive_compiling_deps(ctx.actions, None, []), compiling_deps, compiling_deps, ctx.attrs._java_toolchain[JavaToolchainInfo].global_code_config)) return providers @@ -80,20 +86,20 @@ def aapt2_compile( android_toolchain: AndroidToolchainInfo, skip_crunch_pngs: bool = False, identifier: [str, None] = None) -> Artifact: - aapt2_command = cmd_args(android_toolchain.aapt2) - aapt2_command.add("compile") - aapt2_command.add("--legacy") + aapt2_command = [cmd_args(android_toolchain.aapt2)] + aapt2_command.append("compile") + aapt2_command.append("--legacy") if skip_crunch_pngs: - aapt2_command.add("--no-crunch") - aapt2_command.add(["--dir", resources_dir]) + aapt2_command.append("--no-crunch") + aapt2_command.extend(["--dir", resources_dir]) aapt2_output = ctx.actions.declare_output("{}_resources.flata".format(identifier) if identifier else "resources.flata") - aapt2_command.add("-o", aapt2_output.as_output()) + aapt2_command.extend(["-o", aapt2_output.as_output()]) - ctx.actions.run(aapt2_command, category = "aapt2_compile", identifier = identifier) + ctx.actions.run(cmd_args(aapt2_command), category = "aapt2_compile", identifier = identifier) return aapt2_output -def _get_package(ctx: AnalysisContext, package: [str, None], manifest: [Artifact, None]) -> Artifact: +def _get_package(ctx: AnalysisContext, package: [str, None], manifest: Artifact | None) -> Artifact: if package: return ctx.actions.write(JAVA_PACKAGE_FILENAME, package) else: @@ -102,9 +108,13 @@ def _get_package(ctx: AnalysisContext, package: [str, None], manifest: [Artifact def extract_package_from_manifest(ctx: AnalysisContext, manifest: Artifact) -> Artifact: r_dot_java_package = ctx.actions.declare_output(JAVA_PACKAGE_FILENAME) - extract_package_cmd = cmd_args(ctx.attrs._android_toolchain[AndroidToolchainInfo].manifest_utils[RunInfo]) - extract_package_cmd.add(["--manifest-path", manifest]) - extract_package_cmd.add(["--package-output", r_dot_java_package.as_output()]) + extract_package_cmd = cmd_args( + ctx.attrs._android_toolchain[AndroidToolchainInfo].manifest_utils[RunInfo], + "--manifest-path", + manifest, + "--package-output", + r_dot_java_package.as_output(), + ) ctx.actions.run(extract_package_cmd, category = "android_extract_package") @@ -123,10 +133,9 @@ def get_text_symbols( dep_symbols = _get_dep_symbols(deps) dep_symbol_paths.add(dep_symbols) - dep_symbol_paths_file, _ = ctx.actions.write("{}_dep_symbol_paths_file".format(identifier) if identifier else "dep_symbol_paths_file", dep_symbol_paths, allow_args = True) + dep_symbol_paths_file = argfile(actions = ctx.actions, name = "{}_dep_symbol_paths_file".format(identifier) if identifier else "dep_symbol_paths_file", args = dep_symbol_paths, allow_args = True) mini_aapt_cmd.add(["--dep-symbol-paths", dep_symbol_paths_file]) - mini_aapt_cmd.hidden(dep_symbols) text_symbols = ctx.actions.declare_output("{}_R.txt".format(identifier) if identifier else "R.txt") mini_aapt_cmd.add(["--output-path", text_symbols.as_output()]) diff --git 
a/prelude/android/android_toolchain.bzl b/prelude/android/android_toolchain.bzl index 82a21d9bec602..7142a94cd5419 100644 --- a/prelude/android/android_toolchain.bzl +++ b/prelude/android/android_toolchain.bzl @@ -13,16 +13,23 @@ AndroidToolchainInfo = provider( # @unsorted-dict-items fields = { "aapt2": provider_field(typing.Any, default = None), + "aapt2_filter_resources": provider_field(typing.Any, default = None), "aar_builder": provider_field(typing.Any, default = None), "adb": provider_field(typing.Any, default = None), "aidl": provider_field(typing.Any, default = None), "android_jar": provider_field(typing.Any, default = None), "android_bootclasspath": provider_field(typing.Any, default = None), + "android_optional_jars": provider_field(typing.Any, default = None), "apk_builder": provider_field(typing.Any, default = None), "apk_module_graph": provider_field(typing.Any, default = None), + "app_without_resources_stub": provider_field(typing.Any, default = None), + "bundle_apks_builder": provider_field(typing.Any, default = None), "bundle_builder": provider_field(typing.Any, default = None), "combine_native_library_dirs": provider_field(typing.Any, default = None), - "compress_libraries": provider_field(typing.Any, default = None), + "cross_module_native_deps_check": provider_field( + typing.Any, + default = None, + ), "d8_command": provider_field(typing.Any, default = None), "exo_resources_rewriter": provider_field(typing.Any, default = None), "exopackage_agent_apk": provider_field(typing.Any, default = None), @@ -47,6 +54,7 @@ AndroidToolchainInfo = provider( "mini_aapt": provider_field(typing.Any, default = None), "native_libs_as_assets_metadata": provider_field(typing.Any, default = None), "optimized_proguard_config": provider_field(typing.Any, default = None), + "p7zip": provider_field(typing.Any, default = None), "package_meta_inf_version_files": provider_field(typing.Any, default = None), "package_strings_as_assets": provider_field(typing.Any, default = None), "prebuilt_aar_resources_have_low_priority": provider_field(typing.Any, default = None), diff --git a/prelude/android/apk_genrule.bzl b/prelude/android/apk_genrule.bzl index 0c9ac2e90009f..adcefa21fc621 100644 --- a/prelude/android/apk_genrule.bzl +++ b/prelude/android/apk_genrule.bzl @@ -8,18 +8,32 @@ load("@prelude//:genrule.bzl", "process_genrule") load("@prelude//android:android_apk.bzl", "get_install_info") load("@prelude//android:android_providers.bzl", "AndroidAabInfo", "AndroidApkInfo", "AndroidApkUnderTestInfo") -load("@prelude//utils:utils.bzl", "expect") +load("@prelude//android:android_toolchain.bzl", "AndroidToolchainInfo") +load("@prelude//android:bundletool_util.bzl", "derive_universal_apk") +load("@prelude//java:java_providers.bzl", "KeystoreInfo") +load("@prelude//utils:expect.bzl", "expect") +load("@prelude//java/class_to_srcs.bzl", "JavaClassToSourceMapInfo") def apk_genrule_impl(ctx: AnalysisContext) -> list[Provider]: expect((ctx.attrs.apk == None) != (ctx.attrs.aab == None), "Exactly one of 'apk' and 'aab' must be specified") + input_android_apk_under_test_info = None + input_unstripped_shared_libraries = None + input_android_apk_subtargets = None if ctx.attrs.apk != None: # TODO(T104150125) The underlying APK should not have exopackage enabled input_android_apk_info = ctx.attrs.apk[AndroidApkInfo] expect(input_android_apk_info != None, "'apk' attribute must be an Android APK!") input_apk = input_android_apk_info.apk input_manifest = input_android_apk_info.manifest + input_materialized_artifacts = 
input_android_apk_info.materialized_artifacts + input_unstripped_shared_libraries = input_android_apk_info.unstripped_shared_libraries input_android_apk_under_test_info = ctx.attrs.apk[AndroidApkUnderTestInfo] + input_android_apk_subtargets = ctx.attrs.apk[DefaultInfo].sub_targets + + env_vars = { + "APK": cmd_args(input_apk), + } else: input_android_aab_info = ctx.attrs.aab[AndroidAabInfo] expect(input_android_aab_info != None, "'aab' attribute must be an Android Bundle!") @@ -27,33 +41,105 @@ def apk_genrule_impl(ctx: AnalysisContext) -> list[Provider]: # It's not an APK, but buck1 does this so we do it too for compatibility input_apk = input_android_aab_info.aab input_manifest = input_android_aab_info.manifest + input_materialized_artifacts = input_android_aab_info.materialized_artifacts - env_vars = { - "APK": cmd_args(input_apk), - } + env_vars = { + "AAB": cmd_args(input_apk), + } - # Like buck1, we ignore the 'out' attribute and construct the output path ourselves. - output_apk_name = "{}.apk".format(ctx.label.name) + genrule_providers = process_genrule(ctx, ctx.attrs.out, ctx.attrs.outs, env_vars, other_outputs = input_materialized_artifacts) - genrule_providers = process_genrule(ctx, output_apk_name, None, env_vars) + genrule_default_info = filter(lambda x: isinstance(x, DefaultInfo), genrule_providers) expect( - len(genrule_providers) == 1 and isinstance(genrule_providers[0], DefaultInfo), - "Expecting just a single DefaultInfo, but got {}".format(genrule_providers), + len(genrule_default_info) == 1, + "Expecting a single DefaultInfo, but got {}", + genrule_default_info, ) - output_apk = genrule_providers[0].default_outputs[0] - install_info = get_install_info( - ctx, - output_apk = output_apk, - manifest = input_manifest, - exopackage_info = None, + genrule_default_output = genrule_default_info[0].default_outputs[0] + genrule_default_output_is_aab = genrule_default_output.extension == ".aab" + genrule_default_output_is_apk = genrule_default_output.extension == ".apk" + + expect( + genrule_default_output_is_aab or genrule_default_output_is_apk, + "apk_genrule must output a '.apk' or '.aab' file, but got {}", + genrule_default_info, ) - return genrule_providers + [ - AndroidApkInfo( - apk = output_apk, - manifest = input_manifest, - ), - install_info, - ] + filter(None, [input_android_apk_under_test_info]) + if ctx.attrs.aab: + if genrule_default_output_is_aab: + output_aab_info = AndroidAabInfo( + aab = genrule_default_output, + manifest = input_manifest, + materialized_artifacts = input_materialized_artifacts, + ) + output_apk = None + else: + output_aab_info = None + output_apk = genrule_default_output + + if ctx.attrs.use_derived_apk: + expect(genrule_default_output_is_aab, "Default genrule output must end in '.aab' if use_derived_apk is True.") + + output_apk = derive_universal_apk( + ctx = ctx, + android_toolchain = ctx.attrs._android_toolchain[AndroidToolchainInfo], + app_bundle = genrule_default_output, + keystore = ctx.attrs.keystore[KeystoreInfo] if ctx.attrs.keystore else None, + ) + default_providers = [ + DefaultInfo( + default_output = output_apk, + other_outputs = input_materialized_artifacts + genrule_default_info[0].other_outputs, + sub_targets = { + "aab": [DefaultInfo( + default_outputs = [genrule_default_output], + )], + }, + ), + ] + filter(lambda x: not isinstance(x, DefaultInfo), genrule_providers) + else: + default_providers = genrule_providers + + else: + sub_targets = {k: [v[DefaultInfo]] for k, v in genrule_default_info[0].sub_targets.items()} + 
sub_targets.update({ + "unstripped_native_libraries": [input_android_apk_subtargets["unstripped_native_libraries"][DefaultInfo]], + "unstripped_native_libraries_json": [input_android_apk_subtargets["unstripped_native_libraries_json"][DefaultInfo]], + }) + expect(genrule_default_output_is_apk, "apk_genrule output must end in '.apk'") + output_apk = genrule_default_output + output_aab_info = None + default_providers = [ + DefaultInfo( + default_output = output_apk, + other_outputs = genrule_default_info[0].other_outputs, + sub_targets = sub_targets, + ), + ] + filter(lambda x: not isinstance(x, DefaultInfo), genrule_providers) + + class_to_src_map = [ctx.attrs.apk[JavaClassToSourceMapInfo]] if (ctx.attrs.apk and JavaClassToSourceMapInfo in ctx.attrs.apk) else [] + + if output_apk: + apk_providers = [ + AndroidApkInfo( + apk = output_apk, + manifest = input_manifest, + materialized_artifacts = input_materialized_artifacts, + unstripped_shared_libraries = input_unstripped_shared_libraries, + ), + get_install_info( + ctx, + output_apk = output_apk, + manifest = input_manifest, + exopackage_info = None, + ), + ] + else: + apk_providers = [] + + aab_providers = filter(None, [output_aab_info]) + apk_under_test_providers = filter(None, [input_android_apk_under_test_info]) + + return default_providers + apk_providers + aab_providers + apk_under_test_providers + class_to_src_map diff --git a/prelude/android/bundletool_util.bzl b/prelude/android/bundletool_util.bzl new file mode 100644 index 0000000000000..9105e1ccbe11a --- /dev/null +++ b/prelude/android/bundletool_util.bzl @@ -0,0 +1,42 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +load("@prelude//android:android_toolchain.bzl", "AndroidToolchainInfo") +load("@prelude//java:java_providers.bzl", "KeystoreInfo") # @unused used as type + +def derive_universal_apk( + ctx: AnalysisContext, + android_toolchain: AndroidToolchainInfo, + app_bundle: Artifact, + keystore: [KeystoreInfo, None]) -> Artifact: + output_apk = ctx.actions.declare_output("universal.apk") + + bundle_apks_builder_args = cmd_args([ + android_toolchain.bundle_apks_builder[RunInfo], + "--input-bundle", + app_bundle, + "--p7zip", + android_toolchain.p7zip, + "--aapt2", + android_toolchain.aapt2, + "--zipalign", + android_toolchain.zipalign[RunInfo], + "--output-apk", + output_apk.as_output(), + ]) + + if keystore: + bundle_apks_builder_args.add(cmd_args([ + "--keystore", + keystore.store, + "--keystore-properties", + keystore.properties, + ])) + + ctx.actions.run(bundle_apks_builder_args, category = "bundle_build", identifier = "build_universal_apk") + + return output_apk diff --git a/prelude/android/configuration.bzl b/prelude/android/configuration.bzl index b3890538861f7..54b95cc3b9756 100644 --- a/prelude/android/configuration.bzl +++ b/prelude/android/configuration.bzl @@ -7,7 +7,8 @@ load("@prelude//android:cpu_filters.bzl", "ALL_CPU_FILTERS", "CPU_FILTER_FOR_DEFAULT_PLATFORM", "CPU_FILTER_FOR_PRIMARY_PLATFORM") load("@prelude//android:min_sdk_version.bzl", "get_min_sdk_version_constraint_value_name", "get_min_sdk_version_range") -load("@prelude//utils:utils.bzl", "expect") +load("@prelude//cfg/modifier:name.bzl", "cfg_name") +load("@prelude//utils:expect.bzl", "expect") # Android binaries (APKs or AABs) can be built for one or more different platforms. buck2 supports # building Android binaries for arm32, arm64, x86, and x86_64. The platform(s) that we are building @@ -23,13 +24,14 @@ load("@prelude//utils:utils.bzl", "expect") # platforms). We only use the "arm64" native libraries if it is one of the specified platforms. We # "throw away" the non-native libraries for all other configured sub-graphs. +_DEFAULT_PLATFORM = "config//platform/android:arm64-fbsource" + _REFS = { "arm64": "config//cpu/constraints:arm64", "armv7": "config//cpu/constraints:arm32", "build_only_native_code": "prelude//android/constraints:build_only_native_code", "building_android_binary": "prelude//os:building_android_binary", "cpu": "config//cpu/constraints:cpu", - "default_platform": "config//platform/android:x86_32-fbsource", "maybe_build_only_native_code": "prelude//android/constraints:maybe_build_only_native_code", "maybe_building_android_binary": "prelude//os:maybe_building_android_binary", "min_sdk_version": "prelude//android/constraints:min_sdk_version", @@ -40,6 +42,8 @@ for min_sdk in get_min_sdk_version_range(): constraint_value_name = get_min_sdk_version_constraint_value_name(min_sdk) _REFS[constraint_value_name] = "prelude//android/constraints:{}".format(constraint_value_name) +_REFS["default_platform"] = read_root_config("build", "default_platform", _DEFAULT_PLATFORM) + def _cpu_split_transition_impl( platform: PlatformInfo, refs: struct, @@ -70,6 +74,13 @@ def _cpu_split_transition( if len(cpu_filters) == 1 and cpu_filters[0] == "default": default = refs.default_platform[PlatformInfo] + + # Use `cfg_name` function from modifier resolution so that we get the same cfg as default cfg + # of android libraries. 
+ default = PlatformInfo( + label = cfg_name(default.configuration), + configuration = default.configuration, + ) return {CPU_FILTER_FOR_DEFAULT_PLATFORM: default} expect(CPU_FILTER_FOR_PRIMARY_PLATFORM == "arm64") @@ -105,12 +116,16 @@ def _cpu_split_transition( if len(new_configs) > 0: updated_constraints[refs.maybe_build_only_native_code[ConstraintSettingInfo].label] = refs.build_only_native_code[ConstraintValueInfo] + cfg_info = ConfigurationInfo( + constraints = updated_constraints, + values = platform.configuration.values, + ) + + # Use `cfg_name` function from modifier resolution so that we get the same cfg as default cfg + # of android libraries. new_configs[platform_name] = PlatformInfo( - label = platform_name, - configuration = ConfigurationInfo( - constraints = updated_constraints, - values = platform.configuration.values, - ), + label = cfg_name(cfg_info), + configuration = cfg_info, ) return new_configs diff --git a/prelude/android/constraints/BUCK b/prelude/android/constraints/BUCK deleted file mode 100644 index 9cf136a81636d..0000000000000 --- a/prelude/android/constraints/BUCK +++ /dev/null @@ -1,33 +0,0 @@ -load("@prelude//android:min_sdk_version.bzl", "get_min_sdk_version_constraint_value_name", "get_min_sdk_version_range") - -native.constraint_setting( - name = "maybe_build_only_native_code", - visibility = ["PUBLIC"], -) - -native.constraint_value( - name = "build_only_native_code", - constraint_setting = ":maybe_build_only_native_code", - visibility = ["PUBLIC"], -) - -native.filegroup( - name = "files", - srcs = glob( - ["**"], - ), - visibility = ["PUBLIC"], -) - -native.constraint_setting( - name = "min_sdk_version", - visibility = ["PUBLIC"], -) - -[ - native.constraint_value( - name = get_min_sdk_version_constraint_value_name(min_sdk), - constraint_setting = ":min_sdk_version", - ) - for min_sdk in get_min_sdk_version_range() -] diff --git a/prelude/android/constraints/BUCK.v2 b/prelude/android/constraints/BUCK.v2 new file mode 100644 index 0000000000000..a211f28db068a --- /dev/null +++ b/prelude/android/constraints/BUCK.v2 @@ -0,0 +1,51 @@ +load("@prelude//android:min_sdk_version.bzl", "get_min_sdk_version_constraint_value_name", "get_min_sdk_version_range") +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +prelude = native # Avoid warnings and auto-formatters + +prelude.constraint_setting( + name = "maybe_build_only_native_code", + visibility = ["PUBLIC"], +) + +prelude.constraint_value( + name = "build_only_native_code", + constraint_setting = ":maybe_build_only_native_code", + visibility = ["PUBLIC"], +) + +prelude.constraint_setting( + name = "maybe_merge_native_libraries", + visibility = ["PUBLIC"], +) + +prelude.constraint_value( + name = "merge_native_libraries", + constraint_setting = ":maybe_merge_native_libraries", + visibility = ["PUBLIC"], +) + +prelude.filegroup( + name = "files", + srcs = glob( + ["**"], + ), + visibility = ["PUBLIC"], +) + +prelude.constraint_setting( + name = "min_sdk_version", + visibility = ["PUBLIC"], +) + +[ + prelude.constraint_value( + name = get_min_sdk_version_constraint_value_name(min_sdk), + constraint_setting = ":min_sdk_version", + ) + for min_sdk in get_min_sdk_version_range() +] diff --git a/prelude/android/cpu_filters.bzl b/prelude/android/cpu_filters.bzl index 796d6a16bc76d..814a38c2da2f0 100644 --- a/prelude/android/cpu_filters.bzl +++ b/prelude/android/cpu_filters.bzl @@ -14,7 +14,7 @@ CPU_FILTER_TO_ABI_DIRECTORY = { ALL_CPU_FILTERS = 
CPU_FILTER_TO_ABI_DIRECTORY.keys() -CPU_FILTER_FOR_DEFAULT_PLATFORM = "x86" +CPU_FILTER_FOR_DEFAULT_PLATFORM = "arm64" # The "primary platform" is the one that we use for all # the non-native targets. We keep this consistent regardless diff --git a/prelude/android/dex_rules.bzl b/prelude/android/dex_rules.bzl index f3c1a5a465d5e..f1928a4df6b8e 100644 --- a/prelude/android/dex_rules.bzl +++ b/prelude/android/dex_rules.bzl @@ -11,7 +11,9 @@ load("@prelude//android:voltron.bzl", "ROOT_MODULE", "get_apk_module_graph_info" load("@prelude//java:dex.bzl", "DexLibraryInfo", "get_dex_produced_from_java_library") load("@prelude//java:dex_toolchain.bzl", "DexToolchainInfo") load("@prelude//java:java_library.bzl", "compile_to_jar") -load("@prelude//utils:utils.bzl", "expect", "flatten") +load("@prelude//utils:argfile.bzl", "argfile", "at_argfile") +load("@prelude//utils:expect.bzl", "expect") +load("@prelude//utils:utils.bzl", "flatten") load("@prelude//paths.bzl", "paths") # Android builds use a tool called `d8` to compile Java bytecode to DEX (Dalvik EXecutable) @@ -101,15 +103,14 @@ def get_single_primary_dex( output_dex_file = ctx.actions.declare_output("classes.dex") d8_cmd.add(["--output-dex-file", output_dex_file.as_output()]) - jar_to_dex_file = ctx.actions.write("jar_to_dex_file.txt", java_library_jars) + jar_to_dex_file = argfile(actions = ctx.actions, name = "jar_to_dex_file.txt", args = java_library_jars) d8_cmd.add(["--files-to-dex-list", jar_to_dex_file]) - d8_cmd.hidden(java_library_jars) d8_cmd.add(["--android-jar", android_toolchain.android_jar]) if not is_optimized: d8_cmd.add("--no-optimize") - ctx.actions.run(d8_cmd, category = "d8", identifier = "{}:{}".format(ctx.label.package, ctx.label.name)) + ctx.actions.run(d8_cmd, category = "get_single_primary_dex", identifier = "{}:{}".format(ctx.label.package, ctx.label.name)) return DexFilesInfo( primary_dex = output_dex_file, @@ -125,10 +126,10 @@ def get_multi_dex( android_toolchain: AndroidToolchainInfo, java_library_jars_to_owners: dict[Artifact, TargetLabel], primary_dex_patterns: list[str], - proguard_configuration_output_file: [Artifact, None] = None, - proguard_mapping_output_file: [Artifact, None] = None, + proguard_configuration_output_file: Artifact | None = None, + proguard_mapping_output_file: Artifact | None = None, is_optimized: bool = False, - apk_module_graph_file: [Artifact, None] = None) -> DexFilesInfo: + apk_module_graph_file: Artifact | None = None) -> DexFilesInfo: expect( not _is_exopackage_enabled_for_secondary_dex(ctx), "secondary dex exopackage can only be enabled on pre-dexed builds!", @@ -153,7 +154,7 @@ def get_multi_dex( secondary_dex_dir_srcs = {} all_jars = flatten(module_to_jars.values()) - all_jars_list = ctx.actions.write("all_jars_classpath.txt", all_jars) + all_jars_list = argfile(actions = ctx.actions, name = "all_jars_classpath.txt", args = all_jars) for module, jars in module_to_jars.items(): multi_dex_cmd = cmd_args(android_toolchain.multi_dex_command[RunInfo]) secondary_dex_compression_cmd = cmd_args(android_toolchain.secondary_dex_compression_command[RunInfo]) @@ -174,9 +175,8 @@ def get_multi_dex( android_toolchain, ) - primary_dex_jar_to_dex_file = ctx.actions.write("primary_dex_jars_to_dex_file_for_root_module.txt", primary_dex_jars) + primary_dex_jar_to_dex_file = argfile(actions = ctx.actions, name = "primary_dex_jars_to_dex_file_for_root_module.txt", args = primary_dex_jars) multi_dex_cmd.add("--primary-dex-files-to-dex-list", primary_dex_jar_to_dex_file) -
multi_dex_cmd.hidden(primary_dex_jars) multi_dex_cmd.add("--minimize-primary-dex") else: jars_to_dex = jars @@ -193,16 +193,14 @@ def get_multi_dex( secondary_dex_compression_cmd.add("--secondary-dex-output-dir", secondary_dex_dir_for_module.as_output()) jars_to_dex = jars multi_dex_cmd.add("--classpath-files", all_jars_list) - multi_dex_cmd.hidden(all_jars) multi_dex_cmd.add("--module", module) multi_dex_cmd.add("--canary-class-name", apk_module_graph_info.module_to_canary_class_name_function(module)) secondary_dex_compression_cmd.add("--module", module) secondary_dex_compression_cmd.add("--canary-class-name", apk_module_graph_info.module_to_canary_class_name_function(module)) - jar_to_dex_file = ctx.actions.write("jars_to_dex_file_for_module_{}.txt".format(module), jars_to_dex) + jar_to_dex_file = argfile(actions = ctx.actions, name = "jars_to_dex_file_for_module_{}.txt".format(module), args = jars_to_dex) multi_dex_cmd.add("--files-to-dex-list", jar_to_dex_file) - multi_dex_cmd.hidden(jars_to_dex) multi_dex_cmd.add("--android-jar", android_toolchain.android_jar) if not is_optimized: @@ -221,7 +219,7 @@ def get_multi_dex( ctx.actions.symlinked_dir(outputs[secondary_dex_dir], secondary_dex_dir_srcs) - ctx.actions.dynamic_output(dynamic = inputs, inputs = [], outputs = outputs, f = do_multi_dex) + ctx.actions.dynamic_output(dynamic = inputs, inputs = [], outputs = [o.as_output() for o in outputs], f = do_multi_dex) return DexFilesInfo( primary_dex = primary_dex_file, @@ -237,8 +235,8 @@ def _get_primary_dex_and_secondary_dex_jars( jars: list[Artifact], java_library_jars_to_owners: dict[Artifact, TargetLabel], primary_dex_patterns_file: Artifact, - proguard_configuration_output_file: [Artifact, None], - proguard_mapping_output_file: [Artifact, None], + proguard_configuration_output_file: Artifact | None, + proguard_mapping_output_file: Artifact | None, android_toolchain: AndroidToolchainInfo) -> (list[Artifact], list[Artifact]): primary_dex_jars = [] secondary_dex_jars = [] @@ -320,7 +318,7 @@ DexInputsWithClassNamesAndWeightEstimatesFile = record( SecondaryDexMetadataConfig = record( secondary_dex_compression = str, secondary_dex_metadata_path = [str, None], - secondary_dex_metadata_file = [Artifact, None], + secondary_dex_metadata_file = Artifact | None, secondary_dex_metadata_line = Artifact, secondary_dex_canary_class_name = str, ) @@ -364,8 +362,7 @@ def _filter_pre_dexed_libs( batch_number: int) -> DexInputsWithClassNamesAndWeightEstimatesFile: weight_estimate_and_filtered_class_names_file = actions.declare_output("class_names_and_weight_estimates_for_batch_{}".format(batch_number)) - filter_dex_cmd = cmd_args([ - android_toolchain.filter_dex_class_names[RunInfo], + filter_dex_cmd_args = cmd_args([ "--primary-dex-patterns", primary_dex_patterns_file, "--dex-target-identifiers", @@ -377,6 +374,15 @@ def _filter_pre_dexed_libs( "--output", weight_estimate_and_filtered_class_names_file.as_output(), ]) + + filter_dex_cmd = cmd_args([ + android_toolchain.filter_dex_class_names[RunInfo], + at_argfile( + actions = actions, + name = "filter_dex_cmd_args_{}".format(batch_number), + args = filter_dex_cmd_args, + ), + ]) actions.run(filter_dex_cmd, category = "filter_dex", identifier = "batch_{}".format(batch_number)) return DexInputsWithClassNamesAndWeightEstimatesFile(libs = pre_dexed_libs, weight_estimate_and_filtered_class_names_file = weight_estimate_and_filtered_class_names_file) @@ -392,7 +398,7 @@ def merge_to_split_dex( android_toolchain: AndroidToolchainInfo, pre_dexed_libs: 
list[DexLibraryInfo], split_dex_merge_config: SplitDexMergeConfig, - apk_module_graph_file: [Artifact, None] = None) -> DexFilesInfo: + apk_module_graph_file: Artifact | None = None) -> DexFilesInfo: is_exopackage_enabled_for_secondary_dex = _is_exopackage_enabled_for_secondary_dex(ctx) if is_exopackage_enabled_for_secondary_dex: expect( @@ -548,7 +554,7 @@ def merge_to_split_dex( metadata_lines.append(artifacts[metadata_line_artifact].read_string().strip()) ctx.actions.write(outputs[metadata_dot_txt], metadata_lines) - ctx.actions.dynamic_output(dynamic = flatten(metadata_line_artifacts_by_module.values()), inputs = [], outputs = metadata_dot_txt_files_by_module.values(), f = write_metadata_dot_txts) + ctx.actions.dynamic_output(dynamic = flatten(metadata_line_artifacts_by_module.values()), inputs = [], outputs = [o.as_output() for o in metadata_dot_txt_files_by_module.values()], f = write_metadata_dot_txts) ctx.actions.symlinked_dir( outputs[root_module_secondary_dexes_dir], @@ -559,7 +565,7 @@ def merge_to_split_dex( non_root_module_secondary_dexes_for_symlinking, ) - ctx.actions.dynamic_output(dynamic = input_artifacts, inputs = [], outputs = outputs, f = merge_pre_dexed_libs) + ctx.actions.dynamic_output(dynamic = input_artifacts, inputs = [], outputs = [o.as_output() for o in outputs], f = merge_pre_dexed_libs) if is_exopackage_enabled_for_secondary_dex: root_module_secondary_dex_dirs = [] @@ -586,15 +592,14 @@ def _merge_dexes( output_dex_file: Artifact, pre_dexed_artifacts: list[Artifact], pre_dexed_artifacts_file: Artifact, - class_names_to_include: [Artifact, None] = None, - secondary_output_dex_file: [Artifact, None] = None, + class_names_to_include: Artifact | None = None, + secondary_output_dex_file: Artifact | None = None, secondary_dex_metadata_config: [SecondaryDexMetadataConfig, None] = None): d8_cmd = cmd_args(android_toolchain.d8_command[RunInfo]) d8_cmd.add(["--output-dex-file", output_dex_file.as_output()]) - pre_dexed_artifacts_to_dex_file = ctx.actions.write(pre_dexed_artifacts_file.as_output(), pre_dexed_artifacts) + pre_dexed_artifacts_to_dex_file = argfile(actions = ctx.actions, name = pre_dexed_artifacts_file, args = pre_dexed_artifacts) d8_cmd.add(["--files-to-dex-list", pre_dexed_artifacts_to_dex_file]) - d8_cmd.hidden(pre_dexed_artifacts) d8_cmd.add(["--android-jar", android_toolchain.android_jar]) d8_cmd.add(_DEX_MERGE_OPTIONS) @@ -614,7 +619,7 @@ def _merge_dexes( ctx.actions.run( d8_cmd, - category = "d8", + category = "merge_dexes", identifier = "{}:{} {}".format(ctx.label.package, ctx.label.name, output_dex_file.short_path), ) @@ -695,9 +700,9 @@ def _get_raw_secondary_dex_name(index: int, module: str) -> str: if is_root_module(module): return "classes{}.dex".format(index + 2) elif index == 0: - return "classes.dex".format(module) + return "classes.dex" else: - return "classes{}.dex".format(module, index + 1) + return "classes{}.dex".format(index + 1) def _get_raw_secondary_dex_path(index: int, module: str): if is_root_module(module): diff --git a/prelude/android/exopackage.bzl b/prelude/android/exopackage.bzl index 7d851439dd3cf..3f7a1276bc8b1 100644 --- a/prelude/android/exopackage.bzl +++ b/prelude/android/exopackage.bzl @@ -5,7 +5,7 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
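(Aside, not part of the patch: the dex_rules.bzl hunks above repeatedly swap the two-step `ctx.actions.write(...)` plus `cmd.hidden(...)` pattern for a single `argfile(...)` call. The sketch below shows the two shapes side by side; it assumes only the `argfile` keyword signature visible in this diff (`actions`, `name`, `args`), and the helper name `_files_to_dex_sketch` is hypothetical.)

    load("@prelude//utils:argfile.bzl", "argfile")

    def _files_to_dex_sketch(ctx, d8_cmd, jars):
        # Old shape: write the list file, then separately mark the jars as
        # hidden inputs so the action actually depends on them:
        #   jar_list = ctx.actions.write("jars.txt", jars)
        #   d8_cmd.add(["--files-to-dex-list", jar_list])
        #   d8_cmd.hidden(jars)
        # New shape: argfile returns a cmd_args carrying both the written
        # list file and the referenced jars as hidden inputs, so the
        # dependency cannot be dropped by forgetting the second step.
        jar_list = argfile(actions = ctx.actions, name = "jars.txt", args = jars)
        d8_cmd.add(["--files-to-dex-list", jar_list])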
-load("@prelude//utils:utils.bzl", "expect") +load("@prelude//utils:expect.bzl", "expect") SECONDARY_DEX = 1 NATIVE_LIBRARY = 2 diff --git a/prelude/android/gen_aidl.bzl b/prelude/android/gen_aidl.bzl index be0ec9d527720..a2b56d27b7f1e 100644 --- a/prelude/android/gen_aidl.bzl +++ b/prelude/android/gen_aidl.bzl @@ -6,6 +6,7 @@ # of this source tree. load("@prelude//java:java_toolchain.bzl", "JavaToolchainInfo") +load("@prelude//os_lookup:defs.bzl", "OsLookup") load(":android_toolchain.bzl", "AndroidToolchainInfo") _AidlSourceInfo = provider(fields = { @@ -14,14 +15,14 @@ _AidlSourceInfo = provider(fields = { def gen_aidl_impl(ctx: AnalysisContext) -> list[Provider]: android_toolchain = ctx.attrs._android_toolchain[AndroidToolchainInfo] - aidl_cmd = cmd_args(android_toolchain.aidl) - aidl_cmd.add("-p", android_toolchain.framework_aidl_file) - aidl_cmd.add("-I", ctx.attrs.import_path) - for path in ctx.attrs.import_paths: - aidl_cmd.add("-I", path) - - # We need the `aidl_srcs` files - otherwise the search on the `import_path` won't find anything. - aidl_cmd.hidden(ctx.attrs.aidl_srcs) + aidl_cmd = cmd_args( + [android_toolchain.aidl] + + ["-p", android_toolchain.framework_aidl_file] + + ["-I", ctx.attrs.import_path] + + [a for path in ctx.attrs.import_paths for a in ["-I", path]], + # We need the `aidl_srcs` files - otherwise the search on the `import_path` won't find anything. + hidden = ctx.attrs.aidl_srcs, + ) # Allow gen_aidl rules to depend on other gen_aidl rules, and make the source files from the # deps accessible in this context. This is an alternative to adding dependent files in @@ -34,12 +35,31 @@ def gen_aidl_impl(ctx: AnalysisContext) -> list[Provider]: else: warning("`{}` dependency `{}` is not a `gen_aidl` rule and will be ignored".format(ctx.label, dep.label)) - aidl_cmd.hidden(dep_srcs) + aidl_cmd.add(cmd_args(hidden = dep_srcs)) aidl_out = ctx.actions.declare_output("aidl_output", dir = True) aidl_cmd.add("-o", aidl_out.as_output()) aidl_cmd.add(ctx.attrs.aidl) - ctx.actions.run(aidl_cmd, category = "aidl") + + # Aidl does not create any output for parcelables. Therefore, we always initialize the output + # directory so that we don't get an "Action failed to produce outputs" error. + if ctx.attrs._exec_os_type[OsLookup].platform == "windows": + sh_cmd = cmd_args([ + cmd_args(["cmd.exe", "/c", cmd_args([aidl_out.as_output()], format = "if not exist {} md {}")]), + "&&", + aidl_cmd, + ]) + else: + sh_cmd = cmd_args([ + "sh", + "-c", + "mkdir -p $1 && $2", + "--", + aidl_out.as_output(), + cmd_args(aidl_cmd, delimiter = " "), + ]) + + ctx.actions.run(sh_cmd, category = "aidl") # Put the generated Java files into a zip file to be used as srcs to other rules. java_toolchain = ctx.attrs._java_toolchain[JavaToolchainInfo] diff --git a/prelude/android/preprocess_java_classes.bzl b/prelude/android/preprocess_java_classes.bzl index 0eca15c149dc0..4f4e8cc705987 100644 --- a/prelude/android/preprocess_java_classes.bzl +++ b/prelude/android/preprocess_java_classes.bzl @@ -6,33 +6,43 @@ # of this source tree. 
load("@prelude//android:android_toolchain.bzl", "AndroidToolchainInfo") +load("@prelude//android:util.bzl", "EnhancementContext") load("@prelude//java:java_toolchain.bzl", "JavaToolchainInfo") -load("@prelude//java/utils:java_utils.bzl", "get_path_separator_for_exec_os") -load("@prelude//utils:utils.bzl", "expect") +load("@prelude//java/utils:java_more_utils.bzl", "get_path_separator_for_exec_os") +load("@prelude//utils:expect.bzl", "expect") -def get_preprocessed_java_classes(ctx: AnalysisContext, input_jars = {"artifact": "target_label"}) -> dict[Artifact, TargetLabel]: +def get_preprocessed_java_classes(enhance_ctx: EnhancementContext, input_jars: dict[Artifact, TargetLabel]) -> (dict[Artifact, TargetLabel], Artifact | None): if not input_jars: - return {} + return {}, None + + ctx = enhance_ctx.ctx input_srcs = {} output_jars_to_owners = {} output_dir = ctx.actions.declare_output("preprocessed_java_classes/output_dir") + input_jars_to_owners = {} for i, (input_jar, target_label) in enumerate(input_jars.items()): expect(input_jar.extension == ".jar", "Expected {} to have extension .jar!".format(input_jar)) jar_name = "{}_{}".format(i, input_jar.basename) input_srcs[jar_name] = input_jar + input_jars_to_owners[jar_name] = target_label output_jar = output_dir.project(jar_name) output_jars_to_owners[output_jar] = target_label input_dir = ctx.actions.symlinked_dir("preprocessed_java_classes/input_dir", input_srcs) + input_jars_map = ctx.actions.write_json("preprocessed_java_classes/input_jars_map.json", input_jars_to_owners) + materialized_artifacts_dir = ctx.actions.declare_output("preprocessed_java_classes/materialized_artifacts") + android_toolchain = ctx.attrs._android_toolchain[AndroidToolchainInfo] env = { "ANDROID_BOOTCLASSPATH": cmd_args( - ctx.attrs._android_toolchain[AndroidToolchainInfo].android_bootclasspath, + android_toolchain.android_bootclasspath + android_toolchain.android_optional_jars, delimiter = get_path_separator_for_exec_os(ctx), ), "IN_JARS_DIR": cmd_args(input_dir), + "IN_JARS_MAP": cmd_args(input_jars_map), + "MATERIALIZED_ARTIFACTS_DIR": materialized_artifacts_dir.as_output(), "OUT_JARS_DIR": output_dir.as_output(), "PREPROCESS": ctx.attrs.preprocess_java_classes_bash, "ZIP_SCRUBBER": ctx.attrs._java_toolchain[JavaToolchainInfo].zip_scrubber, @@ -46,16 +56,24 @@ def get_preprocessed_java_classes(ctx: AnalysisContext, input_jars = {"artifact" "bash", "-c", # Note: ZIP_SCRUBBER might expand to multiple words, so no quoting there. 
- 'mkdir -p "$OUT_JARS_DIR" && eval "$PREPROCESS" && $ZIP_SCRUBBER --paths-to-scrub "$@"', + 'mkdir -p "$OUT_JARS_DIR" && mkdir -p "$MATERIALIZED_ARTIFACTS_DIR" && eval "$PREPROCESS" && $ZIP_SCRUBBER --paths-to-scrub "$@"', "--", output_jars_file, ] - preprocess_cmd = cmd_args(preprocess_cmd) - preprocess_cmd.hidden([output_jar.as_output() for output_jar in output_jars]) - for dep in ctx.attrs.preprocess_java_classes_deps: - preprocess_cmd.hidden(dep[DefaultInfo].default_outputs + dep[DefaultInfo].other_outputs) + preprocess_cmd = cmd_args( + preprocess_cmd, + hidden = [output_jar.as_output() for output_jar in output_jars] + + [ + dep[DefaultInfo].default_outputs + dep[DefaultInfo].other_outputs + for dep in ctx.attrs.preprocess_java_classes_deps + ], + ) ctx.actions.run(preprocess_cmd, env = env, category = "preprocess_java_classes") - return output_jars_to_owners + enhance_ctx.debug_output("preprocess_java_classes_input_dir", input_dir) + enhance_ctx.debug_output("preprocess_java_classes_input_jars_map", input_jars_map) + enhance_ctx.debug_output("preprocess_java_classes_materialized_artifacts_dir", materialized_artifacts_dir) + + return output_jars_to_owners, materialized_artifacts_dir diff --git a/prelude/android/proguard.bzl b/prelude/android/proguard.bzl index c9ed1e67f0a31..e51278988dfd4 100644 --- a/prelude/android/proguard.bzl +++ b/prelude/android/proguard.bzl @@ -11,14 +11,13 @@ load( "JavaPackagingDep", # @unused Used as type ) load("@prelude//java:java_toolchain.bzl", "JavaToolchainInfo") -load("@prelude//java/utils:java_utils.bzl", "get_path_separator_for_exec_os") -load("@prelude//utils:utils.bzl", "expect") - -_UNSCRUBBED_JARS_DIR = "unscrubbed" +load("@prelude//java/utils:java_more_utils.bzl", "get_path_separator_for_exec_os") +load("@prelude//os_lookup:defs.bzl", "OsLookup") +load("@prelude//utils:expect.bzl", "expect") ProguardOutput = record( jars_to_owners = dict[Artifact, TargetLabel], - proguard_configuration_output_file = [Artifact, None], + proguard_configuration_output_file = Artifact | None, proguard_mapping_output_file = Artifact, proguard_artifacts = list[Artifact], proguard_hidden_artifacts = list[Artifact], @@ -26,13 +25,13 @@ ProguardOutput = record( def _get_proguard_command_line_args( ctx: AnalysisContext, - inputs_to_unscrubbed_outputs: dict[Artifact, Artifact], + input_jars_to_output_jars: dict[Artifact, Artifact], proguard_configs: list[Artifact], additional_library_jars: list[Artifact], mapping: Artifact, - configuration: [Artifact, None], - seeds: [Artifact, None], - usage: [Artifact, None], + configuration: Artifact | None, + seeds: Artifact | None, + usage: Artifact | None, android_toolchain: AndroidToolchainInfo) -> (cmd_args, list[Artifact]): cmd = cmd_args() hidden = [] @@ -54,10 +53,10 @@ def _get_proguard_command_line_args( cmd.add(cmd_args("\"", proguard_config, "\"", delimiter = "")) hidden.append(proguard_config) - for jar_input, jar_output in inputs_to_unscrubbed_outputs.items(): + for jar_input, jar_output in input_jars_to_output_jars.items(): cmd.add("-injars", jar_input, "-outjars", jar_output if jar_output == jar_input else jar_output.as_output()) - library_jars = android_toolchain.android_bootclasspath + additional_library_jars + library_jars = android_toolchain.android_bootclasspath + android_toolchain.android_optional_jars + additional_library_jars cmd.add("-libraryjars") cmd.add(cmd_args(library_jars, delimiter = get_path_separator_for_exec_os(ctx))) hidden.extend(library_jars) @@ -79,7 +78,8 @@ def run_proguard( 
command_line_args_file: Artifact, command_line_args: cmd_args, mapping_file: Artifact, - usage_file: Artifact): + usage_file: Artifact, + output_jars: list[Artifact]): run_proguard_cmd = cmd_args() run_proguard_cmd.add( java_toolchain.java[RunInfo], @@ -89,20 +89,40 @@ def run_proguard( "-jar", android_toolchain.proguard_jar, ) - run_proguard_cmd.add(cmd_args(command_line_args_file, format = "@{}")) - run_proguard_cmd.hidden(command_line_args) + run_proguard_cmd.add( + cmd_args(command_line_args_file, format = "@{}", hidden = command_line_args), + ) + + output_jars_file = ctx.actions.write("proguard/output_jars.txt", output_jars) + + is_windows = hasattr(ctx.attrs, "_exec_os_type") and ctx.attrs._exec_os_type[OsLookup].platform == "windows" # Some proguard configs can propagate the "-dontobfuscate" flag which disables # obfuscation and prevents the mapping.txt and usage.txt file from being generated. - sh_cmd = cmd_args([ - "sh", - "-c", - "touch $1 && touch $2 && $3", - "--", - mapping_file.as_output(), - usage_file.as_output(), - cmd_args(run_proguard_cmd, delimiter = " "), - ]) + # Scrub all jars emitted from proguard to make them deterministic. + if not is_windows: + sh_cmd = cmd_args([ + "sh", + "-c", + "touch $1 && touch $2 && $3 && $4 --paths-to-scrub $5 --create-if-not-present", + "--", + mapping_file.as_output(), + usage_file.as_output(), + cmd_args(run_proguard_cmd, delimiter = " "), + cmd_args(ctx.attrs._java_toolchain[JavaToolchainInfo].zip_scrubber, delimiter = " "), + output_jars_file, + ]) + else: + sh_cmd = cmd_args([ + "cmd.exe", + "/c", + cmd_args([ + cmd_args([mapping_file.as_output()], format = "echo. > {}"), + cmd_args([usage_file.as_output()], format = "echo. > {}"), + cmd_args(run_proguard_cmd, delimiter = " "), + cmd_args(ctx.attrs._java_toolchain[JavaToolchainInfo].zip_scrubber, "--paths-to-scrub", output_jars_file, "--create-if-not-present", delimiter = " "), + ], delimiter = " && "), + ]) ctx.actions.run(sh_cmd, category = "run_proguard") @@ -112,7 +132,7 @@ def get_proguard_output( ctx: AnalysisContext, input_jars: dict[Artifact, TargetLabel], java_packaging_deps: list[JavaPackagingDep], - aapt_generated_proguard_config: [Artifact, None], + aapt_generated_proguard_config: Artifact | None, additional_library_jars: list[Artifact]) -> ProguardOutput: proguard_configs = [packaging_dep.proguard_config for packaging_dep in java_packaging_deps if packaging_dep.proguard_config] if ctx.attrs.proguard_config: @@ -121,14 +141,14 @@ def get_proguard_output( proguard_configs.append(aapt_generated_proguard_config) if ctx.attrs.skip_proguard: - inputs_to_unscrubbed_outputs = {input_jar: input_jar for input_jar in input_jars.keys()} + input_jars_to_output_jars = {input_jar: input_jar for input_jar in input_jars.keys()} mapping = ctx.actions.write("proguard/mapping.txt", []) configuration = None seeds = None usage = None else: - inputs_to_unscrubbed_outputs = {input_jar: ctx.actions.declare_output( - "proguard_output_jars/{}/{}_{}_obfuscated.jar".format(_UNSCRUBBED_JARS_DIR, input_jar.short_path, i), + input_jars_to_output_jars = {input_jar: ctx.actions.declare_output( + "proguard_output_jars/{}_{}_obfuscated.jar".format(input_jar.short_path, i), ) for i, input_jar in enumerate(input_jars.keys())} mapping = ctx.actions.declare_output("proguard/mapping.txt") configuration = ctx.actions.declare_output("proguard/configuration.txt") @@ -137,7 +157,7 @@ def get_proguard_output( command_line_args, hidden_artifacts = _get_proguard_command_line_args( ctx, - inputs_to_unscrubbed_outputs, 
+ input_jars_to_output_jars, proguard_configs, additional_library_jars, mapping, @@ -158,7 +178,6 @@ def get_proguard_output( proguard_hidden_artifacts = hidden_artifacts, ) else: - unscrubbed_output_jars = {unscrubbed_output: input_jars[input_jar] for input_jar, unscrubbed_output in inputs_to_unscrubbed_outputs.items()} run_proguard( ctx, ctx.attrs._android_toolchain[AndroidToolchainInfo], @@ -167,17 +186,9 @@ def get_proguard_output( command_line_args, mapping, usage, + input_jars_to_output_jars.values(), ) - output_jars = {} - for i, (unscrubbed_jar, target_label) in enumerate(unscrubbed_output_jars.items()): - output = ctx.actions.declare_output(unscrubbed_jar.short_path.replace("{}/".format(_UNSCRUBBED_JARS_DIR), "")) - ctx.actions.run( - cmd_args([ctx.attrs._java_toolchain[JavaToolchainInfo].zip_scrubber, unscrubbed_jar, output.as_output()]), - category = "scrub_jar", - identifier = str(i), - ) - output_jars[output] = target_label - + output_jars = {output: input_jars[input_jar] for input_jar, output in input_jars_to_output_jars.items()} return ProguardOutput( jars_to_owners = output_jars, proguard_configuration_output_file = configuration, diff --git a/prelude/android/r_dot_java.bzl b/prelude/android/r_dot_java.bzl index 82f5a834e3cc5..00b500b3ff1b4 100644 --- a/prelude/android/r_dot_java.bzl +++ b/prelude/android/r_dot_java.bzl @@ -5,18 +5,20 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -load("@prelude//android:android_providers.bzl", "AndroidResourceInfo") +load("@prelude//android:android_providers.bzl", "AndroidResourceInfo", "RDotJavaInfo") +load("@prelude//android:android_toolchain.bzl", "AndroidToolchainInfo") load("@prelude//java:java_library.bzl", "compile_to_jar") -load("@prelude//java:java_providers.bzl", "JavaClasspathEntry", "JavaLibraryInfo", "derive_compiling_deps") -load("@prelude//utils:set.bzl", "set") +load("@prelude//java:java_providers.bzl", "JavaClasspathEntry", "JavaLibraryInfo", "derive_compiling_deps", "generate_java_classpath_snapshot") +load("@prelude//java:java_toolchain.bzl", "JavaToolchainInfo") +load("@prelude//utils:argfile.bzl", "argfile") RDotJavaSourceCode = record( r_dot_java_source_code_dir = Artifact, r_dot_java_source_code_zipped = Artifact, - strings_source_code_dir = [Artifact, None], - strings_source_code_zipped = [Artifact, None], - ids_source_code_dir = [Artifact, None], - ids_source_code_zipped = [Artifact, None], + strings_source_code_dir = Artifact | None, + strings_source_code_zipped = Artifact | None, + ids_source_code_dir = Artifact | None, + ids_source_code_zipped = Artifact | None, ) def get_dummy_r_dot_java( @@ -25,16 +27,11 @@ def get_dummy_r_dot_java( android_resources: list[AndroidResourceInfo], union_package: [str, None]) -> JavaLibraryInfo: r_dot_java_source_code = _generate_r_dot_java_source_code(ctx, merge_android_resources_tool, android_resources, "dummy_r_dot_java", union_package = union_package) - library_output = _generate_and_compile_r_dot_java( + return _generate_and_compile_r_dot_java( ctx, r_dot_java_source_code.r_dot_java_source_code_zipped, "dummy_r_dot_java", - ) - return JavaLibraryInfo( - compiling_deps = derive_compiling_deps(ctx.actions, library_output, []), - library_output = library_output, - output_for_classpath_macro = library_output.full_library, - ) + ).library_info def generate_r_dot_javas( ctx: AnalysisContext, @@ -43,11 +40,23 @@ def generate_r_dot_javas( banned_duplicate_resource_types: list[str], uber_r_dot_txt_files: list[Artifact], 
override_symbols_paths: list[Artifact], - duplicate_resources_allowlist: [Artifact, None], + duplicate_resources_allowlist: Artifact | None, union_package: [str, None], referenced_resources_lists: list[Artifact], generate_strings_and_ids_separately: [bool, None] = True, - remove_classes: list[str] = []) -> list[JavaLibraryInfo]: + remove_classes: list[str] = []) -> list[RDotJavaInfo]: + if not android_resources: + # d8 will fail if its input contains no classes. Rather than add empty input handling in multiple places, + # like buck1 we just generate a stub class if we have no resources. This will be stripped from release + # builds and have minimal impact on debug builds. + return [ + _generate_and_compile_r_dot_java( + ctx, + ctx.attrs._android_toolchain[AndroidToolchainInfo].app_without_resources_stub, + "main_r_dot_java", + ), + ] + r_dot_java_source_code = _generate_r_dot_java_source_code( ctx, merge_android_resources_tool, @@ -63,34 +72,31 @@ def generate_r_dot_javas( referenced_resources_lists = referenced_resources_lists, ) - main_library_output = _generate_and_compile_r_dot_java( - ctx, - r_dot_java_source_code.r_dot_java_source_code_zipped, - "main_r_dot_java", - remove_classes = remove_classes, - ) - if generate_strings_and_ids_separately: - strings_library_output = _generate_and_compile_r_dot_java( - ctx, - r_dot_java_source_code.strings_source_code_zipped, - "strings_r_dot_java", - remove_classes = remove_classes + [".R$"], - ) - ids_library_output = _generate_and_compile_r_dot_java( + library_infos = [ + _generate_and_compile_r_dot_java( ctx, - r_dot_java_source_code.ids_source_code_zipped, - "ids_r_dot_java", - remove_classes = remove_classes + [".R$"], - ) - else: - strings_library_output = None - ids_library_output = None - - return [JavaLibraryInfo( - compiling_deps = derive_compiling_deps(ctx.actions, library_output, []), - library_output = library_output, - output_for_classpath_macro = library_output.full_library, - ) for library_output in filter(None, [main_library_output, strings_library_output, ids_library_output])] + r_dot_java_source_code.r_dot_java_source_code_zipped, + "main_r_dot_java", + remove_classes = remove_classes, + ), + ] + if generate_strings_and_ids_separately: + library_infos += [ + _generate_and_compile_r_dot_java( + ctx, + r_dot_java_source_code.strings_source_code_zipped, + "strings_r_dot_java", + remove_classes = remove_classes + [".R$"], + ), + _generate_and_compile_r_dot_java( + ctx, + r_dot_java_source_code.ids_source_code_zipped, + "ids_r_dot_java", + remove_classes = remove_classes + [".R$"], + ), + ] + + return library_infos def _generate_r_dot_java_source_code( ctx: AnalysisContext, @@ -102,20 +108,23 @@ def _generate_r_dot_java_source_code( banned_duplicate_resource_types: list[str] = [], uber_r_dot_txt_files: list[Artifact] = [], override_symbols_paths: list[Artifact] = [], - duplicate_resources_allowlist: [Artifact, None] = None, + duplicate_resources_allowlist: Artifact | None = None, union_package: [str, None] = None, referenced_resources_lists: list[Artifact] = []) -> RDotJavaSourceCode: merge_resources_cmd = cmd_args(merge_android_resources_tool) r_dot_txt_info = cmd_args() deduped_android_resources = set([(android_resource.text_symbols, android_resource.r_dot_java_package, android_resource.raw_target) for android_resource in android_resources]) - for (text_symbols, r_dot_java_package, raw_target) in deduped_android_resources.list(): + for (text_symbols, r_dot_java_package, raw_target) in deduped_android_resources: 
r_dot_txt_info.add(cmd_args([text_symbols, r_dot_java_package, raw_target], delimiter = " ")) r_dot_txt_info_file = ctx.actions.write("r_dot_txt_info_file_for_{}.txt".format(identifier), r_dot_txt_info) merge_resources_cmd.add(["--symbol-file-info", r_dot_txt_info_file]) - merge_resources_cmd.hidden([android_resource.r_dot_java_package for android_resource in android_resources]) - merge_resources_cmd.hidden([android_resource.text_symbols for android_resource in android_resources]) + merge_resources_cmd.add(cmd_args( + hidden = + [android_resource.r_dot_java_package for android_resource in android_resources] + + [android_resource.text_symbols for android_resource in android_resources], + )) output_dir = ctx.actions.declare_output("{}_source_code".format(identifier), dir = True) merge_resources_cmd.add(["--output-dir", output_dir.as_output()]) @@ -145,14 +154,12 @@ def _generate_r_dot_java_source_code( merge_resources_cmd.add(["--banned-duplicate-resource-types", banned_duplicate_resource_types_file]) if len(uber_r_dot_txt_files) > 0: - uber_r_dot_txt_files_list = ctx.actions.write("uber_r_dot_txt_files_list", uber_r_dot_txt_files) + uber_r_dot_txt_files_list = argfile(actions = ctx.actions, name = "uber_r_dot_txt_files_list", args = uber_r_dot_txt_files) merge_resources_cmd.add(["--uber-r-dot-txt", uber_r_dot_txt_files_list]) - merge_resources_cmd.hidden(uber_r_dot_txt_files) if len(override_symbols_paths) > 0: - override_symbols_paths_list = ctx.actions.write("override_symbols_paths_list", override_symbols_paths) + override_symbols_paths_list = argfile(actions = ctx.actions, name = "override_symbols_paths_list", args = override_symbols_paths) merge_resources_cmd.add(["--override-symbols", override_symbols_paths_list]) - merge_resources_cmd.hidden(override_symbols_paths) if duplicate_resources_allowlist != None: merge_resources_cmd.add(["--duplicate-resource-allowlist-path", duplicate_resources_allowlist]) @@ -161,9 +168,8 @@ def _generate_r_dot_java_source_code( merge_resources_cmd.add(["--union-package", union_package]) if referenced_resources_lists: - referenced_resources_file = ctx.actions.write("referenced_resources_lists", referenced_resources_lists) + referenced_resources_file = argfile(actions = ctx.actions, name = "referenced_resources_lists", args = referenced_resources_lists) merge_resources_cmd.add(["--referenced-resources-lists", referenced_resources_file]) - merge_resources_cmd.hidden(referenced_resources_lists) ctx.actions.run(merge_resources_cmd, category = "r_dot_java_merge_resources", identifier = identifier) @@ -180,7 +186,7 @@ def _generate_and_compile_r_dot_java( ctx: AnalysisContext, r_dot_java_source_code_zipped: Artifact, identifier: str, - remove_classes: list[str] = []) -> JavaClasspathEntry: + remove_classes: list[str] = []) -> RDotJavaInfo: r_dot_java_out = ctx.actions.declare_output("{}.jar".format(identifier)) compile_to_jar( @@ -193,11 +199,21 @@ def _generate_and_compile_r_dot_java( ) # Extracting an abi is unnecessary as there's not really anything to strip. 
- outputs = JavaClasspathEntry( + jar_snapshot = generate_java_classpath_snapshot(ctx.actions, ctx.attrs._java_toolchain[JavaToolchainInfo].cp_snapshot_generator, r_dot_java_out, identifier) + library_output = JavaClasspathEntry( full_library = r_dot_java_out, abi = r_dot_java_out, abi_as_dir = None, required_for_source_only_abi = False, + abi_jar_snapshot = jar_snapshot, ) - return outputs + return RDotJavaInfo( + identifier = identifier, + library_info = JavaLibraryInfo( + compiling_deps = derive_compiling_deps(ctx.actions, library_output, []), + library_output = library_output, + output_for_classpath_macro = library_output.full_library, + ), + source_zipped = r_dot_java_source_code_zipped, + ) diff --git a/prelude/android/robolectric_test.bzl b/prelude/android/robolectric_test.bzl index bc0e1d2331ed9..386c927316b19 100644 --- a/prelude/android/robolectric_test.bzl +++ b/prelude/android/robolectric_test.bzl @@ -6,12 +6,13 @@ # of this source tree. load("@prelude//android:android_binary_resources_rules.bzl", "get_android_binary_resources_info") -load("@prelude//android:android_library.bzl", "build_android_library") +load("@prelude//android:android_library.bzl", "build_android_library", "optional_jars") load("@prelude//android:android_providers.bzl", "merge_android_packageable_info") load("@prelude//android:android_toolchain.bzl", "AndroidToolchainInfo") +load("@prelude//java:java_providers.bzl", "JavaLibraryInfo") load("@prelude//java:java_test.bzl", "build_junit_test") load("@prelude//java:java_toolchain.bzl", "JavaToolchainInfo") -load("@prelude//utils:utils.bzl", "expect") +load("@prelude//utils:expect.bzl", "expect") load("@prelude//test/inject_test_run_info.bzl", "inject_test_run_info") def robolectric_test_impl(ctx: AnalysisContext) -> list[Provider]: @@ -35,7 +36,7 @@ def robolectric_test_impl(ctx: AnalysisContext) -> list[Provider]: if runtime_dependencies_dir: extra_cmds.append(cmd_args(runtime_dependencies_dir, format = "-Drobolectric.dependency.dir={}")) - all_packaging_deps = ctx.attrs.deps + (ctx.attrs.deps_query or []) + ctx.attrs.exported_deps + ctx.attrs.runtime_deps + all_packaging_deps = ctx.attrs.deps + ctx.attrs.exported_deps + ctx.attrs.runtime_deps android_packageable_info = merge_android_packageable_info(ctx.label, ctx.actions, all_packaging_deps) resources_info = get_android_binary_resources_info( ctx, @@ -45,7 +46,6 @@ def robolectric_test_impl(ctx: AnalysisContext) -> list[Provider]: use_proto_format = False, referenced_resources_lists = [], generate_strings_and_ids_separately = False, - aapt2_min_sdk = ctx.attrs.manifest_entries.get("min_sdk_version", None), aapt2_preferred_density = ctx.attrs.preferred_density_for_binary_resources, ) @@ -69,14 +69,20 @@ def robolectric_test_impl(ctx: AnalysisContext) -> list[Provider]: ".", ]) ctx.actions.run(jar_cmd, category = "test_config_properties_jar_cmd") - extra_cmds.append(cmd_args().hidden(resources_info.primary_resources_apk, resources_info.manifest)) + extra_cmds.append(cmd_args(hidden = [resources_info.primary_resources_apk, resources_info.manifest])) - r_dot_javas = [r_dot_java.library_output.full_library for r_dot_java in resources_info.r_dot_javas if r_dot_java.library_output] + r_dot_javas = [r_dot_java.library_info.library_output.full_library for r_dot_java in resources_info.r_dot_java_infos if r_dot_java.library_info.library_output] expect(len(r_dot_javas) <= 1, "android_library only works with single R.java") - java_providers, _ = build_android_library(ctx, r_dot_java = r_dot_javas[0] if r_dot_javas else 
None) + extra_sub_targets = {} + if r_dot_javas: + r_dot_java = r_dot_javas[0] + extra_sub_targets["r_dot_java"] = [DefaultInfo(default_output = r_dot_java)] + else: + r_dot_java = None + java_providers, _ = build_android_library(ctx, r_dot_java = r_dot_java, extra_sub_targets = extra_sub_targets) - extra_classpath_entries = [test_config_properties_jar] + ctx.attrs._android_toolchain[AndroidToolchainInfo].android_bootclasspath + extra_classpath_entries = [test_config_properties_jar] + ctx.attrs._android_toolchain[AndroidToolchainInfo].android_bootclasspath + optional_jars(ctx) extra_classpath_entries.extend(r_dot_javas) external_runner_test_info = build_junit_test( ctx, @@ -87,11 +93,23 @@ def robolectric_test_impl(ctx: AnalysisContext) -> list[Provider]: extra_classpath_entries = extra_classpath_entries, ) - return inject_test_run_info(ctx, external_runner_test_info) + [ - java_providers.java_library_info, + providers = inject_test_run_info(ctx, external_runner_test_info) + [ java_providers.java_library_intellij_info, java_providers.java_packaging_info, java_providers.template_placeholder_info, java_providers.default_info, java_providers.class_to_src_map, + java_providers.java_global_code_info, ] + + if ctx.attrs.used_as_dependency_deprecated_do_not_use: + providers.append(java_providers.java_library_info) + else: + java_library_without_compiling_deps = JavaLibraryInfo( + compiling_deps = None, + library_output = java_providers.java_library_info.library_output, + output_for_classpath_macro = java_providers.java_library_info.output_for_classpath_macro, + ) + providers.append(java_library_without_compiling_deps) + + return providers diff --git a/prelude/android/tools/BUCK b/prelude/android/tools/BUCK deleted file mode 100644 index efa79526b9cf5..0000000000000 --- a/prelude/android/tools/BUCK +++ /dev/null @@ -1,97 +0,0 @@ -native.python_binary( - name = "unpack_aar", - labels = ["buck2-only"], - main = "unpack_aar.py", - visibility = ["PUBLIC"], - deps = [ - ":unpack_aar_lib", - "prelude//java/tools:utils_lib", - ], -) - -native.python_library( - name = "unpack_aar_lib", - srcs = [ - "unpack_aar.py", - ], - labels = ["buck2-only"], -) - -native.python_binary( - name = "filter_dex", - labels = ["buck2-only"], - main = "filter_dex.py", - visibility = ["PUBLIC"], - deps = [ - ":filter_dex_lib", - ], -) - -native.python_library( - name = "filter_dex_lib", - srcs = [ - "filter_dex.py", - ], - labels = ["buck2-only"], -) - -native.python_binary( - name = "combine_native_library_dirs", - labels = ["buck2-only"], - main = "combine_native_library_dirs.py", - visibility = ["PUBLIC"], - deps = [ - ":combine_native_library_dirs_lib", - ], -) - -native.python_library( - name = "combine_native_library_dirs_lib", - srcs = [ - "combine_native_library_dirs.py", - ], - labels = ["buck2-only"], -) - -native.python_binary( - name = "filter_prebuilt_native_library_dir", - labels = ["buck2-only"], - main = "filter_prebuilt_native_library_dir.py", - visibility = ["PUBLIC"], - deps = [ - ":filter_prebuilt_native_library_dir_lib", - ], -) - -native.python_library( - name = "filter_prebuilt_native_library_dir_lib", - srcs = [ - "filter_prebuilt_native_library_dir.py", - ], - labels = ["buck2-only"], -) - -native.python_binary( - name = "native_libs_as_assets_metadata", - labels = ["buck2-only"], - main = "native_libs_as_assets_metadata.py", - visibility = ["PUBLIC"], - deps = [ - ":native_libs_as_assets_metadata_lib", - ], -) - -native.python_library( - name = "native_libs_as_assets_metadata_lib", - srcs = [ - 
"native_libs_as_assets_metadata.py", - ], - labels = ["buck2-only"], -) - -native.python_binary( - name = "compute_merge_sequence", - main = "merge_sequence.py", - labels = ["buck2-only"], - visibility = ["PUBLIC"], -) diff --git a/prelude/android/tools/BUCK.v2 b/prelude/android/tools/BUCK.v2 new file mode 100644 index 0000000000000..9f6de47604bd5 --- /dev/null +++ b/prelude/android/tools/BUCK.v2 @@ -0,0 +1,110 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +prelude = native # Avoid warnings and auto-formatters + +prelude.python_bootstrap_binary( + name = "unpack_aar", + main = "unpack_aar.py", + visibility = ["PUBLIC"], + deps = [ + "prelude//java/tools:utils_lib", + ":unpack_aar_lib", + ], +) + +prelude.python_bootstrap_library( + name = "unpack_aar_lib", + srcs = [ + "unpack_aar.py", + ], +) + +prelude.python_bootstrap_binary( + name = "filter_dex", + main = "filter_dex.py", + visibility = ["PUBLIC"], + deps = [ + ":filter_dex_lib", + ], +) + +prelude.python_bootstrap_library( + name = "filter_dex_lib", + srcs = [ + "filter_dex.py", + ], +) + +prelude.python_bootstrap_binary( + name = "combine_native_library_dirs", + main = "combine_native_library_dirs.py", + visibility = ["PUBLIC"], + deps = [ + ":combine_native_library_dirs_lib", + ], +) + +prelude.python_bootstrap_library( + name = "combine_native_library_dirs_lib", + srcs = [ + "combine_native_library_dirs.py", + ], +) + +prelude.python_bootstrap_binary( + name = "filter_prebuilt_native_library_dir", + main = "filter_prebuilt_native_library_dir.py", + visibility = ["PUBLIC"], + deps = [ + ":filter_prebuilt_native_library_dir_lib", + ], +) + +prelude.python_bootstrap_library( + name = "filter_prebuilt_native_library_dir_lib", + srcs = [ + "filter_prebuilt_native_library_dir.py", + ], +) + +prelude.python_bootstrap_binary( + name = "native_libs_as_assets_metadata", + main = "native_libs_as_assets_metadata.py", + visibility = ["PUBLIC"], + deps = [ + ":native_libs_as_assets_metadata_lib", + ], +) + +prelude.python_bootstrap_library( + name = "native_libs_as_assets_metadata_lib", + srcs = [ + "native_libs_as_assets_metadata.py", + ], +) + +prelude.python_bootstrap_binary( + name = "compute_merge_sequence", + main = "merge_sequence.py", + visibility = ["PUBLIC"], +) + +prelude.python_bootstrap_binary( + name = "filter_extra_resources", + main = "filter_extra_resources.py", + visibility = ["PUBLIC"], + deps = [ + "prelude//java/tools:utils_lib", + ], +) + +prelude.zip_file( + name = "app_without_resources_stub", + srcs = ["com/facebook/buck_generated/AppWithoutResourcesStub.java"], + out = "app_without_resources_stub.src.zip", + visibility = ["PUBLIC"], +) diff --git a/prelude/android/tools/com/facebook/buck_generated/AppWithoutResourcesStub.java b/prelude/android/tools/com/facebook/buck_generated/AppWithoutResourcesStub.java new file mode 100644 index 0000000000000..9d3de2fc3a736 --- /dev/null +++ b/prelude/android/tools/com/facebook/buck_generated/AppWithoutResourcesStub.java @@ -0,0 +1,12 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +package com.facebook.buck_generated; + +final class AppWithoutResourcesStub {} diff --git a/prelude/android/tools/combine_native_library_dirs.py b/prelude/android/tools/combine_native_library_dirs.py index 70d51bbd3acf5..bbb52597e9b7a 100644 --- a/prelude/android/tools/combine_native_library_dirs.py +++ b/prelude/android/tools/combine_native_library_dirs.py @@ -43,8 +43,20 @@ def main() -> None: for lib in all_libs: relative_path = lib.relative_to(library_dir) output_path = args.output_dir / relative_path - output_path.parent.mkdir(exist_ok=True) - output_path.symlink_to(os.readlink(lib)) + assert ( + not output_path.exists() + ), "Duplicate library name: {}! Source1: {}, source2: {}".format( + output_path.name, + os.path.realpath(output_path), + lib, + ) + + output_path.parent.mkdir(exist_ok=True, parents=True) + relative_path_to_lib = os.path.relpath( + os.path.realpath(lib), + start=os.path.realpath(os.path.dirname(output_path)), + ) + output_path.symlink_to(relative_path_to_lib) if args.metadata_file: with open(lib, "rb") as f: diff --git a/prelude/android/tools/filter_dex.py b/prelude/android/tools/filter_dex.py index e26d507e0adab..808f586e218da 100644 --- a/prelude/android/tools/filter_dex.py +++ b/prelude/android/tools/filter_dex.py @@ -72,7 +72,8 @@ def class_name_matches_filter(self, class_name): def _parse_args(): parser = argparse.ArgumentParser( - description="Tool to filter a dex for primary class names." + description="Tool to filter a dex for primary class names.", + fromfile_prefix_chars="@", ) parser.add_argument( diff --git a/prelude/android/tools/filter_extra_resources.py b/prelude/android/tools/filter_extra_resources.py new file mode 100644 index 0000000000000..5e795ca29cf94 --- /dev/null +++ b/prelude/android/tools/filter_extra_resources.py @@ -0,0 +1,62 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +import argparse +import pathlib +import shutil + +import utils + +ZIP_NOTHING_TO_DO_EXIT_CODE = 12 + + +def _parse_args(): + parser = argparse.ArgumentParser( + description="Tool to remove extra resources from apk." + ) + parser.add_argument( + "--input-apk", + type=pathlib.Path, + required=True, + help="a path to the original apk", + ) + parser.add_argument( + "--output-apk", + type=pathlib.Path, + required=True, + help="a path to the output apk with removed resources", + ) + parser.add_argument( + "--extra-filtered-resources", + type=str, + action="append", + required=True, + help="list of patterns of files to filter out from the input apk", + ) + return parser.parse_args() + + +def main(): + args = _parse_args() + shutil.copyfile(args.input_apk, args.output_apk) + utils.execute_command(["chmod", "644", args.output_apk]) + + # The normal resource filtering apparatus is super slow because it extracts the whole apk, + # strips files out of it, then repackages it. + # + # This is a faster filtering step that just uses zip -d to remove entries from the archive. + # It's also superbly dangerous. + # + # If zip -d returns that there was nothing to do, then we don't fail. 
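The `utils` helper invoked just below is not shown in this diff; here is a minimal sketch of its assumed behavior, with a hypothetical signature inferred from the call site:

```
# Sketch only: the real helper lives in the prelude's shared Python utils
# (imported above as `import utils`); behavior assumed from the call site.
import subprocess
import sys
from typing import Sequence

def execute_command_ignore_exit_codes(command: Sequence[str], ignored_exit_codes: Sequence[int]) -> None:
    # Run the command; treat the listed exit codes (e.g. zip's "nothing to do",
    # exit code 12) as success instead of failing the action.
    returncode = subprocess.run(command).returncode
    if returncode != 0 and returncode not in ignored_exit_codes:
        sys.exit(returncode)
```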
+ utils.execute_command_ignore_exit_codes( + ["zip", "-d", str(args.output_apk)] + args.extra_filtered_resources, + [ZIP_NOTHING_TO_DO_EXIT_CODE], + ) + + +if __name__ == "__main__": + main() diff --git a/prelude/android/tools/merge_sequence.py b/prelude/android/tools/merge_sequence.py index 234cda40ab827..5552b58b029a0 100644 --- a/prelude/android/tools/merge_sequence.py +++ b/prelude/android/tools/merge_sequence.py @@ -11,13 +11,19 @@ """ Applies the merge sequence to the linkable_graph and module graph to produce merged libraries. -The merge sequence is a list of "merge entries". Each entry is a "merge spec", which is a name and a list of roots. +The merge sequence is a list of "merge entries". Each entry is composed of one or more "merge specs", each of which has a name and +a list of roots. A merge entry may be a merge spec or a list of merge specs. + For example: ``` [ - {"group1.so": [//group1:root1, //group1:root2]}, - {"group2.so": [//group2:root]}, + ("libgroup1.so", [//group1:root1, //group1:root2]), + ("libgroup2.so", [//group2:root]), + ( + ("libgroup3lib1.so", [//group3:root1]), + ("libgroup3lib2.so", [//group3:root2]), + ), ... ] ``` @@ -26,15 +32,18 @@ each entry defines a new merge group. That group consists of all the roots in the entry and all of the transitive dependencies of those roots, except for nodes that have already been assigned a merge group. -We then need to split that merge group into "split groups" for three reasons: defining valid libraries, avoiding adding -implicit module dependencies to targets, and excluding targets that shouldn't be merged. That imposes these constraints: -1. Targets in a split group will all be in the same module. A library cannot span multiple modules. -2. For split groups in the root module, all targets will have the same set of transitive module dependencies (including +We then need to split that merge group into "split groups" for four reasons: avoiding unwanted dependencies between +libraries, defining valid libraries, avoiding adding implicit module dependencies to targets, and excluding targets that +shouldn't be merged. That imposes these constraints: +1. Libraries defined by specs in the same entry should not depend on one another, so any shared dependencies must be + split into separate libraries. +2. Targets in a split group will all be in the same module. A library cannot span multiple modules. +3. For split groups in the root module, all targets will have the same set of transitive module dependencies (including through targets in other merge groups). A non-root module cannot be loaded without loading all of its module dependencies, but if a JNI entry-point target is in the root module and has module dependencies, those dependencies would have to be loaded explicitly in advance, or the library load for the target will fail. Making any such target depend on *more* modules in merged-library builds would therefore risk merged-build-only runtime crashes. -3. Some targets are excluded from merging (explicitly via a blocklist, or implicitly because they cannot be packaged as +4. Some targets are excluded from merging (explicitly via a blocklist, or implicitly because they cannot be packaged as assets). Non-asset targets are rare, often cause issues when merged, and complicate split-group "layering" as described below, so it significantly simplifies merge sequence configuration and mechanics to exclude them. @@ -83,21 +92,14 @@ suffixes.
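As a toy illustration of the subgroup mechanics described above (the graph and spec names here are invented; the real implementation walks the linkable graph in reverse post-order, as shown later in this file):

```
# Propagate "which merge specs reach this target" from roots down to
# dependencies; a target reached by several specs lands in a shared library.
deps = {"a": ["c"], "b": ["c"], "c": []}
spec_roots = {"a": {"libfoo.so"}, "b": {"libbar.so"}}
post_order = ["c", "a", "b"]  # children before parents

reachable = {t: set(spec_roots.get(t, set())) for t in deps}
for target in reversed(post_order):  # parents before children
    for child in deps[target]:
        reachable[child] |= reachable[target]

assert reachable["c"] == {"libfoo.so", "libbar.so"}

# Shared targets get a "lib_shared_..." base name built from the spec names,
# mirroring get_base_name below, which strips the "lib" prefix and ".so" suffix.
base_name = "lib_shared" + "".join("_" + s[3:-3] for s in sorted(reachable["c"])) + ".so"
assert base_name == "lib_shared_bar_foo.so"
```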
We aim to minimize the suffixing of the largest, most central layers, so we apply the following rules: -1. Library names start with the merge spec name. +1. Library names start with the names of the merge specs from the current entry whose roots include or depend on the + library's targets. This is just the spec name if there is only one, or `lib_shared_` plus the underscore-separated + list of spec names if there are multiple. 2. If a merge spec includes targets from multiple modules, each library in a non-root module will be suffixed with that module name. 3. We perform a topological traversal of the final library graph, maintaining counters of times we've encountered each (possibly module-suffixed) library name. Each final library after the first encountered for its library name will be further suffixed with that library name's counter value. - -# TODO(cjhopman): This file has PARTIAL, BROKEN logic handling merge entries that are a list of merge specs. This is -# NOT currently supported or tested, and its interface should be considered non-final. Do not interact with this! - -# TODO(cjhopman): In multi-spec merge entries, split groups must be split by sets of specs in the current merge entry -# whose roots are transitive dependents. - -# TODO(cjhopman): We should consider requiring a "supergroup name" in multi-spec merge entries. Right now, we produce -# unreasonable library names by applying the first spec's name to libraries from all specs in the entry. """ from __future__ import annotations @@ -194,20 +196,38 @@ class SplitGroupKey(typing.NamedTuple): excluded: typing.Optional[Label] module: str current_merge_group: int - # If this is a subgroup, holds the primary spec name and a unique identifier. - merge_subgroup: Optional[set[str]] + # Holds the set of names of specs in the entry that depend on the targets. + # This will contain only one string for most targets/entries. 
+ merge_subgroup: set[str] # For the root module, holds the structified set of transitively reachable modules transitive_module_key: typing.Optional[frozenset[str]] + def get_base_name(self) -> str: + if self.excluded: + return self.excluded + + assert len(self.merge_subgroup) > 0 + + dependent_spec_names = sorted(self.merge_subgroup) + + if len(dependent_spec_names) == 1: + return dependent_spec_names[0] + else: + base_name = "lib_shared" + for dependent_spec_name in dependent_spec_names: + base_name += f"_{dependent_spec_name[3:-3]}" + base_name += ".so" + return base_name + class NodeData(typing.NamedTuple): base_library_name: str + could_be_root_for: list[str] module: str merge_group: int is_excluded: bool final_lib_key: FinalLibKey - transitive_module_deps: frozenset[str] - split_group_exit_counts: dict[int, int] + dependent_included_split_group_entry_counts: dict[int, int] def debug(self) -> object: return self._asdict() @@ -226,6 +246,7 @@ class FinalLibData(typing.NamedTuple): is_excluded: bool key: FinalLibKey deps: set[FinalLibKey] + entry_point_targets: set[str] class FinalLibGraph: @@ -236,7 +257,7 @@ class FinalLibGraph: def __init__(self) -> None: self.graph = {} - def add_node(self, node_data: NodeData, deps_data: list[NodeData]) -> None: + def _ensure_lib_data(self, node_data: NodeData) -> FinalLibData: lib_key = node_data.final_lib_key lib_data = self.graph.get(lib_key, None) if not lib_data: @@ -249,21 +270,41 @@ def add_node(self, node_data: NodeData, deps_data: list[NodeData]) -> None: is_excluded=node_data.is_excluded, key=lib_key, deps=set(), + entry_point_targets=set(), ), ) else: assert lib_data.module == node_data.module, (lib_data, node_data) assert lib_data.merge_group == node_data.merge_group, (lib_data, node_data) - for dep_data in deps_data: - if dep_data.final_lib_key != lib_key: + return lib_data + + def add_node( + self, + node_data: NodeData, + deps: list[str], + deps_data: list[NodeData], + ) -> None: + lib_data = self._ensure_lib_data(node_data) + + for dep, dep_data in zip(deps, deps_data): + if dep_data.final_lib_key != node_data.final_lib_key: lib_data.deps.add(dep_data.final_lib_key) + dep_lib_data = self._ensure_lib_data(dep_data) + dep_lib_data.entry_point_targets.add(dep) - def dump_graph(self, names: dict[FinalLibKey, str]) -> dict[str, list[str]]: + def dump_lib_edges(self, names: dict[FinalLibKey, str]) -> dict[str, list[str]]: return { names[k]: [names[d] for d in node.deps] for k, node in self.graph.items() } + def dump_entry_point_targets( + self, names: dict[FinalLibKey, str] + ) -> dict[str, list[str]]: + return { + names[k]: list(node.entry_point_targets) for k, node in self.graph.items() + } + def assign_names( self, merge_group_module_constituents: list[set[str]] ) -> dict[FinalLibKey, str]: @@ -272,7 +313,9 @@ def assign_names( final_lib_graph[key] = list(dep_data.deps) # this topo_sort also verifies that we produced an acyclic final lib graph - sorted_final_lib_keys = topo_sort(final_lib_graph) + sorted_final_lib_keys = topo_sort( + final_lib_graph, lambda x: self.graph[x].module if self.graph[x] else str(x) + ) name_counters = {} final_lib_names: dict[FinalLibKey, str] = {} @@ -290,7 +333,7 @@ def assign_names( count = name_counters.setdefault(lib_name, 0) + 1 name_counters[lib_name] = count if count > 1: - lib_name += "_{}".format(count) + lib_name += "_{}".format(count - 1) final_lib_names[key] = lib_name + ext return final_lib_names @@ -305,61 +348,61 @@ def assign_names( class MergeSequenceGroupSpec: - has_multiple_specs: bool - 
group_roots_patterns: list[re.Pattern[str]] - merge_group_name: str + # These lists run parallel to one another; the group_roots_patterns + # entry for a given index contains all the root patterns for the + # group_specs entry at the same index. + group_specs: list[MergeGroupSpecDef] + group_roots_patterns: list[list[re.Pattern[str]]] def __init__( self, group_specs: typing.Union[MergeGroupSpecDef, list[MergeGroupSpecDef]] ) -> None: - self.group_specs = group_specs - - def _parse() -> typing.Tuple[bool, str, list[str]]: - if isinstance(group_specs[0], str): - typed = typing.cast(MergeGroupSpecDef, group_specs) - return (False, typed[0], typed[1]) - else: - group_roots_patterns = [x for spec in group_specs for x in spec[1]] - # TODO(cjhopman): Fix this, we don't yet actually fully support multiple specs in a group. - return (True, group_specs[0][0], group_roots_patterns) - - has_multiple_specs, merge_group_name, group_roots_patterns = _parse() + self.group_specs = ( + [group_specs] if isinstance(group_specs[0], str) else group_specs + ) - self.has_multiple_specs = has_multiple_specs - self.group_roots_patterns = [re.compile(p) for p in group_roots_patterns] - self.merge_group_name = merge_group_name + for group_spec in self.group_specs: + library_name = group_spec[0] + assert library_name.startswith( + "lib" + ), f"native merge library name {library_name} does not begin with 'lib'" + assert library_name.endswith( + ".so" + ), f"native merge library name {library_name} does not end with '.so'" + + self.group_roots_patterns = [ + [re.compile(x) for x in spec[1]] for spec in self.group_specs + ] - def is_root(self, raw_target: str) -> bool: - for p in self.group_roots_patterns: - if p.search(raw_target): - return True - return False + def get_rooted_specs(self, raw_target: str) -> set[str]: + result = set() + for patterns, group_spec in zip(self.group_roots_patterns, self.group_specs): + for p in patterns: + if p.search(raw_target): + result.add(group_spec[0]) + break - def group_name(self) -> str: - return self.merge_group_name + return result def compute_merge_subgroup_mapping( self, + group_roots: dict[Label, set[str]], post_ordered_targets: list[Label], graph_map: dict[Label, LinkableGraphNode], ) -> MergeSubgroupMapping: - if self.has_multiple_specs: - reachable_merge_spec_roots = defaultdict(set) + merge_specs_with_reachable_roots = defaultdict(set) - for s in self.group_specs: - for root in s[1]: - reachable_merge_spec_roots[root].add(root) + for target, rooted_specs in group_roots.items(): + merge_specs_with_reachable_roots[target].update(rooted_specs) - for target in post_ordered_targets[::-1]: - node = graph_map.get(target) - assert node is not None - roots_to_add = reachable_merge_spec_roots[target] - for child in node.deps: - reachable_merge_spec_roots[child].update(roots_to_add) + for target in post_ordered_targets[::-1]: + node = graph_map.get(target) + assert node is not None + roots_to_add = merge_specs_with_reachable_roots[target] + for child in node.deps: + merge_specs_with_reachable_roots[child].update(roots_to_add) - return lambda x: reachable_merge_spec_roots[x] - else: - return lambda x: None + return lambda x: frozenset(merge_specs_with_reachable_roots[x]) def get_native_linkables_by_merge_sequence( # noqa: C901 @@ -367,13 +410,17 @@ def get_native_linkables_by_merge_sequence( # noqa: C901 native_library_merge_sequence: list[MergeSequenceGroupSpec], native_library_merge_sequence_blocklist: list[typing.Pattern], apk_module_graph: ApkModuleGraph, + 
native_library_merge_non_asset_libs: bool, ) -> typing.Tuple[dict[Label, NodeData], dict[FinalLibKey, str], FinalLibGraph]: final_lib_graph = FinalLibGraph() node_data: dict[Label, NodeData] = {} + transitive_module_deps_map: dict[Label, frozenset[str]] = {} + + dependents_in_current_merge_group_map: dict[Label, list[Label]] = {} def check_is_excluded(target: Label) -> bool: node = graph_node_map[target] - if not node.can_be_asset: + if not native_library_merge_non_asset_libs and not node.can_be_asset: return True raw_target = node.raw_target @@ -388,7 +435,15 @@ def check_is_excluded(target: Label) -> bool: def get_children_without_merge_group(label: Label) -> list[Label]: node = graph_node_map[label] - return [child for child in node.deps if child not in node_data] + group_deps = [child for child in node.deps if child not in node_data] + + # In addition to applying an ordering to targets, this traversal identifies within-merge-group dependents. + for group_dep in group_deps: + dependents_in_current_merge_group_map.setdefault(group_dep, []).append( + label + ) + + return group_deps current_merge_group = 0 @@ -397,25 +452,25 @@ def get_children_without_merge_group(label: Label) -> list[Label]: merge_group_module_constituents: list[set[str]] = [] for current_merge_group in range(len(native_library_merge_sequence)): + dependents_in_current_merge_group_map = {} + merge_group_module_constituents.append(set()) group_specs: MergeSequenceGroupSpec = native_library_merge_sequence[ current_merge_group ] - group_roots = [] + group_roots = {} for label, node in graph_node_map.items(): - if group_specs.is_root(node.raw_target) and label not in node_data: - group_roots.append(label) + if label not in node_data: + rooted_specs = group_specs.get_rooted_specs(node.raw_target) + if rooted_specs: + group_roots[label] = rooted_specs - merge_group_name = group_specs.merge_group_name post_ordered_targets = post_order_traversal_by( - group_roots, get_children_without_merge_group + group_roots.keys(), get_children_without_merge_group ) - # TODO(cjhopman): The restrictions on single-spec merge entries can be computed in two traversals, but - # multi-spec entries will require an additional top-down traversal to determine the set of transitive dependent - # spec roots, which will be an additional factor separating split groups in such merge groups. 
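In isolation, the root matching performed by `get_rooted_specs` (defined above) behaves like this sketch; the spec names and patterns here are made up:

```
# Each target is a root of every spec in the entry whose regex patterns match it.
import re

group_specs = [("libfoo.so", ["//foo:"]), ("libbar.so", ["//bar:", "//foo:extra"])]
group_roots_patterns = [[re.compile(p) for p in spec[1]] for spec in group_specs]

def get_rooted_specs(raw_target: str) -> set:
    result = set()
    for patterns, (name, _) in zip(group_roots_patterns, group_specs):
        if any(p.search(raw_target) for p in patterns):
            result.add(name)
    return result

assert get_rooted_specs("//foo:extra") == {"libfoo.so", "libbar.so"}
assert get_rooted_specs("//other:lib") == set()
```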
merge_subgroup_mapping = group_specs.compute_merge_subgroup_mapping( - post_ordered_targets, graph_node_map + group_roots, post_ordered_targets, graph_node_map ) def get_split_group( @@ -424,7 +479,7 @@ def get_split_group( module: str, current_merge_group: int = current_merge_group, merge_subgroup_mapping: MergeSubgroupMapping = merge_subgroup_mapping, - ) -> typing.Tuple[bool, int]: + ) -> typing.Tuple[bool, int, str]: excluded = None if check_is_excluded(label): excluded = label @@ -443,10 +498,13 @@ def get_split_group( merge_subgroup=merge_subgroup, transitive_module_key=transitive_module_key, ) - return excluded is not None, split_groups.setdefault( - split_group_key, len(split_groups) + return ( + excluded is not None, + split_groups.setdefault(split_group_key, len(split_groups)), + split_group_key.get_base_name(), ) + # Bottom-up traversal (compute transitive module dependencies) for target in post_ordered_targets: assert target not in node_data, "{}: {}".format( target, post_ordered_targets @@ -454,60 +512,87 @@ def get_split_group( node = graph_node_map[target] module = apk_module_graph.module_for_target(node.raw_target) + merge_group_module_constituents[current_merge_group].add(module) transitive_module_deps = {module} + for dep in node.deps: + transitive_module_deps.update(transitive_module_deps_map[dep]) + transitive_module_deps_map[target] = frozenset(transitive_module_deps) - deps_data = [node_data[dep] for dep in node.deps] - - for dep_data in deps_data: - transitive_module_deps.update(dep_data.transitive_module_deps) + # Top-down traversal (determine split groups, compute dependent split group reentry counts, finalize layers) + for target in reversed(post_ordered_targets): + assert target not in node_data, "{}: {}".format( + target, post_ordered_targets + ) - transitive_module_deps = frozenset(transitive_module_deps) + node = graph_node_map[target] + module = apk_module_graph.module_for_target(node.raw_target) - is_excluded, split_group = get_split_group( - target, transitive_module_deps, module + is_excluded, split_group, base_library_name = get_split_group( + target, transitive_module_deps_map[target], module ) - split_group_exit_counts: dict[int, int] = {} - - for dep_data in deps_data: - if current_merge_group == dep_data.merge_group: - dep_split_group = dep_data.final_lib_key.split_group - is_cross_group_edge = split_group != dep_split_group - - # if this is the first exit edge from the group, it won't apper in dep_data's map so we add it - # explicitly if we don't yet have a non-zero count (except if it's exited an excluded node - # where there's no need to track exit counts) - if ( - not dep_data.is_excluded - and is_cross_group_edge - and dep_split_group not in split_group_exit_counts - ): - split_group_exit_counts[dep_split_group] = 1 - for (group, count) in dep_data.split_group_exit_counts.items(): - if group == dep_split_group and is_cross_group_edge: - count += 1 - curr_count = split_group_exit_counts.get(group, 0) - if count > curr_count: - split_group_exit_counts[group] = count + dependent_included_split_group_entry_counts: dict[int, int] = {} + + for dependent_in_group in dependents_in_current_merge_group_map.get( + target, [] + ): + dependent_in_group_data = node_data[dependent_in_group] + dependent_split_group = ( + dependent_in_group_data.final_lib_key.split_group + ) + is_included_split_group_entry = ( + split_group != dependent_split_group + and not dependent_in_group_data.is_excluded + ) + + # dependent_included_split_group_entry_counts do not count 
entry into a target's own split group layer, + # so we have to ensure that its dependencies record an entry count of at least 1 for that split group. + if ( + is_included_split_group_entry + and dependent_split_group + not in dependent_included_split_group_entry_counts + ): + dependent_included_split_group_entry_counts[ + dependent_split_group + ] = 1 + + for ( + group, + entry_count, + ) in ( + dependent_in_group_data.dependent_included_split_group_entry_counts.items() + ): + if group == dependent_split_group and is_included_split_group_entry: + entry_count += 1 + + curr_entry_count = dependent_included_split_group_entry_counts.get( + group, 0 + ) + if entry_count > curr_entry_count: + dependent_included_split_group_entry_counts[group] = entry_count this_node_data = NodeData( - base_library_name=node.raw_target if is_excluded else merge_group_name, + base_library_name=base_library_name, + could_be_root_for=list(group_roots.get(target, set())), module=module, merge_group=current_merge_group, final_lib_key=FinalLibKey( split_group=split_group, - cycle_breaker=frozenset(split_group_exit_counts.items()) - if is_root_module(module) - else split_group_exit_counts.get(split_group, 0), + cycle_breaker=dependent_included_split_group_entry_counts.get( + split_group, 0 + ), ), is_excluded=is_excluded, - transitive_module_deps=transitive_module_deps, - split_group_exit_counts=split_group_exit_counts, + dependent_included_split_group_entry_counts=dependent_included_split_group_entry_counts, ) node_data[target] = this_node_data - final_lib_graph.add_node(this_node_data, deps_data) + + for target in post_ordered_targets: + node = graph_node_map[target] + deps_data = [node_data[dep] for dep in node.deps] + final_lib_graph.add_node(node_data[target], node.deps, deps_data) final_lib_names = final_lib_graph.assign_names(merge_group_module_constituents) return node_data, final_lib_names, final_lib_graph @@ -517,7 +602,9 @@ def get_split_group( def post_order_traversal_by( - roots: list[T], get_nodes_to_traverse_func: typing.Callable[[T], list[T]] + roots: list[T], + get_nodes_to_traverse_func: typing.Callable[[T], list[T]], + get_node_str: typing.Callable[[T], str] = None, ) -> list[T]: """ Returns the post-order sorted list of the nodes in the traversal. @@ -546,9 +633,17 @@ def post_order_traversal_by( work.append((OUTPUT, node)) for dep in get_nodes_to_traverse_func(node): if dep in current_parents: + current_parents_strs = [] + for k in current_parents: + current_parents_strs.append( + get_node_str(k) if get_node_str else str(k) + ) raise AssertionError( "detected cycle: {}".format( - " -> ".join(current_parents + [dep]) + " -> ".join( + current_parents_strs + + [get_node_str(dep) if get_node_str else str(dep)] + ) ) ) @@ -567,7 +662,9 @@ def is_root_module(module: str) -> bool: return module == ROOT_MODULE -def topo_sort(graph: dict[T, list[T]]) -> list[T]: +def topo_sort( + graph: dict[T, list[T]], get_node_str: typing.Callable[[T], str] = None +) -> list[T]: """ Topo-sort the given graph. 
""" @@ -583,7 +680,7 @@ def topo_sort(graph: dict[T, list[T]]) -> list[T]: if in_degree == 0: roots.append(node) - postordered = post_order_traversal_by(roots, lambda x: graph[x]) + postordered = post_order_traversal_by(roots, lambda x: graph[x], get_node_str) postordered.reverse() return postordered @@ -618,12 +715,14 @@ def main() -> int: # noqa: C901 parser.add_argument("--mergemap-input", required=True) parser.add_argument("--apk-module-graph") parser.add_argument("--output") + parser.add_argument("--merge-non-asset-libs", action="store_true") args = parser.parse_args() apk_module_graph = read_apk_module_graph(args.apk_module_graph) final_result = {} debug_results = {} + split_groups = {} mergemap_input = read_mergemap_input(args.mergemap_input) for platform, nodes in mergemap_input.nodes_by_platform.items(): ( @@ -635,6 +734,7 @@ def main() -> int: # noqa: C901 mergemap_input.merge_sequence, mergemap_input.blocklist, apk_module_graph, + args.merge_non_asset_libs, ) final_mapping = {} @@ -645,12 +745,20 @@ def main() -> int: # noqa: C901 final_mapping[target] = None else: final_mapping[target] = final_lib_names[node.final_lib_key] + split_groups[final_lib_names[node.final_lib_key]] = ( + node.base_library_name + ) else: final_mapping[target] = str(target) debug_results[platform] = ( + # Target name -> various information {k: v.debug() for k, v in node_data.items()}, + # Serialized FinalLibKey -> final library name {str(k): v for k, v in final_lib_names.items()}, - final_lib_graph.dump_graph(final_lib_names), + # Final library name -> final names of direct library dependencies + final_lib_graph.dump_lib_edges(final_lib_names), + # Final library name -> entry point targets + final_lib_graph.dump_entry_point_targets(final_lib_names), ) final_result[platform] = final_mapping @@ -658,6 +766,8 @@ def main() -> int: # noqa: C901 pathlib.Path(args.output).mkdir(parents=True, exist_ok=True) with open(os.path.join(args.output, "merge.map"), "w") as outfile: json.dump(final_result, outfile, indent=2) + with open(os.path.join(args.output, "split_groups.map"), "w") as outfile: + json.dump(split_groups, outfile, indent=2) # When writing an output dir we also produce some debugging information. 
for platform, result in final_result.items(): diff --git a/prelude/android/tools/native_libs_as_assets_metadata.py b/prelude/android/tools/native_libs_as_assets_metadata.py index 6b31c0b7e6298..87f8d5a1c3c9f 100644 --- a/prelude/android/tools/native_libs_as_assets_metadata.py +++ b/prelude/android/tools/native_libs_as_assets_metadata.py @@ -45,11 +45,6 @@ def main() -> None: type=Path, help="Metadata is written to this file", ) - parser.add_argument( - "--native-library-paths-output", - type=Path, - help="The actual paths of all the native libraries", - ) args = parser.parse_args() native_libraries = [] @@ -83,11 +78,6 @@ def main() -> None: ) ) - with open(args.native_library_paths_output, "w") as f: - f.write( - "\n".join([str(native_lib.full_path) for native_lib in native_libraries]) - ) - if __name__ == "__main__": main() diff --git a/prelude/android/tools/unpack_aar.py b/prelude/android/tools/unpack_aar.py index 83c9212d653c8..b86bfb1382a09 100644 --- a/prelude/android/tools/unpack_aar.py +++ b/prelude/android/tools/unpack_aar.py @@ -8,12 +8,12 @@ import argparse import pathlib -import shlex +import platform import shutil import zipfile from tempfile import TemporaryDirectory -from java.tools import utils +import utils CLASSES_JAR_FILE_NAME = "classes.jar" @@ -77,6 +77,12 @@ def _parse_args(): required=True, help="a path to the proguard config that is unpacked", ) + parser.add_argument( + "--lint-jar-path", + type=pathlib.Path, + required=True, + help="a path to the lint jar file that is unpacked", + ) parser.add_argument( "--jar-builder-tool", type=str, @@ -99,6 +105,7 @@ def main(): r_dot_txt_path = args.r_dot_txt_path annotation_jars_dir = args.annotation_jars_dir proguard_config_path = args.proguard_config_path + lint_jar_path = args.lint_jar_path jar_builder_tool = args.jar_builder_tool with TemporaryDirectory() as temp_dir: @@ -106,10 +113,11 @@ def main(): with zipfile.ZipFile(aar_path, "r") as aar_zip: aar_zip.extractall(unpack_dir) - # If the zip file was built on e.g. Windows, then it might not have - # correct permissions (which means we can't read any of the files), so - # make sure we actually read everything here. - utils.execute_command(["chmod", "-R", "+rX", unpack_dir]) + if platform.system() != "Windows": + # If the zip file was built on e.g. Windows, then it might not have + # correct permissions (which means we can't read any of the files), so + # make sure we actually read everything here. + utils.execute_command(["chmod", "-R", "+rX", unpack_dir]) unpacked_manifest = unpack_dir / "AndroidManifest.xml" assert unpacked_manifest.exists() @@ -150,6 +158,12 @@ def main(): else: proguard_config_path.touch() + unpacked_lint_jar = unpack_dir / "lint.jar" + if unpacked_lint_jar.exists(): + shutil.copyfile(unpacked_lint_jar, lint_jar_path) + else: + lint_jar_path.touch() + # Java .class files can exist at `classes.jar` or any jar file in /libs, # so combine them into a single `.jar` file. all_jars = [] @@ -166,7 +180,7 @@ def main(): with open(jars_list, "w") as f: f.write("\n".join([str(jar) for jar in all_jars])) - combine_all_jars_cmd = shlex.split(jar_builder_tool) + [ + combine_all_jars_cmd = utils.shlex_split(jar_builder_tool) + [ "--entries-to-jar", jars_list, "--output", diff --git a/prelude/android/user/android_emulators.bzl b/prelude/android/user/android_emulators.bzl new file mode 100644 index 0000000000000..29da9e6e7bd91 --- /dev/null +++ b/prelude/android/user/android_emulators.bzl @@ -0,0 +1,28 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//user:rule_spec.bzl", "RuleRegistrationSpec") + +def _impl(ctx: AnalysisContext) -> list[Provider]: + return [ + DefaultInfo(), + LocalResourceInfo( + setup = cmd_args([ctx.attrs.broker[RunInfo]] + ctx.attrs.args), + resource_env_vars = { + "ANDROID_SERIAL": "serial_number", + }, + ), + ] + +registration_spec = RuleRegistrationSpec( + name = "android_emulators", + impl = _impl, + attrs = { + "args": attrs.list(attrs.arg(), default = []), + "broker": attrs.exec_dep(providers = [RunInfo]), + }, +) diff --git a/prelude/android/util.bzl b/prelude/android/util.bzl index 484b2cddae277..6c64544520d8a 100644 --- a/prelude/android/util.bzl +++ b/prelude/android/util.bzl @@ -21,9 +21,9 @@ EnhancementContext = record( def create_enhancement_context(ctx: AnalysisContext) -> EnhancementContext: extra_sub_targets = {} - def debug_output(name: str, output: Artifact, other_outputs = []): + def debug_output(name: str, output: Artifact, other_outputs = [], sub_targets: dict[str, typing.Any] = {}): """Adds a subtarget to expose debugging outputs.""" - extra_sub_targets[name] = [DefaultInfo(default_outputs = [output], other_outputs = other_outputs)] + extra_sub_targets[name] = [DefaultInfo(default_outputs = [output], other_outputs = other_outputs, sub_targets = sub_targets)] def get_sub_targets(): return extra_sub_targets diff --git a/prelude/android/voltron.bzl b/prelude/android/voltron.bzl index 048cd27c239cd..20fcfc85ba9b7 100644 --- a/prelude/android/voltron.bzl +++ b/prelude/android/voltron.bzl @@ -15,7 +15,9 @@ load( "merge_shared_libraries", "traverse_shared_library_info", ) -load("@prelude//utils:utils.bzl", "expect", "flatten") +load("@prelude//utils:argfile.bzl", "argfile") +load("@prelude//utils:expect.bzl", "expect") +load("@prelude//utils:utils.bzl", "flatten") # "Voltron" gives us the ability to split our Android APKs into different "modules". These # modules can then be downloaded on demand rather than shipped with the "main" APK. 
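The module-graph bookkeeping added to get_apk_module_graph_info further down in this file amounts to a transitive closure over module deps; Starlark has no `while` loop (hence the bounded `for _ in range(1, 1000)` idiom there), but in plain Python it reduces to the following (module names invented):

```
# Plain-Python equivalent of the transitive module-deps computation below.
module_to_module_deps_map = {"m1": ["m2"], "m2": ["m3", "s_shared"], "m3": [], "s_shared": []}

transitive_module_deps_map = {}
for module, deps in module_to_module_deps_map.items():
    visited = set()
    queue = list(deps)
    while queue:
        node = queue.pop()
        visited.add(node)
        queue += [d for d in module_to_module_deps_map[node] if d not in visited]
    transitive_module_deps_map[module] = visited

assert transitive_module_deps_map["m1"] == {"m2", "m3", "s_shared"}
```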
@@ -66,7 +68,7 @@ def android_app_modularity_impl(ctx: AnalysisContext) -> list[Provider]: ctx.actions, ctx.label, [android_packageable_info], - traversed_shared_library_info.values(), + traversed_shared_library_info, ctx.attrs._android_toolchain[AndroidToolchainInfo], ctx.attrs.application_module_configs, ctx.attrs.application_module_dependencies, @@ -77,19 +79,19 @@ def android_app_modularity_impl(ctx: AnalysisContext) -> list[Provider]: no_dx_target_labels = [no_dx_target.label.raw_target() for no_dx_target in ctx.attrs.no_dx] java_packaging_deps = [packaging_dep for packaging_dep in get_all_java_packaging_deps(ctx, all_deps) if packaging_dep.dex and packaging_dep.dex.dex.owner.raw_target() not in no_dx_target_labels] targets_to_jars_args = [cmd_args([str(packaging_dep.label.raw_target()), packaging_dep.jar], delimiter = " ") for packaging_dep in java_packaging_deps] - targets_to_jars = ctx.actions.write("targets_to_jars.txt", targets_to_jars_args) + targets_to_jars = argfile(actions = ctx.actions, name = "targets_to_jars.txt", args = targets_to_jars_args) cmd.add([ "--targets-to-jars", targets_to_jars, - ]).hidden(targets_to_jars_args) + ]) if ctx.attrs.should_include_libraries: - targets_to_so_names_args = [cmd_args([str(shared_lib.label.raw_target()), so_name], delimiter = " ") for so_name, shared_lib in traversed_shared_library_info.items()] - targets_to_so_names = ctx.actions.write("targets_to_so_names.txt", targets_to_so_names_args) + targets_to_so_names_args = [cmd_args([str(shared_lib.label.raw_target()), shared_lib.soname.ensure_str()], delimiter = " ") for shared_lib in traversed_shared_library_info] + targets_to_so_names = argfile(actions = ctx.actions, name = "targets_to_so_names.txt", args = targets_to_so_names_args) cmd.add([ "--targets-to-so-names", targets_to_so_names, - ]).hidden(targets_to_so_names_args) + ]) traversed_prebuilt_native_library_dirs = android_packageable_info.prebuilt_native_library_dirs.traverse() if android_packageable_info.prebuilt_native_library_dirs else [] targets_to_non_assets_prebuilt_native_library_dirs_args = [ @@ -97,17 +99,21 @@ def android_app_modularity_impl(ctx: AnalysisContext) -> list[Provider]: for prebuilt_native_library_dir in traversed_prebuilt_native_library_dirs if not prebuilt_native_library_dir.is_asset and not prebuilt_native_library_dir.for_primary_apk ] - targets_to_non_assets_prebuilt_native_library_dirs = ctx.actions.write("targets_to_non_assets_prebuilt_native_library_dirs.txt", targets_to_non_assets_prebuilt_native_library_dirs_args) + targets_to_non_assets_prebuilt_native_library_dirs = argfile( + actions = ctx.actions, + name = "targets_to_non_assets_prebuilt_native_library_dirs.txt", + args = targets_to_non_assets_prebuilt_native_library_dirs_args, + ) cmd.add([ "--targets-to-non-asset-prebuilt-native-library-dirs", targets_to_non_assets_prebuilt_native_library_dirs, - ]).hidden(targets_to_non_assets_prebuilt_native_library_dirs_args) + ]) ctx.actions.run(cmd, category = "apk_module_graph") return [DefaultInfo(default_output = output)] -def get_target_to_module_mapping(ctx: AnalysisContext, deps_by_platform: dict[str, list[Dependency]]) -> [Artifact, None]: +def get_target_to_module_mapping(ctx: AnalysisContext, deps_by_platform: dict[str, list[Dependency]]) -> Artifact | None: if not ctx.attrs.application_module_configs: return None @@ -119,7 +125,7 @@ def get_target_to_module_mapping(ctx: AnalysisContext, deps_by_platform: dict[st ctx.actions, deps = filter(None, [x.get(SharedLibraryInfo) for x in deps]), ) - 
shared_libraries.extend(traverse_shared_library_info(shared_library_info).values()) + shared_libraries.extend(traverse_shared_library_info(shared_library_info)) cmd, output = _get_base_cmd_and_output( ctx.actions, @@ -146,13 +152,16 @@ def _get_base_cmd_and_output( android_toolchain: AndroidToolchainInfo, application_module_configs: dict[str, list[Dependency]], application_module_dependencies: [dict[str, list[str]], None], - application_module_blocklist: [list[list[Dependency]], None]) -> (cmd_args, Artifact): + application_module_blocklist: [list[Dependency], None]) -> (cmd_args, Artifact): deps_map = {} + primary_apk_deps = set() for android_packageable_info in android_packageable_infos: if android_packageable_info.deps: for deps_info in android_packageable_info.deps.traverse(): deps = deps_map.setdefault(deps_info.name, []) deps_map[deps_info.name] = dedupe(deps + deps_info.deps) + if deps_info.for_primary_apk: + primary_apk_deps.add(deps_info.name) target_graph_file = actions.write_json("target_graph.json", deps_map) application_module_configs_map = { @@ -182,10 +191,10 @@ def _get_base_cmd_and_output( used_by_wrap_script_libs = [str(shared_lib.label.raw_target()) for shared_lib in shared_libraries if shared_lib.for_primary_apk] prebuilt_native_library_dirs = flatten([list(android_packageable_info.prebuilt_native_library_dirs.traverse()) if android_packageable_info.prebuilt_native_library_dirs else [] for android_packageable_info in android_packageable_infos]) prebuilt_native_library_targets_for_primary_apk = dedupe([str(native_lib_dir.raw_target) for native_lib_dir in prebuilt_native_library_dirs if native_lib_dir.for_primary_apk]) - if application_module_blocklist or used_by_wrap_script_libs or prebuilt_native_library_targets_for_primary_apk: - all_blocklisted_deps = used_by_wrap_script_libs + prebuilt_native_library_targets_for_primary_apk + if application_module_blocklist or used_by_wrap_script_libs or prebuilt_native_library_targets_for_primary_apk or len(primary_apk_deps) > 0: + all_blocklisted_deps = used_by_wrap_script_libs + prebuilt_native_library_targets_for_primary_apk + list(primary_apk_deps) if application_module_blocklist: - all_blocklisted_deps.extend([str(blocklisted_dep.label.raw_target()) for blocklisted_dep in flatten(application_module_blocklist)]) + all_blocklisted_deps.extend([str(blocklisted_dep.label.raw_target()) for blocklisted_dep in application_module_blocklist]) application_module_blocklist_file = actions.write( "application_module_blocklist.txt", @@ -211,6 +220,9 @@ APKModuleGraphInfo = record( target_to_module_mapping_function = typing.Callable, module_to_canary_class_name_function = typing.Callable, module_to_module_deps_function = typing.Callable, + transitive_module_deps_function = typing.Callable, + calculated_deps_function = typing.Callable, + get_deps_debug_data = typing.Callable, ) def get_root_module_only_apk_module_graph_info() -> APKModuleGraphInfo: @@ -227,6 +239,9 @@ def get_root_module_only_apk_module_graph_info() -> APKModuleGraphInfo: target_to_module_mapping_function = all_targets_in_root_module, module_to_canary_class_name_function = root_module_canary_class_name, module_to_module_deps_function = root_module_deps, + transitive_module_deps_function = root_module_deps, + calculated_deps_function = root_module_deps, + get_deps_debug_data = root_module_deps, ) def get_apk_module_graph_info( @@ -241,6 +256,9 @@ def get_apk_module_graph_info( module_to_canary_class_name_map = {} module_to_module_deps_map = {} + transitive_module_deps_map = {} 
+ calculated_deps_map = {} + shared_module_rdeps = {} for line in module_infos: line_data = line.split(" ") module_name = line_data[0] @@ -248,12 +266,37 @@ def get_apk_module_graph_info( module_deps = [module_dep for module_dep in line_data[2:] if module_dep] module_to_canary_class_name_map[module_name] = canary_class_name module_to_module_deps_map[module_name] = module_deps + shared_modules = [module_dep for module_dep in module_deps if module_dep.startswith("s_")] + for shared_module in shared_modules: + rdeps = shared_module_rdeps.get(shared_module, set()) + rdeps.add(module_name) + shared_module_rdeps[shared_module] = rdeps target_to_module_mapping = {str(ctx.label.raw_target()): ROOT_MODULE} for line in target_to_module_lines: target, module = line.split(" ") target_to_module_mapping[target] = module + for module, deps in module_to_module_deps_map.items(): + visited_modules = set() + queue = [d for d in deps] + for _ in range(1, 1000): # represents a while loop since while loops don't exist in Starlark + if len(queue) == 0: + transitive_module_deps_map[module] = visited_modules + continue + node = queue.pop() + visited_modules.add(node) + for d in module_to_module_deps_map[node]: + if d not in visited_modules: + queue.append(d) + for shared_module, rdeps in shared_module_rdeps.items(): + rdeps_list = list(rdeps) + head = rdeps_list[0] + intersection = transitive_module_deps_map[head] + for rdep in rdeps_list[1:]: + intersection = intersection & transitive_module_deps_map[rdep] + calculated_deps_map[shared_module] = intersection | rdeps + def target_to_module_mapping_function(raw_target: str) -> str: mapped_module = target_to_module_mapping.get(raw_target) expect(mapped_module != None, "No module found for target {}!".format(raw_target)) @@ -265,9 +308,21 @@ def get_apk_module_graph_info( def module_to_module_deps_function(voltron_module: str) -> list: return module_to_module_deps_map.get(voltron_module) + def transitive_module_deps_function(voltron_module: str) -> set[str]: + return transitive_module_deps_map.get(voltron_module) + + def calculated_deps_function(voltron_module: str) -> set[str]: + return calculated_deps_map.get(voltron_module) if voltron_module in calculated_deps_map else set() + + def get_deps_debug_data() -> str: + return "tdeps - {} \n calculated deps - {}".format(transitive_module_deps_map, calculated_deps_map) + return APKModuleGraphInfo( module_list = module_to_canary_class_name_map.keys(), target_to_module_mapping_function = target_to_module_mapping_function, module_to_canary_class_name_function = module_to_canary_class_name_function, module_to_module_deps_function = module_to_module_deps_function, + transitive_module_deps_function = transitive_module_deps_function, + calculated_deps_function = calculated_deps_function, + get_deps_debug_data = get_deps_debug_data, ) diff --git a/prelude/apple/apple_asset_catalog.bzl b/prelude/apple/apple_asset_catalog.bzl index a336cc2be6240..c16c1b68a0ce1 100644 --- a/prelude/apple/apple_asset_catalog.bzl +++ b/prelude/apple/apple_asset_catalog.bzl @@ -38,7 +38,13 @@ def compile_apple_asset_catalog(ctx: AnalysisContext, specs: list[AppleAssetCata processing_options = get_bundle_resource_processing_options(ctx) compilation_options = get_apple_asset_catalogs_compilation_options(ctx) command = _get_actool_command(ctx, single_spec, catalog.as_output(), plist.as_output(), compilation_options) - ctx.actions.run(command, prefer_local = processing_options.prefer_local, allow_cache_upload = processing_options.allow_cache_upload,
category = "apple_asset_catalog") + ctx.actions.run( + command, + prefer_local = processing_options.prefer_local, + prefer_remote = processing_options.prefer_remote, + allow_cache_upload = processing_options.allow_cache_upload, + category = "apple_asset_catalog", + ) return AppleAssetCatalogResult(compiled_catalog = catalog, catalog_plist = plist) def _merge_asset_catalog_specs(ctx: AnalysisContext, xs: list[AppleAssetCatalogSpec]) -> AppleAssetCatalogSpec: @@ -111,5 +117,5 @@ def _get_actool_command(ctx: AnalysisContext, info: AppleAssetCatalogSpec, catal ], allow_args = True, ) - command = cmd_args(["/bin/sh", wrapper_script]).hidden([actool_command, catalog_output]) + command = cmd_args(["/bin/sh", wrapper_script], hidden = [actool_command, catalog_output]) return command diff --git a/prelude/apple/apple_binary.bzl b/prelude/apple/apple_binary.bzl index b2926e2de46a2..fada44cbd1de1 100644 --- a/prelude/apple/apple_binary.bzl +++ b/prelude/apple/apple_binary.bzl @@ -5,9 +5,11 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +load("@prelude//:attrs_validators.bzl", "get_attrs_validators_outputs") load("@prelude//:paths.bzl", "paths") +load("@prelude//:validation_deps.bzl", "get_validation_deps_outputs") load("@prelude//apple:apple_stripping.bzl", "apple_strip_args") -# @oss-disable: load("@prelude//apple/meta_only:linker_outputs.bzl", "add_extra_linker_outputs") +# @oss-disable: load("@prelude//apple/meta_only:linker_outputs.bzl", "get_extra_linker_output_flags", "get_extra_linker_outputs") load( "@prelude//apple/swift:swift_compilation.bzl", "compile_swift", @@ -22,13 +24,13 @@ load( "@prelude//cxx:argsfiles.bzl", "CompileArgsfiles", ) +load("@prelude//cxx:cxx_executable.bzl", "cxx_executable") +load("@prelude//cxx:cxx_library_utility.bzl", "cxx_attr_deps", "cxx_attr_exported_deps") load( - "@prelude//cxx:compile.bzl", + "@prelude//cxx:cxx_sources.bzl", "CxxSrcWithFlags", # @unused Used as a type + "get_srcs_with_flags", ) -load("@prelude//cxx:cxx_executable.bzl", "cxx_executable") -load("@prelude//cxx:cxx_library_utility.bzl", "cxx_attr_deps", "cxx_attr_exported_deps") -load("@prelude//cxx:cxx_sources.bzl", "get_srcs_with_flags") load( "@prelude//cxx:cxx_types.bzl", "CxxRuleAdditionalParams", @@ -40,6 +42,7 @@ load( "cxx_get_regular_cxx_headers_layout", "prepare_headers", ) +load("@prelude//cxx:index_store.bzl", "create_index_store_subtargets_and_provider") load( "@prelude//cxx:link_groups.bzl", "get_link_group_info", @@ -51,19 +54,21 @@ load( ) load( "@prelude//linking:link_info.bzl", + "CxxSanitizerRuntimeInfo", + "ExtraLinkerOutputs", "LinkCommandDebugOutputInfo", "UnstrippedLinkOutputInfo", ) load("@prelude//utils:arglike.bzl", "ArgLike") -load("@prelude//utils:utils.bzl", "expect") +load("@prelude//utils:expect.bzl", "expect") load(":apple_bundle_types.bzl", "AppleBundleLinkerMapInfo", "AppleMinDeploymentVersionInfo") load(":apple_bundle_utility.bzl", "get_bundle_infos_from_graph", "merge_bundle_linker_maps_info") load(":apple_code_signing_types.bzl", "AppleEntitlementsInfo") load(":apple_dsym.bzl", "DSYM_SUBTARGET", "get_apple_dsym") +load(":apple_entitlements.bzl", "entitlements_link_flags") +load(":apple_error_handler.bzl", "apple_build_error_handler") load(":apple_frameworks.bzl", "get_framework_search_path_flags") -load(":apple_sdk_metadata.bzl", "IPhoneSimulatorSdkMetadata", "MacOSXCatalystSdkMetadata") -load(":apple_target_sdk_version.bzl", "get_min_deployment_version_for_node", 
"get_min_deployment_version_target_linker_flags", "get_min_deployment_version_target_preprocessor_flags") -load(":apple_toolchain_types.bzl", "AppleToolchainInfo") +load(":apple_target_sdk_version.bzl", "get_min_deployment_version_for_node") load(":apple_utility.bzl", "get_apple_cxx_headers_layout", "get_apple_stripped_attr_value_with_default_fallback") load(":debug.bzl", "AppleDebuggableInfo") load(":resource_groups.bzl", "create_resource_graph") @@ -77,9 +82,10 @@ def apple_binary_impl(ctx: AnalysisContext) -> [list[Provider], Promise]: objc_bridging_header_flags = _get_bridging_header_flags(ctx) cxx_srcs, swift_srcs = _filter_swift_srcs(ctx) + contains_swift_sources = len(swift_srcs) > 0 framework_search_path_flags = get_framework_search_path_flags(ctx) - swift_compile = compile_swift( + swift_compile, _ = compile_swift( ctx, swift_srcs, False, # parse_as_library @@ -89,30 +95,28 @@ def apple_binary_impl(ctx: AnalysisContext) -> [list[Provider], Promise]: framework_search_path_flags, objc_bridging_header_flags, ) - swift_object_files = [swift_compile.object_file] if swift_compile else [] - + swift_object_files = swift_compile.object_files if swift_compile else [] swift_preprocessor = [swift_compile.pre] if swift_compile else [] - - extra_linker_output_flags, extra_linker_output_providers = [], {} # @oss-enable - # @oss-disable: extra_linker_output_flags, extra_linker_output_providers = add_extra_linker_outputs(ctx) - extra_link_flags = get_min_deployment_version_target_linker_flags(ctx) + _entitlements_link_flags(ctx) + extra_linker_output_flags + extra_link_flags = entitlements_link_flags(ctx) framework_search_path_pre = CPreprocessor( - relative_args = CPreprocessorArgs(args = [framework_search_path_flags]), + args = CPreprocessorArgs(args = [framework_search_path_flags]), ) - swift_dependency_info = swift_compile.dependency_info if swift_compile else get_swift_dependency_info(ctx, None, None, deps_providers) + swift_dependency_info = swift_compile.dependency_info if swift_compile else get_swift_dependency_info(ctx, None, deps_providers) swift_debug_info = get_swift_debug_infos( ctx, swift_dependency_info, swift_compile, ) + validation_deps_outputs = get_validation_deps_outputs(ctx) stripped = get_apple_stripped_attr_value_with_default_fallback(ctx) constructor_params = CxxRuleConstructorParams( rule_type = "apple_binary", headers_layout = get_apple_cxx_headers_layout(ctx), extra_link_flags = extra_link_flags, + extra_hidden = validation_deps_outputs, srcs = cxx_srcs, additional = CxxRuleAdditionalParams( srcs = swift_srcs, @@ -130,18 +134,32 @@ def apple_binary_impl(ctx: AnalysisContext) -> [list[Provider], Promise]: ), ], }, + external_debug_info_tags = [], # This might be used to materialise all transitive Swift related object files with ArtifactInfoTag("swiftmodule") ), extra_link_input = swift_object_files, extra_link_input_has_external_debug_info = True, - extra_preprocessors = get_min_deployment_version_target_preprocessor_flags(ctx) + [framework_search_path_pre] + swift_preprocessor, + extra_preprocessors = [framework_search_path_pre] + swift_preprocessor, strip_executable = stripped, strip_args_factory = apple_strip_args, - cxx_populate_xcode_attributes_func = apple_populate_xcode_attributes, + cxx_populate_xcode_attributes_func = lambda local_ctx, **kwargs: apple_populate_xcode_attributes(local_ctx, contains_swift_sources = contains_swift_sources, **kwargs), link_group_info = get_link_group_info(ctx), prefer_stripped_objects = ctx.attrs.prefer_stripped_objects, # Some apple 
rules rely on `static` libs *not* following dependents. link_groups_force_static_follows_dependents = False, swiftmodule_linkable = get_swiftmodule_linkable(swift_compile), + compiler_flags = ctx.attrs.compiler_flags, + lang_compiler_flags = ctx.attrs.lang_compiler_flags, + platform_compiler_flags = ctx.attrs.platform_compiler_flags, + lang_platform_compiler_flags = ctx.attrs.lang_platform_compiler_flags, + preprocessor_flags = ctx.attrs.preprocessor_flags, + lang_preprocessor_flags = ctx.attrs.lang_preprocessor_flags, + platform_preprocessor_flags = ctx.attrs.platform_preprocessor_flags, + lang_platform_preprocessor_flags = ctx.attrs.lang_platform_preprocessor_flags, + error_handler = apple_build_error_handler, + index_stores = swift_compile.index_stores if swift_compile else None, + executable_name = ctx.attrs.executable_name, + extra_linker_outputs_factory = _get_extra_linker_outputs, + extra_linker_outputs_flags_factory = _get_extra_linker_outputs_flags, ) cxx_output = cxx_executable(ctx, constructor_params) @@ -163,16 +181,17 @@ def apple_binary_impl(ctx: AnalysisContext) -> [list[Provider], Promise]: action_identifier = unstripped_binary.short_path, ) cxx_output.sub_targets[DSYM_SUBTARGET] = [DefaultInfo(default_output = dsym_artifact)] - cxx_output.sub_targets.update(extra_linker_output_providers) min_version = get_min_deployment_version_for_node(ctx) min_version_providers = [AppleMinDeploymentVersionInfo(version = min_version)] + non_exported_deps = cxx_attr_deps(ctx) + exported_deps = cxx_attr_exported_deps(ctx) resource_graph = create_resource_graph( ctx = ctx, labels = ctx.attrs.labels, - deps = cxx_attr_deps(ctx), - exported_deps = cxx_attr_exported_deps(ctx), + deps = non_exported_deps, + exported_deps = exported_deps, ) bundle_infos = get_bundle_infos_from_graph(resource_graph) if cxx_output.linker_map_data: @@ -182,42 +201,46 @@ def apple_binary_impl(ctx: AnalysisContext) -> [list[Provider], Promise]: if cxx_output.link_command_debug_output: link_command_providers.append(LinkCommandDebugOutputInfo(debug_outputs = [cxx_output.link_command_debug_output])) + sanitizer_runtime_providers = [] + if cxx_output.sanitizer_runtime_files: + sanitizer_runtime_providers.append(CxxSanitizerRuntimeInfo(runtime_files = cxx_output.sanitizer_runtime_files)) + + attrs_validators_providers, attrs_validators_subtargets = get_attrs_validators_outputs(ctx) + + index_stores = [] + if swift_compile and swift_compile.index_stores: + index_stores.extend(swift_compile.index_stores) + index_stores.extend(cxx_output.index_stores) + + index_store_subtargets, index_store_info = create_index_store_subtargets_and_provider(ctx, index_stores, non_exported_deps + exported_deps) + cxx_output.sub_targets.update(index_store_subtargets) + return [ - DefaultInfo(default_output = cxx_output.binary, sub_targets = cxx_output.sub_targets), - RunInfo(args = cmd_args(cxx_output.binary).hidden(cxx_output.runtime_files)), + DefaultInfo(default_output = cxx_output.binary, sub_targets = cxx_output.sub_targets | attrs_validators_subtargets), + RunInfo(args = cmd_args(cxx_output.binary, hidden = cxx_output.runtime_files)), AppleEntitlementsInfo(entitlements_file = ctx.attrs.entitlements_file), AppleDebuggableInfo(dsyms = [dsym_artifact], debug_info_tset = cxx_output.external_debug_info), cxx_output.xcode_data, cxx_output.compilation_db, merge_bundle_linker_maps_info(bundle_infos), UnstrippedLinkOutputInfo(artifact = unstripped_binary), - ] + [resource_graph] + min_version_providers + link_command_providers + index_store_info, + 
] + [resource_graph] + min_version_providers + link_command_providers + sanitizer_runtime_providers + attrs_validators_providers if uses_explicit_modules(ctx): return get_swift_anonymous_targets(ctx, get_apple_binary_providers) else: return get_apple_binary_providers([]) -_SDK_NAMES_NEED_ENTITLEMENTS_IN_BINARY = [ - IPhoneSimulatorSdkMetadata.name, - MacOSXCatalystSdkMetadata.name, -] - -def _needs_entitlements_in_binary(ctx: AnalysisContext) -> bool: - apple_toolchain_info = ctx.attrs._apple_toolchain[AppleToolchainInfo] - return apple_toolchain_info.sdk_name in _SDK_NAMES_NEED_ENTITLEMENTS_IN_BINARY - -def _entitlements_link_flags(ctx: AnalysisContext) -> list[typing.Any]: - return [ - "-Xlinker", - "-sectcreate", - "-Xlinker", - "__TEXT", - "-Xlinker", - "__entitlements", - "-Xlinker", - ctx.attrs.entitlements_file, - ] if (ctx.attrs.entitlements_file and _needs_entitlements_in_binary(ctx)) else [] +def _get_extra_linker_outputs(ctx: AnalysisContext) -> ExtraLinkerOutputs: + _ = ctx # buildifier: disable=unused-variable + # @oss-disable: return get_extra_linker_outputs(ctx) + return ExtraLinkerOutputs() # @oss-enable + +def _get_extra_linker_outputs_flags(ctx: AnalysisContext, outputs: dict[str, Artifact]) -> list[ArgLike]: + _ = ctx # buildifier: disable=unused-variable + # @oss-disable: return get_extra_linker_output_flags(ctx, outputs) + return [] # @oss-enable def _filter_swift_srcs(ctx: AnalysisContext) -> (list[CxxSrcWithFlags], list[CxxSrcWithFlags]): cxx_srcs = [] @@ -243,8 +266,7 @@ def _get_bridging_header_flags(ctx: AnalysisContext) -> list[ArgLike]: header_map = {paths.join(h.namespace, h.name): h.artifact for h in headers} # We need to expose private headers to swift-compile action, in case something is imported to bridging header. - # TODO(chatatap): Handle absolute paths here. 
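# A minimal before/after sketch of the `cmd_args` migration applied throughout these hunks
# (e.g. the `RunInfo` args above and the actool wrapper command earlier): hidden inputs move
# from the fluent `.hidden()` mutator into a constructor keyword, so they are declared at the
# point where the command line is built.
#
#     args = cmd_args(cxx_output.binary).hidden(cxx_output.runtime_files)    # old style
#     args = cmd_args(cxx_output.binary, hidden = cxx_output.runtime_files)  # new style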
- header_root = prepare_headers(ctx, header_map, "apple-binary-private-headers", None) + header_root = prepare_headers(ctx, header_map, "apple-binary-private-headers") if header_root != None: private_headers_args = [cmd_args("-I"), header_root.include_path] else: diff --git a/prelude/apple/apple_bundle.bzl b/prelude/apple/apple_bundle.bzl index 4e89bc9d70c7c..f28ba61a047ff 100644 --- a/prelude/apple/apple_bundle.bzl +++ b/prelude/apple/apple_bundle.bzl @@ -12,13 +12,23 @@ load( "project_artifacts", ) load("@prelude//:paths.bzl", "paths") +load("@prelude//:validation_deps.bzl", "get_validation_deps_outputs") load("@prelude//apple:apple_toolchain_types.bzl", "AppleToolchainInfo", "AppleToolsInfo") +load("@prelude//apple:apple_xctest_frameworks_utility.bzl", "get_xctest_frameworks_bundle_parts") +load("@prelude//apple:debug.bzl", "AppleSelectiveDebuggableMetadata") # @oss-disable: load("@prelude//apple/meta_only:linker_outputs.bzl", "subtargets_for_apple_bundle_extra_outputs") load("@prelude//apple/user:apple_selected_debug_path_file.bzl", "SELECTED_DEBUG_PATH_FILE_NAME") load("@prelude//apple/user:apple_selective_debugging.bzl", "AppleSelectiveDebuggingInfo") +load("@prelude//apple/validation:debug_artifacts.bzl", "get_debug_artifacts_validators") +load( + "@prelude//cxx:index_store.bzl", + "IndexStoreInfo", # @unused Used as a type + "create_index_store_subtargets_and_provider", +) load( "@prelude//ide_integrations:xcode.bzl", "XCODE_DATA_SUB_TARGET", + "XcodeDataInfoKeys", "generate_xcode_data", ) load( @@ -33,23 +43,34 @@ load( "make_link_command_debug_output_json_info", ) load("@prelude//utils:arglike.bzl", "ArgLike") -load( - "@prelude//utils:set.bzl", - "set", -) +load("@prelude//utils:lazy.bzl", "lazy") load( "@prelude//utils:utils.bzl", - "expect", "flatten", - "is_any", ) load(":apple_bundle_destination.bzl", "AppleBundleDestination") load(":apple_bundle_part.bzl", "AppleBundlePart", "SwiftStdlibArguments", "assemble_bundle", "bundle_output", "get_apple_bundle_part_relative_destination_path", "get_bundle_dir_name") -load(":apple_bundle_resources.bzl", "get_apple_bundle_resource_part_list", "get_is_watch_bundle") -load(":apple_bundle_types.bzl", "AppleBinaryExtraOutputsInfo", "AppleBundleBinaryOutput", "AppleBundleExtraOutputsInfo", "AppleBundleInfo", "AppleBundleLinkerMapInfo", "AppleBundleResourceInfo") +load(":apple_bundle_resources.bzl", "get_apple_bundle_resource_part_list") +load( + ":apple_bundle_types.bzl", + "AppleBinaryExtraOutputsInfo", + "AppleBundleBinaryOutput", + "AppleBundleExtraOutputsInfo", + "AppleBundleInfo", + "AppleBundleLinkerMapInfo", + "AppleBundleResourceInfo", + "AppleBundleType", + "AppleBundleTypeDefault", +) load(":apple_bundle_utility.bzl", "get_bundle_min_target_version", "get_default_binary_dep", "get_flattened_binary_deps", "get_product_name") -load(":apple_dsym.bzl", "DSYM_INFO_SUBTARGET", "DSYM_SUBTARGET", "get_apple_dsym", "get_apple_dsym_ext", "get_apple_dsym_info") +load(":apple_code_signing_types.bzl", "CodeSignConfiguration") +load(":apple_dsym.bzl", "DSYM_INFO_SUBTARGET", "DSYM_SUBTARGET", "EXTENDED_DSYM_INFO_SUBTARGET", "get_apple_dsym", "get_apple_dsym_ext", "get_apple_dsym_info_json") load(":apple_sdk.bzl", "get_apple_sdk_name") +load( + ":apple_sdk_metadata.bzl", + "MacOSXCatalystSdkMetadata", + "MacOSXSdkMetadata", +) load(":apple_universal_binaries.bzl", "create_universal_binary") load( ":debug.bzl", @@ -59,7 +80,6 @@ load( ) load(":xcode.bzl", "apple_xcode_data_add_xctoolchain") -INSTALL_DATA_SUB_TARGET = "install-data" 
_INSTALL_DATA_FILE_NAME = "install_apple_data.json" _PLIST = "plist" @@ -67,8 +87,7 @@ _PLIST = "plist" _XCTOOLCHAIN_SUB_TARGET = "xctoolchain" AppleBundleDebuggableInfo = record( - # Can be `None` for WatchKit stub - binary_info = field([AppleDebuggableInfo, None]), + binary_info = field(AppleDebuggableInfo), # Debugable info of all bundle deps dep_infos = field(list[AppleDebuggableInfo]), # Concat of `binary_info` and `dep_infos` @@ -88,13 +107,6 @@ AppleBundlePartListOutput = record( ) def _get_binary(ctx: AnalysisContext) -> AppleBundleBinaryOutput: - # No binary means we are building watchOS bundle. In v1 bundle binary is present, but its sources are empty. - if ctx.attrs.binary == None: - return AppleBundleBinaryOutput( - binary = _get_watch_kit_stub_artifact(ctx), - is_watchkit_stub_binary = True, - ) - if len(get_flattened_binary_deps(ctx.attrs.binary)) > 1: if ctx.attrs.selective_debugging != None: fail("Selective debugging is not supported for universal binaries.") @@ -115,9 +127,10 @@ def _get_binary(ctx: AnalysisContext) -> AppleBundleBinaryOutput: def _get_bundle_dsym_name(ctx: AnalysisContext) -> str: return paths.replace_extension(get_bundle_dir_name(ctx), ".dSYM") -def _scrub_binary(ctx, binary: Artifact, binary_execution_preference_info: None | LinkExecutionPreferenceInfo) -> Artifact: +def _scrub_binary(ctx, binary: Artifact, binary_execution_preference_info: None | LinkExecutionPreferenceInfo, focused_targets_labels: list[Label] = []) -> Artifact: # If fast adhoc code signing is enabled, we need to resign the binary as it won't be signed later. - if ctx.attrs._fast_adhoc_signing_enabled: + code_signing_configuration = CodeSignConfiguration(ctx.attrs._code_signing_configuration) + if code_signing_configuration == CodeSignConfiguration("fast-adhoc"): apple_tools = ctx.attrs._apple_tools[AppleToolsInfo] adhoc_codesign_tool = apple_tools.adhoc_codesign_tool else: @@ -125,7 +138,7 @@ def _scrub_binary(ctx, binary: Artifact, binary_execution_preference_info: None selective_debugging_info = ctx.attrs.selective_debugging[AppleSelectiveDebuggingInfo] preference = binary_execution_preference_info.preference if binary_execution_preference_info else LinkExecutionPreference("any") - return selective_debugging_info.scrub_binary(ctx, binary, preference, adhoc_codesign_tool) + return selective_debugging_info.scrub_binary(ctx, binary, preference, adhoc_codesign_tool, focused_targets_labels) def _maybe_scrub_binary(ctx, binary_dep: Dependency) -> AppleBundleBinaryOutput: binary = binary_dep[DefaultInfo].default_outputs[0] @@ -133,26 +146,47 @@ def _maybe_scrub_binary(ctx, binary_dep: Dependency) -> AppleBundleBinaryOutput: if ctx.attrs.selective_debugging == None: return AppleBundleBinaryOutput(binary = binary, debuggable_info = debuggable_info) - binary = _scrub_binary(ctx, binary, binary_dep.get(LinkExecutionPreferenceInfo)) - if not debuggable_info: - return AppleBundleBinaryOutput(binary = binary) + if debuggable_info: + if debuggable_info.selective_metadata: + fail("Binary cannot contain selective metadata, as it only gets scrubbed when embedded in a bundle") - # If we have debuggable info for this binary, create the scrubed dsym for the binary and filter debug info. - debug_info_tset = debuggable_info.debug_info_tset - dsym_artifact = _get_scrubbed_binary_dsym(ctx, binary, debug_info_tset) + # If we have debuggable info for this binary, create the scrubbed dsym for the binary and filter debug info.
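+        # The debug info is filtered before scrubbing so that the focused Swift module labels
+        # it yields can be passed to _scrub_binary, and the dSYM is then generated from the
+        # scrubbed binary rather than from the original one.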
+ debug_info_tset = debuggable_info.debug_info_tset - all_debug_info = debug_info_tset._tset.traverse() - selective_debugging_info = ctx.attrs.selective_debugging[AppleSelectiveDebuggingInfo] - filtered_debug_info = selective_debugging_info.filter(all_debug_info) + # The traversal is intentionally designed to be topological, allowing us to skip + # portions of the debug info that are not transitive in relation to the focused targets. + all_debug_info = debug_info_tset._tset.traverse(ordering = "topological") + selective_debugging_info = ctx.attrs.selective_debugging[AppleSelectiveDebuggingInfo] + filtered_debug_info = selective_debugging_info.filter(ctx, all_debug_info) - filtered_external_debug_info = make_artifact_tset( - actions = ctx.actions, - label = ctx.label, - artifacts = flatten(filtered_debug_info.map.values()), - ) - debuggable_info = AppleDebuggableInfo(dsyms = [dsym_artifact], debug_info_tset = filtered_external_debug_info, filtered_map = filtered_debug_info.map) + filtered_external_debug_info = make_artifact_tset( + actions = ctx.actions, + label = ctx.label, + infos = filtered_debug_info.infos, + ) - return AppleBundleBinaryOutput(binary = binary, debuggable_info = debuggable_info) + binary = _scrub_binary(ctx, binary, binary_dep.get(LinkExecutionPreferenceInfo), filtered_debug_info.swift_modules_labels) + dsym_artifact = _get_scrubbed_binary_dsym(ctx, binary, debug_info_tset) + + filtered_map = {} + for info in filtered_debug_info.infos: + filtered_map.setdefault(info.label, []).extend(info.artifacts) + + debuggable_info = AppleDebuggableInfo( + dsyms = [dsym_artifact], + debug_info_tset = filtered_external_debug_info, + filtered_map = filtered_map, + selective_metadata = [ + AppleSelectiveDebuggableMetadata( + dsym = dsym_artifact, + metadata = filtered_debug_info.metadata, + ), + ], + ) + return AppleBundleBinaryOutput(binary = binary, debuggable_info = debuggable_info) + else: + binary = _scrub_binary(ctx, binary, binary_dep.get(LinkExecutionPreferenceInfo)) + return AppleBundleBinaryOutput(binary = binary) def _get_scrubbed_binary_dsym(ctx, binary: Artifact, debug_info_tset: ArtifactTSet) -> Artifact: debug_info = project_artifacts( @@ -171,9 +205,6 @@ def _get_binary_bundle_parts(ctx: AnalysisContext, binary_output: AppleBundleBin """Returns a tuple of all binary bundle parts and the primary bundle binary.""" result = [] - if binary_output.is_watchkit_stub_binary: - # If we're using a stub binary from watchkit, we also need to add extra part for stub. - result.append(AppleBundlePart(source = binary_output.binary, destination = AppleBundleDestination("watchkitstub"), new_name = "WK")) primary_binary_part = AppleBundlePart(source = binary_output.binary, destination = AppleBundleDestination("executables"), new_name = get_product_name(ctx)) result.append(primary_binary_part) @@ -184,10 +215,6 @@ def _get_binary_bundle_parts(ctx: AnalysisContext, binary_output: AppleBundleBin return result, primary_binary_part def _get_dsym_input_binary_arg(ctx: AnalysisContext, primary_binary_path_arg: cmd_args) -> cmd_args: - # No binary means we are building watchOS bundle. In v1 bundle binary is present, but its sources are empty. 
- if ctx.attrs.binary == None: - return cmd_args(_get_watch_kit_stub_artifact(ctx)) - binary_dep = get_default_binary_dep(ctx.attrs.binary) default_binary = binary_dep[DefaultInfo].default_outputs[0] @@ -203,17 +230,16 @@ def _get_dsym_input_binary_arg(ctx: AnalysisContext, primary_binary_path_arg: cm else: return primary_binary_path_arg -def _get_watch_kit_stub_artifact(ctx: AnalysisContext) -> Artifact: - expect(ctx.attrs.binary == None, "Stub is useful only when binary is not set which means watchOS bundle is built.") - stub_binary = ctx.attrs._apple_toolchain[AppleToolchainInfo].watch_kit_stub_binary - if stub_binary == None: - fail("Expected Watch Kit stub binary to be provided when bundle binary is not set.") - return stub_binary - def _apple_bundle_run_validity_checks(ctx: AnalysisContext): if ctx.attrs.extension == None: fail("`extension` attribute is required") +def _get_deps_selective_metadata(deps_debuggable_infos: list[AppleDebuggableInfo]) -> list[AppleSelectiveDebuggableMetadata]: + all_metadatas = [] + for debuggable_info in deps_debuggable_infos: + all_metadatas.extend(debuggable_info.selective_metadata) + return all_metadatas + def _get_deps_debuggable_infos(ctx: AnalysisContext) -> list[AppleDebuggableInfo]: binary_labels = filter(None, [getattr(binary_dep, "label", None) for binary_dep in get_flattened_binary_deps(ctx.attrs.binary)]) deps_debuggable_infos = filter( @@ -225,10 +251,6 @@ def _get_deps_debuggable_infos(ctx: AnalysisContext) -> list[AppleDebuggableInfo return deps_debuggable_infos def _get_bundle_binary_dsym_artifacts(ctx: AnalysisContext, binary_output: AppleBundleBinaryOutput, executable_arg: ArgLike) -> list[Artifact]: - # We don't care to process the watchkit stub binary. - if binary_output.is_watchkit_stub_binary: - return [] - if not ctx.attrs.split_arch_dsym: # Calling `dsymutil` on the correctly named binary in the _final bundle_ to yield dsym files # with naming convention compatible with Meta infra. 
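# The removals in these hunks retire the WatchKit stub path: `_get_binary` no longer
# special-cases `ctx.attrs.binary == None`, the `is_watchkit_stub_binary` checks disappear,
# and the `watchkitstub` destination is dropped from `AppleBundleDestination` further below.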
@@ -248,18 +270,21 @@ def _get_bundle_binary_dsym_artifacts(ctx: AnalysisContext, binary_output: Apple return binary_output.debuggable_info.dsyms def _get_all_agg_debug_info(ctx: AnalysisContext, binary_output: AppleBundleBinaryOutput, deps_debuggable_infos: list[AppleDebuggableInfo]) -> AggregatedAppleDebugInfo: - all_debug_infos = deps_debuggable_infos - if not binary_output.is_watchkit_stub_binary: - binary_debuggable_info = binary_output.debuggable_info - all_debug_infos = all_debug_infos + [binary_debuggable_info] + all_debug_infos = deps_debuggable_infos + ([binary_output.debuggable_info] if binary_output.debuggable_info else []) return get_aggregated_debug_info(ctx, all_debug_infos) +def _maybe_scrub_selected_debug_paths_file(ctx: AnalysisContext, package_names: list[str]) -> Artifact: + if not ctx.attrs.selective_debugging: + return ctx.actions.write(SELECTED_DEBUG_PATH_FILE_NAME, sorted(set(package_names))) + + selective_debugging_info = ctx.attrs.selective_debugging[AppleSelectiveDebuggingInfo] + return selective_debugging_info.scrub_selected_debug_paths_file(ctx, package_names, SELECTED_DEBUG_PATH_FILE_NAME) + def _get_selected_debug_targets_part(ctx: AnalysisContext, agg_debug_info: AggregatedAppleDebugInfo) -> [AppleBundlePart, None]: # Only app bundle need this, and this file is searched by FBReport at the bundle root if ctx.attrs.extension == "app" and agg_debug_info.debug_info.filtered_map: package_names = [label.package for label in agg_debug_info.debug_info.filtered_map.keys()] - package_names = set(package_names).list() - output = ctx.actions.write(SELECTED_DEBUG_PATH_FILE_NAME, package_names) + output = _maybe_scrub_selected_debug_paths_file(ctx, package_names) return AppleBundlePart(source = output, destination = AppleBundleDestination("bundleroot"), new_name = SELECTED_DEBUG_PATH_FILE_NAME) else: return None @@ -274,11 +299,30 @@ def get_apple_bundle_part_list(ctx: AnalysisContext, params: AppleBundlePartList if resource_part_list == None: resource_part_list = get_apple_bundle_resource_part_list(ctx) + xctest_frameworks_parts = [] + if getattr(ctx.attrs, "embed_xctest_frameworks", False): + if getattr(ctx.attrs, "extension", "") == "app": + # XCTest frameworks should only be enabled for the top-level app, + # not for any other bundles in the dep graph + xctest_frameworks_parts = get_xctest_frameworks_bundle_parts( + ctx, + # It's not possible to pass information down the graph whether + # the `apple_test()` rdep needs Swift support, so just assume + # it does; in the future, Obj-C-only test targets should be rare.
+ swift_support_needed = True, + ) + return AppleBundlePartListOutput( - parts = resource_part_list.resource_parts + params.binaries, + parts = resource_part_list.resource_parts + params.binaries + xctest_frameworks_parts, info_plist_part = resource_part_list.info_plist_part, ) +def _infer_apple_bundle_type(ctx: AnalysisContext) -> AppleBundleType: + if ctx.attrs.bundle_type != None: + return AppleBundleType(ctx.attrs.bundle_type) + + return AppleBundleTypeDefault + def apple_bundle_impl(ctx: AnalysisContext) -> list[Provider]: _apple_bundle_run_validity_checks(ctx) @@ -294,11 +338,27 @@ def apple_bundle_impl(ctx: AnalysisContext) -> list[Provider]: primary_binary_rel_path = get_apple_bundle_part_relative_destination_path(ctx, primary_binary_part) - sub_targets = assemble_bundle(ctx, bundle, apple_bundle_part_list_output.parts, apple_bundle_part_list_output.info_plist_part, SwiftStdlibArguments(primary_binary_rel_path = primary_binary_rel_path)) + validation_deps_outputs = get_validation_deps_outputs(ctx) + + incremental_bundling_override = None + sdk_name = get_apple_sdk_name(ctx) + if sdk_name == MacOSXSdkMetadata.name or sdk_name == MacOSXCatalystSdkMetadata.name: + incremental_bundling_override = False + + bundle_result = assemble_bundle( + ctx, + bundle, + apple_bundle_part_list_output.parts, + apple_bundle_part_list_output.info_plist_part, + SwiftStdlibArguments(primary_binary_rel_path = primary_binary_rel_path), + validation_deps_outputs, + incremental_bundling_override = incremental_bundling_override, + ) + sub_targets = bundle_result.sub_targets sub_targets.update(aggregated_debug_info.sub_targets) primary_binary_path = cmd_args([bundle, primary_binary_rel_path], delimiter = "/") - primary_binary_path_arg = cmd_args(primary_binary_path).hidden(bundle) + primary_binary_path_arg = cmd_args(primary_binary_path, hidden = bundle) linker_maps_directory, linker_map_info = _linker_maps_data(ctx) sub_targets["linker-maps"] = [DefaultInfo(default_output = linker_maps_directory)] @@ -315,9 +375,30 @@ def apple_bundle_impl(ctx: AnalysisContext) -> list[Provider]: if dsym_artifacts: sub_targets[DSYM_SUBTARGET] = [DefaultInfo(default_outputs = dsym_artifacts)] - dsym_info = get_apple_dsym_info(ctx, binary_dsyms = binary_dsym_artifacts, dep_dsyms = dep_dsym_artifacts) + dsym_json_info = get_apple_dsym_info_json(binary_dsym_artifacts, dep_dsym_artifacts) + dsym_info = ctx.actions.write_json("dsym-info.json", dsym_json_info.json_object, pretty = True) sub_targets[DSYM_INFO_SUBTARGET] = [ - DefaultInfo(default_output = dsym_info, other_outputs = dsym_artifacts), + DefaultInfo(default_output = dsym_info, other_outputs = dsym_json_info.outputs), + ] + + deps_selective_metadata = _get_deps_selective_metadata(deps_debuggable_infos) + binary_selective_metadata = [] + if binary_outputs.debuggable_info and binary_outputs.debuggable_info.selective_metadata: + if len(binary_outputs.debuggable_info.selective_metadata) > 1: + fail("Binary cannot have multiple selective metadata") + + # `AppleSelectiveDebuggableMetadata` for the binary is computed here because + # the dSYMs for the bundle get regenerated (via call to `_get_bundle_binary_dsym_artifacts()`). + # To ensure we have the correct dSYM path, metadata needs to be created here, as otherwise + # the map will contain the value of the dSYM for the standalone binary, not for the binary + # as part of the bundle. 
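+        # One metadata entry is emitted per bundle dSYM (a split-arch build can yield several),
+        # each reusing the single metadata computed for the binary.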
+ binary_selective_metadata = [AppleSelectiveDebuggableMetadata(dsym = binary_dsym, metadata = binary_outputs.debuggable_info.selective_metadata[0].metadata) for binary_dsym in binary_dsym_artifacts] + all_selective_metadata = binary_selective_metadata + deps_selective_metadata + + extended_dsym_json_info = get_apple_dsym_info_json(binary_dsym_artifacts, dep_dsym_artifacts, all_selective_metadata) + extended_dsym_info = ctx.actions.write_json("extended-dsym-info.json", extended_dsym_json_info.json_object, pretty = True) + sub_targets[EXTENDED_DSYM_INFO_SUBTARGET] = [ + DefaultInfo(default_output = extended_dsym_info, other_outputs = extended_dsym_json_info.outputs), ] sub_targets[_PLIST] = [DefaultInfo(default_output = apple_bundle_part_list_output.info_plist_part.source)] @@ -325,10 +406,10 @@ def apple_bundle_impl(ctx: AnalysisContext) -> list[Provider]: sub_targets[_XCTOOLCHAIN_SUB_TARGET] = ctx.attrs._apple_xctoolchain.providers # Define the xcode data sub target - xcode_data_default_info, xcode_data_info = generate_xcode_data(ctx, "apple_bundle", bundle, _xcode_populate_attributes, processed_info_plist = apple_bundle_part_list_output.info_plist_part.source) + plist_bundle_relative_path = get_apple_bundle_part_relative_destination_path(ctx, apple_bundle_part_list_output.info_plist_part) + xcode_data_default_info, xcode_data_info = generate_xcode_data(ctx, "apple_bundle", bundle, _xcode_populate_attributes, processed_info_plist = apple_bundle_part_list_output.info_plist_part.source, info_plist_relative_path = plist_bundle_relative_path) sub_targets[XCODE_DATA_SUB_TARGET] = xcode_data_default_info - plist_bundle_relative_path = get_apple_bundle_part_relative_destination_path(ctx, apple_bundle_part_list_output.info_plist_part) install_data = generate_install_data(ctx, plist_bundle_relative_path) # Collect extra bundle outputs @@ -336,19 +417,42 @@ def apple_bundle_impl(ctx: AnalysisContext) -> list[Provider]: # @oss-disable: extra_output_subtargets = subtargets_for_apple_bundle_extra_outputs(ctx, extra_output_provider) # @oss-disable: sub_targets.update(extra_output_subtargets) + # index store + index_store_subtargets, index_store_info = _index_store_data(ctx) + sub_targets.update(index_store_subtargets) + + bundle_and_dsym_info_json = { + "bundle": bundle, + "dsym": dsym_json_info.json_object, + } + bundle_and_dsym_info = ctx.actions.write_json("bundle-and-dsym-info.json", bundle_and_dsym_info_json) + sub_targets["bundle-and-dsym-info"] = [ + DefaultInfo( + default_output = bundle_and_dsym_info, + other_outputs = [bundle] + dsym_json_info.outputs, + ), + ] + + (validation_providers, validation_subtargets) = _get_debug_validators_subtargets_and_providers( + ctx, + aggregated_debug_info.debug_info.debug_info_tset, + ) + sub_targets.update(validation_subtargets) + return [ DefaultInfo(default_output = bundle, sub_targets = sub_targets), AppleBundleInfo( bundle = bundle, + bundle_type = _infer_apple_bundle_type(ctx), binary_name = get_product_name(ctx), - is_watchos = get_is_watch_bundle(ctx), - contains_watchapp = is_any(lambda part: part.destination == AppleBundleDestination("watchapp"), apple_bundle_part_list_output.parts), + contains_watchapp = lazy.is_any(lambda part: part.destination == AppleBundleDestination("watchapp"), apple_bundle_part_list_output.parts), skip_copying_swift_stdlib = ctx.attrs.skip_copying_swift_stdlib, ), AppleDebuggableInfo( dsyms = dsym_artifacts, debug_info_tset = aggregated_debug_info.debug_info.debug_info_tset, filtered_map = 
aggregated_debug_info.debug_info.filtered_map, + selective_metadata = all_selective_metadata, ), InstallInfo( installer = ctx.attrs._apple_toolchain[AppleToolchainInfo].installer, @@ -362,20 +466,52 @@ def apple_bundle_impl(ctx: AnalysisContext) -> list[Provider]: xcode_data_info, extra_output_provider, link_cmd_debug_info, - ] + index_store_info, + ] + bundle_result.providers + validation_providers -def _xcode_populate_attributes(ctx, processed_info_plist: Artifact) -> dict[str, typing.Any]: +def _xcode_populate_attributes(ctx, processed_info_plist: Artifact, info_plist_relative_path: str) -> dict[str, typing.Any]: data = { - "deployment_version": get_bundle_min_target_version(ctx, get_default_binary_dep(ctx.attrs.binary)), - "info_plist": ctx.attrs.info_plist, - "processed_info_plist": processed_info_plist, - "product_name": get_product_name(ctx), - "sdk": get_apple_sdk_name(ctx), + XcodeDataInfoKeys.DEPLOYMENT_VERSION: get_bundle_min_target_version(ctx, get_default_binary_dep(ctx.attrs.binary)), + XcodeDataInfoKeys.INFO_PLIST: ctx.attrs.info_plist, + XcodeDataInfoKeys.PROCESSED_INFO_PLIST: processed_info_plist, + XcodeDataInfoKeys.INFO_PLIST_RELATIVE_PATH: info_plist_relative_path, + XcodeDataInfoKeys.PRODUCT_NAME: get_product_name(ctx), + XcodeDataInfoKeys.SDK: get_apple_sdk_name(ctx), } apple_xcode_data_add_xctoolchain(ctx, data) return data +def _get_debug_validators_subtargets_and_providers(ctx, artifacts: ArtifactTSet) -> (list[Provider], dict[str, list[Provider]]): + name_to_debug_validator_artifact = get_debug_artifacts_validators(ctx, artifacts) + if not name_to_debug_validator_artifact: + return ([], {}) + + return ( + [ + ValidationInfo( + validations = [ + ValidationSpec( + name = name, + validation_result = artifact, + ) + for name, artifact in name_to_debug_validator_artifact.items() + ], + ), + ], + { + "debug-artifacts-validators": [ + DefaultInfo( + default_outputs = name_to_debug_validator_artifact.values(), + sub_targets = { + name: [DefaultInfo(default_output = artifact)] + for name, artifact in name_to_debug_validator_artifact.items() + }, + ), + ], + }, + ) + def _linker_maps_data(ctx: AnalysisContext) -> (Artifact, AppleBundleLinkerMapInfo): deps_with_binary = ctx.attrs.deps + get_flattened_binary_deps(ctx.attrs.binary) deps_linker_map_infos = filter( @@ -401,6 +537,11 @@ def _link_command_debug_data(ctx: AnalysisContext) -> (Artifact, LinkCommandDebu link_cmd_debug_output_file = make_link_command_debug_output_json_info(ctx, all_debug_infos) return link_cmd_debug_output_file, LinkCommandDebugOutputInfo(debug_outputs = all_debug_infos) +def _index_store_data(ctx: AnalysisContext) -> (dict[str, list[Provider]], IndexStoreInfo): + deps_with_binary = ctx.attrs.deps + get_flattened_binary_deps(ctx.attrs.binary) + index_store_subtargets, index_store_info = create_index_store_subtargets_and_provider(ctx, [], deps_with_binary) + return index_store_subtargets, index_store_info + def _extra_output_provider(ctx: AnalysisContext) -> AppleBundleExtraOutputsInfo: # Collect the sub_targets for this bundle's binary that are extra_linker_outputs. 
extra_outputs = [] @@ -428,6 +569,7 @@ def generate_install_data( data = { "fullyQualifiedName": ctx.label, "info_plist": plist_path, + "platform_name": get_apple_sdk_name(ctx), "use_idb": "true", ## TODO(T110665037): read from .buckconfig # We require the user to have run `xcode-select` and `/var/db/xcode_select_link` to symlink diff --git a/prelude/apple/apple_bundle_attrs.bzl b/prelude/apple/apple_bundle_attrs.bzl index 26048160c9a16..c9c34a47d1a24 100644 --- a/prelude/apple/apple_bundle_attrs.bzl +++ b/prelude/apple/apple_bundle_attrs.bzl @@ -5,8 +5,57 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -def get_apple_info_plist_build_system_identification_attrs(): +load("@prelude//apple:apple_platforms.bzl", "APPLE_PLATFORMS_KEY") +load("@prelude//apple:apple_rules_impl_utility.bzl", "apple_bundle_extra_attrs") +load("@prelude//apple:resource_groups.bzl", "RESOURCE_GROUP_MAP_ATTR") +load("@prelude//decls/apple_rules.bzl", "AppleBundleExtension") + +def _apple_bundle_base_attrs(): return { - "info_plist_identify_build_system": attrs.option(attrs.bool(), default = None), - "_info_plist_identify_build_system_default": attrs.bool(default = False), + # Attributes come from `attributes.bzl`, but since it's autogenerated, we cannot easily abstract + "asset_catalogs_compilation_options": attrs.dict(key = attrs.string(), value = attrs.any(), default = {}), + "codesign_flags": attrs.list(attrs.string(), default = []), + "codesign_identity": attrs.option(attrs.string(), default = None), + "contacts": attrs.list(attrs.string(), default = []), + "default_host_platform": attrs.option(attrs.configuration_label(), default = None), + "default_platform": attrs.option(attrs.string(), default = None), + "deps": attrs.list(attrs.dep(), default = []), + "extension": attrs.one_of(attrs.enum(AppleBundleExtension), attrs.string()), + "ibtool_flags": attrs.option(attrs.list(attrs.string()), default = None), + "ibtool_module_flag": attrs.option(attrs.bool(), default = None), + "incremental_bundling_enabled": attrs.option(attrs.bool(), default = None), + "info_plist": attrs.source(), + "info_plist_substitutions": attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False, default = {}), + "labels": attrs.list(attrs.string(), default = []), + "licenses": attrs.list(attrs.source(), default = []), + "platform_binary": attrs.option(attrs.list(attrs.tuple(attrs.regex(), attrs.dep())), default = None), + "product_name": attrs.option(attrs.string(), default = None), + "resource_group": attrs.option(attrs.string(), default = None), + "resource_group_map": attrs.option(RESOURCE_GROUP_MAP_ATTR, default = None), + "skip_copying_swift_stdlib": attrs.option(attrs.bool(), default = None), + "try_skip_code_signing": attrs.option(attrs.bool(), default = None), + "xcode_product_type": attrs.option(attrs.string(), default = None), } + +def _apple_bundle_default_attrs(): + attributes = {} + attributes.update(_apple_bundle_base_attrs()) + attributes.update(apple_bundle_extra_attrs()) + attributes.update({ + APPLE_PLATFORMS_KEY: attrs.dict(key = attrs.string(), value = attrs.dep(), sorted = False, default = {}), + }) + return attributes + +def apple_watchos_bundle_attrs(): + attributes = _apple_bundle_default_attrs() + attributes.update({ + "bundle_type": attrs.string(default = "watchapp"), + }) + return attributes + +def apple_macos_bundle_attrs(): + attributes = _apple_bundle_default_attrs() + attributes.update({ + "bundle_type": attrs.string(default = "default"),
}) + return attributes diff --git a/prelude/apple/apple_bundle_config.bzl b/prelude/apple/apple_bundle_config.bzl index d81c0e4a98f48..002dc35399b4c 100644 --- a/prelude/apple/apple_bundle_config.bzl +++ b/prelude/apple/apple_bundle_config.bzl @@ -5,35 +5,39 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +load(":apple_code_signing_types.bzl", "CodeSignConfiguration") + def _maybe_get_bool(config: str, default: [None, bool]) -> [None, bool]: result = read_root_config("apple", config, None) if result == None: return default return result.lower() == "true" -def _get_bundling_path_conflicts_check_enabled(): - check_enabled = _maybe_get_bool("bundling_path_conflicts_check_enabled", None) - if check_enabled != None: - return check_enabled +def _get_code_signing_configuration() -> str: + is_dry_run = _maybe_get_bool("dry_run_code_signing", False) + + # This is a kill switch for the feature, it can also be disabled by setting + # `apple.fast_adhoc_signing_enabled=false` in a global buckconfig file. + is_fast_adhoc_signing_enabled = _maybe_get_bool("fast_adhoc_signing_enabled", True) - return select({ - "DEFAULT": False, - "ovr_config//features/apple/constraints:bundling_path_conflicts_check_disabled": False, - "ovr_config//features/apple/constraints:bundling_path_conflicts_check_enabled": True, - }) + if is_dry_run: + return CodeSignConfiguration("dry-run").value + elif is_fast_adhoc_signing_enabled: + return CodeSignConfiguration("fast-adhoc").value + else: + return CodeSignConfiguration("none").value def apple_bundle_config() -> dict[str, typing.Any]: return { "_bundling_cache_buster": read_root_config("apple", "bundling_cache_buster", None), "_bundling_log_file_enabled": _maybe_get_bool("bundling_log_file_enabled", True), "_bundling_log_file_level": read_root_config("apple", "bundling_log_file_level", None), - "_bundling_path_conflicts_check_enabled": _get_bundling_path_conflicts_check_enabled(), + "_code_signing_configuration": _get_code_signing_configuration(), + "_codesign_identities_command_override": read_root_config("apple", "codesign_identities_command_override", None), "_codesign_type": read_root_config("apple", "codesign_type_override", None), "_compile_resources_locally_override": _maybe_get_bool("compile_resources_locally_override", None), - "_dry_run_code_signing": _maybe_get_bool("dry_run_code_signing", False), - # This is a kill switch for the feature, it can also be disabled by setting - # `apple.fast_adhoc_signing_enabled=false` in a global buckconfig file. 
- "_fast_adhoc_signing_enabled": _maybe_get_bool("fast_adhoc_signing_enabled", True), + "_embed_provisioning_profile_when_adhoc_code_signing": _maybe_get_bool("embed_provisioning_profile_when_adhoc_code_signing", None), + "_fast_provisioning_profile_parsing_enabled": _maybe_get_bool("fast_provisioning_profile_parsing_enabled", False), "_incremental_bundling_enabled": _maybe_get_bool("incremental_bundling_enabled", True), "_info_plist_identify_build_system_default": _maybe_get_bool("info_plist_identify_build_system", True), "_profile_bundling_enabled": _maybe_get_bool("profile_bundling_enabled", False), diff --git a/prelude/apple/apple_bundle_destination.bzl b/prelude/apple/apple_bundle_destination.bzl index e801b388ea553..172ddb9150d49 100644 --- a/prelude/apple/apple_bundle_destination.bzl +++ b/prelude/apple/apple_bundle_destination.bzl @@ -22,9 +22,10 @@ AppleBundleDestination = enum( "headers", "modules", "quicklook", - "watchkitstub", "bundleroot", "loginitems", + "appclips", + "extensionkit_extensions", ) AppleBundleDestinationPaths = record( @@ -38,9 +39,10 @@ AppleBundleDestinationPaths = record( headers = field(str, ""), modules = field(str, ""), quicklook = field(str, ""), - watchkitstub = field(str, ""), bundleroot = field(str, ""), loginitems = field(str, ""), + appclips = field(str, ""), + extensionkit_extensions = field(str, ""), ) _IOSBundleDestinationPaths = AppleBundleDestinationPaths( @@ -49,7 +51,8 @@ _IOSBundleDestinationPaths = AppleBundleDestinationPaths( xpcservices = "XPCServices", watchapp = "Watch", quicklook = "Library/QuickLook", - watchkitstub = "_WatchKitStub", + appclips = "AppClips", + extensionkit_extensions = "Extensions", ) _IOSFrameworkBundleDestinationPaths = AppleBundleDestinationPaths( @@ -71,7 +74,6 @@ _MacOSBundleDestinationPaths = AppleBundleDestinationPaths( headers = macOS_content_path, modules = macOS_content_path, quicklook = paths.join(macOS_content_path, "Library/QuickLook"), - watchkitstub = macOS_content_path, bundleroot = macOS_content_path, loginitems = paths.join(macOS_content_path, "Library/LoginItems"), ) @@ -79,27 +81,43 @@ _MacOSBundleDestinationPaths = AppleBundleDestinationPaths( _MacOSFrameworkBundleDestinationPaths = AppleBundleDestinationPaths( resources = "Resources", frameworks = "Frameworks", + plugins = "PlugIns", xpcservices = "XPCServices", metadata = "Resources", headers = "Headers", modules = "Modules", ) +macOS_versioned_path = "Versions/A" +_MacOSVersionedFrameworkBundleDestinationPaths = AppleBundleDestinationPaths( + resources = paths.join(macOS_versioned_path, "Resources"), + frameworks = paths.join(macOS_versioned_path, "Frameworks"), + plugins = paths.join(macOS_versioned_path, "PlugIns"), + xpcservices = paths.join(macOS_versioned_path, "XPCServices"), + metadata = paths.join(macOS_versioned_path, "Resources"), + headers = paths.join(macOS_versioned_path, "Headers"), + modules = paths.join(macOS_versioned_path, "Modules"), + executables = macOS_versioned_path, +) + def _get_apple_bundle_destinations_for_sdk_name(name: str) -> AppleBundleDestinationPaths: if name == "macosx" or name == "maccatalyst": return _MacOSBundleDestinationPaths else: return _IOSBundleDestinationPaths -def _get_apple_framework_bundle_destinations_for_sdk_name(name: str) -> AppleBundleDestinationPaths: +def _get_apple_framework_bundle_destinations_for_sdk_name(name: str, versioned_macos_bundle: bool) -> AppleBundleDestinationPaths: if name == "macosx" or name == "maccatalyst": - return _MacOSFrameworkBundleDestinationPaths + if 
versioned_macos_bundle: + return _MacOSVersionedFrameworkBundleDestinationPaths + else: + return _MacOSFrameworkBundleDestinationPaths else: return _IOSFrameworkBundleDestinationPaths -def bundle_relative_path_for_destination(destination: AppleBundleDestination, sdk_name: str, extension: str) -> str: +def bundle_relative_path_for_destination(destination: AppleBundleDestination, sdk_name: str, extension: str, versioned_macos_bundle: bool) -> str: if extension == "framework": - bundle_destinations = _get_apple_framework_bundle_destinations_for_sdk_name(sdk_name) + bundle_destinations = _get_apple_framework_bundle_destinations_for_sdk_name(sdk_name, versioned_macos_bundle) else: bundle_destinations = _get_apple_bundle_destinations_for_sdk_name(sdk_name) @@ -109,6 +127,8 @@ def bundle_relative_path_for_destination(destination: AppleBundleDestination, sd return bundle_destinations.frameworks elif destination.value == "executables": return bundle_destinations.executables + elif destination.value == "extensionkit_extensions": + return bundle_destinations.extensionkit_extensions elif destination.value == "plugins": return bundle_destinations.plugins elif destination.value == "xpcservices": @@ -123,10 +143,10 @@ def bundle_relative_path_for_destination(destination: AppleBundleDestination, sd return bundle_destinations.modules elif destination.value == "quicklook": return bundle_destinations.quicklook - elif destination.value == "watchkitstub": - return bundle_destinations.watchkitstub elif destination.value == "bundleroot": return bundle_destinations.bundleroot elif destination.value == "loginitems": return bundle_destinations.loginitems + elif destination.value == "appclips": + return bundle_destinations.appclips fail("Unsupported Apple bundle destination {}".format(destination)) diff --git a/prelude/apple/apple_bundle_part.bzl b/prelude/apple/apple_bundle_part.bzl index 9447cd8231258..d9b5b5e14ea8a 100644 --- a/prelude/apple/apple_bundle_part.bzl +++ b/prelude/apple/apple_bundle_part.bzl @@ -6,11 +6,14 @@ # of this source tree. load("@prelude//:paths.bzl", "paths") -load("@prelude//utils:arglike.bzl", "ArgLike") -load("@prelude//utils:utils.bzl", "expect") +load("@prelude//utils:expect.bzl", "expect") +load("@prelude//utils:utils.bzl", "value_or") load(":apple_bundle_destination.bzl", "AppleBundleDestination", "bundle_relative_path_for_destination") -load(":apple_bundle_utility.bzl", "get_default_binary_dep", "get_extension_attr", "get_product_name") -load(":apple_code_signing_types.bzl", "AppleEntitlementsInfo", "CodeSignType") +load(":apple_bundle_types.bzl", "AppleBundleManifest", "AppleBundleManifestInfo", "AppleBundleManifestLogFiles") +load(":apple_bundle_utility.bzl", "get_extension_attr", "get_product_name") +load(":apple_code_signing_types.bzl", "CodeSignConfiguration", "CodeSignType") +load(":apple_entitlements.bzl", "get_entitlements_codesign_args", "should_include_entitlements") +load(":apple_error_handler.bzl", "apple_build_error_handler") load(":apple_sdk.bzl", "get_apple_sdk_name") load(":apple_sdk_metadata.bzl", "get_apple_sdk_metadata_for_sdk_name") load(":apple_swift_stdlib.bzl", "should_copy_swift_stdlib") @@ -30,12 +33,21 @@ AppleBundlePart = record( new_name = field([str, None], None), # Marks parts which should be code signed separately from the whole bundle. codesign_on_copy = field(bool, False), + # Entitlements to use when this part is code signed separately. 
+ codesign_entitlements = field(Artifact | None, None), + # If present, these flags replace the codesign flags when this part is code signed separately. + codesign_flags_override = field([list[str], None], None), ) SwiftStdlibArguments = record( primary_binary_rel_path = field(str), ) +AppleBundleConstructionResult = record( + providers = field(list[Provider]), + sub_targets = field(dict[str, list[Provider]]), +) + def bundle_output(ctx: AnalysisContext) -> Artifact: bundle_dir_name = get_bundle_dir_name(ctx) output = ctx.actions.declare_output(bundle_dir_name) @@ -46,27 +58,36 @@ def assemble_bundle( bundle: Artifact, parts: list[AppleBundlePart], info_plist_part: [AppleBundlePart, None], - swift_stdlib_args: [SwiftStdlibArguments, None]) -> dict[str, list[Provider]]: + swift_stdlib_args: [SwiftStdlibArguments, None], + extra_hidden: list[Artifact] = [], + skip_adhoc_signing: bool = False, + incremental_bundling_override = None) -> AppleBundleConstructionResult: """ Returns extra subtargets related to bundling. """ all_parts = parts + [info_plist_part] if info_plist_part else [] - spec_file = _bundle_spec_json(ctx, all_parts) + codesign_type = _detect_codesign_type(ctx, skip_adhoc_signing) + spec_file = _bundle_spec_json(ctx, all_parts, codesign_type) tools = ctx.attrs._apple_tools[AppleToolsInfo] tool = tools.assemble_bundle codesign_args = [] - codesign_type = _detect_codesign_type(ctx) codesign_tool = ctx.attrs._apple_toolchain[AppleToolchainInfo].codesign - if ctx.attrs._dry_run_code_signing: + code_signing_configuration = CodeSignConfiguration(ctx.attrs._code_signing_configuration) + if code_signing_configuration == CodeSignConfiguration("dry-run"): codesign_configuration_args = ["--codesign-configuration", "dry-run"] codesign_tool = tools.dry_codesign_tool - elif ctx.attrs._fast_adhoc_signing_enabled: - codesign_configuration_args = ["--codesign-configuration", "fast-adhoc"] - else: + elif code_signing_configuration == CodeSignConfiguration("fast-adhoc"): + if _get_fast_adhoc_signing_enabled(ctx): + codesign_configuration_args = ["--codesign-configuration", "fast-adhoc"] + else: + codesign_configuration_args = [] + elif code_signing_configuration == CodeSignConfiguration("none"): codesign_configuration_args = [] + else: + fail("Code signing configuration `{}` not supported".format(code_signing_configuration)) codesign_required = codesign_type.value in ["distribution", "adhoc"] swift_support_required = swift_stdlib_args and (not ctx.attrs.skip_copying_swift_stdlib) and should_copy_swift_stdlib(bundle.extension) @@ -82,9 +103,13 @@ def assemble_bundle( "--binary-destination", swift_stdlib_args.primary_binary_rel_path, "--frameworks-destination", - bundle_relative_path_for_destination(AppleBundleDestination("frameworks"), sdk_name, ctx.attrs.extension), + bundle_relative_path_for_destination(AppleBundleDestination("frameworks"), sdk_name, ctx.attrs.extension, ctx.attrs.versioned_macos_bundle), "--plugins-destination", - bundle_relative_path_for_destination(AppleBundleDestination("plugins"), sdk_name, ctx.attrs.extension), + bundle_relative_path_for_destination(AppleBundleDestination("plugins"), sdk_name, ctx.attrs.extension, ctx.attrs.versioned_macos_bundle), + "--extensionkit-extensions-destination", + bundle_relative_path_for_destination(AppleBundleDestination("extensionkit_extensions"), sdk_name, ctx.attrs.extension, ctx.attrs.versioned_macos_bundle), + "--appclips-destination", + bundle_relative_path_for_destination(AppleBundleDestination("appclips"), sdk_name,
ctx.attrs.extension, ctx.attrs.versioned_macos_bundle), "--swift-stdlib-command", cmd_args(ctx.attrs._apple_toolchain[AppleToolchainInfo].swift_toolchain_info.swift_stdlib_tool, delimiter = " ", quote = "shell"), "--sdk-root", @@ -100,7 +125,8 @@ def assemble_bundle( codesign_tool, ] - if codesign_type.value != "adhoc": + profile_selection_required = _should_embed_provisioning_profile(ctx, codesign_type) + if profile_selection_required: provisioning_profiles = ctx.attrs._provisioning_profiles[DefaultInfo] expect( len(provisioning_profiles.default_outputs) == 1, @@ -110,14 +136,20 @@ def assemble_bundle( codesign_args.extend(provisioning_profiles_args) identities_command = ctx.attrs._apple_toolchain[AppleToolchainInfo].codesign_identities_command + if ctx.attrs._codesign_identities_command_override: + identities_command = ctx.attrs._codesign_identities_command_override[RunInfo] identities_command_args = ["--codesign-identities-command", cmd_args(identities_command)] if identities_command else [] codesign_args.extend(identities_command_args) - else: + + if codesign_type.value == "adhoc": codesign_args.append("--ad-hoc") if ctx.attrs.codesign_identity: codesign_args.extend(["--ad-hoc-codesign-identity", ctx.attrs.codesign_identity]) + if profile_selection_required: + codesign_args.append("--embed-provisioning-profile-when-signing-ad-hoc") - codesign_args += _get_entitlements_codesign_args(ctx, codesign_type) + codesign_args += get_entitlements_codesign_args(ctx, codesign_type) + codesign_args += _get_extra_codesign_args(ctx) info_plist_args = [ "--info-plist-source", @@ -126,24 +158,42 @@ def assemble_bundle( get_apple_bundle_part_relative_destination_path(ctx, info_plist_part), ] if info_plist_part else [] codesign_args.extend(info_plist_args) + + if ctx.attrs.provisioning_profile_filter: + codesign_args.extend([ + "--provisioning-profile-filter", + ctx.attrs.provisioning_profile_filter, + ]) + + strict_provisioning_profile_search = value_or(ctx.attrs.strict_provisioning_profile_search, ctx.attrs._strict_provisioning_profile_search_default) + if strict_provisioning_profile_search: + codesign_args.append("--strict-provisioning-profile-search") elif codesign_type.value == "skip": pass else: fail("Code sign type `{}` not supported".format(codesign_type)) - command = cmd_args([ - tool, - "--output", - bundle.as_output(), - "--spec", - spec_file, - ] + codesign_args + platform_args + swift_args) - command.hidden([part.source for part in all_parts]) + command = cmd_args( + [ + tool, + "--output", + bundle.as_output(), + "--spec", + spec_file, + ] + codesign_args + platform_args + swift_args, + hidden = + [part.source for part in all_parts] + + [part.codesign_entitlements for part in all_parts if part.codesign_entitlements] + + # Ensures any genrule deps get built; such targets are used for validation + extra_hidden, ) run_incremental_args = {} incremental_state = ctx.actions.declare_output("incremental_state.json").as_output() # Fallback to value from buckconfig incremental_bundling_enabled = ctx.attrs.incremental_bundling_enabled or ctx.attrs._incremental_bundling_enabled + if incremental_bundling_override != None: + incremental_bundling_enabled = incremental_bundling_override if incremental_bundling_enabled: command.add("--incremental-state", incremental_state) @@ -157,14 +207,18 @@ def assemble_bundle( # overwrite file with incremental state so if previous and next builds are incremental # (as opposed to the current non-incremental one), next one won't assume there is a # valid incremental
state. - command.hidden(ctx.actions.write_json(incremental_state, {})) + command.add(cmd_args(hidden = ctx.actions.write_json(incremental_state, {}))) category = "apple_assemble_bundle" if ctx.attrs._profile_bundling_enabled: profile_output = ctx.actions.declare_output("bundling_profile.txt").as_output() command.add("--profile-output", profile_output) + if ctx.attrs._fast_provisioning_profile_parsing_enabled: + command.add("--fast-provisioning-profile-parsing") + subtargets = {} + bundling_log_output = None if ctx.attrs._bundling_log_file_enabled: bundling_log_output = ctx.actions.declare_output("bundling_log.txt") command.add("--log-file", bundling_log_output.as_output()) @@ -172,11 +226,38 @@ def assemble_bundle( command.add("--log-level-file", ctx.attrs._bundling_log_file_level) subtargets["bundling-log"] = [DefaultInfo(default_output = bundling_log_output)] - if ctx.attrs._bundling_path_conflicts_check_enabled: - command.add("--check-conflicts") - + command.add("--check-conflicts") + if ctx.attrs.versioned_macos_bundle: + command.add("--versioned-if-macos") command.add(codesign_configuration_args) + command_json = ctx.actions.declare_output("bundling_command.json") + command_json_cmd_args = ctx.actions.write_json(command_json, command, with_inputs = True, pretty = True) + subtargets["command"] = [DefaultInfo(default_output = command_json, other_outputs = [command_json_cmd_args])] + + bundle_manifest_log_file_map = { + ctx.label: AppleBundleManifestLogFiles( + command_file = command_json, + spec_file = spec_file, + log_file = bundling_log_output, + ), + } + + if hasattr(ctx.attrs, "deps"): + for dep in ctx.attrs.deps: + dep_manifest_info = dep.get(AppleBundleManifestInfo) + if dep_manifest_info: + bundle_manifest_log_file_map.update(dep_manifest_info.manifest.log_file_map) + + bundle_manifest = AppleBundleManifest(log_file_map = bundle_manifest_log_file_map) + bundle_manifest_json_object = _convert_bundle_manifest_to_json_object(bundle_manifest) + + bundle_manifest_json_file = ctx.actions.declare_output("bundle_manifest.json") + bundle_manifest_cmd_args = ctx.actions.write_json(bundle_manifest_json_file, bundle_manifest_json_object, with_inputs = True, pretty = True) + subtargets["manifest"] = [DefaultInfo(default_output = bundle_manifest_json_file, other_outputs = [bundle_manifest_cmd_args])] + + providers = [AppleBundleManifestInfo(manifest = bundle_manifest)] + env = {} cache_buster = ctx.attrs._bundling_cache_buster if cache_buster: @@ -189,21 +270,23 @@ def assemble_bundle( prefer_local = not force_local_bundling, category = category, env = env, + error_handler = apple_build_error_handler, **run_incremental_args ) - return subtargets + return AppleBundleConstructionResult(sub_targets = subtargets, providers = providers) def get_bundle_dir_name(ctx: AnalysisContext) -> str: return paths.replace_extension(get_product_name(ctx), "." 
+ get_extension_attr(ctx))
 
 def get_apple_bundle_part_relative_destination_path(ctx: AnalysisContext, part: AppleBundlePart) -> str:
-    bundle_relative_path = bundle_relative_path_for_destination(part.destination, get_apple_sdk_name(ctx), ctx.attrs.extension)
+    bundle_relative_path = bundle_relative_path_for_destination(part.destination, get_apple_sdk_name(ctx), ctx.attrs.extension, ctx.attrs.versioned_macos_bundle)
     destination_file_or_directory_name = part.new_name if part.new_name != None else paths.basename(part.source.short_path)
     return paths.join(bundle_relative_path, destination_file_or_directory_name)
 
 # Returns JSON to be passed into the bundle assembling tool. It should contain a dictionary which maps bundle relative destination paths to source paths.
-def _bundle_spec_json(ctx: AnalysisContext, parts: list[AppleBundlePart]) -> Artifact:
+def _bundle_spec_json(ctx: AnalysisContext, parts: list[AppleBundlePart], codesign_type: CodeSignType) -> Artifact:
     specs = []
+    include_entitlements = should_include_entitlements(ctx, codesign_type)
 
     for part in parts:
         part_spec = {
@@ -212,50 +295,72 @@ def _bundle_spec_json(ctx: AnalysisContext, parts: list[AppleBundlePart]) -> Art
         }
         if part.codesign_on_copy:
             part_spec["codesign_on_copy"] = True
+        if include_entitlements and part.codesign_entitlements:
+            part_spec["codesign_entitlements"] = part.codesign_entitlements
+        if part.codesign_flags_override:
+            part_spec["codesign_flags_override"] = part.codesign_flags_override
         specs.append(part_spec)
 
-    return ctx.actions.write_json("bundle_spec.json", specs)
+    return ctx.actions.write_json("bundle_spec.json", specs, pretty = True)
 
-def _detect_codesign_type(ctx: AnalysisContext) -> CodeSignType:
-    if ctx.attrs.extension not in ["app", "appex", "xctest"]:
-        # Only code sign application bundles, extensions and test bundles
-        return CodeSignType("skip")
+def _get_codesign_type_from_attribs(ctx: AnalysisContext) -> [CodeSignType, None]:
+    # Target-level attribute takes highest priority
+    if ctx.attrs.codesign_type:
+        return CodeSignType(ctx.attrs.codesign_type)
 
+    # Config-based global default
     if ctx.attrs._codesign_type:
         return CodeSignType(ctx.attrs._codesign_type)
-    sdk_name = get_apple_sdk_name(ctx)
-    is_ad_hoc_sufficient = get_apple_sdk_metadata_for_sdk_name(sdk_name).is_ad_hoc_code_sign_sufficient
-    return CodeSignType("adhoc" if is_ad_hoc_sufficient else "distribution")
+    return None
+
+def _detect_codesign_type(ctx: AnalysisContext, skip_adhoc_signing: bool) -> CodeSignType:
+    def compute_codesign_type():
+        if ctx.attrs.extension not in ["app", "appex", "xctest", "driver"]:
+            # Only code sign application bundles, extensions and test bundles
+            return CodeSignType("skip")
+
+        codesign_type_attrib = _get_codesign_type_from_attribs(ctx)
+        if codesign_type_attrib != None:
+            return codesign_type_attrib
 
-def _entitlements_file(ctx: AnalysisContext) -> [Artifact, None]:
-    if hasattr(ctx.attrs, "entitlements_file"):
-        # Bundling `apple_test` which doesn't have a binary to provide the entitlements, so they are provided via `entitlements_file` attribute directly.
- return ctx.attrs.entitlements_file + sdk_name = get_apple_sdk_name(ctx) + is_ad_hoc_sufficient = get_apple_sdk_metadata_for_sdk_name(sdk_name).is_ad_hoc_code_sign_sufficient + return CodeSignType("adhoc" if is_ad_hoc_sufficient else "distribution") - if not ctx.attrs.binary: - return None + codesign_type = compute_codesign_type() + if skip_adhoc_signing and codesign_type.value == "adhoc": + codesign_type = CodeSignType("skip") - # The `binary` attribute can be either an apple_binary or a dynamic library from apple_library - binary_entitlement_info = get_default_binary_dep(ctx.attrs.binary)[AppleEntitlementsInfo] - if binary_entitlement_info and binary_entitlement_info.entitlements_file: - return binary_entitlement_info.entitlements_file + return codesign_type - return ctx.attrs._codesign_entitlements +def _get_extra_codesign_args(ctx: AnalysisContext) -> list[str]: + codesign_args = ctx.attrs.codesign_flags if hasattr(ctx.attrs, "codesign_flags") else [] + return ["--codesign-args={}".format(flag) for flag in codesign_args] -def _should_include_entitlements(ctx: AnalysisContext, codesign_type: CodeSignType) -> bool: +def _should_embed_provisioning_profile(ctx: AnalysisContext, codesign_type: CodeSignType) -> bool: if codesign_type.value == "distribution": return True if codesign_type.value == "adhoc": # The config-based override value takes priority over target value - if ctx.attrs._use_entitlements_when_adhoc_code_signing != None: - return ctx.attrs._use_entitlements_when_adhoc_code_signing - return ctx.attrs.use_entitlements_when_adhoc_code_signing + if ctx.attrs._embed_provisioning_profile_when_adhoc_code_signing != None: + return ctx.attrs._embed_provisioning_profile_when_adhoc_code_signing + return ctx.attrs.embed_provisioning_profile_when_adhoc_code_signing return False -def _get_entitlements_codesign_args(ctx: AnalysisContext, codesign_type: CodeSignType) -> list[ArgLike]: - include_entitlements = _should_include_entitlements(ctx, codesign_type) - maybe_entitlements = _entitlements_file(ctx) if include_entitlements else None - entitlements_args = ["--entitlements", maybe_entitlements] if maybe_entitlements else [] - return entitlements_args +def _convert_bundle_manifest_to_json_object(manifest: AppleBundleManifest) -> dict[Label, typing.Any]: + manifest_dict = {} + for target_label, logs in manifest.log_file_map.items(): + manifest_dict[target_label] = { + "command": logs.command_file, + "log": logs.log_file, + "spec": logs.spec_file, + } + return manifest_dict + +def _get_fast_adhoc_signing_enabled(ctx: AnalysisContext) -> bool: + fast_adhoc_signing_enabled = ctx.attrs.fast_adhoc_signing_enabled + if fast_adhoc_signing_enabled != None: + return fast_adhoc_signing_enabled + return ctx.attrs._fast_adhoc_signing_enabled_default diff --git a/prelude/apple/apple_bundle_resources.bzl b/prelude/apple/apple_bundle_resources.bzl index 858859b9f8112..237e32ecb461d 100644 --- a/prelude/apple/apple_bundle_resources.bzl +++ b/prelude/apple/apple_bundle_resources.bzl @@ -5,10 +5,14 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
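Note (editorial, not part of the patch): the resource-part collection in the file below feeds the bundling logic above, and this change teaches it to emit parts for privacy manifests, C++ sanitizer runtimes, public framework headers, module maps, and Swift library-evolution interfaces. A minimal sketch of a target exercising the new privacy-manifest path (target and file names hypothetical; `privacy_manifest` is the attribute added in this change):

    apple_bundle(
        name = "App",
        binary = ":AppBinary",
        extension = "app",
        info_plist = "Info.plist",
        # Copied into the bundle's resources; renamed to `PrivacyInfo.xcprivacy`
        # if the source file is named differently (see `_copy_privacy_manifest_if_needed` below).
        privacy_manifest = "Privacy/PrivacyInfo.xcprivacy",
    )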
+load("@prelude//:artifacts.bzl", "single_artifact") load("@prelude//:paths.bzl", "paths") -load("@prelude//:resources.bzl", "gather_resources") load("@prelude//apple:apple_toolchain_types.bzl", "AppleToolchainInfo") -load("@prelude//utils:utils.bzl", "expect", "flatten_dict") +load( + "@prelude//linking:link_info.bzl", + "CxxSanitizerRuntimeInfo", +) +load("@prelude//utils:utils.bzl", "flatten_dict") load( ":apple_asset_catalog.bzl", "compile_apple_asset_catalog", @@ -19,18 +23,21 @@ load( ) load(":apple_bundle_destination.bzl", "AppleBundleDestination") load(":apple_bundle_part.bzl", "AppleBundlePart") -load(":apple_bundle_types.bzl", "AppleBundleInfo") -load(":apple_bundle_utility.bzl", "get_bundle_resource_processing_options", "get_extension_attr", "get_product_name") +load(":apple_bundle_types.bzl", "AppleBundleInfo", "AppleBundleTypeAppClip", "AppleBundleTypeDefault", "AppleBundleTypeExtensionKitExtension", "AppleBundleTypeWatchApp") +load(":apple_bundle_utility.bzl", "get_bundle_resource_processing_options", "get_default_binary_dep", "get_extension_attr", "get_flattened_binary_deps", "get_is_watch_bundle", "get_product_name") load(":apple_core_data.bzl", "compile_apple_core_data") load( ":apple_core_data_types.bzl", "AppleCoreDataSpec", # @unused Used as a type ) load(":apple_info_plist.bzl", "process_info_plist", "process_plist") +load(":apple_library.bzl", "AppleLibraryForDistributionInfo") +load(":apple_library_types.bzl", "AppleLibraryInfo") load( ":apple_resource_types.bzl", "AppleResourceDestination", "AppleResourceSpec", # @unused Used as a type + "CxxResourceSpec", # @unused Used as a type ) load(":apple_resource_utility.bzl", "apple_bundle_destination_from_resource_destination") load( @@ -57,21 +64,19 @@ def get_apple_bundle_resource_part_list(ctx: AnalysisContext) -> AppleBundleReso parts = [] parts.extend(_create_pkg_info_if_needed(ctx)) + parts.extend(_copy_privacy_manifest_if_needed(ctx)) - (resource_specs, asset_catalog_specs, core_data_specs, scene_kit_assets_spec) = _select_resources(ctx) + (resource_specs, asset_catalog_specs, core_data_specs, scene_kit_assets_spec, cxx_resource_specs) = _select_resources(ctx) # If we've pulled in native/C++ resources from deps, inline them into the # bundle under the `CxxResources` namespace. 
-    cxx_resources = flatten_dict(gather_resources(
-        label = ctx.label,
-        deps = ctx.attrs.deps,
-    ).values())
+    cxx_resources = flatten_dict([s.resources for s in cxx_resource_specs])
 
     if cxx_resources:
         cxx_res_dir = ctx.actions.copied_dir(
             "CxxResources",
             {
-                name: resource
-                for name, (resource, _) in cxx_resources.items()
+                name: resource.default_output
+                for name, resource in cxx_resources.items()
             },
         )
         resource_specs.append(
@@ -81,6 +86,16 @@ def get_apple_bundle_resource_part_list(ctx: AnalysisContext) -> AppleBundleReso
             ),
         )
 
+    cxx_sanitizer_runtime_info = get_default_binary_dep(ctx.attrs.binary).get(CxxSanitizerRuntimeInfo) if ctx.attrs.binary else None
+    if cxx_sanitizer_runtime_info:
+        runtime_resource_spec = AppleResourceSpec(
+            files = cxx_sanitizer_runtime_info.runtime_files,
+            destination = AppleResourceDestination("frameworks"),
+            # Sanitizer dylibs require signing for the hardened runtime on macOS and for iOS device builds
+            codesign_files_on_copy = True,
+        )
+        resource_specs.append(runtime_resource_spec)
+
     asset_catalog_result = compile_apple_asset_catalog(ctx, asset_catalog_specs)
     if asset_catalog_result != None:
         asset_catalog_part = AppleBundlePart(
@@ -116,6 +131,9 @@ def get_apple_bundle_resource_part_list(ctx: AnalysisContext) -> AppleBundleReso
 
     parts.extend(_copy_resources(ctx, resource_specs))
     parts.extend(_copy_first_level_bundles(ctx))
+    parts.extend(_copy_public_headers(ctx))
+    parts.extend(_copy_module_map(ctx))
+    parts.extend(_copy_swift_library_evolution_support(ctx))
 
     return AppleBundleResourcePartListOutput(
         resource_parts = parts,
@@ -130,10 +148,23 @@ def _create_pkg_info_if_needed(ctx: AnalysisContext) -> list[AppleBundlePart]:
     artifact = ctx.actions.write("PkgInfo", "APPLWRUN\n")
     return [AppleBundlePart(source = artifact, destination = AppleBundleDestination("metadata"))]
 
-def _select_resources(ctx: AnalysisContext) -> ((list[AppleResourceSpec], list[AppleAssetCatalogSpec], list[AppleCoreDataSpec], list[SceneKitAssetsSpec])):
+def _copy_privacy_manifest_if_needed(ctx: AnalysisContext) -> list[AppleBundlePart]:
+    privacy_manifest = ctx.attrs.privacy_manifest
+    if privacy_manifest == None:
+        return []
+
+    # According to Apple docs, the privacy manifest has to be named `PrivacyInfo.xcprivacy`
+    if privacy_manifest.short_path.split("/", 1)[-1] == "PrivacyInfo.xcprivacy":
+        artifact = privacy_manifest
+    else:
+        output = ctx.actions.declare_output("PrivacyInfo.xcprivacy")
+        artifact = ctx.actions.copy_file(output.as_output(), privacy_manifest)
+    return [AppleBundlePart(source = artifact, destination = AppleBundleDestination("resources"))]
+
+def _select_resources(ctx: AnalysisContext) -> ((list[AppleResourceSpec], list[AppleAssetCatalogSpec], list[AppleCoreDataSpec], list[SceneKitAssetsSpec], list[CxxResourceSpec])):
     resource_group_info = get_resource_group_info(ctx)
     if resource_group_info:
-        resource_groups_deps = resource_group_info.implicit_deps
+        resource_groups_deps = resource_group_info.resource_group_to_implicit_deps_mapping.get(ctx.attrs.resource_group, []) if ctx.attrs.resource_group else []
         resource_group_mappings = resource_group_info.mappings
     else:
         resource_groups_deps = []
@@ -142,13 +173,77 @@
     resource_graph = create_resource_graph(
         ctx = ctx,
         labels = [],
-        bundle_binary = ctx.attrs.binary,
+        bundle_binary = get_default_binary_dep(ctx.attrs.binary),
         deps = ctx.attrs.deps + resource_groups_deps,
         exported_deps = [],
     )
     resource_graph_node_map_func = get_resource_graph_node_map_func(resource_graph)
     return get_filtered_resources(ctx.label, resource_graph_node_map_func, ctx.attrs.resource_group, resource_group_mappings)
 
+def _copy_swift_library_evolution_support(ctx: AnalysisContext) -> list[AppleBundlePart]:
+    extension = get_extension_attr(ctx)
+    if extension != "framework":
+        return []
+
+    binary_deps = getattr(ctx.attrs, "binary")
+    if binary_deps == None:
+        return []
+
+    swiftmodule_files = {}
+
+    module_name = None
+    for binary in get_flattened_binary_deps(binary_deps):
+        apple_library_for_distribution_info = binary.get(AppleLibraryForDistributionInfo)
+        if apple_library_for_distribution_info == None:
+            continue
+        module_name = apple_library_for_distribution_info.module_name
+        swiftmodule_files.update({
+            apple_library_for_distribution_info.target_triple + ".swiftinterface": apple_library_for_distribution_info.swiftinterface,
+            apple_library_for_distribution_info.target_triple + ".private.swiftinterface": apple_library_for_distribution_info.private_swiftinterface,
+            apple_library_for_distribution_info.target_triple + ".swiftdoc": apple_library_for_distribution_info.swiftdoc,
+        })
+
+    if len(swiftmodule_files) == 0 or module_name == None:
+        return []
+
+    framework_module_dir = ctx.actions.declare_output(module_name + "framework.swiftmodule", dir = True)
+    ctx.actions.copied_dir(framework_module_dir.as_output(), swiftmodule_files)
+    return [AppleBundlePart(source = framework_module_dir, destination = AppleBundleDestination("modules"), new_name = module_name + ".swiftmodule")]
+
+def _copy_public_headers(ctx: AnalysisContext) -> list[AppleBundlePart]:
+    if not ctx.attrs.copy_public_framework_headers:
+        return []
+    binary_deps = getattr(ctx.attrs, "binary")
+    if binary_deps == None:
+        return []
+
+    binary = get_default_binary_dep(binary_deps)
+    apple_library_info = binary.get(AppleLibraryInfo)
+    if apple_library_info == None:
+        return []
+    tset = apple_library_info.public_framework_headers
+
+    bundle_parts = []
+    if tset._tset:
+        for public_framework_headers in tset._tset.traverse():
+            for public_framework_header in public_framework_headers:
+                for artifact in public_framework_header.artifacts:
+                    bundle_parts.append(AppleBundlePart(source = artifact, destination = AppleBundleDestination("headers")))
+
+    if apple_library_info.swift_header:
+        bundle_parts.append(AppleBundlePart(source = apple_library_info.swift_header, destination = AppleBundleDestination("headers")))
+
+    return bundle_parts
+
+def _copy_module_map(ctx: AnalysisContext) -> list[AppleBundlePart]:
+    extension = get_extension_attr(ctx)
+    if extension != "framework":
+        return []
+    module_map = ctx.attrs.module_map
+    if not module_map:
+        return []
+    return [AppleBundlePart(source = module_map, destination = AppleBundleDestination("modules"))]
+
 def _copy_resources(ctx: AnalysisContext, specs: list[AppleResourceSpec]) -> list[AppleBundlePart]:
     result = []
 
@@ -156,10 +251,12 @@ def _copy_resources(ctx: AnalysisContext, specs: list[AppleResourceSpec]) -> lis
         bundle_destination = apple_bundle_destination_from_resource_destination(spec.destination)
         result += [_process_apple_resource_file_if_needed(
             ctx = ctx,
-            file = _extract_single_artifact(x),
+            file = single_artifact(x).default_output,
             destination = bundle_destination,
             destination_relative_path = None,
             codesign_on_copy = spec.codesign_files_on_copy,
+            codesign_entitlements = spec.codesign_entitlements,
+            codesign_flags_override = spec.codesign_flags_override,
         ) for x in spec.files]
         result +=
_bundle_parts_for_dirs(spec.dirs, bundle_destination, False) result += _bundle_parts_for_dirs(spec.content_dirs, bundle_destination, True) @@ -167,20 +264,6 @@ def _copy_resources(ctx: AnalysisContext, specs: list[AppleResourceSpec]) -> lis return result -def _extract_single_artifact(x: [Dependency, Artifact]) -> Artifact: - if type(x) == "artifact": - return x - else: - # Otherwise, this is a dependency, so extract the resource and other - # resources from the `DefaultInfo` provider. - info = x[DefaultInfo] - expect( - len(info.default_outputs) == 1, - "expected exactly one default output from {} ({})" - .format(x, info.default_outputs), - ) - return info.default_outputs[0] - def _copy_first_level_bundles(ctx: AnalysisContext) -> list[AppleBundlePart]: first_level_bundle_infos = filter(None, [dep.get(AppleBundleInfo) for dep in ctx.attrs.deps]) return filter(None, [_copied_bundle_spec(info) for info in first_level_bundle_infos]) @@ -192,18 +275,40 @@ def _copied_bundle_spec(bundle_info: AppleBundleInfo) -> [None, AppleBundlePart] destination = AppleBundleDestination("frameworks") codesign_on_copy = True elif bundle_extension == ".app": - expect(bundle_info.is_watchos != None, "Field should be set for bundles with extension {}".format(bundle_extension)) - destination = AppleBundleDestination("watchapp" if bundle_info.is_watchos else "plugins") + app_destination_type = "plugins" + if bundle_info.bundle_type == AppleBundleTypeWatchApp: + app_destination_type = "watchapp" + elif bundle_info.bundle_type == AppleBundleTypeAppClip: + app_destination_type = "appclips" + elif bundle_info.bundle_type != AppleBundleTypeDefault: + fail("Unhandled bundle type `{}`".format(bundle_info.bundle_type)) + destination = AppleBundleDestination(app_destination_type) codesign_on_copy = False elif bundle_extension == ".appex": - destination = AppleBundleDestination("plugins") + # We have two types of extensions: App Extensions and ExtensionKit Extensions + # + # +----------------------+-------------------------------+-------------------------------+ + # | | App Extension | ExtensionKit Extension | + # +----------------------+-------------------------------+-------------------------------+ + # | xcode project type | com.apple.product-type.app- | com.apple.product-type. 
| + # | | extension | extensionkit-extension | + # +----------------------+-------------------------------+-------------------------------+ + # | Info.plist | NSExtensions | EXAppExtensionAttributes | + # +----------------------+-------------------------------+-------------------------------+ + # | bundle folder | *.app/PlugIns | *.app/Extensions | + # +----------------------+-------------------------------+-------------------------------+ + # + if bundle_info.bundle_type == AppleBundleTypeExtensionKitExtension: + destination = AppleBundleDestination("extensionkit_extensions") + else: + destination = AppleBundleDestination("plugins") codesign_on_copy = False elif bundle_extension == ".qlgenerator": destination = AppleBundleDestination("quicklook") - codesign_on_copy = False + codesign_on_copy = True elif bundle_extension == ".xpc": destination = AppleBundleDestination("xpcservices") - codesign_on_copy = False + codesign_on_copy = True else: fail("Extension `{}` is not yet supported.".format(bundle_extension)) return AppleBundlePart( @@ -290,12 +395,29 @@ def _run_ibtool( ], allow_args = True, ) - command = cmd_args(["/bin/sh", wrapper_script]).hidden([ibtool_command, output]) + command = cmd_args(["/bin/sh", wrapper_script], hidden = [ibtool_command, output]) else: command = ibtool_command processing_options = get_bundle_resource_processing_options(ctx) - ctx.actions.run(command, prefer_local = processing_options.prefer_local, allow_cache_upload = processing_options.allow_cache_upload, category = "apple_ibtool", identifier = action_identifier) + ctx.actions.run( + command, + prefer_local = processing_options.prefer_local, + prefer_remote = processing_options.prefer_remote, + allow_cache_upload = processing_options.allow_cache_upload, + category = "apple_ibtool", + identifier = action_identifier, + ) + +def _ibtool_identifier(action: str, raw_file: Artifact) -> str: + "*.xib files can live in .lproj folders and have the same name, so we need to split the id" + identifier_parts = [] + variant_name = _get_variant_dirname(raw_file) + if variant_name: + # variant_name is like "zh_TW.lproj", and we only want "zh_TW" + identifier_parts.append(variant_name) + identifier_parts += [raw_file.basename] + return "ibtool_" + action + " " + "/".join(identifier_parts) def _compile_ui_resource( ctx: AnalysisContext, @@ -309,7 +431,7 @@ def _compile_ui_resource( output = output, action_flags = ["--compile"], target_device = target_device, - action_identifier = "compile_" + raw_file.basename, + action_identifier = _ibtool_identifier("compile", raw_file), output_is_dir = output_is_dir, ) @@ -325,7 +447,7 @@ def _link_ui_resource( output = output, action_flags = ["--link"], target_device = target_device, - action_identifier = "link_" + raw_file.basename, + action_identifier = _ibtool_identifier("link", raw_file), output_is_dir = output_is_dir, ) @@ -334,7 +456,9 @@ def _process_apple_resource_file_if_needed( file: Artifact, destination: AppleBundleDestination, destination_relative_path: [str, None], - codesign_on_copy: bool = False) -> AppleBundlePart: + codesign_on_copy: bool = False, + codesign_entitlements: Artifact | None = None, + codesign_flags_override: list[str] | None = None) -> AppleBundlePart: output_dir = "_ProcessedResources" basename = paths.basename(file.short_path) output_is_contents_dir = False @@ -347,6 +471,8 @@ def _process_apple_resource_file_if_needed( action_id = destination_relative_path, ) elif basename.endswith(".storyboard"): + if destination_relative_path: + 
destination_relative_path = paths.replace_extension(destination_relative_path, ".storyboardc") compiled = ctx.actions.declare_output(paths.join(output_dir, paths.replace_extension(file.short_path, ".storyboardc")), dir = True) if get_is_watch_bundle(ctx): output_is_contents_dir = True @@ -357,6 +483,8 @@ def _process_apple_resource_file_if_needed( processed = compiled _compile_ui_resource(ctx, file, processed.as_output()) elif basename.endswith(".xib"): + if destination_relative_path: + destination_relative_path = paths.replace_extension(destination_relative_path, ".nib") processed = ctx.actions.declare_output(paths.join(output_dir, paths.replace_extension(file.short_path, ".nib"))) _compile_ui_resource(ctx, file, processed.as_output()) else: @@ -365,17 +493,18 @@ def _process_apple_resource_file_if_needed( # When name is empty string only content of the directory will be copied, as opposed to the directory itself. # When name is `None`, directory or file will be copied as it is, without renaming. new_name = destination_relative_path if destination_relative_path else ("" if output_is_contents_dir else None) - return AppleBundlePart(source = processed, destination = destination, new_name = new_name, codesign_on_copy = codesign_on_copy) + return AppleBundlePart(source = processed, destination = destination, new_name = new_name, codesign_on_copy = codesign_on_copy, codesign_entitlements = codesign_entitlements, codesign_flags_override = codesign_flags_override) # Returns a path relative to the _parent_ of the lproj dir. # For example, given a variant file with a short path of`XX/YY.lproj/ZZ` # it would return `YY.lproj/ZZ`. def _get_dest_subpath_for_variant_file(variant_file: Artifact) -> str: - dir_name = paths.basename(paths.dirname(variant_file.short_path)) - if not dir_name.endswith("lproj"): + dir_name = _get_variant_dirname(variant_file) + if not dir_name: fail("Variant files have to be in a directory with name ending in '.lproj' but `{}` was not.".format(variant_file.short_path)) file_name = paths.basename(variant_file.short_path) return paths.join(dir_name, file_name) -def get_is_watch_bundle(ctx: AnalysisContext) -> bool: - return ctx.attrs._apple_toolchain[AppleToolchainInfo].watch_kit_stub_binary != None +def _get_variant_dirname(variant_file: Artifact) -> str | None: + dir_name = paths.basename(paths.dirname(variant_file.short_path)) + return dir_name if dir_name.endswith("lproj") else None diff --git a/prelude/apple/apple_bundle_types.bzl b/prelude/apple/apple_bundle_types.bzl index 7cf736c7e1feb..a073a1099460d 100644 --- a/prelude/apple/apple_bundle_types.bzl +++ b/prelude/apple/apple_bundle_types.bzl @@ -7,6 +7,39 @@ load(":debug.bzl", "AppleDebuggableInfo") +AppleBundleType = enum( + "default", + # Bundle was built for watchOS Apple platform + "watchapp", + # Bundle represents an App Clip to be embedded + "appclip", + # Bundle represents an ExtensionKit extension to be embedded + "extensionkit_extension", +) + +ApplePackageExtension = enum( + "ipa", + "pkg", + "dmg", + "zip", +) + +AppleBundleManifestLogFiles = record( + command_file = field(Artifact), + spec_file = field(Artifact), + log_file = field([Artifact, None], None), +) + +AppleBundleManifest = record( + log_file_map = dict[Label, AppleBundleManifestLogFiles], +) + +AppleBundleManifestInfo = provider( + fields = { + "manifest": provider_field(AppleBundleManifest), + }, +) + # Provider flagging that result of the rule contains Apple bundle. 
 # It might be copied into the main bundle to the appropriate place if a rule
 # with this provider is a dependency of `apple_bundle`.
@@ -15,12 +48,10 @@ AppleBundleInfo = provider(
     fields = {
         # Result bundle
         "bundle": provider_field(Artifact),
+        "bundle_type": provider_field(AppleBundleType),
         # The name of the executable within the bundle.
         "binary_name": provider_field([str, None], default = None),
-        # If the bundle was built for watchOS Apple platform, this affects packaging.
-        # Might be omitted for certain types of bundle (e.g. frameworks) when packaging doesn't depend on it.
-        "is_watchos": provider_field([bool, None]),
-        # If the bundle contains a Watch Extension executable, we have to update the packaging.
+        # If the bundle contains a watchOS app bundle, we have to update the packaging.
         # This might be omitted for certain types of bundles which don't depend on it.
         "contains_watchapp": provider_field([bool, None]),
         # By default, non-framework, non-appex binaries copy Swift libraries into the final
@@ -57,6 +88,16 @@ AppleBundleExtraOutputsInfo = provider(fields = {
 
 AppleBundleBinaryOutput = record(
     binary = field(Artifact),
     debuggable_info = field([AppleDebuggableInfo, None], None),
-    # In the case of watchkit, the `ctx.attrs.binary`'s not set, and we need to create a stub binary.
-    is_watchkit_stub_binary = field(bool, False),
+)
+
+AppleBundleTypeDefault = AppleBundleType("default")
+AppleBundleTypeWatchApp = AppleBundleType("watchapp")
+AppleBundleTypeAppClip = AppleBundleType("appclip")
+AppleBundleTypeExtensionKitExtension = AppleBundleType("extensionkit_extension")
+
+# Represents the user-visible type which is distinct from the internal one (`AppleBundleType`)
+AppleBundleTypeAttributeType = enum(
+    "appclip",
+    "extensionkit_extension",
+    "watchapp",
 )
diff --git a/prelude/apple/apple_bundle_utility.bzl b/prelude/apple/apple_bundle_utility.bzl
index efef63b91a907..abf26c7d9d1ae 100644
--- a/prelude/apple/apple_bundle_utility.bzl
+++ b/prelude/apple/apple_bundle_utility.bzl
@@ -14,6 +14,9 @@ load(":resource_groups.bzl", "ResourceGraphInfo")
 
 # `ctx` in all functions below is expected to be of `apple_bundle` or `apple_test` rule
 
+def get_is_watch_bundle(ctx: AnalysisContext) -> bool:
+    return ctx.attrs._apple_toolchain[AppleToolchainInfo].sdk_name.startswith("watch")
+
 def _get_bundle_target_name(ctx: AnalysisContext):
     if hasattr(ctx.attrs, "_bundle_target_name"):
         # `apple_resource_bundle` rules are proxies for the real rules,
@@ -27,7 +30,10 @@ def get_product_name(ctx: AnalysisContext) -> str:
 
 def get_extension_attr(ctx: AnalysisContext) -> typing.Any:
     return ctx.attrs.extension
 
-def get_default_binary_dep(binary_deps: dict[str, Dependency]) -> [Dependency, None]:
+def get_default_binary_dep(binary_deps: [dict[str, Dependency], Dependency, None]) -> [Dependency, None]:
+    if type(binary_deps) != "dict":
+        return binary_deps
+
     if len(binary_deps.items()) == 1:
         return binary_deps.values()[0]
 
@@ -39,22 +45,14 @@ def get_flattened_binary_deps(binary_deps: dict[str, Dependency]) -> list[Depend
 
 # Derives the effective deployment target for the bundle. It's
 # usually the deployment target of the binary if present,
 # otherwise it falls back to other values (see implementation).
-def get_bundle_min_target_version(ctx: AnalysisContext, binary: [Dependency, None]) -> str: +def get_bundle_min_target_version(ctx: AnalysisContext, binary_or_binaries: [dict[str, Dependency], Dependency, None]) -> str: + binary = get_default_binary_dep(binary_or_binaries) + binary_min_version = None - # Could be not set for e.g. watchOS bundles which have a stub - # binary that comes from the apple_toolchain(), not from the - # apple_bundle() itself (i.e., binary field will be None). - # - # TODO(T114147746): The top-level stub bundle for a watchOS app - # does not have the ability to set its deployment target via - # a binary (as that field is empty). If it contains asset - # catalogs (can it?), we need to use correct target version. - # - # The solution might to be support SDK version from - # Info.plist (T110378109). + # apple_xcuitest bundles do not have a binary if binary != None: - min_version_info = binary[AppleMinDeploymentVersionInfo] + min_version_info = binary[AppleMinDeploymentVersionInfo] if AppleMinDeploymentVersionInfo in binary else None if min_version_info != None: binary_min_version = min_version_info.version @@ -69,7 +67,13 @@ def get_bundle_min_target_version(ctx: AnalysisContext, binary: [Dependency, Non def get_bundle_resource_processing_options(ctx: AnalysisContext) -> AppleResourceProcessingOptions: compile_resources_locally = value_or(ctx.attrs._compile_resources_locally_override, ctx.attrs._apple_toolchain[AppleToolchainInfo].compile_resources_locally) - return AppleResourceProcessingOptions(prefer_local = compile_resources_locally, allow_cache_upload = compile_resources_locally) + is_watch_bundle = get_is_watch_bundle(ctx) + return AppleResourceProcessingOptions( + prefer_local = compile_resources_locally and (not is_watch_bundle), + # TODO: Remote execution preference should be part of `apple_toolchain()`, same as `compile_resources_locally` + prefer_remote = is_watch_bundle, + allow_cache_upload = compile_resources_locally, + ) def get_bundle_infos_from_graph(graph: ResourceGraphInfo) -> list[AppleBundleLinkerMapInfo]: bundle_infos = [] diff --git a/prelude/apple/apple_code_signing_types.bzl b/prelude/apple/apple_code_signing_types.bzl index 555a04f8aac53..66ac6cad95c0e 100644 --- a/prelude/apple/apple_code_signing_types.bzl +++ b/prelude/apple/apple_code_signing_types.bzl @@ -7,7 +7,7 @@ # Provider which exposes a field from `apple_binary` to `apple_bundle` as it might be used during code signing. AppleEntitlementsInfo = provider(fields = { - "entitlements_file": provider_field([Artifact, None], default = None), + "entitlements_file": provider_field(Artifact | None, default = None), }) CodeSignType = enum( @@ -15,3 +15,9 @@ CodeSignType = enum( "adhoc", "distribution", ) + +CodeSignConfiguration = enum( + "dry-run", + "fast-adhoc", + "none", +) diff --git a/prelude/apple/apple_common.bzl b/prelude/apple/apple_common.bzl new file mode 100644 index 0000000000000..cf9f369e9142c --- /dev/null +++ b/prelude/apple/apple_common.bzl @@ -0,0 +1,235 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# TODO(cjhopman): This was generated by scripts/hacks/rules_shim_with_docs.py, +# but should be manually edited going forward. 
There may be some errors in +# the generated docs, and so those should be verified to be accurate and +# well-formatted (and then delete this TODO) + +load("@prelude//:is_full_meta_repo.bzl", "is_full_meta_repo") + +def _headers_arg(): + return { + "headers": attrs.named_set(attrs.source(), sorted = True, default = [], doc = """ + The set of header files that are made available for inclusion to the source files in this + target. These should be specified as either a list of header files or a dictionary of header names + to header files. The header names can contain forward slashes (`/`). If a list of + header files is specified, the headers can be imported + with `#import "$HEADER_PATH_PREFIX/$HEADER_NAME"` or `#import + "$HEADER_NAME"`, where `$HEADER_PATH_PREFIX` is the value of + the target's `header_path_prefix` attribute, and `$HEADER_NAME` is + the filename of the header file. If a dictionary is specified, each header can be imported + with `#import "$HEADER_NAME"`, where `$HEADER_NAME` is the key + corresponding to this file. In this case, the `header_path_prefix` attribute is + ignored. In either case, quotes in the import statements can be replaced with angle brackets. +"""), + } + +def _exported_headers_arg(): + return { + "exported_headers": attrs.named_set(attrs.source(), sorted = True, default = [], doc = """ + The set of header files that are made available for inclusion to the source files in this + target and all targets that transitively depend on this one. These should be specified as + either a list of header files or a dictionary of header names + to header files. The header names can contain forward slashes (`/`). If a list of + header files is specified, the headers can be imported + with `#import "$HEADER_PATH_PREFIX/$HEADER_NAME"` or, if a header file that belongs to + the same rule is being imported, with `#import + "$HEADER_NAME"`, where `$HEADER_PATH_PREFIX` is the value of + the target's `header_path_prefix` attribute, and `$HEADER_NAME` is + the filename of the header file. If a dictionary is specified, each header can be imported + with `#import "$HEADER_NAME"`, where `$HEADER_NAME` is the key + corresponding to this file. In this case, the `header_path_prefix` attribute is + ignored. In either case, quotes in the import statements can be replaced with angle brackets. +"""), + } + +def _header_path_prefix_arg(): + return { + "header_path_prefix": attrs.option(attrs.string(), default = None, doc = """ + A path prefix when including headers of this target. For example, headers from a library defined + using + + ``` + apple_library( + name = "Library", + headers = glob(["**/*.h"]), + header_path_prefix = "Lib", + ) + ``` + + can be imported using following mapping + + ``` + Library/SubDir/Header1.h -> Lib/Header1.h + Library/Header2.h -> Lib/Header2.h + ``` + + Defaults to the short name of the target. Can contain forward slashes (`/`), but + cannot start with one. See `headers` for more information. +"""), + } + +def _frameworks_arg(): + return { + "frameworks": attrs.list(attrs.string(), default = [], doc = """ + A list of system frameworks that the code in this target uses. Each entry should be a path + starting with `$SDKROOT` or `$PLATFORM_DIR` to denote that the rest of the + path is relative to the root of the SDK used for the build or to the platform toolchain + directory. 
+"""), + } + +def _target_sdk_version(): + return { + "target_sdk_version": attrs.option(attrs.string(), default = None, doc = """ + The minimum OS version that the library target should support, overriding the minimum set in + `.buckconfig`. When set, Buck will automatically add flags to both Objective-C and + Swift compilation that will allow the use of the new APIs without guarding code inside availability + checks. +"""), + } + +def _info_plist_arg(): + return { + "info_plist": attrs.source(doc = """ + A path to an `Info.plist` file that will be placed in the bundle. The specified file + will be processed by substituting variable names with their values + (see `info_plist_substitutions` for more information). +"""), + } + +def _info_plist_substitutions_arg(): + return { + "info_plist_substitutions": attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False, default = {}, doc = """ + A dictionary that assigns variable names to their values. It is used for variable + substitution when processing the file specified in `info_plist`. For example if this + argument is set to `{\'VAR\': \'MyValue\'}`, then each occurrence of `$(VAR)` or `${VAR}` in the file will be replaced by `MyValue`. +"""), + } + +def _extra_xcode_sources(): + return { + "extra_xcode_sources": attrs.list(attrs.source(), default = [], doc = """ + When the project is generated, this is the list of files that will added to the build phase + "Compile Sources" of the given target. +"""), + } + +def _extra_xcode_files(): + return { + "extra_xcode_files": attrs.list(attrs.source(), default = [], doc = """ + When the project is generated, this is the list of files that will added to the project. + Those files won't be added to the build phase "Compile Sources". +"""), + } + +def _privacy_manifest_arg(): + return { + "privacy_manifest": attrs.option(attrs.source(), default = None, doc = """ + A path to an `.xcprivacy` file that will be placed in the bundle. +"""), + } + +def _debug_artifacts_validators_arg(): + return { + "debug_artifacts_validators": attrs.dict( + attrs.string(), + attrs.tuple( + # A target which will be passed two named arguments: + # --artifacts: A path to a file containing a list of artifact paths to inspect. + # --output: The path to write the analysis output to. + attrs.exec_dep(providers = [RunInfo]), + # A target which is passed the outputs of the previous script + # and emits a ValidationSpec validation_result JSON file. + # --analysis-json-path: A path to a JSON artifact. Keys are the configured target. + # --output: The path to write the ValidationSpec validation_result JSON file. + # value is a list of artifact outputs from the previous script. + attrs.exec_dep(providers = [RunInfo]), + ), + default = {}, + ), + } + +def _serialize_debugging_options_arg(): + return { + # Need ability to distinguish between no value provided by users + # vs value explicitly set to `True` (in the latter case, we should + # show warning if value cannot be respected in mixed modules while + # in the former, we do not show a warning). + # + # Lack of value defaults to enabling serialized debugging options. 
+ "serialize_debugging_options": attrs.option(attrs.bool(), default = None), + } + +def _uses_explicit_modules_arg(): + return { + "uses_explicit_modules": attrs.bool(default = False), + } + +def _meta_apple_library_validation_enabled_default_value(): + if not is_full_meta_repo(): + return False + + meta_apple_library_validation_enabled_default = (read_root_config("apple", "meta_apple_library_validation", "false").lower() == "true") + return select({ + "DEFAULT": select({ + "DEFAULT": meta_apple_library_validation_enabled_default, + "config//features/apple:fb_xplat_suffixing_check_disabled": False, + "config//features/apple:fb_xplat_suffixing_check_enabled": True, + }), + # arvr targets do not use suffixed targets, as any xplat target deps + # get rewritten without the Apple-specific suffixes. + "config//build_mode/constraints:arvr_mode_enabled": False, + }) + +def _meta_apple_library_validation_enabled_arg(): + return { + "_meta_apple_library_validation_enabled": attrs.bool(default = _meta_apple_library_validation_enabled_default_value()), + } + +def _skip_universal_resource_dedupe_default_value(): + if not is_full_meta_repo(): + return False + + return select({ + "DEFAULT": False, + "config//features/apple:skip_universal_resource_dedupe_disabled": False, + "config//features/apple:skip_universal_resource_dedupe_enabled": True, + }) + +def _skip_universal_resource_dedupe_arg(): + return { + "skip_universal_resource_dedupe": attrs.bool(default = _skip_universal_resource_dedupe_default_value()), + } + +def _apple_sanitizer_compatibility_arg(): + if not is_full_meta_repo(): + return {} + + return { + "_sanitizer_compatibility": attrs.default_only(attrs.dep(default = "fbsource//tools/build_defs/apple/sanitizers:sanitizer_compatibility")), + } + +apple_common = struct( + headers_arg = _headers_arg, + exported_headers_arg = _exported_headers_arg, + header_path_prefix_arg = _header_path_prefix_arg, + frameworks_arg = _frameworks_arg, + target_sdk_version = _target_sdk_version, + info_plist_arg = _info_plist_arg, + info_plist_substitutions_arg = _info_plist_substitutions_arg, + extra_xcode_sources = _extra_xcode_sources, + extra_xcode_files = _extra_xcode_files, + privacy_manifest_arg = _privacy_manifest_arg, + debug_artifacts_validators_arg = _debug_artifacts_validators_arg, + serialize_debugging_options_arg = _serialize_debugging_options_arg, + uses_explicit_modules_arg = _uses_explicit_modules_arg, + meta_apple_library_validation_enabled_arg = _meta_apple_library_validation_enabled_arg, + skip_universal_resource_dedupe_arg = _skip_universal_resource_dedupe_arg, + apple_sanitizer_compatibility_arg = _apple_sanitizer_compatibility_arg, +) diff --git a/prelude/apple/apple_core_data.bzl b/prelude/apple/apple_core_data.bzl index 82721a4af965a..6c0becbc6ef85 100644 --- a/prelude/apple/apple_core_data.bzl +++ b/prelude/apple/apple_core_data.bzl @@ -5,14 +5,17 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
+load("@prelude//:paths.bzl", "paths") load("@prelude//apple:apple_toolchain_types.bzl", "AppleToolchainInfo") load(":apple_bundle_utility.bzl", "get_bundle_min_target_version", "get_bundle_resource_processing_options") load(":apple_core_data_types.bzl", "AppleCoreDataSpec") load(":apple_sdk.bzl", "get_apple_sdk_name") +load(":apple_target_sdk_version.bzl", "get_platform_name_for_sdk", "get_platform_version_for_sdk_version") load(":resource_groups.bzl", "create_resource_graph") def apple_core_data_impl(ctx: AnalysisContext) -> list[Provider]: spec = AppleCoreDataSpec( + module = ctx.attrs.module, path = ctx.attrs.path, ) graph = create_resource_graph( @@ -24,17 +27,18 @@ def apple_core_data_impl(ctx: AnalysisContext) -> list[Provider]: ) return [DefaultInfo(), graph] -def compile_apple_core_data(ctx: AnalysisContext, specs: list[AppleCoreDataSpec], product_name: str) -> [Artifact, None]: +def compile_apple_core_data(ctx: AnalysisContext, specs: list[AppleCoreDataSpec], product_name: str) -> Artifact | None: if len(specs) == 0: return None output = ctx.actions.declare_output("AppleCoreDataCompiled") - # Aggregate all the coredata momc commands together - momc_commands = [] + # Aggregate all the coredata momc and mapc commands together + tool_commands = [] for spec in specs: - momc_command = _get_momc_command(ctx, spec, product_name, cmd_args("$TMPDIR")) - momc_commands.append(momc_command) + tool, output_path = _get_model_args(ctx, spec) + tool_command = _get_tool_command(ctx, spec, product_name, tool, output_path) + tool_commands.append(tool_command) # Sandboxing and fs isolation on RE machines results in Xcode tools failing # when those are working in freshly created directories in buck-out. @@ -42,29 +46,50 @@ def compile_apple_core_data(ctx: AnalysisContext, specs: list[AppleCoreDataSpec] # As a workaround create a directory in tmp, use it for Xcode tools, then # copy the result to buck-out. 
wrapper_script, _ = ctx.actions.write( - "momc_wrapper.sh", + "tool_wrapper.sh", [ cmd_args("set -euo pipefail"), cmd_args('export TMPDIR="$(mktemp -d)"'), - cmd_args(momc_commands), + cmd_args(tool_commands), cmd_args(output, format = 'mkdir -p {} && cp -r "$TMPDIR"/ {}'), ], allow_args = True, ) - combined_command = cmd_args(["/bin/sh", wrapper_script]).hidden(momc_commands + [output.as_output()]) + combined_command = cmd_args(["/bin/sh", wrapper_script], hidden = tool_commands + [output.as_output()]) processing_options = get_bundle_resource_processing_options(ctx) - ctx.actions.run(combined_command, prefer_local = processing_options.prefer_local, allow_cache_upload = processing_options.allow_cache_upload, category = "apple_core_data") + ctx.actions.run( + combined_command, + prefer_local = processing_options.prefer_local, + prefer_remote = processing_options.prefer_remote, + allow_cache_upload = processing_options.allow_cache_upload, + category = "apple_core_data", + ) return output -def _get_momc_command(ctx: AnalysisContext, core_data_spec: AppleCoreDataSpec, product_name: str, output_directory: cmd_args) -> cmd_args: +def _get_model_args(ctx: AnalysisContext, core_data_spec: AppleCoreDataSpec): + toolchain = ctx.attrs._apple_toolchain[AppleToolchainInfo] + + if core_data_spec.path.extension == ".xcmappingmodel": + filename = paths.replace_extension(core_data_spec.path.basename, ".cdm") + return toolchain.mapc, cmd_args("$TMPDIR/" + filename) + else: + return toolchain.momc, cmd_args("$TMPDIR") + +def _get_tool_command(ctx: AnalysisContext, core_data_spec: AppleCoreDataSpec, product_name: str, tool: RunInfo, output: cmd_args) -> cmd_args: + sdk_name = get_apple_sdk_name(ctx) + deployment_target = get_platform_version_for_sdk_version( + sdk_name = sdk_name, + sdk_version = get_bundle_min_target_version(ctx, ctx.attrs.binary), + ) + return cmd_args([ - ctx.attrs._apple_toolchain[AppleToolchainInfo].momc, + tool, "--sdkroot", ctx.attrs._apple_toolchain[AppleToolchainInfo].sdk_path, - "--" + get_apple_sdk_name(ctx) + "-deployment-target", - get_bundle_min_target_version(ctx, ctx.attrs.binary), + "--" + get_platform_name_for_sdk(sdk_name) + "-deployment-target", + deployment_target, "--module", - product_name, - core_data_spec.path, - output_directory, - ], delimiter = " ") + core_data_spec.module if core_data_spec.module else product_name, + cmd_args(core_data_spec.path, format = "./{}"), + output, + ], delimiter = " ", hidden = core_data_spec.path) diff --git a/prelude/apple/apple_core_data_types.bzl b/prelude/apple/apple_core_data_types.bzl index 700a5d602bfec..a3cc54c047a5f 100644 --- a/prelude/apple/apple_core_data_types.bzl +++ b/prelude/apple/apple_core_data_types.bzl @@ -6,5 +6,6 @@ # of this source tree. 
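Note (editorial, not part of the patch): `AppleCoreDataSpec` below gains the optional `module` field consumed by `_get_tool_command` above; when it is `None`, the bundle's product name is used for `--module`. A sketch of how the rule implementation above constructs it:

    spec = AppleCoreDataSpec(
        module = ctx.attrs.module,  # optional; falls back to the product name at command construction
        path = ctx.attrs.path,
    )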
 AppleCoreDataSpec = record(
+    module = field(str | None),
     path = field(Artifact),
 )
diff --git a/prelude/apple/apple_dsym.bzl b/prelude/apple/apple_dsym.bzl
index 499f6f148c6d7..3e6397da3e9e1 100644
--- a/prelude/apple/apple_dsym.bzl
+++ b/prelude/apple/apple_dsym.bzl
@@ -7,47 +7,65 @@
 
 load("@prelude//utils:arglike.bzl", "ArgLike")  # @unused Used as a type
 load(":apple_toolchain_types.bzl", "AppleToolchainInfo")
-load(
-    ":debug.bzl",
-    "AppleDebugInfo",  # @unused Used as a type
-)
+load(":debug.bzl", "AppleSelectiveDebuggableMetadata")  # @unused Used as a type
 
 DSYM_SUBTARGET = "dsym"
 DSYM_INFO_SUBTARGET = "dsym-info"
+EXTENDED_DSYM_INFO_SUBTARGET = "extended-dsym-info"
 DWARF_AND_DSYM_SUBTARGET = "dwarf-and-dsym"
 
-def get_apple_dsym(ctx: AnalysisContext, executable: Artifact, debug_info: list[AppleDebugInfo], action_identifier: str, output_path_override: [str, None] = None) -> Artifact:
+def get_apple_dsym(ctx: AnalysisContext, executable: Artifact, debug_info: list[ArgLike], action_identifier: str, output_path_override: [str, None] = None) -> Artifact:
     output_path = output_path_override or "{}.dSYM".format(executable.short_path)
     return get_apple_dsym_ext(ctx, executable, debug_info, action_identifier, output_path)
 
 # TODO(T110672942): Things which are still unsupported:
-# - pass in dsymutil_extra_flags
 # - oso_prefix
-# - dsym_verification
-def get_apple_dsym_ext(ctx: AnalysisContext, executable: [ArgLike, Artifact], debug_info: list[AppleDebugInfo], action_identifier: str, output_path: str) -> Artifact:
+def get_apple_dsym_ext(ctx: AnalysisContext, executable: [ArgLike, Artifact], debug_info: list[ArgLike], action_identifier: str, output_path: str) -> Artifact:
     dsymutil = ctx.attrs._apple_toolchain[AppleToolchainInfo].dsymutil
     output = ctx.actions.declare_output(output_path, dir = True)
+    cmd = cmd_args(
+        [
+            dsymutil,
+            "--verify-dwarf={}".format(ctx.attrs._dsymutil_verify_dwarf),
+            # Reproducers are not useful; we can reproduce from the action digest.
+            "--reproducer=Off",
+        ],
+        # Mach-O executables don't contain DWARF data.
+        # Instead, they contain paths to the object files which themselves contain DWARF data.
+        # So, those object files are needed for dsymutil to be able to create the dSYM bundle.
+        hidden = debug_info,
+    )
+    if ctx.attrs.dsym_uses_parallel_linker:
+        cmd.add("--linker=parallel")
 
-    cmd = cmd_args([dsymutil, "-o", output.as_output()])
-    cmd.add(executable)
-
-    # Mach-O executables don't contain DWARF data.
-    # Instead, they contain paths to the object files which themselves contain DWARF data.
-    #
-    # So, those object files are needed for dsymutil to be to create the dSYM bundle.
- cmd.hidden(debug_info) + cmd.add(ctx.attrs._dsymutil_extra_flags) + cmd.add( + [ + "-o", + output.as_output(), + executable, + ], + ) ctx.actions.run(cmd, category = "apple_dsym", identifier = action_identifier) - return output -def get_apple_dsym_info(ctx: AnalysisContext, binary_dsyms: list[Artifact], dep_dsyms: list[Artifact]) -> Artifact: +AppleDsymJsonInfo = record( + # JSON object containing the list of dSYMs + json_object = field(dict[str, typing.Any]), + # A list of all artifacts referenced in `json_object` + outputs = field(list[Artifact]), +) + +def get_apple_dsym_info_json( + binary_dsyms: list[Artifact], + dep_dsyms: list[Artifact], + metadata: list[AppleSelectiveDebuggableMetadata] | None = None) -> AppleDsymJsonInfo: dsym_info = {} - # WatchOS stub does not have a dSYM, so it's possible that we get zero `binary_dsyms` if len(binary_dsyms) == 1: dsym_info["binary"] = binary_dsyms[0] - elif len(binary_dsyms) > 1: - fail("There cannot be more than one binary dSYM") + else: + fail("There can only be one binary dSYM") if dep_dsyms: # `dedupe` needed as it's possible for the same dSYM to bubble up @@ -55,4 +73,15 @@ def get_apple_dsym_info(ctx: AnalysisContext, binary_dsyms: list[Artifact], dep_ # + bundle in the `deps` field of a parent bundle). dsym_info["deps"] = dedupe(dep_dsyms) - return ctx.actions.write_json("dsym-info.json", dsym_info) + metadata_dsym_outputs = [] + if metadata != None: + metadata_json_obj = {} + for debuggable_metadata in metadata: + metadata_json_obj[debuggable_metadata.dsym] = debuggable_metadata.metadata + metadata_dsym_outputs += [debuggable_metadata.dsym, debuggable_metadata.metadata] + dsym_info["selective_metadata"] = metadata_json_obj + + return AppleDsymJsonInfo( + json_object = dsym_info, + outputs = binary_dsyms + dep_dsyms + metadata_dsym_outputs, + ) diff --git a/prelude/apple/apple_dsym_config.bzl b/prelude/apple/apple_dsym_config.bzl new file mode 100644 index 0000000000000..6d5dd7404df77 --- /dev/null +++ b/prelude/apple/apple_dsym_config.bzl @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//utils:buckconfig.bzl", "read_choice", "read_list") + +def apple_dsym_config() -> dict[str, typing.Any]: + return { + "_dsymutil_extra_flags": read_list("apple", "dsymutil_extra_flags", delimiter = " ", default = [], root_cell = True), + # The default value of `--verify-dwarf` depends on the toolchain build mode. Default to `none` to unify behavior. + # https://github.com/llvm/llvm-project/blob/e3eb12cce97fa75d1d2443bcc2c2b26aa660fe34/llvm/tools/dsymutil/dsymutil.cpp#L94-L98 + "_dsymutil_verify_dwarf": read_choice("apple", "dsymutil_verify_dwarf", choices = ["none", "input", "output", "all", "auto"], default = "none", root_cell = True), + } diff --git a/prelude/apple/apple_entitlements.bzl b/prelude/apple/apple_entitlements.bzl new file mode 100644 index 0000000000000..6342f926eb848 --- /dev/null +++ b/prelude/apple/apple_entitlements.bzl @@ -0,0 +1,66 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
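Note (editorial, not part of the patch): the entitlements helpers in the new file below are extracted from the bundling code (see the `_entitlements_file`/`_should_include_entitlements` removals above) so they can be shared. For distribution signing, or adhoc signing with the entitlements override enabled, the produced codesign arguments reduce to a sketch like:

    # ["--entitlements", <entitlements artifact>] when an entitlements file is found,
    # [] otherwise
    codesign_args = get_entitlements_codesign_args(ctx, codesign_type)

On simulator and Catalyst SDKs the entitlements are additionally embedded into the binary itself via `-sectcreate __TEXT __entitlements` linker flags (see `entitlements_link_flags` below).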
+ +load("@prelude//utils:arglike.bzl", "ArgLike") # @unused Used as a type +load(":apple_bundle_utility.bzl", "get_default_binary_dep") +load(":apple_code_signing_types.bzl", "AppleEntitlementsInfo", "CodeSignType") +load(":apple_sdk_metadata.bzl", "IPhoneSimulatorSdkMetadata", "MacOSXCatalystSdkMetadata") +load(":apple_toolchain_types.bzl", "AppleToolchainInfo") + +def get_entitlements_codesign_args(ctx: AnalysisContext, codesign_type: CodeSignType) -> list[ArgLike]: + include_entitlements = should_include_entitlements(ctx, codesign_type) + maybe_entitlements = _entitlements_file(ctx) if include_entitlements else None + entitlements_args = ["--entitlements", maybe_entitlements] if maybe_entitlements else [] + return entitlements_args + +def should_include_entitlements(ctx: AnalysisContext, codesign_type: CodeSignType) -> bool: + if codesign_type.value == "distribution": + return True + + if codesign_type.value == "adhoc": + # The config-based override value takes priority over target value + if ctx.attrs._use_entitlements_when_adhoc_code_signing != None: + return ctx.attrs._use_entitlements_when_adhoc_code_signing + return ctx.attrs.use_entitlements_when_adhoc_code_signing + + return False + +def _entitlements_file(ctx: AnalysisContext) -> Artifact | None: + if hasattr(ctx.attrs, "entitlements_file"): + # Bundling `apple_test` which doesn't have a binary to provide the entitlements, so they are provided via `entitlements_file` attribute directly. + return ctx.attrs.entitlements_file + + if not ctx.attrs.binary: + return None + + # The `binary` attribute can be either an apple_binary or a dynamic library from apple_library + binary_entitlement_info = get_default_binary_dep(ctx.attrs.binary).get(AppleEntitlementsInfo) + if binary_entitlement_info and binary_entitlement_info.entitlements_file: + return binary_entitlement_info.entitlements_file + + return ctx.attrs._codesign_entitlements + +_SDK_NAMES_NEED_ENTITLEMENTS_IN_BINARY = [ + IPhoneSimulatorSdkMetadata.name, + MacOSXCatalystSdkMetadata.name, +] + +def _needs_entitlements_in_binary(ctx: AnalysisContext) -> bool: + apple_toolchain_info = ctx.attrs._apple_toolchain[AppleToolchainInfo] + return apple_toolchain_info.sdk_name in _SDK_NAMES_NEED_ENTITLEMENTS_IN_BINARY + +def entitlements_link_flags(ctx: AnalysisContext) -> list[typing.Any]: + return [ + "-Xlinker", + "-sectcreate", + "-Xlinker", + "__TEXT", + "-Xlinker", + "__entitlements", + "-Xlinker", + ctx.attrs.entitlements_file, + ] if (ctx.attrs.entitlements_file and _needs_entitlements_in_binary(ctx)) else [] diff --git a/prelude/apple/apple_error_handler.bzl b/prelude/apple/apple_error_handler.bzl new file mode 100644 index 0000000000000..674182f399882 --- /dev/null +++ b/prelude/apple/apple_error_handler.bzl @@ -0,0 +1,63 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +load("@prelude//apple:apple_error_handler_types.bzl", "AppleErrorCategories") +# @oss-disable: load("@prelude//apple/meta_only:apple_extra_error_categories.bzl", "APPLE_META_STDERR_ERROR_CATEGORIES") + +_APPLE_STDERR_ERROR_CATEGORIES = [ + #codesigning issues + AppleErrorCategories(string_match = "codesignprovisioningerror", categories = ["apple_code_sign_error", "code_sign_provisioning_error"]), + AppleErrorCategories(string_match = "the timestamp service is not available", categories = ["apple_code_sign_error"]), + #compilation issues + AppleErrorCategories(string_match = "failed to emit precompiled module", categories = ["apple_compilation_failure", "apple_pcm_compilation_failure"]), + AppleErrorCategories(string_match = "please rebuild precompiled header", categories = ["apple_compilation_failure", "apple_pcm_compilation_failure"]), + AppleErrorCategories(string_match = "llvm-lipo", categories = ["apple_lipo_failure"]), + AppleErrorCategories(string_match = ".swift:", categories = ["apple_compilation_failure", "apple_swift_compilation_failure"]), + AppleErrorCategories(string_match = ".cpp:", categories = ["apple_compilation_failure", "apple_cpp_compilation_failure"]), + AppleErrorCategories(string_match = ".cxx:", categories = ["apple_compilation_failure", "apple_cpp_compilation_failure"]), + AppleErrorCategories(string_match = ".m:", categories = ["apple_compilation_failure", "apple_objc_compilation_failure"]), + AppleErrorCategories(string_match = ".mm:", categories = ["apple_compilation_failure", "apple_objc_compilation_failure", "apple_cpp_compilation_failure", "apple_objcpp_compilation_failure"]), + AppleErrorCategories(string_match = ".c:", categories = ["apple_compilation_failure", "apple_c_compilation_failure"]), + AppleErrorCategories(string_match = ".modulemap:", categories = ["apple_compilation_failure", "apple_modulemap_compilation_failure"]), + AppleErrorCategories(string_match = "missing required modules", categories = ["apple_compilation_failure", "apple_missing_required_modules_error"]), + AppleErrorCategories(string_match = "has a minimum deployment target", categories = ["apple_compilation_failure", "apple_deployment_target_error"]), + + #toolchain / genrule issues + AppleErrorCategories(string_match = "stack dump:", categories = ["apple_binary_execution_failure"]), + AppleErrorCategories(string_match = "thread 'main' panicked", categories = ["apple_binary_execution_failure"]), + AppleErrorCategories(string_match = "error while loading shared libraries", categories = ["apple_binary_execution_failure"]), + AppleErrorCategories(string_match = "traceback (most recent call last)", categories = ["apple_python_execution_failure"]), + AppleErrorCategories(string_match = "command not found", categories = ["apple_command_not_found_failure"]), + AppleErrorCategories(string_match = "command timed out", categories = ["apple_timeout_failure"]), + AppleErrorCategories(string_match = "no such file or directory", categories = ["apple_no_such_file_failure"]), + + #user errors + AppleErrorCategories(string_match = "unknown target", categories = ["apple_unknown_buck_target_failure"]), + + #linker issues + AppleErrorCategories(string_match = "linker command failed", categories = ["apple_linker_failure"]), + AppleErrorCategories(string_match = "duplicate symbol", categories = ["apple_duplicate_symbol_failure"]), + AppleErrorCategories(string_match = "undefined symbol", categories = ["apple_undefined_symbol_failure"]), + AppleErrorCategories(string_match = "framework not found", 
categories = ["apple_framework_not_found_failure"]), + + #buck configuration issues + AppleErrorCategories(string_match = "unknown cell alias", categories = ["apple_buck_configuration_failure", "apple_unknown_cell_alias_failure"]), +] + +def _add_category_strings(lowercase_stderr: str, category_string_target: set[str], source: list[AppleErrorCategories]): + for error_category in source: + if error_category.string_match in lowercase_stderr: + for category_string in error_category.categories: + category_string_target.add(category_string) + +def apple_build_error_handler(ctx: ActionErrorCtx) -> list[ActionSubError]: + lowercase_stderr = ctx.stderr.lower() + categories = set() + _add_category_strings(lowercase_stderr, categories, _APPLE_STDERR_ERROR_CATEGORIES) + # @oss-disable: _add_category_strings(lowercase_stderr, categories, APPLE_META_STDERR_ERROR_CATEGORIES) + + return [ctx.new_sub_error(category = category_string) for category_string in sorted(categories)] diff --git a/prelude/apple/apple_error_handler_types.bzl b/prelude/apple/apple_error_handler_types.bzl new file mode 100644 index 0000000000000..02b71f2c063d9 --- /dev/null +++ b/prelude/apple/apple_error_handler_types.bzl @@ -0,0 +1,14 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +AppleErrorCategories = record( + # Lowercase string that should (preferably uniquely) match the lowercased + # stderr output caused by an error of interest. + string_match = str, + # List of category tags to be applied in the event of this error. + categories = list[str], +) diff --git a/prelude/apple/apple_finalize_codesign.bzl b/prelude/apple/apple_finalize_codesign.bzl new file mode 100644 index 0000000000000..64ccb4f152a0e --- /dev/null +++ b/prelude/apple/apple_finalize_codesign.bzl @@ -0,0 +1,50 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +load(":apple_bundle_types.bzl", "AppleBundleInfo") + +def _apple_finalize_bundle_impl(ctx): + bundle_artifact = ctx.attrs.bundle[DefaultInfo].default_outputs[0] + finalized_bundle = ctx.actions.declare_output(bundle_artifact.basename) + + cmd = cmd_args([ + ctx.attrs.finalizer[RunInfo], + "--input-bundle-path", + bundle_artifact, + "--output-bundle-path", + finalized_bundle.as_output(), + "--sign-key", + ctx.attrs.sign_key, + ]) + ctx.actions.run( + cmd, + category = "apple_finalize_bundle", + identifier = bundle_artifact.basename, + ) + + original_bundle_info = ctx.attrs.bundle[AppleBundleInfo] + finalized_bundle_info = AppleBundleInfo( + bundle = finalized_bundle, + bundle_type = original_bundle_info.bundle_type, + binary_name = original_bundle_info.binary_name, + contains_watchapp = original_bundle_info.contains_watchapp, + skip_copying_swift_stdlib = original_bundle_info.skip_copying_swift_stdlib, + ) + + return [ + DefaultInfo(default_output = finalized_bundle), + finalized_bundle_info, + ] + +apple_finalize_bundle = rule( + attrs = { + "bundle": attrs.dep(), + "finalizer": attrs.exec_dep(providers = [RunInfo]), + "sign_key": attrs.string(default = "fbios-debug"), + }, + impl = _apple_finalize_bundle_impl, +) diff --git a/prelude/apple/apple_framework_versions.bzl b/prelude/apple/apple_framework_versions.bzl index 3f6761b8a2068..f876d83cb9b90 100644 --- a/prelude/apple/apple_framework_versions.bzl +++ b/prelude/apple/apple_framework_versions.bzl @@ -9,7 +9,7 @@ load(":apple_sdk.bzl", "get_apple_sdk_name") load(":apple_target_sdk_version.bzl", "get_min_deployment_version_for_node") load(":apple_utility.bzl", "has_apple_toolchain") -_FRAMEWORK_INTRODUCED_VERSIONS = { +FRAMEWORK_INTRODUCED_VERSIONS = { "AGL": {"macosx": (10, 0, 0)}, "ARKit": {"iphoneos": (11, 0, 0), "maccatalyst": (14, 0, 0)}, "AVFAudio": { @@ -52,6 +52,9 @@ _FRAMEWORK_INTRODUCED_VERSIONS = { "macosx": (11, 0, 0), "watchos": (7, 0, 0), }, + "AccessorySetupKit": { + "iphoneos": (18, 0, 0), + }, "Accounts": { "iphoneos": (5, 0, 0), "maccatalyst": (13, 0, 0), @@ -188,6 +191,9 @@ _FRAMEWORK_INTRODUCED_VERSIONS = { "macosx": (10, 15, 0), "watchos": (6, 0, 0), }, + "ContactProvider": { + "iphoneos": (18, 0, 0), + }, "Contacts": { "iphoneos": (9, 0, 0), "maccatalyst": (13, 0, 0), @@ -246,6 +252,9 @@ _FRAMEWORK_INTRODUCED_VERSIONS = { "macosx": (10, 8, 0), "watchos": (2, 0, 0), }, + "CoreHID": { + "macosx": (15, 0, 0), + }, "CoreHaptics": { "appletvos": (14, 0, 0), "iphoneos": (13, 0, 0), @@ -426,6 +435,9 @@ _FRAMEWORK_INTRODUCED_VERSIONS = { "maccatalyst": (13, 0, 0), "macosx": (10, 13, 0), }, + "FSKit": { + "macosx": (15, 0, 0), + }, "FamilyControls": {"iphoneos": (15, 0, 0), "maccatalyst": (15, 0, 0)}, "FileProvider": {"iphoneos": (11, 0, 0), "macosx": (10, 15, 0)}, "FileProviderUI": { @@ -582,6 +594,10 @@ _FRAMEWORK_INTRODUCED_VERSIONS = { "maccatalyst": (16, 0, 0), "macosx": (12, 0, 0), }, + "LockedCameraCapture": { + "iphoneos": (18, 0, 0), + "maccatalyst": (18, 0, 0), + }, "MLCompute": { "appletvos": (14, 0, 0), "iphoneos": (14, 0, 0), @@ -612,6 +628,10 @@ _FRAMEWORK_INTRODUCED_VERSIONS = { "maccatalyst": (13, 0, 0), "macosx": (10, 9, 0), }, + "MediaExtension": { + "maccatalyst": (18, 0, 0), + "macosx": (15, 0, 0), + }, "MediaLibrary": {"maccatalyst": (13, 0, 0), "macosx": (10, 9, 0)}, "MediaPlayer": { "appletvos": (9, 0, 0), @@ -621,6 +641,10 @@ _FRAMEWORK_INTRODUCED_VERSIONS = { "watchos": (5, 0, 0), }, "MediaSetup": {"iphoneos": (14, 0, 0), "maccatalyst": (15, 4, 0)}, + "MediaToolbox": { + "iphoneos": (6, 0, 0), + 
"macosx": (10, 9, 0), + }, "MessageUI": {"iphoneos": (3, 0, 0), "maccatalyst": (13, 0, 0)}, "Messages": {"iphoneos": (10, 0, 0), "maccatalyst": (14, 0, 0)}, "Metal": { @@ -660,6 +684,7 @@ _FRAMEWORK_INTRODUCED_VERSIONS = { "MobileCoreServices": { "appletvos": (9, 0, 0), "iphoneos": (2, 0, 0), + "maccatalyst": (14, 0, 0), "watchos": (1, 0, 0), }, "ModelIO": { @@ -718,6 +743,14 @@ _FRAMEWORK_INTRODUCED_VERSIONS = { "macosx": (10, 15, 0), "watchos": (8, 0, 0), }, + "Observation": { + "iphoneos": (17, 0, 0), + "maccatalyst": (17, 0, 0), + "macosx": (14, 0, 0), + "tvos": (17, 0, 0), + "visionos": (1, 0, 0), + "watchos": (10, 0, 0), + }, "OpenCL": {"macosx": (10, 6, 0)}, "OpenDirectory": {"maccatalyst": (13, 0, 0), "macosx": (10, 6, 0)}, "OpenGL": {"macosx": (10, 0, 0)}, @@ -760,6 +793,10 @@ _FRAMEWORK_INTRODUCED_VERSIONS = { "watchos": (9, 0, 0), }, "PreferencePanes": {"maccatalyst": (14, 0, 0), "macosx": (10, 1, 0)}, + "ProximitReaderStub": { + "maccatalyst": (18, 0, 0), + "macosx": (15, 0, 0), + }, "ProximityReader": {"iphoneos": (15, 4, 0), "maccatalyst": (15, 4, 0)}, "PushKit": { "iphoneos": (8, 0, 0), @@ -834,6 +871,11 @@ _FRAMEWORK_INTRODUCED_VERSIONS = { }, "SecurityFoundation": {"maccatalyst": (13, 0, 0), "macosx": (10, 3, 0)}, "SecurityInterface": {"macosx": (10, 3, 0)}, + "SensitiveContentAnalysis": { + "iphoneos": (17, 0, 0), + "maccatalyst": (17, 0, 0), + "macosx": (14, 0, 0), + }, "SensorKit": { "iphoneos": (14, 0, 0), "maccatalyst": (14, 0, 0), @@ -883,6 +925,10 @@ _FRAMEWORK_INTRODUCED_VERSIONS = { "macosx": (10, 9, 0), "watchos": (3, 0, 0), }, + "StickerKit": { + "iphoneos": (18, 0, 0), + "macosx": (15, 0, 0), + }, "StoreKit": { "appletvos": (9, 0, 0), "iphoneos": (3, 0, 0), @@ -898,6 +944,14 @@ _FRAMEWORK_INTRODUCED_VERSIONS = { "watchos": (6, 0, 0), }, "SyncServices": {"macosx": (10, 4, 0)}, + "Synchronization": { + "iphoneos": (18, 0, 0), + "maccatalyst": (18, 0, 0), + "macosx": (15, 0, 0), + "tvos": (18, 0, 0), + "visionos": (2, 0, 0), + "watchos": (11, 0, 0), + }, "System": { "appletvos": (14, 0, 0), "iphoneos": (14, 0, 0), @@ -916,6 +970,9 @@ _FRAMEWORK_INTRODUCED_VERSIONS = { "TVServices": {"appletvos": (9, 0, 0)}, "TVUIKit": {"appletvos": (12, 0, 0)}, "TWAIN": {"macosx": (10, 2, 0)}, + "TabletopKit": { + "visionos": (2, 0, 0), + }, "TabularData": { "appletvos": (15, 0, 0), "iphoneos": (15, 0, 0), @@ -1015,6 +1072,13 @@ def _parse_version(version: str) -> (int, int, int): result[i] = components[i] return (result[0], result[1], result[2]) +def validate_sdk_frameworks(frameworks: list[str]) -> None: + for framework in frameworks: + if framework.startswith("$SDKROOT/System/Library/Frameworks"): + framework_name = framework[len("$SDKROOT/System/Library/Frameworks/"):-len(".framework")] + if framework_name not in FRAMEWORK_INTRODUCED_VERSIONS: + fail("Framework {} is missing version information".format(framework_name)) + def get_framework_linker_args(ctx: AnalysisContext, framework_names: list[str]) -> list[str]: if not has_apple_toolchain(ctx): return _get_unchecked_framework_linker_args(framework_names) @@ -1033,7 +1097,7 @@ def get_framework_linker_args(ctx: AnalysisContext, framework_names: list[str]) args = [] for name in framework_names: - versions = _FRAMEWORK_INTRODUCED_VERSIONS.get(name, None) + versions = FRAMEWORK_INTRODUCED_VERSIONS.get(name, None) if versions: introduced = versions.get(sdk_name, None) if not introduced: diff --git a/prelude/apple/apple_frameworks.bzl b/prelude/apple/apple_frameworks.bzl index 996fb28e902c4..2957cbc660303 100644 --- 
a/prelude/apple/apple_frameworks.bzl +++ b/prelude/apple/apple_frameworks.bzl @@ -22,8 +22,8 @@ load( "merge_swift_runtime_linkables", "merge_swiftmodule_linkables", ) -load("@prelude//utils:utils.bzl", "expect") -load(":apple_framework_versions.bzl", "get_framework_linker_args") +load("@prelude//utils:expect.bzl", "expect") +load(":apple_framework_versions.bzl", "get_framework_linker_args", "validate_sdk_frameworks") load(":apple_toolchain_types.bzl", "AppleToolchainInfo") _IMPLICIT_SDKROOT_FRAMEWORK_SEARCH_PATHS = [ @@ -55,6 +55,7 @@ def _get_apple_frameworks_linker_flags(ctx: AnalysisContext, linkable: [Framewor return flags def get_framework_search_path_flags(ctx: AnalysisContext) -> cmd_args: + validate_sdk_frameworks(ctx.attrs.frameworks) unresolved_framework_dirs = _get_non_sdk_unresolved_framework_directories(ctx.attrs.frameworks) expanded_framework_dirs = _expand_sdk_framework_paths(ctx, unresolved_framework_dirs) return _get_framework_search_path_flags(expanded_framework_dirs) diff --git a/prelude/apple/apple_info_plist.bzl b/prelude/apple/apple_info_plist.bzl index 44a33f7a51fee..6e9c4d28cb8f0 100644 --- a/prelude/apple/apple_info_plist.bzl +++ b/prelude/apple/apple_info_plist.bzl @@ -7,7 +7,7 @@ load(":apple_bundle_destination.bzl", "AppleBundleDestination") load(":apple_bundle_part.bzl", "AppleBundlePart") -load(":apple_bundle_utility.bzl", "get_bundle_min_target_version", "get_product_name") +load(":apple_bundle_utility.bzl", "get_bundle_min_target_version", "get_default_binary_dep", "get_product_name") load(":apple_sdk.bzl", "get_apple_sdk_name") load( ":apple_sdk_metadata.bzl", @@ -18,9 +18,10 @@ load( "WatchSimulatorSdkMetadata", "get_apple_sdk_metadata_for_sdk_name", ) +load(":apple_target_sdk_version.bzl", "get_platform_name_for_sdk", "get_platform_version_for_sdk_version") load(":apple_toolchain_types.bzl", "AppleToolchainInfo", "AppleToolsInfo") -def process_info_plist(ctx: AnalysisContext, override_input: [Artifact, None]) -> AppleBundlePart: +def process_info_plist(ctx: AnalysisContext, override_input: Artifact | None) -> AppleBundlePart: input = _preprocess_info_plist(ctx) output = ctx.actions.declare_output("Info.plist") additional_keys = _additional_keys_as_json_file(ctx) @@ -64,7 +65,7 @@ def _preprocess_info_plist(ctx: AnalysisContext) -> Artifact: ctx.actions.run(command, category = "apple_preprocess_info_plist", **_get_plist_run_options()) return output -def _plist_substitutions_as_json_file(ctx: AnalysisContext) -> [Artifact, None]: +def _plist_substitutions_as_json_file(ctx: AnalysisContext) -> Artifact | None: info_plist_substitutions = ctx.attrs.info_plist_substitutions if not info_plist_substitutions: return None @@ -72,7 +73,7 @@ def _plist_substitutions_as_json_file(ctx: AnalysisContext) -> [Artifact, None]: substitutions_json = ctx.actions.write_json("plist_substitutions.json", info_plist_substitutions) return substitutions_json -def process_plist(ctx: AnalysisContext, input: Artifact, output: OutputArtifact, override_input: [Artifact, None] = None, additional_keys: [Artifact, None] = None, override_keys: [Artifact, None] = None, action_id: [str, None] = None): +def process_plist(ctx: AnalysisContext, input: Artifact, output: OutputArtifact, override_input: Artifact | None = None, additional_keys: Artifact | None = None, override_keys: Artifact | None = None, action_id: [str, None] = None): apple_tools = ctx.attrs._apple_tools[AppleToolsInfo] processor = apple_tools.info_plist_processor override_input_arguments = ["--override-input", override_input] if 
override_input != None else [] @@ -94,14 +95,19 @@ def _additional_keys_as_json_file(ctx: AnalysisContext) -> Artifact: def _info_plist_additional_keys(ctx: AnalysisContext) -> dict[str, typing.Any]: sdk_name = get_apple_sdk_name(ctx) + platform_name = get_platform_name_for_sdk(sdk_name) sdk_metadata = get_apple_sdk_metadata_for_sdk_name(sdk_name) result = _extra_mac_info_plist_keys(sdk_metadata, ctx.attrs.extension) result["CFBundleSupportedPlatforms"] = sdk_metadata.info_plist_supported_platforms_values - result["DTPlatformName"] = sdk_name + result["DTPlatformName"] = platform_name sdk_version = ctx.attrs._apple_toolchain[AppleToolchainInfo].sdk_version if sdk_version: - result["DTPlatformVersion"] = sdk_version - result["DTSDKName"] = sdk_name + sdk_version + platform_version = get_platform_version_for_sdk_version( + sdk_name = sdk_name, + sdk_version = sdk_version, + ) + result["DTPlatformVersion"] = platform_version + result["DTSDKName"] = platform_name + platform_version sdk_build_version = ctx.attrs._apple_toolchain[AppleToolchainInfo].sdk_build_version if sdk_build_version: result["DTPlatformBuild"] = sdk_build_version @@ -112,7 +118,10 @@ def _info_plist_additional_keys(ctx: AnalysisContext) -> dict[str, typing.Any]: xcode_version = ctx.attrs._apple_toolchain[AppleToolchainInfo].xcode_version if xcode_version: result["DTXcode"] = xcode_version - result[sdk_metadata.min_version_plist_info_key] = get_bundle_min_target_version(ctx, ctx.attrs.binary) + result[sdk_metadata.min_version_plist_info_key] = get_platform_version_for_sdk_version( + sdk_name = sdk_name, + sdk_version = get_bundle_min_target_version(ctx, get_default_binary_dep(ctx.attrs.binary)), + ) identify_build_system = ctx.attrs._info_plist_identify_build_system_default if ctx.attrs.info_plist_identify_build_system != None: @@ -124,7 +133,7 @@ def _info_plist_additional_keys(ctx: AnalysisContext) -> dict[str, typing.Any]: return result def _extra_mac_info_plist_keys(sdk_metadata: AppleSdkMetadata, extension: str) -> dict[str, typing.Any]: - if sdk_metadata.name == MacOSXSdkMetadata.name and extension == "xpc": + if sdk_metadata.name == MacOSXSdkMetadata.name and extension != "xpc": return { "NSHighResolutionCapable": True, "NSSupportsAutomaticGraphicsSwitching": True, @@ -142,6 +151,9 @@ def _info_plist_override_keys(ctx: AnalysisContext) -> dict[str, typing.Any]: if sdk_name == MacOSXSdkMetadata.name: if ctx.attrs.extension != "xpc": result["LSRequiresIPhoneOS"] = False - elif sdk_name not in [WatchOSSdkMetadata.name, WatchSimulatorSdkMetadata.name, MacOSXCatalystSdkMetadata.name]: + elif sdk_name in [WatchOSSdkMetadata.name, WatchSimulatorSdkMetadata.name]: + result["UIDeviceFamily"] = [4] + result["WKApplication"] = True + elif sdk_name not in [MacOSXCatalystSdkMetadata.name]: result["LSRequiresIPhoneOS"] = True return result diff --git a/prelude/apple/apple_info_plist_substitutions_parsing.bzl b/prelude/apple/apple_info_plist_substitutions_parsing.bzl index 38fec6f4f3348..c5e726fdb36dd 100644 --- a/prelude/apple/apple_info_plist_substitutions_parsing.bzl +++ b/prelude/apple/apple_info_plist_substitutions_parsing.bzl @@ -51,7 +51,7 @@ def _expand_codesign_entitlements_path(info_plist_substitutions: dict[str, str], path = prefix + maybe_value + suffix fail("Too many iteration (loop might be present) to expand `{}` with substitutions `{}`".format(path, info_plist_substitutions)) -def parse_codesign_entitlements(info_plist_substitutions: [dict[str, str], None]) -> [str, None]: +def 
parse_codesign_entitlements(info_plist_substitutions: [dict[str, str | Select], None]) -> [str, None]: if not info_plist_substitutions: return None maybe_path = info_plist_substitutions.get(_CODE_SIGN_ENTITLEMENTS_KEY) diff --git a/prelude/apple/apple_library.bzl b/prelude/apple/apple_library.bzl index b2d04862dfb64..3ef3c923fbb82 100644 --- a/prelude/apple/apple_library.bzl +++ b/prelude/apple/apple_library.bzl @@ -7,13 +7,21 @@ load( "@prelude//:artifact_tset.bzl", + "make_artifact_tset", "project_artifacts", ) +load("@prelude//:attrs_validators.bzl", "get_attrs_validators_outputs") +load("@prelude//:paths.bzl", "paths") +load("@prelude//:validation_deps.bzl", "get_validation_deps_outputs") load("@prelude//apple:apple_dsym.bzl", "DSYM_SUBTARGET", "get_apple_dsym") +load("@prelude//apple:apple_error_handler.bzl", "apple_build_error_handler") load("@prelude//apple:apple_stripping.bzl", "apple_strip_args") -# @oss-disable: load("@prelude//apple/meta_only:linker_outputs.bzl", "add_extra_linker_outputs") +load("@prelude//apple:apple_toolchain_types.bzl", "AppleToolchainInfo") +# @oss-disable: load("@prelude//apple/meta_only:apple_library_meta_validation.bzl", "apple_library_validate_for_meta_restrictions") +# @oss-disable: load("@prelude//apple/meta_only:linker_outputs.bzl", "get_extra_linker_output_flags", "get_extra_linker_outputs") load( "@prelude//apple/swift:swift_compilation.bzl", + "SwiftLibraryForDistributionOutput", # @unused Used as a type "compile_swift", "get_swift_anonymous_targets", "get_swift_debug_infos", @@ -30,7 +38,8 @@ load( ) load( "@prelude//cxx:compile.bzl", - "CxxSrcWithFlags", # @unused Used as a type + "AsmExtensions", + "CxxSrcCompileCommand", # @unused Used as a type ) load( "@prelude//cxx:cxx_library.bzl", @@ -42,7 +51,15 @@ load( "cxx_attr_deps", "cxx_attr_exported_deps", ) -load("@prelude//cxx:cxx_sources.bzl", "get_srcs_with_flags") +load( + "@prelude//cxx:cxx_sources.bzl", + "CxxSrcWithFlags", # @unused Used as a type + "get_srcs_with_flags", +) +load( + "@prelude//cxx:cxx_toolchain_types.bzl", + "CxxToolchainInfo", # @unused Used as type +) load( "@prelude//cxx:cxx_types.bzl", "CxxRuleAdditionalParams", @@ -50,7 +67,7 @@ load( "CxxRuleProviderParams", "CxxRuleSubTargetParams", ) -load("@prelude//cxx:headers.bzl", "cxx_attr_exported_headers") +load("@prelude//cxx:headers.bzl", "cxx_attr_exported_headers", "cxx_attr_headers_list") load( "@prelude//cxx:linker.bzl", "SharedLibraryFlagOverrides", @@ -61,16 +78,20 @@ load( "CPreprocessorArgs", "CPreprocessorInfo", # @unused Used as a type ) +load("@prelude//cxx:target_sdk_version.bzl", "get_unversioned_target_triple") load( "@prelude//linking:link_info.bzl", + "ExtraLinkerOutputs", "LibOutputStyle", ) load("@prelude//utils:arglike.bzl", "ArgLike") -load("@prelude//utils:utils.bzl", "expect") +load("@prelude//utils:expect.bzl", "expect") +load("@prelude//apple/mockingbird/mockingbird_types.bzl", "MockingbirdLibraryInfo", "MockingbirdLibraryInfoTSet", "MockingbirdLibraryRecord", "MockingbirdSourcesInfo", "MockingbirdTargetType") load(":apple_bundle_types.bzl", "AppleBundleLinkerMapInfo", "AppleMinDeploymentVersionInfo") load(":apple_frameworks.bzl", "get_framework_search_path_flags") +load(":apple_library_types.bzl", "AppleLibraryInfo") load(":apple_modular_utility.bzl", "MODULE_CACHE_PATH") -load(":apple_target_sdk_version.bzl", "get_min_deployment_version_for_node", "get_min_deployment_version_target_linker_flags", "get_min_deployment_version_target_preprocessor_flags") +load(":apple_target_sdk_version.bzl", 
"get_min_deployment_version_for_node") load(":apple_utility.bzl", "get_apple_cxx_headers_layout", "get_apple_stripped_attr_value_with_default_fallback", "get_module_name") load( ":debug.bzl", @@ -82,6 +103,13 @@ load(":resource_groups.bzl", "create_resource_graph") load(":xcode.bzl", "apple_populate_xcode_attributes") load(":xctest_swift_support.bzl", "xctest_swift_support_info") +AppleSharedLibraryMachOFileType = enum( + # dynamically bound shared library file + "dylib", + # dynamically bound bundle file aka Mach-O bundle + "bundle", +) + AppleLibraryAdditionalParams = record( # Name of the top level rule utilizing the apple_library rule. rule_type = str, @@ -104,30 +132,185 @@ AppleLibraryAdditionalParams = record( force_link_group_linking = field(bool, False), ) +AppleLibraryForDistributionInfo = provider( + fields = { + "module_name": str, + "private_swiftinterface": Artifact, + "swiftdoc": Artifact, + "swiftinterface": Artifact, + "target_triple": str, + }, +) + def apple_library_impl(ctx: AnalysisContext) -> [Promise, list[Provider]]: + # @oss-disable: apple_library_validate_for_meta_restrictions(ctx) + def get_apple_library_providers(deps_providers) -> list[Provider]: + shared_type = AppleSharedLibraryMachOFileType(ctx.attrs.shared_library_macho_file_type) + if shared_type == AppleSharedLibraryMachOFileType("bundle"): + shared_library_flags_overrides = SharedLibraryFlagOverrides( + # When `-bundle` is used we can't use the `-install_name` args, thus we keep this field empty. + shared_library_name_linker_flags_format = [], + shared_library_flags = ["-bundle"], + ) + elif shared_type == AppleSharedLibraryMachOFileType("dylib"): + shared_library_flags_overrides = None + else: + fail("Unsupported `shared_library_macho_file_type` attribute value: `{}`".format(shared_type)) constructor_params = apple_library_rule_constructor_params_and_swift_providers( ctx, AppleLibraryAdditionalParams( rule_type = "apple_library", generate_providers = CxxRuleProviderParams( java_packaging_info = False, + java_global_code_info = False, android_packageable_info = False, omnibus_root = False, + # We generate a provider on our own, disable to avoid several providers of same type. + cxx_resources_as_apple_resources = False, ), + shared_library_flags = shared_library_flags_overrides, ), deps_providers, ) output = cxx_library_parameterized(ctx, constructor_params) - return output.providers + + return output.providers + _make_mockingbird_library_info_provider(ctx) if uses_explicit_modules(ctx): return get_swift_anonymous_targets(ctx, get_apple_library_providers) else: return get_apple_library_providers([]) +def _compile_index_store(ctx: AnalysisContext, src_compile_cmd: CxxSrcCompileCommand, toolchain: CxxToolchainInfo, compile_cmd: cmd_args, pic: bool) -> Artifact | None: + identifier = src_compile_cmd.src.short_path + if src_compile_cmd.index != None: + # Add a unique postfix if we have duplicate source files with different flags + identifier = identifier + "_" + str(src_compile_cmd.index) + filename_base = identifier + identifier += " (index_store)" + + # We generate the index only for pic compilations + if not pic: + return None + + if src_compile_cmd.src.extension in AsmExtensions.values(): + return None + + cmd = compile_cmd.copy() + + # We use `-fsyntax-only` flag, so output will be not generated. 
+    # The output here is used for the identifier of the index unit file
+    output_name = paths.join(
+        ctx.label.cell,
+        ctx.label.package,
+        ctx.label.name,
+        "{}.{}".format(filename_base, toolchain.linker_info.object_file_extension),
+    )
+    cmd.add(["-o", output_name])
+
+    index_store = ctx.actions.declare_output(paths.join("__indexstore__", filename_base, "index_store"), dir = True)
+
+    # We don't use `-fdebug-prefix-map` for now; index-import will be used later to remap the paths, though that is not ideal.
+    cmd.add([
+        "-fsyntax-only",
+        "-index-ignore-system-symbols",
+        "-index-store-path",
+        index_store.as_output(),
+    ])
+
+    category = "apple_cxx_index_store"
+    ctx.actions.run(
+        cmd,
+        category = category,
+        identifier = identifier,
+        allow_cache_upload = True,
+    )
+
+    return index_store
+
+def _make_apple_library_for_distribution_info_provider(ctx: AnalysisContext, swift_library_for_distribution: [None, SwiftLibraryForDistributionOutput]) -> list[AppleLibraryForDistributionInfo]:
+    if not swift_library_for_distribution:
+        return []
+    return [AppleLibraryForDistributionInfo(
+        target_triple = get_unversioned_target_triple(ctx).replace("macosx", "macos"),
+        swiftinterface = swift_library_for_distribution.swiftinterface,
+        private_swiftinterface = swift_library_for_distribution.private_swiftinterface,
+        swiftdoc = swift_library_for_distribution.swiftdoc,
+        module_name = get_module_name(ctx),
+    )]
+
+def _make_apple_library_info_provider(ctx: AnalysisContext, swift_header: [None, Artifact]) -> list[AppleLibraryInfo]:
+    public_framework_headers = cxx_attr_headers_list(ctx, ctx.attrs.public_framework_headers, [], get_apple_cxx_headers_layout(ctx))
+    all_deps = cxx_attr_deps(ctx) + cxx_attr_exported_deps(ctx)
+    apple_library_infos = filter(None, [dep.get(AppleLibraryInfo) for dep in all_deps])
+
+    public_framework_header_tset = make_artifact_tset(
+        actions = ctx.actions,
+        label = ctx.label,
+        artifacts = [header.artifact for header in public_framework_headers],
+        children = [apple_library.public_framework_headers for apple_library in apple_library_infos],
+    )
+
+    return [AppleLibraryInfo(
+        public_framework_headers = public_framework_header_tset,
+        swift_header = swift_header,
+        target = ctx.label,
+        labels = ctx.attrs.labels,
+    )]
+
+def _make_mockingbird_library_info_provider(ctx: AnalysisContext) -> list[MockingbirdLibraryInfo]:
+    _, swift_sources = _filter_swift_srcs(ctx)
+
+    if len(swift_sources) == 0:
+        return []
+
+    deps_mockingbird_infos = filter(None, [dep.get(MockingbirdLibraryInfo) for dep in cxx_attr_deps(ctx)])
+    exported_deps_mockingbird_infos = filter(None, [dep.get(MockingbirdLibraryInfo) for dep in cxx_attr_exported_deps(ctx)])
+
+    children = []
+    dep_names = []
+    exported_dep_names = []
+    for info in deps_mockingbird_infos:
+        dep_names.append(info.name)
+        children.append(info.tset)
+
+    for info in exported_deps_mockingbird_infos:
+        exported_dep_names.append(info.name)
+        children.append(info.tset)
+
+    mockingbird_srcs_folder = ctx.actions.declare_output("mockingbird_srcs_" + ctx.attrs.name, dir = True)
+
+    ctx.actions.symlinked_dir(
+        mockingbird_srcs_folder,
+        {source.file.basename: source.file for source in swift_sources},
+    )
+
+    mockingbird_record = MockingbirdLibraryRecord(
+        name = ctx.attrs.name,
+        srcs = [src.file for src in swift_sources],
+        dep_names = dep_names,
+        exported_dep_names = exported_dep_names,
+        type = MockingbirdTargetType("library"),
+        src_dir = mockingbird_srcs_folder,
+    )
+
+    mockingbird_tset = ctx.actions.tset(MockingbirdLibraryInfoTSet, value = mockingbird_record, children = children)
+
+    return [MockingbirdLibraryInfo(
+        name = ctx.attrs.name,
+        tset = mockingbird_tset,
+    )]
+
 def apple_library_rule_constructor_params_and_swift_providers(ctx: AnalysisContext, params: AppleLibraryAdditionalParams, deps_providers: list = [], is_test_target: bool = False) -> CxxRuleConstructorParams:
-    cxx_srcs, swift_srcs = _filter_swift_srcs(ctx)
+    mockingbird_gen_sources = []
+    if "dummy_library" not in ctx.attrs.labels:
+        for dep in cxx_attr_deps(ctx) + cxx_attr_exported_deps(ctx):
+            if MockingbirdSourcesInfo in dep:
+                for src in dep[MockingbirdSourcesInfo].srcs:
+                    mockingbird_gen_sources.append(src)
+
+    cxx_srcs, swift_srcs = _filter_swift_srcs(ctx, mockingbird_gen_sources)
 
     # First create a modulemap if necessary. This is required for importing
     # ObjC code in Swift so must be done before Swift compilation.
@@ -138,7 +321,7 @@ def apple_library_rule_constructor_params_and_swift_providers(ctx: AnalysisConte
     modulemap_pre = None
 
     framework_search_paths_flags = get_framework_search_path_flags(ctx)
-    swift_compile = compile_swift(
+    swift_compile, swift_interface = compile_swift(
         ctx,
         swift_srcs,
         True,  # parse_as_library
@@ -148,7 +331,7 @@ def apple_library_rule_constructor_params_and_swift_providers(ctx: AnalysisConte
         framework_search_paths_flags,
         params.extra_swift_compiler_flags,
     )
-    swift_object_files = [swift_compile.object_file] if swift_compile else []
+    swift_object_files = swift_compile.object_files if swift_compile else []
 
     swift_pre = CPreprocessor()
     if swift_compile:
@@ -166,13 +349,23 @@ def apple_library_rule_constructor_params_and_swift_providers(ctx: AnalysisConte
     else:
         exported_pre = None
 
-    swift_dependency_info = swift_compile.dependency_info if swift_compile else get_swift_dependency_info(ctx, None, None, deps_providers)
+    swift_dependency_info = swift_compile.dependency_info if swift_compile else get_swift_dependency_info(ctx, None, deps_providers)
     swift_debug_info = get_swift_debug_infos(
         ctx,
         swift_dependency_info,
         swift_compile,
     )
 
+    swift_toolchain = ctx.attrs._apple_toolchain[AppleToolchainInfo].swift_toolchain_info
+    if swift_toolchain and swift_toolchain.supports_relative_resource_dir:
+        resource_dir_args = []
+    else:
+        # We have to use this hack to make compilation work when Clang modules
+        # are enabled and using toolchains that don't support relative resource
+        # directories correctly. The builtin headers will be embedded relative
+        # to the CWD, so we need to add `.` for them to be located correctly.
+        resource_dir_args = ["-I."]
+
     modular_pre = CPreprocessor(
         uses_modules = ctx.attrs.uses_modules,
         modular_args = [
@@ -180,18 +373,14 @@ def apple_library_rule_constructor_params_and_swift_providers(ctx: AnalysisConte
             "-fmodules",
             "-fmodule-name=" + get_module_name(ctx),
             "-fmodules-cache-path=" + MODULE_CACHE_PATH,
-            # TODO(T123756899): We have to use this hack to make compilation work
-            # when Clang modules are enabled and using toolchains. That's because
-            # resource-dir is passed as a relative path (so that no abs paths appear
-            # in any .pcm). The compiler will then expand and generate #include paths
-            # that won't work unless we have the directive below.
- "-I.", - ], + ] + resource_dir_args, ) contains_swift_sources = bool(swift_srcs) xctest_swift_support_provider = xctest_swift_support_info(ctx, contains_swift_sources, is_test_target) + attrs_validators_providers, attrs_validators_subtargets = get_attrs_validators_outputs(ctx) + def additional_providers_factory(propagated_exported_preprocessor_info: [CPreprocessorInfo, None]) -> list[Provider]: # Expose `SwiftPCMUncompiledInfo` which represents the ObjC part of a target, # if a target also has a Swift part, the provider will expose the generated `-Swift.h` header. @@ -204,21 +393,70 @@ def apple_library_rule_constructor_params_and_swift_providers(ctx: AnalysisConte providers = [swift_pcm_uncompile_info] if swift_pcm_uncompile_info else [] providers.append(swift_dependency_info) providers.append(xctest_swift_support_provider) + providers.extend(attrs_validators_providers) + return providers framework_search_path_pre = CPreprocessor( - relative_args = CPreprocessorArgs(args = [framework_search_paths_flags]), + args = CPreprocessorArgs(args = [framework_search_paths_flags]), ) + validation_deps_outputs = get_validation_deps_outputs(ctx) + if swift_compile: + swift_objc_header = swift_compile.exported_swift_header + swift_library_for_distribution_output = swift_compile.swift_library_for_distribution_output + else: + swift_objc_header = None + swift_library_for_distribution_output = None + + extra_apple_providers = [] + if not is_test_target: + extra_apple_providers = _make_apple_library_info_provider(ctx, swift_objc_header) + _make_apple_library_for_distribution_info_provider(ctx, swift_library_for_distribution_output) + + # Always provide a valid JSON object, so that tooling can depend on its existance + modulemap_info_json = {"modulemap": exported_pre.modulemap_path} if (exported_pre and exported_pre.modulemap_path) else {} + modulemap_info_json_file = ctx.actions.declare_output("modulemap-info.json") + modulemap_info_json_cmd_args = ctx.actions.write_json(modulemap_info_json_file, modulemap_info_json, with_inputs = True, pretty = True) + modulemap_info_providers = [DefaultInfo(default_output = modulemap_info_json_file, other_outputs = [modulemap_info_json_cmd_args])] + + subtargets = { + "modulemap-info": modulemap_info_providers, + "swift-compilation-database": [DefaultInfo(default_output = None)], + "swift-compile": [DefaultInfo(default_output = None)], + "swift-interface": [swift_interface], + "swiftmodule": [DefaultInfo(default_output = None)], + } + if swift_compile: + subtargets["swift-compilation-database"] = [ + DefaultInfo( + default_output = swift_compile.compilation_database.db, + other_outputs = [swift_compile.compilation_database.other_outputs], + ), + ] + subtargets["swift-compile"] = [DefaultInfo(default_outputs = swift_compile.object_files)] + + if swift_compile.output_map_artifact: + subtargets["swift-output-file-map"] = [DefaultInfo(default_output = swift_compile.output_map_artifact)] + + if swift_compile.swiftdeps: + subtargets["swiftdeps"] = [ + DefaultInfo( + default_output = swift_compile.swiftdeps[0], + other_outputs = swift_compile.swiftdeps[1:], + ), + ] + + subtargets["swiftmodule"] = [DefaultInfo(default_output = swift_compile.swiftmodule)] + return CxxRuleConstructorParams( rule_type = params.rule_type, is_test = (params.rule_type == "apple_test"), headers_layout = get_apple_cxx_headers_layout(ctx), extra_exported_link_flags = params.extra_exported_link_flags, - extra_link_flags = [_get_linker_flags(ctx)], + extra_hidden = validation_deps_outputs, 
extra_link_input = swift_object_files, extra_link_input_has_external_debug_info = True, - extra_preprocessors = get_min_deployment_version_target_preprocessor_flags(ctx) + [swift_pre, modular_pre], + extra_preprocessors = [swift_pre, modular_pre], extra_exported_preprocessors = filter(None, [framework_search_path_pre, exported_pre]), srcs = cxx_srcs, additional = CxxRuleAdditionalParams( @@ -229,43 +467,54 @@ def apple_library_rule_constructor_params_and_swift_providers(ctx: AnalysisConte # follow. static_external_debug_info = swift_debug_info.static, shared_external_debug_info = swift_debug_info.shared, - subtargets = { - "swift-compilation-database": [ - DefaultInfo( - default_output = swift_compile.compilation_database.db if swift_compile else None, - other_outputs = [swift_compile.compilation_database.other_outputs] if swift_compile else [], - ), - ], - "swift-compile": [DefaultInfo(default_output = swift_compile.object_file if swift_compile else None)], - }, + subtargets = subtargets | attrs_validators_subtargets, additional_providers_factory = additional_providers_factory, + external_debug_info_tags = [], # This might be used to materialise all transitive Swift related object files with ArtifactInfoTag("swiftmodule") ), - output_style_sub_targets_and_providers_factory = _get_link_style_sub_targets_and_providers, + output_style_sub_targets_and_providers_factory = _get_link_style_sub_targets_and_providers(extra_apple_providers), shared_library_flags = params.shared_library_flags, # apple_library's 'stripped' arg only applies to shared subtargets, or, # targets with 'preferred_linkage = "shared"' strip_executable = get_apple_stripped_attr_value_with_default_fallback(ctx), strip_args_factory = apple_strip_args, force_link_group_linking = params.force_link_group_linking, - cxx_populate_xcode_attributes_func = lambda local_ctx, **kwargs: _xcode_populate_attributes(ctx = local_ctx, populate_xcode_attributes_func = params.populate_xcode_attributes_func, **kwargs), + cxx_populate_xcode_attributes_func = lambda local_ctx, **kwargs: _xcode_populate_attributes(ctx = local_ctx, populate_xcode_attributes_func = params.populate_xcode_attributes_func, contains_swift_sources = contains_swift_sources, **kwargs), generate_sub_targets = params.generate_sub_targets, generate_providers = params.generate_providers, # Some apple rules rely on `static` libs *not* following dependents. 
link_groups_force_static_follows_dependents = False, - extra_linker_outputs_factory = _get_extra_linker_flags_and_outputs, + extra_linker_outputs_factory = _get_extra_linker_outputs, + extra_linker_outputs_flags_factory = _get_extra_linker_outputs_flags, swiftmodule_linkable = get_swiftmodule_linkable(swift_compile), + extra_shared_library_interfaces = [swift_compile.exported_symbols] if (swift_compile and swift_compile.exported_symbols) else None, + compiler_flags = ctx.attrs.compiler_flags, + lang_compiler_flags = ctx.attrs.lang_compiler_flags, + platform_compiler_flags = ctx.attrs.platform_compiler_flags, + lang_platform_compiler_flags = ctx.attrs.lang_platform_compiler_flags, + preprocessor_flags = ctx.attrs.preprocessor_flags, + lang_preprocessor_flags = ctx.attrs.lang_preprocessor_flags, + platform_preprocessor_flags = ctx.attrs.platform_preprocessor_flags, + lang_platform_preprocessor_flags = ctx.attrs.lang_platform_preprocessor_flags, + swift_objc_header = swift_objc_header, + error_handler = apple_build_error_handler, + index_store_factory = _compile_index_store, + index_stores = swift_compile.index_stores if swift_compile else None, ) -def _get_extra_linker_flags_and_outputs( - ctx: AnalysisContext) -> (list[ArgLike], dict[str, list[DefaultInfo]]): +def _get_extra_linker_outputs(ctx: AnalysisContext) -> ExtraLinkerOutputs: _ = ctx # buildifier: disable=unused-variable - # @oss-disable: return add_extra_linker_outputs(ctx) - return [], {} # @oss-enable + # @oss-disable: return get_extra_linker_outputs(ctx) + return ExtraLinkerOutputs() # @oss-enable -def _filter_swift_srcs(ctx: AnalysisContext) -> (list[CxxSrcWithFlags], list[CxxSrcWithFlags]): +def _get_extra_linker_outputs_flags(ctx: AnalysisContext, outputs: dict[str, Artifact]) -> list[ArgLike]: + _ = ctx # buildifier: disable=unused-variable + # @oss-disable: return get_extra_linker_output_flags(ctx, outputs) + return [] # @oss-enable + +def _filter_swift_srcs(ctx: AnalysisContext, additional_srcs: list = []) -> (list[CxxSrcWithFlags], list[CxxSrcWithFlags]): cxx_srcs = [] swift_srcs = [] - for s in get_srcs_with_flags(ctx): + for s in get_srcs_with_flags(ctx, additional_srcs): if s.file.extension == SWIFT_EXTENSION: swift_srcs.append(s) else: @@ -274,76 +523,79 @@ def _filter_swift_srcs(ctx: AnalysisContext) -> (list[CxxSrcWithFlags], list[Cxx return cxx_srcs, swift_srcs def _get_link_style_sub_targets_and_providers( - output_style: LibOutputStyle, - ctx: AnalysisContext, - output: [CxxLibraryOutput, None]) -> (dict[str, list[Provider]], list[Provider]): - # We always propagate a resource graph regardless of link style or empty output - resource_graph = create_resource_graph( - ctx = ctx, - labels = ctx.attrs.labels, - deps = cxx_attr_deps(ctx), - exported_deps = cxx_attr_exported_deps(ctx), - # Shared libraries should not propagate their resources to rdeps, - # they should only be contained in their frameworks apple_bundle. 
- should_propagate = output_style != LibOutputStyle("shared_lib"), - ) - if output_style != LibOutputStyle("shared_lib") or output == None: - return ({}, [resource_graph]) + extra_providers: list[Provider]) -> typing.Callable: + def get_link_style_sub_targets_impl( + output_style: LibOutputStyle, + ctx: AnalysisContext, + output: [CxxLibraryOutput, None]) -> (dict[str, list[Provider]], list[Provider]): + # We always propagate a resource graph regardless of link style or empty output + resource_graph = create_resource_graph( + ctx = ctx, + labels = ctx.attrs.labels, + deps = cxx_attr_deps(ctx), + exported_deps = cxx_attr_exported_deps(ctx), + # Shared libraries should not propagate their resources to rdeps, + # they should only be contained in their frameworks apple_bundle. + should_propagate = output_style != LibOutputStyle("shared_lib"), + ) - min_version = get_min_deployment_version_for_node(ctx) - min_version_providers = [AppleMinDeploymentVersionInfo(version = min_version)] + if output_style != LibOutputStyle("shared_lib") or output == None: + return ({}, [resource_graph] + extra_providers) - debug_info = project_artifacts( - actions = ctx.actions, - tsets = [output.external_debug_info], - ) + min_version = get_min_deployment_version_for_node(ctx) + min_version_providers = [AppleMinDeploymentVersionInfo(version = min_version)] - if get_apple_stripped_attr_value_with_default_fallback(ctx): - if False: - # TODO(nga): `output.unstripped` is never `None`. - def unknown(): - pass + debug_info = project_artifacts( + actions = ctx.actions, + tsets = [output.external_debug_info], + ) - output = unknown() - expect(output.unstripped != None, "Expecting unstripped output to be non-null when stripping is enabled.") - dsym_executable = output.unstripped - else: - dsym_executable = output.default - dsym_artifact = get_apple_dsym( - ctx = ctx, - executable = dsym_executable, - debug_info = debug_info, - action_identifier = dsym_executable.short_path, - ) - debug_info_artifacts_manifest = ctx.actions.write( - "debuginfo.artifacts", - debug_info, - with_inputs = True, - ) - subtargets = { - DSYM_SUBTARGET: [DefaultInfo(default_output = dsym_artifact)], - DEBUGINFO_SUBTARGET: [DefaultInfo(default_output = debug_info_artifacts_manifest)], - } - providers = [ - AppleDebuggableInfo(dsyms = [dsym_artifact], debug_info_tset = output.external_debug_info), - resource_graph, - ] + min_version_providers + if get_apple_stripped_attr_value_with_default_fallback(ctx): + if False: + # TODO(nga): `output.unstripped` is never `None`. 
+ def unknown(): + pass + + output = unknown() + expect(output.unstripped != None, "Expecting unstripped output to be non-null when stripping is enabled.") + dsym_executable = output.unstripped + else: + dsym_executable = output.default + dsym_artifact = get_apple_dsym( + ctx = ctx, + executable = dsym_executable, + debug_info = debug_info, + action_identifier = dsym_executable.short_path, + ) + debug_info_artifacts_manifest = ctx.actions.write( + "debuginfo.artifacts", + debug_info, + with_inputs = True, + ) + subtargets = { + DSYM_SUBTARGET: [DefaultInfo(default_output = dsym_artifact)], + DEBUGINFO_SUBTARGET: [DefaultInfo(default_output = debug_info_artifacts_manifest)], + } + providers = [ + AppleDebuggableInfo(dsyms = [dsym_artifact], debug_info_tset = output.external_debug_info), + resource_graph, + ] + min_version_providers + extra_providers - if output.linker_map != None: - subtargets["linker-map"] = [DefaultInfo(default_output = output.linker_map.map, other_outputs = [output.linker_map.binary])] - providers += [AppleBundleLinkerMapInfo(linker_maps = [output.linker_map.map])] + if output.linker_map != None: + subtargets["linker-map"] = [DefaultInfo(default_output = output.linker_map.map, other_outputs = [output.linker_map.binary])] + providers += [AppleBundleLinkerMapInfo(linker_maps = [output.linker_map.map])] - return (subtargets, providers) + return (subtargets, providers) -def _get_linker_flags(ctx: AnalysisContext) -> cmd_args: - return cmd_args(get_min_deployment_version_target_linker_flags(ctx)) + return get_link_style_sub_targets_impl def _xcode_populate_attributes( ctx, srcs: list[CxxSrcWithFlags], argsfiles: dict[str, CompileArgsfile], populate_xcode_attributes_func: typing.Callable, + contains_swift_sources: bool, **_kwargs) -> dict[str, typing.Any]: # Overwrite the product name - data = populate_xcode_attributes_func(ctx, srcs = srcs, argsfiles = argsfiles, product_name = ctx.attrs.name) + data = populate_xcode_attributes_func(ctx, srcs = srcs, argsfiles = argsfiles, product_name = ctx.attrs.name, contains_swift_sources = contains_swift_sources) return data diff --git a/prelude/apple/apple_library_types.bzl b/prelude/apple/apple_library_types.bzl new file mode 100644 index 0000000000000..6d0af422dd1e5 --- /dev/null +++ b/prelude/apple/apple_library_types.bzl @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load( + "@prelude//:artifact_tset.bzl", + "ArtifactTSet", +) + +AppleLibraryInfo = provider( + fields = { + "labels": list[str], + "public_framework_headers": ArtifactTSet, + "swift_header": [Artifact, None], + "target": Label, + }, +) diff --git a/prelude/apple/apple_macro_layer.bzl b/prelude/apple/apple_macro_layer.bzl index f3067037429e5..75092f37a9c2d 100644 --- a/prelude/apple/apple_macro_layer.bzl +++ b/prelude/apple/apple_macro_layer.bzl @@ -5,7 +5,9 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
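
The `AppleLibraryInfo` provider introduced above can then be queried from any dependent rule. A minimal sketch of collecting the generated `-Swift.h` headers from direct deps (the helper name is hypothetical):

    load("@prelude//apple:apple_library_types.bzl", "AppleLibraryInfo")

    def _collect_swift_headers(ctx: AnalysisContext) -> list[Artifact]:
        swift_headers = []
        for dep in ctx.attrs.deps:
            info = dep.get(AppleLibraryInfo)
            if info != None and info.swift_header != None:
                swift_headers.append(info.swift_header)
        return swift_headers
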
+load("@prelude//apple/user:apple_ipa_package.bzl", "make_apple_ipa_package_target") load(":apple_bundle_config.bzl", "apple_bundle_config") +load(":apple_dsym_config.bzl", "apple_dsym_config") load(":apple_info_plist_substitutions_parsing.bzl", "parse_codesign_entitlements") load(":apple_package_config.bzl", "apple_package_config") load(":apple_resource_bundle.bzl", "make_resource_bundle_rule") @@ -31,6 +33,13 @@ APPLE_LINK_LIBRARIES_LOCALLY_OVERRIDE = AppleBuckConfigAttributeOverride( skip_if_false = True, ) +APPLE_LINK_LIBRARIES_REMOTELY_OVERRIDE = AppleBuckConfigAttributeOverride( + name = "link_execution_preference", + key = "link_libraries_remotely_override", + value_if_true = "remote", + skip_if_false = True, +) + APPLE_STRIPPED_DEFAULT = AppleBuckConfigAttributeOverride( name = "_stripped_default", key = "stripped_default", @@ -39,20 +48,29 @@ APPLE_STRIPPED_DEFAULT = AppleBuckConfigAttributeOverride( _APPLE_LIBRARY_LOCAL_EXECUTION_OVERRIDES = [ APPLE_LINK_LIBRARIES_LOCALLY_OVERRIDE, + APPLE_LINK_LIBRARIES_REMOTELY_OVERRIDE, AppleBuckConfigAttributeOverride(name = APPLE_ARCHIVE_OBJECTS_LOCALLY_OVERRIDE_ATTR_NAME, key = "archive_objects_locally_override"), ] -_APPLE_BINARY_LOCAL_EXECUTION_OVERRIDES = [ +# If both configs are set the last one wins +_APPLE_BINARY_EXECUTION_OVERRIDES = [ AppleBuckConfigAttributeOverride( name = "link_execution_preference", key = "link_binaries_locally_override", value_if_true = "local", skip_if_false = True, ), + AppleBuckConfigAttributeOverride( + name = "link_execution_preference", + key = "link_binaries_remotely_override", + value_if_true = "remote", + skip_if_false = True, + ), ] _APPLE_TEST_LOCAL_EXECUTION_OVERRIDES = [ APPLE_LINK_LIBRARIES_LOCALLY_OVERRIDE, + APPLE_LINK_LIBRARIES_REMOTELY_OVERRIDE, ] def apple_macro_layer_set_bool_override_attrs_from_config(overrides: list[AppleBuckConfigAttributeOverride]) -> dict[str, Select]: @@ -72,6 +90,7 @@ def apple_macro_layer_set_bool_override_attrs_from_config(overrides: list[AppleB def apple_test_macro_impl(apple_test_rule, apple_resource_bundle_rule, **kwargs): kwargs.update(apple_bundle_config()) + kwargs.update(apple_dsym_config()) kwargs.update(apple_macro_layer_set_bool_override_attrs_from_config(_APPLE_TEST_LOCAL_EXECUTION_OVERRIDES)) # `extension` is used both by `apple_test` and `apple_resource_bundle`, so provide default here @@ -81,9 +100,16 @@ def apple_test_macro_impl(apple_test_rule, apple_resource_bundle_rule, **kwargs) **kwargs ) +def apple_xcuitest_macro_impl(apple_xcuitest_rule, **kwargs): + kwargs.update(apple_bundle_config()) + apple_xcuitest_rule( + **kwargs + ) + def apple_bundle_macro_impl(apple_bundle_rule, apple_resource_bundle_rule, **kwargs): info_plist_substitutions = kwargs.get("info_plist_substitutions") kwargs.update(apple_bundle_config()) + kwargs.update(apple_dsym_config()) apple_bundle_rule( _codesign_entitlements = parse_codesign_entitlements(info_plist_substitutions), _resource_bundle = make_resource_bundle_rule(apple_resource_bundle_rule, **kwargs), @@ -91,30 +117,48 @@ def apple_bundle_macro_impl(apple_bundle_rule, apple_resource_bundle_rule, **kwa ) def apple_library_macro_impl(apple_library_rule = None, **kwargs): + kwargs.update(apple_dsym_config()) kwargs.update(apple_macro_layer_set_bool_override_attrs_from_config(_APPLE_LIBRARY_LOCAL_EXECUTION_OVERRIDES)) kwargs.update(apple_macro_layer_set_bool_override_attrs_from_config([APPLE_STRIPPED_DEFAULT])) apple_library_rule(**kwargs) +def prebuilt_apple_framework_macro_impl(prebuilt_apple_framework_rule = None, 
**kwargs): + kwargs.update(apple_macro_layer_set_bool_override_attrs_from_config([APPLE_STRIPPED_DEFAULT])) + prebuilt_apple_framework_rule(**kwargs) + def apple_binary_macro_impl(apple_binary_rule = None, apple_universal_executable = None, **kwargs): - kwargs.update(apple_macro_layer_set_bool_override_attrs_from_config(_APPLE_BINARY_LOCAL_EXECUTION_OVERRIDES)) + dsym_args = apple_dsym_config() + kwargs.update(dsym_args) + kwargs.update(apple_macro_layer_set_bool_override_attrs_from_config(_APPLE_BINARY_EXECUTION_OVERRIDES)) kwargs.update(apple_macro_layer_set_bool_override_attrs_from_config([APPLE_STRIPPED_DEFAULT])) - binary_name = kwargs.pop("name") + original_binary_name = kwargs.pop("name") if kwargs.pop("supports_universal", False): - universal_wrapper_name = binary_name - binary_name = universal_wrapper_name + "ThinBinary" + binary_name = original_binary_name + "ThinBinary" apple_universal_executable( - name = universal_wrapper_name, + name = original_binary_name, executable = ":" + binary_name, + executable_name = original_binary_name, labels = kwargs.get("labels"), visibility = kwargs.get("visibility"), + default_target_platform = kwargs.get("default_target_platform"), + **dsym_args ) + else: + binary_name = original_binary_name apple_binary_rule(name = binary_name, **kwargs) -def apple_package_macro_impl(apple_package_rule = None, **kwargs): +def apple_package_macro_impl(apple_package_rule = None, apple_ipa_package_rule = None, **kwargs): kwargs.update(apple_package_config()) apple_package_rule( + _ipa_package = make_apple_ipa_package_target(apple_ipa_package_rule, **kwargs), + **kwargs + ) + +def apple_universal_executable_macro_impl(apple_universal_executable_rule = None, **kwargs): + kwargs.update(apple_dsym_config()) + apple_universal_executable_rule( **kwargs ) diff --git a/prelude/apple/apple_native.bzl b/prelude/apple/apple_native.bzl new file mode 100644 index 0000000000000..a64ccd68bd883 --- /dev/null +++ b/prelude/apple/apple_native.bzl @@ -0,0 +1,96 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
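
For context on the override records above: each entry maps an `[apple]` buckconfig key onto a rule attribute default, and because the overrides are applied in list order, the last matching entry wins when both the local and remote keys are set. A sketch with a hypothetical repo config:

    # Hypothetical .buckconfig:
    #   [apple]
    #   link_binaries_remotely_override = true
    #
    # apple_binary targets would then pick up link_execution_preference = "remote":
    overrides = apple_macro_layer_set_bool_override_attrs_from_config(_APPLE_BINARY_EXECUTION_OVERRIDES)
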
+ +load("@prelude//:prelude.bzl", "native") +load( + "@prelude//platforms/apple:constants.bzl", + "APPLE", +) +load("@prelude//platforms/apple:platforms.bzl", "config_backed_apple_target_platform", "get_default_target_platform_for_platform", "set_apple_platforms") +load("@prelude//platforms/apple/platforms_map.bzl", "APPLE_SDK_DEFAULT_PLATFORM_MAP") +load("@prelude//utils/buckconfig.bzl", "read") + +def _apple_library(**kwargs): + kwargs = _update_platforms(**kwargs) + native.apple_library(**kwargs) + +def _apple_asset_catalog(**kwargs): + kwargs = _update_platforms(**kwargs) + native.apple_asset_catalog(**kwargs) + +def _apple_binary(**kwargs): + kwargs = _update_platforms(**kwargs) + native.apple_binary(**kwargs) + +def _apple_bundle(**kwargs): + kwargs = _update_platforms(**kwargs) + native.apple_bundle(**kwargs) + +def _apple_watchos_bundle(**kwargs): + kwargs = _update_platforms(**kwargs) + native.apple_watchos_bundle(**kwargs) + +def _apple_package(**kwargs): + kwargs = _update_platforms(**kwargs) + native.apple_package(**kwargs) + +def _apple_resource(**kwargs): + kwargs = _update_platforms(**kwargs) + native.apple_resource(**kwargs) + +def _apple_test(**kwargs): + kwargs = _update_platforms(**kwargs) + native.apple_test(**kwargs) + +def _apple_xcuitest(**kwargs): + kwargs = _update_platforms(**kwargs) + native.apple_xcuitest(**kwargs) + +def _apple_xcframework(**kwargs): + kwargs = _update_platforms(**kwargs) + native.apple_xcframework(**kwargs) + +def _update_platforms(**kwargs): + platform = _get_default_platform() + + default_target_platform = kwargs.pop("default_target_platform", None) + base_config_backed_target_platform = kwargs.pop("config_backed_target_platform", None) + + if default_target_platform != None and base_config_backed_target_platform != None: + name = kwargs.get("name", "UNKNOWN_TARGET") + fail("{} has both a default_target_platform and a config_backed_target_platform, which is not allowed".format(name)) + + if base_config_backed_target_platform != None: + default_target_platform = config_backed_apple_target_platform(base_config_backed_target_platform, platform) + elif default_target_platform == None: + default_target_platform = get_default_target_platform_for_platform(platform) + + if default_target_platform != None: + kwargs["default_target_platform"] = default_target_platform + + kwargs = set_apple_platforms(platform, base_config_backed_target_platform, kwargs) + + return kwargs + +def _get_default_platform(): + config_platform = read("cxx", "default_platform") + if config_platform != None: + return config_platform + return APPLE_SDK_DEFAULT_PLATFORM_MAP.get(APPLE) + +apple_native = struct( + apple_asset_catalog = _apple_asset_catalog, + apple_binary = _apple_binary, + apple_bundle = _apple_bundle, + apple_watchos_bundle = _apple_watchos_bundle, + apple_library = _apple_library, + apple_package = _apple_package, + apple_resource = _apple_resource, + apple_test = _apple_test, + apple_xcuitest = _apple_xcuitest, + apple_xcframework = _apple_xcframework, +) diff --git a/prelude/apple/apple_package.bzl b/prelude/apple/apple_package.bzl index bbeb35fce8f01..c0e56d0aacb22 100644 --- a/prelude/apple/apple_package.bzl +++ b/prelude/apple/apple_package.bzl @@ -5,46 +5,57 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
-load("@prelude//:paths.bzl", "paths") -load("@prelude//utils:arglike.bzl", "ArgLike") -load(":apple_bundle_destination.bzl", "AppleBundleDestination", "bundle_relative_path_for_destination") -load(":apple_bundle_types.bzl", "AppleBundleInfo") +load("@prelude//apple:apple_toolchain_types.bzl", "AppleToolsInfo") load(":apple_package_config.bzl", "IpaCompressionLevel") -load(":apple_sdk.bzl", "get_apple_sdk_name") -load(":apple_swift_stdlib.bzl", "should_copy_swift_stdlib") -load(":apple_toolchain_types.bzl", "AppleToolchainInfo", "AppleToolsInfo") def apple_package_impl(ctx: AnalysisContext) -> list[Provider]: - unprocessed_ipa_contents = _get_ipa_contents(ctx) - package = ctx.actions.declare_output("{}.{}".format(ctx.attrs.bundle.label.name, ctx.attrs.ext)) + package_name = ctx.attrs.package_name if ctx.attrs.package_name else ctx.attrs.bundle.label.name + package = ctx.actions.declare_output("{}.{}".format(package_name, ctx.attrs.ext)) + contents = ( + ctx.attrs.bundle[DefaultInfo].default_outputs[0] if ctx.attrs.packager else _get_ipa_contents(ctx) + ) if ctx.attrs.packager: process_ipa_cmd = cmd_args([ ctx.attrs.packager[RunInfo], - "--contents-dir", - unprocessed_ipa_contents, + "--app-bundle-path", + contents, "--output-path", package.as_output(), ctx.attrs.packager_args, ]) category = "apple_package_make_custom" - - if ctx.attrs.validator: - fail( - "{} doesn't support a setting `packager` and `validator` at the same time.".format(ctx.attrs.name), - ) - else: process_ipa_cmd = _get_default_package_cmd( ctx, - unprocessed_ipa_contents, + contents, package.as_output(), ) category = "apple_package_make" + sub_targets = {} + + prepackaged_validators_artifacts = _get_prepackaged_validators_outputs(ctx, contents) + if prepackaged_validators_artifacts: + # Add the artifacts to packaging cmd so that they are run. 
+ process_ipa_cmd.add(cmd_args(hidden = prepackaged_validators_artifacts)) + sub_targets["prepackaged_validators"] = [ + DefaultInfo(default_outputs = prepackaged_validators_artifacts), + ] + ctx.actions.run(process_ipa_cmd, category = category) - return [DefaultInfo(default_output = package)] + return [DefaultInfo( + default_output = package, + sub_targets = sub_targets, + )] + +def _get_ipa_contents(ctx: AnalysisContext) -> Artifact: + ipa_package_dep = ctx.attrs._ipa_package + default_outputs = ipa_package_dep[DefaultInfo].default_outputs + if len(default_outputs) != 1: + fail("Expect exactly one output for .ipa package") + return default_outputs[0] def _get_default_package_cmd(ctx: AnalysisContext, unprocessed_ipa_contents: Artifact, output: OutputArtifact) -> cmd_args: apple_tools = ctx.attrs._apple_tools[AppleToolsInfo] @@ -57,103 +68,9 @@ def _get_default_package_cmd(ctx: AnalysisContext, unprocessed_ipa_contents: Art "--compression-level", _compression_level_arg(IpaCompressionLevel(ctx.attrs._ipa_compression_level)), ]) - if ctx.attrs.validator != None: - process_ipa_cmd.add([ - "--validator", - ctx.attrs.validator[RunInfo], - ]) return process_ipa_cmd -def _get_ipa_contents(ctx) -> Artifact: - bundle = ctx.attrs.bundle - app = bundle[DefaultInfo].default_outputs[0] - - contents = { - paths.join("Payload", app.basename): app, - } - - apple_bundle_info = bundle[AppleBundleInfo] - if (not apple_bundle_info.skip_copying_swift_stdlib) and should_copy_swift_stdlib(app.extension): - swift_support_path = paths.join("SwiftSupport", get_apple_sdk_name(ctx)) - contents[swift_support_path] = _get_swift_support_dir(ctx, app, apple_bundle_info) - - if apple_bundle_info.contains_watchapp: - contents["Symbols"] = _build_symbols_dir(ctx) - - return ctx.actions.copied_dir( - "__unzipped_ipa_contents__", - contents, - ) - -def _build_symbols_dir(ctx) -> Artifact: - symbols_dir = ctx.actions.declare_output("__symbols__", dir = True) - ctx.actions.run( - cmd_args(["mkdir", "-p", symbols_dir.as_output()]), - category = "watchos_symbols_dir", - ) - - return symbols_dir - -def _get_swift_support_dir(ctx, bundle_output: Artifact, bundle_info: AppleBundleInfo) -> Artifact: - stdlib_tool = ctx.attrs._apple_toolchain[AppleToolchainInfo].swift_toolchain_info.swift_stdlib_tool - sdk_name = get_apple_sdk_name(ctx) - - # .app -> app - # This is the way the input is expected. - extension = bundle_output.extension[1:] - swift_support_dir = ctx.actions.declare_output("__swift_dylibs__", dir = True) - script, _ = ctx.actions.write( - "build_swift_support.sh", - [ - cmd_args("set -euo pipefail"), - cmd_args(swift_support_dir, format = "mkdir -p {}"), - cmd_args( - [ - stdlib_tool, - # If you're debugging, you can pass the '--verbose' flag here. 
- "--copy", - "--scan-executable", - cmd_args( - [ - bundle_output, - bundle_relative_path_for_destination(AppleBundleDestination("executables"), sdk_name, extension), - bundle_info.binary_name, - ], - delimiter = "/", - ), - _get_scan_folder_args(AppleBundleDestination("plugins"), bundle_output, sdk_name, extension), - _get_scan_folder_args(AppleBundleDestination("frameworks"), bundle_output, sdk_name, extension), - "--destination", - swift_support_dir, - ], - delimiter = " ", - quote = "shell", - ), - ], - allow_args = True, - ) - ctx.actions.run( - cmd_args(["/bin/sh", script]).hidden([stdlib_tool, bundle_output, swift_support_dir.as_output()]), - category = "copy_swift_stdlibs", - ) - - return swift_support_dir - -def _get_scan_folder_args(dest: AppleBundleDestination, bundle_output: Artifact, sdk_name, extension) -> ArgLike: - return cmd_args( - [ - "--scan-folder", - cmd_args( - [ - bundle_output, - bundle_relative_path_for_destination(dest, sdk_name, extension), - ], - delimiter = "/", - ), - ], - ) - def _compression_level_arg(compression_level: IpaCompressionLevel) -> str: if compression_level.value == "none": return "0" @@ -165,3 +82,33 @@ def _compression_level_arg(compression_level: IpaCompressionLevel) -> str: return "9" else: fail("Unknown .ipa compression level: " + str(compression_level)) + +def _get_prepackaged_validators_outputs(ctx: AnalysisContext, prepackaged_contents: Artifact) -> list[Artifact]: + if not ctx.attrs.prepackaged_validators: + return [] + + outputs = [] + for idx, validator in enumerate(ctx.attrs.prepackaged_validators): + if type(validator) == "tuple": + validator, validator_args = validator + else: + validator = validator + validator_args = [] + + output = ctx.actions.declare_output(validator.label.name + "_{}".format(idx)) + outputs.append(output) + + ctx.actions.run( + cmd_args([ + validator[RunInfo], + "--contents-dir", + prepackaged_contents, + "--output-path", + output.as_output(), + validator_args, + ]), + category = "prepackaged_validator", + identifier = str(idx), + ) + + return outputs diff --git a/prelude/apple/apple_platforms.bzl b/prelude/apple/apple_platforms.bzl new file mode 100644 index 0000000000000..c9beeaa1ad902 --- /dev/null +++ b/prelude/apple/apple_platforms.bzl @@ -0,0 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +APPLE_PLATFORMS_KEY = "_apple_platforms" diff --git a/prelude/apple/apple_resource.bzl b/prelude/apple/apple_resource.bzl index 7955fe1eaa638..ac300a3e3d939 100644 --- a/prelude/apple/apple_resource.bzl +++ b/prelude/apple/apple_resource.bzl @@ -5,9 +5,33 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
+load( + "@prelude//ide_integrations:xcode.bzl", + "XCODE_DATA_SUB_TARGET", + "generate_xcode_data", +) load(":apple_resource_types.bzl", "AppleResourceDestination", "AppleResourceSpec") load(":resource_groups.bzl", "create_resource_graph") +def _artifacts(deps: (list[[Artifact, Dependency]])) -> list[Artifact]: + artifacts = [] + for dep in deps: + if isinstance(dep, Dependency): + artifacts.extend(dep[DefaultInfo].default_outputs) + else: + artifacts.append(dep) + return artifacts + +def _xcode_populate_attributes(ctx) -> dict[str, typing.Any]: + data = { + "product_name": ctx.attrs.name.replace(".", "_"), + } + artifacts = _artifacts(ctx.attrs.files) + if artifacts: + data["extra_xcode_files"] = artifacts + + return data + def apple_resource_impl(ctx: AnalysisContext) -> list[Provider]: destination = ctx.attrs.destination or "resources" resource_spec = AppleResourceSpec( @@ -18,6 +42,8 @@ def apple_resource_impl(ctx: AnalysisContext) -> list[Provider]: variant_files = ctx.attrs.variants or [], named_variant_files = ctx.attrs.named_variants or {}, codesign_files_on_copy = ctx.attrs.codesign_on_copy, + codesign_entitlements = ctx.attrs.codesign_entitlements, + codesign_flags_override = ctx.attrs.codesign_flags_override, ) # `files` can contain `apple_library()` which in turn can have `apple_resource()` deps @@ -30,10 +56,13 @@ def apple_resource_impl(ctx: AnalysisContext) -> list[Provider]: exported_deps = [], resource_spec = resource_spec, ) + xcode_data_default_info, xcode_data_info = generate_xcode_data(ctx, "apple_resource", None, _xcode_populate_attributes) + return [DefaultInfo( sub_targets = { "headers": [ DefaultInfo(default_outputs = []), ], + XCODE_DATA_SUB_TARGET: xcode_data_default_info, }, - ), graph] + ), graph, xcode_data_info] diff --git a/prelude/apple/apple_resource_bundle.bzl b/prelude/apple/apple_resource_bundle.bzl index d32030728e84a..e014a9bd58dc9 100644 --- a/prelude/apple/apple_resource_bundle.bzl +++ b/prelude/apple/apple_resource_bundle.bzl @@ -38,11 +38,12 @@ # +------>| Binary |<--------+ # +-------------------+ -load("@prelude//apple:apple_bundle_attrs.bzl", "get_apple_info_plist_build_system_identification_attrs") +load("@prelude//apple:apple_rules_impl_utility.bzl", "get_apple_info_plist_build_system_identification_attrs") _RESOURCE_BUNDLE_FIELDS = [ "asset_catalogs_compilation_options", "binary", + "copy_public_framework_headers", "default_target_platform", "deps", "extension", @@ -50,7 +51,9 @@ _RESOURCE_BUNDLE_FIELDS = [ "ibtool_module_flag", "info_plist", "info_plist_substitutions", + "module_map", "product_name", + "privacy_manifest", "resource_group", "resource_group_map", "within_view", @@ -59,7 +62,8 @@ _RESOURCE_BUNDLE_FIELDS = [ def _is_resources_toolchain_enabled() -> bool: is_arvr_query_mode = read_root_config("fb", "arvr_query_mode") in ("True", "true") - if is_arvr_query_mode: + is_xplat_query_mode = read_root_config("mode", "is_xplat_mode_query") in ("True", "true") + if is_arvr_query_mode or is_xplat_query_mode: # Avoid returning buck2-only targets return False @@ -76,7 +80,10 @@ def make_resource_bundle_rule(apple_resource_bundle_rule, **kwargs) -> [None, st resource_bundle_name = kwargs["name"] + "__ResourceBundle_Private" resource_bundle_kwargs = { + "compatible_with": kwargs.get("compatible_with"), + "exec_compatible_with": kwargs.get("exec_compatible_with"), "labels": ["generated"], + "target_compatible_with": kwargs.get("target_compatible_with"), "_bundle_target_name": kwargs["name"], "_compile_resources_locally_override": 
kwargs["_compile_resources_locally_override"], } diff --git a/prelude/apple/apple_resource_dedupe_alias.bzl b/prelude/apple/apple_resource_dedupe_alias.bzl new file mode 100644 index 0000000000000..c7c7a0b845b2f --- /dev/null +++ b/prelude/apple/apple_resource_dedupe_alias.bzl @@ -0,0 +1,21 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//apple:apple_common.bzl", "apple_common") +load("@prelude//user:rule_spec.bzl", "RuleRegistrationSpec") +load("@prelude//apple/user/apple_resource_transition.bzl", "apple_resource_transition") + +def _apple_resource_dedupe_alias_impl(ctx: AnalysisContext) -> list[Provider]: + return ctx.attrs.actual.providers + +registration_spec = RuleRegistrationSpec( + name = "apple_resource_dedupe_alias", + impl = _apple_resource_dedupe_alias_impl, + attrs = { + "actual": attrs.transition_dep(cfg = apple_resource_transition), + } | apple_common.skip_universal_resource_dedupe_arg(), +) diff --git a/prelude/apple/apple_resource_types.bzl b/prelude/apple/apple_resource_types.bzl index f0760596be773..d78994036a1c0 100644 --- a/prelude/apple/apple_resource_types.bzl +++ b/prelude/apple/apple_resource_types.bzl @@ -5,6 +5,8 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +load("@prelude//:artifacts.bzl", "ArtifactOutputs") + # Represents the values for the `destination` field of `apple_resource` AppleResourceDestination = enum( "executables", @@ -26,10 +28,17 @@ AppleResourceSpec = record( # `{ "ru.lproj" : ["Localizable.strings"] }` named_variant_files = field(dict[str, list[Artifact]], {}), codesign_files_on_copy = field(bool, False), + codesign_entitlements = field(Artifact | None, None), + codesign_flags_override = field(list[str] | None, None), ) -# Used when invoking `ibtool`, `actool` and `momc` +# Used when invoking `ibtool`, `actool`, `mapc` and `momc` AppleResourceProcessingOptions = record( prefer_local = field(bool, False), + prefer_remote = field(bool, False), allow_cache_upload = field(bool, False), ) + +CxxResourceSpec = record( + resources = field(dict[str, ArtifactOutputs], {}), +) diff --git a/prelude/apple/apple_rules_impl.bzl b/prelude/apple/apple_rules_impl.bzl index bd46a30fcdece..b7f2a606466e6 100644 --- a/prelude/apple/apple_rules_impl.bzl +++ b/prelude/apple/apple_rules_impl.bzl @@ -5,20 +5,28 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
+load("@prelude//:attrs_validators.bzl", "ATTRS_VALIDATORS_NAME", "ATTRS_VALIDATORS_TYPE") +load( + "@prelude//:validation_deps.bzl", + "VALIDATION_DEPS_ATTR_NAME", + "VALIDATION_DEPS_ATTR_TYPE", +) +load("@prelude//apple:apple_common.bzl", "apple_common") +load("@prelude//apple/swift:swift_incremental_support.bzl", "SwiftCompilationMode") load("@prelude//apple/swift:swift_toolchain.bzl", "swift_toolchain_impl") load("@prelude//apple/swift:swift_toolchain_types.bzl", "SwiftObjectFormat") -load("@prelude//apple/user:cpu_split_transition.bzl", "cpu_split_transition") -load("@prelude//cxx:headers.bzl", "CPrecompiledHeaderInfo") -load("@prelude//cxx/user:link_group_map.bzl", "link_group_map_attr") +load("@prelude//apple/user:apple_xcframework.bzl", "apple_xcframework_extra_attrs") +load("@prelude//cxx:headers.bzl", "CPrecompiledHeaderInfo", "HeaderMode") +load("@prelude//cxx:link_groups_types.bzl", "LINK_GROUP_MAP_ATTR") load("@prelude//linking:execution_preference.bzl", "link_execution_preference_attr") load("@prelude//linking:link_info.bzl", "LinkOrdering") -load("@prelude//decls/common.bzl", "Linkage") +load("@prelude//linking:types.bzl", "Linkage") load(":apple_asset_catalog.bzl", "apple_asset_catalog_impl") load(":apple_binary.bzl", "apple_binary_impl") load(":apple_bundle.bzl", "apple_bundle_impl") -load(":apple_bundle_types.bzl", "AppleBundleInfo") +load(":apple_bundle_types.bzl", "AppleBundleInfo", "ApplePackageExtension") load(":apple_core_data.bzl", "apple_core_data_impl") -load(":apple_library.bzl", "apple_library_impl") +load(":apple_library.bzl", "AppleSharedLibraryMachOFileType", "apple_library_impl") load(":apple_package.bzl", "apple_package_impl") load(":apple_package_config.bzl", "IpaCompressionLevel") load(":apple_resource.bzl", "apple_resource_impl") @@ -26,19 +34,19 @@ load( ":apple_rules_impl_utility.bzl", "APPLE_ARCHIVE_OBJECTS_LOCALLY_OVERRIDE_ATTR_NAME", "apple_bundle_extra_attrs", - "apple_test_extra_attrs", + "apple_dsymutil_attrs", + "apple_xcuitest_extra_attrs", "get_apple_toolchain_attr", "get_apple_xctoolchain_attr", "get_apple_xctoolchain_bundle_id_attr", + "get_enable_library_evolution", ) load(":apple_test.bzl", "apple_test_impl") load(":apple_toolchain.bzl", "apple_toolchain_impl") load(":apple_toolchain_types.bzl", "AppleToolsInfo") -load(":apple_universal_executable.bzl", "apple_universal_executable_impl") +load(":apple_xcuitest.bzl", "apple_xcuitest_impl") load(":prebuilt_apple_framework.bzl", "prebuilt_apple_framework_impl") load(":scene_kit_assets.bzl", "scene_kit_assets_impl") -load(":xcode_postbuild_script.bzl", "xcode_postbuild_script_impl") -load(":xcode_prebuild_script.bzl", "xcode_prebuild_script_impl") implemented_rules = { "apple_asset_catalog": apple_asset_catalog_impl, @@ -49,85 +57,111 @@ implemented_rules = { "apple_resource": apple_resource_impl, "apple_test": apple_test_impl, "apple_toolchain": apple_toolchain_impl, - "apple_universal_executable": apple_universal_executable_impl, + "apple_xcuitest": apple_xcuitest_impl, "core_data_model": apple_core_data_impl, "prebuilt_apple_framework": prebuilt_apple_framework_impl, "scene_kit_assets": scene_kit_assets_impl, "swift_toolchain": swift_toolchain_impl, - "xcode_postbuild_script": xcode_postbuild_script_impl, - "xcode_prebuild_script": xcode_prebuild_script_impl, } _APPLE_TOOLCHAIN_ATTR = get_apple_toolchain_attr() -ApplePackageExtension = enum( - "ipa", - "pkg", - "dmg", - "zip", -) - -extra_attributes = { - "apple_asset_catalog": { - "dirs": attrs.list(attrs.source(allow_directory = True), 
default = []), - }, - "apple_binary": { +def _apple_binary_extra_attrs(): + attribs = { "binary_linker_flags": attrs.list(attrs.arg(), default = []), + "dist_thin_lto_codegen_flags": attrs.list(attrs.arg(), default = []), "enable_distributed_thinlto": attrs.bool(default = False), + "enable_library_evolution": attrs.option(attrs.bool(), default = None), "extra_xcode_sources": attrs.list(attrs.source(allow_directory = True), default = []), "link_execution_preference": link_execution_preference_attr(), - "link_group_map": link_group_map_attr(), + "link_group_map": LINK_GROUP_MAP_ATTR, "link_ordering": attrs.option(attrs.enum(LinkOrdering.values()), default = None), "precompiled_header": attrs.option(attrs.dep(providers = [CPrecompiledHeaderInfo]), default = None), "prefer_stripped_objects": attrs.bool(default = False), - "preferred_linkage": attrs.enum(Linkage, default = "any"), + "preferred_linkage": attrs.enum(Linkage.values(), default = "any"), + "propagated_target_sdk_version": attrs.option(attrs.string(), default = None), + "sanitizer_runtime_enabled": attrs.option(attrs.bool(), default = None), "stripped": attrs.option(attrs.bool(), default = None), + "swift_compilation_mode": attrs.enum(SwiftCompilationMode.values(), default = "wmo"), + "swift_package_name": attrs.option(attrs.string(), default = None), "_apple_toolchain": _APPLE_TOOLCHAIN_ATTR, - # FIXME: prelude// should be standalone (not refer to fbsource//) - "_apple_tools": attrs.exec_dep(default = "fbsource//xplat/buck2/platform/apple:apple-tools", providers = [AppleToolsInfo]), + "_apple_tools": attrs.exec_dep(default = "prelude//apple/tools:apple-tools", providers = [AppleToolsInfo]), "_apple_xctoolchain": get_apple_xctoolchain_attr(), "_apple_xctoolchain_bundle_id": get_apple_xctoolchain_bundle_id_attr(), + "_enable_library_evolution": get_enable_library_evolution(), "_stripped_default": attrs.bool(default = False), - }, - "apple_bundle": apple_bundle_extra_attrs(), - "apple_library": { + VALIDATION_DEPS_ATTR_NAME: VALIDATION_DEPS_ATTR_TYPE, + ATTRS_VALIDATORS_NAME: ATTRS_VALIDATORS_TYPE, + } + attribs.update(apple_dsymutil_attrs()) + return attribs + +def _apple_library_extra_attrs(): + attribs = { + "dist_thin_lto_codegen_flags": attrs.list(attrs.arg(), default = []), + "enable_distributed_thinlto": attrs.bool(default = False), + "enable_library_evolution": attrs.option(attrs.bool(), default = None), "extra_xcode_sources": attrs.list(attrs.source(allow_directory = True), default = []), + "header_mode": attrs.option(attrs.enum(HeaderMode.values()), default = None), "link_execution_preference": link_execution_preference_attr(), - "link_group_map": link_group_map_attr(), + "link_group_map": LINK_GROUP_MAP_ATTR, "link_ordering": attrs.option(attrs.enum(LinkOrdering.values()), default = None), "precompiled_header": attrs.option(attrs.dep(providers = [CPrecompiledHeaderInfo]), default = None), - "preferred_linkage": attrs.enum(Linkage, default = "any"), - "serialize_debugging_options": attrs.bool(default = True), + "preferred_linkage": attrs.enum(Linkage.values(), default = "any"), + "propagated_target_sdk_version": attrs.option(attrs.string(), default = None), + # Mach-O file type for binary when the target is built as a shared library. 
+ "shared_library_macho_file_type": attrs.enum(AppleSharedLibraryMachOFileType.values(), default = "dylib"), "stripped": attrs.option(attrs.bool(), default = None), "supports_header_symlink_subtarget": attrs.bool(default = False), "supports_shlib_interfaces": attrs.bool(default = True), + "swift_compilation_mode": attrs.enum(SwiftCompilationMode.values(), default = "wmo"), + "swift_package_name": attrs.option(attrs.string(), default = None), "use_archive": attrs.option(attrs.bool(), default = None), "_apple_toolchain": _APPLE_TOOLCHAIN_ATTR, - # FIXME: prelude// should be standalone (not refer to fbsource//) - "_apple_tools": attrs.exec_dep(default = "fbsource//xplat/buck2/platform/apple:apple-tools", providers = [AppleToolsInfo]), + "_apple_tools": attrs.exec_dep(default = "prelude//apple/tools:apple-tools", providers = [AppleToolsInfo]), "_apple_xctoolchain": get_apple_xctoolchain_attr(), "_apple_xctoolchain_bundle_id": get_apple_xctoolchain_bundle_id_attr(), + "_enable_library_evolution": get_enable_library_evolution(), "_stripped_default": attrs.bool(default = False), APPLE_ARCHIVE_OBJECTS_LOCALLY_OVERRIDE_ATTR_NAME: attrs.option(attrs.bool(), default = None), - }, + ATTRS_VALIDATORS_NAME: ATTRS_VALIDATORS_TYPE, + VALIDATION_DEPS_ATTR_NAME: VALIDATION_DEPS_ATTR_TYPE, + } + attribs.update(apple_dsymutil_attrs()) + return attribs + +extra_attributes = { + "apple_asset_catalog": { + "dirs": attrs.list(attrs.source(allow_directory = True), default = []), + } | apple_common.skip_universal_resource_dedupe_arg(), + "apple_binary": _apple_binary_extra_attrs(), + "apple_bundle": apple_bundle_extra_attrs(), + "apple_library": _apple_library_extra_attrs(), "apple_package": { "bundle": attrs.dep(providers = [AppleBundleInfo]), "ext": attrs.enum(ApplePackageExtension.values(), default = "ipa"), + "package_name": attrs.option(attrs.string(), default = None), "packager": attrs.option(attrs.exec_dep(providers = [RunInfo]), default = None), "packager_args": attrs.list(attrs.arg(), default = []), - "validator": attrs.option(attrs.exec_dep(providers = [RunInfo]), default = None), - "_apple_toolchain": _APPLE_TOOLCHAIN_ATTR, - # FIXME: prelude// should be standalone (not refer to fbsource//) - "_apple_tools": attrs.exec_dep(default = "fbsource//xplat/buck2/platform/apple:apple-tools", providers = [AppleToolsInfo]), + "prepackaged_validators": attrs.list( + attrs.one_of( + attrs.exec_dep(providers = [RunInfo]), + attrs.tuple(attrs.exec_dep(providers = [RunInfo]), attrs.list(attrs.arg())), + ), + default = [], + ), + "_apple_tools": attrs.exec_dep(default = "prelude//apple/tools:apple-tools", providers = [AppleToolsInfo]), "_ipa_compression_level": attrs.enum(IpaCompressionLevel.values()), + "_ipa_package": attrs.dep(), }, "apple_resource": { + "codesign_entitlements": attrs.option(attrs.source(), default = None), + "codesign_flags_override": attrs.option(attrs.list(attrs.string()), default = None), "codesign_on_copy": attrs.bool(default = False), "content_dirs": attrs.list(attrs.source(allow_directory = True), default = []), "dirs": attrs.list(attrs.source(allow_directory = True), default = []), "files": attrs.list(attrs.one_of(attrs.dep(), attrs.source()), default = []), - }, - "apple_test": apple_test_extra_attrs(), + } | apple_common.skip_universal_resource_dedupe_arg(), "apple_toolchain": { # The Buck v1 attribute specs defines those as `attrs.source()` but # we want to properly handle any runnable tools that might have @@ -136,7 +170,7 @@ extra_attributes = { "codesign": attrs.exec_dep(providers = 
[RunInfo]),
         "codesign_allocate": attrs.exec_dep(providers = [RunInfo]),
         "codesign_identities_command": attrs.option(attrs.exec_dep(providers = [RunInfo]), default = None),
-        # Controls invocations of `ibtool`, `actool` and `momc`
+        # Controls invocations of `ibtool`, `actool`, `mapc` and `momc`
         "compile_resources_locally": attrs.bool(default = False),
         "copy_scene_kit_assets": attrs.exec_dep(providers = [RunInfo]),
         "cxx_toolchain": attrs.toolchain_dep(),
@@ -144,12 +178,13 @@
         "dwarfdump": attrs.option(attrs.exec_dep(providers = [RunInfo]), default = None),
         "extra_linker_outputs": attrs.set(attrs.string(), default = []),
         "ibtool": attrs.exec_dep(providers = [RunInfo]),
-        "installer": attrs.default_only(attrs.label(default = "buck//src/com/facebook/buck/installer/apple:apple_installer")),
+        "installer": attrs.default_only(attrs.label(default = "fbsource//xplat/toolchains/android/sdk/src/com/facebook/buck/installer/apple:apple_installer")),
         "libtool": attrs.exec_dep(providers = [RunInfo]),
         "lipo": attrs.exec_dep(providers = [RunInfo]),
-        "min_version": attrs.option(attrs.string(), default = None),
+        "mapc": attrs.option(attrs.exec_dep(providers = [RunInfo]), default = None),
+        "merge_index_store": attrs.default_only(attrs.dep(providers = [RunInfo], default = "prelude//apple/tools/index:merge_index_store")),
         "momc": attrs.exec_dep(providers = [RunInfo]),
-        "odrcov": attrs.option(attrs.exec_dep(providers = [RunInfo]), default = None),
+        "objdump": attrs.option(attrs.exec_dep(providers = [RunInfo]), default = None),
         # A placeholder tool that can be used to set up toolchain constraints.
         # Useful when fat and thin toolchains share the same underlying tools via `command_alias()`,
         # which requires setting up separate platform-specific aliases with the correct constraints.
@@ -172,31 +207,34 @@
         # pass abs paths during development and using the currently selected Xcode.
         "_internal_sdk_path": attrs.option(attrs.string(), default = None),
     },
-    "apple_universal_executable": {
-        "executable": attrs.split_transition_dep(cfg = cpu_split_transition),
-        "labels": attrs.list(attrs.string()),
-        "split_arch_dsym": attrs.bool(default = False),
-        "universal": attrs.option(attrs.bool(), default = None),
-        "_apple_toolchain": _APPLE_TOOLCHAIN_ATTR,
-        "_apple_tools": attrs.exec_dep(default = "fbsource//xplat/buck2/platform/apple:apple-tools", providers = [AppleToolsInfo]),
-    },
+    "apple_xcframework": apple_xcframework_extra_attrs(),
+    "apple_xcuitest": apple_xcuitest_extra_attrs(),
     "core_data_model": {
+        "module": attrs.option(attrs.string(), default = None),
         "path": attrs.source(allow_directory = True),
     },
     "prebuilt_apple_framework": {
+        "contains_swift": attrs.bool(default = False),
+        "dsyms": attrs.list(attrs.source(allow_directory = True), default = []),
         "framework": attrs.option(attrs.source(allow_directory = True), default = None),
-        "preferred_linkage": attrs.enum(Linkage, default = "any"),
+        "modular": attrs.bool(default = True),
+        "preferred_linkage": attrs.enum(Linkage.values(), default = "any"),
+        "sdk_modules": attrs.list(attrs.string(), default = []),
+        "stripped": attrs.option(attrs.bool(), default = None),
         "_apple_toolchain": _APPLE_TOOLCHAIN_ATTR,
+        "_apple_tools": attrs.default_only(attrs.exec_dep(default = "prelude//apple/tools:apple-tools", providers = [AppleToolsInfo])),
+        "_stripped_default": attrs.bool(default = False),
     },
     "scene_kit_assets": {
         "path": attrs.source(allow_directory = True),
     },
     "swift_library": {
-        "preferred_linkage": attrs.enum(Linkage, default = "any"),
+        "preferred_linkage": attrs.enum(Linkage.values(), default = "any"),
     },
     "swift_toolchain": {
         "architecture": attrs.option(attrs.string(), default = None),  # TODO(T115173356): Make field non-optional
         "make_swift_comp_db": attrs.default_only(attrs.dep(providers = [RunInfo], default = "prelude//apple/tools:make_swift_comp_db")),
+        "make_swift_interface": attrs.default_only(attrs.dep(providers = [RunInfo], default = "prelude//apple/tools:make_swift_interface")),
         "object_format": attrs.enum(SwiftObjectFormat.values(), default = "object"),
         # A placeholder tool that can be used to set up toolchain constraints.
         # Useful when fat and thin toolchains share the same underlying tools via `command_alias()`,
@@ -205,6 +243,7 @@ extra_attributes = {
         "platform_path": attrs.option(attrs.source(), default = None),  # Mark as optional until we remove `_internal_platform_path`
         "sdk_modules": attrs.list(attrs.exec_dep(), default = []),  # A list or a root target that represents a graph of sdk modules (e.g. Frameworks)
         "sdk_path": attrs.option(attrs.source(), default = None),  # Mark as optional until we remove `_internal_sdk_path`
+        "swift_ide_test_tool": attrs.option(attrs.exec_dep(providers = [RunInfo]), default = None),
         "swift_stdlib_tool": attrs.exec_dep(providers = [RunInfo]),
         "swiftc": attrs.exec_dep(providers = [RunInfo]),
         # TODO(T111858757): Mirror of `platform_path` but treated as a string. It allows us to
diff --git a/prelude/apple/apple_rules_impl_utility.bzl b/prelude/apple/apple_rules_impl_utility.bzl
index 0069e25bf4f64..1447d93178634 100644
--- a/prelude/apple/apple_rules_impl_utility.bzl
+++ b/prelude/apple/apple_rules_impl_utility.bzl
@@ -5,23 +5,25 @@
 # License, Version 2.0 found in the LICENSE-APACHE file in the root directory
 # of this source tree.
-load("@prelude//apple:apple_bundle_attrs.bzl", "get_apple_info_plist_build_system_identification_attrs") -load("@prelude//apple:apple_bundle_types.bzl", "AppleBundleResourceInfo") -load("@prelude//apple:apple_code_signing_types.bzl", "CodeSignType") +load("@prelude//:attrs_validators.bzl", "ATTRS_VALIDATORS_NAME", "ATTRS_VALIDATORS_TYPE") +load("@prelude//apple:apple_bundle_types.bzl", "AppleBundleResourceInfo", "AppleBundleTypeAttributeType") +load("@prelude//apple:apple_code_signing_types.bzl", "CodeSignConfiguration", "CodeSignType") +load("@prelude//apple:apple_common.bzl", "apple_common") load("@prelude//apple:apple_toolchain_types.bzl", "AppleToolchainInfo", "AppleToolsInfo") +load("@prelude//apple:resource_groups.bzl", "RESOURCE_GROUP_MAP_ATTR") +load("@prelude//apple/swift:swift_incremental_support.bzl", "SwiftCompilationMode") load("@prelude//apple/user:apple_selective_debugging.bzl", "AppleSelectiveDebuggingInfo") load("@prelude//apple/user:cpu_split_transition.bzl", "cpu_split_transition") -load("@prelude//apple/user:resource_group_map.bzl", "resource_group_map_attr") load("@prelude//cxx:headers.bzl", "CPrecompiledHeaderInfo") load("@prelude//linking:execution_preference.bzl", "link_execution_preference_attr") load("@prelude//linking:link_info.bzl", "LinkOrdering") -load("@prelude//decls/common.bzl", "LinkableDepType", "Linkage") +load("@prelude//utils/clear_platform.bzl", "clear_platform_transition") def get_apple_toolchain_attr(): # FIXME: prelude// should be standalone (not refer to fbcode//) return attrs.toolchain_dep(default = "fbcode//buck2/platform/toolchain:apple-default", providers = [AppleToolchainInfo]) -def _get_apple_bundle_toolchain_attr(): +def get_apple_bundle_toolchain_attr(): # FIXME: prelude// should be standalone (not refer to fbcode//) return attrs.toolchain_dep(default = "fbcode//buck2/platform/toolchain:apple-bundle", providers = [AppleToolchainInfo]) @@ -33,34 +35,88 @@ def get_apple_xctoolchain_bundle_id_attr(): # FIXME: prelude// should be standalone (not refer to fbcode//) return attrs.toolchain_dep(default = "fbcode//buck2/platform/toolchain:apple-xctoolchain-bundle-id") +def get_enable_library_evolution(): + return attrs.bool(default = select({ + "DEFAULT": False, + "config//features/apple:swift_library_evolution_enabled": True, + })) + +def _get_enable_dsym_uses_parallel_linker(): + return attrs.bool(default = select({ + "DEFAULT": False, + "config//features/apple:dsym_uses_parallel_linker_enabled": True, + })) + +def _strict_provisioning_profile_search_default_attr(): + default_value = (read_root_config("apple", "strict_provisioning_profile_search", "true").lower() == "true") + return attrs.bool(default = select({ + "DEFAULT": default_value, + "config//features/apple:strict_provisioning_profile_search_enabled": True, + })) + +def _fast_adhoc_signing_enabled_default_attr(): + return attrs.bool(default = select({ + "DEFAULT": True, + "config//features/apple:fast_adhoc_signing_disabled": False, + "config//features/apple:fast_adhoc_signing_enabled": True, + })) + APPLE_ARCHIVE_OBJECTS_LOCALLY_OVERRIDE_ATTR_NAME = "_archive_objects_locally_override" APPLE_USE_ENTITLEMENTS_WHEN_ADHOC_CODE_SIGNING_CONFIG_OVERRIDE_ATTR_NAME = "_use_entitlements_when_adhoc_code_signing" APPLE_USE_ENTITLEMENTS_WHEN_ADHOC_CODE_SIGNING_ATTR_NAME = "use_entitlements_when_adhoc_code_signing" +APPLE_EMBED_PROVISIONING_PROFILE_WHEN_ADHOC_CODE_SIGNING_CONFIG_OVERRIDE_ATTR_NAME = "_embed_provisioning_profile_when_adhoc_code_signing" 
+APPLE_EMBED_PROVISIONING_PROFILE_WHEN_ADHOC_CODE_SIGNING_ATTR_NAME = "embed_provisioning_profile_when_adhoc_code_signing" + +APPLE_VALIDATION_DEPS_ATTR_NAME = "validation_deps" +APPLE_VALIDATION_DEPS_ATTR_TYPE = attrs.set(attrs.dep(), sorted = True, default = []) + +def apple_dsymutil_attrs(): + return { + "dsym_uses_parallel_linker": _get_enable_dsym_uses_parallel_linker(), + "_dsymutil_extra_flags": attrs.list(attrs.string()), + "_dsymutil_verify_dwarf": attrs.string(), + } + +def get_apple_info_plist_build_system_identification_attrs(): + return { + "info_plist_identify_build_system": attrs.option(attrs.bool(), default = None), + "_info_plist_identify_build_system_default": attrs.bool(default = False), + } def _apple_bundle_like_common_attrs(): # `apple_bundle()` and `apple_test()` share a common set of extra attrs attribs = { - # FIXME: prelude// should be standalone (not refer to fbsource//) - "_apple_tools": attrs.exec_dep(default = "fbsource//xplat/buck2/platform/apple:apple-tools", providers = [AppleToolsInfo]), + "codesign_type": attrs.option(attrs.enum(CodeSignType.values()), default = None), + "fast_adhoc_signing_enabled": attrs.option(attrs.bool(), default = None), + "provisioning_profile_filter": attrs.option(attrs.string(), default = None), + "strict_provisioning_profile_search": attrs.option(attrs.bool(), default = None), + "versioned_macos_bundle": attrs.bool(default = False), + "_apple_tools": attrs.exec_dep(default = "prelude//apple/tools:apple-tools", providers = [AppleToolsInfo]), "_apple_xctoolchain": get_apple_xctoolchain_attr(), "_apple_xctoolchain_bundle_id": get_apple_xctoolchain_bundle_id_attr(), "_bundling_cache_buster": attrs.option(attrs.string(), default = None), "_bundling_log_file_enabled": attrs.bool(default = False), "_bundling_log_file_level": attrs.option(attrs.string(), default = None), - "_bundling_path_conflicts_check_enabled": attrs.bool(default = False), + "_code_signing_configuration": attrs.option(attrs.enum(CodeSignConfiguration.values()), default = None), + "_codesign_identities_command_override": attrs.option(attrs.exec_dep(providers = [RunInfo]), default = None), "_codesign_type": attrs.option(attrs.enum(CodeSignType.values()), default = None), "_compile_resources_locally_override": attrs.option(attrs.bool(), default = None), - "_dry_run_code_signing": attrs.bool(default = False), - "_fast_adhoc_signing_enabled": attrs.bool(default = False), + "_fast_adhoc_signing_enabled_default": _fast_adhoc_signing_enabled_default_attr(), + "_fast_provisioning_profile_parsing_enabled": attrs.bool(default = False), "_incremental_bundling_enabled": attrs.bool(default = False), "_profile_bundling_enabled": attrs.bool(default = False), # FIXME: prelude// should be standalone (not refer to fbsource//) "_provisioning_profiles": attrs.dep(default = "fbsource//xplat/buck2/platform/apple:provisioning_profiles"), "_resource_bundle": attrs.option(attrs.dep(providers = [AppleBundleResourceInfo]), default = None), + "_strict_provisioning_profile_search_default": _strict_provisioning_profile_search_default_attr(), APPLE_USE_ENTITLEMENTS_WHEN_ADHOC_CODE_SIGNING_CONFIG_OVERRIDE_ATTR_NAME: attrs.option(attrs.bool(), default = None), APPLE_USE_ENTITLEMENTS_WHEN_ADHOC_CODE_SIGNING_ATTR_NAME: attrs.bool(default = False), + APPLE_EMBED_PROVISIONING_PROFILE_WHEN_ADHOC_CODE_SIGNING_CONFIG_OVERRIDE_ATTR_NAME: attrs.option(attrs.bool(), default = None), + APPLE_EMBED_PROVISIONING_PROFILE_WHEN_ADHOC_CODE_SIGNING_ATTR_NAME: attrs.bool(default = False), + 
APPLE_VALIDATION_DEPS_ATTR_NAME: APPLE_VALIDATION_DEPS_ATTR_TYPE, } attribs.update(get_apple_info_plist_build_system_identification_attrs()) + attribs.update(apple_dsymutil_attrs()) return attribs def apple_test_extra_attrs(): @@ -68,41 +124,87 @@ def apple_test_extra_attrs(): # wrap this test library into an `apple_bundle`. Because of this, `apple_test` has attributes # from both `apple_library` and `apple_bundle`. attribs = { + ATTRS_VALIDATORS_NAME: ATTRS_VALIDATORS_TYPE, # Expected by `apple_bundle`, for `apple_test` this field is always None. "binary": attrs.option(attrs.dep(), default = None), + "enable_library_evolution": attrs.option(attrs.bool(), default = None), # The resulting test bundle should have .xctest extension. "extension": attrs.string(), "extra_xcode_sources": attrs.list(attrs.source(allow_directory = True), default = []), "link_execution_preference": link_execution_preference_attr(), "link_ordering": attrs.option(attrs.enum(LinkOrdering.values()), default = None), - # Used to create the shared test library. Any library deps whose `preferred_linkage` isn't "shared" will - # be treated as "static" deps and linked into the shared test library. - "link_style": attrs.enum(LinkableDepType, default = "static"), "precompiled_header": attrs.option(attrs.dep(providers = [CPrecompiledHeaderInfo]), default = None), - # The test source code and lib dependencies should be built into a shared library. - "preferred_linkage": attrs.enum(Linkage, default = "shared"), + "propagated_target_sdk_version": attrs.option(attrs.string(), default = None), # Expected by `apple_bundle`, for `apple_test` this field is always None. "resource_group": attrs.option(attrs.string(), default = None), # Expected by `apple_bundle`, for `apple_test` this field is always None. "resource_group_map": attrs.option(attrs.string(), default = None), + "sanitizer_runtime_enabled": attrs.option(attrs.bool(), default = None), "stripped": attrs.bool(default = False), + "swift_compilation_mode": attrs.enum(SwiftCompilationMode.values(), default = "wmo"), + "swift_package_name": attrs.option(attrs.string(), default = None), + "test_re_capabilities": attrs.option(attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False), default = None, doc = """ + An optional dictionary with the RE capabilities for the test execution. + Overrides a default selection mechanism. + """), + "test_re_use_case": attrs.option(attrs.string(), default = None, doc = """ + An optional name of the RE use case for the test execution. + Overrides a default selection mechanism. 
+ """), "_apple_toolchain": get_apple_toolchain_attr(), - "_ios_booted_simulator": attrs.default_only(attrs.dep(default = "fbsource//xplat/buck2/platform/apple:ios_booted_simulator", providers = [LocalResourceInfo])), - "_ios_unbooted_simulator": attrs.default_only(attrs.dep(default = "fbsource//xplat/buck2/platform/apple:ios_unbooted_simulator", providers = [LocalResourceInfo])), - "_macos_idb_companion": attrs.default_only(attrs.dep(default = "fbsource//xplat/buck2/platform/apple:macos_idb_companion", providers = [LocalResourceInfo])), + "_enable_library_evolution": get_enable_library_evolution(), + "_ios_booted_simulator": attrs.transition_dep(cfg = clear_platform_transition, default = "fbsource//xplat/buck2/platform/apple:ios_booted_simulator", providers = [LocalResourceInfo]), + "_ios_unbooted_simulator": attrs.transition_dep(cfg = clear_platform_transition, default = "fbsource//xplat/buck2/platform/apple:ios_unbooted_simulator", providers = [LocalResourceInfo]), + "_macos_idb_companion": attrs.transition_dep(cfg = clear_platform_transition, default = "fbsource//xplat/buck2/platform/apple:macos_idb_companion", providers = [LocalResourceInfo]), } attribs.update(_apple_bundle_like_common_attrs()) return attribs +def apple_xcuitest_extra_attrs(): + attribs = { + # This is ignored, but required for info plist processing. + "binary": attrs.option(attrs.source(), default = None), + "codesign_identity": attrs.option(attrs.string(), default = None), + "enable_library_evolution": attrs.option(attrs.bool(), default = None), + "entitlements_file": attrs.option(attrs.source(), default = None), + "extension": attrs.default_only(attrs.string(default = "app")), + "incremental_bundling_enabled": attrs.bool(default = False), + "info_plist": attrs.source(), + "info_plist_substitutions": attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False, default = {}), + "target_sdk_version": attrs.option(attrs.string(), default = None), + # The test bundle to package in the UI test runner app. 
+ "test_bundle": attrs.dep(), + "_apple_toolchain": get_apple_toolchain_attr(), + "_enable_library_evolution": get_enable_library_evolution(), + } + attribs.update(_apple_bundle_like_common_attrs()) + attribs.pop("_dsymutil_extra_flags", None) + attribs.pop("_dsymutil_verify_dwarf", None) + + return attribs + +def _embed_xctest_frameworks_default_value(): + return select({ + "DEFAULT": False, + # Xcode copies XCTest frameworks to test host apps, required when the + # selected Xcode version != Xcode version used to build an app under test + "config//marker/apple/constraints:embed_xctest_frameworks_enabled": True, + }) + def apple_bundle_extra_attrs(): attribs = { "binary": attrs.option(attrs.split_transition_dep(cfg = cpu_split_transition), default = None), - "resource_group_map": resource_group_map_attr(), + "bundle_type": attrs.option(attrs.enum(AppleBundleTypeAttributeType.values()), default = None), + "copy_public_framework_headers": attrs.option(attrs.bool(), default = None), + "embed_xctest_frameworks": attrs.bool(default = _embed_xctest_frameworks_default_value()), + "module_map": attrs.option(attrs.source(), default = None), + "propagated_target_sdk_version": attrs.option(attrs.string(), default = None), + "resource_group_map": RESOURCE_GROUP_MAP_ATTR, "selective_debugging": attrs.option(attrs.dep(providers = [AppleSelectiveDebuggingInfo]), default = None), "split_arch_dsym": attrs.bool(default = False), "universal": attrs.option(attrs.bool(), default = None), - "_apple_toolchain": _get_apple_bundle_toolchain_attr(), + "_apple_toolchain": get_apple_bundle_toolchain_attr(), "_codesign_entitlements": attrs.option(attrs.source(), default = None), - } + } | apple_common.debug_artifacts_validators_arg() attribs.update(_apple_bundle_like_common_attrs()) return attribs diff --git a/prelude/apple/apple_static_archive.bzl b/prelude/apple/apple_static_archive.bzl new file mode 100644 index 0000000000000..e43c4575caa13 --- /dev/null +++ b/prelude/apple/apple_static_archive.bzl @@ -0,0 +1,119 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +load("@prelude//:artifact_tset.bzl", "make_artifact_tset") +load("@prelude//:validation_deps.bzl", "VALIDATION_DEPS_ATTR_NAME", "VALIDATION_DEPS_ATTR_TYPE", "get_validation_deps_outputs") +load("@prelude//apple:apple_library.bzl", "AppleLibraryForDistributionInfo") +load("@prelude//apple:apple_library_types.bzl", "AppleLibraryInfo") +load("@prelude//apple:apple_rules_impl_utility.bzl", "get_apple_toolchain_attr") +load("@prelude//apple:apple_toolchain_types.bzl", "AppleToolchainInfo") +load("@prelude//linking:link_info.bzl", "LinkStrategy", "get_link_args_for_strategy", "unpack_link_args") +load("@prelude//linking:linkables.bzl", "linkables") +load("@prelude//user:rule_spec.bzl", "RuleRegistrationSpec") +load("@prelude//utils:arglike.bzl", "ArgLike") + +def _apple_static_archive_impl(ctx: AnalysisContext) -> list[Provider]: + libtool = ctx.attrs._apple_toolchain[AppleToolchainInfo].libtool + archive_name = ctx.attrs.name if ctx.attrs.archive_name == None else ctx.attrs.archive_name + output = ctx.actions.declare_output(archive_name) + + artifacts = _get_static_link_args(ctx) + validation_deps_outputs = get_validation_deps_outputs(ctx) + + #TODO(T193127271): Support thin archives + cmd = cmd_args([libtool, "-static", "-o", output.as_output(), artifacts], hidden = validation_deps_outputs or []) + ctx.actions.run(cmd, category = "libtool", identifier = output.short_path) + + providers = [DefaultInfo(default_output = output), _get_apple_library_info(ctx)] + _get_apple_library_for_distribution_info(ctx) + + return providers + +def _get_apple_library_for_distribution_info(ctx: AnalysisContext) -> list[AppleLibraryForDistributionInfo]: + if ctx.attrs.distribution_flat_dep != None: + apple_library_for_distribution = ctx.attrs.distribution_flat_dep.get(AppleLibraryForDistributionInfo) + if apple_library_for_distribution != None: + return [apple_library_for_distribution] + return [] + +def _get_apple_library_info(ctx: AnalysisContext) -> AppleLibraryInfo: + all_flat_deps = filter(None, ctx.attrs.flat_deps + [ctx.attrs.distribution_flat_dep]) + flat_apple_library_infos = filter(None, [dep.get(AppleLibraryInfo) for dep in all_flat_deps]) + flat_public_framework_headers = [] + for apple_library_info in flat_apple_library_infos: + tset = apple_library_info.public_framework_headers._tset + if tset != None: + for headers in tset.value: + flat_public_framework_headers += headers.artifacts + + flat_header_tset = make_artifact_tset( + actions = ctx.actions, + label = ctx.label, + artifacts = flat_public_framework_headers, + ) + + apple_library_infos = filter(None, [dep.get(AppleLibraryInfo) for dep in ctx.attrs.deps]) + public_framework_header_tset = make_artifact_tset( + actions = ctx.actions, + label = ctx.label, + children = [apple_library.public_framework_headers for apple_library in apple_library_infos] + [flat_header_tset], + ) + + swift_header = None + if ctx.attrs.distribution_flat_dep != None: + distribution_flat_dep_apple_library_info = ctx.attrs.distribution_flat_dep.get(AppleLibraryInfo) + if distribution_flat_dep_apple_library_info: + swift_header = distribution_flat_dep_apple_library_info.swift_header + + return AppleLibraryInfo( + public_framework_headers = public_framework_header_tset, + swift_header = swift_header, + target = ctx.label, + labels = ctx.attrs.labels, + ) + +def _get_static_link_args(ctx: AnalysisContext) -> list[ArgLike]: + args = [] + + for dep in ctx.attrs.flat_deps: + default_info = dep.get(DefaultInfo) + if default_info == None: + continue + default_outputs = 
default_info.default_outputs + if len(default_outputs) > 0: + args.append(default_outputs[0]) + + if ctx.attrs.distribution_flat_dep: + default_info = ctx.attrs.distribution_flat_dep.get(DefaultInfo) + if default_info != None: + default_outputs = default_info.default_outputs + if len(default_outputs) > 0: + args.append(default_outputs[0]) + + args = dedupe(args) + + transitive_link_args = get_link_args_for_strategy( + ctx, + [x.merged_link_info for x in linkables(ctx.attrs.deps)], + LinkStrategy("static"), + ) + args.append(unpack_link_args(transitive_link_args)) + + return args + +registration_spec = RuleRegistrationSpec( + name = "apple_static_archive", + impl = _apple_static_archive_impl, + attrs = { + "archive_name": attrs.option(attrs.string(), default = None), + "deps": attrs.list(attrs.dep(), default = []), + "distribution_flat_dep": attrs.option(attrs.dep(), default = None), + "flat_deps": attrs.list(attrs.dep(), default = []), + "labels": attrs.list(attrs.string(), default = []), + VALIDATION_DEPS_ATTR_NAME: VALIDATION_DEPS_ATTR_TYPE, + "_apple_toolchain": get_apple_toolchain_attr(), + }, +) diff --git a/prelude/apple/apple_target_sdk_version.bzl b/prelude/apple/apple_target_sdk_version.bzl index 4b1290384efca..e3b0ca339c792 100644 --- a/prelude/apple/apple_target_sdk_version.bzl +++ b/prelude/apple/apple_target_sdk_version.bzl @@ -5,76 +5,70 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -load("@prelude//apple:apple_toolchain_types.bzl", "AppleToolchainInfo") -load("@prelude//cxx:preprocessor.bzl", "CPreprocessor", "CPreprocessorArgs") -load(":apple_sdk.bzl", "get_apple_sdk_name") +load("@prelude//cxx:target_sdk_version.bzl", "get_target_sdk_version") -# TODO(T112099448): In the future, the min version flag should live on the apple_toolchain() -# TODO(T113776898): Switch to -mtargetos= flag which should live on the apple_toolchain() -_APPLE_MIN_VERSION_FLAG_SDK_MAP = { - "iphoneos": "-mios-version-min", - "iphonesimulator": "-mios-simulator-version-min", - "maccatalyst": "-mios-version-min", # Catalyst uses iOS min version flags - "macosx": "-mmacosx-version-min", - "watchos": "-mwatchos-version-min", - "watchsimulator": "-mwatchsimulator-version-min", +_MACCATALYST_IOS_TO_MACOS_VERSION_MAP = { + "13.0": "10.15", # Catalina + "13.1": "10.15", + "13.2": "10.15.1", + "13.3": "10.15.2", + "13.4": "10.15.4", + "13.5": "10.15.5", + "13.6": "10.15.5", # Xcode reported 10.15 + "14.0": "11.0", # Big Sur + "14.1": "11.0", + "14.2": "11.0", + "14.3": "11.1", + "14.4": "11.2", + "14.5": "11.3", + "14.6": "11.4", + "14.7": "11.5", + "15.0": "12.0", # Monterey + "15.1": "12.0", # Xcode reported 10.15 + "15.2": "12.1", + "15.3": "12.2", + "15.4": "12.3", + "15.5": "12.4", + "15.6": "12.5", + "16.0": "13.0", # Ventura + "16.1": "13.0", + "16.2": "13.1", + "16.3": "13.2", + "16.4": "13.3", + "16.5": "13.4", + "16.6": "13.5", + "17.0": "14.0", # Sonoma + "17.1": "14.1", + "17.2": "14.2", + "17.3": "14.3", + "17.4": "14.4", + "17.5": "14.5", + "18.0": "15.0", # Sequoia + "18.1": "15.1", + "18.2": "15.2", } -# Returns the target SDK version for apple_(binary|library) and uses -# apple_toolchain() min version as a fallback. This is the central place -# where the version for a particular node is defined, no other places -# should be accessing `attrs.target_sdk_version` or `attrs.min_version`. 
-def get_min_deployment_version_for_node(ctx: AnalysisContext) -> [None, str]: - toolchain_min_version = ctx.attrs._apple_toolchain[AppleToolchainInfo].min_version - if toolchain_min_version == "": - toolchain_min_version = None - return getattr(ctx.attrs, "target_sdk_version", None) or toolchain_min_version - -# Returns the min deployment flag to pass to the compiler + linker -def _get_min_deployment_version_target_flag(ctx: AnalysisContext) -> [None, str]: - target_sdk_version = get_min_deployment_version_for_node(ctx) - if target_sdk_version == None: - return None - - sdk_name = get_apple_sdk_name(ctx) - min_version_flag = _APPLE_MIN_VERSION_FLAG_SDK_MAP.get(sdk_name) - if min_version_flag == None: - fail("Could not determine min version flag for SDK {}".format(sdk_name)) +_SDK_NAME_TO_PLATFORM_NAME_OVERRIDE_MAP = { + "maccatalyst": "macosx", +} - return "{}={}".format(min_version_flag, target_sdk_version) +def get_platform_version_for_sdk_version(sdk_name: str, sdk_version: str) -> str: + if sdk_name == "maccatalyst": + macos_version = _MACCATALYST_IOS_TO_MACOS_VERSION_MAP.get(sdk_version, None) + if macos_version == None: + fail("No macos version for maccatalyst version {}".format(sdk_version)) + return macos_version -# There are two main ways in which we can pass target SDK version: -# - versioned target triple -# - unversioned target triple + version flag -# -# A versioned target triple overrides any version flags and requires -# additional flags to disable the warning/error (`-Woverriding-t-option`), -# so we prefer to use an unversioned target triple + version flag. -# -# Furthermore, we want to ensure that there's _exactly one_ version flag -# on a compiler/link line. This makes debugging easier and avoids issues -# with multiple layers each adding/overriding target SDK. It also makes -# it easier to switch to versioned target triple. -# -# There are exactly two ways in which to specify the target SDK: -# - apple_toolchain.min_version sets the default value -# - apple_(binary|library).target_sdk_version sets the per-target value -# -# apple_toolchain() rules should _never_ add any version flags because -# the rule does _not_ know whether a particular target will request a -# non-default value. Otherwise, we end up with multiple version flags, -# one added by the toolchain and then additional overrides by targets. + return sdk_version -def get_min_deployment_version_target_linker_flags(ctx: AnalysisContext) -> list[str]: - min_version_flag = _get_min_deployment_version_target_flag(ctx) - return [min_version_flag] if min_version_flag != None else [] +def get_platform_name_for_sdk(sdk_name: str) -> str: + return _SDK_NAME_TO_PLATFORM_NAME_OVERRIDE_MAP.get(sdk_name, sdk_name) -def get_min_deployment_version_target_preprocessor_flags(ctx: AnalysisContext) -> list[CPreprocessor]: - min_version_flag = _get_min_deployment_version_target_flag(ctx) - if min_version_flag == None: - return [] +# Returns the target_sdk_version specified for this build, falling +# back to the toolchain version when unset. 
+def get_min_deployment_version_for_node(ctx: AnalysisContext) -> str: + version = get_target_sdk_version(ctx) + if version == None: + fail("No target_sdk_version set on target or toolchain") - args = cmd_args(min_version_flag) - return [CPreprocessor( - relative_args = CPreprocessorArgs(args = [args]), - )] + return version diff --git a/prelude/apple/apple_test.bzl b/prelude/apple/apple_test.bzl index ac700440b3708..4c87390412dd5 100644 --- a/prelude/apple/apple_test.bzl +++ b/prelude/apple/apple_test.bzl @@ -5,55 +5,61 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -load("@prelude//:paths.bzl", "paths") +load( + "@prelude//:artifact_tset.bzl", + "project_artifacts", +) load("@prelude//apple:apple_library.bzl", "AppleLibraryAdditionalParams", "apple_library_rule_constructor_params_and_swift_providers") load("@prelude//apple:apple_toolchain_types.bzl", "AppleToolchainInfo") -# @oss-disable: load("@prelude//apple/meta_only:apple_test_re_capabilities.bzl", "apple_test_re_capabilities") +load("@prelude//apple:apple_xctest_frameworks_utility.bzl", "get_xctest_frameworks_bundle_parts") +# @oss-disable: load("@prelude//apple/meta_only:apple_test_re_capabilities.bzl", "ios_test_re_capabilities", "macos_test_re_capabilities") # @oss-disable: load("@prelude//apple/meta_only:apple_test_re_use_case.bzl", "apple_test_re_use_case") load("@prelude//apple/swift:swift_compilation.bzl", "get_swift_anonymous_targets", "uses_explicit_modules") load( "@prelude//cxx:argsfiles.bzl", "CompileArgsfile", # @unused Used as a type ) +load("@prelude//cxx:cxx_library.bzl", "cxx_library_parameterized") load( - "@prelude//cxx:compile.bzl", + "@prelude//cxx:cxx_sources.bzl", "CxxSrcWithFlags", # @unused Used as a type ) -load("@prelude//cxx:cxx_library.bzl", "cxx_library_parameterized") load("@prelude//cxx:cxx_types.bzl", "CxxRuleProviderParams", "CxxRuleSubTargetParams") load( "@prelude//cxx:linker.bzl", "SharedLibraryFlagOverrides", ) +load("@prelude//ide_integrations:xcode.bzl", "XcodeDataInfoKeys") load( "@prelude//utils:dicts.bzl", "flatten_x", ) -load( - "@prelude//utils:utils.bzl", - "expect", -) +load("@prelude//utils:expect.bzl", "expect") load(":apple_bundle.bzl", "AppleBundlePartListConstructorParams", "get_apple_bundle_part_list") load(":apple_bundle_destination.bzl", "AppleBundleDestination", "bundle_relative_path_for_destination") load(":apple_bundle_part.bzl", "AppleBundlePart", "SwiftStdlibArguments", "assemble_bundle", "bundle_output", "get_apple_bundle_part_relative_destination_path", "get_bundle_dir_name") load(":apple_bundle_types.bzl", "AppleBundleInfo") load(":apple_bundle_utility.bzl", "get_product_name") load(":apple_dsym.bzl", "DSYM_SUBTARGET", "DWARF_AND_DSYM_SUBTARGET", "get_apple_dsym") +load(":apple_entitlements.bzl", "entitlements_link_flags") load(":apple_sdk.bzl", "get_apple_sdk_name") load( ":apple_sdk_metadata.bzl", "MacOSXSdkMetadata", ) -load(":debug.bzl", "DEBUGINFO_SUBTARGET") +load(":debug.bzl", "AppleDebuggableInfo") load(":xcode.bzl", "apple_populate_xcode_attributes") load(":xctest_swift_support.bzl", "XCTestSwiftSupportInfo") +_XCTOOLCHAIN_SUB_TARGET = "xctoolchain" + def apple_test_impl(ctx: AnalysisContext) -> [list[Provider], Promise]: def get_apple_test_providers(deps_providers) -> list[Provider]: xctest_bundle = bundle_output(ctx) test_host_app_bundle = _get_test_host_app_bundle(ctx) test_host_app_binary = _get_test_host_app_binary(ctx, test_host_app_bundle) + ui_test_target_app_bundle = 
_get_ui_test_target_app_bundle(ctx)

         objc_bridging_header_flags = [
             # Disable bridging header -> PCH compilation to mitigate an issue in Xcode 13 beta.
@@ -62,6 +68,16 @@ def apple_test_impl(ctx: AnalysisContext) -> [list[Provider], Promise]:
             cmd_args(ctx.attrs.bridging_header),
         ] if ctx.attrs.bridging_header else []

+        shared_library_flags = ["-bundle"]
+
+        # Embedding entitlements (if present) means that we can skip adhoc codesigning
+        # any xctests altogether, provided the test dylib is adhoc signed.
+        shared_library_flags += entitlements_link_flags(ctx)
+
+        # The linker will include an adhoc signature for ARM64 by default; let's
+        # ensure we always have an adhoc signature regardless of arch/linker logic.
+        shared_library_flags += ["-Wl,-adhoc_codesign"]
+
         constructor_params = apple_library_rule_constructor_params_and_swift_providers(
             ctx,
             AppleLibraryAdditionalParams(
@@ -73,7 +89,7 @@ def apple_test_impl(ctx: AnalysisContext) -> [list[Provider], Promise]:
                 shared_library_name_linker_flags_format = [],
                 # When building Apple tests, we want to link with `-bundle` instead of `-shared` to allow
                 # linking against the bundle loader.
-                shared_library_flags = ["-bundle"],
+                shared_library_flags = shared_library_flags,
             ),
             generate_sub_targets = CxxRuleSubTargetParams(
                 compilation_database = True,
@@ -84,7 +100,7 @@ def apple_test_impl(ctx: AnalysisContext) -> [list[Provider], Promise]:
                 compilation_database = True,
                 default = False,
                 linkable_graph = False,
-                link_style_outputs = False,
+                link_style_outputs = True,
                 merged_native_link_info = False,
                 omnibus_root = False,
                 preprocessors = False,
@@ -103,7 +119,11 @@ def apple_test_impl(ctx: AnalysisContext) -> [list[Provider], Promise]:
         )
         cxx_library_output = cxx_library_parameterized(ctx, constructor_params)
-        test_binary_output = ctx.actions.declare_output(get_product_name(ctx))
+
+        # Locate the temporary binary that is bundled into the xctest in a binaries directory. When Xcode loads the test out of the target's output dir,
+        # it will utilize a binary with the test name from the output dir instead of the xctest bundle, which then results in incorrect paths to test
+        # resources. Locating the temporary binary elsewhere works around this issue.
+        test_binary_output = ctx.actions.declare_output("__binaries__", get_product_name(ctx))

         # Rename in order to generate dSYM with correct binary name (dsymutil doesn't provide a way to control binary name in output dSYM bundle).
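# As an illustration, for a product named "MyTests" (a hypothetical name) the
# outputs end up laid out roughly as:
#
#   __binaries__/MyTests       <- temporary renamed copy, outside Xcode's view,
#                                 also the input dsymutil names the dSYM after
#   MyTests.xctest/MyTests     <- the binary Xcode actually loads
#
# so resource paths resolve against the bundle rather than the stray binary.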
test_binary = ctx.actions.copy_file(test_binary_output, cxx_library_output.default_output.default) @@ -112,55 +132,88 @@ def apple_test_impl(ctx: AnalysisContext) -> [list[Provider], Promise]: part_list_output = get_apple_bundle_part_list(ctx, AppleBundlePartListConstructorParams(binaries = [binary_part])) xctest_swift_support_needed = None + debug_info = None + cxx_providers = [] for p in cxx_library_output.providers: if isinstance(p, XCTestSwiftSupportInfo): xctest_swift_support_needed = p.support_needed - break + elif isinstance(p, AppleDebuggableInfo): + debug_info = project_artifacts(ctx.actions, [p.debug_info_tset]) + elif isinstance(p, ValidationInfo): + cxx_providers.append(p) expect(xctest_swift_support_needed != None, "Expected `XCTestSwiftSupportInfo` provider to be present") - - bundle_parts = part_list_output.parts + _get_xctest_framework(ctx, xctest_swift_support_needed) + expect(debug_info != None, "Expected `AppleDebuggableInfo` provider to be present") + + bundle_parts = part_list_output.parts + if not ctx.attrs.embed_xctest_frameworks_in_test_host_app: + # The XCTest frameworks should only be embedded in a single place, + # either the test host (as per Xcode) or in the test itself + bundle_parts += get_xctest_frameworks_bundle_parts(ctx, xctest_swift_support_needed) + + for sanitizer_runtime_dylib in cxx_library_output.sanitizer_runtime_files: + frameworks_destination = AppleBundleDestination("frameworks") + bundle_parts.append( + AppleBundlePart( + source = sanitizer_runtime_dylib, + destination = frameworks_destination, + codesign_on_copy = True, + ), + ) primary_binary_rel_path = get_apple_bundle_part_relative_destination_path(ctx, binary_part) swift_stdlib_args = SwiftStdlibArguments(primary_binary_rel_path = primary_binary_rel_path) - sub_targets = assemble_bundle(ctx, xctest_bundle, bundle_parts, part_list_output.info_plist_part, swift_stdlib_args) - + bundle_result = assemble_bundle( + ctx, + xctest_bundle, + bundle_parts, + part_list_output.info_plist_part, + swift_stdlib_args, + # Adhoc signing can be skipped because the test executable is adhoc signed + # + includes any entitlements if present. + skip_adhoc_signing = True, + incremental_bundling_override = False, + ) + sub_targets = bundle_result.sub_targets sub_targets.update(cxx_library_output.sub_targets) - (debuginfo,) = sub_targets[DEBUGINFO_SUBTARGET] + dsym_artifact = get_apple_dsym( ctx = ctx, executable = test_binary, - debug_info = debuginfo.other_outputs, + debug_info = debug_info, action_identifier = "generate_apple_test_dsym", output_path_override = get_bundle_dir_name(ctx) + ".dSYM", ) sub_targets[DSYM_SUBTARGET] = [DefaultInfo(default_output = dsym_artifact)] - # If the test has a test host, add a subtarget to build the test host app bundle. + # If the test has a test host and a ui test target, add the subtargets to build the app bundles. 
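# Both app bundles are then exposed as named subtargets, e.g. (hypothetical
# target label):
#
#   buck2 build //apps/tests:MyUITests[test-host]
#   buck2 build //apps/tests:MyUITests[ui-test-target]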
sub_targets["test-host"] = [DefaultInfo(default_output = test_host_app_bundle)] if test_host_app_bundle else [DefaultInfo()] + sub_targets["ui-test-target"] = [DefaultInfo(default_output = ui_test_target_app_bundle)] if ui_test_target_app_bundle else [DefaultInfo()] sub_targets[DWARF_AND_DSYM_SUBTARGET] = [ - DefaultInfo(default_output = xctest_bundle, other_outputs = [dsym_artifact]), - _get_test_info(ctx, xctest_bundle, test_host_app_bundle, dsym_artifact), + DefaultInfo(default_output = xctest_bundle, other_outputs = [dsym_artifact], sub_targets = {_XCTOOLCHAIN_SUB_TARGET: ctx.attrs._apple_xctoolchain.providers}), + _get_test_info(ctx, xctest_bundle, test_host_app_bundle, dsym_artifact, ui_test_target_app_bundle), ] + sub_targets[_XCTOOLCHAIN_SUB_TARGET] = ctx.attrs._apple_xctoolchain.providers + return [ DefaultInfo(default_output = xctest_bundle, sub_targets = sub_targets), - _get_test_info(ctx, xctest_bundle, test_host_app_bundle), + _get_test_info(ctx, xctest_bundle, test_host_app_bundle, ui_test_target_app_bundle = ui_test_target_app_bundle), cxx_library_output.xcode_data_info, cxx_library_output.cxx_compilationdb_info, - ] + ] + bundle_result.providers + cxx_providers if uses_explicit_modules(ctx): return get_swift_anonymous_targets(ctx, get_apple_test_providers) else: return get_apple_test_providers([]) -def _get_test_info(ctx: AnalysisContext, xctest_bundle: Artifact, test_host_app_bundle: [Artifact, None], dsym_artifact: [Artifact, None] = None) -> Provider: +def _get_test_info(ctx: AnalysisContext, xctest_bundle: Artifact, test_host_app_bundle: Artifact | None, dsym_artifact: Artifact | None = None, ui_test_target_app_bundle: Artifact | None = None) -> Provider: # When interacting with Tpx, we just pass our various inputs via env vars, # since Tpx basiclaly wants structured output for this. 
- xctest_bundle = cmd_args(xctest_bundle).hidden(dsym_artifact) if dsym_artifact else xctest_bundle + xctest_bundle = cmd_args(xctest_bundle, hidden = dsym_artifact) if dsym_artifact else xctest_bundle env = {"XCTEST_BUNDLE": xctest_bundle} if test_host_app_bundle == None: @@ -169,24 +222,31 @@ def _get_test_info(ctx: AnalysisContext, xctest_bundle: Artifact, test_host_app_ env["HOST_APP_BUNDLE"] = test_host_app_bundle tpx_label = "tpx:apple_test:buck2:appTest" + if ui_test_target_app_bundle != None: + env["TARGET_APP_BUNDLE"] = ui_test_target_app_bundle + tpx_label = "tpx:apple_test:buck2:uiTest" + labels = ctx.attrs.labels + [tpx_label] labels.append(tpx_label) sdk_name = get_apple_sdk_name(ctx) - if sdk_name == MacOSXSdkMetadata.name: - # It's not possible to execute macOS tests on RE yet - local_enabled = True - remote_enabled = False - remote_execution_properties = None - remote_execution_use_case = None + if ctx.attrs.test_re_capabilities: + remote_execution_properties = ctx.attrs.test_re_capabilities + + elif sdk_name == MacOSXSdkMetadata.name: + # @oss-disable: remote_execution_properties = macos_test_re_capabilities() + remote_execution_properties = None # @oss-enable + else: - local_enabled = False - remote_enabled = True # @oss-disable: requires_ios_booted_simulator = ctx.attrs.test_host_app != None or ctx.attrs.ui_test_target_app != None - # @oss-disable: remote_execution_properties = apple_test_re_capabilities(use_unbooted_simulator = not requires_ios_booted_simulator) - # @oss-disable: remote_execution_use_case = apple_test_re_use_case() + # @oss-disable: remote_execution_properties = ios_test_re_capabilities(use_unbooted_simulator = not requires_ios_booted_simulator) remote_execution_properties = None # @oss-enable - remote_execution_use_case = None # @oss-enable + + # @oss-disable: remote_execution_use_case = ctx.attrs.test_re_use_case or apple_test_re_use_case(macos_test = sdk_name == MacOSXSdkMetadata.name) + + remote_execution_use_case = None # @oss-enable + local_enabled = remote_execution_use_case == None + remote_enabled = remote_execution_use_case != None return ExternalRunnerTestInfo( type = "custom", # We inherit a label via the macro layer that overrides this. @@ -212,7 +272,7 @@ def _get_test_info(ctx: AnalysisContext, xctest_bundle: Artifact, test_host_app_ }, ) -def _get_test_host_app_bundle(ctx: AnalysisContext) -> [Artifact, None]: +def _get_test_host_app_bundle(ctx: AnalysisContext) -> Artifact | None: """ Get the bundle for the test host app, if one exists for this test. """ if ctx.attrs.test_host_app: # Copy the test host app bundle into test's output directory @@ -223,18 +283,29 @@ def _get_test_host_app_bundle(ctx: AnalysisContext) -> [Artifact, None]: return None -def _get_test_host_app_binary(ctx: AnalysisContext, test_host_app_bundle: [Artifact, None]) -> [cmd_args, None]: +def _get_test_host_app_binary(ctx: AnalysisContext, test_host_app_bundle: Artifact | None) -> [cmd_args, None]: """ Reference to the binary with the test host app bundle, if one exists for this test. Captures the bundle as an artifact in the cmd_args. 
""" if ctx.attrs.test_host_app == None: return None parts = [test_host_app_bundle] - rel_path = bundle_relative_path_for_destination(AppleBundleDestination("executables"), get_apple_sdk_name(ctx), ctx.attrs.extension) + rel_path = bundle_relative_path_for_destination(AppleBundleDestination("executables"), get_apple_sdk_name(ctx), ctx.attrs.extension, False) if len(rel_path) > 0: parts.append(rel_path) parts.append(ctx.attrs.test_host_app[AppleBundleInfo].binary_name) return cmd_args(parts, delimiter = "/") +def _get_ui_test_target_app_bundle(ctx: AnalysisContext) -> Artifact | None: + """ Get the bundle for the ui test target app, if one exists for this test. """ + if ctx.attrs.ui_test_target_app: + # Copy the ui test target app bundle into test's output directory + original_bundle = ctx.attrs.ui_test_target_app[AppleBundleInfo].bundle + ui_test_target_app_bundle = ctx.actions.declare_output(original_bundle.basename) + ctx.actions.copy_file(ui_test_target_app_bundle, original_bundle) + return ui_test_target_app_bundle + + return None + def _get_bundle_loader_flags(binary: [cmd_args, None]) -> list[typing.Any]: if binary: # During linking we need to link the test shared lib against the test host binary. The @@ -251,9 +322,14 @@ def _xcode_populate_attributes( test_host_app_binary: [cmd_args, None], **_kwargs) -> dict[str, typing.Any]: data = apple_populate_xcode_attributes(ctx = ctx, srcs = srcs, argsfiles = argsfiles, product_name = ctx.attrs.name) - data["output"] = xctest_bundle - if test_host_app_binary: - data["test_host_app_binary"] = test_host_app_binary + data[XcodeDataInfoKeys.OUTPUT] = xctest_bundle + if ctx.attrs.ui_test_target_app: + data[XcodeDataInfoKeys.TEST_TYPE] = "ui-test" + data[XcodeDataInfoKeys.TEST_TARGET] = ctx.attrs.ui_test_target_app.label.raw_target() + else: + data[XcodeDataInfoKeys.TEST_TYPE] = "unit-test" + if test_host_app_binary: + data[XcodeDataInfoKeys.TEST_HOST_APP_BINARY] = test_host_app_binary return data def _get_xctest_framework_search_paths(ctx: AnalysisContext) -> (cmd_args, cmd_args): @@ -279,28 +355,3 @@ def _get_xctest_framework_linker_flags(ctx: AnalysisContext) -> list[[cmd_args, "-F", xctest_framework_search_path, ] - -def _get_xctest_framework(ctx: AnalysisContext, swift_support_needed: bool) -> list[AppleBundlePart]: - swift_support = [ - _get_object_from_platform_path(ctx, "Developer/usr/lib/libXCTestSwiftSupport.dylib"), - ] if swift_support_needed else [] - return [ - _get_object_from_platform_path(ctx, "Developer/Library/Frameworks/XCTest.framework"), - _get_object_from_platform_path(ctx, "Developer/Library/PrivateFrameworks/XCTAutomationSupport.framework"), - _get_object_from_platform_path(ctx, "Developer/Library/PrivateFrameworks/XCTestCore.framework"), - _get_object_from_platform_path(ctx, "Developer/Library/PrivateFrameworks/XCTestSupport.framework"), - _get_object_from_platform_path(ctx, "Developer/Library/PrivateFrameworks/XCUIAutomation.framework"), - _get_object_from_platform_path(ctx, "Developer/Library/PrivateFrameworks/XCUnit.framework"), - _get_object_from_platform_path(ctx, "Developer/usr/lib/libXCTestBundleInject.dylib"), - ] + swift_support - -def _get_object_from_platform_path(ctx: AnalysisContext, platform_relative_path: str) -> AppleBundlePart: - toolchain = ctx.attrs._apple_toolchain[AppleToolchainInfo] - copied_framework = ctx.actions.declare_output(paths.basename(platform_relative_path)) - - # We have to copy because: - # 1) Platform path might be a string (e.g. 
for Xcode toolchains) - # 2) It's not possible to project artifact which is not produced by different target (and platform path is a separate target for distributed toolchains). - ctx.actions.run(["cp", "-PR", cmd_args(toolchain.platform_path, platform_relative_path, delimiter = "/"), copied_framework.as_output()], category = "extract_framework", identifier = platform_relative_path) - - return AppleBundlePart(source = copied_framework, destination = AppleBundleDestination("frameworks"), codesign_on_copy = True) diff --git a/prelude/apple/apple_test_host_app_transition.bzl b/prelude/apple/apple_test_host_app_transition.bzl new file mode 100644 index 0000000000000..d239b018a3180 --- /dev/null +++ b/prelude/apple/apple_test_host_app_transition.bzl @@ -0,0 +1,38 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _apple_test_host_app_transition_impl(platform: PlatformInfo, refs: struct, attrs: struct) -> PlatformInfo: + if not attrs.embed_xctest_frameworks_in_test_host_app: + return platform + + updated_constraints = dict(platform.configuration.constraints) + + test_host_marker_setting_label = refs.embed_xctest_frameworks_constraint_setting[ConstraintSettingInfo].label + if test_host_marker_setting_label in updated_constraints: + return platform + + test_host_marker_value_info = refs.embed_xctest_frameworks_marker_constraint_value[ConstraintValueInfo] + updated_constraints[test_host_marker_setting_label] = test_host_marker_value_info + + return PlatformInfo( + label = platform.label + "-test-host-app", + configuration = ConfigurationInfo( + constraints = updated_constraints, + values = platform.configuration.values, + ), + ) + +apple_test_host_app_transition = transition( + impl = _apple_test_host_app_transition_impl, + refs = { + "embed_xctest_frameworks_constraint_setting": "config//marker/apple/constraints:embed_xctest_frameworks", + "embed_xctest_frameworks_marker_constraint_value": "config//marker/apple/constraints:embed_xctest_frameworks_enabled", + }, + attrs = [ + "embed_xctest_frameworks_in_test_host_app", + ], +) diff --git a/prelude/apple/apple_toolchain.bzl b/prelude/apple/apple_toolchain.bzl index 2f84d2506ce9c..e5c236b3fcf77 100644 --- a/prelude/apple/apple_toolchain.bzl +++ b/prelude/apple/apple_toolchain.bzl @@ -12,10 +12,12 @@ load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxPlatformInfo", "CxxToolchainIn def apple_toolchain_impl(ctx: AnalysisContext) -> list[Provider]: sdk_path = ctx.attrs._internal_sdk_path or ctx.attrs.sdk_path platform_path = ctx.attrs._internal_platform_path or ctx.attrs.platform_path + return [ DefaultInfo(), AppleToolchainInfo( actool = ctx.attrs.actool[RunInfo], + architecture = ctx.attrs.architecture, codesign = ctx.attrs.codesign[RunInfo], codesign_allocate = ctx.attrs.codesign_allocate[RunInfo], codesign_identities_command = ctx.attrs.codesign_identities_command[RunInfo] if ctx.attrs.codesign_identities_command else None, @@ -30,16 +32,16 @@ def apple_toolchain_impl(ctx: AnalysisContext) -> list[Provider]: installer = ctx.attrs.installer, libtool = ctx.attrs.libtool[RunInfo], lipo = ctx.attrs.lipo[RunInfo], - min_version = ctx.attrs.min_version, + mapc = ctx.attrs.mapc[RunInfo] if ctx.attrs.mapc else None, + merge_index_store = ctx.attrs.merge_index_store[RunInfo], momc = 
ctx.attrs.momc[RunInfo], - odrcov = ctx.attrs.odrcov[RunInfo] if ctx.attrs.odrcov else None, + objdump = ctx.attrs.objdump[RunInfo] if ctx.attrs.objdump else None, platform_path = platform_path, sdk_build_version = ctx.attrs.build_version, sdk_name = ctx.attrs.sdk_name, sdk_path = sdk_path, sdk_version = ctx.attrs.version, swift_toolchain_info = ctx.attrs.swift_toolchain[SwiftToolchainInfo] if ctx.attrs.swift_toolchain else None, - watch_kit_stub_binary = ctx.attrs.watch_kit_stub_binary, xcode_build_version = ctx.attrs.xcode_build_version, xcode_version = ctx.attrs.xcode_version, xctest = ctx.attrs.xctest[RunInfo], diff --git a/prelude/apple/apple_toolchain_types.bzl b/prelude/apple/apple_toolchain_types.bzl index 0f7435a91ab47..d4a75136fb6e2 100644 --- a/prelude/apple/apple_toolchain_types.bzl +++ b/prelude/apple/apple_toolchain_types.bzl @@ -5,54 +5,59 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +load("@prelude//apple/swift:swift_toolchain_types.bzl", "SwiftToolchainInfo") +load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxPlatformInfo", "CxxToolchainInfo") + AppleToolchainInfo = provider( # @unsorted-dict-items fields = { - "actool": provider_field(typing.Any, default = None), # "RunInfo" - "codesign_allocate": provider_field(typing.Any, default = None), # "RunInfo" - "codesign_identities_command": provider_field(typing.Any, default = None), # ["RunInfo", None] - "codesign": provider_field(typing.Any, default = None), # "RunInfo" - "compile_resources_locally": provider_field(typing.Any, default = None), # bool - "copy_scene_kit_assets": provider_field(typing.Any, default = None), # "RunInfo" - "cxx_platform_info": provider_field(typing.Any, default = None), # "CxxPlatformInfo" - "cxx_toolchain_info": provider_field(typing.Any, default = None), # "CxxToolchainInfo" - "dsymutil": provider_field(typing.Any, default = None), # "RunInfo" - "dwarfdump": provider_field(typing.Any, default = None), # ["RunInfo", None] - "extra_linker_outputs": provider_field(typing.Any, default = None), # [str] - "ibtool": provider_field(typing.Any, default = None), # "RunInfo" - "installer": provider_field(typing.Any, default = None), # label - "libtool": provider_field(typing.Any, default = None), # "RunInfo" - "lipo": provider_field(typing.Any, default = None), # "RunInfo" - "min_version": provider_field(typing.Any, default = None), # [None, str] - "momc": provider_field(typing.Any, default = None), # "RunInfo" - "odrcov": provider_field(typing.Any, default = None), # ["RunInfo", None] - "platform_path": provider_field(typing.Any, default = None), # [str, artifact] - "sdk_build_version": provider_field(typing.Any, default = None), # "[None, str]" + "actool": provider_field(RunInfo), + "architecture": provider_field(str), + "codesign_allocate": provider_field(RunInfo), + "codesign_identities_command": provider_field(RunInfo | None, default = None), + "codesign": provider_field(RunInfo), + "compile_resources_locally": provider_field(bool), + "copy_scene_kit_assets": provider_field(RunInfo), + "cxx_platform_info": provider_field(CxxPlatformInfo), + "cxx_toolchain_info": provider_field(CxxToolchainInfo), + "dsymutil": provider_field(RunInfo), + "dwarfdump": provider_field(RunInfo | None, default = None), + "extra_linker_outputs": provider_field(list[str]), + "ibtool": provider_field(RunInfo), + "installer": provider_field(Label), + "libtool": provider_field(RunInfo), + "lipo": provider_field(RunInfo), + "mapc": provider_field(RunInfo | None, default = 
None), + "merge_index_store": provider_field(RunInfo), + "momc": provider_field(RunInfo), + "objdump": provider_field(RunInfo | None, default = None), + "platform_path": provider_field(str | Artifact), + "sdk_build_version": provider_field(str | None, default = None), # SDK name to be passed to tools (e.g. actool), equivalent to ApplePlatform::getExternalName() in v1. - "sdk_name": provider_field(typing.Any, default = None), # str - "sdk_path": provider_field(typing.Any, default = None), # [str, artifact] + "sdk_name": provider_field(str), + "sdk_path": provider_field(str | Artifact), # TODO(T124581557) Make it non-optional once there is no "selected xcode" toolchain - "sdk_version": provider_field(typing.Any, default = None), # [None, str] - "swift_toolchain_info": provider_field(typing.Any, default = None), # "SwiftToolchainInfo" - "watch_kit_stub_binary": provider_field(typing.Any, default = None), # "artifact" - "xcode_build_version": provider_field(typing.Any, default = None), # "[None, str]" - "xcode_version": provider_field(typing.Any, default = None), # "[None, str]" - "xctest": provider_field(typing.Any, default = None), # "RunInfo" + "sdk_version": provider_field(str | None, default = None), + "swift_toolchain_info": provider_field(SwiftToolchainInfo), + "xcode_build_version": provider_field(str | None, default = None), + "xcode_version": provider_field(str | None, default = None), + "xctest": provider_field(RunInfo), }, ) AppleToolsInfo = provider( # @unsorted-dict-items fields = { - "assemble_bundle": provider_field(typing.Any, default = None), # RunInfo - "split_arch_combine_dsym_bundles_tool": provider_field(typing.Any, default = None), # RunInfo - "dry_codesign_tool": provider_field(typing.Any, default = None), # "RunInfo" - "adhoc_codesign_tool": provider_field(typing.Any, default = None), # "RunInfo" - "selective_debugging_scrubber": provider_field(typing.Any, default = None), # "RunInfo" - "info_plist_processor": provider_field(typing.Any, default = None), # RunInfo - "ipa_package_maker": provider_field(typing.Any, default = None), # RunInfo - "make_modulemap": provider_field(typing.Any, default = None), # "RunInfo" - "make_vfsoverlay": provider_field(typing.Any, default = None), # "RunInfo" - "swift_objc_header_postprocess": provider_field(typing.Any, default = None), # "RunInfo" + "assemble_bundle": provider_field(RunInfo), + "split_arch_combine_dsym_bundles_tool": provider_field(RunInfo), + "dry_codesign_tool": provider_field(RunInfo), + "adhoc_codesign_tool": provider_field(RunInfo), + "selective_debugging_scrubber": provider_field(RunInfo), + "info_plist_processor": provider_field(RunInfo), + "ipa_package_maker": provider_field(RunInfo), + "make_modulemap": provider_field(RunInfo), + "make_vfsoverlay": provider_field(RunInfo), + "framework_sanitizer": provider_field(RunInfo), + "xcframework_maker": provider_field(RunInfo), }, ) diff --git a/prelude/apple/apple_universal_binaries.bzl b/prelude/apple/apple_universal_binaries.bzl index 43c76fc80142c..d5c1a9286768a 100644 --- a/prelude/apple/apple_universal_binaries.bzl +++ b/prelude/apple/apple_universal_binaries.bzl @@ -11,32 +11,61 @@ load(":apple_bundle_types.bzl", "AppleBundleBinaryOutput") load(":apple_toolchain_types.bzl", "AppleToolsInfo") load(":debug.bzl", "AppleDebuggableInfo") -def create_universal_binary( +def get_universal_binary_name(ctx: AnalysisContext) -> str: + if ctx.attrs.executable_name: + return ctx.attrs.executable_name + binary_deps = ctx.attrs.executable + + # Because `binary_deps` is a split 
transition of the same target, + # the filenames would be identical, so we just pick the first one. + first_binary_dep = binary_deps.values()[0] + first_binary_artifact = first_binary_dep[DefaultInfo].default_outputs[0] + + # The universal executable should have the same name as the base/thin ones + return first_binary_artifact.short_path + +def lipo_binaries( ctx: AnalysisContext, binary_deps: dict[str, Dependency], binary_name: [str, None], - dsym_bundle_name: [str, None], - split_arch_dsym: bool) -> AppleBundleBinaryOutput: + lipo: RunInfo) -> Artifact: binary_output = ctx.actions.declare_output("UniversalBinary" if binary_name == None else binary_name, dir = False) - lipo_cmd = cmd_args([ctx.attrs._apple_toolchain[AppleToolchainInfo].lipo]) + lipo_cmd = [lipo] for (_, binary) in binary_deps.items(): - lipo_cmd.add(cmd_args(binary[DefaultInfo].default_outputs[0])) + lipo_cmd.append(cmd_args(binary[DefaultInfo].default_outputs[0])) + + lipo_cmd.extend(["-create", "-output", binary_output.as_output()]) + ctx.actions.run(cmd_args(lipo_cmd), category = "lipo") + + return binary_output - lipo_cmd.add(["-create", "-output", binary_output.as_output()]) - ctx.actions.run(lipo_cmd, category = "lipo") +def create_universal_binary( + ctx: AnalysisContext, + binary_deps: dict[str, Dependency], + binary_name: [str, None], + dsym_bundle_name: [str, None], + split_arch_dsym: bool) -> AppleBundleBinaryOutput: + binary_output = lipo_binaries(ctx, binary_deps, binary_name, ctx.attrs._apple_toolchain[AppleToolchainInfo].lipo) + + # Universal binaries can be created out of plain `cxx_binary()` / `cxx_library()` + # which lack the `AppleDebuggableInfo` provider. + # TODO(T174234334): Uniformly support debuggable info for apple_*/cxx_* + contains_full_debuggable_info = _all_binaries_have_apple_debuggable_info(binary_deps) dsym_output = None - if split_arch_dsym: + if split_arch_dsym and contains_full_debuggable_info: dsym_output = ctx.actions.declare_output("UniversalBinary.dSYM" if dsym_bundle_name == None else dsym_bundle_name, dir = True) - dsym_combine_cmd = cmd_args([ctx.attrs._apple_tools[AppleToolsInfo].split_arch_combine_dsym_bundles_tool]) + dsym_combine_cmd = [ctx.attrs._apple_tools[AppleToolsInfo].split_arch_combine_dsym_bundles_tool] for (arch, binary) in binary_deps.items(): - dsym_combine_cmd.add(["--dsym-bundle", cmd_args(binary.get(AppleDebuggableInfo).dsyms[0]), "--arch", arch]) - dsym_combine_cmd.add(["--output", dsym_output.as_output()]) - ctx.actions.run(dsym_combine_cmd, category = "universal_binaries_dsym") + dsym_combine_cmd.extend(["--dsym-bundle", cmd_args(binary.get(AppleDebuggableInfo).dsyms[0]), "--arch", arch]) + dsym_combine_cmd.extend(["--output", dsym_output.as_output()]) + ctx.actions.run(cmd_args(dsym_combine_cmd), category = "universal_binaries_dsym") - all_debug_info_tsets = [binary.get(AppleDebuggableInfo).debug_info_tset for binary in binary_deps.values()] + all_debug_info_tsets = [] + if contains_full_debuggable_info: + all_debug_info_tsets = [binary.get(AppleDebuggableInfo).debug_info_tset for binary in binary_deps.values()] return AppleBundleBinaryOutput( binary = binary_output, @@ -50,3 +79,10 @@ def create_universal_binary( ), ), ) + +def _all_binaries_have_apple_debuggable_info(binary_deps: dict[str, Dependency]) -> bool: + for binary in binary_deps.values(): + info = binary.get(AppleDebuggableInfo) + if info == None: + return False + return True diff --git a/prelude/apple/apple_universal_executable.bzl b/prelude/apple/apple_universal_executable.bzl index 
80042e1613e07..0e06bcae8f175 100644 --- a/prelude/apple/apple_universal_executable.bzl +++ b/prelude/apple/apple_universal_executable.bzl @@ -13,8 +13,8 @@ load(":apple_bundle_types.bzl", "AppleBundleLinkerMapInfo", "AppleMinDeploymentV load(":apple_bundle_utility.bzl", "get_default_binary_dep", "get_flattened_binary_deps", "merge_bundle_linker_maps_info") load(":apple_code_signing_types.bzl", "AppleEntitlementsInfo") load(":apple_dsym.bzl", "DSYM_SUBTARGET", "get_apple_dsym_ext") -load(":apple_universal_binaries.bzl", "create_universal_binary") -load(":debug.bzl", "AppleDebuggableInfo") +load(":apple_universal_binaries.bzl", "create_universal_binary", "get_universal_binary_name") +load(":debug.bzl", "AppleDebuggableInfo", "DEBUGINFO_SUBTARGET") load(":resource_groups.bzl", "ResourceGraphInfo") _FORWARDED_PROVIDER_TYPES = [ @@ -28,25 +28,21 @@ _MERGED_PROVIDER_TYPES = [ AppleBundleLinkerMapInfo, ] -def _get_universal_binary_name(binary_deps: dict[str, Dependency]): - # Because `binary_deps` is a split transition of the same target, - # the filenames would be identical, so we just pick the first one. - first_binary_dep = binary_deps.values()[0] - first_binary_artifact = first_binary_dep[DefaultInfo].default_outputs[0] - - # The universal executable should have the same name as the base/thin ones - return first_binary_artifact.short_path - def apple_universal_executable_impl(ctx: AnalysisContext) -> list[Provider]: dsym_name = ctx.attrs.name + ".dSYM" binary_outputs = create_universal_binary( ctx = ctx, binary_deps = ctx.attrs.executable, - binary_name = _get_universal_binary_name(ctx.attrs.executable), + binary_name = get_universal_binary_name(ctx), dsym_bundle_name = dsym_name, split_arch_dsym = ctx.attrs.split_arch_dsym, ) + debug_info = project_artifacts( + actions = ctx.actions, + tsets = [binary_outputs.debuggable_info.debug_info_tset], + ) + sub_targets = {} if ctx.attrs.split_arch_dsym: dsyms = binary_outputs.debuggable_info.dsyms @@ -54,15 +50,19 @@ def apple_universal_executable_impl(ctx: AnalysisContext) -> list[Provider]: dsyms = [get_apple_dsym_ext( ctx = ctx, executable = binary_outputs.binary, - debug_info = project_artifacts( - actions = ctx.actions, - tsets = [binary_outputs.debuggable_info.debug_info_tset], - ), + debug_info = debug_info, action_identifier = ctx.attrs.name + "_dsym", output_path = dsym_name, )] sub_targets[DSYM_SUBTARGET] = [DefaultInfo(default_outputs = dsyms)] + debug_info_artifacts_manifest = ctx.actions.write( + "debuginfo.artifacts", + debug_info, + with_inputs = True, + ) + sub_targets[DEBUGINFO_SUBTARGET] = [DefaultInfo(default_output = debug_info_artifacts_manifest)] + default_binary = get_default_binary_dep(ctx.attrs.executable) forwarded_providers = [] for forward_provider_type in _FORWARDED_PROVIDER_TYPES: diff --git a/prelude/apple/apple_utility.bzl b/prelude/apple/apple_utility.bzl index 81fcb867a0a0f..d37c50cdecf16 100644 --- a/prelude/apple/apple_utility.bzl +++ b/prelude/apple/apple_utility.bzl @@ -8,20 +8,6 @@ load("@prelude//apple:apple_toolchain_types.bzl", "AppleToolchainInfo") load("@prelude//cxx:headers.bzl", "CxxHeadersLayout", "CxxHeadersNaming") load("@prelude//utils:utils.bzl", "value_or") -load(":apple_target_sdk_version.bzl", "get_min_deployment_version_for_node") - -_VERSION_PLACEHOLDER = "(VERSION)" - -# TODO(T115177501): Make target triples part of the toolchains -# Map from SDK name -> target triple _without_ leading architecture -_TARGET_TRIPLE_MAP = { - "iphoneos": "apple-ios{}".format(_VERSION_PLACEHOLDER), - 
"iphonesimulator": "apple-ios{}-simulator".format(_VERSION_PLACEHOLDER), - "maccatalyst": "apple-ios{}-macabi".format(_VERSION_PLACEHOLDER), - "macosx": "apple-macosx{}".format(_VERSION_PLACEHOLDER), - "watchos": "apple-watchos{}".format(_VERSION_PLACEHOLDER), - "watchsimulator": "apple-watchos{}-simulator".format(_VERSION_PLACEHOLDER), -} def get_apple_cxx_headers_layout(ctx: AnalysisContext) -> CxxHeadersLayout: namespace = value_or(ctx.attrs.header_path_prefix, ctx.attrs.name) @@ -33,23 +19,8 @@ def get_module_name(ctx: AnalysisContext) -> str: def has_apple_toolchain(ctx: AnalysisContext) -> bool: return hasattr(ctx.attrs, "_apple_toolchain") -def get_versioned_target_triple(ctx: AnalysisContext) -> str: - apple_toolchain_info = ctx.attrs._apple_toolchain[AppleToolchainInfo] - swift_toolchain_info = apple_toolchain_info.swift_toolchain_info - - architecture = swift_toolchain_info.architecture - if architecture == None: - fail("Need to set `architecture` field of swift_toolchain(), target: {}".format(ctx.label)) - - target_sdk_version = get_min_deployment_version_for_node(ctx) or "" - - sdk_name = apple_toolchain_info.sdk_name - target_triple_with_version_placeholder = _TARGET_TRIPLE_MAP.get(sdk_name) - if target_triple_with_version_placeholder == None: - fail("Could not find target triple for sdk = {}".format(sdk_name)) - - versioned_target_triple = target_triple_with_version_placeholder.replace(_VERSION_PLACEHOLDER, target_sdk_version) - return "{}-{}".format(architecture, versioned_target_triple) +def get_apple_architecture(ctx: AnalysisContext) -> str: + return ctx.attrs._apple_toolchain[AppleToolchainInfo].architecture def get_apple_stripped_attr_value_with_default_fallback(ctx: AnalysisContext) -> bool: stripped = ctx.attrs.stripped @@ -70,15 +41,15 @@ def expand_relative_prefixed_sdk_path( "$RESOURCEDIR": swift_resource_dir, "$SDKROOT": sdk_path, } - expanded_cmd = cmd_args() + expanded_cmd = [] for (path_variable, path_value) in path_expansion_map.items(): if path_to_expand.startswith(path_variable): path = path_to_expand[len(path_variable):] if path.find("$") == 0: fail("Failed to expand framework path: {}".format(path)) - expanded_cmd.add(cmd_args([path_value, path], delimiter = "")) + expanded_cmd.append(cmd_args([path_value, path], delimiter = "")) - return expanded_cmd + return cmd_args(expanded_cmd) def get_disable_pch_validation_flags() -> list[str]: """ @@ -92,3 +63,22 @@ def get_disable_pch_validation_flags() -> list[str]: "-Xcc", "-fno-validate-pch", ] + +def get_base_swiftinterface_compilation_flags(module_name: str) -> cmd_args: + cmd = cmd_args([ + "-frontend", + "-compile-module-from-interface", + "-disable-implicit-swift-modules", + "-serialize-parseable-module-interface-dependency-hashes", + "-disable-modules-validate-system-headers", + "-suppress-warnings", + "-module-name", + module_name, + "-Xcc", + "-fno-implicit-modules", + "-Xcc", + "-fno-implicit-module-maps", + ]) + cmd.add(get_disable_pch_validation_flags()) + + return cmd diff --git a/prelude/apple/apple_xctest_frameworks_utility.bzl b/prelude/apple/apple_xctest_frameworks_utility.bzl new file mode 100644 index 0000000000000..c33d59a922246 --- /dev/null +++ b/prelude/apple/apple_xctest_frameworks_utility.bzl @@ -0,0 +1,42 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+load("@prelude//:paths.bzl", "paths")
+load("@prelude//apple:apple_bundle_destination.bzl", "AppleBundleDestination")
+load("@prelude//apple:apple_bundle_part.bzl", "AppleBundlePart")
+load("@prelude//apple:apple_toolchain_types.bzl", "AppleToolchainInfo")
+
+def get_xctest_frameworks_bundle_parts(ctx: AnalysisContext, swift_support_needed: bool) -> list[AppleBundlePart]:
+    swift_support = []
+    if swift_support_needed:
+        swift_support.append(_get_object_from_platform_path(ctx, "Developer/usr/lib/libXCTestSwiftSupport.dylib"))
+
+        # T201426509: Xcode 16 introduces the Swift Testing framework
+        # that is a load dependency of libXCTestSwiftSupport.dylib
+        if int(ctx.attrs._apple_toolchain[AppleToolchainInfo].xcode_version[:2]) >= 16:
+            swift_support.append(_get_object_from_platform_path(ctx, "Developer/Library/Frameworks/Testing.framework"))
+
+    return [
+        _get_object_from_platform_path(ctx, "Developer/Library/Frameworks/XCTest.framework"),
+        _get_object_from_platform_path(ctx, "Developer/Library/PrivateFrameworks/XCTAutomationSupport.framework"),
+        _get_object_from_platform_path(ctx, "Developer/Library/PrivateFrameworks/XCTestCore.framework"),
+        _get_object_from_platform_path(ctx, "Developer/Library/PrivateFrameworks/XCTestSupport.framework"),
+        _get_object_from_platform_path(ctx, "Developer/Library/PrivateFrameworks/XCUIAutomation.framework"),
+        _get_object_from_platform_path(ctx, "Developer/Library/PrivateFrameworks/XCUnit.framework"),
+        _get_object_from_platform_path(ctx, "Developer/usr/lib/libXCTestBundleInject.dylib"),
+    ] + swift_support
+
+def _get_object_from_platform_path(ctx: AnalysisContext, platform_relative_path: str) -> AppleBundlePart:
+    toolchain = ctx.attrs._apple_toolchain[AppleToolchainInfo]
+    copied_framework = ctx.actions.declare_output(paths.basename(platform_relative_path))
+
+    # We have to copy because:
+    # 1) The platform path might be a string (e.g. for Xcode toolchains)
+    # 2) It's not possible to project an artifact which is not produced by a different target (and the platform path is a separate target for distributed toolchains).
+    ctx.actions.run(["cp", "-PR", cmd_args(toolchain.platform_path, platform_relative_path, delimiter = "/"), copied_framework.as_output()], category = "extract_framework", identifier = platform_relative_path)
+
+    return AppleBundlePart(source = copied_framework, destination = AppleBundleDestination("frameworks"), codesign_on_copy = True)
diff --git a/prelude/apple/apple_xcuitest.bzl b/prelude/apple/apple_xcuitest.bzl
new file mode 100644
index 0000000000000..d08d726460555
--- /dev/null
+++ b/prelude/apple/apple_xcuitest.bzl
@@ -0,0 +1,92 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
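# In rough terms (a sketch, with hypothetical names), the runner bundle this
# rule assembles looks like:
#
#   MyUITest.app/
#       MyUITest                    <- XCTRunner binary thinned to one arch
#       Info.plist
#       PlugIns/MyUITest.xctest     <- the UI test bundle from `test_bundle`
#       Frameworks/XCTest.framework, XCTestCore.framework, ...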
+ +load("@prelude//:paths.bzl", "paths") +load("@prelude//apple:apple_toolchain_types.bzl", "AppleToolchainInfo") +load(":apple_bundle_destination.bzl", "AppleBundleDestination") +load(":apple_bundle_part.bzl", "AppleBundlePart", "assemble_bundle") +load(":apple_bundle_types.bzl", "AppleBundleInfo", "AppleBundleType") +load(":apple_info_plist.bzl", "process_info_plist") +load(":apple_utility.bzl", "get_apple_architecture") + +def apple_xcuitest_impl(ctx: AnalysisContext) -> [list[Provider], Promise]: + # The XCUITest runner app bundle copies the application from the platform + # directory, and includes the UI test bundle in the PlugIns folder. + output_bundle = ctx.actions.declare_output(ctx.attrs.name + "." + ctx.attrs.extension) + bundle_parts = [ + _get_xctrunner_binary(ctx), + _get_uitest_bundle(ctx), + ] + _get_xctrunner_frameworks(ctx) + bundle_result = assemble_bundle( + ctx = ctx, + bundle = output_bundle, + info_plist_part = process_info_plist(ctx, override_input = None), + parts = bundle_parts, + swift_stdlib_args = None, + ) + + return [ + DefaultInfo(default_output = output_bundle), + AppleBundleInfo( + bundle = output_bundle, + bundle_type = AppleBundleType("default"), + binary_name = ctx.attrs.name, + contains_watchapp = False, + # The test runner binary does not contain Swift + skip_copying_swift_stdlib = True, + ), + ] + bundle_result.providers + +def _get_uitest_bundle(ctx: AnalysisContext) -> AppleBundlePart: + return AppleBundlePart( + source = ctx.attrs.test_bundle[DefaultInfo].default_outputs[0], + destination = AppleBundleDestination("plugins"), + ) + +def _get_xctrunner_binary(ctx: AnalysisContext) -> AppleBundlePart: + arch = get_apple_architecture(ctx) + lipo = ctx.attrs._apple_toolchain[AppleToolchainInfo].lipo + platform_path = ctx.attrs._apple_toolchain[AppleToolchainInfo].platform_path + thin_binary = ctx.actions.declare_output(ctx.attrs.name) + xctrunner_path = cmd_args(platform_path, "Developer/Library/Xcode/Agents/XCTRunner.app/XCTRunner", delimiter = "/") + ctx.actions.run([ + lipo, + xctrunner_path, + "-extract", + arch, + "-output", + thin_binary.as_output(), + ], category = "copy_xctrunner") + + return AppleBundlePart( + source = thin_binary, + destination = AppleBundleDestination("executables"), + ) + +def _get_xctrunner_frameworks(ctx: AnalysisContext) -> list[AppleBundlePart]: + # We need to copy the framework as AppleBundlePart requires an artifact. + # It would be nicer to make this an arglike and avoid the copies. + # It would also be nicer to exclude the headers. 
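# For a hypothetical XCTest.framework, the action below amounts to:
#
#   cp -PR $PLATFORM_DIR/Developer/Library/Frameworks/XCTest.framework XCTest.framework
#
# where $PLATFORM_DIR stands in for the toolchain's platform_path, and `-P`
# copies symlinks as symlinks instead of following them, keeping the
# Versions/ symlink layout of macOS-style framework bundles intact.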
+ def copy_platform_framework(platform_relative_path: str) -> AppleBundlePart: + copied_framework = ctx.actions.declare_output(paths.basename(platform_relative_path)) + path = cmd_args(ctx.attrs._apple_toolchain[AppleToolchainInfo].platform_path, platform_relative_path, delimiter = "/") + ctx.actions.run(["cp", "-PR", path, copied_framework.as_output()], category = "copy_framework", identifier = platform_relative_path) + return AppleBundlePart( + source = copied_framework, + destination = AppleBundleDestination("frameworks"), + codesign_on_copy = True, + ) + + runner_frameworks = [ + "Developer/Library/Frameworks/XCTest.framework", + "Developer/Library/PrivateFrameworks/XCTAutomationSupport.framework", + "Developer/Library/PrivateFrameworks/XCTestCore.framework", + "Developer/Library/PrivateFrameworks/XCTestSupport.framework", + "Developer/Library/PrivateFrameworks/XCUIAutomation.framework", + "Developer/Library/PrivateFrameworks/XCUnit.framework", + ] + return [copy_platform_framework(p) for p in runner_frameworks] diff --git a/prelude/apple/cxx_universal_executable.bzl b/prelude/apple/cxx_universal_executable.bzl new file mode 100644 index 0000000000000..c083f03f15640 --- /dev/null +++ b/prelude/apple/cxx_universal_executable.bzl @@ -0,0 +1,26 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//apple:apple_universal_binaries.bzl", "get_universal_binary_name", "lipo_binaries") +load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxToolchainInfo") + +def cxx_universal_executable_impl(ctx: AnalysisContext) -> list[Provider]: + lipo = ctx.attrs._cxx_toolchain[CxxToolchainInfo].lipo + if not lipo: + fail("`cxx_toolchain()` target does not have a valid `lipo` tool: {}".format(ctx.attrs._cxx_toolchain.label)) + + universal_binary = lipo_binaries( + ctx = ctx, + binary_deps = ctx.attrs.executable, + binary_name = get_universal_binary_name(ctx), + lipo = lipo, + ) + + return [ + DefaultInfo(default_output = universal_binary), + RunInfo(args = cmd_args(universal_binary)), + ] diff --git a/prelude/apple/debug.bzl b/prelude/apple/debug.bzl index 7b6b7239bd471..b63c01fba07a7 100644 --- a/prelude/apple/debug.bzl +++ b/prelude/apple/debug.bzl @@ -17,6 +17,16 @@ DEBUGINFO_DB_SUBTARGET = "debuginfo-db" AppleDebugInfo = TransitiveSetArgsProjection +AppleSelectiveDebuggableMetadata = record( + # dSYM which the metadata applies for. + dsym = field(Artifact), + # JSON file containing metadata about the dSYM. + # + # This must be an artifact because there's no access to the focused targets + # JSON file at analysis time. + metadata = field(Artifact), +) + # Represents Apple debug info from both executables and bundles. AppleDebuggableInfo = provider( # @unsorted-dict-items @@ -26,13 +36,14 @@ AppleDebuggableInfo = provider( # a. the owning library target to artifacts, or # b. 
the owning bundle target to filtered artifacts "debug_info_tset": provider_field(ArtifactTSet), - # In the case of b above, contians the map of library target to artifacts, else None + # In the case of b above, contains the map of library target to artifacts, else None "filtered_map": provider_field([dict[Label, list[Artifact]], None], default = None), + "selective_metadata": provider_field(list[AppleSelectiveDebuggableMetadata], default = []), }, ) _AppleDebugInfo = record( - debug_info_tset = "ArtifactTSet", + debug_info_tset = ArtifactTSet, filtered_map = field([dict[Label, list[Artifact]], None]), ) diff --git a/prelude/apple/mockingbird/mockingbird_mock.bzl b/prelude/apple/mockingbird/mockingbird_mock.bzl new file mode 100644 index 0000000000000..96690b9d216c5 --- /dev/null +++ b/prelude/apple/mockingbird/mockingbird_mock.bzl @@ -0,0 +1,184 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//apple:apple_platforms.bzl", "APPLE_PLATFORMS_KEY") +load("@prelude//user:rule_spec.bzl", "RuleRegistrationSpec") +load(":mockingbird_types.bzl", "MockingbirdLibraryInfo", "MockingbirdLibraryRecord", "MockingbirdSourcesInfo") + +def _impl(ctx: AnalysisContext) -> list[Provider]: + mockingbird_info = ctx.attrs.module[MockingbirdLibraryInfo] + + dep_names = [dep[MockingbirdLibraryInfo].name for dep in ctx.attrs.deps] + included_srcs = [src.basename for src in ctx.attrs.srcs] + excluded_srcs = [src.basename for src in ctx.attrs.excluded_srcs] + + for src_name in included_srcs: + if not src_name.endswith(".swift"): + fail("srcs should only specify Swift files. Other source files, such as {}, do not need to be included.".format(src_name)) + + for src_name in excluded_srcs: + if not src_name.endswith(".swift"): + fail("excluded_srcs should only specify Swift files. Other source files, such as {}, do not need to be included.".format(src_name)) + + (json_project_description, src_dirs) = _get_mockingbird_json_project_description(info = mockingbird_info, included_srcs = included_srcs, excluded_srcs = excluded_srcs, dep_names = dep_names) + json_project_description_output = ctx.actions.declare_output("mockingbird_project.json") + ctx.actions.write_json(json_project_description_output.as_output(), json_project_description) + + mockingbird_source = ctx.actions.declare_output(mockingbird_info.name + "Mocks.generated.swift", dir = False) + cmd = cmd_args( + hidden = src_dirs, + ) + + params = [ + ctx.attrs._mockingbird_bin[RunInfo], + "generate", + "--target", + mockingbird_info.name, + "--project", + json_project_description_output, + "--output", + mockingbird_source.as_output(), + "--header", + "// (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.", + "--support", + ctx.attrs._mockingbird_support[DefaultInfo].default_outputs, + "--verbose", + "--disable-cache", + ] + + if ctx.attrs.only_protocols: + params.append("--only-protocols") + + cmd.add(params) + + ctx.actions.run( + cmd, + category = "mockingbird", + local_only = True, # Mockingbird creates sockets for interprocess communication, which is deliberately blocked on RE. 
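# Note that `allow_cache_upload` below still publishes the locally produced
# mocks to the cache, so other builds can hit the cache rather than re-running
# the generator.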
+ weight_percentage = 100, + allow_cache_upload = True, + ) + + return [ + DefaultInfo(mockingbird_source), + MockingbirdSourcesInfo(srcs = [mockingbird_source]), + ] + +def _attrs(): + attribs = { + ## If the superclass for an object being mocked is in another module add it as a dep so mockingbird can find the implementation. + "deps": attrs.list(attrs.dep(), default = []), + ## The list of source files to exclude. Only the name of the file, excluding the path, should be set. If set, the srcs attribute will be ignored. + "excluded_srcs": attrs.set(attrs.source(), sorted = True, default = []), + ## The module to generate mocks for. + "module": attrs.dep(), + ## Whether to only generate mocks for Swift protocols. + "only_protocols": attrs.bool(default = False), + ## A list of source files to include. Only the name of the file, excluding the path, should be set. By default all source files are included and this doesn't need to be specified. + "srcs": attrs.set(attrs.source(), sorted = True, default = []), + "_mockingbird_bin": attrs.exec_dep(providers = [RunInfo], default = "fbsource//fbobjc/VendorLib/Mockingbird:mockingbird-binary"), + "_mockingbird_support": attrs.dep(providers = [DefaultInfo], default = "fbsource//fbobjc/VendorLib/Mockingbird:MockingbirdSupport"), + APPLE_PLATFORMS_KEY: attrs.dict(key = attrs.string(), value = attrs.dep(), sorted = False, default = {}), + } + return attribs + +registration_spec = RuleRegistrationSpec( + name = "mockingbird_mock", + impl = _impl, + attrs = _attrs(), +) + +# Produce JSON project description for Mockingbird codegen +# https://mockingbirdswift.com/json-project-description +# { +# "targets": [ +# { +# "name": "MyLibrary", +# "type": "library", +# "path": "/path/to/MyLibrary", +# "dependencies": [], +# "sources": [ +# "SourceFileA.swift", +# "SourceFileB.swift" +# ] +# }, +# { +# "name": "MyOtherLibrary", +# "type": "library", +# "path": "/path/to/MyOtherLibrary", +# "dependencies": [ +# "MyLibrary" +# ], +# "sources": [ +# "SourceFileA.swift", +# "SourceFileB.swift" +# ] +# }, +# { +# "name": "MyLibraryTests", +# "type": "test", +# "path": "/path/to/MyLibraryTests", +# "dependencies": [ +# "MyLibrary" +# ], +# "sources": [ +# "SourceFileA.swift", +# "SourceFileB.swift" +# ] +# } +# ] +# } +def _get_mockingbird_json_project_description(info: MockingbirdLibraryInfo, included_srcs: list[str], excluded_srcs: list[str], dep_names: list[str]) -> (dict, list): + targets = [] + src_dirs = [] + for record in info.tset.traverse(): + if record.name == info.name: + targets.append(_target_dict_for_mockingbird_record(record = record, included_srcs = included_srcs, excluded_srcs = excluded_srcs, include_non_exported_deps = True)) + src_dirs.append(record.src_dir) + elif record.name in dep_names: + targets.append(_target_dict_for_mockingbird_record(record = record, included_srcs = [], excluded_srcs = [], include_non_exported_deps = False)) + src_dirs.append(record.src_dir) + json = { + "targets": targets, + } + + return (json, src_dirs) + +def _target_dict_for_mockingbird_record(record: MockingbirdLibraryRecord, included_srcs: list[str], excluded_srcs: list[str], include_non_exported_deps: bool) -> dict: + srcs = [] + if len(included_srcs) > 0 and len(excluded_srcs) > 0: + fail("Included srcs and excluded srcs cannot both be set at the same time") + + record_src_names = [src.basename for src in record.srcs] + + for specified_src in included_srcs + excluded_srcs: + if specified_src not in record_src_names: + fail("The source file {} does not exist in target 
{}".format(specified_src, record.name)) + + if len(included_srcs) > 0: + for src_name in record_src_names: + if src_name in included_srcs: + srcs.append(src_name) + elif len(excluded_srcs) > 0: + for src_name in record_src_names: + if src_name not in excluded_srcs: + srcs.append(src_name) + else: + srcs = record_src_names + + deps = record.exported_dep_names + + if include_non_exported_deps: + deps = deps + record.dep_names + + return { + "dependencies": deps, + "name": record.name, + "path": record.src_dir, + "sources": srcs, + "type": record.type, + } diff --git a/prelude/apple/mockingbird/mockingbird_types.bzl b/prelude/apple/mockingbird/mockingbird_types.bzl new file mode 100644 index 0000000000000..0eee9cb67f147 --- /dev/null +++ b/prelude/apple/mockingbird/mockingbird_types.bzl @@ -0,0 +1,42 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +MockingbirdLibraryInfoTSet = transitive_set() + +MockingbirdTargetType = enum("library", "test") + +MockingbirdLibraryInfo = provider( + fields = { + # The name of the target. + "name": provider_field(str), + # Contains a tset with this target's MockingbirdLibraryRecord as the value + # and all of its dependency's MockingbirdLibraryRecord in the children. + "tset": provider_field(MockingbirdLibraryInfoTSet), + }, +) + +MockingbirdLibraryRecord = record( + # The names of this target's dependencies. + dep_names = field(list[str]), + # The names of this target's exported dependencies. + exported_dep_names = field(list[str]), + # The name of the target. + name = str, + # Swift sources in this target. + srcs = field(list[Artifact]), + # Whether this is a library or a test. + type = field(MockingbirdTargetType), + # Symlinked directory containing the source files. + src_dir = field(Artifact), +) + +MockingbirdSourcesInfo = provider( + fields = { + # Source files containing the auto generated mocks produced by mockingbird-cli. 
+ "srcs": provider_field(list[Artifact]), + }, +) diff --git a/prelude/apple/modulemap.bzl b/prelude/apple/modulemap.bzl index 7cba3e9eb9c46..7cfb0b7eb71b1 100644 --- a/prelude/apple/modulemap.bzl +++ b/prelude/apple/modulemap.bzl @@ -17,7 +17,7 @@ load( ) load(":apple_utility.bzl", "get_module_name") -def preprocessor_info_for_modulemap(ctx: AnalysisContext, name: str, headers: list[CHeader], swift_header: [Artifact, None]) -> CPreprocessor: +def preprocessor_info_for_modulemap(ctx: AnalysisContext, name: str, headers: list[CHeader], swift_header: Artifact | None) -> CPreprocessor: # We don't want to name this module.modulemap to avoid implicit importing if name == "module": fail("Don't use the name `module` for modulemaps, this will allow for implicit importing.") @@ -69,20 +69,20 @@ def preprocessor_info_for_modulemap(ctx: AnalysisContext, name: str, headers: li ctx.actions.run(cmd, category = "modulemap", identifier = name) return CPreprocessor( - relative_args = CPreprocessorArgs(args = _exported_preprocessor_args(symlink_tree)), - absolute_args = CPreprocessorArgs(args = _exported_preprocessor_args(symlink_tree)), + args = CPreprocessorArgs(args = _exported_preprocessor_args(symlink_tree)), modular_args = _args_for_modulemap(output, symlink_tree, swift_header), - modulemap_path = cmd_args(output).hidden(cmd_args(symlink_tree)), + modulemap_path = cmd_args(output, hidden = cmd_args(symlink_tree)), ) def _args_for_modulemap( modulemap: Artifact, symlink_tree: Artifact, - swift_header: [Artifact, None]) -> list[cmd_args]: - cmd = cmd_args(modulemap, format = "-fmodule-map-file={}") - cmd.hidden(symlink_tree) - if swift_header: - cmd.hidden(swift_header) + swift_header: Artifact | None) -> list[cmd_args]: + cmd = cmd_args( + modulemap, + format = "-fmodule-map-file={}", + hidden = [symlink_tree] + ([swift_header] if swift_header else []), + ) return [cmd] diff --git a/prelude/apple/prebuilt_apple_framework.bzl b/prelude/apple/prebuilt_apple_framework.bzl index 302e4c54c11e7..b5d72bf12e83a 100644 --- a/prelude/apple/prebuilt_apple_framework.bzl +++ b/prelude/apple/prebuilt_apple_framework.bzl @@ -5,10 +5,33 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
+load( + "@prelude//:artifact_tset.bzl", + "ArtifactInfoTag", + "ArtifactTSet", + "make_artifact_tset", +) +load("@prelude//apple:apple_utility.bzl", "get_base_swiftinterface_compilation_flags") +load("@prelude//apple/swift:apple_sdk_modules_utility.bzl", "is_sdk_modules_provided") +load( + "@prelude//apple/swift:swift_compilation.bzl", + "SwiftDependencyInfo", + "create_swift_dependency_info", + "get_external_debug_info_tsets", + "get_swift_framework_anonymous_targets", +) +load("@prelude//apple/swift:swift_pcm_compilation.bzl", "compile_framework_pcm") +load( + "@prelude//apple/swift:swift_pcm_compilation_types.bzl", + "SwiftPCMUncompiledInfo", +) +load("@prelude//apple/swift:swift_swiftinterface_compilation.bzl", "compile_swiftinterface_common") +load("@prelude//apple/swift:swift_types.bzl", "FrameworkImplicitSearchPathInfo", "get_implicit_framework_search_path_providers") load("@prelude//cxx:cxx_context.bzl", "get_cxx_toolchain_info") load( "@prelude//cxx:cxx_library_utility.bzl", "cxx_attr_exported_linker_flags", + "cxx_attr_preferred_linkage", "cxx_platform_supported", ) load( @@ -18,6 +41,7 @@ load( "cxx_inherited_preprocessor_infos", "cxx_merge_cpreprocessors", ) +load("@prelude//cxx:target_sdk_version.bzl", "get_target_triple", "get_unversioned_target_triple") load( "@prelude//linking:link_groups.bzl", "merge_link_group_lib_info", @@ -27,7 +51,6 @@ load( "LibOutputStyle", "LinkInfo", "LinkInfos", - "Linkage", "create_merged_link_info", ) load( @@ -41,75 +64,228 @@ load( "SharedLibraryInfo", "merge_shared_libraries", ) +load("@prelude//linking:strip.bzl", "strip_object") load("@prelude//utils:utils.bzl", "filter_and_map_idx") -load(":apple_bundle_types.bzl", "AppleBundleInfo") +load(":apple_bundle_types.bzl", "AppleBundleInfo", "AppleBundleTypeDefault") +load(":apple_dsym.bzl", "DSYM_SUBTARGET") load(":apple_frameworks.bzl", "to_framework_name") +load(":apple_toolchain_types.bzl", "AppleToolchainInfo", "AppleToolsInfo") +load(":apple_utility.bzl", "get_apple_stripped_attr_value_with_default_fallback") +load(":debug.bzl", "AppleDebuggableInfo") -def prebuilt_apple_framework_impl(ctx: AnalysisContext) -> list[Provider]: - providers = [] +def prebuilt_apple_framework_impl(ctx: AnalysisContext) -> [list[Provider], Promise]: + def get_prebuilt_apple_framework_providers(deps_providers) -> list[Provider]: + providers = [] - framework_directory_artifact = ctx.attrs.framework - - # Check this rule's `supported_platforms_regex` with the current platform. - if cxx_platform_supported(ctx): - # Sandbox the framework, to avoid leaking other frameworks via search paths. + framework_directory_artifact = ctx.attrs.framework framework_name = to_framework_name(framework_directory_artifact.basename) - framework_dir = ctx.actions.symlinked_dir( - "Frameworks", - {framework_name + ".framework": framework_directory_artifact}, - ) - # Add framework & pp info from deps. - inherited_pp_info = cxx_inherited_preprocessor_infos(ctx.attrs.deps) - providers.append(cxx_merge_cpreprocessors( - ctx, - [CPreprocessor(relative_args = CPreprocessorArgs(args = ["-F", framework_dir]))], - inherited_pp_info, - )) + # Check this rule's `supported_platforms_regex` with the current platform. + if cxx_platform_supported(ctx): + # Sandbox the framework, to avoid leaking other frameworks via search paths. + framework_dir = ctx.actions.symlinked_dir( + "Frameworks", + {framework_name + ".framework": framework_directory_artifact}, + ) - # Add framework to link args. 
- # TODO(T110378120): Support shared linking for mac targets: - # https://fburl.com/code/pqrtt1qr. - args = [] - args.extend(cxx_attr_exported_linker_flags(ctx)) - args.extend(["-F", framework_dir]) - args.extend(["-framework", framework_name]) - link = LinkInfo( - name = framework_name, - pre_flags = args, - ) - providers.append(create_merged_link_info( - ctx, - get_cxx_toolchain_info(ctx).pic_behavior, - {output_style: LinkInfos(default = link) for output_style in LibOutputStyle}, - )) + # Add framework & pp info from deps. + inherited_pp_info = cxx_inherited_preprocessor_infos(ctx.attrs.deps) + providers.append(cxx_merge_cpreprocessors( + ctx, + [CPreprocessor(args = CPreprocessorArgs(args = ["-F", framework_dir]))], + inherited_pp_info, + )) - # Create, augment and provide the linkable graph. - linkable_graph = create_linkable_graph( - ctx, - node = create_linkable_graph_node( + # Add framework to link args. + # TODO(T110378120): Support shared linking for mac targets: + # https://fburl.com/code/pqrtt1qr. + args = [] + args.extend(cxx_attr_exported_linker_flags(ctx)) + args.extend(["-F", framework_dir]) + args.extend(["-framework", framework_name]) + link = LinkInfo( + name = framework_name, + pre_flags = args, + ) + link_info = LinkInfos(default = link) + + providers.append(create_merged_link_info( ctx, - linkable_node = create_linkable_node( + get_cxx_toolchain_info(ctx).pic_behavior, + {output_style: link_info for output_style in LibOutputStyle}, + )) + + # Create, augment and provide the linkable graph. + linkable_graph = create_linkable_graph( + ctx, + node = create_linkable_graph_node( ctx, - preferred_linkage = Linkage("shared"), - link_infos = {LibOutputStyle("shared_lib"): LinkInfos(default = link)}, - # TODO(cjhopman): this should be set to non-None - default_soname = None, + linkable_node = create_linkable_node( + ctx, + preferred_linkage = cxx_attr_preferred_linkage(ctx), + link_infos = {output_style: link_info for output_style in LibOutputStyle}, + # TODO(cjhopman): this should be set to non-None + default_soname = None, + ), + excluded = {ctx.label: None}, ), - excluded = {ctx.label: None}, - ), + ) + providers.append(linkable_graph) + + providers.append(merge_link_group_lib_info(deps = ctx.attrs.deps)) + providers.append(merge_shared_libraries(ctx.actions, deps = filter_and_map_idx(SharedLibraryInfo, ctx.attrs.deps))) + + # The default output is the provided framework. + sub_targets = { + "distribution": _sanitize_framework_for_app_distribution(ctx, framework_directory_artifact) + providers, + } + + if ctx.attrs.dsyms: + sub_targets[DSYM_SUBTARGET] = [DefaultInfo(default_outputs = ctx.attrs.dsyms)] + providers.append(AppleDebuggableInfo(dsyms = ctx.attrs.dsyms, debug_info_tset = ArtifactTSet())) + + providers.append(DefaultInfo(default_output = framework_directory_artifact, sub_targets = sub_targets)) + providers.append(AppleBundleInfo( + bundle = framework_directory_artifact, + bundle_type = AppleBundleTypeDefault, + skip_copying_swift_stdlib = True, + contains_watchapp = None, + )) + + if ctx.attrs.modular: + pcm_provider = _create_uncompiled_pcm_module_info(ctx, framework_directory_artifact, framework_name) + providers.append(pcm_provider) + + # Since not all frameworks expose a swiftinterface, we use the `contains_swift` attribute to determine if one is available. 
+ if ctx.attrs.contains_swift: + swift_dependency_info = _compile_swiftinterface( + ctx, + framework_name, + pcm_provider, + deps_providers, + framework_directory_artifact, + ) + providers.append(swift_dependency_info) + + implicit_search_path_tset = get_implicit_framework_search_path_providers( + ctx, + cmd_args("-F", cmd_args(framework_directory_artifact, parent = 1)), + ctx.attrs.deps, ) - providers.append(linkable_graph) - # The default output is the provided framework. - providers.append(DefaultInfo(default_output = framework_directory_artifact)) + providers.append( + FrameworkImplicitSearchPathInfo(tset = implicit_search_path_tset), + ) + + return providers + + # We cannot determine whether Swift Explicit modules are enabled at this point. + # Therefore, we always return providers for both Implicit and Explicit modules, if SDK modules are available. + # This approach is safe and won't trigger compilation of swiftinterfaces or pcm modules, + # as no upper-level targets will depend on the artifacts from these compilations. + swift_toolchain = ctx.attrs._apple_toolchain[AppleToolchainInfo].swift_toolchain_info + if is_sdk_modules_provided(swift_toolchain): + return get_swift_framework_anonymous_targets(ctx, get_prebuilt_apple_framework_providers) + else: + return get_prebuilt_apple_framework_providers([]) + +def _create_uncompiled_pcm_module_info(ctx: AnalysisContext, framework_directory_artifact: Artifact, framework_name: str) -> SwiftPCMUncompiledInfo: + exported_pp_info = CPreprocessor( + headers = [], + modular_args = [], + args = CPreprocessorArgs(args = [ + cmd_args(["-F", cmd_args(framework_directory_artifact, parent = 1)], delimiter = ""), + ]), + modulemap_path = cmd_args(framework_directory_artifact, "/Modules/module.modulemap", delimiter = ""), + ) + return SwiftPCMUncompiledInfo( + name = framework_name, + is_transient = False, + exported_preprocessor = exported_pp_info, + exported_deps = ctx.attrs.deps, + propagated_preprocessor_args_cmd = cmd_args([]), + uncompiled_sdk_modules = ctx.attrs.sdk_modules, + ) + +def _compile_swiftinterface( + ctx: AnalysisContext, + framework_name: str, + pcm_provider: SwiftPCMUncompiledInfo, + deps_providers, + framework_directory_artifact: Artifact) -> SwiftDependencyInfo: + # To compile the framework's swiftinterface, the PCM module must be precompiled first. 
+ compiled_underlying_pcm = compile_framework_pcm( + ctx, + framework_name, + pcm_provider, + deps_providers, + ["-target", get_target_triple(ctx)], + ) + + partial_cmd = get_base_swiftinterface_compilation_flags(framework_name) + + swiftinterface_path = cmd_args( + framework_directory_artifact, + "/Modules/", + framework_name, + ".swiftmodule/" + get_unversioned_target_triple(ctx) + ".swiftinterface", + delimiter = "", + ) + + swift_compiled_module, _ = compile_swiftinterface_common( + ctx, + ctx.attrs.deps, + True, # is_framework + framework_name, + partial_cmd, + deps_providers, + swiftinterface_path, + "prebuilt_framework_swiftinterface_compilation", + compiled_underlying_pcm, + ) + + debug_info_tset = make_artifact_tset( + actions = ctx.actions, + artifacts = [swift_compiled_module.output_artifact], + children = get_external_debug_info_tsets(ctx.attrs.deps), + label = ctx.label, + tags = [ArtifactInfoTag("swiftmodule")], + ) + + swift_dependency_info = create_swift_dependency_info( + ctx, + ctx.attrs.deps, + deps_providers, + swift_compiled_module, + debug_info_tset, + ) + + return swift_dependency_info + +def _sanitize_framework_for_app_distribution(ctx: AnalysisContext, framework_directory_artifact: Artifact) -> list[Provider]: + framework_name = to_framework_name(framework_directory_artifact.basename) + bundle_for_app_distribution = ctx.actions.declare_output(framework_name + ".framework", dir = True) + + apple_tools = ctx.attrs._apple_tools[AppleToolsInfo] + framework_sanitize_command = cmd_args([ + apple_tools.framework_sanitizer, + "--input", + framework_directory_artifact, + "--output", + bundle_for_app_distribution.as_output(), + ]) + + if get_apple_stripped_attr_value_with_default_fallback(ctx): + strip_args = cmd_args("-x") + stripped = strip_object(ctx, ctx.attrs._apple_toolchain[AppleToolchainInfo].cxx_toolchain_info, framework_directory_artifact.project(framework_name), strip_args, "framework_distribution") + framework_sanitize_command.add("--replacement-binary", stripped) + + ctx.actions.run(framework_sanitize_command, category = "sanitize_prebuilt_apple_framework") + providers = [DefaultInfo(default_output = bundle_for_app_distribution)] providers.append(AppleBundleInfo( - bundle = framework_directory_artifact, - is_watchos = None, + bundle = bundle_for_app_distribution, + bundle_type = AppleBundleTypeDefault, skip_copying_swift_stdlib = True, contains_watchapp = None, )) - providers.append(merge_link_group_lib_info(deps = ctx.attrs.deps)) - providers.append(merge_shared_libraries(ctx.actions, deps = filter_and_map_idx(SharedLibraryInfo, ctx.attrs.deps))) - return providers diff --git a/prelude/apple/resource_groups.bzl b/prelude/apple/resource_groups.bzl index 6b5546946a8ea..abc9a49238add 100644 --- a/prelude/apple/resource_groups.bzl +++ b/prelude/apple/resource_groups.bzl @@ -5,18 +5,14 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
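+# Resource graph for Apple bundles: nodes carry the resource specs declared by
+# a target (resources, asset catalogs, core data, SceneKit assets, and cxx
+# resources), and get_filtered_resources() walks the graph to collect the specs
+# matching a resource group.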
-load( - "@prelude//cxx:groups.bzl", - "Group", - "MATCH_ALL_LABEL", -) +load("@prelude//cxx:groups_types.bzl", "Group", "MATCH_ALL_LABEL") load( "@prelude//utils:graph_utils.bzl", - "breadth_first_traversal_by", + "depth_first_traversal_by", ) load(":apple_asset_catalog_types.bzl", "AppleAssetCatalogSpec") load(":apple_core_data_types.bzl", "AppleCoreDataSpec") -load(":apple_resource_types.bzl", "AppleResourceSpec") +load(":apple_resource_types.bzl", "AppleResourceSpec", "CxxResourceSpec") load(":scene_kit_assets_types.bzl", "SceneKitAssetsSpec") ResourceGroupInfo = provider( @@ -31,10 +27,12 @@ ResourceGroupInfo = provider( # NOTE(agallagher): We do this to maintain existing behavior w/ the # standalone `resource_group_map()` rule, but it's not clear if it's # actually desirable behavior. - "implicit_deps": provider_field(list[Dependency]), + "resource_group_to_implicit_deps_mapping": provider_field(dict[str, list[Dependency]]), }, ) +RESOURCE_GROUP_MAP_ATTR = attrs.option(attrs.dep(providers = [ResourceGroupInfo]), default = None) + ResourceGraphNode = record( label = field(Label), # Attribute labels on the target. @@ -51,6 +49,8 @@ ResourceGraphNode = record( core_data_spec = field([AppleCoreDataSpec, None], None), # Actual scene kit assets, present when node corresponds to `scene_kit_assets` target scene_kit_assets_spec = field([SceneKitAssetsSpec, None], None), + # Actual resource data, present when node corresponds to `cxx_library` target containing resources. + cxx_resource_spec = field([CxxResourceSpec, None], None), ) ResourceGraphTSet = transitive_set() @@ -71,6 +71,7 @@ def create_resource_graph( asset_catalog_spec: [AppleAssetCatalogSpec, None] = None, core_data_spec: [AppleCoreDataSpec, None] = None, scene_kit_assets_spec: [SceneKitAssetsSpec, None] = None, + cxx_resource_spec: [CxxResourceSpec, None] = None, should_propagate: bool = True) -> ResourceGraphInfo: # Collect deps and exported_deps with resources that should propagate. dep_labels, dep_graphs = _filtered_labels_and_graphs(deps) @@ -95,6 +96,7 @@ def create_resource_graph( asset_catalog_spec = asset_catalog_spec, core_data_spec = core_data_spec, scene_kit_assets_spec = scene_kit_assets_spec, + cxx_resource_spec = cxx_resource_spec, ) children = [child_node.nodes for child_node in dep_graphs + exported_dep_graphs] return ResourceGraphInfo( @@ -145,7 +147,7 @@ def get_filtered_resources( root: Label, resource_graph_node_map_func, resource_group: [str, None], - resource_group_mappings: [dict[Label, str], None]) -> (list[AppleResourceSpec], list[AppleAssetCatalogSpec], list[AppleCoreDataSpec], list[SceneKitAssetsSpec]): + resource_group_mappings: [dict[Label, str], None]) -> (list[AppleResourceSpec], list[AppleAssetCatalogSpec], list[AppleCoreDataSpec], list[SceneKitAssetsSpec], list[CxxResourceSpec]): """ Walks the provided DAG and collects resources matching resource groups definition. 
""" @@ -156,7 +158,7 @@ def get_filtered_resources( node = resource_graph_node_map[target] # buildifier: disable=uninitialized return node.exported_deps + node.deps - targets = breadth_first_traversal_by( + targets = depth_first_traversal_by( resource_graph_node_map, get_traversed_deps(root), get_traversed_deps, @@ -166,6 +168,7 @@ def get_filtered_resources( asset_catalog_specs = [] core_data_specs = [] scene_kit_assets_specs = [] + cxx_resource_specs = [] for target in targets: target_resource_group = resource_group_mappings.get(target) @@ -189,5 +192,8 @@ def get_filtered_resources( scene_kit_assets_spec = node.scene_kit_assets_spec if scene_kit_assets_spec: scene_kit_assets_specs.append(scene_kit_assets_spec) + cxx_resource_spec = node.cxx_resource_spec + if cxx_resource_spec: + cxx_resource_specs.append(cxx_resource_spec) - return resource_specs, asset_catalog_specs, core_data_specs, scene_kit_assets_specs + return resource_specs, asset_catalog_specs, core_data_specs, scene_kit_assets_specs, cxx_resource_specs diff --git a/prelude/apple/scene_kit_assets.bzl b/prelude/apple/scene_kit_assets.bzl index 650919b339908..96268c8fb6100 100644 --- a/prelude/apple/scene_kit_assets.bzl +++ b/prelude/apple/scene_kit_assets.bzl @@ -24,7 +24,7 @@ def scene_kit_assets_impl(ctx: AnalysisContext) -> list[Provider]: ) return [DefaultInfo(), graph] -def compile_scene_kit_assets(ctx: AnalysisContext, specs: list[SceneKitAssetsSpec]) -> [Artifact, None]: +def compile_scene_kit_assets(ctx: AnalysisContext, specs: list[SceneKitAssetsSpec]) -> Artifact | None: if len(specs) == 0: return None @@ -50,9 +50,15 @@ def compile_scene_kit_assets(ctx: AnalysisContext, specs: list[SceneKitAssetsSpe ], allow_args = True, ) - combined_command = cmd_args(["/bin/sh", wrapper_script]).hidden(copy_scene_kit_assets_cmds + [output.as_output()]) + combined_command = cmd_args(["/bin/sh", wrapper_script], hidden = copy_scene_kit_assets_cmds + [output.as_output()]) processing_options = get_bundle_resource_processing_options(ctx) - ctx.actions.run(combined_command, prefer_local = processing_options.prefer_local, allow_cache_upload = processing_options.allow_cache_upload, category = "scene_kit_assets") + ctx.actions.run( + combined_command, + prefer_local = processing_options.prefer_local, + prefer_remote = processing_options.prefer_remote, + allow_cache_upload = processing_options.allow_cache_upload, + category = "scene_kit_assets", + ) return output def _get_copy_scene_kit_assets_cmd(ctx: AnalysisContext, scene_kit_assets_spec: SceneKitAssetsSpec) -> cmd_args: diff --git a/prelude/apple/swift/apple_sdk_clang_module.bzl b/prelude/apple/swift/apple_sdk_clang_module.bzl index f861940873d1c..4ef7ab889ea1d 100644 --- a/prelude/apple/swift/apple_sdk_clang_module.bzl +++ b/prelude/apple/swift/apple_sdk_clang_module.bzl @@ -21,6 +21,7 @@ def apple_sdk_clang_module_impl(ctx: AnalysisContext) -> list[Provider]: partial_cmd = cmd, input_relative_path = ctx.attrs.modulemap_relative_path, deps = ctx.attrs.deps, + cxx_deps = ctx.attrs.cxx_deps, ) return [ @@ -32,6 +33,7 @@ def apple_sdk_clang_module_impl(ctx: AnalysisContext) -> list[Provider]: apple_sdk_clang_module = rule( impl = apple_sdk_clang_module_impl, attrs = { + "cxx_deps": attrs.list(attrs.dep(), default = []), "deps": attrs.list(attrs.dep(), default = []), "is_framework": attrs.bool(default = False), # This is a real module name, contrary to `name` diff --git a/prelude/apple/swift/apple_sdk_swift_module.bzl b/prelude/apple/swift/apple_sdk_swift_module.bzl index 
c734c0e456d14..f5cc50c4c3977 100644 --- a/prelude/apple/swift/apple_sdk_swift_module.bzl +++ b/prelude/apple/swift/apple_sdk_swift_module.bzl @@ -5,27 +5,12 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -load("@prelude//apple:apple_utility.bzl", "get_disable_pch_validation_flags") +load("@prelude//apple:apple_utility.bzl", "get_base_swiftinterface_compilation_flags") load(":swift_toolchain_types.bzl", "SdkSwiftOverlayInfo", "SdkUncompiledModuleInfo") def apple_sdk_swift_module_impl(ctx: AnalysisContext) -> list[Provider]: module_name = ctx.attrs.module_name - - cmd = cmd_args([ - "-frontend", - "-compile-module-from-interface", - "-disable-implicit-swift-modules", - "-serialize-parseable-module-interface-dependency-hashes", - "-disable-modules-validate-system-headers", - "-suppress-warnings", - "-module-name", - module_name, - "-Xcc", - "-fno-implicit-modules", - "-Xcc", - "-fno-implicit-module-maps", - ]) - cmd.add(get_disable_pch_validation_flags()) + cmd = get_base_swiftinterface_compilation_flags(module_name) if module_name == "Swift" or module_name == "SwiftOnoneSupport": cmd.add([ @@ -37,6 +22,7 @@ def apple_sdk_swift_module_impl(ctx: AnalysisContext) -> list[Provider]: overlays = [SdkSwiftOverlayInfo(overlays = ctx.attrs.overlays)] module_info = SdkUncompiledModuleInfo( + cxx_deps = ctx.attrs.cxx_deps, deps = ctx.attrs.deps, input_relative_path = ctx.attrs.swiftinterface_relative_path, is_framework = ctx.attrs.is_framework, @@ -55,6 +41,7 @@ def apple_sdk_swift_module_impl(ctx: AnalysisContext) -> list[Provider]: apple_sdk_swift_module = rule( impl = apple_sdk_swift_module_impl, attrs = { + "cxx_deps": attrs.list(attrs.dep(), default = []), "deps": attrs.list(attrs.dep(), default = []), "is_framework": attrs.bool(default = False), # This is a real module name, contrary to `name` diff --git a/prelude/apple/swift/swift_compilation.bzl b/prelude/apple/swift/swift_compilation.bzl index bc68b6ca29932..ae21c1b86508a 100644 --- a/prelude/apple/swift/swift_compilation.bzl +++ b/prelude/apple/swift/swift_compilation.bzl @@ -7,20 +7,25 @@ load( "@prelude//:artifact_tset.bzl", + "ArtifactInfoTag", "ArtifactTSet", # @unused Used as a type "make_artifact_tset", "project_artifacts", ) load("@prelude//:paths.bzl", "paths") -load("@prelude//apple:apple_toolchain_types.bzl", "AppleToolchainInfo", "AppleToolsInfo") -load("@prelude//apple:apple_utility.bzl", "get_disable_pch_validation_flags", "get_module_name", "get_versioned_target_triple") +load("@prelude//apple:apple_error_handler.bzl", "apple_build_error_handler") +load("@prelude//apple:apple_toolchain_types.bzl", "AppleToolchainInfo") +load("@prelude//apple:apple_utility.bzl", "get_disable_pch_validation_flags", "get_module_name") load("@prelude//apple:modulemap.bzl", "preprocessor_info_for_modulemap") -load("@prelude//apple/swift:swift_types.bzl", "SWIFTMODULE_EXTENSION", "SWIFT_EXTENSION") +load("@prelude//apple/swift:swift_types.bzl", "SWIFTMODULE_EXTENSION", "SWIFT_EXTENSION", "get_implicit_framework_search_path_providers") load("@prelude//cxx:argsfiles.bzl", "CompileArgsfile", "CompileArgsfiles") +load("@prelude//cxx:cxx_context.bzl", "get_cxx_toolchain_info") +load("@prelude//cxx:cxx_library_utility.bzl", "cxx_use_shlib_intfs_mode") load( - "@prelude//cxx:compile.bzl", + "@prelude//cxx:cxx_sources.bzl", "CxxSrcWithFlags", # @unused Used as a type ) +load("@prelude//cxx:cxx_toolchain_types.bzl", "ShlibInterfacesMode") load("@prelude//cxx:headers.bzl", "CHeader") load( 
"@prelude//cxx:link_groups.bzl", @@ -34,6 +39,7 @@ load( "cxx_inherited_preprocessor_infos", "cxx_merge_cpreprocessors", ) +load("@prelude//cxx:target_sdk_version.bzl", "get_target_triple") load( "@prelude//linking:link_info.bzl", "LinkInfo", # @unused Used as a type @@ -46,14 +52,19 @@ load( "extract_and_merge_clang_debug_infos", "extract_and_merge_swift_debug_infos", ) -load(":swift_module_map.bzl", "write_swift_module_map_with_swift_deps") +load( + ":swift_incremental_support.bzl", + "get_incremental_object_compilation_flags", + "should_build_swift_incrementally", +) +load(":swift_module_map.bzl", "write_swift_module_map_with_deps") load(":swift_pcm_compilation.bzl", "compile_underlying_pcm", "get_compiled_pcm_deps_tset", "get_swift_pcm_anon_targets") load( ":swift_pcm_compilation_types.bzl", "SwiftPCMUncompiledInfo", ) load(":swift_sdk_pcm_compilation.bzl", "get_swift_sdk_pcm_anon_targets") -load(":swift_sdk_swiftinterface_compilation.bzl", "get_swift_interface_anon_targets") +load(":swift_swiftinterface_compilation.bzl", "get_swift_interface_anon_targets") load( ":swift_toolchain_types.bzl", "SwiftCompiledModuleInfo", @@ -62,12 +73,8 @@ load( "SwiftToolchainInfo", ) -# {"module_name": [exported_headers]}, used for Swift header post processing -ExportedHeadersTSet = transitive_set() - SwiftDependencyInfo = provider(fields = { "debug_info_tset": provider_field(ArtifactTSet), - "exported_headers": provider_field(ExportedHeadersTSet), # Includes modules through exported_deps, used for compilation "exported_swiftmodules": provider_field(SwiftCompiledModuleTset), }) @@ -77,9 +84,22 @@ SwiftCompilationDatabase = record( other_outputs = field(ArgLike), ) +SwiftObjectOutput = record( + object_files = field(list[Artifact]), + argsfiles = field(CompileArgsfiles), + output_map_artifact = field(Artifact | None), + swiftdeps = field(list[Artifact]), +) + +SwiftLibraryForDistributionOutput = record( + swiftinterface = field(Artifact), + private_swiftinterface = field(Artifact), + swiftdoc = field(Artifact), +) + SwiftCompilationOutput = record( # The object file output from compilation. - object_file = field(Artifact), + object_files = field(list[Artifact]), object_format = field(SwiftObjectFormat), # The swiftmodule file output from compilation. swiftmodule = field(Artifact), @@ -90,6 +110,8 @@ SwiftCompilationOutput = record( pre = field(CPreprocessor), # Exported preprocessor info required for ObjC compilation of rdeps. exported_pre = field(CPreprocessor), + # Exported -Swift.h header + exported_swift_header = field(Artifact), # Argsfiles used to compile object files. argsfiles = field(CompileArgsfiles), # A tset of (SDK/first-party) swiftmodule artifacts required to be linked into binary. @@ -99,6 +121,16 @@ SwiftCompilationOutput = record( clang_debug_info = field(ArtifactTSet), # Info required for `[swift-compilation-database]` subtarget. compilation_database = field(SwiftCompilationDatabase), + # An artifact that represent the Swift module map for this target. + output_map_artifact = field(Artifact | None), + # An optional artifact of the exported symbols emitted for this module. + exported_symbols = field(Artifact | None), + # An optional artifact with files that support consuming the generated library with later versions of the swift compiler. 
+    swift_library_for_distribution_output = field(SwiftLibraryForDistributionOutput | None),
+    # A list of artifacts that store the index data
+    index_stores = field(list[Artifact]),
+    # A list of artifacts of the swiftdeps files produced during incremental compilation.
+    swiftdeps = field(list[Artifact]),
 )
 
 SwiftDebugInfo = record(
@@ -106,7 +138,43 @@ SwiftDebugInfo = record(
     shared = list[ArtifactTSet],
 )
 
-REQUIRED_SDK_MODULES = ["Swift", "SwiftOnoneSupport", "Darwin", "_Concurrency", "_StringProcessing"]
+_REQUIRED_SDK_MODULES = ["Swift", "SwiftOnoneSupport", "Darwin", "_Concurrency", "_StringProcessing"]
+
+_REQUIRED_SDK_CXX_MODULES = _REQUIRED_SDK_MODULES + ["std"]
+
+def get_swift_framework_anonymous_targets(ctx: AnalysisContext, get_providers: typing.Callable) -> Promise:
+    # Get SDK deps from direct dependencies,
+    # all transitive deps will be compiled recursively.
+    direct_uncompiled_sdk_deps = get_uncompiled_sdk_deps(
+        ctx.attrs.sdk_modules,
+        _REQUIRED_SDK_MODULES,
+        ctx.attrs._apple_toolchain[AppleToolchainInfo].swift_toolchain_info,
+    )
+
+    # Recursively compile headers of direct and transitive deps as PCM modules.
+    # The prebuilt_apple_framework rule doesn't support custom compiler_flags, so we pass only the target triple.
+    pcm_targets = get_swift_pcm_anon_targets(
+        ctx,
+        ctx.attrs.deps,
+        ["-target", get_target_triple(ctx)],
+        False,  # C++ Interop is disabled for now.
+    )
+
+    # Recursively compile the SDK's Clang dependencies.
+    # The prebuilt_apple_framework rule doesn't support custom compiler_flags, so we pass only the target triple.
+    sdk_pcm_targets = get_swift_sdk_pcm_anon_targets(
+        ctx,
+        False,
+        direct_uncompiled_sdk_deps,
+        ["-target", get_target_triple(ctx)],
+    )
+
+    # Recursively compile SDK and prebuilt_apple_framework's Swift dependencies.
+    swift_interface_anon_targets = get_swift_interface_anon_targets(
+        ctx,
+        direct_uncompiled_sdk_deps,
+    )
+    return ctx.actions.anon_targets(pcm_targets + sdk_pcm_targets + swift_interface_anon_targets).promise.map(get_providers)
 
 def get_swift_anonymous_targets(ctx: AnalysisContext, get_apple_library_providers: typing.Callable) -> Promise:
     swift_cxx_flags = get_swift_cxx_flags(ctx)
@@ -115,7 +183,7 @@ def get_swift_anonymous_targets(ctx: AnalysisContext, get_apple_library_provider
     # all transitive deps will be compiled recursively.
     direct_uncompiled_sdk_deps = get_uncompiled_sdk_deps(
         ctx.attrs.sdk_modules,
-        REQUIRED_SDK_MODULES,
+        _REQUIRED_SDK_CXX_MODULES if ctx.attrs.enable_cxx_interop else _REQUIRED_SDK_MODULES,
         ctx.attrs._apple_toolchain[AppleToolchainInfo].swift_toolchain_info,
     )
 
@@ -125,12 +193,14 @@ def get_swift_anonymous_targets(ctx: AnalysisContext, get_apple_library_provider
         ctx,
         ctx.attrs.deps + ctx.attrs.exported_deps,
         swift_cxx_flags,
+        ctx.attrs.enable_cxx_interop,
     )
 
     # Recursively compiling SDK's Clang dependencies,
     # passing apple_library's cxx flags through that must be used for all downward PCM compilations.
sdk_pcm_targets = get_swift_sdk_pcm_anon_targets( ctx, + ctx.attrs.enable_cxx_interop, direct_uncompiled_sdk_deps, swift_cxx_flags, ) @@ -141,10 +211,7 @@ def get_swift_anonymous_targets(ctx: AnalysisContext, get_apple_library_provider ctx, direct_uncompiled_sdk_deps, ) - return ctx.actions.anon_targets(pcm_targets + sdk_pcm_targets + swift_interface_anon_targets, with_artifacts = True).promise.map(get_apple_library_providers) - -def _get_explicit_modules_forwards_warnings_as_errors() -> bool: - return read_root_config("swift", "explicit_modules_forwards_warnings_as_errors", "false").lower() == "true" + return ctx.actions.anon_targets(pcm_targets + sdk_pcm_targets + swift_interface_anon_targets).promise.map(get_apple_library_providers) def get_swift_cxx_flags(ctx: AnalysisContext) -> list[str]: """Iterates through `swift_compiler_flags` and returns a list of flags that might affect Clang compilation""" @@ -152,7 +219,7 @@ def get_swift_cxx_flags(ctx: AnalysisContext) -> list[str]: # Each target needs to propagate the compilers target triple. # This can vary depending on the deployment target of each library. - gather += ["-target", get_versioned_target_triple(ctx)] + gather += ["-target", get_target_triple(ctx)] for f in ctx.attrs.swift_compiler_flags: if next: @@ -161,11 +228,13 @@ def get_swift_cxx_flags(ctx: AnalysisContext) -> list[str]: next = False elif str(f) == "\"-Xcc\"": next = True - elif _get_explicit_modules_forwards_warnings_as_errors() and str(f) == "\"-warnings-as-errors\"": + elif str(f) == "\"-warnings-as-errors\"": gather.append("-warnings-as-errors") + elif str(f) == "\"-no-warnings-as-errors\"": + gather.append("-no-warnings-as-errors") if ctx.attrs.enable_cxx_interop: - gather += ["-Xfrontend", "-enable-cxx-interop"] + gather += ["-cxx-interoperability-mode=default"] if ctx.attrs.swift_version != None: gather += ["-swift-version", ctx.attrs.swift_version] @@ -180,10 +249,7 @@ def compile_swift( exported_headers: list[CHeader], objc_modulemap_pp_info: [CPreprocessor, None], framework_search_paths_flags: cmd_args, - extra_search_paths_flags: list[ArgLike] = []) -> [SwiftCompilationOutput, None]: - if not srcs: - return None - + extra_search_paths_flags: list[ArgLike] = []) -> ([SwiftCompilationOutput, None], DefaultInfo): # If this target imports XCTest we need to pass the search path to its swiftmodule. framework_search_paths = cmd_args() framework_search_paths.add(_get_xctest_swiftmodule_search_path(ctx)) @@ -195,7 +261,7 @@ def compile_swift( # If a target exports ObjC headers and Swift explicit modules are enabled, # we need to precompile a PCM of the underlying module and supply it to the Swift compilation. 
- if objc_modulemap_pp_info and ctx.attrs.uses_explicit_modules: + if objc_modulemap_pp_info and uses_explicit_modules(ctx): underlying_swift_pcm_uncompiled_info = get_swift_pcm_uncompile_info( ctx, None, @@ -214,12 +280,7 @@ def compile_swift( else: compiled_underlying_pcm = None - toolchain = ctx.attrs._apple_toolchain[AppleToolchainInfo].swift_toolchain_info - module_name = get_module_name(ctx) - output_header = ctx.actions.declare_output(module_name + "-Swift.h") - output_object = ctx.actions.declare_output(module_name + ".o") - output_swiftmodule = ctx.actions.declare_output(module_name + SWIFTMODULE_EXTENSION) shared_flags = _get_shared_flags( ctx, @@ -232,15 +293,42 @@ def compile_swift( extra_search_paths_flags, ) shared_flags.add(framework_search_paths) + swift_interface_info = _create_swift_interface(ctx, shared_flags, module_name) - if toolchain.can_toolchain_emit_obj_c_header_textually: - _compile_swiftmodule(ctx, toolchain, shared_flags, srcs, output_swiftmodule, output_header) - else: - unprocessed_header = ctx.actions.declare_output(module_name + "-SwiftUnprocessed.h") - _compile_swiftmodule(ctx, toolchain, shared_flags, srcs, output_swiftmodule, unprocessed_header) - _perform_swift_postprocessing(ctx, module_name, unprocessed_header, output_header) + if not srcs: + return (None, swift_interface_info) + + toolchain = ctx.attrs._apple_toolchain[AppleToolchainInfo].swift_toolchain_info + + if ctx.attrs.serialize_debugging_options: + if exported_headers: + # TODO(T99100029): We cannot use VFS overlays with Buck2, so we have to disable + # serializing debugging options for mixed libraries to debug successfully + warning("Mixed libraries cannot serialize debugging options, disabling for module `{}` in rule `{}`".format(module_name, ctx.label)) + elif not toolchain.prefix_serialized_debugging_options: + warning("The current toolchain does not support prefixing serialized debugging options, disabling for module `{}` in rule `{}`".format(module_name, ctx.label)) + + output_header = ctx.actions.declare_output(module_name + "-Swift.h") + output_swiftmodule = ctx.actions.declare_output(module_name + SWIFTMODULE_EXTENSION) + + swift_framework_output = None + if _should_compile_with_evolution(ctx): + swift_framework_output = SwiftLibraryForDistributionOutput( + swiftinterface = ctx.actions.declare_output(module_name + ".swiftinterface"), + private_swiftinterface = ctx.actions.declare_output(module_name + ".private.swiftinterface"), + swiftdoc = ctx.actions.declare_output(module_name + ".swiftdoc"), #this is generated automatically once we pass -emit-module-info, so must have this name + ) + + output_symbols = None + + if cxx_use_shlib_intfs_mode(ctx, ShlibInterfacesMode("stub_from_headers")): + output_symbols = ctx.actions.declare_output("__tbd__/" + module_name + ".swift_symbols.txt") + + _compile_swiftmodule(ctx, toolchain, shared_flags, srcs, output_swiftmodule, output_header, output_symbols, swift_framework_output) + + object_output = _compile_object(ctx, toolchain, shared_flags, srcs) - argsfiles = _compile_object(ctx, toolchain, shared_flags, srcs, output_object) + index_stores = _compile_index_stores(ctx, toolchain, shared_flags, srcs) # Swift libraries extend the ObjC modulemaps to include the -Swift.h header modulemap_pp_info = preprocessor_info_for_modulemap(ctx, "swift-extended", exported_headers, output_header) @@ -253,7 +341,7 @@ def compile_swift( exported_pp_info = CPreprocessor( headers = [exported_swift_header], modular_args = modulemap_pp_info.modular_args, - 
relative_args = CPreprocessorArgs(args = modulemap_pp_info.relative_args.args), + args = CPreprocessorArgs(args = modulemap_pp_info.args.args), modulemap_path = modulemap_pp_info.modulemap_path, ) @@ -267,42 +355,24 @@ def compile_swift( pre = CPreprocessor(headers = [swift_header]) # Pass up the swiftmodule paths for this module and its exported_deps - return SwiftCompilationOutput( - object_file = output_object, + return (SwiftCompilationOutput( + output_map_artifact = object_output.output_map_artifact, + object_files = object_output.object_files, object_format = toolchain.object_format, swiftmodule = output_swiftmodule, - dependency_info = get_swift_dependency_info(ctx, exported_pp_info, output_swiftmodule, deps_providers), + dependency_info = get_swift_dependency_info(ctx, output_swiftmodule, deps_providers), pre = pre, exported_pre = exported_pp_info, - argsfiles = argsfiles, + exported_swift_header = exported_swift_header.artifact, + argsfiles = object_output.argsfiles, swift_debug_info = extract_and_merge_swift_debug_infos(ctx, deps_providers, [output_swiftmodule]), clang_debug_info = extract_and_merge_clang_debug_infos(ctx, deps_providers), - compilation_database = _create_compilation_database(ctx, srcs, argsfiles.absolute[SWIFT_EXTENSION]), - ) - -# Swift headers are postprocessed to make them compatible with Objective-C -# compilation that does not use -fmodules. This is a workaround for the bad -# performance of -fmodules without Explicit Modules, once Explicit Modules is -# supported, this postprocessing should be removed. -def _perform_swift_postprocessing( - ctx: AnalysisContext, - module_name: str, - unprocessed_header: Artifact, - output_header: Artifact): - transitive_exported_headers = { - module: module_exported_headers - for exported_headers_map in _get_exported_headers_tset(ctx).traverse() - if exported_headers_map - for module, module_exported_headers in exported_headers_map.items() - } - deps_json = ctx.actions.write_json(module_name + "-Deps.json", transitive_exported_headers) - postprocess_cmd = cmd_args(ctx.attrs._apple_tools[AppleToolsInfo].swift_objc_header_postprocess) - postprocess_cmd.add([ - unprocessed_header, - deps_json, - output_header.as_output(), - ]) - ctx.actions.run(postprocess_cmd, category = "swift_objc_header_postprocess") + compilation_database = _create_compilation_database(ctx, srcs, object_output.argsfiles.relative[SWIFT_EXTENSION]), + exported_symbols = output_symbols, + swift_library_for_distribution_output = swift_framework_output, + index_stores = index_stores, + swiftdeps = object_output.swiftdeps, + ), swift_interface_info) # We use separate actions for swiftmodule and object file output. 
This
 # improves build parallelism at the cost of duplicated work, but by disabling
@@ -314,44 +384,167 @@ def _compile_swiftmodule(
         shared_flags: cmd_args,
         srcs: list[CxxSrcWithFlags],
         output_swiftmodule: Artifact,
-        output_header: Artifact) -> CompileArgsfiles:
+        output_header: Artifact,
+        output_symbols: Artifact | None,
+        swift_framework_output: SwiftLibraryForDistributionOutput | None) -> CompileArgsfiles:
     argfile_cmd = cmd_args(shared_flags)
     argfile_cmd.add([
-        "-Xfrontend",
-        "-experimental-skip-non-inlinable-function-bodies-without-types",
+        "-disable-cmo",
         "-emit-module",
-        "-emit-objc-header",
+        "-experimental-emit-module-separately",
+        "-wmo",
     ])
+
+    if ctx.attrs.swift_module_skip_function_bodies:
+        argfile_cmd.add([
+            "-Xfrontend",
+            "-experimental-skip-non-inlinable-function-bodies-without-types",
+        ])
+
+    if _should_compile_with_evolution(ctx):
+        argfile_cmd.add(["-enable-library-evolution"])
+        argfile_cmd.add(["-emit-module-interface"])
+
     cmd = cmd_args([
-        "-emit-module-path",
-        output_swiftmodule.as_output(),
+        "-emit-objc-header",
         "-emit-objc-header-path",
         output_header.as_output(),
+        "-emit-module-path",
+        output_swiftmodule.as_output(),
     ])
-    return _compile_with_argsfile(ctx, "swiftmodule_compile", SWIFTMODULE_EXTENSION, argfile_cmd, srcs, cmd, toolchain)
+
+    if swift_framework_output:
+        # this is generated implicitly once we pass -emit-module
+        cmd.add(cmd_args(hidden = swift_framework_output.swiftdoc.as_output()))
+        cmd.add([
+            "-emit-parseable-module-interface-path",
+            swift_framework_output.swiftinterface.as_output(),
+            "-emit-private-module-interface-path",
+            swift_framework_output.private_swiftinterface.as_output(),
+        ])
+
+    output_tbd = None
+    if output_symbols != None:
+        # Two-step process: first we need to emit the TBD
+        output_tbd = ctx.actions.declare_output("__tbd__/" + ctx.attrs.name + "-Swift.tbd")
+        cmd.add([
+            "-emit-tbd",
+            "-emit-tbd-path",
+            output_tbd.as_output(),
+        ])
+
+    ret = _compile_with_argsfile(ctx, "swiftmodule_compile", SWIFTMODULE_EXTENSION, argfile_cmd, srcs, cmd, toolchain, num_threads = 1)
+
+    if output_tbd != None:
+        # Now that we have run the TBD action, we need to extract the symbols
+        extract_cmd = cmd_args([
+            get_cxx_toolchain_info(ctx).linker_info.mk_shlib_intf[RunInfo],
+            "extract",
+            "-o",
+            output_symbols.as_output(),
+            "--tbd",
+            output_tbd,
+        ])
+        ctx.actions.run(extract_cmd, category = "extract_tbd_symbols", error_handler = apple_build_error_handler)
+
+    return ret
 
 def _compile_object(
         ctx: AnalysisContext,
         toolchain: SwiftToolchainInfo,
         shared_flags: cmd_args,
-        srcs: list[CxxSrcWithFlags],
-        output_object: Artifact) -> CompileArgsfiles:
-    object_format = toolchain.object_format.value
-    embed_bitcode = False
-    if toolchain.object_format == SwiftObjectFormat("object-embed-bitcode"):
-        object_format = "object"
-        embed_bitcode = True
+        srcs: list[CxxSrcWithFlags]) -> SwiftObjectOutput:
+    if should_build_swift_incrementally(ctx, len(srcs)):
+        incremental_compilation_output = get_incremental_object_compilation_flags(ctx, srcs)
+        num_threads = incremental_compilation_output.num_threads
+        output_map_artifact = incremental_compilation_output.output_map_artifact
+        objects = incremental_compilation_output.artifacts
+        cmd = incremental_compilation_output.incremental_flags_cmd
+        swiftdeps = incremental_compilation_output.swiftdeps
+    else:
+        num_threads = 1
+        output_map_artifact = None
+        swiftdeps = []
+        output_object = ctx.actions.declare_output(get_module_name(ctx) + ".o")
+        objects = [output_object]
+        object_format = 
toolchain.object_format.value
+        embed_bitcode = False
+        if toolchain.object_format == SwiftObjectFormat("object-embed-bitcode"):
+            object_format = "object"
+            embed_bitcode = True
+
+        cmd = cmd_args([
+            "-emit-{}".format(object_format),
+            "-o",
+            output_object.as_output(),
+            "-wmo",
+        ])
-    cmd = cmd_args([
-        "-emit-{}".format(object_format),
-        "-o",
-        output_object.as_output(),
-    ])
+        if embed_bitcode:
+            cmd.add("--embed-bitcode")
+
+    if _should_compile_with_evolution(ctx):
+        cmd.add(["-enable-library-evolution"])
+
+    argsfiles = _compile_with_argsfile(ctx, "swift_compile", SWIFT_EXTENSION, shared_flags, srcs, cmd, toolchain, num_threads = num_threads)
+
+    return SwiftObjectOutput(
+        object_files = objects,
+        argsfiles = argsfiles,
+        output_map_artifact = output_map_artifact,
+        swiftdeps = swiftdeps,
+    )
+
+def _compile_index_stores(
+        ctx: AnalysisContext,
+        toolchain: SwiftToolchainInfo,
+        shared_flags: cmd_args,
+        srcs: list[CxxSrcWithFlags]) -> list[Artifact]:
+    index_stores = []
+    for src in srcs:
+        additional_flags = cmd_args()
+
+        # With the -index-file flag, swiftc will not go through all phases of the compiler
+        # and will not output anything except the index data
+        # The output here is only used for the identifier of the index unit file
+        # The output path is used for computing the hash value in the unit file name
+        output_name = paths.join(
+            ctx.label.cell,
+            ctx.label.package,
+            ctx.label.name,
+            "{}.indexData".format(src.file.short_path),
+        )
+        additional_flags.add(["-o", output_name])
-    if embed_bitcode:
-        cmd.add("--embed-bitcode")
+        index_store_folder_name = paths.join("__indexstore__", get_module_name(ctx), src.file.short_path, "index_store")
+        index_store = ctx.actions.declare_output(index_store_folder_name, dir = True)
-    return _compile_with_argsfile(ctx, "swift_compile", SWIFT_EXTENSION, shared_flags, srcs, cmd, toolchain)
+        additional_flags.add([
+            "-index-file",
+            "-index-ignore-system-modules",
+            "-index-store-path",
+            index_store.as_output(),
+        ])
+
+        # -index-file-path can only accept one file, so we need to build index data for each source file
+        additional_flags.add([
+            "-index-file-path",
+            src.file,
+        ])
+
+        _compile_with_argsfile(
+            ctx,
+            "swift_index_compile",
+            index_store_folder_name,
+            shared_flags,
+            srcs,
+            additional_flags,
+            toolchain,
+            index_store_folder_name,
+        )
+        index_stores.append(index_store)
+
+    return index_stores
 
 def _compile_with_argsfile(
         ctx: AnalysisContext,
@@ -360,11 +553,13 @@
         shared_flags: cmd_args,
         srcs: list[CxxSrcWithFlags],
         additional_flags: cmd_args,
-        toolchain: SwiftToolchainInfo) -> CompileArgsfiles:
+        toolchain: SwiftToolchainInfo,
+        identifier: str | None = None,
+        num_threads: int = 1) -> CompileArgsfiles:
     shell_quoted_args = cmd_args(shared_flags, quote = "shell")
-    argsfile, _ = ctx.actions.write(extension + ".argsfile", shell_quoted_args, allow_args = True)
+    argsfile, _ = ctx.actions.write(extension + "_compile_argsfile", shell_quoted_args, allow_args = True)
     input_args = [shared_flags]
-    cmd_form = cmd_args(cmd_args(argsfile, format = "@{}", delimiter = "")).hidden(input_args)
+    cmd_form = cmd_args(cmd_args(argsfile, format = "@{}", delimiter = ""), hidden = input_args)
     cmd_form.add([s.file for s in srcs])
 
     cmd = cmd_args(toolchain.compiler)
@@ -375,18 +570,48 @@
     # so that CI builds populate caches used by developer machines.
explicit_modules_enabled = uses_explicit_modules(ctx) + build_swift_incrementally = should_build_swift_incrementally(ctx, len(srcs)) + + # When Swift code is built incrementally, the swift-driver embeds absolute paths into the artifacts. + # Unfortunately, this compels us to execute these actions locally. + run_extra_args = { + # Even though the incremental artifacts (`.priors`, `.swiftdeps`) contain abs paths and + # are not cacheable, it's actually fine to still upload to the cache. This is because + # the downside of cached incremental artifacts with abs paths is that it will perform + # a full module compile on the first source change in a module (or any of its transitive + # deps where the public API changes). But this is exactly what would happen if we did not + # allow any caching at all - instead, every cold build would have to rebuild *everything* + # as there will be zero caching (as all incremental actions must run locally and do not + # allow cache upload). + # + # Thus, by allowing cache uploads, we get cold build caching, even if we end up caching + # non-hermetic Swift incremental artifacts. It's just that those non-hermetic artifacts + # do not result in further build perf efficiency later on when modules need to be recompiled. + "allow_cache_upload": True, + } + if build_swift_incrementally and not toolchain.supports_relative_resource_dir: + # When adding -working-directory= we end up with absolute paths in the + # swiftdeps files. + run_extra_args["local_only"] = True + else: + # Swift compilation on RE without explicit modules is impractically expensive + # because there's no shared module cache across different libraries. + run_extra_args["prefer_local"] = not explicit_modules_enabled + # Make it easier to debug whether Swift actions get compiled with explicit modules or not category = category_prefix + ("_with_explicit_mods" if explicit_modules_enabled else "") ctx.actions.run( cmd, category = category, - # Swift compilation on RE without explicit modules is impractically expensive - # because there's no shared module cache across different libraries. - prefer_local = not explicit_modules_enabled, - allow_cache_upload = True, + identifier = identifier, + # When building incrementally, we need to preserve local state between invocations. + no_outputs_cleanup = build_swift_incrementally, + error_handler = apple_build_error_handler, + weight = num_threads, + **run_extra_args ) - relative_argsfile = CompileArgsfile( + argsfile = CompileArgsfile( file = argsfile, cmd_form = cmd_form, input_args = input_args, @@ -394,8 +619,13 @@ def _compile_with_argsfile( args_without_file_prefix_args = shared_flags, ) - # Swift correctly handles relative paths and we can utilize the relative argsfile for absolute paths. - return CompileArgsfiles(relative = {extension: relative_argsfile}, absolute = {extension: relative_argsfile}) + # Swift correctly handles relative paths and we can utilize the relative argsfile for Xcode. 
+ return CompileArgsfiles(relative = {extension: argsfile}, xcode = {extension: argsfile}) + +def _get_serialize_debugging_options_attr_value(ctx: AnalysisContext): + if ctx.attrs.serialize_debugging_options == None: + return True + return ctx.attrs.serialize_debugging_options def _get_shared_flags( ctx: AnalysisContext, @@ -408,16 +638,23 @@ def _get_shared_flags( extra_search_paths_flags: list[ArgLike] = []) -> cmd_args: toolchain = ctx.attrs._apple_toolchain[AppleToolchainInfo].swift_toolchain_info cmd = cmd_args() - cmd.add([ + + if not toolchain.supports_relative_resource_dir: # Setting this to empty will get the driver to make all paths absolute when # passed to the frontend. We later debug prefix these to ensure relative paths # in the debug info. - "-working-directory=", + cmd.add(["-working-directory="]) + + cmd.add([ + # Always use color, consistent with clang. + "-color-diagnostics", + # Unset the working directory in the debug information. + "-file-compilation-dir", + ".", "-sdk", toolchain.sdk_path, "-target", - get_versioned_target_triple(ctx), - "-wmo", + get_target_triple(ctx), "-module-name", module_name, "-Xfrontend", @@ -429,14 +666,28 @@ def _get_shared_flags( "-parse-as-library", ]) - if uses_explicit_modules(ctx): - # We set -fmodule-file-home-is-cwd as this is used to correctly - # set the working directory of modules when generating debug info. + if ctx.attrs.swift_package_name != None: + cmd.add([ + "-package-name", + ctx.attrs.swift_package_name, + ]) + + explicit_modules_enabled = uses_explicit_modules(ctx) + if explicit_modules_enabled: cmd.add([ "-Xcc", "-Xclang", "-Xcc", + # We set -fmodule-file-home-is-cwd as this is used to correctly + # set the working directory of modules when generating debug info. "-fmodule-file-home-is-cwd", + "-Xcc", + "-Xclang", + "-Xcc", + # This is the default for compilation, but not in sourcekitd. + # Set it explicitly here so that indexing will not fail with + # invalid module format errors. + "-fmodule-format=obj", ]) cmd.add(get_disable_pch_validation_flags()) @@ -455,19 +706,7 @@ def _get_shared_flags( else: cmd.add(["-enable-experimental-cxx-interop"]) - serialize_debugging_options = False - if ctx.attrs.serialize_debugging_options: - if objc_headers: - # TODO(T99100029): We cannot use VFS overlays with Buck2, so we have to disable - # serializing debugging options for mixed libraries to debug successfully - warning("Mixed libraries cannot serialize debugging options, disabling for module `{}` in rule `{}`".format(module_name, ctx.label)) - elif not toolchain.prefix_serialized_debugging_options: - warning("The current toolchain does not support prefixing serialized debugging options, disabling for module `{}` in rule `{}`".format(module_name, ctx.label)) - else: - # Apply the debug prefix map to Swift serialized debugging info. - # This will allow for debugging remotely built swiftmodule files. 
- serialize_debugging_options = True - + serialize_debugging_options = _get_serialize_debugging_options_attr_value(ctx) and (not explicit_modules_enabled) and (not objc_headers) and toolchain.prefix_serialized_debugging_options if serialize_debugging_options: cmd.add([ "-Xfrontend", @@ -493,13 +732,29 @@ def _get_shared_flags( "-disable-cxx-interop-requirement-at-import", ]) + if toolchain.supports_swift_importing_objc_forward_declarations and ctx.attrs.import_obj_c_forward_declarations and ctx.attrs.swift_version != "6": + cmd.add([ + "-Xfrontend", + "-enable-upcoming-feature", + "-Xfrontend", + "ImportObjcForwardDeclarations", + ]) + pcm_deps_tset = get_compiled_pcm_deps_tset(ctx, deps_providers) - sdk_clang_deps_tset = get_compiled_sdk_clang_deps_tset(ctx, deps_providers) - sdk_swift_deps_tset = get_compiled_sdk_swift_deps_tset(ctx, deps_providers) - # Add flags required to import ObjC module dependencies - _add_clang_deps_flags(ctx, pcm_deps_tset, sdk_clang_deps_tset, cmd) - _add_swift_deps_flags(ctx, sdk_swift_deps_tset, cmd) + # If Swift Explicit Modules are enabled, a few things must be provided to a compilation job: + # 1. Direct and transitive SDK deps from `sdk_modules` attribute. + # 2. Direct and transitive user-defined deps. + # 3. Transitive SDK deps of user-defined deps. + # (This is the case, when a user-defined dep exports a type from SDK module, + # thus such SDK module should be implicitly visible to consumers of that custom dep) + if uses_explicit_modules(ctx): + sdk_clang_deps_tset = get_compiled_sdk_clang_deps_tset(ctx, deps_providers) + sdk_swift_deps_tset = get_compiled_sdk_swift_deps_tset(ctx, deps_providers) + _add_swift_module_map_args(ctx, sdk_swift_deps_tset, pcm_deps_tset, sdk_clang_deps_tset, cmd) + + _add_clang_deps_flags(ctx, pcm_deps_tset, cmd) + _add_swift_deps_flags(ctx, cmd) # Add flags for importing the ObjC part of this library _add_mixed_library_flags_to_cmd(ctx, cmd, underlying_module, objc_headers, objc_modulemap_pp_info) @@ -511,28 +766,34 @@ def _get_shared_flags( return cmd -def _add_swift_deps_flags( +def _add_swift_module_map_args( ctx: AnalysisContext, + sdk_swiftmodule_deps_tset: SwiftCompiledModuleTset, + pcm_deps_tset: SwiftCompiledModuleTset, sdk_deps_tset: SwiftCompiledModuleTset, cmd: cmd_args): - # If Explicit Modules are enabled, a few things must be provided to a compilation job: - # 1. Direct and transitive SDK deps from `sdk_modules` attribute. - # 2. Direct and transitive user-defined deps. - # 3. Transitive SDK deps of user-defined deps. 
- # (This is the case, when a user-defined dep exports a type from SDK module, - # thus such SDK module should be implicitly visible to consumers of that custom dep) + module_name = get_module_name(ctx) + sdk_swiftmodule_deps_tset = [sdk_swiftmodule_deps_tset] if sdk_swiftmodule_deps_tset else [] + all_deps_tset = ctx.actions.tset( + SwiftCompiledModuleTset, + children = _get_swift_paths_tsets(ctx.attrs.deps + ctx.attrs.exported_deps) + [pcm_deps_tset, sdk_deps_tset] + sdk_swiftmodule_deps_tset, + ) + swift_module_map_artifact = write_swift_module_map_with_deps( + ctx, + module_name, + all_deps_tset, + ) + cmd.add([ + "-Xfrontend", + "-explicit-swift-module-map-file", + "-Xfrontend", + swift_module_map_artifact, + ]) + +def _add_swift_deps_flags( + ctx: AnalysisContext, + cmd: cmd_args): if uses_explicit_modules(ctx): - module_name = get_module_name(ctx) - swift_deps_tset = ctx.actions.tset( - SwiftCompiledModuleTset, - children = _get_swift_paths_tsets(ctx.attrs.deps + ctx.attrs.exported_deps), - ) - swift_module_map_artifact = write_swift_module_map_with_swift_deps( - ctx, - module_name, - sdk_deps_tset, - swift_deps_tset, - ) cmd.add([ "-Xcc", "-fno-implicit-modules", @@ -540,27 +801,24 @@ def _add_swift_deps_flags( "-fno-implicit-module-maps", "-Xfrontend", "-disable-implicit-swift-modules", - "-Xfrontend", - "-explicit-swift-module-map-file", - "-Xfrontend", - swift_module_map_artifact, ]) else: depset = ctx.actions.tset(SwiftCompiledModuleTset, children = _get_swift_paths_tsets(ctx.attrs.deps + ctx.attrs.exported_deps)) cmd.add(depset.project_as_args("module_search_path")) + implicit_search_path_tset = get_implicit_framework_search_path_providers( + ctx, + None, + ctx.attrs.deps, + ) + cmd.add(implicit_search_path_tset.project_as_args("swift_framework_implicit_search_paths_args")) + def _add_clang_deps_flags( ctx: AnalysisContext, pcm_deps_tset: SwiftCompiledModuleTset, - sdk_deps_tset: SwiftCompiledModuleTset, cmd: cmd_args) -> None: - # If a module uses Explicit Modules, all direct and - # transitive Clang deps have to be explicitly added. if uses_explicit_modules(ctx): - cmd.add(pcm_deps_tset.project_as_args("clang_deps")) - - # Add Clang sdk modules which do not go to swift modulemap - cmd.add(sdk_deps_tset.project_as_args("clang_deps")) + cmd.add(pcm_deps_tset.project_as_args("clang_importer_flags")) else: inherited_preprocessor_infos = cxx_inherited_preprocessor_infos(ctx.attrs.deps + ctx.attrs.exported_deps) preprocessors = cxx_merge_cpreprocessors(ctx, [], inherited_preprocessor_infos) @@ -577,23 +835,25 @@ def _add_mixed_library_flags_to_cmd( if uses_explicit_modules(ctx): if underlying_module: cmd.add(underlying_module.clang_importer_args) + cmd.add(underlying_module.clang_module_file_args) cmd.add("-import-underlying-module") return if not objc_headers: return - # TODO(T99100029): We cannot use VFS overlays to mask this import from - # the debugger as they require absolute paths. Instead we will enforce - # that mixed libraries do not have serialized debugging info and rely on - # rdeps to serialize the correct paths. - for arg in objc_modulemap_pp_info.relative_args.args: - cmd.add("-Xcc") - cmd.add(arg) + if objc_modulemap_pp_info: + # TODO(T99100029): We cannot use VFS overlays to mask this import from + # the debugger as they require absolute paths. Instead we will enforce + # that mixed libraries do not have serialized debugging info and rely on + # rdeps to serialize the correct paths. 
+ for arg in objc_modulemap_pp_info.args.args: + cmd.add("-Xcc") + cmd.add(arg) - for arg in objc_modulemap_pp_info.modular_args: - cmd.add("-Xcc") - cmd.add(arg) + for arg in objc_modulemap_pp_info.modular_args: + cmd.add("-Xcc") + cmd.add(arg) cmd.add("-import-underlying-module") @@ -604,24 +864,13 @@ def _get_swift_paths_tsets(deps: list[Dependency]) -> list[SwiftCompiledModuleTs if SwiftDependencyInfo in d ] -def _get_external_debug_info_tsets(deps: list[Dependency]) -> list[ArtifactTSet]: +def get_external_debug_info_tsets(deps: list[Dependency]) -> list[ArtifactTSet]: return [ d[SwiftDependencyInfo].debug_info_tset for d in deps if SwiftDependencyInfo in d ] -def _get_exported_headers_tset(ctx: AnalysisContext, exported_headers: [list[str], None] = None) -> ExportedHeadersTSet: - return ctx.actions.tset( - ExportedHeadersTSet, - value = {get_module_name(ctx): exported_headers} if exported_headers else None, - children = [ - dep.exported_headers - for dep in [x.get(SwiftDependencyInfo) for x in ctx.attrs.exported_deps] - if dep and dep.exported_headers - ], - ) - def get_swift_pcm_uncompile_info( ctx: AnalysisContext, propagated_exported_preprocessor_info: [CPreprocessorInfo, None], @@ -634,64 +883,66 @@ def get_swift_pcm_uncompile_info( name = get_module_name(ctx), is_transient = not ctx.attrs.modular or not exported_pre, exported_preprocessor = exported_pre, - exported_deps = ctx.attrs.exported_deps, + exported_deps = _exported_deps(ctx), propagated_preprocessor_args_cmd = propagated_pp_args_cmd, uncompiled_sdk_modules = ctx.attrs.sdk_modules, ) return None -def get_swift_dependency_info( +def create_swift_dependency_info( ctx: AnalysisContext, - exported_pre: [CPreprocessor, None], - output_module: [Artifact, None], - deps_providers: list) -> SwiftDependencyInfo: - all_deps = ctx.attrs.exported_deps + ctx.attrs.deps - if ctx.attrs.reexport_all_header_dependencies: - exported_deps = all_deps - else: - exported_deps = ctx.attrs.exported_deps - - # We only need to pass up the exported_headers for Swift header post-processing. - # If the toolchain can emit textual imports already then we skip the extra work. - exported_headers = [] - if not ctx.attrs._apple_toolchain[AppleToolchainInfo].swift_toolchain_info.can_toolchain_emit_obj_c_header_textually: - exported_headers = [_header_basename(header) for header in ctx.attrs.exported_headers] - exported_headers += [header.name for header in exported_pre.headers] if exported_pre else [] - + deps, + deps_providers: list, + compiled_info: [SwiftCompiledModuleInfo, None], + debug_info_tset: ArtifactTSet): # We pass through the SDK swiftmodules here to match Buck 1 behaviour. This is # pretty loose, but it matches Buck 1 behavior so cannot be improved until # migration is complete. 
- transitive_swiftmodule_deps = _get_swift_paths_tsets(exported_deps) + [get_compiled_sdk_swift_deps_tset(ctx, deps_providers)] + transitive_swiftmodule_deps = _get_swift_paths_tsets(deps) + [get_compiled_sdk_swift_deps_tset(ctx, deps_providers)] + + if compiled_info: + exported_swiftmodules = ctx.actions.tset(SwiftCompiledModuleTset, value = compiled_info, children = transitive_swiftmodule_deps) + else: + exported_swiftmodules = ctx.actions.tset(SwiftCompiledModuleTset, children = transitive_swiftmodule_deps) + + return SwiftDependencyInfo( + debug_info_tset = debug_info_tset, + exported_swiftmodules = exported_swiftmodules, + ) + +def get_swift_dependency_info( + ctx: AnalysisContext, + output_module: Artifact | None, + deps_providers: list) -> SwiftDependencyInfo: + exported_deps = _exported_deps(ctx) + if output_module: compiled_info = SwiftCompiledModuleInfo( is_framework = False, + is_sdk_module = False, is_swiftmodule = True, module_name = get_module_name(ctx), output_artifact = output_module, ) - exported_swiftmodules = ctx.actions.tset(SwiftCompiledModuleTset, value = compiled_info, children = transitive_swiftmodule_deps) else: - exported_swiftmodules = ctx.actions.tset(SwiftCompiledModuleTset, children = transitive_swiftmodule_deps) + compiled_info = None debug_info_tset = make_artifact_tset( actions = ctx.actions, artifacts = [output_module] if output_module != None else [], - children = _get_external_debug_info_tsets(all_deps), + children = get_external_debug_info_tsets(ctx.attrs.deps + ctx.attrs.exported_deps), label = ctx.label, + tags = [ArtifactInfoTag("swiftmodule")], ) - return SwiftDependencyInfo( - debug_info_tset = debug_info_tset, - exported_headers = _get_exported_headers_tset(ctx, exported_headers), - exported_swiftmodules = exported_swiftmodules, + return create_swift_dependency_info( + ctx, + exported_deps, + deps_providers, + compiled_info, + debug_info_tset, ) -def _header_basename(header: [Artifact, str]) -> str: - if type(header) == type(""): - return paths.basename(header) - else: - return header.basename - def uses_explicit_modules(ctx: AnalysisContext) -> bool: swift_toolchain = ctx.attrs._apple_toolchain[AppleToolchainInfo].swift_toolchain_info return ctx.attrs.uses_explicit_modules and is_sdk_modules_provided(swift_toolchain) @@ -734,7 +985,7 @@ def get_swift_debug_infos( ctx: AnalysisContext, swift_dependency_info: [SwiftDependencyInfo, None], swift_output: [SwiftCompilationOutput, None]) -> SwiftDebugInfo: - # When determing the debug info for shared libraries, if the shared library is a link group, we rely on the link group links to + # When determining the debug info for shared libraries, if the shared library is a link group, we rely on the link group links to # obtain the debug info for linked libraries and only need to provide any swift debug info for this library itself. Otherwise # if linking standard shared, we need to obtain the transitive debug info. 
if get_link_group(ctx): @@ -766,8 +1017,8 @@ def _create_compilation_database( swift_toolchain = ctx.attrs._apple_toolchain[AppleToolchainInfo].swift_toolchain_info mk_comp_db = swift_toolchain.mk_swift_comp_db[RunInfo] - indentifier = module_name + ".swift_comp_db.json" - cdb_artifact = ctx.actions.declare_output(indentifier) + identifier = module_name + ".swift_comp_db.json" + cdb_artifact = ctx.actions.declare_output(identifier) cmd = cmd_args(mk_comp_db) cmd.add(cmd_args(cdb_artifact.as_output(), format = "--output={}")) cmd.add(cmd_args(_get_project_root_file(ctx), format = "--project-root-file={}")) @@ -775,6 +1026,64 @@ def _create_compilation_database( cmd.add("--") cmd.add(argfile.cmd_form) - ctx.actions.run(cmd, category = "swift_compilation_database", identifier = indentifier) + ctx.actions.run( + cmd, + category = "swift_compilation_database", + identifier = identifier, + error_handler = apple_build_error_handler, + ) return SwiftCompilationDatabase(db = cdb_artifact, other_outputs = argfile.cmd_form) + +def _create_swift_interface(ctx: AnalysisContext, shared_flags: cmd_args, module_name: str) -> DefaultInfo: + swift_toolchain = ctx.attrs._apple_toolchain[AppleToolchainInfo].swift_toolchain_info + swift_ide_test_tool = swift_toolchain.swift_ide_test_tool + if not swift_ide_test_tool: + return DefaultInfo() + mk_swift_interface = swift_toolchain.mk_swift_interface + + identifier = module_name + ".swift_interface" + + argsfile, _ = ctx.actions.write( + identifier + "_argsfile", + shared_flags, + allow_args = True, + ) + interface_artifact = ctx.actions.declare_output(identifier) + + mk_swift_args = cmd_args( + mk_swift_interface, + "--swift-ide-test-tool", + swift_ide_test_tool, + "--module", + module_name, + "--out", + interface_artifact.as_output(), + "--", + cmd_args(cmd_args(argsfile, format = "@{}", delimiter = ""), hidden = [shared_flags]), + ) + + ctx.actions.run( + mk_swift_args, + category = "mk_swift_interface", + identifier = identifier, + error_handler = apple_build_error_handler, + ) + + return DefaultInfo( + default_output = interface_artifact, + other_outputs = [ + argsfile, + ], + ) + +def _exported_deps(ctx) -> list[Dependency]: + if ctx.attrs.reexport_all_header_dependencies: + return ctx.attrs.exported_deps + ctx.attrs.deps + else: + return ctx.attrs.exported_deps + +def _should_compile_with_evolution(ctx) -> bool: + if ctx.attrs.enable_library_evolution != None: + return ctx.attrs.enable_library_evolution + return ctx.attrs._enable_library_evolution diff --git a/prelude/apple/swift/swift_debug_info_utils.bzl b/prelude/apple/swift/swift_debug_info_utils.bzl index 828f118fdf1fe..50e9d26adef0c 100644 --- a/prelude/apple/swift/swift_debug_info_utils.bzl +++ b/prelude/apple/swift/swift_debug_info_utils.bzl @@ -7,6 +7,7 @@ load( "@prelude//:artifact_tset.bzl", + "ArtifactInfoTag", "ArtifactTSet", "make_artifact_tset", ) @@ -27,6 +28,7 @@ def extract_and_merge_swift_debug_infos(ctx: AnalysisContext, compiled_pcm_deps_ label = ctx.label, artifacts = artifacts, children = swift_debug_tsets, + tags = [ArtifactInfoTag("swiftmodule")], ) def extract_and_merge_clang_debug_infos(ctx: AnalysisContext, compiled_pcm_deps_providers, artifacts: list[Artifact] = []) -> ArtifactTSet: @@ -41,4 +43,5 @@ def extract_and_merge_clang_debug_infos(ctx: AnalysisContext, compiled_pcm_deps_ label = ctx.label, artifacts = artifacts, children = clang_debug_tsets, + tags = [ArtifactInfoTag("swift_pcm")], ) diff --git a/prelude/apple/swift/swift_incremental_support.bzl 
b/prelude/apple/swift/swift_incremental_support.bzl
new file mode 100644
index 0000000000000..452892714a804
--- /dev/null
+++ b/prelude/apple/swift/swift_incremental_support.bzl
@@ -0,0 +1,132 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+load("@prelude//apple:apple_toolchain_types.bzl", "AppleToolchainInfo")
+load("@prelude//apple:apple_utility.bzl", "get_module_name")
+load("@prelude//apple/swift:swift_toolchain_types.bzl", "SwiftObjectFormat")
+load("@prelude//apple/swift:swift_types.bzl", "SwiftCompilationModes")
+load(
+    "@prelude//cxx:cxx_sources.bzl",
+    "CxxSrcWithFlags",  # @unused Used as a type
+)
+
+_WriteOutputFileMapOutput = record(
+    artifacts = field(list[Artifact]),
+    swiftdeps = field(list[Artifact]),
+    output_map_artifact = field(Artifact),
+)
+
+IncrementalCompilationOutput = record(
+    incremental_flags_cmd = field(cmd_args),
+    artifacts = field(list[Artifact]),
+    output_map_artifact = field(Artifact),
+    num_threads = field(int),
+    swiftdeps = field(list[Artifact]),
+)
+
+SwiftCompilationMode = enum(*SwiftCompilationModes)
+
+_INCREMENTAL_SRC_THRESHOLD = 20
+
+# The maximum number of threads. We don't use
+# host_info to prevent cache misses across
+# different hardware models.
+_MAX_NUM_THREADS = 4
+
+# The maximum number of srcs per parallel action
+_SRCS_PER_THREAD = 50
+
+def should_build_swift_incrementally(ctx: AnalysisContext, srcs_count: int) -> bool:
+    toolchain = ctx.attrs._apple_toolchain[AppleToolchainInfo].swift_toolchain_info
+
+    # Incremental builds are only supported when object files are generated.
+    if toolchain.object_format != SwiftObjectFormat("object"):
+        return False
+
+    mode = SwiftCompilationMode(ctx.attrs.swift_compilation_mode)
+    if mode == SwiftCompilationMode("wmo"):
+        return False
+    elif mode == SwiftCompilationMode("incremental"):
+        return True
+    return srcs_count >= _INCREMENTAL_SRC_THRESHOLD
+
+def get_incremental_object_compilation_flags(ctx: AnalysisContext, srcs: list[CxxSrcWithFlags]) -> IncrementalCompilationOutput:
+    output_file_map = _write_output_file_map(ctx, get_module_name(ctx), srcs, "object", ".o")
+    return _get_incremental_compilation_flags_and_objects(output_file_map, len(srcs), cmd_args(["-emit-object"]))
+
+def _get_incremental_num_threads(num_srcs: int) -> int:
+    if num_srcs == 0:
+        return 1
+
+    src_threads = (num_srcs + _SRCS_PER_THREAD - 1) // _SRCS_PER_THREAD
+    return min(_MAX_NUM_THREADS, src_threads)
+
+def _get_incremental_compilation_flags_and_objects(
+        output_file_map: _WriteOutputFileMapOutput,
+        num_srcs: int,
+        additional_flags: cmd_args) -> IncrementalCompilationOutput:
+    num_threads = _get_incremental_num_threads(num_srcs)
+    cmd = cmd_args(
+        [
+            "-incremental",
+            "-enable-incremental-imports",
+            "-disable-cmo",  # To minimize changes in the generated swiftmodule file.
+ "-enable-batch-mode", + "-output-file-map", + output_file_map.output_map_artifact, + "-j", + str(num_threads), + additional_flags, + ], + hidden = [swiftdep.as_output() for swiftdep in output_file_map.swiftdeps] + + [artifact.as_output() for artifact in output_file_map.artifacts], + ) + + return IncrementalCompilationOutput( + incremental_flags_cmd = cmd, + artifacts = output_file_map.artifacts, + output_map_artifact = output_file_map.output_map_artifact, + num_threads = num_threads, + swiftdeps = output_file_map.swiftdeps, + ) + +def _write_output_file_map( + ctx: AnalysisContext, + module_name: str, + srcs: list[CxxSrcWithFlags], + compilation_mode: str, # Either "object" or "swiftmodule" + extension: str) -> _WriteOutputFileMapOutput: # Either ".o" or ".swiftmodule" + # swift-driver doesn't respect extension for root swiftdeps file and it always has to be `.priors`. + module_swiftdeps = ctx.actions.declare_output("module-build-record." + compilation_mode + ".priors") + output_file_map = { + "": { + "swift-dependencies": module_swiftdeps, + }, + } + + artifacts = [] + swiftdeps = [module_swiftdeps] + for src in srcs: + file_name = src.file.basename + output_artifact = ctx.actions.declare_output(file_name + extension) + swiftdeps_artifact = ctx.actions.declare_output(file_name + "." + compilation_mode + ".swiftdeps") + + part_map = { + compilation_mode: output_artifact, + "swift-dependencies": swiftdeps_artifact, + } + output_file_map[src.file] = part_map + artifacts.append(output_artifact) + swiftdeps.append(swiftdeps_artifact) + + output_map_artifact = ctx.actions.write_json(module_name + "-OutputFileMap." + compilation_mode + ".json", output_file_map) + + return _WriteOutputFileMapOutput( + artifacts = artifacts, + swiftdeps = swiftdeps, + output_map_artifact = output_map_artifact, + ) diff --git a/prelude/apple/swift/swift_module_map.bzl b/prelude/apple/swift/swift_module_map.bzl index 5547232f07ded..dd60747cfe84c 100644 --- a/prelude/apple/swift/swift_module_map.bzl +++ b/prelude/apple/swift/swift_module_map.bzl @@ -8,22 +8,10 @@ load("@prelude//utils:arglike.bzl", "ArgLike") # @unused Used as a type load(":swift_toolchain_types.bzl", "SwiftCompiledModuleTset") -def write_swift_module_map( +def write_swift_module_map_with_deps( ctx: AnalysisContext, module_name: str, - sdk_deps: SwiftCompiledModuleTset) -> ArgLike: - return write_swift_module_map_with_swift_deps(ctx, module_name, sdk_deps, None) - -def write_swift_module_map_with_swift_deps( - ctx: AnalysisContext, - module_name: str, - sdk_swift_deps: SwiftCompiledModuleTset, - swift_deps: [SwiftCompiledModuleTset, None]) -> ArgLike: - if swift_deps: - all_deps = ctx.actions.tset(SwiftCompiledModuleTset, children = [sdk_swift_deps, swift_deps]) - else: - all_deps = sdk_swift_deps - + all_deps: SwiftCompiledModuleTset) -> ArgLike: return ctx.actions.write_json( module_name + ".swift_module_map.json", all_deps.project_as_json("swift_module_map"), diff --git a/prelude/apple/swift/swift_pcm_compilation.bzl b/prelude/apple/swift/swift_pcm_compilation.bzl index 94faaca7b8ba4..4428fc03ebe5d 100644 --- a/prelude/apple/swift/swift_pcm_compilation.bzl +++ b/prelude/apple/swift/swift_pcm_compilation.bzl @@ -23,6 +23,8 @@ load(":swift_toolchain_types.bzl", "SwiftCompiledModuleInfo", "SwiftCompiledModu _REQUIRED_SDK_MODULES = ["Foundation"] +_REQUIRED_SDK_CXX_MODULES = _REQUIRED_SDK_MODULES + ["std"] + def get_compiled_pcm_deps_tset(ctx: AnalysisContext, pcm_deps_providers: list) -> SwiftCompiledModuleTset: pcm_deps = [ 
pcm_deps_provider[WrappedSwiftPCMCompiledInfo].tset @@ -34,10 +36,12 @@ def get_compiled_pcm_deps_tset(ctx: AnalysisContext, pcm_deps_providers: list) - def get_swift_pcm_anon_targets( ctx: AnalysisContext, uncompiled_deps: list[Dependency], - swift_cxx_args: list[str]): + swift_cxx_args: list[str], + enable_cxx_interop: bool): deps = [ { "dep": uncompiled_dep, + "enable_cxx_interop": enable_cxx_interop, "name": uncompiled_dep.label, "swift_cxx_args": swift_cxx_args, "_apple_toolchain": ctx.attrs._apple_toolchain, @@ -54,16 +58,16 @@ def _compile_with_argsfile( args: cmd_args, additional_cmd: cmd_args): shell_quoted_cmd = cmd_args(args, quote = "shell") - argfile, _ = ctx.actions.write(module_name + ".pcm.argsfile", shell_quoted_cmd, allow_args = True) + argfile, _ = ctx.actions.write(module_name + ".swift_pcm_argsfile", shell_quoted_cmd, allow_args = True) swift_toolchain = ctx.attrs._apple_toolchain[AppleToolchainInfo].swift_toolchain_info - cmd = cmd_args(swift_toolchain.compiler) - cmd.add(cmd_args(["@", argfile], delimiter = "")) - - # Action should also depend on all artifacts from the argsfile, otherwise they won't be materialised. - cmd.hidden([args]) - - cmd.add(additional_cmd) + cmd = cmd_args( + swift_toolchain.compiler, + cmd_args(["@", argfile], delimiter = ""), + additional_cmd, + # Action should also depend on all artifacts from the argsfile, otherwise they won't be materialised. + hidden = args, + ) ctx.actions.run( cmd, @@ -77,9 +81,9 @@ def _compiled_module_info( module_name: str, pcm_output: Artifact, pcm_info: SwiftPCMUncompiledInfo) -> SwiftCompiledModuleInfo: - clang_importer_args = cmd_args() - clang_importer_args.add("-Xcc") - clang_importer_args.add( + clang_deps_args = cmd_args() + clang_deps_args.add("-Xcc") + clang_deps_args.add( cmd_args( [ "-fmodule-file=", @@ -90,8 +94,8 @@ def _compiled_module_info( delimiter = "", ), ) - clang_importer_args.add("-Xcc") - clang_importer_args.add( + clang_deps_args.add("-Xcc") + clang_deps_args.add( cmd_args( [ "-fmodule-map-file=", @@ -100,16 +104,21 @@ def _compiled_module_info( delimiter = "", ), ) - clang_importer_args.add("-Xcc") - clang_importer_args.add(pcm_info.exported_preprocessor.relative_args.args) - clang_importer_args.hidden(pcm_info.exported_preprocessor.modular_args) + + clang_importer_args = cmd_args( + cmd_args(pcm_info.exported_preprocessor.args.args, prepend = "-Xcc"), + hidden = pcm_info.exported_preprocessor.modular_args, + ) return SwiftCompiledModuleInfo( + clang_module_file_args = clang_deps_args, clang_importer_args = clang_importer_args, is_framework = False, + is_sdk_module = False, is_swiftmodule = False, module_name = module_name, output_artifact = pcm_output, + clang_modulemap = pcm_info.exported_preprocessor.modulemap_path, ) def _swift_pcm_compilation_impl(ctx: AnalysisContext) -> [Promise, list[Provider]]: @@ -181,15 +190,17 @@ def _swift_pcm_compilation_impl(ctx: AnalysisContext) -> [Promise, list[Provider ), ] + required_sdk_modules = _REQUIRED_SDK_CXX_MODULES if ctx.attrs.enable_cxx_interop else _REQUIRED_SDK_MODULES direct_uncompiled_sdk_deps = get_uncompiled_sdk_deps( ctx.attrs.dep[SwiftPCMUncompiledInfo].uncompiled_sdk_modules, - _REQUIRED_SDK_MODULES, + required_sdk_modules, ctx.attrs._apple_toolchain[AppleToolchainInfo].swift_toolchain_info, ) # Recursively compiling SDK's Clang dependencies sdk_pcm_deps_anon_targets = get_swift_sdk_pcm_anon_targets( ctx, + ctx.attrs.enable_cxx_interop, direct_uncompiled_sdk_deps, ctx.attrs.swift_cxx_args, ) @@ -199,26 +210,28 @@ def 
_swift_pcm_compilation_impl(ctx: AnalysisContext) -> [Promise, list[Provider ctx, ctx.attrs.dep[SwiftPCMUncompiledInfo].exported_deps, ctx.attrs.swift_cxx_args, + ctx.attrs.enable_cxx_interop, ) - return ctx.actions.anon_targets(sdk_pcm_deps_anon_targets + swift_pcm_anon_targets, with_artifacts = True).promise.map(k) + return ctx.actions.anon_targets(sdk_pcm_deps_anon_targets + swift_pcm_anon_targets).promise.map(k) _swift_pcm_compilation = rule( impl = _swift_pcm_compilation_impl, attrs = { "dep": attrs.dep(), + "enable_cxx_interop": attrs.bool(), "swift_cxx_args": attrs.list(attrs.string(), default = []), "_apple_toolchain": attrs.dep(), }, ) -def compile_underlying_pcm( +def _compile_pcm( ctx: AnalysisContext, + action_name: str, + module_name: str, uncompiled_pcm_info: SwiftPCMUncompiledInfo, compiled_pcm_deps_providers, swift_cxx_args: list[str], - framework_search_path_flags: cmd_args) -> SwiftCompiledModuleInfo: - module_name = get_module_name(ctx) - + additional_args: cmd_args) -> SwiftCompiledModuleInfo: # `compiled_pcm_deps_providers` will contain `WrappedSdkCompiledModuleInfo` providers # from direct SDK deps and transitive deps that export sdk deps. sdk_deps_tset = get_compiled_sdk_clang_deps_tset(ctx, compiled_pcm_deps_providers) @@ -235,23 +248,58 @@ def compile_underlying_pcm( pcm_deps_tset, swift_cxx_args, ) + cmd.add(additional_args) + + _compile_with_argsfile( + ctx, + action_name, + module_name, + cmd, + additional_cmd, + ) + return _compiled_module_info(module_name, pcm_output, uncompiled_pcm_info) + +def compile_framework_pcm( + ctx: AnalysisContext, + module_name: str, + uncompiled_pcm_info: SwiftPCMUncompiledInfo, + compiled_pcm_deps_providers, + swift_cxx_args: list[str]) -> SwiftCompiledModuleInfo: + return _compile_pcm( + ctx, + "swift_prebuilt_framework_pcm_compile", + module_name, + uncompiled_pcm_info, + compiled_pcm_deps_providers, + swift_cxx_args, + cmd_args(), + ) + +def compile_underlying_pcm( + ctx: AnalysisContext, + uncompiled_pcm_info: SwiftPCMUncompiledInfo, + compiled_pcm_deps_providers, + swift_cxx_args: list[str], + framework_search_path_flags: cmd_args) -> SwiftCompiledModuleInfo: + module_name = get_module_name(ctx) modulemap_path = uncompiled_pcm_info.exported_preprocessor.modulemap_path - cmd.add([ + cmd = cmd_args([ "-Xcc", "-I", "-Xcc", - cmd_args([cmd_args(modulemap_path).parent(), "exported_symlink_tree"], delimiter = "/"), + cmd_args([cmd_args(modulemap_path, parent = 1), "exported_symlink_tree"], delimiter = "/"), ]) cmd.add(framework_search_path_flags) - _compile_with_argsfile( + return _compile_pcm( ctx, "swift_underlying_pcm_compile", module_name, + uncompiled_pcm_info, + compiled_pcm_deps_providers, + swift_cxx_args, cmd, - additional_cmd, ) - return _compiled_module_info(module_name, pcm_output, uncompiled_pcm_info) def _get_base_pcm_flags( ctx: AnalysisContext, @@ -261,41 +309,33 @@ def _get_base_pcm_flags( pcm_deps_tset: SwiftCompiledModuleTset, swift_cxx_args: list[str]) -> (cmd_args, cmd_args, Artifact): swift_toolchain = ctx.attrs._apple_toolchain[AppleToolchainInfo].swift_toolchain_info + modulemap_path = uncompiled_pcm_info.exported_preprocessor.modulemap_path + pcm_output = ctx.actions.declare_output(module_name + ".pcm") - cmd = cmd_args() - cmd.add(get_shared_pcm_compilation_args(module_name)) - cmd.add(["-sdk", swift_toolchain.sdk_path]) - cmd.add(swift_toolchain.compiler_flags) - - if swift_toolchain.resource_dir: - cmd.add([ + cmd = cmd_args( + get_shared_pcm_compilation_args(module_name), + ["-sdk", 
swift_toolchain.sdk_path],
+        swift_toolchain.compiler_flags,
+        ([
             "-resource-dir",
             swift_toolchain.resource_dir,
-        ])
-
-    cmd.add(sdk_deps_tset.project_as_args("clang_deps"))
-    cmd.add(pcm_deps_tset.project_as_args("clang_deps"))
+        ] if swift_toolchain.resource_dir else []),
+        sdk_deps_tset.project_as_args("clang_module_file_flags"),
+        pcm_deps_tset.project_as_args("clang_module_file_flags"),
+        pcm_deps_tset.project_as_args("clang_importer_flags"),
+        # To correctly resolve modulemap's headers,
+        # a search path to the root of modulemap should be passed.
+        cmd_args(uncompiled_pcm_info.exported_preprocessor.args.args, prepend = "-Xcc"),
+        # Modular deps like `-Swift.h` have to be materialized.
+        hidden = uncompiled_pcm_info.exported_preprocessor.modular_args,
+    )
 
-    modulemap_path = uncompiled_pcm_info.exported_preprocessor.modulemap_path
-    pcm_output = ctx.actions.declare_output(module_name + ".pcm")
+    cmd.add(swift_cxx_args)
 
-    additional_cmd = cmd_args(swift_cxx_args)
-    additional_cmd.add([
+    additional_cmd = cmd_args(
         "-o",
         pcm_output.as_output(),
         modulemap_path,
-    ])
-
-    # To correctly resolve modulemap's headers,
-    # a search path to the root of modulemap should be passed.
-    cmd.add([
-        "-Xcc",
-        "-I",
-        "-Xcc",
-        cmd_args(modulemap_path).parent(),
-    ])
-
-    # Modular deps like `-Swift.h` have to be materialized.
-    cmd.hidden(uncompiled_pcm_info.exported_preprocessor.modular_args)
+    )
 
     return (cmd, additional_cmd, pcm_output)
diff --git a/prelude/apple/swift/swift_sdk_pcm_compilation.bzl b/prelude/apple/swift/swift_sdk_pcm_compilation.bzl
index a1bd5dd21c7e5..934a721f38af7 100644
--- a/prelude/apple/swift/swift_sdk_pcm_compilation.bzl
+++ b/prelude/apple/swift/swift_sdk_pcm_compilation.bzl
@@ -44,10 +44,10 @@ def get_shared_pcm_compilation_args(module_name: str) -> cmd_args:
         # to avoid serializing it as an absolute path.
         "-Xcc",
         "-working-directory=",
-        # Using a relative resource dir requires we add the working directory as a search
-        # path to be able to find the compiler generated includes.
+        # AssetsLibrary ships with a #warning, which we shouldn't error on when compiling
+        # the SDK module. This is probably not avoidable or removable until the next Xcode major version.
         "-Xcc",
-        "-I.",
+        "-Wno-error=#warnings",
     ])
 
     cmd.add(get_disable_pch_validation_flags())
@@ -90,6 +90,7 @@ def _add_sdk_module_search_path(cmd, uncompiled_sdk_module_info, apple_toolchain
 
 def get_swift_sdk_pcm_anon_targets(
         ctx: AnalysisContext,
+        enable_cxx_interop: bool,
         uncompiled_sdk_deps: list[Dependency],
         swift_cxx_args: list[str]):
     # We include the Swift deps here too as we need
@@ -97,6 +98,7 @@ def get_swift_sdk_pcm_anon_targets(
     return [
         (_swift_sdk_pcm_compilation, {
             "dep": module_dep,
+            "enable_cxx_interop": enable_cxx_interop,
             "name": module_dep.label,
             "swift_cxx_args": swift_cxx_args,
             "_apple_toolchain": ctx.attrs._apple_toolchain,
@@ -135,7 +137,15 @@ def _swift_sdk_pcm_compilation_impl(ctx: AnalysisContext) -> [Promise, list[Prov
             swift_toolchain.resource_dir,
         ])
 
-    cmd.add(sdk_deps_tset.project_as_args("clang_deps"))
+    if not swift_toolchain.supports_relative_resource_dir:
+        # When the compiler does not correctly serialize builtin header paths,
+        # we need to specify the CWD as a search path to find the headers.
+ cmd.add([ + "-Xcc", + "-I.", + ]) + + cmd.add(sdk_deps_tset.project_as_args("clang_module_file_flags")) expanded_modulemap_path_cmd = expand_relative_prefixed_sdk_path( cmd_args(swift_toolchain.sdk_path), @@ -164,6 +174,29 @@ def _swift_sdk_pcm_compilation_impl(ctx: AnalysisContext) -> [Promise, list[Prov cmd.add(ctx.attrs.swift_cxx_args) + if ctx.attrs.enable_cxx_interop: + # The stdlib headers have deprecation warnings set when targeting + # more recent versions. These warnings get serialized in the + # modules and make it impossible to import the std module, so + # suppress them during compilation instead. + cmd.add([ + "-Xcc", + "-D_LIBCPP_DISABLE_DEPRECATION_WARNINGS", + ]) + + if module_name == "Darwin": + # The Darwin module requires special handling with cxx interop + # to ensure that it does not include the c++ headers. The module + # is marked with [no_undeclared_includes] which will prevent + # including headers declared in other modulemaps. So that the + # cxx modules are visible we need to pass the module map path + # without the corresponding module file, which we cannot build + # until the Darwin module is available. + cmd.add([ + "-Xcc", + cmd_args(swift_toolchain.sdk_path, format = "-fmodule-map-file={}/usr/include/c++/v1/module.modulemap"), + ]) + _add_sdk_module_search_path(cmd, uncompiled_sdk_module_info, apple_toolchain) ctx.actions.run( @@ -175,9 +208,9 @@ def _swift_sdk_pcm_compilation_impl(ctx: AnalysisContext) -> [Promise, list[Prov ) # Construct the args needed to be passed to the clang importer - clang_importer_args = cmd_args() - clang_importer_args.add("-Xcc") - clang_importer_args.add( + clang_deps_args = cmd_args() + clang_deps_args.add("-Xcc") + clang_deps_args.add( cmd_args( [ "-fmodule-file=", @@ -188,8 +221,8 @@ def _swift_sdk_pcm_compilation_impl(ctx: AnalysisContext) -> [Promise, list[Prov delimiter = "", ), ) - clang_importer_args.add("-Xcc") - clang_importer_args.add( + clang_deps_args.add("-Xcc") + clang_deps_args.add( cmd_args( [ "-fmodule-map-file=", @@ -200,11 +233,13 @@ def _swift_sdk_pcm_compilation_impl(ctx: AnalysisContext) -> [Promise, list[Prov ) compiled_sdk = SwiftCompiledModuleInfo( - clang_importer_args = clang_importer_args, + clang_module_file_args = clang_deps_args, is_framework = uncompiled_sdk_module_info.is_framework, + is_sdk_module = True, is_swiftmodule = False, module_name = module_name, output_artifact = pcm_output, + clang_modulemap = expanded_modulemap_path_cmd, ) return [ @@ -216,18 +251,21 @@ def _swift_sdk_pcm_compilation_impl(ctx: AnalysisContext) -> [Promise, list[Prov ] # Compile the transitive clang module deps of this target. 
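+    # With C++ interop enabled the SDK module graph grows extra C++ edges
+    # (e.g. the std module), which are tracked separately as cxx_deps.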
+ deps = ctx.attrs.dep[SdkUncompiledModuleInfo].cxx_deps if ctx.attrs.enable_cxx_interop else ctx.attrs.dep[SdkUncompiledModuleInfo].deps clang_module_deps = get_swift_sdk_pcm_anon_targets( ctx, - ctx.attrs.dep[SdkUncompiledModuleInfo].deps, + ctx.attrs.enable_cxx_interop, + deps, ctx.attrs.swift_cxx_args, ) - return ctx.actions.anon_targets(clang_module_deps, with_artifacts = True).promise.map(k) + return ctx.actions.anon_targets(clang_module_deps).promise.map(k) _swift_sdk_pcm_compilation = rule( impl = _swift_sdk_pcm_compilation_impl, attrs = { "dep": attrs.dep(), + "enable_cxx_interop": attrs.bool(), "swift_cxx_args": attrs.list(attrs.string(), default = []), "_apple_toolchain": attrs.dep(), }, diff --git a/prelude/apple/swift/swift_sdk_swiftinterface_compilation.bzl b/prelude/apple/swift/swift_sdk_swiftinterface_compilation.bzl deleted file mode 100644 index 28063009fa5db..0000000000000 --- a/prelude/apple/swift/swift_sdk_swiftinterface_compilation.bzl +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under both the MIT license found in the -# LICENSE-MIT file in the root directory of this source tree and the Apache -# License, Version 2.0 found in the LICENSE-APACHE file in the root directory -# of this source tree. - -load("@prelude//apple:apple_toolchain_types.bzl", "AppleToolchainInfo") -load("@prelude//apple:apple_utility.bzl", "expand_relative_prefixed_sdk_path") -load("@prelude//apple/swift:swift_types.bzl", "SWIFTMODULE_EXTENSION") -load(":apple_sdk_modules_utility.bzl", "get_compiled_sdk_clang_deps_tset", "get_compiled_sdk_swift_deps_tset") -load( - ":swift_debug_info_utils.bzl", - "extract_and_merge_clang_debug_infos", - "extract_and_merge_swift_debug_infos", -) -load(":swift_module_map.bzl", "write_swift_module_map") -load(":swift_sdk_pcm_compilation.bzl", "get_swift_sdk_pcm_anon_targets") -load(":swift_toolchain_types.bzl", "SdkUncompiledModuleInfo", "SwiftCompiledModuleInfo", "SwiftCompiledModuleTset", "WrappedSdkCompiledModuleInfo") - -def get_swift_interface_anon_targets( - ctx: AnalysisContext, - uncompiled_sdk_deps: list[Dependency]): - return [ - ( - _swift_interface_compilation, - { - "dep": d, - "name": d.label, - "_apple_toolchain": ctx.attrs._apple_toolchain, - }, - ) - for d in uncompiled_sdk_deps - if d[SdkUncompiledModuleInfo].is_swiftmodule - ] - -def _swift_interface_compilation_impl(ctx: AnalysisContext) -> [Promise, list[Provider]]: - def k(sdk_deps_providers) -> list[Provider]: - uncompiled_sdk_module_info = ctx.attrs.dep[SdkUncompiledModuleInfo] - uncompiled_module_info_name = uncompiled_sdk_module_info.module_name - apple_toolchain = ctx.attrs._apple_toolchain[AppleToolchainInfo] - swift_toolchain = apple_toolchain.swift_toolchain_info - cmd = cmd_args(swift_toolchain.compiler) - cmd.add(uncompiled_sdk_module_info.partial_cmd) - cmd.add(["-sdk", swift_toolchain.sdk_path]) - - if swift_toolchain.resource_dir: - cmd.add([ - "-resource-dir", - swift_toolchain.resource_dir, - ]) - - clang_deps_tset = get_compiled_sdk_clang_deps_tset(ctx, sdk_deps_providers) - swift_deps_tset = get_compiled_sdk_swift_deps_tset(ctx, sdk_deps_providers) - swift_module_map_artifact = write_swift_module_map(ctx, uncompiled_module_info_name, swift_deps_tset) - cmd.add([ - "-explicit-swift-module-map-file", - swift_module_map_artifact, - ]) - cmd.add(clang_deps_tset.project_as_args("clang_deps")) - - swiftmodule_output = ctx.actions.declare_output(uncompiled_module_info_name + SWIFTMODULE_EXTENSION) - 
expanded_swiftinterface_cmd = expand_relative_prefixed_sdk_path( - cmd_args(swift_toolchain.sdk_path), - cmd_args(swift_toolchain.resource_dir), - cmd_args(apple_toolchain.platform_path), - uncompiled_sdk_module_info.input_relative_path, - ) - cmd.add([ - "-o", - swiftmodule_output.as_output(), - expanded_swiftinterface_cmd, - ]) - - ctx.actions.run( - cmd, - category = "sdk_swiftinterface_compile", - identifier = uncompiled_module_info_name, - ) - - compiled_sdk = SwiftCompiledModuleInfo( - is_framework = uncompiled_sdk_module_info.is_framework, - is_swiftmodule = True, - module_name = uncompiled_module_info_name, - output_artifact = swiftmodule_output, - ) - - return [ - DefaultInfo(), - WrappedSdkCompiledModuleInfo( - swift_deps = ctx.actions.tset(SwiftCompiledModuleTset, value = compiled_sdk, children = [swift_deps_tset]), - swift_debug_info = extract_and_merge_swift_debug_infos(ctx, sdk_deps_providers, [swiftmodule_output]), - clang_debug_info = extract_and_merge_clang_debug_infos(ctx, sdk_deps_providers), - ), - ] - - # For each swiftinterface compile its transitive clang deps with the provided target. - module_info = ctx.attrs.dep[SdkUncompiledModuleInfo] - clang_module_deps = get_swift_sdk_pcm_anon_targets( - ctx, - module_info.deps, - ["-target", module_info.target], - ) - - # Compile the transitive swiftmodule deps. - swift_module_deps = get_swift_interface_anon_targets(ctx, module_info.deps) - - return ctx.actions.anon_targets(clang_module_deps + swift_module_deps, with_artifacts = True).promise.map(k) - -_swift_interface_compilation = rule( - impl = _swift_interface_compilation_impl, - attrs = { - "dep": attrs.dep(), - "_apple_toolchain": attrs.dep(), - }, -) diff --git a/prelude/apple/swift/swift_swiftinterface_compilation.bzl b/prelude/apple/swift/swift_swiftinterface_compilation.bzl new file mode 100644 index 0000000000000..0302e6e0d4829 --- /dev/null +++ b/prelude/apple/swift/swift_swiftinterface_compilation.bzl @@ -0,0 +1,157 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
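+
+# Compiles textual .swiftinterface files (primarily SDK modules) into binary
+# swiftmodules. compile_swiftinterface_common merges the compiled pcm, clang
+# and swift dependency tsets into a single explicit swift module map that is
+# passed to the compiler via -explicit-swift-module-map-file.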
+ +load("@prelude//apple:apple_toolchain_types.bzl", "AppleToolchainInfo") +load("@prelude//apple:apple_utility.bzl", "expand_relative_prefixed_sdk_path") +load("@prelude//apple/swift:swift_pcm_compilation.bzl", "get_compiled_pcm_deps_tset") +load("@prelude//apple/swift:swift_types.bzl", "SWIFTMODULE_EXTENSION") +load(":apple_sdk_modules_utility.bzl", "get_compiled_sdk_clang_deps_tset", "get_compiled_sdk_swift_deps_tset") +load( + ":swift_debug_info_utils.bzl", + "extract_and_merge_clang_debug_infos", + "extract_and_merge_swift_debug_infos", +) +load(":swift_module_map.bzl", "write_swift_module_map_with_deps") +load(":swift_sdk_pcm_compilation.bzl", "get_swift_sdk_pcm_anon_targets") +load(":swift_toolchain_types.bzl", "SdkUncompiledModuleInfo", "SwiftCompiledModuleInfo", "SwiftCompiledModuleTset", "WrappedSdkCompiledModuleInfo") + +def get_swift_interface_anon_targets( + ctx: AnalysisContext, + uncompiled_sdk_deps: list[Dependency]): + return [ + ( + _swift_interface_compilation, + { + "dep": d, + "name": d.label, + "_apple_toolchain": ctx.attrs._apple_toolchain, + }, + ) + for d in uncompiled_sdk_deps + if d[SdkUncompiledModuleInfo].is_swiftmodule + ] + +def compile_swiftinterface_common( + ctx, + deps, + is_framework, + uncompiled_module_info_name, + partial_cmd, + sdk_deps_providers, + expanded_swiftinterface_cmd, + category, + additional_compiled_pcm): + apple_toolchain = ctx.attrs._apple_toolchain[AppleToolchainInfo] + swift_toolchain = apple_toolchain.swift_toolchain_info + cmd = cmd_args(swift_toolchain.compiler) + cmd.add(partial_cmd) + cmd.add(["-sdk", swift_toolchain.sdk_path]) + + if swift_toolchain.resource_dir: + cmd.add([ + "-resource-dir", + swift_toolchain.resource_dir, + ]) + + pcm_deps_tset = get_compiled_pcm_deps_tset(ctx, sdk_deps_providers) + + if additional_compiled_pcm: + pcm_deps_tset = ctx.actions.tset(SwiftCompiledModuleTset, value = additional_compiled_pcm, children = [pcm_deps_tset]) + + clang_deps_tset = get_compiled_sdk_clang_deps_tset(ctx, sdk_deps_providers) + swift_deps_tset = get_compiled_sdk_swift_deps_tset(ctx, sdk_deps_providers + deps) + + all_deps_tset = ctx.actions.tset( + SwiftCompiledModuleTset, + children = [pcm_deps_tset, clang_deps_tset, swift_deps_tset], + ) + + swift_module_map_artifact = write_swift_module_map_with_deps(ctx, uncompiled_module_info_name, all_deps_tset) + cmd.add([ + "-explicit-swift-module-map-file", + swift_module_map_artifact, + ]) + + swiftmodule_output = ctx.actions.declare_output(uncompiled_module_info_name + SWIFTMODULE_EXTENSION) + cmd.add([ + "-o", + swiftmodule_output.as_output(), + expanded_swiftinterface_cmd, + ]) + + ctx.actions.run( + cmd, + category = category, + identifier = uncompiled_module_info_name, + ) + + return SwiftCompiledModuleInfo( + is_framework = is_framework, + is_sdk_module = True, + is_swiftmodule = True, + module_name = uncompiled_module_info_name, + output_artifact = swiftmodule_output, + ), swift_deps_tset + +def _swift_interface_compilation_impl(ctx: AnalysisContext) -> [Promise, list[Provider]]: + def k(sdk_deps_providers) -> list[Provider]: + uncompiled_sdk_module_info = ctx.attrs.dep[SdkUncompiledModuleInfo] + uncompiled_module_info_name = uncompiled_sdk_module_info.module_name + apple_toolchain = ctx.attrs._apple_toolchain[AppleToolchainInfo] + swift_toolchain = apple_toolchain.swift_toolchain_info + + expanded_swiftinterface_cmd = expand_relative_prefixed_sdk_path( + cmd_args(swift_toolchain.sdk_path), + cmd_args(swift_toolchain.resource_dir), + cmd_args(apple_toolchain.platform_path), + 
uncompiled_sdk_module_info.input_relative_path, + ) + + compiled_sdk, swift_deps_tset = compile_swiftinterface_common( + ctx, + [], + uncompiled_sdk_module_info.is_framework, + uncompiled_module_info_name, + uncompiled_sdk_module_info.partial_cmd, + sdk_deps_providers, + expanded_swiftinterface_cmd, + "sdk_swiftinterface_compile", + None, + ) + + wrapped_sdk_compiled_module_info = WrappedSdkCompiledModuleInfo( + swift_deps = ctx.actions.tset(SwiftCompiledModuleTset, value = compiled_sdk, children = [swift_deps_tset]), + swift_debug_info = extract_and_merge_swift_debug_infos(ctx, sdk_deps_providers, [compiled_sdk.output_artifact]), + clang_debug_info = extract_and_merge_clang_debug_infos(ctx, sdk_deps_providers), + ) + + return [ + DefaultInfo(), + wrapped_sdk_compiled_module_info, + ] + + # For each swiftinterface compile its transitive clang deps with the provided target. + module_info = ctx.attrs.dep[SdkUncompiledModuleInfo] + clang_module_deps = get_swift_sdk_pcm_anon_targets( + ctx, + False, + module_info.deps, + ["-target", module_info.target], + ) + + # Compile the transitive swiftmodule deps. + swift_module_deps = get_swift_interface_anon_targets(ctx, module_info.deps) + + return ctx.actions.anon_targets(clang_module_deps + swift_module_deps).promise.map(k) + +_swift_interface_compilation = rule( + impl = _swift_interface_compilation_impl, + attrs = { + "dep": attrs.dep(), + "_apple_toolchain": attrs.dep(), + }, +) diff --git a/prelude/apple/swift/swift_toolchain.bzl b/prelude/apple/swift/swift_toolchain.bzl index 1496299f7c446..36ab429d11fab 100644 --- a/prelude/apple/swift/swift_toolchain.bzl +++ b/prelude/apple/swift/swift_toolchain.bzl @@ -29,12 +29,13 @@ def traverse_sdk_modules_graph( elif not uncompiled_sdk_module_info.is_swiftmodule and uncompiled_sdk_module_info.module_name in clang_sdk_module_name_to_deps: return - for uncompiled_dep in uncompiled_sdk_module_info.deps: + for uncompiled_dep in uncompiled_sdk_module_info.deps + uncompiled_sdk_module_info.cxx_deps: traverse_sdk_modules_graph(swift_sdk_module_name_to_deps, clang_sdk_module_name_to_deps, uncompiled_dep) - if uncompiled_sdk_module_info.is_swiftmodule: - swift_sdk_module_name_to_deps[uncompiled_sdk_module_info.module_name] = sdk_module_dep - else: - clang_sdk_module_name_to_deps[uncompiled_sdk_module_info.module_name] = sdk_module_dep + + if uncompiled_sdk_module_info.is_swiftmodule: + swift_sdk_module_name_to_deps[uncompiled_sdk_module_info.module_name] = sdk_module_dep + else: + clang_sdk_module_name_to_deps[uncompiled_sdk_module_info.module_name] = sdk_module_dep def swift_toolchain_impl(ctx): # All Clang's PCMs need to be compiled with cxx flags of the target that imports them, @@ -55,22 +56,23 @@ def swift_toolchain_impl(ctx): SwiftToolchainInfo( architecture = ctx.attrs.architecture, can_toolchain_emit_obj_c_header_textually = ctx.attrs.can_toolchain_emit_obj_c_header_textually, - # TODO(T99038725): until we add -debug-compilation-dir we need to wrap - # the Swift invocations so that we can apply a debug prefix map for - # the current directory while maintaining cache hit. 
- uncompiled_swift_sdk_modules_deps = uncompiled_swift_sdk_modules_deps, - uncompiled_clang_sdk_modules_deps = uncompiled_clang_sdk_modules_deps, compiler = cmd_args(ctx.attrs._swiftc_wrapper[RunInfo]).add(ctx.attrs.swiftc[RunInfo]), compiler_flags = ctx.attrs.swiftc_flags, - prefix_serialized_debugging_options = ctx.attrs.prefix_serialized_debug_info, + mk_swift_comp_db = ctx.attrs.make_swift_comp_db, + mk_swift_interface = cmd_args(ctx.attrs._swiftc_wrapper[RunInfo]).add(ctx.attrs.make_swift_interface[RunInfo]), object_format = SwiftObjectFormat(ctx.attrs.object_format) if ctx.attrs.object_format else SwiftObjectFormat("object"), + prefix_serialized_debugging_options = ctx.attrs.prefix_serialized_debug_info, resource_dir = ctx.attrs.resource_dir, + runtime_run_paths = ctx.attrs.runtime_run_paths, sdk_path = ctx.attrs._internal_sdk_path or ctx.attrs.sdk_path, + supports_cxx_interop_requirement_at_import = ctx.attrs.supports_cxx_interop_requirement_at_import, + supports_relative_resource_dir = ctx.attrs.supports_relative_resource_dir, + supports_swift_cxx_interoperability_mode = ctx.attrs.supports_swift_cxx_interoperability_mode, + supports_swift_importing_objc_forward_declarations = ctx.attrs.supports_swift_importing_obj_c_forward_declarations, + swift_ide_test_tool = ctx.attrs.swift_ide_test_tool[RunInfo] if ctx.attrs.swift_ide_test_tool else None, swift_stdlib_tool = ctx.attrs.swift_stdlib_tool[RunInfo], swift_stdlib_tool_flags = ctx.attrs.swift_stdlib_tool_flags, - runtime_run_paths = ctx.attrs.runtime_run_paths, - supports_swift_cxx_interoperability_mode = ctx.attrs.supports_swift_cxx_interoperability_mode, - supports_cxx_interop_requirement_at_import = ctx.attrs.supports_cxx_interop_requirement_at_import, - mk_swift_comp_db = ctx.attrs.make_swift_comp_db, + uncompiled_clang_sdk_modules_deps = uncompiled_clang_sdk_modules_deps, + uncompiled_swift_sdk_modules_deps = uncompiled_swift_sdk_modules_deps, ), ] diff --git a/prelude/apple/swift/swift_toolchain_types.bzl b/prelude/apple/swift/swift_toolchain_types.bzl index 12676f535c4be..7e2737a78a1d4 100644 --- a/prelude/apple/swift/swift_toolchain_types.bzl +++ b/prelude/apple/swift/swift_toolchain_types.bzl @@ -17,30 +17,34 @@ SwiftObjectFormat = enum( ) SwiftToolchainInfo = provider( - # @unsorted-dict-items fields = { "architecture": provider_field(typing.Any, default = None), "can_toolchain_emit_obj_c_header_textually": provider_field(typing.Any, default = None), # bool - "uncompiled_swift_sdk_modules_deps": provider_field(typing.Any, default = None), # {str: dependency} Expose deps of uncompiled Swift SDK modules. - "uncompiled_clang_sdk_modules_deps": provider_field(typing.Any, default = None), # {str: dependency} Expose deps of uncompiled Clang SDK modules. 
- "compiler_flags": provider_field(typing.Any, default = None), "compiler": provider_field(typing.Any, default = None), - "prefix_serialized_debugging_options": provider_field(typing.Any, default = None), # bool + "compiler_flags": provider_field(typing.Any, default = None), + "mk_swift_comp_db": provider_field(typing.Any, default = None), + "mk_swift_interface": provider_field(typing.Any, default = None), "object_format": provider_field(typing.Any, default = None), # "SwiftObjectFormat" + "prefix_serialized_debugging_options": provider_field(typing.Any, default = None), # bool "resource_dir": provider_field(typing.Any, default = None), # "artifact", - "sdk_path": provider_field(typing.Any, default = None), - "swift_stdlib_tool_flags": provider_field(typing.Any, default = None), - "swift_stdlib_tool": provider_field(typing.Any, default = None), "runtime_run_paths": provider_field(typing.Any, default = None), # [str] - "supports_swift_cxx_interoperability_mode": provider_field(typing.Any, default = None), # bool + "sdk_path": provider_field(typing.Any, default = None), "supports_cxx_interop_requirement_at_import": provider_field(typing.Any, default = None), # bool - "mk_swift_comp_db": provider_field(typing.Any, default = None), + "supports_relative_resource_dir": provider_field(typing.Any, default = None), # bool + "supports_swift_cxx_interoperability_mode": provider_field(typing.Any, default = None), # bool + "supports_swift_importing_objc_forward_declarations": provider_field(typing.Any, default = None), # bool + "swift_ide_test_tool": provider_field(typing.Any, default = None), + "swift_stdlib_tool": provider_field(typing.Any, default = None), + "swift_stdlib_tool_flags": provider_field(typing.Any, default = None), + "uncompiled_clang_sdk_modules_deps": provider_field(typing.Any, default = None), # {str: dependency} Expose deps of uncompiled Clang SDK modules. + "uncompiled_swift_sdk_modules_deps": provider_field(typing.Any, default = None), # {str: dependency} Expose deps of uncompiled Swift SDK modules. }, ) # A provider that represents a non-yet-compiled SDK (Swift or Clang) module, # and doesn't contain any artifacts because Swift toolchain isn't resolved yet. SdkUncompiledModuleInfo = provider(fields = { + "cxx_deps": provider_field(typing.Any, default = None), # [Dependency] "deps": provider_field(typing.Any, default = None), # [Dependency] "input_relative_path": provider_field(typing.Any, default = None), # A relative prefixed path to a textual swiftinterface/modulemap file within an SDK. "is_framework": provider_field(typing.Any, default = None), # This is mostly needed for the generated Swift module map file. @@ -62,8 +66,11 @@ SdkSwiftOverlayInfo = provider(fields = { }) SwiftCompiledModuleInfo = provider(fields = { - "clang_importer_args": provider_field(typing.Any, default = None), # cmd_args of include flags for the clang importer. + "clang_importer_args": provider_field(typing.Any, default = None), # cmd_args of additional flags for the clang importer. + "clang_module_file_args": provider_field(typing.Any, default = None), # cmd_args of include flags for the clang importer. + "clang_modulemap": provider_field(typing.Any, default = None), # Clang modulemap file which is required for generation of swift_module_map. "is_framework": provider_field(typing.Any, default = None), + "is_sdk_module": provider_field(bool, default = False), "is_swiftmodule": provider_field(typing.Any, default = None), # If True then contains a compiled swiftmodule, otherwise Clang's pcm. 
"module_name": provider_field(typing.Any, default = None), # A real name of a module, without distinguishing suffixes. "output_artifact": provider_field(typing.Any, default = None), # Compiled artifact either swiftmodule or pcm. @@ -71,24 +78,43 @@ SwiftCompiledModuleInfo = provider(fields = { def _add_swiftmodule_search_path(module_info: SwiftCompiledModuleInfo): # We need to import the containing folder, not the file itself. - return ["-I", cmd_args(module_info.output_artifact).parent()] if module_info.is_swiftmodule else [] + # We skip SDK modules as those are found via the -sdk flag. + if module_info.is_swiftmodule and not module_info.is_sdk_module: + return ["-I", cmd_args(module_info.output_artifact, parent = 1)] + + return [] -def _add_clang_import_flags(module_info: SwiftCompiledModuleInfo): +def _add_clang_module_file_flags(module_info: SwiftCompiledModuleInfo): if module_info.is_swiftmodule: return [] else: - return [module_info.clang_importer_args] + return [module_info.clang_module_file_args] + +def _add_clang_importer_flags(module_info: SwiftCompiledModuleInfo): + if module_info.is_swiftmodule: + return [] + else: + return [module_info.clang_importer_args] if module_info.clang_importer_args else [] def _swift_module_map_struct(module_info: SwiftCompiledModuleInfo): - return struct( - isFramework = module_info.is_framework, - moduleName = module_info.module_name, - modulePath = module_info.output_artifact, - ) + if module_info.is_swiftmodule: + return struct( + isFramework = module_info.is_framework, + moduleName = module_info.module_name, + modulePath = module_info.output_artifact, + ) + else: + return struct( + isFramework = module_info.is_framework, + moduleName = module_info.module_name, + clangModulePath = module_info.output_artifact, + clangModuleMapPath = cmd_args([module_info.clang_modulemap], delimiter = ""), + ) SwiftCompiledModuleTset = transitive_set( args_projections = { - "clang_deps": _add_clang_import_flags, + "clang_importer_flags": _add_clang_importer_flags, # Additional clang flags required for compilation. + "clang_module_file_flags": _add_clang_module_file_flags, # Projects pcm modules as cli flags. 
"module_search_path": _add_swiftmodule_search_path, }, json_projections = { diff --git a/prelude/apple/swift/swift_types.bzl b/prelude/apple/swift/swift_types.bzl index eac1b0f8390ab..507f32b45dcb3 100644 --- a/prelude/apple/swift/swift_types.bzl +++ b/prelude/apple/swift/swift_types.bzl @@ -8,3 +8,29 @@ SWIFT_EXTENSION = ".swift" SWIFTMODULE_EXTENSION = ".swiftmodule" + +SwiftCompilationModes = ["wmo", "incremental", "auto"] + +def _swift_framework_implicit_search_paths_args(args: cmd_args): + return args + +FrameworkImplicitSearchPathInfoTSet = transitive_set( + args_projections = { + "swift_framework_implicit_search_paths_args": _swift_framework_implicit_search_paths_args, + }, +) + +FrameworkImplicitSearchPathInfo = provider(fields = { + "tset": provider_field(typing.Any, default = None), # A tset of FrameworkImplicitSearchPathInfoTSet +}) + +def get_implicit_framework_search_path_providers(ctx: AnalysisContext, value: [cmd_args, None], deps: list[Dependency]) -> FrameworkImplicitSearchPathInfoTSet: + deps_infos = [ + d[FrameworkImplicitSearchPathInfo].tset + for d in deps + if FrameworkImplicitSearchPathInfo in d and d[FrameworkImplicitSearchPathInfo].tset != None + ] + if value: + return ctx.actions.tset(FrameworkImplicitSearchPathInfoTSet, value = value, children = deps_infos) + else: + return ctx.actions.tset(FrameworkImplicitSearchPathInfoTSet, children = deps_infos) diff --git a/prelude/apple/tools/BUCK b/prelude/apple/tools/BUCK deleted file mode 100644 index d49624186f63b..0000000000000 --- a/prelude/apple/tools/BUCK +++ /dev/null @@ -1,36 +0,0 @@ -prelude = native - -prelude.python_bootstrap_binary( - name = "make_modulemap", - main = "make_modulemap.py", - visibility = ["PUBLIC"], -) - -prelude.export_file( - name = "swift_exec.sh", - src = "swift_exec.sh", -) - -prelude.command_alias( - name = "swift_exec", - exe = ":swift_exec.sh", - visibility = ["PUBLIC"], -) - -prelude.python_bootstrap_binary( - name = "make_vfsoverlay", - main = "make_vfsoverlay.py", - visibility = ["PUBLIC"], -) - -prelude.python_bootstrap_binary( - name = "swift_objc_header_postprocess", - main = "swift_objc_header_postprocess.py", - visibility = ["PUBLIC"], -) - -prelude.python_bootstrap_binary( - name = "make_swift_comp_db", - main = "make_swift_comp_db.py", - visibility = ["PUBLIC"], -) diff --git a/prelude/apple/tools/BUCK.v2 b/prelude/apple/tools/BUCK.v2 new file mode 100644 index 0000000000000..19dd72957e8d5 --- /dev/null +++ b/prelude/apple/tools/BUCK.v2 @@ -0,0 +1,91 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +apple_tools( + name = "apple-tools", + adhoc_codesign_tool = None, # @oss-enable + # @oss-disable: adhoc_codesign_tool = "prelude//apple/tools/meta_only/codesign_rust:adhoc-signer", + assemble_bundle = "prelude//apple/tools/bundling:assemble_bundle", + dry_codesign_tool = ":dry_codesign_tool", + framework_sanitizer = ":framework_sanitizer", + info_plist_processor = "prelude//apple/tools/info_plist_processor:tool", + ipa_package_maker = ":ipa_package_maker", + make_modulemap = ":make_modulemap", + make_vfsoverlay = ":make_vfsoverlay", + selective_debugging_scrubber = "prelude//apple/tools/selective_debugging:tool", + split_arch_combine_dsym_bundles_tool = ":split_arch_combine_dsym_bundles_tool", + visibility = ["PUBLIC"], + xcframework_maker = ":xcframework_maker", +) + +python_binary( + name = "framework_sanitizer", + main = "framework_sanitizer.py", + visibility = ["PUBLIC"], +) + +python_binary( + name = 
"xcframework_maker", + main = "xcframework_maker.py", + visibility = ["PUBLIC"], +) + +python_binary( + name = "dry_codesign_tool", + main = "dry_codesign_tool.py", + visibility = ["PUBLIC"], +) + +python_binary( + name = "ipa_package_maker", + main = "ipa_package_maker.py", + visibility = ["PUBLIC"], + deps = [ + "prelude//apple/tools/re_compatibility_utils:re_compatibility_utils", + ], +) + +python_bootstrap_binary( + name = "make_modulemap", + main = "make_modulemap.py", + visibility = ["PUBLIC"], +) + +python_bootstrap_binary( + name = "make_swift_comp_db", + main = "make_swift_comp_db.py", + visibility = ["PUBLIC"], +) + +python_bootstrap_binary( + name = "make_swift_interface", + main = "make_swift_interface.py", + visibility = ["PUBLIC"], +) + +python_bootstrap_binary( + name = "make_vfsoverlay", + main = "make_vfsoverlay.py", + visibility = ["PUBLIC"], +) + +python_library( + name = "plistlib_utils", + srcs = ["plistlib_utils.py"], + visibility = ["PUBLIC"], +) + +python_binary( + name = "split_arch_combine_dsym_bundles_tool", + main = "split_arch_combine_dsym_bundles_tool.py", + visibility = ["PUBLIC"], +) + +python_bootstrap_binary( + name = "swift_exec", + main = "swift_exec.py", + visibility = ["PUBLIC"], +) diff --git a/prelude/apple/tools/bundling/BUCK.v2 b/prelude/apple/tools/bundling/BUCK.v2 new file mode 100644 index 0000000000000..1352fa2736a54 --- /dev/null +++ b/prelude/apple/tools/bundling/BUCK.v2 @@ -0,0 +1,46 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") +load("@prelude//apple/tools/defs.bzl", "meta_python_test") + +oncall("build_infra") + +source_listing() + +python_binary( + name = "assemble_bundle", + main = "main.py", + visibility = ["PUBLIC"], + deps = [ + "prelude//apple/tools/code_signing:lib", + "prelude//apple/tools/re_compatibility_utils:re_compatibility_utils", + ":lib", + ], +) + +python_library( + name = "lib", + srcs = glob( + [ + "*.py", + ], + exclude = [ + "*_test.py", + "main.py", + ], + ), + deps = [ + "prelude//apple/tools/code_signing:lib", + "prelude//apple/tools/re_compatibility_utils:re_compatibility_utils", + ], +) + +meta_python_test( + name = "bundling_test", + srcs = glob(["*_test.py"]), + resources = glob([ + "test_resources/*", + ]), + deps = [ + "fbsource//third-party/pkg_resources:pkg_resources", + ":lib", + ], +) diff --git a/prelude/apple/tools/bundling/action_metadata.py b/prelude/apple/tools/bundling/action_metadata.py new file mode 100644 index 0000000000000..ade702a703a06 --- /dev/null +++ b/prelude/apple/tools/bundling/action_metadata.py @@ -0,0 +1,77 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + +import json +import os +from dataclasses import dataclass +from io import TextIOBase +from pathlib import Path +from typing import Any, Dict, List, Optional, Union + +_METADATA_VERSION = 1 + + +@dataclass +class _Item: + path: Path + digest: str + + +@dataclass +class _Metadata: + version: int + digests: List[_Item] + + +def _object_hook(dict: Dict[str, Any]) -> Union[_Item, _Metadata]: + if "version" in dict: + return _Metadata(**dict) + else: + dict["path"] = Path(dict.pop("path")) + return _Item(**dict) + + +def parse_action_metadata(data: TextIOBase) -> Optional[Dict[Path, str]]: + """ + Returns: + Mapping from project relative path to hash digest for every file present action metadata. + """ + start_stream_position = data.tell() + try: + metadata = json.load(data, object_hook=_object_hook) + except BaseException: + data.seek(start_stream_position) + version = json.load(data)["version"] + if version != _METADATA_VERSION: + raise RuntimeError( + f"Expected metadata version to be `{_METADATA_VERSION}` got `{version}`." + ) + else: + raise + return {item.path: item.digest for item in metadata.digests} + + +def action_metadata_if_present( + environment_variable_key: str, +) -> Optional[Dict[Path, str]]: + """ + Returns: + Mapping from project relative path to hash digest for every file present action metadata. + """ + environment_variable = os.getenv(environment_variable_key) + if environment_variable is None: + return None + path = Path(environment_variable) + if not path.exists(): + raise RuntimeError( + "Expected file with action metadata to exist given related environment variable is set." + ) + else: + with path.open() as f: + return parse_action_metadata(f) diff --git a/prelude/apple/tools/bundling/action_metadata_test.py b/prelude/apple/tools/bundling/action_metadata_test.py new file mode 100644 index 0000000000000..066fa12f7db36 --- /dev/null +++ b/prelude/apple/tools/bundling/action_metadata_test.py @@ -0,0 +1,47 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
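+
+# Exercises parse_action_metadata against three fixtures: a valid version-1
+# document, a file that is not valid JSON, and a document with a newer,
+# unsupported metadata version.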
+ +import unittest +from json import JSONDecodeError +from pathlib import Path + +import pkg_resources + +from .action_metadata import parse_action_metadata + + +class TestActionMetadata(unittest.TestCase): + def test_valid_metadata_is_parsed_successfully(self): + file_content = pkg_resources.resource_stream( + __name__, "test_resources/valid_action_metadata.json" + ) + result = parse_action_metadata(file_content) + self.assertEqual( + result, + { + Path("repo/foo.txt"): "foo_digest", + Path("buck-out/bar.txt"): "bar_digest", + }, + ) + + def test_error_when_invalid_metadata(self): + file_content = pkg_resources.resource_stream( + __name__, "test_resources/the.broken_json" + ) + with self.assertRaises(JSONDecodeError): + _ = parse_action_metadata(file_content) + + def test_user_friendly_error_when_metadata_with_newer_version(self): + file_content = pkg_resources.resource_stream( + __name__, "test_resources/newer_version_action_metadata.json" + ) + with self.assertRaises(Exception) as context: + _ = parse_action_metadata(file_content) + self.assertEqual( + context.exception, + RuntimeError("Expected metadata version to be `1` got `2`."), + ) diff --git a/prelude/apple/tools/bundling/assemble_bundle.py b/prelude/apple/tools/bundling/assemble_bundle.py new file mode 100644 index 0000000000000..13ae19a4ea1b0 --- /dev/null +++ b/prelude/apple/tools/bundling/assemble_bundle.py @@ -0,0 +1,276 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +import filecmp +import logging +import os +import shutil +from pathlib import Path +from typing import Any, cast, Dict, List, Optional + +from .assemble_bundle_types import BundleSpecItem, IncrementalContext +from .incremental_state import IncrementalState, IncrementalStateItem +from .incremental_utils import ( + calculate_incremental_state, + should_assemble_incrementally, +) + +_LOGGER: logging.Logger = logging.getLogger(__name__) + + +def assemble_bundle( + spec: List[BundleSpecItem], + bundle_path: Path, + incremental_context: Optional[IncrementalContext], + check_conflicts: bool, + versioned_if_macos: bool, +) -> Optional[List[IncrementalStateItem]]: + incremental_result = None + if incremental_context: + if should_assemble_incrementally(spec, incremental_context): + incremental_result = _assemble_incrementally( + bundle_path, + spec, + incremental_context.metadata, + cast(IncrementalState, incremental_context.state), + check_conflicts, + versioned_if_macos, + ) + else: + _assemble_non_incrementally( + bundle_path, spec, check_conflicts, versioned_if_macos + ) + incremental_result = calculate_incremental_state( + spec, incremental_context.metadata + ) + else: + _assemble_non_incrementally( + bundle_path, spec, check_conflicts, versioned_if_macos + ) + + # External tooling (e.g., Xcode) might depend on the timestamp of the bundle + bundle_path.touch() + + return incremental_result + + +def _cleanup_output(incremental: bool, path: Path) -> None: + if not incremental and path.exists(): + shutil.rmtree(path) + path.mkdir(parents=True, exist_ok=True) + + +def _assemble_non_incrementally( + bundle_path: Path, + spec: List[BundleSpecItem], + check_conflicts: bool, + versioned_if_macos: bool, +) -> None: + _LOGGER.info("Assembling bundle non-incrementally.") + 
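+    # Start from a clean output tree, then copy every spec item into place.
+    # When check_conflicts is set, two sources mapping to the same destination
+    # are tolerated only if their contents are identical (as can happen for
+    # universal builds); any other collision raises a RuntimeError.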
_cleanup_output(incremental=False, path=bundle_path) + + copied_contents: Dict[Path, str] = {} + + def _copy(src: str, dst: Path, **kwargs: Any) -> None: + if check_conflicts: + if dst in copied_contents: + if filecmp.cmp(src, str(dst), shallow=False): + _LOGGER.info( + f"Found a conflict for destination `{os.path.relpath(dst, bundle_path)}` but the files are identical. Treating as a non-conflict as this can normally happen for universal builds." + ) + return + + raise RuntimeError( + f"Found a conflict for destination `{os.path.relpath(dst, bundle_path)}`: `{src}` conflicts with `{copied_contents[dst]}`" + ) + shutil.copy2(src, dst, follow_symlinks=False) + if check_conflicts: + copied_contents[dst] = src + + symlinks = set() + + for spec_item in spec: + source_path = spec_item.src + destination_path = bundle_path / spec_item.dst + + destination_path.parent.mkdir(parents=True, exist_ok=True) + if spec_item.dst.startswith("Versions/A") and versioned_if_macos: + parts = Path(spec_item.dst).parts + if len(parts) <= 2: + raise RuntimeError( + "Versioned bundles cannot be created from a single copy directly to Versions/A" + ) + symlinks.add(parts[2]) + + if os.path.isdir(source_path): + shutil.copytree( + source_path, + destination_path, + symlinks=True, + dirs_exist_ok=True, + copy_function=_copy, + ) + else: + _copy(source_path, destination_path) + + _create_symlinks(symlinks, bundle_path) + + +def _create_symlinks(symlinks: set[str], bundle_path: Path) -> None: + if symlinks and not Path.exists(bundle_path / "Versions/Current"): + os.symlink("A", bundle_path / "Versions/Current") + for dir_to_link in symlinks: + if not Path.exists(bundle_path / dir_to_link): + os.symlink("Versions/Current/" + dir_to_link, bundle_path / dir_to_link) + + +def _assemble_incrementally( + bundle_path: Path, + spec: List[BundleSpecItem], + action_metadata: Dict[Path, str], + incremental_state: IncrementalState, + check_conflicts: bool, + versioned_if_macos: bool, +) -> List[IncrementalStateItem]: + _LOGGER.info("Assembling bundle incrementally.") + _cleanup_output(incremental=True, path=bundle_path) + _delete_swift_stdlib_files(bundle_path, incremental_state.swift_stdlib_paths) + paths_to_delete = { + i.destination_relative_to_bundle for i in incremental_state.items + } + old_digests = { + item.destination_relative_to_bundle: item.digest + for item in incremental_state.items + if item.digest is not None + } + old_symlink_destinations = { + item.destination_relative_to_bundle: item.resolved_symlink + for item in incremental_state.items + if item.resolved_symlink is not None + } + + new_incremental_state = calculate_incremental_state(spec, action_metadata) + + if check_conflicts: + _check_path_conflicts(new_incremental_state) + + # Still need to run filtering even when check_conflicts is set, for removing the conflicts with same files + new_incremental_state = _filter_conflicting_paths(new_incremental_state) + + new_symlinks = set() + versioned_subdir = Path("Versions/A") + + for item in new_incremental_state: + # Added file might not be present in old result, need to check first. 
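+        # A regular file counts as changed when its digest differs from the
+        # digest recorded for it in the previous build; a symlink when its
+        # resolved target differs. Unchanged items are left in place.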
+        dst = item.destination_relative_to_bundle
+        if dst in paths_to_delete:
+            paths_to_delete.remove(dst)
+        project_relative_dst = bundle_path / dst
+        if item.digest is not None:
+            new_digest = item.digest
+            old_digest = old_digests.get(dst, None)
+            is_changed = new_digest != old_digest
+        else:
+            assert (
+                item.resolved_symlink is not None
+            ), "Expected item to represent a symlink when digest is missing"
+            new_resolved_symlink = item.resolved_symlink
+            old_resolved_symlink = old_symlink_destinations.get(dst, None)
+            is_changed = new_resolved_symlink != old_resolved_symlink
+            if is_changed:
+                project_relative_dst.unlink()
+        if is_changed:
+            _LOGGER.debug(
+                f"Bundle item at path `{dst}` changed, updating with `{item.source}`."
+            )
+            project_relative_dst.parent.mkdir(parents=True, exist_ok=True)
+            shutil.copy2(item.source, project_relative_dst, follow_symlinks=False)
+        if Path(dst).is_relative_to(versioned_subdir):
+            symlink = Path(dst).relative_to(versioned_subdir).parts[0]
+            new_symlinks.add(symlink)
+
+    if versioned_if_macos:
+        _create_symlinks(new_symlinks, bundle_path)
+
+    for path in paths_to_delete:
+        (bundle_path / path).unlink()
+
+    _cleanup_empty_redundant_directories(
+        bundle_path, incremental_state.items, new_incremental_state
+    )
+
+    return new_incremental_state
+
+
+def _check_path_conflicts(incremental_state: List[IncrementalStateItem]) -> None:
+    """
+    Raises an exception if multiple items share the same destination path but are different files.
+    """
+    checked = {}
+    for item in incremental_state:
+        dst = item.destination_relative_to_bundle
+        if dst in checked:
+            if item.digest != checked[dst].digest:
+                raise RuntimeError(
+                    f"Found a conflict for destination `{dst}`: `{item.source}` conflicts with `{checked[dst].source}`"
+                )
+            else:
+                _LOGGER.info(
+                    f"Found a conflict for destination `{dst}` but the files are identical. Treating as a non-conflict as this can normally happen for universal builds."
+                )
+        checked[dst] = item
+
+
+def _filter_conflicting_paths(
+    incremental_state: List[IncrementalStateItem],
+) -> List[IncrementalStateItem]:
+    """
+    Filters out conflicting paths, keeping only the last of the conflicting items.
+    In practice that means the last item overrides all other conflicting items, which makes:
+    1) the incremental build deterministic even when there are multiple conflicting destination paths
+    2) the bundling result match the Buck1 structure even when there are multiple conflicting destination paths
+    WARNING: This logic is tightly coupled with how spec items are sorted in the `assemble_bundle` method. Don't change it unless you fully understand what is going on here.
+    """
+    result = {}
+    for item in incremental_state:
+        dst = item.destination_relative_to_bundle
+        # Keep the same ordering of elements as in incremental state.
+        # That means we don't just overwrite the item for the existing key, but delete it first.
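+        # E.g. items [A(dst=x), B(dst=y), A2(dst=x)] collapse to [B, A2]: `x`
+        # is re-inserted at the back, so the last conflicting item wins while
+        # the surviving items keep a deterministic relative order.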
+ if dst in result: + result.pop(dst) + result[dst] = item + return list(result.values()) + + +def _delete_swift_stdlib_files( + bundle_path: Path, swift_stdlib_paths: List[Path] +) -> None: + for p in swift_stdlib_paths: + path = bundle_path / p + if path.is_dir(): + shutil.rmtree(path) + else: + path.unlink(missing_ok=True) + + +def _cleanup_empty_redundant_directories( + bundle_path: Path, + old_state: List[IncrementalStateItem], + new_state: List[IncrementalStateItem], +) -> None: + old_directories = { + p for item in old_state for p in item.destination_relative_to_bundle.parents + } + new_directories = { + p for item in new_state for p in item.destination_relative_to_bundle.parents + } + versioned_subdir = Path("Versions/A") + for redundant_directory in old_directories - new_directories: + shutil.rmtree(bundle_path / redundant_directory, ignore_errors=True) + if redundant_directory.parent == versioned_subdir: + Path.unlink(bundle_path / redundant_directory.name) diff --git a/prelude/apple/tools/bundling/assemble_bundle_types.py b/prelude/apple/tools/bundling/assemble_bundle_types.py new file mode 100644 index 0000000000000..563b3bef976a5 --- /dev/null +++ b/prelude/apple/tools/bundling/assemble_bundle_types.py @@ -0,0 +1,104 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +from __future__ import annotations + +import functools +from dataclasses import dataclass +from pathlib import Path +from typing import Dict, List, Optional + +from apple.tools.code_signing.codesign_bundle import CodesignConfiguration + +from .incremental_state import IncrementalState + + +@functools.total_ordering +@dataclass +class BundleSpecItem: + src: str + # Should be bundle relative path, empty string means the root of the bundle + dst: str + codesign_on_copy: bool = False + codesign_entitlements: Optional[str] = None + codesign_flags_override: Optional[List[str]] = None + + def __eq__(self: BundleSpecItem, other: Optional[BundleSpecItem]) -> bool: + return ( + other is not None + and self.src == other.src + and self.dst == other.dst + and self.codesign_on_copy == other.codesign_on_copy + and self.codesign_entitlements == other.codesign_entitlements + and self.codesign_flags_override == other.codesign_flags_override + ) + + def __ne__(self: BundleSpecItem, other: BundleSpecItem) -> bool: + return not self.__eq__(other) + + def __hash__(self: BundleSpecItem) -> int: + return hash( + ( + self.src, + self.dst, + self.codesign_on_copy, + self.codesign_entitlements, + ( + tuple(self.codesign_flags_override) + if self.codesign_flags_override is not None + else hash(None) + ), + ) + ) + + def __lt__(self: BundleSpecItem, other: BundleSpecItem) -> bool: + return ( + self.src < other.src + or self.dst < other.dst + or self.codesign_on_copy < other.codesign_on_copy + or ( + self.codesign_entitlements < other.codesign_entitlements + if ( + self.codesign_entitlements is not None + and other.codesign_entitlements is not None + ) + else ( + self.codesign_entitlements is None + and other.codesign_entitlements is not None + ) + ) + or ( + self.codesign_flags_override < other.codesign_flags_override + if ( + self.codesign_flags_override is not None + and other.codesign_flags_override is not None + ) + else ( + self.codesign_flags_override is None + 
and other.codesign_flags_override is not None
+                )
+            )
+        )
+
+
+@dataclass
+class IncrementalContext:
+    """
+    Additional data needed to bundle incrementally (extra, compared to a non-incremental run).
+    """
+
+    # Maps buck-project relative path to hash digest of the input file.
+    metadata: Dict[Path, str]
+    # Present when there is a valid incremental state on disk (i.e. previous build produced it).
+    state: Optional[IncrementalState]
+    codesigned: bool
+    codesign_configuration: Optional[CodesignConfiguration]
+    codesign_identity: Optional[str]
+    codesign_arguments: List[str]
+    versioned_if_macos: bool
diff --git a/prelude/apple/tools/bundling/incremental_state.py b/prelude/apple/tools/bundling/incremental_state.py
new file mode 100644
index 0000000000000..49bb78f8acb12
--- /dev/null
+++ b/prelude/apple/tools/bundling/incremental_state.py
@@ -0,0 +1,165 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+from __future__ import annotations
+
+import json
+from dataclasses import dataclass
+from io import TextIOBase
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Union
+
+from apple.tools.code_signing.codesign_bundle import CodesignConfiguration
+
+_VERSION = 7
+
+
+@dataclass
+class IncrementalStateItem:
+    source: Path
+    """
+    Path relative to buck project
+    """
+    destination_relative_to_bundle: Path
+    digest: Optional[str]
+    """
+    Required when the source file is not a symlink
+    """
+    resolved_symlink: Optional[Path]
+    """
+    Required when the source file is a symlink
+    """
+
+
+@dataclass
+class CodesignedOnCopy:
+    path: Path
+    """
+    Path relative to bundle root which needs to be codesigned
+    """
+    entitlements_digest: Optional[str]
+    """
+    Digest of entitlements used when the given path is codesigned on copy
+    """
+    codesign_flags_override: Optional[List[str]]
+    """
+    If present, overrides codesign arguments (which are used for the root bundle) when the given path is codesigned on copy
+    """
+
+    def __hash__(self: CodesignedOnCopy) -> int:
+        return hash(
+            (
+                self.path,
+                self.entitlements_digest,
+                (
+                    tuple(self.codesign_flags_override)
+                    if self.codesign_flags_override is not None
+                    else hash(None)
+                ),
+            )
+        )
+
+
+@dataclass
+class IncrementalState:
+    """
+    Describes a bundle output from a previous run of this bundling script.
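+
+    The `version` field guards the serialization format: `parse_incremental_state`
+    fails on a version mismatch, in which case the tool falls back to a clean
+    (non-incremental) assembly and writes fresh state for the next run.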
+ """ + + items: List[IncrementalStateItem] + codesigned: bool + codesign_configuration: CodesignConfiguration + codesigned_on_copy: List[CodesignedOnCopy] + codesign_identity: Optional[str] + codesign_arguments: List[str] + swift_stdlib_paths: List[Path] + versioned_if_macos: bool + version: int = _VERSION + + +class IncrementalStateJSONEncoder(json.JSONEncoder): + def default(self, o: object) -> object: + if isinstance(o, IncrementalState): + return { + "items": [self.default(i) for i in o.items], + "codesigned": o.codesigned, + "codesign_configuration": ( + o.codesign_configuration.value if o.codesign_configuration else None + ), + "codesigned_on_copy": [self.default(i) for i in o.codesigned_on_copy], + "codesign_identity": o.codesign_identity, + "swift_stdlib_paths": [str(p) for p in o.swift_stdlib_paths], + "version": o.version, + "codesign_arguments": o.codesign_arguments, + "versioned_if_macos": o.versioned_if_macos, + } + elif isinstance(o, IncrementalStateItem): + result = { + "source": str(o.source), + "destination_relative_to_bundle": str(o.destination_relative_to_bundle), + } + if o.digest is not None: + result["digest"] = o.digest + if o.resolved_symlink is not None: + result["resolved_symlink"] = str(o.resolved_symlink) + return result + elif isinstance(o, CodesignedOnCopy): + result = {} + result["path"] = str(o.path) + if o.entitlements_digest is not None: + result["entitlements_digest"] = str(o.entitlements_digest) + if o.codesign_flags_override is not None: + result["codesign_flags_override"] = o.codesign_flags_override + return result + else: + return super().default(o) + + +def _object_hook( + dict: Dict[str, Any] +) -> Union[IncrementalState, IncrementalStateItem, CodesignedOnCopy]: + if "version" in dict: + codesign_configuration = dict.pop("codesign_configuration") + dict["codesign_configuration"] = ( + CodesignConfiguration(codesign_configuration) + if codesign_configuration + else None + ) + dict["swift_stdlib_paths"] = [Path(p) for p in dict.pop("swift_stdlib_paths")] + return IncrementalState(**dict) + elif "destination_relative_to_bundle" in dict: + dict["source"] = Path(dict.pop("source")) + dict["destination_relative_to_bundle"] = Path( + dict.pop("destination_relative_to_bundle") + ) + dict["digest"] = dict.pop("digest", None) + resolved_symlink = dict.pop("resolved_symlink", None) + dict["resolved_symlink"] = Path(resolved_symlink) if resolved_symlink else None + return IncrementalStateItem(**dict) + else: + dict["path"] = Path(dict.pop("path")) + dict["entitlements_digest"] = dict.pop("entitlements_digest", None) + dict["codesign_flags_override"] = dict.pop("codesign_flags_override", None) + return CodesignedOnCopy(**dict) + + +def parse_incremental_state(data: TextIOBase) -> IncrementalState: + start_stream_position = data.tell() + try: + incremental_state = json.load(data, object_hook=_object_hook) + except BaseException: + data.seek(start_stream_position) + version = json.load(data)["version"] + if version != _VERSION: + raise RuntimeError( + f"Expected incremental state version to be `{_VERSION}` got `{version}`." + ) + else: + raise + return incremental_state diff --git a/prelude/apple/tools/bundling/incremental_state_test.py b/prelude/apple/tools/bundling/incremental_state_test.py new file mode 100644 index 0000000000000..b893e1cd20a08 --- /dev/null +++ b/prelude/apple/tools/bundling/incremental_state_test.py @@ -0,0 +1,148 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +import io +import json +import unittest +from pathlib import Path + +import pkg_resources + +from .incremental_state import ( + CodesignedOnCopy, + IncrementalState, + IncrementalStateItem, + IncrementalStateJSONEncoder, + parse_incremental_state, +) + + +class TestIncrementalState(unittest.TestCase): + def test_state_serialization_and_deserialization(self): + expected = IncrementalState( + items=[ + IncrementalStateItem( + source=Path("repo/foo.txt"), + destination_relative_to_bundle=Path("foo.txt"), + digest="foo_digest", + resolved_symlink=None, + ), + IncrementalStateItem( + source=Path("buck-out/bar.txt"), + destination_relative_to_bundle=Path("Resources/bar.txt"), + digest="bar_digest", + resolved_symlink=None, + ), + ], + codesigned=True, + codesign_configuration=None, + codesigned_on_copy=[ + CodesignedOnCopy( + path=Path("Resources/bar.txt"), + entitlements_digest=None, + codesign_flags_override=None, + ), + CodesignedOnCopy( + path=Path("Resources/baz.txt"), + entitlements_digest="abc", + codesign_flags_override=None, + ), + CodesignedOnCopy( + path=Path("Resources/qux.txt"), + entitlements_digest=None, + codesign_flags_override=["--deep", "--force"], + ), + ], + codesign_identity="Johnny Appleseed", + codesign_arguments=[ + "--force", + ], + swift_stdlib_paths=[Path("Frameworks/libswiftCore.dylib")], + versioned_if_macos=False, + ) + json_result = json.dumps(expected, cls=IncrementalStateJSONEncoder) + result = parse_incremental_state(io.StringIO(json_result)) + self.assertEqual( + result, + expected, + ) + + def test_valid_state_is_parsed_successfully(self): + file_content = pkg_resources.resource_stream( + __name__, "test_resources/valid_incremental_state.json" + ) + result = parse_incremental_state(file_content) + expected = IncrementalState( + items=[ + IncrementalStateItem( + source=Path("repo/foo.txt"), + destination_relative_to_bundle=Path("foo.txt"), + digest="foo_digest", + resolved_symlink=None, + ), + IncrementalStateItem( + source=Path("buck-out/bar.txt"), + destination_relative_to_bundle=Path("Resources/bar.txt"), + digest="bar_digest", + resolved_symlink=None, + ), + IncrementalStateItem( + source=Path("buck-out/bar"), + destination_relative_to_bundle=Path("Resources/bar"), + digest=None, + resolved_symlink=Path("bar.txt"), + ), + ], + codesigned=True, + codesign_configuration=None, + codesigned_on_copy=[ + CodesignedOnCopy( + path=Path("Resources/bar.txt"), + entitlements_digest=None, + codesign_flags_override=None, + ), + CodesignedOnCopy( + path=Path("Resources/baz.txt"), + entitlements_digest="abc", + codesign_flags_override=None, + ), + CodesignedOnCopy( + path=Path("Resources/qux.txt"), + entitlements_digest=None, + codesign_flags_override=["--deep", "--force"], + ), + ], + codesign_identity="Johny Appleseed", + codesign_arguments=[ + "--force", + "--deep", + ], + swift_stdlib_paths=[Path("Frameworks/libswiftCore.dylib")], + versioned_if_macos=True, + ) + self.assertEqual( + result, + expected, + ) + + def test_error_when_invalid_metadata(self): + file_content = pkg_resources.resource_stream( + __name__, "test_resources/the.broken_json" + ) + with self.assertRaises(json.JSONDecodeError): + _ = parse_incremental_state(file_content) + + def test_user_friendly_error_when_metadata_with_newer_version(self): + 
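+        # When parsing fails, `parse_incremental_state` re-reads the payload
+        # and reports a version mismatch as a `RuntimeError` instead of
+        # leaking the raw parse error.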
+        file_content = pkg_resources.resource_stream(
+            __name__, "test_resources/newer_version_incremental_state.json"
+        )
+        with self.assertRaises(Exception) as context:
+            _ = parse_incremental_state(file_content)
+        # Compare the message, not the exception object: exception instances
+        # compare by identity, so comparing against a fresh RuntimeError would
+        # never succeed.
+        self.assertEqual(
+            str(context.exception),
+            "Expected incremental state version to be `2` got `3`.",
+        )
diff --git a/prelude/apple/tools/bundling/incremental_utils.py b/prelude/apple/tools/bundling/incremental_utils.py
new file mode 100644
index 0000000000000..03e0f9a0c4561
--- /dev/null
+++ b/prelude/apple/tools/bundling/incremental_utils.py
@@ -0,0 +1,218 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+import logging
+import os
+from pathlib import Path
+from typing import Dict, List, Optional, Set, Tuple
+
+from .assemble_bundle_types import BundleSpecItem, IncrementalContext
+from .incremental_state import CodesignedOnCopy, IncrementalStateItem
+
+FILES_TO_BE_IGNORED: Set[str] = {
+    # Storage of Finder settings, which shouldn't be added when enumerating files from sources
+    ".DS_Store",
+}
+
+
+def should_assemble_incrementally(
+    spec: List[BundleSpecItem], incremental_context: IncrementalContext
+) -> bool:
+    previous_run_state = incremental_context.state
+    if previous_run_state is None:
+        logging.getLogger(__name__).info(
+            "Decided not to assemble incrementally — no incremental state for previous build."
+        )
+        return False
+    if previous_run_state.versioned_if_macos != incremental_context.versioned_if_macos:
+        logging.getLogger(__name__).info(
+            "Decided not to assemble incrementally — current build and previous build have different versioned_if_macos settings."
+        )
+        return False
+
+    previously_codesigned = previous_run_state.codesigned
+    # If the previous bundle was not code signed, there should be no problems with
+    # code signing the current one incrementally. Existing binaries could be code
+    # signed "on top" if that's needed.
+    if not previously_codesigned:
+        logging.getLogger(__name__).info(
+            "Decided to assemble incrementally — previous build is not codesigned."
+        )
+        return True
+    # For simplicity and correctness, we perform a non-incremental run instead of
+    # stripping code signatures.
+    if not incremental_context.codesigned:
+        logging.getLogger(__name__).info(
+            "Decided not to assemble incrementally — current build is not codesigned, while previous build is codesigned."
+        )
+        return False
+    # If the previous identity differs from the current one, also perform a non-incremental run.
+    if previous_run_state.codesign_identity != incremental_context.codesign_identity:
+        logging.getLogger(__name__).info(
+            "Decided not to assemble incrementally — previous vs current builds have mismatching codesigning identities."
+        )
+        return False
+    # If the previous codesign arguments differ from the current ones, also perform a non-incremental run.
+    if previous_run_state.codesign_arguments != incremental_context.codesign_arguments:
+        logging.getLogger(__name__).info(
+            "Decided not to assemble incrementally — previous vs current builds have mismatching codesigning arguments."
+        )
+        return False
+    # If the bundle from the previous run was signed in a different configuration vs the current run (e.g. dry-run code signed while now regular code signing is required), perform a non-incremental run.
+    if (
+        previous_run_state.codesign_configuration
+        != incremental_context.codesign_configuration
+    ):
+        logging.getLogger(__name__).info(
+            "Decided not to assemble incrementally — previous vs current builds have mismatching codesigning configurations."
+        )
+        return False
+    # If there is an artifact that was code signed on copy in the previous run which is
+    # present in the current run and not code signed on copy, we should perform a
+    # non-incremental run for simplicity and correctness reasons.
+    current_codesigned_on_copy_items = {
+        codesigned_on_copy_item(
+            path=Path(i.dst),
+            entitlements=(
+                Path(i.codesign_entitlements) if i.codesign_entitlements else None
+            ),
+            incremental_context=incremental_context,
+            codesign_flags_override=i.codesign_flags_override,
+        )
+        for i in spec
+        if i.codesign_on_copy
+    }
+
+    codesigned_on_copy_paths_from_previous_build_which_are_present_in_current_build = _codesigned_on_copy_paths_from_previous_build_which_are_present_in_current_build(
+        previous_run_state.codesigned_on_copy,
+        {Path(i.dst) for i in spec},
+    )
+    codesign_on_copy_paths_are_compatible = codesigned_on_copy_paths_from_previous_build_which_are_present_in_current_build.issubset(
+        current_codesigned_on_copy_items
+    )
+    if not codesign_on_copy_paths_are_compatible:
+        logging.getLogger(__name__).info(
+            f"Decided not to assemble incrementally — there is at least one artifact `{list(codesigned_on_copy_paths_from_previous_build_which_are_present_in_current_build - current_codesigned_on_copy_items)[0]}` that was code signed on copy in previous build which is present in current run and not code signed on copy (or codesigned but with a different set of entitlements and flags)."
+        )
+    return codesign_on_copy_paths_are_compatible
+
+
+def _codesigned_on_copy_paths_from_previous_build_which_are_present_in_current_build(
+    previously_codesigned_on_copy: List[CodesignedOnCopy],
+    all_input_files: Set[Path],
+) -> Set[CodesignedOnCopy]:
+    all_input_files_and_directories = all_input_files | {
+        i for file in all_input_files for i in file.parents
+    }
+    return {
+        i
+        for i in previously_codesigned_on_copy
+        if i.path in all_input_files_and_directories
+    }
+
+
+def _get_new_digest(action_metadata: Dict[Path, str], path: Path) -> str:
+    # A resource file can live in a symlinked folder, like `ghi/def` in the example below:
+    # ```
+    # project_root
+    # ├── abc
+    # │   └── def
+    # └── ghi -> abc
+    # ```
+    # Python would not consider `ghi/def` itself a symlink, yet `action_metadata` is keyed
+    # by the actual resolved path (`abc/def`), so the path has to be resolved here.
+ # Given Python doesn't think it's a symlink, the `readlink` API wouldn't work either + resolved_path = path.resolve().relative_to(Path.cwd()) + return action_metadata[resolved_path] + + +def calculate_incremental_state( + spec: List[BundleSpecItem], action_metadata: Dict[Path, str] +) -> List[IncrementalStateItem]: + """ + `action_metadata` maps Buck project relative paths to hash digest + for every input file of the action which executes this script + """ + result = [] + source_with_destination_files = _source_with_destination_files(spec) + for src, dst in source_with_destination_files: + is_symlink = src.is_symlink() + new_digest = _get_new_digest(action_metadata, src) if not is_symlink else None + resolved_symlink = Path(os.readlink(src)) if is_symlink else None + result.append( + IncrementalStateItem( + source=src, + destination_relative_to_bundle=dst, + digest=new_digest, + resolved_symlink=resolved_symlink, + ) + ) + return result + + +def _source_with_destination_files( + spec: List[BundleSpecItem], +) -> List[Tuple[Path, Path]]: + """ + Returns: + Ordered mapping from source path to destination path (relative to bundle) for every file + present in a bundle. Directories that were parts of the spec are split into actual files. + """ + result = [] + for spec_item in spec: + file_or_dir = Path(spec_item.src) + if file_or_dir.is_file(): + if not spec_item.dst: + raise RuntimeError( + f'Invalid input bundle spec. File located at {file_or_dir} should not have `""` destination (only directories are allowed to have such value).' + ) + result.append((file_or_dir, Path(spec_item.dst))) + elif file_or_dir.is_dir(): + result.extend( + [ + (file, spec_item.dst / file.relative_to(file_or_dir)) + for file in _list_directory_deterministically(file_or_dir) + ] + ) + else: + raise RuntimeError( + f"Path {file_or_dir} is not a file and not a dir, don't know how to handle it." + ) + + return [(src, dst) for src, dst in result if src.name not in FILES_TO_BE_IGNORED] + + +def _list_directory_deterministically(directory: Path) -> List[Path]: + result = [] + for current_dir_path, dir_names, file_names in os.walk(directory): + result += [Path(os.path.join(current_dir_path, f)) for f in sorted(file_names)] + # Sort in order for walk to be deterministic. + dir_names.sort() + return result + + +def codesigned_on_copy_item( + path: Path, + entitlements: Optional[Path], + incremental_context: IncrementalContext, + codesign_flags_override: Optional[List[str]], +) -> CodesignedOnCopy: + if entitlements is not None: + digest = incremental_context.metadata.get(entitlements) + if digest is None: + raise RuntimeError( + f"Expected digest for entitlements file path `{entitlements}` to be present in action metadata." + ) + else: + digest = None + return CodesignedOnCopy( + path=path, + entitlements_digest=digest, + codesign_flags_override=codesign_flags_override, + ) diff --git a/prelude/apple/tools/bundling/incremental_utils_test.py b/prelude/apple/tools/bundling/incremental_utils_test.py new file mode 100644 index 0000000000000..47e7b1b975963 --- /dev/null +++ b/prelude/apple/tools/bundling/incremental_utils_test.py @@ -0,0 +1,587 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
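+
+# These tests drive `should_assemble_incrementally` through each bail-out of
+# its decision ladder and exercise `calculate_incremental_state` against real
+# temporary directory trees (including symlinked files and directories).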
+ +import os +import tempfile +import unittest +from pathlib import Path +from typing import Generator + +from apple.tools.code_signing.codesign_bundle import CodesignConfiguration + +from .assemble_bundle_types import BundleSpecItem +from .incremental_state import CodesignedOnCopy, IncrementalState, IncrementalStateItem +from .incremental_utils import ( + calculate_incremental_state, + IncrementalContext, + should_assemble_incrementally, +) + +try: + from contextlib import chdir # pyre-ignore[21], Python 3.11+ +except ImportError: + from contextlib import contextmanager + + @contextmanager + def chdir(path: os.PathLike) -> Generator[None, None, None]: + cwd = os.getcwd() + try: + os.chdir(path) + yield + finally: + os.chdir(cwd) + + +class TestIncrementalUtils(unittest.TestCase): + maxDiff = None + + def test_not_run_incrementally_when_previous_build_not_incremental(self): + spec = [ + BundleSpecItem( + src="src/foo", + dst="foo", + codesign_on_copy=False, + ) + ] + incremental_context = IncrementalContext( + metadata={Path("foo"): "digest"}, + state=None, + codesigned=False, + codesign_configuration=None, + codesign_identity=None, + codesign_arguments=[], + versioned_if_macos=True, + ) + self.assertFalse(should_assemble_incrementally(spec, incremental_context)) + + def test_run_incrementally_when_previous_build_not_codesigned(self): + spec = [ + BundleSpecItem( + src="src/foo", + dst="foo", + codesign_on_copy=False, + ) + ] + incremental_context = IncrementalContext( + metadata={Path("foo"): "digest"}, + state=IncrementalState( + items=[ + IncrementalStateItem( + source=Path("src/foo"), + destination_relative_to_bundle=Path("foo"), + digest="digest", + resolved_symlink=None, + ) + ], + codesigned=False, + codesign_configuration=None, + codesigned_on_copy=[], + codesign_identity=None, + codesign_arguments=[], + versioned_if_macos=True, + swift_stdlib_paths=[], + ), + codesigned=True, + codesign_configuration=None, + codesign_identity=None, + codesign_arguments=[], + versioned_if_macos=True, + ) + self.assertTrue(should_assemble_incrementally(spec, incremental_context)) + + def test_not_run_incrementally_when_previous_build_codesigned_and_current_is_not( + self, + ): + spec = [ + BundleSpecItem( + src="src/foo", + dst="foo", + codesign_on_copy=False, + ) + ] + incremental_context = IncrementalContext( + metadata={Path("foo"): "digest"}, + state=IncrementalState( + items=[ + IncrementalStateItem( + source=Path("src/foo"), + destination_relative_to_bundle=Path("foo"), + digest="digest", + resolved_symlink=None, + ) + ], + codesigned=True, + codesign_configuration=None, + codesigned_on_copy=[], + codesign_identity=None, + codesign_arguments=[], + versioned_if_macos=True, + swift_stdlib_paths=[], + ), + codesigned=False, + codesign_configuration=None, + codesign_identity=None, + codesign_arguments=[], + versioned_if_macos=True, + ) + self.assertFalse(should_assemble_incrementally(spec, incremental_context)) + # Check that behavior changes when both builds are codesigned + incremental_context.codesigned = True + self.assertTrue(should_assemble_incrementally(spec, incremental_context)) + + def test_not_run_incrementally_when_previous_build_codesigned_with_different_identity( + self, + ): + spec = [ + BundleSpecItem( + src="src/foo", + dst="foo", + codesign_on_copy=False, + ) + ] + incremental_context = IncrementalContext( + metadata={Path("foo"): "digest"}, + state=IncrementalState( + items=[ + IncrementalStateItem( + source=Path("src/foo"), + destination_relative_to_bundle=Path("foo"), + 
digest="digest", + resolved_symlink=None, + ) + ], + codesigned=True, + codesign_configuration=None, + codesigned_on_copy=[], + codesign_identity="old_identity", + codesign_arguments=[], + versioned_if_macos=True, + swift_stdlib_paths=[], + ), + codesigned=True, + codesign_configuration=None, + codesign_identity="new_identity", + codesign_arguments=[], + versioned_if_macos=True, + ) + self.assertFalse(should_assemble_incrementally(spec, incremental_context)) + # Check that behavior changes when identities are same + incremental_context.state.codesign_identity = "same_identity" + incremental_context.codesign_identity = "same_identity" + self.assertTrue(should_assemble_incrementally(spec, incremental_context)) + + def test_run_incrementally_when_codesign_on_copy_paths_match(self): + spec = [ + BundleSpecItem( + src="src/foo", + dst="foo", + codesign_on_copy=True, + ), + BundleSpecItem( + src="src/bar", + dst="bar", + codesign_on_copy=True, + ), + BundleSpecItem( + src="src/baz", + dst="baz", + codesign_on_copy=True, + codesign_entitlements="entitlements.plist", + ), + ] + incremental_context = IncrementalContext( + metadata={ + Path("src/foo"): "digest", + Path("src/baz"): "digest2", + Path("entitlements.plist"): "entitlements_digest", + }, + state=IncrementalState( + items=[ + IncrementalStateItem( + source=Path("src/foo"), + destination_relative_to_bundle=Path("foo"), + digest="digest", + resolved_symlink=None, + ), + IncrementalStateItem( + source=Path("src/baz"), + destination_relative_to_bundle=Path("baz"), + digest="digest2", + resolved_symlink=None, + ), + ], + codesigned=True, + codesign_configuration=None, + codesigned_on_copy=[ + CodesignedOnCopy( + path=Path("foo"), + entitlements_digest=None, + codesign_flags_override=None, + ), + CodesignedOnCopy( + path=Path("baz"), + entitlements_digest="entitlements_digest", + codesign_flags_override=None, + ), + ], + codesign_identity="same_identity", + codesign_arguments=[], + versioned_if_macos=True, + swift_stdlib_paths=[], + ), + codesigned=True, + codesign_configuration=None, + codesign_identity="same_identity", + codesign_arguments=[], + versioned_if_macos=True, + ) + self.assertTrue(should_assemble_incrementally(spec, incremental_context)) + + def test_not_run_incrementally_when_codesign_on_copy_paths_mismatch(self): + spec = [ + BundleSpecItem( + src="src/foo", + dst="foo", + # want it to be not codesigned in new build + codesign_on_copy=False, + ) + ] + incremental_context = IncrementalContext( + metadata={Path("src/foo"): "digest"}, + state=IncrementalState( + items=[ + IncrementalStateItem( + source=Path("src/foo"), + destination_relative_to_bundle=Path("foo"), + digest="digest", + resolved_symlink=None, + ) + ], + codesigned=True, + codesign_configuration=None, + # but it was codesigned in old build + codesigned_on_copy=[ + CodesignedOnCopy( + path=Path("foo"), + entitlements_digest=None, + codesign_flags_override=None, + ) + ], + codesign_identity="same_identity", + codesign_arguments=[], + versioned_if_macos=True, + swift_stdlib_paths=[], + ), + codesigned=True, + codesign_configuration=None, + codesign_identity="same_identity", + codesign_arguments=[], + versioned_if_macos=True, + ) + self.assertFalse(should_assemble_incrementally(spec, incremental_context)) + spec[0].codesign_on_copy = True + self.assertTrue(should_assemble_incrementally(spec, incremental_context)) + + def test_not_run_incrementally_when_codesign_on_copy_entitlements_mismatch(self): + spec = [ + BundleSpecItem( + src="src/foo", + dst="foo", + 
codesign_on_copy=True, + codesign_entitlements="baz/entitlements.plist", + ) + ] + incremental_context = IncrementalContext( + metadata={ + Path("src/foo"): "digest", + Path("baz/entitlements.plist"): "new_digest", + }, + state=IncrementalState( + items=[ + IncrementalStateItem( + source=Path("src/foo"), + destination_relative_to_bundle=Path("foo"), + digest="digest", + resolved_symlink=None, + ) + ], + codesigned=True, + codesign_configuration=None, + codesigned_on_copy=[ + CodesignedOnCopy( + path=Path("foo"), + entitlements_digest="old_digest", + codesign_flags_override=None, + ) + ], + codesign_identity="same_identity", + codesign_arguments=[], + versioned_if_macos=True, + swift_stdlib_paths=[], + ), + codesigned=True, + codesign_configuration=None, + codesign_identity="same_identity", + codesign_arguments=[], + versioned_if_macos=True, + ) + self.assertFalse(should_assemble_incrementally(spec, incremental_context)) + incremental_context.metadata[Path("baz/entitlements.plist")] = "old_digest" + self.assertTrue(should_assemble_incrementally(spec, incremental_context)) + + def test_not_run_incrementally_when_codesign_on_copy_flags_mismatch(self): + spec = [ + BundleSpecItem( + src="src/foo", + dst="foo", + codesign_on_copy=True, + codesign_flags_override=["--force"], + ) + ] + incremental_context = IncrementalContext( + metadata={ + Path("src/foo"): "digest", + }, + state=IncrementalState( + items=[ + IncrementalStateItem( + source=Path("src/foo"), + destination_relative_to_bundle=Path("foo"), + digest="digest", + resolved_symlink=None, + ) + ], + codesigned=True, + codesign_configuration=None, + codesigned_on_copy=[ + CodesignedOnCopy( + path=Path("foo"), + entitlements_digest=None, + codesign_flags_override=["--force", "--deep"], + ) + ], + codesign_identity="same_identity", + codesign_arguments=[], + versioned_if_macos=True, + swift_stdlib_paths=[], + ), + codesigned=True, + codesign_configuration=None, + codesign_identity="same_identity", + codesign_arguments=[], + versioned_if_macos=True, + ) + self.assertFalse(should_assemble_incrementally(spec, incremental_context)) + incremental_context.state.codesigned_on_copy[0].codesign_flags_override = [ + "--force" + ] + self.assertTrue(should_assemble_incrementally(spec, incremental_context)) + + def test_not_run_incrementally_when_codesign_arguments_mismatch(self): + spec = [ + BundleSpecItem( + src="src/foo", + dst="foo", + ) + ] + incremental_context = IncrementalContext( + metadata={ + Path("src/foo"): "digest", + }, + state=IncrementalState( + items=[ + IncrementalStateItem( + source=Path("src/foo"), + destination_relative_to_bundle=Path("foo"), + digest="digest", + resolved_symlink=None, + ) + ], + codesigned=True, + codesign_configuration=None, + codesigned_on_copy=[], + codesign_identity="same_identity", + codesign_arguments=["--force"], + swift_stdlib_paths=[], + versioned_if_macos=True, + ), + codesigned=True, + codesign_configuration=None, + codesign_identity="same_identity", + codesign_arguments=["--force", "--deep"], + versioned_if_macos=True, + ) + self.assertFalse(should_assemble_incrementally(spec, incremental_context)) + incremental_context.codesign_arguments = ["--force"] + self.assertTrue(should_assemble_incrementally(spec, incremental_context)) + + def test_not_run_incrementally_when_codesign_configurations_mismatch(self): + spec = [ + BundleSpecItem( + src="src/foo", + dst="foo", + codesign_on_copy=True, + ) + ] + incremental_context = IncrementalContext( + metadata={Path("src/foo"): "digest"}, + state=IncrementalState( 
+ items=[ + IncrementalStateItem( + source=Path("src/foo"), + destination_relative_to_bundle=Path("foo"), + digest="digest", + resolved_symlink=None, + ) + ], + codesigned=True, + # Dry codesigned in old build + codesign_configuration=CodesignConfiguration.dryRun, + codesigned_on_copy=[ + CodesignedOnCopy( + path=Path("foo"), + entitlements_digest=None, + codesign_flags_override=None, + ) + ], + codesign_identity="same_identity", + codesign_arguments=[], + versioned_if_macos=True, + swift_stdlib_paths=[], + ), + codesigned=True, + codesign_configuration=CodesignConfiguration.dryRun, + codesign_identity="same_identity", + codesign_arguments=[], + versioned_if_macos=True, + ) + # Canary + self.assertTrue(should_assemble_incrementally(spec, incremental_context)) + # Now we want a regular signing in new build + incremental_context.codesign_configuration = None + self.assertFalse(should_assemble_incrementally(spec, incremental_context)) + + def test_calculate_incremental_state(self): + with tempfile.TemporaryDirectory() as project_root, chdir(project_root): + # project_root + # ├── foo + # ├── bar + # │ ├── baz + # │ └── qux -> baz + # ├── abc + # │ └── def + # └── ghi -> abc + Path("foo").write_text("hello") + bar_path = Path("bar") + bar_path.mkdir() + (bar_path / "baz").write_text("world") + (bar_path / "qux").symlink_to("baz") + abc_path = Path("abc") + abc_path.mkdir() + (abc_path / "def").write_text("yo") + Path("ghi").symlink_to("abc") + + action_metadata = { + Path("foo"): "hash(foo)", + Path("bar/baz"): "hash(baz)", + Path("abc/def"): "hash(def)", + } + spec = [ + BundleSpecItem( + src="foo", + dst="foo", + codesign_on_copy=False, + ), + BundleSpecItem( + src="bar", + dst="tux", + codesign_on_copy=True, + ), + BundleSpecItem( + src="ghi", + dst="ghi", + codesign_on_copy=True, + ), + ] + state = calculate_incremental_state(spec, action_metadata) + self.assertEqual( + state, + [ + IncrementalStateItem( + source=Path("foo"), + destination_relative_to_bundle=Path("foo"), + digest="hash(foo)", + resolved_symlink=None, + ), + IncrementalStateItem( + source=Path("bar/baz"), + destination_relative_to_bundle=Path("tux/baz"), + digest="hash(baz)", + resolved_symlink=None, + ), + IncrementalStateItem( + source=Path("bar/qux"), + destination_relative_to_bundle=Path("tux/qux"), + digest=None, + resolved_symlink=Path("baz"), + ), + IncrementalStateItem( + source=Path("ghi/def"), + destination_relative_to_bundle=Path("ghi/def"), + digest="hash(def)", + resolved_symlink=None, + ), + ], + ) + + def test_calculate_incremental_state_with_ds_store(self) -> None: + with tempfile.TemporaryDirectory() as project_root, chdir(project_root): + # project_root + # ├── foo + # └── bar + # ├── baz + # └── .DS_Store + Path("foo").write_text("hello") + bar_path = Path("bar") + bar_path.mkdir() + (bar_path / "baz").write_text("world") + (bar_path / ".DS_Store").touch() + + action_metadata = { + Path("foo"): "hash(foo)", + Path("bar/baz"): "hash(baz)", + } + spec = [ + BundleSpecItem( + src="foo", + dst="foo", + codesign_on_copy=False, + ), + BundleSpecItem( + src="bar", + dst="bar", + codesign_on_copy=True, + ), + ] + state = calculate_incremental_state(spec, action_metadata) + self.assertEqual( + state, + [ + IncrementalStateItem( + source=Path("foo"), + destination_relative_to_bundle=Path("foo"), + digest="hash(foo)", + resolved_symlink=None, + ), + IncrementalStateItem( + source=Path("bar/baz"), + destination_relative_to_bundle=Path("bar/baz"), + digest="hash(baz)", + resolved_symlink=None, + ), + ], + ) diff --git 
a/prelude/apple/tools/bundling/main.py b/prelude/apple/tools/bundling/main.py
new file mode 100644
index 0000000000000..11c4a8928364f
--- /dev/null
+++ b/prelude/apple/tools/bundling/main.py
@@ -0,0 +1,684 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+import argparse
+import cProfile
+import json
+import logging
+import pstats
+import shlex
+import sys
+from pathlib import Path
+from typing import Dict, List, Optional
+
+from apple.tools.code_signing.apple_platform import ApplePlatform
+from apple.tools.code_signing.codesign_bundle import (
+    AdhocSigningContext,
+    codesign_bundle,
+    CodesignConfiguration,
+    CodesignedPath,
+    signing_context_with_profile_selection,
+)
+from apple.tools.code_signing.list_codesign_identities import (
+    AdHocListCodesignIdentities,
+    ListCodesignIdentities,
+)
+
+from apple.tools.re_compatibility_utils.writable import make_dir_recursively_writable
+
+from .action_metadata import action_metadata_if_present
+
+from .assemble_bundle import assemble_bundle
+from .assemble_bundle_types import BundleSpecItem, IncrementalContext
+from .incremental_state import (
+    CodesignedOnCopy,
+    IncrementalState,
+    IncrementalStateItem,
+    IncrementalStateJSONEncoder,
+    parse_incremental_state,
+)
+from .incremental_utils import codesigned_on_copy_item
+from .swift_support import run_swift_stdlib_tool, SwiftSupportArguments
+
+
+_METADATA_PATH_KEY = "ACTION_METADATA"
+
+
+def _args_parser() -> argparse.ArgumentParser:
+    parser = argparse.ArgumentParser(
+        description="Tool which assembles the Apple bundle."
+    )
+    parser.add_argument(
+        "--output",
+        metavar="",
+        type=Path,
+        required=True,
+        help="Absolute path to Apple bundle result.",
+    )
+    parser.add_argument(
+        "--spec",
+        metavar="",
+        type=Path,
+        required=True,
+        help="Path to file with JSON representing the bundle contents. It should contain a dictionary which maps bundle relative destination paths to source paths.",
+    )
+    parser.add_argument(
+        "--codesign",
+        action="store_true",
+        help="Should the final bundle be codesigned.",
+    )
+    parser.add_argument(
+        "--codesign-tool",
+        metavar="",
+        type=Path,
+        required=False,
+        help="Path to code signing utility. If not provided, the standard `codesign` tool will be used.",
+    )
+    parser.add_argument(
+        "--strict-provisioning-profile-search",
+        action="store_true",
+        required=False,
+        help="Fail code signing if more than one matching profile is found.",
+    )
+    parser.add_argument(
+        "--provisioning-profile-filter",
+        metavar="",
+        type=str,
+        required=False,
+        help="Regex to disambiguate multiple matching profiles, evaluated against provisioning profile filename.",
+    )
+    parser.add_argument(
+        "--codesign-args",
+        type=str,
+        default=[],
+        required=False,
+        action="append",
+        help="Add additional args to pass during codesigning. Pass as `--codesign-args=ARG` to ensure correct arg parsing.",
+    )
+    parser.add_argument(
+        "--info-plist-source",
+        metavar="",
+        type=Path,
+        required=False,
+        help="Path to Info.plist source file which is used only to make code signing decisions (to be bundled `Info.plist` should be present in spec parameter). Required if code signing is requested.",
+    )
+    parser.add_argument(
+        "--info-plist-destination",
+        metavar="",
+        type=Path,
+        required=False,
+        help="Required if code signing is requested. Bundle relative destination path to Info.plist file if it is present in bundle.",
+    )
+    parser.add_argument(
+        "--entitlements",
+        metavar="",
+        type=Path,
+        required=False,
+        help="Path to file with entitlements to be used during code signing. If it's not provided, the minimal entitlements are going to be generated.",
+    )
+    parser.add_argument(
+        "--profiles-dir",
+        metavar="",
+        type=Path,
+        required=False,
+        help="Required if non-ad-hoc code signing is requested. Path to directory with provisioning profile files.",
+    )
+    parser.add_argument(
+        "--codesign-identities-command",
+        metavar='<"/signing/identities --available">',
+        type=str,
+        required=False,
+        help="Command listing available code signing identities. If it's not provided, the `security` utility is assumed to be available and is used.",
+    )
+    parser.add_argument(
+        "--ad-hoc",
+        action="store_true",
+        help="Perform ad-hoc signing if set.",
+    )
+    parser.add_argument(
+        "--embed-provisioning-profile-when-signing-ad-hoc",
+        action="store_true",
+        help="Perform selection of provisioning profile and embed it into the final bundle when ad-hoc signing if set.",
+    )
+    parser.add_argument(
+        "--ad-hoc-codesign-identity",
+        metavar="",
+        type=str,
+        required=False,
+        help="Codesign identity to use when ad-hoc signing is performed. Should be present when selection of provisioning profile is requested for ad-hoc signing.",
+    )
+    parser.add_argument(
+        "--codesign-configuration",
+        required=False,
+        type=CodesignConfiguration,
+        choices=[e.value for e in CodesignConfiguration],
+        help=f"""
+        Augments how code signing is run.
+        Pass `{CodesignConfiguration.fastAdhoc}` to skip adhoc signing bundles if the executables are already adhoc signed.
+        Pass `{CodesignConfiguration.dryRun}` for code signing to be run in dry mode (instead of actual signing only .plist
+        files with signing parameters will be generated in the root of each signed bundle).
+    """,
+    )
+    parser.add_argument(
+        "--platform",
+        metavar="",
+        type=ApplePlatform,
+        required=False,
+        help="Required if code signing or Swift support is requested. Apple platform for which the bundle is built.",
+    )
+    parser.add_argument(
+        "--incremental-state",
+        metavar="",
+        type=Path,
+        required=False,
+        help="Required if script is run in incremental mode. Path to file with JSON which describes the contents of the bundle built previously.",
+    )
+    parser.add_argument(
+        "--profile-output",
+        metavar="",
+        type=Path,
+        required=False,
+        help="Path to the profiling output. If present, profiling will be enabled.",
+    )
+    parser.add_argument(
+        "--log-level-stderr",
+        choices=["debug", "info", "warning", "error", "critical"],
+        type=str,
+        required=False,
+        default="warning",
+        help="Logging level for messages written to stderr.",
+    )
+    parser.add_argument(
+        "--log-level-file",
+        choices=["debug", "info", "warning", "error", "critical"],
+        type=str,
+        required=False,
+        default="info",
+        help="Logging level for messages written to a log file.",
+    )
+    parser.add_argument(
+        "--log-file",
+        type=Path,
+        required=False,
+        help="Path to a log file. If present, logging will be directed to this file in addition to stderr.",
+    )
+    parser.add_argument(
+        "--binary-destination",
+        metavar="",
+        type=Path,
+        required=False,
+        help="Required if swift support was requested. Bundle relative destination path to bundle binary.",
+    )
+    parser.add_argument(
+        "--frameworks-destination",
+        metavar="",
+        type=Path,
+        required=False,
+        help="Required if swift support was requested. Bundle relative destination path to frameworks directory.",
+    )
+    parser.add_argument(
+        "--extensionkit-extensions-destination",
+        metavar="",
+        type=Path,
+        required=False,
+        help="Required if swift support was requested. Bundle relative destination path to ExtensionKit Extensions directory.",
+    )
+    parser.add_argument(
+        "--plugins-destination",
+        metavar="",
+        type=Path,
+        required=False,
+        help="Required if swift support was requested. Bundle relative destination path to plugins directory.",
+    )
+    parser.add_argument(
+        "--appclips-destination",
+        metavar="",
+        type=Path,
+        required=False,
+        help="Required if swift support was requested. Bundle relative destination path to appclips directory.",
+    )
+    parser.add_argument(
+        "--sdk-root",
+        metavar="",
+        type=Path,
+        required=False,
+        help="Required if swift support was requested. Path to SDK root.",
+    )
+    parser.add_argument(
+        "--swift-stdlib-command",
+        metavar='<"/swift/stdlib/tool --foo bar/qux">',
+        type=str,
+        required=False,
+        help="Swift stdlib command prefix. If present, the output bundle will contain needed Swift standard libraries (to support the lack of ABI stability or certain backports usage).",
+    )
+    parser.add_argument(
+        "--check-conflicts",
+        action="store_true",
+        help="Check there are no path conflicts between different source parts of the bundle if enabled.",
+    )
+    parser.add_argument(
+        "--fast-provisioning-profile-parsing",
+        action="store_true",
+        help="Uses experimental faster provisioning profile parsing.",
+    )
+    parser.add_argument(
+        "--versioned-if-macos",
+        action="store_true",
+        help="Create symlinks for versioned macOS bundle.",
+    )
+
+    return parser
+
+
+def _main() -> None:
+    args_parser = _args_parser()
+    args = args_parser.parse_args()
+
+    if args.log_file:
+        with open(args.log_file, "w") as _:
+            # We need to open the log file for two reasons:
+            # - Ensure it exists after action runs, as it's an output and thus required
+            # - It gets erased, so that we get new logs when doing incremental bundling
+            pass
+
+    _setup_logging(
+        stderr_level=getattr(logging, args.log_level_stderr.upper()),
+        file_level=getattr(logging, args.log_level_file.upper()),
+        log_path=args.log_file,
+    )
+
+    pr = cProfile.Profile()
+    profiling_enabled = args.profile_output is not None
+    if profiling_enabled:
+        pr.enable()
+
+    if args.codesign:
+        if not args.info_plist_source:
+            raise RuntimeError(
+                "Path to Info.plist source file should be set when code signing is required."
+            )
+        if not args.info_plist_destination:
+            raise RuntimeError(
+                "Info.plist destination path should be set when code signing is required."
+            )
+        if not args.platform:
+            raise RuntimeError(
+                "Apple platform should be set when code signing is required."
+            )
+        list_codesign_identities = (
+            ListCodesignIdentities.override(
+                shlex.split(args.codesign_identities_command)
+            )
+            if args.codesign_identities_command
+            else ListCodesignIdentities.default()
+        )
+        if args.ad_hoc:
+            if args.embed_provisioning_profile_when_signing_ad_hoc:
+                if not args.profiles_dir:
+                    raise RuntimeError(
+                        "Path to directory with provisioning profile files should be set when selection of provisioning profile is enabled for ad-hoc code signing."
+                    )
+                if not args.ad_hoc_codesign_identity:
+                    raise RuntimeError(
+                        "Code signing identity should be set when selection of provisioning profile is enabled for ad-hoc code signing."
+                    )
+                profile_selection_context = signing_context_with_profile_selection(
+                    info_plist_source=args.info_plist_source,
+                    info_plist_destination=args.info_plist_destination,
+                    provisioning_profiles_dir=args.profiles_dir,
+                    entitlements_path=args.entitlements,
+                    platform=args.platform,
+                    list_codesign_identities=AdHocListCodesignIdentities(
+                        original=list_codesign_identities,
+                        subject_common_name=args.ad_hoc_codesign_identity,
+                    ),
+                    log_file_path=args.log_file,
+                    should_use_fast_provisioning_profile_parsing=args.fast_provisioning_profile_parsing,
+                    strict_provisioning_profile_search=args.strict_provisioning_profile_search,
+                    provisioning_profile_filter=args.provisioning_profile_filter,
+                )
+            else:
+                profile_selection_context = None
+            signing_context = AdhocSigningContext(
+                codesign_identity=args.ad_hoc_codesign_identity,
+                profile_selection_context=profile_selection_context,
+            )
+            selected_identity_argument = args.ad_hoc_codesign_identity
+        else:
+            if not args.profiles_dir:
+                raise RuntimeError(
+                    "Path to directory with provisioning profile files should be set when signing is not ad-hoc."
+                )
+            signing_context = signing_context_with_profile_selection(
+                info_plist_source=args.info_plist_source,
+                info_plist_destination=args.info_plist_destination,
+                provisioning_profiles_dir=args.profiles_dir,
+                entitlements_path=args.entitlements,
+                platform=args.platform,
+                list_codesign_identities=list_codesign_identities,
+                log_file_path=args.log_file,
+                should_use_fast_provisioning_profile_parsing=args.fast_provisioning_profile_parsing,
+                strict_provisioning_profile_search=args.strict_provisioning_profile_search,
+                provisioning_profile_filter=args.provisioning_profile_filter,
+            )
+            selected_identity_argument = (
+                signing_context.selected_profile_info.identity.fingerprint
+            )
+    else:
+        signing_context = None
+        selected_identity_argument = None
+
+    with args.spec.open(mode="rb") as spec_file:
+        spec = json.load(spec_file, object_hook=lambda d: BundleSpecItem(**d))
+        spec = _deduplicate_spec(spec)
+
+    incremental_context = _incremental_context(
+        incremental_state_path=args.incremental_state,
+        codesigned=args.codesign,
+        codesign_configuration=args.codesign_configuration,
+        codesign_identity=selected_identity_argument,
+        codesign_arguments=args.codesign_args,
+        versioned_if_macos=args.versioned_if_macos,
+    )
+
+    incremental_state = assemble_bundle(
+        spec=spec,
+        bundle_path=args.output,
+        incremental_context=incremental_context,
+        check_conflicts=args.check_conflicts,
+        versioned_if_macos=args.versioned_if_macos,
+    )
+
+    swift_support_args = _swift_support_arguments(
+        args_parser,
+        args,
+    )
+
+    if swift_support_args:
+        swift_stdlib_paths = run_swift_stdlib_tool(
+            bundle_path=args.output,
+            args=swift_support_args,
+        )
+    else:
+        swift_stdlib_paths = []
+
+    if args.codesign:
+        # Vendored frameworks/bundles could already be pre-signed, in which case,
+        # re-signing them requires modifying them. On RE, the umask is such that
+        # copied files (when constructing the bundle) are not writable.
+        make_dir_recursively_writable(args.output)
+        if signing_context is None:
+            raise RuntimeError(
+                "Expected signing context to be created before bundling is done if codesign is requested."
+            )
+
+        bundle_path = CodesignedPath(
+            path=args.output, entitlements=args.entitlements, flags=args.codesign_args
+        )
+        codesign_on_copy_paths = [
+            CodesignedPath(
+                path=bundle_path.path / i.dst,
+                entitlements=(
+                    Path(i.codesign_entitlements) if i.codesign_entitlements else None
+                ),
+                flags=(
+                    i.codesign_flags_override
+                    if (i.codesign_flags_override is not None)
+                    else args.codesign_args
+                ),
+            )
+            for i in spec
+            if i.codesign_on_copy
+        ] + [
+            CodesignedPath(
+                path=bundle_path.path / path,
+                entitlements=None,
+                flags=args.codesign_args,
+            )
+            for path in swift_stdlib_paths
+        ]
+
+        codesign_bundle(
+            bundle_path=bundle_path,
+            signing_context=signing_context,
+            platform=args.platform,
+            codesign_on_copy_paths=codesign_on_copy_paths,
+            codesign_tool=args.codesign_tool,
+            codesign_configuration=args.codesign_configuration,
+        )
+
+    if incremental_state:
+        if incremental_context is None:
+            raise RuntimeError(
+                "Expected incremental context to be present when incremental state is non-null."
+            )
+        _write_incremental_state(
+            spec=spec,
+            items=incremental_state,
+            path=args.incremental_state,
+            codesigned=args.codesign,
+            codesign_configuration=args.codesign_configuration,
+            selected_codesign_identity=selected_identity_argument,
+            codesign_arguments=args.codesign_args,
+            swift_stdlib_paths=swift_stdlib_paths,
+            versioned_if_macos=args.versioned_if_macos,
+            incremental_context=incremental_context,
+        )
+
+    if profiling_enabled:
+        pr.disable()
+        with open(args.profile_output, "w") as s:
+            sortby = pstats.SortKey.CUMULATIVE
+            ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
+            ps.print_stats()
+
+
+def _incremental_context(
+    incremental_state_path: Optional[Path],
+    codesigned: bool,
+    codesign_configuration: CodesignConfiguration,
+    codesign_identity: Optional[str],
+    codesign_arguments: List[str],
+    versioned_if_macos: bool,
+) -> Optional[IncrementalContext]:
+    action_metadata = action_metadata_if_present(_METADATA_PATH_KEY)
+    if action_metadata is None:
+        # Environment variable not set, running in non-incremental mode.
+        return None
+    # If there is no incremental state or we failed to parse it (maybe because of a format change),
+    # do a clean (non-incremental) assemble right now but generate proper state for the next run.
+    incremental_state = (
+        _read_incremental_state(incremental_state_path)
+        if incremental_state_path
+        else None
+    )
+    return IncrementalContext(
+        metadata=action_metadata,
+        state=incremental_state,
+        codesigned=codesigned,
+        codesign_configuration=codesign_configuration,
+        codesign_identity=codesign_identity,
+        codesign_arguments=codesign_arguments,
+        versioned_if_macos=versioned_if_macos,
+    )
+
+
+def _read_incremental_state(path: Path) -> Optional[IncrementalState]:
+    logging.getLogger(__name__).info(f"Will read incremental state from `{path}`.")
+    if not path.exists():
+        logging.getLogger(__name__).warning(
+            f"File with incremental state doesn't exist at `{path}`."
+        )
+        return None
+    try:
+        with path.open() as f:
+            return parse_incremental_state(f)
+    except Exception:
+        logging.getLogger(__name__).exception("Failed to read incremental state")
+        return None
+    finally:
+        # If something goes wrong and we don't delete the file, we would probably
+        # end up in a faulty state where the incremental state doesn't match the
+        # output. Hence delete it early.
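+        # A fresh state file is written via `_write_incremental_state` once
+        # the bundle has been assembled successfully.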
+ path.unlink() + + +def _swift_support_arguments( + parser: argparse.ArgumentParser, + args: argparse.Namespace, +) -> Optional[SwiftSupportArguments]: + if not args.swift_stdlib_command: + return None + if not args.binary_destination: + parser.error( + "Expected `--binary-destination` argument to be specified when `--swift-stdlib-command` is present." + ) + if not args.appclips_destination: + parser.error( + "Expected `--appclips-destination` argument to be specified when `--swift-stdlib-command` is present." + ) + if not args.frameworks_destination: + parser.error( + "Expected `--frameworks-destination` argument to be specified when `--swift-stdlib-command` is present." + ) + if not args.extensionkit_extensions_destination: + parser.error( + "Expected `--extensionkit-extensions-destination` argument to be specified when `--swift-stdlib-command` is present." + ) + if not args.plugins_destination: + parser.error( + "Expected `--plugins-destination` argument to be specified when `--swift-stdlib-command` is present." + ) + if not args.platform: + parser.error( + "Expected `--platform` argument to be specified when `--swift-stdlib-command` is present." + ) + if not args.sdk_root: + parser.error( + "Expected `--sdk-root` argument to be specified when `--swift-stdlib-command` is present." + ) + return SwiftSupportArguments( + swift_stdlib_command=args.swift_stdlib_command, + binary_destination=args.binary_destination, + appclips_destination=args.appclips_destination, + frameworks_destination=args.frameworks_destination, + extensionkit_extensions_destination=args.extensionkit_extensions_destination, + plugins_destination=args.plugins_destination, + platform=args.platform, + sdk_root=args.sdk_root, + ) + + +def _write_incremental_state( + spec: List[BundleSpecItem], + items: List[IncrementalStateItem], + path: Path, + codesigned: bool, + codesign_configuration: CodesignConfiguration, + selected_codesign_identity: Optional[str], + codesign_arguments: List[str], + swift_stdlib_paths: List[Path], + versioned_if_macos: bool, + incremental_context: IncrementalContext, +) -> None: + state = IncrementalState( + items, + codesigned=codesigned, + codesign_configuration=codesign_configuration, + codesigned_on_copy=[ + codesigned_on_copy_item( + path=Path(i.dst), + entitlements=( + Path(i.codesign_entitlements) if i.codesign_entitlements else None + ), + incremental_context=incremental_context, + codesign_flags_override=i.codesign_flags_override, + ) + for i in spec + if i.codesign_on_copy + ], + codesign_identity=selected_codesign_identity, + codesign_arguments=codesign_arguments, + swift_stdlib_paths=swift_stdlib_paths, + versioned_if_macos=versioned_if_macos, + ) + path.touch() + try: + with path.open(mode="w") as f: + json.dump(state, f, cls=IncrementalStateJSONEncoder) + except Exception: + path.unlink() + raise + + +def _deduplicate_spec(spec: List[BundleSpecItem]) -> List[BundleSpecItem]: + # It's possible to have the same spec multiple times as different + # apple_resource() targets can refer to the _same_ resource file. + # + # On RE, we're not allowed to overwrite files, so prevent doing + # identical file copies. + # + # Do not reorder spec items to achieve determinism. + # Rely on the fact that `dict` preserves key order. + deduplicated_spec = list(dict.fromkeys(spec)) + # Force same sorting as in Buck1 for `SourcePathWithAppleBundleDestination` + # WARNING: This logic is tightly coupled with how spec filtering is done in `_filter_conflicting_paths` method during incremental bundling. 
Don't change unless you fully understand what is going on here. + deduplicated_spec.sort() + return deduplicated_spec + + +def _setup_logging( + stderr_level: int, file_level: int, log_path: Optional[Path] +) -> None: + stderr_handler = logging.StreamHandler() + stderr_handler.setLevel(stderr_level) + log_format = ( + "%(asctime)s - %(name)s - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)" + ) + stderr_handler.setFormatter( + ColoredLogFormatter(log_format) + if sys.stderr.isatty() + else logging.Formatter(log_format) + ) + + handlers: List[logging.Handler] = [stderr_handler] + + if log_path: + file_handler = logging.FileHandler(log_path, encoding="utf-8") + file_handler.setFormatter(logging.Formatter(log_format)) + file_handler.setLevel(file_level) + handlers.append(file_handler) + + logging.basicConfig(level=logging.DEBUG, handlers=handlers) + + +class ColoredLogFormatter(logging.Formatter): + + _colors: Dict[int, str] = { + logging.DEBUG: "\x1b[m", + logging.INFO: "\x1b[37m", + logging.WARNING: "\x1b[33m", + logging.ERROR: "\x1b[31m", + logging.CRITICAL: "\x1b[1;31m", + } + _reset_color = "\x1b[0m" + + def __init__(self, text_format: str) -> None: + self.text_format = text_format + + def format(self, record: logging.LogRecord) -> str: + colored_format = ( + self._colors[record.levelno] + self.text_format + self._reset_color + ) + formatter = logging.Formatter(colored_format) + return formatter.format(record) + + +if __name__ == "__main__": + _main() diff --git a/prelude/apple/tools/bundling/swift_support.py b/prelude/apple/tools/bundling/swift_support.py new file mode 100644 index 0000000000000..58552408e07f8 --- /dev/null +++ b/prelude/apple/tools/bundling/swift_support.py @@ -0,0 +1,84 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +import logging +import os +import shlex +import shutil +import subprocess +import tempfile +from dataclasses import dataclass +from pathlib import Path +from typing import List, Optional, Union + +_LOGGER: logging.Logger = logging.getLogger(__name__) + + +@dataclass +class SwiftSupportArguments: + swift_stdlib_command: str + binary_destination: Path + appclips_destination: Path + frameworks_destination: Path + extensionkit_extensions_destination: Path + plugins_destination: Path + platform: str + sdk_root: Path + + +def run_swift_stdlib_tool(bundle_path: Path, args: SwiftSupportArguments) -> List[Path]: + # TODO(T181556849) when incremental bundling is on, binary, frameworks and plugins are not changed, signing identity is unchanged skip this step. + bundle_relative_output_paths = [] + with tempfile.TemporaryDirectory() as tmp_dir: + # When signing, swift-stdlib-tool needs a proper PATH environment variable. + # Assume the current environment has it already. + env = os.environ.copy() + # xcrun doesn't like relative paths + env["SDKROOT"] = os.path.abspath(args.sdk_root) + cmd = _execution_command(bundle_path, args, tmp_dir) + _LOGGER.info( + f"Running Swift stdlib tool with command: `{cmd}` and environment `{env}`." 
+ ) + result = subprocess.run(cmd, env=env) + result.check_returncode() + outputs = sorted(os.listdir(tmp_dir)) + frameworks_path = bundle_path / args.frameworks_destination + if outputs: + frameworks_path.mkdir(exist_ok=True) + for output in outputs: + shutil.move(os.path.join(tmp_dir, output), frameworks_path) + bundle_relative_output_paths = [ + args.frameworks_destination / o for o in outputs + ] + return bundle_relative_output_paths + + +def _execution_command( + bundle_path: Path, + args: SwiftSupportArguments, + tmp_dir: str, +) -> List[Union[str, Path]]: + return shlex.split(args.swift_stdlib_command) + [ + "--copy", + "--strip-bitcode", + "--scan-executable", + bundle_path / args.binary_destination, + "--scan-executable", + bundle_path / args.appclips_destination, + "--scan-folder", + bundle_path / args.frameworks_destination, + "--scan-folder", + bundle_path / args.extensionkit_extensions_destination, + "--scan-folder", + bundle_path / args.plugins_destination, + "--destination", + tmp_dir, + "--platform", + args.platform, + ] diff --git a/prelude/apple/tools/bundling/test_resources/newer_version_action_metadata.json b/prelude/apple/tools/bundling/test_resources/newer_version_action_metadata.json new file mode 100644 index 0000000000000..11d045815d8aa --- /dev/null +++ b/prelude/apple/tools/bundling/test_resources/newer_version_action_metadata.json @@ -0,0 +1,6 @@ +{ + "version": 2, + "data": { + "something": [] + } +} diff --git a/prelude/apple/tools/bundling/test_resources/newer_version_incremental_state.json b/prelude/apple/tools/bundling/test_resources/newer_version_incremental_state.json new file mode 100644 index 0000000000000..79b549518e6c8 --- /dev/null +++ b/prelude/apple/tools/bundling/test_resources/newer_version_incremental_state.json @@ -0,0 +1,6 @@ +{ + "version": 7, + "data": { + "something": [] + } +} diff --git a/prelude/apple/tools/bundling/test_resources/the.broken_json b/prelude/apple/tools/bundling/test_resources/the.broken_json new file mode 100644 index 0000000000000..843527a50bb35 --- /dev/null +++ b/prelude/apple/tools/bundling/test_resources/the.broken_json @@ -0,0 +1,2 @@ +{ + "version": \ No newline at end of file diff --git a/prelude/apple/tools/bundling/test_resources/valid_action_metadata.json b/prelude/apple/tools/bundling/test_resources/valid_action_metadata.json new file mode 100644 index 0000000000000..ad9e8845ee025 --- /dev/null +++ b/prelude/apple/tools/bundling/test_resources/valid_action_metadata.json @@ -0,0 +1,13 @@ +{ + "version": 1, + "digests": [ + { + "path": "repo/foo.txt", + "digest": "foo_digest" + }, + { + "path": "buck-out/bar.txt", + "digest": "bar_digest" + } + ] +} diff --git a/prelude/apple/tools/bundling/test_resources/valid_incremental_state.json b/prelude/apple/tools/bundling/test_resources/valid_incremental_state.json new file mode 100644 index 0000000000000..ff8bc2e49b040 --- /dev/null +++ b/prelude/apple/tools/bundling/test_resources/valid_incremental_state.json @@ -0,0 +1,44 @@ +{ + "items": [ + { + "source": "repo/foo.txt", + "destination_relative_to_bundle": "foo.txt", + "digest": "foo_digest" + }, + { + "source": "buck-out/bar.txt", + "destination_relative_to_bundle": "Resources/bar.txt", + "digest": "bar_digest" + }, + { + "source": "buck-out/bar", + "destination_relative_to_bundle": "Resources/bar", + "resolved_symlink": "bar.txt" + } + ], + "codesign_configuration": null, + "codesigned": true, + "codesigned_on_copy": [ + { + "path": "Resources/bar.txt" + }, + { + "path": "Resources/baz.txt", + 
"entitlements_digest": "abc" + }, + { + "path": "Resources/qux.txt", + "codesign_flags_override": ["--deep", "--force"] + } + ], + "codesign_identity": "Johny Appleseed", + "codesign_arguments": [ + "--force", + "--deep" + ], + "versioned_if_macos": true, + "swift_stdlib_paths": [ + "Frameworks/libswiftCore.dylib" + ], + "version": 7 +} diff --git a/prelude/apple/tools/code_signing/BUCK.v2 b/prelude/apple/tools/code_signing/BUCK.v2 new file mode 100644 index 0000000000000..85f36aba315f9 --- /dev/null +++ b/prelude/apple/tools/code_signing/BUCK.v2 @@ -0,0 +1,59 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") +load("@prelude//apple/tools/defs.bzl", "meta_python_test") + +oncall("build_infra") + +source_listing() + +configured_alias( + name = "dummy_binary_for_signing_configured", + actual = ":dummy_binary_for_signing", + platform = "config//platform/macos:base", +) + +cxx_binary( + name = "dummy_binary_for_signing", + srcs = ["dummy_binary_for_signing.c"], + default_target_platform = "config//platform/macos:base", +) + +python_library( + name = "lib", + srcs = glob( + [ + "*.py", + ], + exclude = [ + "*_test.py", + "main.py", + ], + ), + resources = [":dummy_binary_for_signing_configured"], + visibility = ["PUBLIC"], + deps = [ + "prelude//apple/tools:plistlib_utils", + "prelude//apple/tools/info_plist_processor:process", + ], +) + +meta_python_test( + name = "tests", + srcs = glob(["*_test.py"]), + resources = glob([ + "test_resources/*", + ]), + deps = [ + "fbsource//third-party/pkg_resources:pkg_resources", + ":lib", + ], +) + +python_binary( + name = "codesign_bundle", + main = "main.py", + visibility = ["PUBLIC"], + deps = [ + "fbsource//third-party/pypi/typed-argument-parser:typed-argument-parser", + ":lib", + ], +) diff --git a/prelude/apple/tools/code_signing/app_id.py b/prelude/apple/tools/code_signing/app_id.py new file mode 100644 index 0000000000000..d657a4da94326 --- /dev/null +++ b/prelude/apple/tools/code_signing/app_id.py @@ -0,0 +1,62 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +from __future__ import annotations + +import re +from dataclasses import dataclass +from enum import Enum +from typing import Any, Dict, Optional + + +@dataclass +class AppId: + team_id: str + bundle_id: str + + class _ReGroupName(str, Enum): + team_id = "team_id" + bundle_id = "bundle_id" + + _re_string: str = "^(?P<{team_id}>[A-Z0-9]{{10}})\\.(?P<{bundle_id}>.+)$".format( + team_id=_ReGroupName.team_id, + bundle_id=_ReGroupName.bundle_id, + ) + _re_pattern: re.Pattern[str] = re.compile(_re_string) + + # Takes a application identifier and splits it into Team ID and bundle ID. + # Prefix is always a ten-character alphanumeric sequence. Bundle ID may be a fully-qualified name or a wildcard ending in *. + @classmethod + def from_string(cls, string: str) -> AppId: + match = re.match(cls._re_pattern, string) + if not match: + raise RuntimeError( + "Malformed app ID string: '{}'. 
" + "We expected a prefix of a ten-character alphanumeric sequence and a Bundle ID which may be a fully-qualified name or a wildcard ending in '*'.".format( + string + ) + ) + return AppId( + match.group(cls._ReGroupName.team_id), + match.group(cls._ReGroupName.bundle_id), + ) + + # Returns the App ID if it can be inferred from keys in the entitlement. Otherwise, it returns `None`. + @staticmethod + def infer_from_entitlements(entitlements: Dict[str, Any]) -> Optional[AppId]: + try: + keychain_access_groups = entitlements.get("keychain-access-groups") + if not keychain_access_groups: + return None + app_id_string = keychain_access_groups[0] + return AppId.from_string(app_id_string) + except Exception as e: + raise RuntimeError( + "Error when parsing the entitlements for the app ID: {}".format(e) + ) diff --git a/prelude/apple/tools/code_signing/app_id_test.py b/prelude/apple/tools/code_signing/app_id_test.py new file mode 100644 index 0000000000000..ab1c32bb83acd --- /dev/null +++ b/prelude/apple/tools/code_signing/app_id_test.py @@ -0,0 +1,58 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +import plistlib +import unittest + +import pkg_resources + +from .app_id import AppId + + +class TestAppId(unittest.TestCase): + def test_string_parsing(self): + result = AppId.from_string("ABCDE12345.com.example.TestApp") + expected = AppId("ABCDE12345", "com.example.TestApp") + self.assertEqual(expected, result) + + result = AppId.from_string("ABCDE12345.*") + expected = AppId("ABCDE12345", "*") + self.assertEqual(expected, result) + + with self.assertRaisesRegex( + RuntimeError, + "Malformed app ID string: 'invalid.'. We expected a prefix of a ten-character alphanumeric sequence and a Bundle ID which may be a fully-qualified name or a wildcard ending in '*'.", + ): + _ = AppId.from_string("invalid.") + + def test_entitlements_parsing(self): + with pkg_resources.resource_stream( + __name__, "test_resources/Entitlements.plist" + ) as file: + entitlements = plistlib.load(file) + result = AppId.infer_from_entitlements(entitlements) + expected = AppId("ABCDE12345", "com.example.TestApp") + self.assertEqual(expected, result) + + invalid_file = b""" + + + + keychain-access-groups + + com.facebook.CommonTestHost + p + + """ + + invalid_entitlement = plistlib.loads(invalid_file) + with self.assertRaisesRegex( + RuntimeError, + "Error when parsing the entitlements for the app ID: Malformed app ID string: 'com.facebook.CommonTestHost'. " + "We expected a prefix of a ten-character alphanumeric sequence and a Bundle ID which may be a fully-qualified name or a wildcard ending in '*'.", + ): + AppId.infer_from_entitlements(invalid_entitlement) diff --git a/prelude/apple/tools/code_signing/apple_platform.py b/prelude/apple/tools/code_signing/apple_platform.py new file mode 100644 index 0000000000000..ee32486c85cc9 --- /dev/null +++ b/prelude/apple/tools/code_signing/apple_platform.py @@ -0,0 +1,49 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + +from enum import Enum +from typing import Optional + + +class ApplePlatform(str, Enum): + macos = "macosx" + ios_device = "iphoneos" + ios_simulator = "iphonesimulator" + watch_device = "watchos" + watch_simulator = "watchsimulator" + tv_device = "appletvos" + tv_simulator = "appletvsimulator" + mac_catalyst = "maccatalyst" + driver_kit = "driverkit" + + def is_desktop(self) -> bool: + return self == ApplePlatform.macos or self == ApplePlatform.mac_catalyst + + def provisioning_profile_name(self) -> Optional[str]: + """ + Returns: + The platform name as it could be found inside provisioning profiles and used to match them. + Not all platforms use provisioning profiles, those will return `None`. + """ + if self == ApplePlatform.ios_device or self == ApplePlatform.watch_device: + return "iOS" + elif self == ApplePlatform.tv_device: + return "tvOS" + else: + return None + + def embedded_provisioning_profile_path(self) -> str: + """ + Returns: + The name of the provisioning profile in the final application bundle. + """ + if self.is_desktop(): + return "Contents/embedded.provisionprofile" + else: + return "embedded.mobileprovision" diff --git a/prelude/apple/tools/code_signing/codesign_bundle.py b/prelude/apple/tools/code_signing/codesign_bundle.py new file mode 100644 index 0000000000000..46307f9f570e3 --- /dev/null +++ b/prelude/apple/tools/code_signing/codesign_bundle.py @@ -0,0 +1,637 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +import asyncio +import importlib.resources +import logging +import os +import shutil +import subprocess +import tempfile +import uuid +from contextlib import ExitStack +from dataclasses import dataclass +from enum import Enum +from pathlib import Path +from typing import Any, cast, Dict, List, Optional, Union + +from apple.tools.plistlib_utils import detect_format_and_load + +from .apple_platform import ApplePlatform +from .codesign_command_factory import ( + DefaultCodesignCommandFactory, + DryRunCodesignCommandFactory, + ICodesignCommandFactory, +) +from .fast_adhoc import is_fast_adhoc_codesign_allowed, should_skip_adhoc_signing_path +from .identity import CodeSigningIdentity +from .info_plist_metadata import InfoPlistMetadata +from .list_codesign_identities import IListCodesignIdentities +from .prepare_code_signing_entitlements import prepare_code_signing_entitlements +from .prepare_info_plist import prepare_info_plist +from .provisioning_profile_diagnostics import ( + interpret_provisioning_profile_diagnostics, + META_IOS_BUILD_AND_RUN_ON_DEVICE_LINK, + META_IOS_PROVISIONING_PROFILES_COMMAND, + META_IOS_PROVISIONING_PROFILES_LINK, +) +from .provisioning_profile_metadata import ProvisioningProfileMetadata +from .provisioning_profile_selection import ( + CodeSignProvisioningError, + select_best_provisioning_profile, + SelectedProvisioningProfileInfo, +) +from .read_provisioning_profile_command_factory import ( + DefaultReadProvisioningProfileCommandFactory, + IReadProvisioningProfileCommandFactory, +) + +_default_read_provisioning_profile_command_factory = ( + DefaultReadProvisioningProfileCommandFactory() +) + +_LOGGER: logging.Logger = logging.getLogger(__name__) + + +@dataclass +class CodesignedPath: + path: Path + """ + Path relative to bundle root which 
needs to be codesigned + """ + entitlements: Optional[Path] + """ + Path to entitlements to be used when codesigning, relative to buck project + """ + flags: List[str] + """ + Flags to be passed to codesign command when codesigning this particular path + """ + + +def _log_codesign_identities(identities: List[CodeSigningIdentity]) -> None: + if len(identities) == 0: + _LOGGER.warning("ZERO codesign identities available") + else: + _LOGGER.info("Listing available codesign identities") + for identity in identities: + _LOGGER.info( + f" Subject Common Name: {identity.subject_common_name}, Fingerprint: {identity.fingerprint}" + ) + + +def _select_provisioning_profile( + info_plist_metadata: InfoPlistMetadata, + provisioning_profiles_dir: Path, + entitlements_path: Optional[Path], + platform: ApplePlatform, + list_codesign_identities: IListCodesignIdentities, + should_use_fast_provisioning_profile_parsing: bool, + strict_provisioning_profile_search: bool, + provisioning_profile_filter: Optional[str], + log_file_path: Optional[Path] = None, +) -> SelectedProvisioningProfileInfo: + read_provisioning_profile_command_factory = ( + _default_read_provisioning_profile_command_factory + ) + identities = list_codesign_identities.list_codesign_identities() + _log_codesign_identities(identities) + _LOGGER.info( + f"Fast provisioning profile parsing enabled: {should_use_fast_provisioning_profile_parsing}" + ) + provisioning_profiles = [] + if should_use_fast_provisioning_profile_parsing: + provisioning_profiles = asyncio.run( + _fast_read_provisioning_profiles_async( + provisioning_profiles_dir, + read_provisioning_profile_command_factory, + ) + ) + else: + provisioning_profiles = _read_provisioning_profiles( + provisioning_profiles_dir, + read_provisioning_profile_command_factory, + ) + if not provisioning_profiles: + raise CodeSignProvisioningError( + ( + f"\n\nFailed to find any provisioning profiles. Please make sure to install required provisioning profiles and make sure they are located at '{provisioning_profiles_dir}'.\n\n" + f"Execute `{META_IOS_PROVISIONING_PROFILES_COMMAND}` to download the profiles.\n" + f"Please follow the wiki to build & run on device: {META_IOS_BUILD_AND_RUN_ON_DEVICE_LINK}.\n" + f"Provisioning profiles for your app can also be downloaded from {META_IOS_PROVISIONING_PROFILES_LINK}.\n" + ) + ) + entitlements = _read_entitlements_file(entitlements_path) + selected_profile_info, mismatches = select_best_provisioning_profile( + info_plist_metadata, + identities, + provisioning_profiles, + entitlements, + platform, + strict_provisioning_profile_search, + provisioning_profile_filter, + ) + if selected_profile_info is None: + if not mismatches: + raise RuntimeError( + f"Expected diagnostics information for at least one mismatching provisioning profile when `{provisioning_profiles_dir}` directory is not empty." 
+ ) + raise CodeSignProvisioningError( + interpret_provisioning_profile_diagnostics( + diagnostics=mismatches, + bundle_id=info_plist_metadata.bundle_id, + provisioning_profiles_dir=provisioning_profiles_dir, + identities=identities, + log_file_path=log_file_path, + ) + ) + return selected_profile_info + + +@dataclass +class SigningContextWithProfileSelection: + info_plist_source: Path + info_plist_destination: Path + info_plist_metadata: InfoPlistMetadata + selected_profile_info: SelectedProvisioningProfileInfo + + +@dataclass +class AdhocSigningContext: + codesign_identity: str + profile_selection_context: Optional[SigningContextWithProfileSelection] + + def __init__( + self, + codesign_identity: Optional[str] = None, + profile_selection_context: Optional[SigningContextWithProfileSelection] = None, + ) -> None: + self.codesign_identity = codesign_identity or "-" + self.profile_selection_context = profile_selection_context + + +def signing_context_with_profile_selection( + info_plist_source: Path, + info_plist_destination: Path, + provisioning_profiles_dir: Path, + entitlements_path: Optional[Path], + platform: ApplePlatform, + list_codesign_identities: IListCodesignIdentities, + log_file_path: Optional[Path] = None, + should_use_fast_provisioning_profile_parsing: bool = False, + strict_provisioning_profile_search: bool = False, + provisioning_profile_filter: Optional[str] = None, +) -> SigningContextWithProfileSelection: + with open(info_plist_source, mode="rb") as info_plist_file: + info_plist_metadata = InfoPlistMetadata.from_file(info_plist_file) + selected_profile_info = _select_provisioning_profile( + info_plist_metadata=info_plist_metadata, + provisioning_profiles_dir=provisioning_profiles_dir, + entitlements_path=entitlements_path, + platform=platform, + list_codesign_identities=list_codesign_identities, + log_file_path=log_file_path, + should_use_fast_provisioning_profile_parsing=should_use_fast_provisioning_profile_parsing, + strict_provisioning_profile_search=strict_provisioning_profile_search, + provisioning_profile_filter=provisioning_profile_filter, + ) + + return SigningContextWithProfileSelection( + info_plist_source, + info_plist_destination, + info_plist_metadata, + selected_profile_info, + ) + + +# IMPORTANT: This enum is a part of incremental API, amend carefully. 
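To make the warning above concrete: because the enum below mixes in `str`, its members behave as their raw string values under JSON serialization, which is what gets persisted into, and parsed back from, the incremental state file. A small self-contained sketch mirroring the definition that follows:

    import json
    from enum import Enum

    class CodesignConfiguration(str, Enum):  # mirrors the enum defined just below
        fastAdhoc = "fast-adhoc"
        dryRun = "dry-run"

    # Members serialize as their raw strings, so renaming or re-valuing a member
    # would silently invalidate previously written incremental state.
    assert json.dumps(CodesignConfiguration.dryRun) == '"dry-run"'
    assert CodesignConfiguration("fast-adhoc") is CodesignConfiguration.fastAdhoc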
+class CodesignConfiguration(str, Enum): + fastAdhoc = "fast-adhoc" + dryRun = "dry-run" + + +def codesign_bundle( + bundle_path: CodesignedPath, + signing_context: Union[AdhocSigningContext, SigningContextWithProfileSelection], + platform: ApplePlatform, + codesign_on_copy_paths: List[CodesignedPath], + codesign_tool: Optional[Path] = None, + codesign_configuration: Optional[CodesignConfiguration] = None, +) -> None: + with tempfile.TemporaryDirectory() as tmp_dir: + if isinstance(signing_context, SigningContextWithProfileSelection): + selection_profile_context = signing_context + elif isinstance(signing_context, AdhocSigningContext): + selection_profile_context = signing_context.profile_selection_context + else: + raise RuntimeError( + f"Unexpected type of signing context `{type(signing_context)}`" + ) + + if selection_profile_context: + bundle_path_with_prepared_entitlements = ( + _prepare_entitlements_and_info_plist( + bundle_path=bundle_path, + platform=platform, + signing_context=selection_profile_context, + tmp_dir=tmp_dir, + ) + ) + selected_identity_fingerprint = ( + selection_profile_context.selected_profile_info.identity.fingerprint + ) + else: + if not isinstance(signing_context, AdhocSigningContext): + raise AssertionError( + f"Expected `AdhocSigningContext`, got `{type(signing_context)}` instead." + ) + if signing_context.profile_selection_context: + raise AssertionError( + "Expected no profile selection context in `AdhocSigningContext` when `selection_profile_context` is `None`." + ) + bundle_path_with_prepared_entitlements = bundle_path + selected_identity_fingerprint = signing_context.codesign_identity + + if codesign_configuration is CodesignConfiguration.dryRun: + if codesign_tool is None: + raise RuntimeError( + "Expected codesign tool not to be the default one when dry run codesigning is requested." 
+ ) + _dry_codesign_everything( + root=bundle_path_with_prepared_entitlements, + codesign_on_copy_paths=codesign_on_copy_paths, + identity_fingerprint=selected_identity_fingerprint, + tmp_dir=tmp_dir, + codesign_tool=codesign_tool, + platform=platform, + ) + else: + fast_adhoc_signing_enabled = ( + codesign_configuration is CodesignConfiguration.fastAdhoc + and is_fast_adhoc_codesign_allowed() + ) + _LOGGER.info(f"Fast adhoc signing enabled: {fast_adhoc_signing_enabled}") + _codesign_everything( + root=bundle_path_with_prepared_entitlements, + codesign_on_copy_paths=codesign_on_copy_paths, + identity_fingerprint=selected_identity_fingerprint, + tmp_dir=tmp_dir, + codesign_command_factory=DefaultCodesignCommandFactory(codesign_tool), + platform=platform, + fast_adhoc_signing=fast_adhoc_signing_enabled, + ) + + +def _prepare_entitlements_and_info_plist( + bundle_path: CodesignedPath, + platform: ApplePlatform, + signing_context: SigningContextWithProfileSelection, + tmp_dir: str, +) -> CodesignedPath: + info_plist_metadata = signing_context.info_plist_metadata + selected_profile = signing_context.selected_profile_info.profile + prepared_entitlements_path = prepare_code_signing_entitlements( + bundle_path.entitlements, + info_plist_metadata.bundle_id, + selected_profile, + tmp_dir, + ) + prepared_info_plist_path = prepare_info_plist( + signing_context.info_plist_source, + info_plist_metadata, + selected_profile, + tmp_dir, + ) + os.replace( + prepared_info_plist_path, + bundle_path.path / signing_context.info_plist_destination, + ) + shutil.copy2( + selected_profile.file_path, + bundle_path.path / platform.embedded_provisioning_profile_path(), + ) + return CodesignedPath( + path=bundle_path.path, + entitlements=prepared_entitlements_path, + flags=bundle_path.flags, + ) + + +async def _fast_read_provisioning_profiles_async( + dirpath: Path, + read_provisioning_profile_command_factory: IReadProvisioningProfileCommandFactory, +) -> List[ProvisioningProfileMetadata]: + tasks = [] + for f in os.listdir(dirpath): + if f.endswith(".mobileprovision") or f.endswith(".provisionprofile"): + filepath = dirpath / f + tasks.append( + _provisioning_profile_from_file_path_async( + filepath, + read_provisioning_profile_command_factory, + should_use_fast_provisioning_profile_parsing=True, + ) + ) + results = await asyncio.gather(*tasks) + return cast(List[ProvisioningProfileMetadata], results) + + +async def _provisioning_profile_from_file_path_async( + path: Path, + read_provisioning_profile_command_factory: IReadProvisioningProfileCommandFactory, + should_use_fast_provisioning_profile_parsing: bool, +) -> ProvisioningProfileMetadata: + loop = asyncio.get_running_loop() + return await loop.run_in_executor( + None, + _provisioning_profile_from_file_path, + path, + read_provisioning_profile_command_factory, + should_use_fast_provisioning_profile_parsing, + ) + + +def _read_provisioning_profiles( + dirpath: Path, + read_provisioning_profile_command_factory: IReadProvisioningProfileCommandFactory, +) -> List[ProvisioningProfileMetadata]: + + return [ + _provisioning_profile_from_file_path( + dirpath / f, + read_provisioning_profile_command_factory, + should_use_fast_provisioning_profile_parsing=False, + ) + for f in os.listdir(dirpath) + if (f.endswith(".mobileprovision") or f.endswith(".provisionprofile")) + ] + + +def _provisioning_profile_from_file_path( + path: Path, + read_provisioning_profile_command_factory: IReadProvisioningProfileCommandFactory, + should_use_fast_provisioning_profile_parsing: bool, +) -> 
ProvisioningProfileMetadata:
+    if should_use_fast_provisioning_profile_parsing:
+        # Provisioning profiles have a plist embedded in them that we can extract directly.
+        # This is much faster than calling an external command like openssl.
+        with open(path, "rb") as f:
+            content = f.read()
+            start_index = content.find(b"<?xml")
+            end_index = content.find(b"</plist>", start_index) + len(b"</plist>")
+            if start_index >= 0 and end_index >= 0:
+                plist_data = content[start_index:end_index]
+                return ProvisioningProfileMetadata.from_provisioning_profile_file_content(
+                    path, plist_data
+                )
+            else:
+                _LOGGER.warning(
+                    f"Failed to find plist in provisioning profile at {path}. Falling back to slow parsing."
+                )
+
+    # Fallback to slow parsing if fast parsing is disabled or fails
+    return _provisioning_profile_from_file_path_using_factory(
+        path, read_provisioning_profile_command_factory
+    )
+
+
+def _provisioning_profile_from_file_path_using_factory(
+    path: Path,
+    read_provisioning_profile_command_factory: IReadProvisioningProfileCommandFactory,
+) -> ProvisioningProfileMetadata:
+    output: bytes = subprocess.check_output(
+        read_provisioning_profile_command_factory.read_provisioning_profile_command(
+            path
+        ),
+        stderr=subprocess.DEVNULL,
+    )
+    return ProvisioningProfileMetadata.from_provisioning_profile_file_content(
+        path, output
+    )
+
+
+def _read_entitlements_file(path: Optional[Path]) -> Optional[Dict[str, Any]]:
+    if not path:
+        return None
+    with open(path, mode="rb") as f:
+        return detect_format_and_load(f)
+
+
+def _dry_codesign_everything(
+    root: CodesignedPath,
+    codesign_on_copy_paths: List[CodesignedPath],
+    identity_fingerprint: str,
+    tmp_dir: str,
+    codesign_tool: Path,
+    platform: ApplePlatform,
+) -> None:
+    codesign_command_factory = DryRunCodesignCommandFactory(codesign_tool)
+
+    codesign_on_copy_directory_paths = [
+        p for p in codesign_on_copy_paths if p.path.is_dir()
+    ]
+
+    # First sign codesign-on-copy directory paths
+    _codesign_paths(
+        paths=codesign_on_copy_directory_paths,
+        identity_fingerprint=identity_fingerprint,
+        tmp_dir=tmp_dir,
+        codesign_command_factory=codesign_command_factory,
+        platform=platform,
+    )
+
+    # Dry codesigning creates a .plist inside every directory it signs.
+    # That approach doesn't work for files so those files are written into .plist for root bundle.
+    codesign_on_copy_file_paths = [
+        p.path.relative_to(root.path)
+        for p in codesign_on_copy_paths
+        if p.path.is_file()
+    ]
+    codesign_command_factory.set_codesign_on_copy_file_paths(
+        codesign_on_copy_file_paths
+    )
+
+    # Lastly sign whole bundle
+    _codesign_paths(
+        paths=[root],
+        identity_fingerprint=identity_fingerprint,
+        tmp_dir=tmp_dir,
+        codesign_command_factory=codesign_command_factory,
+        platform=platform,
+    )
+
+
+def _codesign_everything(
+    root: CodesignedPath,
+    codesign_on_copy_paths: List[CodesignedPath],
+    identity_fingerprint: str,
+    tmp_dir: str,
+    codesign_command_factory: ICodesignCommandFactory,
+    platform: ApplePlatform,
+    fast_adhoc_signing: bool,
+) -> None:
+    # First sign codesign-on-copy paths
+    codesign_on_copy_filtered_paths = _filter_out_fast_adhoc_paths(
+        paths=codesign_on_copy_paths,
+        identity_fingerprint=identity_fingerprint,
+        platform=platform,
+        fast_adhoc_signing=fast_adhoc_signing,
+    )
+    # If we have > 1 paths to sign (including root bundle), access keychain first to avoid user playing whack-a-mole
+    # with permission grant dialog windows.
+ if codesign_on_copy_filtered_paths: + obtain_keychain_permissions( + identity_fingerprint, tmp_dir, codesign_command_factory + ) + _codesign_paths( + codesign_on_copy_filtered_paths, + identity_fingerprint, + tmp_dir, + codesign_command_factory, + platform, + ) + # Lastly sign whole bundle + root_filtered_paths = _filter_out_fast_adhoc_paths( + paths=[root], + identity_fingerprint=identity_fingerprint, + platform=platform, + fast_adhoc_signing=fast_adhoc_signing, + ) + _codesign_paths( + root_filtered_paths, + identity_fingerprint, + tmp_dir, + codesign_command_factory, + platform, + ) + + +@dataclass +class ParallelProcess: + process: subprocess.Popen[bytes] + stdout_path: Optional[str] + stderr_path: str + + def check_result(self) -> None: + if self.process.returncode == 0: + return + with ExitStack() as stack: + stderr = stack.enter_context(open(self.stderr_path, encoding="utf8")) + stderr_string = f"\nstderr:\n{stderr.read()}\n" + stdout = ( + stack.enter_context(open(self.stdout_path, encoding="utf8")) + if self.stdout_path + else None + ) + stdout_string = f"\nstdout:\n{stdout.read()}\n" if stdout else "" + raise RuntimeError(f"{stdout_string}{stderr_string}") + + +def _spawn_process( + command: List[Union[str, Path]], + tmp_dir: str, + stack: ExitStack, + pipe_stdout: bool = False, +) -> ParallelProcess: + if pipe_stdout: + stdout_path = None + stdout = subprocess.PIPE + else: + stdout_path = os.path.join(tmp_dir, uuid.uuid4().hex) + stdout = stack.enter_context(open(stdout_path, "w")) + stderr_path = os.path.join(tmp_dir, uuid.uuid4().hex) + stderr = stack.enter_context(open(stderr_path, "w")) + _LOGGER.info(f"Executing command: {command}") + process = subprocess.Popen(command, stdout=stdout, stderr=stderr) + return ParallelProcess( + process, + stdout_path, + stderr_path, + ) + + +def _spawn_codesign_process( + path: CodesignedPath, + identity_fingerprint: str, + tmp_dir: str, + codesign_command_factory: ICodesignCommandFactory, + stack: ExitStack, +) -> ParallelProcess: + command = codesign_command_factory.codesign_command( + path.path, identity_fingerprint, path.entitlements, path.flags + ) + return _spawn_process(command=command, tmp_dir=tmp_dir, stack=stack) + + +def _codesign_paths( + paths: List[CodesignedPath], + identity_fingerprint: str, + tmp_dir: str, + codesign_command_factory: ICodesignCommandFactory, + platform: ApplePlatform, +) -> None: + """Codesigns several paths in parallel.""" + processes: List[ParallelProcess] = [] + with ExitStack() as stack: + for path in paths: + process = _spawn_codesign_process( + path=path, + identity_fingerprint=identity_fingerprint, + tmp_dir=tmp_dir, + codesign_command_factory=codesign_command_factory, + stack=stack, + ) + processes.append(process) + for p in processes: + p.process.wait() + for p in processes: + p.check_result() + + +def _filter_out_fast_adhoc_paths( + paths: List[CodesignedPath], + identity_fingerprint: str, + platform: ApplePlatform, + fast_adhoc_signing: bool, +) -> List[CodesignedPath]: + if not fast_adhoc_signing: + return paths + # TODO(T149863217): Make skip checks run in parallel, they're usually fast (~15ms) + # but if we have many of them (e.g., 30+ frameworks), it can add about ~0.5s.' 
+ return [ + p + for p in paths + if not should_skip_adhoc_signing_path( + p.path, identity_fingerprint, p.entitlements, platform + ) + ] + + +def obtain_keychain_permissions( + identity_fingerprint: str, + tmp_dir: str, + codesign_command_factory: ICodesignCommandFactory, +) -> None: + with ExitStack() as stack, importlib.resources.path( + __package__, "dummy_binary_for_signing" + ) as dummy_binary_path: + # Copy the binary to avoid races vs other bundling actions + dummy_binary_copied = os.path.join(tmp_dir, "dummy_binary_for_signing") + shutil.copyfile(dummy_binary_path, dummy_binary_copied, follow_symlinks=True) + p = _spawn_codesign_process( + path=CodesignedPath( + path=Path(dummy_binary_copied), entitlements=None, flags=[] + ), + identity_fingerprint=identity_fingerprint, + tmp_dir=tmp_dir, + codesign_command_factory=codesign_command_factory, + stack=stack, + ) + p.process.wait() + p.check_result() diff --git a/prelude/apple/tools/code_signing/codesign_command_factory.py b/prelude/apple/tools/code_signing/codesign_command_factory.py new file mode 100644 index 0000000000000..9a90e03231bec --- /dev/null +++ b/prelude/apple/tools/code_signing/codesign_command_factory.py @@ -0,0 +1,77 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +from abc import ABCMeta, abstractmethod +from pathlib import Path +from typing import List, Optional, Union + + +class ICodesignCommandFactory(metaclass=ABCMeta): + @abstractmethod + def codesign_command( + self, + path: Path, + identity_fingerprint: str, + entitlements: Optional[Path], + codesign_args: List[str], + ) -> List[Union[str, Path]]: + raise NotImplementedError + + +class DefaultCodesignCommandFactory(ICodesignCommandFactory): + codesign_tool: Path + _command_args: List[str] = ["--force", "--sign"] + + def __init__(self, codesign_tool: Optional[Path]) -> None: + self.codesign_tool = codesign_tool or Path("codesign") + + def codesign_command( + self, + path: Path, + identity_fingerprint: str, + entitlements: Optional[Path], + codesign_args: List[str], + ) -> List[Union[str, Path]]: + entitlements_args = ["--entitlements", entitlements] if entitlements else [] + return ( + [self.codesign_tool] + + DefaultCodesignCommandFactory._command_args + + [identity_fingerprint] + + codesign_args + + entitlements_args + + [path] + ) + + +class DryRunCodesignCommandFactory(ICodesignCommandFactory): + codesign_tool: Path + codesign_on_copy_file_paths: Optional[List[Path]] + + def __init__(self, codesign_tool: Path) -> None: + self.codesign_tool = codesign_tool + self.codesign_on_copy_file_paths = None + + def set_codesign_on_copy_file_paths(self, file_paths: List[Path]) -> None: + self.codesign_on_copy_file_paths = file_paths + + def codesign_command( + self, + path: Path, + identity_fingerprint: str, + entitlements: Optional[Path], + codesign_args: List[str], + ) -> List[Union[str, Path]]: + args = [path, "--identity", identity_fingerprint] + if entitlements: + args += ["--entitlements", entitlements] if entitlements else [] + codesign_on_copy_file_paths = self.codesign_on_copy_file_paths + if codesign_on_copy_file_paths: + args += ["--extra-paths-to-sign"] + args += codesign_on_copy_file_paths + return [self.codesign_tool] + args diff --git 
a/prelude/apple/tools/code_signing/dummy_binary_for_signing.c b/prelude/apple/tools/code_signing/dummy_binary_for_signing.c new file mode 100644 index 0000000000000..22b15a7fa58d2 --- /dev/null +++ b/prelude/apple/tools/code_signing/dummy_binary_for_signing.c @@ -0,0 +1,12 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +int main() { + return 0; +} diff --git a/prelude/apple/tools/code_signing/fast_adhoc.py b/prelude/apple/tools/code_signing/fast_adhoc.py new file mode 100644 index 0000000000000..10bb78934be68 --- /dev/null +++ b/prelude/apple/tools/code_signing/fast_adhoc.py @@ -0,0 +1,136 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +import logging +import os +import subprocess +import sys + +from pathlib import Path +from typing import List, Optional, Union + +from .apple_platform import ApplePlatform + +_LOGGER: logging.Logger = logging.getLogger(__name__) + + +def _find_executable_for_signed_path(path: Path, platform: ApplePlatform) -> Path: + extension = path.suffix + if extension not in [".app", ".appex", ".framework"]: + return path + + contents_subdir = "Contents/MacOS" if platform.is_desktop() else "" + contents_dir = path / contents_subdir + # TODO(): Read binary name from Info.plist + return contents_dir / path.stem + + +def _logged_subprocess_run( + name: str, why: str, args: List[Union[str, Path]] +) -> subprocess.CompletedProcess[str]: + _LOGGER.info(f" Calling {name} to {why}: `{args}`") + result = subprocess.run( + args, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + encoding="utf-8", + ) + + _LOGGER.info(f" {name} exit code: {result.returncode}") + if result.stdout: + _LOGGER.info(f" {name} stdout:") + _LOGGER.info( + "\n" + "\n".join([f" {line}" for line in result.stdout.split("\n")]) + ) + if result.stderr: + _LOGGER.info(f" {name} stderr:") + _LOGGER.info( + "\n" + "\n".join([f" {line}" for line in result.stderr.split("\n")]) + ) + + return result + + +def is_fast_adhoc_codesign_allowed() -> bool: + if sys.platform != "darwin": + # This is a macOS-only optimisation + _LOGGER.info( + f"Running on non-macOS ({sys.platform}), fast adhoc signing not allowed" + ) + return False + if not os.path.exists("/var/db/xcode_select_link"): + _LOGGER.info( + "Developer tools do not exist, cannot use `otool`, fast adhoc signing not allowed" + ) + return False + + return True + + +def should_skip_adhoc_signing_path( + path: Path, + identity_fingerprint: str, + entitlements_path: Optional[Path], + platform: ApplePlatform, +) -> bool: + logging.getLogger(__name__).info( + f"Checking if should skip adhoc signing path `{path}` with identity `{identity_fingerprint}` and entitlements `{entitlements_path}` for platform `{platform}`" + ) + + if identity_fingerprint != "-": + # Non-adhoc environments _always_ code sign + _LOGGER.info(" Requested non-adhoc signing, not adhoc skipping signing") + return False + + if "libclang_rt" in str(path): + # Sanitizer runtime dylibs require re-signing, even though they're already pre-signed. 
+ # Otherwise, `codesign` fails to sign the top-level bundle (as the adhoc pre-signed + # sanitizer dylibs have been signed within a different context). + return False + + codesign_args = ["/usr/bin/codesign", "-d", "-v", path] + codesign_result = _logged_subprocess_run( + "codesign", "check pre-existing signature", codesign_args + ) + + # Anything that's _already_ adhoc signed can be skipped. + # On ARM64 systems, the linker will already codesign using adhoc, + # so performing the signing twice is unnecessary. + # + # The entitlements file can be ignored under adhoc signing because: + # + # - Frameworks/dylibs do not need entitlements (they operate under the entitlements of their loading binary) + # - Apps (+ app extensions) have binaries which embed the entitlements via __entitlements section at link time + # + # Note that certain features require non-adhoc signing (e.g., app groups) while other features like keychain + # and "Sign in with Apple" just need the entitlements present in the binary (which it will per the above). + is_adhoc_signed = "Signature=adhoc" in codesign_result.stderr + if not is_adhoc_signed: + _LOGGER.info(" Path is not adhoc signed, not skipping adhoc signing") + return False + + if entitlements_path: + # Adhoc entitlements do not require postprocessing, so we just need to check existence + binary_path = _find_executable_for_signed_path(path, platform) + otool_arg = ["/usr/bin/otool", "-s", "__TEXT", "__entitlements", binary_path] + otool_result = _logged_subprocess_run( + "otool", "check entitlements presence in binary", otool_arg + ) + + contains_entitlements = ( + "Contents of (__TEXT,__entitlements) section" in otool_result.stdout + ) + if not contains_entitlements: + _LOGGER.info( + f" Binary path `{binary_path}` does not contain entitlements, not skipping adhoc signing" + ) + return False + + _LOGGER.info(f" All checks passed for `{path}`, skipping adhoc signing") + return True diff --git a/prelude/apple/tools/code_signing/identity.py b/prelude/apple/tools/code_signing/identity.py new file mode 100644 index 0000000000000..191e526916184 --- /dev/null +++ b/prelude/apple/tools/code_signing/identity.py @@ -0,0 +1,44 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
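The file below parses the stdout of Apple's `security` tool into structured identities. A sketch of what that parsing amounts to, using a sample line shaped like the fixtures in `identity_test.py` further down (the import path is assumed from the prelude layout):

    from apple.tools.code_signing.identity import CodeSigningIdentity

    sample = '  1) 5C5E14F66E6B3C2697107764C9D728EE5AB393B9 "Apple Development: Johnny Appleseed (B4H6M5LP3J)"'
    (identity,) = CodeSigningIdentity.parse_security_stdout(sample)
    assert identity.fingerprint == "5C5E14F66E6B3C2697107764C9D728EE5AB393B9"
    assert identity.subject_common_name.startswith("Apple Development")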
+ +# pyre-strict + +from __future__ import annotations + +import re +from dataclasses import dataclass +from enum import Enum +from typing import List + + +@dataclass +class CodeSigningIdentity: + fingerprint: str + subject_common_name: str + + class _ReGroupName(str, Enum): + fingerprint = "fingerprint" + subject_common_name = "subject_common_name" + + _re_string: str = ( + '(?P<{fingerprint}>[A-F0-9]{{40}}) "(?P<{subject_common_name}>.+)"(?!.*CSSMERR_.+)'.format( + fingerprint=_ReGroupName.fingerprint, + subject_common_name=_ReGroupName.subject_common_name, + ) + ) + + _pattern: re.Pattern[str] = re.compile(_re_string) + + @classmethod + def parse_security_stdout(cls, text: str) -> List[CodeSigningIdentity]: + return [ + CodeSigningIdentity( + match.group(cls._ReGroupName.fingerprint), + match.group(cls._ReGroupName.subject_common_name), + ) + for match in re.finditer(cls._pattern, text) + ] diff --git a/prelude/apple/tools/code_signing/identity_test.py b/prelude/apple/tools/code_signing/identity_test.py new file mode 100644 index 0000000000000..fe9806dc339cb --- /dev/null +++ b/prelude/apple/tools/code_signing/identity_test.py @@ -0,0 +1,46 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +import unittest + +from .identity import CodeSigningIdentity + + +class TestParse(unittest.TestCase): + def test_multiple_certificates_are_parsed(self): + text = r""" + 1) 5C5E14F66E6B3C2697107764C9D728EE5AB393B9 "Apple Development: Johnny Appleseed (B4H6M5LP3J)" + 2) 3348E051F7E7E1ED509D2D620567BAF796210C36 "iPhone Developer: Johnny Appleseed (B4H6M5LP3J)" + 2 valid identities found +""" + expected = [ + CodeSigningIdentity( + "5C5E14F66E6B3C2697107764C9D728EE5AB393B9", + "Apple Development: Johnny Appleseed (B4H6M5LP3J)", + ), + CodeSigningIdentity( + "3348E051F7E7E1ED509D2D620567BAF796210C36", + "iPhone Developer: Johnny Appleseed (B4H6M5LP3J)", + ), + ] + result = CodeSigningIdentity.parse_security_stdout(text) + self.assertEqual(expected, result) + + def test_expired_certificates_are_ignored(self): + text = r""" + 1) 5C5E14F66E6B3C2697107764C9D728EE5AB393B9 "Apple Development: Johnny Appleseed (B4H6M5LP3J)" + 2) 3348E051F7E7E1ED509D2D620567BAF796210C36 "iPhone Developer: Johnny Appleseed (B4H6M5LP3J)" (CSSMERR_TP_CERT_EXPIRED) + 2 valid identities found +""" + expected = [ + CodeSigningIdentity( + "5C5E14F66E6B3C2697107764C9D728EE5AB393B9", + "Apple Development: Johnny Appleseed (B4H6M5LP3J)", + ), + ] + result = CodeSigningIdentity.parse_security_stdout(text) + self.assertEqual(expected, result) diff --git a/prelude/apple/tools/code_signing/info_plist_metadata.py b/prelude/apple/tools/code_signing/info_plist_metadata.py new file mode 100644 index 0000000000000..beb99b5ead940 --- /dev/null +++ b/prelude/apple/tools/code_signing/info_plist_metadata.py @@ -0,0 +1,31 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
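`InfoPlistMetadata.from_file` below leans on `detect_format_and_load` from `apple.tools.plistlib_utils`, which, judging by its name, accepts both XML and binary plists; the standard library's `plistlib` can sniff the format the same way. A hedged sketch of that behavior using only the standard library:

    import io
    import plistlib

    # A binary plist loads just as well as XML, since plistlib auto-detects
    # the format when fmt is left unspecified.
    payload = plistlib.dumps(
        {"CFBundleIdentifier": "com.example.App"}, fmt=plistlib.FMT_BINARY
    )
    assert plistlib.load(io.BytesIO(payload))["CFBundleIdentifier"] == "com.example.App"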
+
+# pyre-strict
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import IO, Optional
+
+from apple.tools.plistlib_utils import detect_format_and_load
+
+
+@dataclass
+class InfoPlistMetadata:
+    bundle_id: str
+    bundle_type: Optional[str]
+    is_watchos_app: bool
+
+    @staticmethod
+    def from_file(info_plist_file: IO[bytes]) -> InfoPlistMetadata:
+        root = detect_format_and_load(info_plist_file)
+        return InfoPlistMetadata(
+            root["CFBundleIdentifier"],
+            root.get("CFBundlePackageType"),
+            root.get("WKApplication", False),
+        )
diff --git a/prelude/apple/tools/code_signing/info_plist_metadata_test.py b/prelude/apple/tools/code_signing/info_plist_metadata_test.py
new file mode 100644
index 0000000000000..98ab53adac368
--- /dev/null
+++ b/prelude/apple/tools/code_signing/info_plist_metadata_test.py
@@ -0,0 +1,67 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+import io
+import unittest
+
+from .info_plist_metadata import InfoPlistMetadata
+
+
+class TestParse(unittest.TestCase):
+    def test_canary(self):
+        plist = io.BytesIO(
+            b"""<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+    <key>CFBundleIdentifier</key>
+    <string>com.company.application</string>
+    <key>CFBundlePackageType</key>
+    <string>APPL</string>
+    <key>WKApplication</key>
+    <false/>
+</dict>
+</plist>
+"""
+        )
+        expected = InfoPlistMetadata("com.company.application", "APPL", False)
+        result = InfoPlistMetadata.from_file(plist)
+        self.assertEqual(expected, result)
+
+    def test_not_watch_application_by_default(self):
+        plist = io.BytesIO(
+            b"""<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+    <key>CFBundleIdentifier</key>
+    <string>com.company.application</string>
+    <key>CFBundlePackageType</key>
+    <string>APPL</string>
+</dict>
+</plist>
+"""
+        )
+        expected = InfoPlistMetadata("com.company.application", "APPL", False)
+        result = InfoPlistMetadata.from_file(plist)
+        self.assertEqual(expected, result)
+
+    def test_package_type_can_be_omitted(self):
+        plist = io.BytesIO(
+            b"""<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+    <key>CFBundleIdentifier</key>
+    <string>com.company.application</string>
+</dict>
+</plist>
+"""
+        )
+        expected = InfoPlistMetadata("com.company.application", None, False)
+        result = InfoPlistMetadata.from_file(plist)
+        self.assertEqual(expected, result)
diff --git a/prelude/apple/tools/code_signing/list_codesign_identities.py b/prelude/apple/tools/code_signing/list_codesign_identities.py
new file mode 100644
index 0000000000000..2a4d458b9d6ee
--- /dev/null
+++ b/prelude/apple/tools/code_signing/list_codesign_identities.py
@@ -0,0 +1,75 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
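The identity lister introduced below shells out to `security find-identity -v -p codesigning` by default, but its `override` hook makes it testable anywhere. A sketch, with the import path assumed and a canned command that only works where a POSIX `echo` is available; the sample line is shaped like the parser's test fixtures:

    from apple.tools.code_signing.list_codesign_identities import ListCodesignIdentities

    canned = '1) 3348E051F7E7E1ED509D2D620567BAF796210C36 "iPhone Developer: Johnny Appleseed (B4H6M5LP3J)"'
    # Substitute a command whose stdout mimics `security find-identity`.
    lister = ListCodesignIdentities.override(["echo", canned])
    identities = lister.list_codesign_identities()  # parses the echoed line
    assert identities[0].subject_common_name == "iPhone Developer: Johnny Appleseed (B4H6M5LP3J)"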
+
+# pyre-strict
+
+from __future__ import annotations
+
+import subprocess
+
+from abc import ABCMeta, abstractmethod
+from typing import List
+
+from .identity import CodeSigningIdentity
+
+
+class IListCodesignIdentities(metaclass=ABCMeta):
+    @abstractmethod
+    def list_codesign_identities(self) -> List[CodeSigningIdentity]:
+        raise NotImplementedError
+
+
+class ListCodesignIdentities(IListCodesignIdentities):
+    _default_command = ["security", "find-identity", "-v", "-p", "codesigning"]
+
+    def __init__(self, command: List[str]) -> None:
+        self.command = command
+
+    @classmethod
+    def default(cls) -> IListCodesignIdentities:
+        return cls(cls._default_command)
+
+    @classmethod
+    def override(cls, command: List[str]) -> IListCodesignIdentities:
+        return cls(command)
+
+    def list_codesign_identities(self) -> List[CodeSigningIdentity]:
+        return _list_identities(self.command)
+
+
+def _list_identities(
+    command: List[str],
+) -> List[CodeSigningIdentity]:
+    output = subprocess.check_output(
+        command,
+        encoding="utf-8",
+    )
+    return CodeSigningIdentity.parse_security_stdout(output)
+
+
+class AdHocListCodesignIdentities(IListCodesignIdentities):
+    def __init__(
+        self, original: IListCodesignIdentities, subject_common_name: str
+    ) -> None:
+        self.original = original
+        self.subject_common_name = subject_common_name
+
+    def list_codesign_identities(self) -> List[CodeSigningIdentity]:
+        unfiltered_identities = self.original.list_codesign_identities()
+        identity = next(
+            (
+                i
+                for i in unfiltered_identities
+                if i.subject_common_name == self.subject_common_name
+            ),
+            None,
+        )
+        if not identity:
+            raise RuntimeError(
+                f"No identity found with subject common name `{self.subject_common_name}`"
+            )
+        return [identity]
diff --git a/prelude/apple/tools/code_signing/main.py b/prelude/apple/tools/code_signing/main.py
new file mode 100644
index 0000000000000..dd6ed288db6dc
--- /dev/null
+++ b/prelude/apple/tools/code_signing/main.py
@@ -0,0 +1,183 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+import pathlib
+import sys
+from typing import List, Optional
+
+from tap import Tap
+
+from .apple_platform import ApplePlatform
+from .codesign_bundle import (
+    AdhocSigningContext,
+    codesign_bundle,
+    CodesignedPath,
+    signing_context_with_profile_selection,
+)
+from .list_codesign_identities import ListCodesignIdentities
+from .provisioning_profile_selection import CodeSignProvisioningError
+
+
+class Arguments(Tap):
+    """
+    Tool that code signs an Apple bundle, amending its `Info.plist` file as part of the process.
+    """
+
+    # pyre-fixme[13]: Attribute `bundle_path` is never initialized.
+    bundle_path: pathlib.Path
+    # pyre-fixme[13]: Attribute `info_plist` is never initialized.
+    info_plist: pathlib.Path
+    entitlements: Optional[pathlib.Path] = None
+    profiles_dir: Optional[pathlib.Path] = None
+    ad_hoc: bool = False
+    ad_hoc_codesign_identity: Optional[str] = None
+    # pyre-fixme[13]: Attribute `platform` is never initialized.
+    platform: ApplePlatform
+    codesign_on_copy: Optional[List[pathlib.Path]] = None
+    fast_provisioning_profile_parsing: bool = False
+    strict_provisioning_profile_search: bool = False
+    provisioning_profile_filter: Optional[str] = None
+
+    def configure(self) -> None:
+        """
+        Configure the arguments.
+        """
+        self.add_argument(
+            "--bundle-path",
+            metavar="",
+            type=pathlib.Path,
+            required=True,
+            help="Absolute path to the resulting Apple bundle.",
+        )
+        self.add_argument(
+            "--info-plist",
+            metavar="",
+            type=pathlib.Path,
+            required=True,
+            help="Bundle-relative destination path of the Info.plist file, if present in the bundle.",
+        )
+        self.add_argument(
+            "--entitlements",
+            metavar="",
+            type=pathlib.Path,
+            required=False,
+            help="Path to the file with entitlements to be used during code signing. If not provided, minimal entitlements are generated.",
+        )
+        self.add_argument(
+            "--profiles-dir",
+            metavar="",
+            type=pathlib.Path,
+            required=False,
+            help="Path to the directory with provisioning profile files. Required if code signing is not ad-hoc.",
+        )
+        self.add_argument(
+            "--ad-hoc",
+            action="store_true",
+            required=False,
+            help="Perform ad-hoc signing if set.",
+        )
+        self.add_argument(
+            "--ad-hoc-codesign-identity",
+            metavar="",
+            type=str,
+            required=False,
+            help="Codesign identity to use when ad-hoc signing is performed.",
+        )
+        self.add_argument(
+            "--platform",
+            metavar="",
+            type=ApplePlatform,
+            required=True,
+            help="Apple platform for which the bundle was built.",
+        )
+        self.add_argument(
+            "--codesign-on-copy",
+            metavar="",
+            type=pathlib.Path,
+            action="append",
+            required=False,
+            help="Bundle-relative path that should be code signed before the result bundle itself is signed.",
+        )
+        self.add_argument(
+            "--fast-provisioning-profile-parsing",
+            action="store_true",
+            required=False,
+            help="Use experimental, faster provisioning profile parsing.",
+        )
+        self.add_argument(
+            "--strict-provisioning-profile-search",
+            action="store_true",
+            required=False,
+            help="Fail code signing if more than one matching profile is found.",
+        )
+        self.add_argument(
+            "--provisioning-profile-filter",
+            metavar="",
+            type=str,
+            required=False,
+            help="Regex used to disambiguate multiple matching profiles; evaluated against the provisioning profile filename.",
+        )
+
+
+# Add an emoji to the beginning of an actionable error message so it stands out more.
+def decorate_error_message(message: str) -> str:
+    return " ".join(["❗️", message])
+
+
+def _main() -> None:
+    args = Arguments().parse_args()
+    try:
+        if args.ad_hoc:
+            signing_context = AdhocSigningContext(
+                codesign_identity=args.ad_hoc_codesign_identity
+            )
+        else:
+            assert (
+                args.profiles_dir
+            ), "Path to directory with provisioning profile files should be set when signing is not ad-hoc."
+ non_optional_profiles_dir = args.profiles_dir + signing_context = signing_context_with_profile_selection( + info_plist_source=args.bundle_path / args.info_plist, + info_plist_destination=args.info_plist, + provisioning_profiles_dir=non_optional_profiles_dir, + entitlements_path=args.entitlements, + list_codesign_identities=ListCodesignIdentities.default(), + platform=args.platform, + should_use_fast_provisioning_profile_parsing=args.fast_provisioning_profile_parsing, + strict_provisioning_profile_search=args.strict_provisioning_profile_search, + provisioning_profile_filter=args.provisioning_profile_filter, + ) + + bundle_path = CodesignedPath( + path=args.bundle_path, entitlements=args.entitlements, flags=[] + ) + codesign_on_copy_paths = ( + [ + CodesignedPath( + path=bundle_path.path / path, entitlements=None, flags=[] + ) + for path in args.codesign_on_copy + ] + if args.codesign_on_copy + else [] + ) + + codesign_bundle( + bundle_path=bundle_path, + signing_context=signing_context, + platform=args.platform, + codesign_on_copy_paths=codesign_on_copy_paths, + ) + except CodeSignProvisioningError as e: + print(decorate_error_message(str(e)), file=sys.stderr) + exit(1) + + +if __name__ == "__main__": + _main() diff --git a/prelude/apple/tools/code_signing/prepare_code_signing_entitlements.py b/prelude/apple/tools/code_signing/prepare_code_signing_entitlements.py new file mode 100644 index 0000000000000..2ed16222bfd8d --- /dev/null +++ b/prelude/apple/tools/code_signing/prepare_code_signing_entitlements.py @@ -0,0 +1,44 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +import os +import plistlib +import tempfile +from pathlib import Path +from typing import Optional + +from apple.tools.info_plist_processor.process import process as process_info_plist + +from .provisioning_profile_metadata import ProvisioningProfileMetadata + + +# Buck v1 corresponding code is in `ProvisioningProfileCopyStep::execute` in `ProvisioningProfileCopyStep.java` +def prepare_code_signing_entitlements( + entitlements_path: Optional[Path], + bundle_id: str, + profile: ProvisioningProfileMetadata, + tmp_dir: str, +) -> Path: + fd, output_path = tempfile.mkstemp(dir=tmp_dir) + with os.fdopen(fd, mode="wb") as output: + if entitlements_path: + with open(entitlements_path, "rb") as entitlements_file: + process_info_plist( + input_file=entitlements_file, + output_file=output, + additional_keys=profile.get_mergeable_entitlements(), + output_format=plistlib.FMT_XML, + ) + else: + app_id = profile.get_app_id().team_id + "." + bundle_id + entitlements = profile.get_mergeable_entitlements() + entitlements["application-identifier"] = app_id + entitlements["keychain-access-groups"] = [app_id] + plistlib.dump(entitlements, output, fmt=plistlib.FMT_XML) + return Path(output_path) diff --git a/prelude/apple/tools/code_signing/prepare_code_signing_entitlements_test.py b/prelude/apple/tools/code_signing/prepare_code_signing_entitlements_test.py new file mode 100644 index 0000000000000..069ffcfc6e5f9 --- /dev/null +++ b/prelude/apple/tools/code_signing/prepare_code_signing_entitlements_test.py @@ -0,0 +1,77 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +import os +import plistlib +import tempfile +import unittest +from datetime import datetime +from pathlib import Path + +from .prepare_code_signing_entitlements import prepare_code_signing_entitlements +from .provisioning_profile_metadata import ProvisioningProfileMetadata + + +class Test(unittest.TestCase): + def test_minimal_entitlements_generated_based_on_provisioning_profile(self): + with tempfile.TemporaryDirectory() as tmp_dir: + profile = ProvisioningProfileMetadata( + Path("/foo"), + "00000000-0000-0000-0000-000000000000", + datetime.max, + {"iOS"}, + {}, + { + "application-identifier": "ABCDEFGHIJ.*", + "com.apple.developer.aps-environment": "development", + }, + ) + result = prepare_code_signing_entitlements( + None, "com.company.application", profile, tmp_dir + ) + with open(result, mode="rb") as result_file: + self.assertEqual( + plistlib.load(result_file), + { + "application-identifier": "ABCDEFGHIJ.com.company.application", + "com.apple.developer.aps-environment": "development", + "keychain-access-groups": [ + "ABCDEFGHIJ.com.company.application" + ], + }, + ) + + def test_entitlements_enriched_by_profile(self): + with tempfile.TemporaryDirectory() as tmp_dir: + entitlements = {"foo": "bar"} + entitlements_path = os.path.join(tmp_dir, "Entitlements.plist") + with open(entitlements_path, mode="wb") as entitlements_file: + plistlib.dump(entitlements, entitlements_file, fmt=plistlib.FMT_XML) + profile = ProvisioningProfileMetadata( + Path("/foo"), + "00000000-0000-0000-0000-000000000000", + datetime.max, + {"iOS"}, + {}, + { + "application-identifier": "ABCDEFGHIJ.com.company.application", + "com.apple.developer.aps-environment": "development", + "should.be.ignored": "dummy", + }, + ) + result = prepare_code_signing_entitlements( + entitlements_path, "com.company.application", profile, tmp_dir + ) + with open(result, "rb") as result_file: + self.assertEqual( + plistlib.load(result_file), + { + "foo": "bar", + "application-identifier": "ABCDEFGHIJ.com.company.application", + "com.apple.developer.aps-environment": "development", + }, + ) diff --git a/prelude/apple/tools/code_signing/prepare_info_plist.py b/prelude/apple/tools/code_signing/prepare_info_plist.py new file mode 100644 index 0000000000000..a5e7104e64681 --- /dev/null +++ b/prelude/apple/tools/code_signing/prepare_info_plist.py @@ -0,0 +1,50 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
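As the tests above illustrate, when no entitlements file is supplied, `prepare_code_signing_entitlements` derives minimal entitlements from the profile: the app ID (team ID joined with the bundle ID) becomes the `application-identifier` and the sole keychain access group. A self-contained sketch of that derivation, using hypothetical values:

    # Hypothetical values; mirrors the minimal-entitlements branch exercised above.
    team_id = "ABCDEFGHIJ"
    bundle_id = "com.company.application"
    mergeable = {"get-task-allow": True}  # entitlements merged in from the profile

    app_id = team_id + "." + bundle_id
    entitlements = dict(mergeable)
    entitlements["application-identifier"] = app_id
    entitlements["keychain-access-groups"] = [app_id]
    assert entitlements["application-identifier"] == "ABCDEFGHIJ.com.company.application"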
+ +# pyre-strict + +import os +import tempfile +from pathlib import Path +from typing import Any, Dict + +from apple.tools.info_plist_processor.process import process as process_info_plist + +from .info_plist_metadata import InfoPlistMetadata +from .provisioning_profile_metadata import ProvisioningProfileMetadata + + +# Buck v1 corresponding code is in `ProvisioningProfileCopyStep::execute` in `ProvisioningProfileCopyStep.java` +def prepare_info_plist( + info_plist: Path, + info_plist_metadata: InfoPlistMetadata, + profile: ProvisioningProfileMetadata, + tmp_dir: str, +) -> Path: + fd, output_path = tempfile.mkstemp(dir=tmp_dir) + with open(info_plist, "rb") as input, os.fdopen(fd, mode="wb") as output: + additional_keys = _additional_keys(info_plist_metadata, profile) + process_info_plist( + input_file=input, output_file=output, additional_keys=additional_keys + ) + return Path(output_path) + + +# Equivalent Buck v1 code is in `ProvisioningProfileCopyStep.java` in `ProvisioningProfileCopyStep::getInfoPlistAdditionalKeys` method. +def _additional_keys( + info_plist_metadata: InfoPlistMetadata, profile: ProvisioningProfileMetadata +) -> Dict[str, Any]: + result = {} + # Restrict additional keys based on bundle type. Skip additional keys for watchOS bundles (property keys whitelist). + if ( + info_plist_metadata.bundle_type == "APPL" + and not info_plist_metadata.is_watchos_app + ): + # Construct AppID using the Provisioning Profile info (app prefix) + app_id = profile.get_app_id().team_id + "." + info_plist_metadata.bundle_id + result["ApplicationIdentifier"] = app_id + return result diff --git a/prelude/apple/tools/code_signing/prepare_info_plist_test.py b/prelude/apple/tools/code_signing/prepare_info_plist_test.py new file mode 100644 index 0000000000000..bf4d0061d4162 --- /dev/null +++ b/prelude/apple/tools/code_signing/prepare_info_plist_test.py @@ -0,0 +1,74 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
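A condensed sketch of the `_additional_keys` rule defined above, with hypothetical values: an `ApplicationIdentifier` entry is contributed only for `APPL` bundles that are not watchOS apps.

    # Hypothetical values; mirrors _additional_keys for an APPL, non-watchOS bundle.
    bundle_type = "APPL"
    is_watchos_app = False
    app_id = "ABCDEFGHIJ" + "." + "com.facebook.test"  # team ID + bundle ID

    additional_keys = {}
    if bundle_type == "APPL" and not is_watchos_app:
        additional_keys["ApplicationIdentifier"] = app_id
    assert additional_keys == {"ApplicationIdentifier": "ABCDEFGHIJ.com.facebook.test"}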
+
+import os
+import plistlib
+import tempfile
+import unittest
+from datetime import datetime
+from pathlib import Path
+
+from .info_plist_metadata import InfoPlistMetadata
+from .prepare_info_plist import prepare_info_plist
+from .provisioning_profile_metadata import ProvisioningProfileMetadata
+
+
+class Test(unittest.TestCase):
+    def test_app_id_set_for_non_watchos_apps(self):
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            profile = ProvisioningProfileMetadata(
+                Path("/foo"),
+                "00000000-0000-0000-0000-000000000000",
+                datetime.max,
+                {"iOS"},
+                {},
+                {
+                    "application-identifier": "ABCDEFGHIJ.*",
+                },
+            )
+            info_plist = {
+                "CFBundleIdentifier": "com.facebook.test",
+                "CFBundlePackageType": "APPL",
+            }
+            info_plist_path, info_plist_metadata = _write_info_plist(
+                info_plist, tmp_dir, "Info.plist"
+            )
+            result = prepare_info_plist(
+                info_plist_path, info_plist_metadata, profile, tmp_dir
+            )
+            with open(result, "rb") as result_file:
+                self.assertEqual(
+                    plistlib.load(result_file),
+                    {
+                        "CFBundleIdentifier": "com.facebook.test",
+                        "CFBundlePackageType": "APPL",
+                        "ApplicationIdentifier": "ABCDEFGHIJ.com.facebook.test",
+                    },
+                )
+            # Same but for watchOS Info.plist
+            info_plist = {
+                "CFBundleIdentifier": "com.facebook.test",
+                "CFBundlePackageType": "APPL",
+                "WKApplication": True,
+            }
+            info_plist_path, info_plist_metadata = _write_info_plist(
+                info_plist, tmp_dir, "Info.plist"
+            )
+            result = prepare_info_plist(
+                info_plist_path, info_plist_metadata, profile, tmp_dir
+            )
+            with open(result, "rb") as result_file:
+                self.assertNotIn("ApplicationIdentifier", plistlib.load(result_file))
+
+
+def _write_info_plist(plist, tmp_dir, name):
+    path = os.path.join(tmp_dir, name)
+    with open(path, mode="wb") as file:
+        plistlib.dump(plist, file, fmt=plistlib.FMT_XML)
+    with open(path, mode="rb") as file:
+        metadata = InfoPlistMetadata.from_file(file)
+    return (path, metadata)
diff --git a/prelude/apple/tools/code_signing/provisioning_profile_diagnostics.py b/prelude/apple/tools/code_signing/provisioning_profile_diagnostics.py
new file mode 100644
index 0000000000000..92f9a99f21d33
--- /dev/null
+++ b/prelude/apple/tools/code_signing/provisioning_profile_diagnostics.py
@@ -0,0 +1,256 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+from abc import ABCMeta, abstractmethod
+from pathlib import Path
+from typing import List, Optional, Type, TypeVar
+
+from .apple_platform import ApplePlatform
+
+from .identity import CodeSigningIdentity
+
+from .provisioning_profile_metadata import ProvisioningProfileMetadata
+
+META_IOS_DEVELOPER_CERTIFICATE_LINK: str = (
+    "https://www.internalfb.com/intern/qa/5198/how-do-i-get-the-fb-ios-developer-certificate"
+)
+META_IOS_PROVISIONING_PROFILES_LINK: str = (
+    "https://www.internalfb.com/intern/apple/download-provisioning-profile/"
+)
+META_IOS_PROVISIONING_PROFILES_COMMAND: str = (
+    "arc ios-certs --download-provisioning-profiles"
+)
+META_IOS_CERTS_ALL_COMMAND: str = "arc ios-certs --all"
+# TODO(T197258387): Remove references to `arc download-provisioning-profile` in this wiki page.
+META_IOS_BUILD_AND_RUN_ON_DEVICE_LINK: str = (
+    "https://www.internalfb.com/intern/wiki/Ios-first-steps/running-on-device/#2-register-your-device-i"
+)
+
+
+class IProvisioningProfileDiagnostics(metaclass=ABCMeta):
+    profile: ProvisioningProfileMetadata
+
+    def __init__(self, profile: ProvisioningProfileMetadata) -> None:
+        self.profile = profile
+
+    @abstractmethod
+    def log_message(self) -> str:
+        raise NotImplementedError
+
+
+class TeamIdMismatch(IProvisioningProfileDiagnostics):
+    team_id: str
+    team_id_constraint: str
+
+    def __init__(
+        self,
+        profile: ProvisioningProfileMetadata,
+        team_id: str,
+        team_id_constraint: str,
+    ) -> None:
+        super().__init__(profile)
+        self.team_id = team_id
+        self.team_id_constraint = team_id_constraint
+
+    def log_message(self) -> str:
+        return f"Profile team ID `{self.team_id}` does not match constraint `{self.team_id_constraint}`"
+
+
+class BundleIdMismatch(IProvisioningProfileDiagnostics):
+    bundle_id: str
+    bundle_id_constraint: str
+
+    def __init__(
+        self,
+        profile: ProvisioningProfileMetadata,
+        bundle_id: str,
+        bundle_id_constraint: str,
+    ) -> None:
+        super().__init__(profile)
+        self.bundle_id = bundle_id
+        self.bundle_id_constraint = bundle_id_constraint
+
+    def log_message(self) -> str:
+        return f"Bundle ID `{self.bundle_id}` does not match constraint `{self.bundle_id_constraint}`"
+
+
+class ProfileExpired(IProvisioningProfileDiagnostics):
+    bundle_id_match_length: int
+
+    def __init__(
+        self,
+        profile: ProvisioningProfileMetadata,
+        bundle_id_match_length: int,
+    ) -> None:
+        super().__init__(profile)
+        self.bundle_id_match_length = bundle_id_match_length
+
+    def log_message(self) -> str:
+        return "Provisioning profile expired."
+
+
+class UnsupportedPlatform(IProvisioningProfileDiagnostics):
+    bundle_id_match_length: int
+    platform_constraint: ApplePlatform
+
+    def __init__(
+        self,
+        profile: ProvisioningProfileMetadata,
+        bundle_id_match_length: int,
+        platform_constraint: ApplePlatform,
+    ) -> None:
+        super().__init__(profile)
+        self.bundle_id_match_length = bundle_id_match_length
+        self.platform_constraint = platform_constraint
+
+    def log_message(self) -> str:
+        supported_profile_platforms = ", ".join(self.profile.platforms)
+        return f"Requested platform `{self.platform_constraint}` is not in provisioning profile's supported platforms `{supported_profile_platforms}`."
+
+
+class EntitlementsMismatch(IProvisioningProfileDiagnostics):
+    bundle_id_match_length: int
+    mismatched_key: str
+    mismatched_value: str
+
+    def __init__(
+        self,
+        profile: ProvisioningProfileMetadata,
+        bundle_id_match_length: int,
+        mismatched_key: str,
+        mismatched_value: str,
+    ) -> None:
+        super().__init__(profile)
+        self.bundle_id_match_length = bundle_id_match_length
+        self.mismatched_key = mismatched_key
+        self.mismatched_value = mismatched_value
+
+    def log_message(self) -> str:
+        return f"Expected entitlement item key `{self.mismatched_key}` with value `{self.mismatched_value}` not found in provisioning profile."
+
+
+class DeveloperCertificateMismatch(IProvisioningProfileDiagnostics):
+    bundle_id_match_length: int
+
+    def __init__(
+        self,
+        profile: ProvisioningProfileMetadata,
+        bundle_id_match_length: int,
+    ) -> None:
+        super().__init__(profile)
+        self.bundle_id_match_length = bundle_id_match_length
+
+    def log_message(self) -> str:
+        certificate_fingerprints = ", ".join(
+            self.profile.developer_certificate_fingerprints
+        )
+        return f"Expected identity fingerprint not found in profile's certificate fingerprints `{certificate_fingerprints}`."
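A minimal sketch of how one of these diagnostics renders its message; all profile values are hypothetical, and the absolute import paths assume the `apple.tools` package layout used in this diff:

    from datetime import datetime
    from pathlib import Path

    from apple.tools.code_signing.provisioning_profile_diagnostics import TeamIdMismatch
    from apple.tools.code_signing.provisioning_profile_metadata import (
        ProvisioningProfileMetadata,
    )

    # Hypothetical profile whose team ID differs from the constraint.
    profile = ProvisioningProfileMetadata(
        Path("/profiles/example.mobileprovision"),
        "00000000-0000-0000-0000-000000000000",
        datetime.max,
        frozenset({"iOS"}),
        frozenset(),
        {"application-identifier": "AAAAAAAAAA.com.example.app"},
    )
    mismatch = TeamIdMismatch(
        profile=profile, team_id="AAAAAAAAAA", team_id_constraint="BBBBBBBBBB"
    )
    print(mismatch.log_message())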
+
+
+_T = TypeVar("_T")
+
+
+def interpret_provisioning_profile_diagnostics(
+    diagnostics: List[IProvisioningProfileDiagnostics],
+    bundle_id: str,
+    provisioning_profiles_dir: Path,
+    identities: List[CodeSigningIdentity],
+    log_file_path: Optional[Path] = None,
+) -> str:
+    if not diagnostics:
+        raise RuntimeError(
+            "Expected diagnostics information for at least one mismatching provisioning profile."
+        )
+
+    header = f"Failed to find provisioning profile in directory `{provisioning_profiles_dir}` that is suitable for code signing. Here is the best guess for how to fix it:\n\n⚠️ "
+    footer = f"\n\nFor more info about running on an iOS device read {META_IOS_BUILD_AND_RUN_ON_DEVICE_LINK}."
+    if log_file_path:
+        footer += (
+            f" Full list of mismatched profiles can be found at `{log_file_path}`.\n"
+        )
+    else:
+        provisioning_profile_errors = "\n\n".join(
+            [
+                f"`{mismatch.profile.file_path.name}`: {mismatch.log_message()}"
+                for mismatch in diagnostics
+            ]
+        )
+        footer += f" Full list of mismatched profiles:\n{provisioning_profile_errors}\n"
+
+    def find_mismatch(class_type: Type[_T]) -> Optional[_T]:
+        return next(
+            iter(
+                sorted(
+                    filter(lambda d: isinstance(d, class_type), diagnostics),
+                    key=lambda d: d.bundle_id_match_length,
+                    reverse=True,
+                )
+            ),
+            None,
+        )
+
+    if mismatch := find_mismatch(DeveloperCertificateMismatch):
+        identities_description = (
+            "WARNING: NO SIGNING IDENTITIES FOUND!"
+            if len(identities) == 0
+            else f"List of signing identities: `{identities}`."
+        )
+        return "\n".join(
+            [
+                header,
+                f"The provisioning profile `{mismatch.profile.file_path.name}` satisfies all constraints, but no matching certificates were found in your keychain.",
+                identities_description,
+                f"Execute `{META_IOS_CERTS_ALL_COMMAND}` or download and install the latest certificate from {META_IOS_DEVELOPER_CERTIFICATE_LINK}.",
+                footer,
+            ]
+        )
+
+    if mismatch := find_mismatch(EntitlementsMismatch):
+        return "".join(
+            [
+                header,
+                f"The provisioning profile `{mismatch.profile.file_path.name}` is the best match, but it doesn't contain all the needed entitlements. ",
+                f"Expected entitlement item with key `{mismatch.mismatched_key}` and value `{mismatch.mismatched_value}` is missing. ",
+                f"Usually that means the application entitlements were changed recently, the provisioning profile was updated, and you need to download & install the latest version of the provisioning profile for Bundle ID `{bundle_id}`. ",
+                f"Execute `{META_IOS_PROVISIONING_PROFILES_COMMAND}` or download it from {META_IOS_PROVISIONING_PROFILES_LINK}.",
+                footer,
+            ]
+        )
+
+    if mismatch := find_mismatch(UnsupportedPlatform):
+        supported_platforms = ", ".join([f"`{p}`" for p in mismatch.profile.platforms])
+        return "".join(
+            [
+                header,
+                f"The provisioning profile `{mismatch.profile.file_path.name}` is the best match, but it only supports the following platforms: {supported_platforms}. Unfortunately, it doesn't include the requested platform, `{mismatch.platform_constraint}`. ",
+                f"This could indicate two possibilities: either the provisioning profile was recently updated to include the needed platform, or there is a separate profile supporting the required platform that you are missing. In the latter case, you would need to download and install it from {META_IOS_PROVISIONING_PROFILES_LINK}.",
+                footer,
+            ]
+        )
+
+    if mismatch := find_mismatch(ProfileExpired):
+        return "".join(
+            [
+                header,
+                f"The provisioning profile `{mismatch.profile.file_path.name}` is the best match; however, it has expired. ",
+                f"Execute `{META_IOS_PROVISIONING_PROFILES_COMMAND}` to get the latest provisioning profiles. ",
+                f"Alternatively, please download and install a valid profile from {META_IOS_PROVISIONING_PROFILES_LINK}.",
+                footer,
+            ]
+        )
+
+    return "".join(
+        [
+            header,
+            f"No provisioning profile matching the Bundle ID `{bundle_id}` was found. ",
+            f"Execute `{META_IOS_PROVISIONING_PROFILES_COMMAND}` to get the latest provisioning profiles. ",
+            f"Alternatively, please download and install the appropriate profile from {META_IOS_PROVISIONING_PROFILES_LINK}.",
+            footer,
+        ]
+    )
diff --git a/prelude/apple/tools/code_signing/provisioning_profile_metadata.py b/prelude/apple/tools/code_signing/provisioning_profile_metadata.py
new file mode 100644
index 0000000000000..733b32e6a4fed
--- /dev/null
+++ b/prelude/apple/tools/code_signing/provisioning_profile_metadata.py
@@ -0,0 +1,96 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+from __future__ import annotations
+
+import hashlib
+from dataclasses import dataclass
+from datetime import datetime
+from pathlib import Path
+from typing import Any, Dict, FrozenSet
+
+from apple.tools.plistlib_utils import detect_format_and_loads
+
+from .app_id import AppId
+
+
+@dataclass
+class ProvisioningProfileMetadata:
+    # Path to the provisioning profile file
+    file_path: Path
+    uuid: str
+    # Naive datetime object with the timezone ignored, see https://bugs.python.org/msg110249
+    expiration_date: datetime
+    platforms: FrozenSet[str]
+    # Fingerprints are normalized to uppercase
+    developer_certificate_fingerprints: FrozenSet[str]
+    entitlements: Dict[str, Any]
+
+    _mergeable_entitlements_keys: FrozenSet[str] = frozenset(
+        [
+            "application-identifier",
+            "beta-reports-active",
+            "get-task-allow",
+            "com.apple.developer.aps-environment",
+            "com.apple.developer.team-identifier",
+        ]
+    )
+
+    # See `ProvisioningProfileMetadataFactory::getAppIDFromEntitlements` from `ProvisioningProfileMetadataFactory.java` in Buck v1
+    def get_app_id(self) -> AppId:
+        maybe_app_id = self.entitlements.get(
+            "application-identifier"
+        ) or self.entitlements.get("com.apple.application-identifier")
+        if not maybe_app_id:
+            raise RuntimeError(
+                "Entitlements do not contain app ID: {}".format(self.entitlements)
+            )
+        return AppId.from_string(maybe_app_id)
+
+    # See `ProvisioningProfileMetadata::getMergeableEntitlements` from `ProvisioningProfileMetadata.java` in Buck v1
+    def get_mergeable_entitlements(self) -> Dict[str, Any]:
+        return {
+            k: v
+            for k, v in self.entitlements.items()
+            if k in ProvisioningProfileMetadata._mergeable_entitlements_keys
+        }
+
+    # See `ProvisioningProfileMetadataFactory::fromProvisioningProfilePath` from `ProvisioningProfileMetadataFactory.java` in Buck v1
+    @staticmethod
+    def from_provisioning_profile_file_content(
+        file_path: Path, content: bytes
+    ) -> ProvisioningProfileMetadata:
+        root = detect_format_and_loads(content)
+        developer_certificate_fingerprints = {
+            hashlib.sha1(c).hexdigest().upper() for c in root["DeveloperCertificates"]
+        }
+        assert (
+            len(developer_certificate_fingerprints) > 0
+        ), "Expected at least one suitable certificate."
+        return ProvisioningProfileMetadata(
+            file_path=file_path,
+            uuid=root["UUID"],
+            expiration_date=root["ExpirationDate"],
+            platforms=frozenset(root["Platform"]),
+            developer_certificate_fingerprints=frozenset(
+                developer_certificate_fingerprints
+            ),
+            entitlements=root["Entitlements"],
+        )
+
+    def __hash__(self) -> int:
+        return hash(
+            (
+                self.file_path,
+                self.uuid,
+                self.expiration_date,
+                self.platforms,
+                self.developer_certificate_fingerprints,
+            )
+        )
diff --git a/prelude/apple/tools/code_signing/provisioning_profile_metadata_test.py b/prelude/apple/tools/code_signing/provisioning_profile_metadata_test.py
new file mode 100644
index 0000000000000..36170abef5cc3
--- /dev/null
+++ b/prelude/apple/tools/code_signing/provisioning_profile_metadata_test.py
@@ -0,0 +1,69 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+import datetime
+import unittest
+from pathlib import Path
+
+import pkg_resources
+
+from .app_id import AppId
+from .provisioning_profile_metadata import ProvisioningProfileMetadata
+
+
+class TestParse(unittest.TestCase):
+    def test_canary(self):
+        path = Path("test_resources/sample.mobileprovision")
+        file_content = pkg_resources.resource_string(__name__, str(path))
+        metadata = ProvisioningProfileMetadata.from_provisioning_profile_file_content(
+            path, file_content
+        )
+        self.assertEqual(metadata.uuid, "00000000-0000-0000-0000-000000000000")
+        self.assertEqual(
+            metadata.get_app_id(), AppId("ABCDE12345", "com.example.TestApp")
+        )
+        self.assertEqual(
+            metadata.expiration_date,
+            datetime.datetime.fromisoformat("9999-03-05T01:33:40"),
+        )
+        self.assertEqual(
+            metadata.developer_certificate_fingerprints,
+            {"BE16FC419BFB6B59A86BC08755BA0F332EC574FB"},
+        )
+        self.assertEqual(
+            metadata.platforms,
+            {"iOS"},
+        )
+
+    def test_qualified_entitlements_parsed(self):
+        path = Path("test_resources/qualified_sample.mobileprovision")
+        file_content = pkg_resources.resource_string(__name__, str(path))
+        metadata = ProvisioningProfileMetadata.from_provisioning_profile_file_content(
+            path, file_content
+        )
+        self.assertEqual(
+            metadata.get_app_id(), AppId("ABCDE12345", "com.example.TestApp")
+        )
+
+    def test_filtered_entitlements_stripped_out(self):
+        path = Path("test_resources/sample.mobileprovision")
+        file_content = pkg_resources.resource_string(__name__, str(path))
+        metadata = ProvisioningProfileMetadata.from_provisioning_profile_file_content(
+            path, file_content
+        )
+        self.assertIn(
+            "com.apple.developer.icloud-container-development-container-identifiers",
+            metadata.entitlements,
+        )
+        self.assertEqual(
+            metadata.get_mergeable_entitlements(),
+            {
+                "application-identifier": "ABCDE12345.com.example.TestApp",
+                "get-task-allow": False,
+                "com.apple.developer.team-identifier": "12345ABCDE",
+            },
+        )
diff --git a/prelude/apple/tools/code_signing/provisioning_profile_selection.py b/prelude/apple/tools/code_signing/provisioning_profile_selection.py
new file mode 100644
index 0000000000000..5e3c4a9b59bce
--- /dev/null
+++ b/prelude/apple/tools/code_signing/provisioning_profile_selection.py
@@ -0,0 +1,354 @@
+# Copyright (c) Meta
Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +import datetime +import logging +import re +from collections import defaultdict +from dataclasses import dataclass +from typing import Any, cast, Dict, List, Optional, Tuple + +from .app_id import AppId +from .apple_platform import ApplePlatform +from .identity import CodeSigningIdentity +from .info_plist_metadata import InfoPlistMetadata +from .provisioning_profile_diagnostics import ( + BundleIdMismatch, + DeveloperCertificateMismatch, + EntitlementsMismatch, + IProvisioningProfileDiagnostics, + ProfileExpired, + TeamIdMismatch, + UnsupportedPlatform, +) +from .provisioning_profile_metadata import ProvisioningProfileMetadata + +_LOGGER: logging.Logger = logging.getLogger(__name__) + + +class CodeSignProvisioningError(Exception): + pass + + +def _parse_team_id_from_entitlements( + entitlements: Optional[Dict[str, Any]] +) -> Optional[str]: + if not entitlements: + return None + maybe_app_id = AppId.infer_from_entitlements(entitlements) + if not maybe_app_id: + return None + return maybe_app_id.team_id + + +def _matches_or_array_is_subset_of( + entitlement_name: str, + expected_value: object, + actual_value: object, + platform: ApplePlatform, +) -> bool: + if expected_value is None: + return actual_value is None + if ( + actual_value is None + and platform.is_desktop() + and entitlement_name.startswith("com.apple.security") + ): + # For macOS apps, including Catalyst, the provisioning profile would _not_ have entries for + # the sandbox entitlements, so any value matches. + return True + if isinstance(expected_value, list) and isinstance(actual_value, list): + return set(expected_value).issubset(set(actual_value)) + return actual_value == expected_value + + +def _bundle_match_length(expected_bundle_id: str, bundle_id_pattern: str) -> int: + if bundle_id_pattern.endswith("*"): + # Chop the ending * if wildcard. + bundle_id_without_wildcard = bundle_id_pattern[:-1] + if expected_bundle_id.startswith(bundle_id_without_wildcard): + return len(bundle_id_without_wildcard) + elif expected_bundle_id == bundle_id_pattern: + return len(bundle_id_pattern) + return -1 + + +# For those keys let the tooling decide if code signing should fail or succeed (every other key +# mismatch results in provisioning profile being skipped). 
+_IGNORE_MISMATCH_ENTITLEMENTS_KEYS = { + "keychain-access-groups", + "application-identifier", + "com.apple.developer.associated-domains", + "com.apple.developer.icloud-container-development-container-identifiers", + "com.apple.developer.icloud-container-environment", + "com.apple.developer.icloud-container-identifiers", + "com.apple.developer.icloud-services", + "com.apple.developer.ubiquity-container-identifiers", + "com.apple.developer.ubiquity-kvstore-identifier", +} + + +def _check_entitlements_match( + expected_entitlements: Optional[Dict[str, Any]], + profile: ProvisioningProfileMetadata, + platform: ApplePlatform, + bundle_id_match_length: int, +) -> Tuple[bool, Optional[EntitlementsMismatch]]: + if expected_entitlements is None: + return (True, None) + for key, value in expected_entitlements.items(): + profile_entitlement = profile.entitlements.get(key) + if (key not in _IGNORE_MISMATCH_ENTITLEMENTS_KEYS) and ( + not _matches_or_array_is_subset_of( + key, value, profile_entitlement, platform + ) + ): + return ( + False, + EntitlementsMismatch( + profile=profile, + bundle_id_match_length=bundle_id_match_length, + mismatched_key=key, + mismatched_value=value, + ), + ) + return (True, None) + + +def _check_developer_certificates_match( + profile: ProvisioningProfileMetadata, + identities: List[CodeSigningIdentity], + bundle_id_match_length: int, +) -> Tuple[Optional[CodeSigningIdentity], Optional[DeveloperCertificateMismatch]]: + for identity in identities: + if identity.fingerprint in profile.developer_certificate_fingerprints: + return (identity, None) + return ( + None, + DeveloperCertificateMismatch( + profile=profile, bundle_id_match_length=bundle_id_match_length + ), + ) + + +def _make_multiple_matching_profiles_message( + profiles: list[ProvisioningProfileMetadata], + strict_search: bool, +) -> str: + messages = [f"Found MULTIPLE matching profiles: {len(profiles)}"] + messages += [ + f" Matching Profile = UUID:{profile.uuid}, file path: {profile.file_path}" + for profile in profiles + ] + + if strict_search: + messages += [ + "Strict provisioning profile search is ENABLED, build will FAIL due to ambiguous provisioning profile search results.", + "To resolve the problem, ensure only a single profile matches.", + "To unblock, you have two options:", + "Option 1: Disable strict provisioning profile search for the targets failing to build.", + " If the target failing to build is an `apple_bundle()`, set the `strict_provisioning_profile_search` attribute to `False`.", + " If the target failing to build is produced by `ios_binary()`, set the `bundle_strict_provisioning_profile_search` attribute to `False`.", + " You can commit such a change, so that the issue can be investigated without blocking other developers.", + " NB: This is a TEMPORARY WORKAROUND, as it only disables the strict checking, it does not resolve the ambiguity.", + "Option 2: Pass `--config apple.strict_provisioning_profile_search=false` as part of your build command.", + " DO NOT COMMIT such a change by adding this to any CI configs.", + ] + + return "\n".join(messages) + + +@dataclass +class SelectedProvisioningProfileInfo: + profile: ProvisioningProfileMetadata + identity: CodeSigningIdentity + + +def _filter_matching_selected_provisioning_profile_infos( + selected_profile_infos: list[SelectedProvisioningProfileInfo], + provisioning_profile_filter: Optional[str], +) -> list[SelectedProvisioningProfileInfo]: + if len(selected_profile_infos) <= 1 or (not provisioning_profile_filter): + return 
selected_profile_infos + + preference_regex = re.compile(provisioning_profile_filter) + return [ + matching_profile_info + for matching_profile_info in selected_profile_infos + if preference_regex.search(matching_profile_info.profile.file_path.name) + ] + + +# See `ProvisioningProfileStore::getBestProvisioningProfile` in `ProvisioningProfileStore.java` for Buck v1 equivalent +def select_best_provisioning_profile( + info_plist_metadata: InfoPlistMetadata, + code_signing_identities: List[CodeSigningIdentity], + provisioning_profiles: List[ProvisioningProfileMetadata], + entitlements: Optional[Dict[str, Any]], + platform: ApplePlatform, + strict_search: bool, + provisioning_profile_filter: Optional[str], +) -> Tuple[ + Optional[SelectedProvisioningProfileInfo], List[IProvisioningProfileDiagnostics] +]: + """Selects the best provisioning profile and certificate to use when code signing the bundle. + Such profile could be successfully used to sign the bundle taking into account + different constraints like entitlements or bundle ID. Given several profiles + could be successfully used to sign the bundle the "best" one is considered + to be a profile which matches bundle ID from `Info.plist` the most + (i.e. profiles with specific bundle ID are preferred to wildcard bundle IDs). + Args: + info_plist_metadata: Object representing `Info.plist` file in the bundle. + code_signing_identities: Code signing identities to choose from. + provisioning_profiles: Provisioning profiles to choose from. + entitlements: Code signing entitlements if any. + platform: Apple platform which the bundle is built for. + Returns: + Provisioning profile and certificate pair to use for code signing. + """ + maybe_team_id_constraint = _parse_team_id_from_entitlements(entitlements) + + best_match_length = -1 + + # Used for error messages + diagnostics: List[IProvisioningProfileDiagnostics] = [] + + def log_mismatched_profile(mismatch: IProvisioningProfileDiagnostics) -> None: + diagnostics.append(mismatch) + _LOGGER.info( + f"Skipping provisioning profile `{mismatch.profile.file_path.name}`: {mismatch.log_message()}" + ) + + selected_profile_infos_for_match_length = defaultdict(list) + + for profile in provisioning_profiles: + app_id = profile.get_app_id() + if maybe_team_id_constraint and maybe_team_id_constraint != app_id.team_id: + log_mismatched_profile( + TeamIdMismatch( + profile=profile, + team_id=app_id.team_id, + team_id_constraint=maybe_team_id_constraint, + ) + ) + continue + + bundle_id = app_id.bundle_id + current_match_length = _bundle_match_length( + info_plist_metadata.bundle_id, bundle_id + ) + if current_match_length < 0: + log_mismatched_profile( + BundleIdMismatch( + profile=profile, + bundle_id=app_id.bundle_id, + bundle_id_constraint=info_plist_metadata.bundle_id, + ) + ) + continue + + if datetime.datetime.now() > profile.expiration_date: + log_mismatched_profile( + ProfileExpired( + profile=profile, bundle_id_match_length=current_match_length + ) + ) + continue + + maybe_provisioning_profile_name = platform.provisioning_profile_name() + if ( + maybe_provisioning_profile_name + and maybe_provisioning_profile_name not in profile.platforms + ): + log_mismatched_profile( + UnsupportedPlatform( + profile=profile, + bundle_id_match_length=current_match_length, + platform_constraint=platform, + ) + ) + continue + + entitlements_matched, mismatch = _check_entitlements_match( + expected_entitlements=entitlements, + profile=profile, + platform=platform, + bundle_id_match_length=current_match_length, + ) + if 
not entitlements_matched: + log_mismatched_profile(cast(EntitlementsMismatch, mismatch)) + continue + + certificate, mismatch = _check_developer_certificates_match( + profile=profile, + identities=code_signing_identities, + bundle_id_match_length=current_match_length, + ) + if not certificate: + log_mismatched_profile(cast(DeveloperCertificateMismatch, mismatch)) + continue + + _LOGGER.info( + f"Matching provisioning profile `{profile.file_path.name}` with score {current_match_length}" + ) + + selected_profile_info = SelectedProvisioningProfileInfo(profile, certificate) + selected_profile_infos_for_match_length[current_match_length] += [ + selected_profile_info + ] + + if current_match_length > best_match_length: + best_match_length = current_match_length + + all_matching_selected_profile_infos = selected_profile_infos_for_match_length.get( + best_match_length, [] + ) + + all_matching_selected_profile_infos = ( + _filter_matching_selected_provisioning_profile_infos( + all_matching_selected_profile_infos, provisioning_profile_filter + ) + ) + + if len(all_matching_selected_profile_infos) > 1: + all_matching_profiles = [ + selected_profile_info.profile + for selected_profile_info in all_matching_selected_profile_infos + ] + multiple_profiles_message = _make_multiple_matching_profiles_message( + all_matching_profiles, + strict_search, + ) + _LOGGER.info(multiple_profiles_message) + if strict_search: + raise CodeSignProvisioningError(multiple_profiles_message) + + result = ( + # By definition, we always pick the _last_ matching prov profile + all_matching_selected_profile_infos[-1] + if all_matching_selected_profile_infos + else None + ) + + if result: + _LOGGER.info( + ( + f"Found matching provisioning profile and identity\n" + f" Selected Identity: {result.identity}\n" + f" Provisioning Profile: `{result.profile.file_path.name}`\n" + f" UUID: {result.profile.uuid}\n" + f" File: {result.profile.file_path}\n" + f" Expiration: {result.profile.expiration_date}\n" + f" Platforms: {result.profile.platforms}\n" + f" Fingerprints: {result.profile.developer_certificate_fingerprints}\n" + f" Entitlements: {result.profile.entitlements}" + ) + ) + + return result, diagnostics diff --git a/prelude/apple/tools/code_signing/provisioning_profile_selection_test.py b/prelude/apple/tools/code_signing/provisioning_profile_selection_test.py new file mode 100644 index 0000000000000..8d7f213330d73 --- /dev/null +++ b/prelude/apple/tools/code_signing/provisioning_profile_selection_test.py @@ -0,0 +1,513 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
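The tests below exercise this selection logic end to end; for orientation, a compact sketch of a successful call (all values hypothetical, import paths assuming the `apple.tools` layout used in this diff):

    from datetime import datetime
    from pathlib import Path

    from apple.tools.code_signing.apple_platform import ApplePlatform
    from apple.tools.code_signing.identity import CodeSigningIdentity
    from apple.tools.code_signing.info_plist_metadata import InfoPlistMetadata
    from apple.tools.code_signing.provisioning_profile_metadata import (
        ProvisioningProfileMetadata,
    )
    from apple.tools.code_signing.provisioning_profile_selection import (
        select_best_provisioning_profile,
    )

    identity = CodeSigningIdentity("fingerprint", "name")
    profile = ProvisioningProfileMetadata(
        Path("/profiles/app.mobileprovision"),
        "00000000-0000-0000-0000-000000000000",
        datetime.max,
        frozenset({"iOS"}),
        frozenset({identity.fingerprint}),
        {"application-identifier": "ABCDEFGHIJ.com.company.application"},
    )
    selected, diagnostics = select_best_provisioning_profile(
        InfoPlistMetadata("com.company.application", None, False),
        [identity],
        [profile],
        None,  # entitlements
        ApplePlatform.ios_device,
        False,  # strict_search
        None,  # provisioning_profile_filter
    )
    assert selected is not None and selected.profile is profile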
+ +import copy +import unittest +from datetime import datetime +from pathlib import Path +from typing import List + +from .apple_platform import ApplePlatform +from .identity import CodeSigningIdentity +from .info_plist_metadata import InfoPlistMetadata +from .provisioning_profile_diagnostics import IProvisioningProfileDiagnostics +from .provisioning_profile_metadata import ProvisioningProfileMetadata +from .provisioning_profile_selection import ( + select_best_provisioning_profile, + SelectedProvisioningProfileInfo, +) + + +class TestSelection(unittest.TestCase): + def verify_diagnostic_info_candidate_profile( + self, + diagnostic_info: List[IProvisioningProfileDiagnostics], + reason: str, + ): + self.assertEqual(len(diagnostic_info), 1) + candidate_profile = diagnostic_info[0] + self.assertEqual( + candidate_profile.log_message(), + reason, + ) + + def test_expired_profiles_are_ignored(self): + info_plist = InfoPlistMetadata("com.company.application", None, False) + identity = CodeSigningIdentity( + "fingerprint", + "name", + ) + expired_provisioning_profile = ProvisioningProfileMetadata( + Path("/foo"), + "00000000-0000-0000-0000-000000000000", + datetime.min, + {"iOS"}, + {identity.fingerprint}, + {"application-identifier": "ABCDEFGHIJ.com.company.application"}, + ) + selected, diagnostic_info = select_best_provisioning_profile( + info_plist, + [identity], + [expired_provisioning_profile], + {}, + ApplePlatform.ios_device, + False, + None, + ) + self.assertIsNone(selected) + self.verify_diagnostic_info_candidate_profile( + diagnostic_info, + "Provisioning profile expired.", + ) + + fresh_provisioning_profiles = copy.copy(expired_provisioning_profile) + fresh_provisioning_profiles.expiration_date = datetime.max + selected, _ = select_best_provisioning_profile( + info_plist, + [identity], + [fresh_provisioning_profiles], + {}, + ApplePlatform.ios_device, + False, + None, + ) + self.assertIsNotNone(selected) + + def test_multiple_matching_profiles_strict_mode(self): + info_plist = InfoPlistMetadata("com.company.application", None, False) + identity = CodeSigningIdentity( + "fingerprint", + "name", + ) + first = ProvisioningProfileMetadata( + Path("/foo.first"), + "00000000-0000-0000-0000-000000000000", + datetime.max, + {"iOS"}, + {identity.fingerprint}, + {"application-identifier": "AAAAAAAAAA.*"}, + ) + second = ProvisioningProfileMetadata( + Path("/foo.second"), + "00000000-0000-0000-0000-000000000000", + datetime.max, + {"iOS"}, + {identity.fingerprint}, + {"application-identifier": "AAAAAAAAAA.*"}, + ) + profiles = [ + first, + second, + ] + + selection_failed = False + try: + _, _ = select_best_provisioning_profile( + info_plist, + [identity], + profiles, + {"keychain-access-groups": ["AAAAAAAAAA.*"]}, + ApplePlatform.ios_device, + True, + None, + ) + except Exception: + selection_failed = True + + self.assertTrue(selection_failed) + + def test_multiple_matching_profiles_with_preference(self): + info_plist = InfoPlistMetadata("com.company.application", None, False) + identity = CodeSigningIdentity( + "fingerprint", + "name", + ) + first = ProvisioningProfileMetadata( + Path("/foo.first"), + "00000000-0000-0000-0000-000000000000", + datetime.max, + {"iOS"}, + {identity.fingerprint}, + {"application-identifier": "AAAAAAAAAA.*"}, + ) + second = ProvisioningProfileMetadata( + Path("/foo.second"), + "00000000-0000-0000-0000-000000000000", + datetime.max, + {"iOS"}, + {identity.fingerprint}, + {"application-identifier": "AAAAAAAAAA.*"}, + ) + third = ProvisioningProfileMetadata( + 
Path("/foo.third"), + "00000000-0000-0000-0000-000000000000", + datetime.max, + {"iOS"}, + {identity.fingerprint}, + {"application-identifier": "AAAAAAAAAA.*"}, + ) + profiles = [ + first, + second, + third, + ] + + selection_failed = False + try: + _, _ = select_best_provisioning_profile( + info_plist, + [identity], + profiles, + {"keychain-access-groups": ["AAAAAAAAAA.*"]}, + ApplePlatform.ios_device, + True, + None, + ) + except Exception: + selection_failed = True + + # Check selection fails without preference + self.assertTrue(selection_failed) + + selected_profile_info, _ = select_best_provisioning_profile( + info_plist, + [identity], + profiles, + {"keychain-access-groups": ["AAAAAAAAAA.*"]}, + ApplePlatform.ios_device, + True, + ".+second", + ) + # Check the middle profile got chosen (i.e., not first or last) + self.assertEqual(selected_profile_info.profile, second) + + def test_prefix_override(self): + info_plist = InfoPlistMetadata("com.company.application", None, False) + identity = CodeSigningIdentity( + "fingerprint", + "name", + ) + expected = ProvisioningProfileMetadata( + Path("/foo"), + "00000000-0000-0000-0000-000000000000", + datetime.max, + {"iOS"}, + {identity.fingerprint}, + {"application-identifier": "AAAAAAAAAA.*"}, + ) + profiles = [ + expected, + ProvisioningProfileMetadata( + Path("/foo"), + "00000000-0000-0000-0000-000000000000", + datetime.max, + {"iOS"}, + {identity.fingerprint}, + {"application-identifier": "BBBBBBBBBB.com.company.application"}, + ), + ] + selected, _ = select_best_provisioning_profile( + info_plist, + [identity], + profiles, + {"keychain-access-groups": ["AAAAAAAAAA.*"]}, + ApplePlatform.ios_device, + False, + None, + ) + self.assertEqual(selected, SelectedProvisioningProfileInfo(expected, identity)) + + def test_entitlement_keys_are_matched(self): + info_plist = InfoPlistMetadata("com.company.application", None, False) + identity = CodeSigningIdentity( + "fingerprint", + "name", + ) + expected = ProvisioningProfileMetadata( + Path("/foo"), + "11111111-1111-1111-1111-111111111111", + datetime.max, + {"iOS"}, + {identity.fingerprint}, + { + "application-identifier": "AAAAAAAAAA.com.company.application", + "keychain-access-groups": ["AAAAAAAAAA.*"], + "aps-environment": "production", + "com.apple.security.application-groups": ["foo", "bar", "baz"], + }, + ) + unexpected = ProvisioningProfileMetadata( + Path("/foo"), + "00000000-0000-0000-0000-000000000000", + datetime.max, + {"iOS"}, + {identity.fingerprint}, + { + "application-identifier": "AAAAAAAAAA.com.company.application", + "keychain-access-groups": ["AAAAAAAAAA.*"], + "aps-environment": "development", + "com.apple.security.application-groups": ["foo", "bar"], + }, + ) + profiles = [ + expected, + unexpected, + ] + selected, _ = select_best_provisioning_profile( + info_plist, + [identity], + profiles, + { + "keychain-access-groups": ["AAAAAAAAAA.*"], + "aps-environment": "production", + "com.apple.security.application-groups": ["foo", "bar"], + }, + ApplePlatform.ios_device, + False, + None, + ) + self.assertEqual(selected, SelectedProvisioningProfileInfo(expected, identity)) + + selected, _ = select_best_provisioning_profile( + info_plist, + [identity], + profiles, + { + "aps-environment": "production", + "com.apple.security.application-groups": ["foo", "bar"], + }, + ApplePlatform.ios_device, + False, + None, + ) + self.assertEqual(selected, SelectedProvisioningProfileInfo(expected, identity)) + + selected, diagnostic_info = select_best_provisioning_profile( + info_plist, + [identity], 
+ [unexpected], + { + "aps-environment": "production", + "com.apple.security.application-groups": ["foo", "xxx"], + }, + ApplePlatform.ios_device, + False, + None, + ) + self.assertIsNone(selected) + self.verify_diagnostic_info_candidate_profile( + diagnostic_info, + "Expected entitlement item key `aps-environment` with value `production` not found in provisioning profile.", + ) + + def test_only_profiles_containing_valid_fingerprints_are_matched(self): + info_plist = InfoPlistMetadata("com.company.application", None, False) + valid_identity = CodeSigningIdentity( + "BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB", + "iPhone Developer: Foo Bar (54321EDCBA)", + ) + other_identity = CodeSigningIdentity( + "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", + "iPhone Developer: Foo Bar (ABCDE12345)", + ) + expected = ProvisioningProfileMetadata( + Path("/foo"), + "11111111-1111-1111-1111-111111111111", + datetime.max, + {"iOS"}, + {valid_identity.fingerprint, other_identity.fingerprint}, + { + "application-identifier": "AAAAAAAAAA.*", + }, + ) + unexpected = ProvisioningProfileMetadata( + Path("/foo"), + "00000000-0000-0000-0000-000000000000", + datetime.max, + {"iOS"}, + {other_identity.fingerprint}, + { + "application-identifier": "AAAAAAAAAA.com.company.application", + }, + ) + + profiles = [expected, unexpected] + selected, _ = select_best_provisioning_profile( + info_plist, + [valid_identity], + profiles, + {}, + ApplePlatform.ios_device, + False, + None, + ) + self.assertEqual( + selected, SelectedProvisioningProfileInfo(expected, valid_identity) + ) + selected, diagnostic_info = select_best_provisioning_profile( + info_plist, + [valid_identity], + [unexpected], + {}, + ApplePlatform.ios_device, + False, + None, + ) + self.assertIsNone(selected) + self.verify_diagnostic_info_candidate_profile( + diagnostic_info, + "Expected identity fingerprint not found in profile's certificate fingerprints `AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA`.", + ) + + def test_matches_specific_app(self): + info_plist = InfoPlistMetadata("com.facebook.test", None, False) + identity = CodeSigningIdentity( + "fingerprint", + "name", + ) + expected = ProvisioningProfileMetadata( + Path("/foo"), + "00000000-0000-0000-0000-000000000000", + datetime.max, + {"iOS"}, + {identity.fingerprint}, + { + "application-identifier": "BBBBBBBBBB.com.facebook.test", + }, + ) + profiles = [ + expected, + ProvisioningProfileMetadata( + Path("/foo"), + "11111111-1111-1111-1111-111111111111", + datetime.max, + {"iOS"}, + {identity.fingerprint}, + { + "application-identifier": "BBBBBBBBBB.com.facebook.*", + }, + ), + ] + selected, _ = select_best_provisioning_profile( + info_plist, + [identity], + profiles, + {}, + ApplePlatform.ios_device, + False, + None, + ) + self.assertEqual(selected, SelectedProvisioningProfileInfo(expected, identity)) + + selected, _ = select_best_provisioning_profile( + info_plist, + [identity], + reversed(profiles), + {}, + ApplePlatform.ios_device, + False, + None, + ) + self.assertEqual(selected, SelectedProvisioningProfileInfo(expected, identity)) + + def test_matches_wildcard(self): + info_plist = InfoPlistMetadata("com.facebook.test", None, False) + identity = CodeSigningIdentity( + "fingerprint", + "name", + ) + expected = ProvisioningProfileMetadata( + Path("/foo"), + "00000000-0000-0000-0000-000000000000", + datetime.max, + {"iOS"}, + {identity.fingerprint}, + { + "application-identifier": "BBBBBBBBBB.*", + }, + ) + selected, _ = select_best_provisioning_profile( + info_plist, + [identity], + [expected], + None, + 
ApplePlatform.ios_device, + False, + None, + ) + self.assertEqual(selected, SelectedProvisioningProfileInfo(expected, identity)) + + def test_force_included_app_entitlements(self): + info_plist = InfoPlistMetadata("com.facebook.test", None, False) + identity = CodeSigningIdentity( + "fingerprint", + "name", + ) + profile = ProvisioningProfileMetadata( + Path("/foo"), + "00000000-0000-0000-0000-000000000000", + datetime.max, + {"iOS"}, + {identity.fingerprint}, + { + "application-identifier": "AAAAAAAAAA.com.facebook.test", + "keychain-access-groups": ["AAAAAAAAAA.*"], + "aps-environment": "production", + }, + ) + selected, _ = select_best_provisioning_profile( + info_plist, + [identity], + [profile], + { + # Force included key, even if not present in the profile + "application-identifier": "AAAAAAAAAA.com.facebook.BuckApp", + "keychain-access-groups": ["AAAAAAAAAA.*"], + "aps-environment": "production", + }, + ApplePlatform.ios_device, + False, + None, + ) + self.assertIsNotNone(selected) + + def test_unmatched_app_entitlement(self): + info_plist = InfoPlistMetadata("com.facebook.test", None, False) + identity = CodeSigningIdentity( + "fingerprint", + "name", + ) + profile = ProvisioningProfileMetadata( + Path("/foo"), + "00000000-0000-0000-0000-000000000000", + datetime.max, + {"iOS"}, + {identity.fingerprint}, + { + "application-identifier": "AAAAAAAAAA.com.facebook.test", + "keychain-access-groups": ["AAAAAAAAAA.*"], + "aps-environment": "production", + }, + ) + selected, diagnostic_info = select_best_provisioning_profile( + info_plist, + [identity], + [profile], + { + "keychain-access-groups": ["AAAAAAAAAA.*"], + "aps-environment": "production", + "com.made.up.entitlement": "buck", + }, + ApplePlatform.ios_device, + False, + None, + ) + self.assertIsNone(selected) + self.verify_diagnostic_info_candidate_profile( + diagnostic_info, + "Expected entitlement item key `com.made.up.entitlement` with value `buck` not found in provisioning profile.", + ) diff --git a/prelude/apple/tools/code_signing/read_provisioning_profile_command_factory.py b/prelude/apple/tools/code_signing/read_provisioning_profile_command_factory.py new file mode 100644 index 0000000000000..c6f09fce8c3ff --- /dev/null +++ b/prelude/apple/tools/code_signing/read_provisioning_profile_command_factory.py @@ -0,0 +1,37 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
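The next file assembles the `openssl smime` invocation used to decode a `.mobileprovision` file into plain plist XML. A hedged sketch of running that command directly (the profile path is hypothetical; requires `openssl` on the PATH):

    import subprocess

    command = ["openssl", "smime", "-inform", "der", "-verify", "-noverify", "-nosigs", "-in"]
    # Hypothetical path; returns the decoded plist XML of the profile.
    plist_xml = subprocess.check_output(command + ["/profiles/app.mobileprovision"])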
+ +# pyre-strict + +from abc import ABCMeta, abstractmethod +from pathlib import Path +from typing import List, Union + + +class IReadProvisioningProfileCommandFactory(metaclass=ABCMeta): + @abstractmethod + def read_provisioning_profile_command(self, path: Path) -> List[Union[str, Path]]: + raise NotImplementedError + + +class DefaultReadProvisioningProfileCommandFactory( + IReadProvisioningProfileCommandFactory +): + # See `DEFAULT_READ_COMMAND` in `AppleConfig.java` in Buck v1 + _command = [ + "openssl", + "smime", + "-inform", + "der", + "-verify", + "-noverify", + "-nosigs", + "-in", + ] + + def read_provisioning_profile_command(self, path: Path) -> List[Union[str, Path]]: + return DefaultReadProvisioningProfileCommandFactory._command + [path] diff --git a/prelude/apple/tools/code_signing/test_resources/Entitlements.plist b/prelude/apple/tools/code_signing/test_resources/Entitlements.plist new file mode 100644 index 0000000000000..d0c34258fc14d --- /dev/null +++ b/prelude/apple/tools/code_signing/test_resources/Entitlements.plist @@ -0,0 +1,12 @@ + + + + + application-identifier + ABCDE12345.com.example.TestApp + keychain-access-groups + + ABCDE12345.com.example.TestApp + + + diff --git a/prelude/apple/tools/code_signing/test_resources/qualified_sample.mobileprovision b/prelude/apple/tools/code_signing/test_resources/qualified_sample.mobileprovision new file mode 100644 index 0000000000000..d0a2945fd7c52 --- /dev/null +++ b/prelude/apple/tools/code_signing/test_resources/qualified_sample.mobileprovision @@ -0,0 +1,78 @@ + + + + + AppIDName + Example + ApplicationIdentifierPrefix + + ABCDE12345 + + CreationDate + 2015-07-08T20:27:14Z + Platform + + iOS + + DeveloperCertificates + + + MIIDSzCCAjOgAwIBAgIBATANBgkqhkiG9w0BAQsFADBTMSAwHgYDVQQDDBdCdWNrIFRlc3QgQ29kZXNp + Z24gQ2VydDELMAkGA1UEBhMCVVMxIjAgBgkqhkiG9w0BCQEWE2V4YW1wbGVAZXhhbXBsZS5jb20wHhcN + MTUxMDIzMjAwNzI1WhcNMzIwMzI3MjAwNzI1WjBTMSAwHgYDVQQDDBdCdWNrIFRlc3QgQ29kZXNpZ24g + Q2VydDELMAkGA1UEBhMCVVMxIjAgBgkqhkiG9w0BCQEWE2V4YW1wbGVAZXhhbXBsZS5jb20wggEiMA0G + CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrmmMdeV17d72ihmdbB0MTn5v3iu9LSGVYlbnJbI+FNmDJ + 1U/0PAR7aLIPux0puN3N1Rob5VMdSZCG+SvsjV5zUNyepiRYlhf1jMyYTGDuoF8EOtfE94w9kr1EH9gl + aKRqir4770oXrFywbOaDO1iLPkpiVnfKVkkoU4dM0UD3YEtXoM70LIpYouGtVvNyc0Sx9/emvyiMH3iA + yQz5erVdfJQ2O85eyXz+U6rrcsNQpE0AuyQNnBnm0irSvtvnoPxlCsf0EbLnmnkDlv8yTrgCxzj/jmys + 0zs+EZFAjYCX/I2ef63Ll+Wm7ilh7CjwNaRMIQmL0QpJIR9kUYnp3v33AgMBAAGjKjAoMA4GA1UdDwEB + /wQEAwIHgDAWBgNVHSUBAf8EDDAKBggrBgEFBQcDAzANBgkqhkiG9w0BAQsFAAOCAQEATpWzY5IXhyb4 + PXiRS64Q6Sh2YXj6Dot2srWC+FJwWngGbF6J8aQfoymsRTkgTa+69WinMefmdRkEzysmXvAUfD1iWpBX + jRREQXkUhmt2FwqKp8pWbYSlgOTaQMojQn3DdvDu8l5ZFIrU+8Z8X4g/aggvffoIOygHvhbwkcgrA6Lq + TziJssmKd56+QKfVfx52vg6pv1Ok03pmVsQ8semHTJr53juc4QutADhtJBXe9inVs1evosO15At0N731 + Lz37Bzwedpu8DSEr4PIdwF5yBDRbQoyAF29zQaJgO/mU2G4i9wYTZJrj75BD/qr2ukjQOYOtz4IVNUuz + kPu+rDPz4A== + + + Entitlements + + keychain-access-groups + + ABCDE12345.* + + com.apple.security.get-task-allow + + com.apple.application-identifier + ABCDE12345.com.example.TestApp + com.apple.security.application-groups + + group.com.example + + com.apple.developer.team-identifier + 12345ABCDE + com.apple.developer.icloud-container-development-container-identifiers + + iCloud.com.appleseedinc.MyProject + iCloud.com.appleseedinc.container1 + + + ExpirationDate + 9999-03-05T01:33:40Z + Name + Example + ProvisionsAllDevices + + TeamIdentifier + + 12345ABCDE + + TeamName + Example + TimeToLive + 240 + UUID + 00000000-0000-0000-0000-000000000000 + Version + 1 + + diff 
--git a/prelude/apple/tools/code_signing/test_resources/sample.mobileprovision b/prelude/apple/tools/code_signing/test_resources/sample.mobileprovision new file mode 100644 index 0000000000000..4c3dca2e85e88 --- /dev/null +++ b/prelude/apple/tools/code_signing/test_resources/sample.mobileprovision @@ -0,0 +1,78 @@ + + + + + AppIDName + Example + ApplicationIdentifierPrefix + + ABCDE12345 + + CreationDate + 2015-07-08T20:27:14Z + Platform + + iOS + + DeveloperCertificates + + + MIIDSzCCAjOgAwIBAgIBATANBgkqhkiG9w0BAQsFADBTMSAwHgYDVQQDDBdCdWNrIFRlc3QgQ29kZXNp + Z24gQ2VydDELMAkGA1UEBhMCVVMxIjAgBgkqhkiG9w0BCQEWE2V4YW1wbGVAZXhhbXBsZS5jb20wHhcN + MTUxMDIzMjAwNzI1WhcNMzIwMzI3MjAwNzI1WjBTMSAwHgYDVQQDDBdCdWNrIFRlc3QgQ29kZXNpZ24g + Q2VydDELMAkGA1UEBhMCVVMxIjAgBgkqhkiG9w0BCQEWE2V4YW1wbGVAZXhhbXBsZS5jb20wggEiMA0G + CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrmmMdeV17d72ihmdbB0MTn5v3iu9LSGVYlbnJbI+FNmDJ + 1U/0PAR7aLIPux0puN3N1Rob5VMdSZCG+SvsjV5zUNyepiRYlhf1jMyYTGDuoF8EOtfE94w9kr1EH9gl + aKRqir4770oXrFywbOaDO1iLPkpiVnfKVkkoU4dM0UD3YEtXoM70LIpYouGtVvNyc0Sx9/emvyiMH3iA + yQz5erVdfJQ2O85eyXz+U6rrcsNQpE0AuyQNnBnm0irSvtvnoPxlCsf0EbLnmnkDlv8yTrgCxzj/jmys + 0zs+EZFAjYCX/I2ef63Ll+Wm7ilh7CjwNaRMIQmL0QpJIR9kUYnp3v33AgMBAAGjKjAoMA4GA1UdDwEB + /wQEAwIHgDAWBgNVHSUBAf8EDDAKBggrBgEFBQcDAzANBgkqhkiG9w0BAQsFAAOCAQEATpWzY5IXhyb4 + PXiRS64Q6Sh2YXj6Dot2srWC+FJwWngGbF6J8aQfoymsRTkgTa+69WinMefmdRkEzysmXvAUfD1iWpBX + jRREQXkUhmt2FwqKp8pWbYSlgOTaQMojQn3DdvDu8l5ZFIrU+8Z8X4g/aggvffoIOygHvhbwkcgrA6Lq + TziJssmKd56+QKfVfx52vg6pv1Ok03pmVsQ8semHTJr53juc4QutADhtJBXe9inVs1evosO15At0N731 + Lz37Bzwedpu8DSEr4PIdwF5yBDRbQoyAF29zQaJgO/mU2G4i9wYTZJrj75BD/qr2ukjQOYOtz4IVNUuz + kPu+rDPz4A== + + + Entitlements + + keychain-access-groups + + ABCDE12345.* + + get-task-allow + + application-identifier + ABCDE12345.com.example.TestApp + com.apple.security.application-groups + + group.com.example + + com.apple.developer.team-identifier + 12345ABCDE + com.apple.developer.icloud-container-development-container-identifiers + + iCloud.com.appleseedinc.MyProject + iCloud.com.appleseedinc.container1 + + + ExpirationDate + 9999-03-05T01:33:40Z + Name + Example + ProvisionsAllDevices + + TeamIdentifier + + 12345ABCDE + + TeamName + Example + TimeToLive + 240 + UUID + 00000000-0000-0000-0000-000000000000 + Version + 1 + + diff --git a/prelude/apple/tools/defs.bzl b/prelude/apple/tools/defs.bzl new file mode 100644 index 0000000000000..55f20fa39734f --- /dev/null +++ b/prelude/apple/tools/defs.bzl @@ -0,0 +1,23 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
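+# Illustrative usage of the `meta_python_test` wrapper defined below (target
+# name and sources are hypothetical, not part of this change):
+#
+#   meta_python_test(
+#       name = "my_tool_tests",
+#       srcs = ["my_tool_tests.py"],
+#   )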
+ +# @oss-disable: load("@fbsource//tools/build_defs:python_platform.bzl", "set_platform_decorator_for_python") +load("@prelude//:native.bzl", _native = "native") + +set_platform_decorator_for_python = lambda **kwargs: kwargs # @oss-enable + +def meta_python_test(name, **kwargs): + # Set the platform attributes as needed for proper exec platform resolution + kwargs = set_platform_decorator_for_python( + # @oss-disable: set_python_constraint_overrides = True, + **kwargs + ) + + _native.python_test( + name = name, + **kwargs + ) diff --git a/prelude/apple/tools/dry_codesign_tool.py b/prelude/apple/tools/dry_codesign_tool.py new file mode 100644 index 0000000000000..38a34e7996b63 --- /dev/null +++ b/prelude/apple/tools/dry_codesign_tool.py @@ -0,0 +1,74 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +import argparse +import plistlib +import shutil + +from pathlib import Path + +_CODE_SIGN_DRY_RUN_ARGS_FILE = "BUCK_code_sign_args.plist" +_CODE_SIGN_DRY_RUN_ENTITLEMENTS_FILE = "BUCK_code_sign_entitlements.plist" + + +def _args_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser( + description=""" + Tool which implements `DryCodeSignStep` class from buck1. + Instead of code signing the bundle it just creates a file named `BUCK_code_sign_args.plist` inside, + which contains all parameters needed to perform a deferred signing later. + """ + ) + parser.add_argument( + "root", + type=Path, + ) + parser.add_argument( + "--entitlements", + metavar="", + type=Path, + required=False, + help="Path to file with entitlements to be used during code signing.", + ) + parser.add_argument( + "--identity", + type=str, + required=True, + ) + parser.add_argument( + "--extra-paths-to-sign", + type=str, + nargs="*", + ) + + return parser + + +def _main() -> None: + args = _args_parser().parse_args() + content = { + # This is always empty string if you check `DryCodeSignStep` class usages in buck1 + "relative-path-to-sign": "", + "use-entitlements": args.entitlements is not None, + "identity": args.identity, + } + if args.extra_paths_to_sign: + content["extra-paths-to-sign"] = args.extra_paths_to_sign + with open(args.root / _CODE_SIGN_DRY_RUN_ARGS_FILE, "wb") as f: + # Do not sort to keep the ordering same as in buck1. + plistlib.dump(content, f, sort_keys=False, fmt=plistlib.FMT_XML) + if args.entitlements: + shutil.copy2( + args.entitlements, + args.root / _CODE_SIGN_DRY_RUN_ENTITLEMENTS_FILE, + ) + + +if __name__ == "__main__": + _main() diff --git a/prelude/apple/tools/framework_sanitizer.py b/prelude/apple/tools/framework_sanitizer.py new file mode 100644 index 0000000000000..8a314dd1eb934 --- /dev/null +++ b/prelude/apple/tools/framework_sanitizer.py @@ -0,0 +1,72 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +""" +Removes files from a framework that are not necessary when distributing +inside app bundles. 
Specifically Modules/*, Headers/*, and Documentation/*.

Example Usage:
framework_sanitizer.py --input original/Foo.framework \
--output output/Foo.framework
"""

import argparse
import os
import re
import shutil

from pathlib import Path
from typing import Callable, Iterable


def _should_ignore(
    framework_root: str,
) -> Callable[[str, list[str]], Iterable[str]]:
    prohibited: list[str] = ["Modules", "Headers", "Documentation"]

    def _should_ignore_impl(root: str, contents: list[str]) -> Iterable[str]:
        if re.sub(r"/Versions/[A-Z]", "", root) == framework_root:
            return prohibited
        return []

    return _should_ignore_impl


def main() -> None:
    parser = argparse.ArgumentParser(
        description="Prepare a framework for distribution by removing unnecessary files."
    )

    parser.add_argument("--input", required=True)
    parser.add_argument("--output", required=True)
    parser.add_argument("--replacement-binary")
    args = parser.parse_args()

    out_path = Path(args.output)

    shutil.copytree(
        args.input,
        out_path,
        symlinks=True,
        dirs_exist_ok=False,
        ignore=_should_ignore(args.input),
    )

    if args.replacement_binary:
        framework_name = os.path.splitext(os.path.basename(out_path))[0]
        # Use realpath() because for macOS versioned bundles
        # we may need to follow a symlink:
        framework_binary_path = os.path.realpath(out_path / framework_name)
        # 0o644 (rw-r--r--): note the octal literal; a plain decimal 644
        # would set an entirely different mode.
        os.chmod(framework_binary_path, 0o644)

        shutil.copy(args.replacement_binary, framework_binary_path)


if __name__ == "__main__":
    main()
diff --git a/prelude/apple/tools/index/BUCK.v2 b/prelude/apple/tools/index/BUCK.v2
new file mode 100644
index 0000000000000..83078b7209a4c
--- /dev/null
+++ b/prelude/apple/tools/index/BUCK.v2
@@ -0,0 +1,25 @@
+load("@prelude//utils:source_listing.bzl", "source_listing")
+load("@prelude//apple/tools/defs.bzl", "meta_python_test")
+
+source_listing()
+
+python_bootstrap_binary(
+    name = "merge_index_store",
+    main = "merge_index_store.py",
+    visibility = ["PUBLIC"],
+    deps = [":merge_index_store_lib"],
+)
+
+python_bootstrap_library(
+    name = "merge_index_store_lib",
+    srcs = ["merge_index_store.py"],
+    tests = [":merge_index_store_tests"],
+)
+
+meta_python_test(
+    name = "merge_index_store_tests",
+    srcs = [
+        "merge_index_store.py",
+        "merge_index_store_tests.py",
+    ],
+)
diff --git a/prelude/apple/tools/index/merge_index_store.py b/prelude/apple/tools/index/merge_index_store.py
new file mode 100644
index 0000000000000..b690551cd18bd
--- /dev/null
+++ b/prelude/apple/tools/index/merge_index_store.py
@@ -0,0 +1,69 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
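+# Illustrative invocation (paths are hypothetical, not part of this change):
+#
+#   python3 merge_index_store.py -d out/merged_index_store \
+#       -s module_a/index_store module_b/index_store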
+
+# pyre-strict
+
+import argparse
+import os
+import subprocess
+import sys
+from argparse import Namespace
+from concurrent.futures import as_completed, ThreadPoolExecutor
+from pathlib import Path
+
+MAX_WORKERS = 8
+
+
+def parse_arguments() -> Namespace:
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-d", "--dest", type=str, required=True)
+    parser.add_argument("-s", "--sources", nargs="+", type=str, required=True)
+    parser.add_argument("--dummy-output", type=str, required=False)
+    return parser.parse_args()
+
+
+def merge_directories(source: str, destination: str) -> None:
+    if os.path.isdir(source):
+        print(f"Merging {source} to {destination}", file=sys.stderr)
+        # rsync copies the *contents* of `source` only when it ends with a slash
+        if not source.endswith("/"):
+            source = source + "/"
+        # Use rsync to copy files from source to destination;
+        # shutil.copytree would raise file-exists errors when merging in parallel
+        result = subprocess.run(
+            ["rsync", "-a", "--ignore-existing", source, destination],
+            stderr=subprocess.PIPE,
+            text=True,
+        )
+        if result.returncode != 0:
+            raise Exception(
+                f"Failed to merge {source} to {destination}:\n\t{result.stderr}"
+            )
+    else:
+        raise Exception(f"Directory {source} does not exist or is not a directory")
+
+
+def main() -> None:
+    args = parse_arguments()
+    destination = args.dest
+    directories = args.sources
+
+    Path(destination).mkdir(parents=True, exist_ok=True)
+    if args.dummy_output:
+        # For dummy output, create a file to avoid empty output for buck2
+        Path(args.dummy_output).touch()
+
+    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
+        futures = [
+            executor.submit(merge_directories, index_dir, destination)
+            for index_dir in directories
+        ]
+        for future in as_completed(futures):
+            future.result()  # This will raise any exceptions that occurred
+
+
+if __name__ == "__main__":
+    main()
diff --git a/prelude/apple/tools/index/merge_index_store_tests.py b/prelude/apple/tools/index/merge_index_store_tests.py
new file mode 100644
index 0000000000000..bd52a61b37366
--- /dev/null
+++ b/prelude/apple/tools/index/merge_index_store_tests.py
@@ -0,0 +1,60 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
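+# A rough pure-Python sketch (hypothetical, for illustration only) of the merge
+# semantics exercised by these tests: the tool above shells out to
+# `rsync -a --ignore-existing`, which copies only files missing from dst, so
+# parallel merges avoid clobbering files that already landed:
+#
+#   for root, _dirs, files in os.walk(src):
+#       rel = os.path.relpath(root, src)
+#       os.makedirs(os.path.join(dst, rel), exist_ok=True)
+#       for name in files:
+#           target = os.path.join(dst, rel, name)
+#           if not os.path.exists(target):
+#               shutil.copy2(os.path.join(root, name), target)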
+ +# pyre-strict + +import subprocess +import unittest +from unittest.mock import MagicMock, patch + +from .merge_index_store import merge_directories, parse_arguments + + +class TestMergeIndexStore(unittest.TestCase): + def test_parse_arguments(self) -> None: + test_args = ["-d", "destination", "-s", "source1", "source2"] + with patch("sys.argv", ["script"] + test_args): + args = parse_arguments() + self.assertEqual(args.dest, "destination") + self.assertEqual(args.sources, ["source1", "source2"]) + + @patch("os.path.isdir") + @patch("subprocess.run") + def test_merge_directories( + self, mock_run: MagicMock, mock_isdir: MagicMock + ) -> None: + mock_isdir.return_value = True + mock_run.return_value = MagicMock(returncode=0, stderr="") + + merge_directories("source", "destination") + mock_run.assert_called_once_with( + ["rsync", "-a", "--ignore-existing", "source/", "destination"], + stderr=subprocess.PIPE, + text=True, + ) + + @patch("os.path.isdir") + @patch("subprocess.run") + def test_merge_directories_failure( + self, mock_run: MagicMock, mock_isdir: MagicMock + ) -> None: + mock_isdir.return_value = True + mock_run.return_value = MagicMock(returncode=1, stderr="Error") + + with self.assertRaises(Exception) as context: + merge_directories("source", "destination") + self.assertTrue("Failed to merge" in str(context.exception)) + + @patch("os.path.isdir") + def test_merge_non_existing_directory(self, mock_isdir: MagicMock) -> None: + mock_isdir.return_value = False + with self.assertRaises(Exception) as context: + merge_directories("source", "destination") + self.assertTrue( + "Directory source does not exist or is not a directory" + in str(context.exception) + ) diff --git a/prelude/apple/tools/info_plist_processor/BUCK.v2 b/prelude/apple/tools/info_plist_processor/BUCK.v2 new file mode 100644 index 0000000000000..6e7c1a443c795 --- /dev/null +++ b/prelude/apple/tools/info_plist_processor/BUCK.v2 @@ -0,0 +1,40 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") +load("@prelude//apple/tools/defs.bzl", "meta_python_test") + +oncall("build_infra") + +source_listing() + +python_library( + name = "preprocess", + srcs = ["preprocess.py"], +) + +meta_python_test( + name = "preprocess_test", + srcs = ["preprocess_test.py"], + deps = [":preprocess"], +) + +python_library( + name = "process", + srcs = ["process.py"], + visibility = ["PUBLIC"], + deps = ["prelude//apple/tools:plistlib_utils"], +) + +meta_python_test( + name = "process_test", + srcs = ["process_test.py"], + deps = [":process"], +) + +python_binary( + name = "tool", + main = "main.py", + visibility = ["PUBLIC"], + deps = [ + ":preprocess", + ":process", + ], +) diff --git a/prelude/apple/tools/info_plist_processor/main.py b/prelude/apple/tools/info_plist_processor/main.py new file mode 100644 index 0000000000000..995b385772ff8 --- /dev/null +++ b/prelude/apple/tools/info_plist_processor/main.py @@ -0,0 +1,152 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
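+# Illustrative CLI usage of the two subcommands defined below (all paths and
+# names are hypothetical):
+#
+#   main.py preprocess --input Info.plist.in --output Info.plist \
+#       --product-name MyApp --substitutions-json subs.json
+#   main.py process --input Info.plist --output Processed.plist \
+#       --additional-keys extra.json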
+ +# pyre-strict + +import argparse +from contextlib import ExitStack +from enum import Enum +from pathlib import Path + +from .preprocess import preprocess +from .process import process + + +class _SubcommandName(str, Enum): + preprocess = "preprocess" + process = "process" + + +def _create_preprocess_subparser( + subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]", +) -> None: + parser = subparsers.add_parser( + _SubcommandName.preprocess.value, + description="Sub-command to expand macro variables in parametrized Info.plist files. It's the Buck v2 equivalent of what `FindAndReplaceStep` and `InfoPlistSubstitution` do.", + ) + parser.add_argument( + "--input", + metavar="", + type=Path, + required=True, + help="Path to the input which is a .plist file ", + ) + parser.add_argument( + "--output", + metavar="", + type=Path, + required=True, + help="Path where the output, .plist with applied substitutions, should be written to", + ) + parser.add_argument( + "--product-name", + metavar="", + type=str, + required=True, + help="Product name, the value of `apple_bundle().product_name` attribute to be used in substitutions", + ) + parser.add_argument( + "--substitutions-json", + metavar="", + type=Path, + help="JSON file containing substitutions mapping", + ) + + +def _create_process_subparser( + subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]", +) -> None: + parser = subparsers.add_parser( + _SubcommandName.process.value, + description="Sub-command to do the final processing of the Info.plist before it's copied to the application bundle. It's the Buck v2 equivalent of what `PlistProcessStep` does in v1.", + ) + parser.add_argument( + "--input", + metavar="", + type=Path, + required=True, + help="Path to unprocessed .plist file", + ) + parser.add_argument( + "--override-input", + metavar="", + type=Path, + help="Path to the additional .plist file which should be merged into final result overriding keys present in unprocessed file or any other --additional-* argument.", + ) + parser.add_argument( + "--additional-keys", + metavar="", + type=Path, + help="Path to .json file containing additional data which should be merged into the final result if keys are not yet present in unprocessed file.", + ) + parser.add_argument( + "--override-keys", + metavar="", + type=Path, + help="Path to .json file with additional data which should be merged into the final result overriding keys present in unprocessed file or any other --additional-* or --override-* argument.", + ) + parser.add_argument( + "--output", + metavar="", + type=Path, + required=True, + help="Path where processed .plist file should be placed", + ) + + +def _parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Tool to process Info.plist file before it is placed into the bundle. It's the Buck v2 equivalent of what `AppleInfoPlist` build rule from v1 does." 
+    )
+    subparsers = parser.add_subparsers(dest="subcommand_name")
+    _create_preprocess_subparser(subparsers)
+    _create_process_subparser(subparsers)
+    return parser.parse_args()
+
+
+def main() -> None:
+    args = _parse_args()
+    if args.subcommand_name == _SubcommandName.preprocess:
+        with ExitStack() as stack:
+            input_file = stack.enter_context(args.input.open(mode="r"))
+            output_file = stack.enter_context(args.output.open(mode="w"))
+            substitutions_json = (
+                stack.enter_context(args.substitutions_json.open(mode="r"))
+                if args.substitutions_json is not None
+                else None
+            )
+            preprocess(input_file, output_file, substitutions_json, args.product_name)
+    elif args.subcommand_name == _SubcommandName.process:
+        with ExitStack() as stack:
+            input_file = stack.enter_context(args.input.open(mode="rb"))
+            output_file = stack.enter_context(args.output.open(mode="wb"))
+            override_input = (
+                stack.enter_context(args.override_input.open(mode="rb"))
+                if args.override_input is not None
+                else None
+            )
+            additional_keys = (
+                stack.enter_context(args.additional_keys.open(mode="rb"))
+                if args.additional_keys is not None
+                else None
+            )
+            override_keys = (
+                stack.enter_context(args.override_keys.open(mode="rb"))
+                if args.override_keys is not None
+                else None
+            )
+            process(
+                input_file=input_file,
+                output_file=output_file,
+                override_input_file=override_input,
+                additional_keys_file=additional_keys,
+                override_keys_file=override_keys,
+            )
+
+
+if __name__ == "__main__":
+    main()
diff --git a/prelude/apple/tools/info_plist_processor/preprocess.py b/prelude/apple/tools/info_plist_processor/preprocess.py
new file mode 100644
index 0000000000000..937959fe830ab
--- /dev/null
+++ b/prelude/apple/tools/info_plist_processor/preprocess.py
@@ -0,0 +1,83 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+import json
+import re
+from enum import Enum
+from typing import Dict, Optional, TextIO
+
+
+class _ReGroupName(str, Enum):
+    openparen = "openparen"
+    variable = "variable"
+    modifier = "modifier"
+    closeparen = "closeparen"
+
+
+_re_string: str = (
+    "\\$(?P<{openparen}>[\\{{\\(])(?P<{variable}>[^\\}}\\):]+)(?::(?P<{modifier}>[^\\}}\\)]+))?(?P<{closeparen}>[\\}}\\)])".format(
+        openparen=_ReGroupName.openparen,
+        variable=_ReGroupName.variable,
+        modifier=_ReGroupName.modifier,
+        closeparen=_ReGroupName.closeparen,
+    )
+)
+
+
+def _make_substitution_dict(
+    substitutions_json_file: Optional[TextIO], product_name: str
+) -> Dict[str, str]:
+    result = {
+        "EXECUTABLE_NAME": product_name,
+        "PRODUCT_NAME": product_name,
+    }
+    if substitutions_json_file is not None:
+        # The JSON file takes precedence over the default substitutions
+        result.update(json.load(substitutions_json_file))
+    return result
+
+
+def _process_line(
+    line: str, pattern: re.Pattern[str], substitutions: Dict[str, str]
+) -> str:
+    result = line
+    pos = 0
+    substituted_keys = set()
+    while True:
+        match = pattern.search(result, pos)
+        if match is None:
+            break
+        key = match.group(_ReGroupName.variable)
+        if key in substituted_keys:
+            raise RuntimeError(
-> {} -> ...".format(key)) + if key in substitutions: + result = ( + result[: match.start()] + substitutions[key] + result[match.end() :] + ) + substituted_keys.add(key) + # Keep the same position to handle the situation when variable was expanded into another variable + new_pos = match.start() + else: + new_pos = match.end() + if new_pos != pos: + substituted_keys = set() + pos = new_pos + return result + + +def preprocess( + input_file: TextIO, + output_file: TextIO, + substitutions_file: TextIO, + product_name: str, +) -> None: + pattern = re.compile(_re_string) + substitutions = _make_substitution_dict(substitutions_file, product_name) + for line in input_file: + output_file.write(_process_line(line, pattern, substitutions)) diff --git a/prelude/apple/tools/info_plist_processor/preprocess_test.py b/prelude/apple/tools/info_plist_processor/preprocess_test.py new file mode 100644 index 0000000000000..1e3f31e3103f7 --- /dev/null +++ b/prelude/apple/tools/info_plist_processor/preprocess_test.py @@ -0,0 +1,90 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +import io +import unittest + +from .preprocess import preprocess + + +class TestPreprocess(unittest.TestCase): + def test_default_substitution_values(self): + input_file = io.StringIO( + r""" +${PRODUCT_NAME} +${EXECUTABLE_NAME} +""" + ) + expected = r""" +my_product_name +my_product_name +""" + substitutions_file = io.StringIO(r"{}") + output_file = io.StringIO("") + preprocess(input_file, output_file, substitutions_file, "my_product_name") + self.assertEqual(output_file.getvalue(), expected) + + def test_json_substitution_values_precede_default_ones(self): + input_file = io.StringIO( + r""" +${PRODUCT_NAME} +${EXECUTABLE_NAME} +""" + ) + expected = r""" +foo +bar +""" + substitutions_file = io.StringIO( + r""" +{ + "PRODUCT_NAME": "foo", + "EXECUTABLE_NAME": "bar" +} +""" + ) + output_file = io.StringIO("") + preprocess(input_file, output_file, substitutions_file, "my_product_name") + self.assertEqual(output_file.getvalue(), expected) + + def test_chained_substitutions(self): + input_file = io.StringIO(r"${foo}") + expected = r"baz" + substitutions_file = io.StringIO( + r""" +{ + "foo": "${bar}", + "bar": "baz" +} +""" + ) + output_file = io.StringIO("") + preprocess(input_file, output_file, substitutions_file, "my_product_name") + self.assertEqual(output_file.getvalue(), expected) + + def test_recursive_substitutions_throws(self): + input_file = io.StringIO(r"${foo}") + substitutions_file = io.StringIO( + r""" +{ + "foo": "${bar}", + "bar": "${foo}" +} +""" + ) + output_file = io.StringIO("") + with self.assertRaises(Exception) as context: + preprocess(input_file, output_file, substitutions_file, "my_product_name") + self.assertTrue("Recursive" in str(context.exception)) + + def test_variable_with_modifier(self): + input_file = io.StringIO(r"${foo:modifier}") + expected = r"bar" + substitutions_file = io.StringIO(r'{"foo":"bar"}') + output_file = io.StringIO("") + preprocess(input_file, output_file, substitutions_file, "my_product_name") + self.assertEqual(output_file.getvalue(), expected) diff --git a/prelude/apple/tools/info_plist_processor/process.py b/prelude/apple/tools/info_plist_processor/process.py new file mode 100644 index 0000000000000..178bcbfc22ae8 --- 
+++ b/prelude/apple/tools/info_plist_processor/process.py
@@ -0,0 +1,56 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+import json
+import plistlib
+from typing import Any, Dict, IO, Optional
+
+from apple.tools.plistlib_utils import detect_format_and_load
+
+
+# Corresponding v1 code is contained in `com/facebook/buck/apple/PlistProcessStep.java`, `PlistProcessStep::execute` method.
+def _merge_plist_dicts(
+    source: Dict[str, Any], destination: Dict[str, Any], override: bool = False
+) -> None:
+    for key, value in source.items():
+        if key not in destination:
+            destination[key] = value
+        elif isinstance(value, dict) and isinstance(destination[key], dict):
+            destination[key].update(value)
+        elif override:
+            # override the value
+            destination[key] = value
+
+
+def process(
+    input_file: IO[bytes],
+    output_file: IO[bytes],
+    override_input_file: Optional[IO[bytes]] = None,
+    additional_keys: Optional[Dict[str, Any]] = None,
+    additional_keys_file: Optional[IO[bytes]] = None,
+    override_keys_file: Optional[IO[bytes]] = None,
+    output_format: plistlib.PlistFormat = plistlib.FMT_BINARY,
+) -> None:
+    root = detect_format_and_load(input_file)
+    if override_input_file is not None:
+        extra = detect_format_and_load(override_input_file)
+        # Overriding here for backwards compatibility with v1,
+        # see `PlistProcessStep::execute` implementation
+        _merge_plist_dicts(source=extra, destination=root, override=True)
+    if additional_keys is not None:
+        _merge_plist_dicts(source=additional_keys, destination=root)
+    if additional_keys_file is not None:
+        additional_keys_from_file = json.load(additional_keys_file)
+        _merge_plist_dicts(source=additional_keys_from_file, destination=root)
+    if override_keys_file is not None:
+        override_keys_from_file = json.load(override_keys_file)
+        _merge_plist_dicts(
+            source=override_keys_from_file, destination=root, override=True
+        )
+    plistlib.dump(root, output_file, fmt=output_format)
diff --git a/prelude/apple/tools/info_plist_processor/process_test.py b/prelude/apple/tools/info_plist_processor/process_test.py
new file mode 100644
index 0000000000000..d53829d47b654
--- /dev/null
+++ b/prelude/apple/tools/info_plist_processor/process_test.py
@@ -0,0 +1,188 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
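+# Quick sketch of the precedence rules implemented by process.py and verified
+# below (hypothetical values): given root = {"A": "1"},
+#   additional keys {"A": "2", "B": "3"} only fill gaps  -> {"A": "1", "B": "3"}
+#   override keys   {"A": "9"} always win                -> {"A": "9", "B": "3"}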
+
+import io
+import plistlib
+import unittest
+
+from .process import process
+
+
+class TestProcess(unittest.TestCase):
+    def test_canary(self):
+        input_file = io.BytesIO(
+            b"""<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+    <key>foo</key>
+    <string>bar</string>
+</dict>
+</plist>
+"""
+        )
+        output_file = io.BytesIO()
+        process(input_file, output_file)
+        self.assertTrue(len(output_file.getvalue()) > 0)
+
+    def test_additional_input_given_no_keys_conflict(self):
+        input_file = io.BytesIO(
+            b"""<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+    <key>foo</key>
+    <string>bar</string>
+</dict>
+</plist>
+"""
+        )
+        override_input_file = io.BytesIO(
+            b"""<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+    <key>baz</key>
+    <string>qux</string>
+</dict>
+</plist>
+"""
+        )
+        output_file = io.BytesIO()
+        process(input_file, output_file, override_input_file)
+        output_file.seek(0)
+        root = plistlib.load(output_file)
+        self.assertEqual(root, {"foo": "bar", "baz": "qux"})
+
+    def test_additional_input_given_keys_conflict(self):
+        input_file = io.BytesIO(
+            b"""<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+    <key>foo</key>
+    <string>bar</string>
+    <key>qux</key>
+    <dict>
+        <key>a</key>
+        <string>b</string>
+        <key>b</key>
+        <string>c</string>
+    </dict>
+    <key>foobar</key>
+    <dict>
+        <key>hello</key>
+        <string>world</string>
+    </dict>
+</dict>
+</plist>
+"""
+        )
+        override_input_file = io.BytesIO(
+            b"""<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+    <key>foo</key>
+    <string>baz</string>
+    <key>qux</key>
+    <dict>
+        <key>a</key>
+        <string>z</string>
+        <key>c</key>
+        <string>x</string>
+    </dict>
+    <key>foobar</key>
+    <string>zanzibar</string>
+</dict>
+</plist>
+"""
+        )
+        output_file = io.BytesIO()
+        process(input_file, output_file, override_input_file)
+        output_file.seek(0)
+        root = plistlib.load(output_file)
+        self.assertEqual(
+            root,
+            {"foo": "baz", "qux": {"a": "z", "b": "c", "c": "x"}, "foobar": "zanzibar"},
+        )
+
+    def test_additional_keys(self):
+        input_file = io.BytesIO(
+            b"""<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+    <key>foo</key>
+    <string>bar</string>
+</dict>
+</plist>
+"""
+        )
+        additional_keys = {"baz": "qux"}
+        output_file = io.BytesIO()
+        process(input_file, output_file, additional_keys=additional_keys)
+        output_file.seek(0)
+        root = plistlib.load(output_file)
+        self.assertEqual(root, {"foo": "bar", "baz": "qux"})
+
+    def test_additional_keys_do_not_override(self):
+        input_file = io.BytesIO(
+            b"""<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+    <key>foo</key>
+    <string>bar</string>
+</dict>
+</plist>
+"""
+        )
+        additional_keys = {"foo": "baz"}
+        output_file = io.BytesIO()
+        process(input_file, output_file, additional_keys=additional_keys)
+        output_file.seek(0)
+        root = plistlib.load(output_file)
+        self.assertEqual(root, {"foo": "bar"})
+
+    def test_additional_keys_from_file(self):
+        input_file = io.BytesIO(
+            b"""<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+    <key>foo</key>
+    <string>bar</string>
+</dict>
+</plist>
+"""
+        )
+        additional_keys_file = io.BytesIO(b"""{"baz": "qux"}""")
+        output_file = io.BytesIO()
+        process(input_file, output_file, additional_keys_file=additional_keys_file)
+        output_file.seek(0)
+        root = plistlib.load(output_file)
+        self.assertEqual(root, {"foo": "bar", "baz": "qux"})
+
+    def test_override_keys_from_file(self):
+        input_file = io.BytesIO(
+            b"""<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+    <key>foo</key>
+    <string>bar</string>
+</dict>
+</plist>
+"""
+        )
+        override_keys_file = io.BytesIO(b"""{"foo": "baz"}""")
+        output_file = io.BytesIO()
+        process(input_file, output_file, override_keys_file=override_keys_file)
+        output_file.seek(0)
+        root = plistlib.load(output_file)
+        self.assertEqual(root, {"foo": "baz"})
diff --git a/prelude/apple/tools/ipa_package_maker.py b/prelude/apple/tools/ipa_package_maker.py
new file mode 100644
index 0000000000000..ac3275d67cdd8
--- /dev/null
+++ b/prelude/apple/tools/ipa_package_maker.py
@@ -0,0 +1,105 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
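+# Illustrative invocation (arguments are examples only, not part of this change):
+#
+#   ipa_package_maker.py --ipa-contents-dir buck-out/app_dir \
+#       --ipa-output-path buck-out/MyApp.ipa --compression-level 6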
+
+# pyre-strict
+
+import argparse
+import os
+import shutil
+import subprocess
+
+import tempfile
+
+from pathlib import Path
+
+from apple.tools.re_compatibility_utils.writable import make_dir_recursively_writable
+
+
+def _copy_ipa_contents(ipa_contents_dir: Path, output_dir: Path) -> None:
+    if os.path.exists(output_dir):
+        shutil.rmtree(output_dir, ignore_errors=False)
+    shutil.copytree(ipa_contents_dir, output_dir, symlinks=True, dirs_exist_ok=False)
+
+
+def _delete_empty_SwiftSupport_dir(output_dir: Path) -> None:
+    swiftSupportDir = output_dir / "SwiftSupport"
+    if not swiftSupportDir.exists():
+        return
+
+    swiftSupportDirHasFiles = False
+    for _, _, files in os.walk(swiftSupportDir):
+        if files:
+            swiftSupportDirHasFiles = True
+            break
+
+    if not swiftSupportDirHasFiles:
+        shutil.rmtree(swiftSupportDir)
+
+
+def _package_ipa_contents(
+    ipa_contents_dir: Path,
+    ipa_output_path: Path,
+    compression_level: int,
+) -> None:
+    with tempfile.TemporaryDirectory() as processed_package_dir:
+        processed_package_dir_path = Path(processed_package_dir)
+        _copy_ipa_contents(ipa_contents_dir, processed_package_dir_path)
+
+        # Apple requires the SwiftSupport directory, when present, to be non-empty
+        _delete_empty_SwiftSupport_dir(processed_package_dir_path)
+
+        # Apple requires all executable files in an `.ipa` to be _writable_, otherwise App Store validation fails with:
+        # "Asset validation failed (90711) Invalid executable permissions. The executable $X does not have its writable bit set."
+        # Furthermore, there's additional internal infra that needs certain files to be writable.
+        #
+        # In normal development outside Meta, all files in an .ipa will be user writable, so let's just do the sensible thing
+        # and mirror the behavior which Apple expects, so we're future-proof.
+        make_dir_recursively_writable(str(processed_package_dir_path))
+
+        with open(ipa_output_path, "wb") as ipa_file:
+            zip_cmd = ["zip", "-X", "-r", f"-{compression_level}", "-", "."]
+            subprocess.run(
+                zip_cmd,
+                # The .ipa zip must be created relative to the package dir;
+                # the zip command has no way to express that other than
+                # changing its cwd
+                cwd=processed_package_dir_path,
+                stdout=ipa_file,
+                check=True,
+            )
+
+
+def main() -> None:
+    parser = argparse.ArgumentParser(description="Tool to make an .ipa package file.")
+    parser.add_argument(
+        "--ipa-contents-dir",
+        required=True,
+        type=Path,
+        help="The directory which has the contents of the .ipa file.",
+    )
+    parser.add_argument(
+        "--ipa-output-path",
+        required=True,
+        type=Path,
+        help="The path to the output .ipa file.",
+    )
+    parser.add_argument(
+        "--compression-level",
+        type=int,
+        required=True,
+        help="The compression level to use for 'zip'.",
+    )
+
+    args = parser.parse_args()
+    _package_ipa_contents(
+        args.ipa_contents_dir,
+        args.ipa_output_path,
+        args.compression_level,
+    )
+
+
+if __name__ == "__main__":
+    main()
diff --git a/prelude/apple/tools/make_modulemap.py b/prelude/apple/tools/make_modulemap.py
index f92f037df34a2..d863a57965f58 100755
--- a/prelude/apple/tools/make_modulemap.py
+++ b/prelude/apple/tools/make_modulemap.py
@@ -10,7 +10,29 @@
 import os
 import re
 from io import TextIOWrapper
-from typing import Dict, Iterable, List
+from typing import Dict, FrozenSet, Iterable, List
+
+
+_RESERVED_KEYWORDS: FrozenSet[str] = frozenset(
+    [
+        "config_macros",
+        "conflict",
+        "exclude",
+        "explicit",
+        "extern",
+        "export_as",
+        "export",
+        "framework",
+        "header",
+        "link",
+        "module",
+        "private",
+        "requires",
+        "textual",
+        "umbrella",
+        "use",
+    ]
+)
 
 
 class Module:
@@ -30,7
+52,10 @@ def get_submodule(self, name: str) -> "Module": def render(self, f: TextIOWrapper, path_prefix: str, indent: int = 0) -> None: space = " " * indent - f.write(f"{space}module {self.name} {{\n") + name = self.name + if name in _RESERVED_KEYWORDS: + name = f"{name}_" + f.write(f"{space}module {name} {{\n") submodule_names = set() for submodule_name in sorted(self.submodules.keys()): @@ -81,7 +106,7 @@ def _write_submodules( module = root_module for i, component in enumerate(h.split(os.sep)): if i == 0 and component == name: - # The common case is we have a singe header path prefix that matches the module name. + # The common case is we have a single header path prefix that matches the module name. # In this case we add the headers directly to the root module. pass else: diff --git a/prelude/apple/tools/make_swift_interface.py b/prelude/apple/tools/make_swift_interface.py new file mode 100755 index 0000000000000..13c91db7e4024 --- /dev/null +++ b/prelude/apple/tools/make_swift_interface.py @@ -0,0 +1,282 @@ +#!/usr/bin/env fbpython +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +""" +Remaps swiftc arguments to be usable by swift-ide-test, and invokes +swift-ide-test with said arguments. +""" + +import argparse +import dataclasses +import optparse +import pathlib +import subprocess as proc +import sys + +from contextlib import contextmanager +from typing import Iterable, List, Optional + + +@dataclasses.dataclass +class SwiftIDETestArguments(object): + sdk: Optional[str] + target: Optional[str] + xcc: Iterable[str] + defines: Iterable[str] + frameworks: Iterable[str] + includes: Iterable[str] + resource_dir: str + enable_cxx_interop: bool + cxx_interoperability_mode: Optional[str] + upcoming_features: Iterable[str] + explicit_swift_module_map: Optional[str] + swift_version: Optional[str] + + def to_args(self) -> List[str]: + args = [] + if self.target: + args.append("--target") + args.append(self.target) + + if self.sdk: + args.append("--sdk") + args.append(self.sdk) + + for define in self.defines: + args.append("-D") + args.append(define) + + for include in self.includes: + args.append("-I") + args.append(include) + + for framework in self.frameworks: + args.append("-F") + args.append(framework) + + for xcc in self.xcc: + args.append("--Xcc") + args.append(xcc) + + args.append("--resource-dir") + args.append(self.resource_dir) + + if self.enable_cxx_interop: + args.append("-enable-experimental-cxx-interop") + + if self.cxx_interoperability_mode: + # swift-ide-test only understands -enable-experimental-cxx-interop, + # not the versioned -cxx-interoperability-mode=. + args.append("-enable-experimental-cxx-interop") + + if self.upcoming_features: + for feature in self.upcoming_features: + args.append("-enable-upcoming-feature") + args.append(feature) + + if self.explicit_swift_module_map: + args.append("--explicit-swift-module-map-file") + args.append(self.explicit_swift_module_map) + + if self.swift_version: + args.append("-swift-version") + args.append(self.swift_version) + return args + + +class LongSingleDashOpt(optparse.Option): + """ + This Option subclass allows for long arguments specified with single dashes, + e.g. 
-sdk (the default implementation only allows long options with two
+    dashes)
+    """
+
+    def _set_opt_strings(self, opts):
+        for opt in opts:
+            if len(opt) < 2:
+                raise optparse.OptionError(
+                    "invalid option string %r: "
+                    "must be at least two characters long" % opt,
+                    self,
+                )
+            elif len(opt) == 2:
+                self._short_opts.append(opt)
+            else:
+                self._long_opts.append(opt)
+
+
+class IgnoreUnknownLongSingleDashOptParser(optparse.OptionParser):
+    """
+    This OptionParser subclass allows for
+    (a) long arguments specified with single dashes (e.g. -sdk)
+    (b) ignoring unknown arguments
+    The default OptionParser doesn't have either of these behaviors.
+    """
+
+    def __init__(self, *args, **kwargs):
+        kwargs["option_class"] = LongSingleDashOpt
+        super().__init__(*args, **kwargs)
+
+    def _process_args(self, largs, rargs, values):
+        while rargs:
+            try:
+                arg = rargs[0]
+                if arg == "--":
+                    del rargs[0]
+                    return
+                elif arg[0:2] == "--":
+                    self._process_long_opt(rargs, values)
+                elif arg[:1] == "-" and len(arg) > 1:
+                    if len(arg) > 2:
+                        self._process_long_opt(rargs, values)
+                    else:
+                        self._process_short_opts(rargs, values)
+                elif self.allow_interspersed_args:
+                    largs.append(arg)
+                    del rargs[0]
+                else:
+                    return
+            except optparse.BadOptionError:
+                # Ignore unknown options; the offending token has already been
+                # consumed from rargs, so we can simply move on.
+                continue
+
+
+def parse_swiftc_args(arguments: List[str]) -> SwiftIDETestArguments:  # noqa: C901
+    """
+    We can't use argparse to do our parsing because arguments like -Xcc
+    need to accept arguments that are prefixed with `-`.
+
+    optparse can handle this, and it's only soft-deprecated (i.e. it should
+    stay around, just not actively developed), so we should be safe to use it.
+
+    Additionally, our subclasses above are safe, since optparse is no longer
+    actively developed.
+    """
+    parser = IgnoreUnknownLongSingleDashOptParser()
+
+    parser.add_option("-sdk", dest="sdk")
+    parser.add_option("-target", dest="target")
+    parser.add_option("-Xcc", action="append", default=[], dest="xcc")
+    parser.add_option("-D", dest="defines", action="append", default=[])
+    parser.add_option("-F", dest="frameworks", action="append", default=[])
+    parser.add_option("-I", dest="includes", action="append", default=[])
+    parser.add_option("-resource-dir", dest="resource_dir")
+    parser.add_option(
+        "-enable-experimental-cxx-interop",
+        action="store_true",
+        default=False,
+        dest="enable_experimental_cxx_interop",
+    )
+    parser.add_option("-Xfrontend", action="append", default=[], dest="xfrontend")
+    parser.add_option("-swift-version", dest="swift_version")
+    parser.add_option("-cxx-interoperability-mode", dest="cxx_interoperability_mode")
+
+    options, leftovers = parser.parse_args(arguments)
+
+    frontend_parser = IgnoreUnknownLongSingleDashOptParser()
+    frontend_parser.add_option(
+        "-enable-upcoming-feature",
+        dest="enable_upcoming_feature",
+        action="append",
+        default=[],
+    )
+    frontend_parser.add_option(
+        "-explicit-swift-module-map-file", dest="explicit_swift_module_map"
+    )
+    frontend_options = frontend_parser.parse_args(options.xfrontend)[0]
+
+    resource_dir = options.resource_dir
+    if not resource_dir:
+        # If an explicit resource dir was not provided, we need to figure out
+        # which resource dir would have been used, which, in the case of Xcode,
+        # is relative to the swiftc used.
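+        # For example (hypothetical Xcode layout, for illustration):
+        #   <toolchain>/usr/bin/swiftc  ->  resource dir <toolchain>/usr/lib/swift
+        # i.e. two levels up from the compiler binary, then lib/swift.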
+ assert len(leftovers) >= 1 + compiler_path = pathlib.Path(leftovers[0]) + assert compiler_path.name == "swiftc" + resource_dir_path = compiler_path.parents[1] / "lib" / "swift" + assert resource_dir_path.exists() + resource_dir = str(resource_dir_path) + + return SwiftIDETestArguments( + options.sdk, + options.target, + options.xcc, + options.defines, + options.frameworks, + options.includes, + resource_dir, + options.enable_experimental_cxx_interop, + options.cxx_interoperability_mode, + frontend_options.enable_upcoming_feature, + frontend_options.explicit_swift_module_map, + options.swift_version, + ) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Uses swift-ide-test to generate a swift interface", + fromfile_prefix_chars="@", + ) + parser.add_argument( + "--swift-ide-test-tool", + required=True, + help="Path to swift-ide-test binary.", + ) + parser.add_argument( + "--module", + required=True, + help="Name of the module to generate the interface for.", + ) + parser.add_argument( + "--out", + help="Path to output file.", + default="-", + ) + parser.add_argument( + "arguments", + nargs="*", + default=[], + help="File containing compiler arguments to use to invoke" + + " swift-ide-test. Note these arguments should be in the format CC" + + " expects, not swift-ide-test, as this tool converts the arguments" + + " as needed", + ) + return parser.parse_args() + + +@contextmanager +def open_or_stdout(out): + if out == "-": + yield sys.stdout + else: + with open(out, "w") as f: + yield f + + +def main() -> None: + args = parse_args() + + parsed = parse_swiftc_args(args.arguments) + with open_or_stdout(args.out) as out: + proc.run( + [ + args.swift_ide_test_tool, + "--source-filename=x", + "--print-module", + "--module-to-print", + args.module, + "--module-print-submodules", + ] + + parsed.to_args(), + stdout=out, + check=True, + ) + + +if __name__ == "__main__": + main() diff --git a/prelude/apple/tools/plistlib_utils.py b/prelude/apple/tools/plistlib_utils.py new file mode 100644 index 0000000000000..39141677c78e2 --- /dev/null +++ b/prelude/apple/tools/plistlib_utils.py @@ -0,0 +1,31 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
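+# Usage sketch (hypothetical data): binary property lists start with the magic
+# bytes b"bplist00", which is what the helpers below key on.
+#
+#   detect_format_and_loads(plistlib.dumps({"k": "v"}, fmt=plistlib.FMT_BINARY))
+#   # -> {"k": "v"}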
+ +# pyre-strict + +import plistlib +from io import BytesIO +from typing import Any, Dict, IO + + +def _is_fmt_binary(header: bytes) -> bool: + return header[:8] == b"bplist00" + + +def detect_format_and_load(fp: IO[bytes]) -> Dict[str, Any]: + header = fp.read(32) + fp.seek(0) + if _is_fmt_binary(header): + fmt = plistlib.FMT_BINARY + else: + fmt = plistlib.FMT_XML + return plistlib.load(fp, fmt=fmt) + + +def detect_format_and_loads(value: bytes) -> Dict[str, Any]: + fp = BytesIO(value) + return detect_format_and_load(fp) diff --git a/prelude/apple/tools/re_compatibility_utils/BUCK b/prelude/apple/tools/re_compatibility_utils/BUCK new file mode 100644 index 0000000000000..a567b6984b50c --- /dev/null +++ b/prelude/apple/tools/re_compatibility_utils/BUCK @@ -0,0 +1,15 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +prelude = native + +prelude.python_library( + name = "re_compatibility_utils", + srcs = [ + "writable.py", + ], + visibility = ["PUBLIC"], +) diff --git a/prelude/apple/tools/re_compatibility_utils/writable.py b/prelude/apple/tools/re_compatibility_utils/writable.py new file mode 100644 index 0000000000000..af853b566ebf6 --- /dev/null +++ b/prelude/apple/tools/re_compatibility_utils/writable.py @@ -0,0 +1,53 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +import os +import pathlib +import platform +import stat +import sys + + +def make_path_user_writable(path: str) -> None: + # On Linux, `os.chmod()` does not support setting the permissions on a symlink. + # `chmod` manpage says: + # > AT_SYMLINK_NOFOLLOW If pathname is a symbolic link, do not + # > dereference it: instead operate on the link itself. + # > This flag is not currently implemented. + # + # In Python, an exception will be thrown: + # > NotImplementedError: chmod: follow_symlinks unavailable on this platform + # + # Darwin supports permission setting on symlinks. + follow_symlinks = platform.system() != "Darwin" + st = os.stat(path, follow_symlinks=False) + + try: + os.chmod(path, st.st_mode | stat.S_IWUSR, follow_symlinks=follow_symlinks) + except FileNotFoundError as e: + path_obj = pathlib.Path(path) + if path_obj.is_symlink(): + resolved_path_obj = path_obj.resolve() + if not resolved_path_obj.exists(): + # On Linux systems, all symlinks are followed when `chmod`-ing + # (see comment above about `AT_SYMLINK_NOFOLLOW`). If that happens, + # we can ignore the `chmod` error as its harmless. + print( + f"Tried setting permission on a symlink to a non-existing path, ignoring error... 
{e}", + file=sys.stderr, + ) + return + raise e + + +def make_dir_recursively_writable(dir: str) -> None: + for dirpath, _, filenames in os.walk(dir): + make_path_user_writable(dirpath) + for filename in filenames: + make_path_user_writable(os.path.join(dirpath, filename)) diff --git a/prelude/apple/tools/resource_broker/BUCK.v2 b/prelude/apple/tools/resource_broker/BUCK.v2 new file mode 100644 index 0000000000000..0a3e34a211a83 --- /dev/null +++ b/prelude/apple/tools/resource_broker/BUCK.v2 @@ -0,0 +1,38 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +python_binary( + name = "resource_broker", + main = "main.py", + visibility = ["PUBLIC"], + deps = [ + ":main", + ], +) + +python_library( + name = "main", + srcs = ["main.py"], + deps = [ + ":lib", + ], +) + +python_library( + name = "lib", + srcs = glob( + [ + "*.py", + ], + exclude = [ + "main.py", + ], + ), + deps = [ + "fbsource//third-party/pypi/dataclasses-json:dataclasses-json", + "fbsource//third-party/pypi/packaging:packaging", + ], +) diff --git a/prelude/apple/tools/resource_broker/idb_companion.py b/prelude/apple/tools/resource_broker/idb_companion.py new file mode 100644 index 0000000000000..aa2b450a35de6 --- /dev/null +++ b/prelude/apple/tools/resource_broker/idb_companion.py @@ -0,0 +1,24 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +import os +import signal +from dataclasses import dataclass +from io import TextIOWrapper + + +@dataclass +class IdbCompanion: + socket_address: str + pid: int + stderr: TextIOWrapper + + def cleanup(self) -> None: + os.kill(self.pid, signal.SIGTERM) + self.stderr.close() diff --git a/prelude/apple/tools/resource_broker/idb_target.py b/prelude/apple/tools/resource_broker/idb_target.py new file mode 100644 index 0000000000000..b735f38c11768 --- /dev/null +++ b/prelude/apple/tools/resource_broker/idb_target.py @@ -0,0 +1,54 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + +import json +from dataclasses import dataclass +from enum import Enum +from typing import List, Optional + +from dataclasses_json import dataclass_json + + +class SimState(str, Enum): + booted = "Booted" + shutdown = "Shutdown" + + +@dataclass_json +@dataclass +class IdbTarget: + name: str + os_version: str + udid: str + state: SimState + host: str = "" + port: int = 0 + + +@dataclass +class SimulatorInfo: + udid: str + device_set_path: str + + +def managed_simulator_from_stdout(stdout: Optional[str]) -> IdbTarget: + if not stdout: + return None + return IdbTarget.from_dict(json.loads(stdout)) + + +def managed_simulators_list_from_stdout(stdout: Optional[str]) -> List[IdbTarget]: + if not stdout: + return [] + targets = map( + # pyre-ignore[16]: `from_dict` is dynamically provided by `dataclass_json` + IdbTarget.from_dict, + json.loads(stdout), + ) + return list(targets) diff --git a/prelude/apple/tools/resource_broker/ios.py b/prelude/apple/tools/resource_broker/ios.py new file mode 100644 index 0000000000000..41e3a6ea4f1d2 --- /dev/null +++ b/prelude/apple/tools/resource_broker/ios.py @@ -0,0 +1,230 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +import os +from typing import List, Optional + +from packaging.version import Version + +from .idb_companion import IdbCompanion + +from .idb_target import ( + IdbTarget, + managed_simulator_from_stdout, + managed_simulators_list_from_stdout, + SimState, + SimulatorInfo, +) + +from .simctl_runtime import list_ios_runtimes, XCSimRuntime + +from .timeouts import SIMULATOR_BOOT_TIMEOUT + +from .utils import ( + execute_generic_text_producing_command, + spawn_companion, + wait_for_idb_companions, +) + + +def _device_set_path() -> str: + return os.path.expanduser("~/Library/Developer/Buck2IdbDeviceSet") + + +def _list_managed_simulators_command(simulator_manager: str) -> List[str]: + return [ + simulator_manager, + "list", + "--device-set-path", + _device_set_path(), + "--only", + "simulator", + ] + + +def _create_simulator_command(simulator_manager: str, sim_spec: str) -> List[str]: + return [ + simulator_manager, + "create", + "--device-set-path", + _device_set_path(), + "--configuration", + sim_spec, + ] + + +def _boot_simulator_command(simulator_manager: str, udid: str) -> List[str]: + return [ + simulator_manager, + "boot", + "--device-set-path", + _device_set_path(), + udid, + ] + + +def _compatible_device_type_from_runtime(runtime: XCSimRuntime) -> Optional[str]: + iphones = filter( + lambda t: t.product_family == "iPhone", runtime.supported_device_types + ) + if not iphones: + return None + default = next(iphones) + return next( + ( + device_type.name + for device_type in iphones + if device_type.name == "iPhone 11" + ), + default.name, + ) + + +def _select_latest_simulator_spec(runtimes: List[XCSimRuntime]) -> str: + runtimes.sort(key=lambda x: Version(x.version), reverse=True) + for runtime in runtimes: + device_type = _compatible_device_type_from_runtime(runtime) + if device_type: + return f"{device_type},{runtime.name}" + raise RuntimeError( + "No XCode simctl compatible iOS runtime and device available. Try to `sudo xcode-select -s ` and *open Xcode to install all required components*." 
+    )
+
+
+def _spawn_companion_for_simulator_command(
+    udid: str, grpc_domain_sock: str
+) -> List[str]:
+    return [
+        "idb_companion",
+        "--device-set-path",
+        _device_set_path(),
+        "--udid",
+        udid,
+        "--only",
+        "simulator",
+        "--grpc-domain-sock",
+        grpc_domain_sock,
+    ]
+
+
+async def _generic_managed_simulators_list_command(
+    name: str, cmd: List[str]
+) -> List[IdbTarget]:
+    stdout = await execute_generic_text_producing_command(name=name, cmd=cmd)
+    return managed_simulators_list_from_stdout(stdout)
+
+
+async def _generic_managed_simulator_command(
+    name: str, cmd: List[str]
+) -> Optional[IdbTarget]:
+    stdout = await execute_generic_text_producing_command(name=name, cmd=cmd)
+    return managed_simulator_from_stdout(stdout)
+
+
+async def _list_managed_simulators(simulator_manager: str) -> List[IdbTarget]:
+    list_cmd = _list_managed_simulators_command(simulator_manager=simulator_manager)
+    return await _generic_managed_simulators_list_command(
+        name="list managed simulators", cmd=list_cmd
+    )
+
+
+async def _create_simulator(simulator_manager: str) -> Optional[IdbTarget]:
+    runtimes = await list_ios_runtimes()
+    spec = _select_latest_simulator_spec(runtimes)
+    create_cmd = _create_simulator_command(
+        simulator_manager=simulator_manager, sim_spec=spec
+    )
+    return await _generic_managed_simulator_command(
+        name="create simulators", cmd=create_cmd
+    )
+
+
+async def _get_managed_simulators_create_if_needed(
+    simulator_manager: str,
+) -> List[IdbTarget]:
+    managed_simulators = await _list_managed_simulators(
+        simulator_manager=simulator_manager
+    )
+    if managed_simulators:
+        return managed_simulators
+
+    managed_simulator = await _create_simulator(simulator_manager=simulator_manager)
+    if managed_simulator:
+        return [managed_simulator]
+
+    raise RuntimeError(
+        "Failed to create an iOS simulator. Try to `sudo xcode-select -s ` and *open Xcode to install all required components*."
+ ) + + +def _select_simulator( + only_booted: bool, all_simulators: List[IdbTarget] +) -> Optional[IdbTarget]: + return next( + filter( + lambda s: s.state == SimState.booted if only_booted else True, + iter(all_simulators), + ), + None, + ) + + +def _select_simulator_with_preference( + prefer_booted: bool, all_simulators: List[IdbTarget] +) -> IdbTarget: + simulator = _select_simulator( + only_booted=prefer_booted, all_simulators=all_simulators + ) + if not simulator and prefer_booted: + simulator = _select_simulator(only_booted=False, all_simulators=all_simulators) + if not simulator: + raise RuntimeError("Expected at least unbooted simulator entity to be selected") + return simulator + + +async def prepare_simulator(simulator_manager: str, booted: bool) -> SimulatorInfo: + managed_simulators = await _get_managed_simulators_create_if_needed( + simulator_manager=simulator_manager + ) + simulator = _select_simulator_with_preference( + prefer_booted=booted, all_simulators=managed_simulators + ) + if simulator.state != SimState.booted and booted: + boot_cmd = _boot_simulator_command( + simulator_manager=simulator_manager, udid=simulator.udid + ) + await execute_generic_text_producing_command( + name="boot simulator", + cmd=boot_cmd, + timeout=SIMULATOR_BOOT_TIMEOUT, + ) + return SimulatorInfo( + udid=simulator.udid, + device_set_path=_device_set_path(), + ) + + +async def _ios_simulator(simulator_manager: str, booted: bool) -> List[IdbCompanion]: + simulator = await prepare_simulator( + simulator_manager=simulator_manager, booted=booted + ) + grpc_domain_sock = f"/tmp/buck2_idb_companion_{simulator.udid}" + process = await spawn_companion( + command=_spawn_companion_for_simulator_command( + simulator.udid, grpc_domain_sock + ), + log_file_suffix=f"companion_launch_logs_for_{simulator.udid}.log", + ) + return await wait_for_idb_companions([process]) + + +async def ios_unbooted_simulator(simulator_manager: str) -> List[IdbCompanion]: + return await _ios_simulator(simulator_manager=simulator_manager, booted=False) + + +async def ios_booted_simulator(simulator_manager: str) -> List[IdbCompanion]: + return await _ios_simulator(simulator_manager=simulator_manager, booted=True) diff --git a/prelude/apple/tools/resource_broker/macos.py b/prelude/apple/tools/resource_broker/macos.py new file mode 100644 index 0000000000000..ad103a031a477 --- /dev/null +++ b/prelude/apple/tools/resource_broker/macos.py @@ -0,0 +1,43 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
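+# Usage sketch (assumed entry point, for illustration only): spin up macOS idb
+# companions listening on /tmp/buck2_idb_companion_mac_{0..9}:
+#
+#   companions = asyncio.run(macos_idb_companions())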
+
+# pyre-strict
+
+import asyncio
+from typing import cast, List
+
+from .idb_companion import IdbCompanion
+
+from .utils import IdbCompanionProcess, spawn_companion, wait_for_idb_companions
+
+
+def _boot_macos_companion_command(grpc_domain_sock: str) -> List[str]:
+    return [
+        "idb_companion",
+        "--udid",
+        "mac",
+        "--grpc-domain-sock",
+        grpc_domain_sock,
+    ]
+
+
+async def macos_idb_companions() -> List[IdbCompanion]:
+    addresses = [(i, f"/tmp/buck2_idb_companion_mac_{i}") for i in range(10)]
+    awaitables = [
+        spawn_companion(
+            command=_boot_macos_companion_command(addr),
+            log_file_suffix=f"macos_companion_{i}.log",
+        )
+        for i, addr in addresses
+    ]
+    results = await asyncio.gather(*awaitables, return_exceptions=True)
+
+    if exception := next(filter(lambda r: isinstance(r, BaseException), results), None):
+        [r.cleanup() for r in results if isinstance(r, IdbCompanionProcess)]
+        raise cast(BaseException, exception)
+
+    return await wait_for_idb_companions(cast(List[IdbCompanionProcess], results))
diff --git a/prelude/apple/tools/resource_broker/main.py b/prelude/apple/tools/resource_broker/main.py
new file mode 100644
index 0000000000000..e6a422e5413bf
--- /dev/null
+++ b/prelude/apple/tools/resource_broker/main.py
@@ -0,0 +1,135 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+import argparse
+import asyncio
+import json
+import os
+import signal
+import sys
+from enum import Enum
+from time import sleep
+from typing import List, Optional
+
+from .idb_companion import IdbCompanion
+
+from .ios import ios_booted_simulator, ios_unbooted_simulator, prepare_simulator
+
+from .macos import macos_idb_companions
+
+idb_companions: List[IdbCompanion] = []
+
+
+def _args_parser() -> argparse.ArgumentParser:
+    parser = argparse.ArgumentParser(
+        description="Utility which helps to set up IDB companions which are used later by buck2 when it runs tests locally."
+    )
+    parser.add_argument(
+        "--simulator-manager",
+        required=False,
+        type=str,
+        help="Tool to manage simulators and their lifecycle. Required for iOS testing",
+    )
+    parser.add_argument(
+        "--type",
+        metavar="",
+        type=_ResourceType,
+        choices=[e.value for e in _ResourceType],
+        required=True,
+        help=f"""
+        Type of required resources.
+        Pass `{_ResourceType.iosUnbootedSimulator}` to get a companion for an unbooted iOS simulator.
+        Pass `{_ResourceType.iosBootedSimulator}` to get a companion for a booted iOS simulator.
+        Pass `{_ResourceType.macosIdbCompanion}` to get macOS companions.
+        """,
+    )
+    parser.add_argument(
+        "--no-companion",
+        default=False,
+        action="store_true",
+        help="""
+        If passed, will only create a simulator. No idb_companion will be spawned.
+ """, + ) + return parser + + +class _ResourceType(str, Enum): + iosUnbootedSimulator = "ios_unbooted_simulator" + iosBootedSimulator = "ios_booted_simulator" + macosIdbCompanion = "macos_idb_companion" + + +def _exit_gracefully(*args: List[object]) -> None: + for idb_companion in idb_companions: + idb_companion.cleanup() + exit(0) + + +def _check_simulator_manager_exists(simulator_manager: Optional[str]) -> None: + if not simulator_manager: + raise Exception("Simulator manager is not specified") + + +def main() -> None: + args = _args_parser().parse_args() + if args.no_companion: + if args.type == _ResourceType.macosIdbCompanion: + raise Exception( + "No resource brocker is required for MacOS tests without companion" + ) + + booted = args.type == _ResourceType.iosBootedSimulator + sim = asyncio.run( + prepare_simulator(simulator_manager=args.simulator_manager, booted=booted) + ) + result = { + "resources": [ + { + "udid": sim.udid, + "device_set_path": sim.device_set_path, + } + ] + } + json.dump(result, sys.stdout) + else: + _create_companion(args) + + +def _create_companion(args: argparse.Namespace) -> None: + if args.type == _ResourceType.iosBootedSimulator: + _check_simulator_manager_exists(args.simulator_manager) + idb_companions.extend(asyncio.run(ios_booted_simulator(args.simulator_manager))) + elif args.type == _ResourceType.iosUnbootedSimulator: + _check_simulator_manager_exists(args.simulator_manager) + idb_companions.extend( + asyncio.run(ios_unbooted_simulator(args.simulator_manager)) + ) + elif args.type == _ResourceType.macosIdbCompanion: + idb_companions.extend(asyncio.run(macos_idb_companions())) + pid = os.fork() + if pid == 0: + # child + signal.signal(signal.SIGINT, _exit_gracefully) + signal.signal(signal.SIGTERM, _exit_gracefully) + while True: + sleep(0.1) + else: + # Do not leak open FDs in parent + for c in idb_companions: + c.stderr.close() + result = { + "pid": pid, + "resources": [{"socket_address": c.socket_address} for c in idb_companions], + } + json.dump(result, sys.stdout) + + +if __name__ == "__main__": + main() diff --git a/prelude/apple/tools/resource_broker/simctl_runtime.py b/prelude/apple/tools/resource_broker/simctl_runtime.py new file mode 100644 index 0000000000000..6787b2b5c9329 --- /dev/null +++ b/prelude/apple/tools/resource_broker/simctl_runtime.py @@ -0,0 +1,66 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + +import json +from dataclasses import dataclass, field +from typing import List, Optional + +from dataclasses_json import config, dataclass_json + +from .utils import execute_generic_text_producing_command + + +@dataclass_json +@dataclass +class XCSimDevice: + name: str + product_family: str = field(metadata=config(field_name="productFamily")) + + +@dataclass_json +@dataclass +class XCSimRuntime: + name: str + version: str + supported_device_types: List[XCSimDevice] = field( + metadata=config(field_name="supportedDeviceTypes") + ) + + +@dataclass_json +@dataclass +class _XCSimRuntimes: + runtimes: List[XCSimRuntime] + + +def _list_ios_runtimes_command() -> List[str]: + return [ + "xcrun", + "simctl", + "list", + "runtimes", + "iOS", + "available", + "--json", + ] + + +def _simctl_runtimes_from_stdout(stdout: Optional[str]) -> List[XCSimRuntime]: + if not stdout: + return [] + data = json.loads(stdout) + # pyre-ignore[16]: `from_dict` is dynamically provided by `dataclass_json` + return _XCSimRuntimes.from_dict(data).runtimes + + +async def list_ios_runtimes() -> List[XCSimRuntime]: + stdout = await execute_generic_text_producing_command( + name="list iOS runtimes", cmd=_list_ios_runtimes_command() + ) + return _simctl_runtimes_from_stdout(stdout) diff --git a/prelude/apple/tools/resource_broker/timeouts.py b/prelude/apple/tools/resource_broker/timeouts.py new file mode 100644 index 0000000000000..a5694dd67708a --- /dev/null +++ b/prelude/apple/tools/resource_broker/timeouts.py @@ -0,0 +1,14 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +DEFAULT_OPERATION_TIMEOUT = 10 + +# Simulator boot is an expensive command and can take a long time to complete +# depending on machine configuration and current machine load. +SIMULATOR_BOOT_TIMEOUT = 90 diff --git a/prelude/apple/tools/resource_broker/utils.py b/prelude/apple/tools/resource_broker/utils.py new file mode 100644 index 0000000000000..80d36e7169b97 --- /dev/null +++ b/prelude/apple/tools/resource_broker/utils.py @@ -0,0 +1,142 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
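Aside: to make the `dataclass_json` plumbing in simctl_runtime.py concrete, a minimal self-contained sketch decoding the same shape of `xcrun simctl list runtimes iOS available --json` output; the sample payload below is fabricated for illustration:

import json
from dataclasses import dataclass, field
from typing import List

from dataclasses_json import config, dataclass_json


@dataclass_json
@dataclass
class Runtime:
    # config(field_name=...) maps the camelCase JSON key onto a snake_case field.
    name: str
    version: str
    supported_device_types: List[dict] = field(
        metadata=config(field_name="supportedDeviceTypes")
    )


sample = '{"runtimes": [{"name": "iOS 17.0", "version": "17.0", "supportedDeviceTypes": []}]}'
runtimes = [Runtime.from_dict(r) for r in json.loads(sample)["runtimes"]]
assert runtimes[0].version == "17.0"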
+ +# pyre-strict + +import asyncio +import json +import shlex +from dataclasses import dataclass +from io import TextIOWrapper +from pathlib import Path +from typing import Any, List, Tuple + +from dataclasses_json import dataclass_json + +from .idb_companion import IdbCompanion +from .timeouts import DEFAULT_OPERATION_TIMEOUT + + +@dataclass_json +@dataclass +class _IdbStdout: + grpc_path: str + + +@dataclass +class IdbCompanionProcess: + process: asyncio.subprocess.Process + stderr: TextIOWrapper + stderr_path: Path + + def cleanup(self) -> None: + self.process.terminate() + self.stderr.close() + + +async def _read_until_valid_json(stream: asyncio.StreamReader) -> object: + buffer = b"" + while True: + data = await stream.readuntil(b"}") + buffer += data + try: + return json.loads(buffer.decode()) + except json.JSONDecodeError: + pass + raise RuntimeError( + "Should not be reachable since either the valid JSON is there or `asyncio.IncompleteReadError` is raised." + ) + + +async def _read_stdout(p: IdbCompanionProcess) -> Tuple[int, TextIOWrapper, object]: + if not p.process.stdout: + raise ValueError("Expected stdout to be set for idb companion launch process.") + try: + json = await _read_until_valid_json(p.process.stdout) + except asyncio.IncompleteReadError as e: + if not e.partial: + with open(p.stderr_path) as f: + lines = f.readlines() + raise RuntimeError( + f"idb companion terminated unexpectedly with the following stderr:\n{lines}" + ) from e + else: + raise + return p.process.pid, p.stderr, json + + +async def wait_for_idb_companions( + processes: List[IdbCompanionProcess], + timeout: float = DEFAULT_OPERATION_TIMEOUT, +) -> List[IdbCompanion]: + reads = [asyncio.Task(_read_stdout(p)) for p in processes] + done, pending = await asyncio.wait( + reads, + timeout=timeout, + ) + if not pending: + results = [task.result() for task in done] + return [ + IdbCompanion( + # pyre-ignore[16]: `from_dict` is dynamically provided by `dataclass_json` + socket_address=_IdbStdout.from_dict(json_dict).grpc_path, + pid=pid, + stderr=stderr, + ) + for pid, stderr, json_dict in results + ] + + process_index = {reads[i]: processes[i] for i in range(len(processes))} + + stderr_paths = [] + + for task in pending: + task.cancel() + process_info = process_index[task] + stderr_paths.append(str(process_info.stderr_path)) + process_info.process.terminate() + + raise RuntimeError( + f"Timeout when trying to launch idb companions. 
List of files with stderr for pending companions: {stderr_paths}"
+    )
+
+
+async def execute_generic_text_producing_command(
+    name: str, cmd: List[str], timeout: float = DEFAULT_OPERATION_TIMEOUT
+) -> str:
+    process = await asyncio.create_subprocess_exec(
+        *cmd,
+        stdin=asyncio.subprocess.DEVNULL,
+        stdout=asyncio.subprocess.PIPE,
+        stderr=asyncio.subprocess.PIPE,
+    )
+    stdout, stderr = await asyncio.wait_for(process.communicate(), timeout=timeout)
+    if process.returncode != 0:
+        raise RuntimeError(
+            f"Failed to {name} with command:\n```\n{shlex.join(cmd)}\n```\nstdout:\n```\n{stdout.decode(errors='ignore')}\n```\nstderr:\n```\n{stderr.decode(errors='ignore')}\n```\n"
+        )
+    return stdout.decode()
+
+
+async def spawn_companion(
+    command: List[str],
+    log_file_suffix: str,
+) -> IdbCompanionProcess:
+    stderr_path = Path("/tmp/buck2_idb_companion_logs") / f"stderr-{log_file_suffix}"
+    stderr_path.parent.mkdir(parents=True, exist_ok=True)
+    stderr = stderr_path.open(mode="w")
+    process = await asyncio.create_subprocess_exec(
+        *command,
+        stdin=asyncio.subprocess.DEVNULL,
+        stdout=asyncio.subprocess.PIPE,
+        stderr=stderr,
+    )
+    return IdbCompanionProcess(
+        process=process,
+        stderr=stderr,
+        stderr_path=stderr_path,
+    )
diff --git a/prelude/apple/tools/selective_debugging/BUCK.v2 b/prelude/apple/tools/selective_debugging/BUCK.v2
new file mode 100644
index 0000000000000..bbf9b08f592b5
--- /dev/null
+++ b/prelude/apple/tools/selective_debugging/BUCK.v2
@@ -0,0 +1,44 @@
+load("@prelude//utils:source_listing.bzl", "source_listing")
+load("@prelude//apple/tools/defs.bzl", "meta_python_test")
+
+oncall("build_infra")
+
+source_listing()
+
+python_library(
+    name = "lib",
+    srcs = [
+        "macho.py",
+        "macho_parser.py",
+        "scrubber.py",
+        "spec.py",
+        "utils.py",
+    ],
+    deps = [
+        "prelude//apple/tools/re_compatibility_utils:re_compatibility_utils",
+    ],
+)
+
+python_binary(
+    name = "tool",
+    main = "main.py",
+    visibility = ["PUBLIC"],
+    deps = [
+        ":lib",
+    ],
+)
+
+meta_python_test(
+    name = "tests",
+    srcs = [
+        "scrubber_test.py",
+        "spec_test.py",
+    ],
+    resources = glob([
+        "test_resources/*",
+    ]),
+    deps = [
+        "fbsource//third-party/pypi/importlib-resources:importlib-resources",
+        ":lib",
+    ],
+)
diff --git a/prelude/apple/tools/selective_debugging/macho.py b/prelude/apple/tools/selective_debugging/macho.py
new file mode 100644
index 0000000000000..8371cbeb6c937
--- /dev/null
+++ b/prelude/apple/tools/selective_debugging/macho.py
@@ -0,0 +1,83 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
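Aside: a self-contained sketch of the pattern `execute_generic_text_producing_command` above implements: run a subprocess, bound the wait with a timeout, and fail loudly on a non-zero exit. The command and timeout values here are illustrative only:

import asyncio


async def run_text_command(cmd, timeout: float = 10.0) -> str:
    proc = await asyncio.create_subprocess_exec(
        *cmd,
        stdin=asyncio.subprocess.DEVNULL,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    # Bound the whole interaction, mirroring DEFAULT_OPERATION_TIMEOUT above.
    stdout, stderr = await asyncio.wait_for(proc.communicate(), timeout=timeout)
    if proc.returncode != 0:
        raise RuntimeError(f"command failed: {stderr.decode(errors='ignore')}")
    return stdout.decode()


print(asyncio.run(run_text_command(["echo", "hello"])))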
+ +# pyre-strict + +from dataclasses import dataclass + +MH_MAGIC = 0xFEEDFACE +MH_CIGAM = 0xCEFAEDFE +MH_MAGIC_64 = 0xFEEDFACF +MH_CIGAM_64 = 0xCFFAEDFE + +LC_CODE_SIGNATURE = 0x1D +LC_SEGMENT_64 = 0x19 +LC_SYMTAB = 0x02 + +N_OSO = 0x66 + + +class MachO: + def __str__(self) -> str: + props = {} + for k, v in self.__dict__.items(): + props[k] = hex(v) + return str(props) + + +@dataclass +class MachOHeader(MachO): + magic: int + cpu_type: int + cpu_subtype: int + file_type: int + n_cmds: int + size_of_cmds: int + flags: int + reserved: int + + @property + def is_valid(self) -> bool: + return self.magic in (MH_CIGAM_64, MH_MAGIC_64) + + +@dataclass +class LoadCommand(MachO): + cmd: int + cmd_size: int + + +@dataclass +class LinkEditCommand(LoadCommand): + segment_name: bytes + VM_addr: int + VM_size: int + file_offset: int + file_size: int + maximum_VM_protection: int + initial_VM_protection: int + n_sections: int + flags: int + + +@dataclass +class SymtabCommand(LoadCommand): + cmd: int + cmd_size: int + symtab_offset: int + n_symbols: int + strtab_offset: int + strtab_size: int + + +@dataclass +class Symbol(MachO): + strtab_index: int + sym_type: int + section_index: int + desc: int + value: int diff --git a/prelude/apple/tools/selective_debugging/macho_parser.py b/prelude/apple/tools/selective_debugging/macho_parser.py new file mode 100644 index 0000000000000..d9717c9db81af --- /dev/null +++ b/prelude/apple/tools/selective_debugging/macho_parser.py @@ -0,0 +1,122 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +import sys +from typing import BinaryIO, List, Optional, Tuple + +from .macho import ( + LC_CODE_SIGNATURE, + LC_SEGMENT_64, + LC_SYMTAB, + LinkEditCommand, + MachOHeader, + N_OSO, + Symbol, + SymtabCommand, +) + + +def _read_bytes(f: BinaryIO, n_bytes: int) -> int: + b = f.read(n_bytes) + return int.from_bytes(b, "little") + + +def load_header(f: BinaryIO, offset: int) -> Tuple[MachOHeader, int]: + f.seek(offset) + magic = _read_bytes(f, 4) + cpu_type = _read_bytes(f, 4) + cpu_sub_type = _read_bytes(f, 4) + file_type = _read_bytes(f, 4) + cmd_cnt = _read_bytes(f, 4) + cmd_size = _read_bytes(f, 4) + flags = _read_bytes(f, 4) + reserved = _read_bytes(f, 4) + header = MachOHeader( + magic, cpu_type, cpu_sub_type, file_type, cmd_cnt, cmd_size, flags, reserved + ) + return header, f.tell() + + +def load_commands( + f: BinaryIO, offset: int, n_cmds: int +) -> Tuple[Optional[LinkEditCommand], Optional[SymtabCommand]]: + """ + The OSO entries are identified in segments named __LINKEDIT. + If no segment is found with that name, there is nothing to scrub. 
+ """ + lc_linkedit = None + lc_symtab = None + f.seek(offset) + for _ in range(n_cmds): + pos = f.tell() + cmd = _read_bytes(f, 4) + size = _read_bytes(f, 4) + if cmd == LC_SEGMENT_64: + name = f.read(16) + if "LINKEDIT" in name.decode(): + vm_addr = _read_bytes(f, 8) + vm_size = _read_bytes(f, 8) + file_offset = _read_bytes(f, 8) + file_size = _read_bytes(f, 8) + maximum_vm_protection = _read_bytes(f, 4) + initial_vm_protection = _read_bytes(f, 4) + sections = _read_bytes(f, 4) + flags = _read_bytes(f, 4) + lc_linkedit = LinkEditCommand( + cmd, + size, + name, + vm_addr, + vm_size, + file_offset, + file_size, + maximum_vm_protection, + initial_vm_protection, + sections, + flags, + ) + continue + elif cmd == LC_SYMTAB: + symtab_offset = _read_bytes(f, 4) + n_symbols = _read_bytes(f, 4) + strtab_offset = _read_bytes(f, 4) + strtab_size = _read_bytes(f, 4) + lc_symtab = SymtabCommand( + cmd, size, symtab_offset, n_symbols, strtab_offset, strtab_size + ) + continue + elif cmd == LC_CODE_SIGNATURE: + print("[Focused Debugging][Warning] Code signature found.", file=sys.stderr) + + f.seek(pos) + f.seek(size, 1) + return lc_linkedit, lc_symtab + + +def load_debug_symbols(f: BinaryIO, offset: int, n_symbol: int) -> List[Symbol]: + """ + // Each LC_SYMTAB entry consists of the following fields: + // - String Index: 4 bytes (offset into the string table) + // - Type: 1 byte + // - Section: 1 byte + // - Description: 2 bytes + // - Value: 8 bytes on 64bit, 4 bytes on 32bit + """ + f.seek(offset) + symbols = [] + for _ in range(n_symbol): + strtab_index = _read_bytes(f, 4) + sym_type = _read_bytes(f, 1) + section_idx = _read_bytes(f, 1) + desc = _read_bytes(f, 2) + value = _read_bytes(f, 8) + if sym_type == N_OSO: + symbol = Symbol(strtab_index, sym_type, section_idx, desc, value) + symbols.append(symbol) + return symbols diff --git a/prelude/apple/tools/selective_debugging/main.py b/prelude/apple/tools/selective_debugging/main.py new file mode 100644 index 0000000000000..c9cfd835d99f4 --- /dev/null +++ b/prelude/apple/tools/selective_debugging/main.py @@ -0,0 +1,64 @@ +#!/usr/bin/env fbpython +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +import argparse +import sys + +from .scrubber import scrub + + +def _parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Tool to postprocess executables/dylibs." 
+ ) + parser.add_argument( + "--input", + required=True, + help="Path to the input which is an executable/dylib file.", + ) + parser.add_argument("--output", required=True, help="Path to the output file.") + group = parser.add_mutually_exclusive_group(required=True) + group.add_argument( + "--targets-file", + help="Path to a json file which contains user-focused Buck targets", + ) + group.add_argument( + "--spec-file", + help="Path to a json file which contains user-focused include/exclude specs", + ) + parser.add_argument( + "--adhoc-codesign-tool", + help="An adhoc codesign tool to use to re-sign the executables/dylibs, if provided.", + ) + parser.add_argument( + "--persisted-targets-file", + help="A JSON file with additional targets that must be preserved by the scrubber.", + ) + return parser.parse_args() + + +def main() -> None: + args = _parse_args() + try: + scrub( + input_file=args.input, + output_file=args.output, + persisted_targets_file=args.persisted_targets_file, + targets_file=args.targets_file, + spec_file=args.spec_file, + adhoc_codesign_tool=args.adhoc_codesign_tool, + ) + except Exception as e: + print(f"Focused debugging failed: {e}") + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/prelude/apple/tools/selective_debugging/scrubber.py b/prelude/apple/tools/selective_debugging/scrubber.py new file mode 100644 index 0000000000000..791920b892b63 --- /dev/null +++ b/prelude/apple/tools/selective_debugging/scrubber.py @@ -0,0 +1,227 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +import json + +import os +import shutil +import subprocess +from typing import BinaryIO, Callable, List, Optional, Set, Tuple + +from apple.tools.re_compatibility_utils.writable import make_path_user_writable + +from .macho import Symbol + +from .macho_parser import load_commands, load_debug_symbols, load_header +from .spec import Spec +from .utils import MachOException + + +FAKE_PATH = b"fake/path" +# buck-out/isolation_dir/gen/project_cell/{hash}/.... +NUM_OF_COMPONENTS_IN_BUCK2_OUTPUT_PATH_BEFORE_PROJECT_PATH = 5 + + +def _always_scrub(_: str) -> bool: + return True + + +# Visible for testing +def load_focused_targets_output_paths(json_file_path: str) -> Set[str]: + if json_file_path is None or not os.path.exists(json_file_path): + return set() + with open(json_file_path, "r") as f: + content = f.read() + if not content: + return set() + data = json.loads(content) + output_paths = set() + for target in data["targets"]: + _, package_and_name = target.split("//") + package, name = package_and_name.split(":") + # This assumes the output path created by buck2, which if + # modified, would break this logic. 
+ output_directory = f"{package}/__{name}__" + output_paths.add(output_directory) + return output_paths + + +# This function converts buck-out/isolation_dir/gen/project_cell/{hash}/X/Y/__name__/libFoo.a +# into X/Y/__name__ to match the focus target output path created by load_focused_targets_output_paths +# Visible for testing +def _get_target_output_path_from_debug_file_path( + debug_target_path: str, +) -> str: + # This function assumes the debug file path created by buck2 is in the following format: + # buck-out/isolation_dir/gen/project_cell/{hash}/.../__name__/libFoo.a + parts = debug_target_path.split("/") + + # We are doing the traverse in reverse order because this way we'll find the first + # target directory sooner. _should_scrub can get called many times, so it's + # important that we make it as efficient as possible. + i = 1 + while i <= len(parts): + if parts[-i].startswith("__") and parts[-i].endswith("__"): + break + i += 1 + if i > len(parts): + raise Exception( + f"Unrecognized format for debug file path : {debug_target_path}" + ) + + return "/".join( + parts[NUM_OF_COMPONENTS_IN_BUCK2_OUTPUT_PATH_BEFORE_PROJECT_PATH : -i + 1] + ) + + +# Visible for testing +def should_scrub_with_focused_targets_output_paths( + focused_targets_output_paths: Set[str], debug_file_path: str +) -> bool: + # All paths to be scrubbed when no focused target is specified + if len(focused_targets_output_paths) == 0: + return True + + # debug_file_path usually have the format x/y/z/libFoo.a(bar.m.o) + if "(" in debug_file_path: + debug_target_path = debug_file_path.split("(")[0] + else: + debug_target_path = debug_file_path + + if debug_file_path.startswith("buck-out/"): + target_output_path = _get_target_output_path_from_debug_file_path( + debug_target_path + ) + return target_output_path not in focused_targets_output_paths + else: + # occasionally archive file can be directly from source. + (package, name) = os.path.split(debug_file_path) + while package != "": + if f"{package}/__{name}__" in focused_targets_output_paths: + return False + (package, name) = os.path.split(package) + + return True + + +def _should_scrub_with_targets_file( + json_file_path: str, additional_labels: Set[str] +) -> Callable[[str], bool]: + focused_targets_output_paths = load_focused_targets_output_paths(json_file_path) + return lambda debug_file_path: should_scrub_with_focused_targets_output_paths( + focused_targets_output_paths.union(additional_labels), debug_file_path + ) + + +def _should_scrub_with_spec_file( + json_file_path: str, additional_labels: Set[str] +) -> Callable[[str], bool]: + spec = Spec(json_file_path) + return lambda debug_file_path: should_scrub_with_focused_targets_output_paths( + additional_labels, debug_file_path + ) and spec.scrub_debug_file_path(debug_file_path) + + +def _scrub( + f: BinaryIO, + strtab_offset: int, + symbols: List[Symbol], + scrub_handler: Callable[[str], bool], +) -> List[Tuple[str, str]]: + """ + Return a list of tuples. + Each tuple contains a pair of the original path and the rewritten path + """ + results = [] + for symbol in symbols: + f.seek(strtab_offset) + f.seek(symbol.strtab_index, 1) + + # Read a byte at a time until we reach the end of the path, denoted by Hex 0. 
+ start = end = f.tell() + path = b"" + b = f.read(1) + while b != b"\x00": + path += b + end += 1 + b = f.read(1) + str_len = end - start + + path_str = path.decode() + if scrub_handler(path_str): + f.seek(start) + # We don't want to modify the length of the path, so pad the replacement + # path with spaces + buffer = FAKE_PATH + b" " * (str_len - len(FAKE_PATH)) + f.write(buffer) + results.append((path_str, buffer.decode())) + else: + results.append((path_str, path_str)) + return results + + +def scrub( + input_file: str, + output_file: str, + persisted_targets_file: str, + targets_file: Optional[str] = None, + spec_file: Optional[str] = None, + adhoc_codesign_tool: Optional[str] = None, +) -> List[Tuple[str, str]]: + additional_labels = load_focused_targets_output_paths(persisted_targets_file) + if targets_file and spec_file: + raise Exception( + "Only one of a targets file or spec file is supported, not both!" + ) + elif targets_file: + scrub_handler = _should_scrub_with_targets_file(targets_file, additional_labels) + elif spec_file: + scrub_handler = _should_scrub_with_spec_file(spec_file, additional_labels) + else: + scrub_handler = _always_scrub + + shutil.copy2(input_file, output_file) + # Make it RE-compatible + make_path_user_writable(output_file) + + results = [] + with open(output_file, "r+b") as f: + header, offset = load_header(f, 0) + if not header.is_valid: + raise MachOException("Invalid macho format!") + lc_linkedit, lc_symtab = load_commands(f, offset, header.n_cmds) + if lc_linkedit is None: + return [] + if lc_symtab is None: + raise MachOException("LC_SYMTAB command not found") + if lc_symtab.strtab_size == 0 or lc_symtab.n_symbols == 0: + return [] + if lc_linkedit.file_size == 0: + raise MachOException("LC_SEGMENT_64 command for string table not found") + f.seek(lc_symtab.strtab_offset) + """ + ld64 deliberately burns the first byte with the space character, so that zero is never a + valid string index and writes 0x00 at offset 1, so that it's always the empty string. + The code for this in ld64 is in LinkEditClassic.hpp (StringPoolAtom::StringPoolAtom). + """ + if f.read(1) != b"\x20": + raise MachOException("First character in the string table is not a space") + if f.read(1) != b"\x00": + raise MachOException("Second character in the string table is not a NUL") + + symbols = load_debug_symbols(f, lc_symtab.symtab_offset, lc_symtab.n_symbols) + results = _scrub(f, lc_symtab.strtab_offset, symbols, scrub_handler) + + if adhoc_codesign_tool: + subprocess.run( + [adhoc_codesign_tool, "--binary", output_file], + check=True, + ) + + return results diff --git a/prelude/apple/tools/selective_debugging/scrubber_test.py b/prelude/apple/tools/selective_debugging/scrubber_test.py new file mode 100644 index 0000000000000..fe3c92a5c0741 --- /dev/null +++ b/prelude/apple/tools/selective_debugging/scrubber_test.py @@ -0,0 +1,227 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
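Aside: the invariant `_scrub` above relies on is that a rewritten path occupies exactly as many bytes as the original, so no other string-table offset moves. A tiny standalone illustration of that padding rule:

FAKE_PATH = b"fake/path"


def scrub_path(original: bytes) -> bytes:
    # Pad the replacement with spaces; Mach-O string-table entries are
    # addressed by byte offset, so lengths must be preserved exactly.
    assert len(original) >= len(FAKE_PATH)
    return FAKE_PATH + b" " * (len(original) - len(FAKE_PATH))


original = b"buck-out/v2/gen/cell/abc/pkg/__Foo__/Foo.m.o"
assert len(scrub_path(original)) == len(original)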
+ +import pathlib +import unittest +from tempfile import NamedTemporaryFile +from typing import List, Optional, Tuple +from unittest.mock import call, patch + +import importlib_resources as resources + +from .scrubber import ( + load_focused_targets_output_paths, + scrub, + should_scrub_with_focused_targets_output_paths, +) + +FAKE_PATH = "fake/path" + + +class Test(unittest.TestCase): + def test_no_focused_targets(self): + results, _ = _get_scrubber_results(targets_json_file_path=None) + for _, rewrite_path in results: + # We expect all paths to be scrubbed + self.assertEqual(rewrite_path.strip(), FAKE_PATH) + + def test_empty_focused_targets(self): + results, _ = _get_scrubber_results( + targets_json_file_path="focused_targets_empty.json" + ) + for _, rewrite_path in results: + # We expect all paths to be scrubbed + self.assertEqual(rewrite_path.strip(), FAKE_PATH) + + def test_focused_targets(self): + with resources.as_file( + _get_test_resource_file("focused_targets.json") + ) as targets_json_file: + output_paths = load_focused_targets_output_paths(str(targets_json_file)) + + results, _ = _get_scrubber_results( + targets_json_file_path="focused_targets.json" + ) + + focused_paths = [] + scrubbed_paths = [] + + for orig_path, rewrite_path in results: + for output_path in output_paths: + if output_path in orig_path: + # Ensure we didn't scrub the path + focused_paths.append(orig_path) + self.assertEqual(orig_path, rewrite_path) + else: + # Ensure we scrubbed the path + scrubbed_paths.append(orig_path) + self.assertEqual(rewrite_path.strip(), FAKE_PATH) + + self.assertEqual( + focused_paths, + [ + "buck-out/v2/gen/fbsource/56628b5feecfab0a/fbobjc/buck2/samples/focused_debugging/__Foo__/libFoo.a(Foo.mm.o)", + ], + ) + self.assertEqual( + scrubbed_paths, + [ + "buck-out/v2/gen/fbsource/56628b5feecfab0a/fbobjc/buck2/samples/focused_debugging/__HelloWorld__/__objects__/srcs/AppDelegate.m.o", + "buck-out/v2/gen/fbsource/56628b5feecfab0a/fbobjc/buck2/samples/focused_debugging/__HelloWorld__/__objects__/srcs/RootViewController.m.o", + "buck-out/v2/gen/fbsource/56628b5feecfab0a/fbobjc/buck2/samples/focused_debugging/__HelloWorld__/__objects__/srcs/main.m.o", + ], + ) + + def test_spec_targets(self): + results, _ = _get_scrubber_results(spec_json_file_path="focused_spec.json") + + focused_paths = [] + scrubbed_paths = [] + + for orig_path, rewrite_path in results: + if rewrite_path.strip() == FAKE_PATH: + scrubbed_paths.append(orig_path) + elif orig_path == rewrite_path: + focused_paths.append(orig_path) + else: + raise Exception( + f"Rewrite path is neither the fake path nor the original path: {rewrite_path}" + ) + + self.assertEqual( + focused_paths, + [ + "buck-out/v2/gen/fbsource/56628b5feecfab0a/fbobjc/buck2/samples/focused_debugging/__Foo__/libFoo.a(Foo.mm.o)", + ], + ) + self.assertEqual( + scrubbed_paths, + [ + "buck-out/v2/gen/fbsource/56628b5feecfab0a/fbobjc/buck2/samples/focused_debugging/__HelloWorld__/__objects__/srcs/AppDelegate.m.o", + "buck-out/v2/gen/fbsource/56628b5feecfab0a/fbobjc/buck2/samples/focused_debugging/__HelloWorld__/__objects__/srcs/RootViewController.m.o", + "buck-out/v2/gen/fbsource/56628b5feecfab0a/fbobjc/buck2/samples/focused_debugging/__HelloWorld__/__objects__/srcs/main.m.o", + ], + ) + + @patch("subprocess.run") + def test_codesigning(self, subprocess): + subprocess.return_value = [0, 0] + results, output_file = _get_scrubber_results( + spec_json_file_path="focused_spec.json", + adhoc_codesign_tool="/usr/fake/codesign", + ) + expected_calls = [ + call( + [ + 
"/usr/fake/codesign", + "--binary", + output_file, + ], + check=True, + ), + ] + subprocess.assert_has_calls(expected_calls) + + def test_load_focused_targets_output_paths(self): + with resources.as_file( + _get_test_resource_file("focused_targets.json") + ) as targets_json_file: + output_paths = load_focused_targets_output_paths(str(targets_json_file)) + + self.assertEqual( + output_paths, {"fbobjc/buck2/samples/focused_debugging/__Foo__"} + ) + + def test_should_scrub_with_focused_targets_output_paths(self): + focused_targets_output_paths = { + "fbobjc/some/path/__foo__", + "xplat/some/path/__foo__", + } + self.assertEqual( + True, + should_scrub_with_focused_targets_output_paths( + focused_targets_output_paths, + "buck-out/v2/gen/fbsource/56628b5feecfab0a/fbobjc/some/path/__baz__/libbar.a(baz.mm.o)", + ), + ) + self.assertEqual( + False, + should_scrub_with_focused_targets_output_paths( + focused_targets_output_paths, + "buck-out/v2/gen/fbsource/56628b5feecfab0a/fbobjc/some/path/__foo__/libbar.a(baz.mm.o)", + ), + ) + self.assertEqual( + False, + should_scrub_with_focused_targets_output_paths( + focused_targets_output_paths, + "buck-out/v2/gen/fbsource/56628b5feecfab0a/fbobjc/some/path/__foo__/lib.a", + ), + ) + self.assertEqual( + False, + should_scrub_with_focused_targets_output_paths( + focused_targets_output_paths, + "xplat/some/path/foo/lib/prebuilt_lib.a(baz.m.o)", + ), + ) + self.assertEqual( + True, + should_scrub_with_focused_targets_output_paths( + focused_targets_output_paths, + "xplat/some/path/fooo/prebuilt_lib.a(baz.m.o)", + ), + ) + + +@patch( + "apple.tools.selective_debugging.scrubber.make_path_user_writable", + return_value=None, +) +def _get_scrubber_results( + make_path_user_writable_mock, + targets_json_file_path: Optional[str] = None, + spec_json_file_path: Optional[str] = None, + adhoc_codesign_tool: Optional[str] = None, +) -> Tuple[List[Tuple[str, str]], str]: + with resources.as_file(_get_test_resource_file("HelloWorld")) as test_binary_file: + with NamedTemporaryFile() as out_file: + if targets_json_file_path: + with resources.as_file( + _get_test_resource_file(targets_json_file_path) + ) as targets_json_file: + return ( + scrub( + str(test_binary_file), + out_file.name, + None, + targets_file=str(targets_json_file), + adhoc_codesign_tool=adhoc_codesign_tool, + ), + out_file.name, + ) + elif spec_json_file_path: + with resources.as_file( + _get_test_resource_file(spec_json_file_path), + ) as spec_json_file: + return ( + scrub( + str(test_binary_file), + out_file.name, + None, + spec_file=str(spec_json_file), + adhoc_codesign_tool=adhoc_codesign_tool, + ), + out_file.name, + ) + else: + return scrub(str(test_binary_file), out_file.name, None), out_file.name + + +def _get_test_resource_file(name) -> pathlib.Path: + path = resources.files(__package__).joinpath(f"test_resources/{name}") + return path diff --git a/prelude/apple/tools/selective_debugging/spec.py b/prelude/apple/tools/selective_debugging/spec.py new file mode 100644 index 0000000000000..35fa26ca820d3 --- /dev/null +++ b/prelude/apple/tools/selective_debugging/spec.py @@ -0,0 +1,108 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + +import json +import re +from dataclasses import dataclass, field +from typing import List + + +@dataclass +class BuildTargetPatternOutputPathMatcher: + pattern: str + output_path: str = field(init=False) + + def __post_init__(self) -> None: + _, package_and_name = self.pattern.split("//") + if package_and_name.endswith("..."): + # recursive pattern + output_path, _ = package_and_name.split("...") + elif package_and_name.endswith(":"): + # package pattern + package, _ = package_and_name.split(":") + # This assumes the output path created by buck2, which if + # modified, would break this logic. + output_path = f"{package}/__" + else: + # target pattern + package, name = package_and_name.split(":") + # This assumes the output path created by buck2, which if + # modified, would break this logic. + output_path = f"{package}/__{name}__" + + self.output_path = output_path + + def match_path(self, debug_file_path: str) -> bool: + return self.output_path in debug_file_path + + +@dataclass +class Spec: + spec_path: str + include_build_target_patterns: List[BuildTargetPatternOutputPathMatcher] = field( + init=False + ) + include_regular_expressions: List[re.Pattern[str]] = field(init=False) + exclude_build_target_patterns: List[BuildTargetPatternOutputPathMatcher] = field( + init=False + ) + exclude_regular_expressions: List[re.Pattern[str]] = field(init=False) + + def __post_init__(self) -> None: + with open(self.spec_path, "r") as f: + data = json.load(f) + + self.include_build_target_patterns = [ + BuildTargetPatternOutputPathMatcher(entry) + for entry in data["include_build_target_patterns"] + ] + self.include_regular_expressions = [ + re.compile(entry) for entry in data["include_regular_expressions"] + ] + self.exclude_build_target_patterns = [ + BuildTargetPatternOutputPathMatcher(entry) + for entry in data["exclude_build_target_patterns"] + ] + self.exclude_regular_expressions = [ + re.compile(entry) for entry in data["exclude_regular_expressions"] + ] + + def scrub_debug_file_path(self, debug_file_path: str) -> bool: + if self.include_build_target_patterns or self.include_regular_expressions: + is_included = _path_matches_pattern_or_expression( + debug_file_path, + self.include_build_target_patterns, + self.include_regular_expressions, + ) + else: + is_included = True + + # If the path is included (and not excluded), do not scrub + return not ( + is_included + and not _path_matches_pattern_or_expression( + debug_file_path, + self.exclude_build_target_patterns, + self.exclude_regular_expressions, + ) + ) + + +def _path_matches_pattern_or_expression( + debug_file_path: str, + patterns: List[BuildTargetPatternOutputPathMatcher], + expressions: List[re.Pattern[str]], +) -> bool: + for pattern in patterns: + if pattern.match_path(debug_file_path): + return True + for expression in expressions: + if expression.search(debug_file_path): + return True + return False diff --git a/prelude/apple/tools/selective_debugging/spec_test.py b/prelude/apple/tools/selective_debugging/spec_test.py new file mode 100644 index 0000000000000..0bb42fc44efd7 --- /dev/null +++ b/prelude/apple/tools/selective_debugging/spec_test.py @@ -0,0 +1,104 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
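Aside: the three pattern shapes handled by `BuildTargetPatternOutputPathMatcher.__post_init__` above reduce to a small mapping; a compact restatement, with the cell and package names as illustrative values only:

def output_path_for(pattern: str) -> str:
    _, package_and_name = pattern.split("//")
    if package_and_name.endswith("..."):  # recursive pattern, e.g. cell//foo/...
        return package_and_name[: -len("...")]
    if package_and_name.endswith(":"):  # package pattern, e.g. cell//foo:
        return package_and_name[:-1] + "/__"
    package, name = package_and_name.split(":")  # target pattern, e.g. cell//foo:bar
    return f"{package}/__{name}__"


assert output_path_for("cell//foo:bar") == "foo/__bar__"
assert output_path_for("cell//foo:") == "foo/__"
assert output_path_for("cell//foo/...") == "foo/"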
+
+import json
+import unittest
+from tempfile import NamedTemporaryFile
+from typing import Dict, List
+
+from .spec import BuildTargetPatternOutputPathMatcher, Spec
+
+FAKE_PATH = "fake/path"
+
+
+class Test(unittest.TestCase):
+    def test_build_target_pattern_matcher(self):
+        target = BuildTargetPatternOutputPathMatcher("cell//foo:bar")
+        self.assertTrue(target.match_path("foo/__bar__"))
+        self.assertFalse(target.match_path("foo/__baz__"))
+
+        package = BuildTargetPatternOutputPathMatcher("cell//foo:")
+        self.assertTrue(package.match_path("foo/__bar__"))
+        self.assertTrue(package.match_path("foo/__baz__"))
+        self.assertFalse(package.match_path("foo/bar/__baz__"))
+
+        recursive = BuildTargetPatternOutputPathMatcher("cell//foo/...")
+        self.assertTrue(recursive.match_path("foo/__bar__"))
+        self.assertTrue(recursive.match_path("foo/bar/__baz__"))
+        self.assertFalse(recursive.match_path("bar/__baz__"))
+
+    def test_spec_with_includes(self):
+        test_spec = _base_spec()
+        test_spec["include_build_target_patterns"] = [
+            "cell//foo:",
+        ]
+
+        spec = _get_spec(test_spec)
+
+        # We expect to not scrub anything with "foo/__"
+        self.assertFalse(spec.scrub_debug_file_path("foo/__bar__"))
+        self.assertFalse(spec.scrub_debug_file_path("foo/__baz__"))
+        self.assertTrue(spec.scrub_debug_file_path("foo/bar/__baz__"))
+
+    def test_spec_with_include_regex(self):
+        test_spec = _base_spec()
+        test_spec["include_regular_expressions"] = [
+            "foo",
+        ]
+
+        spec = _get_spec(test_spec)
+
+        # We expect to not scrub anything with "foo"
+        self.assertFalse(spec.scrub_debug_file_path("foo/__bar__"))
+        self.assertFalse(spec.scrub_debug_file_path("foo/__baz__"))
+        self.assertTrue(spec.scrub_debug_file_path("bar/bar/__baz__"))
+
+    def test_spec_with_exclude_regex(self):
+        test_spec = _base_spec()
+        test_spec["exclude_regular_expressions"] = [
+            "foo",
+        ]
+
+        spec = _get_spec(test_spec)
+
+        # We expect to scrub anything with "foo"
+        self.assertTrue(spec.scrub_debug_file_path("foo/__bar__"))
+        self.assertTrue(spec.scrub_debug_file_path("foo/__baz__"))
+        self.assertFalse(spec.scrub_debug_file_path("bar/bar/__baz__"))
+
+    def test_spec_with_both(self):
+        test_spec = _base_spec()
+        test_spec["include_build_target_patterns"] = [
+            "cell//foo:",
+        ]
+        test_spec["exclude_regular_expressions"] = [
+            "bar",
+        ]
+
+        spec = _get_spec(test_spec)
+
+        # We expect to scrub anything with "bar", and not scrub anything with "foo/__"
+        self.assertTrue(spec.scrub_debug_file_path("foo/__bar__"))
+        self.assertFalse(spec.scrub_debug_file_path("foo/__baz__"))
+        self.assertTrue(spec.scrub_debug_file_path("bar/bar/__baz__"))
+
+
+def _get_spec(test_spec) -> Spec:
+    with NamedTemporaryFile(mode="w+") as tmp:
+        json.dump(test_spec, tmp)
+        tmp.flush()
+
+        return Spec(tmp.name)
+
+
+def _base_spec() -> Dict[str, List[str]]:
+    return {
+        "include_build_target_patterns": [],
+        "include_regular_expressions": [],
+        "exclude_build_target_patterns": [],
+        "exclude_regular_expressions": [],
+    }
diff --git a/prelude/apple/tools/selective_debugging/test_resources/HelloWorld b/prelude/apple/tools/selective_debugging/test_resources/HelloWorld
new file mode 100755
index 0000000000000..3e97392de479a
Binary files /dev/null and b/prelude/apple/tools/selective_debugging/test_resources/HelloWorld differ
diff --git a/prelude/apple/tools/selective_debugging/test_resources/focused_spec.json b/prelude/apple/tools/selective_debugging/test_resources/focused_spec.json
new file mode 100644
index 0000000000000..eb068fe60576e
--- /dev/null
+++ 
b/prelude/apple/tools/selective_debugging/test_resources/focused_spec.json @@ -0,0 +1,8 @@ +{ + "include_build_target_patterns": [ + "//fbobjc/buck2/samples/focused_debugging:Foo" + ], + "include_regular_expressions": [], + "exclude_build_target_patterns": [], + "exclude_regular_expressions": [] +} diff --git a/prelude/apple/tools/selective_debugging/test_resources/focused_targets.json b/prelude/apple/tools/selective_debugging/test_resources/focused_targets.json new file mode 100644 index 0000000000000..b0f0b04144d39 --- /dev/null +++ b/prelude/apple/tools/selective_debugging/test_resources/focused_targets.json @@ -0,0 +1,5 @@ +{ + "targets": [ + "//fbobjc/buck2/samples/focused_debugging:Foo" + ] +} \ No newline at end of file diff --git a/prelude/apple/tools/selective_debugging/test_resources/focused_targets_empty.json b/prelude/apple/tools/selective_debugging/test_resources/focused_targets_empty.json new file mode 100644 index 0000000000000..3e8cd33fcfc68 --- /dev/null +++ b/prelude/apple/tools/selective_debugging/test_resources/focused_targets_empty.json @@ -0,0 +1,4 @@ +{ + "targets": [ + ] +} \ No newline at end of file diff --git a/prelude/apple/tools/selective_debugging/utils.py b/prelude/apple/tools/selective_debugging/utils.py new file mode 100644 index 0000000000000..0d0d78230ceab --- /dev/null +++ b/prelude/apple/tools/selective_debugging/utils.py @@ -0,0 +1,12 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +class MachOException(Exception): + pass diff --git a/prelude/apple/tools/split_arch_combine_dsym_bundles_tool.py b/prelude/apple/tools/split_arch_combine_dsym_bundles_tool.py new file mode 100644 index 0000000000000..f3897b9367928 --- /dev/null +++ b/prelude/apple/tools/split_arch_combine_dsym_bundles_tool.py @@ -0,0 +1,63 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +import argparse +import os +import shutil + +from pathlib import Path + + +def _args_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser( + description=""" + Tool for combining multiple dsym bundles into a single one for Universal Binaries when the + split_arch_dsym is enabled. 
+ """ + ) + parser.add_argument("--dsym-bundle", action="append", type=str) + parser.add_argument("--arch", action="append", type=str) + parser.add_argument( + "--output", + type=Path, + required=True, + help="Path where the output bundle should be written to", + ) + + return parser + + +def _main() -> None: + args = _args_parser().parse_args() + + output_dwarf_path = os.path.join(args.output, "Contents/Resources/DWARF") + os.makedirs(output_dwarf_path) + + if len(args.arch) != len(args.dsym_bundle): + raise Exception( + f"Need to specify an architecture for every dsym bundle, archs:{args.arch}, dsyms:{args.dsym_bundle}" + ) + for i in range(len(args.dsym_bundle)): + dwarf_files_dir = os.path.join(args.dsym_bundle[i], "Contents/Resources/DWARF") + dwarf_files = os.listdir(dwarf_files_dir) + for dwarf_file in dwarf_files: + shutil.copy2( + os.path.join(dwarf_files_dir, dwarf_file), + os.path.join(output_dwarf_path, f"{dwarf_file}.{args.arch[i]}"), + ) + + # pick one of the Info.plist and copy it to the output bundle. + shutil.copy2( + os.path.join(args.dsym_bundle[0], "Contents/Info.plist"), + os.path.join(args.output, "Contents/Info.plist"), + ) + + +if __name__ == "__main__": + _main() diff --git a/prelude/apple/tools/swift_exec.py b/prelude/apple/tools/swift_exec.py new file mode 100755 index 0000000000000..be07f6726fde0 --- /dev/null +++ b/prelude/apple/tools/swift_exec.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +import os +import subprocess +import sys + +_RE_TMPDIR_ENV_VAR = "TMPDIR" +_FILE_WRITE_FAILURE_MARKER = "could not write" + + +def main(): + env = os.environ.copy() + if "INSIDE_RE_WORKER" in env and _RE_TMPDIR_ENV_VAR in env: + # Use $TMPDIR for the module cache location. This + # will be set to a unique location for each RE action + # which will avoid sharing modules across RE actions. + # This is necessary as the inputs to the modules will + # be transient and can be removed at any point, causing + # module validation errors to fail builds. + # https://github.com/llvm/llvm-project/blob/main/clang/lib/Driver/ToolChains/Clang.cpp#L3709 + env["CLANG_MODULE_CACHE_PATH"] = os.path.join( + env[_RE_TMPDIR_ENV_VAR], "buck-module-cache" + ) + else: + # For local actions use a shared module cache location. + # This should be safe to share across the other local + # compilation actions. + env["CLANG_MODULE_CACHE_PATH"] = "/tmp/buck-module-cache" + + command = sys.argv[1:] + # Apply a debug prefix map for the current directory + # to make debug info relocatable. To correctly make paths + # relocatable, we must use that path at which the action + # is run (be it locally or on RE) and this is not known + # at the time of action definition. + command += [ + "-debug-prefix-map", + f"{os.getcwd()}/=", + ] + # Apply a coverage prefix map for the current directory + # to make file path metadata relocatable stripping + # the current directory from it. 
+ command += [ + "-coverage-prefix-map", + f"{os.getcwd()}=.", + ] + + result = subprocess.run( + command, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + encoding=sys.stdout.encoding, + env=env, + ) + + print(result.stdout, file=sys.stdout, end="") + print(result.stderr, file=sys.stderr, end="") + + if result.returncode == 0: + # The Swift compiler will return an exit code of 0 and warn when it cannot write auxiliary files. + # Detect and error so that the action is not cached. + failed_write = ( + _FILE_WRITE_FAILURE_MARKER in result.stdout + or _FILE_WRITE_FAILURE_MARKER in result.stderr + ) + if failed_write: + print( + "Detected Swift compiler file write error but compiler exited with code 0, failing command..." + ) + sys.exit(1) + + sys.exit(result.returncode) + + +if __name__ == "__main__": + main() diff --git a/prelude/apple/tools/swift_exec.sh b/prelude/apple/tools/swift_exec.sh deleted file mode 100755 index af7c4271e0b7f..0000000000000 --- a/prelude/apple/tools/swift_exec.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under both the MIT license found in the -# LICENSE-MIT file in the root directory of this source tree and the Apache -# License, Version 2.0 found in the LICENSE-APACHE file in the root directory -# of this source tree. - -set -e - -if [ -n "$INSIDE_RE_WORKER" ]; then - # Use $TMPDIR for the module cache location. This - # will be set to a unique location for each RE action - # which will avoid sharing modules across RE actions. - # This is necessary as the inputs to the modules will - # be transient and can be removed at any point, causing - # module validation errors to fail builds. - # https://github.com/llvm/llvm-project/blob/main/clang/lib/Driver/ToolChains/Clang.cpp#L3709 - export CLANG_MODULE_CACHE_PATH="$TMPDIR/buck-module-cache" -else - # For local actions use a shared module cache location. - # This should be safe to share across the other local - # compilation actions. - export CLANG_MODULE_CACHE_PATH="/tmp/buck-module-cache" -fi - -# Apply a debug prefix map for the current directory -# to make debug info relocatable. To correctly make paths -# relocatable, we must use that path at which the action -# is run (be it locally or on RE) and this is not known -# at the time of action definition. -exec "$@" -debug-prefix-map "$PWD"/= -file-compilation-dir . diff --git a/prelude/apple/tools/swift_objc_header_postprocess.py b/prelude/apple/tools/swift_objc_header_postprocess.py deleted file mode 100755 index f3ccfc5fcb026..0000000000000 --- a/prelude/apple/tools/swift_objc_header_postprocess.py +++ /dev/null @@ -1,312 +0,0 @@ -#!/usr/bin/env fbpython -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under both the MIT license found in the -# LICENSE-MIT file in the root directory of this source tree and the Apache -# License, Version 2.0 found in the LICENSE-APACHE file in the root directory -# of this source tree. - -import argparse -import json -import os -import re -import sys -from typing import Dict, Iterable, TextIO - -# Out-of-date? 
Update with this command: -# -# xcode-select --print-path | xargs printf '%s/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator.sdk/System/Library/Frameworks/' | xargs ls | rg '^([A-Z].+)\.framework$' -r '${1}' | xargs printf ' "%s",\n' && xcode-select --print-path | xargs printf '%s/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator.sdk/usr/include/module.modulemap' | xargs cat | rg '^module ([a-zA-Z0-9_]*) .*$' -r '${1}'| xargs printf ' "%s",\n' -APPLE_SYSTEM_MODULES = { - "ARKit", - "AVFAudio", - "AVFoundation", - "AVKit", - "Accelerate", - "Accessibility", - "Accounts", - "AdServices", - "AdSupport", - "AddressBook", - "AddressBookUI", - "AppClip", - "AppTrackingTransparency", - "AssetsLibrary", - "AudioToolbox", - "AudioUnit", - "AuthenticationServices", - "AutomaticAssessmentConfiguration", - "BackgroundTasks", - "BusinessChat", - "CFNetwork", - "CallKit", - "CarPlay", - "ClassKit", - "ClockKit", - "CloudKit", - "Combine", - "Contacts", - "ContactsUI", - "CoreAudio", - "CoreAudioKit", - "CoreAudioTypes", - "CoreBluetooth", - "CoreData", - "CoreFoundation", - "CoreGraphics", - "CoreHaptics", - "CoreImage", - "CoreLocation", - "CoreLocationUI", - "CoreMIDI", - "CoreML", - "CoreMedia", - "CoreMotion", - "CoreNFC", - "CoreServices", - "CoreSpotlight", - "CoreTelephony", - "CoreText", - "CoreVideo", - "CryptoKit", - "CryptoTokenKit", - "DataDetection", - "DeveloperToolsSupport", - "DeviceActivity", - "DeviceCheck", - "EventKit", - "EventKitUI", - "ExposureNotification", - "ExternalAccessory", - "FamilyControls", - "FileProvider", - "FileProviderUI", - "Foundation", - "GLKit", - "GSS", - "GameController", - "GameKit", - "GameplayKit", - "GroupActivities", - "HealthKit", - "HealthKitUI", - "HomeKit", - "IOKit", - "IOSurface", - "IdentityLookup", - "IdentityLookupUI", - "ImageCaptureCore", - "ImageIO", - "Intents", - "IntentsUI", - "JavaScriptCore", - "LinkPresentation", - "LocalAuthentication", - "ManagedSettings", - "ManagedSettingsUI", - "MapKit", - "MediaAccessibility", - "MediaPlayer", - "MediaToolbox", - "MessageUI", - "Messages", - "Metal", - "MetalKit", - "MetalPerformanceShaders", - "MetalPerformanceShadersGraph", - "MetricKit", - "MobileCoreServices", - "ModelIO", - "MultipeerConnectivity", - "MusicKit", - "NaturalLanguage", - "NearbyInteraction", - "Network", - "NetworkExtension", - "NewsstandKit", - "NotificationCenter", - "OSLog", - "OpenAL", - "OpenGLES", - "PDFKit", - "PHASE", - "PassKit", - "PencilKit", - "Photos", - "PhotosUI", - "PushKit", - "QuartzCore", - "QuickLook", - "QuickLookThumbnailing", - "RealityFoundation", - "RealityKit", - "ReplayKit", - "SafariServices", - "SceneKit", - "ScreenTime", - "Security", - "SensorKit", - "ShazamKit", - "Social", - "SoundAnalysis", - "Speech", - "SpriteKit", - "StoreKit", - "SwiftUI", - "SystemConfiguration", - "TabularData", - "Twitter", - "UIKit", - "UniformTypeIdentifiers", - "UserNotifications", - "UserNotificationsUI", - "VideoSubscriberAccount", - "VideoToolbox", - "Vision", - "VisionKit", - "WatchConnectivity", - "WebKit", - "WidgetKit", - "AppleTextureEncoder", - "Compression", - "Darwin", - "asl", - "dnssd", - "os", - "os_object", - "os_workgroup", - "libkern", - "notify", - "zlib", - "SQLite3", -} - -APPLE_TEST_FRAMEWORKS = { - "XCTest", -} - - -# These modules require specific handling, as they do not have an umbrella -# header that matches the module name, as typical Apple frameworks do. 
-APPLE_SYSTEM_MODULE_OVERRIDES = { - "Dispatch": ("dispatch", ("dispatch.h",)), - "ObjectiveC": ("objc", ("runtime.h",)), -} - - -def write_imports_for_headers(out: TextIO, prefix: str, headers: Iterable[str]) -> None: - for header in headers: - print(f"#import <{prefix}/{header}>", file=out) - - -def write_imports_for_modules( - out: TextIO, - postprocessing_module_name: str, - modules: Iterable[str], - deps: Dict[str, Iterable[str]], -) -> None: - # We only include the traditional textual imports when modules are disabled, so - # that the behavior with modules enabled is identical to the behavior without - # the postprocessing. - print("#else", file=out) - for module in modules: - if headers := deps.get(module): - write_imports_for_headers(out, module, headers) - elif override := APPLE_SYSTEM_MODULE_OVERRIDES.get(module): - write_imports_for_headers(out, override[0], override[1]) - elif module in APPLE_SYSTEM_MODULES or module in APPLE_TEST_FRAMEWORKS: - # When we don't have an explicit override for the module, we use the module's - # name as an umbrella header. This is used for typical Apple frameworks like - # Foundation and UIKit. - write_imports_for_headers(out, module, (f"{module}.h",)) - else: - print( - f""" -The module "{module}" was imported as a dependency of Swift code in "{postprocessing_module_name}", but could not be mapped to a list of header imports by Buck's Swift header postprocessing. There are two possibilities: - -1. If "{module}" is an internal library, it is likely that the exported_deps of "{postprocessing_module_name}" are incorrect. Try fixing them manually or with "arc fixmydeps". This is the most likely issue. - -2. If "{module}" is a system (Apple) framework, the list of Apple system modules in {os.path.basename(__file__)} is out-of-date. There is a command to fix it in that file. This issue is unlikely. -""", - file=sys.stderr, - ) - sys.exit(1) - - -def main() -> None: - parser = argparse.ArgumentParser() - parser.add_argument("header") - parser.add_argument("deps") - parser.add_argument("out") - args = parser.parse_args() - - with open(args.deps) as f: - deps = json.load(f) - - # Strips the suffix from the header name, leaving us with just the name - # of the module that we are postprocessing the header for. This is used - # for error reporting. - postprocessing_module_name = os.path.basename(args.header).split("-")[0] - - # The Swift compiler's output looks like this for Swift5.8: - # - # #if __has_feature(objc_modules) - # #if __has_warning("-Watimport-in-framework-header") - # #pragma clang diagnostic ignored "-Watimport-in-framework-header" - # #endif - # @import ModuleA; - # @import ModuleB; - # @import ModuleC; - # #endif - # - # The implementation here balances being somewhat flexible to changes to the compiler's - # output, unlikely though they may be, with avoiding adding too much complexity and getting - # too close to implementing a full parser for Objective-C un-preprocessed header files. - - with open(args.header) as header, open(args.out, "w") as out: - # When this is None, it means that we are still searching for the start of the conditional - # @import block in the generated header. - modules = None - # The Swift compiler emits an additional #if gate inside the conditional @import block, so - # we need to track whether we're in a further nested conditional so that we know when the - # main conditional block has ended. 
- if_level = 0 - - for line in header: - line = line.rstrip("\n") - # When the modules has not been set, we are still searching for the start of the - # modules @import section. - if modules is None: - # The line changed from __has_feature(modules) to __has_feature(objc_modules) between Swift5.7 and Swift5.8. - # For the time being, we need to check for either to support both Xcode14.2 and Xcode14.3 onwards. - if ( - line == "#if __has_feature(objc_modules)" - or line == "#if __has_feature(modules)" - ): - modules = [] - if_level = 1 - else: - if line.startswith("@import"): - # Splitting on: - # "@import ": to separate from the @import. - # Semicolon and period: to separate the main module name from submodules or EOL. - # The module name will then be the first item. - modules.append(re.split(r"@import |[;.]", line)[1]) - elif line.startswith("#if"): - # This allows us to handle the Clang diagnostic #if block that the compiler inserts - # within the main #if block for modules. - if_level += 1 - elif line.startswith("#endif"): - if_level -= 1 - if if_level == 0: - write_imports_for_modules( - out, - postprocessing_module_name, - modules, - deps, - ) - modules = None - print(line, file=out) - - -if __name__ == "__main__": - main() diff --git a/prelude/apple/tools/xcframework_maker.py b/prelude/apple/tools/xcframework_maker.py new file mode 100644 index 0000000000000..17806db0e5e07 --- /dev/null +++ b/prelude/apple/tools/xcframework_maker.py @@ -0,0 +1,163 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +""" +Packages given input files into the correct format for an XCFramework, and generates +the required Info.plist. + +Example Usage: +xcframework_maker.py --name FooKit --output-path /tmp/FooKit.xcframework \ + --framework-path ios-arm64 input/ios/FooKit.framework \ + --dsym-path ios-arm64 input/ios/dSYM \ + --framework-path ios-arm64_x86_64-simulator input/ios-simulator/FooKit.framework \ + --dsym-path ios-arm64_x86_64-simulator input/ios-simulator/dSYM +""" + +import argparse +import plistlib +import shutil + +from pathlib import Path +from typing import Any, Optional + +# Functions that take architecture specifiers as 'item'.
+# Examples: +# ios-arm64_x86_64-simulator +# -> supported platform: ios +# -> supported architectures: [arm64, x86_64] +# -> supported platform variant: simulator +# watchos-arm64_arm64_32 +# -> supported platform: watchos +# -> supported architectures: [arm64_32, arm64] +# -> supported platform variant: None + + +def _supported_architectures(item: str) -> list[str]: + archs = [] + # Order is important so that we + # consume 'arm64_32' first to prevent it + # from later matching arm64 + for arch in ["arm64_32", "arm64", "x86_64"]: + if arch in item: + archs.append(arch) + item = item.replace(arch, "") + return archs + + +def _supported_platform(item: str) -> str: + return item.split("-")[0] + + +def _supported_platform_variant(item: str) -> Optional[str]: + components = item.split("-") + if len(components) > 2: + return components[2] + else: + return None + + +def _make_plist_entry( + item: str, binary_path: str, library_path: str, dsym_path: Optional[str] +) -> dict[str, Any]: + entry = { + "BinaryPath": binary_path, + "LibraryIdentifier": item, + "LibraryPath": library_path, + "SupportedArchitectures": _supported_architectures(item), + "SupportedPlatform": _supported_platform(item), + } + variant = _supported_platform_variant(item) + if variant is not None: + entry["SupportedPlatformVariant"] = variant + + if dsym_path is not None: + entry["DebugSymbolsPath"] = dsym_path + + return entry + + +def _make_plist( + items: list[str], + binary_paths: list[str], + library_path: str, + dsym_paths: list[Optional[str]], +) -> bytes: + d = {} + d["AvailableLibraries"] = [ + _make_plist_entry(item, binary_path, library_path, dsym_path) + for (item, binary_path, dsym_path) in zip(items, binary_paths, dsym_paths) + ] + d["CFBundlePackageType"] = "XFWK" + d["XCFrameworkFormatVersion"] = "1.0" + return plistlib.dumps(d) + + +def _find_binary_path(framework_fullpath: str, binary_name: str) -> str: + fullpath = Path(framework_fullpath) + versioned_binary_paths = sorted(fullpath.glob("Versions/Current/" + binary_name)) + if len(versioned_binary_paths) > 0: + return versioned_binary_paths[-1].relative_to(fullpath.parents[0]).as_posix() + return fullpath.name + "/" + binary_name + + +def main() -> None: + parser = argparse.ArgumentParser(description="Tool to make an xcframework bundle.") + parser.add_argument("--output-path") + parser.add_argument("--name") + parser.add_argument("--framework-path", action="append", nargs="+") + parser.add_argument( + "--dsym-path", action="append", nargs="+", default=[], required=False + ) + args = parser.parse_args() + + out_path = Path(args.output_path) + out_path.mkdir(parents=True, exist_ok=False) + + plist_path = out_path / "Info.plist" + items = [fp_args[0] for fp_args in args.framework_path] + binary_paths = [] + dsym_path_map = {} + + for framework_path in args.framework_path: + + # args are structured like this: + # --framework-path ios-arm64 buck-out/path/to/MyPkg.framework + + framework_arch = framework_path[0] + framework_fullpath = framework_path[1] + framework_basename = Path(framework_fullpath).name + + shutil.copytree( + framework_fullpath, + out_path / framework_arch / framework_basename, + symlinks=True, + dirs_exist_ok=False, + ) + + binary_paths.append(_find_binary_path(framework_fullpath, args.name)) + + for dsym_path in args.dsym_path: + dsym_arch = dsym_path[0] + dsym_fullpath = dsym_path[1] + shutil.copytree( + dsym_fullpath, + out_path / dsym_arch / "dSYMs" / (args.name + ".framework.dSYM"), + symlinks=True, + dirs_exist_ok=False, + ) +
dsym_path_map[dsym_arch] = "dSYMs" + + dsym_paths = [dsym_path_map.get(arch) for arch in items] + + library_path = args.name + ".framework" + plist_path.write_bytes(_make_plist(items, binary_paths, library_path, dsym_paths)) + + +if __name__ == "__main__": + main() diff --git a/prelude/apple/user/apple_ipa_package.bzl b/prelude/apple/user/apple_ipa_package.bzl new file mode 100644 index 0000000000000..e6995536b7ccb --- /dev/null +++ b/prelude/apple/user/apple_ipa_package.bzl @@ -0,0 +1,158 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//:paths.bzl", "paths") +load("@prelude//apple:apple_bundle_destination.bzl", "AppleBundleDestination", "bundle_relative_path_for_destination") +load("@prelude//apple:apple_bundle_types.bzl", "AppleBundleInfo", "ApplePackageExtension") +load("@prelude//apple:apple_package_config.bzl", "IpaCompressionLevel") +load("@prelude//apple:apple_rules_impl_utility.bzl", "get_apple_bundle_toolchain_attr") +load("@prelude//apple:apple_sdk.bzl", "get_apple_sdk_name") +load("@prelude//apple:apple_swift_stdlib.bzl", "should_copy_swift_stdlib") +load("@prelude//apple:apple_toolchain_types.bzl", "AppleToolchainInfo", "AppleToolsInfo") +load("@prelude//user:rule_spec.bzl", "RuleRegistrationSpec") +load("@prelude//utils:arglike.bzl", "ArgLike") + +def _apple_ipa_package_impl(ctx: AnalysisContext) -> list[Provider]: + ipa_package = _get_ipa_contents(ctx) + return [DefaultInfo(default_output = ipa_package)] + +def _apple_ipa_package_attribs(): + attribs = { + "bundle": attrs.dep(providers = [AppleBundleInfo]), + "ext": attrs.enum(ApplePackageExtension.values(), default = "ipa"), + "labels": attrs.list(attrs.string(), default = []), + "package_name": attrs.option(attrs.string(), default = None), + "_apple_toolchain": get_apple_bundle_toolchain_attr(), + "_apple_tools": attrs.exec_dep(default = "prelude//apple/tools:apple-tools", providers = [AppleToolsInfo]), + "_ipa_compression_level": attrs.enum(IpaCompressionLevel.values()), + } + return attribs + +registration_spec = RuleRegistrationSpec( + name = "apple_ipa_package", + impl = _apple_ipa_package_impl, + attrs = _apple_ipa_package_attribs(), +) + +_IPA_PACKAGE_FORWARDED_FIELDS = [ + "bundle", + "ext", + "package_name", + "_ipa_compression_level", + "compatible_with", + "exec_compatible_with", + "target_compatible_with", + "default_target_platform", + "within_view", + "visibility", +] + +def make_apple_ipa_package_target(apple_ipa_package_rule, **kwargs) -> [None, str]: + ipa_package_kwargs = { + "labels": ["generated"], + } + for field_name in _IPA_PACKAGE_FORWARDED_FIELDS: + ipa_package_kwargs[field_name] = kwargs.get(field_name) + + ipa_package_target_name = kwargs["name"] + "__IPA_Package_Private" + apple_ipa_package_rule( + name = ipa_package_target_name, + **ipa_package_kwargs + ) + + return ":{}".format(ipa_package_target_name) + +def _get_ipa_contents(ctx: AnalysisContext) -> Artifact: + bundle = ctx.attrs.bundle + app = bundle[DefaultInfo].default_outputs[0] + + contents = { + paths.join("Payload", app.basename): app, + } + + apple_bundle_info = bundle[AppleBundleInfo] + if (not apple_bundle_info.skip_copying_swift_stdlib) and should_copy_swift_stdlib(app.extension): + swift_support_path = paths.join("SwiftSupport", get_apple_sdk_name(ctx)) + 
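# Side note: a minimal, runnable sketch of what the xcframework_maker.py helpers
# above compute. `parse_arch_specifier` re-implements their logic purely for
# illustration; the specifier strings are hypothetical, and only the behavior
# (not the function name) comes from the file above.
from typing import Optional

def parse_arch_specifier(item: str) -> tuple[str, list[str], Optional[str]]:
    # Consume "arm64_32" before "arm64" so it cannot be matched twice.
    archs = []
    remainder = item
    for arch in ["arm64_32", "arm64", "x86_64"]:
        if arch in remainder:
            archs.append(arch)
            remainder = remainder.replace(arch, "")
    components = item.split("-")
    variant = components[2] if len(components) > 2 else None
    return components[0], archs, variant

assert parse_arch_specifier("ios-arm64_x86_64-simulator") == ("ios", ["arm64", "x86_64"], "simulator")
assert parse_arch_specifier("watchos-arm64_arm64_32") == ("watchos", ["arm64_32", "arm64"], None)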
contents[swift_support_path] = _get_swift_support_dir(ctx, app, apple_bundle_info) + + if apple_bundle_info.contains_watchapp: + contents["Symbols"] = _build_symbols_dir(ctx) + + return ctx.actions.copied_dir( + "__unzipped_ipa_contents__", + contents, + ) + +def _build_symbols_dir(ctx) -> Artifact: + symbols_dir = ctx.actions.declare_output("__symbols__", dir = True) + ctx.actions.run( + cmd_args(["mkdir", "-p", symbols_dir.as_output()]), + category = "watchos_symbols_dir", + ) + + return symbols_dir + +def _get_swift_support_dir(ctx, bundle_output: Artifact, bundle_info: AppleBundleInfo) -> Artifact: + stdlib_tool = ctx.attrs._apple_toolchain[AppleToolchainInfo].swift_toolchain_info.swift_stdlib_tool + sdk_name = get_apple_sdk_name(ctx) + + # .app -> app + # This is the way the input is expected. + extension = bundle_output.extension[1:] + swift_support_dir = ctx.actions.declare_output("__swift_dylibs__", dir = True) + script, _ = ctx.actions.write( + "build_swift_support.sh", + [ + cmd_args("set -euo pipefail"), + cmd_args(swift_support_dir, format = "mkdir -p {}"), + cmd_args( + [ + stdlib_tool, + # If you're debugging, you can pass the '--verbose' flag here. + "--copy", + "--scan-executable", + cmd_args( + [ + bundle_output, + bundle_relative_path_for_destination(AppleBundleDestination("executables"), sdk_name, extension, False), + bundle_info.binary_name, + ], + delimiter = "/", + ), + _get_scan_folder_args(AppleBundleDestination("plugins"), bundle_output, sdk_name, extension), + _get_scan_folder_args(AppleBundleDestination("extensionkit_extensions"), bundle_output, sdk_name, extension), + _get_scan_folder_args(AppleBundleDestination("frameworks"), bundle_output, sdk_name, extension), + _get_scan_folder_args(AppleBundleDestination("appclips"), bundle_output, sdk_name, extension), + "--destination", + swift_support_dir, + ], + delimiter = " ", + quote = "shell", + ), + ], + allow_args = True, + ) + ctx.actions.run( + cmd_args(["/bin/sh", script], hidden = [stdlib_tool, bundle_output, swift_support_dir.as_output()]), + category = "copy_swift_stdlibs", + ) + + return swift_support_dir + +def _get_scan_folder_args(dest: AppleBundleDestination, bundle_output: Artifact, sdk_name, extension) -> ArgLike: + return cmd_args( + [ + "--scan-folder", + cmd_args( + [ + bundle_output, + bundle_relative_path_for_destination(dest, sdk_name, extension, False), + ], + delimiter = "/", + ), + ], + ) diff --git a/prelude/apple/user/apple_macos_bundle.bzl b/prelude/apple/user/apple_macos_bundle.bzl new file mode 100644 index 0000000000000..17043975bfa23 --- /dev/null +++ b/prelude/apple/user/apple_macos_bundle.bzl @@ -0,0 +1,23 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//apple:apple_bundle.bzl", "apple_bundle_impl") +load("@prelude//apple:apple_bundle_attrs.bzl", "apple_macos_bundle_attrs") +load("@prelude//user:rule_spec.bzl", "RuleRegistrationSpec") +load(":macos_transition.bzl", "macos_transition") + +def apple_macos_bundle_impl(ctx: AnalysisContext) -> list[Provider]: + # This rule is _equivalent_ to `apple_bundle` except it applies + # an incoming macOS transition. 
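# Side note: the unzipped-IPA layout assembled by _get_ipa_contents above,
# sketched as plain data. The bundle and path names are hypothetical; the three
# top-level entries mirror the code paths above (Payload always, SwiftSupport/<sdk>
# when the Swift stdlib is copied, Symbols for watch-app-containing bundles).
ipa_contents = {
    "Payload/FooApp.app": "<the built .app bundle>",
    "SwiftSupport/iphoneos": "<Swift dylibs collected by the stdlib tool>",
    "Symbols": "<empty directory, created for watch apps>",
}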
+ return apple_bundle_impl(ctx) + +registration_spec = RuleRegistrationSpec( + name = "apple_macos_bundle", + impl = apple_macos_bundle_impl, + attrs = apple_macos_bundle_attrs(), + cfg = macos_transition, +) diff --git a/prelude/apple/user/apple_resource_bundle.bzl b/prelude/apple/user/apple_resource_bundle.bzl index e291b17a8146d..c4021257a098c 100644 --- a/prelude/apple/user/apple_resource_bundle.bzl +++ b/prelude/apple/user/apple_resource_bundle.bzl @@ -5,19 +5,20 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -load("@prelude//apple:apple_bundle_attrs.bzl", "get_apple_info_plist_build_system_identification_attrs") load("@prelude//apple:apple_bundle_resources.bzl", "get_apple_bundle_resource_part_list") load("@prelude//apple:apple_bundle_types.bzl", "AppleBundleResourceInfo") +load("@prelude//apple:apple_rules_impl_utility.bzl", "get_apple_info_plist_build_system_identification_attrs") load("@prelude//apple:apple_toolchain_types.bzl", "AppleToolchainInfo", "AppleToolsInfo") +load("@prelude//apple:resource_groups.bzl", "RESOURCE_GROUP_MAP_ATTR") +load("@prelude//apple/user:cpu_split_transition.bzl", "cpu_split_transition") load("@prelude//user:rule_spec.bzl", "RuleRegistrationSpec") -load("@prelude//decls/ios_rules.bzl", "AppleBundleExtension") -load(":resource_group_map.bzl", "resource_group_map_attr") +load("@prelude//decls/apple_rules.bzl", "AppleBundleExtension") def _get_apple_resources_toolchain_attr(): # FIXME: prelude// should be standalone (not refer to fbcode//) return attrs.toolchain_dep(default = "fbcode//buck2/platform/toolchain:apple-resources", providers = [AppleToolchainInfo]) -def _impl(ctx: AnalysisContext) -> list[Provider]: +def _apple_resource_bundle_impl(ctx: AnalysisContext) -> list[Provider]: resource_output = get_apple_bundle_resource_part_list(ctx) return [ DefaultInfo(), @@ -29,7 +30,8 @@ def _impl(ctx: AnalysisContext) -> list[Provider]: def _apple_resource_bundle_attrs(): attribs = { "asset_catalogs_compilation_options": attrs.dict(key = attrs.string(), value = attrs.any(), default = {}), - "binary": attrs.option(attrs.dep(), default = None), + "binary": attrs.option(attrs.split_transition_dep(cfg = cpu_split_transition), default = None), + "copy_public_framework_headers": attrs.option(attrs.bool(), default = None), "deps": attrs.list(attrs.dep(), default = []), "extension": attrs.one_of(attrs.enum(AppleBundleExtension), attrs.string()), "ibtool_flags": attrs.option(attrs.list(attrs.string()), default = None), @@ -37,13 +39,15 @@ def _apple_resource_bundle_attrs(): "info_plist": attrs.source(), "info_plist_substitutions": attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False, default = {}), "labels": attrs.list(attrs.string(), default = []), + "module_map": attrs.option(attrs.source(), default = None), + "privacy_manifest": attrs.option(attrs.source(), default = None), "product_name": attrs.option(attrs.string(), default = None), "resource_group": attrs.option(attrs.string(), default = None), - "resource_group_map": resource_group_map_attr(), + "resource_group_map": RESOURCE_GROUP_MAP_ATTR, + "universal": attrs.option(attrs.bool(), default = None), # Only include macOS hosted toolchains, so we compile resources directly on Mac RE "_apple_toolchain": _get_apple_resources_toolchain_attr(), - # FIXME: prelude// should be standalone (not refer to fbsource//) - "_apple_tools": attrs.exec_dep(default = "fbsource//xplat/buck2/platform/apple:apple-tools", providers = [AppleToolsInfo]), + 
"_apple_tools": attrs.exec_dep(default = "prelude//apple/tools:apple-tools", providers = [AppleToolsInfo]), # Because `apple_resource_bundle` is a proxy for `apple_bundle`, we need to get `name` # field of the `apple_bundle`, as it's used as a fallback value in Info.plist. "_bundle_target_name": attrs.string(), @@ -54,6 +58,6 @@ def _apple_resource_bundle_attrs(): registration_spec = RuleRegistrationSpec( name = "apple_resource_bundle", - impl = _impl, + impl = _apple_resource_bundle_impl, attrs = _apple_resource_bundle_attrs(), ) diff --git a/prelude/apple/user/apple_resource_transition.bzl b/prelude/apple/user/apple_resource_transition.bzl new file mode 100644 index 0000000000000..a3817a5bb1e7f --- /dev/null +++ b/prelude/apple/user/apple_resource_transition.bzl @@ -0,0 +1,40 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _is_universal(platform: PlatformInfo, refs: struct) -> bool: + universal = platform.configuration.constraints.get(refs.universal[ConstraintSettingInfo].label) + return universal.label == refs.universal_enabled[ConstraintValueInfo].label if universal != None else False + +def _apple_resource_transition_impl(platform: PlatformInfo, refs: struct, attrs: struct) -> PlatformInfo: + if attrs.skip_universal_resource_dedupe or not _is_universal(platform, refs): + return platform + else: + cpu_constraint_label = refs.cpu[ConstraintSettingInfo].label + filtered_constraints = { + constraint_setting_label: constraint_setting_value + for (constraint_setting_label, constraint_setting_value) in platform.configuration.constraints.items() + if constraint_setting_label != cpu_constraint_label + } + return PlatformInfo( + label = "apple_universal_deduped_resource", + configuration = ConfigurationInfo( + constraints = filtered_constraints, + values = platform.configuration.values, + ), + ) + +apple_resource_transition = transition( + impl = _apple_resource_transition_impl, + refs = { + "cpu": "config//cpu/constraints:cpu", + "universal": "config//cpu/constraints:universal", + "universal_enabled": "config//cpu/constraints:universal-enabled", + }, + attrs = [ + "skip_universal_resource_dedupe", + ], +) diff --git a/prelude/apple/user/apple_selective_debugging.bzl b/prelude/apple/user/apple_selective_debugging.bzl index 7397d37594bce..ae35156ed6537 100644 --- a/prelude/apple/user/apple_selective_debugging.bzl +++ b/prelude/apple/user/apple_selective_debugging.bzl @@ -5,8 +5,11 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
-# @starlark-rust: allow_string_literals_in_type_expr - +load( + "@prelude//:artifact_tset.bzl", + "ArtifactInfo", + "ArtifactInfoTag", +) load("@prelude//apple:apple_toolchain_types.bzl", "AppleToolsInfo") load( "@prelude//linking:execution_preference.bzl", @@ -21,16 +24,13 @@ load( "BuildTargetPattern", # @unused Used as a type "parse_build_target_pattern", ) -load( - "@prelude//utils:utils.bzl", - "is_any", -) +load("@prelude//utils:lazy.bzl", "lazy") _SelectionCriteria = record( include_build_target_patterns = field(list[BuildTargetPattern], []), - include_regular_expressions = field(list["regex"], []), + include_regular_expressions = field(list[regex], []), exclude_build_target_patterns = field(list[BuildTargetPattern], []), - exclude_regular_expressions = field(list["regex"], []), + exclude_regular_expressions = field(list[regex], []), ) AppleSelectiveDebuggingInfo = provider( @@ -38,11 +38,14 @@ AppleSelectiveDebuggingInfo = provider( fields = { "scrub_binary": provider_field(typing.Callable), "filter": provider_field(typing.Callable), + "scrub_selected_debug_paths_file": provider_field(typing.Callable), }, ) AppleSelectiveDebuggingFilteredDebugInfo = record( - map = field(dict[Label, list[Artifact]]), + infos = field(list[ArtifactInfo]), + swift_modules_labels = field(list[Label]), + metadata = field(Artifact), ) # The type of selective debugging json input to utilze. @@ -58,21 +61,41 @@ _SelectiveDebuggingJsonType = enum(*_SelectiveDebuggingJsonTypes) _LOCAL_LINK_THRESHOLD = 0.2 -def _impl(ctx: AnalysisContext) -> list[Provider]: +_OBJECT_FILE_EXTENSIONS = [ + ".o", + ".a", +] + +def _generate_metadata_json_object(is_any_selected_target_linked: bool) -> dict[str, typing.Any]: + return { + "contains_focused_targets": is_any_selected_target_linked, + } + +def _apple_selective_debugging_impl(ctx: AnalysisContext) -> list[Provider]: json_type = _SelectiveDebuggingJsonType(ctx.attrs.json_type) # process inputs and provide them up the graph with typing include_build_target_patterns = [parse_build_target_pattern(pattern) for pattern in ctx.attrs.include_build_target_patterns] - include_regular_expressions = [experimental_regex(expression) for expression in ctx.attrs.include_regular_expressions] + include_regular_expressions = [ + # TODO(nga): fancy is probably not needed here. + regex(expression, fancy = True) + for expression in ctx.attrs.include_regular_expressions + ] exclude_build_target_patterns = [parse_build_target_pattern(pattern) for pattern in ctx.attrs.exclude_build_target_patterns] - exclude_regular_expressions = [experimental_regex(expression) for expression in ctx.attrs.exclude_regular_expressions] + exclude_regular_expressions = [ + # TODO(nga): fancy is probably not needed here. 
+ regex(expression, fancy = True) + for expression in ctx.attrs.exclude_regular_expressions + ] scrubber = ctx.attrs._apple_tools[AppleToolsInfo].selective_debugging_scrubber + targets_json_file = None cmd = cmd_args(scrubber) if json_type == _SelectiveDebuggingJsonType("targets"): + targets_json_file = ctx.attrs.targets_json_file or ctx.actions.write_json("targets.json", {"targets": []}) + # If a targets json file is not provided, write an empty json file: - targets_json_file = ctx.attrs.targets_json_file or ctx.actions.write_json("targets_json.txt", {"targets": []}) cmd.add("--targets-file") cmd.add(targets_json_file) elif json_type == _SelectiveDebuggingJsonType("spec"): @@ -95,7 +118,41 @@ def _impl(ctx: AnalysisContext) -> list[Provider]: exclude_regular_expressions = exclude_regular_expressions, ) - def scrub_binary(inner_ctx, executable: Artifact, executable_link_execution_preference: LinkExecutionPreference, adhoc_codesign_tool: [RunInfo, None]) -> Artifact: + def scrub_selected_debug_paths_file(inner_ctx: AnalysisContext, package_names: list[str], output_name: str) -> Artifact: + # In the event that _SelectiveDebuggingJsonType was "spec", we expect that `package_names` + # was already filtered as part of scrubbing the binary in the apple_bundle. + # + # See `_maybe_scrub_binary()` in apple_bundle.bzl + if json_type != _SelectiveDebuggingJsonType("targets"): + return inner_ctx.actions.write(output_name, sorted(set(package_names))) + + def scrub_selected_debug_paths_action(dynamic_ctx: AnalysisContext, artifacts, outputs): + packages = [ + # "cell//path/to/some/thing:target" -> "path/to/some/thing" + target.split("//")[1].split(":")[0] + for target in artifacts[targets_json_file].read_json()["targets"] + ] + dynamic_ctx.actions.write( + outputs.values()[0], + sorted(set(filter(lambda p: p in packages, package_names))), + ) + + output = inner_ctx.actions.declare_output(output_name) + inner_ctx.actions.dynamic_output( + dynamic = [targets_json_file], + inputs = [], + outputs = [output.as_output()], + f = scrub_selected_debug_paths_action, + ) + + return output + + def scrub_binary( + inner_ctx, + executable: Artifact, + executable_link_execution_preference: LinkExecutionPreference, + adhoc_codesign_tool: [RunInfo, None], + focused_targets_labels: list[Label]) -> Artifact: inner_cmd = cmd_args(cmd) output = inner_ctx.actions.declare_output("debug_scrubbed/{}".format(executable.short_path)) @@ -107,6 +164,12 @@ def _impl(ctx: AnalysisContext) -> list[Provider]: inner_cmd.add(["--adhoc-codesign-tool", adhoc_codesign_tool]) inner_cmd.add(["--input", executable]) inner_cmd.add(["--output", output.as_output()]) + if len(focused_targets_labels) > 0: + additional_labels_json = inner_ctx.actions.write_json( + inner_ctx.attrs.name + ".additional_labels.json", + {"targets": [label.raw_target() for label in focused_targets_labels]}, + ) + inner_cmd.add(["--persisted-targets-file", additional_labels_json]) inner_ctx.actions.run( inner_cmd, category = "scrub_binary", @@ -118,19 +181,90 @@ def _impl(ctx: AnalysisContext) -> list[Provider]: ) return output - def filter_debug_info(debug_info: TransitiveSetIterator) -> AppleSelectiveDebuggingFilteredDebugInfo: - map = {} + def filter_debug_info(inner_ctx: AnalysisContext, debug_info: TransitiveSetIterator) -> AppleSelectiveDebuggingFilteredDebugInfo: + artifact_infos = [] + linked_targets = set() + is_any_selected_target_linked = False + is_using_spec = (json_type == _SelectiveDebuggingJsonType("spec")) + selected_targets_contain_swift = False for 
infos in debug_info: for info in infos: - if _is_label_included(info.label, selection_criteria): - map[info.label] = info.artifacts + is_swiftmodule = ArtifactInfoTag("swiftmodule") in info.tags + is_swift_pcm = ArtifactInfoTag("swift_pcm") in info.tags + is_swift_related = is_swiftmodule or is_swift_pcm + + is_label_included = _is_label_included(info.label, selection_criteria) + + is_any_selected_target_linked_when_using_spec = is_using_spec and is_any_selected_target_linked + if not is_any_selected_target_linked_when_using_spec: + # When using spec mode and there's already a selected target, there's no need + # to keep checking whether a selected target is linked - we know at least + # one is already linked. So, the if statement acts as a short-circuit for perf + # reasons to avoid unnecessary work. + # + # In targets mode (i.e., non-spec mode), the full list of `linked_targets` must + # be computed, as that value is used behind a dynamic output, so it's not possible + # to terminate the search early (we cannot determine whether a selected target is linked, + # as the list of selected targets is stored in the targets JSON file, which is not available + # at analysis time, only available behind a dynamic output). + debug_artifact_contains_object_code = lazy.is_any(lambda debug_artifact: debug_artifact.extension in _OBJECT_FILE_EXTENSIONS, info.artifacts) + if debug_artifact_contains_object_code: + if is_using_spec and is_label_included: + is_any_selected_target_linked = True + if not is_using_spec: + # `linked_targets` is only used in targets mode; avoid the cost in all other modes + linked_targets.add(info.label) - return AppleSelectiveDebuggingFilteredDebugInfo(map = map) + if is_label_included or (selected_targets_contain_swift and is_swift_related): + # There might be multiple ArtifactInfo values corresponding to the same Label, + # so to avoid overwriting, we need to preserve all artifacts.
+ artifact_infos.append(info) + selected_targets_contain_swift = selected_targets_contain_swift or ArtifactInfoTag("swiftmodule") in info.tags + + if json_type == _SelectiveDebuggingJsonType("spec"): + metadata_output = inner_ctx.actions.write_json( + "selective_metadata_with_spec.json", + _generate_metadata_json_object(is_any_selected_target_linked), + pretty = True, + ) + elif json_type == _SelectiveDebuggingJsonType("targets"): + def generate_metadata_output(dynamic_ctx: AnalysisContext, artifacts, outputs): + targets = artifacts[targets_json_file].read_json()["targets"] + is_any_selected_target_linked_inner = False + for target in targets: + cell, package_with_target_name = target.split("//") + package, target_name = package_with_target_name.split(":") + + is_any_selected_target_linked_inner = lazy.is_any(lambda linked_target: linked_target.cell == cell and linked_target.package == package and linked_target.name == target_name, linked_targets) + if is_any_selected_target_linked_inner: + break + + dynamic_ctx.actions.write_json( + outputs.values()[0], + _generate_metadata_json_object(is_any_selected_target_linked_inner), + pretty = True, + ) + + metadata_output = inner_ctx.actions.declare_output("selective_metadata_with_targets_file.json") + inner_ctx.actions.dynamic_output( + dynamic = [targets_json_file], + inputs = [], + outputs = [metadata_output.as_output()], + f = generate_metadata_output, + ) + else: + fail("Unexpected type: {}".format(json_type)) + + return AppleSelectiveDebuggingFilteredDebugInfo( + infos = artifact_infos, + swift_modules_labels = [], + metadata = metadata_output, + ) def preference_for_links(links: list[Label], deps_preferences: list[LinkExecutionPreferenceInfo]) -> LinkExecutionPreference: # If any dependent links were run locally, prefer that the current link is also performed locally, # to avoid needing to upload the previous link. - dep_prefered_local = is_any(lambda info: info.preference == LinkExecutionPreference("local"), deps_preferences) + dep_prefered_local = lazy.is_any(lambda info: info.preference == LinkExecutionPreference("local"), deps_preferences) if dep_prefered_local: return LinkExecutionPreference("local") @@ -152,13 +286,14 @@ def _impl(ctx: AnalysisContext) -> list[Provider]: AppleSelectiveDebuggingInfo( scrub_binary = scrub_binary, filter = filter_debug_info, + scrub_selected_debug_paths_file = scrub_selected_debug_paths_file, ), LinkExecutionPreferenceDeterminatorInfo(preference_for_links = preference_for_links), ] registration_spec = RuleRegistrationSpec( name = "apple_selective_debugging", - impl = _impl, + impl = _apple_selective_debugging_impl, attrs = { "exclude_build_target_patterns": attrs.list(attrs.string(), default = []), "exclude_regular_expressions": attrs.list(attrs.string(), default = []), @@ -166,7 +301,7 @@ registration_spec = RuleRegistrationSpec( "include_regular_expressions": attrs.list(attrs.string(), default = []), "json_type": attrs.enum(_SelectiveDebuggingJsonTypes), "targets_json_file": attrs.option(attrs.source(), default = None), - "_apple_tools": attrs.exec_dep(default = "fbsource//xplat/buck2/platform/apple:apple-tools", providers = [AppleToolsInfo]), + "_apple_tools": attrs.exec_dep(default = "prelude//apple/tools:apple-tools", providers = [AppleToolsInfo]), }, ) @@ -179,7 +314,7 @@ def _is_label_included(label: Label, selection_criteria: _SelectionCriteria) -> # If included (above snippet), ensure that this target is not excluded. 
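# Side note: the selective-debugging metadata file written above is a one-key
# JSON object; a minimal runnable sketch of its payload (the key name comes
# from _generate_metadata_json_object above, the value is illustrative):
import json
print(json.dumps({"contains_focused_targets": True}, indent=2))
# {
#   "contains_focused_targets": true
# }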
return not _check_if_label_matches_patterns_or_expressions(label, selection_criteria.exclude_build_target_patterns, selection_criteria.exclude_regular_expressions) -def _check_if_label_matches_patterns_or_expressions(label: Label, patterns: list[BuildTargetPattern], expressions: list["regex"]) -> bool: +def _check_if_label_matches_patterns_or_expressions(label: Label, patterns: list[BuildTargetPattern], expressions: list[regex]) -> bool: for pattern in patterns: if pattern.matches(label): return True diff --git a/prelude/apple/user/apple_simulators.bzl b/prelude/apple/user/apple_simulators.bzl index 24aaee1cf3b29..9f4208478c2cc 100644 --- a/prelude/apple/user/apple_simulators.bzl +++ b/prelude/apple/user/apple_simulators.bzl @@ -7,13 +7,14 @@ load("@prelude//user:rule_spec.bzl", "RuleRegistrationSpec") -def _impl(ctx: AnalysisContext) -> list[Provider]: +def _rule_impl(ctx: AnalysisContext) -> list[Provider]: return [ DefaultInfo(), LocalResourceInfo( - setup = cmd_args([ctx.attrs.broker[RunInfo]] + ctx.attrs.args), + setup = cmd_args([ctx.attrs.broker[RunInfo], "--simulator-manager", ctx.attrs.idb_targets[RunInfo]] + ctx.attrs.args), resource_env_vars = { - "IDB_COMPANION": "socket_address", + "DEVICE_SET_PATH": "device_set_path", + "DEVICE_UDID": "udid", }, setup_timeout_seconds = ctx.attrs.setup_timeout_seconds, ), @@ -21,10 +22,11 @@ def _impl(ctx: AnalysisContext) -> list[Provider]: registration_spec = RuleRegistrationSpec( name = "apple_simulators", - impl = _impl, + impl = _rule_impl, attrs = { "args": attrs.list(attrs.string(), default = []), "broker": attrs.exec_dep(providers = [RunInfo]), + "idb_targets": attrs.exec_dep(providers = [RunInfo]), "setup_timeout_seconds": attrs.option(attrs.int(), default = None), }, ) diff --git a/prelude/apple/user/apple_toolchain_override.bzl b/prelude/apple/user/apple_toolchain_override.bzl index 0a3886e49f3f5..f45ec583bff25 100644 --- a/prelude/apple/user/apple_toolchain_override.bzl +++ b/prelude/apple/user/apple_toolchain_override.bzl @@ -9,13 +9,14 @@ load("@prelude//apple:apple_toolchain_types.bzl", "AppleToolchainInfo") load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxToolchainInfo") load("@prelude//user:rule_spec.bzl", "RuleRegistrationSpec") -def _impl(ctx: AnalysisContext) -> list[Provider]: +def _apple_toolchain_override_impl(ctx: AnalysisContext) -> list[Provider]: base = ctx.attrs.base[AppleToolchainInfo] cxx_toolchain_override = ctx.attrs.cxx_toolchain[CxxToolchainInfo] return [ DefaultInfo(), AppleToolchainInfo( actool = base.actool, + architecture = base.architecture, codesign = base.codesign, codesign_allocate = base.codesign_allocate, copy_scene_kit_assets = base.copy_scene_kit_assets, @@ -29,16 +30,16 @@ def _impl(ctx: AnalysisContext) -> list[Provider]: installer = base.installer, libtool = base.libtool, lipo = base.lipo, - min_version = base.min_version, + mapc = base.mapc, + merge_index_store = base.merge_index_store, momc = base.momc, - odrcov = base.odrcov, + objdump = base.objdump, platform_path = base.platform_path, sdk_build_version = base.sdk_build_version, sdk_name = base.sdk_name, sdk_path = base.sdk_path, sdk_version = base.sdk_version, swift_toolchain_info = base.swift_toolchain_info, - watch_kit_stub_binary = base.watch_kit_stub_binary, xcode_build_version = base.xcode_build_version, xcode_version = base.xcode_version, xctest = base.xctest, @@ -47,7 +48,7 @@ def _impl(ctx: AnalysisContext) -> list[Provider]: registration_spec = RuleRegistrationSpec( name = "apple_toolchain_override", - impl = _impl, + impl = 
_apple_toolchain_override_impl, attrs = { "base": attrs.toolchain_dep(providers = [AppleToolchainInfo]), "cxx_toolchain": attrs.toolchain_dep(providers = [CxxToolchainInfo]), diff --git a/prelude/apple/user/apple_tools.bzl b/prelude/apple/user/apple_tools.bzl index c9cf13a9c25c5..1c6155ffc6e6b 100644 --- a/prelude/apple/user/apple_tools.bzl +++ b/prelude/apple/user/apple_tools.bzl @@ -8,7 +8,7 @@ load("@prelude//apple:apple_toolchain_types.bzl", "AppleToolsInfo") load("@prelude//user:rule_spec.bzl", "RuleRegistrationSpec") -def _impl(ctx: AnalysisContext) -> list[Provider]: +def _apple_tools_impl(ctx: AnalysisContext) -> list[Provider]: return [ DefaultInfo(), AppleToolsInfo( @@ -21,7 +21,8 @@ def _impl(ctx: AnalysisContext) -> list[Provider]: make_modulemap = ctx.attrs.make_modulemap[RunInfo], make_vfsoverlay = ctx.attrs.make_vfsoverlay[RunInfo], selective_debugging_scrubber = ctx.attrs.selective_debugging_scrubber[RunInfo], - swift_objc_header_postprocess = ctx.attrs.swift_objc_header_postprocess[RunInfo], + xcframework_maker = ctx.attrs.xcframework_maker[RunInfo], + framework_sanitizer = ctx.attrs.framework_sanitizer[RunInfo], ), ] @@ -30,17 +31,18 @@ def _impl(ctx: AnalysisContext) -> list[Provider]: # toolchain/SDK specific, they're just internal helper tools. registration_spec = RuleRegistrationSpec( name = "apple_tools", - impl = _impl, + impl = _apple_tools_impl, attrs = { - "adhoc_codesign_tool": attrs.dep(providers = [RunInfo]), + "adhoc_codesign_tool": attrs.option(attrs.dep(providers = [RunInfo]), default = None), "assemble_bundle": attrs.dep(providers = [RunInfo]), "dry_codesign_tool": attrs.dep(providers = [RunInfo]), + "framework_sanitizer": attrs.dep(providers = [RunInfo]), "info_plist_processor": attrs.dep(providers = [RunInfo]), "ipa_package_maker": attrs.dep(providers = [RunInfo]), "make_modulemap": attrs.dep(providers = [RunInfo]), "make_vfsoverlay": attrs.dep(providers = [RunInfo]), "selective_debugging_scrubber": attrs.dep(providers = [RunInfo]), "split_arch_combine_dsym_bundles_tool": attrs.dep(providers = [RunInfo]), - "swift_objc_header_postprocess": attrs.dep(providers = [RunInfo]), + "xcframework_maker": attrs.dep(providers = [RunInfo]), }, ) diff --git a/prelude/apple/user/apple_watchos_bundle.bzl b/prelude/apple/user/apple_watchos_bundle.bzl index 261f68b633b79..4a03df0d437bd 100644 --- a/prelude/apple/user/apple_watchos_bundle.bzl +++ b/prelude/apple/user/apple_watchos_bundle.bzl @@ -6,46 +6,10 @@ # of this source tree. 
load("@prelude//apple:apple_bundle.bzl", "apple_bundle_impl") -load("@prelude//apple:apple_rules_impl_utility.bzl", "apple_bundle_extra_attrs") +load("@prelude//apple:apple_bundle_attrs.bzl", "apple_watchos_bundle_attrs") load("@prelude//user:rule_spec.bzl", "RuleRegistrationSpec") -load("@prelude//decls/common.bzl", "Traversal") -load("@prelude//decls/ios_rules.bzl", "AppleBundleExtension") load(":watch_transition.bzl", "watch_transition") -def _apple_bundle_base_attrs(): - return { - # Attributes comes from `attributes.bzl` but since it's autogenerated, we cannot easily abstract - "asset_catalogs_compilation_options": attrs.dict(key = attrs.string(), value = attrs.any(), default = {}), - "binary": attrs.option(attrs.dep(), default = None), - "codesign_flags": attrs.list(attrs.string(), default = []), - "codesign_identity": attrs.option(attrs.string(), default = None), - "contacts": attrs.list(attrs.string(), default = []), - "default_host_platform": attrs.option(attrs.configuration_label(), default = None), - "default_platform": attrs.option(attrs.string(), default = None), - "deps": attrs.list(attrs.dep(), default = []), - "extension": attrs.one_of(attrs.enum(AppleBundleExtension), attrs.string()), - "ibtool_flags": attrs.option(attrs.list(attrs.string()), default = None), - "ibtool_module_flag": attrs.option(attrs.bool(), default = None), - "incremental_bundling_enabled": attrs.option(attrs.bool(), default = None), - "info_plist": attrs.source(), - "info_plist_substitutions": attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False, default = {}), - "labels": attrs.list(attrs.string(), default = []), - "licenses": attrs.list(attrs.source(), default = []), - "platform_binary": attrs.option(attrs.list(attrs.tuple(attrs.regex(), attrs.dep())), default = None), - "product_name": attrs.option(attrs.string(), default = None), - "resource_group": attrs.option(attrs.string(), default = None), - "resource_group_map": attrs.option(attrs.list(attrs.tuple(attrs.string(), attrs.list(attrs.tuple(attrs.dep(), attrs.enum(Traversal), attrs.option(attrs.string()))))), default = None), - "skip_copying_swift_stdlib": attrs.option(attrs.bool(), default = None), - "try_skip_code_signing": attrs.option(attrs.bool(), default = None), - "xcode_product_type": attrs.option(attrs.string(), default = None), - } - -def _apple_watchos_bundle_attrs(): - attributes = {} - attributes.update(_apple_bundle_base_attrs()) - attributes.update(apple_bundle_extra_attrs()) - return attributes - def apple_watchos_bundle_impl(ctx: AnalysisContext) -> list[Provider]: # This rule is _equivalent_ to `apple_bundle` except it applies # an incoming watchOS transition. @@ -54,6 +18,6 @@ def apple_watchos_bundle_impl(ctx: AnalysisContext) -> list[Provider]: registration_spec = RuleRegistrationSpec( name = "apple_watchos_bundle", impl = apple_watchos_bundle_impl, - attrs = _apple_watchos_bundle_attrs(), + attrs = apple_watchos_bundle_attrs(), cfg = watch_transition, ) diff --git a/prelude/apple/user/apple_xcframework.bzl b/prelude/apple/user/apple_xcframework.bzl new file mode 100644 index 0000000000000..03aedbf6cf89c --- /dev/null +++ b/prelude/apple/user/apple_xcframework.bzl @@ -0,0 +1,185 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +load("@prelude//apple:apple_toolchain_types.bzl", "AppleToolsInfo") +load("@prelude//user:rule_spec.bzl", "RuleRegistrationSpec") + +def _apple_xcframework_impl(ctx: AnalysisContext) -> list[Provider]: + apple_tools = ctx.attrs._apple_tools[AppleToolsInfo] + + xcframework_dir = ctx.actions.declare_output(ctx.attrs.framework_name + ".xcframework", dir = True) + xcframework_command = cmd_args([ + apple_tools.xcframework_maker, + "--output-path", + xcframework_dir.as_output(), + "--name", + ctx.attrs.framework_name, + ]) + + for arch in ctx.attrs.framework: + framework_dep = ctx.attrs.framework[arch] + framework_paths = framework_dep[DefaultInfo].default_outputs + if len(framework_paths) > 1: + fail("xcframework's framework target {} must only produce one output".format(framework_dep.label)) + + xcframework_command.add("--framework-path") + xcframework_command.add(arch) + xcframework_command.add(framework_paths[0]) + + if ctx.attrs.include_dsym: + dsym_dep = framework_dep[DefaultInfo].sub_targets["dsym"] + dsym_path = dsym_dep[DefaultInfo].default_outputs + xcframework_command.add("--dsym-path") + xcframework_command.add(arch) + xcframework_command.add(dsym_path) + + ctx.actions.run(xcframework_command, category = "apple_xcframework") + return [ + DefaultInfo(default_output = xcframework_dir), + ] + +def _strip_os_sdk_and_runtime_constraints(platform: PlatformInfo, refs: struct) -> dict[TargetLabel, ConstraintValueInfo]: + return { + constraint_setting_label: constraint_setting_value + for (constraint_setting_label, constraint_setting_value) in platform.configuration.constraints.items() + if constraint_setting_label not in [refs.os[ConstraintSettingInfo].label, refs.sdk[ConstraintSettingInfo].label, refs.universal[ConstraintSettingInfo].label, refs.runtime[ConstraintSettingInfo].label] + } + +# provides a map of os-platform to cpu architectures +# so we can identify when universal binaries can be created instead of +# two separate frameworks +# +# e.g. 
input of ["ios-arm64", "iphonesimulator-x86_64", "iphonesimulator-arm64"] +# will produce {"ios": ["arm64"], "iphonesimulator": ["arm64", "x86_64"]} + +def _normalize_platforms(platforms: list[str]) -> dict[str, list[str]]: + result = {} + for platform in platforms: + plat_list = platform.split("-") + plat_type = plat_list[0] + plat_archs = plat_list[1:] + previous_archs = result.get(plat_type, []) + result[plat_type] = sorted(plat_archs + previous_archs) + + return result + +def _apple_xcframework_framework_attrib_split_transition_impl( + platform: PlatformInfo, + refs: struct, + attrs: struct) -> dict[str, PlatformInfo]: + result = {} + + new_platforms = _normalize_platforms(attrs.platforms).items() + for os_value, cpu_values in new_platforms: + updated_constraints = _strip_os_sdk_and_runtime_constraints(platform, refs) + + canonical_platform_suffix = "" + + if os_value == "macos": + canonical_platform_prefix = "macos" + updated_constraints[refs.os[ConstraintSettingInfo].label] = refs.macos[ConstraintValueInfo] + elif os_value == "iphoneos": + canonical_platform_prefix = "ios" + updated_constraints[refs.os[ConstraintSettingInfo].label] = refs.ios[ConstraintValueInfo] + updated_constraints[refs.sdk[ConstraintSettingInfo].label] = refs.ios_device_sdk[ConstraintValueInfo] + elif os_value == "watchos": + canonical_platform_prefix = "watchos" + updated_constraints[refs.os[ConstraintSettingInfo].label] = refs.watchos[ConstraintValueInfo] + updated_constraints[refs.sdk[ConstraintSettingInfo].label] = refs.watchos_device_sdk[ConstraintValueInfo] + elif os_value == "iphonesimulator": + canonical_platform_prefix = "ios" + canonical_platform_suffix = "simulator" + updated_constraints[refs.os[ConstraintSettingInfo].label] = refs.ios[ConstraintValueInfo] + updated_constraints[refs.sdk[ConstraintSettingInfo].label] = refs.ios_simulator_sdk[ConstraintValueInfo] + elif os_value == "watchossimulator": + canonical_platform_prefix = "watchos" + canonical_platform_suffix = "simulator" + updated_constraints[refs.os[ConstraintSettingInfo].label] = refs.watchos[ConstraintValueInfo] + updated_constraints[refs.sdk[ConstraintSettingInfo].label] = refs.watchos_simulator_sdk[ConstraintValueInfo] + elif os_value == "maccatalyst": + canonical_platform_prefix = "ios" + canonical_platform_suffix = "maccatalyst" + updated_constraints[refs.os[ConstraintSettingInfo].label] = refs.ios[ConstraintValueInfo] + updated_constraints[refs.sdk[ConstraintSettingInfo].label] = refs.maccatalyst_sdk[ConstraintValueInfo] + updated_constraints[refs.runtime[ConstraintSettingInfo].label] = refs.maccatalyst_runtime[ConstraintValueInfo] + else: + fail("Unsupported OS value {} in apple_xcframework() platforms.".format(os_value)) + + cpu_constraint_name = refs.cpu[ConstraintSettingInfo].label + + if len(cpu_values) > 1: + updated_constraints[refs.universal[ConstraintSettingInfo].label] = refs.universal_enabled[ConstraintValueInfo] + elif cpu_values[0] == "arm64": + updated_constraints[cpu_constraint_name] = refs.arm64[ConstraintValueInfo] + elif cpu_values[0] == "x86_64": + updated_constraints[cpu_constraint_name] = refs.x86_64[ConstraintValueInfo] + else: + fail("Unsupported CPU value {} in apple_xcframework().".format(cpu_values[0])) + + new_cfg = ConfigurationInfo( + constraints = updated_constraints, + values = platform.configuration.values, + ) + + canonical_platform_name = canonical_platform_prefix + "-" + "_".join(cpu_values) + if len(canonical_platform_suffix) > 0: + canonical_platform_name += "-" + canonical_platform_suffix + + 
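# Side note: a self-contained Python restatement of _normalize_platforms above,
# asserting the example from the comment. The function name is local to this
# sketch; the behavior matches the Starlark shown above.
def normalize_platforms(platforms: list[str]) -> dict[str, list[str]]:
    result: dict[str, list[str]] = {}
    for platform in platforms:
        plat_type, *plat_archs = platform.split("-")
        # Accumulate and sort architectures per OS so two single-arch entries
        # collapse into one universal entry.
        result[plat_type] = sorted(plat_archs + result.get(plat_type, []))
    return result

assert normalize_platforms(["ios-arm64", "iphonesimulator-x86_64", "iphonesimulator-arm64"]) == {
    "ios": ["arm64"],
    "iphonesimulator": ["arm64", "x86_64"],
}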
result.update({canonical_platform_name: PlatformInfo( + label = canonical_platform_name + "_transition", + configuration = new_cfg, + )}) + + return result + +framework_split_transition = transition( + impl = _apple_xcframework_framework_attrib_split_transition_impl, + refs = { + "arm32": "config//cpu/constraints:arm32", + "arm64": "config//cpu/constraints:arm64", + "cpu": "config//cpu/constraints:cpu", + "ios": "config//os/constraints:iphoneos", + "ios_device_sdk": "config//os/sdk/apple/constraints:iphoneos", + "ios_simulator_sdk": "config//os/sdk/apple/constraints:iphonesimulator", + "maccatalyst_runtime": "config//runtime/constraints:maccatalyst", + "maccatalyst_sdk": "config//os/sdk/apple/constraints:maccatalyst", + "macos": "config//os/constraints:macos", + "os": "config//os/constraints:os", + "runtime": "config//runtime/constraints:runtime", + "sdk": "config//os/sdk/apple/constraints:_", + "swift_library_evolution": "config//features/apple/constraints:swift_library_evolution", + "swift_library_evolution_enabled": "config//features/apple/constraints:swift_library_evolution_enabled", + "universal": "config//cpu/constraints:universal", + "universal_enabled": "config//cpu/constraints:universal-enabled", + "watchos": "config//os/constraints:watchos", + "watchos_device_sdk": "config//os/sdk/apple/constraints:watchos", + "watchos_simulator_sdk": "config//os/sdk/apple/constraints:watchsimulator", + "x86_64": "config//cpu/constraints:x86_64", + }, + attrs = [ + "platforms", + ], + split = True, +) + +registration_spec = RuleRegistrationSpec( + name = "apple_xcframework", + impl = _apple_xcframework_impl, + attrs = { + "framework": attrs.split_transition_dep(cfg = framework_split_transition), + "framework_name": attrs.string(), + "include_dsym": attrs.option(attrs.bool(), default = None), + "platforms": attrs.list(attrs.string(), default = []), + "_apple_tools": attrs.exec_dep(default = "prelude//apple/tools:apple-tools", providers = [AppleToolsInfo]), + }, +) + +def apple_xcframework_extra_attrs(): + attribs = { + "_apple_tools": attrs.exec_dep(default = "prelude//apple/tools:apple-tools", providers = [AppleToolsInfo]), + } + return attribs diff --git a/prelude/apple/user/cpu_split_transition.bzl b/prelude/apple/user/cpu_split_transition.bzl index 027259d98a99b..0ba17eeddc875 100644 --- a/prelude/apple/user/cpu_split_transition.bzl +++ b/prelude/apple/user/cpu_split_transition.bzl @@ -27,6 +27,15 @@ def _universal_constraint_value(platform: PlatformInfo, refs: struct) -> [None, universal = platform.configuration.constraints.get(refs.universal[ConstraintSettingInfo].label) return universal.label == refs.universal_enabled[ConstraintValueInfo].label if universal != None else False +def _filter_incompatible_constraints(platform_name: str, constraints: dict[TargetLabel, ConstraintValueInfo]) -> dict[TargetLabel, ConstraintValueInfo]: + filtered = dict() + for constraint_setting_label, constraint_value_info in constraints.items(): + incompatible_constraint_name = "//cpu/x86" if platform_name == "arm64" else "//cpu/arm" + if incompatible_constraint_name in str(constraint_value_info.label): + continue + filtered[constraint_setting_label] = constraint_value_info + return filtered + def _cpu_split_transition_impl( platform: PlatformInfo, refs: struct, @@ -43,7 +52,7 @@ def _cpu_split_transition_impl( cpu_name_to_cpu_constraint = {} if os_label == refs.ios[ConstraintValueInfo].label: - if sdk == None or sdk_label == refs.ios_simulator_sdk[ConstraintValueInfo].label: + if sdk == None or sdk_label == 
refs.ios_simulator_sdk[ConstraintValueInfo].label or sdk_label == refs.maccatalyst_sdk[ConstraintValueInfo].label: # default to simulator if SDK is not specified cpu_name_to_cpu_constraint["arm64"] = refs.arm64[ConstraintValueInfo] cpu_name_to_cpu_constraint["x86_64"] = refs.x86_64[ConstraintValueInfo] @@ -54,10 +63,9 @@ def _cpu_split_transition_impl( elif os_label == refs.watchos[ConstraintValueInfo].label: if sdk == None or sdk_label == refs.watchos_simulator_sdk[ConstraintValueInfo].label: cpu_name_to_cpu_constraint["arm64"] = refs.arm64[ConstraintValueInfo] - cpu_name_to_cpu_constraint["x86_64"] = refs.x86_64[ConstraintValueInfo] elif sdk_label == refs.watchos_device_sdk[ConstraintValueInfo].label: cpu_name_to_cpu_constraint["arm64"] = refs.arm64[ConstraintValueInfo] - cpu_name_to_cpu_constraint["arm32"] = refs.arm32[ConstraintValueInfo] + cpu_name_to_cpu_constraint["arm64_32"] = refs.arm64_32[ConstraintValueInfo] else: fail("Unsupported SDK {} for WatchOS".format(sdk_label)) elif os_label == refs.macos[ConstraintValueInfo].label: @@ -68,8 +76,8 @@ def _cpu_split_transition_impl( cpu_constraint_name = refs.cpu[ConstraintSettingInfo].label base_constraints = { - constraint_setting_label: constraint_setting_value - for (constraint_setting_label, constraint_setting_value) in platform.configuration.constraints.items() + constraint_setting_label: constraint_value_info + for (constraint_setting_label, constraint_value_info) in platform.configuration.constraints.items() if constraint_setting_label != cpu_constraint_name } @@ -77,6 +85,8 @@ def _cpu_split_transition_impl( for platform_name, cpu_constraint in cpu_name_to_cpu_constraint.items(): updated_constraints = dict(base_constraints) updated_constraints[cpu_constraint_name] = cpu_constraint + updated_constraints = _filter_incompatible_constraints(platform_name, updated_constraints) + new_configs[platform_name] = PlatformInfo( label = platform_name, configuration = ConfigurationInfo( @@ -92,15 +102,17 @@ cpu_split_transition = transition( refs = { "arm32": "config//cpu/constraints:arm32", "arm64": "config//cpu/constraints:arm64", + "arm64_32": "config//cpu/constraints:arm64_32", "cpu": "config//cpu/constraints:cpu", "ios": "config//os/constraints:iphoneos", "ios_device_sdk": "config//os/sdk/apple/constraints:iphoneos", "ios_simulator_sdk": "config//os/sdk/apple/constraints:iphonesimulator", + "maccatalyst_sdk": "config//os/sdk/apple/constraints:maccatalyst", "macos": "config//os/constraints:macos", "os": "config//os/constraints:os", "sdk": "config//os/sdk/apple/constraints:_", - "universal": "config//build_mode/apple/constraints:universal", - "universal_enabled": "config//build_mode/apple/constraints:universal-enabled", + "universal": "config//cpu/constraints:universal", + "universal_enabled": "config//cpu/constraints:universal-enabled", "watchos": "config//os/constraints:watchos", "watchos_device_sdk": "config//os/sdk/apple/constraints:watchos", "watchos_simulator_sdk": "config//os/sdk/apple/constraints:watchsimulator", diff --git a/prelude/apple/user/macos_transition.bzl b/prelude/apple/user/macos_transition.bzl new file mode 100644 index 0000000000000..74d6bca1057ad --- /dev/null +++ b/prelude/apple/user/macos_transition.bzl @@ -0,0 +1,52 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//transitions:utils.bzl", "filtered_platform_constraints", "get_constraint_value") +load("@prelude//utils:expect.bzl", "expect") + +""" +Transition from catalyst SDK to macOS SDK. Used for AppKit extension bundle rules. +Transforms both OS and SDK constraints. +Only sanity check for source configuration is done. +""" + +def _macos_transition_impl(platform: PlatformInfo, refs: struct) -> PlatformInfo: + # This functions operates in the following way: + # - Start with all the constraints from the platform and filter out the constraints for OS and SDK. + # - Always set the new OS constraint to macOS. + # - If the old SDK constraint was macCatalyst, replace with the equivalent macOS constraint. + # - Return a new platform with the updated constraints. + updated_constraints = filtered_platform_constraints(platform, [refs.os[ConstraintSettingInfo].label, refs.sdk[ConstraintSettingInfo].label]) + + macos = refs.macos[ConstraintValueInfo] + updated_constraints[refs.os[ConstraintSettingInfo].label] = macos + + # Update SDK constraint + old_sdk = get_constraint_value(platform, refs.sdk[ConstraintSettingInfo]) + maccatalyst_sdk = refs.maccatalyst_sdk[ConstraintValueInfo] + macosx_sdk = refs.macos_sdk[ConstraintValueInfo] + + if old_sdk != None: + expect(old_sdk.label in [macosx_sdk.label, maccatalyst_sdk.label], "If present, SDK transitioned non-identically to macOS should be `macCatalyst`, got {}".format(old_sdk.label)) + updated_constraints[refs.sdk[ConstraintSettingInfo].label] = macosx_sdk + + new_cfg = ConfigurationInfo( + constraints = updated_constraints, + values = platform.configuration.values, + ) + return PlatformInfo( + label = "macos_transition", + configuration = new_cfg, + ) + +macos_transition = transition(impl = _macos_transition_impl, refs = { + "maccatalyst_sdk": "config//os/sdk/apple/constraints:maccatalyst", + "macos": "config//os/constraints:macos", + "macos_sdk": "config//os/sdk/apple/constraints:macosx", + "os": "config//os/constraints:os", + "sdk": "config//os/sdk/apple/constraints:_", +}) diff --git a/prelude/apple/user/resource_group_map.bzl b/prelude/apple/user/resource_group_map.bzl index c3113564d38e1..3ad9549aff598 100644 --- a/prelude/apple/user/resource_group_map.bzl +++ b/prelude/apple/user/resource_group_map.bzl @@ -7,6 +7,7 @@ load( "@prelude//apple:resource_groups.bzl", + "ResourceGraphNode", # @unused Used as a type "ResourceGroupInfo", "create_resource_graph", "get_resource_graph_node_map_func", @@ -15,51 +16,55 @@ load( "@prelude//cxx:groups.bzl", "compute_mappings", "create_group", + "get_roots_from_mapping", + "make_info_subtarget_providers", "parse_groups_definitions", ) +load( + "@prelude//cxx:groups_types.bzl", + "GroupMapping", # @unused Used as a type + "Traversal", +) load("@prelude//user:rule_spec.bzl", "RuleRegistrationSpec") -load("@prelude//decls/common.bzl", "Traversal") - -def resource_group_map_attr(): - return attrs.option(attrs.dep(providers = [ResourceGroupInfo]), default = None) +load("@prelude//utils:utils.bzl", "flatten") -def _impl(ctx: AnalysisContext) -> list[Provider]: +def _resource_group_map_impl(ctx: AnalysisContext) -> list[Provider]: resource_groups = parse_groups_definitions(ctx.attrs.map, lambda root: root.label) - # Extract deps from the roots via the raw attrs, 
as `parse_groups_definitions` - # parses them as labels. - resource_groups_deps = [ - mapping[0] - for entry in ctx.attrs.map - for mapping in entry[1] - ] + resource_group_to_implicit_deps_mapping = { + group: flatten([get_roots_from_mapping(mapping) for mapping in mappings]) + for group, mappings in ctx.attrs.map + } + flattend_resource_group_deps = flatten(resource_group_to_implicit_deps_mapping.values()) + resource_graph = create_resource_graph( ctx = ctx, labels = [], - deps = resource_groups_deps, + deps = flattend_resource_group_deps, exported_deps = [], ) resource_graph_node_map = get_resource_graph_node_map_func(resource_graph)() mappings = compute_mappings( - groups = [ - create_group( + groups_map = { + group.name: create_group( group = group, # User provided mappings may contain entries that don't support # ResourceGraphInfo, which `create_resource_graph` removes above. # So make sure we remove them from the mappings too, otherwise # `compute_mappings` crashes on the inconsistency. - mappings = [ - mapping - for mapping in group.mappings - if mapping.root == None or mapping.root in resource_graph_node_map - ], + mappings = filter( + None, + [_fixup_mapping_to_only_include_roots_in_the_map(m, resource_graph_node_map) for m in group.mappings], + ), ) for group in resource_groups - ], + }, graph_map = resource_graph_node_map, ) return [ - DefaultInfo(), + DefaultInfo(sub_targets = { + "info": make_info_subtarget_providers(ctx, resource_groups, mappings), + }), ResourceGroupInfo( groups = resource_groups, groups_hash = hash(str(resource_groups)), @@ -68,14 +73,44 @@ def _impl(ctx: AnalysisContext) -> list[Provider]: # referenced in our roots, so propagate them here. # NOTE(agallagher): We do this to maintain existing behavior here # but it's not clear if it's actually desirable behavior. - implicit_deps = resource_groups_deps, + resource_group_to_implicit_deps_mapping = resource_group_to_implicit_deps_mapping, ), ] +def _fixup_mapping_to_only_include_roots_in_the_map(mapping: GroupMapping, node_map: dict[Label, ResourceGraphNode]) -> GroupMapping | None: + if not mapping.roots: + return mapping + + filtered_roots = [ + root + for root in mapping.roots + if root in node_map + ] + if not filtered_roots: + return None + + return GroupMapping( + roots = filtered_roots, + traversal = mapping.traversal, + filters = mapping.filters, + preferred_linkage = mapping.preferred_linkage, + ) + registration_spec = RuleRegistrationSpec( name = "resource_group_map", - impl = _impl, + impl = _resource_group_map_impl, attrs = { - "map": attrs.list(attrs.tuple(attrs.string(), attrs.list(attrs.tuple(attrs.dep(), attrs.enum(Traversal), attrs.option(attrs.string()))))), + "map": attrs.list( + attrs.tuple( + attrs.string(), + attrs.list( + attrs.tuple( + attrs.one_of(attrs.dep(), attrs.list(attrs.dep())), + attrs.enum(Traversal.values()), + attrs.option(attrs.string()), + ), + ), + ), + ), }, ) diff --git a/prelude/apple/user/target_sdk_version_transition.bzl b/prelude/apple/user/target_sdk_version_transition.bzl new file mode 100644 index 0000000000000..d602fd3dab6df --- /dev/null +++ b/prelude/apple/user/target_sdk_version_transition.bzl @@ -0,0 +1,50 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
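# Side note: the reworked `map` attribute of resource_group_map above now
# accepts either a single dep or a list of deps as a mapping root. Sketched as
# plain Python data; the target labels and traversal names are hypothetical
# examples, not taken from the patch:
resource_group_map = [
    ("group_one", [
        ("//app/res:images", "tree", None),                        # single root
        (["//app/res:strings", "//app/res:fonts"], "node", None),  # list of roots
    ]),
]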
+ +""" +Apply a constraint if the propagated_target_sdk_version attribute is set. +This overrides any existing target_sdk_version select. +""" + +load("@prelude//apple:versions.bzl", "TARGET_SDK_VERSIONS") + +def _target_sdk_version_transition_impl(platform: PlatformInfo, refs: struct, attrs: struct) -> PlatformInfo: + target_sdk_version = attrs.propagated_target_sdk_version + if not target_sdk_version: + return platform + + constraint_label = refs.version[ConstraintSettingInfo].label + constraint_value = platform.configuration.constraints.get(constraint_label) + version_provider = getattr(refs, target_sdk_version, None) + if version_provider == None: + fail("target sdk version {} is missing constraints".format(target_sdk_version)) + + version_constraint = version_provider[ConstraintValueInfo] + if constraint_value == version_constraint: + return platform + + updated_constraints = platform.configuration.constraints + updated_constraints[constraint_label] = version_constraint + new_cfg = ConfigurationInfo( + constraints = updated_constraints, + values = platform.configuration.values, + ) + return PlatformInfo( + label = platform.label + "_target_sdk_version_" + target_sdk_version, + configuration = new_cfg, + ) + +target_sdk_version_transition = transition( + impl = _target_sdk_version_transition_impl, + refs = dict( + [("version", "@config//version:constraint-setting-target-sdk-version")] + { + version: "@config//version:constraint-value-target-sdk-version-" + version + for version in TARGET_SDK_VERSIONS + }.items(), + ), + attrs = ["propagated_target_sdk_version"], +) diff --git a/prelude/apple/user/watch_transition.bzl b/prelude/apple/user/watch_transition.bzl index affa70ce4468b..22e1dd241e4c3 100644 --- a/prelude/apple/user/watch_transition.bzl +++ b/prelude/apple/user/watch_transition.bzl @@ -11,31 +11,19 @@ Transforms both OS and SDK constraints. Only sanity check for source configuration is done. """ -load("@prelude//utils:utils.bzl", "expect") +load("@prelude//transitions:utils.bzl", "filtered_platform_constraints", "get_constraint_value") +load("@prelude//utils:expect.bzl", "expect") -def _os_and_sdk_unrelated_constraints(platform: PlatformInfo, refs: struct) -> dict[TargetLabel, ConstraintValueInfo]: - return { - constraint_setting_label: constraint_setting_value - for (constraint_setting_label, constraint_setting_value) in platform.configuration.constraints.items() - if constraint_setting_label not in [refs.os[ConstraintSettingInfo].label, refs.sdk[ConstraintSettingInfo].label] - } - -def _old_os_constraint_value(platform: PlatformInfo, refs: struct) -> [None, ConstraintValueInfo]: - return platform.configuration.constraints.get(refs.os[ConstraintSettingInfo].label) - -def _old_sdk_constraint_value(platform: PlatformInfo, refs: struct) -> [None, ConstraintValueInfo]: - return platform.configuration.constraints.get(refs.sdk[ConstraintSettingInfo].label) - -def _impl(platform: PlatformInfo, refs: struct) -> PlatformInfo: +def _watch_transition_impl(platform: PlatformInfo, refs: struct) -> PlatformInfo: # This functions operates in the following way: # - Start with all the constraints from the platform and filter out the constraints for OS and SDK. # - If the old OS constraint was iOS or watchOS, set the new constraint to be always watchOS. # - If the old SDK constraint was iOS, replace with the equivalent watchOS constraint. # - Return a new platform with the updated constraints. 
- updated_constraints = _os_and_sdk_unrelated_constraints(platform, refs) + updated_constraints = filtered_platform_constraints(platform, [refs.os[ConstraintSettingInfo].label, refs.sdk[ConstraintSettingInfo].label]) # Update OS constraint - old_os = _old_os_constraint_value(platform, refs) + old_os = get_constraint_value(platform, refs.os[ConstraintSettingInfo]) watchos = refs.watchos[ConstraintValueInfo] ios = refs.ios[ConstraintValueInfo] if old_os != None: @@ -43,7 +31,7 @@ def _impl(platform: PlatformInfo, refs: struct) -> PlatformInfo: updated_constraints[refs.os[ConstraintSettingInfo].label] = watchos # Update SDK constraint - old_sdk = _old_sdk_constraint_value(platform, refs) + old_sdk = get_constraint_value(platform, refs.sdk[ConstraintSettingInfo]) watchos_device_sdk = refs.watchos_device_sdk[ConstraintValueInfo] watchos_simulator_sdk = refs.watchos_simulator_sdk[ConstraintValueInfo] ios_device_sdk = refs.ios_device_sdk[ConstraintValueInfo] @@ -71,7 +59,7 @@ def _impl(platform: PlatformInfo, refs: struct) -> PlatformInfo: configuration = new_cfg, ) -watch_transition = transition(impl = _impl, refs = { +watch_transition = transition(impl = _watch_transition_impl, refs = { "ios": "config//os/constraints:iphoneos", "ios_device_sdk": "config//os/sdk/apple/constraints:iphoneos", "ios_simulator_sdk": "config//os/sdk/apple/constraints:iphonesimulator", diff --git a/prelude/apple/validation/debug_artifacts.bzl b/prelude/apple/validation/debug_artifacts.bzl new file mode 100644 index 0000000000000..d1b6db0a36c23 --- /dev/null +++ b/prelude/apple/validation/debug_artifacts.bzl @@ -0,0 +1,98 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
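The new `debug_artifacts.bzl` below reads `ctx.attrs.debug_artifacts_validators` and unpacks each value into an `(analysis, reducer)` pair of `RunInfo`-carrying deps. A hedged sketch of what the corresponding attribute declaration might look like, inferred from that usage (the actual declaration is not part of this diff, and `attrs.exec_dep` is an assumption):

```python
# Hypothetical attribute shape; both tools are executed as build actions below,
# so exec deps with a RunInfo provider seem the natural fit.
_DEBUG_ARTIFACTS_VALIDATOR_ATTRS = {
    "debug_artifacts_validators": attrs.dict(
        attrs.string(),
        attrs.tuple(
            attrs.exec_dep(providers = [RunInfo]),  # analysis tool
            attrs.exec_dep(providers = [RunInfo]),  # reducer tool
        ),
        default = {},
    ),
}
```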
+ +load( + "@prelude//:artifact_tset.bzl", + "ArtifactTSet", # @unused Used as a type +) + +_AnalysisInput = record( + argsfile = field(Artifact), + identifier = field(int), +) + +def get_debug_artifacts_validators(ctx, artifacts: ArtifactTSet) -> dict[str, Artifact]: + label_to_input_artifacts = _get_analysis_input_artifacts(ctx, artifacts) + if not label_to_input_artifacts: + return {} + + name_to_validation_result = {} + for key, validator in ctx.attrs.debug_artifacts_validators.items(): + analysis, reducer = validator + label_to_analysis_artifacts = _analyze_artifacts(ctx, key, analysis[RunInfo], label_to_input_artifacts) + name_to_validation_result[key] = _reduce_analysis_artifacts(ctx, key, reducer[RunInfo], label_to_analysis_artifacts) + + return name_to_validation_result + +def _get_analysis_input_artifacts(ctx, artifacts: ArtifactTSet) -> dict[Label, list[_AnalysisInput]]: + underlying_tset = artifacts._tset + if underlying_tset == None: + return {} + + results = {} + identifier = 0 + for infos in underlying_tset.traverse(): + for info in infos: + argsfile = ctx.actions.write( + "artifacts-{}.txt".format(identifier), + info.artifacts, + with_inputs = True, + ) + results.setdefault(info.label, []).append( + _AnalysisInput(argsfile = argsfile, identifier = identifier), + ) + identifier += 1 + return results + +def _analyze_artifacts( + ctx, + key: str, + analysis_tool: RunInfo, + label_to_artifacts: dict[Label, list[_AnalysisInput]]) -> dict[Label, list[Artifact]]: + label_to_analysis = {} + for label, inputs in label_to_artifacts.items(): + for input in inputs: + output = ctx.actions.declare_output("{}_{}.json".format(key, input.identifier)) + ctx.actions.run( + cmd_args([ + analysis_tool, + "--artifacts", + cmd_args(input.argsfile, format = "@{}"), + "--output", + output.as_output(), + ]), + category = "{}_analysis".format(key), + identifier = "{}_{}".format(ctx.attrs.name, input.identifier), + ) + label_to_analysis.setdefault(label, []).append(output) + + return label_to_analysis + +def _reduce_analysis_artifacts( + ctx, + key: str, + reducer_tool: RunInfo, + label_to_artifacts: dict[Label, list[Artifact]]) -> Artifact: + input_json = ctx.actions.write_json( + "{}_reducer_args.json".format(key), + label_to_artifacts, + with_inputs = True, + ) + + output = ctx.actions.declare_output("{}.json".format(key)) + ctx.actions.run( + cmd_args([ + reducer_tool, + "--analysis-json-path", + input_json, + "--output", + output.as_output(), + ]), + category = "{}_reduce".format(key), + identifier = ctx.attrs.name, + ) + return output diff --git a/prelude/apple/versions.bzl b/prelude/apple/versions.bzl new file mode 100644 index 0000000000000..881d0d056b94d --- /dev/null +++ b/prelude/apple/versions.bzl @@ -0,0 +1,39 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# All the target SDK versions currently in use. 
+TARGET_SDK_VERSIONS = [ + "1.0", + "4.0", + "7.0", + "9.0", + "10.0", + "10.10", + "10.12", + "10.13", + "10.14", + "10.15", + "11.0", + "11.3", + "12.0", + "12.4", + "13.0", + "13.1", + "13.3", + "13.4", + "14.0", + "14.2", + "15.0", + "15.1", + "15.2", + "15.5", + "16.0", + "16.1", + "16.3", + "17.0", + "18.0", +] diff --git a/prelude/apple/xcode.bzl b/prelude/apple/xcode.bzl index 00849d1b0bf06..b067ceb542582 100644 --- a/prelude/apple/xcode.bzl +++ b/prelude/apple/xcode.bzl @@ -7,33 +7,38 @@ load("@prelude//apple:apple_sdk.bzl", "get_apple_sdk_name") load("@prelude//apple:apple_target_sdk_version.bzl", "get_min_deployment_version_for_node") -load("@prelude//apple:apple_utility.bzl", "has_apple_toolchain") +load("@prelude//apple:apple_utility.bzl", "get_apple_architecture", "has_apple_toolchain") load( "@prelude//cxx:argsfiles.bzl", "CompileArgsfile", # @unused Used as a type ) load( - "@prelude//cxx:compile.bzl", + "@prelude//cxx:cxx_sources.bzl", "CxxSrcWithFlags", # @unused Used as a type ) load("@prelude//cxx:xcode.bzl", "cxx_populate_xcode_attributes") -load("@prelude//utils:utils.bzl", "expect") +load("@prelude//ide_integrations:xcode.bzl", "XcodeDataInfoKeys") +load("@prelude//utils:expect.bzl", "expect") def apple_populate_xcode_attributes( ctx, srcs: list[CxxSrcWithFlags], argsfiles: dict[str, CompileArgsfile], - product_name: str) -> dict[str, typing.Any]: + product_name: str, + contains_swift_sources: bool = False) -> dict[str, typing.Any]: data = cxx_populate_xcode_attributes(ctx = ctx, srcs = srcs, argsfiles = argsfiles, product_name = product_name) + data[XcodeDataInfoKeys.CONTAINS_SWIFT_SOURCES] = contains_swift_sources + if has_apple_toolchain(ctx): - data["sdk"] = get_apple_sdk_name(ctx) - data["deployment_version"] = get_min_deployment_version_for_node(ctx) + data[XcodeDataInfoKeys.ARCH] = get_apple_architecture(ctx) + data[XcodeDataInfoKeys.SDK] = get_apple_sdk_name(ctx) + data[XcodeDataInfoKeys.DEPLOYMENT_VERSION] = get_min_deployment_version_for_node(ctx) if hasattr(ctx.attrs, "swift_version"): swift_version = ctx.attrs.swift_version if swift_version != None: - data["swift_version"] = swift_version + data[XcodeDataInfoKeys.SWIFT_VERSION] = swift_version apple_xcode_data_add_xctoolchain(ctx, data) return data @@ -64,7 +69,3 @@ def _get_attribute_with_output(ctx: AnalysisContext, attr_name: str) -> [Depende # So, an empty `DefaultInfo` basically signifies that there's no xctoolchain. return dep return None - -def get_project_root_file(ctx) -> Artifact: - content = cmd_args(ctx.label.project_root) - return ctx.actions.write("project_root", content, absolute = True) diff --git a/prelude/apple/xcode_postbuild_script.bzl b/prelude/apple/xcode_postbuild_script.bzl deleted file mode 100644 index 3c425a48e3dfc..0000000000000 --- a/prelude/apple/xcode_postbuild_script.bzl +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under both the MIT license found in the -# LICENSE-MIT file in the root directory of this source tree and the Apache -# License, Version 2.0 found in the LICENSE-APACHE file in the root directory -# of this source tree. - -def xcode_postbuild_script_impl(_ctx: AnalysisContext) -> list[Provider]: - return [DefaultInfo()] diff --git a/prelude/apple/xcode_prebuild_script.bzl b/prelude/apple/xcode_prebuild_script.bzl deleted file mode 100644 index dc56f17ef9254..0000000000000 --- a/prelude/apple/xcode_prebuild_script.bzl +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. 
and affiliates. -# -# This source code is licensed under both the MIT license found in the -# LICENSE-MIT file in the root directory of this source tree and the Apache -# License, Version 2.0 found in the LICENSE-APACHE file in the root directory -# of this source tree. - -def xcode_prebuild_script_impl(_ctx: AnalysisContext) -> list[Provider]: - return [DefaultInfo()] diff --git a/prelude/artifact_tset.bzl b/prelude/artifact_tset.bzl index be08fb1c923b6..0df80f907c417 100644 --- a/prelude/artifact_tset.bzl +++ b/prelude/artifact_tset.bzl @@ -5,15 +5,24 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +load("@prelude//utils:expect.bzl", "expect") load( "@prelude//utils:utils.bzl", - "expect", "flatten", ) +# Generic tag to provide more information about the artifact +ArtifactInfoTag = enum( + # Describes swiftmodule artifact generated by swift rules. + "swiftmodule", + # Describes clang pcm module artifact generated and consumed by swift rules. + "swift_pcm", +) + ArtifactInfo = record( label = field(Label), artifacts = field(list[Artifact]), + tags = field(list[ArtifactInfoTag]), ) def _get_artifacts(entries: list[ArtifactInfo]) -> list[Artifact]: @@ -35,7 +44,8 @@ def make_artifact_tset( label: [Label, None] = None, artifacts: list[Artifact] = [], infos: list[ArtifactInfo] = [], - children: list[ArtifactTSet] = []) -> ArtifactTSet: + children: list[ArtifactTSet] = [], + tags: list[ArtifactInfoTag] = []) -> ArtifactTSet: expect( label != None or not artifacts, "must pass in `label` to associate with artifacts", @@ -47,7 +57,7 @@ def make_artifact_tset( # Build list of all non-child values. values = [] if artifacts: - values.append(ArtifactInfo(label = label, artifacts = artifacts)) + values.append(ArtifactInfo(label = label, artifacts = artifacts, tags = tags)) values.extend(infos) # If there's no children or artifacts, return `None`. diff --git a/prelude/artifacts.bzl b/prelude/artifacts.bzl index cafbd7861e2fe..e9d65f253c321 100644 --- a/prelude/artifacts.bzl +++ b/prelude/artifacts.bzl @@ -6,11 +6,9 @@ # of this source tree. load("@prelude//:paths.bzl", "paths") +load("@prelude//dist:dist_info.bzl", "DistInfo") load("@prelude//utils:arglike.bzl", "ArgLike") # @unused Used as a type -load( - "@prelude//utils:utils.bzl", - "expect", -) +load("@prelude//utils:expect.bzl", "expect") # A group of artifacts. ArtifactGroupInfo = provider( @@ -19,63 +17,160 @@ ArtifactGroupInfo = provider( }, ) -def _from_default_info(dep: Dependency) -> (Artifact, list[ArgLike]): - info = dep[DefaultInfo] - expect( - len(info.default_outputs) == 1, - "expected exactly one default output from {} ({})" - .format(dep, info.default_outputs), - ) - return (info.default_outputs[0], info.other_outputs) +ArtifactOutputs = record( + # Single output. This is the artifact whose path would go into the resources + # JSON when this artifact is used as a resource. + default_output = field(Artifact), + + # Other artifacts which need to be present in order to run the resource as + # an executable. This includes shared library dependencies and resources. + nondebug_runtime_files = field(list[ArgLike]), + + # Other outputs that would be materialized if this artifact is the output of + # a build, or generally in any context where a user might run this artifact + # in a debugger. + # + # This is a superset of nondebug_runtime_files and also includes external + # debuginfo. 
+ other_outputs = field(list[ArgLike]), +) + +# Wrapper to support wrapping `Artifact`s referencing paths behind external +# symlinks. +ArtifactExt = record( + artifact = field(Artifact), + # If the `artifact` above is a symlink referencing an external path, this + # is an optional sub-path to append when accessing the path. + sub_path = field(str | None, None), + # Returns the resolved path as a `cmd_arg()`, with the optional sub-path + # appended. + as_arg = field(typing.Callable), + join = field(typing.Callable), +) + +# A Provider that mirrors `DefaultInfo` for `Artifact` outputs, but allows +# specifying an `ArtifactExt` as its default output. +DefaultOutputExt = provider( + fields = dict( + default_output = provider_field(ArtifactExt), + ), +) -def unpack_artifacts(artifacts: list[[Artifact, Dependency]]) -> list[(Artifact, list[ArgLike])]: +def single_artifact(dep: Artifact | Dependency) -> ArtifactOutputs: + if type(dep) == "artifact": + return ArtifactOutputs( + default_output = dep, + nondebug_runtime_files = [], + other_outputs = [], + ) + + if DefaultInfo in dep: + info = dep[DefaultInfo] + expect( + len(info.default_outputs) == 1, + "expected exactly one default output from {} ({})" + .format(dep, info.default_outputs), + ) + default_output = info.default_outputs[0] + other_outputs = info.other_outputs + + dist_info = dep.get(DistInfo) + nondebug_runtime_files = dist_info.nondebug_runtime_files if dist_info else other_outputs + + return ArtifactOutputs( + default_output = default_output, + nondebug_runtime_files = nondebug_runtime_files, + other_outputs = other_outputs, + ) + + fail("unexpected dependency type: {}".format(type(dep))) + +def unpack_artifacts(artifacts: list[Artifact | Dependency]) -> list[ArtifactOutputs]: """ - Unpack a list of `artifact` and `ArtifactGroupInfo` into a flattened list - of `artifact`s + Unpack a heterogeneous list of Artifact and ArtifactGroupInfo into a list + representing their outputs. """ out = [] for artifact in artifacts: if type(artifact) == "artifact": - out.append((artifact, [])) + out.append(ArtifactOutputs( + default_output = artifact, + nondebug_runtime_files = [], + other_outputs = [], + )) continue if ArtifactGroupInfo in artifact: for artifact in artifact[ArtifactGroupInfo].artifacts: - out.append((artifact, [])) + out.append(ArtifactOutputs( + default_output = artifact, + nondebug_runtime_files = [], + other_outputs = [], + )) continue - if DefaultInfo in artifact: - out.append(_from_default_info(artifact)) - continue - - fail("unexpected dependency type: {}".format(type(artifact))) + out.append(single_artifact(artifact)) return out -def unpack_artifact_map(artifacts: dict[str, [Artifact, Dependency]]) -> dict[str, (Artifact, list[ArgLike])]: +def unpack_artifact_map(artifacts: dict[str, Artifact | Dependency]) -> dict[str, ArtifactOutputs]: """ - Unpack a list of `artifact` and `ArtifactGroupInfo` into a flattened list - of `artifact`s + Unpack a heterogeneous dict of Artifact and ArtifactGroupInfo into a dict + representing their outputs.
""" out = {} for name, artifact in artifacts.items(): if type(artifact) == "artifact": - out[name] = (artifact, []) + out[name] = ArtifactOutputs( + default_output = artifact, + nondebug_runtime_files = [], + other_outputs = [], + ) continue if ArtifactGroupInfo in artifact: for artifact in artifact[ArtifactGroupInfo].artifacts: - out[paths.join(name, artifact.short_path)] = (artifact, []) - continue - - if DefaultInfo in artifact: - out[name] = _from_default_info(artifact) + out[paths.join(name, artifact.short_path)] = ArtifactOutputs( + default_output = artifact, + nondebug_runtime_files = [], + other_outputs = [], + ) continue - fail("unexpected dependency type: {}".format(type(artifact))) + out[name] = single_artifact(artifact) return out + +def _as_arg(artifact: Artifact, sub_path: str | None) -> ArgLike: + if sub_path == None: + return artifact + return cmd_args(artifact, format = "{{}}/{}".format(sub_path)) + +def artifact_ext( + artifact: Artifact, + sub_path: str | None = None) -> ArtifactExt: + return ArtifactExt( + artifact = artifact, + sub_path = sub_path, + as_arg = lambda: _as_arg(artifact, sub_path), + join = lambda p: artifact_ext( + artifact = artifact, + sub_path = p if sub_path == None else paths.join(sub_path, p), + ), + ) + +def to_artifact_ext(src: Artifact | Dependency) -> ArtifactExt: + if type(src) == "dependency": + ext = src.get(DefaultOutputExt) + if ext != None: + return ext.default_output + else: + (src,) = src[DefaultInfo].default_outputs + return artifact_ext(src) + +def to_arglike(src: Artifact | Dependency) -> ArgLike: + return to_artifact_ext(src).as_arg() diff --git a/prelude/asserts.bzl b/prelude/asserts.bzl index 4ec4b378255bb..3fd6a28718b37 100644 --- a/prelude/asserts.bzl +++ b/prelude/asserts.bzl @@ -1,71 +1,33 @@ -# Copyright 2017 The Bazel Authors. All rights reserved. +# Copyright (c) Meta Platforms, Inc. and affiliates. # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# @lint-ignore-every LICENSELINT - -"""Testing support. - -This is a modified version of https://github.com/bazelbuild/bazel-skylib/blob/main/lib/unittest.bzl. -Currently, if there are any failures, these are raised immediately by calling fail(), -which trigger an analysis-time build error. -""" +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. -def _assert_equals(expected, actual, msg = None): - """Asserts that the given `expected` and `actual` are equal. - - Args: - expected: the expected value of some computation. - actual: the actual value return by some computation. - msg: An optional message that will be printed that describes the failure. - If omitted, a default will be used. 
- """ +def _equals(expected, actual, msg = None): if expected != actual: - expectation_msg = 'Expected "%s", but got "%s"' % (expected, actual) - if msg: - full_msg = "%s (%s)" % (msg, expectation_msg) + if msg == None: + fail("expected: {}, got: {}".format(expected, actual)) else: - full_msg = expectation_msg - fail(full_msg) - -def _assert_true( - condition, - msg = "Expected condition to be true, but was false."): - """Asserts that the given `condition` is true. + fail("{}: expected: {}, got: {}{}".format(msg, expected, actual)) - Args: - condition: A value that will be evaluated in a Boolean context. - msg: An optional message that will be printed that describes the failure. - If omitted, a default will be used. - """ +def _true(condition, msg = None): if not condition: - fail(msg) - -def _assert_false( - condition, - msg = "Expected condition to be false, but was true."): - """Asserts that the given `condition` is false. + if msg != None: + fail(msg) + else: + fail("Condition is not met") - Args: - condition: A value that will be evaluated in a Boolean context. - msg: An optional message that will be printed that describes the failure. - If omitted, a default will be used. - """ +def _false(condition, msg = None): if condition: - fail(msg) + if msg != None: + fail(msg) + else: + fail("Condition is expected to be false") asserts = struct( - equals = _assert_equals, - true = _assert_true, - false = _assert_false, + equals = _equals, + true = _true, + false = _false, ) diff --git a/prelude/attrs_validators.bzl b/prelude/attrs_validators.bzl new file mode 100644 index 0000000000000..74f41acef2df7 --- /dev/null +++ b/prelude/attrs_validators.bzl @@ -0,0 +1,48 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +AttrsValidatorsInfo = provider( + fields = { + "func": typing.Callable[[AnalysisActions, Label, struct], dict[str, Artifact]], + }, +) + +ATTRS_VALIDATORS_NAME = "attrs_validators" +ATTRS_VALIDATORS_TYPE = attrs.option(attrs.list(attrs.dep(providers = [AttrsValidatorsInfo])), default = None) + +def get_attrs_validators_outputs(ctx: AnalysisContext) -> (list[Provider], dict[str, list[Provider]]): + validators = getattr(ctx.attrs, ATTRS_VALIDATORS_NAME, []) + if not validators: + return ([], {}) + + specs = [] + sub_targets = {} + for validator in validators: + for name, output in validator[AttrsValidatorsInfo].func(ctx.actions, ctx.label, ctx.attrs).items(): + specs.append(ValidationSpec(name = name, validation_result = output)) + + if name in sub_targets: + fail("Collison: two attrs_validators with the same name '{}': {} and {}".format( + name, + output, + sub_targets[name], + )) + + sub_targets[name] = [DefaultInfo(output)] + + return ( + [ValidationInfo(validations = specs)] if specs else [], + { + "attrs-validators": [ + DefaultInfo( + # It'll be expensive to put all the artifacts in here, just skip it. + default_outputs = None, + sub_targets = sub_targets, + ), + ], + } if sub_targets else {}, + ) diff --git a/prelude/buck2_compatibility.bzl b/prelude/buck2_compatibility.bzl new file mode 100644 index 0000000000000..439b344af51c4 --- /dev/null +++ b/prelude/buck2_compatibility.bzl @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +Buck2Compatibility = enum( + "unknown", # No warnings or failures, default state + "compatible", # Compatible with Buck2, Buck1 will show warning to migrate to Buck2 + "incompatible", # Incompatible with Buck2, Buck2 will show warning about correctness of result + "required", # Buck2 required, Buck1 will fail the build +) + +BUCK2_COMPATIBILITY_ATTRIB_NAME = "buck2_compatibility" +BUCK2_COMPATIBILITY_ATTRIB_TYPE = attrs.enum(Buck2Compatibility.values(), default = "unknown") + +def check_buck2_compatibility(ctx: AnalysisContext): + if hasattr(ctx.attrs, "buck2_compatibility") and ctx.attrs.buck2_compatibility == "incompatible": + warning("The target '{}' is marked as incompatible with buck2, output might be incorrect".format(ctx.label)) diff --git a/prelude/builtin.bzl b/prelude/builtin.bzl deleted file mode 100644 index ef1dd61fc7118..0000000000000 --- a/prelude/builtin.bzl +++ /dev/null @@ -1,12 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under both the MIT license found in the -# LICENSE-MIT file in the root directory of this source tree and the Apache -# License, Version 2.0 found in the LICENSE-APACHE file in the root directory -# of this source tree. - -# Definitions we have builtin to Buck. -# Useful for running the Starlark checker on the files. - -def DefaultInfo(): - pass diff --git a/prelude/cfg/modifier/alias.bzl b/prelude/cfg/modifier/alias.bzl new file mode 100644 index 0000000000000..1838bc93ccb26 --- /dev/null +++ b/prelude/cfg/modifier/alias.bzl @@ -0,0 +1,28 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +""" +Modifier aliases that can be used on the CLI, ex. after `--modifier=` or in `?`. + +These aliases are ones we share between internal and OSS usages. +All constraints used in these aliases must also be available in OSS. +""" + +# It's represented as a struct where the attribute name is the alias and the string +# for the attribute is the fully qualified target. Defining aliases in a struct +# helps enforce that the alias names do not contain any bad character we cannot use on CLI. +# +# We define aliases for modifiers here rather than reusing `alias` section of buckconfig for +# several reasons. +# 1. `alias` buckconfig is not well-designed. It only supports aliases within a cell, not +# global aliases, and users can override aliases in modefiles. +# 2. Modifier aliases can point to conditional modifiers, which `alias` buckconfig does not +# support. +# 3. It's unlikely a user has to ever define an alias twice in both the `alias` buckconfig +# and in modifier aliases because a modifier alias is a constraint value/config setting +# and those don't typically get built on CLI. +OSS_ALIASES = struct() diff --git a/prelude/cfg/modifier/asserts.bzl b/prelude/cfg/modifier/asserts.bzl new file mode 100644 index 0000000000000..214c7b30cef21 --- /dev/null +++ b/prelude/cfg/modifier/asserts.bzl @@ -0,0 +1,31 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates.
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//cfg/modifier:types.bzl", "Modifier", "is_modifiers_match") + +def verify_normalized_target(target: str): + # Do some basic checks that target looks reasonably valid and normalized + # Targets should always be fully qualified to improve readability. + if "//" not in target or target.startswith("//") or ":" not in target: + fail( + "Must specify fully qualified target (ex. `cell//foo:bar`). Found `{}`".format( + target, + ), + ) + +def verify_normalized_modifier(modifier: Modifier): + if modifier == None: + pass + elif is_modifiers_match(modifier): + # TODO(scottcao): Add a test case for this once `bxl_test` supports testing failures + for key, sub_modifier in modifier.items(): + if key != "_type": + verify_normalized_modifier(sub_modifier) + elif isinstance(modifier, str): + verify_normalized_target(modifier) + else: + fail("Found unexpected modifier `{}` type `{}`".format(modifier, type(modifier))) diff --git a/prelude/cfg/modifier/cfg_constructor.bzl b/prelude/cfg/modifier/cfg_constructor.bzl new file mode 100644 index 0000000000000..7c24a0176b819 --- /dev/null +++ b/prelude/cfg/modifier/cfg_constructor.bzl @@ -0,0 +1,185 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//utils:graph_utils.bzl", "post_order_traversal") +load( + ":common.bzl", + "get_constraint_setting_deps", + "get_modifier_info", + "json_to_tagged_modifiers", + "modifier_to_refs", + "resolve_alias", + "resolve_modifier", +) +load(":name.bzl", "cfg_name") +load( + ":types.bzl", + "Modifier", # @unused + "ModifierCliLocation", + "ModifierTargetLocation", + "TaggedModifiers", +) + +PostConstraintAnalysisParams = record( + legacy_platform = PlatformInfo | None, + # Merged modifier from PACKAGE, target, and cli modifiers. + merged_modifiers = list[TaggedModifiers], + extra_data = struct, +) + +def cfg_constructor_pre_constraint_analysis( + *, + legacy_platform: PlatformInfo | None, + # dict[str, typing.Any] is JSON dictionary form of `TaggedModifier` passed from buck2 core + package_modifiers: list[dict[str, typing.Any]] | None, + # typing.Any is JSON form of modifier + target_modifiers: list[Modifier] | None, + cli_modifiers: list[str], + rule_name: str, + aliases: struct, + extra_data: struct, + **_kwargs) -> (list[str], PostConstraintAnalysisParams): + """ + First stage of cfg constructor for modifiers. + + Args: + legacy_platform: + PlatformInfo from legacy target platform resolution, if one is specified + package_modifiers: + A list of modifiers specified from all parent PACKAGE files + target_modifiers: + A list of modifiers specified from buildfile via `metadata` attribute. + cli_modifiers: + modifiers specified from `--modifier` flag, `?modifier`, or BXL + rule_name: + The rule name of the target being configured; used to filter per-rule PACKAGE modifiers + aliases: + A struct that contains mapping of modifier aliases to modifier. + extra_data: + Extra data used for additional logging/validation by our internal modifier implementation.
+ + Returns `(refs, PostConstraintAnalysisParams)`, where `refs` is a list of fully qualified configuration + targets we need providers for. + """ + package_modifiers = package_modifiers or [] + target_modifiers = target_modifiers or [] + + # Convert JSONs back to TaggedModifiers + package_modifiers = [json_to_tagged_modifiers(modifier_json) for modifier_json in package_modifiers] + + # Filter PACKAGE modifiers based on rule name. + # This only filters out PACKAGE modifiers from `extra_cfg_modifiers_per_rule` argument of `set_cfg_modifiers` function. + package_modifiers = [tagged_modifiers for tagged_modifiers in package_modifiers if tagged_modifiers.rule_name == None or tagged_modifiers.rule_name == rule_name] + merged_modifiers = package_modifiers + + # Add target modifiers as `TaggedModifiers` + if target_modifiers: + merged_modifiers.append(TaggedModifiers(modifiers = target_modifiers, location = ModifierTargetLocation(), rule_name = None)) + + # Resolve all aliases in CLI modifiers + cli_modifiers = [resolved_modifier for modifier in cli_modifiers for resolved_modifier in resolve_alias(modifier, aliases)] + + # Convert CLI modifiers to `TaggedModifier` + if cli_modifiers: + merged_modifiers.append(TaggedModifiers(modifiers = cli_modifiers, location = ModifierCliLocation(), rule_name = None)) + + refs = [] + for tagged_modifiers in merged_modifiers: + for modifier in tagged_modifiers.modifiers: + refs.extend(modifier_to_refs(modifier, tagged_modifiers.location)) + + return refs, PostConstraintAnalysisParams( + legacy_platform = legacy_platform, + merged_modifiers = merged_modifiers, + extra_data = extra_data, + ) + +def cfg_constructor_post_constraint_analysis( + *, + refs: dict[str, ProviderCollection], + params: PostConstraintAnalysisParams) -> PlatformInfo: + """ + Second stage of cfg constructor for modifiers. + + Args: + refs: a dictionary of fully qualified target labels for configuration targets with their providers + params: `PostConstraintAnalysisParams` returned from first stage of cfg constructor + + Returns a PlatformInfo + """ + + if not params.merged_modifiers: + # If there is no modifier and legacy platform is specified, + # then return the legacy platform as is without changing the label or + # configuration. + return params.legacy_platform or PlatformInfo( + # Empty configuration + label = "", + configuration = ConfigurationInfo( + constraints = {}, + values = {}, + ), + ) + + constraint_setting_to_modifier_infos = {} + cli_modifier_validation = getattr(params.extra_data, "cli_modifier_validation", None) + + if params.legacy_platform: + for constraint_setting, constraint_value_info in params.legacy_platform.configuration.constraints.items(): + constraint_setting_to_modifier_infos[constraint_setting] = [constraint_value_info] + + for tagged_modifiers in params.merged_modifiers: + for modifier in tagged_modifiers.modifiers: + if modifier: + constraint_setting_label, modifier_info = get_modifier_info( + refs = refs, + modifier = modifier, + location = tagged_modifiers.location, + ) + modifier_infos = constraint_setting_to_modifier_infos.get(constraint_setting_label) or [] + modifier_infos.append(modifier_info) + constraint_setting_to_modifier_infos[constraint_setting_label] = modifier_infos + + if isinstance(tagged_modifiers.location, ModifierCliLocation): + if cli_modifier_validation: + cli_modifier_validation(constraint_setting_label, modifier) + + # Modifiers are resolved in topological ordering of modifier selects. 
For example, if the CPU modifier + # is a modifier_select on OS constraint, then the OS modifier must be resolved before the CPU modifier. + # To determine this order, we first construct a dep graph of constraint settings based on the modifier + # selects. Then we perform a post order traversal of that graph. + modifier_dep_graph = { + constraint_setting: [ + dep + for modifier_info in modifier_infos + for dep in get_constraint_setting_deps(modifier_info) + ] + for constraint_setting, modifier_infos in constraint_setting_to_modifier_infos.items() + } + + # For topo-sort, we need to fill in empty edges for nodes that have no deps + for deps in modifier_dep_graph.values(): + for dep in deps: + if dep not in modifier_dep_graph: + modifier_dep_graph[dep] = [] + + constraint_setting_order = post_order_traversal(modifier_dep_graph) + + cfg = ConfigurationInfo( + constraints = {}, + values = {}, + ) + + for constraint_setting in constraint_setting_order: + for modifier_info in constraint_setting_to_modifier_infos.get(constraint_setting) or (): + constraint_value = resolve_modifier(cfg, modifier_info) + if constraint_value: + cfg.constraints[constraint_setting] = constraint_value + + name = cfg_name(cfg) + return PlatformInfo( + label = name, + configuration = cfg, + ) diff --git a/prelude/cfg/modifier/common.bzl b/prelude/cfg/modifier/common.bzl new file mode 100644 index 0000000000000..1d3b1f7a26188 --- /dev/null +++ b/prelude/cfg/modifier/common.bzl @@ -0,0 +1,222 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//:asserts.bzl", "asserts") +load(":asserts.bzl", "verify_normalized_modifier") +load( + ":types.bzl", + "ConditionalModifierInfo", + "Modifier", + "ModifierCliLocation", + "ModifierInfo", + "ModifierLocation", + "ModifierPackageLocation", + "ModifierTargetLocation", + "ModifiersMatchInfo", + "TaggedModifiers", + "is_modifiers_match", +) + +MODIFIER_METADATA_KEY = "buck.cfg_modifiers" + +_TARGET_LOCATION_STR = "`metadata` attribute of target" +_CLI_LOCATION_STR = "command line" + +def location_to_string(location: ModifierLocation) -> str: + if isinstance(location, ModifierPackageLocation): + return location.package_path + if isinstance(location, ModifierTargetLocation): + return _TARGET_LOCATION_STR + if isinstance(location, ModifierCliLocation): + return _CLI_LOCATION_STR + fail("Internal error. Unrecognized location type `{}` for location `{}`".format(type(location), location)) + +def get_tagged_modifiers( + cfg_modifiers: list[Modifier], + extra_cfg_modifiers_per_rule: dict[str, list[Modifier]], + location: ModifierLocation) -> list[TaggedModifiers]: + for modifier in cfg_modifiers: + verify_normalized_modifier(modifier) + for _, modifiers in extra_cfg_modifiers_per_rule.items(): + for modifier in modifiers: + verify_normalized_modifier(modifier) + + # Aggregate all tagged modifiers in a PACKAGE in a single list. + # Per-rule modifiers come after the global modifiers so that they are processed later.
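As an illustration of the aggregation described here (the function's `return` completes just below), a hedged example with hypothetical constraint targets and location:

```python
load("@prelude//:asserts.bzl", "asserts")

def _example_get_tagged_modifiers():
    loc = ModifierPackageLocation(package_path = "foo//bar/PACKAGE")
    tagged = get_tagged_modifiers(
        cfg_modifiers = ["ovr_config//os/constraints:linux"],
        extra_cfg_modifiers_per_rule = {"python_binary": ["ovr_config//os/constraints:macos"]},
        location = loc,
    )

    # One entry for the package-wide modifiers (rule_name = None), then one
    # entry per rule-specific override, in that order.
    asserts.equals(2, len(tagged))
    asserts.equals(None, tagged[0].rule_name)
    asserts.equals("python_binary", tagged[1].rule_name)
```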
+ return [ + TaggedModifiers( + modifiers = cfg_modifiers, + location = location, + rule_name = None, + ), + ] + [ + TaggedModifiers( + modifiers = modifiers, + location = location, + rule_name = rule_name, + ) + for rule_name, modifiers in extra_cfg_modifiers_per_rule.items() + ] + +def get_constraint_setting(constraint_settings: dict[TargetLabel, None], modifier: Modifier, location: ModifierLocation) -> TargetLabel: + if len(constraint_settings) == 0: + fail("`modifiers.match` cannot be empty. Found empty `modifiers.match` at `{}`".format(location_to_string(location))) + if len(constraint_settings) > 1: + fail( + "A single modifier can only modify a single constraint setting.\n" + + "Modifier `{}` from `{}` is found to modify the following constraint settings:\n".format( + modifier, + location_to_string(location), + ) + "\n".join([str(k) for k in constraint_settings.keys()]), + ) + return list(constraint_settings.keys())[0] + +def get_modifier_info( + refs: dict[str, ProviderCollection], + modifier: Modifier, + location: ModifierLocation) -> ((TargetLabel, ModifierInfo) | None): + # Gets a modifier info from a modifier based on providers from `refs`. + if modifier == None: + return None + if is_modifiers_match(modifier): + default = None + modifiers_match_info = [] + constraint_settings = {} # Used like a set + for key, sub_modifier in modifier.items(): + if key == "DEFAULT": + if sub_modifier: + default_constraint_setting, default = get_modifier_info(refs, sub_modifier, location) + constraint_settings[default_constraint_setting] = None + else: + default = None + elif key != "_type": + cfg_info = refs[key][ConfigurationInfo] + if sub_modifier: + sub_constraint_setting, sub_modifier_info = get_modifier_info(refs, sub_modifier, location) + constraint_settings[sub_constraint_setting] = None + else: + sub_modifier_info = None + modifiers_match_info.append((cfg_info, sub_modifier_info)) + + constraint_setting = get_constraint_setting(constraint_settings, modifier, location) + + return constraint_setting, ModifiersMatchInfo( + default = default, + selector = modifiers_match_info, + ) + if isinstance(modifier, str): + modifier_info = refs[modifier] + if ConditionalModifierInfo in modifier_info: + conditional_modifier_info = modifier_info[ConditionalModifierInfo] + return conditional_modifier_info.key, conditional_modifier_info.inner + cfg_info = modifier_info[ConfigurationInfo] + asserts.true(len(cfg_info.constraints) == 1, "Modifier should only be a single constraint value. 
Found multiple in `{}`".format(modifier)) + constraint_value_info = list(cfg_info.constraints.values())[0] + return constraint_value_info.setting.label, constraint_value_info + fail("Internal error: Found unexpected modifier `{}` type `{}`".format(modifier, type(modifier))) + +def _is_subset(a: ConfigurationInfo, b: ConfigurationInfo) -> bool: + for (constraint_setting, a_constraint_value) in a.constraints.items(): + b_constraint_value = b.constraints.get(constraint_setting) + if a_constraint_value != b_constraint_value: + return False + return True + +def resolve_modifier(cfg: ConfigurationInfo, modifier: ModifierInfo) -> ConstraintValueInfo | None: + # Resolve the modifier and return the constraint value to add to the configuration, if there is one + if modifier == None: + return None + if isinstance(modifier, ModifiersMatchInfo): + for key, sub_modifier in modifier.selector: + if _is_subset(key, cfg): + # If constraints in key of the select are a subset of the constraints in the + # current configuration, then it's a match. + return resolve_modifier(cfg, sub_modifier) + if modifier.default: + return resolve_modifier(cfg, modifier.default) + return None + if isinstance(modifier, ConstraintValueInfo): + return modifier + fail("Internal error: Found unexpected modifier `{}` type `{}`".format(modifier, type(modifier))) + +def modifier_to_refs(modifier: Modifier, location: ModifierLocation) -> list[str]: + # Obtain a list of targets to analyze from a modifier. + refs = [] + if modifier == None: + pass + elif is_modifiers_match(modifier): + for key, sub_modifier in modifier.items(): + if key != "_type": + if key != "DEFAULT": + refs.append(key) + refs.extend(modifier_to_refs(sub_modifier, location)) + elif isinstance(modifier, str): + refs.append(modifier) + else: + fail("Internal error: Found unexpected modifier `{}` type `{}`".format(modifier, type(modifier))) + return refs + +def tagged_modifiers_to_json(tagged_modifiers: TaggedModifiers) -> dict[str, typing.Any]: + return { + "location": _location_to_json(tagged_modifiers.location), + "modifiers": tagged_modifiers.modifiers, + "rule_name": tagged_modifiers.rule_name, + "_type": "TaggedModifiers", + } + +def _location_to_json(location: ModifierLocation) -> dict[str, str]: + if isinstance(location, ModifierPackageLocation): + return {"package_path": location.package_path, "_type": "ModifierPackageLocation"} + if isinstance(location, ModifierTargetLocation): + return {"_type": "ModifierTargetLocation"} + fail("Internal error: unknown location `{}` with type `{}`".format(location, type(location))) + +def json_to_tagged_modifiers(j: dict[str, typing.Any]) -> TaggedModifiers: + if j["_type"] != "TaggedModifiers": + fail("Internal error: `{}` is not a `TaggedModifiers`".format(j)) + return TaggedModifiers( + location = _json_to_location(j["location"]), + modifiers = j["modifiers"], + rule_name = j["rule_name"], + ) + +def _json_to_location(j: dict[str, str]) -> ModifierLocation: + modifier_type = j.pop("_type") + if modifier_type == "ModifierPackageLocation": + return ModifierPackageLocation(package_path = j["package_path"]) + if modifier_type == "ModifierTargetLocation": + return ModifierTargetLocation() + fail("Internal error: cannot deserialize location `{}`".format(j)) + +def resolve_alias(modifier: Modifier, aliases: struct) -> list[Modifier]: + if isinstance(modifier, ModifiersMatchInfo): + fail("It should not be possible to specify a conditional modifier from command line") + if ":" in modifier: + # This is a target and not an alias + 
return [modifier] + resolved = getattr(aliases, modifier, None) + if resolved: + return resolved if isinstance(resolved, list) else [resolved] + fail("Found invalid modifier alias `{}`. A list of valid modifier aliases is in buck2/cfg/experimental/alias.bzl".format(modifier)) + +def _get_constraint_setting_deps( + modifier_info: ModifierInfo) -> list[TargetLabel]: + deps = [] + if isinstance(modifier_info, ModifiersMatchInfo): + for key, sub_modifier in modifier_info.selector: + for constraint_setting in key.constraints: + deps.append(constraint_setting) + deps += _get_constraint_setting_deps(sub_modifier) + if modifier_info.default: + deps += _get_constraint_setting_deps(modifier_info.default) + return deps + +def get_constraint_setting_deps( + modifier_info: ModifierInfo) -> list[TargetLabel]: + # Get all constraint settings depended on by a modifier (from keys of `modifier_select`). The modifiers + # for these constraint settings must be resolved before this modifier can be resolved. + return dedupe(_get_constraint_setting_deps(modifier_info)) diff --git a/prelude/cfg/modifier/name.bzl b/prelude/cfg/modifier/name.bzl new file mode 100644 index 0000000000000..79c1fb59ae85d --- /dev/null +++ b/prelude/cfg/modifier/name.bzl @@ -0,0 +1,55 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# This is an ordered dictionary of constraint setting target to an optional transform. +# The constraint settings listed below are used to name the configuration, where the first +# constraint settings are named first in the configuration. The transform, if specified, +# can change how the name appears in the configuration. By default, if there is no transform, +# we just use the name of the constraint value in the configuration name. If a transform is +# specified, the transform will be applied to the existing constraint value name to return a +# new string to be used in the configuration. +# @unsorted-dict-items +NAMED_CONSTRAINT_SETTINGS = { + # TODO(scottcao): Add OSS constraints as well + "ovr_config//os/constraints:os": None, + "ovr_config//cpu/constraints:cpu": None, + "ovr_config//runtime/constraints:runtime": None, + "ovr_config//runtime/constraints:runtime_version": None, + "ovr_config//os/sdk/apple/constraints:_": None, + "ovr_config//os/sdk/android/ndk/constraints:version": None, + "ovr_config//os/version/android/constraints:api-level": (lambda label: "api" + str(label.name).split("-")[-1]), + "ovr_config//toolchain/clang/constraints:clang-toolchain-version": (lambda label: "clang" + str(label.name)), + "ovr_config//build_mode/constraints:san": None, + "fbcode//fdo/constraints:fdo": (lambda label: str(label.name)), + "ovr_config//build_mode/default_opt_cxx:default_opt_cxx_setting": (lambda label: "opt-by-default" if str(label.name) == "enabled" else None), +} + +# Mark all modifier-generated configurations with a `cfg:` prefix. +# We do this so that we can easily recognize which configuration is generated +# by modifiers and query for it in Scuba.
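A hedged illustration of the resulting names (constraint values hypothetical): a configuration whose os constraint resolves to `linux` and cpu to `x86_64` would be named `cfg:linux-x86_64`, while the empty configuration falls back to the bare prefix, as defined just below:

```python
load("@prelude//:asserts.bzl", "asserts")

def _example_cfg_name():
    # With no constraints at all, cfg_name returns the empty-name form "cfg:".
    empty = ConfigurationInfo(constraints = {}, values = {})
    asserts.equals("cfg:", cfg_name(empty))
```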
+_CFG_PREFIX = "cfg:" +_EMPTY_CFG_NAME = _CFG_PREFIX + "" + +def cfg_name(cfg: ConfigurationInfo) -> str: + """Derives a reasonable name for a ConfigurationInfo""" + + name_list = [] + constraints = {str(key): value for key, value in cfg.constraints.items()} + for constraint_setting, transform in NAMED_CONSTRAINT_SETTINGS.items(): + if constraint_setting in constraints: + constraint_value_label = constraints[constraint_setting].label + if transform: + constraint_name = transform(constraint_value_label) + else: + constraint_name = str(constraint_value_label.name) + if constraint_name: + name_list.append(constraint_name) + if len(name_list) == 0: + name = _EMPTY_CFG_NAME + else: + name = _CFG_PREFIX + "-".join(name_list) + return name diff --git a/prelude/cfg/modifier/set_cfg_modifiers.bzl b/prelude/cfg/modifier/set_cfg_modifiers.bzl new file mode 100644 index 0000000000000..45063df1d1cca --- /dev/null +++ b/prelude/cfg/modifier/set_cfg_modifiers.bzl @@ -0,0 +1,79 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//:prelude.bzl", "native") +load(":common.bzl", "MODIFIER_METADATA_KEY", "get_tagged_modifiers", "tagged_modifiers_to_json") +load(":types.bzl", "Modifier", "ModifierPackageLocation") # @unused Used in type annotation + +def set_cfg_modifiers( + cfg_modifiers: list[Modifier] | None = None, + extra_cfg_modifiers_per_rule: dict[str, list[Modifier]] | None = None): + """ + Sets a configuration modifier for all targets under this PACKAGE file. This can only be called from a PACKAGE file context + (e.g. a PACKAGE file or a bzl file transitively loaded by a PACKAGE file). + + Args: + cfg_modifiers: + A list of modifiers to set. The simplest modifier is a constraint value target. + For example, to change the OS to linux in fbsource, this can be specified as `["ovr_config//os/constraints:linux"]`. + extra_cfg_modifiers_per_rule: + A dictionary of rule name to a list of modifiers to set. This is applied on top of modifiers from `cfg_modifiers` parameter + if a target's rule name matches the key, so it can override any modifier from `cfg_modifiers` parameter in the same PACKAGE. + For example, if this dictionary is `{"python_binary": ["ovr_config//os/constraints:macos"]}`, + then all python_binary targets covered will have the macos constraint added to their configurations. + """ + + # Make this buck1-proof + call_stack_frame = getattr(native, "call_stack_frame", None) + + # To ensure that modifiers set in PACKAGE files are easily codemoddable + # We want to enforce that `set_cfg_modifiers` is only invokable from a PACKAGE file and not a bzl file + frame1 = call_stack_frame(1) + if not _is_buck_tree_file(frame1.module_path): + # Now check the old bzl file for `set_cfg_modifiers` in case it is invoked through that one. 
+ frame2 = call_stack_frame(2) + if not (frame2 and frame1.module_path.endswith("fbcode/buck2/cfg/experimental/set_cfg_modifiers.bzl") and _is_buck_tree_file(frame2.module_path)): + fail("set_cfg_modifiers is only allowed to be used from a PACKAGE or BUCK_TREE file, not a bzl file.") + + cfg_modifiers = cfg_modifiers or [] + extra_cfg_modifiers_per_rule = extra_cfg_modifiers_per_rule or {} + + # Make this buck1-proof + write_package_value = getattr(native, "write_package_value", None) + read_parent_package_value = getattr(native, "read_parent_package_value", None) + + merged_modifier_jsons = read_parent_package_value(MODIFIER_METADATA_KEY) + + # `read_parent_package_value` returns immutable values. `list()` makes it mutable. + merged_modifier_jsons = list(merged_modifier_jsons) if merged_modifier_jsons else [] + + tagged_modifiers_list = get_tagged_modifiers( + cfg_modifiers, + extra_cfg_modifiers_per_rule, + ModifierPackageLocation(package_path = _get_package_path()), + ) + merged_modifier_jsons += [tagged_modifiers_to_json(tagged_modifiers) for tagged_modifiers in tagged_modifiers_list] + + write_package_value( + MODIFIER_METADATA_KEY, + merged_modifier_jsons, + overwrite = True, + ) + +def _get_package_path() -> str: + """ + Returns the cell-relative path of the current PACKAGE file. + Ex. `foo//bar/PACKAGE` + """ + + # Make this buck1-proof + get_cell_name = getattr(native, "get_cell_name", None) + get_base_path = getattr(native, "get_base_path", None) + return "{}//{}/PACKAGE".format(get_cell_name(), get_base_path()) + +def _is_buck_tree_file(path: str) -> bool: + return path.endswith(("/PACKAGE", "/BUCK_TREE")) or path in ("PACKAGE", "BUCK_TREE") diff --git a/prelude/cfg/modifier/types.bzl b/prelude/cfg/modifier/types.bzl new file mode 100644 index 0000000000000..f5b7e8a846eb2 --- /dev/null +++ b/prelude/cfg/modifier/types.bzl @@ -0,0 +1,64 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# TODO(scottcao): Annotate these types with comments once implementation is complete + +# Metadata types for where cfg modifier is defined. We need to track this to give users error messages +# that include where the bad modifier comes from. + +# Modifier defined in a PACKAGE file. We track the path of that PACKAGE file. +ModifierPackageLocation = record(package_path = str) + +# Modifier defined on the target in buildfile. +ModifierTargetLocation = record() + +# Modifier specified via the command line by the user +ModifierCliLocation = record() + +# This is a handy way of specifying a Rust-style enum in Starlark. +ModifierLocation = ModifierPackageLocation | ModifierTargetLocation | ModifierCliLocation + +# Modifier types as they appear to the user via the `set_cfg_modifier` or `cfg_modifier` functions. + +ModifiersMatch = dict[str, typing.Any] + +Modifier = str | ModifiersMatch | None + +TaggedModifiers = record( + modifiers = list[Modifier], + location = ModifierLocation, + rule_name = str | None, +) + +# Modifier types after analysis of configuration rules. +# There is an equivalent post-constraint-analysis type for every modifier type listed above. +# An "Info" is added to the type name to denote post-constraint-analysis version of the +# modifier type.
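For reference, a hedged sketch of a `ModifiersMatch` value in its user-facing dict form (targets hypothetical), matching the `_type` marker that `is_modifiers_match` below checks and the `DEFAULT` key the resolver understands:

```python
_EXAMPLE_MODIFIERS_MATCH = {
    "_type": "ModifiersMatch",
    # Keys other than "_type"/"DEFAULT" are config_setting-like targets;
    # values are the modifiers chosen when that key matches.
    "ovr_config//os/constraints:linux": "ovr_config//build_mode/constraints:asan",
    "DEFAULT": None,
}
```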
+ +ModifiersMatchInfo = record( + # should be list[(ConfigurationInfo, "ModifierInfo")] once recursive types are supported + selector = list[(ConfigurationInfo, typing.Any)], + default = typing.Any, # should be "ModifierInfo" | None with recursive types +) + +ModifierInfo = ConstraintValueInfo | ModifiersMatchInfo | None + +# A provider for conditional modifier used by cfg constructor function when constructing the +# configuration +ConditionalModifierInfo = provider(fields = { + "inner": ModifierInfo, + "key": TargetLabel, +}) + +def is_modifiers_match(modifier: Modifier) -> bool: + if modifier == None or isinstance(modifier, str): + return False + if isinstance(modifier, dict): + if modifier["_type"] != "ModifiersMatch": + fail("Found unknown dictionary `{}` for modifier".format(modifier)) + return True + fail("Modifier should either be None, a string, or dict. Found `{}`".format(modifier)) diff --git a/prelude/command_alias.bzl b/prelude/command_alias.bzl index 3b7a4db583b3e..b1ba3f70a124c 100644 --- a/prelude/command_alias.bzl +++ b/prelude/command_alias.bzl @@ -24,56 +24,55 @@ def _command_alias_impl_target_unix(ctx, exec_is_windows: bool): else: base = _get_run_info_from_exe(ctx.attrs.exe) - run_info_args = cmd_args() + trampoline_args = cmd_args() + trampoline_args.add("#!/usr/bin/env bash") + trampoline_args.add("set -euo pipefail") + trampoline_args.add('BUCK_COMMAND_ALIAS_ABSOLUTE=$(cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P)') + + for (k, v) in ctx.attrs.env.items(): + # TODO(akozhevnikov): maybe check environment variable is not conflicting with pre-existing one + trampoline_args.add(cmd_args(["export ", k, "=", cmd_args(v, quote = "shell")], delimiter = "")) + + if len(ctx.attrs.platform_exe.items()) > 0: + trampoline_args.add('case "$(uname)" in') + for platform, exe in ctx.attrs.platform_exe.items(): + # Only linux and macos are supported. + if platform == "linux": + _add_platform_case_to_trampoline_args(trampoline_args, "Linux", _get_run_info_from_exe(exe), ctx.attrs.args) + elif platform == "macos": + _add_platform_case_to_trampoline_args(trampoline_args, "Darwin", _get_run_info_from_exe(exe), ctx.attrs.args) + + # Default case + _add_platform_case_to_trampoline_args(trampoline_args, "*", base, ctx.attrs.args) + trampoline_args.add("esac") + else: + _add_args_declaration_to_trampoline_args(trampoline_args, base, ctx.attrs.args) - if len(ctx.attrs.env) > 0 or len(ctx.attrs.platform_exe.items()) > 0: - trampoline_args = cmd_args() - trampoline_args.add("#!/usr/bin/env bash") - trampoline_args.add("set -euo pipefail") - trampoline_args.add('BUCK_COMMAND_ALIAS_ABSOLUTE=$(cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P)') - - for (k, v) in ctx.attrs.env.items(): - # TODO(akozhevnikov): maybe check environment variable is not conflicting with pre-existing one - trampoline_args.add(cmd_args(["export ", k, "=", cmd_args(v, quote = "shell")], delimiter = "")) - - if len(ctx.attrs.platform_exe.items()) > 0: - trampoline_args.add('case "$(uname)" in') - for platform, exe in ctx.attrs.platform_exe.items(): - # Only linux and macos are supported. 
-                if platform == "linux":
-                    _add_platform_case_to_trampoline_args(trampoline_args, "Linux", _get_run_info_from_exe(exe), ctx.attrs.args)
-                elif platform == "macos":
-                    _add_platform_case_to_trampoline_args(trampoline_args, "Darwin", _get_run_info_from_exe(exe), ctx.attrs.args)
-
-            # Default case
-            _add_platform_case_to_trampoline_args(trampoline_args, "*", base, ctx.attrs.args)
-            trampoline_args.add("esac")
-        else:
-            _add_args_declaration_to_trampoline_args(trampoline_args, base, ctx.attrs.args)
-
-        trampoline_args.add('exec "${ARGS[@]}"')
-
-        trampoline = _relativize_path(
-            ctx,
-            trampoline_args,
-            "sh",
-            "$BUCK_COMMAND_ALIAS_ABSOLUTE",
-            exec_is_windows,
-        )
+    trampoline_args.add('exec "${ARGS[@]}"')
+
+    trampoline = _relativize_path(
+        ctx,
+        trampoline_args,
+        "sh",
+        "$BUCK_COMMAND_ALIAS_ABSOLUTE",
+        exec_is_windows,
+    )
 
-        run_info_args.add(trampoline)
-        run_info_args.hidden([trampoline_args])
+    run_info_args_args = []
+    run_info_args_hidden = []
+    if len(ctx.attrs.env) > 0 or len(ctx.attrs.platform_exe.items()) > 0:
+        run_info_args_args.append(trampoline)
+        run_info_args_hidden.append(trampoline_args)
     else:
-        run_info_args.add(base.args)
-        run_info_args.add(ctx.attrs.args)
+        run_info_args_args.append(base.args)
+        run_info_args_args.append(ctx.attrs.args)
+
+    run_info_args_hidden.append(ctx.attrs.resources)
 
-    run_info_args.hidden(ctx.attrs.resources)
+    run_info_args = cmd_args(run_info_args_args, hidden = run_info_args_hidden)
 
-    # TODO(cjhopman): Consider what this should have for default outputs. Using
-    # the base's default outputs may not really be correct (it makes more sense to
-    # be the outputs required by the args).
     return [
-        DefaultInfo(),
+        DefaultInfo(default_output = trampoline, other_outputs = [trampoline_args] + ctx.attrs.resources),
         RunInfo(args = run_info_args),
     ]
 
@@ -87,50 +86,58 @@ def _command_alias_impl_target_windows(ctx, exec_is_windows: bool):
     else:
         base = RunInfo()
 
-    run_info_args = cmd_args()
+    trampoline_args = cmd_args()
+    trampoline_args.add("@echo off")
+
+    if "close_stdin" in ctx.attrs.labels:
+        # Avoids waiting for input on the "Terminate batch job (Y/N)?" prompt.
+        # The prompt itself is unavoidable, but we can avoid having to wait for input.
+        # This will call the same trampoline batch file with stdin disabled
+        trampoline_args.add("if not defined STDIN_CLOSED (set STDIN_CLOSED=1 & CALL <NUL %0 %* & GOTO :EOF)")
+
+    # Set BUCK_COMMAND_ALIAS_ABSOLUTE to the drive and full path of the script being created here
+    # We use this below to prefix any artifacts being referenced in the script
+    trampoline_args.add("set BUCK_COMMAND_ALIAS_ABSOLUTE=%~dp0")
+
+    # Handle envs
+    for (k, v) in ctx.attrs.env.items():
+        # TODO(akozhevnikov): maybe check environment variable is not conflicting with pre-existing one
+        trampoline_args.add(cmd_args(["set ", k, "=", v], delimiter = ""))
+
+    # Handle args
+    # We shell quote the args but not the base. This is due to the same limitation detailed below with T111687922
+    cmd = cmd_args([base.args], delimiter = " ")
+    for arg in ctx.attrs.args:
+        cmd.add(cmd_args(arg, quote = "shell"))
+
+    # Add on %* to handle any other args passed through the command
+    cmd.add("%*")
+    trampoline_args.add(cmd)
+
+    trampoline = _relativize_path(
+        ctx,
+        trampoline_args,
+        "bat",
+        "%BUCK_COMMAND_ALIAS_ABSOLUTE%",
+        exec_is_windows,
+    )
+
+    run_info_args_args = []
+    run_info_args_hidden = []
     if len(ctx.attrs.env) > 0:
-        trampoline_args = cmd_args()
-        trampoline_args.add("@echo off")
-
-        # Set BUCK_COMMAND_ALIAS_ABSOLUTE to the drive and full path of the script being created here
-        # We use this below to prefix any artifacts being referenced in the script
-        trampoline_args.add("set BUCK_COMMAND_ALIAS_ABSOLUTE=%~dp0")
-
-        # Handle envs
-        for (k, v) in ctx.attrs.env.items():
-            # TODO(akozhevnikov): maybe check environment variable is not conflicting with pre-existing one
-            trampoline_args.add(cmd_args(["set ", k, "=", v], delimiter = ""))
-
-        # Handle args
-        # We shell quote the args but not the base.
This is due to the same limitation detailed below with T111687922 - cmd = cmd_args([base.args], delimiter = " ") - for arg in ctx.attrs.args: - cmd.add(cmd_args(arg, quote = "shell")) - - # Add on %* to handle any other args passed through the command - cmd.add("%*") - trampoline_args.add(cmd) - - trampoline = _relativize_path( - ctx, - trampoline_args, - "bat", - "%BUCK_COMMAND_ALIAS_ABSOLUTE%", - exec_is_windows, - ) - run_info_args.add(trampoline) - run_info_args.hidden([trampoline_args]) + run_info_args_args.append(trampoline) + run_info_args_hidden.append(trampoline_args) else: - run_info_args.add(base.args) - run_info_args.add(ctx.attrs.args) + run_info_args_args.append(base.args) + run_info_args_args.append(ctx.attrs.args) - run_info_args.hidden(ctx.attrs.resources) + run_info_args_hidden.append(ctx.attrs.resources) + + run_info_args = cmd_args(run_info_args_args, hidden = run_info_args_hidden) - # TODO(cjhopman): Consider what this should have for default outputs. Using - # the base's default outputs may not really be correct (it makes more sense to - # be the outputs required by the args). return [ - DefaultInfo(), + DefaultInfo(default_output = trampoline, other_outputs = [trampoline_args] + ctx.attrs.resources), RunInfo(args = run_info_args), ] @@ -154,7 +161,11 @@ def _relativize_path_unix( trampoline_args: cmd_args) -> Artifact: # FIXME(ndmitchell): more straightforward relativization with better API non_materialized_reference = ctx.actions.write("dummy", "") - trampoline_args.relative_to(non_materialized_reference, parent = 1).absolute_prefix("__BUCK_COMMAND_ALIAS_ABSOLUTE__/") + trampoline_args = cmd_args( + trampoline_args, + relative_to = (non_materialized_reference, 1), + absolute_prefix = "__BUCK_COMMAND_ALIAS_ABSOLUTE__/", + ) trampoline_tmp, _ = ctx.actions.write("__command_alias_trampoline.{}.pre".format(extension), trampoline_args, allow_args = True) @@ -184,7 +195,11 @@ def _relativize_path_windows( trampoline_args: cmd_args) -> Artifact: # FIXME(ndmitchell): more straightforward relativization with better API non_materialized_reference = ctx.actions.write("dummy", "") - trampoline_args.relative_to(non_materialized_reference, parent = 1).absolute_prefix(var + "/") + trampoline_args = cmd_args( + trampoline_args, + relative_to = (non_materialized_reference, 1), + absolute_prefix = var + "/", + ) trampoline, _ = ctx.actions.write("__command_alias_trampoline.{}".format(extension), trampoline_args, allow_args = True) @@ -215,7 +230,10 @@ def _add_args_declaration_to_trampoline_args(trampoline_args: cmd_args, base: Ru trampoline_args.add(")") -def _get_run_info_from_exe(exe: Dependency) -> RunInfo: +def _get_run_info_from_exe(exe: Dependency | Artifact) -> RunInfo: + if isinstance(exe, Artifact): + return RunInfo(args = cmd_args(exe)) + run_info = exe.get(RunInfo) if run_info == None: run_info = RunInfo( diff --git a/prelude/configurations/rules.bzl b/prelude/configurations/rules.bzl index 323560833d9d9..9e80cd6719913 100644 --- a/prelude/configurations/rules.bzl +++ b/prelude/configurations/rules.bzl @@ -5,6 +5,7 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +load("@prelude//cfg/modifier:types.bzl", "ConditionalModifierInfo") load(":util.bzl", "util") # config_setting() accepts a list of constraint_values and a list of values @@ -38,9 +39,16 @@ def constraint_value_impl(ctx): DefaultInfo(), constraint_value, # Provide `ConfigurationInfo` from `constraint_value` so it could be used as select key. 
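The `_relativize_path_*` hunks above showcase an API migration that recurs throughout this diff: the mutating `cmd_args` methods (`.relative_to()`, `.absolute_prefix()`, `.hidden()`, `.ignore_artifacts()`, `.parent()`) are replaced by keyword arguments on the `cmd_args` constructor. A rough before/after sketch of the pattern (the `script`, `reference`, and `extra_inputs` names are illustrative, not from this diff):

    # Before: options were applied by mutating an existing cmd_args.
    args = cmd_args(script)
    args.relative_to(reference, parent = 1)
    args.absolute_prefix("__PREFIX__/")
    args.hidden([extra_inputs])

    # After: the same options are supplied when constructing a new cmd_args.
    args = cmd_args(
        script,
        relative_to = (reference, 1),
        absolute_prefix = "__PREFIX__/",
        hidden = [extra_inputs],
    )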
- ConfigurationInfo(constraints = { - constraint_value.setting.label: constraint_value, - }, values = {}), + ConfigurationInfo( + constraints = { + constraint_value.setting.label: constraint_value, + }, + values = {}, + ), + ConditionalModifierInfo( + inner = constraint_value, + key = constraint_value.setting.label, + ), ] # platform() declares a platform, it is a list of constraint values. @@ -67,6 +75,9 @@ def platform_impl(ctx): ), ] +def configuration_alias_impl(ctx: AnalysisContext) -> list[Provider]: + return ctx.attrs.actual.providers + # TODO(cjhopman): Update the attributes for these ruletypes to declare the types of providers that they expect in their references. extra_attributes = { "platform": { @@ -76,6 +87,7 @@ extra_attributes = { implemented_rules = { "config_setting": config_setting_impl, + "configuration_alias": configuration_alias_impl, "constraint_setting": constraint_setting_impl, "constraint_value": constraint_value_impl, "platform": platform_impl, diff --git a/prelude/cpu/BUCK b/prelude/cpu/BUCK deleted file mode 100644 index 30d019bfd98bc..0000000000000 --- a/prelude/cpu/BUCK +++ /dev/null @@ -1,39 +0,0 @@ -config_setting( - name = "x86_64", - constraint_values = [ - "prelude//cpu/constraints:x86_64", - ], - visibility = ["PUBLIC"], -) - -config_setting( - name = "x86_32", - constraint_values = [ - "prelude//cpu/constraints:x86_32", - ], - visibility = ["PUBLIC"], -) - -config_setting( - name = "arm64", - constraint_values = [ - "prelude//cpu/constraints:arm64", - ], - visibility = ["PUBLIC"], -) - -config_setting( - name = "arm32", - constraint_values = [ - "prelude//cpu/constraints:arm32", - ], - visibility = ["PUBLIC"], -) - -config_setting( - name = "wasm32", - constraint_values = [ - "prelude//cpu/constraints:wasm32", - ], - visibility = ["PUBLIC"], -) diff --git a/prelude/cpu/BUCK.v2 b/prelude/cpu/BUCK.v2 new file mode 100644 index 0000000000000..ec7c7b4ed3d63 --- /dev/null +++ b/prelude/cpu/BUCK.v2 @@ -0,0 +1,45 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +config_setting( + name = "x86_64", + constraint_values = [ + "prelude//cpu/constraints:x86_64", + ], + visibility = ["PUBLIC"], +) + +config_setting( + name = "x86_32", + constraint_values = [ + "prelude//cpu/constraints:x86_32", + ], + visibility = ["PUBLIC"], +) + +config_setting( + name = "arm64", + constraint_values = [ + "prelude//cpu/constraints:arm64", + ], + visibility = ["PUBLIC"], +) + +config_setting( + name = "arm32", + constraint_values = [ + "prelude//cpu/constraints:arm32", + ], + visibility = ["PUBLIC"], +) + +config_setting( + name = "wasm32", + constraint_values = [ + "prelude//cpu/constraints:wasm32", + ], + visibility = ["PUBLIC"], +) diff --git a/prelude/cpu/constraints/BUCK b/prelude/cpu/constraints/BUCK deleted file mode 100644 index 6a0e5dd00e1d9..0000000000000 --- a/prelude/cpu/constraints/BUCK +++ /dev/null @@ -1,38 +0,0 @@ -# Used by open source projects to support `prelude//` - -constraint_setting( - name = "cpu", - visibility = ["PUBLIC"], -) - -constraint_value( - name = "x86_64", - constraint_setting = ":cpu", - visibility = ["PUBLIC"], -) - -constraint_value( - name = "x86_32", - constraint_setting = ":cpu", - visibility = ["PUBLIC"], -) - -# Represents various flavors of ARM64, e.g., arm64_32 -constraint_value( - name = "arm64", - constraint_setting = ":cpu", - visibility = ["PUBLIC"], -) - -# Represents various flavors of ARM32, e.g., ARMv7k -constraint_value( - name = "arm32", - constraint_setting = 
":cpu", - visibility = ["PUBLIC"], -) - -constraint_value( - name = "wasm32", - constraint_setting = ":cpu", - visibility = ["PUBLIC"], -) diff --git a/prelude/cpu/constraints/BUCK.v2 b/prelude/cpu/constraints/BUCK.v2 new file mode 100644 index 0000000000000..1c7ba33a070c9 --- /dev/null +++ b/prelude/cpu/constraints/BUCK.v2 @@ -0,0 +1,50 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +# Used by open source projects to support `prelude//` + +constraint_setting( + name = "cpu", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "x86_64", + constraint_setting = ":cpu", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "x86_32", + constraint_setting = ":cpu", + visibility = ["PUBLIC"], +) + +# Represents various flavors of ARM64, e.g., arm64_32 +constraint_value( + name = "arm64", + constraint_setting = ":cpu", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "arm64_32", + constraint_setting = ":cpu", + visibility = ["PUBLIC"], +) + +# Represents various flavors of ARM32, e.g., ARMv7k +constraint_value( + name = "arm32", + constraint_setting = ":cpu", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "wasm32", + constraint_setting = ":cpu", + visibility = ["PUBLIC"], +) diff --git a/prelude/csharp/csharp.bzl b/prelude/csharp/csharp.bzl index da2d4c6bd78d3..53ddfb7c384ac 100644 --- a/prelude/csharp/csharp.bzl +++ b/prelude/csharp/csharp.bzl @@ -5,62 +5,9 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +load(":csharp_providers.bzl", "DllDepTSet", "DllReference", "DotNetLibraryInfo", "generate_target_tset_children") load(":toolchain.bzl", "CSharpToolchainInfo") -# Describes either a reference to a Buck .NET target or a .NET framework DLL. -DllReference = record( - # `str` -> Path to a .NET framework DLL on the local machine. - # `Artifact` -> Buck target dependency. - reference = field([Artifact, str]), -) - -def _args_for_dll_reference(dllref: DllReference) -> cmd_args: - """Projects values in a `DllDepTSet` to csc.exe /reference arguments.""" - return cmd_args(dllref.reference, format = "/reference:{}") - -# A transitive set of DLL references required to build a .NET library. -# -# The transitive set attribute `value` references the outputting assembly, and the children are a -# list of the dependencies required to build it. -DllDepTSet = transitive_set( - args_projections = { - # Projects "/reference:{}" arguments for `csc.exe`. - "reference": _args_for_dll_reference, - }, -) - -def generate_target_tset_children(deps: list[typing.Any], ctx: AnalysisContext) -> list[DllDepTSet]: - """Convert a target's dependencies list into an array of transitive dependencies.""" - - tset_children = [] - - if deps: - for dep in deps: - if isinstance(dep, str): - # Name of a .NET framework DLL (eg "System.Drawing.dll"). - tset_children.append( - ctx.actions.tset(DllDepTSet, value = DllReference(reference = dep)), - ) - else: - # Buck target dependency (eg "//buck/path/to:foobar"). - tset_children.append(dep.get(DotNetLibraryInfo).dll_deps) - - return tset_children - -DotNetLibraryInfo = provider( - doc = "Information about a .NET library and its dependencies", - fields = { - # A tset of DLLs (System or Buck targets) this library depends on. The - # `.value` is a reference to the outputting assembly artifact, and the - # children are the dependencies required to build it. - "dll_deps": provider_field(DllDepTSet), - # The output file name of the library. 
- "name": provider_field(str), - # The generated .dll artifact that will need to be linked into an .exe. - "object": provider_field(Artifact), - }, -) - def csharp_library_impl(ctx: AnalysisContext) -> list[Provider]: toolchain = ctx.attrs._csharp_toolchain[CSharpToolchainInfo] @@ -72,27 +19,44 @@ def csharp_library_impl(ctx: AnalysisContext) -> list[Provider]: library = ctx.actions.declare_output(dll_name) # Create a command invoking a wrapper script that calls csc.exe to compile the .dll. - cmd = cmd_args(toolchain.csc) + cmd = [toolchain.csc] # Add caller specified compiler flags. - cmd.add(ctx.attrs.compiler_flags) + cmd.append(ctx.attrs.compiler_flags) # Set the output target as a .NET library. - cmd.add("/target:library") - cmd.add(cmd_args( + cmd.append("/target:library") + cmd.append(cmd_args( library.as_output(), format = "/out:{}", )) + # Don't include any default .NET framework assemblies like "mscorlib" or "System" unless + # explicitly requested with `/reference:{}`. This flag also stops injection of other + # default compiler flags. + cmd.append("/noconfig") + + # Don't reference mscorlib.dll unless asked for. This is required for targets that target + # embedded platforms such as Silverlight or WASM. (Originally for Buck1 compatibility.) + cmd.append("/nostdlib") + + # Don't search any paths for .NET libraries unless explicitly referenced with `/lib:{}`. + cmd.append("/nosdkpath") + + # Let csc know the directory path where it can find system assemblies. This is the path + # that is searched by `/reference:{libname}` if `libname` is just a DLL name. + cmd.append(cmd_args(toolchain.framework_dirs[ctx.attrs.framework_ver], format = "/lib:{}")) + # Add a `/reference:{name}` argument for each dependency. + # Buck target refs should be absolute paths and system assemblies just the DLL name. child_deps = generate_target_tset_children(ctx.attrs.deps, ctx) deps_tset = ctx.actions.tset(DllDepTSet, children = child_deps) - cmd.add(deps_tset.project_as_args("reference")) + cmd.append(deps_tset.project_as_args("reference")) # Specify the C# source code files that should be compiled into this target. # NOTE: This must happen after /out and /target! - cmd.add(ctx.attrs.srcs) + cmd.append(ctx.attrs.srcs) # Run the C# compiler to produce the output artifact. ctx.actions.run(cmd, category = "csharp_compile") diff --git a/prelude/csharp/csharp_providers.bzl b/prelude/csharp/csharp_providers.bzl new file mode 100644 index 0000000000000..d824a2b6eba60 --- /dev/null +++ b/prelude/csharp/csharp_providers.bzl @@ -0,0 +1,62 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# Stores a reference to a Buck .NET DLL target (`Artifact`) or the name of an assembly dll (`str`) +# that can be found in the .NET framework SDK directory +DllReference = record( + # `str` -> Path to a .NET framework DLL on the local machine. + # `Artifact` -> Buck target dependency. + reference = field([Artifact, str]), +) + +def _args_for_dll_reference(dllref: DllReference) -> cmd_args: + """Projects values in a `DllDepTSet` to csc.exe /reference:{dllname} arguments.""" + return cmd_args(dllref.reference, format = "/reference:{}") + +# A transitive set of DLL references required to build a .NET library. 
+# +# The transitive set attribute `value` references the outputting assembly, and the children are a +# list of the dependencies required to build it. +DllDepTSet = transitive_set( + args_projections = { + # Projects "/reference:{}" arguments for `csc.exe`. + "reference": _args_for_dll_reference, + }, +) + +def generate_target_tset_children(deps: list[typing.Any], ctx: AnalysisContext) -> list[DllDepTSet]: + """Convert a C# target's dependencies list into an array of transitive dependencies.""" + + tset_children = [] + + if deps: + for dep in deps: + if isinstance(dep, str): + # Name of a .NET framework DLL (eg "System.Drawing.dll"). + tset_children.append( + ctx.actions.tset(DllDepTSet, value = DllReference(reference = dep)), + ) + else: + # Buck target dependency (eg "//buck/path/to:foobar"). + # Adds all of the dependencies of the Buck target dependency to the tset. + tset_children.append(dep.get(DotNetLibraryInfo).dll_deps) + + return tset_children + +DotNetLibraryInfo = provider( + doc = "Information about a .NET library and its dependencies", + fields = { + # A tset of DLLs (System or Buck targets) this library depends on. The + # `.value` is a reference to the outputting assembly artifact, and the + # children are the dependencies required to build it. + "dll_deps": provider_field(DllDepTSet), + # The output file name of the library. + "name": provider_field(str), + # The generated .dll artifact that will need to be linked into an .exe. + "object": provider_field(Artifact), + }, +) diff --git a/prelude/csharp/toolchain.bzl b/prelude/csharp/toolchain.bzl index 1a6e071915beb..69100e7d7883d 100644 --- a/prelude/csharp/toolchain.bzl +++ b/prelude/csharp/toolchain.bzl @@ -7,4 +7,5 @@ CSharpToolchainInfo = provider(fields = [ "csc", + "framework_dirs", ]) diff --git a/prelude/cxx/anon_link.bzl b/prelude/cxx/anon_link.bzl index 354f4aba15a33..143f892eae174 100644 --- a/prelude/cxx/anon_link.bzl +++ b/prelude/cxx/anon_link.bzl @@ -10,7 +10,12 @@ load( "ArtifactInfo", "make_artifact_tset", ) -load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxToolchainInfo") +load( + "@prelude//cxx:cxx_toolchain_types.bzl", + "CxxToolchainInfo", + "LinkerType", +) +load("@prelude//cxx:cxx_utility.bzl", "cxx_attrs_get_allow_cache_upload") load("@prelude//linking:execution_preference.bzl", "LinkExecutionPreference") load( "@prelude//linking:link_info.bzl", @@ -33,7 +38,7 @@ def _serialize_linkable(linkable): return ("archive", ( (linkable.archive.artifact, linkable.archive.external_objects), linkable.link_whole, - linkable.linker_type, + linkable.linker_type.value, linkable.supports_lto, )) @@ -41,7 +46,7 @@ def _serialize_linkable(linkable): return ("objects", ( linkable.objects, linkable.link_whole, - linkable.linker_type, + linkable.linker_type.value, )) if isinstance(linkable, SharedLibLinkable): @@ -106,7 +111,7 @@ def _deserialize_linkable(linkable: (str, typing.Any)) -> typing.Any: external_objects = external_objects, ), link_whole = link_whole, - linker_type = linker_type, + linker_type = LinkerType(linker_type), supports_lto = supports_lto, ) @@ -115,7 +120,7 @@ def _deserialize_linkable(linkable: (str, typing.Any)) -> typing.Any: return ObjectsLinkable( objects = objects, link_whole = link_whole, - linker_type = linker_type, + linker_type = LinkerType(linker_type), ) if typ == "shared": @@ -137,7 +142,7 @@ def _deserialize_link_info(actions: AnalysisActions, label: Label, info) -> Link external_debug_info = make_artifact_tset( actions = actions, infos = [ - ArtifactInfo(label = label, artifacts = 
artifacts) + ArtifactInfo(label = label, artifacts = artifacts, tags = []) for _label, artifacts in external_debug_info ], ), @@ -168,7 +173,7 @@ def deserialize_anon_attrs( category_suffix = attrs.category_suffix, identifier = attrs.identifier, enable_distributed_thinlto = attrs.enable_distributed_thinlto, - allow_cache_upload = attrs.allow_cache_upload, + allow_cache_upload = cxx_attrs_get_allow_cache_upload(attrs), ) result_type = CxxLinkResultType(attrs.result_type) @@ -206,7 +211,7 @@ ANON_ATTRS = { # ObjectsLinkable attrs.list(attrs.source()), # objects attrs.bool(), # link_whole - attrs.string(), # linker_type + attrs.enum(LinkerType.values()), # linker_type ), attrs.tuple( # ArchiveLinkable @@ -216,7 +221,7 @@ ANON_ATTRS = { attrs.list(attrs.source()), # external_objects ), attrs.bool(), # link_whole - attrs.string(), # linker_type + attrs.enum(LinkerType.values()), # linker_type attrs.bool(), # supports_lto ), attrs.tuple( diff --git a/prelude/cxx/archive.bzl b/prelude/cxx/archive.bzl index e594162957fdd..2f3bf2e5a3614 100644 --- a/prelude/cxx/archive.bzl +++ b/prelude/cxx/archive.bzl @@ -5,15 +5,17 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -load("@prelude//cxx:cxx_toolchain_types.bzl", "LinkerInfo") +load("@prelude//cxx:cxx_toolchain_types.bzl", "LinkerInfo", "LinkerType") load("@prelude//linking:link_info.bzl", "Archive") +load("@prelude//utils:argfile.bzl", "at_argfile") load("@prelude//utils:utils.bzl", "value_or") load(":cxx_context.bzl", "get_cxx_toolchain_info") def _archive_flags( archiver_type: str, - linker_type: str, + linker_type: LinkerType, use_archiver_flags: bool, + symbol_table: bool, thin: bool) -> list[str]: if not use_archiver_flags: return [] @@ -33,21 +35,27 @@ def _archive_flags( # Suppress warning about creating a new archive. flags += "c" - # Run ranlib to generate symbol index for faster linking. - flags += "s" + # Run ranlib to generate symbol index for faster linking if requested. + flags += "s" if symbol_table else "S" # Generate thin archives. if thin: flags += "T" # GNU archivers support generating deterministic archives. - if linker_type == "gnu": + if linker_type == LinkerType("gnu"): flags += "D" return [flags] # Create a static library from a list of object files. 
-def _archive(ctx: AnalysisContext, name: str, args: cmd_args, thin: bool, prefer_local: bool) -> Artifact: +def _archive( + ctx: AnalysisContext, + name: str, + args: cmd_args, + thin: bool, + prefer_local: bool, + allow_cache_upload: bool) -> Artifact: archive_output = ctx.actions.declare_output(name) toolchain = get_cxx_toolchain_info(ctx) command = cmd_args(toolchain.linker_info.archiver) @@ -56,6 +64,7 @@ def _archive(ctx: AnalysisContext, name: str, args: cmd_args, thin: bool, prefer archiver_type, toolchain.linker_info.type, toolchain.linker_info.use_archiver_flags, + toolchain.linker_info.archive_symbol_table, thin, )) if archiver_type == "windows" or archiver_type == "windows_clang": @@ -67,16 +76,35 @@ def _archive(ctx: AnalysisContext, name: str, args: cmd_args, thin: bool, prefer shell_quoted_args = cmd_args(args, quote = "shell") if toolchain.linker_info.use_archiver_flags and toolchain.linker_info.archiver_flags != None: shell_quoted_args.add(toolchain.linker_info.archiver_flags) - argfile, _ = ctx.actions.write(name + ".argsfile", shell_quoted_args, allow_args = True) - command.hidden([shell_quoted_args]) - command.add(cmd_args(["@", argfile], delimiter = "")) + + command.add(at_argfile( + actions = ctx.actions, + name = name + ".cxx_archive_argsfile", + args = shell_quoted_args, + allow_args = True, + )) else: command.add(args) + # By default, the archive header produced by `ar q` embeds the current unix + # timestamp. With the GNU archiver we use `ar qD` (above in _archive_flags) + # to make it produce a deterministic archive by zeroing the timestamp, but + # other archivers do not support such a flag. Some implementations, notably + # Xcode's, instead support zeroing the timestamp by way of an environment + # variable. + env = {"ZERO_AR_DATE": "1"} + category = "archive" if thin: category = "archive_thin" - ctx.actions.run(command, category = category, identifier = name, prefer_local = prefer_local) + ctx.actions.run( + command, + category = category, + identifier = name, + env = env, + prefer_local = prefer_local, + allow_cache_upload = allow_cache_upload, + ) return archive_output def _archive_locally(ctx: AnalysisContext, linker_info: LinkerInfo) -> bool: @@ -85,21 +113,30 @@ def _archive_locally(ctx: AnalysisContext, linker_info: LinkerInfo) -> bool: return value_or(ctx.attrs._archive_objects_locally_override, archive_locally) return archive_locally +def _archive_allow_cache_upload(ctx: AnalysisContext) -> bool: + return getattr(ctx.attrs, "archive_allow_cache_upload", False) + # Creates a static library given a list of object files. def make_archive( ctx: AnalysisContext, name: str, objects: list[Artifact], - args: [cmd_args, None] = None) -> Archive: + hidden: list[Artifact] = []) -> Archive: if len(objects) == 0: fail("no objects to archive") - if args == None: - args = cmd_args(objects) - linker_info = get_cxx_toolchain_info(ctx).linker_info thin = linker_info.archive_contents == "thin" - archive = _archive(ctx, name, args, thin = thin, prefer_local = _archive_locally(ctx, linker_info)) + object_args = cmd_args(objects, ignore_artifacts = not linker_info.archiver_reads_inputs) + args = cmd_args(object_args, hidden = hidden) + archive = _archive( + ctx, + name, + args, + thin = thin, + prefer_local = _archive_locally(ctx, linker_info), + allow_cache_upload = _archive_allow_cache_upload(ctx), + ) # TODO(T110378125): use argsfiles for GNU archiver for long lists of objects. # TODO(T110378123): for BSD archiver, split long args over multiple invocations. 
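Note that archive.bzl above, and bitcode.bzl and comp_db.bzl below, all switch from hand-rolled argfiles (a `ctx.actions.write`, a manually formatted `@path` argument, and a `.hidden()` call to carry the inputs) to the shared `at_argfile` helper from `@prelude//utils:argfile.bzl`. A minimal sketch of the pattern, assuming `at_argfile` returns a `cmd_args` that renders as `@<file>` while keeping the underlying args attached as hidden inputs (the tool and file names here are illustrative):

    load("@prelude//utils:argfile.bzl", "at_argfile")

    def _run_with_argfile(ctx: AnalysisContext, tool: RunInfo, args: cmd_args) -> Artifact:
        out = ctx.actions.declare_output("out.bin")
        cmd = cmd_args(
            tool,
            # Writes `args` to an argsfile and expands to "@<path>" on the
            # command line; the args stay attached as hidden dependencies.
            at_argfile(
                actions = ctx.actions,
                name = "tool.argsfile",
                args = args,
                allow_args = True,
            ),
            "-o",
            out.as_output(),
        )
        ctx.actions.run(cmd, category = "tool_with_argfile")
        return out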
diff --git a/prelude/cxx/argsfiles.bzl b/prelude/cxx/argsfiles.bzl
index 81dbdfeea78a3..b3f8627848253 100644
--- a/prelude/cxx/argsfiles.bzl
+++ b/prelude/cxx/argsfiles.bzl
@@ -6,7 +6,6 @@
 # of this source tree.
 
 ARGSFILES_SUBTARGET = "argsfiles"
-ABS_ARGSFILES_SUBTARGET = "abs-argsfiles"
 
 # Information on argsfiles created for compilation.
 CompileArgsfile = record(
@@ -15,7 +14,7 @@ CompileArgsfile = record(
     # This argsfile as a command form that would use the argsfile (includes dependent inputs).
     cmd_form = field(cmd_args),
     # Input args necessary for the argsfile to reference.
-    input_args = field(list[["artifacts", cmd_args]]),
+    input_args = field(list[cmd_args]),
     # Args as written to the argsfile (with shell quoting applied).
     args = field(cmd_args),
     # Args aggregated for the argsfile excluding file prefix args (excludes shell quoting).
@@ -25,19 +24,19 @@ CompileArgsfiles = record(
     # Relative path argsfiles used for build actions, mapped by extension.
     relative = field(dict[str, CompileArgsfile], default = {}),
-    # Absolute path argsfiles used for extra outputs, mapped by extension.
-    absolute = field(dict[str, CompileArgsfile], default = {}),
+    # Argsfiles used for Xcode integration, mapped by extension.
+    xcode = field(dict[str, CompileArgsfile], default = {}),
 )
 
 def get_argsfiles_output(ctx: AnalysisContext, argsfile_by_ext: dict[str, CompileArgsfile], summary_name: str) -> DefaultInfo:
     argsfiles = []
-    argsfile_names = cmd_args()
+    argsfile_names = []
     dependent_outputs = []
     for _, argsfile in argsfile_by_ext.items():
         argsfiles.append(argsfile.file)
-        argsfile_names.add(cmd_args(argsfile.file).ignore_artifacts())
+        argsfile_names.append(cmd_args(argsfile.file, ignore_artifacts = True))
         dependent_outputs.extend(argsfile.input_args)
 
-    argsfiles_summary = ctx.actions.write(summary_name, argsfile_names)
+    argsfiles_summary = ctx.actions.write(summary_name, cmd_args(argsfile_names))
 
     return DefaultInfo(default_outputs = [argsfiles_summary] + argsfiles, other_outputs = dependent_outputs)
diff --git a/prelude/cxx/attr_selection.bzl b/prelude/cxx/attr_selection.bzl
index 020040d8d5245..cfd83383809ba 100644
--- a/prelude/cxx/attr_selection.bzl
+++ b/prelude/cxx/attr_selection.bzl
@@ -32,7 +32,7 @@ def cxx_by_language_ext(x: dict[typing.Any, typing.Any], ext: str) -> list[typin
         # And you can see them in java code, but somehow it works with
         # this one, which is seen across the repo. Find out what's happening.
         key_compiler = "c_cpp_output"
-    elif ext in (".cpp", ".cc", ".cxx", ".c++"):
+    elif ext in (".cpp", ".cc", ".cxx", ".c++", ".bc"):
         key_pp = "cxx"
         key_compiler = "cxx_cpp_output"
     elif ext == ".m":
@@ -41,7 +41,7 @@ def cxx_by_language_ext(x: dict[typing.Any, typing.Any], ext: str) -> list[typin
     elif ext == ".mm":
         key_pp = "objcxx"
        key_compiler = "objcxx_cpp_output"
-    elif ext in (".s", ".S"):
+    elif ext in (".s", ".sx", ".S"):
         key_pp = "assembler_with_cpp"
         key_compiler = "assembler"
     elif ext == ".cu":
diff --git a/prelude/cxx/bitcode.bzl b/prelude/cxx/bitcode.bzl
index 782e8a30b33b5..ae95ea9d5c9a0 100644
--- a/prelude/cxx/bitcode.bzl
+++ b/prelude/cxx/bitcode.bzl
@@ -6,6 +6,7 @@
 # of this source tree.
load("@prelude//cxx:cxx_toolchain_types.bzl", "LinkerInfo") +load("@prelude//utils:argfile.bzl", "at_argfile") load("@prelude//utils:utils.bzl", "value_or") load(":cxx_context.bzl", "get_cxx_toolchain_info") @@ -35,14 +36,19 @@ def _bundle(ctx: AnalysisContext, name: str, args: cmd_args, prefer_local: bool) bundle_output = ctx.actions.declare_output(name) - argsfile, _ = ctx.actions.write(name + ".argsfile", args, allow_args = True) - - command = cmd_args(argsfile, format = "@{}", delimiter = "").hidden(args) - llvm_cmd = cmd_args(llvm_link) - llvm_cmd.add(command) - llvm_cmd.add("-v") - llvm_cmd.add("-o") - llvm_cmd.add(bundle_output.as_output()) + command = at_argfile( + actions = ctx.actions, + name = name + ".cxx_bitcode_argsfile", + args = args, + allow_args = True, + ) + llvm_cmd = cmd_args( + llvm_link, + command, + "-v", + "-o", + bundle_output.as_output(), + ) ctx.actions.run(llvm_cmd, category = "bitcode_bundle", identifier = name, prefer_local = prefer_local) return bundle_output @@ -70,7 +76,7 @@ def make_bitcode_bundle( if override and len(objects) > 1: args.add(objects[0]) overrides = cmd_args(objects[1:], format = "--override={}") - args.add(overrides).hidden(objects) + args.add(overrides) else: args.add(objects) diff --git a/prelude/cxx/comp_db.bzl b/prelude/cxx/comp_db.bzl index 616a1d814152c..c0d8151292915 100644 --- a/prelude/cxx/comp_db.bzl +++ b/prelude/cxx/comp_db.bzl @@ -7,6 +7,7 @@ load("@prelude//:paths.bzl", "paths") load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxPlatformInfo", "CxxToolchainInfo") +load("@prelude//utils:argfile.bzl", "at_argfile") load( ":compile.bzl", "CxxSrcCompileCommand", # @unused Used as a type @@ -30,47 +31,46 @@ def make_compilation_db_info(src_compile_cmds: list[CxxSrcCompileCommand], toolc def create_compilation_database( ctx: AnalysisContext, src_compile_cmds: list[CxxSrcCompileCommand], - indentifier: str) -> DefaultInfo: - mk_comp_db = get_cxx_toolchain_info(ctx).mk_comp_db[RunInfo] + identifier: str) -> DefaultInfo: + mk_comp_db = get_cxx_toolchain_info(ctx).internal_tools.make_comp_db # Generate the per-source compilation DB entries. entries = {} other_outputs = [] for src_compile_cmd in src_compile_cmds: - cdb_path = paths.join(indentifier, "__comp_db__", src_compile_cmd.src.short_path + ".comp_db.json") + cdb_path = paths.join(identifier, "__comp_db__", src_compile_cmd.src.short_path + ".comp_db.json") if cdb_path not in entries: entry = ctx.actions.declare_output(cdb_path) - cmd = cmd_args(mk_comp_db) - cmd.add("gen") - cmd.add(cmd_args(entry.as_output(), format = "--output={}")) - cmd.add(src_compile_cmd.src.basename) - cmd.add(cmd_args(src_compile_cmd.src).parent()) - cmd.add("--") - cmd.add(src_compile_cmd.cxx_compile_cmd.base_compile_cmd) - cmd.add(src_compile_cmd.cxx_compile_cmd.argsfile.cmd_form) - cmd.add(src_compile_cmd.args) - entry_identifier = paths.join(indentifier, src_compile_cmd.src.short_path) + cmd = cmd_args( + mk_comp_db, + "gen", + cmd_args(entry.as_output(), format = "--output={}"), + src_compile_cmd.src.basename, + cmd_args(src_compile_cmd.src, parent = 1), + "--", + src_compile_cmd.cxx_compile_cmd.base_compile_cmd, + src_compile_cmd.cxx_compile_cmd.argsfile.cmd_form, + src_compile_cmd.args, + ) + entry_identifier = paths.join(identifier, src_compile_cmd.src.short_path) ctx.actions.run(cmd, category = "cxx_compilation_database", identifier = entry_identifier) # Add all inputs the command uses to runtime files. 
            other_outputs.append(cmd)
            entries[cdb_path] = entry
 
-    content = cmd_args()
-    for v in entries.values():
-        content.add(v)
-
-    argfile = ctx.actions.declare_output(paths.join(indentifier, "comp_db.argsfile"))
-    ctx.actions.write(argfile.as_output(), content)
-
     # Merge all entries into the actual compilation DB.
-    db = ctx.actions.declare_output(paths.join(indentifier, "compile_commands.json"))
+    db = ctx.actions.declare_output(paths.join(identifier, "compile_commands.json"))
     cmd = cmd_args(mk_comp_db)
     cmd.add("merge")
     cmd.add(cmd_args(db.as_output(), format = "--output={}"))
-    cmd.add(cmd_args(argfile, format = "@{}"))
-    cmd.hidden(entries.values())
-    ctx.actions.run(cmd, category = "cxx_compilation_database_merge", identifier = indentifier)
+    cmd.add(at_argfile(
+        actions = ctx.actions,
+        name = identifier + ".cxx_comp_db_argsfile",
+        args = entries.values(),
+    ))
+
+    ctx.actions.run(cmd, category = "cxx_compilation_database_merge", identifier = identifier)
 
     return DefaultInfo(default_output = db, other_outputs = other_outputs)
diff --git a/prelude/cxx/compilation_database_labels.bzl b/prelude/cxx/compilation_database_labels.bzl
new file mode 100644
index 0000000000000..ee064331e5ecf
--- /dev/null
+++ b/prelude/cxx/compilation_database_labels.bzl
@@ -0,0 +1,72 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+"""
+Handles labels used to provide compilation database information for filegroup() and genrule() targets.
+
+Our language services need to know how to compile files owned solely by filegroup() or genrule() targets like:
+* Regular generated sources, that then end up being compiled by regular cxx_ or apple_ targets.
+* Manually declared mixin files, that are always compiled by multiple other targets spread across the codebase.
+* Files built by external build systems wrapped in genrules(), where compile_commands.json is produced by yet another genrule().
+
+The prior approach for the former two cases was to run rdeps() queries to find a compilable target that would have a compile_commands.json entry for the file.
+It suffered from reliability and performance issues: the universe for rdeps() queries had to be quite broad, with no guarantee that it was free of even a single broken target.
+For external build system wrappers, where there is no compilable target, we could define a rule that effectively wraps two genrules and exposes one of them as a [compilation-database] subtarget,
+but that wouldn't solve the problem with mixins, which is still relevant with external build systems, and it would put us in the same suboptimal spot in terms of performance and reliability.
+
+The IDE needs to operate in O(changes) instead of O(repo) and open files even if some other corner of the repo is broken,
+so we need to make things both reliable and performant in an ever-growing codebase with a CI that explicitly cannot guarantee that the entire repo is green, and where rdeps() queries are thus flaky and slow.
+
+And as the IDE needs to react to any local changes and act consistently with the local checkout, we cannot simply use a remote cache for the slow and flaky rdeps() queries.
+
+So the solution is instead to localize the required information within the target, and directly point to the build system rules that provide compile_commands.json for the target.
+"""
+
+def compilation_database_rules(source_mapping: dict[str, list[str]] | list[str]) -> list[str]:
+    """
+    Takes a mapping from sources to the rules to be used to build compilation databases for those sources.
+
+    Tooling like IDEs needs to obtain compile commands for source files that are exported by filegroup() to be built as part of another target, or are built with an external build system wrapped in a genrule().
+    Labels provide a convenient way to link the non-compilable target with a rule that produces a compilation database for its sources:
+    ```
+    load("@prelude//cxx:compilation_database_labels.bzl", "compilation_database_rules")
+
+    # The shorthand way for most cases:
+    export_file(
+        name = "gadget_to_be_compiled_as_part_of_another_target.cpp",
+        labels = compilation_database_rules([
+            "//path/to/some/dependent:target",
+            "//path/to/another/dependent:target",
+        ])
+    )
+
+    # A per-source mapping for cases when the generated files from one genrule() are compiled in different targets and never together:
+    genrule(
+        name = "multiple_gadgets_for_different_purposes",
+        labels = compilation_database_rules({
+            "server_gen.cpp": ["//path/to/dependent/module:server"],
+            "client_gen.cpp": ["//path/to/dependent/module:client"],
+        })
+    )
+    ```
+    The tooling can use a BXL script to check the target kind and extract the compilation database rule from its labels, and then iterate over the resulting compilation database, resolving the symlinks in 'file' entries to find the matching entry for the original source.
+
+    :param dict[str,list[str]]|list[str] source_mapping: A mapping with a source file name regex as key and a list of targets as value. Each target has to be either a target with a [compilation-database] subtarget, or a genrule that produces compile_commands.json (for wrapping external build systems).
+    """
+    if not isinstance(source_mapping, dict):
+        source_mapping = {".*": source_mapping}
+    return ["compilation_database_rules=" + json.encode(source_mapping)]
+
+def get_compilation_database_rules(labels: list[str]) -> dict[str, list[str]] | None:
+    """
+    Retrieves and decodes compilation database targets from target labels, if any.
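For orientation, the label round-trip implemented by these two functions looks roughly like this (exact JSON whitespace aside; the target label is an example):

    labels = compilation_database_rules(["//path/to/dependent:target"])
    # labels == ['compilation_database_rules={".*":["//path/to/dependent:target"]}']

    get_compilation_database_rules(labels)
    # == {".*": ["//path/to/dependent:target"]}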
+ """ + for label in labels: + value = label.removeprefix("compilation_database_rules=") + if value != label: + return json.decode(value) + return None diff --git a/prelude/cxx/compile.bzl b/prelude/cxx/compile.bzl index ec6a94db8e6ad..d5889e4689ea2 100644 --- a/prelude/cxx/compile.bzl +++ b/prelude/cxx/compile.bzl @@ -7,6 +7,7 @@ load("@prelude//:paths.bzl", "paths") load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxToolchainInfo") +load("@prelude//cxx:cxx_utility.bzl", "cxx_attrs_get_allow_cache_upload") load("@prelude//linking:lto.bzl", "LtoMode") load( "@prelude//utils:utils.bzl", @@ -23,10 +24,13 @@ load( "get_pic_flags", ) load(":cxx_context.bzl", "get_cxx_toolchain_info") +load(":cxx_sources.bzl", "CxxSrcWithFlags") load(":cxx_toolchain_types.bzl", "CxxObjectFormat", "DepTrackingMode") +load(":cxx_types.bzl", "CxxRuleConstructorParams") load(":debug.bzl", "SplitDebugMode") load( ":headers.bzl", + "CHeader", "CPrecompiledHeaderInfo", ) load(":platform.bzl", "cxx_by_platform") @@ -34,11 +38,20 @@ load( ":preprocessor.bzl", "CPreprocessor", # @unused Used as a type "CPreprocessorInfo", # @unused Used as a type - "cxx_attr_preprocessor_flags", + "HeaderUnit", # @unused Used as a type "cxx_merge_cpreprocessors", "get_flags_for_compiler_type", ) +# Supported assembly extensions +AsmExtensions = enum( + ".s", + ".sx", + ".S", + ".asm", + ".asmpp", +) + # Supported Cxx file extensions CxxExtension = enum( ".cpp", @@ -46,19 +59,27 @@ CxxExtension = enum( ".cxx", ".c++", ".c", - ".s", - ".S", ".m", ".mm", ".cu", ".hip", - ".asm", - ".asmpp", ".h", ".hpp", ".hh", ".h++", ".hxx", + ".bc", + *AsmExtensions.values() +) + +# Header files included in compilation databases +HeaderExtension = enum( + ".h", + ".hpp", + ".hh", + ".h++", + ".hxx", + ".cuh", ) # File types for dep files @@ -89,10 +110,17 @@ _CxxCompileCommand = record( base_compile_cmd = field(cmd_args), # The argsfile of arguments from the rule and it's dependencies. argsfile = field(CompileArgsfile), + # The argsfile to use for Xcode integration. + xcode_argsfile = field(CompileArgsfile), + # The argsfile containing exported header units args (for precompilation). + header_units_argsfile = field(CompileArgsfile | None), + # The argsfile containing all header units args (for actual compilation). + private_header_units_argsfile = field(CompileArgsfile | None), headers_dep_files = field([_HeadersDepFiles, None]), compiler_type = field(str), # The action category category = field(str), + allow_cache_upload = field(bool), ) # Information about how to compile a source file. @@ -106,28 +134,37 @@ CxxSrcCompileCommand = record( cxx_compile_cmd = field(_CxxCompileCommand), # Arguments specific to the source file. args = field(list[typing.Any]), + # Is this a header file? + is_header = field(bool, False), + # The index store factory to use to generate index store for this source file. + index_store_factory = field(typing.Callable | None, None), + error_handler = field([typing.Callable, None], None), +) + +_CxxSrcPrecompileCommand = record( + # Source file to compile. + src = field(Artifact), + # The CxxCompileCommand to use to compile this file. + cxx_compile_cmd = field(_CxxCompileCommand), + # Arguments specific to the source file. + args = field(list[typing.Any]), + # Extra argsfile to include after any other header units argsfile but before the + # main argsfiles. + extra_argsfile = field([CompileArgsfile, None], None), ) # Output of creating compile commands for Cxx source files. 
 CxxCompileCommandOutput = record(
     # List of compile commands for each source file.
     src_compile_cmds = field(list[CxxSrcCompileCommand], default = []),
+    # Base compile commands for each source file extension.
+    base_compile_cmds = field(dict[CxxExtension, _CxxCompileCommand], default = {}),
     # Argsfiles generated for compiling these source files.
     argsfiles = field(CompileArgsfiles, default = CompileArgsfiles()),
     # List of compile commands for use in compilation database generation.
     comp_db_compile_cmds = field(list[CxxSrcCompileCommand], default = []),
 )
 
-# An input to cxx compilation, consisting of a file to compile and optional
-# file specific flags to compile with.
-CxxSrcWithFlags = record(
-    file = field(Artifact),
-    flags = field(list[ResolvedStringWithMacros], []),
-    # If we have multiple source entries with same files but different flags,
-    # specify an index so we can differentiate them. Otherwise, use None.
-    index = field([int, None], None),
-)
-
 CxxCompileOutput = record(
     # The compiled `.o` file.
     object = field(Artifact),
@@ -136,45 +173,169 @@ CxxCompileOutput = record(
     # Externally referenced debug info, which doesn't get linked with the
     # object (e.g. the above `.o` when using `-gsplit-dwarf=single` or
     # the `.dwo` when using `-gsplit-dwarf=split`).
-    external_debug_info = field([Artifact, None], None),
-    clang_remarks = field([Artifact, None], None),
-    clang_trace = field([Artifact, None], None),
+    external_debug_info = field(Artifact | None, None),
+    clang_remarks = field(Artifact | None, None),
+    clang_trace = field(Artifact | None, None),
+    gcno_file = field(Artifact | None, None),
+    index_store = field(Artifact | None, None),
+    assembly = field(Artifact | None, None),
+    diagnostics = field(Artifact | None, None),
+    preproc = field(Artifact | None, None),
 )
 
+CxxCompileFlavor = enum(
+    # Default compilation without alterations
+    "default",
+    # Produces position independent compile outputs
+    "pic",
+    # Produces position independent compile outputs
+    # using optimization flags from toolchain
+    "pic_optimized",
+)
+
+_XCODE_ARG_SUBSTITUTION = [
+    (regex("-filter-error=.+"), "-fcolor-diagnostics"),
+    (regex("-filter-ignore=.+"), "-fcolor-diagnostics"),
+    (regex("-filter-warning=.+"), "-fcolor-diagnostics"),
+    # @oss-disable: (regex("-fobjc-export-direct-methods"), "-fcolor-diagnostics"),
+    # @oss-disable: (regex("-fpika-runtime-checks"), "-fcolor-diagnostics"),
+]
+
+def get_source_extension_for_header(header_extension: str, default: CxxExtension) -> CxxExtension:
+    """
+    Which source file extension to use to get compiler flags for the header.
+    """
+    if header_extension in (".hpp", ".hh", ".h++", ".hxx"):
+        return CxxExtension(".cpp")
+    elif header_extension == ".cuh":
+        return CxxExtension(".cu")
+    elif header_extension not in HeaderExtension.values():
+        return CxxExtension(header_extension)  # a file in `headers` has a source extension
+    else:
+        return default
+
+def get_source_extension(src: CxxSrcWithFlags, default_for_headers: CxxExtension) -> CxxExtension:
+    """
+    Which source file extension to use for a source or a header file. We want
+    headers to appear as though they are source files.
+    """
+    if src.is_header:
+        return get_source_extension_for_header(src.file.extension, default_for_headers)
+    else:
+        return CxxExtension(src.file.extension)
+
+def collect_extensions(srcs: list[CxxSrcWithFlags]) -> set[CxxExtension]:
+    """
+    Collect extensions of source files while doing light normalization.
+ """ + + duplicates = { + ".c++": ".cpp", + ".cc": ".cpp", + ".cxx": ".cpp", + } + + extensions = set([CxxExtension(duplicates.get(src.file.extension, src.file.extension)) for src in srcs]) + return extensions + +def default_source_extension_for_plain_header(rule_type: str) -> CxxExtension: + """ + Returns default source file extension to use to get get compiler flags for plain .h headers. + """ + + # Default to (Objective-)C++ instead of plain (Objective-)C as it is more likely to be compatible with both. + return CxxExtension(".mm") if rule_type.startswith("apple_") else CxxExtension(".cpp") + +def detect_source_extension_for_plain_headers(exts: set[CxxExtension], rule_type: str) -> CxxExtension: + """ + For a given list source files determine which source file extension + to use to get compiler flags for plain .h headers. + """ + + # Assembly doesn't need any special handling as included files tend to have .asm extension themselves. + # And the presence of assembly in the target doesn't tell us anything about the language of .h files. + for asm_ext in AsmExtensions: + exts.discard(asm_ext) + + if len(exts) == 0: + return default_source_extension_for_plain_header(rule_type) + + if len(exts) == 1: + return exts.pop() + if CxxExtension(".hip") in exts: + return CxxExtension(".hip") + if CxxExtension(".cu") in exts: + return CxxExtension(".cu") + if CxxExtension(".mm") in exts: + return CxxExtension(".mm") + if CxxExtension(".cpp") in exts and CxxExtension(".m") in exts: + return CxxExtension(".mm") + if CxxExtension(".cpp") in exts: + return CxxExtension(".cpp") + if CxxExtension(".m") in exts: + return CxxExtension(".m") + return CxxExtension(".c") + +def collect_source_extensions( + srcs: list[CxxSrcWithFlags], + default_for_headers: CxxExtension) -> set[CxxExtension]: + """ + Return unique source extensions from a list of source and header files where + header extensions are mapped to corresponding source extensions. + """ + source_extensions = set([get_source_extension(src, default_for_headers) for src in srcs]) + return source_extensions + +def get_header_language_mode(source_extension: CxxExtension) -> str | None: + """ + Returns the header mode to use for plain .h headers based on the + source file extension used to obtain the compiler flags for them. + """ + + # Note: CUDA doesn't have its own header language mode, but the headers have distinct .cuh extension. + modes = { + CxxExtension(".cpp"): "c++-header", + CxxExtension(".m"): "objective-c-header", + CxxExtension(".mm"): "objective-c++-header", + } + return modes.get(source_extension) + def create_compile_cmds( ctx: AnalysisContext, - # TODO(nga): this is `CxxRuleConstructorParams`, - # but there's dependency cycle between `compile.bzl` (this file) - # and `cxx_types.bzl` (where `CxxRuleConstructorParams` is defined). - impl_params: typing.Any, + impl_params: CxxRuleConstructorParams, own_preprocessors: list[CPreprocessor], - inherited_preprocessor_infos: list[CPreprocessorInfo]) -> CxxCompileCommandOutput: + inherited_preprocessor_infos: list[CPreprocessorInfo], + add_coverage_instrumentation_compiler_flags: bool, + header_preprocessor_info: CPreprocessorInfo = CPreprocessorInfo()) -> CxxCompileCommandOutput: """ Forms the CxxSrcCompileCommand to use for each source file based on it's extension and optional source file flags. Returns CxxCompileCommandOutput containing an array of the generated compile commands and argsfile output. 
""" - srcs_with_flags = [] + srcs_extensions = collect_extensions(impl_params.srcs) + extension_for_plain_headers = detect_source_extension_for_plain_headers(srcs_extensions, impl_params.rule_type) + + srcs_with_flags = [] # type: [CxxSrcWithFlags] + for src in impl_params.srcs: srcs_with_flags.append(src) - header_only = False - if len(srcs_with_flags) == 0 and len(impl_params.additional.srcs) == 0: - all_headers = flatten([x.headers for x in own_preprocessors]) - if len(all_headers) == 0: - all_raw_headers = flatten([x.raw_headers for x in own_preprocessors]) - if len(all_raw_headers) != 0: - header_only = True - for header in all_raw_headers: - if header.extension in [".h", ".hpp"]: - srcs_with_flags.append(CxxSrcWithFlags(file = header)) - else: - return CxxCompileCommandOutput() - else: - header_only = True - for header in all_headers: - if header.artifact.extension in [".h", ".hpp", ".cpp"]: - srcs_with_flags.append(CxxSrcWithFlags(file = header.artifact)) + + # Some targets have .cpp files in their `headers` lists, see D46195628 + # todo: should this be prohibited or expanded to allow all source extensions? + artifact_extensions = HeaderExtension.values() + [".cpp"] + all_headers = flatten([x.headers for x in own_preprocessors]) + for header in all_headers: + if header.artifact.extension in artifact_extensions: + srcs_with_flags.append(CxxSrcWithFlags(file = header.artifact, is_header = True)) + + all_raw_headers = flatten([x.raw_headers for x in own_preprocessors]) + for header in all_raw_headers: + if header.extension in HeaderExtension.values(): + srcs_with_flags.append(CxxSrcWithFlags(file = header, is_header = True)) + + if len(srcs_with_flags) == 0: + return CxxCompileCommandOutput() # TODO(T110378129): Buck v1 validates *all* headers used by a compilation # at compile time, but that doing that here/eagerly might be expensive (but @@ -189,79 +350,324 @@ def create_compile_cmds( ) headers_tag = ctx.actions.artifact_tag() - abs_headers_tag = ctx.actions.artifact_tag() # This headers tag is just for convenience use in _mk_argsfile and is otherwise unused. src_compile_cmds = [] - cxx_compile_cmd_by_ext = {} - argsfile_by_ext = {} - abs_argsfile_by_ext = {} + hdr_compile_cmds = [] + cxx_compile_cmd_by_ext = {} # type: dict[CxxExtension, _CxxCompileCommand] + argsfile_by_ext = {} # type: dict[str, CompileArgsfile] + xcode_argsfile_by_ext = {} # type: dict[str, CompileArgsfile] + + src_extensions = collect_source_extensions(srcs_with_flags, extension_for_plain_headers) + + # Deduplicate shared arguments to save memory. If we compile multiple files + # of the same extension they will have some of the same flags. Save on + # allocations by caching and reusing these objects. + for ext in src_extensions: + cmd = _generate_base_compile_command(ctx, impl_params, pre, header_preprocessor_info, headers_tag, ext) + cxx_compile_cmd_by_ext[ext] = cmd + argsfile_by_ext[ext.value] = cmd.argsfile + xcode_argsfile_by_ext[ext.value] = cmd.xcode_argsfile + + # only specify error_handler if one exists + error_handler_args = {} + if impl_params.error_handler: + error_handler_args["error_handler"] = impl_params.error_handler for src in srcs_with_flags: - # If we have a header_only library we'll send the header files through this path, - # and want them to appear as though they are C++ files. - ext = CxxExtension(".cpp" if header_only else src.file.extension) - - # Deduplicate shared arguments to save memory. If we compile multiple files - # of the same extension they will have some of the same flags. 
Save on - # allocations by caching and reusing these objects. - if not ext in cxx_compile_cmd_by_ext: - toolchain = get_cxx_toolchain_info(ctx) - compiler_info = _get_compiler_info(toolchain, ext) - base_compile_cmd = _get_compile_base(compiler_info) - category = _get_category(ext) - - headers_dep_files = None - dep_file_file_type_hint = _dep_file_type(ext) - if dep_file_file_type_hint != None and toolchain.use_dep_files: - tracking_mode = _get_dep_tracking_mode(toolchain, dep_file_file_type_hint) - mk_dep_files_flags = get_headers_dep_files_flags_factory(tracking_mode) - if mk_dep_files_flags: - headers_dep_files = _HeadersDepFiles( - processor = cmd_args(compiler_info.dep_files_processor), - mk_flags = mk_dep_files_flags, - tag = headers_tag, - dep_tracking_mode = tracking_mode, - ) - - argsfile_by_ext[ext.value] = _mk_argsfile(ctx, compiler_info, pre, ext, headers_tag, False) - abs_argsfile_by_ext[ext.value] = _mk_argsfile(ctx, compiler_info, pre, ext, abs_headers_tag, True) - - cxx_compile_cmd_by_ext[ext] = _CxxCompileCommand( - base_compile_cmd = base_compile_cmd, - argsfile = argsfile_by_ext[ext.value], - headers_dep_files = headers_dep_files, - compiler_type = compiler_info.compiler_type, - category = category, - ) + src_args = [] + src_args.extend(src.flags) + ext = get_source_extension(src, extension_for_plain_headers) cxx_compile_cmd = cxx_compile_cmd_by_ext[ext] - src_args = [] - src_args.extend(src.flags) - src_args.extend(["-c", src.file]) + if add_coverage_instrumentation_compiler_flags and cxx_compile_cmd.compiler_type != "gcc": + src_args.extend(ctx.attrs.coverage_instrumentation_compiler_flags) - src_compile_command = CxxSrcCompileCommand(src = src.file, cxx_compile_cmd = cxx_compile_cmd, args = src_args, index = src.index) - src_compile_cmds.append(src_compile_command) + if src.is_header: + if cxx_compile_cmd.compiler_type in ["clang", "clang_windows", "gcc"]: + language_mode = get_header_language_mode(ext) + src_args.extend(["-x", language_mode] if language_mode else []) + elif cxx_compile_cmd.compiler_type in ["clang_cl", "windows", "windows_ml64"] and ext == CxxExtension(".cpp"): + src_args.append("/TP") + + if cxx_compile_cmd.compiler_type != "nasm": + src_args.append("-c") + src_args.append(src.file) + + src_compile_command = CxxSrcCompileCommand(src = src.file, cxx_compile_cmd = cxx_compile_cmd, args = src_args, index = src.index, is_header = src.is_header, index_store_factory = impl_params.index_store_factory, **error_handler_args) + if src.is_header: + hdr_compile_cmds.append(src_compile_command) + else: + src_compile_cmds.append(src_compile_command) argsfile_by_ext.update(impl_params.additional.argsfiles.relative) - abs_argsfile_by_ext.update(impl_params.additional.argsfiles.absolute) + xcode_argsfile_by_ext.update(impl_params.additional.argsfiles.xcode) + + return CxxCompileCommandOutput( + src_compile_cmds = src_compile_cmds, + base_compile_cmds = cxx_compile_cmd_by_ext, + argsfiles = CompileArgsfiles( + relative = argsfile_by_ext, + xcode = xcode_argsfile_by_ext, + ), + comp_db_compile_cmds = src_compile_cmds + hdr_compile_cmds, + ) + +def _compile_index_store(ctx: AnalysisContext, src_compile_cmd: CxxSrcCompileCommand, toolchain: CxxToolchainInfo, compile_cmd: cmd_args, pic: bool) -> Artifact | None: + if src_compile_cmd.index_store_factory: + return src_compile_cmd.index_store_factory(ctx, src_compile_cmd, toolchain, compile_cmd, pic) + return None + +def _compile_single_cxx( + ctx: AnalysisContext, + toolchain: CxxToolchainInfo, + default_object_format: 
CxxObjectFormat, + bitcode_args: cmd_args, + optimization_flags: list, + src_compile_cmd: CxxSrcCompileCommand, + pic: bool, + provide_syntax_only: bool, + use_header_units: bool) -> CxxCompileOutput: + """ + Construct a final compile command for a single CXX source based on + `src_compile_command` and other compilation options. + """ - if header_only: - return CxxCompileCommandOutput(comp_db_compile_cmds = src_compile_cmds) + short_path = src_compile_cmd.src.short_path + if src_compile_cmd.index != None: + # Add a unique postfix if we have duplicate source files with different flags + short_path = short_path + "_" + str(src_compile_cmd.index) + + filename_base = short_path + (".pic" if pic else "") + identifier = short_path + (" (pic)" if pic else "") + + if optimization_flags: + identifier += " (optimized) " + + filename_base = filename_base + (".optimized" if optimization_flags else "") + object = ctx.actions.declare_output( + "__objects__", + "{}.{}".format(filename_base, toolchain.linker_info.object_file_extension), + ) + + compiler_type = src_compile_cmd.cxx_compile_cmd.compiler_type + cmd = _get_base_compile_cmd( + bitcode_args = bitcode_args, + src_compile_cmd = src_compile_cmd, + pic = pic, + use_header_units = use_header_units, + output_args = cmd_args(get_output_flags(compiler_type, object)), + ) + cmd.add(cmd_args(optimization_flags)) + + action_dep_files = {} + + headers_dep_files = src_compile_cmd.cxx_compile_cmd.headers_dep_files + if headers_dep_files: + dep_file = ctx.actions.declare_output( + paths.join("__dep_files__", filename_base), + ).as_output() + + processor_flags, compiler_flags = headers_dep_files.mk_flags(ctx.actions, filename_base, src_compile_cmd.src) + cmd.add(compiler_flags) + + # API: First argument is the dep file source path, second is the + # dep file destination path, other arguments are the actual compile + # command. 
+ cmd = cmd_args([
+ headers_dep_files.processor,
+ headers_dep_files.dep_tracking_mode.value,
+ processor_flags,
+ headers_dep_files.tag.tag_artifacts(dep_file),
+ cmd,
+ ])
+
+ action_dep_files["headers"] = headers_dep_files.tag
+
+ clang_remarks = None
+ if toolchain.clang_remarks and compiler_type == "clang":
+ cmd.add(["-fsave-optimization-record", "-fdiagnostics-show-hotness", "-foptimization-record-passes=" + toolchain.clang_remarks])
+ clang_remarks = ctx.actions.declare_output(
+ paths.join("__objects__", "{}.opt.yaml".format(filename_base)),
+ )
+ cmd.add(cmd_args(hidden = clang_remarks.as_output()))
+
+ clang_trace = None
+ if toolchain.clang_trace and compiler_type == "clang":
+ cmd.add(["-ftime-trace"])
+ clang_trace = ctx.actions.declare_output(
+ paths.join("__objects__", "{}.json".format(filename_base)),
+ )
+ cmd.add(cmd_args(hidden = clang_trace.as_output()))
+
+ gcno_file = None
+ if toolchain.gcno_files and src_compile_cmd.src.extension not in (".S", ".sx"):
+ cmd.add(["--coverage"])
+ gcno_file = ctx.actions.declare_output(
+ paths.join("__objects__", "{}.gcno".format(filename_base)),
+ )
+ cmd.add(cmd_args(hidden = gcno_file.as_output()))
+
+ # only specify error_handler if one exists
+ error_handler_args = {}
+ if src_compile_cmd.error_handler:
+ error_handler_args["error_handler"] = src_compile_cmd.error_handler
+
+ ctx.actions.run(
+ cmd,
+ category = src_compile_cmd.cxx_compile_cmd.category,
+ identifier = identifier,
+ dep_files = action_dep_files,
+ allow_cache_upload = src_compile_cmd.cxx_compile_cmd.allow_cache_upload,
+ allow_dep_file_cache_upload = False,
+ **error_handler_args
+ )
+
+ # If we're building with split debugging, where the debug info is in the
+ # original object, then add the object as external debug info.
+ # FIXME: ThinLTO generates debug info in a separate dwo dir, but we still
+ # need to track object files if the object file is not compiled to bitcode.
+ # We should track whether ThinLTO is used on a per-object basis rather than
+ # globally on a toolchain level.
+ object_has_external_debug_info = (
+ toolchain.split_debug_mode == SplitDebugMode("single")
+ )
+
+ # .S extension is native assembly code (machine level, processor specific)
+ # and clang will happily compile them to .o files, but the objects are always
+ # native even if we ask for bitcode. If we don't mark the output format,
+ # other tools would try to parse the .o file as LLVM-IR and fail.
+ if src_compile_cmd.src.extension in [".S", ".s"]: + object_format = CxxObjectFormat("native") else: - return CxxCompileCommandOutput( - src_compile_cmds = src_compile_cmds, - argsfiles = CompileArgsfiles( - relative = argsfile_by_ext, - absolute = abs_argsfile_by_ext, - ), - comp_db_compile_cmds = src_compile_cmds, + object_format = default_object_format + + compile_index_store_cmd = _get_base_compile_cmd( + bitcode_args = bitcode_args, + src_compile_cmd = src_compile_cmd, + pic = pic, + ) + index_store = _compile_index_store(ctx, src_compile_cmd, toolchain, compile_index_store_cmd, pic) + + # Generate asm for compiler which accept `-S` (TODO: support others) + if compiler_type in ["clang", "gcc"]: + # Generate assembler or llvm bitcode output file + assembly_extension = "s" + if compiler_type == "clang" and object_format == CxxObjectFormat("bitcode"): + assembly_extension = "ll" + assembly = ctx.actions.declare_output( + "__assembly__", + "{}.{}".format(filename_base, assembly_extension), ) + assembly_cmd = _get_base_compile_cmd( + bitcode_args = bitcode_args, + src_compile_cmd = src_compile_cmd, + pic = pic, + output_args = cmd_args("-S", get_output_flags(compiler_type, assembly)), + ) + ctx.actions.run( + assembly_cmd, + category = src_compile_cmd.cxx_compile_cmd.category, + identifier = identifier + " (assembly)", + allow_cache_upload = src_compile_cmd.cxx_compile_cmd.allow_cache_upload, + allow_dep_file_cache_upload = False, + **error_handler_args + ) + else: + assembly = None + + if compiler_type == "clang" and provide_syntax_only: + diagnostics = ctx.actions.declare_output( + "__diagnostics__", + "{}.diag.txt".format(short_path), + ) + syntax_only_cmd = _get_base_compile_cmd( + bitcode_args = bitcode_args, + src_compile_cmd = src_compile_cmd, + pic = pic, + output_args = cmd_args("-fsyntax-only"), + ) + ctx.actions.run( + [ + toolchain.internal_tools.stderr_to_file, + cmd_args(diagnostics.as_output(), format = "--out={}"), + syntax_only_cmd, + ], + category = "check", + identifier = short_path, + allow_cache_upload = src_compile_cmd.cxx_compile_cmd.allow_cache_upload, + allow_dep_file_cache_upload = False, + **error_handler_args + ) + else: + diagnostics = None + + # Generate pre-processed sources + preproc = ctx.actions.declare_output( + "__preprocessed__", + "{}.{}".format(filename_base, "i"), + ) + preproc_cmd = _get_base_compile_cmd(bitcode_args, src_compile_cmd, pic, cmd_args("-E", "-dD", get_output_flags(compiler_type, preproc))) + ctx.actions.run( + preproc_cmd, + category = src_compile_cmd.cxx_compile_cmd.category, + identifier = identifier + " (preprocessor)", + allow_cache_upload = src_compile_cmd.cxx_compile_cmd.allow_cache_upload, + allow_dep_file_cache_upload = False, + **error_handler_args + ) + + return CxxCompileOutput( + object = object, + object_format = object_format, + object_has_external_debug_info = object_has_external_debug_info, + clang_remarks = clang_remarks, + clang_trace = clang_trace, + gcno_file = gcno_file, + index_store = index_store, + assembly = assembly, + diagnostics = diagnostics, + preproc = preproc, + ) + +def _get_base_compile_cmd( + bitcode_args: cmd_args, + src_compile_cmd: CxxSrcCompileCommand, + pic: bool, + output_args: cmd_args | None = None, + use_header_units: bool = False) -> cmd_args: + """ + Construct a shared compile command for a single CXX source based on + `src_compile_command` and other compilation options. 
+ """ + cmd = cmd_args(src_compile_cmd.cxx_compile_cmd.base_compile_cmd) + if output_args: + cmd.add(output_args) + + compiler_type = src_compile_cmd.cxx_compile_cmd.compiler_type + + args = cmd_args() + + if pic: + args.add(get_pic_flags(compiler_type)) + + if use_header_units and src_compile_cmd.cxx_compile_cmd.private_header_units_argsfile: + args.add(src_compile_cmd.cxx_compile_cmd.private_header_units_argsfile.cmd_form) + + args.add(src_compile_cmd.cxx_compile_cmd.argsfile.cmd_form) + args.add(src_compile_cmd.args) + + cmd.add(args) + cmd.add(bitcode_args) + + return cmd def compile_cxx( ctx: AnalysisContext, src_compile_cmds: list[CxxSrcCompileCommand], - pic: bool = False) -> list[CxxCompileOutput]: + flavor: CxxCompileFlavor, + provide_syntax_only: bool, + use_header_units: bool = False) -> list[CxxCompileOutput]: """ For a given list of src_compile_cmds, generate output artifacts. """ @@ -287,110 +693,294 @@ def compile_cxx( objects = [] for src_compile_cmd in src_compile_cmds: - identifier = src_compile_cmd.src.short_path - if src_compile_cmd.index != None: - # Add a unique postfix if we have duplicate source files with different flags - identifier = identifier + "_" + str(src_compile_cmd.index) - - filename_base = identifier + (".pic" if pic else "") - object = ctx.actions.declare_output( - "__objects__", - "{}.{}".format(filename_base, linker_info.object_file_extension), + cxx_compile_output = _compile_single_cxx( + ctx = ctx, + toolchain = toolchain, + default_object_format = default_object_format, + bitcode_args = bitcode_args, + optimization_flags = toolchain.optimization_compiler_flags_EXPERIMENTAL if flavor == CxxCompileFlavor("pic_optimized") else [], + src_compile_cmd = src_compile_cmd, + pic = flavor != CxxCompileFlavor("default"), + provide_syntax_only = provide_syntax_only, + use_header_units = use_header_units, ) + objects.append(cxx_compile_output) - cmd = cmd_args(src_compile_cmd.cxx_compile_cmd.base_compile_cmd) - - compiler_type = src_compile_cmd.cxx_compile_cmd.compiler_type - cmd.add(get_output_flags(compiler_type, object)) - - args = cmd_args() - - if pic: - args.add(get_pic_flags(compiler_type)) - - args.add(src_compile_cmd.cxx_compile_cmd.argsfile.cmd_form) - args.add(src_compile_cmd.args) - - cmd.add(args) - cmd.add(bitcode_args) - - action_dep_files = {} - - headers_dep_files = src_compile_cmd.cxx_compile_cmd.headers_dep_files - if headers_dep_files: - dep_file = ctx.actions.declare_output( - paths.join("__dep_files__", filename_base), - ).as_output() - - processor_flags, compiler_flags = headers_dep_files.mk_flags(ctx.actions, filename_base, src_compile_cmd.src) - cmd.add(compiler_flags) - - # API: First argument is the dep file source path, second is the - # dep file destination path, other arguments are the actual compile - # command. 
- cmd = cmd_args([ - headers_dep_files.processor, - headers_dep_files.dep_tracking_mode.value, - processor_flags, - headers_dep_files.tag.tag_artifacts(dep_file), - cmd, - ]) - - action_dep_files["headers"] = headers_dep_files.tag + return objects - if pic: - identifier += " (pic)" +def _compiler_supports_header_units(compiler_info: typing.Any): + return (compiler_info.compiler_type == "clang" and + compiler_info.supports_two_phase_compilation) + +def _get_module_name(ctx: AnalysisContext, group_name: str) -> str: + return paths.normalize(paths.join( + "__header_units__", + ctx.label.package, + "{}{}.h".format(ctx.label.name, group_name), + )) + +def _get_import_filename(ctx: AnalysisContext, group_name: str) -> str: + return paths.normalize(paths.join( + ctx.label.package, + "__import__{}{}.h".format(ctx.label.name, group_name), + )) + +def _is_standalone_header(header: CHeader) -> bool: + if header.artifact.extension not in HeaderExtension.values(): + return False + if header.name.endswith("-inl.h"): + return False + if header.name.endswith(".tcc"): + return False + if header.name.endswith("-pre.h"): + return False + if header.name.endswith("-post.h"): + return False + return True + +def _convert_raw_header( + ctx: AnalysisContext, + raw_header: Artifact, + include_dirs: list[CellPath]) -> CHeader: + package_prefix = str(ctx.label.path) + ns = paths.dirname(raw_header.short_path) + for d in include_dirs: + abs_dir = str(d) + if paths.starts_with(abs_dir, package_prefix): + prefix = paths.relativize(abs_dir, package_prefix) + if paths.starts_with(ns, prefix): + ns = paths.relativize(ns, prefix) + break + return CHeader( + artifact = raw_header, + name = raw_header.basename, + namespace = ns, + named = False, + ) - clang_remarks = None - if toolchain.clang_remarks and compiler_type == "clang": - args.add(["-fsave-optimization-record", "-fdiagnostics-show-hotness", "-foptimization-record-passes=" + toolchain.clang_remarks]) - clang_remarks = ctx.actions.declare_output( - paths.join("__objects__", "{}.opt.yaml".format(filename_base)), - ) - cmd.hidden(clang_remarks.as_output()) +def _create_precompile_cmd( + ctx: AnalysisContext, + compiler_info: typing.Any, + preprocessors: list[CPreprocessor], + header_group: str | None, + group_name: str, + extra_preprocessors: list[CPreprocessor], + cmd: _CxxCompileCommand) -> _CxxSrcPrecompileCommand: + include_dirs = flatten([x.include_dirs for x in preprocessors]) + converted_headers = [ + _convert_raw_header(ctx, raw_header, include_dirs) + for raw_header in flatten([x.raw_headers for x in preprocessors]) + ] + headers = [ + header + for header in flatten([x.headers for x in preprocessors]) + converted_headers + if (_is_standalone_header(header) if header_group == None else regex_match(header_group, header.name)) + ] + + module_name = _get_module_name(ctx, group_name) + import_name = _get_import_filename(ctx, group_name) + input_header = ctx.actions.write(module_name, "") + + import_stub = ctx.actions.write( + import_name, + """ +#ifdef FACEBOOK_CPP_HEADER_UNIT +export +#endif +import \"{}\"; +""".format(module_name), + ) - clang_trace = None - if toolchain.clang_trace and compiler_type == "clang": - args.add(["-ftime-trace"]) - clang_trace = ctx.actions.declare_output( - paths.join("__objects__", "{}.json".format(filename_base)), - ) - cmd.hidden(clang_trace.as_output()) + modulemap_headers = [] + symlinked_files = {} + for header in headers: + path = paths.normalize(paths.join(header.namespace, header.name)) + symlinked_files[path] = import_stub + 
modulemap_headers.append(path) + + modulemap_content = """ +module "{}" {{ + header "{}" + export * +}} +""".format(module_name, module_name) + modulemap_file = ctx.actions.write("module.modulemap" + group_name, modulemap_content) + + src_dir = ctx.actions.symlinked_dir( + "header-unit" + group_name, + symlinked_files | { + module_name: input_header, + import_name: import_stub, + "module.modulemap": modulemap_file, + }, + ) - ctx.actions.run( - cmd, - category = src_compile_cmd.cxx_compile_cmd.category, - identifier = identifier, - dep_files = action_dep_files, + args = [] + args.extend([ + "-DFACEBOOK_CPP_HEADER_UNIT=1", + # TODO(nml): Fix warning bugs. + "-Wno-uninitialized", + "-Wno-conversion", + "-Wno-zero-as-null-pointer-constant", + "-Wno-c++98-compat-extra-semi", + ]) + + extra_argsfile = None + if extra_preprocessors: + extra_argsfile = _mk_header_units_argsfile( + ctx = ctx, + compiler_info = compiler_info, + preprocessor = cxx_merge_cpreprocessors(ctx, extra_preprocessors, []), + name = "export" + group_name, + ext = CxxExtension(".cpp"), ) - # If we're building with split debugging, where the debug info is in the - # original object, then add the object as external debug info, *unless* - # we're doing LTO, which generates debug info at link time (*except* for - # fat LTO, which still generates native code and, therefore, debug info). - object_has_external_debug_info = ( - toolchain.split_debug_mode == SplitDebugMode("single") and - linker_info.lto_mode in (LtoMode("none"), LtoMode("fat")) - ) + for header in headers: + args.extend(["-include", paths.join(header.namespace, header.name)]) + args.extend(["-xc++-user-header", "-fmodule-header"]) + args.extend(["-fmodule-name={}".format(module_name)]) + args.extend(["-Xclang", cmd_args(input_header, format = "-fmodules-embed-file={}")]) + args.extend(["--precompile", input_header]) + + return _CxxSrcPrecompileCommand( + src = src_dir, + cxx_compile_cmd = cmd, + args = args, + extra_argsfile = extra_argsfile, + ) - # .S extension is native assembly code (machine level, processor specific) - # and clang will happily compile them to .o files, but the object are always - # native even if we ask for bitcode. If we don't mark the output format, - # other tools would try and parse the .o file as LLVM-IR and fail. 
- if src_compile_cmd.src.extension in [".S", ".s"]: - object_format = CxxObjectFormat("native") - else: - object_format = default_object_format +def _precompile_single_cxx( + ctx: AnalysisContext, + impl_params: CxxRuleConstructorParams, + group_name: str, + src_compile_cmd: _CxxSrcPrecompileCommand) -> HeaderUnit: + identifier = src_compile_cmd.src.short_path + + filename = "{}.pcm".format(identifier) + module = ctx.actions.declare_output("__pcm_files__", filename) + + cmd = cmd_args(src_compile_cmd.cxx_compile_cmd.base_compile_cmd) + if src_compile_cmd.cxx_compile_cmd.header_units_argsfile: + cmd.add(src_compile_cmd.cxx_compile_cmd.header_units_argsfile.cmd_form) + if src_compile_cmd.extra_argsfile: + cmd.add(src_compile_cmd.extra_argsfile.cmd_form) + cmd.add(src_compile_cmd.cxx_compile_cmd.argsfile.cmd_form) + cmd.add(src_compile_cmd.args) + cmd.add(["-o", module.as_output()]) + + action_dep_files = {} + headers_dep_files = src_compile_cmd.cxx_compile_cmd.headers_dep_files + if headers_dep_files: + dep_file = ctx.actions.declare_output( + paths.join("__dep_files__", identifier), + ).as_output() + + processor_flags, compiler_flags = headers_dep_files.mk_flags( + ctx.actions, + identifier, + src_compile_cmd.src, + ) + cmd.add(compiler_flags) + + # API: First argument is the dep file source path, second is the + # dep file destination path, other arguments are the actual compile + # command. + cmd = cmd_args([ + headers_dep_files.processor, + headers_dep_files.dep_tracking_mode.value, + processor_flags, + headers_dep_files.tag.tag_artifacts(dep_file), + cmd, + ]) + action_dep_files["headers"] = headers_dep_files.tag + + ctx.actions.run( + cmd, + category = "cxx_precompile", + identifier = identifier, + dep_files = action_dep_files, + allow_cache_upload = src_compile_cmd.cxx_compile_cmd.allow_cache_upload, + allow_dep_file_cache_upload = False, + ) - objects.append(CxxCompileOutput( - object = object, - object_format = object_format, - object_has_external_debug_info = object_has_external_debug_info, - clang_remarks = clang_remarks, - clang_trace = clang_trace, - )) + return HeaderUnit( + name = _get_module_name(ctx, group_name), + module = module, + include_dir = src_compile_cmd.src, + import_include = _get_import_filename(ctx, group_name) if impl_params.export_header_unit == "preload" else None, + ) - return objects +def precompile_cxx( + ctx: AnalysisContext, + impl_params: CxxRuleConstructorParams, + preprocessors: list[CPreprocessor], + compile_cmd_output: CxxCompileCommandOutput) -> list[CPreprocessor]: + """ + Produces header units for the target and returns a list of preprocessors enabling + them; depending on those preprocessors will allow the corresponding module to load. 
+ """ + toolchain = get_cxx_toolchain_info(ctx) + if not _compiler_supports_header_units(toolchain.cxx_compiler_info): + return [] + + ext = CxxExtension(".cpp") + if ext not in compile_cmd_output.base_compile_cmds: + return [] + cmd = compile_cmd_output.base_compile_cmds[ext] + + header_unit_preprocessors = [] + if len(impl_params.export_header_unit_filter) <= 1: + group = None + if impl_params.export_header_unit_filter: + group = impl_params.export_header_unit_filter[0] + precompile_cmd = _create_precompile_cmd( + ctx = ctx, + compiler_info = toolchain.cxx_compiler_info, + preprocessors = preprocessors, + header_group = group, + group_name = "", + extra_preprocessors = [], + cmd = cmd, + ) + header_unit = _precompile_single_cxx(ctx, impl_params, "", precompile_cmd) + header_unit_preprocessors.append(CPreprocessor(header_units = [header_unit])) + else: + # Chain preprocessors in order. + i = 0 + for header_group in impl_params.export_header_unit_filter: + name = ".{}".format(i) + precompile_cmd = _create_precompile_cmd( + ctx = ctx, + compiler_info = toolchain.cxx_compiler_info, + preprocessors = preprocessors, + header_group = header_group, + group_name = name, + extra_preprocessors = header_unit_preprocessors, + cmd = cmd, + ) + header_unit = _precompile_single_cxx(ctx, impl_params, name, precompile_cmd) + header_unit_preprocessors.append(CPreprocessor(header_units = [header_unit])) + i += 1 + + return header_unit_preprocessors + +def cxx_objects_sub_targets(outs: list[CxxCompileOutput]) -> dict[str, list[Provider]]: + objects_sub_targets = {} + for obj in outs: + sub_targets = {} + if obj.clang_trace: + sub_targets["clang-trace"] = [DefaultInfo(obj.clang_trace)] + if obj.clang_remarks: + sub_targets["clang-remarks"] = [DefaultInfo(obj.clang_remarks)] + if obj.assembly: + sub_targets["assembly"] = [DefaultInfo(obj.assembly)] + if obj.preproc: + sub_targets["preprocessed"] = [DefaultInfo(obj.preproc)] + objects_sub_targets[obj.object.short_path] = [DefaultInfo( + obj.object, + sub_targets = sub_targets, + )] + return objects_sub_targets def _validate_target_headers(ctx: AnalysisContext, preprocessor: list[CPreprocessor]): path_to_artifact = {} @@ -406,11 +996,11 @@ def _validate_target_headers(ctx: AnalysisContext, preprocessor: list[CPreproces def _get_compiler_info(toolchain: CxxToolchainInfo, ext: CxxExtension) -> typing.Any: compiler_info = None - if ext.value in (".cpp", ".cc", ".mm", ".cxx", ".c++", ".h", ".hpp", ".hh", ".h++", ".hxx"): + if ext.value in (".cpp", ".cc", ".mm", ".cxx", ".c++", ".h", ".hpp", ".hh", ".h++", ".hxx", ".bc"): compiler_info = toolchain.cxx_compiler_info elif ext.value in (".c", ".m"): compiler_info = toolchain.c_compiler_info - elif ext.value in (".s", ".S"): + elif ext.value in (".s", ".sx", ".S"): compiler_info = toolchain.as_compiler_info elif ext.value == ".cu": compiler_info = toolchain.cuda_compiler_info @@ -436,127 +1026,235 @@ def _get_category(ext: CxxExtension) -> str: return "objc_compile" if ext.value == ".mm": return "objcxx_compile" - elif ext.value in (".s", ".S", ".asm", ".asmpp"): + elif ext.value in (".s", ".sx", ".S", ".asm", ".asmpp"): return "asm_compile" elif ext.value == ".cu": return "cuda_compile" elif ext.value == ".hip": return "hip_compile" + elif ext.value == ".bc": + return "bitcode_compile" else: # This should be unreachable as long as we handle all enum values fail("Unknown extension: " + ext.value) -def _get_compile_base(compiler_info: typing.Any) -> cmd_args: +def _get_compile_base(toolchain: CxxToolchainInfo, 
compiler_info: typing.Any) -> cmd_args: """ Given a compiler info returned by _get_compiler_info, form the base compile args. """ - cmd = cmd_args(compiler_info.compiler) - - return cmd + if toolchain.remap_cwd and compiler_info.compiler_type in ["clang", "clang_windows", "clang_cl"]: + return cmd_args(toolchain.internal_tools.remap_cwd, compiler_info.compiler) + else: + return cmd_args(compiler_info.compiler) def _dep_file_type(ext: CxxExtension) -> [DepFileType, None]: # Raw assembly doesn't make sense to capture dep files for. + # .S is preprocessed assembly, but some builds use it with + # assemblers that don't support -MF, so leave depfiles off. if ext.value in (".s", ".S", ".asm"): return None elif ext.value == ".hip": # TODO (T118797886): HipCompilerInfo doesn't have dep files processor. # Should it? return None + elif ext.value == ".bc": + # Bitcode doesn't have depfiles + return None - # Return the file type aswell + # Return the file type as well if ext.value in (".cpp", ".cc", ".mm", ".cxx", ".c++", ".h", ".hpp", ".hh", ".h++", ".hxx"): return DepFileType("cpp") elif ext.value in (".c", ".m"): return DepFileType("c") elif ext.value == ".cu": return DepFileType("cuda") - elif ext.value in (".asmpp"): + elif ext.value in (".asmpp", ".sx"): return DepFileType("asm") else: # This should be unreachable as long as we handle all enum values fail("Unknown C++ extension: " + ext.value) -def _add_compiler_info_flags(ctx: AnalysisContext, compiler_info: typing.Any, ext: CxxExtension, cmd: cmd_args): - cmd.add(compiler_info.preprocessor_flags or []) - cmd.add(compiler_info.compiler_flags or []) - cmd.add(get_flags_for_reproducible_build(ctx, compiler_info.compiler_type)) +def _add_compiler_info_flags(ctx: AnalysisContext, compiler_info: typing.Any, ext: CxxExtension) -> list: + cmd = [] + cmd.append(compiler_info.preprocessor_flags or []) + cmd.append(compiler_info.compiler_flags or []) + cmd.append(get_flags_for_reproducible_build(ctx, compiler_info.compiler_type)) if ext.value not in (".asm", ".asmpp"): # Clang's asm compiler doesn't support colorful output, so we skip this there. - cmd.add(get_flags_for_colorful_output(compiler_info.compiler_type)) + cmd.append(get_flags_for_colorful_output(compiler_info.compiler_type)) + + return cmd def _mk_argsfile( ctx: AnalysisContext, + file_name: str, + args_list: list, + is_nasm: bool, + is_xcode_argsfile: bool) -> Artifact: + if is_xcode_argsfile: + replace_regex = [] + for re, sub in _XCODE_ARG_SUBSTITUTION: + replace_regex.append((re, sub)) + file_args = cmd_args(args_list, replace_regex = replace_regex) + else: + file_args = cmd_args(args_list) if is_nasm else cmd_args(args_list, quote = "shell") + argsfile, _ = ctx.actions.write(file_name, file_args, allow_args = True) + return argsfile + +def _mk_argsfiles( + ctx: AnalysisContext, + impl_params: CxxRuleConstructorParams, compiler_info: typing.Any, preprocessor: CPreprocessorInfo, ext: CxxExtension, headers_tag: ArtifactTag, - use_absolute_paths: bool) -> CompileArgsfile: + is_xcode_argsfile: bool) -> CompileArgsfile: """ Generate and return an {ext}.argsfile artifact and command args that utilize the argsfile. 
""" - args = cmd_args() + is_nasm = compiler_info.compiler_type == "nasm" + filename_prefix = "xcode_" if is_xcode_argsfile else "" - _add_compiler_info_flags(ctx, compiler_info, ext, args) + argsfiles = [] + args_list = [] - if use_absolute_paths: - args.add(preprocessor.set.project_as_args("abs_args")) - else: - args.add(headers_tag.tag_artifacts(preprocessor.set.project_as_args("args"))) + compiler_info_flags = _add_compiler_info_flags(ctx, compiler_info, ext) + compiler_info_filename = ext.value + ".{}toolchain_cxx_args".format(filename_prefix) + argsfiles.append(_mk_argsfile(ctx, compiler_info_filename, compiler_info_flags, is_nasm, is_xcode_argsfile)) + args_list.append(compiler_info_flags) + + deps_args = [] + deps_args.append(headers_tag.tag_artifacts(preprocessor.set.project_as_args("args"))) # Different preprocessors will contain whether to use modules, # and the modulemap to use, so we need to get the final outcome. if preprocessor.set.reduce("uses_modules"): - args.add(headers_tag.tag_artifacts(preprocessor.set.project_as_args("modular_args"))) + deps_args.append(headers_tag.tag_artifacts(preprocessor.set.project_as_args("modular_args"))) + + deps_argsfile_filename = ext.value + ".{}deps_cxx_args".format(filename_prefix) + argsfiles.append(_mk_argsfile(ctx, deps_argsfile_filename, deps_args, is_nasm, is_xcode_argsfile)) + args_list.extend(deps_args) - args.add(cxx_attr_preprocessor_flags(ctx, ext.value)) - args.add(get_flags_for_compiler_type(compiler_info.compiler_type)) - args.add(_attr_compiler_flags(ctx, ext.value)) - args.add(headers_tag.tag_artifacts(preprocessor.set.project_as_args("include_dirs"))) + target_args = [] + target_args.append(_preprocessor_flags(ctx, impl_params, ext.value)) + target_args.append(get_flags_for_compiler_type(compiler_info.compiler_type)) + target_args.append(_compiler_flags(ctx, impl_params, ext.value)) + target_args.append(headers_tag.tag_artifacts(preprocessor.set.project_as_args("include_dirs"))) # Workaround as that's not precompiled, but working just as prefix header. # Another thing is that it's clang specific, should be generalized. - if ctx.attrs.precompiled_header != None: - args.add(["-include", headers_tag.tag_artifacts(ctx.attrs.precompiled_header[CPrecompiledHeaderInfo].header)]) - if ctx.attrs.prefix_header != None: - args.add(["-include", headers_tag.tag_artifacts(ctx.attrs.prefix_header)]) + if hasattr(ctx.attrs, "precompiled_header") and ctx.attrs.precompiled_header != None: + target_args.append(["-include", headers_tag.tag_artifacts(ctx.attrs.precompiled_header[CPrecompiledHeaderInfo].header)]) + if hasattr(ctx.attrs, "prefix_header") and ctx.attrs.prefix_header != None: + target_args.append(["-include", headers_tag.tag_artifacts(ctx.attrs.prefix_header)]) + + target_argsfile_filename = ext.value + ".{}target_cxx_args".format(filename_prefix) + argsfiles.append(_mk_argsfile(ctx, target_argsfile_filename, target_args, is_nasm, is_xcode_argsfile)) + args_list.extend(target_args) # Create a copy of the args so that we can continue to modify it later. 
- args_without_file_prefix_args = cmd_args(args) + args_without_file_prefix_args = cmd_args(args_list) - # Put file_prefix_args in argsfile directly, make sure they do not appear when evaluating $(cxxppflags) + # Put file_prefix_args in argsfile, make sure they do not appear when evaluating $(cxxppflags) # to avoid "argument too long" errors - if use_absolute_paths: - args.add(cmd_args(preprocessor.set.project_as_args("abs_file_prefix_args"))) + file_prefix_args = headers_tag.tag_artifacts(cmd_args(preprocessor.set.project_as_args("file_prefix_args"))) + file_prefix_args_filename = ext.value + ".{}file_prefix_cxx_args".format(filename_prefix) + argsfiles.append(_mk_argsfile(ctx, file_prefix_args_filename, [file_prefix_args], is_nasm, is_xcode_argsfile)) + args_list.append(file_prefix_args) + + if is_xcode_argsfile: + replace_regex = [] + for re, sub in _XCODE_ARG_SUBSTITUTION: + replace_regex.append((re, sub)) + args = cmd_args(args_list, replace_regex = replace_regex) + file_args = cmd_args(argsfiles, format = "@{}") else: - args.add(headers_tag.tag_artifacts(cmd_args(preprocessor.set.project_as_args("file_prefix_args")))) + args = cmd_args(args_list) if is_nasm else cmd_args(args_list, quote = "shell") + file_args = cmd_args(argsfiles, format = "-@{}") if is_nasm else cmd_args(argsfiles, format = "@{}", quote = "shell") - shell_quoted_args = cmd_args(args, quote = "shell") + file_name = ext.value + ".{}cxx_compile_argsfile".format(filename_prefix) - file_name = ext.value + ("-abs.argsfile" if use_absolute_paths else ".argsfile") - argsfile, _ = ctx.actions.write(file_name, shell_quoted_args, allow_args = True, absolute = use_absolute_paths) + # For Xcode to parse argsfiles of argsfiles, the paths in the former must be absolute. + argsfile, _ = ctx.actions.write(file_name, file_args, allow_args = True, absolute = is_xcode_argsfile) - input_args = [args] + input_args = [args, file_args] - cmd_form = cmd_args(argsfile, format = "@{}").hidden(input_args) + format = "-@{}" if is_nasm else "@{}" + cmd_form = cmd_args(argsfile, format = format, hidden = input_args) return CompileArgsfile( file = argsfile, cmd_form = cmd_form, input_args = input_args, - args = shell_quoted_args, + args = args, args_without_file_prefix_args = args_without_file_prefix_args, ) -def _attr_compiler_flags(ctx: AnalysisContext, ext: str) -> list[typing.Any]: +def _compiler_flags(ctx: AnalysisContext, impl_params: CxxRuleConstructorParams, ext: str) -> list[typing.Any]: return ( - cxx_by_language_ext(ctx.attrs.lang_compiler_flags, ext) + - flatten(cxx_by_platform(ctx, ctx.attrs.platform_compiler_flags)) + - flatten(cxx_by_platform(ctx, cxx_by_language_ext(ctx.attrs.lang_platform_compiler_flags, ext))) + + cxx_by_language_ext(impl_params.lang_compiler_flags, ext) + + flatten(cxx_by_platform(ctx, impl_params.platform_compiler_flags)) + + flatten(cxx_by_platform(ctx, cxx_by_language_ext(impl_params.lang_platform_compiler_flags, ext))) + # ctx.attrs.compiler_flags need to come last to preserve buck1 ordering, this prevents compiler # flags ordering-dependent build errors - ctx.attrs.compiler_flags + impl_params.compiler_flags + ) + +def _preprocessor_flags(ctx: AnalysisContext, impl_params: CxxRuleConstructorParams, ext: str) -> list[typing.Any]: + return ( + impl_params.preprocessor_flags + + cxx_by_language_ext(impl_params.lang_preprocessor_flags, ext) + + flatten(cxx_by_platform(ctx, impl_params.platform_preprocessor_flags)) + + flatten(cxx_by_platform(ctx, 
cxx_by_language_ext(impl_params.lang_platform_preprocessor_flags, ext))) + ) + +def _mk_header_units_argsfile( + ctx: AnalysisContext, + compiler_info: typing.Any, + preprocessor: CPreprocessorInfo, + name: str, + ext: CxxExtension) -> CompileArgsfile | None: + """ + Generate and return an argsfile artifact containing all header unit options, and + command args that utilize the argsfile. + """ + if not preprocessor.set: + return None + if _get_category(ext) != "cxx_compile": + return None + if not _compiler_supports_header_units(compiler_info): + return None + + file_name = "{}.{}.header_units_args".format(ext.value, name) + args = cmd_args() + args.add([ + # TODO(nml): We only support Clang 17+, which don't need/want the extra -f + # arguments when compiling C++20. Clang 15 is too buggy to work properly, but if + # you wanted to try, you would need the below options at the very least, to get + # started: + # "-fmodules", + # "-fno-implicit-modules", + # "-fno-implicit-module-maps", + "-Wno-experimental-header-units", + "-Wno-ambiguous-macro", + ]) + + # TODO(nml): Tag args with headers_tag.tag_artifacts() once -MD -MF reports correct + # usage of PCMs. + args.add(preprocessor.set.project_as_args("header_units_args")) + input_args = [args] + file_args = cmd_args(args, quote = "shell") + argsfile, _ = ctx.actions.write(file_name, file_args, allow_args = True) + cmd_form = cmd_args(argsfile, format = "@{}", hidden = input_args) + + return CompileArgsfile( + file = argsfile, + cmd_form = cmd_form, + input_args = input_args, + args = file_args, + args_without_file_prefix_args = args, ) def _get_dep_tracking_mode(toolchain: Provider, file_type: DepFileType) -> DepTrackingMode: @@ -566,3 +1264,50 @@ def _get_dep_tracking_mode(toolchain: Provider, file_type: DepFileType) -> DepTr return toolchain.cuda_dep_tracking_mode else: return DepTrackingMode("makefile") + +def _generate_base_compile_command( + ctx: AnalysisContext, + impl_params: CxxRuleConstructorParams, + pre: CPreprocessorInfo, + header_pre: CPreprocessorInfo, + headers_tag: ArtifactTag, + ext: CxxExtension) -> _CxxCompileCommand: + """ + Generate a common part of a compile command that is shared by all sources + with a given extension. 
+ """ + toolchain = get_cxx_toolchain_info(ctx) + compiler_info = _get_compiler_info(toolchain, ext) + base_compile_cmd = _get_compile_base(toolchain, compiler_info) + category = _get_category(ext) + + headers_dep_files = None + dep_file_file_type_hint = _dep_file_type(ext) + if dep_file_file_type_hint != None and toolchain.use_dep_files: + tracking_mode = _get_dep_tracking_mode(toolchain, dep_file_file_type_hint) + mk_dep_files_flags = get_headers_dep_files_flags_factory(tracking_mode) + if mk_dep_files_flags: + headers_dep_files = _HeadersDepFiles( + processor = cmd_args(toolchain.internal_tools.dep_file_processor), + mk_flags = mk_dep_files_flags, + tag = headers_tag, + dep_tracking_mode = tracking_mode, + ) + + argsfile = _mk_argsfiles(ctx, impl_params, compiler_info, pre, ext, headers_tag, False) + xcode_argsfile = _mk_argsfiles(ctx, impl_params, compiler_info, pre, ext, headers_tag, True) + header_units_argsfile = _mk_header_units_argsfile(ctx, compiler_info, header_pre, "public", ext) + private_header_units_argsfile = _mk_header_units_argsfile(ctx, compiler_info, pre, "private", ext) + + allow_cache_upload = cxx_attrs_get_allow_cache_upload(ctx.attrs, default = compiler_info.allow_cache_upload) + return _CxxCompileCommand( + base_compile_cmd = base_compile_cmd, + argsfile = argsfile, + xcode_argsfile = xcode_argsfile, + header_units_argsfile = header_units_argsfile, + private_header_units_argsfile = private_header_units_argsfile, + headers_dep_files = headers_dep_files, + compiler_type = compiler_info.compiler_type, + category = category, + allow_cache_upload = allow_cache_upload, + ) diff --git a/prelude/cxx/compiler.bzl b/prelude/cxx/compiler.bzl index 37967f4e2d6d4..cad51cf3a1fa1 100644 --- a/prelude/cxx/compiler.bzl +++ b/prelude/cxx/compiler.bzl @@ -58,7 +58,9 @@ def tree_style_cc_dep_files( _actions: AnalysisActions, _filename_base: str, input_file: Artifact) -> (cmd_args, cmd_args): - return (cmd_args(input_file), cmd_args(["-H"])) + # If we use color diagnostics, then error messages come through in color, which messes up parsing of the + # -H output in `show_headers_to_dep_file.py`. So make sure to pass -fno-color-diagnostics. + return (cmd_args(input_file), cmd_args(["-H", "-fno-color-diagnostics"])) def windows_cc_dep_files( _actions: AnalysisActions, diff --git a/prelude/cxx/cxx.bzl b/prelude/cxx/cxx.bzl index ab63aa9d77d38..c292ba83ba909 100644 --- a/prelude/cxx/cxx.bzl +++ b/prelude/cxx/cxx.bzl @@ -5,16 +5,18 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
+load("@prelude//:paths.bzl", "paths") load( "@prelude//android:android_providers.bzl", "merge_android_packageable_info", ) load("@prelude//apple:resource_groups.bzl", "create_resource_graph") +load("@prelude//cxx:cxx_sources.bzl", "get_srcs_with_flags") +load("@prelude//cxx:cxx_utility.bzl", "cxx_attrs_get_allow_cache_upload") load( - "@prelude//apple:xcode.bzl", - "get_project_root_file", + "@prelude//cxx:link_groups_types.bzl", + "LinkGroupInfo", # @unused Used as a type ) -load("@prelude//cxx:cxx_sources.bzl", "get_srcs_with_flags") load("@prelude//linking:execution_preference.bzl", "LinkExecutionPreference") load( "@prelude//linking:link_groups.bzl", @@ -24,13 +26,13 @@ load( "@prelude//linking:link_info.bzl", "Archive", "ArchiveLinkable", + "CxxSanitizerRuntimeInfo", "LibOutputStyle", "LinkArgs", "LinkCommandDebugOutputInfo", "LinkInfo", "LinkInfos", "LinkStrategy", - "Linkage", "LinkedObject", "SharedLibLinkable", "create_merged_link_info", @@ -53,16 +55,31 @@ load( "@prelude//linking:linkables.bzl", "linkables", ) -load("@prelude//linking:shared_libraries.bzl", "SharedLibraryInfo", "create_shared_libraries", "merge_shared_libraries") +load( + "@prelude//linking:shared_libraries.bzl", + "SharedLibraries", + "SharedLibraryInfo", + "create_shlib_from_ctx", + "extract_soname_from_shlib", + "merge_shared_libraries", + "to_soname", +) load("@prelude//linking:strip.bzl", "strip_debug_info") +load("@prelude//linking:types.bzl", "Linkage") load("@prelude//os_lookup:defs.bzl", "OsLookup") +load("@prelude//python:manifest.bzl", "create_manifest_for_entries") load( "@prelude//tests:re_utils.bzl", - "get_re_executor_from_props", + "get_re_executors_from_props", +) +load( + "@prelude//third-party:build.bzl", + "create_third_party_build_info", ) +load("@prelude//unix:providers.bzl", "UnixEnv", "create_unix_env_info") +load("@prelude//utils:expect.bzl", "expect") load( "@prelude//utils:utils.bzl", - "expect", "filter_and_map_idx", "value_or", ) @@ -91,8 +108,8 @@ load( "CxxRuleSubTargetParams", ) load( - ":groups.bzl", - "Group", # @unused Used as a type + ":groups_types.bzl", + "Group", "MATCH_ALL_LABEL", "NO_MATCH_LABEL", ) @@ -107,7 +124,6 @@ load( ) load( ":link_groups.bzl", - "LinkGroupInfo", # @unused Used as a type "LinkGroupLibSpec", "get_link_group_info", ) @@ -176,7 +192,9 @@ def cxx_library_impl(ctx: AnalysisContext) -> list[Provider]: sub_target_params, provider_params = _get_params_for_android_binary_cxx_library() else: sub_target_params = CxxRuleSubTargetParams() - provider_params = CxxRuleProviderParams() + provider_params = CxxRuleProviderParams( + third_party_build = True, + ) params = CxxRuleConstructorParams( rule_type = "cxx_library", @@ -185,6 +203,17 @@ def cxx_library_impl(ctx: AnalysisContext) -> list[Provider]: output_style_sub_targets_and_providers_factory = _get_shared_link_style_sub_targets_and_providers, generate_sub_targets = sub_target_params, generate_providers = provider_params, + compiler_flags = ctx.attrs.compiler_flags, + lang_compiler_flags = ctx.attrs.lang_compiler_flags, + platform_compiler_flags = ctx.attrs.platform_compiler_flags, + lang_platform_compiler_flags = ctx.attrs.lang_platform_compiler_flags, + preprocessor_flags = ctx.attrs.preprocessor_flags, + lang_preprocessor_flags = ctx.attrs.lang_preprocessor_flags, + platform_preprocessor_flags = ctx.attrs.platform_preprocessor_flags, + lang_platform_preprocessor_flags = ctx.attrs.lang_platform_preprocessor_flags, + use_header_units = ctx.attrs.use_header_units, + export_header_unit = 
ctx.attrs.export_header_unit, + export_header_unit_filter = ctx.attrs.export_header_unit_filter, ) output = cxx_library_parameterized(ctx, params) return output.providers @@ -228,29 +257,82 @@ def cxx_binary_impl(ctx: AnalysisContext) -> list[Provider]: link_group_info = get_link_group_info(ctx, filter_and_map_idx(LinkableGraph, cxx_attr_deps(ctx))) params = CxxRuleConstructorParams( rule_type = "cxx_binary", + executable_name = ctx.attrs.executable_name, headers_layout = cxx_get_regular_cxx_headers_layout(ctx), srcs = get_srcs_with_flags(ctx), link_group_info = link_group_info, auto_link_group_specs = get_auto_link_group_specs(ctx, link_group_info), prefer_stripped_objects = ctx.attrs.prefer_stripped_objects, - exe_allow_cache_upload = ctx.attrs.allow_cache_upload, + exe_allow_cache_upload = cxx_attrs_get_allow_cache_upload(ctx.attrs), extra_link_roots = linkables(ctx.attrs.link_group_deps), + compiler_flags = ctx.attrs.compiler_flags, + lang_compiler_flags = ctx.attrs.lang_compiler_flags, + platform_compiler_flags = ctx.attrs.platform_compiler_flags, + lang_platform_compiler_flags = ctx.attrs.lang_platform_compiler_flags, + preprocessor_flags = ctx.attrs.preprocessor_flags, + lang_preprocessor_flags = ctx.attrs.lang_preprocessor_flags, + platform_preprocessor_flags = ctx.attrs.platform_preprocessor_flags, + lang_platform_preprocessor_flags = ctx.attrs.lang_platform_preprocessor_flags, + use_header_units = ctx.attrs.use_header_units, ) output = cxx_executable(ctx, params) extra_providers = [] if output.link_command_debug_output: extra_providers.append(LinkCommandDebugOutputInfo(debug_outputs = [output.link_command_debug_output])) + if output.sanitizer_runtime_files: + extra_providers.append(CxxSanitizerRuntimeInfo(runtime_files = output.sanitizer_runtime_files)) + + # Unix env provider. + extra_providers.append( + create_unix_env_info( + actions = ctx.actions, + env = UnixEnv( + label = ctx.label, + binaries = [ + create_manifest_for_entries( + ctx = ctx, + name = "unix_env", + entries = [ + (ctx.label.name, output.binary, ""), + ], + ), + ], + ), + # TODO(agallagher): We only want to traverse deps when dynamically + # linking. + #deps = ctx.attrs.deps, + ), + ) + + # When an executable is the output of a build, also materialize all the + # unpacked external debuginfo that goes with it. This makes `buck2 build + # :main` equivalent to `buck2 build :main :main[debuginfo]`. + # + # This is wasted work if we are building an executable together with its dwp + # subtarget (`buck2 build :main :main[dwp]`) in which case a large number of + # unpacked debuginfo files can end up being materialized redundantly. LLDB + # will ignore them and obtain debuginfo via the single packed debuginfo file + # instead. + # + # But materializing unpacked debuginfo is the right tradeoff because it + # means the output of `buck2 build :main` is always immediately usable in a + # debugger. + # + # External debuginfo is *not* materialized when an executable is depended on + # by another rule, such as by $(exe ...) or exec_dep. 
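# [Editorial sketch, not part of the patch] The materialization behavior the
# comment above describes, in provider terms (names hypothetical): artifacts
# listed in other_outputs are materialized whenever this target itself is the
# requested output, but not when the binary is merely referenced, e.g. via
# $(exe ...) or an exec_dep.
def _sketch_executable_default_info(binary, runtime_files, debug_artifacts):
    return DefaultInfo(
        default_output = binary,
        other_outputs = runtime_files + debug_artifacts,
    )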
+ other_outputs = output.runtime_files + output.external_debug_info_artifacts return [ DefaultInfo( default_output = output.binary, - other_outputs = output.runtime_files, + other_outputs = other_outputs, sub_targets = output.sub_targets, ), - RunInfo(args = cmd_args(output.binary).hidden(output.runtime_files)), + RunInfo(args = cmd_args(output.binary, hidden = output.runtime_files)), output.compilation_db, output.xcode_data, + output.dist_info, ] + extra_providers def _prebuilt_item( @@ -334,31 +416,37 @@ def prebuilt_cxx_library_impl(ctx: AnalysisContext) -> list[Provider]: # Prepare the stripped static lib. static_lib_stripped = None - if static_lib != None: + if not ctx.attrs.prestripped and static_lib != None: static_lib_stripped = strip_debug_info(ctx, static_lib.short_path, static_lib) # Prepare the stripped static PIC lib. If the static PIC lib is the same # artifact as the static lib, then just re-use the stripped static lib. static_pic_lib_stripped = None - if static_lib == static_pic_lib: - static_pic_lib_stripped = static_lib_stripped - elif static_pic_lib != None: - static_pic_lib_stripped = strip_debug_info(ctx, static_pic_lib.short_path, static_pic_lib) + if not ctx.attrs.prestripped: + if static_lib == static_pic_lib: + static_pic_lib_stripped = static_lib_stripped + elif static_pic_lib != None: + static_pic_lib_stripped = strip_debug_info(ctx, static_pic_lib.short_path, static_pic_lib) if ctx.attrs.soname != None: soname = get_shared_library_name_for_param(linker_info, ctx.attrs.soname) + elif shared_lib != None and ctx.attrs.extract_soname: + soname = extract_soname_from_shlib( + actions = ctx.actions, + name = "__soname__.txt", + shared_lib = shared_lib, + ) else: soname = get_shared_library_name(linker_info, ctx.label.name, apply_default_prefix = True) + soname = to_soname(soname) # Use ctx.attrs.deps instead of cxx_attr_deps, since prebuilt rules don't have platform_deps. first_order_deps = ctx.attrs.deps exported_first_order_deps = cxx_attr_exported_deps(ctx) - project_root_file = get_project_root_file(ctx) - # Exported preprocessor info. inherited_pp_infos = cxx_inherited_preprocessor_infos(exported_first_order_deps) - generic_exported_pre = cxx_exported_preprocessor_info(ctx, cxx_get_regular_cxx_headers_layout(ctx), project_root_file, []) + generic_exported_pre = cxx_exported_preprocessor_info(ctx, cxx_get_regular_cxx_headers_layout(ctx), []) args = [] compiler_type = get_cxx_toolchain_info(ctx).cxx_compiler_info.compiler_type if header_dirs != None: @@ -366,12 +454,13 @@ def prebuilt_cxx_library_impl(ctx: AnalysisContext) -> list[Provider]: args.append(format_system_include_arg(cmd_args(x), compiler_type)) exported_items = [generic_exported_pre] if args: - exported_items.append(CPreprocessor(relative_args = CPreprocessorArgs(args = args))) - providers.append(cxx_merge_cpreprocessors( + exported_items.append(CPreprocessor(args = CPreprocessorArgs(args = args))) + propagated_preprocessor = cxx_merge_cpreprocessors( ctx, exported_items, inherited_pp_infos, - )) + ) + providers.append(propagated_preprocessor) inherited_link = cxx_inherited_link_info(first_order_deps) inherited_exported_link = cxx_inherited_link_info(exported_first_order_deps) @@ -381,7 +470,7 @@ def prebuilt_cxx_library_impl(ctx: AnalysisContext) -> list[Provider]: # Gather link infos, outputs, and shared libs for effective link style. 
outputs = {} libraries = {} - solibs = {} + solibs = [] sub_targets = {} for output_style in get_output_styles_for_linkage(preferred_linkage): out = None @@ -396,19 +485,20 @@ def prebuilt_cxx_library_impl(ctx: AnalysisContext) -> list[Provider]: archive = Archive(artifact = lib), linker_type = linker_type, link_whole = ctx.attrs.link_whole, + supports_lto = ctx.attrs.supports_lto, ) if output_style == LibOutputStyle("archive"): if static_lib: out = static_lib linkable = archive_linkable(static_lib) - linkable_stripped = archive_linkable(static_lib_stripped) + linkable_stripped = archive_linkable(static_lib_stripped) if static_lib_stripped else None elif output_style == LibOutputStyle("pic_archive"): lib = static_pic_lib or static_lib if lib: out = lib linkable = archive_linkable(lib) - linkable_stripped = archive_linkable(static_pic_lib_stripped or static_lib_stripped) + linkable_stripped = archive_linkable(static_pic_lib_stripped or static_lib_stripped) if (static_pic_lib_stripped or static_lib_stripped) else None else: # shared # If no shared library was provided, link one from the static libraries. if shared_lib != None: @@ -424,8 +514,8 @@ def prebuilt_cxx_library_impl(ctx: AnalysisContext) -> list[Provider]: shlink_args.extend(get_link_whole_args(linker_type, [lib])) link_result = cxx_link_shared_library( ctx = ctx, - output = soname, - name = soname, + output = soname.ensure_str(), + name = soname.ensure_str(), opts = link_options( links = [ LinkArgs(flags = shlink_args), @@ -466,12 +556,18 @@ def prebuilt_cxx_library_impl(ctx: AnalysisContext) -> list[Provider]: # Provided means something external to the build will provide # the libraries, so we don't need to propagate anything. if not ctx.attrs.provided: - solibs[soname] = shared_lib + solibs.append( + create_shlib_from_ctx( + ctx = ctx, + lib = shared_lib, + soname = soname, + ), + ) # Provide a sub-target that always provides the shared lib # using the soname. - if soname and shared_lib.output.basename != soname: - soname_lib = ctx.actions.copy_file(soname, shared_lib.output) + if soname and soname.is_str and shared_lib.output.basename != paths.basename(soname.ensure_str()): + soname_lib = ctx.actions.copy_file(soname.ensure_str(), shared_lib.output) else: soname_lib = shared_lib.output sub_targets["soname-lib"] = [DefaultInfo(default_output = soname_lib)] @@ -503,21 +599,8 @@ def prebuilt_cxx_library_impl(ctx: AnalysisContext) -> list[Provider]: default_output = outputs[output_style], )] - # Create the default output for the library rule given it's link style and preferred linkage cxx_toolchain = get_cxx_toolchain_info(ctx) pic_behavior = cxx_toolchain.pic_behavior - link_strategy = to_link_strategy(cxx_toolchain.linker_info.link_style) - actual_output_style = get_lib_output_style(link_strategy, preferred_linkage, pic_behavior) - output = outputs[actual_output_style] - providers.append(DefaultInfo( - default_output = output, - sub_targets = sub_targets, - )) - - # TODO(cjhopman): This is preserving existing behavior, but it doesn't make sense. These lists can be passed - # unmerged to create_merged_link_info below. Potentially that could change link order, so needs to be done more carefully. - merged_inherited_link = create_merged_link_info_for_propagation(ctx, inherited_link) - merged_inherited_exported_link = create_merged_link_info_for_propagation(ctx, inherited_exported_link) # Propagate link info provider. 
providers.append(create_merged_link_info(
@@ -527,25 +610,66 @@ def prebuilt_cxx_library_impl(ctx: AnalysisContext) -> list[Provider]:
 libraries,
 preferred_linkage = preferred_linkage,
 # Export link info from non-exported deps (when necessary).
- deps = [merged_inherited_link],
+ deps = inherited_link,
 # Export link info from out (exported) deps.
- exported_deps = [merged_inherited_exported_link],
+ exported_deps = inherited_exported_link,
 ))
 # Propagate shared libraries up the tree.
+ shared_libs = SharedLibraries(libraries = solibs)
 providers.append(merge_shared_libraries(
 ctx.actions,
- create_shared_libraries(ctx, solibs),
+ shared_libs,
 filter(None, [x.get(SharedLibraryInfo) for x in exported_first_order_deps]),
 ))
+ providers.append(
+ create_unix_env_info(
+ actions = ctx.actions,
+ env = UnixEnv(
+ label = ctx.label,
+ native_libs = [shared_libs],
+ ),
+ deps = ctx.attrs.deps + ctx.attrs.exported_deps,
+ ),
+ )
+
+ # Third-party provider.
+ third_party_build_info = create_third_party_build_info(
+ ctx = ctx,
+ paths = [] if header_dirs == None else [(d.short_path, d) for d in header_dirs],
+ cxx_headers = [propagated_preprocessor],
+ shared_libs = shared_libs.libraries,
+ cxx_header_dirs = ["include"] + ([] if header_dirs == None else [d.short_path for d in header_dirs]),
+ deps = ctx.attrs.deps + cxx_attr_exported_deps(ctx),
+ )
+ providers.append(third_party_build_info)
+ sub_targets["third-party-build"] = [
+ DefaultInfo(
+ default_output = third_party_build_info.build.root.artifact,
+ sub_targets = dict(
+ manifest = [DefaultInfo(default_output = third_party_build_info.build.manifest)],
+ ),
+ ),
+ ]
+
+ # Create the default output for the library rule given its link style and preferred linkage
+ link_strategy = to_link_strategy(cxx_toolchain.linker_info.link_style)
+ actual_output_style = get_lib_output_style(link_strategy, preferred_linkage, pic_behavior)
+ output = outputs[actual_output_style]
+ providers.append(DefaultInfo(
+ default_output = output,
+ sub_targets = sub_targets,
+ ))
+
 # Omnibus root provider.
- if LibOutputStyle("pic_archive") in libraries and (static_pic_lib or static_lib) and not ctx.attrs.header_only:
+ if LibOutputStyle("pic_archive") in libraries and (static_pic_lib or static_lib) and not ctx.attrs.header_only and soname.is_str:
 # TODO(cjhopman): This doesn't support thin archives
 linkable_root = create_linkable_root(
- name = soname,
+ label = ctx.label,
+ name = soname.ensure_str(),
 link_infos = LinkInfos(default = LinkInfo(
- name = soname,
+ name = soname.ensure_str(),
 pre_flags = (
 linker_flags.exported_flags + linker_flags.flags
@@ -573,13 +697,14 @@ def prebuilt_cxx_library_impl(ctx: AnalysisContext) -> list[Provider]:
 ctx,
 linkable_node = create_linkable_node(
 ctx = ctx,
- default_soname = soname,
+ default_soname = soname.as_str(),
 preferred_linkage = preferred_linkage,
+ default_link_strategy = to_link_strategy(cxx_toolchain.linker_info.link_style),
 exported_deps = exported_first_order_deps,
 # If we don't have link input for this link style, we pass in `None` so
 # that omnibus knows to avoid it.
link_infos = libraries, - shared_libs = solibs, + shared_libs = shared_libs, linker_flags = linker_flags, can_be_asset = getattr(ctx.attrs, "can_be_asset", False) or False, ), @@ -631,13 +756,21 @@ def cxx_test_impl(ctx: AnalysisContext) -> list[Provider]: auto_link_group_specs = get_auto_link_group_specs(ctx, link_group_info), prefer_stripped_objects = ctx.attrs.prefer_stripped_objects, extra_link_roots = linkables(ctx.attrs.link_group_deps), + compiler_flags = ctx.attrs.compiler_flags, + lang_compiler_flags = ctx.attrs.lang_compiler_flags, + platform_compiler_flags = ctx.attrs.platform_compiler_flags, + lang_platform_compiler_flags = ctx.attrs.lang_platform_compiler_flags, + preprocessor_flags = ctx.attrs.preprocessor_flags, + lang_preprocessor_flags = ctx.attrs.lang_preprocessor_flags, + platform_preprocessor_flags = ctx.attrs.platform_preprocessor_flags, + lang_platform_preprocessor_flags = ctx.attrs.lang_platform_preprocessor_flags, ) output = cxx_executable(ctx, params, is_cxx_test = True) - command = [cmd_args(output.binary).hidden(output.runtime_files)] + ctx.attrs.args + command = [cmd_args(output.binary, hidden = output.runtime_files)] + ctx.attrs.args - # Setup a RE executor based on the `remote_execution` param. - re_executor = get_re_executor_from_props(ctx) + # Setup RE executors based on the `remote_execution` param. + re_executor, executor_overrides = get_re_executors_from_props(ctx) return inject_test_run_info( ctx, @@ -648,6 +781,7 @@ def cxx_test_impl(ctx: AnalysisContext) -> list[Provider]: labels = ctx.attrs.labels, contacts = ctx.attrs.contacts, default_executor = re_executor, + executor_overrides = executor_overrides, # We implicitly make this test via the project root, instead of # the cell root (e.g. fbcode root). run_from_project_root = ( @@ -657,9 +791,14 @@ def cxx_test_impl(ctx: AnalysisContext) -> list[Provider]: use_project_relative_paths = re_executor != None, ), ) + [ - DefaultInfo(default_output = output.binary, other_outputs = output.runtime_files, sub_targets = output.sub_targets), + DefaultInfo( + default_output = output.binary, + other_outputs = output.runtime_files + output.external_debug_info_artifacts, + sub_targets = output.sub_targets, + ), output.compilation_db, output.xcode_data, + output.dist_info, ] def _get_params_for_android_binary_cxx_library() -> (CxxRuleSubTargetParams, CxxRuleProviderParams): @@ -677,6 +816,7 @@ def _get_params_for_android_binary_cxx_library() -> (CxxRuleSubTargetParams, Cxx compilation_database = False, omnibus_root = False, preprocessor_for_tests = False, + cxx_resources_as_apple_resources = False, ) return sub_target_params, provider_params diff --git a/prelude/cxx/cxx_bolt.bzl b/prelude/cxx/cxx_bolt.bzl index 271ab5e804aa3..b355f1ae3cc99 100644 --- a/prelude/cxx/cxx_bolt.bzl +++ b/prelude/cxx/cxx_bolt.bzl @@ -8,30 +8,37 @@ # BOLT (Binary Optimization Layout Tool) is a post link profile guided optimizer used for # performance-critical services in fbcode: https://www.internalfb.com/intern/wiki/HHVM-BOLT/ +load( + "@prelude//:artifact_tset.bzl", + "ArtifactTSet", + "project_artifacts", +) load(":cxx_context.bzl", "get_cxx_toolchain_info") def cxx_use_bolt(ctx: AnalysisContext) -> bool: cxx_toolchain_info = get_cxx_toolchain_info(ctx) return cxx_toolchain_info.bolt_enabled and ctx.attrs.bolt_profile != None -def bolt(ctx: AnalysisContext, prebolt_output: Artifact, identifier: [str, None]) -> Artifact: +def bolt(ctx: AnalysisContext, prebolt_output: Artifact, external_debug_info: ArtifactTSet, identifier: [str, None]) -> 
Artifact:
 output_name = prebolt_output.short_path.removesuffix("-wrapper")
 postbolt_output = ctx.actions.declare_output(output_name)
 bolt_msdk = get_cxx_toolchain_info(ctx).binary_utilities_info.bolt_msdk
 if not bolt_msdk or not cxx_use_bolt(ctx):
 fail("Cannot use bolt if bolt_msdk is not available or bolt profile is not available")
- args = cmd_args()
+
+ materialized_external_debug_info = project_artifacts(ctx.actions, [external_debug_info])
 # bolt command format:
 # {llvm_bolt} {input_bin} -o $OUT -data={fdata} {args}
- args.add(
+ args = cmd_args(
 cmd_args(bolt_msdk, format = "{}/bin/llvm-bolt"),
 prebolt_output,
 "-o",
 postbolt_output.as_output(),
 cmd_args(ctx.attrs.bolt_profile, format = "-data={}"),
 ctx.attrs.bolt_flags,
+ hidden = materialized_external_debug_info,
 )
 ctx.actions.run(
@@ -41,4 +48,23 @@ def bolt(ctx: AnalysisContext, prebolt_output: Artifact, identifier: [str, None]
 local_only = get_cxx_toolchain_info(ctx).linker_info.link_binaries_locally,
 )
- return postbolt_output
+ output = postbolt_output
+
+ if hasattr(ctx.attrs, "strip_stapsdt") and ctx.attrs.strip_stapsdt:
+ stripped_postbolt_output = ctx.actions.declare_output(output_name + "-nostapsdt")
+ ctx.actions.run(
+ # We --rename-section instead of --remove-section because objcopy's processing
+ # of --remove-section results in an invalid ELF file
+ cmd_args([
+ get_cxx_toolchain_info(ctx).binary_utilities_info.objcopy,
+ "--rename-section",
+ ".stapsdt.base=.deleted_stapsdt_base_section",
+ postbolt_output,
+ stripped_postbolt_output.as_output(),
+ ]),
+ category = "bolt_strip_stapsdt",
+ identifier = identifier,
+ )
+ output = stripped_postbolt_output
+
+ return output
diff --git a/prelude/cxx/cxx_executable.bzl b/prelude/cxx/cxx_executable.bzl
index 340ac51294029..cf639b8b2e326 100644
--- a/prelude/cxx/cxx_executable.bzl
+++ b/prelude/cxx/cxx_executable.bzl
@@ -23,16 +23,22 @@ load(
 "apple_create_frameworks_linkable",
 "apple_get_link_info_by_deduping_link_infos",
 )
-load(
- "@prelude//apple:xcode.bzl",
- "get_project_root_file",
-)
 load(
 "@prelude//cxx:cxx_bolt.bzl",
 "cxx_use_bolt",
 )
+load(
+ "@prelude//cxx:link_groups_types.bzl",
+ "LinkGroupsDebugLinkInfo",
+ "LinkGroupsDebugLinkableItem",
+)
+load(
+ "@prelude//dist:dist_info.bzl",
+ "DistInfo",
+)
 load(
 "@prelude//ide_integrations:xcode.bzl",
+ "XCODE_ARGSFILES_SUB_TARGET",
 "XCODE_DATA_SUB_TARGET",
 "XcodeDataInfo",
 "generate_xcode_data",
@@ -65,6 +71,7 @@ load(
 )
 load(
 "@prelude//linking:shared_libraries.bzl",
+ "SharedLibrary", # @unused Used as a type
 "merge_shared_libraries",
 "traverse_shared_library_info",
 )
@@ -77,7 +84,6 @@ load(
 )
 load(
 ":argsfiles.bzl",
- "ABS_ARGSFILES_SUBTARGET",
 "ARGSFILES_SUBTARGET",
 "get_argsfiles_output",
 )
@@ -89,10 +95,16 @@ load(
 )
 load(
 ":compile.bzl",
+ "CxxCompileFlavor",
 "compile_cxx",
 "create_compile_cmds",
+ "cxx_objects_sub_targets",
 )
 load(":cxx_context.bzl", "get_cxx_platform_info", "get_cxx_toolchain_info")
+load(
+ ":cxx_instrumentation.bzl",
+ "is_coverage_enabled_by_any_dep",
+)
 load(
 ":cxx_library_utility.bzl",
 "OBJECTS_SUBTARGET",
@@ -101,7 +113,6 @@ load(
 "cxx_attr_linker_flags",
 "cxx_attr_resources",
 "cxx_is_gnu",
- "cxx_objects_sub_targets",
 )
 load(
 ":cxx_link_utility.bzl",
@@ -111,6 +122,8 @@ load(
 ":cxx_types.bzl",
 "CxxRuleConstructorParams", # @unused Used as a type
 )
+load(":diagnostics.bzl", "check_sub_target")
+load(":groups.bzl", "get_dedupped_roots_from_groups")
 load(
 ":link.bzl",
 "CxxLinkerMapData",
 )
 load(
 ":link_groups.bzl",
+ "FinalLabelsToLinks",
 "LINK_GROUP_MAPPINGS_FILENAME_SUFFIX",
"LINK_GROUP_MAPPINGS_SUB_TARGET", "LINK_GROUP_MAP_DATABASE_SUB_TARGET", "LinkGroupContext", + "build_shared_libs_for_symlink_tree", + "create_debug_linkable_entries", "create_link_groups", "find_relevant_roots", "get_filtered_labels_to_links_map", @@ -130,8 +146,8 @@ load( "get_link_group", "get_link_group_map_json", "get_link_group_preferred_linkage", + "get_public_link_group_nodes", "get_transitive_deps_matching_labels", - "is_link_group_shlib", ) load( ":link_types.bzl", @@ -146,6 +162,7 @@ load( "PDB_SUB_TARGET", "get_dumpbin_providers", "get_pdb_providers", + "get_shared_library_name", ) load( ":preprocessor.bzl", @@ -156,36 +173,40 @@ load( CxxExecutableOutput = record( binary = Artifact, unstripped_binary = Artifact, - bitcode_bundle = field([Artifact, None], None), - dwp = field([Artifact, None]), - # Files that will likely need to be included as .hidden() arguments - # when executing the executable (ex. RunInfo()) + bitcode_bundle = field(Artifact | None, None), + dwp = field(Artifact | None), + # Files that must be present for the executable to run successfully. These + # are always materialized, whether the executable is the output of a build + # or executed as a host tool. They become hidden= arguments when executing + # the executable via RunInfo(). runtime_files = list[ArgLike], sub_targets = dict[str, list[DefaultInfo]], # The LinkArgs used to create the final executable in 'binary'. link_args = list[LinkArgs], # External components needed to debug the executable. external_debug_info = field(ArtifactTSet, ArtifactTSet()), - # The projection of `external_debug_info` + # The projection of `external_debug_info`. These files need to be + # materialized when this executable is the output of a build, not when it is + # used by other rules. They become other_outputs on DefaultInfo. external_debug_info_artifacts = list[TransitiveSetArgsProjection], - shared_libs = dict[str, LinkedObject], + shared_libs = list[SharedLibrary], # All link group links that were generated in the executable. auto_link_groups = field(dict[str, LinkedObject], {}), compilation_db = CxxCompilationDbInfo, xcode_data = XcodeDataInfo, linker_map_data = [CxxLinkerMapData, None], link_command_debug_output = field([LinkCommandDebugOutput, None], None), + dist_info = DistInfo, + sanitizer_runtime_files = field(list[Artifact], []), + index_stores = field(list[Artifact], []), ) def cxx_executable(ctx: AnalysisContext, impl_params: CxxRuleConstructorParams, is_cxx_test: bool = False) -> CxxExecutableOutput: - project_root_file = get_project_root_file(ctx) - # Gather preprocessor inputs. 
preprocessor_deps = cxx_attr_deps(ctx) + filter(None, [ctx.attrs.precompiled_header]) (own_preprocessor_info, test_preprocessor_infos) = cxx_private_preprocessor_info( ctx, impl_params.headers_layout, - project_root_file = project_root_file, raw_headers = ctx.attrs.raw_headers, extra_preprocessors = impl_params.extra_preprocessors, non_exported_deps = preprocessor_deps, @@ -204,13 +225,29 @@ def cxx_executable(ctx: AnalysisContext, impl_params: CxxRuleConstructorParams, impl_params, [own_preprocessor_info] + test_preprocessor_infos, inherited_preprocessor_infos, + is_coverage_enabled_by_any_dep(ctx, preprocessor_deps), + ) + compile_flavor = CxxCompileFlavor("pic") if link_strategy != LinkStrategy("static") else CxxCompileFlavor("default") + cxx_outs = compile_cxx( + ctx = ctx, + src_compile_cmds = compile_cmd_output.src_compile_cmds, + flavor = compile_flavor, + provide_syntax_only = True, + use_header_units = impl_params.use_header_units, ) - cxx_outs = compile_cxx(ctx, compile_cmd_output.src_compile_cmds, pic = link_strategy != LinkStrategy("static")) - sub_targets[ARGSFILES_SUBTARGET] = [get_argsfiles_output(ctx, compile_cmd_output.argsfiles.relative, "argsfiles")] - sub_targets[ABS_ARGSFILES_SUBTARGET] = [get_argsfiles_output(ctx, compile_cmd_output.argsfiles.absolute, "abs-argsfiles")] + sub_targets[ARGSFILES_SUBTARGET] = [get_argsfiles_output(ctx, compile_cmd_output.argsfiles.relative, ARGSFILES_SUBTARGET)] + sub_targets[XCODE_ARGSFILES_SUB_TARGET] = [get_argsfiles_output(ctx, compile_cmd_output.argsfiles.xcode, XCODE_ARGSFILES_SUB_TARGET)] sub_targets[OBJECTS_SUBTARGET] = [DefaultInfo(sub_targets = cxx_objects_sub_targets(cxx_outs))] + diagnostics = { + compile_cmd.src.short_path: out.diagnostics + for compile_cmd, out in zip(compile_cmd_output.src_compile_cmds, cxx_outs) + if out.diagnostics != None + } + if len(diagnostics) > 0: + sub_targets["check"] = check_sub_target(ctx, diagnostics) + # Compilation DB. comp_db = create_compilation_database(ctx, compile_cmd_output.src_compile_cmds, "compilation-database") sub_targets["compilation-database"] = [comp_db] @@ -222,6 +259,9 @@ def cxx_executable(ctx: AnalysisContext, impl_params: CxxRuleConstructorParams, # comp_db_compile_cmds can include header files being compiled as C++ which should not be exposed in the [compilation-database] subtarget comp_db_info = make_compilation_db_info(compile_cmd_output.comp_db_compile_cmds, get_cxx_toolchain_info(ctx), get_cxx_platform_info(ctx)) + # Index Stores created by cxx compile + index_stores = [out.index_store for out in cxx_outs if out.index_store] + # Link deps link_deps = linkables(cxx_attr_deps(ctx)) + impl_params.extra_link_deps @@ -257,10 +297,15 @@ def cxx_executable(ctx: AnalysisContext, impl_params: CxxRuleConstructorParams, ) # Gather link inputs. 
- own_link_flags = cxx_attr_linker_flags(ctx) + impl_params.extra_link_flags + impl_params.extra_exported_link_flags + own_link_flags = ( + get_cxx_toolchain_info(ctx).linker_info.binary_linker_flags + + cxx_attr_linker_flags(ctx) + + impl_params.extra_link_flags + + impl_params.extra_exported_link_flags + ) # ctx.attrs.binary_linker_flags should come after default link flags so it can be used to override default settings - own_binary_link_flags = impl_params.extra_binary_link_flags + own_link_flags + ctx.attrs.binary_linker_flags + own_exe_link_flags = impl_params.extra_binary_link_flags + own_link_flags + ctx.attrs.binary_linker_flags deps_merged_link_infos = [d.merged_link_info for d in link_deps] frameworks_linkable = apple_create_frameworks_linkable(ctx) swiftmodule_linkable = impl_params.swiftmodule_linkable @@ -271,8 +316,13 @@ def cxx_executable(ctx: AnalysisContext, impl_params: CxxRuleConstructorParams, # Link group libs. link_group_libs = {} + + # Maps each target label to the link group that included it + targets_consumed_by_link_groups = {} auto_link_groups = {} - labels_to_links_map = {} + labels_to_links = FinalLabelsToLinks( + map = {}, + ) if not link_group_mappings: # We cannot support deriving link execution preference off the included links, as we've already @@ -302,26 +352,39 @@ def cxx_executable(ctx: AnalysisContext, impl_params: CxxRuleConstructorParams, # If we're using auto-link-groups, where we generate the link group links # in the prelude, the link group map will give us the link group libs. # Otherwise, pull them from the `LinkGroupLibInfo` provider from our deps. + + public_link_group_nodes = get_public_link_group_nodes( + linkable_graph_node_map, + link_group_mappings, + exec_dep_roots + link_group_extra_link_roots, + link_group, + ) + link_group_libs_debug_info = {} if impl_params.auto_link_group_specs != None: linked_link_groups = create_link_groups( ctx = ctx, link_groups = link_groups, + link_strategy = link_strategy, link_group_mappings = link_group_mappings, link_group_preferred_linkage = link_group_preferred_linkage, executable_deps = exec_dep_roots, linker_flags = own_link_flags, link_group_specs = impl_params.auto_link_group_specs, - root_link_group = link_group, linkable_graph_node_map = linkable_graph_node_map, other_roots = link_group_extra_link_roots, prefer_stripped_objects = impl_params.prefer_stripped_objects, anonymous = ctx.attrs.anonymous_link_groups, + allow_cache_upload = impl_params.exe_allow_cache_upload, + public_nodes = public_link_group_nodes, + error_handler = impl_params.error_handler, ) + link_group_libs_debug_info = linked_link_groups.libs_debug_info for name, linked_link_group in linked_link_groups.libs.items(): auto_link_groups[name] = linked_link_group.artifact if linked_link_group.library != None: link_group_libs[name] = linked_link_group.library - own_binary_link_flags += linked_link_groups.symbol_ldflags + own_exe_link_flags += linked_link_groups.symbol_ldflags + targets_consumed_by_link_groups = linked_link_groups.targets_consumed_by_link_groups else: # NOTE(agallagher): We don't use version scripts and linker scripts @@ -336,7 +399,8 @@ def cxx_executable(ctx: AnalysisContext, impl_params: CxxRuleConstructorParams, # TODO(T110378098): Similar to shared libraries, we need to identify all the possible # scenarios for which we need to propagate up link info and simplify this logic. For now # we base which links to use on whether link groups are defined.
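# As a rough sketch of the flow that follows (the helpers are the real ones
# from link_groups.bzl, with control flow simplified), the per-label link map
# is built once and then projected for the final link:
#   labels_to_links = get_filtered_labels_to_links_map(public_link_group_nodes, ...)
#   filtered_links = get_filtered_links(labels_to_links.map, set(public_nodes))
#   filtered_targets = get_filtered_targets(labels_to_links.map)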
- labels_to_links_map = get_filtered_labels_to_links_map( + labels_to_links = get_filtered_labels_to_links_map( + public_link_group_nodes, linkable_graph_node_map, link_group, link_groups, @@ -362,10 +426,24 @@ def cxx_executable(ctx: AnalysisContext, impl_params: CxxRuleConstructorParams, force_static_follows_dependents = impl_params.link_groups_force_static_follows_dependents, ) + link_groups_debug_info = LinkGroupsDebugLinkInfo( + binary = LinkGroupsDebugLinkableItem( + ordered_linkables = create_debug_linkable_entries(labels_to_links.map, root = None), + ), + libs = link_group_libs_debug_info, + ) + sub_targets["link-groups-info"] = [DefaultInfo( + default_output = ctx.actions.write_json( + ctx.label.name + ".link-groups-info.json", + link_groups_debug_info, + ), + )] + if is_cxx_test and link_group != None: # if a cpp_unittest is part of the link group, we need to traverse through all deps # from the root again to ensure we link in gtest deps - labels_to_links_map = labels_to_links_map | get_filtered_labels_to_links_map( + labels_to_links_to_merge = get_filtered_labels_to_links_map( + public_link_group_nodes, linkable_graph_node_map, None, link_groups, @@ -377,6 +455,7 @@ def cxx_executable(ctx: AnalysisContext, impl_params: CxxRuleConstructorParams, is_executable_link = True, prefer_stripped = impl_params.prefer_stripped_objects, ) + labels_to_links.map |= labels_to_links_to_merge.map # NOTE: Our Haskell DLL support impl currently links transitive haskell # deps needed by DLLs which get linked into the main executable as link- @@ -387,18 +466,13 @@ def cxx_executable(ctx: AnalysisContext, impl_params: CxxRuleConstructorParams, public_nodes = get_transitive_deps_matching_labels( linkable_graph_node_map = linkable_graph_node_map, label = ctx.attrs.link_group_public_deps_label, - roots = [ - mapping.root - for group in link_group_info.groups.values() - for mapping in group.mappings - if mapping.root != None - ], + roots = get_dedupped_roots_from_groups(link_group_info.groups.values()), ) - filtered_links = get_filtered_links(labels_to_links_map, set(public_nodes)) - filtered_targets = get_filtered_targets(labels_to_links_map) + filtered_links = get_filtered_links(labels_to_links.map, set(public_nodes)) + filtered_targets = get_filtered_targets(labels_to_links.map) - link_execution_preference = get_resolved_cxx_binary_link_execution_preference(ctx, labels_to_links_map.keys(), impl_params.force_full_hybrid_if_capable) + link_execution_preference = get_resolved_cxx_binary_link_execution_preference(ctx, labels_to_links.map.keys(), impl_params.force_full_hybrid_if_capable) # Unfortunately, link_groups does not use MergedLinkInfo to represent the args # for the resolved nodes in the graph. @@ -410,49 +484,41 @@ def cxx_executable(ctx: AnalysisContext, impl_params: CxxRuleConstructorParams, dep_links = LinkArgs(infos = filtered_links) sub_targets[LINK_GROUP_MAP_DATABASE_SUB_TARGET] = [get_link_group_map_json(ctx, filtered_targets)] - # Set up shared libraries symlink tree only when needed - shared_libs = {} - - # Add in extra, rule-specific shared libs. 
- for name, shlib in impl_params.extra_shared_libs.items(): - shared_libs[name] = shlib.lib - # Only setup a shared library symlink tree when shared linkage or link_groups is used - gnu_use_link_groups = cxx_is_gnu(ctx) and link_group_mappings + gnu_use_link_groups = cxx_is_gnu(ctx) and len(link_group_mappings) > 0 + shlib_deps = [] if link_strategy == LinkStrategy("shared") or gnu_use_link_groups: - shlib_info = merge_shared_libraries( - ctx.actions, - deps = ( - [d.shared_library_info for d in link_deps] + - [d.shared_library_info for d in impl_params.extra_link_roots] - ), + shlib_deps = ( + [d.shared_library_info for d in link_deps] + + [d.shared_library_info for d in impl_params.extra_link_roots] ) - link_group_ctx = LinkGroupContext( - link_group_mappings = link_group_mappings, - link_group_libs = link_group_libs, - link_group_preferred_linkage = link_group_preferred_linkage, - labels_to_links_map = labels_to_links_map, - ) - - def shlib_filter(_name, shared_lib): - return not gnu_use_link_groups or is_link_group_shlib(shared_lib.label, link_group_ctx) + shlib_info = merge_shared_libraries(ctx.actions, deps = shlib_deps) - for name, shared_lib in traverse_shared_library_info(shlib_info, filter_func = shlib_filter).items(): - shared_libs[name] = shared_lib.lib + link_group_ctx = LinkGroupContext( + link_group_mappings = link_group_mappings, + link_group_libs = link_group_libs, + link_group_preferred_linkage = link_group_preferred_linkage, + labels_to_links_map = labels_to_links.map, + targets_consumed_by_link_groups = targets_consumed_by_link_groups, + ) - if gnu_use_link_groups: - # When there are no matches for a pattern based link group, - # `link_group_mappings` will not have an entry associated with the lib. - for _name, link_group_lib in link_group_libs.items(): - shared_libs.update(link_group_lib.shared_libs) + # Set up the shared library symlink tree only when needed + shared_libs = build_shared_libs_for_symlink_tree( + gnu_use_link_groups, + link_group_ctx, + link_strategy, + traverse_shared_library_info(shlib_info), + impl_params.extra_shared_libs, + ) toolchain_info = get_cxx_toolchain_info(ctx) linker_info = toolchain_info.linker_info links = [ LinkArgs(infos = [ LinkInfo( - pre_flags = own_binary_link_flags, + dist_thin_lto_codegen_flags = getattr(ctx.attrs, "dist_thin_lto_codegen_flags", []), + pre_flags = own_exe_link_flags, linkables = [ObjectsLinkable( objects = [out.object for out in cxx_outs] + impl_params.extra_link_input, linker_type = linker_info.type, @@ -472,11 +538,19 @@ def cxx_executable(ctx: AnalysisContext, impl_params: CxxRuleConstructorParams, dep_links, ] + impl_params.extra_link_args + # If there are hidden dependencies of this target, then add them as + # hidden link args. + if impl_params.extra_hidden: + links.append( + LinkArgs(flags = cmd_args(hidden = impl_params.extra_hidden)), + ) + link_result = _link_into_executable( ctx, # If shlib lib tree generation is enabled, pass in the shared libs (which # will trigger the necessary link tree and link args).
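# (Roughly: the tree is a directory of soname -> artifact symlinks placed next
# to the binary, and the link args gain an rpath entry pointing at it so the
# dynamic loader can resolve the shared deps at run time.)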
- shared_libs if impl_params.exe_shared_libs_link_tree else {}, + shared_libs if impl_params.exe_shared_libs_link_tree else [], + impl_params.executable_name, linker_info.binary_extension, link_options( links = links, @@ -488,6 +562,9 @@ def cxx_executable(ctx: AnalysisContext, impl_params: CxxRuleConstructorParams, strip_args_factory = impl_params.strip_args_factory, category_suffix = impl_params.exe_category_suffix, allow_cache_upload = impl_params.exe_allow_cache_upload, + error_handler = impl_params.error_handler, + extra_linker_outputs_factory = impl_params.extra_linker_outputs_factory, + extra_linker_outputs_flags_factory = impl_params.extra_linker_outputs_flags_factory, ), ) binary = link_result.exe @@ -502,7 +579,7 @@ def cxx_executable(ctx: AnalysisContext, impl_params: CxxRuleConstructorParams, output = binary.output, populate_rule_specific_attributes_func = impl_params.cxx_populate_xcode_attributes_func, srcs = impl_params.srcs + impl_params.additional.srcs, - argsfiles = compile_cmd_output.argsfiles.absolute, + argsfiles = compile_cmd_output.argsfiles.xcode, product_name = get_cxx_executable_product_name(ctx), ) sub_targets[XCODE_DATA_SUB_TARGET] = xcode_data_default_info @@ -515,34 +592,30 @@ def cxx_executable(ctx: AnalysisContext, impl_params: CxxRuleConstructorParams, sub_targets["rpath-tree"] = [DefaultInfo( default_output = shared_libs_symlink_tree, other_outputs = [ - lib.output - for lib in shared_libs.values() + shlib.lib.output + for shlib in shared_libs ] + [ - lib.dwp - for lib in shared_libs.values() - if lib.dwp + shlib.lib.dwp + for shlib in shared_libs + if shlib.lib.dwp ], )] - sub_targets["shared-libraries"] = [DefaultInfo( - default_output = ctx.actions.write_json( - binary.output.basename + ".shared-libraries.json", - { - "libraries": ["{}:{}[shared-libraries][{}]".format(ctx.label.path, ctx.label.name, name) for name in shared_libs.keys()], - "librariesdwp": ["{}:{}[shared-libraries][{}][dwp]".format(ctx.label.path, ctx.label.name, name) for name, lib in shared_libs.items() if lib.dwp], - "rpathtree": ["{}:{}[rpath-tree]".format(ctx.label.path, ctx.label.name)] if shared_libs_symlink_tree else [], - }, - ), - sub_targets = { - name: [DefaultInfo( - default_output = lib.output, - sub_targets = {"dwp": [DefaultInfo(default_output = lib.dwp)]} if lib.dwp else {}, - )] - for name, lib in shared_libs.items() - }, - )] + + # TODO(agallagher): There appear to be pre-existing soname conflicts + # when building this (when using link groups), which prevents using + # `with_unique_str_sonames`. + str_soname_shlibs = { + shlib.soname.ensure_str(): shlib + for shlib in shared_libs + if shlib.soname.is_str + } + + readable_mappings = {} + soname_to_group_mappings = {} if link_group_mappings: - readable_mappings = {} for node, group in link_group_mappings.items(): + soname = get_shared_library_name(linker_info, group, True) + soname_to_group_mappings[soname] = group readable_mappings[group] = readable_mappings.get(group, []) + ["{}//{}:{}".format(node.cell, node.package, node.name)] sub_targets[LINK_GROUP_MAPPINGS_SUB_TARGET] = [DefaultInfo( @@ -562,12 +635,42 @@ def cxx_executable(ctx: AnalysisContext, impl_params: CxxRuleConstructorParams, ), )] - # TODO(T110378140): We can't really enable this yet, as Python binaries - # consuming C++ binaries as resources don't know how to handle the - # extraneous debug paths and will crash. We probably need to add a special - # exported resources provider and make sure we handle the workflows.
- # Add any referenced debug paths to runtime files. - #runtime_files.extend(binary.external_debug_info) + shared_libraries_sub_targets = {} + for soname, shlib in str_soname_shlibs.items(): + targets = {"dwp": [DefaultInfo(default_output = shlib.lib.dwp)]} if shlib.lib.dwp else {} + + group = soname_to_group_mappings.get(soname) + if group in readable_mappings: + output_json_file = binary.output.basename + "." + group + LINK_GROUP_MAPPINGS_FILENAME_SUFFIX + targets[LINK_GROUP_MAPPINGS_SUB_TARGET] = [DefaultInfo( + default_output = ctx.actions.write_json( + output_json_file, + {group: readable_mappings[group]}, + ), + )] + shared_libraries_sub_targets[soname] = [DefaultInfo( + default_output = shlib.lib.output, + sub_targets = targets, + )] + + sub_targets["shared-libraries"] = [DefaultInfo( + default_output = ctx.actions.write_json( + binary.output.basename + ".shared-libraries.json", + { + "libraries": [ + "{}:{}[shared-libraries][{}]".format(ctx.label.path, ctx.label.name, soname) + for soname in str_soname_shlibs + ], + "librariesdwp": [ + "{}:{}[shared-libraries][{}][dwp]".format(ctx.label.path, ctx.label.name, soname) + for soname, shlib in str_soname_shlibs.items() + if shlib.lib.dwp + ], + "rpathtree": ["{}:{}[rpath-tree]".format(ctx.label.path, ctx.label.name)] if shared_libs_symlink_tree else [], + }, + ), + sub_targets = shared_libraries_sub_targets, + )] # If we have some resources, write them to the resources JSON file and add # them and all resources to "runtime_files" so that we make sure to materialize @@ -584,13 +687,22 @@ def cxx_executable(ctx: AnalysisContext, impl_params: CxxRuleConstructorParams, binary = binary.output, resources = resources, )) - for resource, other in resources.values(): - runtime_files.append(resource) - runtime_files.extend(other) + for resource in resources.values(): + runtime_files.append(resource.default_output) + runtime_files.extend(resource.other_outputs) if binary.dwp: - # A `dwp` sub-target which generates the `.dwp` file for this binary. - sub_targets["dwp"] = [DefaultInfo(default_output = binary.dwp)] + # A `dwp` sub-target which generates the `.dwp` file for this binary and its shared lib dependencies. + sub_targets["dwp"] = [ + DefaultInfo( + default_output = binary.dwp, + other_outputs = [ + shlib.lib.dwp + for shlib in shared_libs + if shlib.lib.dwp + ], + ), + ] if binary.pdb: # A `pdb` sub-target which generates the `.pdb` file for this binary. @@ -626,12 +738,13 @@ def cxx_executable(ctx: AnalysisContext, impl_params: CxxRuleConstructorParams, default_output = binary.index_argsfile, )] - # Provide a debug info target to make sure debug info is materialized. + # Provide a debug info target to make sure unpacked external debug info + # (dwo) is materialized.
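+    # A minimal sketch of the tset pattern used below, assuming only the
+    # artifact_tset.bzl helpers already loaded elsewhere in this diff:
+    #   tset = make_artifact_tset(actions = ctx.actions, children = [...])
+    #   args = project_artifacts(ctx.actions, [tset])  # flat, materializable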
external_debug_info = make_artifact_tset( actions = ctx.actions, children = ( [binary.external_debug_info] + - [s.external_debug_info for s in shared_libs.values()] + + [s.lib.external_debug_info for s in shared_libs] + impl_params.additional.static_external_debug_info ), ) @@ -644,10 +757,20 @@ def cxx_executable(ctx: AnalysisContext, impl_params: CxxRuleConstructorParams, sub_targets["debuginfo"] = [DefaultInfo( default_output = materialize_external_debug_info, )] + sub_targets["debug_coverage_instrumentation"] = [DefaultInfo( + default_output = materialize_external_debug_info, + )] + + sub_targets["exe"] = [DefaultInfo( + default_output = binary.output, + other_outputs = runtime_files, + )] for additional_subtarget, subtarget_providers in impl_params.additional.subtargets.items(): sub_targets[additional_subtarget] = subtarget_providers + sub_targets.update(link_result.extra_outputs) + return CxxExecutableOutput( binary = binary.output, unstripped_binary = binary.unstripped_output, @@ -663,26 +786,47 @@ def cxx_executable(ctx: AnalysisContext, impl_params: CxxRuleConstructorParams, xcode_data = xcode_data_info, linker_map_data = linker_map_data, link_command_debug_output = link_cmd_debug_output, + dist_info = DistInfo( + shared_libs = shlib_info.set, + nondebug_runtime_files = runtime_files, + ), + sanitizer_runtime_files = link_result.sanitizer_runtime_files, + index_stores = index_stores, ) _CxxLinkExecutableResult = record( # The resulting executable exe = LinkedObject, - # List of files/directories that should be present for executable to be run successfully + # Files that must be present for the executable to run successfully. These + # are always materialized, whether the executable is the output of a build + # or executed as a host tool. runtime_files = list[ArgLike], + # Files needed to debug the executable. These need to be materialized when + # this executable is the output of a build, but not when it is used by other + # rules. + external_debug_info = list[TransitiveSetArgsProjection], # Optional shared libs symlink tree symlinked_dir action shared_libs_symlink_tree = [list[Artifact], Artifact, None], linker_map_data = [CxxLinkerMapData, None], + sanitizer_runtime_files = list[Artifact], + # Extra output providers produced by extra_linker_outputs_factory + extra_outputs = dict[str, list[DefaultInfo]], ) def _link_into_executable( ctx: AnalysisContext, - shared_libs: dict[str, LinkedObject], + shared_libs: list[SharedLibrary], + executable_name: [str, None], binary_extension: str, opts: LinkOptions) -> _CxxLinkExecutableResult: - output = ctx.actions.declare_output("{}{}".format(get_cxx_executable_product_name(ctx), "." + binary_extension if binary_extension else "")) - extra_args, runtime_files, shared_libs_symlink_tree = executable_shared_lib_arguments( - ctx.actions, + if executable_name and binary_extension and executable_name.endswith(binary_extension): + # don't append .exe if it already is .exe + output_name = executable_name + else: + output_name = "{}{}".format(executable_name if executable_name else get_cxx_executable_product_name(ctx), "." 
+ binary_extension if binary_extension else "") + output = ctx.actions.declare_output(output_name) + executable_args = executable_shared_lib_arguments( + ctx, get_cxx_toolchain_info(ctx), output, shared_libs, @@ -694,15 +838,18 @@ def _link_into_executable( result_type = CxxLinkResultType("executable"), opts = merge_link_options( opts, - links = [LinkArgs(flags = extra_args)] + opts.links, + links = [LinkArgs(flags = executable_args.extra_link_args)] + opts.links, ), ) return _CxxLinkExecutableResult( exe = link_result.linked_object, - runtime_files = runtime_files, - shared_libs_symlink_tree = shared_libs_symlink_tree, + runtime_files = executable_args.runtime_files + link_result.sanitizer_runtime_files, + external_debug_info = executable_args.external_debug_info, + shared_libs_symlink_tree = executable_args.shared_libs_symlink_tree, linker_map_data = link_result.linker_map_data, + sanitizer_runtime_files = link_result.sanitizer_runtime_files, + extra_outputs = link_result.extra_outputs if link_result.extra_outputs else {}, ) def get_cxx_executable_product_name(ctx: AnalysisContext) -> str: diff --git a/prelude/cxx/cxx_instrumentation.bzl b/prelude/cxx/cxx_instrumentation.bzl new file mode 100644 index 0000000000000..4c3325e1aa2c7 --- /dev/null +++ b/prelude/cxx/cxx_instrumentation.bzl @@ -0,0 +1,39 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def needs_instrumentation(children: list[bool], contains_headers_selected_for_coverage_instrumentation: bool) -> bool: + return contains_headers_selected_for_coverage_instrumentation or any(children) + +CxxExportedNeedsCoverageInstrumentationTSet = transitive_set( + reductions = { + "needs_instrumentation": needs_instrumentation, + }, +) + +CxxExportedNeedsCoverageInstrumentation = provider(fields = { + "nodes": CxxExportedNeedsCoverageInstrumentationTSet, +}) + +def build_needs_coverage_tset(ctx: AnalysisContext, deps: list[Dependency]) -> CxxExportedNeedsCoverageInstrumentationTSet: + return ctx.actions.tset( + CxxExportedNeedsCoverageInstrumentationTSet, + value = ctx.attrs.exported_needs_coverage_instrumentation if hasattr(ctx.attrs, "exported_needs_coverage_instrumentation") else False, + children = [d.get(CxxExportedNeedsCoverageInstrumentation).nodes for d in deps if d.get(CxxExportedNeedsCoverageInstrumentation) != None], + ) + +def build_exported_needs_coverage(ctx: AnalysisContext, deps: list[Dependency]) -> CxxExportedNeedsCoverageInstrumentation: + return CxxExportedNeedsCoverageInstrumentation( + nodes = build_needs_coverage_tset(ctx, deps), + ) + +def is_coverage_enabled_by_any_dep(ctx: AnalysisContext, deps: list[Dependency]) -> bool: + tset = build_needs_coverage_tset(ctx, deps) + + return tset.reduce("needs_instrumentation") + +def needs_coverage(cxx_exported_needs_coverage: CxxExportedNeedsCoverageInstrumentation) -> bool: + return cxx_exported_needs_coverage.nodes.reduce("needs_instrumentation") diff --git a/prelude/cxx/cxx_library.bzl b/prelude/cxx/cxx_library.bzl index b163c341750ce..36f3ff7d65b96 100644 --- a/prelude/cxx/cxx_library.bzl +++ b/prelude/cxx/cxx_library.bzl @@ -26,16 +26,15 @@ load( "apple_create_frameworks_linkable", "apple_get_link_info_by_deduping_link_infos", ) -load( - "@prelude//apple:xcode.bzl", - "get_project_root_file", -) 
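+# Note on cxx_instrumentation.bzl above: a `transitive_set` reduction is a
+# memoized bottom-up fold over the dep graph, so an illustrative query such as
+#   build_needs_coverage_tset(ctx, deps).reduce("needs_instrumentation")
+# stays cheap when repeated, since already-reduced nodes are not revisited.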
+load("@prelude//apple:resource_groups.bzl", "create_resource_graph") load( "@prelude//apple/swift:swift_runtime.bzl", "create_swift_runtime_linkable", ) +load("@prelude//cxx:headers.bzl", "cxx_attr_exported_headers") load( "@prelude//ide_integrations:xcode.bzl", + "XCODE_ARGSFILES_SUB_TARGET", "XCODE_DATA_SUB_TARGET", "XcodeDataInfo", "generate_xcode_data", @@ -43,6 +42,7 @@ load( load( "@prelude//java:java_providers.bzl", "get_java_packaging_info", + "propagate_global_code_info", ) load("@prelude//linking:execution_preference.bzl", "LinkExecutionPreference", "get_link_execution_preference") load( @@ -63,7 +63,6 @@ load( "LinkInfos", "LinkOrdering", "LinkStrategy", - "Linkage", "LinkedObject", # @unused Used as a type "ObjectsLinkable", "SharedLibLinkable", @@ -71,7 +70,6 @@ load( "SwiftmoduleLinkable", # @unused Used as a type "UnstrippedLinkOutputInfo", "create_merged_link_info", - "create_merged_link_info_for_propagation", "get_lib_output_style", "get_link_args_for_strategy", "get_output_styles_for_linkage", @@ -95,19 +93,27 @@ load( ) load("@prelude//linking:shared_libraries.bzl", "SharedLibraryInfo", "create_shared_libraries", "merge_shared_libraries") load("@prelude//linking:strip.bzl", "strip_debug_info") -load("@prelude//utils:arglike.bzl", "ArgLike") +load("@prelude//linking:types.bzl", "Linkage") +load( + "@prelude//third-party:build.bzl", + "create_third_party_build_info", +) +load("@prelude//unix:providers.bzl", "UnixEnv", "create_unix_env_info") +load("@prelude//utils:expect.bzl", "expect") +load("@prelude//utils:lazy.bzl", "lazy") load( "@prelude//utils:utils.bzl", - "expect", "flatten", - "is_any", "map_val", "value_or", ) +load( + "@prelude//apple/apple_resource_types.bzl", + "CxxResourceSpec", +) load(":archive.bzl", "make_archive") load( ":argsfiles.bzl", - "ABS_ARGSFILES_SUBTARGET", "ARGSFILES_SUBTARGET", "get_argsfiles_output", ) @@ -121,31 +127,50 @@ load( load( ":compile.bzl", "CxxCompileCommandOutput", + "CxxCompileFlavor", "CxxCompileOutput", # @unused Used as a type + "CxxSrcCompileCommand", "compile_cxx", "create_compile_cmds", + "cxx_objects_sub_targets", + "precompile_cxx", ) load(":cxx_context.bzl", "get_cxx_platform_info", "get_cxx_toolchain_info") +load( + ":cxx_instrumentation.bzl", + "build_exported_needs_coverage", + "needs_coverage", +) load( ":cxx_library_utility.bzl", "OBJECTS_SUBTARGET", "cxx_attr_deps", "cxx_attr_exported_deps", + "cxx_attr_link_strategy", "cxx_attr_link_style", "cxx_attr_linker_flags_all", "cxx_attr_preferred_linkage", "cxx_attr_resources", "cxx_inherited_link_info", "cxx_is_gnu", - "cxx_objects_sub_targets", "cxx_platform_supported", "cxx_use_shlib_intfs", + "cxx_use_shlib_intfs_mode", +) +load( + ":cxx_toolchain_types.bzl", + "LinkerType", + "ShlibInterfacesMode", + "is_bitcode_format", ) -load(":cxx_toolchain_types.bzl", "is_bitcode_format") load( ":cxx_types.bzl", + "CxxLibraryInfo", "CxxRuleConstructorParams", # @unused Used as a type ) +load(":diagnostics.bzl", "check_sub_target") +load(":gcno.bzl", "GcnoFilesInfo") +load(":index_store.bzl", "create_index_store_subtargets_and_provider") load( ":link.bzl", "CxxLinkResult", # @unused Used as a type @@ -190,6 +215,11 @@ load( ) load( ":shared_library_interface.bzl", + "SharedInterfaceInfo", # @unused Used as a type + "create_shared_interface_info", + "create_shared_interface_info_with_children", + "generate_exported_symbols", + "generate_tbd_with_symbols", "shared_library_interface", ) @@ -219,13 +249,13 @@ CxxLibraryOutput = record( # its corresponding DWARF debug info. 
# May be None when Split DWARF is disabled, for static/static-pic libraries, # for some types of synthetic link objects or for pre-built shared libraries. - dwp = field([Artifact, None], None), + dwp = field(Artifact | None, None), # A shared library may have an associated PDB file with # its corresponding Windows debug info. - pdb = field([Artifact, None], None), + pdb = field(Artifact | None, None), # The import library is the linkable output of a Windows shared library build. - implib = field([Artifact, None], None), + implib = field(Artifact | None, None), # Data about the linker map, only available on shared libraries # TODO(cjhopman): always available? when is it/is it not available? linker_map = field([CxxLinkerMapData, None], None), @@ -256,6 +286,7 @@ _CxxAllLibraryOutputs = record( providers = field(list[Provider], default = []), # Shared object name to shared library mapping if this target produces a shared library. solib = field([(str, LinkedObject), None]), + sanitizer_runtime_files = field(list[Artifact], []), ) _CxxLibraryCompileOutput = record( @@ -267,6 +298,7 @@ bitcode_objects = field([list[Artifact], None]), # yaml file with optimization remarks about clang compilation clang_remarks = field([list[Artifact], None]), + gcno_files = field([list[Artifact], None]), # json file with trace information about clang compilation clang_traces = field(list[Artifact]), # Externally referenced debug info, which doesn't get linked with the @@ -277,6 +309,10 @@ objects_have_external_debug_info = field(bool), # sub_target for each object objects_sub_targets = field(dict[str, list[DefaultInfo]]), + # the generated index stores + index_stores = field(list[Artifact]), + # diagnostics produced by a typecheck-only build (-fsyntax-only) + diagnostics = field(dict[str, Artifact]), ) # The output of compiling all the source files in the library, containing @@ -286,8 +322,12 @@ _CxxCompiledSourcesOutput = record( compile_cmds = field(CxxCompileCommandOutput), # PIC compile outputs pic = field(_CxxLibraryCompileOutput), + # PIC optimized compile outputs + pic_optimized = field([_CxxLibraryCompileOutput, None]), # Non PIC compile outputs non_pic = field([_CxxLibraryCompileOutput, None]), + # Header unit outputs + header_unit_preprocessors = field(list[CPreprocessor]), ) # The outputs of a cxx_library_parameterized rule. @@ -312,6 +352,8 @@ cxx_compilationdb_info = field([CxxCompilationDbInfo, None], None), # LinkableRootInfo provider, same as above. linkable_root = field([LinkableRootInfo, None], None), + # List of shared libraries for the sanitizer runtime linked into the library + sanitizer_runtime_files = field(list[Artifact], []), ) def cxx_library_parameterized(ctx: AnalysisContext, impl_params: CxxRuleConstructorParams) -> _CxxLibraryParameterizedOutput: @@ -339,8 +381,6 @@ # TODO(T110378095) right now we implement reexport of exported_* flags manually, we should improve/automate that in the macro layer - project_root_file = get_project_root_file(ctx) - # Gather preprocessor inputs.
(own_non_exported_preprocessor_info, test_preprocessor_infos) = cxx_private_preprocessor_info( ctx = ctx, @@ -348,10 +388,10 @@ def cxx_library_parameterized(ctx: AnalysisContext, impl_params: CxxRuleConstruc extra_preprocessors = impl_params.extra_preprocessors, non_exported_deps = non_exported_deps, is_test = impl_params.is_test, - project_root_file = project_root_file, ) - own_exported_preprocessor_info = cxx_exported_preprocessor_info(ctx, impl_params.headers_layout, project_root_file, impl_params.extra_exported_preprocessors) + own_exported_preprocessor_info = cxx_exported_preprocessor_info(ctx, impl_params.headers_layout, impl_params.extra_exported_preprocessors) own_preprocessors = [own_non_exported_preprocessor_info, own_exported_preprocessor_info] + test_preprocessor_infos + own_exported_preprocessors = [own_exported_preprocessor_info] inherited_non_exported_preprocessor_infos = cxx_inherited_preprocessor_infos( non_exported_deps + filter(None, [ctx.attrs.precompiled_header]), @@ -360,18 +400,23 @@ def cxx_library_parameterized(ctx: AnalysisContext, impl_params: CxxRuleConstruc preferred_linkage = cxx_attr_preferred_linkage(ctx) + exported_needs_coverage = build_exported_needs_coverage(ctx, exported_deps + non_exported_deps) compiled_srcs = cxx_compile_srcs( ctx = ctx, impl_params = impl_params, own_preprocessors = own_preprocessors, + own_exported_preprocessors = own_exported_preprocessors, inherited_non_exported_preprocessor_infos = inherited_non_exported_preprocessor_infos, inherited_exported_preprocessor_infos = inherited_exported_preprocessor_infos, preferred_linkage = preferred_linkage, + add_coverage_instrumentation_compiler_flags = needs_coverage(exported_needs_coverage), ) sub_targets = {} providers = [] + providers.append(exported_needs_coverage) + if len(ctx.attrs.tests) > 0 and impl_params.generate_providers.preprocessor_for_tests: providers.append( CPreprocessorForTestsInfo( @@ -381,8 +426,8 @@ def cxx_library_parameterized(ctx: AnalysisContext, impl_params: CxxRuleConstruc ) if impl_params.generate_sub_targets.argsfiles: - sub_targets[ARGSFILES_SUBTARGET] = [get_argsfiles_output(ctx, compiled_srcs.compile_cmds.argsfiles.relative, "argsfiles")] - sub_targets[ABS_ARGSFILES_SUBTARGET] = [get_argsfiles_output(ctx, compiled_srcs.compile_cmds.argsfiles.absolute, "abs-argsfiles")] + sub_targets[ARGSFILES_SUBTARGET] = [get_argsfiles_output(ctx, compiled_srcs.compile_cmds.argsfiles.relative, ARGSFILES_SUBTARGET)] + sub_targets[XCODE_ARGSFILES_SUB_TARGET] = [get_argsfiles_output(ctx, compiled_srcs.compile_cmds.argsfiles.xcode, XCODE_ARGSFILES_SUB_TARGET)] if impl_params.generate_sub_targets.clang_remarks: if compiled_srcs.non_pic and compiled_srcs.non_pic.clang_remarks: @@ -411,6 +456,8 @@ def cxx_library_parameterized(ctx: AnalysisContext, impl_params: CxxRuleConstruc if compiled_srcs.non_pic: objects_sub_targets = objects_sub_targets | compiled_srcs.non_pic.objects_sub_targets sub_targets[OBJECTS_SUBTARGET] = [DefaultInfo(sub_targets = objects_sub_targets)] + if len(compiled_srcs.pic.diagnostics) > 0: + sub_targets["check"] = check_sub_target(ctx, compiled_srcs.pic.diagnostics) # Compilation DB. 
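# (The database emitted here is the standard clang compile_commands.json
# format: one entry per source with "directory", "file" and "arguments";
# tools such as clangd and clang-tidy consume it directly.)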
if impl_params.generate_sub_targets.compilation_database: @@ -427,6 +474,22 @@ def cxx_library_parameterized(ctx: AnalysisContext, impl_params: CxxRuleConstruc comp_db_info = make_compilation_db_info(compiled_srcs.compile_cmds.comp_db_compile_cmds, get_cxx_toolchain_info(ctx), get_cxx_platform_info(ctx)) providers.append(comp_db_info) + # Shared library interfaces are partial lists of exported symbols that are merged at link time. + exported_symbol_outputs = impl_params.extra_shared_library_interfaces if impl_params.extra_shared_library_interfaces else [] + if cxx_use_shlib_intfs_mode(ctx, ShlibInterfacesMode("stub_from_headers")): + transitive_pp = inherited_exported_preprocessor_infos + if _attr_reexport_all_header_dependencies(ctx): + transitive_pp += inherited_non_exported_preprocessor_infos + + cxx_exported_symbols = generate_exported_symbols( + ctx, + cxx_attr_exported_headers(ctx, impl_params.headers_layout), + own_exported_preprocessor_info, + transitive_pp, + ) + exported_symbol_outputs.append(cxx_exported_symbols) + sub_targets["exported-symbols"] = [DefaultInfo(default_outputs = exported_symbol_outputs)] + # Link Groups link_group = get_link_group(ctx) link_group_info = get_link_group_info(ctx) @@ -456,7 +519,7 @@ def cxx_library_parameterized(ctx: AnalysisContext, impl_params: CxxRuleConstruc frameworks_linkable = apple_create_frameworks_linkable(ctx) swiftmodule_linkable = impl_params.swiftmodule_linkable swift_runtime_linkable = create_swift_runtime_linkable(ctx) - dep_infos, link_group_map, link_execution_preference = _get_shared_library_links( + dep_infos, link_group_map, link_execution_preference, shared_interface_info = _get_shared_library_links( ctx, get_linkable_graph_node_map_func(deps_linkable_graph), link_group, @@ -470,6 +533,7 @@ def cxx_library_parameterized(ctx: AnalysisContext, impl_params: CxxRuleConstruc swiftmodule_linkable, force_static_follows_dependents = impl_params.link_groups_force_static_follows_dependents, swift_runtime_linkable = swift_runtime_linkable, + exported_symbol_outputs = exported_symbol_outputs, ) if impl_params.generate_sub_targets.link_group_map and link_group_map: sub_targets[LINK_GROUP_MAP_DATABASE_SUB_TARGET] = [link_group_map] @@ -491,8 +555,10 @@ def cxx_library_parameterized(ctx: AnalysisContext, impl_params: CxxRuleConstruc extra_static_linkables = extra_static_linkables, gnu_use_link_groups = cxx_is_gnu(ctx) and bool(link_group_mappings), link_execution_preference = link_execution_preference, + shared_interface_info = shared_interface_info, ) solib_as_dict = {library_outputs.solib[0]: library_outputs.solib[1]} if library_outputs.solib else {} + shared_libs = create_shared_libraries(ctx, solib_as_dict) for _, link_style_output in library_outputs.outputs.items(): for key in link_style_output.sub_targets.keys(): @@ -525,6 +591,11 @@ def cxx_library_parameterized(ctx: AnalysisContext, impl_params: CxxRuleConstruc # Add any subtargets for this output style. output_style_sub_targets.update(output.sub_targets) + # TBD outputs are collected for each link unit, so propagate whenever + # a library is being linked statically. 
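+    # (Illustrative background: a .tbd is a text stub listing exported symbols;
+    # linking against the merged stubs lets dependents resolve the interface
+    # without materializing the full shared library.)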
+ if output_style != LibOutputStyle("shared_lib") and shared_interface_info != None: + output_style_providers.append(shared_interface_info) + if impl_params.generate_sub_targets.link_style_outputs: if output: sub_targets[subtarget_for_output_style(output_style)] = [DefaultInfo( @@ -563,7 +634,7 @@ def cxx_library_parameterized(ctx: AnalysisContext, impl_params: CxxRuleConstruc output = default_output.default if default_output else None, populate_rule_specific_attributes_func = impl_params.cxx_populate_xcode_attributes_func, srcs = impl_params.srcs + impl_params.additional.srcs, - argsfiles = compiled_srcs.compile_cmds.argsfiles.absolute, + argsfiles = compiled_srcs.compile_cmds.argsfiles.xcode, product_name = get_default_cxx_library_product_name(ctx, impl_params), ) sub_targets[XCODE_DATA_SUB_TARGET] = xcode_data_default_info @@ -575,15 +646,6 @@ def cxx_library_parameterized(ctx: AnalysisContext, impl_params: CxxRuleConstruc inherited_non_exported_link = cxx_inherited_link_info(non_exported_deps) inherited_exported_link = cxx_inherited_link_info(exported_deps) - # TODO(cjhopman): This is strange that we construct this intermediate MergedLinkInfo rather than just - # passing the full list of deps below, but I'm keeping it to preserve existing behavior with a refactor. - # I intend to change completely how MergedLinkInfo works, so this should go away then. We cannot just - # pass these to create_merged_link_info because the for_propagation one is used to filter out deps for - # individual link strategies where that dep doesn't provide a linkinfo (which may itself be a bug, but not - # sure). - inherited_non_exported_link = create_merged_link_info_for_propagation(ctx, inherited_non_exported_link) - inherited_exported_link = create_merged_link_info_for_propagation(ctx, inherited_exported_link) - merged_native_link_info = create_merged_link_info( ctx, pic_behavior, @@ -591,9 +653,9 @@ def cxx_library_parameterized(ctx: AnalysisContext, impl_params: CxxRuleConstruc library_outputs.link_infos, preferred_linkage = preferred_linkage, # Export link info from non-exported deps (when necessary). - deps = [inherited_non_exported_link], + deps = inherited_non_exported_link, # Export link info from out (exported) deps. - exported_deps = [inherited_exported_link], + exported_deps = inherited_exported_link, frameworks_linkable = frameworks_linkable, swiftmodule_linkable = swiftmodule_linkable, swift_runtime_linkable = swift_runtime_linkable, @@ -609,15 +671,71 @@ def cxx_library_parameterized(ctx: AnalysisContext, impl_params: CxxRuleConstruc if impl_params.generate_providers.shared_libraries: providers.append(merge_shared_libraries( ctx.actions, - create_shared_libraries(ctx, solib_as_dict), + shared_libs, filter(None, [x.get(SharedLibraryInfo) for x in non_exported_deps]) + filter(None, [x.get(SharedLibraryInfo) for x in exported_deps]), )) + providers.append( + create_unix_env_info( + actions = ctx.actions, + env = UnixEnv( + label = ctx.label, + native_libs = [shared_libs], + ), + deps = exported_deps + non_exported_deps, + ), + ) propagated_preprocessor_merge_list = inherited_exported_preprocessor_infos if _attr_reexport_all_header_dependencies(ctx): propagated_preprocessor_merge_list = inherited_non_exported_preprocessor_infos + propagated_preprocessor_merge_list - propagated_preprocessor = cxx_merge_cpreprocessors(ctx, [own_exported_preprocessor_info], propagated_preprocessor_merge_list) + + # Header unit PCM. 
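+    # (A header unit precompiles exported headers into a module (PCM) artifact;
+    # each preprocessor built below carries its `.module` output plus the flags
+    # dependents need to consume it instead of re-parsing the headers.)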
+ if impl_params.generate_sub_targets.header_unit: + if compiled_srcs.header_unit_preprocessors: + header_unit_preprocessors = [] + header_unit_sub_targets = [] + for x in compiled_srcs.header_unit_preprocessors: + header_unit_preprocessors.append(x) + header_unit_sub_targets.append([ + DefaultInfo(default_outputs = [h.module for h in x.header_units]), + cxx_merge_cpreprocessors( + ctx, + own_exported_preprocessors + header_unit_preprocessors, + propagated_preprocessor_merge_list, + ), + ]) + sub_targets["header-unit"] = [ + DefaultInfo( + default_outputs = [ + h.module + for x in header_unit_preprocessors + for h in x.header_units + ], + sub_targets = { + str(i): x + for i, x in enumerate(header_unit_sub_targets) + }, + ), + header_unit_sub_targets[-1][1], + ] + if impl_params.export_header_unit: + own_exported_preprocessors.extend(header_unit_preprocessors) + else: + sub_targets["header-unit"] = [ + DefaultInfo(), + cxx_merge_cpreprocessors( + ctx, + own_exported_preprocessors, + propagated_preprocessor_merge_list, + ), + ] + + propagated_preprocessor = cxx_merge_cpreprocessors( + ctx, + own_exported_preprocessors, + propagated_preprocessor_merge_list, + ) if impl_params.generate_providers.preprocessors: providers.append(propagated_preprocessor) @@ -625,6 +743,23 @@ def cxx_library_parameterized(ctx: AnalysisContext, impl_params: CxxRuleConstruc propagated_exported_preprocessor_info = propagated_preprocessor if impl_params.rule_type == "apple_library" and ctx.attrs.modular else None additional_providers = impl_params.additional.additional_providers_factory(propagated_exported_preprocessor_info) if impl_params.additional.additional_providers_factory else [] + if impl_params.generate_providers.third_party_build: + third_party_build_info = create_third_party_build_info( + ctx = ctx, + cxx_headers = [propagated_preprocessor], + shared_libs = shared_libs.libraries, + deps = exported_deps + non_exported_deps, + ) + providers.append(third_party_build_info) + sub_targets["third-party-build"] = [ + DefaultInfo( + default_output = third_party_build_info.build.root.artifact, + sub_targets = dict( + manifest = [DefaultInfo(default_output = third_party_build_info.build.manifest)], + ), + ), + ] + # For v1's `#headers` functionality. if impl_params.generate_sub_targets.headers: sub_targets["headers"] = [propagated_preprocessor, create_merged_link_info( @@ -652,6 +787,16 @@ def cxx_library_parameterized(ctx: AnalysisContext, impl_params: CxxRuleConstruc for additional_subtarget, subtarget_providers in impl_params.additional.subtargets.items(): sub_targets[additional_subtarget] = subtarget_providers + # Index store from swift compile + index_stores = impl_params.index_stores if impl_params.index_stores else [] + + # Index stores from cxx compile. We only generate the index store for pic + if compiled_srcs.pic: + index_stores.extend(compiled_srcs.pic.index_stores) + index_store_subtargets, index_store_info = create_index_store_subtargets_and_provider(ctx, index_stores, non_exported_deps + exported_deps) + sub_targets.update(index_store_subtargets) + providers.append(index_store_info) + linker_flags = cxx_attr_linker_flags_all(ctx) # Omnibus root provider. 
@@ -663,6 +808,7 @@ def cxx_library_parameterized(ctx: AnalysisContext, impl_params: CxxRuleConstruc soname = None linker_type = get_cxx_toolchain_info(ctx).linker_info.type linkable_root = create_linkable_root( + label = ctx.label, name = soname, link_infos = LinkInfos( default = LinkInfo( @@ -711,15 +857,18 @@ ctx = ctx, default_soname = _soname(ctx, impl_params), preferred_linkage = preferred_linkage, + default_link_strategy = cxx_attr_link_strategy(ctx.attrs), deps = non_exported_deps, exported_deps = exported_deps, # If we don't have link input for this link style, we pass in `None` so # that omnibus knows to avoid it. include_in_android_mergemap = getattr(ctx.attrs, "include_in_android_merge_map_output", True) and default_output != None, link_infos = library_outputs.link_infos, - shared_libs = solib_as_dict, + shared_libs = shared_libs, linker_flags = linker_flags, can_be_asset = getattr(ctx.attrs, "can_be_asset", False) or False, + # We don't want to propagate shared interfaces across shared library boundaries. + shared_interface_info = None if preferred_linkage == Linkage("shared") else create_shared_interface_info(ctx, exported_symbol_outputs, []), ), excluded = {ctx.label: None} if not value_or(ctx.attrs.supports_merged_linking, True) else {}, ), @@ -729,11 +878,22 @@ # C++ resource. if impl_params.generate_providers.resources: - providers.append(ResourceInfo(resources = gather_resources( + resources = cxx_attr_resources(ctx) + cxx_resource_info = ResourceInfo(resources = gather_resources( label = ctx.label, - resources = cxx_attr_resources(ctx), + resources = resources, deps = non_exported_deps + exported_deps, - ))) + )) + providers += [cxx_resource_info] + if impl_params.generate_providers.cxx_resources_as_apple_resources: + apple_resource_graph = create_resource_graph( + ctx = ctx, + labels = ctx.attrs.labels, + deps = non_exported_deps, + exported_deps = exported_deps, + cxx_resource_spec = CxxResourceSpec(resources = resources) if resources else None, + ) + providers += [apple_resource_graph] if impl_params.generate_providers.template_placeholders: templ_vars = {} @@ -741,26 +901,28 @@ # Some rules, e.g. fbcode//thrift/lib/cpp:thrift-core-module # define preprocessor flags as things like: -DTHRIFT_PLATFORM_CONFIG= # and unless they get quoted, they break shell syntax.
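# (For example, `-DTHRIFT_PLATFORM_CONFIG=` only survives macro expansion if
# rendered as '-DTHRIFT_PLATFORM_CONFIG='; `quote = "shell"` below applies
# exactly that quoting, while include dirs are left unquoted as plain paths.)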
- cxx_preprocessor_flags = cmd_args() cxx_compiler_info = get_cxx_toolchain_info(ctx).cxx_compiler_info - cxx_preprocessor_flags.add(cmd_args(cxx_compiler_info.preprocessor_flags or [], quote = "shell")) - cxx_preprocessor_flags.add(cmd_args(propagated_preprocessor.set.project_as_args("args"), quote = "shell")) - cxx_preprocessor_flags.add(propagated_preprocessor.set.project_as_args("include_dirs")) + cxx_preprocessor_flags = cmd_args( + cmd_args(cxx_compiler_info.preprocessor_flags or [], quote = "shell"), + cmd_args(propagated_preprocessor.set.project_as_args("args"), quote = "shell"), + propagated_preprocessor.set.project_as_args("include_dirs"), + ) templ_vars["cxxppflags"] = cxx_preprocessor_flags - c_preprocessor_flags = cmd_args() c_compiler_info = get_cxx_toolchain_info(ctx).c_compiler_info - c_preprocessor_flags.add(cmd_args(c_compiler_info.preprocessor_flags or [], quote = "shell")) - c_preprocessor_flags.add(cmd_args(propagated_preprocessor.set.project_as_args("args"), quote = "shell")) - c_preprocessor_flags.add(propagated_preprocessor.set.project_as_args("include_dirs")) + c_preprocessor_flags = cmd_args( + cmd_args(c_compiler_info.preprocessor_flags or [], quote = "shell"), + cmd_args(propagated_preprocessor.set.project_as_args("args"), quote = "shell"), + propagated_preprocessor.set.project_as_args("include_dirs"), + ) templ_vars["cppflags"] = c_preprocessor_flags # Add in ldflag macros. for link_strategy in (LinkStrategy("static"), LinkStrategy("static_pic")): name = "ldflags-" + link_strategy.value.replace("_", "-") - args = cmd_args() + args = [] linker_info = get_cxx_toolchain_info(ctx).linker_info - args.add(linker_info.linker_flags or []) + args.append(linker_info.linker_flags or []) # Normally, we call get_link_args_for_strategy for getting the args for our own link from our # deps. This case is a bit different as we are effectively trying to get the args for how this library @@ -770,8 +932,8 @@ [merged_native_link_info], link_strategy, ) - args.add(unpack_link_args(link_args)) - templ_vars[name] = args + args.append(unpack_link_args(link_args)) + templ_vars[name] = cmd_args(args) # TODO(T110378127): To implement `$(ldflags-shared ...)` properly, we'd need # to set up a symlink tree rule for all transitive shared libs. Since this @@ -790,6 +952,9 @@ if impl_params.generate_providers.java_packaging_info: providers.append(get_java_packaging_info(ctx, non_exported_deps + exported_deps)) + if impl_params.generate_providers.java_global_code_info: + providers.append(propagate_global_code_info(ctx, ctx.attrs.deps + ctx.attrs.exported_deps)) + # TODO(T107163344) this shouldn't be in cxx_library itself, use overlays to remove it.
if impl_params.generate_providers.android_packageable_info: providers.append(merge_android_packageable_info(ctx.label, ctx.actions, non_exported_deps + exported_deps)) @@ -806,6 +971,7 @@ def cxx_library_parameterized(ctx: AnalysisContext, impl_params: CxxRuleConstruc pass default_output = unknown() + default_info = DefaultInfo( default_output = default_output.default if default_output != None else None, other_outputs = default_output.other if default_output != None else [], @@ -820,12 +986,20 @@ def cxx_library_parameterized(ctx: AnalysisContext, impl_params: CxxRuleConstruc merge_link_group_lib_info( label = ctx.label, name = link_group, - shared_libs = solib_as_dict, + shared_libs = shared_libs, shared_link_infos = library_outputs.link_infos.get(LibOutputStyle("shared_lib")), deps = exported_deps + non_exported_deps, ), ) + if getattr(ctx.attrs, "_meta_apple_library_validation_enabled", False): + providers.append( + CxxLibraryInfo( + target = ctx.label, + labels = ctx.attrs.labels, + ), + ) + return _CxxLibraryParameterizedOutput( default_output = default_output, all_outputs = library_outputs, @@ -835,6 +1009,7 @@ def cxx_library_parameterized(ctx: AnalysisContext, impl_params: CxxRuleConstruc xcode_data_info = xcode_data_info, cxx_compilationdb_info = comp_db_info, linkable_root = linkable_root, + sanitizer_runtime_files = library_outputs.sanitizer_runtime_files, ) def get_default_cxx_library_product_name(ctx, impl_params) -> str: @@ -847,9 +1022,13 @@ def get_default_cxx_library_product_name(ctx, impl_params) -> str: if output_style == LibOutputStyle("shared_lib"): return _soname(ctx, impl_params) else: - return _base_static_library_name(ctx, False) + return _base_static_library_name(ctx, optimized = False, stripped = False) -def _get_library_compile_output(ctx, outs: list[CxxCompileOutput], extra_link_input) -> _CxxLibraryCompileOutput: +def _get_library_compile_output( + ctx: AnalysisContext, + src_compile_cmds: list[CxxSrcCompileCommand], + outs: list[CxxCompileOutput], + extra_link_input: list[Artifact]) -> _CxxLibraryCompileOutput: objects = [out.object for out in outs] stripped_objects = _strip_objects(ctx, objects) @@ -867,15 +1046,30 @@ def _get_library_compile_output(ctx, outs: list[CxxCompileOutput], extra_link_in objects += extra_link_input stripped_objects += extra_link_input + index_stores = [ + out.index_store + for out in outs + if out.index_store + ] + + diagnostics = { + compile_cmd.src.short_path: out.diagnostics + for compile_cmd, out in zip(src_compile_cmds, outs) + if out.diagnostics != None + } + return _CxxLibraryCompileOutput( objects = objects, stripped_objects = stripped_objects, bitcode_objects = bitcode_objects, clang_traces = [out.clang_trace for out in outs if out.clang_trace != None], clang_remarks = [out.clang_remarks for out in outs if out.clang_remarks != None], + gcno_files = [out.gcno_file for out in outs if out.gcno_file != None], external_debug_info = [out.external_debug_info for out in outs if out.external_debug_info != None], - objects_have_external_debug_info = is_any(lambda out: out.object_has_external_debug_info, outs), + objects_have_external_debug_info = lazy.is_any(lambda out: out.object_has_external_debug_info, outs), objects_sub_targets = objects_sub_targets, + index_stores = index_stores, + diagnostics = diagnostics, ) def cxx_compile_srcs( @@ -884,32 +1078,90 @@ def cxx_compile_srcs( own_preprocessors: list[CPreprocessor], inherited_non_exported_preprocessor_infos: list[CPreprocessorInfo], inherited_exported_preprocessor_infos: 
list[CPreprocessorInfo], - preferred_linkage: Linkage) -> _CxxCompiledSourcesOutput: + preferred_linkage: Linkage, + add_coverage_instrumentation_compiler_flags: bool, + own_exported_preprocessors: list[CPreprocessor] = []) -> _CxxCompiledSourcesOutput: """ Compile objects we'll need for archives and shared libraries. """ # Create the commands and argsfiles to use for compiling each source file + if own_exported_preprocessors: + header_preprocessor_info = cxx_merge_cpreprocessors( + ctx, + own_exported_preprocessors, + inherited_exported_preprocessor_infos, + ) + else: + header_preprocessor_info = CPreprocessorInfo() compile_cmd_output = create_compile_cmds( ctx = ctx, impl_params = impl_params, own_preprocessors = own_preprocessors, inherited_preprocessor_infos = inherited_non_exported_preprocessor_infos + inherited_exported_preprocessor_infos, + header_preprocessor_info = header_preprocessor_info, + add_coverage_instrumentation_compiler_flags = add_coverage_instrumentation_compiler_flags, ) + # Define header unit. + header_unit_preprocessors = precompile_cxx(ctx, impl_params, own_exported_preprocessors, compile_cmd_output) + # Define object files. - pic_cxx_outs = compile_cxx(ctx, compile_cmd_output.src_compile_cmds, pic = True) - pic = _get_library_compile_output(ctx, pic_cxx_outs, impl_params.extra_link_input) + pic_cxx_outs = compile_cxx( + ctx = ctx, + src_compile_cmds = compile_cmd_output.src_compile_cmds, + flavor = CxxCompileFlavor("pic"), + provide_syntax_only = True, + use_header_units = impl_params.use_header_units, + ) + pic = _get_library_compile_output( + ctx = ctx, + src_compile_cmds = compile_cmd_output.src_compile_cmds, + outs = pic_cxx_outs, + extra_link_input = impl_params.extra_link_input, + ) non_pic = None + pic_optimized = None if preferred_linkage != Linkage("shared"): - non_pic_cxx_outs = compile_cxx(ctx, compile_cmd_output.src_compile_cmds, pic = False) - non_pic = _get_library_compile_output(ctx, non_pic_cxx_outs, impl_params.extra_link_input) + non_pic_cxx_outs = compile_cxx( + ctx = ctx, + src_compile_cmds = compile_cmd_output.src_compile_cmds, + flavor = CxxCompileFlavor("default"), + # Diagnostics from the pic and non-pic compilation would be + # identical. We can avoid instantiating a second set of actions. + provide_syntax_only = False, + use_header_units = impl_params.use_header_units, + ) + non_pic = _get_library_compile_output( + ctx = ctx, + src_compile_cmds = compile_cmd_output.src_compile_cmds, + outs = non_pic_cxx_outs, + extra_link_input = impl_params.extra_link_input, + ) + + if get_cxx_toolchain_info(ctx).optimization_compiler_flags_EXPERIMENTAL: + optimized_cxx_outs = compile_cxx( + ctx = ctx, + src_compile_cmds = compile_cmd_output.src_compile_cmds, + flavor = CxxCompileFlavor("pic_optimized"), + # Diagnostics from the pic and non-pic compilation would be + # identical. We can avoid instantiating a second set of actions. 
+ provide_syntax_only = False, + ) + pic_optimized = _get_library_compile_output( + ctx = ctx, + src_compile_cmds = compile_cmd_output.src_compile_cmds, + outs = optimized_cxx_outs, + extra_link_input = impl_params.extra_link_input, + ) return _CxxCompiledSourcesOutput( compile_cmds = compile_cmd_output, pic = pic, + pic_optimized = pic_optimized, non_pic = non_pic, + header_unit_preprocessors = header_unit_preprocessors, ) def _form_library_outputs( @@ -920,12 +1172,15 @@ def _form_library_outputs( dep_infos: LinkArgs, extra_static_linkables: list[[FrameworksLinkable, SwiftmoduleLinkable, SwiftRuntimeLinkable]], gnu_use_link_groups: bool, - link_execution_preference: LinkExecutionPreference) -> _CxxAllLibraryOutputs: + link_execution_preference: LinkExecutionPreference, + shared_interface_info: [SharedInterfaceInfo, None]) -> _CxxAllLibraryOutputs: # Build static/shared libs and the link info we use to export them to dependents. outputs = {} solib = None link_infos = {} providers = [] + sanitizer_runtime_files = [] + gcno_files = [] linker_flags = cxx_attr_linker_flags_all(ctx) @@ -940,6 +1195,7 @@ def _form_library_outputs( # We don't know which outputs consumers may want, so we define all the possibilities given our preferred linkage. for output_style in get_output_styles_for_linkage(preferred_linkage): output = None + optimized_info = None stripped = None info = None @@ -953,6 +1209,27 @@ def _form_library_outputs( if not lib_compile_output: fail("output_style {} requires non_pic compiled srcs, but didn't have any in {}".format(output_style, compiled_srcs)) + gcno_files += lib_compile_output.gcno_files + + if pic and compiled_srcs.pic_optimized and compiled_srcs.pic_optimized.objects: + _, optimized_info = _static_library( + ctx, + impl_params, + compiled_srcs.pic_optimized.objects, + objects_have_external_debug_info = compiled_srcs.pic_optimized.objects_have_external_debug_info, + external_debug_info = make_artifact_tset( + ctx.actions, + label = ctx.label, + artifacts = compiled_srcs.pic_optimized.external_debug_info, + children = impl_params.additional.static_external_debug_info, + ), + pic = pic, + optimized = True, + stripped = False, + extra_linkables = extra_static_linkables, + bitcode_objects = compiled_srcs.pic_optimized.bitcode_objects, + ) + # Only generate an archive if we have objects to include if lib_compile_output.objects: output, info = _static_library( @@ -968,6 +1245,7 @@ def _form_library_outputs( ), pic = pic, stripped = False, + optimized = False, extra_linkables = extra_static_linkables, bitcode_objects = lib_compile_output.bitcode_objects, ) @@ -977,6 +1255,7 @@ def _form_library_outputs( lib_compile_output.stripped_objects, pic = pic, stripped = True, + optimized = False, extra_linkables = extra_static_linkables, bitcode_objects = lib_compile_output.bitcode_objects, ) @@ -1001,22 +1280,25 @@ def _form_library_outputs( label = ctx.label, artifacts = external_debug_artifacts, children = impl_params.additional.shared_external_debug_info, + tags = impl_params.additional.external_debug_info_tags, ) - extra_linker_flags, extra_linker_outputs = impl_params.extra_linker_outputs_factory(ctx) + gcno_files += compiled_srcs.pic.gcno_files + result = _shared_library( - ctx, - impl_params, - compiled_srcs.pic.objects, - external_debug_info, - dep_infos, - gnu_use_link_groups, - extra_linker_flags = extra_linker_flags, + ctx = ctx, + impl_params = impl_params, + objects = compiled_srcs.pic.objects, + external_debug_info = external_debug_info, + dep_infos = dep_infos, + 
gnu_use_link_groups = gnu_use_link_groups, link_ordering = map_val(LinkOrdering, ctx.attrs.link_ordering), link_execution_preference = link_execution_preference, + shared_interface_info = shared_interface_info, ) shlib = result.link_result.linked_object info = result.info + extra_outputs = result.link_result.extra_outputs link_cmd_debug_output_file = None link_cmd_debug_output = make_link_command_debug_output(shlib) @@ -1033,7 +1315,7 @@ def _form_library_outputs( external_debug_info = shlib.external_debug_info, dwp = shlib.dwp, linker_map = result.link_result.linker_map_data, - sub_targets = extra_linker_outputs | { + sub_targets = extra_outputs | { "linker.argsfile": [DefaultInfo( default_output = shlib.linker_argsfile, )], @@ -1054,6 +1336,12 @@ def _form_library_outputs( providers.append(result.link_result.link_execution_preference_info) + link_sanitizer_runtime_files = result.link_result.sanitizer_runtime_files + if link_sanitizer_runtime_files: + if sanitizer_runtime_files: + fail("Cannot specify sanitizer runtime files multiple times") + sanitizer_runtime_files = link_sanitizer_runtime_files + # you cannot link against header only libraries so create an empty link info info = info if info != None else LinkInfo() if output: @@ -1061,13 +1349,25 @@ def _form_library_outputs( link_infos[output_style] = LinkInfos( default = ldflags(info), stripped = ldflags(stripped) if stripped != None else None, + optimized = ldflags(optimized_info) if optimized_info != None else None, ) + if get_cxx_toolchain_info(ctx).gcno_files: + deps_gcno_files = [ + x[GcnoFilesInfo].gcno_files + for x in ctx.attrs.deps + ctx.attrs.exported_deps + if GcnoFilesInfo in x + ] + providers.append(GcnoFilesInfo( + gcno_files = dedupe(flatten(deps_gcno_files) + gcno_files), + )) + return _CxxAllLibraryOutputs( outputs = outputs, link_infos = link_infos, providers = providers, solib = solib, + sanitizer_runtime_files = sanitizer_runtime_files, ) def _strip_objects(ctx: AnalysisContext, objects: list[Artifact]) -> list[Artifact]: @@ -1075,9 +1375,16 @@ def _strip_objects(ctx: AnalysisContext, objects: list[Artifact]) -> list[Artifa Return new objects with debug info stripped. """ + cxx_toolchain_info = get_cxx_toolchain_info(ctx) + # Stripping is not supported on Windows - linker_type = get_cxx_toolchain_info(ctx).linker_info.type - if linker_type == "windows": + linker_type = cxx_toolchain_info.linker_info.type + if linker_type == LinkerType("windows"): + return objects + + # Disable stripping if no `strip` binary was provided by the toolchain. + if cxx_toolchain_info.binary_utilities_info == None or \ + cxx_toolchain_info.binary_utilities_info.strip == None: return objects outs = [] @@ -1101,8 +1408,9 @@ def _get_shared_library_links( force_link_group_linking, frameworks_linkable: [FrameworksLinkable, None], swiftmodule_linkable: [SwiftmoduleLinkable, None], + exported_symbol_outputs: list[Artifact], force_static_follows_dependents: bool = True, - swift_runtime_linkable: [SwiftRuntimeLinkable, None] = None) -> (LinkArgs, [DefaultInfo, None], LinkExecutionPreference): + swift_runtime_linkable: [SwiftRuntimeLinkable, None] = None) -> (LinkArgs, [DefaultInfo, None], LinkExecutionPreference, [SharedInterfaceInfo, None]): """ Returns LinkArgs with the content to link, and a link group map json output if applicable. 
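The `GcnoFilesInfo` handling above follows the prelude's usual bottom-up provider aggregation: each library merges its own `.gcno` outputs with whatever its direct and exported deps already collected, so consumers only ever read a single provider. A minimal sketch of that pattern (the `_collect_gcno_files` helper name is hypothetical; `flatten` is loaded from `@prelude//utils:utils.bzl` and `dedupe` is a Starlark builtin):

    def _collect_gcno_files(ctx: AnalysisContext, own_gcno_files: list[Artifact]) -> GcnoFilesInfo:
        # Gather the already-merged provider from every dep that exposes one.
        deps_gcno_files = [
            dep[GcnoFilesInfo].gcno_files
            for dep in ctx.attrs.deps + ctx.attrs.exported_deps
            if GcnoFilesInfo in dep
        ]

        # dedupe() collapses diamond dependencies: two deps re-exporting the
        # same library contribute its .gcno artifacts only once.
        return GcnoFilesInfo(gcno_files = dedupe(flatten(deps_gcno_files) + own_gcno_files))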
@@ -1117,21 +1425,10 @@ def _get_shared_library_links( # If we're not filtering for link groups, link against the shared dependencies if not link_group_mappings and not force_link_group_linking: - deps_merged_link_infos = cxx_inherited_link_info(dedupe(flatten([non_exported_deps, exported_deps]))) - - # Even though we're returning the shared library links, we must still - # respect the `link_style` attribute of the target which controls how - # all deps get linked. For example, you could be building the shared - # output of a library which has `link_style = "static"`. - # - # The fallback equivalent code in Buck v1 is in CxxLibraryFactor::createBuildRule() - # where link style is determined using the `linkableDepType` variable. - link_strategy_value = ctx.attrs.link_style if ctx.attrs.link_style != None else "shared" - - # Note if `static` link style is requested, we assume `static_pic` - # instead, so that code in the shared library can be correctly - # loaded in the address space of any process at any address. - link_strategy_value = "static_pic" if link_strategy_value == "static" else link_strategy_value + deps = dedupe(flatten([non_exported_deps, exported_deps])) + deps_merged_link_infos = cxx_inherited_link_info(deps) + + link_strategy = cxx_attr_link_strategy(ctx.attrs) # We cannot support deriving link execution preference off the included links, as we've already # lost the information on what is in the link. @@ -1139,6 +1436,10 @@ def _get_shared_library_links( # Not all rules calling `cxx_library_parameterized` have `link_execution_preference`. Notably `cxx_python_extension`. link_execution_preference = get_link_execution_preference(ctx, []) if hasattr(ctx.attrs, "link_execution_preference") else LinkExecutionPreference("any") + # Collect the shared interface providers for this link unit and strategy. + # These are merged when linking shared library output. + shared_interface_info = create_shared_interface_info(ctx, exported_symbol_outputs, deps) + return apple_build_link_args_with_deduped_flags( ctx, deps_merged_link_infos, @@ -1147,10 +1448,10 @@ def _get_shared_library_links( # To get the link_strategy, we have to check the link_strategy against the toolchain's pic_behavior. # # For more info, check the PicBehavior docs. 
-            process_link_strategy_for_pic_behavior(LinkStrategy(link_strategy_value), pic_behavior),
+            process_link_strategy_for_pic_behavior(link_strategy, pic_behavior),
             swiftmodule_linkable,
             swift_runtime_linkable = swift_runtime_linkable,
-        ), None, link_execution_preference
+        ), None, link_execution_preference, shared_interface_info
 
     # Else get filtered link group links
     prefer_stripped = cxx_is_gnu(ctx) and ctx.attrs.prefer_stripped_objects
@@ -1160,8 +1461,11 @@
     if link_strategy == LinkStrategy("static"):
         link_strategy = LinkStrategy("static_pic")
     link_strategy = process_link_strategy_for_pic_behavior(link_strategy, pic_behavior)
-    filtered_labels_to_links_map = get_filtered_labels_to_links_map(
-        linkable_graph_node_map_func(),
+    linkable_graph_label_to_node_map = linkable_graph_node_map_func()
+
+    filtered_labels_to_links = get_filtered_labels_to_links_map(
+        None,
+        linkable_graph_label_to_node_map,
         link_group,
         {},
         link_group_mappings,
@@ -1176,10 +1480,10 @@
         prefer_stripped = prefer_stripped,
         force_static_follows_dependents = force_static_follows_dependents,
     )
-    filtered_links = get_filtered_links(filtered_labels_to_links_map)
-    filtered_targets = get_filtered_targets(filtered_labels_to_links_map)
+    filtered_links = get_filtered_links(filtered_labels_to_links.map)
+    filtered_targets = get_filtered_targets(filtered_labels_to_links.map)
 
-    link_execution_preference = get_link_execution_preference(ctx, filtered_labels_to_links_map.keys())
+    link_execution_preference = get_link_execution_preference(ctx, filtered_labels_to_links.map.keys())
 
     # Unfortunately, link_groups does not use MergedLinkInfo to represent the args
     # for the resolved nodes in the graph.
@@ -1187,7 +1491,18 @@
     if additional_links:
         filtered_links.append(additional_links)
 
-    return LinkArgs(infos = filtered_links), get_link_group_map_json(ctx, filtered_targets), link_execution_preference
+    # Collect the interface providers from the targets in this link group; these will
+    # be merged when linking the shared library output. If this library has no
+    # interface output, then interface generation is disabled and we can skip collection.
+ shared_interface_infos = [] + if len(exported_symbol_outputs) > 0: + for label in filtered_labels_to_links.map.keys(): + linkable_node = linkable_graph_label_to_node_map[label] + if linkable_node.shared_interface_info != None: + shared_interface_infos.append(linkable_node.shared_interface_info) + + shared_interface_info = create_shared_interface_info_with_children(ctx, exported_symbol_outputs, shared_interface_infos) + return LinkArgs(infos = filtered_links), get_link_group_map_json(ctx, filtered_targets), link_execution_preference, shared_interface_info def _use_pic(output_style: LibOutputStyle) -> bool: """ @@ -1203,6 +1518,7 @@ def _static_library( impl_params: CxxRuleConstructorParams, objects: list[Artifact], pic: bool, + optimized: bool, stripped: bool, extra_linkables: list[[FrameworksLinkable, SwiftmoduleLinkable, SwiftRuntimeLinkable]], objects_have_external_debug_info: bool = False, @@ -1220,11 +1536,14 @@ def _static_library( linker_info = get_cxx_toolchain_info(ctx).linker_info linker_type = linker_info.type - base_name = _base_static_library_name(ctx, stripped) + base_name = _base_static_library_name(ctx, optimized, stripped) name = _archive_name(base_name, pic = pic, extension = linker_info.static_library_extension) - archive = make_archive(ctx, name, objects) - bitcode_bundle = _bitcode_bundle(ctx, bitcode_objects, pic, stripped) + # If we have extra hidden deps of this target add them to the archive action + # so they are forced to build for static library output. + archive = make_archive(ctx, name, objects, impl_params.extra_hidden) + + bitcode_bundle = _bitcode_bundle(ctx, bitcode_objects, optimized, pic, stripped) if False: # TODO(nga): bitcode_bundle.artifact def unknown(): @@ -1261,7 +1580,7 @@ def _static_library( # On darwin, the linked output references the archive that contains the # object files instead of the originating objects. object_external_debug_info = [] - if linker_type == "darwin": + if linker_type == LinkerType("darwin"): object_external_debug_info.append(archive.artifact) object_external_debug_info.extend(archive.external_objects) elif objects_have_external_debug_info: @@ -1272,6 +1591,7 @@ def _static_library( label = ctx.label, artifacts = object_external_debug_info, children = [external_debug_info], + tags = impl_params.additional.external_debug_info_tags, ) return ( @@ -1303,13 +1623,14 @@ def _static_library( def _bitcode_bundle( ctx: AnalysisContext, objects: [list[Artifact], None], - pic: bool = False, - stripped: bool = False, + optimized: bool, + pic: bool, + stripped: bool, name_extra = "") -> [BitcodeBundle, None]: if objects == None or len(objects) == 0: return None - base_name = _base_static_library_name(ctx, False) + base_name = _base_static_library_name(ctx, optimized, stripped = False) name = name_extra + _bitcode_bundle_name(base_name, pic, stripped) return make_bitcode_bundle(ctx, name, objects) @@ -1318,7 +1639,7 @@ _CxxSharedLibraryResult = record( link_result = CxxLinkResult, # Shared library name (e.g. SONAME) soname = str, - objects_bitcode_bundle = [Artifact, None], + objects_bitcode_bundle = Artifact | None, # `LinkInfo` used to link against the shared library. 
info = LinkInfo, ) @@ -1330,9 +1651,9 @@ def _shared_library( external_debug_info: ArtifactTSet, dep_infos: LinkArgs, gnu_use_link_groups: bool, - extra_linker_flags: list[ArgLike], link_execution_preference: LinkExecutionPreference, - link_ordering: [LinkOrdering, None] = None) -> _CxxSharedLibraryResult: + link_ordering: [LinkOrdering, None], + shared_interface_info: [SharedInterfaceInfo, None]) -> _CxxSharedLibraryResult: """ Generate a shared library and the associated native link info used by dependents to link against it. @@ -1342,7 +1663,7 @@ def _shared_library( cxx_toolchain = get_cxx_toolchain_info(ctx) linker_info = cxx_toolchain.linker_info - local_bitcode_bundle = _bitcode_bundle(ctx, objects, name_extra = "objects-") + local_bitcode_bundle = _bitcode_bundle(ctx, objects, optimized = False, pic = False, stripped = False, name_extra = "objects-") # NOTE(agallagher): We add exported link flags here because it's what v1 # does, but the intent of exported link flags are to wrap the link output @@ -1350,6 +1671,7 @@ def _shared_library( # generating a link product. linker_flags = cxx_attr_linker_flags_all(ctx) link_info = LinkInfo( + dist_thin_lto_codegen_flags = getattr(ctx.attrs, "dist_thin_lto_codegen_flags", []), pre_flags = ( linker_flags.flags + linker_flags.exported_flags + @@ -1364,23 +1686,35 @@ def _shared_library( post_flags = ( impl_params.extra_exported_link_flags + impl_params.extra_link_flags + - extra_linker_flags + linker_flags.post_flags + (linker_info.shared_dep_runtime_ld_flags or []) # TODO(cjhopman): Why doesn't this add exported_linker_flags.post_flags? ), external_debug_info = external_debug_info, ) + + # If we have extra hidden deps here, add them as hidden inputs + # to the link action so that they are forced to build before linking. + links = [LinkArgs(infos = [link_info]), dep_infos] + if impl_params.extra_hidden: + links.append( + LinkArgs(flags = cmd_args(hidden = impl_params.extra_hidden)), + ) + link_result = cxx_link_shared_library( ctx = ctx, output = soname, opts = link_options( - links = [LinkArgs(infos = [link_info]), dep_infos], + enable_distributed_thinlto = getattr(ctx.attrs, "enable_distributed_thinlto", False), + links = links, identifier = soname, link_ordering = link_ordering, strip = impl_params.strip_executable, strip_args_factory = impl_params.strip_args_factory, link_execution_preference = link_execution_preference, + error_handler = impl_params.error_handler, + extra_linker_outputs_factory = impl_params.extra_linker_outputs_factory, + extra_linker_outputs_flags_factory = impl_params.extra_linker_outputs_flags_factory, ), name = soname if impl_params.use_soname else None, shared_library_flags = impl_params.shared_library_flags, @@ -1390,8 +1724,26 @@ def _shared_library( # If shared library interfaces are enabled, link that and use it as # the shared lib that dependents will link against. if cxx_use_shlib_intfs(ctx): - if not linker_info.produce_interface_from_stub_shared_library: - shlib_for_interface = exported_shlib + mode = get_cxx_toolchain_info(ctx).linker_info.shlib_interfaces + if mode == ShlibInterfacesMode("stub_from_library"): + # Generate a library interface from the linked library output. + # This will prevent relinking rdeps when changes do not affect + # the library symbols. + exported_shlib = shared_library_interface( + ctx = ctx, + shared_lib = exported_shlib, + ) + elif mode == ShlibInterfacesMode("stub_from_headers"): + # Generate a library interface from its deps exported_headers. 
+ # This will allow for linker parallelisation as we do not have + # to wait for dependent libraries to link. + # If the provider is missing this is a non apple_library target, + # so skip producing the interface. + if shared_interface_info != None: + # collect the linker args which are required + # to correctly set symbol visibility. + link_args = [unpack_link_args(link) for link in links] + exported_shlib = generate_tbd_with_symbols(ctx, soname, shared_interface_info.interfaces, link_args) elif not gnu_use_link_groups: # TODO(agallagher): There's a bug in shlib intfs interacting with link # groups, where we don't include the symbols we're meant to export from @@ -1420,22 +1772,15 @@ def _shared_library( identifier = soname + "-interface", link_execution_preference = link_execution_preference, strip = impl_params.strip_executable, + error_handler = impl_params.error_handler, ), name = soname, ) - shlib_for_interface = intf_link_result.linked_object.output - else: - shlib_for_interface = None - - if shlib_for_interface: - # Convert the shared library into an interface. - shlib_interface = shared_library_interface( + exported_shlib = shared_library_interface( ctx = ctx, - shared_lib = shlib_for_interface, + shared_lib = intf_link_result.linked_object.output, ) - exported_shlib = shlib_interface - # Link against import library on Windows. if link_result.linked_object.import_library: exported_shlib = link_result.linked_object.import_library @@ -1465,8 +1810,8 @@ def _soname(ctx: AnalysisContext, impl_params) -> str: return get_shared_library_name_for_param(linker_info, explicit_soname) return get_default_shared_library_name(linker_info, ctx.label) -def _base_static_library_name(ctx: AnalysisContext, stripped: bool) -> str: - return ctx.label.name + ".stripped" if stripped else ctx.label.name +def _base_static_library_name(ctx: AnalysisContext, optimized: bool, stripped: bool) -> str: + return "{}{}{}".format(ctx.label.name, ".optimized" if optimized else "", ".stripped" if stripped else "") def _archive_name(name: str, pic: bool, extension: str) -> str: return "lib{}{}.{}".format(name, ".pic" if pic else "", extension) diff --git a/prelude/cxx/cxx_library_utility.bzl b/prelude/cxx/cxx_library_utility.bzl index d975bf0f4b6ae..5b1e255e0f7e4 100644 --- a/prelude/cxx/cxx_library_utility.bzl +++ b/prelude/cxx/cxx_library_utility.bzl @@ -5,27 +5,31 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
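To make the renamed naming helpers concrete: the `optimized` and `stripped` suffixes compose independently, and the archive name then layers the `pic` marker and extension on top. A worked sketch, assuming a target named `foo` and the default `a` static library extension (the values follow directly from the format strings in `_base_static_library_name` and `_archive_name` above):

    # _base_static_library_name(ctx, optimized = False, stripped = False) == "foo"
    # _base_static_library_name(ctx, optimized = True, stripped = False)  == "foo.optimized"
    # _base_static_library_name(ctx, optimized = True, stripped = True)   == "foo.optimized.stripped"
    # _archive_name("foo.optimized", pic = True, extension = "a")         == "libfoo.optimized.pic.a"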
+load( + "@prelude//:artifacts.bzl", + "ArtifactOutputs", # @unused Used as a type + "single_artifact", +) load("@prelude//:paths.bzl", "paths") load( "@prelude//linking:link_info.bzl", + "LinkStrategy", "LinkStyle", - "Linkage", "LinkerFlags", "MergedLinkInfo", ) -load("@prelude//utils:arglike.bzl", "ArgLike") # @unused Used as a type +load("@prelude//linking:types.bzl", "Linkage") load( "@prelude//utils:utils.bzl", - "expect", "flatten", "from_named_set", ) +load(":cxx_context.bzl", "get_cxx_platform_info", "get_cxx_toolchain_info") load( - ":compile.bzl", - "CxxCompileOutput", # @unused Used as a type + ":cxx_toolchain_types.bzl", + "LinkerType", + "ShlibInterfacesMode", ) -load(":cxx_context.bzl", "get_cxx_platform_info", "get_cxx_toolchain_info") -load(":cxx_toolchain_types.bzl", "ShlibInterfacesMode") load( ":headers.bzl", "cxx_attr_header_namespace", @@ -89,6 +93,23 @@ def cxx_attr_linker_flags(ctx: AnalysisContext) -> list[typing.Any]: (flatten(cxx_by_platform(ctx, ctx.attrs.platform_linker_flags)) if hasattr(ctx.attrs, "platform_linker_flags") else []) ) +# Even though we're returning the shared library links, we must still +# respect the `link_style` attribute of the target which controls how +# all deps get linked. For example, you could be building the shared +# output of a library which has `link_style = "static"`. +# +# The fallback equivalent code in Buck v1 is in CxxLibraryFactor::createBuildRule() +# where link style is determined using the `linkableDepType` variable. + +# Note if `static` link style is requested, we assume `static_pic` +# instead, so that code in the shared library can be correctly +# loaded in the address space of any process at any address. +def cxx_attr_link_strategy(attrs: typing.Any) -> LinkStrategy: + value = attrs.link_style if attrs.link_style != None else "shared" + if value == "static": + value = "static_pic" + return LinkStrategy(value) + def cxx_attr_link_style(ctx: AnalysisContext) -> LinkStyle: if ctx.attrs.link_style != None: return LinkStyle(ctx.attrs.link_style) @@ -110,7 +131,7 @@ def cxx_attr_preferred_linkage(ctx: AnalysisContext) -> Linkage: return Linkage(preferred_linkage) -def cxx_attr_resources(ctx: AnalysisContext) -> dict[str, (Artifact, list[ArgLike])]: +def cxx_attr_resources(ctx: AnalysisContext) -> dict[str, ArtifactOutputs]: """ Return the resources provided by this rule, as a map of resource name to a tuple of the resource artifact and any "other" outputs exposed by it. @@ -121,23 +142,12 @@ def cxx_attr_resources(ctx: AnalysisContext) -> dict[str, (Artifact, list[ArgLik # Use getattr, as apple rules don't have a `resources` parameter. 
for name, resource in from_named_set(getattr(ctx.attrs, "resources", {})).items(): - if type(resource) == "artifact": - other = [] - else: - info = resource[DefaultInfo] - expect( - len(info.default_outputs) == 1, - "expected exactly one default output from {} ({})" - .format(resource, info.default_outputs), - ) - [resource] = info.default_outputs - other = info.other_outputs - resources[paths.join(namespace, name)] = (resource, other) + resources[paths.join(namespace, name)] = single_artifact(resource) return resources def cxx_is_gnu(ctx: AnalysisContext) -> bool: - return get_cxx_toolchain_info(ctx).linker_info.type == "gnu" + return get_cxx_toolchain_info(ctx).linker_info.type == LinkerType("gnu") def cxx_use_shlib_intfs(ctx: AnalysisContext) -> bool: """ @@ -151,6 +161,12 @@ def cxx_use_shlib_intfs(ctx: AnalysisContext) -> bool: linker_info = get_cxx_toolchain_info(ctx).linker_info return linker_info.shlib_interfaces != ShlibInterfacesMode("disabled") +def cxx_use_shlib_intfs_mode(ctx: AnalysisContext, mode: ShlibInterfacesMode) -> bool: + """ + Verify we are using a specific shared library interface mode. + """ + return cxx_use_shlib_intfs(ctx) and get_cxx_toolchain_info(ctx).linker_info.shlib_interfaces == mode + def cxx_platform_supported(ctx: AnalysisContext) -> bool: """ Return whether this rule's `supported_platforms_regex` matches the current @@ -164,17 +180,3 @@ def cxx_platform_supported(ctx: AnalysisContext) -> bool: ctx.attrs.supported_platforms_regex, get_cxx_platform_info(ctx).name, ) - -def cxx_objects_sub_targets(outs: list[CxxCompileOutput]) -> dict[str, list[Provider]]: - objects_sub_targets = {} - for obj in outs: - sub_targets = {} - if obj.clang_trace: - sub_targets["clang-trace"] = [DefaultInfo(obj.clang_trace)] - if obj.clang_remarks: - sub_targets["clang-remarks"] = [DefaultInfo(obj.clang_remarks)] - objects_sub_targets[obj.object.short_path] = [DefaultInfo( - obj.object, - sub_targets = sub_targets, - )] - return objects_sub_targets diff --git a/prelude/cxx/cxx_link_utility.bzl b/prelude/cxx/cxx_link_utility.bzl index bf0fbe58b0126..6b357eed03864 100644 --- a/prelude/cxx/cxx_link_utility.bzl +++ b/prelude/cxx/cxx_link_utility.bzl @@ -7,18 +7,28 @@ load("@prelude//:artifact_tset.bzl", "project_artifacts") load("@prelude//:paths.bzl", "paths") -load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxToolchainInfo") +load( + "@prelude//cxx:cxx_toolchain_types.bzl", + "CxxToolchainInfo", + "LinkerType", +) load("@prelude//cxx:debug.bzl", "SplitDebugMode") load("@prelude//cxx:linker.bzl", "get_rpath_origin") +load("@prelude//cxx:target_sdk_version.bzl", "get_target_sdk_version_flags") load( "@prelude//linking:link_info.bzl", "LinkArgs", "LinkOrdering", # @unused Used as a type - "LinkedObject", # @unused Used as a type "unpack_link_args", + "unpack_link_args_excluding_filelist", "unpack_link_args_filelist", ) load("@prelude//linking:lto.bzl", "LtoMode") +load( + "@prelude//linking:shared_libraries.bzl", + "SharedLibrary", # @unused Used as a type + "create_shlib_symlink_tree", +) load("@prelude//utils:arglike.bzl", "ArgLike") # @unused Used as a type def generates_split_debug(toolchain: CxxToolchainInfo): @@ -36,14 +46,14 @@ def generates_split_debug(toolchain: CxxToolchainInfo): def linker_map_args(toolchain: CxxToolchainInfo, linker_map) -> LinkArgs: linker_type = toolchain.linker_info.type - if linker_type == "darwin": + if linker_type == LinkerType("darwin"): flags = [ "-Xlinker", "-map", "-Xlinker", linker_map, ] - elif linker_type == "gnu": + elif linker_type == 
LinkerType("gnu"): flags = [ "-Xlinker", "-Map", @@ -57,22 +67,28 @@ def linker_map_args(toolchain: CxxToolchainInfo, linker_map) -> LinkArgs: LinkArgsOutput = record( link_args = ArgLike, hidden = list[typing.Any], - pdb_artifact = [Artifact, None], + pdb_artifact = Artifact | None, # The filelist artifact which contains the list of all object files. # Only present for Darwin linkers. Note that object files referenced # _inside_ the filelist are _not_ part of the `hidden` field above. # That's by design - we do not want to materialise _all_ object files # to inspect the filelist. Intended to be used for debugging. - filelist = [Artifact, None], + filelist = Artifact | None, ) +def get_extra_darwin_linker_flags() -> cmd_args: + """ + Returns a cmd_args object filled with hard coded linker flags that should be used for all links with a Darwin toolchain. + """ + return cmd_args("-Wl,-oso_prefix,.") + def make_link_args( + ctx: AnalysisContext, actions: AnalysisActions, cxx_toolchain_info: CxxToolchainInfo, links: list[LinkArgs], suffix = None, output_short_path: [str, None] = None, - is_shared: [bool, None] = None, link_ordering: [LinkOrdering, None] = None) -> LinkArgsOutput: """ Merges LinkArgs. Returns the args, files that must be present for those @@ -86,28 +102,32 @@ def make_link_args( linker_info = cxx_toolchain_info.linker_info linker_type = linker_info.type - # On Apple platforms, DWARF data is contained in the object files - # and executables contains paths to the object files (N_OSO stab). - # - # By default, ld64 will use absolute file paths in N_OSO entries - # which machine-dependent executables. Such executables would not - # be debuggable on any host apart from the host which performed - # the linking. Instead, we want produce machine-independent - # hermetic executables, so we need to relativize those paths. - # - # This is accomplished by passing the `oso-prefix` flag to ld64, - # which will strip the provided prefix from the N_OSO paths. - # - # The flag accepts a special value, `.`, which means it will - # use the current workding directory. This will make all paths - # relative to the parent of `buck-out`. - # - # Because all actions in Buck2 are run from the project root - # and `buck-out` is always inside the project root, we can - # safely pass `.` as the `-oso_prefix` without having to - # write a wrapper script to compute it dynamically. - if linker_type == "darwin": - args.add(["-Wl,-oso_prefix,."]) + if linker_type == LinkerType("darwin"): + # Darwin requires a target triple specified to + # control the deployment target being linked for. + args.add(get_target_sdk_version_flags(ctx)) + + # On Apple platforms, DWARF data is contained in the object files + # and executables contains paths to the object files (N_OSO stab). + # + # By default, ld64 will use absolute file paths in N_OSO entries + # which machine-dependent executables. Such executables would not + # be debuggable on any host apart from the host which performed + # the linking. Instead, we want produce machine-independent + # hermetic executables, so we need to relativize those paths. + # + # This is accomplished by passing the `oso-prefix` flag to ld64, + # which will strip the provided prefix from the N_OSO paths. + # + # The flag accepts a special value, `.`, which means it will + # use the current workding directory. This will make all paths + # relative to the parent of `buck-out`. 
+ # + # Because all actions in Buck2 are run from the project root + # and `buck-out` is always inside the project root, we can + # safely pass `.` as the `-oso_prefix` without having to + # write a wrapper script to compute it dynamically. + args.add(get_extra_darwin_linker_flags()) pdb_artifact = None if linker_info.is_pdb_generated and output_short_path != None: @@ -115,23 +135,26 @@ def make_link_args( pdb_artifact = actions.declare_output(pdb_filename) hidden.append(pdb_artifact.as_output()) + filelists = None + if linker_type == LinkerType("darwin"): + filelists = filter(None, [unpack_link_args_filelist(link) for link in links]) + hidden.extend(filelists) + for link in links: - args.add(unpack_link_args(link, is_shared, link_ordering = link_ordering)) + if filelists: + # If we are using a filelist, only add argument that aren't already in the + # filelist. This is to avoid duplicate inputs in the link command. + args.add(unpack_link_args_excluding_filelist(link, link_ordering = link_ordering)) + else: + args.add(unpack_link_args(link, link_ordering = link_ordering)) - filelists = filter(None, [unpack_link_args_filelist(link) for link in links]) - hidden.extend(filelists) + # On Darwin, filelist args _must_ come last as the order can affect symbol + # resolution and result in binary size increases. filelist_file = None if filelists: - if linker_type == "gnu": - fail("filelist populated for gnu linker") - elif linker_type == "darwin": - # On Darwin, filelist args _must_ come last as there's semantical difference - # of the position. - path = actions.write("filelist%s.txt" % suffix, filelists) - args.add(["-Xlinker", "-filelist", "-Xlinker", path]) - filelist_file = path - else: - fail("Linker type {} not supported".format(linker_type)) + path = actions.write("filelist%s.txt" % suffix, filelists) + args.add(cmd_args(["-Xlinker", "-filelist", "-Xlinker", path])) + filelist_file = path return LinkArgsOutput( link_args = args, @@ -143,67 +166,159 @@ def make_link_args( def shared_libs_symlink_tree_name(output: Artifact) -> str: return "__{}__shared_libs_symlink_tree".format(output.short_path) -# Returns a tuple of: -# - list of extra arguments, -# - list of files/directories that should be present for executable to be run successfully -# - optional shared libs symlink tree symlinked_dir action +ExecutableSharedLibArguments = record( + extra_link_args = field(list[ArgLike], []), + # Files that must be present for the executable to run successfully. These + # are always materialized, whether the executable is the output of a build + # or executed as a host tool. + runtime_files = field(list[ArgLike], []), + # Files needed to debug the executable. These need to be materialized when + # this executable is the output of a build, but not when it is used by other + # rules. + external_debug_info = field(list[TransitiveSetArgsProjection], []), + # Optional shared libs symlink tree symlinked_dir action. 
+ shared_libs_symlink_tree = field(list[Artifact] | Artifact | None, None), +) + +CxxSanitizerRuntimeArguments = record( + extra_link_args = field(list[ArgLike], []), + sanitizer_runtime_files = field(list[Artifact], []), +) + +# @executable_path/Frameworks + +def cxx_sanitizer_runtime_arguments( + ctx: AnalysisContext, + cxx_toolchain: CxxToolchainInfo, + output: Artifact) -> CxxSanitizerRuntimeArguments: + linker_info = cxx_toolchain.linker_info + target_sanitizer_runtime_enabled = ctx.attrs.sanitizer_runtime_enabled if hasattr(ctx.attrs, "sanitizer_runtime_enabled") else None + sanitizer_runtime_enabled = target_sanitizer_runtime_enabled if target_sanitizer_runtime_enabled != None else linker_info.sanitizer_runtime_enabled + if not sanitizer_runtime_enabled: + return CxxSanitizerRuntimeArguments() + + if not linker_info.sanitizer_runtime_files: + fail("C++ sanitizer runtime enabled but there are no runtime files") + + if linker_info.type == LinkerType("darwin"): + # ignore_artifacts as the runtime directory is not required at _link_ time + runtime_rpath = cmd_args(ignore_artifacts = True) + runtime_files = linker_info.sanitizer_runtime_files + for runtime_shared_lib in runtime_files: + # Rpath-relative dylibs have an install name of `@rpath/libName.dylib`, + # which means we need to add the parent dir of the dylib as an rpath. + runtime_shared_lib_dir = cmd_args(runtime_shared_lib, parent = 1) + + # The parent dir of the runtime shared lib must appear as a path + # relative to the parent dir of the binary. `@executable_path` + # represents the parent dir of the binary, not the binary itself. + runtime_shared_lib_rpath = cmd_args(runtime_shared_lib_dir, format = "-Wl,-rpath,@executable_path/{}", relative_to = (output, 1)) + runtime_rpath.add(runtime_shared_lib_rpath) + + return CxxSanitizerRuntimeArguments( + extra_link_args = [ + runtime_rpath, + # Add rpaths in case the binary gets bundled and the app bundle is expected to be standalone. + # Not all transitive callers have `CxxPlatformInfo`, so just add both iOS and macOS rpaths. + # There's no downsides to having both, except dyld would check in both locations (and it won't + # find anything for the non-current platform). + "-Wl,-rpath,@loader_path/Frameworks", # iOS + "-Wl,-rpath,@executable_path/Frameworks", # iOS + "-Wl,-rpath,@loader_path/../Frameworks", # macOS + "-Wl,-rpath,@executable_path/../Frameworks", # macOS + ], + sanitizer_runtime_files = runtime_files, + ) + + return CxxSanitizerRuntimeArguments() + def executable_shared_lib_arguments( - actions: AnalysisActions, + ctx: AnalysisContext, cxx_toolchain: CxxToolchainInfo, output: Artifact, - shared_libs: dict[str, LinkedObject]) -> (list[typing.Any], list[ArgLike], [list[Artifact], Artifact, None]): - extra_args = [] + shared_libs: list[SharedLibrary]) -> ExecutableSharedLibArguments: + extra_link_args = [] runtime_files = [] shared_libs_symlink_tree = None - # Add external debug paths to runtime files, so that they're - # materialized when the binary is built. - runtime_files.extend( - project_artifacts( - actions = actions, - tsets = [shlib.external_debug_info for shlib in shared_libs.values()], - ), + # External debug info is materialized only when the executable is the output + # of a build. Do not add to runtime_files. 
+ external_debug_info = project_artifacts( + actions = ctx.actions, + tsets = [shlib.lib.external_debug_info for shlib in shared_libs], ) linker_type = cxx_toolchain.linker_info.type if len(shared_libs) > 0: - if linker_type == "windows": - shared_libs_symlink_tree = [actions.symlink_file( - shlib.output.basename, - shlib.output, - ) for _, shlib in shared_libs.items()] + if linker_type == LinkerType("windows"): + shared_libs_symlink_tree = [ctx.actions.symlink_file( + shlib.lib.output.basename, + shlib.lib.output, + ) for shlib in shared_libs] runtime_files.extend(shared_libs_symlink_tree) # Windows doesn't support rpath. else: - shared_libs_symlink_tree = actions.symlinked_dir( - shared_libs_symlink_tree_name(output), - {name: shlib.output for name, shlib in shared_libs.items()}, + shared_libs_symlink_tree = create_shlib_symlink_tree( + actions = ctx.actions, + out = shared_libs_symlink_tree_name(output), + shared_libs = shared_libs, ) runtime_files.append(shared_libs_symlink_tree) rpath_reference = get_rpath_origin(linker_type) - # We ignore_artifacts() here since we don't want the symlink tree to actually be there for the link. - rpath_arg = cmd_args(shared_libs_symlink_tree, format = "-Wl,-rpath,{}/{{}}".format(rpath_reference)).relative_to(output, parent = 1).ignore_artifacts() - extra_args.append(rpath_arg) + # We ignore_artifacts here since we don't want the symlink tree to actually be there for the link. + rpath_arg = cmd_args( + shared_libs_symlink_tree, + format = "-Wl,-rpath,{}/{{}}".format(rpath_reference), + ignore_artifacts = True, + relative_to = (output, 1), + ) + extra_link_args.append(rpath_arg) + + return ExecutableSharedLibArguments( + extra_link_args = extra_link_args, + runtime_files = runtime_files, + external_debug_info = external_debug_info, + shared_libs_symlink_tree = shared_libs_symlink_tree, + ) - return (extra_args, runtime_files, shared_libs_symlink_tree) +LinkCmdParts = record( + linker = [RunInfo, cmd_args], + linker_flags = cmd_args, + post_linker_flags = cmd_args, + # linker + linker_flags, for convenience + link_cmd = cmd_args, +) -def cxx_link_cmd_parts(toolchain: CxxToolchainInfo) -> ((RunInfo | cmd_args), cmd_args): +def cxx_link_cmd_parts(toolchain: CxxToolchainInfo, executable: bool) -> LinkCmdParts: # `toolchain_linker_flags` can either be a list of strings, `cmd_args` or `None`, # so we need to do a bit more work to satisfy the type checker toolchain_linker_flags = toolchain.linker_info.linker_flags + toolchain_post_linker_flags = toolchain.linker_info.post_linker_flags if toolchain_linker_flags == None: toolchain_linker_flags = cmd_args() elif not type(toolchain_linker_flags) == "cmd_args": toolchain_linker_flags = cmd_args(toolchain_linker_flags) - return toolchain.linker_info.linker, toolchain_linker_flags + if executable: + toolchain_linker_flags = cmd_args( + toolchain_linker_flags, + toolchain.linker_info.executable_linker_flags, + ) -# The command line for linking with C++ -def cxx_link_cmd(toolchain: CxxToolchainInfo) -> cmd_args: - linker, toolchain_linker_flags = cxx_link_cmd_parts(toolchain) - command = cmd_args(linker) - command.add(toolchain_linker_flags) - return command + if toolchain_post_linker_flags == None: + toolchain_post_linker_flags = cmd_args() + elif not type(toolchain_post_linker_flags) == "cmd_args": + toolchain_post_linker_flags = cmd_args(toolchain_post_linker_flags) + + link_cmd = cmd_args(toolchain.linker_info.linker) + link_cmd.add(toolchain_linker_flags) + + return LinkCmdParts( + linker = 
toolchain.linker_info.linker, + linker_flags = toolchain_linker_flags, + post_linker_flags = toolchain_post_linker_flags, + link_cmd = link_cmd, + ) diff --git a/prelude/cxx/cxx_sources.bzl b/prelude/cxx/cxx_sources.bzl index ece339680fdb8..ffbc433b31130 100644 --- a/prelude/cxx/cxx_sources.bzl +++ b/prelude/cxx/cxx_sources.bzl @@ -9,15 +9,22 @@ load( "@prelude//utils:utils.bzl", "flatten", ) -load( - ":compile.bzl", - "CxxSrcWithFlags", -) load(":platform.bzl", "cxx_by_platform") +# An input to cxx compilation, consisting of a file to compile and optional +# file specific flags to compile with. +CxxSrcWithFlags = record( + file = field(Artifact), + flags = field(list[ResolvedStringWithMacros], []), + # If we have multiple source entries with same files but different flags, + # specify an index so we can differentiate them. Otherwise, use None. + index = field([int, None], None), + is_header = field(bool, False), +) + # The source files -def get_srcs_with_flags(ctx: AnalysisContext) -> list[CxxSrcWithFlags]: - all_srcs = ctx.attrs.srcs + flatten(cxx_by_platform(ctx, ctx.attrs.platform_srcs)) +def get_srcs_with_flags(ctx: AnalysisContext, additional_srcs: list = []) -> list[CxxSrcWithFlags]: + all_srcs = ctx.attrs.srcs + flatten(cxx_by_platform(ctx, ctx.attrs.platform_srcs)) + additional_srcs # src -> flags_hash -> flags flags_sets_by_src = {} diff --git a/prelude/cxx/cxx_toolchain.bzl b/prelude/cxx/cxx_toolchain.bzl index 6dbc3a91c806f..308e9f7533d46 100644 --- a/prelude/cxx/cxx_toolchain.bzl +++ b/prelude/cxx/cxx_toolchain.bzl @@ -6,13 +6,35 @@ # of this source tree. load("@prelude//:is_full_meta_repo.bzl", "is_full_meta_repo") -load("@prelude//cxx:cxx_toolchain_types.bzl", "AsCompilerInfo", "AsmCompilerInfo", "BinaryUtilitiesInfo", "CCompilerInfo", "CudaCompilerInfo", "CxxCompilerInfo", "CxxObjectFormat", "DepTrackingMode", "DistLtoToolsInfo", "HipCompilerInfo", "LinkerInfo", "PicBehavior", "ShlibInterfacesMode", "StripFlagsInfo", "cxx_toolchain_infos") +load( + "@prelude//cxx:cxx_toolchain_types.bzl", + "AsCompilerInfo", + "AsmCompilerInfo", + "BinaryUtilitiesInfo", + "CCompilerInfo", + "CudaCompilerInfo", + "CvtresCompilerInfo", + "CxxCompilerInfo", + "CxxInternalTools", + "CxxObjectFormat", + "DepTrackingMode", + "HipCompilerInfo", + "LinkerInfo", + "LinkerType", + "PicBehavior", + "RcCompilerInfo", + "ShlibInterfacesMode", + "StripFlagsInfo", + "cxx_toolchain_infos", +) +load("@prelude//cxx:cxx_utility.bzl", "cxx_toolchain_allow_cache_upload_args") load("@prelude//cxx:debug.bzl", "SplitDebugMode") load("@prelude//cxx:headers.bzl", "HeaderMode", "HeadersAsRawHeadersMode") load("@prelude//cxx:linker.bzl", "LINKERS", "is_pdb_generated") +load("@prelude//cxx:target_sdk_version.bzl", "get_toolchain_target_sdk_version") load("@prelude//linking:link_info.bzl", "LinkOrdering", "LinkStyle") load("@prelude//linking:lto.bzl", "LtoMode", "lto_compiler_flags") -load("@prelude//utils:utils.bzl", "value_or") +load("@prelude//utils:utils.bzl", "flatten", "value_or") load("@prelude//decls/cxx_rules.bzl", "cxx_rules") def cxx_toolchain_impl(ctx): @@ -29,40 +51,38 @@ def cxx_toolchain_impl(ctx): c_info = CCompilerInfo( compiler = c_compiler, compiler_type = ctx.attrs.c_compiler_type or ctx.attrs.compiler_type, - compiler_flags = cmd_args(ctx.attrs.c_compiler_flags).add(c_lto_flags), + compiler_flags = cmd_args(ctx.attrs.c_compiler_flags, c_lto_flags), preprocessor = c_compiler, preprocessor_flags = cmd_args(ctx.attrs.c_preprocessor_flags), - dep_files_processor = 
ctx.attrs._dep_files_processor[RunInfo], + allow_cache_upload = ctx.attrs.c_compiler_allow_cache_upload, ) cxx_compiler = _get_maybe_wrapped_msvc(ctx.attrs.cxx_compiler[RunInfo], ctx.attrs.cxx_compiler_type or ctx.attrs.compiler_type, ctx.attrs._msvc_hermetic_exec[RunInfo]) cxx_info = CxxCompilerInfo( compiler = cxx_compiler, compiler_type = ctx.attrs.cxx_compiler_type or ctx.attrs.compiler_type, - compiler_flags = cmd_args(ctx.attrs.cxx_compiler_flags).add(c_lto_flags), + compiler_flags = cmd_args(ctx.attrs.cxx_compiler_flags, c_lto_flags), preprocessor = cxx_compiler, preprocessor_flags = cmd_args(ctx.attrs.cxx_preprocessor_flags), - dep_files_processor = ctx.attrs._dep_files_processor[RunInfo], + allow_cache_upload = ctx.attrs.cxx_compiler_allow_cache_upload, ) asm_info = AsmCompilerInfo( compiler = ctx.attrs.asm_compiler[RunInfo], compiler_type = ctx.attrs.asm_compiler_type or ctx.attrs.compiler_type, compiler_flags = cmd_args(ctx.attrs.asm_compiler_flags), preprocessor_flags = cmd_args(ctx.attrs.asm_preprocessor_flags), - dep_files_processor = ctx.attrs._dep_files_processor[RunInfo], ) if ctx.attrs.asm_compiler else None as_info = AsCompilerInfo( compiler = ctx.attrs.assembler[RunInfo], compiler_type = ctx.attrs.assembler_type or ctx.attrs.compiler_type, compiler_flags = cmd_args(ctx.attrs.assembler_flags), preprocessor_flags = cmd_args(ctx.attrs.assembler_preprocessor_flags), - dep_files_processor = ctx.attrs._dep_files_processor[RunInfo], ) if ctx.attrs.assembler else None cuda_info = CudaCompilerInfo( compiler = ctx.attrs.cuda_compiler[RunInfo], compiler_type = ctx.attrs.cuda_compiler_type or ctx.attrs.compiler_type, compiler_flags = cmd_args(ctx.attrs.cuda_compiler_flags), preprocessor_flags = cmd_args(ctx.attrs.cuda_preprocessor_flags), - dep_files_processor = ctx.attrs._dep_files_processor[RunInfo], + allow_cache_upload = ctx.attrs.cuda_compiler_allow_cache_upload, ) if ctx.attrs.cuda_compiler else None hip_info = HipCompilerInfo( compiler = ctx.attrs.hip_compiler[RunInfo], @@ -70,24 +90,43 @@ def cxx_toolchain_impl(ctx): compiler_flags = cmd_args(ctx.attrs.hip_compiler_flags), preprocessor_flags = cmd_args(ctx.attrs.hip_preprocessor_flags), ) if ctx.attrs.hip_compiler else None + cvtres_info = CvtresCompilerInfo( + compiler = ctx.attrs.cvtres_compiler[RunInfo], + compiler_type = ctx.attrs.cvtres_compiler_type or ctx.attrs.compiler_type, + compiler_flags = cmd_args(ctx.attrs.cvtres_compiler_flags), + preprocessor_flags = cmd_args(ctx.attrs.cvtres_preprocessor_flags), + ) if ctx.attrs.cvtres_compiler else None + rc_info = RcCompilerInfo( + compiler = ctx.attrs.rc_compiler[RunInfo], + compiler_type = ctx.attrs.rc_compiler_type or ctx.attrs.compiler_type, + compiler_flags = cmd_args(ctx.attrs.rc_compiler_flags), + preprocessor_flags = cmd_args(ctx.attrs.rc_preprocessor_flags), + ) if ctx.attrs.rc_compiler else None + linker_type = LinkerType(ctx.attrs.linker_type) linker_info = LinkerInfo( archiver = ctx.attrs.archiver[RunInfo], archiver_flags = cmd_args(ctx.attrs.archiver_flags), + archiver_reads_inputs = ctx.attrs.archiver_reads_inputs, archiver_supports_argfiles = ctx.attrs.archiver_supports_argfiles, archiver_type = ctx.attrs.archiver_type, archive_contents = ctx.attrs.archive_contents, archive_objects_locally = False, + archive_symbol_table = ctx.attrs.archive_symbol_table, binary_extension = value_or(ctx.attrs.binary_extension, ""), generate_linker_maps = ctx.attrs.generate_linker_maps, - is_pdb_generated = is_pdb_generated(ctx.attrs.linker_type, ctx.attrs.linker_flags), + 
is_pdb_generated = is_pdb_generated(linker_type, ctx.attrs.linker_flags), link_binaries_locally = not value_or(ctx.attrs.cache_links, True), link_libraries_locally = False, - link_style = LinkStyle("static"), - link_weight = 1, + link_style = LinkStyle(ctx.attrs.link_style), + link_weight = ctx.attrs.link_weight, link_ordering = ctx.attrs.link_ordering, linker = ctx.attrs.linker[RunInfo], - linker_flags = cmd_args(ctx.attrs.linker_flags).add(c_lto_flags), + linker_flags = cmd_args(ctx.attrs.linker_flags, c_lto_flags), + executable_linker_flags = ctx.attrs.executable_linker_flags, + binary_linker_flags = ctx.attrs.binary_linker_flags, + dist_thin_lto_codegen_flags = cmd_args(ctx.attrs.dist_thin_lto_codegen_flags) if ctx.attrs.dist_thin_lto_codegen_flags else None, + post_linker_flags = cmd_args(ctx.attrs.post_linker_flags), lto_mode = lto_mode, mk_shlib_intf = ctx.attrs.shared_library_interface_producer, object_file_extension = ctx.attrs.object_file_extension or "o", @@ -95,6 +134,8 @@ def cxx_toolchain_impl(ctx): independent_shlib_interface_linker_flags = ctx.attrs.shared_library_interface_flags, requires_archives = value_or(ctx.attrs.requires_archives, True), requires_objects = value_or(ctx.attrs.requires_objects, False), + sanitizer_runtime_enabled = ctx.attrs.sanitizer_runtime_enabled, + sanitizer_runtime_files = flatten([runtime_file[DefaultInfo].default_outputs for runtime_file in ctx.attrs.sanitizer_runtime_files]), supports_distributed_thinlto = ctx.attrs.supports_distributed_thinlto, shared_dep_runtime_ld_flags = ctx.attrs.shared_dep_runtime_ld_flags, shared_library_name_default_prefix = _get_shared_library_name_default_prefix(ctx), @@ -103,14 +144,15 @@ def cxx_toolchain_impl(ctx): static_dep_runtime_ld_flags = ctx.attrs.static_dep_runtime_ld_flags, static_library_extension = ctx.attrs.static_library_extension or "a", static_pic_dep_runtime_ld_flags = ctx.attrs.static_pic_dep_runtime_ld_flags, - type = ctx.attrs.linker_type, + thin_lto_premerger_enabled = ctx.attrs.thin_lto_premerger_enabled, + type = linker_type, use_archiver_flags = ctx.attrs.use_archiver_flags, - produce_interface_from_stub_shared_library = ctx.attrs.produce_interface_from_stub_shared_library, ) utilities_info = BinaryUtilitiesInfo( nm = ctx.attrs.nm[RunInfo], objcopy = ctx.attrs.objcopy_for_shared_library_interface[RunInfo], + objdump = ctx.attrs.objdump[RunInfo] if ctx.attrs.objdump else None, ranlib = ctx.attrs.ranlib[RunInfo] if ctx.attrs.ranlib else None, strip = ctx.attrs.strip[RunInfo], dwp = None, @@ -127,6 +169,7 @@ def cxx_toolchain_impl(ctx): return [ DefaultInfo(), ] + cxx_toolchain_infos( + internal_tools = ctx.attrs._internal_tools[CxxInternalTools], platform_name = platform_name, linker_info = linker_info, binary_utilities_info = utilities_info, @@ -137,29 +180,36 @@ def cxx_toolchain_impl(ctx): as_compiler_info = as_info, cuda_compiler_info = cuda_info, hip_compiler_info = hip_info, + cvtres_compiler_info = cvtres_info, + rc_compiler_info = rc_info, header_mode = _get_header_mode(ctx), llvm_link = ctx.attrs.llvm_link[RunInfo] if ctx.attrs.llvm_link else None, object_format = CxxObjectFormat(object_format), headers_as_raw_headers_mode = HeadersAsRawHeadersMode(ctx.attrs.headers_as_raw_headers_mode) if ctx.attrs.headers_as_raw_headers_mode != None else None, conflicting_header_basename_allowlist = ctx.attrs.conflicting_header_basename_exemptions, - mk_hmap = ctx.attrs._mk_hmap[RunInfo], - mk_comp_db = ctx.attrs._mk_comp_db, pic_behavior = PicBehavior(ctx.attrs.pic_behavior), split_debug_mode = 
SplitDebugMode(ctx.attrs.split_debug_mode), strip_flags_info = strip_flags_info, # TODO(T138705365): Turn on dep files by default use_dep_files = value_or(ctx.attrs.use_dep_files, _get_default_use_dep_files(platform_name)), clang_remarks = ctx.attrs.clang_remarks, + gcno_files = value_or(ctx.attrs.gcno_files, False), clang_trace = value_or(ctx.attrs.clang_trace, False), cpp_dep_tracking_mode = DepTrackingMode(ctx.attrs.cpp_dep_tracking_mode), cuda_dep_tracking_mode = DepTrackingMode(ctx.attrs.cuda_dep_tracking_mode), dumpbin_toolchain_path = ctx.attrs._dumpbin_toolchain_path[DefaultInfo].default_outputs[0] if ctx.attrs._dumpbin_toolchain_path else None, + target_sdk_version = get_toolchain_target_sdk_version(ctx), + lipo = ctx.attrs.lipo[RunInfo] if ctx.attrs.lipo else None, + remap_cwd = ctx.attrs.remap_cwd, + optimization_compiler_flags_EXPERIMENTAL = ctx.attrs.optimization_compiler_flags_EXPERIMENTAL, ) def cxx_toolchain_extra_attributes(is_toolchain_rule): dep_type = attrs.exec_dep if is_toolchain_rule else attrs.dep return { + "archive_symbol_table": attrs.bool(default = True), "archiver": dep_type(providers = [RunInfo]), + "archiver_reads_inputs": attrs.bool(default = True), "archiver_supports_argfiles": attrs.bool(default = False), "asm_compiler": attrs.option(dep_type(providers = [RunInfo]), default = None), "asm_preprocessor": attrs.option(dep_type(providers = [RunInfo]), default = None), @@ -172,16 +222,24 @@ def cxx_toolchain_extra_attributes(is_toolchain_rule): "cpp_dep_tracking_mode": attrs.enum(DepTrackingMode.values(), default = "makefile"), "cuda_compiler": attrs.option(dep_type(providers = [RunInfo]), default = None), "cuda_dep_tracking_mode": attrs.enum(DepTrackingMode.values(), default = "makefile"), + "cvtres_compiler": attrs.option(dep_type(providers = [RunInfo]), default = None), "cxx_compiler": dep_type(providers = [RunInfo]), + "gcno_files": attrs.bool(default = False), "generate_linker_maps": attrs.bool(default = False), "hip_compiler": attrs.option(dep_type(providers = [RunInfo]), default = None), "link_ordering": attrs.enum(LinkOrdering.values(), default = "preorder"), + "link_weight": attrs.int(default = 1), "linker": dep_type(providers = [RunInfo]), + "lipo": attrs.option(dep_type(providers = [RunInfo]), default = None), "llvm_link": attrs.option(dep_type(providers = [RunInfo]), default = None), "lto_mode": attrs.enum(LtoMode.values(), default = "none"), + # Darwin only: the minimum deployment target supported + "min_sdk_version": attrs.option(attrs.string(), default = None), "nm": dep_type(providers = [RunInfo]), "objcopy_for_shared_library_interface": dep_type(providers = [RunInfo]), + "objdump": attrs.option(dep_type(providers = [RunInfo]), default = None), "object_format": attrs.enum(CxxObjectFormat.values(), default = "native"), + "optimization_compiler_flags_EXPERIMENTAL": attrs.list(attrs.string(), default = []), "pic_behavior": attrs.enum(PicBehavior.values(), default = "supported"), # A placeholder tool that can be used to set up toolchain constraints. # Useful when fat and thin toolchahins share the same underlying tools via `command_alias()`, @@ -190,19 +248,23 @@ def cxx_toolchain_extra_attributes(is_toolchain_rule): # Used for resolving any 'platform_*' attributes. 
"platform_name": attrs.option(attrs.string(), default = None), "private_headers_symlinks_enabled": attrs.bool(default = True), - "produce_interface_from_stub_shared_library": attrs.bool(default = True), "public_headers_symlinks_enabled": attrs.bool(default = True), "ranlib": attrs.option(dep_type(providers = [RunInfo]), default = None), + "rc_compiler": attrs.option(dep_type(providers = [RunInfo]), default = None), + "remap_cwd": attrs.bool(default = False), "requires_objects": attrs.bool(default = False), + "sanitizer_runtime_enabled": attrs.bool(default = False), + "sanitizer_runtime_files": attrs.set(attrs.dep(), sorted = True, default = []), # Use `attrs.dep()` as it's not a tool, always propagate target platform "shared_library_interface_mode": attrs.enum(ShlibInterfacesMode.values(), default = "disabled"), "shared_library_interface_producer": attrs.option(dep_type(providers = [RunInfo]), default = None), "split_debug_mode": attrs.enum(SplitDebugMode.values(), default = "none"), "strip": dep_type(providers = [RunInfo]), "supports_distributed_thinlto": attrs.bool(default = False), + # Darwin only: the deployment target to use for this build + "target_sdk_version": attrs.option(attrs.string(), default = None), + "thin_lto_premerger_enabled": attrs.bool(default = False), "use_archiver_flags": attrs.bool(default = True), "use_dep_files": attrs.option(attrs.bool(), default = None), - "_dep_files_processor": dep_type(providers = [RunInfo], default = "prelude//cxx/tools:dep_file_processor"), - "_dist_lto_tools": attrs.default_only(dep_type(providers = [DistLtoToolsInfo], default = "prelude//cxx/dist_lto/tools:dist_lto_tools")), # TODO(scottcao): Figure out a slightly better way to integrate this. In theory, this is only needed for clang toolchain. # If we were using msvc, we should be able to use dumpbin directly. "_dumpbin_toolchain_path": attrs.default_only(attrs.option(dep_type(providers = [DefaultInfo]), default = select({ @@ -212,15 +274,14 @@ def cxx_toolchain_extra_attributes(is_toolchain_rule): # to fail, so I need a DEFAULT here when some target without cpu constraint tries to configure against the # windows exec platform. 
"DEFAULT": None, - "ovr_config//cpu:x86_32": "fbsource//arvr/third-party/toolchains/visual_studio:14.28.29910-cl_32_and_tools", - "ovr_config//cpu:x86_64": "fbsource//arvr/third-party/toolchains/visual_studio:14.28.29910-cl_64_and_tools", + # FIXME: prelude// should be standalone (not refer to fbsource//) + "ovr_config//cpu:x86_32": "fbsource//third-party/toolchains/visual_studio:cl_x86_and_tools", + "ovr_config//cpu:x86_64": "fbsource//third-party/toolchains/visual_studio:cl_x64_and_tools", }), }) if is_full_meta_repo() else None)), - "_mk_comp_db": attrs.default_only(dep_type(providers = [RunInfo], default = "prelude//cxx/tools:make_comp_db")), - # FIXME: prelude// should be standalone (not refer to fbsource//) - "_mk_hmap": attrs.default_only(dep_type(providers = [RunInfo], default = "fbsource//xplat/buck2/tools/cxx:hmap_wrapper")), + "_internal_tools": attrs.default_only(dep_type(providers = [CxxInternalTools], default = "prelude//cxx/tools:internal_tools")), "_msvc_hermetic_exec": attrs.default_only(dep_type(providers = [RunInfo], default = "prelude//windows/tools:msvc_hermetic_exec")), - } + } | cxx_toolchain_allow_cache_upload_args() def _cxx_toolchain_inheriting_target_platform_attrs(): attrs = dict(cxx_rules.cxx_toolchain.attrs) @@ -265,14 +326,14 @@ def _get_shared_library_name_default_prefix(ctx: AnalysisContext) -> str: return "" if extension == "dll" else "lib" def _get_shared_library_name_format(ctx: AnalysisContext) -> str: - linker_type = ctx.attrs.linker_type + linker_type = LinkerType(ctx.attrs.linker_type) extension = ctx.attrs.shared_library_extension if extension == "": extension = LINKERS[linker_type].default_shared_library_extension return "{}." + extension def _get_shared_library_versioned_name_format(ctx: AnalysisContext) -> str: - linker_type = ctx.attrs.linker_type + linker_type = LinkerType(ctx.attrs.linker_type) extension_format = ctx.attrs.shared_library_versioned_extension_format.replace("%s", "{}") if extension_format == "": extension_format = LINKERS[linker_type].default_shared_library_versioned_extension_format diff --git a/prelude/cxx/cxx_toolchain_macro_layer.bzl b/prelude/cxx/cxx_toolchain_macro_layer.bzl index 4b38584ad91a2..adaeada5a36a0 100644 --- a/prelude/cxx/cxx_toolchain_macro_layer.bzl +++ b/prelude/cxx/cxx_toolchain_macro_layer.bzl @@ -7,12 +7,9 @@ def cxx_toolchain_macro_impl(cxx_toolchain_rule = None, **kwargs): # `cxx.linker_map_enabled` overrides toolchain behavior - linker_map_enabled = read_root_config("cxx", "linker_map_enabled") - if linker_map_enabled != None: - if linker_map_enabled.lower() == "true": - kwargs["generate_linker_maps"] = True - else: - kwargs["generate_linker_maps"] = False + if "generate_linker_maps" not in kwargs: + linker_map_enabled = read_root_config("cxx", "linker_map_enabled", "") + kwargs["generate_linker_maps"] = linker_map_enabled.lower() == "true" bitcode = read_root_config("cxx", "bitcode") if bitcode != None: diff --git a/prelude/cxx/cxx_toolchain_types.bzl b/prelude/cxx/cxx_toolchain_types.bzl index 932179e3404e2..944b01ea0cdd0 100644 --- a/prelude/cxx/cxx_toolchain_types.bzl +++ b/prelude/cxx/cxx_toolchain_types.bzl @@ -7,15 +7,9 @@ load("@prelude//cxx:debug.bzl", "SplitDebugMode") -# For cases where our `ld` dependency provides more than an executable and -# would like to give us flags too. We use this to place the flags in the proper -# field (linker_flags), so that things that want ldflags without the linker -# executable can access those. 
-RichLinkerRunInfo = provider(fields = {"exe": provider_field(typing.Any, default = None), "flags": provider_field(typing.Any, default = None)}) +LinkerType = enum("gnu", "darwin", "windows", "wasm") -LinkerType = ["gnu", "darwin", "windows", "wasm"] - -ShlibInterfacesMode = enum("disabled", "enabled", "defined_only") +ShlibInterfacesMode = enum("disabled", "enabled", "defined_only", "stub_from_library", "stub_from_headers") # TODO(T110378149): Consider whether it makes sense to move these things to # configurations/constraints rather than part of the toolchain. @@ -24,13 +18,16 @@ LinkerInfo = provider( fields = { "archiver": provider_field(typing.Any, default = None), "archiver_flags": provider_field(typing.Any, default = None), + "archiver_reads_inputs": provider_field(bool, default = True), "archiver_supports_argfiles": provider_field(typing.Any, default = None), "archiver_type": provider_field(typing.Any, default = None), "archive_contents": provider_field(typing.Any, default = None), "archive_objects_locally": provider_field(typing.Any, default = None), + "archive_symbol_table": provider_field(bool, default = True), # "archiver_platform", # "" on Unix, "exe" on Windows "binary_extension": provider_field(typing.Any, default = None), # str + "dist_thin_lto_codegen_flags": provider_field([cmd_args, None], default = None), "generate_linker_maps": provider_field(typing.Any, default = None), # bool # Whether to run native links locally. We support this for fbcode platforms # to avoid issues with C++ static links (see comment in @@ -41,14 +38,19 @@ LinkerInfo = provider( # GiBs of object files (which can also lead to RE errors/timesouts etc). "link_libraries_locally": provider_field(typing.Any, default = None), "link_style": provider_field(typing.Any, default = None), # LinkStyle - "link_weight": provider_field(typing.Any, default = None), # int + "link_weight": provider_field(int, default = 1), # int "link_ordering": provider_field(typing.Any, default = None), # LinkOrdering "linker": provider_field(typing.Any, default = None), "linker_flags": provider_field(typing.Any, default = None), + "executable_linker_flags": provider_field(typing.Any, default = []), + "binary_linker_flags": provider_field(typing.Any, default = []), "lto_mode": provider_field(typing.Any, default = None), "mk_shlib_intf": provider_field(typing.Any, default = None), # "o" on Unix, "obj" on Windows "object_file_extension": provider_field(typing.Any, default = None), # str + "post_linker_flags": provider_field(typing.Any, default = None), + "sanitizer_runtime_enabled": provider_field(bool, default = False), + "sanitizer_runtime_files": provider_field(list[Artifact], default = []), "shlib_interfaces": provider_field(ShlibInterfacesMode), "shared_dep_runtime_ld_flags": provider_field(typing.Any, default = None), # "lib" on Linux/Mac/Android, "" on Windows. 
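`LinkerType` is now a Starlark `enum` rather than a plain list of strings, so call sites construct an enum value once at the attribute boundary (as the `LinkerType(ctx.attrs.linker_type)` call sites in the earlier hunks now do) instead of passing raw strings around. A minimal illustrative sketch of the pattern, not actual prelude code:

```
load("@prelude//cxx:cxx_toolchain_types.bzl", "LinkerType")

def _shared_library_flag(linker_type: LinkerType) -> str:
    # Enum values compare by value; `.value` recovers the raw string.
    if linker_type == LinkerType("windows"):
        return "/DLL"
    return "-shared"
```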
@@ -64,11 +66,11 @@ LinkerInfo = provider( "requires_objects": provider_field(typing.Any, default = None), "supports_distributed_thinlto": provider_field(typing.Any, default = None), "independent_shlib_interface_linker_flags": provider_field(typing.Any, default = None), - "type": provider_field(typing.Any, default = None), # of "LinkerType" type + "thin_lto_premerger_enabled": provider_field(bool, default = False), + "type": LinkerType, "use_archiver_flags": provider_field(typing.Any, default = None), "force_full_hybrid_if_capable": provider_field(typing.Any, default = None), "is_pdb_generated": provider_field(typing.Any, default = None), # bool - "produce_interface_from_stub_shared_library": provider_field(typing.Any, default = None), # bool }, ) @@ -77,6 +79,7 @@ BinaryUtilitiesInfo = provider(fields = { "dwp": provider_field(typing.Any, default = None), "nm": provider_field(typing.Any, default = None), "objcopy": provider_field(typing.Any, default = None), + "objdump": provider_field(typing.Any, default = None), "ranlib": provider_field(typing.Any, default = None), "strip": provider_field(typing.Any, default = None), }) @@ -121,20 +124,36 @@ _compiler_fields = [ "preprocessor", "preprocessor_type", "preprocessor_flags", - "dep_files_processor", + # Controls cache upload for object files + "allow_cache_upload", + "supports_two_phase_compilation", ] HipCompilerInfo = provider(fields = _compiler_fields) CudaCompilerInfo = provider(fields = _compiler_fields) +CvtresCompilerInfo = provider(fields = _compiler_fields) +RcCompilerInfo = provider(fields = _compiler_fields) CCompilerInfo = provider(fields = _compiler_fields) CxxCompilerInfo = provider(fields = _compiler_fields) AsmCompilerInfo = provider(fields = _compiler_fields) AsCompilerInfo = provider(fields = _compiler_fields) -DistLtoToolsInfo = provider( - # @unsorted-dict-items - fields = {"planner": provider_field(typing.Any, default = None), "opt": provider_field(typing.Any, default = None), "prepare": provider_field(typing.Any, default = None), "copy": provider_field(typing.Any, default = None)}, -) +DistLtoToolsInfo = provider(fields = dict( + planner = dict[LinkerType, RunInfo], + opt = dict[LinkerType, RunInfo], + prepare = RunInfo, + copy = RunInfo, +)) + +CxxInternalTools = provider(fields = dict( + concatenate_diagnostics = RunInfo, + dep_file_processor = RunInfo, + dist_lto = DistLtoToolsInfo, + hmap_wrapper = RunInfo, + make_comp_db = RunInfo, + remap_cwd = RunInfo, + stderr_to_file = RunInfo, +)) CxxObjectFormat = enum( "native", @@ -171,6 +190,7 @@ PicBehavior = enum( CxxToolchainInfo = provider( # @unsorted-dict-items fields = { + "internal_tools": provider_field(CxxInternalTools), "conflicting_header_basename_allowlist": provider_field(typing.Any, default = None), "use_distributed_thinlto": provider_field(typing.Any, default = None), "header_mode": provider_field(typing.Any, default = None), @@ -184,12 +204,12 @@ CxxToolchainInfo = provider( "as_compiler_info": provider_field(typing.Any, default = None), "hip_compiler_info": provider_field(typing.Any, default = None), "cuda_compiler_info": provider_field(typing.Any, default = None), - "mk_comp_db": provider_field(typing.Any, default = None), - "mk_hmap": provider_field(typing.Any, default = None), + "cvtres_compiler_info": provider_field(typing.Any, default = None), + "rc_compiler_info": provider_field(typing.Any, default = None), "llvm_link": provider_field(typing.Any, default = None), - "dist_lto_tools_info": provider_field(typing.Any, default = None), "use_dep_files": 
provider_field(typing.Any, default = None), "clang_remarks": provider_field(typing.Any, default = None), + "gcno_files": provider_field(typing.Any, default = None), "clang_trace": provider_field(typing.Any, default = None), "cpp_dep_tracking_mode": provider_field(typing.Any, default = None), "cuda_dep_tracking_mode": provider_field(typing.Any, default = None), @@ -198,6 +218,10 @@ CxxToolchainInfo = provider( "bolt_enabled": provider_field(typing.Any, default = None), "pic_behavior": provider_field(typing.Any, default = None), "dumpbin_toolchain_path": provider_field(typing.Any, default = None), + "target_sdk_version": provider_field([str, None], default = None), + "lipo": provider_field([RunInfo, None], default = None), + "remap_cwd": provider_field(bool, default = False), + "optimization_compiler_flags_EXPERIMENTAL": provider_field(typing.Any, default = []), }, ) @@ -215,9 +239,6 @@ def _validate_linker_info(info: LinkerInfo): if info.requires_archives and info.requires_objects: fail("only one of `requires_archives` and `requires_objects` can be enabled") - if info.supports_distributed_thinlto and not info.requires_objects: - fail("distributed thinlto requires enabling `requires_objects`") - def is_bitcode_format(format: CxxObjectFormat) -> bool: return format in [CxxObjectFormat("bitcode"), CxxObjectFormat("embedded-bitcode")] @@ -228,29 +249,34 @@ def cxx_toolchain_infos( linker_info, binary_utilities_info, header_mode, + internal_tools: CxxInternalTools, headers_as_raw_headers_mode = None, conflicting_header_basename_allowlist = [], asm_compiler_info = None, as_compiler_info = None, hip_compiler_info = None, cuda_compiler_info = None, + cvtres_compiler_info = None, + rc_compiler_info = None, object_format = CxxObjectFormat("native"), - mk_comp_db = None, - mk_hmap = None, use_distributed_thinlto = False, use_dep_files = False, clang_remarks = None, + gcno_files = None, clang_trace = False, cpp_dep_tracking_mode = DepTrackingMode("none"), cuda_dep_tracking_mode = DepTrackingMode("none"), strip_flags_info = None, - dist_lto_tools_info: [DistLtoToolsInfo, None] = None, split_debug_mode = SplitDebugMode("none"), bolt_enabled = False, llvm_link = None, platform_deps_aliases = [], pic_behavior = PicBehavior("supported"), - dumpbin_toolchain_path = None): + dumpbin_toolchain_path = None, + target_sdk_version = None, + lipo = None, + remap_cwd = False, + optimization_compiler_flags_EXPERIMENTAL = []): """ Creates the collection of cxx-toolchain Infos for a cxx toolchain. 
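`DistLtoToolsInfo` now types its `planner` and `opt` entries as dicts keyed by `LinkerType`, and the assorted helper binaries (`make_comp_db`, `hmap_wrapper`, the dist-LTO tools, and friends) are reached through the single `CxxInternalTools` provider on the toolchain. A minimal sketch of the access pattern from analysis code (illustrative; the darwin dist-LTO rule added later in this change does exactly this):

```
load("@prelude//cxx:cxx_context.bzl", "get_cxx_toolchain_info")
load("@prelude//cxx:cxx_toolchain_types.bzl", "LinkerType")

def _bundled_tools(ctx: AnalysisContext):
    tools = get_cxx_toolchain_info(ctx).internal_tools
    comp_db = tools.make_comp_db  # was toolchain.mk_comp_db
    planner = tools.dist_lto.planner[LinkerType("darwin")]
    return comp_db, planner
```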
@@ -263,6 +289,7 @@ def cxx_toolchain_infos( _validate_linker_info(linker_info) toolchain_info = CxxToolchainInfo( + internal_tools = internal_tools, conflicting_header_basename_allowlist = conflicting_header_basename_allowlist, header_mode = header_mode, headers_as_raw_headers_mode = headers_as_raw_headers_mode, @@ -275,13 +302,13 @@ def cxx_toolchain_infos( as_compiler_info = as_compiler_info, hip_compiler_info = hip_compiler_info, cuda_compiler_info = cuda_compiler_info, - mk_comp_db = mk_comp_db, - mk_hmap = mk_hmap, + cvtres_compiler_info = cvtres_compiler_info, + rc_compiler_info = rc_compiler_info, object_format = object_format, - dist_lto_tools_info = dist_lto_tools_info, use_distributed_thinlto = use_distributed_thinlto, use_dep_files = use_dep_files, clang_remarks = clang_remarks, + gcno_files = gcno_files, clang_trace = clang_trace, cpp_dep_tracking_mode = cpp_dep_tracking_mode, cuda_dep_tracking_mode = cuda_dep_tracking_mode, @@ -290,6 +317,10 @@ def cxx_toolchain_infos( bolt_enabled = bolt_enabled, pic_behavior = pic_behavior, dumpbin_toolchain_path = dumpbin_toolchain_path, + target_sdk_version = target_sdk_version, + lipo = lipo, + remap_cwd = remap_cwd, + optimization_compiler_flags_EXPERIMENTAL = optimization_compiler_flags_EXPERIMENTAL, ) # Provide placeholder mappings, used primarily by cxx_genrule. @@ -309,9 +340,10 @@ def cxx_toolchain_infos( # NOTE(agallagher): The arg-less variants of the ldflags macro are # identical, and are just separate to match v1's behavior (ideally, # we just have a single `ldflags` macro for this case). - "ldflags-shared": _shell_quote(linker_info.linker_flags), - "ldflags-static": _shell_quote(linker_info.linker_flags), - "ldflags-static-pic": _shell_quote(linker_info.linker_flags), + "ldflags-shared": _shell_quote(linker_info.linker_flags or []), + "ldflags-static": _shell_quote(linker_info.linker_flags or []), + "ldflags-static-pic": _shell_quote(linker_info.linker_flags or []), + "objcopy": binary_utilities_info.objcopy, # TODO(T110378148): $(platform-name) is almost unusued. Should we remove it? "platform-name": platform_name, } diff --git a/prelude/cxx/cxx_types.bzl b/prelude/cxx/cxx_types.bzl index a0dfa96af902d..0ae7606c50c11 100644 --- a/prelude/cxx/cxx_types.bzl +++ b/prelude/cxx/cxx_types.bzl @@ -5,7 +5,11 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -load("@prelude//:artifact_tset.bzl", "ArtifactTSet") # @unused Used as a type +load("@prelude//:artifact_tset.bzl", "ArtifactInfoTag", "ArtifactTSet") +load( + "@prelude//cxx:link_groups_types.bzl", + "LinkGroupInfo", # @unused Used as a type +) load( "@prelude//linking:link_info.bzl", "LinkArgs", @@ -21,7 +25,7 @@ load( ) load(":argsfiles.bzl", "CompileArgsfiles") load( - ":compile.bzl", + ":cxx_sources.bzl", "CxxSrcWithFlags", # @unused Used as a type ) load( @@ -30,7 +34,6 @@ load( ) load( ":link_groups.bzl", - "LinkGroupInfo", # @unused Used as a type "LinkGroupLibSpec", # @unused Used as a type ) load( @@ -47,6 +50,13 @@ load( "cxx_populate_xcode_attributes", ) +CxxLibraryInfo = provider( + fields = dict( + target = provider_field(Label), + labels = provider_field(list[str]), + ), +) + # Parameters to control which sub targets to define when processing Cxx rules. # By default, generates all subtargets. 
CxxRuleSubTargetParams = record(
@@ -60,6 +70,7 @@ CxxRuleSubTargetParams = record(
     xcode_data = field(bool, True),
     objects = field(bool, True),
     bitcode_bundle = field(bool, True),
+    header_unit = field(bool, True),
 )
 
 # Parameters to control which providers to define when processing Cxx rules.
@@ -69,15 +80,19 @@ CxxRuleProviderParams = record(
     default = field(bool, True),
     java_packaging_info = field(bool, True),
     android_packageable_info = field(bool, True),
+    java_global_code_info = field(bool, True),
     linkable_graph = field(bool, True),
     link_style_outputs = field(bool, True),
     merged_native_link_info = field(bool, True),
     omnibus_root = field(bool, True),
     preprocessors = field(bool, True),
+    # Whether or not to generate a resource groups provider for raw cxx resources.
+    cxx_resources_as_apple_resources = field(bool, True),
     resources = field(bool, True),
     shared_libraries = field(bool, True),
     template_placeholders = field(bool, True),
     preprocessor_for_tests = field(bool, True),
+    third_party_build = field(bool, False),
 )
 
 # Parameters to handle non-Clang sources, e.g Swift on Apple's platforms.
@@ -92,6 +107,8 @@ CxxRuleAdditionalParams = record(
     subtargets = field(dict, {}),  # [str: ["provider"]]
     # Might be used to expose additional providers to cxx layer (e.g to support #headers subtarget for Swift)
     additional_providers_factory = field([typing.Callable, None], None),  # ([CPreprocessorInfo, None]) -> ["provider"]:
+    # The list of tags that should be applied to generated ArtifactTSet of debug information.
+    external_debug_info_tags = field(list[ArtifactInfoTag], []),
 )
 
 # Parameters that allows to configure/extend generic implementation of C++ rules.
@@ -102,15 +119,20 @@ CxxRuleAdditionalParams = record(
 # different and need to be specified. The following record holds the data which
 # is needed to specialize user-facing rule from generic implementation.
 CxxRuleConstructorParams = record(
+    # Required
+
+    # Name of the top level rule utilizing the cxx rule.
+    rule_type = str,
+    # Header layout to use when importing headers.
+    headers_layout = CxxHeadersLayout,
+
+    # Optional
+
     # Whether to build an empty shared library. This is utilized for rust_python_extensions
     # so that they can link against the rust shared object.
     build_empty_so = field(bool, False),
-    # Name of the top level rule utilizing the cxx rule.
-    rule_type = str,
     # If the rule is a test.
     is_test = field(bool, False),
-    # Header layout to use importing headers.
-    headers_layout = CxxHeadersLayout,
     # Additional information used to preprocess every unit of translation in the rule.
     extra_preprocessors = field(list[CPreprocessor], []),
     extra_preprocessors_info = field(list[CPreprocessorInfo], []),
@@ -119,6 +141,8 @@ CxxRuleConstructorParams = record(
     # Additional information used to link every object produced by the rule,
     # flags are _both_ exported and used to link the target itself.
     extra_exported_link_flags = field(list[typing.Any], []),
+    # Additional hidden inputs for link or archive actions.
+    extra_hidden = field(list[Artifact], []),
     # Additional flags used _only_ when linking the target itself.
     # These flags are _not_ propagated up the dep tree.
     extra_link_flags = field(list[typing.Any], []),
@@ -145,6 +169,10 @@ CxxRuleConstructorParams = record(
     shared_library_flags = field([SharedLibraryFlagOverrides, None], None),
     # Optional argument to override the default name of the shared object being produced.
     soname = field([str, None], None),
+    # Optional argument to override the default name of the executable being produced.
+ executable_name = field([str, None], None), + # Optional argument to set the deffile for the windows linker on a dll + deffile = field([Artifact, None], None), # If passed to cxx_executable, this field will be used to determine # a shared subtarget's default output should be stripped. strip_executable = field(bool, False), @@ -169,7 +197,7 @@ CxxRuleConstructorParams = record( # shared libs to include in the symlink tree). extra_link_roots = field(list[LinkableProviders], []), # Additional shared libs to "package". - extra_shared_libs = field(dict[str, SharedLibrary], {}), + extra_shared_libs = field(list[SharedLibrary], []), auto_link_group_specs = field([list[LinkGroupLibSpec], None], None), link_group_info = field([LinkGroupInfo, None], None), # Whether to use pre-stripped objects when linking. @@ -179,8 +207,39 @@ CxxRuleConstructorParams = record( # Whether link groups liking should make `preferred_linkage = "static"` libs # "follow" their dependents across link group boundaries. link_groups_force_static_follows_dependents = field(bool, True), - # The intended return type is: (list[ArgLike], dict[str, list[DefaultInfo]]). - extra_linker_outputs_factory = field(typing.Callable, lambda _context: ([], {})), + # A factory function to produce extra artifacts and output providers for a rule + # with signature: f(ctx) -> ExtraLinkerOutputs + extra_linker_outputs_factory = field(typing.Callable | None, None), + # A factory function to produce linker flags for the extra linker outputs + # returned from the extra_linker_outputs_factory. It should have the signature + # f(ctx, dict[str, Artifact]) -> list[ArgLike] + extra_linker_outputs_flags_factory = field(typing.Callable | None, None), # Whether to allow cache uploads for locally-linked executables. exe_allow_cache_upload = field(bool, False), + # Extra shared library interfaces to propagate, eg from mixed Swift libraries. + extra_shared_library_interfaces = field([list[Artifact], None], None), + # Compiler flags + compiler_flags = field(list[typing.Any], []), + lang_compiler_flags = field(dict[typing.Any, typing.Any], {}), + # Platform compiler flags + platform_compiler_flags = field(list[(str, typing.Any)], []), + lang_platform_compiler_flags = field(dict[typing.Any, typing.Any], {}), + # Preprocessor flags + preprocessor_flags = field(list[typing.Any], []), + lang_preprocessor_flags = field(dict[typing.Any, typing.Any], {}), + # Platform preprocessor flags + platform_preprocessor_flags = field(list[(str, typing.Any)], []), + lang_platform_preprocessor_flags = field(dict[typing.Any, typing.Any], {}), + # modulename-Swift.h header for building objc targets that rely on this swift dep + swift_objc_header = field([Artifact, None], None), + error_handler = field([typing.Callable, None], None), + index_store_factory = field(typing.Callable | None, None), + # Swift index stores to propagate + index_stores = field(list[Artifact] | None, None), + # Whether to add header units from dependencies to the command line. + use_header_units = field(bool, False), + # Whether to export a header unit to all dependents. + export_header_unit = field([str, None], None), + # Filter what headers to include in header units. + export_header_unit_filter = field(list[str], []), ) diff --git a/prelude/cxx/cxx_utility.bzl b/prelude/cxx/cxx_utility.bzl new file mode 100644 index 0000000000000..0647e285f0e76 --- /dev/null +++ b/prelude/cxx/cxx_utility.bzl @@ -0,0 +1,37 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+def cxx_attrs_get_allow_cache_upload(attrs: struct, default: [None, bool] = None) -> bool:
+    default_value = default if default != None else False
+    if not hasattr(attrs, "allow_cache_upload"):
+        return default_value
+    value = attrs.allow_cache_upload
+    return value if value != None else default_value
+
+def cxx_toolchain_allow_cache_upload_args():
+    doc = """
+    Whether to allow uploading of object files to cache when the compile
+    action is executed locally and the configuration allows uploads (i.e.,
+    there is a cache configured and the client has permission to write to it).
+    """
+    return {
+        "c_compiler_allow_cache_upload": attrs.option(
+            attrs.bool(),
+            default = None,
+            doc = doc,
+        ),
+        "cuda_compiler_allow_cache_upload": attrs.option(
+            attrs.bool(),
+            default = None,
+            doc = doc,
+        ),
+        "cxx_compiler_allow_cache_upload": attrs.option(
+            attrs.bool(),
+            default = None,
+            doc = doc,
+        ),
+    }
diff --git a/prelude/cxx/diagnostics.bzl b/prelude/cxx/diagnostics.bzl
new file mode 100644
index 0000000000000..a7166d4be167b
--- /dev/null
+++ b/prelude/cxx/diagnostics.bzl
@@ -0,0 +1,37 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+load("@prelude//utils:expect.bzl", "expect")
+load(":cxx_context.bzl", "get_cxx_toolchain_info")
+
+def check_sub_target(
+        ctx: AnalysisContext,
+        diagnostics: dict[str, Artifact]) -> list[Provider]:
+    expect(len(diagnostics) > 0)
+
+    if len(diagnostics) == 1:
+        all_diagnostics = diagnostics.values()[0]
+    else:
+        toolchain = get_cxx_toolchain_info(ctx)
+        concatenate_diagnostics_tool = toolchain.internal_tools.concatenate_diagnostics
+        all_diagnostics = ctx.actions.declare_output("diagnostics.txt")
+        ctx.actions.run(
+            [
+                concatenate_diagnostics_tool,
+                cmd_args(all_diagnostics.as_output(), format = "--out={}"),
+                diagnostics.values(),
+            ],
+            category = "diagnostics",
+        )
+
+    return [DefaultInfo(
+        default_output = all_diagnostics,
+        sub_targets = {
+            short_path: [DefaultInfo(default_output = diagnostics)]
+            for short_path, diagnostics in diagnostics.items()
+        },
+    )]
diff --git a/prelude/cxx/dist_lto/README.md b/prelude/cxx/dist_lto/README.md
index 1102134a2c296..d2b4f665bbfb2 100644
--- a/prelude/cxx/dist_lto/README.md
+++ b/prelude/cxx/dist_lto/README.md
@@ -1,23 +1,27 @@
 # Distributed ThinLTO in Buck2
+
 Sean Gillespie, April 2022
 
-This document is a technical overview into Buck2's implementation of a distributed ThinLTO.
-Like all rules in Buck2, this implementation is written entirely in Starlark, contained in
-`dist_lto.bzl` (in this same directory).
+This document is a technical overview of Buck2's implementation of a
+distributed ThinLTO. Like all rules in Buck2, this implementation is written
+entirely in Starlark, contained in `dist_lto.bzl` (in this same directory).
## Motivation -First, I highly recommend watching [Teresa Johnson's CppCon2017 talk about ThinLTO](https://www.youtube.com/watch?v=p9nH2vZ2mNo), +First, I highly recommend watching +[Teresa Johnson's CppCon2017 talk about ThinLTO](https://www.youtube.com/watch?v=p9nH2vZ2mNo), which covers the topics in this section in much greater detail than I can. -C and C++ have long enjoyed significant optimizations at the hands of compilers. However, they have also -long suffered a fundamental limitation; a C or C++ compiler can only optimize code that it sees in a single -translation unit. For a language like C or C++, this means in practice that only code that is included via -the preprocessor or specified in the translation unit can be optimized as a single unit. C and C++ compilers -are unable to inline functions that are defined in different translation units. However, a crucial advantage -of this compilation model is that all C and C++ compiler invocations are *completely parallelizable*; despite -sacrificing some code quality, C and C++ compilation turns into a massively parallel problem with a serial -link step at the very end. +C and C++ have long enjoyed significant optimizations at the hands of compilers. +However, they have also long suffered a fundamental limitation; a C or C++ +compiler can only optimize code that it sees in a single translation unit. For a +language like C or C++, this means in practice that only code that is included +via the preprocessor or specified in the translation unit can be optimized as a +single unit. C and C++ compilers are unable to inline functions that are defined +in different translation units. However, a crucial advantage of this compilation +model is that all C and C++ compiler invocations are _completely +parallelizable_; despite sacrificing some code quality, C and C++ compilation +turns into a massively parallel problem with a serial link step at the very end. ``` flowchart LR; @@ -36,20 +40,25 @@ flowchart LR; c.o --> main; ``` -([Rendered](https://fburl.com/mermaid/rzup8o32). Compilation and optimization of a, b, and c can proceed in parallel.) - - -In cases where absolute performance is required, though, the inability to perform cross-translation-unit -(or "cross-module", in LLVM parlance) optimizations becomes more of a problem. To solve this, a new compilation -paradigm was designed, dubbed "Link-Time Optimization" (LTO). In this scheme, a compiler will not produce machine code -when processing a translation unit; rather, it will output the compiler's intermediate representation (e.g. LLVM bitcode). -Later on, when it is time for the linker to run, it will load all of the compiler IR into one giant module, run -optimization passes on the mega-module, and produce a final binary from that. - -This works quite well, if all that you're looking for is run-time performance. A major drawback of the LTO approach is -that all of the parallelism gained from optimizing translation units individually is now completely lost; instead, the -linker (using a plugin) will do a single-threaded pass of *all code* produced by compilation steps. This is extremely -slow, memory-intensive, and unable to be run incrementally. There are targets at Meta that simply can't be LTO-compiled +([Rendered](https://fburl.com/mermaid/rzup8o32). Compilation and optimization of +a, b, and c can proceed in parallel.) 
+ +In cases where absolute performance is required, though, the inability to +perform cross-translation-unit (or "cross-module", in LLVM parlance) +optimizations becomes more of a problem. To solve this, a new compilation +paradigm was designed, dubbed "Link-Time Optimization" (LTO). In this scheme, a +compiler will not produce machine code when processing a translation unit; +rather, it will output the compiler's intermediate representation (e.g. LLVM +bitcode). Later on, when it is time for the linker to run, it will load all of +the compiler IR into one giant module, run optimization passes on the +mega-module, and produce a final binary from that. + +This works quite well, if all that you're looking for is run-time performance. A +major drawback of the LTO approach is that all of the parallelism gained from +optimizing translation units individually is now completely lost; instead, the +linker (using a plugin) will do a single-threaded pass of _all code_ produced by +compilation steps. This is extremely slow, memory-intensive, and unable to be +run incrementally. There are targets at Meta that simply can't be LTO-compiled because of their size. ``` @@ -74,15 +83,21 @@ flowchart LR; main.o --> |ld| main ``` -([Rendered](https://fburl.com/mermaid/kid35io9). `a.bc`, `b.bc`, and `c.bc` are LLVM bitcode; they are all merged -together into a single module, `a_b_c_optimized.bc`, which is then optimized and codegen'd into a final binary.) -The idea of ThinLTO comes from a desire to maintain the ability to optimize modules in parallel while still -allowing for profitable cross-module optimizations. The idea is this: +([Rendered](https://fburl.com/mermaid/kid35io9). `a.bc`, `b.bc`, and `c.bc` are +LLVM bitcode; they are all merged together into a single module, +`a_b_c_optimized.bc`, which is then optimized and codegen'd into a final +binary.) -1. Just like regular LTO, the compiler emits bitcode instead of machine code. However, it also contains some light -metadata such as a call graph of symbols within the module. -2. The monolithic LTO link is split into three steps: `index`, `opt`, and `link`. +The idea of ThinLTO comes from a desire to maintain the ability to optimize +modules in parallel while still allowing for profitable cross-module +optimizations. The idea is this: + +1. Just like regular LTO, the compiler emits bitcode instead of machine code. + However, it also contains some light metadata such as a call graph of symbols + within the module. +2. The monolithic LTO link is split into three steps: `index`, `opt`, and + `link`. ``` flowchart LR; @@ -117,137 +132,192 @@ flowchart LR; ([Rendered](https://fburl.com/mermaid/56oc99t5)) -The `index` step looks like a link step. However, it does not produce a final binary; instead, it looks at every -compiler IR input file that it receives and heuristically determines which other IR modules it should be optimized -with in order to achieve profitable optimizations. These modules might include functions that the index step thinks -probably will get inlined, or globals that are read in the target IR input file. The output of the index step is a -series of files on disk that indicate which sibling object files should be present when optimizing a particular object -file, for each object file in the linker command-line. - -The `opt` step runs in parallel for every object file. Each object file will be optimized using the compiler's -optimizer (e.g. `opt`, for LLVM). 
The optimizer will combine the objects that were referenced as part of the index
-step as potentially profitable to include and optimize them all together.
-The `link` step takes the outputs of `opt` and links them together, like a normal linker.
-In practice, ThinLTO manages to recapture the inherent parallelism of C/C++ compilation by pushing the majority of work
-to the parallel `opt` phase of execution. When LLVM performs ThinLTO by default, it will launch a thread pool and process
-independent modules in parallel. ThinLTO does not produce as performant a binary as a monolithic LTO; however, in practice,
-ThinLTO binaries [paired with AutoFDO](https://fburl.com/wiki/q480euco) perform comparably to
-monolithic LTO. Furthermore,
-ThinLTO's greater efficiency allows for more expensive optimization passes to be run, which can further improve code quality
+The `index` step looks like a link step. However, it does not produce a final
+binary; instead, it looks at every compiler IR input file that it receives and
+heuristically determines which other IR modules it should be optimized with in
+order to achieve profitable optimizations. These modules might include functions
+that the index step thinks probably will get inlined, or globals that are read
+in the target IR input file. The output of the index step is a series of files
+on disk that indicate which sibling object files should be present when
+optimizing a particular object file, for each object file in the linker
+command-line.
+
+The `opt` step runs in parallel for every object file. Each object file will be
+optimized using the compiler's optimizer (e.g. `opt`, for LLVM). The optimizer
+will combine the objects that were referenced as part of the index step as
+potentially profitable to include and optimize them all together.
+
+The `link` step takes the outputs of `opt` and links them together, like a
+normal linker.
+
+In practice, ThinLTO manages to recapture the inherent parallelism of C/C++
+compilation by pushing the majority of work to the parallel `opt` phase of
+execution. When LLVM performs ThinLTO by default, it will launch a thread pool
+and process independent modules in parallel. ThinLTO does not produce as
+performant a binary as a monolithic LTO; however, in practice, ThinLTO binaries
+[paired with AutoFDO](https://fburl.com/wiki/q480euco) perform comparably to
+monolithic LTO. Furthermore, ThinLTO's greater efficiency allows for more
+expensive optimization passes to be run, which can further improve code quality
 near that of a monolithic LTO.
 
-This is all great, and ThinLTO has been in use at Meta for some time. However, Buck2 has the ability to take a step
-further than Buck1 could ever have - Buck2 can distribute parallel `opt` actions across many machines via Remote Execution
-to achieve drastic speedups in ThinLTO wall clock time, memory usage, and incrementality.
+This is all great, and ThinLTO has been in use at Meta for some time. However,
+Buck2 has the ability to take a step further than Buck1 ever could - Buck2
+can distribute parallel `opt` actions across many machines via Remote Execution
+to achieve drastic speedups in ThinLTO wall clock time, memory usage, and
+incrementality.
 
 ## Buck2's Implementation
 
-Buck2's role in a distributed ThinLTO compilation is to construct a graph of actions that directly mirrors the graph
-that the `index` step outputs. The graph that the `index` step outputs is entirely dynamic and, as such, the build
-system is only aware of what the graph could be after the `index` step is complete. Unlike Buck1 (or even Blaze/Bazel),
-Buck2 has explicit support for this paradigm [("dynamic dependencies")](https://fburl.com/gdoc/zklwhkll). Therefore, for Buck2, the basic strategy looks like:
-
-1. Invoke `clang` to act as `index`. `index` will output a file for every object file that indicates what other modules
-need to be present when running `opt` on the object file (an "imports file").
-2. Read imports files and construct a graph of dynamic `opt` actions whose dependencies mirror the contents of the imports files.
-3. Collect the outputs from the `opt` actions and invoke the linker to produce a final binary.
-
-Action `2` is inherently dynamic, since it must read the contents of files produced as part of action `1`. Furthermore,
-Buck2's support of `1` is complicated by the fact that certain Buck2 rules can produce an archive of object files as
-an output (namely, the Rust compiler). As a result, Buck2's implementation of Distributed ThinLTO is highly dynamic.
+Buck2's role in a distributed ThinLTO compilation is to construct a graph of
+actions that directly mirrors the graph that the `index` step outputs. The graph
+that the `index` step outputs is entirely dynamic and, as such, the build system
+is only aware of what the graph could be after the `index` step is complete.
+Unlike Buck1 (or even Blaze/Bazel), Buck2 has explicit support for this paradigm
+[("dynamic dependencies")](https://fburl.com/gdoc/zklwhkll). Therefore, for
+Buck2, the basic strategy looks like:
+
+1. Invoke `clang` to act as `index`. `index` will output a file for every object
+   file that indicates what other modules need to be present when running `opt`
+   on the object file (an "imports file").
+2. Read imports files and construct a graph of dynamic `opt` actions whose
+   dependencies mirror the contents of the imports files.
+3. Collect the outputs from the `opt` actions and invoke the linker to produce a
+   final binary.
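+
+In Buck2 Starlark, step `2` is expressed with the dynamic output API: a
+callback that may read artifacts produced by earlier actions and declare new
+actions based on what it finds. A much-simplified sketch of that shape
+(identifiers like `imports_file` and `opt_object` are illustrative, not the
+actual names used in `dist_lto.bzl`):
+
+```
+def _opt_plan(ctx: AnalysisContext, artifacts, outputs):
+    # Only inside the callback can the imports file, written by the index
+    # step, actually be read.
+    imports = artifacts[imports_file].read_string().splitlines()
+    # ... declare one `opt` action per object, with its imports as inputs ...
+
+ctx.actions.dynamic_output(
+    dynamic = [imports_file],            # produced by the index action
+    inputs = [],
+    outputs = [opt_object.as_output()],
+    f = _opt_plan,
+)
+```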
+
+Action `2` is inherently dynamic, since it must read the contents of files
+produced as part of action `1`. Furthermore, Buck2's support of `1` is
+complicated by the fact that certain Buck2 rules can produce an archive of
+object files as an output (namely, the Rust compiler). As a result, Buck2's
+implementation of Distributed ThinLTO is highly dynamic.
 
 Buck2's implementation contains four phases of actions:
 
-1. `thin_lto_prepare`, which specifically handles archives containing LLVM IR and prepares them to be inputs to `thin_lto_index`,
-2. `thin_lto_index`, which invokes LLVM's ThinLTO indexer to produce a imports list for every object file to be optimized,
-3. `thin_lto_opt`, which optimizes each object file in parallel with its imports present,
+1. `thin_lto_prepare`, which specifically handles archives containing LLVM IR
+   and prepares them to be inputs to `thin_lto_index`,
+2. `thin_lto_index`, which invokes LLVM's ThinLTO indexer to produce an imports
+   list for every object file to be optimized,
+3. `thin_lto_opt`, which optimizes each object file in parallel with its imports
+   present,
 4. `thin_lto_link`, which links together the optimized code into a final binary.
 
 ### thin_lto_prepare
 
-It is a reality of Buck2 today that some rules don't produce a statically-known list of object files. The list of object
-files is known *a priori* during C/C++ compilation, since they have a one-to-one correspondence to source files; however,
-the Rust compiler emits an archive of object files; without inspecting the archive, Buck2 has no way of knowing what
-the contents of the archive are, or even if they contain bitcode at all.
+It is a reality of Buck2 today that some rules don't produce a statically-known
+list of object files. The list of object files is known _a priori_ during C/C++
+compilation, since they have a one-to-one correspondence to source files;
+however, the Rust compiler emits an archive of object files; without inspecting
+the archive, Buck2 has no way of knowing what the contents of the archive are,
+or even if they contain bitcode at all.
 
-Future steps (particularly `thin_lto_index`) are defined to only operate on a list of object files - a limitation [inherited from LLVM](https://lists.llvm.org/pipermail/llvm-dev/2019-June/133145.html). Therefore, it is the job of `thin_lto_prepare` to turn an archive into a list of objects - namely, by extracting the archive into a directory.
+Future steps (particularly `thin_lto_index`) are defined to only operate on a
+list of object files - a limitation
+[inherited from LLVM](https://lists.llvm.org/pipermail/llvm-dev/2019-June/133145.html).
+Therefore, it is the job of `thin_lto_prepare` to turn an archive into a list of
+objects - namely, by extracting the archive into a directory.
 
-Buck2 dispatches a `thin_lto_prepare` action for every archive. Each prepare action has two outputs:
+Buck2 dispatches a `thin_lto_prepare` action for every archive. Each prepare
+action has two outputs:
 
-1. An **output directory** (called `objects` in the code), a directory that contains the unextracted contents of the archive.
-2. A **archive manifest**, a JSON document containing a list of object files that are contained in the output directory.
+1. An **output directory** (called `objects` in the code), a directory that
+   contains the extracted contents of the archive.
+2. An **archive manifest**, a JSON document containing a list of object files
+   that are contained in the output directory.
 
-The core logic of this action is implemented in the Python script `dist_lto_prepare.py`, contained in the `tools` directory. In addition to unpacking each archive, Buck2
-keeps track of the list of archives as a Starlark array that will be referenced by index
-in later steps.
+The core logic of this action is implemented in the Python script
+`dist_lto_prepare.py`, contained in the `tools` directory. In addition to
+unpacking each archive, Buck2 keeps track of the list of archives as a Starlark
+array that will be referenced by index in later steps.
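+
+The manifest itself is a small JSON document; schematically (the paths here
+are illustrative), it is little more than the `"objects"` list that the later
+planning step reads back with `read_json()`:
+
+```
+{ "objects": ["objects/libfoo/a.o", "objects/libfoo/b.o"] }
+```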
 
 ### thin_lto_index
 
-With all archives prepared, the next step is to invoke LLVM's ThinLTO indexer. For the purposes of Buck2, the indexer
-looks like a linker; because of this, Buck2 must construct a reasonable link line. Buck2 does this by iterating over the
-list of linkables that it has been given and constructing a link line from them. Uniquely for distributed ThinLTO, Buck2
-must wrap all objects that were derived from `thin_lto_prepare` (i.e. were extracted from archives) with `-Wl,--start-lib`
-and `-Wl,--end-lib` to ensure that they are still treated as if they were archives by the indexer.
-
-Invoking the indexer is relatively straightforward in that Buck2 invokes it like it would any other linker. However,
-once the indexer returns, Buck2 must post-process its output into a format that Buck2's Starlark can understand and
-translate into a graph of dynamic `opt` actions. The first thing that Buck2 is write a "meta file" to disk, which
-communicates inputs and outputs of `thin_lto_index` to a Python script, `dist_lto_planner.py`. The meta file contains
-a list of 7-tuples, whose members are:
-
-1. The path to the source bitcode file. This is used as an index into
-   a dictionary that records much of the metadata coming
-   from these lines.
-2. The path to an output file. `dist_lto_planner.py`is expected to place a
-   ThinLTO index file at this location (suffixed `.thinlto.bc`).
-3. The path to an output plan. This script is expected to place a link
-   plan here (a JSON document indicating which other object files this)
-   object file depends on, among other things.
-4. If this object file came from an archive, the index of the archive in
-   the Starlark archives array.
+With all archives prepared, the next step is to invoke LLVM's ThinLTO indexer.
+For the purposes of Buck2, the indexer looks like a linker; because of this,
+Buck2 must construct a reasonable link line. Buck2 does this by iterating over
+the list of linkables that it has been given and constructing a link line from
+them. Uniquely for distributed ThinLTO, Buck2 must wrap all objects that were
+derived from `thin_lto_prepare` (i.e. were extracted from archives) with
+`-Wl,--start-lib` and `-Wl,--end-lib` to ensure that they are still treated as
+if they were archives by the indexer.
+
+Invoking the indexer is relatively straightforward in that Buck2 invokes it like
+it would any other linker. However, once the indexer returns, Buck2 must
+post-process its output into a format that Buck2's Starlark can understand and
+translate into a graph of dynamic `opt` actions. The first thing that Buck2
+does is write a "meta file" to disk, which communicates inputs and outputs of
+`thin_lto_index` to a Python script, `dist_lto_planner.py`. The meta file
+contains a list of 7-tuples, whose members are:
+
+1. The path to the source bitcode file. This is used as an index into a
+   dictionary that records much of the metadata coming from these lines.
+2. The path to an output file. `dist_lto_planner.py` is expected to place a
+   ThinLTO index file at this location (suffixed `.thinlto.bc`).
+3. The path to an output plan. This script is expected to place a link plan
+   here (a JSON document indicating, among other things, which other object
+   files this object file depends on).
+4. If this object file came from an archive, the index of the archive in the
+   Starlark archives array.
 5. If this object file came from an archive, the name of the archive.
-6. If this object file came from an archive, the path to an output plan.
-   This script is expected to produce an archive link plan here (a JSON)
-   document similar to the object link plan, except containing link
-   information for every file in the archive from which this object
-   came.
+6. If this object file came from an archive, the path to an output plan. This
+   script is expected to produce an archive link plan here (a JSON document
+   similar to the object link plan, except containing link information for
+   every file in the archive from which this object came).
 7. If this object file came from an archive, the indexes directory of that
-   archive. This script is expected to place all ThinLTO indexes derived
-   from object files originating from this archive in that directory.
-
-There are two indices that are derived from this meta file: the object
-index (`mapping["index"]`) and the archive index (`mapping["archive_index"]`).
-These indices are indices into Starlark arrays for all objects and archive
-linkables, respectively. `dist_lto_planner.py` script does not inspect them; rather,
-it is expected to communicate these indices back to Starlark by writing them to the
+   archive. This script is expected to place all ThinLTO indexes derived from
+   object files originating from this archive in that directory.
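+
+Schematically, the Starlark side assembles this meta file as flat groups of
+seven values per object file. A simplified sketch (variable names here are
+illustrative, not the actual identifiers in `dist_lto.bzl`):
+
+```
+index_meta = cmd_args()
+for obj, bc_file, plan_file in objects_with_outputs:
+    index_meta.add(
+        obj,                             # 1. source bitcode file
+        outputs[bc_file].as_output(),    # 2. output .thinlto.bc index
+        outputs[plan_file].as_output(),  # 3. output link plan
+        "",                              # 4.-7. archive-only fields, left
+        "",                              #      empty for plain object files
+        "",
+        "",
+    )
+```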
+
+There are two indices that are derived from this meta file: the object index
+(`mapping["index"]`) and the archive index (`mapping["archive_index"]`). These
+are indices into Starlark arrays for all objects and archive linkables,
+respectively. The `dist_lto_planner.py` script does not inspect them; rather, it
+is expected to communicate these indices back to Starlark by writing them to the
 link plan.
 
-`dist_lto_planner.py` reads the index and imports file produced by LLVM and derives
-a number of artifacts:
-
-1. For each object file, a `thinlto.bc` file (`bitcode_file`). This file is the same as the input bitcode file, except that LLVM has inserted a number of module imports to refer to the other modules that will be present when the object file is optimized.
-2. For each object file, an optimization plan (`plan`). The optimization plan is a JSON document indicating how to construct an `opt` action for this object file. This plan includes
-this object file's module imports, whether or not this file contains bitcode at all, a location to place the optimized object file, and a list of archives that this object file imported.
-3. For each archive, an optimization plan (`archive_plan`), which contains optimization plans for all of the object files contained within the archive.
-
-This action is a dynamic action because, in the case that there are archives that needed to be preprocessed by `thin_lto_prepare`, this action must read the archive manifest.
+`dist_lto_planner.py` reads the index and imports file produced by LLVM and
+derives a number of artifacts:
+
+1. For each object file, a `thinlto.bc` file (`bitcode_file`). This file is the
+   same as the input bitcode file, except that LLVM has inserted a number of
+   module imports to refer to the other modules that will be present when the
+   object file is optimized.
+2. For each object file, an optimization plan (`plan`). The optimization plan is
+   a JSON document indicating how to construct an `opt` action for this object
+   file. This plan includes this object file's module imports, whether or not
+   this file contains bitcode at all, a location to place the optimized object
+   file, and a list of archives that this object file imported.
+3. For each archive, an optimization plan (`archive_plan`), which contains
+   optimization plans for all of the object files contained within the archive.
+
+This action is a dynamic action because, in the case that there are archives
+that needed to be preprocessed by `thin_lto_prepare`, this action must read the
+archive manifest.
 
 ### thin_lto_opt
 
-After `thin_lto_index` completes, Buck2 launches `thin_lto_opt` actions for every object file and for every archive. For each object file, Buck2 reads that object file's optimization plan.
-At this phase, it is Buck2's responsibility to declare dependencies on every object file referenced by that object's compilation plan; it does so here by adding `hidden` dependencies
-on every object file and archive that the archive plan says that this object depends on.
-
-`thin_lto_opt` uses a Python wrapper around LLVM because of a bug (T116695431) where LTO fatal errors don't prevent `clang` from returning an exit code of zero. The Python script wraps
-`clang` and exits with a non-zero exit code if `clang` produced an empty object file.
-
-For each archive, Buck2 reads the archive's optimization plan and constructs additional `thin_lto_opt` actions for each object file contained in the archive. Buck2 creates a directory of
-symlinks (`opt_objects`) that either contains symlinks to optimized object files (if the object file contained bitcode) or the original object file (if it didn't). The purpose of this symlink directory is to allow the final link to consume object files directly
-from this directory without having to know whether they were optimized or not. Paths to these files are passed to the link step
-via the optimization manifest (`opt_manifest`).
+After `thin_lto_index` completes, Buck2 launches `thin_lto_opt` actions for
+every object file and for every archive. For each object file, Buck2 reads that
+object file's optimization plan. At this phase, it is Buck2's responsibility to
+declare dependencies on every object file referenced by that object's
+compilation plan; it does so here by adding `hidden` dependencies on every
+object file and archive that the archive plan says this object depends on.
+
+`thin_lto_opt` uses a Python wrapper around LLVM because of a bug (T116695431)
+where LTO fatal errors don't prevent `clang` from returning an exit code of
+zero. The Python script wraps `clang` and exits with a non-zero exit code if
+`clang` produced an empty object file.
+
+For each archive, Buck2 reads the archive's optimization plan and constructs
+additional `thin_lto_opt` actions for each object file contained in the archive.
+Buck2 creates a directory of symlinks (`opt_objects`) that either contains
+symlinks to optimized object files (if the object file contained bitcode) or the
+original object file (if it didn't). The purpose of this symlink directory is to
+allow the final link to consume object files directly from this directory
+without having to know whether they were optimized or not. Paths to these files
+are passed to the link step via the optimization manifest (`opt_manifest`).
 
 ### thin_lto_link
 
-The final link step. Similar to `thin_lto_index`, this involves creating a link line to feed to the linker that uses the optimized artifacts that we just calculated. In cases where Buck2
-would put an archive on the link line, it instead inserts `-Wl,--start-lib`, `-Wl,--end-lib`, and references to the objects in `opt_objects`.
+The final link step. Similar to `thin_lto_index`, this involves creating a link
+line to feed to the linker that uses the optimized artifacts that we just
+calculated. In cases where Buck2 would put an archive on the link line, it
+instead inserts `-Wl,--start-lib`, `-Wl,--end-lib`, and references to the
+objects in `opt_objects`.
diff --git a/prelude/cxx/dist_lto/darwin_dist_lto.bzl b/prelude/cxx/dist_lto/darwin_dist_lto.bzl
new file mode 100644
index 0000000000000..bcd36834874d1
--- /dev/null
+++ b/prelude/cxx/dist_lto/darwin_dist_lto.bzl
@@ -0,0 +1,667 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load( + "@prelude//:artifact_tset.bzl", + "ArtifactTSet", +) +load("@prelude//:paths.bzl", "paths") +load("@prelude//cxx:cxx_context.bzl", "get_cxx_toolchain_info") +load( + "@prelude//cxx:cxx_link_utility.bzl", + "cxx_link_cmd_parts", + "get_extra_darwin_linker_flags", + "linker_map_args", +) +load("@prelude//cxx:cxx_toolchain_types.bzl", "LinkerType") +load("@prelude//cxx:link_types.bzl", "LinkOptions") +load("@prelude//cxx:target_sdk_version.bzl", "get_target_sdk_version_flags") +load( + "@prelude//linking:link_info.bzl", + "ArchiveLinkable", + "ExtraLinkerOutputs", + "FrameworksLinkable", # @unused Used as a type + "LinkInfo", + "LinkedObject", + "ObjectsLinkable", + "SharedLibLinkable", # @unused Used as a type + "SwiftRuntimeLinkable", # @unused Used as a type + "SwiftmoduleLinkable", # @unused Used as a type + "append_linkable_args", + "map_to_link_infos", +) +load("@prelude//linking:strip.bzl", "strip_object") +load("@prelude//utils:argfile.bzl", "at_argfile") +load("@prelude//utils:lazy.bzl", "lazy") + +_BitcodeLinkData = record( + name = str, + initial_object = Artifact, + bc_file = Artifact, + plan = Artifact, + opt_object = Artifact, + merged_bc = field([Artifact, None]), +) + +_ArchiveLinkData = record( + name = str, + manifest = Artifact, + # A file containing paths to artifacts that are known to reside in opt_objects_dir. + opt_manifest = Artifact, + objects_dir = Artifact, + opt_objects_dir = Artifact, + indexes_dir = Artifact, + plan = Artifact, + link_whole = bool, + merged_bc_dir = field([Artifact, None]), +) + +_DynamicLibraryLinkData = record( + linkable = SharedLibLinkable, +) + +_DataType = enum( + "bitcode", + "archive", + "dynamic_library", +) + +_IndexLinkData = record( + data_type = _DataType, + link_data = field([_BitcodeLinkData, _ArchiveLinkData, _DynamicLibraryLinkData]), +) + +def cxx_darwin_dist_link( + ctx: AnalysisContext, + # The destination for the link output. + output: Artifact, + opts: LinkOptions, + premerger_enabled: bool, + executable_link: bool, + linker_map: Artifact | None = None) -> (LinkedObject, dict[str, list[DefaultInfo]]): + """ + Perform a distributed thin-lto link into the supplied output + + Distributed thinlto splits the link into three stages: + 1. global "indexing" step + 2. many individual compilation unit optimization steps + 3. final global link step + + The 2nd and 3rd of those are done just by constructing compiler/linker commands (in dynamic_output + sections) using the output of the first. + + For the first, we need to post-process the linker index output to get it into a form + that is easy for us to consume from within bzl. + """ + + links = opts.links + + # A category suffix that will be added to the category of the link action that is generated. + category_suffix = opts.category_suffix + + # An identifier that will uniquely name this link action in the context of a category. Useful for + # differentiating multiple link actions in the same rule. 
+ identifier = opts.identifier + + def make_cat(c: str) -> str: + """ Used to make sure categories for our actions include the provided suffix """ + if category_suffix != None: + return c + "_" + category_suffix + return c + + def make_id(i: str) -> str: + """ Used to make sure identifiers for our actions include the provided identifier """ + if identifier != None: + return identifier + "_" + i + return i + + recorded_outputs = {} + + def name_for_obj(link_name: str, object_artifact: Artifact) -> str: + """ Creates a unique name/path we can use for a particular object file input """ + prefix = "{}/{}".format(link_name, object_artifact.short_path) + + # it's possible (though unlikely) that we can get duplicate name/short_path, so just uniquify them + if prefix in recorded_outputs: + recorded_outputs[prefix] += 1 + extra = recorded_outputs[prefix] + prefix = "{}-{}".format(prefix, extra) + else: + recorded_outputs[prefix] = 1 + return prefix + + names = {} + + def name_for_link(info: LinkInfo) -> str: + """ Creates a unique name for a LinkInfo that we are consuming """ + name = info.name or "unknown" + if name not in names: + names[name] = 1 + else: + names[name] += 1 + name += "-{}".format(names[name]) + return make_id(name) + + link_infos = map_to_link_infos(links) + + cxx_toolchain = get_cxx_toolchain_info(ctx) + lto_planner = cxx_toolchain.internal_tools.dist_lto.planner[LinkerType("darwin")] + lto_opt = cxx_toolchain.internal_tools.dist_lto.opt[LinkerType("darwin")] + lto_prepare = cxx_toolchain.internal_tools.dist_lto.prepare + lto_copy = cxx_toolchain.internal_tools.dist_lto.copy + + unsorted_index_link_data = [] + linker_flags = [] + common_link_flags = cmd_args(get_target_sdk_version_flags(ctx), get_extra_darwin_linker_flags()) + extra_codegen_flags = get_target_sdk_version_flags(ctx) + + # Information used to construct the dynamic plan: + plan_inputs = [] + plan_outputs = [] + + # Information used to construct the opt dynamic outputs: + archive_opt_manifests = [] + + prepare_cat = make_cat("thin_lto_prepare") + + for link in link_infos: + link_name = name_for_link(link) + + linker_flags.append(link.pre_flags) + linker_flags.append(link.post_flags) + + for linkable in link.linkables: + if isinstance(linkable, ObjectsLinkable): + for obj in linkable.objects: + name = name_for_obj(link_name, obj) + bc_output = ctx.actions.declare_output(name + ".thinlto.bc") + plan_output = ctx.actions.declare_output(name + ".opt.plan") + opt_output = ctx.actions.declare_output(name + ".opt.o") + merged_bc_output = None + if premerger_enabled: + merged_bc_output = ctx.actions.declare_output(name + ".merged.bc") + plan_outputs.append(merged_bc_output.as_output()) + + data = _IndexLinkData( + data_type = _DataType("bitcode"), + link_data = _BitcodeLinkData( + name = name, + initial_object = obj, + bc_file = bc_output, + plan = plan_output, + opt_object = opt_output, + merged_bc = merged_bc_output, + ), + ) + unsorted_index_link_data.append(data) + plan_outputs.extend([bc_output.as_output(), plan_output.as_output()]) + + elif isinstance(linkable, ArchiveLinkable): + # Our implementation of Distributed ThinLTO operates on individual objects, not archives. Since these + # archives might still contain LTO-able bitcode, we first extract the objects within the archive into + # another directory and write a "manifest" containing the list of objects that the archive contained. 
+ # + # Later actions in the LTO compilation pipeline will read this manifest and dynamically dispatch + # actions on the objects that the manifest reports. + + name = name_for_obj(link_name, linkable.archive.artifact) + archive_manifest = ctx.actions.declare_output("%s/%s/manifest.json" % (prepare_cat, name)) + archive_objects = ctx.actions.declare_output("%s/%s/objects" % (prepare_cat, name), dir = True) + archive_opt_objects = ctx.actions.declare_output("%s/%s/opt_objects" % (prepare_cat, name), dir = True) + archive_indexes = ctx.actions.declare_output("%s/%s/indexes" % (prepare_cat, name), dir = True) + archive_plan = ctx.actions.declare_output("%s/%s/plan.json" % (prepare_cat, name)) + archive_opt_manifest = ctx.actions.declare_output("%s/%s/opt_objects.manifest" % (prepare_cat, name)) + archive_merged_bc_files = None + if premerger_enabled: + archive_merged_bc_files = ctx.actions.declare_output("%s/%s/merged_bc_files" % (prepare_cat, name), dir = True) + plan_outputs.append(archive_merged_bc_files.as_output()) + + prepare_args = cmd_args([ + lto_prepare, + "--manifest-out", + archive_manifest.as_output(), + "--objects-out", + archive_objects.as_output(), + "--ar", + cxx_toolchain.linker_info.archiver, + "--archive", + linkable.archive.artifact, + "--name", + name, + ]) + ctx.actions.run(prepare_args, category = make_cat("thin_lto_prepare"), identifier = name) + + data = _IndexLinkData( + data_type = _DataType("archive"), + link_data = _ArchiveLinkData( + name = name, + manifest = archive_manifest, + opt_manifest = archive_opt_manifest, + objects_dir = archive_objects, + opt_objects_dir = archive_opt_objects, + indexes_dir = archive_indexes, + plan = archive_plan, + link_whole = linkable.link_whole, + merged_bc_dir = archive_merged_bc_files, + ), + ) + unsorted_index_link_data.append(data) + archive_opt_manifests.append(archive_opt_manifest) + plan_inputs.extend([archive_manifest, archive_objects]) + plan_outputs.extend([archive_indexes.as_output(), archive_plan.as_output()]) + elif isinstance(linkable, SharedLibLinkable): + data = _IndexLinkData( + data_type = _DataType("dynamic_library"), + link_data = _DynamicLibraryLinkData(linkable = linkable), + ) + unsorted_index_link_data.append(data) + elif isinstance(linkable, FrameworksLinkable) or isinstance(linkable, SwiftRuntimeLinkable) or isinstance(linkable, SwiftmoduleLinkable): + # These linkables are handled separately for flag deduplication purposes, as in append_linkable_args: + # https://www.internalfb.com/code/fbsource/[c6d2c820b394]/fbcode/buck2/prelude/linking/link_info.bzl?lines=271-278 + pass + else: + fail("Unhandled linkable type: {}".format(str(linkable))) + + def sort_index_link_data(input_list: list[_IndexLinkData]) -> list[_IndexLinkData]: + # Sort link datas to reduce binary size. The idea is to encourage the linker to load the minimal number of object files possible. 
We load force-loaded archives first (since they will be loaded no matter what), then non-lazy object files (which will also be loaded no matter what), then shared libraries (to share as many symbols as possible), and finally regular archives.
+        force_loaded_archives = []
+        regular_archives = []
+        object_files = []
+        dynamic_libraries = []
+        for link_data in input_list:
+            if link_data.data_type == _DataType("bitcode"):
+                object_files.append(link_data)
+            elif link_data.data_type == _DataType("archive"):
+                if link_data.link_data.link_whole:
+                    force_loaded_archives.append(link_data)
+                else:
+                    regular_archives.append(link_data)
+            elif link_data.data_type == _DataType("dynamic_library"):
+                dynamic_libraries.append(link_data)
+
+        return force_loaded_archives + object_files + dynamic_libraries + regular_archives
+
+    sorted_index_link_data = sort_index_link_data(unsorted_index_link_data)
+
+    index_argsfile_out = ctx.actions.declare_output(output.basename + ".thinlto_index_argsfile")
+    final_link_index = ctx.actions.declare_output(output.basename + ".final_link_index")
+
+    def prepare_index_flags(include_inputs: bool, index_args_out: cmd_args, index_meta_args_out: cmd_args, ctx: AnalysisContext, artifacts, outputs):
+        for flag in linker_flags:
+            index_args_out.add(flag)
+
+        if include_inputs:
+            # buildifier: disable=uninitialized
+            for idx, artifact in enumerate(sorted_index_link_data):
+                link_data = artifact.link_data
+
+                if artifact.data_type == _DataType("bitcode"):
+                    index_args_out.add(link_data.initial_object)
+                    if premerger_enabled:
+                        index_meta_args_out.add(link_data.initial_object, outputs[link_data.bc_file].as_output(), outputs[link_data.merged_bc].as_output(), outputs[link_data.plan].as_output(), str(idx), "", "", "", "")
+                    else:
+                        index_meta_args_out.add(link_data.initial_object, outputs[link_data.bc_file].as_output(), "", outputs[link_data.plan].as_output(), str(idx), "", "", "", "")
+
+                elif artifact.data_type == _DataType("archive"):
+                    manifest = artifacts[link_data.manifest].read_json()
+
+                    if not manifest["objects"]:
+                        # Despite not having any objects (and thus not needing a plan), we still need to bind the plan output.
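For reference, the bucketing that `sort_index_link_data` performs above is easy to state in plain Python. This is an illustrative sketch only; the `Entry` record is a hypothetical stand-in for the Starlark `_IndexLinkData`, not the prelude's API:

```python
from dataclasses import dataclass

@dataclass
class Entry:  # hypothetical stand-in for _IndexLinkData
    kind: str              # "bitcode" | "archive" | "dynamic_library"
    link_whole: bool = False

def sort_entries(entries: list[Entry]) -> list[Entry]:
    # Force-loaded archives, then eager objects, then dylibs, then lazy archives.
    force_loaded, objects, dylibs, regular = [], [], [], []
    for e in entries:
        if e.kind == "bitcode":
            objects.append(e)
        elif e.kind == "archive":
            (force_loaded if e.link_whole else regular).append(e)
        else:
            dylibs.append(e)
    return force_loaded + objects + dylibs + regular

print([e.kind for e in sort_entries([
    Entry("archive"), Entry("bitcode"),
    Entry("archive", link_whole=True), Entry("dynamic_library"),
])])  # the link_whole archive sorts first, the lazy archive last
```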
+                        ctx.actions.write(outputs[link_data.plan].as_output(), "{}")
+                        make_indexes_dir_cmd = cmd_args(["/bin/sh", "-c", "mkdir -p \"$1\"", "--", outputs[link_data.indexes_dir].as_output()])
+                        ctx.actions.run(make_indexes_dir_cmd, category = make_cat("thin_lto_mkdir"), identifier = link_data.name + "_indexes_dir")
+                        if premerger_enabled:
+                            make_merged_bc_dir_cmd = cmd_args(["/bin/sh", "-c", "mkdir -p \"$1\"", "--", outputs[link_data.merged_bc_dir].as_output()])
+                            ctx.actions.run(make_merged_bc_dir_cmd, category = make_cat("thin_lto_mkdir"), identifier = link_data.name + "_merged_bc_dir")
+                        continue
+
+                    index_args_out.add(cmd_args(hidden = link_data.objects_dir))
+
+                    if not link_data.link_whole:
+                        index_args_out.add("-Wl,--start-lib")
+
+                    for obj in manifest["objects"]:
+                        if premerger_enabled:
+                            index_meta_args_out.add(obj, "", "", "", str(idx), link_data.name, outputs[link_data.plan].as_output(), outputs[link_data.indexes_dir].as_output(), outputs[link_data.merged_bc_dir].as_output())
+                        else:
+                            index_meta_args_out.add(obj, "", "", "", str(idx), link_data.name, outputs[link_data.plan].as_output(), outputs[link_data.indexes_dir].as_output(), "")
+                        index_args_out.add(obj)
+
+                    if not link_data.link_whole:
+                        index_args_out.add("-Wl,--end-lib")
+
+                elif artifact.data_type == _DataType("dynamic_library"):
+                    append_linkable_args(index_args_out, link_data.linkable)
+
+                else:
+                    fail("Unhandled data type: {}".format(str(artifact.data_type)))
+
+        output_as_string = cmd_args(output, ignore_artifacts = True)
+        index_args_out.add("-o", output_as_string)
+
+    # The flags used for the thin-link action. Unlike index_args, these do not include input files;
+    # they are used only for debugging and testing and can be computed without a dynamic output.
+    index_flags_for_debugging = cmd_args()
+    index_cmd_parts = cxx_link_cmd_parts(cxx_toolchain, executable_link)
+    index_flags_for_debugging.add(index_cmd_parts.linker_flags)
+    index_flags_for_debugging.add(common_link_flags)
+    index_flags_for_debugging.add(index_cmd_parts.post_linker_flags)
+    prepare_index_flags(include_inputs = False, index_args_out = index_flags_for_debugging, index_meta_args_out = cmd_args(), ctx = ctx, artifacts = None, outputs = None)
+    index_flags_for_debugging_argsfile, _ = ctx.actions.write(output.basename + ".thinlto_index_debugging_argsfile", index_flags_for_debugging, allow_args = True)
+
+    def dynamic_plan(link_plan: Artifact, index_argsfile_out: Artifact, final_link_index: Artifact):
+        def plan(ctx: AnalysisContext, artifacts, outputs):
+            # index link command args
+            index_args = cmd_args()
+
+            # See the comments in dist_lto_planner.py for the semantics of the values pushed into index_meta.
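The meta file that `index_meta` accumulates is a flat, newline-separated stream: each `add()` call above contributes nine fields, with empty strings standing in for fields that do not apply. A minimal parsing sketch under that assumption (the field names are inferred from the `add()` calls above; the real consumer is the per-linker planner script, whose parsing is not shown in this diff):

```python
from typing import Dict, Iterator, List

# Field order assumed from the index_meta_args_out.add(...) calls above.
FIELDS: List[str] = [
    "object", "index_out", "merged_bc_out", "plan_out", "starlark_index",
    "archive_name", "archive_plan", "archive_indexes_dir", "archive_merged_bc_dir",
]

def read_meta(path: str) -> Iterator[Dict[str, str]]:
    with open(path) as f:
        lines = [line.rstrip("\n") for line in f]
    while lines and lines[-1] == "":
        lines.pop()  # the writer terminates the file with a blank line
    assert len(lines) % len(FIELDS) == 0, "meta file must contain whole records"
    for i in range(0, len(lines), len(FIELDS)):
        yield dict(zip(FIELDS, lines[i : i + len(FIELDS)]))
```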
+            index_meta = cmd_args()
+
+            prepare_index_flags(include_inputs = True, index_args_out = index_args, index_meta_args_out = index_meta, ctx = ctx, artifacts = artifacts, outputs = outputs)
+
+            index_argfile, _ = ctx.actions.write(
+                outputs[index_argsfile_out].as_output(),
+                index_args,
+                allow_args = True,
+            )
+
+            index_cat = make_cat("thin_lto_index")
+            index_file_out = ctx.actions.declare_output(make_id(index_cat) + "/index")
+            index_out_dir = cmd_args(index_file_out.as_output(), parent = 1)
+
+            index_cmd_parts = cxx_link_cmd_parts(cxx_toolchain, executable_link)
+
+            index_cmd = index_cmd_parts.link_cmd
+            index_cmd.add(common_link_flags)
+            index_cmd.add(cmd_args(index_argfile, format = "@{}"))
+
+            index_cmd.add(cmd_args(index_file_out.as_output(), format = "-Wl,--thinlto-index-only={}"))
+            index_cmd.add("-Wl,--thinlto-emit-imports-files")
+            index_cmd.add("-Wl,--thinlto-full-index")
+            index_cmd.add(cmd_args(index_out_dir, format = "-Wl,--thinlto-prefix-replace=;{}/"))
+            index_cmd.add(index_cmd_parts.post_linker_flags)
+
+            # Terminate the index file with a newline.
+            index_meta.add("")
+            index_meta_file = ctx.actions.write(
+                output.basename + ".thinlto.meta",
+                index_meta,
+            )
+
+            plan_cmd = cmd_args([lto_planner, "--meta", index_meta_file, "--index", index_out_dir, "--link-plan", outputs[link_plan].as_output(), "--final-link-index", outputs[final_link_index].as_output()])
+            if premerger_enabled:
+                plan_cmd.add("--enable-premerger")
+            plan_cmd.add("--", index_cmd)
+
+            plan_cmd.add(cmd_args(hidden = [
+                index_meta,
+                index_args,
+            ]))
+
+            ctx.actions.run(plan_cmd, category = index_cat, identifier = identifier, local_only = True)
+
+        # TODO(T117513091) - dynamic_output does not allow for an empty list of dynamic inputs. If we have no archives
+        # to process, we will have no dynamic inputs, and the plan action can be non-dynamic.
+        #
+        # However, buck2 disallows `dynamic_output` with an empty input list. We also can't call our `plan` function
+        # directly, since it uses `ctx.outputs` to bind its outputs. Instead of doing Starlark hacks to work around
+        # the lack of `ctx.outputs`, we declare an empty file as a dynamic input.
+        plan_inputs.append(ctx.actions.write(output.basename + ".plan_hack.txt", ""))
+        plan_outputs.extend([link_plan.as_output(), index_argsfile_out.as_output(), final_link_index.as_output()])
+        ctx.actions.dynamic_output(dynamic = plan_inputs, inputs = [], outputs = plan_outputs, f = plan)
+
+    link_plan_out = ctx.actions.declare_output(output.basename + ".link-plan.json")
+    dynamic_plan(link_plan = link_plan_out, index_argsfile_out = index_argsfile_out, final_link_index = final_link_index)
+
+    def prepare_opt_flags(link_infos: list[LinkInfo]) -> cmd_args:
+        opt_flags = cmd_args(cxx_toolchain.linker_info.dist_thin_lto_codegen_flags)
+        opt_flags.add(extra_codegen_flags)
+        for link in link_infos:
+            opt_flags.add(link.dist_thin_lto_codegen_flags)
+        return opt_flags
+
+    common_opt_cmd = cmd_args(cxx_toolchain.linker_info.linker)
+    common_opt_cmd.add(prepare_opt_flags(link_infos))
+
+    # Create an argsfile and dump all the flags to be processed later by lto_opt.
+    # These flags are common to all opt actions, so we don't need an argsfile for
+    # each action; one for the entire link unit will do.
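Argsfiles sidestep command-line length limits and make the flag set easy to inspect; the opt actions below hand the file to the wrapper via `--args` instead of repeating the flags per action. A generic sketch of the pattern (file names invented for illustration):

```python
import pathlib

codegen_flags = ["-O2", "-ffunction-sections", "-fdata-sections"]

# Write the shared flags once for the whole link unit...
argsfile = pathlib.Path("unit.lto_opt_argsfile")
argsfile.write_text("\n".join(codegen_flags) + "\n")

# ...and let each opt invocation expand them, one argument per line.
def expand_argsfile(path: pathlib.Path) -> list[str]:
    return [line for line in path.read_text().splitlines() if line]

cmd = ["clang", *expand_argsfile(argsfile), "-c", "input.bc"]  # illustrative only
print(cmd)
```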
+    opt_argsfile = ctx.actions.declare_output(output.basename + ".lto_opt_argsfile")
+    ctx.actions.write(opt_argsfile.as_output(), common_opt_cmd, allow_args = True)
+
+    # We don't want the linker itself in the argsfile for debugging / testing codegen flags.
+    opt_flags_for_debugging = prepare_opt_flags(link_infos)
+    opt_flags_for_debugging_argsfile = ctx.actions.declare_output(output.basename + ".thin_lto_codegen_debugging_argsfile")
+    ctx.actions.write(opt_flags_for_debugging_argsfile.as_output(), opt_flags_for_debugging, allow_args = True)
+
+    # We declare a separate dynamic_output for every object file. It would
+    # maybe be simpler to have a single dynamic_output that produced all the
+    # opt actions, but an action needs to re-run whenever the analysis that
+    # produced it re-runs. And so, with a single dynamic_output, we'd need to
+    # re-run all actions when any of the plans changed.
+    def dynamic_optimize(name: str, initial_object: Artifact, bc_file: Artifact, plan: Artifact, opt_object: Artifact, merged_bc: Artifact | None):
+        def optimize_object(ctx: AnalysisContext, artifacts, outputs):
+            plan_json = artifacts[plan].read_json()
+
+            # If the object was not compiled with thinlto flags, then there
+            # won't be valid outputs for it from the indexing, but we still
+            # need to bind the artifact. Similarly, if a bitcode file is not
+            # loaded by the indexing phase, or is absorbed by another module,
+            # there is no point in optimizing it.
+            if "not_loaded_by_linker" in plan_json or not plan_json["is_bc"] or ("merge_state" in plan_json and plan_json["merge_state"] == "ABSORBED"):
+                ctx.actions.write(outputs[opt_object], "")
+                return
+
+            opt_cmd = cmd_args(lto_opt)
+            opt_cmd.add("--out", outputs[opt_object].as_output())
+            if premerger_enabled:
+                if plan_json["merge_state"] == "STANDALONE":
+                    opt_cmd.add("--input", initial_object)
+                elif plan_json["merge_state"] == "ROOT":
+                    opt_cmd.add("--input", merged_bc)
+                else:
+                    fail("Invalid merge state {} for bitcode file: {}".format(plan_json["merge_state"], bc_file))
+            else:
+                opt_cmd.add("--input", initial_object)
+
+            opt_cmd.add("--index", bc_file)
+
+            opt_cmd.add(cmd_args(hidden = common_opt_cmd))
+            opt_cmd.add("--args", opt_argsfile)
+
+            opt_cmd.add("--")
+            opt_cmd.add(cxx_toolchain.cxx_compiler_info.compiler)
+
+            imported_input_bitcode_files = [sorted_index_link_data[idx].link_data.initial_object for idx in plan_json["imports"]]
+            imported_archives_input_bitcode_files_directory = [sorted_index_link_data[idx].link_data.objects_dir for idx in plan_json["archive_imports"]]
+
+            if premerger_enabled:
+                imported_merged_input_bitcode_files = [sorted_index_link_data[idx].link_data.merged_bc for idx in plan_json["imports"]]
+                imported_archives_merged_bitcode_files_directory = [sorted_index_link_data[idx].link_data.merged_bc_dir for idx in plan_json["archive_imports"]]
+                opt_cmd.add(cmd_args(hidden = imported_merged_input_bitcode_files + imported_archives_merged_bitcode_files_directory))
+
+            opt_cmd.add(cmd_args(hidden = imported_input_bitcode_files + imported_archives_input_bitcode_files_directory))
+            ctx.actions.run(opt_cmd, category = make_cat("thin_lto_opt_object"), identifier = name)
+
+        ctx.actions.dynamic_output(dynamic = [plan], inputs = [], outputs = [opt_object.as_output()], f = optimize_object)
+
+    def dynamic_optimize_archive(archive: _ArchiveLinkData):
+        def optimize_archive(ctx: AnalysisContext, artifacts, outputs):
+            plan_json = artifacts[archive.plan].read_json()
+            if "objects" not in plan_json or not plan_json["objects"] or
lazy.is_all(lambda e: not e["is_bc"], plan_json["objects"]): + # Nothing in this directory was lto-able; let's just copy the archive. + ctx.actions.copy_file(outputs[archive.opt_objects_dir], archive.objects_dir) + ctx.actions.write(outputs[archive.opt_manifest], "") + return + + output_dir = {} + output_manifest = cmd_args() + for entry in plan_json["objects"]: + if "not_loaded_by_linker" in entry: + continue + + if premerger_enabled and entry["merge_state"] == "ABSORBED": + continue + + base_dir = plan_json["base_dir"] + source_path = paths.relativize(entry["path"], base_dir) + if not entry["is_bc"]: + opt_object = ctx.actions.declare_output("%s/%s" % (make_cat("thin_lto_opt_copy"), source_path)) + output_manifest.add(opt_object) + copy_cmd = cmd_args([ + lto_copy, + "--to", + opt_object.as_output(), + "--from", + entry["path"], + ], hidden = archive.objects_dir) + ctx.actions.run(copy_cmd, category = make_cat("thin_lto_opt_copy"), identifier = source_path) + output_dir[source_path] = opt_object + continue + + opt_object = ctx.actions.declare_output("%s/%s" % (make_cat("thin_lto_opt_archive"), source_path)) + output_manifest.add(opt_object) + output_dir[source_path] = opt_object + opt_cmd = cmd_args(lto_opt) + opt_cmd.add("--out", opt_object.as_output()) + if premerger_enabled and entry["merge_state"] == "ROOT": + opt_cmd.add("--input", entry["merged_bitcode_path"]) + else: + opt_cmd.add("--input", entry["path"]) + opt_cmd.add("--index", entry["bitcode_file"]) + + opt_cmd.add(cmd_args(hidden = common_opt_cmd)) + opt_cmd.add("--args", opt_argsfile) + + opt_cmd.add("--") + opt_cmd.add(cxx_toolchain.cxx_compiler_info.compiler) + + imported_input_bitcode_files = [sorted_index_link_data[idx].link_data.initial_object for idx in entry["imports"]] + imported_archives_input_bitcode_files_directory = [sorted_index_link_data[idx].link_data.objects_dir for idx in entry["archive_imports"]] + if premerger_enabled: + imported_merged_input_bitcode_files = [sorted_index_link_data[idx].link_data.merged_bc for idx in entry["imports"]] + imported_archives_merged_bitcode_files_directory = [sorted_index_link_data[idx].link_data.merged_bc_dir for idx in entry["archive_imports"]] + opt_cmd.add(cmd_args(hidden = imported_merged_input_bitcode_files + imported_archives_merged_bitcode_files_directory + [archive.merged_bc_dir])) + + opt_cmd.add(cmd_args( + hidden = imported_input_bitcode_files + imported_archives_input_bitcode_files_directory + [archive.indexes_dir, archive.objects_dir], + )) + ctx.actions.run(opt_cmd, category = make_cat("thin_lto_opt_archive"), identifier = source_path) + + ctx.actions.symlinked_dir(outputs[archive.opt_objects_dir], output_dir) + ctx.actions.write(outputs[archive.opt_manifest], output_manifest, allow_args = True) + + archive_opt_inputs = [archive.plan] + archive_opt_outputs = [archive.opt_objects_dir.as_output(), archive.opt_manifest.as_output()] + ctx.actions.dynamic_output(dynamic = archive_opt_inputs, inputs = [], outputs = archive_opt_outputs, f = optimize_archive) + + for artifact in sorted_index_link_data: + link_data = artifact.link_data + if artifact.data_type == _DataType("bitcode"): + dynamic_optimize( + name = link_data.name, + initial_object = link_data.initial_object, + bc_file = link_data.bc_file, + plan = link_data.plan, + opt_object = link_data.opt_object, + merged_bc = link_data.merged_bc, + ) + elif artifact.data_type == _DataType("archive"): + dynamic_optimize_archive(link_data) + + linker_argsfile_out = ctx.actions.declare_output(output.basename + 
".thinlto_link_argsfile") + + # Declare any extra outputs here so we can look them up in the final_link closure + extra_outputs = opts.extra_linker_outputs_factory(ctx) if opts.extra_linker_outputs_factory else ExtraLinkerOutputs() + + def thin_lto_final_link(ctx: AnalysisContext, artifacts, outputs): + plan = artifacts[link_plan_out].read_json() + link_args = cmd_args() + plan_index = {int(k): v for k, v in plan["index"].items()} + + # non_lto_objects are the ones that weren't compiled with thinlto + # flags. In that case, we need to link against the original object. + non_lto_objects = {int(k): 1 for k in plan["non_lto_objects"]} + opt_objects = [] + for flag in linker_flags: + link_args.add(flag) + + for idx, artifact in enumerate(sorted_index_link_data): + if artifact.data_type == _DataType("dynamic_library"): + append_linkable_args(link_args, artifact.link_data.linkable) + elif artifact.data_type == _DataType("bitcode"): + if idx in plan_index: + opt_objects.append(artifact.link_data.opt_object) + elif idx in non_lto_objects: + opt_objects.append(artifact.link_data.initial_object) + + link_cmd_parts = cxx_link_cmd_parts(cxx_toolchain, executable_link) + link_cmd = link_cmd_parts.link_cmd + link_cmd.add(common_link_flags) + link_cmd_hidden = [] + + if opts.extra_linker_outputs_flags_factory != None: + # We need the inner artifacts here + mapped_outputs = {output_type: outputs[artifact] for output_type, artifact in extra_outputs.artifacts.items()} + link_cmd.add(opts.extra_linker_outputs_flags_factory(ctx, mapped_outputs)) + + # buildifier: disable=uninitialized + for artifact in sorted_index_link_data: + if artifact.data_type == _DataType("archive"): + link_cmd_hidden.append(artifact.link_data.opt_objects_dir) + link_cmd.add(at_argfile( + actions = ctx.actions, + name = outputs[linker_argsfile_out], + args = link_args, + allow_args = True, + )) + link_cmd.add(cmd_args(final_link_index, format = "@{}")) + link_cmd.add("-o", outputs[output].as_output()) + if linker_map: + link_cmd.add(linker_map_args(cxx_toolchain, outputs[linker_map].as_output()).flags) + link_cmd_hidden.extend([ + link_args, + opt_objects, + ]) + link_cmd.add(link_cmd_parts.post_linker_flags) + link_cmd.add(cmd_args(hidden = link_cmd_hidden)) + + ctx.actions.run(link_cmd, category = make_cat("thin_lto_link"), identifier = identifier, local_only = True) + + final_link_inputs = [link_plan_out, final_link_index] + archive_opt_manifests + final_link_outputs = [output.as_output(), linker_argsfile_out.as_output()] + if linker_map: + final_link_outputs.append(linker_map.as_output()) + + final_link_outputs += [o.as_output() for o in extra_outputs.artifacts.values()] + + ctx.actions.dynamic_output( + dynamic = final_link_inputs, + inputs = [], + outputs = final_link_outputs, + f = thin_lto_final_link, + ) + + final_output = output + unstripped_output = output + if opts.strip: + strip_args = opts.strip_args_factory(ctx) if opts.strip_args_factory else cmd_args() + final_output = strip_object(ctx, cxx_toolchain, final_output, strip_args, category_suffix) + + return LinkedObject( + output = final_output, + unstripped_output = unstripped_output, + prebolt_output = output, + dwp = None, + external_debug_info = ArtifactTSet(), + linker_argsfile = linker_argsfile_out, + linker_filelist = None, # DistLTO doesn't use filelists + linker_command = None, # There is no notion of a single linker command for DistLTO + index_argsfile = index_argsfile_out, + dist_thin_lto_codegen_argsfile = opt_flags_for_debugging_argsfile, + 
dist_thin_lto_index_argsfile = index_flags_for_debugging_argsfile, + ), extra_outputs.providers diff --git a/prelude/cxx/dist_lto/dist_lto.bzl b/prelude/cxx/dist_lto/dist_lto.bzl index 71f3b450bba8e..08aa45b9a0896 100644 --- a/prelude/cxx/dist_lto/dist_lto.bzl +++ b/prelude/cxx/dist_lto/dist_lto.bzl @@ -16,22 +16,23 @@ load( "bolt", "cxx_use_bolt", ) +load("@prelude//cxx:cxx_context.bzl", "get_cxx_toolchain_info") load( "@prelude//cxx:cxx_link_utility.bzl", - "cxx_link_cmd", + "cxx_link_cmd_parts", "linker_map_args", ) -load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxToolchainInfo") +load("@prelude//cxx:cxx_toolchain_types.bzl", "LinkerType") load("@prelude//cxx:debug.bzl", "SplitDebugMode") load( "@prelude//cxx:dwp.bzl", "run_dwp_action", ) +load("@prelude//cxx:link_types.bzl", "LinkOptions") load( "@prelude//linking:link_info.bzl", "ArchiveLinkable", "FrameworksLinkable", # @unused Used as a type - "LinkArgs", "LinkInfo", "LinkedObject", "ObjectsLinkable", @@ -40,7 +41,10 @@ load( "map_to_link_infos", "unpack_external_debug_info", ) -load("@prelude//utils:utils.bzl", "is_all") +load("@prelude//linking:stamp_build_info.bzl", "stamp_build_info") +load("@prelude//linking:strip.bzl", "strip_object") +load("@prelude//utils:argfile.bzl", "at_argfile") +load("@prelude//utils:lazy.bzl", "lazy") _BitcodeLinkData = record( name = str, @@ -79,17 +83,12 @@ _PrePostFlags = record( post_flags = list, ) -def cxx_dist_link( +def cxx_gnu_dist_link( ctx: AnalysisContext, - links: list[LinkArgs], # The destination for the link output. output: Artifact, - linker_map: [Artifact, None] = None, - # A category suffix that will be added to the category of the link action that is generated. - category_suffix: [str, None] = None, - # An identifier that will uniquely name this link action in the context of a category. Useful for - # differentiating multiple link actions in the same rule. - identifier: [str, None] = None, + opts: LinkOptions, + linker_map: Artifact | None = None, # This action will only happen if split_dwarf is enabled via the toolchain. generate_dwp: bool = True, executable_link: bool = True) -> LinkedObject: @@ -108,6 +107,15 @@ def cxx_dist_link( that is easy for us to consume from within bzl. """ + links = opts.links + + # A category suffix that will be added to the category of the link action that is generated. + category_suffix = opts.category_suffix + + # An identifier that will uniquely name this link action in the context of a category. Useful for + # differentiating multiple link actions in the same rule. 
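As the comments above say, the category suffix and the identifier exist only to keep action names unique when a single rule performs several links; the helpers are plain string composition. A standalone sketch (values are made up):

```python
def make_cat(category: str, category_suffix: str | None) -> str:
    # e.g. "thin_lto_opt_object" -> "thin_lto_opt_object_shared"
    return f"{category}_{category_suffix}" if category_suffix else category

def make_id(identifier: str | None, name: str) -> str:
    # Prefix per-object names so two links in one rule never collide.
    return f"{identifier}_{name}" if identifier else name

assert make_cat("thin_lto_index", None) == "thin_lto_index"
assert make_cat("thin_lto_index", "shared") == "thin_lto_index_shared"
assert make_id("libfoo", "main.cpp.o") == "libfoo_main.cpp.o"
```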
+ identifier = opts.identifier + def make_cat(c: str) -> str: """ Used to make sure categories for our actions include the provided suffix """ if category_suffix != None: @@ -149,11 +157,11 @@ def cxx_dist_link( link_infos = map_to_link_infos(links) - cxx_toolchain = ctx.attrs._cxx_toolchain[CxxToolchainInfo] - lto_planner = cxx_toolchain.dist_lto_tools_info.planner - lto_opt = cxx_toolchain.dist_lto_tools_info.opt - lto_prepare = cxx_toolchain.dist_lto_tools_info.prepare - lto_copy = cxx_toolchain.dist_lto_tools_info.copy + cxx_toolchain = get_cxx_toolchain_info(ctx) + lto_planner = cxx_toolchain.internal_tools.dist_lto.planner[LinkerType("gnu")] + lto_opt = cxx_toolchain.internal_tools.dist_lto.opt[LinkerType("gnu")] + lto_prepare = cxx_toolchain.internal_tools.dist_lto.prepare + lto_copy = cxx_toolchain.internal_tools.dist_lto.copy PREPEND_ARCHIVE_NAMES = [ # T130644072: If linked with `--whole-archive`, Clang builtins must be at the @@ -227,7 +235,7 @@ def cxx_dist_link( ), ) index_link_data.append(data) - plan_outputs.extend([bc_output, plan_output]) + plan_outputs.extend([bc_output.as_output(), plan_output.as_output()]) elif isinstance(linkable, ArchiveLinkable) and linkable.supports_lto: # Our implementation of Distributed ThinLTO operates on individual objects, not archives. Since these # archives might still contain LTO-able bitcode, we first extract the objects within the archive into @@ -275,12 +283,12 @@ def cxx_dist_link( index_link_data.append(data) archive_opt_manifests.append(archive_opt_manifest) plan_inputs.extend([archive_manifest, archive_objects]) - plan_outputs.extend([archive_indexes, archive_plan]) + plan_outputs.extend([archive_indexes.as_output(), archive_plan.as_output()]) else: add_linkable(idx, linkable) index_link_data.append(None) - index_argsfile_out = ctx.actions.declare_output(output.basename + ".thinlto.index.argsfile") + index_argsfile_out = ctx.actions.declare_output(output.basename + ".thinlto_index_argsfile") final_link_index = ctx.actions.declare_output(output.basename + ".final_link_index") def dynamic_plan(link_plan: Artifact, index_argsfile_out: Artifact, final_link_index: Artifact): @@ -335,7 +343,7 @@ def cxx_dist_link( archive_args = prepend_index_args if link_data.prepend else index_args - archive_args.hidden(link_data.objects_dir) + archive_args.add(cmd_args(hidden = link_data.objects_dir)) if not link_data.link_whole: archive_args.add("-Wl,--start-lib") @@ -347,8 +355,6 @@ def cxx_dist_link( if not link_data.link_whole: archive_args.add("-Wl,--end-lib") - archive_args.hidden(link_data.objects_dir) - add_post_flags(idx) index_argfile, _ = ctx.actions.write( @@ -359,18 +365,20 @@ def cxx_dist_link( index_cat = make_cat("thin_lto_index") index_file_out = ctx.actions.declare_output(make_id(index_cat) + "/index") - index_out_dir = cmd_args(index_file_out.as_output()).parent() + index_out_dir = cmd_args(index_file_out.as_output(), parent = 1) + + index_cmd_parts = cxx_link_cmd_parts(cxx_toolchain, executable_link) - index_cmd = cxx_link_cmd(cxx_toolchain) + index_cmd = index_cmd_parts.link_cmd index_cmd.add(cmd_args(index_argfile, format = "@{}")) - output_as_string = cmd_args(output) - output_as_string.ignore_artifacts() + output_as_string = cmd_args(output, ignore_artifacts = True) index_cmd.add("-o", output_as_string) index_cmd.add(cmd_args(index_file_out.as_output(), format = "-Wl,--thinlto-index-only={}")) index_cmd.add("-Wl,--thinlto-emit-imports-files") index_cmd.add("-Wl,--thinlto-full-index") index_cmd.add(cmd_args(index_out_dir, format = 
"-Wl,--thinlto-prefix-replace=;{}/")) + index_cmd.add(index_cmd_parts.post_linker_flags) # Terminate the index file with a newline. index_meta.add("") @@ -382,10 +390,10 @@ def cxx_dist_link( plan_cmd = cmd_args([lto_planner, "--meta", index_meta_file, "--index", index_out_dir, "--link-plan", outputs[link_plan].as_output(), "--final-link-index", outputs[final_link_index].as_output(), "--"]) plan_cmd.add(index_cmd) - plan_extra_inputs = cmd_args() - plan_extra_inputs.add(index_meta) - plan_extra_inputs.add(index_args) - plan_cmd.hidden(plan_extra_inputs) + plan_cmd.add(cmd_args(hidden = [ + index_meta, + index_args, + ])) ctx.actions.run(plan_cmd, category = index_cat, identifier = identifier, local_only = True) @@ -396,24 +404,32 @@ def cxx_dist_link( # directly, since it uses `ctx.outputs` to bind its outputs. Instead of doing Starlark hacks to work around # the lack of `ctx.outputs`, we declare an empty file as a dynamic input. plan_inputs.append(ctx.actions.write(output.basename + ".plan_hack.txt", "")) - plan_outputs.extend([link_plan, index_argsfile_out, final_link_index]) + plan_outputs.extend([link_plan.as_output(), index_argsfile_out.as_output(), final_link_index.as_output()]) ctx.actions.dynamic_output(dynamic = plan_inputs, inputs = [], outputs = plan_outputs, f = plan) link_plan_out = ctx.actions.declare_output(output.basename + ".link-plan.json") dynamic_plan(link_plan = link_plan_out, index_argsfile_out = index_argsfile_out, final_link_index = final_link_index) def prepare_opt_flags(link_infos: list[LinkInfo]) -> cmd_args: - opt_args = cmd_args() - opt_args.add(cxx_link_cmd(cxx_toolchain)) + opt_cmd_parts = cxx_link_cmd_parts(cxx_toolchain, executable_link) + opt_args = opt_cmd_parts.link_cmd # buildifier: disable=uninitialized for link in link_infos: for raw_flag in link.pre_flags + link.post_flags: opt_args.add(raw_flag) + + opt_args.add(opt_cmd_parts.post_linker_flags) return opt_args opt_common_flags = prepare_opt_flags(link_infos) + # Create an argsfile and dump all the flags to be processed later by lto_opt. + # These flags are common to all opt actions, we don't need an argfile for each action, one + # for the entire link unit will do. + opt_argsfile = ctx.actions.declare_output(output.basename + ".lto_opt_argsfile") + ctx.actions.write(opt_argsfile.as_output(), opt_common_flags, allow_args = True) + # We declare a separate dynamic_output for every object file. It would # maybe be simpler to have a single dynamic_output that produced all the # opt actions, but an action needs to re-run whenever the analysis that @@ -443,13 +459,12 @@ def cxx_dist_link( # Local thinlto generates .dwo files by default. For distributed thinlto, however, we # want to keep all dwo debug info in the object file to reduce the number of files to # materialize. - if cxx_toolchain.split_debug_mode == SplitDebugMode("single"): + if cxx_toolchain.split_debug_mode == SplitDebugMode("none"): + opt_cmd.add("--split-dwarf=none") + elif cxx_toolchain.split_debug_mode == SplitDebugMode("single"): opt_cmd.add("--split-dwarf=single") - # Create an argsfile and dump all the flags to be processed later. 
- opt_argsfile = ctx.actions.declare_output(outputs[opt_object].basename + ".opt.argsfile") - ctx.actions.write(opt_argsfile.as_output(), opt_common_flags, allow_args = True) - opt_cmd.hidden(opt_common_flags) + opt_cmd.add(cmd_args(hidden = opt_common_flags)) opt_cmd.add("--args", opt_argsfile) opt_cmd.add("--") @@ -457,16 +472,15 @@ def cxx_dist_link( imports = [index_link_data[idx].link_data.initial_object for idx in plan_json["imports"]] archives = [index_link_data[idx].link_data.objects_dir for idx in plan_json["archive_imports"]] - opt_cmd.hidden(imports) - opt_cmd.hidden(archives) - ctx.actions.run(opt_cmd, category = make_cat("thin_lto_opt"), identifier = name) + opt_cmd.add(cmd_args(hidden = imports + archives)) + ctx.actions.run(opt_cmd, category = make_cat("thin_lto_opt_object"), identifier = name) - ctx.actions.dynamic_output(dynamic = [plan], inputs = [], outputs = [opt_object], f = optimize_object) + ctx.actions.dynamic_output(dynamic = [plan], inputs = [], outputs = [opt_object.as_output()], f = optimize_object) def dynamic_optimize_archive(archive: _ArchiveLinkData): def optimize_archive(ctx: AnalysisContext, artifacts, outputs): plan_json = artifacts[archive.plan].read_json() - if "objects" not in plan_json or not plan_json["objects"] or is_all(lambda e: not e["is_bc"], plan_json["objects"]): + if "objects" not in plan_json or not plan_json["objects"] or lazy.is_all(lambda e: not e["is_bc"], plan_json["objects"]): # Nothing in this directory was lto-able; let's just copy the archive. ctx.actions.copy_file(outputs[archive.opt_objects_dir], archive.objects_dir) ctx.actions.write(outputs[archive.opt_manifest], "") @@ -486,14 +500,12 @@ def cxx_dist_link( opt_object.as_output(), "--from", entry["path"], - ]) - - copy_cmd.hidden(archive.objects_dir) + ], hidden = archive.objects_dir) ctx.actions.run(copy_cmd, category = make_cat("thin_lto_opt_copy"), identifier = source_path) output_dir[source_path] = opt_object continue - opt_object = ctx.actions.declare_output("%s/%s" % (make_cat("thin_lto_opt"), source_path)) + opt_object = ctx.actions.declare_output("%s/%s" % (make_cat("thin_lto_opt_archive"), source_path)) output_manifest.add(opt_object) output_dir[source_path] = opt_object opt_cmd = cmd_args(lto_opt) @@ -501,12 +513,12 @@ def cxx_dist_link( opt_cmd.add("--input", entry["path"]) opt_cmd.add("--index", entry["bitcode_file"]) - if cxx_toolchain.split_debug_mode == SplitDebugMode("single"): + if cxx_toolchain.split_debug_mode == SplitDebugMode("none") or getattr(ctx.attrs, "distributed_thinlto_partial_split_dwarf", False): + opt_cmd.add("--split-dwarf=none") + elif cxx_toolchain.split_debug_mode == SplitDebugMode("single"): opt_cmd.add("--split-dwarf=single") - opt_argsfile = ctx.actions.declare_output(opt_object.basename + ".opt.argsfile") - ctx.actions.write(opt_argsfile.as_output(), opt_common_flags, allow_args = True) - opt_cmd.hidden(opt_common_flags) + opt_cmd.add(cmd_args(hidden = opt_common_flags)) opt_cmd.add("--args", opt_argsfile) opt_cmd.add("--") @@ -514,17 +526,16 @@ def cxx_dist_link( imports = [index_link_data[idx].link_data.initial_object for idx in entry["imports"]] archives = [index_link_data[idx].link_data.objects_dir for idx in entry["archive_imports"]] - opt_cmd.hidden(imports) - opt_cmd.hidden(archives) - opt_cmd.hidden(archive.indexes_dir) - opt_cmd.hidden(archive.objects_dir) - ctx.actions.run(opt_cmd, category = make_cat("thin_lto_opt"), identifier = source_path) + opt_cmd.add(cmd_args( + hidden = imports + archives + [archive.indexes_dir, 
archive.objects_dir], + )) + ctx.actions.run(opt_cmd, category = make_cat("thin_lto_opt_archive"), identifier = source_path) ctx.actions.symlinked_dir(outputs[archive.opt_objects_dir], output_dir) ctx.actions.write(outputs[archive.opt_manifest], output_manifest, allow_args = True) archive_opt_inputs = [archive.plan] - archive_opt_outputs = [archive.opt_objects_dir, archive.opt_manifest] + archive_opt_outputs = [archive.opt_objects_dir.as_output(), archive.opt_manifest.as_output()] ctx.actions.dynamic_output(dynamic = archive_opt_inputs, inputs = [], outputs = archive_opt_outputs, f = optimize_archive) for artifact in index_link_data: @@ -542,7 +553,7 @@ def cxx_dist_link( elif artifact.data_type == _DataType("archive"): dynamic_optimize_archive(link_data) - linker_argsfile_out = ctx.actions.declare_output(output.basename + ".thinlto.link.argsfile") + linker_argsfile_out = ctx.actions.declare_output(output.basename + ".thinlto_link_argsfile") def thin_lto_final_link(ctx: AnalysisContext, artifacts, outputs): plan = artifacts[link_plan_out].read_json() @@ -554,46 +565,44 @@ def cxx_dist_link( non_lto_objects = {int(k): 1 for k in plan["non_lto_objects"]} current_index = 0 opt_objects = [] - archives = [] for link in link_infos: link_args.add(link.pre_flags) for linkable in link.linkables: if isinstance(linkable, ObjectsLinkable): - new_objs = [] for obj in linkable.objects: if current_index in plan_index: - new_objs.append(index_link_data[current_index].link_data.opt_object) opt_objects.append(index_link_data[current_index].link_data.opt_object) elif current_index in non_lto_objects: - new_objs.append(obj) opt_objects.append(obj) current_index += 1 else: current_index += 1 link_args.add(link.post_flags) - link_cmd = cxx_link_cmd(cxx_toolchain) - final_link_argfile, final_link_inputs = ctx.actions.write( - outputs[linker_argsfile_out].as_output(), - link_args, - allow_args = True, - ) + link_cmd_parts = cxx_link_cmd_parts(cxx_toolchain, executable_link) + link_cmd = link_cmd_parts.link_cmd + link_cmd_hidden = [] # buildifier: disable=uninitialized for artifact in index_link_data: if artifact != None and artifact.data_type == _DataType("archive"): - link_cmd.hidden(artifact.link_data.opt_objects_dir) - link_cmd.add(cmd_args(final_link_argfile, format = "@{}")) + link_cmd_hidden.append(artifact.link_data.opt_objects_dir) + link_cmd.add(at_argfile( + actions = ctx.actions, + name = outputs[linker_argsfile_out], + args = link_args, + allow_args = True, + )) link_cmd.add(cmd_args(final_link_index, format = "@{}")) link_cmd.add("-o", outputs[output].as_output()) if linker_map: link_cmd.add(linker_map_args(cxx_toolchain, outputs[linker_map].as_output()).flags) - link_cmd_extra_inputs = cmd_args() - link_cmd_extra_inputs.add(final_link_inputs) - link_cmd.hidden(link_cmd_extra_inputs) - link_cmd.hidden(link_args) - link_cmd.hidden(opt_objects) - link_cmd.hidden(archives) + link_cmd_hidden.extend([ + link_args, + opt_objects, + ]) + link_cmd.add(link_cmd_parts.post_linker_flags) + link_cmd.add(cmd_args(hidden = link_cmd_hidden)) ctx.actions.run(link_cmd, category = make_cat("thin_lto_link"), identifier = identifier, local_only = True) @@ -601,7 +610,7 @@ def cxx_dist_link( ctx.actions.dynamic_output( dynamic = final_link_inputs, inputs = [], - outputs = [output] + ([linker_map] if linker_map else []) + [linker_argsfile_out], + outputs = [output.as_output()] + ([linker_map.as_output()] if linker_map else []) + [linker_argsfile_out.as_output()], f = thin_lto_final_link, ) @@ -613,7 +622,9 @@ def 
cxx_dist_link(
         ],
     )
 
-    final_output = output if not (executable_link and cxx_use_bolt(ctx)) else bolt(ctx, output, identifier)
+    final_output = output if not (executable_link and cxx_use_bolt(ctx)) else bolt(ctx, output, external_debug_info, identifier)
+    final_output = stamp_build_info(ctx, final_output) if executable_link else final_output
+
     dwp_output = ctx.actions.declare_output(output.short_path.removesuffix("-wrapper") + ".dwp") if generate_dwp else None
 
     if generate_dwp:
@@ -632,14 +643,21 @@ cxx_dist_link(
             local_only = True,
         )
 
+    unstripped_output = final_output
+    if opts.strip:
+        strip_args = opts.strip_args_factory(ctx) if opts.strip_args_factory else cmd_args()
+        final_output = strip_object(ctx, cxx_toolchain, final_output, strip_args, category_suffix)
+
     return LinkedObject(
         output = final_output,
-        unstripped_output = final_output,
+        unstripped_output = unstripped_output,
         prebolt_output = output,
         dwp = dwp_output,
        external_debug_info = external_debug_info,
         linker_argsfile = linker_argsfile_out,
-        linker_filelist = None,  # DistLTO unsupported for Darwin linkers
-        linker_command = None,  # DistLTO unsupported for debugging of command
+        linker_filelist = None,  # DistLTO doesn't use filelists
+        linker_command = None,  # There is no notion of a single linker command for DistLTO
         index_argsfile = index_argsfile_out,
+        dist_thin_lto_codegen_argsfile = None,  # Only Darwin builds provide this argsfile
+        dist_thin_lto_index_argsfile = None,  # Only Darwin builds provide this argsfile
     )
diff --git a/prelude/cxx/dist_lto/tools.bzl b/prelude/cxx/dist_lto/tools.bzl
index 604ccb37e1728..7adb34c4a6c2d 100644
--- a/prelude/cxx/dist_lto/tools.bzl
+++ b/prelude/cxx/dist_lto/tools.bzl
@@ -5,15 +5,21 @@
 # License, Version 2.0 found in the LICENSE-APACHE file in the root directory
 # of this source tree.
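The tools.bzl hunk below generalizes the planner and opt tools from single binaries to per-linker maps keyed by `LinkerType`. Conceptually the lookup is just a dispatch table; a plain-Python equivalent (the script names are placeholders mirroring the BUCK file later in this diff):

```python
from enum import Enum

class LinkerType(Enum):
    GNU = "gnu"
    DARWIN = "darwin"

# Hypothetical stand-ins for the RunInfo-backed tool targets.
PLANNERS = {
    LinkerType.GNU: "dist_lto_planner_gnu.py",
    LinkerType.DARWIN: "dist_lto_planner_darwin.py",
}

def planner_for(linker: LinkerType) -> str:
    return PLANNERS[linker]  # unknown linker types fail loudly with KeyError

assert planner_for(LinkerType.DARWIN) == "dist_lto_planner_darwin.py"
```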
-load("@prelude//cxx:cxx_toolchain_types.bzl", "DistLtoToolsInfo") +load("@prelude//cxx:cxx_toolchain_types.bzl", "DistLtoToolsInfo", "LinkerType") def _impl(ctx): return [ DefaultInfo(), DistLtoToolsInfo( - planner = ctx.attrs.planner[RunInfo], + planner = { + LinkerType(linker_type): planner[RunInfo] + for linker_type, planner in ctx.attrs.planner.items() + }, + opt = { + LinkerType(linker_type): opt[RunInfo] + for linker_type, opt in ctx.attrs.opt.items() + }, prepare = ctx.attrs.prepare[RunInfo], - opt = ctx.attrs.opt[RunInfo], copy = ctx.attrs.copy[RunInfo], ), ] @@ -21,9 +27,15 @@ def _impl(ctx): dist_lto_tools = rule( impl = _impl, attrs = { - "copy": attrs.dep(), - "opt": attrs.dep(), - "planner": attrs.dep(), - "prepare": attrs.dep(), + "copy": attrs.dep(providers = [RunInfo]), + "opt": attrs.dict( + key = attrs.enum(LinkerType.values()), + value = attrs.dep(providers = [RunInfo]), + ), + "planner": attrs.dict( + key = attrs.enum(LinkerType.values()), + value = attrs.dep(providers = [RunInfo]), + ), + "prepare": attrs.dep(providers = [RunInfo]), }, ) diff --git a/prelude/cxx/dist_lto/tools/BUCK b/prelude/cxx/dist_lto/tools/BUCK deleted file mode 100644 index 3abffa28230c7..0000000000000 --- a/prelude/cxx/dist_lto/tools/BUCK +++ /dev/null @@ -1,44 +0,0 @@ -load("@prelude//cxx/dist_lto:tools.bzl", "dist_lto_tools") - -prelude = native - -prelude.python_bootstrap_binary( - name = "dist_lto_planner", - main = "dist_lto_planner.py", - visibility = ["PUBLIC"], -) - -prelude.python_bootstrap_binary( - name = "dist_lto_opt", - main = "dist_lto_opt.py", - visibility = ["PUBLIC"], -) - -prelude.python_bootstrap_binary( - name = "dist_lto_prepare", - main = "dist_lto_prepare.py", - visibility = ["PUBLIC"], -) - -prelude.python_bootstrap_binary( - name = "dist_lto_copy", - main = "dist_lto_copy.py", - visibility = ["PUBLIC"], -) - -dist_lto_tools( - name = "dist_lto_tools", - planner = ":dist_lto_planner", - opt = ":dist_lto_opt", - prepare = ":dist_lto_prepare", - copy = ":dist_lto_copy", - visibility = ["PUBLIC"], -) - -prelude.python_test( - name = "test_dist_lto_opt", - srcs = [ - "tests/test_dist_lto_opt.py", - "dist_lto_opt.py", - ], -) diff --git a/prelude/cxx/dist_lto/tools/BUCK.v2 b/prelude/cxx/dist_lto/tools/BUCK.v2 new file mode 100644 index 0000000000000..9e1b5d325e897 --- /dev/null +++ b/prelude/cxx/dist_lto/tools/BUCK.v2 @@ -0,0 +1,67 @@ +load("@prelude//cxx/dist_lto:tools.bzl", "dist_lto_tools") +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +prelude = native + +prelude.python_bootstrap_binary( + name = "dist_lto_planner_gnu", + main = "dist_lto_planner_gnu.py", + visibility = ["PUBLIC"], +) + +prelude.python_bootstrap_binary( + name = "dist_lto_planner_darwin", + main = "dist_lto_planner_darwin.py", + visibility = ["PUBLIC"], +) + +prelude.python_bootstrap_binary( + name = "dist_lto_opt_gnu", + main = "dist_lto_opt_gnu.py", + visibility = ["PUBLIC"], +) + +prelude.python_bootstrap_binary( + name = "dist_lto_opt_darwin", + main = "dist_lto_opt_darwin.py", + visibility = ["PUBLIC"], +) + +prelude.python_bootstrap_binary( + name = "dist_lto_prepare", + main = "dist_lto_prepare.py", + visibility = ["PUBLIC"], +) + +prelude.python_bootstrap_binary( + name = "dist_lto_copy", + main = "dist_lto_copy.py", + visibility = ["PUBLIC"], +) + +dist_lto_tools( + name = "dist_lto_tools", + copy = ":dist_lto_copy", + opt = { + "darwin": ":dist_lto_opt_darwin", + "gnu": ":dist_lto_opt_gnu", + }, + planner = { + "darwin": 
":dist_lto_planner_darwin", + "gnu": ":dist_lto_planner_gnu", + }, + prepare = ":dist_lto_prepare", + visibility = ["PUBLIC"], +) + +prelude.python_test( + name = "test_dist_lto_opt", + srcs = [ + "dist_lto_opt_gnu.py", + "tests/test_dist_lto_opt.py", + ], +) diff --git a/prelude/cxx/dist_lto/tools/dist_lto_opt.py b/prelude/cxx/dist_lto/tools/dist_lto_opt.py deleted file mode 100644 index acc8cc9f043cf..0000000000000 --- a/prelude/cxx/dist_lto/tools/dist_lto_opt.py +++ /dev/null @@ -1,258 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under both the MIT license found in the -# LICENSE-MIT file in the root directory of this source tree and the Apache -# License, Version 2.0 found in the LICENSE-APACHE file in the root directory -# of this source tree. - -""" -Python wrapper around `clang` intended for use by the parallel opt phase of -a Distributed ThinLTO compilation. This script works around a LLVM bug where -LLVM will return a zero exit code in the case where ThinLTO fails with a -fatal error. - -Instead of trusting the exit code of the compiler, this script checks the -output file and returns 1 if the file has zero size. -""" - -import argparse -import os -import subprocess -import sys -from typing import List - -EXIT_SUCCESS, EXIT_FAILURE = 0, 1 - -# Filter opt related flags -def _filter_flags(clang_flags: List[str]) -> List[str]: # noqa: C901 - # List of llvm flags to be ignored. - # They either don't have an valid mapping or unused during opt. - IGNORE_OPT_FLAGS = [ - "-Wl,-plugin-opt,-function-sections", - "-Wl,--lto-whole-program-visibility", - "-Wl,--no-lto-whole-program-visibility", - "-flto=thin", - ] - # Conservatively, we only translate llvms flags in our known list - KNOWN_LLVM_SHARED_LIBRARY_FLAGS = ["-pie", "-shared"] - - # Start with default flags for opt. - # The default values may change across compiler versions. - # Make sure they are always synced with the current values. - opt_flags = [ - # TODO(T139459294): - # -O2 is the default optimization flag for the link-time optimizer - # this setting matches current llvm implementation: - # https://github.com/llvm/llvm-project/blob/main/llvm/include/llvm/LTO/Config.h#L57 - "-O2", - # TODO(T139459170): Remove after clang-15. NPM is the default. - "-fexperimental-new-pass-manager", - "-ffunction-sections", - "-fdata-sections", - ] - - # Clang driver passes through lld flags with "-Wl," prefix. There are 4 type of flags with unique - # prefixes: - # 1. "--lto-...": these are native lld flags. - # 2. "-plugin-opt,..." or "-plugin-opt=...": these are the aliases of the native lld flags (1). - # 3. "-mllvm,...": these are llvm flags. - # 4. "-plugin-opt,-..." or "-plugin-opt=-...": these are the aliases of llvm flags (3). Note that they differ from (2) and always start with "-". - # - # For (1) and (2), we need to convert them case by case. - # For (3) and (4), we should be able to pass them through into the optimizer directly by prefixing "-mllvm". - # TODO(T139448744): Cover all the flags. 
Check available flags using "ld.lld --help | grep -A 1 '\-\-plugin-opt='" - PLUGIN_OPT_PREFIXES = ["-Wl,-plugin-opt,", "-Wl,-plugin-opt="] - - def _find_plugin_opt_prefix(flag: str) -> str: - matched_prefix = [ - prefix for prefix in PLUGIN_OPT_PREFIXES if flag.startswith(prefix) - ] - if matched_prefix: - return matched_prefix[0] - return "" - - plugin_opt_to_llvm_flag_map = { - "sample-profile=": "-fprofile-sample-use=", - "O": "-O", - } - - def _plugin_opt_to_clang_flag(flag: str) -> str: - for k, v in plugin_opt_to_llvm_flag_map.items(): - if flag.startswith(k): - return flag.replace(k, v) - return None - - index = 0 - while index < len(clang_flags): - raw_flag = clang_flags[index] - flag = raw_flag.replace('"', "") - if flag in IGNORE_OPT_FLAGS: - index += 1 - continue - if _find_plugin_opt_prefix(flag): - # Convert "-Wl,-plugin-opt,...". - flag = flag.replace(_find_plugin_opt_prefix(flag), "", 1) - if flag.startswith("-"): - # If flag starts with "-", it is an llvm flag. Pass it through directly. - opt_flags.extend(["-mllvm", flag]) - else: - flag = _plugin_opt_to_clang_flag(flag) - if flag is None: - # Bail on any unknown flag. - print(f"error: unrecognized flag {raw_flag}") - return None - opt_flags.append(flag) - elif flag.startswith("-Wl,-mllvm,"): - # Convert "-Wl,-mllvm,...". It is an llvm flag. Pass it through directly. - flag = flag.replace("-Wl,-mllvm,", "", 1) - opt_flags.extend(["-mllvm", flag]) - elif flag in KNOWN_LLVM_SHARED_LIBRARY_FLAGS: - # The target is a shared library, `-fPIC` is needed in opt phase to correctly generate PIC ELF. - opt_flags.append("-fPIC") - elif flag.startswith("-f"): - # Always pass in -f flags which are presumed to be Clang flags. - opt_flags.append(flag) - elif flag == "-Xlinker": - # Handle -Xlinker -xxxx flags. -Xlinker flags are passed in two - # lines, the first line being just "-Xlinker" and the second line - # being the actual arg. - if index + 1 >= len(clang_flags): - print( - f"error: cannot handle -Xlinker flags {clang_flags}, " - "-Xlinker should be followed by an option" - ) - return EXIT_FAILURE - if clang_flags[index + 1] == "-mllvm": - # Validate -Xlinker - # -mllvm - # -Xlinker - # -xxxx structure - # This assumes -mllvm and its arg are provided consecutively, - # mostly to handle the case where they come from Buck's - # linker_flags. - # TODO(T159109840): Generalize this logic to handle -Xlinker - # -mllvm -unrelated-flag -Xlinker -actual-mllvm-arg - if ( - index + 2 >= len(clang_flags) - or index + 3 >= len(clang_flags) - or clang_flags[index + 2] != "-Xlinker" - ): - print( - f"error: cannot handle -Xlinker flags {clang_flags}, " - "-mllvm should be followed by an llvm option" - ) - return EXIT_FAILURE - opt_flags.extend(["-mllvm", clang_flags[index + 3]]) - index += 3 - else: - # Otherwise skip this -Xlinker flag and its arg - index += 1 - index += 1 - return opt_flags - - -# Clean up clang flags by obtaining the cc1 flags and filtering out those unwanted. -# clang_opt_flags is mutated after calling this function. -def _cleanup_flags(clang_opt_flags: List[str]) -> List[str]: - for i, arg in enumerate(clang_opt_flags): - if arg.startswith("--cc="): - # Find the clang binary path. 
- clang_opt_flags[i] = arg.replace("--cc=", "") - break - - # Get the cc1 flag dump with '-###' - try: - output = ( - subprocess.check_output( - clang_opt_flags + ["-###"], stderr=subprocess.STDOUT - ) - .decode() - .splitlines() - ) - except subprocess.CalledProcessError as e: - print(e.output.decode()) - return None - - # Flags that may conflict with the existing bitcode attributes. - # The value indicates if the flag is followed with a value. - flags_to_delete = { - "-mframe-pointer=none": False, - "-fmath-errno": False, - "-fno-rounding-math": False, - "-mconstructor-aliases": False, - "-munwind-tables": False, - "-target-cpu": True, - "-tune-cpu": True, - } - - clean_output = [] - skip_next = False - for f in output[-1].split()[1:]: - if skip_next: - skip_next = False - else: - f = f.strip('"') - if f in flags_to_delete: - skip_next = flags_to_delete[f] - else: - clean_output.append(f) - return clean_output - - -def main(argv: List[str]) -> int: - parser = argparse.ArgumentParser() - parser.add_argument("--out", help="The output native object file.") - parser.add_argument("--input", help="The input bitcode object file.") - parser.add_argument("--index", help="The thinlto index file.") - parser.add_argument("--split-dwarf", required=False, help="Split dwarf option.") - parser.add_argument( - "--args", help="The argsfile containing unfiltered and unprocessed flags." - ) - parser.add_argument("--debug", action="store_true", help="Dump clang -cc1 flags.") - parser.add_argument("opt_args", nargs=argparse.REMAINDER) - args = parser.parse_args(argv[1:]) - - with open(args.args, "r") as argsfile: - clang_opt_flags = _filter_flags(argsfile.read().splitlines()) - if clang_opt_flags is None: - return EXIT_FAILURE - - clang_opt_flags.extend( - [ - "-o", - args.out, - "-x", - "ir", - "-c", - args.input, - f"-fthinlto-index={args.index}", - ] - ) - if args.split_dwarf: - clang_opt_flags.append(f"-gsplit-dwarf={args.split_dwarf}") - - # The following args slices manipulating may be confusing. The first 3 element of opt_args are: - # 1. a spliter "--", it's not used anywhere; - # 2. the fbcc wrapper script path - # 3. the "-cc" arg pointing to the compiler we use - # EXAMPLE: ['--', 'buck-out/v2/gen/fbcode/8e3db19fe005003a/tools/build/buck/wrappers/__fbcc__/fbcc', '--cc=fbcode/third-party-buck/platform010/build/llvm-fb/12/bin/clang++', '--target=x86_64-redhat-linux-gnu', ...] - clang_cc1_flags = _cleanup_flags(args.opt_args[2:] + clang_opt_flags) - if clang_cc1_flags is None: - return EXIT_FAILURE - - fbcc_cmd = args.opt_args[1:3] + clang_cc1_flags - if args.debug: - # Print fbcc commandline and exit. - print(" ".join(fbcc_cmd)) - return EXIT_SUCCESS - - subprocess.check_call(fbcc_cmd) - if os.stat(args.out).st_size == 0: - print("error: opt produced empty file") - return EXIT_FAILURE - return EXIT_SUCCESS - - -if __name__ == "__main__": - sys.exit(main(sys.argv)) diff --git a/prelude/cxx/dist_lto/tools/dist_lto_opt_darwin.py b/prelude/cxx/dist_lto/tools/dist_lto_opt_darwin.py new file mode 100644 index 0000000000000..3af21826fdb5e --- /dev/null +++ b/prelude/cxx/dist_lto/tools/dist_lto_opt_darwin.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +""" +Python wrapper around clang intended to optimize and codegen bitcode files +to native object files for distributed thin lto. This script munges compiler +flags to prepare a suitable clang invocation. +""" + +import argparse +import subprocess +import sys + +from typing import List + + +def main(argv: List[str]) -> int: + parser = argparse.ArgumentParser() + parser.add_argument("--out", help="The output native object file.") + parser.add_argument("--input", help="The input bitcode object file.") + parser.add_argument("--index", help="The thinlto index file.") + # Split dwarf isn't applicable to Darwin, ignore the flag + parser.add_argument("--split-dwarf", required=False, help="Split dwarf option.") + parser.add_argument( + "--args", help="The argsfile containing unfiltered and unprocessed flags." + ) + parser.add_argument("opt_args", nargs=argparse.REMAINDER) + args = parser.parse_args(argv[1:]) + + with open(args.args, "r") as argsfile: + clang_opt_flags = argsfile.read().splitlines() + + clang_opt_flags.extend( + [ + "-o", + args.out, + "-x", + "ir", # Without this the input file type is incorrectly inferred. + "-c", + args.input, + f"-fthinlto-index={args.index}", + # When lto_mode=thin/full all compile actions are passed `-flto=thin/full`. We + # want to generate a native object file here. + "-fno-lto", + "-Werror=unused-command-line-argument", + ] + ) + + # TODO(T187767988) - Check if returning the subprocesses exit code is sufficient. Server LLVM created such a wrapper + # script in the first place because of a bug in Clang where it fails but does not set a non-zero exit code (T116695431). Fbcode's + # version of this script measure the size of the output file to determine success. The task is closed, but if the bug + # still persists, we may need to do the same. + result = subprocess.run(clang_opt_flags) + return result.returncode + + +if __name__ == "__main__": + sys.exit(main(sys.argv)) diff --git a/prelude/cxx/dist_lto/tools/dist_lto_opt_gnu.py b/prelude/cxx/dist_lto/tools/dist_lto_opt_gnu.py new file mode 100644 index 0000000000000..e6d34d3b6fde2 --- /dev/null +++ b/prelude/cxx/dist_lto/tools/dist_lto_opt_gnu.py @@ -0,0 +1,259 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +""" +Python wrapper around `clang` intended for use by the parallel opt phase of +a Distributed ThinLTO compilation. This script works around a LLVM bug where +LLVM will return a zero exit code in the case where ThinLTO fails with a +fatal error. + +Instead of trusting the exit code of the compiler, this script checks the +output file and returns 1 if the file has zero size. +""" + +import argparse +import os +import subprocess +import sys +from typing import List + +EXIT_SUCCESS, EXIT_FAILURE = 0, 1 + + +# Filter opt related flags +def _filter_flags(clang_flags: List[str]) -> List[str]: # noqa: C901 + # List of llvm flags to be ignored. + # They either don't have an valid mapping or unused during opt. 
+    IGNORE_OPT_FLAGS = [
+        "-Wl,-plugin-opt,-function-sections",
+        "-Wl,--lto-whole-program-visibility",
+        "-Wl,--no-lto-whole-program-visibility",
+        "-flto=thin",
+    ]
+    # Conservatively, we only translate llvm flags in our known list.
+    KNOWN_LLVM_SHARED_LIBRARY_FLAGS = ["-pie", "-shared"]
+
+    # Start with default flags for opt.
+    # The default values may change across compiler versions.
+    # Make sure they are always synced with the current values.
+    opt_flags = [
+        # TODO(T139459294):
+        # -O2 is the default optimization flag for the link-time optimizer
+        # this setting matches current llvm implementation:
+        # https://github.com/llvm/llvm-project/blob/main/llvm/include/llvm/LTO/Config.h#L57
+        "-O2",
+        "-ffunction-sections",
+        "-fdata-sections",
+    ]
+
+    # Clang driver passes through lld flags with "-Wl," prefix. There are 4 types of flags with unique
+    # prefixes:
+    # 1. "--lto-...": these are native lld flags.
+    # 2. "-plugin-opt,..." or "-plugin-opt=...": these are the aliases of the native lld flags (1).
+    # 3. "-mllvm,...": these are llvm flags.
+    # 4. "-plugin-opt,-..." or "-plugin-opt=-...": these are the aliases of llvm flags (3). Note that they differ from (2) and always start with "-".
+    #
+    # For (1) and (2), we need to convert them case by case.
+    # For (3) and (4), we should be able to pass them through into the optimizer directly by prefixing "-mllvm".
+    # TODO(T139448744): Cover all the flags. Check available flags using "ld.lld --help | grep -A 1 '\-\-plugin-opt='"
+    PLUGIN_OPT_PREFIXES = ["-Wl,-plugin-opt,", "-Wl,-plugin-opt="]
+
+    def _find_plugin_opt_prefix(flag: str) -> str:
+        matched_prefix = [
+            prefix for prefix in PLUGIN_OPT_PREFIXES if flag.startswith(prefix)
+        ]
+        if matched_prefix:
+            return matched_prefix[0]
+        return ""
+
+    plugin_opt_to_llvm_flag_map = {
+        "sample-profile=": "-fprofile-sample-use=",
+        "O": "-O",
+    }
+
+    def _plugin_opt_to_clang_flag(flag: str) -> str:
+        for k, v in plugin_opt_to_llvm_flag_map.items():
+            if flag.startswith(k):
+                return flag.replace(k, v)
+        return None
+
+    index = 0
+    while index < len(clang_flags):
+        raw_flag = clang_flags[index]
+        flag = raw_flag.replace('"', "")
+        if flag in IGNORE_OPT_FLAGS:
+            index += 1
+            continue
+        if _find_plugin_opt_prefix(flag):
+            # Convert "-Wl,-plugin-opt,...".
+            flag = flag.replace(_find_plugin_opt_prefix(flag), "", 1)
+            if flag.startswith("-"):
+                # If flag starts with "-", it is an llvm flag. Pass it through directly.
+                opt_flags.extend(["-mllvm", flag])
+            else:
+                flag = _plugin_opt_to_clang_flag(flag)
+                if flag is None:
+                    # Bail on any unknown flag.
+                    print(f"error: unrecognized flag {raw_flag}")
+                    return None
+                opt_flags.append(flag)
+        elif flag.startswith("-Wl,-mllvm,"):
+            # Convert "-Wl,-mllvm,...". It is an llvm flag. Pass it through directly.
+            flag = flag.replace("-Wl,-mllvm,", "", 1)
+            opt_flags.extend(["-mllvm", flag])
+        elif flag in KNOWN_LLVM_SHARED_LIBRARY_FLAGS:
+            # The target is a shared library; `-fPIC` is needed in the opt phase to correctly generate a PIC ELF.
+            opt_flags.append("-fPIC")
+        elif flag.startswith("-f"):
+            # Always pass in -f flags, which are presumed to be Clang flags.
+            opt_flags.append(flag)
+        elif flag == "-Xlinker":
+            # Handle -Xlinker -xxxx flags. -Xlinker flags are passed in two
+            # lines, the first line being just "-Xlinker" and the second line
+            # being the actual arg.
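To make the translation rules above concrete, here is a trimmed, self-contained re-implementation of the interesting cases (illustration only, not the script's API; the actual `-Xlinker` handling the comment above describes continues right below):

```python
PLUGIN_OPT_PREFIXES = ("-Wl,-plugin-opt,", "-Wl,-plugin-opt=")
PLUGIN_OPT_TO_CLANG = {"sample-profile=": "-fprofile-sample-use="}

def translate(flag: str) -> list[str]:
    for prefix in PLUGIN_OPT_PREFIXES:
        if flag.startswith(prefix):
            rest = flag[len(prefix):]
            if rest.startswith("-"):
                return ["-mllvm", rest]  # an llvm flag: forward directly
            for lld_name, clang_name in PLUGIN_OPT_TO_CLANG.items():
                if rest.startswith(lld_name):
                    return [rest.replace(lld_name, clang_name, 1)]
            raise ValueError(f"unrecognized plugin-opt: {flag}")
    if flag.startswith("-Wl,-mllvm,"):
        return ["-mllvm", flag[len("-Wl,-mllvm,"):]]
    return [flag] if flag.startswith("-f") else []  # keep -f*, drop the rest

assert translate("-Wl,-plugin-opt,-import-instr-limit=10") == ["-mllvm", "-import-instr-limit=10"]
assert translate("-Wl,-plugin-opt=sample-profile=p.prof") == ["-fprofile-sample-use=p.prof"]
assert translate("-Wl,-mllvm,-enable-foo") == ["-mllvm", "-enable-foo"]
```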
+            if index + 1 >= len(clang_flags):
+                print(
+                    f"error: cannot handle -Xlinker flags {clang_flags}, "
+                    "-Xlinker should be followed by an option"
+                )
+                return EXIT_FAILURE
+            if clang_flags[index + 1] == "-mllvm":
+                # Validate -Xlinker
+                #          -mllvm
+                #          -Xlinker
+                #          -xxxx structure
+                # This assumes -mllvm and its arg are provided consecutively,
+                # mostly to handle the case where they come from Buck's
+                # linker_flags.
+                # TODO(T159109840): Generalize this logic to handle -Xlinker
+                # -mllvm -unrelated-flag -Xlinker -actual-mllvm-arg
+                if (
+                    index + 2 >= len(clang_flags)
+                    or index + 3 >= len(clang_flags)
+                    or clang_flags[index + 2] != "-Xlinker"
+                ):
+                    print(
+                        f"error: cannot handle -Xlinker flags {clang_flags}, "
+                        "-mllvm should be followed by an llvm option"
+                    )
+                    return EXIT_FAILURE
+                opt_flags.extend(["-mllvm", clang_flags[index + 3]])
+                index += 3
+            else:
+                # Otherwise skip this -Xlinker flag and its arg.
+                index += 1
+        index += 1
+    return opt_flags
+
+
+# Clean up clang flags by obtaining the cc1 flags and filtering out the unwanted ones.
+# clang_opt_flags is mutated after calling this function.
+def _cleanup_flags(clang_opt_flags: List[str]) -> List[str]:
+    for i, arg in enumerate(clang_opt_flags):
+        if arg.startswith("--cc="):
+            # Find the clang binary path.
+            clang_opt_flags[i] = arg.replace("--cc=", "")
+            break
+
+    # Get the cc1 flag dump with '-###'
+    try:
+        output = (
+            subprocess.check_output(
+                clang_opt_flags + ["-###"], stderr=subprocess.STDOUT
+            )
+            .decode()
+            .splitlines()
+        )
+    except subprocess.CalledProcessError as e:
+        print(e.output.decode())
+        return None
+
+    # Flags that may conflict with the existing bitcode attributes.
+    # The value indicates if the flag is followed by a value.
+    flags_to_delete = {
+        "-mframe-pointer=none": False,
+        "-fmath-errno": False,
+        "-fno-rounding-math": False,
+        "-mconstructor-aliases": False,
+        "-munwind-tables": False,
+        "-target-cpu": True,
+        "-tune-cpu": True,
+    }
+
+    clean_output = []
+    skip_next = False
+    for f in output[-1].split()[1:]:
+        if skip_next:
+            skip_next = False
+        else:
+            f = f.strip('"')
+            if f in flags_to_delete:
+                skip_next = flags_to_delete[f]
+            else:
+                clean_output.append(f)
+    return clean_output
+
+
+def main(argv: List[str]) -> int:
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--out", help="The output native object file.")
+    parser.add_argument("--input", help="The input bitcode object file.")
+    parser.add_argument("--index", help="The thinlto index file.")
+    parser.add_argument("--split-dwarf", required=False, help="Split dwarf option.")
+    parser.add_argument(
+        "--args", help="The argsfile containing unfiltered and unprocessed flags."
+    )
+    parser.add_argument("--debug", action="store_true", help="Dump clang -cc1 flags.")
+    parser.add_argument("opt_args", nargs=argparse.REMAINDER)
+    args = parser.parse_args(argv[1:])
+
+    with open(args.args, "r") as argsfile:
+        clang_opt_flags = _filter_flags(argsfile.read().splitlines())
+        if clang_opt_flags is None:
+            return EXIT_FAILURE
+
+    clang_opt_flags.extend(
+        [
+            "-o",
+            args.out,
+            "-x",
+            "ir",
+            "-c",
+            args.input,
+            f"-fthinlto-index={args.index}",
+        ]
+    )
+    if args.split_dwarf == "none":
+        clang_opt_flags.append("-gno-split-dwarf")
+    elif args.split_dwarf:
+        clang_opt_flags.append(f"-gsplit-dwarf={args.split_dwarf}")
+
+    # The following manipulation of args slices may be confusing. The first 3 elements of opt_args are:
+    # 1. a splitter "--"; it's not used anywhere;
+    # 2. the fbcc wrapper script path
+    # 3.
the "-cc" arg pointing to the compiler we use + # EXAMPLE: ['--', 'buck-out/v2/gen/fbcode/8e3db19fe005003a/tools/build/buck/wrappers/__fbcc__/fbcc', '--cc=fbcode/third-party-buck/platform010/build/llvm-fb//bin/clang++', '--target=x86_64-redhat-linux-gnu', ...] + clang_cc1_flags = _cleanup_flags(args.opt_args[2:] + clang_opt_flags) + if clang_cc1_flags is None: + return EXIT_FAILURE + + fbcc_cmd = args.opt_args[1:3] + clang_cc1_flags + if args.debug: + # Print fbcc commandline and exit. + print(" ".join(fbcc_cmd)) + return EXIT_SUCCESS + + subprocess.check_call(fbcc_cmd) + if os.stat(args.out).st_size == 0: + print("error: opt produced empty file") + return EXIT_FAILURE + return EXIT_SUCCESS + + +if __name__ == "__main__": + sys.exit(main(sys.argv)) diff --git a/prelude/cxx/dist_lto/tools/dist_lto_planner.py b/prelude/cxx/dist_lto/tools/dist_lto_planner.py deleted file mode 100755 index 09a92105771c7..0000000000000 --- a/prelude/cxx/dist_lto/tools/dist_lto_planner.py +++ /dev/null @@ -1,319 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under both the MIT license found in the -# LICENSE-MIT file in the root directory of this source tree and the Apache -# License, Version 2.0 found in the LICENSE-APACHE file in the root directory -# of this source tree. - -""" -A simple wrapper around a distributed thinlto index command to fit into buck2's -distributed thinlto build. - -This reads in a couple of things: - 1. The "meta" file. This is a list of tuples of (object file, index output, - plan output). All items are line-separated (so each tuple is three lines). - 2. The index and link plan output paths - 3. The commands for the actual index command. - -It will invoke the index command and then copy the index outputs to the -requested locations and write a plan for each of those objects. This "plan" is -a simple json file with the most important thing being a list of the indices -of the imports needed for that file. - -It will then additionally write a link plan, which is just a translation of -the thinlto index (which lists the objects actually needed for the final link). - - -Both opt and link plans use indices to refer to other files because it allows the bzl -code to easily map back to other objects held in buck memory. 
-""" - -# pyre-unsafe - -import argparse -import json -import os -import os.path -import subprocess -import sys -from typing import List - - -def _get_argsfile(args) -> str: - # go through the flags passed to linker and find the index argsfile - argsfiles = list( - filter(lambda arg: arg.endswith("thinlto.index.argsfile"), args.index_args) - ) - assert ( - len(argsfiles) == 1 - ), f"expect only 1 argsfile but seeing multiple ones: {argsfiles}" - argsfile = argsfiles[0] - if argsfile.startswith("@"): - argsfile = argsfile[1:] - return argsfile - - -def _extract_lib_search_path(argsfile_path: str) -> List[str]: - lib_search_path = [] - with open(argsfile_path) as argsfile: - for line in argsfile: - if line.startswith("-L"): - lib_search_path.append(line.strip()) - return lib_search_path - - -def main(argv): - parser = argparse.ArgumentParser() - parser.add_argument("--meta") - parser.add_argument("--index") - parser.add_argument("--link-plan") - parser.add_argument("--final-link-index") - parser.add_argument("index_args", nargs=argparse.REMAINDER) - args = parser.parse_args(argv[1:]) - - subprocess.check_call(args.index_args[1:]) - - bitcode_suffix = ".thinlto.bc" - imports_suffix = ".imports" - opt_objects_suffix = ".opt.o" # please note the files are not exist yet, this is to generate the index file use in final link - - with open(args.meta) as meta: - meta_lines = [line.strip() for line in meta.readlines()] - - def read_imports(path, imports_path): - with open(imports_path) as infile: - return [line.strip() for line in infile.readlines()] - - def index_path(path): - return os.path.join(args.index, path) - - # The meta file comes directly from dist_lto.bzl and consists of a list of - # 7-tuples of information. It is easiest for us to write each tuple member - # as a separate line in Starlark, so these 7-tuples are encoded in groups - # of seven lines. - # - # The seven pieces of information are: - # 1. The path to the source bitcode file. This is used as an index into - # a dictionary (`mapping`) that records much of the metadata coming - # from these lines. - # 2. The path to an output bitcode file. This script is expected to place a - # ThinLTO index file at this location (suffixed `.thinlto.bc`). - # 3. The path to an output plan. This script is expected to place a link - # plan here (a JSON document indicating which other object files this) - # object file depends on, among other things. - # 4. The link data's index in the Starlark array. - # 5. If this object file came from an archive, the name of the archive. Otherwise, - # this line is empty. - # 6. If this object file came from an archive, the path to an output plan. - # This script is expected to produce an archive link plan here (a JSON) - # document similar to the object link plan, except containing link - # information for every file in the archive from which this object - # came. Otherwise, this line is empty. - # 7. If this object file came from an archive, the indexes directory of that - # archive. This script is expected to place all ThinLTO indexes derived - # from object files originating from this archive in that directory. - # Otherwise, this line is empty. - # - # There are two indices that are derived from this meta file: the object - # index (mapping["index"]) and the archive index (mapping["archive_index"]). - # These indices are indices into Starlark arrays for all objects and archive - # linkables, respectively. This script does not inspect them. 
- mapping = {} - archives = {} - for i in range(0, len(meta_lines), 7): - path = meta_lines[i] - output = meta_lines[i + 1] - plan_output = meta_lines[i + 2] - idx = int(meta_lines[i + 3]) - archive_name = meta_lines[i + 4] - archive_plan = meta_lines[i + 5] - archive_index_dir = meta_lines[i + 6] - - archive_idx = idx if output == "" else None # archives do not have outputs - mapping[path] = { - "output": output, - "plan_output": plan_output, - "index": idx, - "archive_index": archive_idx, - "archive_name": archive_name, - } - if archive_idx is not None: - archives[idx] = { - "name": archive_name, - "objects": [], - "plan": archive_plan, - "index_dir": archive_index_dir, - } - - non_lto_objects = {} - for path, data in sorted(mapping.items(), key=lambda v: v[0]): - output_loc = data["output"] - if os.path.exists(output_loc): - continue - - if data["archive_index"] is not None: - archives[data["archive_index"]]["objects"].append(path) - continue - - bc_file = index_path(path) + bitcode_suffix - imports_path = index_path(path) + imports_suffix - os.makedirs(os.path.dirname(output_loc), exist_ok=True) - - if os.path.exists(imports_path): - assert os.path.exists(bc_file), "missing bc file for %s" % path - os.rename(bc_file, output_loc) - imports = read_imports(path, imports_path) - imports_list = [] - archives_list = [] - for path in imports: - entry = mapping[path] - if entry["archive_index"] is not None: - archives_list.append(int(entry["archive_index"])) - else: - imports_list.append(entry["index"]) - plan = { - "imports": imports_list, - "archive_imports": archives_list, - "index": data["index"], - "bitcode_file": bc_file, - "path": path, - "is_bc": True, - } - else: - non_lto_objects[data["index"]] = 1 - with open(output_loc, "w"): - pass - plan = { - "is_bc": False, - } - - with open(data["plan_output"], "w") as planout: - json.dump(plan, planout, sort_keys=True) - - for archive in archives.values(): - # For archives, we must produce a plan that provides Starlark enough - # information about how to launch a dynamic opt for each object file - # in the archive. - archive_plan = {} - - # This is convenient to store, since it's difficult for Starlark to - # calculate it. - archive_plan["base_dir"] = os.path.dirname(archive["plan"]) - object_plans = [] - for obj in archive["objects"]: - imports_path = index_path(obj) + imports_suffix - output_path = archive["index_dir"] - os.makedirs(output_path, exist_ok=True) - if os.path.exists(imports_path): - bc_file = index_path(obj) + bitcode_suffix - os.rename(bc_file, os.path.join(output_path, os.path.basename(bc_file))) - imports = read_imports(path, imports_path) - - imports_list = [] - archives_list = [] - for path in imports: - entry = mapping[path] - if entry["archive_index"] is not None: - archives_list.append(int(entry["archive_index"])) - else: - imports_list.append(entry["index"]) - object_plans.append( - { - "is_bc": True, - "path": obj, - "imports": imports_list, - "archive_imports": archives_list, - "bitcode_file": os.path.join( - output_path, os.path.basename(bc_file) - ), - } - ) - else: - object_plans.append( - { - "is_bc": False, - "path": obj, - } - ) - - archive_plan["objects"] = object_plans - with open(archive["plan"], "w") as planout: - json.dump(archive_plan, planout, sort_keys=True) - - # We read the `index`` and `index.full`` files produced by linker in index stage - # and translate them to 2 outputs: - # 1. A link plan build final_link args. (This one may be able to be removed if we refactor the workflow) - # 2. 
A files list (*.final_link_index) used for final link stage which includes all the - # files needed. it's based on index.full with some modification, like path updates - # and redundant(added by toolchain) dependencies removing. - index = {} - index_files_set = set() - # TODO(T130322878): since we call linker wrapper twice (in index and in final_link), to avoid these libs get - # added twice we remove them from the index file for now. - KNOWN_REMOVABLE_DEPS_SUFFIX = [ - "glibc/lib/crt1.o", - "glibc/lib/crti.o", - "glibc/lib/Scrt1.o", - "crtbegin.o", - "crtbeginS.o", - ".build_info.o", - "crtend.o", - "crtendS.o", - "glibc/lib/crtn.o", - ] - with open(index_path("index")) as indexfile: - for line in indexfile: - line = line.strip() - index_files_set.add(line) - path = os.path.relpath(line, start=args.index) - index[mapping[path]["index"]] = 1 - - with open(args.link_plan, "w") as outfile: - json.dump( - { - "non_lto_objects": non_lto_objects, - "index": index, - }, - outfile, - indent=2, - sort_keys=True, - ) - - # Append all search path flags (e.g -Lfbcode/third-party-buck/platform010/build/glibc/lib) from argsfile to final_index - # this workaround is to make dist_lto compatible with link_group. see T136415235 for more info - argsfile = _get_argsfile(args) - lib_search_path = _extract_lib_search_path(argsfile) - - # build index file for final link use - with open(index_path("index.full")) as full_index_input, open( - args.final_link_index, "w" - ) as final_link_index_output: - final_link_index_output.write("\n".join(lib_search_path) + "\n") - for line in full_index_input: - line = line.strip() - if any(filter(line.endswith, KNOWN_REMOVABLE_DEPS_SUFFIX)): - continue - path = os.path.relpath(line, start=args.index) - if line in index_files_set: - if mapping[path]["output"]: - # handle files that were not extracted from archives - output = mapping[path]["output"].replace( - bitcode_suffix, opt_objects_suffix - ) - final_link_index_output.write(output + "\n") - elif os.path.exists(index_path(path) + imports_suffix): - # handle files built from source that were extracted from archives - opt_objects_path = path.replace( - "/objects/", "/opt_objects/objects/" - ) - final_link_index_output.write(opt_objects_path + "\n") - else: - # handle pre-built archives - final_link_index_output.write(line + "\n") - else: - # handle input files that did not come from linker input, e.g. linkerscirpts - final_link_index_output.write(line + "\n") - - -sys.exit(main(sys.argv)) diff --git a/prelude/cxx/dist_lto/tools/dist_lto_planner_darwin.py b/prelude/cxx/dist_lto/tools/dist_lto_planner_darwin.py new file mode 100644 index 0000000000000..f27bde27604f8 --- /dev/null +++ b/prelude/cxx/dist_lto/tools/dist_lto_planner_darwin.py @@ -0,0 +1,434 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +""" +A simple wrapper around a distributed thinlto index command to fit into buck2's +distributed thinlto build. + +This reads in a couple of things: + 1. The "meta" file. This is a list of tuples of (object file, index output, + plan output). All items are line-separated (so each tuple is three lines). + 2. The index and link plan output paths + 3. The commands for the actual index command. 
+
+It will invoke the index command and then copy the index outputs to the
+requested locations and write a plan for each of those objects. This "plan" is
+a simple json file with the most important thing being a list of the indices
+of the imports needed for that file.
+
+It will then additionally write a link plan, which is just a translation of
+the thinlto index (which lists the objects actually needed for the final link).
+
+
+Both opt and link plans use indices to refer to other files because it allows the bzl
+code to easily map back to other objects held in buck memory.
+"""
+
+# pyre-unsafe
+
+import argparse
+import json
+import os
+import os.path
+import subprocess
+import sys
+import tempfile
+from enum import Enum
+from typing import List
+
+
+def _get_argsfile(args) -> str:
+    # go through the flags passed to the linker and find the index argsfile
+    argsfiles = list(
+        filter(lambda arg: arg.endswith("thinlto_index_argsfile"), args.index_args)
+    )
+    assert (
+        len(argsfiles) == 1
+    ), f"expected 1 argsfile but found multiple: {argsfiles}"
+    argsfile = argsfiles[0]
+    if argsfile.startswith("@"):
+        argsfile = argsfile[1:]
+    return argsfile
+
+
+def _extract_lib_search_path(argsfile_path: str) -> List[str]:
+    lib_search_path = []
+    with open(argsfile_path) as argsfile:
+        for line in argsfile:
+            if line.startswith("-L"):
+                lib_search_path.append(line.strip())
+    return lib_search_path
+
+
+class BitcodeMergeState(str, Enum):
+    STANDALONE = "STANDALONE"
+    ABSORBED = "ABSORBED"
+    ROOT = "ROOT"
+    NOT_LOADED = "NOT_LOADED"
+
+
+def read_merged_bitcode_file(merged_bitcode_path) -> BitcodeMergeState:
+    if os.path.getsize(merged_bitcode_path) == 0:
+        return BitcodeMergeState.NOT_LOADED
+
+    result = subprocess.check_output(
+        f"file {merged_bitcode_path}", shell=True, text=True
+    )
+    if "LLVM bitcode" in result:
+        return BitcodeMergeState.ROOT
+
+    with open(merged_bitcode_path) as merged_bitcode_file:
+        for line in merged_bitcode_file:
+            if "standalone" in line:
+                return BitcodeMergeState.STANDALONE
+            if "absorbed" in line:
+                return BitcodeMergeState.ABSORBED
+
+    assert False, f"unexpected merged bitcode file contents: {merged_bitcode_path}"
+
+
+def main(argv):
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--meta")
+    parser.add_argument("--index")
+    parser.add_argument("--link-plan")
+    parser.add_argument("--final-link-index")
+    parser.add_argument("--enable-premerger", action="store_true")
+    parser.add_argument("index_args", nargs=argparse.REMAINDER)
+    args = parser.parse_args(argv[1:])
+
+    bitcode_suffix = ".thinlto.bc"
+    imports_suffix = ".imports"
+    opt_objects_suffix = ".opt.o"  # note: these files do not exist yet; this is used to generate the index file used in the final link
+    merged_bitcode_suffix = ".merged.bc"
+
+    premerger_enabled = args.enable_premerger
+
+    with open(args.meta) as meta:
+        meta_lines = [line.strip() for line in meta.readlines()]
+
+    def read_imports(imports_path):
+        with open(imports_path) as infile:
+            if not premerger_enabled:
+                return [line.strip() for line in infile.readlines()]
+
+            result = []
+            for line in infile.readlines():
+                if line.strip().endswith(".merged.bc"):
+                    result.append(
+                        output_merged_bitcode_file_path_to_input_bitcode_file_path_mapping[
+                            line.strip()
+                        ]
+                    )
+                else:
+                    result.append(line.strip())
+            return result
+
+    def index_path(path):
+        return os.path.join(args.index, path)
+
+    # The meta file comes directly from dist_lto.bzl and consists of a list of
+    # 9-tuples of information. It is easiest for us to write each tuple member
+    # as a separate line in Starlark, so these 9-tuples are encoded in groups
+    # of nine lines.
+    #
+    # The nine pieces of information are:
+    # 1. The path to the source bitcode file. This is used as an index into
+    #    a dictionary (`mapping`) that records much of the metadata coming
+    #    from these lines.
+    # 2. The path to an output bitcode file. This script is expected to place a
+    #    ThinLTO index file at this location (suffixed `.thinlto.bc`).
+    # 3. If the premerger is enabled, the path to a merged bitcode file.
+    #    This script is expected to place a file at this location (suffixed `.merged.bc`).
+    # 4. The path to an output plan. This script is expected to place a link
+    #    plan here (a JSON document indicating, among other things, which other
+    #    object files this object file depends on).
+    # 5. The link data's index in the Starlark array.
+    # 6. If this object file came from an archive, the name of the archive. Otherwise,
+    #    this line is empty.
+    # 7. If this object file came from an archive, the path to an output plan.
+    #    This script is expected to produce an archive link plan here (a JSON
+    #    document similar to the object link plan, except containing link
+    #    information for every file in the archive from which this object
+    #    came). Otherwise, this line is empty.
+    # 8. If this object file came from an archive, the indexes directory of that
+    #    archive. This script is expected to place all ThinLTO indexes derived
+    #    from object files originating from this archive in that directory.
+    #    Otherwise, this line is empty.
+    # 9. If this object file came from an archive, and the premerger is enabled,
+    #    the merged bc directory of that archive. This script is expected to place
+    #    all merged bitcode files derived from object files originating
+    #    from this archive in that directory. Otherwise, this line is empty.
+    #
+    # There are two indices that are derived from this meta file: the object
+    # index (mapping["index"]) and the archive index (mapping["archive_index"]).
+    # These indices are indices into Starlark arrays for all objects and archive
+    # linkables, respectively. This script does not inspect them.
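+    # For illustration, a (hypothetical) non-archive record occupies nine lines:
+    # the source bitcode path, the index output path, the merged bitcode path
+    # (empty when the premerger is disabled), the plan output path, the Starlark
+    # index (e.g. "7"), and four empty lines for the archive-only fields (6)-(9).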
+    mapping = {}
+    archives = {}
+    input_bitcode_file_path_to_output_merged_bitcode_file_path_mapping = {}
+    output_merged_bitcode_file_path_to_input_bitcode_file_path_mapping = {}
+    for i in range(0, len(meta_lines), 9):
+        path = meta_lines[i]
+        output = meta_lines[i + 1]
+        merged_output = meta_lines[i + 2]
+        plan_output = meta_lines[i + 3]
+        idx = int(meta_lines[i + 4])
+        archive_name = meta_lines[i + 5]
+        archive_plan = meta_lines[i + 6]
+        archive_index_dir = meta_lines[i + 7]
+        archive_merged_bitcode_dir = meta_lines[i + 8]
+
+        archive_idx = idx if output == "" else None  # archives do not have outputs
+        mapping[path] = {
+            "output": output,
+            "merged_output": merged_output,
+            "plan_output": plan_output,
+            "index": idx,
+            "archive_index": archive_idx,
+            "archive_name": archive_name,
+        }
+        if archive_idx is not None:
+            if premerger_enabled:
+                input_bitcode_file_path_to_output_merged_bitcode_file_path_mapping[
+                    path
+                ] = os.path.join(
+                    archive_merged_bitcode_dir, path + merged_bitcode_suffix
+                )
+                output_merged_bitcode_file_path_to_input_bitcode_file_path_mapping[
+                    os.path.join(
+                        archive_merged_bitcode_dir, path + merged_bitcode_suffix
+                    )
+                ] = path
+            archives[idx] = {
+                "name": archive_name,
+                "objects": [],
+                "plan": archive_plan,
+                "index_dir": archive_index_dir,
+                "merged_bitcode_dir": archive_merged_bitcode_dir,
+            }
+        elif premerger_enabled:
+            input_bitcode_file_path_to_output_merged_bitcode_file_path_mapping[path] = (
+                merged_output
+            )
+            output_merged_bitcode_file_path_to_input_bitcode_file_path_mapping[
+                merged_output
+            ] = path
+
+    if premerger_enabled:
+        with tempfile.NamedTemporaryFile(mode="w+t") as premerger_output_mapping:
+            json.dump(
+                input_bitcode_file_path_to_output_merged_bitcode_file_path_mapping,
+                premerger_output_mapping,
+            )
+            premerger_output_mapping.flush()
+            thin_link_args = args.index_args[1:]
+            thin_link_args.append(
+                f"-Wl,-mllvm,-premerger-output-map={premerger_output_mapping.name}"
+            )
+            subprocess.check_call(thin_link_args)
+    else:
+        subprocess.check_call(args.index_args[1:])
+
+    # We read the `index` and `index.full` files produced by the linker in the index stage
+    # and translate them into 2 outputs:
+    # 1. A link plan used to build the final_link args. (This one may be able to be removed if we refactor the workflow.)
+    # 2. A files list (*.final_link_index) used for the final link stage, which includes all the
+    #    files needed. It is based on index.full with some modifications, such as path updates
+    #    and removal of redundant dependencies added by the toolchain.
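+    # Worked example (paths hypothetical): an index.full entry whose mapping has
+    # a non-empty output "out/foo.o.thinlto.bc" is emitted to the final link
+    # index as "out/foo.o.opt.o", while an archive-extracted member keeps its
+    # path with "/objects/" rewritten to "/opt_objects/objects/".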
+ index = {} + index_files_set = set() + loaded_input_bitcode_files = set() + absorbed_source_files = set() + with open(index_path("index")) as indexfile: + for line in indexfile: + line = line.strip() + index_files_set.add(line) + path = os.path.relpath(line, start=args.index) + loaded_input_bitcode_files.add(path) + index[mapping[path]["index"]] = 1 + + def _input_bitcode_file_path_is_loaded_by_linker(path): + return path in loaded_input_bitcode_files + + non_lto_objects = {} + for path, data in sorted(mapping.items(), key=lambda v: v[0]): + output_loc = data["output"] + if os.path.exists(output_loc): + continue + + if data["archive_index"] is not None: + archives[data["archive_index"]]["objects"].append(path) + continue + + bc_file = index_path(path) + bitcode_suffix + imports_path = index_path(path) + imports_suffix + os.makedirs(os.path.dirname(output_loc), exist_ok=True) + + if os.path.exists(imports_path): + assert os.path.exists(bc_file), "missing bc file for %s" % path + os.rename(bc_file, output_loc) + imports = read_imports(imports_path) + imports_list = [] + archives_list = [] + for path in imports: + entry = mapping[path] + if entry["archive_index"] is not None: + archives_list.append(int(entry["archive_index"])) + else: + imports_list.append(entry["index"]) + plan = { + "imports": imports_list, + "archive_imports": archives_list, + "index": data["index"], + "bitcode_file": bc_file, + "path": path, + "is_bc": True, + } + + if premerger_enabled: + merged_bitcode_path = ( + input_bitcode_file_path_to_output_merged_bitcode_file_path_mapping[ + path + ] + ) + assert os.path.exists( + merged_bitcode_path + ), f"missing merged bitcode file at {merged_bitcode_path}" + merge_state = read_merged_bitcode_file(merged_bitcode_path) + if merge_state == BitcodeMergeState.ABSORBED: + absorbed_source_files.add(path) + plan["merge_state"] = merge_state.value + else: + non_lto_objects[data["index"]] = 1 + with open(output_loc, "w"): + pass + plan = { + "is_bc": False, + } + + with open(data["plan_output"], "w") as planout: + json.dump(plan, planout, sort_keys=True) + + for archive in archives.values(): + # For archives, we must produce a plan that provides Starlark enough + # information about how to launch a dynamic opt for each object file + # in the archive. + archive_plan = {} + + # This is convenient to store, since it's difficult for Starlark to + # calculate it. 
+ archive_plan["base_dir"] = os.path.dirname(archive["plan"]) + object_plans = [] + output_path = archive["index_dir"] + os.makedirs(output_path, exist_ok=True) + if premerger_enabled: + merged_bitcode_output_path = archive["merged_bitcode_dir"] + os.makedirs(merged_bitcode_output_path, exist_ok=True) + for obj in archive["objects"]: + imports_path = index_path(obj) + imports_suffix + if os.path.exists(imports_path): + bc_file = index_path(obj) + bitcode_suffix + index_path(obj) + merged_bitcode_suffix + os.rename(bc_file, os.path.join(output_path, os.path.basename(bc_file))) + + imports = read_imports(imports_path) + imports_list = [] + archives_list = [] + for path in imports: + entry = mapping[path] + if entry["archive_index"] is not None: + archives_list.append(int(entry["archive_index"])) + else: + imports_list.append(entry["index"]) + + object_plan = { + "is_bc": True, + "path": obj, + "imports": imports_list, + "archive_imports": archives_list, + "bitcode_file": os.path.join( + output_path, os.path.basename(bc_file) + ), + } + + if not _input_bitcode_file_path_is_loaded_by_linker(obj): + object_plan["not_loaded_by_linker"] = True + + if premerger_enabled: + merged_bc_file = input_bitcode_file_path_to_output_merged_bitcode_file_path_mapping[ + obj + ] + merge_state = read_merged_bitcode_file(merged_bc_file) + if merge_state == BitcodeMergeState.ABSORBED: + absorbed_source_files.add(obj) + object_plan["merge_state"] = merge_state.value + object_plan["merged_bitcode_path"] = merged_bc_file + + object_plans.append(object_plan) + else: + object_plans.append( + { + "is_bc": False, + "path": obj, + "merge_state": BitcodeMergeState.STANDALONE.value, + } + ) + + archive_plan["objects"] = object_plans + with open(archive["plan"], "w") as planout: + json.dump(archive_plan, planout, sort_keys=True) + + with open(args.link_plan, "w") as outfile: + json.dump( + { + "non_lto_objects": non_lto_objects, + "index": index, + }, + outfile, + indent=2, + sort_keys=True, + ) + + # Append all search path flags (e.g -Lfbcode/third-party-buck/platform010/build/glibc/lib) from argsfile to final_index + # this workaround is to make dist_lto compatible with link_group. see T136415235 for more info + argsfile = _get_argsfile(args) + lib_search_path = _extract_lib_search_path(argsfile) + + # build index file for final link use + with open(index_path("index.full")) as full_index_input, open( + args.final_link_index, "w" + ) as final_link_index_output: + final_link_index_output.write("\n".join(lib_search_path) + "\n") + for line in full_index_input: + line = line.strip() + path = os.path.relpath(line, start=args.index) + if path in absorbed_source_files: + continue + if line in index_files_set: + if mapping[path]["output"]: + # handle files that were not extracted from archives + output = mapping[path]["output"].replace( + bitcode_suffix, opt_objects_suffix + ) + final_link_index_output.write(output + "\n") + elif os.path.exists(index_path(path) + imports_suffix): + # handle files built from source that were extracted from archives + opt_objects_path = path.replace( + "/objects/", "/opt_objects/objects/" + ) + final_link_index_output.write(opt_objects_path + "\n") + else: + # handle pre-built archives + final_link_index_output.write(line + "\n") + else: + # handle input files that did not come from linker input, e.g. 
linker scripts
+                final_link_index_output.write(line + "\n")
+
+
+sys.exit(main(sys.argv))
diff --git a/prelude/cxx/dist_lto/tools/dist_lto_planner_gnu.py b/prelude/cxx/dist_lto/tools/dist_lto_planner_gnu.py
new file mode 100644
index 0000000000000..a83f90a809fe6
--- /dev/null
+++ b/prelude/cxx/dist_lto/tools/dist_lto_planner_gnu.py
@@ -0,0 +1,319 @@
+#!/usr/bin/env python3
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+"""
+A simple wrapper around a distributed thinlto index command to fit into buck2's
+distributed thinlto build.
+
+This reads in a couple of things:
+    1. The "meta" file. This is a list of tuples of (object file, index output,
+    plan output). All items are line-separated (so each tuple is three lines).
+    2. The index and link plan output paths
+    3. The commands for the actual index command.
+
+It will invoke the index command and then copy the index outputs to the
+requested locations and write a plan for each of those objects. This "plan" is
+a simple json file with the most important thing being a list of the indices
+of the imports needed for that file.
+
+It will then additionally write a link plan, which is just a translation of
+the thinlto index (which lists the objects actually needed for the final link).
+
+
+Both opt and link plans use indices to refer to other files because it allows the bzl
+code to easily map back to other objects held in buck memory.
+"""
+
+# pyre-unsafe
+
+import argparse
+import json
+import os
+import os.path
+import subprocess
+import sys
+from typing import List
+
+
+def _get_argsfile(args) -> str:
+    # go through the flags passed to the linker and find the index argsfile
+    argsfiles = list(
+        filter(lambda arg: arg.endswith("thinlto_index_argsfile"), args.index_args)
+    )
+    assert (
+        len(argsfiles) == 1
+    ), f"expected 1 argsfile but found multiple: {argsfiles}"
+    argsfile = argsfiles[0]
+    if argsfile.startswith("@"):
+        argsfile = argsfile[1:]
+    return argsfile
+
+
+def _extract_lib_search_path(argsfile_path: str) -> List[str]:
+    lib_search_path = []
+    with open(argsfile_path) as argsfile:
+        for line in argsfile:
+            if line.startswith("-L"):
+                lib_search_path.append(line.strip())
+    return lib_search_path
+
+
+def main(argv):
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--meta")
+    parser.add_argument("--index")
+    parser.add_argument("--link-plan")
+    parser.add_argument("--final-link-index")
+    parser.add_argument("index_args", nargs=argparse.REMAINDER)
+    args = parser.parse_args(argv[1:])
+
+    subprocess.check_call(args.index_args[1:])
+
+    bitcode_suffix = ".thinlto.bc"
+    imports_suffix = ".imports"
+    opt_objects_suffix = ".opt.o"  # note: these files do not exist yet; this is used to generate the index file used in the final link
+
+    with open(args.meta) as meta:
+        meta_lines = [line.strip() for line in meta.readlines()]
+
+    def read_imports(path, imports_path):
+        with open(imports_path) as infile:
+            return [line.strip() for line in infile.readlines()]
+
+    def index_path(path):
+        return os.path.join(args.index, path)
+
+    # The meta file comes directly from dist_lto.bzl and consists of a list of
+    # 7-tuples of information. It is easiest for us to write each tuple member
+    # as a separate line in Starlark, so these 7-tuples are encoded in groups
+    # of seven lines.
+    #
+    # The seven pieces of information are:
+    # 1. The path to the source bitcode file. This is used as an index into
+    #    a dictionary (`mapping`) that records much of the metadata coming
+    #    from these lines.
+    # 2. The path to an output bitcode file. This script is expected to place a
+    #    ThinLTO index file at this location (suffixed `.thinlto.bc`).
+    # 3. The path to an output plan. This script is expected to place a link
+    #    plan here (a JSON document indicating, among other things, which other
+    #    object files this object file depends on).
+    # 4. The link data's index in the Starlark array.
+    # 5. If this object file came from an archive, the name of the archive. Otherwise,
+    #    this line is empty.
+    # 6. If this object file came from an archive, the path to an output plan.
+    #    This script is expected to produce an archive link plan here (a JSON
+    #    document similar to the object link plan, except containing link
+    #    information for every file in the archive from which this object
+    #    came). Otherwise, this line is empty.
+    # 7. If this object file came from an archive, the indexes directory of that
+    #    archive. This script is expected to place all ThinLTO indexes derived
+    #    from object files originating from this archive in that directory.
+    #    Otherwise, this line is empty.
+    #
+    # There are two indices that are derived from this meta file: the object
+    # index (mapping["index"]) and the archive index (mapping["archive_index"]).
+    # These indices are indices into Starlark arrays for all objects and archive
+    # linkables, respectively. This script does not inspect them.
+    mapping = {}
+    archives = {}
+    for i in range(0, len(meta_lines), 7):
+        path = meta_lines[i]
+        output = meta_lines[i + 1]
+        plan_output = meta_lines[i + 2]
+        idx = int(meta_lines[i + 3])
+        archive_name = meta_lines[i + 4]
+        archive_plan = meta_lines[i + 5]
+        archive_index_dir = meta_lines[i + 6]
+
+        archive_idx = idx if output == "" else None  # archives do not have outputs
+        mapping[path] = {
+            "output": output,
+            "plan_output": plan_output,
+            "index": idx,
+            "archive_index": archive_idx,
+            "archive_name": archive_name,
+        }
+        if archive_idx is not None:
+            archives[idx] = {
+                "name": archive_name,
+                "objects": [],
+                "plan": archive_plan,
+                "index_dir": archive_index_dir,
+            }
+
+    non_lto_objects = {}
+    for path, data in sorted(mapping.items(), key=lambda v: v[0]):
+        output_loc = data["output"]
+        if os.path.exists(output_loc):
+            continue
+
+        if data["archive_index"] is not None:
+            archives[data["archive_index"]]["objects"].append(path)
+            continue
+
+        bc_file = index_path(path) + bitcode_suffix
+        imports_path = index_path(path) + imports_suffix
+        os.makedirs(os.path.dirname(output_loc), exist_ok=True)
+
+        if os.path.exists(imports_path):
+            assert os.path.exists(bc_file), "missing bc file for %s" % path
+            os.rename(bc_file, output_loc)
+            imports = read_imports(path, imports_path)
+            imports_list = []
+            archives_list = []
+            for path in imports:
+                entry = mapping[path]
+                if entry["archive_index"] is not None:
+                    archives_list.append(int(entry["archive_index"]))
+                else:
+                    imports_list.append(entry["index"])
+            plan = {
+                "imports": imports_list,
+                "archive_imports": archives_list,
+                "index": data["index"],
+                "bitcode_file": bc_file,
+                "path": path,
+                "is_bc": True,
+            }
+        else:
+            non_lto_objects[data["index"]] = 1
+            with open(output_loc, "w"):
+                pass
+            plan = {
+                "is_bc": False,
+            }
+
+        with open(data["plan_output"], "w") as planout:
+            json.dump(plan, planout, sort_keys=True)
+
+    for archive in archives.values():
+        # For
archives, we must produce a plan that provides Starlark enough
+        # information about how to launch a dynamic opt for each object file
+        # in the archive.
+        archive_plan = {}
+
+        # This is convenient to store, since it's difficult for Starlark to
+        # calculate it.
+        archive_plan["base_dir"] = os.path.dirname(archive["plan"])
+        object_plans = []
+        for obj in archive["objects"]:
+            imports_path = index_path(obj) + imports_suffix
+            output_path = archive["index_dir"]
+            os.makedirs(output_path, exist_ok=True)
+            if os.path.exists(imports_path):
+                bc_file = index_path(obj) + bitcode_suffix
+                os.rename(bc_file, os.path.join(output_path, os.path.basename(bc_file)))
+                imports = read_imports(path, imports_path)
+
+                imports_list = []
+                archives_list = []
+                for path in imports:
+                    entry = mapping[path]
+                    if entry["archive_index"] is not None:
+                        archives_list.append(int(entry["archive_index"]))
+                    else:
+                        imports_list.append(entry["index"])
+                object_plans.append(
+                    {
+                        "is_bc": True,
+                        "path": obj,
+                        "imports": imports_list,
+                        "archive_imports": archives_list,
+                        "bitcode_file": os.path.join(
+                            output_path, os.path.basename(bc_file)
+                        ),
+                    }
+                )
+            else:
+                object_plans.append(
+                    {
+                        "is_bc": False,
+                        "path": obj,
+                    }
+                )
+
+        archive_plan["objects"] = object_plans
+        with open(archive["plan"], "w") as planout:
+            json.dump(archive_plan, planout, sort_keys=True)
+
+    # We read the `index` and `index.full` files produced by the linker in the index stage
+    # and translate them into 2 outputs:
+    # 1. A link plan used to build the final_link args. (This one may be able to be removed if we refactor the workflow.)
+    # 2. A files list (*.final_link_index) used for the final link stage, which includes all the
+    #    files needed. It is based on index.full with some modifications, such as path updates
+    #    and removal of redundant dependencies added by the toolchain.
+    index = {}
+    index_files_set = set()
+    # TODO(T130322878): since we call the linker wrapper twice (in index and in final_link), we remove
+    # these libs from the index file for now to avoid adding them twice.
+    KNOWN_REMOVABLE_DEPS_SUFFIX = [
+        "glibc/lib/crt1.o",
+        "glibc/lib/crti.o",
+        "glibc/lib/Scrt1.o",
+        "crtbegin.o",
+        "crtbeginS.o",
+        ".build_info.o",
+        "crtend.o",
+        "crtendS.o",
+        "glibc/lib/crtn.o",
+    ]
+    with open(index_path("index")) as indexfile:
+        for line in indexfile:
+            line = line.strip()
+            index_files_set.add(line)
+            path = os.path.relpath(line, start=args.index)
+            index[mapping[path]["index"]] = 1
+
+    with open(args.link_plan, "w") as outfile:
+        json.dump(
+            {
+                "non_lto_objects": non_lto_objects,
+                "index": index,
+            },
+            outfile,
+            indent=2,
+            sort_keys=True,
+        )
+
+    # Append all search path flags (e.g. -Lfbcode/third-party-buck/platform010/build/glibc/lib) from the argsfile to the final_index.
+    # This workaround is to make dist_lto compatible with link_group; see T136415235 for more info.
+    argsfile = _get_argsfile(args)
+    lib_search_path = _extract_lib_search_path(argsfile)
+
+    # build index file for final link use
+    with open(index_path("index.full")) as full_index_input, open(
+        args.final_link_index, "w"
+    ) as final_link_index_output:
+        final_link_index_output.write("\n".join(lib_search_path) + "\n")
+        for line in full_index_input:
+            line = line.strip()
+            if any(filter(line.endswith, KNOWN_REMOVABLE_DEPS_SUFFIX)):
+                continue
+            path = os.path.relpath(line, start=args.index)
+            if line in index_files_set:
+                if mapping[path]["output"]:
+                    # handle files that were not extracted from archives
+                    output = mapping[path]["output"].replace(
+                        bitcode_suffix, opt_objects_suffix
+                    )
+                    final_link_index_output.write(output + "\n")
+                elif os.path.exists(index_path(path) + imports_suffix):
+                    # handle files built from source that were extracted from archives
+                    opt_objects_path = path.replace(
+                        "/objects/", "/opt_objects/objects/"
+                    )
+                    final_link_index_output.write(opt_objects_path + "\n")
+                else:
+                    # handle pre-built archives
+                    final_link_index_output.write(line + "\n")
+            else:
+                # handle input files that did not come from linker input, e.g. linker scripts
+                final_link_index_output.write(line + "\n")
+
+
+sys.exit(main(sys.argv))
diff --git a/prelude/cxx/dist_lto/tools/dist_lto_prepare.py b/prelude/cxx/dist_lto/tools/dist_lto_prepare.py
index 69f7fce54c5ae..3fe9983963397 100644
--- a/prelude/cxx/dist_lto/tools/dist_lto_prepare.py
+++ b/prelude/cxx/dist_lto/tools/dist_lto_prepare.py
@@ -15,8 +15,10 @@
 import enum
 import json
 import os
+import shutil
 import subprocess
 import sys
+import tempfile
 from typing import List, Tuple
 
 
@@ -80,67 +82,58 @@ def main(argv: List[str]) -> int:
         # a long time, llvm-ar does not support --output and the change in llvm-ar
         # looks like it has stalled for years (https://reviews.llvm.org/D69418)
         # So, we need to invoke ar in the directory that we want it to extract into, and so
-        # need to adjust some paths.
-        ar_path = os.path.relpath(args.ar, start=objects_path)
-        archive_path = os.path.relpath(args.archive, start=objects_path)
+        # need absolute paths.
+        ar_path = os.path.abspath(args.ar)
+        archive_path = os.path.abspath(args.archive)
 
         output = subprocess.check_output(
             [ar_path, "t", archive_path], cwd=objects_path
         ).decode()
         member_list = [member for member in output.split("\n") if member]
 
-        # no duplicated filename
+        # This will extract all the members of the archive, including duplicates,
+        # with later members replacing earlier ones. That is, if first/foo.txt and
+        # second/foo.txt are placed in an archive in that order, this will leave
+        # second/foo.txt in the objects_path.
        output = subprocess.check_output(
             [ar_path, "xv", archive_path], cwd=objects_path
         ).decode()
-        for line in output.splitlines():
-            assert line.startswith("x - ")
-            obj = line[4:]
-            known_objects.append(_gen_path(objects_path, obj))
 
         # Count all members of the same name.
         counter = {}
         for member in member_list:
             counter.setdefault(member, 0)
             counter[member] += 1
-
-        for member, count in counter.items():
-            if count <= 1:
-                continue
-            for current in range(1, count + 1):
-                if current == 1:
-                    # just extract the file
-                    output = subprocess.check_output(
-                        [ar_path, "xN", str(current), archive_path, member],
-                        cwd=objects_path,
-                    ).decode()
-                    assert not output
-                    # We've already added this above.
- else: - # llvm doesn't allow --output so we need this clumsiness - tmp_filename = "tmp" - current_file = _gen_filename(member, current) - # rename current 'member' file to tmp - output = subprocess.check_output( - ["mv", member, tmp_filename], cwd=objects_path - ).decode() - assert not output + # Insert all objects at most once into the list of known objects + if counter[member] == 1: + known_objects.append(_gen_path(objects_path, member)) + + with tempfile.TemporaryDirectory() as temp_dir: + # For each duplicate member, rename and extract duplicates 1 through N + # inclusive. While N was already extracted above, we don't want to rely + # upon this implementation detail of llvm-ar. + for member, count in counter.items(): + if count <= 1: + continue + for current in range(1, count + 1): # extract the file from archive output = subprocess.check_output( - [ar_path, "xN", str(current), archive_path, member], - cwd=objects_path, - ).decode() - assert not output - # rename the newly extracted file - output = subprocess.check_output( - ["mv", member, current_file], cwd=objects_path - ).decode() - assert not output - # rename the tmp file back to 'member' - output = subprocess.check_output( - ["mv", tmp_filename, member], cwd=objects_path + [ + ar_path, + "xN", + str(current), + archive_path, + member, + ], + cwd=temp_dir, ).decode() - assert not output - known_objects.append(_gen_path(objects_path, current_file)) + unique_name = _gen_filename(member, current) + # rename and move the newly extracted file to objects_path + shutil.move( + os.path.join(temp_dir, member), + os.path.join(os.path.abspath(objects_path), unique_name), + ) + if current > 1: + known_objects.append(_gen_path(objects_path, unique_name)) elif file_type == ArchiveKind.THIN_ARCHIVE: output = subprocess.check_output([args.ar, "t", args.archive]).decode() diff --git a/prelude/cxx/dist_lto/tools/tests/test_dist_lto_opt.py b/prelude/cxx/dist_lto/tools/tests/test_dist_lto_opt.py index 83cda1e6d94e1..b83bd40bd90d7 100644 --- a/prelude/cxx/dist_lto/tools/tests/test_dist_lto_opt.py +++ b/prelude/cxx/dist_lto/tools/tests/test_dist_lto_opt.py @@ -8,7 +8,7 @@ import unittest -from cxx.dist_lto.tools.dist_lto_opt import _filter_flags +from cxx.dist_lto.tools.dist_lto_opt_gnu import _filter_flags class TestDistLtoOpt(unittest.TestCase): @@ -30,7 +30,6 @@ def test_filter_flags(self): flags, [ "-O2", - "-fexperimental-new-pass-manager", "-ffunction-sections", "-fdata-sections", "-mllvm", @@ -52,16 +51,13 @@ def test_filter_flags_hhvm_case_rev_0f8618f31(self): "--target=x86_64-redhat-linux-gnu", "-nostdinc", "-resource-dir", - "fbcode/third-party-buck/platform010/build/llvm-fb/12/lib/clang/stable", "-idirafter", - "fbcode/third-party-buck/platform010/build/llvm-fb/12/lib/clang/stable/include", "-idirafter", "fbcode/third-party-buck/platform010/build/glibc/include", "-idirafter", "fbcode/third-party-buck/platform010/build/kernel-headers/include", "-Bfbcode/third-party-buck/platform010/build/binutils/x86_64-facebook-linux/bin", "--cflag=--target=x86_64-redhat-linux-gnu", - "--ar=fbcode/third-party-buck/platform010/build/llvm-fb/12/bin/llvm-ar", "-Bfbcode/third-party-buck/platform010/build/glibc/lib", "-Bfbcode/third-party-buck/platform010/tools/gcc/lib/gcc/x86_64-redhat-linux-gnu/trunk", "-Lfbcode/third-party-buck/platform010/build/libgcc/lib/gcc/x86_64-facebook-linux/trunk", @@ -69,7 +65,6 @@ def test_filter_flags_hhvm_case_rev_0f8618f31(self): "-Wl,--dynamic-linker,/usr/local/fbcode/platform010/lib/ld.so", "-Wl,--disable-new-dtags", 
"-Bfbcode/third-party-buck/platform010/build/binutils/x86_64-facebook-linux/bin", - "-Bbuck-out/v2/gen/fbcode/8e3db19fe005003a/third-party-buck/platform010/build/llvm-fb/12/__lld_path__/lld_path/bin", "-Wl,--no-mmap-output-file", "-nodefaultlibs", "--target=x86_64-redhat-linux-gnu", @@ -92,7 +87,6 @@ def test_filter_flags_hhvm_case_rev_0f8618f31(self): "-Wl,-mllvm,-hot-callsite-threshold=12000", "-Wl,--lto-whole-program-visibility", "-fwhole-program-vtables", - "-fexperimental-new-pass-manager", "-Wl,--no-discard-section=.nv_fatbin", "-Wl,--no-discard-section=.nvFatBinSegment", "fbcode/tools/build/move_gpu_sections_implicit_linker_script.txt", @@ -143,7 +137,6 @@ def test_filter_flags_hhvm_case_rev_0f8618f31(self): flags, [ "-O2", - "-fexperimental-new-pass-manager", "-ffunction-sections", "-fdata-sections", "-mllvm", @@ -159,10 +152,7 @@ def test_filter_flags_hhvm_case_rev_0f8618f31(self): def test_filter_flags_unicorn_case_rev_0f8618f31(self): inputs = [ - "--ld=fbcode/third-party-buck/platform010/build/llvm-fb/12/bin/clang++", - "--cc=buck-out/v2/gen/fbcode/8e3db19fe005003a/tools/build/buck/wrappers/__fbcc__/fbcc --cc=fbcode/third-party-buck/platform010/build/llvm-fb/12/bin/clang --target=x86_64-redhat-linux-gnu -nostdinc -resource-dir fbcode/third-party-buck/platform010/build/llvm-fb/12/lib/clang/stable -idirafter fbcode/third-party-buck/platform010/build/llvm-fb/12/lib/clang/stable/include -idirafter fbcode/third-party-buck/platform010/build/glibc/include -idirafter fbcode/third-party-buck/platform010/build/kernel-headers/include -Bfbcode/third-party-buck/platform010/build/binutils/x86_64-facebook-linux/bin", "--cflag=--target=x86_64-redhat-linux-gnu", - "--ar=fbcode/third-party-buck/platform010/build/llvm-fb/12/bin/llvm-ar", "-Bfbcode/third-party-buck/platform010/build/glibc/lib", "-Bfbcode/third-party-buck/platform010/tools/gcc/lib/gcc/x86_64-redhat-linux-gnu/trunk", "-Lfbcode/third-party-buck/platform010/build/libgcc/lib/gcc/x86_64-facebook-linux/trunk", @@ -170,7 +160,6 @@ def test_filter_flags_unicorn_case_rev_0f8618f31(self): "-Wl,--dynamic-linker,/usr/local/fbcode/platform010/lib/ld.so", "-Wl,--disable-new-dtags", "-Bfbcode/third-party-buck/platform010/build/binutils/x86_64-facebook-linux/bin", - "-Bbuck-out/v2/gen/fbcode/8e3db19fe005003a/third-party-buck/platform010/build/llvm-fb/12/__lld_path__/lld_path/bin", "-Wl,--no-mmap-output-file", "-nodefaultlibs", "--target=x86_64-redhat-linux-gnu", @@ -190,7 +179,6 @@ def test_filter_flags_unicorn_case_rev_0f8618f31(self): "-Wl,--discard-section=.rela.debug_types", "-Wl,-O1", "-Wl,--build-id=sha1", - "-fexperimental-new-pass-manager", "-Xlinker", "-znow", "-Xlinker", @@ -262,7 +250,6 @@ def test_filter_flags_unicorn_case_rev_0f8618f31(self): flags, [ "-O2", - "-fexperimental-new-pass-manager", "-ffunction-sections", "-fdata-sections", "-fprofile-sample-use=buck-out/v2/gen/fbcode/40fc99293b37c503/fdo/autofdo/default_profile/__autofdo__/out/profile", diff --git a/prelude/cxx/dwp.bzl b/prelude/cxx/dwp.bzl index c3cafa7ba8953..76e6e3c4e5d15 100644 --- a/prelude/cxx/dwp.bzl +++ b/prelude/cxx/dwp.bzl @@ -24,17 +24,17 @@ def run_dwp_action( referenced_objects: [ArgLike, list[Artifact]], dwp_output: Artifact, local_only: bool): - args = cmd_args() dwp = toolchain.binary_utilities_info.dwp - # llvm trunk now supports 64-bit debug cu indedx, add --continue-on-cu-index-overflow by default - # to suppress dwp file overflow warning - args.add("/bin/sh", "-c", '"$1" --continue-on-cu-index-overflow -o "$2" -e "$3" && touch "$2"', "") - 
args.add(dwp, dwp_output.as_output(), obj)
-
-    # All object/dwo files referenced in the library/executable are implicitly
-    # processed by dwp.
-    args.hidden(referenced_objects)
+    args = cmd_args(
+        # llvm trunk now supports a 64-bit debug cu index; add --continue-on-cu-index-overflow by default
+        # to suppress the dwp file overflow warning
+        ["/bin/sh", "-c", '"$1" --continue-on-cu-index-overflow -o "$2" -e "$3" && touch "$2"', ""] +
+        [dwp, dwp_output.as_output(), obj],
+        # All object/dwo files referenced in the library/executable are implicitly
+        # processed by dwp.
+        hidden = referenced_objects,
+    )
 
     category = "dwp"
     if category_suffix != None:
diff --git a/prelude/cxx/gcno.bzl b/prelude/cxx/gcno.bzl
new file mode 100644
index 0000000000000..cfb9d85b0710d
--- /dev/null
+++ b/prelude/cxx/gcno.bzl
@@ -0,0 +1,11 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# Provider that exposes the .gcno files produced during compilation
+GcnoFilesInfo = provider(fields = {
+    "gcno_files": provider_field(list[Artifact]),
+})
diff --git a/prelude/cxx/groups.bzl b/prelude/cxx/groups.bzl
index 65880422f78df..62a027050de89 100644
--- a/prelude/cxx/groups.bzl
+++ b/prelude/cxx/groups.bzl
@@ -5,10 +5,8 @@
 # License, Version 2.0 found in the LICENSE-APACHE file in the root directory
 # of this source tree.
 
-load(
-    "@prelude//linking:link_info.bzl",
-    "Linkage",
-)
+load("@prelude//cxx:groups_types.bzl", "Traversal")
+load("@prelude//linking:types.bzl", "Linkage")
 load(
     "@prelude//utils:build_target_pattern.bzl",
     "BuildTargetPattern",
@@ -16,7 +14,7 @@
 )
 load(
     "@prelude//utils:graph_utils.bzl",
-    "breadth_first_traversal_by",
+    "depth_first_traversal_by",
 )
 load(
     "@prelude//utils:strings.bzl",
@@ -24,53 +22,18 @@
 )
 load(
     "@prelude//utils:utils.bzl",
-    "map_val",
     "value_or",
 )
-
-# Types of group traversal
-Traversal = enum(
-    # Includes the target and all of it's transitive dependencies in the group.
-    "tree",
-    # Includes only the target in the group.
-    "node",
-)
-
-# Optional type of filtering
-FilterType = enum(
-    # Filters for targets with labels matching the regex pattern defined after `label:`.
-    "label",
-    # Filters for targets for the build target pattern defined after "pattern:".
-    "pattern",
-)
-
-BuildTargetFilter = record(
-    pattern = field(BuildTargetPattern),
-    _type = field(FilterType, FilterType("pattern")),
-)
-
-LabelFilter = record(
-    regex = field("regex"),
-    _type = field(FilterType, FilterType("label")),
-)
-
-# Label for special group mapping which makes every target associated with it to be included in all groups
-MATCH_ALL_LABEL = "MATCH_ALL"
-
-# Label for special group mapping which makes every target associated with it to be linked directly
-# against the final binary
-NO_MATCH_LABEL = "NO_MATCH"
-
-# Representation of a parsed group mapping
-GroupMapping = record(
-    # The root to apply this mapping to.
-    root = field([Label, None], None),
-    # The type of traversal to use.
-    traversal = field(Traversal, Traversal("tree")),
-    # Optional filter type to apply to the traversal.
-    filters = field(list[[BuildTargetFilter, LabelFilter]], []),
-    # Preferred linkage for this target when added to a link group.
-    preferred_linkage = field([Linkage, None], None),
+load(
+    ":groups_types.bzl",
+    "BuildTargetFilter",
+    "FilterType",
+    "Group",
+    "GroupAttrs",
+    "GroupDefinition",
+    "GroupMapping",
+    "LabelFilter",
+    "TargetRegexFilter",
 )
 
 _VALID_ATTRS = [
@@ -79,45 +42,49 @@
     "exported_linker_flags",
     "discard_group",
     "linker_flags",
+    "requires_root_node_exists",
+    "prohibit_file_duplicates",
+    "prefer_optimized_experimental",
 ]
 
-# Representation of group attributes
-GroupAttrs = record(
-    # Use distributed thinlto to build the link group shared library.
-    enable_distributed_thinlto = field(bool, False),
-    # Enable this link group if the binary's node count exceeds the given threshold
-    enable_if_node_count_exceeds = field([int, None], None),
-    # Discard all dependencies in the link group, useful for dropping unused dependencies
-    # from the build graph.
-    discard_group = field(bool, False),
-    # Adds additional linker flags used to link the link group shared object.
-    linker_flags = field(list, []),
-    # Adds additional linker flags to apply to dependents that link against the
-    # link group's shared object.
-    exported_linker_flags = field(list, []),
-)
-
-# Representation of a parsed group
-Group = record(
-    # The name for this group.
-    name = str,
-    # The mappings that are part of this group.
-    mappings = list[GroupMapping],
-    attrs = GroupAttrs,
-)
+# Traversal types in this list will assign only the node itself to the group
+# (as opposed to the transitive deps of the node's tree).
+_TRAVERSALS_TO_ASSIGN_NODE = [
+    Traversal("node"),
+    Traversal("subfolders"),
+    # TODO (dust): Possible perf optimization:
+    # When intersecting configured targets, it's not possible to intersect
+    # a parent without also intersecting its children.
+    #
+    # As a result, there's a possible perf optimization to assign 'tree'
+    # to intersected targets instead, and leverage that to avoid traversing
+    # the entire tree of every root.
+    #
+    # For example:
+    # If iterating the tree of 'root2' we find a node which
+    # was also present in 'root1', we can skip traversing the subtree
+    # because it's inevitable that everything is going to match there too.
+ Traversal("intersect_any_roots"), +] # Creates a group from an existing group, overwriting any properties provided def create_group( group: Group, name: [None, str] = None, mappings: [None, list[GroupMapping]] = None, - attrs: [None, GroupAttrs] = None): + attrs: [None, GroupAttrs] = None, + definition_type: [None, GroupDefinition] = None): return Group( name = value_or(name, group.name), mappings = value_or(mappings, group.mappings), attrs = value_or(attrs, group.attrs), + definition_type = value_or(definition_type, group.definition_type), ) +def get_roots_from_mapping(mapping): + deps = mapping[0] if type(mapping[0]) == "list" else [mapping[0]] + return filter(None, deps) + def parse_groups_definitions( map: list, # Function to parse a root label from the input type, allowing different @@ -138,20 +105,34 @@ def parse_groups_definitions( exported_linker_flags = attrs.get("exported_linker_flags", []), discard_group = attrs.get("discard_group", False), linker_flags = attrs.get("linker_flags", []), + requires_root_node_exists = attrs.get("requires_root_node_exists", True), + prohibit_file_duplicates = attrs.get("prohibit_file_duplicates", False), + prefer_optimized_experimental = attrs.get("prefer_optimized_experimental", False), ) parsed_mappings = [] for entry in mappings: traversal = _parse_traversal_from_mapping(entry[1]) mapping = GroupMapping( - root = map_val(parse_root, entry[0]), + roots = filter(None, [parse_root(root) for root in get_roots_from_mapping(entry)]), traversal = traversal, filters = _parse_filter_from_mapping(entry[2]), preferred_linkage = Linkage(entry[3]) if len(entry) > 3 and entry[3] else None, ) + num_roots = len(mapping.roots) if mapping.roots else 0 + if num_roots > 1 and mapping.traversal != Traversal("intersect_any_roots"): + fail("Invariant. A link_group mapping with traversal type: {} can only have 1 root node. {} found.".format(mapping.traversal, mapping.roots)) + elif mapping.traversal == Traversal("intersect_any_roots") and num_roots < 2: + fail("Invariant. A link_group mapping with traversal type 'intersect' must have at least 2 root nodes. {} found.".format(mapping.roots)) + parsed_mappings.append(mapping) - group = Group(name = name, mappings = parsed_mappings, attrs = group_attrs) + group = Group( + name = name, + mappings = parsed_mappings, + attrs = group_attrs, + definition_type = GroupDefinition("explicit"), + ) groups.append(group) return groups @@ -161,10 +142,14 @@ def _parse_traversal_from_mapping(entry: str) -> Traversal: return Traversal("tree") elif entry == "node": return Traversal("node") + elif entry == "subfolders": + return Traversal("subfolders") + elif entry == "intersect_any_roots": + return Traversal("intersect_any_roots") else: fail("Unrecognized group traversal type: " + entry) -def _parse_filter(entry: str) -> [BuildTargetFilter, LabelFilter]: +def _parse_filter(entry: str) -> [BuildTargetFilter, LabelFilter, TargetRegexFilter]: for prefix in ("label:", "tag:"): label_regex = strip_prefix(prefix, entry) if label_regex != None: @@ -172,53 +157,87 @@ def _parse_filter(entry: str) -> [BuildTargetFilter, LabelFilter]: # anywhere in the text, while we want full text match for group label # text. return LabelFilter( - regex = experimental_regex("^{}$".format(label_regex)), + # TODO(nga): fancy is probably not needed here. 
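+                # e.g. a (hypothetical) mapping entry "label:unittest.*" yields a
+                # LabelFilter whose anchored pattern "^unittest.*$" must match a
+                # group label in full.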
+            regex = regex("^{}$".format(label_regex), fancy = True),
+        )
+
+    target_regex = strip_prefix("target_regex:", entry)
+    if target_regex != None:
+        return TargetRegexFilter(regex = regex("^{}$".format(target_regex), fancy = True))
+
     pattern = strip_prefix("pattern:", entry)
     if pattern != None:
         return BuildTargetFilter(
             pattern = parse_build_target_pattern(pattern),
         )
-    fail("Invalid group mapping filter: {}\nFilter must begin with `label:`, `tag:`, or `pattern:`.".format(entry))
+    fail("Invalid group mapping filter: {}\nFilter must begin with `label:`, `tag:`, `target_regex:`, or `pattern:`.".format(entry))
 
-def _parse_filter_from_mapping(entry: [list[str], str, None]) -> list[[BuildTargetFilter, LabelFilter]]:
+def _parse_filter_from_mapping(entry: [list[str], str, None]) -> list[[BuildTargetFilter, LabelFilter, TargetRegexFilter]]:
     if type(entry) == type([]):
         return [_parse_filter(e) for e in entry]
     if type(entry) == type(""):
         return [_parse_filter(entry)]
     return []
 
-def compute_mappings(groups: list[Group], graph_map: dict[Label, typing.Any]) -> dict[Label, str]:
+def compute_mappings(groups_map: dict[str, Group], graph_map: dict[Label, typing.Any]) -> dict[Label, str]:
     """
-    Returns the group mappings {target label -> group name} based on the provided groups and graph.
+    Returns the group mappings {target label -> group name} based on the provided groups_map and graph.
     """
-    if not groups:
+    if not groups_map:
         return {}
 
     target_to_group_map = {}
     node_traversed_targets = {}
 
-    for group in groups:
+    for group in groups_map.values():
         for mapping in group.mappings:
             targets_in_mapping = _find_targets_in_mapping(graph_map, mapping)
             for target in targets_in_mapping:
                 # If the target doesn't exist in our graph, skip the mapping.
                 if target not in graph_map:
                     continue
-                _update_target_to_group_mapping(graph_map, target_to_group_map, node_traversed_targets, group.name, mapping, target)
+                _update_target_to_group_mapping(graph_map, target_to_group_map, node_traversed_targets, group, groups_map, mapping, target)
 
     return target_to_group_map
 
+def get_dedupped_roots_from_groups(groups: list[Group]) -> list[Label]:
+    roots = {}
+    for group in groups:
+        for mapping in group.mappings:
+            if not mapping.roots:
+                continue
+
+            for root in mapping.roots:
+                roots[root] = True
+
+    return list(roots.keys())
+
 def _find_targets_in_mapping(
         graph_map: dict[Label, typing.Any],
         mapping: GroupMapping) -> list[Label]:
     # If we have no filtering, we don't need to do any traversal to find targets to include.
     if not mapping.filters:
-        if mapping.root == None:
-            fail("no filter or explicit root given: {}", mapping)
-        return [mapping.root]
+        if not mapping.roots:
+            # Some link groups may want to partially define their mapping roots based on constraints
+            # that can potentially resolve to `None`.
+            #
+            # E.g.:
+            # ```
+            # ("evict-mkl", [
+            #     (":mkl_ilp64_omp", "node", None, "shared"),
+            #     (select(
+            #         {"DEFAULT": None, "ovr_config//runtime:platform010": "//IntelComposerXE:mkl_ilp64_omp" }),
+            #         "node", None, "shared"
+            #     ),
+            # ])
+            # ```
+            # The second mapping will be resolved to `(None, "node", None, "shared")` and will not handle anything.
+            # There is no convenient way to gracefully handle that in the user-facing link groups API.
+            return []
+
+        elif mapping.traversal != Traversal("intersect_any_roots"):
+            return mapping.roots
 
     # Else find all dependencies that match the filter.
matching_targets = {} @@ -238,11 +257,14 @@ def _find_targets_in_mapping( if filter._type == FilterType("label"): if not any_labels_match(filter.regex, labels): return False + elif filter._type == FilterType("target_regex"): + target_str = str(target.raw_target()) + return filter.regex.match(target_str) elif not filter.pattern.matches(target): return False return True - def find_matching_targets(node): # Label -> [Label]: + def populate_matching_targets(node): # Label -> bool: graph_node = graph_map[node] if matches_target(node, graph_node.labels): matching_targets[node] = None @@ -250,48 +272,171 @@ def _find_targets_in_mapping( # We can stop traversing the tree at this point because we've added the # build target to the list of all targets that will be traversed by the # algorithm that applies the groups. - return [] - return graph_node.deps + graph_node.exported_deps + return False + return True - if mapping.root == None: + def populate_matching_targets_bfs_wrapper(node): # (Label) -> list + if populate_matching_targets(node): + graph_node = graph_map[node] + return graph_node.deps + graph_node.exported_deps + return [] + + if not mapping.roots: for node in graph_map: - find_matching_targets(node) + populate_matching_targets(node) + elif mapping.traversal == Traversal("intersect_any_roots"): + targets_to_counter = {} + for root in mapping.roots: + # This is a captured variable inside `populate_matching_targets`. + # We reset it for each root we visit so that we don't have results + # from other roots. + matching_targets = {} + depth_first_traversal_by(graph_map, [root], populate_matching_targets_bfs_wrapper) + for t in matching_targets: + targets_to_counter[t] = targets_to_counter.get(t, 0) + 1 + + return [ + t + for t, count in targets_to_counter.items() + if count > 1 + ] else: - breadth_first_traversal_by(graph_map, [mapping.root], find_matching_targets) + depth_first_traversal_by(graph_map, mapping.roots, populate_matching_targets_bfs_wrapper) return matching_targets.keys() +# Extracted from `_update_target_to_group_mapping` to avoid function allocations inside the loop +def _assign_target_to_group( + target_to_group_map, #: {"label": str} + node_traversed_targets, #: {"label": None} + group, # Group, + groups_map, # {str: Group} + mapping, # GroupMapping + target, # Label + node_traversal): # bool + # If the target hasn't already been assigned to a group, assign it to the + # first group claiming the target. Return whether the target was already assigned. 
+ if target not in target_to_group_map: + if mapping.traversal == Traversal("subfolders"): + generated_group_name = _generate_group_subfolder_name(group.name, target.package) + _add_to_implicit_link_group(generated_group_name, group, groups_map, target_to_group_map, target) + else: + target_to_group_map[target] = group.name + + if node_traversal: + node_traversed_targets[target] = None + return False + else: + return True + +# Extracted from `_update_target_to_group_mapping` to avoid function allocations inside the loop +def _transitively_add_targets_to_group_mapping( + assign_target_to_group, # (Label, bool) -> bool + node_traversed_targets, #: {"label": None} + graph_map, # {"label": "_b"} + node): # ([Label]) -> None + previously_processed = assign_target_to_group(node, False) + + # If the node has been previously processed, and it was via tree (not node), all child nodes have been assigned + if previously_processed and node not in node_traversed_targets: + return None + graph_node = graph_map[node] + return graph_node.deps + graph_node.exported_deps + # Types removed to avoid unnecessary type checking which degrades performance. def _update_target_to_group_mapping( graph_map, # {"label": "_b"} target_to_group_map, #: {"label": str} node_traversed_targets, #: {"label": None} - group, # str, + group, # Group, + groups_map, # {str: Group} mapping, # GroupMapping target): # Label - def assign_target_to_group( - target: Label, - node_traversal: bool) -> bool: - # If the target hasn't already been assigned to a group, assign it to the - # first group claiming the target. Return whether the target was already assigned. - if target not in target_to_group_map: - target_to_group_map[target] = group - if node_traversal: - node_traversed_targets[target] = None - return False - else: - return True - - def transitively_add_targets_to_group_mapping(node: Label) -> list[Label]: - previously_processed = assign_target_to_group(target = node, node_traversal = False) - - # If the node has been previously processed, and it was via tree (not node), all child nodes have been assigned - if previously_processed and node not in node_traversed_targets: - return [] - graph_node = graph_map[node] - return graph_node.deps + graph_node.exported_deps + assign_target_to_group = partial(_assign_target_to_group, target_to_group_map, node_traversed_targets, group, groups_map, mapping) # (Label, bool) -> bool + transitively_add_targets_to_group_mapping = partial(_transitively_add_targets_to_group_mapping, assign_target_to_group, node_traversed_targets, graph_map) # (Label) -> list[Label] - if mapping.traversal == Traversal("node"): - assign_target_to_group(target = target, node_traversal = True) + if mapping.traversal in _TRAVERSALS_TO_ASSIGN_NODE: + assign_target_to_group(target, True) else: # tree - breadth_first_traversal_by(graph_map, [target], transitively_add_targets_to_group_mapping) + depth_first_traversal_by(graph_map, [target], transitively_add_targets_to_group_mapping) + +def _add_to_implicit_link_group( + generated_group_name, # str + group, # Group + groups_map, # {str: Group} + target_to_group_map, # {Label: str} + target): # Label + target_to_group_map[target] = generated_group_name + if generated_group_name not in groups_map: + groups_map[generated_group_name] = create_group( + group = group, + name = generated_group_name, + definition_type = GroupDefinition("implicit"), + ) + elif groups_map[generated_group_name].definition_type == GroupDefinition("explicit"): + hashed_group_name = 
_hash_group_name(group.name, generated_group_name) + _add_to_implicit_link_group(hashed_group_name, group, groups_map, target_to_group_map, target) + +def _generate_group_subfolder_name( + group, # str, + package): # str + """ Dynamically generates a link group name for the "subfolders" traversal.""" + name = group + "_" + package.replace("/", "_") + + if len(name) > 246: + # Maximum filename size in unix is 255. + # We prefix all libraries with "lib" (3 characters) and suffix with ".dylib" (6 characters) or ".so" (3 characters). + # Assuming the ".dylib" suffix because it's the longest, we can allow (255 - 3 - 6) = 246 characters for the rest of the name. + name = _hash_group_name(group, name) + return name + +def _hash_group_name(prefix: str, name: str) -> str: + """ + Creates a new name via simple hashing. + The hash algorithm is stable in Starlark: https://fburl.com/code/ptegkov6 + """ + return "{}_{}".format(prefix, str(hash(name))) + +def _make_json_info_for_build_target_pattern(build_target_pattern: BuildTargetPattern) -> dict[str, typing.Any]: + # `BuildTargetPattern` contains lambdas, which are not serializable, so + # we have to generate the JSON representation by hand + return { + "cell": build_target_pattern.cell, + "kind": build_target_pattern.kind, + "name": build_target_pattern.name, + "path": build_target_pattern.path, + } + +def _make_json_info_for_group_mapping_filters(filters: list[[BuildTargetFilter, LabelFilter]]) -> list[dict[str, typing.Any]]: + json_filters = [] + for filter in filters: + if filter._type == FilterType("label"): + json_filters += [{"regex": str(filter.regex)}] + elif filter._type == FilterType("pattern"): + json_filters += [_make_json_info_for_build_target_pattern(filter.pattern)] + else: + fail("Unknown filter type: " + filter) + return json_filters + +def _make_json_info_for_group_mapping(group_mapping: GroupMapping) -> dict[str, typing.Any]: + return { + "filters": _make_json_info_for_group_mapping_filters(group_mapping.filters), + "preferred_linkage": group_mapping.preferred_linkage, + "roots": group_mapping.roots, + "traversal": group_mapping.traversal, + } + +def _make_json_info_for_group(group: Group) -> dict[str, typing.Any]: + return { + "attrs": group.attrs, + "mappings": [_make_json_info_for_group_mapping(mapping) for mapping in group.mappings], + "name": group.name, + } + +def make_info_subtarget_providers(ctx: AnalysisContext, groups: list[Group], mappings: dict[Label, str]) -> list[Provider]: + info_json = { + "groups": {group.name: _make_json_info_for_group(group) for group in groups}, + "mappings": mappings, + } + json_output = ctx.actions.write_json("link_group_map_info.json", info_json, pretty = True) + return [DefaultInfo(default_output = json_output)] diff --git a/prelude/cxx/groups_types.bzl b/prelude/cxx/groups_types.bzl new file mode 100644 index 0000000000000..f5700966882e5 --- /dev/null +++ b/prelude/cxx/groups_types.bzl @@ -0,0 +1,113 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree.
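# Editorial aside -- a minimal sketch, not part of this patch: the naming
# scheme implemented by `_generate_group_subfolder_name` and `_hash_group_name`
# above, restated as one self-contained function. Group and package names are
# hypothetical.
def _example_subfolder_group_name(group: str, package: str) -> str:
    name = group + "_" + package.replace("/", "_")
    if len(name) > 246:
        # Keep the eventual "lib<name>.dylib" filename under the 255-character
        # limit by collapsing the long name to a stable hash.
        name = "{}_{}".format(group, str(hash(name)))
    return name

# _example_subfolder_group_name("mygroup", "third_party/ssl") yields
# "mygroup_third_party_ssl"; a package path long enough to push the joined
# name past 246 characters would instead yield "mygroup_<hash>".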
+ +load("@prelude//linking:types.bzl", "Linkage") +load( + "@prelude//utils:build_target_pattern.bzl", + "BuildTargetPattern", +) + +# Label for special group mapping which makes every target associated with it to be included in all groups +MATCH_ALL_LABEL = "MATCH_ALL" + +# Label for special group mapping which makes every target associated with it to be linked directly +# against the final binary +NO_MATCH_LABEL = "NO_MATCH" + +Traversal = enum( + # Includes the target and all of it's transitive dependencies in the group. + "tree", + # Includes only the target in the group. + "node", + # Uses pattern and separates all targets by full folder path. + "subfolders", + # Includes targets found in the transitive deps of *any* roots. + # Filters for these mappings will be applied to the intersected deps. + "intersect_any_roots", +) + +# Optional type of filtering +FilterType = enum( + # Filters for targets with labels matching the regex pattern defined after `label:`. + "label", + # Filters for targets for the build target pattern defined after "pattern:". + "pattern", + # Filters for targets matching the regex pattern defined after "target_regex:". + "target_regex", +) + +BuildTargetFilter = record( + pattern = field(BuildTargetPattern), + _type = field(FilterType, FilterType("pattern")), +) + +LabelFilter = record( + regex = regex, + _type = field(FilterType, FilterType("label")), +) + +TargetRegexFilter = record( + regex = regex, + _type = field(FilterType, FilterType("target_regex")), +) + +# Representation of a parsed group mapping +GroupMapping = record( + # The root to apply this mapping to. + roots = field(list[Label], []), + # The type of traversal to use. + traversal = field(Traversal, Traversal("tree")), + # Optional filter type to apply to the traversal. + filters = field(list[[BuildTargetFilter, LabelFilter, TargetRegexFilter]], []), + # Preferred linkage for this target when added to a link group. + preferred_linkage = field([Linkage, None], None), +) + +# Representation of group attributes +GroupAttrs = record( + # Use distributed thinlto to build the link group shared library. + enable_distributed_thinlto = field(bool, False), + # Enable this link group if the binary's node count exceeds the given threshold + enable_if_node_count_exceeds = field([int, None], None), + # Discard all dependencies in the link group, useful for dropping unused dependencies + # from the build graph. + discard_group = field(bool, False), + # Adds additional linker flags used to link the link group shared object. + linker_flags = field(list, []), + # Adds additional linker flags to apply to dependents that link against the + # link group's shared object. + exported_linker_flags = field(list, []), + # Requires root nodes in specs to always exist in dependency graph. + # Otherwise fails. + requires_root_node_exists = field(bool, True), + # For certain wide-scale generic link groups we want to enable + # initial duplicate analysis. This is useful for detecting dduplicated symbols problem early + # for automatoc link groups that we not aware about (e.g. evicting whole root package folder into link group) + prohibit_file_duplicates = field(bool, False), + # Uses optimized compilation outputs if available. + prefer_optimized_experimental = field(bool, False), +) + +# Types of group traversal +GroupDefinition = enum( + # Group is explicitly defined in mapping provided by user. + # That is the default behavior. + "explicit", + # Group is implicitly created during mapping computations. 
+ # For example, a group can be created for the "subfolders" traversal. + "implicit", +) + +# Representation of a parsed group +Group = record( + # The name for this group. + name = str, + # The mappings that are part of this group. + mappings = list[GroupMapping], + attrs = GroupAttrs, + definition_type = field(GroupDefinition, GroupDefinition("explicit")), +) diff --git a/prelude/cxx/headers.bzl b/prelude/cxx/headers.bzl index a5a7efedb0d3a..c78c16ecb2702 100644 --- a/prelude/cxx/headers.bzl +++ b/prelude/cxx/headers.bzl @@ -6,7 +6,11 @@ # of this source tree. load("@prelude//:paths.bzl", "paths") -load("@prelude//utils:utils.bzl", "expect", "from_named_set", "is_any", "map_val", "value_or") +load("@prelude//cxx:cxx_toolchain_types.bzl", "LinkerType") +load("@prelude//cxx:cxx_utility.bzl", "cxx_attrs_get_allow_cache_upload") +load("@prelude//utils:expect.bzl", "expect") +load("@prelude//utils:lazy.bzl", "lazy") +load("@prelude//utils:utils.bzl", "from_named_set", "map_val", "value_or") load(":cxx_context.bzl", "get_cxx_toolchain_info") load(":platform.bzl", "cxx_by_platform") @@ -64,7 +68,7 @@ HeaderStyle = enum( Headers = record( include_path = field(cmd_args), # NOTE(agallagher): Used for module hack replacement. - symlink_tree = field([Artifact, None], None), + symlink_tree = field(Artifact | None, None), # args that map symlinked private headers to source path file_prefix_args = field([cmd_args, None], None), ) @@ -108,15 +112,16 @@ CPrecompiledHeaderInfo = provider(fields = { def cxx_attr_header_namespace(ctx: AnalysisContext) -> str: return value_or(ctx.attrs.header_namespace, ctx.label.package) -def cxx_attr_exported_headers(ctx: AnalysisContext, headers_layout: CxxHeadersLayout) -> list[CHeader]: - headers = _get_attr_headers(ctx.attrs.exported_headers, headers_layout.namespace, headers_layout.naming) - platform_headers = _get_attr_headers(_headers_by_platform(ctx, ctx.attrs.exported_platform_headers), headers_layout.namespace, headers_layout.naming) +def cxx_attr_headers_list(ctx: AnalysisContext, headers: typing.Any, platform_headers: typing.Any, headers_layout: CxxHeadersLayout) -> list[CHeader]: + headers = _get_attr_headers(headers, headers_layout.namespace, headers_layout.naming) + platform_headers = _get_attr_headers(_headers_by_platform(ctx, platform_headers), headers_layout.namespace, headers_layout.naming) return headers + platform_headers +def cxx_attr_exported_headers(ctx: AnalysisContext, headers_layout: CxxHeadersLayout) -> list[CHeader]: + return cxx_attr_headers_list(ctx, ctx.attrs.exported_headers, ctx.attrs.exported_platform_headers, headers_layout) + def cxx_attr_headers(ctx: AnalysisContext, headers_layout: CxxHeadersLayout) -> list[CHeader]: - headers = _get_attr_headers(ctx.attrs.headers, headers_layout.namespace, headers_layout.naming) - platform_headers = _get_attr_headers(_headers_by_platform(ctx, ctx.attrs.platform_headers), headers_layout.namespace, headers_layout.naming) - return headers + platform_headers + return cxx_attr_headers_list(ctx, ctx.attrs.headers, ctx.attrs.platform_headers, headers_layout) def cxx_get_regular_cxx_headers_layout(ctx: AnalysisContext) -> CxxHeadersLayout: namespace = cxx_attr_header_namespace(ctx) @@ -164,12 +169,22 @@ def as_raw_headers( ) def _header_mode(ctx: AnalysisContext) -> HeaderMode: + toolchain_header_mode = get_cxx_toolchain_info(ctx).header_mode + + # If the toolchain disabled header maps, respect that since the compiler + # simply cannot accept anything else.
+ if toolchain_header_mode == HeaderMode("symlink_tree_only"): + return toolchain_header_mode + + # If the target specifies a header mode, use that in case it needs + # a symlink tree (even with header maps) header_mode = map_val(HeaderMode, getattr(ctx.attrs, "header_mode", None)) if header_mode != None: return header_mode - return get_cxx_toolchain_info(ctx).header_mode -def prepare_headers(ctx: AnalysisContext, srcs: dict[str, Artifact], name: str, project_root_file: [Artifact, None]) -> [Headers, None]: + return toolchain_header_mode + +def prepare_headers(ctx: AnalysisContext, srcs: dict[str, Artifact], name: str) -> [Headers, None]: """ Prepare all the headers we want to use, depending on the header_mode set on the target's toolchain. @@ -188,26 +203,26 @@ def prepare_headers(ctx: AnalysisContext, srcs: dict[str, Artifact], name: str, # by https://reviews.llvm.org/D103930 so, until it lands, disable header # maps when we see a module map. if (header_mode == HeaderMode("symlink_tree_with_header_map") and - is_any(lambda n: paths.basename(n) == "module.modulemap", srcs.keys())): + lazy.is_any(lambda n: paths.basename(n) == "module.modulemap", srcs.keys())): header_mode = HeaderMode("symlink_tree_only") - output_name = name + "-abs" if project_root_file else name + output_name = name if header_mode == HeaderMode("header_map_only"): headers = {h: (a, "{}") for h, a in srcs.items()} - hmap = _mk_hmap(ctx, output_name, headers, project_root_file) + hmap = _mk_hmap(ctx, output_name, headers) return Headers( - include_path = cmd_args(hmap).hidden(srcs.values()), + include_path = cmd_args(hmap, hidden = srcs.values()), ) symlink_dir = ctx.actions.symlinked_dir(output_name, _normalize_header_srcs(srcs)) if header_mode == HeaderMode("symlink_tree_only"): return Headers(include_path = cmd_args(symlink_dir), symlink_tree = symlink_dir) if header_mode == HeaderMode("symlink_tree_with_header_map"): headers = {h: (symlink_dir, "{}/" + h) for h in srcs} - hmap = _mk_hmap(ctx, output_name, headers, project_root_file) + hmap = _mk_hmap(ctx, output_name, headers) file_prefix_args = _get_debug_prefix_args(ctx, symlink_dir) return Headers( - include_path = cmd_args(hmap).hidden(symlink_dir), + include_path = cmd_args(hmap, hidden = symlink_dir), symlink_tree = symlink_dir, file_prefix_args = file_prefix_args, ) @@ -320,31 +335,31 @@ def _get_dict_header_namespace(namespace: str, naming: CxxHeadersNaming) -> str: def _get_debug_prefix_args(ctx: AnalysisContext, header_dir: Artifact) -> [cmd_args, None]: # NOTE(@christylee): Do we need to enable debug-prefix-map for darwin and windows? - if get_cxx_toolchain_info(ctx).linker_info.type != "gnu": + if get_cxx_toolchain_info(ctx).linker_info.type != LinkerType("gnu"): return None - debug_prefix_args = cmd_args() fmt = "-fdebug-prefix-map={}=" + value_or(header_dir.owner.cell, ".") - debug_prefix_args.add( + return cmd_args( cmd_args(header_dir, format = fmt), ) - return debug_prefix_args -def _mk_hmap(ctx: AnalysisContext, name: str, headers: dict[str, (Artifact, str)], project_root_file: [Artifact, None]) -> Artifact: +def _mk_hmap(ctx: AnalysisContext, name: str, headers: dict[str, (Artifact, str)]) -> Artifact: output = ctx.actions.declare_output(name + ".hmap") - cmd = cmd_args(get_cxx_toolchain_info(ctx).mk_hmap) - cmd.add(["--output", output.as_output()]) header_args = cmd_args() for n, (path, fmt) in headers.items(): header_args.add(n) # We don't care about the header contents -- just their names. 
- header_args.add(cmd_args(path, format = fmt).ignore_artifacts()) + header_args.add(cmd_args(path, format = fmt, ignore_artifacts = True)) - hmap_args_file = ctx.actions.write(output.basename + ".argsfile", cmd_args(header_args, quote = "shell")) - cmd.add(["--mappings-file", hmap_args_file]).hidden(header_args) - if project_root_file: - cmd.add(["--project-root-file", project_root_file]) - ctx.actions.run(cmd, category = "generate_hmap", identifier = name) + hmap_args_file = ctx.actions.write(output.basename + ".cxx_hmap_argsfile", cmd_args(header_args, quote = "shell")) + + cmd = cmd_args( + [get_cxx_toolchain_info(ctx).internal_tools.hmap_wrapper] + + ["--output", output.as_output()] + + ["--mappings-file", hmap_args_file], + hidden = header_args, + ) + ctx.actions.run(cmd, category = "generate_hmap", identifier = name, allow_cache_upload = cxx_attrs_get_allow_cache_upload(ctx.attrs)) return output diff --git a/prelude/cxx/index_store.bzl b/prelude/cxx/index_store.bzl new file mode 100644 index 0000000000000..ca00472ce6354 --- /dev/null +++ b/prelude/cxx/index_store.bzl @@ -0,0 +1,139 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//:paths.bzl", "paths") +load("@prelude//apple:apple_toolchain_types.bzl", "AppleToolchainInfo") + +INDEX_STORE_SUBTARGET = "index-store" +FULL_INDEX_STORE_SUBTARGET = "full-index-store" + +# Magic number. There is a trade-off here: +# more buckets: more materialization time, but less time waiting for RE to merge each bucket; +# fewer buckets: less materialization time, but more time waiting for RE to merge each bucket. +_BUCK_COUNT = 20 + +def _index_store_args(artifact: Artifact) -> Artifact: + return artifact + +IndexStoreTSet = transitive_set( + args_projections = { + "args": _index_store_args, + }, +) + +IndexStoreInfo = provider( + fields = { + # The name of the target. + "name": provider_field(str), + # A tset with this target's index store and all of its dependencies' index stores in the children. + "tset": provider_field(IndexStoreTSet), + }, +) + +def _get_merge_index_store_tool(ctx: AnalysisContext) -> RunInfo | None: + apple_toolchain = getattr(ctx.attrs, "_apple_toolchain", None) + if apple_toolchain == None: + return None + return apple_toolchain[AppleToolchainInfo].merge_index_store + +def _merge_index_store(ctx: AnalysisContext, index_stores: list[Artifact] | TransitiveSet, merge_output_dir_name: str | None = None) -> Artifact | None: + if isinstance(index_stores, list): + if len(index_stores) == 0: + return None + + if len(index_stores) == 1: + return index_stores[0] + + merge_index_store_tool = _get_merge_index_store_tool(ctx) + if merge_index_store_tool == None: + return None + if merge_output_dir_name == None: + merge_output_dir_name = paths.join("__indexstore__", ctx.attrs.name, "index_store") + merged_index_store = ctx.actions.declare_output(merge_output_dir_name) + cmd = cmd_args([merge_index_store_tool]) + cmd.add(["--dest", merged_index_store.as_output()]) + cmd.add(["--sources"]) + if isinstance(index_stores, list): + cmd.add(index_stores) + else: + cmd.add(index_stores.project_as_args("args")) + + # Use prefer_remote = True here; it has the following two advantages: + # 1.
Each bucket will perform a merge on RE, which will fully utilize the high-speed network for materialization + # and utilize the resources to merge in parallel. + # 2. After merging each bucket, the index store will be smaller, so there is less to materialize locally, + # which speeds up the local merge, thus accelerating the overall process. + ctx.actions.run(cmd, category = "merge_index_store", identifier = merge_output_dir_name, allow_cache_upload = True, prefer_remote = True) + return merged_index_store + +def _hash_bucket_index_stores(index_stores: list[Artifact]): + buckets_to_artifacts = {} + for index_store in index_stores: + hash_value = hash(index_store.short_path) + bucket = hash_value % _BUCK_COUNT + if bucket not in buckets_to_artifacts: + buckets_to_artifacts[bucket] = [] + buckets_to_artifacts[bucket].append(index_store) + return buckets_to_artifacts + +def _merge_all_index_store(ctx: AnalysisContext, index_stores: TransitiveSet) -> Artifact | None: + index_store_output_path = read_config("apple", "index_store_output", None) + if index_store_output_path: + index_stores = list(index_stores.traverse()) + merge_index_store_tool = _get_merge_index_store_tool(ctx) + if merge_index_store_tool == None: + return None + outputs = [] + + buckets_to_artifacts = _hash_bucket_index_stores(index_stores) + + for bucket in buckets_to_artifacts: + index_stores = buckets_to_artifacts[bucket] + merged_bucket_index_store = _merge_index_store(ctx, index_stores, merge_output_dir_name = "merge_bucket/{}/index_store".format(bucket)) + + name = "local_merge/{}/index_store".format(bucket) + local_merged_index_store = ctx.actions.declare_output(name, dir = False) # this is a dummy output; it will be empty. It is used to make buck2 run the actions + cmd = cmd_args([merge_index_store_tool, "--dest", index_store_output_path, "--sources", merged_bucket_index_store]) + cmd.add(["--dummy-output", local_merged_index_store.as_output()]) + + # each index store will run a local action to merge into the same local index store; + # in this case, each index store will not wait for all index stores to be materialized + ctx.actions.run(cmd, category = "index_store_local_merge", identifier = name, local_only = True) + outputs.append(local_merged_index_store) + + final_output = ctx.actions.declare_output("dummy_final_local_merge", dir = False) # this is a dummy output; it will be empty. It is used to make buck2 run the actions + + # Pass `outputs` to `hidden` to make `final_output` depend on the `outputs`.
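# Editorial aside -- a minimal sketch, not part of this patch: the bucketing
# scheme used by `_hash_bucket_index_stores` above, as a standalone helper.
# Because Starlark's `hash()` is stable, a given index store always lands in
# the same bucket across builds, keeping the per-bucket RE merges cacheable.
# The path below is hypothetical; the real final local-merge command continues
# right after this aside.
def _example_bucket_for(short_path: str, bucket_count: int) -> int:
    # Starlark's `%` follows the sign of the divisor, so this is non-negative.
    return hash(short_path) % bucket_count

# _example_bucket_for("app/__objects__/index_store", 20) is deterministic for
# a fixed path, so repeated builds re-use the same per-bucket merge action.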
+ cmd = cmd_args(["touch", final_output.as_output()], hidden = outputs) + + ctx.actions.run(cmd, category = "index_store_local_merge", identifier = "final_local_merge (dummy)", local_only = True) + return final_output + else: + return _merge_index_store(ctx, index_stores, paths.join("__indexstore__", ctx.attrs.name, "full_index_stores")) + +def _gather_deps_index_store_tsets(deps: list[Dependency]) -> list[IndexStoreTSet]: + deps_indexstore_infos = filter(None, [dep.get(IndexStoreInfo) for dep in deps]) + return [info.tset for info in deps_indexstore_infos] + +def create_index_store_subtargets_and_provider(ctx: AnalysisContext, current_target_index_stores: list[Artifact], deps: list[Dependency]) -> (dict[str, list[Provider]], IndexStoreInfo): + # Create a subtarget for the current target's index store + sub_targets = {} + merged_index_store = _merge_index_store(ctx, current_target_index_stores) + sub_targets[INDEX_STORE_SUBTARGET] = [DefaultInfo(default_output = merged_index_store)] + + # Crate a subtarget for the merged all deps' and itself's index store + deps_indexstore_tsets = _gather_deps_index_store_tsets(deps) + if merged_index_store: + index_store_tset = ctx.actions.tset(IndexStoreTSet, value = merged_index_store, children = deps_indexstore_tsets) + else: + index_store_tset = ctx.actions.tset(IndexStoreTSet, children = deps_indexstore_tsets) + index_store_info = IndexStoreInfo(name = ctx.attrs.name, tset = index_store_tset) + + output = _merge_all_index_store(ctx, index_store_tset) + sub_targets[FULL_INDEX_STORE_SUBTARGET] = [DefaultInfo(default_output = output)] + + return (sub_targets, index_store_info) diff --git a/prelude/cxx/link.bzl b/prelude/cxx/link.bzl index 86138e6cb6d79..a396981bb1416 100644 --- a/prelude/cxx/link.bzl +++ b/prelude/cxx/link.bzl @@ -16,15 +16,24 @@ load( "bolt", "cxx_use_bolt", ) -load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxToolchainInfo") +load( + "@prelude//cxx:cxx_toolchain_types.bzl", + "CxxToolchainInfo", + "LinkerType", +) +load( + "@prelude//cxx/dist_lto:darwin_dist_lto.bzl", + "cxx_darwin_dist_link", +) load( "@prelude//cxx/dist_lto:dist_lto.bzl", - "cxx_dist_link", + "cxx_gnu_dist_link", ) load("@prelude//linking:execution_preference.bzl", "LinkExecutionPreference", "LinkExecutionPreferenceInfo", "get_action_execution_attributes") load( "@prelude//linking:link_info.bzl", "ArchiveLinkable", + "ExtraLinkerOutputs", "LinkArgs", "LinkOrdering", "LinkedObject", @@ -34,10 +43,13 @@ load( ) load( "@prelude//linking:lto.bzl", + "LtoMode", "get_split_debug_lto_info", ) +load("@prelude//linking:stamp_build_info.bzl", "stamp_build_info") load("@prelude//linking:strip.bzl", "strip_object") -load("@prelude//utils:utils.bzl", "expect", "map_val", "value_or") +load("@prelude//utils:expect.bzl", "expect") +load("@prelude//utils:utils.bzl", "map_val", "value_or") load( ":anon_link.bzl", "ANON_ATTRS", @@ -49,6 +61,7 @@ load(":cxx_context.bzl", "get_cxx_toolchain_info") load( ":cxx_link_utility.bzl", "cxx_link_cmd_parts", + "cxx_sanitizer_runtime_arguments", "generates_split_debug", "linker_map_args", "make_link_args", @@ -58,6 +71,7 @@ load(":link_types.bzl", "CxxLinkResultType", "LinkOptions", "merge_link_options" load( ":linker.bzl", "SharedLibraryFlagOverrides", # @unused Used as a type + "get_deffile_flags", "get_import_library", "get_output_flags", "get_shared_library_flags", @@ -74,13 +88,18 @@ CxxLinkResult = record( linked_object = LinkedObject, linker_map_data = [CxxLinkerMapData, None], link_execution_preference_info = LinkExecutionPreferenceInfo, 
+ # A list of runtime shared libraries + sanitizer_runtime_files = field(list[Artifact]), + # A dictionary of extra linker outputs generated from + # the extra_linker_outputs_factory + extra_outputs = field(dict[str, list[DefaultInfo]], default = {}), ) def link_external_debug_info( ctx: AnalysisContext, links: list[LinkArgs], - split_debug_output: [Artifact, None] = None, - pdb: [Artifact, None] = None) -> ArtifactTSet: + split_debug_output: Artifact | None = None, + pdb: Artifact | None = None) -> ArtifactTSet: external_debug_artifacts = [] # When using LTO+split-dwarf, the link step will generate externally @@ -129,24 +148,43 @@ def cxx_link_into( linker_map_data = None if linker_info.supports_distributed_thinlto and opts.enable_distributed_thinlto: - if not linker_info.requires_objects: - fail("Cannot use distributed thinlto if the cxx toolchain doesn't require_objects") - exe = cxx_dist_link( - ctx, - opts.links, - output, - linker_map, - opts.category_suffix, - opts.identifier, - should_generate_dwp, - is_result_executable, - ) + if not linker_info.lto_mode == LtoMode("thin"): + fail("Cannot use distributed thinlto if the cxx toolchain doesn't use thin-lto lto_mode") + sanitizer_runtime_args = cxx_sanitizer_runtime_arguments(ctx, cxx_toolchain_info, output) + if sanitizer_runtime_args.extra_link_args or sanitizer_runtime_args.sanitizer_runtime_files: + fail("Cannot use distributed thinlto with sanitizer runtime") + + linker_type = linker_info.type + if linker_type == LinkerType("darwin"): + exe, extra_outputs = cxx_darwin_dist_link( + ctx, + output, + opts, + linker_info.thin_lto_premerger_enabled, + is_result_executable, + linker_map, + ) + elif linker_type == LinkerType("gnu"): + exe = cxx_gnu_dist_link( + ctx, + output, + opts, + linker_map, + should_generate_dwp, + is_result_executable, + ) + extra_outputs = {} + else: + fail("Linker type {} not supported for distributed thin-lto".format(linker_type)) + return CxxLinkResult( linked_object = exe, linker_map_data = linker_map_data, link_execution_preference_info = LinkExecutionPreferenceInfo( preference = opts.link_execution_preference, ), + sanitizer_runtime_files = [], + extra_outputs = extra_outputs, ) if linker_info.generate_linker_maps: @@ -154,10 +192,18 @@ def cxx_link_into( else: links_with_linker_map = opts.links - linker, toolchain_linker_flags = cxx_link_cmd_parts(cxx_toolchain_info) - all_link_args = cmd_args(toolchain_linker_flags) + link_cmd_parts = cxx_link_cmd_parts(cxx_toolchain_info, is_result_executable) + all_link_args = cmd_args(link_cmd_parts.linker_flags) all_link_args.add(get_output_flags(linker_info.type, output)) + # Add the linker args required for any extra linker outputs requested + extra_linker_outputs = opts.extra_linker_outputs_factory(ctx) if opts.extra_linker_outputs_factory != None else ExtraLinkerOutputs() + if len(extra_linker_outputs.artifacts) > 0: + if opts.extra_linker_outputs_flags_factory == None: + fail("Extra outputs requested but missing flag factory") + + all_link_args.add(opts.extra_linker_outputs_flags_factory(ctx, extra_linker_outputs.artifacts)) + # Darwin LTO requires extra link outputs to preserve debug info split_debug_output = None split_debug_lto_info = get_split_debug_lto_info(ctx.actions, cxx_toolchain_info, output.short_path) @@ -175,12 +221,12 @@ def cxx_link_into( else: link_args_suffix = opts.category_suffix link_args_output = make_link_args( + ctx, ctx.actions, cxx_toolchain_info, links_with_linker_map, suffix = link_args_suffix, output_short_path = output.short_path, - 
is_shared = result_type.value == "shared_library", link_ordering = value_or( opts.link_ordering, # Fallback to toolchain default. map_val(LinkOrdering, linker_info.link_ordering), ), ) all_link_args.add(link_args_output.link_args) + # Sanitizer runtime args must appear at the end because they can affect + # the behavior of Swift runtime loading when the app also has an embedded + # Swift runtime. + sanitizer_runtime_args = cxx_sanitizer_runtime_arguments(ctx, cxx_toolchain_info, output) + all_link_args.add(sanitizer_runtime_args.extra_link_args) + bitcode_linkables = [] for link_item in opts.links: if link_item.infos == None: @@ -211,21 +263,27 @@ def cxx_link_into( pdb = link_args_output.pdb_artifact, ) - if linker_info.type == "windows": + all_link_args.add(link_cmd_parts.post_linker_flags) + + if linker_info.type == LinkerType("windows"): shell_quoted_args = cmd_args(all_link_args) else: shell_quoted_args = cmd_args(all_link_args, quote = "shell") argfile, _ = ctx.actions.write( - output.short_path + ".linker.argsfile", + output.short_path + ".cxx_link_argsfile", shell_quoted_args, allow_args = True, ) - command = cmd_args(linker) - command.add(cmd_args(argfile, format = "@{}")) - command.hidden(link_args_output.hidden) - command.hidden(shell_quoted_args) + command = cmd_args( + link_cmd_parts.linker, + cmd_args(argfile, format = "@{}"), + hidden = [ + link_args_output.hidden, + shell_quoted_args, + ], + ) category = "cxx_link" if opts.category_suffix != None: category += "_" + opts.category_suffix @@ -234,11 +292,13 @@ def cxx_link_into( # generate a DWO directory, so make sure we at least `mkdir` and empty # one to make v2/RE happy. if split_debug_output != None: - cmd = cmd_args(["/bin/sh", "-c"]) - cmd.add(cmd_args(split_debug_output.as_output(), format = 'mkdir -p {}; "$@"')) - cmd.add('""').add(command) - cmd.hidden(command) - command = cmd + command = cmd_args( + "/bin/sh", + "-c", + cmd_args(split_debug_output.as_output(), format = 'mkdir -p {}; "$@"'), + '""', + command, + ) link_execution_preference_info = LinkExecutionPreferenceInfo( preference = opts.link_execution_preference, @@ -247,6 +307,11 @@ def cxx_link_into( opts.link_execution_preference, ) + # Only specify error_handler if one exists + error_handler_args = {} + if opts.error_handler: + error_handler_args["error_handler"] = opts.error_handler + ctx.actions.run( command, prefer_local = action_execution_properties.prefer_local, @@ -257,13 +322,16 @@ def cxx_link_into( identifier = opts.identifier, force_full_hybrid_if_capable = action_execution_properties.full_hybrid, allow_cache_upload = opts.allow_cache_upload, + **error_handler_args ) unstripped_output = output if opts.strip: strip_args = opts.strip_args_factory(ctx) if opts.strip_args_factory else cmd_args() output = strip_object(ctx, cxx_toolchain_info, output, strip_args, opts.category_suffix) - final_output = output if not (is_result_executable and cxx_use_bolt(ctx)) else bolt(ctx, output, opts.identifier) + final_output = output if not (is_result_executable and cxx_use_bolt(ctx)) else bolt(ctx, output, external_debug_info, opts.identifier) + final_output = stamp_build_info(ctx, final_output) if is_result_executable else final_output + dwp_artifact = None if should_generate_dwp: # TODO(T110378144): Once we track split dwarf from compiles, we should @@ -289,6 +357,7 @@ def cxx_link_into( linked_object = LinkedObject( output = final_output, + link_args = opts.links, bitcode_bundle = bitcode_artifact.artifact if bitcode_artifact else None, prebolt_output = output, unstripped_output
= unstripped_output, @@ -306,6 +375,8 @@ def cxx_link_into( linked_object = linked_object, linker_map_data = linker_map_data, link_execution_preference_info = link_execution_preference_info, + sanitizer_runtime_files = sanitizer_runtime_args.sanitizer_runtime_files, + extra_outputs = extra_linker_outputs.providers, ) _AnonLinkInfo = provider(fields = { @@ -374,7 +445,6 @@ def _anon_cxx_link( opts = opts, ) ), - with_artifacts = True, ) dwp = None @@ -393,6 +463,10 @@ def _anon_cxx_link( split_debug_output = split_debug_output, ) + # The anon target API doesn't allow us to return the list of artifacts for + # sanitizer runtime, so it has to be computed here + sanitizer_runtime_args = cxx_sanitizer_runtime_arguments(ctx, cxx_toolchain, output) + return CxxLinkResult( linked_object = LinkedObject( output = output, @@ -404,6 +478,7 @@ def _anon_cxx_link( link_execution_preference_info = LinkExecutionPreferenceInfo( preference = LinkExecutionPreference("any"), ), + sanitizer_runtime_files = sanitizer_runtime_args.sanitizer_runtime_files, ) def cxx_link( @@ -461,7 +536,9 @@ def cxx_link_shared_library( output, ) - links_with_extra_args = [LinkArgs(flags = extra_args)] + opts.links + [LinkArgs(flags = import_library_args)] + deffile_args = get_deffile_flags(ctx, linker_type) + + links_with_extra_args = [LinkArgs(flags = extra_args)] + opts.links + [LinkArgs(flags = import_library_args + deffile_args)] opts = merge_link_options( opts, diff --git a/prelude/cxx/link_groups.bzl b/prelude/cxx/link_groups.bzl index 35bbcea27a4f6..cdde311ee8058 100644 --- a/prelude/cxx/link_groups.bzl +++ b/prelude/cxx/link_groups.bzl @@ -6,6 +6,16 @@ # of this source tree. load("@prelude//:paths.bzl", "paths") +load( + "@prelude//cxx:groups_types.bzl", + "Group", # @unused Used as a type +) +load( + "@prelude//cxx:link_groups_types.bzl", + "LinkGroupInfo", + "LinkGroupsDebugLinkableEntry", + "LinkGroupsDebugLinkableItem", +) load("@prelude//linking:execution_preference.bzl", "LinkExecutionPreference") load( "@prelude//linking:link_groups.bzl", @@ -18,7 +28,6 @@ load( "LinkInfo", "LinkInfos", "LinkStrategy", - "Linkage", "LinkedObject", # @unused Used as a type "SharedLibLinkable", "get_lib_output_style", @@ -38,10 +47,19 @@ load( "get_linkable_graph_node_map_func", "get_transitive_deps", ) +load( + "@prelude//linking:shared_libraries.bzl", + "SharedLibraries", + "SharedLibrary", + "Soname", + "create_shlib", +) +load("@prelude//linking:types.bzl", "Linkage") load("@prelude//utils:arglike.bzl", "ArgLike") +load("@prelude//utils:expect.bzl", "expect") load( "@prelude//utils:graph_utils.bzl", - "breadth_first_traversal_by", + "depth_first_traversal_by", ) load( "@prelude//utils:set.bzl", @@ -50,7 +68,6 @@ load( ) load( "@prelude//utils:utils.bzl", - "expect", "value_or", ) load(":cxx_context.bzl", "get_cxx_toolchain_info") @@ -61,12 +78,14 @@ load( load(":cxx_toolchain_types.bzl", "PicBehavior") load( ":groups.bzl", - "Group", # @unused Used as a type - "MATCH_ALL_LABEL", - "NO_MATCH_LABEL", "compute_mappings", "parse_groups_definitions", ) +load( + ":groups_types.bzl", + "MATCH_ALL_LABEL", + "NO_MATCH_LABEL", +) load( ":link.bzl", "cxx_link_shared_library", @@ -107,25 +126,13 @@ LINK_GROUP_MAP_DATABASE_FILENAME = "link_group_map_database.json" LINK_GROUP_MAPPINGS_SUB_TARGET = "link-group-mappings" LINK_GROUP_MAPPINGS_FILENAME_SUFFIX = ".link_group_map.json" -LinkGroupInfo = provider( - # @unsorted-dict-items - fields = { - "groups": provider_field(typing.Any, default = None), # dict[str, Group] - "groups_hash":
provider_field(typing.Any, default = None), # str - "mappings": provider_field(typing.Any, default = None), # dict[ConfiguredProvidersLabel, str] - # Additional graphs needed to cover labels referenced by the groups above. - # This is useful in cases where the consumer of this provider won't already - # have deps covering these. - # NOTE(agallagher): We do this to maintain existing behavior w/ the - # standalone `link_group_map()` rule, but it's not clear if it's actually - # desirable behavior. - "graph": provider_field(typing.Any, default = None), # LinkableGraph - }, -) - LinkGroupLinkInfo = record( link_info = field(LinkInfo), output_style = field(LibOutputStyle), + + # Where this link info originated from: + # either a target label or a link group name. + link_name = field(Label | str), ) LinkGroupLibSpec = record( @@ -142,6 +149,7 @@ LinkGroupLibSpec = record( root = field([LinkableRootInfo, None], None), # The link group to link. group = field(Group), + label = field(Label | None, None), ) _LinkedLinkGroup = record( @@ -152,6 +160,9 @@ _LinkedLinkGroup = record( _LinkedLinkGroups = record( libs = field(dict[str, _LinkedLinkGroup]), symbol_ldflags = field(list[typing.Any], []), + libs_debug_info = field(dict[typing.Any, typing.Any]), + # Mapping from a target to a link group name it was linked into. + targets_consumed_by_link_groups = field(dict[Label, str]), ) def get_link_group(ctx: AnalysisContext) -> [str, None]: @@ -172,7 +183,7 @@ def build_link_group_info( filtered_groups[group.name] = group mappings = compute_mappings( - groups = filtered_groups.values(), + groups_map = filtered_groups, graph_map = linkable_graph_node_map, ) @@ -220,12 +231,22 @@ def get_link_group_info( ) def get_link_group_preferred_linkage(link_groups: list[Group]) -> dict[Label, Linkage]: - return { - mapping.root: mapping.preferred_linkage - for group in link_groups - for mapping in group.mappings - if mapping.root != None and mapping.preferred_linkage != None - } + root_to_linkage = {} + for group in link_groups: + for mapping in group.mappings: + if not mapping.roots: + continue + + if not mapping.preferred_linkage: + continue + + for root in mapping.roots: + # TODO: There might be a bug here - if the same root is listed in + # two different link_group_map entries, we'll only use the preferred_linkage + # of the last mapping processed. + root_to_linkage[root] = mapping.preferred_linkage + + return root_to_linkage LinkGroupContext = record( link_group_mappings = field([dict[Label, str], None]), @@ -235,6 +256,8 @@ LinkGroupContext = record( link_group_libs = field(dict[str, typing.Any]), link_group_preferred_linkage = field(dict[Label, Linkage]), labels_to_links_map = field(dict[Label, LinkGroupLinkInfo]), + # Mapping from a target to a link group name it was linked into. + targets_consumed_by_link_groups = field(dict[Label, str]), ) def is_link_group_shlib( @@ -263,7 +286,8 @@ def _transitively_update_shared_linkage( link_strategy: LinkStrategy, link_group_preferred_linkage: dict[Label, Linkage], link_group_roots: dict[Label, str], - pic_behavior: PicBehavior): + pic_behavior: PicBehavior, + link_group_mappings: [dict[Label, str], None]): # Identify targets whose shared linkage style may be propagated to # dependencies. Implicitly created root libraries are skipped.
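# Editorial aside -- a minimal sketch, not part of this patch: the visitor
# contract expected by `depth_first_traversal_by` as used throughout this file
# (e.g. `process_dependency` just below). The callback returns the node's
# children to keep descending, or None to prune the walk at that node. The
# graph shape here is hypothetical.
def _example_make_visitor(graph_node_map):
    def visit(node):
        if graph_node_map[node].get("excluded"):
            # Prune: do not traverse past excluded nodes.
            return None
        return graph_node_map[node]["deps"]

    return visit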
shared_lib_roots = [] @@ -279,19 +303,50 @@ def _transitively_update_shared_linkage( shared_lib_roots.append(target) # buildifier: disable=uninitialized - def process_dependency(node: Label) -> list[Label]: + def process_dependency(node: Label) -> list[Label] | None: + if link_group_mappings and link_group_mappings.get(node) == NO_MATCH_LABEL: + # Do not propagate shared linkage via nodes that are excluded from link groups. + return None linkable_node = linkable_graph_node_map[node] if linkable_node.preferred_linkage == Linkage("any"): link_group_preferred_linkage[node] = Linkage("shared") return get_deps_for_link(linkable_node, link_strategy, pic_behavior) - breadth_first_traversal_by( + depth_first_traversal_by( linkable_graph_node_map, shared_lib_roots, process_dependency, ) +def create_debug_linkable_entries( + labels_to_links_map: dict[Label, LinkGroupLinkInfo], + root: Label | None) -> list[LinkGroupsDebugLinkableEntry]: + entries = [] + if root: + root_entry = LinkGroupsDebugLinkableEntry( + name = root, + output_style = LibOutputStyle("pic_archive"), + ) + entries.append(root_entry) + + for link_info in labels_to_links_map.values(): + link_groups_linkable_info = LinkGroupsDebugLinkableEntry( + name = link_info.link_name, + output_style = link_info.output_style, + ) + entries.append(link_groups_linkable_info) + + return entries + +# This stores the final information about the link arguments +# that map to the linker argsfile for a link group or the final binary. +FinalLabelsToLinks = record( + # Static archive and shared library inputs. + map = field(dict[Label, LinkGroupLinkInfo]), +) + def get_filtered_labels_to_links_map( + public_nodes: [set_record, None], linkable_graph_node_map: dict[Label, LinkableNode], link_group: [str, None], link_groups: dict[str, Group], @@ -303,7 +358,8 @@ def get_filtered_labels_to_links_map( link_group_libs: dict[str, ([Label, None], LinkInfos)] = {}, prefer_stripped: bool = False, is_executable_link: bool = False, - force_static_follows_dependents: bool = True) -> dict[Label, LinkGroupLinkInfo]: + force_static_follows_dependents: bool = True, + prefer_optimized = False) -> FinalLabelsToLinks: """ Given a linkable graph, link style and link group mappings, finds all links to consider for linking traversing the graph as necessary and then @@ -312,29 +368,18 @@ def get_filtered_labels_to_links_map( """ def get_potential_linkables(node: Label) -> list[Label]: - linkable_node = linkable_graph_node_map[node] # buildifier: disable=uninitialized - - # Always link against exported deps - node_linkables = list(linkable_node.exported_deps) + linkable_node = linkable_graph_node_map[node] # If the preferred linkage is `static` or `any` we need to link against the deps too. - # TODO(cjhopman): This code originally was as commented out below and the comment indicated that the - # intent was to not traverse in the second case if link style was shared, but at this point idk which - # behavior we actually want.
- should_traverse_private_deps = False - if linkable_node.preferred_linkage == Linkage("static"): - should_traverse_private_deps = True - elif linkable_node.preferred_linkage == Linkage("any"): - should_traverse_private_deps = True - # should_traverse = link_style != Linkage("shared") + should_traverse_private_deps = linkable_node.preferred_linkage == Linkage("static") or linkable_node.preferred_linkage == Linkage("any") if should_traverse_private_deps: - node_linkables += linkable_node.deps - - return node_linkables + return linkable_node.all_deps + else: + return linkable_node.exported_deps # Get all potential linkable targets - linkables = breadth_first_traversal_by( + linkables = depth_first_traversal_by( linkable_graph_node_map, roots, get_potential_linkables, @@ -357,6 +402,7 @@ def get_filtered_labels_to_links_map( link_group_preferred_linkage, link_group_roots, pic_behavior, + link_group_mappings, ) linkable_map = {} @@ -365,15 +411,36 @@ def get_filtered_labels_to_links_map( # already. This avoids us adding the same link group lib multiple times, # for each of the possible multiple nodes that map to it. link_group_added = {} + group_srcs = {} def add_link(target: Label, output_style: LibOutputStyle): linkable_map[target] = LinkGroupLinkInfo( - link_info = get_link_info(linkable_graph_node_map[target], output_style, prefer_stripped), + link_info = get_link_info(linkable_graph_node_map[target], output_style, prefer_stripped, prefer_optimized), output_style = output_style, - ) # buildifier: disable=uninitialized + link_name = target, + ) def add_link_group(target: Label, target_group: str): # If we've already added this link group to the link line, we're done. + + link_group_spec = link_groups.get(target_group, None) + if link_group_spec and link_group_spec.attrs.prohibit_file_duplicates and public_nodes and public_nodes.contains(target): + if target_group not in group_srcs: + group_srcs[target_group] = {} + target_group_srcs = group_srcs[target_group] + for src in linkable_graph_node_map[target].srcs: + if not isinstance(src, Artifact): + # "src" is either a source file or a source file with a list of compilation flags. + # We do not handle the case where compilation flags are attached to source files, + # because we don't know whether the link is going to fail or not, so we let the user deal with linker errors if there are any. + continue + + previous_target = target_group_srcs.get(src, None) + if previous_target and previous_target != target: + fail("'{}' artifact included multiple times in '{}' link group.
From '{}:{}' and '{}:{}'".format(src, target_group, target.package, target.name, previous_target.package, previous_target.name)) + else: + target_group_srcs[src] = target + if target_group in link_group_added: return @@ -392,48 +459,81 @@ def get_filtered_labels_to_links_map( linkable_map[target] = LinkGroupLinkInfo( link_info = get_link_info_from_link_infos(shared_link_infos), output_style = LibOutputStyle("shared_lib"), - ) # buildifier: disable=uninitialized + link_name = target_group, + ) filtered_groups = [None, NO_MATCH_LABEL, MATCH_ALL_LABEL] for target in linkables: node = linkable_graph_node_map[target] + target_link_group = link_group_mappings.get(target) + output_style = get_lib_output_style(link_strategy, link_group_preferred_linkage.get(target, node.preferred_linkage), pic_behavior) + output_style_for_static_strategy = get_lib_output_style(LinkStrategy("static_pic"), link_group_preferred_linkage.get(target, node.preferred_linkage), pic_behavior) + is_forced_shared_linkage = output_style_for_static_strategy == LibOutputStyle("shared_lib") - # Always link any shared dependencies - if output_style == LibOutputStyle("shared_lib"): + # We should always add force-static libs to the link. + is_force_static_lib = force_static_follows_dependents and node.preferred_linkage == Linkage("static") and not node.ignore_force_static_follows_dependents + + # If this belongs to the match all link group or the group currently being evaluated + matches_current_link_group = target_link_group == MATCH_ALL_LABEL or target_link_group == link_group + + if is_forced_shared_linkage: # filter out any dependencies to be discarded - group = link_groups.get(link_group_mappings.get(target)) + group = link_groups.get(target_link_group) if group != None and group.attrs.discard_group: continue # If this target is a link group root library, we # 1) don't propagate shared linkage down the tree, and # 2) use the provided link info in lieu of what's in the graph. - target_link_group = link_group_roots.get(target) - if target_link_group != None and target_link_group != link_group: - add_link_group(target, target_link_group) + root_link_group = link_group_roots.get(target) + if root_link_group != None and root_link_group != link_group: + add_link_group(target, root_link_group) else: add_link(target, LibOutputStyle("shared_lib")) - else: # static or static_pic - target_link_group = link_group_mappings.get(target) - - # Always add force-static libs to the link. - if force_static_follows_dependents and node.preferred_linkage == Linkage("static"): - add_link(target, output_style) - elif not target_link_group and not link_group: - # Ungrouped linkable targets belong to the unlabeled executable - add_link(target, output_style) - elif is_executable_link and target_link_group == NO_MATCH_LABEL: - # Targets labeled NO_MATCH belong to the unlabeled executable - add_link(target, output_style) - elif target_link_group == MATCH_ALL_LABEL or target_link_group == link_group: - # If this belongs to the match all link group or the group currently being evaluated - add_link(target, output_style) - elif target_link_group not in filtered_groups: - add_link_group(target, target_link_group) - - return linkable_map + + else: + # Shared vs static linkage branches are similar, but separated for + # clarity and ease of debugging. + if link_strategy == LinkStrategy("shared"): + if (target_link_group and matches_current_link_group) or is_force_static_lib: + # Target linked statically if: + # 1.
It belongs to the current link group (unique symbols across graph) + # 2. It matches all link groups (can duplicate symbols across graph) + # 3. It forces static linkage (can duplicate symbols across graph) + add_link(target, output_style_for_static_strategy) + + elif not target_link_group or target_link_group == NO_MATCH_LABEL: + # Target directly linked dynamically if: + # 1. It doesn't belong to any link group + # 2. It belongs to the NO_MATCH group + add_link(target, output_style) + + elif target_link_group not in filtered_groups: + # Target linked dynamically through another link group if: + # 1. It matches another link group + add_link_group(target, target_link_group) + + else: # static or static_pic + # Always add force-static libs to the link. + if is_force_static_lib: + add_link(target, output_style) + elif not target_link_group and not link_group: + # Ungrouped linkable targets belong to the unlabeled executable + add_link(target, output_style) + elif is_executable_link and target_link_group == NO_MATCH_LABEL: + # Targets labeled NO_MATCH belong to the unlabeled executable + add_link(target, output_style) + elif matches_current_link_group: + # If this belongs to the match all link group or the group currently being evaluated + add_link(target, output_style) + elif target_link_group not in filtered_groups: + add_link_group(target, target_link_group) + + return FinalLabelsToLinks( + map = linkable_map, + ) # Find all link group libraries that are first order deps or exported deps of # the executable or another link group's libs @@ -496,7 +596,7 @@ def get_public_link_group_nodes( external_link_group_nodes.update( # get transitive exported deps - breadth_first_traversal_by( + depth_first_traversal_by( linkable_graph_node_map, external_link_group_nodes.list(), discover_link_group_linkables, @@ -526,6 +626,46 @@ def get_link_group_map_json(ctx: AnalysisContext, targets: list[TargetLabel]) -> json_map = ctx.actions.write_json(LINK_GROUP_MAP_DATABASE_FILENAME, sorted(targets)) return DefaultInfo(default_output = json_map) +def _find_all_relevant_roots( + specs: list[LinkGroupLibSpec], + link_group_mappings: dict[Label, str], # target label to link group name + roots: list[Label], + linkable_graph_node_map: dict[Label, LinkableNode]) -> dict[str, list[Label]]: + relevant_roots = {} + link_groups_for_full_traversal = set() # set[str] + + for spec in specs: + if spec.root != None: + relevant_roots[spec.group.name] = spec.root.deps + else: + roots_from_mappings, has_empty_root = _get_roots_from_mappings(spec, linkable_graph_node_map) + relevant_roots[spec.group.name] = roots_from_mappings + if has_empty_root: + link_groups_for_full_traversal.add(spec.group.name) + + def collect_and_traverse_roots(node_target: Label) -> list[Label]: + node = linkable_graph_node_map.get(node_target) + if node.preferred_linkage == Linkage("static") and not node.ignore_force_static_follows_dependents: + return node.all_deps + + node_link_group = link_group_mappings.get(node_target) + + if node_link_group == MATCH_ALL_LABEL: + # Add node into the list of roots for all link groups + for link_group in relevant_roots.keys(): + relevant_roots[link_group].append(node_target) + elif link_groups_for_full_traversal.contains(node_link_group) and node_link_group != NO_MATCH_LABEL: + relevant_roots[node_link_group].append(node_target) + return node.all_deps + + depth_first_traversal_by( + linkable_graph_node_map, + roots, + collect_and_traverse_roots, + ) + + return relevant_roots + def find_relevant_roots( link_group: [str, None] =
None, linkable_graph_node_map: dict[Label, LinkableNode] = {}, @@ -533,22 +673,26 @@ def find_relevant_roots( roots: list[Label] = []): # Walk through roots looking for the first node which maps to the current # link group. - def collect_and_traverse_roots(roots, node_target): + + def collect_and_traverse_roots(roots, node_target: Label) -> list[Label] | None: node = linkable_graph_node_map.get(node_target) - if node.preferred_linkage == Linkage("static"): - return node.deps + node.exported_deps + if node.preferred_linkage == Linkage("static") and not node.ignore_force_static_follows_dependents: + return node.all_deps + node_link_group = link_group_mappings.get(node_target) + if node_link_group == MATCH_ALL_LABEL: roots.append(node_target) - return [] - if node_link_group == link_group: + elif node_link_group == link_group: roots.append(node_target) - return [] - return node.deps + node.exported_deps + else: + return node.all_deps + + return None relevant_roots = [] - breadth_first_traversal_by( + depth_first_traversal_by( linkable_graph_node_map, roots, partial(collect_and_traverse_roots, relevant_roots), @@ -556,24 +700,46 @@ def find_relevant_roots( return relevant_roots +def _get_roots_from_mappings( + spec: LinkGroupLibSpec, + linkable_graph_node_map: dict[Label, LinkableNode]) -> (list[Label], bool): + roots = [] + has_empty_root = False + for mapping in spec.group.mappings: + # If there's no explicit root, this means we need to search the entire + # graph to find candidate nodes. + if not mapping.roots: + has_empty_root = True + elif spec.group.attrs.requires_root_node_exists: + # If the spec requires roots to always exist (default True), always include them in the traversal so we fail hard if one is not in the deps. + # Otherwise, add a root to the traversal only if we are sure it is in the deps graph. + roots.extend(mapping.roots) + else: + roots.extend([root for root in mapping.roots if root in linkable_graph_node_map]) + return (roots, has_empty_root) + +_CreatedLinkGroup = record( + linked_object = field(LinkedObject), + labels_to_links = field(FinalLabelsToLinks), +) + def _create_link_group( ctx: AnalysisContext, spec: LinkGroupLibSpec, - # The deps of the top-level executable. - executable_deps: list[Label] = [], - # Additional roots involved in the link. - other_roots: list[Label] = [], + roots: list[Label], + link_strategy: LinkStrategy, public_nodes: set_record = set(), linkable_graph_node_map: dict[Label, LinkableNode] = {}, linker_flags: list[typing.Any] = [], link_groups: dict[str, Group] = {}, link_group_mappings: dict[Label, str] = {}, link_group_preferred_linkage: dict[Label, Linkage] = {}, - link_strategy: LinkStrategy = LinkStrategy("static_pic"), link_group_libs: dict[str, ([Label, None], LinkInfos)] = {}, prefer_stripped_objects: bool = False, category_suffix: [str, None] = None, - anonymous: bool = False) -> [LinkedObject, None]: + anonymous: bool = False, + allow_cache_upload = False, + error_handler = None) -> _CreatedLinkGroup | None: """ Link a link group library, described by a `LinkGroupLibSpec`. This is intended to handle regular shared libs and e.g. Python extensions. @@ -593,10 +759,6 @@ def _create_link_group( get_ignore_undefined_symbols_flags(linker_type), )) - # Get roots to begin the linkable search. - # TODO(agallagher): We should use the groups "public" nodes as the roots.
-    # Get roots to begin the linkable search.
-    # TODO(agallagher): We should use the groups "public" nodes as the roots.
-    roots = []
-    has_empty_root = False
     if spec.root != None:
         # If there's a linkable root attached to the spec, use that to guide
         # linking, as that will contain things like private linker flags that
@@ -605,31 +767,10 @@ def _create_link_group(
             spec.root.link_infos,
             prefer_stripped = prefer_stripped_objects,
         ))
-        roots.extend(spec.root.deps)
-    else:
-        for mapping in spec.group.mappings:
-            # If there's no explicit root, this means we need to search the entire
-            # graph to find candidate nodes.
-            if mapping.root == None:
-                has_empty_root = True
-            else:
-                roots.append(mapping.root)
-
-        # If this link group has an empty mapping, we need to search everything
-        # -- even the additional roots -- to find potential nodes in the link
-        # group.
-        if has_empty_root:
-            roots.extend(
-                find_relevant_roots(
-                    link_group = spec.group.name,
-                    linkable_graph_node_map = linkable_graph_node_map,
-                    link_group_mappings = link_group_mappings,
-                    roots = executable_deps + other_roots,
-                ),
-            )

     # Add roots...
-    filtered_labels_to_links_map = get_filtered_labels_to_links_map(
+    filtered_labels_to_links = get_filtered_labels_to_links_map(
+        public_nodes,
         linkable_graph_node_map,
         spec.group.name,
         link_groups,
@@ -641,10 +782,11 @@ def _create_link_group(
         roots = roots,
         is_executable_link = False,
         prefer_stripped = prefer_stripped_objects,
+        prefer_optimized = spec.group.attrs.prefer_optimized_experimental,
     )
-    inputs.extend(get_filtered_links(filtered_labels_to_links_map, public_nodes))
+    inputs.extend(get_filtered_links(filtered_labels_to_links.map, public_nodes))

-    if not filtered_labels_to_links_map and not spec.root:
+    if not filtered_labels_to_links.map and not spec.root:
         # don't create empty shared libraries
         return None
@@ -660,10 +802,15 @@ def _create_link_group(
             # TODO: anonymous targets cannot be used with dynamic output yet
             enable_distributed_thinlto = False if anonymous else spec.group.attrs.enable_distributed_thinlto,
             link_execution_preference = LinkExecutionPreference("any"),
+            allow_cache_upload = allow_cache_upload,
+            error_handler = error_handler,
         ),
         anonymous = anonymous,
     )
-    return link_result.linked_object
+    return _CreatedLinkGroup(
+        linked_object = link_result.linked_object,
+        labels_to_links = filtered_labels_to_links,
+    )

 def _stub_library(
         ctx: AnalysisContext,
@@ -770,17 +917,24 @@ def _symbol_flags_for_link_groups(

 def create_link_groups(
         ctx: AnalysisContext,
+        public_nodes: set_record,
+        link_strategy: LinkStrategy,
         link_groups: dict[str, Group] = {},
         link_group_specs: list[LinkGroupLibSpec] = [],
         executable_deps: list[Label] = [],
         other_roots: list[Label] = [],
-        root_link_group: [str, None] = None,
         linker_flags: list[typing.Any] = [],
         prefer_stripped_objects: bool = False,
         linkable_graph_node_map: dict[Label, LinkableNode] = {},
         link_group_preferred_linkage: dict[Label, Linkage] = {},
         link_group_mappings: [dict[Label, str], None] = None,
-        anonymous: bool = False) -> _LinkedLinkGroups:
+        anonymous: bool = False,
+        allow_cache_upload = False,
+        error_handler: [typing.Callable, None] = None) -> _LinkedLinkGroups:
+    # We are linking libraries here, so we need PIC
+    if link_strategy == LinkStrategy("static"):
+        link_strategy = LinkStrategy("static_pic")
+
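
To make the strategy rewrite at the top of `create_link_groups` concrete, a tiny hedged sketch (plain strings stand in for `LinkStrategy` values): link group libraries are emitted as shared objects, which require position-independent code.

```python
# Sketch: shared objects need PIC, so a plain "static" strategy is
# promoted to "static_pic" before any link group library is linked.
def normalize_link_strategy(strategy: str) -> str:
    return "static_pic" if strategy == "static" else strategy

assert normalize_link_strategy("static") == "static_pic"
assert normalize_link_strategy("shared") == "shared"  # other strategies pass through
```
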
     # Generate stubs first, so that subsequent links can link against them.
     link_group_shared_links = {}
     specs = []
@@ -799,26 +953,27 @@ def create_link_groups(
             anonymous = anonymous,
         )

+    targets_consumed_by_link_groups = {}
     linked_link_groups = {}
+    link_groups_debug_info = {}
     undefined_symfiles = []
     global_symfiles = []
-
-    public_nodes = get_public_link_group_nodes(
-        linkable_graph_node_map,
+    roots = _find_all_relevant_roots(
+        specs,
         link_group_mappings,
         executable_deps + other_roots,
-        root_link_group,
+        linkable_graph_node_map,
     )

     for link_group_spec in specs:
         # NOTE(agallagher): It might make sense to move this down to be
         # done when we generate the links for the executable, so we can
         # handle the case when a link group can depend on the executable.
-        link_group_lib = _create_link_group(
+        created_link_group = _create_link_group(
             ctx = ctx,
             spec = link_group_spec,
-            executable_deps = executable_deps,
-            other_roots = other_roots,
+            roots = roots[link_group_spec.group.name],
+            link_strategy = link_strategy,
             linkable_graph_node_map = linkable_graph_node_map,
             public_nodes = public_nodes,
             linker_flags = (
@@ -838,12 +993,30 @@ def create_link_groups(
             prefer_stripped_objects = prefer_stripped_objects,
             category_suffix = "link_group",
             anonymous = anonymous,
+            allow_cache_upload = allow_cache_upload,
+            error_handler = error_handler,
         )

-        if link_group_lib == None:
+        if created_link_group == None:
             # the link group did not match anything, don't create shlib interface
             continue

+        link_group_lib = created_link_group.linked_object
+
+        root_label = link_group_spec.root.label if link_group_spec.root else None
+        link_groups_debug_info[link_group_spec.name] = LinkGroupsDebugLinkableItem(
+            ordered_linkables = create_debug_linkable_entries(created_link_group.labels_to_links.map, root_label),
+        )
+
+        for (linked_target, link_info) in created_link_group.labels_to_links.map.items():
+            if link_info.output_style != LibOutputStyle("shared_lib"):
+                # Remember all targets that were statically linked into the link group
+                targets_consumed_by_link_groups[linked_target] = link_group_spec.group.name
+
+        if link_group_spec.root:
+            # If a link group has a root, it is always linked statically
+            targets_consumed_by_link_groups[link_group_spec.root.label] = link_group_spec.group.name
+
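
The `targets_consumed_by_link_groups` bookkeeping above can be sketched as follows (plain dicts and strings stand in for `FinalLabelsToLinks` and `LibOutputStyle`; the labels are illustrative):

```python
# Sketch: record which targets a link group folded in statically, so a
# later stage can point their sonames at the group's shared object.
def record_consumed_targets(group_name, labels_to_output_style, root_label=None):
    consumed = {}
    for target, output_style in labels_to_output_style.items():
        if output_style != "shared_lib":
            consumed[target] = group_name  # statically linked into the group
    if root_label is not None:
        consumed[root_label] = group_name  # a spec root is always linked statically
    return consumed

print(record_consumed_targets("lg", {"//lib:a": "archive", "//lib:d": "shared_lib"}, "//bin:root"))
# {'//lib:a': 'lg', '//bin:root': 'lg'}
```
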
         # On GNU, use shlib interfaces.
         if cxx_is_gnu(ctx):
             shlib_for_link = shared_library_interface(
@@ -860,7 +1033,15 @@ def create_link_groups(
         linked_link_groups[link_group_spec.group.name] = _LinkedLinkGroup(
             artifact = link_group_lib,
             library = None if not link_group_spec.is_shared_lib else LinkGroupLib(
-                shared_libs = {link_group_spec.name: link_group_lib},
+                shared_libs = SharedLibraries(
+                    libraries = [
+                        create_shlib(
+                            label = link_group_spec.label or ctx.label,
+                            soname = link_group_spec.name,
+                            lib = link_group_lib,
+                        ),
+                    ],
+                ),
                 shared_link_infos = LinkInfos(
                     default = wrap_link_info(
                         link_info,
@@ -896,6 +1077,8 @@ def create_link_groups(
     return _LinkedLinkGroups(
         libs = linked_link_groups,
         symbol_ldflags = symbol_ldflags,
+        libs_debug_info = link_groups_debug_info,
+        targets_consumed_by_link_groups = targets_consumed_by_link_groups,
     )

 def get_transitive_deps_matching_labels(
@@ -913,3 +1096,87 @@
             continue
         nodes.append(dep)
     return nodes
+
+def build_shared_libs_for_symlink_tree(
+        use_link_groups: bool,
+        link_group_ctx: LinkGroupContext,
+        link_strategy: LinkStrategy,
+        shared_libraries: list[SharedLibrary],
+        extra_shared_libraries: list[SharedLibrary]) -> list[SharedLibrary]:
+    # Which targets we actually materialized as symlinks to a link group
+    added_link_group_symlinks_libs = set()
+    symlink_tree_shared_libraries = []
+
+    def is_shlib_added(soname: Soname) -> bool:
+        return soname.is_str and added_link_group_symlinks_libs.contains(soname.ensure_str())
+
+    def add_shib(shlib: SharedLibrary):
+        if shlib.soname.is_str:
+            added_link_group_symlinks_libs.add(shlib.soname.ensure_str())
+        symlink_tree_shared_libraries.append(shlib)
+
+    if use_link_groups:
+        # When there are no matches for a pattern based link group,
+        # `link_group_mappings` will not have an entry associated with the lib.
+        for _name, link_group_lib in link_group_ctx.link_group_libs.items():
+            for link_group_shlib in link_group_lib.shared_libs.libraries:
+                add_shib(link_group_shlib)
+
+    for shlib in shared_libraries:
+        if is_shlib_added(shlib.soname):
+            # The shlib was already materialized as a link group.
+            # This may happen if the link group spec had this target
+            # as a root. That produces a link group with the exact
+            # .so file, and the dynamic linker will be satisfied.
+            continue
+
+        if link_strategy == LinkStrategy("shared") and shlib.label in link_group_ctx.targets_consumed_by_link_groups:
+            link_group_link = create_link_group_link(
+                link_group_ctx.link_group_libs[link_group_ctx.targets_consumed_by_link_groups[shlib.label]],
+                shlib,
+            )
+            add_shib(link_group_link)
+
+        elif not use_link_groups or is_link_group_shlib(shlib.label, link_group_ctx):
+            add_shib(shlib)
+
+    # Add in extra, rule-specific shared libs.
+    for extra_shlib in extra_shared_libraries:
+        if not is_shlib_added(extra_shlib.soname):
+            add_shib(extra_shlib)
+
+    return symlink_tree_shared_libraries
+
+def create_link_group_link(
+        link_group_lib: LinkGroupLib,
+        consumed_library: SharedLibrary) -> SharedLibrary:
+    """
+    This method implements symlinking from the original .so to the link group .so
+    for link groups under **dynamic linking**.
+    The problem: with the following setup
+    ```
+      :bin
+     |    |
+    :A    :C
+     |    │
+     └ :B ┘
+    ```
+
+    If we put `:A` and `:B` into a link group, `lib_c.so` will still add `lib_b.so` to its `DT_NEEDED` entries.
+    But `lib_b.so` is going to be grouped into `lib_a_b_lg.so`, and there is no way to propagate this information to `lib_c.so`.
+    However, we can create "stubs" for `lib_a.so` and `lib_b.so` that all point to the actual `lib_a_b_lg.so`.
+    This approach satisfies the dynamic linker.
+    """

+    if len(link_group_lib.shared_libs.libraries) != 1:
+        fail("This method should only be used with auto link groups that produce exactly one shared library")
+    link_group_shlib = link_group_lib.shared_libs.libraries[0]
+
+    return create_shlib(
+        lib = link_group_shlib.lib,
+        link_args = link_group_shlib.link_args,
+        shlib_deps = link_group_shlib.shlib_deps,
+        can_be_asset = link_group_shlib.can_be_asset,
+        soname = consumed_library.soname,  # <=== we match the original target's soname, which will symlink to the link group
+        label = consumed_library.label,
+    )
diff --git a/prelude/cxx/link_groups_types.bzl b/prelude/cxx/link_groups_types.bzl
new file mode 100644
index 0000000000000..7c0037e5e5e7b
--- /dev/null
+++ b/prelude/cxx/link_groups_types.bzl
@@ -0,0 +1,87 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+load(
+    "@prelude//linking:link_info.bzl",
+    "LibOutputStyle",
+)
+load("@prelude//linking:types.bzl", "Linkage")
+load(":groups_types.bzl", "Group", "Traversal")
+
+# These are targets or link groups that will be added to the .linker.argsfile.
+# Targets will be expanded to .o files; link groups will be added as DT_NEEDED entries.
+LinkGroupsDebugLinkableEntry = record(
+    name = field(Label | str),
+    output_style = field(LibOutputStyle),
+)
+
+# This is info about a single output unit. It is either the final binary or
+# one of the link groups.
+LinkGroupsDebugLinkableItem = record(
+    ordered_linkables = field(list[LinkGroupsDebugLinkableEntry]),
+)
+
+LinkGroupsDebugLinkInfo = record(
+    binary = field(LinkGroupsDebugLinkableItem),
+    libs = field(dict[str, LinkGroupsDebugLinkableItem]),
+)
+
+LinkGroupInfo = provider(
+    fields = {
+        # Additional graphs needed to cover labels referenced by the groups above.
+        # This is useful in cases where the consumer of this provider won't already
+        # have deps covering these.
+        # NOTE(agallagher): We do this to maintain existing behavior w/ the
+        # standalone `link_group_map()` rule, but it's not clear if it's actually
+        # desirable behavior.
+        "graph": provider_field(typing.Any, default = None),  # LinkableGraph
+        "groups": provider_field(dict[str, Group]),
+        "groups_hash": provider_field(int),
+        "mappings": provider_field(dict[Label, str]),
+    },
+)
+
+def link_group_inlined_map_attr(root_attr):
+    return attrs.list(
+        attrs.tuple(
+            # name
+            attrs.string(),
+            # list of mappings
+            attrs.list(
+                # a single mapping
+                attrs.tuple(
+                    # root node
+                    attrs.one_of(root_attr, attrs.list(root_attr)),
+                    # traversal
+                    attrs.enum(Traversal.values()),
+                    # filters, either `None`, a single filter, or a list of filters
+                    # (which must all match).
+                    attrs.option(attrs.one_of(attrs.list(attrs.string()), attrs.string())),
+                    # linkage
+                    attrs.option(attrs.enum(Linkage.values())),
+                ),
+            ),
+            # attributes
+            attrs.option(
+                attrs.dict(key = attrs.string(), value = attrs.any(), sorted = False),
+            ),
+        ),
+    )
+
+LINK_GROUP_MAP_ATTR = attrs.option(
+    attrs.one_of(
+        attrs.dep(providers = [LinkGroupInfo]),
+        link_group_inlined_map_attr(
+            # Inlined `link_group_map` will parse roots as `label`s, to avoid
+            # bloating deps w/ unrelated mappings (e.g. it's common to use
+            # a default mapping for all rules, which would otherwise add
+            # unrelated deps to them).
+ root_attr = attrs.option(attrs.label()), + ), + ), + default = None, +) diff --git a/prelude/cxx/link_types.bzl b/prelude/cxx/link_types.bzl index ddcac7c52bf4d..bf92db7d42b06 100644 --- a/prelude/cxx/link_types.bzl +++ b/prelude/cxx/link_types.bzl @@ -32,11 +32,15 @@ LinkOptions = record( strip = bool, # A function/lambda which will generate the strip args using the ctx. strip_args_factory = [typing.Callable, None], - import_library = [Artifact, None], + import_library = Artifact | None, allow_cache_upload = bool, cxx_toolchain = [CxxToolchainInfo, None], # Force callers to use link_options() or merge_link_options() to create. __private_use_link_options_function_to_construct = None, + error_handler = [typing.Callable, None], + # Factory methods used to provide extra linker outputs and flags. + extra_linker_outputs_factory = field(typing.Callable | None, None), + extra_linker_outputs_flags_factory = field(typing.Callable | None, None), ) def link_options( @@ -49,9 +53,12 @@ def link_options( identifier: [str, None] = None, strip: bool = False, strip_args_factory = None, - import_library: [Artifact, None] = None, + import_library: Artifact | None = None, allow_cache_upload: bool = False, - cxx_toolchain: [CxxToolchainInfo, None] = None) -> LinkOptions: + cxx_toolchain: [CxxToolchainInfo, None] = None, + error_handler: [typing.Callable, None] = None, + extra_linker_outputs_factory: [typing.Callable, None] = None, + extra_linker_outputs_flags_factory: [typing.Callable, None] = None) -> LinkOptions: """ A type-checked constructor for LinkOptions because by default record constructors aren't typed. @@ -70,9 +77,12 @@ def link_options( allow_cache_upload = allow_cache_upload, cxx_toolchain = cxx_toolchain, __private_use_link_options_function_to_construct = None, + error_handler = error_handler, + extra_linker_outputs_factory = extra_linker_outputs_factory, + extra_linker_outputs_flags_factory = extra_linker_outputs_flags_factory, ) -# A marker instance to differentiate explicitly-passed None and a field tha +# A marker instance to differentiate explicitly-passed None and a field that # isn't provided in merge_link_options. _NotProvided = record() _NOT_PROVIDED = _NotProvided() @@ -110,4 +120,7 @@ def merge_link_options( allow_cache_upload = base.allow_cache_upload if allow_cache_upload == _NOT_PROVIDED else allow_cache_upload, cxx_toolchain = base.cxx_toolchain if cxx_toolchain == _NOT_PROVIDED else cxx_toolchain, __private_use_link_options_function_to_construct = None, + error_handler = base.error_handler, + extra_linker_outputs_factory = base.extra_linker_outputs_factory, + extra_linker_outputs_flags_factory = base.extra_linker_outputs_flags_factory, ) diff --git a/prelude/cxx/linker.bzl b/prelude/cxx/linker.bzl index 1d1143a183a40..09ec0eea0c6a6 100644 --- a/prelude/cxx/linker.bzl +++ b/prelude/cxx/linker.bzl @@ -5,9 +5,9 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -load("@prelude//cxx:cxx_toolchain_types.bzl", "LinkerInfo") +load("@prelude//cxx:cxx_toolchain_types.bzl", "LinkerInfo", "LinkerType") load("@prelude//utils:arglike.bzl", "ArgLike") -load("@prelude//utils:utils.bzl", "expect") +load("@prelude//utils:expect.bzl", "expect") # Platform-specific linker flags handling. Modeled after the `Linker` abstraction # in v1 (https://fburl.com/diffusion/kqd2ylcy). @@ -30,23 +30,23 @@ SharedLibraryFlagOverrides = record( # How to format arguments to the linker to set a shared lib name. 
shared_library_name_linker_flags_format = list[str], # Flags to pass to the linker to make it generate a shared library. - shared_library_flags = list[str], + shared_library_flags = list[ArgLike], ) LINKERS = { - "darwin": Linker( + LinkerType("darwin"): Linker( default_shared_library_extension = "dylib", default_shared_library_versioned_extension_format = "{}.dylib", shared_library_name_linker_flags_format = ["-install_name", "@rpath/{}"], shared_library_flags = ["-shared"], ), - "gnu": Linker( + LinkerType("gnu"): Linker( default_shared_library_extension = "so", default_shared_library_versioned_extension_format = "so.{}", shared_library_name_linker_flags_format = ["-Wl,-soname,{}"], shared_library_flags = ["-shared"], ), - "wasm": Linker( + LinkerType("wasm"): Linker( default_shared_library_extension = "wasm", default_shared_library_versioned_extension_format = "{}.wasm", shared_library_name_linker_flags_format = [], @@ -54,7 +54,7 @@ LINKERS = { # See https://github.com/WebAssembly/tool-conventions/blob/main/DynamicLinking.md#llvm-implementation shared_library_flags = ["-shared"], ), - "windows": Linker( + LinkerType("windows"): Linker( default_shared_library_extension = "dll", default_shared_library_versioned_extension_format = "dll", # NOTE(agallagher): I *think* windows doesn't support a flag to set the @@ -138,7 +138,7 @@ def get_default_shared_library_name(linker_info: LinkerInfo, label: Label): short_name = "{}_{}".format(_sanitize(label.package), _sanitize(label.name)) return get_shared_library_name(linker_info, short_name, apply_default_prefix = True) -def get_shared_library_name_linker_flags(linker_type: str, soname: str, flag_overrides: [SharedLibraryFlagOverrides, None] = None) -> list[str]: +def get_shared_library_name_linker_flags(linker_type: LinkerType, soname: str, flag_overrides: [SharedLibraryFlagOverrides, None] = None) -> list[str]: """ Arguments to pass to the linker to set the given soname. """ @@ -152,7 +152,7 @@ def get_shared_library_name_linker_flags(linker_type: str, soname: str, flag_ove for f in shared_library_name_linker_flags_format ] -def get_shared_library_flags(linker_type: str, flag_overrides: [SharedLibraryFlagOverrides, None] = None) -> list[str]: +def get_shared_library_flags(linker_type: LinkerType, flag_overrides: [SharedLibraryFlagOverrides, None] = None) -> list[ArgLike]: """ Arguments to pass to the linker to link a shared library. """ @@ -161,24 +161,24 @@ def get_shared_library_flags(linker_type: str, flag_overrides: [SharedLibraryFla return LINKERS[linker_type].shared_library_flags -def get_link_whole_args(linker_type: str, inputs: list[Artifact]) -> list[typing.Any]: +def get_link_whole_args(linker_type: LinkerType, inputs: list[Artifact]) -> list[typing.Any]: """ Return linker args used to always link all the given inputs. 
""" args = [] - if linker_type == "gnu": + if linker_type == LinkerType("gnu"): args.append("-Wl,--whole-archive") args.extend(inputs) args.append("-Wl,--no-whole-archive") - elif linker_type == "darwin": + elif linker_type == LinkerType("darwin"): for inp in inputs: args.append("-Xlinker") args.append("-force_load") args.append("-Xlinker") args.append(inp) - elif linker_type == "windows": + elif linker_type == LinkerType("windows"): for inp in inputs: args.append(inp) args.append("/WHOLEARCHIVE:" + inp.basename) @@ -187,42 +187,42 @@ def get_link_whole_args(linker_type: str, inputs: list[Artifact]) -> list[typing return args -def get_objects_as_library_args(linker_type: str, objects: list[Artifact]) -> list[typing.Any]: +def get_objects_as_library_args(linker_type: LinkerType, objects: list[Artifact]) -> list[typing.Any]: """ Return linker args used to link the given objects as a library. """ args = [] - if linker_type == "gnu": + if linker_type == LinkerType("gnu"): args.append("-Wl,--start-lib") args.extend(objects) args.append("-Wl,--end-lib") - elif linker_type == "windows": + elif linker_type == LinkerType("darwin") or linker_type == LinkerType("windows"): args.extend(objects) else: fail("Linker type {} not supported".format(linker_type)) return args -def get_ignore_undefined_symbols_flags(linker_type: str) -> list[str]: +def get_ignore_undefined_symbols_flags(linker_type: LinkerType) -> list[str]: """ Return linker args used to suppress undefined symbol errors. """ args = [] - if linker_type == "gnu": + if linker_type == LinkerType("gnu"): args.append("-Wl,--allow-shlib-undefined") args.append("-Wl,--unresolved-symbols=ignore-all") - elif linker_type == "darwin": - args.append("-Wl,-flat_namespace,-undefined,suppress") + elif linker_type == LinkerType("darwin"): + args.append("-Wl,-undefined,dynamic_lookup") else: fail("Linker type {} not supported".format(linker_type)) return args -def get_no_as_needed_shared_libs_flags(linker_type: str) -> list[str]: +def get_no_as_needed_shared_libs_flags(linker_type: LinkerType) -> list[str]: """ Return linker args used to prevent linkers from dropping unused shared library dependencies from the e.g. DT_NEEDED tags of the link. 
@@ -230,49 +230,59 @@ def get_no_as_needed_shared_libs_flags(linker_type: str) -> list[str]: args = [] - if linker_type == "gnu": + if linker_type == LinkerType("gnu"): args.append("-Wl,--no-as-needed") - elif linker_type == "darwin": + elif linker_type == LinkerType("darwin"): pass else: fail("Linker type {} not supported".format(linker_type)) return args -def get_output_flags(linker_type: str, output: Artifact) -> list[ArgLike]: - if linker_type == "windows": +def get_output_flags(linker_type: LinkerType, output: Artifact) -> list[ArgLike]: + if linker_type == LinkerType("windows"): return ["/Brepro", cmd_args(output.as_output(), format = "/OUT:{}")] else: return ["-o", output.as_output()] def get_import_library( ctx: AnalysisContext, - linker_type: str, - output_short_path: str) -> ([Artifact, None], list[ArgLike]): - if linker_type == "windows": + linker_type: LinkerType, + output_short_path: str) -> (Artifact | None, list[ArgLike]): + if linker_type == LinkerType("windows"): import_library = ctx.actions.declare_output(output_short_path + ".imp.lib") return import_library, [cmd_args(import_library.as_output(), format = "/IMPLIB:{}")] else: return None, [] +def get_deffile_flags( + ctx: AnalysisContext, + linker_type: LinkerType) -> list[ArgLike]: + if linker_type == LinkerType("windows") and ctx.attrs.deffile != None: + return [ + cmd_args(ctx.attrs.deffile, format = "/DEF:{}"), + ] + else: + return [] + def get_rpath_origin( - linker_type: str) -> str: + linker_type: LinkerType) -> str: """ Return the macro that runtime loaders resolve to the main executable at runtime. """ - if linker_type == "gnu": + if linker_type == LinkerType("gnu"): return "$ORIGIN" - if linker_type == "darwin": + if linker_type == LinkerType("darwin"): return "@loader_path" fail("Linker type {} not supported".format(linker_type)) def is_pdb_generated( - linker_type: str, + linker_type: LinkerType, linker_flags: list[[str, ResolvedStringWithMacros]]) -> bool: - if linker_type != "windows": + if linker_type != LinkerType("windows"): return False for flag in reversed(linker_flags): flag = str(flag).upper() diff --git a/prelude/cxx/omnibus.bzl b/prelude/cxx/omnibus.bzl index db93c9162ff77..0de7f4b705aca 100644 --- a/prelude/cxx/omnibus.bzl +++ b/prelude/cxx/omnibus.bzl @@ -6,7 +6,11 @@ # of this source tree. load("@prelude//:local_only.bzl", "get_resolved_cxx_binary_link_execution_preference") -load("@prelude//cxx:cxx_toolchain_types.bzl", "PicBehavior") +load( + "@prelude//cxx:cxx_toolchain_types.bzl", + "LinkerType", + "PicBehavior", +) load( "@prelude//cxx:link.bzl", "CxxLinkResult", # @unused Used as a type @@ -20,7 +24,6 @@ load( "LinkInfo", "LinkInfos", "LinkStrategy", - "Linkage", "LinkedObject", "SharedLibLinkable", "get_lib_output_style", @@ -38,12 +41,19 @@ load( "linkable_deps", "linkable_graph", ) +load( + "@prelude//linking:shared_libraries.bzl", + "SharedLibrary", # @unused Used as a type + "create_shlib", +) +load("@prelude//linking:types.bzl", "Linkage") +load("@prelude//utils:expect.bzl", "expect") load( "@prelude//utils:graph_utils.bzl", - "breadth_first_traversal_by", + "depth_first_traversal_by", "post_order_traversal", ) -load("@prelude//utils:utils.bzl", "expect", "flatten", "value_or") +load("@prelude//utils:utils.bzl", "flatten", "value_or") load(":cxx_context.bzl", "get_cxx_toolchain_info") load( ":link_types.bzl", @@ -117,7 +127,7 @@ OmnibusRootProduct = record( # The result of the omnibus link. 
 OmnibusSharedLibraries = record(
     omnibus = field([CxxLinkResult, None], None),
-    libraries = field(dict[str, LinkedObject], {}),
+    libraries = field(list[SharedLibrary], []),
     roots = field(dict[Label, OmnibusRootProduct], {}),
     exclusion_roots = field(list[Label]),
     excluded = field(list[Label]),
@@ -138,7 +148,8 @@ def get_roots(deps: list[Dependency]) -> dict[Label, LinkableRootInfo]:
     roots = {}
     for dep in deps:
         if LinkableRootInfo in dep:
-            roots[dep.label] = dep[LinkableRootInfo]
+            root = dep[LinkableRootInfo]
+            roots[root.label] = root
     return roots

 def get_excluded(deps: list[Dependency] = []) -> dict[Label, None]:
@@ -150,11 +161,13 @@ def get_excluded(deps: list[Dependency] = []) -> dict[Label, None]:
     return excluded_nodes

 def create_linkable_root(
+        label: Label,
         link_infos: LinkInfos,
         name: [str, None] = None,
-        deps: list[Dependency] = []) -> LinkableRootInfo:
+        deps: list[LinkableGraph | Dependency] = []) -> LinkableRootInfo:
     # Only include dependencies that are linkable.
     return LinkableRootInfo(
+        label = label,
         name = name,
         link_infos = link_infos,
         deps = linkable_deps(deps),
@@ -191,7 +204,7 @@ def _link_deps(
     def find_deps(node: Label):
         return get_deps_for_link(link_infos[node], LinkStrategy("shared"), pic_behavior)

-    return breadth_first_traversal_by(link_infos, deps, find_deps)
+    return depth_first_traversal_by(link_infos, deps, find_deps)

 def _create_root(
     ctx: AnalysisContext,
@@ -287,10 +300,7 @@ def _create_root(
             links = [LinkArgs(flags = extra_ldflags), LinkArgs(infos = inputs)],
             category_suffix = "omnibus_root",
             identifier = root.name or output,
-            # We prefer local execution because there are lot of cxx_link_omnibus_root
-            # running simultaneously, so while their overall load is reasonable,
-            # their peak execution load is very high.
-            link_execution_preference = LinkExecutionPreference("local"),
+            link_execution_preference = LinkExecutionPreference("any"),
             allow_cache_upload = allow_cache_upload,
         ),
     )
@@ -504,16 +514,16 @@ def _create_omnibus(
     # Add global symbols version script.
     # FIXME(agallagher): Support global symbols for darwin.
-    if linker_info.type != "darwin":
+    if linker_info.type != LinkerType("darwin"):
         global_sym_vers = _create_global_symbols_version_script(
             ctx,
             # Extract symbols from roots...
             root_products.values(),
             # ... and the shared libs from excluded nodes.
             [
-                shared_lib.output
+                shared_lib.lib.output
                 for label in spec.excluded
-                for shared_lib in spec.link_infos[label].shared_libs.values()
+                for shared_lib in spec.link_infos[label].shared_libs.libraries
             ],
             # Extract explicit global symbol names from flags in all body link args.
             global_symbols_link_args,
@@ -586,11 +596,18 @@ def _build_omnibus_spec(
         if label not in excluded
     }

-    # Find the deps of the root nodes. These form the roots of the nodes
-    # included in the omnibus link.
+    # Find the deps of the root nodes that should be linked into
+    # 'libomnibus.so'.
+    #
+    # If a dep indicates preferred linkage static, it is linked directly into
+    # this omnibus root and therefore not added to `first_order_root_deps` and
+    # thereby will not be linked into 'libomnibus.so'. If the dep does not
+    # indicate preferred linkage static, then it is added to
+    # `first_order_root_deps` and thereby will be linked into 'libomnibus.so'.
     first_order_root_deps = []
     for label in _link_deps(graph.nodes, flatten([r.deps for r in roots.values()]), get_cxx_toolchain_info(ctx).pic_behavior):
-        # We only consider deps which aren't *only* statically linked.
+ # Per the comment above, only consider deps which aren't *only* + # statically linked. if _is_static_only(graph.nodes[label]): continue @@ -646,9 +663,10 @@ def _ordered_roots( """ # Calculate all deps each root node needs to link against. - link_deps = {} - for label, root in spec.roots.items(): - link_deps[label] = _link_deps(spec.link_infos, root.deps, pic_behavior) + link_deps = { + label: _link_deps(spec.link_infos, root.deps, pic_behavior) + for label, root in spec.roots.items() + } # Used the link deps to create the graph of root nodes. root_graph = { @@ -656,14 +674,12 @@ def _ordered_roots( for node, deps in link_deps.items() } - ordered_roots = [] - # Emit the root link info in post-order, so that we generate root link rules # for dependencies before their dependents. - for label in post_order_traversal(root_graph): - root = spec.roots[label] - deps = link_deps[label] - ordered_roots.append((label, root, deps)) + ordered_roots = [ + (label, spec.roots[label], link_deps[label]) + for label in post_order_traversal(root_graph) + ] return ordered_roots @@ -671,15 +687,14 @@ def create_omnibus_libraries( ctx: AnalysisContext, graph: OmnibusGraph, extra_ldflags: list[typing.Any] = [], - prefer_stripped_objects: bool = False, - allow_cache_upload: bool = False) -> OmnibusSharedLibraries: + prefer_stripped_objects: bool = False) -> OmnibusSharedLibraries: spec = _build_omnibus_spec(ctx, graph) pic_behavior = get_cxx_toolchain_info(ctx).pic_behavior # Create dummy omnibus dummy_omnibus = create_dummy_omnibus(ctx, extra_ldflags) - libraries = {} + libraries = [] root_products = {} # Link all root nodes against the dummy libomnibus lib. @@ -695,10 +710,16 @@ def create_omnibus_libraries( pic_behavior, extra_ldflags, prefer_stripped_objects, - allow_cache_upload = allow_cache_upload, + allow_cache_upload = True, ) if root.name != None: - libraries[root.name] = product.shared_library + libraries.append( + create_shlib( + soname = root.name, + lib = product.shared_library, + label = label, + ), + ) root_products[label] = product # If we have body nodes, then link them into the monolithic libomnibus.so. @@ -711,14 +732,19 @@ def create_omnibus_libraries( pic_behavior, extra_ldflags, prefer_stripped_objects, - allow_cache_upload = allow_cache_upload, + allow_cache_upload = True, + ) + libraries.append( + create_shlib( + soname = _omnibus_soname(ctx), + lib = omnibus.linked_object, + label = ctx.label, + ), ) - libraries[_omnibus_soname(ctx)] = omnibus.linked_object # For all excluded nodes, just add their regular shared libs. for label in spec.excluded: - for name, lib in spec.link_infos[label].shared_libs.items(): - libraries[name] = lib + libraries.extend(spec.link_infos[label].shared_libs.libraries) return OmnibusSharedLibraries( omnibus = omnibus, diff --git a/prelude/cxx/prebuilt_cxx_library_group.bzl b/prelude/cxx/prebuilt_cxx_library_group.bzl index 3524526b682db..6370818ec542f 100644 --- a/prelude/cxx/prebuilt_cxx_library_group.bzl +++ b/prelude/cxx/prebuilt_cxx_library_group.bzl @@ -5,7 +5,11 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
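
A small Python sketch of the post-order emission used by `_ordered_roots` above (a plain adjacency dict stands in for the root graph): dependencies come out before dependents, so each omnibus root links against roots that have already been produced.

```python
# Sketch: post-order traversal emits dependencies before dependents.
def post_order(graph):
    out, seen = [], set()

    def visit(node):
        if node in seen:
            return
        seen.add(node)
        for dep in graph[node]:
            visit(dep)
        out.append(node)

    for node in graph:
        visit(node)
    return out

print(post_order({"bin": ["a", "c"], "a": ["b"], "c": ["b"], "b": []}))
# ['b', 'a', 'c', 'bin'] -- 'b' is emitted before its dependents
```
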
-load("@prelude//cxx:cxx_toolchain_types.bzl", "PicBehavior") +load( + "@prelude//cxx:cxx_toolchain_types.bzl", + "LinkerType", + "PicBehavior", +) load( "@prelude//cxx:preprocessor.bzl", "CPreprocessor", @@ -25,11 +29,9 @@ load( "LinkInfo", "LinkInfos", "LinkStrategy", - "Linkage", "LinkedObject", "SharedLibLinkable", "create_merged_link_info", - "create_merged_link_info_for_propagation", "get_lib_output_style", "get_output_styles_for_linkage", ) @@ -46,7 +48,10 @@ load( "merge_shared_libraries", ) load("@prelude//linking:strip.bzl", "strip_debug_info") -load("@prelude//utils:utils.bzl", "expect", "flatten_dict") +load("@prelude//linking:types.bzl", "Linkage") +load("@prelude//unix:providers.bzl", "UnixEnv", "create_unix_env_info") +load("@prelude//utils:expect.bzl", "expect") +load("@prelude//utils:utils.bzl", "flatten_dict") load(":cxx_context.bzl", "get_cxx_toolchain_info") load( ":cxx_library_utility.bzl", @@ -112,7 +117,7 @@ def _parse_macro(arg: str) -> [(str, str), None]: def _get_static_link_infos( ctx: AnalysisContext, - linker_type: str, + linker_type: LinkerType, libs: list[Artifact], args: list[str]) -> LinkInfos: """ @@ -267,7 +272,7 @@ def prebuilt_cxx_library_group_impl(ctx: AnalysisContext) -> list[Provider]: args.extend(ctx.attrs.exported_preprocessor_flags) for inc_dir in ctx.attrs.include_dirs: args += ["-isystem", inc_dir] - preprocessor = CPreprocessor(relative_args = CPreprocessorArgs(args = args)) + preprocessor = CPreprocessor(args = CPreprocessorArgs(args = args)) inherited_pp_info = cxx_inherited_preprocessor_infos(exported_deps) providers.append(cxx_merge_cpreprocessors(ctx, [preprocessor], inherited_pp_info)) @@ -321,11 +326,6 @@ def prebuilt_cxx_library_group_impl(ctx: AnalysisContext) -> list[Provider]: static_output_style = get_lib_output_style(LinkStrategy("static"), preferred_linkage, pic_behavior) providers.append(DefaultInfo(default_outputs = outputs[static_output_style])) - # TODO(cjhopman): This is preserving existing behavior, but it doesn't make sense. These lists can be passed - # unmerged to create_merged_link_info below. Potentially that could change link order, so needs to be done more carefully. - merged_inherited_non_exported_link = create_merged_link_info_for_propagation(ctx, inherited_non_exported_link) - merged_inherited_exported_link = create_merged_link_info_for_propagation(ctx, inherited_exported_link) - # Provider for native link. providers.append(create_merged_link_info( ctx, @@ -334,15 +334,16 @@ def prebuilt_cxx_library_group_impl(ctx: AnalysisContext) -> list[Provider]: preferred_linkage = preferred_linkage, # Export link info from our (non-exported) deps (e.g. when we're linking # statically). - deps = [merged_inherited_non_exported_link], + deps = inherited_non_exported_link, # Export link info from our (exported) deps. - exported_deps = [merged_inherited_exported_link], + exported_deps = inherited_exported_link, )) # Propagate shared libraries up the tree. 
+ shared_libs = create_shared_libraries(ctx, solibs) providers.append(merge_shared_libraries( ctx.actions, - create_shared_libraries(ctx, solibs), + shared_libs, filter(None, [x.get(SharedLibraryInfo) for x in deps + exported_deps]), )) @@ -357,7 +358,7 @@ def prebuilt_cxx_library_group_impl(ctx: AnalysisContext) -> list[Provider]: exported_deps = exported_deps, preferred_linkage = preferred_linkage, link_infos = libraries, - shared_libs = solibs, + shared_libs = shared_libs, can_be_asset = getattr(ctx.attrs, "can_be_asset", False) or False, # TODO(cjhopman): this should be set to non-None default_soname = None, @@ -369,4 +370,15 @@ def prebuilt_cxx_library_group_impl(ctx: AnalysisContext) -> list[Provider]: providers.append(merge_link_group_lib_info(deps = deps + exported_deps)) + providers.append( + create_unix_env_info( + actions = ctx.actions, + env = UnixEnv( + label = ctx.label, + native_libs = [shared_libs], + ), + deps = deps + exported_deps, + ), + ) + return providers diff --git a/prelude/cxx/preprocessor.bzl b/prelude/cxx/preprocessor.bzl index 00cd190a67c96..570119a6d0983 100644 --- a/prelude/cxx/preprocessor.bzl +++ b/prelude/cxx/preprocessor.bzl @@ -6,6 +6,7 @@ # of this source tree. load("@prelude//:paths.bzl", "paths") +load("@prelude//cxx:target_sdk_version.bzl", "get_target_sdk_version_flags") load( "@prelude//utils:utils.bzl", "flatten", @@ -32,7 +33,7 @@ SystemIncludeDirs = record( # Compiler type to infer correct include flags compiler_type = field(str), # Directories to be included via [-isystem | /external:I] [arglike things] - include_dirs = field(list["label_relative_path"]), + include_dirs = field(list[CellPath]), ) CPreprocessorArgs = record( @@ -42,25 +43,33 @@ CPreprocessorArgs = record( file_prefix_args = field(list[typing.Any], []), ) +HeaderUnit = record( + name = field(str), + module = field(Artifact), + include_dir = field(Artifact), + import_include = field(str | None), +) + # Note: Any generic attributes are assumed to be relative. CPreprocessor = record( # Relative path args to be used for build operations. - relative_args = field(CPreprocessorArgs, CPreprocessorArgs()), - # Absolute path args used to generate extra user-specific outputs. - absolute_args = field(CPreprocessorArgs, CPreprocessorArgs()), + args = field(CPreprocessorArgs, CPreprocessorArgs()), # Header specs headers = field(list[CHeader], []), # Those should be mutually exclusive with normal headers as per documentation raw_headers = field(list[Artifact], []), # Directories to be included via -I, [arglike things] - include_dirs = field(list["label_relative_path"], []), + include_dirs = field(list[CellPath], []), # Directories to be included via -isystem, [arglike things] system_include_dirs = field([SystemIncludeDirs, None], None), # Whether to compile with modules support uses_modules = field(bool, False), # Modular args to set when modules are in use, [arglike things] modular_args = field(list[typing.Any], []), - modulemap_path = field(typing.Any, None), + # Path to the modulemap which defines the API exposed to Swift + modulemap_path = field([cmd_args, None], None), + # Header units to load transitively and supporting args. + header_units = field(list[HeaderUnit], []), ) # Methods for transitive_sets must be declared prior to their use. 
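
With the `relative_args`/`absolute_args` split collapsed into a single `args` field, the `_cpreprocessor_args` projection below reduces to one concatenation; a minimal Python sketch (plain dicts stand in for the `CPreprocessor` record):

```python
# Sketch of the args projection: concatenate the single merged `args`
# list across every CPreprocessor value in the transitive set.
def cpreprocessor_args(pres):
    out = []
    for pre in pres:
        out.extend(pre["args"])
    return out

assert cpreprocessor_args([{"args": ["-DFOO"]}, {"args": ["-Iinclude"]}]) == ["-DFOO", "-Iinclude"]
```
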
@@ -68,13 +77,7 @@ CPreprocessor = record( def _cpreprocessor_args(pres: list[CPreprocessor]): args = cmd_args() for pre in pres: - args.add(pre.relative_args.args) - return args - -def _cpreprocessor_abs_args(pres: list[CPreprocessor]): - args = cmd_args() - for pre in pres: - args.add(pre.absolute_args.args) + args.add(pre.args.args) return args def _cpreprocessor_modular_args(pres: list[CPreprocessor]): @@ -83,16 +86,21 @@ def _cpreprocessor_modular_args(pres: list[CPreprocessor]): args.add(pre.modular_args) return args -def _cpreprocessor_file_prefix_args(pres: list[CPreprocessor]): +def _cpreprocessor_header_units_args(pres: list[CPreprocessor]): args = cmd_args() for pre in pres: - args.add(pre.relative_args.file_prefix_args) + for h in pre.header_units: + args.add(cmd_args(h.module, format = "-fmodule-file={}={{}}".format(h.name))) + args.add(cmd_args(h.include_dir, format = "-I{}")) + args.add(cmd_args(h.include_dir, format = "-fmodule-map-file={}/module.modulemap")) + if h.import_include: + args.add(["-include", h.import_include]) return args -def _cpreprocessor_abs_file_prefix_args(pres: list[CPreprocessor]): +def _cpreprocessor_file_prefix_args(pres: list[CPreprocessor]): args = cmd_args() for pre in pres: - args.add(pre.absolute_args.file_prefix_args) + args.add(pre.args.file_prefix_args) return args def _cpreprocessor_include_dirs(pres: list[CPreprocessor]): @@ -118,10 +126,9 @@ def _cpreprocessor_uses_modules(children: list[bool], pres: [list[CPreprocessor] # exported pp info and one for not-exported). CPreprocessorTSet = transitive_set( args_projections = { - "abs_args": _cpreprocessor_abs_args, - "abs_file_prefix_args": _cpreprocessor_abs_file_prefix_args, "args": _cpreprocessor_args, "file_prefix_args": _cpreprocessor_file_prefix_args, + "header_units_args": _cpreprocessor_header_units_args, "include_dirs": _cpreprocessor_include_dirs, "modular_args": _cpreprocessor_modular_args, }, @@ -149,15 +156,6 @@ CPreprocessorForTestsInfo = provider( }, ) -# Preprocessor flags -def cxx_attr_preprocessor_flags(ctx: AnalysisContext, ext: str) -> list[typing.Any]: - return ( - ctx.attrs.preprocessor_flags + - cxx_by_language_ext(ctx.attrs.lang_preprocessor_flags, ext) + - flatten(cxx_by_platform(ctx, ctx.attrs.platform_preprocessor_flags)) + - flatten(cxx_by_platform(ctx, cxx_by_language_ext(ctx.attrs.lang_platform_preprocessor_flags, ext))) - ) - def cxx_attr_exported_preprocessor_flags(ctx: AnalysisContext) -> list[typing.Any]: return ( ctx.attrs.exported_preprocessor_flags + @@ -192,7 +190,7 @@ def format_system_include_arg(path: cmd_args, compiler_type: str) -> list[cmd_ar else: return [cmd_args("-isystem"), path] -def cxx_exported_preprocessor_info(ctx: AnalysisContext, headers_layout: CxxHeadersLayout, project_root_file: Artifact, extra_preprocessors: list[CPreprocessor] = []) -> CPreprocessor: +def cxx_exported_preprocessor_info(ctx: AnalysisContext, headers_layout: CxxHeadersLayout, extra_preprocessors: list[CPreprocessor] = []) -> CPreprocessor: """ This rule's preprocessor info which is both applied to the compilation of its source and propagated to the compilation of dependent's sources. 
@@ -237,25 +235,28 @@ def cxx_exported_preprocessor_info(ctx: AnalysisContext, headers_layout: CxxHead include_dirs.extend([ctx.label.path.add(x) for x in ctx.attrs.public_include_directories]) system_include_dirs.extend([ctx.label.path.add(x) for x in ctx.attrs.public_system_include_directories]) - relative_args = _get_exported_preprocessor_args(ctx, exported_header_map, style, compiler_type, raw_headers, extra_preprocessors, None) - absolute_args = _get_exported_preprocessor_args(ctx, exported_header_map, style, compiler_type, raw_headers, extra_preprocessors, project_root_file) + args = _get_exported_preprocessor_args(ctx, exported_header_map, style, compiler_type, raw_headers, extra_preprocessors) modular_args = [] for pre in extra_preprocessors: modular_args.extend(pre.modular_args) + header_units = [] + for pre in extra_preprocessors: + header_units.extend(pre.header_units) + return CPreprocessor( - relative_args = CPreprocessorArgs(args = relative_args.args, file_prefix_args = relative_args.file_prefix_args), - absolute_args = CPreprocessorArgs(args = absolute_args.args, file_prefix_args = absolute_args.file_prefix_args), + args = CPreprocessorArgs(args = args.args, file_prefix_args = args.file_prefix_args), headers = exported_headers, raw_headers = raw_headers, include_dirs = include_dirs, system_include_dirs = SystemIncludeDirs(compiler_type = compiler_type, include_dirs = system_include_dirs), modular_args = modular_args, + header_units = header_units, ) -def _get_exported_preprocessor_args(ctx: AnalysisContext, headers: dict[str, Artifact], style: HeaderStyle, compiler_type: str, raw_headers: list[Artifact], extra_preprocessors: list[CPreprocessor], project_root_file: [Artifact, None]) -> CPreprocessorArgs: - header_root = prepare_headers(ctx, headers, "buck-headers", project_root_file) +def _get_exported_preprocessor_args(ctx: AnalysisContext, headers: dict[str, Artifact], style: HeaderStyle, compiler_type: str, raw_headers: list[Artifact], extra_preprocessors: list[CPreprocessor]) -> CPreprocessorArgs: + header_root = prepare_headers(ctx, headers, "buck-headers") # Process args to handle the `$(cxx-header-tree)` macro. args = [] @@ -278,23 +279,22 @@ def _get_exported_preprocessor_args(ctx: AnalysisContext, headers: dict[str, Art if raw_headers: # NOTE(agallagher): It's a bit weird adding an "empty" arg, but this # appears to do the job (and not e.g. expand to `""`). 
- args.append(cmd_args().hidden(raw_headers)) + args.append(cmd_args(hidden = raw_headers)) # Append any extra preprocessor info passed in via the constructor params for pre in extra_preprocessors: - args.extend(pre.absolute_args.args if project_root_file else pre.relative_args.args) + args.extend(pre.args.args) return CPreprocessorArgs(args = args, file_prefix_args = file_prefix_args) def cxx_private_preprocessor_info( ctx: AnalysisContext, headers_layout: CxxHeadersLayout, - project_root_file: [Artifact, None], raw_headers: list[Artifact] = [], extra_preprocessors: list[CPreprocessor] = [], non_exported_deps: list[Dependency] = [], is_test: bool = False) -> (CPreprocessor, list[CPreprocessor]): - private_preprocessor = _cxx_private_preprocessor_info(ctx, headers_layout, raw_headers, extra_preprocessors, project_root_file) + private_preprocessor = _cxx_private_preprocessor_info(ctx, headers_layout, raw_headers, extra_preprocessors) test_preprocessors = [] if is_test: @@ -309,8 +309,7 @@ def _cxx_private_preprocessor_info( ctx: AnalysisContext, headers_layout: CxxHeadersLayout, raw_headers: list[Artifact], - extra_preprocessors: list[CPreprocessor], - project_root_file: [Artifact, None]) -> CPreprocessor: + extra_preprocessors: list[CPreprocessor]) -> CPreprocessor: """ This rule's preprocessor info which is only applied to the compilation of its source, and not propagated to dependents. @@ -352,23 +351,21 @@ def _cxx_private_preprocessor_info( all_raw_headers.extend(raw_headers) include_dirs.extend([ctx.label.path.add(x) for x in ctx.attrs.include_directories]) - relative_args = _get_private_preprocessor_args(ctx, header_map, compiler_type, all_raw_headers, None) - absolute_args = _get_private_preprocessor_args(ctx, header_map, compiler_type, all_raw_headers, project_root_file) + args = _get_private_preprocessor_args(ctx, header_map, compiler_type, all_raw_headers) return CPreprocessor( - relative_args = CPreprocessorArgs(args = relative_args.args, file_prefix_args = relative_args.file_prefix_args), - absolute_args = CPreprocessorArgs(args = absolute_args.args, file_prefix_args = absolute_args.file_prefix_args), + args = CPreprocessorArgs(args = args.args, file_prefix_args = args.file_prefix_args), headers = headers, raw_headers = all_raw_headers, include_dirs = include_dirs, uses_modules = uses_modules, ) -def _get_private_preprocessor_args(ctx: AnalysisContext, headers: dict[str, Artifact], compiler_type: str, all_raw_headers: list[Artifact], project_root_file: [Artifact, None]) -> CPreprocessorArgs: +def _get_private_preprocessor_args(ctx: AnalysisContext, headers: dict[str, Artifact], compiler_type: str, all_raw_headers: list[Artifact]) -> CPreprocessorArgs: # Create private header tree and propagate via args. - args = [] + args = get_target_sdk_version_flags(ctx) file_prefix_args = [] - header_root = prepare_headers(ctx, headers, "buck-private-headers", project_root_file) + header_root = prepare_headers(ctx, headers, "buck-private-headers") if header_root != None: args.extend(_format_include_arg("-I", header_root.include_path, compiler_type)) if header_root.file_prefix_args != None: @@ -379,7 +376,7 @@ def _get_private_preprocessor_args(ctx: AnalysisContext, headers: dict[str, Arti if all_raw_headers: # NOTE(agallagher): It's a bit weird adding an "empty" arg, but this # appears to do the job (and not e.g. expand to `""`). 
- args.append(cmd_args().hidden(all_raw_headers)) + args.append(cmd_args(hidden = all_raw_headers)) return CPreprocessorArgs(args = args, file_prefix_args = file_prefix_args) diff --git a/prelude/cxx/shared_library_interface.bzl b/prelude/cxx/shared_library_interface.bzl index a5bc1520f13a1..3b7c58a0fc67f 100644 --- a/prelude/cxx/shared_library_interface.bzl +++ b/prelude/cxx/shared_library_interface.bzl @@ -5,9 +5,21 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +load("@prelude//:artifact_tset.bzl", "ArtifactTSet", "make_artifact_tset", "project_artifacts") load("@prelude//:paths.bzl", "paths") +load("@prelude//cxx:preprocessor.bzl", "CPreprocessor", "CPreprocessorInfo") +load("@prelude//cxx:target_sdk_version.bzl", "get_target_triple") +load("@prelude//utils:arglike.bzl", "ArgLike") # @unused Used as a type +load("@prelude//utils:lazy.bzl", "lazy") load(":cxx_context.bzl", "get_cxx_toolchain_info") load(":cxx_toolchain_types.bzl", "CxxToolchainInfo") +load(":headers.bzl", "CHeader") + +# The transitive artifacts of partial shared interface for a library. +# These need to be collected and merged to produce the final shared interface. +SharedInterfaceInfo = provider(fields = { + "interfaces": provider_field(ArtifactTSet), +}) def _shared_library_interface( ctx: AnalysisContext, @@ -71,7 +83,6 @@ def shared_library_interface( shared_lib = shared_lib, identifier = shared_lib.short_path, ), - with_artifacts = True, ).artifact("shared_library_interface") return ctx.actions.assert_short_path(shared_lib_interface_artifact, short_path = output) else: @@ -81,3 +92,136 @@ def shared_library_interface( shared_lib = shared_lib, identifier = shared_lib.short_path, ) + +def generate_exported_symbols(ctx: AnalysisContext, exported_headers: list[CHeader], exported_preprocessor: CPreprocessor, transitive_preprocessor: list[CPreprocessorInfo]) -> Artifact: + # Use the c++ compiler to correctly generate c++ symbols. + compiler_info = get_cxx_toolchain_info(ctx).cxx_compiler_info + + # Collect the exported headers for this library and create a filelist for them. + # The exported headers are possibly hidden behind a modulemap, + # so cannot be fetched directly from exported_preprocessor. 
+ filelist_headers = [] + for h in exported_headers: + filelist_headers.append({ + "path": h.artifact, + "type": "public", + }) + + # We need to collect all raw_headers that belong in a public include dir + include_dirs = ctx.attrs.public_include_directories + ctx.attrs.public_system_include_directories + include_dirs = [d if d.endswith("/") else d + "/" for d in include_dirs] + if len(include_dirs) > 0: + filelist_headers.extend([ + { + "path": h, + "type": "public", + } + for h in exported_preprocessor.raw_headers + if lazy.is_any(lambda d: h.short_path.startswith(d), include_dirs) + ]) + + filelist_contents = { + "headers": filelist_headers, + "version": "2", + } + filelist = ctx.actions.write_json( + paths.join("__tbd__", ctx.attrs.name + "_exported_headers.json"), + filelist_contents, + with_inputs = True, + ) + + # Run the shlib interface tool with the filelist and required args + output_file = ctx.actions.declare_output( + paths.join("__tbd__", ctx.attrs.name + ".exported_symbols.txt"), + ) + args = cmd_args(get_cxx_toolchain_info(ctx).linker_info.mk_shlib_intf[RunInfo]) + args.add([ + "installapi", + "--filelist", + filelist, + "-o", + output_file.as_output(), + "--target", + get_target_triple(ctx), + ]) + args.add(cmd_args(compiler_info.preprocessor_flags, prepend = "-Xparser")) + args.add(cmd_args(compiler_info.compiler_flags, prepend = "-Xparser")) + args.add(cmd_args(exported_preprocessor.args.args, prepend = "-Xparser")) + for ppinfo in transitive_preprocessor: + args.add(cmd_args(ppinfo.set.project_as_args("args"), prepend = "-Xparser")) + args.add(cmd_args(ppinfo.set.project_as_args("include_dirs"), prepend = "-Xparser")) + + # We need the targets compiler flags to pick up base flags that are applied + # in the macros instead of the toolchain for historical reasons. + args.add(cmd_args(ctx.attrs.compiler_flags, prepend = "-Xparser")) + + ctx.actions.run( + args, + category = "exported_symbols", + identifier = ctx.attrs.name, + ) + + return output_file + +def generate_tbd_with_symbols(ctx: AnalysisContext, soname: str, exported_symbol_inputs: ArtifactTSet, links: list[ArgLike]) -> Artifact: + # Use arglists for the inputs, otherwise we will overflow ARGMAX + symbol_args = project_artifacts(ctx.actions, [exported_symbol_inputs]) + input_argfile, _ = ctx.actions.write("__tbd__/" + ctx.attrs.name + ".symbols.filelist", symbol_args, allow_args = True) + + # Run the shlib interface tool with the merge command + tbd_file = ctx.actions.declare_output( + paths.join("__tbd__", ctx.attrs.name + ".merged.tbd"), + ) + args = cmd_args( + get_cxx_toolchain_info(ctx).linker_info.mk_shlib_intf[RunInfo], + "merge", + "-install_name", + "@rpath/" + soname, + "--symbols-filelist", + input_argfile, + "--target", + get_target_triple(ctx), + "-o", + tbd_file.as_output(), + hidden = symbol_args, + ) + + # Pass through the linker args as we need to honour any flags + # related to exported or unexported symbols. 
+ for link_args in links: + args.add(cmd_args(link_args, prepend = "-Xparser")) + + ctx.actions.run( + args, + category = "generate_tbd", + identifier = ctx.attrs.name, + ) + return tbd_file + +def create_shared_interface_info(ctx: AnalysisContext, symbol_artifacts: list[Artifact], deps: list[Dependency]) -> [SharedInterfaceInfo, None]: + children = [d[SharedInterfaceInfo].interfaces for d in deps if SharedInterfaceInfo in d] + if len(symbol_artifacts) == 0 and len(children) == 0: + return None + + return SharedInterfaceInfo( + interfaces = make_artifact_tset( + actions = ctx.actions, + label = ctx.label, + artifacts = symbol_artifacts, + children = children, + ), + ) + +def create_shared_interface_info_with_children(ctx: AnalysisContext, symbol_artifacts: list[Artifact], children: list[SharedInterfaceInfo]) -> [SharedInterfaceInfo, None]: + children = [d.interfaces for d in children] + if len(symbol_artifacts) == 0 and len(children) == 0: + return None + + return SharedInterfaceInfo( + interfaces = make_artifact_tset( + actions = ctx.actions, + label = ctx.label, + artifacts = symbol_artifacts, + children = children, + ), + ) diff --git a/prelude/cxx/symbols.bzl b/prelude/cxx/symbols.bzl index 5204b5a490f21..dbd8ca84c9d36 100644 --- a/prelude/cxx/symbols.bzl +++ b/prelude/cxx/symbols.bzl @@ -6,7 +6,13 @@ # of this source tree. load("@prelude//:paths.bzl", "paths") -load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxToolchainInfo") +load( + "@prelude//cxx:cxx_toolchain_types.bzl", + "CxxToolchainInfo", + "LinkerType", +) +load("@prelude//cxx:cxx_utility.bzl", "cxx_attrs_get_allow_cache_upload") +load("@prelude//os_lookup:defs.bzl", "OsLookup") def _extract_symbol_names( ctx: AnalysisContext, @@ -15,6 +21,7 @@ def _extract_symbol_names( objects: list[Artifact], category: str, identifier: [str, None] = None, + defined_only: bool = False, undefined_only: bool = False, dynamic: bool = False, prefer_local: bool = False, @@ -29,6 +36,9 @@ def _extract_symbol_names( if not objects: fail("no objects provided") + if defined_only and undefined_only: + fail("only one of defined_only and undefined_only should be True") + nm = cxx_toolchain.binary_utilities_info.nm output = ctx.actions.declare_output(paths.join("__symbols__", name)) @@ -42,29 +52,73 @@ def _extract_symbol_names( nm_flags += "u" # darwin objects don't have dynamic symbol tables. - if dynamic and cxx_toolchain.linker_info.type != "darwin": + if dynamic and cxx_toolchain.linker_info.type != LinkerType("darwin"): nm_flags += "D" - script = ( - "set -euo pipefail; " + - '"$1" {} "${{@:2}}"'.format(nm_flags) + - # Grab only the symbol name field. - ' | cut -d" " -f2 ' + - # Strip off ABI Version (@...) when using llvm-nm to keep compat with buck1 - " | cut -d@ -f1 " + - # Sort and dedup symbols. Use the `C` locale and do it in-memory to - # make it significantly faster. CAUTION: if ten of these processes - # run in parallel, they'll have cumulative allocations larger than RAM. - " | LC_ALL=C sort -S 10% -u > {}" - ) + # llvm-nm supports -U for this but gnu nm doesn't. 
+ if defined_only: + nm_flags += " --defined-only" - ctx.actions.run( - [ + is_windows = hasattr(ctx.attrs, "_exec_os_type") and ctx.attrs._exec_os_type[OsLookup].platform == "windows" + + if is_windows: + script = ( + """& {{ + $result = & $args[0] {} $($args[1..($args.Length-1)] -join " ") + $lines = $result -split '`n' + $lines = $lines | ForEach-Object {{ ($_ -split ' ')[1] }} + $lines = $lines | ForEach-Object {{ ($_ -split '@')[0] }} + $lines = $lines | Where-Object {{ $_ -notmatch '__odr_asan_gen_.*' }} + $lines = $lines | Sort-Object -Unique + # Avoid a trailing newline for empty symbol lists + if ($lines.count -eq 0) {{ + [IO.File]::WriteAllText('{{}}', $lines) + }} else {{ + [IO.File]::WriteAllLines('{{}}', $lines) + }} + }}""".format(nm_flags) + ) + symbol_extraction_args = [ + "powershell", + "-Command", + cmd_args(output.as_output(), format = script), + ] + else: + script = ( + "set -euo pipefail; " + + '"$1" {} "${{@:2}}"'.format(nm_flags) + + # Grab only the symbol name field. + ' | cut -d" " -f2 ' + + # Strip off ABI Version (@...) when using llvm-nm to keep compat with buck1 + " | cut -d@ -f1 " + + # Remove ASAN ODR generated symbols: __odr_asan_gen_*. They are + # handled by a separate asan_dynamic_list.txt list of asan patterns. + # BUT MORE IMPORTANTLY, symbols like __odr_asan_XXX[abi:cxx11] force + # lld into a code path that repeatedly does a linear scan of all + # symbols for O(num_patterns_with_bracket * num_symbols) (because of + # the [] being treated as a glob pattern). This totally tanks link + # time for builds with sanitizers! Anecdotally, a binary with 3.7M + # symbols and 2K __odr_asan_XXX[abi:cxx11] can spend 6 mins + # processing patterns and 10s actually linking. We use sed instead + # of grep -v here to avoid an error exit code when there's no input + # symbols, which is not an error for us. + ' | sed "/__odr_asan_gen_.*/d"' + + # Sort and dedup symbols. Use the `C` locale and do it in-memory to + # make it significantly faster. CAUTION: if ten of these processes + # run in parallel, they'll have cumulative allocations larger than RAM. 
+ " | LC_ALL=C sort -S 10% -u > {}" + ) + symbol_extraction_args = [ "/usr/bin/env", "bash", "-c", cmd_args(output.as_output(), format = script), "", + ] + + ctx.actions.run( + symbol_extraction_args + + [ nm, ] + objects, @@ -75,6 +129,7 @@ def _extract_symbol_names( weight_percentage = 15, # 10% + a little padding allow_cache_upload = allow_cache_upload, ) + return output _SymbolsInfo = provider(fields = { @@ -94,7 +149,7 @@ def _anon_extract_symbol_names_impl(ctx): objects = ctx.attrs.objects, prefer_local = ctx.attrs.prefer_local, undefined_only = ctx.attrs.undefined_only, - allow_cache_upload = ctx.attrs.allow_cache_upload, + allow_cache_upload = cxx_attrs_get_allow_cache_upload(ctx.attrs), ) return [DefaultInfo(), _SymbolsInfo(artifact = output)] @@ -141,7 +196,6 @@ def extract_symbol_names( output = name, **kwargs ), - with_artifacts = True, ).artifact("symbols") return ctx.actions.assert_short_path(artifact, short_path = paths.join("__symbols__", name)) @@ -153,6 +207,29 @@ def extract_symbol_names( **kwargs ) +def extract_defined_syms( + ctx: AnalysisContext, + cxx_toolchain: CxxToolchainInfo, + output: Artifact, + category_prefix: str, + prefer_local: bool = False, + anonymous: bool = False, + allow_cache_upload: bool = False) -> Artifact: + return extract_symbol_names( + ctx = ctx, + cxx_toolchain = cxx_toolchain, + name = output.short_path + ".defined_syms.txt", + objects = [output], + dynamic = True, + global_only = True, + defined_only = True, + category = "{}_defined_syms".format(category_prefix), + identifier = output.short_path, + prefer_local = prefer_local, + anonymous = anonymous, + allow_cache_upload = allow_cache_upload, + ) + def extract_undefined_syms( ctx: AnalysisContext, cxx_toolchain: CxxToolchainInfo, @@ -213,7 +290,7 @@ def _create_symbols_file_from_script( """ all_symbol_files = actions.write(name + ".symbols", symbol_files) - all_symbol_files = cmd_args(all_symbol_files).hidden(symbol_files) + all_symbol_files = cmd_args(all_symbol_files, hidden = symbol_files) output = actions.declare_output(name) cmd = [ "/usr/bin/env", @@ -241,7 +318,7 @@ def get_undefined_symbols_args( category: [str, None] = None, identifier: [str, None] = None, prefer_local: bool = False) -> cmd_args: - if cxx_toolchain.linker_info.type == "gnu": + if cxx_toolchain.linker_info.type == LinkerType("gnu"): # linker script is only supported in gnu linkers linker_script = create_undefined_symbols_linker_script( ctx.actions, diff --git a/prelude/cxx/target_sdk_version.bzl b/prelude/cxx/target_sdk_version.bzl new file mode 100644 index 0000000000000..df8c97ba445ba --- /dev/null +++ b/prelude/cxx/target_sdk_version.bzl @@ -0,0 +1,102 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//cxx:cxx_context.bzl", "get_cxx_platform_info", "get_cxx_toolchain_info") + +def _version_is_greater(left: str, right: str) -> bool: + # Assumes version strings are in dotted format 1.2.4. + # After comparing components the longer remainder is + # considered larger. 
+ left_components = left.split(".") + right_components = right.split(".") + for pair in zip(left_components, right_components): + x = int(pair[0]) + y = int(pair[1]) + if x < y: + return False + elif x > y: + return True + + return len(left_components) > len(right_components) + +def get_toolchain_target_sdk_version(ctx: AnalysisContext) -> [None, str]: + min_version = ctx.attrs.min_sdk_version + target_version = ctx.attrs.target_sdk_version + if min_version == None and target_version == None: + return None + elif min_version != None and target_version == None: + return min_version + elif min_version == None and target_version != None: + fail("Cannot set target_sdk_version without min_sdk_version") + elif _version_is_greater(min_version, target_version): + warning("Target SDK version {} is less than minimum supported version {}".format(target_version, min_version)) + return min_version + else: + return target_version + +def get_target_sdk_version(ctx: AnalysisContext) -> [None, str]: + toolchain_target_sdk_version = get_cxx_toolchain_info(ctx).target_sdk_version + target_sdk_version = getattr(ctx.attrs, "target_sdk_version", None) + if toolchain_target_sdk_version == None and target_sdk_version == None: + return None + elif toolchain_target_sdk_version != None and target_sdk_version == None: + return toolchain_target_sdk_version + elif toolchain_target_sdk_version == None and target_sdk_version != None: + return target_sdk_version + elif _version_is_greater(target_sdk_version, toolchain_target_sdk_version): + # The requested target_sdk_version on the toolchain must be >= + # the version set on the target, which should be the minimum + # allowed for this version to build. + fail("{} has target_sdk_version {}, which is larger than the toolchain target_sdk_version of {}".format( + ctx.label, + target_sdk_version, + toolchain_target_sdk_version, + )) + else: + return toolchain_target_sdk_version + +_PLATFORM_TARGET_TRIPLE_MAP = { + "appletvos": "{architecture}-apple-tvos{version}", + "appletvsimulator": "{architecture}-apple-tvos{version}-simulator", + "iphoneos": "{architecture}-apple-ios{version}", + "iphonesimulator": "{architecture}-apple-ios{version}-simulator", + "maccatalyst": "{architecture}-apple-ios{version}-macabi", + "macosx": "{architecture}-apple-macosx{version}", + "visionos": "{architecture}-apple-xros{version}", + "visionsimulator": "{architecture}-apple-xros{version}-simulator", + "watchos": "{architecture}-apple-watchos{version}", + "watchsimulator": "{architecture}-apple-watchos{version}-simulator", +} + +def _format_target_triple(ctx: AnalysisContext, version: str) -> str: + platform_info = get_cxx_platform_info(ctx) + platform_components = platform_info.name.split("-") + if platform_components[0] not in _PLATFORM_TARGET_TRIPLE_MAP: + fail("missing target triple for {}".format(platform_components[0])) + + triple_format_str = _PLATFORM_TARGET_TRIPLE_MAP[platform_components[0]] + return triple_format_str.format(architecture = platform_components[1], version = version) + +def get_target_triple(ctx: AnalysisContext) -> [None, str]: + target_sdk_version = get_target_sdk_version(ctx) + if target_sdk_version == None: + return None + + return _format_target_triple(ctx, target_sdk_version) + +def get_unversioned_target_triple(ctx: AnalysisContext) -> str: + return _format_target_triple(ctx, "") + +def get_target_sdk_version_flags(ctx: AnalysisContext) -> list[str]: + if not (hasattr(ctx.attrs, "_cxx_toolchain") or hasattr(ctx.attrs, "_apple_toolchain")): + return [] + + 
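
Putting the pieces above together: `_format_target_triple` splits the cxx platform name into an SDK key and an architecture, then fills the mapped format string to get a clang-style target triple. A minimal Python sketch of that behavior; the `"<sdk>-<architecture>"` platform-name shape is an assumption inferred from how the map is indexed:

```python
# Illustrative restatement of _format_target_triple, not the Starlark code itself.
PLATFORM_TARGET_TRIPLE_MAP = {
    "iphoneos": "{architecture}-apple-ios{version}",
    "iphonesimulator": "{architecture}-apple-ios{version}-simulator",
    "macosx": "{architecture}-apple-macosx{version}",
}

def format_target_triple(platform_name: str, version: str) -> str:
    # Platform names are assumed to look like "<sdk>-<architecture>[-...]".
    sdk, architecture = platform_name.split("-")[:2]
    return PLATFORM_TARGET_TRIPLE_MAP[sdk].format(
        architecture=architecture, version=version
    )

assert format_target_triple("iphoneos-arm64", "17.0") == "arm64-apple-ios17.0"
# An empty version yields the unversioned triple, as in get_unversioned_target_triple:
assert format_target_triple("macosx-x86_64", "") == "x86_64-apple-macosx"
```
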
target_triple = get_target_triple(ctx) + if target_triple == None: + return [] + + return ["-target", target_triple] diff --git a/prelude/cxx/tools/BUCK b/prelude/cxx/tools/BUCK deleted file mode 100644 index 0eaa21b4115d9..0000000000000 --- a/prelude/cxx/tools/BUCK +++ /dev/null @@ -1,41 +0,0 @@ -load(":defs.bzl", "cxx_hacks") - -prelude = native - -prelude.python_bootstrap_binary( - name = "make_comp_db", - main = "make_comp_db.py", - visibility = ["PUBLIC"], -) - -prelude.python_bootstrap_binary( - name = "dep_file_processor", - main = "dep_file_processor.py", - deps = [ - ":dep_file_processors", - ], - visibility = ["PUBLIC"], -) - -prelude.python_bootstrap_library( - name = "dep_file_processors", - srcs = [ - "makefile_to_dep_file.py", - "show_headers_to_dep_file.py", - "show_includes_to_dep_file.py", - "dep_file_utils.py", - ], - visibility = ["PUBLIC"], -) - -prelude.python_bootstrap_binary( - name = "linker_wrapper", - main = "linker_wrapper.py", - visibility = ["PUBLIC"], -) - -# Required to support the $(cxx-header-tree) macro -cxx_hacks( - name = "cxx_hacks", - visibility = ["PUBLIC"], -) diff --git a/prelude/cxx/tools/BUCK.v2 b/prelude/cxx/tools/BUCK.v2 new file mode 100644 index 0000000000000..8c4f47914b8e8 --- /dev/null +++ b/prelude/cxx/tools/BUCK.v2 @@ -0,0 +1,86 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") +load(":defs.bzl", "cxx_hacks", "cxx_internal_tools") + +oncall("build_infra") + +source_listing() + +prelude = native + +cxx_internal_tools( + name = "internal_tools", + visibility = ["PUBLIC"], +) + +prelude.python_bootstrap_binary( + name = "hmap_wrapper.py", + main = "hmap_wrapper.py", +) + +prelude.command_alias( + name = "hmap_wrapper", + args = [ + "--hmap-tool=$(exe_target prelude//third-party/hmaptool:hmaptool)", + ], + exe = ":hmap_wrapper.py", + labels = ["buck2-only"], +) + +prelude.python_bootstrap_binary( + name = "make_comp_db", + main = "make_comp_db.py", +) + +prelude.python_bootstrap_binary( + name = "dep_file_processor", + main = "dep_file_processor.py", + visibility = ["PUBLIC"], + deps = [ + ":dep_file_processors", + ], +) + +prelude.python_bootstrap_library( + name = "dep_file_processors", + srcs = [ + "dep_file_utils.py", + "makefile_to_dep_file.py", + "show_headers_to_dep_file.py", + "show_includes_to_dep_file.py", + ], +) + +prelude.python_bootstrap_binary( + name = "linker_wrapper", + main = "linker_wrapper.py", + visibility = ["PUBLIC"], +) + +prelude.python_bootstrap_binary( + name = "remap_cwd", + main = "remap_cwd.py", +) + +prelude.python_bootstrap_binary( + name = "simple_ar", + main = "simple_ar.py", + visibility = ["PUBLIC"], +) + +prelude.python_bootstrap_binary( + name = "concatenate_diagnostics", + main = "concatenate_diagnostics.py", + visibility = ["PUBLIC"], +) + +prelude.python_bootstrap_binary( + name = "stderr_to_file", + main = "stderr_to_file.py", + visibility = ["PUBLIC"], +) + +# Required to support the $(cxx-header-tree) macro +cxx_hacks( + name = "cxx_hacks", + visibility = ["PUBLIC"], +) diff --git a/prelude/cxx/tools/concatenate_diagnostics.py b/prelude/cxx/tools/concatenate_diagnostics.py new file mode 100755 index 0000000000000..d30c2cba2a685 --- /dev/null +++ b/prelude/cxx/tools/concatenate_diagnostics.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +""" +Usage: concatenate_diagnostics.py --out path/to/output.txt [path/to/input.txt...] +""" + +import argparse +from pathlib import Path +from typing import List, NamedTuple + + +class Args(NamedTuple): + out: Path + subtarget_diagnostics: List[Path] + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--out", type=Path, required=True) + parser.add_argument("subtarget_diagnostics", nargs="*", type=Path) + args = Args(**vars(parser.parse_args())) + + needs_blank_line = False + with open(args.out, "wb") as out: + for f in args.subtarget_diagnostics: + with open(f, "rb") as f: + content = f.read() + if len(content) == 0: + continue + if needs_blank_line: + out.write(b"\n") + out.write(content) + needs_blank_line = True + + +if __name__ == "__main__": + main() diff --git a/prelude/cxx/tools/defs.bzl b/prelude/cxx/tools/defs.bzl index fd4d9c4193496..aac498141d97e 100644 --- a/prelude/cxx/tools/defs.bzl +++ b/prelude/cxx/tools/defs.bzl @@ -5,6 +5,35 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxInternalTools", "DistLtoToolsInfo") + +def _cxx_internal_tools_impl(ctx: AnalysisContext) -> list[Provider]: + return [ + DefaultInfo(), + CxxInternalTools( + concatenate_diagnostics = ctx.attrs.concatenate_diagnostics[RunInfo], + dep_file_processor = ctx.attrs.dep_file_processor[RunInfo], + dist_lto = ctx.attrs.dist_lto[DistLtoToolsInfo], + hmap_wrapper = ctx.attrs.hmap_wrapper[RunInfo], + make_comp_db = ctx.attrs.make_comp_db[RunInfo], + remap_cwd = ctx.attrs.remap_cwd[RunInfo], + stderr_to_file = ctx.attrs.stderr_to_file[RunInfo], + ), + ] + +cxx_internal_tools = rule( + impl = _cxx_internal_tools_impl, + attrs = { + "concatenate_diagnostics": attrs.default_only(attrs.dep(providers = [RunInfo], default = "prelude//cxx/tools:concatenate_diagnostics")), + "dep_file_processor": attrs.default_only(attrs.dep(providers = [RunInfo], default = "prelude//cxx/tools:dep_file_processor")), + "dist_lto": attrs.default_only(attrs.dep(providers = [DistLtoToolsInfo], default = "prelude//cxx/dist_lto/tools:dist_lto_tools")), + "hmap_wrapper": attrs.default_only(attrs.dep(providers = [RunInfo], default = "prelude//cxx/tools:hmap_wrapper")), + "make_comp_db": attrs.default_only(attrs.dep(providers = [RunInfo], default = "prelude//cxx/tools:make_comp_db")), + "remap_cwd": attrs.default_only(attrs.dep(providers = [RunInfo], default = "prelude//cxx/tools:remap_cwd")), + "stderr_to_file": attrs.default_only(attrs.dep(providers = [RunInfo], default = "prelude//cxx/tools:stderr_to_file")), + }, +) + def _cxx_hacks_impl(_ctx): return [DefaultInfo(), TemplatePlaceholderInfo( unkeyed_variables = { diff --git a/prelude/cxx/tools/hmap_wrapper.py b/prelude/cxx/tools/hmap_wrapper.py new file mode 100755 index 0000000000000..3e04a7da0e747 --- /dev/null +++ b/prelude/cxx/tools/hmap_wrapper.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
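
The contract of `concatenate_diagnostics.py` above is: empty inputs contribute nothing, and consecutive non-empty diagnostic files are separated by exactly one blank line, with no trailing separator. A hypothetical smoke test of that behavior, assuming the script is invocable from the working directory:

```python
import subprocess
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as td:
    a = Path(td, "a.txt")
    a.write_bytes(b"warning: foo\n")
    b = Path(td, "b.txt")
    b.write_bytes(b"")  # empty diagnostics are skipped entirely
    c = Path(td, "c.txt")
    c.write_bytes(b"error: bar\n")
    out = Path(td, "out.txt")

    subprocess.run(
        ["python3", "concatenate_diagnostics.py", "--out", str(out),
         str(a), str(b), str(c)],
        check=True,
    )
    # One blank line between the two non-empty inputs; the empty one vanishes.
    assert out.read_bytes() == b"warning: foo\n\nerror: bar\n"
```
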
+ +import argparse +import itertools +import json +import os +import shlex +import subprocess +import sys +import tempfile + + +def main(argv): + parser = argparse.ArgumentParser() + parser.add_argument("--hmap-tool", required=True) + parser.add_argument("--output", required=True) + parser.add_argument("--mappings-file", required=True) + parser.add_argument("--project-root-file", required=False) + args = parser.parse_args(argv[1:]) + + with open(args.mappings_file, "r") as argsfile: + mapping_args = shlex.split(argsfile.read()) + + if len(mapping_args) % 2 != 0: + parser.error("mappings must be dest-source pairs") + + # Convert the hmap mappings passed on the command line to a dict. + mappings = {} + for src, dst in itertools.zip_longest(*([iter(mapping_args)] * 2)): + mappings[src] = dst + + # NOTE(agallagher): Add a mapping from the mapped path to itself. If + # this is not present, clang will use the mapped path as the new key + # and continue searching subsequent header maps, which has a couple + # implications: a) it's slower, as we still search every header map and + # b) it means we need to use a `-I` anchor to finally terminate the + # search. + mappings[dst] = dst + + # Write out the mappings to a JSON file that LLVM's hmaptool accepts. + with tempfile.TemporaryDirectory() as td: + output_filename = os.path.join(td, "output") + with open(output_filename, mode="w") as tf: + json.dump({"mappings": mappings}, tf, sort_keys=True, indent=2) + + # Delegate to LLVM's hmaptool to generate the hmap. + subprocess.check_call( + [sys.executable, args.hmap_tool, "write", output_filename, args.output] + ) + + +sys.exit(main(sys.argv)) diff --git a/prelude/cxx/tools/make_comp_db.py b/prelude/cxx/tools/make_comp_db.py index 7c41b93ef4563..fa31b5f17b94b 100755 --- a/prelude/cxx/tools/make_comp_db.py +++ b/prelude/cxx/tools/make_comp_db.py @@ -20,9 +20,29 @@ import json import shlex import sys +from typing import List -def gen(args): +def process_arguments(arguments: List[str]) -> List[str]: + """ + Process arguments to expand argsfiles. + """ + + combined_arguments = [] + for arg in arguments: + if arg.startswith("@"): + with open(arg[1:]) as argsfile: + # The argsfile's arguments are separated by newlines; we + # don't want those included in the argument list. + lines = [" ".join(shlex.split(line)) for line in argsfile.readlines()] + # Support nested argsfiles. + combined_arguments.extend(process_arguments(lines)) + else: + combined_arguments.append(arg) + return combined_arguments + + +def gen(args: argparse.Namespace) -> None: """ Generate a single compilation command in JSON form. """ @@ -30,24 +50,13 @@ def gen(args): entry = {} entry["file"] = args.directory + "/" + args.filename entry["directory"] = "." - - arguments = [] - for arg in args.arguments: - if arg.startswith("@"): - with open(arg[1:]) as argsfile: - for line in argsfile: - # The argsfile's arguments are separated by newlines; we - # don't want those included in the argument list. - arguments.append(" ".join(shlex.split(line))) - else: - arguments.append(arg) - entry["arguments"] = arguments + entry["arguments"] = process_arguments(args.arguments) json.dump(entry, args.output, indent=2) args.output.close() -def merge(args): +def merge(args: argparse.Namespace) -> None: """ Merge multiple compilation DB commands into a single DB. 
""" @@ -69,7 +78,7 @@ def merge(args): args.output.close() -def main(argv): +def main(argv: List[str]) -> int: parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() @@ -89,6 +98,7 @@ def main(argv): args = parser.parse_args(argv[1:]) args.func(args) + return 0 sys.exit(main(sys.argv)) diff --git a/prelude/cxx/tools/makefile_to_dep_file.py b/prelude/cxx/tools/makefile_to_dep_file.py index ec173cac3295f..edf66fc74dc01 100755 --- a/prelude/cxx/tools/makefile_to_dep_file.py +++ b/prelude/cxx/tools/makefile_to_dep_file.py @@ -8,7 +8,6 @@ # pyre-unsafe -import os import subprocess import sys @@ -88,7 +87,7 @@ def process_dep_file(args): Expects the src dep file to be the first argument, dst dep file to be the second argument, and the command to follow. """ - ret = subprocess.call(args[2:]) + ret = subprocess.call(args[2:], stdin=subprocess.DEVNULL) if ret == 0: rewrite_dep_file(args[0], args[1]) sys.exit(ret) diff --git a/prelude/cxx/tools/remap_cwd.py b/prelude/cxx/tools/remap_cwd.py new file mode 100755 index 0000000000000..b2b75689ba71c --- /dev/null +++ b/prelude/cxx/tools/remap_cwd.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +""" +Usage: remap_cwd.py path/to/clang++ [args...] + +Runs `path/to/clang++ -ffile-prefix-map=$PWD= [args...]` +""" + +import os +import subprocess +import sys + + +if __name__ == "__main__": + cwd = os.getcwd() + # Add trailing slash + cwd = os.path.join(cwd, "") + + ret = subprocess.call( + [ + sys.argv[1], + f"-ffile-prefix-map={cwd}=", + *sys.argv[2:], + ], + ) + sys.exit(ret) diff --git a/prelude/cxx/tools/show_headers_to_dep_file.py b/prelude/cxx/tools/show_headers_to_dep_file.py index b2bf4900e82a0..037eb249374f1 100644 --- a/prelude/cxx/tools/show_headers_to_dep_file.py +++ b/prelude/cxx/tools/show_headers_to_dep_file.py @@ -8,12 +8,14 @@ # pyre-unsafe +import re import sys from subprocess import PIPE, run import dep_file_utils + # output_path -> path to write the dep file to # cmd_args -> command to be run to get dependencies from compiler # input_file -> Path to the file we're generating the dep file for. We need this since @@ -21,12 +23,11 @@ # the file itself, so we need the path to add it manually def process_show_headers_dep_file(output_path, cmd_args, input_file): ret = run(cmd_args, stderr=PIPE, encoding="utf-8") - if ret.returncode == 0: - parse_into_dep_file(ret.stderr, output_path, input_file) + parse_into_dep_file(ret.stderr, output_path, input_file, ret.returncode) sys.exit(ret.returncode) -def parse_into_dep_file(output, dst_path, input_file): +def parse_into_dep_file(output, dst_path, input_file, returncode): """ Convert stderr generated by clang to dep file. 
 This will be a mix of output like:
@@ -45,17 +46,24 @@ def parse_into_dep_file(output, dst_path, input_file):
 
     lines = output.splitlines()
 
-    deps = []
-    for line in lines:
-        if line.startswith("."):
-            path = remove_leading_dots(line.replace(" ", ""))
-            if len(path) > 0:
-                deps.append(path.strip())
+    if returncode == 0:
+        deps = []
+        for line in lines:
+            if line.startswith("."):
+                path = remove_leading_dots(line.replace(" ", ""))
+                if len(path) > 0:
+                    deps.append(path.strip())
+                continue
+            print(line, file=sys.stderr)  # This was a warning/error
+
+        deps.append(input_file)
+        dep_file_utils.normalize_and_write_deps(deps, dst_path)
+    else:
+        for line in lines:
+            if re.match(r"^\.+ ", line):
                 continue
-            print(line, file=sys.stderr)  # This was a warning/error
-
-    deps.append(input_file)
-    dep_file_utils.normalize_and_write_deps(deps, dst_path)
+            print(line, file=sys.stderr)
 
 
 def remove_leading_dots(s):
diff --git a/prelude/cxx/tools/show_includes_to_dep_file.py b/prelude/cxx/tools/show_includes_to_dep_file.py
index a525789a98c8d..ff25b332761fb 100644
--- a/prelude/cxx/tools/show_includes_to_dep_file.py
+++ b/prelude/cxx/tools/show_includes_to_dep_file.py
@@ -11,6 +11,8 @@
 import dep_file_utils
 
 DEP_PREFIX = "Note: including file:"
+
+
 # output_path -> path to write the dep field to
 # cmd_args -> command to be run to get dependencies from compiler
 # source_file -> Path to the file we're generating the dep file for. We need this since
diff --git a/prelude/cxx/tools/simple_ar.py b/prelude/cxx/tools/simple_ar.py
new file mode 100755
index 0000000000000..c6ae5a75361f6
--- /dev/null
+++ b/prelude/cxx/tools/simple_ar.py
@@ -0,0 +1,139 @@
+#!/usr/bin/env python3
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+"""
+For certain platform and linker versions (e.g. LLD 15+ for ELF), the
+linker ignores the archive symbol table and accesses archive members
+directly. When combined with thin archives, this produces trivial
+archive files just embedding object paths, but ar (both GNU and LLVM)
+still requires access to the object files to produce the archive,
+resulting in unnecessary downloads and RE traffic. This implementation
+instead embeds the paths directly without needing the actual files
+present. The trade-offs are:
+- Any problems in the object files will be detected at link time instead
+  of archive creation time. This should be very rare though.
+- Since we can't access the object files, we store their sizes as zero
+  instead of the actual file size in the archive member headers. LLD for
+  ELF handles this correctly but I can't speak to other linkers.
+"""
+
+import argparse
+import os.path
+import typing as t
+from pathlib import Path
+
+
+class ThinArchive:
+    # Thin archives use the "!<thin>\n" magic (regular ar files use "!<arch>\n").
+    MAGIC = b"!<thin>\n"
+
+    def __init__(self, inputs: t.Sequence[Path], output: Path) -> None:
+        self._output = output
+        # llvm-ar always uses the long name member, and we follow suit for simplicity.
+        self._create_name_data(inputs, output.parent)
+
+    def write(self) -> None:
+        with self._output.open("wb") as archive:
+            archive.write(self.MAGIC)
+            self._write_member_header(
+                archive,
+                name="//",
+                mtime="",
+                owner_id="",
+                group_id="",
+                mode="",
+                size=len(self._name_data),
+            )
+            archive.write(self._name_data)
+
+            for offset in self._name_offsets:
+                self._write_member_header(
+                    archive,
+                    name=f"/{offset}",
+                    mtime="0",
+                    owner_id="0",
+                    group_id="0",
+                    mode="644",
+                    size=0,  # as discussed in the file docblock
+                )
+
+    def _create_name_data(self, inputs: t.Sequence[Path], output_dir: Path) -> None:
+        self._name_data = bytearray()
+        self._name_offsets = []
+        for input_path in inputs:
+            # Paths are stored relative to the archive. We use os.path.relpath instead
+            # of Pathlib.relative_to because the latter requires a common root. We use
+            # forward slashes everywhere for consistency and to mimic llvm-ar.
+            relative_path = Path(os.path.relpath(input_path, output_dir)).as_posix()
+            encoded = (relative_path + "/\n").encode()  # add terminator
+            self._name_offsets.append(len(self._name_data))
+            self._name_data.extend(encoded)
+
+        if len(self._name_data) % 2 != 0:
+            self._name_data.extend(b"\n")  # pad to an even size
+
+    def _write_member_header(
+        self,
+        archive: t.BinaryIO,
+        *,
+        name: str,
+        mtime: str,
+        owner_id: str,
+        group_id: str,
+        mode: str,
+        size: int,
+    ) -> None:
+        # https://en.wikipedia.org/wiki/Ar_(Unix)#File_header
+        archive.write(self._encode_header_field(name, 16))
+        archive.write(self._encode_header_field(mtime, 12))
+        archive.write(self._encode_header_field(owner_id, 6))
+        archive.write(self._encode_header_field(group_id, 6))
+        archive.write(self._encode_header_field(mode, 8))
+        archive.write(self._encode_header_field(str(size), 10))
+        archive.write(b"`\n")  # ending characters
+
+    def _encode_header_field(self, value: str, length: int) -> bytes:
+        encoded = value.encode()
+        padding = length - len(encoded)
+        if padding < 0:
+            raise ValueError(f"Encoding of {value} is larger than {length} bytes")
+
+        return encoded + b" " * padding
+
+
+def main() -> None:
+    parser = argparse.ArgumentParser(
+        description="Simple thin archive creator", fromfile_prefix_chars="@"
+    )
+    parser.add_argument("modifiers", help="Operation and modifiers (limited support)")
+    parser.add_argument("output", type=Path, help="The output file")
+    parser.add_argument("inputs", nargs="+", help="The input files")
+    args = parser.parse_args()
+
+    if args.output.exists():
+        raise ValueError("Appending to an existing archive is unsupported")
+
+    thin = False
+    for modifier in args.modifiers:
+        if modifier == "s":
+            raise ValueError("Archive symbol tables are unsupported")
+        elif modifier == "T":
+            thin = True
+        elif modifier not in "qcSD":
+            raise ValueError(f"Unsupported operation or modifier {modifier}")
+
+    if not thin:
+        raise ValueError("Only thin archives are supported")
+
+    # Strip any leading or trailing quotes (present in Windows argsfiles)
+    inputs = [Path(p.lstrip('"').rstrip('"')) for p in args.inputs]
+    archive = ThinArchive(inputs, args.output)
+    archive.write()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/prelude/cxx/tools/stderr_to_file.py b/prelude/cxx/tools/stderr_to_file.py
new file mode 100755
index 0000000000000..1e11ee5d02666
--- /dev/null
+++ b/prelude/cxx/tools/stderr_to_file.py
@@ -0,0 +1,102 @@
+#!/usr/bin/env python3
+# Copyright (c) Meta Platforms, Inc. and affiliates.
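
To make the archive layout produced by `simple_ar.py` above concrete, here is a hypothetical smoke test; it assumes the script is invocable from the working directory and uses the `qcT` (quick-append, create, thin) modifier string, the combination the tool expects:

```python
import subprocess
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as td:
    out = Path(td, "libfoo.a")
    # The referenced object files do not need to exist; only their paths are
    # embedded, which is the whole point of the tool.
    subprocess.run(
        ["python3", "simple_ar.py", "qcT", str(out),
         str(Path(td, "a.o")), str(Path(td, "b.o"))],
        check=True,
    )
    data = out.read_bytes()
    assert data.startswith(b"!<thin>\n")  # thin-archive magic
    assert b"a.o/\n" in data              # long-name member stores "<path>/\n"
```
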
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +""" +Usage: stderr_to_file.py --out=path/to/output path/to/clang++ [args...] +""" + +import argparse +import asyncio +import signal +import subprocess +import sys +from pathlib import Path +from typing import List, NamedTuple + + +# Exit code of `bash -c 'sleep 100'` +_INTERRUPTED = 128 + signal.SIGINT.value + + +class Args(NamedTuple): + out: Path + command: List[str] + + +class SubprocessProtocol(asyncio.SubprocessProtocol): + """Write subprocess stderr to both self.out and sys.stderr""" + + def __init__(self, out, exit_future): + self.out = out + self.exit_future = exit_future + self.pipe_closed = False + self.exited = False + + def pipe_data_received(self, fd, data): + if fd == sys.stderr.fileno(): + # Blocking write to file. This is buffered in a Python + # io.BufferedRandom. + self.out.write(data) + # Blocking unbuffered write to stderr. Our writes will be exactly as + # buffered as the subprocess's writes. + sys.stderr.buffer.write(data) + sys.stderr.flush() + + def pipe_connection_lost(self, fd, exc): + if fd == sys.stderr.fileno(): + self.pipe_closed = True + # Either of pipe_connection_lost() or process_exited() can be called + # before the other. Wait until both methods are called. + self._check_for_exit() + + def process_exited(self): + self.exited = True + # Either of pipe_connection_lost() or process_exited() can be called + # before the other. Wait until both methods are called. + self._check_for_exit() + + def _check_for_exit(self): + if self.pipe_closed and self.exited: + try: + self.exit_future.set_result(True) + except asyncio.InvalidStateError: + # Event loop has shut down. + pass + + +async def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--out", type=Path, required=True) + parser.add_argument("command", nargs=argparse.REMAINDER) + args = Args(**vars(parser.parse_args())) + + loop = asyncio.get_running_loop() + exit_future = asyncio.Future(loop=loop) + + with open(args.out, "wb+") as out: + transport, protocol = await loop.subprocess_exec( + lambda: SubprocessProtocol(out, exit_future), + *args.command, + stdin=None, # inherit + stdout=None, # inherit + stderr=subprocess.PIPE, + ) + await exit_future + transport.close() + + returncode = transport.get_returncode() + if returncode is None: + return _INTERRUPTED + else: + return returncode + + +try: + sys.exit(asyncio.run(main())) +except KeyboardInterrupt: + sys.exit(_INTERRUPTED) diff --git a/prelude/cxx/user/cxx_toolchain_override.bzl b/prelude/cxx/user/cxx_toolchain_override.bzl index 1cc4cd7262902..c86fb1c93c09d 100644 --- a/prelude/cxx/user/cxx_toolchain_override.bzl +++ b/prelude/cxx/user/cxx_toolchain_override.bzl @@ -5,18 +5,37 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
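
A hypothetical invocation of `stderr_to_file.py` above: the wrapped command's stderr is streamed through to the caller's stderr and simultaneously captured to `--out`, and the wrapper exits with the child's exit code (or `128 + SIGINT` when interrupted):

```python
import subprocess
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as td:
    out = Path(td, "diag.txt")
    proc = subprocess.run(
        ["python3", "stderr_to_file.py", "--out", str(out),
         "python3", "-c", "import sys; sys.stderr.write('oops\\n'); sys.exit(3)"],
    )
    assert proc.returncode == 3            # the child's exit code is propagated
    assert out.read_bytes() == b"oops\n"   # ...and its stderr was captured
```
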
-load("@prelude//cxx:cxx_toolchain_types.bzl", "AsCompilerInfo", "AsmCompilerInfo", "BinaryUtilitiesInfo", "CCompilerInfo", "CxxCompilerInfo", "CxxObjectFormat", "CxxPlatformInfo", "CxxToolchainInfo", "LinkerInfo", "LinkerType", "PicBehavior", "ShlibInterfacesMode", "StripFlagsInfo", "cxx_toolchain_infos") +load( + "@prelude//cxx:cxx_toolchain_types.bzl", + "AsCompilerInfo", + "AsmCompilerInfo", + "BinaryUtilitiesInfo", + "CCompilerInfo", + "CxxCompilerInfo", + "CxxInternalTools", + "CxxObjectFormat", + "CxxPlatformInfo", + "CxxToolchainInfo", + "LinkerInfo", + "LinkerType", + "PicBehavior", + "ShlibInterfacesMode", + "StripFlagsInfo", + "cxx_toolchain_infos", +) +load("@prelude//cxx:cxx_utility.bzl", "cxx_toolchain_allow_cache_upload_args") load("@prelude//cxx:debug.bzl", "SplitDebugMode") load("@prelude//cxx:headers.bzl", "HeaderMode") load("@prelude//cxx:linker.bzl", "is_pdb_generated") +load("@prelude//cxx:target_sdk_version.bzl", "get_toolchain_target_sdk_version") load( "@prelude//linking:link_info.bzl", "LinkStyle", ) load("@prelude//linking:lto.bzl", "LtoMode") load("@prelude//user:rule_spec.bzl", "RuleRegistrationSpec") -load("@prelude//utils:pick.bzl", _pick = "pick", _pick_and_add = "pick_and_add", _pick_bin = "pick_bin", _pick_dep = "pick_dep") -load("@prelude//utils:utils.bzl", "map_val", "value_or") +load("@prelude//utils:pick.bzl", _pick = "pick", _pick_and_add = "pick_and_add", _pick_bin = "pick_bin", _pick_dep = "pick_dep", _pick_raw = "pick_raw") +load("@prelude//utils:utils.bzl", "flatten", "map_val", "value_or") def _cxx_toolchain_override(ctx): base_toolchain = ctx.attrs.base[CxxToolchainInfo] @@ -30,18 +49,16 @@ def _cxx_toolchain_override(ctx): preprocessor = _pick_bin(ctx.attrs.as_compiler, base_as_info.preprocessor), preprocessor_type = base_as_info.preprocessor_type, preprocessor_flags = _pick(ctx.attrs.as_preprocessor_flags, base_as_info.preprocessor_flags), - dep_files_processor = base_as_info.dep_files_processor, ) asm_info = base_toolchain.asm_compiler_info if asm_info != None: asm_info = AsmCompilerInfo( compiler = _pick_bin(ctx.attrs.asm_compiler, asm_info.compiler), - compiler_type = asm_info.compiler_type, + compiler_type = _pick_raw(ctx.attrs.asm_compiler_type, asm_info.compiler_type), compiler_flags = _pick(ctx.attrs.asm_compiler_flags, asm_info.compiler_flags), preprocessor = _pick_bin(ctx.attrs.asm_compiler, asm_info.preprocessor), preprocessor_type = asm_info.preprocessor_type, preprocessor_flags = _pick(ctx.attrs.asm_preprocessor_flags, asm_info.preprocessor_flags), - dep_files_processor = asm_info.dep_files_processor, ) base_c_info = base_toolchain.c_compiler_info c_info = CCompilerInfo( @@ -51,7 +68,7 @@ def _cxx_toolchain_override(ctx): preprocessor = _pick_bin(ctx.attrs.c_compiler, base_c_info.preprocessor), preprocessor_type = base_c_info.preprocessor_type, preprocessor_flags = _pick(ctx.attrs.c_preprocessor_flags, base_c_info.preprocessor_flags), - dep_files_processor = base_c_info.dep_files_processor, + allow_cache_upload = _pick_raw(ctx.attrs.c_compiler_allow_cache_upload, base_c_info.allow_cache_upload), ) base_cxx_info = base_toolchain.cxx_compiler_info cxx_info = CxxCompilerInfo( @@ -61,20 +78,21 @@ def _cxx_toolchain_override(ctx): preprocessor = _pick_bin(ctx.attrs.cxx_compiler, base_cxx_info.preprocessor), preprocessor_type = base_cxx_info.preprocessor_type, preprocessor_flags = _pick(ctx.attrs.cxx_preprocessor_flags, base_cxx_info.preprocessor_flags), - dep_files_processor = base_cxx_info.dep_files_processor, + allow_cache_upload = 
_pick_raw(ctx.attrs.cxx_compiler_allow_cache_upload, base_cxx_info.allow_cache_upload), ) base_linker_info = base_toolchain.linker_info - linker_type = ctx.attrs.linker_type if ctx.attrs.linker_type != None else base_linker_info.type + linker_type = LinkerType(ctx.attrs.linker_type) if ctx.attrs.linker_type != None else base_linker_info.type pdb_expected = is_pdb_generated(linker_type, ctx.attrs.linker_flags) if ctx.attrs.linker_flags != None else base_linker_info.is_pdb_generated - # This handles case when linker type is overriden to non-windows from + # This handles case when linker type is overridden to non-windows from # windows but linker flags are inherited. # When it's changed from non-windows to windows but flags are not changed, # we can't inspect base linker flags and disable PDB subtargets. # This shouldn't be a problem because to use windows linker after non-windows # linker flags should be changed as well. - pdb_expected = linker_type == "windows" and pdb_expected + pdb_expected = linker_type == LinkerType("windows") and pdb_expected shlib_interfaces = ShlibInterfacesMode(ctx.attrs.shared_library_interface_mode) if ctx.attrs.shared_library_interface_mode else None + sanitizer_runtime_files = flatten([runtime_file[DefaultInfo].default_outputs for runtime_file in ctx.attrs.sanitizer_runtime_files]) if ctx.attrs.sanitizer_runtime_files != None else None linker_info = LinkerInfo( archiver = _pick_bin(ctx.attrs.archiver, base_linker_info.archiver), archiver_type = base_linker_info.archiver_type, @@ -90,6 +108,7 @@ def _cxx_toolchain_override(ctx): link_ordering = base_linker_info.link_ordering, linker = _pick_bin(ctx.attrs.linker, base_linker_info.linker), linker_flags = _pick(ctx.attrs.linker_flags, base_linker_info.linker_flags), + post_linker_flags = _pick(ctx.attrs.post_linker_flags, base_linker_info.post_linker_flags), lto_mode = value_or(map_val(LtoMode, ctx.attrs.lto_mode), base_linker_info.lto_mode), object_file_extension = base_linker_info.object_file_extension, shlib_interfaces = value_or(shlib_interfaces, base_linker_info.shlib_interfaces), @@ -98,6 +117,8 @@ def _cxx_toolchain_override(ctx): requires_objects = base_linker_info.requires_objects, supports_distributed_thinlto = base_linker_info.supports_distributed_thinlto, independent_shlib_interface_linker_flags = base_linker_info.independent_shlib_interface_linker_flags, + sanitizer_runtime_enabled = value_or(ctx.attrs.sanitizer_runtime_enabled, base_linker_info.sanitizer_runtime_enabled), + sanitizer_runtime_files = value_or(sanitizer_runtime_files, base_linker_info.sanitizer_runtime_files), shared_dep_runtime_ld_flags = [], shared_library_name_default_prefix = ctx.attrs.shared_library_name_default_prefix if ctx.attrs.shared_library_name_default_prefix != None else base_linker_info.shared_library_name_default_prefix, shared_library_name_format = ctx.attrs.shared_library_name_format if ctx.attrs.shared_library_name_format != None else base_linker_info.shared_library_name_format, @@ -109,13 +130,13 @@ def _cxx_toolchain_override(ctx): use_archiver_flags = value_or(ctx.attrs.use_archiver_flags, base_linker_info.use_archiver_flags), force_full_hybrid_if_capable = value_or(ctx.attrs.force_full_hybrid_if_capable, base_linker_info.force_full_hybrid_if_capable), is_pdb_generated = pdb_expected, - produce_interface_from_stub_shared_library = value_or(ctx.attrs.produce_interface_from_stub_shared_library, base_linker_info.produce_interface_from_stub_shared_library), ) base_binary_utilities_info = 
base_toolchain.binary_utilities_info binary_utilities_info = BinaryUtilitiesInfo( nm = _pick_bin(ctx.attrs.nm, base_binary_utilities_info.nm), objcopy = _pick_bin(ctx.attrs.objcopy, base_binary_utilities_info.objcopy), + objdump = _pick_bin(ctx.attrs.objdump, base_binary_utilities_info.objdump), ranlib = _pick_bin(ctx.attrs.ranlib, base_binary_utilities_info.ranlib), strip = _pick_bin(ctx.attrs.strip, base_binary_utilities_info.strip), dwp = base_binary_utilities_info.dwp, @@ -123,15 +144,19 @@ def _cxx_toolchain_override(ctx): ) base_strip_flags_info = base_toolchain.strip_flags_info - strip_flags_info = StripFlagsInfo( - strip_debug_flags = _pick(ctx.attrs.strip_debug_flags, base_strip_flags_info.strip_debug_flags), - strip_non_global_flags = _pick(ctx.attrs.strip_non_global_flags, base_strip_flags_info.strip_non_global_flags), - strip_all_flags = _pick(ctx.attrs.strip_all_flags, base_strip_flags_info.strip_all_flags), - ) + if base_strip_flags_info: + strip_flags_info = StripFlagsInfo( + strip_debug_flags = _pick(ctx.attrs.strip_debug_flags, base_strip_flags_info.strip_debug_flags), + strip_non_global_flags = _pick(ctx.attrs.strip_non_global_flags, base_strip_flags_info.strip_non_global_flags), + strip_all_flags = _pick(ctx.attrs.strip_all_flags, base_strip_flags_info.strip_all_flags), + ) + else: + strip_flags_info = None return [ DefaultInfo(), ] + cxx_toolchain_infos( + internal_tools = ctx.attrs._internal_tools[CxxInternalTools], platform_name = ctx.attrs.platform_name if ctx.attrs.platform_name != None else ctx.attrs.base[CxxPlatformInfo].name, platform_deps_aliases = ctx.attrs.platform_deps_aliases if ctx.attrs.platform_deps_aliases != None else [], linker_info = linker_info, @@ -147,40 +172,40 @@ def _cxx_toolchain_override(ctx): hip_compiler_info = base_toolchain.hip_compiler_info, header_mode = HeaderMode(ctx.attrs.header_mode) if ctx.attrs.header_mode != None else base_toolchain.header_mode, headers_as_raw_headers_mode = base_toolchain.headers_as_raw_headers_mode, - mk_comp_db = _pick_bin(ctx.attrs.mk_comp_db, base_toolchain.mk_comp_db), - mk_hmap = _pick_bin(ctx.attrs.mk_hmap, base_toolchain.mk_hmap), - dist_lto_tools_info = base_toolchain.dist_lto_tools_info, use_dep_files = base_toolchain.use_dep_files, clang_remarks = base_toolchain.clang_remarks, + gcno_files = base_toolchain.gcno_files, clang_trace = base_toolchain.clang_trace, object_format = CxxObjectFormat(ctx.attrs.object_format) if ctx.attrs.object_format != None else base_toolchain.object_format, conflicting_header_basename_allowlist = base_toolchain.conflicting_header_basename_allowlist, strip_flags_info = strip_flags_info, pic_behavior = PicBehavior(ctx.attrs.pic_behavior) if ctx.attrs.pic_behavior != None else base_toolchain.pic_behavior.value, split_debug_mode = SplitDebugMode(value_or(ctx.attrs.split_debug_mode, base_toolchain.split_debug_mode.value)), + target_sdk_version = value_or(get_toolchain_target_sdk_version(ctx), base_toolchain.target_sdk_version), ) -def _cxx_toolchain_override_inheriting_target_platform_attrs(is_toolchain_rule): - dep_type = attrs.exec_dep if is_toolchain_rule else attrs.dep - base_dep_type = attrs.toolchain_dep if is_toolchain_rule else attrs.dep - return { +cxx_toolchain_override_registration_spec = RuleRegistrationSpec( + name = "cxx_toolchain_override", + impl = _cxx_toolchain_override, + attrs = { "additional_c_compiler_flags": attrs.option(attrs.list(attrs.arg()), default = None), "additional_cxx_compiler_flags": attrs.option(attrs.list(attrs.arg()), default = None), 
"archive_objects_locally": attrs.option(attrs.bool(), default = None), - "archiver": attrs.option(dep_type(providers = [RunInfo]), default = None), + "archiver": attrs.option(attrs.exec_dep(providers = [RunInfo]), default = None), "archiver_supports_argfiles": attrs.option(attrs.bool(), default = None), - "as_compiler": attrs.option(dep_type(providers = [RunInfo]), default = None), + "as_compiler": attrs.option(attrs.exec_dep(providers = [RunInfo]), default = None), "as_compiler_flags": attrs.option(attrs.list(attrs.arg()), default = None), "as_preprocessor_flags": attrs.option(attrs.list(attrs.arg()), default = None), - "asm_compiler": attrs.option(dep_type(providers = [RunInfo]), default = None), + "asm_compiler": attrs.option(attrs.exec_dep(providers = [RunInfo]), default = None), "asm_compiler_flags": attrs.option(attrs.list(attrs.arg()), default = None), + "asm_compiler_type": attrs.option(attrs.string(), default = None), "asm_preprocessor_flags": attrs.option(attrs.list(attrs.arg()), default = None), - "base": base_dep_type(providers = [CxxToolchainInfo]), + "base": attrs.toolchain_dep(providers = [CxxToolchainInfo]), "bolt_enabled": attrs.option(attrs.bool(), default = None), - "c_compiler": attrs.option(dep_type(providers = [RunInfo]), default = None), + "c_compiler": attrs.option(attrs.exec_dep(providers = [RunInfo]), default = None), "c_compiler_flags": attrs.option(attrs.list(attrs.arg()), default = None), "c_preprocessor_flags": attrs.option(attrs.list(attrs.arg()), default = None), - "cxx_compiler": attrs.option(dep_type(providers = [RunInfo]), default = None), + "cxx_compiler": attrs.option(attrs.exec_dep(providers = [RunInfo]), default = None), "cxx_compiler_flags": attrs.option(attrs.list(attrs.arg()), default = None), "cxx_preprocessor_flags": attrs.option(attrs.list(attrs.arg()), default = None), "force_full_hybrid_if_capable": attrs.option(attrs.bool(), default = None), @@ -190,43 +215,37 @@ def _cxx_toolchain_override_inheriting_target_platform_attrs(is_toolchain_rule): "link_libraries_locally": attrs.option(attrs.bool(), default = None), "link_style": attrs.option(attrs.enum(LinkStyle.values()), default = None), "link_weight": attrs.option(attrs.int(), default = None), - "linker": attrs.option(dep_type(providers = [RunInfo]), default = None), + "linker": attrs.option(attrs.exec_dep(providers = [RunInfo]), default = None), "linker_flags": attrs.option(attrs.list(attrs.arg()), default = None), - "linker_type": attrs.option(attrs.enum(LinkerType), default = None), - "llvm_link": attrs.option(dep_type(providers = [RunInfo]), default = None), + "linker_type": attrs.option(attrs.enum(LinkerType.values()), default = None), + "lipo": attrs.option(attrs.exec_dep(providers = [RunInfo]), default = None), + "llvm_link": attrs.option(attrs.exec_dep(providers = [RunInfo]), default = None), "lto_mode": attrs.option(attrs.enum(LtoMode.values()), default = None), - "mk_comp_db": attrs.option(dep_type(providers = [RunInfo]), default = None), - "mk_hmap": attrs.option(dep_type(providers = [RunInfo]), default = None), - "mk_shlib_intf": attrs.option(dep_type(providers = [RunInfo]), default = None), - "nm": attrs.option(dep_type(providers = [RunInfo]), default = None), - "objcopy": attrs.option(dep_type(providers = [RunInfo]), default = None), + "min_sdk_version": attrs.option(attrs.string(), default = None), + "mk_shlib_intf": attrs.option(attrs.exec_dep(providers = [RunInfo]), default = None), + "nm": attrs.option(attrs.exec_dep(providers = [RunInfo]), default = None), + "objcopy": 
attrs.option(attrs.exec_dep(providers = [RunInfo]), default = None), + "objdump": attrs.option(attrs.exec_dep(providers = [RunInfo]), default = None), "object_format": attrs.enum(CxxObjectFormat.values(), default = "native"), "pic_behavior": attrs.enum(PicBehavior.values(), default = "supported"), "platform_deps_aliases": attrs.option(attrs.list(attrs.string()), default = None), "platform_name": attrs.option(attrs.string(), default = None), - "produce_interface_from_stub_shared_library": attrs.option(attrs.bool(), default = None), - "ranlib": attrs.option(dep_type(providers = [RunInfo]), default = None), + "post_linker_flags": attrs.option(attrs.list(attrs.arg()), default = None), + "ranlib": attrs.option(attrs.exec_dep(providers = [RunInfo]), default = None), + "sanitizer_runtime_enabled": attrs.bool(default = False), + "sanitizer_runtime_files": attrs.option(attrs.set(attrs.dep(), sorted = True, default = []), default = None), # Use `attrs.dep()` as it's not a tool, always propagate target platform "shared_library_interface_mode": attrs.option(attrs.enum(ShlibInterfacesMode.values()), default = None), "shared_library_name_default_prefix": attrs.option(attrs.string(), default = None), "shared_library_name_format": attrs.option(attrs.string(), default = None), "shared_library_versioned_name_format": attrs.option(attrs.string(), default = None), "split_debug_mode": attrs.option(attrs.enum(SplitDebugMode.values()), default = None), - "strip": attrs.option(dep_type(providers = [RunInfo]), default = None), + "strip": attrs.option(attrs.exec_dep(providers = [RunInfo]), default = None), "strip_all_flags": attrs.option(attrs.list(attrs.arg()), default = None), "strip_debug_flags": attrs.option(attrs.list(attrs.arg()), default = None), "strip_non_global_flags": attrs.option(attrs.list(attrs.arg()), default = None), + "target_sdk_version": attrs.option(attrs.string(), default = None), "use_archiver_flags": attrs.option(attrs.bool(), default = None), - } - -cxx_toolchain_override_registration_spec = RuleRegistrationSpec( - name = "cxx_toolchain_override", - impl = _cxx_toolchain_override, - attrs = _cxx_toolchain_override_inheriting_target_platform_attrs(is_toolchain_rule = False), -) - -cxx_toolchain_override_inheriting_target_platform_registration_spec = RuleRegistrationSpec( - name = "cxx_toolchain_override_inheriting_target_platform", - impl = _cxx_toolchain_override, - attrs = _cxx_toolchain_override_inheriting_target_platform_attrs(is_toolchain_rule = True), + "_internal_tools": attrs.default_only(attrs.exec_dep(providers = [CxxInternalTools], default = "prelude//cxx/tools:internal_tools")), + } | cxx_toolchain_allow_cache_upload_args(), is_toolchain_rule = True, ) diff --git a/prelude/cxx/user/link_group_map.bzl b/prelude/cxx/user/link_group_map.bzl index 93ee74c706dd9..d3e55c87bb6d3 100644 --- a/prelude/cxx/user/link_group_map.bzl +++ b/prelude/cxx/user/link_group_map.bzl @@ -7,18 +7,18 @@ load( "@prelude//cxx:groups.bzl", - "BuildTargetFilter", # @unused Used as a type - "FilterType", - "Group", # @unused Used as a type - "GroupMapping", # @unused Used as a type - "LabelFilter", # @unused Used as a type + "get_roots_from_mapping", + "make_info_subtarget_providers", "parse_groups_definitions", ) load( "@prelude//cxx:link_groups.bzl", - "LinkGroupInfo", "build_link_group_info", ) +load( + "@prelude//cxx:link_groups_types.bzl", + "link_group_inlined_map_attr", +) load( "@prelude//linking:link_groups.bzl", "LinkGroupLibInfo", @@ -37,132 +37,26 @@ load( "SharedLibraryInfo", ) 
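
The `cxx_toolchain_override` changes above lean heavily on the `_pick`/`value_or` idiom: every overridable field takes the explicitly set attribute when present and otherwise falls back to the base toolchain's value. A minimal plain-Python restatement of that pattern:

```python
from typing import Optional, TypeVar

T = TypeVar("T")

def pick(override: Optional[T], base: T) -> T:
    """The `_pick`/`value_or` idiom: prefer the explicitly set override."""
    return base if override is None else override

# An override that only changes the linker flags leaves every other field of
# the base toolchain untouched:
base_flags = ["-fuse-ld=lld"]
assert pick(None, base_flags) == ["-fuse-ld=lld"]
assert pick(["-fuse-ld=gold"], base_flags) == ["-fuse-ld=gold"]
```
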
load("@prelude//user:rule_spec.bzl", "RuleRegistrationSpec") -load( - "@prelude//utils:build_target_pattern.bzl", - "BuildTargetPattern", # @unused Used as a type -) -load("@prelude//decls/common.bzl", "Linkage", "Traversal") - -def _v1_attrs( - optional_root: bool = False, - # Whether we should parse `root` fields as a `dependency`, instead of a `label`. - root_is_dep: bool = True): - if root_is_dep: - attrs_root = attrs.dep(providers = [ - LinkGroupLibInfo, - LinkableGraph, - MergedLinkInfo, - SharedLibraryInfo, - ]) - else: - attrs_root = attrs.label() - - if optional_root: - attrs_root = attrs.option(attrs_root) - - return attrs.list( - attrs.tuple( - # name - attrs.string(), - # list of mappings - attrs.list( - # a single mapping - attrs.tuple( - # root node - attrs_root, - # traversal - attrs.enum(Traversal), - # filters, either `None`, a single filter, or a list of filters - # (which must all match). - attrs.option(attrs.one_of(attrs.list(attrs.string()), attrs.string())), - # linkage - attrs.option(attrs.enum(Linkage)), - ), - ), - # attributes - attrs.option( - attrs.dict(key = attrs.string(), value = attrs.any(), sorted = False), - ), - ), - ) - -def link_group_map_attr(): - v2_attrs = attrs.dep(providers = [LinkGroupInfo]) - return attrs.option( - attrs.one_of( - v2_attrs, - _v1_attrs( - optional_root = True, - # Inlined `link_group_map` will parse roots as `label`s, to avoid - # bloating deps w/ unrelated mappings (e.g. it's common to use - # a default mapping for all rules, which would otherwise add - # unrelated deps to them). - root_is_dep = False, - ), - ), - default = None, - ) - -def _make_json_info_for_build_target_pattern(build_target_pattern: BuildTargetPattern) -> dict[str, typing.Any]: - # `BuildTargetPattern` contains lambdas which are not serializable, so - # have to generate the JSON representation - return { - "cell": build_target_pattern.cell, - "kind": build_target_pattern.kind, - "name": build_target_pattern.name, - "path": build_target_pattern.path, - } - -def _make_json_info_for_group_mapping_filters(filters: list[[BuildTargetFilter, LabelFilter]]) -> list[dict[str, typing.Any]]: - json_filters = [] - for filter in filters: - if filter._type == FilterType("label"): - json_filters += [{"regex": str(filter.regex)}] - elif filter._type == FilterType("pattern"): - json_filters += [_make_json_info_for_build_target_pattern(filter.pattern)] - else: - fail("Unknown filter type: " + filter) - return json_filters - -def _make_json_info_for_group_mapping(group_mapping: GroupMapping) -> dict[str, typing.Any]: - return { - "filters": _make_json_info_for_group_mapping_filters(group_mapping.filters), - "preferred_linkage": group_mapping.preferred_linkage, - "root": group_mapping.root, - "traversal": group_mapping.traversal, - } - -def _make_json_info_for_group(group: Group) -> dict[str, typing.Any]: - return { - "attrs": group.attrs, - "mappings": [_make_json_info_for_group_mapping(mapping) for mapping in group.mappings], - "name": group.name, - } - -def _make_info_subtarget_providers(ctx: AnalysisContext, link_group_info: LinkGroupInfo) -> list[Provider]: - info_json = { - "groups": {name: _make_json_info_for_group(group) for name, group in link_group_info.groups.items()}, - "mappings": link_group_info.mappings, - } - json_output = ctx.actions.write_json("link_group_map_info.json", info_json) - return [DefaultInfo(default_output = json_output)] +load("@prelude//utils:utils.bzl", "flatten") def _impl(ctx: AnalysisContext) -> list[Provider]: # Extract graphs from the roots 
via the raw attrs, as `parse_groups_definitions` # parses them as labels. + + deps = flatten([ + get_roots_from_mapping(mapping) + for entry in ctx.attrs.map + for mapping in entry[1] + ]) linkable_graph = create_linkable_graph( ctx, - deps = [ - mapping[0][LinkableGraph] - for entry in ctx.attrs.map - for mapping in entry[1] - ], + deps = [dep[LinkableGraph] for dep in deps], ) link_groups = parse_groups_definitions(ctx.attrs.map, lambda root: root.label) link_group_info = build_link_group_info(linkable_graph, link_groups) return [ DefaultInfo(sub_targets = { - "info": _make_info_subtarget_providers(ctx, link_group_info), + "info": make_info_subtarget_providers(ctx, link_group_info.groups.values(), link_group_info.mappings), }), link_group_info, ] @@ -171,6 +65,15 @@ registration_spec = RuleRegistrationSpec( name = "link_group_map", impl = _impl, attrs = { - "map": _v1_attrs(), + "map": link_group_inlined_map_attr( + root_attr = attrs.dep( + providers = [ + LinkGroupLibInfo, + LinkableGraph, + MergedLinkInfo, + SharedLibraryInfo, + ], + ), + ), }, ) diff --git a/prelude/cxx/windows_resource.bzl b/prelude/cxx/windows_resource.bzl new file mode 100644 index 0000000000000..17071680dc6ba --- /dev/null +++ b/prelude/cxx/windows_resource.bzl @@ -0,0 +1,95 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//cxx:cxx_context.bzl", "get_cxx_toolchain_info") +load("@prelude//cxx:headers.bzl", "cxx_get_regular_cxx_headers_layout") +load("@prelude//cxx:preprocessor.bzl", "cxx_merge_cpreprocessors", "cxx_private_preprocessor_info") +load("@prelude//linking:link_groups.bzl", "LinkGroupLibInfo") +load("@prelude//linking:link_info.bzl", "LibOutputStyle", "LinkInfo", "LinkInfos", "ObjectsLinkable", "create_merged_link_info") +load("@prelude//linking:linkable_graph.bzl", "create_linkable_graph") +load("@prelude//linking:shared_libraries.bzl", "SharedLibraryInfo") + +def windows_resource_impl(ctx: AnalysisContext) -> list[Provider]: + (own_non_exported_preprocessor_info, _) = cxx_private_preprocessor_info( + ctx = ctx, + headers_layout = cxx_get_regular_cxx_headers_layout(ctx), + raw_headers = ctx.attrs.raw_headers, + extra_preprocessors = [], + non_exported_deps = [], + is_test = False, + ) + + preprocessor = cxx_merge_cpreprocessors( + ctx, + [own_non_exported_preprocessor_info], + [], + ) + + headers_tag = ctx.actions.artifact_tag() + + objects = [] + + toolchain = get_cxx_toolchain_info(ctx) + for src in ctx.attrs.srcs: + rc_output = ctx.actions.declare_output( + "__objects__", + "{}.res".format(src.short_path), + ) + rc_cmd = cmd_args( + toolchain.rc_compiler_info.compiler, + toolchain.rc_compiler_info.compiler_flags, + cmd_args(rc_output.as_output(), format = "/fo{}"), + headers_tag.tag_artifacts(preprocessor.set.project_as_args("args")), + headers_tag.tag_artifacts(preprocessor.set.project_as_args("include_dirs")), + src, + ) + + ctx.actions.run( + rc_cmd, + category = "rc_compile", + ) + + cvtres_output = ctx.actions.declare_output( + "__objects__", + "{}.obj".format(src.short_path), + ) + cvtres_cmd = cmd_args( + toolchain.cvtres_compiler_info.compiler, + toolchain.cvtres_compiler_info.compiler_flags, + cmd_args(cvtres_output.as_output(), format = "/OUT:{}"), + rc_output, + ) + + ctx.actions.run( + cvtres_cmd, + category = 
"cvtres_compile", + ) + + objects.append(cvtres_output) + + link = LinkInfo( + name = ctx.attrs.name, + linkables = [ObjectsLinkable( + objects = objects, + linker_type = toolchain.linker_info.type, + link_whole = True, + )], + ) + + providers = [ + DefaultInfo(default_output = None), + SharedLibraryInfo(set = None), + LinkGroupLibInfo(libs = {}), + create_linkable_graph(ctx), + create_merged_link_info( + ctx, + toolchain.pic_behavior, + {output_style: LinkInfos(default = link) for output_style in LibOutputStyle}, + ), + ] + + return providers diff --git a/prelude/cxx/xcode.bzl b/prelude/cxx/xcode.bzl index 07c98c91221b6..5209955f8d2c4 100644 --- a/prelude/cxx/xcode.bzl +++ b/prelude/cxx/xcode.bzl @@ -10,9 +10,10 @@ load( "CompileArgsfile", # @unused Used as a type ) load( - "@prelude//cxx:compile.bzl", + "@prelude//cxx:cxx_sources.bzl", "CxxSrcWithFlags", # @unused Used as a type ) +load("@prelude//ide_integrations:xcode.bzl", "XcodeDataInfoKeys") def cxx_populate_xcode_attributes( ctx, @@ -32,17 +33,17 @@ def cxx_populate_xcode_attributes( converted_srcs[src.file] = file_properties data = { - "argsfiles_by_ext": { + XcodeDataInfoKeys.ARGSFILES_BY_EXT: { ext: argsfile.file for ext, argsfile in argsfiles.items() }, - "headers": _get_artifacts_with_owners(ctx.attrs.headers), - "product_name": product_name, - "srcs": converted_srcs, + XcodeDataInfoKeys.HEADERS: _get_artifacts_with_owners(ctx.attrs.headers), + XcodeDataInfoKeys.PRODUCT_NAME: product_name, + XcodeDataInfoKeys.SRCS: converted_srcs, } if hasattr(ctx.attrs, "exported_headers"): - data["exported_headers"] = _get_artifacts_with_owners(ctx.attrs.exported_headers) + data[XcodeDataInfoKeys.EXPORTED_HEADERS] = _get_artifacts_with_owners(ctx.attrs.exported_headers) return data diff --git a/prelude/debugging/common.bzl b/prelude/debugging/common.bzl index 61fb29cbf1369..b484d780ee9a1 100644 --- a/prelude/debugging/common.bzl +++ b/prelude/debugging/common.bzl @@ -5,22 +5,20 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -# @starlark-rust: allow_string_literals_in_type_expr - # Utility functions used by "fdb.bxl" load("@prelude//debugging/types.bzl", "TargetInfo") -def target_name(node: "target_node") -> str: +def target_name(node: bxl.ConfiguredTargetNode) -> str: return "{}:{}".format(str(node.label.path), node.label.name) -def rule_type(node: "target_node") -> str: +def rule_type(node: bxl.ConfiguredTargetNode) -> str: return node.rule_type -def create_target_info(target: "target_node") -> TargetInfo: +def create_target_info(target: bxl.ConfiguredTargetNode) -> TargetInfo: attrs = target.attrs_lazy() return TargetInfo( target = target_name(target), - target_type = rule_type(target), + target_type = rule_type(target).removeprefix("prelude//rules.bzl:"), labels = attrs.get("labels").value() if attrs.get("labels") != None else [], ) diff --git a/prelude/debugging/ensure_dwp.bzl b/prelude/debugging/ensure_dwp.bzl new file mode 100644 index 0000000000000..a5f21c93a9ff3 --- /dev/null +++ b/prelude/debugging/ensure_dwp.bzl @@ -0,0 +1,14 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# split dwarf targets have ["dwp"] subtargets. 
this function ensures that the dwp file is materialized +def ensure_dwp(ctx: bxl.Context, target: bxl.ConfiguredTargetNode): + providers = ctx.analysis(target).providers() + subtargets = providers[DefaultInfo].sub_targets + + if "dwp" in subtargets: + ctx.output.ensure(subtargets["dwp"][DefaultInfo].default_outputs[0]) diff --git a/prelude/debugging/fdb.bxl b/prelude/debugging/fdb.bxl index ea3077b568ed3..7be854219f98b 100644 --- a/prelude/debugging/fdb.bxl +++ b/prelude/debugging/fdb.bxl @@ -5,8 +5,6 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -# @starlark-rust: allow_string_literals_in_type_expr - # Bxl script in this file is addressing the problem of retrieving additional information # from language rule providers in order to correctly setup debugging tools # Typical use case is: @@ -27,7 +25,7 @@ load("@prelude//debugging/inspect_java.bzl", "inspect_java_rule") load("@prelude//debugging/labels.bzl", "DBG_INFO_EXEC", "DBG_INFO_REF", "get_info_ref", "get_label_or_mark") load("@prelude//debugging/types.bzl", "ScriptSettings") -def inspect_alias_rule(ctx: bxl.Context, actions: AnalysisActions, target: "target_node", settings: ScriptSettings): +def inspect_alias_rule(ctx: bxl.Context, actions: AnalysisActions, target: bxl.ConfiguredTargetNode, settings: ScriptSettings): attrs = target.attrs_lazy() actual = attrs.get("actual") return inspect_any_target(ctx, actions, ctx.configured_targets(actual.value().configured_target()), settings) @@ -38,15 +36,18 @@ INSPECT_BY_RULE = { "prelude//rules.bzl:android_instrumentation_apk": inspect_java_rule, "prelude//rules.bzl:android_instrumentation_test": inspect_java_rule, "prelude//rules.bzl:android_library": inspect_java_rule, + "prelude//rules.bzl:apk_genrule": inspect_java_rule, "prelude//rules.bzl:configured_alias": inspect_alias_rule, "prelude//rules.bzl:java_binary": inspect_java_rule, + "prelude//rules.bzl:java_library": inspect_java_rule, "prelude//rules.bzl:java_test": inspect_java_rule, "prelude//rules.bzl:kotlin_binary": inspect_java_rule, + "prelude//rules.bzl:kotlin_library": inspect_java_rule, "prelude//rules.bzl:kotlin_test": inspect_java_rule, "prelude//rules.bzl:robolectric_test": inspect_java_rule, } -def inspect_info_ref_rule(ctx: bxl.Context, actions: AnalysisActions, target: "target_node", settings: ScriptSettings): +def inspect_info_ref_rule(ctx: bxl.Context, actions: AnalysisActions, target: bxl.ConfiguredTargetNode, settings: ScriptSettings): aliased_target_label = get_info_ref(target.attrs_lazy().get("labels").value()) if not aliased_target_label: return inspect_default( @@ -69,7 +70,7 @@ INSPECT_BY_LABEL = { DBG_INFO_EXEC: inspect_dbg_exec, } -def inspect_any_target(ctx: bxl.Context, actions: AnalysisActions, target: "target_node", settings: ScriptSettings): +def inspect_any_target(ctx: bxl.Context, actions: AnalysisActions, target: bxl.ConfiguredTargetNode, settings: ScriptSettings): attrs = target.attrs_lazy() labels = attrs.get("labels").value() if attrs.get("labels") else [] inspect_func = INSPECT_BY_RULE.get(rule_type(target), inspect_default) @@ -78,12 +79,12 @@ def inspect_any_target(ctx: bxl.Context, actions: AnalysisActions, target: "targ return inspect_func(ctx, actions, target, settings) -def inspect(ctx: bxl.Context, actions: AnalysisActions, target: "target_node", settings: ScriptSettings): +def inspect(ctx: bxl.Context, actions: AnalysisActions, target: bxl.ConfiguredTargetNode, settings: ScriptSettings): result = inspect_any_target(ctx, actions, target, 
settings) # when getting ExecInfo based on external action it's not possible to provide result as ExecInfo # in this case we'll return the artifact which is assumed to have ExecInfo serialized in it - if type(result) == "artifact": + if isinstance(result, Artifact): return result return actions.write_json("out.json", result) diff --git a/prelude/debugging/inspect_dbg_exec.bzl b/prelude/debugging/inspect_dbg_exec.bzl index 64f53e3be9d07..33dfafb01f6fa 100644 --- a/prelude/debugging/inspect_dbg_exec.bzl +++ b/prelude/debugging/inspect_dbg_exec.bzl @@ -5,13 +5,11 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -# @starlark-rust: allow_string_literals_in_type_expr - load("@prelude//debugging/common.bzl", "create_target_info", "target_name") load("@prelude//debugging/types.bzl", "JavaInfo", "ScriptSettings") load("@prelude//java/class_to_srcs.bzl", "JavaClassToSourceMapInfo") -def inspect_dbg_exec(ctx: bxl.Context, actions: AnalysisActions, target: "target_node", settings: ScriptSettings): +def inspect_dbg_exec(ctx: bxl.Context, actions: AnalysisActions, target: bxl.ConfiguredTargetNode, settings: ScriptSettings): pointer_name = target_name(target) if not pointer_name.endswith("_fdb"): pointer_name = "{}_fdb".format(pointer_name) @@ -20,8 +18,7 @@ def inspect_dbg_exec(ctx: bxl.Context, actions: AnalysisActions, target: "target providers = ctx.analysis(fbsource_alias_target).providers() fdb_helper = providers[RunInfo] fdb_helper_out = actions.declare_output("fdb_helper.json") - cmd = cmd_args(fdb_helper) - cmd.add(settings.args) + cmd = cmd_args(fdb_helper, settings.args) actions.run(cmd, category = "fdb_helper", env = {"FDB_OUTPUT_FILE": fdb_helper_out.as_output()}, local_only = True) result = actions.declare_output("final_out.json") @@ -49,7 +46,7 @@ def inspect_dbg_exec(ctx: bxl.Context, actions: AnalysisActions, target: "target actions.dynamic_output( dynamic = [fdb_helper_out], inputs = [], - outputs = [result], + outputs = [result.as_output()], f = build_exec_info, ) return result diff --git a/prelude/debugging/inspect_default.bzl b/prelude/debugging/inspect_default.bzl index 479c0b5e3581a..f5493582cae16 100644 --- a/prelude/debugging/inspect_default.bzl +++ b/prelude/debugging/inspect_default.bzl @@ -5,14 +5,15 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
-# @starlark-rust: allow_string_literals_in_type_expr - load("@prelude//debugging/common.bzl", "create_target_info", "target_name") +load("@prelude//debugging/ensure_dwp.bzl", "ensure_dwp") load("@prelude//debugging/types.bzl", "ExecInfo", "ScriptSettings") # "inspect_default" is reused across "fdb.bxl" to provide a fallback default information # in case special handling for the rule type isn't implemented yet -def inspect_default(_ctx: bxl.Context, _actions: AnalysisActions, _target: "target_node", settings: ScriptSettings) -> ExecInfo: +def inspect_default(ctx: bxl.Context, _actions: AnalysisActions, target: bxl.ConfiguredTargetNode, settings: ScriptSettings) -> ExecInfo: + ensure_dwp(ctx, target) + return ExecInfo( target_name = target_name(settings.target), target_info = create_target_info(settings.target), diff --git a/prelude/debugging/inspect_java.bzl b/prelude/debugging/inspect_java.bzl index a1a23036d7113..352e3fbc28998 100644 --- a/prelude/debugging/inspect_java.bzl +++ b/prelude/debugging/inspect_java.bzl @@ -5,13 +5,11 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -# @starlark-rust: allow_string_literals_in_type_expr - load("@prelude//debugging/common.bzl", "create_target_info", "target_name") load("@prelude//debugging/types.bzl", "ExecInfo", "JavaInfo", "ScriptSettings", "TargetExtraInfo") load("@prelude//java/class_to_srcs.bzl", "JavaClassToSourceMapInfo") -def inspect_java_rule(ctx: bxl.Context, _actions: AnalysisActions, target: "target_node", settings: ScriptSettings) -> ExecInfo: +def inspect_java_rule(ctx: bxl.Context, _actions: AnalysisActions, target: bxl.ConfiguredTargetNode, settings: ScriptSettings) -> ExecInfo: providers = ctx.analysis(target).providers() debuginfo = providers[JavaClassToSourceMapInfo].debuginfo if JavaClassToSourceMapInfo in providers else None if debuginfo: diff --git a/prelude/debugging/labels.bzl b/prelude/debugging/labels.bzl index adc5589dc7b23..94f99b10c6502 100644 --- a/prelude/debugging/labels.bzl +++ b/prelude/debugging/labels.bzl @@ -12,7 +12,7 @@ # For example: # Running "buck run //another:target" (or via using [RunInfo]) should produce `ExecInfo` as its stdout -# 3. If target has a label `dbg:info:ref=//another:target` we assume a presense of //another:target which we can inspect for the presense of relevant providers (see fdb.bxl) +# 3. 
If target has a label `dbg:info:ref=//another:target` we assume a presence of //another:target which we can inspect for the presence of relevant providers (see fdb.bxl) # This label indicates where to locate "[RunInfo]" which would output `ExecInfo` -compatible output DBG_INFO_EXEC = "dbg:info:exec" @@ -34,13 +34,6 @@ def get_info_ref(labels: list[str]) -> [str, None]: return result return None -def get_info_exec(labels: list[str]) -> [str, None]: - for label in labels: - result = _get_value_by_mark(DBG_INFO_EXEC, label) - if result: - return result - return None - def get_label_or_mark(label: str) -> str: for mark in [DBG_INFO_EXEC, DBG_INFO_REF]: if label.startswith(mark): diff --git a/prelude/debugging/types.bzl b/prelude/debugging/types.bzl index bfc9ea4e701c4..13149d2e684c6 100644 --- a/prelude/debugging/types.bzl +++ b/prelude/debugging/types.bzl @@ -17,7 +17,7 @@ load("@prelude//utils:arglike.bzl", "ArgLike") # @unused Used as a type # Even though arguments are available in "bxl_ctx", when using ctx.dynamic_output it's not possible to access arguments from there # One way to work this around is by capturing an object in a closure and this type is used to carry the contract for this object ScriptSettings = record( - target = field("target_node"), + target = field(bxl.ConfiguredTargetNode), args = field(list[ArgLike], default = []), ) @@ -60,7 +60,7 @@ Custom = record( # Java DAP server requires this file in order to correctly locate classes in the source files # The integration with a tool is available as a part of "JVM" rules. (java/kotlin_library/binary/test) JavaInfo = record( - classmap_file = field([Artifact, None]), + classmap_file = field(Artifact | None), ) # Customizations that are understood by debugging tool diff --git a/prelude/decls/android_common.bzl b/prelude/decls/android_common.bzl index eee2f5d1b6bc7..6766684371edc 100644 --- a/prelude/decls/android_common.bzl +++ b/prelude/decls/android_common.bzl @@ -17,14 +17,14 @@ def _manifest_apk_arg(): case is that the manifest will be in the same directory as the rule, in which case this will simply be `'AndroidManifest.xml'`, but it can also reference - an `android\\_manifest()`rule. + an `android_manifest()` rule. Prefer using `manifest_skeleton`, which performs merging automatically. Exactly one of `manifest` and `manifest_skeleton` must be set. """), "manifest_skeleton": attrs.option(attrs.source(), default = None, doc = """ Relative path to the skeleton Android manifest for the APK. - An `android\\_manifest()`will be created automatically to merge + An `android_manifest()` will be created automatically to merge all manifests from libraries and resources going into the app. The common case is that the manifest will be in the same directory as the rule, in which case this will simply be @@ -47,7 +47,7 @@ def _deps_apk_arg(): def _manifest_arg(): return { "manifest": attrs.option(attrs.source(), default = None, doc = """ - An optional [Android Manifest](http://developer.android.com/guide/topics/manifest/manifest-intro.html) for the to declare any permissions or intents it may need or want to handle. May either be a file or a `android\\_manifest()`target. + An optional [Android Manifest](http://developer.android.com/guide/topics/manifest/manifest-intro.html) for the rule to declare any permissions or intents it may need or want to handle. May either be a file or an `android_manifest()` target.
"""), } diff --git a/prelude/decls/android_rules.bzl b/prelude/decls/android_rules.bzl index e48c2cc18a209..741e715f214e8 100644 --- a/prelude/decls/android_rules.bzl +++ b/prelude/decls/android_rules.bzl @@ -10,10 +10,13 @@ # the generated docs, and so those should be verified to be accurate and # well-formatted (and then delete this TODO) +load("@prelude//utils/clear_platform.bzl", "clear_platform_transition") load(":android_common.bzl", "android_common") load(":common.bzl", "AbiGenerationMode", "AnnotationProcessingTool", "ForkMode", "LogLevel", "OnDuplicateEntry", "SourceAbiVerificationMode", "TestType", "UnusedDependenciesAction", "buck", "prelude_rule") +load(":core_rules.bzl", "TargetCpuType") load(":genrule_common.bzl", "genrule_common") load(":jvm_common.bzl", "jvm_common") +load(":re_test_common.bzl", "re_test_common") AaptMode = ["aapt1", "aapt2"] @@ -35,8 +38,6 @@ ResourceCompressionMode = ["disabled", "enabled", "enabled_strings_only", "enabl SdkProguardType = ["default", "optimized", "none"] -TargetCpuType = ["arm", "armv7", "arm64", "x86", "x86_64", "mips"] - android_aar = prelude_rule( name = "android_aar", docs = """ @@ -76,10 +77,10 @@ android_aar = prelude_rule( # @unsorted-dict-items { "manifest_skeleton": attrs.source(doc = """ - The skeleton manifest file used to generate the final `AndroidManifest.xml` . May either be a file or a `android\\_manifest()`target. + The skeleton manifest file used to generate the final `AndroidManifest.xml` . May either be a file or an `android_manifest()` target. """), "build_config_values": attrs.list(attrs.string(), default = [], doc = """ - See the documentation on the values argument for `android\\_build\\_config()`. + See the documentation on the values argument for `android_build_config()`. """), "include_build_config_class": attrs.bool(default = False, doc = """ Whether to include the `BuildConfig` class files in the final .aar file. 
Needs @@ -115,35 +116,29 @@ android_aar = prelude_rule( "build_config_values_file": attrs.option(attrs.source(), default = None), "contacts": attrs.list(attrs.string(), default = []), "default_host_platform": attrs.option(attrs.configuration_label(), default = None), - "deps_query": attrs.option(attrs.query(), default = None), "enable_relinker": attrs.bool(default = False), - "exported_deps": attrs.list(attrs.dep(), default = []), - "exported_provided_deps": attrs.list(attrs.dep(), default = []), + "excluded_java_deps": attrs.list(attrs.dep(), default = []), "extra_arguments": attrs.list(attrs.string(), default = []), "extra_kotlinc_arguments": attrs.list(attrs.string(), default = []), - "extra_non_source_only_abi_kotlinc_arguments": attrs.list(attrs.string(), default = []), "friend_paths": attrs.list(attrs.dep(), default = []), "java_version": attrs.option(attrs.string(), default = None), - "javac": attrs.option(attrs.source(), default = None), - "kotlin_compiler_plugins": attrs.dict(key = attrs.source(), value = attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False), sorted = False, default = {}), "labels": attrs.list(attrs.string(), default = []), "language": attrs.option(attrs.enum(JvmLanguage), default = None), "licenses": attrs.list(attrs.source(), default = []), "manifest": attrs.option(attrs.source(), default = None), + "manifest_entries": attrs.dict(key = attrs.string(), value = attrs.any(), default = {}), "manifest_file": attrs.option(attrs.source(), default = None), "maven_coords": attrs.option(attrs.string(), default = None), - "native_library_merge_code_generator": attrs.option(attrs.dep(), default = None), + "native_library_merge_code_generator": attrs.option(attrs.exec_dep(), default = None), "native_library_merge_glue": attrs.option(attrs.dep(), default = None), - "native_library_merge_localized_symbols": attrs.option(attrs.set(attrs.string(), sorted = True), default = None), "native_library_merge_map": attrs.option(attrs.dict(key = attrs.string(), value = attrs.list(attrs.regex()), sorted = False), default = None), - "native_library_merge_sequence": attrs.option(attrs.list(attrs.tuple(attrs.string(), attrs.list(attrs.regex()))), default = None), + "native_library_merge_sequence": attrs.option(attrs.list(attrs.any()), default = None), "native_library_merge_sequence_blocklist": attrs.option(attrs.list(attrs.regex()), default = None), + "native_library_merge_non_asset_libs": attrs.bool(default = False), "never_mark_as_unused_dependency": attrs.option(attrs.bool(), default = None), "on_unused_dependencies": attrs.option(attrs.enum(UnusedDependenciesAction), default = None), - "plugins": attrs.list(attrs.dep(), default = []), "proguard_config": attrs.option(attrs.source(), default = None), - "provided_deps": attrs.list(attrs.dep(), default = []), - "provided_deps_query": attrs.option(attrs.query(), default = None), + "relinker_extra_deps": attrs.list(attrs.dep(), default = [], doc = "Deps statically linked to every native lib by the relinker."), "relinker_whitelist": attrs.list(attrs.regex(), default = []), "required_for_source_only_abi": attrs.bool(default = False), "resource_union_package": attrs.option(attrs.string(), default = None), @@ -157,7 +152,7 @@ android_aar = prelude_rule( "target": attrs.option(attrs.string(), default = None), "use_jvm_abi_gen": attrs.option(attrs.bool(), default = None), } - ), + ) | jvm_common.plugins() | jvm_common.javac(), ) android_app_modularity = prelude_rule( @@ -168,7 +163,7 @@ android_app_modularity = prelude_rule( attrs 
= ( # @unsorted-dict-items { - "application_module_blacklist": attrs.option(attrs.list(attrs.query()), default = None), + "application_module_blacklist": attrs.option(attrs.list(attrs.dep()), default = None), "application_module_configs": attrs.dict(key = attrs.string(), value = attrs.list(attrs.dep()), sorted = False, default = {}), "application_module_dependencies": attrs.option(attrs.dict(key = attrs.string(), value = attrs.list(attrs.string()), sorted = False), default = None), "contacts": attrs.list(attrs.string(), default = []), @@ -199,7 +194,7 @@ android_binary = prelude_rule( "allow_r_dot_java_in_secondary_dex": attrs.bool(default = False), "allowed_duplicate_resource_types": attrs.list(attrs.enum(RType), default = []), "android_sdk_proguard_config": attrs.option(attrs.enum(SdkProguardType), default = None), - "application_module_blacklist": attrs.option(attrs.list(attrs.query()), default = None), + "application_module_blacklist": attrs.option(attrs.list(attrs.dep()), default = None), "application_module_configs": attrs.dict(key = attrs.string(), value = attrs.list(attrs.dep()), sorted = False, default = {}), "application_module_dependencies": attrs.option(attrs.dict(key = attrs.string(), value = attrs.list(attrs.string()), sorted = False), default = None), "asset_compression_algorithm": attrs.option(attrs.enum(CompressionAlgorithm), default = None), @@ -220,6 +215,7 @@ android_binary = prelude_rule( "duplicate_resource_whitelist": attrs.option(attrs.source(), default = None), "enable_relinker": attrs.bool(default = False), "exopackage_modes": attrs.list(attrs.enum(ExopackageMode), default = []), + "extra_no_compress_asset_extensions": attrs.list(attrs.string(), default = []), "extra_filtered_resources": attrs.list(attrs.string(), default = []), "field_ref_count_buffer_space": attrs.int(default = 0), "ignore_aapt_proguard_config": attrs.bool(default = False), @@ -239,10 +235,10 @@ android_binary = prelude_rule( "module_manifest_skeleton": attrs.option(attrs.source(), default = None), "native_library_merge_code_generator": attrs.option(attrs.dep(), default = None), "native_library_merge_glue": attrs.option(attrs.dep(), default = None), - "native_library_merge_localized_symbols": attrs.option(attrs.set(attrs.string(), sorted = True), default = None), "native_library_merge_map": attrs.option(attrs.dict(key = attrs.string(), value = attrs.list(attrs.regex()), sorted = False), default = None), - "native_library_merge_sequence": attrs.option(attrs.list(attrs.tuple(attrs.string(), attrs.list(attrs.regex()))), default = None), + "native_library_merge_sequence": attrs.option(attrs.list(attrs.any()), default = None), "native_library_merge_sequence_blocklist": attrs.option(attrs.list(attrs.regex()), default = None), + "native_library_merge_non_asset_libs": attrs.bool(default = False), "no_auto_add_overlay_resources": attrs.bool(default = False), "no_auto_version_resources": attrs.bool(default = False), "no_dx": attrs.list(attrs.dep(), default = []), @@ -251,6 +247,7 @@ android_binary = prelude_rule( "package_asset_libraries": attrs.bool(default = False), "package_type": attrs.enum(PackageType, default = "debug"), "packaged_locales": attrs.list(attrs.string(), default = []), + "packaging_options": attrs.dict(key = attrs.string(), value = attrs.list(attrs.string()), default = {}), "post_filter_resources_cmd": attrs.option(attrs.arg(), default = None), "preprocess_java_classes_bash": attrs.option(attrs.arg(), default = None), "preprocess_java_classes_cmd": attrs.option(attrs.arg(), default = 
None), @@ -258,6 +255,7 @@ android_binary = prelude_rule( "primary_dex_patterns": attrs.list(attrs.string(), default = []), "proguard_config": attrs.option(attrs.source(), default = None), "proguard_jvm_args": attrs.list(attrs.string(), default = []), + "relinker_extra_deps": attrs.list(attrs.dep(), default = [], doc = "Deps statically linked to every native lib by the relinker."), "relinker_whitelist": attrs.list(attrs.regex(), default = []), "resource_compression": attrs.enum(ResourceCompressionMode, default = "disabled"), "resource_filter": attrs.list(attrs.string(), default = []), @@ -266,6 +264,7 @@ android_binary = prelude_rule( "secondary_dex_weight_limit": attrs.option(attrs.int(), default = None), "skip_crunch_pngs": attrs.option(attrs.bool(), default = None), "skip_proguard": attrs.bool(default = False), + "strip_libraries": attrs.bool(default = True), "trim_resource_ids": attrs.bool(default = False), "use_split_dex": attrs.bool(default = False), "xz_compression_level": attrs.int(default = 4), @@ -278,7 +277,7 @@ android_build_config = prelude_rule( docs = """ An `android_build_config()` rule is used to generate a `BuildConfig` class with global configuration variables - that other `android\\_library()`rules can compile against. + that other `android_library()` rules can compile against. Currently, the only variable exposed by `BuildConfig` is a global `boolean` named `DEBUG`, much like the `BuildConfig.java` generated by the official Android @@ -291,7 +290,7 @@ android_build_config = prelude_rule( be replaced with a new version where: * The fields will be set to literal values (i.e., constant expressions). * The `boolean BuildConfig.DEBUG` field will correspond to - that of the `package_type` argument to the `android\\_binary()`rule + that of the `package_type` argument to the `android_binary()` rule that is packaging it. @@ -302,7 +301,7 @@ android_build_config = prelude_rule( examples = """ Here is an example of an `android_build_config()` rule that is transitively included by both *debug* and *release* versions - of an `android\\_binary()`rule. The value + of an `android_binary()` rule. The value of `com.example.pkg.BuildConfig.DEBUG` will be different in each APK even though they both transitively depend on the same `:build_config` rule. @@ -393,14 +392,14 @@ android_build_config = prelude_rule( generated `BuildConfig.java` file. Like `DEBUG`, the values will be non-constant-expressions that evaluate to the value specified in the file at compilation time. - To override the values in an APK, specify build\\_config\\_values or build\\_config\\_values\\_file in `android\\_binary()`. + To override the values in an APK, specify build\\_config\\_values or build\\_config\\_values\\_file in `android_binary()`. """), "values_file": attrs.option(attrs.source(), default = None, doc = """ Optional path to a file that defines additional fields (and values) that should be declared in the generated `BuildConfig.java` file. Like `DEBUG`, the values will be non-constant-expressions that evaluate to the value specified in the file at compilation time. - To override the values in an APK, specify build\\_config\\_values or build\\_config\\_values\\_file in `android\\_binary()`. + To override the values in an APK, specify build\\_config\\_values or build\\_config\\_values\\_file in `android_binary()`. Note that values\\_file can be a generated file, as can build\\_config\\_values\\_file as demonstrated in the example below. 
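The `android_build_config()` docs above describe `values` fields being compiled as non-constant expressions and then rewritten to literals when an `android_binary()` packages them. A minimal sketch of that flow, assuming hypothetical target names and an `API_HOST` field that are not part of this diff:

```
# BUCK (hypothetical): one BuildConfig shared by debug and release APKs.
android_build_config(
    name = "build_config",
    package = "com.example.pkg",
    # Compiled as non-constant expressions so they cannot be inlined prematurely.
    values = ["String API_HOST = \"api-dev.example.com\""],
)

android_binary(
    name = "app-release",
    manifest = "AndroidManifest.xml",
    keystore = ":keystore",  # assumed to exist
    package_type = "release",
    # Replaces the field above with a constant expression in the final APK.
    build_config_values = ["String API_HOST = \"api.example.com\""],
    deps = [":build_config"],
)
```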
@@ -429,7 +428,7 @@ android_bundle = prelude_rule( "allow_r_dot_java_in_secondary_dex": attrs.bool(default = False), "allowed_duplicate_resource_types": attrs.list(attrs.enum(RType), default = []), "android_sdk_proguard_config": attrs.option(attrs.enum(SdkProguardType), default = None), - "application_module_blacklist": attrs.option(attrs.list(attrs.query()), default = None), + "application_module_blacklist": attrs.option(attrs.list(attrs.dep()), default = None), "application_module_configs": attrs.dict(key = attrs.string(), value = attrs.list(attrs.dep()), sorted = False, default = {}), "application_module_dependencies": attrs.option(attrs.dict(key = attrs.string(), value = attrs.list(attrs.string()), sorted = False), default = None), "asset_compression_algorithm": attrs.option(attrs.enum(CompressionAlgorithm), default = None), @@ -451,6 +450,7 @@ android_bundle = prelude_rule( "duplicate_resource_whitelist": attrs.option(attrs.source(), default = None), "enable_relinker": attrs.bool(default = False), "exopackage_modes": attrs.list(attrs.enum(ExopackageMode), default = []), + "extra_no_compress_asset_extensions": attrs.list(attrs.string(), default = []), "extra_filtered_resources": attrs.list(attrs.string(), default = []), "field_ref_count_buffer_space": attrs.int(default = 0), "ignore_aapt_proguard_config": attrs.bool(default = False), @@ -470,10 +470,10 @@ android_bundle = prelude_rule( "module_manifest_skeleton": attrs.option(attrs.source(), default = None), "native_library_merge_code_generator": attrs.option(attrs.dep(), default = None), "native_library_merge_glue": attrs.option(attrs.dep(), default = None), - "native_library_merge_localized_symbols": attrs.option(attrs.set(attrs.string(), sorted = True), default = None), "native_library_merge_map": attrs.option(attrs.dict(key = attrs.string(), value = attrs.list(attrs.regex()), sorted = False), default = None), - "native_library_merge_sequence": attrs.option(attrs.list(attrs.tuple(attrs.string(), attrs.list(attrs.regex()))), default = None), + "native_library_merge_sequence": attrs.option(attrs.list(attrs.any()), default = None), "native_library_merge_sequence_blocklist": attrs.option(attrs.list(attrs.regex()), default = None), + "native_library_merge_non_asset_libs": attrs.bool(default = False), "no_auto_add_overlay_resources": attrs.bool(default = False), "no_auto_version_resources": attrs.bool(default = False), "no_dx": attrs.list(attrs.dep(), default = []), @@ -482,6 +482,7 @@ android_bundle = prelude_rule( "package_asset_libraries": attrs.bool(default = False), "package_type": attrs.enum(PackageType, default = "debug"), "packaged_locales": attrs.list(attrs.string(), default = []), + "packaging_options": attrs.dict(key = attrs.string(), value = attrs.list(attrs.string()), default = {}), "post_filter_resources_cmd": attrs.option(attrs.arg(), default = None), "preprocess_java_classes_bash": attrs.option(attrs.arg(), default = None), "preprocess_java_classes_cmd": attrs.option(attrs.arg(), default = None), @@ -489,6 +490,7 @@ android_bundle = prelude_rule( "primary_dex_patterns": attrs.list(attrs.string(), default = []), "proguard_config": attrs.option(attrs.source(), default = None), "proguard_jvm_args": attrs.list(attrs.string(), default = []), + "relinker_extra_deps": attrs.list(attrs.dep(), default = [], doc = "Deps statically linked to every native lib by the relinker."), "relinker_whitelist": attrs.list(attrs.regex(), default = []), "resource_compression": attrs.enum(ResourceCompressionMode, default = "disabled"), 
"resource_filter": attrs.list(attrs.string(), default = []), @@ -522,7 +524,7 @@ android_instrumentation_apk = prelude_rule( when running the test. """, examples = """ - Here is an example of an `android_instrumentation_apk()` rule that tests a `android_binary()`, and depends on a test + Here is an example of an `android_instrumentation_apk()` rule that tests an `android_binary()`, and depends on a test package. @@ -563,8 +565,7 @@ android_instrumentation_apk = prelude_rule( { "apk": attrs.dep(doc = """ APK build target, which should be used for the instrumentation APK. - Can be either a `android\\_binary()`or a - `apk\\_genrule()`. + Can be either an `android_binary()` or an `apk_genrule()`. """), } | android_common.deps_apk_arg() | @@ -579,6 +580,9 @@ android_instrumentation_apk = prelude_rule( "licenses": attrs.list(attrs.source(), default = []), "use_split_dex": attrs.option(attrs.bool(), default = None), "primary_dex_patterns": attrs.list(attrs.string(), default = []), + "preprocess_java_classes_bash": attrs.option(attrs.arg(), default = None), + "preprocess_java_classes_cmd": attrs.option(attrs.arg(), default = None), + "preprocess_java_classes_deps": attrs.list(attrs.dep(), default = []), } ), ) @@ -586,7 +590,7 @@ android_instrumentation_apk = prelude_rule( android_instrumentation_test = prelude_rule( name = "android_instrumentation_test", docs = """ - A `android_instrumentation_test()` rule is used to define + An `android_instrumentation_test()` rule is used to define apks that should be used to run Android instrumentation tests. """, examples = """ @@ -626,19 +630,34 @@ android_instrumentation_test = prelude_rule( further = None, attrs = ( # @unsorted-dict-items + buck.inject_test_env_arg() | { "apk": attrs.dep(doc = """ - The APK containing the tests. Can be an `android\\_binary()`, - an `apk\\_genrule()`or an `android\\_instrumentation\\_apk()`. + The APK containing the tests. Can be an `android_binary()`, + an `apk_genrule()` or an `android_instrumentation_apk()`. """), } | buck.test_label_arg() | buck.test_rule_timeout_ms() | { + "clear_package_data": attrs.bool(default = False, doc = """ + Runs `pm clear` on the app and test packages before the test run if set to True. + """), "contacts": attrs.list(attrs.string(), default = []), "default_host_platform": attrs.option(attrs.configuration_label(), default = None), + "disable_animations": attrs.bool(default = False, doc = """ + Disables animations on the emulator if set to True. + """), + "collect_tombstones": attrs.bool(default = False, doc = """ + Checks whether the test generated any tombstones, and downloads them from the emulator if true. + """), + "record_video": attrs.bool(default = False, doc = "Record video of test run and collect it as TRA"), + "log_extractors": attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False, default = {}), "env": attrs.dict(key = attrs.string(), value = attrs.arg(), sorted = False, default = {}), "licenses": attrs.list(attrs.source(), default = []), + "_android_emulators": attrs.option(attrs.transition_dep(cfg = clear_platform_transition, providers = [LocalResourceInfo]), default = None, doc = """ + If provided, local resource of "android_emulators" type will be required to run this test locally and this target will be used to manage it. If omitted, local resource of "android_emulators" type will be ignored even if requested by the test runner. 
+ """), } ), ) @@ -653,10 +672,10 @@ android_library = prelude_rule( """, examples = """ An `android_library` rule used in concert with an - `android\\_resource()`rule. + `android_resource()` rule. This would be a common arrangement for a standard Android Library project as defined by - + [http://developer.android.com/tools/projects/index.html](http://developer.android.com/tools/projects/index.html) ``` @@ -711,12 +730,6 @@ android_library = prelude_rule( Overrides the value in "target\\_level" in the "java" section of `.buckconfig`. """), - "javac": attrs.option(attrs.source(), default = None, doc = """ - Specifies the Java compiler program to use for this rule. - The value is a source path (e.g., //foo/bar:bar). - Overrides the value in "javac" in the "tools" section - of `.buckconfig`. - """), "extra_arguments": attrs.list(attrs.string(), default = [], doc = """ List of additional arguments to pass into the Java compiler. These arguments follow the ones specified in `.buckconfig`. @@ -735,12 +748,14 @@ android_library = prelude_rule( jvm_common.exported_deps() | jvm_common.provided_deps() | jvm_common.exported_provided_deps() | - buck.deps_query_arg() | buck.provided_deps_query_arg() | jvm_common.abi_generation_mode() | jvm_common.source_only_abi_deps() | jvm_common.required_for_source_only_abi() | jvm_common.k2() | + jvm_common.kotlin_compiler_plugins() | + jvm_common.incremental() | + jvm_common.javac() | { "remove_classes": attrs.list(attrs.regex(), default = [], doc = """ List of classes to remove from the output jar. It only removes classes from the target's own @@ -751,10 +766,9 @@ android_library = prelude_rule( "annotation_processors": attrs.list(attrs.string(), default = []), "contacts": attrs.list(attrs.string(), default = []), "default_host_platform": attrs.option(attrs.configuration_label(), default = None), - "extra_non_source_only_abi_kotlinc_arguments": attrs.list(attrs.string(), default = []), "friend_paths": attrs.list(attrs.dep(), default = []), "java_version": attrs.option(attrs.string(), default = None), - "kotlin_compiler_plugins": attrs.dict(key = attrs.source(), value = attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False), sorted = False, default = {}), + "jar_postprocessor": attrs.option(attrs.exec_dep(), default = None), "labels": attrs.list(attrs.string(), default = []), "language": attrs.option(attrs.enum(JvmLanguage), default = None), "licenses": attrs.list(attrs.source(), default = []), @@ -762,7 +776,6 @@ android_library = prelude_rule( "maven_coords": attrs.option(attrs.string(), default = None), "never_mark_as_unused_dependency": attrs.option(attrs.bool(), default = None), "on_unused_dependencies": attrs.option(attrs.enum(UnusedDependenciesAction), default = None), - "plugins": attrs.list(attrs.dep(), default = []), "proguard_config": attrs.option(attrs.source(), default = None), "resource_union_package": attrs.option(attrs.string(), default = None), "resources_root": attrs.option(attrs.source(), default = None), @@ -770,7 +783,7 @@ android_library = prelude_rule( "source_abi_verification_mode": attrs.option(attrs.enum(SourceAbiVerificationMode), default = None), "use_jvm_abi_gen": attrs.option(attrs.bool(), default = None), } - ), + ) | jvm_common.plugins(), ) android_manifest = prelude_rule( @@ -778,7 +791,7 @@ android_manifest = prelude_rule( docs = """ An `android_manifest()` rule is used to generate an [Android - Manifest](http://developer.android.com/guide/topics/manifest/manifest-intro.html) to be used by `android\\_binary()`and 
`android\\_aar()`rules. This + Manifest](http://developer.android.com/guide/topics/manifest/manifest-intro.html) to be used by `android_binary()` and `android_aar()` rules. This rule takes a skeleton manifest, and merges it with manifests found in any deps. """, examples = """ @@ -822,20 +835,20 @@ android_manifest = prelude_rule( ``` - You could also use a `genrule()`to generate the manifest file and reference the - `build target`in the `skeleton` argument. + You could also use a `genrule()` to generate the manifest file and reference the + `build target` in the `skeleton` argument. """, further = None, attrs = ( # @unsorted-dict-items { "skeleton": attrs.source(doc = """ - Either a `build target`or a path to a file representing the manifest that + Either a `build target` or a path to a file representing the manifest that will be merged with any manifests associated with this rule's `deps`. """), "deps": attrs.list(attrs.dep(), default = [], doc = """ A collection of dependencies that includes android\\_library rules. The manifest files of the - `android\\_library()`rules will be filtered out to become dependent source files for + `android_library()` rules will be filtered out to become dependent source files for the manifest. """), "contacts": attrs.list(attrs.string(), default = []), @@ -865,7 +878,7 @@ android_prebuilt_aar = prelude_rule( docs = """ An `android_prebuilt_aar()` rule takes an `.aar` file and makes it available as an Android dependency. As expected, - an `android\\_binary()`that transitively depends on + an `android_binary()` that transitively depends on an `android_prebuilt_aar()` will include its contents in the generated APK. @@ -918,6 +931,8 @@ android_prebuilt_aar = prelude_rule( "contacts": attrs.list(attrs.string(), default = []), "default_host_platform": attrs.option(attrs.configuration_label(), default = None), "deps": attrs.list(attrs.dep(), default = []), + "desugar_deps": attrs.list(attrs.dep(), default = []), + "for_primary_apk": attrs.bool(default = False), "labels": attrs.list(attrs.string(), default = []), "licenses": attrs.list(attrs.source(), default = []), "maven_coords": attrs.option(attrs.string(), default = None), @@ -1057,23 +1072,34 @@ apk_genrule = prelude_rule( { "apk": attrs.option(attrs.dep(), default = None, doc = """ The input `android_binary()` rule. The path to the APK can be - accessed with the `$APK` shell variable. + accessed with the `$APK` shell variable. Only one of `apk` or + `aab` can be provided. """), + "keystore": attrs.option(attrs.dep(), default = None), } | genrule_common.srcs_arg() | genrule_common.cmd_arg() | genrule_common.bash_arg() | genrule_common.cmd_exe_arg() | genrule_common.type_arg() | + genrule_common.weight_arg() | { "out": attrs.option(attrs.string(), default = None, doc = """ - This argument only exists for historical reasons and it does not have any - effect. It will be deprecated and removed in the future. + The name of the output file or directory. The complete path to this + argument is provided to the shell command through + the `OUT` environment variable. Only one of `out` + or `outs` may be present. + + For an apk_genrule the output should be a '.apk' or '.aab' file. """), } | genrule_common.environment_expansion_separator() | { - "aab": attrs.option(attrs.dep(), default = None), + "aab": attrs.option(attrs.dep(), default = None, doc = """ + The input `android_binary()` rule. The path to the AAB can be + accessed with the `$AAB` shell variable. Only one of `apk` or + `aab` can be provided. 
+ """), "cacheable": attrs.option(attrs.bool(), default = None), "contacts": attrs.list(attrs.string(), default = []), "default_host_platform": attrs.option(attrs.configuration_label(), default = None), @@ -1154,7 +1180,7 @@ keystore = prelude_rule( docs = """ A `keystore()` contains the data for a key pair created by the `keytool` executable that comes - with the JDK. This is a required input for an `android\\_binary()`rule. + with the JDK. This is a required input for an `android_binary()` rule. """, examples = None, further = None, @@ -1296,6 +1322,7 @@ prebuilt_jar = prelude_rule( `binary_jar` is already built, there should be nothing to build, so this should be empty. """), + "desugar_deps": attrs.list(attrs.dep(), default = []), "contacts": attrs.list(attrs.string(), default = []), "default_host_platform": attrs.option(attrs.configuration_label(), default = None), "generate_abi": attrs.bool(default = False), @@ -1315,7 +1342,7 @@ prebuilt_native_library = prelude_rule( (i.e., `.so` files) for Android. """, examples = """ - Most of the time, a `prebuilt_native_library` is private to the `android\\_library()`that uses it: + Most of the time, a `prebuilt_native_library` is private to the `android_library()` that uses it: ``` @@ -1380,13 +1407,14 @@ robolectric_test = prelude_rule( further = None, attrs = ( # @unsorted-dict-items + buck.inject_test_env_arg() | { "robolectric_runtime_dependency": attrs.option(attrs.source(), default = None, doc = """ Robolectric only runs in offline mode with buck. Specify the relative directory containing all the jars Robolectric uses at runtime. """), "robolectric_manifest": attrs.source(doc = """ - An [Android Manifest](http://developer.android.com/guide/topics/manifest/manifest-intro.html) for the rule to declare any permissions or intents it may need or want to handle. May either be a file or a `android\\_manifest()`target. + An [Android Manifest](http://developer.android.com/guide/topics/manifest/manifest-intro.html) for the rule to declare any permissions or intents it may need or want to handle. May either be a file or an `android_manifest()` target. """), "extra_kotlinc_arguments": attrs.list(attrs.string(), default = [], doc = """ List of additional arguments to pass into the Kotlin compiler. 
@@ -1401,18 +1429,15 @@ robolectric_test = prelude_rule( "default_cxx_platform": attrs.option(attrs.string(), default = None), "default_host_platform": attrs.option(attrs.configuration_label(), default = None), "deps": attrs.list(attrs.dep(), default = []), - "deps_query": attrs.option(attrs.query(), default = None), "env": attrs.dict(key = attrs.string(), value = attrs.arg(), sorted = False, default = {}), "exported_deps": attrs.list(attrs.dep(), default = []), "exported_provided_deps": attrs.list(attrs.dep(), default = []), "extra_arguments": attrs.list(attrs.string(), default = []), - "extra_non_source_only_abi_kotlinc_arguments": attrs.list(attrs.string(), default = []), "fork_mode": attrs.enum(ForkMode, default = "none"), "friend_paths": attrs.list(attrs.dep(), default = []), + "jar_postprocessor": attrs.option(attrs.exec_dep(), default = None), "java_version": attrs.option(attrs.string(), default = None), "java": attrs.option(attrs.dep(), default = None), - "javac": attrs.option(attrs.source(), default = None), - "kotlin_compiler_plugins": attrs.dict(key = attrs.source(), value = attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False), sorted = False, default = {}), "labels": attrs.list(attrs.string(), default = []), "language": attrs.option(attrs.enum(JvmLanguage), default = None), "licenses": attrs.list(attrs.source(), default = []), @@ -1423,7 +1448,6 @@ robolectric_test = prelude_rule( "maven_coords": attrs.option(attrs.string(), default = None), "never_mark_as_unused_dependency": attrs.option(attrs.bool(), default = None), "on_unused_dependencies": attrs.option(attrs.enum(UnusedDependenciesAction), default = None), - "plugins": attrs.list(attrs.dep(), default = []), "preferred_density_for_binary_resources": attrs.option(attrs.string(), default = None), "proguard_config": attrs.option(attrs.source(), default = None), "provided_deps": attrs.list(attrs.dep(), default = []), @@ -1450,9 +1474,16 @@ robolectric_test = prelude_rule( "unbundled_resources_root": attrs.option(attrs.source(allow_directory = True), default = None), "use_cxx_libraries": attrs.option(attrs.bool(), default = None), "use_dependency_order_classpath": attrs.option(attrs.bool(), default = None), + "used_as_dependency_deprecated_do_not_use": attrs.bool(default = False), "use_jvm_abi_gen": attrs.option(attrs.bool(), default = None), "vm_args": attrs.list(attrs.arg(), default = []), - } | jvm_common.k2() + } | + jvm_common.k2() | + jvm_common.incremental() | + jvm_common.plugins() | + jvm_common.kotlin_compiler_plugins() | + jvm_common.javac() | + re_test_common.test_args() ), ) diff --git a/prelude/decls/apple_common.bzl b/prelude/decls/apple_common.bzl deleted file mode 100644 index 3c44e6c83e4df..0000000000000 --- a/prelude/decls/apple_common.bzl +++ /dev/null @@ -1,141 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under both the MIT license found in the -# LICENSE-MIT file in the root directory of this source tree and the Apache -# License, Version 2.0 found in the LICENSE-APACHE file in the root directory -# of this source tree. - -# TODO(cjhopman): This was generated by scripts/hacks/rules_shim_with_docs.py, -# but should be manually edited going forward. 
There may be some errors in -# the generated docs, and so those should be verified to be accurate and -# well-formatted (and then delete this TODO) - -def _headers_arg(): - return { - "headers": attrs.named_set(attrs.source(), sorted = True, default = [], doc = """ - The set of header files that are made available for inclusion to the source files in this - target. These should be specified as either a list of header files or a dictionary of header names - to header files. The header names can contain forward slashes (`/`). If a list of - header files is specified, the headers can be imported - with `#import "$HEADER_PATH_PREFIX/$HEADER_NAME"` or `#import - "$HEADER_NAME"`, where `$HEADER_PATH_PREFIX` is the value of - the target's `header_path_prefix` attribute, and `$HEADER_NAME` is - the filename of the header file. If a dictionary is specified, each header can be imported - with `#import "$HEADER_NAME"`, where `$HEADER_NAME` is the key - corresponding to this file. In this case, the `header_path_prefix` attribute is - ignored. In either case, quotes in the import statements can be replaced with angle brackets. -"""), - } - -def _exported_headers_arg(): - return { - "exported_headers": attrs.named_set(attrs.source(), sorted = True, default = [], doc = """ - The set of header files that are made available for inclusion to the source files in this - target and all targets that transitively depend on this one. These should be specified as - either a list of header files or a dictionary of header names - to header files. The header names can contain forward slashes (`/`). If a list of - header files is specified, the headers can be imported - with `#import "$HEADER_PATH_PREFIX/$HEADER_NAME"` or, if a header file that belongs to - the same rule is being imported, with `#import - "$HEADER_NAME"`, where `$HEADER_PATH_PREFIX` is the value of - the target's `header_path_prefix` attribute, and `$HEADER_NAME` is - the filename of the header file. If a dictionary is specified, each header can be imported - with `#import "$HEADER_NAME"`, where `$HEADER_NAME` is the key - corresponding to this file. In this case, the `header_path_prefix` attribute is - ignored. In either case, quotes in the import statements can be replaced with angle brackets. -"""), - } - -def _header_path_prefix_arg(): - return { - "header_path_prefix": attrs.option(attrs.string(), default = None, doc = """ - A path prefix when including headers of this target. For example, headers from a library defined - using - - ``` - - apple_library( - name = "Library", - headers = glob(["**/*.h"]), - header_path_prefix = "Lib", - ) - - ``` - can be imported using following mapping - - ``` - - Library/SubDir/Header1.h -> Lib/Header1.h - Library/Header2.h -> Lib/Header2.h - - ``` - Defaults to the short name of the target. Can contain forward slashes (`/`), but - cannot start with one. See `headers` for more information. -"""), - } - -def _frameworks_arg(): - return { - "frameworks": attrs.list(attrs.string(), default = [], doc = """ - A list of system frameworks that the code in this target uses. Each entry should be a path - starting with `$SDKROOT` or `$PLATFORM_DIR` to denote that the rest of the - path is relative to the root of the SDK used for the build or to the platform toolchain - directory. -"""), - } - -def _target_sdk_version(): - return { - "target_sdk_version": attrs.option(attrs.string(), default = None, doc = """ - The minimum OS version that the library target should support, overriding the minimum set in - `.buckconfig`. 
When set, Buck will automatically add flags to both Objective-C and - Swift compilation that will allow the use of the new APIs without guarding code inside availability - checks. -"""), - } - -def _info_plist_arg(): - return { - "info_plist": attrs.source(doc = """ - A path to an `Info.plist` file that will be placed in the bundle. The specified file - will be processed by substituting variable names with their values - (see `info_plist_substitutions` for more information). -"""), - } - -def _info_plist_substitutions_arg(): - return { - "info_plist_substitutions": attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False, default = {}, doc = """ - A dictionary that assigns variable names to their values. It is used for variable - substitution when processing the file specified in `info_plist`. For example if this - argument is set to `{\'VAR\': \'MyValue\'}`, then each occurrence of `$(VAR)` or `${VAR}` in the file will be replaced by `MyValue`. -"""), - } - -def _extra_xcode_sources(): - return { - "extra_xcode_sources": attrs.list(attrs.source(), default = [], doc = """ - When the project is generated, this is the list of files that will added to the build phase - "Compile Sources" of the given target. -"""), - } - -def _extra_xcode_files(): - return { - "extra_xcode_files": attrs.list(attrs.source(), default = [], doc = """ - When the project is generated, this is the list of files that will added to the project. - Those files won't be added to the build phase "Compile Sources". -"""), - } - -apple_common = struct( - headers_arg = _headers_arg, - exported_headers_arg = _exported_headers_arg, - header_path_prefix_arg = _header_path_prefix_arg, - frameworks_arg = _frameworks_arg, - target_sdk_version = _target_sdk_version, - info_plist_arg = _info_plist_arg, - info_plist_substitutions_arg = _info_plist_substitutions_arg, - extra_xcode_sources = _extra_xcode_sources, - extra_xcode_files = _extra_xcode_files, -) diff --git a/prelude/decls/apple_rules.bzl b/prelude/decls/apple_rules.bzl new file mode 100644 index 0000000000000..ff81fabbfb0f6 --- /dev/null +++ b/prelude/decls/apple_rules.bzl @@ -0,0 +1,1157 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# TODO(cjhopman): This was generated by scripts/hacks/rules_shim_with_docs.py, +# but should be manually edited going forward. 
There may be some errors in +# the generated docs, and so those should be verified to be accurate and +# well-formatted (and then delete this TODO) + +load("@prelude//apple:apple_common.bzl", "apple_common") +load("@prelude//apple:apple_rules_impl_utility.bzl", "apple_dsymutil_attrs", "apple_test_extra_attrs", "get_apple_toolchain_attr") +load("@prelude//apple:apple_test_host_app_transition.bzl", "apple_test_host_app_transition") +load("@prelude//apple:apple_toolchain_types.bzl", "AppleToolsInfo") +load("@prelude//apple:apple_universal_executable.bzl", "apple_universal_executable_impl") +load("@prelude//apple:cxx_universal_executable.bzl", "cxx_universal_executable_impl") +load("@prelude//apple:resource_groups.bzl", "RESOURCE_GROUP_MAP_ATTR") +load("@prelude//apple/user:cpu_split_transition.bzl", "cpu_split_transition") +load("@prelude//cxx:link_groups_types.bzl", "LINK_GROUP_MAP_ATTR") +load("@prelude//linking:types.bzl", "Linkage") +load("@prelude//decls/toolchains_common.bzl", "toolchains_common") +load(":common.bzl", "CxxRuntimeType", "CxxSourceType", "HeadersAsRawHeadersMode", "IncludeType", "LinkableDepType", "buck", "prelude_rule") +load(":cxx_common.bzl", "cxx_common") +load(":native_common.bzl", "native_common") + +AdditionalActions = ["pre_scheme_actions", "post_scheme_actions"] + +AppleBundleExtension = ["app", "framework", "appex", "plugin", "bundle", "xctest", "dsym", "xpc", "prefpane", "qlgenerator"] + +AppleResourceBundleDestination = ["resources", "frameworks", "executables", "plugins", "xpcservices", "loginitems", "systemextensions"] + +LaunchStyle = ["auto", "wait"] + +SchemeActionType = ["build", "launch", "test", "profile", "analyze", "archive"] + +WatchInterface = ["main", "complication", "dynamic_notification", "static_notification"] + +apple_asset_catalog = prelude_rule( + name = "apple_asset_catalog", + docs = """ + An `apple_asset_catalog()` rule contains resources stored in Apple asset catalog + directories. This rule does not have any output on its own and can be built only as a dependency + (either direct or transitive) of an `apple_bundle()` rule, in which case all `apple_asset_catalog()` rules + that the bundle rule depends on are merged and placed into the final output bundle together. + """, + examples = """ + ``` + + apple_asset_catalog( + name = 'MyAssetCatalog', + dirs = [ + 'MyResources.xcassets', + ], + ) + + # An asset catalog with an app icon and launch image + apple_asset_catalog( + name = 'AssetCatalog', + dirs = [ 'AssetCatalog.xcassets' ], + app_icon = 'Icon', + launch_image = 'LaunchImage', + ) + + ``` + """, + further = None, + attrs = ( + # @unsorted-dict-items + { + "dirs": attrs.list(attrs.source(), default = [], doc = """ + Set of paths of Apple asset catalogs contained by this rule. All paths have to end with the `.xcassets` extension and be compatible with the asset catalog format used by Xcode. + """), + "app_icon": attrs.option(attrs.string(), default = None, doc = """ + An optional reference to a `.appiconset` containing an image set representing an + application icon. (The extension itself should not be included.)
This parameter + may be specified at most once in a given `apple_bundle`'s transitive dependencies. + """), + "launch_image": attrs.option(attrs.string(), default = None, doc = """ + An optional reference to a `.launchimage` containing an image set representing an + application launch image. (The extension itself should not be included.) This parameter + may be specified at most once in a given `apple_bundle`'s transitive dependencies. + """), + "contacts": attrs.list(attrs.string(), default = []), + "default_host_platform": attrs.option(attrs.configuration_label(), default = None), + "labels": attrs.list(attrs.string(), default = []), + "licenses": attrs.list(attrs.source(), default = []), + } + ), +) + +apple_binary = prelude_rule( + name = "apple_binary", + docs = """ + An `apple_binary()` rule builds a native executable - such as an iOS or OSX app - from + the supplied set of Objective-C/C++ source files and dependencies. It is similar to + a `cxx_binary()` rule with which it shares many attributes. In addition + to those common attributes, `apple_binary()` has some additional attributes + that are specific to binaries intended to be built using the Apple toolchain. + Note, however, that `apple_binary()` and `cxx_binary()` differ + in the way that they import header files, in order to better accommodate existing conventions. + See the sections for the `headers` and `exported_headers` attributes for more details. + + + Buck enables you to override components of the Apple toolchain with + alternate tools, either from the Xcode search paths or from directories + that you specify. + See `.buckconfig` + for more information. + """, + examples = """ + ``` + + apple_binary( + name = 'MyBinary', + deps = [ + ':MyLibrary', + '//Libraries:AnotherLibrary', + ], + preprocessor_flags = ['-fobjc-arc'], + headers = [ + 'MyHeader.h', + ], + srcs = [ + 'MySource.m', + ], + frameworks = [ + '$SDKROOT/System/Library/Frameworks/UIKit.framework', + '$SDKROOT/System/Library/Frameworks/Foundation.framework', + ], + ) + + ``` + """, + further = None, + attrs = ( + # @unsorted-dict-items + cxx_common.srcs_arg() | + cxx_common.platform_srcs_arg() | + apple_common.headers_arg() | + { + "entitlements_file": attrs.option(attrs.source(), default = None, doc = """ + An optional name of a plist file to be embedded in the binary. Some platforms like + `iphonesimulator` require this to run properly.
+ """), + } | + apple_common.exported_headers_arg() | + apple_common.header_path_prefix_arg() | + apple_common.frameworks_arg() | + cxx_common.preprocessor_flags_arg() | + cxx_common.exported_preprocessor_flags_arg(exported_preprocessor_flags_type = attrs.list(attrs.arg(), default = [])) | + cxx_common.compiler_flags_arg() | + cxx_common.platform_compiler_flags_arg() | + cxx_common.linker_extra_outputs_arg() | + cxx_common.linker_flags_arg() | + cxx_common.exported_linker_flags_arg() | + cxx_common.platform_linker_flags_arg() | + native_common.link_style() | + native_common.link_group_public_deps_label() | + apple_common.target_sdk_version() | + apple_common.extra_xcode_sources() | + apple_common.extra_xcode_files() | + apple_common.serialize_debugging_options_arg() | + apple_common.uses_explicit_modules_arg() | + apple_common.apple_sanitizer_compatibility_arg() | + { + "bridging_header": attrs.option(attrs.source(), default = None), + "can_be_asset": attrs.option(attrs.bool(), default = None), + "contacts": attrs.list(attrs.string(), default = []), + "cxx_runtime_type": attrs.option(attrs.enum(CxxRuntimeType), default = None), + "default_host_platform": attrs.option(attrs.configuration_label(), default = None), + "default_platform": attrs.option(attrs.string(), default = None), + "defaults": attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False, default = {}), + "deps": attrs.list(attrs.dep(), default = []), + "devirt_enabled": attrs.bool(default = False), + "diagnostics": attrs.dict(key = attrs.string(), value = attrs.source(), sorted = False, default = {}), + "enable_cxx_interop": attrs.bool(default = False), + "executable_name": attrs.option(attrs.string(), default = None), + "exported_deps": attrs.list(attrs.dep(), default = []), + "exported_header_style": attrs.enum(IncludeType, default = "local"), + "exported_lang_platform_preprocessor_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg()))), sorted = False, default = {}), + "exported_lang_preprocessor_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.arg()), sorted = False, default = {}), + "exported_platform_deps": attrs.list(attrs.tuple(attrs.regex(), attrs.set(attrs.dep(), sorted = True)), default = []), + "exported_platform_headers": attrs.list(attrs.tuple(attrs.regex(), attrs.named_set(attrs.source(), sorted = True)), default = []), + "exported_platform_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), + "exported_platform_preprocessor_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), + "exported_post_linker_flags": attrs.list(attrs.arg(), default = []), + "exported_post_platform_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), + "fat_lto": attrs.bool(default = False), + "focused_list_target": attrs.option(attrs.dep(), default = None), + "force_static": attrs.option(attrs.bool(), default = None), + "header_namespace": attrs.option(attrs.string(), default = None), + "headers_as_raw_headers_mode": attrs.option(attrs.enum(HeadersAsRawHeadersMode), default = None), + "import_obj_c_forward_declarations": attrs.bool(default = True), + "include_directories": attrs.set(attrs.string(), sorted = True, default = []), + "info_plist": attrs.option(attrs.source(), default = None), + "info_plist_substitutions": attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False, default = {}), + 
"labels": attrs.list(attrs.string(), default = []), + "lang_compiler_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.arg()), sorted = False, default = {}), + "lang_platform_compiler_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg()))), sorted = False, default = {}), + "lang_platform_preprocessor_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg()))), sorted = False, default = {}), + "lang_preprocessor_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.arg()), sorted = False, default = {}), + "libraries": attrs.list(attrs.string(), default = []), + "licenses": attrs.list(attrs.source(), default = []), + "link_group": attrs.option(attrs.string(), default = None), + "link_group_map": LINK_GROUP_MAP_ATTR, + "link_whole": attrs.option(attrs.bool(), default = None), + "modular": attrs.bool(default = False), + "module_name": attrs.option(attrs.string(), default = None), + "module_requires_cxx": attrs.bool(default = False), + "platform_deps": attrs.list(attrs.tuple(attrs.regex(), attrs.set(attrs.dep(), sorted = True)), default = []), + "platform_headers": attrs.list(attrs.tuple(attrs.regex(), attrs.named_set(attrs.source(), sorted = True)), default = []), + "platform_preprocessor_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), + "post_linker_flags": attrs.list(attrs.arg(), default = []), + "post_platform_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), + "precompiled_header": attrs.option(attrs.source(), default = None), + "preferred_linkage": attrs.option(attrs.enum(Linkage.values()), default = None), + "prefix_header": attrs.option(attrs.source(), default = None), + "public_include_directories": attrs.set(attrs.string(), sorted = True, default = []), + "public_system_include_directories": attrs.set(attrs.string(), sorted = True, default = []), + "raw_headers": attrs.set(attrs.source(), sorted = True, default = []), + "reexport_all_header_dependencies": attrs.option(attrs.bool(), default = None), + "sdk_modules": attrs.list(attrs.string(), default = []), + "soname": attrs.option(attrs.string(), default = None), + "static_library_basename": attrs.option(attrs.string(), default = None), + "supported_platforms_regex": attrs.option(attrs.regex(), default = None), + "supports_merged_linking": attrs.option(attrs.bool(), default = None), + "swift_compiler_flags": attrs.list(attrs.arg(), default = []), + "swift_module_skip_function_bodies": attrs.bool(default = True), + "swift_version": attrs.option(attrs.string(), default = None), + "thin_lto": attrs.bool(default = False), + "use_submodules": attrs.bool(default = True), + "uses_cxx_explicit_modules": attrs.bool(default = False), + "uses_modules": attrs.bool(default = False), + } | + buck.allow_cache_upload_arg() + ), +) + +apple_bundle = prelude_rule( + name = "apple_bundle", + docs = """ + An `apple_bundle()` rule takes an Apple binary and all of the resources and asset + catalogs in the rule's transitive dependencies and generates a bundle containing all of those files. + Optionally the generated bundle can also be signed using specified provisioning profiles. + + + Code signing will embed entitlements pointed to by the `entitlements_file` arg in + the bundle's `apple_binary`. This is the preferred way to specify entitlements + when building with Buck. 
+ + If the entitlements file is not present, it falls back to the `CODE_SIGN_ENTITLEMENTS` entry in + `info_plist_substitutions`. + + + If, after these checks, an entitlements file is still not specified, it will be derived from + the entitlements of the selected provisioning profile. Provisioning profiles will be selected + from profiles pointed to by `apple.provisioning_profile_search_path`, based on a + non-expired profile that matches the bundle id and entitlements. + """, + examples = """ + ``` + + apple_bundle( + name = 'AppBundle', + binary = ':MyBinary', + extension = 'app', + info_plist = 'Info.plist', + ) + + ``` + + ``` + + # iOS app with embedded WatchOS 2.0 app/extension + apple_bundle( + name = 'DemoWatchAppExtension', + binary = ':DemoWatchAppExtensionBinary', + extension = 'appex', + info_plist = 'WatchExtension/Resources/Info.plist', + ) + + apple_bundle( + name = 'DemoWatchApp', + binary = ':DemoWatchAppBinary', + deps = [':DemoWatchAppResources', ':DemoWatchAppExtension'], + extension = 'app', + info_plist = 'WatchApplication/Info.plist', + ) + + apple_bundle( + name = 'DemoApp', + binary = ':DemoAppBinary', + deps = [':DemoWatchApp#watch'], + extension = 'app', + info_plist = 'Info.plist', + ) + + ``` + + ``` + + # iOS app using safeAreaInsets delivering to iOS 9.x + apple_bundle( + name = 'DemoIBApp', + binary = ':DemoIBAppBinary', + deps = [':DemoIBAppResources'], + extension = 'app', + ibtool_flags = ["--minimum-deployment-target", "9.0"], + info_plist = 'Info.plist', + ) + + ``` + """, + further = None, + attrs = ( + # @unsorted-dict-items + { + "deps": attrs.list(attrs.dep(), default = [], doc = """ + A list of dependencies of this bundle as build targets. You can embed application + extensions by specifying the extension's bundle target. To include a WatchKit app, append the + flavor `#watch` to the target specification. Buck will automatically substitute the appropriate + platform flavor (either `watchsimulator` or `watchos`) based on the parent. + """), + "product_name": attrs.option(attrs.string(), default = None, doc = """ + The name of the resulting bundle and binary. This setting behaves like the `PRODUCT_NAME` Xcode build setting. + For example, if your rule is named "MyApp" and its extension is "app", Buck generates a MyApp.app bundle by default. + If you set the product name to "SuperApp", the bundle is named "SuperApp.app" instead. + """), + "extension": attrs.one_of(attrs.enum(AppleBundleExtension), attrs.string(), doc = """ + The extension of the generated bundle. For example `'app'` for an application bundle + or `'appex'` for an application extension bundle. + """), + "binary": attrs.option(attrs.dep(), default = None, doc = """ + A `build target` identifying + an `apple_binary()` rule or + an `apple_library()` rule whose output will + be used as the main executable binary of the generated bundle. The required rule type depends + on the value in the `extension` attribute. For example, application bundles expect + a binary (e.g. `'//Apps/MyApp:MyApp'`), application extension bundles expect a shared + library (e.g.
`'//Libraries/MyLibrary:MyLibrary#shared'`. + """), + } | + apple_common.info_plist_arg() | + apple_common.info_plist_substitutions_arg() | + apple_common.privacy_manifest_arg() | + { + "asset_catalogs_compilation_options": attrs.dict(key = attrs.string(), value = attrs.any(), default = {}, doc = """ + A dict holding parameters for the asset catalog compiler (actool). Its options include: + + * `notices` (defaults to `True`) + * `warnings` (defaults to `True`) + * `errors` (defaults to `True`) + * `compress_pngs` (defaults to `True`) + * `optimization` (defaults to `'space'`) + * `output_format` (defaults to `'human-readable-text'`) + * `extra_flags` (defaults to `[]`) + """), + "ibtool_flags": attrs.option(attrs.list(attrs.string()), default = None, doc = """ + List of flags to be passed to ibtool during Interface Builder file compilation. + """), + "codesign_flags": attrs.list(attrs.string(), default = []), + "codesign_identity": attrs.option(attrs.string(), default = None), + "contacts": attrs.list(attrs.string(), default = []), + "default_host_platform": attrs.option(attrs.configuration_label(), default = None), + "default_platform": attrs.option(attrs.string(), default = None), + "ibtool_module_flag": attrs.option(attrs.bool(), default = None), + "incremental_bundling_enabled": attrs.option(attrs.bool(), default = None), + "labels": attrs.list(attrs.string(), default = []), + "licenses": attrs.list(attrs.source(), default = []), + "platform_binary": attrs.option(attrs.list(attrs.tuple(attrs.regex(), attrs.dep())), default = None), + "resource_group": attrs.option(attrs.string(), default = None), + "resource_group_map": attrs.option(RESOURCE_GROUP_MAP_ATTR, default = None), + "skip_copying_swift_stdlib": attrs.option(attrs.bool(), default = None), + "try_skip_code_signing": attrs.option(attrs.bool(), default = None), + "xcode_product_type": attrs.option(attrs.string(), default = None), + } + ), +) + +apple_library = prelude_rule( + name = "apple_library", + docs = """ + An `apple_library()` rule represents a set of Objective-C/C++/Swift + source files and is similar to a `cxx_library()` rule with which it shares many + attributes. In addition to those common attributes, `apple_library()` has some additional attributes + that are specific to binaries intended to be built using the Apple toolchain. + Note, however, that `apple_library()` and `cxx_library()` differ + in the way that they import header files, in order to better accommodate existing conventions. + See the sections for the `headers` and `exported_headers` attributes for more details. + + + Buck enables you to override components of the Apple toolchain with + alternate tools, either from the Xcode search paths or from directories + that you specify. + See `.buckconfig` for more information.
+ """, + examples = """ + ``` + + apple_library( + name = 'MyLibrary', + deps = [ + ':OtherLibrary', + '//Libraries:YetAnotherLibrary', + ], + preprocessor_flags = ['-fobjc-arc'], + headers = [ + 'MyHeader.h', + ], + srcs = [ + 'MySource.m', + 'MySource.swift', + ], + frameworks = [ + '$SDKROOT/System/Library/Frameworks/UIKit.framework', + '$SDKROOT/System/Library/Frameworks/Foundation.framework', + ], + ) + + ``` + """, + further = None, + attrs = ( + # @unsorted-dict-items + cxx_common.srcs_arg() | + cxx_common.platform_srcs_arg() | + apple_common.headers_arg() | + apple_common.exported_headers_arg() | + apple_common.header_path_prefix_arg() | + cxx_common.header_namespace_arg() | + apple_common.frameworks_arg() | + cxx_common.preprocessor_flags_arg() | + cxx_common.exported_preprocessor_flags_arg(exported_preprocessor_flags_type = attrs.list(attrs.arg(), default = [])) | + cxx_common.compiler_flags_arg() | + cxx_common.platform_compiler_flags_arg() | + cxx_common.linker_extra_outputs_arg() | + cxx_common.linker_flags_arg() | + cxx_common.exported_linker_flags_arg() | + cxx_common.exported_platform_linker_flags_arg() | + apple_common.target_sdk_version() | + native_common.preferred_linkage(preferred_linkage_type = attrs.option(attrs.enum(Linkage.values()), default = None)) | + native_common.link_style() | + native_common.link_whole(link_whole_type = attrs.option(attrs.bool(), default = None)) | + cxx_common.reexport_all_header_dependencies_arg() | + cxx_common.exported_deps_arg() | + apple_common.extra_xcode_sources() | + apple_common.extra_xcode_files() | + apple_common.serialize_debugging_options_arg() | + apple_common.uses_explicit_modules_arg() | + apple_common.meta_apple_library_validation_enabled_arg() | + { + "bridging_header": attrs.option(attrs.source(), default = None), + "can_be_asset": attrs.option(attrs.bool(), default = None), + "contacts": attrs.list(attrs.string(), default = []), + "cxx_runtime_type": attrs.option(attrs.enum(CxxRuntimeType), default = None), + "default_host_platform": attrs.option(attrs.configuration_label(), default = None), + "default_platform": attrs.option(attrs.string(), default = None), + "defaults": attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False, default = {}), + "deps": attrs.list(attrs.dep(), default = []), + "devirt_enabled": attrs.bool(default = False), + "diagnostics": attrs.dict(key = attrs.string(), value = attrs.source(), sorted = False, default = {}), + "enable_cxx_interop": attrs.bool(default = False), + "executable_name": attrs.option(attrs.string(), default = None), + "exported_header_style": attrs.enum(IncludeType, default = "local"), + "exported_lang_platform_preprocessor_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg()))), sorted = False, default = {}), + "exported_lang_preprocessor_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.arg()), sorted = False, default = {}), + "exported_platform_deps": attrs.list(attrs.tuple(attrs.regex(), attrs.set(attrs.dep(), sorted = True)), default = []), + "exported_platform_headers": attrs.list(attrs.tuple(attrs.regex(), attrs.named_set(attrs.source(), sorted = True)), default = []), + "exported_platform_preprocessor_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), + "exported_post_linker_flags": attrs.list(attrs.arg(), default = []), + "exported_post_platform_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), 
default = []), + "fat_lto": attrs.bool(default = False), + "focused_list_target": attrs.option(attrs.dep(), default = None), + "force_static": attrs.option(attrs.bool(), default = None), + "headers_as_raw_headers_mode": attrs.option(attrs.enum(HeadersAsRawHeadersMode), default = None), + "import_obj_c_forward_declarations": attrs.bool(default = True), + "include_directories": attrs.set(attrs.string(), sorted = True, default = []), + "info_plist": attrs.option(attrs.source(), default = None), + "info_plist_substitutions": attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False, default = {}), + "labels": attrs.list(attrs.string(), default = []), + "lang_compiler_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.arg()), sorted = False, default = {}), + "lang_platform_compiler_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg()))), sorted = False, default = {}), + "lang_platform_preprocessor_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg()))), sorted = False, default = {}), + "lang_preprocessor_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.arg()), sorted = False, default = {}), + "libraries": attrs.list(attrs.string(), default = []), + "licenses": attrs.list(attrs.source(), default = []), + "link_group": attrs.option(attrs.string(), default = None), + "link_group_map": LINK_GROUP_MAP_ATTR, + "modular": attrs.bool(default = False), + "module_name": attrs.option(attrs.string(), default = None), + "module_requires_cxx": attrs.bool(default = False), + "platform_deps": attrs.list(attrs.tuple(attrs.regex(), attrs.set(attrs.dep(), sorted = True)), default = []), + "platform_headers": attrs.list(attrs.tuple(attrs.regex(), attrs.named_set(attrs.source(), sorted = True)), default = []), + "platform_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), + "platform_preprocessor_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), + "post_linker_flags": attrs.list(attrs.arg(), default = []), + "post_platform_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), + "precompiled_header": attrs.option(attrs.source(), default = None), + "prefix_header": attrs.option(attrs.source(), default = None), + "public_framework_headers": attrs.named_set(attrs.source(), sorted = True, default = []), + "public_include_directories": attrs.set(attrs.string(), sorted = True, default = []), + "public_system_include_directories": attrs.set(attrs.string(), sorted = True, default = []), + "raw_headers": attrs.set(attrs.source(), sorted = True, default = []), + "sdk_modules": attrs.list(attrs.string(), default = []), + "soname": attrs.option(attrs.string(), default = None), + "static_library_basename": attrs.option(attrs.string(), default = None), + "supported_platforms_regex": attrs.option(attrs.regex(), default = None), + "supports_merged_linking": attrs.option(attrs.bool(), default = None), + "swift_compiler_flags": attrs.list(attrs.arg(), default = []), + "swift_module_skip_function_bodies": attrs.bool(default = True), + "swift_version": attrs.option(attrs.string(), default = None), + "thin_lto": attrs.bool(default = False), + "use_submodules": attrs.bool(default = True), + "uses_cxx_explicit_modules": attrs.bool(default = False), + "uses_modules": attrs.bool(default = False), + } | + 
buck.allow_cache_upload_arg() + ), +) + +apple_package = prelude_rule( + name = "apple_package", + docs = """ + An `apple_package()` rule takes the output of + an `apple_bundle()` rule and compresses it into + an IPA (iOS App Store Package) file. + + This rule can be customized using config options in `.buckconfig`. + """, + examples = """ + ``` + + apple_package( + name = 'AppPackage', + bundle = ':AppBundle', + ) + + ``` + """, + further = None, + attrs = ( + # @unsorted-dict-items + { + "bundle": attrs.dep(doc = """ + A build target identifying + an `apple_bundle()` rule whose output will + be stored in the IPA package generated by this rule. + """), + "contacts": attrs.list(attrs.string(), default = []), + "default_host_platform": attrs.option(attrs.configuration_label(), default = None), + "default_platform": attrs.option(attrs.string(), default = None), + "labels": attrs.list(attrs.string(), default = []), + "licenses": attrs.list(attrs.source(), default = []), + "need_android_tools": attrs.bool(default = False), + } + ), +) + +apple_resource = prelude_rule( + name = "apple_resource", + docs = """ + An `apple_resource()` rule contains sets of resource directories, files and file variants + that can be bundled in an application bundle. This rule does not have any output on its own and can + be built only as a dependency (either direct or transitive) of + an `apple_bundle()` rule. + """, + examples = """ + ``` + + apple_resource( + name = 'Images', + files = glob([ + '*.png', + ]), + dirs = [ + 'PrettyImages', + ], + ) + + ``` + """, + further = None, + attrs = ( + # @unsorted-dict-items + { + "dirs": attrs.list(attrs.source(), default = [], doc = """ + Set of paths of resource directories that should be placed in an application bundle. + """), + "content_dirs": attrs.list(attrs.source(), default = [], doc = """ + Set of paths of directories containing resource files that should be placed in an application bundle. Unlike `dirs`, the directories themselves are not placed in the bundle. + """), + "files": attrs.list(attrs.source(), default = [], doc = """ + Set of paths of resource files that should be placed in an application bundle. + """), + "variants": attrs.list(attrs.source(), default = [], doc = """ + Set of paths of resource file variants that should be placed in an application bundle. The files + mentioned here should be placed in a directory named `$VARIANT_NAME.lproj`, + where `$VARIANT_NAME` is the name of the variant + (e.g. `Base`, `en`). This argument makes it possible to use different + resource files based on the active locale. + """), + "named_variants": attrs.dict(key = attrs.string(), value = attrs.set(attrs.source(), sorted = False), sorted = False, default = {}, doc = """ + Mapping from a variant name to the list of resource file paths which should be placed in an application bundle. Those files + will be placed in a directory whose name equals the corresponding key in this mapping. Keys should end with the `.lproj` suffix + (e.g. `Base.lproj`, `en.lproj`). + """), + "resources_from_deps": attrs.list(attrs.dep(), default = [], doc = """ + Set of build targets whose transitive `apple_resource`s should be considered as part of + the current resource when collecting resources for bundles. + + Usually, an `apple_bundle` collects all `apple_resource` rules transitively + reachable through `apple_library()` rules. This field allows for resources which are not reachable + using the above traversal strategy to be considered for inclusion in the bundle.
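+ + A minimal sketch (hypothetical target names): + + ``` + + apple_resource( + name = 'AggregatedResources', + files = ['Config.plist'], + resources_from_deps = ['//tools:ExtraResources'], + ) + + ``` +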
+ """), + "destination": attrs.option(attrs.enum(AppleResourceBundleDestination), default = None, doc = """ + Specifies the destination in the final application bundle where resource will be copied. Possible + values: "resources", "frameworks", "executables", "plugins", "xpcservices". + """), + "codesign_on_copy": attrs.bool(default = False, doc = """ + Indicates whether the files specified in the files arg in this resource should be code signed with the identity used to sign the overall bundle. This is useful for e.g. + dylibs or other additional binaries copied into the bundle. The caller is responsible to ensure that the file format is valid for codesigning. + """), + "contacts": attrs.list(attrs.string(), default = []), + "default_host_platform": attrs.option(attrs.configuration_label(), default = None), + "labels": attrs.list(attrs.string(), default = []), + "licenses": attrs.list(attrs.source(), default = []), + } + ), +) + +apple_test = prelude_rule( + name = "apple_test", + docs = """ + An `apple_test()` rule contains Objective-C/C++ code which can be built and used to test + code contained in other rules. The tests can be executed by running `buck test`. + """, + examples = """ + ``` + + apple_test( + name = 'MyTest', + info_plist = 'MyTest-Info.plist', + preprocessor_flags = ['-fobjc-arc'], + srcs = [ + 'MyTest.m', + ], + deps = [ + ':MyLibrary', + ], + frameworks = [ + '$SDKROOT/System/Library/Frameworks/Foundation.framework', + '$SDKROOT/System/Library/Frameworks/UIKit.framework', + '$PLATFORM_DIR/Developer/Library/Frameworks/XCTest.framework', + ], + ) + + ``` + """, + further = None, + attrs = ( + # @unsorted-dict-items + apple_common.info_plist_arg() | + apple_common.info_plist_substitutions_arg() | + { + "test_host_app": attrs.option(attrs.transition_dep(cfg = apple_test_host_app_transition), default = None, doc = """ + A build target identifying + an `apple_bundle()` rule that builds an + application bundle. Output of the specified rule will be used as a test host of this test. This + implies `run_test_separately`. + Since symbols that are defined in the test host application and its dependencies will not be + linked into the test binary, to make those symbols accessible to the test target they need + to be specified as a dependency of this target and `['-undefined', 'dynamic_lookup']` needs to be added to this target's `linker_flags` (this will suppress undefined + reference errors during compilation, but if the symbols do not exist, it might result in runtime + crashes). + """), + "embed_xctest_frameworks_in_test_host_app": attrs.option(attrs.bool(), default = None, doc = """ + Controls whether a marker constraint is added to the `test_host_app`. 
+ """), + } | + cxx_common.srcs_arg() | + cxx_common.platform_srcs_arg() | + apple_common.headers_arg() | + apple_common.header_path_prefix_arg() | + apple_common.frameworks_arg() | + cxx_common.preprocessor_flags_arg() | + cxx_common.compiler_flags_arg() | + cxx_common.platform_compiler_flags_arg() | + cxx_common.linker_flags_arg() | + apple_common.target_sdk_version() | + buck.run_test_separately_arg(run_test_separately_type = attrs.bool(default = False)) | + buck.test_label_arg() | + apple_common.extra_xcode_sources() | + apple_common.extra_xcode_files() | + apple_common.serialize_debugging_options_arg() | + apple_common.uses_explicit_modules_arg() | + apple_common.apple_sanitizer_compatibility_arg() | + { + "asset_catalogs_compilation_options": attrs.dict(key = attrs.string(), value = attrs.any(), default = {}), + "bridging_header": attrs.option(attrs.source(), default = None), + "can_be_asset": attrs.option(attrs.bool(), default = None), + "codesign_flags": attrs.list(attrs.string(), default = []), + "codesign_identity": attrs.option(attrs.string(), default = None), + "contacts": attrs.list(attrs.string(), default = []), + "cxx_runtime_type": attrs.option(attrs.enum(CxxRuntimeType), default = None), + "default_host_platform": attrs.option(attrs.configuration_label(), default = None), + "default_platform": attrs.option(attrs.string(), default = None), + "defaults": attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False, default = {}), + "deps": attrs.list(attrs.dep(), default = []), + "destination_specifier": attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False, default = {}), + "devirt_enabled": attrs.bool(default = False), + "diagnostics": attrs.dict(key = attrs.string(), value = attrs.source(), sorted = False, default = {}), + "enable_cxx_interop": attrs.bool(default = False), + "entitlements_file": attrs.option(attrs.source(), default = None), + "env": attrs.option(attrs.dict(key = attrs.string(), value = attrs.arg(), sorted = False), default = None), + "executable_name": attrs.option(attrs.string(), default = None), + "exported_deps": attrs.list(attrs.dep(), default = []), + "exported_header_style": attrs.enum(IncludeType, default = "local"), + "exported_headers": attrs.named_set(attrs.source(), sorted = True, default = []), + "exported_lang_platform_preprocessor_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg()))), sorted = False, default = {}), + "exported_lang_preprocessor_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.arg()), sorted = False, default = {}), + "exported_linker_flags": attrs.list(attrs.arg(), default = []), + "exported_platform_deps": attrs.list(attrs.tuple(attrs.regex(), attrs.set(attrs.dep(), sorted = True)), default = []), + "exported_platform_headers": attrs.list(attrs.tuple(attrs.regex(), attrs.named_set(attrs.source(), sorted = True)), default = []), + "exported_platform_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), + "exported_platform_preprocessor_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), + "exported_post_linker_flags": attrs.list(attrs.arg(), default = []), + "exported_post_platform_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), + "exported_preprocessor_flags": attrs.list(attrs.arg(), default = []), + "fat_lto": attrs.bool(default = False), + "focused_list_target": 
attrs.option(attrs.dep(), default = None), + "force_static": attrs.option(attrs.bool(), default = None), + "header_namespace": attrs.option(attrs.string(), default = None), + "headers_as_raw_headers_mode": attrs.option(attrs.enum(HeadersAsRawHeadersMode), default = None), + "import_obj_c_forward_declarations": attrs.bool(default = True), + "include_directories": attrs.set(attrs.string(), sorted = True, default = []), + "incremental_bundling_enabled": attrs.option(attrs.bool(), default = None), + "is_ui_test": attrs.bool(default = False), + "lang_compiler_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.arg()), sorted = False, default = {}), + "lang_platform_compiler_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg()))), sorted = False, default = {}), + "lang_platform_preprocessor_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg()))), sorted = False, default = {}), + "lang_preprocessor_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.arg()), sorted = False, default = {}), + "libraries": attrs.list(attrs.string(), default = []), + "licenses": attrs.list(attrs.source(), default = []), + "link_group": attrs.option(attrs.string(), default = None), + "link_group_map": LINK_GROUP_MAP_ATTR, + # Used to create the shared test library. Any library deps whose `preferred_linkage` isn't "shared" will + # be treated as "static" deps and linked into the shared test library. + "link_style": attrs.enum(LinkableDepType, default = "static"), + "link_whole": attrs.option(attrs.bool(), default = None), + "linker_extra_outputs": attrs.list(attrs.string(), default = []), + "modular": attrs.bool(default = False), + "module_name": attrs.option(attrs.string(), default = None), + "module_requires_cxx": attrs.bool(default = False), + "platform_deps": attrs.list(attrs.tuple(attrs.regex(), attrs.set(attrs.dep(), sorted = True)), default = []), + "platform_headers": attrs.list(attrs.tuple(attrs.regex(), attrs.named_set(attrs.source(), sorted = True)), default = []), + "platform_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), + "platform_preprocessor_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), + "post_linker_flags": attrs.list(attrs.arg(), default = []), + "post_platform_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), + # The test source code and lib dependencies should be built into a shared library. 
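+ # With "shared" preferred linkage (below), the test code is produced as a dynamic + # library that can be loaded into the running test process.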
+ "preferred_linkage": attrs.enum(Linkage.values(), default = "shared"), + "prefix_header": attrs.option(attrs.source(), default = None), + "public_include_directories": attrs.set(attrs.string(), sorted = True, default = []), + "public_system_include_directories": attrs.set(attrs.string(), sorted = True, default = []), + "raw_headers": attrs.set(attrs.source(), sorted = True, default = []), + "reexport_all_header_dependencies": attrs.option(attrs.bool(), default = None), + "runner": attrs.option(attrs.dep(), default = None), + "sdk_modules": attrs.list(attrs.string(), default = []), + "skip_copying_swift_stdlib": attrs.option(attrs.bool(), default = None), + "snapshot_reference_images_path": attrs.option(attrs.one_of(attrs.source(), attrs.string()), default = None), + "soname": attrs.option(attrs.string(), default = None), + "specs": attrs.option(attrs.arg(json = True), default = None), + "static_library_basename": attrs.option(attrs.string(), default = None), + "supported_platforms_regex": attrs.option(attrs.regex(), default = None), + "supports_merged_linking": attrs.option(attrs.bool(), default = None), + "swift_compiler_flags": attrs.list(attrs.arg(), default = []), + "swift_module_skip_function_bodies": attrs.bool(default = True), + "swift_version": attrs.option(attrs.string(), default = None), + "test_rule_timeout_ms": attrs.option(attrs.int(), default = None), + "thin_lto": attrs.bool(default = False), + "try_skip_code_signing": attrs.option(attrs.bool(), default = None), + "ui_test_target_app": attrs.option(attrs.dep(), default = None), + "use_submodules": attrs.bool(default = True), + "uses_cxx_explicit_modules": attrs.bool(default = False), + "uses_modules": attrs.bool(default = False), + "xcode_product_type": attrs.option(attrs.string(), default = None), + } | + buck.allow_cache_upload_arg() | + buck.inject_test_env_arg() | + apple_test_extra_attrs() + ), +) + +apple_toolchain = prelude_rule( + name = "apple_toolchain", + docs = "", + examples = None, + further = None, + attrs = ( + # @unsorted-dict-items + { + "actool": attrs.source(), + "architecture": attrs.string(default = ""), + "build_version": attrs.option(attrs.string(), default = None), + "codesign": attrs.source(), + "codesign_allocate": attrs.source(), + "contacts": attrs.list(attrs.string(), default = []), + "copy_scene_kit_assets": attrs.option(attrs.source(), default = None), + "cxx_toolchain": attrs.dep(), + "default_host_platform": attrs.option(attrs.configuration_label(), default = None), + "developer_path": attrs.option(attrs.source(), default = None), + "dsymutil": attrs.source(), + "dwarfdump": attrs.option(attrs.source(), default = None), + "ibtool": attrs.source(), + "labels": attrs.list(attrs.string(), default = []), + "libtool": attrs.source(), + "licenses": attrs.list(attrs.source(), default = []), + "lipo": attrs.source(), + "mapc": attrs.option(attrs.source(), default = None), + "min_version": attrs.string(default = ""), + "momc": attrs.source(), + "platform_path": attrs.source(), + "sdk_environment": attrs.option(attrs.string(), default = None), + "sdk_name": attrs.string(default = ""), + "sdk_path": attrs.source(), + "swift_toolchain": attrs.option(attrs.dep(), default = None), + "version": attrs.string(default = ""), + "watch_kit_stub_binary": attrs.option(attrs.source(), default = None), + "work_around_dsymutil_lto_stack_overflow_bug": attrs.option(attrs.bool(), default = None), + "xcode_build_version": attrs.string(default = ""), + "xcode_version": attrs.string(default = ""), + "xctest": 
attrs.source(), + } + ), +) + +apple_toolchain_set = prelude_rule( + name = "apple_toolchain_set", + docs = "", + examples = None, + further = None, + attrs = ( + # @unsorted-dict-items + { + "apple_toolchains": attrs.list(attrs.dep(), default = []), + "contacts": attrs.list(attrs.string(), default = []), + "default_host_platform": attrs.option(attrs.configuration_label(), default = None), + "labels": attrs.list(attrs.string(), default = []), + "licenses": attrs.list(attrs.source(), default = []), + } + ), +) + +core_data_model = prelude_rule( + name = "core_data_model", + docs = """ + A `core_data_model()` rule contains models for Apple's Core Data framework. This rule does not have any output on its own and can be built only as a dependency + (either direct or transitive) of an `apple_bundle()` rule, in which case all `core_data_model()` rules + that the bundle rule depends on are merged and placed into the final output bundle together. + """, + examples = """ + ``` + + core_data_model( + name = 'MyCoreDataModel', + path = 'MyCoreDataModel.xcdatamodeld', + ) + + ``` + """, + further = None, + attrs = ( + # @unsorted-dict-items + { + "path": attrs.source(doc = """ + Relative path of the .xcdatamodeld package directory. + """), + "contacts": attrs.list(attrs.string(), default = []), + "default_host_platform": attrs.option(attrs.configuration_label(), default = None), + "labels": attrs.list(attrs.string(), default = []), + "licenses": attrs.list(attrs.source(), default = []), + } + ), +) + +prebuilt_apple_framework = prelude_rule( + name = "prebuilt_apple_framework", + docs = """ + A `prebuilt_apple_framework()` rule represents a prebuilt Apple framework of + Objective-C/C++ code and is very similar to a `prebuilt_cxx_library()` rule. + """, + examples = """ + ``` + + prebuilt_apple_framework( + name = 'MyPrebuiltFramework', + framework = 'myPrebuiltFramework.framework', + preferred_linkage = 'static', + visibility = [ + 'PUBLIC' + ] + ) + + ``` + """, + further = None, + attrs = ( + # @unsorted-dict-items + { + "preferred_linkage": attrs.enum(Linkage.values(), doc = """ + How to link against the framework: use `dynamic` for a dynamic + framework, and `static` for old universal static + frameworks manually lipo-ed together. `dynamic` will + copy the framework into the `Frameworks` directory + of an Apple bundle, and configure framework search paths and linker flags. + `static` will copy the resources of the framework into + an Apple bundle.
+ """), + "contacts": attrs.list(attrs.string(), default = []), + "default_host_platform": attrs.option(attrs.configuration_label(), default = None), + "deps": attrs.list(attrs.dep(), default = []), + "exported_linker_flags": attrs.list(attrs.string(), default = []), + "exported_platform_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.string())), default = []), + "framework": attrs.source(), + "frameworks": attrs.list(attrs.string(), default = []), + "labels": attrs.list(attrs.string(), default = []), + "libraries": attrs.list(attrs.string(), default = []), + "licenses": attrs.list(attrs.source(), default = []), + "supported_platforms_regex": attrs.option(attrs.regex(), default = None), + } + ), +) + +scene_kit_assets = prelude_rule( + name = "scene_kit_assets", + docs = "", + examples = None, + further = None, + attrs = ( + # @unsorted-dict-items + { + "contacts": attrs.list(attrs.string(), default = []), + "default_host_platform": attrs.option(attrs.configuration_label(), default = None), + "labels": attrs.list(attrs.string(), default = []), + "licenses": attrs.list(attrs.source(), default = []), + "path": attrs.source(), + } + ), +) + +swift_library = prelude_rule( + name = "swift_library", + docs = "", + examples = None, + further = None, + attrs = ( + # @unsorted-dict-items + apple_common.serialize_debugging_options_arg() | + { + "bridging_header": attrs.option(attrs.source(), default = None), + "compiler_flags": attrs.list(attrs.arg(), default = []), + "contacts": attrs.list(attrs.string(), default = []), + "default_host_platform": attrs.option(attrs.configuration_label(), default = None), + "deps": attrs.list(attrs.dep(), default = []), + "enable_cxx_interop": attrs.bool(default = False), + "frameworks": attrs.list(attrs.string(), default = []), + "import_obj_c_forward_declarations": attrs.bool(default = True), + "labels": attrs.list(attrs.string(), default = []), + "libraries": attrs.list(attrs.string(), default = []), + "licenses": attrs.list(attrs.source(), default = []), + "module_name": attrs.option(attrs.string(), default = None), + "preferred_linkage": attrs.option(attrs.enum(Linkage.values()), default = None), + "sdk_modules": attrs.list(attrs.string(), default = []), + "soname": attrs.option(attrs.string(), default = None), + "srcs": attrs.list(attrs.source(), default = []), + "supported_platforms_regex": attrs.option(attrs.regex(), default = None), + "target_sdk_version": attrs.option(attrs.string(), default = None), + "version": attrs.option(attrs.string(), default = None), + } | + apple_common.uses_explicit_modules_arg() + ), +) + +swift_toolchain = prelude_rule( + name = "swift_toolchain", + docs = "", + examples = None, + further = None, + attrs = ( + # @unsorted-dict-items + { + "can_toolchain_emit_obj_c_header_textually": attrs.bool(default = False), + "contacts": attrs.list(attrs.string(), default = []), + "default_host_platform": attrs.option(attrs.configuration_label(), default = None), + "explicit_modules_uses_gmodules": attrs.bool(default = False), + "labels": attrs.list(attrs.string(), default = []), + "licenses": attrs.list(attrs.source(), default = []), + "platform_path": attrs.source(), + "prefix_serialized_debug_info": attrs.bool(default = False), + "resource_dir": attrs.option(attrs.source(), default = None), + "runtime_paths_for_bundling": attrs.list(attrs.string(), default = []), + "runtime_paths_for_linking": attrs.list(attrs.string(), default = []), + "runtime_run_paths": attrs.list(attrs.string(), default = []), + "sdk_path": 
attrs.source(), + "static_runtime_paths": attrs.list(attrs.string(), default = []), + "supports_relative_resource_dir": attrs.bool(default = False), + "supports_swift_cxx_interoperability_mode": attrs.bool(default = False), + "supports_swift_importing_obj_c_forward_declarations": attrs.bool(default = False), + "supports_cxx_interop_requirement_at_import": attrs.bool(default = False), + "swift_stdlib_tool": attrs.option(attrs.source(), default = None), + "swift_stdlib_tool_flags": attrs.list(attrs.arg(), default = []), + "swiftc": attrs.source(), + "swiftc_flags": attrs.list(attrs.arg(), default = []), + } + ), +) + +_APPLE_TOOLCHAIN_ATTR = get_apple_toolchain_attr() + +def _apple_universal_executable_attrs(): + attribs = { + "executable": attrs.split_transition_dep(cfg = cpu_split_transition, doc = """ + A build target identifying the binary which will be built for multiple architectures. + The target will be transitioned into different configurations, with distinct architectures. + + The target can be one of: + - `apple_binary()` and `cxx_binary()` + - `[shared]` subtarget of `apple_library()` and `cxx_library()` + - `apple_library()` and `cxx_library()` which have `preferred_linkage = shared` attribute + """), + "executable_name": attrs.option(attrs.string(), default = None, doc = """ + By default, the name of the universal executable is same as the name of the binary + from the `binary` target attribute. Set `executable_name` to override the default. + """), + "labels": attrs.list(attrs.string(), default = []), + "split_arch_dsym": attrs.bool(default = False, doc = """ + If enabled, each architecture gets its own dSYM binary. Use this if the combined + universal dSYM binary exceeds 4GiB. + """), + "universal": attrs.option(attrs.bool(), default = None, doc = """ + Controls whether the output is universal binary. Any value overrides the presence + of the `config//cpu/constraints:universal-enabled` constraint. Read the rule docs + for more information on resolution. + """), + "_apple_toolchain": _APPLE_TOOLCHAIN_ATTR, + "_apple_tools": attrs.exec_dep(default = "prelude//apple/tools:apple-tools", providers = [AppleToolsInfo]), + } + attribs.update(apple_dsymutil_attrs()) + return attribs + +apple_universal_executable = prelude_rule( + name = "apple_universal_executable", + impl = apple_universal_executable_impl, + docs = """ + An `apple_universal_executable()` rule takes a target via its + `binary` attribute, builds it for multiple architectures and + combines the result into a single binary using `lipo`. + + The output of the rule is a universal binary: + - If `config//cpu/constraints:universal-enabled` is present in the target platform. + - If the `universal` attribute is set to `True`. + + If none of the conditions are met, then the rule acts as a nop `alias()`. + + The `universal` attribute, if present, takes precedence over constraint. + For example, if `universal = False`, then the presence of the constraint + would not affect the output. + + `apple_bundle()` supports building of universal binaries, + `apple_universal_executable()` is only needed if you have a standalone + binary target which is not embedded in an `apple_bundle()` (usually a + CLI tool). + """, + examples = None, + further = None, + attrs = _apple_universal_executable_attrs(), +) + +def _cxx_universal_executable_attrs(): + return { + "executable": attrs.split_transition_dep(cfg = cpu_split_transition, doc = """ + A build target identifying the binary which will be built for multiple architectures. 
+ The target will be transitioned into different configurations, with distinct architectures. + + The target can be one of: + - `cxx_binary()` + - the `[shared]` subtarget of `cxx_library()` + - `cxx_library()` with the `preferred_linkage = shared` attribute + """), + "executable_name": attrs.option(attrs.string(), default = None, doc = """ + By default, the name of the universal executable is the same as the name of the binary + from the `executable` target attribute. Set `executable_name` to override the default. + """), + "labels": attrs.list(attrs.string(), default = []), + "universal": attrs.option(attrs.bool(), default = None, doc = """ + Controls whether the output is a universal binary. Any value overrides the presence + of the `config//cpu/constraints:universal-enabled` constraint. Read the rule docs + for more information on resolution. + """), + "_cxx_toolchain": toolchains_common.cxx(), + } + +cxx_universal_executable = prelude_rule( + name = "cxx_universal_executable", + impl = cxx_universal_executable_impl, + docs = """ + A `cxx_universal_executable()` rule takes a target via its + `executable` attribute, builds it for multiple architectures, and + combines the result into a single binary using `lipo`. + + The output of the rule is a universal binary if either: + - `config//cpu/constraints:universal-enabled` is present in the target platform, or + - the `universal` attribute is set to `True`. + + If neither condition is met, the rule acts as a no-op `alias()`. + + The `universal` attribute, if present, takes precedence over the constraint. + For example, if `universal = False`, then the presence of the constraint + would not affect the output. + """, + examples = None, + further = None, + attrs = _cxx_universal_executable_attrs(), +) + +ios_rules = struct( + apple_asset_catalog = apple_asset_catalog, + apple_binary = apple_binary, + apple_bundle = apple_bundle, + apple_library = apple_library, + apple_package = apple_package, + apple_resource = apple_resource, + apple_test = apple_test, + apple_toolchain = apple_toolchain, + apple_toolchain_set = apple_toolchain_set, + apple_universal_executable = apple_universal_executable, + core_data_model = core_data_model, + cxx_universal_executable = cxx_universal_executable, + prebuilt_apple_framework = prebuilt_apple_framework, + scene_kit_assets = scene_kit_assets, + swift_library = swift_library, + swift_toolchain = swift_toolchain, +) diff --git a/prelude/decls/common.bzl b/prelude/decls/common.bzl index 2ca044d8ebfa1..b598801c57a54 100644 --- a/prelude/decls/common.bzl +++ b/prelude/decls/common.bzl @@ -10,9 +10,6 @@ # the generated docs, and so those should be verified to be accurate and # well-formatted (and then delete this TODO) -load("@prelude//:build_mode.bzl", "BuildModeInfo") -load("@prelude//:is_full_meta_repo.bzl", "is_full_meta_repo") - def validate_uri(_s): return True @@ -23,7 +20,7 @@ prelude_rule = record( further = field([str, None], None), attrs = field(dict[str, Attr]), impl = field([typing.Callable, None], None), - uses_plugins = field([list["PluginKind"], None], None), + uses_plugins = field([list[plugins.PluginKind], None], None), ) AbiGenerationMode = ["unknown", "class", "source", "migrating_to_source_only", "source_only", "unrecognized"] @@ -42,8 +39,6 @@ IncludeType = ["local", "system", "raw"] LinkableDepType = ["static", "static_pic", "shared"] -Linkage = ["any", "static", "shared"] - LogLevel = ["off", "severe", "warning", "info", "config", "fine", "finer", "finest", "all"] OnDuplicateEntry = ["fail", "overwrite", "append"]
@@ -52,8 +47,6 @@ SourceAbiVerificationMode = ["off", "log", "fail"] TestType = ["junit", "junit5", "testng"] -Traversal = ["tree", "node"] - UnusedDependenciesAction = ["unknown", "fail", "warn", "ignore", "unrecognized"] def _name_arg(name_type): @@ -132,7 +125,7 @@ def _platform_deps_arg(): def _labels_arg(): return { "labels": attrs.list(attrs.string(), default = [], doc = """ - Set of arbitrary strings which allow you to annotate a `build rule`with tags + Set of arbitrary strings which allow you to annotate a `build rule` with tags that can be searched for over an entire dependency tree using `buck query()` . """), @@ -206,51 +199,11 @@ def _target_os_type_arg() -> Attr: def _exec_os_type_arg() -> Attr: return attrs.default_only(attrs.exec_dep(default = "prelude//os_lookup/targets:os_lookup")) -def _re_opts_for_tests_arg() -> Attr: - # Attributes types do not have records. - # The expected shape of re_opts is: - # { - # "capabilities": Dict | None - # "use_case": str | None - # "remote_cache_enabled": bool | None - # } - return attrs.option( - attrs.dict( - key = attrs.string(), - value = attrs.option( - attrs.one_of( - attrs.dict( - key = attrs.string(), - value = attrs.string(), - sorted = False, - ), - attrs.string(), - attrs.bool(), - ), - # TODO(cjhopman): I think this default does nothing, it should be deleted - default = None, - ), - sorted = False, - ), - default = None, - ) - -def _re_action_key_provider_arg() -> Attr: - if is_full_meta_repo(): - return attrs.dep(providers = [BuildModeInfo], default = "fbcode//buck2/platform/build_mode:build_mode") - else: - return attrs.option(attrs.dep(providers = [BuildModeInfo]), default = None) - -def _re_test_args() -> dict[str, Attr]: - return { - "remote_execution": _re_opts_for_tests_arg(), - "remote_execution_action_key_providers": _re_action_key_provider_arg(), - } - def _allow_cache_upload_arg(): return { - "allow_cache_upload": attrs.bool( - default = False, + "allow_cache_upload": attrs.option( + attrs.bool(), + default = None, doc = """ Whether to allow uploading the output of this rule to be uploaded to cache when the action is executed locally if the configuration @@ -260,6 +213,15 @@ def _allow_cache_upload_arg(): ), } +def _inject_test_env_arg(): + return { + # NOTE: We make this a `dep` not an `exec_dep` even though we'll execute + # it, because it needs to execute in the same platform as the test itself + # (we run tests in the target platform not the exec platform, since the + # goal is to test the code that is being built!). 
+ "_inject_test_env": attrs.default_only(attrs.dep(default = "prelude//test/tools:inject_test_env")), + } + buck = struct( name_arg = _name_arg, deps_query_arg = _deps_query_arg, @@ -274,7 +236,7 @@ buck = struct( run_test_separately_arg = _run_test_separately_arg, fork_mode = _fork_mode, test_rule_timeout_ms = _test_rule_timeout_ms, - re_test_args = _re_test_args, target_os_type_arg = _target_os_type_arg, allow_cache_upload_arg = _allow_cache_upload_arg, + inject_test_env_arg = _inject_test_env_arg, ) diff --git a/prelude/decls/core_rules.bzl b/prelude/decls/core_rules.bzl index 592c9b3fea440..a7569c7065f0b 100644 --- a/prelude/decls/core_rules.bzl +++ b/prelude/decls/core_rules.bzl @@ -21,6 +21,8 @@ Platform = ["linux", "macos", "windows", "freebsd", "unknown"] RemoteFileType = ["data", "executable", "exploded_zip"] +TargetCpuType = ["arm", "armv7", "arm64", "x86", "x86_64", "mips"] + alias = prelude_rule( name = "alias", docs = "", @@ -52,7 +54,7 @@ command_alias = prelude_rule( You can reference a `command_alias` target in - the `cmd` parameter of a `genrule()`by + the `cmd` parameter of a `genrule()` by using the `exe` macro: @@ -156,9 +158,12 @@ command_alias = prelude_rule( attrs = ( # @unsorted-dict-items { - "exe": attrs.option(attrs.dep(), default = None, doc = """ - A `build target`for a rule that outputs - an executable, such as an `sh\\_binary()`. + # Match `dep` before `source` so that we can support extracting the + # `RunInfo` provider of it, if one exists. + "exe": attrs.option(attrs.one_of(attrs.dep(), attrs.source()), default = None, doc = """ + A `build target` for a rule that outputs + an executable, such as an `sh_binary()`, + or an executable source file. """), "platform_exe": attrs.dict(key = attrs.enum(Platform), value = attrs.dep(), sorted = False, default = {}, doc = """ A mapping from platforms to `build target`. @@ -219,6 +224,23 @@ config_setting = prelude_rule( ), ) +configuration_alias = prelude_rule( + name = "configuration_alias", + docs = "", + examples = None, + further = None, + attrs = ( + # @unsorted-dict-items + { + # configuration_alias acts like alias but for configuration rules. + + # The configuration_alias itself is a configuration rule and the `actual` argument is + # expected to be a configuration rule as well. + "actual": attrs.dep(pulls_and_pushes_plugins = plugins.All), + } + ), +) + configured_alias = prelude_rule( name = "configured_alias", docs = "", @@ -278,10 +300,6 @@ constraint_value = prelude_rule( export_file = prelude_rule( name = "export_file", docs = """ - **Warning:** this build rule is deprecated for folders. - Use `filegroup()`instead. It is still supported for individual files. - - An `export_file()` takes a single file or folder and exposes it so other rules can use it. """, @@ -360,7 +378,7 @@ export_file = prelude_rule( genrule( name = 'demo', - out = 'result.html' + out = 'result.html', cmd = 'cp $(location :example) $OUT', ) @@ -558,11 +576,12 @@ genrule = prelude_rule( genrule_common.bash_arg() | genrule_common.cmd_exe_arg() | genrule_common.type_arg() | + genrule_common.weight_arg() | { "out": attrs.option(attrs.string(), default = None, doc = """ The name of the output file or directory. The complete path to this argument is provided to the shell command through - the `OUT` environment variable. Only one of`out` + the `OUT` environment variable. Only one of `out` or `outs` may be present. 
"""), "outs": attrs.option(attrs.dict(key = attrs.string(), value = attrs.set(attrs.string(), sorted = False), sorted = False), default = None, doc = """ @@ -641,8 +660,8 @@ genrule = prelude_rule( ``` is not. """), - "env": attrs.dict(key = attrs.string(), value = attrs.arg(), sorted = False, default = {}), } | + genrule_common.env_arg() | genrule_common.environment_expansion_separator() | { "enable_sandbox": attrs.option(attrs.bool(), default = None, doc = """ @@ -677,7 +696,7 @@ http_archive = prelude_rule( An `http_archive()` rule is used to download and extract archives from the Internet to be used as dependencies for other rules. These rules are downloaded by running `fetch`, or can be downloaded as part of - `build`by setting `.buckconfig` + `build` by setting `.buckconfig` """, examples = """ Using `http_archive()`, third party packages can be downloaded from @@ -733,7 +752,7 @@ http_archive = prelude_rule( * foo\\_prime/bar-0.1.2 Only `data.dat` will be extracted, and it will be extracted into the output - directory specified in\302\240`http\\_archive()out`. + directory specified in `out`. """), "excludes": attrs.list(attrs.regex(), default = [], doc = """ An optional list of regex patterns. All file paths in the extracted archive which match @@ -777,9 +796,9 @@ http_file = prelude_rule( docs = """ An `http_file()` rule is used to download files from the Internet to be used as dependencies for other rules. This rule only downloads single files, and can - optionally make them executable (see `http\\_file()executable`) + optionally make them executable (see `http_file()executable`) These rules are downloaded by running `fetch`, or can - be downloaded as part of `build`by setting `.buckconfig` + be downloaded as part of `build` by setting `.buckconfig` """, examples = """ Using `http_file()`, third party packages can be downloaded from @@ -858,7 +877,7 @@ http_file = prelude_rule( """), "executable": attrs.option(attrs.bool(), default = None, doc = """ Whether or not the file should be made executable after downloading. If true, - this can also be used via `run`and the + this can also be used via `run` and the `$(exe )` `string parameter macros` """), "contacts": attrs.list(attrs.string(), default = []), @@ -907,7 +926,7 @@ remote_file = prelude_rule( ``` Here's an example of a `remote_file()` using a `mvn` URL being referenced - by a `prebuilt\\_jar()`. + by a `prebuilt_jar()`. ``` @@ -1096,6 +1115,19 @@ test_suite = prelude_rule( ), ) +toolchain_alias = prelude_rule( + name = "toolchain_alias", + docs = """ +toolchain_alias acts like alias but for toolchain rules. + +The toolchain_alias itself is a toolchain rule and the `actual` argument is +expected to be a toolchain_rule as well. + """, + examples = None, + further = None, + attrs = {"actual": attrs.toolchain_dep(doc = "The actual toolchain that is being aliased. This should be a toolchain rule.")}, +) + versioned_alias = prelude_rule( name = "versioned_alias", docs = "", @@ -1324,8 +1356,8 @@ worker_tool = prelude_rule( # @unsorted-dict-items { "exe": attrs.option(attrs.dep(), default = None, doc = """ - A `build target`for a rule that outputs - an executable, such as an `sh\\_binary()`. + A `build target` for a rule that outputs + an executable, such as an `sh_binary()`. Buck runs this executable only once per build. 
"""), "args": attrs.one_of(attrs.arg(), attrs.list(attrs.arg()), default = [], doc = """ @@ -1471,6 +1503,7 @@ core_rules = struct( alias = alias, command_alias = command_alias, config_setting = config_setting, + configuration_alias = configuration_alias, configured_alias = configured_alias, constraint_setting = constraint_setting, constraint_value = constraint_value, @@ -1483,6 +1516,7 @@ core_rules = struct( platform = platform, remote_file = remote_file, test_suite = test_suite, + toolchain_alias = toolchain_alias, versioned_alias = versioned_alias, worker_tool = worker_tool, zip_file = zip_file, diff --git a/prelude/decls/cxx_rules.bzl b/prelude/decls/cxx_rules.bzl index d807bac7bcfe4..06f1078aa6c46 100644 --- a/prelude/decls/cxx_rules.bzl +++ b/prelude/decls/cxx_rules.bzl @@ -10,8 +10,11 @@ # the generated docs, and so those should be verified to be accurate and # well-formatted (and then delete this TODO) -load(":apple_common.bzl", "apple_common") -load(":common.bzl", "CxxRuntimeType", "CxxSourceType", "HeadersAsRawHeadersMode", "LinkableDepType", "Linkage", "Traversal", "buck", "prelude_rule") +load("@prelude//apple:apple_common.bzl", "apple_common") +load("@prelude//cxx:link_groups_types.bzl", "LINK_GROUP_MAP_ATTR") +load("@prelude//linking:link_info.bzl", "LinkStyle") +load("@prelude//linking:types.bzl", "Linkage") +load(":common.bzl", "CxxRuntimeType", "CxxSourceType", "HeadersAsRawHeadersMode", "buck", "prelude_rule") load(":cxx_common.bzl", "cxx_common") load(":genrule_common.bzl", "genrule_common") load(":native_common.bzl", "native_common") @@ -117,7 +120,7 @@ cxx_binary = prelude_rule( "licenses": attrs.list(attrs.source(), default = []), "link_deps_query_whole": attrs.bool(default = False), "link_group": attrs.option(attrs.string(), default = None), - "link_group_map": attrs.option(attrs.list(attrs.tuple(attrs.string(), attrs.list(attrs.tuple(attrs.dep(), attrs.enum(Traversal), attrs.option(attrs.string()))))), default = None), + "link_group_map": LINK_GROUP_MAP_ATTR, "platform_deps": attrs.list(attrs.tuple(attrs.regex(), attrs.set(attrs.dep(), sorted = True)), default = []), "post_linker_flags": attrs.list(attrs.arg(anon_target_compatible = True), default = []), "post_platform_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg(anon_target_compatible = True))), default = []), @@ -127,6 +130,7 @@ cxx_binary = prelude_rule( "thin_lto": attrs.bool(default = False), "version_universe": attrs.option(attrs.string(), default = None), "weak_framework_names": attrs.list(attrs.string(), default = []), + "use_header_units": attrs.bool(default = False), } | buck.allow_cache_upload_arg() ), @@ -136,8 +140,8 @@ cxx_genrule = prelude_rule( name = "cxx_genrule", docs = """ A `cxx_genrule()` enables you to run shell commands as part - of the Buck build process. A `cxx_genrule()` exposes\342\200\224through - a set of string parameter macros and variables\342\200\224information about the + of the Buck build process. A `cxx_genrule()` exposes - through + a set of string parameter macros and variables - information about the tools and configuration options used by the Buck environment, specifically those related to the C/C++ toolchain. @@ -147,7 +151,7 @@ cxx_genrule = prelude_rule( the settings in `.buckconfig` and `.buckconfig.local`, and the result of various command-line overrides specified through - the `common\\_parameters`command-line option. + the `common_parameters` command-line option. 
This information is available only + + @@ -296,7 +300,7 @@ cxx_genrule = prelude_rule( Additionally, if you embed these paths in a shell script, you should - execute that script using the `sh\_binary()`rule and include + execute that script using the `sh_binary()` rule and include the targets for these paths in the `resources` argument of that `sh_binary` rule. These are the same targets that you pass to the string parameter macros. @@ -362,7 +366,9 @@ cxx_genrule = prelude_rule( genrule_common.bash_arg() | genrule_common.cmd_exe_arg() | genrule_common.type_arg() | + genrule_common.weight_arg() | genrule_common.out_arg() | + genrule_common.env_arg() | genrule_common.environment_expansion_separator() | { "enable_sandbox": attrs.option(attrs.bool(), default = None, doc = """ @@ -391,13 +397,13 @@ cxx_library = prelude_rule( Whether a Buck command builds the `cxx_library` is determined by the inclusion of a top-level target, such as - a `cxx\_binary()`or `android\_binary()`, that + a `cxx_binary()` or `android_binary()`, that transitively depends on the `cxx_library`. The set of targets specified to the Buck command (`buck build`, `buck run`, etc) must include one of these top-level targets in order for Buck to build the `cxx_library`. Note that you could specify the top-level target - implicitly using a `build target pattern`or you could also specify - the top-level target using an buckconfig#`alias`defined in `.buckconfig`. + implicitly using a `build target pattern` or you could also specify + the top-level target using a buckconfig `alias` defined in `.buckconfig`. *How* Buck builds the library also depends on the specified top-level target. @@ -408,10 +414,10 @@ cxx_library = prelude_rule( #### Dependencies of the cxx\_library also require a top-level target Similarly, in order for Buck to build a target that - the `cxx_library` depends on, such as a `cxx\_genrule()`, + the `cxx_library` depends on, such as a `cxx_genrule()`, you must specify in the Buck command a top-level target that depends on the `cxx_library`. For example, you could specify - to `build`a `cxx_binary` that + to `build` a `cxx_binary` that depends on the `cxx_library`. If you specify as your build target the `cxx_library` itself, the build targets that the `cxx_library` depends on *might not be built*. @@ -517,17 +523,15 @@ cxx_library = prelude_rule( cxx_common.exported_post_platform_linker_flags_arg() | native_common.link_style() | native_common.link_whole(link_whole_type = attrs.option(attrs.bool(), default = None)) | + native_common.soname() | cxx_common.raw_headers_arg() | cxx_common.include_directories_arg() | cxx_common.public_include_directories_arg() | cxx_common.public_system_include_directories_arg() | { - "soname": attrs.option(attrs.string(), default = None, doc = """ - Sets the soname ("shared object name") of any shared library produced from this rule. - The default value is based on the full rule name. - The macro `$(ext)` will be replaced with a platform-appropriate extension. - An argument can be provided, which is a library version. - For example `soname = 'libfoo.$(ext 2.3)'` will be `libfoo.2.3.dylib` on Mac and `libfoo.so.2.3` on Linux. + "deffile": attrs.option(attrs.source(), default = None, doc = """ + Specifies the *.def file used on Windows to modify a DLL's exports in place of explicit `__declspec(dllexport)` declarations. + The default is to not use a def file.
"""), "used_by_wrap_script": attrs.bool(default = False, doc = """ When using an exopackage @@ -542,14 +546,17 @@ cxx_library = prelude_rule( } | cxx_common.supported_platforms_regex_arg() | cxx_common.force_static(force_static_type = attrs.option(attrs.bool(), default = None)) | - native_common.preferred_linkage(preferred_linkage_type = attrs.option(attrs.enum(Linkage), default = None)) | + native_common.preferred_linkage(preferred_linkage_type = attrs.option(attrs.enum(Linkage.values()), default = None)) | cxx_common.reexport_all_header_dependencies_arg() | cxx_common.exported_deps_arg() | cxx_common.exported_platform_deps_arg() | cxx_common.precompiled_header_arg() | apple_common.extra_xcode_sources() | apple_common.extra_xcode_files() | + apple_common.uses_explicit_modules_arg() | + apple_common.meta_apple_library_validation_enabled_arg() | { + "archive_allow_cache_upload": attrs.bool(default = False), "bridging_header": attrs.option(attrs.source(), default = None), "can_be_asset": attrs.option(attrs.bool(), default = None), "contacts": attrs.list(attrs.string(), default = []), @@ -570,7 +577,7 @@ cxx_library = prelude_rule( "libraries": attrs.list(attrs.string(), default = []), "licenses": attrs.list(attrs.source(), default = []), "link_group": attrs.option(attrs.string(), default = None), - "link_group_map": attrs.option(attrs.list(attrs.tuple(attrs.string(), attrs.list(attrs.tuple(attrs.dep(), attrs.enum(Traversal), attrs.option(attrs.string()))))), default = None), + "link_group_map": LINK_GROUP_MAP_ATTR, "module_name": attrs.option(attrs.string(), default = None), "platform_deps": attrs.list(attrs.tuple(attrs.regex(), attrs.set(attrs.dep(), sorted = True)), default = []), "post_linker_flags": attrs.list(attrs.arg(anon_target_compatible = True), default = []), @@ -583,12 +590,41 @@ cxx_library = prelude_rule( "thin_lto": attrs.bool(default = False), "use_archive": attrs.option(attrs.bool(), default = None), "uses_cxx_explicit_modules": attrs.bool(default = False), - "uses_explicit_modules": attrs.bool(default = False), "version_universe": attrs.option(attrs.string(), default = None), "weak_framework_names": attrs.list(attrs.string(), default = []), - "xcode_private_headers_symlinks": attrs.option(attrs.bool(), default = None), - "xcode_public_headers_symlinks": attrs.option(attrs.bool(), default = None), - } + "use_header_units": attrs.bool(default = False, doc = """ + If True, makes any header unit exported by a dependency (including + recursively) through export_header_unit available to the compiler. If + false, the compilation ignores header units, regardless of what is + exported by dependencies. + """), + "export_header_unit": attrs.option(attrs.enum(["include", "preload"]), default = None, doc = """ + If not None, export a C++20 header unit visible to dependants (including + recursively) with use_header_units set to True. + + "include": replace includes of each file in exported_headers or + raw_headers with an import of the precompiled header unit; files + that do not include any of those headers do not load the header + unit. + + "preload": automatically load the precompiled header unit in any + dependant that uses header units. + """), + "export_header_unit_filter": attrs.list(attrs.string(), default = [], doc = """ + A list of regexes. Each regex should match a set of headers in + exported_headers or raw_headers to be precompiled together into one + C++20 header unit. 
+ + When used with export_header_unit="include", this allows different + subsets of headers to be loaded only by files that use them. Each group + should only depend on headers in previous groups. + + If a header is not matched by any group, it is not precompiled and will + be included textually. If no filter is specified, the rule excludes + inline headers based on a naming heuristic (e.g. "-inl.h"). + """), + } | + buck.allow_cache_upload_arg() ), ) @@ -597,7 +633,7 @@ cxx_precompiled_header = prelude_rule( docs = """ A `cxx_precompiled_header` rule specifies a single header file that can be precompiled and made available for use in other build rules such as - a `cxx\\_library()`or a `cxx\\_binary()`. + a `cxx_library()` or a `cxx_binary()`. This header file is precompiled by the preprocessor on behalf of the @@ -709,8 +745,14 @@ cxx_precompiled_header = prelude_rule( """, further = None, attrs = ( - # @unsorted-dict-items { + "contacts": attrs.list(attrs.string(), default = []), + "default_host_platform": attrs.option(attrs.configuration_label(), default = None), + "deps": attrs.list(attrs.dep(), default = [], doc = """ + Dependency rules which export headers used by the header specified in `src`. + """), + "labels": attrs.list(attrs.string(), default = []), + "licenses": attrs.list(attrs.source(), default = []), "src": attrs.source(doc = """ The path to the header file that should be precompiled. Only one header file can be specified. But of course this header could include @@ -718,18 +760,61 @@ be `exported_headers` from -- another rule, in which case, the rule would have to be added to `deps` as usual. """), - "deps": attrs.list(attrs.dep(), default = [], doc = """ - Dependency rules which export headers used by the header specified in `src`. - """), - "contacts": attrs.list(attrs.string(), default = []), - "default_host_platform": attrs.option(attrs.configuration_label(), default = None), - "labels": attrs.list(attrs.string(), default = []), - "licenses": attrs.list(attrs.source(), default = []), "version_universe": attrs.option(attrs.string(), default = None), } ), ) +windows_resource = prelude_rule( + name = "windows_resource", + docs = """ + A `windows_resource()` rule specifies a set of Windows resource files (.rc) that + are compiled into object files. + + The files are compiled into .res files using rc.exe and then compiled into object files + using cvtres.exe. + They are not part of cxx_library because Microsoft's linker ignores the resources + unless they are specified as an object file, meaning that including them in a (possibly static) + library is unintuitive. + """, + examples = """ + ``` + + # A rule that includes a single .rc file and compiles it into an object file. + windows_resource( + name = "resources", + srcs = [ + "resources.rc", + ], + ) + + # A rule that links against the above windows_resource rule.
+ cxx_binary( + name = "app", + srcs = [ + "main.cpp", + ], + deps = [ + ":resources" + ], + ) + + ``` + """, + further = None, + attrs = ( + cxx_common.srcs_arg() | + cxx_common.headers_arg() | + cxx_common.platform_headers_arg() | + cxx_common.header_namespace_arg() | + cxx_common.raw_headers_arg() | + cxx_common.include_directories_arg() | + { + "labels": attrs.list(attrs.string(), default = []), + } + ), +) + cxx_test = prelude_rule( name = "cxx_test", docs = """ @@ -752,6 +837,7 @@ cxx_test = prelude_rule( further = None, attrs = ( # @unsorted-dict-items + buck.inject_test_env_arg() | cxx_common.srcs_arg() | cxx_common.headers_arg() | cxx_common.preprocessor_flags_arg() | @@ -809,6 +895,7 @@ cxx_test = prelude_rule( buck.test_rule_timeout_ms() | native_common.link_group_deps() | native_common.link_group_public_deps_label() | + native_common.link_style() | { "additional_coverage_targets": attrs.list(attrs.source(), default = []), "contacts": attrs.list(attrs.string(), default = []), @@ -833,8 +920,7 @@ cxx_test = prelude_rule( "licenses": attrs.list(attrs.source(), default = []), "link_deps_query_whole": attrs.bool(default = False), "link_group": attrs.option(attrs.string(), default = None), - "link_group_map": attrs.option(attrs.list(attrs.tuple(attrs.string(), attrs.list(attrs.tuple(attrs.dep(), attrs.enum(Traversal), attrs.option(attrs.string()))))), default = None), - "link_style": attrs.option(attrs.enum(LinkableDepType), default = None), + "link_group_map": LINK_GROUP_MAP_ATTR, "linker_extra_outputs": attrs.list(attrs.string(), default = []), "platform_compiler_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), "platform_deps": attrs.list(attrs.tuple(attrs.regex(), attrs.set(attrs.dep(), sorted = True)), default = []), @@ -850,7 +936,8 @@ cxx_test = prelude_rule( "use_default_test_main": attrs.option(attrs.bool(), default = None), "version_universe": attrs.option(attrs.string(), default = None), "weak_framework_names": attrs.list(attrs.string(), default = []), - } + } | + buck.allow_cache_upload_arg() ), ) @@ -860,7 +947,6 @@ cxx_toolchain = prelude_rule( examples = None, further = None, attrs = ( - # @unsorted-dict-items { "archive_contents": attrs.enum(ArchiveContents, default = "normal"), "archiver": attrs.source(), @@ -879,6 +965,17 @@ cxx_toolchain = prelude_rule( "assembler_preprocessor_type": attrs.option(attrs.enum(CxxToolProviderType), default = None), "assembler_type": attrs.option(attrs.enum(CxxToolProviderType), default = None), "binary_extension": attrs.option(attrs.string(), default = None), + "binary_linker_flags": attrs.list( + attrs.arg(anon_target_compatible = True), + default = [], + doc = """ + Linker flags that apply to all links coordinated by a binary + rule. One key distinction between these and `executable_linker_flags` + is that these will also apply to library links coordinated by + binary rules (e.g. linking roots/deps when using native python or + omnibus link strategies). 
+ """, + ), "c_compiler": attrs.source(), "c_compiler_flags": attrs.list(attrs.arg(), default = []), "c_compiler_type": attrs.option(attrs.enum(CxxToolProviderType), default = None), @@ -891,6 +988,10 @@ cxx_toolchain = prelude_rule( "cuda_compiler_flags": attrs.list(attrs.arg(), default = []), "cuda_compiler_type": attrs.option(attrs.enum(CxxToolProviderType), default = None), "cuda_preprocessor_flags": attrs.list(attrs.arg(), default = []), + "cvtres_compiler": attrs.option(attrs.source(), default = None), + "cvtres_compiler_flags": attrs.list(attrs.arg(), default = []), + "cvtres_compiler_type": attrs.option(attrs.enum(CxxToolProviderType), default = None), + "cvtres_preprocessor_flags": attrs.list(attrs.arg(), default = []), "cxx_compiler": attrs.source(), "cxx_compiler_flags": attrs.list(attrs.arg(), default = []), "cxx_compiler_type": attrs.option(attrs.enum(CxxToolProviderType), default = None), @@ -898,7 +999,14 @@ cxx_toolchain = prelude_rule( "debug_path_prefix_map_sanitizer_format": attrs.option(attrs.string(), default = None), "default_host_platform": attrs.option(attrs.configuration_label(), default = None), "detailed_untracked_header_messages": attrs.bool(default = False), - "filepath_length_limited": attrs.bool(default = False), + "dist_thin_lto_codegen_flags": attrs.list(attrs.arg(), default = []), + "executable_linker_flags": attrs.list( + attrs.arg(anon_target_compatible = True), + default = [], + doc = """ + Linker flags that only apply when linking an executable. + """, + ), "headers_as_raw_headers_mode": attrs.option(attrs.enum(HeadersAsRawHeadersMode), default = None), "headers_whitelist": attrs.list(attrs.string(), default = []), "hip_compiler": attrs.option(attrs.source(), default = None), @@ -908,18 +1016,29 @@ cxx_toolchain = prelude_rule( "labels": attrs.list(attrs.string(), default = []), "licenses": attrs.list(attrs.source(), default = []), "link_path_normalization_args_enabled": attrs.bool(default = False), + "link_style": attrs.enum( + LinkStyle.values(), + default = "static", + doc = """ + The default value of the `link_style` attribute for rules that use this toolchain. 
+ """, + ), "linker": attrs.source(), "linker_flags": attrs.list(attrs.arg(anon_target_compatible = True), default = []), "linker_type": attrs.enum(LinkerProviderType), "nm": attrs.source(), "objcopy_for_shared_library_interface": attrs.source(), - "objcopy_recalculates_layout": attrs.bool(default = False), + "objdump": attrs.option(attrs.source(), default = None), "object_file_extension": attrs.string(default = ""), - "pic_type_for_shared_linking": attrs.enum(PicType, default = "pic"), + "post_linker_flags": attrs.list(attrs.arg(anon_target_compatible = True), default = []), "private_headers_symlinks_enabled": attrs.bool(default = False), "public_headers_symlinks_enabled": attrs.bool(default = False), "ranlib": attrs.option(attrs.source(), default = None), "ranlib_flags": attrs.list(attrs.arg(), default = []), + "rc_compiler": attrs.option(attrs.source(), default = None), + "rc_compiler_flags": attrs.list(attrs.arg(), default = []), + "rc_compiler_type": attrs.option(attrs.enum(CxxToolProviderType), default = None), + "rc_preprocessor_flags": attrs.list(attrs.arg(), default = []), "requires_archives": attrs.bool(default = False), "shared_dep_runtime_ld_flags": attrs.list(attrs.arg(), default = []), "shared_library_extension": attrs.string(default = ""), @@ -933,7 +1052,6 @@ cxx_toolchain = prelude_rule( "strip_all_flags": attrs.option(attrs.list(attrs.arg()), default = None), "strip_debug_flags": attrs.option(attrs.list(attrs.arg()), default = None), "strip_non_global_flags": attrs.option(attrs.list(attrs.arg()), default = None), - "use_arg_file": attrs.bool(default = False), "use_header_map": attrs.bool(default = False), } ), @@ -1006,24 +1124,26 @@ prebuilt_cxx_library = prelude_rule( """, further = None, attrs = ( - # @unsorted-dict-items { - "header_only": attrs.bool(default = False, doc = """ - Indicates if this library only consists of headers or not. If this is set to - `True`, Buck will not link this library into any library that depends on it. - """), "header_dirs": attrs.option(attrs.list(attrs.source()), default = None, doc = """ A directory that headers can be included from. These directories are added to the include path using `-isystem`. """), + "header_only": attrs.bool(default = False, doc = """ + Indicates if this library only consists of headers or not. If this is set to + `True`, Buck will not link this library into any library that depends on it. + """), "platform_header_dirs": attrs.option(attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.source()))), default = None, doc = """ Platform specific header directories. These should be specified as a list of pairs where the first element is an un-anchored regex (in java.util.regex.Pattern syntax) against which the platform name is matched, and the second element is either a list of header directories. See `header_dirs` for more information. """), - "static_lib": attrs.option(attrs.source(), default = None, doc = """ - The path to the library to use when performing static linking. + "platform_shared_lib": attrs.option(attrs.list(attrs.tuple(attrs.regex(), attrs.source())), default = None, doc = """ + Platform specific shared library. These should be specified as a list of pairs where the first + element is an un-anchored regex (in java.util.regex.Pattern syntax) against which the platform + name is matched, and the second element the path to the library. + See `shared_lib` for more information. 
"""), "platform_static_lib": attrs.option(attrs.list(attrs.tuple(attrs.regex(), attrs.source())), default = None, doc = """ Platform specific static library. These should be specified as a list of pairs where the first @@ -1031,9 +1151,6 @@ prebuilt_cxx_library = prelude_rule( name is matched, and the second element the path to the library. See `static_lib` for more information. """), - "static_pic_lib": attrs.option(attrs.source(), default = None, doc = """ - The path to the library to use when performing static PIC linking. - """), "platform_static_pic_lib": attrs.option(attrs.list(attrs.tuple(attrs.regex(), attrs.source())), default = None, doc = """ Platform specific static PIC library. These should be specified as a list of pairs where the first element is an un-anchored regex (in java.util.regex.Pattern syntax) against which the platform @@ -1043,11 +1160,11 @@ prebuilt_cxx_library = prelude_rule( "shared_lib": attrs.option(attrs.source(), default = None, doc = """ The path to the library to use when performing shared linking. """), - "platform_shared_lib": attrs.option(attrs.list(attrs.tuple(attrs.regex(), attrs.source())), default = None, doc = """ - Platform specific shared library. These should be specified as a list of pairs where the first - element is an un-anchored regex (in java.util.regex.Pattern syntax) against which the platform - name is matched, and the second element the path to the library. - See `shared_lib` for more information. + "static_lib": attrs.option(attrs.source(), default = None, doc = """ + The path to the library to use when performing static linking. + """), + "static_pic_lib": attrs.option(attrs.source(), default = None, doc = """ + The path to the library to use when performing static PIC linking. """), } | cxx_common.supported_platforms_regex_arg() | @@ -1058,7 +1175,7 @@ prebuilt_cxx_library = prelude_rule( cxx_common.exported_platform_preprocessor_flags_arg() | cxx_common.exported_linker_flags_arg() | cxx_common.force_static(force_static_type = attrs.bool(default = False)) | - native_common.preferred_linkage(preferred_linkage_type = attrs.option(attrs.enum(Linkage), default = None)) | + native_common.preferred_linkage(preferred_linkage_type = attrs.option(attrs.enum(Linkage.values()), default = None)) | cxx_common.exported_deps_arg() | cxx_common.exported_platform_deps_arg() | cxx_common.supports_merged_linking() | @@ -1067,12 +1184,17 @@ prebuilt_cxx_library = prelude_rule( "can_be_asset": attrs.bool(default = False), "contacts": attrs.list(attrs.string(), default = []), "default_host_platform": attrs.option(attrs.configuration_label(), default = None), + "deffile": attrs.option(attrs.source(), default = None, doc = """ + Specifies the *.def file used on windows to modify a dll's exports in place of explicit `__declspec(dllexport)` declarations. + The default is to not use a defile. 
+ """), "deps": attrs.list(attrs.dep(), default = []), "exported_lang_platform_preprocessor_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg()))), sorted = False, default = {}), "exported_lang_preprocessor_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.arg()), sorted = False, default = {}), "exported_platform_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg(anon_target_compatible = True))), default = []), "exported_post_linker_flags": attrs.list(attrs.arg(anon_target_compatible = True), default = []), "exported_post_platform_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg(anon_target_compatible = True))), default = []), + "extract_soname": attrs.bool(default = False), "frameworks": attrs.list(attrs.string(), default = []), "import_lib": attrs.option(attrs.source(), default = None), "include_in_android_merge_map_output": attrs.bool(default = True), @@ -1082,6 +1204,7 @@ prebuilt_cxx_library = prelude_rule( "link_whole": attrs.bool(default = False), "link_without_soname": attrs.bool(default = False), "platform_import_lib": attrs.option(attrs.list(attrs.tuple(attrs.regex(), attrs.source())), default = None), + "prestripped": attrs.bool(default = False, doc = "When set, skips running `strip` commands when building this library."), "provided": attrs.bool(default = False), "soname": attrs.option(attrs.string(), default = None), "supports_shared_library_interface": attrs.bool(default = True), @@ -1095,7 +1218,8 @@ prebuilt_cxx_library = prelude_rule( "versioned_soname": attrs.option(attrs.versioned(attrs.string()), default = None), "versioned_static_lib": attrs.option(attrs.versioned(attrs.source()), default = None), "versioned_static_pic_lib": attrs.option(attrs.versioned(attrs.source()), default = None), - } + } | + buck.allow_cache_upload_arg() ), ) @@ -1197,7 +1321,7 @@ prebuilt_cxx_library_group = prelude_rule( "default_host_platform": attrs.option(attrs.configuration_label(), default = None), "deps": attrs.list(attrs.dep(), default = []), "import_libs": attrs.dict(key = attrs.string(), value = attrs.source(), sorted = False, default = {}), - "include_dirs": attrs.list(attrs.source(), default = []), + "include_dirs": attrs.list(attrs.source(allow_directory = True), default = []), "include_in_android_merge_map_output": attrs.bool(default = True), "labels": attrs.list(attrs.string(), default = []), "licenses": attrs.list(attrs.source(), default = []), @@ -1242,6 +1366,7 @@ cxx_rules = struct( cxx_genrule = cxx_genrule, cxx_library = cxx_library, cxx_precompiled_header = cxx_precompiled_header, + windows_resource = windows_resource, cxx_test = cxx_test, cxx_toolchain = cxx_toolchain, prebuilt_cxx_library = prebuilt_cxx_library, diff --git a/prelude/decls/erlang_rules.bzl b/prelude/decls/erlang_rules.bzl index 542fb14546f65..4ee634bb9e755 100644 --- a/prelude/decls/erlang_rules.bzl +++ b/prelude/decls/erlang_rules.bzl @@ -6,11 +6,12 @@ # of this source tree. load("@prelude//erlang/erlang_application.bzl", "StartTypeValues") -load(":common.bzl", "buck", "prelude_rule") +load(":common.bzl", "prelude_rule") +load(":re_test_common.bzl", "re_test_common") def re_test_args(): # remove reference to fbcode targets - args = buck.re_test_args() + args = re_test_common.test_args() return {"remote_execution": args["remote_execution"]} common_attributes = { @@ -48,7 +49,7 @@ common_application_attributes = { dependency is desired. 
These fields will be used to construct equally named fields in the generated `*.app` file for the application. - OTP applications are specified with the target path `otp//:`. + OTP applications are specified with the target path `prelude//erlang/applications:`. **NOTE**: _If you use the `app_src` field and the referenced application resource file template specifies `applications` or `included_applications`, buck2 checks that the target definitions and information in the template are @@ -87,7 +88,7 @@ rules_attributes = { This attribute controls whether the output of the builds also creates edoc chunks. """), "env": attrs.option(attrs.dict(key = attrs.string(), value = attrs.string()), default = None, doc = """ - The `env` field allows to set the application env variables. The key value pairs will materialise in tha applications `.app` + The `env` field allows you to set the application env variables. The key value pairs will materialise in the application's `.app` file and can then be accessed by [`application:get_env/2`](https://www.erlang.org/doc/man/application.html#get_env-2). """), "erl_opts": attrs.option(attrs.list(attrs.string()), default = None, doc = """ @@ -120,6 +121,9 @@ rules_attributes = { [application_opt()](https://www.erlang.org/doc/man/application.html#load-2). The key-value pair will be stored in the applications `.app` file and can be accessed by `file:consult/1`. """), + "include_src": attrs.bool(default = True, doc = """ + This field controls whether the generated application directory contains a src/ directory with the Erlang code. + """), "includes": attrs.list(attrs.source(), default = [], doc = """ The public header files accessible via `-include_lib("appname/include/header.hrl")` from other erlang files. """), @@ -128,6 +132,11 @@ rules_attributes = { difference, that the module name, and the individual start arguments need to be given as the string representation of the corresponding Erlang terms. """), + "peek_private_includes": attrs.bool(default = False, doc = """ + This attribute allows you to use the private includes of the application's dependencies. This can be useful for + test applications, to create shared abstractions for tests. It's not advisable to use this attribute for production + code. All transitive private includes must be non-ambiguous. + """), "resources": attrs.list(attrs.dep(), default = [], doc = """ The `resources` field specifies targets whose default outputs are placed in the application's `priv/` directory. For regular files this field is typically combined with `export_file`, `filegroup`, or similar targets. However, it @@ -144,6 +153,12 @@ rules_attributes = { This field indicates if global parse_transforms should be applied to this application as well. It often makes sense for third-party dependencies to not be subjected to global parse_transforms, similar to OTP applications. """), + "xrl_includefile": attrs.option(attrs.source(), default = None, doc = """ + Customised prologue file to replace the default. See [`includefile` option](https://www.erlang.org/doc/apps/parsetools/leex.html#file/2) for details. + """), + "yrl_includefile": attrs.option(attrs.source(), default = None, doc = """ + Customised prologue file to replace the default. See [`includefile` option](https://www.erlang.org/doc/apps/parsetools/yecc.html#file/2) for details.
+ """), } | common_application_attributes, "erlang_app_includes": { "application_name": attrs.string(), @@ -151,6 +166,12 @@ rules_attributes = { "_toolchain": attrs.toolchain_dep(default = "toolchains//:erlang-default"), }, "erlang_escript": { + "bundled": attrs.bool(default = True, doc = """ + Setting bundled to `True` does generate a folder structure and escript trampoline instead of an archive. + """), + "configs": attrs.list(attrs.dep(), default = [], doc = """ + This attribute allows to set config files for the escript. The dependencies that are typically used + here are `export_file` targets."""), "deps": attrs.list(attrs.dep(), doc = """ List of Erlang applications that are bundled in the escript. This includes all transitive dependencies as well. """), @@ -164,7 +185,7 @@ rules_attributes = { `resources` field, the `priv` folders files can then be accessed by `escript"extract/2`. """), "main_module": attrs.option(attrs.string(), default = None, doc = """ - Overrides the default main module. Instead of defering the main module from the scripts filename, the specified module + Overrides the default main module. Instead of deferring the main module from the scripts filename, the specified module is used. That module needs to export a `main/1` function that is called as entry point. """), "resources": attrs.list(attrs.dep(), default = [], doc = """ @@ -197,11 +218,11 @@ rules_attributes = { [`permanent`](https://www.erlang.org/doc/man/application.html#type-restart_type). """), "include_erts": attrs.bool(default = False, doc = """ - This field controls wether OTP applications and the Erlang runtime system should be included as part of the release. + This field controls whether OTP applications and the Erlang runtime system should be included as part of the release. Please note, that at the moment the erts folder is just `erts/`. """), "multi_toolchain": attrs.option(attrs.list(attrs.dep()), default = None, doc = """ - This field controls wether the release should be built with a single toolchain, or multiple toolchains. In the + This field controls whether the release should be built with a single toolchain, or multiple toolchains. In the latter case, all output paths are prefixed with the toolchain name. """), "overlays": attrs.dict(key = attrs.string(), value = attrs.list(attrs.dep()), default = {}, doc = """ @@ -218,6 +239,9 @@ rules_attributes = { "_toolchain": attrs.toolchain_dep(default = "toolchains//:erlang-default"), }, "erlang_test": { + "common_app_env": attrs.dict(key = attrs.string(), value = attrs.string(), default = {}, doc = """ + Application environment variables for the `common` application. + """), "config_files": attrs.list(attrs.dep(), default = [], doc = """ Will specify what config files the erlang beam machine running test with should load, for reference look at [OTP documentation](https://www.erlang.org/doc/man/config.html). These ones should consist of default_output of @@ -238,6 +262,10 @@ rules_attributes = { "extra_ct_hooks": attrs.list(attrs.string(), default = [], doc = """ List of additional Common Test hooks. The strings are interpreted as Erlang terms. """), + "extra_erl_flags": attrs.list(attrs.string(), default = [], doc = """ + List of additional command line arguments given to the erl command invocation. These + arguments are added to the front of the argument list. 
+ """), "preamble": attrs.string(default = read_root_config("erlang", "erlang_test_preamble", "test:info(),test:ensure_initialized(),test:start_shell()."), doc = """ """), "property_tests": attrs.list(attrs.dep(), default = [], doc = """ @@ -253,17 +281,17 @@ rules_attributes = { "suite": attrs.source(doc = """ The source file for the test suite. If you are using the macro, you should use the `suites` attribute instead. - The suites attribtue specify which erlang_test targets should be generated. For each suite "path_to_suite/suite_SUITE.erl" an + The suites attribute specifies which erlang_test targets should be generated. For each suite "path_to_suite/suite_SUITE.erl" an implicit 'erlang_test' target suite_SUITE will be generated. """), - "_artifact_annotation_mfa": attrs.string(), + "_artifact_annotation_mfa": attrs.string(default = "artifact_annotations:default_annotation/1"), "_cli_lib": attrs.dep(default = "prelude//erlang/common_test/test_cli_lib:test_cli_lib"), "_ct_opts": attrs.string(default = read_root_config("erlang", "erlang_test_ct_opts", "")), - "_providers": attrs.string(), - "_test_binary": attrs.dep(default = "prelude//erlang/common_test/test_binary:escript"), + "_providers": attrs.string(default = ""), "_test_binary_lib": attrs.dep(default = "prelude//erlang/common_test/test_binary:test_binary"), "_toolchain": attrs.toolchain_dep(default = "toolchains//:erlang-default"), - "_trampoline": attrs.option(attrs.dep(), default = None), + "_trampoline": attrs.option(attrs.dep(), default = None, doc = "DEPRECATED. Use _trampolines instead."), + "_trampolines": attrs.option(attrs.list(attrs.dep()), default = None), } | common_shell_attributes | re_test_args(), } @@ -509,7 +537,7 @@ erlang_test = prelude_rule( For each suite `_SUITE.erl`, if a data_dir `_SUITE_data` is present along the suite, (as per [the data_dir naming scheme for ct](https://www.erlang.org/doc/apps/common_test/write_test_chapter.html#data-and-private-directories)), - it will automatically adds the coresponding resource target to the generated test target of the suite. + it will automatically adds the corresponding resource target to the generated test target of the suite. Resources will be placed in the [Data directory (data_dir)](https://www.erlang.org/doc/apps/common_test/write_test_chapter.html#data_priv_dir) of each of the suite. @@ -520,16 +548,12 @@ erlang_test = prelude_rule( The `erlang_tests` macro forwards all attributes to the `erlang_test`. It defines some attributes that control how the targets get generated: - - `use_default_configs` (bool): Parameter that controls if the config files specified by the global config variable - `erlang.erlang_tests_default_config` should be used, default to True. - - `use_default_deps` (bool): Parameter that controls if the dependencies specified by the global config variable - `erlang.erlang_tests_default_apps` should be pulled, default to True. - `srcs` ([source]): Set of files that the suites might depend on and that are not part of any specific application. A "meta" application having those files as sources will automatically be created, and included in the dependencies of the tests. - Ene can call - - `buck2 build //my_app:test_SUITE` to compile the test files together with its depedencies. + One can call + - `buck2 build //my_app:test_SUITE` to compile the test files together with its dependencies. - `buck2 test //my_app:other_test_SUITE` to run the test. 
- `buck2 run //my_app:other_test_SUITE` to open an interactive test shell, where tests can be run iteratively. diff --git a/prelude/decls/genrule_common.bzl b/prelude/decls/genrule_common.bzl index 7bb6137476f75..d0eb117ecdc8d 100644 --- a/prelude/decls/genrule_common.bzl +++ b/prelude/decls/genrule_common.bzl @@ -49,13 +49,13 @@ def _cmd_arg(): A string expansion of the `srcs` argument delimited by the `environment_expansion_separator` argument where each element of `srcs` will be translated - into an absolute path. + into a relative path. `${SRCDIR}` - The absolute path to a directory to which sources are copied + The relative path to a directory to which sources are copied prior to running the command. @@ -64,8 +64,7 @@ def _cmd_arg(): The output file or directory for the `genrule()`. This variable will have whatever value is specified by - the `out` argument if not using\302\240 - named outputs + the `out` argument if not using named outputs. If using named outputs, this variable will be the output directory. @@ -73,7 +72,9 @@ def _cmd_arg(): command determine whether this filepath is treated as a file or a directory. If the filepath is a directory, then the shell command needs to create it if not using named outputs. Otherwise, it will - be automatically created. + be automatically created. All outputs (directories and files) must + be readable, writable, and (in the case of directories) executable + by the current user. The file or directory specified by this variable must always @@ -87,71 +88,6 @@ def _cmd_arg(): A temporary directory which can be used for intermediate results and will not be bundled into the output. - - - ##### String parameter macros - - It is also possible to expand references to other rules within the - `cmd`, using builtin `string parameter macros` - . - All build rules expanded in the command are automatically considered - to be dependencies of the `genrule()`. - - - Note that the paths returned by these macros are *absolute* paths. You should convert these paths to be relative paths before - embedding them in, for example, a shell script or batch file. Using - relative paths ensures that your builds are *hermetic*, that - is, they are reproducible across different machine environments. - - - Additionally, if you embed these paths in a shell script, you should - execute that script using the `sh\\_binary()`rule and include - the targets for these paths in the `resources` argument of - that `sh_binary` rule. These are the same targets that you - pass to the string parameter macros. - - - `$(classpath //path/to:target)` - - - Expands to the transitive classpath of the specified build - rule, provided that the rule has a Java classpath. If the rule - does not have (or contribute to) a classpath, then an - exception is thrown and the build breaks. - - - `$(exe //path/to:target)` - - - Expands a build rule that results in an executable to the - commands necessary to run that executable. For example, - a `java_binary()` might expand to a call - to `java -jar path/to/target.jar` . Files that are - executable (perhaps generated by a `genrule()`) - are also expanded. If the build rule does not generate an - executable output, then an exception is thrown and the build - breaks. - - - `$(location //path/to:target)` - - - Expands to the location of the output of the specified build - rule. This means that you can refer to the output without - needing to be aware of how Buck is storing data on the disk - mid-build. 
- - - `$(maven_coords //path/to:target)` - - - Expands to the Maven coordinates for the specified build rule. - This allows you to access the Maven coordinates for - Maven-aware build rules. The format of the expansion is: - - ``` - - ``` """), } @@ -174,6 +110,13 @@ def _cmd_exe_arg(): """), } +def _weight_arg(): + return { + "weight": attrs.option(attrs.int(), default = None, doc = """ + How many local slots this genrule should take when executing locally. +"""), + } + def _out_arg(): return { "out": attrs.option(attrs.string(), default = None, doc = """ @@ -244,6 +187,13 @@ def _environment_expansion_separator(): """), } +def _env_arg(): + return { + "env": attrs.dict(key = attrs.string(), value = attrs.arg(), sorted = False, default = {}, doc = """ + A map of variables to be set in the environment where the shell command is run. +"""), + } + genrule_common = struct( srcs_arg = _srcs_arg, cmd_arg = _cmd_arg, @@ -251,5 +201,7 @@ cmd_exe_arg = _cmd_exe_arg, out_arg = _out_arg, type_arg = _type_arg, + weight_arg = _weight_arg, environment_expansion_separator = _environment_expansion_separator, + env_arg = _env_arg, ) diff --git a/prelude/decls/go_common.bzl b/prelude/decls/go_common.bzl index 845f3861bc7ae..4c26077dc783f 100644 --- a/prelude/decls/go_common.bzl +++ b/prelude/decls/go_common.bzl @@ -30,6 +30,16 @@ def _srcs_arg(): """), } +def _package_root_arg(): + return { + "package_root": attrs.option(attrs.string(), default = None, doc = """ + Sets the Go package directory (relative to the BUCK file). + By default (or if None is passed), the package_root is detected automatically. + Use an empty string if the Go package is on the same level as the BUCK file; otherwise, use the subdirectory name. + Example: for srcs = ["foo/bar.go"], package_root = "foo" +"""), + } + def _link_style_arg(): return { "link_style": attrs.option(attrs.enum(LinkableDepType), default = None, doc = """ @@ -48,13 +58,6 @@ def _link_mode_arg(): """), } -def _cgo_compiler_flags_arg(): - return { - "cgo_compiler_flags": attrs.list(attrs.string(), default = [], doc = """ - The set of additional compiler flags to pass to `go tool cgo`. -"""), - } - def _package_name_arg(): return { "package_name": attrs.option(attrs.string(), default = None, doc = """ @@ -88,8 +91,6 @@ def _external_linker_flags_arg(): return { "external_linker_flags": attrs.list(attrs.arg(), default = [], doc = """ Extra external linker flags passed to go link via `-extld` argument. - If argument is non-empty or `cgo_library` is used, the link mode - will switch to `external`. """), } @@ -124,16 +125,82 @@ def _embedcfg_arg(): """), } +def _cgo_enabled_arg(): + return { + "cgo_enabled": attrs.option(attrs.bool(), default = None, doc = """ + Analog of the CGO_ENABLED env-var, applies to this target and its dependencies. + If None, the `go_toolchain.default_cgo_enabled` value is applied. +"""), + } + +def _override_cgo_enabled_arg(): + return { + "override_cgo_enabled": attrs.option(attrs.bool(), default = None, doc = """ + Per-target analog of the CGO_ENABLED env-var, overrides its value for the target, but not for its dependencies. +"""), + } + +def _race_arg(): + return { + "race": attrs.bool(default = False, doc = """ + If true, enable data race detection. +"""), + } + +def _asan_arg(): + return { + "asan": attrs.bool(default = False, doc = """ + If true, enable ASAN. +"""), + } + +def _tags_arg(): + return { + "tags": attrs.list(attrs.string(), default = [], doc = """ + Build tags to apply to this target and its dependencies.
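A short sketch of how these new Go attributes might be used on a binary target (the file layout is hypothetical):

```
go_binary(
    name = "server",
    srcs = ["cmd/server/main.go"],
    # sources live in a subdirectory relative to this BUCK file
    package_root = "cmd/server",
    race = True,        # enable the data race detector
    tags = ["netgo"],   # build tags applied to this target and its deps
)
```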
+"""), + } + +def _cxx_compiler_flags_arg(): + return { + "cxx_compiler_flags": attrs.list(attrs.arg(), default = [], doc = """ + GCC/Clang flags to use when compiling any of the above C/C++ sources (which require compilation). +"""), + } + +def _cxx_preprocessor_flags_arg(): + return { + "cxx_preprocessor_flags": attrs.list(attrs.arg(), default = [], doc = """ + GCC/Clang flags to use when preprocessing any of the above C/C++ sources (which require preprocessing). +"""), + } + +def _generate_exported_header(): + return { + "generate_exported_header": attrs.bool(default = False, doc = """ + Generate header file with declaration for functions exported with `//export` + The header name for target `cell//foo/bar:lib` will be `foo/bar/lib.h` +"""), + } + go_common = struct( deps_arg = _deps_arg, srcs_arg = _srcs_arg, + package_root_arg = _package_root_arg, link_style_arg = _link_style_arg, link_mode_arg = _link_mode_arg, - cgo_compiler_flags_arg = _cgo_compiler_flags_arg, package_name_arg = _package_name_arg, compiler_flags_arg = _compiler_flags_arg, assembler_flags_arg = _assembler_flags_arg, linker_flags_arg = _linker_flags_arg, external_linker_flags_arg = _external_linker_flags_arg, embedcfg_arg = _embedcfg_arg, + cgo_enabled_arg = _cgo_enabled_arg, + override_cgo_enabled_arg = _override_cgo_enabled_arg, + race_arg = _race_arg, + asan_arg = _asan_arg, + tags_arg = _tags_arg, + cxx_compiler_flags_arg = _cxx_compiler_flags_arg, + cxx_preprocessor_flags_arg = _cxx_preprocessor_flags_arg, + generate_exported_header = _generate_exported_header, ) diff --git a/prelude/decls/go_rules.bzl b/prelude/decls/go_rules.bzl index 2ec5f5ebdd951..f1d3cf3085c60 100644 --- a/prelude/decls/go_rules.bzl +++ b/prelude/decls/go_rules.bzl @@ -10,125 +10,15 @@ # the generated docs, and so those should be verified to be accurate and # well-formatted (and then delete this TODO) -load(":common.bzl", "CxxRuntimeType", "CxxSourceType", "HeadersAsRawHeadersMode", "Traversal", "buck", "prelude_rule") +load(":common.bzl", "buck", "prelude_rule") load(":cxx_common.bzl", "cxx_common") load(":go_common.bzl", "go_common") -load(":native_common.bzl", "native_common") +load(":re_test_common.bzl", "re_test_common") BuildMode = ["executable", "c_shared", "c_archive"] GoTestCoverStepMode = ["set", "count", "atomic", "none"] -cgo_library = prelude_rule( - name = "cgo_library", - docs = """ - A cgo\\_library() rule builds an object from the supplied set of Go/C source files and - dependencies. The outputs are linked into go executable in the last step (compile). - - The 'go build' command would collect the cgo directives from the source files, however - with buck the flags needs to be passed in the cgo\\_library manually - - This rule borrows from `cxx\\_binary()`since C/C++ sources are being compiled. - """, - examples = """ - ``` - - # A rule that builds a Go native executable with linked cgo library based on - # C/C++ util library. - go_binary( - name = "bin", - srcs = ["main.go"], - deps = [":lib"] - ) - - cgo_library( - name = "lib", - srcs = ["cgo_source.go"], - deps = [":util"], - ) - - cxx_library( - name = "util", - srcs = ["util.c"], - headers = ["util.h"], - ) - - ``` - """, - further = None, - attrs = ( - # @unsorted-dict-items - go_common.package_name_arg() | - { - "srcs": attrs.list(attrs.one_of(attrs.source(), attrs.tuple(attrs.source(), attrs.list(attrs.arg()))), default = [], doc = """ - The set of source files to be compiled by this rule. .go files will be compiled with the CGO - compiler. 
Each file needs to have `import "C"` declared. - """), - "go_srcs": attrs.list(attrs.source(), default = [], doc = """ - The set of source files to be compiled by this rule. Go (`.go`) files are compiled with the Go - compiler. In contrast to the `srcs` argument, these files *cannot* have `import "C"` declared. - """), - } | - cxx_common.headers_arg() | - cxx_common.preprocessor_flags_arg() | - cxx_common.platform_preprocessor_flags_arg() | - go_common.cgo_compiler_flags_arg() | - go_common.embedcfg_arg() | - cxx_common.compiler_flags_arg() | - cxx_common.platform_compiler_flags_arg() | - cxx_common.linker_extra_outputs_arg() | - cxx_common.linker_flags_arg() | - cxx_common.platform_linker_flags_arg() | - native_common.link_style() | - cxx_common.raw_headers_arg() | - { - "go_compiler_flags": attrs.list(attrs.string(), default = [], doc = """ - The set of additional compiler flags to pass to `go tool compile`. - """), - "go_assembler_flags": attrs.list(attrs.string(), default = [], doc = """ - The set of additional assembler flags to pass to `go tool asm`. - """), - "contacts": attrs.list(attrs.string(), default = []), - "cxx_runtime_type": attrs.option(attrs.enum(CxxRuntimeType), default = None), - "default_host_platform": attrs.option(attrs.configuration_label(), default = None), - "default_platform": attrs.option(attrs.string(), default = None), - "defaults": attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False, default = {}), - "deps": attrs.list(attrs.dep(), default = []), - "deps_query": attrs.option(attrs.query(), default = None), - "devirt_enabled": attrs.bool(default = False), - "executable_name": attrs.option(attrs.string(), default = None), - "exported_deps": attrs.list(attrs.dep(), default = []), - "fat_lto": attrs.bool(default = False), - "focused_list_target": attrs.option(attrs.dep(), default = None), - "frameworks": attrs.list(attrs.string(), default = []), - "header_namespace": attrs.option(attrs.string(), default = None), - "headers_as_raw_headers_mode": attrs.option(attrs.enum(HeadersAsRawHeadersMode), default = None), - "include_directories": attrs.set(attrs.string(), sorted = True, default = []), - "labels": attrs.list(attrs.string(), default = []), - "lang_compiler_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.arg()), sorted = False, default = {}), - "lang_platform_compiler_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg()))), sorted = False, default = {}), - "lang_platform_preprocessor_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg()))), sorted = False, default = {}), - "lang_preprocessor_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.arg()), sorted = False, default = {}), - "libraries": attrs.list(attrs.string(), default = []), - "licenses": attrs.list(attrs.source(), default = []), - "link_deps_query_whole": attrs.bool(default = False), - "link_group": attrs.option(attrs.string(), default = None), - "link_group_map": attrs.option(attrs.list(attrs.tuple(attrs.string(), attrs.list(attrs.tuple(attrs.dep(), attrs.enum(Traversal), attrs.option(attrs.string()))))), default = None), - "platform_deps": attrs.list(attrs.tuple(attrs.regex(), attrs.set(attrs.dep(), sorted = True)), default = []), - "platform_headers": attrs.list(attrs.tuple(attrs.regex(), attrs.named_set(attrs.source(), sorted = True)), default = []), - "platform_srcs": 
attrs.list(attrs.tuple(attrs.regex(), attrs.set(attrs.one_of(attrs.source(), attrs.tuple(attrs.source(), attrs.list(attrs.arg()))), sorted = True)), default = []), - "post_linker_flags": attrs.list(attrs.arg(), default = []), - "post_platform_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), - "precompiled_header": attrs.option(attrs.source(), default = None), - "prefer_stripped_objects": attrs.bool(default = False), - "prefix_header": attrs.option(attrs.source(), default = None), - "thin_lto": attrs.bool(default = False), - "version_universe": attrs.option(attrs.string(), default = None), - "weak_framework_names": attrs.list(attrs.string(), default = []), - } - ), -) - go_binary = prelude_rule( name = "go_binary", docs = """ @@ -173,6 +63,7 @@ go_binary = prelude_rule( further = None, attrs = ( # @unsorted-dict-items + go_common.package_name_arg() | go_common.srcs_arg() | go_common.deps_arg() | go_common.link_style_arg() | @@ -182,6 +73,15 @@ go_binary = prelude_rule( go_common.linker_flags_arg() | go_common.external_linker_flags_arg() | go_common.embedcfg_arg() | + go_common.package_root_arg() | + go_common.cgo_enabled_arg() | + go_common.race_arg() | + go_common.asan_arg() | + go_common.tags_arg() | + cxx_common.headers_arg() | + cxx_common.header_namespace_arg() | + go_common.cxx_preprocessor_flags_arg() | + go_common.cxx_compiler_flags_arg() | { "resources": attrs.list(attrs.source(), default = [], doc = """ Static files to be symlinked into the working directory of the test. You can access these in your @@ -192,7 +92,6 @@ go_binary = prelude_rule( "labels": attrs.list(attrs.string(), default = []), "licenses": attrs.list(attrs.source(), default = []), "platform": attrs.option(attrs.string(), default = None), - "platform_external_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), } ), ) @@ -217,13 +116,12 @@ go_exported_library = prelude_rule( deps = [":example"], ) - cgo_library( + go_library( name = "example", package_name = "cgo", srcs = [ "export-to-c.go", # file with //export annotations ], - cgo_compiler_flags = [], compiler_flags = [], headers = [], ) @@ -248,6 +146,7 @@ go_exported_library = prelude_rule( further = None, attrs = ( # @unsorted-dict-items + go_common.package_name_arg() | go_common.srcs_arg() | go_common.deps_arg() | { @@ -260,12 +159,22 @@ go_exported_library = prelude_rule( `gcflags`, `ldflags` and `asmflags`` """), } | + cxx_common.headers_arg() | + cxx_common.header_namespace_arg() | + go_common.cxx_preprocessor_flags_arg() | + go_common.cxx_compiler_flags_arg() | go_common.link_style_arg() | go_common.link_mode_arg() | go_common.compiler_flags_arg() | go_common.assembler_flags_arg() | go_common.linker_flags_arg() | go_common.external_linker_flags_arg() | + go_common.package_root_arg() | + go_common.cgo_enabled_arg() | + go_common.race_arg() | + go_common.asan_arg() | + go_common.tags_arg() | + go_common.generate_exported_header() | { "resources": attrs.list(attrs.source(), default = [], doc = """ Static files to be symlinked into the working directory of the test. 
You can access these in your @@ -277,7 +186,6 @@ go_exported_library = prelude_rule( "labels": attrs.list(attrs.string(), default = []), "licenses": attrs.list(attrs.source(), default = []), "platform": attrs.option(attrs.string(), default = None), - "platform_external_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), } ), ) @@ -315,10 +223,18 @@ go_library = prelude_rule( go_common.compiler_flags_arg() | go_common.assembler_flags_arg() | go_common.embedcfg_arg() | + go_common.package_root_arg() | + go_common.override_cgo_enabled_arg() | + cxx_common.headers_arg() | + cxx_common.header_namespace_arg() | + go_common.cxx_preprocessor_flags_arg() | + go_common.cxx_compiler_flags_arg() | + go_common.external_linker_flags_arg() | + go_common.link_style_arg() | + go_common.generate_exported_header() | { "contacts": attrs.list(attrs.string(), default = []), "default_host_platform": attrs.option(attrs.configuration_label(), default = None), - "exported_deps": attrs.list(attrs.dep(), default = []), "labels": attrs.list(attrs.string(), default = []), "licenses": attrs.list(attrs.source(), default = []), } @@ -389,6 +305,7 @@ go_test = prelude_rule( further = None, attrs = ( # @unsorted-dict-items + buck.inject_test_env_arg() | go_common.srcs_arg() | { "library": attrs.option(attrs.dep(), default = None, doc = """ @@ -415,6 +332,15 @@ go_test = prelude_rule( go_common.linker_flags_arg() | go_common.external_linker_flags_arg() | go_common.embedcfg_arg() | + go_common.package_root_arg() | + go_common.cgo_enabled_arg() | + go_common.race_arg() | + go_common.asan_arg() | + go_common.tags_arg() | + cxx_common.headers_arg() | + cxx_common.header_namespace_arg() | + go_common.cxx_preprocessor_flags_arg() | + go_common.cxx_compiler_flags_arg() | { "resources": attrs.list(attrs.source(), default = [], doc = """ Static files that are symlinked into the working directory of the @@ -437,75 +363,25 @@ go_test = prelude_rule( "platform": attrs.option(attrs.string(), default = None), "runner": attrs.option(attrs.dep(), default = None), "specs": attrs.option(attrs.arg(json = True), default = None), - } + } | + re_test_common.test_args() ), ) - -go_test_runner = prelude_rule( - name = "go_test_runner", - docs = "", - examples = None, - further = None, +go_bootstrap_binary = prelude_rule( + name = "go_bootstrap_binary", attrs = ( - # @unsorted-dict-items - { - "contacts": attrs.list(attrs.string(), default = []), - "default_host_platform": attrs.option(attrs.configuration_label(), default = None), - "labels": attrs.list(attrs.string(), default = []), - "licenses": attrs.list(attrs.source(), default = []), - "test_runner_generator": attrs.source(), - } - ), -) - -prebuilt_go_library = prelude_rule( - name = "prebuilt_go_library", - docs = """ - A prebuilt\\_go\\_library() rule provides a native library from the specified file. - """, - examples = """ - For more examples, check out our [integration tests](https://github.com/facebook/buck/tree/dev/test/com/facebook/buck/features/go/testdata). - - - ``` - - prebuilt_go_library( - name='greeting', - package_name='greeting', - library='greeting.a', - deps=[ - ':join', - ], - ) - - ``` - """, - further = None, - attrs = ( - # @unsorted-dict-items - { - "library": attrs.source(doc = """ - Path to the precompiled Go library - typically of the form 'foo.a'. 
- """), - } | - go_common.package_name_arg() | - go_common.deps_arg() | + go_common.srcs_arg() | { - "contacts": attrs.list(attrs.string(), default = []), - "default_host_platform": attrs.option(attrs.configuration_label(), default = None), - "exported_deps": attrs.list(attrs.dep(), default = []), - "labels": attrs.list(attrs.string(), default = []), - "licenses": attrs.list(attrs.source(), default = []), + "entrypoints": attrs.list(attrs.string(), default = [], doc = """Package name or file names"""), + "workdir": attrs.string(default = "", doc = """Change to subdir before running the command"""), } ), ) go_rules = struct( - cgo_library = cgo_library, go_binary = go_binary, + go_bootstrap_binary = go_bootstrap_binary, go_exported_library = go_exported_library, go_library = go_library, go_test = go_test, - go_test_runner = go_test_runner, - prebuilt_go_library = prebuilt_go_library, ) diff --git a/prelude/decls/groovy_rules.bzl b/prelude/decls/groovy_rules.bzl index 0e5aef98b3302..6152e1d76b302 100644 --- a/prelude/decls/groovy_rules.bzl +++ b/prelude/decls/groovy_rules.bzl @@ -11,6 +11,7 @@ # well-formatted (and then delete this TODO) load(":common.bzl", "ForkMode", "LogLevel", "SourceAbiVerificationMode", "TestType", "UnusedDependenciesAction", "prelude_rule") +load(":jvm_common.bzl", "jvm_common") groovy_library = prelude_rule( name = "groovy_library", @@ -70,22 +71,22 @@ groovy_library = prelude_rule( `.java`, cross compilation using the jdk found in `JAVA_HOME` will occur. """), "resources": attrs.list(attrs.source(), default = [], doc = """ - This is the same as in `java\\_library()`. + This is the same as in `java_library()`. """), "deps": attrs.list(attrs.dep(), default = [], doc = """ Rules (usually other `groovy_library` or ``java_library()`` rules) that are used to generate the classpath required to compile this `groovy_library`. - This is the same as in `java\\_library()`. + This is the same as in `java_library()`. """), "exported_deps": attrs.list(attrs.dep(), default = [], doc = """ Other `groovy_library` and ``java_library()`` rules that depend on this rule will also include its `exported_deps` in their classpaths. - This is the same as in `java\\_library()`. + This is the same as in `java_library()`. """), "provided_deps": attrs.list(attrs.dep(), default = [], doc = """ - This is the same as in `java\\_library()`. + This is the same as in `java_library()`. """), "extra_groovyc_arguments": attrs.list(attrs.string(), default = [], doc = """ List of additional arguments to pass into the Groovy compiler. @@ -93,22 +94,22 @@ groovy_library = prelude_rule( "source": attrs.option(attrs.string(), default = None, doc = """ Only used during cross compilation. - This is the same as in `java\\_library()`. + This is the same as in `java_library()`. """), "target": attrs.option(attrs.string(), default = None, doc = """ Only used during cross compilation. - This is the same as in `java\\_library()`. + This is the same as in `java_library()`. """), "java_version": attrs.option(attrs.string(), default = None, doc = """ Only used during cross compilation. - This is the same as in `java\\_library()`. + This is the same as in `java_library()`. """), "extra_arguments": attrs.list(attrs.string(), default = [], doc = """ Only used during cross compilation. - This is the same as in `java\\_library()`. + This is the same as in `java_library()`. 
"""), "annotation_processor_deps": attrs.list(attrs.dep(), default = []), "annotation_processor_params": attrs.list(attrs.string(), default = []), @@ -116,14 +117,12 @@ groovy_library = prelude_rule( "contacts": attrs.list(attrs.string(), default = []), "default_host_platform": attrs.option(attrs.configuration_label(), default = None), "exported_provided_deps": attrs.list(attrs.dep(), default = []), - "javac": attrs.option(attrs.source(), default = None), "labels": attrs.list(attrs.string(), default = []), "licenses": attrs.list(attrs.source(), default = []), "manifest_file": attrs.option(attrs.source(), default = None), "maven_coords": attrs.option(attrs.string(), default = None), "never_mark_as_unused_dependency": attrs.option(attrs.bool(), default = None), "on_unused_dependencies": attrs.option(attrs.enum(UnusedDependenciesAction), default = None), - "plugins": attrs.list(attrs.dep(), default = []), "proguard_config": attrs.option(attrs.source(), default = None), "remove_classes": attrs.list(attrs.regex(), default = []), "required_for_source_only_abi": attrs.bool(default = False), @@ -132,7 +131,7 @@ groovy_library = prelude_rule( "source_abi_verification_mode": attrs.option(attrs.enum(SourceAbiVerificationMode), default = None), "source_only_abi_deps": attrs.list(attrs.dep(), default = []), } - ), + ) | jvm_common.plugins() | jvm_common.javac(), ) groovy_test = prelude_rule( @@ -159,14 +158,12 @@ groovy_test = prelude_rule( "extra_groovyc_arguments": attrs.list(attrs.string(), default = []), "fork_mode": attrs.enum(ForkMode, default = "none"), "java_version": attrs.option(attrs.string(), default = None), - "javac": attrs.option(attrs.source(), default = None), "labels": attrs.list(attrs.string(), default = []), "licenses": attrs.list(attrs.source(), default = []), "manifest_file": attrs.option(attrs.source(), default = None), "maven_coords": attrs.option(attrs.string(), default = None), "never_mark_as_unused_dependency": attrs.option(attrs.bool(), default = None), "on_unused_dependencies": attrs.option(attrs.enum(UnusedDependenciesAction), default = None), - "plugins": attrs.list(attrs.dep(), default = []), "proguard_config": attrs.option(attrs.source(), default = None), "provided_deps": attrs.list(attrs.dep(), default = []), "remove_classes": attrs.list(attrs.regex(), default = []), @@ -189,7 +186,7 @@ groovy_test = prelude_rule( "use_dependency_order_classpath": attrs.option(attrs.bool(), default = None), "vm_args": attrs.list(attrs.arg(), default = []), } - ), + ) | jvm_common.plugins() | jvm_common.javac(), ) groovy_rules = struct( diff --git a/prelude/decls/halide_rules.bzl b/prelude/decls/halide_rules.bzl index 52db23f1086e3..7ddda0ec7cabd 100644 --- a/prelude/decls/halide_rules.bzl +++ b/prelude/decls/halide_rules.bzl @@ -10,7 +10,8 @@ # the generated docs, and so those should be verified to be accurate and # well-formatted (and then delete this TODO) -load(":common.bzl", "CxxRuntimeType", "CxxSourceType", "HeadersAsRawHeadersMode", "LinkableDepType", "Traversal", "prelude_rule") +load("@prelude//cxx:link_groups_types.bzl", "LINK_GROUP_MAP_ATTR") +load(":common.bzl", "CxxRuntimeType", "CxxSourceType", "HeadersAsRawHeadersMode", "LinkableDepType", "prelude_rule") load(":cxx_common.bzl", "cxx_common") halide_library = prelude_rule( @@ -98,7 +99,7 @@ halide_library = prelude_rule( "licenses": attrs.list(attrs.source(), default = []), "link_deps_query_whole": attrs.bool(default = False), "link_group": attrs.option(attrs.string(), default = None), - "link_group_map": 
attrs.option(attrs.list(attrs.tuple(attrs.string(), attrs.list(attrs.tuple(attrs.dep(), attrs.enum(Traversal), attrs.option(attrs.string()))))), default = None), + "link_group_map": LINK_GROUP_MAP_ATTR, "link_style": attrs.option(attrs.enum(LinkableDepType), default = None), "linker_extra_outputs": attrs.list(attrs.string(), default = []), "platform_deps": attrs.list(attrs.tuple(attrs.regex(), attrs.set(attrs.dep(), sorted = True)), default = []), diff --git a/prelude/decls/haskell_common.bzl b/prelude/decls/haskell_common.bzl index 7c03f4bce834e..8a8ee90ecae65 100644 --- a/prelude/decls/haskell_common.bzl +++ b/prelude/decls/haskell_common.bzl @@ -13,14 +13,14 @@ def _srcs_arg(): return { "srcs": attrs.named_set(attrs.source(), sorted = True, default = [], doc = """ - A list of Haskell sources to be built by this rule. + A list of Haskell sources to be built by this rule. The dictionary option is deprecated. """), } def _deps_arg(): return { "deps": attrs.list(attrs.dep(), default = [], doc = """ - Either `haskell\\_library()`or `prebuilt\\_haskell\\_library()`rules + Either `haskell_library()` or `prebuilt_haskell_library()` rules from which this rules sources import modules or native linkable rules exporting symbols this rules sources call into. """), diff --git a/prelude/decls/haskell_rules.bzl b/prelude/decls/haskell_rules.bzl index e6ddcdd469e21..0233dcdd4a128 100644 --- a/prelude/decls/haskell_rules.bzl +++ b/prelude/decls/haskell_rules.bzl @@ -10,7 +10,8 @@ # the generated docs, and so those should be verified to be accurate and # well-formatted (and then delete this TODO) -load(":common.bzl", "LinkableDepType", "Linkage", "buck", "prelude_rule") +load("@prelude//linking:types.bzl", "Linkage") +load(":common.bzl", "LinkableDepType", "buck", "prelude_rule") load(":haskell_common.bzl", "haskell_common") load(":native_common.bzl", "native_common") @@ -106,10 +107,11 @@ haskell_haddock = prelude_rule( "default_host_platform": attrs.option(attrs.configuration_label(), default = None), "deps": attrs.list(attrs.dep(), default = []), "deps_query": attrs.option(attrs.query(), default = None), - "haddock_flags": attrs.list(attrs.string(), default = []), + "haddock_flags": attrs.list(attrs.arg(), default = []), "labels": attrs.list(attrs.string(), default = []), "licenses": attrs.list(attrs.source(), default = []), "platform": attrs.option(attrs.string(), default = None), + "platform_deps": attrs.list(attrs.tuple(attrs.regex(), attrs.set(attrs.dep(), sorted = True)), default = []), } ), ) @@ -165,14 +167,14 @@ haskell_library = prelude_rule( haskell_common.deps_arg() | buck.platform_deps_arg() | native_common.link_whole(link_whole_type = attrs.bool(default = False)) | - native_common.preferred_linkage(preferred_linkage_type = attrs.enum(Linkage)) | + native_common.preferred_linkage(preferred_linkage_type = attrs.enum(Linkage.values())) | { "contacts": attrs.list(attrs.string(), default = []), "default_host_platform": attrs.option(attrs.configuration_label(), default = None), "enable_profiling": attrs.bool(default = False), "ghci_platform_preload_deps": attrs.list(attrs.tuple(attrs.regex(), attrs.set(attrs.dep(), sorted = True)), default = []), "ghci_preload_deps": attrs.set(attrs.dep(), sorted = True, default = []), - "haddock_flags": attrs.list(attrs.string(), default = []), + "haddock_flags": attrs.list(attrs.arg(), default = []), "labels": attrs.list(attrs.string(), default = []), "licenses": attrs.list(attrs.source(), default = []), "linker_flags": attrs.list(attrs.arg(), default = []), 
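With `haddock_flags` switching from `attrs.string()` to `attrs.arg()` (here in `haskell_library` and in `haskell_haddock` above), the flags now go through Buck's argument expansion, so they can embed macros that resolve to build artifacts instead of being passed through verbatim. A minimal sketch of what that enables; the `//docs:haddock-prologue` target is illustrative and not part of this diff:

```
haskell_library(
    name = "lib",
    srcs = ["Lib.hs"],
    # With attrs.arg(), $(location ...) expands to the artifact's path;
    # with attrs.string() the macro text would have been passed verbatim.
    haddock_flags = ["--prologue=$(location //docs:haddock-prologue)"],
)
```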
@@ -230,6 +232,7 @@ haskell_prebuilt_library = prelude_rule( } | haskell_common.exported_linker_flags_arg() | { + "exported_post_linker_flags": attrs.list(attrs.arg(anon_target_compatible = True), default = []), "contacts": attrs.list(attrs.string(), default = []), "cxx_header_dirs": attrs.list(attrs.source(), default = []), "db": attrs.source(), diff --git a/prelude/decls/ios_rules.bzl b/prelude/decls/ios_rules.bzl deleted file mode 100644 index 4e037a94f32df..0000000000000 --- a/prelude/decls/ios_rules.bzl +++ /dev/null @@ -1,1100 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under both the MIT license found in the -# LICENSE-MIT file in the root directory of this source tree and the Apache -# License, Version 2.0 found in the LICENSE-APACHE file in the root directory -# of this source tree. - -# TODO(cjhopman): This was generated by scripts/hacks/rules_shim_with_docs.py, -# but should be manually edited going forward. There may be some errors in -# the generated docs, and so those should be verified to be accurate and -# well-formatted (and then delete this TODO) - -load(":apple_common.bzl", "apple_common") -load(":common.bzl", "CxxRuntimeType", "CxxSourceType", "HeadersAsRawHeadersMode", "IncludeType", "Linkage", "Traversal", "buck", "prelude_rule") -load(":cxx_common.bzl", "cxx_common") -load(":native_common.bzl", "native_common") - -AdditionalActions = ["pre_scheme_actions", "post_scheme_actions"] - -AppleBundleExtension = ["app", "framework", "appex", "plugin", "bundle", "xctest", "dsym", "xpc", "prefpane", "qlgenerator"] - -AppleResourceBundleDestination = ["resources", "frameworks", "executables", "plugins", "xpcservices", "loginitems", "systemextensions"] - -LaunchStyle = ["auto", "wait"] - -SchemeActionType = ["build", "launch", "test", "profile", "analyze", "archive"] - -WatchInterface = ["main", "complication", "dynamic_notification", "static_notification"] - -apple_asset_catalog = prelude_rule( - name = "apple_asset_catalog", - docs = """ - An `apple_asset_catalog()` rule contains resources stored in Apple asset catalog - directories. This rule does not have any output on its own and can be built only as a dependency - (either direct or transitive) of an `apple_bundle()` rule, in which case all `apple_asset_catalog()` rules - that the bundle rule depends on are merged and placed into the final output bundle together. - """, - examples = """ - ``` - - apple_asset_catalog( - name = 'MyAssetCatalog', - dirs = [ - 'MyResources.xcassets', - ], - ) - - # A asset catalog with a app icon and launch image - apple_asset_catalog( - name = 'AssetCatalog', - dirs = [ 'AssetCatalog.xcassets' ], - app_icon = 'Icon', - launch_image = 'LaunchImage', - ) - - ``` - """, - further = None, - attrs = ( - # @unsorted-dict-items - { - "dirs": attrs.list(attrs.source(), default = [], doc = """ - Set of paths of Apple asset catalogs contained by this rule. All paths have to end with the `.xcassets` extension and be compatible with the asset catalog format used by Xcode. - """), - "app_icon": attrs.option(attrs.string(), default = None, doc = """ - An optional reference to a `.appiconset` containing a image set representing an - application icon. (The extension itself should not be included.) This parameter - may be specified at most once in a given `apple_bundle`'s transitive dependencies. 
- """), - "launch_image": attrs.option(attrs.string(), default = None, doc = """ - An optional reference to a `.launchimage` containing a image set representing an - application launch image. (The extension itself should not be included.) This parameter - may be specified at most once in a given `apple_bundle`'s transitive dependencies. - """), - "contacts": attrs.list(attrs.string(), default = []), - "default_host_platform": attrs.option(attrs.configuration_label(), default = None), - "labels": attrs.list(attrs.string(), default = []), - "licenses": attrs.list(attrs.source(), default = []), - } - ), -) - -apple_binary = prelude_rule( - name = "apple_binary", - docs = """ - An `apple_binary()` rule builds a native executable\342\200\224such as an iOS or OSX app\342\200\224from - the supplied set of Objective-C/C++ source files and dependencies. It is similar to - a `cxx\\_binary()`rule with which it shares many attributes. In addition - to those common attributes, `apple_binary()` has a some additional attributes - that are specific to binaries intended to be built using the Apple toolchain. - Note, however, that `apple_binary()` and `cxx_binary()` differ - in the way that they import header files, in order to better accommodate existing conventions. - See the sections for the `headers` and `exported_headers` attributes for more details. - - - Buck enables you to override components of the Apple toolchain with - alternate tools, either from the Xcode search paths or from directories - that you specify. - See `.buckconfig` - and `.buckconfig` - for more information. - """, - examples = """ - ``` - - apple_binary( - name = 'MyBinary', - deps = [ - ':MyLibrary', - '//Libraries:AnotherLibrary', - ], - preprocessor_flags = ['-fobjc-arc'], - headers = [ - 'MyHeader.h', - ], - srcs = [ - 'MySource.m', - ], - frameworks = [ - '$SDKROOT/System/Library/Frameworks/UIKit.framework', - '$SDKROOT/System/Library/Frameworks/Foundation.framework', - ], - ) - - ``` - """, - further = None, - attrs = ( - # @unsorted-dict-items - cxx_common.srcs_arg() | - cxx_common.platform_srcs_arg() | - apple_common.headers_arg() | - { - "entitlements_file": attrs.option(attrs.source(), default = None, doc = """ - An optional name of a plist file to be embedded in the binary. Some platforms like - `iphonesimulator` require this to run properly. 
- """), - } | - apple_common.exported_headers_arg() | - apple_common.header_path_prefix_arg() | - apple_common.frameworks_arg() | - cxx_common.preprocessor_flags_arg() | - cxx_common.exported_preprocessor_flags_arg(exported_preprocessor_flags_type = attrs.list(attrs.arg(), default = [])) | - cxx_common.compiler_flags_arg() | - cxx_common.platform_compiler_flags_arg() | - cxx_common.linker_extra_outputs_arg() | - cxx_common.linker_flags_arg() | - cxx_common.exported_linker_flags_arg() | - cxx_common.platform_linker_flags_arg() | - native_common.link_style() | - native_common.link_group_public_deps_label() | - apple_common.target_sdk_version() | - apple_common.extra_xcode_sources() | - apple_common.extra_xcode_files() | - { - "bridging_header": attrs.option(attrs.source(), default = None), - "can_be_asset": attrs.option(attrs.bool(), default = None), - "configs": attrs.dict(key = attrs.string(), value = attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False), sorted = False, default = {}), - "contacts": attrs.list(attrs.string(), default = []), - "cxx_runtime_type": attrs.option(attrs.enum(CxxRuntimeType), default = None), - "default_host_platform": attrs.option(attrs.configuration_label(), default = None), - "default_platform": attrs.option(attrs.string(), default = None), - "defaults": attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False, default = {}), - "deps": attrs.list(attrs.dep(), default = []), - "devirt_enabled": attrs.bool(default = False), - "diagnostics": attrs.dict(key = attrs.string(), value = attrs.source(), sorted = False, default = {}), - "enable_cxx_interop": attrs.bool(default = False), - "executable_name": attrs.option(attrs.string(), default = None), - "exported_deps": attrs.list(attrs.dep(), default = []), - "exported_header_style": attrs.enum(IncludeType, default = "local"), - "exported_lang_platform_preprocessor_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg()))), sorted = False, default = {}), - "exported_lang_preprocessor_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.arg()), sorted = False, default = {}), - "exported_platform_deps": attrs.list(attrs.tuple(attrs.regex(), attrs.set(attrs.dep(), sorted = True)), default = []), - "exported_platform_headers": attrs.list(attrs.tuple(attrs.regex(), attrs.named_set(attrs.source(), sorted = True)), default = []), - "exported_platform_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), - "exported_platform_preprocessor_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), - "exported_post_linker_flags": attrs.list(attrs.arg(), default = []), - "exported_post_platform_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), - "fat_lto": attrs.bool(default = False), - "focused_list_target": attrs.option(attrs.dep(), default = None), - "force_static": attrs.option(attrs.bool(), default = None), - "header_namespace": attrs.option(attrs.string(), default = None), - "headers_as_raw_headers_mode": attrs.option(attrs.enum(HeadersAsRawHeadersMode), default = None), - "include_directories": attrs.set(attrs.string(), sorted = True, default = []), - "info_plist": attrs.option(attrs.source(), default = None), - "info_plist_substitutions": attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False, default = {}), - "labels": attrs.list(attrs.string(), default = []), - 
"lang_compiler_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.arg()), sorted = False, default = {}), - "lang_platform_compiler_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg()))), sorted = False, default = {}), - "lang_platform_preprocessor_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg()))), sorted = False, default = {}), - "lang_preprocessor_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.arg()), sorted = False, default = {}), - "libraries": attrs.list(attrs.string(), default = []), - "licenses": attrs.list(attrs.source(), default = []), - "link_group": attrs.option(attrs.string(), default = None), - "link_group_map": attrs.option(attrs.list(attrs.tuple(attrs.string(), attrs.list(attrs.tuple(attrs.dep(), attrs.enum(Traversal), attrs.option(attrs.string()))))), default = None), - "link_whole": attrs.option(attrs.bool(), default = None), - "modular": attrs.bool(default = False), - "module_name": attrs.option(attrs.string(), default = None), - "module_requires_cxx": attrs.bool(default = False), - "platform_deps": attrs.list(attrs.tuple(attrs.regex(), attrs.set(attrs.dep(), sorted = True)), default = []), - "platform_headers": attrs.list(attrs.tuple(attrs.regex(), attrs.named_set(attrs.source(), sorted = True)), default = []), - "platform_preprocessor_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), - "post_linker_flags": attrs.list(attrs.arg(), default = []), - "post_platform_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), - "precompiled_header": attrs.option(attrs.source(), default = None), - "preferred_linkage": attrs.option(attrs.enum(Linkage), default = None), - "prefix_header": attrs.option(attrs.source(), default = None), - "public_include_directories": attrs.set(attrs.string(), sorted = True, default = []), - "public_system_include_directories": attrs.set(attrs.string(), sorted = True, default = []), - "raw_headers": attrs.set(attrs.source(), sorted = True, default = []), - "reexport_all_header_dependencies": attrs.option(attrs.bool(), default = None), - "sdk_modules": attrs.list(attrs.string(), default = []), - "serialize_debugging_options": attrs.bool(default = False), - "soname": attrs.option(attrs.string(), default = None), - "static_library_basename": attrs.option(attrs.string(), default = None), - "supported_platforms_regex": attrs.option(attrs.regex(), default = None), - "supports_merged_linking": attrs.option(attrs.bool(), default = None), - "swift_compiler_flags": attrs.list(attrs.arg(), default = []), - "swift_version": attrs.option(attrs.string(), default = None), - "thin_lto": attrs.bool(default = False), - "use_submodules": attrs.bool(default = False), - "uses_cxx_explicit_modules": attrs.bool(default = False), - "uses_explicit_modules": attrs.bool(default = False), - "uses_modules": attrs.bool(default = False), - "xcode_private_headers_symlinks": attrs.option(attrs.bool(), default = None), - "xcode_public_headers_symlinks": attrs.option(attrs.bool(), default = None), - } - ), -) - -apple_bundle = prelude_rule( - name = "apple_bundle", - docs = """ - An `apple_bundle()` rule takes an Apple binary and all of the resources and asset - catalogs in the rule's transitive dependencies and generates a bundle containing all of those files. 
- Optionally the generated bundle can also be signed using specified provisioning profiles. - - - Code signing will embed entitlements pointed to by the `entitlements_file` arg in - the bundle's `apple_binary`. This is the preferred way to specify entitlements - when building with Buck. - - If the entitlements file is not present, it falls back to the `CODE_SIGN_ENTITLEMENTS` entry in - `info_plist_substitutions`. - - - If after these checks, an entitlements file is still not specified, it will be derived based - on the entitlements of the selected provisioning profile. Provisioning profiles will be selected - from profiles pointed to by `apple.provisioning_profile_search_path`, based on a - non-expired profile that matches the bundle id and entitlements. - - - Code signing will embed entitlements pointed to by the `CODE_SIGN_ENTITLEMENTS` entry in - `info_plist_substitutions`. If an entitlements file is omitted, it will be derived based - on the entitlements of the selected provisioning profile. Provisioning profiles will be selected - from profiles pointed to by `apple.provisioning_profile_search_path`, based on a - non-expired profile that matches the bundle id and entitlements. - """, - examples = """ - ``` - - apple_bundle( - name = 'AppBundle', - binary = ':MyBinary', - extension = 'app', - info_plist = 'Info.plist', - ) - - ``` - - ``` - - # iOS app with embedded WatchOS 2.0 app/extension - apple_bundle( - name = 'DemoWatchAppExtension', - binary = ':DemoWatchAppExtensionBinary', - extension = 'appex', - info_plist = 'WatchExtension/Resources/Info.plist', - ) - - apple_bundle( - name = 'DemoWatchApp', - binary = ':DemoWatchAppBinary', - deps = [':DemoWatchAppResources', ':DemoWatchAppExtension'], - extension = 'app', - info_plist = 'WatchApplication/Info.plist', - ) - - apple_bundle( - name = 'DemoApp', - binary = ':DemoAppBinary', - deps = [':DemoWatchApp#watch'], - extension = 'app', - info_plist = 'Info.plist', - ) - - ``` - - ``` - - # iOS app using safeAreaInsets delivering to iOS 9.x - apple_bundle( - name = 'DemoIBApp', - binary = ':DemoIBAppBinary', - deps = [':DemoIBAppResources'], - extension = 'app', - ibtool_flags = ["--minimum-deployment-target", "9.0"], - info_plist = 'Info.plist', - ) - - ``` - """, - further = None, - attrs = ( - # @unsorted-dict-items - { - "deps": attrs.list(attrs.dep(), default = [], doc = """ - A list of dependencies of this bundle as build targets. You can embed application - extensions by specifying the extension's bundle target. To include a WatchKit app, append the - flavor `#watch` to the target specification. Buck will automatically substitute the appropriate - platform flavor (either `watchsimulator` or `watchos`) based on the parent. - """), - "product_name": attrs.option(attrs.string(), default = None, doc = """ - The name of the resulting bundle and binary. The setting behaves like PRODUCT\\_NAME Xcode build setting. - For example, if your rule is named "MyApp" and extension is "app", by default buck will generate MyApp.app bundle. - But if you will set product name to "SuperApp", bundle will get "SuperApp.app" name. - """), - "extension": attrs.one_of(attrs.enum(AppleBundleExtension), attrs.string(), doc = """ - The extension of the generated bundle. For example `'app'` for an application bundle - or `'appex'` for an application extension bundle. 
- """), - "binary": attrs.option(attrs.dep(), default = None, doc = """ - A `build target` identifying - an `apple_binary()` rule or - an `apple_library()` rule whose output will - be used as the main executable binary of the generated bundle. The required rule type depends - on the value in the `extension` attribute. For example, application bundles expect - a binary (e.g. `'//Apps/MyApp:MyApp'`), application extension bundles expect a shared - library (e.g. `'//Libraries/MyLibrary:MyLibrary#shared'`). - """), - } | - apple_common.info_plist_arg() | - apple_common.info_plist_substitutions_arg() | - { - "asset_catalogs_compilation_options": attrs.dict(key = attrs.string(), value = attrs.any(), default = {}, doc = """ - A dict holding parameters for asset catalogs compiler (actool). Its options include: - * `notices` (defaults to `True`) - * `warnings` (defaults to `True`) - * `errors` (defaults to `True`) - * `compress_pngs` (defaults to `True`) - * `optimization` (defaults to `'space'`) - * `output_format` (defaults to `'human-readable-text'`) - * `extra_flags` (defaults to `[]`) - """), - "ibtool_flags": attrs.option(attrs.list(attrs.string()), default = None, doc = """ - List of flags to be passed to ibtool during interface builder file compilation. - """), - "codesign_flags": attrs.list(attrs.string(), default = []), - "codesign_identity": attrs.option(attrs.string(), default = None), - "contacts": attrs.list(attrs.string(), default = []), - "default_host_platform": attrs.option(attrs.configuration_label(), default = None), - "default_platform": attrs.option(attrs.string(), default = None), - "ibtool_module_flag": attrs.option(attrs.bool(), default = None), - "incremental_bundling_enabled": attrs.option(attrs.bool(), default = None), - "labels": attrs.list(attrs.string(), default = []), - "licenses": attrs.list(attrs.source(), default = []), - "platform_binary": attrs.option(attrs.list(attrs.tuple(attrs.regex(), attrs.dep())), default = None), - "resource_group": attrs.option(attrs.string(), default = None), - "resource_group_map": attrs.option(attrs.list(attrs.tuple(attrs.string(), attrs.list(attrs.tuple(attrs.dep(), attrs.enum(Traversal), attrs.option(attrs.string()))))), default = None), - "skip_copying_swift_stdlib": attrs.option(attrs.bool(), default = None), - "try_skip_code_signing": attrs.option(attrs.bool(), default = None), - "xcode_product_type": attrs.option(attrs.string(), default = None), - } - ), -) - -apple_library = prelude_rule( - name = "apple_library", - docs = """ - An `apple_library()` rule represents a set of Objective-C/C++/Swift - source files and is similar to a `cxx_library()` rule with which it shares many - attributes. In addition to those common attributes, `apple_library()` has a some additional attributes - that are specific to binaries intended to be built using the Apple toolchain. - Note, however, that `apple_library()` and `cxx_library()` differ - in the way that they import header files, in order to better accommodate existing conventions. - See the sections for the `headers` and `exported_headers` attributes for more details. - - - Buck enables you to override components of the Apple toolchain with - alternate tools, either from the Xcode search paths or from directories - that you specify. - See `.buckconfig` - and `.buckconfig` - for more information. 
- """, - examples = """ - ``` - - apple_library( - name = 'MyLibrary', - deps = [ - ':OtherLibrary', - '//Libraries:YetAnotherLibrary', - ], - preprocessor_flags = ['-fobjc-arc'], - headers = [ - 'MyHeader.h', - ], - srcs = [ - 'MySource.m', - 'MySource.swift', - ], - frameworks = [ - '$SDKROOT/System/Library/Frameworks/UIKit.framework', - '$SDKROOT/System/Library/Frameworks/Foundation.framework', - ], - ) - - ``` - """, - further = None, - attrs = ( - # @unsorted-dict-items - cxx_common.srcs_arg() | - cxx_common.platform_srcs_arg() | - apple_common.headers_arg() | - apple_common.exported_headers_arg() | - apple_common.header_path_prefix_arg() | - cxx_common.header_namespace_arg() | - apple_common.frameworks_arg() | - cxx_common.preprocessor_flags_arg() | - cxx_common.exported_preprocessor_flags_arg(exported_preprocessor_flags_type = attrs.list(attrs.arg(), default = [])) | - cxx_common.compiler_flags_arg() | - cxx_common.platform_compiler_flags_arg() | - cxx_common.linker_extra_outputs_arg() | - cxx_common.linker_flags_arg() | - cxx_common.exported_linker_flags_arg() | - cxx_common.exported_platform_linker_flags_arg() | - apple_common.target_sdk_version() | - native_common.preferred_linkage(preferred_linkage_type = attrs.option(attrs.enum(Linkage), default = None)) | - native_common.link_style() | - native_common.link_whole(link_whole_type = attrs.option(attrs.bool(), default = None)) | - cxx_common.reexport_all_header_dependencies_arg() | - cxx_common.exported_deps_arg() | - apple_common.extra_xcode_sources() | - apple_common.extra_xcode_files() | - { - "bridging_header": attrs.option(attrs.source(), default = None), - "can_be_asset": attrs.option(attrs.bool(), default = None), - "configs": attrs.dict(key = attrs.string(), value = attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False), sorted = False, default = {}), - "contacts": attrs.list(attrs.string(), default = []), - "cxx_runtime_type": attrs.option(attrs.enum(CxxRuntimeType), default = None), - "default_host_platform": attrs.option(attrs.configuration_label(), default = None), - "default_platform": attrs.option(attrs.string(), default = None), - "defaults": attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False, default = {}), - "deps": attrs.list(attrs.dep(), default = []), - "devirt_enabled": attrs.bool(default = False), - "diagnostics": attrs.dict(key = attrs.string(), value = attrs.source(), sorted = False, default = {}), - "enable_cxx_interop": attrs.bool(default = False), - "executable_name": attrs.option(attrs.string(), default = None), - "exported_header_style": attrs.enum(IncludeType, default = "local"), - "exported_lang_platform_preprocessor_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg()))), sorted = False, default = {}), - "exported_lang_preprocessor_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.arg()), sorted = False, default = {}), - "exported_platform_deps": attrs.list(attrs.tuple(attrs.regex(), attrs.set(attrs.dep(), sorted = True)), default = []), - "exported_platform_headers": attrs.list(attrs.tuple(attrs.regex(), attrs.named_set(attrs.source(), sorted = True)), default = []), - "exported_platform_preprocessor_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), - "exported_post_linker_flags": attrs.list(attrs.arg(), default = []), - "exported_post_platform_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default 
= []), - "fat_lto": attrs.bool(default = False), - "focused_list_target": attrs.option(attrs.dep(), default = None), - "force_static": attrs.option(attrs.bool(), default = None), - "headers_as_raw_headers_mode": attrs.option(attrs.enum(HeadersAsRawHeadersMode), default = None), - "include_directories": attrs.set(attrs.string(), sorted = True, default = []), - "info_plist": attrs.option(attrs.source(), default = None), - "info_plist_substitutions": attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False, default = {}), - "labels": attrs.list(attrs.string(), default = []), - "lang_compiler_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.arg()), sorted = False, default = {}), - "lang_platform_compiler_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg()))), sorted = False, default = {}), - "lang_platform_preprocessor_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg()))), sorted = False, default = {}), - "lang_preprocessor_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.arg()), sorted = False, default = {}), - "libraries": attrs.list(attrs.string(), default = []), - "licenses": attrs.list(attrs.source(), default = []), - "link_group": attrs.option(attrs.string(), default = None), - "link_group_map": attrs.option(attrs.list(attrs.tuple(attrs.string(), attrs.list(attrs.tuple(attrs.dep(), attrs.enum(Traversal), attrs.option(attrs.string()))))), default = None), - "modular": attrs.bool(default = False), - "module_name": attrs.option(attrs.string(), default = None), - "module_requires_cxx": attrs.bool(default = False), - "platform_deps": attrs.list(attrs.tuple(attrs.regex(), attrs.set(attrs.dep(), sorted = True)), default = []), - "platform_headers": attrs.list(attrs.tuple(attrs.regex(), attrs.named_set(attrs.source(), sorted = True)), default = []), - "platform_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), - "platform_preprocessor_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), - "post_linker_flags": attrs.list(attrs.arg(), default = []), - "post_platform_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), - "precompiled_header": attrs.option(attrs.source(), default = None), - "prefix_header": attrs.option(attrs.source(), default = None), - "public_include_directories": attrs.set(attrs.string(), sorted = True, default = []), - "public_system_include_directories": attrs.set(attrs.string(), sorted = True, default = []), - "raw_headers": attrs.set(attrs.source(), sorted = True, default = []), - "sdk_modules": attrs.list(attrs.string(), default = []), - "serialize_debugging_options": attrs.bool(default = False), - "soname": attrs.option(attrs.string(), default = None), - "static_library_basename": attrs.option(attrs.string(), default = None), - "supported_platforms_regex": attrs.option(attrs.regex(), default = None), - "supports_merged_linking": attrs.option(attrs.bool(), default = None), - "swift_compiler_flags": attrs.list(attrs.arg(), default = []), - "swift_version": attrs.option(attrs.string(), default = None), - "thin_lto": attrs.bool(default = False), - "use_submodules": attrs.bool(default = False), - "uses_cxx_explicit_modules": attrs.bool(default = False), - "uses_explicit_modules": attrs.bool(default = False), - "uses_modules": attrs.bool(default = False), 
- "xcode_private_headers_symlinks": attrs.option(attrs.bool(), default = None), - "xcode_public_headers_symlinks": attrs.option(attrs.bool(), default = None), - } - ), -) - -apple_package = prelude_rule( - name = "apple_package", - docs = """ - An `apple_package()` rule takes the output of - an `apple_bundle()` rule and compresses it in - an IPA (iOS App Store Package) file. - - This rule can be customized using the config options `.buckconfig` - and `.buckconfig` - . - """, - examples = """ - ``` - - apple_package( - name = 'AppPackage', - bundle = ':AppBundle', - ) - - ``` - """, - further = None, - attrs = ( - # @unsorted-dict-items - { - "bundle": attrs.dep(doc = """ - A build target identifying - an `apple_bundle()` rule whose output will - be stored in the IPA package generated by this rule. - """), - "contacts": attrs.list(attrs.string(), default = []), - "default_host_platform": attrs.option(attrs.configuration_label(), default = None), - "default_platform": attrs.option(attrs.string(), default = None), - "labels": attrs.list(attrs.string(), default = []), - "licenses": attrs.list(attrs.source(), default = []), - "need_android_tools": attrs.bool(default = False), - } - ), -) - -apple_resource = prelude_rule( - name = "apple_resource", - docs = """ - An `apple_resource()` rule contains sets of resource directories, files and file variants - that can be bundled in an application bundle. This rule does not have any output on its own and can - be built only as a dependency (either direct or transitive) of - an `apple_bundle()` rule. - """, - examples = """ - ``` - - apple_resource( - name = 'Images', - files = glob([ - '*.png', - ]), - dirs = [ - 'PrettyImages', - ], - ) - - ``` - """, - further = None, - attrs = ( - # @unsorted-dict-items - { - "dirs": attrs.list(attrs.source(), default = [], doc = """ - Set of paths of resource directories that should be placed in an application bundle. - """), - "content_dirs": attrs.list(attrs.source(), default = [], doc = """ - Set of paths of directories containing resource files that should be placed in an application bundle. Unlike `dirs`, the directories themselves are not placed in the bundle. - """), - "files": attrs.list(attrs.source(), default = [], doc = """ - Set of paths of resource files that should be placed in an application bundle. - """), - "variants": attrs.list(attrs.source(), default = [], doc = """ - Set of paths of resource file variants that should be placed in an application bundle. The files - mentioned here should be placed in a directory named `$VARIANT_NAME.lproj`, - where `$VARIANT_NAME` is the name of the variant - (e.g. `Base`, `en`). This argument makes it possible to use different - resource files based on the active locale. - """), - "named_variants": attrs.dict(key = attrs.string(), value = attrs.set(attrs.source(), sorted = False), sorted = False, default = {}, doc = """ - Mapping from a variant name to the list of resource file paths which should be placed in an application bundle. Those files - will be placed in a directory with name equal to the corresponding key in this mapping. Keys should end with `.lproj` suffix. - (e.g. `Base.lproj`, `en.lproj`). - """), - "resources_from_deps": attrs.list(attrs.dep(), default = [], doc = """ - Set of build targets whose transitive `apple_resource`s should be considered as part of - the current resource when collecting resources for bundles. - - Usually, an `apple_bundle` collects all `apple_resource` rules transitively - reachable through apple\\_library rules. 
This field allows for resources which are not reachable - using the above traversal strategy to be considered for inclusion in the bundle. - """), - "destination": attrs.option(attrs.enum(AppleResourceBundleDestination), default = None, doc = """ - Specifies the destination in the final application bundle where resource will be copied. Possible - values: "resources", "frameworks", "executables", "plugins", "xpcservices". - """), - "codesign_on_copy": attrs.bool(default = False, doc = """ - Indicates whether the files specified in the files arg in this resource should be code signed with the identity used to sign the overall bundle. This is useful for e.g. - dylibs or other additional binaries copied into the bundle. The caller is responsible to ensure that the file format is valid for codesigning. - """), - "contacts": attrs.list(attrs.string(), default = []), - "default_host_platform": attrs.option(attrs.configuration_label(), default = None), - "labels": attrs.list(attrs.string(), default = []), - "licenses": attrs.list(attrs.source(), default = []), - } - ), -) - -apple_test = prelude_rule( - name = "apple_test", - docs = """ - An `apple_test()` rule contains Objective-C/C++ code which can be built and used to test - code contained in other rules. The tests can be executed by running `buck test`. - """, - examples = """ - ``` - - apple_test( - name = 'MyTest', - info_plist = 'MyTest-Info.plist', - preprocessor_flags = ['-fobjc-arc'], - srcs = [ - 'MyTest.m', - ], - deps = [ - ':MyLibrary', - ], - frameworks = [ - '$SDKROOT/System/Library/Frameworks/Foundation.framework', - '$SDKROOT/System/Library/Frameworks/UIKit.framework', - '$PLATFORM_DIR/Developer/Library/Frameworks/XCTest.framework', - ], - ) - - ``` - """, - further = None, - attrs = ( - # @unsorted-dict-items - apple_common.info_plist_arg() | - apple_common.info_plist_substitutions_arg() | - { - "test_host_app": attrs.option(attrs.dep(), default = None, doc = """ - A build target identifying - an `apple_bundle()` rule that builds an - application bundle. Output of the specified rule will be used as a test host of this test. This - implies `run_test_separately`. - Since symbols that are defined in the test host application and its dependencies will not be - linked into the test binary, to make those symbols accessible to the test target they need - to be specified as a dependency of this target and `['-undefined', 'dynamic_lookup']` needs to be added to this target's `linker_flags` (this will suppress undefined - reference errors during compilation, but if the symbols do not exist, it might result in runtime - crashes). 
- """), - } | - cxx_common.srcs_arg() | - cxx_common.platform_srcs_arg() | - apple_common.headers_arg() | - apple_common.header_path_prefix_arg() | - apple_common.frameworks_arg() | - cxx_common.preprocessor_flags_arg() | - cxx_common.compiler_flags_arg() | - cxx_common.platform_compiler_flags_arg() | - cxx_common.linker_flags_arg() | - native_common.link_style() | - apple_common.target_sdk_version() | - buck.run_test_separately_arg(run_test_separately_type = attrs.bool(default = False)) | - buck.test_label_arg() | - apple_common.extra_xcode_sources() | - apple_common.extra_xcode_files() | - { - "asset_catalogs_compilation_options": attrs.dict(key = attrs.string(), value = attrs.any(), default = {}), - "bridging_header": attrs.option(attrs.source(), default = None), - "can_be_asset": attrs.option(attrs.bool(), default = None), - "codesign_flags": attrs.list(attrs.string(), default = []), - "codesign_identity": attrs.option(attrs.string(), default = None), - "configs": attrs.dict(key = attrs.string(), value = attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False), sorted = False, default = {}), - "contacts": attrs.list(attrs.string(), default = []), - "cxx_runtime_type": attrs.option(attrs.enum(CxxRuntimeType), default = None), - "default_host_platform": attrs.option(attrs.configuration_label(), default = None), - "default_platform": attrs.option(attrs.string(), default = None), - "defaults": attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False, default = {}), - "deps": attrs.list(attrs.dep(), default = []), - "destination_specifier": attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False, default = {}), - "devirt_enabled": attrs.bool(default = False), - "diagnostics": attrs.dict(key = attrs.string(), value = attrs.source(), sorted = False, default = {}), - "enable_cxx_interop": attrs.bool(default = False), - "entitlements_file": attrs.option(attrs.source(), default = None), - "env": attrs.option(attrs.dict(key = attrs.string(), value = attrs.arg(), sorted = False), default = None), - "executable_name": attrs.option(attrs.string(), default = None), - "exported_deps": attrs.list(attrs.dep(), default = []), - "exported_header_style": attrs.enum(IncludeType, default = "local"), - "exported_headers": attrs.named_set(attrs.source(), sorted = True, default = []), - "exported_lang_platform_preprocessor_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg()))), sorted = False, default = {}), - "exported_lang_preprocessor_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.arg()), sorted = False, default = {}), - "exported_linker_flags": attrs.list(attrs.arg(), default = []), - "exported_platform_deps": attrs.list(attrs.tuple(attrs.regex(), attrs.set(attrs.dep(), sorted = True)), default = []), - "exported_platform_headers": attrs.list(attrs.tuple(attrs.regex(), attrs.named_set(attrs.source(), sorted = True)), default = []), - "exported_platform_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), - "exported_platform_preprocessor_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), - "exported_post_linker_flags": attrs.list(attrs.arg(), default = []), - "exported_post_platform_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), - "exported_preprocessor_flags": attrs.list(attrs.arg(), default = []), - "fat_lto": attrs.bool(default = False), - 
"focused_list_target": attrs.option(attrs.dep(), default = None), - "force_static": attrs.option(attrs.bool(), default = None), - "header_namespace": attrs.option(attrs.string(), default = None), - "headers_as_raw_headers_mode": attrs.option(attrs.enum(HeadersAsRawHeadersMode), default = None), - "include_directories": attrs.set(attrs.string(), sorted = True, default = []), - "incremental_bundling_enabled": attrs.option(attrs.bool(), default = None), - "is_ui_test": attrs.bool(default = False), - "lang_compiler_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.arg()), sorted = False, default = {}), - "lang_platform_compiler_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg()))), sorted = False, default = {}), - "lang_platform_preprocessor_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg()))), sorted = False, default = {}), - "lang_preprocessor_flags": attrs.dict(key = attrs.enum(CxxSourceType), value = attrs.list(attrs.arg()), sorted = False, default = {}), - "libraries": attrs.list(attrs.string(), default = []), - "licenses": attrs.list(attrs.source(), default = []), - "link_group": attrs.option(attrs.string(), default = None), - "link_group_map": attrs.option(attrs.list(attrs.tuple(attrs.string(), attrs.list(attrs.tuple(attrs.dep(), attrs.enum(Traversal), attrs.option(attrs.string()))))), default = None), - "link_whole": attrs.option(attrs.bool(), default = None), - "linker_extra_outputs": attrs.list(attrs.string(), default = []), - "modular": attrs.bool(default = False), - "module_name": attrs.option(attrs.string(), default = None), - "module_requires_cxx": attrs.bool(default = False), - "platform_deps": attrs.list(attrs.tuple(attrs.regex(), attrs.set(attrs.dep(), sorted = True)), default = []), - "platform_headers": attrs.list(attrs.tuple(attrs.regex(), attrs.named_set(attrs.source(), sorted = True)), default = []), - "platform_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), - "platform_preprocessor_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), - "post_linker_flags": attrs.list(attrs.arg(), default = []), - "post_platform_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg())), default = []), - "precompiled_header": attrs.option(attrs.source(), default = None), - "preferred_linkage": attrs.option(attrs.enum(Linkage), default = None), - "prefix_header": attrs.option(attrs.source(), default = None), - "public_include_directories": attrs.set(attrs.string(), sorted = True, default = []), - "public_system_include_directories": attrs.set(attrs.string(), sorted = True, default = []), - "raw_headers": attrs.set(attrs.source(), sorted = True, default = []), - "reexport_all_header_dependencies": attrs.option(attrs.bool(), default = None), - "runner": attrs.option(attrs.dep(), default = None), - "sdk_modules": attrs.list(attrs.string(), default = []), - "serialize_debugging_options": attrs.bool(default = False), - "skip_copying_swift_stdlib": attrs.option(attrs.bool(), default = None), - "snapshot_reference_images_path": attrs.option(attrs.one_of(attrs.source(), attrs.string()), default = None), - "soname": attrs.option(attrs.string(), default = None), - "specs": attrs.option(attrs.arg(json = True), default = None), - "static_library_basename": attrs.option(attrs.string(), default = None), - "supported_platforms_regex": 
attrs.option(attrs.regex(), default = None), - "supports_merged_linking": attrs.option(attrs.bool(), default = None), - "swift_compiler_flags": attrs.list(attrs.arg(), default = []), - "swift_version": attrs.option(attrs.string(), default = None), - "test_rule_timeout_ms": attrs.option(attrs.int(), default = None), - "thin_lto": attrs.bool(default = False), - "try_skip_code_signing": attrs.option(attrs.bool(), default = None), - "ui_test_target_app": attrs.option(attrs.dep(), default = None), - "use_submodules": attrs.bool(default = False), - "uses_cxx_explicit_modules": attrs.bool(default = False), - "uses_explicit_modules": attrs.bool(default = False), - "uses_modules": attrs.bool(default = False), - "xcode_private_headers_symlinks": attrs.option(attrs.bool(), default = None), - "xcode_product_type": attrs.option(attrs.string(), default = None), - "xcode_public_headers_symlinks": attrs.option(attrs.bool(), default = None), - } - ), -) - -apple_toolchain = prelude_rule( - name = "apple_toolchain", - docs = "", - examples = None, - further = None, - attrs = ( - # @unsorted-dict-items - { - "actool": attrs.source(), - "architecture": attrs.string(default = ""), - "build_version": attrs.option(attrs.string(), default = None), - "codesign": attrs.source(), - "codesign_allocate": attrs.source(), - "contacts": attrs.list(attrs.string(), default = []), - "copy_scene_kit_assets": attrs.option(attrs.source(), default = None), - "cxx_toolchain": attrs.dep(), - "default_host_platform": attrs.option(attrs.configuration_label(), default = None), - "developer_path": attrs.option(attrs.source(), default = None), - "dsymutil": attrs.source(), - "dwarfdump": attrs.option(attrs.source(), default = None), - "ibtool": attrs.source(), - "labels": attrs.list(attrs.string(), default = []), - "libtool": attrs.source(), - "licenses": attrs.list(attrs.source(), default = []), - "lipo": attrs.source(), - "min_version": attrs.string(default = ""), - "momc": attrs.source(), - "platform_path": attrs.source(), - "sdk_environment": attrs.option(attrs.string(), default = None), - "sdk_name": attrs.string(default = ""), - "sdk_path": attrs.source(), - "swift_toolchain": attrs.option(attrs.dep(), default = None), - "version": attrs.string(default = ""), - "watch_kit_stub_binary": attrs.option(attrs.source(), default = None), - "work_around_dsymutil_lto_stack_overflow_bug": attrs.option(attrs.bool(), default = None), - "xcode_build_version": attrs.string(default = ""), - "xcode_version": attrs.string(default = ""), - "xctest": attrs.source(), - } - ), -) - -apple_toolchain_set = prelude_rule( - name = "apple_toolchain_set", - docs = "", - examples = None, - further = None, - attrs = ( - # @unsorted-dict-items - { - "apple_toolchains": attrs.list(attrs.dep(), default = []), - "contacts": attrs.list(attrs.string(), default = []), - "default_host_platform": attrs.option(attrs.configuration_label(), default = None), - "labels": attrs.list(attrs.string(), default = []), - "licenses": attrs.list(attrs.source(), default = []), - } - ), -) - -core_data_model = prelude_rule( - name = "core_data_model", - docs = """ - An `core_data_model()` rule contains models for Apple's Core Data framework. This rule does not have any output on its own and can be built only as a dependency - (either direct or transitive) of an `apple_bundle()` rule in which case all `core_data_model()` rules - that the bundle rule depends on are merged and placed into the final output bundle together. 
- """, - examples = """ - ``` - - core_data_model( - name = 'MyCoreDataModel', - path = 'MyCoreDataModel.xcdatamodeld', - ) - - ``` - """, - further = None, - attrs = ( - # @unsorted-dict-items - { - "path": attrs.source(doc = """ - Relative path of the .xcdatamodeld package directory. - """), - "contacts": attrs.list(attrs.string(), default = []), - "default_host_platform": attrs.option(attrs.configuration_label(), default = None), - "labels": attrs.list(attrs.string(), default = []), - "licenses": attrs.list(attrs.source(), default = []), - } - ), -) - -prebuilt_apple_framework = prelude_rule( - name = "prebuilt_apple_framework", - docs = """ - A `prebuilt_apple_framework()` rule represents a set of - Objective-C/C++ source files and is very similar to a `prebuilt_cxx_library()` rule. - """, - examples = """ - ``` - - prebuilt_apple_framework( - name = 'MyPrebuiltFramework', - framework = 'myPrebuiltFramework.framework', - preferred_linkage = 'static', - visibility = [ - 'PUBLIC' - ] - ) - - ``` - """, - further = None, - attrs = ( - # @unsorted-dict-items - { - "preferred_linkage": attrs.enum(Linkage, doc = """ - How to link to a binary: use `dynamic` for a dynamic - framework, and `static` for old universal static - frameworks manually lipo-ed together. `dynamic` will - copy the frameworks into the `Frameworks` directory - of an Apple bundle, and configure framework search paths and linker flags. - `static` will copy the resources of the framework into - an Apple bundle. - """), - "code_sign_on_copy": attrs.option(attrs.bool(), default = None), - "contacts": attrs.list(attrs.string(), default = []), - "default_host_platform": attrs.option(attrs.configuration_label(), default = None), - "deps": attrs.list(attrs.dep(), default = []), - "exported_linker_flags": attrs.list(attrs.string(), default = []), - "exported_platform_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.string())), default = []), - "framework": attrs.source(), - "frameworks": attrs.list(attrs.string(), default = []), - "labels": attrs.list(attrs.string(), default = []), - "libraries": attrs.list(attrs.string(), default = []), - "licenses": attrs.list(attrs.source(), default = []), - "supported_platforms_regex": attrs.option(attrs.regex(), default = None), - } - ), -) - -scene_kit_assets = prelude_rule( - name = "scene_kit_assets", - docs = "", - examples = None, - further = None, - attrs = ( - # @unsorted-dict-items - { - "contacts": attrs.list(attrs.string(), default = []), - "default_host_platform": attrs.option(attrs.configuration_label(), default = None), - "labels": attrs.list(attrs.string(), default = []), - "licenses": attrs.list(attrs.source(), default = []), - "path": attrs.source(), - } - ), -) - -swift_library = prelude_rule( - name = "swift_library", - docs = "", - examples = None, - further = None, - attrs = ( - # @unsorted-dict-items - { - "bridging_header": attrs.option(attrs.source(), default = None), - "compiler_flags": attrs.list(attrs.arg(), default = []), - "contacts": attrs.list(attrs.string(), default = []), - "default_host_platform": attrs.option(attrs.configuration_label(), default = None), - "deps": attrs.list(attrs.dep(), default = []), - "enable_cxx_interop": attrs.bool(default = False), - "frameworks": attrs.list(attrs.string(), default = []), - "labels": attrs.list(attrs.string(), default = []), - "libraries": attrs.list(attrs.string(), default = []), - "licenses": attrs.list(attrs.source(), default = []), - "module_name": attrs.option(attrs.string(), default = None), - 
"preferred_linkage": attrs.option(attrs.enum(Linkage), default = None), - "sdk_modules": attrs.list(attrs.string(), default = []), - "serialize_debugging_options": attrs.bool(default = False), - "soname": attrs.option(attrs.string(), default = None), - "srcs": attrs.list(attrs.source(), default = []), - "supported_platforms_regex": attrs.option(attrs.regex(), default = None), - "target_sdk_version": attrs.option(attrs.string(), default = None), - "uses_explicit_modules": attrs.bool(default = False), - "version": attrs.option(attrs.string(), default = None), - } - ), -) - -swift_toolchain = prelude_rule( - name = "swift_toolchain", - docs = "", - examples = None, - further = None, - attrs = ( - # @unsorted-dict-items - { - "can_toolchain_emit_obj_c_header_textually": attrs.bool(default = False), - "contacts": attrs.list(attrs.string(), default = []), - "default_host_platform": attrs.option(attrs.configuration_label(), default = None), - "explicit_modules_uses_gmodules": attrs.bool(default = False), - "labels": attrs.list(attrs.string(), default = []), - "licenses": attrs.list(attrs.source(), default = []), - "platform_path": attrs.source(), - "prefix_serialized_debug_info": attrs.bool(default = False), - "resource_dir": attrs.option(attrs.source(), default = None), - "runtime_paths_for_bundling": attrs.list(attrs.string(), default = []), - "runtime_paths_for_linking": attrs.list(attrs.string(), default = []), - "runtime_run_paths": attrs.list(attrs.string(), default = []), - "sdk_dependencies_path": attrs.option(attrs.string(), default = None), - "sdk_path": attrs.source(), - "static_runtime_paths": attrs.list(attrs.string(), default = []), - "supports_swift_cxx_interoperability_mode": attrs.bool(default = False), - "supports_cxx_interop_requirement_at_import": attrs.bool(default = False), - "swift_stdlib_tool": attrs.option(attrs.source(), default = None), - "swift_stdlib_tool_flags": attrs.list(attrs.arg(), default = []), - "swiftc": attrs.source(), - "swiftc_flags": attrs.list(attrs.arg(), default = []), - } - ), -) - -xcode_postbuild_script = prelude_rule( - name = "xcode_postbuild_script", - docs = "", - examples = None, - further = None, - attrs = ( - # @unsorted-dict-items - { - "cmd": attrs.string(default = ""), - "contacts": attrs.list(attrs.string(), default = []), - "default_host_platform": attrs.option(attrs.configuration_label(), default = None), - "input_file_lists": attrs.list(attrs.string(), default = []), - "inputs": attrs.list(attrs.string(), default = []), - "labels": attrs.list(attrs.string(), default = []), - "licenses": attrs.list(attrs.source(), default = []), - "output_file_lists": attrs.list(attrs.string(), default = []), - "outputs": attrs.list(attrs.string(), default = []), - "srcs": attrs.list(attrs.source(), default = []), - } - ), -) - -xcode_prebuild_script = prelude_rule( - name = "xcode_prebuild_script", - docs = "", - examples = None, - further = None, - attrs = ( - # @unsorted-dict-items - { - "cmd": attrs.string(default = ""), - "contacts": attrs.list(attrs.string(), default = []), - "default_host_platform": attrs.option(attrs.configuration_label(), default = None), - "input_file_lists": attrs.list(attrs.string(), default = []), - "inputs": attrs.list(attrs.string(), default = []), - "labels": attrs.list(attrs.string(), default = []), - "licenses": attrs.list(attrs.source(), default = []), - "output_file_lists": attrs.list(attrs.string(), default = []), - "outputs": attrs.list(attrs.string(), default = []), - "srcs": attrs.list(attrs.source(), default = 
[]), - } - ), -) - -xcode_workspace_config = prelude_rule( - name = "xcode_workspace_config", - docs = "", - examples = None, - further = None, - attrs = ( - # @unsorted-dict-items - { - "action_config_names": attrs.dict(key = attrs.enum(SchemeActionType), value = attrs.string(), sorted = False, default = {}), - "additional_scheme_actions": attrs.option(attrs.dict(key = attrs.enum(SchemeActionType), value = attrs.dict(key = attrs.enum(AdditionalActions), value = attrs.list(attrs.string()), sorted = False), sorted = False), default = None), - "contacts": attrs.list(attrs.string(), default = []), - "default_host_platform": attrs.option(attrs.configuration_label(), default = None), - "environment_variables": attrs.option(attrs.dict(key = attrs.enum(SchemeActionType), value = attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False), sorted = False), default = None), - "explicit_runnable_path": attrs.option(attrs.string(), default = None), - "extra_schemes": attrs.dict(key = attrs.string(), value = attrs.dep(), sorted = False, default = {}), - "extra_shallow_targets": attrs.list(attrs.dep(), default = []), - "extra_targets": attrs.list(attrs.dep(), default = []), - "extra_tests": attrs.list(attrs.dep(), default = []), - "is_remote_runnable": attrs.option(attrs.bool(), default = None), - "labels": attrs.list(attrs.string(), default = []), - "launch_style": attrs.option(attrs.enum(LaunchStyle), default = None), - "licenses": attrs.list(attrs.source(), default = []), - "notification_payload_file": attrs.option(attrs.string(), default = None), - "src_target": attrs.option(attrs.dep(), default = None), - "was_created_for_app_extension": attrs.option(attrs.bool(), default = None), - "watch_interface": attrs.option(attrs.enum(WatchInterface), default = None), - "workspace_name": attrs.option(attrs.string(), default = None), - } - ), -) - -ios_rules = struct( - apple_asset_catalog = apple_asset_catalog, - apple_binary = apple_binary, - apple_bundle = apple_bundle, - apple_library = apple_library, - apple_package = apple_package, - apple_resource = apple_resource, - apple_test = apple_test, - apple_toolchain = apple_toolchain, - apple_toolchain_set = apple_toolchain_set, - core_data_model = core_data_model, - prebuilt_apple_framework = prebuilt_apple_framework, - scene_kit_assets = scene_kit_assets, - swift_library = swift_library, - swift_toolchain = swift_toolchain, - xcode_postbuild_script = xcode_postbuild_script, - xcode_prebuild_script = xcode_prebuild_script, - xcode_workspace_config = xcode_workspace_config, -) diff --git a/prelude/decls/java_rules.bzl b/prelude/decls/java_rules.bzl index 7e61ae0585cc4..bfd34773900ab 100644 --- a/prelude/decls/java_rules.bzl +++ b/prelude/decls/java_rules.bzl @@ -12,6 +12,7 @@ load(":common.bzl", "AbiGenerationMode", "LogLevel", "SourceAbiVerificationMode", "TestType", "UnusedDependenciesAction", "buck", "prelude_rule") load(":jvm_common.bzl", "jvm_common") +load(":re_test_common.bzl", "re_test_common") Style = ["obf", "pretty", "detailed"] @@ -26,16 +27,16 @@ gwt_binary = prelude_rule( "contacts": attrs.list(attrs.string(), default = []), "default_host_platform": attrs.option(attrs.configuration_label(), default = None), "deps": attrs.list(attrs.dep(), default = []), - "draft_compile": attrs.option(attrs.bool(), default = None), + "draft_compile": attrs.bool(default = False), "experimental_args": attrs.list(attrs.string(), default = []), "labels": attrs.list(attrs.string(), default = []), "licenses": attrs.list(attrs.source(), default = []), - 
"local_workers": attrs.option(attrs.int(), default = None), + "local_workers": attrs.int(default = 2), "module_deps": attrs.list(attrs.dep(), default = []), "modules": attrs.list(attrs.string(), default = []), - "optimize": attrs.option(attrs.int(), default = None), - "strict": attrs.option(attrs.bool(), default = None), - "style": attrs.option(attrs.enum(Style), default = None), + "optimize": attrs.int(default = 9), + "strict": attrs.bool(default = False), + "style": attrs.enum(Style, default = "obf"), "vm_args": attrs.list(attrs.string(), default = []), } ), @@ -58,6 +59,7 @@ jar_genrule = prelude_rule( "enable_sandbox": attrs.option(attrs.bool(), default = None), "environment_expansion_separator": attrs.option(attrs.string(), default = None), "labels": attrs.list(attrs.string(), default = []), + "weight": attrs.option(attrs.int(), default = None), "licenses": attrs.list(attrs.source(), default = []), "need_android_tools": attrs.bool(default = False), "remote": attrs.option(attrs.bool(), default = None), @@ -100,6 +102,10 @@ java_binary = prelude_rule( attrs = ( # @unsorted-dict-items { + "base_dep": attrs.option(attrs.dep(), default = None, doc = """ + Rule (normally of type `java_library`) that should be + compiled and used as a base JAR to receive all dependencies through an append operation. + """), "deps": attrs.list(attrs.dep(), default = [], doc = """ Rules (normally of type `java_library`) that should be compiled and whose `.class` files and resources should be @@ -156,6 +162,9 @@ java_binary = prelude_rule( "default_host_platform": attrs.option(attrs.configuration_label(), default = None), "generate_wrapper": attrs.bool(default = False), "do_not_create_inner_jar": attrs.bool(default = False), + "incremental_target_prefix": attrs.option(attrs.string(), default = None), + "java_version": attrs.option(attrs.string(), default = None, doc = "Expected java version used at compile time"), + "java_runtime": attrs.option(attrs.string(), default = None, doc = "Expected java version used at runtime"), "labels": attrs.list(attrs.string(), default = []), "licenses": attrs.list(attrs.source(), default = []), } @@ -230,7 +239,7 @@ java_library = prelude_rule( If any of the files in this list end in `.src.zip`, then the entries in the ZIP file that end in `.java` will be included as ordinary inputs to compilation. This is common when using - a `genrule()`to auto-generate some Java source code that + a `genrule()` to auto-generate some Java source code that needs to be compiled with some hand-written Java code. """), } | @@ -255,12 +264,6 @@ java_library = prelude_rule( "java_version": attrs.option(attrs.string(), default = None, doc = """ Equivalent to setting both `source` and `target` to the given value. Setting this and `source` or `target` (or both!) is an error. """), - "javac": attrs.option(attrs.source(), default = None, doc = """ - Specifies the Java compiler program to use for this rule. - The value is a source path (e.g., //foo/bar:bar). - Overrides the value in "javac" in the "tools" section - of `.buckconfig`. - """), "extra_arguments": attrs.list(attrs.string(), default = [], doc = """ List of additional arguments to pass into the Java compiler. These arguments follow the ones specified in `.buckconfig`. 
@@ -274,18 +277,20 @@ java_library = prelude_rule( jvm_common.source_only_abi_deps() | jvm_common.required_for_source_only_abi() | jvm_common.on_unused_dependencies() | + jvm_common.plugins() | + jvm_common.javac() | { "annotation_processor_deps": attrs.list(attrs.dep(), default = []), "annotation_processor_params": attrs.list(attrs.string(), default = []), "annotation_processors": attrs.list(attrs.string(), default = []), "contacts": attrs.list(attrs.string(), default = []), "default_host_platform": attrs.option(attrs.configuration_label(), default = None), + "jar_postprocessor": attrs.option(attrs.exec_dep(), default = None), "labels": attrs.list(attrs.string(), default = []), "licenses": attrs.list(attrs.source(), default = []), "manifest_file": attrs.option(attrs.source(), default = None), "maven_coords": attrs.option(attrs.string(), default = None), "never_mark_as_unused_dependency": attrs.option(attrs.bool(), default = None), - "plugins": attrs.list(attrs.dep(), default = []), "proguard_config": attrs.option(attrs.source(), default = None), "runtime_deps": attrs.list(attrs.dep(), default = []), "source_abi_verification_mode": attrs.option(attrs.enum(SourceAbiVerificationMode), default = None), @@ -324,6 +329,7 @@ java_test = prelude_rule( further = None, attrs = ( # @unsorted-dict-items + buck.inject_test_env_arg() | { "srcs": attrs.list(attrs.source(), default = [], doc = """ Like `java_library()`, @@ -364,6 +370,7 @@ java_test = prelude_rule( } | buck.run_test_separately_arg(run_test_separately_type = attrs.bool(default = False)) | buck.fork_mode() | + re_test_common.test_args() | buck.test_rule_timeout_ms() | { "std_out_log_level": attrs.option(attrs.one_of(attrs.enum(LogLevel), attrs.int()), default = None, doc = """ @@ -375,7 +382,7 @@ java_test = prelude_rule( Same as `std_out_log_level`, but for std err. """), "use_cxx_libraries": attrs.option(attrs.bool(), default = None, doc = """ - Whether or not to build and link against `cxx\\_library()`dependencies when testing. + Whether or not to build and link against `cxx_library()` dependencies when testing. """), "cxx_library_whitelist": attrs.list(attrs.dep(), default = [], doc = """ EXPERIMENTAL. 
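`java_test` now also composes `re_test_common.test_args()` (added in the hunk above), so a test can configure remote execution either as a string profile name or as a dict matching the `re_opts` shape documented in `re_test_common.bzl` later in this diff. A hedged sketch; the capability keys and values here are hypothetical:

```
java_test(
    name = "greeter-test",
    srcs = ["GreeterTest.java"],
    deps = [":greeter"],
    # one_of(string, dict) per re_test_common._test_args().
    remote_execution = {
        "capabilities": {"platform": "linux-x86_64"},  # assumed key/value
        "remote_cache_enabled": True,
        "use_case": "tests",  # assumed use-case name
    },
)
```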
@@ -399,15 +406,14 @@ java_test = prelude_rule( "exported_deps": attrs.list(attrs.dep(), default = []), "exported_provided_deps": attrs.list(attrs.dep(), default = []), "extra_arguments": attrs.list(attrs.string(), default = []), + "jar_postprocessor": attrs.option(attrs.exec_dep(), default = None), "java_version": attrs.option(attrs.string(), default = None), "java": attrs.option(attrs.dep(), default = None), - "javac": attrs.option(attrs.source(), default = None), "licenses": attrs.list(attrs.source(), default = []), "manifest_file": attrs.option(attrs.source(), default = None), "maven_coords": attrs.option(attrs.string(), default = None), "never_mark_as_unused_dependency": attrs.option(attrs.bool(), default = None), "on_unused_dependencies": attrs.option(attrs.enum(UnusedDependenciesAction), default = None), - "plugins": attrs.list(attrs.dep(), default = []), "proguard_config": attrs.option(attrs.source(), default = None), "provided_deps": attrs.list(attrs.dep(), default = []), "remove_classes": attrs.list(attrs.regex(), default = []), @@ -422,7 +428,7 @@ java_test = prelude_rule( "unbundled_resources_root": attrs.option(attrs.source(allow_directory = True), default = None), "use_dependency_order_classpath": attrs.option(attrs.bool(), default = None), } - ), + ) | jvm_common.plugins() | jvm_common.javac(), ) java_test_runner = prelude_rule( @@ -444,7 +450,6 @@ java_test_runner = prelude_rule( "exported_provided_deps": attrs.list(attrs.dep(), default = []), "extra_arguments": attrs.list(attrs.string(), default = []), "java_version": attrs.option(attrs.string(), default = None), - "javac": attrs.option(attrs.source(), default = None), "labels": attrs.list(attrs.string(), default = []), "licenses": attrs.list(attrs.source(), default = []), "main_class": attrs.string(default = ""), @@ -452,7 +457,6 @@ java_test_runner = prelude_rule( "maven_coords": attrs.option(attrs.string(), default = None), "never_mark_as_unused_dependency": attrs.option(attrs.bool(), default = None), "on_unused_dependencies": attrs.option(attrs.enum(UnusedDependenciesAction), default = None), - "plugins": attrs.list(attrs.dep(), default = []), "proguard_config": attrs.option(attrs.source(), default = None), "provided_deps": attrs.list(attrs.dep(), default = []), "remove_classes": attrs.list(attrs.regex(), default = []), @@ -465,7 +469,7 @@ java_test_runner = prelude_rule( "source_only_abi_deps": attrs.list(attrs.dep(), default = []), "srcs": attrs.list(attrs.source(), default = []), "target": attrs.option(attrs.string(), default = None), - } + } | jvm_common.plugins() | jvm_common.javac() ), ) diff --git a/prelude/decls/js_rules.bzl b/prelude/decls/js_rules.bzl index bcc05534232e2..b820c8c1109b7 100644 --- a/prelude/decls/js_rules.bzl +++ b/prelude/decls/js_rules.bzl @@ -64,6 +64,7 @@ js_bundle_genrule = prelude_rule( "rewrite_sourcemap": attrs.bool(default = False), "skip_resources": attrs.bool(default = False), "srcs": attrs.named_set(attrs.source(), sorted = False, default = []), + "weight": attrs.option(attrs.int(), default = None), } ), ) diff --git a/prelude/decls/jvm_common.bzl b/prelude/decls/jvm_common.bzl index 86a674b21c19c..3a9e0831fe974 100644 --- a/prelude/decls/jvm_common.bzl +++ b/prelude/decls/jvm_common.bzl @@ -154,6 +154,97 @@ def _k2(): """), } +def _incremental(): + return { + "incremental": attrs.bool(default = False, doc = """ + Enables Kotlin incremental compilation. 
+ """), + } + +def _plugins(): + return { + "plugins": attrs.list( + attrs.one_of( + attrs.dep(), + attrs.tuple(attrs.dep(), attrs.list(attrs.string())), + ), + default = [], + ), + } + +def _kotlin_compiler_plugins(): + return { + "kotlin_compiler_plugins": attrs.dict(key = attrs.source(), value = attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False), sorted = False, default = {}, doc = """ + Use this to specify [Kotlin compiler plugins](https://kotlinlang.org/docs/reference/compiler-plugins.html) to use when compiling this library. + This takes a map, with each entry specify one plugin. Entry's key is plugin source path, + and value is a map of plugin option key value pair. Unlike `extra_kotlinc_arguments`, + these can be *source paths*, not just strings. + + A special option value is + `__codegen_dir__`, in which case Buck will provide a default codegen folder's path as + option value instead. + E.g. + + ``` +fbcode/buck2/prelude/decls/jvm_common.bzl + kotlin_compiler_plugins = { + "somePluginSourcePath": { + "plugin:somePluginId:somePluginOptionKey": "somePluginOptionValue", + "plugin:somePluginId:someDirectoryRelatedOptionKey": "__codegen_dir__", + }, + }, + + ``` + Each plugin source path will be prefixed with `-Xplugin=` and passed as extra + arguments to the compiler. Plugin options will be appended after its plugin with `-P`. + + A specific example is, if you want to use [kotlinx.serialization](https://github.com/Kotlin/kotlinx.serialization) + with `kotlin_library()`, you need to specify `kotlinx-serialization-compiler-plugin.jar` under `kotlin_compiler_plugins` and `kotlinx-serialization-runtime.jar` (which you may have to fetch from Maven) in your `deps`: + + ``` + + kotlin_library( + name = "example", + srcs = glob(["*.kt"]), + deps = [ + ":kotlinx-serialization-runtime", + ], + kotlin_compiler_plugins = { + # Likely copied from your $KOTLIN_HOME directory. + "kotlinx-serialization-compiler-plugin.jar": {}, + }, + ) + + prebuilt_jar( + name = "kotlinx-serialization-runtime", + binary_jar = ":kotlinx-serialization-runtime-0.10.0", + ) + + # Note you probably want to set + # maven_repo=http://jcenter.bintray.com/ in your .buckconfig until + # https://github.com/Kotlin/kotlinx.serialization/issues/64 + # is closed. + remote_file( + name = "kotlinx-serialization-runtime-0.10.0", + out = "kotlinx-serialization-runtime-0.10.0.jar", + url = "mvn:org.jetbrains.kotlinx:kotlinx-serialization-runtime:jar:0.10.0", + sha1 = "23d777a5282c1957c7ce35946374fff0adab114c" + ) + + ``` + """), + } + +def _javac(): + return { + "javac": attrs.option(attrs.one_of(attrs.exec_dep(), attrs.source()), default = None, doc = """ + Specifies the Java compiler program to use for this rule. + The value is a source path or an execution dep (e.g., //foo/bar:bar). + Overrides the value in "javac" in the "tools" section + of `.buckconfig`. 
+ """), + } + jvm_common = struct( test_env = _test_env, resources_arg = _resources_arg, @@ -166,4 +257,8 @@ jvm_common = struct( required_for_source_only_abi = _required_for_source_only_abi, on_unused_dependencies = _on_unused_dependencies, k2 = _k2, + incremental = _incremental, + plugins = _plugins, + kotlin_compiler_plugins = _kotlin_compiler_plugins, + javac = _javac, ) diff --git a/prelude/decls/kotlin_rules.bzl b/prelude/decls/kotlin_rules.bzl index 6b0cf71f5bb5e..da1a8e00184d2 100644 --- a/prelude/decls/kotlin_rules.bzl +++ b/prelude/decls/kotlin_rules.bzl @@ -12,6 +12,7 @@ load(":common.bzl", "AbiGenerationMode", "AnnotationProcessingTool", "LogLevel", "SourceAbiVerificationMode", "TestType", "UnusedDependenciesAction", "buck", "prelude_rule") load(":jvm_common.bzl", "jvm_common") +load(":re_test_common.bzl", "re_test_common") kotlin_library = prelude_rule( name = "kotlin_library", @@ -89,65 +90,6 @@ kotlin_library = prelude_rule( Rules (usually other `kotlin_library` rules) that are used to generate the classpath required to compile this `kotlin_library`. """), - "kotlin_compiler_plugins": attrs.dict(key = attrs.source(), value = attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False), sorted = False, default = {}, doc = """ - Use this to specify [Kotlin compiler plugins](https://kotlinlang.org/docs/reference/compiler-plugins.html) to use when compiling this library. - This takes a map, with each entry specify one plugin. Entry's key is plugin source path, - and value is a map of plugin option key value pair. Unlike `extra_kotlinc_arguments`, - these can be *source paths*, not just strings. - - A special option value is - `__codegen_dir__`, in which case Buck will provide a default codegen folder's path as - option value instead. - E.g. - - ``` - - kotlin_compiler_plugins = { - "somePluginSourcePath": { - "plugin:somePluginId:somePluginOptionKey": "somePluginOptionValue", - "plugin:somePluginId:someDirectoryRelatedOptionKey": "__codegen_dir__", - }, - }, - - ``` - Each plugin source path will be prefixed with `-Xplugin=` and passed as extra - arguments to the compiler. Plugin options will be appended after its plugin with `-P`. - - A specific example is, if you want to use [kotlinx.serialization](https://github.com/Kotlin/kotlinx.serialization) - with `kotlin_library()`, you need to specify `kotlinx-serialization-compiler-plugin.jar` under `kotlin_compiler_plugins` and `kotlinx-serialization-runtime.jar` (which you may have to fetch from Maven) in your `deps`: - - ``` - - kotlin_library( - name = "example", - srcs = glob(["*.kt"]), - deps = [ - ":kotlinx-serialization-runtime", - ], - kotlin_compiler_plugins = { - # Likely copied from your $KOTLIN_HOME directory. - "kotlinx-serialization-compiler-plugin.jar": {}, - }, - ) - - prebuilt_jar( - name = "kotlinx-serialization-runtime", - binary_jar = ":kotlinx-serialization-runtime-0.10.0", - ) - - # Note you probably want to set - # maven_repo=http://jcenter.bintray.com/ in your .buckconfig until - # https://github.com/Kotlin/kotlinx.serialization/issues/64 - # is closed. - remote_file( - name = "kotlinx-serialization-runtime-0.10.0", - out = "kotlinx-serialization-runtime-0.10.0.jar", - url = "mvn:org.jetbrains.kotlinx:kotlinx-serialization-runtime:jar:0.10.0", - sha1 = "23d777a5282c1957c7ce35946374fff0adab114c" - ) - - ``` - """), "extra_kotlinc_arguments": attrs.list(attrs.string(), default = [], doc = """ List of additional arguments to pass into the Kotlin compiler. 
"""), @@ -168,7 +110,10 @@ kotlin_library = prelude_rule( jvm_common.provided_deps() | jvm_common.exported_provided_deps() | jvm_common.k2() | - buck.labels_arg() | + jvm_common.kotlin_compiler_plugins() | + jvm_common.incremental() | + jvm_common.plugins() | + jvm_common.javac() | buck.labels_arg() | { "abi_generation_mode": attrs.option(attrs.enum(AbiGenerationMode), default = None), "annotation_processor_deps": attrs.list(attrs.dep(), default = []), @@ -177,15 +122,13 @@ kotlin_library = prelude_rule( "contacts": attrs.list(attrs.string(), default = []), "default_host_platform": attrs.option(attrs.configuration_label(), default = None), "extra_arguments": attrs.list(attrs.string(), default = []), - "extra_non_source_only_abi_kotlinc_arguments": attrs.list(attrs.string(), default = []), "java_version": attrs.option(attrs.string(), default = None), - "javac": attrs.option(attrs.source(), default = None), + "jar_postprocessor": attrs.option(attrs.exec_dep(), default = None), "licenses": attrs.list(attrs.source(), default = []), "manifest_file": attrs.option(attrs.source(), default = None), "maven_coords": attrs.option(attrs.string(), default = None), "never_mark_as_unused_dependency": attrs.option(attrs.bool(), default = None), "on_unused_dependencies": attrs.option(attrs.enum(UnusedDependenciesAction), default = None), - "plugins": attrs.list(attrs.dep(), default = []), "proguard_config": attrs.option(attrs.source(), default = None), "required_for_source_only_abi": attrs.bool(default = False), "runtime_deps": attrs.list(attrs.dep(), default = []), @@ -194,7 +137,7 @@ kotlin_library = prelude_rule( "source_only_abi_deps": attrs.list(attrs.dep(), default = []), "target": attrs.option(attrs.string(), default = None), "use_jvm_abi_gen": attrs.option(attrs.bool(), default = None), - } + } | jvm_common.plugins() ), ) @@ -208,6 +151,7 @@ kotlin_test = prelude_rule( further = None, attrs = ( # @unsorted-dict-items + buck.inject_test_env_arg() | { "srcs": attrs.list(attrs.source(), default = [], doc = """ Like ``kotlin_library()``, @@ -222,13 +166,13 @@ kotlin_test = prelude_rule( `glob(['**/*Test.kt'])`. """), "resources": attrs.list(attrs.source(), default = [], doc = """ - Same as `kotlin\\_library()`. + Same as `kotlin_library()`. """), } | buck.test_label_arg() | { "deps": attrs.list(attrs.dep(), default = [], doc = """ - Same as `kotlin\\_library()`. + Same as `kotlin_library()`. // org.junit.rules.Timeout was not introduced until 4.7. Must include JUnit (version 4.7 or later) as a dependency for JUnit tests. Must include TestNG (version 6.2 or later) and hamcrest as a dependencies for TestNG tests. 
@@ -240,6 +184,7 @@ kotlin_test = prelude_rule( } | buck.run_test_separately_arg(run_test_separately_type = attrs.bool(default = False)) | buck.fork_mode() | + re_test_common.test_args() | buck.test_rule_timeout_ms() | { "std_out_log_level": attrs.option(attrs.one_of(attrs.enum(LogLevel), attrs.int()), default = None, doc = """ @@ -255,7 +200,10 @@ kotlin_test = prelude_rule( """), } | jvm_common.k2() | + jvm_common.kotlin_compiler_plugins() | + jvm_common.incremental() | jvm_common.test_env() | + jvm_common.javac() | { "abi_generation_mode": attrs.option(attrs.enum(AbiGenerationMode), default = None), "annotation_processing_tool": attrs.option(attrs.enum(AnnotationProcessingTool), default = None), @@ -271,12 +219,9 @@ kotlin_test = prelude_rule( "exported_provided_deps": attrs.list(attrs.dep(), default = []), "extra_arguments": attrs.list(attrs.string(), default = []), "extra_kotlinc_arguments": attrs.list(attrs.string(), default = []), - "extra_non_source_only_abi_kotlinc_arguments": attrs.list(attrs.string(), default = []), "friend_paths": attrs.list(attrs.dep(), default = []), "java_version": attrs.option(attrs.string(), default = None), "java": attrs.option(attrs.dep(), default = None), - "javac": attrs.option(attrs.source(), default = None), - "kotlin_compiler_plugins": attrs.dict(key = attrs.source(), value = attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False), sorted = False, default = {}), "licenses": attrs.list(attrs.source(), default = []), "manifest_file": attrs.option(attrs.source(), default = None), "maven_coords": attrs.option(attrs.string(), default = None), diff --git a/prelude/decls/lua_rules.bzl b/prelude/decls/lua_rules.bzl index d46e9bf85a216..068745021adf1 100644 --- a/prelude/decls/lua_rules.bzl +++ b/prelude/decls/lua_rules.bzl @@ -112,7 +112,7 @@ lua_binary = prelude_rule( name = "lua_binary", docs = """ A `lua_library()` rule is used to group together Lua sources - to be packaged into a top-level `lua\\_binary()`rule. + to be packaged into a top-level `lua_binary()` rule. """, examples = """ ``` @@ -140,7 +140,7 @@ lua_binary = prelude_rule( The module which serves as the entry point for this rule. """), "deps": attrs.list(attrs.dep(), default = [], doc = """ - `lua\\_library()`rules to this binary will access. + `lua_library()` rules to this binary will access. """), "contacts": attrs.list(attrs.string(), default = []), "default_host_platform": attrs.option(attrs.configuration_label(), default = None), @@ -159,7 +159,7 @@ lua_library = prelude_rule( name = "lua_library", docs = """ A `lua_library()` rule is used to group together Lua sources - to be packaged into a top-level `lua\\_binary()`rule. + to be packaged into a top-level `lua_binary()` rule. """, examples = """ ``` diff --git a/prelude/decls/native_common.bzl b/prelude/decls/native_common.bzl index 929c75602b1c5..acc501a972355 100644 --- a/prelude/decls/native_common.bzl +++ b/prelude/decls/native_common.bzl @@ -46,10 +46,22 @@ def _link_group_public_deps_label(): """), } +def _soname(): + return { + "soname": attrs.option(attrs.string(), default = None, doc = """ + Sets the soname ("shared object name") of any shared library produced from this rule. + The default value is based on the full rule name. + The macro `$(ext)` will be replaced with a platform-appropriate extension. + An argument can be provided, which is a library version. + For example `soname = 'libfoo.$(ext 2.3)'` will be `libfoo.2.3.dylib` on Mac and `libfoo.so.2.3` on Linux. 
+"""), + } + native_common = struct( link_group_deps = _link_group_deps, link_group_public_deps_label = _link_group_public_deps_label, link_style = _link_style, link_whole = _link_whole, preferred_linkage = _preferred_linkage, + soname = _soname, ) diff --git a/prelude/decls/python_common.bzl b/prelude/decls/python_common.bzl index c792a7aaafcb3..3a3b5b279b4d2 100644 --- a/prelude/decls/python_common.bzl +++ b/prelude/decls/python_common.bzl @@ -27,7 +27,7 @@ def _platform_srcs_arg(): regex against which the platform name is matched, and the second element is a list of source files. The regex should use `java.util.regex.Pattern` syntax. The platform name is a Python platform *flavor* defined in - the buckconfig#`python`section of `.buckconfig`. + the buckconfig#`python` section of `.buckconfig`. """), } @@ -49,7 +49,7 @@ def _platform_resources_arg(): regex against which the platform name is matched, and the second element is a list of resource files. The regex should use `java.util.regex.Pattern` syntax. The platform name is a Python platform *flavor* defined in - the buckconfig#`python`section of `.buckconfig`. + the buckconfig#`python `section of `.buckconfig`. """), } @@ -75,9 +75,9 @@ def _linker_flags_arg(): "linker_flags": attrs.list(attrs.arg(anon_target_compatible = True), default = [], doc = """ Additional linker flags that should be applied to any linking which is specific to this rule. Note that whether these flags are used is dependent on the native link strategy selected in - `.buckconfig` and currently applies only to the `merged` ``.buckconfig``; + `.buckconfig` and currently applies only to the merged `.buckconfig`; the `separate` link strategy pulls in shared libraries that are linked in the - context of the rules that own them, such as `cxx\\_library()`. + context of the rules that own them, such as `cxx_library()`. """), } diff --git a/prelude/decls/python_rules.bzl b/prelude/decls/python_rules.bzl index e84a135a792f9..3d04a03ff1218 100644 --- a/prelude/decls/python_rules.bzl +++ b/prelude/decls/python_rules.bzl @@ -12,6 +12,21 @@ load(":python_common.bzl", "python_common") NativeLinkStrategy = ["separate", "merged"] +def _typing_arg(): + return { + "py_version_for_type_checking": attrs.option(attrs.string(), default = None, doc = """ + This option will force the type checker to perform checking under a specific version of Python interpreter. +"""), + "shard_typing": attrs.option(attrs.bool(), default = None, doc = """ + Determines if sharding should be enabled on a given target. +"""), + # NOTE(grievejia): Setting default to True here may have non-trivial impact on build memory + # usage (see S395002) + "typing": attrs.bool(default = False, doc = """ + Determines whether to perform type checking on the given target. Default is False. +"""), + } + cxx_python_extension = prelude_rule( name = "cxx_python_extension", docs = """ @@ -114,7 +129,7 @@ prebuilt_python_library = prelude_rule( name = "prebuilt_python_library", docs = """ A `prebuilt_python_library()` rule is used to include prebuilt python packages into the output of a - top-level `python_binary()`or `python_test()`rule. + top-level `python_binary()` or `python_test()` rule. 
        These prebuilt libraries can either be [whl files](https://www.python.org/dev/peps/pep-0427/) or eggs
@@ -179,9 +194,21 @@ prebuilt_python_library = prelude_rule(
     {
         "compile": attrs.bool(default = False),
         "contacts": attrs.list(attrs.string(), default = []),
+        "cxx_header_dirs": attrs.option(attrs.list(attrs.string()), default = None),
+        "infer_cxx_header_dirs": attrs.bool(default = False),
         "default_host_platform": attrs.option(attrs.configuration_label(), default = None),
         "ignore_compile_errors": attrs.bool(default = False),
         "licenses": attrs.list(attrs.source(), default = []),
+        "strip_soabi_tags": attrs.bool(
+            default = False,
+            doc = """
+                Strip the SOABI tags from extensions in the prebuilt library.
+
+                Note that this should be considered unsafe, as it removes builtin
+                protections that fail fast when a potentially incompatible
+                native extension is imported.
+            """,
+        ),
     }
     ),
 )
@@ -266,11 +293,14 @@ python_binary = prelude_rule(
         "platform_deps": attrs.list(attrs.tuple(attrs.regex(), attrs.set(attrs.dep(), sorted = True)), default = []),
         "platform_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg(anon_target_compatible = True))), default = []),
         "platform_preload_deps": attrs.list(attrs.tuple(attrs.regex(), attrs.set(attrs.dep(), sorted = False)), default = []),
+        "repl_only_deps": attrs.list(attrs.dep(), default = []),
+        "repl_main": attrs.option(attrs.string(), default = None),
         "prefer_stripped_native_objects": attrs.bool(default = False),
         "version_universe": attrs.option(attrs.string(), default = None),
         "zip_safe": attrs.option(attrs.bool(), default = None),
     } |
-    buck.allow_cache_upload_arg()
+    buck.allow_cache_upload_arg() |
+    _typing_arg()
     ),
 )
@@ -339,7 +369,8 @@ python_library = prelude_rule(
         "versioned_resources": attrs.option(attrs.versioned(attrs.named_set(attrs.source(), sorted = True)), default = None),
         "versioned_srcs": attrs.option(attrs.versioned(attrs.named_set(attrs.source(), sorted = True)), default = None),
         "zip_safe": attrs.option(attrs.bool(), default = None),
-    }
+    } |
+    _typing_arg()
     ),
 )
@@ -382,6 +413,7 @@ python_test = prelude_rule(
     further = None,
     attrs = (
         # @unsorted-dict-items
+        buck.inject_test_env_arg() |
         buck.labels_arg() |
         python_common.srcs_arg() |
         python_common.platform_srcs_arg() |
@@ -442,6 +474,8 @@
         "platform_deps": attrs.list(attrs.tuple(attrs.regex(), attrs.set(attrs.dep(), sorted = True)), default = []),
         "platform_linker_flags": attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.arg(anon_target_compatible = True))), default = []),
         "platform_preload_deps": attrs.list(attrs.tuple(attrs.regex(), attrs.set(attrs.dep(), sorted = False)), default = []),
+        "repl_only_deps": attrs.list(attrs.dep(), default = []),
+        "repl_main": attrs.option(attrs.string(), default = None),
         "prefer_stripped_native_objects": attrs.bool(default = False),
         "runner": attrs.option(attrs.dep(), default = None),
         "specs": attrs.option(attrs.arg(json = True), default = None),
         "versioned_resources": attrs.option(attrs.versioned(attrs.named_set(attrs.source(), sorted = True)), default = None),
         "versioned_srcs": attrs.option(attrs.versioned(attrs.named_set(attrs.source(), sorted = True)), default = None),
         "zip_safe": attrs.option(attrs.bool(), default = None),
-    }
+    } |
+    _typing_arg()
     ),
 )
diff --git a/prelude/decls/re_test_common.bzl b/prelude/decls/re_test_common.bzl
new file mode 100644
index 0000000000000..4a9e7240875b5
--- /dev/null
+++ b/prelude/decls/re_test_common.bzl
@@ -0,0 +1,68 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+load("@prelude//:build_mode.bzl", "BuildModeInfo")
+load("@prelude//:is_full_meta_repo.bzl", "is_full_meta_repo")
+load(":toolchains_common.bzl", "toolchains_common")
+
+def _opts_for_tests_arg() -> Attr:
+    # Attributes types do not have records.
+    # The expected shape of re_opts is:
+    # {
+    #     "capabilities": Dict<str, str> | None
+    #     "listing_capabilities": Dict<str, str> | None
+    #     "local_listing_enabled": bool | None
+    #     "local_enabled": bool | None
+    #     "use_case": str | None
+    #     "remote_cache_enabled": bool | None
+    #     "dependencies": list<Dict<str, str>> | []
+    #     "resource_units": int | None
+    # }
+    return attrs.dict(
+        key = attrs.string(),
+        value = attrs.option(
+            attrs.one_of(
+                attrs.dict(
+                    key = attrs.string(),
+                    value = attrs.string(),
+                    sorted = False,
+                ),
+                attrs.string(),
+                attrs.bool(),
+                attrs.list(attrs.dict(key = attrs.string(), value = attrs.string()), default = []),
+                attrs.int(),
+            ),
+            # TODO(cjhopman): I think this default does nothing, it should be deleted
+            default = None,
+        ),
+        sorted = False,
+    )
+
+def _action_key_provider_arg() -> Attr:
+    if is_full_meta_repo():
+        default_build_mode = read_root_config("fb", "remote_execution_test_build_mode", "fbcode//buck2/platform/build_mode:build_mode")
+        return attrs.dep(providers = [BuildModeInfo], default = default_build_mode)
+    else:
+        return attrs.option(attrs.dep(providers = [BuildModeInfo]), default = None)
+
+def _test_args() -> dict[str, Attr]:
+    return {
+        "remote_execution": attrs.option(
+            attrs.one_of(
+                attrs.string(),
+                _opts_for_tests_arg(),
+            ),
+            default = None,
+        ),
+        "remote_execution_action_key_providers": _action_key_provider_arg(),
+        "_remote_test_execution_toolchain": toolchains_common.remote_test_execution(),
+    }
+
+re_test_common = struct(
+    test_args = _test_args,
+    opts_for_tests_arg = _opts_for_tests_arg,
+)
diff --git a/prelude/decls/rust_common.bzl b/prelude/decls/rust_common.bzl
index d5af7762593cd..36602720257fb 100644
--- a/prelude/decls/rust_common.bzl
+++ b/prelude/decls/rust_common.bzl
@@ -70,6 +70,18 @@ def _linker_flags_arg():
     """),
     }
+def _exported_linker_flags_arg():
+    return {
+        "exported_linker_flags": attrs.list(attrs.arg(anon_target_compatible = True), default = [], doc = """
+            A set of additional flags to pass before this item on the link line, even if
+            this item is compiled to a DSO.
+"""),
+        "exported_post_linker_flags": attrs.list(attrs.arg(anon_target_compatible = True), default = [], doc = """
+            A set of additional flags to pass after this item on the link line, even if
+            this item is compiled to a DSO.
+"""),
+    }
+
 def _crate(crate_type):
     return {
         "crate": crate_type,
@@ -83,6 +95,15 @@ def _crate_root():
     """),
     }
+def _default_roots_arg():
+    return {
+        "default_roots": attrs.option(attrs.list(attrs.string()), default = None, doc = """
+            Set the candidate source names to consider for crate root. Typically used to disambiguate between
+            lib.rs or main.rs for rust_test, which may declare a test suite for either library or binary
+            rules. Has no effect if an explicit `crate_root` is provided.
+"""), + } + def _env_arg(): return { "env": attrs.dict(key = attrs.string(), value = attrs.arg(), sorted = False, default = {}, doc = """ @@ -91,6 +112,23 @@ def _env_arg(): """), } +def _run_env_arg(): + return { + "run_env": attrs.dict(key = attrs.string(), value = attrs.arg(), sorted = False, default = {}, doc = """ + Set environment variables during test execution. The environment variable values may + include macros which are expanded. +"""), + } + +def _build_and_run_env_arg(): + # Same as env_arg(), but with different documentation. + return { + "env": attrs.dict(key = attrs.string(), value = attrs.arg(), sorted = False, default = {}, doc = """ + Set environment variables for this rule's invocations of rustc *and* during execution of + the tests. The environment variable values may include macros which are expanded. +"""), + } + def _mapped_srcs_arg(): return { "mapped_srcs": attrs.dict(key = attrs.source(), value = attrs.string(), sorted = False, default = {}, doc = """ @@ -106,7 +144,7 @@ def _mapped_srcs_arg(): def _named_deps_arg(is_binary: bool): return { - "named_deps": attrs.dict(key = attrs.string(), value = rust_target_dep(is_binary), sorted = False, default = {}, doc = """ + "named_deps": attrs.one_of(attrs.dict(key = attrs.string(), value = rust_target_dep(is_binary), sorted = False), attrs.list(attrs.tuple(attrs.arg(), rust_target_dep(is_binary))), default = {}, doc = """ Add crate dependencies and define a local name by which to use that dependency by. This allows a crate to have multiple dependencies with the same crate name. For example: `named_deps = {"local_name", ":some_rust_crate" }`. @@ -115,12 +153,16 @@ def _named_deps_arg(is_binary: bool): """), } -def _toolchains_args(): +def _rust_toolchain_arg(): return { - "_cxx_toolchain": toolchains_common.cxx(), "_rust_toolchain": toolchains_common.rust(), } +def _cxx_toolchain_arg(): + return { + "_cxx_toolchain": toolchains_common.cxx(), + } + def _workspaces_arg(): return { "_workspaces": attrs.list(attrs.label(), default = [], doc = """ @@ -136,11 +178,16 @@ rust_common = struct( edition_arg = _edition_arg, rustc_flags_arg = _rustc_flags_arg, linker_flags_arg = _linker_flags_arg, + exported_linker_flags_arg = _exported_linker_flags_arg, crate = _crate, crate_root = _crate_root, + default_roots_arg = _default_roots_arg, env_arg = _env_arg, + run_env_arg = _run_env_arg, + build_and_run_env_arg = _build_and_run_env_arg, mapped_srcs_arg = _mapped_srcs_arg, named_deps_arg = _named_deps_arg, - toolchains_args = _toolchains_args, + rust_toolchain_arg = _rust_toolchain_arg, + cxx_toolchain_arg = _cxx_toolchain_arg, workspaces_arg = _workspaces_arg, ) diff --git a/prelude/decls/rust_rules.bzl b/prelude/decls/rust_rules.bzl index 9a79baad7c5ba..b60d97487664f 100644 --- a/prelude/decls/rust_rules.bzl +++ b/prelude/decls/rust_rules.bzl @@ -5,68 +5,20 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
-load("@prelude//cxx/user:link_group_map.bzl", "link_group_map_attr") +load("@prelude//cxx:link_groups_types.bzl", "LINK_GROUP_MAP_ATTR") +load("@prelude//linking:types.bzl", "Linkage") +load("@prelude//rust:clippy_configuration.bzl", "ClippyConfiguration") load("@prelude//rust:link_info.bzl", "RustProcMacroPlugin") load("@prelude//rust:rust_binary.bzl", "rust_binary_impl", "rust_test_impl") -load("@prelude//rust:rust_library.bzl", "prebuilt_rust_library_impl", "rust_library_impl") -load(":common.bzl", "LinkableDepType", "Linkage", "buck", "prelude_rule") +load("@prelude//rust:rust_library.bzl", "rust_library_impl") +load(":common.bzl", "buck", "prelude_rule") load(":native_common.bzl", "native_common") +load(":re_test_common.bzl", "re_test_common") load(":rust_common.bzl", "rust_common", "rust_target_dep") -prebuilt_rust_library = prelude_rule( - name = "prebuilt_rust_library", - impl = prebuilt_rust_library_impl, - docs = """ - A prebuilt\\_rust\\_library() specifies a pre-built Rust crate, and any dependencies - it may have on other crates (typically also prebuilt). - - - Note: Buck is currently tested with (and therefore supports) version 1.32.0 of Rust. - """, - examples = """ - ``` - - prebuilt_rust_library( - name = 'dailygreet', - rlib = 'libdailygreet.rlib', - deps = [ - ':jinsy', - ], - ) - - prebuilt_rust_library( - name = 'jinsy', - rlib = 'libarbiter-6337e9cb899bd295.rlib', - ) - - ``` - """, - further = None, - attrs = ( - # @unsorted-dict-items - { - "rlib": attrs.source(doc = """ - Path to the precompiled Rust crate - typically of the form 'libfoo.rlib', or - 'libfoo-abc123def456.rlib' if it has symbol versioning metadata. - """), - } | - rust_common.crate(crate_type = attrs.string(default = "")) | - rust_common.deps_arg(is_binary = False) | - { - "contacts": attrs.list(attrs.string(), default = []), - "default_host_platform": attrs.option(attrs.configuration_label(), default = None), - "labels": attrs.list(attrs.string(), default = []), - "licenses": attrs.list(attrs.source(), default = []), - "link_style": attrs.option(attrs.enum(LinkableDepType), default = None), - "proc_macro": attrs.bool(default = False), - } | - rust_common.toolchains_args() - ), - uses_plugins = [RustProcMacroPlugin], -) - def _rust_common_attributes(is_binary: bool): return { + "clippy_configuration": attrs.option(attrs.dep(providers = [ClippyConfiguration]), default = None), "contacts": attrs.list(attrs.string(), default = []), "coverage": attrs.bool(default = False), "default_host_platform": attrs.option(attrs.configuration_label(), default = None), @@ -87,7 +39,6 @@ def _rust_binary_attrs_group(prefix: str) -> dict[str, Attr]: attrs = (rust_common.deps_arg(is_binary = True) | rust_common.named_deps_arg(is_binary = True) | rust_common.linker_flags_arg() | - rust_common.env_arg() | native_common.link_style()) return {prefix + name: v for name, v in attrs.items()} @@ -98,8 +49,9 @@ _RUST_EXECUTABLE_ATTRIBUTES = { "auto_link_groups": attrs.bool(default = True), # TODO: enable distributed thinlto "enable_distributed_thinlto": attrs.bool(default = False), - "link_group": attrs.option(attrs.string(), default = None), - "link_group_map": link_group_map_attr(), + # Required by the rules but not supported, since Rust is auto-link groups only + "link_group": attrs.default_only(attrs.option(attrs.string(), default = None)), + "link_group_map": LINK_GROUP_MAP_ATTR, "link_group_min_binary_node_count": attrs.option(attrs.int(), default = None), "rpath": attrs.bool(default = False, doc = """ Set the "rpath" in 
the executable when using a shared link style. @@ -117,9 +69,7 @@ rust_binary = prelude_rule( If you invoke a build with the `check` flavor, then Buck will invoke rustc to check the code (typecheck, produce warnings, etc), but won't generate an executable code. When applied to binaries it produces no output; for libraries it produces metadata for - consumers of the library. When building with `check`, extra compiler flags from - the `rust.rustc_check_flags` are added to the compiler's command line options, - to allow for extra warnings, etc. + consumers of the library. Note: Buck is currently tested with (and therefore supports) version 1.32.0 of Rust. @@ -169,10 +119,12 @@ rust_binary = prelude_rule( rust_common.rustc_flags_arg() | rust_common.crate(crate_type = attrs.option(attrs.string(), default = None)) | rust_common.crate_root() | + rust_common.env_arg() | _rust_binary_attrs_group(prefix = "") | _rust_common_attributes(is_binary = True) | _RUST_EXECUTABLE_ATTRIBUTES | - rust_common.toolchains_args() | + rust_common.cxx_toolchain_arg() | + rust_common.rust_toolchain_arg() | rust_common.workspaces_arg() | buck.allow_cache_upload_arg() ), @@ -190,9 +142,7 @@ rust_library = prelude_rule( If you invoke a build with the `check` flavor, then Buck will invoke rustc to check the code (typecheck, produce warnings, etc), but won't generate an executable code. When applied to binaries it produces no output; for libraries it produces metadata for - consumers of the library. When building with `check`, extra compiler flags from - the `rust.rustc_check_flags` are added to the compiler's command line options, - to allow for extra warnings, etc. + consumers of the library. Note: Buck is currently tested with (and therefore supports) version 1.32.0 of Rust. @@ -232,19 +182,24 @@ rust_library = prelude_rule( # don't have to know whether we're building a rust_binary or a # rust_library. 
rust_common.linker_flags_arg() | + rust_common.exported_linker_flags_arg() | rust_common.env_arg() | rust_common.crate(crate_type = attrs.option(attrs.string(), default = None)) | rust_common.crate_root() | - native_common.preferred_linkage(preferred_linkage_type = attrs.enum(Linkage, default = "any")) | + native_common.preferred_linkage(preferred_linkage_type = attrs.enum(Linkage.values(), default = "any")) | + native_common.soname() | + native_common.link_style() | _rust_common_attributes(is_binary = False) | { "crate_dynamic": attrs.option(attrs.dep(), default = None), + "doc_env": rust_common.env_arg()["env"], "doctests": attrs.option(attrs.bool(), default = None), "proc_macro": attrs.bool(default = False), "supports_python_dlopen": attrs.option(attrs.bool(), default = None), } | _rust_binary_attrs_group(prefix = "doc_") | - rust_common.toolchains_args() | + rust_common.cxx_toolchain_arg() | + rust_common.rust_toolchain_arg() | rust_common.workspaces_arg() ), uses_plugins = [RustProcMacroPlugin], @@ -298,6 +253,7 @@ rust_test = prelude_rule( further = None, attrs = ( # @unsorted-dict-items + buck.inject_test_env_arg() | rust_common.srcs_arg() | rust_common.mapped_srcs_arg() | rust_common.edition_arg() | @@ -305,6 +261,9 @@ rust_test = prelude_rule( rust_common.rustc_flags_arg() | rust_common.crate(crate_type = attrs.option(attrs.string(), default = None)) | rust_common.crate_root() | + rust_common.default_roots_arg() | + rust_common.run_env_arg() | + rust_common.build_and_run_env_arg() | _rust_binary_attrs_group(prefix = "") | _rust_common_attributes(is_binary = True) | _RUST_EXECUTABLE_ATTRIBUTES | @@ -315,15 +274,15 @@ rust_test = prelude_rule( same command-line parameters and produce the same output as the test framework. """), } | - buck.re_test_args() | - rust_common.toolchains_args() | + re_test_common.test_args() | + rust_common.cxx_toolchain_arg() | + rust_common.rust_toolchain_arg() | rust_common.workspaces_arg() ), uses_plugins = [RustProcMacroPlugin], ) rust_rules = struct( - prebuilt_rust_library = prebuilt_rust_library, rust_binary = rust_binary, rust_library = rust_library, rust_test = rust_test, diff --git a/prelude/decls/scala_rules.bzl b/prelude/decls/scala_rules.bzl index 093ac8e5a2c02..e4482dad03020 100644 --- a/prelude/decls/scala_rules.bzl +++ b/prelude/decls/scala_rules.bzl @@ -6,6 +6,7 @@ # of this source tree. 
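Before the Scala hunks, a sketch tying together the `rust_library` changes above: `native_common.soname()` plus the `Linkage.values()`-typed `preferred_linkage`. The target is hypothetical; `$(ext)` expands as documented in `native_common.bzl` earlier in this diff:

```
rust_library(
    name = "mylib",
    srcs = ["lib.rs"],
    # From native_common.preferred_linkage(), now typed via Linkage.values().
    preferred_linkage = "shared",
    # From native_common.soname(): $(ext 2.3) expands per platform, giving
    # libmylib.so.2.3 on Linux and libmylib.2.3.dylib on Mac.
    soname = "libmylib.$(ext 2.3)",
)
```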
load(":common.bzl", "AbiGenerationMode", "ForkMode", "LogLevel", "SourceAbiVerificationMode", "TestType", "UnusedDependenciesAction", "prelude_rule") +load(":jvm_common.bzl", "jvm_common") scala_library = prelude_rule( name = "scala_library", @@ -26,14 +27,12 @@ scala_library = prelude_rule( "exported_provided_deps": attrs.list(attrs.dep(), default = []), "extra_arguments": attrs.list(attrs.string(), default = []), "java_version": attrs.option(attrs.string(), default = None), - "javac": attrs.option(attrs.source(), default = None), "labels": attrs.list(attrs.string(), default = []), "licenses": attrs.list(attrs.source(), default = []), "manifest_file": attrs.option(attrs.source(), default = None), "maven_coords": attrs.option(attrs.string(), default = None), "never_mark_as_unused_dependency": attrs.option(attrs.bool(), default = None), "on_unused_dependencies": attrs.option(attrs.enum(UnusedDependenciesAction), default = None), - "plugins": attrs.list(attrs.dep(), default = []), "proguard_config": attrs.option(attrs.source(), default = None), "provided_deps": attrs.list(attrs.dep(), default = []), "remove_classes": attrs.list(attrs.regex(), default = []), @@ -46,7 +45,7 @@ scala_library = prelude_rule( "source_only_abi_deps": attrs.list(attrs.dep(), default = []), "srcs": attrs.list(attrs.source(), default = []), "target": attrs.option(attrs.string(), default = None), - } + } | jvm_common.plugins() | jvm_common.javac() ), ) @@ -74,14 +73,12 @@ scala_test = prelude_rule( "extra_arguments": attrs.list(attrs.string(), default = []), "fork_mode": attrs.enum(ForkMode, default = "none"), "java_version": attrs.option(attrs.string(), default = None), - "javac": attrs.option(attrs.source(), default = None), "labels": attrs.list(attrs.string(), default = []), "licenses": attrs.list(attrs.source(), default = []), "manifest_file": attrs.option(attrs.source(), default = None), "maven_coords": attrs.option(attrs.string(), default = None), "never_mark_as_unused_dependency": attrs.option(attrs.bool(), default = None), "on_unused_dependencies": attrs.option(attrs.enum(UnusedDependenciesAction), default = None), - "plugins": attrs.list(attrs.dep(), default = []), "proguard_config": attrs.option(attrs.source(), default = None), "provided_deps": attrs.list(attrs.dep(), default = []), "remove_classes": attrs.list(attrs.regex(), default = []), @@ -103,7 +100,7 @@ scala_test = prelude_rule( "use_cxx_libraries": attrs.option(attrs.bool(), default = None), "use_dependency_order_classpath": attrs.option(attrs.bool(), default = None), "vm_args": attrs.list(attrs.arg(), default = []), - } + } | jvm_common.plugins() | jvm_common.javac() ), ) diff --git a/prelude/decls/shell_rules.bzl b/prelude/decls/shell_rules.bzl index 71485bcfcca2d..01d992975fea9 100644 --- a/prelude/decls/shell_rules.bzl +++ b/prelude/decls/shell_rules.bzl @@ -6,6 +6,7 @@ # of this source tree. load(":common.bzl", "buck", "prelude_rule") +load(":re_test_common.bzl", "re_test_common") sh_binary = prelude_rule( name = "sh_binary", @@ -75,6 +76,11 @@ sh_binary = prelude_rule( that contains all the resources, laid out in their locations relative to the original buck project root. """), + "append_script_extension": attrs.bool(default = True, doc = """ + By default, sh_binary ensures that the script has an appropriate extension (e.g. `.sh` or `.bat`), + appending one itself if necessary. Setting this to False prevents that behavior and makes the caller + responsible for ensuring an existing appropriate extension. 
+ """), "contacts": attrs.list(attrs.string(), default = []), "default_host_platform": attrs.option(attrs.configuration_label(), default = None), "deps": attrs.list(attrs.dep(), default = []), @@ -154,6 +160,7 @@ sh_test = prelude_rule( further = None, attrs = ( # @unsorted-dict-items + buck.inject_test_env_arg() | { "test": attrs.option(attrs.one_of(attrs.dep(), attrs.source()), default = None, doc = """ Either the path to the script (relative to the build file), or a `build target`. @@ -186,7 +193,7 @@ sh_test = prelude_rule( "run_env": attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False, default = {}), "run_test_separately": attrs.bool(default = False), "test_rule_timeout_ms": attrs.option(attrs.int(), default = None), - } | buck.re_test_args() + } | re_test_common.test_args() ), ) diff --git a/prelude/decls/toolchains_common.bzl b/prelude/decls/toolchains_common.bzl index 08664ac2370d5..6cd2ad2c41502 100644 --- a/prelude/decls/toolchains_common.bzl +++ b/prelude/decls/toolchains_common.bzl @@ -7,9 +7,9 @@ load("@prelude//android:android_toolchain.bzl", "AndroidPlatformInfo", "AndroidToolchainInfo") load("@prelude//csharp:toolchain.bzl", "CSharpToolchainInfo") -load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxPlatformInfo", "CxxToolchainInfo") load("@prelude//go:toolchain.bzl", "GoToolchainInfo") -load("@prelude//haskell:haskell.bzl", "HaskellPlatformInfo", "HaskellToolchainInfo") +load("@prelude//go_bootstrap:go_bootstrap.bzl", "GoBootstrapToolchainInfo") +load("@prelude//haskell:toolchain.bzl", "HaskellPlatformInfo", "HaskellToolchainInfo") load("@prelude//java:dex_toolchain.bzl", "DexToolchainInfo") load( "@prelude//java:java_toolchain.bzl", @@ -25,12 +25,10 @@ load( load("@prelude//python:toolchain.bzl", "PythonPlatformInfo", "PythonToolchainInfo") load("@prelude//python_bootstrap:python_bootstrap.bzl", "PythonBootstrapToolchainInfo") load("@prelude//rust:rust_toolchain.bzl", "RustToolchainInfo") +load("@prelude//tests:remote_test_execution_toolchain.bzl", "RemoteTestExecutionToolchainInfo") load("@prelude//zip_file:zip_file_toolchain.bzl", "ZipFileToolchainInfo") def _toolchain(lang: str, providers: list[typing.Any]) -> Attr: - return attrs.default_only(attrs.toolchain_dep(default = "toolchains//:" + lang, providers = providers)) - -def _toolchain_with_override(lang: str, providers: list[typing.Any]) -> Attr: return attrs.toolchain_dep(default = "toolchains//:" + lang, providers = providers) def _android_toolchain(): @@ -40,19 +38,23 @@ def _csharp_toolchain(): return _toolchain("csharp", [CSharpToolchainInfo]) def _cxx_toolchain(): - return _toolchain("cxx", [CxxToolchainInfo, CxxPlatformInfo]) + # `CxxToolchainInfo, CxxPlatformInfo`, but python doesn't require it + return _toolchain("cxx", []) def _dex_toolchain(): - return _toolchain_with_override("dex", [DexToolchainInfo]) + return _toolchain("dex", [DexToolchainInfo]) def _go_toolchain(): return _toolchain("go", [GoToolchainInfo]) +def _go_bootstrap_toolchain(): + return _toolchain("go_bootstrap", [GoBootstrapToolchainInfo]) + def _haskell_toolchain(): return _toolchain("haskell", [HaskellToolchainInfo, HaskellPlatformInfo]) def _java_toolchain(): - return _toolchain_with_override("java", [JavaToolchainInfo, JavaPlatformInfo]) + return _toolchain("java", [JavaToolchainInfo, JavaPlatformInfo]) def _java_for_android_toolchain(): return _toolchain("java_for_android", [JavaToolchainInfo, JavaPlatformInfo]) @@ -64,11 +66,11 @@ def _java_test_toolchain(): return _toolchain("java_test", [JavaTestToolchainInfo]) 
def _kotlin_toolchain(): - return _toolchain_with_override("kotlin", [KotlinToolchainInfo]) + return _toolchain("kotlin", [KotlinToolchainInfo]) def _prebuilt_jar_toolchain(): # Override is allowed for bootstrapping prebuilt jar toolchains - return _toolchain_with_override("prebuilt_jar", [PrebuiltJarToolchainInfo]) + return _toolchain("prebuilt_jar", [PrebuiltJarToolchainInfo]) def _python_toolchain(): return _toolchain("python", [PythonToolchainInfo, PythonPlatformInfo]) @@ -82,12 +84,16 @@ def _rust_toolchain(): def _zip_file_toolchain(): return _toolchain("zip_file", [ZipFileToolchainInfo]) +def _remote_test_execution_toolchain(): + return _toolchain("remote_test_execution", [RemoteTestExecutionToolchainInfo]) + toolchains_common = struct( android = _android_toolchain, csharp = _csharp_toolchain, cxx = _cxx_toolchain, dex = _dex_toolchain, go = _go_toolchain, + go_bootstrap = _go_bootstrap_toolchain, haskell = _haskell_toolchain, java = _java_toolchain, java_for_android = _java_for_android_toolchain, @@ -99,4 +105,5 @@ toolchains_common = struct( python_bootstrap = _python_bootstrap_toolchain, rust = _rust_toolchain, zip_file = _zip_file_toolchain, + remote_test_execution = _remote_test_execution_toolchain, ) diff --git a/prelude/dist/dist_info.bzl b/prelude/dist/dist_info.bzl new file mode 100644 index 0000000000000..b44fbb1570247 --- /dev/null +++ b/prelude/dist/dist_info.bzl @@ -0,0 +1,51 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//linking:shared_libraries.bzl", "SharedLibrariesTSet") +load("@prelude//utils:arglike.bzl", "ArgLike") + +# DistInfo is a provider that indicates what other targets/artifacts might be +# necessary to ship alongside the current target as part of a package or other +# distribution mechanism, such as shared libraries, default configuration, +# graphical/media assets, etc. +DistInfo = provider(fields = { + # The collection of other artifacts that must be available when an + # executable is executed, such as resources. + # + # These files are typically a subset of `DefaultInfo.other_outputs` of the + # executable. `other_outputs` may contain other auxiliary helpful but + # nonessential runtime files, such as external debuginfo which one would + # need in order to run the executable in a debugger. + # + # When an executable is the end goal of a build (i.e. `buck2 build :main`) + # and `--materializations=none` has not been passed, then all of + # `other_outputs` is what gets materialized. For convenience to developers, + # we choose to make this include external debuginfo, rather than requiring + # them to build something like `buck2 build :main :main[debuginfo]` in order + # to have an executable that is debuggable locally. + # + # In contrast, `nondebug_runtime_files` are the things required always in + # any context where an executable runs. For example when this executable is + # the exe in a genrule -- there would be no point materializing its external + # debuginfo in that situation because no debugger is involved. + # + # There are yet other contexts where not even `nondebug_runtime_files` would + # need to be materialized. For example a rule that depends on an executable + # in order to compute a checksum or signature for it. 
As the executable does + # not run during that action, runtime files would not be needed. As such, + # bundling `nondebug_runtime_files` into an artifact group with the + # executable artifact itself, using `artifact.with_associated_artifacts`, is + # not a universally suitable alternative to nondebug runtime files. + # + # Unlike `ResourceInfo` this provider does not attempt to identify the label + # from which each of these artifacts originates. This is just the projected + # set of all the files for materialization purposes. + "nondebug_runtime_files": provider_field(list[ArgLike]), + + # Transitive shared library dependencies. + "shared_libs": provider_field(SharedLibrariesTSet | None, default = None), +}) diff --git a/prelude/erlang/applications/BUCK b/prelude/erlang/applications/BUCK deleted file mode 100644 index 1148a21894c6c..0000000000000 --- a/prelude/erlang/applications/BUCK +++ /dev/null @@ -1,3 +0,0 @@ -load("@prelude//erlang:erlang_otp_application.bzl", "gen_otp_applications") - -gen_otp_applications() diff --git a/prelude/erlang/applications/BUCK.v2 b/prelude/erlang/applications/BUCK.v2 new file mode 100644 index 0000000000000..b476ddc2592ab --- /dev/null +++ b/prelude/erlang/applications/BUCK.v2 @@ -0,0 +1,8 @@ +load("@prelude//erlang:erlang_otp_application.bzl", "gen_otp_applications") +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +gen_otp_applications() diff --git a/prelude/erlang/common_test/.elp.toml b/prelude/erlang/common_test/.elp.toml index a87913c0aa7d5..81c33a6a09a7d 100644 --- a/prelude/erlang/common_test/.elp.toml +++ b/prelude/erlang/common_test/.elp.toml @@ -1,4 +1,4 @@ [buck] -enabled = true build_deps = false -included_targets = ["//erlang/common_test/..."] +enabled = true +included_targets = ["prelude//erlang/common_test/..."] diff --git a/prelude/erlang/common_test/common/BUCK b/prelude/erlang/common_test/common/BUCK deleted file mode 100644 index 7bd533f7a364f..0000000000000 --- a/prelude/erlang/common_test/common/BUCK +++ /dev/null @@ -1,19 +0,0 @@ -erlang_application( - name = "common", - srcs = glob([ - "src/*.erl", - "src/*.hrl", - ]), - applications = [ - "kernel", - "stdlib", - ], - erl_opts = [ - "+debug_info", - "+warnings_as_errors", - ], - includes = glob(["include/*.hrl"]), - shell_libs = [], - use_global_parse_transforms = False, - visibility = ["PUBLIC"], -) diff --git a/prelude/erlang/common_test/common/BUCK.v2 b/prelude/erlang/common_test/common/BUCK.v2 new file mode 100644 index 0000000000000..fbd20f2c1a266 --- /dev/null +++ b/prelude/erlang/common_test/common/BUCK.v2 @@ -0,0 +1,25 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +erlang_application( + name = "common", + srcs = glob([ + "src/*.erl", + "src/*.hrl", + ]), + applications = [ + "kernel", + "stdlib", + ], + erl_opts = [ + "+debug_info", + "+warnings_as_errors", + ], + includes = glob(["include/*.hrl"]), + shell_libs = [], + use_global_parse_transforms = False, + visibility = ["PUBLIC"], +) diff --git a/prelude/erlang/common_test/common/include/buck_ct_records.hrl b/prelude/erlang/common_test/common/include/buck_ct_records.hrl index f73aa93547e34..b9dcf9ec1a7c4 100644 --- a/prelude/erlang/common_test/common/include/buck_ct_records.hrl +++ b/prelude/erlang/common_test/common/include/buck_ct_records.hrl @@ -8,12 +8,14 @@ %% % @format -record(test_info, { - dependencies :: [string()], - test_suite :: string(), - config_files :: [string()], + dependencies :: 
[file:filename()], + test_suite :: binary(), + config_files :: [binary()], providers :: [{atom(), [term()]}], ct_opts :: [term()], - erl_cmd :: string(), + erl_cmd :: [binary()], + extra_flags :: [string()], + common_app_env :: #{string() => string()}, artifact_annotation_mfa :: artifact_annotations:annotation_function() }). @@ -28,7 +30,9 @@ config_files :: [file:filename_all()], providers :: [{module(), [term()]}], ct_opts :: [term()], - erl_cmd :: string(), + common_app_env :: #{string() => string()}, + erl_cmd :: [binary()], + extra_flags :: [string()], artifact_annotation_mfa :: artifact_annotations:annotation_function() }). diff --git a/prelude/erlang/common_test/common/src/artifact_annotations.erl b/prelude/erlang/common_test/common/src/artifact_annotations.erl index 1b823c7cdea81..2bf533bfc9db4 100644 --- a/prelude/erlang/common_test/common/src/artifact_annotations.erl +++ b/prelude/erlang/common_test/common/src/artifact_annotations.erl @@ -37,7 +37,7 @@ -export_type([annotation_function/0]). -spec serialize(test_result_artifact_annotations()) -> binary(). -serialize(ArtifactAnnotation) -> jsone:encode(ArtifactAnnotation). +serialize(ArtifactAnnotation) -> json:encode(ArtifactAnnotation). -spec create_artifact_annotation(file:filename(), #test_env{}) -> test_result_artifact_annotations(). create_artifact_annotation(FileName, TestEnv) -> diff --git a/prelude/erlang/common_test/common/src/buck_ct_parser.erl b/prelude/erlang/common_test/common/src/buck_ct_parser.erl index ddf6690b73b0f..b0488dce499dd 100644 --- a/prelude/erlang/common_test/common/src/buck_ct_parser.erl +++ b/prelude/erlang/common_test/common/src/buck_ct_parser.erl @@ -18,10 +18,23 @@ %% Public API -export([parse_str/1]). --spec parse_str(string()) -> term(). -parse_str("") -> +-spec parse_str(binary()) -> term(). +parse_str(<<"">>) -> []; parse_str(StrArgs) -> - {ok, Tokens, _} = erl_scan:string(StrArgs ++ "."), - {ok, Term} = erl_parse:parse_term(Tokens), - Term. + try + {ok, Tokens, _} = erl_scan:string(unicode:characters_to_list([StrArgs, "."])), + erl_parse:parse_term(Tokens) + of + {ok, Term} -> + Term; + {error, Reason} -> + error(lists:flatten(io_lib:format("Error parsing StrArgs ~p, Reason: ~p", [StrArgs, Reason]))) + catch + E:R:S -> + error( + lists:flatten( + io_lib:format("Error parsing StrArgs ~p, error ~ts", [StrArgs, erl_error:format_exception(E, R, S)]) + ) + ) + end. 
diff --git a/prelude/erlang/common_test/common/src/buck_ct_provider.erl b/prelude/erlang/common_test/common/src/buck_ct_provider.erl index 2d04eb4dd5576..3807f11230410 100644 --- a/prelude/erlang/common_test/common/src/buck_ct_provider.erl +++ b/prelude/erlang/common_test/common/src/buck_ct_provider.erl @@ -115,7 +115,7 @@ execute_method_on_provider(Method, ProviderName, ProviderState, Args) -> {error, Reason} -> ErrorMsg = unicode:characters_to_list( io_lib:format( - "Method ~p on provider ~p with sate ~p ~n returned with error ~p ~n", [ + "Method ~p on provider ~p with state ~p ~n returned with error ~p ~n", [ Method, ProviderName, ProviderState, Reason ] ) @@ -138,7 +138,7 @@ execute_method_on_provider(Method, ProviderName, ProviderState, Args) -> catch Class:Reason:StackTrace -> ErrorMsg = unicode:characters_to_list( - io_lib:format("Method ~p on provider ~p with sate ~p ~n ~s ~n", [ + io_lib:format("Method ~p on provider ~p with state ~p ~n ~s ~n", [ Method, ProviderName, ProviderState, diff --git a/prelude/erlang/common_test/common/src/ct_error_printer.erl b/prelude/erlang/common_test/common/src/ct_error_printer.erl index 52cf17dbee787..2bfe18251e58e 100644 --- a/prelude/erlang/common_test/common/src/ct_error_printer.erl +++ b/prelude/erlang/common_test/common/src/ct_error_printer.erl @@ -50,6 +50,7 @@ format_error(ErrType, Reason, FormatStackTrace) -> -spec format_reason(term()) -> {ok, unicode:chardata()}. format_reason(Reason) -> + blame_reason(Reason), lists:foldl( fun (_Formatter, Acc = {ok, _Formatted}) -> Acc; @@ -63,6 +64,22 @@ format_reason(Reason) -> ] ). +-spec blame_reason(term()) -> ok. +blame_reason({Reason, StackTrace}) -> + try + case application:get_env(common, exception_blame) of + undefined -> + ok; + {ok, Blame} when is_atom(Blame) -> + Blame:format_blame(Reason, StackTrace) + end + catch + C:E:S -> + io:format("Error: ~ts", [erl_error:format_exception(C, E, S)]) + end; +blame_reason(_Reason) -> + ok. + -spec maybe_custom_format(term()) -> unrecognized_error | {ok, [unicode:chardata()]}. maybe_custom_format({{Type, Props}, StackTrace}) when is_atom(Type), is_list(Props) -> try proplists:to_map(Props) of @@ -81,7 +98,7 @@ maybe_custom_format({{Type, Props = #{formatter := Formatter}}, _StackTrace}) wh "~s~n", [erl_error:format_exception(E, R, ST)] ), - io_lib:format("original assertion: ~n" "~p~n", {Type, Props}) + io_lib:format("original assertion: ~n" "~p~n", [{Type, Props}]) ]} end; maybe_custom_format(_Reason) -> diff --git a/prelude/erlang/common_test/common/src/execution_logs.erl b/prelude/erlang/common_test/common/src/execution_logs.erl deleted file mode 100644 index c809394ad875b..0000000000000 --- a/prelude/erlang/common_test/common/src/execution_logs.erl +++ /dev/null @@ -1,154 +0,0 @@ -%% Copyright (c) Meta Platforms, Inc. and affiliates. -%% -%% This source code is licensed under both the MIT license found in the -%% LICENSE-MIT file in the root directory of this source tree and the Apache -%% License, Version 2.0 found in the LICENSE-APACHE file in the root directory -%% of this source tree. - -%%%------------------------------------------------------------------- -%%% @doc -%%% Search in the execution directory produced by buck2 test -%%% for relevant logs to display to the user. -%%% Link them into a temporary directory, and produce a json output -%%% that lists them. -%%% @end -%%% % @format - --module(execution_logs). - --compile(warn_missing_spec). - -%% Public API --export([create_dir_summary/1]). 
- --type key() :: - buck2_exec_dir | log_private | suite_html | scuba_link | test_log_json | ct_log | ct_stdout. - --type key_entry() :: {key(), string()} | not_found. - --spec create_dir_summary(file:filename()) -> #{atom() => binary()}. -create_dir_summary(ExecDir) -> - TempDir = create_temp_directory(), - Funcs = [ - fun add_test_log/2, - fun add_test_log_json/2, - fun add_suite_html/2, - fun add_log_private/2, - fun add_exec_dir/2, - fun add_ct_log/2, - fun add_ct_stdout/2 - ], - lists:foldl( - fun(Func, Map) -> - case Func(TempDir, ExecDir) of - not_found -> Map; - {Key, Path} -> Map#{Key => list_to_binary(Path)} - end - end, - #{}, - Funcs - ). - --spec add_ct_log(file:filename(), file:filename()) -> key_entry(). -add_ct_log(TempDir, ExecDir) -> - case find_pattern(ExecDir, "ct_executor.log", file) of - {error, _} -> - not_found; - TestLogJson -> - file:make_symlink(TestLogJson, filename:join(TempDir, "ct.log")), - {ct_log, filename:join(TempDir, "ct.log")} - end. - --spec add_ct_stdout(file:filename(), file:filename()) -> key_entry(). -add_ct_stdout(TempDir, ExecDir) -> - case find_pattern(ExecDir, "ct_executor.stdout.txt", file) of - {error, _} -> - not_found; - TestLogJson -> - file:make_symlink(TestLogJson, filename:join(TempDir, "ct.stdout")), - {ct_stdout, filename:join(TempDir, "ct.stdout")} - end. - --spec add_test_log(file:filename(), file:filename()) -> key_entry(). -add_test_log(TempDir, ExecDir) -> - case find_pattern(ExecDir, "**/test.log", file) of - {error, _} -> - not_found; - TestLogJson -> - file:make_symlink(TestLogJson, filename:join(TempDir, "test.log")), - {test_log, filename:join(TempDir, "test.log")} - end. - --spec add_test_log_json(file:filename(), file:filename()) -> key_entry(). -add_test_log_json(TempDir, ExecDir) -> - case find_pattern(ExecDir, "**/test.log.json", file) of - {error, _} -> - not_found; - TestLogJson -> - file:make_symlink(TestLogJson, filename:join(TempDir, "test.log.json")), - {test_log_json, filename:join(TempDir, "test.log.json")} - end. - --spec add_suite_html(file:filename(), file:filename()) -> key_entry(). -add_suite_html(TempDir, ExecDir) -> - case find_pattern(ExecDir, "**/suite.log.html", file) of - {error, _} -> - not_found; - SuiteHtml -> - file:make_symlink(filename:dirname(SuiteHtml), filename:join(TempDir, "htmls")), - {suite_html, filename:join([TempDir, "htmls", "suite.log.html"])} - end. - --spec add_log_private(file:filename(), file:filename()) -> key_entry(). -add_log_private(TempDir, ExecDir) -> - case find_pattern(ExecDir, "**/log_private", folder) of - {error, _} -> - not_found; - LogPrivate -> - file:make_symlink(LogPrivate, filename:join(TempDir, "log_private")), - {log_private, filename:join(TempDir, "log_private")} - end. - --spec add_exec_dir(file:filename(), file:filename()) -> key_entry(). -add_exec_dir(TempDir, ExecDir) -> - file:make_symlink(ExecDir, filename:join(TempDir, "exec_dir")), - {buck2_exec_dir, filename:join(TempDir, "exec_dir")}. - --spec create_temp_directory() -> file:filename(). 
-create_temp_directory() -> - RootTmpDir = - case os:getenv("TEMPDIR") of - false -> - NewTmpDir = os:cmd("mktemp"), - filename:dirname(NewTmpDir); - Dir -> - Dir - end, - {_, _, Micro} = TS = os:timestamp(), - {{_Year, Month, Day}, {Hour, Minute, Second}} = calendar:now_to_universal_time(TS), - DateTime = unicode:characters_to_list( - io_lib:format("~2..0w.~2..0wT~2..0w.~2..0w.~2..0w.~w", [ - Month, Day, Hour, Minute, Second, Micro - ]) - ), - is_list(DateTime) orelse error(uncode_format_failed, DateTime), - TmpDir = filename:join([RootTmpDir, "buck2_test_logs", DateTime]), - filelib:ensure_path(TmpDir), - TmpDir. - --spec find_pattern(file:filename(), file:filename(), file | folder) -> - {error, not_found} | file:filename(). -find_pattern(ExecDir, Pattern, FolderOrFile) -> - Func = - case FolderOrFile of - folder -> fun filelib:is_dir/1; - file -> fun filelib:is_regular/1 - end, - Candidates = [ - Path - || Path <- filelib:wildcard(filename:join(ExecDir, Pattern)), Func(Path) - ], - case Candidates of - [] -> {error, not_found}; - [LogPrivate | _Tail] -> LogPrivate - end. diff --git a/prelude/erlang/common_test/cth_hooks/BUCK b/prelude/erlang/common_test/cth_hooks/BUCK deleted file mode 100644 index 631e312d51a0e..0000000000000 --- a/prelude/erlang/common_test/cth_hooks/BUCK +++ /dev/null @@ -1,30 +0,0 @@ -erlang_application( - name = "cth_hooks", - srcs = glob([ - "src/*.erl", - "src/*.hrl", - ]), - applications = [ - "kernel", - "stdlib", - "common_test", - ], - erl_opts = [ - "+debug_info", - "+warnings_as_errors", - ], - shell_libs = [], - use_global_parse_transforms = False, - visibility = ["PUBLIC"], -) - -erlang_application( - name = "compiled_suites", - srcs = glob(["tests/cth_tpx_SUITE_data/*.erl"]), - applications = ["stdlib"], - erl_opts = [ - "+debug_info", - "+warnings_as_errors", - ], - visibility = ["PUBLIC"], -) diff --git a/prelude/erlang/common_test/cth_hooks/BUCK.v2 b/prelude/erlang/common_test/cth_hooks/BUCK.v2 new file mode 100644 index 0000000000000..cca3d1d4d4469 --- /dev/null +++ b/prelude/erlang/common_test/cth_hooks/BUCK.v2 @@ -0,0 +1,36 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +erlang_application( + name = "cth_hooks", + srcs = glob([ + "src/*.erl", + "src/*.hrl", + ]), + applications = [ + "kernel", + "stdlib", + "common_test", + ], + erl_opts = [ + "+debug_info", + "+warnings_as_errors", + ], + shell_libs = [], + use_global_parse_transforms = False, + visibility = ["PUBLIC"], +) + +erlang_application( + name = "compiled_suites", + srcs = glob(["tests/cth_tpx_SUITE_data/*.erl"]), + applications = ["stdlib"], + erl_opts = [ + "+debug_info", + "+warnings_as_errors", + ], + visibility = ["PUBLIC"], +) diff --git a/prelude/erlang/common_test/cth_hooks/src/cth_tpx.erl b/prelude/erlang/common_test/cth_hooks/src/cth_tpx.erl index 72ee0a2b40e77..ce4479c5908c3 100644 --- a/prelude/erlang/common_test/cth_hooks/src/cth_tpx.erl +++ b/prelude/erlang/common_test/cth_hooks/src/cth_tpx.erl @@ -105,7 +105,7 @@ fmt_stack(Suite, CasePat, CaseArgs, {_Class, {Reason, ST}}, Label) -> fmt_stack(Suite, CasePat, CaseArgs, {Reason, ST}, Label); fmt_stack(_Suite, _CasePat, _CaseArgs, Reason, _Label) -> Output = ct_error_printer:format_error(Reason, true), - unicode:characters_to_list(io_lib:format("~s", [Output])). + unicode:characters_to_list(io_lib:format("~ts", [Output])). 
%% ----------------------------------------------------------------------------- %% CT hooks functions @@ -489,7 +489,7 @@ add_result( case Truncated of true -> StdOutLocation = - case os:getenv("SANDCASTLE") of + case is_running_in_sandcastle() of true -> "tab Diagnostics: Artifacts/ct_executor.stdout.txt"; _ -> @@ -497,13 +497,15 @@ add_result( filename:dirname(OutputFile), "ct_executor.stdout.txt" ) end, - Io ++ + [ io_lib:format( - "\n The std_out has been truncated, see ~s for the full suite std_out.", + "The stdout logs have been truncated, see ~s for the full suite stdout. Showing tail below\n", [ StdOutLocation ] - ); + ) + | Io + ]; false -> Io end @@ -672,3 +674,15 @@ modify_shared_state(HookState, Caller, Action) -> {ok, Action(State)} end), NewHookState. + +-spec is_running_in_sandcastle() -> boolean(). +is_running_in_sandcastle() -> + case os:getenv("SANDCASTLE_DIFF_ID") of + [$D | _] -> + true; + _ -> + case os:getenv("SANDCASTLE") of + false -> false; + _ -> true + end + end. diff --git a/prelude/erlang/common_test/cth_hooks/src/cth_tpx_role.erl b/prelude/erlang/common_test/cth_hooks/src/cth_tpx_role.erl index fadc41f38496f..43915817df9bd 100644 --- a/prelude/erlang/common_test/cth_hooks/src/cth_tpx_role.erl +++ b/prelude/erlang/common_test/cth_hooks/src/cth_tpx_role.erl @@ -1,3 +1,10 @@ +%%% Copyright (c) Meta Platforms, Inc. and affiliates. +%%% +%%% This source code is licensed under both the MIT license found in the +%%% LICENSE-MIT file in the root directory of this source tree and the Apache +%%% License, Version 2.0 found in the LICENSE-APACHE file in the root directory +%%% of this source tree. + -module(cth_tpx_role). % -------- What are cth_tpx roles?? --------------- diff --git a/prelude/erlang/common_test/cth_hooks/src/cth_tpx_server.erl b/prelude/erlang/common_test/cth_hooks/src/cth_tpx_server.erl index f719e0807cbf7..90db84fb1fc5f 100644 --- a/prelude/erlang/common_test/cth_hooks/src/cth_tpx_server.erl +++ b/prelude/erlang/common_test/cth_hooks/src/cth_tpx_server.erl @@ -1,3 +1,10 @@ +%%% Copyright (c) Meta Platforms, Inc. and affiliates. +%%% +%%% This source code is licensed under both the MIT license found in the +%%% LICENSE-MIT file in the root directory of this source tree and the Apache +%%% License, Version 2.0 found in the LICENSE-APACHE file in the root directory +%%% of this source tree. + -module(cth_tpx_server). -behaviour(gen_server). @@ -31,11 +38,11 @@ start_link(InitialState) -> -spec get(Handle :: handle()) -> CurrentState :: term(). get(Handle) -> - gen_server:call(Handle, get). + gen_server:call(Handle, get, 6000). -spec modify(Handle :: handle(), Fun :: fun((State) -> {A, State})) -> A. modify(Handle, Fun) -> - gen_server:call(Handle, {modify, Fun}). + gen_server:call(Handle, {modify, Fun}, 6000). 
%% ---- gen_server callbacks ---------- diff --git a/prelude/erlang/common_test/test_binary/BUCK b/prelude/erlang/common_test/test_binary/BUCK deleted file mode 100644 index 6e0b3e10aa8a5..0000000000000 --- a/prelude/erlang/common_test/test_binary/BUCK +++ /dev/null @@ -1,39 +0,0 @@ -erlang_escript( - name = "escript", - emu_args = [ - "+A0", - "+S1:1", - "+sbtu", - "-mode minimal", - ], - main_module = "test_binary", - visibility = ["PUBLIC"], - deps = [ - ":test_binary", - ], -) - -erlang_application( - name = "test_binary", - srcs = glob([ - "src/*.erl", - "src/*.hrl", - ]), - applications = [ - "kernel", - "stdlib", - "syntax_tools", - "xmerl", - "prelude//erlang/toolchain:resources[jsone]", - "prelude//erlang/common_test/common:common", - "prelude//erlang/common_test/cth_hooks:cth_hooks", - "prelude//erlang/common_test/test_exec:test_exec", - ], - erl_opts = [ - "+debug_info", - "+warnings_as_errors", - ], - includes = glob(["include/*.hrl"]), - use_global_parse_transforms = False, - visibility = ["PUBLIC"], -) diff --git a/prelude/erlang/common_test/test_binary/BUCK.v2 b/prelude/erlang/common_test/test_binary/BUCK.v2 new file mode 100644 index 0000000000000..9c18544dd5722 --- /dev/null +++ b/prelude/erlang/common_test/test_binary/BUCK.v2 @@ -0,0 +1,29 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +erlang_application( + name = "test_binary", + srcs = glob([ + "src/*.erl", + "src/*.hrl", + ]), + applications = [ + "kernel", + "stdlib", + "syntax_tools", + "xmerl", + "prelude//erlang/common_test/common:common", + "prelude//erlang/common_test/cth_hooks:cth_hooks", + "prelude//erlang/common_test/test_exec:test_exec", + ], + erl_opts = [ + "+debug_info", + "+warnings_as_errors", + ], + includes = glob(["include/*.hrl"]), + use_global_parse_transforms = False, + visibility = ["PUBLIC"], +) diff --git a/prelude/erlang/common_test/test_binary/src/json_interfacer.erl b/prelude/erlang/common_test/test_binary/src/json_interfacer.erl index 7e4ffc5d8d723..c10fdd6fbb5d6 100644 --- a/prelude/erlang/common_test/test_binary/src/json_interfacer.erl +++ b/prelude/erlang/common_test/test_binary/src/json_interfacer.erl @@ -72,7 +72,7 @@ write_json_output(OutputDir, TpxResults) -> -spec format_json([case_result()]) -> string(). format_json(TpxResults) -> - jsone:encode(lists:map(fun(CaseResult) -> format_case(CaseResult) end, TpxResults)). + json:encode(lists:map(fun(CaseResult) -> format_case(CaseResult) end, TpxResults)). -spec format_case([case_result()]) -> [formatted_case_result()]. format_case( diff --git a/prelude/erlang/common_test/test_binary/src/list_test.erl b/prelude/erlang/common_test/test_binary/src/list_test.erl index 05de4291d1259..cba1f20bcdf00 100644 --- a/prelude/erlang/common_test/test_binary/src/list_test.erl +++ b/prelude/erlang/common_test/test_binary/src/list_test.erl @@ -72,24 +72,28 @@ list_tests(Suite, Hooks) -> %% @doc Test that all the tests in the list are exported. -spec test_exported_test(suite(), test_name()) -> error | ok. 
test_exported_test(Suite, Test) -> - case erlang:function_exported(Suite, Test, _Arity = 1) of + case erlang:function_exported(Suite, Test, 1) of false -> - error( - {invalid_test, - io_lib:format( - "The test ~s has been discovered while recursively exploring all/0, " ++ - "groups/0 but is not an exported method of arity 1", - [Test] - )} - ); + case erlang:function_exported(Suite, '$handle_undefined_function', 2) of + true -> + ok; + false -> + error( + {invalid_test, + io_lib:format( + "The test ~s has been discovered while recursively exploring all/0, " ++ + "groups/0 but is not an exported method of arity 1", + [Test] + )} + ) + end; true -> ok end. -spec load_hooks([module()]) -> ok. load_hooks(Hooks) -> - lists:map(fun code:ensure_loaded/1, Hooks), - ok. + ok = code:ensure_modules_loaded(Hooks). %% We extract the call to the groups() method so that we can type it. -spec suite_groups(suite(), [module()]) -> groups_output(). diff --git a/prelude/erlang/common_test/test_binary/src/test_binary.erl b/prelude/erlang/common_test/test_binary/src/test_binary.erl index e11f4f697b764..220b99ec1c0fe 100644 --- a/prelude/erlang/common_test/test_binary/src/test_binary.erl +++ b/prelude/erlang/common_test/test_binary/src/test_binary.erl @@ -9,13 +9,13 @@ -module(test_binary). --export([main/1]). +-export([main/1, main/0]). -include_lib("common/include/buck_ct_records.hrl"). -include_lib("common/include/tpx_records.hrl"). -include_lib("kernel/include/logger.hrl"). -% in ms, the time we give to init to stop before halting. --define(INIT_STOP_TIMEOUT, 5000). +main() -> + main(init:get_plain_arguments()). main([TestInfoFile, "list", OutputDir]) -> test_logger:set_up_logger(OutputDir, test_listing), @@ -31,16 +31,7 @@ main([TestInfoFile, "list", OutputDir]) -> after test_logger:flush() end, - init:stop(ExitCode), - receive - after ?INIT_STOP_TIMEOUT -> - ?LOG_ERROR( - io_lib:format("~p failed to terminate within ~c millisecond", [ - ?MODULE, ?INIT_STOP_TIMEOUT - ]) - ), - erlang:halt(ExitCode) - end; + erlang:halt(ExitCode); main([TestInfoFile, "run", OutputDir | Tests]) -> test_logger:set_up_logger(OutputDir, test_runner), ExitCode = @@ -55,16 +46,7 @@ main([TestInfoFile, "run", OutputDir | Tests]) -> after test_logger:flush() end, - init:stop(ExitCode), - receive - after ?INIT_STOP_TIMEOUT -> - ?LOG_ERROR( - io_lib:format("~p failed to terminate within ~c millisecond", [ - ?MODULE, ?INIT_STOP_TIMEOUT - ]) - ), - erlang:halt(ExitCode) - end; + erlang:halt(ExitCode); main([TestInfoFile]) -> %% without test runner support we run all tests and need to create our own test dir OutputDir = string:trim(os:cmd("mktemp -d")), @@ -93,75 +75,11 @@ main(Other) -> ), erlang:halt(3). --spec load_test_info(string()) -> #test_info{}. 
-load_test_info(TestInfoFile) -> - {ok, [ - #{ - "dependencies" := Dependencies, - "test_suite" := SuiteName, - "test_dir" := TestDir, - "config_files" := ConfigFiles, - "providers" := Providers, - "ct_opts" := CtOpts, - "extra_ct_hooks" := ExtraCtHooks, - "erl_cmd" := ErlCmd, - "artifact_annotation_mfa" := ArtifactAnnotationMFA - } - ]} = file:consult(TestInfoFile), - Providers1 = buck_ct_parser:parse_str(Providers), - CtOpts1 = make_ct_opts( - buck_ct_parser:parse_str(CtOpts), - [buck_ct_parser:parse_str(CTH) || CTH <- ExtraCtHooks] - ), - #test_info{ - dependencies = [filename:absname(Dep) || Dep <- Dependencies], - test_suite = filename:join(filename:absname(TestDir), [SuiteName, ".beam"]), - config_files = lists:map(fun(ConfigFile) -> filename:absname(ConfigFile) end, ConfigFiles), - providers = Providers1, - artifact_annotation_mfa = parse_mfa(ArtifactAnnotationMFA), - ct_opts = CtOpts1, - erl_cmd = ErlCmd - }. - --spec parse_mfa(string()) -> {ok, artifact_annotations:annotation_function()} | {error, term()}. -parse_mfa(MFA) -> - case erl_scan:string(MFA) of - {ok, - [ - {'fun', _}, - {atom, _, Module}, - {':', _}, - {atom, _, Function}, - {'/', _}, - {integer, _, 1} - ], - _} -> - fun Module:Function/1; - {ok, - [ - {atom, _, Module}, - {':', _}, - {atom, _, Function}, - {'/', _}, - {integer, _, 1} - ], - _} -> - fun Module:Function/1; - Reason -> - {error, Reason} - end. - --type ctopt() :: term(). --type cth() :: module() | {module(), term()}. - --spec make_ct_opts([ctopt()], [cth()]) -> [ctopt()]. -make_ct_opts(CtOpts, []) -> CtOpts; -make_ct_opts(CtOpts, ExtraCtHooks) -> [{ct_hooks, ExtraCtHooks} | CtOpts]. - --spec load_suite(string()) -> [{atom(), string()}]. +-spec load_suite(binary()) -> atom(). load_suite(SuitePath) -> - {module, Module} = code:load_abs(filename:rootname(filename:absname(SuitePath))), - {Module, filename:absname(SuitePath)}. + Path = unicode:characters_to_list(filename:rootname(filename:absname(SuitePath))), + {module, Module} = code:load_abs(Path), + Module. -spec get_hooks(#test_info{}) -> [module()]. get_hooks(TestInfo) -> @@ -177,20 +95,20 @@ get_hooks(TestInfo) -> -spec listing(string(), string()) -> ok. listing(TestInfoFile, OutputDir) -> - TestInfo = load_test_info(TestInfoFile), + TestInfo = test_info:load_from_file(TestInfoFile), Listing = get_listing(TestInfo, OutputDir), listing_interfacer:produce_xml_file(OutputDir, Listing). -spec running(string(), string(), [string()]) -> ok. running(TestInfoFile, OutputDir, Tests) -> AbsOutputDir = filename:absname(OutputDir), - TestInfo = load_test_info(TestInfoFile), + TestInfo = test_info:load_from_file(TestInfoFile), Listing = get_listing(TestInfo, AbsOutputDir), test_runner:run_tests(Tests, TestInfo, AbsOutputDir, Listing). get_listing(TestInfo, OutputDir) -> code:add_paths(TestInfo#test_info.dependencies), - {Suite, _Path} = load_suite(TestInfo#test_info.test_suite), + Suite = load_suite(TestInfo#test_info.test_suite), {ok, ProjectRoot} = file:get_cwd(), true = os:putenv("PROJECT_ROOT", ProjectRoot), @@ -211,7 +129,7 @@ get_listing(TestInfo, OutputDir) -> list_and_run(TestInfoFile, OutputDir) -> os:putenv("ERLANG_BUCK_DEBUG_PRINT", "disabled"), - TestInfo = load_test_info(TestInfoFile), + TestInfo = test_info:load_from_file(TestInfoFile), Listing = get_listing(TestInfo, OutputDir), Tests = listing_to_testnames(Listing), running(TestInfoFile, OutputDir, Tests), @@ -228,7 +146,7 @@ listing_to_testnames(Listing) -> -spec print_results(file:filename()) -> boolean(). 
 print_results(ResultsFile) ->
     {ok, Data} = file:read_file(ResultsFile),
-    Results = jsone:decode(Data),
+    Results = json:decode(Data),
     {Summary, AnyFailure} = lists:foldl(fun print_individual_results/2, {#{}, false}, Results),
     io:format("~n~10s: ~b~n~n", ["TOTAL", lists:sum(maps:values(Summary))]),
     [
diff --git a/prelude/erlang/common_test/test_binary/src/test_info.erl b/prelude/erlang/common_test/test_binary/src/test_info.erl
new file mode 100644
index 0000000000000..219d79842f5fd
--- /dev/null
+++ b/prelude/erlang/common_test/test_binary/src/test_info.erl
@@ -0,0 +1,133 @@
+-module(test_info).
+
+-export([load_from_file/1, write_to_file/2]).
+-include_lib("common/include/buck_ct_records.hrl").
+
+-type test_info() :: #test_info{}.
+-export_type([test_info/0]).
+
+-spec load_from_file(file:filename_all()) -> test_info().
+load_from_file(TestInfoFile) ->
+    {ok, Content} = file:read_file(TestInfoFile),
+    #{
+        <<"dependencies">> := Dependencies,
+        <<"test_suite">> := SuiteName,
+        <<"test_dir">> := TestDir,
+        <<"config_files">> := ConfigFiles,
+        <<"providers">> := Providers,
+        <<"ct_opts">> := CtOpts,
+        <<"extra_ct_hooks">> := ExtraCtHooks,
+        <<"erl_cmd">> := [ErlExec | ErlFlags],
+        <<"extra_flags">> := ExtraFlags,
+        <<"artifact_annotation_mfa">> := ArtifactAnnotationMFA,
+        <<"common_app_env">> := CommonAppEnv
+    } = json:decode(Content),
+    Providers1 = buck_ct_parser:parse_str(Providers),
+    CtOpts1 = make_ct_opts(
+        buck_ct_parser:parse_str(CtOpts),
+        [buck_ct_parser:parse_str(CTH) || CTH <- ExtraCtHooks]
+    ),
+
+    #test_info{
+        dependencies = [unicode:characters_to_list(make_path_absolute(Dep)) || Dep <- Dependencies],
+        test_suite = filename:join(TestDir, [SuiteName, ".beam"]),
+        config_files = [make_path_absolute(ConfigFile) || ConfigFile <- ConfigFiles],
+        providers = Providers1,
+        artifact_annotation_mfa = parse_mfa(ArtifactAnnotationMFA),
+        ct_opts = CtOpts1,
+        erl_cmd = [make_path_absolute(ErlExec) | ErlFlags],
+        extra_flags = ExtraFlags,
+        common_app_env = CommonAppEnv
+    }.
+
+-spec write_to_file(file:filename_all(), test_info()) -> ok | {error, Reason :: term()}.
+write_to_file(FileName, TestInfo) ->
+    #test_info{
+        dependencies = Dependencies,
+        test_suite = SuiteBeamPath,
+        config_files = ConfigFiles,
+        providers = Providers,
+        artifact_annotation_mfa = ArtifactAnnotationMFA,
+        ct_opts = CtOpts,
+        erl_cmd = [ErlCmd | ErlFlags],
+        extra_flags = ExtraFlags,
+        common_app_env = CommonAppEnv
+    } = TestInfo,
+    ErlTermToStr = fun(Term) -> list_to_binary(lists:flatten(io_lib:format("~p", [Term]))) end,
+    Json = #{
+        <<"dependencies">> => [try_make_path_relative(Dep) || Dep <- Dependencies],
+        <<"test_suite">> => filename:basename(SuiteBeamPath, ".beam"),
+        <<"test_dir">> => filename:dirname(SuiteBeamPath),
+        <<"config_files">> => [try_make_path_relative(ConfigFile) || ConfigFile <- ConfigFiles],
+        <<"providers">> => ErlTermToStr(Providers),
+        <<"ct_opts">> => ErlTermToStr(CtOpts),
+        <<"extra_ct_hooks">> => [],
+        <<"erl_cmd">> => [try_make_path_relative(ErlCmd) | ErlFlags],
+        <<"extra_flags">> => ExtraFlags,
+        <<"artifact_annotation_mfa">> => ErlTermToStr(ArtifactAnnotationMFA),
+        <<"common_app_env">> => CommonAppEnv
+    },
+    file:write_file(FileName, json:encode(Json)).
+
+
+-spec make_path_absolute(file:filename_all()) -> file:filename_all().
+make_path_absolute(Path) ->
+    case os:getenv("REPO_ROOT") of
+        false -> filename:absname(Path);
+        RepoRoot -> filename:join(RepoRoot, Path)
+    end.
+
+-spec try_make_path_relative(file:filename_all()) -> file:filename_all().
+try_make_path_relative(Path) ->
+    case filename:pathtype(Path) of
+        relative -> Path;
+        _ ->
+            BaseDir = case os:getenv("REPO_ROOT") of
+                false ->
+                    {ok, CWD} = file:get_cwd(),
+                    CWD;
+                RepoRoot -> RepoRoot
+            end,
+            BaseDirParts = filename:split(BaseDir),
+            PathParts = filename:split(Path),
+            %% lists:split/2 errors when Path has fewer components than
+            %% BaseDir, so guard the split and fall back to the input path.
+            case length(PathParts) >= length(BaseDirParts) andalso
+                lists:split(length(BaseDirParts), PathParts)
+            of
+                {BaseDirParts, RelativeParts} -> filename:join(RelativeParts);
+                _ -> Path
+            end
+    end.
+
+
+-spec parse_mfa(binary()) -> artifact_annotations:annotation_function() | {error, term()}.
+parse_mfa(MFA) ->
+    case erl_scan:string(unicode:characters_to_list(MFA)) of
+        {ok,
+            [
+                {'fun', _},
+                {atom, _, Module},
+                {':', _},
+                {atom, _, Function},
+                {'/', _},
+                {integer, _, 1}
+            ],
+            _} ->
+            fun Module:Function/1;
+        {ok,
+            [
+                {atom, _, Module},
+                {':', _},
+                {atom, _, Function},
+                {'/', _},
+                {integer, _, 1}
+            ],
+            _} ->
+            fun Module:Function/1;
+        Reason ->
+            {error, Reason}
+    end.
+
+-type ctopt() :: term().
+-type cth() :: module() | {module(), term()}.
+
+-spec make_ct_opts([ctopt()], [cth()]) -> [ctopt()].
+make_ct_opts(CtOpts, []) -> CtOpts;
+make_ct_opts(CtOpts, ExtraCtHooks) -> [{ct_hooks, ExtraCtHooks} | CtOpts].
diff --git a/prelude/erlang/common_test/test_binary/src/test_runner.erl b/prelude/erlang/common_test/test_binary/src/test_runner.erl
index 809dc989ecf72..22ed5662f94b2 100644
--- a/prelude/erlang/common_test/test_binary/src/test_runner.erl
+++ b/prelude/erlang/common_test/test_binary/src/test_runner.erl
@@ -22,7 +22,7 @@
 -spec run_tests([string()], #test_info{}, string(), [#test_spec_test_case{}]) -> ok.
 run_tests(Tests, #test_info{} = TestInfo, OutputDir, Listing) ->
     check_ct_opts(TestInfo#test_info.ct_opts),
-    Suite = list_to_atom(filename:basename(TestInfo#test_info.test_suite, ".beam")),
+    Suite = binary_to_atom(filename:basename(TestInfo#test_info.test_suite, ".beam")),
     StructuredTests = lists:map(fun(Test) -> parse_test_name(Test, Suite) end, Tests),
     case StructuredTests of
         [] ->
@@ -39,7 +39,9 @@
             config_files = TestInfo#test_info.config_files,
             providers = TestInfo#test_info.providers,
             ct_opts = TestInfo#test_info.ct_opts,
+            common_app_env = TestInfo#test_info.common_app_env,
             erl_cmd = TestInfo#test_info.erl_cmd,
+            extra_flags = TestInfo#test_info.extra_flags,
             artifact_annotation_mfa = TestInfo#test_info.artifact_annotation_mfa
         })
     end.
@@ -60,10 +62,8 @@
         Suite, Tests, filename:absname(filename:dirname(SuitePath)), OutputDir, CtOpts
     ),
     TestSpecFile = filename:join(OutputDir, "test_spec.spec"),
-    lists:foreach(
-        fun(Spec) -> file:write_file(TestSpecFile, io_lib:format("~tp.~n", [Spec]), [append]) end,
-        TestSpec
-    ),
+    FormattedSpec = [io_lib:format("~tp.~n", [Entry]) || Entry <- TestSpec],
+    file:write_file(TestSpecFile, FormattedSpec),
     NewTestEnv = TestEnv#test_env{test_spec_file = TestSpecFile, ct_opts = CtOpts},
     try run_test(NewTestEnv) of
         ok -> ok
@@ -167,10 +167,10 @@
         case Status of
             failed ->
                 collect_results_broken_run(
-                    Tests, Suite, "test binary internal crash", ResultExec, OutLog
+                    Tests, Suite, "internal crash", ResultExec, OutLog
                 );
             Other when Other =:= passed orelse Other =:= timeout ->
-                % Here we either pased or timeout.
+                % Here we either passed or timed out.
case file:read_file(ResultsFile) of {ok, JsonFile} -> TreeResults = binary_to_term(JsonFile), @@ -226,8 +226,6 @@ provide_output_file( json -> json_interfacer:write_json_output(OutputDir, Results) end, - JsonLogs = execution_logs:create_dir_summary(OutputDir), - file:write_file(filename:join(OutputDir, "logs.json"), jsone:encode(JsonLogs)), test_artifact_directory:link_to_artifact_dir(test_logger:get_std_out(OutputDir, ct_executor), OutputDir, TestEnv), test_artifact_directory:link_to_artifact_dir(test_logger:get_std_out(OutputDir, test_runner), OutputDir, TestEnv), test_artifact_directory:prepare(OutputDir, TestEnv). @@ -333,7 +331,8 @@ add_or_append(List, {Key, Value}) -> %% @doc Built the test_spec selecting the requested tests and %% specifying the result output. -spec build_test_spec(atom(), [atom()], string(), string(), [term()]) -> [term()]. -build_test_spec(Suite, Tests, TestDir, OutputDir, CtOpts) -> +build_test_spec(Suite, Tests, TestDir0, OutputDir, CtOpts) -> + TestDir = unicode:characters_to_list(TestDir0), ListGroupTest = get_requested_tests(Tests), SpecTests = lists:map( fun @@ -420,7 +419,7 @@ reorder_tests(Tests, #test_spec_test_case{testcases = TestCases}) -> %% Make sure it exists and returns it. set_up_log_dir(OutputDir) -> LogDir = filename:join(OutputDir, "log_dir"), - filelib:ensure_path(LogDir), + ok = filelib:ensure_path(LogDir), LogDir. %% @doc Informs the test runner of a successful test run. diff --git a/prelude/erlang/common_test/test_cli_lib/BUCK b/prelude/erlang/common_test/test_cli_lib/BUCK deleted file mode 100644 index 439cc1c4d618b..0000000000000 --- a/prelude/erlang/common_test/test_cli_lib/BUCK +++ /dev/null @@ -1,15 +0,0 @@ -erlang_application( - name = "test_cli_lib", - srcs = glob(["src/*.erl"]), - applications = [ - "//erlang/common_test/test_binary:test_binary", - ] + read_root_config("erlang", "test_shell_apps", "").split(), - erl_opts = [ - "+debug_info", - "+warnings_as_errors", - ], - resources = [ - # "//erl/config:sys.ct.config", - ], - visibility = ["PUBLIC"], -) diff --git a/prelude/erlang/common_test/test_cli_lib/BUCK.v2 b/prelude/erlang/common_test/test_cli_lib/BUCK.v2 new file mode 100644 index 0000000000000..be0241fb233be --- /dev/null +++ b/prelude/erlang/common_test/test_cli_lib/BUCK.v2 @@ -0,0 +1,35 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +erlang_application( + name = "test_cli_lib", + srcs = glob(["src/*.erl"]), + applications = ["//erlang/common_test/test_binary:test_binary"], + erl_opts = [ + "+debug_info", + "+warnings_as_errors", + ], + resources = [], + visibility = ["PUBLIC"], +) + +erlang_tests( + contacts = ["whatsapp_testing_infra"], + labels = ["e2e"], + suites = ["test/test_cli_e2e_SUITE.erl"], + deps = [ + "stdlib", + ":test_cli_e2e_SUITE_fixtures", + ":test_cli_lib", + ], +) + +erlang_application( + name = "test_cli_e2e_SUITE_fixtures", + srcs = glob(["test/test_cli_e2e_SUITE_data/*.erl"]), + applications = [], + labels = ["test_application"], +) diff --git a/prelude/erlang/common_test/test_cli_lib/src/test.erl b/prelude/erlang/common_test/test_cli_lib/src/test.erl index 88786b1d1104b..ae930da927780 100644 --- a/prelude/erlang/common_test/test_cli_lib/src/test.erl +++ b/prelude/erlang/common_test/test_cli_lib/src/test.erl @@ -15,7 +15,7 @@ -module(test). --include_lib("common/include/tpx_records.hrl"). +-include_lib("common/include/buck_ct_records.hrl"). 
%% Public API -export([ @@ -24,7 +24,8 @@ list/0, list/1, rerun/1, run/0, run/1, - reset/0 + reset/0, + logs/0 ]). %% init @@ -34,9 +35,18 @@ start_shell/0 ]). --type run_spec() :: string() | non_neg_integer() | [#{name := string(), suite := string()}]. +%% Test functions +-export([ + list_impl/1 +]). + +-type test_id() :: string() | non_neg_integer(). +-type test_info() :: #{name := string(), suite := atom()}. +-type run_spec() :: test_id() | [test_info()]. -type run_result() :: {non_neg_integer(), non_neg_integer()}. +-type provided_test_info() :: test_info:test_info(). + -spec start() -> ok. start() -> info(), @@ -69,13 +79,13 @@ help() -> [ print_help(F, A) || {F, A} <- ?MODULE:module_info(exports), - not lists:member(F, [module_info, ensure_initialized, start, start_shell]) + not lists:member(F, [module_info, ensure_initialized, start, start_shell, list_impl]) ], io:format("~n"), io:format("For more information, use the built in help, e.g. h(test, help)~n"), ok. --spec print_help(function(), arity()) -> ok. +-spec print_help(Fun :: atom(), arity()) -> ok. print_help(Fun, Arity) -> #{args := Args, desc := [DescFirst | DescRest]} = command_description(Fun, Arity), FunSig = string:pad( @@ -83,9 +93,10 @@ print_help(Fun, Arity) -> ), io:format("~s -- ~s~n", [FunSig, DescFirst]), Padding = string:pad("", 34), - [io:format("~s~s~n", [Padding, DescLine]) || DescLine <- DescRest]. + [io:format("~s~s~n", [Padding, DescLine]) || DescLine <- DescRest], + ok. --spec command_description(module(), arity()) -> #{args := [string()], desc := string()}. +-spec command_description(Fun :: atom(), arity()) -> #{args := [string()], desc := [string()]}. command_description(help, 0) -> #{args => [], desc => ["print help"]}; command_description(info, 0) -> @@ -125,12 +136,14 @@ command_description(run, 1) -> }; command_description(reset, 0) -> #{args => [], desc => ["restarts the test node, enabling a clean test state"]}; +command_description(logs, 0) -> + #{args => [], desc => ["print log files of the currently running test suites"]}; command_description(F, A) -> error({help_is_missing, {F, A}}). %% @doc List all available tests %% @equiv test:list("") --spec list() -> non_neg_integer(). +-spec list() -> ok | {error, term()}. list() -> list(""). @@ -138,26 +151,26 @@ list() -> %% [https://www.erlang.org/doc/man/re.html#regexp_syntax] for the supported %% regular expression syntax. If a module is given as argument, list all %% tests from that module instead --spec list(RegExOrModule :: module() | string()) -> non_neg_integer(). +-spec list(RegExOrModule :: module() | string()) -> ok | {error, term()}. list(RegEx) when is_list(RegEx) -> - ensure_initialized(), - Tests = ct_daemon:list(RegEx), - print_tests(Tests). + case list_impl(RegEx) of + {ok, TestsString} -> io:format("~s", [TestsString]); + Error -> Error + end. %% @doc Run a test given by either the test id from the last list() command, or %% a regex that matches exactly one test. Tests are run with the shortest possible %% setup. This call does not recompile the test suite and its dependencies, but %% runs them as is. You can manually recompile code with c(Module). %% To reset the test state use reset(). --spec rerun(string() | non_neg_integer() | [#{name := string(), suite := string()}]) -> - run_result(). +-spec rerun(run_spec()) -> run_result(). rerun(Spec) -> ensure_initialized(), do_plain_test_run(Spec). %% @doc update code and run all tests %% @equiv run("") --spec run() -> ok | error. +-spec run() -> run_result() | error. run() -> run(""). 
@@ -177,8 +190,15 @@
         ok ->
             io:format("Reloading all changed modules... "),
             Loaded = ct_daemon:load_changed(),
-            io:format("reloaded ~p modules ~P~n", [erlang:length(Loaded), Loaded, 10]),
-            rerun(ToRun);
+            case erlang:length(Loaded) of
+                0 ->
+                    do_plain_test_run(ToRun);
+                ChangedCount ->
+                    io:format("reloaded ~p modules ~P~n", [ChangedCount, Loaded, 10]),
+                    % Some modules changed: re-run via the original RegExOrId rather than the
+                    % now-stale ToRun list; given a regex or id, do_plain_test_run/1 re-lists
+                    % the tests before running them, without recompiling anything further
+                    do_plain_test_run(RegExOrId)
+            end;
         Error ->
             Error
     end
@@ -198,7 +218,27 @@ reset() ->
         })
     end.
 
+%% @doc Print all the logs of the currently running test suites
+-spec logs() -> ok.
+logs() ->
+    ensure_initialized(),
+    case logs_impl() of
+        {ok, Logs} ->
+            lists:foreach(fun(LogPath) -> io:format("~s~n", [LogPath]) end, Logs),
+            io:format("~n");
+        {error, not_found} ->
+            io:format("no logs found~n")
+    end.
+
 %% internal
+-spec list_impl(RegEx :: string()) -> {ok, string()} | {error, term()}.
+list_impl(RegEx) ->
+    ensure_initialized(),
+    case ct_daemon:list(RegEx) of
+        {invalid_regex, _} = Err -> {error, Err};
+        Tests -> {ok, print_tests(Tests)}
+    end.
+
 ensure_initialized() ->
     PrintInit = lists:foldl(
         fun(Fun, Acc) -> Fun() orelse Acc end,
@@ -216,39 +256,52 @@
             ok
     end.
 
+-spec init_utility_apps() -> boolean().
 init_utility_apps() ->
+    _ = application:load(test_cli_lib),
+    UtilityApps = application:get_env(test_cli_lib, utility_applications, []),
     RunningApps = proplists:get_value(running, application:info()),
-    case proplists:is_defined(test_cli_lib, RunningApps) of
+    StartResults = [init_utility_app(RunningApps, UtilityApp) || UtilityApp <- UtilityApps],
+    lists:any(fun(B) when is_boolean(B) -> B end, StartResults).
+
+-spec init_utility_app(RunningApps :: [atom()], UtilityApp :: atom()) -> boolean().
+init_utility_app(RunningApps, UtilityApp) ->
+    case proplists:is_defined(UtilityApp, RunningApps) of
         true ->
             false;
         false ->
-            io:format("starting utility applications...~n", []),
-            case application:ensure_all_started(test_cli_lib) of
+            io:format("starting utility application ~s...~n", [UtilityApp]),
+            case application:ensure_all_started(UtilityApp) of
                 {ok, _} ->
                     true;
                 Error ->
-                    io:format("ERROR: could not start utility applications:~n~p~n", [Error]),
-                    io:format("exiting...~n"),
-                    erlang:halt(-1)
+                    abort("could not start utility applications:~n~p", [Error])
             end
     end.
 
+-define(TYPE_IS_OK(Type), (Type =:= shortnames orelse Type =:= longnames)).
+
+-spec init_node() -> boolean().
 init_node() ->
     case ct_daemon:alive() of
         true ->
            false;
        false ->
            io:format("starting test node...~n", []),
+            #test_info{erl_cmd = ErlCmd} = get_provided_test_info(),
            case application:get_env(test_cli_lib, node_config) of
                undefined ->
-                    ct_daemon:start();
-                {ok, {Type, NodeName, Cookie}} ->
-                    ct_daemon:start(#{
-                        name => NodeName,
-                        type => Type,
-                        cookie => Cookie,
-                        options => [{multiply_timetraps, infinity} || is_debug_session()]
-                    })
+                    ct_daemon:start(ErlCmd);
+                {ok, {Type, NodeName, Cookie}} when ?TYPE_IS_OK(Type), is_atom(NodeName), is_atom(Cookie) ->
+                    ct_daemon:start(
+                        ErlCmd,
+                        #{
+                            name => NodeName,
+                            type => Type,
+                            cookie => Cookie,
+                            options => [{multiply_timetraps, infinity} || is_debug_session()]
+                        }
+                    )
            end,
            case is_debug_session() of
                true ->
@@ -259,6 +312,26 @@
             true
     end.
 
+-spec get_provided_test_info() -> provided_test_info().
+get_provided_test_info() -> + case application:get_env(test_cli_lib, test_info_file, undefined) of + undefined -> + abort("test_info_file not provided."); + TestInfoFile when is_binary(TestInfoFile) -> + test_info:load_from_file(TestInfoFile) + end. + +-spec abort(Message :: string()) -> no_return(). +abort(Message) -> + abort(Message, []). + +-spec abort(Format :: string(), Args :: [term()]) -> no_return(). +abort(Format, Args) -> + io:format(standard_error, "ERROR: " ++ Format ++ "~n", Args), + io:format(standard_error, "exiting...~n", []), + erlang:halt(1). + +-spec watchdog() -> no_return(). watchdog() -> Node = ct_daemon_node:get_node(), true = erlang:monitor_node(Node, true), @@ -272,6 +345,7 @@ watchdog() -> erlang:halt() end. +-spec init_group_leader() -> boolean(). init_group_leader() -> %% set the group leader unconditionally, we need to do this since %% during init, the group leader is different then the one from the @@ -279,23 +353,29 @@ init_group_leader() -> ct_daemon:set_gl(), false. +-spec print_tests([{module(), [{non_neg_integer(), string()}]}]) -> string(). print_tests([]) -> - io:format("no tests found~n"); + lists:flatten(io_lib:format("no tests found~n")); print_tests(Tests) -> - print_tests_impl(lists:reverse(Tests)). + lists:flatten(print_tests_impl(lists:reverse(Tests))). +-spec print_tests_impl([{module(), [{non_neg_integer(), string()}]}]) -> io_lib:chars(). print_tests_impl([]) -> - ok; + ""; print_tests_impl([{Suite, SuiteTests} | Rest]) -> - io:format("~s:~n", [Suite]), - [io:format("\t~b - ~s~n", [Id, Test]) || {Id, Test} <- SuiteTests], - print_tests_impl(Rest). + SuiteString = io_lib:format("~s:~n", [Suite]), + TestsString = [io_lib:format("\t~b - ~s~n", [Id, Test]) || {Id, Test} <- SuiteTests], + RestString = print_tests_impl(Rest), + SuiteString ++ TestsString ++ RestString. -spec is_debug_session() -> boolean(). is_debug_session() -> - application:get_env(test_cli_lib, debugger_mode, false). + case application:get_env(test_cli_lib, debugger_mode, false) of + Value when is_boolean(Value) -> + Value + end. --spec collect_results(#{module => [string()]}) -> #{string => ct_daemon_core:run_result()}. +-spec collect_results(#{module => [string()]}) -> #{string() => ct_daemon_core:run_result()}. collect_results(PerSuite) -> maps:fold( fun(Suite, Tests, Acc) -> @@ -330,7 +410,7 @@ ensure_per_suite_encapsulation(Suite) -> end end. --spec discover(string() | non_neg_integer()) -> [#{name := string(), suite := string()}]. +-spec discover(string() | non_neg_integer()) -> [test_info()]. discover(RegExOrId) -> case ct_daemon:discover(RegExOrId) of {error, not_listed_yet} -> @@ -375,11 +455,26 @@ do_plain_test_run(RegExOrId) -> ToRun -> do_plain_test_run(ToRun) end. --spec start_shell() -> no_return(). +-spec start_shell() -> ok | {error, term()}. start_shell() -> case string:to_integer(erlang:system_info(otp_release)) of {Version, _} when Version >= 26 -> shell:start_interactive(); _ -> - user_drv:start() + user_drv:start(), + ok + end. + +-spec logs_impl() -> {ok, [file:filename_all()]} | {error, not_found}. +logs_impl() -> + case ct_daemon:priv_dir() of + undefined -> + {error, not_found}; + PrivDir -> + PatternLog = filename:join(PrivDir, "*.log"), + LogPaths = filelib:wildcard(PatternLog), + PatternLogJson = filename:join(PrivDir, "*.log.json"), + LogJsonPaths = filelib:wildcard(PatternLogJson), + AllLogs = lists:sort(LogPaths ++ LogJsonPaths), + {ok, AllLogs} end. 
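Taken together, the test.erl changes above turn the shell entry points into thin wrappers over the new *_impl helpers. A hedged sketch of an interactive session against the reworked API (suite name and listing id are illustrative):

    1> test:list("my_SUITE").   % print matching tests; returns ok or {error, _}
    2> test:run(1).             % reload changed modules, then run listing id 1
    3> test:logs().             % print *.log / *.log.json files from the node's priv dir
    4> test:reset().            % restart the test node for a clean test state
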
diff --git a/prelude/erlang/common_test/test_cli_lib/test/test_cli_e2e_SUITE.erl b/prelude/erlang/common_test/test_cli_lib/test/test_cli_e2e_SUITE.erl new file mode 100644 index 0000000000000..cc4079dc03816 --- /dev/null +++ b/prelude/erlang/common_test/test_cli_lib/test/test_cli_e2e_SUITE.erl @@ -0,0 +1,59 @@ +%% Copyright (c) Meta Platforms, Inc. and affiliates. +%% This source code is licensed under both the MIT license found in the +%% LICENSE-MIT file in the root directory of this source tree and the Apache +%% License, Version 2.0 found in the LICENSE-APACHE file in the root directory +%% of this source tree. +%%% % @format +-module(test_cli_e2e_SUITE). + +-include_lib("stdlib/include/assert.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("common/include/buck_ct_records.hrl"). + +-export([all/0, init_per_suite/1, end_per_suite/1]). + +-export([ + test_list/1 +]). + +all() -> + [test_list]. + +init_per_suite(Config) -> + PrivDir = ?config(priv_dir, Config), + TestInfoFile = filename:join(PrivDir, <<"test_info">>), + + {ok, [ErlCmd]} = init:get_argument(progname), + + test_info:write_to_file(TestInfoFile, #test_info{ + dependencies = [], + test_suite = list_to_binary(code:which(test_list_SUITE)), + config_files = [], + providers = [], + ct_opts = [], + erl_cmd = ErlCmd, + extra_flags = [], + artifact_annotation_mfa = {foo, bar, 42} + }), + + application:set_env(test_cli_lib, test_info_file, TestInfoFile), + + Config. + +end_per_suite(_Config) -> + ok. + +test_list(_Config) -> + Expected = + "test_cli_e2e_SUITE:\n" + "test_list_SUITE:\n" + "\t1 - test_list_SUITE - .test_pass\n" + "\t2 - test_list_SUITE - default.test_fail\n", + ?assertEqual({ok, Expected}, test:list_impl("test_list_SUITE")), + + ?assertMatch({error, {invalid_regex, _}}, test:list_impl("^[a")), + + EmptyExpected = + "test_cli_e2e_SUITE:\n" + "test_list_SUITE:\n", + ?assertEqual({ok, EmptyExpected}, test:list_impl("does_not_exist_SUITE")). diff --git a/prelude/erlang/common_test/test_cli_lib/test/test_cli_e2e_SUITE_data/test_list_SUITE.erl b/prelude/erlang/common_test/test_cli_lib/test/test_cli_e2e_SUITE_data/test_list_SUITE.erl new file mode 100644 index 0000000000000..c28e4b0e93605 --- /dev/null +++ b/prelude/erlang/common_test/test_cli_lib/test/test_cli_e2e_SUITE_data/test_list_SUITE.erl @@ -0,0 +1,28 @@ +%% Copyright (c) Meta Platforms, Inc. and affiliates. +%% This source code is licensed under both the MIT license found in the +%% LICENSE-MIT file in the root directory of this source tree and the Apache +%% License, Version 2.0 found in the LICENSE-APACHE file in the root directory +%% of this source tree. +%%% % @format +-module(test_list_SUITE). + +-include_lib("stdlib/include/assert.hrl"). + +-export([all/0, groups/0]). + +-export([ + test_pass/1, + test_fail/1 +]). + +all() -> + [test_pass, {group, default}]. + +groups() -> + [{default, [], [test_fail]}]. + +test_pass(_Config) -> + ?assert(true). + +test_fail(_Config) -> + ?assert(false). 
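The e2e suite above round-trips a #test_info{} record through test_info:write_to_file/2 and test_info:load_from_file/1. For reference, the JSON payload those functions exchange decodes (via json:decode/1) to a map of roughly this shape; the keys come from load_from_file/1, while every value here is an illustrative placeholder:

    %% Illustrative only: keys as matched by load_from_file/1, values made up.
    #{
        <<"dependencies">> => [<<"ebin/some_dep">>],
        <<"test_suite">> => <<"my_SUITE">>,
        <<"test_dir">> => <<"path/to/suite_dir">>,
        <<"config_files">> => [],
        <<"providers">> => <<"[]">>,
        <<"ct_opts">> => <<"[]">>,
        <<"extra_ct_hooks">> => [],
        <<"erl_cmd">> => [<<"erl">>, <<"+A0">>],
        <<"extra_flags">> => [],
        <<"artifact_annotation_mfa">> => <<"my_mod:my_annotation/1">>,
        <<"common_app_env">> => #{}
    }
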
diff --git a/prelude/erlang/common_test/test_exec/BUCK b/prelude/erlang/common_test/test_exec/BUCK deleted file mode 100644 index 0ea358626cf37..0000000000000 --- a/prelude/erlang/common_test/test_exec/BUCK +++ /dev/null @@ -1,25 +0,0 @@ -erlang_application( - name = "test_exec", - srcs = glob([ - "src/*.erl", - "src/*.hrl", - ]), - app_src = "src/test_exec.app.src", - applications = [ - "kernel", - "stdlib", - "debugger", - "prelude//erlang/common_test/common:common", - "prelude//erlang/common_test/cth_hooks:cth_hooks", - ], - erl_opts = [ - "+debug_info", - "+warnings_as_errors", - ], - includes = glob(["include/*.hrl"]), - mod = ("test_exec", []), - resources = read_root_config("erlang", "erlang_tests_default_config", "").split(), - shell_libs = [], - use_global_parse_transforms = False, - visibility = ["PUBLIC"], -) diff --git a/prelude/erlang/common_test/test_exec/BUCK.v2 b/prelude/erlang/common_test/test_exec/BUCK.v2 new file mode 100644 index 0000000000000..1cb78df30489f --- /dev/null +++ b/prelude/erlang/common_test/test_exec/BUCK.v2 @@ -0,0 +1,41 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +erlang_application( + name = "test_exec", + srcs = glob([ + "src/*.erl", + "src/*.hrl", + ]), + app_src = "src/test_exec.app.src", + applications = [ + "kernel", + "stdlib", + "debugger", + "prelude//erlang/common_test/common:common", + "prelude//erlang/common_test/cth_hooks:cth_hooks", + ], + erl_opts = [ + "+debug_info", + "+warnings_as_errors", + ], + includes = glob(["include/*.hrl"]), + mod = ("test_exec", []), + resources = read_root_config("erlang", "erlang_tests_default_config", "").split(), + shell_libs = [], + use_global_parse_transforms = False, + visibility = ["PUBLIC"], +) + +erlang_tests( + contacts = ["whatsapp_testing_infra"], + labels = ["unit"], + suites = ["test/ct_executor_SUITE.erl"], + deps = [ + "stdlib", + ":test_exec", + ], +) diff --git a/prelude/erlang/common_test/test_exec/src/ct_daemon.erl b/prelude/erlang/common_test/test_exec/src/ct_daemon.erl index b5a4fb7fe7166..21b3f488cc93a 100644 --- a/prelude/erlang/common_test/test_exec/src/ct_daemon.erl +++ b/prelude/erlang/common_test/test_exec/src/ct_daemon.erl @@ -14,7 +14,7 @@ -module(ct_daemon). -export([ - start/0, start/1, + start/1, start/2, stop/0, alive/0, run/1, @@ -26,18 +26,22 @@ discover/1, load_changed/0, setup_state/0, - output_dir/0 + output_dir/0, + priv_dir/0 ]). %% @doc start a test-node with random name and shortname --spec start() -> ok. -start() -> - ct_daemon_node:start(). +-spec start(ErlCommand) -> ok when + ErlCommand :: [binary()]. +start(ErlCommand) -> + ct_daemon_node:start(ErlCommand). %% @doc starts the test node with the given distribution mode and node name --spec start(ct_daemon_node:config()) -> ok. -start(NodeInfo) -> - ct_daemon_node:start(NodeInfo). +-spec start(ErlCommand, Config) -> ok when + ErlCommand :: [binary()], + Config :: ct_daemon_node:config(). +start(ErlCommand, NodeInfo) -> + ct_daemon_node:start(ErlCommand, NodeInfo). %% @doc stops the test node -spec stop() -> ok. @@ -89,7 +93,7 @@ list(RegEx) -> end. -spec discover(pos_integer() | string()) -> - #{suite := module(), name := string()} + [#{suite := module(), name := string()}] | ct_daemon_runner:discover_error(). discover(RegExOrId) -> do_call({discover, RegExOrId}). @@ -107,6 +111,10 @@ setup_state() -> output_dir() -> do_call(output_dir). +-spec priv_dir() -> file:filename_all() | undefined. +priv_dir() -> + do_call(priv_dir). 
+
 -spec push_paths(Paths :: [file:filename_all()]) -> ok.
 push_paths(Paths) ->
     case alive() of
diff --git a/prelude/erlang/common_test/test_exec/src/ct_daemon_core.erl b/prelude/erlang/common_test/test_exec/src/ct_daemon_core.erl
index 958321008095a..dd3c560f2f134 100644
--- a/prelude/erlang/common_test/test_exec/src/ct_daemon_core.erl
+++ b/prelude/erlang/common_test/test_exec/src/ct_daemon_core.erl
@@ -235,8 +235,8 @@ test_part(Config, Suite, Test, Path) ->
     InitResult =
         case safe_call(wrap_ct_hook(init_per_testcase, Path, fun Suite:init_per_testcase/2), [Config]) of
             {error, not_exported} -> Config;
-            {skipped, Reason} -> {error, {skip, init_per_testcase, Reason}};
-            {failed, InitErrReason} -> {error, {skip, init_per_testcase, InitErrReason}};
+            {skip, Reason} -> {error, {skip, init_per_testcase, Reason}};
+            {fail, InitErrReason} -> {error, {skip, init_per_testcase, InitErrReason}};
             {error, InitErrReason} -> {error, {skip, init_per_testcase, InitErrReason}};
             InitOutConfig -> InitOutConfig
         end,
@@ -254,10 +254,10 @@
     of
         {save_config, AfterEndConfig} ->
             {Result, AfterEndConfig};
-        E = {Failed, _} when Failed =:= fail orelse Failed =:= failed ->
+        {Failed, E} when Failed =:= fail orelse Failed =:= failed ->
             FinalResult =
                 case status_from_test_result(Result, Test) of
-                    pass_result -> {error, {end_per_testcase, E}};
+                    pass_result -> {fail, {end_per_testcase, E}};
                     _ -> Result
                 end,
             {FinalResult, AfterRunConfig};
@@ -328,10 +328,10 @@ do_part_safe(Id, Fun, Config, TimeTrap) ->
     end,
     {name, FunName} = erlang:fun_info(Fun, name),
     try Fun(Config) of
-        {skipped, Reason} ->
+        {skip, Reason} ->
             ?LOG_DEBUG("got skip for ~p because of: ~p", [Id, Reason]),
             ParentPid ! {RspRef, {skip, {FunName, Id}, Reason}};
-        {failed, Reason} ->
+        {fail, Reason} ->
             ?LOG_DEBUG("got fail for ~p because of: ~p", [Id, Reason]),
             ParentPid ! {RspRef, {fail, {FunName, Id}, Reason}};
         {skip_and_save, Reason, _} ->
diff --git a/prelude/erlang/common_test/test_exec/src/ct_daemon_hooks.erl b/prelude/erlang/common_test/test_exec/src/ct_daemon_hooks.erl
index 00d09048e3817..913edb8eef252 100644
--- a/prelude/erlang/common_test/test_exec/src/ct_daemon_hooks.erl
+++ b/prelude/erlang/common_test/test_exec/src/ct_daemon_hooks.erl
@@ -7,12 +7,13 @@
 %%%-------------------------------------------------------------------
 %%% @doc
-%%% Implementation of hooks functionality
+%%% Implementation of hooks functionality. We mimic the behaviour of
+%%% common test hooks so that they can run in the test shell.
 %%% @end
 %%% % @format
 
 -module(ct_daemon_hooks).
 
--compile(warn_missing_spec).
+-compile(warn_missing_spec_all).
 
 -behaviour(gen_server).
 
@@ -63,16 +64,50 @@
     | on_tc_fail
     | on_tc_skip.
 
+-type post_hook_call() ::
+    post_init_per_suite
+    | post_init_per_group
+    | post_init_per_testcase
+    | post_end_per_suite
+    | post_end_per_group
+    | post_end_per_testcase.
+
+-type pre_hook_call() ::
+    pre_init_per_suite
+    | pre_init_per_group
+    | pre_init_per_testcase
+    | pre_end_per_suite
+    | pre_end_per_group
+    | pre_end_per_testcase.
+
+-type hook_level() ::
+    suite
+    | group
+    | testcase.
+
+-type hook_response() ::
+    [config()]
+    | {skip, term()}
+    | {fail, term()}.
+
+-type hook_config() ::
+    module()
+    | {module(), Options :: [term()]}
+    | {module(), Options :: [term()], Priority :: integer()}.
+
 %%--------------------------------------------------------------------
 %%% API
 
 -spec set_state(id(), hook_state()) -> ok.
 set_state(Id, State) ->
-    gen_server:call(?MODULE, {set_state, Id, State}).
+    ok = gen_server:call(?MODULE, {set_state, Id, State}).
 
--spec get_state(id()) -> {ok, hook_state()} | {error, not_found}.
+-spec get_state(id()) -> {ok, hook_state()} | {error, {not_found, list()}}.
 get_state(Id) ->
-    gen_server:call(?MODULE, {get_state, Id}).
+    case gen_server:call(?MODULE, {get_state, Id}) of
+        {ok, State} -> {ok, State};
+        Error = {error, {not_found, Details}} when is_list(Details) -> Error
+    end.
 
 -spec wrap(part(), [atom()], fun()) -> fun().
 wrap(Part, Path, Fun) ->
@@ -86,7 +121,7 @@ get_hooks() ->
 
 %% @doc
 %% Starts the server within supervision tree
--spec start_monitor() -> gen_server:start_ret().
+-spec start_monitor() -> gen_server:start_mon_ret().
 start_monitor() ->
     gen_server:start_monitor({local, ?MODULE}, ?MODULE, [], []).
 
@@ -97,12 +132,13 @@
 init([]) ->
     {ok, initialize_hooks()}.
 
--spec handle_call(Request :: term(), From :: gen_server:from(), State :: state()) ->
-    no_return().
+-spec handle_call({get_state, id()}, gen_server:from(), state()) -> {reply, {ok, hook_state()}, state()} | {error, {not_found, list()}};
+    ({set_state, id(), hook_state()}, gen_server:from(), state()) -> {reply, ok, state()};
+    ({wrap, part(), fun()}, gen_server:from(), state()) -> {reply, fun(([atom() | config()]) -> term()), state()}.
 handle_call({get_state, Id}, _From, State = #{states := HookStates}) ->
     case HookStates of
         #{Id := HookState} -> {reply, {ok, HookState}, State};
-        _ -> {error, not_found, [{state, State}, {id, Id}]}
+        _ -> {error, {not_found, [{state, State}, {id, Id}]}}
     end;
 handle_call({set_state, Id, HookState}, _From, State = #{states := HookStates}) ->
     {reply, ok, State#{states => HookStates#{Id => HookState}}};
@@ -131,7 +167,7 @@
         end
      || {Mod, Opts, Prio} <- NormalizedConfiguredHooks
     ],
-    %% according to documentation, if two hooks have the same ID, the latter one get's dropped
+    %% according to documentation, if two hooks have the same ID, the latter one gets dropped
     PreInitHooks0 = lists:ukeysort(2, HooksWithId),
     %% now sort with configured prio the inits (default prio being 0)
     PreInitHooks1 = lists:keysort(1, PreInitHooks0),
@@ -156,6 +192,7 @@
         hooks => [Hook || {_Priority, Hook} <- SortedHooks]
     }.
 
+-spec get_hooks_config() -> [hook_config()].
 get_hooks_config() ->
     application:get_env(test_exec, ct_daemon_hooks, []) ++
         proplists:get_value(ct_hooks, application:get_env(test_exec, daemon_options, []), []).
@@ -164,8 +201,29 @@
 wrap_part(Part, Fun, State) ->
     wrap_init_end(Part, Fun, State).
 
-wrap_init_end(Part, Fun, #{hooks := Hooks}) ->
-    WrappedWithPreAndPost = lists:foldl(
+-spec wrap_init_end(part(), fun(), state()) -> fun(([atom() | config()]) -> term()).
+wrap_init_end(Part, Fun, #{hooks := HooksInInstallationOrder}) ->
+    %% NOTE ON EXECUTION ORDER:
+    %%
+    %% As of OTP/26 CT's behaviour according to [https://www.erlang.org/doc/apps/common_test/ct_hooks_chapter#cth-execution-order]:
+    %% > By default, each CTH installed is executed in the order that they are installed for init calls,
+    %% > and then reversed for end calls. This is not always desired, so Common Test allows the user to specify
+    %% > a priority for each hook.
+    %%
+    %% Implicit here is:
+    %% - pre_init and post_init functions are executed in the same order
+    %% - the hook with the "highest numerical priority" will be the first to run pre_init_per_xxxx
+    %%
+    %% Starting from OTP/27, CT adds a new ct_hooks_order option. The behaviour above is called `test`, and a
+    %% new behaviour called `config` will be added, in which the order of the post_xxxx functions is reversed with respect to pre_xxxx
+    %% (see [https://github.com/erlang/otp/issues/7397] for discussion, and [https://github.com/erlang/otp/pull/7496]
+    %% for the upcoming ct_hooks_order option).
+    %%
+    %% Here we implement only the behaviour that corresponds to the new `config` option.
+
+    %% NB. we use a foldr to ensure that the first hook in HooksInInstallationOrder is the innermost, and thus the first
+    %% one to be executed
+    WrappedWithPreAndPost = lists:foldr(
         fun(Hook, FunToWrap) ->
             fun(FullPathArg, ConfigArg0) ->
                 PathArg =
@@ -178,9 +236,9 @@
                 end,
                 case call_if_exists_with_fallback_store_state(Hook, pre(Part), PathArg ++ [ConfigArg0], ok) of
                     {skip, SkipReason} ->
-                        {skipped, SkipReason};
+                        {skip, SkipReason};
                     {fail, FailReason} ->
-                        {failed, FailReason};
+                        {fail, FailReason};
                     HookCallbackResult ->
                         ConfigArg1 =
                             case is_list(HookCallbackResult) of
@@ -199,12 +257,12 @@
                                 {tc_status, {skipped, SkipReason}}
                                 | lists:keydelete(tc_status, 1, ConfigArg1)
                             ],
-                            {skipped, SkipReason}
+                            {skip, SkipReason}
                         };
                     {fail, FailReason} ->
                         {
                             [{tc_status, {failed, FailReason}} | lists:keydelete(tc_status, 1, ConfigArg1)],
-                            {failed, FailReason}
+                            {fail, FailReason}
                         };
                     OkResult ->
                         ConfigArg2 =
@@ -229,22 +287,17 @@
             end
         end,
         normalize_part(Part, Fun),
-        Hooks
+        HooksInInstallationOrder
    ),
    %% after the post_per functions we need to handle now failures, and call either on_tc_fail or on_tc_skip
    fun(PathArg, ConfigArg) ->
        [Suite | _] = PathArg,
        Result =
            try WrappedWithPreAndPost(PathArg, ConfigArg) of
-                Skip = {skipped, _Reason} ->
-                    Skip;
-                Fail = {failed, _Reason} ->
-                    Fail;
-                %% if we don't have a hook setup, we still need to do the conversion from skip/fail to skipped/failed
                {skip, SkipReason} ->
-                    {skipped, SkipReason};
+                    {skip, SkipReason};
                {fail, FailReason} ->
-                    {failed, FailReason};
+                    {fail, FailReason};
                MaybeConfig ->
                    case init_or_end(Part) of
                        'end' ->
@@ -259,50 +312,51 @@
                    end
                end
            catch
-                Class:Reason:Stacktrace -> {failed, {'EXIT', {{Class, Reason}, Stacktrace}}}
+                Class:Reason:Stacktrace -> {fail, {'EXIT', {{Class, Reason}, Stacktrace}}}
            end,
-        handle_post_result(Hooks, build_test_name(Part, PathArg), Suite, Result)
+        handle_post_result(HooksInInstallationOrder, build_test_name(Part, PathArg), Suite, Result)
    end.

+-spec handle_post_result([hook()], test_name(), module(), {ok, [config()]} | {skip, term()} | {fail, term()}) -> hook_response().
handle_post_result(Hooks, TestName, Suite, Result) -> ReverseHooks = lists:reverse(Hooks), case Result of - SkipResult = {skipped, _} -> + {skip, SkipReason} -> [ call_if_exists_with_fallback_store_state( - Hook, on_tc_skip, [Suite, TestName, SkipResult], ok + Hook, on_tc_skip, [Suite, TestName, {tc_user_skip, SkipReason}], ok ) || Hook <- ReverseHooks ], - SkipResult; - FailResult = {failed, _} -> + {skip, SkipReason}; + {fail, FailReason} -> [ call_if_exists_with_fallback_store_state( - Hook, on_tc_fail, [Suite, TestName, FailResult], ok + Hook, on_tc_fail, [Suite, TestName, FailReason], ok ) || Hook <- ReverseHooks ], - FailResult; + {fail, FailReason}; {ok, Config} -> case lists:keyfind(tc_status, 1, Config) of false -> Config; - {tc_status, SkipResult = {skipped, _}} -> + {tc_status, {skipped, SkipReason}} -> [ call_if_exists_with_fallback_store_state( - Hook, on_tc_skip, [Suite, TestName, SkipResult], ok + Hook, on_tc_skip, [Suite, TestName, {tc_user_skip, SkipReason}], ok ) || Hook <- ReverseHooks ], - SkipResult; - {tc_status, FailResult = {failed, _}} -> + {skip, SkipReason}; + {tc_status, {failed, FailReason}} -> [ call_if_exists_with_fallback_store_state( - Hook, on_tc_fail, [Suite, TestName, FailResult], ok + Hook, on_tc_fail, [Suite, TestName, FailReason], ok ) || Hook <- ReverseHooks ], - FailResult + {fail, FailReason} end end. @@ -339,22 +393,21 @@ build_test_name(end_per_testcase, Path) -> [Test, Group | _] = lists:reverse(Path), {Group, Test}. --spec get_hook_module(module() | {module(), Options} | {module(), Options, Priority}) -> module() when - Options :: list(), Priority :: integer(). +-spec get_hook_module(hook_config()) -> module(). get_hook_module({Mod, _, _}) -> Mod; get_hook_module({Mod, _}) -> Mod; get_hook_module(Mod) -> Mod. --spec get_hook_opts(module() | {module(), Options} | {module(), Options, Priority}) -> Options when - Options :: list(), Priority :: integer(). + +-spec get_hook_opts(hook_config()) -> [term()]. get_hook_opts({_, Opts, _}) -> Opts; get_hook_opts({_, Opts}) -> Opts; get_hook_opts(_) -> []. --spec get_hook_priority(module() | {module(), Options} | {module(), Options, Priority}) -> Priority when - Options :: list(), Priority :: integer(). +-spec get_hook_priority(hook_config()) -> integer() | undefined. get_hook_priority({_, _, Prio}) -> Prio; get_hook_priority(_) -> undefined. +-spec normalize_part(part(), fun()) -> fun(). normalize_part(Part, Fun) -> SafeFun = get_safe_part(Part, Fun), case level(Part) of @@ -364,21 +417,24 @@ normalize_part(Part, Fun) -> end. %% wrappers because most calls are optional +-spec call_if_exists(module(), atom(), [term()], Default :: {'$lazy', LazyFun :: fun(() -> term())} | term()) -> term(). call_if_exists(Mod, Fun, Args, Default) -> case erlang:function_exported(Mod, Fun, erlang:length(Args)) of true -> erlang:apply(Mod, Fun, Args); false -> case Default of - {'$lazy', LazyFun} -> LazyFun(); + {'$lazy', LazyFun} when is_function(LazyFun, 0) -> LazyFun(); _ -> Default end end. +-spec call_if_exists_with_fallback(module(), atom(), [term()], term()) -> term(). call_if_exists_with_fallback(Mod, Fun, Args, ReturnDefault) -> [_ | FallbackArgs] = Args, call_if_exists(Mod, Fun, Args, {'$lazy', fun() -> call_if_exists(Mod, Fun, FallbackArgs, ReturnDefault) end}). +-spec call_if_exists_with_fallback_store_state({module(), term()}, atom(), [term()], term()) -> term(). 
call_if_exists_with_fallback_store_state({Mod, Id}, Fun, Args, ReturnDefault) -> {ok, State} = get_state(Id), Default = @@ -416,6 +472,7 @@ wrapped_init({Mod, Id}, Opts, ConfiguredPriority) -> _ -> {ConfiguredPriority, InitState} end. +-spec pre(part()) -> pre_hook_call(). pre(init_per_suite) -> pre_init_per_suite; pre(init_per_group) -> pre_init_per_group; pre(init_per_testcase) -> pre_init_per_testcase; @@ -423,6 +480,7 @@ pre(end_per_suite) -> pre_end_per_suite; pre(end_per_group) -> pre_end_per_group; pre(end_per_testcase) -> pre_end_per_testcase. +-spec post(part()) -> post_hook_call(). post(init_per_suite) -> post_init_per_suite; post(init_per_group) -> post_init_per_group; post(init_per_testcase) -> post_init_per_testcase; @@ -430,6 +488,7 @@ post(end_per_suite) -> post_end_per_suite; post(end_per_group) -> post_end_per_group; post(end_per_testcase) -> post_end_per_testcase. +-spec level(part()) -> hook_level(). level(init_per_suite) -> suite; level(init_per_group) -> group; level(init_per_testcase) -> testcase; @@ -437,6 +496,7 @@ level(end_per_suite) -> suite; level(end_per_group) -> group; level(end_per_testcase) -> testcase. +-spec init_or_end(part()) -> init | 'end'. init_or_end(init_per_suite) -> init; init_or_end(init_per_group) -> init; init_or_end(init_per_testcase) -> init; @@ -444,12 +504,14 @@ init_or_end(end_per_suite) -> 'end'; init_or_end(end_per_group) -> 'end'; init_or_end(end_per_testcase) -> 'end'. +-spec get_safe_part(part(), fun()) -> fun(). get_safe_part(Part, Fun) -> case is_exported(Fun) of true -> Fun; false -> dummy(Part) end. +-spec dummy(part()) -> fun(). dummy(init_per_suite) -> fun(Config) -> Config end; dummy(init_per_group) -> fun(_, Config) -> Config end; dummy(init_per_testcase) -> fun(_, Config) -> Config end; @@ -457,6 +519,7 @@ dummy(end_per_suite) -> fun(_) -> ok end; dummy(end_per_group) -> fun(_, _) -> ok end; dummy(end_per_testcase) -> fun(_, _) -> ok end. +-spec is_exported(fun()) -> boolean(). is_exported(Fun) -> case maps:from_list(erlang:fun_info(Fun)) of #{ diff --git a/prelude/erlang/common_test/test_exec/src/ct_daemon_logger.erl b/prelude/erlang/common_test/test_exec/src/ct_daemon_logger.erl index ab147833b5e99..74badb9db55d7 100644 --- a/prelude/erlang/common_test/test_exec/src/ct_daemon_logger.erl +++ b/prelude/erlang/common_test/test_exec/src/ct_daemon_logger.erl @@ -7,7 +7,12 @@ %%%------------------------------------------------------------------- %%% @doc -%%% Setup functions for logger and CT printing facilities +%%% CT handles logging and printing by sending a message to the ct_logs +%%% process. We intercept those messages for test shell by starting a +%%% gen_server that intercepts the messages and prints them to the test +%%% shell. We do this instead of using the ct_logs process to have more +%%% control over the output and to avoid starting ct processes that +%%% might interfere with test shell's functionality. %%% @end %%% % @format @@ -15,67 +20,60 @@ -include_lib("kernel/include/logger.hrl"). +-behaviour(gen_server). + %% Public API --export([setup/2]). +-export([start/1]). + +%% gen_server callbacks +-export([init/1, handle_info/2, handle_call/3, handle_cast/2]). + +-type state() :: #{}. + +-spec init(Args) -> Result when + Args :: term(), + Result :: {ok, state()}. +init(_) -> {ok, #{}}. + +-spec handle_info(Info, State) -> {noreply, State} when + Info :: term(), + State :: state(). 
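Assuming the 8-element {log, ...} tuple matched by the handle_info/2 clause below keeps the shape ct_logs uses internally (an assumption about an undocumented OTP message format), the interceptor can be exercised directly once it is registered under the ct_logs name:

    %% hypothetical shell session; the payload mirrors the clause below
    1> ct_logs ! {log, async, self(), group_leader(), default, 50,
                  [{"hello ~s", ["world"]}], false}.
    hello world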
+handle_info({log, _SyncOrAsync, _FromPid, _GL, _Category, _Importance, Content, _EscChars} = _Info, State) when + is_list(Content) +-> + % Mimics behaviour from the logger_loop function in ct_logs.erl + IoList = lists:foldl( + fun + ({Format, Args}, IoList) when is_list(Format), is_list(Args) -> + [io_lib:format(Format, Args), "\n", IoList]; + (_, IoList) -> + IoList + end, + [], + Content + ), + io:format("~ts~n", [IoList]), + {noreply, State}; +handle_info(_Info, State) -> + % ignore + {noreply, State}. + +-spec handle_call(Request, From, State) -> {noreply, State} when + Request :: term(), + From :: gen_server:from(), + State :: state(). +handle_call(_Info, _From, State) -> {noreply, State}. + +-spec handle_cast(Request, State) -> {noreply, State} when + Request :: term(), + State :: state(). +handle_cast(_Info, State) -> {noreply, State}. %% @doc mocks for ct_logs functions --spec setup(file:filename_all(), boolean()) -> ok. -setup(OutputDir, InstrumentCTLogs) -> +-spec start(file:filename_all()) -> ok. +start(OutputDir) -> LogFile = test_logger:get_log_file(OutputDir, ct_daemon), ok = test_logger:configure_logger(LogFile), - %% check is we need to instrument ct_logs - %% this somehow crashes the node startup if CT runs on the - %% controlling node - case InstrumentCTLogs of - true -> - meck:new(ct_logs, [passthrough, no_link]), - meck:expect(ct_logs, tc_log, fun tc_log/3), - meck:expect(ct_logs, tc_log, fun tc_log/4), - meck:expect(ct_logs, tc_log, fun tc_log/5), - meck:expect(ct_logs, tc_print, fun tc_print/3), - meck:expect(ct_logs, tc_print, fun tc_print/4), - meck:expect(ct_logs, tc_print, fun tc_print/5), - meck:expect(ct_logs, tc_pal, fun tc_pal/3), - meck:expect(ct_logs, tc_pal, fun tc_pal/4), - meck:expect(ct_logs, tc_pal, fun tc_pal/5); - _ -> - ok - end, + {ok, _} = gen_server:start_link({local, ct_logs}, ?MODULE, #{}, []), ok. - -tc_log(Category, Format, Args) -> - tc_print(Category, 1000, Format, Args). - -tc_log(Category, Importance, Format, Args) -> - tc_print(Category, Importance, Format, Args, []). - -tc_log(Category, Importance, Format, Args, _Opts) -> - LogMessage = lists:flatten( - io_lib:format("[ct_logs][~p][~p] ~s", [Category, Importance, Format]) - ), - ?LOG_INFO(LogMessage, Args). - -tc_print(Category, Format, Args) -> - tc_print(Category, 1000, Format, Args). - -tc_print(Category, Importance, Format, Args) -> - tc_print(Category, Importance, Format, Args, []). - -tc_print(_Category, _Importance, Format, Args, _Opts) -> - FormatWithTimesStamp = io_lib:format("[~s] ~s\n", [timestamp(), Format]), - FinalFormat = lists:flatten(FormatWithTimesStamp), - io:format(FinalFormat, Args). - -tc_pal(Category, Format, Args) -> - tc_print(Category, 1000, Format, Args). - -tc_pal(Category, Importance, Format, Args) -> - tc_print(Category, Importance, Format, Args, []). - -tc_pal(Category, Importance, Format, Args, Opts) -> - ct_logs:tc_log(Category, Importance, Format, Args, [no_css | Opts]), - tc_print(Category, Importance, Format, Args, Opts). - -timestamp() -> - calendar:system_time_to_rfc3339(erlang:system_time(second)). diff --git a/prelude/erlang/common_test/test_exec/src/ct_daemon_node.erl b/prelude/erlang/common_test/test_exec/src/ct_daemon_node.erl index d260bc249210e..7a8a627c70a32 100644 --- a/prelude/erlang/common_test/test_exec/src/ct_daemon_node.erl +++ b/prelude/erlang/common_test/test_exec/src/ct_daemon_node.erl @@ -14,10 +14,12 @@ -module(ct_daemon_node). +-compile(warn_missing_spec_all). + -include_lib("kernel/include/logger.hrl"). 
%% Public API
--export([start/0, start/1, stop/0, alive/0, get_node/0]).
+-export([start/1, start/2, stop/0, alive/0, get_node/0]).

 -export([node_main/1, get_domain_type/0]).

@@ -37,8 +39,9 @@
 -export_type([config/0]).

 %% @doc start node for running tests in an isolated way and keep state
--spec start() -> ok.
-start() ->
+-spec start(ErlCommand) -> ok when
+    ErlCommand :: nonempty_list(binary()).
+start(ErlCommand) ->
     NodeName = list_to_atom(
         lists:flatten(io_lib:format("test~s-atn@localhost", [random_name()]))
     ),
@@ -48,11 +51,14 @@ start() ->
         cookie => ct_runner:cookie(),
         options => []
     },
-    start(StartConfig).
+    start(ErlCommand, StartConfig).

 %% @doc start node for running tests in an isolated way and keep state
--spec start(config()) -> ok | {error, {crash_on_startup, integer()}}.
+-spec start(ErlCommand, Config) -> ok | {error, {crash_on_startup, integer()}} when
+    ErlCommand :: nonempty_list(binary()),
+    Config :: config().
 start(
+    ErlCommand,
     _Config = #{
         type := Type,
         name := Node,
@@ -69,10 +75,9 @@ start(
     FullOptions = [{output_dir, OutputDir} | Options],
     Args = build_daemon_args(Type, Node, Cookie, FullOptions, OutputDir),
     % Replay = maps:get(replay, Config, false),
-    % We should forward emu flags here,
-    % see T129435667
     Port = ct_runner:start_test_node(
-        os:find_executable("erl"),
+        ErlCommand,
+        [],
         CodePaths,
         ConfigFiles,
         OutputDir,
@@ -83,6 +88,7 @@ start(
     true = erlang:register(?MODULE, self()),
     port_loop(Port, []).

+-spec port_loop(port(), list()) -> ok | {error, {crash_on_startup, integer()}}.
 port_loop(Port, Acc) ->
     receive
         {Port, {data, {eol, Line}}} ->
@@ -105,6 +111,7 @@ stop() ->
     %% monitor node
     true = erlang:monitor_node(Node, true),
     %% kill node
+    %% elp:ignore W0014
     _Pid = erlang:spawn(Node, fun() -> erlang:halt() end),
     %% wait for node to come down
     receive
@@ -125,7 +132,7 @@ alive() ->

 %% @doc node main entry point
 -spec node_main([node()]) -> no_return().
-node_main([Parent, OutputDirAtom, InstrumentCTLogs]) ->
+node_main([Parent, OutputDirAtom]) ->
     ok = application:load(test_exec),

     OutputDir = erlang:atom_to_list(OutputDirAtom),
@@ -133,7 +140,7 @@ node_main([Parent, OutputDirAtom, InstrumentCTLogs]) ->
     erlang:system_flag(backtrace_depth, 20),

     %% setup logger and prepare IO
-    ok = ct_daemon_logger:setup(OutputDir, InstrumentCTLogs),
+    ok = ct_daemon_logger:start(OutputDir),

     true = net_kernel:connect_node(Parent),

@@ -168,7 +175,7 @@ ensure_distribution(Type, RandomName, Cookie) ->
     ([] = os:cmd("epmd -daemon")),
     Name = list_to_atom(
         lists:flatten(
-            io_lib:format("ct_daemon~s", [RandomName])
+            io_lib:format("ct_daemon~s@localhost", [RandomName])
         )
     ),
     {ok, _Pid} = net_kernel:start(Name, #{name_domain => Type}),
@@ -189,7 +196,6 @@ build_daemon_args(Type, Node, Cookie, Options, OutputDir) ->
             longnames -> "-name";
             shortnames -> "-sname"
         end,
-    InstrumentCTLogs = erlang:whereis(ct_logs) =:= undefined,
     [
         DistArg,
         convert_atom_arg(Node),
@@ -202,8 +208,7 @@ build_daemon_args(Type, Node, Cookie, Options, OutputDir) ->
         convert_atom_arg(?MODULE),
         "node_main",
         convert_atom_arg(erlang:node()),
-        OutputDir,
-        convert_atom_arg(InstrumentCTLogs)
+        OutputDir
     ].

 -spec convert_atom_arg(atom()) -> string().
@@ -212,13 +217,8 @@ convert_atom_arg(Arg) ->

 -spec get_config_files() -> [file:filename_all()].
 get_config_files() ->
-    _ = application:load(test_exec),
-    PrivDir = code:priv_dir(test_exec),
-    [
-        ConfigFile
-     || ConfigFile <- filelib:wildcard(filename:join(PrivDir, "*")),
-        filename:extension(ConfigFile) =:= ".config"
-    ].
+ %% get config files from command line + [F || {config, F} <- init:get_arguments()]. -spec gen_output_dir(RandomName :: string()) -> file:filename(). gen_output_dir(RandomName) -> diff --git a/prelude/erlang/common_test/test_exec/src/ct_daemon_printer.erl b/prelude/erlang/common_test/test_exec/src/ct_daemon_printer.erl index f85b52b3b0d7e..b20b3efbfdf67 100644 --- a/prelude/erlang/common_test/test_exec/src/ct_daemon_printer.erl +++ b/prelude/erlang/common_test/test_exec/src/ct_daemon_printer.erl @@ -32,6 +32,8 @@ print_summary(Total, Passed, FailedOrSkipped) -> ok | fail. print_result(Name, pass_result) -> io:format("~ts ~ts~n", [?CHECK_MARK, Name]); +print_result(Name, {fail, {TestId, {end_per_testcase, E}}}) -> + print_result(Name ++ " [end_per_testcase]", {fail, {TestId, E}}); print_result(Name, {fail, {_TestId, {thrown, {Reason, Stacktrace}}}}) -> print_error(Name, throw, Reason, Stacktrace); print_result(Name, {fail, {_TestId, {Reason, Stacktrace}}}) -> @@ -53,6 +55,10 @@ print_result(Name, {error, {_TestId, {'ct_daemon_core$sentinel_crash', Info}}}) io:format("~ts ~ts~n", [?CROSS_MARK, Name]), io:format("Test process received EXIT signal with reason: ~p~n", [Info]), fail; +print_result(Name, {error, {_TestId, {timetrap, TimeoutValue}}}) -> + io:format("~ts ~ts~n", [?CROSS_MARK, Name]), + io:format("Test timed out after ~p ms~n", [TimeoutValue]), + fail; print_result(Name, Unstructured) -> io:format("~ts ~ts~n", [?CROSS_MARK, Name]), io:format("unable to format failure reason, please report.~n"), @@ -74,7 +80,7 @@ print_skip_error(Name, Where, Type, Reason, Stacktrace) -> skip. print_skip_location({_, GroupOrSuite}) -> - case re:match(atom_to_list(GroupOrSuite), "SUITE$") of + case re:run(atom_to_list(GroupOrSuite), "SUITE$") of nomatch -> io_lib:format("init_per_group of ~s", [GroupOrSuite]); _ -> "init_per_suite" end; diff --git a/prelude/erlang/common_test/test_exec/src/ct_daemon_runner.erl b/prelude/erlang/common_test/test_exec/src/ct_daemon_runner.erl index 529de78d4ae7b..831dbfdb48e6c 100644 --- a/prelude/erlang/common_test/test_exec/src/ct_daemon_runner.erl +++ b/prelude/erlang/common_test/test_exec/src/ct_daemon_runner.erl @@ -95,6 +95,14 @@ handle_call({discover, RegExOrTestId}, _From, State) -> State} end; handle_call({gl, GL}, _From, State) -> + UserReplayPid = spawn(fun Loop() -> + receive + Msg -> GL ! Msg + end, + Loop() + end), + erlang:unregister(user), + erlang:register(user, UserReplayPid), {reply, erlang:group_leader(GL, self()), State}; handle_call(load_changed, _From, State) -> {reply, load_changed_modules(), State}; @@ -105,6 +113,15 @@ handle_call(setup, _From, State) -> handle_call(output_dir, _From, State) -> DaemonOptions = application:get_env(test_exec, daemon_options, []), {reply, proplists:get_value(output_dir, DaemonOptions), State}; +handle_call(priv_dir, _From, State) -> + Response = + case State of + #{setup := #{config := Config}} -> + proplists:get_value(priv_dir, Config); + _ -> + undefined + end, + {reply, Response, State}; handle_call(Request, _From, State) -> {reply, Request, State}. diff --git a/prelude/erlang/common_test/test_exec/src/ct_executor.erl b/prelude/erlang/common_test/test_exec/src/ct_executor.erl index f10db6f1d3d78..e7c8e0f2df290 100644 --- a/prelude/erlang/common_test/test_exec/src/ct_executor.erl +++ b/prelude/erlang/common_test/test_exec/src/ct_executor.erl @@ -11,15 +11,25 @@ %% Notably allows us to call post/pre method on the node if needed, e.g for coverage. -module(ct_executor). - -include_lib("kernel/include/logger.hrl"). 
-include_lib("common/include/buck_ct_records.hrl"). +-compile(warn_missing_spec_all). -export([run/1]). -% Time we give the beam to close off, in ms. --define(INIT_STOP_TIMEOUT, 5000). +%% `ct_run_arg()` represents an option accepted by ct:run_test/1, such as +%% `multiply_timetraps` or `ct_hooks`. +%% For all the options, see https://www.erlang.org/doc/man/ct#run_test-1 +-type ct_run_arg() :: {atom(), term()}. +-type ct_exec_arg() :: {output_dir | suite | providers, term()}. + +% For testing +-export([split_args/1]). + +-define(STDOUT_MAX_LINES, 1000). +-define(STDOUT_MAX_LINE_LENGTH, 10000). +-spec run([string()]) -> no_return(). run(Args) when is_list(Args) -> ExitCode = try @@ -37,6 +47,9 @@ run(Args) when is_list(Args) -> % Therefore we used io:format to forward information to the % process calling it (ct_runner). try + % We need to load the 'common' application to be able to configure + % it via the `common_app_env` arguments + application:load(common), % We consult all the .app files to load the atoms. % This solution is less than optimal and should be addressed % T120903856 @@ -61,7 +74,9 @@ run(Args) when is_list(Args) -> erlang:system_flag(backtrace_depth, 20), ?LOG_DEBUG("ct_run called with arguments ~p ~n", [CtRunArgs]), Providers1 = [buck_ct_provider:do_pre_running(Provider) || Provider <- Providers0], - {ok, IoBuffer} = io_buffer:start_link(), + {ok, IoBuffer} = io_buffer:start_link(#{ + passthrough => true, max_elements => ?STDOUT_MAX_LINES, max_length => ?STDOUT_MAX_LINE_LENGTH + }), register(cth_tpx_io_buffer, IoBuffer), %% set global timeout Result = ct:run_test(CtRunArgs), @@ -85,23 +100,9 @@ run(Args) when is_list(Args) -> io:format("~ts\n", [erl_error:format_exception(Class1, Reason1, Stack1)]), 1 end, - case ExitCode of - 0 -> - init:stop(0), - receive - after ?INIT_STOP_TIMEOUT -> - ?LOG_ERROR( - io_lib:format("~p failed to terminate within ~c millisecond", [ - ?MODULE, ?INIT_STOP_TIMEOUT - ]) - ), - erlang:halt(0) - end; - _ -> - erlang:halt(ExitCode) - end. + erlang:halt(ExitCode). --spec parse_arguments([string()]) -> {proplists:proplist(), [term()]}. +-spec parse_arguments([string()]) -> {[ct_exec_arg()], [ct_run_arg()]}. parse_arguments(Args) -> % The logger is not set up yet. % This will be sent to the program executing it (ct_runner), @@ -120,14 +121,27 @@ parse_arguments(Args) -> split_args(ParsedArgs). % @doc Splits the argument before those that happens -% before ct_args (the executor args) amd those after -% (the args for ct_run). -split_args(Args) -> split_args(Args, [], []). +% before ct_args (the executor args) and those after +% (the args for ct_run). ct_args will always be +% present in the list +-spec split_args([term()]) -> {[ct_exec_arg()], [ct_run_arg()]}. +split_args(Args) -> + {CtExecutorArgs, [ct_args | CtRunArgs]} = lists:splitwith(fun(Arg) -> Arg =/= ct_args end, Args), + {parse_ct_exec_args(CtExecutorArgs), parse_ct_run_args(CtRunArgs)}. + +-spec parse_ct_run_args([term()]) -> [ct_run_arg()]. +parse_ct_run_args([]) -> + []; +parse_ct_run_args([{Key, _Value} = Arg | Args]) when is_atom(Key) -> + [Arg | parse_ct_run_args(Args)]. -split_args([ct_args | Args], CtExecutorArgs, []) -> {lists:reverse(CtExecutorArgs), Args}; -split_args([Arg | Args], CtExecutorArgs, []) -> split_args(Args, [Arg | CtExecutorArgs], []); -split_args([], CtExecutorArgs, []) -> {lists:reverse(CtExecutorArgs), []}. +-spec parse_ct_exec_args([term()]) -> [ct_exec_arg()]. 
+parse_ct_exec_args([]) -> + []; +parse_ct_exec_args([{Key, _Value} = Arg | Args]) when Key =:= output_dir; Key =:= suite; Key =:= providers -> + [Arg | parse_ct_exec_args(Args)]. +-spec debug_print(string(), [term()]) -> ok. debug_print(Fmt, Args) -> case os:getenv("ERLANG_BUCK_DEBUG_PRINT") of false -> io:format(Fmt, Args); diff --git a/prelude/erlang/common_test/test_exec/src/ct_runner.erl b/prelude/erlang/common_test/test_exec/src/ct_runner.erl index a56928de6e880..cd401834320e4 100644 --- a/prelude/erlang/common_test/test_exec/src/ct_runner.erl +++ b/prelude/erlang/common_test/test_exec/src/ct_runner.erl @@ -28,8 +28,8 @@ ]). -export([ - start_test_node/5, start_test_node/6, + start_test_node/7, cookie/0, generate_arg_tuple/2, project_root/0 @@ -143,7 +143,9 @@ run_test( suite_path = SuitePath, providers = Providers, suite = Suite, - erl_cmd = ErlCmd + erl_cmd = ErlCmd, + extra_flags = ExtraFlags, + common_app_env = CommonAppEnv } = _TestEnv, PortEpmd ) -> @@ -152,12 +154,13 @@ run_test( SuiteFolder = filename:dirname(filename:absname(SuitePath)), CodePath = [SuiteFolder | Dependencies], - Args = build_run_args(OutputDir, Providers, Suite, TestSpecFile), + Args = build_run_args(OutputDir, Providers, Suite, TestSpecFile, CommonAppEnv), {ok, ProjectRoot} = file:get_cwd(), start_test_node( ErlCmd, + ExtraFlags, CodePath, ConfigFiles, OutputDir, @@ -186,49 +189,79 @@ build_common_args(CodePath, ConfigFiles) -> OutputDir :: file:filename_all(), Providers :: [{module(), [term()]}], Suite :: module(), - TestSpecFile :: file:filename_all() + TestSpecFile :: file:filename_all(), + CommonAppEnv :: #{string() => string()} ) -> [string()]. -build_run_args(OutputDir, Providers, Suite, TestSpecFile) -> - lists:concat([ - ["-run", "ct_executor", "run"], - generate_arg_tuple(output_dir, OutputDir), - generate_arg_tuple(providers, Providers), - generate_arg_tuple(suite, Suite), - ["ct_args"], - generate_arg_tuple(spec, TestSpecFile) - ]). +build_run_args(OutputDir, Providers, Suite, TestSpecFile, CommonAppEnv) -> + lists:append( + [ + ["-run", "ct_executor", "run"], + generate_arg_tuple(output_dir, OutputDir), + generate_arg_tuple(providers, Providers), + generate_arg_tuple(suite, Suite), + ["ct_args"], + generate_arg_tuple(spec, TestSpecFile), + common_app_env_args(CommonAppEnv) + ] + ). + +-spec common_app_env_args(Env :: #{string() => string()}) -> [string()]. +common_app_env_args(Env) -> + lists:append([["-common", Key, Value] || {Key, Value} <- maps:to_list(Env)]). -spec start_test_node( - Erl :: string(), + Erl :: [binary()], + ExtraFlags :: [string()], CodePath :: [file:filename_all()], ConfigFiles :: [file:filename_all()], OutputDir :: file:filename_all(), PortSettings :: port_settings() ) -> port(). -start_test_node(ErlCmd, CodePath, ConfigFiles, OutputDir, PortSettings0) -> - start_test_node(ErlCmd, CodePath, ConfigFiles, OutputDir, PortSettings0, false). +start_test_node( + ErlCmd, + ExtraFlags, + CodePath, + ConfigFiles, + OutputDir, + PortSettings0 +) -> + start_test_node( + ErlCmd, + ExtraFlags, + CodePath, + ConfigFiles, + OutputDir, + PortSettings0, + false + ). -spec start_test_node( - Erl :: string(), + Erl :: [binary()], + ExtraFlags :: [string()], CodePath :: [file:filename_all()], ConfigFiles :: [file:filename_all()], OutputDir :: file:filename_all(), PortSettings :: port_settings(), ReplayIo :: boolean() ) -> port(). 
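The motivation for the new ErlCommand shape, sketched with invented values: previously the erl command arrived as a single flat string and was split on spaces (see the removed string:split call below), which broke for executable paths containing spaces; a pre-tokenized list of binaries needs no re-parsing:

    %% old form: "erl +A0" -> string:split -> ["erl", "+A0"],
    %% and the executable component was discarded anyway.
    %% new form: used as-is, first element is the executable:
    ErlCmd = [<<"/path with spaces/erl">>, <<"+A0">>],
    [ErlExecutable | Flags] = ErlCmd.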
-start_test_node(ErlCmd, CodePath, ConfigFiles, OutputDir, PortSettings0, ReplayIo) -> +start_test_node( + ErlCmd, + ExtraFlags, + CodePath, + ConfigFiles, + OutputDir, + PortSettings0, + ReplayIo +) -> % split of args from Erl which can contain emulator flags - [_Executable | ExtraFlags] = string:split(ErlCmd, " ", all), - % we ignore the executable we got, and use the erl command from the - % toolchain that executes this code - ErlExecutable = os:find_executable("erl"), + [ErlExecutable | Flags] = ErlCmd, % HomeDir is the execution directory. HomeDir = set_home_dir(OutputDir), %% merge args, enc, cd settings LaunchArgs = - ExtraFlags ++ + Flags ++ ExtraFlags ++ build_common_args(CodePath, ConfigFiles) ++ proplists:get_value(args, PortSettings0, []), @@ -283,16 +316,48 @@ config_arg(ConfigFiles) -> ["-config"] ++ ConfigFiles. %% Each test execution will have a separate home dir with a %% erlang default cookie file, setting the default cookie to %% buck2-test-runner-cookie --spec set_home_dir(file:filename()) -> file:filename(). +-spec set_home_dir(file:filename_all()) -> file:filename_all(). set_home_dir(OutputDir) -> HomeDir = filename:join(OutputDir, "HOME"), ErlangCookieFile = filename:join(HomeDir, ".erlang.cookie"), ok = filelib:ensure_dir(ErlangCookieFile), ok = file:write_file(ErlangCookieFile, atom_to_list(cookie())), ok = file:change_mode(ErlangCookieFile, 8#00400), + + % In case the system is using dotslash, we leave a symlink to + % the real dotslash cache, otherwise erl could be re-downloaded, etc + try_setup_dotslash_cache(HomeDir), + HomeDir. --spec cookie() -> string(). +-spec try_setup_dotslash_cache(FakeHomeDir :: file:filename_all()) -> ok. +try_setup_dotslash_cache(FakeHomeDir) -> + case init:get_argument(home) of + {ok, [[RealHomeDir]]} -> + RealDotslashCacheDir = filename:basedir(user_cache, "dotslash"), + + case filelib:is_file(RealDotslashCacheDir) of + false -> + ok; + true -> + RealHomeDirParts = filename:split(RealHomeDir), + RealDotslashCacheDirParts = filename:split(RealDotslashCacheDir), + + case lists:split(length(RealHomeDirParts), RealDotslashCacheDirParts) of + {RealHomeDirParts, GenDotslashCacheDirParts} -> + FakeHomeDotslashCacheDir = filename:join([FakeHomeDir | GenDotslashCacheDirParts]), + ok = filelib:ensure_path(filename:dirname(FakeHomeDotslashCacheDir)), + ok = file:make_symlink(RealDotslashCacheDir, FakeHomeDotslashCacheDir), + ok; + _ -> + ok + end + end; + _ -> + ok + end. + +-spec cookie() -> atom(). cookie() -> 'buck2-test-runner-cookie'. diff --git a/prelude/erlang/common_test/test_exec/test/ct_executor_SUITE.erl b/prelude/erlang/common_test/test_exec/test/ct_executor_SUITE.erl new file mode 100644 index 0000000000000..bcf4d0b86600e --- /dev/null +++ b/prelude/erlang/common_test/test_exec/test/ct_executor_SUITE.erl @@ -0,0 +1,41 @@ +%% Copyright (c) Meta Platforms, Inc. and affiliates. +%% This source code is licensed under both the MIT license found in the +%% LICENSE-MIT file in the root directory of this source tree and the Apache +%% License, Version 2.0 found in the LICENSE-APACHE file in the root directory +%% of this source tree. +%%% % @format +-module(ct_executor_SUITE). + +-include_lib("stdlib/include/assert.hrl"). + +-export([all/0]). + +-export([ + test_split_args/1 +]). + +all() -> + [test_split_args]. 
+ +test_split_args(_Config) -> + ?assertEqual( + {[{output_dir, ""}, {providers, [something]}, {suite, a_suite}], [{dir, ""}, {suite, a_suite}, {group, a_group}]}, + ct_executor:split_args([ + {output_dir, ""}, + {providers, [something]}, + {suite, a_suite}, + ct_args, + {dir, ""}, + {suite, a_suite}, + {group, a_group} + ]) + ), + ?assertEqual( + {[{output_dir, ""}, {providers, [something]}, {suite, a_suite}], []}, + ct_executor:split_args([{output_dir, ""}, {providers, [something]}, {suite, a_suite}, ct_args]) + ), + ?assertEqual( + {[], [{dir, ""}, {suite, a_suite}, {group, a_group}]}, + ct_executor:split_args([ct_args, {dir, ""}, {suite, a_suite}, {group, a_group}]) + ), + ?assertEqual({[], []}, ct_executor:split_args([ct_args])). diff --git a/prelude/erlang/elp.bxl b/prelude/erlang/elp.bxl new file mode 100644 index 0000000000000..c82610f4df9b2 --- /dev/null +++ b/prelude/erlang/elp.bxl @@ -0,0 +1,150 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# Provide information so that ELP is able to load a BUCK project + +load("@prelude//:paths.bzl", "paths") + +# ------------------ IMPL ------------------ + +def _clean_up_includes(includes): + # - Strip to just dir, not file name + # - Remove duplicates + # Note: Sometimes the buck rule generating the includes has an excludes glob for a directory. + # This flattening will potentially expose excluded files in the directory. + # But we do it, because otherwise the performance in erlang_service parsing is too poor. + include_paths = [_as_path(paths.dirname(p["value"])) for p in includes if p["type"] == "path"] + targets = [t for t in includes if t["type"] == "target"] + return include_paths + targets + +def _get_includes(ctx, includes_target): + return _clean_up_includes([_get_absolute_path(ctx, inc) for inc in includes_target.value()]) + +def _dep_includes(ctx, dep, target_universe): + lookup_val = "{}:{}".format(dep.path, dep.name) + dep_target = target_universe.lookup(lookup_val) + if not dep_target: + return [] + dep_target = dep_target[0] + includes = dep_target.attrs_lazy().get("includes") + + if not includes: + return [] + else: + return _get_includes(ctx, includes) + +def _get_absolute_path(ctx, src) -> dict[str, str]: + """ + Get the absolute path of the thing passed in, which is either an artifact or a target label. 
+ """ + if isinstance(src, ConfiguredProvidersLabel): + return _as_target(str(src.raw_target())) + else: + return _as_path(get_path_without_materialization(src, ctx, abs = True)) + +def _elp_config(ctx): + cells = { + cell: cell_path + for cell, cell_path in ctx.audit().cell(aliases = True).items() + } + + included_targets = ctx.cli_args.included_targets + target_universe = ctx.target_universe(included_targets) + + all = ctx.cquery().kind("^(erlang_app|erlang_test)$", ctx.configured_targets(included_targets)) + if ctx.cli_args.deps_target: + all += ctx.cquery().kind("^erlang_app$", ctx.configured_targets(ctx.cli_args.deps_target)) + result = {} + dep_includes_cache = {} # Cache of includes per dependency + for target in all: + label = target.label + label_name = label.raw_target() + deps = target.attrs_lazy().get("deps") + + includes = target.attrs_lazy().get("includes") + if not includes: + includes = [] + else: + includes = _get_includes(ctx, includes) + + if ctx.cli_args.deps_includes: + if deps: + for dep in deps.value(): + if dep in dep_includes_cache: + dep_includes = dep_includes_cache[dep] + else: + dep_includes = _dep_includes(ctx, dep, target_universe) + dep_includes_cache[dep] = dep_includes + includes = includes + dep_includes + apps = target.attrs_lazy().get("applications") + + if apps: + for app in apps.value(): + includes.append(_as_path(str(_file_node_to_path(cells, app.path)))) + + srcs = target.attrs_lazy().get("srcs") + if not srcs: + srcs = [] + else: + srcs = [_get_absolute_path(ctx, src) for src in srcs.value()] + + suite = target.attrs_lazy().get("suite") + if not suite: + suite = None + elif suite.value() == None: + suite = None + else: + suite_info = _get_absolute_path(ctx, suite.value()) + if suite_info["type"] == "path": + suite = suite_info["value"] + else: + suite = None + + includes = _build_output(includes) + srcs = _build_output(srcs) + result[label_name] = dict( + name = target.attrs_lazy().get("name"), + suite = suite, + srcs = srcs, + includes = includes, + labels = target.attrs_lazy().get("labels"), + ) + ctx.output.print_json(result) + +def _file_node_to_path(cells: dict[str, str], file_node) -> str: + cell, path = str(file_node).split("//", 1) + return paths.join(cells[cell], path) + +def _build_output(items: list[dict[str, str]]) -> list[str]: + # we completely ignore targets, since we don't have support for generated files in ELP + paths = _list_dedupe([p["value"] for p in items if p["type"] == "path"]) + return paths + +def _list_dedupe(xs: list[str]) -> list[str]: + return {x: True for x in xs}.keys() + +def _as_path(src): + return {"type": "path", "value": src} + +def _as_target(src): + return {"type": "target", "value": src} + +# ------------------ INTERFACE ------------------ + +elp_config = bxl_main( + impl = _elp_config, + cli_args = { + "deps_includes": cli_args.bool(False, doc = "Use include paths from the dependencies too."), + "deps_target": cli_args.option(cli_args.string(), doc = "Target to include deps from, if specified. See corresponding field in .elp.toml"), + "included_targets": cli_args.list(cli_args.string(), doc = "Targets to include in the query. See corresponding field in .elp.toml"), + }, +) + +# Run with `buck2 bxl prelude//erlang/elp.bxl:elp_config` +# e.g. +# buck2 bxl prelude//erlang/elp.bxl:elp_config -- --included_targets cell//... +# buck2 bxl prelude//erlang/elp.bxl:elp_config -- --included_targets cell//... 
--deps_includes true diff --git a/prelude/erlang/erlang.bzl b/prelude/erlang/erlang.bzl index 3ef89fdb18223..01d0c5b123bef 100644 --- a/prelude/erlang/erlang.bzl +++ b/prelude/erlang/erlang.bzl @@ -89,8 +89,7 @@ def erlang_tests( srcs: list[str] = [], property_tests: list[str] = [], config_files: list[str] = [], - use_default_configs: bool = True, - use_default_deps: bool = True, + common_app_env: dict[str, str] = {}, **common_attributes): """ Generate multiple erlang_test targets based on the `suites` field. @@ -104,7 +103,6 @@ def erlang_tests( srcs = srcs, property_tests = property_tests, config_files = config_files, - use_default_configs = use_default_configs, - use_default_deps = use_default_deps, + common_app_env = common_app_env, **common_attributes ) diff --git a/prelude/erlang/erlang_application.bzl b/prelude/erlang/erlang_application.bzl index e9d3a134bf837..29fac0d865f91 100644 --- a/prelude/erlang/erlang_application.bzl +++ b/prelude/erlang/erlang_application.bzl @@ -33,12 +33,8 @@ load( ":erlang_utils.bzl", "action_identifier", "build_paths", - "convert", "multidict_projection", "multidict_projection_key", - "normalise_metadata", - "str_to_bool", - "to_term_args", ) StartDependencySet = transitive_set() @@ -108,7 +104,11 @@ def build_application(ctx, toolchains, dependencies, build_fun) -> list[Provider # generate DefaultInfo and RunInfo providers default_info = _build_default_info(dependencies, primary_app_folder) - run_info = erlang_shell.build_run_info(ctx, dependencies.values(), additional_app_paths = [primary_app_folder]) + run_info = erlang_shell.build_run_info( + ctx, + dependencies = dependencies.values(), + additional_app_paths = [primary_app_folder], + ) return [ default_info, run_info, @@ -161,6 +161,14 @@ def _build_erlang_application(ctx: AnalysisContext, toolchain: Toolchain, depend is_private = True, ) + # maybe peek private includes + build_environment = erlang_build.utils.peek_private_includes( + ctx, + toolchain, + build_environment, + dependencies, + ) + # beams build_environment = erlang_build.build_steps.generate_beam_artifacts( ctx, @@ -233,8 +241,6 @@ def _generate_app_file( NOTE: We are using the .erl files as input to avoid dependencies on beams. 
""" - tools = toolchain.otp_binaries - _check_application_dependencies(ctx) app_file_name = build_paths.app_file(ctx) @@ -244,22 +250,15 @@ def _generate_app_file( app_file_name, ), ) - script = toolchain.app_file_script app_info_file = _app_info_content(ctx, toolchain, name, srcs, output) app_build_cmd = cmd_args( - [ - tools.escript, - script, - app_info_file, - ], + app_info_file, + hidden = [output.as_output(), srcs] + ([ctx.attrs.app_src] if ctx.attrs.app_src else []), ) - app_build_cmd.hidden(output.as_output()) - app_build_cmd.hidden(srcs) - if ctx.attrs.app_src: - app_build_cmd.hidden(ctx.attrs.app_src) - erlang_build.utils.run_with_env( + erlang_build.utils.run_escript( ctx, toolchain, + toolchain.app_file_script, app_build_cmd, category = "app_resource", identifier = action_identifier(toolchain, paths.basename(app_file_name)), @@ -290,9 +289,7 @@ def _app_info_content( name: str, srcs: list[Artifact], output: Artifact) -> Artifact: - """build an app_info.term file that contains the meta information for building the .app file""" - sources_args = convert(srcs) - sources_args.ignore_artifacts() + """build an app_info.json file that contains the meta information for building the .app file""" data = { "applications": [ app[ErlangAppInfo].name @@ -304,7 +301,7 @@ def _app_info_content( ], "name": name, "output": output, - "sources": sources_args, + "sources": srcs, } if ctx.attrs.version: data["version"] = ctx.attrs.version @@ -313,14 +310,13 @@ def _app_info_content( if ctx.attrs.mod: data["mod"] = ctx.attrs.mod if ctx.attrs.env: - data["env"] = {k: cmd_args(v) for k, v in ctx.attrs.env.items()} + data["env"] = ctx.attrs.env if ctx.attrs.extra_properties: - data["metadata"] = {k: normalise_metadata(v) for k, v in ctx.attrs.extra_properties.items()} + data["metadata"] = ctx.attrs.extra_properties - app_info_content = to_term_args(data) - return ctx.actions.write( - paths.join(erlang_build.utils.build_dir(toolchain), "app_info.term"), - app_info_content, + return ctx.actions.write_json( + paths.join(erlang_build.utils.build_dir(toolchain), "app_info.json"), + data, ) def link_output( @@ -366,7 +362,7 @@ def link_output( def _link_srcs_folder(ctx: AnalysisContext) -> dict[str, Artifact]: """Build mapping for the src folder if erlang.include_src is set""" - if not str_to_bool(read_root_config("erlang", "include_src", "False")): + if not ctx.attrs.include_src: return {} srcs = { paths.join("src", src_file.basename): src_file diff --git a/prelude/erlang/erlang_build.bzl b/prelude/erlang/erlang_build.bzl index 4015619a38700..0afd520cdaf74 100644 --- a/prelude/erlang/erlang_build.bzl +++ b/prelude/erlang/erlang_build.bzl @@ -74,12 +74,12 @@ def _prepare_build_environment( input_mapping = {} # for duplication detection - apps = {} + apps = set() for name, dep in dependencies.items(): if name in apps: fail("duplicated application name found %s" % (name,)) - apps[name] = True + apps.add(name) if ErlangAppInfo in dep: dep_info = dep[ErlangAppInfo] @@ -169,17 +169,19 @@ def _generate_input_mapping(build_environment: BuildEnvironment, input_artifacts def _generated_source_artifacts(ctx: AnalysisContext, toolchain: Toolchain, name: str) -> PathArtifactMapping: """Generate source output artifacts and build actions for generated erl files.""" - inputs = [src for src in ctx.attrs.srcs if _is_xyrl(src)] - outputs = { - module_name(src): _build_xyrl( + + def build(src, custom_include_opt): + return _build_xyrl( ctx, toolchain, src, + custom_include_opt, 
ctx.actions.declare_output(generated_erl_path(toolchain, name, src)), ) - for src in inputs - } - return outputs + + yrl_outputs = {module_name(src): build(src, "yrl_includefile") for src in ctx.attrs.srcs if _is_yrl(src)} + xrl_outputs = {module_name(src): build(src, "xrl_includefile") for src in ctx.attrs.srcs if _is_xrl(src)} + return yrl_outputs | xrl_outputs def _generate_include_artifacts( ctx: AnalysisContext, @@ -264,8 +266,6 @@ def _generate_beam_artifacts( for src in src_artifacts } - _check_beam_uniqueness(beam_mapping, build_environment.beams) - # dep files beam_deps = _get_deps_files(ctx, toolchain, anchor, src_artifacts, output_mapping) @@ -287,18 +287,25 @@ def _generate_beam_artifacts( input_mapping = build_environment.input_mapping, ) + dep_info_content = _build_dep_info_data(updated_build_environment) + dep_info_file = ctx.actions.write_json(_dep_info_name(toolchain), dep_info_content) + for erl in src_artifacts: - _build_erl(ctx, toolchain, updated_build_environment, erl, beam_mapping[module_name(erl)]) + _build_erl(ctx, toolchain, updated_build_environment, dep_info_file, erl, beam_mapping[module_name(erl)]) return updated_build_environment -def _check_beam_uniqueness( - local_beams: ModuleArtifactMapping, - global_beams: ModuleArtifactMapping) -> None: - for module in local_beams: - if module in global_beams: - fail("duplicated modules found in build: {}".format([module])) - return None +def _build_dep_info_data(build_environment: BuildEnvironment) -> dict[str, dict[str, Artifact | str]]: + """build input for dependency finalizer, this implements uniqueness checks for headers and beams""" + seen = {} + data = {} + for artifact, dep_file in build_environment.deps_files.items(): + if paths.basename(artifact) in seen: + fail("conflicting artifacts found in build: {} and {}".format(seen[paths.basename(artifact)], artifact)) + else: + seen[paths.basename(artifact)] = artifact + data[paths.basename(artifact)] = {"dep_file": dep_file, "path": artifact} + return data def _generate_chunk_artifacts( ctx: AnalysisContext, @@ -331,7 +338,7 @@ def _generate_chunk_artifacts( input_mapping = build_environment.input_mapping, ) - preprocess_modules = read_root_config("erlang", "edoc_preprocess", "").split() + preprocess_modules = toolchain.edoc_preprocess preprocess_all = "__all__" in preprocess_modules for erl in src_artifacts: @@ -371,11 +378,24 @@ def _deps_key(anchor: Artifact, src: Artifact) -> str: def _get_deps_file(ctx: AnalysisContext, toolchain: Toolchain, src: Artifact) -> Artifact: dependency_analyzer = toolchain.dependency_analyzer dependency_json = ctx.actions.declare_output(_dep_file_name(toolchain, src)) - escript = toolchain.otp_binaries.escript + erl = toolchain.otp_binaries.erl dependency_analyzer_cmd = cmd_args( [ - escript, + erl, + "+A0", + "+S1:1", + "+sbtu", + "-mode", + "minimal", + "-noinput", + "-noshell", + "-pa", + toolchain.utility_modules, + "-run", + "escript", + "start", + "--", dependency_analyzer, src, dependency_json.as_output(), @@ -402,21 +422,19 @@ def _build_xyrl( ctx: AnalysisContext, toolchain: Toolchain, xyrl: Artifact, + custom_include_opt: str, output: Artifact) -> Artifact: """Generate an erl file out of an xrl or yrl input file.""" erlc = toolchain.otp_binaries.erlc - erlc_cmd = cmd_args( - [ - erlc, - "-o", - cmd_args(output.as_output()).parent(), - xyrl, - ], - ) + custom_include = getattr(ctx.attrs, custom_include_opt, None) + cmd = cmd_args(erlc) + if custom_include: + cmd.add("-I", custom_include) + cmd.add("-o", 
cmd_args(output.as_output(), parent = 1), xyrl) _run_with_env( ctx, toolchain, - erlc_cmd, + cmd, category = "erlc", identifier = action_identifier(toolchain, xyrl.basename), ) @@ -426,6 +444,7 @@ def _build_erl( ctx: AnalysisContext, toolchain: Toolchain, build_environment: BuildEnvironment, + dep_info_file: Artifact, src: Artifact, output: Artifact) -> None: """Compile erl files into beams.""" @@ -433,6 +452,22 @@ def _build_erl( trampoline = toolchain.erlc_trampoline erlc = toolchain.otp_binaries.erlc + final_dep_file = ctx.actions.declare_output(_dep_final_name(toolchain, src)) + finalize_deps_cmd = cmd_args( + src, + dep_info_file, + final_dep_file.as_output(), + hidden = build_environment.deps_files.values(), + ) + _run_escript( + ctx, + toolchain, + toolchain.dependency_finalizer, + finalize_deps_cmd, + category = "dependency_finalizer", + identifier = action_identifier(toolchain, src.basename), + ) + def dynamic_lambda(ctx: AnalysisContext, artifacts, outputs): erl_opts = _get_erl_opts(ctx, toolchain, src) erlc_cmd = cmd_args( @@ -445,12 +480,14 @@ def _build_erl( _dependency_code_paths(build_environment), ), "-o", - cmd_args(outputs[output].as_output()).parent(), + cmd_args(outputs[output].as_output(), parent = 1), src, ], ) - erlc_cmd, mapping = _add_dependencies_to_args(ctx, artifacts, [outputs[output].short_path], {}, {}, erlc_cmd, build_environment) - erlc_cmd = _add_full_dependencies(erlc_cmd, build_environment) + deps_args, mapping = _dependencies_to_args(artifacts, final_dep_file, build_environment) + erlc_cmd.add(deps_args) + full_deps_args = _full_dependencies(build_environment) + erlc_cmd.add(full_deps_args) _run_with_env( ctx, toolchain, @@ -461,7 +498,7 @@ def _build_erl( always_print_stderr = True, ) - ctx.actions.dynamic_output(dynamic = build_environment.deps_files.values(), inputs = [src], outputs = [output], f = dynamic_lambda) + ctx.actions.dynamic_output(dynamic = [final_dep_file], inputs = [src], outputs = [output.as_output()], f = dynamic_lambda) return None def _build_edoc( @@ -485,7 +522,7 @@ def _build_edoc( "-pa", toolchain.utility_modules, "-o", - cmd_args(output.as_output()).parent(2), + cmd_args(output.as_output(), parent = 2), ], ) @@ -495,11 +532,14 @@ def _build_edoc( args = _erlc_dependency_args(_dependency_include_dirs(build_environment), [], False) eval_cmd.add(args) + eval_cmd_hidden = [] for include in build_environment.includes.values(): - eval_cmd.hidden(include) + eval_cmd_hidden.append(include) for include in build_environment.private_includes.values(): - eval_cmd.hidden(include) + eval_cmd_hidden.append(include) + + eval_cmd.add(cmd_args(hidden = eval_cmd_hidden)) _run_with_env( ctx, @@ -511,104 +551,90 @@ def _build_edoc( ) return None -def _add_dependencies_to_args( - ctx: AnalysisContext, +def _dependencies_to_args( artifacts, - queue: list[str], - done: dict[str, bool], - input_mapping: dict[str, (bool, [str, Artifact])], - args: cmd_args, + final_dep_file: Artifact, build_environment: BuildEnvironment) -> (cmd_args, dict[str, (bool, [str, Artifact])]): """Add the transitive closure of all per-file Erlang dependencies as specified in the deps files to the `args` with .hidden. - - This function traverses the deps specified in the deps files and adds all discovered dependencies. 
""" - if not queue: - return args, input_mapping - - next_round = [] + args_hidden = [] - for key in queue: - if key not in build_environment.deps_files: - continue - deps = artifacts[build_environment.deps_files[key]].read_json() - - # silently ignore not found dependencies and let erlc report the not found stuff - for dep in deps: - file = dep["file"] - if dep["type"] == "include_lib": - app = dep["app"] - if (app, file) in build_environment.includes: - artifact = build_environment.includes[(app, file)] - input_mapping[file] = (True, build_environment.input_mapping[artifact.basename]) - else: - # the file might come from OTP - input_mapping[file] = (False, paths.join(app, "include", file)) - continue + input_mapping = {} + deps = artifacts[final_dep_file].read_json() + + # silently ignore not found dependencies and let erlc report the not found stuff + for dep in deps: + artifact = None + file = dep["file"] + if dep["type"] == "include_lib": + app = dep["app"] + if (app, file) in build_environment.includes: + artifact = build_environment.includes[(app, file)] + input_mapping[file] = (True, build_environment.input_mapping[artifact.basename]) + else: + # the file might come from OTP + input_mapping[file] = (False, paths.join(app, "include", file)) + continue - elif dep["type"] == "include": - # these includes can either reside in the private includes - # or the public ones - if file in build_environment.private_includes: - artifact = build_environment.private_includes[file] + elif dep["type"] == "include": + # these includes can either reside in the private includes + # or the public ones + if file in build_environment.private_includes: + artifact = build_environment.private_includes[file] - if artifact.basename in build_environment.input_mapping: - input_mapping[file] = (True, build_environment.input_mapping[artifact.basename]) - else: - # at this point we don't know the application the include is coming - # from, and have to check all public include directories - candidates = [key for key in build_environment.includes.keys() if key[1] == file] - if len(candidates) > 1: - offending_apps = [app for (app, _) in candidates] - fail("-include(\"%s\") is ambiguous as the following applications declare public includes with the same name: %s" % (file, offending_apps)) - elif candidates: - artifact = build_environment.includes[candidates[0]] - input_mapping[file] = (True, build_environment.input_mapping[artifact.basename]) - else: - # we didn't find the include, build will fail during compile - continue - - elif (dep["type"] == "behaviour" or - dep["type"] == "parse_transform" or - dep["type"] == "manual_dependency"): - module, _ = paths.split_extension(file) - if module in build_environment.beams: - artifact = build_environment.beams[module] + if artifact.basename in build_environment.input_mapping: + input_mapping[file] = (True, build_environment.input_mapping[artifact.basename]) + else: + # at this point we don't know the application the include is coming + # from, and have to check all public include directories + candidates = [key for key in build_environment.includes.keys() if key[1] == file] + if len(candidates) > 1: + offending_apps = [app for (app, _) in candidates] + fail("-include(\"%s\") is ambiguous as the following applications declare public includes with the same name: %s" % (file, offending_apps)) + elif candidates: + artifact = build_environment.includes[candidates[0]] + input_mapping[file] = (True, build_environment.input_mapping[artifact.basename]) else: + # we didn't find the 
include, build will fail during compile continue + elif (dep["type"] == "behaviour" or + dep["type"] == "parse_transform" or + dep["type"] == "manual_dependency"): + module, _ = paths.split_extension(file) + if module in build_environment.beams: + artifact = build_environment.beams[module] else: - fail("unrecognized dependency type %s", (dep["type"])) + continue - next_key = artifact.short_path - if next_key not in done: - done[next_key] = True - next_round.append(next_key) - args.hidden(artifact) + else: + fail("unrecognized dependency type %s", (dep["type"])) + + args_hidden.append(artifact) - # STARLARK does not have unbound loops (while loops) and we use recursion instead. - return _add_dependencies_to_args(ctx, artifacts, next_round, done, input_mapping, args, build_environment) + return cmd_args(hidden = args_hidden), input_mapping -def _add_full_dependencies(erlc_cmd: cmd_args, build_environment: BuildEnvironment) -> cmd_args: +def _full_dependencies(build_environment: BuildEnvironment) -> cmd_args: + erlc_cmd_hidden = [] for artifact in build_environment.full_dependencies: - erlc_cmd.hidden(artifact) - return erlc_cmd + erlc_cmd_hidden.append(artifact) + return cmd_args(hidden = erlc_cmd_hidden) def _dependency_include_dirs(build_environment: BuildEnvironment) -> list[cmd_args]: includes = [ - cmd_args(include_dir_anchor).parent() + cmd_args(include_dir_anchor, parent = 1) for include_dir_anchor in build_environment.private_include_dir ] for include_dir_anchor in build_environment.include_dirs.values(): - includes.append(cmd_args(include_dir_anchor).parent(3)) - includes.append(cmd_args(include_dir_anchor).parent()) + includes.append(cmd_args(include_dir_anchor, parent = 3)) + includes.append(cmd_args(include_dir_anchor, parent = 1)) return includes def _dependency_code_paths(build_environment: BuildEnvironment) -> list[cmd_args]: return [ - cmd_args(ebin_dir_anchor).parent() + cmd_args(ebin_dir_anchor, parent = 1) for ebin_dir_anchor in build_environment.ebin_dirs.values() ] @@ -621,7 +647,7 @@ def _erlc_dependency_args( # A: the whole string would get passed as a single argument, as if it was quoted in CLI e.g. '-I include_path' # ...which the escript cannot parse, as it expects two separate arguments, e.g. 
'-I' 'include_path' - args = cmd_args([]) + args = cmd_args([], ignore_artifacts = True) # build -I options if path_in_arg: @@ -641,8 +667,6 @@ def _erlc_dependency_args( args.add("-pa") args.add(code_path) - args.ignore_artifacts() - return args def _get_erl_opts( @@ -678,9 +702,9 @@ def _get_erl_opts( for parse_transform, (beam, resource_folder) in parse_transforms.items(): args.add( "+{parse_transform, %s}" % (parse_transform,), - cmd_args(beam, format = "-pa{}").parent(), + cmd_args(beam, format = "-pa{}", parent = 1), ) - args.hidden(resource_folder) + args.add(cmd_args(hidden = resource_folder)) # add relevant compile_info manually args.add(cmd_args( @@ -730,9 +754,13 @@ def _is_erl(in_file: Artifact) -> bool: """ Returns True if the artifact is an erl file """ return _is_ext(in_file, [".erl"]) -def _is_xyrl(in_file: Artifact) -> bool: - """ Returns True if the artifact is a xrl or yrl file """ - return _is_ext(in_file, [".yrl", ".xrl"]) +def _is_yrl(in_file: Artifact) -> bool: + """ Returns True if the artifact is a yrl file """ + return _is_ext(in_file, [".yrl"]) + +def _is_xrl(in_file: Artifact) -> bool: + """ Returns True if the artifact is a xrl file """ + return _is_ext(in_file, [".xrl"]) def _is_ext(in_file: Artifact, extensions: list[str]) -> bool: """ Returns True if the artifact has an extension listed in extensions """ @@ -746,6 +774,20 @@ def _dep_file_name(toolchain: Toolchain, src: Artifact) -> str: src.short_path + ".dep", ) +def _dep_final_name(toolchain: Toolchain, src: Artifact) -> str: + return paths.join( + _build_dir(toolchain), + "__dep_files", + src.short_path + ".final.dep", + ) + +def _dep_info_name(toolchain: Toolchain) -> str: + return paths.join( + _build_dir(toolchain), + "__dep_files", + "app.info.dep", + ) + def _merge(a: dict, b: dict) -> dict: """ sefely merge two dict """ r = dict(a) @@ -785,6 +827,65 @@ def _run_with_env(ctx: AnalysisContext, toolchain: Toolchain, *args, **kwargs): kwargs["env"] = env ctx.actions.run(*args, **kwargs) +def _run_escript(ctx: AnalysisContext, toolchain: Toolchain, script: Artifact, args: cmd_args, **kwargs) -> None: + """ run escript with env and providing toolchain-configured utility modules""" + cmd = cmd_args([ + toolchain.otp_binaries.erl, + "+A0", + "+S1:1", + "+sbtu", + "-mode", + "minimal", + "-noinput", + "-noshell", + "-pa", + toolchain.utility_modules, + "-run", + "escript", + "start", + "--", + script, + args, + ]) + _run_with_env(ctx, toolchain, cmd, **kwargs) + +def _peek_private_includes( + ctx: AnalysisContext, + toolchain: Toolchain, + build_environment: BuildEnvironment, + dependencies: ErlAppDependencies, + force_peek: bool = False) -> BuildEnvironment: + # get mutable dict for private includes + new_private_includes = dict(build_environment.private_includes) + new_private_include_dir = list(build_environment.private_include_dir) + + # get private deps from dependencies + for dep in dependencies.values(): + if ErlangAppInfo in dep: + if dep[ErlangAppInfo].private_include_dir: + new_private_include_dir = new_private_include_dir + dep[ErlangAppInfo].private_include_dir[toolchain.name] + new_private_includes.update(dep[ErlangAppInfo].private_includes[toolchain.name]) + if force_peek or ctx.attrs.peek_private_includes: + return BuildEnvironment( + private_includes = new_private_includes, + private_include_dir = new_private_include_dir, + # copied fields + includes = build_environment.includes, + beams = build_environment.beams, + priv_dirs = build_environment.priv_dirs, + include_dirs = 
build_environment.include_dirs, + ebin_dirs = build_environment.ebin_dirs, + deps_files = build_environment.deps_files, + app_files = build_environment.app_files, + full_dependencies = build_environment.full_dependencies, + app_includes = build_environment.app_includes, + app_beams = build_environment.app_beams, + app_chunks = build_environment.app_chunks, + input_mapping = build_environment.input_mapping, + ) + else: + return build_environment + # export erlang_build = struct( @@ -799,11 +900,14 @@ erlang_build = struct( utils = struct( is_hrl = _is_hrl, is_erl = _is_erl, - is_xyrl = _is_xyrl, + is_yrl = _is_yrl, + is_xrl = _is_xrl, module_name = module_name, private_include_name = private_include_name, make_dir_anchor = _make_dir_anchor, build_dir = _build_dir, run_with_env = _run_with_env, + run_escript = _run_escript, + peek_private_includes = _peek_private_includes, ), ) diff --git a/prelude/erlang/erlang_escript.bzl b/prelude/erlang/erlang_escript.bzl index ef3cf834d880a..45603f4010000 100644 --- a/prelude/erlang/erlang_escript.bzl +++ b/prelude/erlang/erlang_escript.bzl @@ -7,8 +7,9 @@ load("@prelude//:paths.bzl", "paths") load(":erlang_build.bzl", "erlang_build") -load(":erlang_dependencies.bzl", "check_dependencies", "flatten_dependencies") +load(":erlang_dependencies.bzl", "ErlAppDependencies", "check_dependencies", "flatten_dependencies") load(":erlang_info.bzl", "ErlangAppInfo") +load(":erlang_release.bzl", "build_lib_dir") load( ":erlang_toolchain.bzl", "Toolchain", # @unused Used as type @@ -17,43 +18,60 @@ load( ) load(":erlang_utils.bzl", "action_identifier", "to_term_args") -def create_escript( - ctx: AnalysisContext, - spec_file: Artifact, - toolchain: Toolchain, - files: list[Artifact], - output: Artifact, - escript_name: str) -> None: - """ build the escript with the escript builder tool - """ - script = toolchain.escript_builder - - escript_build_cmd = cmd_args( - [ - toolchain.otp_binaries.escript, - script, - spec_file, - ], - ) - escript_build_cmd.hidden(output.as_output()) - escript_build_cmd.hidden(files) - erlang_build.utils.run_with_env( - ctx, - toolchain, - escript_build_cmd, - category = "escript", - identifier = action_identifier(toolchain, escript_name), - ) - return None - def erlang_escript_impl(ctx: AnalysisContext) -> list[Provider]: # select the correct tools from the toolchain - toolchain_name = get_primary(ctx) toolchain = select_toolchains(ctx)[get_primary(ctx)] # collect all dependencies dependencies = flatten_dependencies(ctx, check_dependencies(ctx.attrs.deps, [ErlangAppInfo])) + if ctx.attrs.bundled: + return _bundled_escript_impl(ctx, dependencies, toolchain) + else: + return _unbundled_escript_impl(ctx, dependencies, toolchain) + +def _unbundled_escript_impl(ctx: AnalysisContext, dependencies: ErlAppDependencies, toolchain: Toolchain) -> list[Provider]: + if ctx.attrs.resources: + fail("resources are not supported with unbundled escripts, add them to an applications priv/ directory instead") + + escript_name = _escript_name(ctx) + + lib_dir = build_lib_dir( + ctx, + toolchain, + escript_name, + dependencies, + ) + + config_files = _escript_config_files(ctx) + escript_trampoline = build_escript_unbundled_trampoline(ctx, toolchain, config_files) + + trampoline = { + "run.escript": escript_trampoline, + } + + all_outputs = {} + for outputs in [lib_dir, trampoline]: + all_outputs.update(outputs) + + for config_file in config_files: + all_outputs[config_file.short_path] = config_file + + output = ctx.actions.symlinked_dir( + escript_name, + 
all_outputs, + ) + + cmd = cmd_args([ + toolchain.escript_trampoline, + output, + toolchain.otp_binaries.escript, + ]) + + return [DefaultInfo(default_output = output), RunInfo(cmd)] + +def _bundled_escript_impl(ctx: AnalysisContext, dependencies: ErlAppDependencies, toolchain: Toolchain) -> list[Provider]: + toolchain_name = get_primary(ctx) artifacts = {} for dep in dependencies.values(): @@ -81,15 +99,19 @@ def erlang_escript_impl(ctx: AnalysisContext) -> list[Provider]: fail("multiple artifacts defined for path %s", (artifact.short_path)) artifacts[artifact.short_path] = artifact - if ctx.attrs.script_name: - escript_name = ctx.attrs.script_name - else: - escript_name = ctx.attrs.name + ".escript" + escript_name = _escript_name(ctx) output = ctx.actions.declare_output(escript_name) args = ctx.attrs.emu_args - if ctx.attrs.main_module: - args += ["-escript", "main", ctx.attrs.main_module] + + config_files = _escript_config_files(ctx) + for config_file in config_files: + artifacts[config_file.short_path] = config_file + + escript_trampoline = build_escript_bundled_trampoline(ctx, toolchain, config_files) + artifacts[escript_trampoline.basename] = escript_trampoline + + args += ["-escript", "main", "erlang_escript_trampoline"] escript_build_spec = { "artifacts": artifacts, @@ -116,8 +138,136 @@ def erlang_escript_impl(ctx: AnalysisContext) -> list[Provider]: RunInfo(escript_cmd), ] +def create_escript( + ctx: AnalysisContext, + spec_file: Artifact, + toolchain: Toolchain, + files: list[Artifact], + output: Artifact, + escript_name: str) -> None: + """ build the escript with the escript builder tool + """ + script = toolchain.escript_builder + + escript_build_cmd = cmd_args( + [ + toolchain.otp_binaries.escript, + script, + spec_file, + ], + hidden = [ + output.as_output(), + files, + ], + ) + + erlang_build.utils.run_with_env( + ctx, + toolchain, + escript_build_cmd, + category = "escript", + identifier = action_identifier(toolchain, escript_name), + ) + return None + +def _escript_name(ctx: AnalysisContext) -> str: + if ctx.attrs.script_name: + return ctx.attrs.script_name + else: + return ctx.attrs.name + ".escript" + +def _main_module(ctx: AnalysisContext) -> str: + if ctx.attrs.main_module: + return ctx.attrs.main_module + else: + return ctx.attrs.name + +def build_escript_unbundled_trampoline(ctx: AnalysisContext, toolchain, config_files: list[Artifact]) -> Artifact: + data = cmd_args() + + data.add("#!/usr/bin/env escript") + data.add("%% -*- erlang -*-") + data.add("%%! 
{}".format(" ".join(ctx.attrs.emu_args))) + + data.add("-module('{}').".format(_escript_name(ctx))) + data.add("-export([main/1]).") + data.add("main(Args) ->") + data.add("EscriptDir = filename:dirname(escript:script_name()),") + data.add(_config_files_code_to_erl(config_files)) + data.add(' EBinDirs = filelib:wildcard(filename:join([EscriptDir, "lib", "*", "ebin"])),') + data.add(" code:add_paths(EBinDirs),") + data.add(" {}:main(Args).".format(_main_module(ctx))) + data.add(_parse_bin()) + + return ctx.actions.write( + paths.join(erlang_build.utils.build_dir(toolchain), "run.escript"), + data, + is_executable = True, + ) + +def build_escript_bundled_trampoline(ctx: AnalysisContext, toolchain, config_files: list[Artifact]) -> Artifact: + data = cmd_args() + + data.add("-module('erlang_escript_trampoline').") + data.add("-export([main/1]).") + data.add("main(Args) ->") + data.add("EscriptDir = escript:script_name(),") + data.add(_config_files_code_to_erl(config_files)) + data.add(" {}:main(Args).".format(_main_module(ctx))) + data.add(_parse_bin()) + escript_trampoline_erl = ctx.actions.write( + paths.join(erlang_build.utils.build_dir(toolchain), "erlang_escript_trampoline.erl"), + data, + ) + my_output = ctx.actions.declare_output("erlang_escript_trampoline.beam") + + ctx.actions.run( + cmd_args( + toolchain.otp_binaries.erlc, + "-o", + cmd_args(my_output.as_output(), parent = 1), + escript_trampoline_erl, + ), + category = "erlc_escript_trampoline", + ) + + return my_output + def _ebin_path(file: Artifact, app_name: str) -> str: return paths.join(app_name, "ebin", file.basename) def _priv_path(app_name: str) -> str: return paths.join(app_name, "priv") + +def _escript_config_files(ctx: AnalysisContext) -> list[Artifact]: + config_files = [] + for config_dep in ctx.attrs.configs: + for artifact in config_dep[DefaultInfo].default_outputs + config_dep[DefaultInfo].other_outputs: + (_, ext) = paths.split_extension(artifact.short_path) + if ext == ".config": + config_files.append(artifact) + return config_files + +def _config_files_code_to_erl(config_files: list[Artifact]) -> list[str]: + cmd = [] + cmd.append("ConfigFiles = [") + for i in range(0, len(config_files)): + cmd.append('"{}"'.format(config_files[i].short_path)) + if i < len(config_files) - 1: + cmd.append(",") + cmd.append("],") + cmd.append("[begin ") + cmd.append("{ok, AppConfigBin, _FullName} = erl_prim_loader:get_file(filename:join(EscriptDir, ConfigFile)),") + cmd.append("{ok, AppConfig} = parse_bin(AppConfigBin), ") + cmd.append(" ok = application:set_env(AppConfig, [{persistent, true}])") + cmd.append("end || ConfigFile <- ConfigFiles],") + return cmd + +def _parse_bin() -> str: + return """ +parse_bin(<<"">>) -> + []; +parse_bin(Bin) -> + {ok, Tokens, _} = erl_scan:string(binary_to_list(Bin)), + erl_parse:parse_term(Tokens). 
+ """ diff --git a/prelude/erlang/erlang_info.bzl b/prelude/erlang/erlang_info.bzl index f25dd5616198d..f1fab0250f1a0 100644 --- a/prelude/erlang/erlang_info.bzl +++ b/prelude/erlang/erlang_info.bzl @@ -88,8 +88,10 @@ ErlangToolchainInfo = provider( "escript_builder": provider_field(typing.Any, default = None), # analyzing .(h|e)rl dependencies "dependency_analyzer": provider_field(typing.Any, default = None), + "dependency_finalizer": provider_field(typing.Any, default = None), # trampoline rerouting stdout to stderr "erlc_trampoline": provider_field(typing.Any, default = None), + "escript_trampoline": provider_field(typing.Any, default = None), # name to parse_transform artifacts mapping for core parse_transforms (that are always used) and # user defines ones "core_parse_transforms": provider_field(typing.Any, default = None), @@ -105,6 +107,7 @@ ErlangToolchainInfo = provider( # edoc-generating escript "edoc": provider_field(typing.Any, default = None), "edoc_options": provider_field(typing.Any, default = None), + "edoc_preprocess": provider_field(list[str], default = []), # beams we need for various reasons "utility_modules": provider_field(typing.Any, default = None), # env to be set for toolchain invocations diff --git a/prelude/erlang/erlang_release.bzl b/prelude/erlang/erlang_release.bzl index 1da7d2c3ef875..3559d97e56fb5 100644 --- a/prelude/erlang/erlang_release.bzl +++ b/prelude/erlang/erlang_release.bzl @@ -75,7 +75,7 @@ def _build_primary_release(ctx: AnalysisContext, apps: ErlAppDependencies) -> li def _build_release(ctx: AnalysisContext, toolchain: Toolchain, apps: ErlAppDependencies) -> dict[str, Artifact]: # OTP base structure - lib_dir = _build_lib_dir(ctx, toolchain, apps) + lib_dir = build_lib_dir(ctx, toolchain, _relname(ctx), apps) boot_scripts = _build_boot_script(ctx, toolchain, lib_dir["lib"]) # release specific variables in bin/release_variables @@ -100,12 +100,15 @@ def _build_release(ctx: AnalysisContext, toolchain: Toolchain, apps: ErlAppDepen return all_outputs -def _build_lib_dir(ctx: AnalysisContext, toolchain: Toolchain, all_apps: ErlAppDependencies) -> dict[str, Artifact]: +def build_lib_dir( + ctx: AnalysisContext, + toolchain: Toolchain, + release_name: str, + all_apps: ErlAppDependencies) -> dict[str, Artifact]: """Build lib dir according to OTP specifications. .. 
seealso:: `OTP Design Principles Release Structure `_ """ - release_name = _relname(ctx) build_dir = erlang_build.utils.build_dir(toolchain) link_spec = { @@ -152,13 +155,13 @@ def _build_boot_script( reverse_start_order = list(root_set.traverse()) reverse_start_order.pop(0) - seen = {} + seen = set() release_applications = [] root_apps_spec = {} for spec in reverse_start_order[::-1]: if spec.name in seen: continue - seen[spec.name] = True + seen.add(spec.name) app_spec = { "name": spec.name, @@ -195,12 +198,14 @@ def _build_boot_script( toolchain.otp_binaries.escript, script, spec_file, - cmd_args(release_resource.as_output()).parent(), + cmd_args(release_resource.as_output(), parent = 1), + ], + hidden = [ + start_script.as_output(), + boot_script.as_output(), + lib_dir, ], ) - boot_script_build_cmd.hidden(start_script.as_output()) - boot_script_build_cmd.hidden(boot_script.as_output()) - boot_script_build_cmd.hidden(lib_dir) erlang_build.utils.run_with_env( ctx, diff --git a/prelude/erlang/erlang_shell.bzl b/prelude/erlang/erlang_shell.bzl index 26e8428238393..671a37b5cf290 100644 --- a/prelude/erlang/erlang_shell.bzl +++ b/prelude/erlang/erlang_shell.bzl @@ -8,18 +8,20 @@ load("@prelude//:paths.bzl", "paths") load(":erlang_dependencies.bzl", "check_dependencies", "flatten_dependencies") load(":erlang_info.bzl", "ErlangAppInfo") -load(":erlang_toolchain.bzl", "get_primary_tools") +load(":erlang_toolchain.bzl", "get_primary", "get_primary_tools") def _build_run_info( ctx: AnalysisContext, + *, dependencies: list[Dependency], additional_app_paths: list[Artifact] = [], additional_paths: list[Artifact] = [], additional_args: list[cmd_args] = []) -> Provider: """Builds an Erlang shell with the dependencies and additional code paths available.""" + primary_toolchain_name = get_primary(ctx) app_paths = [ - dep[ErlangAppInfo].app_folder + dep[ErlangAppInfo].app_folders[primary_toolchain_name] for dep in dependencies if ErlangAppInfo in dep and not dep[ErlangAppInfo].virtual ] + additional_app_paths @@ -29,18 +31,19 @@ def _build_run_info( for dep in all_shell_dependencies.values(): if dep[ErlangAppInfo].virtual: continue - app_paths.append(dep[ErlangAppInfo].app_folder) + app_paths.append(dep[ErlangAppInfo].app_folders[primary_toolchain_name]) erl_args = cmd_args([]) for app_path in app_paths: - erl_args.add(cmd_args(app_path, format = "-pa \"${REPO_ROOT}\"/{}/ebin \\", delimiter = "")) + erl_args.add(cmd_args(app_path, format = "-pa \"${REPO_ROOT}\"/{}/ebin", delimiter = "")) for additional_path in additional_paths: - erl_args.add(cmd_args(additional_path, format = "-pa \"${REPO_ROOT}\"/{} \\", delimiter = "")) + erl_args.add(cmd_args(additional_path, format = "-pa \"${REPO_ROOT}\"/{}", delimiter = "")) # add configs + config_files = _shell_config_files(ctx) for config_file in _shell_config_files(ctx): - erl_args.add(cmd_args(config_file, format = "-config \"${REPO_ROOT}\"/{} \\", delimiter = "")) + erl_args.add(cmd_args(config_file, format = "-config \"${REPO_ROOT}\"/{}", delimiter = "")) # add extra args for additional_args in additional_args: @@ -49,18 +52,24 @@ def _build_run_info( erl_args.add('"$@"') tools = get_primary_tools(ctx) - content = cmd_args([]) - content = content.add("REPO_ROOT=$(buck2 root --kind=project)") - content.add(cmd_args(["\"${REPO_ROOT}\"/", cmd_args(tools.erl, delimiter = " "), " \\"], delimiter = "")) - content.add(erl_args) - content.add("") + erl_command = cmd_args([ + "exec", + cmd_args(["\"${REPO_ROOT}\"/", cmd_args(tools.erl, delimiter = " ")], delimiter = 
""), + erl_args, + ]) - shell_script = ctx.actions.write("start_shell.sh", content) - shell_cmd = cmd_args(["/usr/bin/env", "bash", shell_script]) + start_shell_content = cmd_args([ + "export REPO_ROOT=$(buck2 root --kind=project)", + cmd_args(erl_command, delimiter = " \\\n"), + "", + ]) - # depend on input paths - for code_path in app_paths + additional_paths: - shell_cmd.hidden(code_path) + shell_script = ctx.actions.write("start_shell.sh", start_shell_content, with_inputs = True) + shell_cmd = cmd_args( + ["/usr/bin/env", "bash", shell_script], + # depend on input paths + hidden = app_paths + additional_paths + config_files, + ) return RunInfo(shell_cmd) diff --git a/prelude/erlang/erlang_tests.bzl b/prelude/erlang/erlang_tests.bzl index 2aafa7fc4fc64..2ec15a246b7ea 100644 --- a/prelude/erlang/erlang_tests.bzl +++ b/prelude/erlang/erlang_tests.bzl @@ -24,6 +24,7 @@ load(":erlang_shell.bzl", "erlang_shell") load( ":erlang_toolchain.bzl", "get_primary", + "get_primary_tools", "select_toolchains", ) load( @@ -31,7 +32,6 @@ load( "file_mapping", "list_dedupe", "preserve_structure", - "to_term_args", ) def erlang_tests_macro( @@ -41,11 +41,10 @@ def erlang_tests_macro( deps: list[str] = [], resources: list[str] = [], property_tests: list[str] = [], - config_files: list[str] = [], srcs: list[str] = [], - use_default_configs: bool = True, - use_default_deps: bool = True, - **common_attributes: dict) -> None: + prefix: str | None = None, + generated_app_labels: list[str] = [], + **common_attributes) -> None: """ Generate multiple erlang_test targets based on the `suites` field. Also adds the default 'config' and 'deps' from the buck2 config. @@ -53,7 +52,6 @@ def erlang_tests_macro( resource targets for files in the suite associated _data folder. """ deps = [normalize_application(dep) for dep in deps] - config_files = list(config_files) if not suites: return @@ -67,26 +65,11 @@ def erlang_tests_macro( erlang_app_rule( name = srcs_app, srcs = srcs, - labels = ["generated", "test_application", "test_utils"], + labels = generated_app_labels, applications = app_deps, ) deps.append(":" + srcs_app) - # add default apps - - default_deps = read_root_config("erlang", "erlang_tests_default_apps", None) if use_default_deps else None - default_config_files = read_root_config("erlang", "erlang_tests_default_config", None) if use_default_configs else None - trampoline = read_root_config("erlang", "erlang_tests_trampoline", None) if use_default_configs else None - providers = read_root_config("erlang", "erlang_test_providers", "") if use_default_configs else "" - defaultAnnotationMFA = "artifact_annotations:default_annotation/1" - annotationsMFA = read_root_config("erlang", "test_artifacts_annotation_mfa", defaultAnnotationMFA) if use_default_configs else defaultAnnotationMFA - - if default_config_files: - config_files += default_config_files.split() - - if default_deps != None: - deps += default_deps.split() - target_resources = list(resources) if not property_tests: @@ -95,11 +78,7 @@ def erlang_tests_macro( if prop_target: property_tests = [prop_target] - common_attributes["labels"] = common_attributes.get("labels", []) + ["tpx-enable-artifact-reporting", "test-framework=39:erlang_common_test"] - - additional_labels = read_config("erlang", "test_labels", None) - if additional_labels != None: - common_attributes["labels"] += additional_labels.split() + common_attributes["labels"] = common_attributes.get("labels", []) common_attributes["labels"] = list_dedupe(common_attributes["labels"]) @@ -117,17 +96,16 
@@ def erlang_tests_macro( suite_resource = [target for target in target_resources] suite_resource.append(data_target) + if prefix != None: + suite_name = "{}_{}".format(prefix, suite_name) + # forward resources and deps fields and generate erlang_test target erlang_test_rule( name = suite_name, suite = suite, deps = deps, resources = suite_resource, - config_files = config_files, property_tests = property_tests, - _trampoline = trampoline, - _providers = providers, - _artifact_annotation_mfa = annotationsMFA, **common_attributes ) @@ -135,6 +113,7 @@ def erlang_test_impl(ctx: AnalysisContext) -> list[Provider]: toolchains = select_toolchains(ctx) primary_toolchain_name = get_primary(ctx) primary_toolchain = toolchains[primary_toolchain_name] + tools = get_primary_tools(ctx) deps = ctx.attrs.deps + [ctx.attrs._test_binary_lib] @@ -145,26 +124,22 @@ def erlang_test_impl(ctx: AnalysisContext) -> list[Provider]: # prepare build environment pre_build_environment = erlang_build.prepare_build_environment(ctx, primary_toolchain, dependencies) - new_private_include_dir = pre_build_environment.private_include_dir - - # pre_build_environment.private_includes is immutable, that's how we change that. - new_private_includes = {a: b for (a, b) in pre_build_environment.private_includes.items()} - - #Pull private deps from dependencies - for dep in dependencies.values(): - if ErlangAppInfo in dep: - if dep[ErlangAppInfo].private_include_dir: - new_private_include_dir = new_private_include_dir + dep[ErlangAppInfo].private_include_dir[primary_toolchain_name] - new_private_includes.update(dep[ErlangAppInfo].private_includes[primary_toolchain_name]) + pre_build_environment = erlang_build.utils.peek_private_includes( + ctx, + primary_toolchain, + pre_build_environment, + dependencies, + force_peek = True, + ) # Records are immutable, hence we need to create a new record from the previous one. 
build_environment = BuildEnvironment( includes = pre_build_environment.includes, - private_includes = new_private_includes, + private_includes = pre_build_environment.private_includes, beams = pre_build_environment.beams, priv_dirs = pre_build_environment.priv_dirs, include_dirs = pre_build_environment.include_dirs, - private_include_dir = new_private_include_dir, + private_include_dir = pre_build_environment.private_include_dir, ebin_dirs = pre_build_environment.ebin_dirs, deps_files = pre_build_environment.deps_files, app_files = pre_build_environment.app_files, @@ -180,14 +155,39 @@ def erlang_test_impl(ctx: AnalysisContext) -> list[Provider]: # Config files for ct config_files = [config_file[DefaultInfo].default_outputs[0] for config_file in ctx.attrs.config_files] - test_binary = ctx.attrs._test_binary[RunInfo] + trampolines = ctx.attrs._trampolines + if ctx.attrs._trampoline != None: + if trampolines != None: + fail("_trampoline and _trampolines can't be both provided") + trampolines = [ctx.attrs._trampoline] - trampoline = ctx.attrs._trampoline cmd = cmd_args([]) - if trampoline: - cmd.add(trampoline[RunInfo]) - - cmd.add(test_binary) + if trampolines: + cmd.add(*[trampoline[RunInfo] for trampoline in trampolines]) + + binary_lib_deps = flatten_dependencies(ctx, check_dependencies([ctx.attrs._test_binary_lib], [ErlangAppInfo])) + cmd.add([ + tools.erl, + "-mode", + "minimal", + "-noinput", + "-noshell", + "+A0", + "+S1:1", + "+sbtu", + "-run", + "test_binary", # provided by ctx.attr._test_binary_lib + "main", + ]) + + for dep in binary_lib_deps.values(): + if dep[ErlangAppInfo].virtual: + continue + app_folder = dep[ErlangAppInfo].app_folders[primary_toolchain_name] + cmd.add(["-pa", cmd_args(app_folder, format = "{}/ebin", delimiter = "")]) + cmd.add(["-pa", primary_toolchain.utility_modules]) + + cmd.add(["--"]) suite = ctx.attrs.suite suite_name = module_name(suite) @@ -209,6 +209,7 @@ def erlang_test_impl(ctx: AnalysisContext) -> list[Provider]: output_dir = link_output(ctx, suite_name, build_environment, data_dir, property_dir) test_info_file = _write_test_info_file( ctx = ctx, + extra_code_paths = [primary_toolchain.utility_modules], test_suite = suite_name, dependencies = dependencies, test_dir = output_dir, @@ -217,23 +218,34 @@ def erlang_test_impl(ctx: AnalysisContext) -> list[Provider]: ) cmd.add(test_info_file) - default_info = _build_default_info(dependencies, output_dir) + hidden_args = [] + + default_info = _build_default_info(ctx, dependencies, output_dir) for output_artifact in default_info.other_outputs: - cmd.hidden(output_artifact) + hidden_args.append(output_artifact) for config_file in config_files: - cmd.hidden(config_file) + hidden_args.append(config_file) - cmd.hidden(output_dir) + hidden_args.append(primary_toolchain.utility_modules) + hidden_args.append(output_dir) + cmd.add(cmd_args(hidden = hidden_args)) # prepare shell dependencies - additional_paths = [ + additional_shell_paths = [ dep[ErlangTestInfo].output_dir for dep in dependencies.values() if ErlangTestInfo in dep - ] + [output_dir] + ] + [primary_toolchain.utility_modules, output_dir] - preamble = '-eval "%s" \\' % (ctx.attrs.preamble) - additional_args = [cmd_args(preamble, "-noshell \\")] + # NB. We can't use `quote="shell"` since we need $REPO_ROOT to be expanded by the shell. 
+ # So we wrap everything in extra double-quotes to protect from spaces in the path + test_info_file_arg = cmd_args(test_info_file, format = '"<<\\"${REPO_ROOT}/{}\\">>"') + + additional_shell_args = cmd_args([ + cmd_args(["-test_cli_lib", "test_info_file", test_info_file_arg], delimiter = " "), + cmd_args("-eval", ctx.attrs.preamble, quote = "shell", delimiter = " "), + "-noshell", + ]) all_direct_shell_dependencies = check_dependencies([ctx.attrs._cli_lib], [ErlangAppInfo]) cli_lib_deps = flatten_dependencies(ctx, all_direct_shell_dependencies) @@ -243,9 +255,9 @@ def erlang_test_impl(ctx: AnalysisContext) -> list[Provider]: run_info = erlang_shell.build_run_info( ctx, - shell_deps.values(), - additional_paths = additional_paths, - additional_args = additional_args, + dependencies = shell_deps.values(), + additional_paths = additional_shell_paths, + additional_args = [additional_shell_args], ) re_executor = get_re_executor_from_props(ctx) @@ -257,7 +269,7 @@ def erlang_test_impl(ctx: AnalysisContext) -> list[Provider]: type = "erlang_test", command = [cmd], env = ctx.attrs.env, - labels = ["tpx-fb-test-type=16"] + ctx.attrs.labels, + labels = ctx.attrs.labels, contacts = ctx.attrs.contacts, run_from_project_root = True, use_project_relative_paths = True, @@ -271,13 +283,14 @@ def erlang_test_impl(ctx: AnalysisContext) -> list[Provider]: ] # Copied from erlang_application. -def _build_default_info(dependencies: ErlAppDependencies, output_dir: Artifact) -> Provider: +def _build_default_info(ctx: AnalysisContext, dependencies: ErlAppDependencies, output_dir: Artifact) -> Provider: """ generate default_outputs and DefaultInfo provider """ + primary_toolchain_name = get_primary(ctx) outputs = [] for dep in dependencies.values(): if ErlangAppInfo in dep and not dep[ErlangAppInfo].virtual: - outputs.append(dep[ErlangAppInfo].app_folder) + outputs.append(dep[ErlangAppInfo].app_folders[primary_toolchain_name]) if ErlangTestInfo in dep: outputs += dep[DefaultInfo].default_outputs outputs += dep[DefaultInfo].other_outputs @@ -285,47 +298,50 @@ def _build_default_info(dependencies: ErlAppDependencies, output_dir: Artifact) def _write_test_info_file( ctx: AnalysisContext, + extra_code_paths: list[Artifact], test_suite: str, dependencies: ErlAppDependencies, test_dir: Artifact, config_files: list[Artifact], erl_cmd: [cmd_args, Artifact]) -> Artifact: + dependency_paths = _list_code_paths(ctx, dependencies) + dependency_paths.extend(extra_code_paths) tests_info = { "artifact_annotation_mfa": ctx.attrs._artifact_annotation_mfa, + "common_app_env": ctx.attrs.common_app_env, "config_files": config_files, "ct_opts": ctx.attrs._ct_opts, - "dependencies": _list_code_paths(dependencies), - "erl_cmd": cmd_args(['"', cmd_args(erl_cmd, delimiter = " "), '"'], delimiter = ""), + "dependencies": dependency_paths, + "erl_cmd": erl_cmd, "extra_ct_hooks": ctx.attrs.extra_ct_hooks, + "extra_flags": ctx.attrs.extra_erl_flags, "providers": ctx.attrs._providers, "test_dir": test_dir, "test_suite": test_suite, } test_info_file = ctx.actions.declare_output("tests_info") - ctx.actions.write( - test_info_file, - to_term_args(tests_info), - ) + ctx.actions.write_json(test_info_file, tests_info) return test_info_file -def _list_code_paths(dependencies: ErlAppDependencies) -> list[cmd_args]: +def _list_code_paths(ctx: AnalysisContext, dependencies: ErlAppDependencies) -> list[[Artifact, cmd_args]]: """lists all ebin/ dirs from the test targets dependencies""" + primary_toolchain_name = get_primary(ctx) folders = [] for 
dependency in dependencies.values(): if ErlangAppInfo in dependency: dep_info = dependency[ErlangAppInfo] - if dep_info.virtual: - continue - folders.append(cmd_args( - dep_info.app_folder, - format = '"{}/ebin"', - )) + if not dep_info.virtual: + folders.append(cmd_args( + dep_info.app_folders[primary_toolchain_name], + format = "{}/ebin", + delimiter = "", + )) elif ErlangTestInfo in dependency: dep_info = dependency[ErlangTestInfo] - folders.append(cmd_args(dep_info.output_dir, format = '"{}"')) + folders.append(dep_info.output_dir) return folders -def _build_resource_dir(ctx, resources: list, target_dir: str) -> Artifact: +def _build_resource_dir(ctx: AnalysisContext, resources: list, target_dir: str) -> Artifact: """ build mapping for suite data directory generating the necessary mapping information for the suite data directory diff --git a/prelude/erlang/erlang_toolchain.bzl b/prelude/erlang/erlang_toolchain.bzl index 128ac60190854..b74e5ded53352 100644 --- a/prelude/erlang/erlang_toolchain.bzl +++ b/prelude/erlang/erlang_toolchain.bzl @@ -39,7 +39,9 @@ Toolchain = record( app_file_script = field(Artifact), boot_script_builder = field(Artifact), dependency_analyzer = field(Artifact), + dependency_finalizer = field(Artifact), erlc_trampoline = field(Artifact), + escript_trampoline = field(Artifact), escript_builder = field(Artifact), otp_binaries = field(Tools), release_variables_builder = field(Artifact), @@ -49,6 +51,7 @@ Toolchain = record( parse_transforms_filters = field(dict[str, list[str]]), edoc = field(Artifact), edoc_options = field(list[str]), + edoc_preprocess = field(list[str]), utility_modules = field(Artifact), env = field(dict[str, str]), ) @@ -60,8 +63,10 @@ ToolchainUtillInfo = provider( "boot_script_builder": provider_field(typing.Any, default = None), "core_parse_transforms": provider_field(typing.Any, default = None), "dependency_analyzer": provider_field(typing.Any, default = None), + "dependency_finalizer": provider_field(typing.Any, default = None), "edoc": provider_field(typing.Any, default = None), "erlc_trampoline": provider_field(typing.Any, default = None), + "escript_trampoline": provider_field(typing.Any, default = None), "escript_builder": provider_field(typing.Any, default = None), "release_variables_builder": provider_field(typing.Any, default = None), "include_erts": provider_field(typing.Any, default = None), @@ -91,8 +96,10 @@ def _multi_version_toolchain_impl(ctx: AnalysisContext) -> list[Provider]: app_file_script = toolchain_info.app_file_script, boot_script_builder = toolchain_info.boot_script_builder, dependency_analyzer = toolchain_info.dependency_analyzer, + dependency_finalizer = toolchain_info.dependency_finalizer, erl_opts = toolchain_info.erl_opts, erlc_trampoline = toolchain_info.erlc_trampoline, + escript_trampoline = toolchain_info.escript_trampoline, escript_builder = toolchain_info.escript_builder, otp_binaries = toolchain_info.otp_binaries, release_variables_builder = toolchain_info.release_variables_builder, @@ -102,6 +109,7 @@ def _multi_version_toolchain_impl(ctx: AnalysisContext) -> list[Provider]: parse_transforms_filters = toolchain_info.parse_transforms_filters, edoc = toolchain_info.edoc, edoc_options = toolchain_info.edoc_options, + edoc_preprocess = toolchain_info.edoc_preprocess, utility_modules = toolchain_info.utility_modules, env = toolchain_info.env, ) @@ -121,9 +129,6 @@ multi_version_toolchain_rule = rule( is_toolchain_rule = True, ) -def as_target(name: str) -> str: - return ":" + name - def 
_config_erlang_toolchain_impl(ctx: AnalysisContext) -> list[Provider]: """ rule for erlang toolchain """ @@ -132,14 +137,13 @@ def _config_erlang_toolchain_impl(ctx: AnalysisContext) -> list[Provider]: erl_opts = ctx.attrs.erl_opts.split() emu_flags = ctx.attrs.emu_flags.split() edoc_options = ctx.attrs.edoc_options.split() + edoc_preprocess = ctx.attrs.edoc_preprocess.split() # get otp binaries binaries_info = ctx.attrs.otp_binaries[ErlangOTPBinariesInfo] erl = cmd_args([binaries_info.erl] + emu_flags) - erlc = cmd_args(binaries_info.erlc) - escript = cmd_args(binaries_info.escript) - erlc.hidden(binaries_info.erl) - escript.hidden(binaries_info.erl) + erlc = cmd_args(binaries_info.erlc, hidden = binaries_info.erl) + escript = cmd_args(binaries_info.escript, hidden = binaries_info.erl) tools_binaries = ToolsBinaries( erl = binaries_info.erl, erlc = binaries_info.erl, @@ -180,10 +184,12 @@ def _config_erlang_toolchain_impl(ctx: AnalysisContext) -> list[Provider]: app_file_script = utils.app_src_script, boot_script_builder = utils.boot_script_builder, dependency_analyzer = utils.dependency_analyzer, + dependency_finalizer = utils.dependency_finalizer, erl_opts = erl_opts, env = ctx.attrs.env, emu_flags = emu_flags, erlc_trampoline = utils.erlc_trampoline, + escript_trampoline = utils.escript_trampoline, escript_builder = utils.escript_builder, otp_binaries = otp_binaries, release_variables_builder = utils.release_variables_builder, @@ -193,6 +199,7 @@ def _config_erlang_toolchain_impl(ctx: AnalysisContext) -> list[Provider]: parse_transforms_filters = ctx.attrs.parse_transforms_filters, edoc = utils.edoc, edoc_options = edoc_options, + edoc_preprocess = edoc_preprocess, utility_modules = utility_modules, ), ] @@ -262,7 +269,7 @@ def _gen_parse_transform_beam( erlc, "+deterministic", "-o", - cmd_args(output.as_output()).parent(), + cmd_args(output.as_output(), parent = 1), src, ]) ctx.actions.run(cmd, category = "erlc", identifier = src.short_path) @@ -273,6 +280,7 @@ config_erlang_toolchain_rule = rule( attrs = { "core_parse_transforms": attrs.list(attrs.dep(), default = ["@prelude//erlang/toolchain:transform_project_root"]), "edoc_options": attrs.string(default = ""), + "edoc_preprocess": attrs.string(default = ""), "emu_flags": attrs.string(default = ""), "env": attrs.dict(key = attrs.string(), value = attrs.string(), default = {}), "erl_opts": attrs.string(default = ""), @@ -298,7 +306,7 @@ def _gen_util_beams( erlc, "+deterministic", "-o", - cmd_args(output.as_output()).parent(), + cmd_args(output.as_output(), parent = 1), src, ], category = "erlc", @@ -350,8 +358,10 @@ def _toolchain_utils(ctx: AnalysisContext) -> list[Provider]: boot_script_builder = ctx.attrs.boot_script_builder, core_parse_transforms = ctx.attrs.core_parse_transforms, dependency_analyzer = ctx.attrs.dependency_analyzer, + dependency_finalizer = ctx.attrs.dependency_finalizer, edoc = ctx.attrs.edoc, erlc_trampoline = ctx.attrs.erlc_trampoline, + escript_trampoline = ctx.attrs.escript_trampoline, escript_builder = ctx.attrs.escript_builder, release_variables_builder = ctx.attrs.release_variables_builder, include_erts = ctx.attrs.include_erts, @@ -366,35 +376,13 @@ toolchain_utilities = rule( "boot_script_builder": attrs.source(), "core_parse_transforms": attrs.list(attrs.dep()), "dependency_analyzer": attrs.source(), + "dependency_finalizer": attrs.source(), "edoc": attrs.source(), "erlc_trampoline": attrs.source(), "escript_builder": attrs.source(), + "escript_trampoline": attrs.source(), "include_erts": 
attrs.source(), "release_variables_builder": attrs.source(), "utility_modules": attrs.list(attrs.source()), }, ) - -# Resources that need to be plugged in through toolchain// : -# - jsone - -toolchain_resources = rule( - impl = lambda ctx: [ - DefaultInfo( - sub_targets = { - "jsone": ctx.attrs.jsone.providers, - }, - ), - ], - attrs = { - "jsone": attrs.dep(), - }, - is_toolchain_rule = True, -) - -toolchain_resources_internal = rule( - impl = lambda ctx: ctx.attrs._resources.providers, - attrs = { - "_resources": attrs.toolchain_dep(default = "toolchains//:erlang-resources"), - }, -) diff --git a/prelude/erlang/erlang_utils.bzl b/prelude/erlang/erlang_utils.bzl index dcb20b3dfe59f..8ac863d6726c9 100644 --- a/prelude/erlang/erlang_utils.bzl +++ b/prelude/erlang/erlang_utils.bzl @@ -11,22 +11,17 @@ load( "Toolchain", # @unused Used as type ) -def normalise_metadata(data: [str, list[str]]) -> [cmd_args, list[cmd_args]]: - if type(data) == type([]): - return [cmd_args(item) for item in data] - else: - return cmd_args(data) - def to_term_args(data: typing.Any) -> cmd_args: """ convert nested lists/tuple/map data structure to Erlang Term cmd_args """ - args = cmd_args([]) - args.add(cmd_args([ - convert(data), - ".", - ], delimiter = "")) - args.add("") - return args + + return cmd_args( + cmd_args([ + convert(data), + ".", + ], delimiter = ""), + "", + ) # paths def app_file(ctx: AnalysisContext) -> str: @@ -48,7 +43,7 @@ build_paths = struct( linktree = linktree, ) -def convert(data: typing.Any) -> cmd_args: +def convert(data: typing.Any, ignore_artifacts: bool = False) -> cmd_args: """ converts a lists/tuple/map data structure to a sub-term that can be embedded in another to_term_args or convert """ if type(data) == "list": @@ -64,57 +59,50 @@ def convert(data: typing.Any) -> cmd_args: elif type(data) == "bool": return convert_bool(data) - args = cmd_args([]) - args.add(cmd_args(["\"", data, "\""], delimiter = "")) - return args + return cmd_args( + cmd_args(["\"", data, "\""], delimiter = ""), + ignore_artifacts = ignore_artifacts, + ) # internal def convert_list(ls: list, ob: str = "[", cb: str = "]") -> cmd_args: - args = cmd_args([]) - args.add(ob) + args = [] + args.append(ob) if len(ls) >= 1: - args.add(cmd_args([ + args.append(cmd_args([ convert(ls[0]), ], delimiter = "")) for item in ls[1:]: - args.add(cmd_args([ + args.append(cmd_args([ ",", convert(item), ], delimiter = "")) - args.add(cb) - return args + args.append(cb) + return cmd_args(args) def convert_dict(dt: dict) -> cmd_args: - args = cmd_args([]) - args.add("#{") + args = [] + args.append("#{") items = list(dt.items()) if len(items) >= 1: k, v = items[0] - args.add(cmd_args([ + args.append(cmd_args([ convert(k), "=>", convert(v), ], delimiter = "")) for k, v in items[1:]: - args.add(cmd_args([ + args.append(cmd_args([ ",", convert(k), "=>", convert(v), ], delimiter = "")) - args.add("}") - return args - -def convert_args(data: cmd_args) -> cmd_args: - args = cmd_args() - args.add("\"") - args.add(cmd_args(data, delimiter = " ")) - args.add("\"") - return args + args.append("}") + return cmd_args(args) def convert_string(st: str) -> cmd_args: - args = cmd_args() - return args.add(cmd_args(["\"", st.replace("\"", "\\\""), "\""], delimiter = "")) + return cmd_args(cmd_args(["\"", st.replace("\"", "\\\""), "\""], delimiter = "")) def convert_bool(bl: bool) -> cmd_args: if bl: @@ -141,15 +129,6 @@ def action_identifier(toolchain: Toolchain, name: str) -> str: """builds an action identifier parameterized by the toolchain""" 
return "%s(%s)" % (name, toolchain.name) -def str_to_bool(value: str) -> bool: - """convert string representation of bool to bool""" - if value == "True": - return True - elif value == "False": - return False - else: - fail("{} is not a valid boolean value") - def preserve_structure(path: str) -> dict[str, list[str]]: """Return a mapping from a path that preserves the filestructure relative to the path.""" all_files = glob([paths.join(path, "**")]) diff --git a/prelude/erlang/shell/BUCK b/prelude/erlang/shell/BUCK deleted file mode 100644 index 5f86cd6414ec9..0000000000000 --- a/prelude/erlang/shell/BUCK +++ /dev/null @@ -1,18 +0,0 @@ -erlang_application( - name = "buck2_shell_utils", - srcs = glob(["src/*.erl"]), - applications = [ - "kernel", - "stdlib", - ], - erl_opts = [ - "+debug_info", - "+warnings_as_errors", - ], - included_applications = [ - "prelude//erlang/common_test/test_exec:test_exec", - "prelude//erlang/toolchain:resources[jsone]", - ], - shell_libs = [], - visibility = ["PUBLIC"], -) diff --git a/prelude/erlang/shell/BUCK.v2 b/prelude/erlang/shell/BUCK.v2 new file mode 100644 index 0000000000000..29aa0288bf65a --- /dev/null +++ b/prelude/erlang/shell/BUCK.v2 @@ -0,0 +1,23 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +erlang_application( + name = "buck2_shell_utils", + srcs = glob(["src/*.erl"]), + applications = [ + "kernel", + "stdlib", + ], + erl_opts = [ + "+debug_info", + "+warnings_as_errors", + ], + included_applications = [ + "prelude//erlang/common_test/test_exec:test_exec", + ], + shell_libs = [], + visibility = ["PUBLIC"], +) diff --git a/prelude/erlang/shell/src/shell_buck2_module_search.erl b/prelude/erlang/shell/src/shell_buck2_module_search.erl new file mode 100644 index 0000000000000..2550f8e9756bf --- /dev/null +++ b/prelude/erlang/shell/src/shell_buck2_module_search.erl @@ -0,0 +1,96 @@ +%% Copyright (c) Meta Platforms, Inc. and affiliates. +%% +%% This source code is licensed under both the MIT license found in the +%% LICENSE-MIT file in the root directory of this source tree and the Apache +%% License, Version 2.0 found in the LICENSE-APACHE file in the root directory +%% of this source tree. + +%%%------------------------------------------------------------------- +%%% @doc +%%% Configurable hook for module discovery +%%% @end +%%% % @format + +-module(shell_buck2_module_search). + +-export([find_module/1, find_module_source/1]). + +-callback find_module_source(module()) -> + {source, file:filename_all()} + | {error, not_found | {ambiguous, [file:filename_all()]}}. + +-spec find_module(atom()) -> + available + | {source, file:filename_all()} + | {error, not_found | {ambiguous, [file:filename_all()]}}. +find_module(Module) -> + WantedModuleName = atom_to_list(Module), + case + [ + found + || {ModuleName, _, _} <- code:all_available(), + string:equal(WantedModuleName, ModuleName) + ] + of + [found] -> + available; + _ -> + _ = application:load(buck2_shell_utils), + % elp:ignore W0011 (application_get_env) + case application:get_env(buck2_shell_utils, search_module) of + {ok, Mod} -> + Mod:find_module_source(Module); + _ -> + find_module_source(Module) + end + end. + +-spec find_module_source(module()) -> + {source, file:filename_all()} + | {error, not_found | {ambiguous, [file:filename_all()]}}. 
+find_module_source(Module) -> + Root = shell_buck2_utils:cell_root(), + io:format("use ~s as root", [Root]), + {ok, Output} = shell_buck2_utils:run_command( + "find ~s -type d " + "\\( -path \"~s/_build*\" -path \"~s/erl/_build*\" -o -path ~s/buck-out \\) -prune " + "-o -name '~s.erl' -print", + [Root, Root, Root, Root, Module] + ), + case + [ + RelPath + || RelPath <- [ + string:prefix(Path, [Root, "/"]) + || Path <- string:split(Output, "\n", all) + ], + RelPath =/= nomatch, + string:prefix(RelPath, "buck-out") == nomatch, + string:str(binary_to_list(RelPath), "_build") == 0 + ] + of + [ModulePath] -> + {source, ModulePath}; + [] -> + {error, not_found}; + Candidates -> + %% check if there are actually targets associated + {ok, RawOutput} = shell_buck2_utils:buck2_query( + "owner(\\\"\%s\\\")", "--json", Candidates + ), + SourceTargetMapping = json:decode(RawOutput), + case + maps:fold( + fun + (_Source, [], Acc) -> Acc; + (Source, _, Acc) -> [Source | Acc] + end, + [], + SourceTargetMapping + ) + of + [] -> {error, not_found}; + [Source] -> {source, Source}; + More -> {error, {ambiguous, More}} + end + end. diff --git a/prelude/erlang/shell/src/shell_buck2_utils.erl b/prelude/erlang/shell/src/shell_buck2_utils.erl index baa88f12d3821..488e884d05909 100644 --- a/prelude/erlang/shell/src/shell_buck2_utils.erl +++ b/prelude/erlang/shell/src/shell_buck2_utils.erl @@ -17,10 +17,11 @@ %% Public API -export([ project_root/0, + cell_root/0, rebuild_modules/1, buck2_build_targets/1, buck2_query/1, buck2_query/2, buck2_query/3, - run_command/2, + run_command/2, run_command/3, get_additional_paths/1 ]). @@ -28,7 +29,15 @@ -spec project_root() -> file:filename(). project_root() -> - case run_command("buck2 root --kind=project 2>/dev/null", [], [{at_root, false}, {replay, false}]) of + root(project). + +-spec cell_root() -> file:filename(). +cell_root() -> + root(cell). + +-spec root(Type :: cell | project) -> file:filename(). +root(Type) -> + case run_command("buck2 root --kind=~s 2>/dev/null", [Type], [{at_root, false}, {replay, false}]) of {ok, Output} -> Dir = string:trim(Output), case filelib:is_dir(Dir) of @@ -39,20 +48,6 @@ project_root() -> error(failed_to_query_project_root) end. --spec project_cell() -> binary(). -project_cell() -> - ProjectRoot = project_root(), - case run_command("buck2 audit cell --json 2>/dev/null", [], [{replay, false}]) of - {ok, Output} -> - [ProjectCell] = [ - Cell - || {Cell, CellRoot} <- maps:to_list(jsone:decode(Output)), string:equal(ProjectRoot, CellRoot) - ], - ProjectCell; - error -> - error(failed_to_query_project_cell) - end. - -spec rebuild_modules([module()]) -> ok | error. rebuild_modules([]) -> ok; @@ -138,13 +133,10 @@ port_loop(Port, Replay, StdOut) -> -spec get_additional_paths(file:filename_all()) -> [file:filename_all()]. get_additional_paths(Path) -> - PrefixedPath = io_lib:format("~s//~s", [project_cell(), Path]), case run_command( "buck2 bxl --reuse-current-config --console super prelude//erlang/shell/shell.bxl:ebin_paths -- --source ~s", - [ - PrefixedPath - ] + [Path] ) of {ok, Output} -> diff --git a/prelude/erlang/shell/src/user_default.erl b/prelude/erlang/shell/src/user_default.erl index 4637cfd0b7a24..5390983a74d92 100644 --- a/prelude/erlang/shell/src/user_default.erl +++ b/prelude/erlang/shell/src/user_default.erl @@ -44,80 +44,15 @@ c(Module, _Options, _Filter) -> -spec l(module()) -> code:load_ret(). 
l(Module) -> - case find_module(Module) of + case shell_buck2_module_search:find_module(Module) of available -> c:l(Module); {source, RelSource} -> - Paths = shell_buck2_utils:get_additional_paths(RelSource), + AbsSource = filename:absname(RelSource), + Paths = shell_buck2_utils:get_additional_paths(AbsSource), ok = code:add_paths(Paths), ok = ct_daemon:push_paths(Paths), c:l(Module); Error -> Error end. - --spec find_module(module()) -> - available - | {source, file:filename_all()} - | {error, not_found | {ambiguous, [file:filename_all()]}}. -find_module(Module) -> - WantedModuleName = atom_to_list(Module), - case - [ - found - || {ModuleName, _, _} <- code:all_available(), - string:equal(WantedModuleName, ModuleName) - ] - of - [found] -> available; - _ -> find_module_source(Module) - end. - --spec find_module_source(module()) -> - {source, file:filename_all()} - | {error, not_found | {ambiguous, [file:filename_all()]}}. -find_module_source(Module) -> - Root = shell_buck2_utils:project_root(), - {ok, Output} = shell_buck2_utils:run_command( - "find ~s -type d " - "\\( -path \"~s/_build*\" -path \"~s/erl/_build*\" -o -path ~s/buck-out \\) -prune " - "-o -name '~s.erl' -print", - [Root, Root, Root, Root, Module] - ), - case - [ - RelPath - || RelPath <- [ - string:prefix(Path, [Root, "/"]) - || Path <- string:split(Output, "\n", all) - ], - RelPath =/= nomatch, - string:prefix(RelPath, "buck-out") == nomatch, - string:str(binary_to_list(RelPath), "_build") == 0 - ] - of - [ModulePath] -> - {source, ModulePath}; - [] -> - {error, not_found}; - Candidates -> - %% check if there are actually targets associated - {ok, RawOutput} = shell_buck2_utils:buck2_query( - "owner(\\\"\%s\\\")", "--json", Candidates - ), - SourceTargetMapping = jsone:decode(RawOutput), - case - maps:fold( - fun - (_Source, [], Acc) -> Acc; - (Source, _, Acc) -> [Source | Acc] - end, - [], - SourceTargetMapping - ) - of - [] -> {error, not_found}; - [Source] -> {source, Source}; - More -> {error, {ambiguous, More}} - end - end. 
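The block deleted above is the old in-place module search; it now lives in shell_buck2_module_search (added earlier in this diff), where find_module/1 first consults code:all_available/0 and then looks up the `search_module` key in the buck2_shell_utils application environment, delegating to that module's find_module_source/1 callback before falling back to the find(1)-based scan. A minimal sketch of such a callback module, assuming only the behaviour declared above; the names my_module_search and my_index are hypothetical and not part of this diff:

%% Hypothetical implementation of the shell_buck2_module_search behaviour.
-module(my_module_search).
-behaviour(shell_buck2_module_search).
-export([find_module_source/1]).

-spec find_module_source(module()) ->
    {source, file:filename_all()}
    | {error, not_found | {ambiguous, [file:filename_all()]}}.
find_module_source(Module) ->
    %% e.g. consult a prebuilt source index instead of shelling out to find(1)
    case my_index:lookup(Module) of
        [Path] -> {source, Path};
        [] -> {error, not_found};
        Paths -> {error, {ambiguous, Paths}}
    end.

%% Selected at runtime via:
%%   ok = application:set_env(buck2_shell_utils, search_module, my_module_search).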
diff --git a/prelude/erlang/toolchain/BUCK b/prelude/erlang/toolchain/BUCK deleted file mode 100644 index da40848c66993..0000000000000 --- a/prelude/erlang/toolchain/BUCK +++ /dev/null @@ -1,41 +0,0 @@ -load("@prelude//erlang:erlang_toolchain.bzl", "erlang_parse_transform", "toolchain_resources_internal", "toolchain_utilities") - -erlang_parse_transform( - name = "transform_project_root", - src = "transform_project_root.erl", - visibility = ["PUBLIC"], -) - -# export escripts for testing -filegroup( - name = "util_scripts", - srcs = glob([ - "*.escript", - "*.sh", - "*.erl", - ]), - visibility = ["PUBLIC"], -) - -toolchain_utilities( - name = "toolchain_utilities", - app_src_script = "app_src_builder.escript", - boot_script_builder = "boot_script_builder.escript", - core_parse_transforms = [":transform_project_root"], - dependency_analyzer = "dependency_analyzer.escript", - edoc = "edoc_cli.escript", - erlc_trampoline = "erlc_trampoline.sh", - escript_builder = "escript_builder.escript", - include_erts = "include_erts.escript", - release_variables_builder = "release_variables_builder.escript", - utility_modules = [ - "edoc_doclet_chunks.erl", - "edoc_report.erl", - ], - visibility = ["PUBLIC"], -) - -toolchain_resources_internal( - name = "resources", - visibility = ["PUBLIC"], -) diff --git a/prelude/erlang/toolchain/BUCK.v2 b/prelude/erlang/toolchain/BUCK.v2 new file mode 100644 index 0000000000000..ebeceeecb4e43 --- /dev/null +++ b/prelude/erlang/toolchain/BUCK.v2 @@ -0,0 +1,63 @@ +load("@prelude//erlang:erlang_toolchain.bzl", "erlang_parse_transform", "toolchain_utilities") +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +erlang_parse_transform( + name = "transform_project_root", + src = "transform_project_root.erl", + visibility = ["PUBLIC"], +) + +# export escripts for testing +filegroup( + name = "util_scripts", + srcs = glob([ + "*.escript", + "*.sh", + "*.erl", + ]), + visibility = ["PUBLIC"], +) + +toolchain_utilities( + name = "toolchain_utilities", + app_src_script = "app_src_builder.escript", + boot_script_builder = "boot_script_builder.escript", + core_parse_transforms = [":transform_project_root"], + dependency_analyzer = "dependency_analyzer.escript", + dependency_finalizer = "dependency_finalizer.escript", + edoc = "edoc_cli.escript", + erlc_trampoline = "erlc_trampoline.sh", + escript_builder = "escript_builder.escript", + escript_trampoline = "escript_trampoline.sh", + include_erts = "include_erts.escript", + release_variables_builder = "release_variables_builder.escript", + utility_modules = [ + "edoc_doclet_chunks.erl", + "edoc_report.erl", + "epp_dodger.erl", + "json.erl", + ], + visibility = ["PUBLIC"], +) + +# exported for use in shell & test binary, they have a runtime dependency on json +# can be removed once we're on OTP 27 +erlang_application( + name = "toolchain_json", + srcs = ["json.erl"], + applications = [ + "kernel", + "stdlib", + ], + erl_opts = [ + "+deterministic", + "+warnings_as_errors", + ], + shell_libs = [], + use_global_parse_transforms = False, + visibility = ["PUBLIC"], +) diff --git a/prelude/erlang/toolchain/app_src_builder.escript b/prelude/erlang/toolchain/app_src_builder.escript index 477a6f9576e0b..9b3ff831f2ca8 100644 --- a/prelude/erlang/toolchain/app_src_builder.escript +++ b/prelude/erlang/toolchain/app_src_builder.escript @@ -16,23 +16,22 @@ %%% .app.src file. 
%%% %%% usage: -%%% app_src_builder.escript app_info.term +%%% app_src_builder.escript app_info.json %%% -%%% app_info.term format: +%%% app_info.json format: %%% -%%% The file must contain only a single term which is a map -%%% with the following spec: +%%% The file must contain only a single JSON map with the following spec: %%% %%% #{ -%%% "name" := , -%%% "output" := , -%%% "sources" := [], -%%% "applications" := [], -%%% "included_applications" := I[], -%%% "template" => , -%%% "version" => , -%%% "env" => [application env variable], -%%% "metadata" => map of metadata +%%% <<"name">> := , +%%% <<"output">> := , +%%% <<"sources">> := [], +%%% <<"applications">> := [], +%%% <<"included_applications">> := I[], +%%% <<"template">> => , +%%% <<"version">> => , +%%% <<"env">> => [application env variable], +%%% <<"metadata">> => map of metadata %%% } %%% %%% @end @@ -58,7 +57,7 @@ main(_) -> -spec usage() -> ok. usage() -> - io:format("app_src_builder.escript app_info.term~n"). + io:format("app_src_builder.escript app_info.json~n"). -spec do(file:filename()) -> ok. do(AppInfoFile) -> @@ -73,7 +72,7 @@ do(AppInfoFile) -> mod := Mod, env := Env, metadata := Metadata - } = do_parse_app_info_file(AppInfoFile), + } = AppInfo = do_parse_app_info_file(AppInfoFile), VerifiedTerms = check_and_normalize_template( Name, Version, @@ -94,36 +93,37 @@ do(AppInfoFile) -> output := file:filename() }. do_parse_app_info_file(AppInfoFile) -> - case file:consult(AppInfoFile) of - {ok, [ - #{ - "name" := Name, - "output" := Output, - "sources" := Sources, - "applications" := Applications, - "included_applications" := IncludedApplications - } = Terms - ]} -> - Template = get_template(maps:get("template", Terms, undefined)), - Mod = get_mod(Name, maps:get("mod", Terms, undefined)), - Env = get_env(maps:get("env", Terms, undefined)), - Metadata = get_metadata(maps:get("metadata", Terms, undefined)), - #{ - name => Name, - sources => Sources, - vsn => maps:get("version", Terms, undefined), - output => Output, - template => Template, - applications => - normalize_application([list_to_atom(App) || App <- Applications]), - included_applications => - [list_to_atom(App) || App <- IncludedApplications], - mod => Mod, - env => Env, - metadata => Metadata - }; - {ok, Terms} -> - file_corrupt_error(AppInfoFile, Terms); + case file:read_file(AppInfoFile) of + {ok, Content} -> + case json:decode(Content) of + #{ + <<"name">> := Name, + <<"output">> := Output, + <<"sources">> := Sources, + <<"applications">> := Applications, + <<"included_applications">> := IncludedApplications + } = Terms -> + Template = get_template(maps:get(<<"template">>, Terms, undefined)), + Mod = get_mod(Name, maps:get(<<"mod">>, Terms, undefined)), + Env = get_env(Name, maps:get(<<"env">>, Terms, undefined)), + Metadata = get_metadata(Name, maps:get(<<"metadata">>, Terms, undefined)), + #{ + name => Name, + sources => Sources, + vsn => maps:get(<<"version">>, Terms, undefined), + output => Output, + template => Template, + applications => + normalize_application([binary_to_atom(App) || App <- Applications]), + included_applications => + [binary_to_atom(App) || App <- IncludedApplications], + mod => Mod, + env => Env, + metadata => Metadata + }; + Terms -> + file_corrupt_error(AppInfoFile, Terms) + end; Error -> open_file_error(AppInfoFile, Error) end. @@ -138,34 +138,50 @@ get_template(TemplateFile) -> Error -> open_file_error(TemplateFile, Error) end. --spec get_mod(string(), {string(), [string()]} | undefined) -> mod(). 
+-spec get_mod(binary(), [binary() | [binary()]] | undefined) -> mod(). get_mod(_, undefined) -> undefined; -get_mod(AppName, {ModuleName, StringArgs}) -> - ModString = unicode:characters_to_list([ - "{", ModuleName, ",[", lists:join(",", StringArgs), "]}." - ]), +get_mod(AppName, [ModuleName, StringArgs]) -> + parse_term( + AppName, + ["{", ModuleName, ",[", lists:join(",", StringArgs), "]}"], + "mod field" + ). + +-spec parse_term(binary(), iolist(), string()) -> term(). +parse_term(AppName, RawString, ErrorDescription) -> + String = unicode:characters_to_list([RawString | "."]), try - {ok, Tokens, _EndLine} = erl_scan:string(ModString), + {ok, Tokens, _EndLine} = erl_scan:string(String), {ok, Term} = erl_parse:parse_term(Tokens), Term catch - _:_ -> module_filed_error(AppName, ModString) + _:_ -> parse_error(AppName, String, ErrorDescription) end. --spec get_env(map() | undefined) -> [tuple()] | undefined. -get_env(undefined) -> undefined; -get_env(Env) -> - [{list_to_atom(K), V} || {K, V} <- maps:to_list(Env)]. - --spec get_metadata(map() | undefined) -> map(). -get_metadata(undefined) -> #{}; -get_metadata(Metadata) -> - maps:from_list([{list_to_atom(K), V} || {K, V} <- maps:to_list(Metadata)]). +-spec get_env(binary(), map() | undefined) -> [tuple()] | undefined. +get_env(_Name, undefined) -> + undefined; +get_env(Name, Env) -> + [ + {binary_to_atom(K), parse_term(Name, V, io_lib:format("env value for ~ts", [K]))} + || K := V <- maps:iterator(Env, ordered) + ]. + +-spec get_metadata(binary(), map() | undefined) -> map(). +get_metadata(_Name, undefined) -> #{}; +get_metadata(Name, Metadata) -> #{binary_to_atom(K) => normalize_metadata_value(Name, K, V) || K := V <- Metadata}. + +-spec normalize_metadata_value(binary(), binary(), binary() | [binary()]) -> atom() | [atom()]. +normalize_metadata_value(AppName, Key, Value) when is_binary(Value) -> + parse_term(AppName, Value, io_lib:format("metadata value for ~ts", [Key])); +normalize_metadata_value(AppName, Key, Values) when is_list(Values) -> + Value = ["[", lists:join(",", Values), "]"], + parse_term(AppName, Value, io_lib:format("metadata value for ~ts", [Key])). -spec check_and_normalize_template( - string(), - string() | undefined, + binary(), + binary() | undefined, term(), [atom()], [atom()], @@ -184,7 +200,7 @@ check_and_normalize_template( Env, Metadata ) -> - App = erlang:list_to_atom(AppName), + App = binary_to_atom(AppName), Props = case Terms of {application, App, P} when erlang:is_list(P) -> @@ -226,20 +242,19 @@ add_optional_fields(Props, [{K, V0} | Fields]) -> _ -> case V0 =:= V1 of true -> add_optional_fields(Props, Fields); - false -> - erlang:error(app_props_not_compatible, [{K, V0}, {K, V1}]) + false -> erlang:error(app_props_not_compatible, [{K, V0}, {K, V1}]) end end; add_optional_fields(Props, [Field | Fields]) -> add_optional_fields([Field | Props], Fields). --spec verify_app_props(string(), string(), [atom()], [atom()], proplists:proplist()) -> ok. +-spec verify_app_props(binary(), binary(), [atom()], [atom()], proplists:proplist()) -> ok. verify_app_props(AppName, Version, Applications, IncludedApplications, Props0) -> Props1 = verify_applications(AppName, Props0), %% ensure defaults ensure_fields(AppName, Version, Applications, IncludedApplications, Props1). --spec verify_applications(string(), proplists:proplist()) -> ok. +-spec verify_applications(binary(), proplists:proplist()) -> ok. 
verify_applications(AppName, AppDetail) -> case proplists:get_value(applications, AppDetail) of AppList when is_list(AppList) -> @@ -269,14 +284,14 @@ normalize_application(Applications) -> end, Kernel ++ StdLib ++ Applications. --spec ensure_fields(string(), string(), [atom()], [atom()], proplists:proplist()) -> +-spec ensure_fields(binary(), binary(), [atom()], [atom()], proplists:proplist()) -> proplists:proplist(). ensure_fields(AppName, Version, Applications, IncludedApplications, Props) -> %% default means to add the value if not existing %% match meand to overwrite if not existing and check otherwise for Defaults = [ {{registered, []}, default}, - {{vsn, Version}, match}, + {{vsn, binary_to_list(Version)}, match}, {{description, "missing description"}, default}, {{applications, Applications}, match}, {{included_applications, IncludedApplications}, match} @@ -310,7 +325,7 @@ ensure_fields(AppName, Version, Applications, IncludedApplications, Props) -> -spec render_app_file(string(), application_resource(), file:filename(), [file:filename()]) -> ok. render_app_file(AppName, Terms, Output, Srcs) -> - App = erlang:list_to_atom(AppName), + App = binary_to_atom(AppName), Modules = generate_modules(Srcs), {application, App, Props0} = Terms, %% remove modules key @@ -318,19 +333,19 @@ render_app_file(AppName, Terms, Output, Srcs) -> %% construct new terms Spec = {application, App, [{modules, Modules} | Props1]}, - ToWrite = io_lib:format("~p.\n", [Spec]), - file:write_file(Output, ToWrite, [raw]). + ToWrite = io_lib:format("~kp.\n", [Spec]), + ok = file:write_file(Output, ToWrite, [raw]). -spec generate_modules([file:filename()]) -> [atom()]. generate_modules(Sources) -> Modules = lists:foldl( fun(Source, Acc) -> case filename:extension(Source) of - ".hrl" -> + <<".hrl">> -> Acc; - Ext when Ext == ".erl" orelse Ext == ".xrl" orelse Ext == ".yrl" -> + Ext when Ext == <<".erl">> orelse Ext == <<".xrl">> orelse Ext == <<".yrl">> -> ModuleName = filename:basename(Source, Ext), - Module = erlang:list_to_atom(ModuleName), + Module = erlang:binary_to_atom(ModuleName), [Module | Acc]; _ -> unknown_extension_error(Source) @@ -362,7 +377,7 @@ file_corrupt_error(File, Contents) -> {abort, Msg} ). --spec value_match_error(string(), {atom(), term()}, {atom(), term()}) -> no_return(). +-spec value_match_error(binary(), {atom(), term()}, {atom(), term()}) -> no_return(). value_match_error(AppName, Wrong = {_, Value1}, Default = {_, Value2}) when is_list(Value1) andalso is_list(Value2) -> @@ -410,12 +425,12 @@ applications_type_error(AppName, Applications) -> {abort, Msg} ). --spec module_filed_error(string(), string()) -> no_return(). -module_filed_error(AppName, ModString) -> +-spec parse_error(string(), string(), string()) -> no_return(). +parse_error(AppName, String, Description) -> Msg = io_lib:format( - "error when building ~s.app for application ~s: could not parse value for module field: `~p`", + "error when building ~s.app for application ~s: could not parse value for ~ts: `~p`", [ - AppName, AppName, ModString + AppName, AppName, Description, String ] ), erlang:error( @@ -534,10 +549,11 @@ lcs([_SH | ST] = S, [_TH | TT] = T, Cache, Acc) -> -spec add_metadata(proplists:proplist(), map()) -> proplists:proplist(). add_metadata(Props, Metadata) -> ok = verify_metadata(Props, Metadata), - Props ++ maps:to_list(Metadata). + Props ++ maps:to_list(maps:iterator(Metadata, ordered)). -spec verify_metadata(proplists:proplist(), map()) -> ok. 
-verify_metadata([], _) -> ok; +verify_metadata([], _) -> + ok; verify_metadata([{K, V0} | T], Metadata) -> case maps:get(K, Metadata, undefined) of undefined -> diff --git a/prelude/erlang/toolchain/boot_script_builder.escript b/prelude/erlang/toolchain/boot_script_builder.escript index 06f00be01453b..5d853c13c5328 100644 --- a/prelude/erlang/toolchain/boot_script_builder.escript +++ b/prelude/erlang/toolchain/boot_script_builder.escript @@ -24,8 +24,6 @@ -export([main/1]). --mode(compile). - -define(EXITSUCCESS, 0). -define(EXITERROR, 1). diff --git a/prelude/erlang/toolchain/dependency_analyzer.escript b/prelude/erlang/toolchain/dependency_analyzer.escript index fd9a33a70e767..d087a06428281 100644 --- a/prelude/erlang/toolchain/dependency_analyzer.escript +++ b/prelude/erlang/toolchain/dependency_analyzer.escript @@ -15,21 +15,22 @@ %%% Extract direct dependencies from a given erl or hrl file %%% %%% usage: -%%% dependency_analyzer.escript some_file.(h|e)rl [out.json] +%%% dependency_analyzer.escript some_file.(h|e)rl [out.term] %%% %%% The output of the tool is written either to stdout, -%%% or a given output file. The JSON format is as follows: +%%% or a given output file. The output is JSON-encoded; decoding it +%%% (e.g. with json:decode/1) yields a list of maps of this shape: %%% ``` -%%% [{"type": "include" -%%% | "include_lib" -%%% | "behaviour" -%%% | "parse_transform" -%%% | "manual_dependency", -%%% "file": "header_or_source_file.(h|e)rl", -%%% ["app": "application"][only for "include_lib"] -%%% }, +%%% [#{<<"type">> := "include" +%%% | "include_lib" +%%% | "behaviour" +%%% | "parse_transform" +%%% | "manual_dependency", +%%% <<"file">> := "header_or_source_file.(h|e)rl", +%%% [<<"app">> => "application"][only for "include_lib"] +%%% }, %%% ... -%%% ] +%%% ] %%% ''' %%% @end @@ -74,8 +75,7 @@ %% -build_dependencies(Modules) -define(MATCH_MANUAL_DEPENDENCIES(Modules), - {tree, attribute, _, - {attribute, {tree, atom, _, build_dependencies}, [{tree, list, _, {list, Modules, none}}]}} + {tree, attribute, _, {attribute, {tree, atom, _, build_dependencies}, [{tree, list, _, {list, Modules, none}}]}} ). %% entry point @@ -90,16 +90,16 @@ main(_) -> -spec usage() -> ok. usage() -> - io:format("dependency_analyzer.escript some_file.(h|e)rl [out.json]"). + io:format("dependency_analyzer.escript some_file.(h|e)rl [out.term]"). -spec do(file:filename(), {file, file:filename()} | stdout) -> ok. do(InFile, Outspec) -> {ok, Forms} = epp_dodger:parse_file(InFile), - Dependencies = process_forms(Forms, []), - OutData = unicode:characters_to_binary(to_json_list(Dependencies)), + Dependencies = lists:sort(process_forms(Forms, [])), + OutData = unicode:characters_to_binary(json:encode(Dependencies)), case Outspec of {file, File} -> - file:write_file(File, OutData); + ok = file:write_file(File, OutData); stdout -> io:format("~s~n", [OutData]) end.
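With this hunk the analyzer stops hand-rolling JSON strings and instead emits a deterministically sorted list through json:encode/1 (the json module is vendored later in this diff). A minimal consumer sketch, assuming OTP 27 json:decode/1 semantics; the dep-file name is hypothetical:

    %% json:decode/1 returns maps with binary keys and binary string values.
    {ok, Bin} = file:read_file("foo.dep"),
    Deps = json:decode(Bin),
    %% e.g. #{<<"type">> => <<"include">>, <<"file">> => <<"foo.hrl">>}
    Includes = [F || #{<<"type">> := <<"include">>, <<"file">> := F} <- Deps].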
@@ -108,29 +108,33 @@ do(InFile, Outspec) -> process_forms([], Acc) -> Acc; process_forms([?MATCH_INCLUDE(Include) | Rest], Acc) -> - Dependency = #{"file" => filename:basename(Include), "type" => "include"}, + Dependency = #{<<"file">> => list_to_binary(filename:basename(Include)), <<"type">> => <<"include">>}, process_forms(Rest, [Dependency | Acc]); process_forms([?MATCH_INCLUDE_LIB(IncludeLib) | Rest], Acc) -> Dependency = case filename:split(IncludeLib) of [App, "include", Include] -> - #{"app" => App, "file" => Include, "type" => "include_lib"}; + #{ + <<"app">> => list_to_binary(App), + <<"file">> => list_to_binary(Include), + <<"type">> => <<"include_lib">> + }; _ -> error(malformed_header_include_lib) end, process_forms(Rest, [Dependency | Acc]); process_forms([?MATCH_BEHAVIOR(Module) | Rest], Acc) -> - Dependency = #{"file" => module_to_erl(Module), "type" => "behaviour"}, + Dependency = #{<<"file">> => module_to_erl(Module), <<"type">> => <<"behaviour">>}, process_forms(Rest, [Dependency | Acc]); process_forms([?MATCH_BEHAVIOUR(Module) | Rest], Acc) -> - Dependency = #{"file" => module_to_erl(Module), "type" => "behaviour"}, + Dependency = #{<<"file">> => module_to_erl(Module), <<"type">> => <<"behaviour">>}, process_forms(Rest, [Dependency | Acc]); process_forms([?MATCH_PARSETRANSFORM(Module) | Rest], Acc) -> - Dependency = #{"file" => module_to_erl(Module), "type" => "parse_transform"}, + Dependency = #{<<"file">> => module_to_erl(Module), <<"type">> => <<"parse_transform">>}, process_forms(Rest, [Dependency | Acc]); process_forms([?MATCH_MANUAL_DEPENDENCIES(Modules) | Rest], Acc) -> Dependencies = [ - #{"file" => module_to_erl(Module), "type" => "manual_dependency"} + #{<<"file">> => module_to_erl(Module), <<"type">> => <<"manual_dependency">>} || {tree, atom, _, Module} <- Modules ], process_forms(Rest, Dependencies ++ Acc); @@ -139,61 +143,4 @@ process_forms([_ | Rest], Acc) -> -spec module_to_erl(module()) -> file:filename(). module_to_erl(Module) -> - unicode:characters_to_list([atom_to_list(Module), ".erl"]). - -%%% -%%% JSON encoding: base-line escripts we use in our toolchain need to be dependency less -%%% - --spec to_json_list([#{string() => string()}]) -> string(). -to_json_list(Dependencies) -> - [ - "[", - string:join([json_encode_dependency(Dependency) || Dependency <- Dependencies], ","), - "]" - ]. - --spec json_encode_dependency(#{string() => string()}) -> string(). -json_encode_dependency(Dep) -> - Elements = maps:fold( - fun(Key, Value, Acc) -> - [[json_string_escape(Key), ":", json_string_escape(Value)] | Acc] - end, - [], - Dep - ), - ["{", string:join(Elements, ","), "}"]. - --spec json_string_escape(string()) -> string(). -json_string_escape(Str) -> - [ - "\"", - [json_escape_char(C) || C <- Str], - "\"" - ]. - --spec json_escape_char(non_neg_integer()) -> non_neg_integer() | string(). 
-json_escape_char($\") -> - [$\\, $\"]; -json_escape_char($\\) -> - [$\\, $\\]; -json_escape_char($\/) -> - [$\\, $\/]; -json_escape_char($\b) -> - [$\\, $\b]; -json_escape_char($\f) -> - [$\\, $\f]; -json_escape_char($\n) -> - [$\\, $\n]; -json_escape_char($\r) -> - [$\\, $\r]; -json_escape_char($\t) -> - [$\\, $\t]; -json_escape_char(C) when C >= 16#20 andalso C =< 16#10FFFF -> - %% unescaped, 16#5C (\) and 16#22 (") are handled above - C; -json_escape_char(C) when C < 16#10000 -> - io_lib:format("\\u~s", [string:pad(integer_to_list(C, 16), 4, leading, " ")]); -json_escape_char(_) -> - %% TODO: support extended unicode characters - error(utf8_extended_character_not_supported). + unicode:characters_to_binary([atom_to_list(Module), ".erl"]). diff --git a/prelude/erlang/toolchain/dependency_finalizer.escript b/prelude/erlang/toolchain/dependency_finalizer.escript new file mode 100644 index 0000000000000..eb4574192f4a5 --- /dev/null +++ b/prelude/erlang/toolchain/dependency_finalizer.escript @@ -0,0 +1,92 @@ +%%% % @format +%%% Copyright (c) Meta Platforms, Inc. and affiliates. +%%% +%%% This source code is licensed under both the MIT license found in the +%%% LICENSE-MIT file in the root directory of this source tree and the Apache +%%% License, Version 2.0 found in the LICENSE-APACHE file in the root directory +%%% of this source tree. + +-module(dependency_finalizer). +-author("loscher@meta.com"). + +-type dep_files_data() :: #{file:filename() => #{string() := file:filename()}}. + +-spec main([string()]) -> ok | no_return(). +main([Source, InFile]) -> + do(Source, InFile, stdout); +main([Source, InFile, OutFile]) -> + do(Source, InFile, {file, OutFile}); +main(_) -> + usage(), + erlang:halt(1). + +-spec usage() -> ok. +usage() -> + io:format("~s.escript dependency_spec.term [out.json]", [?MODULE]). + +-spec do(file:filename(), file:filename(), {file, file:filename()} | stdout) -> ok. +do(Source, InFile, OutSpec) -> + case read_file(InFile) of + {ok, DepFiles} -> + Dependencies = build_dep_info(Source, DepFiles), + OutData = unicode:characters_to_binary(json:encode(Dependencies)), + case OutSpec of + {file, File} -> + ok = file:write_file(File, OutData); + stdout -> + io:format("~s~n", [OutData]) + end; + Err -> + io:format(standard_error, "error, could no parse file correctly: ~p~n", [Err]), + erlang:halt(1) + end. + +-spec read_file(file:filename()) -> {ok, dep_files_data()} | {error, term()}. +read_file(File) -> + case file:read_file(File) of + {ok, Data} -> + {ok, json:decode(Data)}; + Err -> + Err + end. + +-spec build_dep_info(file:filename(), dep_files_data()) -> ok. +build_dep_info(Source, DepFiles) -> + Key = list_to_binary(filename:basename(Source, ".erl") ++ ".beam"), + collect_dependencies([Key], DepFiles, sets:new([{version, 2}]), []). 
+ +collect_dependencies([], _, _, Acc) -> + Acc; +collect_dependencies([Key | Rest], DepFiles, Visited, Acc) -> + case DepFiles of + #{Key := #{<<"dep_file">> := DepFile}} -> + {ok, Dependencies} = read_file(DepFile), + {NextKeys, NextVisited, NextAcc} = lists:foldl( + fun(#{<<"file">> := File} = Dep, {KeysAcc, VisitedAcc, DepAcc}) -> + NextKey = key(File), + case sets:is_element(NextKey, VisitedAcc) of + true -> {KeysAcc, VisitedAcc, DepAcc}; + false -> {[NextKey | KeysAcc], sets:add_element(Key, VisitedAcc), [Dep | DepAcc]} + end + end, + {Rest, Visited, Acc}, + Dependencies + ), + collect_dependencies( + NextKeys, + DepFiles, + NextVisited, + NextAcc + ); + _ -> + %% We cannot find key in DepFiles, which means it's from OTP or missing + %% We don't add anything to the dependencies and let the compiler fail for a proper error message. + collect_dependencies(Rest, DepFiles, Visited, Acc) + end. + +-spec key(string()) -> string(). +key(FileName) -> + case filename:extension(FileName) of + ".erl" -> filename:basename(FileName, ".erl") ++ ".beam"; + _ -> FileName + end. diff --git a/prelude/erlang/toolchain/edoc_cli.escript b/prelude/erlang/toolchain/edoc_cli.escript index ee01eac47af5f..80a1a5c423d6d 100644 --- a/prelude/erlang/toolchain/edoc_cli.escript +++ b/prelude/erlang/toolchain/edoc_cli.escript @@ -21,8 +21,6 @@ -module(edoc_cli). -export([main/1]). --mode(compile). - main([]) -> print(usage()); main(Args) -> @@ -74,7 +72,7 @@ remove_loggers() -> [logger:remove_handler(H) || H <- logger:get_handler_ids()]. generate_empty_chunk(File, OutputDir) -> - file:write_file( + ok = file:write_file( chunk_path(File, OutputDir), erlang:term_to_binary(failed_to_build_doc_chunk) ). @@ -87,7 +85,7 @@ verify_files_exist(#{files := Files, out_dir := OutputDir}) -> true -> true; false -> - io:format(standard_error, "error: coudn't generate ~s~n", [ChunkPath]), + io:format(standard_error, "error: couldn't generate ~s~n", [ChunkPath]), false end end, diff --git a/prelude/erlang/toolchain/epp_dodger.erl b/prelude/erlang/toolchain/epp_dodger.erl new file mode 100644 index 0000000000000..52193e10bc885 --- /dev/null +++ b/prelude/erlang/toolchain/epp_dodger.erl @@ -0,0 +1,944 @@ +%% A temporary port of the official OTP epp_dodger from OTP 27, +%% so that EDoc can also be computed for OTP 26 in presence of the +%% maybe operator. See https://github.com/erlang/otp/issues/7266 +%% ===================================================================== +%% Licensed under the Apache License, Version 2.0 (the "License"); you may +%% not use this file except in compliance with the License. You may obtain +%% a copy of the License at +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% Alternatively, you may use this file under the terms of the GNU Lesser +%% General Public License (the "LGPL") as published by the Free Software +%% Foundation; either version 2.1, or (at your option) any later version. +%% If you wish to allow use of your version of this file only under the +%% terms of the LGPL, you should delete the provisions above and replace +%% them with the notice and other provisions required by the LGPL; see +%% . 
If you do not delete the provisions +%% above, a recipient may use your version of this file under the terms of +%% either the Apache License or the LGPL. +%% +%% @copyright 2001-2006 Richard Carlsson +%% @author Richard Carlsson +%% @end +%% ===================================================================== + +%% @doc `epp_dodger' - bypasses the Erlang preprocessor. +%% +%% <p>This module tokenises and parses most Erlang source code without +%% expanding preprocessor directives and macro applications, as long as +%% these are syntactically "well-behaved". Because the normal parse +%% trees of the `erl_parse' module cannot represent these things +%% (normally, they are expanded by the Erlang preprocessor {@link +%% //stdlib/epp} before the parser sees them), an extended syntax tree +%% is created, using the {@link erl_syntax} module.</p>
    + + +%% NOTES: +%% +%% * It's OK if the result does not parse - then at least nothing +%% strange happens, and the user can resort to full preprocessing. +%% However, we must avoid generating a token stream that is accepted by +%% the parser, but has a different meaning than the intended. A typical +%% example is when someone uses token-level string concatenation with +%% macros, as in `"foo" ?bar' (where `?bar' expands to a string). If we +%% replace the tokens `? bar' with `( ... )', to preserve precedence, +%% the result will be parsed as an application `"foo" ( ... )' and cause +%% trouble later on. We must detect such cases and report an error. +%% +%% * It is pointless to add a mechanism for tracking which macros are +%% known to take arguments, and which are known to take no arguments, +%% since a lot of the time we will not have seen the macro definition +%% anyway (it's usually in a header file). Hence, we try to use +%% heuristics instead. In most cases, the token sequence `? foo (' +%% indicates that it is a call of a macro that is supposed to take +%% arguments, but e.g., in the context `: ? foo (', the argument list +%% typically belongs to a remote function call, as in `m:?f(...)' and +%% should be parsed as `m:(?f)(...)' unless it is actually a try-clause +%% pattern such as `throw:?f(...) ->'. +%% +%% * We do our best to make macros without arguments pass the parsing +%% stage transparently. Atoms are accepted in most contexts, but +%% variables are not, so we use only atoms to encode these macros. +%% Sadly, the parsing sometimes discards even the location info from +%% atom tokens, so we can only use the actual characters for this. +%% +%% * We recognize `?m(...' at the start of a form and prevent this from +%% being interpreted as a macro with arguments, since it is probably a +%% function definition. Likewise with attributes `-?m(...'. + +-module(epp_dodger). + +-export([parse_file/1, quick_parse_file/1, parse_file/2, + quick_parse_file/2, parse/1, quick_parse/1, parse/2, + quick_parse/2, parse/3, quick_parse/3, parse_form/2, + parse_form/3, quick_parse_form/2, quick_parse_form/3, + format_error/1, tokens_to_string/1]). + + +%% The following should be: 1) pseudo-uniquely identifiable, and 2) +%% cause nice looking error messages when the parser has to give up. + +-define(macro_call, '? ('). +-define(atom_prefix, "? "). +-define(var_prefix, "?,"). +-define(pp_form, '?preprocessor declaration?'). + + +%% @type errorinfo() = //stdlib/erl_scan:error_info(). +%% +%% This is a so-called Erlang I/O ErrorInfo structure; see the {@link +%% //stdlib/io} module for details. + +-type errorinfo() :: erl_scan:error_info(). + +-type option() :: atom() | {atom(), term()}. + +%% ===================================================================== +%% @spec parse_file(File) -> {ok, Forms} | {error, errorinfo()} +%% File = file:filename() +%% Forms = [erl_syntax:syntaxTree()] +%% +%% @equiv parse_file(File, []) + +-spec parse_file(file:filename()) -> + {'ok', erl_syntax:forms()} | {'error', errorinfo()}. + +parse_file(File) -> + parse_file(File, []). + +%% @spec parse_file(File, Options) -> {ok, Forms} | {error, errorinfo()} +%% File = file:filename() +%% Options = [term()] +%% Forms = [erl_syntax:syntaxTree()] +%% +%% @doc Reads and parses a file. If successful, `{ok, Forms}' +%% is returned, where `Forms' is a list of abstract syntax +%% trees representing the "program forms" of the file (cf. +%% `erl_syntax:is_form/1'). 
Otherwise, `{error, errorinfo()}' is +%% returned, typically if the file could not be opened. Note that +%% parse errors show up as error markers in the returned list of +%% forms; they do not cause this function to fail or return +%% `{error, errorinfo()}'. +%% +%% Options: +%% <dl> +%%   <dt>{@type {no_fail, boolean()@}}</dt> +%%   <dd>If `true', this makes `epp_dodger' replace any program forms +%% that could not be parsed with nodes of type `text' (see {@link +%% erl_syntax:text/1}), representing the raw token sequence of the +%% form, instead of reporting a parse error. The default value is +%% `false'.</dd> +%%   <dt>{@type {clever, boolean()@}}</dt> +%%   <dd>If set to `true', this makes `epp_dodger' try to repair the +%% source code as it seems fit, in certain cases where parsing would +%% otherwise fail. Currently, it inserts `++'-operators between string +%% literals and macros where it looks like concatenation was intended. +%% The default value is `false'.</dd> +%% </dl>
    +%% +%% @see parse/2 +%% @see quick_parse_file/1 +%% @see erl_syntax:is_form/1 + +-spec parse_file(file:filename(), [option()]) -> + {'ok', erl_syntax:forms()} | {'error', errorinfo()}. + +parse_file(File, Options) -> + parse_file(File, fun parse/3, Options). + +%% @spec quick_parse_file(File) -> {ok, Forms} | {error, errorinfo()} +%% File = file:filename() +%% Forms = [erl_syntax:syntaxTree()] +%% +%% @equiv quick_parse_file(File, []) + +-spec quick_parse_file(file:filename()) -> + {'ok', erl_syntax:forms()} | {'error', errorinfo()}. + +quick_parse_file(File) -> + quick_parse_file(File, []). + +%% @spec quick_parse_file(File, Options) -> +%% {ok, Forms} | {error, errorinfo()} +%% File = file:filename() +%% Options = [term()] +%% Forms = [erl_syntax:syntaxTree()] +%% +%% @doc Similar to {@link parse_file/2}, but does a more quick-and-dirty +%% processing of the code. Macro definitions and other preprocessor +%% directives are discarded, and all macro calls are replaced with +%% atoms. This is useful when only the main structure of the code is of +%% interest, and not the details. Furthermore, the quick-parse method +%% can usually handle more strange cases than the normal, more exact +%% parsing. +%% +%% Options: see {@link parse_file/2}. Note however that for +%% `quick_parse_file/2', the option `no_fail' is `true' by default. +%% +%% @see quick_parse/2 +%% @see parse_file/2 + +-spec quick_parse_file(file:filename(), [option()]) -> + {'ok', erl_syntax:forms()} | {'error', errorinfo()}. + +quick_parse_file(File, Options) -> + parse_file(File, fun quick_parse/3, Options ++ [no_fail]). + +parse_file(File, Parser, Options) -> + case do_parse_file(utf8, File, Parser, Options) of + {ok, Forms}=Ret -> + case find_invalid_unicode(Forms) of + none -> + Ret; + invalid_unicode -> + case epp:read_encoding(File) of + utf8 -> + Ret; + _ -> + do_parse_file(latin1, File, Parser, Options) + end + end; + Else -> + Else + end. + +do_parse_file(DefEncoding, File, Parser, Options) -> + case file:open(File, [read]) of + {ok, Dev} -> + _ = epp:set_encoding(Dev, DefEncoding), + try Parser(Dev, 1, Options) + after ok = file:close(Dev) + end; + {error, Error} -> + {error, {0, file, Error}} % defer to file:format_error/1 + end. + +find_invalid_unicode([H|T]) -> + case H of + {error, {_Location, file_io_server, invalid_unicode}} -> + invalid_unicode; + _Other -> + find_invalid_unicode(T) + end; +find_invalid_unicode([]) -> none. + +%% ===================================================================== +%% @spec parse(IODevice) -> {ok, Forms} | {error, errorinfo()} +%% @equiv parse(IODevice, 1) + +-spec parse(file:io_device()) -> {'ok', erl_syntax:forms()}. + +parse(Dev) -> + parse(Dev, 1). + +%% @spec parse(IODevice, StartLocation) -> {ok, Forms} | {error, errorinfo()} +%% IODevice = pid() +%% StartLocation = //stdlib/erl_anno:location() +%% Forms = [erl_syntax:syntaxTree()] +%% +%% @equiv parse(IODevice, StartLocation, []) +%% @see parse/1 + +-spec parse(file:io_device(), erl_anno:location()) -> {'ok', erl_syntax:forms()}. + +parse(Dev, L) -> + parse(Dev, L, []). + +%% @spec parse(IODevice, StartLocation, Options) -> +%% {ok, Forms} | {error, errorinfo()} +%% IODevice = pid() +%% StartLocation = //stdlib/erl_anno:location() +%% Options = [term()] +%% Forms = [erl_syntax:syntaxTree()] +%% +%% @doc Reads and parses program text from an I/O stream. Characters are +%% read from `IODevice' until end-of-file; apart from this, the +%% behaviour is the same as for {@link parse_file/2}. 
`StartLocation' is the +%% initial location. +%% +%% @see parse/2 +%% @see parse_file/2 +%% @see parse_form/2 +%% @see quick_parse/3 + +-spec parse(file:io_device(), erl_anno:location(), [option()]) -> + {'ok', erl_syntax:forms()}. + +parse(Dev, L0, Options) -> + parse(Dev, L0, fun parse_form/3, Options). + +%% @spec quick_parse(IODevice) -> {ok, Forms} | {error, errorinfo()} +%% @equiv quick_parse(IODevice, 1) + +-spec quick_parse(file:io_device()) -> + {'ok', erl_syntax:forms()}. + +quick_parse(Dev) -> + quick_parse(Dev, 1). + +%% @spec quick_parse(IODevice, StartLocation) -> +%% {ok, Forms} | {error, errorinfo()} +%% IODevice = pid() +%% StartLocation = //stdlib/erl_anno:location() +%% Forms = [erl_syntax:syntaxTree()] +%% +%% @equiv quick_parse(IODevice, StartLocation, []) +%% @see quick_parse/1 + +-spec quick_parse(file:io_device(), erl_anno:location()) -> + {'ok', erl_syntax:forms()}. + +quick_parse(Dev, L) -> + quick_parse(Dev, L, []). + +%% @spec (IODevice, StartLocation, Options) -> +%% {ok, Forms} | {error, errorinfo()} +%% IODevice = pid() +%% StartLocation = //stdlib/erl_anno:location() +%% Options = [term()] +%% Forms = [erl_syntax:syntaxTree()] +%% +%% @doc Similar to {@link parse/3}, but does a more quick-and-dirty +%% processing of the code. See {@link quick_parse_file/2} for details. +%% +%% @see quick_parse/2 +%% @see quick_parse_file/2 +%% @see quick_parse_form/2 +%% @see parse/3 + +-spec quick_parse(file:io_device(), erl_anno:location(), [option()]) -> + {'ok', erl_syntax:forms()}. + +quick_parse(Dev, L0, Options) -> + parse(Dev, L0, fun quick_parse_form/3, Options). + +parse(Dev, L0, Parser, Options) -> + parse(Dev, L0, [], Parser, Options). + +parse(Dev, L0, Fs, Parser, Options) -> + case Parser(Dev, L0, Options) of + {ok, none, L1} -> + parse(Dev, L1, Fs, Parser, Options); + {ok, F, L1} -> + parse(Dev, L1, [F | Fs], Parser, Options); + {error, IoErr, L1} -> + parse(Dev, L1, [{error, IoErr} | Fs], Parser, Options); + {eof, _L1} -> + {ok, lists:reverse(Fs)} + end. + + +%% ===================================================================== +%% @spec parse_form(IODevice, StartLocation) -> {ok, Form, Location} +%% | {eof, Location} +%% | {error, errorinfo(), Location} +%% IODevice = pid() +%% StartLocation = //stdlib/erl_anno:location() +%% Form = erl_syntax:syntaxTree() +%% Location = //stdlib/erl_anno:location() +%% +%% @equiv parse_form(IODevice, StartLocation, []) +%% +%% @see quick_parse_form/2 + +-spec parse_form(file:io_device(), erl_anno:location()) -> + {'ok', erl_syntax:forms(), erl_anno:location()} + | {'eof', erl_anno:location()} | {'error', errorinfo(), erl_anno:location()}. + +parse_form(Dev, L0) -> + parse_form(Dev, L0, []). + +%% @spec parse_form(IODevice, StartLocation, Options) -> +%% {ok, Form, Location} +%% | {eof, Location} +%% | {error, errorinfo(), Location} +%% +%% IODevice = pid() +%% StartLocation = //stdlib/erl_anno:location() +%% Options = [term()] +%% Form = erl_syntax:syntaxTree() +%% Location = //stdlib/erl_anno:location() +%% +%% @doc Reads and parses a single program form from an I/O stream. +%% Characters are read from `IODevice' until an end-of-form +%% marker is found (a period character followed by whitespace), or until +%% end-of-file; apart from this, the behaviour is similar to that of +%% `parse/3', except that the return values also contain the +%% final location given that `StartLocation' is the initial +%% location, and that `{eof, Location}' may be returned. 
+%% +%% @see parse/3 +%% @see parse_form/2 +%% @see quick_parse_form/3 + +-spec parse_form(file:io_device(), erl_anno:location(), [option()]) -> + {'ok', erl_syntax:forms(), erl_anno:location()} + | {'eof', erl_anno:location()} | {'error', errorinfo(), erl_anno:location()}. + +parse_form(Dev, L0, Options) -> + parse_form(Dev, L0, fun normal_parser/2, Options). + +%% @spec quick_parse_form(IODevice, StartLocation) -> +%% {ok, Form, Location} +%% | {eof, Location} +%% | {error, errorinfo(), Location} +%% IODevice = pid() +%% StartLocation = //stdlib/erl_anno:location() +%% Form = erl_syntax:syntaxTree() | none +%% Location = //stdlib/erl_anno:location() +%% +%% @equiv quick_parse_form(IODevice, StartLocation, []) +%% +%% @see parse_form/2 + +-spec quick_parse_form(file:io_device(), erl_anno:location()) -> + {'ok', erl_syntax:forms(), erl_anno:location()} + | {'eof', erl_anno:location()} | {'error', errorinfo(), erl_anno:location()}. + +quick_parse_form(Dev, L0) -> + quick_parse_form(Dev, L0, []). + +%% @spec quick_parse_form(IODevice, StartLocation, Options) -> +%% {ok, Form, Location} +%% | {eof, Location} +%% | {error, errorinfo(), Location} +%% +%% IODevice = pid() +%% StartLocation = //stdlib/erl_anno:location() +%% Options = [term()] +%% Form = erl_syntax:syntaxTree() +%% Location = //stdlib/erl_anno:location() +%% +%% @doc Similar to {@link parse_form/3}, but does a more quick-and-dirty +%% processing of the code. See {@link quick_parse_file/2} for details. +%% +%% @see parse/3 +%% @see quick_parse_form/2 +%% @see parse_form/3 + +-spec quick_parse_form(file:io_device(), erl_anno:location(), [option()]) -> + {'ok', erl_syntax:forms(), erl_anno:location()} + | {'eof', erl_anno:location()} | {'error', errorinfo(), erl_anno:location()}. + +quick_parse_form(Dev, L0, Options) -> + parse_form(Dev, L0, fun quick_parser/2, Options). + +-record(opt, {clever = false :: boolean()}). + +parse_form(Dev, L0, Parser, Options) -> + NoFail = proplists:get_bool(no_fail, Options), + Opt = #opt{clever = proplists:get_bool(clever, Options)}, + + %% This as the *potential* to read options for enabling/disabling + %% features for the parsing of the file. + {ok, {_Ftrs, ResWordFun}} = + erl_features:keyword_fun(Options, fun reserved_word/1), + + case io:scan_erl_form(Dev, "", L0, [{reserved_word_fun,ResWordFun}]) of + {ok, Ts, L1} -> + case catch {ok, Parser(Ts, Opt)} of + {'EXIT', Term} -> + {error, io_error(L1, {unknown, Term}), L1}; + {error, Term} -> + IoErr = io_error(L1, Term), + {error, IoErr, L1}; + {parse_error, _IoErr} when NoFail -> + {ok, erl_syntax:set_pos( + erl_syntax:text(tokens_to_string(Ts)), + erl_anno:new(start_pos(Ts, L1))), + L1}; + {parse_error, IoErr} -> + {error, IoErr, L1}; + {ok, F} -> + {ok, F, L1} + end; + {error, _IoErr, _L1} = Err -> Err; + {error, _Reason} -> {eof, L0}; % This is probably encoding problem + {eof, _L1} = Eof -> Eof + end. + +io_error(L, Desc) -> + {L, ?MODULE, Desc}. + +start_pos([T | _Ts], _L) -> + erl_anno:location(element(2, T)); +start_pos([], L) -> + L. + +%% Exception-throwing wrapper for the standard Erlang parser stage + +parse_tokens(Ts) -> + parse_tokens(Ts, fun fix_form/1). + +parse_tokens(Ts, Fix) -> + case erl_parse:parse_form(Ts) of + {ok, Form} -> + Form; + {error, IoErr} -> + case Fix(Ts) of + {form, Form} -> + Form; + {retry, Ts1, Fix1} -> + parse_tokens(Ts1, Fix1); + error -> + throw({parse_error, IoErr}) + end + end. 
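The catch-based dispatch in parse_form/4 above is what makes the `no_fail' option work: a parser that throws {parse_error, IoErr} gets downgraded to a `text' node holding the raw token string instead of aborting the parse. A usage sketch (hypothetical file name; relies only on the behaviour documented above):

    %% Forms that defeat the dodger come back as `text' nodes:
    {ok, Forms} = epp_dodger:parse_file("tricky.erl", [no_fail]),
    [erl_syntax:type(F) || F <- Forms].
    %% e.g. [attribute, function, text]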
+ +%% --------------------------------------------------------------------- +%% Quick scanning/parsing - deletes macro definitions and other +%% preprocessor directives, and replaces all macro calls with atoms. + +quick_parser(Ts, _Opt) -> + filter_form(parse_tokens(quickscan_form(Ts))). + +quickscan_form([{'-', _Anno}, {atom, AnnoA, define} | _Ts]) -> + kill_form(AnnoA); +quickscan_form([{'-', _Anno}, {atom, AnnoA, undef} | _Ts]) -> + kill_form(AnnoA); +quickscan_form([{'-', _Anno}, {atom, AnnoA, include} | _Ts]) -> + kill_form(AnnoA); +quickscan_form([{'-', _Anno}, {atom, AnnoA, include_lib} | _Ts]) -> + kill_form(AnnoA); +quickscan_form([{'-', _Anno}, {atom, AnnoA, ifdef} | _Ts]) -> + kill_form(AnnoA); +quickscan_form([{'-', _Anno}, {atom, AnnoA, ifndef} | _Ts]) -> + kill_form(AnnoA); +quickscan_form([{'-', _Anno}, {'if', AnnoA} | _Ts]) -> + kill_form(AnnoA); +quickscan_form([{'-', _Anno}, {atom, AnnoA, elif} | _Ts]) -> + kill_form(AnnoA); +quickscan_form([{'-', _Anno}, {atom, AnnoA, 'else'} | _Ts]) -> + kill_form(AnnoA); +quickscan_form([{'-', _Anno}, {'else', AnnoA} | _Ts]) -> + kill_form(AnnoA); +quickscan_form([{'-', _Anno}, {atom, AnnoA, endif} | _Ts]) -> + kill_form(AnnoA); +quickscan_form([{'-', _Anno}, {atom, AnnoA, feature} | _Ts]) -> + kill_form(AnnoA); +quickscan_form([{'-', Anno}, {'?', _}, {Type, _, _}=N | [{'(', _} | _]=Ts]) + when Type =:= atom; Type =:= var -> + %% minus, macro and open parenthesis at start of form - assume that + %% the macro takes no arguments; e.g. `-?foo(...).' + quickscan_macros_1(N, Ts, [{'-', Anno}]); +quickscan_form([{'?', _Anno}, {Type, _, _}=N | [{'(', _} | _]=Ts]) + when Type =:= atom; Type =:= var -> + %% macro and open parenthesis at start of form - assume that the + %% macro takes no arguments (see scan_macros for details) + quickscan_macros_1(N, Ts, []); +quickscan_form(Ts) -> + quickscan_macros(Ts). + +kill_form(A) -> + [{atom, A, ?pp_form}, {'(', A}, {')', A}, {'->', A}, {atom, A, kill}, + {dot, A}]. + +quickscan_macros(Ts) -> + quickscan_macros(Ts, []). + +quickscan_macros([{'?',_}, {Type, _, A} | Ts], [{string, AnnoS, S} | As]) + when Type =:= atom; Type =:= var -> + %% macro after a string literal: change to a single string + {_, Ts1} = skip_macro_args(Ts), + S1 = S ++ quick_macro_string(A), + quickscan_macros(Ts1, [{string, AnnoS, S1} | As]); +quickscan_macros([{'?',_}, {Type, _, _}=N | [{'(',_}|_]=Ts], + [{':',_}|_]=As) + when Type =:= atom; Type =:= var -> + %% macro and open parenthesis after colon - check the token + %% following the arguments (see scan_macros for details) + Ts1 = case skip_macro_args(Ts) of + {_, [{'->',_} | _] = Ts2} -> Ts2; + {_, [{'when',_} | _] = Ts2} -> Ts2; + {_, [{':',_} | _] = Ts2} -> Ts2; + _ -> Ts %% assume macro without arguments + end, + quickscan_macros_1(N, Ts1, As); +quickscan_macros([{'?',_}, {Type, _, _}=N | Ts], As) + when Type =:= atom; Type =:= var -> + %% macro with or without arguments + {_, Ts1} = skip_macro_args(Ts), + quickscan_macros_1(N, Ts1, As); +quickscan_macros([T | Ts], As) -> + quickscan_macros(Ts, [T | As]); +quickscan_macros([], As) -> + lists:reverse(As). 
+ +%% (after a macro has been found and the arglist skipped, if any) +quickscan_macros_1({_Type, _, A}, [{string, AnnoS, S} | Ts], As) -> + %% string literal following macro: change to single string + S1 = quick_macro_string(A) ++ S, + quickscan_macros(Ts, [{string, AnnoS, S1} | As]); +quickscan_macros_1({_Type, AnnoA, A}, Ts, As) -> + %% normal case - just replace the macro with an atom + quickscan_macros(Ts, [{atom, AnnoA, quick_macro_atom(A)} | As]). + +quick_macro_atom(A) -> + list_to_atom("?" ++ atom_to_list(A)). + +quick_macro_string(A) -> + "(?" ++ atom_to_list(A) ++ ")". + +%% Skipping to the end of a macro call, tracking open/close constructs. +%% @spec (Tokens) -> {Skipped, Rest} + +skip_macro_args([{'(',_}=T | Ts]) -> + skip_macro_args(Ts, [')'], [T]); +skip_macro_args(Ts) -> + {[], Ts}. + +skip_macro_args([{'(',_}=T | Ts], Es, As) -> + skip_macro_args(Ts, [')' | Es], [T | As]); +skip_macro_args([{'{',_}=T | Ts], Es, As) -> + skip_macro_args(Ts, ['}' | Es], [T | As]); +skip_macro_args([{'[',_}=T | Ts], Es, As) -> + skip_macro_args(Ts, [']' | Es], [T | As]); +skip_macro_args([{'<<',_}=T | Ts], Es, As) -> + skip_macro_args(Ts, ['>>' | Es], [T | As]); +skip_macro_args([{'begin',_}=T | Ts], Es, As) -> + skip_macro_args(Ts, ['end' | Es], [T | As]); +skip_macro_args([{'if',_}=T | Ts], Es, As) -> + skip_macro_args(Ts, ['end' | Es], [T | As]); +skip_macro_args([{'case',_}=T | Ts], Es, As) -> + skip_macro_args(Ts, ['end' | Es], [T | As]); +skip_macro_args([{'receive',_}=T | Ts], Es, As) -> + skip_macro_args(Ts, ['end' | Es], [T | As]); +skip_macro_args([{'try',_}=T | Ts], Es, As) -> + skip_macro_args(Ts, ['end' | Es], [T | As]); +skip_macro_args([{E,_}=T | Ts], [E], As) -> %final close + {lists:reverse([T | As]), Ts}; +skip_macro_args([{E,_}=T | Ts], [E | Es], As) -> %matching close + skip_macro_args(Ts, Es, [T | As]); +skip_macro_args([T | Ts], Es, As) -> + skip_macro_args(Ts, Es, [T | As]); +skip_macro_args([], _Es, _As) -> + throw({error, macro_args}). + +filter_form({function, _, ?pp_form, _, + [{clause, _, [], [], [{atom, _, kill}]}]}) -> + none; +filter_form(T) -> + T. + + +%% --------------------------------------------------------------------- +%% Normal parsing - try to preserve all information + +normal_parser(Ts0, Opt) -> + case scan_form(Ts0, Opt) of + Ts when is_list(Ts) -> + rewrite_form(parse_tokens(Ts)); + Node -> + Node + end. 
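The two parser flavours differ exactly as the docs above promise: quick_parser/2 rewrites preprocessor forms into a killable dummy form and macro uses into plain atoms, while normal_parser/2 keeps macros as structured nodes via the scan/rewrite passes below. A sketch of the quick flavour, assuming a hypothetical m.erl containing `-define(FOO, 1). f() -> ?FOO.':

    {ok, Forms} = epp_dodger:quick_parse_file("m.erl"),
    %% the -define form is discarded (see filter_form/1 above) and
    %% f/0 survives with ?FOO collapsed to the atom '?FOO'
    1 = length(Forms).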
+ +scan_form([{'-', _Anno}, {atom, AnnoA, define} | Ts], Opt) -> + [{atom, AnnoA, ?pp_form}, {'(', AnnoA}, {')', AnnoA}, {'->', AnnoA}, + {atom, AnnoA, define} | scan_macros(Ts, Opt)]; +scan_form([{'-', _Anno}, {atom, AnnoA, undef} | Ts], Opt) -> + [{atom, AnnoA, ?pp_form}, {'(', AnnoA}, {')', AnnoA}, {'->', AnnoA}, + {atom, AnnoA, undef} | scan_macros(Ts, Opt)]; +scan_form([{'-', _Anno}, {atom, AnnoA, include} | Ts], Opt) -> + [{atom, AnnoA, ?pp_form}, {'(', AnnoA}, {')', AnnoA}, {'->', AnnoA}, + {atom, AnnoA, include} | scan_macros(Ts, Opt)]; +scan_form([{'-', _Anno}, {atom, AnnoA, include_lib} | Ts], Opt) -> + [{atom, AnnoA, ?pp_form}, {'(', AnnoA}, {')', AnnoA}, {'->', AnnoA}, + {atom, AnnoA, include_lib} | scan_macros(Ts, Opt)]; +scan_form([{'-', _Anno}, {atom, AnnoA, ifdef} | Ts], Opt) -> + [{atom, AnnoA, ?pp_form}, {'(', AnnoA}, {')', AnnoA}, {'->', AnnoA}, + {atom, AnnoA, ifdef} | scan_macros(Ts, Opt)]; +scan_form([{'-', _Anno}, {atom, AnnoA, ifndef} | Ts], Opt) -> + [{atom, AnnoA, ?pp_form}, {'(', AnnoA}, {')', AnnoA}, {'->', AnnoA}, + {atom, AnnoA, ifndef} | scan_macros(Ts, Opt)]; +scan_form([{'-', _Anno}, {'if', AnnoA} | Ts], Opt) -> + [{atom, AnnoA, ?pp_form}, {'(', AnnoA}, {')', AnnoA}, {'->', AnnoA}, + {atom, AnnoA, 'if'} | scan_macros(Ts, Opt)]; +scan_form([{'-', _Anno}, {atom, AnnoA, elif} | Ts], Opt) -> + [{atom, AnnoA, ?pp_form}, {'(', AnnoA}, {')', AnnoA}, {'->', AnnoA}, + {atom, AnnoA, 'elif'} | scan_macros(Ts, Opt)]; +scan_form([{'-', _Anno}, {atom, AnnoA, 'else'} | Ts], Opt) -> + [{atom, AnnoA, ?pp_form}, {'(', AnnoA}, {')', AnnoA}, {'->', AnnoA}, + {atom, AnnoA, 'else'} | scan_macros(Ts, Opt)]; +scan_form([{'-', _Anno}, {'else', AnnoA} | Ts], Opt) -> + [{atom, AnnoA, ?pp_form}, {'(', AnnoA}, {')', AnnoA}, {'->', AnnoA}, + {atom, AnnoA, 'else'} | scan_macros(Ts, Opt)]; +scan_form([{'-', _Anno}, {atom, AnnoA, endif} | Ts], Opt) -> + [{atom, AnnoA, ?pp_form}, {'(', AnnoA}, {')', AnnoA}, {'->', AnnoA}, + {atom, AnnoA, endif} | scan_macros(Ts, Opt)]; +scan_form([{'-', _Anno}, {atom, AnnoA, error} | Ts], _Opt) -> + Desc = build_info_string("-error", Ts), + ErrorInfo = {erl_anno:location(AnnoA), ?MODULE, {error, Desc}}, + erl_syntax:error_marker(ErrorInfo); +scan_form([{'-', _Anno}, {atom, AnnoA, warning} | Ts], _Opt) -> + Desc = build_info_string("-warning", Ts), + ErrorInfo = {erl_anno:location(AnnoA), ?MODULE, {warning, Desc}}, + erl_syntax:error_marker(ErrorInfo); +scan_form([{'-', A}, {'?', A1}, {Type, _, _}=N | [{'(', _} | _]=Ts], Opt) + when Type =:= atom; Type =:= var -> + %% minus, macro and open parenthesis at start of form - assume that + %% the macro takes no arguments; e.g. `-?foo(...).' + macro(A1, N, Ts, [{'-', A}], Opt); +scan_form([{'?', A}, {Type, _, _}=N | [{'(', _} | _]=Ts], Opt) + when Type =:= atom; Type =:= var -> + %% macro and open parenthesis at start of form - assume that the + %% macro takes no arguments; probably a function declaration on the + %% form `?m(...) -> ...', which will not parse if it is rewritten as + %% `(?m(...)) -> ...', so it must be handled as `(?m)(...) -> ...' + macro(A, N, Ts, [], Opt); +scan_form(Ts, Opt) -> + scan_macros(Ts, Opt). + +build_info_string(Prefix, Ts0) -> + Ts = lists:droplast(Ts0), + String = lists:droplast(tokens_to_string(Ts)), + Prefix ++ " " ++ String ++ ".". + +scan_macros(Ts, Opt) -> + scan_macros(Ts, [], Opt). 
+ +scan_macros([{'?', _}=M, {Type, _, _}=N | Ts], [{string, AnnoS, _}=S | As], + #opt{clever = true}=Opt) + when Type =:= atom; Type =:= var -> + %% macro after a string literal: be clever and insert ++ + scan_macros([M, N | Ts], [{'++', AnnoS}, S | As], Opt); +scan_macros([{'?', Anno}, {Type, _, _}=N | [{'(',_}|_]=Ts], + [{':',_}|_]=As, Opt) + when Type =:= atom; Type =:= var -> + %% macro and open parentheses after colon - probably a call + %% `m:?F(...)' so the argument list might belong to the call, not + %% the macro - but it could also be a try-clause pattern + %% `...:?T(...) ->' - we need to check the token following the + %% arguments to decide + {Args, Rest} = skip_macro_args(Ts), + case Rest of + [{'->',_} | _] -> + macro_call(Args, Anno, N, Rest, As, Opt); + [{'when',_} | _] -> + macro_call(Args, Anno, N, Rest, As, Opt); + [{':',_} | _] -> + macro_call(Args, Anno, N, Rest, As, Opt); + _ -> + macro(Anno, N, Ts, As, Opt) + end; +scan_macros([{'?', Anno}, {Type, _, _}=N | [{'(',_}|_]=Ts], As, Opt) + when Type =:= atom; Type =:= var -> + %% macro with arguments + {Args, Rest} = skip_macro_args(Ts), + macro_call(Args, Anno, N, Rest, As, Opt); +scan_macros([{'?', Anno }, {Type, _, _}=N | Ts], As, Opt) + when Type =:= atom; Type =:= var -> + %% macro without arguments + macro(Anno, N, Ts, As, Opt); +scan_macros([T | Ts], As, Opt) -> + scan_macros(Ts, [T | As], Opt); +scan_macros([], As, _Opt) -> + lists:reverse(As). + +%% Rewriting to a tuple which will be recognized by the post-parse pass +%% (we insert parentheses to preserve the precedences when parsing). + +macro(Anno, {Type, _, A}, Rest, As, Opt) -> + scan_macros_1([], Rest, [{atom,Anno,macro_atom(Type,A)} | As], Opt). + +macro_call([{'(',_}, {')',_}], Anno, {_, AnnoN, _}=N, Rest, As, Opt) -> + {Open, Close} = parentheses(As), + scan_macros_1([], Rest, + %% {'?macro_call', N } + lists:reverse(Open ++ [{'{', Anno}, + {atom, Anno, ?macro_call}, + {',', Anno}, + N, + {'}', AnnoN}] ++ Close, + As), Opt); +macro_call([{'(',_} | Args], Anno, {_, AnnoN, _}=N, Rest, As, Opt) -> + {Open, Close} = parentheses(As), + %% drop closing parenthesis + {')', _} = lists:last(Args), %% assert + Args1 = lists:droplast(Args), + %% note that we must scan the argument list; it may not be skipped + scan_macros_1(Args1 ++ [{'}', AnnoN} | Close], + Rest, + %% {'?macro_call', N, Arg1, ... } + lists:reverse(Open ++ [{'{', Anno}, + {atom, Anno, ?macro_call}, + {',', Anno}, + N, + {',', AnnoN}], + As), Opt). + +macro_atom(atom, A) -> + list_to_atom(?atom_prefix ++ atom_to_list(A)); +macro_atom(var, A) -> + list_to_atom(?var_prefix ++ atom_to_list(A)). + +%% don't insert parentheses after a string token, to avoid turning +%% `"string" ?macro' into a "function application" `"string"(...)' +%% (see note at top of file) +parentheses([{string, _, _} | _]) -> + {[], []}; +parentheses(_) -> + {[{'(',0}], [{')',0}]}. + +%% (after a macro has been found and the arglist skipped, if any) +scan_macros_1(Args, [{string, AnnoS, _} | _]=Rest, As, + #opt{clever = true}=Opt) -> + %% string literal following macro: be clever and insert ++ + scan_macros(Args ++ [{'++', AnnoS} | Rest], As, Opt); +scan_macros_1(Args, Rest, As, Opt) -> + %% normal case - continue scanning + scan_macros(Args ++ Rest, As, Opt). 
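Because erl_parse has no notion of macros, the scanner above smuggles them through as tuples tagged with the '? (' marker (wrapped in parentheses to preserve precedence); rewrite/1 below turns those tuples back into proper erl_syntax macro nodes. The observable effect, as a sketch with a hypothetical g.erl containing `g() -> ?LINE.':

    {ok, [F]} = epp_dodger:parse_file("g.erl"),
    [Clause] = erl_syntax:function_clauses(F),
    [Body] = erl_syntax:clause_body(Clause),
    macro = erl_syntax:type(Body).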
+ +rewrite_form({function, Anno, ?pp_form, _, + [{clause, _, [], [], [{call, _, A, As}]}]}) -> + erl_syntax:set_pos(erl_syntax:attribute(A, rewrite_list(As)), Anno); +rewrite_form({function, Anno, ?pp_form, _, [{clause, _, [], [], [A]}]}) -> + erl_syntax:set_pos(erl_syntax:attribute(A), Anno); +rewrite_form(T) -> + rewrite(T). + +rewrite_list([T | Ts]) -> + [rewrite(T) | rewrite_list(Ts)]; +rewrite_list([]) -> + []. + +%% Note: as soon as we start using erl_syntax:subtrees/1 and similar +%% functions, we cannot assume that we know the exact representation of +%% the syntax tree anymore - we must use erl_syntax functions to analyze +%% and decompose the data. + +rewrite(Node) -> + case erl_syntax:type(Node) of + atom -> + case atom_to_list(erl_syntax:atom_value(Node)) of + ?atom_prefix ++ As -> + A1 = list_to_atom(As), + N = erl_syntax:copy_pos(Node, erl_syntax:atom(A1)), + erl_syntax:copy_pos(Node, erl_syntax:macro(N)); + ?var_prefix ++ As -> + A1 = list_to_atom(As), + N = erl_syntax:copy_pos(Node, erl_syntax:variable(A1)), + erl_syntax:copy_pos(Node, erl_syntax:macro(N)); + _ -> + Node + end; + tuple -> + case erl_syntax:tuple_elements(Node) of + [MagicWord, A | As] -> + case erl_syntax:type(MagicWord) of + atom -> + case erl_syntax:atom_value(MagicWord) of + ?macro_call -> + M = erl_syntax:macro(A, rewrite_list(As)), + erl_syntax:copy_pos(Node, M); + _ -> + rewrite_1(Node) + end; + _ -> + rewrite_1(Node) + end; + _ -> + rewrite_1(Node) + end; + _ -> + rewrite_1(Node) + end. + +rewrite_1(Node) -> + case erl_syntax:subtrees(Node) of + [] -> + Node; + Gs -> + Node1 = erl_syntax:make_tree(erl_syntax:type(Node), + [[rewrite(T) || T <- Ts] + || Ts <- Gs]), + erl_syntax:copy_pos(Node, Node1) + end. + +%% attempting a rescue operation on a token sequence for a single form +%% if it could not be parsed after the normal treatment + +fix_form([{atom, _, ?pp_form}, {'(', _}, {')', _}, {'->', _}, + {atom, _, define}, {'(', _} | _]=Ts) -> + case lists:reverse(Ts) of + [{dot, _}, {')', _} | _] -> + {retry, Ts, fun fix_define/1}; + [{dot, Anno} | Ts1] -> + Ts2 = lists:reverse([{dot, Anno}, {')', Anno} | Ts1]), + {retry, Ts2, fun fix_define/1}; + _ -> + error + end; +fix_form(_Ts) -> + error. + +fix_define([{atom, Anno, ?pp_form}, {'(', _}, {')', _}, {'->', _}, + {atom, AnnoA, define}, {'(', _}, N, {',', _} | Ts]) -> + [{dot, _}, {')', _} | Ts1] = lists:reverse(Ts), + S = tokens_to_string(lists:reverse(Ts1)), + A = erl_syntax:set_pos(erl_syntax:atom(define), AnnoA), + Txt = erl_syntax:set_pos(erl_syntax:text(S), AnnoA), + {form, erl_syntax:set_pos(erl_syntax:attribute(A, [N, Txt]), Anno)}; +fix_define(_Ts) -> + error. + +%% @spec tokens_to_string(Tokens::[term()]) -> string() +%% +%% @doc Generates a string corresponding to the given token sequence. +%% The string can be re-tokenized to yield the same token list again. + +-spec tokens_to_string([term()]) -> string(). 
+ +tokens_to_string([{atom,_,A} | Ts]) -> + io_lib:write_atom(A) ++ " " ++ tokens_to_string(Ts); +tokens_to_string([{string, _, S} | Ts]) -> + io_lib:write_string(S) ++ " " ++ tokens_to_string(Ts); +tokens_to_string([{char, _, C} | Ts]) -> + io_lib:write_char(C) ++ " " ++ tokens_to_string(Ts); +tokens_to_string([{float, _, F} | Ts]) -> + float_to_list(F) ++ " " ++ tokens_to_string(Ts); +tokens_to_string([{integer, _, N} | Ts]) -> + integer_to_list(N) ++ " " ++ tokens_to_string(Ts); +tokens_to_string([{var, _, A} | Ts]) -> + atom_to_list(A) ++ " " ++ tokens_to_string(Ts); +tokens_to_string([{dot, _} | Ts]) -> + ".\n" ++ tokens_to_string(Ts); +tokens_to_string([{A, _} | Ts]) -> + atom_to_list(A) ++ " " ++ tokens_to_string(Ts); +tokens_to_string([]) -> + "". + + +%% @spec format_error(Descriptor::term()) -> string() +%% @hidden +%% @doc Callback function for formatting error descriptors. Not for +%% normal use. + +-spec format_error(term()) -> string(). + +format_error(macro_args) -> + errormsg("macro call missing end parenthesis"); +format_error({error, Error}) -> + Error; +format_error({warning, Error}) -> + Error; +format_error({unknown, Reason}) -> + errormsg(io_lib:format("unknown error: ~tP", [Reason, 15])). + +errormsg(String) -> + io_lib:format("~s: ~ts", [?MODULE, String]). + + +%% ===================================================================== + +%% See #7266: The dodger currently does not process feature attributes +%% correctly, so temporarily consider the `else` and `maybe` atoms +%% always as keywords +-spec reserved_word(Atom :: atom()) -> boolean(). +reserved_word('else') -> true; +reserved_word('maybe') -> true; +reserved_word(Atom) -> erl_scan:f_reserved_word(Atom). diff --git a/prelude/erlang/toolchain/escript_builder.escript b/prelude/erlang/toolchain/escript_builder.escript index 744a56d3d9675..54f3ff3b5c176 100644 --- a/prelude/erlang/toolchain/escript_builder.escript +++ b/prelude/erlang/toolchain/escript_builder.escript @@ -26,8 +26,6 @@ -include_lib("kernel/include/file.hrl"). --mode(compile). - -type escript_artifact_spec() :: #{ ArchivePath :: file:filename() => FileSystemPath :: file:filename() }. diff --git a/prelude/erlang/toolchain/escript_trampoline.sh b/prelude/erlang/toolchain/escript_trampoline.sh new file mode 100755 index 0000000000000..dbf39844bf3a8 --- /dev/null +++ b/prelude/erlang/toolchain/escript_trampoline.sh @@ -0,0 +1,11 @@ +#! /usr/bin/env bash +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +cmd=("$2" "$1/run.escript" "${@:3}") + +"${cmd[@]}" diff --git a/prelude/erlang/toolchain/include_erts.escript b/prelude/erlang/toolchain/include_erts.escript index ad81e86b7d8c2..90d72d4c1856a 100644 --- a/prelude/erlang/toolchain/include_erts.escript +++ b/prelude/erlang/toolchain/include_erts.escript @@ -23,8 +23,6 @@ -export([main/1]). --mode(compile). - -spec main([string()]) -> ok. 
main([TargetPath]) -> case filelib:wildcard(filename:join(code:root_dir(), "erts-*")) of diff --git a/prelude/erlang/toolchain/json.erl b/prelude/erlang/toolchain/json.erl new file mode 100644 index 0000000000000..e2f704cd2fcee --- /dev/null +++ b/prelude/erlang/toolchain/json.erl @@ -0,0 +1,1632 @@ +%% A temporary import of OTP27 json.erl with embedded json.hrl and +%% stripped doc attributes to make json encoding and decoding +%% available in buck2 rules. +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2024-2024. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% % @format +%% +-module(json). + +-dialyzer(no_improper_lists). + +-export([ + encode/1, encode/2, + encode_value/2, + encode_atom/2, + encode_integer/1, + encode_float/1, + encode_list/2, + encode_map/2, + encode_map_checked/2, + encode_key_value_list/2, + encode_key_value_list_checked/2, + encode_binary/1, + encode_binary_escape_all/1 +]). +-export_type([encoder/0, encode_value/0]). + +-export([ + decode/1, decode/3, decode_start/3, decode_continue/2 +]). +-export_type([ + from_binary_fun/0, + array_start_fun/0, + array_push_fun/0, + array_finish_fun/0, + object_start_fun/0, + object_push_fun/0, + object_finish_fun/0, + decoders/0, + decode_value/0, + continuation_state/0 +]). + +-compile(warn_missing_spec). + +-compile( + {inline, [ + encode_atom/2, + encode_integer/1, + encode_float/1, + encode_object/1, + escape/1, + escape_binary/1, + escape_all/1, + utf8t/0, + utf8s/0, + utf8s0/0, + hex_to_int/4, + string/6 + ]} +). + +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2024. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% + +%% A lot of the macros below use multi-value comparisons where +%% range checks would have worked just fine. This is because +%% the compiler & JIT can emit better code in some cases when +%% multiple clauses are to be dispatched based on such sets +%% of values. They'll generate an efficient "jump table", +%% which gets to the correct clause in one go, rather +%% than going through a set of comparisons. +%% However, this might not always be the best way (see is_0_to_9), +%% so as always with any performance work - measure, don't guess! + +-define(is_1_to_9(X), + X =:= $1 orelse + X =:= $2 orelse + X =:= $3 orelse + X =:= $4 orelse + X =:= $5 orelse + X =:= $6 orelse + X =:= $7 orelse + X =:= $8 orelse + X =:= $9 +). + +-define(is_0_to_9(X), X >= $0 andalso X =< $9).
+ +-define(is_ws(X), X =:= $\s; X =:= $\t; X =:= $\r; X =:= $\n). + +-define(is_ascii_escape(Byte), + Byte =:= 0 orelse + Byte =:= 1 orelse + Byte =:= 2 orelse + Byte =:= 3 orelse + Byte =:= 4 orelse + Byte =:= 5 orelse + Byte =:= 6 orelse + Byte =:= 7 orelse + Byte =:= 8 orelse + Byte =:= 9 orelse + Byte =:= 10 orelse + Byte =:= 11 orelse + Byte =:= 12 orelse + Byte =:= 13 orelse + Byte =:= 14 orelse + Byte =:= 15 orelse + Byte =:= 16 orelse + Byte =:= 17 orelse + Byte =:= 18 orelse + Byte =:= 19 orelse + Byte =:= 20 orelse + Byte =:= 21 orelse + Byte =:= 22 orelse + Byte =:= 23 orelse + Byte =:= 24 orelse + Byte =:= 25 orelse + Byte =:= 26 orelse + Byte =:= 27 orelse + Byte =:= 28 orelse + Byte =:= 29 orelse + Byte =:= 30 orelse + Byte =:= 31 orelse + Byte =:= 34 orelse + Byte =:= 92 +). +-define(is_ascii_plain(Byte), + Byte =:= 32 orelse + Byte =:= 33 orelse + Byte =:= 35 orelse + Byte =:= 36 orelse + Byte =:= 37 orelse + Byte =:= 38 orelse + Byte =:= 39 orelse + Byte =:= 40 orelse + Byte =:= 41 orelse + Byte =:= 42 orelse + Byte =:= 43 orelse + Byte =:= 44 orelse + Byte =:= 45 orelse + Byte =:= 46 orelse + Byte =:= 47 orelse + Byte =:= 48 orelse + Byte =:= 49 orelse + Byte =:= 50 orelse + Byte =:= 51 orelse + Byte =:= 52 orelse + Byte =:= 53 orelse + Byte =:= 54 orelse + Byte =:= 55 orelse + Byte =:= 56 orelse + Byte =:= 57 orelse + Byte =:= 58 orelse + Byte =:= 59 orelse + Byte =:= 60 orelse + Byte =:= 61 orelse + Byte =:= 62 orelse + Byte =:= 63 orelse + Byte =:= 64 orelse + Byte =:= 65 orelse + Byte =:= 66 orelse + Byte =:= 67 orelse + Byte =:= 68 orelse + Byte =:= 69 orelse + Byte =:= 70 orelse + Byte =:= 71 orelse + Byte =:= 72 orelse + Byte =:= 73 orelse + Byte =:= 74 orelse + Byte =:= 75 orelse + Byte =:= 76 orelse + Byte =:= 77 orelse + Byte =:= 78 orelse + Byte =:= 79 orelse + Byte =:= 80 orelse + Byte =:= 81 orelse + Byte =:= 82 orelse + Byte =:= 83 orelse + Byte =:= 84 orelse + Byte =:= 85 orelse + Byte =:= 86 orelse + Byte =:= 87 orelse + Byte =:= 88 orelse + Byte =:= 89 orelse + Byte =:= 90 orelse + Byte =:= 91 orelse + Byte =:= 93 orelse + Byte =:= 94 orelse + Byte =:= 95 orelse + Byte =:= 96 orelse + Byte =:= 97 orelse + Byte =:= 98 orelse + Byte =:= 99 orelse + Byte =:= 100 orelse + Byte =:= 101 orelse + Byte =:= 102 orelse + Byte =:= 103 orelse + Byte =:= 104 orelse + Byte =:= 105 orelse + Byte =:= 106 orelse + Byte =:= 107 orelse + Byte =:= 108 orelse + Byte =:= 109 orelse + Byte =:= 110 orelse + Byte =:= 111 orelse + Byte =:= 112 orelse + Byte =:= 113 orelse + Byte =:= 114 orelse + Byte =:= 115 orelse + Byte =:= 116 orelse + Byte =:= 117 orelse + Byte =:= 118 orelse + Byte =:= 119 orelse + Byte =:= 120 orelse + Byte =:= 121 orelse + Byte =:= 122 orelse + Byte =:= 123 orelse + Byte =:= 124 orelse + Byte =:= 125 orelse + Byte =:= 126 orelse + Byte =:= 127 +). + +-define(are_all_ascii_plain(B1, B2, B3, B4, B5, B6, B7, B8), + (?is_ascii_plain(B1)) andalso + (?is_ascii_plain(B2)) andalso + (?is_ascii_plain(B3)) andalso + (?is_ascii_plain(B4)) andalso + (?is_ascii_plain(B5)) andalso + (?is_ascii_plain(B6)) andalso + (?is_ascii_plain(B7)) andalso + (?is_ascii_plain(B8)) +). + +-define(UTF8_ACCEPT, 0). +-define(UTF8_REJECT, 12). + +%% +%% Encoding implementation +%% + +-type encoder() :: fun((dynamic(), encoder()) -> iodata()). + +-type encode_value() :: + integer() + | float() + | boolean() + | null + | binary() + | atom() + | list(encode_value()) + | encode_map(encode_value()). + +-type encode_map(Value) :: #{binary() | atom() | integer() => Value}. 
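The encoder() callback threaded through the encode path below is the extension point: a custom encoder pattern-matches the terms it wants to treat specially and delegates everything else back to json:encode_value/2. A sketch (the date-tuple convention is invented for illustration):

    %% Encode {Y, M, D} tuples as "YYYY-MM-DD" strings, everything else
    %% with the default rules.
    Encoder = fun({Y, M, D}, _Enc) when is_integer(Y), is_integer(M), is_integer(D) ->
                      json:encode_binary(iolist_to_binary(io_lib:format("~b-~2..0b-~2..0b", [Y, M, D])));
                 (Other, Enc) ->
                      json:encode_value(Other, Enc)
              end,
    iolist_to_binary(json:encode(#{<<"date">> => {2024, 6, 1}}, Encoder)).
    %% => <<"{\"date\":\"2024-06-01\"}">>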
+ +-spec encode(encode_value()) -> iodata(). +encode(Term) -> encode(Term, fun do_encode/2). + +-spec encode(dynamic(), encoder()) -> iodata(). +encode(Term, Encoder) when is_function(Encoder, 2) -> + Encoder(Term, Encoder). + +-spec encode_value(dynamic(), encoder()) -> iodata(). +encode_value(Value, Encode) -> + do_encode(Value, Encode). + +-spec do_encode(dynamic(), encoder()) -> iodata(). +do_encode(Value, Encode) when is_atom(Value) -> + encode_atom(Value, Encode); +do_encode(Value, _Encode) when is_binary(Value) -> + escape_binary(Value); +do_encode(Value, _Encode) when is_integer(Value) -> + encode_integer(Value); +do_encode(Value, _Encode) when is_float(Value) -> + encode_float(Value); +do_encode(Value, Encode) when is_list(Value) -> + do_encode_list(Value, Encode); +do_encode(Value, Encode) when is_map(Value) -> + do_encode_map(Value, Encode); +do_encode(Other, _Encode) -> + error({unsupported_type, Other}). + +-spec encode_atom(atom(), encoder()) -> iodata(). +encode_atom(null, _Encode) -> <<"null">>; +encode_atom(true, _Encode) -> <<"true">>; +encode_atom(false, _Encode) -> <<"false">>; +encode_atom(Other, Encode) -> Encode(atom_to_binary(Other, utf8), Encode). + +-spec encode_integer(integer()) -> iodata(). +encode_integer(Integer) -> integer_to_binary(Integer). + +-spec encode_float(float()) -> iodata(). +encode_float(Float) -> float_to_binary(Float, [short]). + +-spec encode_list(list(), encoder()) -> iodata(). +encode_list(List, Encode) when is_list(List) -> + do_encode_list(List, Encode). + +do_encode_list([], _Encode) -> + <<"[]">>; +do_encode_list([First | Rest], Encode) when is_function(Encode, 2) -> + [$[, Encode(First, Encode) | list_loop(Rest, Encode)]. + +list_loop([], _Encode) -> "]"; +list_loop([Elem | Rest], Encode) -> [$,, Encode(Elem, Encode) | list_loop(Rest, Encode)]. + +-spec encode_map(encode_map(dynamic()), encoder()) -> iodata(). +encode_map(Map, Encode) when is_map(Map) -> + do_encode_map(Map, Encode). + +do_encode_map(Map, Encode) when is_function(Encode, 2) -> + encode_object([[$,, key(Key, Encode), $: | Encode(Value, Encode)] || Key := Value <- Map]). + +-spec encode_map_checked(map(), encoder()) -> iodata(). +encode_map_checked(Map, Encode) -> + do_encode_checked(maps:to_list(Map), Encode). + +-spec encode_key_value_list([{term(), term()}], encoder()) -> iodata(). +encode_key_value_list(List, Encode) when is_function(Encode, 2) -> + encode_object([[$,, key(Key, Encode), $: | Encode(Value, Encode)] || {Key, Value} <- List]). + +-spec encode_key_value_list_checked([{term(), term()}], encoder()) -> iodata(). +encode_key_value_list_checked(List, Encode) -> + do_encode_checked(List, Encode). + +do_encode_checked(List, Encode) when is_function(Encode, 2) -> + do_encode_checked(List, Encode, #{}). + +do_encode_checked([{Key, Value} | Rest], Encode, Visited0) -> + EncodedKey = iolist_to_binary(key(Key, Encode)), + case is_map_key(EncodedKey, Visited0) of + true -> + error({duplicate_key, Key}); + _ -> + Visited = Visited0#{EncodedKey => true}, + [$,, EncodedKey, $:, Encode(Value, Encode) | do_encode_checked(Rest, Encode, Visited)] + end; +do_encode_checked([], _, _) -> + []. + +%% Dispatching any value through `Encode` could allow incorrect +%% JSON to be emitted (with keys not being strings). To avoid this, +%% the default encoder only supports binaries, atoms, and numbers. +%% Customisation is possible by overriding how maps are encoded in general. 
+key(Key, Encode) when is_binary(Key) -> Encode(Key, Encode); +key(Key, Encode) when is_atom(Key) -> Encode(atom_to_binary(Key, utf8), Encode); +key(Key, _Encode) when is_integer(Key) -> [$", encode_integer(Key), $"]; +key(Key, _Encode) when is_float(Key) -> [$", encode_float(Key), $"]. + +encode_object([]) -> <<"{}">>; +encode_object([[_Comma | Entry] | Rest]) -> ["{", Entry, Rest, "}"]. + +-spec encode_binary(binary()) -> iodata(). +encode_binary(Bin) when is_binary(Bin) -> + escape_binary(Bin). + +-spec encode_binary_escape_all(binary()) -> iodata(). +encode_binary_escape_all(Bin) when is_binary(Bin) -> + escape_all(Bin). + +escape_binary(Bin) -> escape_binary_ascii(Bin, [$"], Bin, 0, 0). + +escape_binary_ascii(Binary, Acc, Orig, Skip, Len) -> + case Binary of + <<B1, B2, B3, B4, B5, B6, B7, B8, Rest/bitstring>> when ?are_all_ascii_plain(B1, B2, B3, B4, B5, B6, B7, B8) -> + escape_binary_ascii(Rest, Acc, Orig, Skip, Len + 8); + Other -> + escape_binary(Other, Acc, Orig, Skip, Len) + end. + +escape_binary(<<Byte, Rest/bitstring>>, Acc, Orig, Skip, Len) when ?is_ascii_plain(Byte) -> + %% we got here because there were either less than 8 bytes left + %% or we have an escape in the next 8 bytes, + %% escape_binary_ascii would fail and dispatch here anyway + escape_binary(Rest, Acc, Orig, Skip, Len + 1); +escape_binary(<<Byte, Rest/bitstring>>, Acc, Orig, Skip0, Len) when ?is_ascii_escape(Byte) -> + Escape = escape(Byte), + Skip = Skip0 + Len + 1, + case Len of + 0 -> + escape_binary_ascii(Rest, [Acc | Escape], Orig, Skip, 0); + _ -> + Part = binary_part(Orig, Skip0, Len), + escape_binary_ascii(Rest, [Acc, Part | Escape], Orig, Skip, 0) + end; +escape_binary(<<Byte, Rest/bitstring>>, Acc, Orig, Skip, Len) -> + case element(Byte - 127, utf8s0()) of + ?UTF8_REJECT -> invalid_byte(Orig, Skip + Len); + %% all accept cases are ASCII, already covered above + State -> escape_binary_utf8(Rest, Acc, Orig, Skip, Len, State) + end; +escape_binary(_, _Acc, Orig, 0, _Len) -> + [$", Orig, $"]; +escape_binary(_, Acc, _Orig, _Skip, 0) -> + [Acc, $"]; +escape_binary(_, Acc, Orig, Skip, Len) -> + Part = binary_part(Orig, Skip, Len), + [Acc, Part, $"]. + +escape_binary_utf8(<<Byte, Rest/bitstring>>, Acc, Orig, Skip, Len, State0) -> + Type = element(Byte + 1, utf8t()), + case element(State0 + Type, utf8s()) of + ?UTF8_ACCEPT -> escape_binary_ascii(Rest, Acc, Orig, Skip, Len + 2); + ?UTF8_REJECT -> invalid_byte(Orig, Skip + Len + 1); + State -> escape_binary_utf8(Rest, Acc, Orig, Skip, Len + 1, State) + end; +escape_binary_utf8(_, _Acc, Orig, Skip, Len, _State) -> + unexpected_utf8(Orig, Skip + Len + 1). + +escape_all(Bin) -> escape_all_ascii(Bin, [$"], Bin, 0, 0). + +escape_all_ascii(Binary, Acc, Orig, Skip, Len) -> + case Binary of + <<B1, B2, B3, B4, B5, B6, B7, B8, Rest/bitstring>> when ?are_all_ascii_plain(B1, B2, B3, B4, B5, B6, B7, B8) -> + escape_all_ascii(Rest, Acc, Orig, Skip, Len + 8); + Other -> + escape_all(Other, Acc, Orig, Skip, Len) + end.
+ +escape_all(<<Byte, Rest/bits>>, Acc, Orig, Skip, Len) when ?is_ascii_plain(Byte) -> + escape_all(Rest, Acc, Orig, Skip, Len + 1); +escape_all(<<Byte, Rest/bits>>, Acc, Orig, Skip, Len) when ?is_ascii_escape(Byte) -> + Escape = escape(Byte), + case Len of + 0 -> + escape_all(Rest, [Acc | Escape], Orig, Skip + 1, 0); + _ -> + Part = binary_part(Orig, Skip, Len), + escape_all(Rest, [Acc, Part | Escape], Orig, Skip + Len + 1, 0) + end; +escape_all(<<Char/utf8, Rest/bits>>, Acc, Orig, Skip, 0) -> + escape_char(Rest, Acc, Orig, Skip, Char); +escape_all(<<Char/utf8, Rest/bits>>, Acc, Orig, Skip, Len) -> + Part = binary_part(Orig, Skip, Len), + escape_char(Rest, [Acc | Part], Orig, Skip + Len, Char); +escape_all(<<>>, _Acc, Orig, 0, _Len) -> + [$", Orig, $"]; +escape_all(<<>>, Acc, _Orig, _Skip, 0) -> + [Acc, $"]; +escape_all(<<>>, Acc, Orig, Skip, Len) -> + Part = binary_part(Orig, Skip, Len), + [Acc, Part, $"]; +escape_all(_Other, _Acc, Orig, Skip, Len) -> + invalid_byte(Orig, Skip + Len). + +escape_char(<<Rest/bits>>, Acc, Orig, Skip, Char) when Char =< 16#FF -> + Acc1 = [Acc, "\\u00" | integer_to_binary(Char, 16)], + escape_all(Rest, Acc1, Orig, Skip + 2, 0); +escape_char(<<Rest/bits>>, Acc, Orig, Skip, Char) when Char =< 16#7FF -> + Acc1 = [Acc, "\\u0" | integer_to_binary(Char, 16)], + escape_all(Rest, Acc1, Orig, Skip + 2, 0); +escape_char(<<Rest/bits>>, Acc, Orig, Skip, Char) when Char =< 16#FFF -> + Acc1 = [Acc, "\\u0" | integer_to_binary(Char, 16)], + escape_all(Rest, Acc1, Orig, Skip + 3, 0); +escape_char(<<Rest/bits>>, Acc, Orig, Skip, Char) when Char =< 16#FFFF -> + Acc1 = [Acc, "\\u" | integer_to_binary(Char, 16)], + escape_all(Rest, Acc1, Orig, Skip + 3, 0); +escape_char(<<Rest/bits>>, Acc, Orig, Skip, Char0) -> + Char = Char0 - 16#10000, + First = integer_to_binary(16#800 bor (Char bsr 10), 16), + Second = integer_to_binary(16#C00 bor (Char band 16#3FF), 16), + Acc1 = [Acc, "\\uD", First, "\\uD" | Second], + escape_all(Rest, Acc1, Orig, Skip + 4, 0). + +-spec escape(byte()) -> binary() | no. +escape($\x00) -> <<"\\u0000">>; +escape($\x01) -> <<"\\u0001">>; +escape($\x02) -> <<"\\u0002">>; +escape($\x03) -> <<"\\u0003">>; +escape($\x04) -> <<"\\u0004">>; +escape($\x05) -> <<"\\u0005">>; +escape($\x06) -> <<"\\u0006">>; +escape($\x07) -> <<"\\u0007">>; +escape($\b) -> <<"\\b">>; +escape($\t) -> <<"\\t">>; +escape($\n) -> <<"\\n">>; +escape($\x0b) -> <<"\\u000B">>; +escape($\f) -> <<"\\f">>; +escape($\r) -> <<"\\r">>; +escape($\x0e) -> <<"\\u000E">>; +escape($\x0f) -> <<"\\u000F">>; +escape($\x10) -> <<"\\u0010">>; +escape($\x11) -> <<"\\u0011">>; +escape($\x12) -> <<"\\u0012">>; +escape($\x13) -> <<"\\u0013">>; +escape($\x14) -> <<"\\u0014">>; +escape($\x15) -> <<"\\u0015">>; +escape($\x16) -> <<"\\u0016">>; +escape($\x17) -> <<"\\u0017">>; +escape($\x18) -> <<"\\u0018">>; +escape($\x19) -> <<"\\u0019">>; +escape($\x1A) -> <<"\\u001A">>; +escape($\x1B) -> <<"\\u001B">>; +escape($\x1C) -> <<"\\u001C">>; +escape($\x1D) -> <<"\\u001D">>; +escape($\x1E) -> <<"\\u001E">>; +escape($\x1F) -> <<"\\u001F">>; +escape($") -> <<"\\\"">>; +escape($\\) -> <<"\\\\">>; +escape(_) -> no. + +%% This is an adapted table from "Flexible and Economical UTF-8 Decoding" by Bjoern Hoehrmann.
+%% http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ + +%% Map character to character class +utf8t() -> + { + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 8, + 8, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 10, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 4, + 3, + 3, + 11, + 6, + 6, + 6, + 5, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8 + }. + +%% Transition table mapping combination of state & class to next state +utf8s() -> + { + 12, + 24, + 36, + 60, + 96, + 84, + 12, + 12, + 12, + 48, + 72, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 0, + 12, + 12, + 12, + 12, + 12, + 0, + 12, + 0, + 12, + 12, + 12, + 24, + 12, + 12, + 12, + 12, + 12, + 24, + 12, + 24, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 24, + 12, + 12, + 12, + 12, + 12, + 24, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 24, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 36, + 12, + 36, + 12, + 12, + 12, + 36, + 12, + 12, + 12, + 12, + 12, + 36, + 12, + 36, + 12, + 12, + 12, + 36, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12 + }. + +%% Optimisation for 1st byte direct state lookup, +%% we know starting state is 0 and ASCII bytes were already handled +utf8s0() -> + { + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 48, + 36, + 36, + 36, + 36, + 36, + 36, + 36, + 36, + 36, + 36, + 36, + 36, + 60, + 36, + 36, + 72, + 84, + 84, + 84, + 96, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12 + }. + +invalid_byte(Bin, Skip) -> + Byte = binary:at(Bin, Skip), + error({invalid_byte, Byte}, none, error_info(Skip)). + +error_info(Skip) -> + [{error_info, #{cause => #{position => Skip}}}]. + +%% +%% Decoding implementation +%% + +-define(ARRAY, array). +-define(OBJECT, object). + +-type from_binary_fun() :: fun((binary()) -> dynamic()). +-type array_start_fun() :: fun((Acc :: dynamic()) -> ArrayAcc :: dynamic()). +-type array_push_fun() :: fun((Value :: dynamic(), Acc :: dynamic()) -> NewAcc :: dynamic()). 
+-type array_finish_fun() :: fun((ArrayAcc :: dynamic(), OldAcc :: dynamic()) -> {dynamic(), dynamic()}). +-type object_start_fun() :: fun((Acc :: dynamic()) -> ObjectAcc :: dynamic()). +-type object_push_fun() :: fun((Key :: dynamic(), Value :: dynamic(), Acc :: dynamic()) -> NewAcc :: dynamic()). +-type object_finish_fun() :: fun((ObjectAcc :: dynamic(), OldAcc :: dynamic()) -> {dynamic(), dynamic()}). + +-type decoders() :: #{ + array_start => array_start_fun(), + array_push => array_push_fun(), + array_finish => array_finish_fun(), + object_start => object_start_fun(), + object_push => object_push_fun(), + object_finish => object_finish_fun(), + float => from_binary_fun(), + integer => from_binary_fun(), + string => from_binary_fun(), + null => term() +}. + +-record(decode, { + array_start :: array_start_fun() | undefined, + array_push :: array_push_fun() | undefined, + array_finish :: array_finish_fun() | undefined, + object_start :: object_start_fun() | undefined, + object_push :: object_push_fun() | undefined, + object_finish :: object_finish_fun() | undefined, + float = fun erlang:binary_to_float/1 :: from_binary_fun(), + integer = fun erlang:binary_to_integer/1 :: from_binary_fun(), + string :: from_binary_fun() | undefined, + null = null :: term() +}). + +-type acc() :: dynamic(). +-type stack() :: [?ARRAY | ?OBJECT | binary() | acc()]. +-type decode() :: #decode{}. + +-opaque continuation_state() :: tuple(). + +-type decode_value() :: + integer() + | float() + | boolean() + | null + | binary() + | list(decode_value()) + | #{binary() => decode_value()}. + +-spec decode(binary()) -> decode_value(). +decode(Binary) when is_binary(Binary) -> + case value(Binary, Binary, 0, ok, [], #decode{}) of + {Result, _Acc, <<>>} -> + Result; + {_, _, Rest} -> + invalid_byte(Rest, 0); + {continue, {_Bin, _Acc, [], _Decode, {number, Number}}} -> + Number; + {continue, {_, _, _, _, {float_error, Token, Skip}}} -> + unexpected_sequence(Token, Skip); + {continue, _} -> + error(unexpected_end) + end. + +-spec decode(binary(), dynamic(), decoders()) -> + {Result :: dynamic(), Acc :: dynamic(), binary()}. +decode(Binary, Acc0, Decoders) when is_binary(Binary) -> + Decode = maps:fold(fun parse_decoder/3, #decode{}, Decoders), + case value(Binary, Binary, 0, Acc0, [], Decode) of + {continue, {_Bin, Acc, [], _Decode, {number, Val}}} -> + {Val, Acc, <<>>}; + {continue, {_, _, _, _, {float_error, Token, Skip}}} -> + unexpected_sequence(Token, Skip); + {continue, _} -> + error(unexpected_end); + Result -> + Result + end. + +-spec decode_start(binary(), dynamic(), decoders()) -> + {Result :: dynamic(), Acc :: dynamic(), binary()} | {continue, continuation_state()}. +decode_start(Binary, Acc, Decoders) when is_binary(Binary) -> + Decode = maps:fold(fun parse_decoder/3, #decode{}, Decoders), + value(Binary, Binary, 0, Acc, [], Decode). + +-spec decode_continue(binary() | end_of_input, Opaque :: term()) -> + {Result :: dynamic(), Acc :: dynamic(), binary()} | {continue, continuation_state()}. 
+decode_continue(end_of_input, State) -> + case State of + {_, Acc, [], _Decode, {number, Val}} -> + {Val, Acc, <<>>}; + {_, _, _, _, {float_error, Token, Skip}} -> + unexpected_sequence(Token, Skip); + _ -> + error(unexpected_end) + end; +decode_continue(Cont, {Rest, Acc, Stack, #decode{} = Decode, FuncData}) when is_binary(Cont) -> + Binary = <<Rest/binary, Cont/binary>>, + case FuncData of + value -> + value(Binary, Binary, 0, Acc, Stack, Decode); + {number, _} -> + value(Binary, Binary, 0, Acc, Stack, Decode); + {float_error, _Token, _Skip} -> + value(Binary, Binary, 0, Acc, Stack, Decode); + {array_push, Val} -> + array_push(Binary, Binary, 0, Acc, Stack, Decode, Val); + {object_value, Key} -> + object_value(Binary, Binary, 0, Acc, Stack, Decode, Key); + {object_push, Value, Key} -> + object_push(Binary, Binary, 0, Acc, Stack, Decode, Value, Key); + object_key -> + object_key(Binary, Binary, 0, Acc, Stack, Decode) + end. + +parse_decoder(array_start, Fun, Decode) when is_function(Fun, 1) -> + Decode#decode{array_start = Fun}; +parse_decoder(array_push, Fun, Decode) when is_function(Fun, 2) -> + Decode#decode{array_push = Fun}; +parse_decoder(array_finish, Fun, Decode) when is_function(Fun, 2) -> + Decode#decode{array_finish = Fun}; +parse_decoder(object_start, Fun, Decode) when is_function(Fun, 1) -> + Decode#decode{object_start = Fun}; +parse_decoder(object_push, Fun, Decode) when is_function(Fun, 3) -> + Decode#decode{object_push = Fun}; +parse_decoder(object_finish, Fun, Decode) when is_function(Fun, 2) -> + Decode#decode{object_finish = Fun}; +parse_decoder(float, Fun, Decode) when is_function(Fun, 1) -> + Decode#decode{float = Fun}; +parse_decoder(integer, Fun, Decode) when is_function(Fun, 1) -> + Decode#decode{integer = Fun}; +parse_decoder(string, Fun, Decode) when is_function(Fun, 1) -> + Decode#decode{string = Fun}; +parse_decoder(null, Null, Decode) -> + Decode#decode{null = Null}. + +value(<<Byte, Rest/bits>>, Original, Skip, Acc, Stack, Decode) when ?is_ws(Byte) -> + value(Rest, Original, Skip + 1, Acc, Stack, Decode); +value(<<$0, Rest/bits>>, Original, Skip, Acc, Stack, Decode) -> + number_zero(Rest, Original, Skip, Acc, Stack, Decode, 1); +value(<<Byte, Rest/bits>>, Original, Skip, Acc, Stack, Decode) when ?is_1_to_9(Byte) -> + number(Rest, Original, Skip, Acc, Stack, Decode, 1); +value(<<$-, Rest/bits>>, Original, Skip, Acc, Stack, Decode) -> + number_minus(Rest, Original, Skip, Acc, Stack, Decode); +value(<<$t, Rest/bits>>, Original, Skip, Acc, Stack, Decode) -> + true(Rest, Original, Skip, Acc, Stack, Decode); +value(<<$f, Rest/bits>>, Original, Skip, Acc, Stack, Decode) -> + false(Rest, Original, Skip, Acc, Stack, Decode); +value(<<$n, Rest/bits>>, Original, Skip, Acc, Stack, Decode) -> + null(Rest, Original, Skip, Acc, Stack, Decode); +value(<<$", Rest/bits>>, Original, Skip, Acc, Stack, Decode) -> + string(Rest, Original, Skip + 1, Acc, Stack, Decode); +value(<<$[, Rest/bits>>, Original, Skip, Acc, Stack, Decode) -> + array_start(Rest, Original, Skip, Acc, Stack, Decode, 1); +value(<<${, Rest/bits>>, Original, Skip, Acc, Stack, Decode) -> + object_start(Rest, Original, Skip, Acc, Stack, Decode, 1); +value(<<Byte, _Rest/bits>>, Original, Skip, _Acc, _Stack, _Decode) when ?is_ascii_plain(Byte) -> + %% this clause is effectively the same as the last one, but necessary to + %% force compiler to emit a jump table dispatch, rather than binary search + invalid_byte(Original, Skip); +value(_, Original, Skip, Acc, Stack, Decode) -> + unexpected(Original, Skip, Acc, Stack, Decode, 0, 0, value).
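The continuation plumbing above is easiest to see end to end: `decode_start/3` parks `{Rest, Acc, Stack, Decode, FuncData}` when input runs out mid-value, and `decode_continue/2` resumes from exactly that state. A minimal sketch, assuming this vendored module is compiled as `json` and the three functions are exported as in the hunk:

    %% Feed a document in two chunks; the first call returns a continuation.
    stream_decode() ->
        {continue, State} = json:decode_start(<<"[1, 2">>, ok, #{}),
        %% The second chunk closes the array: we get the decoded value,
        %% the threaded accumulator, and the unconsumed rest of the input.
        {Value, ok, <<>>} = json:decode_continue(<<", 3]">>, State),
        Value. %% [1,2,3]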
+ +true(<<"rue", Rest/bits>>, Original, Skip, Acc, Stack, Decode) -> + continue(Rest, Original, Skip + 4, Acc, Stack, Decode, true); +true(_Rest, Original, Skip, Acc, Stack, Decode) -> + unexpected(Original, Skip, Acc, Stack, Decode, 1, 3, value). + +false(<<"alse", Rest/bits>>, Original, Skip, Acc, Stack, Decode) -> + continue(Rest, Original, Skip + 5, Acc, Stack, Decode, false); +false(_Rest, Original, Skip, Acc, Stack, Decode) -> + unexpected(Original, Skip, Acc, Stack, Decode, 1, 4, value). + +null(<<"ull", Rest/bits>>, Original, Skip, Acc, Stack, Decode) -> + continue(Rest, Original, Skip + 4, Acc, Stack, Decode, Decode#decode.null); +null(_Rest, Original, Skip, Acc, Stack, Decode) -> + unexpected(Original, Skip, Acc, Stack, Decode, 1, 3, value). + +number_minus(<<$0, Rest/bits>>, Original, Skip, Acc, Stack, Decode) -> + number_zero(Rest, Original, Skip, Acc, Stack, Decode, 2); +number_minus(<<Num, Rest/bits>>, Original, Skip, Acc, Stack, Decode) when ?is_1_to_9(Num) -> + number(Rest, Original, Skip, Acc, Stack, Decode, 2); +number_minus(_Rest, Original, Skip, Acc, Stack, Decode) -> + unexpected(Original, Skip, Acc, Stack, Decode, 1, 0, value). + +number_zero(<<$., Rest/bits>>, Original, Skip, Acc, Stack, Decode, Len) -> + number_frac(Rest, Original, Skip, Acc, Stack, Decode, Len + 1); +number_zero(<<E, Rest/bits>>, Original, Skip, Acc, Stack, Decode, Len) when E =:= $E; E =:= $e -> + number_exp_copy(Rest, Original, Skip, Acc, Stack, Decode, Len + 1, <<"0">>); +number_zero(<<>>, Original, Skip, Acc, Stack, Decode, Len) -> + unexpected(Original, Skip, Acc, Stack, Decode, Len, 0, {number, 0}); +number_zero(Rest, Original, Skip, Acc, Stack, Decode, Len) -> + continue(Rest, Original, Skip + Len, Acc, Stack, Decode, 0). + +number(<<Num, Rest/bits>>, Original, Skip, Acc, Stack, Decode, Len) when ?is_0_to_9(Num) -> + number(Rest, Original, Skip, Acc, Stack, Decode, Len + 1); +number(<<$., Rest/bits>>, Original, Skip, Acc, Stack, Decode, Len) -> + number_frac(Rest, Original, Skip, Acc, Stack, Decode, Len + 1); +number(<<E, Rest/bits>>, Original, Skip, Acc, Stack, Decode, Len) when E =:= $E; E =:= $e -> + Prefix = binary_part(Original, Skip, Len), + number_exp_copy(Rest, Original, Skip, Acc, Stack, Decode, Len + 1, Prefix); +number(<<>>, Original, Skip, Acc, Stack, Decode, Len) -> + Int = (Decode#decode.integer)(binary_part(Original, Skip, Len)), + unexpected(Original, Skip, Acc, Stack, Decode, Len, 0, {number, Int}); +number(Rest, Original, Skip, Acc, Stack, Decode, Len) -> + Int = (Decode#decode.integer)(binary_part(Original, Skip, Len)), + continue(Rest, Original, Skip + Len, Acc, Stack, Decode, Int). + +number_frac(<<Byte, Rest/bits>>, Original, Skip, Acc, Stack, Decode, Len) when ?is_0_to_9(Byte) -> + number_frac_cont(Rest, Original, Skip, Acc, Stack, Decode, Len + 1); +number_frac(_, Original, Skip, Acc, Stack, Decode, Len) -> + unexpected(Original, Skip, Acc, Stack, Decode, Len, 0, value). + +number_frac_cont(<<Byte, Rest/bits>>, Original, Skip, Acc, Stack, Decode, Len) when ?is_0_to_9(Byte) -> + number_frac_cont(Rest, Original, Skip, Acc, Stack, Decode, Len + 1); +number_frac_cont(<<E, Rest/bits>>, Original, Skip, Acc, Stack, Decode, Len) when E =:= $e; E =:= $E -> + number_exp(Rest, Original, Skip, Acc, Stack, Decode, Len + 1); +number_frac_cont(Rest, Original, Skip, Acc, Stack, Decode, Len) -> + Token = binary_part(Original, Skip, Len), + float_decode(Rest, Original, Skip, Acc, Stack, Decode, Len, Token).
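Note that the number scanner above never copies digits while they are contiguous: each clause only advances `Len`, and the token is sliced out of `Original` once before being handed to the configured `integer`/`float` callback (`binary_to_integer/1` and `binary_to_float/1` by default). Assuming the module is compiled as `json`:

    1> json:decode(<<"42">>).
    42
    2> json:decode(<<"0.5e3">>).
    500.0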
+ +float_decode(<<>>, Original, Skip, Acc, Stack, Decode, Len, Token) -> + try (Decode#decode.float)(Token) of + Float -> unexpected(Original, Skip, Acc, Stack, Decode, Len, 0, {number, Float}) + catch + _:_ -> unexpected(Original, Skip, Acc, Stack, Decode, Len, 0, {float_error, Token, Skip}) + end; +float_decode(<<Rest/bits>>, Original, Skip, Acc, Stack, Decode, Len, Token) -> + try (Decode#decode.float)(Token) of + Float -> + continue(Rest, Original, Skip + Len, Acc, Stack, Decode, Float) + catch + _:_ -> unexpected_sequence(Token, Skip) + end. + +number_exp(<<Byte, Rest/bits>>, Original, Skip, Acc, Stack, Decode, Len) when ?is_0_to_9(Byte) -> + number_exp_cont(Rest, Original, Skip, Acc, Stack, Decode, Len + 1); +number_exp(<<Sign, Rest/bits>>, Original, Skip, Acc, Stack, Decode, Len) when Sign =:= $+; Sign =:= $- -> + number_exp_sign(Rest, Original, Skip, Acc, Stack, Decode, Len + 1); +number_exp(_, Original, Skip, Acc, Stack, Decode, Len) -> + unexpected(Original, Skip, Acc, Stack, Decode, Len, 0, value). + +number_exp_sign(<<Byte, Rest/bits>>, Original, Skip, Acc, Stack, Decode, Len) when ?is_0_to_9(Byte) -> + number_exp_cont(Rest, Original, Skip, Acc, Stack, Decode, Len + 1); +number_exp_sign(_, Original, Skip, Acc, Stack, Decode, Len) -> + unexpected(Original, Skip, Acc, Stack, Decode, Len, 0, value). + +number_exp_cont(<<Byte, Rest/bits>>, Original, Skip, Acc, Stack, Decode, Len) when ?is_0_to_9(Byte) -> + number_exp_cont(Rest, Original, Skip, Acc, Stack, Decode, Len + 1); +number_exp_cont(Rest, Original, Skip, Acc, Stack, Decode, Len) -> + Token = binary_part(Original, Skip, Len), + float_decode(Rest, Original, Skip, Acc, Stack, Decode, Len, Token). + +number_exp_copy(<<Byte, Rest/bits>>, Original, Skip, Acc, Stack, Decode, Len, Prefix) when ?is_0_to_9(Byte) -> + number_exp_cont(Rest, Original, Skip, Acc, Stack, Decode, Len, Prefix, 1); +number_exp_copy(<<Sign, Rest/bits>>, Original, Skip, Acc, Stack, Decode, Len, Prefix) when Sign =:= $+; Sign =:= $- -> + number_exp_sign(Rest, Original, Skip, Acc, Stack, Decode, Len, Prefix, 1); +number_exp_copy(_, Original, Skip, Acc, Stack, Decode, Len, _Prefix) -> + unexpected(Original, Skip, Acc, Stack, Decode, Len, 0, value). + +number_exp_sign(<<Byte, Rest/bits>>, Original, Skip, Acc, Stack, Decode, Len, Prefix, ExpLen) when ?is_0_to_9(Byte) -> + number_exp_cont(Rest, Original, Skip, Acc, Stack, Decode, Len, Prefix, ExpLen + 1); +number_exp_sign(_, Original, Skip, Acc, Stack, Decode, Len, _Prefix, ExpLen) -> + unexpected(Original, Skip, Acc, Stack, Decode, Len + ExpLen, 0, value). + +number_exp_cont(<<Byte, Rest/bits>>, Original, Skip, Acc, Stack, Decode, Len, Prefix, ExpLen) when ?is_0_to_9(Byte) -> + number_exp_cont(Rest, Original, Skip, Acc, Stack, Decode, Len, Prefix, ExpLen + 1); +number_exp_cont(Rest, Original, Skip, Acc, Stack, Decode, Len, Prefix, ExpLen) -> + Suffix = binary_part(Original, Skip + Len, ExpLen), + Token = <<Prefix/binary, ".0e", Suffix/binary>>, + float_decode(Rest, Original, Skip, Acc, Stack, Decode, Len + ExpLen, Token). + +string(Binary, Original, Skip, Acc, Stack, Decode) -> + string_ascii(Binary, Original, Skip, Acc, Stack, Decode, 0). + +string_ascii(Binary, Original, Skip, Acc, Stack, Decode, Len) -> + case Binary of + <<B1, B2, B3, B4, B5, B6, B7, B8, Rest/bits>> when ?are_all_ascii_plain(B1, B2, B3, B4, B5, B6, B7, B8) -> + string_ascii(Rest, Original, Skip, Acc, Stack, Decode, Len + 8); + Other -> + string(Other, Original, Skip, Acc, Stack, Decode, Len) + end. + +-spec string(binary(), binary(), integer(), acc(), stack(), decode(), integer()) -> dynamic().
+string(<<Byte, Rest/bits>>, Orig, Skip, Acc, Stack, Decode, Len) when ?is_ascii_plain(Byte) -> + string(Rest, Orig, Skip, Acc, Stack, Decode, Len + 1); +string(<<$\\, Rest/bits>>, Orig, Skip, Acc, Stack, Decode, Len) -> + Part = binary_part(Orig, Skip, Len), + SAcc = <<>>, + unescape(Rest, Orig, Skip, Acc, Stack, Decode, Skip - 1, Len, <<SAcc/binary, Part/binary>>); +string(<<$", Rest/bits>>, Orig, Skip0, Acc, Stack, Decode, Len) -> + Value = binary_part(Orig, Skip0, Len), + Skip = Skip0 + Len + 1, + case Decode#decode.string of + undefined -> continue(Rest, Orig, Skip, Acc, Stack, Decode, Value); + Fun -> continue(Rest, Orig, Skip, Acc, Stack, Decode, Fun(Value)) + end; +string(<<Byte, _Rest/bits>>, Orig, Skip, _Acc, _Stack, _Decode, Len) when ?is_ascii_escape(Byte) -> + invalid_byte(Orig, Skip + Len); +string(<<Byte, Rest/bits>>, Orig, Skip, Acc, Stack, Decode, Len) -> + case element(Byte - 127, utf8s0()) of + ?UTF8_REJECT -> invalid_byte(Orig, Skip + Len); + %% all accept cases are ASCII, already covered above + State -> string_utf8(Rest, Orig, Skip, Acc, Stack, Decode, Len, State) + end; +string(_, Orig, Skip, Acc, Stack, Decode, Len) -> + unexpected(Orig, Skip - 1, Acc, Stack, Decode, Len + 1, 0, value). + +string_utf8(<<Byte, Rest/bits>>, Orig, Skip, Acc, Stack, Decode, Len, State0) -> + Type = element(Byte + 1, utf8t()), + case element(State0 + Type, utf8s()) of + ?UTF8_ACCEPT -> string_ascii(Rest, Orig, Skip, Acc, Stack, Decode, Len + 2); + ?UTF8_REJECT -> invalid_byte(Orig, Skip + Len + 1); + State -> string_utf8(Rest, Orig, Skip, Acc, Stack, Decode, Len + 1, State) + end; +string_utf8(_, Orig, Skip, Acc, Stack, Decode, Len, _State0) -> + unexpected(Orig, Skip - 1, Acc, Stack, Decode, Len + 2, 0, value). + +string_ascii(Binary, Original, Skip, Acc, Stack, Decode, Start, Len, SAcc) -> + case Binary of + <<B1, B2, B3, B4, B5, B6, B7, B8, Rest/bits>> when ?are_all_ascii_plain(B1, B2, B3, B4, B5, B6, B7, B8) -> + string_ascii(Rest, Original, Skip, Acc, Stack, Decode, Start, Len + 8, SAcc); + Other -> + string(Other, Original, Skip, Acc, Stack, Decode, Start, Len, SAcc) + end. + +-spec string(binary(), binary(), integer(), acc(), stack(), decode(), integer(), integer(), binary()) -> dynamic(). +string(<<Byte, Rest/bits>>, Orig, Skip, Acc, Stack, Decode, Start, Len, SAcc) when ?is_ascii_plain(Byte) -> + string(Rest, Orig, Skip, Acc, Stack, Decode, Start, Len + 1, SAcc); +string(<<$\\, Rest/bits>>, Orig, Skip, Acc, Stack, Decode, Start, Len, SAcc) -> + Part = binary_part(Orig, Skip, Len), + unescape(Rest, Orig, Skip, Acc, Stack, Decode, Start, Len, <<SAcc/binary, Part/binary>>); +string(<<$", Rest/bits>>, Orig, Skip0, Acc, Stack, Decode, _Start, Len, SAcc) -> + Part = binary_part(Orig, Skip0, Len), + Value = <<SAcc/binary, Part/binary>>, + Skip = Skip0 + Len + 1, + case Decode#decode.string of + undefined -> continue(Rest, Orig, Skip, Acc, Stack, Decode, Value); + Fun -> continue(Rest, Orig, Skip, Acc, Stack, Decode, Fun(Value)) + end; +string(<<Byte, _Rest/bits>>, Orig, Skip, _Acc, _Stack, _Decode, _Start, Len, _SAcc) when ?is_ascii_escape(Byte) -> + invalid_byte(Orig, Skip + Len); +string(<<Byte, Rest/bits>>, Orig, Skip, Acc, Stack, Decode, Start, Len, SAcc) -> + case element(Byte - 127, utf8s0()) of + ?UTF8_REJECT -> invalid_byte(Orig, Skip + Len); + %% all accept cases are ASCII, already covered above + State -> string_utf8(Rest, Orig, Skip, Acc, Stack, Decode, Start, Len, SAcc, State) + end; +string(_, Orig, Skip, Acc, Stack, Decode, Start, Len, _SAcc) -> + Extra = Skip - Start, + unexpected(Orig, Start, Acc, Stack, Decode, Len + Extra, 0, value).
+ +string_utf8(<<Byte, Rest/bits>>, Orig, Skip, Acc, Stack, Decode, Start, Len, SAcc, State0) -> + Type = element(Byte + 1, utf8t()), + case element(State0 + Type, utf8s()) of + ?UTF8_ACCEPT -> string_ascii(Rest, Orig, Skip, Acc, Stack, Decode, Start, Len + 2, SAcc); + ?UTF8_REJECT -> invalid_byte(Orig, Skip + Len + 1); + State -> string_utf8(Rest, Orig, Skip, Acc, Stack, Decode, Start, Len + 1, SAcc, State) + end; +string_utf8(_, Orig, Skip, Acc, Stack, Decode, Start, Len, _SAcc, _State0) -> + Extra = Skip - Start, + unexpected(Orig, Start, Acc, Stack, Decode, Len + 1 + Extra, 0, value). + +unescape(<<Byte, Rest/bits>>, Original, Skip, Acc, Stack, Decode, Start, Len, SAcc) -> + Val = + case Byte of + $b -> $\b; + $f -> $\f; + $n -> $\n; + $r -> $\r; + $t -> $\t; + $" -> $"; + $\\ -> $\\; + $/ -> $/; + $u -> unicode; + _ -> error + end, + case Val of + unicode -> unescapeu(Rest, Original, Skip, Acc, Stack, Decode, Start, Len, SAcc); + error -> invalid_byte(Original, Skip + Len + 1); + Int -> string_ascii(Rest, Original, Skip + Len + 2, Acc, Stack, Decode, Start, 0, <<SAcc/binary, Int>>) + end; +unescape(_, Original, Skip, Acc, Stack, Decode, Start, Len, _SAcc) -> + Extra = Skip - Start, + unexpected(Original, Start, Acc, Stack, Decode, Len + 1 + Extra, 0, value). + +unescapeu(<<E1, E2, E3, E4, Rest/bits>>, Original, Skip, Acc, Stack, Decode, Start, Len, SAcc) -> + try hex_to_int(E1, E2, E3, E4) of + CP when CP >= 16#D800, CP =< 16#DBFF -> + unescape_surrogate(Rest, Original, Skip, Acc, Stack, Decode, Start, Len, SAcc, CP); + CP -> + try <<SAcc/binary, CP/utf8>> of + SAcc1 -> string_ascii(Rest, Original, Skip + Len + 6, Acc, Stack, Decode, Start, 0, SAcc1) + catch + _:_ -> unexpected_sequence(binary_part(Original, Skip + Len, 6), Skip + Len) + end + catch + _:_ -> + unexpected_sequence(binary_part(Original, Skip + Len, 6), Skip + Len) + end; +unescapeu(_Rest, Original, Skip, Acc, Stack, Decode, Start, Len, _SAcc) -> + Extra = Skip - Start, + unexpected(Original, Start, Acc, Stack, Decode, Len + 2 + Extra, 4, value). + +unescape_surrogate(<<"\\u", E1, E2, E3, E4, Rest/bits>>, Original, Skip, Acc, Stack, Decode, Start, Len, SAcc, Hi) -> + try hex_to_int(E1, E2, E3, E4) of + Lo when Lo >= 16#DC00, Lo =< 16#DFFF -> + CP = 16#10000 + ((Hi band 16#3FF) bsl 10) + (Lo band 16#3FF), + try <<SAcc/binary, CP/utf8>> of + SAcc1 -> string_ascii(Rest, Original, Skip + Len + 12, Acc, Stack, Decode, Start, 0, SAcc1) + catch + _:_ -> unexpected_sequence(binary_part(Original, Skip + Len, 12), Skip + Len) + end; + _ -> + unexpected_sequence(binary_part(Original, Skip + Len, 12), Skip + Len) + catch + _:_ -> unexpected_sequence(binary_part(Original, Skip + Len, 12), Skip + Len) + end; +unescape_surrogate(_Rest, Original, Skip, Acc, Stack, Decode, Start, Len, _SAcc, _Hi) -> + Extra = Skip - Start, + unexpected(Original, Start, Acc, Stack, Decode, Len + 6 + Extra, 5, value). + +%% erlfmt-ignore +%% this is a macro instead of an inlined function - compiler refused to inline +-define(hex_digit(C), element(C - $0 + 1, { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, n, n, n, n, n, %% 0x30 + n, n, 10,11,12,13,14,15,n, n, n, n, n, n, n, %% 0x40 + n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, %% 0x50 + n, n, n, n, 10,11,12,13,14,15 %% 0x60 +})). + +-spec hex_to_int(byte(), byte(), byte(), byte()) -> integer(). +hex_to_int(H1, H2, H3, H4) -> + ?hex_digit(H4) + 16 * (?hex_digit(H3) + 16 * (?hex_digit(H2) + 16 * ?hex_digit(H1))).
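A detail worth noting: `?hex_digit` maps non-hex characters to the atom `n` (or raises from `element/2` when out of range), so `hex_to_int/4` fails arithmetically and the callers' `try` turns that into `unexpected_sequence`. A quick sanity check of the unescape path, under the same `json` module-name assumption:

    1> json:decode(<<"\"\\u0041\"">>).
    <<"A">>
    2> json:decode(<<"\"\\uD83D\\uDE00\"">>). %% surrogate pair for U+1F600
    <<240,159,152,128>>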
+ +array_start(<<Byte, Rest/bits>>, Original, Skip, Acc, Stack, Decode, Len) when ?is_ws(Byte) -> + array_start(Rest, Original, Skip, Acc, Stack, Decode, Len + 1); +array_start(<<"]", Rest/bits>>, Original, Skip, Acc, Stack, Decode, Len) -> + {Value, NewAcc} = + case {Decode#decode.array_start, Decode#decode.array_finish} of + {undefined, undefined} -> {[], Acc}; + {Start, undefined} -> {lists:reverse(Start(Acc)), Acc}; + {undefined, Finish} -> Finish([], Acc); + {Start, Finish} -> Finish(Start(Acc), Acc) + end, + continue(Rest, Original, Skip + Len + 1, NewAcc, Stack, Decode, Value); +array_start(<<>>, Original, Skip, Acc, Stack, Decode, Len) -> + %% Handles empty array [] in continuation mode + unexpected(Original, Skip, Acc, Stack, Decode, Len, 0, value); +array_start(Rest, Original, Skip, OldAcc, Stack, Decode, Len) -> + case Decode#decode.array_start of + undefined -> value(Rest, Original, Skip + Len, [], [?ARRAY, OldAcc | Stack], Decode); + Fun -> value(Rest, Original, Skip + Len, Fun(OldAcc), [?ARRAY, OldAcc | Stack], Decode) + end. + +array_push(<<Byte, Rest/bits>>, Original, Skip, Acc, Stack, Decode, Value) when ?is_ws(Byte) -> + array_push(Rest, Original, Skip + 1, Acc, Stack, Decode, Value); +array_push(<<"]", Rest/bits>>, Original, Skip, Acc0, Stack0, Decode, Value) -> + Acc = + case Decode#decode.array_push of + undefined -> [Value | Acc0]; + Push -> Push(Value, Acc0) + end, + [_, OldAcc | Stack] = Stack0, + {ArrayValue, NewAcc} = + case Decode#decode.array_finish of + undefined -> {lists:reverse(Acc), OldAcc}; + Finish -> Finish(Acc, OldAcc) + end, + continue(Rest, Original, Skip + 1, NewAcc, Stack, Decode, ArrayValue); +array_push(<<$,, Rest/bits>>, Original, Skip0, Acc, Stack, Decode, Value) -> + Skip = Skip0 + 1, + case Decode#decode.array_push of + undefined -> value(Rest, Original, Skip, [Value | Acc], Stack, Decode); + Fun -> value(Rest, Original, Skip, Fun(Value, Acc), Stack, Decode) + end; +array_push(_, Original, Skip, Acc, Stack, Decode, Value) -> + unexpected(Original, Skip, Acc, Stack, Decode, 0, 0, {?FUNCTION_NAME, Value}). + +object_start(<<Byte, Rest/bits>>, Original, Skip, Acc, Stack, Decode, Len) when ?is_ws(Byte) -> + object_start(Rest, Original, Skip, Acc, Stack, Decode, Len + 1); +object_start(<<"}", Rest/bits>>, Original, Skip, Acc, Stack, Decode, Len) -> + {Value, NewAcc} = + case {Decode#decode.object_start, Decode#decode.object_finish} of + {undefined, undefined} -> {#{}, Acc}; + {Start, undefined} -> {maps:from_list(Start(Acc)), Acc}; + {undefined, Finish} -> Finish([], Acc); + {Start, Finish} -> Finish(Start(Acc), Acc) + end, + continue(Rest, Original, Skip + Len + 1, NewAcc, Stack, Decode, Value); +object_start(<<$", Rest/bits>>, Original, Skip0, OldAcc, Stack0, Decode, Len) -> + Stack = [?OBJECT, OldAcc | Stack0], + Skip = Skip0 + Len + 1, + case Decode#decode.object_start of + undefined -> + string(Rest, Original, Skip, [], Stack, Decode); + Fun -> + Acc = Fun(OldAcc), + string(Rest, Original, Skip, Acc, Stack, Decode) + end; +object_start(_, Original, Skip, Acc, Stack, Decode, Len) -> + unexpected(Original, Skip, Acc, Stack, Decode, Len, 0, value). + +object_value(<<Byte, Rest/bits>>, Original, Skip, Acc, Stack, Decode, Key) when ?is_ws(Byte) -> + object_value(Rest, Original, Skip + 1, Acc, Stack, Decode, Key); +object_value(<<$:, Rest/bits>>, Original, Skip, Acc, Stack, Decode, Key) -> + value(Rest, Original, Skip + 1, Acc, [Key | Stack], Decode); +object_value(_, Original, Skip, Acc, Stack, Decode, Key) -> + unexpected(Original, Skip, Acc, Stack, Decode, 0, 0, {?FUNCTION_NAME, Key}).
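The `array_*`/`object_*` hooks threaded through these clauses are exactly what the `decoders()` map of `decode/3` configures. A hedged sketch of using them to keep object members in document order as `{Key, Value}` lists instead of maps (callback arities must match `parse_decoder/3` above; `json` module name assumed):

    ordered_decode(Bin) ->
        Decoders = #{
            object_start => fun(_OldAcc) -> [] end,
            object_push => fun(Key, Value, Acc) -> [{Key, Value} | Acc] end,
            object_finish => fun(Acc, OldAcc) -> {lists:reverse(Acc), OldAcc} end
        },
        {Result, ok, <<>>} = json:decode(Bin, ok, Decoders),
        Result.

Note how `object_finish` returns both the finished object and the caller's saved accumulator, matching the `{ObjectValue, NewAcc}` unpacking in `object_push`.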
+ +object_push(<<Byte, Rest/bits>>, Original, Skip, Acc, Stack, Decode, Value, Key) when ?is_ws(Byte) -> + object_push(Rest, Original, Skip + 1, Acc, Stack, Decode, Value, Key); +object_push(<<"}", Rest/bits>>, Original, Skip, Acc0, Stack0, Decode, Value, Key) -> + Acc = + case Decode#decode.object_push of + undefined -> [{Key, Value} | Acc0]; + Fun -> Fun(Key, Value, Acc0) + end, + [_, OldAcc | Stack] = Stack0, + {ObjectValue, NewAcc} = + case Decode#decode.object_finish of + undefined -> {maps:from_list(Acc), OldAcc}; + Finish -> Finish(Acc, OldAcc) + end, + continue(Rest, Original, Skip + 1, NewAcc, Stack, Decode, ObjectValue); +object_push(<<$,, Rest/bits>>, Original, Skip, Acc0, Stack, Decode, Value, Key) -> + case Decode#decode.object_push of + undefined -> object_key(Rest, Original, Skip + 1, [{Key, Value} | Acc0], Stack, Decode); + Fun -> object_key(Rest, Original, Skip + 1, Fun(Key, Value, Acc0), Stack, Decode) + end; +object_push(_, Original, Skip, Acc, Stack, Decode, Value, Key) -> + unexpected(Original, Skip, Acc, Stack, Decode, 0, 0, {?FUNCTION_NAME, Value, Key}). + +object_key(<<Byte, Rest/bits>>, Original, Skip, Acc, Stack, Decode) when ?is_ws(Byte) -> + object_key(Rest, Original, Skip + 1, Acc, Stack, Decode); +object_key(<<$", Rest/bits>>, Original, Skip, Acc, Stack, Decode) -> + string(Rest, Original, Skip + 1, Acc, Stack, Decode); +object_key(_, Original, Skip, Acc, Stack, Decode) -> + unexpected(Original, Skip, Acc, Stack, Decode, 0, 0, ?FUNCTION_NAME). + +continue(<<Rest/bits>>, Original, Skip, Acc, Stack0, Decode, Value) -> + case Stack0 of + [] -> terminate(Rest, Original, Skip, Acc, Value); + [?ARRAY | _] -> array_push(Rest, Original, Skip, Acc, Stack0, Decode, Value); + [?OBJECT | _] -> object_value(Rest, Original, Skip, Acc, Stack0, Decode, Value); + [Key | Stack] -> object_push(Rest, Original, Skip, Acc, Stack, Decode, Value, Key) + end. + +terminate(<<Byte, Rest/bits>>, Original, Skip, Acc, Value) when ?is_ws(Byte) -> + terminate(Rest, Original, Skip + 1, Acc, Value); +terminate(<<Rest/bits>>, _Original, _Skip, Acc, Value) -> + {Value, Acc, Rest}. + +-spec unexpected_utf8(binary(), non_neg_integer()) -> no_return(). +unexpected_utf8(Original, Skip) when byte_size(Original) =:= Skip -> + error(unexpected_end); +unexpected_utf8(Original, Skip) -> + invalid_byte(Original, Skip). + +unexpected(Original, Skip, Acc, Stack, Decode, Pos, Len, FuncData) -> + RequiredSize = Skip + Pos + Len, + OrigSize = byte_size(Original), + case OrigSize =< RequiredSize of + true -> + <<_:Skip/binary, Rest/binary>> = Original, + {continue, {Rest, Acc, Stack, Decode, FuncData}}; + false -> + invalid_byte(Original, Skip + Pos) + end. + +-spec unexpected_sequence(binary(), non_neg_integer()) -> no_return(). +unexpected_sequence(Value, Skip) -> + error({unexpected_sequence, Value}, none, error_info(Skip)). diff --git a/prelude/erlang/toolchain/release_variables_builder.escript b/prelude/erlang/toolchain/release_variables_builder.escript index da9556823fa65..53fb7e12eeccf 100644 --- a/prelude/erlang/toolchain/release_variables_builder.escript +++ b/prelude/erlang/toolchain/release_variables_builder.escript @@ -28,8 +28,6 @@ -export([main/1]). --mode(compile). - -define(EXITSUCCESS, 0). -define(EXITERROR, 1).
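Back on the encoding side, `encode/2` threads a user-supplied callback through every value, so type support can be extended without touching the module. An illustrative sketch, not part of this diff; it assumes `encode_list/2` and `encode_value/2` are exported, as in OTP's `json`:

    %% Encode tuples as JSON arrays; defer everything else to the default.
    encode_with_tuples(Term) ->
        Encoder = fun
            (T, Enc) when is_tuple(T) -> json:encode_list(tuple_to_list(T), Enc);
            (Other, Enc) -> json:encode_value(Other, Enc)
        end,
        iolist_to_binary(json:encode(Term, Encoder)).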
diff --git a/prelude/export_exe.bzl b/prelude/export_exe.bzl index af37ff6b0dc7c..653d32bc63936 100644 --- a/prelude/export_exe.bzl +++ b/prelude/export_exe.bzl @@ -9,9 +9,6 @@ def _export_exe_impl(ctx: AnalysisContext) -> list[Provider]: if ctx.attrs.src and ctx.attrs.exe: fail("Must supply one of src or exe to export_exe") - if not ctx.attrs.src and not ctx.attrs.exe: - fail("Must supply one of src or exe to export_exe") - src = ctx.attrs.src if ctx.attrs.src else ctx.attrs.exe return [ @@ -21,7 +18,7 @@ def _export_exe_impl(ctx: AnalysisContext) -> list[Provider]: ), ] -export_exe = rule( +_export_exe = rule( doc = """Exports a file as an executable, for use in $(exe) macros or as a valid target for an exec_dep(). Accepts either a string `src`, which is a relative path to a file that will be directly referenced, or an arg `exe` which should be a path to an executable relative to a $(location) macro. @@ -45,7 +42,7 @@ export_exe = rule( src = "bin/script.sh", ) - The latter form allows executing checked in binaries with required resouces (eg. runtime shared libraries) + The latter form allows executing checked in binaries with required resources (eg. runtime shared libraries) without unnecessary indirection via another rule which allows args, like command_alias. Eg. instead of export_file( @@ -86,3 +83,24 @@ export_exe = rule( "src": attrs.option(attrs.source(), default = None, doc = "path to an executable binary relative to this package"), }, ) + +def export_exe(name, exe = None, src = None, **kwargs): + # If neither `exe` nor `src` is passed, treat the target's name as the src. + # + # export_exe( + # name = "script.sh", + # ) + # + # is equivalent to: + # + # export_exe( + # name = "script.sh", + # src = "script.sh", + # ) + # + _export_exe( + name = name, + exe = exe, + src = src if (exe or src) else name, + **kwargs + ) diff --git a/prelude/genrule.bzl b/prelude/genrule.bzl index af21b3c7b98c5..951ea1a19044e 100644 --- a/prelude/genrule.bzl +++ b/prelude/genrule.bzl @@ -9,6 +9,7 @@ load("@prelude//:cache_mode.bzl", "CacheModeInfo") load("@prelude//:genrule_local_labels.bzl", "genrule_labels_require_local") +load("@prelude//:genrule_prefer_local_labels.bzl", "genrule_labels_prefer_local") load("@prelude//:genrule_toolchain.bzl", "GenruleToolchainInfo") load("@prelude//:is_full_meta_repo.bzl", "is_full_meta_repo") load("@prelude//android:build_only_native_code.bzl", "is_build_only_native_code") @@ -39,6 +40,9 @@ _BUILD_ROOT_LABELS = {label: True for label in [ "app_modules_genrule", # produces JSON containing file paths that are read from the root dir. "android_langpack_strings", # produces JSON containing file paths that are read from the root dir. "windows_long_path_issue", # Windows: relative path length exceeds PATH_MAX, program cannot access file + "flowtype_ota_safety_target", # produces JSON containing file paths that are project-relative + "ctrlr_setting_paths", + "llvm_buck_genrule", ]} # In Buck1 the SRCS environment variable is only set if the substring SRCS is on the command line. @@ -47,6 +51,14 @@ _BUILD_ROOT_LABELS = {label: True for label in [ # that behavior. 
_NO_SRCS_ENVIRONMENT_LABEL = "no_srcs_environment" +_WINDOWS_ENV_SUBSTITUTIONS = [ + # Replace $OUT and ${OUT} + (regex("\\$(OUT\\b|\\{OUT\\})"), "%OUT%"), + (regex("\\$(SRCDIR\\b|\\{SRCDIR\\})"), "%SRCDIR%"), + (regex("\\$(SRCS\\b|\\{SRCS\\})"), "%SRCS%"), + (regex("\\$(TMP\\b|\\{TMP\\})"), "%TMP%"), +] + def _requires_build_root(ctx: AnalysisContext) -> bool: for label in ctx.attrs.labels: if label in _BUILD_ROOT_LABELS: @@ -56,6 +68,9 @@ def _requires_build_root(ctx: AnalysisContext) -> bool: def _requires_local(ctx: AnalysisContext) -> bool: return genrule_labels_require_local(ctx.attrs.labels) +def _prefers_local(ctx: AnalysisContext) -> bool: + return genrule_labels_prefer_local(ctx.attrs.labels) + def _ignore_artifacts(ctx: AnalysisContext) -> bool: return "buck2_ignore_artifacts" in ctx.attrs.labels @@ -68,9 +83,11 @@ _USE_CACHE_MODE = is_full_meta_repo() # Extra attributes required by every genrule based on genrule_impl def genrule_attributes() -> dict[str, Attr]: attributes = { + "always_print_stderr": attrs.bool(default = False), "metadata_env_var": attrs.option(attrs.string(), default = None), "metadata_path": attrs.option(attrs.string(), default = None), "no_outputs_cleanup": attrs.bool(default = False), + "remote_execution_dependencies": attrs.list(attrs.dict(key = attrs.string(), value = attrs.string()), default = []), "_build_only_native_code": attrs.default_only(attrs.bool(default = is_build_only_native_code())), "_genrule_toolchain": attrs.default_only(attrs.toolchain_dep(default = "toolchains//:genrule", providers = [GenruleToolchainInfo])), } @@ -117,11 +134,13 @@ def process_genrule( out_attr: [str, None], outs_attr: [dict, None], extra_env_vars: dict = {}, - identifier: [str, None] = None) -> list[Provider]: + identifier: [str, None] = None, + other_outputs: list[Artifact] = []) -> list[Provider]: if (out_attr != None) and (outs_attr != None): fail("Only one of `out` and `outs` should be set. Got out=`%s`, outs=`%s`" % (repr(out_attr), repr(outs_attr))) local_only = _requires_local(ctx) + prefer_local = _prefers_local(ctx) # NOTE: Eventually we shouldn't require local_only here, since we should be # fine with caching local fallbacks if necessary (or maybe that should be @@ -163,20 +182,20 @@ def process_genrule( cmd = ctx.attrs.bash if ctx.attrs.bash != None else ctx.attrs.cmd if cmd == None: fail("One of `cmd` or `bash` should be set.") - cmd = cmd_args(cmd) + + replace_regex = [] # For backwards compatibility with Buck1. if is_windows: - # Replace $OUT and ${OUT} - cmd.replace_regex("\\$(OUT\\b|\\{OUT\\})", "%OUT%") - cmd.replace_regex("\\$(SRCDIR\\b|\\{SRCDIR\\})", "%SRCDIR%") - cmd.replace_regex("\\$(SRCS\\b|\\{SRCS\\})", "%SRCS%") - cmd.replace_regex("\\$(TMP\\b|\\{TMP\\})", "%TMP%") + for re, sub in _WINDOWS_ENV_SUBSTITUTIONS: + replace_regex.append((re, sub)) + for extra_env_var in extra_env_vars: - cmd.replace_regex("\\$(%s\\b|\\{%s\\})" % (extra_env_var, extra_env_var), "%%%s%%" % extra_env_var) + replace_regex.append( + (regex("\\$(%s\\b|\\{%s\\})" % (extra_env_var, extra_env_var)), "%%%s%%" % extra_env_var), + ) - if _ignore_artifacts(ctx): - cmd = cmd.ignore_artifacts() + cmd = cmd_args(cmd, ignore_artifacts = _ignore_artifacts(ctx), replace_regex = replace_regex) if type(ctx.attrs.srcs) == type([]): # FIXME: We should always use the short_path, but currently that is sometimes blank. 
@@ -194,8 +213,13 @@ def process_genrule( symlinks = ctx.attrs.srcs srcs_artifact = ctx.actions.symlinked_dir("srcs" if not identifier else "{}-srcs".format(identifier), symlinks) + if ctx.attrs.environment_expansion_separator: + delimiter = ctx.attrs.environment_expansion_separator + else: + delimiter = " " + # Setup environment variables. - srcs = cmd_args() + srcs = cmd_args(delimiter = delimiter) for symlink in symlinks: srcs.add(cmd_args(srcs_artifact, format = path_sep.join([".", "{}", symlink.replace("/", path_sep)]))) env_vars = { @@ -214,6 +238,10 @@ def process_genrule( if local_only: env_vars["__BUCK2_LOCAL_ONLY_CACHE_BUSTER"] = cmd_args("") + # see comment above + if prefer_local: + env_vars["__BUCK2_PREFER_LOCAL_CACHE_BUSTER"] = cmd_args("") + # For now, when uploads are enabled, be safe and avoid sharing cache hits. cache_bust = _get_cache_mode(ctx).cache_bust_genrules @@ -270,16 +298,20 @@ def process_genrule( if is_windows: rewrite_scratch_path = cmd_args( - cmd_args(ctx.label.project_root).relative_to(srcs_artifact), + cmd_args(ctx.label.project_root, relative_to = srcs_artifact), format = 'set "BUCK_SCRATCH_PATH={}\\%BUCK_SCRATCH_PATH%"', ) else: srcs_dir = cmd_args(srcs_dir, quote = "shell") rewrite_scratch_path = cmd_args( - cmd_args(ctx.label.project_root, quote = "shell").relative_to(srcs_artifact), + cmd_args(ctx.label.project_root, quote = "shell", relative_to = srcs_artifact), format = "export BUCK_SCRATCH_PATH={}/$BUCK_SCRATCH_PATH", ) + # Relativize all paths in the command to the sandbox dir. + for script_cmd in script: + script_cmd.relative_to(srcs_artifact) + script = ( [ # Rewrite BUCK_SCRATCH_PATH @@ -287,12 +319,14 @@ def process_genrule( # Change to the directory that genrules expect. cmd_args(srcs_dir, format = "cd {}"), ] + - # Relativize all paths in the command to the sandbox dir. - [cmd.relative_to(srcs_artifact) for cmd in script] + script ) # Relative all paths in the env to the sandbox dir. - env_vars = {key: val.relative_to(srcs_artifact) for key, val in env_vars.items()} + env_vars = { + key: cmd_args(value, relative_to = srcs_artifact) + for key, value in env_vars.items() + } if is_windows: # Should be in the beginning. @@ -315,25 +349,32 @@ def process_genrule( metadata_args["metadata_env_var"] = ctx.attrs.metadata_env_var if ctx.attrs.metadata_path: metadata_args["metadata_path"] = ctx.attrs.metadata_path + if ctx.attrs.remote_execution_dependencies: + metadata_args["remote_execution_dependencies"] = ctx.attrs.remote_execution_dependencies category = "genrule" if ctx.attrs.type != None: # As of 09/2021, all genrule types were legal snake case if their dashes and periods were replaced with underscores. 
category += "_" + ctx.attrs.type.replace("-", "_").replace(".", "_") ctx.actions.run( - cmd_args(script_args).hidden([cmd, srcs_artifact, out_artifact.as_output()] + hidden), + cmd_args(script_args, hidden = [cmd, srcs_artifact, out_artifact.as_output()] + hidden), env = env_vars, local_only = local_only, + prefer_local = prefer_local, + weight = value_or(ctx.attrs.weight, 1), allow_cache_upload = cacheable, category = category, identifier = identifier, no_outputs_cleanup = ctx.attrs.no_outputs_cleanup, + always_print_stderr = ctx.attrs.always_print_stderr, **metadata_args ) + sub_targets = {k: [DefaultInfo(default_outputs = v)] for (k, v) in named_outputs.items()} providers = [DefaultInfo( default_outputs = default_outputs, - sub_targets = {k: [DefaultInfo(default_outputs = v)] for (k, v) in named_outputs.items()}, + sub_targets = sub_targets, + other_outputs = other_outputs, )] # The cxx_genrule also forwards here, and that doesn't have .executable, so use getattr diff --git a/prelude/genrule_local_labels.bzl b/prelude/genrule_local_labels.bzl index 9f710b08597ad..927cc5c83ec50 100644 --- a/prelude/genrule_local_labels.bzl +++ b/prelude/genrule_local_labels.bzl @@ -94,6 +94,12 @@ _GENRULE_LOCAL_LABELS = {label: True for label in [ # (https://fb.workplace.com/groups/1042353022615812/posts/1849505965233843/). "uses_php", + # Uses the `libX11-devel` package which is not available on RE. + "uses_x11", + + # The Unity license client needs to be set up on RE workers for this to work, and it may need further debugging. + "uses_unity", + # mksquashfs isn't available in RE, so run these locally # (https://fb.workplace.com/groups/buck2users/permalink/3023630007893360/) "uses_mksquashfs", @@ -170,10 +176,12 @@ _GENRULE_LOCAL_LABELS = {label: True for label in [ # Some Qt genrules don't support RE yet "qt_moc", - "qt_qrc_gen", + "qt_qmlcachegen", "qt_qrc_compile", + "qt_qrc_gen", "qt_qsb_gen", - "qt_qmlcachegen", + "qt_rcc", + "qt_uic", # use local jar "uses_jar", @@ -195,6 +203,24 @@ _GENRULE_LOCAL_LABELS = {label: True for label in [ # Uses xcrun which is not in RE (e.g. compile metalshaders) "uses_xcrun", + + # speech translation uses genrules to get models from manifold; high priority project + "speech_translation_high_priority", + + # Uses Apple's codesign command which might not be in RE + "uses_codesign", + + # Uses jf which is not on RE + "uses_jf", + + # On Messenger Desktop, a few targets are massive and take much longer to build on + # RE than locally on Windows. This is a mitigation until we can break down these + # targets. + "zeratul_windows_capacity_hog", + + # The compilation databases produced by Buck have paths relative to the root of + # fbsource. This isn't compatible with RE. + "uses_compilation_database", ]} def genrule_labels_require_local(labels): diff --git a/prelude/genrule_prefer_local_labels.bzl b/prelude/genrule_prefer_local_labels.bzl new file mode 100644 index 0000000000000..7b3229177e5c4 --- /dev/null +++ b/prelude/genrule_prefer_local_labels.bzl @@ -0,0 +1,22 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +""" +Handle labels used to make genrules prefer local execution +""" + +# Some rules prefer to be run locally for various reasons listed next to the label.
+_GENRULE_PREFER_LOCAL_LABELS = {label: True for label in [ + # Used for rules that just copy large files and will be faster to do locally + "large_copy", +]} + +def genrule_labels_prefer_local(labels): + for label in labels: + if label in _GENRULE_PREFER_LOCAL_LABELS: + return True + return False diff --git a/prelude/git/tools/BUCK b/prelude/git/tools/BUCK deleted file mode 100644 index 9135477da01bd..0000000000000 --- a/prelude/git/tools/BUCK +++ /dev/null @@ -1,7 +0,0 @@ -prelude = native - -prelude.python_bootstrap_binary( - name = "git_fetch", - main = "git_fetch.py", - visibility = ["PUBLIC"], -) diff --git a/prelude/git/tools/BUCK.v2 b/prelude/git/tools/BUCK.v2 new file mode 100644 index 0000000000000..ce7dcb83cc35e --- /dev/null +++ b/prelude/git/tools/BUCK.v2 @@ -0,0 +1,13 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +prelude = native + +prelude.python_bootstrap_binary( + name = "git_fetch", + main = "git_fetch.py", + visibility = ["PUBLIC"], +) diff --git a/prelude/go/cgo_builder.bzl b/prelude/go/cgo_builder.bzl new file mode 100644 index 0000000000000..6619da057eea2 --- /dev/null +++ b/prelude/go/cgo_builder.bzl @@ -0,0 +1,182 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//:paths.bzl", "paths") +load("@prelude//cxx:cxx_library.bzl", "cxx_compile_srcs") +load( + "@prelude//cxx:cxx_sources.bzl", + "CxxSrcWithFlags", +) +load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxToolchainInfo") +load( + "@prelude//cxx:cxx_types.bzl", + "CxxRuleConstructorParams", # @unused Used as a type +) +load("@prelude//cxx:headers.bzl", "cxx_attr_header_namespace", "cxx_get_regular_cxx_headers_layout", "prepare_headers") +load( + "@prelude//cxx:preprocessor.bzl", + "CPreprocessor", + "CPreprocessorArgs", + "CPreprocessorInfo", + "cxx_inherited_preprocessor_infos", + "cxx_merge_cpreprocessors", +) +load( + "@prelude//linking:link_info.bzl", + "LinkStyle", +) +load("@prelude//linking:types.bzl", "Linkage") +load("@prelude//os_lookup:defs.bzl", "OsLookup") +load("@prelude//utils:cmd_script.bzl", "ScriptOs", "cmd_script") +load("@prelude//utils:expect.bzl", "expect") +load(":toolchain.bzl", "GoToolchainInfo", "get_toolchain_env_vars") + +# A map of expected linkages for provided link style +_LINKAGE_FOR_LINK_STYLE = { + LinkStyle("static"): Linkage("static"), + LinkStyle("static_pic"): Linkage("static"), + LinkStyle("shared"): Linkage("shared"), +} + +CGoToolOut = record( + cgo_gotypes = field(Artifact), # _cgo_gotypes.go + cgo_export_h = field(Artifact), # _cgo_export.h + cgo_export_c = field(Artifact), # _cgo_export.c + cgo1_go_files = field(list[Artifact]), # *.cgo1.go + cgo2_c_files = field(list[Artifact]), # *.cgo2.c +) + +def _cgo( + ctx: AnalysisContext, + srcs: list[Artifact], + own_pre: list[CPreprocessor], + inherited_pre: list[CPreprocessorInfo], + c_flags: list[str], + cpp_flags: list[str]) -> (CGoToolOut, Artifact): + """ + Run `cgo` on `.go` sources to generate Go, C, and C-Header sources. + """ + gen_dir = ctx.actions.declare_output("cgo_gen_tmp", dir = True) + + # Return a `cmd_args` to use as the generated sources. 
+ go_toolchain = ctx.attrs._go_toolchain[GoToolchainInfo] + + cmd = cmd_args( + go_toolchain.cgo_wrapper, + cmd_args(go_toolchain.cgo, format = "--cgo={}"), + cmd_args(gen_dir.as_output(), format = "--output={}"), + "--", + c_flags + cpp_flags, + ctx.attrs.cxx_compiler_flags, + srcs, + ) + + env = get_toolchain_env_vars(go_toolchain) + env["CC"] = _cxx_wrapper(ctx, own_pre, inherited_pre) + + ctx.actions.run(cmd, env = env, category = "cgo") + + return project_go_and_c_files(srcs, gen_dir), gen_dir + +def project_go_and_c_files(cgo_srcs: list[Artifact], gen_dir: Artifact) -> CGoToolOut: + return CGoToolOut( + cgo_gotypes = gen_dir.project("_cgo_gotypes.go"), + cgo_export_h = gen_dir.project("_cgo_export.h"), + cgo_export_c = gen_dir.project("_cgo_export.c"), + cgo1_go_files = [gen_dir.project(paths.replace_extension(src.basename, ".cgo1.go")) for src in cgo_srcs], + cgo2_c_files = [gen_dir.project(paths.replace_extension(src.basename, ".cgo2.c")) for src in cgo_srcs], + ) + +def _cxx_wrapper(ctx: AnalysisContext, own_pre: list[CPreprocessor], inherited_pre: list[CPreprocessorInfo]) -> cmd_args: + pre = cxx_merge_cpreprocessors(ctx, own_pre, inherited_pre) + pre_args = pre.set.project_as_args("args") + pre_include_dirs = pre.set.project_as_args("include_dirs") + + go_toolchain = ctx.attrs._go_toolchain[GoToolchainInfo] + expect(CxxToolchainInfo in ctx.attrs._cxx_toolchain) + cxx_toolchain = ctx.attrs._cxx_toolchain[CxxToolchainInfo] + + c_compiler = cxx_toolchain.c_compiler_info + + # Construct the full C/C++ command needed to preprocess/compile sources. + cxx_cmd = cmd_args( + c_compiler.compiler, + c_compiler.preprocessor_flags, + c_compiler.compiler_flags, + pre_args, + pre_include_dirs, + go_toolchain.c_compiler_flags, + ) + + # Wrap the C/C++ command in a wrapper script to avoid arg length limits. + return cmd_script( + ctx = ctx, + name = "cxx_wrapper", + cmd = cxx_cmd, + os = ScriptOs("windows" if ctx.attrs._exec_os_type[OsLookup].platform == "windows" else "unix"), + ) + +# Build a CPreprocessor similarly to cxx_private_preprocessor_info, but with our filtered headers +def _own_pre(ctx: AnalysisContext, h_files: list[Artifact]) -> CPreprocessor: + namespace = cxx_attr_header_namespace(ctx) + header_map = {paths.join(namespace, h.short_path): h for h in h_files} + header_root = prepare_headers(ctx, header_map, "h_files-private-headers") + + return CPreprocessor( + args = CPreprocessorArgs(args = ["-I", header_root.include_path] if header_root != None else []), + ) + +def build_cgo(ctx: AnalysisContext, cgo_files: list[Artifact], h_files: list[Artifact], c_files: list[Artifact], c_flags: list[str], cpp_flags: list[str]) -> (list[Artifact], list[Artifact], Artifact): + if len(cgo_files) == 0: + return [], [], ctx.actions.copied_dir("cgo_gen_tmp", {}) + + # Gather preprocessor inputs. + own_pre = _own_pre(ctx, h_files) + inherited_pre = cxx_inherited_preprocessor_infos(ctx.attrs.deps) + + # Run cgo to generate the Go and C sources. + cgo_tool_out, gen_dir = _cgo(ctx, cgo_files, [own_pre], inherited_pre, c_flags, cpp_flags) + go_gen_srcs = [cgo_tool_out.cgo_gotypes] + cgo_tool_out.cgo1_go_files + c_gen_headers = [cgo_tool_out.cgo_export_h] + c_gen_srcs = [cgo_tool_out.cgo_export_c] + cgo_tool_out.cgo2_c_files + + # Wrap the generated CGO C headers in a CPreprocessor object for compiling.
+ cgo_headers_pre = CPreprocessor(args = CPreprocessorArgs(args = [ + "-I", + prepare_headers( + ctx, + {h.basename: h for h in c_gen_headers}, + "cgo-private-headers", + ).include_path, + ])) + + link_style = ctx.attrs.link_style + if link_style == None: + link_style = "static" + linkage = _LINKAGE_FOR_LINK_STYLE[LinkStyle(link_style)] + + # Compile C++ sources into object files. + c_compile_cmds = cxx_compile_srcs( + ctx, + CxxRuleConstructorParams( + rule_type = "cgo_sources", + headers_layout = cxx_get_regular_cxx_headers_layout(ctx), + srcs = [CxxSrcWithFlags(file = src) for src in c_files + c_gen_srcs], + compiler_flags = c_flags + ctx.attrs.cxx_compiler_flags, + preprocessor_flags = cpp_flags + ctx.attrs.cxx_preprocessor_flags, + ), + # Create private header tree and propagate via args. + [own_pre, cgo_headers_pre], + inherited_pre, + [], + linkage, + False, # add_coverage_instrumentation_compiler_flags + ) + + compiled_objects = c_compile_cmds.pic.objects + + return go_gen_srcs, compiled_objects, gen_dir diff --git a/prelude/go/cgo_library.bzl b/prelude/go/cgo_library.bzl deleted file mode 100644 index 762c2f9557c05..0000000000000 --- a/prelude/go/cgo_library.bzl +++ /dev/null @@ -1,249 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under both the MIT license found in the -# LICENSE-MIT file in the root directory of this source tree and the Apache -# License, Version 2.0 found in the LICENSE-APACHE file in the root directory -# of this source tree. - -load("@prelude//:paths.bzl", "paths") -load( - "@prelude//apple:xcode.bzl", - "get_project_root_file", -) -load( - "@prelude//cxx:compile.bzl", - "CxxSrcWithFlags", # @unused Used as a type -) -load("@prelude//cxx:cxx_library.bzl", "cxx_compile_srcs") -load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxToolchainInfo") -load( - "@prelude//cxx:cxx_types.bzl", - "CxxRuleConstructorParams", # @unused Used as a type -) -load("@prelude//cxx:headers.bzl", "cxx_get_regular_cxx_headers_layout", "prepare_headers") -load( - "@prelude//cxx:preprocessor.bzl", - "CPreprocessor", - "CPreprocessorArgs", - "CPreprocessorInfo", - "cxx_inherited_preprocessor_infos", - "cxx_merge_cpreprocessors", - "cxx_private_preprocessor_info", -) -load( - "@prelude//linking:link_info.bzl", - "LinkStyle", - "Linkage", - "MergedLinkInfo", - "create_merged_link_info_for_propagation", -) -load( - "@prelude//linking:shared_libraries.bzl", - "SharedLibraryInfo", - "merge_shared_libraries", -) -load( - "@prelude//utils:utils.bzl", - "expect", - "map_idx", -) -load(":compile.bzl", "GoPkgCompileInfo", "compile", "get_filtered_srcs", "get_inherited_compile_pkgs") -load(":link.bzl", "GoPkgLinkInfo", "get_inherited_link_pkgs") -load(":packages.bzl", "GoPkg", "go_attr_pkg_name", "merge_pkgs") -load(":toolchain.bzl", "GoToolchainInfo", "get_toolchain_cmd_args") - -# A map of expected linkages for provided link style -_LINKAGE_FOR_LINK_STYLE = { - LinkStyle("static"): Linkage("static"), - LinkStyle("static_pic"): Linkage("static"), - LinkStyle("shared"): Linkage("shared"), -} - -def _cgo( - ctx: AnalysisContext, - srcs: list[Artifact], - own_pre: list[CPreprocessor], - inherited_pre: list[CPreprocessorInfo]) -> (list[Artifact], list[Artifact], list[Artifact]): - """ - Run `cgo` on `.go` sources to generate Go, C, and C-Header sources.
- """ - - pre = cxx_merge_cpreprocessors(ctx, own_pre, inherited_pre) - pre_args = pre.set.project_as_args("args") - pre_include_dirs = pre.set.project_as_args("include_dirs") - - # If you change this dir or naming convention, please - # update the corresponding logic in `fbgolist`. - # Otherwise editing and linting for Go will break. - gen_dir = "cgo_gen" - - go_srcs = [] - c_headers = [] - c_srcs = [] - go_srcs.append(ctx.actions.declare_output(paths.join(gen_dir, "_cgo_gotypes.go"))) - c_srcs.append(ctx.actions.declare_output(paths.join(gen_dir, "_cgo_export.c"))) - c_headers.append(ctx.actions.declare_output(paths.join(gen_dir, "_cgo_export.h"))) - for src in srcs: - go_srcs.append(ctx.actions.declare_output(paths.join(gen_dir, paths.replace_extension(src.basename, ".cgo1.go")))) - c_srcs.append(ctx.actions.declare_output(paths.join(gen_dir, paths.replace_extension(src.basename, ".cgo2.c")))) - - # Return a `cmd_args` to use as the generated sources. - go_toolchain = ctx.attrs._go_toolchain[GoToolchainInfo] - expect(go_toolchain.cgo != None) - expect(CxxToolchainInfo in ctx.attrs._cxx_toolchain) - cxx_toolchain = ctx.attrs._cxx_toolchain[CxxToolchainInfo] - - cmd = get_toolchain_cmd_args(go_toolchain, go_root = False) - cmd.add(go_toolchain.cgo_wrapper[RunInfo]) - - args = cmd_args() - args.add(cmd_args(go_toolchain.cgo, format = "--cgo={}")) - - c_compiler = cxx_toolchain.c_compiler_info - # linker = cxx_toolchain.linker_info - - # Passing fbcode-platform ldflags may create S365277, so I would - # comment this change until we really need to do it. - # ldflags = cmd_args( - # linker.linker_flags, - # go_toolchain.external_linker_flags, - # ) - - args.add( - cmd_args(c_compiler.compiler, format = "--env-cc={}"), - # cmd_args(ldflags, format = "--env-ldflags={}"), - ) - - # TODO(agallagher): cgo outputs a dir with generated sources, but I'm not - # sure how to pass in an output dir *and* enumerate the sources we know will - # generated w/o v2 complaining that the output dir conflicts with the nested - # artifacts. - args.add(cmd_args(go_srcs[0].as_output(), format = "--output={}/..")) - args.add(cmd_args(pre_args, format = "--cpp={}")) - args.add(cmd_args(pre_include_dirs, format = "--cpp={}")) - args.add(cmd_args(c_compiler.preprocessor_flags, format = "--cpp={}")) - args.add(cmd_args(c_compiler.compiler_flags, format = "--cpp={}")) - - # Passing the same value as go-build, because our -g flags break cgo in some buck modes - args.add(cmd_args(["-g"], format = "--cpp={}")) - args.add(srcs) - - argsfile = ctx.actions.declare_output(paths.join(gen_dir, ".cgo.argsfile")) - ctx.actions.write(argsfile.as_output(), args, allow_args = True) - - cmd.add(cmd_args(argsfile, format = "@{}").hidden([args])) - - for src in go_srcs + c_headers + c_srcs: - cmd.hidden(src.as_output()) - ctx.actions.run(cmd, category = "cgo") - - return go_srcs, c_headers, c_srcs - -def cgo_library_impl(ctx: AnalysisContext) -> list[Provider]: - pkg_name = go_attr_pkg_name(ctx) - - project_root_file = get_project_root_file(ctx) - - # Gather preprocessor inputs. - (own_pre, _) = cxx_private_preprocessor_info( - ctx, - cxx_get_regular_cxx_headers_layout(ctx), - project_root_file = project_root_file, - ) - inherited_pre = cxx_inherited_preprocessor_infos(ctx.attrs.deps) - - # Separate sources into C++ and CGO sources. 
- cgo_srcs = [] - cxx_srcs = [] - for src in ctx.attrs.srcs: - if src.extension == ".go": - cgo_srcs.append(src) - elif src.extension in (".c", ".cpp"): - cxx_srcs.append(src) - else: - fail("unexpected extension: {}".format(src)) - - # Generate CGO and C sources. - go_srcs, c_headers, c_srcs = _cgo(ctx, cgo_srcs, [own_pre], inherited_pre) - cxx_srcs.extend(c_srcs) - - # Wrap the generated CGO C headers in a CPreprocessor object for compiling. - cgo_headers_pre = CPreprocessor(relative_args = CPreprocessorArgs(args = [ - "-I", - prepare_headers( - ctx, - {h.basename: h for h in c_headers}, - "cgo-private-headers", - None, - ).include_path, - ])) - - link_style = ctx.attrs.link_style - if link_style == None: - link_style = "static" - linkage = _LINKAGE_FOR_LINK_STYLE[LinkStyle(link_style)] - - # Copmile C++ sources into object files. - c_compile_cmds = cxx_compile_srcs( - ctx, - CxxRuleConstructorParams( - rule_type = "cgo_library", - headers_layout = cxx_get_regular_cxx_headers_layout(ctx), - srcs = [CxxSrcWithFlags(file = src) for src in cxx_srcs], - ), - # Create private header tree and propagate via args. - [own_pre, cgo_headers_pre], - inherited_pre, - [], - linkage, - ) - - compiled_objects = c_compile_cmds.pic.objects - - # Merge all sources together to pass to the Go compile step. - all_srcs = cmd_args(go_srcs + compiled_objects) - if ctx.attrs.go_srcs: - all_srcs.add(get_filtered_srcs(ctx, ctx.attrs.go_srcs)) - - # Build Go library. - static_pkg = compile( - ctx, - pkg_name, - all_srcs, - deps = ctx.attrs.deps + ctx.attrs.exported_deps, - shared = False, - ) - shared_pkg = compile( - ctx, - pkg_name, - all_srcs, - deps = ctx.attrs.deps + ctx.attrs.exported_deps, - shared = True, - ) - pkgs = { - pkg_name: GoPkg( - shared = shared_pkg, - static = static_pkg, - ), - } - - # We need to keep pre-processed cgo source files, - # because they are required for any editing and linting (like VSCode+gopls) - # to work with cgo. And when nearly every FB service client is cgo, - # we need to support it well. - return [ - DefaultInfo(default_output = static_pkg, other_outputs = go_srcs), - GoPkgCompileInfo(pkgs = merge_pkgs([ - pkgs, - get_inherited_compile_pkgs(ctx.attrs.exported_deps), - ])), - GoPkgLinkInfo(pkgs = merge_pkgs([ - pkgs, - get_inherited_link_pkgs(ctx.attrs.deps + ctx.attrs.exported_deps), - ])), - create_merged_link_info_for_propagation(ctx, filter(None, [d.get(MergedLinkInfo) for d in ctx.attrs.deps])), - merge_shared_libraries( - ctx.actions, - deps = filter(None, map_idx(SharedLibraryInfo, ctx.attrs.deps)), - ), - ] diff --git a/prelude/go/compile.bzl b/prelude/go/compile.bzl index ffb4f25cd7f27..e3038756c0e27 100644 --- a/prelude/go/compile.bzl +++ b/prelude/go/compile.bzl @@ -10,10 +10,7 @@ load( ":packages.bzl", "GoPkg", # @Unused used as type "merge_pkgs", - "pkg_artifacts", - "stdlib_pkg_artifacts", ) -load(":toolchain.bzl", "GoToolchainInfo", "get_toolchain_cmd_args") # Provider wrapping packages used for compiling. 
GoPkgCompileInfo = provider(fields = { @@ -31,135 +28,18 @@ GoTestInfo = provider( }, ) -def _out_root(shared: bool = False): - return "__shared__" if shared else "__static__" - def get_inherited_compile_pkgs(deps: list[Dependency]) -> dict[str, GoPkg]: return merge_pkgs([d[GoPkgCompileInfo].pkgs for d in deps if GoPkgCompileInfo in d]) -def get_filtered_srcs(ctx: AnalysisContext, srcs: list[Artifact], tests: bool = False, force_disable_cgo: bool = False) -> cmd_args: - """ - Filter the input sources based on build pragma - """ - - go_toolchain = ctx.attrs._go_toolchain[GoToolchainInfo] - - # Delegate to `go list` to filter out srcs with incompatible `// +build` - # pragmas. - filtered_srcs = ctx.actions.declare_output("__filtered_srcs__.txt") - srcs_dir = ctx.actions.symlinked_dir( - "__srcs__", - {src.short_path: src for src in srcs}, - ) - filter_cmd = get_toolchain_cmd_args(go_toolchain, go_root = True, force_disable_cgo = force_disable_cgo) - filter_cmd.add(go_toolchain.filter_srcs[RunInfo]) - filter_cmd.add(cmd_args(go_toolchain.go, format = "--go={}")) - if tests: - filter_cmd.add("--tests") - filter_cmd.add(cmd_args(",".join(go_toolchain.tags), format = "--tags={}")) - filter_cmd.add(cmd_args(filtered_srcs.as_output(), format = "--output={}")) - filter_cmd.add(srcs_dir) - ctx.actions.run(filter_cmd, category = "go_filter_srcs") - - # Add filtered srcs to compile command. - return cmd_args(filtered_srcs, format = "@{}").hidden(srcs).hidden(srcs_dir) - -def _assemble_cmd( - ctx: AnalysisContext, - pkg_name: str, - flags: list[str] = [], - shared: bool = False) -> cmd_args: - go_toolchain = ctx.attrs._go_toolchain[GoToolchainInfo] - cmd = cmd_args() - cmd.add(go_toolchain.assembler) - cmd.add(flags) - cmd.add("-p", pkg_name) - if shared: - cmd.add("-shared") - - return cmd - -def _compile_cmd( - ctx: AnalysisContext, - pkg_name: str, - pkgs: dict[str, Artifact] = {}, - deps: list[Dependency] = [], - flags: list[str] = [], - shared: bool = False) -> cmd_args: - go_toolchain = ctx.attrs._go_toolchain[GoToolchainInfo] - - cmd = cmd_args() - cmd.add(go_toolchain.compiler) - cmd.add("-p", pkg_name) - cmd.add("-pack") - cmd.add("-nolocalimports") - cmd.add(flags) - cmd.add("-buildid=") - - # Add shared/static flags. - if shared: - cmd.add("-shared") - cmd.add(go_toolchain.compiler_flags_shared) - else: - cmd.add(go_toolchain.compiler_flags_static) - - # Add Go pkgs inherited from deps to compiler search path. - all_pkgs = merge_pkgs([ - pkgs, - pkg_artifacts(get_inherited_compile_pkgs(deps), shared = shared), - stdlib_pkg_artifacts(go_toolchain, shared = shared), - ]) - - importcfg_content = [] - for name_, pkg_ in all_pkgs.items(): - # Hack: we use cmd_args get "artifact" valid path and write it to a file. - importcfg_content.append(cmd_args("packagefile ", name_, "=", pkg_, delimiter = "")) - - # Future work: support importmap in buck rules insted of hacking here. 
- if name_.startswith("third-party-source/go/"): - real_name_ = name_.removeprefix("third-party-source/go/") - importcfg_content.append(cmd_args("importmap ", real_name_, "=", name_, delimiter = "")) - - root = _out_root(shared) - importcfg = ctx.actions.declare_output(root, paths.basename(pkg_name) + "-importcfg") - ctx.actions.write(importcfg.as_output(), importcfg_content) - - cmd.add("-importcfg", importcfg) - cmd.hidden(all_pkgs.values()) - - return cmd - -def compile( - ctx: AnalysisContext, - pkg_name: str, - srcs: cmd_args, - pkgs: dict[str, Artifact] = {}, - deps: list[Dependency] = [], - compile_flags: list[str] = [], - assemble_flags: list[str] = [], - shared: bool = False) -> Artifact: - go_toolchain = ctx.attrs._go_toolchain[GoToolchainInfo] - root = _out_root(shared) - output = ctx.actions.declare_output(root, paths.basename(pkg_name) + ".a") - - cmd = get_toolchain_cmd_args(go_toolchain) - cmd.add(go_toolchain.compile_wrapper[RunInfo]) - cmd.add(cmd_args(output.as_output(), format = "--output={}")) - cmd.add(cmd_args(_compile_cmd(ctx, pkg_name, pkgs, deps, compile_flags, shared = shared), format = "--compiler={}")) - cmd.add(cmd_args(_assemble_cmd(ctx, pkg_name, assemble_flags, shared = shared), format = "--assembler={}")) - cmd.add(cmd_args(go_toolchain.packer, format = "--packer={}")) - if ctx.attrs.embedcfg: - cmd.add(cmd_args(ctx.attrs.embedcfg, format = "--embedcfg={}")) - - argsfile = ctx.actions.declare_output(root, pkg_name + ".go.argsfile") - srcs_args = cmd_args(srcs) - ctx.actions.write(argsfile.as_output(), srcs_args, allow_args = True) - - cmd.add(cmd_args(argsfile, format = "@{}").hidden([srcs_args])) - - identifier = paths.basename(pkg_name) - if shared: - identifier += "[shared]" - ctx.actions.run(cmd, category = "go_compile", identifier = identifier) - - return output +def infer_package_root(srcs: list[Artifact]) -> str: + go_sources = [s for s in srcs if s.extension == ".go"] + if len(go_sources) == 0: + return "" + dir_set = {paths.dirname(s.short_path): None for s in go_sources} + if len(dir_set) > 1: + fail("Provide `package_root` target attribute. Can't infer it when there are multiple directories containing .go files: {}. 
Sources: {}".format( + dir_set.keys(), + [s.short_path for s in go_sources], + )) + + return dir_set.keys()[0] diff --git a/prelude/go/constraints/BUCK.v2 b/prelude/go/constraints/BUCK.v2 new file mode 100644 index 0000000000000..2eea56458ced9 --- /dev/null +++ b/prelude/go/constraints/BUCK.v2 @@ -0,0 +1,96 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +constraint_setting( + name = "cgo_enabled", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "cgo_enabled_true", + constraint_setting = ":cgo_enabled", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "cgo_enabled_false", + constraint_setting = ":cgo_enabled", + visibility = ["PUBLIC"], +) + +constraint_setting( + name = "compile_shared", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "compile_shared_false", + constraint_setting = ":compile_shared", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "compile_shared_true", + constraint_setting = ":compile_shared", + visibility = ["PUBLIC"], +) + +constraint_setting( + name = "race", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "race_false", + constraint_setting = ":race", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "race_true", + constraint_setting = ":race", + visibility = ["PUBLIC"], +) + +constraint_setting( + name = "asan", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "asan_false", + constraint_setting = ":asan", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "asan_true", + constraint_setting = ":asan", + visibility = ["PUBLIC"], +) + +constraint_setting( + name = "coverage_mode", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "coverage_mode_set", + constraint_setting = ":coverage_mode", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "coverage_mode_count", + constraint_setting = ":coverage_mode", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "coverage_mode_atomic", + constraint_setting = ":coverage_mode", + visibility = ["PUBLIC"], +) diff --git a/prelude/go/coverage.bzl b/prelude/go/coverage.bzl index b85f6845386fc..cf6ab2808d655 100644 --- a/prelude/go/coverage.bzl +++ b/prelude/go/coverage.bzl @@ -23,14 +23,17 @@ GoCoverResult = record( variables = field(cmd_args), ) -def cover_srcs(ctx: AnalysisContext, pkg_name: str, mode: GoCoverageMode, srcs: cmd_args) -> GoCoverResult: - out_covered_src_dir = ctx.actions.declare_output("__covered_srcs__", dir = True) - out_srcs_argsfile = ctx.actions.declare_output("covered_srcs.txt") - out_coverage_vars_argsfile = ctx.actions.declare_output("coverage_vars.txt") +def cover_srcs(ctx: AnalysisContext, pkg_name: str, mode: GoCoverageMode, srcs: cmd_args, shared: bool) -> GoCoverResult: + path = pkg_name + "_static_" + mode.value + if shared: + path = pkg_name + "_shared_" + mode.value + out_covered_src_dir = ctx.actions.declare_output("__covered_" + path + "_srcs__", dir = True) + out_srcs_argsfile = ctx.actions.declare_output("covered_" + path + "_srcs.txt") + out_coverage_vars_argsfile = ctx.actions.declare_output("coverage_" + path + "_vars.txt") go_toolchain = ctx.attrs._go_toolchain[GoToolchainInfo] cmd = cmd_args() - cmd.add(go_toolchain.cover_srcs[RunInfo]) + cmd.add(go_toolchain.cover_srcs) cmd.add("--cover", go_toolchain.cover) cmd.add("--coverage-mode", mode.value) cmd.add("--coverage-var-argsfile", out_coverage_vars_argsfile.as_output()) @@ -38,9 +41,9 @@ def cover_srcs(ctx: AnalysisContext, pkg_name: str, mode: GoCoverageMode, srcs:
cmd.add("--out-srcs-argsfile", out_srcs_argsfile.as_output()) cmd.add("--pkg-name", pkg_name) cmd.add(srcs) - ctx.actions.run(cmd, category = "go_cover") + ctx.actions.run(cmd, category = "go_cover", identifier = path) return GoCoverResult( - srcs = cmd_args(out_srcs_argsfile, format = "@{}").hidden(out_covered_src_dir).hidden(srcs), + srcs = cmd_args(out_srcs_argsfile, format = "@{}", hidden = [out_covered_src_dir, srcs]), variables = cmd_args(out_coverage_vars_argsfile, format = "@{}"), ) diff --git a/prelude/go/go_binary.bzl b/prelude/go/go_binary.bzl index 8a69aac5a132b..32c6e8de18507 100644 --- a/prelude/go/go_binary.bzl +++ b/prelude/go/go_binary.bzl @@ -5,59 +5,76 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +load("@prelude//:artifacts.bzl", "single_artifact") +load("@prelude//dist:dist_info.bzl", "DistInfo") load( "@prelude//linking:link_info.bzl", "LinkStyle", ) load( "@prelude//utils:utils.bzl", - "expect", "map_val", "value_or", ) -load(":compile.bzl", "compile", "get_filtered_srcs") load(":link.bzl", "link") +load(":package_builder.bzl", "build_package") +load(":packages.bzl", "go_attr_pkg_name") +load(":toolchain.bzl", "GoToolchainInfo", "evaluate_cgo_enabled") def go_binary_impl(ctx: AnalysisContext) -> list[Provider]: - lib = compile( + go_toolchain = ctx.attrs._go_toolchain[GoToolchainInfo] + + lib, pkg_info = build_package( ctx, - "main", - get_filtered_srcs(ctx, ctx.attrs.srcs), + go_attr_pkg_name(ctx), + main = True, + srcs = ctx.attrs.srcs, + package_root = ctx.attrs.package_root, deps = ctx.attrs.deps, - compile_flags = ctx.attrs.compiler_flags, + compiler_flags = ctx.attrs.compiler_flags, + tags = ctx.attrs._tags, + race = ctx.attrs._race, + asan = ctx.attrs._asan, + embedcfg = ctx.attrs.embedcfg, + cgo_enabled = evaluate_cgo_enabled(go_toolchain, ctx.attrs.cgo_enabled), ) - (bin, runtime_files) = link( + (bin, runtime_files, external_debug_info) = link( ctx, lib, deps = ctx.attrs.deps, link_style = value_or(map_val(LinkStyle, ctx.attrs.link_style), LinkStyle("static")), linker_flags = ctx.attrs.linker_flags, link_mode = ctx.attrs.link_mode, + race = ctx.attrs._race, + asan = ctx.attrs._asan, + external_linker_flags = ctx.attrs.external_linker_flags, ) - hidden = [] + # runtime_files are all the artifacts that must be present in order for this + # binary to be runnable. Notably, all of its shared library dependencies. + # This is materialized when a Go binary is executed as a genrule. + # + # other_outputs is a superset of runtime_files, adding external debuginfo + # which is necessary for a user to run this binary in a debugger. This is + # materialized when a Go binary is the end result of a build. + runtime_files = list(runtime_files) + other_outputs = runtime_files + external_debug_info + for resource in ctx.attrs.resources: - if type(resource) == "artifact": - hidden.append(resource) - else: - # Otherwise, this is a dependency, so extract the resource and other - # resources from the `DefaultInfo` provider. 
- info = resource[DefaultInfo] - expect( - len(info.default_outputs) == 1, - "expected exactly one default output from {} ({})" - .format(resource, info.default_outputs), - ) - [resource] = info.default_outputs - other = info.other_outputs - - hidden.append(resource) - hidden.extend(other) + resource = single_artifact(resource) + + runtime_files.append(resource.default_output) + runtime_files.extend(resource.nondebug_runtime_files) + + other_outputs.append(resource.default_output) + other_outputs.extend(resource.other_outputs) return [ DefaultInfo( default_output = bin, - other_outputs = hidden + runtime_files, + other_outputs = other_outputs, ), - RunInfo(args = cmd_args(bin).hidden(hidden + runtime_files)), + RunInfo(args = cmd_args(bin, hidden = other_outputs)), + DistInfo(nondebug_runtime_files = runtime_files), + pkg_info, ] diff --git a/prelude/go/go_exported_library.bzl b/prelude/go/go_exported_library.bzl index 613a286f86543..2a464943846a0 100644 --- a/prelude/go/go_exported_library.bzl +++ b/prelude/go/go_exported_library.bzl @@ -5,40 +5,155 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxToolchainInfo") +load("@prelude//cxx:linker.bzl", "get_default_shared_library_name") +load( + "@prelude//cxx:preprocessor.bzl", + "cxx_inherited_preprocessor_infos", + "cxx_merge_cpreprocessors", +) +load( + "@prelude//linking:link_groups.bzl", + "merge_link_group_lib_info", +) load( "@prelude//linking:link_info.bzl", + "Archive", + "ArchiveLinkable", + "LibOutputStyle", + "LinkInfo", + "LinkInfos", "LinkStyle", + "LinkedObject", + "MergedLinkInfo", # @unused Used as a type + "SharedLibLinkable", + "create_merged_link_info", +) +load( + "@prelude//linking:linkable_graph.bzl", + "create_linkable_graph", + "create_linkable_graph_node", + "create_linkable_node", +) +load( + "@prelude//linking:shared_libraries.bzl", + "SharedLibraries", + "SharedLibraryInfo", + "create_shlib", + "merge_shared_libraries", ) load( "@prelude//utils:utils.bzl", + "map_idx", "map_val", "value_or", ) -load(":compile.bzl", "compile", "get_filtered_srcs") load(":link.bzl", "GoBuildMode", "link") +load(":package_builder.bzl", "build_package") +load(":packages.bzl", "cgo_exported_preprocessor", "go_attr_pkg_name") +load(":toolchain.bzl", "GoToolchainInfo", "evaluate_cgo_enabled") def go_exported_library_impl(ctx: AnalysisContext) -> list[Provider]: - lib = compile( - ctx, - "main", - get_filtered_srcs(ctx, ctx.attrs.srcs), - deps = ctx.attrs.deps, - compile_flags = ctx.attrs.compiler_flags, - shared = True, - ) - (bin, runtime_files) = link( + go_toolchain = ctx.attrs._go_toolchain[GoToolchainInfo] + lib, pkg_info = build_package( ctx, - lib, + go_attr_pkg_name(ctx), + main = True, + srcs = ctx.attrs.srcs, + package_root = ctx.attrs.package_root, deps = ctx.attrs.deps, - build_mode = GoBuildMode(ctx.attrs.build_mode), - link_style = value_or(map_val(LinkStyle, ctx.attrs.link_style), LinkStyle("static_pic")), - linker_flags = ctx.attrs.linker_flags, - external_linker_flags = ctx.attrs.external_linker_flags, - shared = True, + compiler_flags = ctx.attrs.compiler_flags, + tags = ctx.attrs._tags, + race = ctx.attrs._race, + asan = ctx.attrs._asan, + embedcfg = ctx.attrs.embedcfg, + cgo_enabled = evaluate_cgo_enabled(go_toolchain, ctx.attrs.cgo_enabled), ) + + def link_variant(build_mode: GoBuildMode): + (exp_lib, _, _) = link( + ctx, + lib, + deps = ctx.attrs.deps, + build_mode = build_mode, + link_style = 
value_or(map_val(LinkStyle, ctx.attrs.link_style), LinkStyle("static_pic")), + linker_flags = ctx.attrs.linker_flags, + external_linker_flags = ctx.attrs.external_linker_flags, + race = ctx.attrs._race, + asan = ctx.attrs._asan, + ) + return exp_lib + + c_archive = link_variant(GoBuildMode("c_archive")) # .a - PIC-archive + c_shared = link_variant(GoBuildMode("c_shared")) # .so - PIC-shared_lib + + cxx_toolchain = ctx.attrs._cxx_toolchain[CxxToolchainInfo] + + soname = get_default_shared_library_name(cxx_toolchain.linker_info, ctx.label) + + link_infos = { + LibOutputStyle("archive"): LinkInfos( + default = LinkInfo(linkables = [ArchiveLinkable( + archive = Archive(artifact = c_archive), + linker_type = cxx_toolchain.linker_info.type, + )]), + ), + LibOutputStyle("pic_archive"): LinkInfos( + default = LinkInfo(linkables = [ArchiveLinkable( + archive = Archive(artifact = c_archive), + linker_type = cxx_toolchain.linker_info.type, + )]), + ), + LibOutputStyle("shared_lib"): LinkInfos( + default = LinkInfo(linkables = [SharedLibLinkable( + lib = c_shared, + )]), + ), + } + + shared_libs = SharedLibraries(libraries = [ + create_shlib( + soname = soname, + label = ctx.label, + lib = LinkedObject( + output = c_shared, + unstripped_output = c_shared, + ), + ), + ]) + + own_exported_preprocessors = [cgo_exported_preprocessor(ctx, pkg_info)] if ctx.attrs.generate_exported_header else [] + return [ DefaultInfo( - default_output = bin, - other_outputs = runtime_files, + default_output = c_archive if ctx.attrs.build_mode == "c_archive" else c_shared, + ), + create_merged_link_info( + ctx, + cxx_toolchain.pic_behavior, + link_infos = link_infos, + deps = filter(None, map_idx(MergedLinkInfo, ctx.attrs.deps)), + ), + merge_shared_libraries( + ctx.actions, + node = shared_libs, + deps = filter(None, map_idx(SharedLibraryInfo, ctx.attrs.deps)), + ), + merge_link_group_lib_info(deps = ctx.attrs.deps), + create_linkable_graph( + ctx, + node = create_linkable_graph_node( + ctx, + linkable_node = create_linkable_node( + ctx, + default_soname = soname, + deps = ctx.attrs.deps, + link_infos = link_infos, + shared_libs = shared_libs, + ), + ), + deps = ctx.attrs.deps, ), + cxx_merge_cpreprocessors(ctx, own_exported_preprocessors, cxx_inherited_preprocessor_infos(ctx.attrs.deps)), + pkg_info, ] diff --git a/prelude/go/go_library.bzl b/prelude/go/go_library.bzl index 2bc66e990cc27..5379cbfac689e 100644 --- a/prelude/go/go_library.bzl +++ b/prelude/go/go_library.bzl @@ -5,15 +5,24 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree.
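With the rewrite above, go_exported_library no longer emits just a binary-like output: it links both a c_archive and a c_shared variant and republishes them through the standard C++ link providers (MergedLinkInfo, SharedLibraryInfo, the linkable graph, and optionally exported cgo headers). A minimal sketch of a consuming target graph, assuming the rules in this diff (target and file names are invented for illustration):

```starlark
# Hypothetical BUCK file; `build_mode` and `generate_exported_header` are the
# attributes handled by go_exported_library_impl above.
go_exported_library(
    name = "gonative",
    srcs = ["export.go"],
    build_mode = "c_archive",  # DefaultInfo then points at the PIC .a variant
    generate_exported_header = True,  # exports cgo headers via cgo_exported_preprocessor
)

cxx_binary(
    name = "app",
    srcs = ["main.cpp"],
    # Thanks to the providers returned above, the Go target can sit in deps
    # like any ordinary cxx_library.
    deps = [":gonative"],
)
```

The go_library.bzl changes that follow apply the same provider treatment to plain Go libraries.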
+load( + "@prelude//cxx:preprocessor.bzl", + "cxx_inherited_preprocessor_infos", + "cxx_merge_cpreprocessors", +) load( "@prelude//linking:link_groups.bzl", - "LinkGroupLibInfo", + "merge_link_group_lib_info", ) load( "@prelude//linking:link_info.bzl", "MergedLinkInfo", "create_merged_link_info_for_propagation", ) +load( + "@prelude//linking:linkable_graph.bzl", + "create_linkable_graph", +) load( "@prelude//linking:shared_libraries.bzl", "SharedLibraryInfo", @@ -23,56 +32,51 @@ load( "@prelude//utils:utils.bzl", "map_idx", ) -load(":compile.bzl", "GoPkgCompileInfo", "GoTestInfo", "compile", "get_filtered_srcs", "get_inherited_compile_pkgs") +load(":compile.bzl", "GoPkgCompileInfo", "GoTestInfo") +load(":coverage.bzl", "GoCoverageMode") load(":link.bzl", "GoPkgLinkInfo", "get_inherited_link_pkgs") -load(":packages.bzl", "GoPkg", "go_attr_pkg_name", "merge_pkgs") +load(":package_builder.bzl", "build_package") +load(":packages.bzl", "cgo_exported_preprocessor", "go_attr_pkg_name", "merge_pkgs") +load(":toolchain.bzl", "GoToolchainInfo", "evaluate_cgo_enabled") def go_library_impl(ctx: AnalysisContext) -> list[Provider]: + go_toolchain = ctx.attrs._go_toolchain[GoToolchainInfo] + pkgs = {} - default_output = None - pkg_name = None - if ctx.attrs.srcs: - pkg_name = go_attr_pkg_name(ctx) + pkg_name = go_attr_pkg_name(ctx) - # We need to set CGO_DESABLED for "pure" Go libraries, otherwise CGo files may be selected for compilation. - srcs = get_filtered_srcs(ctx, ctx.attrs.srcs, force_disable_cgo = True) + race = ctx.attrs._race + asan = ctx.attrs._asan + coverage_mode = GoCoverageMode(ctx.attrs._coverage_mode) if ctx.attrs._coverage_mode else None - static_pkg = compile( - ctx, - pkg_name, - srcs = srcs, - deps = ctx.attrs.deps + ctx.attrs.exported_deps, - compile_flags = ctx.attrs.compiler_flags, - assemble_flags = ctx.attrs.assembler_flags, - shared = False, - ) + pkg, pkg_info = build_package( + ctx, + pkg_name, + main = False, + srcs = ctx.attrs.srcs + ctx.attrs.headers, + package_root = ctx.attrs.package_root, + deps = ctx.attrs.deps, + compiler_flags = ctx.attrs.compiler_flags, + assembler_flags = ctx.attrs.assembler_flags, + tags = ctx.attrs._tags, + race = race, + asan = asan, + coverage_mode = coverage_mode, + embedcfg = ctx.attrs.embedcfg, + cgo_enabled = evaluate_cgo_enabled(go_toolchain, ctx.attrs._cgo_enabled, ctx.attrs.override_cgo_enabled), + ) - shared_pkg = compile( - ctx, - pkg_name, - srcs = srcs, - deps = ctx.attrs.deps + ctx.attrs.exported_deps, - compile_flags = ctx.attrs.compiler_flags, - assemble_flags = ctx.attrs.assembler_flags, - shared = True, - ) + default_output = pkg.pkg + pkgs[pkg_name] = pkg - default_output = static_pkg - pkgs[pkg_name] = GoPkg( - shared = shared_pkg, - static = static_pkg, - ) + own_exported_preprocessors = [cgo_exported_preprocessor(ctx, pkg_info)] if ctx.attrs.generate_exported_header else [] return [ DefaultInfo(default_output = default_output), - LinkGroupLibInfo(libs = {}), - GoPkgCompileInfo(pkgs = merge_pkgs([ - pkgs, - get_inherited_compile_pkgs(ctx.attrs.exported_deps), - ])), + GoPkgCompileInfo(pkgs = pkgs), GoPkgLinkInfo(pkgs = merge_pkgs([ pkgs, - get_inherited_link_pkgs(ctx.attrs.deps + ctx.attrs.exported_deps), + get_inherited_link_pkgs(ctx.attrs.deps), ])), GoTestInfo( deps = ctx.attrs.deps, @@ -84,4 +88,11 @@ def go_library_impl(ctx: AnalysisContext) -> list[Provider]: ctx.actions, deps = filter(None, map_idx(SharedLibraryInfo, ctx.attrs.deps)), ), + merge_link_group_lib_info(deps = ctx.attrs.deps), + create_linkable_graph( + 
ctx, + deps = ctx.attrs.deps, + ), + cxx_merge_cpreprocessors(ctx, own_exported_preprocessors, cxx_inherited_preprocessor_infos(ctx.attrs.deps)), + pkg_info, ] diff --git a/prelude/go/go_list.bzl b/prelude/go/go_list.bzl new file mode 100644 index 0000000000000..37bafdb6f3306 --- /dev/null +++ b/prelude/go/go_list.bzl @@ -0,0 +1,132 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//:paths.bzl", "paths") +load(":toolchain.bzl", "GoToolchainInfo", "get_toolchain_env_vars") + +GoListOut = record( + name = field(str), + imports = field(list[str], default = []), + test_imports = field(list[str], default = []), + x_test_imports = field(list[str], default = []), + go_files = field(list[Artifact], default = []), + h_files = field(list[Artifact], default = []), + c_files = field(list[Artifact], default = []), + cxx_files = field(list[Artifact], default = []), + cgo_files = field(list[Artifact], default = []), + s_files = field(list[Artifact], default = []), + test_go_files = field(list[Artifact], default = []), + x_test_go_files = field(list[Artifact], default = []), + ignored_go_files = field(list[Artifact], default = []), + ignored_other_files = field(list[Artifact], default = []), + embed_files = field(list[Artifact], default = []), + cgo_cflags = field(list[str], default = []), + cgo_cppflags = field(list[str], default = []), +) + +def go_list(ctx: AnalysisContext, pkg_name: str, srcs: list[Artifact], package_root: str, tags: list[str], cgo_enabled: bool, with_tests: bool, asan: bool) -> Artifact: + go_toolchain = ctx.attrs._go_toolchain[GoToolchainInfo] + env = get_toolchain_env_vars(go_toolchain) + env["GO111MODULE"] = "off" + env["CGO_ENABLED"] = "1" if cgo_enabled else "0" + + go_list_out = ctx.actions.declare_output(paths.basename(pkg_name) + "_go_list.json") + + # Create file structure that `go list` can recognize + # Use copied_dir, because embed doesn't work with symlinks + srcs_dir = ctx.actions.copied_dir( + "__{}_srcs_dir__".format(paths.basename(pkg_name)), + {src.short_path.removeprefix(package_root).lstrip("/"): src for src in srcs}, + ) + all_tags = [] + go_toolchain.tags + tags + if asan: + all_tags.append("asan") + + required_fields = "Name,Imports,GoFiles,CgoFiles,HFiles,CFiles,CXXFiles,SFiles,EmbedFiles,CgoCFLAGS,CgoCPPFLAGS,IgnoredGoFiles,IgnoredOtherFiles" + if with_tests: + required_fields += ",TestImports,XTestImports,TestGoFiles,XTestGoFiles" + + go_list_args = [ + go_toolchain.go_wrapper, + ["--go", go_toolchain.go], + ["--workdir", srcs_dir], + ["--output", go_list_out.as_output()], + "list", + "-e", + "-json=" + required_fields, + ["-tags", ",".join(all_tags) if all_tags else []], + ".", + ] + + identifier = paths.basename(pkg_name) + ctx.actions.run(go_list_args, env = env, category = "go_list", identifier = identifier) + + return go_list_out + +def parse_go_list_out(srcs: list[Artifact], package_root: str, go_list_out: ArtifactValue) -> GoListOut: + go_list = go_list_out.read_json() + go_files, cgo_files, h_files, c_files, cxx_files, s_files, test_go_files, x_test_go_files, ignored_go_files, ignored_other_files, embed_files = [], [], [], [], [], [], [], [], [], [], [] + + for src in srcs: + # remove package_root prefix from src artifact path to match `go list` output format + src_path = 
src.short_path.removeprefix(package_root).lstrip("/") + if src_path in go_list.get("GoFiles", []): + go_files.append(src) + if src_path in go_list.get("CgoFiles", []): + cgo_files.append(src) + if src_path in go_list.get("HFiles", []): + h_files.append(src) + if src_path in go_list.get("CFiles", []): + c_files.append(src) + if src_path in go_list.get("CXXFiles", []): + cxx_files.append(src) + if src_path in go_list.get("SFiles", []): + s_files.append(src) + if src_path in go_list.get("TestGoFiles", []): + test_go_files.append(src) + if src_path in go_list.get("XTestGoFiles", []): + x_test_go_files.append(src) + if src_path in go_list.get("IgnoredGoFiles", []): + ignored_go_files.append(src) + if src_path in go_list.get("IgnoredOtherFiles", []): + ignored_other_files.append(src) + if _any_starts_with(go_list.get("EmbedFiles", []), src_path): + embed_files.append(src) + + name = go_list.get("Name", "") + imports = go_list.get("Imports", []) + test_imports = go_list.get("TestImports", []) + x_test_imports = go_list.get("XTestImports", []) + cgo_cflags = go_list.get("CgoCFLAGS", []) + cgo_cppflags = go_list.get("CgoCPPFLAGS", []) + + return GoListOut( + name = name, + imports = imports, + test_imports = test_imports, + x_test_imports = x_test_imports, + go_files = go_files, + h_files = h_files, + c_files = c_files, + cxx_files = cxx_files, + cgo_files = cgo_files, + s_files = s_files, + test_go_files = test_go_files, + x_test_go_files = x_test_go_files, + embed_files = embed_files, + cgo_cflags = cgo_cflags, + cgo_cppflags = cgo_cppflags, + ignored_go_files = ignored_go_files, + ignored_other_files = ignored_other_files, + ) + +def _any_starts_with(files: list[str], path: str): + for file in files: + if paths.starts_with(file, path): + return True + + return False diff --git a/prelude/go/go_stdlib.bzl b/prelude/go/go_stdlib.bzl new file mode 100644 index 0000000000000..afcf079448148 --- /dev/null +++ b/prelude/go/go_stdlib.bzl @@ -0,0 +1,90 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxToolchainInfo") +load("@prelude//cxx:target_sdk_version.bzl", "get_target_sdk_version_flags") +load(":packages.bzl", "GoStdlib") +load(":toolchain.bzl", "GoToolchainInfo", "evaluate_cgo_enabled", "get_toolchain_env_vars") + +def go_stdlib_impl(ctx: AnalysisContext) -> list[Provider]: + go_toolchain = ctx.attrs._go_toolchain[GoToolchainInfo] + cgo_enabled = evaluate_cgo_enabled(go_toolchain, ctx.attrs._cgo_enabled) + tags = [] + go_toolchain.tags + linker_flags = [] + go_toolchain.linker_flags + assembler_flags = [] + go_toolchain.assembler_flags + compiler_flags = [] + go_toolchain.compiler_flags + compiler_flags += ["-buildid="] # Make builds reproducible. 
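Stepping back to go_list.bzl above: `parse_go_list_out` consumes the JSON that the wrapped `go list -e -json=<fields>` invocation writes. A hedged sketch of that payload, rendered as the Starlark dict `read_json()` might return for a cgo-using package (all values invented; keys limited to the fields requested in `required_fields`):

```starlark
# Illustrative only - roughly what artifacts[go_list_out].read_json() yields.
go_list = {
    "Name": "mypkg",
    "Imports": ["fmt", "net/http"],
    "GoFiles": ["util.go"],  # plain Go sources, matched against src_path
    "CgoFiles": ["client.go"],  # Go sources that `import "C"`
    "HFiles": ["helper.h"],
    "CFiles": ["helper.c"],
    "SFiles": ["fast_amd64.s"],
    "EmbedFiles": ["data/config.json"],  # matched by prefix via _any_starts_with
    "CgoCFLAGS": ["-O2"],
    "CgoCPPFLAGS": ["-DDEBUG=0"],
}
```

go_stdlib.bzl, whose implementation continues below, prebuilds the standard library once per configuration so that per-package actions only compile first-party code.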
+ + if ctx.attrs._asan: + compiler_flags += ["-asan"] + tags += ["asan"] + + env = get_toolchain_env_vars(go_toolchain) + env["GODEBUG"] = "installgoroot=all" + env["CGO_ENABLED"] = "1" if cgo_enabled else "0" + + cxx_toolchain = ctx.attrs._cxx_toolchain[CxxToolchainInfo] + if cgo_enabled and cxx_toolchain != None: + c_compiler = cxx_toolchain.c_compiler_info + cflags = cmd_args(c_compiler.compiler_flags, delimiter = "\t", absolute_prefix = "%cwd%/") + cflags.add(cmd_args(get_target_sdk_version_flags(ctx), delimiter = "\t")) + env["CC"] = cmd_args(c_compiler.compiler, delimiter = "\t", absolute_prefix = "%cwd%/") + env["CGO_CFLAGS"] = cflags + env["CGO_CPPFLAGS"] = cmd_args(c_compiler.preprocessor_flags, delimiter = "\t", absolute_prefix = "%cwd%/") + + importcfg = ctx.actions.declare_output("stdlib.importcfg") + importcfg_shared = ctx.actions.declare_output("stdlib_shared.importcfg") + stdlib_pkgdir = ctx.actions.declare_output("stdlib_pkgdir", dir = True) + stdlib_pkgdir_shared = ctx.actions.declare_output("stdlib_pkgdir_shared", dir = True) + + def build_variant(out: Artifact, shared: bool) -> cmd_args: + local_assembler_flags = [] + assembler_flags + local_compiler_flags = [] + compiler_flags + if shared: + local_assembler_flags += ["-shared"] + local_compiler_flags += ["-shared"] + return cmd_args([ + go_toolchain.go_wrapper, + ["--go", go_toolchain.go], + "install", + "-pkgdir", + out.as_output(), + cmd_args(["-asmflags=", cmd_args(local_assembler_flags, delimiter = " ")], delimiter = "") if local_assembler_flags else [], + cmd_args(["-gcflags=", cmd_args(local_compiler_flags, delimiter = " ")], delimiter = "") if local_compiler_flags else [], + cmd_args(["-ldflags=", cmd_args(linker_flags, delimiter = " ")], delimiter = "") if linker_flags else [], + ["-tags", ",".join(tags)] if tags else [], + ["-race"] if ctx.attrs._race else [], + "std", + ]) + + ctx.actions.run(build_variant(stdlib_pkgdir, False), env = env, category = "go_build_stdlib", identifier = "go_build_stdlib") + ctx.actions.run(build_variant(stdlib_pkgdir_shared, True), env = env, category = "go_build_stdlib", identifier = "go_build_stdlib_shared") + + ctx.actions.run( + [ + go_toolchain.gen_stdlib_importcfg, + ["--stdlib", stdlib_pkgdir], + ["--output", importcfg.as_output()], + ], + category = "go_gen_stdlib_importcfg", + identifier = "go_gen_stdlib_importcfg", + ) + + ctx.actions.run( + [ + go_toolchain.gen_stdlib_importcfg, + ["--stdlib", stdlib_pkgdir_shared], + ["--output", importcfg_shared.as_output()], + ], + category = "go_gen_stdlib_importcfg", + identifier = "go_gen_stdlib_importcfg_shared", + ) + + return [ + DefaultInfo(default_output = stdlib_pkgdir), + GoStdlib(pkgdir = stdlib_pkgdir, importcfg = importcfg, pkgdir_shared = stdlib_pkgdir_shared, importcfg_shared = importcfg_shared), + ] diff --git a/prelude/go/go_test.bzl b/prelude/go/go_test.bzl index 11984687e4298..64026ff62b773 100644 --- a/prelude/go/go_test.bzl +++ b/prelude/go/go_test.bzl @@ -9,42 +9,54 @@ load( "@prelude//linking:link_info.bzl", "LinkStyle", ) +load( + "@prelude//tests:re_utils.bzl", + "get_re_executors_from_props", +) load( "@prelude//utils:utils.bzl", "map_val", "value_or", ) load("@prelude//test/inject_test_run_info.bzl", "inject_test_run_info") -load(":compile.bzl", "GoTestInfo", "compile", "get_filtered_srcs") -load(":coverage.bzl", "GoCoverageMode", "cover_srcs") +load(":compile.bzl", "GoTestInfo", "get_inherited_compile_pkgs") +load(":coverage.bzl", "GoCoverageMode") load(":link.bzl", "link") +load(":package_builder.bzl", 
"build_package") load(":packages.bzl", "go_attr_pkg_name") +load(":toolchain.bzl", "GoToolchainInfo", "evaluate_cgo_enabled") def _gen_test_main( ctx: AnalysisContext, pkg_name: str, coverage_mode: [GoCoverageMode, None], - coverage_vars: [cmd_args, None], + coverage_vars: dict[str, cmd_args], srcs: cmd_args) -> Artifact: """ Generate a `main.go` which calls tests from the given sources. """ output = ctx.actions.declare_output("main.go") - cmd = cmd_args() - cmd.add(ctx.attrs._testmaingen[RunInfo]) - if ctx.attrs.coverage_mode: - cmd.add(cmd_args(ctx.attrs.coverage_mode, format = "--cover-mode={}")) - cmd.add(cmd_args(output.as_output(), format = "--output={}")) - cmd.add(cmd_args(pkg_name, format = "--import-path={}")) + cmd = [] + cmd.append(ctx.attrs._testmaingen[RunInfo]) + + # if ctx.attrs.coverage_mode: + # cmd.append(cmd_args(ctx.attrs.coverage_mode, format = "--cover-mode={}")) + cmd.append(cmd_args(output.as_output(), format = "--output={}")) + cmd.append(cmd_args(pkg_name, format = "--import-path={}")) if coverage_mode != None: - cmd.add("--cover-mode", coverage_mode.value) - if coverage_vars != None: - cmd.add(coverage_vars) - cmd.add(srcs) - ctx.actions.run(cmd, category = "go_test_main_gen") + cmd.extend(["--cover-mode", coverage_mode.value]) + for _, vars in coverage_vars.items(): + cmd.append(vars) + cmd.append(srcs) + ctx.actions.run(cmd_args(cmd), category = "go_test_main_gen") return output +def is_subpackage_of(other_pkg_name: str, pkg_name: str) -> bool: + return pkg_name == other_pkg_name or other_pkg_name.startswith(pkg_name + "/") + def go_test_impl(ctx: AnalysisContext) -> list[Provider]: + go_toolchain = ctx.attrs._go_toolchain[GoToolchainInfo] + deps = ctx.attrs.deps srcs = ctx.attrs.srcs pkg_name = go_attr_pkg_name(ctx) @@ -59,47 +71,69 @@ def go_test_impl(ctx: AnalysisContext) -> list[Provider]: # TODO: should we assert that pkg_name != None here? pkg_name = lib.pkg_name - srcs = get_filtered_srcs(ctx, srcs, tests = True) - # If coverage is enabled for this test, we need to preprocess the sources # with the Go cover tool. - coverage_mode = None - coverage_vars = None - if ctx.attrs.coverage_mode != None: - coverage_mode = GoCoverageMode(ctx.attrs.coverage_mode) - cov_res = cover_srcs(ctx, pkg_name, coverage_mode, srcs) - srcs = cov_res.srcs - coverage_vars = cov_res.variables + coverage_mode = GoCoverageMode(ctx.attrs._coverage_mode) if ctx.attrs._coverage_mode else None + coverage_vars = {} + pkgs = {} # Compile all tests into a package. - tests = compile( + tests, tests_pkg_info = build_package( ctx, pkg_name, - srcs, + main = False, + srcs = srcs, + package_root = ctx.attrs.package_root, deps = deps, - compile_flags = ctx.attrs.compiler_flags, + pkgs = pkgs, + compiler_flags = ctx.attrs.compiler_flags, + tags = ctx.attrs._tags, + coverage_mode = coverage_mode, + race = ctx.attrs._race, + asan = ctx.attrs._asan, + embedcfg = ctx.attrs.embedcfg, + tests = True, + cgo_enabled = evaluate_cgo_enabled(go_toolchain, ctx.attrs.cgo_enabled), ) + if coverage_mode != None: + coverage_vars[pkg_name] = tests.coverage_vars + + # Get all packages that are linked to the test (i.e. the entire dependency tree) + for name, pkg in get_inherited_compile_pkgs(deps).items(): + if ctx.label != None and is_subpackage_of(name, ctx.label.package): + coverage_vars[name] = pkg.coverage_vars + pkgs[name] = pkg + + pkgs[pkg_name] = tests + # Generate a main function which runs the tests and build that into another # package. 
- gen_main = _gen_test_main(ctx, pkg_name, coverage_mode, coverage_vars, srcs) - main = compile(ctx, "main", cmd_args(gen_main), pkgs = {pkg_name: tests}) + gen_main = _gen_test_main(ctx, pkg_name, coverage_mode, coverage_vars, tests.srcs_list) + main, _ = build_package(ctx, pkg_name + ".test", True, [gen_main], package_root = "", pkgs = pkgs, coverage_mode = coverage_mode, race = ctx.attrs._race, asan = ctx.attrs._asan, cgo_gen_dir_name = "cgo_gen_test_main") # Link the above into a Go binary. - (bin, runtime_files) = link( + (bin, runtime_files, external_debug_info) = link( ctx = ctx, main = main, - pkgs = {pkg_name: tests}, + pkgs = pkgs, deps = deps, link_style = value_or(map_val(LinkStyle, ctx.attrs.link_style), LinkStyle("static")), linker_flags = ctx.attrs.linker_flags, + race = ctx.attrs._race, + asan = ctx.attrs._asan, + external_linker_flags = ctx.attrs.external_linker_flags, ) - run_cmd = cmd_args(bin).hidden(runtime_files) - # As per v1, copy in resources next to binary. + copied_resources = [] for resource in ctx.attrs.resources: - run_cmd.hidden(ctx.actions.copy_file(resource.short_path, resource)) + copied_resources.append(ctx.actions.copy_file(resource.short_path, resource)) + + run_cmd = cmd_args(bin, hidden = [runtime_files, external_debug_info] + copied_resources) + + # Setup RE executors based on the `remote_execution` param. + re_executor, executor_overrides = get_re_executors_from_props(ctx) return inject_test_run_info( ctx, @@ -109,5 +143,16 @@ def go_test_impl(ctx: AnalysisContext) -> list[Provider]: env = ctx.attrs.env, labels = ctx.attrs.labels, contacts = ctx.attrs.contacts, + default_executor = re_executor, + executor_overrides = executor_overrides, + # FIXME: Consider setting to true + run_from_project_root = re_executor != None, + use_project_relative_paths = re_executor != None, + ), + ) + [ + DefaultInfo( + default_output = bin, + other_outputs = [gen_main] + runtime_files + external_debug_info, ), - ) + [DefaultInfo(default_output = bin, other_outputs = [gen_main] + runtime_files)] + tests_pkg_info, + ] diff --git a/prelude/go/link.bzl b/prelude/go/link.bzl index 175602c1240d7..1b66de1176fd0 100644 --- a/prelude/go/link.bzl +++ b/prelude/go/link.bzl @@ -8,9 +8,12 @@ load("@prelude//cxx:cxx_library_utility.bzl", "cxx_inherited_link_info") load( "@prelude//cxx:cxx_link_utility.bzl", + "ExecutableSharedLibArguments", "executable_shared_lib_arguments", "make_link_args", ) +load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxToolchainInfo") +load("@prelude//cxx:linker.bzl", "get_default_shared_library_name", "get_shared_library_name_linker_flags") load( "@prelude//linking:link_info.bzl", "LinkStyle", @@ -23,19 +26,19 @@ load( "merge_shared_libraries", "traverse_shared_library_info", ) +load("@prelude//linking:stamp_build_info.bzl", "stamp_build_info") load("@prelude//os_lookup:defs.bzl", "OsLookup") load( "@prelude//utils:utils.bzl", - "map_idx", + "filter_and_map_idx", ) load( ":packages.bzl", "GoPkg", # @Unused used as type + "make_importcfg", "merge_pkgs", - "pkg_artifacts", - "stdlib_pkg_artifacts", ) -load(":toolchain.bzl", "GoToolchainInfo", "get_toolchain_cmd_args") +load(":toolchain.bzl", "GoToolchainInfo", "get_toolchain_env_vars") # Provider wrapping packages used for linking. 
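Before the link.bzl providers below, a quick illustration of the `is_subpackage_of` check defined above, which decides which inherited packages have their coverage variables folded into the generated test main (package names invented):

```starlark
# is_subpackage_of(other_pkg_name, pkg_name) as defined in go_test.bzl:
is_subpackage_of("foo/bar", "foo/bar")  # True: the test's own package
is_subpackage_of("foo/bar/baz", "foo/bar")  # True: startswith("foo/bar/")
is_subpackage_of("foo/barbaz", "foo/bar")  # False: the trailing "/" blocks false prefix hits
```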
GoPkgLinkInfo = provider(fields = { @@ -43,8 +46,9 @@ GoPkgLinkInfo = provider(fields = { }) GoBuildMode = enum( - "executable", - "c_shared", + "executable", # non-pic executable + "c_shared", # pic C-shared library + "c_archive", # pic C-static library ) def _build_mode_param(mode: GoBuildMode) -> str: @@ -52,142 +56,173 @@ def _build_mode_param(mode: GoBuildMode) -> str: return "exe" if mode == GoBuildMode("c_shared"): return "c-shared" + if mode == GoBuildMode("c_archive"): + return "c-archive" fail("unexpected: {}", mode) def get_inherited_link_pkgs(deps: list[Dependency]) -> dict[str, GoPkg]: return merge_pkgs([d[GoPkgLinkInfo].pkgs for d in deps if GoPkgLinkInfo in d]) -# TODO(cjhopman): Is link_style a LibOutputStyle or a LinkStrategy here? Based on returning an empty thing for link_style != shared, -# it seems likely its intended to be LibOutputStyle, but it's called in places that are passing what appears to be a LinkStrategy. -def _process_shared_dependencies(ctx: AnalysisContext, artifact: Artifact, deps: list[Dependency], link_style: LinkStyle): +# TODO(cjhopman): Is link_style a LibOutputStyle or a LinkStrategy here? Based +# on returning an empty thing for link_style != shared, it seems likely its +# intended to be LibOutputStyle, but it's called in places that are passing what +# appears to be a LinkStrategy. +def _process_shared_dependencies( + ctx: AnalysisContext, + artifact: Artifact, + deps: list[Dependency], + link_style: LinkStyle) -> ExecutableSharedLibArguments: """ Provides files and linker args needed to for binaries with shared library linkage. - the runtime files needed to run binary linked with shared libraries - linker arguments for shared libraries """ if link_style != LinkStyle("shared"): - return ([], []) + return ExecutableSharedLibArguments() shlib_info = merge_shared_libraries( ctx.actions, - deps = filter(None, map_idx(SharedLibraryInfo, deps)), + deps = filter_and_map_idx(SharedLibraryInfo, deps), ) - shared_libs = {} - for name, shared_lib in traverse_shared_library_info(shlib_info).items(): - shared_libs[name] = shared_lib.lib + shared_libs = traverse_shared_library_info(shlib_info) - extra_link_args, runtime_files, _ = executable_shared_lib_arguments( - ctx.actions, - ctx.attrs._go_toolchain[GoToolchainInfo].cxx_toolchain_for_linking, + return executable_shared_lib_arguments( + ctx, + ctx.attrs._cxx_toolchain[CxxToolchainInfo], artifact, shared_libs, ) - return (runtime_files, extra_link_args) - def link( ctx: AnalysisContext, - main: Artifact, - pkgs: dict[str, Artifact] = {}, + main: GoPkg, + pkgs: dict[str, GoPkg] = {}, deps: list[Dependency] = [], build_mode: GoBuildMode = GoBuildMode("executable"), link_mode: [str, None] = None, link_style: LinkStyle = LinkStyle("static"), linker_flags: list[typing.Any] = [], external_linker_flags: list[typing.Any] = [], - shared: bool = False): + race: bool = False, + asan: bool = False): go_toolchain = ctx.attrs._go_toolchain[GoToolchainInfo] if go_toolchain.env_go_os == "windows": executable_extension = ".exe" shared_extension = ".dll" + archive_extension = ".lib" else: executable_extension = "" shared_extension = ".so" - file_extension = shared_extension if build_mode == GoBuildMode("c_shared") else executable_extension - output = ctx.actions.declare_output(ctx.label.name + file_extension) - - cmd = get_toolchain_cmd_args(go_toolchain) + archive_extension = ".a" + + if build_mode == GoBuildMode("c_shared"): + file_extension = shared_extension + use_shared_code = True # PIC + elif build_mode == 
GoBuildMode("c_archive"): + file_extension = archive_extension + use_shared_code = True # PIC + else: # GoBuildMode("executable") + file_extension = executable_extension + use_shared_code = False # non-PIC + final_output_name = ctx.label.name + file_extension + output = ctx.actions.declare_output(ctx.label.name + "-tmp" + file_extension) + + cmd = cmd_args() cmd.add(go_toolchain.linker) - if shared: - cmd.add(go_toolchain.linker_flags_shared) - else: - cmd.add(go_toolchain.linker_flags_static) + cmd.add(go_toolchain.linker_flags) cmd.add("-o", output.as_output()) cmd.add("-buildmode=" + _build_mode_param(build_mode)) cmd.add("-buildid=") # Setting to a static buildid helps make the binary reproducible. + if race: + cmd.add("-race") + + if asan: + cmd.add("-asan") + # Add inherited Go pkgs to library search path. all_pkgs = merge_pkgs([ pkgs, - pkg_artifacts(get_inherited_link_pkgs(deps), shared = shared), - stdlib_pkg_artifacts(go_toolchain, shared = shared), + get_inherited_link_pkgs(deps), ]) - importcfg_content = [] - for name_, pkg_ in all_pkgs.items(): - # Hack: we use cmd_args get "artifact" valid path and write it to a file. - importcfg_content.append(cmd_args("packagefile ", name_, "=", pkg_, delimiter = "")) + identifier_prefix = ctx.label.name + "_" + _build_mode_param(build_mode) - importcfg = ctx.actions.declare_output("importcfg") - ctx.actions.write(importcfg.as_output(), importcfg_content) + importcfg = make_importcfg(ctx, identifier_prefix, all_pkgs, use_shared_code) cmd.add("-importcfg", importcfg) - cmd.hidden(all_pkgs.values()) - runtime_files, extra_link_args = _process_shared_dependencies(ctx, output, deps, link_style) + executable_args = _process_shared_dependencies(ctx, output, deps, link_style) if link_mode == None: - if go_toolchain.cxx_toolchain_for_linking != None: + if build_mode == GoBuildMode("c_shared"): link_mode = "external" - else: - link_mode = "internal" - cmd.add("-linkmode", link_mode) + if build_mode == GoBuildMode("c_archive"): + link_mode = "external" + + if link_mode != None: + cmd.add("-linkmode", link_mode) - if link_mode == "external": + cxx_toolchain = ctx.attrs._cxx_toolchain[CxxToolchainInfo] + if cxx_toolchain != None: is_win = ctx.attrs._exec_os_type[OsLookup].platform == "windows" - cxx_toolchain = go_toolchain.cxx_toolchain_for_linking # Gather external link args from deps. ext_links = get_link_args_for_strategy(ctx, cxx_inherited_link_info(deps), to_link_strategy(link_style)) ext_link_args_output = make_link_args( + ctx, ctx.actions, cxx_toolchain, [ext_links], ) - ext_link_args = cmd_args() - ext_link_args.add(cmd_args(extra_link_args, quote = "shell")) + ext_link_args = cmd_args(hidden = ext_link_args_output.hidden) + ext_link_args.add(cmd_args(executable_args.extra_link_args, quote = "shell")) ext_link_args.add(external_linker_flags) ext_link_args.add(ext_link_args_output.link_args) - ext_link_args.hidden(ext_link_args_output.hidden) + + if build_mode == GoBuildMode("c_shared") and go_toolchain.env_go_os != "windows": + soname = get_default_shared_library_name(cxx_toolchain.linker_info, ctx.label) + soname_flags = get_shared_library_name_linker_flags(cxx_toolchain.linker_info.type, soname) + ext_link_args.add(soname_flags) # Delegate to C++ linker... # TODO: It feels a bit inefficient to generate a wrapper file for every # link. Is there some way to etract the first arg of `RunInfo`? Or maybe - # we can generate te platform-specific stuff once and re-use? + # we can generate the platform-specific stuff once and re-use? 
cxx_link_cmd = cmd_args( [ cxx_toolchain.linker_info.linker, - cxx_toolchain.linker_info.linker_flags, - go_toolchain.external_linker_flags, ext_link_args, "%*" if is_win else "\"$@\"", ], delimiter = " ", ) linker_wrapper, _ = ctx.actions.write( - "__{}_cxx_link_wrapper__.{}".format(ctx.label.name, "bat" if is_win else "sh"), + "__{}_cxx_link_wrapper__.{}".format(identifier_prefix, "bat" if is_win else "sh"), ([] if is_win else ["#!/bin/sh"]) + [cxx_link_cmd], allow_args = True, is_executable = True, ) - cmd.add("-extld", linker_wrapper).hidden(cxx_link_cmd) + cmd.add("-extld", linker_wrapper, cmd_args(hidden = cxx_link_cmd)) + cmd.add("-extldflags", cmd_args( + cxx_toolchain.linker_info.linker_flags, + go_toolchain.external_linker_flags, + delimiter = " ", + quote = "shell", + )) cmd.add(linker_flags) - cmd.add(main) + cmd.add(main.pkg_shared if use_shared_code else main.pkg) + + env = get_toolchain_env_vars(go_toolchain) + + ctx.actions.run(cmd, env = env, category = "go_link", identifier = identifier_prefix) + + output = stamp_build_info(ctx, output) - ctx.actions.run(cmd, category = "go_link") + final_output = ctx.actions.copy_file(final_output_name, output) - return (output, runtime_files) + return (final_output, executable_args.runtime_files, executable_args.external_debug_info) diff --git a/prelude/go/package_builder.bzl b/prelude/go/package_builder.bzl new file mode 100644 index 0000000000000..558f3caa4beb8 --- /dev/null +++ b/prelude/go/package_builder.bzl @@ -0,0 +1,284 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
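To summarize the dispatch in the rewritten link() above: each GoBuildMode selects an output extension, whether the PIC (`pkg_shared`) or non-PIC (`pkg`) archives are consumed, and, for the two C modes, forces external linking. A condensed, illustrative-only view of the Go linker flags involved (the real command is assembled incrementally from GoToolchainInfo and the rule attrs):

```starlark
# Not actual prelude code: a summary of link()'s per-mode behavior.
_LINK_FLAGS_BY_BUILD_MODE = {
    "executable": ["-buildmode=exe"],  # non-PIC; link_mode left to the caller
    "c_shared": ["-buildmode=c-shared", "-linkmode", "external"],  # PIC .so, plus soname flags off Windows
    "c_archive": ["-buildmode=c-archive", "-linkmode", "external"],  # PIC .a
}
```

package_builder.bzl, introduced next, is the new shared entry point that the Go rules above call through build_package.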
+ +load("@prelude//:paths.bzl", "paths") +load("@prelude//utils:utils.bzl", "dedupe_by_value") +load(":cgo_builder.bzl", "build_cgo") +load(":compile.bzl", "get_inherited_compile_pkgs", "infer_package_root") +load( + ":coverage.bzl", + "GoCoverageMode", # @Unused used as type +) +load(":go_list.bzl", "go_list", "parse_go_list_out") +load(":packages.bzl", "GoPackageInfo", "GoPkg", "make_importcfg", "merge_pkgs") +load(":toolchain.bzl", "GoToolchainInfo", "get_toolchain_env_vars") + +def build_package( + ctx: AnalysisContext, + pkg_name: str, + main: bool, + srcs: list[Artifact], + package_root: str | None, + pkgs: dict[str, GoPkg] = {}, + deps: list[Dependency] = [], + compiler_flags: list[str] = [], + assembler_flags: list[str] = [], + tags: list[str] = [], + race: bool = False, + asan: bool = False, + cgo_enabled: bool = False, + coverage_mode: GoCoverageMode | None = None, + embedcfg: Artifact | None = None, + tests: bool = False, + cgo_gen_dir_name: str = "cgo_gen") -> (GoPkg, GoPackageInfo): + if race and coverage_mode not in [None, GoCoverageMode("atomic")]: + fail("`coverage_mode` must be `atomic` when `race=True`") + + out = ctx.actions.declare_output(paths.basename(pkg_name) + "_non-shared.a") + out_shared = ctx.actions.declare_output(paths.basename(pkg_name) + "_shared.a") + + cgo_gen_dir = ctx.actions.declare_output(cgo_gen_dir_name, dir = True) + + srcs = dedupe_by_value(srcs) + + package_root = package_root if package_root != None else infer_package_root(srcs) + + go_list_out = go_list(ctx, pkg_name, srcs, package_root, tags, cgo_enabled, with_tests = tests, asan = asan) + + srcs_list_argsfile = ctx.actions.declare_output(paths.basename(pkg_name) + "_srcs_list.go_package_argsfile") + coverage_vars_argsfile = ctx.actions.declare_output(paths.basename(pkg_name) + "_coverage_vars.go_package_argsfile") + dynamic_outputs = [out, out_shared, srcs_list_argsfile, coverage_vars_argsfile, cgo_gen_dir] + + all_pkgs = merge_pkgs([ + pkgs, + get_inherited_compile_pkgs(deps), + ]) + + def f(ctx: AnalysisContext, artifacts, outputs, go_list_out = go_list_out): + go_list = parse_go_list_out(srcs, package_root, artifacts[go_list_out]) + + # Generate CGO and C sources. 
+ cgo_go_files, cgo_o_files, cgo_gen_tmp_dir = build_cgo(ctx, go_list.cgo_files, go_list.h_files, go_list.c_files + go_list.cxx_files, go_list.cgo_cflags, go_list.cgo_cppflags) + ctx.actions.copy_dir(outputs[cgo_gen_dir], cgo_gen_tmp_dir) + + src_list_for_argsfile = go_list.go_files + (go_list.test_go_files + go_list.x_test_go_files if tests else []) + ctx.actions.write(outputs[srcs_list_argsfile], cmd_args(src_list_for_argsfile, "")) + + go_files = go_list.go_files + cgo_go_files + covered_go_files, coverage_vars_out = _cover(ctx, pkg_name, go_files, coverage_mode) + ctx.actions.write(outputs[coverage_vars_argsfile], coverage_vars_out) + + symabis = _symabis(ctx, pkg_name, main, go_list.s_files, go_list.h_files, assembler_flags) + + def build_variant(shared: bool) -> Artifact: + suffix = ",shared" if shared else ",non-shared" # suffix to make artifacts unique + go_files_to_compile = covered_go_files + ((go_list.test_go_files + go_list.x_test_go_files) if tests else []) + importcfg = make_importcfg(ctx, pkg_name, all_pkgs, shared) + go_a_file, asmhdr = _compile(ctx, pkg_name, main, go_files_to_compile, importcfg, compiler_flags, shared, race, asan, suffix, embedcfg, go_list.embed_files, symabis, len(go_list.s_files) > 0) + + asm_o_files = _assembly(ctx, pkg_name, main, go_list.s_files, go_list.h_files, asmhdr, assembler_flags, shared, suffix) + + return _pack(ctx, pkg_name, go_a_file, cgo_o_files + asm_o_files, suffix) + + ctx.actions.copy_file(outputs[out], build_variant(shared = False)) + ctx.actions.copy_file(outputs[out_shared], build_variant(shared = True)) + + ctx.actions.dynamic_output(dynamic = [go_list_out], inputs = [], outputs = [o.as_output() for o in dynamic_outputs], f = f) + + return GoPkg( + pkg = out, + pkg_shared = out_shared, + coverage_vars = cmd_args(coverage_vars_argsfile, format = "@{}"), + srcs_list = cmd_args(srcs_list_argsfile, format = "@{}", hidden = srcs), + ), GoPackageInfo( + build_out = out, + cgo_gen_dir = cgo_gen_dir, + package_name = pkg_name, + package_root = package_root, + go_list_out = go_list_out, + ) + +def _compile( + ctx: AnalysisContext, + pkg_name: str, + main: bool, + go_srcs: list[Artifact], + importcfg: cmd_args, + compiler_flags: list[str], + shared: bool, + race: bool, + asan: bool, + suffix: str, + embedcfg: Artifact | None = None, + embed_files: list[Artifact] = [], + symabis: Artifact | None = None, + gen_asmhdr: bool = False) -> (Artifact, Artifact | None): + go_toolchain = ctx.attrs._go_toolchain[GoToolchainInfo] + + env = get_toolchain_env_vars(go_toolchain) + out = ctx.actions.declare_output("go_compile_out{}.a".format(suffix)) + + if len(go_srcs) == 0: + ctx.actions.write(out.as_output(), "") + return out, None + + asmhdr = ctx.actions.declare_output("__asmhdr__{}/go_asm.h".format(suffix)) if gen_asmhdr else None + + # Use argsfile to avoid command length limit on Windows + srcs_argsfile = ctx.actions.write(paths.basename(pkg_name) + suffix + "_srcs.go_package_argsfile", go_srcs) + + compile_cmd = cmd_args( + [ + go_toolchain.go_wrapper, + ["--go", go_toolchain.compiler], + "--", + go_toolchain.compiler_flags, + compiler_flags, + "-buildid=", + "-nolocalimports", + ["-trimpath", "%cwd%"], + ["-p", "main" if main else pkg_name], + ["-importcfg", importcfg], + ["-o", out.as_output()], + ["-race"] if race else [], + ["-asan"] if asan else [], + ["-shared"] if shared else [], + ["-embedcfg", embedcfg] if embedcfg else [], + ["-symabis", symabis] if symabis else [], + ["-asmhdr", asmhdr.as_output()] if asmhdr else [], + 
cmd_args(srcs_argsfile, format = "@{}", hidden = go_srcs), + ], + hidden = embed_files, # files and directories should be available for embedding + ) + + identifier = paths.basename(pkg_name) + ctx.actions.run(compile_cmd, env = env, category = "go_compile", identifier = identifier + suffix) + + return (out, asmhdr) + +def _symabis(ctx: AnalysisContext, pkg_name: str, main: bool, s_files: list[Artifact], h_files: list[Artifact], assembler_flags: list[str]) -> Artifact | None: + if len(s_files) == 0: + return None + + go_toolchain = ctx.attrs._go_toolchain[GoToolchainInfo] + env = get_toolchain_env_vars(go_toolchain) + + # we have to supply "go_asm.h" with some content to make the asm tool happy, + # as its content doesn't matter when -gensymabis is provided + # https://github.com/golang/go/blob/3f8f929d60a90c4e4e2b07c8d1972166c1a783b1/src/cmd/go/internal/work/gc.go#L441-L443 + fake_asmhdr = ctx.actions.write("__fake_asmhdr__/go_asm.h", "") + symabis = ctx.actions.declare_output("symabis") + asm_cmd = [ + go_toolchain.assembler, + go_toolchain.assembler_flags, + assembler_flags, + _asm_args(ctx, pkg_name, main, False), # flag -shared doesn't matter for symabis + "-gensymabis", + ["-o", symabis.as_output()], + ["-I", cmd_args(fake_asmhdr, parent = 1)], + ["-I", cmd_args(h_files, parent = 1)] if h_files else [], + s_files, + ] + + identifier = paths.basename(pkg_name) + ctx.actions.run(asm_cmd, env = env, category = "go_symabis", identifier = identifier) + + return symabis + +def _assembly(ctx: AnalysisContext, pkg_name: str, main: bool, s_files: list[Artifact], h_files: list[Artifact], asmhdr: Artifact | None, assembler_flags: list[str], shared: bool, suffix: str) -> list[Artifact]: + if len(s_files) == 0: + return [] + + go_toolchain = ctx.attrs._go_toolchain[GoToolchainInfo] + env = get_toolchain_env_vars(go_toolchain) + + o_files = [] + identifier = paths.basename(pkg_name) + for s_file in s_files: + o_file = ctx.actions.declare_output(s_file.short_path + suffix + ".o") + o_files.append(o_file) + + asm_cmd = [ + go_toolchain.assembler, + go_toolchain.assembler_flags, + assembler_flags, + _asm_args(ctx, pkg_name, main, shared), + ["-o", o_file.as_output()], + ["-I", cmd_args(asmhdr, parent = 1)] if asmhdr else [], # can it actually be None?
+ ["-I", cmd_args(h_files, parent = 1)] if h_files else [], + s_file, + ] + + ctx.actions.run(asm_cmd, env = env, category = "go_assembly", identifier = identifier + "/" + s_file.short_path + suffix) + + return o_files + +def _pack(ctx: AnalysisContext, pkg_name: str, a_file: Artifact, o_files: list[Artifact], suffix: str) -> Artifact: + if len(o_files) == 0: + # no need to repack .a file, if there are no .o files + return a_file + + go_toolchain = ctx.attrs._go_toolchain[GoToolchainInfo] + env = get_toolchain_env_vars(go_toolchain) + + pkg_file = ctx.actions.declare_output("pkg{}.a".format(suffix)) + + pack_cmd = [ + go_toolchain.packer, + "c", + pkg_file.as_output(), + a_file, + o_files, + ] + + identifier = paths.basename(pkg_name) + ctx.actions.run(pack_cmd, env = env, category = "go_pack", identifier = identifier + suffix) + + return pkg_file + +def _asm_args(ctx: AnalysisContext, pkg_name: str, main: bool, shared: bool): + go_toolchain = ctx.attrs._go_toolchain[GoToolchainInfo] + return [ + ["-p", "main" if main else pkg_name], + ["-I", cmd_args(go_toolchain.env_go_root, absolute_suffix = "/pkg/include")], + ["-D", "GOOS_" + go_toolchain.env_go_os] if go_toolchain.env_go_os else [], + ["-D", "GOARCH_" + go_toolchain.env_go_arch] if go_toolchain.env_go_arch else [], + ["-shared"] if shared else [], + ] + +def _cover(ctx: AnalysisContext, pkg_name: str, go_files: list[Artifact], coverage_mode: GoCoverageMode | None) -> (list[Artifact], str | cmd_args): + if coverage_mode == None: + return go_files, "" + + go_toolchain = ctx.attrs._go_toolchain[GoToolchainInfo] + env = get_toolchain_env_vars(go_toolchain) + covered_files = [] + coverage_vars = {} + for go_file in go_files: + covered_file = ctx.actions.declare_output("with_coverage", go_file.short_path) + covered_files.append(covered_file) + + var = "Var_" + sha256(pkg_name + "::" + go_file.short_path) + coverage_vars[var] = go_file.short_path + + cover_cmd = [ + go_toolchain.cover, + ["-mode", coverage_mode.value], + ["-var", var], + ["-o", covered_file.as_output()], + go_file, + ] + + ctx.actions.run(cover_cmd, env = env, category = "go_cover", identifier = paths.basename(pkg_name) + "/" + go_file.short_path) + + coverage_vars_out = "" + if len(coverage_vars) > 0: + # convert coverage_vars to argsfile for compatibility with python implementation + cover_pkg = "{}:{}".format(pkg_name, ",".join(["{}={}".format(var, name) for var, name in coverage_vars.items()])) + coverage_vars_out = cmd_args("--cover-pkgs", cover_pkg) + + return covered_files, coverage_vars_out diff --git a/prelude/go/packages.bzl b/prelude/go/packages.bzl index 630a4f2638717..b3148e055c407 100644 --- a/prelude/go/packages.bzl +++ b/prelude/go/packages.bzl @@ -5,15 +5,41 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -load("@prelude//:artifacts.bzl", "ArtifactGroupInfo") +load("@prelude//cxx:headers.bzl", "prepare_headers") +load( + "@prelude//cxx:preprocessor.bzl", + "CPreprocessor", + "CPreprocessorArgs", +) load("@prelude//go:toolchain.bzl", "GoToolchainInfo") load("@prelude//utils:utils.bzl", "value_or") +# Information about a package for GOPACKAGESDRIVER +GoPackageInfo = provider( + fields = { + "build_out": provider_field(Artifact), + "cgo_gen_dir": provider_field(Artifact), + "go_list_out": provider_field(Artifact), + "package_name": provider_field(str), + "package_root": provider_field(str), + }, +) + GoPkg = record( - # Built w/ `-shared`. - shared = field(Artifact), - # Built w/o `-shared`. 
- static = field(Artifact), + # We always have to produce both shared (PIC) and non-shared (non-PIC) archives + pkg = field(Artifact), + pkg_shared = field(Artifact), + coverage_vars = field(cmd_args), + srcs_list = field(cmd_args), +) + +GoStdlib = provider( + fields = { + "importcfg": provider_field(Artifact), + "importcfg_shared": provider_field(Artifact), + "pkgdir": provider_field(Artifact), + "pkgdir_shared": provider_field(Artifact), + }, ) def go_attr_pkg_name(ctx: AnalysisContext) -> str: @@ -22,7 +48,7 @@ def go_attr_pkg_name(ctx: AnalysisContext) -> str: """ return value_or(ctx.attrs.package_name, ctx.label.package) -def merge_pkgs(pkgss: list[dict[str, typing.Any]]) -> dict[str, typing.Any]: +def merge_pkgs(pkgss: list[dict[str, GoPkg]]) -> dict[str, GoPkg]: """ Merge mappings of packages into a single mapping, throwing an error on conflicts. @@ -31,42 +57,62 @@ def merge_pkgs(pkgss: list[dict[str, typing.Any]]) -> dict[str, typing.Any]: all_pkgs = {} for pkgs in pkgss: - for name, path in pkgs.items(): - if name in pkgs and pkgs[name] != path: - fail("conflict for package {!r}: {} and {}".format(name, path, all_pkgs[name])) - all_pkgs[name] = path + for name, pkg in pkgs.items(): + if name in all_pkgs and all_pkgs[name] != pkg: + fail("conflict for package {!r}: {} and {}".format(name, pkg, all_pkgs[name])) + all_pkgs[name] = pkg return all_pkgs -def pkg_artifacts(pkgs: dict[str, GoPkg], shared: bool = False) -> dict[str, Artifact]: +def pkg_artifacts(pkgs: dict[str, GoPkg], shared: bool) -> dict[str, Artifact]: """ Return a map package name to a `shared` or `static` package artifact. """ return { - name: pkg.shared if shared else pkg.static + name: pkg.pkg_shared if shared else pkg.pkg for name, pkg in pkgs.items() } -def stdlib_pkg_artifacts(toolchain: GoToolchainInfo, shared: bool = False) -> dict[str, Artifact]: - """ - Return a map package name to a `shared` or `static` package artifact of stdlib. - """ +def make_importcfg( + ctx: AnalysisContext, + prefix_name: str, + own_pkgs: dict[str, GoPkg], + shared: bool) -> cmd_args: + go_toolchain = ctx.attrs._go_toolchain[GoToolchainInfo] + stdlib = ctx.attrs._go_stdlib[GoStdlib] + suffix = "__shared" if shared else "" # suffix to make artifacts unique - prebuilt_stdlib = toolchain.prebuilt_stdlib_shared if shared else toolchain.prebuilt_stdlib - stdlib_pkgs = prebuilt_stdlib[ArtifactGroupInfo].artifacts + content = [] + pkg_artifacts_map = pkg_artifacts(own_pkgs, shared) + for name_, pkg_ in pkg_artifacts_map.items(): + # Hack: we use cmd_args to get a valid "artifact" path and write it to a file.
+ content.append(cmd_args("packagefile ", name_, "=", pkg_, delimiter = "")) - if len(stdlib_pkgs) == 0: - fail("Stdlib for current platfrom is missing from toolchain.") + own_importcfg = ctx.actions.declare_output("{}{}.importcfg".format(prefix_name, suffix)) + ctx.actions.write(own_importcfg, content) - pkgs = {} - for pkg in stdlib_pkgs: - # remove first directory like `pgk` - _, _, temp_path = pkg.short_path.partition("/") + final_importcfg = ctx.actions.declare_output("{}{}.final.importcfg".format(prefix_name, suffix)) + ctx.actions.run( + [ + go_toolchain.concat_files, + "--output", + final_importcfg.as_output(), + stdlib.importcfg_shared if shared else stdlib.importcfg, + own_importcfg, + ], + category = "concat_importcfgs", + identifier = prefix_name + suffix, + ) - # remove second directory like `darwin_amd64` - # now we have name like `net/http.a` - _, _, pkg_relpath = temp_path.partition("/") - name = pkg_relpath.removesuffix(".a") # like `net/http` - pkgs[name] = pkg + return cmd_args(final_importcfg, hidden = [stdlib.pkgdir_shared if shared else stdlib.pkgdir, pkg_artifacts_map.values()]) - return pkgs +# Return "_cgo_export.h" to expose exported C declarations to non-Go rules +def cgo_exported_preprocessor(ctx: AnalysisContext, pkg_info: GoPackageInfo) -> CPreprocessor: + return CPreprocessor(args = CPreprocessorArgs(args = [ + "-I", + prepare_headers( + ctx, + {"{}/{}.h".format(ctx.label.package, ctx.label.name): pkg_info.cgo_gen_dir.project("_cgo_export.h")}, + "cgo-exported-headers", + ).include_path, + ])) diff --git a/prelude/go/tags/constraints/BUCK.v2 b/prelude/go/tags/constraints/BUCK.v2 new file mode 100644 index 0000000000000..38f5c36f82a60 --- /dev/null +++ b/prelude/go/tags/constraints/BUCK.v2 @@ -0,0 +1,8 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") +load(":defs.bzl", "generate_tag_constraints") + +oncall("build_infra") + +source_listing() + +generate_tag_constraints() diff --git a/prelude/go/tags/constraints/defs.bzl b/prelude/go/tags/constraints/defs.bzl new file mode 100644 index 0000000000000..92923c5f7a289 --- /dev/null +++ b/prelude/go/tags/constraints/defs.bzl @@ -0,0 +1,23 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +load("@prelude//:native.bzl", "native") +load("@prelude//go/transitions:tags_helper.bzl", "allowed_tags") + +def generate_tag_constraints(): + for tag in allowed_tags: + setting_name = "setting__" + tag + native.constraint_setting( + name = setting_name, + visibility = ["PUBLIC"], + ) + + native.constraint_value( + name = tag, + constraint_setting = ":" + setting_name, + visibility = ["PUBLIC"], + ) diff --git a/prelude/go/toolchain.bzl b/prelude/go/toolchain.bzl index 73d7892428423..97a9c691d5fae 100644 --- a/prelude/go/toolchain.bzl +++ b/prelude/go/toolchain.bzl @@ -8,51 +8,57 @@ GoToolchainInfo = provider( # @unsorted-dict-items fields = { - "assembler": provider_field(typing.Any, default = None), - "cgo": provider_field(typing.Any, default = None), - "cgo_wrapper": provider_field(typing.Any, default = None), - "compile_wrapper": provider_field(typing.Any, default = None), - "compiler": provider_field(typing.Any, default = None), - "compiler_flags_shared": provider_field(typing.Any, default = None), - "compiler_flags_static": provider_field(typing.Any, default = None), - "cover": provider_field(typing.Any, default = None), - "cover_srcs": provider_field(typing.Any, default = None), - "cxx_toolchain_for_linking": provider_field(typing.Any, default = None), - "env_go_arch": provider_field(typing.Any, default = None), - "env_go_os": provider_field(typing.Any, default = None), - "env_go_arm": provider_field(typing.Any, default = None), + "assembler": provider_field(RunInfo), + "assembler_flags": provider_field(typing.Any, default = None), + "c_compiler_flags": provider_field(typing.Any, default = None), + "cgo": provider_field(RunInfo), + "cgo_wrapper": provider_field(RunInfo), + "gen_stdlib_importcfg": provider_field(RunInfo), + "go_wrapper": provider_field(RunInfo), + "compiler": provider_field(RunInfo), + "compiler_flags": provider_field(typing.Any, default = None), + "concat_files": provider_field(RunInfo), + "cover": provider_field(RunInfo), + "default_cgo_enabled": provider_field(bool, default = False), + "env_go_arch": provider_field(str), + "env_go_os": provider_field(str), + "env_go_arm": provider_field(str | None, default = None), "env_go_root": provider_field(typing.Any, default = None), + "env_go_debug": provider_field(dict[str, str], default = {}), "external_linker_flags": provider_field(typing.Any, default = None), - "filter_srcs": provider_field(typing.Any, default = None), - "go": provider_field(typing.Any, default = None), - "linker": provider_field(typing.Any, default = None), - "linker_flags_shared": provider_field(typing.Any, default = None), - "linker_flags_static": provider_field(typing.Any, default = None), - "packer": provider_field(typing.Any, default = None), - "prebuilt_stdlib": provider_field(typing.Any, default = None), - "prebuilt_stdlib_shared": provider_field(typing.Any, default = None), - "tags": provider_field(typing.Any, default = None), + "go": provider_field(RunInfo), + "linker": provider_field(RunInfo), + "linker_flags": provider_field(typing.Any, default = None), + "packer": provider_field(RunInfo), + "tags": provider_field(list[str], default = []), }, ) -def get_toolchain_cmd_args(toolchain: GoToolchainInfo, go_root = True, force_disable_cgo = False) -> cmd_args: - cmd = cmd_args("env") - if toolchain.env_go_arch != None: - cmd.add("GOARCH={}".format(toolchain.env_go_arch)) - if toolchain.env_go_os != None: - cmd.add("GOOS={}".format(toolchain.env_go_os)) +def get_toolchain_env_vars(toolchain: GoToolchainInfo) -> dict[str, str | cmd_args]: + env = 
{ + "GOARCH": toolchain.env_go_arch, + # opt-out from Go1.20 coverage redesign + "GOEXPERIMENT": "nocoverageredesign", + "GOOS": toolchain.env_go_os, + } + if toolchain.env_go_arm != None: - cmd.add("GOARM={}".format(toolchain.env_go_arm)) - if go_root and toolchain.env_go_root != None: - cmd.add(cmd_args(toolchain.env_go_root, format = "GOROOT={}")) + env["GOARM"] = toolchain.env_go_arm + if toolchain.env_go_root != None: + env["GOROOT"] = toolchain.env_go_root + if toolchain.env_go_debug: + godebug = ",".join(["{}={}".format(k, v) for k, v in toolchain.env_go_debug.items()]) + env["GODEBUG"] = godebug + + return env + +# Sets default value of cgo_enabled attribute based on default_cgo_enabled attribute of GoToolchainInfo +def evaluate_cgo_enabled(toolchain: GoToolchainInfo, cgo_enabled: [bool, None], override_cgo_enabled: [bool, None] = None) -> bool: + if override_cgo_enabled != None: + return override_cgo_enabled - if force_disable_cgo: - cmd.add("CGO_ENABLED=0") - else: - # CGO is enabled by default for native compilation, but we need to set it - # explicitly for cross-builds: - # https://go-review.googlesource.com/c/go/+/12603/2/src/cmd/cgo/doc.go - if toolchain.cgo != None: - cmd.add("CGO_ENABLED=1") + if cgo_enabled != None: + return cgo_enabled - return cmd + # Sadly we can't add a check if cxx_toolchain available, because it's always set even when it doesn't make sense + return toolchain.default_cgo_enabled diff --git a/prelude/go/tools/BUCK b/prelude/go/tools/BUCK deleted file mode 100644 index b7499e98f9444..0000000000000 --- a/prelude/go/tools/BUCK +++ /dev/null @@ -1,35 +0,0 @@ -prelude = native - -prelude.python_bootstrap_binary( - name = "compile_wrapper", - main = "compile_wrapper.py", - visibility = ["PUBLIC"], -) - -prelude.python_bootstrap_binary( - name = "cover_srcs", - main = "cover_srcs.py", - visibility = ["PUBLIC"], -) - -prelude.python_bootstrap_binary( - name = "filter_srcs", - main = "filter_srcs.py", - visibility = ["PUBLIC"], -) - -prelude.python_bootstrap_binary( - name = "cgo_wrapper", - main = "cgo_wrapper.py", - visibility = ["PUBLIC"], -) - -prelude.go_binary( - name = "testmaingen", - srcs = [ - "testmaingen.go", - ], - visibility = [ - "PUBLIC", - ], -) diff --git a/prelude/go/tools/BUCK.v2 b/prelude/go/tools/BUCK.v2 new file mode 100644 index 0000000000000..b83c397be2afd --- /dev/null +++ b/prelude/go/tools/BUCK.v2 @@ -0,0 +1,30 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +prelude = native + +prelude.python_bootstrap_binary( + name = "cgo_wrapper", + main = "cgo_wrapper.py", + visibility = ["PUBLIC"], +) + +prelude.python_bootstrap_binary( + name = "gen_stdlib_importcfg", + main = "gen_stdlib_importcfg.py", + visibility = ["PUBLIC"], +) + +prelude.python_bootstrap_binary( + name = "go_wrapper_py", + main = "go_wrapper.py", + visibility = ["PUBLIC"], +) + +prelude.go_stdlib( + name = "stdlib", + visibility = ["PUBLIC"], +) diff --git a/prelude/go/tools/cgo_wrapper.py b/prelude/go/tools/cgo_wrapper.py index 8c5ba97626093..44f98ab6354a7 100644 --- a/prelude/go/tools/cgo_wrapper.py +++ b/prelude/go/tools/cgo_wrapper.py @@ -10,10 +10,8 @@ import argparse import os -import pipes import subprocess import sys -import tempfile from pathlib import Path @@ -21,33 +19,20 @@ def main(argv): parser = argparse.ArgumentParser(fromfile_prefix_chars="@") parser.add_argument("--cgo", action="append", default=[]) parser.add_argument("--output", required=True, type=Path) - parser.add_argument("--cpp", 
action="append", default=[]) - parser.add_argument("--env-cc", action="append", default=[]) - parser.add_argument("--env-ldflags", action="append", default=[]) parser.add_argument("srcs", type=Path, nargs="*") args = parser.parse_args(argv[1:]) output = args.output.resolve(strict=False) + # the only reason we need this whapper is to create `-objdir`, + # because neither `go tool cgo` nor buck can create it. os.makedirs(output, exist_ok=True) env = os.environ.copy() - env["CC"] = " ".join(args.env_cc) - env["CGO_LDFLAGS"] = " ".join(args.env_ldflags) cmd = [] cmd.extend(args.cgo) - # cmd.append("-importpath={}") - # cmd.append("-srcdir={}") cmd.append(f"-objdir={output}") - # cmd.append(cgoCompilerFlags) cmd.append("--") - # cmd.append(cxxCompilerFlags) - - with tempfile.NamedTemporaryFile("w", delete=False) as argsfile: - for arg in args.cpp: - print(pipes.quote(arg), file=argsfile) - argsfile.flush() - cmd.append("@" + argsfile.name) cmd.extend(args.srcs) return subprocess.call(cmd, env=env) diff --git a/prelude/go/tools/compile_wrapper.py b/prelude/go/tools/compile_wrapper.py deleted file mode 100755 index 8abb145e33627..0000000000000 --- a/prelude/go/tools/compile_wrapper.py +++ /dev/null @@ -1,139 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under both the MIT license found in the -# LICENSE-MIT file in the root directory of this source tree and the Apache -# License, Version 2.0 found in the LICENSE-APACHE file in the root directory -# of this source tree. - -""" -Compile the given Go sources into a Go package. - -Example: - - $ ./compile_wrapper.py \ - --compiler compile \ - --assembler assemble \ - --output srcs.txt src/dir/ - -""" - -# pyre-unsafe - -import argparse -import contextlib -import os -import subprocess -import sys -import tempfile -from pathlib import Path -from typing import List - - -def _call_or_exit(cmd: List[str]): - ret = subprocess.call(cmd) - if ret != 0: - sys.exit(ret) - - -def _compile(compile_prefix: List[str], output: Path, srcs: List[Path]): - cmd = [] - cmd.extend(compile_prefix) - cmd.append("-trimpath={}".format(os.getcwd())) - cmd.append("-o") - cmd.append(output) - cmd.extend(srcs) - _call_or_exit(cmd) - - -def _pack(pack_prefix: List[str], output: Path, items: List[Path]): - cmd = [] - cmd.extend(pack_prefix) - cmd.append("r") - cmd.append(output) - cmd.extend(items) - _call_or_exit(cmd) - - -def main(argv): - parser = argparse.ArgumentParser(fromfile_prefix_chars="@") - parser.add_argument("--compiler", action="append", default=[]) - parser.add_argument("--assembler", action="append", default=[]) - parser.add_argument("--packer", action="append", default=[]) - parser.add_argument("--embedcfg", type=Path, default=None) - parser.add_argument("--output", required=True, type=Path) - parser.add_argument("srcs", type=Path, nargs="*") - args = parser.parse_args(argv[1:]) - - # If there's no srcs, just leave an empty file. 
- if not args.srcs: - args.output.touch() - return - - # go:embed does not parse symlinks, so following the links to the real paths - real_srcs = [s.resolve() for s in args.srcs] - - go_files = [s for s in real_srcs if s.suffix == ".go"] - s_files = [s for s in real_srcs if s.suffix == ".s"] - o_files = [s for s in real_srcs if s.suffix == ".o"] - - with contextlib.ExitStack() as stack: - - asmhdr_dir = None - - assemble_prefix = [] - assemble_prefix.extend(args.assembler) - - if go_files: - compile_prefix = [] - compile_prefix.extend(args.compiler) - - # If we have assembly files, generate the symabi file to compile - # against, and the asm header. - if s_files: - asmhdr_dir = stack.push(tempfile.TemporaryDirectory()) - - asmhdr = Path(asmhdr_dir.name) / "go_asm.h" - asmhdr.touch() - compile_prefix.extend(["-asmhdr", asmhdr]) - assemble_prefix.extend(["-I", asmhdr_dir.name]) - assemble_prefix.extend( - ["-I", os.path.join(os.environ["GOROOT"], "pkg", "include")] - ) - assemble_prefix.extend(["-D", f"GOOS_{os.environ['GOOS']}"]) - assemble_prefix.extend(["-D", f"GOARCH_{os.environ['GOARCH']}"]) - if "GOAMD64" in os.environ and os.environ["GOARCH"] == "amd64": - assemble_prefix.extend(["-D", f"GOAMD64_{os.environ['GOAMD64']}"]) - - # Note: at this point go_asm.h is empty, but that's OK. As per the Go compiler: - # https://github.com/golang/go/blob/3f8f929d60a90c4e4e2b07c8d1972166c1a783b1/src/cmd/go/internal/work/gc.go#L441-L443 - symabis = args.output.with_suffix(".symabis") - _compile(assemble_prefix + ["-gensymabis"], symabis, s_files) - compile_prefix.extend(["-symabis", symabis]) - - if args.embedcfg is not None: - compile_prefix.extend( - [ - "-embedcfg", - args.embedcfg, - ] - ) - - # This will create go_asm.h - _compile(compile_prefix, args.output, go_files) - - else: - args.output.touch() - - # If there are assembly files, assemble them to an object and add into the - # output archive. - if s_files: - s_object = args.output.with_suffix(".o") - _compile(assemble_prefix, s_object, s_files) - o_files.append(s_object) - - if o_files: - _pack(args.packer, args.output, o_files) - - -sys.exit(main(sys.argv)) diff --git a/prelude/go/tools/cover_srcs.py b/prelude/go/tools/cover_srcs.py deleted file mode 100644 index 4dcaf2cc51b79..0000000000000 --- a/prelude/go/tools/cover_srcs.py +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under both the MIT license found in the -# LICENSE-MIT file in the root directory of this source tree and the Apache -# License, Version 2.0 found in the LICENSE-APACHE file in the root directory -# of this source tree. - -""" -Run `go cover` on non-`_test.go` input sources. 
-""" - -# pyre-unsafe - -import argparse -import hashlib -import subprocess -import sys -from pathlib import Path - - -def _var(pkg_name, src): - return "Var_" + hashlib.md5(f"{pkg_name}::{src}".encode("utf-8")).hexdigest() - - -def main(argv): - parser = argparse.ArgumentParser(fromfile_prefix_chars="@") - parser.add_argument("--cover", type=Path, required=True) - parser.add_argument("--pkg-name", type=str, required=True) - parser.add_argument("--coverage-mode", type=str, required=True) - parser.add_argument("--covered-srcs-dir", type=Path, required=True) - parser.add_argument("--out-srcs-argsfile", type=Path, required=True) - parser.add_argument("--coverage-var-argsfile", type=Path, required=True) - parser.add_argument("srcs", nargs="*", type=Path) - args = parser.parse_args(argv[1:]) - - out_srcs = [] - coverage_vars = {} - - args.covered_srcs_dir.mkdir(parents=True) - - for src in args.srcs: - if src.name.endswith("_test.go"): - out_srcs.append(src) - else: - var = _var(args.pkg_name, src) - covered_src = args.covered_srcs_dir / src - covered_src.parent.mkdir(parents=True, exist_ok=True) - subprocess.check_call( - [ - args.cover, - "-mode", - args.coverage_mode, - "-var", - var, - "-o", - covered_src, - src, - ] - ) - # we need just the source name for the --cover-pkgs argument - coverage_vars[var] = src.name - out_srcs.append(covered_src) - - with open(args.out_srcs_argsfile, mode="w") as f: - for src in out_srcs: - print(src, file=f) - - with open(args.coverage_var_argsfile, mode="w") as f: - if coverage_vars: - print("--cover-pkgs", file=f) - print( - "{}:{}".format( - args.pkg_name, - ",".join([f"{var}={name}" for var, name in coverage_vars.items()]), - ), - file=f, - ) - - -sys.exit(main(sys.argv)) diff --git a/prelude/go/tools/filter_srcs.py b/prelude/go/tools/filter_srcs.py deleted file mode 100755 index a242e981da128..0000000000000 --- a/prelude/go/tools/filter_srcs.py +++ /dev/null @@ -1,103 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under both the MIT license found in the -# LICENSE-MIT file in the root directory of this source tree and the Apache -# License, Version 2.0 found in the LICENSE-APACHE file in the root directory -# of this source tree. - -""" -Run on a directory of Go source files and print out a list of srcs that should -be compiled. - -Example: - - $ ./filter_srcs.py --output srcs.txt src/dir/ - -""" - -# pyre-unsafe - -import argparse -import json -import os -import subprocess -import sys -import tempfile -from pathlib import Path - - -def main(argv): - parser = argparse.ArgumentParser() - parser.add_argument("--go", default="go", type=Path) - parser.add_argument("--tests", action="store_true") - parser.add_argument("--tags", default="") - parser.add_argument("--output", type=argparse.FileType("w"), default=sys.stdout) - parser.add_argument("srcdir", type=Path) - args = parser.parse_args(argv[1:]) - - # Find all source sub-dirs, which we'll need to run `go list` from. - roots = set() - for root, _dirs, _files in os.walk(args.srcdir): - roots.add(root) - - # Compute absolute paths for GOROOT, to enable `go list` to use `compile/asm/etc` - goroot = os.environ.get("GOROOT", "") - if goroot: - goroot = os.path.realpath(goroot) - - # Run `go list` on all source dirs to filter input sources by build pragmas. 
- for root in roots: - with tempfile.TemporaryDirectory() as go_cache_dir: - out = subprocess.check_output( - [ - "env", - "-i", - "GOROOT={}".format(goroot), - "GOARCH={}".format(os.environ.get("GOARCH", "")), - "GOOS={}".format(os.environ.get("GOOS", "")), - "CGO_ENABLED={}".format(os.environ.get("CGO_ENABLED", "0")), - "GO111MODULE=off", - "GOCACHE=" + go_cache_dir, - args.go.resolve(), - "list", - "-e", - "-json", - "-tags", - args.tags, - ".", - ], - cwd=root, - ).decode("utf-8") - - # Parse JSON output and print out sources. - idx = 0 - decoder = json.JSONDecoder() - while idx < len(out) - 1: - # The raw_decode method fails if there are any leading spaces, e.g. " {}" fails - # so manually trim the prefix of the string - if out[idx].isspace(): - idx += 1 - continue - - obj, idx = decoder.raw_decode(out, idx) - types = ["GoFiles", "EmbedFiles"] - if args.tests: - types.extend(["TestGoFiles", "XTestGoFiles"]) - else: - types.extend(["SFiles"]) - for typ in types: - for src in obj.get(typ, []): - src = Path(obj["Dir"]) / src - # Resolve the symlink - src = Path( - os.path.normpath(str(src.parent / os.readlink(str(src)))) - ) - # Relativize to the CWD. - src = src.relative_to(os.getcwd()) - print(src, file=args.output) - - args.output.close() - - -sys.exit(main(sys.argv)) diff --git a/prelude/go/tools/gen_stdlib_importcfg.py b/prelude/go/tools/gen_stdlib_importcfg.py new file mode 100644 index 0000000000000..ce973c0ab7df3 --- /dev/null +++ b/prelude/go/tools/gen_stdlib_importcfg.py @@ -0,0 +1,32 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +import argparse +import os +import sys +from pathlib import Path + + +def main(argv): + parser = argparse.ArgumentParser() + parser.add_argument("--stdlib", type=Path, default=None) + parser.add_argument("--output", type=Path, default=None) + + args = parser.parse_args() + + with open(args.output, "w") as f: + for root, _dirs, files in os.walk(args.stdlib): + for file in files: + pkg_path = Path(root, file) + pkg_name, _ = os.path.splitext(pkg_path.relative_to(args.stdlib)) + # package names always use unix slashes + pkg_name = pkg_name.replace("\\", "/") + f.write(f"packagefile {pkg_name}={pkg_path}\n") + + +if __name__ == "__main__": + sys.exit(main(sys.argv)) diff --git a/prelude/go/tools/go_wrapper.py b/prelude/go/tools/go_wrapper.py new file mode 100644 index 0000000000000..4a4ed0981d92c --- /dev/null +++ b/prelude/go/tools/go_wrapper.py @@ -0,0 +1,92 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +import argparse +import os +import shlex +import subprocess +import sys +from pathlib import Path + + +# A copy of "cmd/internal/quoted" translated into Python with GPT-4 +# Source: https://github.com/golang/go/blob/7e9894449e8a12157a28a4a14fc9341353a6469c/src/cmd/internal/quoted/quoted.go#L65 +def go_join(args): + buf = [] + for i, arg in enumerate(args): + if i > 0: + buf.append(" ") + saw_space, saw_single_quote, saw_double_quote = False, False, False + for c in arg: + if ord(c) > 127: + continue + elif c.isspace(): + saw_space = True + elif c == "'": + saw_single_quote = True + elif c == '"': + saw_double_quote = True + if not saw_space and not saw_single_quote and not saw_double_quote: + buf.append(arg) + elif not saw_single_quote: + buf.append("'") + buf.append(arg) + buf.append("'") + elif not saw_double_quote: + buf.append('"') + buf.append(arg) + buf.append('"') + else: + raise ValueError( + f"Argument {arg} contains both single and double quotes and cannot be quoted" + ) + return "".join(buf) + + +def main(argv): + """ + This is a wrapper script around the `go` binary. + - It fixes GOROOT and GOCACHE + """ + if len(argv) < 2: + print("usage: go_wrapper.py <wrapped go binary> [args...]", file=sys.stderr) + return 1 + + wrapped_binary = Path(argv[1]).resolve() + + parser = argparse.ArgumentParser(fromfile_prefix_chars="@") + parser.add_argument("--workdir", type=Path, default=None) + parser.add_argument("--output", type=argparse.FileType("w"), default=sys.stdout) + parsed, unknown = parser.parse_known_args(argv[2:]) + + env = os.environ.copy() + # Make paths absolute, otherwise go build will fail. + if "GOROOT" in env: + env["GOROOT"] = os.path.realpath(env["GOROOT"]) + + env["GOCACHE"] = os.path.realpath(env["BUCK_SCRATCH_PATH"]) + + cwd = os.getcwd() + for env_var in ["CC", "CGO_CFLAGS", "CGO_CPPFLAGS", "CGO_LDFLAGS"]: + if env_var in env: + # HACK: Split the value into a list of arguments then join them back. + # This is because buck encodes quoted args in a way `go` doesn't like, + # but `go_join` does it in a way that `go` expects. + var_value = go_join(shlex.split(env[env_var])) + # HACK: Replace %cwd% with the current working directory to make it work when `go` does `cd` to a tmp-dir. + env[env_var] = var_value.replace("%cwd%", cwd) + + unknown = [arg.replace("%cwd%", cwd) for arg in unknown] + + retcode = subprocess.call( + [wrapped_binary] + unknown, env=env, cwd=parsed.workdir, stdout=parsed.output + ) + parsed.output.close() + return retcode + + +sys.exit(main(sys.argv)) diff --git a/prelude/go/transitions/defs.bzl b/prelude/go/transitions/defs.bzl new file mode 100644 index 0000000000000..639a4efe95e33 --- /dev/null +++ b/prelude/go/transitions/defs.bzl @@ -0,0 +1,267 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree.
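+ +# These transitions turn rule attributes (cgo_enabled, race, asan, build tags and, +# for tests, coverage_mode) into configuration constraints, so values chosen on a +# top-level target apply consistently to its whole configured dependency graph. +# Each transition below is a no-op when its constraint is already present, which +# keeps configuration modifiers usable as an override.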
+ +load("@prelude//go:coverage.bzl", "GoCoverageMode") +load(":tags_helper.bzl", "selects_for_tags", "tag_to_constrant_value") + +def _cgo_enabled_transition(platform, refs, attrs): + constraints = platform.configuration.constraints + + # Cancel transition if the value already set + # to enable using configuration modifiers for overriding this option + cgo_enabled_setting = refs.cgo_enabled_true[ConstraintValueInfo].setting + if cgo_enabled_setting.label in constraints: + return platform + + if attrs.cgo_enabled == None: + return platform + elif attrs.cgo_enabled == True: + cgo_enabled_ref = refs.cgo_enabled_true + else: + cgo_enabled_ref = refs.cgo_enabled_false + + cgo_enabled_value = cgo_enabled_ref[ConstraintValueInfo] + constraints[cgo_enabled_value.setting.label] = cgo_enabled_value + + new_cfg = ConfigurationInfo( + constraints = constraints, + values = platform.configuration.values, + ) + + return PlatformInfo( + label = platform.label, + configuration = new_cfg, + ) + +def _race_transition(platform, refs, attrs): + constraints = platform.configuration.constraints + + # Cancel transition if the value already set + # to enable using configuration modifiers for overriding this option + race_setting = refs.race_false[ConstraintValueInfo].setting + if race_setting.label in constraints: + return platform + + # change configuration only when we can't avoid it + if attrs.race == True: + race_ref = refs.race_true + else: + return platform + + race_value = race_ref[ConstraintValueInfo] + constraints[race_value.setting.label] = race_value + + new_cfg = ConfigurationInfo( + constraints = constraints, + values = platform.configuration.values, + ) + + return PlatformInfo( + label = platform.label, + configuration = new_cfg, + ) + +def _asan_transition(platform, refs, attrs): + constraints = platform.configuration.constraints + + # Cancel transition if the value already set + # to enable using configuration modifiers for overriding this option + asan_setting = refs.asan_false[ConstraintValueInfo].setting + if asan_setting.label in constraints: + return platform + + # change configuration only when we can't avoid it + if attrs.asan == True: + asan_ref = refs.asan_true + else: + return platform + + asan_value = asan_ref[ConstraintValueInfo] + constraints[asan_value.setting.label] = asan_value + + new_cfg = ConfigurationInfo( + constraints = constraints, + values = platform.configuration.values, + ) + + return PlatformInfo( + label = platform.label, + configuration = new_cfg, + ) + +def _coverage_mode_transition(platform, refs, attrs): + constraints = platform.configuration.constraints + + # Cancel transition if the value already set + # to enable using configuration modifiers for overriding this option + coverage_mode_setting = refs.coverage_mode_set[ConstraintValueInfo].setting + if coverage_mode_setting.label in constraints: + return platform + + if attrs.coverage_mode == None: + return platform + elif attrs.coverage_mode == "set": + coverage_mode_ref = refs.coverage_mode_set + elif attrs.coverage_mode == "count": + coverage_mode_ref = refs.coverage_mode_count + elif attrs.coverage_mode == "atomic": + coverage_mode_ref = refs.coverage_mode_atomic + else: + fail("`coverage_mode` can be either: 'set', 'count', 'atomic' or None, got: {}".format(attrs.coverage_mode)) + + coverage_mode_value = coverage_mode_ref[ConstraintValueInfo] + constraints[coverage_mode_value.setting.label] = coverage_mode_value + + new_cfg = ConfigurationInfo( + constraints = constraints, + values = 
platform.configuration.values, + ) + + return PlatformInfo( + label = platform.label, + configuration = new_cfg, + ) + +def _tags_transition(platform, refs, attrs): + constraints = platform.configuration.constraints + + if not attrs.tags: + return platform + + for tag in attrs.tags: + ref_name = "tag_{}__value".format(tag) + if not hasattr(refs, ref_name): + fail("Add tag '{}' to .buckconfig attribute `go.allowed_tags` to allow using it".format(tag)) + + tag_value = getattr(refs, ref_name)[ConstraintValueInfo] + constraints[tag_value.setting.label] = tag_value + + new_cfg = ConfigurationInfo( + constraints = constraints, + values = platform.configuration.values, + ) + + return PlatformInfo( + label = platform.label, + configuration = new_cfg, + ) + +def _force_mingw_on_windows(platform, refs, _): + constraints = platform.configuration.constraints + + abi_gnu_value = refs.abi_gnu[ConstraintValueInfo] + if abi_gnu_value.setting.label in constraints and constraints[abi_gnu_value.setting.label] == abi_gnu_value: + # Already MinGW/GNU, do nothing + return platform + + os_windows_value = refs.os_windows[ConstraintValueInfo] + if os_windows_value.setting.label in constraints and constraints[os_windows_value.setting.label] != os_windows_value: + # Non-Windows, do nothing + return platform + + constraints[abi_gnu_value.setting.label] = abi_gnu_value + + new_cfg = ConfigurationInfo( + constraints = constraints, + values = platform.configuration.values, + ) + + return PlatformInfo( + label = platform.label, + configuration = new_cfg, + ) + +def _chain_transitions(transitions): + def tr(platform, refs, attrs): + for t in transitions: + platform = t(platform, refs, attrs) + return platform + + return tr + +_all_level_transitions = [_force_mingw_on_windows] +_top_level_transitions = [_asan_transition, _cgo_enabled_transition, _race_transition, _tags_transition] + _all_level_transitions + +_all_level_refs = { + "abi_gnu": "prelude//abi/constraints:gnu", + "os_windows": "prelude//os/constraints:windows", +} + +_top_level_refs = { + "asan_false": "prelude//go/constraints:asan_false", + "asan_true": "prelude//go/constraints:asan_true", + "cgo_enabled_false": "prelude//go/constraints:cgo_enabled_false", + "cgo_enabled_true": "prelude//go/constraints:cgo_enabled_true", + "race_false": "prelude//go/constraints:race_false", + "race_true": "prelude//go/constraints:race_true", +} | { + "tag_{}__value".format(tag): constraint_value + for tag, constraint_value in tag_to_constraint_value().items() +} | _all_level_refs + +_attrs = ["asan", "cgo_enabled", "race", "tags"] + +go_binary_transition = transition( + impl = _chain_transitions(_top_level_transitions), + refs = _top_level_refs, + attrs = _attrs, +) + +go_test_transition = transition( + impl = _chain_transitions(_top_level_transitions + [_coverage_mode_transition]), + refs = _top_level_refs | { + "coverage_mode_atomic": "prelude//go/constraints:coverage_mode_atomic", + "coverage_mode_count": "prelude//go/constraints:coverage_mode_count", + "coverage_mode_set": "prelude//go/constraints:coverage_mode_set", + }, + attrs = _attrs + ["coverage_mode"], +) + +go_exported_library_transition = transition( + impl = _chain_transitions(_top_level_transitions), + refs = _top_level_refs, + attrs = _attrs, +) + +go_library_transition = transition( + impl = _chain_transitions(_all_level_transitions), + refs = _all_level_refs, + attrs = [], +) + +go_stdlib_transition = transition( + impl = _chain_transitions(_all_level_transitions), + refs = _all_level_refs, + attrs = [], +) +
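+# The attribute defaults below read the transitioned configuration back into +# attribute values via select(). A sketch with a hypothetical target: given +# `allowed_tags = netgo` under `[go]` in .buckconfig, +# +# go_binary( +# name = "srv", +# srcs = ["main.go"], +# tags = ["netgo"], +# ) +# +# is transitioned so that `prelude//go/tags/constraints:netgo` is set in its +# configuration, and `tags_attr` then resolves to ["netgo"] for `srv` and its deps. +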
+cgo_enabled_attr = attrs.default_only(attrs.option(attrs.bool(), default = select({ + "DEFAULT": None, + "prelude//go/constraints:cgo_enabled_false": False, + "prelude//go/constraints:cgo_enabled_true": True, +}))) + +race_attr = attrs.default_only(attrs.bool(default = select({ + "DEFAULT": False, + "prelude//go/constraints:race_false": False, + "prelude//go/constraints:race_true": True, +}))) + +asan_attr = attrs.default_only(attrs.bool(default = select({ + "DEFAULT": False, + "prelude//go/constraints:asan_false": False, + "prelude//go/constraints:asan_true": True, +}))) + +coverage_mode_attr = attrs.default_only(attrs.option(attrs.enum(GoCoverageMode.values()), default = select({ + "DEFAULT": None, + "prelude//go/constraints:coverage_mode_atomic": "atomic", + "prelude//go/constraints:coverage_mode_count": "count", + "prelude//go/constraints:coverage_mode_set": "set", +}))) + +tags_attr = attrs.default_only(attrs.list(attrs.string(), default = selects_for_tags())) diff --git a/prelude/go/transitions/tags_helper.bzl b/prelude/go/transitions/tags_helper.bzl new file mode 100644 index 0000000000000..1acb328e41105 --- /dev/null +++ b/prelude/go/transitions/tags_helper.bzl @@ -0,0 +1,23 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//utils:buckconfig.bzl", "read_list") + +allowed_tags = read_list("go", "allowed_tags", default = [], root_cell = True) + +def tag_to_constraint_value(): + return {tag: "prelude//go/tags/constraints:{}".format(tag) for tag in allowed_tags} + +def selects_for_tags(): + selects = [] + for tag in allowed_tags: + selects += select({ + "DEFAULT": [], + "prelude//go/tags/constraints:{}".format(tag): [tag], + }) + + return selects diff --git a/prelude/go_bootstrap/go_bootstrap.bzl b/prelude/go_bootstrap/go_bootstrap.bzl new file mode 100644 index 0000000000000..48ec3cefb808c --- /dev/null +++ b/prelude/go_bootstrap/go_bootstrap.bzl @@ -0,0 +1,63 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//:paths.bzl", "paths") + +GoBootstrapToolchainInfo = provider( + fields = { + "env_go_arch": provider_field(str), + "env_go_os": provider_field(str), + "env_go_root": provider_field(typing.Any, default = None), + "go": provider_field(RunInfo), + "go_wrapper": provider_field(RunInfo), + }, +) + +def go_bootstrap_binary_impl(ctx: AnalysisContext) -> list[Provider]: + """ + Produces a Go binary for use in the prelude. Similar to `python_bootstrap_binary`. + It doesn't depend on other Go rules and uses `go build` under the hood. + CGO is disabled to minimise dependencies.
+ """ + go_toolchain = ctx.attrs._go_bootstrap_toolchain[GoBootstrapToolchainInfo] + + target_is_win = go_toolchain.env_go_os == "windows" + exe_suffix = ".exe" if target_is_win else "" + output = ctx.actions.declare_output(ctx.label.name + exe_suffix) + + # Copy files, because go:embed doesn't work with symlinks + srcs_dir = ctx.actions.copied_dir( + "__srcs_dir__", + {paths.relativize(src.short_path, ctx.attrs.workdir): src for src in ctx.attrs.srcs}, + ) + + cmd = cmd_args([ + go_toolchain.go_wrapper, + go_toolchain.go, + ["--workdir", srcs_dir], + "build", + ["-o", cmd_args(output.as_output(), relative_to = srcs_dir)], + ctx.attrs.entrypoints, + ]) + + env = { + "CGO_ENABLED": "0", + "GO111MODULE": "", + "GOARCH": go_toolchain.env_go_arch, + "GOOS": go_toolchain.env_go_os, + "GOTOOLCHAIN": "local", + } + + if go_toolchain.env_go_root != None: + env["GOROOT"] = go_toolchain.env_go_root + + ctx.actions.run(cmd, env = env, category = "go_bootstrap_binary") + + return [ + DefaultInfo(default_output = output), + RunInfo(args = [output]), + ] diff --git a/prelude/go_bootstrap/tools/BUCK.v2 b/prelude/go_bootstrap/tools/BUCK.v2 new file mode 100644 index 0000000000000..0312ccee3a00b --- /dev/null +++ b/prelude/go_bootstrap/tools/BUCK.v2 @@ -0,0 +1,36 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +prelude = native + +prelude.go_bootstrap_binary( + name = "go_concat_files", + srcs = prelude.glob(["**/*"]), + entrypoints = [ + "go/concat_files.go", + ], + visibility = ["PUBLIC"], +) + +prelude.go_bootstrap_binary( + name = "go_go_wrapper", + srcs = prelude.glob(["**/*"]), + entrypoints = [ + "go/go_wrapper.go", + ], + visibility = ["PUBLIC"], +) + +prelude.go_bootstrap_binary( + name = "go_testmaingen", + srcs = prelude.glob(["**/*"]), + entrypoints = [ + "go/testmaingen.go", + ], + visibility = [ + "PUBLIC", + ], +) diff --git a/prelude/go_bootstrap/tools/README.md b/prelude/go_bootstrap/tools/README.md new file mode 100644 index 0000000000000..c3dab4b0764e6 --- /dev/null +++ b/prelude/go_bootstrap/tools/README.md @@ -0,0 +1,8 @@ +# How to work with third-party deps? + +Use [`go` tool](https://go.dev/doc/modules/managing-dependencies) for that + +1. Add/Remove a dependency in your code +1. `cd buck2/prelude/go_bootstrap/tools` +1. `go mod tidy` - to resolve deps +1. `go mod vendor` - to save deps in the repo diff --git a/prelude/go_bootstrap/tools/go.mod b/prelude/go_bootstrap/tools/go.mod new file mode 100644 index 0000000000000..6a72b58944586 --- /dev/null +++ b/prelude/go_bootstrap/tools/go.mod @@ -0,0 +1,3 @@ +module tools + +go 1.22.5 diff --git a/prelude/go_bootstrap/tools/go/concat_files.go b/prelude/go_bootstrap/tools/go/concat_files.go new file mode 100644 index 0000000000000..62190cad40ab5 --- /dev/null +++ b/prelude/go_bootstrap/tools/go/concat_files.go @@ -0,0 +1,46 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +package main + +import ( + "flag" + "io" + "log" + "os" +) + +func main() { + var outputFile = flag.String("output", "", "path of the output file") + flag.Parse() + inputFiles := flag.Args() + + if *outputFile == "" || len(inputFiles) < 2 { + log.Fatal("usage: concat_files.go --output out.txt in1.txt in2.txt") + } + + f, err := os.Create(*outputFile) + if err != nil { + log.Fatalf("Error creating output file: %v", err) + } + defer f.Close() + + for _, file := range inputFiles { + infile, err := os.Open(file) + if err != nil { + log.Fatalf("Error opening input file %s: %v", file, err) + } + defer infile.Close() + + _, err = io.Copy(f, infile) + if err != nil { + log.Fatalf("Error copying file %s: %v", file, err) + } + } +} diff --git a/prelude/go_bootstrap/tools/go/go_wrapper.go b/prelude/go_bootstrap/tools/go/go_wrapper.go new file mode 100644 index 0000000000000..715fa4f67d6a5 --- /dev/null +++ b/prelude/go_bootstrap/tools/go/go_wrapper.go @@ -0,0 +1,236 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +package main + +import ( + "bufio" + "flag" + "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "strings" + "unicode" +) + +// GPT-4 implementation of Python's shlex.split +func shellSplit(input string) ([]string, error) { + var result []string + reader := strings.NewReader(input) + scanner := bufio.NewScanner(reader) + scanner.Split(bufio.ScanWords) + var token strings.Builder + inQuotes := false + var quoteChar rune + appendToken := func() { + if token.Len() > 0 { + result = append(result, token.String()) + token.Reset() + } + } + for scanner.Scan() { + word := scanner.Text() + for _, r := range word { + switch { + case r == '\'' || r == '"': + if inQuotes { + if r == quoteChar { + inQuotes = false + quoteChar = 0 + appendToken() + continue + } + } else { + inQuotes = true + quoteChar = r + continue + } + case unicode.IsSpace(r): + if !inQuotes { + appendToken() + continue + } + } + token.WriteRune(r) + } + if !inQuotes { + appendToken() + } + } + if inQuotes { + return nil, fmt.Errorf("unclosed quote in input: %s", input) + } + if err := scanner.Err(); err != nil { + return nil, err + } + return result, nil +} + +// A copy of https://github.com/golang/go/blob/go1.23.0/src/cmd/internal/quoted/quoted.go#L65 +func join(args []string) (string, error) { + var buf []byte + for i, arg := range args { + if i > 0 { + buf = append(buf, ' ') + } + var sawSpace, sawSingleQuote, sawDoubleQuote bool + for _, c := range arg { + switch { + case c > unicode.MaxASCII: + continue + case isSpaceByte(byte(c)): + sawSpace = true + case c == '\'': + sawSingleQuote = true + case c == '"': + sawDoubleQuote = true + } + } + switch { + case !sawSpace && !sawSingleQuote && !sawDoubleQuote: + buf = append(buf, arg...) + + case !sawSingleQuote: + buf = append(buf, '\'') + buf = append(buf, arg...) + buf = append(buf, '\'') + + case !sawDoubleQuote: + buf = append(buf, '"') + buf = append(buf, arg...)
+ buf = append(buf, '"') + + default: + return "", fmt.Errorf("argument %q contains both single and double quotes and cannot be quoted", arg) + } + } + return string(buf), nil +} + +// A copy of https://github.com/golang/go/blob/go1.23.0/src/cmd/internal/quoted/quoted.go#L15 +func isSpaceByte(c byte) bool { + return c == ' ' || c == '\t' || c == '\n' || c == '\r' +} + +func loadArgs(args []string) []string { + newArgs := make([]string, 0, len(args)) + for _, arg := range args { + if !strings.HasPrefix(arg, "@") { + newArgs = append(newArgs, arg) + } else { + file, err := os.Open(arg[1:]) + if err != nil { + log.Fatalf("Failed to open argsfile %s: %s", arg[1:], err) + } + defer file.Close() + scanner := bufio.NewScanner(file) + for scanner.Scan() { + newArgs = append(newArgs, scanner.Text()) + } + } + } + return newArgs +} + +func main() { + os.Args = loadArgs(os.Args) + var wrappedBinary = flag.String("go", "", "wrapped go binary") + var outputFile = flag.String("output", "", "file to redirect stdout to") + var workdir = flag.String("workdir", "", "directory to run the command in") + flag.Parse() + unknownArgs := flag.Args() + + if *wrappedBinary == "" { + log.Fatal("No wrapped binary specified") + } + + absWrappedBinary, err := filepath.Abs(*wrappedBinary) + if err != nil { + log.Fatalf("Failed to resolve wrapped binary: %s", err) + } + + envs := make(map[string]string) + for _, e := range os.Environ() { + pair := strings.SplitN(e, "=", 2) + envs[pair[0]] = pair[1] + } + + if goroot, ok := envs["GOROOT"]; ok { + absGoroot, err := filepath.Abs(goroot) + if err != nil { + log.Fatalf("Failed to resolve GOROOT: %s", err) + } + envs["GOROOT"] = absGoroot + } + + if buckScratchPath, ok := envs["BUCK_SCRATCH_PATH"]; ok { + absBuckScratchPath, err := filepath.Abs(buckScratchPath) + if err != nil { + log.Fatalf("Failed to resolve BUCK_SCRATCH_PATH: %s", err) + } + envs["GOCACHE"] = absBuckScratchPath + } + + cwd, err := os.Getwd() + if err != nil { + log.Fatalf("Failed to get current working directory: %s", err) + } + + for _, envVar := range []string{"CC", "CGO_CFLAGS", "CGO_CPPFLAGS", "CGO_LDFLAGS"} { + if value, ok := envs[envVar]; ok { + // HACK: Split the value into a list of arguments then join them back. + // This is because buck encodes quoted args in a way `go` doesn't like, + // but `join` does it in a way that `go` expects. + splitValue := strings.Split(value, "\t") + joinedValue, err := join(splitValue) + if err != nil { + log.Fatalf("Failed to join %q: %s", envVar, err) + } + // HACK: Replace %cwd% with the current working directory to make it work when `go` does `cd` to a tmp-dir. + envs[envVar] = strings.ReplaceAll(joinedValue, "%cwd%", cwd) + } + } + for i, arg := range unknownArgs { + unknownArgs[i] = strings.ReplaceAll(arg, "%cwd%", cwd) + } + + var output *os.File + if *outputFile == "" { + output = os.Stdout + } else { + output, err = os.Create(*outputFile) + if err != nil { + log.Fatalf("Error creating output file: %s", err) + } + defer output.Close() + } + + cmd := exec.Command(absWrappedBinary, unknownArgs...)
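+ + // Note: when cmd.Env is non-nil it replaces the inherited environment entirely, + // so the GOROOT/GOCACHE/CGO_* fix-ups above only take effect because the full + // KEY=VALUE list is rebuilt below.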
+ + cmd.Env = make([]string, 0, len(envs)/2) + for k, v := range envs { + cmd.Env = append(cmd.Env, k+"="+v) + } + + if *workdir != "" { + cmd.Dir = *workdir + } + + cmd.Stdout = output + cmd.Stderr = os.Stderr + err = cmd.Run() + if err != nil { + exitCode := 1 + if exitErr, ok := err.(*exec.ExitError); ok { + exitCode = exitErr.ExitCode() + } + fmt.Fprintln(os.Stderr, "Error running command:", err) + os.Exit(exitCode) + } +} diff --git a/prelude/go/tools/testmaingen.go b/prelude/go_bootstrap/tools/go/testmaingen.go similarity index 100% rename from prelude/go/tools/testmaingen.go rename to prelude/go_bootstrap/tools/go/testmaingen.go diff --git a/prelude/haskell/compile.bzl b/prelude/haskell/compile.bzl new file mode 100644 index 0000000000000..222838398e010 --- /dev/null +++ b/prelude/haskell/compile.bzl @@ -0,0 +1,261 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load( + "@prelude//cxx:preprocessor.bzl", + "cxx_inherited_preprocessor_infos", + "cxx_merge_cpreprocessors", +) +load( + "@prelude//haskell:library_info.bzl", + "HaskellLibraryInfoTSet", +) +load( + "@prelude//haskell:toolchain.bzl", + "HaskellToolchainInfo", +) +load( + "@prelude//haskell:util.bzl", + "attr_deps_haskell_lib_infos", + "attr_deps_haskell_link_infos", + "get_artifact_suffix", + "is_haskell_src", + "output_extensions", + "srcs_to_pairs", +) +load( + "@prelude//linking:link_info.bzl", + "LinkStyle", +) +load("@prelude//utils:argfile.bzl", "at_argfile") + +# The type of the return value of the `_compile()` function. +CompileResultInfo = record( + objects = field(Artifact), + hi = field(Artifact), + stubs = field(Artifact), + producing_indices = field(bool), +) + +CompileArgsInfo = record( + result = field(CompileResultInfo), + srcs = field(cmd_args), + args_for_cmd = field(cmd_args), + args_for_file = field(cmd_args), +) + +PackagesInfo = record( + exposed_package_args = cmd_args, + packagedb_args = cmd_args, + transitive_deps = field(HaskellLibraryInfoTSet), +) + +def _package_flag(toolchain: HaskellToolchainInfo) -> str: + if toolchain.support_expose_package: + return "-expose-package" + else: + return "-package" + +def get_packages_info( + ctx: AnalysisContext, + link_style: LinkStyle, + specify_pkg_version: bool, + enable_profiling: bool) -> PackagesInfo: + haskell_toolchain = ctx.attrs._haskell_toolchain[HaskellToolchainInfo] + + # Collect library dependencies. Note that these don't need to be in a + # particular order. 
+ direct_deps_link_info = attr_deps_haskell_link_infos(ctx) + libs = ctx.actions.tset( + HaskellLibraryInfoTSet, + children = [ + lib.prof_info[link_style] if enable_profiling else lib.info[link_style] + for lib in direct_deps_link_info + ], + ) + + # base is special and gets exposed by default + package_flag = _package_flag(haskell_toolchain) + exposed_package_args = cmd_args([package_flag, "base"]) + + packagedb_args = cmd_args() + packagedb_set = {} + + for lib in libs.traverse(): + packagedb_set[lib.db] = None + hidden_args = cmd_args(hidden = [ + lib.import_dirs.values(), + lib.stub_dirs, + # libs of dependencies might be needed at compile time if + # we're using Template Haskell: + lib.libs, + ]) + + exposed_package_args.add(hidden_args) + + packagedb_args.add(hidden_args) + + # These we need to add for all the packages/dependencies, i.e. + # direct and transitive (e.g. `fbcode-common-hs-util-hs-array`) + packagedb_args.add([cmd_args("-package-db", x) for x in packagedb_set]) + + haskell_direct_deps_lib_infos = attr_deps_haskell_lib_infos( + ctx, + link_style, + enable_profiling, + ) + + # Expose only the packages we depend on directly + for lib in haskell_direct_deps_lib_infos: + pkg_name = lib.name + if (specify_pkg_version): + pkg_name += "-{}".format(lib.version) + + exposed_package_args.add(package_flag, pkg_name) + + return PackagesInfo( + exposed_package_args = exposed_package_args, + packagedb_args = packagedb_args, + transitive_deps = libs, + ) + +def compile_args( + ctx: AnalysisContext, + link_style: LinkStyle, + enable_profiling: bool, + pkgname = None, + suffix: str = "") -> CompileArgsInfo: + haskell_toolchain = ctx.attrs._haskell_toolchain[HaskellToolchainInfo] + + compile_cmd = cmd_args() + compile_cmd.add(haskell_toolchain.compiler_flags) + + # Some rules pass in RTS (e.g. `+RTS ... -RTS`) options for GHC, which can't + # be parsed when inside an argsfile. + compile_cmd.add(ctx.attrs.compiler_flags) + + compile_args = cmd_args() + compile_args.add("-no-link", "-i") + + if enable_profiling: + compile_args.add("-prof") + + if link_style == LinkStyle("shared"): + compile_args.add("-dynamic", "-fPIC") + elif link_style == LinkStyle("static_pic"): + compile_args.add("-fPIC", "-fexternal-dynamic-refs") + + osuf, hisuf = output_extensions(link_style, enable_profiling) + compile_args.add("-osuf", osuf, "-hisuf", hisuf) + + if getattr(ctx.attrs, "main", None) != None: + compile_args.add(["-main-is", ctx.attrs.main]) + + artifact_suffix = get_artifact_suffix(link_style, enable_profiling, suffix) + + objects = ctx.actions.declare_output( + "objects-" + artifact_suffix, + dir = True, + ) + hi = ctx.actions.declare_output("hi-" + artifact_suffix, dir = True) + stubs = ctx.actions.declare_output("stubs-" + artifact_suffix, dir = True) + + compile_args.add( + "-odir", + objects.as_output(), + "-hidir", + hi.as_output(), + "-hiedir", + hi.as_output(), + "-stubdir", + stubs.as_output(), + ) + + # Add -package-db and -package/-expose-package flags for each Haskell + # library dependency. + packages_info = get_packages_info( + ctx, + link_style, + specify_pkg_version = False, + enable_profiling = enable_profiling, + ) + + compile_args.add(packages_info.exposed_package_args) + compile_args.add(packages_info.packagedb_args) + + # Add args from preprocess-able inputs. 
+ inherited_pre = cxx_inherited_preprocessor_infos(ctx.attrs.deps) + pre = cxx_merge_cpreprocessors(ctx, [], inherited_pre) + pre_args = pre.set.project_as_args("args") + compile_args.add(cmd_args(pre_args, format = "-optP={}")) + + if pkgname: + compile_args.add(["-this-unit-id", pkgname]) + + arg_srcs = [] + hidden_srcs = [] + for (path, src) in srcs_to_pairs(ctx.attrs.srcs): + # hs-boot files aren't expected to be arguments to the compiler but do need + # to be included in the directory of the associated src file + if is_haskell_src(path): + arg_srcs.append(src) + else: + hidden_srcs.append(src) + srcs = cmd_args( + arg_srcs, + hidden = hidden_srcs, + ) + + producing_indices = "-fwrite-ide-info" in ctx.attrs.compiler_flags + + return CompileArgsInfo( + result = CompileResultInfo( + objects = objects, + hi = hi, + stubs = stubs, + producing_indices = producing_indices, + ), + srcs = srcs, + args_for_cmd = compile_cmd, + args_for_file = compile_args, + ) + +# Compile all the context's sources. +def compile( + ctx: AnalysisContext, + link_style: LinkStyle, + enable_profiling: bool, + pkgname: str | None = None) -> CompileResultInfo: + haskell_toolchain = ctx.attrs._haskell_toolchain[HaskellToolchainInfo] + compile_cmd = cmd_args(haskell_toolchain.compiler) + + args = compile_args(ctx, link_style, enable_profiling, pkgname) + + compile_cmd.add(args.args_for_cmd) + + artifact_suffix = get_artifact_suffix(link_style, enable_profiling) + + if args.args_for_file: + if haskell_toolchain.use_argsfile: + compile_cmd.add(at_argfile( + actions = ctx.actions, + name = artifact_suffix + ".haskell_compile_argsfile", + args = [args.args_for_file, args.srcs], + allow_args = True, + )) + else: + compile_cmd.add(args.args_for_file) + compile_cmd.add(args.srcs) + + ctx.actions.run( + compile_cmd, + category = "haskell_compile_" + artifact_suffix.replace("-", "_"), + no_outputs_cleanup = True, + ) + + return args.result diff --git a/prelude/haskell/haskell.bzl b/prelude/haskell/haskell.bzl index 791a42f97639a..320b8f9360b1f 100644 --- a/prelude/haskell/haskell.bzl +++ b/prelude/haskell/haskell.bzl @@ -13,12 +13,17 @@ load( "@prelude//cxx:cxx.bzl", "get_auto_link_group_specs", ) +load( + "@prelude//cxx:cxx_context.bzl", + "get_cxx_toolchain_info", +) load( "@prelude//cxx:cxx_toolchain_types.bzl", - "CxxPlatformInfo", "CxxToolchainInfo", + "LinkerType", "PicBehavior", ) +load("@prelude//cxx:groups.bzl", "get_dedupped_roots_from_groups") load( "@prelude//cxx:link_groups.bzl", "LinkGroupContext", @@ -28,9 +33,16 @@ load( "get_filtered_links", "get_link_group_info", "get_link_group_preferred_linkage", + "get_public_link_group_nodes", "get_transitive_deps_matching_labels", "is_link_group_shlib", ) +load( + "@prelude//cxx:linker.bzl", + "LINKERS", + "get_rpath_origin", + "get_shared_library_flags", +) load( "@prelude//cxx:preprocessor.bzl", "CPreprocessor", @@ -38,6 +50,45 @@ load( "cxx_inherited_preprocessor_infos", "cxx_merge_cpreprocessors", ) +load( + "@prelude//haskell:compile.bzl", + "CompileResultInfo", + "compile", +) +load( + "@prelude//haskell:haskell_haddock.bzl", + "haskell_haddock_lib", +) +load( + "@prelude//haskell:library_info.bzl", + "HaskellLibraryInfo", + "HaskellLibraryInfoTSet", + "HaskellLibraryProvider", +) +load( + "@prelude//haskell:link_info.bzl", + "HaskellLinkInfo", + "HaskellProfLinkInfo", + "attr_link_style", + "cxx_toolchain_link_style", +) +load( + "@prelude//haskell:toolchain.bzl", + "HaskellToolchainInfo", +)
+load( + "@prelude//haskell:util.bzl", + "attr_deps", + "attr_deps_haskell_link_infos_sans_template_deps", + "attr_deps_merged_link_infos", + "attr_deps_profiling_link_infos", + "attr_deps_shared_library_infos", + "get_artifact_suffix", + "is_haskell_src", + "output_extensions", + "src_to_module_name", + "srcs_to_pairs", +) load( "@prelude//linking:link_groups.bzl", "gather_link_group_libs", @@ -52,7 +103,6 @@ load( "LinkInfo", "LinkInfos", "LinkStyle", - "Linkage", "LinkedObject", "MergedLinkInfo", "SharedLibLinkable", @@ -81,70 +131,19 @@ load( "@prelude//linking:shared_libraries.bzl", "SharedLibraryInfo", "create_shared_libraries", + "create_shlib_symlink_tree", "merge_shared_libraries", "traverse_shared_library_info", ) +load("@prelude//linking:types.bzl", "Linkage") load( "@prelude//python:python.bzl", "PythonLibraryInfo", ) -load("@prelude//utils:platform_flavors_util.bzl", "by_platform") +load("@prelude//utils:argfile.bzl", "at_argfile") load("@prelude//utils:set.bzl", "set") load("@prelude//utils:utils.bzl", "filter_and_map_idx", "flatten") -_HASKELL_EXTENSIONS = [ - ".hs", - ".lhs", - ".hsc", - ".chs", - ".x", - ".y", -] - -HaskellPlatformInfo = provider(fields = { - "name": provider_field(typing.Any, default = None), -}) - -HaskellToolchainInfo = provider( - # @unsorted-dict-items - fields = { - "compiler": provider_field(typing.Any, default = None), - "compiler_flags": provider_field(typing.Any, default = None), - "linker": provider_field(typing.Any, default = None), - "linker_flags": provider_field(typing.Any, default = None), - "haddock": provider_field(typing.Any, default = None), - "compiler_major_version": provider_field(typing.Any, default = None), - "package_name_prefix": provider_field(typing.Any, default = None), - "packager": provider_field(typing.Any, default = None), - "use_argsfile": provider_field(typing.Any, default = None), - "support_expose_package": provider_field(bool, default = False), - "archive_contents": provider_field(typing.Any, default = None), - "ghci_script_template": provider_field(typing.Any, default = None), - "ghci_iserv_template": provider_field(typing.Any, default = None), - "ide_script_template": provider_field(typing.Any, default = None), - "ghci_binutils_path": provider_field(typing.Any, default = None), - "ghci_lib_path": provider_field(typing.Any, default = None), - "ghci_ghc_path": provider_field(typing.Any, default = None), - "ghci_iserv_path": provider_field(typing.Any, default = None), - "ghci_iserv_prof_path": provider_field(typing.Any, default = None), - "ghci_cxx_path": provider_field(typing.Any, default = None), - "ghci_cc_path": provider_field(typing.Any, default = None), - "ghci_cpp_path": provider_field(typing.Any, default = None), - "ghci_packager": provider_field(typing.Any, default = None), - "cache_links": provider_field(typing.Any, default = None), - "script_template_processor": provider_field(typing.Any, default = None), - }, -) - -# A list of `HaskellLibraryInfo`s. -HaskellLinkInfo = provider( - # Contains a list of HaskellLibraryInfo records. - fields = { - "info": provider_field(typing.Any, default = None), # dict[LinkStyle, list[HaskellLibraryInfo]] # TODO use a tset - "prof_info": provider_field(typing.Any, default = None), # dict[LinkStyle, list[HaskellLibraryInfo]] # TODO use a tset - }, -) - HaskellIndexingTSet = transitive_set() # A list of hie dirs @@ -154,101 +153,10 @@ HaskellIndexInfo = provider( }, ) -# If the target is a haskell library, the HaskellLibraryProvider -# contains its HaskellLibraryInfo. 
(in contrast to a HaskellLinkInfo, -# which contains the HaskellLibraryInfo for all the transitive -# dependencies). Direct dependencies are treated differently from -# indirect dependencies for the purposes of module visibility. -HaskellLibraryProvider = provider( - fields = { - "lib": provider_field(typing.Any, default = None), # dict[LinkStyle, HaskellLibraryInfo] - "prof_lib": provider_field(typing.Any, default = None), # dict[LinkStyle, HaskellLibraryInfo] - }, -) - -# HaskellProfLinkInfo exposes the MergedLinkInfo of a target and all of its -# dependencies built for profiling. This allows top-level targets (e.g. -# `haskell_binary`) to be defined with profiling enabled by default. -HaskellProfLinkInfo = provider( - fields = { - "prof_infos": provider_field(typing.Any, default = None), # MergedLinkInfo - }, -) - -# A record of a Haskell library. -HaskellLibraryInfo = record( - # The library target name: e.g. "rts" - name = str, - # package config database: e.g. platform009/build/ghc/lib/package.conf.d - db = Artifact, - # e.g. "base-4.13.0.0" - id = str, - # Import dirs indexed by profiling enabled/disabled - import_dirs = dict[bool, Artifact], - stub_dirs = list[Artifact], - - # This field is only used as hidden inputs to compilation, to - # support Template Haskell which may need access to the libraries - # at compile time. The real library flags are propagated up the - # dependency graph via MergedLinkInfo. - libs = field(list[Artifact], []), - # Package version, used to specify the full package when exposing it, - # e.g. filepath-1.4.2.1, deepseq-1.4.4.0. - # Internal packages default to 1.0.0, e.g. `fbcode-dsi-logger-hs-types-1.0.0`. - version = str, - is_prebuilt = bool, - profiling_enabled = bool, -) - -# -- - -def _by_platform(ctx: AnalysisContext, xs: list[(str, list[typing.Any])]) -> list[typing.Any]: - platform = ctx.attrs._cxx_toolchain[CxxPlatformInfo].name - return flatten(by_platform([platform], xs)) - -def attr_deps(ctx: AnalysisContext) -> list[Dependency]: - return ctx.attrs.deps + _by_platform(ctx, ctx.attrs.platform_deps) - -# Disable until we have a need to call this. 
-# def _attr_deps_merged_link_infos(ctx: AnalysisContext) -> [MergedLinkInfo]: -# return filter(None, [d[MergedLinkInfo] for d in attr_deps(ctx)]) - # This conversion is non-standard, see TODO about link style below def _to_lib_output_style(link_style: LinkStyle) -> LibOutputStyle: return default_output_style_for_link_strategy(to_link_strategy(link_style)) -def _attr_deps_haskell_link_infos(ctx: AnalysisContext) -> list[HaskellLinkInfo]: - return filter( - None, - [ - d.get(HaskellLinkInfo) - for d in attr_deps(ctx) + ctx.attrs.template_deps - ], - ) - -def _attr_deps_haskell_lib_infos( - ctx: AnalysisContext, - link_style: LinkStyle, - enable_profiling: bool) -> list[HaskellLibraryInfo]: - if enable_profiling and link_style == LinkStyle("shared"): - fail("Profiling isn't supported when using dynamic linking") - return [ - x.prof_lib[link_style] if enable_profiling else x.lib[link_style] - for x in filter(None, [ - d.get(HaskellLibraryProvider) - for d in attr_deps(ctx) + ctx.attrs.template_deps - ]) - ] - -def _cxx_toolchain_link_style(ctx: AnalysisContext) -> LinkStyle: - return ctx.attrs._cxx_toolchain[CxxToolchainInfo].linker_info.link_style - -def _attr_link_style(ctx: AnalysisContext) -> LinkStyle: - if ctx.attrs.link_style != None: - return LinkStyle(ctx.attrs.link_style) - else: - return _cxx_toolchain_link_style(ctx) - def _attr_preferred_linkage(ctx: AnalysisContext) -> Linkage: preferred_linkage = ctx.attrs.preferred_linkage @@ -260,14 +168,6 @@ def _attr_preferred_linkage(ctx: AnalysisContext) -> Linkage: # -- -def _is_haskell_src(x: str) -> bool: - _, ext = paths.split_extension(x) - return ext in _HASKELL_EXTENSIONS - -def _src_to_module_name(x: str) -> str: - base, _ext = paths.split_extension(x) - return base.replace("/", ".") - def _get_haskell_prebuilt_libs( ctx, link_style: LinkStyle, @@ -353,7 +253,7 @@ def haskell_prebuilt_library_impl(ctx: AnalysisContext) -> list[Provider]: def archive_linkable(lib): return ArchiveLinkable( archive = Archive(artifact = lib), - linker_type = "gnu", + linker_type = LinkerType("gnu"), ) def shared_linkable(lib): @@ -371,18 +271,28 @@ def haskell_prebuilt_library_impl(ctx: AnalysisContext) -> list[Provider]: ] hlibinfos[link_style] = hlibinfo - hlinkinfos[link_style] = [hlibinfo] + hlinkinfos[link_style] = ctx.actions.tset( + HaskellLibraryInfoTSet, + value = hlibinfo, + children = [lib.info[link_style] for lib in haskell_infos], + ) prof_hlibinfos[link_style] = prof_hlibinfo - prof_hlinkinfos[link_style] = [prof_hlibinfo] + prof_hlinkinfos[link_style] = ctx.actions.tset( + HaskellLibraryInfoTSet, + value = prof_hlibinfo, + children = [lib.prof_info[link_style] for lib in haskell_infos], + ) link_infos[link_style] = LinkInfos( default = LinkInfo( pre_flags = ctx.attrs.exported_linker_flags, + post_flags = ctx.attrs.exported_post_linker_flags, linkables = linkables, ), ) prof_link_infos[link_style] = LinkInfos( default = LinkInfo( pre_flags = ctx.attrs.exported_linker_flags, + post_flags = ctx.attrs.exported_post_linker_flags, linkables = prof_linkables, ), ) @@ -396,13 +306,18 @@ def haskell_prebuilt_library_impl(ctx: AnalysisContext) -> list[Provider]: prof_lib = prof_hlibinfos, ) + # The link info that will be used when this library is a dependency of a non-Haskell + # target (e.g. a cxx_library()). We need to pick the profiling libs if we're in + # profiling mode. 
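# (E.g. with `enable_profiling = True`, a cxx_binary linking against this
# library picks up the profiling archives (conventionally `*_p.a`) instead
# of the plain ones.)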
+ default_link_infos = prof_link_infos if ctx.attrs.enable_profiling else link_infos + default_native_infos = prof_native_infos if ctx.attrs.enable_profiling else native_infos merged_link_info = create_merged_link_info( ctx, # We don't have access to a CxxToolchain here (yet). # Give that it's already built, this doesn't mean much, use a sane default. pic_behavior = PicBehavior("supported"), - link_infos = {_to_lib_output_style(s): v for s, v in link_infos.items()}, - exported_deps = native_infos, + link_infos = {_to_lib_output_style(s): v for s, v in default_link_infos.items()}, + exported_deps = default_native_infos, ) prof_merged_link_info = create_merged_link_info( @@ -417,6 +332,7 @@ def haskell_prebuilt_library_impl(ctx: AnalysisContext) -> list[Provider]: solibs = {} for soname, lib in ctx.attrs.shared_libs.items(): solibs[soname] = LinkedObject(output = lib, unstripped_output = lib) + shared_libs = create_shared_libraries(ctx, solibs) linkable_graph = create_linkable_graph( ctx, @@ -426,7 +342,7 @@ def haskell_prebuilt_library_impl(ctx: AnalysisContext) -> list[Provider]: ctx = ctx, exported_deps = ctx.attrs.deps, link_infos = {_to_lib_output_style(s): v for s, v in link_infos.items()}, - shared_libs = solibs, + shared_libs = shared_libs, default_soname = None, ), ), @@ -435,7 +351,7 @@ def haskell_prebuilt_library_impl(ctx: AnalysisContext) -> list[Provider]: inherited_pp_info = cxx_inherited_preprocessor_infos(ctx.attrs.deps) own_pp_info = CPreprocessor( - relative_args = CPreprocessorArgs(args = flatten([["-isystem", d] for d in ctx.attrs.cxx_header_dirs])), + args = CPreprocessorArgs(args = flatten([["-isystem", d] for d in ctx.attrs.cxx_header_dirs])), ) return [ @@ -444,11 +360,11 @@ def haskell_prebuilt_library_impl(ctx: AnalysisContext) -> list[Provider]: cxx_merge_cpreprocessors(ctx, [own_pp_info], inherited_pp_info), merge_shared_libraries( ctx.actions, - create_shared_libraries(ctx, solibs), + shared_libs, shared_library_infos, ), merge_link_group_lib_info(deps = ctx.attrs.deps), - merge_haskell_link_infos(haskell_infos + [haskell_link_infos]), + haskell_link_infos, merged_link_info, HaskellProfLinkInfo( prof_infos = prof_merged_link_info, @@ -456,246 +372,19 @@ def haskell_prebuilt_library_impl(ctx: AnalysisContext) -> list[Provider]: linkable_graph, ] -def merge_haskell_link_infos(deps: list[HaskellLinkInfo]) -> HaskellLinkInfo: - merged = {} - prof_merged = {} - for link_style in LinkStyle: - children = [] - prof_children = [] - for dep in deps: - if link_style in dep.info: - children.extend(dep.info[link_style]) - - if link_style in dep.prof_info: - prof_children.extend(dep.prof_info[link_style]) - - merged[link_style] = dedupe(children) - prof_merged[link_style] = dedupe(prof_children) - - return HaskellLinkInfo(info = merged, prof_info = prof_merged) - -PackagesInfo = record( - exposed_package_args = cmd_args, - packagedb_args = cmd_args, - transitive_deps = field(list[HaskellLibraryInfo]), -) - -def _package_flag(toolchain: HaskellToolchainInfo) -> str: - if toolchain.support_expose_package: - return "-expose-package" - else: - return "-package" - -def get_packages_info( - ctx: AnalysisContext, - link_style: LinkStyle, - specify_pkg_version: bool, - enable_profiling: bool) -> PackagesInfo: - haskell_toolchain = ctx.attrs._haskell_toolchain[HaskellToolchainInfo] - - # Collect library dependencies. Note that these don't need to be in a - # particular order and we really want to remove duplicates (there - # are a *lot* of duplicates). 
- libs = {} - direct_deps_link_info = _attr_deps_haskell_link_infos(ctx) - merged_hs_link_info = merge_haskell_link_infos(direct_deps_link_info) - - hs_link_info = merged_hs_link_info.prof_info if enable_profiling else merged_hs_link_info.info - - for lib in hs_link_info[link_style]: - libs[lib.db] = lib # lib.db is a good enough unique key - - # base is special and gets exposed by default - package_flag = _package_flag(haskell_toolchain) - exposed_package_args = cmd_args([package_flag, "base"]) - - packagedb_args = cmd_args() - - for lib in libs.values(): - exposed_package_args.hidden(lib.import_dirs.values()) - exposed_package_args.hidden(lib.stub_dirs) - - # libs of dependencies might be needed at compile time if - # we're using Template Haskell: - exposed_package_args.hidden(lib.libs) - - packagedb_args.hidden(lib.import_dirs.values()) - packagedb_args.hidden(lib.stub_dirs) - packagedb_args.hidden(lib.libs) - - for lib in libs.values(): - # These we need to add for all the packages/dependencies, i.e. - # direct and transitive (e.g. `fbcode-common-hs-util-hs-array`) - packagedb_args.add("-package-db", lib.db) - - haskell_direct_deps_lib_infos = _attr_deps_haskell_lib_infos( - ctx, - link_style, - enable_profiling, - ) - - # Expose only the packages we depend on directly - for lib in haskell_direct_deps_lib_infos: - pkg_name = lib.name - if (specify_pkg_version): - pkg_name += "-{}".format(lib.version) - - exposed_package_args.add(package_flag, pkg_name) - - return PackagesInfo( - exposed_package_args = exposed_package_args, - packagedb_args = packagedb_args, - transitive_deps = libs.values(), - ) - -# The type of the return value of the `_compile()` function. -CompileResultInfo = record( - objects = field(Artifact), - hi = field(Artifact), - stubs = field(Artifact), - producing_indices = field(bool), -) - -def _link_style_extensions(link_style: LinkStyle) -> (str, str): - if link_style == LinkStyle("shared"): - return ("dyn_o", "dyn_hi") - elif link_style == LinkStyle("static_pic"): - return ("o", "hi") # is this right? - elif link_style == LinkStyle("static"): - return ("o", "hi") - fail("unknown LinkStyle") - -def _output_extensions( - link_style: LinkStyle, - profiled: bool) -> (str, str): - osuf, hisuf = _link_style_extensions(link_style) - if profiled: - return ("p_" + osuf, "p_" + hisuf) - else: - return (osuf, hisuf) - def _srcs_to_objfiles( ctx: AnalysisContext, odir: Artifact, - osuf: str) -> cmd_args: - objfiles = cmd_args() - for src in ctx.attrs.srcs: + osuf: str) -> list[Artifact]: + objfiles = [] + for src, _ in srcs_to_pairs(ctx.attrs.srcs): # Don't link boot sources, as they're only meant to be used for compiling. - if _is_haskell_src(src): - objfiles.add(cmd_args([odir, "/", paths.replace_extension(src, "." + osuf)], delimiter = "")) + if is_haskell_src(src): + objfiles.append(odir.project(paths.replace_extension(src, "." + osuf))) return objfiles -# Single place to build the suffix used in artifacts (e.g. package directories, -# lib names) considering attributes like link style and profiling. -def get_artifact_suffix(link_style: LinkStyle, enable_profiling: bool) -> str: - artifact_suffix = link_style.value - if enable_profiling: - artifact_suffix += "-prof" - return artifact_suffix - -# Compile all the context's sources. 
-def _compile( - ctx: AnalysisContext, - link_style: LinkStyle, - enable_profiling: bool, - extra_args = []) -> CompileResultInfo: - haskell_toolchain = ctx.attrs._haskell_toolchain[HaskellToolchainInfo] - compile_cmd = cmd_args(haskell_toolchain.compiler) - compile_cmd.add(haskell_toolchain.compiler_flags) - - # Some rules pass in RTS (e.g. `+RTS ... -RTS`) options for GHC, which can't - # be parsed when inside an argsfile. - compile_cmd.add(ctx.attrs.compiler_flags) - - compile_args = cmd_args() - compile_args.add("-no-link", "-i") - - if enable_profiling: - compile_args.add("-prof") - - if link_style == LinkStyle("shared"): - compile_args.add("-dynamic", "-fPIC") - elif link_style == LinkStyle("static_pic"): - compile_args.add("-fPIC", "-fexternal-dynamic-refs") - - osuf, hisuf = _output_extensions(link_style, enable_profiling) - compile_args.add("-osuf", osuf, "-hisuf", hisuf) - - if getattr(ctx.attrs, "main", None) != None: - compile_args.add(["-main-is", ctx.attrs.main]) - - artifact_suffix = get_artifact_suffix(link_style, enable_profiling) - - objects = ctx.actions.declare_output( - "objects-" + artifact_suffix, - dir = True, - ) - hi = ctx.actions.declare_output("hi-" + artifact_suffix, dir = True) - stubs = ctx.actions.declare_output("stubs-" + artifact_suffix, dir = True) - - compile_args.add( - "-odir", - objects.as_output(), - "-hidir", - hi.as_output(), - "-hiedir", - hi.as_output(), - "-stubdir", - stubs.as_output(), - ) - - # Add -package-db and -package/-expose-package flags for each Haskell - # library dependency. - packages_info = get_packages_info( - ctx, - link_style, - specify_pkg_version = False, - enable_profiling = enable_profiling, - ) - - compile_args.add(packages_info.exposed_package_args) - compile_args.add(packages_info.packagedb_args) - - # Add args from preprocess-able inputs. - inherited_pre = cxx_inherited_preprocessor_infos(ctx.attrs.deps) - pre = cxx_merge_cpreprocessors(ctx, [], inherited_pre) - pre_args = pre.set.project_as_args("args") - compile_args.add(cmd_args(pre_args, format = "-optP={}")) - - compile_args.add(extra_args) - - for (path, src) in ctx.attrs.srcs.items(): - # hs-boot files aren't expected to be an argument to compiler but does need - # to be included in the directory of the associated src file - if _is_haskell_src(path): - compile_args.add(src) - else: - compile_args.hidden(src) - - argsfile = ctx.actions.declare_output( - "haskell_compile_" + artifact_suffix + ".argsfile", - ) - ctx.actions.write(argsfile.as_output(), compile_args, allow_args = True) - hidden_args = [compile_args] - compile_cmd.add(cmd_args(argsfile, format = "@{}").hidden(hidden_args)) - - ctx.actions.run( - compile_cmd, - category = "haskell_compile_" + artifact_suffix.replace("-", "_"), - no_outputs_cleanup = True, - ) - - producing_indices = "-fwrite-ide-info" in ctx.attrs.compiler_flags - - return CompileResultInfo( - objects = objects, - hi = hi, - stubs = stubs, - producing_indices = producing_indices, - ) - _REGISTER_PACKAGE = """\ -set -euo pipefail +set -eu GHC_PKG=$1 DB=$2 PKGCONF=$3 @@ -733,11 +422,7 @@ def _make_package( artifact_suffix = get_artifact_suffix(link_style, enable_profiling) # Don't expose boot sources, as they're only meant to be used for compiling. 
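# (Illustration: a src entry `Data/Foo.hs` registers module `Data.Foo`, while
# a hypothetical `Data/Foo.hs-boot` entry is filtered out by `is_haskell_src`.)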
- modules = [_src_to_module_name(x) for x in ctx.attrs.srcs if _is_haskell_src(x)] - - uniq_hlis = {} - for x in hlis: - uniq_hlis[x.id] = x + modules = [src_to_module_name(x) for x, _ in srcs_to_pairs(ctx.attrs.srcs) if is_haskell_src(x)] if enable_profiling: # Add the `-p` suffix otherwise ghc will look for objects @@ -767,36 +452,40 @@ def _make_package( "import-dirs:" + ", ".join(import_dirs), "library-dirs:" + ", ".join(library_dirs), "extra-libraries: " + libname, - "depends: " + ", ".join(uniq_hlis), + "depends: " + ", ".join([lib.id for lib in hlis]), ] pkg_conf = ctx.actions.write("pkg-" + artifact_suffix + ".conf", conf) db = ctx.actions.declare_output("db-" + artifact_suffix) - db_deps = {} - for x in uniq_hlis.values(): - db_deps[repr(x.db)] = x.db + # While the list of hlis is unique, there may be multiple packages in the same db. + # Cutting down the GHC_PACKAGE_PATH significantly speeds up GHC. + db_deps = {x.db: None for x in hlis}.keys() # So that ghc-pkg can find the DBs for the dependencies. We might # be able to use flags for this instead, but this works. ghc_package_path = cmd_args( - db_deps.values(), + db_deps, delimiter = ":", ) haskell_toolchain = ctx.attrs._haskell_toolchain[HaskellToolchainInfo] ctx.actions.run( - cmd_args([ - "sh", - "-c", - _REGISTER_PACKAGE, - "", - haskell_toolchain.packager, - db.as_output(), - pkg_conf, - ]).hidden(hi.values()).hidden(lib.values()), # needs hi, because ghc-pkg checks that the .hi files exist + cmd_args( + [ + "sh", + "-c", + _REGISTER_PACKAGE, + "", + haskell_toolchain.packager, + db.as_output(), + pkg_conf, + ], + # needs hi, because ghc-pkg checks that the .hi files exist + hidden = hi.values() + lib.values(), + ), category = "haskell_package_" + artifact_suffix.replace("-", "_"), - env = {"GHC_PACKAGE_PATH": ghc_package_path}, + env = {"GHC_PACKAGE_PATH": ghc_package_path} if db_deps else {}, ) return db @@ -809,8 +498,23 @@ HaskellLibBuildOutput = record( libs = list[Artifact], ) +def _get_haskell_shared_library_name_linker_flags( + linker_type: LinkerType, + soname: str) -> list[str]: + if linker_type == LinkerType("gnu"): + return ["-Wl,-soname,{}".format(soname)] + elif linker_type == LinkerType("darwin"): + # Passing `-install_name @rpath/...` or + # `-Xlinker -install_name -Xlinker @rpath/...` instead causes + # ghc-9.6.3: panic! (the 'impossible' happened) + return ["-Wl,-install_name,@rpath/{}".format(soname)] + else: + fail("Unknown linker type '{}'.".format(linker_type)) + def _build_haskell_lib( ctx, + libname: str, + pkgname: str, hlis: list[HaskellLinkInfo], # haskell link infos from all deps nlis: list[MergedLinkInfo], # native link infos from all deps link_style: LinkStyle, @@ -819,20 +523,18 @@ def _build_haskell_lib( # profiling, so it should be passed when `enable_profiling` is True. 
non_profiling_hlib: [HaskellLibBuildOutput, None] = None) -> HaskellLibBuildOutput: linker_info = ctx.attrs._cxx_toolchain[CxxToolchainInfo].linker_info - libname = repr(ctx.label.path).replace("//", "_").replace("/", "_") + "_" + ctx.label.name - pkgname = libname.replace("_", "-") # Link the objects into a library haskell_toolchain = ctx.attrs._haskell_toolchain[HaskellToolchainInfo] - osuf, _hisuf = _output_extensions(link_style, enable_profiling) + osuf, _hisuf = output_extensions(link_style, enable_profiling) # Compile the sources - compiled = _compile( + compiled = compile( ctx, link_style, enable_profiling = enable_profiling, - extra_args = ["-this-unit-id", pkgname], + pkgname = pkgname, ) solibs = {} artifact_suffix = get_artifact_suffix(link_style, enable_profiling) @@ -841,34 +543,38 @@ def _build_haskell_lib( if link_style == LinkStyle("static_pic"): libstem += "_pic" + dynamic_lib_suffix = "." + LINKERS[linker_info.type].default_shared_library_extension static_lib_suffix = "_p.a" if enable_profiling else ".a" - libfile = "lib" + libstem + (".so" if link_style == LinkStyle("shared") else static_lib_suffix) + libfile = "lib" + libstem + (dynamic_lib_suffix if link_style == LinkStyle("shared") else static_lib_suffix) lib_short_path = paths.join("lib-{}".format(artifact_suffix), libfile) linfos = [x.prof_info if enable_profiling else x.info for x in hlis] - uniq_infos = dedupe(flatten([x[link_style] for x in linfos])) + + # only gather direct dependencies + uniq_infos = [x[link_style].value for x in linfos] objfiles = _srcs_to_objfiles(ctx, compiled.objects, osuf) if link_style == LinkStyle("shared"): lib = ctx.actions.declare_output(lib_short_path) - link = cmd_args(haskell_toolchain.linker) - link.add(haskell_toolchain.linker_flags) - link.add(ctx.attrs.linker_flags) - link.add("-o", lib.as_output()) - link.add( - "-shared", - "-dynamic", - "-optl", - "-Wl,-soname", - "-optl", - "-Wl," + libfile, + link = cmd_args( + [haskell_toolchain.linker] + + [haskell_toolchain.linker_flags] + + [ctx.attrs.linker_flags] + + ["-o", lib.as_output()] + + [ + get_shared_library_flags(linker_info.type), + "-dynamic", + cmd_args( + _get_haskell_shared_library_name_linker_flags(linker_info.type, libfile), + prepend = "-optl", + ), + ] + + [objfiles], + hidden = compiled.stubs, ) - link.add(objfiles) - link.hidden(compiled.stubs) - infos = get_link_args_for_strategy( ctx, nlis, @@ -889,7 +595,7 @@ def _build_haskell_lib( else: # static flavours # TODO: avoid making an archive for a single object, like cxx does # (but would that work with Template Haskell?) 
- archive = make_archive(ctx, lib_short_path, [compiled.objects], objfiles) + archive = make_archive(ctx, lib_short_path, objfiles) lib = archive.artifact libs = [lib] + archive.external_objects link_infos = LinkInfos( @@ -965,27 +671,10 @@ def haskell_library_impl(ctx: AnalysisContext) -> list[Provider]: preferred_linkage = Linkage("static") # Get haskell and native link infos from all deps - hlis = [] - nlis = [] - prof_nlis = [] - shared_library_infos = [] - for lib in attr_deps(ctx): - li = lib.get(HaskellLinkInfo) - if li != None: - hlis.append(li) - li = lib.get(MergedLinkInfo) - if li != None: - nlis.append(li) - if HaskellLinkInfo not in lib: - # MergedLinkInfo from non-haskell deps should be part of the - # profiling MergedLinkInfo - prof_nlis.append(li) - li = lib.get(HaskellProfLinkInfo) - if li != None: - prof_nlis.append(li.prof_infos) - li = lib.get(SharedLibraryInfo) - if li != None: - shared_library_infos.append(li) + hlis = attr_deps_haskell_link_infos_sans_template_deps(ctx) + nlis = attr_deps_merged_link_infos(ctx) + prof_nlis = attr_deps_profiling_link_infos(ctx) + shared_library_infos = attr_deps_shared_library_infos(ctx) solibs = {} link_infos = {} @@ -997,6 +686,9 @@ def haskell_library_impl(ctx: AnalysisContext) -> list[Provider]: indexing_tsets = {} sub_targets = {} + libname = repr(ctx.label.path).replace("//", "_").replace("/", "_") + "_" + ctx.label.name + pkgname = libname.replace("_", "-") + # The non-profiling library is also needed to build the package with # profiling enabled, so we need to keep track of it for each link style. non_profiling_hlib = {} @@ -1009,6 +701,8 @@ def haskell_library_impl(ctx: AnalysisContext) -> list[Provider]: hlib_build_out = _build_haskell_lib( ctx, + libname, + pkgname, hlis = hlis, nlis = nlis, link_style = link_style, @@ -1025,11 +719,19 @@ def haskell_library_impl(ctx: AnalysisContext) -> list[Provider]: if enable_profiling: prof_hlib_infos[link_style] = hlib - prof_hlink_infos[link_style] = [hlib] + prof_hlink_infos[link_style] = ctx.actions.tset( + HaskellLibraryInfoTSet, + value = hlib, + children = [li.prof_info[link_style] for li in hlis], + ) prof_link_infos[link_style] = hlib_build_out.link_infos else: hlib_infos[link_style] = hlib - hlink_infos[link_style] = [hlib] + hlink_infos[link_style] = ctx.actions.tset( + HaskellLibraryInfoTSet, + value = hlib, + children = [li.info[link_style] for li in hlis], + ) link_infos[link_style] = hlib_build_out.link_infos # Build the indices and create subtargets only once, with profiling @@ -1050,12 +752,13 @@ def haskell_library_impl(ctx: AnalysisContext) -> list[Provider]: )] pic_behavior = ctx.attrs._cxx_toolchain[CxxToolchainInfo].pic_behavior - link_style = _cxx_toolchain_link_style(ctx) + link_style = cxx_toolchain_link_style(ctx) output_style = get_lib_output_style( to_link_strategy(link_style), preferred_linkage, pic_behavior, ) + shared_libs = create_shared_libraries(ctx, solibs) # TODO(cjhopman): this haskell implementation does not consistently handle LibOutputStyle # and LinkStrategy as expected and it's hard to tell what the intent of the existing code is @@ -1071,12 +774,14 @@ def haskell_library_impl(ctx: AnalysisContext) -> list[Provider]: # and `link_infos` if the target doesn't force static linking. 
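# (Reusing the shared flavour verbatim is sound because profiling is not
# supported with dynamic linking, so no separate profiled shared library
# exists to record here.)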
prof_link_infos[LinkStyle("shared")] = link_infos[LinkStyle("shared")] + default_link_infos = prof_link_infos if ctx.attrs.enable_profiling else link_infos + default_native_infos = prof_nlis if ctx.attrs.enable_profiling else nlis merged_link_info = create_merged_link_info( ctx, pic_behavior = pic_behavior, - link_infos = {_to_lib_output_style(s): v for s, v in link_infos.items()}, + link_infos = {_to_lib_output_style(s): v for s, v in default_link_infos.items()}, preferred_linkage = preferred_linkage, - exported_deps = nlis, + exported_deps = default_native_infos, ) prof_merged_link_info = create_merged_link_info( @@ -1096,7 +801,7 @@ def haskell_library_impl(ctx: AnalysisContext) -> list[Provider]: preferred_linkage = preferred_linkage, exported_deps = ctx.attrs.deps, link_infos = {_to_lib_output_style(s): v for s, v in link_infos.items()}, - shared_libs = solibs, + shared_libs = shared_libs, # TODO(cjhopman): this should be set to non-None default_soname = None, ), @@ -1129,10 +834,10 @@ def haskell_library_impl(ctx: AnalysisContext) -> list[Provider]: lib = hlib_infos, prof_lib = prof_hlib_infos, ), - merge_haskell_link_infos(hlis + [HaskellLinkInfo( + HaskellLinkInfo( info = hlink_infos, prof_info = prof_hlink_infos, - )]), + ), merged_link_info, HaskellProfLinkInfo( prof_infos = prof_merged_link_info, @@ -1141,9 +846,10 @@ def haskell_library_impl(ctx: AnalysisContext) -> list[Provider]: cxx_merge_cpreprocessors(ctx, pp, inherited_pp_info), merge_shared_libraries( ctx.actions, - create_shared_libraries(ctx, solibs), + shared_libs, shared_library_infos, ), + haskell_haddock_lib(ctx, pkgname), ] if indexing_tsets: @@ -1185,7 +891,7 @@ def haskell_library_impl(ctx: AnalysisContext) -> list[Provider]: def derive_indexing_tset( actions: AnalysisActions, link_style: LinkStyle, - value: [Artifact, None], + value: Artifact | None, children: list[Dependency]) -> HaskellIndexingTSet: index_children = [] for dep in children: @@ -1205,7 +911,7 @@ def haskell_binary_impl(ctx: AnalysisContext) -> list[Provider]: # Decide what kind of linking we're doing - link_style = _attr_link_style(ctx) + link_style = attr_link_style(ctx) # Link Groups link_group_info = get_link_group_info(ctx, filter_and_map_idx(LinkableGraph, attr_deps(ctx))) @@ -1214,7 +920,7 @@ def haskell_binary_impl(ctx: AnalysisContext) -> list[Provider]: if enable_profiling and link_style == LinkStyle("shared"): link_style = LinkStyle("static") - compiled = _compile( + compiled = compile( ctx, link_style, enable_profiling = enable_profiling, @@ -1223,17 +929,20 @@ def haskell_binary_impl(ctx: AnalysisContext) -> list[Provider]: haskell_toolchain = ctx.attrs._haskell_toolchain[HaskellToolchainInfo] output = ctx.actions.declare_output(ctx.attrs.name) - link = cmd_args(haskell_toolchain.compiler) - link.add("-o", output.as_output()) - link.add(haskell_toolchain.linker_flags) - link.add(ctx.attrs.linker_flags) + link = cmd_args( + [haskell_toolchain.compiler] + + ["-o", output.as_output()] + + [haskell_toolchain.linker_flags] + + [ctx.attrs.linker_flags], + hidden = compiled.stubs, + ) - link.hidden(compiled.stubs) + link_args = cmd_args() - osuf, _hisuf = _output_extensions(link_style, enable_profiling) + osuf, _hisuf = output_extensions(link_style, enable_profiling) objfiles = _srcs_to_objfiles(ctx, compiled.objects, osuf) - link.add(objfiles) + link_args.add(objfiles) indexing_tsets = {} if compiled.producing_indices: @@ -1250,8 +959,9 @@ def haskell_binary_impl(ctx: AnalysisContext) -> list[Provider]: deps = slis, ) - sos = {} + sos = 
[] + link_strategy = to_link_strategy(link_style) if link_group_info != None: own_binary_link_flags = [] auto_link_groups = {} @@ -1264,14 +974,23 @@ def haskell_binary_impl(ctx: AnalysisContext) -> list[Provider]: # in the prelude, the link group map will give us the link group libs. # Otherwise, pull them from the `LinkGroupLibInfo` provider from out deps. auto_link_group_specs = get_auto_link_group_specs(ctx, link_group_info) + executable_deps = [d.linkable_graph.nodes.value.label for d in link_deps if d.linkable_graph != None] + public_nodes = get_public_link_group_nodes( + linkable_graph_node_map, + link_group_info.mappings, + executable_deps, + None, + ) if auto_link_group_specs != None: linked_link_groups = create_link_groups( ctx = ctx, + link_strategy = link_strategy, link_group_mappings = link_group_info.mappings, link_group_preferred_linkage = link_group_preferred_linkage, - executable_deps = [d.linkable_graph.nodes.value.label for d in link_deps if d.linkable_graph != None], + executable_deps = executable_deps, link_group_specs = auto_link_group_specs, linkable_graph_node_map = linkable_graph_node_map, + public_nodes = public_nodes, ) for name, linked_link_group in linked_link_groups.libs.items(): auto_link_groups[name] = linked_link_group.artifact @@ -1290,15 +1009,11 @@ def haskell_binary_impl(ctx: AnalysisContext) -> list[Provider]: link_group_relevant_roots = find_relevant_roots( linkable_graph_node_map = linkable_graph_node_map, link_group_mappings = link_group_info.mappings, - roots = [ - mapping.root - for group in link_group_info.groups.values() - for mapping in group.mappings - if mapping.root != None - ], + roots = get_dedupped_roots_from_groups(link_group_info.groups.values()), ) - labels_to_links_map = get_filtered_labels_to_links_map( + labels_to_links = get_filtered_labels_to_links_map( + public_nodes = public_nodes, linkable_graph_node_map = linkable_graph_node_map, link_group = None, link_groups = link_group_info.groups, @@ -1308,7 +1023,7 @@ def haskell_binary_impl(ctx: AnalysisContext) -> list[Provider]: name: (lib.label, lib.shared_link_infos) for name, lib in link_group_libs.items() }, - link_strategy = to_link_strategy(link_style), + link_strategy = link_strategy, roots = ( [ d.linkable_graph.nodes.value.label @@ -1340,25 +1055,26 @@ def haskell_binary_impl(ctx: AnalysisContext) -> list[Provider]: pre_flags = own_binary_link_flags, ), ) - link_infos.extend(get_filtered_links(labels_to_links_map, set(public_nodes))) + link_infos.extend(get_filtered_links(labels_to_links.map, set(public_nodes))) infos = LinkArgs(infos = link_infos) link_group_ctx = LinkGroupContext( link_group_mappings = link_group_info.mappings, link_group_libs = link_group_libs, link_group_preferred_linkage = link_group_preferred_linkage, - labels_to_links_map = labels_to_links_map, + labels_to_links_map = labels_to_links.map, + targets_consumed_by_link_groups = {}, ) - for name, shared_lib in traverse_shared_library_info(shlib_info).items(): + for shared_lib in traverse_shared_library_info(shlib_info): label = shared_lib.label if is_link_group_shlib(label, link_group_ctx): - sos[name] = shared_lib.lib + sos.append(shared_lib) # When there are no matches for a pattern based link group, # `link_group_mappings` will not have an entry associated with the lib. 
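# (Hence the loop below adds every link group lib's shared libraries
# unconditionally, rather than relying on `is_link_group_shlib` alone.)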
for _name, link_group_lib in link_group_libs.items(): - sos.update(link_group_lib.shared_libs) + sos.extend(link_group_lib.shared_libs.libraries) else: nlis = [] @@ -1371,21 +1087,32 @@ def haskell_binary_impl(ctx: AnalysisContext) -> list[Provider]: li = lib.get(MergedLinkInfo) if li != None: nlis.append(li) - for name, shared_lib in traverse_shared_library_info(shlib_info).items(): - sos[name] = shared_lib.lib + sos.extend(traverse_shared_library_info(shlib_info)) infos = get_link_args_for_strategy(ctx, nlis, to_link_strategy(link_style)) - link.add(cmd_args(unpack_link_args(infos), prepend = "-optl")) + link_args.add(cmd_args(unpack_link_args(infos), prepend = "-optl")) + link.add(at_argfile( + actions = ctx.actions, + name = "args.haskell_link_argsfile", + args = link_args, + allow_args = True, + )) ctx.actions.run(link, category = "haskell_link") - run = cmd_args(output) - if link_style == LinkStyle("shared") or link_group_info != None: sos_dir = "__{}__shared_libs_symlink_tree".format(ctx.attrs.name) - link.add("-optl", "-Wl,-rpath", "-optl", "-Wl,$ORIGIN/{}".format(sos_dir)) - symlink_dir = ctx.actions.symlinked_dir(sos_dir, {n: o.output for n, o in sos.items()}) - run.hidden(symlink_dir) + rpath_ref = get_rpath_origin(get_cxx_toolchain_info(ctx).linker_info.type) + rpath_ldflag = "-Wl,{}/{}".format(rpath_ref, sos_dir) + link.add("-optl", "-Wl,-rpath", "-optl", rpath_ldflag) + symlink_dir = create_shlib_symlink_tree( + actions = ctx.actions, + out = sos_dir, + shared_libs = sos, + ) + run = cmd_args(output, hidden = symlink_dir) + else: + run = cmd_args(output) providers = [ DefaultInfo(default_output = output), diff --git a/prelude/haskell/haskell_ghci.bzl b/prelude/haskell/haskell_ghci.bzl index 93f76677ccc26..3ad2d24adda01 100644 --- a/prelude/haskell/haskell_ghci.bzl +++ b/prelude/haskell/haskell_ghci.bzl @@ -17,28 +17,33 @@ load( "link_options", ) load( - "@prelude//haskell:haskell.bzl", + "@prelude//haskell:compile.bzl", + "PackagesInfo", + "get_packages_info", +) +load( + "@prelude//haskell:library_info.bzl", "HaskellLibraryInfo", "HaskellLibraryProvider", +) +load( + "@prelude//haskell:toolchain.bzl", "HaskellToolchainInfo", - "PackagesInfo", - "attr_deps", - "get_artifact_suffix", - "get_packages_info", ) +load("@prelude//haskell:util.bzl", "attr_deps", "get_artifact_suffix") load("@prelude//linking:execution_preference.bzl", "LinkExecutionPreference") load( "@prelude//linking:link_info.bzl", "LinkArgs", "LinkInfo", "LinkStyle", - "Linkage", "get_lib_output_style", "set_linkable_link_whole", "to_link_strategy", ) load( "@prelude//linking:linkable_graph.bzl", + "LinkableGraph", "LinkableRootInfo", "create_linkable_graph", "get_deps_for_link", @@ -47,12 +52,15 @@ load( load( "@prelude//linking:shared_libraries.bzl", "SharedLibraryInfo", + "create_shlib_symlink_tree", "traverse_shared_library_info", + "with_unique_str_sonames", ) +load("@prelude//linking:types.bzl", "Linkage") load( "@prelude//utils:graph_utils.bzl", - "breadth_first_traversal", - "breadth_first_traversal_by", + "depth_first_traversal", + "depth_first_traversal_by", ) load("@prelude//utils:utils.bzl", "flatten") @@ -172,11 +180,15 @@ def _build_haskell_omnibus_so(ctx: AnalysisContext) -> HaskellOmnibusData: for nlabel, n in graph_nodes.items() } - all_direct_deps = [dep.label for dep in all_deps] + all_direct_deps = [] + for dep in all_deps: + graph = dep.get(LinkableGraph) + if graph: + all_direct_deps.append(graph.label) dep_graph[ctx.label] = all_direct_deps # Need to exclude all transitive deps of excluded 
deps - all_nodes_to_exclude = breadth_first_traversal( + all_nodes_to_exclude = depth_first_traversal( dep_graph, [dep.label for dep in preload_deps], ) @@ -221,7 +233,7 @@ def _build_haskell_omnibus_so(ctx: AnalysisContext) -> HaskellOmnibusData: # This is not the final set of body nodes, because it still includes # nodes that don't support omnibus (e.g. haskell_library nodes) - breadth_first_traversal_by( + depth_first_traversal_by( dep_graph, [ctx.label], find_deps_for_body, @@ -264,7 +276,7 @@ def _build_haskell_omnibus_so(ctx: AnalysisContext) -> HaskellOmnibusData: # Handle third-party dependencies of the omnibus SO tp_deps_shared_link_infos = {} - so_symlinks = {} + prebuilt_shlibs = [] for node_label in prebuilt_so_deps.keys(): node = graph_nodes[node_label] @@ -278,14 +290,14 @@ def _build_haskell_omnibus_so(ctx: AnalysisContext) -> HaskellOmnibusData: shared_li = node.link_infos.get(output_style, None) if shared_li != None: tp_deps_shared_link_infos[node_label] = shared_li.default - for libname, linkObject in node.shared_libs.items(): - so_symlinks[libname] = linkObject.output + prebuilt_shlibs.extend(node.shared_libs.libraries) # Create symlinks to the TP dependencies' SOs so_symlinks_root_path = ctx.label.name + ".so-symlinks" - so_symlinks_root = ctx.actions.symlinked_dir( - so_symlinks_root_path, - so_symlinks, + so_symlinks_root = create_shlib_symlink_tree( + actions = ctx.actions, + out = so_symlinks_root_path, + shared_libs = prebuilt_shlibs, ) linker_info = get_cxx_toolchain_info(ctx).linker_info @@ -323,10 +335,10 @@ def _replace_macros_in_script_template( script_template: Artifact, haskell_toolchain: HaskellToolchainInfo, # Optional artifacts - ghci_bin: [Artifact, None] = None, - start_ghci: [Artifact, None] = None, - iserv_script: [Artifact, None] = None, - squashed_so: [Artifact, None] = None, + ghci_bin: Artifact | None = None, + start_ghci: Artifact | None = None, + iserv_script: Artifact | None = None, + squashed_so: Artifact | None = None, # Optional cmd_args exposed_package_args: [cmd_args, None] = None, packagedb_args: [cmd_args, None] = None, @@ -335,16 +347,16 @@ def _replace_macros_in_script_template( # Optional string args srcs: [str, None] = None, output_name: [str, None] = None, - ghci_iserv_path: [str, None] = None, + ghci_iserv_path: [Artifact, None] = None, preload_libs: [str, None] = None) -> Artifact: toolchain_paths = { BINUTILS_PATH: haskell_toolchain.ghci_binutils_path, - GHCI_LIB_PATH: haskell_toolchain.ghci_lib_path, + GHCI_LIB_PATH: haskell_toolchain.ghci_lib_path.get(DefaultInfo).default_outputs[0], CC_PATH: haskell_toolchain.ghci_cc_path, CPP_PATH: haskell_toolchain.ghci_cpp_path, CXX_PATH: haskell_toolchain.ghci_cxx_path, - GHCI_PACKAGER: haskell_toolchain.ghci_packager, - GHCI_GHC_PATH: haskell_toolchain.ghci_ghc_path, + GHCI_PACKAGER: haskell_toolchain.ghci_packager.get(DefaultInfo).default_outputs[0], + GHCI_GHC_PATH: haskell_toolchain.ghci_ghc_path.get(DefaultInfo).default_outputs[0], } if ghci_bin != None: @@ -358,7 +370,7 @@ def _replace_macros_in_script_template( replace_cmd = cmd_args(script_template_processor) replace_cmd.add(cmd_args(script_template, format = "--script_template={}")) for name, path in toolchain_paths.items(): - replace_cmd.add(cmd_args("--{}={}".format(name, path))) + replace_cmd.add(cmd_args(path, format = "--{}={{}}".format(name))) replace_cmd.add(cmd_args( final_script.as_output(), @@ -455,7 +467,7 @@ def _write_iserv_script( script_template = ghci_iserv_template, output_name = iserv_script_name, 
haskell_toolchain = haskell_toolchain, - ghci_iserv_path = ghci_iserv_path, + ghci_iserv_path = ghci_iserv_path.get(DefaultInfo).default_outputs[0], preload_libs = preload_libs, ) return iserv_script @@ -472,10 +484,10 @@ def _build_preload_deps_root( if SharedLibraryInfo in preload_dep: slib_info = preload_dep[SharedLibraryInfo] - shlib = traverse_shared_library_info(slib_info).items() + shlib = traverse_shared_library_info(slib_info) - for shlib_name, shared_lib in shlib: - preload_symlinks[shlib_name] = shared_lib.lib.output + for soname, shared_lib in with_unique_str_sonames(shlib).items(): + preload_symlinks[soname] = shared_lib.lib.output # TODO(T150785851): build or get SO for direct preload_deps # TODO(T150785851): find out why the only SOs missing are the ones from @@ -520,11 +532,10 @@ def _build_preload_deps_root( ) # Symlink the ghci binary that will be used, e.g. the internal fork in Haxlsh -def _symlink_ghci_binary(ctx, ghci_bin: Artifact): - # TODO(T155760998): set ghci_ghc_path as a dependency instead of string +def _symlink_ghci_binary(ctx, haskell_toolchain: HaskellToolchainInfo, ghci_bin: Artifact): ghci_bin_dep = ctx.attrs.ghci_bin_dep if not ghci_bin_dep: - fail("GHC binary path not specified") + ghci_bin_dep = haskell_toolchain.ghci_ghc_path # NOTE: In the buck1 version we'd symlink the binary only if a custom one # was provided, but in buck2 we're always setting `ghci_bin_dep` (i.e. @@ -590,15 +601,15 @@ def _write_start_ghci( ctx.actions.copy_file(script_file, header_ghci) def haskell_ghci_impl(ctx: AnalysisContext) -> list[Provider]: + haskell_toolchain = ctx.attrs._haskell_toolchain[HaskellToolchainInfo] enable_profiling = ctx.attrs.enable_profiling start_ghci_file = ctx.actions.declare_output("start.ghci") _write_start_ghci(ctx, start_ghci_file, enable_profiling) ghci_bin = ctx.actions.declare_output(ctx.attrs.name + ".bin/ghci") - _symlink_ghci_binary(ctx, ghci_bin) + _symlink_ghci_binary(ctx, haskell_toolchain, ghci_bin) - haskell_toolchain = ctx.attrs._haskell_toolchain[HaskellToolchainInfo] preload_deps_info = _build_preload_deps_root(ctx, haskell_toolchain) ghci_script_template = haskell_toolchain.ghci_script_template @@ -628,11 +639,11 @@ def haskell_ghci_impl(ctx: AnalysisContext) -> list[Provider]: package_symlinks_root = ctx.label.name + ".packages" packagedb_args = cmd_args(delimiter = " ") - prebuilt_packagedb_args = cmd_args(delimiter = " ") + prebuilt_packagedb_args_set = {} - for lib in packages_info.transitive_deps: + for lib in packages_info.transitive_deps.traverse(): if lib.is_prebuilt: - prebuilt_packagedb_args.add(lib.db) + prebuilt_packagedb_args_set[lib.db] = None else: lib_symlinks_root = paths.join( package_symlinks_root, @@ -662,6 +673,7 @@ def haskell_ghci_impl(ctx: AnalysisContext) -> list[Provider]: "packagedb", ), ) + prebuilt_packagedb_args = cmd_args(prebuilt_packagedb_args_set.keys(), delimiter = " ") script_templates = [] for script_template in ctx.attrs.extra_script_templates: @@ -712,7 +724,7 @@ def haskell_ghci_impl(ctx: AnalysisContext) -> list[Provider]: "__{}__".format(ctx.label.name), output_artifacts, ) - run = cmd_args(final_ghci_script).hidden(outputs) + run = cmd_args(final_ghci_script, hidden = outputs) return [ DefaultInfo(default_outputs = [root_output_dir]), diff --git a/prelude/haskell/haskell_haddock.bzl b/prelude/haskell/haskell_haddock.bzl index d30b0c56cd728..4154e3aba4f83 100644 --- a/prelude/haskell/haskell_haddock.bzl +++ b/prelude/haskell/haskell_haddock.bzl @@ -5,5 +5,156 @@ # License, Version 2.0 found in 
the LICENSE-APACHE file in the root directory # of this source tree. -def haskell_haddock_impl(_ctx: AnalysisContext) -> list[Provider]: - return [DefaultInfo()] +load("@prelude//haskell:compile.bzl", "compile_args") +load("@prelude//haskell:link_info.bzl", "cxx_toolchain_link_style") +load( + "@prelude//haskell:toolchain.bzl", + "HaskellToolchainInfo", +) +load( + "@prelude//haskell:util.bzl", + "attr_deps", +) +load("@prelude//utils:argfile.bzl", "at_argfile") + +HaskellHaddockInfo = provider( + fields = { + "html": provider_field(typing.Any, default = None), + "interface": provider_field(typing.Any, default = None), + }, +) + +def haskell_haddock_lib(ctx: AnalysisContext, pkgname: str) -> Provider: + haskell_toolchain = ctx.attrs._haskell_toolchain[HaskellToolchainInfo] + + iface = ctx.actions.declare_output("haddock-interface") + odir = ctx.actions.declare_output("haddock-html", dir = True) + + link_style = cxx_toolchain_link_style(ctx) + args = compile_args( + ctx, + link_style, + enable_profiling = False, + suffix = "-haddock", + pkgname = pkgname, + ) + + cmd = cmd_args(haskell_toolchain.haddock) + cmd.add(cmd_args(args.args_for_cmd, format = "--optghc={}")) + cmd.add( + "--use-index", + "doc-index.html", + "--use-contents", + "index.html", + "--html", + "--hoogle", + "--no-tmp-comp-dir", + "--no-warnings", + "--dump-interface", + iface.as_output(), + "--odir", + odir.as_output(), + "--package-name", + pkgname, + ) + + for lib in attr_deps(ctx): + hi = lib.get(HaskellHaddockInfo) + if hi != None: + cmd.add("--read-interface", hi.interface) + + cmd.add(ctx.attrs.haddock_flags) + + source_entity = read_root_config("haskell", "haddock_source_entity", None) + if source_entity: + cmd.add("--source-entity", source_entity) + + if args.args_for_file: + if haskell_toolchain.use_argsfile: + ghcargs = cmd_args(args.args_for_file, format = "--optghc={}") + cmd.add(at_argfile( + actions = ctx.actions, + name = "args.haskell_haddock_argsfile", + args = [ghcargs, args.srcs], + allow_args = True, + )) + else: + cmd.add(args.args_for_file) + + # Buck2 requires that the output artifacts are always produced, but Haddock only + # creates them if it needs to, so we need a wrapper script to mkdir the outputs. 
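# Illustrative sketch, not part of this diff: with hypothetical output names,
# the generated wrapper comes out roughly as
#
#     #!/bin/sh
#     mkdir -p objects-static-haddock hi-static-haddock stubs-static-haddock \
#         && haddock --optghc=... ...
#
# so the declared output directories exist even when Haddock writes nothing
# into them.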
+    script = ctx.actions.declare_output("haddock-script")
+    script_args = cmd_args([
+        "mkdir",
+        "-p",
+        args.result.objects.as_output(),
+        args.result.hi.as_output(),
+        args.result.stubs.as_output(),
+        "&&",
+        cmd_args(cmd, quote = "shell"),
+    ], delimiter = " ")
+    ctx.actions.write(
+        script,
+        cmd_args("#!/bin/sh", script_args),
+        is_executable = True,
+        allow_args = True,
+    )
+
+    ctx.actions.run(
+        cmd_args(script, hidden = cmd),
+        category = "haskell_haddock",
+        no_outputs_cleanup = True,
+    )
+
+    return HaskellHaddockInfo(interface = iface, html = odir)
+
+def haskell_haddock_impl(ctx: AnalysisContext) -> list[Provider]:
+    haskell_toolchain = ctx.attrs._haskell_toolchain[HaskellToolchainInfo]
+
+    out = ctx.actions.declare_output("haddock-html", dir = True)
+
+    cmd = cmd_args(haskell_toolchain.haddock)
+
+    cmd.add(
+        "--gen-index",
+        "--gen-contents",
+        "-o",
+        out.as_output(),
+    )
+
+    dep_htmls = []
+    for lib in attr_deps(ctx):
+        hi = lib.get(HaskellHaddockInfo)
+        if hi != None:
+            cmd.add("--read-interface", hi.interface)
+            dep_htmls.append(hi.html)
+
+    cmd.add(ctx.attrs.haddock_flags)
+
+    script = ctx.actions.declare_output("haddock-script")
+    script_args = cmd_args([
+        "#!/bin/sh",
+        "set -ueo pipefail",
+        cmd_args(cmd, delimiter = " ", quote = "shell"),
+    ])
+    for dir in dep_htmls:
+        script_args.add(
+            cmd_args(
+                ["cp", "-Rf", "--reflink=auto", cmd_args(dir, format = "{}/*"), out.as_output()],
+                delimiter = " ",
+            ),
+        )
+    ctx.actions.write(
+        script,
+        script_args,
+        is_executable = True,
+        allow_args = True,
+    )
+
+    ctx.actions.run(
+        cmd_args(script, hidden = script_args),
+        category = "haskell_haddock",
+        no_outputs_cleanup = True,
+    )
+
+    return [DefaultInfo(default_outputs = [out])]
diff --git a/prelude/haskell/ide/README.md b/prelude/haskell/ide/README.md
new file mode 100644
index 0000000000000..4e58eed4f0a96
--- /dev/null
+++ b/prelude/haskell/ide/README.md
@@ -0,0 +1,14 @@
+# Haskell Language Server integration
+
+This integration allows loading `haskell_binary` and `haskell_library` targets
+in Haskell Language Server. This is accomplished via a BXL script that is used
+to drive a hie-bios "bios" cradle.
+
+# Usage
+
+To print the list of GHC flags and targets for a Haskell source file:
+
+buck2 bxl prelude//haskell/ide/ide.bxl -- --bios true --file <path>
+
+
+To integrate with hie-bios, copy `hie.yaml` to your repo root.
diff --git a/prelude/haskell/ide/hie.yaml b/prelude/haskell/ide/hie.yaml
new file mode 100644
index 0000000000000..9782810ab2b06
--- /dev/null
+++ b/prelude/haskell/ide/hie.yaml
@@ -0,0 +1,3 @@
+cradle:
+  bios:
+    shell: buck2 bxl prelude//haskell/ide/ide.bxl --bios true --file $HIE_BIOS_ARG > $HIE_BIOS_OUTPUT
diff --git a/prelude/haskell/ide/ide.bxl b/prelude/haskell/ide/ide.bxl
new file mode 100644
index 0000000000000..754c703305e8e
--- /dev/null
+++ b/prelude/haskell/ide/ide.bxl
@@ -0,0 +1,316 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+ +load("@prelude//haskell:library_info.bzl", "HaskellLibraryProvider") +load("@prelude//haskell:link_info.bzl", "HaskellLinkInfo") +load("@prelude//haskell:toolchain.bzl", "HaskellToolchainInfo") +load("@prelude//linking:link_info.bzl", "LinkStyle") +load("@prelude//paths.bzl", "paths") + +# This script computes a solution for loading a Haskell project in VSCode +# The solution includes: +# - flags (and materializing any artifacts used by the flags) +# - source files +# - external dependencies whose changes will cause a reload (e.g. TARGETS files) + +# There are only 3 rule types that we need to support +# - haskell_binary +# - haskell_library +# - haskell_ide (aka "projects") + +# The input to the script is usually a source file, although it can also be a target +# Solving an input involves: +# 1. Finding its owner target, if the input is a file +# 2. Finding the target's "project", which involves a rdeps search +# 3. Computing the project solution (flags, sources and dependencies) +# 4. Outputting the solution as JSON + +_HASKELL_BIN = "prelude//rules.bzl:haskell_binary" +_HASKELL_IDE = "prelude//rules.bzl:haskell_ide" +_HASKELL_LIB = "prelude//rules.bzl:haskell_library" + +linkStyle = LinkStyle("static") + +configuration_modifiers = ["ovr_config//third-party/ghc/constraints:8.8.3"] + +def _impl_target(ctx): + target = ctx.cli_args.target + project_universe = ctx.cli_args.project_universe + res = _find_project_and_solve(ctx, target, project_universe) + solution = _assembleSolution(ctx, linkStyle, res) + _print_solution(ctx, solution) + +def _print_solution(ctx, solution): + if not ctx.cli_args.bios: + ctx.output.print_json(solution) + else: + for flag in solution["flags"]: + ctx.output.print(flag) + for src in solution["sources"]: + ctx.output.print(src) + +def _impl_file(ctx): + project_universe = ctx.cli_args.project_universe + res = _solution_for_file(ctx, ctx.cli_args.file, project_universe) + solution = _assembleSolution(ctx, linkStyle, res) + _print_solution(ctx, solution) + +def _solution_for_file(ctx, file, project_universe): + unconfigured_owners = ctx.uquery().owner(file) + target_universe = ctx.target_universe(unconfigured_owners).target_set() + owners = ctx.cquery().owner(file, target_universe) + if not owners or len(owners) == 0: + return { + "external_dependencies": [], + "flags": [], + "generated_dependencies": [], + "haskell_deps": {}, + "import_dirs": [], + "owner": "No owner found for " + file, + "project": "", + "project_type": "", + "sources": [], + "targets": [], + } + + owner = owners[0] + + result = _find_project_and_solve(ctx, owner, project_universe) + result["owner"] = owner.label.raw_target() + return result + +def _find_project_and_solve(ctx, target, project_universe = []): + prefix = target.label.package.split("/", 1)[0] + local_universe = _find_project_universe(ctx, target.label.cell, prefix) + if local_universe: + project_universe.extend(local_universe) + project = _find_target_in_universe(ctx, target, dedupe(project_universe)) + + result = _solution_for_target(ctx, project) + result["project"] = project.label.raw_target() + result["project_type"] = project.rule_type + return result + +def _find_target_in_universe(ctx, target, project_universe): + for p in project_universe: + cfg_p = ctx.configured_targets( + p, + modifiers = configuration_modifiers, + ) + members = cfg_p.resolved_attrs_eager(ctx).include_projects + for member in members: + if target.label.raw_target() == member.label.raw_target(): + return cfg_p + + return target + +def 
_find_project_universe(ctx, cell, prefix): + return ctx.uquery().eval("kind(haskell_ide, %s//%s/...)" % (cell, prefix)) + +def _solution_for_target(ctx, target, exclude = {}): + result = None + if target.rule_type == _HASKELL_LIB: + result = _solution_for_haskell_lib(ctx, target, exclude) + elif target.rule_type == _HASKELL_BIN: + result = _solution_for_haskell_bin(ctx, target, exclude) + elif target.rule_type == _HASKELL_IDE: + result = _solution_for_haskell_ide(ctx, target) + + if result == None: + return {"error": "Cannot handle rule type " + target.rule_type} + return result + +def _solution_for_haskell_ide(ctx, target): + resolved_attrs = target.resolved_attrs_eager(ctx) + results = [] + deps = {} + for dep in resolved_attrs.deps_query: + t = ctx.configured_targets( + dep.label.raw_target(), + modifiers = configuration_modifiers, + ) + if (t.rule_type == _HASKELL_LIB or t.rule_type == _HASKELL_BIN): + deps[dep.label] = t + for lib in deps.values(): + results.append(_solution_for_target(ctx, lib, deps)) + + final = merge(results) + final["targets"].extend(targetsForTarget(ctx, target)) + return final + +def _solution_for_haskell_bin(ctx, target, exclude): + return _solution_for_haskell_lib(ctx, target, exclude) + +def _solution_for_haskell_lib(ctx, target, exclude): + resolved_attrs = target.resolved_attrs_eager(ctx) + hli = ctx.analysis(target).providers().get(HaskellLibraryProvider) + + haskellLibs = {} + for dep in resolved_attrs.deps + resolved_attrs.template_deps: + if exclude.get(dep.label) == None: + providers = ctx.analysis(dep.label).providers() + lb = providers.get(HaskellLinkInfo) + if lb != None: + haskellLibs[dep.label] = lb + + sources = [] + for item in ctx.output.ensure_multiple(resolved_attrs.srcs.values()): + sources.append(item.abs_path()) + + import_dirs = {} + root = ctx.root() + for key, item in resolved_attrs.srcs.items(): + # because BXL won't give you the path of an ensured artifact + sp = get_path_without_materialization(item, ctx) + (_, ext) = paths.split_extension(sp) + diff = sp.removesuffix(paths.replace_extension(key, ext)) + import_dirs["%s/%s" % (root, diff)] = () + + haskell_toolchain = ctx.analysis(resolved_attrs._haskell_toolchain.label) + toolchain = haskell_toolchain.providers().get(HaskellToolchainInfo) + + binutils_path = paths.join(root, toolchain.ghci_binutils_path) + cc_path = paths.join(root, toolchain.ghci_cc_path) + cxx_path = paths.join(root, toolchain.ghci_cxx_path) + cpp_path = paths.join(root, toolchain.ghci_cpp_path) + + flags = [ + "-this-unit-id", + "fbcode_fake_unit_id", + "-optP-undef", + "-optP-traditional-cpp", + "-I.", + "-no-global-package-db", + "-no-user-package-db", + "-hide-all-packages", + "-pgma%s" % cc_path, + "-pgml%s" % cxx_path, + "-pgmc%s" % cc_path, + "-pgmP%s" % cpp_path, + "-opta-B%s" % binutils_path, + "-optc-B%s" % binutils_path, + ] + flags.extend(resolved_attrs.compiler_flags) + + return { + "exclude_packages": {hli.lib.get(linkStyle).name: ()} if hli else {}, + "flags": flags, + "generated_dependencies": externalSourcesForTarget(ctx, target), + "haskell_deps": haskellLibs, + "import_dirs": import_dirs.keys(), + "sources": sources, + "targets": targetsForTarget(ctx, target), + } + +def targetsForTarget(ctx, target): + buildfile = ctx.cquery().buildfile(target) + root = ctx.root() + paths = [] + for b in buildfile: + paths.append("%s/%s" % (root, str(b).replace("//", "/"))) + return paths + +def externalSourcesForTarget(ctx, target): + deps3 = ctx.cquery().deps(target, 3) + thrifts = 
ctx.cquery().attrfilter("labels", "thrift_library=hs2/compile", deps3) + paths = [] + for thrift in thrifts: + paths.extend(thrift.resolved_attrs_lazy(ctx).get("srcs")) + return paths + +def merge(results): + flags = {} + sources = {} + haskellDeps = {} + import_dirs = {} + generated_dependencies = {} + targets = {} + exclude_packages = {} + + for result in results: + # TODO flags are order sensitive, so avoid the dedup dictionary trick + for flag in result["flags"]: + flags[flag] = () + for source in result["sources"]: + sources[source] = () + for p, v in result["haskell_deps"].items(): + haskellDeps[p] = v + for i in result["import_dirs"]: + import_dirs[i] = () + for dep in result["generated_dependencies"]: + generated_dependencies[dep] = () + for t in result["targets"]: + targets[t] = () + for t in result["exclude_packages"]: + exclude_packages[t] = () + + return { + "exclude_packages": exclude_packages, + "flags": flags.keys(), + "generated_dependencies": generated_dependencies.keys(), + "haskell_deps": haskellDeps, + "import_dirs": import_dirs.keys(), + "sources": sources.keys(), + "targets": targets.keys(), + } + +def _assembleSolution(ctx, linkStyle, result): + flags = result["flags"] + package_dbs = {} + for i in result["import_dirs"]: + flags.append("-i%s" % i) + hlis = {} + for provider in result["haskell_deps"].values(): + info = provider.info.get(linkStyle) + if info != None: + for item in info.traverse(): + if result["exclude_packages"].get(item.name) == None: + hlis[item.name] = item + for hli in hlis.values(): + flags.append("-package") + flags.append(hli.name) + ctx.output.ensure_multiple(hli.stub_dirs) + ctx.output.ensure_multiple(hli.libs) + ctx.output.ensure_multiple(hli.import_dirs.values()) + package_dbs[hli.db] = () + for pkgdb in ctx.output.ensure_multiple(package_dbs.keys()): + flags.append("-package-db") + flags.append(pkgdb.abs_path()) + + external_deps = result["targets"] + for s in ctx.output.ensure_multiple(result["generated_dependencies"]): + external_deps.append(s.abs_path()) + + return { + "externalDependencies": external_deps, + "flags": result["flags"], + "owner": result.get("owner"), + "owner_type": result.get("owner_type"), + "project": result.get("project"), + "project_type": result.get("project_type"), + # TODO check for duplicate module names in sources + "sources": result["sources"], + } + +_common_flags = { + "bios": cli_args.bool(False, "Output GHC flags and targets separated by newlines"), + "project_universe": cli_args.list(cli_args.target_label("list of haskell_ide targets"), []), +} + +ide_for_target = bxl_main( + impl = _impl_target, + cli_args = dict(_common_flags.items() + { + "target": cli_args.target_label(), + }.items()), +) + +ide_for_file = bxl_main( + impl = _impl_file, + cli_args = dict(_common_flags.items() + { + "file": cli_args.string("File to load in IDE"), + }.items()), +) diff --git a/prelude/haskell/library_info.bzl b/prelude/haskell/library_info.bzl new file mode 100644 index 0000000000000..3b048f1374698 --- /dev/null +++ b/prelude/haskell/library_info.bzl @@ -0,0 +1,45 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# If the target is a haskell library, the HaskellLibraryProvider +# contains its HaskellLibraryInfo. 
(in contrast to a HaskellLinkInfo, +# which contains the HaskellLibraryInfo for all the transitive +# dependencies). Direct dependencies are treated differently from +# indirect dependencies for the purposes of module visibility. +HaskellLibraryProvider = provider( + fields = { + "lib": provider_field(typing.Any, default = None), # dict[LinkStyle, HaskellLibraryInfo] + "prof_lib": provider_field(typing.Any, default = None), # dict[LinkStyle, HaskellLibraryInfo] + }, +) + +# A record of a Haskell library. +HaskellLibraryInfo = record( + # The library target name: e.g. "rts" + name = str, + # package config database: e.g. platform009/build/ghc/lib/package.conf.d + db = Artifact, + # e.g. "base-4.13.0.0" + id = str, + # Import dirs indexed by profiling enabled/disabled + import_dirs = dict[bool, Artifact], + stub_dirs = list[Artifact], + + # This field is only used as hidden inputs to compilation, to + # support Template Haskell which may need access to the libraries + # at compile time. The real library flags are propagated up the + # dependency graph via MergedLinkInfo. + libs = field(list[Artifact], []), + # Package version, used to specify the full package when exposing it, + # e.g. filepath-1.4.2.1, deepseq-1.4.4.0. + # Internal packages default to 1.0.0, e.g. `fbcode-dsi-logger-hs-types-1.0.0`. + version = str, + is_prebuilt = bool, + profiling_enabled = bool, +) + +HaskellLibraryInfoTSet = transitive_set() diff --git a/prelude/haskell/link_info.bzl b/prelude/haskell/link_info.bzl new file mode 100644 index 0000000000000..5cdc5f59265e5 --- /dev/null +++ b/prelude/haskell/link_info.bzl @@ -0,0 +1,46 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load( + "@prelude//cxx:cxx_toolchain_types.bzl", + "CxxToolchainInfo", +) +load( + "@prelude//haskell:library_info.bzl", + "HaskellLibraryInfoTSet", +) +load( + "@prelude//linking:link_info.bzl", + "LinkStyle", +) + +# A list of `HaskellLibraryInfo`s. +HaskellLinkInfo = provider( + # Contains a list of HaskellLibraryInfo records. + fields = { + "info": provider_field(dict[LinkStyle, HaskellLibraryInfoTSet]), + "prof_info": provider_field(dict[LinkStyle, HaskellLibraryInfoTSet]), + }, +) + +# HaskellProfLinkInfo exposes the MergedLinkInfo of a target and all of its +# dependencies built for profiling. This allows top-level targets (e.g. +# `haskell_binary`) to be defined with profiling enabled by default. +HaskellProfLinkInfo = provider( + fields = { + "prof_infos": provider_field(typing.Any, default = None), # MergedLinkInfo + }, +) + +def cxx_toolchain_link_style(ctx: AnalysisContext) -> LinkStyle: + return ctx.attrs._cxx_toolchain[CxxToolchainInfo].linker_info.link_style + +def attr_link_style(ctx: AnalysisContext) -> LinkStyle: + if ctx.attrs.link_style != None: + return LinkStyle(ctx.attrs.link_style) + else: + return cxx_toolchain_link_style(ctx) diff --git a/prelude/haskell/toolchain.bzl b/prelude/haskell/toolchain.bzl new file mode 100644 index 0000000000000..f6c072fbf51d5 --- /dev/null +++ b/prelude/haskell/toolchain.bzl @@ -0,0 +1,41 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +HaskellPlatformInfo = provider(fields = { + "name": provider_field(typing.Any, default = None), +}) + +HaskellToolchainInfo = provider( + # @unsorted-dict-items + fields = { + "compiler": provider_field(typing.Any, default = None), + "compiler_flags": provider_field(typing.Any, default = None), + "linker": provider_field(typing.Any, default = None), + "linker_flags": provider_field(typing.Any, default = None), + "haddock": provider_field(typing.Any, default = None), + "compiler_major_version": provider_field(typing.Any, default = None), + "package_name_prefix": provider_field(typing.Any, default = None), + "packager": provider_field(typing.Any, default = None), + "use_argsfile": provider_field(typing.Any, default = None), + "support_expose_package": provider_field(bool, default = False), + "archive_contents": provider_field(typing.Any, default = None), + "ghci_script_template": provider_field(typing.Any, default = None), + "ghci_iserv_template": provider_field(typing.Any, default = None), + "ide_script_template": provider_field(typing.Any, default = None), + "ghci_binutils_path": provider_field(typing.Any, default = None), + "ghci_lib_path": provider_field(typing.Any, default = None), + "ghci_ghc_path": provider_field(typing.Any, default = None), + "ghci_iserv_path": provider_field(typing.Any, default = None), + "ghci_iserv_prof_path": provider_field(typing.Any, default = None), + "ghci_cxx_path": provider_field(typing.Any, default = None), + "ghci_cc_path": provider_field(typing.Any, default = None), + "ghci_cpp_path": provider_field(typing.Any, default = None), + "ghci_packager": provider_field(typing.Any, default = None), + "cache_links": provider_field(typing.Any, default = None), + "script_template_processor": provider_field(typing.Any, default = None), + }, +) diff --git a/prelude/haskell/tools/BUCK b/prelude/haskell/tools/BUCK deleted file mode 100644 index 48758abb9a3c1..0000000000000 --- a/prelude/haskell/tools/BUCK +++ /dev/null @@ -1,7 +0,0 @@ -prelude = native - -prelude.python_bootstrap_binary( - name = "script_template_processor", - main = "script_template_processor.py", - visibility = ["PUBLIC"], -) diff --git a/prelude/haskell/tools/BUCK.v2 b/prelude/haskell/tools/BUCK.v2 new file mode 100644 index 0000000000000..3029719fcd616 --- /dev/null +++ b/prelude/haskell/tools/BUCK.v2 @@ -0,0 +1,13 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +prelude = native + +prelude.python_bootstrap_binary( + name = "script_template_processor", + main = "script_template_processor.py", + visibility = ["PUBLIC"], +) diff --git a/prelude/haskell/util.bzl b/prelude/haskell/util.bzl new file mode 100644 index 0000000000000..80584cd3bebff --- /dev/null +++ b/prelude/haskell/util.bzl @@ -0,0 +1,151 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
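A note on the BXL entry points earlier in this diff: ide_for_file and ide_for_target print the computed solution either as JSON or, with --bios, as newline-separated flags and sources. A minimal sketch of the JSON shape assembled by _assembleSolution follows; the keys are taken from the script, the values are purely illustrative:

    # Keys mirror the dict returned by _assembleSolution; values are made up.
    solution = {
        "externalDependencies": ["/repo/app/BUCK"],           # buildfiles plus ensured generated deps
        "flags": ["-hide-all-packages", "-package", "base"],  # GHC flags, including -package-db entries
        "owner": "root//app:lib",                             # only set when solving for a file
        "owner_type": None,
        "project": "root//app:ide",                           # the haskell_ide target found via the universe
        "project_type": "prelude//rules.bzl:haskell_ide",
        "sources": ["/repo/app/src/Main.hs"],
    }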
+ +load("@prelude//:paths.bzl", "paths") +load( + "@prelude//cxx:cxx_toolchain_types.bzl", + "CxxPlatformInfo", +) +load( + "@prelude//haskell:library_info.bzl", + "HaskellLibraryInfo", + "HaskellLibraryProvider", +) +load( + "@prelude//haskell:link_info.bzl", + "HaskellLinkInfo", + "HaskellProfLinkInfo", +) +load( + "@prelude//linking:link_info.bzl", + "LinkStyle", + "MergedLinkInfo", +) +load( + "@prelude//linking:shared_libraries.bzl", + "SharedLibraryInfo", +) +load("@prelude//utils:platform_flavors_util.bzl", "by_platform") +load("@prelude//utils:utils.bzl", "flatten") + +HASKELL_EXTENSIONS = [ + ".hs", + ".lhs", + ".hsc", + ".chs", + ".x", + ".y", +] + +# We take a named_set for srcs, which is sometimes a list, sometimes a dict. +# In future we should only accept a list, but for now, cope with both. +def srcs_to_pairs(srcs) -> list[(str, Artifact)]: + if type(srcs) == type({}): + return srcs.items() + else: + return [(src.short_path, src) for src in srcs] + +def is_haskell_src(x: str) -> bool: + _, ext = paths.split_extension(x) + return ext in HASKELL_EXTENSIONS + +def src_to_module_name(x: str) -> str: + base, _ext = paths.split_extension(x) + return base.replace("/", ".") + +def _by_platform(ctx: AnalysisContext, xs: list[(str, list[typing.Any])]) -> list[typing.Any]: + platform = ctx.attrs._cxx_toolchain[CxxPlatformInfo].name + return flatten(by_platform([platform], xs)) + +def attr_deps(ctx: AnalysisContext) -> list[Dependency]: + return ctx.attrs.deps + _by_platform(ctx, ctx.attrs.platform_deps) + +def attr_deps_haskell_link_infos(ctx: AnalysisContext) -> list[HaskellLinkInfo]: + return dedupe(filter( + None, + [ + d.get(HaskellLinkInfo) + for d in attr_deps(ctx) + ctx.attrs.template_deps + ], + )) + +# DONT CALL THIS FUNCTION, you want attr_deps_haskell_link_infos instead +def attr_deps_haskell_link_infos_sans_template_deps(ctx: AnalysisContext) -> list[HaskellLinkInfo]: + return dedupe(filter( + None, + [ + d.get(HaskellLinkInfo) + for d in attr_deps(ctx) + ], + )) + +def attr_deps_haskell_lib_infos( + ctx: AnalysisContext, + link_style: LinkStyle, + enable_profiling: bool) -> list[HaskellLibraryInfo]: + if enable_profiling and link_style == LinkStyle("shared"): + fail("Profiling isn't supported when using dynamic linking") + return [ + x.prof_lib[link_style] if enable_profiling else x.lib[link_style] + for x in filter(None, [ + d.get(HaskellLibraryProvider) + for d in attr_deps(ctx) + ctx.attrs.template_deps + ]) + ] + +def attr_deps_merged_link_infos(ctx: AnalysisContext) -> list[MergedLinkInfo]: + return dedupe(filter( + None, + [ + d.get(MergedLinkInfo) + for d in attr_deps(ctx) + ], + )) + +def attr_deps_profiling_link_infos(ctx: AnalysisContext) -> list[MergedLinkInfo]: + return filter( + None, + [ + d.get(HaskellProfLinkInfo).prof_infos if d.get(HaskellProfLinkInfo) else d.get(MergedLinkInfo) + for d in attr_deps(ctx) + ], + ) + +def attr_deps_shared_library_infos(ctx: AnalysisContext) -> list[SharedLibraryInfo]: + return filter( + None, + [ + d.get(SharedLibraryInfo) + for d in attr_deps(ctx) + ], + ) + +def _link_style_extensions(link_style: LinkStyle) -> (str, str): + if link_style == LinkStyle("shared"): + return ("dyn_o", "dyn_hi") + elif link_style == LinkStyle("static_pic"): + return ("o", "hi") # is this right? 
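+        # GHC emits no distinct object/interface suffixes for PIC code:
+        # static_pic objects are plain "o"/"hi" just like static, and only
+        # dynamic output gets the "dyn_" prefix (profiling prepends "p_"
+        # below), so this appears right.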
+ elif link_style == LinkStyle("static"): + return ("o", "hi") + fail("unknown LinkStyle") + +def output_extensions( + link_style: LinkStyle, + profiled: bool) -> (str, str): + osuf, hisuf = _link_style_extensions(link_style) + if profiled: + return ("p_" + osuf, "p_" + hisuf) + else: + return (osuf, hisuf) + +# Single place to build the suffix used in artifacts (e.g. package directories, +# lib names) considering attributes like link style and profiling. +def get_artifact_suffix(link_style: LinkStyle, enable_profiling: bool, suffix: str = "") -> str: + artifact_suffix = link_style.value + if enable_profiling: + artifact_suffix += "-prof" + return artifact_suffix + suffix diff --git a/prelude/http_archive/http_archive.bzl b/prelude/http_archive/http_archive.bzl index 0d115228111ec..cd04c269450df 100644 --- a/prelude/http_archive/http_archive.bzl +++ b/prelude/http_archive/http_archive.bzl @@ -6,12 +6,14 @@ # of this source tree. load("@prelude//os_lookup:defs.bzl", "OsLookup") -load("@prelude//utils:utils.bzl", "expect", "value_or") +load("@prelude//utils:expect.bzl", "expect") +load("@prelude//utils:utils.bzl", "value_or") load(":exec_deps.bzl", "HttpArchiveExecDeps") # Flags to apply to decompress the various types of archives. _TAR_FLAGS = { "tar": [], + "tar.bz2": ["-j"], "tar.gz": ["-z"], "tar.xz": ["-J"], "tar.zst": ["--use-compress-program=unzstd"], @@ -65,8 +67,9 @@ def _unarchive_cmd( archive, "--stdout", "|", - "tar", + "%WINDIR%\\System32\\tar.exe", "-x", + "-P", "-f", "-", _tar_strip_prefix_flags(strip_prefix), @@ -75,8 +78,9 @@ def _unarchive_cmd( # unzip and zip are not cli commands available on windows. however, the # bsdtar that ships with windows has builtin support for zip return cmd_args( - "tar", + "%WINDIR%\\System32\\tar.exe", "-x", + "-P", "-f", archive, _tar_strip_prefix_flags(strip_prefix), @@ -85,9 +89,15 @@ def _unarchive_cmd( # Else hope for the best if ext_type in _TAR_FLAGS: + os_flags = [ + # buck-out is a symlink with EdenFS, and tar on Windows doesn't like it, + # and needs -P flag to allow operations with symlinks + "-P", + ] if exec_is_windows else [] return cmd_args( "tar", _TAR_FLAGS[ext_type], + os_flags, "-x", "-f", archive, @@ -185,14 +195,17 @@ def http_archive_impl(ctx: AnalysisContext) -> list[Provider]: [ cmd_args(script_output, format = mkdir), cmd_args(script_output, format = "cd {}"), - cmd_args([unarchive_cmd] + exclude_flags, delimiter = " ").relative_to(script_output), + cmd_args([unarchive_cmd] + exclude_flags, delimiter = " ", relative_to = script_output), ], is_executable = True, allow_args = True, ) ctx.actions.run( - cmd_args(interpreter + [script]).hidden(exclude_hidden + [archive, script_output.as_output()]), + cmd_args( + interpreter + [script], + hidden = exclude_hidden + [archive, script_output.as_output()], + ), category = "http_archive", prefer_local = prefer_local, ) diff --git a/prelude/http_archive/tools/BUCK b/prelude/http_archive/tools/BUCK deleted file mode 100644 index f08b7dcc79ba5..0000000000000 --- a/prelude/http_archive/tools/BUCK +++ /dev/null @@ -1,13 +0,0 @@ -load("@prelude//http_archive/exec_deps.bzl", "http_archive_exec_deps") - -prelude = native - -http_archive_exec_deps( - name = "exec_deps", - visibility = ["PUBLIC"], -) - -prelude.python_bootstrap_binary( - name = "create_exclusion_list", - main = "create_exclusion_list.py", -) diff --git a/prelude/http_archive/tools/BUCK.v2 b/prelude/http_archive/tools/BUCK.v2 new file mode 100644 index 0000000000000..b91ae412d47ee --- /dev/null +++ 
b/prelude/http_archive/tools/BUCK.v2 @@ -0,0 +1,18 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") +load("@prelude//http_archive/exec_deps.bzl", "http_archive_exec_deps") + +oncall("build_infra") + +source_listing() + +prelude = native + +http_archive_exec_deps( + name = "exec_deps", + visibility = ["PUBLIC"], +) + +prelude.python_bootstrap_binary( + name = "create_exclusion_list", + main = "create_exclusion_list.py", +) diff --git a/prelude/http_file.bzl b/prelude/http_file.bzl index b494bb7918095..beadc30d73671 100644 --- a/prelude/http_file.bzl +++ b/prelude/http_file.bzl @@ -5,7 +5,8 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -load("@prelude//utils:utils.bzl", "expect", "value_or") +load("@prelude//utils:expect.bzl", "expect") +load("@prelude//utils:utils.bzl", "value_or") def http_file_shared( actions: AnalysisActions, diff --git a/prelude/ide_integrations/xcode.bzl b/prelude/ide_integrations/xcode.bzl index f8f2cda0d3a74..3434ac117a45f 100644 --- a/prelude/ide_integrations/xcode.bzl +++ b/prelude/ide_integrations/xcode.bzl @@ -5,6 +5,8 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +XCODE_ARGSFILES_SUB_TARGET = "xcode-argsfiles" + XCODE_DATA_SUB_TARGET = "xcode-data" _XCODE_DATA_FILE_NAME = "xcode_data.json" @@ -12,18 +14,51 @@ XcodeDataInfo = provider(fields = { "data": provider_field(typing.Any, default = None), # {str: _a} }) +XcodeDataInfoKeys = struct( + ARCH = "arch", + ARGSFILES_BY_EXT = "argsfiles_by_ext", + BUNDLE_TYPE = "bundle_type", + CONTAINS_SWIFT_SOURCES = "contains_swift_sources", + DEFAULT_TARGET_PLATFORM = "default_target_platform", + DEPLOYMENT_VERSION = "deployment_version", + EXPORTED_HEADERS = "exported_headers", + EXTRA_XCODE_FILES = "extra_xcode_files", + HEADERS = "headers", + INFO_PLIST = "info_plist", + OUTPUT = "output", + PROCESSED_INFO_PLIST = "processed_info_plist", + INFO_PLIST_RELATIVE_PATH = "info_plist_relative_path", + PRODUCT_NAME = "product_name", + RULE_TYPE = "rule_type", + SDK = "sdk", + SRCS = "srcs", + SWIFT_VERSION = "swift_version", + TARGET = "target", + TEST_HOST_APP_BINARY = "test_host_app_binary", + TEST_TARGET = "test_target", + TEST_TYPE = "test_type", + XCTOOLCHAIN_BUNDLE_ID_TARGET = "xctoolchain_bundle_id_target", + XCTOOLCHAIN_BUNDLE_ID = "xctoolchain_bundle_id", + XCTOOLCHAIN_BUNDLE_TARGET = "xctoolchain_bundle_target", +) + def generate_xcode_data( ctx: AnalysisContext, rule_type: str, - output: [Artifact, None], + output: Artifact | None, populate_rule_specific_attributes_func: [typing.Callable, None] = None, **kwargs) -> (list[DefaultInfo], XcodeDataInfo): data = { - "rule_type": rule_type, - "target": ctx.label, + XcodeDataInfoKeys.RULE_TYPE: rule_type, + XcodeDataInfoKeys.TARGET: ctx.label, } if output: - data["output"] = output + data[XcodeDataInfoKeys.OUTPUT] = output + + data[XcodeDataInfoKeys.EXTRA_XCODE_FILES] = [] + if hasattr(ctx.attrs, "extra_xcode_files"): + data[XcodeDataInfoKeys.EXTRA_XCODE_FILES] = ctx.attrs.extra_xcode_files + if populate_rule_specific_attributes_func: data.update(populate_rule_specific_attributes_func(ctx, **kwargs)) diff --git a/prelude/java/class_to_srcs.bzl b/prelude/java/class_to_srcs.bzl index d8bc8fa03ca77..b026964ae6327 100644 --- a/prelude/java/class_to_srcs.bzl +++ b/prelude/java/class_to_srcs.bzl @@ -7,11 +7,11 @@ load( "@prelude//java:java_toolchain.bzl", - "JavaTestToolchainInfo", # @unused Used as a type "JavaToolchainInfo", # @unused Used as a type 
) +load("@prelude//utils:argfile.bzl", "at_argfile") -def _class_to_src_map_args(mapping: [Artifact, None]): +def _class_to_src_map_args(mapping: Artifact | None): if mapping != None: return cmd_args(mapping) return cmd_args() @@ -34,8 +34,8 @@ JavaClassToSourceMapInfo = provider( def create_class_to_source_map_info( ctx: AnalysisContext, - mapping: [Artifact, None] = None, - mapping_debuginfo: [Artifact, None] = None, + mapping: Artifact | None = None, + mapping_debuginfo: Artifact | None = None, deps = [Dependency]) -> JavaClassToSourceMapInfo: # Only generate debuginfo if the debug info tool is available. java_toolchain = ctx.attrs._java_toolchain[JavaToolchainInfo] @@ -55,7 +55,7 @@ def create_class_to_source_map_info( actions = ctx.actions, java_toolchain = ctx.attrs._java_toolchain[JavaToolchainInfo], tset_debuginfo = tset_debuginfo, - name = ctx.attrs.name + ".debuginfo_merged.json", + name = ctx.label.name + ".debuginfo_merged.json", ) return JavaClassToSourceMapInfo( @@ -73,21 +73,28 @@ def create_class_to_source_map_from_jar( name: str, java_toolchain: JavaToolchainInfo, jar: Artifact, - srcs: list[Artifact]) -> Artifact: + srcs: list[Artifact], + sources_jar_name: [str, None] = None) -> (Artifact, Artifact | None): output = actions.declare_output(name) cmd = cmd_args(java_toolchain.gen_class_to_source_map[RunInfo]) + if java_toolchain.gen_class_to_source_map_include_sourceless_compiled_packages != None: + for item in java_toolchain.gen_class_to_source_map_include_sourceless_compiled_packages: + cmd.add("-i", item) cmd.add("-o", output.as_output()) cmd.add(jar) - for src in srcs: - cmd.add(cmd_args(src)) + cmd.add(at_argfile(actions = actions, name = "class_to_srcs_map_argsfile.txt", args = srcs)) + sources_jar = None + if sources_jar_name: + sources_jar = actions.declare_output(sources_jar_name) + cmd.add("--sources_jar", sources_jar.as_output()) actions.run(cmd, category = "class_to_srcs_map") - return output + return (output, sources_jar) def maybe_create_class_to_source_map_debuginfo( actions: AnalysisActions, name: str, java_toolchain: JavaToolchainInfo, - srcs: list[Artifact]) -> [Artifact, None]: + srcs: list[Artifact]) -> Artifact | None: # Only generate debuginfo if the debug info tool is available. if java_toolchain.gen_class_to_source_map_debuginfo == None: return None @@ -96,34 +103,33 @@ def maybe_create_class_to_source_map_debuginfo( cmd = cmd_args(java_toolchain.gen_class_to_source_map_debuginfo[RunInfo]) cmd.add("gen") cmd.add("-o", output.as_output()) - inputs_file = actions.write("sourcefiles.txt", srcs) - cmd.add(cmd_args(inputs_file, format = "@{}")) - cmd.hidden(srcs) + cmd.add(at_argfile(actions = actions, name = "sourcefiles.txt", args = srcs)) actions.run(cmd, category = "class_to_srcs_map_debuginfo") return output def merge_class_to_source_map_from_jar( actions: AnalysisActions, name: str, - java_test_toolchain: JavaTestToolchainInfo, - mapping: [Artifact, None] = None, - relative_to: [CellRoot, None] = None, - # TODO(nga): I think this meant to be type, not default value. 
- deps = [JavaClassToSourceMapInfo.type]) -> Artifact: + java_toolchain: JavaToolchainInfo, + relative_to: [CellRoot, None], + deps: list[JavaClassToSourceMapInfo]) -> Artifact: output = actions.declare_output(name) - cmd = cmd_args(java_test_toolchain.merge_class_to_source_maps[RunInfo]) - cmd.add(cmd_args(output.as_output(), format = "--output={}")) - if relative_to != None: - cmd.add(cmd_args(str(relative_to), format = "--relative-to={}")) + tset = actions.tset( JavaClassToSourceMapTset, - value = mapping, + value = None, children = [d.tset for d in deps], ) class_to_source_files = tset.project_as_args("class_to_src_map") mappings_file = actions.write("class_to_src_map.txt", class_to_source_files) - cmd.add(["--mappings", mappings_file]) - cmd.hidden(class_to_source_files) + + cmd = cmd_args( + java_toolchain.merge_class_to_source_maps[RunInfo], + cmd_args(output.as_output(), format = "--output={}"), + cmd_args(str(relative_to), format = "--relative-to={}") if relative_to != None else [], + ["--mappings", mappings_file], + hidden = class_to_source_files, + ) actions.run(cmd, category = "merge_class_to_srcs_map") return output @@ -142,8 +148,7 @@ def _create_merged_debug_info( children = [tset_debuginfo], ) input_files = tset.project_as_args("class_to_src_map") - input_list_file = actions.write("debuginfo_list.txt", input_files) - cmd.add(cmd_args(input_list_file, format = "@{}")) - cmd.hidden(input_files) + cmd.add(at_argfile(actions = actions, name = "debuginfo_list.txt", args = input_files)) + actions.run(cmd, category = "merged_debuginfo") return output diff --git a/prelude/java/dex.bzl b/prelude/java/dex.bzl index d2fd2f6ddd2d0..16a0c7680317e 100644 --- a/prelude/java/dex.bzl +++ b/prelude/java/dex.bzl @@ -51,7 +51,7 @@ def get_dex_produced_from_java_library( else: desugar_deps_file = ctx.actions.write(prefix + "_desugar_deps_file.txt", desugar_deps) d8_cmd.add(["--classpath-files", desugar_deps_file]) - d8_cmd.hidden(desugar_deps) + d8_cmd.add(cmd_args(hidden = desugar_deps)) referenced_resources_file = ctx.actions.declare_output(prefix + "_referenced_resources.txt") d8_cmd.add(["--referenced-resources-path", referenced_resources_file.as_output()]) @@ -71,7 +71,7 @@ def get_dex_produced_from_java_library( identifier = "{}:{} {}".format(ctx.label.package, ctx.label.name, output_dex_file.short_path) ctx.actions.run( d8_cmd, - category = "d8", + category = "pre_dex", identifier = identifier, ) diff --git a/prelude/java/gwt_binary.bzl b/prelude/java/gwt_binary.bzl new file mode 100644 index 0000000000000..d5d86cc592b18 --- /dev/null +++ b/prelude/java/gwt_binary.bzl @@ -0,0 +1,64 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
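The gwt_binary implementation below drives com.google.gwt.dev.Compiler. Judging purely from the attributes the implementation reads, a target declaration might look like this (a sketch; the values are illustrative, not taken from the diff):

    gwt_binary(
        name = "web",
        modules = ["com.example.MyModule"],  # GWT module names, appended last to the command line
        module_deps = [":gwt-widgets"],      # packaging deps whose gwt_module jars go on the classpath
        deps = [":server-lib"],              # compiling deps, full_library jars on the classpath
        style = "OBFUSCATED",
        optimize = 9,
        local_workers = 4,                   # must be greater than zero, checked by expect()
        draft_compile = False,
        strict = True,
        vm_args = ["-Xmx2g"],
        experimental_args = [],
    )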
+ +load("@prelude//java:java_toolchain.bzl", "JavaToolchainInfo") +load("@prelude//java/utils:java_more_utils.bzl", "get_path_separator_for_exec_os") +load("@prelude//utils:expect.bzl", "expect") +load( + ":java_providers.bzl", + "derive_compiling_deps", + "get_all_java_packaging_deps", +) + +_GWT_COMPILER_CLASS = "com.google.gwt.dev.Compiler" + +def gwt_binary_impl(ctx: AnalysisContext) -> list[Provider]: + expect(ctx.attrs.local_workers > 0, "local workers must be greater than zero") + + output = ctx.actions.declare_output("{}.zip".format(ctx.label.name)) + + # Write deploy files to separate directory so that the generated .zip is smaller + deploy_output = ctx.actions.declare_output("deploy") + + module_deps_classpath = [dep.gwt_module for dep in get_all_java_packaging_deps(ctx, ctx.attrs.module_deps) if dep.gwt_module] + compiling_deps_tset = derive_compiling_deps(ctx.actions, None, ctx.attrs.deps) + deps_classpath = [dep.full_library for dep in (list(compiling_deps_tset.traverse()) if compiling_deps_tset else [])] + + java_toolchain = ctx.attrs._java_toolchain[JavaToolchainInfo] + gwt_args = cmd_args([ + java_toolchain.java[RunInfo], + "-Dgwt.normalizeTimestamps=true", + ctx.attrs.vm_args, + "-classpath", + cmd_args(module_deps_classpath + deps_classpath, delimiter = get_path_separator_for_exec_os(ctx)), + _GWT_COMPILER_CLASS, + "-war", + output.as_output(), + "-style", + ctx.attrs.style, + "-optimize", + str(ctx.attrs.optimize), + "-localWorkers", + str(ctx.attrs.local_workers), + "-deploy", + deploy_output.as_output(), + ]) + + if ctx.attrs.draft_compile: + gwt_args.add("-draftCompile") + if ctx.attrs.strict: + gwt_args.add("-strict") + gwt_args.add(ctx.attrs.experimental_args) + gwt_args.add(ctx.attrs.modules) + + ctx.actions.run(gwt_args, category = "gwt_binary") + + sub_targets = {"deploy": [DefaultInfo(default_output = deploy_output)]} + + return [ + DefaultInfo(default_output = output, sub_targets = sub_targets), + ] diff --git a/prelude/java/jar_genrule.bzl b/prelude/java/jar_genrule.bzl index d8f9e1848d493..627a1f2b3d706 100644 --- a/prelude/java/jar_genrule.bzl +++ b/prelude/java/jar_genrule.bzl @@ -7,7 +7,7 @@ load("@prelude//:genrule.bzl", "process_genrule") load("@prelude//java:java_toolchain.bzl", "JavaToolchainInfo") -load("@prelude//utils:utils.bzl", "expect") +load("@prelude//utils:expect.bzl", "expect") def jar_genrule_impl(ctx: AnalysisContext) -> list[Provider]: output_name = "{}.jar".format(ctx.label.name) diff --git a/prelude/java/java.bzl b/prelude/java/java.bzl index 7e0372cf36773..8ab08fcb5f62d 100644 --- a/prelude/java/java.bzl +++ b/prelude/java/java.bzl @@ -5,6 +5,7 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
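The java.bzl hunk below registers the new gwt_binary rule and adds a validation-deps attribute to java_library. Assuming VALIDATION_DEPS_ATTR_NAME resolves to "validation_deps" (the constant is defined outside this diff), usage would look roughly like:

    java_library(
        name = "lib",
        srcs = glob(["src/**/*.java"]),
        # Hypothetical validation target: its outputs are threaded into the
        # compile action as hidden inputs (see build_java_library further
        # down), so the library fails to build when validation fails.
        validation_deps = [":lib-api-check"],
    )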
+load("@prelude//:validation_deps.bzl", "VALIDATION_DEPS_ATTR_NAME") load("@prelude//android:build_only_native_code.bzl", "is_build_only_native_code") load("@prelude//android:configuration.bzl", "is_building_android_binary_attr") load("@prelude//android:min_sdk_version.bzl", "get_min_sdk_version_constraint_value_name", "get_min_sdk_version_range") @@ -13,6 +14,7 @@ load("@prelude//java/plugins:java_plugin.bzl", "java_plugin_impl") load("@prelude//decls/common.bzl", "buck") load("@prelude//decls/toolchains_common.bzl", "toolchains_common") load("@prelude//genrule.bzl", "genrule_attributes") +load(":gwt_binary.bzl", "gwt_binary_impl") load(":jar_genrule.bzl", "jar_genrule_impl") load(":java_binary.bzl", "java_binary_impl") load(":java_library.bzl", "java_library_impl") @@ -31,6 +33,7 @@ def dex_min_sdk_version(): return select(min_sdk_version_dict) implemented_rules = { + "gwt_binary": gwt_binary_impl, "jar_genrule": jar_genrule_impl, "java_annotation_processor": java_annotation_processor_impl, "java_binary": java_binary_impl, @@ -42,6 +45,10 @@ implemented_rules = { } extra_attributes = { + "gwt_binary": { + "_exec_os_type": buck.exec_os_type_arg(), + "_java_toolchain": toolchains_common.java(), + }, "jar_genrule": genrule_attributes() | { "_java_toolchain": toolchains_common.java(), }, @@ -58,8 +65,8 @@ extra_attributes = { }, "java_library": { "abi_generation_mode": attrs.option(attrs.enum(AbiGenerationMode), default = None), - "javac": attrs.option(attrs.one_of(attrs.dep(), attrs.source()), default = None), "resources_root": attrs.option(attrs.string(), default = None), + VALIDATION_DEPS_ATTR_NAME: attrs.set(attrs.dep(), sorted = True, default = []), "_build_only_native_code": attrs.default_only(attrs.bool(default = is_build_only_native_code())), "_dex_min_sdk_version": attrs.option(attrs.int(), default = dex_min_sdk_version()), "_dex_toolchain": toolchains_common.dex(), @@ -72,8 +79,9 @@ extra_attributes = { }, "java_test": { "abi_generation_mode": attrs.option(attrs.enum(AbiGenerationMode), default = None), - "javac": attrs.option(attrs.one_of(attrs.dep(), attrs.source()), default = None), + "java_agents": attrs.list(attrs.source(), default = []), "resources_root": attrs.option(attrs.string(), default = None), + "test_class_names_file": attrs.option(attrs.source(), default = None), "unbundled_resources_root": attrs.option(attrs.source(allow_directory = True), default = None), "_build_only_native_code": attrs.default_only(attrs.bool(default = is_build_only_native_code())), "_exec_os_type": buck.exec_os_type_arg(), @@ -87,6 +95,7 @@ extra_attributes = { }, "prebuilt_jar": { "generate_abi": attrs.bool(default = True), + "is_executable": attrs.bool(default = False), # Prebuilt jars are quick to build, and often contain third-party code, which in turn is # often a source of annotations and constants. To ease migration to ABI generation from # source without deps, we have them present during ABI gen by default. diff --git a/prelude/java/java_binary.bzl b/prelude/java/java_binary.bzl index 40a7a613c1aea..5891f6c705056 100644 --- a/prelude/java/java_binary.bzl +++ b/prelude/java/java_binary.bzl @@ -5,12 +5,16 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
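The java_binary changes below add incremental fat-jar assembly: jars from targets matching incremental_target_prefix are layered on top of a reusable "deps_" jar, which in turn can be seeded from base_dep via --append_jar. A sketch of a target opting in, with illustrative labels:

    java_binary(
        name = "app",
        main_class = "com.example.Main",
        deps = [":app-modules", ":third-party"],
        # Everything under this prefix is treated as the fast-changing layer
        # and appended last, on top of the prebuilt dependency jar.
        incremental_target_prefix = "root//app/modules/",
        base_dep = ":app-core",  # its jar seeds the deps_ jar via --append_jar
    )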
-# @starlark-rust: allow_string_literals_in_type_expr - load("@prelude//java:java_toolchain.bzl", "JavaToolchainInfo") load("@prelude//java/utils:java_utils.bzl", "get_class_to_source_map_info", "get_classpath_subtarget") -load("@prelude//linking:shared_libraries.bzl", "SharedLibraryInfo", "merge_shared_libraries", "traverse_shared_library_info") -load("@prelude//utils:utils.bzl", "expect") +load( + "@prelude//linking:shared_libraries.bzl", + "SharedLibrary", # @unused used as type + "SharedLibraryInfo", + "merge_shared_libraries", + "traverse_shared_library_info", +) +load("@prelude//utils:expect.bzl", "expect") load( ":java_providers.bzl", "create_template_info", @@ -18,32 +22,39 @@ load( "get_java_packaging_info", ) -def _generate_script(generate_wrapper: bool, native_libs: dict[str, "SharedLibrary"]) -> bool: +def _generate_script(generate_wrapper: bool, native_libs: list[SharedLibrary]) -> bool: # if `generate_wrapper` is set and no native libs then it should be a wrapper script as result, # otherwise fat jar will be generated (inner jar or script will be included inside a final fat jar) return generate_wrapper and len(native_libs) == 0 def _create_fat_jar( ctx: AnalysisContext, - java_toolchain: JavaToolchainInfo, jars: cmd_args, - native_libs: dict[str, "SharedLibrary"], - do_not_create_inner_jar: bool, - generate_wrapper: bool) -> list[Artifact]: + native_libs: list[SharedLibrary] = [], + name_prefix: str = "", + do_not_create_inner_jar: bool = True, + generate_wrapper: bool = False, + main_class: [str, None] = None, + append_jar: [Artifact, None] = None) -> list[Artifact]: + java_toolchain = ctx.attrs._java_toolchain[JavaToolchainInfo] extension = "sh" if _generate_script(generate_wrapper, native_libs) else "jar" - output = ctx.actions.declare_output("{}.{}".format(ctx.label.name, extension)) + output = ctx.actions.declare_output("{}{}.{}".format(name_prefix, ctx.label.name, extension)) args = [ java_toolchain.fat_jar[RunInfo], "--jar_builder_tool", cmd_args(java_toolchain.jar_builder, delimiter = " "), + "--zip_scrubber_tool", + cmd_args(java_toolchain.zip_scrubber, delimiter = " "), "--output", output.as_output(), "--jars_file", - ctx.actions.write("jars_file", jars), + ctx.actions.write("{}jars_file".format(name_prefix), jars), ] - local_only = False + if append_jar: + args += ["--append_jar", append_jar] + if native_libs: expect( java_toolchain.is_bootstrap_toolchain == False, @@ -51,7 +62,7 @@ def _create_fat_jar( ) args += [ "--native_libs_file", - ctx.actions.write("native_libs", [cmd_args([so_name, native_lib.lib.output], delimiter = " ") for so_name, native_lib in native_libs.items()]), + ctx.actions.write("{}native_libs".format(name_prefix), [cmd_args([native_lib.soname.ensure_str(), native_lib.lib.output], delimiter = " ") for native_lib in native_libs]), ] if do_not_create_inner_jar: args += [ @@ -69,15 +80,7 @@ def _create_fat_jar( "nativelibs", ] - # TODO(T151045001) native deps are not compressed (for performance), but that can result in - # really large binaries. Large outputs can cause issues on RE, so we run locally instead. 
- local_only = "run_locally_if_has_native_deps" in ctx.attrs.labels - - main_class = ctx.attrs.main_class if main_class: - if do_not_create_inner_jar and native_libs: - fail("For performance reasons, java binaries with a main class and native libs should always generate an inner jar.\ - The reason for having inner.jar is so that we don't have to compress the native libraries, which is slow at compilation time and also at runtime (when decompressing).") args += ["--main_class", main_class] manifest_file = ctx.attrs.manifest_file @@ -86,7 +89,7 @@ def _create_fat_jar( blocklist = ctx.attrs.blacklist if blocklist: - args += ["--blocklist", ctx.actions.write("blocklist_args", blocklist)] + args += ["--blocklist", ctx.actions.write("{}blocklist_args".format(name_prefix), blocklist)] if ctx.attrs.meta_inf_directory: args += ["--meta_inf_directory", ctx.attrs.meta_inf_directory] @@ -105,13 +108,15 @@ def _create_fat_jar( ] outputs.append(classpath_args_output) - fat_jar_cmd = cmd_args(args) - fat_jar_cmd.hidden(jars, [native_lib.lib.output for native_lib in native_libs.values()]) + fat_jar_cmd = cmd_args( + args, + hidden = [jars] + [native_lib.lib.output for native_lib in native_libs], + ) ctx.actions.run( fat_jar_cmd, - local_only = local_only, - category = "fat_jar", + local_only = False, + category = "{}fat_jar".format(name_prefix), allow_cache_upload = True, ) @@ -166,41 +171,98 @@ def java_binary_impl(ctx: AnalysisContext) -> list[Provider]: ) native_deps = traverse_shared_library_info(shared_library_info) + base_dep = ctx.attrs.base_dep java_toolchain = ctx.attrs._java_toolchain[JavaToolchainInfo] need_to_generate_wrapper = ctx.attrs.generate_wrapper == True do_not_create_inner_jar = ctx.attrs.do_not_create_inner_jar == True packaging_jar_args = packaging_info.packaging_deps.project_as_args("full_jar_args") - outputs = _create_fat_jar(ctx, java_toolchain, cmd_args(packaging_jar_args), native_deps, do_not_create_inner_jar, need_to_generate_wrapper) + incremental_target_prefix = ctx.attrs.incremental_target_prefix + main_class = ctx.attrs.main_class - main_artifact = outputs[0] other_outputs = [] + if incremental_target_prefix: + base_jar = None + incremental_jars = [] + dependency_jars = [] + + # separate jars in groups + for dep in packaging_jar_args.transitive_set.traverse(): + if dep.jar: + # lookup for the base jar that can be used to append all other dependencies + if base_dep and dep.label.raw_target() == base_dep.label.raw_target(): + expect( + base_jar == None, + "JAR can only have one base JAR file.", + ) + base_jar = dep.jar + elif str(dep.label.raw_target()).startswith(incremental_target_prefix): + # if it's not a base jar, it can be an incremental jar or dependency only + incremental_jars.append(dep.jar) + else: + dependency_jars.append(dep.jar) + + # collect incremental targets + expect( + len(incremental_jars) > 0, + "No incremental dependencies found that starts with {}.".format(incremental_target_prefix), + ) + + # generate intermediary jar only with dependencies + deps_outputs = _create_fat_jar( + ctx, + cmd_args(dependency_jars), + name_prefix = "deps_", + append_jar = base_jar, + ) + other_outputs = [deps_outputs[0]] + + # generate final jar appending modules to the dependencies jar + outputs = _create_fat_jar( + ctx, + cmd_args(incremental_jars), + native_libs = native_deps, + do_not_create_inner_jar = do_not_create_inner_jar, + generate_wrapper = need_to_generate_wrapper, + main_class = main_class, + append_jar = deps_outputs[0], + ) + else: + outputs = _create_fat_jar( 
+ ctx, + cmd_args(packaging_jar_args), + native_libs = native_deps, + do_not_create_inner_jar = do_not_create_inner_jar, + generate_wrapper = need_to_generate_wrapper, + main_class = main_class, + ) + run_cmd = _get_run_cmd( attrs = ctx.attrs, script_mode = _generate_script(need_to_generate_wrapper, native_deps), - main_artifact = main_artifact, + main_artifact = outputs[0], java_toolchain = java_toolchain, ) if need_to_generate_wrapper: classpath_file = outputs[1] - run_cmd.hidden( + run_cmd.add(cmd_args(hidden = [ java_toolchain.java[RunInfo], classpath_file, packaging_jar_args, - ) + ])) other_outputs = [classpath_file] + [packaging_jar_args] + _get_java_tool_artifacts(java_toolchain) sub_targets = get_classpath_subtarget(ctx.actions, packaging_info) - class_to_src_map, _ = get_class_to_source_map_info( + class_to_src_map, _, _ = get_class_to_source_map_info( ctx, outputs = None, deps = ctx.attrs.deps, ) return [ - DefaultInfo(default_output = main_artifact, other_outputs = other_outputs, sub_targets = sub_targets), + DefaultInfo(default_output = outputs[0], other_outputs = other_outputs, sub_targets = sub_targets), RunInfo(args = run_cmd), create_template_info(ctx, packaging_info, first_order_libs), class_to_src_map, diff --git a/prelude/java/java_library.bzl b/prelude/java/java_library.bzl index d4ade59619950..52ce9f7027398 100644 --- a/prelude/java/java_library.bzl +++ b/prelude/java/java_library.bzl @@ -5,12 +5,12 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -# @starlark-rust: allow_string_literals_in_type_expr - load("@prelude//:paths.bzl", "paths") +load("@prelude//:validation_deps.bzl", "get_validation_deps_outputs") load("@prelude//android:android_providers.bzl", "merge_android_packageable_info") load( "@prelude//java:java_providers.bzl", + "JavaCompileOutputs", # @unused Used as type "JavaLibraryInfo", "JavaPackagingDepTSet", "JavaProviders", @@ -18,6 +18,7 @@ load( "create_java_library_providers", "create_native_providers", "derive_compiling_deps", + "generate_java_classpath_snapshot", "make_compile_outputs", "to_list", ) @@ -25,11 +26,17 @@ load("@prelude//java:java_resources.bzl", "get_resources_map") load("@prelude//java:java_toolchain.bzl", "AbiGenerationMode", "JavaToolchainInfo") load("@prelude//java:javacd_jar_creator.bzl", "create_jar_artifact_javacd") load("@prelude//java/plugins:java_annotation_processor.bzl", "AnnotationProcessorProperties", "create_annotation_processor_properties") -load("@prelude//java/plugins:java_plugin.bzl", "create_plugin_params") -load("@prelude//java/utils:java_utils.bzl", "declare_prefixed_name", "derive_javac", "get_abi_generation_mode", "get_class_to_source_map_info", "get_default_info", "get_java_version_attributes", "get_path_separator_for_exec_os", "to_java_version") +load( + "@prelude//java/plugins:java_plugin.bzl", + "PluginParams", # @unused Used as type + "create_plugin_params", +) +load("@prelude//java/utils:java_more_utils.bzl", "get_path_separator_for_exec_os") +load("@prelude//java/utils:java_utils.bzl", "declare_prefixed_name", "derive_javac", "get_abi_generation_mode", "get_class_to_source_map_info", "get_default_info", "get_java_version_attributes", "to_java_version") +load("@prelude//jvm:cd_jar_creator_util.bzl", "postprocess_jar") load("@prelude//jvm:nullsafe.bzl", "get_nullsafe_info") load("@prelude//linking:shared_libraries.bzl", "SharedLibraryInfo") -load("@prelude//utils:utils.bzl", "expect") +load("@prelude//utils:expect.bzl", "expect") _JAVA_FILE_EXTENSION = 
[".java"] _SUPPORTED_ARCHIVE_SUFFIXES = [".src.zip", "-sources.jar"] @@ -37,9 +44,8 @@ _SUPPORTED_ARCHIVE_SUFFIXES = [".src.zip", "-sources.jar"] def _process_classpath( actions: AnalysisActions, classpath_args: cmd_args, - cmd: cmd_args, args_file_name: str, - option_name: str): + option_name: str) -> cmd_args: # write joined classpath string into args file classpath_args_file, _ = actions.write( args_file_name, @@ -47,22 +53,24 @@ def _process_classpath( allow_args = True, ) - # mark classpath artifacts as input - cmd.hidden(classpath_args) - - # add classpath args file to cmd - cmd.add(option_name, classpath_args_file) + return cmd_args( + option_name, + # add classpath args file to cmd + classpath_args_file, + # mark classpath artifacts as input + hidden = classpath_args, + ) -def classpath_args(ctx: AnalysisContext, args): +def _classpath_args(ctx: AnalysisContext, args): return cmd_args(args, delimiter = get_path_separator_for_exec_os(ctx)) def _process_plugins( ctx: AnalysisContext, actions_identifier: [str, None], annotation_processor_properties: AnnotationProcessorProperties, - plugin_params: ["PluginParams", None], - javac_args: cmd_args, - cmd: cmd_args): + plugin_params: [PluginParams, None], + javac_args: cmd_args) -> cmd_args: + cmd = cmd_args() processors_classpath_tsets = [] # Process Annotation processors @@ -84,8 +92,7 @@ def _process_plugins( # Process Javac Plugins if plugin_params: - plugin = plugin_params.processors[0] - args = plugin_params.args.get(plugin, cmd_args()) + plugin, args = plugin_params.processors[0] # Produces "-Xplugin:PluginName arg1 arg2 arg3", as a single argument plugin_and_args = cmd_args(plugin) @@ -105,14 +112,15 @@ def _process_plugins( processors_classpath_tset = None if processors_classpath_tset: - processors_classpath = classpath_args(ctx, processors_classpath_tset.project_as_args("full_jar_args")) - _process_classpath( + processors_classpath = _classpath_args(ctx, processors_classpath_tset.project_as_args("full_jar_args")) + cmd.add(_process_classpath( ctx.actions, processors_classpath, - cmd, declare_prefixed_name("plugin_cp_args", actions_identifier), "--javac_processors_classpath_file", - ) + )) + + return cmd def _build_classpath(actions: AnalysisActions, deps: list[Dependency], additional_classpath_entries: list[Artifact], classpath_args_projection: str) -> [cmd_args, None]: compiling_deps_tset = derive_compiling_deps(actions, None, deps) @@ -128,12 +136,11 @@ def _build_classpath(actions: AnalysisActions, deps: list[Dependency], additiona def _build_bootclasspath(bootclasspath_entries: list[Artifact], source_level: int, java_toolchain: JavaToolchainInfo) -> list[Artifact]: bootclasspath_list = [] - if source_level in [7, 8]: + if source_level in [8]: if bootclasspath_entries: bootclasspath_list = bootclasspath_entries - elif source_level == 7: - bootclasspath_list = java_toolchain.bootclasspath_7 elif source_level == 8: + expect(java_toolchain.bootclasspath_8, "Must specify bootclasspath for source level 8") bootclasspath_list = java_toolchain.bootclasspath_8 return bootclasspath_list @@ -144,15 +151,15 @@ def _append_javac_params( srcs: list[Artifact], remove_classes: list[str], annotation_processor_properties: AnnotationProcessorProperties, - javac_plugin_params: ["PluginParams", None], + javac_plugin_params: [PluginParams, None], source_level: int, target_level: int, deps: list[Dependency], extra_arguments: cmd_args, additional_classpath_entries: list[Artifact], bootclasspath_entries: list[Artifact], - cmd: cmd_args, - 
generated_sources_dir: Artifact): + generated_sources_dir: Artifact) -> cmd_args: + cmd = cmd_args() javac_args = cmd_args( "-encoding", "utf-8", @@ -164,13 +171,12 @@ def _append_javac_params( compiling_classpath = _build_classpath(ctx.actions, deps, additional_classpath_entries, "args_for_compiling") if compiling_classpath: - _process_classpath( + cmd.add(_process_classpath( ctx.actions, - classpath_args(ctx, compiling_classpath), - cmd, + _classpath_args(ctx, compiling_classpath), declare_prefixed_name("classpath_args", actions_identifier), "--javac_classpath_file", - ) + )) else: javac_args.add("-classpath ''") @@ -181,22 +187,20 @@ def _append_javac_params( bootclasspath_list = _build_bootclasspath(bootclasspath_entries, source_level, java_toolchain) if bootclasspath_list: - _process_classpath( + cmd.add(_process_classpath( ctx.actions, - classpath_args(ctx, bootclasspath_list), - cmd, + _classpath_args(ctx, bootclasspath_list), declare_prefixed_name("bootclasspath_args", actions_identifier), "--javac_bootclasspath_file", - ) + )) - _process_plugins( + cmd.add(_process_plugins( ctx, actions_identifier, annotation_processor_properties, javac_plugin_params, javac_args, - cmd, - ) + )) cmd.add("--generated_sources_dir", generated_sources_dir.as_output()) @@ -208,20 +212,22 @@ def _append_javac_params( javac_args, allow_args = True, ) - cmd.hidden(javac_args) + cmd.add(cmd_args(hidden = javac_args)) # mark plain srcs artifacts as input - cmd.hidden(plain_sources) + cmd.add(cmd_args(hidden = plain_sources)) cmd.add("--javac_args_file", args_file) if zipped_sources: cmd.add("--zipped_sources_file", ctx.actions.write(declare_prefixed_name("zipped_source_args", actions_identifier), zipped_sources)) - cmd.hidden(zipped_sources) + cmd.add(cmd_args(hidden = zipped_sources)) if remove_classes: cmd.add("--remove_classes", ctx.actions.write(declare_prefixed_name("remove_classes_args", actions_identifier), remove_classes)) + return cmd + def split_on_archives_and_plain_files( srcs: list[Artifact], plain_file_extensions: list[str]) -> (list[Artifact], list[Artifact]): @@ -271,15 +277,15 @@ def compile_to_jar( srcs: list[Artifact], *, abi_generation_mode: [AbiGenerationMode, None] = None, - output: [Artifact, None] = None, + output: Artifact | None = None, actions_identifier: [str, None] = None, javac_tool: [typing.Any, None] = None, resources: [list[Artifact], None] = None, resources_root: [str, None] = None, remove_classes: [list[str], None] = None, - manifest_file: [Artifact, None] = None, + manifest_file: Artifact | None = None, annotation_processor_properties: [AnnotationProcessorProperties, None] = None, - plugin_params: ["PluginParams", None] = None, + plugin_params: [PluginParams, None] = None, source_level: [int, None] = None, target_level: [int, None] = None, deps: [list[Dependency], None] = None, @@ -287,9 +293,10 @@ def compile_to_jar( source_only_abi_deps: [list[Dependency], None] = None, extra_arguments: [cmd_args, None] = None, additional_classpath_entries: [list[Artifact], None] = None, - additional_compiled_srcs: [Artifact, None] = None, + additional_compiled_srcs: Artifact | None = None, bootclasspath_entries: [list[Artifact], None] = None, - is_creating_subtarget: bool = False) -> "JavaCompileOutputs": + is_creating_subtarget: bool = False, + debug_port: [int, None] = None) -> JavaCompileOutputs: if not additional_classpath_entries: additional_classpath_entries = [] if not bootclasspath_entries: @@ -343,6 +350,7 @@ def compile_to_jar( bootclasspath_entries, 
is_building_android_binary, is_creating_subtarget, + debug_port, ) def _create_jar_artifact( @@ -351,15 +359,15 @@ def _create_jar_artifact( abi_generation_mode: [AbiGenerationMode, None], java_toolchain: JavaToolchainInfo, label: Label, - output: [Artifact, None], + output: Artifact | None, javac_tool: [typing.Any, None], srcs: list[Artifact], remove_classes: list[str], resources: list[Artifact], resources_root: [str, None], - manifest_file: [Artifact, None], + manifest_file: Artifact | None, annotation_processor_properties: AnnotationProcessorProperties, - plugin_params: ["PluginParams", None], + plugin_params: [PluginParams, None], source_level: int, target_level: int, deps: list[Dependency], @@ -367,10 +375,11 @@ def _create_jar_artifact( _source_only_abi_deps: list[Dependency], extra_arguments: cmd_args, additional_classpath_entries: list[Artifact], - additional_compiled_srcs: [Artifact, None], + additional_compiled_srcs: Artifact | None, bootclasspath_entries: list[Artifact], _is_building_android_binary: bool, - _is_creating_subtarget: bool = False) -> "JavaCompileOutputs": + _is_creating_subtarget: bool = False, + _debug_port: [int, None] = None) -> JavaCompileOutputs: """ Creates jar artifact. @@ -408,7 +417,7 @@ def _create_jar_artifact( generated_sources_dir = None if not skip_javac: generated_sources_dir = ctx.actions.declare_output(declare_prefixed_name("generated_sources", actions_identifier), dir = True) - _append_javac_params( + compile_and_package_cmd.add(_append_javac_params( ctx, actions_identifier, java_toolchain, @@ -422,19 +431,24 @@ def _create_jar_artifact( extra_arguments, additional_classpath_entries, bootclasspath_entries, - compile_and_package_cmd, generated_sources_dir, - ) + )) ctx.actions.run(compile_and_package_cmd, category = "javac_and_jar", identifier = actions_identifier) abi = None if (not srcs and not additional_compiled_srcs) or abi_generation_mode == AbiGenerationMode("none") or java_toolchain.is_bootstrap_toolchain else create_abi(ctx.actions, java_toolchain.class_abi_generator, jar_out) + has_postprocessor = hasattr(ctx.attrs, "jar_postprocessor") and ctx.attrs.jar_postprocessor + final_jar = postprocess_jar(ctx.actions, java_toolchain.zip_scrubber, ctx.attrs.jar_postprocessor[RunInfo], java_toolchain.postprocessor_runner[RunInfo], jar_out, actions_identifier) if has_postprocessor else jar_out + + jar_snapshot = generate_java_classpath_snapshot(ctx.actions, java_toolchain.cp_snapshot_generator, abi or final_jar, actions_identifier) return make_compile_outputs( - full_library = jar_out, + full_library = final_jar, + preprocessed_library = jar_out, class_abi = abi, required_for_source_only_abi = required_for_source_only_abi, annotation_processor_output = generated_sources_dir, + abi_jar_snapshot = jar_snapshot, ) def _check_dep_types(deps: list[Dependency]): @@ -456,6 +470,10 @@ def _check_exported_deps(exported_deps: list[Dependency], attr_name: str): "Exported deps are meant to be forwarded onto the classpath for dependents, so only " + "make sense for a target that emits Java bytecode, {} in {} does not.".format(exported_dep, attr_name), ) + expect( + not exported_dep[JavaLibraryInfo].may_not_be_exported, + "{} has 'may_not_be_exported' label and should not be present in {}.".format(exported_dep.label.raw_target(), attr_name), + ) # TODO(T145137403) remove need for this def _skip_java_library_dep_checks(ctx: AnalysisContext) -> bool: @@ -495,7 +513,11 @@ def java_library_impl(ctx: AnalysisContext) -> list[Provider]: 
_check_dep_types(ctx.attrs.exported_provided_deps) _check_dep_types(ctx.attrs.runtime_deps) - java_providers = build_java_library(ctx, ctx.attrs.srcs) + java_providers = build_java_library( + ctx = ctx, + srcs = ctx.attrs.srcs, + validation_deps_outputs = get_validation_deps_outputs(ctx), + ) return to_list(java_providers) + [android_packageable_info] @@ -505,10 +527,11 @@ def build_java_library( run_annotation_processors = True, additional_classpath_entries: list[Artifact] = [], bootclasspath_entries: list[Artifact] = [], - additional_compiled_srcs: [Artifact, None] = None, + additional_compiled_srcs: Artifact | None = None, generated_sources: list[Artifact] = [], override_abi_generation_mode: [AbiGenerationMode, None] = None, - extra_sub_targets: dict = {}) -> JavaProviders: + extra_sub_targets: dict = {}, + validation_deps_outputs: [list[Artifact], None] = None) -> JavaProviders: expect( not getattr(ctx.attrs, "_build_only_native_code", False), "Shouldn't call build_java_library if we're only building native code!", @@ -557,6 +580,7 @@ def build_java_library( "additional_compiled_srcs": additional_compiled_srcs, "annotation_processor_properties": annotation_processor_properties, "bootclasspath_entries": bootclasspath_entries, + "debug_port": getattr(ctx.attrs, "debug_port", None), "deps": first_order_deps, "javac_tool": derive_javac(ctx.attrs.javac) if ctx.attrs.javac else None, "manifest_file": manifest_file, @@ -570,10 +594,17 @@ def build_java_library( "target_level": target_level, } + # The outputs of validation_deps need to be added as hidden arguments + # to an action for the validation_deps targets to be built and enforced. + extra_arguments = cmd_args( + ctx.attrs.extra_arguments, + hidden = validation_deps_outputs or [], + ) + outputs = compile_to_jar( ctx, plugin_params = plugin_params, - extra_arguments = cmd_args(ctx.attrs.extra_arguments), + extra_arguments = extra_arguments, **common_compile_kwargs ) @@ -599,6 +630,31 @@ def build_java_library( DefaultInfo(default_output = nullsafe_info.output), ]} + gwt_output = None + if ( + (srcs or resources) and + not java_toolchain.is_bootstrap_toolchain and + not ctx.attrs._is_building_android_binary + ): + gwt_output = ctx.actions.declare_output("gwt_module/{}.jar".format(ctx.label.name)) + entries = [] + + if srcs or resources: + entries.append(_copy_resources(ctx.actions, "gwt_module", java_toolchain, ctx.label.package, srcs + resources, resources_root)) + if outputs and outputs.annotation_processor_output: + entries.append(outputs.annotation_processor_output) + + gwt_cmd_args = cmd_args( + java_toolchain.jar_builder, + "--entries-to-jar", + ctx.actions.write("gwt_entries.txt", entries), + "--output", + gwt_output.as_output(), + hidden = entries, + ) + + ctx.actions.run(gwt_cmd_args, category = "gwt_module") + all_generated_sources = list(generated_sources) if outputs and outputs.annotation_processor_output: all_generated_sources.append(outputs.annotation_processor_output) @@ -608,9 +664,18 @@ def build_java_library( DefaultInfo(default_output = all_generated_sources[0]), ]} - java_library_info, java_packaging_info, shared_library_info, cxx_resource_info, linkable_graph, template_placeholder_info, intellij_info = create_java_library_providers( + class_to_src_map, sources_jar, class_to_src_map_sub_targets = get_class_to_source_map_info( + ctx, + outputs = outputs, + deps = ctx.attrs.deps + deps_query + ctx.attrs.exported_deps, + generate_sources_jar = True, + ) + extra_sub_targets = extra_sub_targets | class_to_src_map_sub_targets 
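+    # create_java_library_providers (see java_providers.bzl below) also yields
+    # a JavaGlobalCodeInfo and consumes the sources jar, the GWT module jar
+    # and the unprocessed library produced above.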
diff --git a/prelude/java/java_providers.bzl b/prelude/java/java_providers.bzl
index bb1b178a2086d..064d4581d0a31 100644
--- a/prelude/java/java_providers.bzl
+++ b/prelude/java/java_providers.bzl
@@ -5,8 +5,6 @@
 # License, Version 2.0 found in the LICENSE-APACHE file in the root directory
 # of this source tree.
 
-# @starlark-rust: allow_string_literals_in_type_expr
-
 load(
     "@prelude//:resources.bzl",
     "ResourceInfo",
@@ -15,14 +13,18 @@ load(
 load("@prelude//java:class_to_srcs.bzl", "JavaClassToSourceMapInfo")
 load("@prelude//java:dex.bzl", "DexLibraryInfo", "get_dex_produced_from_java_library")
 load("@prelude//java:dex_toolchain.bzl", "DexToolchainInfo")
-load("@prelude//java/utils:java_utils.bzl", "get_path_separator_for_exec_os")
+load("@prelude//java/utils:java_more_utils.bzl", "get_path_separator_for_exec_os")
 load("@prelude//linking:linkable_graph.bzl", "LinkableGraph", "create_linkable_graph")
 load(
     "@prelude//linking:shared_libraries.bzl",
     "SharedLibraryInfo",
     "merge_shared_libraries",
 )
-load("@prelude//utils:utils.bzl", "expect")
+load("@prelude//utils:expect.bzl", "expect")
+load(
+    "@prelude//utils:utils.bzl",
+    "flatten",
+)
 
 # JAVA PROVIDER DOCS
 #
@@ -92,8 +94,9 @@ JavaClasspathEntry = record(
     abi = field(Artifact),
     # abi_as_dir is the abi .jar unzipped into a directory. If available, it is used to provide
     # .class level granularity for javacd and kotlincd dep-files.
-    abi_as_dir = field([Artifact, None]),
+    abi_as_dir = field(Artifact | None),
     required_for_source_only_abi = field(bool),
+    abi_jar_snapshot = field(Artifact | None),
 )
 
 def _args_for_ast_dumper(entry: JavaClasspathEntry):
@@ -127,14 +130,16 @@ JavaCompilingDepsTSet = transitive_set(
 
 JavaPackagingDep = record(
     label = Label,
-    jar = [Artifact, None],
+    jar = Artifact | None,
     dex = [DexLibraryInfo, None],
+    gwt_module = Artifact | None,
     is_prebuilt_jar = bool,
-    proguard_config = [Artifact, None],
+    proguard_config = Artifact | None,
     # An output that is used solely by the system to have an artifact bound to the target (that the core can then use to find
     # the right target from the given artifact).
     output_for_classpath_macro = Artifact,
+    sources_jar = Artifact | None,
 )
 
 def _full_jar_args(dep: JavaPackagingDep):
@@ -158,6 +163,17 @@ JavaPackagingDepTSet = transitive_set(
     },
 )
 
+JavaGlobalCodeInfo = provider(
+    doc = """This dictionary maps a framework key to its corresponding GlobalCodeConfig. The GlobalCodeConfig specifies the dependency .jars required by the framework for global-level code generation (binary level).
+    The process responsible for generating the global_code_info provider for the target utilizes this mapping to:
+    * Retrieve the GlobalCodeConfig associated with each framework, identified by its key.
+    * Determine whether the .jar files for the library or any of its dependencies are necessary for global code generation for that particular framework.
+    * Create a mapping from each framework key to a list of the required .jars identified in the previous step.""",
+    fields = {
+        "global_code_map": provider_field(typing.Any, default = None),  # "{name: JavaCompilingDepsTSet}"
+    },
+)
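A hypothetical consumer of `JavaGlobalCodeInfo` might look like the sketch below; the rule, its `framework_key` attribute, and its `deps` are invented for illustration. It flattens one framework's `JavaCompilingDepsTSet` into the jars a binary-level codegen step would compile against:

    # Sketch only: collect the jars registered under a single framework key
    # from the JavaGlobalCodeInfo providers of this target's deps.
    def _global_code_jars_impl(ctx: AnalysisContext) -> list[Provider]:
        jars = []
        for dep in ctx.attrs.deps:
            info = dep.get(JavaGlobalCodeInfo)
            if info and info.global_code_map:
                tset = info.global_code_map.get(ctx.attrs.framework_key)
                if tset:
                    # Each node of a JavaCompilingDepsTSet is a JavaClasspathEntry.
                    jars.extend([entry.full_library for entry in tset.traverse()])
        return [DefaultInfo(default_outputs = jars)]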
+ "preprocessed_library": provider_field(typing.Any, default = None), # ["artifact", None] }, ) @@ -205,17 +230,20 @@ KeystoreInfo = provider( JavaCompileOutputs = record( full_library = Artifact, - class_abi = [Artifact, None], - source_abi = [Artifact, None], - source_only_abi = [Artifact, None], + class_abi = Artifact | None, + source_abi = Artifact | None, + source_only_abi = Artifact | None, classpath_entry = JavaClasspathEntry, - annotation_processor_output = [Artifact, None], + annotation_processor_output = Artifact | None, + preprocessed_library = Artifact, + incremental_state_dir = Artifact | None, ) JavaProviders = record( java_library_info = JavaLibraryInfo, java_library_intellij_info = JavaLibraryIntellijInfo, java_packaging_info = JavaPackagingInfo, + java_global_code_info = JavaGlobalCodeInfo, shared_library_info = SharedLibraryInfo, cxx_resource_info = ResourceInfo, linkable_graph = LinkableGraph, @@ -229,6 +257,7 @@ def to_list(java_providers: JavaProviders) -> list[Provider]: java_providers.java_library_info, java_providers.java_library_intellij_info, java_providers.java_packaging_info, + java_providers.java_global_code_info, java_providers.shared_library_info, java_providers.cxx_resource_info, java_providers.linkable_graph, @@ -243,13 +272,16 @@ def to_list(java_providers: JavaProviders) -> list[Provider]: # specific artifact to be used as the abi for the JavaClasspathEntry. def make_compile_outputs( full_library: Artifact, - class_abi: [Artifact, None] = None, - source_abi: [Artifact, None] = None, - source_only_abi: [Artifact, None] = None, - classpath_abi: [Artifact, None] = None, - classpath_abi_dir: [Artifact, None] = None, + preprocessed_library: Artifact, + class_abi: Artifact | None = None, + source_abi: Artifact | None = None, + source_only_abi: Artifact | None = None, + classpath_abi: Artifact | None = None, + classpath_abi_dir: Artifact | None = None, required_for_source_only_abi: bool = False, - annotation_processor_output: [Artifact, None] = None) -> JavaCompileOutputs: + annotation_processor_output: Artifact | None = None, + incremental_state_dir: Artifact | None = None, + abi_jar_snapshot: Artifact | None = None) -> JavaCompileOutputs: expect(classpath_abi != None or classpath_abi_dir == None, "A classpath_abi_dir should only be provided if a classpath_abi is provided!") return JavaCompileOutputs( full_library = full_library, @@ -261,8 +293,11 @@ def make_compile_outputs( abi = classpath_abi or class_abi or full_library, abi_as_dir = classpath_abi_dir, required_for_source_only_abi = required_for_source_only_abi, + abi_jar_snapshot = abi_jar_snapshot, ), annotation_processor_output = annotation_processor_output, + preprocessed_library = preprocessed_library, + incremental_state_dir = incremental_state_dir, ) def create_abi(actions: AnalysisActions, class_abi_generator: Dependency, library: Artifact) -> Artifact: @@ -282,11 +317,31 @@ def create_abi(actions: AnalysisActions, class_abi_generator: Dependency, librar ) return class_abi +def generate_java_classpath_snapshot(actions: AnalysisActions, snapshot_generator: Dependency | None, library: Artifact, action_identifier: str | None) -> Artifact | None: + if not snapshot_generator: + return None + identifier = ( + "{}_".format(action_identifier) if action_identifier else "" + ) + library.short_path.replace("/", "_").split(".")[0] + output = actions.declare_output("{}_jar_snapshot.bin".format(identifier)) + actions.run( + [ + snapshot_generator[RunInfo], + "--input-jar", + library, + "--output-snapshot", + 
 # Accumulate deps necessary for compilation, which consist of this library's output and compiling_deps of its exported deps
 def derive_compiling_deps(
         actions: AnalysisActions,
         library_output: [JavaClasspathEntry, None],
-        children: list[Dependency]) -> ["JavaCompilingDepsTSet", None]:
+        children: list[Dependency]) -> [JavaCompilingDepsTSet, None]:
     if children:
         filtered_children = filter(
             None,
@@ -304,14 +359,16 @@ def derive_compiling_deps(
 
 def create_java_packaging_dep(
         ctx: AnalysisContext,
-        library_jar: [Artifact, None] = None,
-        output_for_classpath_macro: [Artifact, None] = None,
+        library_jar: Artifact | None = None,
+        output_for_classpath_macro: Artifact | None = None,
         needs_desugar: bool = False,
         desugar_deps: list[Artifact] = [],
         is_prebuilt_jar: bool = False,
         has_srcs: bool = True,
+        sources_jar: Artifact | None = None,
         dex_weight_factor: int = 1,
-        proguard_config: [Artifact, None] = None) -> "JavaPackagingDep":
+        proguard_config: Artifact | None = None,
+        gwt_module: Artifact | None = None) -> JavaPackagingDep:
     dex_toolchain = getattr(ctx.attrs, "_dex_toolchain", None)
     if library_jar != None and has_srcs and dex_toolchain != None and ctx.attrs._dex_toolchain[DexToolchainInfo].d8_command != None:
         dex = get_dex_produced_from_java_library(
@@ -331,15 +388,17 @@ def create_java_packaging_dep(
         label = ctx.label,
         jar = library_jar,
         dex = dex,
+        gwt_module = gwt_module,
         is_prebuilt_jar = is_prebuilt_jar,
         proguard_config = proguard_config or getattr(ctx.attrs, "proguard_config", None),
         output_for_classpath_macro = output_for_classpath_macro or library_jar,
+        sources_jar = sources_jar,
     )
 
-def get_all_java_packaging_deps(ctx: AnalysisContext, deps: list[Dependency]) -> list["JavaPackagingDep"]:
+def get_all_java_packaging_deps(ctx: AnalysisContext, deps: list[Dependency]) -> list[JavaPackagingDep]:
     return get_all_java_packaging_deps_from_packaging_infos(ctx, filter(None, [x.get(JavaPackagingInfo) for x in deps]))
 
-def get_all_java_packaging_deps_from_packaging_infos(ctx: AnalysisContext, infos: list[JavaPackagingInfo]) -> list["JavaPackagingDep"]:
+def get_all_java_packaging_deps_from_packaging_infos(ctx: AnalysisContext, infos: list[JavaPackagingInfo]) -> list[JavaPackagingDep]:
     children = filter(None, [info.packaging_deps for info in infos])
     if not children:
         return []
@@ -371,6 +430,86 @@ def get_java_packaging_info(
     packaging_deps = get_all_java_packaging_deps_tset(ctx, java_packaging_infos, java_packaging_dep)
     return JavaPackagingInfo(packaging_deps = packaging_deps)
 
+def _create_java_compiling_deps_tset_for_global_code(
+        actions: AnalysisActions,
+        global_code_library: [JavaCompilingDepsTSet, None],
+        name: str,
+        global_code_infos: list[JavaGlobalCodeInfo]) -> [JavaCompilingDepsTSet, None]:
+    global_code_jars_kwargs = {}
+    global_code_jars_children = filter(None, [info.global_code_map.get(name, None) for info in global_code_infos])
+    if global_code_library:
+        global_code_jars_children.append(global_code_library)
+    if global_code_jars_children:
+        global_code_jars_kwargs["children"] = global_code_jars_children
+
+    return actions.tset(JavaCompilingDepsTSet, **global_code_jars_kwargs) if global_code_jars_kwargs else None
+
+# This function identifies and collects necessary dependencies that meet criteria defined in `GLOBAL_CODE_CONFIG` for global code generation across frameworks.
+# It maps framework names to their corresponding Java compiling dependency sets.
+# Example: Below configuration specifies criteria for the "di" framework:
+# GLOBAL_CODE_CONFIG = {
+#     "di": (
+#         triggers = ["//fbandroid/java/com/facebook/inject:inject"],
+#         deps = [],
+#         requires_first_order_classpath = False,
+#     ),
+# }
+# With this setup, if a target depends on "//fbandroid/java/com/facebook/inject:inject", the `global_code_info` provider for that target will have an entry under "di".
+# This entry will be a JavaCompilingDepsTSet containing the .jar files associated with that target.
+# Each framework (like "di") can use a Buck rule to identify dependencies with matching values for their framework key in the `global_code_info` provider.
+# They can then compile all the .jars needed for global code generation.
+def get_global_code_info(
+        ctx: AnalysisContext,
+        declared_deps: list[Dependency],
+        packaging_deps: list[Dependency],
+        single_library_dep: [JavaCompilingDepsTSet, None],
+        library_compiling_deps: [JavaCompilingDepsTSet, None],
+        first_order_compiling_deps: [JavaCompilingDepsTSet, None],
+        global_code_config: dict) -> JavaGlobalCodeInfo:
+    global_code_infos = filter(None, [x.get(JavaGlobalCodeInfo) for x in packaging_deps])
+
+    def declared_deps_contains_trigger(deps_triggers: list[TargetLabel]):
+        for deps_trigger in deps_triggers:
+            for declared_dep in declared_deps:
+                if declared_dep.label.raw_target() == deps_trigger:
+                    return True
+
+        return False
+
+    global_code_map = {}
+    for name, (config) in global_code_config.items():
+        contains_trigger = declared_deps_contains_trigger(config.triggers)
+        target_is_global_code_dep = ctx.label.raw_target() in config.deps
+        if (contains_trigger or target_is_global_code_dep) and config.requires_first_order_classpath:
+            global_code_library_compiling_deps = first_order_compiling_deps
+        elif target_is_global_code_dep:
+            global_code_library_compiling_deps = library_compiling_deps
+        elif contains_trigger:
+            global_code_library_compiling_deps = single_library_dep
+        else:
+            global_code_library_compiling_deps = None
+
+        global_code_tset = _create_java_compiling_deps_tset_for_global_code(ctx.actions, global_code_library_compiling_deps, name, global_code_infos)
+        if global_code_tset:
+            global_code_map[name] = global_code_tset
+
+    return JavaGlobalCodeInfo(global_code_map = global_code_map)
+
+def propagate_global_code_info(
+        ctx: AnalysisContext,
+        packaging_deps: list[Dependency]) -> JavaGlobalCodeInfo:
+    global_code_map = {}
+    global_code_infos = filter(None, [x.get(JavaGlobalCodeInfo) for x in packaging_deps])
+    keys = set(flatten([info.global_code_map.keys() for info in global_code_infos]))
+
+    for key in keys:
+        global_code_tset = _create_java_compiling_deps_tset_for_global_code(ctx.actions, None, key, global_code_infos)
+        if global_code_tset:
+            global_code_map[key] = global_code_tset
+
+    return JavaGlobalCodeInfo(global_code_map = global_code_map)
+
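A worked example may help here, reusing the sample `GLOBAL_CODE_CONFIG` from the comment above; the target names are illustrative:

    # "di" triggers on //fbandroid/java/com/facebook/inject:inject,
    # with deps = [] and requires_first_order_classpath = False.
    #
    # A target //app:feature that declares a dep on ...:inject hits the
    # contains_trigger branch, so global_code_library_compiling_deps is
    # single_library_dep (just //app:feature's own jar), and
    # global_code_map["di"] merges that jar with every "di" entry collected
    # from the packaging deps.
    #
    # A target //app:plain with no trigger dep and no entry in config.deps
    # takes the final else branch: it contributes nothing of its own, and
    # "di" only carries forward whatever its deps already registered.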
 def create_native_providers(ctx: AnalysisContext, label: Label, packaging_deps: list[Dependency]) -> (SharedLibraryInfo, ResourceInfo, LinkableGraph):
     shared_library_info = merge_shared_libraries(
         ctx.actions,
@@ -387,6 +526,7 @@ def create_native_providers(ctx: AnalysisContext, label: Label, packaging_deps:
 def _create_non_template_providers(
         ctx: AnalysisContext,
         library_output: [JavaClasspathEntry, None],
+        global_code_config,
         declared_deps: list[Dependency] = [],
         exported_deps: list[Dependency] = [],
         exported_provided_deps: list[Dependency] = [],
@@ -395,7 +535,10 @@ def _create_non_template_providers(
         desugar_classpath: list[Artifact] = [],
         is_prebuilt_jar: bool = False,
         has_srcs: bool = True,
-        proguard_config: [Artifact, None] = None) -> (JavaLibraryInfo, JavaPackagingInfo, SharedLibraryInfo, ResourceInfo, LinkableGraph):
+        sources_jar: Artifact | None = None,
+        proguard_config: Artifact | None = None,
+        gwt_module: Artifact | None = None,
+        first_order_compiling_deps: JavaCompilingDepsTSet | None = None) -> (JavaLibraryInfo, JavaPackagingInfo, JavaGlobalCodeInfo, SharedLibraryInfo, ResourceInfo, LinkableGraph):
     """Creates java library providers of type `JavaLibraryInfo` and `JavaPackagingInfo`.
 
     Args:
@@ -406,6 +549,12 @@ def _create_non_template_providers(
         runtime_deps: dependencies that are used for packaging only
     """
     packaging_deps = declared_deps + exported_deps + runtime_deps
+    for dep in packaging_deps:
+        if JavaLibraryInfo in dep and dep[JavaLibraryInfo].may_not_be_packaged:
+            fail("{} has 'may_not_be_packaged' label but is present in {}. If you need to use it in order to build the library, move it into 'provided_deps'".format(
+                dep.label.raw_target(),
+                ctx.label.raw_target(),
+            ))
     shared_library_info, cxx_resource_info, linkable_graph = create_native_providers(ctx, ctx.label, packaging_deps)
 
     output_for_classpath_macro = library_output.abi if (library_output and library_output.abi.owner != None) else ctx.actions.write("dummy_output_for_classpath_macro.txt", "Unused")
@@ -417,7 +566,9 @@ def _create_non_template_providers(
         desugar_classpath,
         is_prebuilt_jar,
         has_srcs,
+        sources_jar,
         proguard_config = proguard_config,
+        gwt_module = gwt_module,
     )
 
     java_packaging_info = get_java_packaging_info(
@@ -426,13 +577,28 @@ def _create_non_template_providers(
         java_packaging_dep = java_packaging_dep,
     )
 
+    compiling_deps = derive_compiling_deps(ctx.actions, library_output, exported_deps + exported_provided_deps)
+
+    global_code_info = get_global_code_info(
+        ctx,
+        declared_deps,
+        packaging_deps,
+        derive_compiling_deps(ctx.actions, library_output, []),
+        compiling_deps,
+        first_order_compiling_deps,
+        global_code_config,
+    )
+
     return (
         JavaLibraryInfo(
-            compiling_deps = derive_compiling_deps(ctx.actions, library_output, exported_deps + exported_provided_deps),
+            compiling_deps = compiling_deps,
             library_output = library_output,
             output_for_classpath_macro = output_for_classpath_macro,
+            may_not_be_exported = "may_not_be_exported" in (ctx.attrs.labels or []),
+            may_not_be_packaged = "may_not_be_packaged" in (ctx.attrs.labels or []),
         ),
         java_packaging_info,
+        global_code_info,
         shared_library_info,
         cxx_resource_info,
         linkable_graph,
@@ -448,6 +614,7 @@ def create_template_info(ctx: AnalysisContext, packaging_info: JavaPackagingInfo
 def create_java_library_providers(
         ctx: AnalysisContext,
         library_output: [JavaClasspathEntry, None],
+        global_code_config,
         declared_deps: list[Dependency] = [],
         exported_deps: list[Dependency] = [],
         provided_deps: list[Dependency] = [],
@@ -456,19 +623,25 @@ def create_java_library_providers(
         needs_desugar: bool = False,
         is_prebuilt_jar: bool = False,
         has_srcs: bool = True,
+        sources_jar: Artifact | None = None,
         generated_sources: list[Artifact] = [],
-        annotation_jars_dir: [Artifact, None] = None,
-        proguard_config: [Artifact, None] = None) -> (JavaLibraryInfo, JavaPackagingInfo, SharedLibraryInfo, ResourceInfo, LinkableGraph, TemplatePlaceholderInfo, JavaLibraryIntellijInfo):
+        annotation_jars_dir: Artifact | None = None,
+        proguard_config: Artifact | None = None,
+        gwt_module: Artifact | None = None,
+        lint_jar: Artifact | None = None,
+        preprocessed_library: Artifact | None = None) -> (JavaLibraryInfo, JavaPackagingInfo, JavaGlobalCodeInfo, SharedLibraryInfo, ResourceInfo, LinkableGraph, TemplatePlaceholderInfo, JavaLibraryIntellijInfo):
     first_order_classpath_deps = filter(None, [x.get(JavaLibraryInfo) for x in declared_deps + exported_deps + runtime_deps])
     first_order_classpath_libs = [dep.output_for_classpath_macro for dep in first_order_classpath_deps]
 
     compiling_deps = derive_compiling_deps(ctx.actions, None, declared_deps + exported_deps + provided_deps + exported_provided_deps)
+    first_order_compiling_deps = derive_compiling_deps(ctx.actions, library_output, declared_deps + exported_deps + provided_deps + exported_provided_deps) if library_output else compiling_deps
     compiling_classpath = [dep.full_library for dep in (list(compiling_deps.traverse()) if compiling_deps else [])]
     desugar_classpath = compiling_classpath if needs_desugar else []
 
-    library_info, packaging_info, shared_library_info, cxx_resource_info, linkable_graph = _create_non_template_providers(
+    library_info, packaging_info, global_code_info, shared_library_info, cxx_resource_info, linkable_graph = _create_non_template_providers(
         ctx,
         library_output = library_output,
+        global_code_config = global_code_config,
         declared_deps = declared_deps,
         exported_deps = exported_deps,
         exported_provided_deps = exported_provided_deps,
@@ -477,7 +650,10 @@ def create_java_library_providers(
         desugar_classpath = desugar_classpath,
         is_prebuilt_jar = is_prebuilt_jar,
         has_srcs = has_srcs,
+        sources_jar = sources_jar,
         proguard_config = proguard_config,
+        gwt_module = gwt_module,
+        first_order_compiling_deps = first_order_compiling_deps,
     )
 
     first_order_libs = first_order_classpath_libs + [library_info.library_output.full_library] if library_info.library_output else first_order_classpath_libs
@@ -487,6 +663,8 @@ def create_java_library_providers(
         compiling_classpath = compiling_classpath,
         generated_sources = generated_sources,
         annotation_jars_dir = annotation_jars_dir,
+        lint_jar = lint_jar,
+        preprocessed_library = preprocessed_library,
     )
 
-    return (library_info, packaging_info, shared_library_info, cxx_resource_info, linkable_graph, template_info, intellij_info)
+    return (library_info, packaging_info, global_code_info, shared_library_info, cxx_resource_info, linkable_graph, template_info, intellij_info)
diff --git a/prelude/java/java_resources.bzl b/prelude/java/java_resources.bzl
index c7976e3276c20..fbab61e4cb4a2 100644
--- a/prelude/java/java_resources.bzl
+++ b/prelude/java/java_resources.bzl
@@ -18,7 +18,7 @@ def get_src_package(src_root_prefixes: list[str], src_root_elements: list[str],
             prefix,
         )
     parts = path.split("/")
-    for i in range(len(parts) - 1, -1, -1):
+    for i in range(len(parts) - 2, -1, -1):
         part = parts[i]
         if part in src_root_elements:
             return "/".join(parts[i + 1:])
@@ -57,3 +57,18 @@ def get_resources_map(
         resource_name = get_src_package(java_toolchain.src_root_prefixes, java_toolchain.src_root_elements, full_resource)
         resources_to_copy[resource_name] = resource
     return resources_to_copy
+
+def parse_src_roots(src_roots: list[str]) -> (list[str], list[str]):
+    prefixes = []
+    elements = []
+    for src_root in src_roots:
+        if src_root.startswith("/"):
+            if not src_root.endswith("/"):
+                fail("Elements in java.src_roots config that begin with a / must end in one too, but {} does not".format(src_root))
+            prefixes.append(src_root[1:])
+        elif "/" in src_root:
+            fail("No / is permitted in java.src_roots config elements, but {} has one".format(src_root))
+        else:
+            elements.append(src_root)
+
+    return elements, prefixes
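A quick illustration of `parse_src_roots`, which splits the `java.src_roots` buckconfig value into bare directory-name elements and leading-slash path prefixes (note the return order is `(elements, prefixes)`):

    # parse_src_roots(["/java/", "src", "tests"]) == (["src", "tests"], ["java/"])
    # parse_src_roots(["/java"])   fails: a "/"-prefixed entry must also end with "/"
    # parse_src_roots(["a/b"])     fails: bare elements may not contain "/"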
diff --git a/prelude/java/java_test.bzl b/prelude/java/java_test.bzl
index 339b3fd65bff5..0e133c6729621 100644
--- a/prelude/java/java_test.bzl
+++ b/prelude/java/java_test.bzl
@@ -13,9 +13,20 @@ load(
 load("@prelude//java:java_library.bzl", "build_java_library")
 load("@prelude//java:java_providers.bzl", "JavaLibraryInfo", "JavaPackagingInfo", "get_all_java_packaging_deps_tset")
 load("@prelude//java:java_toolchain.bzl", "JavaTestToolchainInfo", "JavaToolchainInfo")
-load("@prelude//java/utils:java_utils.bzl", "get_path_separator_for_exec_os")
-load("@prelude//linking:shared_libraries.bzl", "SharedLibraryInfo", "merge_shared_libraries", "traverse_shared_library_info")
-load("@prelude//utils:utils.bzl", "expect")
+load("@prelude//java/utils:java_more_utils.bzl", "get_path_separator_for_exec_os")
+load(
+    "@prelude//linking:shared_libraries.bzl",
+    "SharedLibraryInfo",
+    "create_shlib_symlink_tree",
+    "merge_shared_libraries",
+    "traverse_shared_library_info",
+)
+load(
+    "@prelude//tests:re_utils.bzl",
+    "get_re_executors_from_props",
+)
+load("@prelude//utils:argfile.bzl", "at_argfile")
+load("@prelude//utils:expect.bzl", "expect")
 load("@prelude//test/inject_test_run_info.bzl", "inject_test_run_info")
 
 def java_test_impl(ctx: AnalysisContext) -> list[Provider]:
@@ -42,10 +53,14 @@ def build_junit_test(
         extra_cmds: list = [],
         extra_classpath_entries: list[Artifact] = []) -> ExternalRunnerTestInfo:
     java_test_toolchain = ctx.attrs._java_test_toolchain[JavaTestToolchainInfo]
+    java_toolchain = ctx.attrs._java_toolchain[JavaToolchainInfo]
 
-    java = ctx.attrs.java[RunInfo] if ctx.attrs.java else ctx.attrs._java_toolchain[JavaToolchainInfo].java_for_tests
+    java = ctx.attrs.java[RunInfo] if ctx.attrs.java else java_toolchain.java_for_tests
 
     cmd = [java] + extra_cmds + ctx.attrs.vm_args + ["-XX:-MaxFDLimit"]
+    if java_test_toolchain.jvm_args:
+        cmd.extend(java_test_toolchain.jvm_args)
+
     classpath = []
 
     if java_test_toolchain.use_java_custom_class_loader:
@@ -53,6 +68,8 @@
         cmd.extend(java_test_toolchain.java_custom_class_loader_vm_args)
         classpath.append(java_test_toolchain.java_custom_class_loader_library_jar)
 
+    cmd.append(cmd_args(ctx.attrs.java_agents, format = "-javaagent:{}"))
+
     classpath.extend(
         [java_test_toolchain.test_runner_library_jar] +
         [
@@ -66,12 +83,17 @@
         classpath.append(ctx.attrs.unbundled_resources_root)
 
     labels = ctx.attrs.labels or []
+
+    # Setup RE executors based on the `remote_execution` param.
+    re_executor, executor_overrides = get_re_executors_from_props(ctx)
+
+    # We implicitly make the target run from the project root if remote
+    # execution options were specified.
     run_from_cell_root = "buck2_run_from_cell_root" in labels
+
     uses_java8 = "run_with_java8" in labels
 
-    classpath_args = cmd_args()
-    if run_from_cell_root:
-        classpath_args.relative_to(ctx.label.cell_root)
+    relative_to = {"relative_to": ctx.label.cell_root} if run_from_cell_root else {}
 
     if uses_java8:
         # Java 8 does not support using argfiles, and these tests can have huge classpaths so we need another
@@ -80,16 +102,25 @@
         # to the "FileClassPathRunner" as a system variable. The "FileClassPathRunner" then loads all the jars
         # from that file onto the classpath, and delegates running the test to the junit test runner.
cmd.extend(["-classpath", cmd_args(java_test_toolchain.test_runner_library_jar)]) - classpath_args.add(cmd_args(classpath)) + classpath_args = cmd_args( + cmd_args(classpath), + **relative_to + ) classpath_args_file = ctx.actions.write("classpath_args_file", classpath_args) - cmd.append(cmd_args(classpath_args_file, format = "-Dbuck.classpath_file={}").hidden(classpath_args)) + cmd.append(cmd_args( + classpath_args_file, + format = "-Dbuck.classpath_file={}", + hidden = classpath_args, + )) else: # Java 9+ supports argfiles, so just write the classpath to an argsfile. "FileClassPathRunner" will delegate # immediately to the junit test runner. - classpath_args.add("-classpath") - classpath_args.add(cmd_args(classpath, delimiter = get_path_separator_for_exec_os(ctx))) - classpath_args_file = ctx.actions.write("classpath_args_file", classpath_args) - cmd.append(cmd_args(classpath_args_file, format = "@{}").hidden(classpath_args)) + classpath_args = cmd_args( + "-classpath", + cmd_args(classpath, delimiter = get_path_separator_for_exec_os(ctx)), + **relative_to + ) + cmd.append(at_argfile(actions = ctx.actions, name = "classpath_args_file", args = classpath_args)) if (ctx.attrs.test_type == "junit5"): cmd.extend(java_test_toolchain.junit5_test_runner_main_class_args) @@ -99,21 +130,23 @@ def build_junit_test( cmd.extend(java_test_toolchain.junit_test_runner_main_class_args) if ctx.attrs.test_case_timeout_ms: - cmd.extend(["--default_test_timeout", ctx.attrs.test_case_timeout_ms]) - - expect(tests_java_library_info.library_output != None, "Built test library has no output, likely due to missing srcs") - - class_names = ctx.actions.declare_output("class_names") - list_class_names_cmd = cmd_args([ - java_test_toolchain.list_class_names[RunInfo], - "--jar", - tests_java_library_info.library_output.full_library, - "--sources", - ctx.actions.write("sources.txt", ctx.attrs.srcs), - "--output", - class_names.as_output(), - ]).hidden(ctx.attrs.srcs) - ctx.actions.run(list_class_names_cmd, category = "list_class_names") + cmd.extend(["--default-test-timeout", str(ctx.attrs.test_case_timeout_ms)]) + + if ctx.attrs.test_class_names_file: + class_names = ctx.attrs.test_class_names_file + else: + expect(tests_java_library_info.library_output != None, "Built test library has no output, likely due to missing srcs") + class_names = ctx.actions.declare_output("class_names") + list_class_names_cmd = cmd_args([ + java_test_toolchain.list_class_names[RunInfo], + "--jar", + tests_java_library_info.library_output.full_library, + "--sources", + ctx.actions.write("sources.txt", ctx.attrs.srcs), + "--output", + class_names.as_output(), + ], hidden = ctx.attrs.srcs) + ctx.actions.run(list_class_names_cmd, category = "list_class_names") cmd.extend(["--test-class-names-file", class_names]) @@ -128,13 +161,13 @@ def build_junit_test( if tests_class_to_source_info != None: transitive_class_to_src_map = merge_class_to_source_map_from_jar( actions = ctx.actions, - name = ctx.attrs.name + ".transitive_class_to_src.json", - java_test_toolchain = java_test_toolchain, + name = ctx.label.name + ".transitive_class_to_src.json", + java_toolchain = java_toolchain, relative_to = ctx.label.cell_root if run_from_cell_root else None, deps = [tests_class_to_source_info], ) if run_from_cell_root: - transitive_class_to_src_map = cmd_args(transitive_class_to_src_map).relative_to(ctx.label.cell_root) + transitive_class_to_src_map = cmd_args(transitive_class_to_src_map, relative_to = ctx.label.cell_root) env["JACOCO_CLASSNAME_SOURCE_MAP"] = 
 
     test_info = ExternalRunnerTestInfo(
@@ -145,6 +178,8 @@
         contacts = ctx.attrs.contacts,
         run_from_project_root = not run_from_cell_root,
         use_project_relative_paths = not run_from_cell_root,
+        default_executor = re_executor,
+        executor_overrides = executor_overrides,
     )
     return test_info
 
@@ -162,8 +197,10 @@
         deps = shared_library_infos,
     )
 
-    native_linkables = traverse_shared_library_info(shared_library_info)
-    cxx_library_symlink_tree_dict = {so_name: shared_lib.lib.output for so_name, shared_lib in native_linkables.items()}
-    cxx_library_symlink_tree = ctx.actions.symlinked_dir("cxx_library_symlink_tree", cxx_library_symlink_tree_dict)
+    cxx_library_symlink_tree = create_shlib_symlink_tree(
+        actions = ctx.actions,
+        out = "cxx_library_symlink_tree",
+        shared_libs = traverse_shared_library_info(shared_library_info),
+    )
 
     return {"BUCK_LD_SYMLINK_TREE": cxx_library_symlink_tree}
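`at_argfile` (loaded above from `prelude//utils:argfile.bzl`) folds the write-then-reference dance into one helper. A sketch of the equivalence, inferred from the two call sites in this hunk:

    # Before: write the argsfile by hand and reference it with "@", keeping the
    # real args alive as hidden inputs.
    args_file = ctx.actions.write("classpath_args_file", classpath_args)
    cmd.append(cmd_args(args_file, format = "@{}", hidden = classpath_args))

    # After: one call that performs the same write and yields the "@<path>" argument.
    cmd.append(at_argfile(actions = ctx.actions, name = "classpath_args_file", args = classpath_args))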
"zip_scrubber": provider_field(typing.Any, default = None), }, ) @@ -66,8 +75,8 @@ JavaTestToolchainInfo = provider( "java_custom_class_loader_vm_args": provider_field(typing.Any, default = None), "junit5_test_runner_main_class_args": provider_field(typing.Any, default = None), "junit_test_runner_main_class_args": provider_field(typing.Any, default = None), + "jvm_args": provider_field(typing.Any, default = None), "list_class_names": provider_field(typing.Any, default = None), - "merge_class_to_source_maps": provider_field(typing.Any, default = None), "test_runner_library_jar": provider_field(typing.Any, default = None), "testng_test_runner_main_class_args": provider_field(typing.Any, default = None), "use_java_custom_class_loader": provider_field(typing.Any, default = None), @@ -81,6 +90,9 @@ PrebuiltJarToolchainInfo = provider( doc = "prebuilt_jar toolchain info", fields = { "class_abi_generator": provider_field(typing.Any, default = None), + "cp_snapshot_generator": provider_field(typing.Any, default = None), + "global_code_config": provider_field(typing.Any, default = None), "is_bootstrap_toolchain": provider_field(typing.Any, default = None), + "java": provider_field(typing.Any, default = None), }, ) diff --git a/prelude/java/javacd_jar_creator.bzl b/prelude/java/javacd_jar_creator.bzl index 40337e99a9b60..0bfdf8d574762 100644 --- a/prelude/java/javacd_jar_creator.bzl +++ b/prelude/java/javacd_jar_creator.bzl @@ -5,10 +5,11 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -# @starlark-rust: allow_string_literals_in_type_expr - load( "@prelude//java:java_providers.bzl", + "JavaClasspathEntry", # @unused Used as a type + "JavaCompileOutputs", # @unused Used as a type + "generate_java_classpath_snapshot", "make_compile_outputs", ) load("@prelude//java:java_resources.bzl", "get_resources_map") @@ -18,12 +19,19 @@ load( "DepFiles", "JavaToolchainInfo", # @unused Used as a type ) +load( + "@prelude//java/plugins:java_annotation_processor.bzl", + "AnnotationProcessorProperties", # @unused Used as a type +) +load( + "@prelude//java/plugins:java_plugin.bzl", + "PluginParams", # @unused Used as a type +) load( "@prelude//jvm:cd_jar_creator_util.bzl", "OutputPaths", "TargetType", "add_java_7_8_bootclasspath", - "add_output_paths_to_cmd_args", "base_qualified_name", "declare_prefixed_output", "define_output_paths", @@ -36,9 +44,9 @@ load( "prepare_final_jar", "setup_dep_files", ) -load("@prelude//utils:utils.bzl", "expect") +load("@prelude//utils:expect.bzl", "expect") -base_command_params = struct( +_base_command_params = struct( withDownwardApi = True, spoolMode = "DIRECT_TO_JAR", ) @@ -49,15 +57,15 @@ def create_jar_artifact_javacd( abi_generation_mode: [AbiGenerationMode, None], java_toolchain: JavaToolchainInfo, label, - output: [Artifact, None], + output: Artifact | None, javac_tool: [typing.Any, None], srcs: list[Artifact], remove_classes: list[str], resources: list[Artifact], resources_root: [str, None], - manifest_file: [Artifact, None], - annotation_processor_properties: "AnnotationProcessorProperties", - plugin_params: ["PluginParams", None], + manifest_file: Artifact | None, + annotation_processor_properties: AnnotationProcessorProperties, + plugin_params: [PluginParams, None], source_level: int, target_level: int, deps: list[Dependency], @@ -65,10 +73,11 @@ def create_jar_artifact_javacd( source_only_abi_deps: list[Dependency], extra_arguments: cmd_args, additional_classpath_entries: list[Artifact], - additional_compiled_srcs: 
-        additional_compiled_srcs: [Artifact, None],
+        additional_compiled_srcs: Artifact | None,
         bootclasspath_entries: list[Artifact],
         is_building_android_binary: bool,
-        is_creating_subtarget: bool = False) -> "JavaCompileOutputs":
+        is_creating_subtarget: bool = False,
+        debug_port: [int, None] = None) -> JavaCompileOutputs:
     if javac_tool != None:
         # TODO(cjhopman): We can probably handle this better. I think we should be able to just use the non-javacd path.
         fail("cannot set explicit javac on library when using javacd")
 
     actions = ctx.actions
     resources_map = get_resources_map(java_toolchain, label.package, resources, resources_root)
 
-    # TODO(cjhopman): Handle manifest file.
-    _ = manifest_file  # buildifier: disable=unused-variable
-
     bootclasspath_entries = add_java_7_8_bootclasspath(target_level, bootclasspath_entries, java_toolchain)
     abi_generation_mode = get_abi_generation_mode(abi_generation_mode, java_toolchain, srcs, annotation_processor_properties)
@@ -100,7 +106,7 @@ def create_jar_artifact_javacd(
     compiling_deps_tset = get_compiling_deps_tset(actions, deps, additional_classpath_entries)
 
     # external javac does not support used classes
-    track_class_usage = javac_tool == None
+    track_class_usage = javac_tool == None and java_toolchain.track_class_usage
 
     def encode_library_command(
             output_paths: OutputPaths,
@@ -124,13 +130,14 @@
             resources_map,
             annotation_processor_properties,
             plugin_params,
+            manifest_file,
             extra_arguments,
             source_only_abi_compiling_deps = [],
             track_class_usage = track_class_usage,
         )
 
         return struct(
-            baseCommandParams = base_command_params,
+            _baseCommandParams = _base_command_params,
             libraryJarCommand = struct(
                 baseJarCommand = base_jar_command,
                 libraryJarBaseCommand = struct(
@@ -146,7 +153,7 @@
             output_paths: OutputPaths,
             target_type: TargetType,
             classpath_jars_tag: ArtifactTag,
-            source_only_abi_compiling_deps: list["JavaClasspathEntry"] = []) -> struct:
+            source_only_abi_compiling_deps: list[JavaClasspathEntry] = []) -> struct:
         base_jar_command = encode_base_jar_command(
             javac_tool,
             target_type,
@@ -163,11 +170,12 @@
             resources_map,
             annotation_processor_properties,
             plugin_params,
+            manifest_file,
             extra_arguments,
             source_only_abi_compiling_deps = source_only_abi_compiling_deps,
             track_class_usage = track_class_usage,
         )
-        abi_params = encode_jar_params(remove_classes, output_paths)
+        abi_params = encode_jar_params(remove_classes, output_paths, manifest_file)
 
         abi_command = struct(
             baseJarCommand = base_jar_command,
@@ -175,7 +183,7 @@
         )
 
         return struct(
-            baseCommandParams = base_command_params,
+            _baseCommandParams = _base_command_params,
             abiJarCommand = abi_command,
         )
 
@@ -187,11 +195,10 @@
             qualified_name: str,
             output_paths: OutputPaths,
             classpath_jars_tag: ArtifactTag,
-            abi_dir: [Artifact, None],
+            abi_dir: Artifact | None,
             target_type: TargetType,
-            path_to_class_hashes: [Artifact, None],
             is_creating_subtarget: bool = False,
-            source_only_abi_compiling_deps: list["JavaClasspathEntry"] = []):
+            source_only_abi_compiling_deps: list[JavaClasspathEntry] = []):
         proto = declare_prefixed_output(actions, actions_identifier, "jar_command.proto.json")
 
         proto_with_inputs = actions.write_json(proto, encoded_command, with_inputs = True)
@@ -201,13 +208,14 @@
         compiler = java_toolchain.javac[DefaultInfo].default_outputs[0]
         exe, local_only = prepare_cd_exe(
             qualified_name,
-            java = java_toolchain.java[RunInfo],
+            java = java_toolchain.graalvm_java[RunInfo] if java_toolchain.use_graalvm_java_for_javacd else java_toolchain.java[RunInfo],
             class_loader_bootstrapper = java_toolchain.class_loader_bootstrapper,
             compiler = compiler,
             main_class = java_toolchain.javacd_main_class,
             worker = java_toolchain.javacd_worker[WorkerInfo],
-            debug_port = java_toolchain.javacd_debug_port,
-            debug_target = java_toolchain.javacd_debug_target,
+            target_specified_debug_port = debug_port,
+            toolchain_specified_debug_port = java_toolchain.javacd_debug_port,
+            toolchain_specified_debug_target = java_toolchain.javacd_debug_target,
             extra_jvm_args = java_toolchain.javacd_jvm_args,
             extra_jvm_args_target = java_toolchain.javacd_jvm_args_target,
         )
@@ -237,11 +245,6 @@
             abi_dir.as_output(),
         )
 
-        args = add_output_paths_to_cmd_args(args, output_paths, path_to_class_hashes)
-
-        # TODO(cjhopman): make sure this works both locally and remote.
-        event_pipe_out = declare_prefixed_output(actions, actions_identifier, "events.data")
-
         dep_files = {}
         if not is_creating_subtarget and srcs and (java_toolchain.dep_files == DepFiles("per_jar") or java_toolchain.dep_files == DepFiles("per_class")) and track_class_usage:
             abi_to_abi_dir_map = None
@@ -270,16 +273,17 @@
             args,
             env = {
                 "BUCK_CLASSPATH": compiler,
-                "BUCK_EVENT_PIPE": event_pipe_out.as_output(),
                 "JAVACD_ABSOLUTE_PATHS_ARE_RELATIVE_TO_CWD": "1",
             },
             category = "{}javacd_jar".format(category_prefix),
             identifier = actions_identifier or "",
             dep_files = dep_files,
+            allow_dep_file_cache_upload = False,
             exe = exe,
             local_only = local_only,
             low_pass_filter = False,
             weight = 2,
+            error_handler = java_toolchain.java_error_handler,
         )
 
     library_classpath_jars_tag = actions.artifact_tag()
@@ -293,10 +297,21 @@
         library_classpath_jars_tag,
         class_abi_output_dir if should_create_class_abi else None,
         TargetType("library"),
-        path_to_class_hashes_out,
         is_creating_subtarget,
     )
-    final_jar = prepare_final_jar(actions, actions_identifier, output, output_paths, additional_compiled_srcs, java_toolchain.jar_builder)
+
+    jar_postprocessor = ctx.attrs.jar_postprocessor[RunInfo] if hasattr(ctx.attrs, "jar_postprocessor") and ctx.attrs.jar_postprocessor else None
+    final_jar_output = prepare_final_jar(
+        actions = actions,
+        actions_identifier = actions_identifier,
+        output = output,
+        output_paths = output_paths,
+        additional_compiled_srcs = additional_compiled_srcs,
+        jar_builder = java_toolchain.jar_builder,
+        jar_postprocessor = jar_postprocessor,
+        jar_postprocessor_runner = java_toolchain.postprocessor_runner[RunInfo],
+        zip_scrubber = java_toolchain.zip_scrubber,
+    )
+
     if not is_creating_subtarget:
         class_abi, source_abi, source_only_abi, classpath_abi, classpath_abi_dir = generate_abi_jars(
             actions,
@@ -306,7 +321,7 @@
             additional_compiled_srcs,
             is_building_android_binary,
             java_toolchain.class_abi_generator,
-            final_jar,
+            final_jar_output.final_jar,
             compiling_deps_tset,
             source_only_abi_deps,
             class_abi_jar = class_abi_jar,
@@ -315,8 +330,10 @@
             define_action = define_javacd_action,
         )
 
+        abi_jar_snapshot = generate_java_classpath_snapshot(ctx.actions, java_toolchain.cp_snapshot_generator, classpath_abi, actions_identifier)
         result = make_compile_outputs(
-            full_library = final_jar,
+            full_library = final_jar_output.final_jar,
+            preprocessed_library = final_jar_output.preprocessed_jar,
             class_abi = class_abi,
             source_abi = source_abi,
             source_only_abi = source_only_abi,
@@ -324,11 +341,15 @@
             classpath_abi_dir = classpath_abi_dir,
             required_for_source_only_abi = required_for_source_only_abi,
             annotation_processor_output = output_paths.annotations,
+            abi_jar_snapshot = abi_jar_snapshot,
         )
     else:
+        full_jar_snapshot = generate_java_classpath_snapshot(ctx.actions, java_toolchain.cp_snapshot_generator, final_jar_output.final_jar, actions_identifier)
         result = make_compile_outputs(
-            full_library = final_jar,
+            full_library = final_jar_output.final_jar,
+            preprocessed_library = final_jar_output.preprocessed_jar,
             required_for_source_only_abi = required_for_source_only_abi,
             annotation_processor_output = output_paths.annotations,
+            abi_jar_snapshot = full_jar_snapshot,
         )
 
     return result
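`prepare_cd_exe` lives in `cd_jar_creator_util.bzl` and is not shown in this diff, so how the two debug ports combine is not visible here. A plausible reading, with an invented helper name, is that the new per-target `debug_port` attribute overrides the toolchain-wide javacd setting:

    # Sketch only; not the actual cd_jar_creator_util.bzl implementation.
    def _resolve_javacd_debug_port(target_specified_debug_port, toolchain_specified_debug_port):
        return target_specified_debug_port if target_specified_debug_port != None else toolchain_specified_debug_port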
diff --git a/prelude/java/plugins/java_annotation_processor.bzl b/prelude/java/plugins/java_annotation_processor.bzl
index a674922b47b57..a37c718074879 100644
--- a/prelude/java/plugins/java_annotation_processor.bzl
+++ b/prelude/java/plugins/java_annotation_processor.bzl
@@ -6,6 +6,7 @@
 # of this source tree.
 
 load("@prelude//java:java_providers.bzl", "JavaLibraryInfo", "JavaPackagingDepTSet", "JavaPackagingInfo")
+load("@prelude//utils:type_defs.bzl", "is_tuple")
 
 JavaProcessorsType = enum(
     "java_annotation_processor",
@@ -57,7 +58,7 @@ def derive_transitive_deps(ctx: AnalysisContext, deps: list[Dependency]) -> [Jav
 
 def create_annotation_processor_properties(
         ctx: AnalysisContext,
-        plugins: list[Dependency],
+        plugins: list[[Dependency, (Dependency, list[str])]],
         annotation_processor_names: list[str],
         annotation_processor_params: list[str],
         annotation_processor_deps: list[Dependency]) -> AnnotationProcessorProperties:
@@ -65,7 +66,7 @@ def create_annotation_processor_properties(
 
     # Extend `ap_processor_deps` with java deps from `annotation_processor_deps`
     if annotation_processor_names or annotation_processor_deps:
-        for ap_dep in [x.get(JavaLibraryInfo) for x in annotation_processor_deps]:
+        for ap_dep in [_get_plugin_provider(x, JavaLibraryInfo) for x in annotation_processor_deps]:
             if not ap_dep:
                 fail("Dependency must have a type of `java_library` or `prebuilt_jar`. Deps: {}".format(annotation_processor_deps))
 
@@ -80,7 +81,7 @@ def create_annotation_processor_properties(
             ))
 
     # APs derived from `plugins` attribute
-    for ap_plugin in filter(None, [x.get(JavaProcessorsInfo) for x in plugins]):
+    for ap_plugin in filter(None, [_get_plugin_provider(x, JavaProcessorsInfo) for x in plugins]):
         if not ap_plugin:
             fail("Plugin must have a type of `java_annotation_processor` or `java_plugin`. Plugins: {}".format(plugins))
         if ap_plugin.type == JavaProcessorsType("java_annotation_processor"):
@@ -92,35 +93,33 @@ def create_annotation_processor_properties(
                 isolate_class_loader = ap_plugin.isolate_class_loader,
             ))
 
+    annotation_processor_params = annotation_processor_params + [
+        "buck.current_buck_target=" + str(ctx.label.raw_target()),
+    ]
+
     return AnnotationProcessorProperties(
         annotation_processors = annotation_processors,
         annotation_processor_params = annotation_processor_params,
     )
 
-def create_ksp_annotation_processor_properties(ctx: AnalysisContext, plugins: list[Dependency]) -> AnnotationProcessorProperties:
-    ap_processors = []
-    ap_processor_deps = []
+def create_ksp_annotation_processor_properties(plugins: list[[Dependency, (Dependency, list[str])]]) -> AnnotationProcessorProperties:
+    annotation_processors = []
 
     # APs derived from `plugins` attribute
-    for ap_plugin in filter(None, [x.get(JavaProcessorsInfo) for x in plugins]):
+    for ap_plugin in filter(None, [_get_plugin_provider(x, JavaProcessorsInfo) for x in plugins]):
         if not ap_plugin:
             fail("Plugin must have a type of `java_annotation_processor` or `java_plugin`. Plugins: {}".format(plugins))
         if ap_plugin.type == JavaProcessorsType("ksp_annotation_processor"):
-            ap_processors += ap_plugin.processors
-            if ap_plugin.deps:
-                ap_processor_deps.append(ap_plugin.deps)
-
-    if not ap_processors:
-        return AnnotationProcessorProperties(annotation_processors = [], annotation_processor_params = [])
+            annotation_processors.append(AnnotationProcessor(
+                affects_abi = ap_plugin.affects_abi,
+                supports_source_only_abi = ap_plugin.supports_source_only_abi,
+                processors = ap_plugin.processors,
+                deps = ap_plugin.deps,
+                isolate_class_loader = ap_plugin.isolate_class_loader,
+            ))
 
     return AnnotationProcessorProperties(
-        annotation_processors = [AnnotationProcessor(
-            processors = dedupe(ap_processors),
-            deps = ctx.actions.tset(JavaPackagingDepTSet, children = ap_processor_deps) if ap_processor_deps else None,
-            affects_abi = True,
-            supports_source_only_abi = False,
-            isolate_class_loader = False,
-        )],
+        annotation_processors = annotation_processors,
         annotation_processor_params = [],
     )
 
@@ -130,6 +129,9 @@ def _get_processor_type(processor_class: str) -> JavaProcessorsType:
 
     return JavaProcessorsType("java_annotation_processor")
 
+def _get_plugin_provider(plugin: [Dependency, (Dependency, list[str])], provider: typing.Callable[[], Provider]) -> [Provider, None]:
+    return (plugin[0] if is_tuple(plugin) else plugin).get(provider)
+
 def java_annotation_processor_impl(ctx: AnalysisContext) -> list[Provider]:
     if ctx.attrs._build_only_native_code:
         return [DefaultInfo()]
diff --git a/prelude/java/plugins/java_plugin.bzl b/prelude/java/plugins/java_plugin.bzl
index ac4129903204f..9723658174199 100644
--- a/prelude/java/plugins/java_plugin.bzl
+++ b/prelude/java/plugins/java_plugin.bzl
@@ -12,33 +12,43 @@ load(
     "JavaProcessorsType",
     "derive_transitive_deps",
 )
+load("@prelude//utils:type_defs.bzl", "is_tuple")
 
 PluginParams = record(
-    processors = field(list[str]),
-    args = field(dict[str, cmd_args]),
+    processors = field(list[(str, cmd_args)]),
     deps = field([JavaPackagingDepTSet, None]),
 )
 
-def create_plugin_params(ctx: AnalysisContext, plugins: list[Dependency]) -> [PluginParams, None]:
+def create_plugin_params(ctx: AnalysisContext, plugins: list[[Dependency, (Dependency, list[str])]]) -> [PluginParams, None]:
     processors = []
     plugin_deps = []
 
     # Compiler plugin derived from `plugins` attribute
-    for plugin in filter(None, [x.get(JavaProcessorsInfo) for x in plugins]):
-        if plugin.type == JavaProcessorsType("plugin"):
-            if len(plugin.processors) > 1:
-                fail("Only 1 java compiler plugin is expected. But received: {}".format(plugin.processors))
-            processors.append(plugin.processors[0])
-            if plugin.deps:
-                plugin_deps.append(plugin.deps)
+    for item in plugins:
+        # Each plugin can be either a tuple of (target, arguments) or just the target
+        if is_tuple(item):
+            plugin = item[0]
+            arguments = item[1]
+        else:
+            plugin = item
+            arguments = None
+
+        processors_info = plugin.get(JavaProcessorsInfo)
+        if processors_info != None and processors_info.type == JavaProcessorsType("plugin"):
+            if len(processors_info.processors) > 1:
+                fail("Only 1 java compiler plugin is expected. But received: {}".format(processors_info.processors))
+            processor = processors_info.processors[0]
+            if processors_info.deps:
+                plugin_deps.append(processors_info.deps)
+
+            processors.append((processor, cmd_args(arguments) if arguments != None else cmd_args()))
 
     if not processors:
         return None
 
     return PluginParams(
-        processors = dedupe(processors),
+        processors = processors,
         deps = ctx.actions.tset(JavaPackagingDepTSet, children = plugin_deps) if plugin_deps else None,
-        args = {},
     )
 
 def java_plugin_impl(ctx: AnalysisContext) -> list[Provider]:
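With `PluginParams.processors` now a list of `(class, args)` pairs and `create_plugin_params` accepting `(Dependency, list[str])` tuples, the `plugins` attribute can carry per-plugin arguments. A hypothetical BUCK usage sketch; the target and flag are invented:

    java_library(
        name = "lib",
        srcs = glob(["*.java"]),
        plugins = [
            "//tools:some_plugin",  # bare target, no extra arguments
            ("//tools:some_plugin", ["-Xsome-plugin-flag"]),  # (target, args) tuple
        ],
    )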
diff --git a/prelude/java/prebuilt_jar.bzl b/prelude/java/prebuilt_jar.bzl
index 649a595ebc6ce..d1b4ae80a6021 100644
--- a/prelude/java/prebuilt_jar.bzl
+++ b/prelude/java/prebuilt_jar.bzl
@@ -12,6 +12,7 @@ load(
     "JavaClasspathEntry",
     "create_abi",
     "create_java_library_providers",
+    "generate_java_classpath_snapshot",
 )
 load(":java_toolchain.bzl", "PrebuiltJarToolchainInfo")
 
@@ -38,26 +39,35 @@ def prebuilt_jar_impl(ctx: AnalysisContext) -> list[Provider]:
     output = ctx.actions.declare_output("symlink/{}".format(binary_jar.short_path))
     ctx.actions.symlink_file(output, binary_jar)
 
+    gwt_output = ctx.actions.declare_output("{}-gwt.jar".format(ctx.label.name))
+    ctx.actions.copy_file(gwt_output, ctx.attrs.source_jar if ctx.attrs.source_jar else ctx.attrs.binary_jar)
+
     abi = None
-    if ctx.attrs.generate_abi:
-        prebuilt_jar_toolchain = ctx.attrs._prebuilt_jar_toolchain[PrebuiltJarToolchainInfo]
-        if not prebuilt_jar_toolchain.is_bootstrap_toolchain:
+    prebuilt_jar_toolchain = ctx.attrs._prebuilt_jar_toolchain[PrebuiltJarToolchainInfo]
+    if not prebuilt_jar_toolchain.is_bootstrap_toolchain:
+        if ctx.attrs.generate_abi:
             abi = create_abi(ctx.actions, prebuilt_jar_toolchain.class_abi_generator, output)
 
+    jar_snapshot = generate_java_classpath_snapshot(ctx.actions, ctx.attrs._prebuilt_jar_toolchain[PrebuiltJarToolchainInfo].cp_snapshot_generator, abi or output, "")
     library_output_classpath_entry = JavaClasspathEntry(
         full_library = output,
         abi = abi or output,
         abi_as_dir = None,
         required_for_source_only_abi = ctx.attrs.required_for_source_only_abi,
+        abi_jar_snapshot = jar_snapshot,
     )
 
-    java_library_info, java_packaging_info, shared_library_info, cxx_resource_info, linkable_graph, template_placeholder_info, _ = create_java_library_providers(
+    java_library_info, java_packaging_info, global_code_info, shared_library_info, cxx_resource_info, linkable_graph, template_placeholder_info, _ = create_java_library_providers(
         ctx,
         library_output = library_output_classpath_entry,
+        global_code_config = prebuilt_jar_toolchain.global_code_config,
         declared_deps = ctx.attrs.deps,
         exported_deps = ctx.attrs.deps,
+        provided_deps = ctx.attrs.desugar_deps,
         needs_desugar = True,
         is_prebuilt_jar = True,
+        gwt_module = gwt_output,
+        sources_jar = ctx.attrs.source_jar,
     )
 
     # TODO(T107163344) this shouldn't be in prebuilt_jar itself, use overlays to remove it.
@@ -73,10 +83,15 @@ def prebuilt_jar_impl(ctx: AnalysisContext) -> list[Provider]:
     return [
         java_library_info,
         java_packaging_info,
+        global_code_info,
         shared_library_info,
         cxx_resource_info,
         android_packageable_info,
         template_placeholder_info,
         linkable_graph,
         DefaultInfo(default_output = output, sub_targets = sub_targets),
-    ]
+    ] + (
+        [
+            RunInfo(args = cmd_args([ctx.attrs._prebuilt_jar_toolchain[PrebuiltJarToolchainInfo].java[RunInfo], "-jar", output])),
+        ] if ctx.attrs.is_executable else []
+    )
diff --git a/prelude/java/tools/BUCK b/prelude/java/tools/BUCK
deleted file mode 100644
index 71c397965df6b..0000000000000
--- a/prelude/java/tools/BUCK
+++ /dev/null
@@ -1,83 +0,0 @@
-prelude = native
-
-prelude.python_bootstrap_binary(
-    name = "gen_class_to_source_map",
-    main = "gen_class_to_source_map.py",
-    visibility = ["PUBLIC"],
-)
-
-prelude.python_bootstrap_binary(
-    name = "merge_class_to_source_maps",
-    main = "merge_class_to_source_maps.py",
-    visibility = ["PUBLIC"],
-)
-
-prelude.python_binary(
-    name = "compile_and_package",
-    main = "compile_and_package.py",
-    visibility = ["PUBLIC"],
-    deps = [
-        ":compile_and_package_lib",
-    ],
-)
-
-prelude.python_binary(
-    name = "fat_jar",
-    main = "fat_jar.py",
-    visibility = ["PUBLIC"],
-    deps = [
-        ":fat_jar_lib",
-        ":utils_lib",
-    ],
-)
-
-prelude.python_binary(
-    name = "list_class_names",
-    main = "list_class_names.py",
-    visibility = ["PUBLIC"],
-    deps = [
-        ":list_class_names_lib",
-    ],
-)
-
-prelude.python_library(
-    name = "fat_jar_lib",
-    srcs = [
-        "fat_jar.py",
-    ],
-    deps = [
-        ":utils_lib",
-    ],
-)
-
-prelude.python_library(
-    name = "compile_and_package_lib",
-    srcs = [
-        "compile_and_package.py",
-    ],
-    deps = [
-        ":utils_lib",
-    ],
-)
-
-prelude.python_library(
-    name = "list_class_names_lib",
-    srcs = [
-        "list_class_names.py",
-    ],
-    deps = [
-        ":utils_lib",
-    ],
-)
-
-prelude.python_library(
-    name = "utils_lib",
-    srcs = [
-        "utils.py",
-    ],
-    visibility = [
-        "prelude//android/tools/...",
-        "prelude//kotlin/tools/...",
-        "prelude//java/tools/...",
-    ],
-)
diff --git a/prelude/java/tools/BUCK.v2 b/prelude/java/tools/BUCK.v2
new file mode 100644
index 0000000000000..b48e1296e119e
--- /dev/null
+++ b/prelude/java/tools/BUCK.v2
@@ -0,0 +1,98 @@
+load("@prelude//utils:source_listing.bzl", "source_listing")
+
+oncall("build_infra")
+
+source_listing()
+
+prelude = native
+
+prelude.python_bootstrap_binary(
+    name = "gen_class_to_source_map",
+    main = "gen_class_to_source_map.py",
+    visibility = ["PUBLIC"],
+)
+
+prelude.python_bootstrap_binary(
+    name = "merge_class_to_source_maps",
+    main = "merge_class_to_source_maps.py",
+    visibility = ["PUBLIC"],
+)
+
+prelude.python_bootstrap_binary(
+    name = "compile_and_package",
+    main = "compile_and_package.py",
+    visibility = ["PUBLIC"],
+    deps = [
+        ":compile_and_package_lib",
+    ],
+)
+
+prelude.python_bootstrap_binary(
+    name = "fat_jar",
+    main = "fat_jar.py",
+    visibility = ["PUBLIC"],
+    deps = [
+        ":fat_jar_lib",
+        ":utils_lib",
+    ],
+)
+
+prelude.python_bootstrap_binary(
+    name = "list_class_names",
+    main = "list_class_names.py",
+    visibility = ["PUBLIC"],
+    deps = [
+        ":list_class_names_lib",
+    ],
+)
+
+prelude.python_bootstrap_library(
+    name = "fat_jar_lib",
+    srcs = [
+        "fat_jar.py",
+    ],
+    deps = [
+        ":utils_lib",
+    ],
+)
+
+prelude.python_bootstrap_library(
+    name = "compile_and_package_lib",
+    srcs = [
+        "compile_and_package.py",
+    ],
deps = [ + ":utils_lib", + ], +) + +prelude.python_bootstrap_library( + name = "list_class_names_lib", + srcs = [ + "list_class_names.py", + ], + deps = [ + ":utils_lib", + ], +) + +prelude.python_bootstrap_binary( + name = "postprocessor_runner", + main = "run_postprocessor.py", + visibility = ["PUBLIC"], + deps = [ + ":utils_lib", + ], +) + +prelude.python_bootstrap_library( + name = "utils_lib", + srcs = [ + "utils.py", + ], + visibility = [ + "prelude//android/tools/...", + "prelude//java/tools/...", + "prelude//kotlin/tools/...", + ], +) diff --git a/prelude/java/tools/compile_and_package.py b/prelude/java/tools/compile_and_package.py index 96d9cf6c1ca6a..8b877cdf39a04 100644 --- a/prelude/java/tools/compile_and_package.py +++ b/prelude/java/tools/compile_and_package.py @@ -11,7 +11,7 @@ import pathlib from tempfile import TemporaryDirectory -from java.tools import utils +import utils _JAVA_FILE_EXTENSION = [".java"] diff --git a/prelude/java/tools/fat_jar.py b/prelude/java/tools/fat_jar.py index 211702d3358f2..3c9f9b52c0342 100644 --- a/prelude/java/tools/fat_jar.py +++ b/prelude/java/tools/fat_jar.py @@ -14,9 +14,9 @@ import zipfile from shutil import copy, copytree from tempfile import TemporaryDirectory -from typing import Dict, List +from typing import Optional -from java.tools import utils +import utils def _parse_args(): @@ -30,6 +30,12 @@ def _parse_args(): required=True, help="tool for building jars", ) + parser.add_argument( + "--zip_scrubber_tool", + type=str, + required=True, + help="tool for scrubbing jars", + ) parser.add_argument( "--output", type=pathlib.Path, required=True, help="a path to an output result" ) @@ -103,19 +109,45 @@ def _parse_args(): action="store_true", help="Whether to create an inner jar if native libraries are present.", ) + parser.add_argument( + "--append_jar", + required=False, + type=pathlib.Path, + help="path to a jar used as base of the new jar, which new files will be added", + ) return parser.parse_args() -def _merge_dictionaries(dict1: Dict[str, str], dict2: Dict[str, str]) -> Dict[str, str]: - return {**dict1, **dict2} - - -def _shlex_split(cmd: str) -> List[str]: - if platform.system() == "Windows": - return cmd.split() - else: - return shlex.split(cmd) +def _fat_jar( + jar_builder_tool: str, + output_path: str, + append_jar: Optional[str] = None, + main_class: Optional[str] = None, + entries_to_jar_file: Optional[str] = None, + override_entries_to_jar_file: Optional[str] = None, + manifest_file: Optional[str] = None, + blocklist_file: Optional[str] = None, +) -> None: + cmd = [] + cmd.extend(utils.shlex_split(jar_builder_tool)) + if append_jar: + cmd.extend(["--append-jar", append_jar]) + if main_class: + cmd.extend(["--main-class", main_class]) + if entries_to_jar_file: + cmd.extend(["--entries-to-jar", entries_to_jar_file]) + if override_entries_to_jar_file: + cmd.extend(["--override-entries-to-jar", override_entries_to_jar_file]) + if manifest_file: + cmd.extend(["--manifest-file", manifest_file]) + if blocklist_file: + cmd.extend(["--blocklist-patterns", blocklist_file]) + cmd.extend(["--blocklist-patterns-matcher", "substring"]) + cmd.append("--merge-manifests") + cmd.extend(["--output", output_path]) + utils.log_message("fat_jar_cmd: {}".format(cmd)) + utils.execute_command(cmd) # Reads a list of files from native_libs_file and symlinks each as files in native_libs_dir. 
@@ -138,12 +170,14 @@ def main(): args = _parse_args() jar_builder_tool = args.jar_builder_tool + zip_scrubber_tool = args.zip_scrubber_tool output_path = args.output jars_file = args.jars_file main_class = args.main_class manifest = args.manifest blocklist_file = args.blocklist meta_inf_directory = args.meta_inf_directory + append_jar = args.append_jar generate_wrapper = args.generate_wrapper classpath_args_output = args.classpath_args_output @@ -184,6 +218,8 @@ def main(): utils.log_message("classpath_args_output: {}".format(classpath_args_output)) utils.log_message("java_tool: {}".format(java_tool)) utils.log_message("script_marker_file_name: {}".format(script_marker_file_name)) + if append_jar: + utils.log_message("append_jar = {}".format(append_jar)) need_to_process_native_libs = native_libs_file is not None if need_to_process_native_libs and not do_not_create_inner_jar: @@ -251,8 +287,8 @@ def main(): else: # generate fat jar - jar_cmd = [] - jar_cmd.extend(utils.shlex_split(jar_builder_tool)) + entries_to_jar_file = jars_file + override_entries_to_jar = None if need_to_process_native_libs and do_not_create_inner_jar: # symlink native libs to `nativelibs` directory @@ -276,11 +312,7 @@ def main(): f.write(str(f2.read()) + "\n") f.write(str(native_libs_staging)) - jar_cmd.extend( - ["--entries-to-jar", jars_and_native_libs_directory_file] - ) - else: - jar_cmd.extend(["--entries-to-jar", jars_file]) + entries_to_jar_file = jars_and_native_libs_directory_file if meta_inf_directory: meta_inf_staging = pathlib.Path(temp_dir) / "meta_inf_staging" @@ -298,28 +330,26 @@ def main(): with open(meta_inf_directory_file, "w") as f: f.write(str(meta_inf_staging)) - jar_cmd.extend(["--override-entries-to-jar", meta_inf_directory_file]) - - if main_class: - jar_cmd.extend(["--main-class", main_class]) - - if blocklist_file: - jar_cmd.extend(["--blocklist-patterns", blocklist_file]) - jar_cmd.extend(["--blocklist-patterns-matcher", "substring"]) - - if manifest: - jar_cmd.extend(["--manifest-file", manifest]) - - jar_cmd.append("--merge-manifests") + override_entries_to_jar = meta_inf_directory_file jar_output = ( os.path.join(temp_dir, "inner.jar") if need_to_process_native_libs and not do_not_create_inner_jar else output_path ) - jar_cmd.extend(["--output", jar_output]) - utils.log_message("jar_cmd: {}".format(jar_cmd)) - utils.execute_command(jar_cmd) + + utils.log_message("jar_output: {}".format(jar_output)) + + _fat_jar( + jar_builder_tool=jar_builder_tool, + output_path=jar_output, + main_class=main_class, + entries_to_jar_file=entries_to_jar_file, + override_entries_to_jar_file=override_entries_to_jar, + manifest_file=manifest, + blocklist_file=blocklist_file, + append_jar=append_jar, + ) if need_to_process_native_libs and not do_not_create_inner_jar: fat_jar_content_dir = os.path.join(temp_dir, "fat_jar_content_dir") @@ -369,17 +399,21 @@ def main(): content.relative_to(fat_jar_content_dir), ) + zip_scrubber_cmd = [] + zip_scrubber_cmd.extend(utils.shlex_split(zip_scrubber_tool)) + zip_scrubber_cmd.extend([contents_zip_path]) + utils.execute_command(zip_scrubber_cmd) + entries_to_jar_file = os.path.join(temp_dir, "entries_to_jar.txt") with open(entries_to_jar_file, "w") as f: f.write("\n".join([contents_zip_path, str(fat_jar_lib)])) - fat_jar_cmd = [] - fat_jar_cmd.extend(utils.shlex_split(jar_builder_tool)) - fat_jar_cmd.extend(["--main-class", fat_jar_main_class]) - fat_jar_cmd.extend(["--output", output_path]) - fat_jar_cmd.extend(["--entries-to-jar", entries_to_jar_file]) - 
fat_jar_cmd.append("--merge-manifests") - utils.execute_command(fat_jar_cmd) + _fat_jar( + jar_builder_tool=jar_builder_tool, + output_path=output_path, + main_class=fat_jar_main_class, + entries_to_jar_file=entries_to_jar_file, + ) if __name__ == "__main__": diff --git a/prelude/java/tools/gen_class_to_source_map.py b/prelude/java/tools/gen_class_to_source_map.py index eed5c847315f7..f86d83cd1ee7f 100644 --- a/prelude/java/tools/gen_class_to_source_map.py +++ b/prelude/java/tools/gen_class_to_source_map.py @@ -12,11 +12,27 @@ import zipfile +def _base_class_name_matches_base_source_path( + base_class_name: str, base_source_path: str +): + return base_class_name == base_source_path or base_source_path.endswith( + "/" + base_class_name + ) + + def main(argv): - parser = argparse.ArgumentParser() + parser = argparse.ArgumentParser(fromfile_prefix_chars="@") + parser.add_argument( + "--include_classes_prefixes", + "-i", + default=[], + nargs="*", + help="Prefixes of classes to include in the output, even if their source isn't present", + ) parser.add_argument( "--output", "-o", type=argparse.FileType("w"), default=sys.stdin ) + parser.add_argument("--sources_jar", required=False) parser.add_argument("jar") parser.add_argument("sources", nargs="*") args = parser.parse_args(argv[1:]) @@ -44,16 +60,55 @@ def main(argv): if "$" in base: continue + found = False for src_base, src_path in sources.items(): - if base == src_base or src_base.endswith("/" + base): + if _base_class_name_matches_base_source_path(base, src_base): classes.append( { "className": classname, "srcPath": src_path, } ) + found = True + break + # Kotlin creates .class files with a "Kt" suffix when code is written outside of a class, + # so strip that suffix and redo the comparison. + elif base.endswith("Kt") and _base_class_name_matches_base_source_path( + base[:-2], src_base + ): + classes.append( + { + "className": classname[:-2], + "srcPath": src_path, + } + ) + found = True break + if not found: + # If the class is not present in the sources, we still want to + # include it if it has a prefix that we are interested in. + # certain classes in "androidx.databinding.*" are generated and it's useful to know their presence in jars + for prefix in args.include_classes_prefixes: + if classname.startswith(prefix): + classes.append( + { + "className": classname, + } + ) + break + + if args.sources_jar: + with zipfile.ZipFile(args.sources_jar, "w") as sources_jar: + for d in classes: + if "srcPath" in d: + src_path = d["srcPath"] + class_name = d["className"] + _, src_path_ext = os.path.splitext(src_path) + sources_jar.write( + src_path, class_name.replace(".", "/") + src_path_ext + ) + json.dump( { "jarPath": args.jar, diff --git a/prelude/java/tools/run_postprocessor.py b/prelude/java/tools/run_postprocessor.py new file mode 100644 index 0000000000000..3e6da8e4bb5d6 --- /dev/null +++ b/prelude/java/tools/run_postprocessor.py @@ -0,0 +1,49 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ + +import argparse +import pathlib + +import utils + + +def _parse_args(): + parser = argparse.ArgumentParser(description="Tool to run a jar postprocessor.") + + parser.add_argument( + "--postprocessor_cmd", + type=str, + required=True, + ) + parser.add_argument( + "--zip_scrubber", + type=str, + required=True, + help="A command to run a zip scrubber", + ) + parser.add_argument( + "--output", + type=pathlib.Path, + required=True, + help="the path that the postprocessor writes to, which is then scrubbed", + ) + + return parser.parse_args() + + +def main(): + args = _parse_args() + + utils.execute_command(utils.shlex_split(args.postprocessor_cmd)) + + scrubber_cmd = utils.shlex_split(args.zip_scrubber) + [args.output] + utils.execute_command(scrubber_cmd) + + +if __name__ == "__main__": + main() diff --git a/prelude/java/tools/utils.py b/prelude/java/tools/utils.py index 20caa29e21f4d..a6adfe2929e14 100644 --- a/prelude/java/tools/utils.py +++ b/prelude/java/tools/utils.py @@ -109,3 +109,14 @@ def execute_command(command: List): exit_code = subprocess.call(command) if exit_code != 0: sys.exit(exit_code) + + +def execute_command_ignore_exit_codes(command: List, exit_codes_to_ignore: List): + log_message( + "executing command = '{}'".format( + " ".join([shlex.quote(str(s)) for s in command]) + ) + ) + exit_code = subprocess.call(command) + if exit_code != 0 and exit_code not in exit_codes_to_ignore: + sys.exit(exit_code) diff --git a/prelude/java/utils/java_more_utils.bzl b/prelude/java/utils/java_more_utils.bzl new file mode 100644 index 0000000000000..1cf29fd99d76e --- /dev/null +++ b/prelude/java/utils/java_more_utils.bzl @@ -0,0 +1,17 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load( + "@prelude//os_lookup:defs.bzl", + "OsLookup", # @unused Used as type +) +load("@prelude//utils:expect.bzl", "expect") + +def get_path_separator_for_exec_os(ctx: AnalysisContext) -> str: + expect(hasattr(ctx.attrs, "_exec_os_type"), "Expect ctx.attrs._exec_os_type is defined.") + is_windows = ctx.attrs._exec_os_type[OsLookup].platform == "windows" + return ";" if is_windows else ":" diff --git a/prelude/java/utils/java_utils.bzl b/prelude/java/utils/java_utils.bzl index addaf0aa5268a..476b685b3a8f2 100644 --- a/prelude/java/utils/java_utils.bzl +++ b/prelude/java/utils/java_utils.bzl @@ -5,8 +5,6 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
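The new execute_command_ignore_exit_codes helper in utils.py above mirrors execute_command but treats the listed exit codes as success; a hedged usage sketch (the tool name and exit code below are hypothetical):

# Hypothetical: a checker that exits 0 on success and 3 when it only emitted
# warnings; both should be treated as success, anything else fails the action.
utils.execute_command_ignore_exit_codes(["checker", "--strict", "src/"], [3])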
-# @starlark-rust: allow_string_literals_in_type_expr - load( "@prelude//java:class_to_srcs.bzl", "JavaClassToSourceMapInfo", # @unused Used as a type @@ -14,14 +12,13 @@ load( "create_class_to_source_map_info", "maybe_create_class_to_source_map_debuginfo", ) +load( + "@prelude//java:java_providers.bzl", + "JavaCompileOutputs", # @unused Used as a type + "JavaPackagingInfo", # @unused Used as a type +) load("@prelude//java:java_toolchain.bzl", "AbiGenerationMode", "JavaToolchainInfo") -load("@prelude//os_lookup:defs.bzl", "OsLookup") -load("@prelude//utils:utils.bzl", "expect") - -def get_path_separator_for_exec_os(ctx: AnalysisContext) -> str: - expect(hasattr(ctx.attrs, "_exec_os_type"), "Expect ctx.attrs._exec_os_type is defined.") - is_windows = ctx.attrs._exec_os_type[OsLookup].platform == "windows" - return ";" if is_windows else ":" +load("@prelude//utils:expect.bzl", "expect") def derive_javac(javac_attribute: [str, Dependency, Artifact]) -> [str, RunInfo, Artifact]: javac_attr_type = type(javac_attribute) @@ -87,8 +84,8 @@ def get_abi_generation_mode(abi_generation_mode): def get_default_info( actions: AnalysisActions, java_toolchain: JavaToolchainInfo, - outputs: ["JavaCompileOutputs", None], - packaging_info: "JavaPackagingInfo", + outputs: [JavaCompileOutputs, None], + packaging_info: JavaPackagingInfo, extra_sub_targets: dict = {}) -> DefaultInfo: sub_targets = get_classpath_subtarget(actions, packaging_info) default_info = DefaultInfo() @@ -119,27 +116,32 @@ def declare_prefixed_name(name: str, prefix: [str, None]) -> str: def get_class_to_source_map_info( ctx: AnalysisContext, - outputs: ["JavaCompileOutputs", None], - deps: list[Dependency]) -> (JavaClassToSourceMapInfo, dict): + outputs: [JavaCompileOutputs, None], + deps: list[Dependency], + generate_sources_jar: bool = False) -> (JavaClassToSourceMapInfo, Artifact | None, dict): sub_targets = {} class_to_srcs = None class_to_srcs_debuginfo = None + sources_jar = None if outputs != None: - if not ctx.attrs._is_building_android_binary: - class_to_srcs = create_class_to_source_map_from_jar( - actions = ctx.actions, - java_toolchain = ctx.attrs._java_toolchain[JavaToolchainInfo], - name = ctx.attrs.name + ".class_to_srcs.json", - jar = outputs.classpath_entry.full_library, - srcs = ctx.attrs.srcs, - ) + name = ctx.label.name + class_to_srcs, sources_jar = create_class_to_source_map_from_jar( + actions = ctx.actions, + java_toolchain = ctx.attrs._java_toolchain[JavaToolchainInfo], + name = name + ".class_to_srcs.json", + jar = outputs.classpath_entry.full_library, + srcs = ctx.attrs.srcs, + sources_jar_name = "{}-sources.jar".format(name) if generate_sources_jar else None, + ) class_to_srcs_debuginfo = maybe_create_class_to_source_map_debuginfo( actions = ctx.actions, java_toolchain = ctx.attrs._java_toolchain[JavaToolchainInfo], - name = ctx.attrs.name + ".debuginfo.json", + name = name + ".debuginfo.json", srcs = ctx.attrs.srcs, ) sub_targets["class-to-srcs"] = [DefaultInfo(default_output = class_to_srcs)] + if sources_jar: + sub_targets["sources.jar"] = [DefaultInfo(default_output = sources_jar)] class_to_src_map_info = create_class_to_source_map_info( ctx = ctx, @@ -149,9 +151,9 @@ def get_class_to_source_map_info( ) if outputs != None: sub_targets["debuginfo"] = [DefaultInfo(default_output = class_to_src_map_info.debuginfo)] - return (class_to_src_map_info, sub_targets) + return (class_to_src_map_info, sources_jar, sub_targets) -def get_classpath_subtarget(actions: AnalysisActions, packaging_info: "JavaPackagingInfo") -> 
dict[str, list[Provider]]: +def get_classpath_subtarget(actions: AnalysisActions, packaging_info: JavaPackagingInfo) -> dict[str, list[Provider]]: proj = packaging_info.packaging_deps.project_as_args("full_jar_args") output = actions.write("classpath", proj) return {"classpath": [DefaultInfo(output, other_outputs = [proj])]} diff --git a/prelude/js/js.bzl b/prelude/js/js.bzl index 8daa45b7cfead..37b1311e9f718 100644 --- a/prelude/js/js.bzl +++ b/prelude/js/js.bzl @@ -15,10 +15,17 @@ load("@prelude//genrule.bzl", "genrule_attributes") def _select_platform(): return select({ - "DEFAULT": "android", - "config//os/constraints:iphoneos": "ios", - "config//os/constraints:macos": "macos", - "config//os/constraints:windows": "windows", + "DEFAULT": select({ + "DEFAULT": "android", + "config//os/constraints:iphoneos": "ios", + "config//os/constraints:macos": "macos", + "config//os/constraints:windows": "windows", + }), + "fbsource//tools/build_defs/js/config:platform_override_android": "android", + "fbsource//tools/build_defs/js/config:platform_override_ios": "ios", + "fbsource//tools/build_defs/js/config:platform_override_macos": "macos", + "fbsource//tools/build_defs/js/config:platform_override_vr": "vr", + "fbsource//tools/build_defs/js/config:platform_override_windows": "windows", }) def _is_release(): @@ -30,6 +37,13 @@ def _is_release(): "config//build_mode/constraints:release": True, }) +def _select_asset_dest_path_resolver(): + return select({ + "DEFAULT": None, + "fbsource//tools/build_defs/js/config:asset_dest_path_resolver_android": "android", + "fbsource//tools/build_defs/js/config:asset_dest_path_resolver_generic": "generic", + }) + implemented_rules = { "js_bundle": js_bundle_impl, "js_bundle_genrule": js_bundle_genrule_impl, @@ -61,6 +75,10 @@ extra_attributes = { }, "js_library": { "worker": attrs.exec_dep(), + "_asset_dest_path_resolver": attrs.option( + attrs.string(), + default = _select_asset_dest_path_resolver(), + ), "_build_only_native_code": attrs.bool(default = is_build_only_native_code()), "_is_release": attrs.bool( default = _is_release(), diff --git a/prelude/js/js_bundle.bzl b/prelude/js/js_bundle.bzl index 93181fdfc21c8..e90621731435c 100644 --- a/prelude/js/js_bundle.bzl +++ b/prelude/js/js_bundle.bzl @@ -10,7 +10,8 @@ load("@prelude//android:android_resource.bzl", "JAVA_PACKAGE_FILENAME", "aapt2_c load("@prelude//android:android_toolchain.bzl", "AndroidToolchainInfo") load("@prelude//js:js_providers.bzl", "JsBundleInfo", "JsLibraryInfo", "get_transitive_outputs") load("@prelude//js:js_utils.bzl", "RAM_BUNDLE_TYPES", "TRANSFORM_PROFILES", "get_apple_resource_providers_for_js_bundle", "get_bundle_name", "get_flavors", "run_worker_commands") -load("@prelude//utils:utils.bzl", "expect", "map_idx") +load("@prelude//utils:expect.bzl", "expect") +load("@prelude//utils:utils.bzl", "map_idx") def _build_dependencies_file( ctx: AnalysisContext, @@ -48,10 +49,11 @@ def _build_dependencies_file( command_args_files = [command_args_file], identifier = transform_profile, category = "dependencies", - hidden_artifacts = [cmd_args([ + hidden_artifacts = [cmd_args( dependencies_file.as_output(), extra_data_args, - ]).add(transitive_js_library_outputs)], + transitive_js_library_outputs, + )], ) return dependencies_file @@ -109,13 +111,14 @@ def _build_js_bundle( command_args_files = [command_args_file], identifier = base_dir, category = job_args["command"], - hidden_artifacts = [cmd_args([ + hidden_artifacts = [cmd_args( bundle_dir_output.as_output(), assets_dir.as_output(), 
misc_dir_path.as_output(), source_map.as_output(), extra_data_args, - ]).add(transitive_js_library_outputs)], + transitive_js_library_outputs, + )], ) return JsBundleInfo( diff --git a/prelude/js/js_bundle_genrule.bzl b/prelude/js/js_bundle_genrule.bzl index 1fe67cefaa59c..cdc653f2e1ee5 100644 --- a/prelude/js/js_bundle_genrule.bzl +++ b/prelude/js/js_bundle_genrule.bzl @@ -9,7 +9,7 @@ load("@prelude//:genrule.bzl", "process_genrule") load("@prelude//android:android_providers.bzl", "AndroidResourceInfo", "merge_android_packageable_info") load("@prelude//js:js_providers.bzl", "JsBundleInfo") load("@prelude//js:js_utils.bzl", "RAM_BUNDLE_TYPES", "TRANSFORM_PROFILES", "get_apple_resource_providers_for_js_bundle", "get_bundle_name") -load("@prelude//utils:utils.bzl", "expect") +load("@prelude//utils:expect.bzl", "expect") def _build_js_bundle( ctx: AnalysisContext, diff --git a/prelude/js/js_library.bzl b/prelude/js/js_library.bzl index 5269bc611b56e..8bbe0ad46be00 100644 --- a/prelude/js/js_library.bzl +++ b/prelude/js/js_library.bzl @@ -8,7 +8,8 @@ load("@prelude//:paths.bzl", "paths") load("@prelude//js:js_providers.bzl", "JsLibraryInfo", "get_transitive_outputs") load("@prelude//js:js_utils.bzl", "TRANSFORM_PROFILES", "get_canonical_src_name", "get_flavors", "run_worker_commands") -load("@prelude//utils:utils.bzl", "expect", "map_idx") +load("@prelude//utils:expect.bzl", "expect") +load("@prelude//utils:utils.bzl", "map_idx") # A group of sources that all have the same canonical name. The main_source is arbitrary but # consistent (it is just the first source encountered when processing the src files). @@ -59,7 +60,7 @@ def _build_js_files( for grouped_src in grouped_srcs: identifier = "{}/{}".format(transform_profile, grouped_src.canonical_name) - output_path = ctx.actions.declare_output(identifier) + output_path = ctx.actions.declare_output("transform-out/{}.jsfile".format(identifier)) job_args = { "additionalSources": [{ "sourcePath": additional_source, @@ -105,17 +106,26 @@ def _build_library_files( transform_profile: str, flavors: list[str], js_files: list[Artifact]) -> Artifact: - output_path = ctx.actions.declare_output("{}/library_files".format(transform_profile)) + output_path = ctx.actions.declare_output("library-files-out/{}/library_files".format(transform_profile)) + + job_args = { + "command": "library-files", + "flavors": flavors, + "outputFilePath": output_path, + "platform": ctx.attrs._platform, + "release": ctx.attrs._is_release, + "sourceFilePaths": js_files, + } + + if ctx.attrs.extra_json: + job_args["extraData"] = cmd_args(ctx.attrs.extra_json, delimiter = "") + + if ctx.attrs._asset_dest_path_resolver: + job_args["assetDestPathResolver"] = ctx.attrs._asset_dest_path_resolver + command_args_file = ctx.actions.write_json( "library_files_{}_command_args".format(transform_profile), - { - "command": "library-files", - "flavors": flavors, - "outputFilePath": output_path, - "platform": ctx.attrs._platform, - "release": ctx.attrs._is_release, - "sourceFilePaths": js_files, - }, + job_args, ) run_worker_commands( @@ -134,7 +144,7 @@ def _build_js_library( library_files: Artifact, flavors: list[str], js_library_deps: list[Artifact]) -> Artifact: - output_path = ctx.actions.declare_output("{}.jslib".format(transform_profile)) + output_path = ctx.actions.declare_output("library-dependencies-out/{}.jslib".format(transform_profile)) job_args = { "aggregatedSourceFilesFilePath": library_files, "command": "library-dependencies", diff --git a/prelude/js/js_providers.bzl 
b/prelude/js/js_providers.bzl index bc24622a4bea5..c5027eaf57945 100644 --- a/prelude/js/js_providers.bzl +++ b/prelude/js/js_providers.bzl @@ -37,7 +37,7 @@ JsBundleInfo = provider( def get_transitive_outputs( actions: AnalysisActions, - value: [Artifact, None] = None, + value: Artifact | None = None, deps: list[JsLibraryInfo] = []) -> TransitiveOutputsTSet: kwargs = {} if value: diff --git a/prelude/js/js_utils.bzl b/prelude/js/js_utils.bzl index 8f899c329b80d..d95f84e664c12 100644 --- a/prelude/js/js_utils.bzl +++ b/prelude/js/js_utils.bzl @@ -10,7 +10,8 @@ load("@prelude//:worker_tool.bzl", "WorkerToolInfo") load("@prelude//apple:apple_resource_types.bzl", "AppleResourceDestination", "AppleResourceSpec") load("@prelude//apple:resource_groups.bzl", "ResourceGraphInfo", "create_resource_graph") # @unused `ResourceGraphInfo` used as a type load("@prelude//js:js_providers.bzl", "JsBundleInfo") -load("@prelude//utils:utils.bzl", "expect") +load("@prelude//utils:argfile.bzl", "at_argfile") +load("@prelude//utils:expect.bzl", "expect") RAM_BUNDLE_TYPES = { "": "", @@ -125,7 +126,7 @@ def get_bundle_name(ctx: AnalysisContext, default_bundle_name: str) -> str: flavors = bundle_name_for_flavor_map.keys() for flavor in flavors: expect( - flavor == "android" or flavor == "ios" or flavor == "macos" or flavor == "windows", + flavor == "android" or flavor == "ios" or flavor == "macos" or flavor == "windows" or flavor == "vr", "Currently only support picking bundle name by platform!", ) @@ -142,11 +143,25 @@ def run_worker_commands( identifier: str, category: str, hidden_artifacts = [cmd_args]): + worker_args = cmd_args( + "--command-args-file", + command_args_files, + "--command-args-file-extra-data-fixup-hack=true", + ) + worker_tool_info = worker_tool[WorkerToolInfo] - worker_command = worker_tool_info.command.copy() - worker_command.add("--command-args-file", command_args_files) - worker_command.hidden(hidden_artifacts) - worker_command.add("--command-args-file-extra-data-fixup-hack=true") + worker_command = cmd_args( + worker_tool_info.command.copy(), + at_argfile( + actions = ctx.actions, + name = paths.join(identifier, "{}.js_worker_argsfile".format(category)), + args = worker_args, + ), + hidden = [ + hidden_artifacts, + command_args_files, + ], + ) ctx.actions.run( worker_command, diff --git a/prelude/julia/julia.bzl b/prelude/julia/julia.bzl index 74e7f433a1f57..4337bef7a602b 100644 --- a/prelude/julia/julia.bzl +++ b/prelude/julia/julia.bzl @@ -5,6 +5,7 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +load("@prelude//decls/common.bzl", "buck") load(":julia_binary.bzl", "julia_binary_impl") load(":julia_library.bzl", "julia_jll_library_impl", "julia_library_impl") load(":julia_test.bzl", "julia_test_impl") @@ -48,5 +49,5 @@ extra_attributes = { "srcs": attrs.list(attrs.source(), default = []), "_julia_toolchain": julia_toolchain(), # TODO: coverage - }, + } | buck.inject_test_env_arg(), } diff --git a/prelude/julia/julia_binary.bzl b/prelude/julia/julia_binary.bzl index c1cedffee21eb..d9441aa411299 100644 --- a/prelude/julia/julia_binary.bzl +++ b/prelude/julia/julia_binary.bzl @@ -5,7 +5,12 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
-load("@prelude//linking:shared_libraries.bzl", "merge_shared_libraries", "traverse_shared_library_info") +load( + "@prelude//linking:shared_libraries.bzl", + "create_shlib_symlink_tree", + "merge_shared_libraries", + "traverse_shared_library_info", +) load("@prelude//utils:utils.bzl", "flatten") load(":julia_info.bzl", "JuliaLibraryInfo", "JuliaLibraryTSet", "JuliaToolchainInfo") @@ -47,12 +52,13 @@ def build_jll_shlibs_mapping(ctx: AnalysisContext, json_info_file: Artifact): filter(None, [d.shared_library_info for d in deps]), )) - shared_libs_symlink_tree = ctx.actions.symlinked_dir( - "__shared_libs_symlink_tree__", - {name: shlib.lib.output for name, shlib in shlibs.items()}, + shared_libs_symlink_tree = create_shlib_symlink_tree( + actions = ctx.actions, + out = "__shared_libs_symlink_tree__", + shared_libs = shlibs, ) - shlib_label_to_soname = {shlib.label: name for name, shlib in shlibs.items()} + shlib_label_to_soname = {shlib.label: shlib.soname.ensure_str() for shlib in shlibs} # iterate through all the jll libraries json_info = [] @@ -64,8 +70,11 @@ def build_jll_shlibs_mapping(ctx: AnalysisContext, json_info_file: Artifact): # iterate through all the shlib dependencies for the current jll artifact_info = [] for julia_name, label in jll.libs.items(): - symlink_dir = cmd_args(shared_libs_symlink_tree, delimiter = "") - symlink_dir.relative_to(json_info_file) # That cannot be produced by a tset projection + symlink_dir = cmd_args( + shared_libs_symlink_tree, + delimiter = "", + relative_to = json_info_file, # That cannot be produced by a tset projection + ) artifact_info.append((julia_name, symlink_dir, shlib_label_to_soname[label])) json_info.append((jll.name, jli.uuid, artifact_info)) @@ -100,19 +109,20 @@ def build_julia_command(ctx): """ julia_toolchain = ctx.attrs._julia_toolchain[JuliaToolchainInfo] - # python processor - cmd = cmd_args([julia_toolchain.cmd_processor]) - # build out the symlink tree for libs symlink_dir = build_load_path_symtree(ctx) - cmd.hidden(symlink_dir) # build symdir for sources srcs_by_path = {f.short_path: f for f in ctx.attrs.srcs} srcs = ctx.actions.symlinked_dir("srcs_tree", srcs_by_path) if ctx.attrs.main not in srcs_by_path: fail("main should be in srcs!") - cmd.hidden(srcs) + + # python processor + cmd = cmd_args( + [julia_toolchain.cmd_processor], + hidden = [symlink_dir] + [srcs], + ) # prepare a json file to hold all the data the python preprocessor needs to # execute the julia interpreter. 
@@ -122,10 +132,10 @@ def build_julia_command(ctx): "env": julia_toolchain.env, "jll_mapping": build_jll_shlibs_mapping(ctx, json_info_file), "julia_args": ctx.attrs.julia_args, - "julia_binary": cmd_args(julia_toolchain.julia, delimiter = " ").relative_to(json_info_file), + "julia_binary": cmd_args(julia_toolchain.julia, delimiter = " ", relative_to = json_info_file), "julia_flags": ctx.attrs.julia_flags, - "lib_path": cmd_args(symlink_dir, delimiter = " ").relative_to(json_info_file), - "main": cmd_args(srcs.project(ctx.attrs.main), delimiter = " ").relative_to(json_info_file), + "lib_path": cmd_args(symlink_dir, delimiter = " ", relative_to = json_info_file), + "main": cmd_args(srcs.project(ctx.attrs.main), delimiter = " ", relative_to = json_info_file), } json_file_loc = ctx.actions.write_json(json_info_file, json_info_dict, with_inputs = True) diff --git a/prelude/julia/julia_test.bzl b/prelude/julia/julia_test.bzl index d28e6e9c4094c..7b0d3e7ca4f5f 100644 --- a/prelude/julia/julia_test.bzl +++ b/prelude/julia/julia_test.bzl @@ -14,6 +14,8 @@ def julia_test_impl(ctx: AnalysisContext) -> list[Provider]: type = "julia", command = [cmd], contacts = ctx.attrs.contacts, + # FIXME: Consider setting to true + run_from_project_root = False, ) return inject_test_run_info(ctx, external_runner_test_info) + [DefaultInfo(default_output = json_info_file)] diff --git a/prelude/julia/tools/BUCK b/prelude/julia/tools/BUCK deleted file mode 100644 index 2f205a4bb0ea7..0000000000000 --- a/prelude/julia/tools/BUCK +++ /dev/null @@ -1,7 +0,0 @@ -prelude = native - -prelude.python_bootstrap_binary( - name = "parse_julia_cmd", - main = "parse_julia_cmd.py", - visibility = ["PUBLIC"], -) diff --git a/prelude/julia/tools/BUCK.v2 b/prelude/julia/tools/BUCK.v2 new file mode 100644 index 0000000000000..3867e7396105c --- /dev/null +++ b/prelude/julia/tools/BUCK.v2 @@ -0,0 +1,13 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +prelude = native + +prelude.python_bootstrap_binary( + name = "parse_julia_cmd", + main = "parse_julia_cmd.py", + visibility = ["PUBLIC"], +) diff --git a/prelude/jvm/cd_jar_creator_util.bzl b/prelude/jvm/cd_jar_creator_util.bzl index 1bf3273e1cec3..08afa2355cb8d 100644 --- a/prelude/jvm/cd_jar_creator_util.bzl +++ b/prelude/jvm/cd_jar_creator_util.bzl @@ -20,11 +20,10 @@ load( "PluginParams", # @unused Used as type ) load("@prelude//java/utils:java_utils.bzl", "declare_prefixed_name") -load("@prelude//utils:utils.bzl", "expect") +load("@prelude//utils:expect.bzl", "expect") def add_java_7_8_bootclasspath(target_level: int, bootclasspath_entries: list[Artifact], java_toolchain: JavaToolchainInfo) -> list[Artifact]: - if target_level == 7: - return bootclasspath_entries + java_toolchain.bootclasspath_7 + # bootclasspath_7 is deprecated. 
if target_level == 8: return bootclasspath_entries + java_toolchain.bootclasspath_8 return bootclasspath_entries @@ -97,7 +96,6 @@ OutputPaths = record( jar = Artifact, classes = Artifact, annotations = Artifact, - scratch = Artifact, ) def qualified_name_with_subtarget(label: Label) -> str: @@ -127,27 +125,13 @@ def define_output_paths(actions: AnalysisActions, prefix: [str, None], label: La jar = jar_parent.project("{}.jar".format(label.name)), classes = declare_prefixed_output(actions, prefix, "__classes__", dir = True), annotations = declare_prefixed_output(actions, prefix, "__gen__", dir = True), - scratch = declare_prefixed_output(actions, prefix, "scratch", dir = True), ) -# buildifier: disable=uninitialized -def add_output_paths_to_cmd_args(cmd: cmd_args, output_paths: OutputPaths, path_to_class_hashes: [Artifact, None]) -> cmd_args: - if path_to_class_hashes != None: - cmd.hidden(path_to_class_hashes.as_output()) - cmd.hidden(output_paths.jar_parent.as_output()) - cmd.hidden(output_paths.jar.as_output()) - cmd.hidden(output_paths.classes.as_output()) - cmd.hidden(output_paths.annotations.as_output()) - cmd.hidden(output_paths.scratch.as_output()) - return cmd - def encode_output_paths(label: Label, paths: OutputPaths, target_type: TargetType) -> struct: paths = struct( classesDir = paths.classes.as_output(), outputJarDirPath = paths.jar_parent.as_output(), annotationPath = paths.annotations.as_output(), - pathToSourcesList = cmd_args([paths.scratch.as_output(), "/", "__srcs__"], delimiter = ""), - workingDirectory = paths.scratch.as_output(), outputJarPath = paths.jar.as_output(), ) @@ -158,13 +142,14 @@ def encode_output_paths(label: Label, paths: OutputPaths, target_type: TargetTyp libraryTargetFullyQualifiedName = base_qualified_name(label), ) -def encode_jar_params(remove_classes: list[str], output_paths: OutputPaths) -> struct: +def encode_jar_params(remove_classes: list[str], output_paths: OutputPaths, manifest_file: Artifact | None) -> struct: return struct( jarPath = output_paths.jar.as_output(), removeEntryPredicate = struct( patterns = remove_classes, ), entriesToJar = [output_paths.classes.as_output()], + manifestFile = manifest_file, duplicatesLogLevel = "FINE", ) @@ -199,6 +184,7 @@ def get_compiling_deps_tset( abi = entry, abi_as_dir = None, required_for_source_only_abi = True, + abi_jar_snapshot = None, ))) compiling_deps_tset = actions.tset(JavaCompilingDepsTSet, children = children) @@ -211,7 +197,7 @@ def _get_source_only_abi_compiling_deps(compiling_deps_tset: [JavaCompilingDepsT for d in source_only_abi_deps: info = d.get(JavaLibraryInfo) if not info: - fail("source_only_abi_deps must produce a JavaLibraryInfo but {} does not, please remove it".format(d)) + fail("source_only_abi_deps must produce a JavaLibraryInfo but '{}' does not, please remove it".format(d.label)) if info.library_output: source_only_abi_deps_filter[info.library_output.abi] = True @@ -250,22 +236,28 @@ def encode_ap_params(annotation_processor_properties: AnnotationProcessorPropert return encoded_ap_params def encode_plugin_params(plugin_params: [PluginParams, None]) -> [struct, None]: - # TODO(cjhopman): We should change plugins to not be merged together just like APs. 
encoded_plugin_params = None if plugin_params: encoded_plugin_params = struct( parameters = [], - pluginProperties = [struct( - canReuseClassLoader = False, - doesNotAffectAbi = False, - supportsAbiGenerationFromSource = False, - processorNames = plugin_params.processors, - classpath = plugin_params.deps.project_as_json("javacd_json") if plugin_params.deps else [], - pathParams = {}, - )], + pluginProperties = [ + encode_plugin_properties(processor, arguments, plugin_params) + for processor, arguments in plugin_params.processors + ], ) return encoded_plugin_params +def encode_plugin_properties(processor: str, arguments: cmd_args, plugin_params: PluginParams) -> struct: + return struct( + canReuseClassLoader = False, + doesNotAffectAbi = False, + supportsAbiGenerationFromSource = False, + processorNames = [processor], + classpath = plugin_params.deps.project_as_json("javacd_json") if plugin_params.deps else [], + pathParams = {}, + arguments = arguments, + ) + def encode_base_jar_command( javac_tool: [str, RunInfo, Artifact, None], target_type: TargetType, @@ -282,18 +274,23 @@ def encode_base_jar_command( resources_map: dict[str, Artifact], annotation_processor_properties: AnnotationProcessorProperties, plugin_params: [PluginParams, None], + manifest_file: Artifact | None, extra_arguments: cmd_args, source_only_abi_compiling_deps: list[JavaClasspathEntry], - track_class_usage: bool) -> struct: - library_jar_params = encode_jar_params(remove_classes, output_paths) + track_class_usage: bool, + is_incremental: bool = False) -> struct: + library_jar_params = encode_jar_params(remove_classes, output_paths, manifest_file) qualified_name = get_qualified_name(label, target_type) if target_type == TargetType("source_only_abi"): compiling_classpath = classpath_jars_tag.tag_artifacts([dep.abi for dep in source_only_abi_compiling_deps]) + compiling_classpath_snapshot = {} else: expect(len(source_only_abi_compiling_deps) == 0) + compiling_deps_list = filter(None, list(compiling_deps_tset.traverse(ordering = "topological"))) if compiling_deps_tset else [] compiling_classpath = classpath_jars_tag.tag_artifacts( - compiling_deps_tset.project_as_json("javacd_json") if compiling_deps_tset else None, + [dep.abi for dep in compiling_deps_list], ) + compiling_classpath_snapshot = {dep.abi: dep.abi_jar_snapshot or "" for dep in compiling_deps_list} if is_incremental else {} build_target_value = struct( fullyQualifiedName = qualified_name, @@ -324,6 +321,7 @@ def encode_base_jar_command( return struct( outputPathsValue = encode_output_paths(label, output_paths, target_type), compileTimeClasspathPaths = compiling_classpath, + compileTimeClasspathSnapshotPaths = compiling_classpath_snapshot, javaSrcs = srcs, # TODO(cjhopman): populate jar infos. I think these are only used for unused dependencies (and appear to be broken in buck1 w/javacd anyway). 
fullJarInfos = [], @@ -358,9 +356,10 @@ def setup_dep_files( hidden = ["artifact"]) -> cmd_args: dep_file = declare_prefixed_output(actions, actions_identifier, "dep_file.txt") - new_cmd = cmd_args() - new_cmd.add(cmd) - new_cmd.add([ + new_cmd_args = [] + new_cmd_hidden = [] + new_cmd_args.append(cmd) + new_cmd_args.append([ "--used-classes", ] + [ used_classes_json.as_output() @@ -373,16 +372,16 @@ if abi_to_abi_dir_map: abi_to_abi_dir_map_file = declare_prefixed_output(actions, actions_identifier, "abi_to_abi_dir_map") actions.write(abi_to_abi_dir_map_file, abi_to_abi_dir_map) - new_cmd.add([ + new_cmd_args.extend([ "--jar-to-jar-dir-map", abi_to_abi_dir_map_file, ]) - if type(abi_to_abi_dir_map) == "transitive_set_args_projection": - new_cmd.hidden(classpath_jars_tag.tag_artifacts(abi_to_abi_dir_map)) + if isinstance(abi_to_abi_dir_map, TransitiveSetArgsProjection): + new_cmd_hidden.append(classpath_jars_tag.tag_artifacts(abi_to_abi_dir_map)) for hidden_artifact in hidden: - new_cmd.hidden(classpath_jars_tag.tag_artifacts(hidden_artifact)) + new_cmd_hidden.append(classpath_jars_tag.tag_artifacts(hidden_artifact)) - return new_cmd + return cmd_args(new_cmd_args, hidden = new_cmd_hidden) FORCE_PERSISTENT_WORKERS = read_root_config("build", "require_persistent_workers", "false").lower() == "true" @@ -393,21 +392,63 @@ def prepare_cd_exe( compiler: Artifact, main_class: str, worker: WorkerInfo, - debug_port: [int, None], - debug_target: [Label, None], + target_specified_debug_port: [int, None], + toolchain_specified_debug_port: [int, None], + toolchain_specified_debug_target: [Label, None], extra_jvm_args: list[str], - extra_jvm_args_target: [Label, None]) -> tuple: + extra_jvm_args_target: list[Label]) -> tuple: local_only = False jvm_args = ["-XX:-MaxFDLimit"] + # The variables 'extra_jvm_args' and 'extra_jvm_args_target' are general-purpose, but they are primarily designed for profiling use cases. + # The following section is configured with the profiling use case in mind. if extra_jvm_args_target: - if qualified_name == qualified_name_with_subtarget(extra_jvm_args_target): - jvm_args = jvm_args + extra_jvm_args - local_only = True + if len(extra_jvm_args_target) == 1: + # If there's only one target to profile, we want to isolate its compilation. + # This target should be built in its own action, allowing the worker (if available) to handle the remaining targets. + if qualified_name == qualified_name_with_subtarget(extra_jvm_args_target[0]): + jvm_args = jvm_args + extra_jvm_args + local_only = True # This flag ensures the target is not run on the worker. + else: + # If there are multiple targets to profile, they should be built on the worker to generate a single profiling data set. + # The remaining targets should be built individually, either locally or on Remote Execution (RE). + local_only = True # By default, targets are not run on the worker. + for target in extra_jvm_args_target: + # If the current target matches the qualified name with subtarget, it is selected for profiling. + if qualified_name == qualified_name_with_subtarget(target): + jvm_args = jvm_args + extra_jvm_args + local_only = False # This flag allows the target to run on the worker. + break else: + # If no specific target is provided, the extra JVM arguments are added to all targets, whether they run on the worker, the local machine, or RE.
jvm_args = jvm_args + extra_jvm_args - if debug_port and qualified_name == qualified_name_with_subtarget(debug_target): + # Allow the JVM compiler daemon to access internal jdk.compiler APIs + jvm_args += [ + "--add-exports=jdk.compiler/com.sun.tools.javac.code=ALL-UNNAMED", + "--add-exports=jdk.compiler/com.sun.tools.javac.comp=ALL-UNNAMED", + "--add-exports=jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED", + "--add-exports=jdk.compiler/com.sun.tools.javac.jvm=ALL-UNNAMED", + "--add-exports=jdk.compiler/com.sun.tools.javac.main=ALL-UNNAMED", + "--add-exports=jdk.compiler/com.sun.tools.javac.model=ALL-UNNAMED", + "--add-exports=jdk.compiler/com.sun.tools.javac.processing=ALL-UNNAMED", + "--add-exports=jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED", + "--add-exports=jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED", + "--add-opens=jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED", + "--add-opens=jdk.compiler/com.sun.tools.javac.code=ALL-UNNAMED", + "--add-opens=jdk.compiler/com.sun.tools.javac.jvm=ALL-UNNAMED", + "--add-opens=jdk.compiler/com.sun.tools.javac.processing=ALL-UNNAMED", + "--add-opens=jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED", + ] + + if target_specified_debug_port: + debug_port = target_specified_debug_port + elif toolchain_specified_debug_port and qualified_name == qualified_name_with_subtarget(toolchain_specified_debug_target): + debug_port = toolchain_specified_debug_port + else: + debug_port = None + + if debug_port: # Do not use a worker when debugging is enabled local_only = True jvm_args.extend(["-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address={}".format(debug_port)]) @@ -426,21 +467,39 @@ def prepare_cd_exe( ) return worker_run_info, FORCE_PERSISTENT_WORKERS +FinalJarOutput = record( + final_jar = Artifact, + # The same as final_jar unless there is a jar_postprocessor. + preprocessed_jar = Artifact, +) + # If there are additional compiled srcs, we need to merge them in, and if the # caller specified an output artifact, we need to make sure the jar is in that # location.
def prepare_final_jar( actions: AnalysisActions, actions_identifier: [str, None], - output: [Artifact, None], + output: Artifact | None, output_paths: OutputPaths, - additional_compiled_srcs: [Artifact, None], - jar_builder: RunInfo) -> Artifact: + additional_compiled_srcs: Artifact | None, + jar_builder: RunInfo, + jar_postprocessor: [RunInfo, None], + jar_postprocessor_runner: RunInfo, + zip_scrubber: RunInfo) -> FinalJarOutput: + def make_output(jar: Artifact) -> FinalJarOutput: + if jar_postprocessor: + postprocessed_jar = postprocess_jar(actions, zip_scrubber, jar_postprocessor, jar_postprocessor_runner, jar, actions_identifier) + return FinalJarOutput(final_jar = postprocessed_jar, preprocessed_jar = jar) + else: + return FinalJarOutput(final_jar = jar, preprocessed_jar = jar) + if not additional_compiled_srcs: + output_jar = output_paths.jar if output: actions.copy_file(output.as_output(), output_paths.jar) - return output - return output_paths.jar + output_jar = output + + return make_output(output_jar) merged_jar = output if not merged_jar: @@ -454,25 +513,26 @@ def prepare_final_jar( merged_jar.as_output(), "--entries-to-jar", files_to_merge_file, - ]).hidden(files_to_merge), + ], hidden = files_to_merge), category = "merge_additional_srcs", identifier = actions_identifier, ) - return merged_jar + + return make_output(merged_jar) def generate_abi_jars( actions: AnalysisActions, actions_identifier: [str, None], label: Label, abi_generation_mode: [AbiGenerationMode, None], - additional_compiled_srcs: [Artifact, None], + additional_compiled_srcs: Artifact | None, is_building_android_binary: bool, class_abi_generator: Dependency, final_jar: Artifact, compiling_deps_tset: [JavaCompilingDepsTSet, None], source_only_abi_deps: list[Dependency], - class_abi_jar: [Artifact, None], - class_abi_output_dir: [Artifact, None], + class_abi_jar: Artifact | None, + class_abi_output_dir: Artifact | None, encode_abi_command: typing.Callable, define_action: typing.Callable) -> tuple: class_abi = None @@ -502,7 +562,6 @@ def generate_abi_jars( source_abi_classpath_jars_tag, source_abi_dir, source_abi_target_type, - path_to_class_hashes = None, ) source_abi = source_abi_output_paths.jar @@ -528,7 +587,6 @@ def generate_abi_jars( source_only_abi_classpath_jars_tag, source_only_abi_dir, source_only_abi_target_type, - path_to_class_hashes = None, source_only_abi_compiling_deps = source_only_abi_compiling_deps, ) source_only_abi = source_only_abi_output_paths.jar @@ -548,3 +606,32 @@ def generate_abi_jars( classpath_abi_dir = class_abi_output_dir return class_abi, source_abi, source_only_abi, classpath_abi, classpath_abi_dir + +def postprocess_jar( + actions: AnalysisActions, + zip_scrubber: RunInfo, + jar_postprocessor: RunInfo, + jar_postprocessor_runner: RunInfo, + original_jar: Artifact, + actions_identifier: [str, None]) -> Artifact: + jar_path = original_jar.short_path + postprocessed_output = actions.declare_output("postprocessed_{}".format(jar_path)) + + postprocess_jar_cmd = cmd_args( + jar_postprocessor_runner, + "--postprocessor_cmd", + cmd_args([ + jar_postprocessor, + original_jar, + postprocessed_output.as_output(), + ], delimiter = " "), + "--zip_scrubber", + cmd_args(zip_scrubber, delimiter = " "), + "--output", + postprocessed_output.as_output(), + ) + + identifier = actions_identifier if actions_identifier else "" + actions.run(postprocess_jar_cmd, category = "postprocessed{}".format(identifier)) + + return postprocessed_output diff --git a/prelude/kotlin/kotlin.bzl 
b/prelude/kotlin/kotlin.bzl index 4c9dd097e2b23..6bff10eac2601 100644 --- a/prelude/kotlin/kotlin.bzl +++ b/prelude/kotlin/kotlin.bzl @@ -5,6 +5,7 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +load("@prelude//:validation_deps.bzl", "VALIDATION_DEPS_ATTR_NAME") load("@prelude//android:build_only_native_code.bzl", "is_build_only_native_code") load("@prelude//android:configuration.bzl", "is_building_android_binary_attr") load("@prelude//java:java.bzl", "AbiGenerationMode", "dex_min_sdk_version") @@ -21,8 +22,8 @@ implemented_rules = { extra_attributes = { "kotlin_library": { "abi_generation_mode": attrs.option(attrs.enum(AbiGenerationMode), default = None), - "javac": attrs.option(attrs.one_of(attrs.dep(), attrs.source()), default = None), "resources_root": attrs.option(attrs.string(), default = None), + VALIDATION_DEPS_ATTR_NAME: attrs.set(attrs.dep(), sorted = True, default = []), "_build_only_native_code": attrs.default_only(attrs.bool(default = is_build_only_native_code())), "_dex_min_sdk_version": attrs.option(attrs.int(), default = dex_min_sdk_version()), "_dex_toolchain": toolchains_common.dex(), @@ -33,8 +34,9 @@ extra_attributes = { }, "kotlin_test": { "abi_generation_mode": attrs.option(attrs.enum(AbiGenerationMode), default = None), - "javac": attrs.option(attrs.one_of(attrs.dep(), attrs.source()), default = None), + "java_agents": attrs.list(attrs.source(), default = []), "resources_root": attrs.option(attrs.string(), default = None), + "test_class_names_file": attrs.option(attrs.source(), default = None), "unbundled_resources_root": attrs.option(attrs.source(allow_directory = True), default = None), "_build_only_native_code": attrs.default_only(attrs.bool(default = is_build_only_native_code())), "_exec_os_type": buck.exec_os_type_arg(), diff --git a/prelude/kotlin/kotlin_library.bzl b/prelude/kotlin/kotlin_library.bzl index ba06067b26261..8ad39f7b02996 100644 --- a/prelude/kotlin/kotlin_library.bzl +++ b/prelude/kotlin/kotlin_library.bzl @@ -5,6 +5,7 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
+load("@prelude//:validation_deps.bzl", "get_validation_deps_outputs") load("@prelude//android:android_providers.bzl", "merge_android_packageable_info") load( "@prelude//java:java_library.bzl", @@ -29,7 +30,8 @@ load( ) load("@prelude//java/plugins:java_annotation_processor.bzl", "AnnotationProcessorProperties", "create_annotation_processor_properties", "create_ksp_annotation_processor_properties") load("@prelude//java/plugins:java_plugin.bzl", "create_plugin_params") -load("@prelude//java/utils:java_utils.bzl", "derive_javac", "get_abi_generation_mode", "get_class_to_source_map_info", "get_default_info", "get_java_version_attributes", "get_path_separator_for_exec_os") +load("@prelude//java/utils:java_more_utils.bzl", "get_path_separator_for_exec_os") +load("@prelude//java/utils:java_utils.bzl", "derive_javac", "get_abi_generation_mode", "get_class_to_source_map_info", "get_default_info", "get_java_version_attributes") load("@prelude//jvm:nullsafe.bzl", "get_nullsafe_info") load( "@prelude//kotlin:kotlin_toolchain.bzl", @@ -37,7 +39,10 @@ load( ) load("@prelude//kotlin:kotlin_utils.bzl", "get_kotlinc_compatible_target") load("@prelude//kotlin:kotlincd_jar_creator.bzl", "create_jar_artifact_kotlincd") -load("@prelude//utils:utils.bzl", "is_any", "map_idx") +load("@prelude//utils:argfile.bzl", "at_argfile") +load("@prelude//utils:expect.bzl", "expect") +load("@prelude//utils:lazy.bzl", "lazy") +load("@prelude//utils:utils.bzl", "map_idx") _JAVA_OR_KOTLIN_FILE_EXTENSION = [".java", ".kt"] @@ -47,7 +52,7 @@ def _create_kotlin_sources( deps: list[Dependency], annotation_processor_properties: AnnotationProcessorProperties, ksp_annotation_processor_properties: AnnotationProcessorProperties, - additional_classpath_entries: list[Artifact]) -> (Artifact, [Artifact, None], [Artifact, None]): + additional_classpath_entries: list[Artifact]) -> (Artifact, Artifact | None, Artifact | None): """ Runs kotlinc on the provided kotlin sources. 
""" @@ -57,19 +62,21 @@ def _create_kotlin_sources( kotlinc = kotlin_toolchain.kotlinc[RunInfo] kotlinc_output = ctx.actions.declare_output("kotlinc_classes_output", dir = True) - compile_kotlin_cmd = cmd_args([ + compile_kotlin_cmd_args = [ compile_kotlin_tool, "--kotlinc_output", kotlinc_output.as_output(), - ]) + ] + compile_kotlin_cmd_hidden = [] + java_toolchain = ctx.attrs._java_toolchain[JavaToolchainInfo] zip_scrubber_args = ["--zip_scrubber", cmd_args(java_toolchain.zip_scrubber, delimiter = " ")] - compile_kotlin_cmd.add(zip_scrubber_args) + compile_kotlin_cmd_args.append(zip_scrubber_args) kotlinc_cmd_args = cmd_args([kotlinc]) compiling_classpath = [] + additional_classpath_entries - compiling_deps_tset = derive_compiling_deps(ctx.actions, None, deps + kotlin_toolchain.kotlinc_classpath) + compiling_deps_tset = derive_compiling_deps(ctx.actions, None, deps + [kotlin_toolchain.kotlin_stdlib]) if compiling_deps_tset: compiling_classpath.extend( [compiling_dep.abi for compiling_dep in list(compiling_deps_tset.traverse())], @@ -80,17 +87,15 @@ def _create_kotlin_sources( delimiter = get_path_separator_for_exec_os(ctx), ) - # write joined classpath string into args file - classpath_args_file, _ = ctx.actions.write( - "kotlinc_classpath", - classpath_args, - allow_args = True, - ) - - compile_kotlin_cmd.hidden([compiling_classpath]) + compile_kotlin_cmd_hidden.append([compiling_classpath]) kotlinc_cmd_args.add(["-classpath"]) - kotlinc_cmd_args.add(cmd_args(classpath_args_file, format = "@{}")) + kotlinc_cmd_args.add(at_argfile( + actions = ctx.actions, + name = "kotlinc_classpath", + args = classpath_args, + allow_args = True, + )) module_name = ctx.label.package.replace("/", ".") + "." + ctx.label.name kotlinc_cmd_args.add( @@ -111,9 +116,9 @@ def _create_kotlin_sources( kapt_generated_sources_output = None if annotation_processor_properties.annotation_processors: - compile_kotlin_cmd.add(["--kapt_annotation_processing_jar", kotlin_toolchain.annotation_processing_jar[JavaLibraryInfo].library_output.full_library]) - compile_kotlin_cmd.add(["--kapt_annotation_processors", ",".join([p for ap in annotation_processor_properties.annotation_processors for p in ap.processors])]) - compile_kotlin_cmd.add(["--kapt_annotation_processor_params", ";".join(annotation_processor_properties.annotation_processor_params)]) + compile_kotlin_cmd_args.extend(["--kapt_annotation_processing_jar", kotlin_toolchain.annotation_processing_jar[JavaLibraryInfo].library_output.full_library]) + compile_kotlin_cmd_args.extend(["--kapt_annotation_processors", ",".join([p for ap in annotation_processor_properties.annotation_processors for p in ap.processors])]) + compile_kotlin_cmd_args.extend(["--kapt_annotation_processor_params", ";".join(annotation_processor_properties.annotation_processor_params)]) annotation_processor_classpath_tsets = ( filter(None, ([ap.deps for ap in annotation_processor_properties.annotation_processors])) + @@ -124,23 +129,23 @@ def _create_kotlin_sources( children = annotation_processor_classpath_tsets, ).project_as_args("full_jar_args") kapt_classpath_file = ctx.actions.write("kapt_classpath_file", annotation_processor_classpath) - compile_kotlin_cmd.add(["--kapt_classpath_file", kapt_classpath_file]) - compile_kotlin_cmd.hidden(annotation_processor_classpath) + compile_kotlin_cmd_args.extend(["--kapt_classpath_file", kapt_classpath_file]) + compile_kotlin_cmd_hidden.append(annotation_processor_classpath) sources_output = ctx.actions.declare_output("kapt_sources_output") - 
compile_kotlin_cmd.add(["--kapt_sources_output", sources_output.as_output()]) + compile_kotlin_cmd_args.append(["--kapt_sources_output", sources_output.as_output()]) classes_output = ctx.actions.declare_output("kapt_classes_output") - compile_kotlin_cmd.add(["--kapt_classes_output", classes_output.as_output()]) + compile_kotlin_cmd_args.append(["--kapt_classes_output", classes_output.as_output()]) stubs = ctx.actions.declare_output("kapt_stubs") - compile_kotlin_cmd.add(["--kapt_stubs", stubs.as_output()]) + compile_kotlin_cmd_args.append(["--kapt_stubs", stubs.as_output()]) kapt_generated_sources_output = ctx.actions.declare_output("kapt_generated_sources_output.src.zip") - compile_kotlin_cmd.add(["--kapt_generated_sources_output", kapt_generated_sources_output.as_output()]) - compile_kotlin_cmd.add(["--kapt_base64_encoder", cmd_args(kotlin_toolchain.kapt_base64_encoder[RunInfo], delimiter = " ")]) + compile_kotlin_cmd_args.append(["--kapt_generated_sources_output", kapt_generated_sources_output.as_output()]) + compile_kotlin_cmd_args.append(["--kapt_base64_encoder", cmd_args(kotlin_toolchain.kapt_base64_encoder[RunInfo], delimiter = " ")]) generated_kotlin_output = ctx.actions.declare_output("kapt_generated_kotlin_output") - compile_kotlin_cmd.add(["--kapt_generated_kotlin_output", generated_kotlin_output.as_output()]) + compile_kotlin_cmd_args.append(["--kapt_generated_kotlin_output", generated_kotlin_output.as_output()]) if jvm_target: - compile_kotlin_cmd.add(["--kapt_jvm_target", jvm_target]) + compile_kotlin_cmd_args.append(["--kapt_jvm_target", jvm_target]) friend_paths = ctx.attrs.friend_paths if friend_paths: @@ -153,8 +158,8 @@ def _create_kotlin_sources( ksp_zipped_sources_output = None if ksp_annotation_processor_properties.annotation_processors: - ksp_cmd = cmd_args(compile_kotlin_tool) - ksp_cmd.add(zip_scrubber_args) + ksp_cmd = [compile_kotlin_tool] + ksp_cmd.append(zip_scrubber_args) ksp_annotation_processor_classpath_tsets = filter(None, ([ap.deps for ap in ksp_annotation_processor_properties.annotation_processors])) if ksp_annotation_processor_classpath_tsets: @@ -162,22 +167,24 @@ def _create_kotlin_sources( JavaPackagingDepTSet, children = ksp_annotation_processor_classpath_tsets, ).project_as_args("full_jar_args") - ksp_cmd.add(["--ksp_processor_jars"]) - ksp_cmd.add(cmd_args(ksp_annotation_processor_classpath, delimiter = ",")) + ksp_cmd.append("--ksp_processor_jars") + ksp_cmd.append(cmd_args(ksp_annotation_processor_classpath, delimiter = ",")) - ksp_cmd.add(["--ksp_classpath", classpath_args]) + ksp_cmd.extend(["--ksp_classpath", classpath_args]) ksp_classes_and_resources_output = ctx.actions.declare_output("ksp_output_dir/ksp_classes_and_resources_output") - ksp_cmd.add(["--ksp_classes_and_resources_output", ksp_classes_and_resources_output.as_output()]) - ksp_output = cmd_args(ksp_classes_and_resources_output.as_output()).parent() - ksp_cmd.add(["--ksp_output", ksp_output]) + ksp_cmd.extend(["--ksp_classes_and_resources_output", ksp_classes_and_resources_output.as_output()]) + ksp_output = cmd_args(ksp_classes_and_resources_output.as_output(), parent = 1) + ksp_cmd.extend(["--ksp_output", ksp_output]) ksp_sources_output = ctx.actions.declare_output("ksp_output_dir/ksp_sources_output") - ksp_cmd.add(["--ksp_sources_output", ksp_sources_output.as_output()]) + ksp_cmd.extend(["--ksp_sources_output", ksp_sources_output.as_output()]) ksp_zipped_sources_output = ctx.actions.declare_output("ksp_output_dir/ksp_zipped_sources_output.src.zip") - 
ksp_cmd.add(["--ksp_zipped_sources_output", ksp_zipped_sources_output.as_output()]) - ksp_cmd.add(["--ksp_project_base_dir", ctx.label.path]) + ksp_cmd.extend(["--ksp_zipped_sources_output", ksp_zipped_sources_output.as_output()]) + ksp_cmd.extend(["--ksp_project_base_dir", ctx.label.path]) ksp_kotlinc_cmd_args = cmd_args(kotlinc_cmd_args) - _add_plugins(ctx, ksp_kotlinc_cmd_args, ksp_cmd, is_ksp = True) + plugins_cmd_args = _add_plugins(ctx, is_ksp = True) + ksp_kotlinc_cmd_args.add(plugins_cmd_args.kotlinc_cmd_args) + ksp_cmd.append(plugins_cmd_args.compile_kotlin_cmd) ksp_cmd_args_file, _ = ctx.actions.write( "ksp_kotlinc_cmd", @@ -185,21 +192,24 @@ def _create_kotlin_sources( allow_args = True, ) - ksp_cmd.add("--kotlinc_cmd_file") - ksp_cmd.add(ksp_cmd_args_file) - ksp_cmd.hidden(ksp_kotlinc_cmd_args) + ksp_cmd.extend(["--kotlinc_cmd_file", ksp_cmd_args_file]) - ctx.actions.run(ksp_cmd, category = "ksp_kotlinc") + ctx.actions.run( + cmd_args(ksp_cmd, hidden = ksp_kotlinc_cmd_args), + category = "ksp_kotlinc", + ) zipped_sources = (zipped_sources or []) + [ksp_zipped_sources_output] - compile_kotlin_cmd.add(["--ksp_generated_classes_and_resources", ksp_classes_and_resources_output]) + compile_kotlin_cmd_args.extend(["--ksp_generated_classes_and_resources", ksp_classes_and_resources_output]) - _add_plugins(ctx, kotlinc_cmd_args, compile_kotlin_cmd, is_ksp = False) + plugin_cmd_args = _add_plugins(ctx, is_ksp = False) + kotlinc_cmd_args.add(plugin_cmd_args.kotlinc_cmd_args) + compile_kotlin_cmd_args.append(plugin_cmd_args.compile_kotlin_cmd) if zipped_sources: zipped_sources_file = ctx.actions.write("kotlinc_zipped_source_args", zipped_sources) - compile_kotlin_cmd.add(["--zipped_sources_file", zipped_sources_file]) - compile_kotlin_cmd.hidden(zipped_sources) + compile_kotlin_cmd_args.append(["--zipped_sources_file", zipped_sources_file]) + compile_kotlin_cmd_hidden.append(zipped_sources) args_file, _ = ctx.actions.write( "kotlinc_cmd", @@ -207,24 +217,32 @@ def _create_kotlin_sources( allow_args = True, ) - compile_kotlin_cmd.hidden([plain_sources]) + compile_kotlin_cmd_hidden.append(plain_sources) - compile_kotlin_cmd.add("--kotlinc_cmd_file") - compile_kotlin_cmd.add(args_file) - compile_kotlin_cmd.hidden(kotlinc_cmd_args) + compile_kotlin_cmd_args.append("--kotlinc_cmd_file") + compile_kotlin_cmd_args.append(args_file) + compile_kotlin_cmd_hidden.append(kotlinc_cmd_args) - ctx.actions.run(compile_kotlin_cmd, category = "kotlinc") + ctx.actions.run( + cmd_args(compile_kotlin_cmd_args, hidden = compile_kotlin_cmd_hidden), + category = "kotlinc", + ) return kotlinc_output, kapt_generated_sources_output, ksp_zipped_sources_output def _is_ksp_plugin(plugin: str) -> bool: return "symbol-processing" in plugin +_PluginCmdArgs = record( + kotlinc_cmd_args = cmd_args, + compile_kotlin_cmd = cmd_args, +) + def _add_plugins( ctx: AnalysisContext, - kotlinc_cmd_args: cmd_args, - compile_kotlin_cmd: cmd_args, - is_ksp: bool): + is_ksp: bool) -> _PluginCmdArgs: + kotlinc_cmd_args = cmd_args() + compile_kotlin_cmd = cmd_args() for plugin, plugin_options in ctx.attrs.kotlin_compiler_plugins.items(): if _is_ksp_plugin(str(plugin)) != is_ksp: continue @@ -243,6 +261,8 @@ def _add_plugins( if options: kotlinc_cmd_args.add(["-P", cmd_args(options, delimiter = ",")]) + return _PluginCmdArgs(kotlinc_cmd_args = kotlinc_cmd_args, compile_kotlin_cmd = compile_kotlin_cmd) + def kotlin_library_impl(ctx: AnalysisContext) -> list[Provider]: packaging_deps = ctx.attrs.deps + ctx.attrs.exported_deps + 
ctx.attrs.runtime_deps @@ -262,16 +282,29 @@ def kotlin_library_impl(ctx: AnalysisContext) -> list[Provider]: android_packageable_info, ] - java_providers = build_kotlin_library(ctx) + java_providers = build_kotlin_library( + ctx = ctx, + validation_deps_outputs = get_validation_deps_outputs(ctx), + ) return to_list(java_providers) + [android_packageable_info] +def _check_exported_deps(exported_deps: list[Dependency], attr_name: str): + for exported_dep in exported_deps: + # TODO(navidq) add a check that the exported dep always has a JavaLibraryInfo provider + if JavaLibraryInfo in exported_dep: + expect( + not exported_dep[JavaLibraryInfo].may_not_be_exported, + "{} has 'may_not_be_exported' label and should not be present in {}.".format(exported_dep.label.raw_target(), attr_name), + ) + def build_kotlin_library( ctx: AnalysisContext, additional_classpath_entries: list[Artifact] = [], bootclasspath_entries: list[Artifact] = [], - extra_sub_targets: dict = {}) -> JavaProviders: + extra_sub_targets: dict = {}, + validation_deps_outputs: [list[Artifact], None] = None) -> JavaProviders: srcs = ctx.attrs.srcs - has_kotlin_srcs = is_any(lambda src: src.extension == ".kt" or src.basename.endswith(".src.zip") or src.basename.endswith("-sources.jar"), srcs) + has_kotlin_srcs = lazy.is_any(lambda src: src.extension == ".kt" or src.basename.endswith(".src.zip") or src.basename.endswith("-sources.jar"), srcs) if not has_kotlin_srcs: return build_java_library( @@ -282,11 +315,19 @@ def build_kotlin_library( # Match buck1, which always does class ABI generation for Kotlin targets unless explicitly specified. override_abi_generation_mode = get_abi_generation_mode(ctx.attrs.abi_generation_mode) or AbiGenerationMode("class"), extra_sub_targets = extra_sub_targets, + validation_deps_outputs = validation_deps_outputs, ) else: + compose_stability_config = getattr(ctx.attrs, "compose_stability_config", None) + if compose_stability_config != None: + ctx.attrs.extra_kotlinc_arguments.append("-P") + ctx.attrs.extra_kotlinc_arguments.append(cmd_args(["plugin:androidx.compose.compiler.plugins.kotlin:stabilityConfigurationPath", ctx.attrs._compose_stability_config], delimiter = "=")) + deps_query = getattr(ctx.attrs, "deps_query", []) or [] provided_deps_query = getattr(ctx.attrs, "provided_deps_query", []) or [] + _check_exported_deps(ctx.attrs.exported_deps, "exported_deps") + _check_exported_deps(ctx.attrs.exported_provided_deps, "exported_provided_deps") deps = ( ctx.attrs.deps + deps_query + @@ -302,7 +343,7 @@ def build_kotlin_library( ctx.attrs.annotation_processor_params, ctx.attrs.annotation_processor_deps, ) - ksp_annotation_processor_properties = create_ksp_annotation_processor_properties(ctx, ctx.attrs.plugins) + ksp_annotation_processor_properties = create_ksp_annotation_processor_properties(ctx.attrs.plugins) kotlin_toolchain = ctx.attrs._kotlin_toolchain[KotlinToolchainInfo] if kotlin_toolchain.kotlinc_protocol == "classic": @@ -329,11 +370,18 @@ def build_kotlin_library( additional_compiled_srcs = kotlinc_classes, generated_sources = filter(None, [kapt_generated_sources, ksp_generated_sources]), extra_sub_targets = extra_sub_targets, + validation_deps_outputs = validation_deps_outputs, ) return java_lib elif kotlin_toolchain.kotlinc_protocol == "kotlincd": source_level, target_level = get_java_version_attributes(ctx) - extra_arguments = cmd_args(ctx.attrs.extra_arguments) + extra_arguments = cmd_args( + ctx.attrs.extra_arguments, + # The outputs of validation_deps need to be added as hidden
arguments + # to an action for the validation_deps targets to be built and enforced. + hidden = validation_deps_outputs or [], + ) + common_kotlincd_kwargs = { "abi_generation_mode": get_abi_generation_mode(ctx.attrs.abi_generation_mode), "actions": ctx.actions, @@ -343,16 +391,19 @@ def build_kotlin_library( annotation_processor_params = annotation_processor_properties.annotation_processor_params + ksp_annotation_processor_properties.annotation_processor_params, ), "bootclasspath_entries": bootclasspath_entries, - "deps": deps, + "debug_port": getattr(ctx.attrs, "debug_port", None), + "deps": deps + [kotlin_toolchain.kotlin_stdlib], "extra_kotlinc_arguments": ctx.attrs.extra_kotlinc_arguments, "friend_paths": ctx.attrs.friend_paths, "is_building_android_binary": ctx.attrs._is_building_android_binary, + "jar_postprocessor": ctx.attrs.jar_postprocessor[RunInfo] if hasattr(ctx.attrs, "jar_postprocessor") and ctx.attrs.jar_postprocessor else None, "java_toolchain": ctx.attrs._java_toolchain[JavaToolchainInfo], "javac_tool": derive_javac(ctx.attrs.javac) if ctx.attrs.javac else None, "k2": ctx.attrs.k2, "kotlin_compiler_plugins": ctx.attrs.kotlin_compiler_plugins, "kotlin_toolchain": kotlin_toolchain, "label": ctx.label, + "manifest_file": ctx.attrs.manifest_file, "remove_classes": ctx.attrs.remove_classes, "required_for_source_only_abi": ctx.attrs.required_for_source_only_abi, "resources": ctx.attrs.resources, @@ -362,13 +413,21 @@ def build_kotlin_library( "srcs": srcs, "target_level": target_level, } - outputs = create_jar_artifact_kotlincd( + outputs, proto = create_jar_artifact_kotlincd( plugin_params = create_plugin_params(ctx, ctx.attrs.plugins), extra_arguments = extra_arguments, actions_identifier = "", + incremental = ctx.attrs.incremental, **common_kotlincd_kwargs ) + if proto: + extra_sub_targets = extra_sub_targets | {"jar_command_proto_json": [DefaultInfo(default_output = proto)]} + if outputs and outputs.incremental_state_dir: + extra_sub_targets = extra_sub_targets | {"incremental_state_dir": [ + DefaultInfo(default_output = outputs.incremental_state_dir), + ]} + if outputs and outputs.annotation_processor_output: generated_sources = [outputs.annotation_processor_output] extra_sub_targets = extra_sub_targets | {"generated_sources": [ @@ -391,6 +450,7 @@ def build_kotlin_library( # To make sure that even for pure Kotlin targets empty output dir is always present optional_dirs = [nullsafe_info.output.as_output()], is_creating_subtarget = True, + incremental = False, **common_kotlincd_kwargs ) @@ -398,9 +458,18 @@ def build_kotlin_library( DefaultInfo(default_output = nullsafe_info.output), ]} - java_library_info, java_packaging_info, shared_library_info, cxx_resource_info, linkable_graph, template_placeholder_info, intellij_info = create_java_library_providers( + class_to_src_map, sources_jar, class_to_src_map_sub_targets = get_class_to_source_map_info( + ctx, + outputs = outputs, + deps = ctx.attrs.deps + deps_query + ctx.attrs.exported_deps, + generate_sources_jar = True, + ) + extra_sub_targets = extra_sub_targets | class_to_src_map_sub_targets + + java_library_info, java_packaging_info, global_code_info, shared_library_info, cxx_resource_info, linkable_graph, template_placeholder_info, intellij_info = create_java_library_providers( ctx, library_output = outputs.classpath_entry if outputs else None, + global_code_config = java_toolchain.global_code_config, declared_deps = ctx.attrs.deps + deps_query, exported_deps = ctx.attrs.exported_deps, provided_deps = 
ctx.attrs.provided_deps + provided_deps_query, @@ -409,15 +478,10 @@ def build_kotlin_library( needs_desugar = source_level > 7 or target_level > 7, generated_sources = generated_sources, has_srcs = bool(srcs), + sources_jar = sources_jar, + preprocessed_library = outputs.preprocessed_library if outputs else None, ) - class_to_src_map, class_to_src_map_sub_targets = get_class_to_source_map_info( - ctx, - outputs = outputs, - deps = ctx.attrs.deps + deps_query + ctx.attrs.exported_deps, - ) - extra_sub_targets = extra_sub_targets | class_to_src_map_sub_targets - default_info = get_default_info( ctx.actions, ctx.attrs._java_toolchain[JavaToolchainInfo], @@ -429,6 +493,7 @@ def build_kotlin_library( java_library_info = java_library_info, java_library_intellij_info = intellij_info, java_packaging_info = java_packaging_info, + java_global_code_info = global_code_info, shared_library_info = shared_library_info, cxx_resource_info = cxx_resource_info, linkable_graph = linkable_graph, diff --git a/prelude/kotlin/kotlin_toolchain.bzl b/prelude/kotlin/kotlin_toolchain.bzl index 248ea5a197cdc..b24c0ae98e30f 100644 --- a/prelude/kotlin/kotlin_toolchain.bzl +++ b/prelude/kotlin/kotlin_toolchain.bzl @@ -11,30 +11,31 @@ KotlinToolchainInfo = provider( # @unsorted-dict-items doc = "Kotlin toolchain info", fields = { + "allow_k2_usage": provider_field(typing.Any, default = None), "annotation_processing_jar": provider_field(typing.Any, default = None), "class_loader_bootstrapper": provider_field(typing.Any, default = None), - "compilation_tracer_plugin": provider_field(typing.Any, default = None), "compile_kotlin": provider_field(typing.Any, default = None), "dep_files": provider_field(typing.Any, default = None), + "enable_incremental_compilation": provider_field(typing.Any, default = None), "jvm_abi_gen_plugin": provider_field(typing.Any, default = None), "kapt_base64_encoder": provider_field(typing.Any, default = None), "kosabi_applicability_plugin": provider_field(typing.Any, default = None), "kosabi_jvm_abi_gen_plugin": provider_field(typing.Any, default = None), + "kosabi_source_modifier_plugin": provider_field(typing.Any, default = None), + "kosabi_standalone": provider_field(typing.Any, default = None), "kosabi_stubs_gen_plugin": provider_field(typing.Any, default = None), - "kosabi_supported_ksp_providers": provider_field(typing.Any, default = None), + "kotlin_error_handler": provider_field(typing.Any, default = None), "kotlin_home_libraries": provider_field(typing.Any, default = None), "kotlin_stdlib": provider_field(typing.Any, default = None), "kotlinc": provider_field(typing.Any, default = None), - "kotlinc_classpath": provider_field(typing.Any, default = None), "kotlinc_protocol": provider_field(typing.Any, default = None), + "kotlinc_run_via_build_tools_api": provider_field(typing.Any, default = None), "kotlincd_debug_port": provider_field(typing.Any, default = None), "kotlincd_debug_target": provider_field(typing.Any, default = None), "kotlincd_jvm_args": provider_field(typing.Any, default = None), "kotlincd_jvm_args_target": provider_field(typing.Any, default = None), "kotlincd_main_class": provider_field(typing.Any, default = None), "kotlincd_worker": provider_field(typing.Any, default = None), - "qpld_dotslash": provider_field(typing.Any, default = None), - "should_use_compilation_tracer": provider_field(typing.Any, default = None), "track_class_usage_plugin": provider_field(typing.Any, default = None), }, ) diff --git a/prelude/kotlin/kotlincd_jar_creator.bzl 
b/prelude/kotlin/kotlincd_jar_creator.bzl index 62d16aa7c69a2..aed8cda451ae7 100644 --- a/prelude/kotlin/kotlincd_jar_creator.bzl +++ b/prelude/kotlin/kotlincd_jar_creator.bzl @@ -10,6 +10,7 @@ load( "JavaClasspathEntry", # @unused Used as a type "JavaCompileOutputs", # @unused Used as a type "JavaLibraryInfo", + "generate_java_classpath_snapshot", "make_compile_outputs", ) load("@prelude//java:java_resources.bzl", "get_resources_map") @@ -26,7 +27,6 @@ load( "@prelude//jvm:cd_jar_creator_util.bzl", "OutputPaths", "TargetType", - "add_output_paths_to_cmd_args", "base_qualified_name", "declare_prefixed_output", "define_output_paths", @@ -40,12 +40,8 @@ load( ) load("@prelude//kotlin:kotlin_toolchain.bzl", "KotlinToolchainInfo") load("@prelude//kotlin:kotlin_utils.bzl", "get_kotlinc_compatible_target") -load("@prelude//utils:utils.bzl", "expect", "map_idx") - -buckPaths = struct( - configuredBuckOut = "buck-out/v2", - includeTargetConfigHash = True, -) +load("@prelude//utils:expect.bzl", "expect") +load("@prelude//utils:utils.bzl", "map_idx") def create_jar_artifact_kotlincd( actions: AnalysisActions, @@ -61,6 +57,7 @@ def create_jar_artifact_kotlincd( resources_root: [str, None], annotation_processor_properties: AnnotationProcessorProperties, plugin_params: [PluginParams, None], + manifest_file: Artifact | None, source_level: int, target_level: int, deps: list[Dependency], @@ -72,10 +69,13 @@ def create_jar_artifact_kotlincd( is_building_android_binary: bool, friend_paths: list[Dependency], kotlin_compiler_plugins: dict, - extra_kotlinc_arguments: list[str], + extra_kotlinc_arguments: list, k2: bool, + incremental: bool, is_creating_subtarget: bool = False, - optional_dirs: list[OutputArtifact] = []) -> JavaCompileOutputs: + optional_dirs: list[OutputArtifact] = [], + jar_postprocessor: [RunInfo, None] = None, + debug_port: [int, None] = None) -> (JavaCompileOutputs, Artifact): resources_map = get_resources_map( java_toolchain = java_toolchain, package = label.package, @@ -89,7 +89,10 @@ def create_jar_artifact_kotlincd( output_paths = define_output_paths(actions, actions_identifier, label) path_to_class_hashes_out = declare_prefixed_output(actions, actions_identifier, "classes.txt") - should_create_class_abi = not is_creating_subtarget and (actual_abi_generation_mode == AbiGenerationMode("class") or not is_building_android_binary) + should_create_class_abi = \ + not is_creating_subtarget and \ + (actual_abi_generation_mode == AbiGenerationMode("class") or not is_building_android_binary) and \ + kotlin_toolchain.jvm_abi_gen_plugin != None if should_create_class_abi: class_abi_jar = declare_prefixed_output(actions, actions_identifier, "class-abi.jar") class_abi_output_dir = declare_prefixed_output(actions, actions_identifier, "class_abi_dir", dir = True) @@ -101,46 +104,68 @@ def create_jar_artifact_kotlincd( jvm_abi_gen = None should_use_jvm_abi_gen = False - def encode_kotlin_extra_params(kotlin_compiler_plugins): + should_kotlinc_run_incrementally = kotlin_toolchain.enable_incremental_compilation and incremental + incremental_state_dir = declare_prefixed_output(actions, actions_identifier, "incremental_state", dir = True) if should_kotlinc_run_incrementally else None + + def encode_kotlin_extra_params(kotlin_compiler_plugins, incremental_state_dir = None): + kosabiPluginOptionsMap = {} + if kotlin_toolchain.kosabi_stubs_gen_plugin != None: + kosabiPluginOptionsMap["kosabi_stubs_gen_plugin"] = kotlin_toolchain.kosabi_stubs_gen_plugin + + if kotlin_toolchain.kosabi_source_modifier_plugin != 
None: + kosabiPluginOptionsMap["kosabi_source_modifier_plugin"] = kotlin_toolchain.kosabi_source_modifier_plugin + + if kotlin_toolchain.kosabi_applicability_plugin != None: + kosabiPluginOptionsMap["kosabi_applicability_plugin"] = kotlin_toolchain.kosabi_applicability_plugin + + if kotlin_toolchain.kosabi_jvm_abi_gen_plugin != None: + kosabiPluginOptionsMap["kosabi_jvm_abi_gen_plugin"] = kotlin_toolchain.kosabi_jvm_abi_gen_plugin + + current_language_version = None + for arg in extra_kotlinc_arguments: + # If `-language-version` is defined multiple times, we use the last one, just like the compiler does + if isinstance(arg, str) and "-language-version" in arg: + current_language_version = arg.split("=")[1].strip() + + if k2 == True and kotlin_toolchain.allow_k2_usage: + if not current_language_version or current_language_version < "2.0": + extra_kotlinc_arguments.append("-language-version=2.0") + else: # use K1 + if not current_language_version or current_language_version >= "2.0": + extra_kotlinc_arguments.append("-language-version=1.9") + return struct( extraClassPaths = bootclasspath_entries, standardLibraryClassPath = kotlin_toolchain.kotlin_stdlib[JavaLibraryInfo].library_output.full_library, annotationProcessingClassPath = kotlin_toolchain.annotation_processing_jar[JavaLibraryInfo].library_output.full_library, - compilationTracerPlugin = kotlin_toolchain.compilation_tracer_plugin, - qpldDotslash = kotlin_toolchain.qpld_dotslash, jvmAbiGenPlugin = kotlin_toolchain.jvm_abi_gen_plugin, kotlinCompilerPlugins = {plugin: {"params": plugin_options} if plugin_options else {} for plugin, plugin_options in kotlin_compiler_plugins.items()}, - kosabiPluginOptions = struct( - kosabi_stubs_gen_plugin = kotlin_toolchain.kosabi_stubs_gen_plugin, - kosabi_applicability_plugin = kotlin_toolchain.kosabi_applicability_plugin, - kosabi_jvm_abi_gen_plugin = kotlin_toolchain.kosabi_jvm_abi_gen_plugin, - ), + kosabiPluginOptions = struct(**kosabiPluginOptionsMap), friendPaths = [friend_path.library_output.abi for friend_path in map_idx(JavaLibraryInfo, friend_paths) if friend_path.library_output], kotlinHomeLibraries = kotlin_toolchain.kotlin_home_libraries, jvmTarget = get_kotlinc_compatible_target(str(target_level)), kosabiJvmAbiGenEarlyTerminationMessagePrefix = "exception: java.lang.RuntimeException: Terminating compilation. 
We're done with ABI.", - kosabiSupportedKspProviders = kotlin_toolchain.kosabi_supported_ksp_providers, - shouldUseCompilationTracer = kotlin_toolchain.should_use_compilation_tracer, shouldUseJvmAbiGen = should_use_jvm_abi_gen, shouldVerifySourceOnlyAbiConstraints = actual_abi_generation_mode == AbiGenerationMode("source_only"), shouldGenerateAnnotationProcessingStats = True, extraKotlincArguments = extra_kotlinc_arguments, - extraNonSourceOnlyAbiKotlincArguments = ["-language-version=2.0"] if k2 else [], - shouldRemoveKotlinCompilerFromClassPath = True, depTrackerPlugin = kotlin_toolchain.track_class_usage_plugin, + shouldKotlincRunViaBuildToolsApi = kotlin_toolchain.kotlinc_run_via_build_tools_api, + shouldKotlincRunIncrementally = should_kotlinc_run_incrementally, + incrementalStateDir = incremental_state_dir.as_output() if incremental_state_dir else None, + shouldUseStandaloneKosabi = kotlin_toolchain.kosabi_standalone, ) - kotlin_extra_params = encode_kotlin_extra_params(kotlin_compiler_plugins) - compiling_deps_tset = get_compiling_deps_tset(actions, deps, additional_classpath_entries) # external javac does not support used classes - track_class_usage = javac_tool == None + track_class_usage = javac_tool == None and kotlin_toolchain.track_class_usage_plugin != None and not should_kotlinc_run_incrementally def encode_library_command( output_paths: OutputPaths, path_to_class_hashes: Artifact, - classpath_jars_tag: ArtifactTag) -> struct: + classpath_jars_tag: ArtifactTag, + incremental_state_dir: Artifact | None) -> struct: target_type = TargetType("library") base_jar_command = encode_base_jar_command( javac_tool, @@ -158,9 +183,11 @@ def create_jar_artifact_kotlincd( resources_map, annotation_processor_properties = annotation_processor_properties, plugin_params = plugin_params, + manifest_file = manifest_file, extra_arguments = cmd_args(extra_arguments), source_only_abi_compiling_deps = [], track_class_usage = track_class_usage, + is_incremental = should_kotlinc_run_incrementally, ) return struct( @@ -169,7 +196,7 @@ def create_jar_artifact_kotlincd( hasAnnotationProcessing = True, ), libraryJarCommand = struct( - kotlinExtraParams = kotlin_extra_params, + kotlinExtraParams = encode_kotlin_extra_params(kotlin_compiler_plugins, incremental_state_dir), baseJarCommand = base_jar_command, libraryJarBaseCommand = struct( pathToClasses = output_paths.jar.as_output(), @@ -201,13 +228,14 @@ def create_jar_artifact_kotlincd( resources_map, annotation_processor_properties, plugin_params, + manifest_file, cmd_args(extra_arguments), source_only_abi_compiling_deps = source_only_abi_compiling_deps, track_class_usage = True, ) - abi_params = encode_jar_params(remove_classes, output_paths) + abi_params = encode_jar_params(remove_classes, output_paths, manifest_file) abi_command = struct( - kotlinExtraParams = kotlin_extra_params, + kotlinExtraParams = encode_kotlin_extra_params(kotlin_compiler_plugins), baseJarCommand = base_jar_command, abiJarParameters = abi_params, ) @@ -228,11 +256,12 @@ def create_jar_artifact_kotlincd( qualified_name: str, output_paths: OutputPaths, classpath_jars_tag: ArtifactTag, - abi_dir: [Artifact, None], + abi_dir: Artifact | None, target_type: TargetType, - path_to_class_hashes: [Artifact, None], source_only_abi_compiling_deps: list[JavaClasspathEntry] = [], - is_creating_subtarget: bool = False): + is_creating_subtarget: bool = False, + incremental_state_dir: Artifact | None = None, + should_action_run_incrementally: bool = False): _unused = 
source_only_abi_compiling_deps proto = declare_prefixed_output(actions, actions_identifier, "jar_command.proto.json") @@ -241,13 +270,14 @@ def create_jar_artifact_kotlincd( compiler = kotlin_toolchain.kotlinc[DefaultInfo].default_outputs[0] exe, local_only = prepare_cd_exe( qualified_name, - java = java_toolchain.java[RunInfo], + java = java_toolchain.graalvm_java[RunInfo] if java_toolchain.use_graalvm_java_for_javacd else java_toolchain.java[RunInfo], class_loader_bootstrapper = kotlin_toolchain.class_loader_bootstrapper, compiler = compiler, main_class = kotlin_toolchain.kotlincd_main_class, worker = kotlin_toolchain.kotlincd_worker[WorkerInfo], - debug_port = kotlin_toolchain.kotlincd_debug_port, - debug_target = kotlin_toolchain.kotlincd_debug_target, + target_specified_debug_port = debug_port, + toolchain_specified_debug_port = kotlin_toolchain.kotlincd_debug_port, + toolchain_specified_debug_target = kotlin_toolchain.kotlincd_debug_target, extra_jvm_args = kotlin_toolchain.kotlincd_jvm_args, extra_jvm_args_target = kotlin_toolchain.kotlincd_jvm_args_target, ) @@ -286,10 +316,11 @@ def create_jar_artifact_kotlincd( optional_dirs, ) - args = add_output_paths_to_cmd_args(args, output_paths, path_to_class_hashes) - - event_pipe_out = declare_prefixed_output(actions, actions_identifier, "events.data") - + if incremental_state_dir: + args.add( + "--incremental-state-dir", + incremental_state_dir.as_output(), + ) dep_files = {} if not is_creating_subtarget and srcs and (kotlin_toolchain.dep_files == DepFiles("per_jar") or kotlin_toolchain.dep_files == DepFiles("per_class")) and target_type == TargetType("library") and track_class_usage: used_classes_json_outputs = [ @@ -307,25 +338,33 @@ def create_jar_artifact_kotlincd( dep_files["classpath_jars"] = classpath_jars_tag + incremental_run_params = { + "metadata_env_var": "ACTION_METADATA", + "metadata_path": "action_metadata.json", + "no_outputs_cleanup": True, + } if should_action_run_incrementally else {} actions.run( args, env = { "BUCK_CLASSPATH": compiler, - "BUCK_EVENT_PIPE": event_pipe_out.as_output(), "JAVACD_ABSOLUTE_PATHS_ARE_RELATIVE_TO_CWD": "1", }, category = "{}kotlincd_jar".format(category_prefix), identifier = actions_identifier, dep_files = dep_files, + allow_dep_file_cache_upload = False, exe = exe, local_only = local_only, low_pass_filter = False, weight = 2, + error_handler = kotlin_toolchain.kotlin_error_handler, + **incremental_run_params ) + return proto library_classpath_jars_tag = actions.artifact_tag() - command = encode_library_command(output_paths, path_to_class_hashes_out, library_classpath_jars_tag) - define_kotlincd_action( + command = encode_library_command(output_paths, path_to_class_hashes_out, library_classpath_jars_tag, incremental_state_dir) + proto = define_kotlincd_action( category_prefix = "", actions_identifier = actions_identifier, encoded_command = command, @@ -334,17 +373,21 @@ def create_jar_artifact_kotlincd( classpath_jars_tag = library_classpath_jars_tag, abi_dir = class_abi_output_dir if should_create_class_abi else None, target_type = TargetType("library"), - path_to_class_hashes = path_to_class_hashes_out, is_creating_subtarget = is_creating_subtarget, + incremental_state_dir = incremental_state_dir, + should_action_run_incrementally = should_kotlinc_run_incrementally, ) - final_jar = prepare_final_jar( + final_jar_output = prepare_final_jar( actions = actions, actions_identifier = actions_identifier, output = None, output_paths = output_paths, additional_compiled_srcs = None, jar_builder = 
java_toolchain.jar_builder, + jar_postprocessor = jar_postprocessor, + jar_postprocessor_runner = java_toolchain.postprocessor_runner[RunInfo], + zip_scrubber = java_toolchain.zip_scrubber, ) if not is_creating_subtarget: @@ -357,7 +400,7 @@ def create_jar_artifact_kotlincd( additional_compiled_srcs = None, is_building_android_binary = is_building_android_binary, class_abi_generator = java_toolchain.class_abi_generator, - final_jar = final_jar, + final_jar = final_jar_output.final_jar, compiling_deps_tset = compiling_deps_tset, source_only_abi_deps = source_only_abi_deps, class_abi_jar = class_abi_jar, @@ -365,18 +408,25 @@ def create_jar_artifact_kotlincd( encode_abi_command = encode_abi_command, define_action = define_kotlincd_action, ) + abi_jar_snapshot = generate_java_classpath_snapshot(actions, java_toolchain.cp_snapshot_generator, classpath_abi, actions_identifier) return make_compile_outputs( - full_library = final_jar, + full_library = final_jar_output.final_jar, + preprocessed_library = final_jar_output.preprocessed_jar, class_abi = class_abi, source_only_abi = source_only_abi, classpath_abi = classpath_abi, classpath_abi_dir = classpath_abi_dir, required_for_source_only_abi = required_for_source_only_abi, annotation_processor_output = output_paths.annotations, - ) + incremental_state_dir = incremental_state_dir, + abi_jar_snapshot = abi_jar_snapshot, + ), proto else: + full_jar_snapshot = generate_java_classpath_snapshot(actions, java_toolchain.cp_snapshot_generator, final_jar_output.final_jar, actions_identifier) return make_compile_outputs( - full_library = final_jar, + full_library = final_jar_output.final_jar, + preprocessed_library = final_jar_output.preprocessed_jar, required_for_source_only_abi = required_for_source_only_abi, annotation_processor_output = output_paths.annotations, - ) + abi_jar_snapshot = full_jar_snapshot, + ), proto diff --git a/prelude/kotlin/tools/BUCK b/prelude/kotlin/tools/BUCK deleted file mode 100644 index a8af5c42e0b2f..0000000000000 --- a/prelude/kotlin/tools/BUCK +++ /dev/null @@ -1,20 +0,0 @@ -prelude = native - -prelude.python_binary( - name = "compile_kotlin", - main = "compile_kotlin.py", - visibility = ["PUBLIC"], - deps = [ - ":compile_kotlin_lib", - ], -) - -prelude.python_library( - name = "compile_kotlin_lib", - srcs = [ - "compile_kotlin.py", - ], - deps = [ - "prelude//java/tools:utils_lib", - ], -) diff --git a/prelude/kotlin/tools/compile_kotlin/BUCK.v2 b/prelude/kotlin/tools/compile_kotlin/BUCK.v2 new file mode 100644 index 0000000000000..5ae64be14ef2c --- /dev/null +++ b/prelude/kotlin/tools/compile_kotlin/BUCK.v2 @@ -0,0 +1,26 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +prelude = native + +prelude.python_bootstrap_binary( + name = "compile_kotlin", + main = "compile_kotlin.py", + visibility = ["PUBLIC"], + deps = [ + ":compile_kotlin_lib", + ], +) + +prelude.python_bootstrap_library( + name = "compile_kotlin_lib", + srcs = [ + "compile_kotlin.py", + ], + deps = [ + "prelude//java/tools:utils_lib", + ], +) diff --git a/prelude/kotlin/tools/compile_kotlin.py b/prelude/kotlin/tools/compile_kotlin/compile_kotlin.py similarity index 95% rename from prelude/kotlin/tools/compile_kotlin.py rename to prelude/kotlin/tools/compile_kotlin/compile_kotlin.py index eec2559f06695..511c0530accc8 100644 --- a/prelude/kotlin/tools/compile_kotlin.py +++ b/prelude/kotlin/tools/compile_kotlin/compile_kotlin.py @@ -9,12 +9,12 @@ import argparse import os import pathlib -import shlex 
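The `incremental_run_params` dict assembled above maps directly onto buck2's incremental-action support in `ctx.actions.run`. A minimal sketch of that wiring in isolation (the function and category names here are illustrative, not part of the prelude):

def _run_incrementally(ctx: AnalysisContext, cmd: cmd_args):
    # Outputs are not deleted before the action re-runs, and the action can
    # discover which of its previous outputs are still valid through the
    # metadata JSON that buck2 writes and exposes via the env var below.
    ctx.actions.run(
        cmd,
        category = "example_incremental",
        no_outputs_cleanup = True,
        metadata_env_var = "ACTION_METADATA",
        metadata_path = "action_metadata.json",
    )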
import shutil +import zipfile from tempfile import TemporaryDirectory from typing import List -from java.tools import utils +import utils _JAVA_OR_KOTLIN_FILE_EXTENSION = [".java", ".kt"] @@ -267,7 +267,7 @@ def _encode_kapt_ap_options( ) if kapt_annotation_processor_params: for param in kapt_annotation_processor_params.split(";"): - file.write(os.linesep) + file.write("\n") file.write(param) encoded_ap_options_file = os.path.join(temp_dir, "encoded_ap_options.txt") @@ -283,7 +283,7 @@ def _encode_javac_arguments( ) -> str: javac_arguments_file = os.path.join(temp_dir, "javac_arguments.txt") with open(javac_arguments_file, "w") as file: - file.write("-source={}{}-target={}".format(jvm_target, os.linesep, jvm_target)) + file.write("-source={}\n-target={}".format(jvm_target, jvm_target)) encoded_javac_arguments_file = os.path.join(temp_dir, "encoded_javac_arguments.txt") return _encode_options( @@ -330,7 +330,7 @@ def _get_kapt_cmd( "=".join([_AP_CLASSPATH_ARG, line.strip()]) for line in file.readlines() ] - kapt_base64_encoder_cmd = shlex.split(kapt_base64_encoder) + kapt_base64_encoder_cmd = utils.shlex_split(kapt_base64_encoder) kapt_plugin_options += [ "=".join([_SOURCES_ARG, str(kapt_sources_output)]), "=".join([_CLASSES_ARG, str(kapt_classes_output)]), @@ -403,6 +403,16 @@ def _get_ksp_cmd( ] +def _zip_recursive(archive_path: pathlib.Path, source_path: pathlib.Path): + # Same as 'zip -r archive_path source_path' + with zipfile.ZipFile( + archive_path, "w", compression=zipfile.ZIP_DEFLATED, compresslevel=6 + ) as z: + z.write(source_path) + for f in source_path.glob("**/*"): + z.write(f) + + def main(): args = _parse_args() @@ -551,10 +561,10 @@ def main(): if ksp_sources_output: if not ksp_sources_output.exists(): ksp_sources_output.mkdir() + _zip_recursive(ksp_zipped_sources_output, ksp_sources_output) utils.execute_command( - ["zip", "-r", ksp_zipped_sources_output, ksp_sources_output] + utils.shlex_split(zip_scrubber) + [ksp_zipped_sources_output] ) - utils.execute_command(shlex.split(zip_scrubber) + [ksp_zipped_sources_output]) if ksp_classes_and_resources_output: if not ksp_classes_and_resources_output.exists(): @@ -564,11 +574,9 @@ def main(): os.mkdir(kapt_sources_output) kapt_generated_sources_output.touch() else: + _zip_recursive(kapt_generated_sources_output, kapt_sources_output) utils.execute_command( - ["zip", "-r", kapt_generated_sources_output, kapt_sources_output] - ) - utils.execute_command( - shlex.split(zip_scrubber) + [kapt_generated_sources_output] + utils.shlex_split(zip_scrubber) + [kapt_generated_sources_output] ) if kapt_classes_output: diff --git a/prelude/kotlin/tools/defs.bzl b/prelude/kotlin/tools/defs.bzl new file mode 100644 index 0000000000000..3b13962484181 --- /dev/null +++ b/prelude/kotlin/tools/defs.bzl @@ -0,0 +1,44 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +""" +Wrappers for native rules that use a minimal "bootstrap" toolchain instead of the full toolchain. + +Use these to break cycles when a target used in a toolchain needs that toolchain to compile. Such targets +provide functionality that is not strictly required in order to build, and thus can be omitted from the +bootstrap toolchain. 
For example, an ABI generator for Java can be written in Java and compiled with a +bootstrap toolchain that does not include an ABI generator. While using that bootstrap toolchain, buck2 +will just use the full JARs as ABI jars, which will be slower and result in some unnecessary recompilation, +but still produce valid results. +""" + +load("@prelude//:prelude.bzl", "native") + +def java_bootstrap_binary(**kwargs): + kwargs = _set_bootstrap_java_toolchain(**kwargs) + native.java_binary(**kwargs) + +def java_bootstrap_library(**kwargs): + kwargs = _set_bootstrap_java_toolchain(**kwargs) + native.java_library(**kwargs) + +def kotlin_bootstrap_library(**kwargs): + kwargs = _set_bootstrap_java_toolchain(**kwargs) + kwargs = _set_bootstrap_kotlin_toolchain(**kwargs) + native.kotlin_library(**kwargs) + +def prebuilt_jar_bootstrap(**kwargs): + kwargs["_prebuilt_jar_toolchain"] = "toolchains//:prebuilt_jar_bootstrap" + native.prebuilt_jar(**kwargs) + +def _set_bootstrap_java_toolchain(**kwargs): + kwargs["_java_toolchain"] = "toolchains//:java_bootstrap" + return kwargs + +def _set_bootstrap_kotlin_toolchain(**kwargs): + kwargs["_kotlin_toolchain"] = "toolchains//:kotlin_bootstrap" + return kwargs diff --git a/prelude/kotlin/tools/kapt_base64_encoder/BUCK.v2 b/prelude/kotlin/tools/kapt_base64_encoder/BUCK.v2 new file mode 100644 index 0000000000000..e44d3e6263e4b --- /dev/null +++ b/prelude/kotlin/tools/kapt_base64_encoder/BUCK.v2 @@ -0,0 +1,24 @@ +load("@prelude//kotlin/tools:defs.bzl", "java_bootstrap_binary", "java_bootstrap_library") +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +java_bootstrap_library( + name = "kapt_base64_encoder_lib", + srcs = [ + "com/facebook/kapt/KaptBase64Encoder.java", + ], + source = "8", + target = "8", +) + +java_bootstrap_binary( + name = "kapt_base64_encoder", + main_class = "com.facebook.kapt.KaptBase64Encoder", + visibility = ["PUBLIC"], + deps = [ + ":kapt_base64_encoder_lib", + ], +) diff --git a/prelude/kotlin/tools/kapt_base64_encoder/com/facebook/kapt/KaptBase64Encoder.java b/prelude/kotlin/tools/kapt_base64_encoder/com/facebook/kapt/KaptBase64Encoder.java new file mode 100644 index 0000000000000..d6840bdeb2ead --- /dev/null +++ b/prelude/kotlin/tools/kapt_base64_encoder/com/facebook/kapt/KaptBase64Encoder.java @@ -0,0 +1,52 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +package com.facebook.kapt; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.ObjectOutputStream; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.Base64; +import java.util.Collections; +import java.util.List; + +/** + * Main entry point for encoding KAPT options in base64. 
Usage: + * + */ +public class KaptBase64Encoder { + + public static void main(String[] args) throws IOException { + String optionsFile = args[0]; + String outputPath = args[1]; + List params = Files.readAllLines(Paths.get(optionsFile)); + + try (ByteArrayOutputStream os = new ByteArrayOutputStream(); + ObjectOutputStream oos = new ObjectOutputStream(os)) { + + oos.writeInt(params.size()); + for (String param : params) { + String[] splitParam = param.split("="); + oos.writeUTF(splitParam[0]); + oos.writeUTF(splitParam[1]); + } + + oos.flush(); + Files.write( + Paths.get(outputPath), + Collections.singleton(Base64.getEncoder().encodeToString(os.toByteArray()))); + } catch (IOException e) { + throw new RuntimeException(e); + } + + System.exit(0); + } +} diff --git a/prelude/linking/execution_preference.bzl b/prelude/linking/execution_preference.bzl index 92d45adee6110..041ceb7dc0bff 100644 --- a/prelude/linking/execution_preference.bzl +++ b/prelude/linking/execution_preference.bzl @@ -36,13 +36,14 @@ _ActionExecutionAttributes = record( def link_execution_preference_attr(): # The attribute is optional, allowing for None to represent that no preference has been set and we should fallback on the toolchain. return attrs.option(attrs.one_of(attrs.enum(LinkExecutionPreferenceTypes), attrs.dep(providers = [LinkExecutionPreferenceDeterminatorInfo])), default = None, doc = """ - The execution preference for linking. Options are:\n - - any : No preference is set, and the link action will be performed based on buck2's executor configuration.\n - - full_hybrid : The link action will execute both locally and remotely, regardless of buck2's executor configuration (if\n - the executor is capable of hybrid execution). The use_limited_hybrid setting of the hybrid executor is ignored.\n - - local : The link action will execute locally if compatible on current host platform.\n - - local_only : The link action will execute locally, and error if the current platform is not compatible.\n - - remote : The link action will execute remotely if a compatible remote platform exists, otherwise locally.\n + The execution preference for linking. Options are: + + - any : No preference is set, and the link action will be performed based on buck2's executor configuration. + - full_hybrid : The link action will execute both locally and remotely, regardless of buck2's executor configuration (if + the executor is capable of hybrid execution). The use_limited_hybrid setting of the hybrid executor is ignored. + - local : The link action will execute locally if compatible on current host platform. + - local_only : The link action will execute locally, and error if the current platform is not compatible. + - remote : The link action will execute remotely if a compatible remote platform exists, otherwise locally. The default is None, expressing that no preference has been set on the target itself. """) diff --git a/prelude/linking/link_groups.bzl b/prelude/linking/link_groups.bzl index 19582078e0409..a3fb2e1c59c66 100644 --- a/prelude/linking/link_groups.bzl +++ b/prelude/linking/link_groups.bzl @@ -12,7 +12,10 @@ load( load( ":link_info.bzl", "LinkInfos", - "LinkedObject", +) +load( + ":shared_libraries.bzl", + "SharedLibraries", ) # Information about a linkable node which explicitly sets `link_group`. @@ -20,7 +23,7 @@ LinkGroupLib = record( # The label of the owning target (if any). label = field([Label, None], None), # The shared libs to package for this link group. 
- shared_libs = field(dict[str, LinkedObject]), + shared_libs = field(SharedLibraries), # The link info to link against this link group. shared_link_infos = field(LinkInfos), ) @@ -48,9 +51,10 @@ def gather_link_group_libs( def merge_link_group_lib_info( label: [Label, None] = None, name: [str, None] = None, - shared_libs: [dict[str, LinkedObject], None] = None, + shared_libs: [SharedLibraries, None] = None, shared_link_infos: [LinkInfos, None] = None, - deps: list[Dependency] = []) -> LinkGroupLibInfo: + deps: list[Dependency] = [], + children: list[LinkGroupLibInfo] = []) -> LinkGroupLibInfo: """ Merge and return link group info libs from deps and the current rule wrapped in a provider. @@ -66,5 +70,6 @@ def merge_link_group_lib_info( libs = gather_link_group_libs( libs = [libs], deps = deps, + children = children, ), ) diff --git a/prelude/linking/link_info.bzl b/prelude/linking/link_info.bzl index 2bd0b63d1824b..a0e9ab3ade33d 100644 --- a/prelude/linking/link_info.bzl +++ b/prelude/linking/link_info.bzl @@ -10,17 +10,26 @@ load( "ArtifactTSet", "make_artifact_tset", ) -load("@prelude//cxx:cxx_toolchain_types.bzl", "PicBehavior") +load( + "@prelude//cxx:cxx_toolchain_types.bzl", + "LinkerType", + "PicBehavior", +) load( "@prelude//cxx:linker.bzl", "get_link_whole_args", "get_no_as_needed_shared_libs_flags", "get_objects_as_library_args", ) +load("@prelude//linking:types.bzl", "Linkage") load("@prelude//utils:arglike.bzl", "ArgLike") -load( - "@prelude//utils:utils.bzl", - "flatten", + +ExtraLinkerOutputs = record( + # The unbound extra outputs produced by a link action + # stored by key for lookup in the flag factory. + artifacts = field(dict[str, Artifact], {}), + # The output providers for the extra linker output. + providers = field(dict[str, list[DefaultInfo]], {}), ) # Represents an archive (.a file) @@ -74,23 +83,13 @@ def default_output_style_for_link_strategy(link_strategy: LinkStrategy) -> LibOu return LibOutputStyle("pic_archive") return LibOutputStyle("shared_lib") -# Ways a library can request to be linked (e.g. usually specific via a rule -# param like `preferred_linkage`. The actual link style used for a library is -# usually determined by a combination of this and the link style being exported -# via a provider. -Linkage = enum( - "static", - "shared", - "any", -) - # An archive. ArchiveLinkable = record( # Artifact in the .a format from ar archive = field(Archive), # If a bitcode bundle was created for this artifact it will be present here - bitcode_bundle = field([Artifact, None], None), - linker_type = field(str), + bitcode_bundle = field(Artifact | None, None), + linker_type = field(LinkerType), link_whole = field(bool, False), # Indicates if this archive may contain LTO bit code. Can be set to `False` # to e.g. 
tell dist LTO handling that a potentially expensive archive doesn't @@ -108,8 +107,8 @@ SharedLibLinkable = record( ObjectsLinkable = record( objects = field([list[Artifact], None], None), # Any of the objects that are in bitcode format - bitcode_bundle = field([Artifact, None], None), - linker_type = field(str), + bitcode_bundle = field(Artifact | None, None), + linker_type = field(LinkerType), link_whole = field(bool, False), ) @@ -143,7 +142,14 @@ SwiftRuntimeLinkable = record( runtime_required = field(bool, False), ) -LinkableTypes = [ArchiveLinkable, SharedLibLinkable, ObjectsLinkable, FrameworksLinkable, SwiftRuntimeLinkable, SwiftmoduleLinkable] +LinkableTypes = [ + ArchiveLinkable, + SharedLibLinkable, + ObjectsLinkable, + FrameworksLinkable, + SwiftRuntimeLinkable, + SwiftmoduleLinkable, +] LinkerFlags = record( flags = field(list[typing.Any], []), @@ -157,6 +163,7 @@ LinkInfo = record( # An informative name for this LinkInfo. This may be used in user messages # or when constructing intermediate output paths and does not need to be unique. name = field([str, None], None), + dist_thin_lto_codegen_flags = field(list[typing.Any], []), # Opaque cmd_arg-likes to be added pre/post this item on a linker command line. pre_flags = field(list[typing.Any], []), post_flags = field(list[typing.Any], []), @@ -177,6 +184,10 @@ LinkOrdering = enum( "topological", ) +CxxSanitizerRuntimeInfo = provider(fields = { + "runtime_files": provider_field(list[Artifact]), +}) + def set_link_info_link_whole(info: LinkInfo) -> LinkInfo: linkables = [set_linkable_link_whole(linkable) for linkable in info.linkables] return LinkInfo( @@ -219,38 +230,55 @@ def wrap_link_info( external_debug_info = inner.external_debug_info, ) +# Returns true if the command line argument representation of this linkable +# could be passed within a filelist. +def _is_linkable_included_in_filelist(linkable: LinkableTypes) -> bool: + if isinstance(linkable, ArchiveLinkable): + # Link whole archives don't appear in the filelist, but are passed directly to the linker + # with a -force-load (MachO) or -whole-archive (ELF) flag. Regular archives do appear in the filelist. + return not linkable.link_whole + elif isinstance(linkable, SharedLibLinkable) or \ + isinstance(linkable, FrameworksLinkable) or \ + isinstance(linkable, SwiftRuntimeLinkable) or \ + isinstance(linkable, SwiftmoduleLinkable): + # These are all passed directly via various command line flags, not via a filelist. + return False + elif isinstance(linkable, ObjectsLinkable): + # Object files always appear in the filelist. + return True + else: + fail("Encountered unhandled filelist-like linkable {}".format(str(linkable))) + # Adds appropriate args representing `linkable` to `args` def append_linkable_args(args: cmd_args, linkable: LinkableTypes): if isinstance(linkable, ArchiveLinkable): if linkable.link_whole: args.add(get_link_whole_args(linkable.linker_type, [linkable.archive.artifact])) - elif linkable.linker_type == "darwin": - pass else: args.add(linkable.archive.artifact) # When using thin archives, object files are implicitly used as inputs # to the link, so make sure to track them as inputs so that they're # materialized/tracked properly.
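The thin-archive comment just above describes a pattern worth seeing in isolation: `cmd_args(hidden = ...)` records artifacts as action inputs without rendering them on the command line. A sketch, assuming the `Archive` record's `artifact`/`external_objects` fields and an illustrative helper name:

def _thin_archive_args(archive: Archive) -> cmd_args:
    args = cmd_args(archive.artifact)
    # Member objects of a thin archive live outside the .a file, so they
    # must be tracked as inputs even though they never appear as text on
    # the linker command line.
    args.add(cmd_args(hidden = archive.external_objects))
    return args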
- args.add(cmd_args().hidden(linkable.archive.external_objects)) + args.add(cmd_args(hidden = linkable.archive.external_objects)) elif isinstance(linkable, SharedLibLinkable): if linkable.link_without_soname: - args.add(cmd_args(linkable.lib, format = "-L{}").parent()) + args.add(cmd_args(linkable.lib, format = "-L{}", parent = 1)) args.add("-l" + linkable.lib.basename.removeprefix("lib").removesuffix(linkable.lib.extension)) else: args.add(linkable.lib) elif isinstance(linkable, ObjectsLinkable): - # We depend on just the filelist for darwin linker and don't add the normal args - if linkable.linker_type != "darwin": - # We need to export every symbol when link groups are used, but enabling - # --whole-archive with --start-lib is undefined behavior in gnu linkers: - # https://reviews.llvm.org/D120443. We need to export symbols from every - # linkable in the link_info - if not linkable.link_whole: - args.add(get_objects_as_library_args(linkable.linker_type, linkable.objects)) - else: - args.add(linkable.objects) - elif isinstance(linkable, FrameworksLinkable) or isinstance(linkable, SwiftRuntimeLinkable) or isinstance(linkable, SwiftmoduleLinkable): + # We need to export every symbol when link groups are used, but enabling + # --whole-archive with --start-lib is undefined behavior in gnu linkers: + # https://reviews.llvm.org/D120443. We need to export symbols from every + # linkable in the link_info + if not linkable.link_whole: + args.add(get_objects_as_library_args(linkable.linker_type, linkable.objects)) + else: + args.add(linkable.objects) + elif isinstance(linkable, FrameworksLinkable) or \ + isinstance(linkable, SwiftRuntimeLinkable) or \ + isinstance(linkable, SwiftmoduleLinkable): # These flags are handled separately so they can be deduped. # # We've seen in apps with larger dependency graphs that failing @@ -259,42 +287,33 @@ def append_linkable_args(args: cmd_args, linkable: LinkableTypes): else: fail("Encountered unhandled linkable {}".format(str(linkable))) -def link_info_to_args(value: LinkInfo) -> cmd_args: - args = cmd_args(value.pre_flags) - for linkable in value.linkables: - append_linkable_args(args, linkable) - if False: - # TODO(nga): `post_flags` is never `None`. - def unknown(): - pass - - value = unknown() - if value.post_flags != None: - args.add(value.post_flags) - return args - -# List of inputs to pass to the darwin linker via the `-filelist` param. -# TODO(agallagher): It might be nicer to leave these inlined in the args -# above and extract them at link time via reflection. This way we'd hide -# platform-specific details from this level. -# NOTE(agallagher): Using filelist out-of-band means objects/archives get -# linked out of order of their corresponding flags. 
-def link_info_filelist(value: LinkInfo) -> list[Artifact]: - filelists = [] +LinkInfoArgumentFilter = enum( + "all", + "filelist_only", + "excluding_filelist", +) + +def link_info_to_args(value: LinkInfo, argument_type_filter: LinkInfoArgumentFilter = LinkInfoArgumentFilter("all")) -> cmd_args: + pre_flags = cmd_args() + post_flags = cmd_args() + if argument_type_filter == LinkInfoArgumentFilter("all") or argument_type_filter == LinkInfoArgumentFilter("excluding_filelist"): + pre_flags.add(value.pre_flags) + post_flags.add(value.post_flags) + + flags = cmd_args() for linkable in value.linkables: - if isinstance(linkable, ArchiveLinkable): - if linkable.linker_type == "darwin" and not linkable.link_whole: - filelists.append(linkable.archive.artifact) - elif isinstance(linkable, SharedLibLinkable): - pass - elif isinstance(linkable, ObjectsLinkable): - if linkable.linker_type == "darwin": - filelists += linkable.objects - elif isinstance(linkable, FrameworksLinkable) or isinstance(linkable, SwiftRuntimeLinkable) or isinstance(linkable, SwiftmoduleLinkable): - pass - else: - fail("Encountered unhandled linkable {}".format(str(linkable))) - return filelists + if argument_type_filter == LinkInfoArgumentFilter("all"): + append_linkable_args(flags, linkable) + elif argument_type_filter == LinkInfoArgumentFilter("filelist_only") and _is_linkable_included_in_filelist(linkable): + append_linkable_args(flags, linkable) + elif argument_type_filter == LinkInfoArgumentFilter("excluding_filelist") and not _is_linkable_included_in_filelist(linkable): + append_linkable_args(flags, linkable) + + result = cmd_args() + result.add(pre_flags) + result.add(flags) + result.add(post_flags) + return result # Encapsulate all `LinkInfo`s provided by a given rule's link style. # @@ -304,45 +323,47 @@ def link_info_filelist(value: LinkInfo) -> list[Artifact]: LinkInfos = record( # Link info to use by default. default = field(LinkInfo), + # Link info for objects compiled with extra optimizations (EXPERIMENTAL) + optimized = field([LinkInfo, None], None), # Link info stripped of debug symbols.
stripped = field([LinkInfo, None], None), ) def _link_info_default_args(infos: LinkInfos): info = infos.default - return link_info_to_args(info) + return link_info_to_args(info, argument_type_filter = LinkInfoArgumentFilter("all")) -def _link_info_default_shared_link_args(infos: LinkInfos): - info = infos.default - return link_info_to_args(info) - -def _link_info_stripped_args(infos: LinkInfos): - info = infos.stripped or infos.default - return link_info_to_args(info) - -def _link_info_stripped_shared_link_args(infos: LinkInfos): +def _link_info_stripped_link_args(infos: LinkInfos): info = infos.stripped or infos.default - return link_info_to_args(info) + return link_info_to_args(info, argument_type_filter = LinkInfoArgumentFilter("all")) def _link_info_default_filelist(infos: LinkInfos): info = infos.default - return link_info_filelist(info) + return link_info_to_args(info, argument_type_filter = LinkInfoArgumentFilter("filelist_only")) def _link_info_stripped_filelist(infos: LinkInfos): info = infos.stripped or infos.default - return link_info_filelist(info) + return link_info_to_args(info, argument_type_filter = LinkInfoArgumentFilter("filelist_only")) + +def _link_info_default_excluding_filelist_args(infos: LinkInfos): + info = infos.default + return link_info_to_args(info, argument_type_filter = LinkInfoArgumentFilter("excluding_filelist")) + +def _link_info_stripped_excluding_filelist_args(infos: LinkInfos): + info = infos.stripped or infos.default + return link_info_to_args(info, argument_type_filter = LinkInfoArgumentFilter("excluding_filelist")) def _link_info_has_default_filelist(children: list[bool], infos: [LinkInfos, None]) -> bool: if infos: info = infos.default - if link_info_filelist(info): + if len(link_info_to_args(info, argument_type_filter = LinkInfoArgumentFilter("filelist_only")).inputs): return True return any(children) def _link_info_has_stripped_filelist(children: list[bool], infos: [LinkInfos, None]) -> bool: if infos: info = infos.stripped or infos.default - if link_info_filelist(info): + if len(link_info_to_args(info, argument_type_filter = LinkInfoArgumentFilter("filelist_only")).inputs): return True return any(children) @@ -350,11 +371,11 @@ def _link_info_has_stripped_filelist(children: list[bool], infos: [LinkInfos, No LinkInfosTSet = transitive_set( args_projections = { "default": _link_info_default_args, + "default_excluding_filelist": _link_info_default_excluding_filelist_args, "default_filelist": _link_info_default_filelist, - "default_shared": _link_info_default_shared_link_args, - "stripped": _link_info_stripped_args, + "stripped": _link_info_stripped_link_args, + "stripped_excluding_filelist": _link_info_stripped_excluding_filelist_args, "stripped_filelist": _link_info_stripped_filelist, - "stripped_shared": _link_info_stripped_shared_link_args, }, reductions = { "has_default_filelist": _link_info_has_default_filelist, @@ -386,44 +407,49 @@ LinkArgs = record( LinkedObject = record( output = field([Artifact, Promise]), # The combined bitcode from this linked object and any static libraries - bitcode_bundle = field([Artifact, None], None), + bitcode_bundle = field(Artifact | None, None), # the generated linked output before running stripping(and bolt). unstripped_output = field(Artifact), # the generated linked output before running bolt, may be None if bolt is not used. - prebolt_output = field([Artifact, None], None), + prebolt_output = field(Artifact | None, None), # The LinkArgs used to produce this LinkedObject. 
This can be useful for debugging or # for downstream rules to reproduce the shared library with some modifications (for example # android relinker will link again with an added version script argument). - link_args = field([LinkArgs, None], None), + link_args = field(list[LinkArgs] | None, None), # A linked object (binary/shared library) may have an associated dwp file with # its corresponding DWARF debug info. # May be None when Split DWARF is disabled or for some types of synthetic link objects. - dwp = field([Artifact, None], None), + dwp = field(Artifact | None, None), # Additional dirs or paths that contain debug info referenced by the linked # object (e.g. split dwarf files or PDB file). external_debug_info = field(ArtifactTSet, ArtifactTSet()), # This argsfile is generated in the `cxx_link` step and contains a list of arguments # passed to the linker. It is being exposed as a sub-target for debugging purposes. - linker_argsfile = field([Artifact, None], None), + linker_argsfile = field(Artifact | None, None), # The filelist is generated in the `cxx_link` step and contains a list of # object files (static libs or plain object files) passed to the linker. # It is being exposed for debugging purposes. Only present when a Darwin # linker is used. - linker_filelist = field([Artifact, None], None), + linker_filelist = field(Artifact | None, None), # The linker command as generated by `cxx_link`. Exposed for debugging purposes only. # Not present for DistLTO scenarios. linker_command = field([cmd_args, None], None), # This sub-target is only available for distributed thinLTO builds. - index_argsfile = field([Artifact, None], None), + index_argsfile = field(Artifact | None, None), + # This sub-target is only available for distributed thinLTO builds. + dist_thin_lto_codegen_argsfile = field([Artifact, None], None), + # This sub-target is only available for distributed thinLTO builds. This is similar to + index_argsfile, but only includes flags that can be determined at analysis time, no input files. + dist_thin_lto_index_argsfile = field([Artifact, None], None), # Import library for linking with DLL on Windows. # If not on Windows it's always None. - import_library = field([Artifact, None], None), + import_library = field(Artifact | None, None), # A linked object (binary/shared library) may have an associated PDB file with # its corresponding Windows debug info. # If not on Windows it's always None. - pdb = field([Artifact, None], None), + pdb = field(Artifact | None, None), # Split-debug info generated by the link. - split_debug_output = field([Artifact, None], None), + split_debug_output = field(Artifact | None, None), ) # A map of native linkable infos from transitive dependencies for each LinkStrategy. @@ -524,16 +550,27 @@ def create_merged_link_info( swift_runtime_linkables += [dep_info.swift_runtime[link_strategy] for dep_info in exported_deps] for dep_info in deps: - children.append(dep_info._infos[link_strategy]) - external_debug_info_children.append(dep_info._external_debug_info[link_strategy]) + # The inherited link infos no longer guarantee that a tset will be available for
Protect against missing infos + value = dep_info._infos.get(link_strategy) + if value: + children.append(value) + value = dep_info._external_debug_info.get(link_strategy) + if value: + external_debug_info_children.append(value) + framework_linkables.append(dep_info.frameworks[link_strategy]) swiftmodule_linkables.append(dep_info.swiftmodules[link_strategy]) swift_runtime_linkables.append(dep_info.swift_runtime[link_strategy]) # We always export link info for exported deps. for dep_info in exported_deps: - children.append(dep_info._infos[link_strategy]) - external_debug_info_children.append(dep_info._external_debug_info[link_strategy]) + value = dep_info._infos.get(link_strategy) + if value: + children.append(value) + value = dep_info._external_debug_info.get(link_strategy) + if value: + external_debug_info_children.append(value) frameworks[link_strategy] = merge_framework_linkables(framework_linkables) swift_runtime[link_strategy] = merge_swift_runtime_linkables(swift_runtime_linkables) @@ -602,7 +639,8 @@ def create_merged_link_info_for_propagation( def get_link_info( infos: LinkInfos, - prefer_stripped: bool = False) -> LinkInfo: + prefer_stripped: bool = False, + prefer_optimized: bool = False) -> LinkInfo: """ Helper for getting a `LinkInfo` out of a `LinkInfos`. """ @@ -610,22 +648,19 @@ def get_link_info( # When requested, prefer using pre-stripped link info. if prefer_stripped and infos.stripped != None: return infos.stripped + if prefer_optimized and infos.optimized: + return infos.optimized return infos.default -def unpack_link_args(args: LinkArgs, is_shared: [bool, None] = None, link_ordering: [LinkOrdering, None] = None) -> ArgLike: +def unpack_link_args(args: LinkArgs, link_ordering: [LinkOrdering, None] = None) -> ArgLike: if args.tset != None: ordering = link_ordering.value if link_ordering else "preorder" tset = args.tset.infos - if is_shared: - if args.tset.prefer_stripped: - return tset.project_as_args("stripped_shared", ordering = ordering) - return tset.project_as_args("default_shared", ordering = ordering) - else: - if args.tset.prefer_stripped: - return tset.project_as_args("stripped", ordering = ordering) - return tset.project_as_args("default", ordering = ordering) + if args.tset.prefer_stripped: + return tset.project_as_args("stripped", ordering = ordering) + return tset.project_as_args("default", ordering = ordering) if args.infos != None: return cmd_args([link_info_to_args(info) for info in args.infos]) @@ -644,20 +679,37 @@ def unpack_link_args_filelist(args: LinkArgs) -> [ArgLike, None]: return tset.project_as_args("stripped_filelist" if stripped else "default_filelist") if args.infos != None: - filelist = flatten([link_info_filelist(info) for info in args.infos]) - if not filelist: + result_args = cmd_args() + for info in args.infos: + result_args.add(link_info_to_args(info, argument_type_filter = LinkInfoArgumentFilter("filelist_only"))) + + if not len(result_args.inputs): return None - # Actually create cmd_args so the API is consistent between the 2 branches. 
- args = cmd_args() - args.add(filelist) - return args + return result_args if args.flags != None: return None fail("Unpacked invalid empty link args") +def unpack_link_args_excluding_filelist(args: LinkArgs, link_ordering: [LinkOrdering, None] = None) -> ArgLike: + if args.tset != None: + ordering = link_ordering.value if link_ordering else "preorder" + + tset = args.tset.infos + if args.tset.prefer_stripped: + return tset.project_as_args("stripped_excluding_filelist", ordering = ordering) + return tset.project_as_args("default_excluding_filelist", ordering = ordering) + + if args.infos != None: + return cmd_args([link_info_to_args(info, LinkInfoArgumentFilter("excluding_filelist")) for info in args.infos]) + + if args.flags != None: + return args.flags + + fail("Unpacked invalid empty link args") + def unpack_external_debug_info(actions: AnalysisActions, args: LinkArgs) -> ArtifactTSet: if args.tset != None: if args.tset.prefer_stripped: @@ -679,7 +731,7 @@ def map_to_link_infos(links: list[LinkArgs]) -> list[LinkInfo]: res = [] def append(v): - if v.pre_flags or v.post_flags or v.linkables: + if v.pre_flags or v.post_flags or v.dist_thin_lto_codegen_flags or v.linkables: res.append(v) for link in links: @@ -695,7 +747,7 @@ def map_to_link_infos(links: list[LinkArgs]) -> list[LinkInfo]: append(link) continue if link.flags != None: - append(LinkInfo(pre_flags = link.flags)) + append(LinkInfo(pre_flags = [link.flags])) continue fail("Unpacked invalid empty link args") return res @@ -865,13 +917,13 @@ def merge_swiftmodule_linkables(ctx: AnalysisContext, linkables: list[[Swiftmodu ], )) -def wrap_with_no_as_needed_shared_libs_flags(linker_type: str, link_info: LinkInfo) -> LinkInfo: +def wrap_with_no_as_needed_shared_libs_flags(linker_type: LinkerType, link_info: LinkInfo) -> LinkInfo: """ Wrap link info in args used to prevent linkers from dropping unused shared library dependencies from e.g. the DT_NEEDED tags of the link. """ - if linker_type == "gnu": + if linker_type == LinkerType("gnu"): return wrap_link_info( inner = link_info, pre_flags = ( @@ -881,7 +933,7 @@ def wrap_with_no_as_needed_shared_libs_flags(linker_type: str, link_info: LinkIn post_flags = ["-Wl,--pop-state"], ) - if linker_type == "darwin": + if linker_type == LinkerType("darwin"): return link_info fail("Linker type {} not supported".format(linker_type)) @@ -893,7 +945,9 @@ LinkCommandDebugOutput = record( filename = str, command = ArgLike, argsfile = Artifact, - filelist = [Artifact, None], + filelist = Artifact | None, + dist_thin_lto_codegen_argsfile = Artifact | None, + dist_thin_lto_index_argsfile = Artifact | None, ) # NB: Debug output is _not_ transitive over deps, so tsets are not used here.
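The two new projections split one link line into complementary halves: "filelist_only" carries the inputs destined for a Darwin -filelist file, while "excluding_filelist" carries everything else. A minimal sketch of a consumer, assuming a Darwin-style linker; the helper name and the "link.filelist" output are illustrative, not part of this diff:

    load(":link_info.bzl", "LinkArgs", "unpack_link_args_excluding_filelist", "unpack_link_args_filelist")

    # Illustrative only: combine both projections into a single linker command.
    def _filelist_split_link_args(actions: AnalysisActions, args: LinkArgs) -> cmd_args:
        # Flags, archives, and all other non-filelist arguments.
        cmd = cmd_args(unpack_link_args_excluding_filelist(args))
        filelist_content = unpack_link_args_filelist(args)
        if filelist_content != None:
            # Materialize the filelist inputs into a file read via -filelist, and
            # keep the referenced artifacts as hidden inputs of the action.
            filelist, _ = actions.write("link.filelist", filelist_content, allow_args = True)
            cmd.add(["-Wl,-filelist", filelist])
            cmd.add(cmd_args(hidden = filelist_content))
        return cmd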
@@ -908,31 +962,52 @@ UnstrippedLinkOutputInfo = provider(fields = { }) def make_link_command_debug_output(linked_object: LinkedObject) -> [LinkCommandDebugOutput, None]: - if not linked_object.output or not linked_object.linker_command or not linked_object.linker_argsfile: + local_link_debug_info_present = linked_object.output and linked_object.linker_command and linked_object.linker_argsfile + distributed_link_debug_info_present = linked_object.dist_thin_lto_index_argsfile and linked_object.dist_thin_lto_codegen_argsfile + if not local_link_debug_info_present and not distributed_link_debug_info_present: return None return LinkCommandDebugOutput( filename = linked_object.output.short_path, command = linked_object.linker_command, argsfile = linked_object.linker_argsfile, filelist = linked_object.linker_filelist, + dist_thin_lto_index_argsfile = linked_object.dist_thin_lto_index_argsfile, + dist_thin_lto_codegen_argsfile = linked_object.dist_thin_lto_codegen_argsfile, ) # Given a list of `LinkCommandDebugOutput`, it will produce a JSON info file. # The JSON info file will contain entries for each link command. In addition, # it will _not_ materialize any inputs to the link command except: +# +# For local thin-LTO: # - linker argfile # - linker filelist (if present - only applicable to Darwin linkers) +# +# For distributed thin-LTO: +# - thin-link argsfile (flags only, no input files) +# - codegen argsfile (flags only, no input files) def make_link_command_debug_output_json_info(ctx: AnalysisContext, debug_outputs: list[LinkCommandDebugOutput]) -> Artifact: json_info = [] associated_artifacts = [] for debug_output in debug_outputs: - json_info.append({ - "command": debug_output.command, - "filename": debug_output.filename, - }) - - # Ensure all argsfile and filelists get materialized, as those are needed for debugging - associated_artifacts.extend(filter(None, [debug_output.argsfile, debug_output.filelist])) + is_distributed_link = debug_output.dist_thin_lto_index_argsfile and debug_output.dist_thin_lto_codegen_argsfile + if is_distributed_link: + json_info.append({ + "dist_thin_lto_codegen_argsfile": debug_output.dist_thin_lto_codegen_argsfile, + "dist_thin_lto_index_argsfile": debug_output.dist_thin_lto_index_argsfile, + "filename": debug_output.filename, + }) + + associated_artifacts.extend([debug_output.dist_thin_lto_codegen_argsfile, debug_output.dist_thin_lto_index_argsfile]) + else: + json_info.append({ + "argsfile": debug_output.argsfile, + "command": debug_output.command, + "filename": debug_output.filename, + }) + + # Ensure all argsfiles and filelists get materialized, as those are needed for debugging + associated_artifacts.extend(filter(None, [debug_output.argsfile, debug_output.filelist])) # Explicitly drop all inputs by using `with_inputs = False`; we don't want # to materialize all inputs to the link actions (which includes all object files diff --git a/prelude/linking/linkable_graph.bzl b/prelude/linking/linkable_graph.bzl index b74fc2d1f3597..528d3ae022e46 100644 --- a/prelude/linking/linkable_graph.bzl +++ b/prelude/linking/linkable_graph.bzl @@ -7,26 +7,37 @@ load("@prelude//cxx:cxx_toolchain_types.bzl", "PicBehavior") load("@prelude//cxx:headers.bzl", "CPrecompiledHeaderInfo") +load("@prelude//cxx:platform.bzl", "cxx_by_platform") + +# TODO(mattpayne): Add this back once the type is supported by dependency mgmt +# load("@prelude//cxx:shared_library_interface.bzl", "SharedInterfaceInfo") +load("@prelude//linking:types.bzl", "Linkage") load("@prelude//python:python.bzl",
"PythonLibraryInfo") +load("@prelude//utils:expect.bzl", "expect") load( "@prelude//utils:graph_utils.bzl", - "breadth_first_traversal_by", + "depth_first_traversal_by", +) +load( + "@prelude//utils:utils.bzl", + "flatten", ) -load("@prelude//utils:utils.bzl", "expect") load( ":link_info.bzl", "LibOutputStyle", "LinkInfo", # @unused Used as a type "LinkInfos", "LinkStrategy", - "Linkage", - "LinkedObject", "LinkerFlags", "MergedLinkInfo", "get_lib_output_style", "get_output_styles_for_linkage", _get_link_info = "get_link_info", ) +load( + ":shared_libraries.bzl", + "SharedLibraries", +) # A provider with information used to link a rule into a shared library. # Potential omnibus roots must provide this so that omnibus can link them @@ -34,6 +45,7 @@ load( LinkableRootInfo = provider( # @unsorted-dict-items fields = { + "label": provider_field(Label), "link_infos": provider_field(typing.Any, default = None), # LinkInfos "name": provider_field(typing.Any, default = None), # [str, None] "deps": provider_field(typing.Any, default = None), # ["label"] @@ -47,12 +59,14 @@ LinkableRootInfo = provider( ############################################################################### _DisallowConstruction = record() +_TargetSourceType = Artifact | str | tuple LinkableNode = record( # Attribute labels on the target. labels = field(list[str], []), # Preferred linkage for this target. preferred_linkage = field(Linkage, Linkage("any")), + default_link_strategy = field(LinkStrategy), # Linkable deps of this target. deps = field(list[Label], []), # Exported linkable deps of this target. @@ -62,6 +76,10 @@ LinkableNode = record( # deps and their (transitive) exported deps. This helps keep link lines smaller # and produces more efficient libs (for example, DT_NEEDED stays a manageable size). exported_deps = field(list[Label], []), + + # List of both deps and exported deps. We traverse linkable graph lots of times + # and preallocating this list saves RAM during analysis + all_deps = field(list[Label], []), # Link infos for all supported lib output styles supported by this node. This should have a value # for every output_style supported by the preferred linkage. link_infos = field(dict[LibOutputStyle, LinkInfos], {}), @@ -73,7 +91,7 @@ LinkableNode = record( # Shared libraries provided by this target. Used if this target is # excluded. - shared_libs = field(dict[str, LinkedObject], {}), + shared_libs = field(SharedLibraries, SharedLibraries(libraries = [])), # The soname this node would use in default link strategies. May be used by non-default # link strategies as a lib's soname. @@ -83,9 +101,20 @@ LinkableNode = record( # as an asset in android apks. can_be_asset = field(bool), + # Collected target sources from the target. + srcs = field(list[_TargetSourceType]), + # Whether the node should appear in the android mergemap (which provides information about the original # soname->final merged lib mapping) include_in_android_mergemap = field(bool), + # Don't follow dependents on this node even if has preferred linkage static + ignore_force_static_follows_dependents = field(bool), + + # Shared interface provider for this node. + # TODO(mattpayne): This type is incompatible with Autodeps. + # Once the pyautotargets service is rolled out, we can change it back. + # It should be SharedInterfaceInfo | None + shared_interface_info = field(typing.Any), # Only allow constructing within this file. 
_private = _DisallowConstruction, @@ -132,17 +161,31 @@ def _get_required_outputs_for_linkage(linkage: Linkage) -> list[LibOutputStyle]: return get_output_styles_for_linkage(linkage) +def _get_target_sources(ctx: AnalysisContext) -> list[_TargetSourceType]: + srcs = [] + if hasattr(ctx.attrs, "srcs"): + srcs.extend(ctx.attrs.srcs) + if hasattr(ctx.attrs, "platform_srcs"): + srcs.extend(flatten(cxx_by_platform(ctx, ctx.attrs.platform_srcs))) + return srcs + def create_linkable_node( ctx: AnalysisContext, default_soname: str | None, preferred_linkage: Linkage = Linkage("any"), - deps: list[Dependency] = [], - exported_deps: list[Dependency] = [], + default_link_strategy: LinkStrategy = LinkStrategy("shared"), + deps: list[Dependency | LinkableGraph] = [], + exported_deps: list[Dependency | LinkableGraph] = [], link_infos: dict[LibOutputStyle, LinkInfos] = {}, - shared_libs: dict[str, LinkedObject] = {}, + shared_libs: SharedLibraries = SharedLibraries(libraries = []), can_be_asset: bool = True, include_in_android_mergemap: bool = True, - linker_flags: [LinkerFlags, None] = None) -> LinkableNode: + linker_flags: [LinkerFlags, None] = None, + ignore_force_static_follows_dependents: bool = False, + # TODO(mattpayne): This type is incompatible with Autodeps. + # Once the pyautotargets service is rolled out, we can change it back. + # It should be SharedInterfaceInfo | None + shared_interface_info: typing.Any = None) -> LinkableNode: for output_style in _get_required_outputs_for_linkage(preferred_linkage): expect( output_style in link_infos, @@ -150,17 +193,24 @@ def create_linkable_node( ) if not linker_flags: linker_flags = LinkerFlags() + deps = linkable_deps(deps) + exported_deps = linkable_deps(exported_deps) return LinkableNode( labels = ctx.attrs.labels, preferred_linkage = preferred_linkage, - deps = linkable_deps(deps), - exported_deps = linkable_deps(exported_deps), + default_link_strategy = default_link_strategy, + deps = deps, + exported_deps = exported_deps, + all_deps = deps + exported_deps, link_infos = link_infos, shared_libs = shared_libs, can_be_asset = can_be_asset, + srcs = _get_target_sources(ctx), include_in_android_mergemap = include_in_android_mergemap, default_soname = default_soname, linker_flags = linker_flags, + ignore_force_static_follows_dependents = ignore_force_static_follows_dependents, + shared_interface_info = shared_interface_info, _private = _DisallowConstruction(), ) @@ -168,9 +218,12 @@ def create_linkable_graph_node( ctx: AnalysisContext, linkable_node: [LinkableNode, None] = None, roots: dict[Label, LinkableRootInfo] = {}, - excluded: dict[Label, None] = {}) -> LinkableGraphNode: + excluded: dict[Label, None] = {}, + label: Label | None = None) -> LinkableGraphNode: + if not label: + label = ctx.label return LinkableGraphNode( - label = ctx.label, + label = label, linkable = linkable_node, roots = roots, excluded = excluded, @@ -184,7 +237,7 @@ def create_linkable_graph( deps: list[[LinkableGraph, Dependency]] = []) -> LinkableGraph: graph_deps = [] for d in deps: - if eval_type(LinkableGraph.type).matches(d): + if isinstance(d, LinkableGraph): graph_deps.append(d) else: graph = d.get(LinkableGraph) @@ -193,7 +246,7 @@ def create_linkable_graph( deps_labels = {x.label: True for x in graph_deps} if node and node.linkable: - for l in [node.linkable.deps, node.linkable.exported_deps]: + for l in [node.linkable.deps, node.linkable.exported_deps]: # buildifier: disable=confusing-name for d in l: if not d in deps_labels: fail("LinkableNode had {} in its deps, 
but that label is missing from the node's linkable graph children (`{}`)".format(d, ", ".join(deps_labels))) @@ -205,8 +258,11 @@ def create_linkable_graph( } if node: kwargs["value"] = node + label = node.label + else: + label = ctx.label return LinkableGraph( - label = ctx.label, + label = label, nodes = ctx.actions.tset(LinkableGraphTSet, **kwargs), ) @@ -221,13 +277,16 @@ def get_linkable_graph_node_map_func(graph: LinkableGraph): return get_linkable_graph_node_map -def linkable_deps(deps: list[Dependency]) -> list[Label]: +def linkable_deps(deps: list[Dependency | LinkableGraph]) -> list[Label]: labels = [] for dep in deps: - dep_info = linkable_graph(dep) - if dep_info != None: - labels.append(dep_info.label) + if isinstance(dep, LinkableGraph): + labels.append(dep.label) + else: + dep_info = linkable_graph(dep) + if dep_info != None: + labels.append(dep_info.label) return labels @@ -256,10 +315,12 @@ def linkable_graph(dep: Dependency) -> [LinkableGraph, None]: def get_link_info( node: LinkableNode, output_style: LibOutputStyle, - prefer_stripped: bool = False) -> LinkInfo: + prefer_stripped: bool = False, + prefer_optimized: bool = False) -> LinkInfo: info = _get_link_info( node.link_infos[output_style], prefer_stripped = prefer_stripped, + prefer_optimized = prefer_optimized, ) return info @@ -291,8 +352,8 @@ def get_transitive_deps( """ def find_transitive_deps(node: Label): - return link_infos[node].deps + link_infos[node].exported_deps + return link_infos[node].all_deps - all_deps = breadth_first_traversal_by(link_infos, roots, find_transitive_deps) + all_deps = depth_first_traversal_by(link_infos, roots, find_transitive_deps) return all_deps diff --git a/prelude/linking/linkables.bzl b/prelude/linking/linkables.bzl index 9b7034dd345f1..e84beae4c66c0 100644 --- a/prelude/linking/linkables.bzl +++ b/prelude/linking/linkables.bzl @@ -5,10 +5,7 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -load( - "@prelude//utils:utils.bzl", - "expect", -) +load("@prelude//utils:expect.bzl", "expect") load( ":link_groups.bzl", "LinkGroupLibInfo", diff --git a/prelude/linking/lto.bzl b/prelude/linking/lto.bzl index f275d00590179..fab91ec6d7055 100644 --- a/prelude/linking/lto.bzl +++ b/prelude/linking/lto.bzl @@ -5,7 +5,11 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxToolchainInfo") +load( + "@prelude//cxx:cxx_toolchain_types.bzl", + "CxxToolchainInfo", + "LinkerType", +) load("@prelude//cxx:debug.bzl", "SplitDebugMode") # Styles of LTO. @@ -50,7 +54,7 @@ def get_split_debug_lto_info(actions: AnalysisActions, cxx_toolchain: CxxToolcha # TODO: It might be nice to generalize a bit more and move the darwin v. gnu # differences into toolchain settings (e.g. `split_debug_lto_flags_fmt`). - if linker_info.type == "darwin": + if linker_info.type == LinkerType("darwin"): # https://releases.llvm.org/14.0.0/tools/clang/docs/CommandGuide/clang.html#cmdoption-flto # We need to pass -object_path_lto to keep the temporary LTO object files around to use # for dSYM generation.
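The `all_deps` field introduced above trades a small amount of memory at node-construction time for cheaper graph walks: the `get_transitive_deps` change in this hunk reads one precomputed list per node instead of concatenating `deps + exported_deps` on every visit. A minimal sketch of the same pattern (the helper name is illustrative):

    load("@prelude//utils:graph_utils.bzl", "depth_first_traversal_by")

    # Illustrative only: collect transitive linkable labels from root targets.
    def _transitive_linkable_labels(
            nodes: dict[Label, LinkableNode],
            roots: list[Label]) -> list[Label]:
        # One field read per visited node; no per-visit list allocation.
        return depth_first_traversal_by(nodes, roots, lambda label: nodes[label].all_deps)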
@@ -74,7 +78,7 @@ def get_split_debug_lto_info(actions: AnalysisActions, cxx_toolchain: CxxToolcha linker_flags = linker_args, ) - if linker_info.type == "gnu": + if linker_info.type == LinkerType("gnu"): dwo_dir = actions.declare_output(name + ".dwo.d", dir = True) linker_flags = cmd_args([ diff --git a/prelude/linking/shared_libraries.bzl b/prelude/linking/shared_libraries.bzl index 6e687594a7f47..e4303f8667b8e 100644 --- a/prelude/linking/shared_libraries.bzl +++ b/prelude/linking/shared_libraries.bzl @@ -12,6 +12,20 @@ load( "LinkedObject", # @unused Used as a type ) load("@prelude//linking:strip.bzl", "strip_object") +load("@prelude//utils:expect.bzl", "expect") + +Soname = record( + # Return the SONAME if it's a string, otherwise None. + as_str = field(typing.Callable), + # Return the SONAME as a string, throwing an error if it is actually an + # artifact. + ensure_str = field(typing.Callable), + # Return `True` if the SONAME is represented as a string. + is_str = field(bool), + # The actual SONAME can be represented by a static string, or the + # contents of a file generated at build time. + _soname = field(str | Artifact), +) SharedLibrary = record( lib = field(LinkedObject), @@ -19,23 +33,48 @@ # The LinkArgs used to produce this LinkedObject. This can be useful for debugging or # for downstream rules to reproduce the shared library with some modifications (for example # android relinker will link again with an added version script argument). # TODO(cjhopman): This is currently always available. - link_args = field(list[LinkArgs] | None), + link_args = field(list[LinkArgs] | None, None), # The sonames of the shared libraries that this links against. # TODO(cjhopman): This is currently always available. - shlib_deps = field(list[str] | None), - stripped_lib = field([Artifact, None]), - can_be_asset = field(bool), - for_primary_apk = field(bool), - soname = field(str), + shlib_deps = field(list[str] | None, None), + stripped_lib = field(Artifact | None, None), + can_be_asset = field(bool, False), + for_primary_apk = field(bool, False), + soname = field(Soname), label = field(Label), ) +def _ensure_str(soname: str | Artifact) -> str: + expect(type(soname) == type(""), "SONAME is not a `str`: {}", soname) + return soname + +def to_soname(soname: str | Artifact | Soname) -> Soname: + if isinstance(soname, Soname): + return soname + soname_is_str = isinstance(soname, str) + return Soname( + as_str = lambda: soname if soname_is_str else None, + ensure_str = lambda: _ensure_str(soname), + is_str = soname_is_str, + _soname = soname, + ) + +def create_shlib( + # The soname can either be a string or an artifact with the soname in + # text form. + soname: str | Artifact | Soname, + **kwargs) -> SharedLibrary: + return SharedLibrary( + soname = to_soname(soname), + **kwargs + ) + SharedLibraries = record( # A mapping of shared library SONAME (e.g. `libfoo.so.2`) to the artifact. # Since the SONAME is what the dynamic loader uses to uniquely identify # libraries, using this as the key allows easily detecting conflicts from # dependencies. - libraries = field(dict[str, SharedLibrary]), + libraries = field(list[SharedLibrary]), ) # T-set of SharedLibraries @@ -44,7 +83,7 @@ SharedLibrariesTSet = transitive_set() # Shared libraries required by top-level packaging rules (e.g.
shared libs # for Python binary, symlink trees of shared libs for C++ binaries) SharedLibraryInfo = provider(fields = { - "set": provider_field(typing.Any, default = None), # SharedLibrariesTSet | None + "set": provider_field(SharedLibrariesTSet | None, default = None), }) def get_strip_non_global_flags(cxx_toolchain: CxxToolchainInfo) -> list: @@ -53,6 +92,27 @@ def get_strip_non_global_flags(cxx_toolchain: CxxToolchainInfo) -> list: return ["--strip-unneeded"] +def create_shlib_from_ctx( + ctx: AnalysisContext, + soname: str | Artifact | Soname, + lib: LinkedObject): + cxx_toolchain = getattr(ctx.attrs, "_cxx_toolchain", None) + return create_shlib( + lib = lib, + stripped_lib = strip_object( + ctx, + cxx_toolchain[CxxToolchainInfo], + lib.output, + cmd_args(get_strip_non_global_flags(cxx_toolchain[CxxToolchainInfo])), + ) if cxx_toolchain != None else None, + link_args = lib.link_args, + shlib_deps = None, # TODO(cjhopman): we need this figured out. + can_be_asset = getattr(ctx.attrs, "can_be_asset", False) or False, + for_primary_apk = getattr(ctx.attrs, "used_by_wrap_script", False), + label = ctx.label, + soname = soname, + ) + def create_shared_libraries( ctx: AnalysisContext, libraries: dict[str, LinkedObject]) -> SharedLibraries: @@ -60,57 +120,13 @@ def create_shared_libraries( Take a mapping of dest -> src and turn it into a mapping that will be passed around in providers. Used for both srcs, and resources. """ - cxx_toolchain = getattr(ctx.attrs, "_cxx_toolchain", None) return SharedLibraries( - libraries = {name: SharedLibrary( - lib = shlib, - stripped_lib = strip_object( - ctx, - cxx_toolchain[CxxToolchainInfo], - shlib.output, - cmd_args(get_strip_non_global_flags(cxx_toolchain[CxxToolchainInfo])), - ) if cxx_toolchain != None else None, - link_args = shlib.link_args, - shlib_deps = None, # TODO(cjhopman): we need this figured out. - can_be_asset = getattr(ctx.attrs, "can_be_asset", False) or False, - for_primary_apk = getattr(ctx.attrs, "used_by_wrap_script", False), - label = ctx.label, - soname = name, - ) for (name, shlib) in libraries.items()}, + libraries = [ + create_shlib_from_ctx(ctx = ctx, soname = name, lib = shlib) + for (name, shlib) in libraries.items() + ], ) -# We do a lot of merging library maps, so don't use O(n) type annotations -def _merge_lib_map( - # dict[str, SharedLibrary] - dest_mapping, - # [dict[str, SharedLibrary] - mapping_to_merge, - filter_func) -> None: - """ - Merges a mapping_to_merge into `dest_mapping`. Fails if different libraries - map to the same name. - """ - for (name, src) in mapping_to_merge.items(): - if filter_func != None and not filter_func(name, src): - continue - existing = dest_mapping.get(name) - if existing != None and existing.lib != src.lib: - error = ( - "Duplicate library {}! Conflicting mappings:\n" + - "{} from {}\n" + - "{} from {}" - ) - fail( - error.format( - name, - existing.lib, - existing.label, - src.lib, - src.label, - ), - ) - dest_mapping[name] = src - # Merge multiple SharedLibraryInfo. The value in `node` represents a set of # SharedLibraries that is provided by the target being analyzed. It's optional # because that might not always exist, e.g. 
a Python library can pass through @@ -131,11 +147,156 @@ def merge_shared_libraries( set = actions.tset(SharedLibrariesTSet, **kwargs) if kwargs else None return SharedLibraryInfo(set = set) -def traverse_shared_library_info( - info: SharedLibraryInfo, - filter_func = None): # -> dict[str, SharedLibrary]: - libraries = {} +def traverse_shared_library_info(info: SharedLibraryInfo): # -> list[SharedLibrary]: + libraries = [] if info.set: for libs in info.set.traverse(): - _merge_lib_map(libraries, libs.libraries, filter_func) + libraries.extend(libs.libraries) return libraries + +# Helper to merge shlibs, throwing an error if more than one has the same SONAME. +def _merge_shlibs( + shared_libs: list[SharedLibrary], + resolve_soname: typing.Callable) -> dict[str, SharedLibrary]: + merged = {} + for shlib in shared_libs: + soname = resolve_soname(shlib.soname) + existing = merged.get(soname) + if existing != None and existing.lib != shlib.lib: + error = ( + "Duplicate library {}! Conflicting mappings:\n" + + "{} from {}\n" + + "{} from {}" + ) + fail( + error.format( + shlib.soname, + existing.lib, + existing.label, + shlib.lib, + shlib.label, + ), + ) + merged[soname] = shlib + return merged + +def with_unique_str_sonames( + shared_libs: list[SharedLibrary], + skip_dynamic: bool = False) -> dict[str, SharedLibrary]: + """ + Convert a list of `SharedLibrary`s to a map of unique SONAMEs to the + corresponding `SharedLibrary`. + + Will fail if the same SONAME maps to multiple `SharedLibrary`s. + """ + return _merge_shlibs( + shared_libs = [ + shlib + for shlib in shared_libs + if shlib.soname.is_str or not skip_dynamic + ], + resolve_soname = lambda s: s.ensure_str(), + ) + +def gen_shared_libs_action( + actions: AnalysisActions, + out: str, + shared_libs: list[SharedLibrary], + gen_action: typing.Callable, + dir = False): + """ + Produce an action by first resolving all SONAMEs of the given shlibs and + enforcing that each SONAME is unique. + + The provided `gen_action` callable is called with a map of unique SONAMEs + to the corresponding shlibs. + """ + + output = actions.declare_output(out, dir = dir) + + def func(actions, artifacts, output): + def resolve_soname(soname): + if soname.is_str: + return soname._soname + else: + return artifacts[soname._soname].read_string().strip() + + gen_action( + actions, + output, + _merge_shlibs( + shared_libs = shared_libs, + resolve_soname = resolve_soname, + ), + ) + + dynamic_sonames = [shlib.soname._soname for shlib in shared_libs if not shlib.soname.is_str] + if dynamic_sonames: + actions.dynamic_output( + dynamic = dynamic_sonames, + inputs = [], + outputs = [output.as_output()], + f = lambda ctx, artifacts, outputs: func(ctx.actions, artifacts, outputs[output]), + ) + else: + func(actions, {}, output) + + return output + +def zip_shlibs( + merged: dict[str, SharedLibrary], + vals: list[(SharedLibrary, typing.Any)]) -> list[(str, SharedLibrary, typing.Any)]: + """ + Helper to "zip" together the soname->shlib map into a list with associated + shared lib values. + + This is useful for callers of `gen_shared_libs_action` to combine the merged + shared libs, in dedup'd dict form, with some additional data.
+ """ + + zipped = [] + + # Walk through the shlib and val tuples + idx = 0 + for soname, shlib in merged.items(): + for idx in range(idx, len(vals)): + if vals[idx][0] == shlib: + break + zipped.append((soname, shlib, vals[idx][1])) + + return zipped + +def create_shlib_symlink_tree(actions: AnalysisActions, out: str, shared_libs: list[SharedLibrary]): + """ + Merged shared libs into a symlink tree mapping the library's SONAME to + it's artifact. + """ + return gen_shared_libs_action( + actions = actions, + out = out, + shared_libs = shared_libs, + gen_action = lambda actions, output, shared_libs: actions.symlinked_dir( + output, + {name: shlib.lib.output for name, shlib in shared_libs.items()}, + ), + dir = True, + ) + +def extract_soname_from_shlib( + actions: AnalysisActions, + name: str, + shared_lib: Artifact) -> Artifact: + """ + Extract the SONAME from a shared library into a file. + """ + soname = actions.declare_output(name) + cmd = cmd_args( + "sh", + "-c", + '''set -euo pipefail; objdump -p "$1" | grep SONAME | awk '{print $2}' > "$2"''', + "", + shared_lib, + soname.as_output(), + ) + actions.run(cmd, category = "extract_soname", identifier = shared_lib.short_path) + return soname diff --git a/prelude/linking/stamp_build_info.bzl b/prelude/linking/stamp_build_info.bzl new file mode 100644 index 0000000000000..a12327bfab65a --- /dev/null +++ b/prelude/linking/stamp_build_info.bzl @@ -0,0 +1,33 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//:paths.bzl", "paths") +load("@prelude//cxx:cxx_context.bzl", "get_cxx_toolchain_info") + +def stamp_build_info(ctx: AnalysisContext, obj: Artifact) -> Artifact: + """ + If necessary, add fb_build_info section to binary via late-stamping + """ + if hasattr(ctx.attrs, "_build_info") and ctx.attrs._build_info: + ctx.attrs._build_info["late_stamping"] = True + build_info_json = ctx.actions.write_json(obj.short_path + "-build-info.json", ctx.attrs._build_info) + stem, ext = paths.split_extension(obj.short_path) + stamped_output = ctx.actions.declare_output(stem + "-stamped" + ext) + + ctx.actions.run( + cmd_args([ + get_cxx_toolchain_info(ctx).binary_utilities_info.objcopy, + "--add-section", + cmd_args(build_info_json, format = "fb_build_info={}"), + obj, + stamped_output.as_output(), + ]), + identifier = obj.short_path, + category = "stamp_build_info", + ) + return stamped_output + return obj diff --git a/prelude/linking/strip.bzl b/prelude/linking/strip.bzl index 1c753dde9e099..9bcd22207dbf6 100644 --- a/prelude/linking/strip.bzl +++ b/prelude/linking/strip.bzl @@ -6,16 +6,20 @@ # of this source tree. load("@prelude//cxx:cxx_context.bzl", "get_cxx_toolchain_info") -load("@prelude//cxx:cxx_library_utility.bzl", "cxx_is_gnu") -load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxToolchainInfo") +load( + "@prelude//cxx:cxx_toolchain_types.bzl", + "CxxToolchainInfo", + "LinkerType", +) def _strip_debug_info(ctx: AnalysisContext, out: str, obj: Artifact) -> Artifact: """ Strip debug information from an object. 
""" - strip = get_cxx_toolchain_info(ctx).binary_utilities_info.strip + cxx_toolchain = get_cxx_toolchain_info(ctx) + strip = cxx_toolchain.binary_utilities_info.strip output = ctx.actions.declare_output("__stripped__", out) - if cxx_is_gnu(ctx): + if cxx_toolchain.linker_info.type == LinkerType("gnu"): cmd = cmd_args([strip, "--strip-debug", "--strip-unneeded", "-o", output.as_output(), obj]) else: cmd = cmd_args([strip, "-S", "-o", output.as_output(), obj]) @@ -60,7 +64,6 @@ def strip_debug_info( out = out, obj = obj, ), - with_artifacts = True, ).artifact("strip_debug_info") return ctx.actions.assert_short_path(strip_debug_info, short_path = out) @@ -71,23 +74,28 @@ def strip_debug_info( obj = obj, ) -def strip_object(ctx: AnalysisContext, cxx_toolchain: CxxToolchainInfo, unstripped: Artifact, strip_flags: cmd_args, category_suffix: [str, None] = None) -> Artifact: +def strip_object(ctx: AnalysisContext, cxx_toolchain: CxxToolchainInfo, unstripped: Artifact, strip_flags: cmd_args, category_suffix: [str, None] = None, output_path: [str, None] = None) -> Artifact: """ Strip unneeded information from binaries / shared libs. """ strip = cxx_toolchain.binary_utilities_info.strip - stripped_lib = ctx.actions.declare_output("stripped/{}".format(unstripped.short_path)) + + output_path = output_path or unstripped.short_path + stripped_lib = ctx.actions.declare_output("stripped/{}".format(output_path)) # TODO(T109996375) support configuring the flags used for stripping - cmd = cmd_args() - cmd.add(strip) - cmd.add(strip_flags) - cmd.add([unstripped, "-o", stripped_lib.as_output()]) + cmd = cmd_args( + strip, + strip_flags, + unstripped, + "-o", + stripped_lib.as_output(), + ) effective_category_suffix = category_suffix if category_suffix else "shared_lib" category = "strip_{}".format(effective_category_suffix) - ctx.actions.run(cmd, category = category, identifier = unstripped.short_path) + ctx.actions.run(cmd, category = category, identifier = output_path) return stripped_lib @@ -105,7 +113,7 @@ def strip_debug_with_gnu_debuglink(ctx: AnalysisContext, name: str, obj: Artifac ctx.actions.run(cmd, category = "extract_debuginfo", identifier = name) binary_output = ctx.actions.declare_output("__stripped_objects__", name) - cmd = cmd_args([objcopy, "--strip-debug", "--add-gnu-debuglink", debuginfo_output, obj, binary_output.as_output()]) + cmd = cmd_args([objcopy, "--strip-debug", "--keep-file-symbols", "--add-gnu-debuglink", debuginfo_output, obj, binary_output.as_output()]) ctx.actions.run(cmd, category = "strip_debug", identifier = name) return binary_output, debuginfo_output diff --git a/prelude/linking/types.bzl b/prelude/linking/types.bzl new file mode 100644 index 0000000000000..486318bedbc25 --- /dev/null +++ b/prelude/linking/types.bzl @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# Ways a library can request to be linked (e.g. usually specific via a rule +# param like `preferred_linkage`). The actual link style used for a library is +# usually determined by a combination of this and the link style being exported +# via a provider. 
+Linkage = enum( + "any", + "static", + "shared", +) diff --git a/prelude/matlab/matlab.bzl b/prelude/matlab/matlab.bzl new file mode 100644 index 0000000000000..f1fedd9d0e113 --- /dev/null +++ b/prelude/matlab/matlab.bzl @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load(":matlab_program.bzl", "matlab_program_impl") +load(":matlab_toolchain.bzl", "matlab_toolchain") + +implemented_rules = { + "matlab_program": matlab_program_impl, +} + +extra_attributes = { + "matlab_program": { + "main": attrs.source(), + "_matlab_toolchain": matlab_toolchain(), + }, +} diff --git a/prelude/matlab/matlab_info.bzl b/prelude/matlab/matlab_info.bzl new file mode 100644 index 0000000000000..3e15fc162105a --- /dev/null +++ b/prelude/matlab/matlab_info.bzl @@ -0,0 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +MatlabToolchainInfo = provider(fields = { + "matlab_exe": RunInfo, +}) diff --git a/prelude/matlab/matlab_program.bzl b/prelude/matlab/matlab_program.bzl new file mode 100644 index 0000000000000..c78cb1be37df1 --- /dev/null +++ b/prelude/matlab/matlab_program.bzl @@ -0,0 +1,23 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load(":matlab_info.bzl", "MatlabToolchainInfo") + +def matlab_program_impl(ctx: AnalysisContext) -> list[Provider]: + toolchain = ctx.attrs._matlab_toolchain[MatlabToolchainInfo] + + cmd = cmd_args(toolchain.matlab_exe) + cmd.add( + "-batch", + cmd_args( + ctx.attrs.main.basename.removesuffix(".m"), + quote = "shell", + ), + ) + cmd.add("-sd", cmd_args(ctx.attrs.main, parent = 1)) + + return [DefaultInfo(default_output = None, other_outputs = [cmd]), RunInfo(cmd)] diff --git a/prelude/matlab/matlab_toolchain.bzl b/prelude/matlab/matlab_toolchain.bzl new file mode 100644 index 0000000000000..23456cf56d622 --- /dev/null +++ b/prelude/matlab/matlab_toolchain.bzl @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load(":matlab_info.bzl", "MatlabToolchainInfo") + +def matlab_toolchain(): + return attrs.default_only( + attrs.toolchain_dep( + default = "toolchains//:matlab", + providers = [MatlabToolchainInfo], + ), + ) diff --git a/prelude/native.bzl b/prelude/native.bzl index b9ec92be9578a..2d774327a0750 100644 --- a/prelude/native.bzl +++ b/prelude/native.bzl @@ -12,19 +12,20 @@ # **all** interpreted files. 
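For orientation, a hypothetical BUCK usage of the new `matlab_program` rule; the target and file names are invented, and per `matlab_program_impl` above the script runs as `matlab -batch <basename> -sd <source dir>`:

    # Illustrative only: a BUCK target consuming the new rule.
    matlab_program(
        name = "signal_demo",
        main = "signal_demo.m",
    )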
load("@prelude//android:cpu_filters.bzl", "ALL_CPU_FILTERS", "CPU_FILTER_FOR_DEFAULT_PLATFORM") -load("@prelude//apple:apple_macro_layer.bzl", "apple_binary_macro_impl", "apple_bundle_macro_impl", "apple_library_macro_impl", "apple_package_macro_impl", "apple_test_macro_impl") +load("@prelude//apple:apple_macro_layer.bzl", "apple_binary_macro_impl", "apple_bundle_macro_impl", "apple_library_macro_impl", "apple_package_macro_impl", "apple_test_macro_impl", "apple_universal_executable_macro_impl", "apple_xcuitest_macro_impl", "prebuilt_apple_framework_macro_impl") load("@prelude//apple/swift:swift_toolchain_macro_layer.bzl", "swift_toolchain_macro_impl") load("@prelude//cxx:cxx_toolchain.bzl", "cxx_toolchain_inheriting_target_platform") load("@prelude//cxx:cxx_toolchain_macro_layer.bzl", "cxx_toolchain_macro_impl") load("@prelude//cxx:cxx_toolchain_types.bzl", _cxx = "cxx") load("@prelude//erlang:erlang.bzl", _erlang_application = "erlang_application", _erlang_tests = "erlang_tests") load("@prelude//python:toolchain.bzl", _python = "python") +load("@prelude//rust:link_info.bzl", "RustLinkInfo") load("@prelude//rust:rust_common.bzl", "rust_common_macro_wrapper") load("@prelude//rust:rust_library.bzl", "rust_library_macro_wrapper") load("@prelude//rust:with_workspace.bzl", "with_rust_workspace") load("@prelude//user:all.bzl", _user_rules = "rules") +load("@prelude//utils:expect.bzl", "expect") load("@prelude//utils:selects.bzl", "selects") -load("@prelude//utils:utils.bzl", "expect") load(":is_full_meta_repo.bzl", "is_full_meta_repo") load(":paths.bzl", "paths") load(":rules.bzl", __rules__ = "rules") @@ -182,13 +183,26 @@ def _get_valid_cpu_filters(cpu_filters: [list[str], None]) -> list[str]: return [cpu_filter for cpu_filter in cpu_filters if cpu_filter in cpu_abis] +def _android_aar_macro_stub( + cpu_filters = None, + **kwargs): + __rules__["android_aar"]( + cpu_filters = _get_valid_cpu_filters(cpu_filters), + **kwargs + ) + def _android_binary_macro_stub( allow_r_dot_java_in_secondary_dex = False, cpu_filters = None, primary_dex_patterns = [], **kwargs): if not allow_r_dot_java_in_secondary_dex: - primary_dex_patterns = primary_dex_patterns + ["/R^", "/R$"] + primary_dex_patterns = primary_dex_patterns + [ + "/R^", + "/R$", + # Pin this to the primary for apps with no primary dex classes. + "^com/facebook/buck_generated/AppWithoutResourcesStub^", + ] __rules__["android_binary"]( allow_r_dot_java_in_secondary_dex = allow_r_dot_java_in_secondary_dex, cpu_filters = _get_valid_cpu_filters(cpu_filters), @@ -196,11 +210,27 @@ def _android_binary_macro_stub( **kwargs ) +def _android_bundle_macro_stub( + cpu_filters = None, + **kwargs): + __rules__["android_bundle"]( + cpu_filters = _get_valid_cpu_filters(cpu_filters), + **kwargs + ) + def _android_instrumentation_apk_macro_stub( cpu_filters = None, + primary_dex_patterns = [], **kwargs): + primary_dex_patterns = primary_dex_patterns + [ + "/R^", + "/R$", + # Pin this to the primary for apps with no primary dex classes. 
+ "^com/facebook/buck_generated/AppWithoutResourcesStub^", + ] __rules__["android_instrumentation_apk"]( cpu_filters = _get_valid_cpu_filters(cpu_filters), + primary_dex_patterns = primary_dex_patterns, **kwargs ) @@ -314,6 +344,13 @@ def _apple_watchos_bundle_macro_stub(**kwargs): **kwargs ) +def _apple_macos_bundle_macro_stub(**kwargs): + apple_bundle_macro_impl( + apple_bundle_rule = _user_rules["apple_macos_bundle"], + apple_resource_bundle_rule = _user_rules["apple_resource_bundle"], + **kwargs + ) + def _apple_test_macro_stub(**kwargs): apple_test_macro_impl( apple_test_rule = __rules__["apple_test"], @@ -321,6 +358,12 @@ def _apple_test_macro_stub(**kwargs): **kwargs ) +def _apple_xcuitest_macro_stub(**kwargs): + apple_xcuitest_macro_impl( + apple_xcuitest_rule = __rules__["apple_xcuitest"], + **kwargs + ) + def _apple_binary_macro_stub(**kwargs): apple_binary_macro_impl( apple_binary_rule = __rules__["apple_binary"], @@ -337,6 +380,13 @@ def _apple_library_macro_stub(**kwargs): def _apple_package_macro_stub(**kwargs): apple_package_macro_impl( apple_package_rule = __rules__["apple_package"], + apple_ipa_package_rule = _user_rules["apple_ipa_package"], + **kwargs + ) + +def _apple_universal_executable_macro_stub(**kwargs): + apple_universal_executable_macro_impl( + apple_universal_executable_rule = __rules__["apple_universal_executable"], **kwargs ) @@ -348,31 +398,21 @@ def _swift_toolchain_macro_stub(**kwargs): **kwargs ) -def _cxx_toolchain_macro_stub(inherit_target_platform = False, **kwargs): - if inherit_target_platform: - rule = cxx_toolchain_inheriting_target_platform - if is_full_meta_repo(): - cache_links = kwargs.get("cache_links") - kwargs["cache_links"] = select({ - "DEFAULT": cache_links, - "ovr_config//build_mode:fbcode-build-info-mode-disable": True, - "ovr_config//build_mode:fbcode-build-info-mode-full": False, - "ovr_config//build_mode:fbcode-build-info-mode-stable": True, - }) - else: - rule = __rules__["cxx_toolchain"] +def _cxx_toolchain_macro_stub(**kwargs): + if is_full_meta_repo(): + cache_links = kwargs.get("cache_links") + kwargs["cache_links"] = select({ + "DEFAULT": cache_links, + "ovr_config//platform/execution/constraints:execution-platform-transitioned": True, + }) cxx_toolchain_macro_impl( - cxx_toolchain_rule = rule, + cxx_toolchain_rule = cxx_toolchain_inheriting_target_platform, **kwargs ) -def _cxx_toolchain_override_macro_stub(inherit_target_platform = False, **kwargs): - if inherit_target_platform: - rule = _user_rules["cxx_toolchain_override_inheriting_target_platform"] - else: - rule = _user_rules["cxx_toolchain_override"] +def _cxx_toolchain_override_macro_stub(**kwargs): cxx_toolchain_macro_impl( - cxx_toolchain_rule = rule, + cxx_toolchain_rule = _user_rules["cxx_toolchain_override"], **kwargs ) @@ -403,24 +443,36 @@ def _rust_test_macro_stub(**kwargs): rust_test = rust_common_macro_wrapper(__rules__["rust_test"]) rust_test(**kwargs) +def _prebuilt_apple_framework_macro_stub(**kwargs): + prebuilt_apple_framework_macro_impl( + prebuilt_apple_framework_rule = __rules__["prebuilt_apple_framework"], + **kwargs + ) + # TODO(cjhopman): These macro wrappers should be handled in prelude/rules.bzl+rule_impl.bzl. # Probably good if they were defined to take in the base rule that # they are wrapping and return the wrapped one. 
__extra_rules__ = { + "android_aar": _android_aar_macro_stub, "android_binary": _android_binary_macro_stub, + "android_bundle": _android_bundle_macro_stub, "android_instrumentation_apk": _android_instrumentation_apk_macro_stub, "apple_binary": _apple_binary_macro_stub, "apple_bundle": _apple_bundle_macro_stub, "apple_library": _apple_library_macro_stub, + "apple_macos_bundle": _apple_macos_bundle_macro_stub, "apple_package": _apple_package_macro_stub, "apple_test": _apple_test_macro_stub, + "apple_universal_executable": _apple_universal_executable_macro_stub, "apple_watchos_bundle": _apple_watchos_bundle_macro_stub, + "apple_xcuitest": _apple_xcuitest_macro_stub, "configured_alias": _configured_alias_macro_stub, "cxx_toolchain": _cxx_toolchain_macro_stub, "cxx_toolchain_override": _cxx_toolchain_override_macro_stub, "erlang_application": _erlang_application_macro_stub, "erlang_tests": _erlang_tests_macro_stub, "export_file": _export_file_macro_stub, + "prebuilt_apple_framework": _prebuilt_apple_framework_macro_stub, "prebuilt_cxx_library": _prebuilt_cxx_library_macro_stub, "python_library": _python_library_macro_stub, "rust_binary": _rust_binary_macro_stub, @@ -431,12 +483,17 @@ __extra_rules__ = { "versioned_alias": _versioned_alias_macro_stub, } -__shimmed_native__ = __struct_to_dict(__internal__) +__shimmed_native__ = __struct_to_dict(__buck2_builtins__) __shimmed_native__.update(__rules__) __shimmed_native__.update(_user_rules) # Should come after the rules which are macro overridden __shimmed_native__.update(__extra_rules__) __shimmed_native__.update({"cxx": _cxx, "python": _python}) +__shimmed_native__.update({ + "__internal_autodeps_hacks__": struct( + rust_link_info = RustLinkInfo, + ), +}) native = struct(**__shimmed_native__) diff --git a/prelude/ocaml/ocaml.bzl b/prelude/ocaml/ocaml.bzl index 2fa1bc5153870..9cfe8fa201576 100644 --- a/prelude/ocaml/ocaml.bzl +++ b/prelude/ocaml/ocaml.bzl @@ -69,6 +69,7 @@ load( "LinkInfo", "LinkInfos", "LinkStrategy", + "LinkerFlags", "MergedLinkInfo", "ObjectsLinkable", "create_merged_link_info", @@ -77,7 +78,10 @@ load( ) load( "@prelude//linking:linkable_graph.bzl", + "LinkableGraph", "create_linkable_graph", + "create_linkable_graph_node", + "create_linkable_node", ) load( "@prelude//linking:shared_libraries.bzl", @@ -88,7 +92,7 @@ load( "@prelude//python:python.bzl", "PythonLibraryInfo", ) -load("@prelude//utils:graph_utils.bzl", "breadth_first_traversal", "post_order_traversal") +load("@prelude//utils:graph_utils.bzl", "depth_first_traversal", "post_order_traversal") load("@prelude//utils:platform_flavors_util.bzl", "by_platform") load("@prelude//utils:utils.bzl", "filter_and_map_idx", "flatten") load(":makefile.bzl", "parse_makefile") @@ -163,7 +167,7 @@ def _mk_script(ctx: AnalysisContext, file: str, args: list[typing.Any], env: dic is_executable = True, allow_args = True, ) - return cmd_args(script).hidden(args, env.values()) + return cmd_args(script, hidden = args + env.values()) # An environment in which a custom `bin` is at the head of `$PATH`. 
def _mk_env(ctx: AnalysisContext) -> dict[str, cmd_args]: @@ -215,6 +219,34 @@ def _mk_ocaml_compiler(ctx: AnalysisContext, env: dict[str, typing.Any], build_m script_args = _mk_script(ctx, script_name, [compiler], env) return script_args +def _get_empty_link_infos() -> dict[LibOutputStyle, LinkInfos]: + infos = {} + for output_style in LibOutputStyle: + infos[output_style] = LinkInfos(default = LinkInfo()) + return infos + +def _get_linkable_graph( + ctx: AnalysisContext, + deps: list[Dependency] = [], + link_infos: dict[LibOutputStyle, LinkInfos] = {}, + linker_flags: [LinkerFlags, None] = None) -> LinkableGraph: + if not deps: + deps = ctx.attrs.deps + return create_linkable_graph( + ctx, + node = create_linkable_graph_node( + ctx, + linkable_node = create_linkable_node( + ctx, + default_soname = None, + deps = deps, + link_infos = link_infos if link_infos else _get_empty_link_infos(), + linker_flags = linker_flags, + ), + ), + deps = deps, + ) + # A command initialized with flags common to all compiler commands. def _compiler_cmd(ctx: AnalysisContext, compiler: cmd_args, cc: cmd_args) -> cmd_args: ocaml_toolchain = ctx.attrs._ocaml_toolchain[OCamlToolchainInfo] @@ -289,8 +321,10 @@ def _preprocess(ctx: AnalysisContext, srcs: list[Artifact], build_mode: BuildMod parser_sig = ctx.actions.declare_output(name + ".mli") result.extend((parser_sig, parser)) - cmd = cmd_args([menhir, "--fixed-exception", "-b", cmd_args(prefix).ignore_artifacts(), src]) - cmd.hidden(parser.as_output(), parser_sig.as_output()) + cmd = cmd_args( + [menhir, "--fixed-exception", "-b", cmd_args(prefix, ignore_artifacts = True), src], + hidden = [parser.as_output(), parser_sig.as_output()], + ) ctx.actions.run(cmd, category = "ocaml_yacc_" + build_mode.value, identifier = src.short_path) elif ext == ".mll": @@ -321,7 +355,7 @@ def _depends(ctx: AnalysisContext, srcs: list[Artifact], build_mode: BuildMode) dep_cmdline.add([cmd_args(f, format = "\"{}\"") for f in ctx.attrs.ocamldep_flags]) # These -I's are for ocamldep. - dep_cmdline.add(cmd_args([cmd_args(src).parent() for src in srcs], format = "-I {}")) + dep_cmdline.add(cmd_args([cmd_args(src, parent = 1) for src in srcs], format = "-I {}")) dep_cmdline.add(srcs) dep_script_name = "ocamldep_" + build_mode.value + ".sh" dep_sh, _ = ctx.actions.write( @@ -330,7 +364,7 @@ def _depends(ctx: AnalysisContext, srcs: list[Artifact], build_mode: BuildMode) is_executable = True, allow_args = True, ) - ctx.actions.run(cmd_args(dep_sh).hidden(dep_output.as_output(), dep_cmdline), category = "ocamldep_" + build_mode.value) + ctx.actions.run(cmd_args(dep_sh, hidden = [dep_output.as_output(), dep_cmdline]), category = "ocamldep_" + build_mode.value) return dep_output # Compile all the context's sources. If bytecode compiling, 'cmxs' & 'objs' will @@ -468,7 +502,7 @@ def _compile(ctx: AnalysisContext, compiler: cmd_args, build_mode: BuildMode) -> # the dependency of 'src' on other files in 'srcs'. depends_include_paths = [] seen_dirs = {} - for d in breadth_first_traversal(makefile2, makefile2.get(src, [])): + for d in depth_first_traversal(makefile2, makefile2.get(src, [])): # 'src' depends on 'd' (e.g. src='quux.ml' depends on # d='quux.mli'). 
# @@ -484,7 +518,7 @@ def _compile(ctx: AnalysisContext, compiler: cmd_args, build_mode: BuildMode) -> if i != None: p = paths.dirname(i.short_path) if not p in seen_dirs: - depends_include_paths.append(cmd_args(i).parent()) + depends_include_paths.append(cmd_args(i, parent = 1)) seen_dirs[p] = None # *All* the include paths needed to compile 'src'. @@ -496,7 +530,7 @@ def _compile(ctx: AnalysisContext, compiler: cmd_args, build_mode: BuildMode) -> cmd.add(src, "-c", "-o", mk_out(cmi)) if build_mode.value == "expand": cmd.add("-dsource") - cmd.hidden(mk_out(cmti), depends_produce) + cmd.add(cmd_args(hidden = [mk_out(cmti), depends_produce])) if build_mode.value == "expand": sh = cmd_args(["/bin/sh", "-c", '"$@" 2> "$preprocessed_source_file"', "--", cmd]) @@ -508,22 +542,22 @@ def _compile(ctx: AnalysisContext, compiler: cmd_args, build_mode: BuildMode) -> elif ext == ".ml": (obj, cmo, cmx, cmt, cmi, ppml) = produces[src] cmd = _compile_cmd(ctx, compiler, build_mode, cc, all_include_paths) - cmd.hidden(depends_produce) + cmd.add(cmd_args(hidden = depends_produce)) if cmo != None: cmd.add(src, "-c", "-o", mk_out(cmo)) if cmx != None: cmd.add(src, "-c", "-o", mk_out(cmx)) - cmd.hidden(mk_out(cmt)) + cmd.add(cmd_args(hidden = mk_out(cmt))) if build_mode.value == "expand": cmd.add("-dsource") if obj != None: - cmd.hidden(mk_out(obj)) + cmd.add(cmd_args(hidden = mk_out(obj))) if cmi != None: cmd.add("-intf-suffix", ",nomli,") # ignore any .mlis that aren't explicit dependencies - cmd.hidden(mk_out(cmi)) + cmd.add(cmd_args(hidden = mk_out(cmi))) else: # An explicit '.mli' for this '.ml' is a dependency. - cmd.hidden(mlis[paths.replace_extension(src.short_path, ".mli")]) + cmd.add(cmd_args(hidden = mlis[paths.replace_extension(src.short_path, ".mli")])) if build_mode.value == "expand": sh = cmd_args(["/bin/sh", "-c", '"$@" 2> "$preprocessed_source_file"', "--", cmd]) @@ -538,7 +572,7 @@ def _compile(ctx: AnalysisContext, compiler: cmd_args, build_mode: BuildMode) -> # `ocaml_object` breaks for `-flto=...` so ensure `-fno-lto` prevails here. cmd.add(src, "-c", "-ccopt", "-fno-lto", "-ccopt", cmd_args(mk_out(stb), format = "-o \"{}\"")) - cmd.hidden(headers) # Any .h files given are dependencies. + cmd.add(cmd_args(hidden = headers)) # Any .h files given are dependencies. 
ctx.actions.run(cmd, category = "ocaml_compile_c", identifier = src.short_path) elif ext == ".h": @@ -550,7 +584,12 @@ def _compile(ctx: AnalysisContext, compiler: cmd_args, build_mode: BuildMode) -> if outputs == []: ctx.actions.write(cmxs_order, "") else: - ctx.actions.dynamic_output(dynamic = [depends_output], inputs = todo_inputs, outputs = outputs + [cmxs_order], f = f) + ctx.actions.dynamic_output( + dynamic = [depends_output], + inputs = todo_inputs, + outputs = [o.as_output() for o in outputs + [cmxs_order]], + f = f, + ) return CompileResultInfo(cmxs_order = cmxs_order, stbs = stbs, objs = objs, cmis = cmis, cmos = cmos, cmxs = cmxs, cmts = cmts, cmtis = cmtis, ppmlis = ppmlis, ppmls = ppmls) @@ -562,15 +601,17 @@ def _include_paths(cmis: list[Artifact], cmos: list[Artifact]) -> cmd_args: for f in cmis: p = paths.dirname(f.short_path) if not p in seen_dirs: - include_paths.append(cmd_args(f).parent()) + include_paths.append(cmd_args(f, parent = 1)) seen_dirs[p] = None for f in cmos: p = paths.dirname(f.short_path) if not p in seen_dirs: - include_paths.append(cmd_args(f).parent()) + include_paths.append(cmd_args(f, parent = 1)) seen_dirs[p] = None - include_paths = cmd_args(include_paths) - include_paths.hidden(cmis + cmos) + include_paths = cmd_args( + include_paths, + hidden = cmis + cmos, + ) return include_paths def ocaml_library_impl(ctx: AnalysisContext) -> list[Provider]: @@ -594,7 +635,7 @@ def ocaml_library_impl(ctx: AnalysisContext) -> list[Provider]: cmd_nat.add("-o", cmxa.as_output()) if len([s for s in ctx.attrs.srcs if s.extension == ".ml"]) != 0: native_c_lib = ctx.actions.declare_output("lib" + ctx.attrs.name + ".a") - cmd_nat.hidden(native_c_lib.as_output()) + cmd_nat.add(cmd_args(hidden = native_c_lib.as_output())) native_c_libs = [native_c_lib] else: native_c_libs = [] @@ -605,7 +646,7 @@ def ocaml_library_impl(ctx: AnalysisContext) -> list[Provider]: # These were produced by the compile step and so are hidden dependencies of # the archive step. - cmd_nat.hidden(cmxs, cmis_nat, objs, cmts_nat, cmtis_nat) + cmd_nat.add(cmd_args(hidden = [cmxs, cmis_nat, objs, cmts_nat, cmtis_nat])) ctx.actions.run(cmd_nat, category = "ocaml_archive_native") cmxs_order, stbs_byt, _objs, cmis_byt, cmos, _cmxs, cmts_byt, cmtis_byt, _ppmlis, _ppmls = _compile_result_to_tuple(_compile(ctx, ocamlc, BuildMode("bytecode"))) @@ -620,7 +661,7 @@ def ocaml_library_impl(ctx: AnalysisContext) -> list[Provider]: # These were produced by the compile step and so are hidden dependencies of # the archive step. 
- cmd_byt.hidden(cmos, cmis_byt, cmts_byt, cmtis_byt) + cmd_byt.add(cmd_args(hidden = [cmos, cmis_byt, cmts_byt, cmtis_byt])) ctx.actions.run(cmd_byt, category = "ocaml_archive_bytecode") infos = _attr_deps_ocaml_link_infos(ctx) @@ -657,7 +698,7 @@ def ocaml_library_impl(ctx: AnalysisContext) -> list[Provider]: info_ide = [ DefaultInfo( - default_output = cmxa, + default_output = cmts_nat[0] if cmts_nat else None, other_outputs = [cmd_args(other_outputs_info.info.project_as_args("ide"))], ), ] @@ -685,22 +726,21 @@ def ocaml_library_impl(ctx: AnalysisContext) -> list[Provider]: merge_shared_libraries(ctx.actions, deps = filter_and_map_idx(SharedLibraryInfo, _attr_deps(ctx))), merge_link_group_lib_info(deps = _attr_deps(ctx)), other_outputs_info, - create_linkable_graph( - ctx, - deps = _attr_deps(ctx), - ), + _get_linkable_graph(ctx), ] def ocaml_binary_impl(ctx: AnalysisContext) -> list[Provider]: ocaml_toolchain = ctx.attrs._ocaml_toolchain[OCamlToolchainInfo] + ocaml_toolchain_runtime_deps = ocaml_toolchain.runtime_dep_link_extras env = _mk_env(ctx) ocamlopt = _mk_ocaml_compiler(ctx, env, BuildMode("native")) ocamlc = _mk_ocaml_compiler(ctx, env, BuildMode("bytecode")) - dep_link_infos = _attr_deps_merged_link_infos(ctx) + filter(None, [ocaml_toolchain.libc]) + dep_link_infos = _attr_deps_merged_link_infos(ctx) + filter(None, [ocaml_toolchain.libc]) + [d.get(MergedLinkInfo) for d in ocaml_toolchain_runtime_deps] cxx_toolchain = get_cxx_toolchain_info(ctx) link_args_output = make_link_args( + ctx, ctx.actions, cxx_toolchain, [get_link_args_for_strategy(ctx, dep_link_infos, LinkStrategy("static_pic"))], @@ -726,8 +766,10 @@ def ocaml_binary_impl(ctx: AnalysisContext) -> list[Provider]: # These were produced by the compile step and are therefore hidden # dependencies of the link step. - cmd_nat.hidden(cmxs, cmis_nat, cmts_nat, cmtis_nat, objs, link_args_output.hidden) + cmd_nat.add(cmd_args(hidden = [cmxs, cmis_nat, cmts_nat, cmtis_nat, objs, link_args_output.hidden])) binary_nat = ctx.actions.declare_output(ctx.attrs.name + ".opt") + + cmd_nat.add([cmd_args(["-cclib", f]) for f in ocaml_toolchain.runtime_dep_link_flags]) cmd_nat.add("-cclib", "-lpthread") cmd_nat.add("-o", binary_nat.as_output()) local_only = link_cxx_binary_locally(ctx) @@ -738,9 +780,10 @@ def ocaml_binary_impl(ctx: AnalysisContext) -> list[Provider]: # These were produced by the compile step and are therefore hidden # dependencies of the link step. 
- cmd_byt.hidden(cmos, cmis_byt, cmts_byt, cmtis_byt, link_args_output.hidden) + cmd_byt.add(cmd_args(hidden = [cmos, cmis_byt, cmts_byt, cmtis_byt, link_args_output.hidden])) binary_byt = ctx.actions.declare_output(ctx.attrs.name) cmd_byt.add("-custom") + cmd_byt.add([cmd_args(["-cclib", f]) for f in ocaml_toolchain.runtime_dep_link_flags]) cmd_byt.add("-cclib", "-lpthread") cmd_byt.add("-o", binary_byt.as_output()) local_only = link_cxx_binary_locally(ctx) @@ -755,7 +798,7 @@ def ocaml_binary_impl(ctx: AnalysisContext) -> list[Provider]: info_ide = [ DefaultInfo( - default_output = binary_nat, + default_output = cmts_nat[0] if cmts_nat else None, other_outputs = [cmd_args(other_outputs_info.info.project_as_args("ide"))], ), ] @@ -790,6 +833,7 @@ def ocaml_object_impl(ctx: AnalysisContext) -> list[Provider]: dep_link_infos = _attr_deps_merged_link_infos(ctx) cxx_toolchain = get_cxx_toolchain_info(ctx) link_args_output = make_link_args( + ctx, ctx.actions, cxx_toolchain, [get_link_args_for_strategy(ctx, dep_link_infos, LinkStrategy("static_pic"))], @@ -807,10 +851,10 @@ def ocaml_object_impl(ctx: AnalysisContext) -> list[Provider]: for lib in merge_ocaml_link_infos(_attr_deps_ocaml_link_infos(ctx)).info: cmd.add(lib.cmxas, lib.c_libs, lib.native_c_libs, lib.stbs_nat) - cmd.hidden(lib.cmxs, lib.cmis_nat, lib.cmts_nat) + cmd.add(cmd_args(hidden = [lib.cmxs, lib.cmis_nat, lib.cmts_nat])) cmd.add(stbs, "-args", cmxs_order) - cmd.hidden(cmxs, cmis, cmts, objs, cmtis, link_args_output.hidden) + cmd.add(cmd_args(hidden = [cmxs, cmis, cmts, objs, cmtis, link_args_output.hidden])) obj = ctx.actions.declare_output(ctx.attrs.name + ".o") cmd.add("-output-complete-obj") @@ -822,12 +866,13 @@ def ocaml_object_impl(ctx: AnalysisContext) -> list[Provider]: ocaml_toolchain_runtime_deps = ocaml_toolchain.runtime_dep_link_extras linker_type = cxx_toolchain.linker_info.type link_infos = {} + linker_flags = [cmd_args(f) for f in ocaml_toolchain.runtime_dep_link_flags] for output_style in LibOutputStyle: link_infos[output_style] = LinkInfos(default = LinkInfo( linkables = [ ObjectsLinkable(objects = [obj], linker_type = linker_type), ], - post_flags = [cmd_args(f) for f in ocaml_toolchain.runtime_dep_link_flags], + post_flags = linker_flags, )) obj_link_info = create_merged_link_info( @@ -846,7 +891,7 @@ def ocaml_object_impl(ctx: AnalysisContext) -> list[Provider]: info_ide = [ DefaultInfo( - default_output = obj, + default_output = cmts[0] if cmts else None, other_outputs = [cmd_args(other_outputs_info.info.project_as_args("ide"))], ), ] @@ -871,10 +916,7 @@ def ocaml_object_impl(ctx: AnalysisContext) -> list[Provider]: obj_link_info, merge_link_group_lib_info(deps = deps), merge_shared_libraries(ctx.actions, deps = filter_and_map_idx(SharedLibraryInfo, deps)), - create_linkable_graph( - ctx, - deps = deps, - ), + _get_linkable_graph(ctx, deps, link_infos, LinkerFlags(post_flags = linker_flags)), ] # `ocaml_shared` enables one to produce an OCaml "plugin". 
Such native code @@ -891,6 +933,7 @@ def ocaml_shared_impl(ctx: AnalysisContext) -> list[Provider]: dep_link_infos = _attr_deps_merged_link_infos(ctx) + filter(None, [ocaml_toolchain.libc]) cxx_toolchain = get_cxx_toolchain_info(ctx) link_args_output = make_link_args( + ctx, ctx.actions, cxx_toolchain, [get_link_args_for_strategy(ctx, dep_link_infos, LinkStrategy("static_pic"))], @@ -915,7 +958,7 @@ def ocaml_shared_impl(ctx: AnalysisContext) -> list[Provider]: # These were produced by the compile step and are therefore hidden # dependencies of the link step. - cmd_nat.hidden(cmxs, cmis_nat, cmts_nat, cmtis_nat, objs, link_args_output.hidden) + cmd_nat.add(cmd_args(hidden = [cmxs, cmis_nat, cmts_nat, cmtis_nat, objs, link_args_output.hidden])) binary_nat = ctx.actions.declare_output(ctx.attrs.name + ".cmxs") cmd_nat.add("-shared") cmd_nat.add("-o", binary_nat.as_output()) @@ -931,7 +974,7 @@ def ocaml_shared_impl(ctx: AnalysisContext) -> list[Provider]: info_ide = [ DefaultInfo( - default_output = binary_nat, + default_output = cmts_nat[0] if cmts_nat else None, other_outputs = [cmd_args(other_outputs_info.info.project_as_args("ide"))], ), ] @@ -945,6 +988,7 @@ def ocaml_shared_impl(ctx: AnalysisContext) -> list[Provider]: return [ DefaultInfo(default_output = binary_nat, sub_targets = sub_targets), + _get_linkable_graph(ctx), ] def prebuilt_ocaml_library_impl(ctx: AnalysisContext) -> list[Provider]: @@ -1010,8 +1054,5 @@ def prebuilt_ocaml_library_impl(ctx: AnalysisContext) -> list[Provider]: create_merged_link_info_for_propagation(ctx, native_infos), merge_link_group_lib_info(deps = ctx.attrs.deps), merge_shared_libraries(ctx.actions, deps = filter_and_map_idx(SharedLibraryInfo, ctx.attrs.deps)), - create_linkable_graph( - ctx, - deps = ctx.attrs.deps, - ), + _get_linkable_graph(ctx), ] diff --git a/prelude/os/BUCK b/prelude/os/BUCK deleted file mode 100644 index 816fd17647326..0000000000000 --- a/prelude/os/BUCK +++ /dev/null @@ -1,124 +0,0 @@ -# The short list of ubiquitous, mainstream operating systems: - -config_setting( - name = "linux", - constraint_values = [ - "//os/constraints:linux", - ], - visibility = ["PUBLIC"], -) - -config_setting( - name = "macos", - constraint_values = [ - "//os/constraints:macos", - ], - visibility = ["PUBLIC"], -) - -config_setting( - name = "windows", - constraint_values = [ - "//os/constraints:windows", - ], - visibility = ["PUBLIC"], -) - -# Android - -config_setting( - name = "android", - constraint_values = [ - "//os/constraints:android", - ], - visibility = ["PUBLIC"], -) - -native.constraint_setting( - name = "maybe_building_android_binary", - visibility = ["prelude//..."], -) - -native.constraint_value( - name = "building_android_binary", - constraint_setting = ":maybe_building_android_binary", - visibility = ["prelude//..."], -) - -# Rest of Apple's operating systems. - -config_setting( - name = "ios", - constraint_values = [ - "//os/constraints:ios", - ], - visibility = ["PUBLIC"], -) - -config_setting( - name = "tvos", - constraint_values = [ - "//os/constraints:tvos", - ], - visibility = ["PUBLIC"], -) - -config_setting( - name = "watchos", - constraint_values = [ - "//os/constraints:watchos", - ], - visibility = ["PUBLIC"], -) - -# Long tail but contemporarily relevant operating systems. -# Whether a Tier 2 rustc target exists is approximately the right bar. 
- -config_setting( - name = "freebsd", - constraint_values = [ - "//os/constraints:freebsd", - ], - visibility = ["PUBLIC"], -) - -config_setting( - name = "fuchsia", - constraint_values = [ - "//os/constraints:fuchsia", - ], - visibility = ["PUBLIC"], -) - -config_setting( - name = "illumos", - constraint_values = [ - "//os/constraints:illumos", - ], - visibility = ["PUBLIC"], -) - -config_setting( - name = "netbsd", - constraint_values = [ - "//os/constraints:netbsd", - ], - visibility = ["PUBLIC"], -) - -config_setting( - name = "wasi", - constraint_values = [ - "//os/constraints:wasi", - ], - visibility = ["PUBLIC"], -) - -# For platforms with no OS, like microcontrollers. -config_setting( - name = "none", - constraint_values = [ - "//os/constraints:none", - ], - visibility = ["PUBLIC"], -) diff --git a/prelude/os/BUCK.v2 b/prelude/os/BUCK.v2 new file mode 100644 index 0000000000000..04721a42b799d --- /dev/null +++ b/prelude/os/BUCK.v2 @@ -0,0 +1,149 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +prelude = native # Avoid warnings and auto-formatters + +# The short list of ubiquitous, mainstream operating systems: + +config_setting( + name = "linux", + constraint_values = [ + "//os/constraints:linux", + ], + visibility = ["PUBLIC"], +) + +config_setting( + name = "macos", + constraint_values = [ + "//os/constraints:macos", + ], + visibility = ["PUBLIC"], +) + +config_setting( + name = "windows", + constraint_values = [ + "//os/constraints:windows", + ], + visibility = ["PUBLIC"], +) + +# Android + +config_setting( + name = "android", + constraint_values = [ + "//os/constraints:android", + ], + visibility = ["PUBLIC"], +) + +prelude.constraint_setting( + name = "maybe_building_android_binary", + visibility = ["prelude//..."], +) + +prelude.constraint_value( + name = "building_android_binary", + constraint_setting = ":maybe_building_android_binary", + visibility = ["prelude//..."], +) + +# Rest of Apple's operating systems. + +config_setting( + name = "ios", + constraint_values = [ + "//os/constraints:ios", + ], + visibility = ["PUBLIC"], +) + +config_setting( + name = "tvos", + constraint_values = [ + "//os/constraints:tvos", + ], + visibility = ["PUBLIC"], +) + +config_setting( + name = "watchos", + constraint_values = [ + "//os/constraints:watchos", + ], + visibility = ["PUBLIC"], +) + +# Long tail but contemporarily relevant operating systems. +# Whether a Tier 2 rustc target exists is approximately the right bar. + +config_setting( + name = "freebsd", + constraint_values = [ + "//os/constraints:freebsd", + ], + visibility = ["PUBLIC"], +) + +config_setting( + name = "fuchsia", + constraint_values = [ + "//os/constraints:fuchsia", + ], + visibility = ["PUBLIC"], +) + +config_setting( + name = "illumos", + constraint_values = [ + "//os/constraints:illumos", + ], + visibility = ["PUBLIC"], +) + +config_setting( + name = "netbsd", + constraint_values = [ + "//os/constraints:netbsd", + ], + visibility = ["PUBLIC"], +) + +config_setting( + name = "wasi", + constraint_values = [ + "//os/constraints:wasi", + ], + visibility = ["PUBLIC"], +) + +config_setting( + name = "linux-sgx", + constraint_values = [ + "//os/constraints:linux", + ], + visibility = ["PUBLIC"], +) + +config_setting( + name = "linux-arm64", + constraint_values = [ + "//cpu/constraints:arm64", + "//os/constraints:linux", + ], + visibility = ["PUBLIC"], +) + +# For platforms with no OS, like microcontrollers. 
+config_setting( + name = "none", + constraint_values = [ + "//os/constraints:none", + ], + visibility = ["PUBLIC"], +) diff --git a/prelude/os/constraints/BUCK b/prelude/os/constraints/BUCK deleted file mode 100644 index cdb63a7a8b8ff..0000000000000 --- a/prelude/os/constraints/BUCK +++ /dev/null @@ -1,84 +0,0 @@ -# Used by open source projects to support `prelude//` - -constraint_setting( - name = "os", - visibility = ["PUBLIC"], -) - -constraint_value( - name = "linux", - constraint_setting = ":os", - visibility = ["PUBLIC"], -) - -constraint_value( - name = "macos", - constraint_setting = ":os", - visibility = ["PUBLIC"], -) - -constraint_value( - name = "windows", - constraint_setting = ":os", - visibility = ["PUBLIC"], -) - -constraint_value( - name = "android", - constraint_setting = ":os", - visibility = ["PUBLIC"], -) - -constraint_value( - name = "ios", - constraint_setting = ":os", - visibility = ["PUBLIC"], -) - -constraint_value( - name = "tvos", - constraint_setting = ":os", - visibility = ["PUBLIC"], -) - -constraint_value( - name = "watchos", - constraint_setting = ":os", - visibility = ["PUBLIC"], -) - -constraint_value( - name = "freebsd", - constraint_setting = ":os", - visibility = ["PUBLIC"], -) - -constraint_value( - name = "fuchsia", - constraint_setting = ":os", - visibility = ["PUBLIC"], -) - -constraint_value( - name = "illumos", - constraint_setting = ":os", - visibility = ["PUBLIC"], -) - -constraint_value( - name = "netbsd", - constraint_setting = ":os", - visibility = ["PUBLIC"], -) - -constraint_value( - name = "wasi", - constraint_setting = ":os", - visibility = ["PUBLIC"], -) - -constraint_value( - name = "none", - constraint_setting = ":os", - visibility = ["PUBLIC"], -) diff --git a/prelude/os/constraints/BUCK.v2 b/prelude/os/constraints/BUCK.v2 new file mode 100644 index 0000000000000..0426226eb0806 --- /dev/null +++ b/prelude/os/constraints/BUCK.v2 @@ -0,0 +1,90 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +# Used by open source projects to support `prelude//` + +constraint_setting( + name = "os", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "linux", + constraint_setting = ":os", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "macos", + constraint_setting = ":os", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "windows", + constraint_setting = ":os", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "android", + constraint_setting = ":os", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "ios", + constraint_setting = ":os", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "tvos", + constraint_setting = ":os", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "watchos", + constraint_setting = ":os", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "freebsd", + constraint_setting = ":os", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "fuchsia", + constraint_setting = ":os", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "illumos", + constraint_setting = ":os", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "netbsd", + constraint_setting = ":os", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "wasi", + constraint_setting = ":os", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "none", + constraint_setting = ":os", + visibility = ["PUBLIC"], +) diff --git a/prelude/os_lookup/defs.bzl b/prelude/os_lookup/defs.bzl index 
f5ce4bf2551b7..5b708e6400811 100644 --- a/prelude/os_lookup/defs.bzl +++ b/prelude/os_lookup/defs.bzl @@ -5,8 +5,7 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -load("@prelude//decls/android_rules.bzl", "TargetCpuType") -load("@prelude//decls/core_rules.bzl", "Platform") +load("@prelude//decls/core_rules.bzl", "Platform", "TargetCpuType") OsLookup = provider(fields = {"cpu": provider_field(typing.Any, default = None), "platform": provider_field(typing.Any, default = None)}) diff --git a/prelude/os_lookup/targets/BUCK b/prelude/os_lookup/targets/BUCK deleted file mode 100644 index 9919027f6a337..0000000000000 --- a/prelude/os_lookup/targets/BUCK +++ /dev/null @@ -1,16 +0,0 @@ -load("//os_lookup:defs.bzl", "os_lookup") - -os_lookup( - name = "os_lookup", - cpu = select({ - "DEFAULT": None, - "config//cpu:arm64": "arm64", - "config//cpu:x86_64": "x86_64", - }), - platform = select({ - "DEFAULT": "linux", - "config//os:macos": "macos", - "config//os:windows": "windows", - }), - visibility = ["PUBLIC"], -) diff --git a/prelude/os_lookup/targets/BUCK.v2 b/prelude/os_lookup/targets/BUCK.v2 new file mode 100644 index 0000000000000..e6ecd06544d94 --- /dev/null +++ b/prelude/os_lookup/targets/BUCK.v2 @@ -0,0 +1,21 @@ +load("@prelude//os_lookup:defs.bzl", "os_lookup") +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +os_lookup( + name = "os_lookup", + cpu = select({ + "DEFAULT": None, + "config//cpu:arm64": "arm64", + "config//cpu:x86_64": "x86_64", + }), + platform = select({ + "DEFAULT": "linux", + "config//os:macos": "macos", + "config//os:windows": "windows", + }), + visibility = ["PUBLIC"], +) diff --git a/prelude/oss/CHANGELOG.md b/prelude/oss/CHANGELOG.md index 0ab36850df0e3..524da63093fc7 100644 --- a/prelude/oss/CHANGELOG.md +++ b/prelude/oss/CHANGELOG.md @@ -1,3 +1,3 @@ # Buck2 Prelude -* Initial version. +- Initial version. diff --git a/prelude/oss/CONTRIBUTING.md b/prelude/oss/CONTRIBUTING.md index e2f05f03e678f..7f7c52bbb8d53 100644 --- a/prelude/oss/CONTRIBUTING.md +++ b/prelude/oss/CONTRIBUTING.md @@ -1,15 +1,16 @@ # Contributing to Buck2 Prelude -This repository is a subset of . -You can contribute to either that repo, or this repo - changes will be mirrored to both. +This repository is a subset of . You can +contribute to either that repo, or this repo - changes will be mirrored to both. -We want to make contributing to this project as easy and transparent as possible. +We want to make contributing to this project as easy and transparent as +possible. ## Our Development Process -Buck2 Prelude is currently developed in Facebook's internal repositories and then exported -out to GitHub by a Facebook team member; however, we invite you to submit pull -requests as described below. +Buck2 Prelude is currently developed in Facebook's internal repositories and +then exported out to GitHub by a Facebook team member; however, we invite you to +submit pull requests as described below. ## Pull Requests @@ -45,5 +46,6 @@ We use several Python formatters. ## License By contributing to Buck2 Prelude, you agree that your contributions will be -licensed under both the [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) -files in the root directory of this source tree. +licensed under both the [LICENSE-MIT](LICENSE-MIT) and +[LICENSE-APACHE](LICENSE-APACHE) files in the root directory of this source +tree. 
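An aside on the `os_lookup` change above: the `os_lookup` target distills the configured OS and CPU into the `OsLookup` provider via `select()`, so rule implementations can branch on the platform at analysis time. A minimal sketch of a consumer, assuming a hypothetical rule with a `script` string attribute and an `_os_lookup` dep wired to `prelude//os_lookup/targets:os_lookup` (none of these names are part of the prelude):

    load("@prelude//os_lookup:defs.bzl", "OsLookup")

    def _script_impl(ctx: AnalysisContext) -> list[Provider]:
        # Pick a platform-appropriate wrapper extension from the OsLookup provider.
        os = ctx.attrs._os_lookup[OsLookup]
        ext = ".bat" if os.platform == "windows" else ".sh"
        wrapper = ctx.actions.declare_output(ctx.attrs.name + ext)
        ctx.actions.write(wrapper, ctx.attrs.script, is_executable = True)
        return [DefaultInfo(default_output = wrapper), RunInfo(args = cmd_args(wrapper))]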
diff --git a/prelude/oss/README.md b/prelude/oss/README.md index e41bdc072c1cf..6830efe26d3ae 100644 --- a/prelude/oss/README.md +++ b/prelude/oss/README.md @@ -1,12 +1,16 @@ # Buck2 Prelude -This repo contains a copy of the Buck2 Prelude, which is often included as a submodule with a Buck2 project. -To obtain a copy of this repo, and set up other details of a Buck2, you should usually run `buck2 init --git`. -Most information can be found on the main [Buck2 GitHub project](https://github.com/facebook/buck2). +This repo contains a copy of the Buck2 Prelude, which is often included as a +submodule with a Buck2 project. To obtain a copy of this repo, and set up other +details of a Buck2, you should usually run `buck2 init --git`. Most information +can be found on the main +[Buck2 GitHub project](https://github.com/facebook/buck2). -Pull requests and issues should be raised at [facebook/buck2](https://github.com/facebook/buck2) as that project -is more closely monitored and contains CI checks. +Pull requests and issues should be raised at +[facebook/buck2](https://github.com/facebook/buck2) as that project is more +closely monitored and contains CI checks. ## License -Buck2 Prelude is both MIT and Apache License, Version 2.0 licensed, as found in the [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) files. +Buck2 Prelude is both MIT and Apache License, Version 2.0 licensed, as found in +the [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) files. diff --git a/prelude/oss/pull_request_template.md b/prelude/oss/pull_request_template.md index 1554e0ee172bb..ab8b597978799 100644 --- a/prelude/oss/pull_request_template.md +++ b/prelude/oss/pull_request_template.md @@ -1,3 +1,7 @@ -IMPORTANT: Please don't raise pull requests here, but at [facebook/buck2](https://github.com/facebook/buck2/pulls). +IMPORTANT: Please don't raise pull requests here, but at +[facebook/buck2](https://github.com/facebook/buck2/pulls). -The [`prelude`](https://github.com/facebook/buck2/tree/main/prelude) directory is a mirror of this repo, but that repo also features CI tests and is more actively monitored. Any PR's landing there will automatically show up here at the same time. +The [`prelude`](https://github.com/facebook/buck2/tree/main/prelude) directory +is a mirror of this repo, but that repo also features CI tests and is more +actively monitored. Any PR's landing there will automatically show up here at +the same time. 
diff --git a/prelude/platforms/BUCK b/prelude/platforms/BUCK deleted file mode 100644 index f6f15e73be8e9..0000000000000 --- a/prelude/platforms/BUCK +++ /dev/null @@ -1,10 +0,0 @@ -# Used by open source projects to provide a simple platform setting - -load(":defs.bzl", "execution_platform", "host_configuration") - -execution_platform( - name = "default", - cpu_configuration = host_configuration.cpu, - os_configuration = host_configuration.os, - use_windows_path_separators = host_info().os.is_windows, -) diff --git a/prelude/platforms/BUCK.v2 b/prelude/platforms/BUCK.v2 new file mode 100644 index 0000000000000..30302244bda52 --- /dev/null +++ b/prelude/platforms/BUCK.v2 @@ -0,0 +1,83 @@ +# Used by open source projects to provide a simple platform setting + +load("@prelude//utils:source_listing.bzl", "source_listing") +load(":defs.bzl", "execution_platform", "host_configuration") + +oncall("build_infra") + +source_listing() + +prelude = native + +execution_platform( + name = "default", + cpu_configuration = host_configuration.cpu, + os_configuration = host_configuration.os, + use_windows_path_separators = host_info().os.is_windows, + visibility = ["PUBLIC"], +) + +prelude.constraint_setting( + name = "runs_remote", +) + +prelude.constraint_value( + name = "may_run_remote", + constraint_setting = ":runs_remote", + visibility = ["PUBLIC"], +) + +prelude.constraint_setting( + name = "runs_local", + visibility = ["PUBLIC"], +) + +prelude.constraint_value( + name = "may_run_local", + constraint_setting = ":runs_local", + visibility = ["PUBLIC"], +) + +prelude.constraint_setting( + name = "runs_only", +) + +prelude.constraint_value( + name = "runs_only_local", + constraint_setting = ":runs_only", + visibility = ["PUBLIC"], +) + +prelude.constraint_value( + name = "runs_only_remote", + constraint_setting = ":runs_only", + visibility = ["PUBLIC"], +) + +prelude.constraint_setting( + name = "fat_platform_marker", +) + +prelude.constraint_value( + name = "fat_platform_enabled", + constraint_setting = ":fat_platform_marker", + visibility = ["PUBLIC"], +) + +# This is mostly here for a rule type to add a dependency on it to mark all +# instances of that rule type as incompatible with a fat platform. Ideally, +# toolchains could affect the target compatibility of their users directly but +# toolchains are currently all exec deps and so cannot do that. We'd like +# buck2 to support a form of dep that inherited its users execution platform +# so that toolchains could basically get visibility and affect both target and +# execution configuration, but that's not implemented yet. +export_file( + name = "fat_platform_incompatible", + src = "BUCK.v2", # @oss-enable + # @oss-disable: src = "TARGETS.v2", + target_compatible_with = select({ + ":fat_platform_enabled": ["config//:none"], + "DEFAULT": [], + }), + visibility = ["PUBLIC"], +) diff --git a/prelude/platforms/apple/arch.bzl b/prelude/platforms/apple/arch.bzl new file mode 100644 index 0000000000000..72163c98b815a --- /dev/null +++ b/prelude/platforms/apple/arch.bzl @@ -0,0 +1,14 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
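Worth highlighting in the new `prelude/platforms/BUCK.v2` above: `:fat_platform_incompatible` gives a rule type a cheap way to mark all of its instances as incompatible with fat platforms, simply by depending on it. A sketch of such a wrapper macro, under stated assumptions (`my_cxx_tool` is illustrative, not a prelude macro):

    def my_cxx_tool(name, deps = [], **kwargs):
        # The extra dep trips the select() on :fat_platform_incompatible, making
        # this target target-incompatible whenever :fat_platform_enabled is set.
        native.cxx_binary(
            name = name,
            deps = deps + ["prelude//platforms:fat_platform_incompatible"],
            **kwargs
        )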
+
+_APPLE_ARCHES = [
+    "arm64",
+    "arm64_32",
+    "x86_64",
+]
+
+AppleArch = enum(*_APPLE_ARCHES)
diff --git a/prelude/platforms/apple/base.bzl b/prelude/platforms/apple/base.bzl
new file mode 100644
index 0000000000000..90a87848abb4b
--- /dev/null
+++ b/prelude/platforms/apple/base.bzl
@@ -0,0 +1,97 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+load("@prelude//:is_buck2.bzl", "is_buck2") # @oss-enable
+load(
+    "@prelude//platforms/apple:build_mode.bzl",
+    "APPLE_BUILD_MODES",
+    "CONSTRAINT_PACKAGE",
+    "get_build_mode",
+    "get_build_mode_debug",
+)
+load(
+    "@prelude//platforms/apple:constants.bzl",
+    "ios_platforms",
+    "mac_catalyst_platforms",
+    "mac_platforms",
+    "watch_platforms",
+)
+
+# Debug constraints to add for build modes used by other rule platforms (ex: rust).
+_DEBUG_CONSTRAINTS = [
+    # @oss-disable: "ovr_config//build_mode/constraints:debug",
+]
+
+# Release constraints to add for build modes used by other rule platforms (ex: rust).
+_RELEASE_CONSTRAINTS = [
+    # @oss-disable: "ovr_config//build_mode/constraints:release",
+]
+
+BUILD_MODE_TO_CONSTRAINTS_MAP = {
+    build_mode: ["{}:{}".format(CONSTRAINT_PACKAGE, build_mode)] + (_DEBUG_CONSTRAINTS if build_mode == get_build_mode_debug() else _RELEASE_CONSTRAINTS)
+    for build_mode in APPLE_BUILD_MODES
+}
+
+_MOBILE_PLATFORMS = [
+    ios_platforms.IPHONEOS_ARM64,
+    ios_platforms.IPHONESIMULATOR_ARM64,
+    ios_platforms.IPHONESIMULATOR_X86_64,
+    watch_platforms.WATCHOS_ARM64,
+    watch_platforms.WATCHOS_ARM64_32,
+    watch_platforms.WATCHSIMULATOR_ARM64,
+    watch_platforms.WATCHSIMULATOR_X86_64,
+]
+
+_MAC_PLATFORMS = [
+    mac_platforms.MACOS_ARM64,
+    mac_platforms.MACOS_X86_64,
+    mac_platforms.MACOS_UNIVERSAL,
+    mac_catalyst_platforms.MACCATALYST_ARM64,
+    mac_catalyst_platforms.MACCATALYST_X86_64,
+]
+
+# TODO: Drop the platform_rule when we're no longer attempting to support buck1.
+def apple_generated_platforms(name, constraint_values, deps, platform_rule, platform = None):
+    # By convention, the cxx.default_platform is typically the same as the platform being defined.
+    # This is not the case for all watch platforms, so provide an override.
+    platform = platform if platform else name
+    if is_mobile_platform(platform) or is_buck2_mac_platform(platform):
+        for build_mode in APPLE_BUILD_MODES:
+            platform_rule(
+                name = _get_generated_name(name, platform, build_mode),
+                constraint_values = constraint_values + BUILD_MODE_TO_CONSTRAINTS_MAP.get(build_mode),
+                visibility = ["PUBLIC"],
+                deps = deps,
+            )
+
+    # Create a platform without the build mode to support backwards compatibility of hardcoded platforms
+    # and with buck1 cxx platform setup.
+    # TODO(chatatap): Look to remove all hardcoded references and get rid of these
+    platform_rule(
+        name = name,
+        constraint_values = constraint_values,
+        visibility = ["PUBLIC"],
+        deps = deps,
+    )
+
+def apple_build_mode_backed_platform(name, platform, build_mode = None):
+    build_mode = get_build_mode() if build_mode == None else build_mode
+    return _get_generated_name(name, platform, build_mode)
+
+def is_mobile_platform(platform):
+    # These build modes are primarily used in mobile code. MacOS builds in fbcode/arvr use different
+    # modes to represent dev/opt variants.
+ return platform in _MOBILE_PLATFORMS + +def is_buck2_mac_platform(platform): + return platform in _MAC_PLATFORMS + +def _get_generated_name(name, platform, build_mode): + if is_mobile_platform(platform) or is_buck2_mac_platform(platform): + return "{}-{}".format(name, build_mode) + else: + return name diff --git a/prelude/platforms/apple/build_mode.bzl b/prelude/platforms/apple/build_mode.bzl new file mode 100644 index 0000000000000..7200555022a4c --- /dev/null +++ b/prelude/platforms/apple/build_mode.bzl @@ -0,0 +1,37 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# @oss-disable: load("@prelude//platforms/apple/meta_only:build_mode.bzl", _APPLE_BUILD_MODES = "APPLE_BUILD_MODES", _BUILD_MODE = "BUILD_MODE", _get_build_mode = "get_build_mode", _get_build_mode_debug = "get_build_mode_debug", _get_build_mode_release = "get_build_mode_release") + +BUILD_MODE_DEBUG = "debug" # @oss-enable +BUILD_MODE_PROFILE = "profile" # @oss-enable +BUILD_MODE_RELEASE = "release" # @oss-enable + +APPLE_BUILD_MODES = [BUILD_MODE_DEBUG, BUILD_MODE_PROFILE, BUILD_MODE_RELEASE] # @oss-enable +# @oss-disable: APPLE_BUILD_MODES = _APPLE_BUILD_MODES + +BUILD_MODE = struct( # @oss-enable + DEBUG = BUILD_MODE_DEBUG, # @oss-enable + PROFILE = BUILD_MODE_PROFILE, # @oss-enable + RELEASE = BUILD_MODE_RELEASE, # @oss-enable +) # @oss-enable +# @oss-disable: BUILD_MODE = _BUILD_MODE + +CONSTRAINT_PACKAGE = "prelude//platforms/apple/constraints" # @oss-enable +# @oss-disable: CONSTRAINT_PACKAGE = "ovr_config//build_mode/apple/constraints" + +def get_build_mode(): + return read_root_config("apple", "build_mode", BUILD_MODE_DEBUG) # @oss-enable + # @oss-disable: return _get_build_mode() + +def get_build_mode_debug(): + return BUILD_MODE.DEBUG # @oss-enable + # @oss-disable: return _get_build_mode_debug() + +def get_build_mode_release(): + return BUILD_MODE.RELEASE # @oss-enable + # @oss-disable: return _get_build_mode_release() diff --git a/prelude/platforms/apple/constants.bzl b/prelude/platforms/apple/constants.bzl new file mode 100644 index 0000000000000..a5dcbe3fc90e8 --- /dev/null +++ b/prelude/platforms/apple/constants.bzl @@ -0,0 +1,109 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# These are identifiers used in defining Apple platforms for configuring apple_* rules. 
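To make the `@oss-enable`/`@oss-disable` plumbing in `build_mode.bzl` concrete: in the OSS configuration, `get_build_mode()` just reads the `apple.build_mode` buckconfig (defaulting to debug), and `base.bzl`'s `BUILD_MODE_TO_CONSTRAINTS_MAP` prefixes the mode with `CONSTRAINT_PACKAGE` to form a constraint label. A small sketch, with an illustrative helper name:

    load(
        "@prelude//platforms/apple:build_mode.bzl",
        "CONSTRAINT_PACKAGE",
        "get_build_mode",
    )

    def _current_build_mode_constraint() -> str:
        # With `buck2 build -c apple.build_mode=release ...` this evaluates to
        # "prelude//platforms/apple/constraints:release" in the OSS setup.
        return "{}:{}".format(CONSTRAINT_PACKAGE, get_build_mode())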
+ +APPLE = "Apple" + +# Apple SDK Definitions +APPLETVOS = "appletvos" + +IOS = "ios" + +MACOSX = "macosx" + +WATCHOS = "watchos" + +VISIONOS = "visionos" + +# Apple TV Platforms/Flavors + +APPLETVOS_ARM64 = "appletvos-arm64" + +APPLETVSIMULATOR_ARM64 = "appletvsimulator-arm64" + +APPLETVSIMULATOR_X86_64 = "appletvsimulator-x86_64" + +# iOS Platforms/Flavors + +IPHONEOS_ARM64 = "iphoneos-arm64" + +IPHONESIMULATOR_ARM64 = "iphonesimulator-arm64" + +IPHONESIMULATOR_X86_64 = "iphonesimulator-x86_64" + +# Mac Catalyst Platforms/Flavors + +MACCATALYST_ARM64 = "maccatalyst-arm64" + +MACCATALYST_X86_64 = "maccatalyst-x86_64" + +# Mac OS X Platforms/Flavors + +MACOS_ARM64 = "macosx-arm64" + +MACOS_X86_64 = "macosx-x86_64" + +MACOS_UNIVERSAL = "macosx-universal" + +# Watch OS Platforms/Flavors + +WATCHOS_ARM64 = "watchos-arm64" + +WATCHOS_ARM64_32 = "watchos-arm64_32" + +WATCHSIMULATOR_ARM64 = "watchsimulator-arm64" + +WATCHSIMULATOR_X86_64 = "watchsimulator-x86_64" + +# Vision OS Platforms/Flavors +VISIONOS_ARM64 = "visionos-arm64" + +VISIONSIMULATOR_ARM64 = "visionsimulator-arm64" + +apple_sdks = struct( + IOS = IOS, + WATCHOS = WATCHOS, + MACOSX = MACOSX, + APPLETVOS = APPLETVOS, + VISIONOS = VISIONOS, +) + +appletv_platforms = struct( + APPLETVOS_ARM64 = APPLETVOS_ARM64, + APPLETVSIMULATOR_ARM64 = APPLETVSIMULATOR_ARM64, + APPLETVSIMULATOR_X86_64 = APPLETVSIMULATOR_X86_64, +) + +ios_platforms = struct( + IPHONEOS_ARM64 = IPHONEOS_ARM64, + IPHONESIMULATOR_ARM64 = IPHONESIMULATOR_ARM64, + IPHONESIMULATOR_X86_64 = IPHONESIMULATOR_X86_64, +) + +mac_catalyst_platforms = struct( + MACCATALYST_ARM64 = MACCATALYST_ARM64, + MACCATALYST_X86_64 = MACCATALYST_X86_64, +) + +mac_platforms = struct( + MACOS_ARM64 = MACOS_ARM64, + MACOS_X86_64 = MACOS_X86_64, + MACOS_UNIVERSAL = MACOS_UNIVERSAL, +) + +watch_platforms = struct( + WATCHOS_ARM64 = WATCHOS_ARM64, + WATCHOS_ARM64_32 = WATCHOS_ARM64_32, + WATCHSIMULATOR_ARM64 = WATCHSIMULATOR_ARM64, + WATCHSIMULATOR_X86_64 = WATCHSIMULATOR_X86_64, +) + +vision_platforms = struct( + VISIONOS_ARM64 = VISIONOS_ARM64, + VISIONSIMULATOR_ARM64 = VISIONSIMULATOR_ARM64, +) diff --git a/prelude/platforms/apple/platforms.bzl b/prelude/platforms/apple/platforms.bzl new file mode 100644 index 0000000000000..8f3fc03822535 --- /dev/null +++ b/prelude/platforms/apple/platforms.bzl @@ -0,0 +1,241 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
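Putting `base.bzl` and `constants.bzl` together: for mobile (and buck2 mac) platforms, `apple_generated_platforms` defines one platform target per build mode, named "<name>-<build_mode>", and `apple_build_mode_backed_platform` recovers that generated name. A sketch with a hypothetical base platform name:

    load("@prelude//platforms/apple:base.bzl", "apple_build_mode_backed_platform")
    load("@prelude//platforms/apple:constants.bzl", "ios_platforms")

    def _release_platform() -> str:
        # "demo-ios" is assumed to be defined via apple_generated_platforms;
        # since iphoneos-arm64 is a mobile platform, this returns "demo-ios-release".
        return apple_build_mode_backed_platform(
            "demo-ios",
            ios_platforms.IPHONEOS_ARM64,
            "release",
        )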
+ +load("@prelude//apple:apple_platforms.bzl", "APPLE_PLATFORMS_KEY") +load("@prelude//platforms/apple:base.bzl", "BUILD_MODE_TO_CONSTRAINTS_MAP", "apple_build_mode_backed_platform", "is_buck2_mac_platform", "is_mobile_platform") +load( + "@prelude//platforms/apple:build_mode.bzl", + "APPLE_BUILD_MODES", + "get_build_mode", + "get_build_mode_debug", +) +load( + "@prelude//platforms/apple:constants.bzl", + "ios_platforms", + "mac_catalyst_platforms", + "mac_platforms", +) +load("@prelude//platforms/apple:platforms_map.bzl", "APPLE_PLATFORMS_MAP") +load("@prelude//utils:buckconfig.bzl", "read") + +_SUPPORTED_IOS_PLATFORMS = [ + ios_platforms.IPHONEOS_ARM64, + ios_platforms.IPHONESIMULATOR_ARM64, + ios_platforms.IPHONESIMULATOR_X86_64, +] + +_SUPPORTED_MACOS_PLATFORMS = [ + mac_platforms.MACOS_ARM64, + mac_platforms.MACOS_X86_64, +] + +_SUPPORTED_MAC_CATALYST_PLATFORMS = [ + mac_catalyst_platforms.MACCATALYST_ARM64, + mac_catalyst_platforms.MACCATALYST_X86_64, +] + +_ANALYSIS_CONSTRAINTS = ["ovr_config//bitcode/constraints:bitcode"] +_DEFAULT_ANALYSIS_IOS_PLATFORM = ios_platforms.IPHONEOS_ARM64 +_DEFAULT_ANALYSIS_MACOS_PLATFORM = mac_platforms.MACOS_X86_64 + +DEFAULT_SUPPORTED_CXX_PLATFORMS = _SUPPORTED_IOS_PLATFORMS + +def apple_target_platforms( + base_name, + platform_rule, + constraint_values = None, # Constraint values added to all generated platforms + visibility = None, + deps = None, + cxx_platforms_constraint_values = None, # Must be a map of a supported cxx platform to a list of constraint values + build_mode_constraint_values = None, # Must be a map of a supported build mode to a list of constraint values + supported_cxx_platforms = DEFAULT_SUPPORTED_CXX_PLATFORMS, # Cxx platforms to generate platforms for + supported_build_modes = APPLE_BUILD_MODES): # Build modes to generate platforms for + """ Define architecture and sdk specific platforms alongside the base platform. """ + + # HACK: Apps shouldn't be generating platforms for cxx_platforms they don't support. However, to support cases where other apps + # depend on shared libraries that don't generate particular platforms, and set a cxx.default_platform on the command line, we need + # to make the graph parseable and generate the missing target platforms. They will never be used, but need to exist in the config + # backed world. 
+ config_based_platform = read("cxx", "default_platform") + if config_based_platform != None and config_based_platform not in supported_cxx_platforms: + supported_cxx_platforms = list(supported_cxx_platforms) + if config_based_platform in _SUPPORTED_MACOS_PLATFORMS: + for p in _SUPPORTED_MACOS_PLATFORMS: + if p not in supported_cxx_platforms: + supported_cxx_platforms.append(p) + + if config_based_platform in _SUPPORTED_MAC_CATALYST_PLATFORMS: + for p in _SUPPORTED_MAC_CATALYST_PLATFORMS: + if p not in supported_cxx_platforms: + supported_cxx_platforms.append(p) + + # Form defaults + constraint_values = constraint_values or [] + cxx_platforms_constraint_values = cxx_platforms_constraint_values or {} + build_mode_constraint_values = build_mode_constraint_values or {} + visibility = visibility or ["PUBLIC"] + deps = deps or [] + + _validate_cxx_platforms_constraint_values(base_name, cxx_platforms_constraint_values, supported_cxx_platforms) + _validate_build_mode_constraint_values(base_name, build_mode_constraint_values, supported_build_modes) + + # Define the generated platforms + for platform in supported_cxx_platforms: + platform_dep = get_default_target_platform_for_platform(platform) + cxx_platform_constraints = cxx_platforms_constraint_values.get(platform, []) + if is_mobile_platform(platform) or is_buck2_mac_platform(platform): + for build_mode in supported_build_modes: + build_mode_constraints = build_mode_constraint_values.get(build_mode, []) + BUILD_MODE_TO_CONSTRAINTS_MAP.get(build_mode) + _define_platform( + base_name, + platform, + build_mode, + constraint_values + cxx_platform_constraints + build_mode_constraints, + visibility, + deps + [platform_dep], + platform_rule, + ) + else: + _define_platform( + base_name, + platform, + None, + constraint_values + cxx_platform_constraints, + visibility, + deps + [platform_dep], + platform_rule, + ) + + # Define the base platform in case it is needed (example: to be a dep of another platform) + platform_rule( + name = base_name, + constraint_values = constraint_values, + visibility = visibility, + deps = deps, + ) + + analysis_platform = _get_analysis_platform_for_supported_platforms(supported_cxx_platforms) + analysis_platform_dep = get_default_target_platform_for_platform(analysis_platform) + analysis_platform_build_mode_constraints = build_mode_constraint_values.get(get_build_mode_debug(), []) + + platform_rule( + name = base_name + "-analysis", + constraint_values = constraint_values + analysis_platform_build_mode_constraints + _ANALYSIS_CONSTRAINTS, + visibility = ["PUBLIC"], + deps = deps + [analysis_platform_dep], + ) + +def config_backed_apple_target_platform(target_platform = None, platform = None, build_mode = None): + platform = _get_default_platform() if platform == None else platform + build_mode = get_build_mode() if build_mode == None else build_mode + if target_platform == None: + return get_default_target_platform_for_platform(platform) + + return _get_generated_name(target_platform, platform, build_mode) + +def get_default_target_platform_for_platform(sdk_arch) -> [str, None]: + data = APPLE_PLATFORMS_MAP.get(sdk_arch) + if data != None: + return data.target_platform + + return None + +def set_apple_platforms(platform, base_config_backed_target_platform, kwargs): + def get_supported_platforms(): + if platform in _SUPPORTED_IOS_PLATFORMS: + return _SUPPORTED_IOS_PLATFORMS + elif platform in _SUPPORTED_MACOS_PLATFORMS: + return _SUPPORTED_MACOS_PLATFORMS + elif platform in _SUPPORTED_MAC_CATALYST_PLATFORMS: + return 
_SUPPORTED_MAC_CATALYST_PLATFORMS + else: + return None + + supported_platforms = get_supported_platforms() + if not supported_platforms: + return kwargs + + # If we've already defined the apple platforms, we can avoid having to process them again. + if APPLE_PLATFORMS_KEY in kwargs: + return kwargs + + apple_platforms = {} + for platform in supported_platforms: + for build_mode in APPLE_BUILD_MODES: + identifier = "{}-{}".format(platform, build_mode) + if base_config_backed_target_platform: + apple_platforms[identifier] = config_backed_apple_target_platform(base_config_backed_target_platform, platform, build_mode) + else: + base_target_platform = _get_base_target_platform_for_platform(platform) + if not base_target_platform: + fail("A valid base target platform is required!") + apple_platforms[identifier] = apple_build_mode_backed_platform(base_target_platform, platform, build_mode) + + kwargs[APPLE_PLATFORMS_KEY] = apple_platforms + + return kwargs + +def _get_generated_name(base_name, platform, build_mode): + platform_and_build_mode_name = apple_build_mode_backed_platform(platform, platform, build_mode) + return "{}-{}".format(base_name, platform_and_build_mode_name) + +def _get_default_platform(): + platform = read("cxx", "default_platform") + return platform if platform != None else ios_platforms.IPHONESIMULATOR_X86_64 + +def _define_platform(base_name, platform, build_mode, constraint_values, visibility, deps, platform_rule): + # @lint-ignore BUCKLINT - We set the visibility to PUBLIC directly and can bypass fb_native + platform_rule( + name = _get_generated_name(base_name, platform, build_mode), + constraint_values = constraint_values, + visibility = visibility, + deps = deps, + ) + +def _get_base_target_platform_for_platform(sdk_arch) -> [str, None]: + data = APPLE_PLATFORMS_MAP.get(sdk_arch) + if data != None: + return data.base_target_platform + + return None + +def _get_analysis_platform_for_supported_platforms(supported_cxx_platforms): + # For determining the platform deps to use for the base platform, we inspect the supported + # cxx platforms, giving precedence to iOS platforms. 
+ for platform in _SUPPORTED_IOS_PLATFORMS: + if platform in supported_cxx_platforms: + return _DEFAULT_ANALYSIS_IOS_PLATFORM + + for platform in _SUPPORTED_MACOS_PLATFORMS: + if platform in supported_cxx_platforms: + return _DEFAULT_ANALYSIS_MACOS_PLATFORM + + return _DEFAULT_ANALYSIS_IOS_PLATFORM + +def _validate_cxx_platforms_constraint_values(base_name, cxx_platforms_constraint_values, supported_cxx_platforms): + if type(cxx_platforms_constraint_values) != type({}): + fail("cxx_platforms_constraint_values must be a map of platform to constraint values!") + for platform, platform_values in cxx_platforms_constraint_values.items(): + if platform not in supported_cxx_platforms: + fail("\n\nProviding platform constraints for an unsupported platform!\nBase platform: {}\nCXX Platform: {} with values {}\nSupported platforms: {}\n".format( + base_name, + platform, + platform_values, + ", ".join(supported_cxx_platforms), + )) + +def _validate_build_mode_constraint_values(base_name, build_mode_constraint_values, supported_build_modes): + if type(build_mode_constraint_values) != type({}): + fail("build_mode_constraint_values must be a map of build mode to constraint values!") + for build_mode, build_mode_values in build_mode_constraint_values.items(): + if build_mode not in supported_build_modes: + fail("\n\nProviding build mode constraints for an unsupported build mode!\nBase platform: {}\nBuild mode: {} with values {}\nSupported build modes: {}\n".format( + base_name, + build_mode, + build_mode_values, + ", ".join(supported_build_modes), + )) diff --git a/prelude/platforms/apple/platforms_map.bzl b/prelude/platforms/apple/platforms_map.bzl new file mode 100644 index 0000000000000..9e15a662cd36b --- /dev/null +++ b/prelude/platforms/apple/platforms_map.bzl @@ -0,0 +1,14 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# @oss-disable: load("@prelude//platforms/apple/meta_only:platforms_map.bzl", _APPLE_PLATFORMS_MAP = "APPLE_PLATFORMS_MAP", _APPLE_SDK_DEFAULT_PLATFORM_MAP = "APPLE_SDK_DEFAULT_PLATFORM_MAP") + +APPLE_PLATFORMS_MAP = {} # TODO: Define OSS platforms map # @oss-enable +# @oss-disable: APPLE_PLATFORMS_MAP = _APPLE_PLATFORMS_MAP + +APPLE_SDK_DEFAULT_PLATFORM_MAP = {} # @oss-enable +# @oss-disable: APPLE_SDK_DEFAULT_PLATFORM_MAP = _APPLE_SDK_DEFAULT_PLATFORM_MAP diff --git a/prelude/platforms/apple/sdk.bzl b/prelude/platforms/apple/sdk.bzl new file mode 100644 index 0000000000000..6ea7e1f0cb2fa --- /dev/null +++ b/prelude/platforms/apple/sdk.bzl @@ -0,0 +1,22 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +_APPLE_SDKS = [ + "iphoneos", + "iphonesimulator", + "maccatalyst", + "macosx", + "visionos", + "visionsimulator", + "watchos", + "watchsimulator", + # Marker entry used to help toolchain selectors define a set of + # tools outside the apple_toolchain definition. 
+ "toolchain-tool", +] + +AppleSdk = enum(*_APPLE_SDKS) diff --git a/prelude/prelude.bzl b/prelude/prelude.bzl index 6ef06c1ea3796..ac15950e85302 100644 --- a/prelude/prelude.bzl +++ b/prelude/prelude.bzl @@ -10,5 +10,3 @@ load("@prelude//:native.bzl", _native = "native") # Public symbols in this file become globals everywhere except `bzl` files in prelude. # Additionally, members of `native` struct also become globals in `BUCK` files. native = _native - -# This is a test to get CI to notice me diff --git a/prelude/python/compile.bzl b/prelude/python/compile.bzl index 4d6175ac2b262..36f3d4eacae8a 100644 --- a/prelude/python/compile.bzl +++ b/prelude/python/compile.bzl @@ -9,10 +9,10 @@ load(":manifest.bzl", "ManifestInfo") load(":toolchain.bzl", "PythonToolchainInfo") PycInvalidationMode = enum( - "UNCHECKED_HASH", - "CHECKED_HASH", + "unchecked_hash", + "checked_hash", # timestamp isn't supported at the moment - # "TIMESTAMP", + # "timestamp", ) def compile_manifests( @@ -20,31 +20,57 @@ def compile_manifests( manifests: list[ManifestInfo]) -> dict[PycInvalidationMode, ManifestInfo]: return { mode: compile_manifests_for_mode(ctx, manifests, mode) - for mode in [PycInvalidationMode("UNCHECKED_HASH"), PycInvalidationMode("CHECKED_HASH")] + for mode in [PycInvalidationMode("unchecked_hash"), PycInvalidationMode("checked_hash")] } def compile_manifests_for_mode( ctx: AnalysisContext, manifests: list[ManifestInfo], - invalidation_mode: PycInvalidationMode = PycInvalidationMode("UNCHECKED_HASH")) -> ManifestInfo: - output = ctx.actions.declare_output("bytecode_{}".format(invalidation_mode.value), dir = True) - bytecode_manifest = ctx.actions.declare_output("bytecode_{}.manifest".format(invalidation_mode.value)) - cmd = cmd_args(ctx.attrs._python_toolchain[PythonToolchainInfo].host_interpreter) - cmd.add(ctx.attrs._python_toolchain[PythonToolchainInfo].compile) - cmd.add(cmd_args(output.as_output(), format = "--output={}")) - cmd.add(cmd_args(bytecode_manifest.as_output(), format = "--bytecode-manifest={}")) - cmd.add("--invalidation-mode={}".format(invalidation_mode.value)) + invalidation_mode: PycInvalidationMode = PycInvalidationMode("unchecked_hash")) -> ManifestInfo: + mode = invalidation_mode.value.upper() + output = ctx.actions.declare_output("bytecode_{}".format(mode), dir = True) + bytecode_manifest = ctx.actions.declare_output("bytecode_{}.manifest".format(mode)) + cmd = [ + ctx.attrs._python_toolchain[PythonToolchainInfo].host_interpreter, + ctx.attrs._python_toolchain[PythonToolchainInfo].compile, + cmd_args(output.as_output(), format = "--output={}"), + cmd_args(bytecode_manifest.as_output(), format = "--bytecode-manifest={}"), + "--invalidation-mode={}".format(mode), + ] - for manifest in manifests: - cmd.add(manifest.manifest) - cmd.hidden([a for a, _ in manifest.artifacts]) - ctx.actions.run( - cmd, + env = { # On some platforms (e.g. linux), python hash code randomness can cause # the bytecode to be non-deterministic, so pin via the `PYTHONHASHSEED` # env var. - env = {"PYTHONHASHSEED": "7"}, + "PYTHONHASHSEED": "7", + } + + # support invalidating cached pyc compile actions by bumping the env var. + # the value itself is meaningless, just the fact it changes is meaningful. + # using the PYC magic number for *convenience* only + version = ctx.attrs._python_toolchain[PythonToolchainInfo].version + if version and "cinder" in version: + # for Cinder, this is a workaround... 
+        # this action *should* use the bundled (in-repo) runtime for compilation
+        # (and then the change in the Cinder codebase would be sufficient to invalidate caches)
+        # currently though, the action uses the platform Cinder for PYC compilation,
+        # and these are deployed in-place (no change to toolchain paths),
+        # so we need to force cache invalidation when needed (e.g. for S411091)
+        env["CINDER_DUMMY_PYC_CACHE_BUSTER"] = "3451"
+    elif version and "3.12" in version:
+        # for CPython, the magic number *shouldn't* change during the lifetime of a feature release
+        # but internally we do make more significant changes (rarely),
+        # so for those cases we support forced invalidation using this env var
+        env["PYTHON312_DUMMY_PYC_CACHE_BUSTER"] = "3532"
+
+    hidden = []
+    for manifest in manifests:
+        cmd.append(manifest.manifest)
+        hidden.extend([a for a, _ in manifest.artifacts])
+    ctx.actions.run(
+        cmd_args(cmd, hidden = hidden),
+        env = env,
         category = "py_compile",
-        identifier = invalidation_mode.value,
+        identifier = mode,
     )
 
     return ManifestInfo(manifest = bytecode_manifest, artifacts = [(output, "bytecode")])
diff --git a/prelude/python/cxx_python_extension.bzl b/prelude/python/cxx_python_extension.bzl
index 87099183e6eb8..a971c3f0d832a 100644
--- a/prelude/python/cxx_python_extension.bzl
+++ b/prelude/python/cxx_python_extension.bzl
@@ -18,7 +18,6 @@ load(
     "@prelude//cxx:cxx_sources.bzl",
     "get_srcs_with_flags",
 )
-load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxPlatformInfo")
 load(
     "@prelude//cxx:cxx_types.bzl",
     "CxxRuleConstructorParams",
@@ -41,7 +40,6 @@ load(
     "LibOutputStyle",
     "LinkInfo",
     "LinkInfos",
-    "Linkage",
     "create_merged_link_info",
     "wrap_link_infos",
 )
@@ -60,9 +58,12 @@ load(
     "@prelude//linking:shared_libraries.bzl",
     "merge_shared_libraries",
 )
+load("@prelude//linking:types.bzl", "Linkage")
 load("@prelude//os_lookup:defs.bzl", "OsLookup")
-load("@prelude//python:toolchain.bzl", "PythonPlatformInfo", "get_platform_attr")
-load("@prelude//utils:utils.bzl", "expect", "value_or")
+load("@prelude//python:toolchain.bzl", "PythonPlatformInfo", "PythonToolchainInfo", "get_platform_attr")
+load("@prelude//unix:providers.bzl", "UnixEnv", "create_unix_env_info")
+load("@prelude//utils:expect.bzl", "expect")
+load("@prelude//utils:utils.bzl", "value_or")
 load(":manifest.bzl", "create_manifest_for_source_map")
 load(
     ":native_python_util.bzl",
@@ -98,6 +99,7 @@ def cxx_python_extension_impl(ctx: AnalysisContext) -> list[Provider]:
         compilation_database = True,
         default = False, # We need to do some postprocessing to make sure the shared library is our default output
         java_packaging_info = False,
+        java_global_code_info = False,
         linkable_graph = False, # We create this here so we can correctly apply exclusions
         link_style_outputs = False,
         merged_native_link_info = False,
@@ -109,6 +111,8 @@ def cxx_python_extension_impl(ctx: AnalysisContext) -> list[Provider]:
         preprocessor_for_tests = False,
     )
 
+    python_toolchain = ctx.attrs._python_toolchain[PythonToolchainInfo]
+
     impl_params = CxxRuleConstructorParams(
         build_empty_so = True,
         rule_type = "cxx_python_extension",
@@ -118,6 +122,15 @@ def cxx_python_extension_impl(ctx: AnalysisContext) -> list[Provider]:
         use_soname = False,
         generate_providers = cxx_providers,
         generate_sub_targets = sub_targets,
+        compiler_flags = ctx.attrs.compiler_flags,
+        lang_compiler_flags = ctx.attrs.lang_compiler_flags,
+        platform_compiler_flags = ctx.attrs.platform_compiler_flags,
+        extra_link_flags = python_toolchain.extension_linker_flags,
+        lang_platform_compiler_flags =
ctx.attrs.lang_platform_compiler_flags, + preprocessor_flags = ctx.attrs.preprocessor_flags, + lang_preprocessor_flags = ctx.attrs.lang_preprocessor_flags, + platform_preprocessor_flags = ctx.attrs.platform_preprocessor_flags, + lang_platform_preprocessor_flags = ctx.attrs.lang_platform_preprocessor_flags, ) cxx_library_info = cxx_library_parameterized(ctx, impl_params) @@ -213,6 +226,7 @@ def cxx_python_extension_impl(ctx: AnalysisContext) -> list[Provider]: deps = [d.shared_library_info for d in link_deps], ), linkable_root_info = create_linkable_root( + label = ctx.label, link_infos = wrap_link_infos( link_infos[LibOutputStyle("pic_archive")], pre_flags = ctx.attrs.linker_flags, @@ -251,20 +265,22 @@ def cxx_python_extension_impl(ctx: AnalysisContext) -> list[Provider]: # Export library info. python_platform = ctx.attrs._python_toolchain[PythonPlatformInfo] - cxx_platform = ctx.attrs._cxx_toolchain[CxxPlatformInfo] + cxx_toolchain = ctx.attrs._cxx_toolchain raw_deps = ctx.attrs.deps raw_deps.extend( - get_platform_attr(python_platform, cxx_platform, ctx.attrs.platform_deps), + get_platform_attr(python_platform, cxx_toolchain, ctx.attrs.platform_deps), ) + deps, shared_deps = gather_dep_libraries(raw_deps) - providers.append(create_python_library_info( + library_info = create_python_library_info( ctx.actions, ctx.label, extensions = qualify_srcs(ctx.label, ctx.attrs.base_module, {name: extension}), deps = deps, - shared_libraries = shared_deps, + extension_shared_libraries = shared_deps, src_types = src_type_manifest, - )) + ) + providers.append(library_info) # Omnibus providers @@ -287,4 +303,16 @@ def cxx_python_extension_impl(ctx: AnalysisContext) -> list[Provider]: deps = raw_deps, ) providers.append(linkable_graph) + + providers.append( + create_unix_env_info( + actions = ctx.actions, + env = UnixEnv( + label = ctx.label, + python_libs = [library_info], + ), + deps = raw_deps, + ), + ) + return providers diff --git a/prelude/python/interface.bzl b/prelude/python/interface.bzl index 9da4e0015bcbc..4a6f19ec49353 100644 --- a/prelude/python/interface.bzl +++ b/prelude/python/interface.bzl @@ -13,6 +13,10 @@ PythonLibraryInterface = record( # dict[str, SharedLibraryInfo] shared_libraries = field(typing.Callable), + # Shared libraries used by this Python library. + # dict[str, SharedLibraryInfo] + extension_shared_libraries = field(typing.Callable), + # An iterator of PythonLibraryManifests objects. This is used to collect extensions. 
# iterator of PythonLibraryManifests iter_manifests = field(typing.Callable), diff --git a/prelude/python/make_py_package.bzl b/prelude/python/make_py_package.bzl index e114a0e5bd63c..22178db1cf019 100644 --- a/prelude/python/make_py_package.bzl +++ b/prelude/python/make_py_package.bzl @@ -12,25 +12,34 @@ execution load("@prelude//:artifact_tset.bzl", "project_artifacts") load("@prelude//:local_only.bzl", "package_python_locally") +load("@prelude//:paths.bzl", "paths") load( - "@prelude//linking:link_info.bzl", - "LinkedObject", # @unused Used as a type + "@prelude//cxx:cxx_library_utility.bzl", + "cxx_is_gnu", +) +load( + "@prelude//linking:shared_libraries.bzl", + "SharedLibrary", # @unused Used as a type + "gen_shared_libs_action", + "zip_shlibs", ) load("@prelude//os_lookup:defs.bzl", "OsLookup") load("@prelude//utils:arglike.bzl", "ArgLike") load(":compile.bzl", "PycInvalidationMode") load(":interface.bzl", "EntryPoint", "EntryPointKind", "PythonLibraryManifestsInterface") load(":manifest.bzl", "ManifestInfo") # @unused Used as a type -load(":toolchain.bzl", "PackageStyle", "PythonToolchainInfo") +load(":python.bzl", "manifests_to_interface") +load(":python_library.bzl", "gather_dep_libraries") +load(":toolchain.bzl", "PackageStyle", "PythonToolchainInfo", "get_package_style") # This represents the input to the creation of a Pex. Manifests provide source # files, extensions are native extensions, and compile indicates whether we # should also include bytecode from manifests. PexModules = record( manifests = field(PythonLibraryManifestsInterface), - extensions = field([ManifestInfo, None], None), - extra_manifests = field([ManifestInfo, None], None), - debuginfo_manifest = field([ManifestInfo, None], None), + extensions = field(ManifestInfo | None, None), + extra_manifests = field(ManifestInfo | None, None), + repl_manifests = field(PythonLibraryManifestsInterface | None, None), compile = field(bool, False), ) @@ -38,46 +47,33 @@ PexModules = record( # providers. 
PexProviders = record( default_output = field(Artifact), - other_outputs = list[(ArgLike, str)], - other_outputs_prefix = [str, None], + other_outputs = list[ArgLike], + other_outputs_prefix = str | None, hidden_resources = list[ArgLike], sub_targets = dict[str, list[Provider]], run_cmd = cmd_args, ) def make_py_package_providers( - python_toolchain: PythonToolchainInfo, pex: PexProviders) -> list[Provider]: providers = [ make_default_info(pex), make_run_info(pex), ] - if python_toolchain.installer != None: - providers.append(make_install_info(python_toolchain.installer, pex)) return providers -def make_install_info(installer: ArgLike, pex: PexProviders) -> Provider: - prefix = "{}/".format(pex.other_outputs_prefix) if pex.other_outputs_prefix != None else "" - files = { - "{}{}".format(prefix, path): artifact - for artifact, path in pex.other_outputs - if path != pex.other_outputs_prefix # don't include prefix dir - if path != "" # HACK: skip artifacts without a path - } - files[pex.default_output.basename] = pex.default_output - return InstallInfo( - installer = installer, - files = files, - ) - def make_default_info(pex: PexProviders) -> Provider: return DefaultInfo( default_output = pex.default_output, - other_outputs = [a for a, _ in pex.other_outputs] + pex.hidden_resources, + other_outputs = pex.other_outputs + pex.hidden_resources, sub_targets = pex.sub_targets, ) -def make_run_info(pex: PexProviders) -> Provider: +def make_run_info(pex: PexProviders, run_with_inplace: bool = False) -> Provider: + if run_with_inplace and "inplace" in pex.sub_targets: + # If running with inplace, we want to use the RunInfo of inplace subtarget. + return pex.sub_targets["inplace"][1] + return RunInfo(pex.run_cmd) def _srcs(srcs: list[typing.Any], format = "{}") -> cmd_args: @@ -127,14 +123,15 @@ def make_py_package( ctx: AnalysisContext, python_toolchain: PythonToolchainInfo, # A rule-provided tool to use to build the PEX. - make_py_package_cmd: [RunInfo, None], + make_py_package_cmd: RunInfo | None, package_style: PackageStyle, build_args: list[ArgLike], pex_modules: PexModules, - shared_libraries: dict[str, (LinkedObject, bool)], + shared_libraries: list[(str, SharedLibrary, bool)], main: EntryPoint, - hidden_resources: [None, list[ArgLike]], - allow_cache_upload: bool) -> PexProviders: + hidden_resources: list[ArgLike] | None, + allow_cache_upload: bool, + debuginfo_files: list[(str | (str, SharedLibrary, str), Artifact)] = []) -> PexProviders: """ Passes a standardized set of flags to a `make_py_package` binary to create a python "executable". @@ -151,13 +148,28 @@ def make_py_package( resulting binary. - hidden_resources: extra resources the binary depends on. 
""" + srcs = [] + srcs.extend(pex_modules.manifests.src_manifests()) - preload_libraries = _preload_libraries_args(ctx, shared_libraries) - manifest_module = generate_manifest_module(ctx, python_toolchain, pex_modules.manifests.src_manifests()) - common_modules_args, dep_artifacts = _pex_modules_common_args( + if pex_modules.extensions: + srcs.append(pex_modules.extensions.manifest) + + preload_libraries = _preload_libraries_args( + ctx = ctx, + shared_libraries = [ + (shlib, libdir) + for libdir, shlib, preload in shared_libraries + if preload + ], + ) + startup_function = generate_startup_function_loader(ctx) + manifest_module = generate_manifest_module(ctx, python_toolchain, srcs) + common_modules_args, dep_artifacts, debug_artifacts = _pex_modules_common_args( ctx, pex_modules, - {name: lib for name, (lib, _) in shared_libraries.items()}, + [startup_function] if startup_function else [], + [(shlib, libdir) for libdir, shlib, _ in shared_libraries], + debuginfo_files = debuginfo_files, ) default = _make_py_package_impl( @@ -166,10 +178,11 @@ def make_py_package( make_py_package_cmd, package_style, build_args, - shared_libraries, + len(shared_libraries) > 0, preload_libraries, common_modules_args, dep_artifacts, + debug_artifacts, main, hidden_resources, manifest_module, @@ -177,6 +190,52 @@ def make_py_package( output_suffix = "", allow_cache_upload = allow_cache_upload, ) + + # lets make a shell + if ctx.attrs.repl_main: + # no more + # kjdfhgskjh + repl_deps, _ = gather_dep_libraries(ctx.attrs.repl_only_deps) + repl_manifests = manifests_to_interface(repl_deps[0].manifests) + + repl_pex_modules = PexModules( + manifests = pex_modules.manifests, + extra_manifests = pex_modules.extra_manifests, + extensions = pex_modules.extensions, + repl_manifests = repl_manifests, + compile = pex_modules.compile, + ) + + repl_common_modules_args, repl_dep_artifacts, repl_debug_artifacts = _pex_modules_common_args( + ctx, + repl_pex_modules, + [startup_function] if startup_function else [], + [(shlib, libdir) for libdir, shlib, _ in shared_libraries], + debuginfo_files = debuginfo_files, + suffix = "_repl", + ) + + default.sub_targets["repl"] = make_py_package_providers( + _make_py_package_impl( + ctx, + python_toolchain, + make_py_package_cmd, + PackageStyle("inplace"), + build_args, + len(shared_libraries) > 0, + preload_libraries, + repl_common_modules_args, + repl_dep_artifacts, + repl_debug_artifacts, + (EntryPointKind("function"), ctx.attrs.repl_main), + hidden_resources, + manifest_module, + pex_modules, + output_suffix = "-repl", + allow_cache_upload = allow_cache_upload, + ), + ) + for style in PackageStyle.values(): pex_providers = default if style == package_style.value else _make_py_package_impl( ctx, @@ -184,10 +243,11 @@ def make_py_package( make_py_package_cmd, PackageStyle(style), build_args, - shared_libraries, + len(shared_libraries) > 0, preload_libraries, common_modules_args, dep_artifacts, + debug_artifacts, main, hidden_resources, manifest_module, @@ -195,22 +255,27 @@ def make_py_package( output_suffix = "-{}".format(style), allow_cache_upload = allow_cache_upload, ) - default.sub_targets[style] = make_py_package_providers(python_toolchain, pex_providers) + default.sub_targets[style] = make_py_package_providers(pex_providers) + + # cpp binaries already emit a `debuginfo` subtarget with a different format, + # so we opt to use a more specific subtarget + default.sub_targets["par-debuginfo"] = _debuginfo_subtarget(ctx, debug_artifacts) return default def _make_py_package_impl( 
         ctx: AnalysisContext,
         python_toolchain: PythonToolchainInfo,
-        make_py_package_cmd: [RunInfo, None],
+        make_py_package_cmd: RunInfo | None,
         package_style: PackageStyle,
         build_args: list[ArgLike],
-        shared_libraries: dict[str, (LinkedObject, bool)],
+        shared_libraries: bool,
         preload_libraries: cmd_args,
         common_modules_args: cmd_args,
-        dep_artifacts: list[(ArgLike, str)],
+        dep_artifacts: list[ArgLike],
+        debug_artifacts: list[(str | (str, SharedLibrary, str), ArgLike)],
         main: EntryPoint,
-        hidden_resources: [None, list[ArgLike]],
-        manifest_module: [None, ArgLike],
+        hidden_resources: list[ArgLike] | None,
+        manifest_module: ArgLike | None,
         pex_modules: PexModules,
         output_suffix: str,
         allow_cache_upload: bool) -> PexProviders:
@@ -218,6 +283,7 @@ def _make_py_package_impl(
     standalone = package_style == PackageStyle("standalone")
 
     runtime_files = []
+    sub_targets = {}
     if standalone and hidden_resources != None:
         # constructing this error message is expensive, only do it when we abort analysis
         error_msg = "standalone builds don't support hidden resources" if output_suffix else _hidden_resources_error_message(ctx.label, hidden_resources)
 
@@ -246,13 +312,14 @@ def _make_py_package_impl(
         ctx,
         common_modules_args,
         dep_artifacts,
+        debug_artifacts,
         symlink_tree_path,
         manifest_module,
         pex_modules,
         output_suffix,
     )
 
-    output = ctx.actions.declare_output("{}{}".format(name, python_toolchain.pex_extension))
+    output = ctx.actions.declare_output("{}{}".format(name, ctx.attrs.extension or python_toolchain.pex_extension))
 
     bootstrap_args = _pex_bootstrap_args(
         python_toolchain,
@@ -262,57 +329,54 @@ def _make_py_package_impl(
         preload_libraries,
         symlink_tree_path,
         package_style,
+        True if ctx.attrs.zip_safe == None else ctx.attrs.zip_safe,
     )
     bootstrap_args.add(build_args)
-    if package_style == PackageStyle("standalone"):
+    if standalone:
         bootstrap_args.add(ctx.attrs.standalone_build_args)
     else:
         bootstrap_args.add(ctx.attrs.inplace_build_args)
 
-    if standalone:
+        # For inplace builds, add local artifacts to the outputs so they get properly materialized.
+        runtime_files.extend(dep_artifacts)
+        runtime_files.append(symlink_tree_path)
+
+    # For standalone builds, or builds that set make_py_package, we generate args for calling make_par.py.
+    if standalone or make_py_package_cmd != None:
         # We support building _standalone_ packages locally to e.g. support fbcode's
         # current style of build info stamping (e.g. T10696178).
- prefer_local = package_python_locally(ctx, python_toolchain) + prefer_local = standalone and package_python_locally(ctx, python_toolchain) cmd = cmd_args( make_py_package_cmd if make_py_package_cmd != None else python_toolchain.make_py_package_standalone, ) cmd.add(modules_args) cmd.add(bootstrap_args) + if ctx.attrs.runtime_env: + for k, v in ctx.attrs.runtime_env.items(): + cmd.add(cmd_args(["--passthrough", "--runtime_env={}={}".format(k, v)])) cmd.add(cmd_args("--no-sitecustomize")) + identifier_prefix = "standalone{}" if standalone else "inplace{}" ctx.actions.run( cmd, prefer_local = prefer_local, category = "par", - identifier = "standalone{}".format(output_suffix), + identifier = identifier_prefix.format(output_suffix), allow_cache_upload = allow_cache_upload, ) else: - runtime_files.extend(dep_artifacts) - runtime_files.append((symlink_tree_path, symlink_tree_path.short_path)) - if make_py_package_cmd != None: - cmd = cmd_args(make_py_package_cmd) - cmd.add(modules_args) - cmd.add(bootstrap_args) - cmd.add(cmd_args("--no-sitecustomize")) - ctx.actions.run(cmd, category = "par", identifier = "inplace{}".format(output_suffix)) - else: - modules = cmd_args(python_toolchain.make_py_package_modules) - modules.add(modules_args) - ctx.actions.run(modules, category = "par", identifier = "modules{}".format(output_suffix)) - - bootstrap = cmd_args(python_toolchain.make_py_package_inplace) - bootstrap.add(bootstrap_args) - - if ctx.attrs.add_multiprocessing_wrapper and ctx.attrs._exec_os_type[OsLookup].platform == "linux": - # This script will add the preload/library path vars as well as the pythonpath vars to the - # subprocess interpreter so that the spawned process will be able to find the inplace par - # link tree, native libs, and the modules under the link tree. 
- mp_executable = ctx.actions.declare_output("mp_exec_{}.sh".format(name)) - runtime_files.append((mp_executable, mp_executable.short_path)) - bootstrap.add(["--add-multiprocessing-executable", mp_executable.as_output()]) - ctx.actions.run(bootstrap, category = "par", identifier = "bootstrap{}".format(output_suffix)) + modules = cmd_args(python_toolchain.make_py_package_modules) + modules.add(modules_args) + ctx.actions.run(modules, category = "par", identifier = "modules{}".format(output_suffix)) + + bootstrap = cmd_args(python_toolchain.make_py_package_inplace) + bootstrap.add(bootstrap_args) + if ctx.attrs.runtime_env: + for k, v in ctx.attrs.runtime_env.items(): + bootstrap.add(cmd_args(["--runtime_env", "{}={}".format(k, v)])) + + ctx.actions.run(bootstrap, category = "par", identifier = "bootstrap{}".format(output_suffix)) run_args = [] @@ -324,23 +388,61 @@ def _make_py_package_impl( if hidden_resources == None: hidden_resources = [] + if symlink_tree_path != None: + sub_targets["link-tree"] = [DefaultInfo( + default_output = symlink_tree_path, + other_outputs = runtime_files, + sub_targets = {}, + )] + return PexProviders( default_output = output, other_outputs = runtime_files, other_outputs_prefix = symlink_tree_path.short_path if symlink_tree_path != None else None, hidden_resources = hidden_resources, - sub_targets = {}, - run_cmd = cmd_args(run_args).hidden([a for a, _ in runtime_files] + hidden_resources), + sub_targets = sub_targets, + run_cmd = cmd_args( + run_args, + hidden = runtime_files + hidden_resources + [python_toolchain.interpreter], + ), ) -def _preload_libraries_args(ctx: AnalysisContext, shared_libraries: dict[str, (LinkedObject, bool)]) -> cmd_args: - preload_libraries_path = ctx.actions.write( - "__preload_libraries.txt", - cmd_args([ - "--preload={}".format(name) - for name, (_, preload) in shared_libraries.items() - if preload - ]), +def _debuginfo_subtarget( + ctx: AnalysisContext, + debug_artifacts: list[(str | (str, SharedLibrary, str), ArgLike)]) -> list[Provider]: + for_shared_libs = [] + other = [] + for name, artifact in debug_artifacts: + if type(name) == type(()): + for_shared_libs.append((name[1], (artifact, name[0], name[2]))) + else: + other.append((artifact, name)) + out = gen_shared_libs_action( + actions = ctx.actions, + out = "debuginfo.manifest.json", + shared_libs = [shlib for shlib, _ in for_shared_libs], + gen_action = lambda actions, output, shared_libs: actions.write_json( + output, + [ + (debug, paths.join(libdir, soname + ext)) + for soname, _, (debug, libdir, ext) in zip_shlibs(shared_libs, for_shared_libs) + ] + other, + ), + ) + return [DefaultInfo(default_output = out, other_outputs = [d for _, d in debug_artifacts])] + +def _preload_libraries_args(ctx: AnalysisContext, shared_libraries: list[(SharedLibrary, str)]) -> cmd_args: + preload_libraries_path = gen_shared_libs_action( + actions = ctx.actions, + out = "__preload_libraries.txt", + shared_libs = [shlib for shlib, _ in shared_libraries], + gen_action = lambda actions, output, shared_libs: actions.write( + output, + [ + "--preload={}".format(paths.join(libdir, soname)) + for soname, _, libdir in zip_shlibs(shared_libs, shared_libraries) + ], + ), ) return cmd_args(preload_libraries_path, format = "@{}") @@ -348,10 +450,11 @@ def _pex_bootstrap_args( toolchain: PythonToolchainInfo, main: EntryPoint, output: Artifact, - shared_libraries: dict[str, (LinkedObject, bool)], + shared_libraries: bool, preload_libraries: cmd_args, symlink_tree_path: Artifact | None, - package_style: 
PackageStyle) -> cmd_args: + package_style: PackageStyle, + zip_safe: bool) -> cmd_args: cmd = cmd_args() cmd.add(preload_libraries) cmd.add([ @@ -365,25 +468,34 @@ def _pex_bootstrap_args( else: cmd.add(["--main-function", main[1]]) if symlink_tree_path != None: - cmd.add(cmd_args(["--modules-dir", symlink_tree_path]).ignore_artifacts()) + cmd.add(cmd_args(["--modules-dir", symlink_tree_path], ignore_artifacts = True)) - if toolchain.main_runner: - cmd.add(["--main-runner", toolchain.main_runner]) + cmd.add(["--main-runner", toolchain.main_runner]) # Package style `inplace_lite` cannot be used with shared libraries if package_style == PackageStyle("inplace_lite") and not shared_libraries: cmd.add("--use-lite") cmd.add(output.as_output()) + if package_style == PackageStyle("standalone") and not zip_safe: + cmd.add("--no-zip-safe") + + for lib_path in toolchain.native_library_runtime_paths: + cmd.add("--native-library-runtime-path={}".format(lib_path)) + return cmd def _pex_modules_common_args( ctx: AnalysisContext, pex_modules: PexModules, - shared_libraries: dict[str, LinkedObject]) -> (cmd_args, list[(ArgLike, str)]): + extra_manifests: list[ArgLike], + shared_libraries: list[(SharedLibrary, str)], + debuginfo_files: list[(str | (str, SharedLibrary, str), Artifact)], + suffix: str = "") -> (cmd_args, list[ArgLike], list[(str | (str, SharedLibrary, str), ArgLike)]): srcs = [] src_artifacts = [] deps = [] + debug_artifacts = [] srcs.extend(pex_modules.manifests.src_manifests()) src_artifacts.extend(pex_modules.manifests.src_artifacts_with_paths()) @@ -396,83 +508,126 @@ def _pex_modules_common_args( srcs.append(pex_modules.extra_manifests.manifest) src_artifacts.extend(pex_modules.extra_manifests.artifacts) - deps.extend(src_artifacts) + if pex_modules.repl_manifests: + srcs.extend(pex_modules.repl_manifests.src_manifests()) + src_artifacts.extend(pex_modules.repl_manifests.src_artifacts_with_paths()) + + if extra_manifests: + srcs.extend(extra_manifests) + + deps.extend([a[0] for a in src_artifacts]) resources = pex_modules.manifests.resource_manifests() - deps.extend(pex_modules.manifests.resource_artifacts_with_paths()) + deps.extend([a[0] for a in pex_modules.manifests.resource_artifacts_with_paths()]) src_manifests_path = ctx.actions.write( - "__src_manifests.txt", + "__src_manifests{}.txt".format(suffix), _srcs(srcs, format = "--module-manifest={}"), ) resource_manifests_path = ctx.actions.write( - "__resource_manifests.txt", + "__resource_manifests{}.txt".format(suffix), _srcs(resources, format = "--resource-manifest={}"), ) - native_libraries = [s.output for s in shared_libraries.values()] - native_library_srcs_path = ctx.actions.write( - "__native_libraries___srcs.txt", - _srcs(native_libraries, format = "--native-library-src={}"), - ) - native_library_dests_path = ctx.actions.write( - "__native_libraries___dests.txt", - ["--native-library-dest={}".format(lib) for lib in shared_libraries], + native_libraries = gen_shared_libs_action( + actions = ctx.actions, + out = "__native_libraries{}__.txt".format(suffix), + shared_libs = [shlib for shlib, _ in shared_libraries], + gen_action = lambda actions, output, shared_libs: actions.write( + output, + cmd_args( + _srcs( + [shlib.lib.output for shlib in shared_libs.values()], + format = "--native-library-src={}", + ), + [ + "--native-library-dest={}".format(paths.join(libdir, soname)) + for soname, _, libdir in zip_shlibs(shared_libs, shared_libraries) + ], + ), + ), ) - src_manifest_args = cmd_args(src_manifests_path).hidden(srcs) - 
resource_manifest_args = cmd_args(resource_manifests_path).hidden(resources) - native_library_srcs_args = cmd_args(native_library_srcs_path) + src_manifest_args = cmd_args(src_manifests_path, hidden = srcs) + resource_manifest_args = cmd_args(resource_manifests_path, hidden = resources) cmd = cmd_args() cmd.add(cmd_args(src_manifest_args, format = "@{}")) cmd.add(cmd_args(resource_manifest_args, format = "@{}")) - cmd.add(cmd_args(native_library_srcs_args, format = "@{}")) - cmd.add(cmd_args(native_library_dests_path, format = "@{}")) + cmd.add(cmd_args(native_libraries, format = "@{}")) - if pex_modules.debuginfo_manifest: - debuginfo_files = pex_modules.debuginfo_manifest.artifacts + if debuginfo_files: debuginfo_srcs_path = ctx.actions.write( - "__debuginfo___srcs.txt", - _srcs([src for src, _ in debuginfo_files], format = "--debuginfo-src={}"), + "__debuginfo___srcs{}.txt".format(suffix), + _srcs([src for _, src in debuginfo_files], format = "--debuginfo-src={}"), ) debuginfo_srcs_args = cmd_args(debuginfo_srcs_path) cmd.add(cmd_args(debuginfo_srcs_args, format = "@{}")) - deps.extend(debuginfo_files) + for name, artifact in debuginfo_files: + if type(name) != type(""): + libdir, shlib, ext = name + name = paths.join(libdir, shlib.soname.ensure_str() + ext) + debug_artifacts.append((name, artifact)) if ctx.attrs.package_split_dwarf_dwp: - dwp = [(s.dwp, "{}.dwp".format(n)) for n, s in shared_libraries.items() if s.dwp != None] - dwp_srcs_path = ctx.actions.write( - "__dwp___srcs.txt", - _srcs([src for src, _ in dwp], format = "--dwp-src={}"), - ) - dwp_dests_path = ctx.actions.write( - "__dwp___dests.txt", - _srcs([dest for _, dest in dwp], format = "--dwp-dest={}"), + if ctx.attrs.strip_libpar == "extract" and get_package_style(ctx) == PackageStyle("standalone") and cxx_is_gnu(ctx): + dwp_ext = ".debuginfo.dwp" + else: + dwp_ext = ".dwp" + dwp_args = gen_shared_libs_action( + actions = ctx.actions, + out = "__dwp{}__.txt".format(suffix), + shared_libs = [shlib for shlib, _ in shared_libraries], + gen_action = lambda actions, output, shared_libs: actions.write( + output, + cmd_args( + _srcs( + [ + shlib.lib.dwp + for shlib in shared_libs.values() + if shlib.lib.dwp != None + ], + format = "--dwp-src={}", + ), + _srcs( + [ + paths.join(libdir, soname + dwp_ext) + for soname, shlib, libdir in zip_shlibs(shared_libs, shared_libraries) + if shlib.lib.dwp != None + ], + format = "--dwp-dest={}", + ), + ), + ), ) - dwp_srcs_args = cmd_args(dwp_srcs_path) - cmd.add(cmd_args(dwp_srcs_args, format = "@{}")) - cmd.add(cmd_args(dwp_dests_path, format = "@{}")) + cmd.add(cmd_args(dwp_args, format = "@{}")) - deps.extend(dwp) + for shlib, libdir in shared_libraries: + if shlib.lib.dwp != None: + debug_artifacts.append(((libdir, shlib, dwp_ext), shlib.lib.dwp)) - deps.extend([(lib.output, name) for name, lib in shared_libraries.items()]) + for shlib, _ in shared_libraries: + deps.append(shlib.lib.output) external_debug_info = project_artifacts( ctx.actions, - [lib.external_debug_info for lib in shared_libraries.values()], + [ + shlib.lib.external_debug_info + for shlib, _ in shared_libraries + ], ) - # HACK: exclude external_debug_info from InstallInfo by providing an empty path - deps.extend([(d, "") for d in external_debug_info]) + # HACK: external_debug_info has an empty path + debug_artifacts.extend([("", d) for d in external_debug_info]) - return (cmd, deps) + return (cmd, deps, debug_artifacts) def _pex_modules_args( ctx: AnalysisContext, common_args: cmd_args, - dep_artifacts: list[(ArgLike, 
str)], - symlink_tree_path: [None, Artifact], - manifest_module: [ArgLike, None], + dep_artifacts: list[ArgLike], + debug_artifacts: list[(str | (str, SharedLibrary, str), ArgLike)], + symlink_tree_path: Artifact | None, + manifest_module: ArgLike | None, pex_modules: PexModules, output_suffix: str) -> cmd_args: """ @@ -481,16 +636,18 @@ def _pex_modules_args( runtime (this might be empty for e.g. a standalone pex). """ - cmd = cmd_args() - cmd.add(common_args) + cmd = [] + hidden = [] + + cmd.append(common_args) if manifest_module != None: - cmd.add(cmd_args(manifest_module, format = "--module-manifest={}")) + cmd.append(cmd_args(manifest_module, format = "--module-manifest={}")) if pex_modules.compile: - pyc_mode = PycInvalidationMode("UNCHECKED_HASH") if symlink_tree_path == None else PycInvalidationMode("CHECKED_HASH") + pyc_mode = PycInvalidationMode("unchecked_hash") if symlink_tree_path == None else PycInvalidationMode("checked_hash") bytecode_manifests = pex_modules.manifests.bytecode_manifests(pyc_mode) - dep_artifacts.extend(pex_modules.manifests.bytecode_artifacts_with_paths(pyc_mode)) + bytecode_artifacts = [a[0] for a in pex_modules.manifests.bytecode_artifacts_with_paths(pyc_mode)] bytecode_manifests_path = ctx.actions.write( "__bytecode_manifests{}.txt".format(output_suffix), @@ -499,19 +656,21 @@ def _pex_modules_args( format = "--module-manifest={}", ), ) - cmd.add(cmd_args(bytecode_manifests_path, format = "@{}")) - cmd.hidden(bytecode_manifests) + cmd.append(cmd_args(bytecode_manifests_path, format = "@{}")) + hidden.extend([bytecode_manifests] + bytecode_artifacts) if symlink_tree_path != None: - cmd.add(["--modules-dir", symlink_tree_path.as_output()]) + cmd.extend(["--modules-dir", symlink_tree_path.as_output()]) else: # Accumulate all the artifacts we depend on. Only add them to the command # if we are not going to create symlinks. - cmd.hidden([a for a, _ in dep_artifacts]) + hidden.append(dep_artifacts) - return cmd + hidden.extend([s for _, s in debug_artifacts]) -def _hidden_resources_error_message(current_target: Label, hidden_resources) -> str: + return cmd_args(cmd, hidden = hidden) + +def _hidden_resources_error_message(current_target: Label, hidden_resources: list[ArgLike] | None) -> str: """ Friendlier error message about putting non-python resources into standalone bins """ @@ -530,7 +689,8 @@ def _hidden_resources_error_message(current_target: Label, hidden_resources) -> msg = ( "Cannot package hidden srcs/resources in a standalone python_binary. " + - 'Eliminate resources in non-Python dependencies of this python binary, use `package_style = "inplace"`, ' + + 'Eliminate resources in non-Python dependencies of this python binary, set `package_style = "inplace"` on ' + + str(current_target.raw_target()) + ", " + 'use `strip_mode="full"` or turn off Split DWARF `-c fbcode.split-dwarf=false` on C++ binary resources.\n' ) @@ -544,10 +704,75 @@ def _hidden_resources_error_message(current_target: Label, hidden_resources) -> msg += " {}\n".format(resource) return msg +def generate_startup_function_loader(ctx: AnalysisContext) -> ArgLike: + """ + Generate `__startup_function_loader__.py` used for early bootstrap of a par. 
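+    Each entry is a "module:function" string, optionally carrying literal call
+    arguments (the generated loader below splits on ":" and "(" to import the
+    module and invoke the function).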
+ Things that go here are also enumerated in `__manifest__['startup_functions']` + Some examples include: + * static extension finder init + * eager import loader init + * cinderx init + """ + + if ctx.attrs.manifest_module_entries == None: + startup_functions_list = "" + else: + startup_functions_list = "\n".join( + [ + "'''" + startup_function + "'''," + for _, startup_function in sorted(ctx.attrs.manifest_module_entries.get("startup_functions", {}).items()) + ], + ) + + src_startup_functions_path = ctx.actions.write( + "manifest/__startup_function_loader__.py", + """ +import importlib +import warnings + +VARS = {vars} +STARTUP_FUNCTIONS=[{startup_functions_list}] + +VARS["_dearg"] = lambda *args, **kwargs: (args, kwargs) + + +def load_startup_functions(): + for name in STARTUP_FUNCTIONS: + mod, sep, func = name.partition(":") + if sep: + try: + func, _, args = func.partition("(") + args, kwargs = eval("_dearg(" + args, VARS) if args else ((), {{}}) + module = importlib.import_module(mod) + getattr(module, func)(*args, **kwargs) + except Exception as e: + # TODO: Ignoring errors for now. + warnings.warn( + "Startup function '%s' (%s:%s) not executed: %s" + % (func, mod, func, e), + stacklevel=1, + ) + + """.format( + startup_functions_list = startup_functions_list, + vars = { + "label": repr(ctx.label), + "name": ctx.attrs.name, + }, + ), + ) + return ctx.actions.write_json( + "manifest/startup_function_loader.manifest", + [ + ["__par__/__startup_function_loader__.py", src_startup_functions_path, "prelude//python:make_py_package.bzl"], + ], + with_inputs = True, + ) + def generate_manifest_module( ctx: AnalysisContext, python_toolchain: PythonToolchainInfo, - src_manifests: list[ArgLike]) -> [ArgLike, None]: + src_manifests: list[ArgLike]) -> ArgLike | None: """ Generates a __manifest__.py module, and an extra entry to add to source manifests. @@ -563,11 +788,13 @@ def generate_manifest_module( "__module_manifests.txt", _srcs(src_manifests, format = "--module-manifest={}"), ) - cmd = cmd_args(python_toolchain.make_py_package_manifest_module) - cmd.add(["--manifest-entries", entries_json]) - cmd.add(cmd_args(src_manifests_path, format = "@{}")) - cmd.hidden(src_manifests) - cmd.add(["--output", module.as_output()]) + cmd = cmd_args( + python_toolchain.make_py_package_manifest_module, + ["--manifest-entries", entries_json], + cmd_args(src_manifests_path, format = "@{}"), + ["--output", module.as_output()], + hidden = src_manifests, + ) ctx.actions.run(cmd, category = "par", identifier = "manifest-module") json_entries_output = ctx.actions.declare_output("manifest/__manifest__.json") diff --git a/prelude/python/manifest.bzl b/prelude/python/manifest.bzl index 7a832ac38b49f..3d89ea2f5f8f5 100644 --- a/prelude/python/manifest.bzl +++ b/prelude/python/manifest.bzl @@ -6,6 +6,11 @@ # of this source tree. 
load("@prelude//:artifact_tset.bzl", "project_artifacts") +load( + "@prelude//linking:shared_libraries.bzl", + "SharedLibrary", + "gen_shared_libs_action", +) load("@prelude//utils:arglike.bzl", "ArgLike") load(":toolchain.bzl", "PythonToolchainInfo") @@ -82,6 +87,33 @@ def create_manifest_for_source_map( [(dest, artifact, origin) for dest, artifact in srcs.items()], ) +def get_srcs_from_manifest( + src_manifest: [ManifestInfo, None]) -> list[Artifact]: + return [a for (a, _) in src_manifest.artifacts] if src_manifest else [] + +def create_manifest_for_shared_libs( + actions: AnalysisActions, + name: str, + shared_libs: list[SharedLibrary]) -> ManifestInfo: + """ + Generate a source manifest for the given list of sources. + """ + return ManifestInfo( + manifest = gen_shared_libs_action( + actions = actions, + out = name + ".manifest", + shared_libs = shared_libs, + gen_action = lambda actions, output, shared_libs: actions.write_json( + output, + [ + (soname, shlib.lib.output, name) + for soname, shlib in shared_libs.items() + ], + ), + ), + artifacts = [(shlib.lib.output, "") for shlib in shared_libs], + ) + def create_manifest_for_source_dir( ctx: AnalysisContext, param: str, diff --git a/prelude/python/native_python_util.bzl b/prelude/python/native_python_util.bzl index e049465d4e7ee..b5715536f3347 100644 --- a/prelude/python/native_python_util.bzl +++ b/prelude/python/native_python_util.bzl @@ -178,8 +178,8 @@ def _write_syms_file( nm = cxx_toolchain.binary_utilities_info.nm symbols_file = ctx.actions.declare_output(name) - objects_argsfile = ctx.actions.write(name + ".objects.argsfile", objects) - objects_args = cmd_args(objects_argsfile).hidden(objects) + objects_argsfile = ctx.actions.write(name + ".py_objects_argsfile", objects) + objects_args = cmd_args(objects_argsfile, hidden = objects) script_env = { "NM": nm, diff --git a/prelude/python/needed_coverage.bzl b/prelude/python/needed_coverage.bzl index 2cb2cf2ff00f6..825e6317f543f 100644 --- a/prelude/python/needed_coverage.bzl +++ b/prelude/python/needed_coverage.bzl @@ -5,7 +5,7 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -load("@prelude//utils:utils.bzl", "expect") +load("@prelude//utils:expect.bzl", "expect") # All modules owned by a library. This will be used by top-level tests to find # paths that corresponds to the library. 
diff --git a/prelude/python/prebuilt_python_library.bzl b/prelude/python/prebuilt_python_library.bzl index 3a1e1a2e30ca6..8b897e5fe9db2 100644 --- a/prelude/python/prebuilt_python_library.bzl +++ b/prelude/python/prebuilt_python_library.bzl @@ -15,13 +15,29 @@ load( "get_excluded", "get_roots", ) +load( + "@prelude//cxx:preprocessor.bzl", + "CPreprocessor", + "CPreprocessorArgs", + "cxx_inherited_preprocessor_infos", + "cxx_merge_cpreprocessors", + "format_system_include_arg", +) load( "@prelude//linking:linkable_graph.bzl", "create_linkable_graph", "create_linkable_graph_node", ) +load( + "@prelude//third-party:build.bzl", + "create_third_party_build_root", + "prefix_from_label", + "project_from_label", +) +load("@prelude//third-party:providers.bzl", "ThirdPartyBuild", "third_party_build_info") +load("@prelude//unix:providers.bzl", "UnixEnv", "create_unix_env_info") load(":compile.bzl", "compile_manifests") -load(":manifest.bzl", "create_manifest_for_source_dir") +load(":manifest.bzl", "ManifestInfo", "create_manifest_for_source_dir") load( ":python_library.bzl", "create_python_library_info", @@ -34,8 +50,29 @@ def prebuilt_python_library_impl(ctx: AnalysisContext) -> list[Provider]: # Extract prebuilt wheel and wrap in python library provider. # TODO(nmj): Make sure all attrs are used if necessary, esp compile + entry_points = ctx.actions.declare_output("entry_points.manifest") + entry_points_dir = ctx.actions.declare_output("__entry_points__", dir = True) extracted_src = ctx.actions.declare_output("{}_extracted".format(ctx.label.name), dir = True) - ctx.actions.run([ctx.attrs._extract[RunInfo], ctx.attrs.binary_src, "--output", extracted_src.as_output()], category = "py_extract_prebuilt_library") + cmd = cmd_args( + ctx.attrs._extract[RunInfo], + ctx.attrs.binary_src, + "--output", + extracted_src.as_output(), + "--entry-points-manifest", + entry_points.as_output(), + "--entry-points", + entry_points_dir.as_output(), + ) + if ctx.attrs.strip_soabi_tags: + cmd.add("--strip-soabi-tags") + inferred_cxx_header_dirs = None + if ctx.attrs.infer_cxx_header_dirs: + inferred_cxx_header_dirs = ctx.actions.declare_output("__cxx_header_dirs__.txt") + cmd.add( + "--cxx-header-dirs", + inferred_cxx_header_dirs.as_output(), + ) + ctx.actions.run(cmd, category = "py_extract_prebuilt_library") deps, shared_deps = gather_dep_libraries(ctx.attrs.deps) src_manifest = create_manifest_for_source_dir(ctx, "binary_src", extracted_src, exclude = "\\.pyc$") bytecode = compile_manifests(ctx, [src_manifest]) @@ -50,6 +87,11 @@ def prebuilt_python_library_impl(ctx: AnalysisContext) -> list[Provider]: ) providers.append(library_info) + entry_points_manifest = ManifestInfo( + manifest = entry_points, + artifacts = [(entry_points_dir, "")], + ) + # Create, augment and provide the linkable graph. linkable_graph = create_linkable_graph( ctx, @@ -71,4 +113,100 @@ def prebuilt_python_library_impl(ctx: AnalysisContext) -> list[Provider]: deps = ctx.attrs.deps, ))) + # Allow third-party-build rules to depend on Python rules. 
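+    # The extracted wheel contents are exposed under "lib/python" and its
+    # entry points under "bin" in the third-party build root assembled below.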
+ tp_project = project_from_label(ctx.label) + tp_prefix = prefix_from_label(ctx.label) + providers.append( + third_party_build_info( + actions = ctx.actions, + build = ThirdPartyBuild( + project = tp_project, + prefix = tp_prefix, + root = create_third_party_build_root( + ctx = ctx, + paths = [ + ("lib/python", extracted_src), + ], + manifests = [ + ("bin", entry_points_manifest), + ], + ), + manifest = ctx.actions.write_json( + "third_party_build_manifest.json", + dict( + prefix = tp_prefix, + project = tp_project, + py_lib_paths = ["lib/python"], + ), + ), + ), + deps = ctx.attrs.deps, + ), + ) + + # Unix env provider. + providers.append( + create_unix_env_info( + actions = ctx.actions, + env = UnixEnv( + label = ctx.label, + python_libs = [library_info], + binaries = [entry_points_manifest], + ), + deps = ctx.attrs.deps, + ), + ) + + # If this prebuilt wheel contains headers, export them via a C++ provider. + pp_args = [] + if ctx.attrs.cxx_header_dirs: + for header_dir in ctx.attrs.cxx_header_dirs: + pp_args.append( + format_system_include_arg( + cmd_args(extracted_src.project(header_dir)), + "clang", + ), + ) + if inferred_cxx_header_dirs != None: + pp_argsfile = ctx.actions.declare_output("__cxx_header_dirs__.py_cxx_header_argsfile") + + def write_argsfile(actions, header_dirs, output): + lines = [] + for header_dir in header_dirs.read_string().splitlines(): + lines.append(format_system_include_arg( + cmd_args(extracted_src.project(header_dir)), + "clang", + )) + actions.write(output, lines) + + ctx.actions.dynamic_output( + dynamic = [inferred_cxx_header_dirs], + inputs = [], + outputs = [pp_argsfile.as_output()], + f = lambda ctx, artifacts, outputs: write_argsfile( + ctx.actions, + artifacts[inferred_cxx_header_dirs], + outputs[pp_argsfile], + ), + ) + pp_args.append( + cmd_args( + pp_argsfile, + format = "@{}", + hidden = [extracted_src], + ), + ) + if pp_args: + providers.append(cxx_merge_cpreprocessors( + ctx = ctx, + own = [ + CPreprocessor( + args = CPreprocessorArgs( + args = pp_args, + ), + ), + ], + xs = cxx_inherited_preprocessor_infos(ctx.attrs.deps), + )) + return providers diff --git a/prelude/python/python.bzl b/prelude/python/python.bzl index 1864bdc11115b..766c2e06e963b 100644 --- a/prelude/python/python.bzl +++ b/prelude/python/python.bzl @@ -5,14 +5,11 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
-load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxPlatformInfo") load("@prelude//linking:shared_libraries.bzl", "traverse_shared_library_info") load("@prelude//utils:arglike.bzl", "ArgLike") -load("@prelude//utils:utils.bzl", "flatten") load(":compile.bzl", "PycInvalidationMode") load(":interface.bzl", "PythonLibraryInterface", "PythonLibraryManifestsInterface") load(":manifest.bzl", "ManifestInfo") -load(":toolchain.bzl", "PythonPlatformInfo", "get_platform_attr") PythonLibraryManifests = record( label = field(Label), @@ -91,28 +88,32 @@ def _source_type_artifacts(value: PythonLibraryManifests): return [a for a, _ in value.src_types.artifacts] _BYTECODE_PROJ_PREFIX = { - PycInvalidationMode("CHECKED_HASH"): "checked_bytecode", - PycInvalidationMode("UNCHECKED_HASH"): "bytecode", + PycInvalidationMode("checked_hash"): "checked_bytecode", + PycInvalidationMode("unchecked_hash"): "bytecode", } +args_projections = { + "dep_artifacts": _dep_artifacts, + "dep_manifests": _dep_manifests, + "hidden_resources": _hidden_resources, + "resource_artifacts": _resource_artifacts, + "resource_manifests": _resource_manifests, + "source_artifacts": _source_artifacts, + "source_manifests": _source_manifests, + "source_type_artifacts": _source_type_artifacts, + "source_type_manifests": _source_type_manifests, +} +args_projections.update({ + "{}_artifacts".format(prefix): _bytecode_artifacts(mode) + for mode, prefix in _BYTECODE_PROJ_PREFIX.items() +}) +args_projections.update({ + "{}_manifests".format(prefix): _bytecode_manifests(mode) + for mode, prefix in _BYTECODE_PROJ_PREFIX.items() +}) + PythonLibraryManifestsTSet = transitive_set( - args_projections = dict({ - "dep_artifacts": _dep_artifacts, - "dep_manifests": _dep_manifests, - "hidden_resources": _hidden_resources, - "resource_artifacts": _resource_artifacts, - "resource_manifests": _resource_manifests, - "source_artifacts": _source_artifacts, - "source_manifests": _source_manifests, - "source_type_artifacts": _source_type_artifacts, - "source_type_manifests": _source_type_manifests, - }.items() + { - "{}_artifacts".format(prefix): _bytecode_artifacts(mode) - for mode, prefix in _BYTECODE_PROJ_PREFIX.items() - }.items() + { - "{}_manifests".format(prefix): _bytecode_manifests(mode) - for mode, prefix in _BYTECODE_PROJ_PREFIX.items() - }.items()), + args_projections = args_projections, json_projections = { "source_type_manifests_json": _source_type_manifest_jsons, }, @@ -124,6 +125,7 @@ PythonLibraryManifestsTSet = transitive_set( # Information about a python library and its dependencies. # TODO(nmj): Resources in general, and mapping of resources to new paths too. 
PythonLibraryInfo = provider(fields = { + "extension_shared_libraries": provider_field(typing.Any, default = None), # "SharedLibraryInfo" "manifests": provider_field(typing.Any, default = None), # PythonLibraryManifestsTSet "shared_libraries": provider_field(typing.Any, default = None), # "SharedLibraryInfo" }) @@ -131,6 +133,7 @@ PythonLibraryInfo = provider(fields = { def info_to_interface(info: PythonLibraryInfo) -> PythonLibraryInterface: return PythonLibraryInterface( shared_libraries = lambda: traverse_shared_library_info(info.shared_libraries), + extension_shared_libraries = lambda: traverse_shared_library_info(info.extension_shared_libraries), iter_manifests = lambda: info.manifests.traverse(), manifests = lambda: manifests_to_interface(info.manifests), has_hidden_resources = lambda: info.manifests.reduce("has_hidden_resources"), @@ -152,11 +155,3 @@ def manifests_to_interface(manifests: PythonLibraryManifestsTSet) -> PythonLibra resource_artifacts = lambda: [manifests.project_as_args("resource_artifacts")], resource_artifacts_with_paths = lambda: [(a, p) for m in manifests.traverse() if m != None and m.resources != None for a, p in m.resources[0].artifacts], ) - -def get_python_deps(ctx: AnalysisContext): - python_platform = ctx.attrs._python_toolchain[PythonPlatformInfo] - cxx_platform = ctx.attrs._cxx_toolchain[CxxPlatformInfo] - return flatten( - [ctx.attrs.deps] + - get_platform_attr(python_platform, cxx_platform, ctx.attrs.platform_deps), - ) diff --git a/prelude/python/python_binary.bzl b/prelude/python/python_binary.bzl index 42f088634a671..b05e85f45251a 100644 --- a/prelude/python/python_binary.bzl +++ b/prelude/python/python_binary.bzl @@ -5,8 +5,11 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
-load("@prelude//:artifacts.bzl", "ArtifactGroupInfo") -load("@prelude//cxx:compile.bzl", "CxxSrcWithFlags") +load( + "@prelude//:artifacts.bzl", + "ArtifactGroupInfo", + "ArtifactOutputs", # @unused Used as a type +) load("@prelude//cxx:cxx.bzl", "create_shared_lib_link_group_specs") load("@prelude//cxx:cxx_context.bzl", "get_cxx_toolchain_info") load("@prelude//cxx:cxx_executable.bzl", "cxx_executable") @@ -14,13 +17,15 @@ load( "@prelude//cxx:cxx_library_utility.bzl", "cxx_is_gnu", ) -load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxPlatformInfo") +load("@prelude//cxx:cxx_sources.bzl", "CxxSrcWithFlags") +load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxToolchainInfo") load( "@prelude//cxx:cxx_types.bzl", "CxxRuleConstructorParams", ) +load("@prelude//cxx:cxx_utility.bzl", "cxx_attrs_get_allow_cache_upload") load( - "@prelude//cxx:groups.bzl", + "@prelude//cxx:groups_types.bzl", "Group", "GroupAttrs", "GroupMapping", @@ -29,11 +34,14 @@ load( load("@prelude//cxx:headers.bzl", "cxx_get_regular_cxx_headers_layout") load( "@prelude//cxx:link_groups.bzl", - "LinkGroupInfo", # @unused Used as a type "LinkGroupLibSpec", "build_link_group_info", "get_link_group_info", ) +load( + "@prelude//cxx:link_groups_types.bzl", + "LinkGroupInfo", # @unused Used as a type +) load("@prelude//cxx:linker.bzl", "get_rpath_origin") load( "@prelude//cxx:omnibus.bzl", @@ -50,8 +58,7 @@ load( ) load( "@prelude//linking:link_info.bzl", - "Linkage", - "LinkedObject", # @unused Used as a type + "LinkedObject", ) load( "@prelude//linking:linkable_graph.bzl", @@ -64,9 +71,15 @@ load( "LinkableProviders", # @unused Used as a type "linkables", ) -load("@prelude//linking:shared_libraries.bzl", "merge_shared_libraries", "traverse_shared_library_info") +load( + "@prelude//linking:shared_libraries.bzl", + "SharedLibrary", + "create_shlib", + "merge_shared_libraries", + "traverse_shared_library_info", +) load("@prelude//linking:strip.bzl", "strip_debug_with_gnu_debuglink") -load("@prelude//utils:arglike.bzl", "ArgLike") # @unused Used as a type +load("@prelude//linking:types.bzl", "Linkage") load("@prelude//utils:utils.bzl", "flatten", "value_or") load("@prelude//paths.bzl", "paths") load("@prelude//resources.bzl", "gather_resources") @@ -77,7 +90,7 @@ load( "EntryPointKind", "PythonLibraryInterface", ) -load(":make_py_package.bzl", "PexModules", "PexProviders", "make_default_info", "make_py_package") +load(":make_py_package.bzl", "PexModules", "PexProviders", "make_default_info", "make_py_package", "make_run_info") load( ":manifest.bzl", "create_dep_manifest_for_source_map", @@ -90,11 +103,13 @@ load( ":python_library.bzl", "create_python_library_info", "gather_dep_libraries", + "py_attr_resources", "py_resources", "qualify_srcs", ) load(":source_db.bzl", "create_dbg_source_db", "create_python_source_db_info", "create_source_db", "create_source_db_no_deps") -load(":toolchain.bzl", "NativeLinkStrategy", "PackageStyle", "PythonPlatformInfo", "PythonToolchainInfo", "get_platform_attr") +load(":toolchain.bzl", "NativeLinkStrategy", "PackageStyle", "PythonPlatformInfo", "PythonToolchainInfo", "get_package_style", "get_platform_attr") +load(":typing.bzl", "create_per_target_type_check") OmnibusMetadataInfo = provider( # @unsorted-dict-items @@ -106,11 +121,6 @@ def _link_strategy(ctx: AnalysisContext) -> NativeLinkStrategy: return NativeLinkStrategy(ctx.attrs.native_link_strategy) return NativeLinkStrategy(ctx.attrs._python_toolchain[PythonToolchainInfo].native_link_strategy) -def _package_style(ctx: AnalysisContext) -> 
PackageStyle: - if ctx.attrs.package_style != None: - return PackageStyle(ctx.attrs.package_style.lower()) - return PackageStyle(ctx.attrs._python_toolchain[PythonToolchainInfo].package_style) - # We do a lot of merging extensions, so don't use O(n) type annotations def _merge_extensions( # {str: ("_a", "label")} @@ -161,11 +171,12 @@ def _get_root_link_group_specs( name = dep.linkable_root_info.name, is_shared_lib = True, root = dep.linkable_root_info, + label = dep.linkable_graph.nodes.value.label, group = Group( name = dep.linkable_root_info.name, mappings = [ GroupMapping( - root = dep.linkable_graph.nodes.value.label, + roots = [dep.linkable_graph.nodes.value.label], traversal = Traversal("node"), ), ], @@ -188,7 +199,7 @@ def _get_root_link_group_specs( name = name, mappings = [ GroupMapping( - root = extension.linkable_graph.nodes.value.label, + roots = [extension.linkable_graph.nodes.value.label], traversal = Traversal("node"), ), ], @@ -202,15 +213,6 @@ def _get_root_link_group_specs( return specs -def _split_debuginfo(ctx, data: dict[str, (typing.Any, Label | bool)]) -> (dict[str, (LinkedObject, Label | bool)], dict[str, Artifact]): - debuginfo_artifacts = {} - transformed = {} - for name, (artifact, extra) in data.items(): - stripped_binary, debuginfo = strip_debug_with_gnu_debuglink(ctx, name, artifact.unstripped_output) - transformed[name] = LinkedObject(output = stripped_binary, unstripped_output = artifact.unstripped_output, dwp = artifact.dwp), extra - debuginfo_artifacts[name + ".debuginfo"] = debuginfo - return transformed, debuginfo_artifacts - def _get_shared_only_groups(shared_only_libs: list[LinkableProviders]) -> list[Group]: """ Create link group mappings for shared-only libs that'll force the link to @@ -228,7 +230,7 @@ def _get_shared_only_groups(shared_only_libs: list[LinkableProviders]) -> list[G name = str(dep.linkable_graph.nodes.value.label.raw_target()), mappings = [ GroupMapping( - root = dep.linkable_graph.nodes.value.label, + roots = [dep.linkable_graph.nodes.value.label], traversal = Traversal("node"), preferred_linkage = Linkage("shared"), ), @@ -316,7 +318,7 @@ def python_executable( ctx: AnalysisContext, main: EntryPoint, srcs: dict[str, Artifact], - resources: dict[str, (Artifact, list[ArgLike])], + resources: dict[str, ArtifactOutputs], compile: bool, allow_cache_upload: bool) -> PexProviders: # Returns a three tuple: the Python binary, all its potential runtime files, @@ -325,12 +327,12 @@ def python_executable( # TODO(nmj): See if people are actually setting cxx_platform here. 
Really # feels like it should be a property of the python platform python_platform = ctx.attrs._python_toolchain[PythonPlatformInfo] - cxx_platform = ctx.attrs._cxx_toolchain[CxxPlatformInfo] + cxx_toolchain = ctx.attrs._cxx_toolchain raw_deps = ctx.attrs.deps raw_deps.extend(flatten( - get_platform_attr(python_platform, cxx_platform, ctx.attrs.platform_deps), + get_platform_attr(python_platform, cxx_toolchain, ctx.attrs.platform_deps), )) # `preload_deps` is used later to configure `LD_PRELOAD` environment variable, @@ -365,6 +367,7 @@ def python_executable( ctx.actions, ctx.label, srcs = src_manifest, + src_types = src_manifest, dep_manifest = dep_manifest, resources = py_resources(ctx, all_resources) if all_resources else None, bytecode = bytecode_manifest, @@ -396,10 +399,29 @@ def python_executable( exe.sub_targets.update({ "dbg-source-db": [dbg_source_db], "library-info": [library_info], + "main": [DefaultInfo(default_output = ctx.actions.write_json("main.json", main))], "source-db": [source_db], "source-db-no-deps": [source_db_no_deps, create_python_source_db_info(library_info.manifests)], }) + # Type check + type_checker = python_toolchain.type_checker + if type_checker != None: + exe.sub_targets.update({ + "typecheck": [ + create_per_target_type_check( + ctx, + type_checker, + src_manifest, + python_deps, + typeshed = python_toolchain.typeshed_stubs, + py_version = ctx.attrs.py_version_for_type_checking, + typing_enabled = ctx.attrs.typing, + sharding_enabled = ctx.attrs.shard_typing, + ), + ], + }) + return exe def create_dep_report( @@ -408,12 +430,13 @@ def create_dep_report( main: str, library_info: PythonLibraryInfo) -> DefaultInfo: out = ctx.actions.declare_output("dep-report.json") - cmd = cmd_args() - cmd.add(python_toolchain.traverse_dep_manifest) - cmd.add(cmd_args(main, format = "--main={}")) - cmd.add(cmd_args(out.as_output(), format = "--outfile={}")) - cmd.add(cmd_args(library_info.manifests.project_as_args("dep_manifests"))) - cmd.hidden(library_info.manifests.project_as_args("dep_artifacts")) + cmd = cmd_args( + python_toolchain.traverse_dep_manifest, + cmd_args(main, format = "--main={}"), + cmd_args(out.as_output(), format = "--outfile={}"), + cmd_args(library_info.manifests.project_as_args("dep_manifests")), + hidden = library_info.manifests.project_as_args("dep_artifacts"), + ) ctx.actions.run(cmd, category = "write_dep_report") return DefaultInfo(default_output = out) @@ -428,15 +451,10 @@ def _convert_python_library_to_executable( extra = {} python_toolchain = ctx.attrs._python_toolchain[PythonToolchainInfo] - package_style = _package_style(ctx) + package_style = get_package_style(ctx) # Convert preloaded deps to a set of their names to be loaded by. preload_labels = {d.label: None for d in ctx.attrs.preload_deps} - preload_names = { - name: None - for name, shared_lib in library.shared_libraries().items() - if shared_lib.label in preload_labels - } extensions = {} extra_artifacts = {} @@ -444,9 +462,19 @@ def _convert_python_library_to_executable( if manifest.extensions: _merge_extensions(extensions, manifest.label, manifest.extensions) - # If we're using omnibus linking, re-link libraries and extensions and - # update the libraries we'll pull into the final binary. - if _link_strategy(ctx) == NativeLinkStrategy("merged"): + if ctx.attrs._cxx_toolchain.get(CxxToolchainInfo) == None: + # In fat target platforms, there may not be a CXX toolchain available. 
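+        # Each entry pairs a destination dir with the lib; "" places the lib
+        # at the package root (the native branch below uses "runtime/lib").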
+        shared_libs = [
+            ("", shared_lib)
+            for shared_lib in library.shared_libraries()
+        ] + [
+            ("", shared_lib)
+            for shared_lib in library.extension_shared_libraries()
+        ]
+    elif _link_strategy(ctx) == NativeLinkStrategy("merged"):
+        # If we're using omnibus linking, re-link libraries and extensions and
+        # update the libraries we'll pull into the final binary.
+
         # Collect omnibus info from deps.
         linkable_graph = create_linkable_graph(
             ctx,
@@ -468,7 +496,6 @@ def _convert_python_library_to_executable(
             omnibus_graph,
             python_toolchain.linker_flags + ctx.attrs.linker_flags,
             prefer_stripped_objects = ctx.attrs.prefer_stripped_native_objects,
-            allow_cache_upload = allow_cache_upload,
         )
 
         # Extract re-linked extensions.
@@ -476,7 +503,7 @@ def _convert_python_library_to_executable(
             dest: (omnibus_libs.roots[label].shared_library, label)
             for dest, (_, label) in extensions.items()
         }
-        native_libs = omnibus_libs.libraries
+        shared_libs = [("", shlib) for shlib in omnibus_libs.libraries]
 
         omnibus_providers = []
 
@@ -546,7 +573,7 @@ def _convert_python_library_to_executable(
         ]
         extra_preprocessors = []
         if ctx.attrs.par_style == "native":
-            extra_preprocessors.append(CPreprocessor(relative_args = CPreprocessorArgs(args = ["-DNATIVE_PAR_STYLE=1"])))
+            extra_preprocessors.append(CPreprocessor(args = CPreprocessorArgs(args = ["-DNATIVE_PAR_STYLE=1"])))
 
         # All deps involved in the link.
         link_deps = (
@@ -605,14 +632,26 @@ def _convert_python_library_to_executable(
                 linkables(ctx.attrs.link_group_deps)
             ),
             exe_allow_cache_upload = allow_cache_upload,
+            compiler_flags = ctx.attrs.compiler_flags,
+            lang_compiler_flags = ctx.attrs.lang_compiler_flags,
+            platform_compiler_flags = ctx.attrs.platform_compiler_flags,
+            lang_platform_compiler_flags = ctx.attrs.lang_platform_compiler_flags,
+            preprocessor_flags = ctx.attrs.preprocessor_flags,
+            lang_preprocessor_flags = ctx.attrs.lang_preprocessor_flags,
+            platform_preprocessor_flags = ctx.attrs.platform_preprocessor_flags,
+            lang_platform_preprocessor_flags = ctx.attrs.lang_platform_preprocessor_flags,
         )
 
         executable_info = cxx_executable(ctx, impl_params)
         extra["native-executable"] = [DefaultInfo(default_output = executable_info.binary, sub_targets = executable_info.sub_targets)]
 
         # Add sub-targets for libs.
-        for name, lib in executable_info.shared_libs.items():
-            extra[name] = [DefaultInfo(default_output = lib.output)]
+        for shlib in executable_info.shared_libs:
+            # TODO(agallagher) There appear to be pre-existing soname conflicts
+            # when building this (when using link groups), which prevents using
+            # `with_unique_str_sonames`.
+            if shlib.soname.is_str:
+                extra[shlib.soname.ensure_str()] = [DefaultInfo(default_output = shlib.lib.output)]
 
         for name, group in executable_info.auto_link_groups.items():
             extra[name] = [DefaultInfo(default_output = group.output)]
@@ -628,23 +667,37 @@ def _convert_python_library_to_executable(
 
         # Put native libraries into the runtime location, as we need to unpack
         # potentially all of them before startup.
- native_libs = { - paths.join("runtime", "lib", name): lib - for name, lib in executable_info.shared_libs.items() - } - preload_names = [paths.join("runtime", "lib", n) for n in preload_names] + shared_libs = [("runtime/lib", s) for s in executable_info.shared_libs] # TODO expect(len(executable_info.runtime_files) == 0, "OH NO THERE ARE RUNTIME FILES") extra_artifacts.update(dict(extension_info.artifacts)) - native_libs["runtime/bin/{}".format(ctx.attrs.executable_name)] = LinkedObject( - output = executable_info.binary, - unstripped_output = executable_info.binary, - dwp = executable_info.dwp, - ) + shared_libs.append(( + "runtime/bin", + create_shlib( + soname = ctx.attrs.executable_name, + label = ctx.label, + lib = LinkedObject( + output = executable_info.binary, + unstripped_output = executable_info.binary, + dwp = executable_info.dwp, + ), + ), + )) extra_artifacts["static_extension_finder.py"] = ctx.attrs.static_extension_finder else: - native_libs = {name: shared_lib.lib for name, shared_lib in library.shared_libraries().items()} + shared_libs = [ + ("", shared_lib) + for shared_lib in library.shared_libraries() + ] + + if (not ctx.attrs.standalone_extensions) or ctx.attrs.link_style == "shared": + # darwin and windows expect self-contained dynamically linked + # python extensions without additional transitive shared libraries + shared_libs += [ + ("", extension_shared_lib) + for extension_shared_lib in library.extension_shared_libraries() + ] if dbg_source_db: extra_artifacts["dbg-db.json"] = dbg_source_db.default_outputs[0] @@ -654,28 +707,71 @@ def _convert_python_library_to_executable( extra_manifests = create_manifest_for_source_map(ctx, "extra_manifests", extra_artifacts) - shared_libraries = {} - debuginfo_artifacts = {} - # Create the map of native libraries to their artifacts and whether they # need to be preloaded. Note that we merge preload deps into regular deps # above, before gathering up all native libraries, so we're guaranteed to # have all preload libraries (and their transitive deps) here. 
- for name, lib in native_libs.items(): - shared_libraries[name] = lib, name in preload_names + shared_libs = [ + (libdir, shlib, shlib.label in preload_labels) + for libdir, shlib in shared_libs + ] # Strip native libraries and extensions and update the .gnu_debuglink references if we are extracting # debug symbols from the par + debuginfo_files = [] + debuginfos = {} if ctx.attrs.strip_libpar == "extract" and package_style == PackageStyle("standalone") and cxx_is_gnu(ctx): - shared_libraries, library_debuginfo = _split_debuginfo(ctx, shared_libraries) - extensions, extension_debuginfo = _split_debuginfo(ctx, extensions) - debuginfo_artifacts = library_debuginfo | extension_debuginfo + stripped_shlibs = [] + for libdir, shlib, preload in shared_libs: + name = paths.join( + libdir, + value_or( + shlib.soname.as_str(), + shlib.lib.unstripped_output.short_path, + ), + ) + existing = debuginfos.get(name) + if existing == None: + stripped, debuginfo = strip_debug_with_gnu_debuglink( + ctx = ctx, + name = name, + obj = shlib.lib.unstripped_output, + ) + debuginfos[name] = (stripped, debuginfo) + else: + stripped, debuginfo = existing + shlib = SharedLibrary( + soname = shlib.soname, + label = shlib.label, + lib = LinkedObject( + output = stripped, + unstripped_output = shlib.lib.unstripped_output, + dwp = shlib.lib.dwp, + ), + ) + stripped_shlibs.append((libdir, shlib, preload)) + debuginfo_files.append(((libdir, shlib, ".debuginfo"), debuginfo)) + shared_libs = stripped_shlibs + for name, (extension, label) in extensions.items(): + stripped, debuginfo = strip_debug_with_gnu_debuglink( + ctx = ctx, + name = name, + obj = extension.unstripped_output, + ) + extensions[name] = ( + LinkedObject( + output = stripped, + unstripped_output = extension.unstripped_output, + dwp = extension.dwp, + ), + label, + ) + debuginfo_files.append((name + ".debuginfo", debuginfo)) # Combine sources and extensions into a map of all modules. pex_modules = PexModules( manifests = library.manifests(), extra_manifests = extra_manifests, - debuginfo_manifest = create_manifest_for_source_map(ctx, "debuginfo", debuginfo_artifacts) if debuginfo_artifacts else None, compile = compile, extensions = create_manifest_for_extensions( ctx, @@ -688,16 +784,17 @@ def _convert_python_library_to_executable( # Build the PEX. pex = make_py_package( - ctx, - python_toolchain, - ctx.attrs.make_py_package[RunInfo] if ctx.attrs.make_py_package != None else None, - package_style, - ctx.attrs.build_args, - pex_modules, - shared_libraries, - main, - hidden_resources, - allow_cache_upload, + ctx = ctx, + python_toolchain = python_toolchain, + make_py_package_cmd = ctx.attrs.make_py_package[RunInfo] if ctx.attrs.make_py_package != None else None, + package_style = package_style, + build_args = ctx.attrs.build_args, + pex_modules = pex_modules, + shared_libraries = shared_libs, + main = main, + hidden_resources = hidden_resources, + allow_cache_upload = allow_cache_upload, + debuginfo_files = debuginfo_files, ) pex.sub_targets.update(extra) @@ -711,13 +808,25 @@ def python_binary_impl(ctx: AnalysisContext) -> list[Provider]: fail("Only one of main_module or main may be set. Prefer main_function as main and main_module are considered deprecated") elif main_module != None and main_function != None: fail("Only one of main_module or main_function may be set. Prefer main_function.") - elif main_function != None and ctx.attrs.main != None: - fail("Only one of main_function or main may be set. 
Prefer main_function.") - elif ctx.attrs.main != None: + elif ctx.attrs.main != None and main_function == None: main_module = "." + ctx.attrs.main.short_path.replace("/", ".") if main_module.endswith(".py"): main_module = main_module[:-3] + # if "python-version=3.8" in ctx.attrs.labels: + # # buildifier: disable=print + # print(( + # "\033[1;33m \u26A0 [Warning] " + + # "{0} 3.8 is EOL, and is going away by the end of H1 2024. " + + # "This build triggered //{1}:{2} which still uses {0} 3.8. " + + # "Make sure someone (you or the appropriate maintainers) upgrades it to {0} 3.10 soon to avoid breakages. " + + # "https://fburl.com/python-eol \033[0m" + # ).format( + # "Cinder" if "python-flavor=cinder" in ctx.attrs.labels else "Python", + # ctx.label.package, + # ctx.attrs.name, + # )) + if main_module != None: main = (EntryPointKind("module"), main_module) else: @@ -727,16 +836,17 @@ def python_binary_impl(ctx: AnalysisContext) -> list[Provider]: if ctx.attrs.main != None: srcs[ctx.attrs.main.short_path] = ctx.attrs.main srcs = qualify_srcs(ctx.label, ctx.attrs.base_module, srcs) + resources = qualify_srcs(ctx.label, ctx.attrs.base_module, py_attr_resources(ctx)) pex = python_executable( ctx, main, srcs, - {}, + resources, compile = value_or(ctx.attrs.compile, False), - allow_cache_upload = ctx.attrs.allow_cache_upload, + allow_cache_upload = cxx_attrs_get_allow_cache_upload(ctx.attrs), ) return [ make_default_info(pex), - RunInfo(pex.run_cmd), + make_run_info(pex, ctx.attrs.run_with_inplace), ] diff --git a/prelude/python/python_library.bzl b/prelude/python/python_library.bzl index a0e1a2684a70c..a584b0a8fb9a8 100644 --- a/prelude/python/python_library.bzl +++ b/prelude/python/python_library.bzl @@ -5,7 +5,11 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
-load("@prelude//:artifacts.bzl", "unpack_artifact_map") +load( + "@prelude//:artifacts.bzl", + "ArtifactOutputs", # @unused Used as a type + "unpack_artifact_map", +) load("@prelude//:paths.bzl", "paths") load( "@prelude//:resources.bzl", @@ -13,7 +17,6 @@ load( "gather_resources", ) load("@prelude//cxx:cxx_link_utility.bzl", "shared_libs_symlink_tree_name") -load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxPlatformInfo") load( "@prelude//cxx:omnibus.bzl", "get_excluded", @@ -31,8 +34,17 @@ load( ) load("@prelude//linking:shared_libraries.bzl", "SharedLibraryInfo", "merge_shared_libraries") load("@prelude//python:toolchain.bzl", "PythonPlatformInfo", "get_platform_attr") +load( + "@prelude//third-party:build.bzl", + "create_third_party_build_root", + "prefix_from_label", + "project_from_label", +) +load("@prelude//third-party:providers.bzl", "ThirdPartyBuild", "third_party_build_info") +load("@prelude//unix:providers.bzl", "UnixEnv", "create_unix_env_info") load("@prelude//utils:arglike.bzl", "ArgLike") # @unused Used as a type -load("@prelude//utils:utils.bzl", "expect", "flatten", "from_named_set") +load("@prelude//utils:expect.bzl", "expect") +load("@prelude//utils:utils.bzl", "flatten", "from_named_set") load(":compile.bzl", "PycInvalidationMode", "compile_manifests") load( ":manifest.bzl", @@ -48,6 +60,7 @@ load(":needed_coverage.bzl", "PythonNeededCoverageInfo") load(":python.bzl", "PythonLibraryInfo", "PythonLibraryManifests", "PythonLibraryManifestsTSet") load(":source_db.bzl", "create_python_source_db_info", "create_source_db", "create_source_db_no_deps") load(":toolchain.bzl", "PythonToolchainInfo") +load(":typing.bzl", "create_per_target_type_check") def dest_prefix(label: Label, base_module: [None, str]) -> str: """ @@ -109,7 +122,8 @@ def create_python_library_info( resources: [(ManifestInfo, list[ArgLike]), None] = None, extensions: [dict[str, LinkedObject], None] = None, deps: list[PythonLibraryInfo] = [], - shared_libraries: list[SharedLibraryInfo] = []): + shared_libraries: list[SharedLibraryInfo] = [], + extension_shared_libraries: list[SharedLibraryInfo] = []): """ Create a `PythonLibraryInfo` for a set of sources and deps @@ -142,9 +156,15 @@ def create_python_library_info( deps = shared_libraries + [dep.shared_libraries for dep in deps], ) + new_extension_shared_libraries = merge_shared_libraries( + actions, + deps = extension_shared_libraries + [dep.extension_shared_libraries for dep in deps], + ) + return PythonLibraryInfo( manifests = actions.tset(PythonLibraryManifestsTSet, value = manifests, children = [dep.manifests for dep in deps]), shared_libraries = new_shared_libraries, + extension_shared_libraries = new_extension_shared_libraries, ) def gather_dep_libraries(raw_deps: list[Dependency]) -> (list[PythonLibraryInfo], list[SharedLibraryInfo]): @@ -189,23 +209,26 @@ def _exclude_deps_from_omnibus( def _attr_srcs(ctx: AnalysisContext) -> dict[str, Artifact]: python_platform = ctx.attrs._python_toolchain[PythonPlatformInfo] - cxx_platform = ctx.attrs._cxx_toolchain[CxxPlatformInfo] + cxx_toolchain = ctx.attrs._cxx_toolchain all_srcs = {} all_srcs.update(from_named_set(ctx.attrs.srcs)) - for srcs in get_platform_attr(python_platform, cxx_platform, ctx.attrs.platform_srcs): + for srcs in get_platform_attr(python_platform, cxx_toolchain, ctx.attrs.platform_srcs): all_srcs.update(from_named_set(srcs)) return all_srcs -def _attr_resources(ctx: AnalysisContext) -> dict[str, [Dependency, Artifact]]: +def _attr_resources(ctx: AnalysisContext) -> dict[str, Artifact | 
Dependency]: python_platform = ctx.attrs._python_toolchain[PythonPlatformInfo] - cxx_platform = ctx.attrs._cxx_toolchain[CxxPlatformInfo] + cxx_toolchain = ctx.attrs._cxx_toolchain all_resources = {} all_resources.update(from_named_set(ctx.attrs.resources)) - for resources in get_platform_attr(python_platform, cxx_platform, ctx.attrs.platform_resources): + + # `python_binary` doesn't have `platform_resources` + platform_resources = getattr(ctx.attrs, "platform_resources", []) + for resources in get_platform_attr(python_platform, cxx_toolchain, platform_resources): all_resources.update(from_named_set(resources)) return all_resources -def py_attr_resources(ctx: AnalysisContext) -> dict[str, (Artifact, list[ArgLike])]: +def py_attr_resources(ctx: AnalysisContext) -> dict[str, ArtifactOutputs]: """ Return the resources provided by this rule, as a map of resource name to the resource's `ArtifactOutputs` (the resource artifact plus any "other" outputs exposed by it). @@ -215,15 +238,15 @@ def py_resources( ctx: AnalysisContext, - resources: dict[str, (Artifact, list[ArgLike])]) -> (ManifestInfo, list[ArgLike]): + resources: dict[str, ArtifactOutputs]) -> (ManifestInfo, list[ArgLike]): """ Generate a manifest to wrap this rule's resources. """ - d = {name: resource for name, (resource, _) in resources.items()} + d = {name: resource.default_output for name, resource in resources.items()} hidden = [] - for name, (resource, other) in resources.items(): - for o in other: - if type(o) == "artifact" and o.basename == shared_libs_symlink_tree_name(resource): + for name, resource in resources.items(): + for o in resource.nondebug_runtime_files: + if type(o) == "artifact" and o.basename == shared_libs_symlink_tree_name(resource.default_output): # Package the binary's shared libs next to the binary # (the path is stored in RPATH relative to the binary).
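# For example (illustrative names only): a resource packaged at `pkg/bin`
# whose shared-libs symlink tree artifact has basename `bin__shared_libs`
# is mapped to `pkg/bin__shared_libs` below, i.e. made a sibling of the
# resource itself, so the RPATH entry keeps resolving after packaging.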
d[paths.join(paths.dirname(name), o.basename)] = o @@ -256,7 +279,7 @@ def python_library_impl(ctx: AnalysisContext) -> list[Provider]: expect(not ctx.attrs.versioned_resources) python_platform = ctx.attrs._python_toolchain[PythonPlatformInfo] - cxx_platform = ctx.attrs._cxx_toolchain[CxxPlatformInfo] + cxx_toolchain = ctx.attrs._cxx_toolchain providers = [] sub_targets = {} @@ -276,7 +299,7 @@ def python_library_impl(ctx: AnalysisContext) -> list[Provider]: bytecode = None if src_manifest != None: bytecode = compile_manifests(ctx, [src_manifest]) - sub_targets["compile"] = [DefaultInfo(default_output = bytecode[PycInvalidationMode("UNCHECKED_HASH")].artifacts[0][0])] + sub_targets["compile"] = [DefaultInfo(default_output = bytecode[PycInvalidationMode("unchecked_hash")].artifacts[0][0])] sub_targets["src-manifest"] = [DefaultInfo(default_output = src_manifest.manifest, other_outputs = [a for a, _ in src_manifest.artifacts])] if python_toolchain.emit_dependency_metadata: dep_manifest = create_dep_manifest_for_source_map(ctx, python_toolchain, qualified_srcs) @@ -284,15 +307,16 @@ def python_library_impl(ctx: AnalysisContext) -> list[Provider]: raw_deps = ctx.attrs.deps raw_deps.extend(flatten( - get_platform_attr(python_platform, cxx_platform, ctx.attrs.platform_deps), + get_platform_attr(python_platform, cxx_toolchain, ctx.attrs.platform_deps), )) + resource_manifest = py_resources(ctx, resources) if resources else None deps, shared_libraries = gather_dep_libraries(raw_deps) library_info = create_python_library_info( ctx.actions, ctx.label, srcs = src_manifest, src_types = src_type_manifest, - resources = py_resources(ctx, resources) if resources else None, + resources = resource_manifest, bytecode = bytecode, dep_manifest = dep_manifest, deps = deps, @@ -300,11 +324,71 @@ def python_library_impl(ctx: AnalysisContext) -> list[Provider]: ) providers.append(library_info) + providers.append( + create_unix_env_info( + actions = ctx.actions, + env = UnixEnv( + label = ctx.label, + python_libs = [library_info], + ), + deps = raw_deps, + ), + ) + + # Allow third-party-build rules to depend on Python rules. + tp_project = project_from_label(ctx.label) + tp_prefix = prefix_from_label(ctx.label) + providers.append( + third_party_build_info( + actions = ctx.actions, + build = ThirdPartyBuild( + # TODO(agallagher): Figure out a way to get a unique name? + project = tp_project, + prefix = tp_prefix, + root = create_third_party_build_root( + ctx = ctx, + # TODO(agallagher): use constraints to get py version. + manifests = ( + [("lib/python", src_manifest)] if src_manifest != None else [] + ) + ( + [("lib/python", resource_manifest[0])] if resource_manifest != None else [] + ), + ), + manifest = ctx.actions.write_json( + "third_party_build_manifest.json", + dict( + project = tp_project, + prefix = tp_prefix, + py_lib_paths = ["lib/python"], + ), + ), + ), + deps = raw_deps, + ), + ) + providers.append(create_python_needed_coverage_info(ctx.label, ctx.attrs.base_module, srcs.keys())) # Source DBs. 
sub_targets["source-db"] = [create_source_db(ctx, src_type_manifest, deps)] sub_targets["source-db-no-deps"] = [create_source_db_no_deps(ctx, src_types), create_python_source_db_info(library_info.manifests)] + + # Type check + type_checker = python_toolchain.type_checker + if type_checker != None: + sub_targets["typecheck"] = [ + create_per_target_type_check( + ctx, + type_checker, + src_type_manifest, + deps, + typeshed = python_toolchain.typeshed_stubs, + py_version = ctx.attrs.py_version_for_type_checking, + typing_enabled = ctx.attrs.typing, + sharding_enabled = ctx.attrs.shard_typing, + ), + ] + providers.append(DefaultInfo(sub_targets = sub_targets)) # Create, augment and provide the linkable graph. diff --git a/prelude/python/python_needed_coverage_test.bzl b/prelude/python/python_needed_coverage_test.bzl index 972282a3f6747..8da3630232977 100644 --- a/prelude/python/python_needed_coverage_test.bzl +++ b/prelude/python/python_needed_coverage_test.bzl @@ -7,7 +7,7 @@ load( "@prelude//tests:re_utils.bzl", - "get_re_executor_from_props", + "get_re_executors_from_props", ) load("@prelude//test/inject_test_run_info.bzl", "inject_test_run_info") load( @@ -42,8 +42,8 @@ def python_needed_coverage_test_impl(ctx: AnalysisContext) -> list[Provider]: test_type = "simple" test_env["TEST_PILOT"] = "1" - # Setup a RE executor based on the `remote_execution` param. - re_executor = get_re_executor_from_props(ctx) + # Setup RE executors based on the `remote_execution` param. + re_executor, executor_overrides = get_re_executors_from_props(ctx) return inject_test_run_info( ctx, @@ -54,6 +54,7 @@ def python_needed_coverage_test_impl(ctx: AnalysisContext) -> list[Provider]: labels = ctx.attrs.labels, contacts = ctx.attrs.contacts, default_executor = re_executor, + executor_overrides = executor_overrides, # We implicitly make this test via the project root, instead of # the cell root (e.g. fbcode root). run_from_project_root = re_executor != None, diff --git a/prelude/python/python_test.bzl b/prelude/python/python_test.bzl index c9f4b695b8d11..1c00caa09ce82 100644 --- a/prelude/python/python_test.bzl +++ b/prelude/python/python_test.bzl @@ -8,12 +8,17 @@ load("@prelude//:paths.bzl", "paths") load( "@prelude//tests:re_utils.bzl", - "get_re_executor_from_props", + "get_re_executors_from_props", ) load("@prelude//utils:utils.bzl", "from_named_set", "value_or") load("@prelude//test/inject_test_run_info.bzl", "inject_test_run_info") load(":interface.bzl", "EntryPointKind") load(":make_py_package.bzl", "PexProviders", "make_default_info") +load( + ":manifest.bzl", + "get_srcs_from_manifest", +) +load(":python.bzl", "PythonLibraryInfo") load(":python_binary.bzl", "python_executable") load(":python_library.bzl", "py_attr_resources", "qualify_srcs") @@ -38,8 +43,10 @@ def python_test_executable(ctx: AnalysisContext) -> PexProviders: main_module = value_or(ctx.attrs.main_module, "__test_main__") srcs = qualify_srcs(ctx.label, ctx.attrs.base_module, from_named_set(ctx.attrs.srcs)) + if ctx.attrs.implicit_test_library != None: + top_level_manifest = list(ctx.attrs.implicit_test_library[PythonLibraryInfo].manifests.traverse(ordering = "preorder"))[0] + srcs.update(qualify_srcs(ctx.label, ctx.attrs.base_module, from_named_set(get_srcs_from_manifest(top_level_manifest.srcs)))) - # Generate the test modules file and add it to sources. 
test_modules_name, test_modules_path = _write_test_modules_list(ctx, srcs) srcs[test_modules_name] = test_modules_path @@ -61,8 +68,8 @@ def python_test_impl(ctx: AnalysisContext) -> list[Provider]: pex = python_test_executable(ctx) test_cmd = pex.run_cmd - # Setup a RE executor based on the `remote_execution` param. - re_executor = get_re_executor_from_props(ctx) + # Set up RE executors based on the `remote_execution` param. + re_executor, executor_overrides = get_re_executors_from_props(ctx) return inject_test_run_info( ctx, @@ -73,6 +80,7 @@ labels = ctx.attrs.labels, contacts = ctx.attrs.contacts, default_executor = re_executor, + executor_overrides = executor_overrides, # We implicitly run this test from the project root, instead of # the cell root (e.g. fbcode root). run_from_project_root = re_executor != None, diff --git a/prelude/python/python_wheel.bzl b/prelude/python/python_wheel.bzl new file mode 100644 index 0000000000000..093183398776f --- /dev/null +++ b/prelude/python/python_wheel.bzl @@ -0,0 +1,232 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//:paths.bzl", "paths") +load("@prelude//cxx:cxx_context.bzl", "get_cxx_toolchain_info") +load("@prelude//cxx:cxx_toolchain_types.bzl", "PicBehavior") +load( + "@prelude//cxx:link.bzl", + "cxx_link_shared_library", +) +load( + "@prelude//cxx:link_types.bzl", + "link_options", +) +load("@prelude//linking:execution_preference.bzl", "LinkExecutionPreference") +load( + "@prelude//linking:link_info.bzl", + "LinkArgs", + "LinkStrategy", + "get_lib_output_style", + "get_link_info", +) +load( + "@prelude//linking:linkable_graph.bzl", + "LinkableGraph", + "LinkableNode", # @unused Used as a type + "LinkableRootInfo", + "get_deps_for_link", + "get_linkable_graph_node_map_func", + get_link_info_for_node = "get_link_info", +) +load("@prelude//python:manifest.bzl", "create_manifest_for_entries") +load("@prelude//python:python.bzl", "PythonLibraryInfo") +load("@prelude//python:toolchain.bzl", "PythonToolchainInfo") +load("@prelude//utils:expect.bzl", "expect") +load( + "@prelude//utils:graph_utils.bzl", + "depth_first_traversal_by", +) +load("@prelude//decls/toolchains_common.bzl", "toolchains_common") +load("@prelude//transitions/constraint_overrides.bzl", "constraint_overrides") + +def _link_deps( + link_infos: dict[Label, LinkableNode], + deps: list[Label], + link_strategy: LinkStrategy, + pic_behavior: PicBehavior) -> list[Label]: + """ + Return transitive deps required to link dynamically against the given deps. + This will follow through the deps of statically linked inputs and the exported + deps of everything else (see https://fburl.com/diffusion/rartsbkw from v1).
+ """ + + def find_deps(node: Label): + return get_deps_for_link(link_infos[node], link_strategy, pic_behavior) + + return depth_first_traversal_by(link_infos, deps, find_deps) + +def _impl(ctx: AnalysisContext) -> list[Provider]: + providers = [] + + cmd = [] + hidden = [] + + cmd.append(ctx.attrs._wheel[RunInfo]) + + name_parts = [ + ctx.attrs.dist or ctx.attrs.name, + ctx.attrs.version, + ctx.attrs.python, + ctx.attrs.abi, + ctx.attrs.platform, + ] + wheel = ctx.actions.declare_output("{}.whl".format("-".join(name_parts))) + cmd.append(cmd_args(wheel.as_output(), format = "--output={}")) + + cmd.append("--name={}".format(ctx.attrs.dist or ctx.attrs.name)) + cmd.append("--version={}".format(ctx.attrs.version)) + + if ctx.attrs.entry_points: + cmd.append("--entry-points={}".format(json.encode(ctx.attrs.entry_points))) + + for key, val in ctx.attrs.extra_metadata.items(): + cmd.extend(["--metadata", key, val]) + + cmd.extend(["--metadata", "Requires-Python", "=={}.*".format(ctx.attrs.python[2:])]) + + for requires in ctx.attrs.requires: + cmd.extend(["--metadata", "Requires-Dist", requires]) + + for name, script in ctx.attrs.scripts.items(): + cmd.extend(["--data", paths.join("scripts", name), script]) + + libraries = {} + for lib in ctx.attrs.libraries: + libraries[lib.label] = lib + if ctx.attrs.libraries_query != None: + for lib in ctx.attrs.libraries_query: + if PythonLibraryInfo in lib: + libraries[lib.label] = lib + + srcs = [] + extensions = {} + for dep in libraries.values(): + manifests = dep[PythonLibraryInfo].manifests.value + if manifests.srcs != None: + srcs.append(manifests.srcs) + if manifests.resources != None: + expect(not manifests.resources[1]) + srcs.append(manifests.resources[0]) + if manifests.extensions != None: + python_toolchain = ctx.attrs._python_toolchain[PythonToolchainInfo] + toolchain_info = get_cxx_toolchain_info(ctx) + items = manifests.extensions.items() + expect(len(items) == 1) + extension = items[0][0] + root = dep[LinkableRootInfo] + + # Add link inputs for the linkable root and any deps. 
+ inputs = [] + inputs.append(get_link_info( + infos = root.link_infos, + prefer_stripped = ctx.attrs.prefer_stripped_objects, + )) + link_infos = get_linkable_graph_node_map_func(dep[LinkableGraph])() + for ext_dep in _link_deps( + link_infos, + root.deps, + LinkStrategy("static_pic"), + toolchain_info.pic_behavior, + ): + node = link_infos[ext_dep] + output_style = get_lib_output_style( + LinkStrategy("static_pic"), + node.preferred_linkage, + toolchain_info.pic_behavior, + ) + inputs.append(get_link_info_for_node( + node, + output_style, + prefer_stripped = ctx.attrs.prefer_stripped_objects, + )) + + # link the rule + link_result = cxx_link_shared_library( + ctx = ctx, + output = extension, + opts = link_options( + links = [ + LinkArgs(flags = python_toolchain.extension_linker_flags), + LinkArgs(flags = python_toolchain.wheel_linker_flags), + LinkArgs(infos = inputs), + ], + category_suffix = "native_extension", + identifier = extension, + link_execution_preference = LinkExecutionPreference("any"), + ), + ) + extensions[extension] = link_result.linked_object + + if extensions: + srcs.append( + create_manifest_for_entries( + ctx, + name = "extensions.txt", + entries = [ + (name, extension.output, "") + for name, extension in extensions.items() + ], + ), + ) + + for manifest in srcs: + cmd.append(cmd_args(manifest.manifest, format = "--srcs={}")) + for a, _ in manifest.artifacts: + hidden.append(a) + + ctx.actions.run(cmd_args(cmd, hidden = hidden), category = "wheel") + providers.append(DefaultInfo(default_output = wheel)) + + return providers + +python_wheel = rule( + impl = _impl, + cfg = constraint_overrides.transition, + attrs = dict( + dist = attrs.option(attrs.string(), default = None), + version = attrs.string(default = "1.0.0"), + python = attrs.string( + default = select({ + "ovr_config//third-party/python/constraints:3.10": "py3.10", + "ovr_config//third-party/python/constraints:3.11": "py3.11", + "ovr_config//third-party/python/constraints:3.12": "py3.12", + "ovr_config//third-party/python/constraints:3.8": "py3.8", + "ovr_config//third-party/python/constraints:3.9": "py3.9", + }), + ), + entry_points = attrs.dict( + key = attrs.string(), + value = attrs.dict( + key = attrs.string(), + value = attrs.string(), + ), + default = {}, + ), + requires = attrs.list(attrs.string(), default = []), + extra_metadata = attrs.dict( + key = attrs.string(), + value = attrs.string(), + default = {}, + ), + abi = attrs.string(default = "none"), + platform = attrs.string( + default = select({ + "DEFAULT": "any", + "ovr_config//os:linux-arm64": "linux_aarch64", + "ovr_config//os:linux-x86_64": "linux_x86_64", + }), + ), + libraries = attrs.list(attrs.dep(providers = [PythonLibraryInfo]), default = []), + scripts = attrs.dict(key = attrs.string(), value = attrs.source(), default = {}), + libraries_query = attrs.option(attrs.query(), default = None), + prefer_stripped_objects = attrs.default_only(attrs.bool(default = False)), + _wheel = attrs.default_only(attrs.exec_dep(default = "prelude//python/tools:wheel")), + _cxx_toolchain = toolchains_common.cxx(), + _python_toolchain = toolchains_common.python(), + ) | constraint_overrides.attributes, +) diff --git a/prelude/python/runtime/BUCK b/prelude/python/runtime/BUCK deleted file mode 100644 index 1cac267a37f45..0000000000000 --- a/prelude/python/runtime/BUCK +++ /dev/null @@ -1,5 +0,0 @@ -filegroup( - name = "bootstrap_files", - srcs = glob(["__par__/**/*.py"]), - visibility = ["PUBLIC"], -) diff --git a/prelude/python/runtime/BUCK.v2 
b/prelude/python/runtime/BUCK.v2 new file mode 100644 index 0000000000000..dbf6fa6b73fcd --- /dev/null +++ b/prelude/python/runtime/BUCK.v2 @@ -0,0 +1,11 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +filegroup( + name = "bootstrap_files", + srcs = glob(["__par__/**/*.py"]), + visibility = ["PUBLIC"], +) diff --git a/prelude/python/runtime/__par__/bootstrap.py b/prelude/python/runtime/__par__/bootstrap.py index 695f95728cc1b..1cf7a424a4b81 100644 --- a/prelude/python/runtime/__par__/bootstrap.py +++ b/prelude/python/runtime/__par__/bootstrap.py @@ -5,12 +5,23 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +# pyre-strict + from __future__ import annotations import os +import types from typing import Callable, Sequence +def iscoroutinefunction(func: Callable[[], None]) -> bool: + # This is the guts of inspect.iscoroutinefunction without the cost of the inspect import + CO_COROUTINE = 128 # This hasn't changed in 8 years and most likely never will + return isinstance(func, types.FunctionType) and bool( + func.__code__.co_flags & CO_COROUTINE + ) + + def run_as_main( main_module: str, main_function: str | None, @@ -38,12 +49,19 @@ def run_as_main( # `sys.path` has been set up by setting the PAR_MAIN_OVERRIDE environment # variable. decorate_main_module = os.environ.pop("PAR_MAIN_OVERRIDE", None) + is_decorated_module = "PAR_MAIN_ORIGINAL" in os.environ if decorate_main_module: # Pass the original main module as an environment variable for the process, # allowing the decorating module to pick it up. os.environ["PAR_MAIN_ORIGINAL"] = main_module main_module = decorate_main_module + # Also pass the main function if set: + decorate_main_function = os.environ.pop("PAR_MAIN_FUNCTION_OVERRIDE", None) + if main_function and (decorate_main_module or is_decorated_module): + os.environ["PAR_MAIN_FUNCTION_ORIGINAL"] = main_function + main_function = decorate_main_function + if not main_function: import runpy @@ -61,6 +79,16 @@ def run_as_main( import sys sys.modules["__main__"] = mod + + # Pretend we're executing `main()` directly + if hasattr(main, "__globals__") and isinstance(main.__globals__, dict): + main.__globals__["__name__"] = "__main__" for hook in main_function_hooks: hook() - main() + + if iscoroutinefunction(main): + import asyncio + + asyncio.run(main()) + else: + main() diff --git a/prelude/python/source_db.bzl b/prelude/python/source_db.bzl index c799e576de44a..4d299a03d058e 100644 --- a/prelude/python/source_db.bzl +++ b/prelude/python/source_db.bzl @@ -6,6 +6,7 @@ # of this source tree.
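For context on the async-`main` support added to `bootstrap.py` above, here is a self-contained sketch of the same detection trick; `CO_COROUTINE` (128) is the code-object flag that `async def` sets, and it matches `inspect.CO_COROUTINE`:

import asyncio
import types

CO_COROUTINE = 128  # flag set on the code object of an `async def` function

def iscoroutinefunction(func) -> bool:
    # Same check as inspect.iscoroutinefunction, without importing inspect.
    return isinstance(func, types.FunctionType) and bool(
        func.__code__.co_flags & CO_COROUTINE
    )

async def amain() -> None:
    print("ran via asyncio.run")

def main() -> None:
    print("ran synchronously")

if __name__ == "__main__":
    for entry in (amain, main):
        if iscoroutinefunction(entry):
            asyncio.run(entry())  # mirrors the bootstrap behavior above
        else:
            entry()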
load("@prelude//python:python.bzl", "PythonLibraryInfo") +load("@prelude//utils:argfile.bzl", "at_argfile") load( ":manifest.bzl", "ManifestInfo", # @unused Used as a type @@ -41,10 +42,12 @@ def create_source_db( dep_manifests = ctx.actions.tset(PythonLibraryManifestsTSet, children = [d.manifests for d in python_deps]) dependencies = cmd_args(dep_manifests.project_as_args("source_type_manifests"), format = "--dependency={}") - dependencies_file = ctx.actions.write("source_db_dependencies", dependencies) - dependencies_file = cmd_args(dependencies_file, format = "@{}").hidden(dependencies) + cmd.add(at_argfile( + actions = ctx.actions, + name = "source_db_dependencies", + args = dependencies, + )) - cmd.add(dependencies_file) artifacts.append(dep_manifests.project_as_args("source_type_artifacts")) ctx.actions.run(cmd, category = "py_source_db") @@ -71,9 +74,12 @@ def create_dbg_source_db( dep_manifests = ctx.actions.tset(PythonLibraryManifestsTSet, children = [d.manifests for d in python_deps]) dependencies = cmd_args(dep_manifests.project_as_args("source_manifests"), format = "--dependency={}") - dependencies_file = ctx.actions.write("dbg_source_db_dependencies", dependencies) - dependencies_file = cmd_args(dependencies_file, format = "@{}").hidden(dependencies) - cmd.add(dependencies_file) + cmd.add(at_argfile( + actions = ctx.actions, + name = "dbg_source_db_dependencies", + args = dependencies, + )) + artifacts.append(dep_manifests.project_as_args("source_artifacts")) ctx.actions.run(cmd, category = "py_dbg_source_db") diff --git a/prelude/python/sourcedb/build.bxl b/prelude/python/sourcedb/build.bxl index 349ee1be735ab..81ded5ae7fff5 100644 --- a/prelude/python/sourcedb/build.bxl +++ b/prelude/python/sourcedb/build.bxl @@ -5,31 +5,41 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -# @starlark-rust: allow_string_literals_in_type_expr - -def _get_artifact(result: "bxl_build_result") -> Artifact: +def _get_artifact(result: bxl.BuildResult) -> Artifact: # NOTE: the first artifact is always the source db json # T124989384 will make this nicer for artifact in result.artifacts(): return artifact fail("Sourcedb rule must have at least one artifact") -def _abort_on_build_failure(target_label: TargetLabel, result: "bxl_build_result") -> None: +def _get_sourcedb(result: list[bxl.EnsuredArtifact]) -> bxl.EnsuredArtifact: + # NOTE: the first artifact is always the source db json + # T124989384 will make this nicer + for artifact in result: + return artifact + fail("Sourcedb rule must have at least one artifact") + +def _abort_on_build_failure(target_label: TargetLabel, result: bxl.BuildResult) -> None: for failure in result.failures(): error_message = "Target `{}` cannot be built by Buck\nreason: {}".format(target_label, failure) fail(error_message) -# Build sourcedb for the given targets, and return a mapping from target names -# to the corresponding sourcedb JSON file location. -def do_build( +def _build( ctx: bxl.Context, - targets: list[ConfiguredTargetLabel]) -> dict[TargetLabel, Artifact]: + targets: list[ConfiguredTargetLabel]) -> dict[Label, bxl.BuildResult]: # Build sourcedbs of all targets configured_sub_targets = [ target.with_sub_target(["source-db-no-deps"]) for target in targets ] - build_results = ctx.build(configured_sub_targets) + return ctx.build(configured_sub_targets) + +# Build sourcedb for the given targets, and return a mapping from target names +# to the corresponding sourcedb JSON file location. 
+def do_build( + ctx: bxl.Context, + targets: list[ConfiguredTargetLabel]) -> dict[TargetLabel, Artifact]: + build_results = _build(ctx, targets) # Compute result dict output = {} @@ -39,3 +49,14 @@ path = _get_artifact(result) output[raw_target] = path return output + +# Same as do_build, except it ensures the built artifacts are materialized. +def do_build_ensured( + ctx: bxl.Context, + targets: list[ConfiguredTargetLabel]) -> dict[TargetLabel, bxl.EnsuredArtifact]: + build_results = _build(ctx, targets) + for key, value in build_results.items(): + _abort_on_build_failure(key.raw_target(), value) + + ensured_artifacts = ctx.output.ensure_multiple(build_results) + return {label.raw_target(): _get_sourcedb(artifact) for label, artifact in ensured_artifacts.items()} diff --git a/prelude/python/sourcedb/classic.bxl b/prelude/python/sourcedb/classic.bxl index 7504ad8129383..1dec7df197d31 100644 --- a/prelude/python/sourcedb/classic.bxl +++ b/prelude/python/sourcedb/classic.bxl @@ -16,7 +16,7 @@ def _build_entry_point(ctx: bxl.Context) -> None: ) actions = bxl_actions.actions - query = ctx.cquery() + query = ctx.uquery() targets = do_query(ctx, query, actions, [query.eval(target) for target in ctx.cli_args.target]) built_sourcedbs = do_build(ctx, targets) diff --git a/prelude/python/sourcedb/code_navigation.bxl b/prelude/python/sourcedb/code_navigation.bxl index fd127ec888dc8..9272ced6a76ac 100644 --- a/prelude/python/sourcedb/code_navigation.bxl +++ b/prelude/python/sourcedb/code_navigation.bxl @@ -16,12 +16,11 @@ def _build_entry_point(ctx: bxl.Context) -> None: ) actions = bxl_actions.actions - query = ctx.cquery() + query = ctx.uquery() root = ctx.root() sources = ["{}/{}".format(root, source) for source in ctx.cli_args.source] - target_universe = ctx.uquery().owner(sources) - targets = do_query(ctx, query, actions, query.owner(sources, target_universe)) + targets = do_query(ctx, query, actions, query.owner(sources)) built_sourcedbs = do_build(ctx, targets) # Ensure all source files are materialized diff --git a/prelude/python/sourcedb/filter.bxl b/prelude/python/sourcedb/filter.bxl new file mode 100644 index 0000000000000..79d65f7ca80d2 --- /dev/null +++ b/prelude/python/sourcedb/filter.bxl @@ -0,0 +1,76 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree.
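To situate `do_build` versus `do_build_ensured` above: a hypothetical caller-side sketch in BXL, modeled on `_do_query_entry_point` from `query.bxl` later in this diff and assuming a `target` list cli-arg like the entry points here declare:

load(":build.bxl", "do_build_ensured")
load(":query.bxl", "do_query")

def _entry_point(ctx: bxl.Context) -> None:
    query = ctx.uquery()
    actions = ctx.bxl_actions().actions
    targets = do_query(ctx, query, actions, [query.eval(t) for t in ctx.cli_args.target])

    # do_build returns plain Artifacts; do_build_ensured additionally runs
    # ctx.output.ensure_multiple, so the sourcedb JSON files exist on disk
    # for an external consumer such as Pyre.
    sourcedbs = do_build_ensured(ctx, targets)
    ctx.output.print_json({str(label): db for label, db in sourcedbs.items()})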
+ +BUCK_PYTHON_RULE_KINDS = [ + "python_binary", + "python_library", + "python_test", +] +BUCK_PYTHON_RULE_KIND_QUERY = "|".join(BUCK_PYTHON_RULE_KINDS) + +def get_unfiltered_root_targets( + query: bxl.UqueryContext, + target_patterns: typing.Any) -> bxl.UnconfiguredTargetSet: + candidate_targets = utarget_set() + for pattern in target_patterns: + candidate_targets += query.kind( + BUCK_PYTHON_RULE_KIND_QUERY, + pattern, + ) + return candidate_targets + +def filter_root_targets( + query: bxl.UqueryContext, + target_patterns: typing.Any) -> bxl.UnconfiguredTargetSet: + # Find all Pure-Python targets + candidate_targets = get_unfiltered_root_targets(query, target_patterns) + + # Don't check generated rules + filtered_targets = candidate_targets - query.attrfilter( + "labels", + "generated", + candidate_targets, + ) + + # Provide an opt-out label + filtered_targets = filtered_targets - query.attrfilter( + "labels", + "no_pyre", + candidate_targets, + ) + return filtered_targets + +def do_filter( + query: bxl.UqueryContext, + target_patterns: typing.Any, + exclude_targets_with_special_labels: bool) -> list[TargetLabel]: + if exclude_targets_with_special_labels: + root_targets = filter_root_targets(query, target_patterns) + else: + root_targets = get_unfiltered_root_targets(query, target_patterns) + return [root_target.label for root_target in root_targets] + +def _do_filter_entry_point(ctx: bxl.Context) -> None: + query = ctx.uquery() + targets = do_filter( + query, + [query.eval(target) for target in ctx.cli_args.target], + exclude_targets_with_special_labels = ctx.cli_args.recognize_labels, + ) + ctx.output.print_json(targets) + +filter = bxl_main( + doc = ( + "Expand target patterns and look for all targets in immediate sources " + + "that will be built by Pyre." + ), + impl = _do_filter_entry_point, + cli_args = { + "recognize-labels": cli_args.bool(default = True), + "target": cli_args.list(cli_args.string()), + }, +) diff --git a/prelude/python/sourcedb/ide.bxl b/prelude/python/sourcedb/ide.bxl new file mode 100644 index 0000000000000..af0661f92d5bc --- /dev/null +++ b/prelude/python/sourcedb/ide.bxl @@ -0,0 +1,51 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load(":build.bxl", "do_build_ensured") +load(":query.bxl", "do_query") + +def _build_entry_point(ctx: bxl.Context) -> None: + bxl_actions = ctx.bxl_actions( + target_platform = "prelude//platforms:default", + ) + actions = bxl_actions.actions + + query = ctx.uquery() + + output = {} + for source in ctx.cli_args.source: + owning_targets = query.owner(source) + targets_configured = do_query(ctx, query, actions, owning_targets) + targets_configured += [target.label for target in ctx.configured_targets(ctx.cli_args.extra_source_targets)] + + sourcedbs = do_build_ensured(ctx, targets_configured) + + output[source] = {"db": sourcedbs, "owning_targets": [target.label for target in owning_targets]} + + ctx.output.print_json(output) + +build = bxl_main( + doc = """Build Python sourcedb for Python IDE support. + + It takes a list of file paths, and will find the owner targets for all + those files and build source-db for those owning targets, returning them all. 
+ """, + impl = _build_entry_point, + cli_args = { + "extra-source-targets": cli_args.list( + cli_args.string( + doc = "fully qualified targets to include in the sourcedb", + ), + [], # default value + ), + "source": cli_args.list( + cli_args.string( + doc = "File to build a source db for (relative to source root)", + ), + ), + }, +) diff --git a/prelude/python/sourcedb/merge.bxl b/prelude/python/sourcedb/merge.bxl index 3d4024dce4c4c..525bdf89280a7 100644 --- a/prelude/python/sourcedb/merge.bxl +++ b/prelude/python/sourcedb/merge.bxl @@ -5,13 +5,11 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -# @starlark-rust: allow_string_literals_in_type_expr - def do_merge( ctx: bxl.Context, - bxl_actions: "bxl_actions", + bxl_actions: bxl.Actions, built_sourcedbs: dict[TargetLabel, Artifact], - command_category: str) -> "ensured_artifact": + command_category: str) -> bxl.EnsuredArtifact: actions = bxl_actions.actions merger_input = actions.write_json("merge_input.json", built_sourcedbs) @@ -19,13 +17,14 @@ def do_merge( dependency_key = bxl_actions.exec_deps.keys()[0] - command = cmd_args(bxl_actions.exec_deps[dependency_key][RunInfo]) - command.add(merger_input) - command.add("--output") - command.add(merger_output.as_output()) - - # Declare that the merger result depends on all sourcedbs - command.hidden(built_sourcedbs.values()) + command = cmd_args( + bxl_actions.exec_deps[dependency_key][RunInfo], + merger_input, + "--output", + merger_output.as_output(), + # Declare that the merger result depends on all sourcedbs + hidden = built_sourcedbs.values(), + ) actions.run(command, category = command_category) return ctx.output.ensure(merger_output) diff --git a/prelude/python/sourcedb/owners.bxl b/prelude/python/sourcedb/owners.bxl new file mode 100644 index 0000000000000..deeefbf7e0a89 --- /dev/null +++ b/prelude/python/sourcedb/owners.bxl @@ -0,0 +1,41 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load(":filter.bxl", "filter_root_targets") + +def _owners_entry_point(ctx: bxl.Context) -> None: + query = ctx.uquery() + root = ctx.root() + + owning_targets = filter_root_targets(query, query.owner(ctx.cli_args.source)) + + files = [] + if len(owning_targets) > 0: + target = owning_targets[0] + files = query.inputs(target) + cell_root = ctx.audit().cell([target.label.cell])[target.label.cell] + files = ["{}/{}".format(cell_root, file.path) for file in files] + + ctx.output.print_json({"files": files, "owning_targets": [target.label for target in owning_targets], "root": root}) + +build = bxl_main( + doc = """Determines owning python targets and root, providing files within the first owning target. + Note: must be run from within fbsource. + + It takes a file path, returning an object of format + `{'owning_targets': List, 'root': string, 'files': List}` + - Owning targets is the list of python target labels that own the file. + - Root is the buck project root. + - Files is the list of files (absolute paths) within the first owning target, if any. 
+ """, + impl = _owners_entry_point, + cli_args = { + "source": cli_args.string( + doc = "Source file (absolute path)", + ), + }, +) diff --git a/prelude/python/sourcedb/query.bxl b/prelude/python/sourcedb/query.bxl index f33468434e0bd..bc3a8c4aa6c02 100644 --- a/prelude/python/sourcedb/query.bxl +++ b/prelude/python/sourcedb/query.bxl @@ -5,53 +5,12 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -# @starlark-rust: allow_string_literals_in_type_expr - load("@prelude//python:python.bzl", "PythonLibraryManifestsTSet") load("@prelude//python:source_db.bzl", "PythonSourceDBInfo") - -BUCK_PYTHON_RULE_KINDS = [ - "python_binary", - "python_library", - "python_test", -] -BUCK_PYTHON_RULE_KIND_QUERY = "|".join(BUCK_PYTHON_RULE_KINDS) - -def _filter_root_targets( - query: "cqueryctx", - target_patterns: typing.Any) -> "target_set": - # Find all Pure-Python targets - candidate_targets = ctarget_set() - for pattern in target_patterns: - candidate_targets += query.kind( - BUCK_PYTHON_RULE_KIND_QUERY, - pattern, - ) - - # Don't check generated rules - filtered_targets = candidate_targets - query.attrfilter( - "labels", - "generated", - candidate_targets, - ) - - # Do include unittest sources, which are marked as generated - filtered_targets = filtered_targets + query.attrfilter( - "labels", - "unittest-library", - candidate_targets, - ) - - # Provide an opt-out label - filtered_targets = filtered_targets - query.attrfilter( - "labels", - "no_pyre", - candidate_targets, - ) - return filtered_targets +load("@prelude//python/sourcedb/filter.bxl", "filter_root_targets") def _get_python_library_manifests_from_analysis_result( - analysis_result: "analysis_result") -> [PythonLibraryManifestsTSet, None]: + analysis_result: bxl.AnalysisResult) -> [PythonLibraryManifestsTSet, None]: sub_target = analysis_result.providers()[DefaultInfo].sub_targets.get("source-db-no-deps") if sub_target == None: return None @@ -62,7 +21,7 @@ def _get_python_library_manifests_from_analysis_result( def _get_python_library_manifests_from_targets( ctx: bxl.Context, - targets: "target_set") -> list[PythonLibraryManifestsTSet]: + targets: bxl.UnconfiguredTargetSet) -> list[PythonLibraryManifestsTSet]: return filter(None, [ _get_python_library_manifests_from_analysis_result(analysis_result) for analysis_result in ctx.analysis(targets).values() @@ -71,7 +30,7 @@ def _get_python_library_manifests_from_targets( def get_python_library_manifests_tset_from_targets( ctx: bxl.Context, actions: AnalysisActions, - root_targets: "target_set") -> PythonLibraryManifestsTSet: + root_targets: bxl.UnconfiguredTargetSet) -> PythonLibraryManifestsTSet: return actions.tset( PythonLibraryManifestsTSet, children = _get_python_library_manifests_from_targets(ctx, root_targets), @@ -79,22 +38,24 @@ def get_python_library_manifests_tset_from_targets( def get_python_library_manifests_tset_from_target_patterns( ctx: bxl.Context, - query: "cqueryctx", + query: bxl.UqueryContext, actions: AnalysisActions, target_patterns: typing.Any) -> PythonLibraryManifestsTSet: - root_targets = _filter_root_targets(query, target_patterns) + root_targets = filter_root_targets(query, target_patterns) return get_python_library_manifests_tset_from_targets(ctx, actions, root_targets) def do_query( ctx: bxl.Context, - query: "cqueryctx", + query: bxl.UqueryContext, actions: AnalysisActions, target_patterns: typing.Any) -> list[ConfiguredTargetLabel]: - manifests_of_transitive_dependencies = 
get_python_library_manifests_tset_from_target_patterns( - ctx, - query, - actions, - target_patterns, + manifests_of_transitive_dependencies = ( + get_python_library_manifests_tset_from_target_patterns( + ctx, + query, + actions, + target_patterns, + ) ) return [ manifest.label.configured_target() @@ -103,9 +64,14 @@ def do_query( ] def _do_query_entry_point(ctx: bxl.Context) -> None: - query = ctx.cquery() + query = ctx.uquery() actions = ctx.bxl_actions().actions - targets = do_query(ctx, query, actions, [query.eval(target) for target in ctx.cli_args.target]) + targets = do_query( + ctx, + query, + actions, + [query.eval(target) for target in ctx.cli_args.target], + ) ctx.output.print_json([target.raw_target() for target in targets]) query = bxl_main( diff --git a/prelude/python/sourcedb/typing_query.bxl b/prelude/python/sourcedb/typing_query.bxl new file mode 100644 index 0000000000000..a5eb7845f46f3 --- /dev/null +++ b/prelude/python/sourcedb/typing_query.bxl @@ -0,0 +1,55 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//python/sourcedb/filter.bxl", "BUCK_PYTHON_RULE_KIND_QUERY") + +def get_owners_for_files( + query: bxl.UqueryContext, + sources: list[str]) -> dict[str, bxl.UnconfiguredTargetSet]: + return {source: query.owner(source) for source in sources} + +def has_any_python_targets_with_typing( + query: bxl.UqueryContext, + owners: bxl.UnconfiguredTargetSet) -> bool: + targets_with_typing = query.attrfilter("typing", "True", owners) + + python_targets_with_typing = query.kind( + BUCK_PYTHON_RULE_KIND_QUERY, + targets_with_typing, + ) + + return len(python_targets_with_typing) != 0 + +def get_files_per_target_typed( + query: bxl.UqueryContext, + sources: list[str]) -> dict[str, bool]: + files_to_owners = get_owners_for_files(query, sources) + + return { + file: has_any_python_targets_with_typing(query, owners) + for file, owners in files_to_owners.items() + } + +def _do_typing_query_entry_point(ctx: bxl.Context) -> None: + query = ctx.uquery() + files_per_target_typed = get_files_per_target_typed(query, ctx.cli_args.source) + ctx.output.print_json(files_per_target_typed) + +typing_query = bxl_main( + doc = ( + "Queries Buck about a given file to determine if any owning targets have typing " + + "in their attributes." + ), + impl = _do_typing_query_entry_point, + cli_args = { + "source": cli_args.list( + cli_args.string( + doc = "The absolute path to a file you are trying to get typing attributes of", + ), + ), + }, +) diff --git a/prelude/python/toolchain.bzl b/prelude/python/toolchain.bzl index 712c8841c73b9..3a0e4260b0c75 100644 --- a/prelude/python/toolchain.bzl +++ b/prelude/python/toolchain.bzl @@ -50,17 +50,21 @@ PythonToolchainInfo = provider( "default_sitecustomize": provider_field(typing.Any, default = None), # The interpreter to use to compile bytecode. 
"host_interpreter": provider_field(typing.Any, default = None), + "bundled_interpreter": provider_field(typing.Any, default = None), "interpreter": provider_field(typing.Any, default = None), "version": provider_field(typing.Any, default = None), "native_link_strategy": provider_field(typing.Any, default = None), "linker_flags": provider_field(typing.Any, default = None), "binary_linker_flags": provider_field(typing.Any, default = None), + "extension_linker_flags": provider_field(typing.Any, default = None), + "wheel_linker_flags": provider_field(list[typing.Any], default = []), "generate_static_extension_info": provider_field(typing.Any, default = None), "parse_imports": provider_field(typing.Any, default = None), "traverse_dep_manifest": provider_field(typing.Any, default = None), "package_style": provider_field(typing.Any, default = None), "strip_libpar": provider_field(typing.Any, default = None), "make_source_db": provider_field(typing.Any, default = None), + "native_library_runtime_paths": provider_field(list[str], default = []), "make_source_db_no_deps": provider_field(typing.Any, default = None), "make_py_package_inplace": provider_field(typing.Any, default = None), "make_py_package_standalone": provider_field(typing.Any, default = None), @@ -68,10 +72,11 @@ PythonToolchainInfo = provider( "make_py_package_modules": provider_field(typing.Any, default = None), "pex_executor": provider_field(typing.Any, default = None), "pex_extension": provider_field(typing.Any, default = None), + "type_checker": provider_field(typing.Any, default = None), + "typeshed_stubs": provider_field(typing.Any, default = []), "emit_omnibus_metadata": provider_field(typing.Any, default = None), "fail_with_message": provider_field(typing.Any, default = None), "emit_dependency_metadata": provider_field(typing.Any, default = None), - "installer": provider_field(typing.Any, default = None), # A filegroup that gets added to all python executables "runtime_library": provider_field(Dependency | None, default = None), # The fully qualified name of a function that handles invoking the @@ -85,16 +90,26 @@ PythonPlatformInfo = provider(fields = { "name": provider_field(typing.Any, default = None), }) +def get_package_style(ctx: AnalysisContext) -> PackageStyle: + if ctx.attrs.package_style != None: + return PackageStyle(ctx.attrs.package_style.lower()) + return PackageStyle(ctx.attrs._python_toolchain[PythonToolchainInfo].package_style) + def get_platform_attr( python_platform_info: PythonPlatformInfo, - cxx_platform_info: CxxPlatformInfo, + cxx_toolchain: Dependency, xs: list[(str, typing.Any)]) -> list[typing.Any]: """ Take a platform_* value, and the non-platform version, and concat into a list of values based on the cxx/python platform """ + if len(xs) == 0: + return [] + cxx_info = cxx_toolchain.get(CxxPlatformInfo) + if cxx_info == None: + fail("Cannot use platform attrs in a fat platform configuration") python_platform = python_platform_info.name - cxx_platform = cxx_platform_info.name + cxx_platform = cxx_info.name return by_platform([python_platform, cxx_platform], xs) python = struct( diff --git a/prelude/python/tools/BUCK b/prelude/python/tools/BUCK deleted file mode 100644 index 1e325372f54ae..0000000000000 --- a/prelude/python/tools/BUCK +++ /dev/null @@ -1,128 +0,0 @@ -prelude = native - -prelude.python_bootstrap_binary( - name = "extract", - main = "extract.py", - visibility = ["PUBLIC"], -) - -prelude.python_bootstrap_binary( - name = "create_manifest_for_source_dir", - main = 
"create_manifest_for_source_dir.py", - visibility = ["PUBLIC"], -) - -prelude.python_bootstrap_binary( - name = "make_source_db", - main = "make_source_db.py", - visibility = ["PUBLIC"], -) - -prelude.python_bootstrap_binary( - name = "make_source_db_no_deps", - main = "make_source_db_no_deps.py", - visibility = ["PUBLIC"], -) - -prelude.export_file( - name = "__test_main__.py", - src = "__test_main__.py", - visibility = ["PUBLIC"], -) - -prelude.python_bootstrap_binary( - name = "make_py_package_inplace.py", - main = "make_py_package_inplace.py", - visibility = ["PUBLIC"], -) - -prelude.export_file( - name = "run_inplace_lite.py.in", - src = "run_inplace_lite.py.in", -) - -prelude.export_file( - name = "run_inplace.py.in", - src = "run_inplace.py.in", -) - -prelude.command_alias( - name = "make_py_package_inplace", - args = [ - "--template", - "$(location :run_inplace.py.in)", - "--template-lite", - "$(location :run_inplace_lite.py.in)", - ], - exe = ":make_py_package_inplace.py", - visibility = ["PUBLIC"], -) - -prelude.python_bootstrap_binary( - name = "make_py_package_modules", - main = "make_py_package_modules.py", - visibility = ["PUBLIC"], -) - -prelude.export_file( - name = "compile.py", - src = "compile.py", - visibility = ["PUBLIC"], -) - -prelude.python_bootstrap_binary( - name = "generate_static_extension_info", - main = "generate_static_extension_info.py", - visibility = ["PUBLIC"], -) - -prelude.python_bootstrap_library( - name = "py38stdlib", - srcs = ["py38stdlib.py"], -) - -prelude.python_bootstrap_binary( - name = "parse_imports", - main = "parse_imports.py", - visibility = ["PUBLIC"], - deps = [":py38stdlib"], -) - -prelude.python_bootstrap_binary( - name = "traverse_dep_manifest", - main = "traverse_dep_manifest.py", - visibility = ["PUBLIC"], - deps = [":py38stdlib"], -) - -# Main file used for native python binaries -prelude.export_file( - name = "embedded_main.cpp", - src = "embedded_main.cpp", - visibility = ["PUBLIC"], -) - -# Custom importer for native linked `cxx_python_extension' targets -prelude.export_file( - name = "static_extension_utils.cpp", - src = "static_extension_utils.cpp", - visibility = ["PUBLIC"], -) - -prelude.export_file( - name = "static_extension_finder.py", - src = "static_extension_finder.py", - visibility = ["PUBLIC"], -) - -prelude.python_bootstrap_binary( - name = "make_py_package_manifest_module", - main = "make_py_package_manifest_module.py", - visibility = ["PUBLIC"], -) - -prelude.python_bootstrap_binary( - name = "fail_with_message", - main = "fail_with_message.py", - visibility = ["PUBLIC"], -) diff --git a/prelude/python/tools/BUCK.v2 b/prelude/python/tools/BUCK.v2 new file mode 100644 index 0000000000000..6463b5c018174 --- /dev/null +++ b/prelude/python/tools/BUCK.v2 @@ -0,0 +1,140 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +prelude = native + +prelude.python_bootstrap_binary( + name = "extract", + main = "extract.py", + visibility = ["PUBLIC"], +) + +prelude.python_bootstrap_binary( + name = "create_manifest_for_source_dir", + main = "create_manifest_for_source_dir.py", + visibility = ["PUBLIC"], +) + +prelude.python_bootstrap_binary( + name = "make_source_db", + main = "make_source_db.py", + visibility = ["PUBLIC"], +) + +prelude.python_bootstrap_binary( + name = "make_source_db_no_deps", + main = "make_source_db_no_deps.py", + visibility = ["PUBLIC"], +) + +prelude.export_file( + name = "__test_main__.py", + src = "__test_main__.py", + visibility = ["PUBLIC"], 
+) + +prelude.python_bootstrap_binary( + name = "make_py_package_inplace.py", + main = "make_py_package_inplace.py", + visibility = ["PUBLIC"], +) + +prelude.export_file( + name = "run_inplace_lite.py.in", + src = "run_inplace_lite.py.in", +) + +prelude.export_file( + name = "run_inplace.py.in", + src = "run_inplace.py.in", +) + +prelude.command_alias( + name = "make_py_package_inplace", + args = [ + "--template", + "$(location :run_inplace.py.in)", + "--template-lite", + "$(location :run_inplace_lite.py.in)", + ], + exe = ":make_py_package_inplace.py", + visibility = ["PUBLIC"], +) + +prelude.python_bootstrap_binary( + name = "make_py_package_modules", + main = "make_py_package_modules.py", + visibility = ["PUBLIC"], +) + +prelude.export_file( + name = "compile.py", + src = "compile.py", + visibility = ["PUBLIC"], +) + +prelude.python_bootstrap_binary( + name = "generate_static_extension_info", + main = "generate_static_extension_info.py", + visibility = ["PUBLIC"], +) + +prelude.python_bootstrap_library( + name = "py38stdlib", + srcs = ["py38stdlib.py"], +) + +prelude.python_bootstrap_binary( + name = "parse_imports", + main = "parse_imports.py", + visibility = ["PUBLIC"], + deps = [":py38stdlib"], +) + +prelude.python_bootstrap_binary( + name = "traverse_dep_manifest", + main = "traverse_dep_manifest.py", + visibility = ["PUBLIC"], + deps = [":py38stdlib"], +) + +# Main file used for native python binaries +prelude.export_file( + name = "embedded_main.cpp", + src = "embedded_main.cpp", + visibility = ["PUBLIC"], +) + +# Custom importer for native linked `cxx_python_extension' targets +prelude.export_file( + name = "static_extension_utils.cpp", + src = "static_extension_utils.cpp", + visibility = ["PUBLIC"], +) + +prelude.export_file( + name = "static_extension_finder.py", + src = "static_extension_finder.py", + visibility = ["PUBLIC"], +) + +prelude.python_bootstrap_binary( + name = "make_py_package_manifest_module", + main = "make_py_package_manifest_module.py", + visibility = ["PUBLIC"], +) + +prelude.python_bootstrap_binary( + name = "fail_with_message", + main = "fail_with_message.py", + visibility = ["PUBLIC"], +) + +prelude.python_bootstrap_binary( + name = "wheel", + main = "wheel.py", + visibility = ["PUBLIC"], +) diff --git a/prelude/python/tools/__test_main__.py b/prelude/python/tools/__test_main__.py index 1ce6a946f8de6..b8c3cb6c7dfcd 100644 --- a/prelude/python/tools/__test_main__.py +++ b/prelude/python/tools/__test_main__.py @@ -32,13 +32,9 @@ import time import traceback import unittest -import warnings +from importlib.machinery import PathFinder -with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=DeprecationWarning) - import imp - try: from StringIO import StringIO # type: ignore except ImportError: @@ -88,7 +84,7 @@ def include(self, path): return not self.omit(path) -class DebugWipeFinder: +class DebugWipeFinder(PathFinder): """ PEP 302 finder that uses a DebugWipeLoader for all files which do not need coverage @@ -97,28 +93,15 @@ class DebugWipeFinder: def __init__(self, matcher): self.matcher = matcher - def find_module(self, fullname, path=None): - _, _, basename = fullname.rpartition(".") - try: - fd, pypath, (_, _, kind) = imp.find_module(basename, path) - except Exception: - # Finding without hooks using the imp module failed. One reason - # could be that there is a zip file on sys.path. The imp module - # does not support loading from there. Leave finding this module to - # the others finders in sys.meta_path. 
+ def find_spec(self, fullname, path=None, target=None): + spec = super().find_spec(fullname, path=path, target=target) + if spec is None or spec.origin is None: return None - - if hasattr(fd, "close"): - fd.close() - if kind != imp.PY_SOURCE: + if not spec.origin.endswith(".py"): return None - if self.matcher.include(pypath): + if self.matcher.include(spec.origin): return None - """ - This is defined to match CPython's PyVarObject struct - """ - class PyVarObject(ctypes.Structure): _fields_ = [ ("ob_refcnt", ctypes.c_long), @@ -132,8 +115,9 @@ class DebugWipeLoader(SourceFileLoader): """ def get_code(self, fullname): - code = super(DebugWipeLoader, self).get_code(fullname) - if code: + code = super().get_code(fullname) + # This can segfault in 3.12 + if code and sys.version_info < (3, 12): # Ideally we'd do # code.co_lnotab = b'' # But code objects are READONLY. Not to worry though; we'll @@ -142,7 +126,9 @@ def get_code(self, fullname): code_impl.ob_size = 0 return code - return DebugWipeLoader(fullname, pypath) + if isinstance(spec.loader, SourceFileLoader): + spec.loader = DebugWipeLoader(fullname, spec.origin) + return spec def optimize_for_coverage(cov, include_patterns, omit_patterns): @@ -200,8 +186,7 @@ def fileno(self): return self._fileno -# pyre-fixme[11]: Annotation `unittest._TextTestResult` is not defined as a type. -class BuckTestResult(unittest._TextTestResult): +class BuckTestResult(unittest.TextTestResult): """ Our own TestResult class that outputs data in a format that can be easily parsed by buck's test runner. @@ -273,7 +258,14 @@ def stopTest(self, test): # test cases, and fall back to looking the test up from the suite # otherwise. if not hasattr(test, "_testMethodName"): - test = self._find_next_test(self._suite) + potential_test = self._find_next_test(self._suite) + + if potential_test is not None: + test = potential_test + elif hasattr(test, "id"): + # If the next test can't be found, this could be a failure in class teardown. Fall back + # to using the id, which is likely the test's method name. + test._testMethodName = test.id() self._results.append( { @@ -443,6 +435,30 @@ def getTestCaseNames(self, testCaseClass): matched.append(attrname) return matched + def loadTestsFromName(self, name, module=None): + """ + Tries to find and import the module from `name` and discover test cases inside. + + NOTE: this function is used by the unittest framework and our unittest + adapters to integrate with buck/tpx. + """ + suite = super().loadTestsFromName(name, module) + for test in suite: + if isinstance(test, unittest.loader._FailedTest): + # _FailedTest means that the test module couldn't be loaded + # (usually, because of a bad import). Instead of pretending to + # execute a synthetic test case + # `unittest.loader._FailedTest()` and reporting + # it to the downstream consumers, we should hard fail. + # When static listing is used, this will let TPX associate the + # failure with either the main test (for bundled execution) or + # individual test cases (regular execution) in a test target, + # and not with the synthetic _FailedTest case. + print(test._exception, file=sys.stderr) + sys.exit(1) + + return suite + class Loader: def __init__(self, modules, regex=None): @@ -672,11 +688,20 @@ def run(self): if self.options.list: for test in self.get_tests(test_suite): + # Python 3.12 changed the implementation of `TestCase.__str__`. + # We construct the name manually here to ensure consistency between + # Python versions. 
+ # Example: "test_basic (tests.test_object.TestAbsent)". + method_name = getattr(test, "_testMethodName", "") + cls = test.__class__ if self.options.list_format == "python": - name = str(test) + if method_name: + name = f"{method_name} ({cls.__module__}.{cls.__qualname__})" + else: + name = str(test) + elif self.options.list_format == "buck": - method_name = getattr(test, "_testMethodName", "") - name = _format_test_name(test.__class__, method_name) + name = _format_test_name(cls, method_name) else: raise Exception( "Bad test list format: %s" % (self.options.list_format,) @@ -772,12 +797,12 @@ def convert_to_diff_cov_str(self, analysis): analysis[3][-1] if len(analysis[3]) else 0, ) lines = ["N"] * numLines - for l in analysis[1]: - lines[l - 1] = "C" - for l in analysis[2]: - lines[l - 1] = "X" - for l in analysis[3]: - lines[l - 1] = "U" + for line in analysis[1]: + lines[line - 1] = "C" + for line in analysis[2]: + lines[line - 1] = "X" + for line in analysis[3]: + lines[line - 1] = "U" return "".join(lines) diff --git a/prelude/python/tools/compile.py b/prelude/python/tools/compile.py index 220161418467c..a9deee2a10aec 100644 --- a/prelude/python/tools/compile.py +++ b/prelude/python/tools/compile.py @@ -5,6 +5,8 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +# pyre-strict + """ Example usage: $ cat inputs.manifest @@ -14,30 +16,29 @@ out-dir/foo.pyc """ -# pyre-unsafe - import argparse import errno import json import os import sys from py_compile import compile, PycInvalidationMode +from typing import List if sys.version_info[0] == 3: import importlib import importlib.util - DEFAULT_FORMAT = importlib.util.cache_from_source("{pkg}/{name}.py") + DEFAULT_FORMAT: str = importlib.util.cache_from_source("{pkg}/{name}.py") else: - DEFAULT_FORMAT = "{pkg}/{name}.pyc" + DEFAULT_FORMAT: str = "{pkg}/{name}.pyc" -def get_py_path(module): +def get_py_path(module: str) -> str: return module.replace(".", os.sep) + ".py" -def get_pyc_path(module, fmt): +def get_pyc_path(module: str, fmt: str) -> str: try: package, name = module.rsplit(".", 1) except ValueError: @@ -51,7 +52,7 @@ def get_pyc_path(module, fmt): return os.path.join(*parts) -def _mkdirs(dirpath): +def _mkdirs(dirpath: str) -> None: try: os.makedirs(dirpath) except OSError as e: @@ -59,7 +60,7 @@ def _mkdirs(dirpath): raise -def main(argv): +def main(argv: List[str]) -> None: parser = argparse.ArgumentParser(fromfile_prefix_chars="@") parser.add_argument("-o", "--output", required=True) parser.add_argument( diff --git a/prelude/python/tools/create_manifest_for_source_dir.py b/prelude/python/tools/create_manifest_for_source_dir.py index 96b011f3e86be..4f086154c9b5a 100755 --- a/prelude/python/tools/create_manifest_for_source_dir.py +++ b/prelude/python/tools/create_manifest_for_source_dir.py @@ -6,6 +6,8 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +# pyre-strict + import argparse import json import os diff --git a/prelude/python/tools/extract.py b/prelude/python/tools/extract.py index 843241d8a96dd..15b52b07a1f41 100755 --- a/prelude/python/tools/extract.py +++ b/prelude/python/tools/extract.py @@ -6,6 +6,8 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
+# pyre-strict + """ Quick and dirty wrapper to extract zip files; python 3.6.2+ @@ -13,44 +15,159 @@ """ import argparse +import configparser +import glob +import json import os +import shutil import stat +import tarfile +import tempfile import zipfile from pathlib import Path +from typing import Optional + + +def strip_soabi_tag(path: Path) -> Optional[Path]: + """ + Helper to strip any SOABI tag from the given extension path. Returns `None` + if no stripping is performed. + """ + + suffixes = path.suffixes[-2:] + + # SOABI tagged extensions should have two suffixes. + if len(suffixes) != 2: + return None + + # Not an extension. + ext = "" + for ext in (".so", ".pyd"): + if suffixes[1] == ext: + break + else: + return None + + # TODO(agallagher): Is there a better way to detect these tags? + if not (suffixes[0].startswith(".cpython-") or suffixes[0] == ".abi3"): + return None + + return path.with_suffix("").with_suffix(ext) # shutil.unpack_archive calls zipfile.extract which does *not* preserve file attributes # (see https://bugs.python.org/issue15795, https://stackoverflow.com/questions/39296101/python-zipfile-removes-execute-permissions-from-binaries). # # We need to preserve at least the executable bit. -def extract_zip_with_permissions(src: Path, dst_dir: Path) -> None: - z = zipfile.ZipFile(src) - for info in z.infolist(): - outfile = z.extract(info.filename, dst_dir) +def extract(src: Path, dst_dir: Path, strip_soabi_tags: bool = False) -> None: + if src.suffixes[-2:] == [".tar", ".gz"]: + with tempfile.TemporaryDirectory() as tmp_dir: + with tarfile.open(src) as tf: + tf.extractall(tmp_dir) - execute_perms = (info.external_attr >> 16) & ( - stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH - ) - if execute_perms: - st = os.stat(outfile) - new_mode = stat.S_IMODE(st.st_mode | execute_perms) - if new_mode != st.st_mode: - os.chmod(outfile, new_mode) + # We expect the tgz to contain a single top-level dir with all the + # items to unpack. 
+ (path,) = glob.glob(os.path.join(tmp_dir, "*")) + for ent in os.listdir(path): + fsrc = os.path.join(path, ent) + fdst = Path(os.path.join(dst_dir, ent)) + soabi_less_dst = strip_soabi_tag(fdst) + if soabi_less_dst is not None: + fdst = soabi_less_dst + shutil.move(fsrc, fdst) + + else: + with zipfile.ZipFile(src) as z: + for info in z.infolist(): + outfile = Path(z.extract(info.filename, dst_dir)) + if strip_soabi_tags: + soabi_less_outfile = strip_soabi_tag(outfile) + if soabi_less_outfile is not None: + os.rename(outfile, soabi_less_outfile) + outfile = soabi_less_outfile + execute_perms = (info.external_attr >> 16) & ( + stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH + ) + if execute_perms: + st = os.stat(outfile) + new_mode = stat.S_IMODE(st.st_mode | execute_perms) + if new_mode != st.st_mode: + os.chmod(outfile, new_mode) def main() -> None: parser = argparse.ArgumentParser( - description="Extract .zip files to a directory in a cross platform manner" + description="Extract .zip/.tar.gz archives to a directory in a cross platform manner" ) parser.add_argument( "--output", type=Path, required=True, help="The directory to write to" ) + parser.add_argument("--strip-soabi-tags", action="store_true") + parser.add_argument("--entry-points", type=Path, help="The directory to write entry-point scripts to") + parser.add_argument( + "--cxx-header-dirs", + type=Path, + help="A file to write out inferred C++ include dirs to", + ) + parser.add_argument( + "--entry-points-manifest", type=Path, help="A file to write the entry-point manifest to" + ) parser.add_argument("src", type=Path, help="The archive to extract to --output") args = parser.parse_args() args.output.mkdir(parents=True, exist_ok=True) - extract_zip_with_permissions(args.src, args.output) + extract( + src=args.src, + dst_dir=args.output, + strip_soabi_tags=args.strip_soabi_tags, + ) + + # Infer C++ header dirs. + if args.cxx_header_dirs is not None: + with open(args.cxx_header_dirs, mode="w") as f: + for root, dirs, _files in os.walk(args.output): + root = os.path.relpath(root, args.output) + if "include" in dirs: + print(os.path.normpath(os.path.join(root, "include")), file=f) + + # Extract any "entry points" from the wheel, and generate scripts from them + # (just like `pip install` would do).
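# For context, entry_points.txt is plain configparser INI; a hypothetical wheel
# exposing one console script would contain:
#
#   [console_scripts]
#   mycli = mypkg.cli:main
#
# and the code below turns each `name = module:function` pair into an executable
# `mycli` wrapper script that imports `mypkg.cli` and calls `main()`.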
+ if args.entry_points is not None: + entry_points = glob.glob( + os.path.join(args.output, "*.dist-info", "entry_points.txt") + ) + os.makedirs(args.entry_points, exist_ok=True) + manifest = [] + if entry_points: + (entry_points,) = entry_points + config = configparser.ConfigParser() + config.read(entry_points) + if config.has_section("console_scripts"): + for name, entry_point in config.items("console_scripts"): + mod, func = entry_point.split(":") + path = os.path.join(args.entry_points, name) + manifest.append( + (name, path, os.path.relpath(entry_points, args.output)) + ) + with open(path, mode="w") as bf: + bf.write( + """\ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +import re +import sys +from {mod} import {func} +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\\.pyw|\\.exe)?$', '', sys.argv[0]) + sys.exit({func}()) +""".format( + mod=mod, func=func + ) + ) + os.chmod(path, 0o777) + with open(args.entry_points_manifest, mode="w") as f: + json.dump(manifest, f) if __name__ == "__main__": diff --git a/prelude/python/tools/fail_with_message.py b/prelude/python/tools/fail_with_message.py index dd06ee5a9d36f..bf4dba1343edc 100644 --- a/prelude/python/tools/fail_with_message.py +++ b/prelude/python/tools/fail_with_message.py @@ -6,6 +6,8 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +# pyre-strict + import sys from pathlib import Path diff --git a/prelude/python/tools/generate_static_extension_info.py b/prelude/python/tools/generate_static_extension_info.py index d0679a6089a72..da569f56d57fc 100644 --- a/prelude/python/tools/generate_static_extension_info.py +++ b/prelude/python/tools/generate_static_extension_info.py @@ -6,6 +6,8 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +# pyre-strict + import argparse import sys from typing import List diff --git a/prelude/python/tools/make_par/BUCK b/prelude/python/tools/make_par/BUCK index da10d3ec0c7c7..62d07e0989a2c 100644 --- a/prelude/python/tools/make_par/BUCK +++ b/prelude/python/tools/make_par/BUCK @@ -3,8 +3,12 @@ # as it is the only `TARGETS` (not `TARGETS.v2`) in the prelude. # Configuring the tools to do it right seemed more dangerous than just having a caveat on this one file. +load("@prelude//utils:source_listing.bzl", "source_listing") + oncall("build_infra") +source_listing() + export_file( name = "__run_lpar_main__.py", src = "__run_lpar_main__.py", diff --git a/prelude/python/tools/make_par/__run_lpar_main__.py b/prelude/python/tools/make_par/__run_lpar_main__.py index 13de11aaa521e..1a05d95abc227 100644 --- a/prelude/python/tools/make_par/__run_lpar_main__.py +++ b/prelude/python/tools/make_par/__run_lpar_main__.py @@ -5,14 +5,11 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -# -# Put everything inside an __invoke_main() function. -# This way anything we define won't pollute globals(), since runpy -# will propagate our globals() as to the user's main module. -# pyre-fixme[3]: Return type must be annotated. 
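The rewritten __invoke_main that follows drops the Buck1 fallbacks and delegates unconditionally to a runner named by FB_PAR_MAIN_RUNNER_MODULE / FB_PAR_MAIN_RUNNER_FUNCTION. A minimal sketch of such a runner, reconstructed from the removed Buck1 code path (module name hypothetical):

# hypothetical my_runner.py
import runpy
import sys
from importlib import import_module


def run_as_main(main_module, main_function):
    if main_function:
        mod = import_module(main_module)
        # Normally done by runpy._run_module_as_main; needed for multiprocessing.
        sys.modules["__main__"] = mod
        getattr(mod, main_function)()
    else:
        # Private API, but this mirrors what the removed code did.
        runpy._run_module_as_main(main_module, alter_argv=False)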
-def __invoke_main(): +# pyre-strict + + +def __invoke_main() -> None: import os - import runpy import sys module = os.getenv("FB_PAR_MAIN_MODULE") @@ -21,50 +18,14 @@ def __invoke_main(): sys.argv[0] = os.getenv("FB_LPAR_INVOKED_NAME", sys.argv[0]) del sys.path[0] - main_runner_module = os.getenv("FB_PAR_MAIN_RUNNER_MODULE") - main_runner_function = os.getenv("FB_PAR_MAIN_RUNNER_FUNCTION") - - if main_runner_module and main_runner_function: - from importlib import import_module - - mod = import_module(main_runner_module) - run_as_main = getattr(mod, main_runner_function) - run_as_main(module, main_function) - return - - #### BUCK1-ONLY CODE FOLLOWS #### - - # Allow users to decorate the main module. In normal Python invocations - # this can be done by prefixing the arguments with `-m decoratingmodule`. - # It's not that easy for par files. The startup script sets up `sys.path` - # from within the Python interpreter. Enable decorating the main module - # after `sys.path` has been setup by setting the PAR_MAIN_OVERRIDE - # environment variable. - decorate_main_module = os.environ.pop("PAR_MAIN_OVERRIDE", None) - if decorate_main_module: - # Pass the original main module as environment variable for the process. - # Allowing the decorating module to pick it up. - # pyre-fixme[6]: For 2nd argument expected `str` but got `Optional[str]`. - os.environ["PAR_MAIN_ORIGINAL"] = module - module = decorate_main_module - - if main_function: - assert module - from importlib import import_module - - mod = import_module(module) - main = getattr(mod, main_function) - # This is normally done by `runpy._run_module_as_main`, and is - # important to make multiprocessing work - sys.modules["__main__"] = mod - main() - return + main_runner_module = os.environ["FB_PAR_MAIN_RUNNER_MODULE"] + main_runner_function = os.environ["FB_PAR_MAIN_RUNNER_FUNCTION"] - del os - del sys + from importlib import import_module - # pyre-fixme[16]: Module `runpy` has no attribute `_run_module_as_main`. - runpy._run_module_as_main(module, False) + mod = import_module(main_runner_module) + run_as_main = getattr(mod, main_runner_function) + run_as_main(module, main_function) __invoke_main() diff --git a/prelude/python/tools/make_par/_lpar_bootstrap.sh.template b/prelude/python/tools/make_par/_lpar_bootstrap.sh.template index 0a87fdc9e6bb2..2e01a66bde474 100644 --- a/prelude/python/tools/make_par/_lpar_bootstrap.sh.template +++ b/prelude/python/tools/make_par/_lpar_bootstrap.sh.template @@ -19,7 +19,10 @@ export {lib_path_env}={ld_library_path} if [ -n "${{PYTHONPATH+SET}}" ]; then export FB_SAVED_PYTHONPATH=$PYTHONPATH fi -export PYTHONPATH=$BASE_DIR + +# The following expands to ":PAR_APPEND_PYTHONPATH" when $PAR_APPEND_PYTHONPATH is set +# This is important: we don't want a trailing colon in $PYTHONPATH. +export PYTHONPATH=$BASE_DIR${{PAR_APPEND_PYTHONPATH:+:$PAR_APPEND_PYTHONPATH}} if [ -n "${{PYTHONHOME+SET}}" ]; then export FB_SAVED_PYTHONHOME=$PYTHONHOME fi @@ -34,6 +37,10 @@ export FB_PAR_MAIN_RUNNER_FUNCTION="{main_runner_function}" export FB_PAR_RUNTIME_FILES=$BASE_DIR : ${{FB_LPAR_INVOKED_NAME:="$0"}} export FB_LPAR_INVOKED_NAME +# This environment variable is immediately unset on startup but will also appear +# in e.g. `multiprocessing` workers, and so serves as an audit trail back to +# the originating PAR (and can be read via e.g. `/proc//environ`). 
+export PAR_INVOKED_NAME_TAG="$FB_LPAR_INVOKED_NAME" {ld_preload} {env} exec {cmd} "$@" diff --git a/prelude/python/tools/make_par/sitecustomize.py b/prelude/python/tools/make_par/sitecustomize.py index 2d91abaf9a7aa..31c12b6575f2d 100644 --- a/prelude/python/tools/make_par/sitecustomize.py +++ b/prelude/python/tools/make_par/sitecustomize.py @@ -6,27 +6,30 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +# pyre-strict + +from __future__ import annotations + +import itertools import multiprocessing.util as mp_util import os import sys import threading +import warnings from importlib.machinery import PathFinder from importlib.util import module_from_spec lock = threading.Lock() -# pyre-fixme[3]: Return type must be annotated. -# pyre-fixme[2]: Parameter must be annotated. -def __patch_spawn(var_names, saved_env): +def __patch_spawn(var_names: list[str], saved_env: dict[str, str]) -> None: std_spawn = mp_util.spawnv_passfds # pyre-fixme[53]: Captured variable `std_spawn` is not annotated. # pyre-fixme[53]: Captured variable `saved_env` is not annotated. # pyre-fixme[53]: Captured variable `var_names` is not annotated. - # pyre-fixme[3]: Return type must be annotated. # pyre-fixme[2]: Parameter must be annotated. - def spawnv_passfds(path, args, passfds): + def spawnv_passfds(path, args, passfds) -> None | int: with lock: try: for var in var_names: @@ -43,18 +46,32 @@ def spawnv_passfds(path, args, passfds): mp_util.spawnv_passfds = spawnv_passfds -# pyre-fixme[3]: Return type must be annotated. -# pyre-fixme[2]: Parameter must be annotated. -def __clear_env(patch_spawn=True): +def __clear_env(patch_spawn: bool = True) -> None: saved_env = {} - darwin_vars = ("DYLD_LIBRARY_PATH", "DYLD_INSERT_LIBRARIES") - linux_vars = ("LD_LIBRARY_PATH", "LD_PRELOAD") - python_vars = ("PYTHONPATH",) + + var_names = [ + "PYTHONPATH", + # We use this env var to tag the process and its `multiprocessing` + # workers. It's important that we clear it out (so that unrelated sub- + # processes don't inherit it), but it can be read via + # `/proc//environ`. + "PAR_INVOKED_NAME_TAG", + ] if sys.platform == "darwin": - var_names = darwin_vars + python_vars + var_names.extend( + [ + "DYLD_LIBRARY_PATH", + "DYLD_INSERT_LIBRARIES", + ] + ) else: - var_names = linux_vars + python_vars + var_names.extend( + [ + "LD_LIBRARY_PATH", + "LD_PRELOAD", + ] + ) # Restore the original value of environment variables that we altered # as part of the startup process. @@ -71,12 +88,22 @@ def __clear_env(patch_spawn=True): __patch_spawn(var_names, saved_env) -# pyre-fixme[3]: Return type must be annotated. -def __passthrough_exec_module(): +def __startup__() -> None: + try: + # pyre-fixme[21]: Could not find module `__par__.__startup_function_loader__`. + from __par__.__startup_function_loader__ import load_startup_functions + + load_startup_functions() + except Exception: + warnings.warn("could not load startup functions", stacklevel=1) + + +def __passthrough_exec_module() -> None: # Delegate this module execution to the next module in the path, if any, # effectively making this sitecustomize.py a passthrough module.
+ paths = itertools.dropwhile(lambda p: not __file__.startswith(p), sys.path) spec = PathFinder.find_spec( - __name__, path=[p for p in sys.path if not __file__.startswith(p)] + __name__, path=[p for p in paths if not __file__.startswith(p)] ) if spec: mod = module_from_spec(spec) @@ -86,4 +113,5 @@ __clear_env() +__startup__() __passthrough_exec_module() diff --git a/prelude/python/tools/make_py_package_inplace.py b/prelude/python/tools/make_py_package_inplace.py index 7942b5678c112..9abccde90d899 100755 --- a/prelude/python/tools/make_py_package_inplace.py +++ b/prelude/python/tools/make_py_package_inplace.py @@ -6,6 +6,8 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +# pyre-strict + """ Create a bootstrapper pex for inplace python binaries @@ -121,11 +123,6 @@ def parse_args() -> argparse.Namespace: type=Path, help="Where to write the bootstrapper script to", ) - parser.add_argument( - "--add-multiprocessing-executable", - type=Path, - help="If set, where to write the multiprocessing executable", - ) parser.add_argument( "--native-libs-env-var", default=( ), help="The dynamic loader env used to find native library deps", ) + parser.add_argument( + "--native-library-runtime-path", + dest="native_library_runtime_paths", + default=[], + action="append", + help="Additional directories to add to the dynamic loader search path at runtime", + ) + parser.add_argument( + "-e", + "--runtime_env", + action="append", + default=[], + help="environment variables to set before launching the runtime. (e.g. -e FOO=BAR -e BAZ=QUX)", + ) # Compatibility with existing make_par scripts parser.add_argument("--passthrough", action="append", default=[]) + # No-op, added for compatibility with existing make_par scripts + parser.add_argument( + "--omnibus-debug-info", choices=["separate", "strip", "extract"] + ) return parser.parse_args() @@ -142,13 +157,18 @@ def parse_args() -> argparse.Namespace: def write_bootstrapper(args: argparse.Namespace) -> None: """Write the .pex bootstrapper script using a template""" - template = args.template_lite if args.use_lite else args.template + template = ( + args.template_lite + if (args.use_lite and not args.runtime_env) + else args.template + ) with open(template, "r", encoding="utf8") as fin: data = fin.read() # Because this can be invoked from other directories, find the relative path # from this .par to the modules dir, and use that. relative_modules_dir = os.path.relpath(args.modules_dir, args.output.parent) + native_lib_dirs = [relative_modules_dir] + args.native_library_runtime_paths # TODO(nmj): Remove this hack.
So, if arg0 in your shebang is a bash script # (like /usr/local/fbcode/platform007/bin/python3.7 on macs is) @@ -160,9 +180,9 @@ def write_bootstrapper(args: argparse.Namespace) -> None: # exclude it for now, because linux doesn't like multiple args # after /usr/bin/env - ld_preload = "None" + ld_preload = None if args.preload_libraries: - ld_preload = repr(":".join(p.name for p in args.preload_libraries)) + ld_preload = [p.name for p in args.preload_libraries] new_data = data.replace("", "/usr/bin/env " + str(args.python)) new_data = new_data.replace("", "") @@ -179,32 +199,18 @@ def write_bootstrapper(args: argparse.Namespace) -> None: new_data = new_data.replace("", main_runner_module) new_data = new_data.replace("", main_runner_function) - if args.add_multiprocessing_executable: - # Handle enabling multiprocessing for inplace par - new_data = new_data.replace("", str(True)) - env_vals_for_subprocesses = [ - f"PYTHONPATH=$(dirname $0)/{relative_modules_dir}", - f"LD_PRELOAD={ld_preload}", - f"{args.native_libs_env_var}=$(dirname $0)/{relative_modules_dir}", - ] - # env_str should contain PYTHONPATH, LD_LIBRARY_PATH, and LD_PRELOAD (if set) - env_str = " ".join(env_vals_for_subprocesses) - script_contents = f'#!/usr/bin/env bash\n\n{env_str} {str(args.python)} "$@"' - mp_executable = args.add_multiprocessing_executable - with open(mp_executable, "w") as handle: - handle.write(script_contents) - os.chmod(mp_executable, 0o755) - relative_mp = os.path.relpath(mp_executable.as_posix(), args.output.parent) - new_data = new_data.replace("", relative_mp) - else: - new_data = new_data.replace("", str(args.python)) - new_data = new_data.replace("", str(False)) - # Things that are only required for the full template new_data = new_data.replace("", args.native_libs_env_var) - new_data = new_data.replace("", repr(relative_modules_dir)) + new_data = new_data.replace("", repr(native_lib_dirs)) new_data = new_data.replace("", "LD_PRELOAD") - new_data = new_data.replace("", ld_preload) + new_data = new_data.replace("", repr(ld_preload)) + + if args.runtime_env: + runtime_env = dict(e.split("=", maxsplit=1) for e in args.runtime_env) + env = f"os.environ.update({runtime_env!r})" + else: + env = "" + new_data = new_data.replace("", env) args.output.parent.mkdir(parents=True, exist_ok=True) with open(args.output, "w", encoding="utf8") as fout: diff --git a/prelude/python/tools/make_py_package_manifest_module.py b/prelude/python/tools/make_py_package_manifest_module.py index 3c60373f2d9d1..89427237b62eb 100755 --- a/prelude/python/tools/make_py_package_manifest_module.py +++ b/prelude/python/tools/make_py_package_manifest_module.py @@ -6,6 +6,8 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +# pyre-strict + """ Generate a __manifest__.py module containing build metadata for a Python package. """ @@ -13,7 +15,7 @@ import argparse import json from pathlib import Path -from typing import Optional, Set +from typing import Dict, Optional def parse_args() -> argparse.Namespace: @@ -44,9 +46,9 @@ def parse_args() -> argparse.Namespace: def path_to_module(path: str) -> Optional[str]: - if not path.endswith(".py"): - return None - return path[:-3].replace("/", ".") + for suffix in (".py", ".so", ".pyd"): + if path.endswith(suffix): + return path[: -len(suffix)].replace("/", ".").replace("\\", ".") def main() -> None: @@ -57,16 +59,22 @@ def main() -> None: f"Output path '{output}' already exists, refusing to overwrite." 
) - modules: Set[str] = set() + modules: Dict[str, str] = {} for module_manifest_file in args.module_manifests: with open(module_manifest_file) as f: - for pkg_path, *_ in json.load(f): - modules.add(pkg_path) + for pkg_path, _, origin_desc in json.load(f): + module = path_to_module(pkg_path) + if module: + modules[module] = origin_desc # Add artificial __init__.py files like in make_py_package_modules.py for parent in Path(pkg_path).parents: if parent == Path("") or parent == Path("."): continue - modules.add(str(parent / "__init__.py")) + path = str(parent / "__init__.py") + module = path_to_module(path) + if module and module not in modules: + modules[module] = origin_desc + entries = {} if args.manifest_entries: with open(args.manifest_entries) as f: @@ -77,7 +85,9 @@ def main() -> None: ) if "modules" in entries: raise ValueError("'modules' can't be a key in manifest entries") - entries["modules"] = sorted(filter(None, (path_to_module(m) for m in modules))) + sorted_modules = sorted(modules.items()) + entries["modules"] = [m[0] for m in sorted_modules] + entries["origins"] = tuple(m[1] for m in sorted_modules) output.write_text( "\n".join((f"{key} = {repr(value)}" for key, value in entries.items())) ) diff --git a/prelude/python/tools/make_py_package_modules.py b/prelude/python/tools/make_py_package_modules.py index 07e247df10860..db591ec7152f8 100755 --- a/prelude/python/tools/make_py_package_modules.py +++ b/prelude/python/tools/make_py_package_modules.py @@ -6,6 +6,8 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +# pyre-strict + """ Create the link tree for inplace Python binaries. diff --git a/prelude/python/tools/make_source_db.py b/prelude/python/tools/make_source_db.py index ef7f638328a9d..a89d50efb5680 100755 --- a/prelude/python/tools/make_source_db.py +++ b/prelude/python/tools/make_source_db.py @@ -6,6 +6,8 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +# pyre-strict + """ Creates a Python Source DB JSON file containing both a rule's immediate sources and the sources of all transitive dependencies (e.g. for use with Pyre). @@ -34,19 +36,18 @@ } """ -# pyre-unsafe - import argparse import json import sys +from typing import List, Tuple -def _load(path): +def _load(path: str) -> List[Tuple[str, str, str]]: with open(path) as f: return json.load(f) -def main(argv): +def main(argv: List[str]) -> None: parser = argparse.ArgumentParser(fromfile_prefix_chars="@") parser.add_argument("--output", type=argparse.FileType("w"), default=sys.stdout) parser.add_argument("--sources") diff --git a/prelude/python/tools/make_source_db_no_deps.py b/prelude/python/tools/make_source_db_no_deps.py index d764f8fe57ff4..4493274eb1ff3 100644 --- a/prelude/python/tools/make_source_db_no_deps.py +++ b/prelude/python/tools/make_source_db_no_deps.py @@ -6,6 +6,8 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +# pyre-strict + """ Creates a Python Source DB JSON file from Python manifest JSON file (e.g. for use with Pyre). @@ -28,18 +30,15 @@ import argparse import json import sys +from typing import List, Tuple -# pyre-fixme[3]: Return type must be annotated. -# pyre-fixme[2]: Parameter must be annotated. -def _load(path): +def _load(path: str) -> List[Tuple[str, str, str]]: with open(path) as f: return json.load(f) -# pyre-fixme[3]: Return type must be annotated. -# pyre-fixme[2]: Parameter must be annotated. 
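The module manifests consumed above are JSON lists of (destination, artifact, origin) triples; a small sketch of how main() now derives the parallel `modules` / `origins` entries (values illustrative):

# One manifest entry: (packaged path, built artifact, originating target).
pkg_path, _, origin_desc = ["mypkg/util.py", "buck-out/v2/gen/util.py", "//mypkg:lib"]

# path_to_module() strips .py/.so/.pyd and normalizes both separators:
#   "mypkg/util.py"  -> "mypkg.util"
#   "mypkg/ext.so"   -> "mypkg.ext"
#   "mypkg\sub\m.py" -> "mypkg.sub.m"
# __manifest__.py then carries `modules` (sorted names) and `origins` (their
# originating targets) side by side.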
-def main(argv): +def main(argv: List[str]) -> None: parser = argparse.ArgumentParser(fromfile_prefix_chars="@") parser.add_argument("--output", type=argparse.FileType("w"), default=sys.stdout) parser.add_argument("sources") diff --git a/prelude/python/tools/parse_imports.py b/prelude/python/tools/parse_imports.py index a0c2bf9ac44c6..1e5e259af4bb6 100644 --- a/prelude/python/tools/parse_imports.py +++ b/prelude/python/tools/parse_imports.py @@ -6,6 +6,8 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +# pyre-strict + import argparse import ast import json @@ -114,9 +116,7 @@ def visit(self, node: ast.AST) -> None: try: return super().visit(node) except AttributeError as exc: - logger.error( - "Got %r when parsing %s from %s", exc, ast.dump(node), self.path - ) + logger.error(f"Got {exc} when parsing {ast.dump(node)} from {self.path}") def visit_Module(self, node: ast.Module) -> None: self._top_level = set(node.body) diff --git a/prelude/python/tools/py38stdlib.py b/prelude/python/tools/py38stdlib.py index 30052528fd98d..f5d33a6b484e1 100644 --- a/prelude/python/tools/py38stdlib.py +++ b/prelude/python/tools/py38stdlib.py @@ -5,6 +5,8 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +# pyre-strict + # This is list is "borrowed" from https://github.com/amyreese/stdlibs from typing import FrozenSet diff --git a/prelude/python/tools/run_inplace.py.in b/prelude/python/tools/run_inplace.py.in index 6c362bba8dcc1..8ea96bae17ca6 100644 --- a/prelude/python/tools/run_inplace.py.in +++ b/prelude/python/tools/run_inplace.py.in @@ -12,11 +12,11 @@ main_module = "" main_function = "" modules_dir = "" native_libs_env_var = "" -native_libs_dir = +native_libs_dirs = native_libs_preload_env_var = "" native_libs_preload = interpreter_flags = "" -mp_executable="" +unc_prefix = "\\\\?\\" import os import platform @@ -29,13 +29,26 @@ os.environ["PAR_LAUNCH_TIMESTAMP"] = str(time.time()) dirpath = os.path.dirname(os.path.realpath(__file__)) +# Enable long path support on Windows +if platform.system() == "Windows" and not dirpath.startswith(unc_prefix): + dirpath = unc_prefix + dirpath + env_vals_to_restore = {} # Update the environment variable for the dynamic loader to the native # libraries location. -if native_libs_dir is not None: - old_native_libs_dir = os.environ.get(native_libs_env_var) - os.environ[native_libs_env_var] = os.path.join(dirpath, native_libs_dir) - env_vals_to_restore[native_libs_env_var] = old_native_libs_dir +if native_libs_dirs is not None: + old_native_libs_dirs = os.environ.get(native_libs_env_var) + os.environ[native_libs_env_var] = os.pathsep.join([ + os.path.join(dirpath, native_libs_dir) + for native_libs_dir in native_libs_dirs + ]) + env_vals_to_restore[native_libs_env_var] = old_native_libs_dirs +if os.environ.get("PAR_APPEND_LD_LIBRARY_PATH") is not None: + os.environ[native_libs_env_var] = ( + (os.environ[native_libs_env_var] + ":" + os.environ["PAR_APPEND_LD_LIBRARY_PATH"]) + if os.environ.get(native_libs_env_var) is not None + else os.environ["PAR_APPEND_LD_LIBRARY_PATH"] + ) # Update the environment variable for the dynamic loader to find libraries # to preload. @@ -44,10 +57,26 @@ if native_libs_preload is not None: env_vals_to_restore[native_libs_preload_env_var] = old_native_libs_preload # On macos, preloaded libs are found via paths. 
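# (On Linux, LD_PRELOAD entries may be bare sonames that the loader resolves
# via LD_LIBRARY_PATH, whereas DYLD_INSERT_LIBRARIES generally wants real
# paths.) For example, with native_libs_dirs == ["libs", "more_libs"] and a
# preload of "libfoo.so", the Darwin branch below probes, in order:
#   <dirpath>/libs/libfoo.so
#   <dirpath>/more_libs/libfoo.so
# and keeps the first path that exists, failing if none do.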
- os.environ[native_libs_preload_env_var] = ":".join( - os.path.join(dirpath, native_libs_dir, l) - for l in native_libs_preload.split(":") - ) + if platform.system() == "Darwin": + full_path_preloads = [] + for lib in native_libs_preload: + for native_libs_dir in native_libs_dirs: + fpath = os.path.join(dirpath, native_libs_dir, lib) + if os.path.exists(fpath): + full_path_preloads.append(fpath) + break + else: + raise Exception( + "cannot find preload lib {!r} in paths {!r}".format( + lib, + native_libs_dirs, + ), + ) + native_libs_preload = full_path_preloads + + os.environ[native_libs_preload_env_var] = os.pathsep.join(native_libs_preload) + + # Note: this full block of code will be included as the argument to Python, # and will be the first thing that shows up in the process arguments as displayed @@ -73,20 +102,13 @@ def __run(): assert sys.argv[0] == '-c' sys.argv[0] = {sys.argv[0]!r} - # Replace the working directory with location of the modules directory. + # Remove the working directory. assert sys.path[0] == '' del sys.path[0] import os import runpy - if and sys.platform.startswith('linux'): - import multiprocessing - c1 = multiprocessing.get_context("spawn") - c1.set_executable({os.path.join(dirpath, mp_executable)!r}) - c2 = multiprocessing.get_context("forkserver") - c2.set_executable({os.path.join(dirpath, mp_executable)!r}) - def setenv(var, val): if val is None: os.environ.pop(var, None) @@ -99,6 +121,18 @@ def __run(): restoreenv({env_vals_to_restore!r}) + # On windows, adjust os.add_dll_directory and PATH (for `ctypes.util.find_library`) + # so that native libraries can be found by the dynamic linker or ctypes + if sys.platform.startswith("win"): + path = os.environ.get("PATH", "") + for native_libs_dir in {native_libs_dirs!r}: + d = os.path.join({dirpath!r}, native_libs_dir) + os.add_dll_directory(d) + if path and not path.endswith(os.pathsep): + path += os.pathsep + path += d + setenv("PATH", path) + from import as run_as_main run_as_main({main_module!r}, {main_function!r}) @@ -134,10 +168,22 @@ if ( ): args[1:1] = ["-X", "importtime"] +# Save the variables that will be restored back into the environment by +# fbcode/buck2/prelude/python/tools/make_par/sitecustomize.py +for env in ("PYTHONPATH", "LD_LIBRARY_PATH", "LD_PRELOAD", + "DYLD_LIBRARY_PATH", "DYLD_INSERT_LIBRARIES"): + if env in os.environ: + os.environ["FB_SAVED_" + env] = os.environ[env] + path = os.path.join(dirpath, modules_dir) -if "PYTHONPATH" in os.environ: - path += os.pathsep + os.environ["PYTHONPATH"] os.environ["PYTHONPATH"] = path +if "PAR_APPEND_PYTHONPATH" in os.environ: + os.environ["PYTHONPATH"] += ":" + os.environ["PAR_APPEND_PYTHONPATH"] + +# This environment variable is immediately unset on startup but will also appear +# in e.g. `multiprocessing` workers, and so serves as an audit trail back to +# the originating PAR (and can be read via e.g. `/proc//environ`). +os.environ["PAR_INVOKED_NAME_TAG"] = sys.argv[0] if platform.system() == "Windows": # exec on Windows is not true exec - there is only 'spawn' ('CreateProcess'). diff --git a/prelude/python/tools/run_inplace_lite.py.in b/prelude/python/tools/run_inplace_lite.py.in index 43e160bf79037..ead8c5e6d6f95 100644 --- a/prelude/python/tools/run_inplace_lite.py.in +++ b/prelude/python/tools/run_inplace_lite.py.in @@ -11,6 +11,7 @@ main_module = "" main_function = "" modules_dir = "" +unc_prefix = "\\\\?\\" # Wrap everything in a private function to prevent globals being captured by # the `runpy._run_module_as_main` below. 
@@ -31,8 +32,22 @@ def __run(): dirpath = os.path.dirname(os.path.realpath(__file__)) + # Enable long path support on Windows + if os.name == "nt" and not dirpath.startswith(unc_prefix): + dirpath = unc_prefix + dirpath + + # Save the variables that will be restored back into the environment by + # fbcode/buck2/prelude/python/tools/make_par/sitecustomize.py + for env in ("PYTHONPATH", "LD_LIBRARY_PATH", "LD_PRELOAD", + "DYLD_LIBRARY_PATH", "DYLD_INSERT_LIBRARIES"): + if env in os.environ: + os.environ["FB_SAVED_" + env] = os.environ[env] + + path = os.path.join(dirpath, modules_dir) + os.environ["PYTHONPATH"] = path + # Replace the working directory with location of the modules directory. - sys.path[0] = os.path.join(dirpath, modules_dir) + sys.path[0] = path site.execsitecustomize() diff --git a/prelude/python/tools/sourcedb_merger/BUCK b/prelude/python/tools/sourcedb_merger/BUCK deleted file mode 100644 index 7f090f90ea9dc..0000000000000 --- a/prelude/python/tools/sourcedb_merger/BUCK +++ /dev/null @@ -1,37 +0,0 @@ -prelude = native - -prelude.python_bootstrap_library( - name = "library", - srcs = [ - "inputs.py", - "legacy_outputs.py", - "outputs.py", - ], - visibility = ["PUBLIC"], -) - -prelude.python_bootstrap_binary( - name = "merge", - main = "merge.py", - deps = [ - ":library", - ], - visibility = ["PUBLIC"], -) - -prelude.python_bootstrap_binary( - name = "legacy_merge", - main = "legacy_merge.py", - deps = [ - ":library", - ], - visibility = ["PUBLIC"], -) - -# Run the test suite with this command: -# buck2 run prelude//python/tools/sourcedb_merger:tests --target-platforms prelude//platforms:default -prelude.sh_binary( - name = "tests", - main = "tests/main.sh", - resources = glob(["**/*.py"]), -) diff --git a/prelude/python/tools/sourcedb_merger/BUCK.v2 b/prelude/python/tools/sourcedb_merger/BUCK.v2 new file mode 100644 index 0000000000000..2521ab5a741d3 --- /dev/null +++ b/prelude/python/tools/sourcedb_merger/BUCK.v2 @@ -0,0 +1,43 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +prelude = native + +prelude.python_bootstrap_library( + name = "library", + srcs = [ + "inputs.py", + "legacy_outputs.py", + "outputs.py", + ], + visibility = ["PUBLIC"], +) + +prelude.python_bootstrap_binary( + name = "merge", + main = "merge.py", + visibility = ["PUBLIC"], + deps = [ + ":library", + ], +) + +prelude.python_bootstrap_binary( + name = "legacy_merge", + main = "legacy_merge.py", + visibility = ["PUBLIC"], + deps = [ + ":library", + ], +) + +# Run the test suite with this command: +# buck2 run prelude//python/tools/sourcedb_merger:tests --target-platforms prelude//platforms:default +prelude.sh_binary( + name = "tests", + main = "tests/main.sh", + resources = glob(["**/*.py"]), +) diff --git a/prelude/python/tools/sourcedb_merger/inputs.py b/prelude/python/tools/sourcedb_merger/inputs.py index 8ccc78304cf60..2f298a25882b4 100644 --- a/prelude/python/tools/sourcedb_merger/inputs.py +++ b/prelude/python/tools/sourcedb_merger/inputs.py @@ -5,6 +5,9 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
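Both inplace templates stash the loader-related variables they are about to clobber under an FB_SAVED_ prefix so that sitecustomize.py can restore them inside the interpreter; a minimal sketch of the handshake (paths illustrative):

import os

# Bootstrap side: remember the inherited value before overriding it.
if "PYTHONPATH" in os.environ:
    os.environ["FB_SAVED_PYTHONPATH"] = os.environ["PYTHONPATH"]
os.environ["PYTHONPATH"] = "/path/to/modules_dir"  # placeholder

# sitecustomize side: put the saved value back, or drop the override entirely.
saved = os.environ.pop("FB_SAVED_PYTHONPATH", None)
if saved is not None:
    os.environ["PYTHONPATH"] = saved
else:
    os.environ.pop("PYTHONPATH", None)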
+# pyre-strict + + import dataclasses import json import pathlib diff --git a/prelude/python/tools/sourcedb_merger/legacy_merge.py b/prelude/python/tools/sourcedb_merger/legacy_merge.py index ecd6c61e1f973..0eb16221f9b7b 100644 --- a/prelude/python/tools/sourcedb_merger/legacy_merge.py +++ b/prelude/python/tools/sourcedb_merger/legacy_merge.py @@ -6,6 +6,9 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +# pyre-strict + + import argparse import pathlib import sys diff --git a/prelude/python/tools/sourcedb_merger/legacy_outputs.py b/prelude/python/tools/sourcedb_merger/legacy_outputs.py index aefebb22ba459..ab225f9b05ced 100644 --- a/prelude/python/tools/sourcedb_merger/legacy_outputs.py +++ b/prelude/python/tools/sourcedb_merger/legacy_outputs.py @@ -5,6 +5,9 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +# pyre-strict + + import dataclasses import json import pathlib diff --git a/prelude/python/tools/sourcedb_merger/merge.py b/prelude/python/tools/sourcedb_merger/merge.py index 6f641c040fe62..f4dd4845b7f47 100644 --- a/prelude/python/tools/sourcedb_merger/merge.py +++ b/prelude/python/tools/sourcedb_merger/merge.py @@ -6,6 +6,9 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +# pyre-strict + + import argparse import pathlib import sys diff --git a/prelude/python/tools/sourcedb_merger/outputs.py b/prelude/python/tools/sourcedb_merger/outputs.py index 7a7e4f88e2b47..2565f75c5de17 100644 --- a/prelude/python/tools/sourcedb_merger/outputs.py +++ b/prelude/python/tools/sourcedb_merger/outputs.py @@ -5,6 +5,9 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +# pyre-strict + + import dataclasses import json import pathlib diff --git a/prelude/python/tools/sourcedb_merger/tests/__init__.py b/prelude/python/tools/sourcedb_merger/tests/__init__.py index b404f6ed69308..2444a8d0f439e 100644 --- a/prelude/python/tools/sourcedb_merger/tests/__init__.py +++ b/prelude/python/tools/sourcedb_merger/tests/__init__.py @@ -5,6 +5,32 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +# pyre-strict + + +# pyre-fixme[21]: Could not find name `BuildMapLoadError` in `tests.inputs_test`. +# pyre-fixme[21]: Could not find name `PartialBuildMap` in `tests.inputs_test`. +# pyre-fixme[21]: Could not find name `Target` in `tests.inputs_test`. +# pyre-fixme[21]: Could not find name `TargetEntry` in `tests.inputs_test`. +# pyre-fixme[21]: Could not find name `load_targets_and_build_maps_from_json` in +# `tests.inputs_test`. from .inputs_test import * # noqa + +# pyre-fixme[21]: Could not find name `ConflictInfo` in `tests.legacy_output_test`. +# pyre-fixme[21]: Could not find name `ConflictMap` in `tests.legacy_output_test`. +# pyre-fixme[21]: Could not find name `FullBuildMap` in `tests.legacy_output_test`. +# pyre-fixme[21]: Could not find name `MergeResult` in `tests.legacy_output_test`. +# pyre-fixme[21]: Could not find name `PartialBuildMap` in `tests.legacy_output_test`. +# pyre-fixme[21]: Could not find name `SourceInfo` in `tests.legacy_output_test`. +# pyre-fixme[21]: Could not find name `Target` in `tests.legacy_output_test`. +# pyre-fixme[21]: Could not find name `TargetEntry` in `tests.legacy_output_test`. +# pyre-fixme[21]: Could not find name `merge_partial_build_maps` in +# `tests.legacy_output_test`. 
from .legacy_output_test import * # noqa + +# pyre-fixme[21]: Could not find name `PartialBuildMap` in `tests.outputs_test`. +# pyre-fixme[21]: Could not find name `Target` in `tests.outputs_test`. +# pyre-fixme[21]: Could not find name `TargetEntry` in `tests.outputs_test`. +# pyre-fixme[21]: Could not find name `merge_partial_build_maps` in +# `tests.outputs_test`. from .outputs_test import * # noqa diff --git a/prelude/python/tools/sourcedb_merger/tests/inputs_test.py b/prelude/python/tools/sourcedb_merger/tests/inputs_test.py index c671e1b618694..75a8ce7e3ead0 100644 --- a/prelude/python/tools/sourcedb_merger/tests/inputs_test.py +++ b/prelude/python/tools/sourcedb_merger/tests/inputs_test.py @@ -5,6 +5,9 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +# pyre-strict + + import contextlib import json import os diff --git a/prelude/python/tools/sourcedb_merger/tests/legacy_output_test.py b/prelude/python/tools/sourcedb_merger/tests/legacy_output_test.py index 267a135f21f5c..3d587cbb87e75 100644 --- a/prelude/python/tools/sourcedb_merger/tests/legacy_output_test.py +++ b/prelude/python/tools/sourcedb_merger/tests/legacy_output_test.py @@ -5,6 +5,9 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +# pyre-strict + + import unittest from typing import Mapping diff --git a/prelude/python/tools/sourcedb_merger/tests/outputs_test.py b/prelude/python/tools/sourcedb_merger/tests/outputs_test.py index 91affd8053bc3..cb147a3dbaa0a 100644 --- a/prelude/python/tools/sourcedb_merger/tests/outputs_test.py +++ b/prelude/python/tools/sourcedb_merger/tests/outputs_test.py @@ -5,6 +5,9 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +# pyre-strict + + import unittest from typing import Mapping diff --git a/prelude/python/tools/static_extension_finder.py b/prelude/python/tools/static_extension_finder.py index 9b278d3b7d68e..c4c1171f7c983 100644 --- a/prelude/python/tools/static_extension_finder.py +++ b/prelude/python/tools/static_extension_finder.py @@ -5,8 +5,8 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -import sys -from importlib.machinery import ModuleSpec +# pyre-strict + # Add a try except to force eager importing try: @@ -17,6 +17,9 @@ class StaticExtensionFinder: + # pyre-fixme + ModuleSpec = None + @classmethod # pyre-fixme[3]: Return type must be annotated. # pyre-fixme[2]: Parameter must be annotated. @@ -25,16 +28,22 @@ def find_spec(cls, fullname, path, target=None): Use fullname to look up the PyInit function in the main binary. Returns None if not present. This allows importing CExtensions that have been statically linked in. """ + if not fullname: return None if not _check_module(fullname): return None - spec = ModuleSpec( + spec = cls.ModuleSpec( fullname, StaticExtensionLoader, origin="static-extension", is_package=False ) return spec -# pyre-fixme[3]: Return type must be annotated. -def _initialize(): +def _initialize() -> None: + # These imports are here to avoid triggering circular dependencies;
see S389486 + import sys + from importlib.machinery import ModuleSpec + + StaticExtensionFinder.ModuleSpec = ModuleSpec + sys.meta_path.insert(0, StaticExtensionFinder) diff --git a/prelude/python/tools/static_extension_utils.cpp b/prelude/python/tools/static_extension_utils.cpp index f35e2a682068e..7e620935a593b 100644 --- a/prelude/python/tools/static_extension_utils.cpp +++ b/prelude/python/tools/static_extension_utils.cpp @@ -24,15 +24,13 @@ namespace { static PyObject* _create_module(PyObject* self, PyObject* spec) { PyObject* name; PyObject* mod; - const char* oldcontext; name = PyObject_GetAttrString(spec, "name"); if (name == nullptr) { return nullptr; } - // TODO private api usage - mod = _PyImport_FindExtensionObject(name, name); + mod = PyImport_GetModule(name); if (mod || PyErr_Occurred()) { Py_DECREF(name); Py_XINCREF(mod); @@ -58,7 +56,15 @@ static PyObject* _create_module(PyObject* self, PyObject* spec) { PyObject* modules = nullptr; PyModuleDef* def; - oldcontext = _Py_PackageContext; + +#if PY_VERSION_HEX >= 0x030C0000 + // Use our custom Python 3.12 C-API to call the statically linked module init + // function + mod = _Ci_PyImport_CallInitFuncWithContext(namestr.c_str(), initfunc); +#else + // In Python 3.10 (and earlier) we need to handle package context swapping + // ourselves + const char* oldcontext = _Py_PackageContext; _Py_PackageContext = namestr.c_str(); if (_Py_PackageContext == nullptr) { _Py_PackageContext = oldcontext; @@ -67,6 +73,7 @@ static PyObject* _create_module(PyObject* self, PyObject* spec) { } mod = initfunc(); _Py_PackageContext = oldcontext; +#endif if (mod == nullptr) { Py_DECREF(name); return nullptr; @@ -107,7 +114,6 @@ static PyObject* _create_module(PyObject* self, PyObject* spec) { static PyObject* _exec_module(PyObject* self, PyObject* module) { PyModuleDef* def; - int res; // TODO errors if (!PyModule_Check(module)) { @@ -121,8 +127,9 @@ static PyObject* _exec_module(PyObject* self, PyObject* module) { Py_RETURN_NONE; } - res = PyModule_ExecDef(module, def); - // TODO check res + // TODO check this result + PyModule_ExecDef(module, def); + Py_RETURN_NONE; } diff --git a/prelude/python/tools/traverse_dep_manifest.py b/prelude/python/tools/traverse_dep_manifest.py index cc7c5e45bba1a..6e73c9414156f 100644 --- a/prelude/python/tools/traverse_dep_manifest.py +++ b/prelude/python/tools/traverse_dep_manifest.py @@ -5,6 +5,8 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +# pyre-strict + import argparse import json diff --git a/prelude/python/tools/wheel.py b/prelude/python/tools/wheel.py new file mode 100644 index 0000000000000..0dc378cdd5c15 --- /dev/null +++ b/prelude/python/tools/wheel.py @@ -0,0 +1,149 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +import argparse +import configparser +import contextlib +import io +import json +import os +import sys +import zipfile +from types import TracebackType +from typing import cast, Dict, List, Optional, Set, Tuple, Type + + +# pyre-fixme[24]: Generic type `AbstractContextManager` expects 1 type parameter. 
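A quick usage sketch for the WheelBuilder defined below (names and paths invented); METADATA, WHEEL, entry_points.txt, and RECORD are emitted automatically when the context manager closes:

with WheelBuilder(
    name="mypkg",
    version="1.0",
    output="mypkg-1.0-py3-none-any.whl",
    entry_points={"console_scripts": {"mycli": "mypkg.cli:main"}},
) as whl:
    whl.writestr("mypkg/__init__.py", "")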
+class WheelBuilder(contextlib.AbstractContextManager): + + def __init__( + self, + *, + name: str, + version: str, + output: str, + entry_points: Optional[Dict[str, str]] = None, + metadata: Optional[List[Tuple[str, str]]] = None, + ) -> None: + self._name = name + self._version = version + self._record: list[str] = [] + self._outf = zipfile.ZipFile(output, mode="w") + self._entry_points: Optional[Dict[str, str]] = entry_points + self._metadata: List[Tuple[str, str]] = [] + self._metadata.append(("Name", name)) + self._metadata.append(("Version", version)) + if metadata is not None: + self._metadata.extend(metadata) + + def _dist_info(self, *path: str) -> str: + return os.path.join(f"{self._name}-{self._version}.dist-info", *path) + + def _data(self, *path: str) -> str: + return os.path.join(f"{self._name}-{self._version}.data", *path) + + def write(self, dst: str, src: str) -> None: + self._record.append(dst) + self._outf.write(filename=src, arcname=dst) + + def write_data(self, dst: str, src: str) -> None: + self.write(self._data(dst), src) + + def writestr(self, dst: str, contents: str) -> None: + self._record.append(dst) + self._outf.writestr(zinfo_or_arcname=dst, data=contents) + + def _write_record(self) -> None: + record = self._dist_info("RECORD") + self._outf.writestr( + record, "".join(["{},,\n".format(f) for f in (self._record + [record])]) + ) + + def close(self) -> None: + self.writestr( + self._dist_info("METADATA"), + "".join(["{}: {}\n".format(key, val) for key, val in self._metadata]), + ) + self.writestr( + self._dist_info("WHEEL"), + """\ +Wheel-Version: 1.0 +""", + ) + + # Write entry points. + if self._entry_points is not None: + config = configparser.ConfigParser() + config.read_dict(cast(Dict[str, Dict[str, str]], self._entry_points)) + with io.TextIOWrapper( + self._outf.open(self._dist_info("entry_points.txt"), mode="w"), + encoding="utf-8", + ) as f: + config.write(f) + + self._write_record() + self._outf.close() + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_value: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + self.close() + + +def main(argv: List[str]) -> None: + parser = argparse.ArgumentParser() + parser.add_argument("--output", required=True) + parser.add_argument("--name", required=True) + parser.add_argument("--version", required=True) + parser.add_argument("--entry-points", default=None) + parser.add_argument("--srcs", action="append", default=[]) + parser.add_argument("--metadata", nargs=2, action="append", default=[]) + parser.add_argument("--data", nargs=2, action="append", default=[]) + args = parser.parse_args(argv[1:]) + + pkgs: Set[str] = set() + pkgs_with_init = set() + + def _add_pkg(pkg: str) -> None: + pkgs.add(pkg) + parent = os.path.dirname(pkg) + if parent: + _add_pkg(parent) + + with WheelBuilder( + name=args.name, + version=args.version, + output=args.output, + entry_points=( + json.loads(args.entry_points) if args.entry_points is not None else None + ), + metadata=args.metadata, + ) as whl: + for src in args.srcs: + with open(src) as f: + manifest = json.load(f) + for dst, src, *_ in manifest: + if dst.endswith((".py", ".so")): + pkg = os.path.dirname(dst) + _add_pkg(pkg) + if os.path.basename(dst) == "__init__.py": + pkgs_with_init.add(pkg) + whl.write(dst, src) + + for dst, src in args.data: + whl.write_data(dst, src) + + for pkg in pkgs - pkgs_with_init: + whl.writestr(os.path.join(pkg, "__init__.py"), "") + + +sys.exit(main(sys.argv)) diff --git 
a/prelude/python/typecheck/batch.bxl b/prelude/python/typecheck/batch.bxl new file mode 100644 index 0000000000000..174094b245c70 --- /dev/null +++ b/prelude/python/typecheck/batch.bxl @@ -0,0 +1,75 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//utils:utils.bzl", "flatten") +load("@prelude//python/sourcedb/filter.bxl", "do_filter") + +def _check_targets_sharded(ctx: bxl.Context, checked_targets: typing.Any) -> typing.Any: + type_check_shard_labels = { + label.configured_target(): target.providers()[DefaultInfo] + .sub_targets["typecheck"][DefaultInfo] + .sub_targets + for label, target in ctx.analysis(checked_targets).items() + } + shard_sub_targets = [ + target.with_sub_target(["typecheck", shard]) + for target, shards in type_check_shard_labels.items() + for shard in shards + ] + build_result = ctx.build(shard_sub_targets) + build_output = ctx.output.ensure_multiple(build_result) + + output = {label.raw_target(): [] for label in build_output} + for label, artifacts in build_output.items(): + output[label.raw_target()] += [artifact.rel_path() for artifact in artifacts] + + return output + +def _check_targets_batched(ctx: bxl.Context, checked_targets: typing.Any) -> typing.Any: + build_result = ctx.build( + [target.label.with_sub_target("typecheck") for target in checked_targets], + ) + output = ctx.output.ensure_multiple(build_result) + + return { + label.raw_target(): [artifact.rel_path() for artifact in artifacts] + for label, artifacts in output.items() + } + +def check_targets(ctx: bxl.Context, targets: typing.Any, enable_sharding: bool) -> None: + checked_targets = ctx.configured_targets(do_filter(ctx.uquery(), targets, exclude_targets_with_special_labels = False)) + if enable_sharding: + artifacts = _check_targets_sharded(ctx, checked_targets) + else: + artifacts = _check_targets_batched(ctx, checked_targets) + + ctx.output.print_json( + { + "artifacts": artifacts, + "root": ctx.root(), + }, + ) + +def _run_entry_point(ctx: bxl.Context) -> None: + targets = flatten(ctx.cli_args.target) + check_targets(ctx, targets, ctx.cli_args.enable_sharding) + +run = bxl_main( + doc = "Run [typecheck] on a set of targets or target patterns.", + impl = _run_entry_point, + cli_args = { + "enable-sharding": cli_args.bool( + default = False, + doc = "Shard type checking within each target", + ), + "target": cli_args.list( + cli_args.target_expr( + doc = "Target pattern to run type checking on", + ), + ), + }, +) diff --git a/prelude/python/typecheck/batch_files.bxl b/prelude/python/typecheck/batch_files.bxl new file mode 100644 index 0000000000000..39d4bef730787 --- /dev/null +++ b/prelude/python/typecheck/batch_files.bxl @@ -0,0 +1,28 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
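Both BXL entry points (the target-based one above and the file-based one below) print the same JSON payload; roughly, assuming illustrative targets and paths:

result = {
    "artifacts": {
        "root//python/foo:lib": ["gen/python/foo/type_check_result.json"],
    },
    "root": "/abs/path/to/project",
}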
+ +load(":batch.bxl", "check_targets") + +def _run_entry_point(ctx: bxl.Context) -> None: + targets = ctx.uquery().owner(ctx.cli_args.source) + check_targets(ctx, targets, ctx.cli_args.enable_sharding) + +run = bxl_main( + doc = "Run [typecheck] on the owning targets of given files.", + impl = _run_entry_point, + cli_args = { + "enable-sharding": cli_args.bool( + default = False, + doc = "Shard type checking within each target", + ), + "source": cli_args.list( + cli_args.string( + doc = "Files whose owning targets need to be checked", + ), + ), + }, +) diff --git a/prelude/python/typing.bzl b/prelude/python/typing.bzl new file mode 100644 index 0000000000000..3e7865e97177a --- /dev/null +++ b/prelude/python/typing.bzl @@ -0,0 +1,213 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//python:python.bzl", "PythonLibraryInfo") +load( + ":manifest.bzl", + "ManifestInfo", # @unused Used as a type + "create_manifest_for_entries", +) +load(":python.bzl", "PythonLibraryManifestsTSet") + +DEFAULT_PY_VERSION = "3.10" +DEFAULT_SHARDING_ENABLED = True + +# Best-effort guess on what the host sys.platform is +def get_default_sys_platform() -> str | None: + os_info = host_info().os + if os_info.is_linux: + return "linux" + elif os_info.is_macos: + return "darwin" + elif os_info.is_windows: + return "win32" + return None + +def _create_all_dep_manifests( + source_manifests: list[Artifact], + dep_manifests: typing.Any) -> typing.Any: + return source_manifests + [dep for dep in dep_manifests.traverse() if dep] + +def _create_batched_type_check( + ctx: AnalysisContext, + executable: RunInfo, + typeshed_manifest: Artifact, + py_version: str | None, + source_manifests: list[Artifact], + dep_manifests: typing.Any, + hidden: typing.Any, + is_sharded_fallback: bool) -> Artifact: + # Create input configs + input_config = { + "dependencies": dep_manifests, + "py_version": py_version or DEFAULT_PY_VERSION, + "sources": source_manifests, + "system_platform": get_default_sys_platform(), + "typeshed": typeshed_manifest, + } + + file_suffix = "_sharding_fallback" if is_sharded_fallback else "" + + input_file = ctx.actions.write_json( + "type_check_config{}.json".format(file_suffix), + input_config, + with_inputs = True, + ) + output_file = ctx.actions.declare_output("type_check_result{}.json".format(file_suffix)) + cmd = cmd_args( + executable, + input_file, + "--output", + output_file.as_output(), + hidden = hidden, + ) + + identifier = "_sharding_fallback" if is_sharded_fallback else "batched" + ctx.actions.run(cmd, category = "type_check", identifier = identifier) + + return output_file + +def _create_sharded_type_check( + ctx: AnalysisContext, + executable: RunInfo, + typeshed_manifest: Artifact, + py_version: str | None, + source_manifests: list[Artifact], + source_artifacts: list[typing.Any], + dep_manifests: typing.Any, + hidden: typing.Any, + sharding_enabled: bool | None) -> dict[str, list[DefaultInfo]]: + if sharding_enabled == False or not DEFAULT_SHARDING_ENABLED: + return { + "shard_default": [DefaultInfo(default_output = _create_batched_type_check( + ctx, + executable, + typeshed_manifest, + py_version, + source_manifests, + dep_manifests, + hidden, + True, + ))], + } + + commands = {} + output_files = [] + all_dep_manifests = 
_create_all_dep_manifests(source_manifests, dep_manifests) + for shard in source_artifacts: + artifact_project_path, artifact_cell_path = shard + sanitized_path = artifact_cell_path.replace("/", "+") + shard_name = "shard_{}".format(sanitized_path) + + shard_manifest = create_manifest_for_entries( + ctx, + shard_name, + [(artifact_cell_path, artifact_project_path, "sharding")], + ) + + # Create input configs + input_config = { + "dependencies": all_dep_manifests, + "py_version": py_version or DEFAULT_PY_VERSION, + "sources": [shard_manifest.manifest], + "system_platform": get_default_sys_platform(), + "typeshed": typeshed_manifest, + } + + input_file_name = "type_checking_config_shard_{}.json".format(sanitized_path) + output_file_name = "type_check_result_shard_{}.json".format(sanitized_path) + input_file = ctx.actions.write_json( + input_file_name, + input_config, + with_inputs = True, + ) + output_file = ctx.actions.declare_output(output_file_name) + output_files.append(output_file) + + cmd = cmd_args( + executable, + input_file, + "--output", + output_file.as_output(), + hidden = hidden, + ) + ctx.actions.run(cmd, category = "type_check", identifier = shard_name) + commands[shard_name] = [DefaultInfo(default_output = output_file)] + + return commands + +def create_per_target_type_check( + ctx: AnalysisContext, + executable: RunInfo, + srcs: ManifestInfo | None, + deps: list[PythonLibraryInfo], + typeshed: ManifestInfo | None, + py_version: str | None, + typing_enabled: bool, + sharding_enabled: bool | None = None) -> DefaultInfo: + if not typing_enabled: + # Use empty dict to signal that no type checking was performed. + output_file = ctx.actions.write_json("type_check_result.json", {}) + sharded_output_file = ctx.actions.write_json( + "sharded_type_check_result.json", + {}, + ) + return DefaultInfo( + default_output = output_file, + sub_targets = { + "shard_default": [DefaultInfo(default_output = sharded_output_file)], + }, + ) + + hidden = [] + + # Dep artifacts + dep_manifest_tset = ctx.actions.tset( + PythonLibraryManifestsTSet, + children = [d.manifests for d in deps], + ) + dep_manifests = dep_manifest_tset.project_as_args("source_type_manifests") + hidden.append(dep_manifest_tset.project_as_args("source_type_artifacts")) + + # Typeshed artifacts + if typeshed != None: + hidden.extend([a for a, _ in typeshed.artifacts]) + typeshed_manifest = typeshed.manifest + else: + typeshed_manifest = None + + # Source artifacts + source_manifests = [] + source_artifacts = [] + if srcs != None: + source_manifests = [srcs.manifest] + source_artifacts = srcs.artifacts + hidden.extend([a for a, _ in srcs.artifacts]) + + return DefaultInfo( + default_output = _create_batched_type_check( + ctx, + executable, + typeshed_manifest, + py_version, + source_manifests, + dep_manifests, + hidden, + False, + ), + sub_targets = _create_sharded_type_check( + ctx, + executable, + typeshed_manifest, + py_version, + source_manifests, + source_artifacts, + dep_manifests, + hidden, + sharding_enabled, + ), + ) diff --git a/prelude/python_bootstrap/python_bootstrap.bzl b/prelude/python_bootstrap/python_bootstrap.bzl index 49c48febdc8a0..ef628945dc3af 100644 --- a/prelude/python_bootstrap/python_bootstrap.bzl +++ b/prelude/python_bootstrap/python_bootstrap.bzl @@ -5,6 +5,8 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
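For reference, the config that _create_batched_type_check in typing.bzl above writes for the type checker is shaped roughly like this (paths illustrative):

type_check_config = {
    "dependencies": ["dep_manifest.json"],  # source-type manifests of all deps
    "py_version": "3.10",                   # DEFAULT_PY_VERSION when unset
    "sources": ["source_manifest.json"],
    "system_platform": "linux",             # best-effort host guess, may be None
    "typeshed": "typeshed_manifest.json",   # or None when no typeshed is wired up
}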
+load("@prelude//utils:utils.bzl", "flatten") + PythonBootstrapSources = provider(fields = {"srcs": provider_field(typing.Any, default = None)}) PythonBootstrapToolchainInfo = provider(fields = {"interpreter": provider_field(typing.Any, default = None)}) @@ -14,7 +16,7 @@ def python_bootstrap_library_impl(ctx: AnalysisContext) -> list[Provider]: output = ctx.actions.symlinked_dir("__{}__".format(ctx.attrs.name), tree) return [ DefaultInfo(default_output = output), - PythonBootstrapSources(srcs = ctx.attrs.srcs), + PythonBootstrapSources(srcs = dedupe(flatten([ctx.attrs.srcs] + [dep[PythonBootstrapSources].srcs for dep in ctx.attrs.deps]))), ] def python_bootstrap_binary_impl(ctx: AnalysisContext) -> list[Provider]: @@ -30,7 +32,7 @@ def python_bootstrap_binary_impl(ctx: AnalysisContext) -> list[Provider]: for dep in ctx.attrs.deps: dep_srcs = dep[PythonBootstrapSources].srcs for src in dep_srcs: - if src.short_path in run_tree_recorded_deps: + if src.short_path in run_tree_recorded_deps and src != run_tree_inputs[src.short_path]: original_dep = run_tree_recorded_deps[src.short_path] fail("dependency `{}` and `{}` both declare a source file named `{}`, consider renaming one of these files to avoid collision".format(original_dep.label, dep.label, src.short_path)) run_tree_inputs[src.short_path] = src @@ -41,15 +43,18 @@ def python_bootstrap_binary_impl(ctx: AnalysisContext) -> list[Provider]: interpreter = ctx.attrs._python_bootstrap_toolchain[PythonBootstrapToolchainInfo].interpreter - run_args = cmd_args() if ctx.attrs._win_python_wrapper != None: - run_args.add(ctx.attrs._win_python_wrapper[RunInfo]) - run_args.add(run_tree) - run_args.add(interpreter) - run_args.add(output) + run_args = cmd_args( + ctx.attrs._win_python_wrapper[RunInfo], + run_tree, + interpreter, + output, + ) else: - run_args.add("/usr/bin/env") - run_args.add(cmd_args(run_tree, format = "PYTHONPATH={}")) - run_args.add(interpreter) - run_args.add(output) + run_args = cmd_args( + "/usr/bin/env", + cmd_args(run_tree, format = "PYTHONPATH={}"), + interpreter, + output, + ) return [DefaultInfo(default_output = output), RunInfo(args = run_args)] diff --git a/prelude/python_bootstrap/tools/BUCK b/prelude/python_bootstrap/tools/BUCK deleted file mode 100644 index b895ef1a52b01..0000000000000 --- a/prelude/python_bootstrap/tools/BUCK +++ /dev/null @@ -1,8 +0,0 @@ -prelude = native - -prelude.sh_binary( - name = "win_python_wrapper", - main = "win_python_wrapper.bat", - visibility = ["PUBLIC"], - target_compatible_with = ["config//os:windows"], -) diff --git a/prelude/python_bootstrap/tools/BUCK.v2 b/prelude/python_bootstrap/tools/BUCK.v2 new file mode 100644 index 0000000000000..e3fb697482f53 --- /dev/null +++ b/prelude/python_bootstrap/tools/BUCK.v2 @@ -0,0 +1,14 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +prelude = native + +prelude.sh_binary( + name = "win_python_wrapper", + main = "win_python_wrapper.bat", + target_compatible_with = ["config//os:windows"], + visibility = ["PUBLIC"], +) diff --git a/prelude/python_bootstrap/tools/win_python_wrapper.bat b/prelude/python_bootstrap/tools/win_python_wrapper.bat index 8aa8c4a25a4e7..f2c8b0cfb79b8 100644 --- a/prelude/python_bootstrap/tools/win_python_wrapper.bat +++ b/prelude/python_bootstrap/tools/win_python_wrapper.bat @@ -17,5 +17,5 @@ setlocal enabledelayedexpansion set args=;;;;;;%* set args=!args:;;;;;;%1 =! 
-set PYTHONPATH=%1 +set PYTHONPATH=%~1 %args% diff --git a/prelude/remote_file.bzl b/prelude/remote_file.bzl index c85ad2097b09a..0d54189d47c15 100644 --- a/prelude/remote_file.bzl +++ b/prelude/remote_file.bzl @@ -6,7 +6,8 @@ # of this source tree. load("@prelude//:http_file.bzl", "http_file_shared") -load("@prelude//utils:utils.bzl", "expect", "value_or") +load("@prelude//utils:expect.bzl", "expect") +load("@prelude//utils:utils.bzl", "value_or") _ROOT = "https://maven.thefacebook.com/nexus/content/groups/public" diff --git a/prelude/resources.bzl b/prelude/resources.bzl index 59f4ef5a1185f..89126a65fb401 100644 --- a/prelude/resources.bzl +++ b/prelude/resources.bzl @@ -5,20 +5,20 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -load("@prelude//utils:arglike.bzl", "ArgLike") # @unused Used as a type +load("@prelude//:artifacts.bzl", "ArtifactOutputs") # Resources for transitive deps, shared by C++ and Rust. ResourceInfo = provider(fields = { # A map containing all resources from transitive dependencies. The keys # are rule labels and the values are maps of resource names (the name used # to lookup the resource at runtime) and the actual resource artifact. - "resources": provider_field(typing.Any, default = None), # {"label": {str: ("artifact", ["hidden"])}} + "resources": provider_field(dict[Label, dict[str, ArtifactOutputs]]), }) def gather_resources( label: Label, - resources: dict[str, (Artifact, list[ArgLike])] = {}, - deps: list[Dependency] = []) -> dict[Label, dict[str, (Artifact, list[ArgLike])]]: + resources: dict[str, ArtifactOutputs] = {}, + deps: list[Dependency] = []) -> dict[Label, dict[str, ArtifactOutputs]]: """ Return the resources for this rule and its transitive deps. """ @@ -40,14 +40,14 @@ def create_resource_db( ctx: AnalysisContext, name: str, binary: Artifact, - resources: dict[str, (Artifact, list[ArgLike])]) -> Artifact: + resources: dict[str, ArtifactOutputs]) -> Artifact: """ Generate a resource DB for resources for the given binary, relativized to the binary's working directory. """ db = { - name: cmd_args(resource, delimiter = "").relative_to(binary, parent = 1) - for (name, (resource, _other)) in resources.items() + name: cmd_args(resource.default_output, delimiter = "", relative_to = (binary, 1)) + for (name, resource) in resources.items() } return ctx.actions.write_json(name, db) diff --git a/prelude/rules.bzl b/prelude/rules.bzl index 803f7d00d0ccc..d132966efa570 100644 --- a/prelude/rules.bzl +++ b/prelude/rules.bzl @@ -5,6 +5,8 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +load("@prelude//:buck2_compatibility.bzl", "BUCK2_COMPATIBILITY_ATTRIB_NAME", "BUCK2_COMPATIBILITY_ATTRIB_TYPE", "check_buck2_compatibility") +load("@prelude//apple:apple_platforms.bzl", "APPLE_PLATFORMS_KEY") load("@prelude//configurations:rules.bzl", _config_implemented_rules = "implemented_rules") load("@prelude//decls/common.bzl", "prelude_rule") load("@prelude//is_full_meta_repo.bzl", "is_full_meta_repo") @@ -21,7 +23,7 @@ def _unimplemented_impl(name): # some features disabled. 
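    # partial() binds the rule name here so the eventual "unimplemented"
    # failure can report which rule was invoked.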
    return partial(_unimplemented, name)
 
-def _mk_rule(rule_spec: typing.Any):
+def _mk_rule(rule_spec: typing.Any, extra_attrs: dict[str, typing.Any] = dict(), impl_override: [typing.Callable, None] = None, **kwargs):
     name = rule_spec.name
     attributes = rule_spec.attrs
@@ -36,22 +38,26 @@ def _mk_rule(rule_spec: typing.Any):
         if toolchain_attr in attributes:
             fat_platform_compatible = False
 
+    # Add a buck2_compatibility attribute to all rules
+    extra_attrs[BUCK2_COMPATIBILITY_ATTRIB_NAME] = BUCK2_COMPATIBILITY_ATTRIB_TYPE
+
     # Fat platforms is an idea specific to our toolchains, so it doesn't apply
     # to open source. Ideally this restriction would be done at the toolchain level.
     if not is_full_meta_repo():
         fat_platform_compatible = True
 
     attributes = dict(attributes)
+    attributes.update(extra_attrs)
     if not fat_platform_compatible:
         # copy so we don't try to change the passed-in object
-        attributes["_cxx_toolchain_target_configuration"] = attrs.dep(default = "fbcode//buck2/platform/execution:fat_platform_incompatible")
+        attributes["_cxx_toolchain_target_configuration"] = attrs.dep(default = "prelude//platforms:fat_platform_incompatible")
 
     # Add _apple_platforms to all rules so that we may query the target platform to use until we support configuration
     # modifiers and can use them to set the configuration to use for operations.
-    # Map of string identifer to platform.
-    attributes["_apple_platforms"] = attrs.dict(key = attrs.string(), value = attrs.dep(), sorted = False, default = {})
+    # Map of string identifier to platform.
+    attributes[APPLE_PLATFORMS_KEY] = attrs.dict(key = attrs.string(), value = attrs.dep(), sorted = False, default = {})
 
-    extra_args = {}
+    extra_args = dict(kwargs)
     cfg = transitions.get(name)
     if cfg != None:
         extra_args["cfg"] = cfg
@@ -79,17 +85,26 @@ def _mk_rule(rule_spec: typing.Any):
         impl = extra_impl
     if not impl:
         impl = _unimplemented_impl(name)
+    if impl_override != None:
+        impl = impl_override
 
     if rule_spec.uses_plugins != None:
         extra_args["uses_plugins"] = rule_spec.uses_plugins
 
+    extra_args.setdefault("is_configuration_rule", name in _config_implemented_rules)
+    extra_args.setdefault("is_toolchain_rule", name in toolchain_rule_names)
     return rule(
-        impl = impl,
+        impl = buck2_compatibility_check_wrapper(impl),
         attrs = attributes,
-        is_configuration_rule = name in _config_implemented_rules,
-        is_toolchain_rule = name in toolchain_rule_names,
         **extra_args
     )
 
+def buck2_compatibility_check_wrapper(impl) -> typing.Callable:
+    def buck2_compatibility_shim(ctx: AnalysisContext) -> [list[Provider], Promise]:
+        check_buck2_compatibility(ctx)
+        return impl(ctx)
+
+    return buck2_compatibility_shim
+
 def _flatten_decls():
     decls = {}
     for decl_set in rule_decl_records:
@@ -130,3 +145,9 @@ rules = {rule.name: _mk_rule(rule) for rule in _declared_rules.values()}
 
 # The rules are accessed by doing module.name, so we have to put them on the correct module.
 load_symbols(rules)
+
+# TODO(akrieger): Remove this and instead refactor to allow impl bzl files to export attrs.
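# A hypothetical usage sketch of clone_rule (rule and impl names invented for
# illustration):
#
#   my_cxx_library = clone_rule(
#       "cxx_library",
#       extra_attrs = {"extra_codegen": attrs.bool(default = False)},
#       impl_override = _my_cxx_library_impl,
#   )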
+def clone_rule(rule: str, extra_attrs: dict[str, typing.Any] = dict(), impl_override = None, **kwargs):
+    if rule not in _declared_rules:
+        fail("Tried to clone rule {}, which does not exist".format(rule))
+    return _mk_rule(_declared_rules[rule], extra_attrs, impl_override, **kwargs)
diff --git a/prelude/rules_impl.bzl b/prelude/rules_impl.bzl
index a2b67c58563fd..e1095f1a8e99b 100644
--- a/prelude/rules_impl.bzl
+++ b/prelude/rules_impl.bzl
@@ -5,95 +5,62 @@
 # License, Version 2.0 found in the LICENSE-APACHE file in the root directory
 # of this source tree.
 
-# Android
 load("@prelude//android:android.bzl", _android_extra_attributes = "extra_attributes", _android_implemented_rules = "implemented_rules")
 load("@prelude//android:configuration.bzl", "is_building_android_binary_attr")
-
-# Apple
 load("@prelude//apple:apple_rules_impl.bzl", _apple_extra_attributes = "extra_attributes", _apple_implemented_rules = "implemented_rules")
-
-# Configuration
 load("@prelude//configurations:rules.bzl", _config_extra_attributes = "extra_attributes", _config_implemented_rules = "implemented_rules")
-
-# C#
 load("@prelude//csharp:csharp.bzl", "csharp_library_impl", "prebuilt_dotnet_library_impl")
-
-# C++ - LLVM
 load("@prelude//cxx:bitcode.bzl", "llvm_link_bitcode_impl")
 load("@prelude//cxx:cxx.bzl", "cxx_binary_impl", "cxx_library_impl", "cxx_precompiled_header_impl", "cxx_test_impl", "prebuilt_cxx_library_impl")
 load("@prelude//cxx:cxx_toolchain.bzl", "cxx_toolchain_extra_attributes", "cxx_toolchain_impl")
 load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxPlatformInfo", "CxxToolchainInfo")
-
-# C++
 load("@prelude//cxx:headers.bzl", "CPrecompiledHeaderInfo", "HeaderMode")
+load("@prelude//cxx:link_groups_types.bzl", "LINK_GROUP_MAP_ATTR")
 load("@prelude//cxx:prebuilt_cxx_library_group.bzl", "prebuilt_cxx_library_group_impl")
-load("@prelude//cxx/user:link_group_map.bzl", "link_group_map_attr")
-
-# Erlang
+load("@prelude//cxx:windows_resource.bzl", "windows_resource_impl")
 load("@prelude//erlang:erlang.bzl", _erlang_implemented_rules = "implemented_rules")
-
-# Git
 load("@prelude//git:git_fetch.bzl", "git_fetch_impl")
-
-# Go
-load("@prelude//go:cgo_library.bzl", "cgo_library_impl")
 load("@prelude//go:coverage.bzl", "GoCoverageMode")
 load("@prelude//go:go_binary.bzl", "go_binary_impl")
 load("@prelude//go:go_exported_library.bzl", "go_exported_library_impl")
 load("@prelude//go:go_library.bzl", "go_library_impl")
+load("@prelude//go:go_stdlib.bzl", "go_stdlib_impl")
 load("@prelude//go:go_test.bzl", "go_test_impl")
-
-# Haskell
-load("@prelude//haskell:haskell.bzl", "HaskellLibraryProvider", "haskell_binary_impl", "haskell_library_impl", "haskell_prebuilt_library_impl")
+load("@prelude//go/transitions:defs.bzl", "asan_attr", "cgo_enabled_attr", "coverage_mode_attr", "go_binary_transition", "go_exported_library_transition", "go_library_transition", "go_stdlib_transition", "go_test_transition", "race_attr", "tags_attr")
+load("@prelude//go_bootstrap:go_bootstrap.bzl", "go_bootstrap_binary_impl")
+load("@prelude//haskell:haskell.bzl", "haskell_binary_impl", "haskell_library_impl", "haskell_prebuilt_library_impl")
 load("@prelude//haskell:haskell_ghci.bzl", "haskell_ghci_impl")
 load("@prelude//haskell:haskell_haddock.bzl", "haskell_haddock_impl")
 load("@prelude//haskell:haskell_ide.bzl", "haskell_ide_impl")
-
-# Http archive
+load("@prelude//haskell:library_info.bzl", "HaskellLibraryProvider")
 load("@prelude//http_archive:http_archive.bzl", "http_archive_impl")
-
-# Java
 load("@prelude//java:java.bzl",
_java_extra_attributes = "extra_attributes", _java_implemented_rules = "implemented_rules") - -# JavaScript load("@prelude//js:js.bzl", _js_extra_attributes = "extra_attributes", _js_implemented_rules = "implemented_rules") - -# Julia load("@prelude//julia:julia.bzl", _julia_extra_attributes = "extra_attributes", _julia_implemented_rules = "implemented_rules") - -# Kotlin load("@prelude//kotlin:kotlin.bzl", _kotlin_extra_attributes = "extra_attributes", _kotlin_implemented_rules = "implemented_rules") load("@prelude//linking:execution_preference.bzl", "link_execution_preference_attr") - -# Linking load("@prelude//linking:link_info.bzl", "LinkOrdering") - -# Lua +load("@prelude//linking:types.bzl", "Linkage") load("@prelude//lua:cxx_lua_extension.bzl", "cxx_lua_extension_impl") load("@prelude//lua:lua_binary.bzl", "lua_binary_impl") load("@prelude//lua:lua_library.bzl", "lua_library_impl") - -# OCaml +load("@prelude//matlab:matlab.bzl", _matlab_extra_attributes = "extra_attributes", _matlab_implemented_rules = "implemented_rules") load("@prelude//ocaml:attrs.bzl", _ocaml_extra_attributes = "ocaml_extra_attributes") load("@prelude//ocaml:ocaml.bzl", "ocaml_binary_impl", "ocaml_library_impl", "ocaml_object_impl", "ocaml_shared_impl", "prebuilt_ocaml_library_impl") - -# Python load("@prelude//python:cxx_python_extension.bzl", "cxx_python_extension_impl") load("@prelude//python:prebuilt_python_library.bzl", "prebuilt_python_library_impl") +load("@prelude//python:python.bzl", "PythonLibraryInfo") load("@prelude//python:python_binary.bzl", "python_binary_impl") load("@prelude//python:python_library.bzl", "python_library_impl") load("@prelude//python:python_needed_coverage_test.bzl", "python_needed_coverage_test_impl") load("@prelude//python:python_test.bzl", "python_test_impl") - -# Python Bootstrap load("@prelude//python_bootstrap:python_bootstrap.bzl", "PythonBootstrapSources", "python_bootstrap_binary_impl", "python_bootstrap_library_impl") - -# Zip file load("@prelude//zip_file:zip_file.bzl", _zip_file_extra_attributes = "extra_attributes", _zip_file_implemented_rules = "implemented_rules") - -# Rule declarations +load("@prelude//apple/user/apple_resource_transition.bzl", "apple_resource_transition") +load("@prelude//apple/user/target_sdk_version_transition.bzl", "target_sdk_version_transition") load("@prelude//decls/android_rules.bzl", "android_rules") -load("@prelude//decls/common.bzl", "IncludeType", "LinkableDepType", "Linkage", "buck") +load("@prelude//decls/apple_rules.bzl", "ios_rules") +load("@prelude//decls/common.bzl", "IncludeType", "LinkableDepType", "buck") load("@prelude//decls/core_rules.bzl", "core_rules") load("@prelude//decls/cxx_rules.bzl", "cxx_rules") load("@prelude//decls/d_rules.bzl", "d_rules") @@ -104,22 +71,20 @@ load("@prelude//decls/go_rules.bzl", "go_rules") load("@prelude//decls/groovy_rules.bzl", "groovy_rules") load("@prelude//decls/halide_rules.bzl", "halide_rules") load("@prelude//decls/haskell_rules.bzl", "haskell_rules") -load("@prelude//decls/ios_rules.bzl", "ios_rules") load("@prelude//decls/java_rules.bzl", "java_rules") load("@prelude//decls/js_rules.bzl", "js_rules") load("@prelude//decls/kotlin_rules.bzl", "kotlin_rules") load("@prelude//decls/lua_rules.bzl", "lua_rules") load("@prelude//decls/ocaml_rules.bzl", "ocaml_rules") load("@prelude//decls/python_rules.bzl", "python_rules") +load("@prelude//decls/re_test_common.bzl", "re_test_common") load("@prelude//decls/rust_rules.bzl", "rust_rules") load("@prelude//decls/scala_rules.bzl", "scala_rules") 
load("@prelude//decls/shell_rules.bzl", "shell_rules") load("@prelude//decls/toolchains_common.bzl", "toolchains_common") load("@prelude//decls/uncategorized_rules.bzl", "uncategorized_rules") - -# Constraints -load("@prelude//transitions/constraint_overrides.bzl", "constraint_overrides_transition") -load(":alias.bzl", "alias_impl", "configured_alias_impl", "versioned_alias_impl") +load("@prelude//transitions/constraint_overrides.bzl", "constraint_overrides") +load(":alias.bzl", "alias_impl", "configured_alias_impl", "toolchain_alias_impl", "versioned_alias_impl") load(":command_alias.bzl", "command_alias_impl") load(":export_file.bzl", "export_file_impl") load(":filegroup.bzl", "filegroup_impl") @@ -129,10 +94,16 @@ load(":remote_file.bzl", "remote_file_impl") load(":sh_binary.bzl", "sh_binary_impl") load(":sh_test.bzl", "sh_test_impl") load(":test_suite.bzl", "test_suite_impl") - -# Other load(":worker_tool.bzl", "worker_tool") +BUILD_INFO_ATTR = attrs.dict( + key = attrs.string(), + value = attrs.option(attrs.any()), + sorted = False, + default = {}, + doc = "Build info that is passed along here will be late-stamped into a fb_build_info section on the output binary", +) + rule_decl_records = [ android_rules, core_rules, @@ -182,6 +153,7 @@ extra_implemented_rules = struct( sh_binary = sh_binary_impl, sh_test = sh_test_impl, test_suite = test_suite_impl, + toolchain_alias = toolchain_alias_impl, versioned_alias = versioned_alias_impl, worker_tool = worker_tool, @@ -199,6 +171,7 @@ extra_implemented_rules = struct( cxx_python_extension = cxx_python_extension_impl, prebuilt_cxx_library = prebuilt_cxx_library_impl, prebuilt_cxx_library_group = prebuilt_cxx_library_group_impl, + windows_resource = windows_resource_impl, # C++ / LLVM llvm_link_bitcode = llvm_link_bitcode_impl, @@ -207,11 +180,12 @@ extra_implemented_rules = struct( git_fetch = git_fetch_impl, #go - cgo_library = cgo_library_impl, go_binary = go_binary_impl, + go_bootstrap_binary = go_bootstrap_binary_impl, go_exported_library = go_exported_library_impl, go_library = go_library_impl, go_test = go_test_impl, + go_stdlib = go_stdlib_impl, #haskell haskell_library = haskell_library_impl, @@ -254,6 +228,7 @@ extra_implemented_rules = struct( _js_implemented_rules, _julia_implemented_rules, _kotlin_implemented_rules, + _matlab_implemented_rules, _zip_file_implemented_rules, ]) ) @@ -271,6 +246,12 @@ def _cxx_python_extension_attrs(): "allow_suffixing": attrs.bool(default = True), # Copied from cxx_library. "auto_link_groups": attrs.bool(default = False), + + # These flags will only be used to instrument a target + # when coverage for that target is enabled by `exported_needs_coverage_instrumentation` + # or by any of the target's dependencies. 
+ "coverage_instrumentation_compiler_flags": attrs.list(attrs.string(), default = []), + "exported_needs_coverage_instrumentation": attrs.bool(default = False), "link_ordering": attrs.option(attrs.enum(LinkOrdering.values()), default = None), "link_whole": attrs.default_only(attrs.bool(default = True)), "precompiled_header": attrs.option(attrs.dep(providers = [CPrecompiledHeaderInfo]), default = None), @@ -298,52 +279,66 @@ def _python_executable_attrs(): if key not in python_executable_attrs } + updated_attrs.update(constraint_overrides.attributes) + # allow non-default value for the args below updated_attrs.update({ - "add_multiprocessing_wrapper": attrs.bool(default = False), "anonymous_link_groups": attrs.bool(default = False), "binary_linker_flags": attrs.list(attrs.arg(anon_target_compatible = True), default = []), "bolt_flags": attrs.list(attrs.arg(), default = []), "bolt_profile": attrs.option(attrs.source(), default = None), "compiler_flags": attrs.list(attrs.arg(), default = []), - "constraint_overrides": attrs.list(attrs.string(), default = []), "cxx_main": attrs.source(default = "prelude//python/tools:embedded_main.cpp"), + "distributed_thinlto_partial_split_dwarf": attrs.bool(default = False), "enable_distributed_thinlto": attrs.bool(default = False), "executable_deps": attrs.list(attrs.dep(), default = []), "executable_name": attrs.option(attrs.string(), default = None), "inplace_build_args": attrs.list(attrs.arg(), default = []), "link_group": attrs.option(attrs.string(), default = None), - "link_group_map": link_group_map_attr(), + "link_group_map": LINK_GROUP_MAP_ATTR, "link_group_min_binary_node_count": attrs.option(attrs.int(), default = None), "link_style": attrs.enum(LinkableDepType, default = "static"), "main_function": attrs.option( attrs.string(), default = None, doc = """ - Fully qualified name of a Python function that will serve as the main entry point of the binary. - - This should usually be a function defined within one of the - dependencies of this target. This attribute should be preferred over - `main_module` or `main`, and it is an error to specify more than one of these. + Name of a Python function that will serve as the main entry point of + the binary. The name is either a fully qualified name like + `foo.bar.baz` or it starts with a `.` like `.bar.baz`, in which case + it is relative to the package containing the target. This should + usually be a function defined within one of the dependencies of this + target. This attribute should be preferred over `main_module` or + `main`, and it is an error to specify more than one of these. """, ), "make_py_package": attrs.option(attrs.exec_dep(providers = [RunInfo]), default = None), - # entries for the generated __manifest__ python module - "manifest_module_entries": attrs.option(attrs.dict( - key = attrs.string(), - value = attrs.one_of( - attrs.dict(key = attrs.string(), value = attrs.option(attrs.any())), - attrs.list(attrs.string()), + "manifest_module_entries": attrs.option( + attrs.dict( + key = attrs.string(), + value = attrs.one_of( + attrs.dict(key = attrs.string(), value = attrs.option(attrs.any())), + attrs.list(attrs.string()), + ), ), - ), default = None), + default = None, + doc = """If present, it should be a `string` -> `entry` mapping that + gets generated into a `__manifest__` module in the executable. Top + level string keys will be the names of variables in this module (so + they must be valid Python identifiers). 
An `entry` can be a list of + `string`s, or a further `string`-keyed dictionary.""", + ), "native_link_strategy": attrs.option(attrs.enum(NativeLinkStrategy), default = None), "package_split_dwarf_dwp": attrs.bool(default = False), "par_style": attrs.option(attrs.string(), default = None), "resources": attrs.named_set(attrs.one_of(attrs.dep(), attrs.source(allow_directory = True)), sorted = True, default = []), + "run_with_inplace": attrs.bool(default = False), + "runtime_env": attrs.option(attrs.dict(key = attrs.string(), value = attrs.string()), default = None), "standalone_build_args": attrs.list(attrs.arg(), default = []), "static_extension_finder": attrs.source(default = "prelude//python/tools:static_extension_finder.py"), "static_extension_utils": attrs.source(default = "prelude//python/tools:static_extension_utils.cpp"), "strip_libpar": attrs.enum(StripLibparStrategy, default = "none"), + "strip_stapsdt": attrs.bool(default = False), + "_build_info": BUILD_INFO_ATTR, "_create_manifest_for_source_dir": _create_manifest_for_source_dir(), "_cxx_hacks": attrs.default_only(attrs.dep(default = "prelude//cxx/tools:cxx_hacks")), "_cxx_toolchain": toolchains_common.cxx(), @@ -357,11 +352,12 @@ def _python_executable_attrs(): def _python_test_attrs(): test_attrs = _python_executable_attrs() test_attrs["_test_main"] = attrs.source(default = "prelude//python/tools:__test_main__.py") - test_attrs.update(buck.re_test_args()) + test_attrs["implicit_test_library"] = attrs.option(attrs.dep(providers = [PythonLibraryInfo]), default = None) + test_attrs.update(re_test_common.test_args()) return test_attrs def _cxx_binary_and_test_attrs(): - return { + ret = { "anonymous_link_groups": attrs.bool(default = False), "auto_link_groups": attrs.bool(default = False), # Linker flags that only apply to the executable link, used for link @@ -370,17 +366,28 @@ def _cxx_binary_and_test_attrs(): "binary_linker_flags": attrs.list(attrs.arg(anon_target_compatible = True), default = []), "bolt_flags": attrs.list(attrs.arg(), default = []), "bolt_profile": attrs.option(attrs.source(), default = None), + # These flags will only be used to instrument a target + # when coverage for that target is enabled by a header + # selected for coverage either in the target or in one + # of the target's dependencies. 
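    # (Illustrative values only, assuming a clang-style toolchain: e.g.
    # ["-fprofile-instr-generate", "-fcoverage-mapping"].)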
+ "coverage_instrumentation_compiler_flags": attrs.list(attrs.string(), default = []), + "distributed_thinlto_partial_split_dwarf": attrs.bool(default = False), "enable_distributed_thinlto": attrs.bool(default = False), + "exported_needs_coverage_instrumentation": attrs.bool(default = False), "link_execution_preference": link_execution_preference_attr(), - "link_group_map": link_group_map_attr(), + "link_group_map": LINK_GROUP_MAP_ATTR, "link_group_min_binary_node_count": attrs.option(attrs.int(), default = None), "link_ordering": attrs.option(attrs.enum(LinkOrdering.values()), default = None), "link_whole": attrs.default_only(attrs.bool(default = False)), "precompiled_header": attrs.option(attrs.dep(providers = [CPrecompiledHeaderInfo]), default = None), "resources": attrs.named_set(attrs.one_of(attrs.dep(), attrs.source(allow_directory = True)), sorted = True, default = []), + "standalone_extensions": attrs.option(attrs.bool(), default = None), + "_build_info": BUILD_INFO_ATTR, "_cxx_hacks": attrs.dep(default = "prelude//cxx/tools:cxx_hacks"), "_cxx_toolchain": toolchains_common.cxx(), } + ret.update(constraint_overrides.attributes) + return ret NativeLinkStrategy = ["separate", "native", "merged"] StripLibparStrategy = ["full", "extract", "none"] @@ -405,12 +412,6 @@ def _create_manifest_for_source_dir(): inlined_extra_attributes = { - # go - "cgo_library": { - "embedcfg": attrs.option(attrs.source(allow_directory = False), default = None), - "_cxx_toolchain": toolchains_common.cxx(), - "_go_toolchain": toolchains_common.go(), - }, # csharp "csharp_library": { "_csharp_toolchain": toolchains_common.csharp(), @@ -424,26 +425,39 @@ inlined_extra_attributes = { }, "cxx_library": { "auto_link_groups": attrs.bool(default = False), + # These flags will only be used to instrument a target + # when coverage for that target is enabled by `exported_needs_coverage_instrumentation` + # or by any of the target's dependencies. + "coverage_instrumentation_compiler_flags": attrs.list(attrs.string(), default = []), "deps_query": attrs.option(attrs.query(), default = None), + "exported_needs_coverage_instrumentation": attrs.bool(default = False), "extra_xcode_sources": attrs.list(attrs.source(allow_directory = True), default = []), "header_mode": attrs.option(attrs.enum(HeaderMode.values()), default = None), "link_deps_query_whole": attrs.bool(default = False), "link_execution_preference": link_execution_preference_attr(), - "link_group_map": link_group_map_attr(), + "link_group_map": LINK_GROUP_MAP_ATTR, "link_ordering": attrs.option(attrs.enum(LinkOrdering.values()), default = None), "precompiled_header": attrs.option(attrs.dep(providers = [CPrecompiledHeaderInfo]), default = None), "prefer_stripped_objects": attrs.bool(default = False), - "preferred_linkage": attrs.enum(Linkage, default = "any"), + "preferred_linkage": attrs.enum( + Linkage.values(), + default = "any", + doc = """ + Determines what linkage is used when the library is depended on by another target. To + control how the dependencies of this library are linked, use `link_style` instead. 
+ """, + ), "resources": attrs.named_set(attrs.one_of(attrs.dep(), attrs.source(allow_directory = True)), sorted = True, default = []), "supports_header_symlink_subtarget": attrs.bool(default = False), "supports_python_dlopen": attrs.option(attrs.bool(), default = None), "supports_shlib_interfaces": attrs.bool(default = True), + "_create_third_party_build_root": attrs.default_only(attrs.exec_dep(default = "prelude//third-party/tools:create_build")), "_cxx_hacks": attrs.default_only(attrs.dep(default = "prelude//cxx/tools:cxx_hacks")), "_cxx_toolchain": toolchains_common.cxx(), "_is_building_android_binary": is_building_android_binary_attr(), }, "cxx_python_extension": _cxx_python_extension_attrs(), - "cxx_test": buck.re_test_args() | _cxx_binary_and_test_attrs(), + "cxx_test": re_test_common.test_args() | _cxx_binary_and_test_attrs(), "cxx_toolchain": cxx_toolchain_extra_attributes(is_toolchain_rule = False), # Generic rule to build from a command @@ -453,25 +467,65 @@ inlined_extra_attributes = { "go_binary": { "embedcfg": attrs.option(attrs.source(allow_directory = False), default = None), "resources": attrs.list(attrs.one_of(attrs.dep(), attrs.source(allow_directory = True)), default = []), + "_asan": asan_attr, + "_build_info": BUILD_INFO_ATTR, + "_cxx_toolchain": toolchains_common.cxx(), "_exec_os_type": buck.exec_os_type_arg(), + "_go_stdlib": attrs.default_only(attrs.dep(default = "prelude//go/tools:stdlib")), "_go_toolchain": toolchains_common.go(), + "_race": race_attr, + "_tags": tags_attr, + }, + "go_bootstrap_binary": { + "_exec_os_type": buck.exec_os_type_arg(), + "_go_bootstrap_toolchain": toolchains_common.go_bootstrap(), }, "go_exported_library": { "embedcfg": attrs.option(attrs.source(allow_directory = False), default = None), + "_asan": asan_attr, + "_build_info": BUILD_INFO_ATTR, + "_cxx_toolchain": toolchains_common.cxx(), "_exec_os_type": buck.exec_os_type_arg(), + "_go_stdlib": attrs.default_only(attrs.dep(default = "prelude//go/tools:stdlib")), "_go_toolchain": toolchains_common.go(), + "_race": race_attr, + "_tags": tags_attr, }, "go_library": { "embedcfg": attrs.option(attrs.source(allow_directory = False), default = None), + "_asan": asan_attr, + "_cgo_enabled": cgo_enabled_attr, + "_coverage_mode": coverage_mode_attr, + "_cxx_toolchain": toolchains_common.cxx(), + "_exec_os_type": buck.exec_os_type_arg(), + "_go_stdlib": attrs.default_only(attrs.dep(default = "prelude//go/tools:stdlib")), "_go_toolchain": toolchains_common.go(), + "_race": race_attr, + "_tags": tags_attr, + }, + "go_stdlib": { + "_asan": asan_attr, + "_cgo_enabled": cgo_enabled_attr, + "_cxx_toolchain": toolchains_common.cxx(), + "_exec_os_type": buck.exec_os_type_arg(), + "_go_toolchain": toolchains_common.go(), + "_race": race_attr, + "_tags": tags_attr, }, "go_test": { "coverage_mode": attrs.option(attrs.enum(GoCoverageMode.values()), default = None), "embedcfg": attrs.option(attrs.source(allow_directory = False), default = None), "resources": attrs.list(attrs.source(allow_directory = True), default = []), + "_asan": asan_attr, + "_build_info": BUILD_INFO_ATTR, + "_coverage_mode": coverage_mode_attr, + "_cxx_toolchain": toolchains_common.cxx(), "_exec_os_type": buck.exec_os_type_arg(), + "_go_stdlib": attrs.default_only(attrs.dep(default = "prelude//go/tools:stdlib")), "_go_toolchain": toolchains_common.go(), - "_testmaingen": attrs.default_only(attrs.exec_dep(default = "prelude//go/tools:testmaingen")), + "_race": race_attr, + "_tags": tags_attr, + "_testmaingen": 
attrs.default_only(attrs.exec_dep(providers = [RunInfo], default = "prelude//go_bootstrap/tools:go_testmaingen")), }, # groovy @@ -483,7 +537,7 @@ inlined_extra_attributes = { }, "haskell_binary": { "auto_link_groups": attrs.bool(default = False), - "link_group_map": link_group_map_attr(), + "link_group_map": LINK_GROUP_MAP_ATTR, "template_deps": attrs.list(attrs.exec_dep(providers = [HaskellLibraryProvider]), default = []), "_cxx_toolchain": toolchains_common.cxx(), "_haskell_toolchain": toolchains_common.haskell(), @@ -493,12 +547,16 @@ inlined_extra_attributes = { "_cxx_toolchain": toolchains_common.cxx(), "_haskell_toolchain": toolchains_common.haskell(), }, + "haskell_haddock": { + "_cxx_toolchain": toolchains_common.cxx(), + "_haskell_toolchain": toolchains_common.haskell(), + }, "haskell_ide": { "include_projects": attrs.list(attrs.dep(), default = []), "_haskell_toolchain": toolchains_common.haskell(), }, "haskell_library": { - "preferred_linkage": attrs.enum(Linkage, default = "any"), + "preferred_linkage": attrs.enum(Linkage.values(), default = "any"), "template_deps": attrs.list(attrs.exec_dep(providers = [HaskellLibraryProvider]), default = []), "_cxx_toolchain": toolchains_common.cxx(), "_haskell_toolchain": toolchains_common.haskell(), @@ -514,12 +572,22 @@ inlined_extra_attributes = { "header_dirs": attrs.option(attrs.list(attrs.source(allow_directory = True)), default = None), "linker_flags": attrs.list(attrs.arg(anon_target_compatible = True), default = []), "platform_header_dirs": attrs.option(attrs.list(attrs.tuple(attrs.regex(), attrs.list(attrs.source(allow_directory = True)))), default = None), - "preferred_linkage": attrs.enum(Linkage, default = "any"), + "post_linker_flags": attrs.list(attrs.arg(anon_target_compatible = True), default = []), + "preferred_linkage": attrs.enum( + Linkage.values(), + default = "any", + doc = """ + Determines what linkage is used when the library is depended on by another target. To + control how the dependencies of this library are linked, use `link_style` instead. 
+ """, + ), "public_include_directories": attrs.set(attrs.string(), sorted = True, default = []), "public_system_include_directories": attrs.set(attrs.string(), sorted = True, default = []), "raw_headers": attrs.set(attrs.source(), sorted = True, default = []), + "supports_lto": attrs.bool(default = False), "supports_python_dlopen": attrs.bool(default = True), "versioned_header_dirs": attrs.option(attrs.versioned(attrs.list(attrs.source(allow_directory = True))), default = None), + "_create_third_party_build_root": attrs.default_only(attrs.exec_dep(default = "prelude//third-party/tools:create_build")), "_cxx_toolchain": toolchains_common.cxx(), "_target_os_type": buck.target_os_type_arg(), }, @@ -530,6 +598,7 @@ inlined_extra_attributes = { #python "prebuilt_python_library": { "_create_manifest_for_source_dir": _create_manifest_for_source_dir(), + "_create_third_party_build_root": attrs.default_only(attrs.exec_dep(default = "prelude//third-party/tools:create_build")), "_extract": attrs.default_only(attrs.exec_dep(default = "prelude//python/tools:extract")), "_python_toolchain": toolchains_common.python(), }, @@ -551,11 +620,13 @@ inlined_extra_attributes = { ), }, "python_bootstrap_library": { + "deps": attrs.list(attrs.dep(providers = [PythonBootstrapSources]), default = []), "srcs": attrs.list(attrs.source()), }, "python_library": { "resources": attrs.named_set(attrs.one_of(attrs.dep(), attrs.source(allow_directory = True)), sorted = True, default = []), "_create_manifest_for_source_dir": _create_manifest_for_source_dir(), + "_create_third_party_build_root": attrs.default_only(attrs.exec_dep(default = "prelude//third-party/tools:create_build")), "_cxx_toolchain": toolchains_common.cxx(), "_python_toolchain": toolchains_common.python(), }, @@ -565,7 +636,7 @@ inlined_extra_attributes = { labels = attrs.list(attrs.string(), default = []), needed_coverage = attrs.list(attrs.tuple(attrs.int(), attrs.dep(), attrs.option(attrs.string())), default = []), test = attrs.dep(providers = [ExternalRunnerTestInfo]), - **buck.re_test_args() + **(re_test_common.test_args() | buck.inject_test_env_arg()) ), "python_test": _python_test_attrs(), "remote_file": { @@ -574,7 +645,10 @@ inlined_extra_attributes = { "_unzip_tool": attrs.default_only(attrs.exec_dep(providers = [RunInfo], default = "prelude//zip_file/tools:unzip")), }, "rust_test": {}, - "sh_test": {}, + "sh_test": constraint_overrides.attributes, + "windows_resource": { + "_cxx_toolchain": toolchains_common.cxx(), + }, } all_extra_attributes = _merge_dictionaries([ @@ -586,44 +660,36 @@ all_extra_attributes = _merge_dictionaries([ _js_extra_attributes, _julia_extra_attributes, _kotlin_extra_attributes, + _matlab_extra_attributes, _ocaml_extra_attributes, _zip_file_extra_attributes, ]) -# Inject test toolchain in all tests. - -for rule in [ - "sh_test", - "rust_test", - "python_test", - "python_needed_coverage_test", - "java_test", - "go_test", - "cxx_test", - "apple_test", - "android_instrumentation_test", - "kotlin_test", - "robolectric_test", - "julia_test", -]: - # NOTE: We make this a `dep` not an `exec_dep` even though we'll execute - # it, because it needs to execute in the same platform as the test itself - # (we run tests in the target platform not the exec platform, since the - # goal is to test the code that is being built!). 
- all_extra_attributes[rule] = _merge_dictionaries([all_extra_attributes[rule], { - "_inject_test_env": attrs.default_only(attrs.dep(default = "prelude//test/tools:inject_test_env")), - }]) - extra_attributes = struct(**all_extra_attributes) # Configuration transitions to pass `cfg` for builtin rules. transitions = { - "android_binary": constraint_overrides_transition, - "python_binary": constraint_overrides_transition, - "python_test": constraint_overrides_transition, + "android_binary": constraint_overrides.transition, + "apple_asset_catalog": apple_resource_transition, + "apple_binary": target_sdk_version_transition, + "apple_bundle": target_sdk_version_transition, + "apple_library": target_sdk_version_transition, + "apple_resource": apple_resource_transition, + "apple_test": target_sdk_version_transition, + "cxx_binary": constraint_overrides.transition, + "cxx_test": constraint_overrides.transition, + "go_binary": go_binary_transition, + "go_exported_library": go_exported_library_transition, + "go_library": go_library_transition, + "go_stdlib": go_stdlib_transition, + "go_test": go_test_transition, + "python_binary": constraint_overrides.transition, + "python_test": constraint_overrides.transition, + "sh_test": constraint_overrides.transition, } toolchain_rule_names = [ "apple_toolchain", "swift_toolchain", + "toolchain_alias", ] diff --git a/prelude/runtime/BUCK.v2 b/prelude/runtime/BUCK.v2 new file mode 100644 index 0000000000000..a4b3167a3e0f1 --- /dev/null +++ b/prelude/runtime/BUCK.v2 @@ -0,0 +1,12 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +# Used by open source projects to support `prelude//` + +config_setting( + name = "fbcode", + visibility = ["PUBLIC"], +) diff --git a/prelude/rust/build.bzl b/prelude/rust/build.bzl index 788854501a222..2d312cb367de9 100644 --- a/prelude/rust/build.bzl +++ b/prelude/rust/build.bzl @@ -5,7 +5,10 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
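# Overview sketch: compile_context(ctx) assembles a CompileContext (toolchain
# info, linker wrapper, clippy wrapper, sysroot args) that rust_compile() and
# the generate_rustdoc*() helpers below all share.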
-load("@prelude//:artifact_tset.bzl", "project_artifacts") +load( + "@prelude//:artifact_tset.bzl", + "project_artifacts", +) load("@prelude//:local_only.bzl", "link_cxx_binary_locally") load("@prelude//:paths.bzl", "paths") load("@prelude//:resources.bzl", "create_resource_db", "gather_resources") @@ -21,73 +24,79 @@ load("@prelude//cxx:debug.bzl", "SplitDebugMode") load("@prelude//cxx:dwp.bzl", "dwp", "dwp_available") load( "@prelude//cxx:linker.bzl", - "get_default_shared_library_name", "get_shared_library_name_linker_flags", ) load( "@prelude//linking:link_info.bzl", + "LibOutputStyle", # @unused Used as a type "LinkArgs", - "LinkStyle", + "LinkInfos", # @unused Used as a type + "LinkStrategy", # @unused Used as a type + "create_merged_link_info", "get_link_args_for_strategy", - "to_link_strategy", ) load( "@prelude//linking:shared_libraries.bzl", "merge_shared_libraries", "traverse_shared_library_info", ) +load("@prelude//linking:strip.bzl", "strip_debug_info") +load("@prelude//linking:types.bzl", "Linkage") load("@prelude//os_lookup:defs.bzl", "OsLookup") +load("@prelude//utils:argfile.bzl", "at_argfile") load("@prelude//utils:cmd_script.bzl", "ScriptOs", "cmd_script") -load("@prelude//utils:set.bzl", "set") load("@prelude//utils:utils.bzl", "flatten_dict") load( ":build_params.bzl", "BuildParams", # @unused Used as a type "CrateType", "Emit", + "MetadataKind", "crate_type_codegen", "crate_type_linked", - "emit_needs_codegen", + "dep_metadata_of_emit", "output_filename", ) -load(":context.bzl", "CommonArgsInfo", "CompileContext") -load(":extern.bzl", "crate_map_arg", "extern_arg") +load(":clippy_configuration.bzl", "ClippyConfiguration") +load( + ":context.bzl", + "CommonArgsInfo", + "CompileContext", + "CrateName", # @unused Used as a type + "DepCollectionContext", +) +load( + ":extern.bzl", + "crate_map_arg", + "extern_arg", +) load( ":failure_filter.bzl", - "RustFailureFilter", "failure_filter", ) load( ":link_info.bzl", - "CrateName", #@unused Used as a type "RustCxxLinkGroupInfo", #@unused Used as a type + "RustDependency", "RustLinkInfo", - "RustLinkStyleInfo", "attr_crate", "attr_simple_crate_for_filenames", + "attr_soname", "get_available_proc_macros", "inherited_external_debug_info", - "inherited_non_rust_link_info", - "inherited_non_rust_shared_libs", + "inherited_merged_link_infos", + "inherited_rust_external_debug_info", + "inherited_shared_libs", "normalize_crate", "resolve_rust_deps", - "style_info", + "strategy_info", ) +load(":outputs.bzl", "RustcOutput") load(":resources.bzl", "rust_attr_resources") -load(":rust_toolchain.bzl", "RustToolchainInfo", "ctx_toolchain_info") - -RustcOutput = record( - output = field(Artifact), - diag = field(dict[str, Artifact]), - pdb = field([Artifact, None]), - dwp_output = field([Artifact, None]), - # Zero or more Split DWARF debug info files are emitted into this directory - # with unpredictable filenames. - dwo_output_directory = field([Artifact, None]), -) +load(":rust_toolchain.bzl", "PanicRuntime", "RustToolchainInfo") def compile_context(ctx: AnalysisContext) -> CompileContext: - toolchain_info = ctx_toolchain_info(ctx) + toolchain_info = ctx.attrs._rust_toolchain[RustToolchainInfo] cxx_toolchain_info = get_cxx_toolchain_info(ctx) # Setup source symlink tree. 
@@ -115,16 +124,37 @@ def compile_context(ctx: AnalysisContext) -> CompileContext: linker = _linker_args(ctx, cxx_toolchain_info.linker_info) clippy_wrapper = _clippy_wrapper(ctx, toolchain_info) + dep_ctx = DepCollectionContext( + advanced_unstable_linking = toolchain_info.advanced_unstable_linking, + include_doc_deps = False, + is_proc_macro = getattr(ctx.attrs, "proc_macro", False), + explicit_sysroot_deps = toolchain_info.explicit_sysroot_deps, + panic_runtime = toolchain_info.panic_runtime, + ) + + # When we pass explicit sysroot deps, we need to override the default sysroot to avoid accidentally + # linking against the prebuilt sysroot libs provided by the toolchain. Rustc requires a specific layout + # for these libs, so we need to carefully recreate the directory structure below. + if toolchain_info.explicit_sysroot_deps: + empty_dir = ctx.actions.copied_dir("empty_dir", {}) + empty_sysroot = ctx.actions.copied_dir("empty_sysroot", {"lib/rustlib/" + toolchain_info.rustc_target_triple + "/lib": empty_dir}) + + sysroot_args = cmd_args("--sysroot=", empty_sysroot, delimiter = "") + elif toolchain_info.sysroot_path: + sysroot_args = cmd_args("--sysroot=", toolchain_info.sysroot_path, delimiter = "") + else: + sysroot_args = cmd_args() + return CompileContext( toolchain_info = toolchain_info, cxx_toolchain_info = cxx_toolchain_info, + dep_ctx = dep_ctx, symlinked_srcs = symlinked_srcs, linker_args = linker, clippy_wrapper = clippy_wrapper, common_args = {}, - flagfiles_for_extern = {}, - flagfiles_for_crate_map = {}, transitive_dependency_dirs = {}, + sysroot_args = sysroot_args, ) def generate_rustdoc( @@ -142,80 +172,132 @@ def generate_rustdoc( common_args = _compute_common_args( ctx = ctx, compile_ctx = compile_ctx, + dep_ctx = compile_ctx.dep_ctx, # to make sure we get the rmeta's generated for the crate dependencies, # rather than full .rlibs - emit = Emit("metadata"), + emit = Emit("metadata-fast"), params = params, - dep_link_style = params.dep_link_style, default_roots = default_roots, + infallible_diagnostics = False, + incremental_enabled = False, is_rustdoc_test = False, ) subdir = common_args.subdir + "-rustdoc" output = ctx.actions.declare_output(subdir) - plain_env, path_env = _process_env(compile_ctx, ctx.attrs.env, exec_is_windows) + plain_env, path_env = process_env(compile_ctx, toolchain_info.rustdoc_env | ctx.attrs.env, exec_is_windows) + plain_env["RUSTDOC_BUCK_TARGET"] = cmd_args(str(ctx.label.raw_target())) rustdoc_cmd = cmd_args( - [cmd_args("--env=", k, "=", v, delimiter = "") for k, v in plain_env.items()], - [cmd_args("--path-env=", k, "=", v, delimiter = "") for k, v in path_env.items()], - cmd_args(str(ctx.label.raw_target()), format = "--env=RUSTDOC_BUCK_TARGET={}"), toolchain_info.rustdoc, toolchain_info.rustdoc_flags, ctx.attrs.rustdoc_flags, common_args.args, cmd_args(output.as_output(), format = "--out-dir={}"), + hidden = [toolchain_info.rustdoc, compile_ctx.symlinked_srcs], ) if document_private_items: rustdoc_cmd.add("--document-private-items") - url_prefix = toolchain_info.extern_html_root_url_prefix - if url_prefix != None: - # Flag --extern-html-root-url used below is only supported on nightly. 
- rustdoc_cmd.add("-Zunstable-options") + rustdoc_cmd_action = cmd_args( + [cmd_args("--env=", k, "=", v, delimiter = "") for k, v in plain_env.items()], + [cmd_args("--path-env=", k, "=", v, delimiter = "") for k, v in path_env.items()], + rustdoc_cmd, + ) - for dep in resolve_rust_deps(ctx): - if dep.label.cell != ctx.label.cell: - # TODO: support a different extern_html_root_url_prefix per cell - continue + rustdoc_cmd = _long_command( + ctx = ctx, + exe = toolchain_info.rustc_action, + args = rustdoc_cmd_action, + argfile_name = "{}.args".format(subdir), + ) - if dep.name: - name = normalize_crate(dep.name) - else: - name = dep.info.crate + ctx.actions.run(rustdoc_cmd, category = "rustdoc") - rustdoc_cmd.add( - "--extern-html-root-url={}={}/{}:{}" - .format(name, url_prefix, dep.label.package, dep.label.name), - ) + return output - rustdoc_cmd.hidden(toolchain_info.rustdoc, compile_ctx.symlinked_srcs) +def generate_rustdoc_coverage( + ctx: AnalysisContext, + compile_ctx: CompileContext, + # link strategy doesn't matter, but caller should pass in build params + # with static-pic (to get best cache hits for deps) + params: BuildParams, + default_roots: list[str]) -> Artifact: + toolchain_info = compile_ctx.toolchain_info + + common_args = _compute_common_args( + ctx = ctx, + compile_ctx = compile_ctx, + dep_ctx = compile_ctx.dep_ctx, + # to make sure we get the rmeta's generated for the crate dependencies, + # rather than full .rlibs + emit = Emit("metadata-fast"), + params = params, + default_roots = default_roots, + infallible_diagnostics = False, + incremental_enabled = False, + is_rustdoc_test = False, + ) + + file = common_args.subdir + "-rustdoc-coverage" + output = ctx.actions.declare_output(file) + + rustdoc_cmd = cmd_args( + toolchain_info.rustdoc, + toolchain_info.rustdoc_flags, + ctx.attrs.rustdoc_flags, + common_args.args, + "-Zunstable-options", + "--show-coverage", + ) + + exec_is_windows = ctx.attrs._exec_os_type[OsLookup].platform == "windows" + plain_env, path_env = process_env(compile_ctx, ctx.attrs.env, exec_is_windows) + plain_env["RUSTDOC_BUCK_TARGET"] = cmd_args(str(ctx.label.raw_target())) + + rustdoc_cmd_action = cmd_args( + [cmd_args("--env=", k, "=", v, delimiter = "") for k, v in plain_env.items()], + [cmd_args("--path-env=", k, "=", v, delimiter = "") for k, v in path_env.items()], + rustdoc_cmd, + ) rustdoc_cmd = _long_command( ctx = ctx, exe = toolchain_info.rustc_action, - args = rustdoc_cmd, - argfile_name = "{}.args".format(subdir), + args = rustdoc_cmd_action, + argfile_name = "{}.args".format(file), ) - ctx.actions.run(rustdoc_cmd, category = "rustdoc") + cmd = cmd_args([toolchain_info.rustdoc_coverage, output.as_output(), rustdoc_cmd]) + + ctx.actions.run(cmd, category = "rustdoc_coverage") return output def generate_rustdoc_test( ctx: AnalysisContext, compile_ctx: CompileContext, - link_style: LinkStyle, - library: RustLinkStyleInfo, + rlib: Artifact, + link_infos: dict[LibOutputStyle, LinkInfos], params: BuildParams, default_roots: list[str]) -> cmd_args: + exec_is_windows = ctx.attrs._exec_os_type[OsLookup].platform == "windows" + toolchain_info = compile_ctx.toolchain_info + doc_dep_ctx = DepCollectionContext( + advanced_unstable_linking = compile_ctx.dep_ctx.advanced_unstable_linking, + include_doc_deps = True, + is_proc_macro = False, + explicit_sysroot_deps = compile_ctx.dep_ctx.explicit_sysroot_deps, + panic_runtime = compile_ctx.dep_ctx.panic_runtime, + ) resources = create_resource_db( ctx = ctx, name = "doctest/resources.json", - binary = 
library.rlib, + binary = rlib, resources = flatten_dict(gather_resources( label = ctx.label, resources = rust_attr_resources(ctx), @@ -224,16 +306,15 @@ def generate_rustdoc_test( ) # Gather and setup symlink tree of transitive shared library deps. - shared_libs = {} - if link_style == LinkStyle("shared"): + shared_libs = [] + if params.dep_link_strategy == LinkStrategy("shared"): shlib_info = merge_shared_libraries( ctx.actions, - deps = inherited_non_rust_shared_libs(ctx, include_doc_deps = True), + deps = inherited_shared_libs(ctx, doc_dep_ctx), ) - for soname, shared_lib in traverse_shared_library_info(shlib_info).items(): - shared_libs[soname] = shared_lib.lib - extra_link_args, runtime_files, _ = executable_shared_lib_arguments( - ctx.actions, + shared_libs.extend(traverse_shared_library_info(shlib_info)) + executable_args = executable_shared_lib_arguments( + ctx, compile_ctx.cxx_toolchain_info, resources, shared_libs, @@ -242,23 +323,34 @@ def generate_rustdoc_test( common_args = _compute_common_args( ctx = ctx, compile_ctx = compile_ctx, + dep_ctx = doc_dep_ctx, emit = Emit("link"), params = params, - dep_link_style = params.dep_link_style, default_roots = default_roots, + infallible_diagnostics = False, is_rustdoc_test = True, + incremental_enabled = False, ) link_args_output = make_link_args( + ctx, ctx.actions, compile_ctx.cxx_toolchain_info, [ - LinkArgs(flags = extra_link_args), + LinkArgs(flags = executable_args.extra_link_args), get_link_args_for_strategy( ctx, - inherited_non_rust_link_info(ctx, include_doc_deps = True), - # TODO(cjhopman): It's unclear how rust is using link_style. I'm not sure if it's intended to be a LibOutputStyle or a LinkStrategy. - to_link_strategy(link_style), + # Since we pass the rlib in and treat it as a dependency to the rustdoc test harness, + # we need to ensure that the rlib's link info is added to the linker, otherwise we may + # end up with missing symbols that are defined within the crate. + [create_merged_link_info( + ctx, + compile_ctx.cxx_toolchain_info.pic_behavior, + link_infos, + deps = inherited_merged_link_infos(ctx, doc_dep_ctx).values(), + preferred_linkage = Linkage("static"), + )] + inherited_merged_link_infos(ctx, doc_dep_ctx).values(), + params.dep_link_strategy, ), ], "{}-{}".format(common_args.subdir, common_args.tempfile), @@ -272,74 +364,57 @@ def generate_rustdoc_test( allow_args = True, ) - if ctx.attrs._exec_os_type[OsLookup].platform == "windows": + if exec_is_windows: runtool = ["--runtool=cmd.exe", "--runtool-arg=/V:OFF", "--runtool-arg=/C"] else: runtool = ["--runtool=/usr/bin/env"] + plain_env, path_env = process_env(compile_ctx, ctx.attrs.env, exec_is_windows) + doc_plain_env, doc_path_env = process_env(compile_ctx, ctx.attrs.doc_env, exec_is_windows) + for k, v in doc_plain_env.items(): + path_env.pop(k, None) + plain_env[k] = v + for k, v in doc_path_env.items(): + plain_env.pop(k, None) + path_env[k] = v + + # `--runtool` is unstable. 
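# (RUSTC_BOOTSTRAP=1, set just below, lets a stable toolchain accept the
# unstable -Z flags that rustdoc --test requires here.)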
+ plain_env["RUSTC_BOOTSTRAP"] = cmd_args("1") + unstable_options = ["-Zunstable-options"] + rustdoc_cmd = cmd_args( + [cmd_args("--env=", k, "=", v, delimiter = "") for k, v in plain_env.items()], + [cmd_args("--path-env=", k, "=", v, delimiter = "") for k, v in path_env.items()], + toolchain_info.rustdoc, "--test", - "-Zunstable-options", + unstable_options, cmd_args("--test-builder=", toolchain_info.compiler, delimiter = ""), toolchain_info.rustdoc_flags, ctx.attrs.rustdoc_flags, common_args.args, - extern_arg(ctx, compile_ctx, [], attr_crate(ctx), library.rlib), + extern_arg([], attr_crate(ctx), rlib), "--extern=proc_macro" if ctx.attrs.proc_macro else [], - compile_ctx.linker_args, + cmd_args(compile_ctx.linker_args, format = "-Clinker={}"), cmd_args(linker_argsfile, format = "-Clink-arg=@{}"), runtool, cmd_args(toolchain_info.rustdoc_test_with_resources, format = "--runtool-arg={}"), cmd_args("--runtool-arg=--resources=", resources, delimiter = ""), "--color=always", "--test-args=--color=always", + hidden = [ + compile_ctx.symlinked_srcs, + link_args_output.hidden, + executable_args.runtime_files, + ], ) - rustdoc_cmd.hidden(compile_ctx.symlinked_srcs, link_args_output.hidden, runtime_files) - return _long_command( ctx = ctx, - exe = toolchain_info.rustdoc, + exe = toolchain_info.rustc_action, args = rustdoc_cmd, argfile_name = "{}.args".format(common_args.subdir), ) -# Generate multiple compile artifacts so that distinct sets of artifacts can be -# generated concurrently. -def rust_compile_multi( - ctx: AnalysisContext, - compile_ctx: CompileContext, - emits: list[Emit], - params: BuildParams, - dep_link_style: LinkStyle, - default_roots: list[str], - extra_link_args: list[typing.Any] = [], - predeclared_outputs: dict[Emit, Artifact] = {}, - extra_flags: list[[str, ResolvedStringWithMacros]] = [], - is_binary: bool = False, - allow_cache_upload: bool = False, - rust_cxx_link_group_info: [RustCxxLinkGroupInfo, None] = None) -> list[RustcOutput]: - outputs = [] - - for emit in emits: - outs = rust_compile( - ctx = ctx, - compile_ctx = compile_ctx, - emit = emit, - params = params, - dep_link_style = dep_link_style, - default_roots = default_roots, - extra_link_args = extra_link_args, - predeclared_outputs = predeclared_outputs, - extra_flags = extra_flags, - is_binary = is_binary, - allow_cache_upload = allow_cache_upload, - rust_cxx_link_group_info = rust_cxx_link_group_info, - ) - outputs.append(outs) - - return outputs - # Generate a compilation action. A single instance of rustc can emit # numerous output artifacts, so return an artifact object for each of # them. @@ -348,71 +423,142 @@ def rust_compile( compile_ctx: CompileContext, emit: Emit, params: BuildParams, - dep_link_style: LinkStyle, default_roots: list[str], + incremental_enabled: bool, extra_link_args: list[typing.Any] = [], - predeclared_outputs: dict[Emit, Artifact] = {}, + predeclared_output: Artifact | None = None, extra_flags: list[[str, ResolvedStringWithMacros]] = [], - is_binary: bool = False, allow_cache_upload: bool = False, + # Setting this to true causes the diagnostic outputs that are generated + # from this action to always be successfully generated, even if + # compilation fails. 
This should not generally be used if the "real" + # output of the action is going to be depended on + infallible_diagnostics: bool = False, rust_cxx_link_group_info: [RustCxxLinkGroupInfo, None] = None) -> RustcOutput: exec_is_windows = ctx.attrs._exec_os_type[OsLookup].platform == "windows" toolchain_info = compile_ctx.toolchain_info - lints, clippy_lints = _lint_flags(compile_ctx) + lints = _lint_flags(compile_ctx, infallible_diagnostics, emit == Emit("clippy")) + + # If we are building metadata-full for a dylib target, we want the hollow-rlib version of rmeta, not the shared lib version. + if compile_ctx.dep_ctx.advanced_unstable_linking and emit == Emit("metadata-full") and params.crate_type == CrateType("dylib"): + params = BuildParams( + crate_type = CrateType("rlib"), + reloc_model = params.reloc_model, + dep_link_strategy = params.dep_link_strategy, + prefix = "lib", + suffix = ".rlib", + ) common_args = _compute_common_args( ctx = ctx, compile_ctx = compile_ctx, + dep_ctx = compile_ctx.dep_ctx, emit = emit, params = params, - dep_link_style = dep_link_style, default_roots = default_roots, + infallible_diagnostics = infallible_diagnostics, + incremental_enabled = incremental_enabled, is_rustdoc_test = False, ) + deferred_link_cmd = None + + # TODO(pickett): We can expand this to support all linked crate types (cdylib + binary) + # We can also share logic here for producing linked artifacts with cxx_library (instead of using) + # deferred_link_action + if params.crate_type == CrateType("dylib") and emit == Emit("link") and compile_ctx.dep_ctx.advanced_unstable_linking: + out_argsfile = ctx.actions.declare_output(common_args.subdir + "/extracted-link-args.args") + out_version_script = ctx.actions.declare_output(common_args.subdir + "/version-script") + out_objects_dir = ctx.actions.declare_output(common_args.subdir + "/objects", dir = True) + linker_cmd = cmd_args( + toolchain_info.extract_link_action, + cmd_args(out_argsfile.as_output(), format = "--out_argsfile={}"), + cmd_args(out_version_script.as_output(), format = "--out_version-script={}") if out_version_script else cmd_args(), + cmd_args(out_objects_dir.as_output(), format = "--out_objects={}"), + compile_ctx.linker_args, + ) + + linker_args = cmd_script( + ctx = ctx, + name = common_args.subdir + "/linker_wrapper", + cmd = linker_cmd, + os = ScriptOs("windows" if ctx.attrs._exec_os_type[OsLookup].platform == "windows" else "unix"), + ) + + deferred_link_cmd = cmd_args( + toolchain_info.deferred_link_action, + cmd_args(out_objects_dir, format = "--objects={}"), + cmd_args(out_version_script, format = "--version-script={}"), + compile_ctx.linker_args, + cmd_args(out_argsfile, format = "@{}"), + ) + else: + linker_args = compile_ctx.linker_args + path_sep = "\\" if exec_is_windows else "/" rustc_cmd = cmd_args( # Lints go first to allow other args to override them. lints, # Report unused --extern crates in the notification stream. 
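# (-Wunused-crate-dependencies enables rustc's unused_crate_dependencies lint,
# and the unused-externs-silent JSON notification reports the unused crates to
# the wrapper in machine-readable form rather than as build-failing
# diagnostics.)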
["--json=unused-externs-silent", "-Wunused-crate-dependencies"] if toolchain_info.report_unused_deps else [], - "--json=artifacts", # only needed for pipeline but no harm in always leaving it enabled common_args.args, cmd_args("--remap-path-prefix=", compile_ctx.symlinked_srcs, path_sep, "=", ctx.label.path, path_sep, delimiter = ""), - compile_ctx.linker_args, + ["-Zremap-cwd-prefix=."] if toolchain_info.nightly_features else [], + cmd_args(linker_args, format = "-Clinker={}"), extra_flags, ) + rustc_bin = compile_ctx.clippy_wrapper if emit == Emit("clippy") else toolchain_info.compiler + # If we're using failure filtering then we need to make sure the final # artifact location is the predeclared one since its specific path may have # already been encoded into the other compile args (eg rpath). So we still # let rustc_emit generate its own output artifacts, and then make sure we # use the predeclared one as the output after the failure filter action # below. Otherwise we'll use the predeclared outputs directly. - if toolchain_info.failure_filter: - emit_output, emit_args, extra_out = _rustc_emit( + if infallible_diagnostics: + emit_op = _rustc_emit( ctx = ctx, - compile_ctx = compile_ctx, emit = emit, - predeclared_outputs = {}, subdir = common_args.subdir, params = params, + incremental_enabled = incremental_enabled, ) else: - emit_output, emit_args, extra_out = _rustc_emit( + emit_op = _rustc_emit( ctx = ctx, - compile_ctx = compile_ctx, emit = emit, - predeclared_outputs = predeclared_outputs, subdir = common_args.subdir, params = params, + predeclared_output = predeclared_output, + incremental_enabled = incremental_enabled, + deferred_link = deferred_link_cmd != None, ) + if emit == Emit("clippy"): + clippy_toml = None + if ctx.attrs.clippy_configuration: + clippy_toml = ctx.attrs.clippy_configuration[ClippyConfiguration].clippy_toml + elif toolchain_info.clippy_toml: + clippy_toml = toolchain_info.clippy_toml + + if clippy_toml: + # Clippy wants to be given a path to a directory containing a + # clippy.toml (or .clippy.toml). Our buckconfig accepts an arbitrary + # label like //path/to:my-clippy.toml which may not have the + # filename that clippy looks for. Here we make a directory that + # symlinks the requested configuration file under the required name. + clippy_conf_dir = ctx.actions.symlinked_dir( + common_args.subdir + "-clippy-configuration", + {"clippy.toml": clippy_toml}, + ) + emit_op.env["CLIPPY_CONF_DIR"] = clippy_conf_dir + pdb_artifact = None dwp_inputs = [] - if crate_type_linked(params.crate_type) and not common_args.is_check: + if crate_type_linked(params.crate_type) and common_args.emit_requires_linking: subdir = common_args.subdir tempfile = common_args.tempfile @@ -420,30 +566,30 @@ def rust_compile( # of that style. if rust_cxx_link_group_info: - inherited_non_rust_link_args = LinkArgs( + inherited_link_args = LinkArgs( infos = rust_cxx_link_group_info.filtered_links + [rust_cxx_link_group_info.symbol_files_info], ) else: - inherited_non_rust_link_args = get_link_args_for_strategy( + inherited_link_args = get_link_args_for_strategy( ctx, - inherited_non_rust_link_info( + inherited_merged_link_infos( ctx, - include_doc_deps = False, - ), - # TODO(cjhopman): It's unclear how rust is using link_style. I'm not sure if it's intended to be a LibOutputStyle or a LinkStrategy. 
- to_link_strategy(dep_link_style), + compile_ctx.dep_ctx, + ).values(), + params.dep_link_strategy, ) link_args_output = make_link_args( + ctx, ctx.actions, compile_ctx.cxx_toolchain_info, [ LinkArgs(flags = extra_link_args), - inherited_non_rust_link_args, + inherited_link_args, ], "{}-{}".format(subdir, tempfile), - output_short_path = emit_output.short_path, + output_short_path = emit_op.output.short_path, ) linker_argsfile, _ = ctx.actions.write( "{}/__{}_linker_args.txt".format(subdir, tempfile), @@ -453,101 +599,88 @@ def rust_compile( pdb_artifact = link_args_output.pdb_artifact dwp_inputs = [link_args_output.link_args] - rustc_cmd.add(cmd_args(linker_argsfile, format = "-Clink-arg=@{}")) - rustc_cmd.hidden(link_args_output.hidden) - (diag, build_status) = _rustc_invoke( + # If we are deferring the real link to a separate action, we no longer pass the linker + # argsfile to rustc. This allows the rustc action to complete with only transitive dep rmeta. + if deferred_link_cmd != None: + deferred_link_cmd.add(cmd_args(linker_argsfile, format = "@{}")) + deferred_link_cmd.add(cmd_args(hidden = link_args_output.hidden)) + + # The -o flag passed to the linker by rustc is a temporary file. So we will strip it + # out in `extract_link_action.py` and provide our own output path here. + deferred_link_cmd.add(cmd_args(emit_op.output.as_output(), format = "-o {}")) + else: + rustc_cmd.add(cmd_args(linker_argsfile, format = "-Clink-arg=@{}")) + rustc_cmd.add(cmd_args(hidden = link_args_output.hidden)) + + invoke = _rustc_invoke( ctx = ctx, compile_ctx = compile_ctx, + common_args = common_args, prefix = "{}/{}".format(common_args.subdir, common_args.tempfile), - rustc_cmd = cmd_args(toolchain_info.compiler, rustc_cmd, emit_args), - diag = "diag", - required_outputs = [emit_output], - short_cmd = common_args.short_cmd, - is_binary = is_binary, - allow_cache_upload = allow_cache_upload, + rustc_cmd = cmd_args(rustc_bin, rustc_cmd, emit_op.args), + required_outputs = [emit_op.output], + is_clippy = emit.value == "clippy", + infallible_diagnostics = infallible_diagnostics, + allow_cache_upload = allow_cache_upload and emit != Emit("clippy"), crate_map = common_args.crate_map, - only_artifact = "metadata" if toolchain_info.pipelined and emit == Emit("metadata") else None, + env = emit_op.env, + incremental_enabled = incremental_enabled, + deferred_link_cmd = deferred_link_cmd, ) - # Add clippy diagnostic targets for check builds - if common_args.is_check: - # We don't really need the outputs from this build, just to keep the artifact accounting straight - clippy_out, clippy_emit_args, _extra_out = _rustc_emit( - ctx = ctx, - compile_ctx = compile_ctx, - emit = emit, - predeclared_outputs = {}, - subdir = common_args.subdir + "-clippy", - params = params, - ) - clippy_env = dict() - if toolchain_info.clippy_toml: - # Clippy wants to be given a path to a directory containing a - # clippy.toml (or .clippy.toml). Our buckconfig accepts an arbitrary - # label like //path/to:my-clippy.toml which may not have the - # filename that clippy looks for. Here we make a directory that - # symlinks the requested configuration file under the required name. 
- clippy_conf_dir = ctx.actions.symlinked_dir( - common_args.subdir + "-clippy-configuration", - {"clippy.toml": toolchain_info.clippy_toml}, - ) - clippy_env["CLIPPY_CONF_DIR"] = clippy_conf_dir - (clippy_diag, _) = _rustc_invoke( - ctx = ctx, - compile_ctx = compile_ctx, - prefix = "{}/{}".format(common_args.subdir, common_args.tempfile), - # Lints go first to allow other args to override them. - rustc_cmd = cmd_args(compile_ctx.clippy_wrapper, clippy_lints, rustc_cmd, clippy_emit_args), - env = clippy_env, - diag = "clippy", - required_outputs = [clippy_out], - short_cmd = common_args.short_cmd, - is_binary = False, - allow_cache_upload = False, - crate_map = common_args.crate_map, - ) - diag.update(clippy_diag) - - if toolchain_info.failure_filter: + if infallible_diagnostics and emit != Emit("clippy"): # This is only needed when this action's output is being used as an # input, so we only need standard diagnostics (clippy is always # asked for explicitly). - stderr = diag["diag.txt"] - filter_prov = RustFailureFilter( - buildstatus = build_status, - required = emit_output, - stderr = stderr, - ) - filtered_output = failure_filter( ctx = ctx, compile_ctx = compile_ctx, - prefix = "{}/{}".format(common_args.subdir, emit.value), - predecl_out = predeclared_outputs.get(emit), - failprov = filter_prov, - short_cmd = common_args.short_cmd, + predeclared_output = predeclared_output, + build_status = invoke.build_status, + required = emit_op.output, + stderr = invoke.diag_txt, + identifier = invoke.identifier, ) else: - filtered_output = emit_output + filtered_output = emit_op.output split_debug_mode = compile_ctx.cxx_toolchain_info.split_debug_mode or SplitDebugMode("none") if emit == Emit("link") and split_debug_mode != SplitDebugMode("none"): - dwo_output_directory = extra_out - external_debug_info = inherited_external_debug_info( + dwo_output_directory = emit_op.extra_out + + # staticlibs and cdylibs are "bundled" in the sense that they are used + # without their dependencies by the rest of the rules. This is normally + # correct, except that the split debuginfo rustc emits for these crate + # types is not bundled. 
This is arguably inconsistent behavior from
+    # rustc, but in any case, it means we need to do this bundling manually
+    # by collecting all the external debuginfo from dependencies
+    if params.crate_type == CrateType("cdylib") or params.crate_type == CrateType("staticlib"):
+        extra_external_debug_info = inherited_rust_external_debug_info(
+            ctx = ctx,
+            dep_ctx = compile_ctx.dep_ctx,
+            link_strategy = params.dep_link_strategy,
+        )
+    else:
+        extra_external_debug_info = []
+    all_external_debug_info = inherited_external_debug_info(
         ctx = ctx,
+        dep_ctx = compile_ctx.dep_ctx,
         dwo_output_directory = dwo_output_directory,
-        dep_link_style = params.dep_link_style,
+        dep_link_strategy = params.dep_link_strategy,
     )
-    dwp_inputs.extend(project_artifacts(ctx.actions, [external_debug_info]))
+    dwp_inputs.extend(project_artifacts(ctx.actions, [all_external_debug_info]))
     else:
         dwo_output_directory = None
+        extra_external_debug_info = []

-    if is_binary and dwp_available(compile_ctx.cxx_toolchain_info):
+    if params.crate_type == CrateType("bin") and \
+       emit == Emit("link") and \
+       dwp_available(compile_ctx.cxx_toolchain_info):
         dwp_output = dwp(
             ctx,
             compile_ctx.cxx_toolchain_info,
-            emit_output,
+            emit_op.output,
             identifier = "{}/__{}_{}_dwp".format(common_args.subdir, common_args.tempfile, str(emit)),
             category_suffix = "rust",
             # TODO(T110378142): Ideally, referenced objects are a list of
@@ -559,32 +692,52 @@ def rust_compile(
     else:
         dwp_output = None

+    stripped_output = strip_debug_info(
+        ctx,
+        paths.join(common_args.subdir, "stripped", output_filename(
+            attr_simple_crate_for_filenames(ctx),
+            Emit("link"),
+            params,
+        )),
+        filtered_output,
+    )
+
     return RustcOutput(
         output = filtered_output,
-        diag = diag,
+        stripped_output = stripped_output,
+        diag_txt = invoke.diag_txt,
+        diag_json = invoke.diag_json,
         pdb = pdb_artifact,
         dwp_output = dwp_output,
         dwo_output_directory = dwo_output_directory,
+        extra_external_debug_info = extra_external_debug_info,
     )

 # --extern <crate>=<artifact> for direct dependencies
 # -Ldependency=<dir> for transitive dependencies
 # For native dependencies, we use -Clink-arg=@argsfile
-# Second element of result tuple is a list of files/directories that should be present for executable to be run successfully
-# Third return is the mapping from crate names back to targets (needed so that a deps linter knows what deps need fixing)
-def _dependency_args(
+#
+# Second element of returned tuple is a mapping from crate names back to target
+# label, needed for applying autofixes for rustc's unused_crate_dependencies
+# lint by tracing Rust crate names in the compiler diagnostic back to which
+# dependency entry in the BUCK file needs to be removed.
+#
+# The `compile_ctx` may be omitted if there are no dependencies with dynamic
+# crate names.
+def dependency_args(
     ctx: AnalysisContext,
-    compile_ctx: CompileContext,
+    compile_ctx: CompileContext | None,
+    toolchain_info: RustToolchainInfo,
+    deps: list[RustDependency],
     subdir: str,
-    crate_type: CrateType,
-    dep_link_style: LinkStyle,
-    is_check: bool,
+    dep_link_strategy: LinkStrategy,
+    dep_metadata_kind: MetadataKind,
     is_rustdoc_test: bool) -> (cmd_args, list[(CrateName, Label)]):
     args = cmd_args()
     transitive_deps = {}
     crate_targets = []
     available_proc_macros = get_available_proc_macros(ctx)
-    for dep in resolve_rust_deps(ctx, include_doc_deps = is_rustdoc_test):
+    for dep in deps:
         if dep.name:
             crate = CrateName(
                 simple = normalize_crate(dep.name),
@@ -593,31 +746,24 @@
         else:
             crate = dep.info.crate

-        style = style_info(dep.info, dep_link_style)
-
-        use_rmeta = is_check or (compile_ctx.toolchain_info.pipelined and not crate_type_codegen(crate_type) and not is_rustdoc_test)
+        strategy = strategy_info(toolchain_info, dep.info, dep_link_strategy)

-        # Use rmeta dependencies whenever possible because they
-        # should be cheaper to produce.
-        if use_rmeta:
-            artifact = style.rmeta
-            transitive_artifacts = style.transitive_rmeta_deps
-        else:
-            artifact = style.rlib
-            transitive_artifacts = style.transitive_deps
+        artifact = strategy.outputs[dep_metadata_kind]
+        transitive_artifacts = strategy.transitive_deps[dep_metadata_kind]

-        for marker in style.transitive_proc_macro_deps.keys():
+        for marker in strategy.transitive_proc_macro_deps.keys():
             info = available_proc_macros[marker.label][RustLinkInfo]
-            style = style_info(info, dep_link_style)
-            transitive_deps[style.rmeta if use_rmeta else style.rlib] = info.crate
+            strategy = strategy_info(toolchain_info, info, dep_link_strategy)
+            transitive_deps[strategy.outputs[MetadataKind("link")]] = info.crate

-        args.add(extern_arg(ctx, compile_ctx, dep.flags, crate, artifact))
+        args.add(extern_arg(dep.flags, crate, artifact))
         crate_targets.append((crate, dep.label))

-        # Because deps of this *target* can also be transitive deps of this compiler
-        # invocation, pass the artifact through `-L` unconditionally for doc tests.
+        # Because deps of this *target* can also be transitive deps of this
+        # compiler invocation, pass the artifact (under its original crate name)
+        # through `-L` unconditionally for doc tests.
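
# Illustrative sketch (editorial aside, not part of this patch): how the flag
# scheme documented above plays out. Each direct dependency becomes an
# `--extern=<crate>=<artifact>` flag, while transitive dependencies are made
# discoverable via `-Ldependency=<dir>` symlink directories. All names below
# are hypothetical; the real implementation builds cmd_args over Artifacts
# rather than plain strings.
def _example_dependency_flags(direct_deps, transitive_dirs):
    # direct_deps: list of (crate_name, artifact_path) pairs
    # transitive_dirs: list of directories holding transitive rlibs/rmetas
    flags = ["--extern={}={}".format(crate, path) for crate, path in direct_deps]
    flags += ["-Ldependency={}".format(d) for d in transitive_dirs]
    return flags

# e.g. _example_dependency_flags([("serde", "out/libserde.rmeta")], ["out/deps-full"])
# == ["--extern=serde=out/libserde.rmeta", "-Ldependency=out/deps-full"]
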
if is_rustdoc_test: - transitive_deps[artifact] = crate + transitive_deps[artifact] = dep.info.crate # Unwanted transitive_deps have already been excluded transitive_deps.update(transitive_artifacts) @@ -630,7 +776,7 @@ def _dependency_args( else: simple_artifacts[artifact] = None - prefix = "{}-deps{}".format(subdir, "-check" if is_check else "") + prefix = "{}-deps{}".format(subdir, dep_metadata_kind.value) if simple_artifacts: args.add(simple_symlinked_dirs(ctx, prefix, simple_artifacts)) if dynamic_artifacts: @@ -664,20 +810,38 @@ def dynamic_symlinked_dirs( artifacts: dict[Artifact, CrateName]) -> cmd_args: name = "{}-dyn".format(prefix) transitive_dependency_dir = ctx.actions.declare_output(name, dir = True) - do_symlinks = cmd_args( - compile_ctx.toolchain_info.transitive_dependency_symlinks_tool, - cmd_args(transitive_dependency_dir.as_output(), format = "--out-dir={}"), + + # Pass the list of rlibs to transitive_dependency_symlinks.py through a file + # because there can be a lot of them. This avoids running out of command + # line length, particularly on Windows. + relative_path = lambda artifact: cmd_args( + artifact, + delimiter = "", + ignore_artifacts = True, + relative_to = transitive_dependency_dir.project("i"), + ) + artifacts_json = ctx.actions.write_json( + ctx.actions.declare_output("{}-dyn.json".format(prefix)), + [ + (relative_path(artifact), crate.dynamic) + for artifact, crate in artifacts.items() + ], + with_inputs = True, + pretty = True, ) - for artifact, crate in artifacts.items(): - relative_path = cmd_args(artifact).relative_to(transitive_dependency_dir.project("i")) - do_symlinks.add("--artifact", crate.dynamic, relative_path.ignore_artifacts()) + ctx.actions.run( - do_symlinks, - category = "tdep_symlinks", + [ + compile_ctx.toolchain_info.transitive_dependency_symlinks_tool, + cmd_args(transitive_dependency_dir.as_output(), format = "--out-dir={}"), + cmd_args(artifacts_json, format = "--artifacts={}"), + ], + category = "deps", identifier = str(len(compile_ctx.transitive_dependency_dirs)), ) + compile_ctx.transitive_dependency_dirs[transitive_dependency_dir] = None - return cmd_args(transitive_dependency_dir, format = "@{}/dirs").hidden(artifacts.keys()) + return cmd_args(transitive_dependency_dir, format = "@{}/dirs", hidden = artifacts.keys()) def _lintify(flag: str, clippy: bool, lints: list[ResolvedStringWithMacros]) -> cmd_args: return cmd_args( @@ -685,23 +849,16 @@ def _lintify(flag: str, clippy: bool, lints: list[ResolvedStringWithMacros]) -> format = "-{}{{}}".format(flag), ) -def _lint_flags(compile_ctx: CompileContext) -> (cmd_args, cmd_args): +def _lint_flags(compile_ctx: CompileContext, infallible_diagnostics: bool, is_clippy: bool) -> cmd_args: toolchain_info = compile_ctx.toolchain_info - plain = cmd_args( - _lintify("A", False, toolchain_info.allow_lints), - _lintify("D", False, toolchain_info.deny_lints), - _lintify("W", False, toolchain_info.warn_lints), - ) - - clippy = cmd_args( - _lintify("A", True, toolchain_info.allow_lints), - _lintify("D", True, toolchain_info.deny_lints), - _lintify("W", True, toolchain_info.warn_lints), + return cmd_args( + _lintify("A", is_clippy, toolchain_info.allow_lints), + _lintify("D", is_clippy, toolchain_info.deny_lints), + _lintify("D" if infallible_diagnostics else "W", is_clippy, toolchain_info.deny_on_check_lints), + _lintify("W", is_clippy, toolchain_info.warn_lints), ) - return (plain, clippy) - def _rustc_flags(flags: list[[str, ResolvedStringWithMacros]]) -> list[[str, ResolvedStringWithMacros]]: 
# Rustc's "-g" flag is documented as being exactly equivalent to # "-Cdebuginfo=2". Rustdoc supports the latter, it just doesn't have the @@ -716,60 +873,77 @@ def _rustc_flags(flags: list[[str, ResolvedStringWithMacros]]) -> list[[str, Res def _compute_common_args( ctx: AnalysisContext, compile_ctx: CompileContext, + dep_ctx: DepCollectionContext, emit: Emit, params: BuildParams, - dep_link_style: LinkStyle, default_roots: list[str], + infallible_diagnostics: bool, + incremental_enabled: bool, is_rustdoc_test: bool) -> CommonArgsInfo: exec_is_windows = ctx.attrs._exec_os_type[OsLookup].platform == "windows" path_sep = "\\" if exec_is_windows else "/" crate_type = params.crate_type - args_key = (crate_type, emit, dep_link_style, is_rustdoc_test) - if False: - # TODO(nga): following `if args_key in ...` is no-op, and typechecker does not like it. - def unknown(): - pass - - args_key = unknown() + args_key = (crate_type, emit, params.dep_link_strategy, is_rustdoc_test, infallible_diagnostics, incremental_enabled) if args_key in compile_ctx.common_args: return compile_ctx.common_args[args_key] # Keep filenames distinct in per-flavour subdirs - subdir = "{}-{}-{}-{}".format(crate_type.value, params.reloc_model.value, dep_link_style.value, emit.value) + subdir = "{}-{}-{}-{}".format(crate_type.value, params.reloc_model.value, params.dep_link_strategy.value, emit.value) if is_rustdoc_test: subdir = "{}-rustdoc-test".format(subdir) + if infallible_diagnostics: + subdir = "{}-diag".format(subdir) + if incremental_enabled: + subdir = "{}-incr".format(subdir) # Included in tempfiles tempfile = "{}-{}".format(attr_simple_crate_for_filenames(ctx), emit.value) - srcs = ctx.attrs.srcs - mapped_srcs = ctx.attrs.mapped_srcs - all_srcs = map(lambda s: s.short_path, srcs) + mapped_srcs.values() - crate_root = ctx.attrs.crate_root or _crate_root(ctx, all_srcs, default_roots) + root = crate_root(ctx, default_roots) if exec_is_windows: - crate_root = crate_root.replace("/", "\\") - - is_check = not emit_needs_codegen(emit) + root = root.replace("/", "\\") - dependency_args, crate_map = _dependency_args( + # With `advanced_unstable_linking`, we unconditionally pass the metadata + # artifacts. There are two things that work together to make this possible + # in the case of binaries: + # + # 1. The actual rlibs appear in the link providers, so they'll still be + # available for the linker to link in + # 2. The metadata artifacts aren't rmetas, but rather rlibs that just + # don't contain any generated code. Rustc can't distinguish these + # from real rlibs, and so doesn't throw an error + # + # The benefit of doing this is that there's no requirement that the + # dependency's generated code be provided to the linker via an rlib. 
It + # could be provided by other means, say, a link group + dep_metadata_kind = dep_metadata_of_emit(emit) + + # FIXME(JakobDegen): This computation is an awfully broad over-approximation + emit_requires_linking = dep_metadata_kind == MetadataKind("link") + if compile_ctx.dep_ctx.advanced_unstable_linking or not crate_type_codegen(crate_type): + if dep_metadata_kind == MetadataKind("link"): + dep_metadata_kind = MetadataKind("full") + + dep_args, crate_map = dependency_args( ctx = ctx, compile_ctx = compile_ctx, + toolchain_info = compile_ctx.toolchain_info, + deps = resolve_rust_deps(ctx, dep_ctx), subdir = subdir, - crate_type = crate_type, - dep_link_style = dep_link_style, - is_check = is_check, + dep_link_strategy = params.dep_link_strategy, + dep_metadata_kind = dep_metadata_kind, is_rustdoc_test = is_rustdoc_test, ) if crate_type == CrateType("proc-macro"): - dependency_args.add("--extern=proc_macro") + dep_args.add("--extern=proc_macro") - if crate_type == CrateType("cdylib") and not is_check: + if crate_type in [CrateType("cdylib"), CrateType("dylib")] and emit_requires_linking: linker_info = compile_ctx.cxx_toolchain_info.linker_info - shlib_name = get_default_shared_library_name(linker_info, ctx.label) - dependency_args.add(cmd_args( + shlib_name = attr_soname(ctx) + dep_args.add(cmd_args( get_shared_library_name_linker_flags(linker_info.type, shlib_name), format = "-Clink-arg={}", )) @@ -784,6 +958,46 @@ def _compute_common_args( else: crate_name_arg = cmd_args("--crate-name=", crate.simple, delimiter = "") + # The `-Cprefer-dynamic` flag controls rustc's choice of artifacts for + # transitive dependencies, both for loading metadata and linking them. + # Direct dependencies are given to rustc one-by-one using `--extern` with a + # path to a specific artifact, so there is never ambiguity what artifact to + # use for a direct dependency. But transitive dependencies are passed in + # bulk via zero or more `-Ldependency` flags, which are directories + # containing artifacts. Within those directories, information about a + # specific crate might be available from more than one artifact, such as a + # dylib and rlib for the same crate. + # + # With `-Cprefer-dynamic=no` (the default), when a transitive dependency + # exists as both rlib and dylib, metadata is loaded from the rlib. If some + # dependencies are available in dylib but not rlib, the dylib is used for + # those. With `-Cprefer-dynamic=yes`, when a transitive dependency exists as + # both rlib and dylib, instead the dylib is used. + # + # The ambiguity over whether to use rlib or dylib for a particular + # transitive dependency only occurs if the rlib and dylib both describe the + # same crate i.e. contain the same crate hash. + # + # Buck-built libraries never produce an rlib and dylib containing the same + # crate hash, since that only occurs when outputting multiple crate types + # through a single rustc invocation: `--crate-type=rlib --crate-type=dylib`. + # In Buck, different crate types are built by different rustc invocations. + # But Cargo does invoke rustc with multiple crate types when you write + # `[lib] crate-type = ["rlib", "dylib"]` in Cargo.toml, and in fact the + # standard libraries built by x.py and distributed by Rustup are built this + # way. + if toolchain_info.explicit_sysroot_deps: + # Standard libraries are being passed explicitly, and Buck-built + # dependencies never collide on crate hash, so `-Cprefer-dynamic` cannot + # make a difference. 
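
# Illustrative sketch (editorial aside, not part of this patch): the decision
# implemented by the branches that follow, condensed into a pure function.
# Inputs are simplified to plain values; the real code reads them off
# toolchain_info and the crate type.
def _example_prefer_dynamic_flags(explicit_sysroot_deps, is_dylib, advanced_unstable_linking):
    if explicit_sysroot_deps:
        # Sysroot crates are passed explicitly, and Buck-built deps never
        # collide on crate hash, so the flag is irrelevant.
        return []
    elif is_dylib and advanced_unstable_linking:
        # Resolve standard library crates to the implicit sysroot's dylibs.
        return ["-Cprefer-dynamic=yes"]
    else:
        # Resolve standard library crates to the implicit sysroot's rlibs
        # (rustc's default).
        return ["-Cprefer-dynamic=no"]
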
+ prefer_dynamic_flags = [] + elif crate_type == CrateType("dylib") and toolchain_info.advanced_unstable_linking: + # Use standard library dylibs from the implicit sysroot. + prefer_dynamic_flags = ["-Cprefer-dynamic=yes"] + else: + # Use standard library rlibs from the implicit sysroot. + prefer_dynamic_flags = ["-Cprefer-dynamic=no"] # (the default) + split_debuginfo_flags = { # Rustc's default behavior: debug info is put into every rlib and # staticlib, then copied into the executables and shared libraries by @@ -827,31 +1041,38 @@ def _compute_common_args( }[compile_ctx.cxx_toolchain_info.split_debug_mode or SplitDebugMode("none")] args = cmd_args( - cmd_args(compile_ctx.symlinked_srcs, path_sep, crate_root, delimiter = ""), + cmd_args(compile_ctx.symlinked_srcs, path_sep, root, delimiter = ""), crate_name_arg, "--crate-type={}".format(crate_type.value), "-Crelocation-model={}".format(params.reloc_model.value), "--edition={}".format(edition), - "-Cmetadata={}".format(_metadata(ctx.label)[0]), + "-Cmetadata={}".format(_metadata(ctx.label, is_rustdoc_test)[0]), # Make diagnostics json with the option to extract rendered text ["--error-format=json", "--json=diagnostic-rendered-ansi"] if not is_rustdoc_test else [], - ["-Cprefer-dynamic=yes"] if crate_type == CrateType("dylib") else [], + prefer_dynamic_flags, ["--target={}".format(toolchain_info.rustc_target_triple)] if toolchain_info.rustc_target_triple else [], split_debuginfo_flags, + compile_ctx.sysroot_args, + ["-Cpanic=abort", "-Zpanic-abort-tests=yes"] if toolchain_info.panic_runtime == PanicRuntime("abort") else [], _rustc_flags(toolchain_info.rustc_flags), - _rustc_flags(toolchain_info.rustc_check_flags) if is_check else [], + # `rustc_check_flags` is specifically interpreted as flags that are used + # only on the metadata-fast graph. 
+        _rustc_flags(toolchain_info.rustc_check_flags) if dep_metadata_kind == MetadataKind("fast") else [],
         _rustc_flags(toolchain_info.rustc_coverage_flags) if ctx.attrs.coverage else [],
         _rustc_flags(ctx.attrs.rustc_flags),
+        _rustc_flags(toolchain_info.extra_rustc_flags),
         cmd_args(ctx.attrs.features, format = '--cfg=feature="{}"'),
-        dependency_args,
+        dep_args,
     )

     common_args = CommonArgsInfo(
         args = args,
         subdir = subdir,
         tempfile = tempfile,
-        short_cmd = "{},{},{}".format(crate_type.value, params.reloc_model.value, emit.value),
-        is_check = is_check,
+        crate_type = crate_type,
+        params = params,
+        emit = emit,
+        emit_requires_linking = emit_requires_linking,
         crate_map = crate_map,
     )

@@ -871,15 +1092,16 @@ def _clippy_wrapper(
     if toolchain_info.rustc_target_triple:
         rustc_print_sysroot.add("--target={}".format(toolchain_info.rustc_target_triple))

+    skip_setting_sysroot = toolchain_info.explicit_sysroot_deps != None or toolchain_info.sysroot_path != None
+
     if ctx.attrs._exec_os_type[OsLookup].platform == "windows":
         wrapper_file, _ = ctx.actions.write(
             ctx.actions.declare_output("__clippy_driver_wrapper.bat"),
             [
                 "@echo off",
                 "set __CLIPPY_INTERNAL_TESTS=true",
-                cmd_args(rustc_print_sysroot, format = 'FOR /F "tokens=* USEBACKQ" %%F IN (`{}`) DO ('),
-                "set SYSROOT=%%F",
-                ")",
+            ] + [
+                cmd_args(rustc_print_sysroot, format = 'FOR /F "tokens=* USEBACKQ" %%F IN (`{}`) DO (set SYSROOT=%%F)') if not skip_setting_sysroot else "",
                 cmd_args(clippy_driver, format = "{} %*"),
             ],
             allow_args = True,
@@ -891,14 +1113,16 @@ def _clippy_wrapper(
             "#!/usr/bin/env bash",
             # Force clippy to be clippy: https://github.com/rust-lang/rust-clippy/blob/e405c68b3c1265daa9a091ed9b4b5c5a38c0c0ba/src/driver.rs#L334
             "export __CLIPPY_INTERNAL_TESTS=true",
-            cmd_args(rustc_print_sysroot, format = "export SYSROOT=$({})"),
+        ] + (
+            [] if skip_setting_sysroot else [cmd_args(rustc_print_sysroot, format = "export SYSROOT=$({})")]
+        ) + [
             cmd_args(clippy_driver, format = "{} \"$@\"\n"),
         ],
         is_executable = True,
         allow_args = True,
     )

-    return cmd_args(wrapper_file).hidden(clippy_driver, rustc_print_sysroot)
+    return cmd_args(wrapper_file, hidden = [clippy_driver, rustc_print_sysroot])

 # This is a hack because we need to pass the linker to rustc
 # using -Clinker=path and there is currently no way of doing this
@@ -913,31 +1137,35 @@ def _linker_args(
         ctx.attrs.linker_flags,
     )

-    linker_wrapper = cmd_script(
+    return cmd_script(
         ctx = ctx,
         name = "linker_wrapper",
         cmd = linker,
         os = ScriptOs("windows" if ctx.attrs._exec_os_type[OsLookup].platform == "windows" else "unix"),
     )

-    return cmd_args(linker_wrapper, format = "-Clinker={}")
-
 # Returns the full label and its hash. The full label is used for `-Cmetadata`,
 # which provides the primary disambiguator for two otherwise identically named
 # crates. The hash is added to the filename to give them a lower likelihood of
 # duplicate names, but it doesn't matter if they collide.
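
# Illustrative sketch (editorial aside, not part of this patch): the hash half
# of the pair returned by `_metadata` (defined next) feeds `-Cextra-filename`,
# so two crates both named `foo` but living at different labels land in
# distinctly named files, e.g. `libfoo-0a1b2c3d.rlib` vs `libfoo-c4d5e6f7.rlib`.
# A pure-Starlark rendering of just the hashing step:
def _example_metadata_hash(label):
    h = hash(label)  # Starlark's string hash; may be negative
    if h < 0:
        h = -h
    h = "%x" % h
    return "0" * (8 - len(h)) + h  # zero-padded to 8 hex digits
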
-def _metadata(label: Label) -> (str, str): +def _metadata(label: Label, is_rustdoc_test: bool) -> (str, str): label = str(label.raw_target()) + if is_rustdoc_test: + label = "doctest/" + label h = hash(label) if h < 0: h = -h h = "%x" % h return (label, "0" * (8 - len(h)) + h) -def _crate_root( +def crate_root( ctx: AnalysisContext, - srcs: list[str], default_roots: list[str]) -> str: + if ctx.attrs.crate_root: + return ctx.attrs.crate_root + + srcs = [s.short_path for s in ctx.attrs.srcs] + ctx.attrs.mapped_srcs.values() + candidates = set() if getattr(ctx.attrs, "crate_dynamic", None): crate_with_suffix = None @@ -948,127 +1176,204 @@ def _crate_root( if filename in default_roots or filename == crate_with_suffix: candidates.add(src) - if candidates.size() == 1: - return candidates.list()[0] - - fail("Could not infer crate_root. candidates=%s\nAdd 'crate_root = \"src/example.rs\"' to your attributes to disambiguate." % candidates.list()) + if len(candidates) == 1: + return candidates.pop() + + fail("Could not infer crate_root." + + "\nMake sure you have one of {} in your `srcs` attribute.".format(default_roots) + + "\nOr add 'crate_root = \"src/example.rs\"' to your attributes to disambiguate. candidates={}".format(candidates)) + +def _explain(crate_type: CrateType, link_strategy: LinkStrategy, emit: Emit, infallible_diagnostics: bool) -> str: + if emit == Emit("metadata-full"): + link_strategy_suffix = { + LinkStrategy("static"): " [static]", + LinkStrategy("static_pic"): " [pic]", + LinkStrategy("shared"): " [shared]", + }[link_strategy] + return "metadata" + link_strategy_suffix + + if emit == Emit("metadata-fast"): + return "diag" if infallible_diagnostics else "check" + + if emit == Emit("link"): + link_strategy_suffix = { + LinkStrategy("static"): "", + LinkStrategy("static_pic"): " [pic]", + LinkStrategy("shared"): " [shared]", + }[link_strategy] + if crate_type == CrateType("bin"): + return "link" + link_strategy_suffix + if crate_type == CrateType("rlib"): + return "rlib" + link_strategy_suffix + if crate_type == CrateType("dylib"): + return "dylib" + link_strategy_suffix + if crate_type == CrateType("proc-macro"): + return "proc-macro" # always static_pic + if crate_type == CrateType("cdylib"): + return "cdylib" + link_strategy_suffix + if crate_type == CrateType("staticlib"): + return "staticlib" + link_strategy_suffix + + if emit == Emit("expand"): + return "expand" + + if emit == Emit("llvm-ir"): + link_strategy_suffix = { + LinkStrategy("static"): " [static]", + LinkStrategy("static_pic"): " [pic]", + LinkStrategy("shared"): " [shared]", + }[link_strategy] + return "llvm-ir" + link_strategy_suffix + + fail("unrecognized rustc action:", crate_type, link_strategy, emit) + +EmitOperation = record( + output = field(Artifact), + args = field(cmd_args), + env = field(dict[str, str]), + extra_out = field(Artifact | None), +) # Take a desired output and work out how to convince rustc to generate it def _rustc_emit( ctx: AnalysisContext, - compile_ctx: CompileContext, emit: Emit, - predeclared_outputs: dict[Emit, Artifact], subdir: str, - params: BuildParams) -> (Artifact, cmd_args, [Artifact, None]): - toolchain_info = compile_ctx.toolchain_info + params: BuildParams, + incremental_enabled: bool, + predeclared_output: Artifact | None = None, + deferred_link: bool = False) -> EmitOperation: simple_crate = attr_simple_crate_for_filenames(ctx) crate_type = params.crate_type - # Metadata for pipelining needs has enough info to be used as an input for - # dependents. 
To do this reliably, follow Cargo's pattern of always doing - # --emit metadata,link, but only using the output we actually need. - # - # We don't bother to do this with "codegen" crates - ie, ones which are - # linked into an artifact like binaries and dylib, since they're not used as - # a pipelined dependency input. - pipeline_artifact = toolchain_info.pipelined and \ - emit in (Emit("metadata"), Emit("link")) and \ - not crate_type_codegen(crate_type) - emit_args = cmd_args() - if emit in predeclared_outputs: - emit_output = predeclared_outputs[emit] + emit_env = {} + extra_out = None + + if predeclared_output: + emit_output = predeclared_output else: - extra_hash = "-" + _metadata(ctx.label)[1] + extra_hash = "-" + _metadata(ctx.label, False)[1] emit_args.add("-Cextra-filename={}".format(extra_hash)) filename = subdir + "/" + output_filename(simple_crate, emit, params, extra_hash) emit_output = ctx.actions.declare_output(filename) - # For pipelined builds if we're emitting either metadata or link then make - # sure we generate both and take the one we want. - if pipeline_artifact: - metaext = "" if emit == Emit("metadata") else "_unwanted" - linkext = "" if emit == Emit("link") else "_unwanted" - - emit_args.add( - cmd_args("--emit=metadata=", emit_output.as_output(), metaext, delimiter = ""), - cmd_args("--emit=link=", emit_output.as_output(), linkext, delimiter = ""), - ) - elif emit == Emit("expand"): + if emit == Emit("expand"): + emit_env["RUSTC_BOOTSTRAP"] = "1" emit_args.add( "-Zunpretty=expanded", cmd_args(emit_output.as_output(), format = "-o{}"), ) else: - # Assume https://github.com/rust-lang/rust/issues/85356 is fixed (ie - # https://github.com/rust-lang/rust/pull/85362 is applied) - emit_args.add(cmd_args("--emit=", emit.value, "=", emit_output.as_output(), delimiter = "")) + # Even though the unstable flag only appears on one of the branches, we need + # an identical environment between the `-Zno-codegen` and non-`-Zno-codegen` + # command or else there are "found possibly newer version of crate" errors. + emit_env["RUSTC_BOOTSTRAP"] = "1" + + if emit == Emit("metadata-full"): + if crate_type_codegen(crate_type): + # We don't ever have metadata-only deps on codegen crates, so we can + # fall back to the `metadata-fast` behavior. Normally though, this + # artifact should be unused and so this shouldn't matter. + effective_emit = "metadata" + else: + # As we're doing a pipelined build, instead of emitting an actual rmeta + # we emit a "hollow" .rlib - ie, it only contains lib.rmeta and no object + # code. It should contain full information needed by any dependent + # crate which is generating code (MIR, etc). + # + # IMPORTANT: this flag is the only way that the Emit("metadata") and + # Emit("link") operations are allowed to diverge without causing them to + # get different crate hashes. + emit_args.add("-Zno-codegen") + effective_emit = "link" + elif emit == Emit("metadata-fast") or emit == Emit("clippy"): + effective_emit = "metadata" + else: + effective_emit = emit.value + + # When using deferred link, we still want to pass `--emit` to rustc to trigger + # the correct compilation behavior, but we do not want to pass emit_output here. + # Instead, we will bind the emit output to the actual deferred link action. 
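
# Illustrative sketch (editorial aside, not part of this patch): the emit
# translation implemented by the branches below, as a pure function returning
# the value for rustc's `--emit` plus any extra flags. Hypothetical and
# simplified; the real code also threads output artifacts and deferred-link
# state through.
def _example_effective_emit(emit, is_codegen_crate):
    if emit == "metadata-full":
        if is_codegen_crate:
            # No metadata-only deps on codegen crates, so fall back to the
            # metadata-fast behavior.
            return ("metadata", [])
        else:
            # "Hollow" rlib: `--emit link` under -Zno-codegen produces an rlib
            # containing only lib.rmeta, with the same crate hash as the real
            # link artifact.
            return ("link", ["-Zno-codegen"])
    elif emit in ("metadata-fast", "clippy"):
        return ("metadata", [])
    else:
        return (emit, [])
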
+ if deferred_link and effective_emit == "link": + emit_args.add(cmd_args("--emit=", effective_emit, delimiter = "")) + else: + emit_args.add(cmd_args("--emit=", effective_emit, "=", emit_output.as_output(), delimiter = "")) - extra_out = None - if emit != Emit("expand"): # Strip file extension from directory name. base, _ext = paths.split_extension(output_filename(simple_crate, emit, params)) extra_dir = subdir + "/extras/" + base extra_out = ctx.actions.declare_output(extra_dir, dir = True) emit_args.add(cmd_args(extra_out.as_output(), format = "--out-dir={}")) - if ctx.attrs.incremental_enabled: + if incremental_enabled: build_mode = ctx.attrs.incremental_build_mode incremental_out = ctx.actions.declare_output("{}/extras/incremental/{}".format(subdir, build_mode)) incremental_cmd = cmd_args(incremental_out.as_output(), format = "-Cincremental={}") emit_args.add(incremental_cmd) - return (emit_output, emit_args, extra_out) + return EmitOperation( + output = emit_output, + args = emit_args, + env = emit_env, + extra_out = extra_out, + ) + +Invoke = record( + diag_txt = field(Artifact), + diag_json = field(Artifact), + build_status = field(Artifact | None), + identifier = field([str, None]), +) # Invoke rustc and capture outputs def _rustc_invoke( ctx: AnalysisContext, compile_ctx: CompileContext, + common_args: CommonArgsInfo, prefix: str, rustc_cmd: cmd_args, - diag: str, required_outputs: list[Artifact], - short_cmd: str, - is_binary: bool, + is_clippy: bool, + infallible_diagnostics: bool, allow_cache_upload: bool, + incremental_enabled: bool, crate_map: list[(CrateName, Label)], - env: dict[str, [ResolvedStringWithMacros, Artifact]] = {}, - only_artifact: [None, str] = None) -> (dict[str, Artifact], [Artifact, None]): + env: dict[str, str | ResolvedStringWithMacros | Artifact], + deferred_link_cmd: cmd_args | None) -> Invoke: exec_is_windows = ctx.attrs._exec_os_type[OsLookup].platform == "windows" toolchain_info = compile_ctx.toolchain_info - plain_env, path_env = _process_env(compile_ctx, ctx.attrs.env, exec_is_windows) + plain_env, path_env = process_env(compile_ctx, ctx.attrs.env, exec_is_windows) - more_plain_env, more_path_env = _process_env(compile_ctx, env, exec_is_windows) + more_plain_env, more_path_env = process_env(compile_ctx, env, exec_is_windows) plain_env.update(more_plain_env) path_env.update(more_path_env) # Save diagnostic outputs - json_diag = ctx.actions.declare_output("{}-{}.json".format(prefix, diag)) - txt_diag = ctx.actions.declare_output("{}-{}.txt".format(prefix, diag)) + diag = "clippy" if is_clippy else "diag" + diag_json = ctx.actions.declare_output("{}-{}.json".format(prefix, diag)) + diag_txt = ctx.actions.declare_output("{}-{}.txt".format(prefix, diag)) compile_cmd = cmd_args( - cmd_args(json_diag.as_output(), format = "--diag-json={}"), - cmd_args(txt_diag.as_output(), format = "--diag-txt={}"), - "--remap-cwd-prefix=.", + cmd_args(diag_json.as_output(), format = "--diag-json={}"), + cmd_args(diag_txt.as_output(), format = "--diag-txt={}"), + ["--remap-cwd-prefix=."] if not toolchain_info.nightly_features else [], "--buck-target={}".format(ctx.label.raw_target()), + hidden = [toolchain_info.compiler, compile_ctx.symlinked_srcs], ) - if only_artifact: - compile_cmd.add("--only-artifact=" + only_artifact) - for k, v in crate_map: - compile_cmd.add(crate_map_arg(ctx, compile_ctx, k, v)) + compile_cmd.add(crate_map_arg(k, v)) for k, v in plain_env.items(): compile_cmd.add(cmd_args("--env=", k, "=", v, delimiter = "")) for k, v in path_env.items(): 
compile_cmd.add(cmd_args("--path-env=", k, "=", v, delimiter = "")) build_status = None - if toolchain_info.failure_filter: + if infallible_diagnostics: # Build status for fail filter build_status = ctx.actions.declare_output("{}_build_status-{}.json".format(prefix, diag)) compile_cmd.add(cmd_args(build_status.as_output(), format = "--failure-filter={}")) @@ -1076,7 +1381,6 @@ def _rustc_invoke( compile_cmd.add("--required-output", out.short_path, out.as_output()) compile_cmd.add(rustc_cmd) - compile_cmd.hidden(toolchain_info.compiler, compile_ctx.symlinked_srcs) compile_cmd = _long_command( ctx = ctx, @@ -1085,26 +1389,59 @@ def _rustc_invoke( argfile_name = "{}-{}.args".format(prefix, diag), ) - incremental_enabled = ctx.attrs.incremental_enabled local_only = False prefer_local = False if incremental_enabled: local_only = True - elif is_binary and link_cxx_binary_locally(ctx): + elif common_args.crate_type == CrateType("bin") and \ + common_args.emit == Emit("link") and \ + link_cxx_binary_locally(ctx): prefer_local = True - identifier = "{} {} [{}]".format(prefix, short_cmd, diag) + if is_clippy: + category = "clippy" + identifier = "" + else: + category = "rustc" + identifier = _explain( + crate_type = common_args.crate_type, + link_strategy = common_args.params.dep_link_strategy, + emit = common_args.emit, + infallible_diagnostics = infallible_diagnostics, + ) + + if incremental_enabled: + if not identifier.endswith("]"): + identifier += " " + identifier += "[incr]" + ctx.actions.run( compile_cmd, local_only = local_only, - prefer_local = prefer_local, - category = "rustc", + # We only want to prefer_local here if rustc is performing the link + prefer_local = prefer_local and deferred_link_cmd == None, + category = category, identifier = identifier, no_outputs_cleanup = incremental_enabled, - allow_cache_upload = allow_cache_upload, + # We want to unconditionally cache object file compilations when rustc is not linking + allow_cache_upload = allow_cache_upload or deferred_link_cmd != None, ) - return ({diag + ".json": json_diag, diag + ".txt": txt_diag}, build_status) + if deferred_link_cmd: + ctx.actions.run( + deferred_link_cmd, + local_only = local_only, + prefer_local = prefer_local, + category = "deferred_link", + allow_cache_upload = allow_cache_upload, + ) + + return Invoke( + diag_txt = diag_txt, + diag_json = diag_json, + build_status = build_status, + identifier = identifier, + ) # Our rustc and rustdoc commands can have arbitrarily large number of `--extern` # flags, so write to file to avoid hitting the platform's limit on command line @@ -1114,8 +1451,22 @@ def _long_command( exe: RunInfo, args: cmd_args, argfile_name: str) -> cmd_args: - argfile, hidden = ctx.actions.write(argfile_name, args, allow_args = True) - return cmd_args(exe, cmd_args(argfile, format = "@{}")).hidden(args, hidden) + return cmd_args( + exe, + at_argfile( + actions = ctx.actions, + name = argfile_name, + args = args, + allow_args = True, + ), + ) + +_DOUBLE_ESCAPED_NEWLINE_RE = regex("\\\\n") +_ESCAPED_NEWLINE_RE = regex("\\n") +_DIRECTORY_ENV = [ + "CARGO_MANIFEST_DIR", + "OUT_DIR", +] # Separate env settings into "plain" and "with path". Path env vars are often # used in Rust `include!()` and similar directives, which always interpret the @@ -1125,10 +1476,11 @@ def _long_command( # paths to absolute paths so they'll work in any context. Hence the need to # distinguish path from non-path. 
(This will not work if the value contains both
 # path and non-path content, but we'll burn that bridge when we get to it.)
-def _process_env(
+def process_env(
     compile_ctx: CompileContext,
-    env: dict[str, [ResolvedStringWithMacros, Artifact]],
-    exec_is_windows: bool) -> (dict[str, cmd_args], dict[str, cmd_args]):
+    env: dict[str, str | ResolvedStringWithMacros | Artifact],
+    exec_is_windows: bool,
+    escape_for_rustc_action: bool = True) -> (dict[str, cmd_args], dict[str, cmd_args]):
     # Values with inputs (ie artifact references).
     path_env = {}
@@ -1139,12 +1491,20 @@
         v = cmd_args(v)
         if len(v.inputs) > 0:
             path_env[k] = v
-        else:
+        elif escape_for_rustc_action:
             # Environment variables may have newlines, escape them for now.
             # Will be unescaped in rustc_action.
             # Variable may have "\\n" as well.
             # Example: \\n\n -> \\\n\n -> \\\\n\\n
-            plain_env[k] = v.replace_regex("\\\\n", "\\\n").replace_regex("\\n", "\\n")
+            plain_env[k] = cmd_args(
+                v,
+                replace_regex = [
+                    (_DOUBLE_ESCAPED_NEWLINE_RE, "\\\n"),
+                    (_ESCAPED_NEWLINE_RE, "\\n"),
+                ],
+            )
+        else:
+            plain_env[k] = cmd_args(v)

     # If CARGO_MANIFEST_DIR is not already expressed in terms of $(location ...)
     # of some target, then interpret it as a relative path inside of the crate's
@@ -1181,13 +1541,14 @@
     # and proc macros using std::fs to read things like .pest grammars, which
     # would need paths relative to the directory that rustc got invoked in
     # (which is the repo root in Buck builds).
-    cargo_manifest_dir = plain_env.pop("CARGO_MANIFEST_DIR", None)
-    if cargo_manifest_dir:
-        path_env["CARGO_MANIFEST_DIR"] = cmd_args(
-            compile_ctx.symlinked_srcs,
-            "\\" if exec_is_windows else "/",
-            cargo_manifest_dir,
-            delimiter = "",
-        )
+    for key in _DIRECTORY_ENV:
+        value = plain_env.pop(key, None)
+        if value:
+            path_env[key] = cmd_args(
+                compile_ctx.symlinked_srcs,
+                "\\" if exec_is_windows else "/",
+                value,
+                delimiter = "",
+            )

     return (plain_env, path_env)
diff --git a/prelude/rust/build_params.bzl b/prelude/rust/build_params.bzl
index 15095068379f8..f21c280c65a22 100644
--- a/prelude/rust/build_params.bzl
+++ b/prelude/rust/build_params.bzl
@@ -7,13 +7,14 @@

 # Rules for mapping requirements to options

+load("@prelude//cxx:cxx_toolchain_types.bzl", "LinkerType")
 load(
     "@prelude//linking:link_info.bzl",
-    "LinkStyle",
-    "Linkage",  # @unused Used as a type
+    "LibOutputStyle",
+    "LinkStrategy",
 )
 load("@prelude//os_lookup:defs.bzl", "OsLookup")
-load("@prelude//utils:utils.bzl", "expect")
+load("@prelude//utils:expect.bzl", "expect")

 # --crate-type=
 # Excludes `lib` because we want to explicitly choose the library flavour
@@ -29,10 +30,6 @@ CrateType = enum(
     "staticlib",
 )

-# Crate type is intended for consumption by Rust code
-def crate_type_rust_linkage(crate_type: CrateType) -> bool:
-    return crate_type.value in ("rlib", "dylib", "proc-macro")
-
 # Crate type is intended for native linkage (eg C++)
 def crate_type_native_linkage(crate_type: CrateType) -> bool:
     return crate_type.value in ("cdylib", "staticlib")
@@ -41,10 +38,6 @@ def crate_type_native_linkage(crate_type: CrateType) -> bool:
 def crate_type_linked(crate_type: CrateType) -> bool:
     return crate_type.value in ("bin", "dylib", "proc-macro", "cdylib")

-# Crate type which should include transitive deps
-def crate_type_transitive_deps(crate_type: CrateType) -> bool:
-    return crate_type.value in ("rlib", "dylib", "staticlib")  # not sure about staticlib
-
 # Crate type which always needs codegen
 def crate_type_codegen(crate_type: CrateType) -> bool:
return crate_type_linked(crate_type) or crate_type_native_linkage(crate_type) @@ -68,24 +61,51 @@ Emit = enum( "llvm-bc", "llvm-ir", "obj", - "metadata", "link", "dep-info", "mir", "expand", # pseudo emit alias for -Zunpretty=expanded + "clippy", + # Rustc actually has two different forms of metadata: + # - The full flavor, which is what's outputted when passing + # `--emit link,metadata` and can be used as a part of pipelined builds + # - The fast flavor, which is emitted from `--emit metadata`, is faster to + # build, but cannot be used in pipelined builds. + "metadata-full", + "metadata-fast", +) + +# The different quantities of Rust metadata that can be requested from +# dependencies. Each one corresponds to an `Emit` variant, but not all `Emit` +# variants output metadata +MetadataKind = enum( + "fast", + "full", + "link", ) # Emitting this artifact generates code -def emit_needs_codegen(emit: Emit) -> bool: - return emit.value in ("asm", "llvm-bc", "llvm-ir", "obj", "link", "mir") +def dep_metadata_of_emit(emit: Emit) -> MetadataKind: + return { + Emit("asm"): MetadataKind("link"), + Emit("llvm-bc"): MetadataKind("link"), + Emit("llvm-ir"): MetadataKind("link"), + Emit("obj"): MetadataKind("link"), + Emit("link"): MetadataKind("link"), + Emit("mir"): MetadataKind("link"), + Emit("metadata-fast"): MetadataKind("fast"), + Emit("clippy"): MetadataKind("fast"), + Emit("dep-info"): MetadataKind("full"), + Emit("expand"): MetadataKind("full"), + Emit("metadata-full"): MetadataKind("full"), + }[emit] # Represents a way of invoking rustc to produce an artifact. These values are computed from # information such as the rule type, linkstyle, crate type, etc. BuildParams = record( crate_type = field(CrateType), reloc_model = field(RelocModel), - # TODO(cjhopman): Is this a LibOutputStyle or a LinkStrategy? - dep_link_style = field(LinkStyle), # what link_style to use for dependencies + dep_link_strategy = field(LinkStrategy), # A prefix and suffix to use for the name of the produced artifact. Note that although we store # these in this type, they are in principle computable from the remaining fields and the OS. # Keeping them here just turns out to be a little more convenient. 
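
# Illustrative sketch (editorial aside, not part of this patch): how the
# mapping above is consumed. dependency_args() indexes each dependency's
# per-strategy outputs by the MetadataKind derived from the requested Emit, so
# e.g. a metadata-fast check build only demands each dep's cheap rmeta. The
# dict shape here is hypothetical shorthand for strategy.outputs, and the
# advanced_unstable_linking upgrade of "link" to "full" is ignored.
def _example_pick_dep_artifact(emit_value, dep_outputs):
    kind = {
        "metadata-fast": "fast",
        "clippy": "fast",
        "dep-info": "full",
        "expand": "full",
        "metadata-full": "full",
    }.get(emit_value, "link")  # asm/llvm-bc/llvm-ir/obj/link/mir need codegen
    return dep_outputs[kind]
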
@@ -95,9 +115,8 @@ BuildParams = record( RustcFlags = record( crate_type = field(CrateType), - reloc_model = field(RelocModel), - dep_link_style = field(LinkStyle), platform_to_affix = field(typing.Callable), + link_strategy = field(LinkStrategy | None), ) # Filenames used for various emitted forms @@ -107,11 +126,13 @@ _EMIT_PREFIX_SUFFIX = { Emit("llvm-bc"): ("", ".bc"), Emit("llvm-ir"): ("", ".ll"), Emit("obj"): ("", ".o"), - Emit("metadata"): ("lib", ".rmeta"), # even binaries get called 'libfoo.rmeta' + Emit("metadata-fast"): ("lib", ".rmeta"), # even binaries get called 'libfoo.rmeta' + Emit("metadata-full"): (None, None), # Hollow rlibs, so they get the same name Emit("link"): (None, None), # crate type and reloc model dependent Emit("dep-info"): ("", ".d"), Emit("mir"): (None, ".mir"), Emit("expand"): (None, ".rs"), + Emit("clippy"): ("lib", ".rmeta"), # Treated like metadata-fast } # Return the filename for a particular emitted artifact type @@ -124,12 +145,40 @@ def output_filename(cratename: str, emit: Emit, buildparams: BuildParams, extra: # Rule type - 'binary' also covers 'test' RuleType = enum("binary", "library") -# What language we're generating artifacts to be linked with -LinkageLang = enum("rust", "c++") +# Controls how we build our rust libraries, largely dependent on whether rustc +# or buck is driving the final linking and whether we are linking the artifact +# into other rust targets. +# +# Rust: In this mode, we build rust libraries as rlibs. This is the primary +# approach for building rust targets when the final link step is driven by +# rustc (e.g. rust_binary, rust_unittest, etc). +# +# Native: In this mode, we build rust libraries as staticlibs, where rustc +# will bundle all of this target's rust dependencies into a single library +# artifact. This approach is the most standardized way to build rust libraries +# for linkage in non-rust code. +# +# NOTE: This approach does not scale well. It's possible to end up with +# non-rust target A depending on two rust targets B and C, which can cause +# duplicate symbols if B and C share common rust dependencies. +# +# Native Unbundled: In this mode, we revert back to building as rlibs. This +# approach mitigates the duplicate symbol downside of the "Native" approach. +# However, this option is not formally supported by rustc, and depends on an +# implementation detail of rlibs (they're effectively .a archives and can be +# linked with other native code using the CXX linker). +# +# See https://github.com/rust-lang/rust/issues/73632 for more details on +# stabilizing this approach. 
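
# Illustrative sketch (editorial aside, not part of this patch): the three
# LinkageLang modes described in the comment below reduce to this crate-type
# table; the enum definition itself follows.
def _example_linkage_lang_crate_type(lang, shared):
    return {
        ("rust", False): "rlib",
        ("rust", True): "dylib",
        ("native", False): "staticlib",  # bundles all Rust deps into one .a
        ("native", True): "cdylib",  # bundles all Rust deps into one shared lib
        ("native-unbundled", False): "rlib",  # linked later by the CXX linker
        ("native-unbundled", True): "dylib",
    }[(lang, shared)]
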
+ +LinkageLang = enum( + "rust", + "native", + "native-unbundled", +) -_BINARY_SHARED = 0 -_BINARY_PIE = 1 -_BINARY_NON_PIE = 2 +_BINARY = 0 +_RUST_PROC_MACRO_RUSTDOC_TEST = 1 _NATIVE_LINKABLE_SHARED_OBJECT = 3 _RUST_DYLIB_SHARED = 4 _RUST_PROC_MACRO = 5 @@ -138,187 +187,193 @@ _RUST_STATIC_NON_PIC_LIBRARY = 7 _NATIVE_LINKABLE_STATIC_PIC = 8 _NATIVE_LINKABLE_STATIC_NON_PIC = 9 -def _executable_prefix_suffix(linker_type: str, target_os_type: OsLookup) -> (str, str): +def _executable_prefix_suffix(linker_type: LinkerType, target_os_type: OsLookup) -> (str, str): return { - "darwin": ("", ""), - "gnu": ("", ".exe") if target_os_type.platform == "windows" else ("", ""), - "wasm": ("", ".wasm"), - "windows": ("", ".exe"), + LinkerType("darwin"): ("", ""), + LinkerType("gnu"): ("", ".exe") if target_os_type.platform == "windows" else ("", ""), + LinkerType("wasm"): ("", ".wasm"), + LinkerType("windows"): ("", ".exe"), }[linker_type] -def _library_prefix_suffix(linker_type: str, target_os_type: OsLookup) -> (str, str): +def _library_prefix_suffix(linker_type: LinkerType, target_os_type: OsLookup) -> (str, str): return { - "darwin": ("lib", ".dylib"), - "gnu": ("", ".dll") if target_os_type.platform == "windows" else ("lib", ".so"), - "wasm": ("", ".wasm"), - "windows": ("", ".dll"), + LinkerType("darwin"): ("lib", ".dylib"), + LinkerType("gnu"): ("", ".dll") if target_os_type.platform == "windows" else ("lib", ".so"), + LinkerType("wasm"): ("", ".wasm"), + LinkerType("windows"): ("", ".dll"), }[linker_type] _BUILD_PARAMS = { - _BINARY_SHARED: RustcFlags( + _BINARY: RustcFlags( crate_type = CrateType("bin"), - reloc_model = RelocModel("pic"), - dep_link_style = LinkStyle("shared"), platform_to_affix = _executable_prefix_suffix, + # link_strategy is provided by the rust_binary attribute + link_strategy = None, ), - _BINARY_PIE: RustcFlags( - crate_type = CrateType("bin"), - reloc_model = RelocModel("pic"), - dep_link_style = LinkStyle("static_pic"), - platform_to_affix = _executable_prefix_suffix, - ), - _BINARY_NON_PIE: RustcFlags( - crate_type = CrateType("bin"), - reloc_model = RelocModel("static"), - dep_link_style = LinkStyle("static"), + # It's complicated: this is a rustdoc test for a procedural macro crate. + # We need deps built as if this were a binary, while passing crate-type + # proc_macro to the rustdoc invocation. + _RUST_PROC_MACRO_RUSTDOC_TEST: RustcFlags( + crate_type = CrateType("proc-macro"), platform_to_affix = _executable_prefix_suffix, + link_strategy = LinkStrategy("static_pic"), ), _NATIVE_LINKABLE_SHARED_OBJECT: RustcFlags( crate_type = CrateType("cdylib"), - reloc_model = RelocModel("pic"), - dep_link_style = LinkStyle("shared"), platform_to_affix = _library_prefix_suffix, + # cdylibs statically link all rust code and export a single C-style dylib + # for consumption by other languages + link_strategy = LinkStrategy("shared"), ), _RUST_DYLIB_SHARED: RustcFlags( crate_type = CrateType("dylib"), - reloc_model = RelocModel("pic"), - dep_link_style = LinkStyle("shared"), platform_to_affix = _library_prefix_suffix, + link_strategy = LinkStrategy("shared"), ), _RUST_PROC_MACRO: RustcFlags( crate_type = CrateType("proc-macro"), - reloc_model = RelocModel("pic"), - dep_link_style = LinkStyle("static_pic"), platform_to_affix = _library_prefix_suffix, + # FIXME(JakobDegen): It's not really clear what we should do about + # proc macros. The principled thing is probably to treat them sort + # of like a normal library, except that they always have preferred + # linkage shared? 
Preserve existing behavior for now + link_strategy = LinkStrategy("static_pic"), ), + # FIXME(JakobDegen): Add a comment explaining why `.a`s need reloc-strategy + # dependent names while `.rlib`s don't. _RUST_STATIC_PIC_LIBRARY: RustcFlags( crate_type = CrateType("rlib"), - reloc_model = RelocModel("pic"), - dep_link_style = LinkStyle("static_pic"), platform_to_affix = lambda _l, _t: ("lib", ".rlib"), + link_strategy = LinkStrategy("static_pic"), ), _RUST_STATIC_NON_PIC_LIBRARY: RustcFlags( crate_type = CrateType("rlib"), - reloc_model = RelocModel("static"), - dep_link_style = LinkStyle("static"), platform_to_affix = lambda _l, _t: ("lib", ".rlib"), + link_strategy = LinkStrategy("static"), ), _NATIVE_LINKABLE_STATIC_PIC: RustcFlags( crate_type = CrateType("staticlib"), - reloc_model = RelocModel("pic"), - dep_link_style = LinkStyle("static_pic"), platform_to_affix = lambda _l, _t: ("lib", "_pic.a"), + link_strategy = LinkStrategy("static_pic"), ), _NATIVE_LINKABLE_STATIC_NON_PIC: RustcFlags( crate_type = CrateType("staticlib"), - reloc_model = RelocModel("static"), - dep_link_style = LinkStyle("static"), platform_to_affix = lambda _l, _t: ("lib", ".a"), + link_strategy = LinkStrategy("static"), ), } _INPUTS = { - # Binary, shared - ("binary", False, "shared", "any", "rust"): _BINARY_SHARED, - ("binary", False, "shared", "shared", "rust"): _BINARY_SHARED, - ("binary", False, "shared", "static", "rust"): _BINARY_SHARED, - # Binary, PIE - ("binary", False, "static_pic", "any", "rust"): _BINARY_PIE, - ("binary", False, "static_pic", "shared", "rust"): _BINARY_PIE, - ("binary", False, "static_pic", "static", "rust"): _BINARY_PIE, - # Binary, non-PIE - ("binary", False, "static", "any", "rust"): _BINARY_NON_PIE, - ("binary", False, "static", "shared", "rust"): _BINARY_NON_PIE, - ("binary", False, "static", "static", "rust"): _BINARY_NON_PIE, + # Binary + ("binary", False, None, "rust"): _BINARY, + ("binary", True, None, "rust"): _RUST_PROC_MACRO_RUSTDOC_TEST, # Native linkable shared object - ("library", False, "shared", "any", "c++"): _NATIVE_LINKABLE_SHARED_OBJECT, - ("library", False, "shared", "shared", "c++"): _NATIVE_LINKABLE_SHARED_OBJECT, - ("library", False, "static", "shared", "c++"): _NATIVE_LINKABLE_SHARED_OBJECT, - ("library", False, "static_pic", "shared", "c++"): _NATIVE_LINKABLE_SHARED_OBJECT, + ("library", False, "shared_lib", "native"): _NATIVE_LINKABLE_SHARED_OBJECT, + # Native unbundled linkable shared object + ("library", False, "shared_lib", "native-unbundled"): _RUST_DYLIB_SHARED, # Rust dylib shared object - ("library", False, "shared", "any", "rust"): _RUST_DYLIB_SHARED, - ("library", False, "shared", "shared", "rust"): _RUST_DYLIB_SHARED, - ("library", False, "static", "shared", "rust"): _RUST_DYLIB_SHARED, - ("library", False, "static_pic", "shared", "rust"): _RUST_DYLIB_SHARED, + ("library", False, "shared_lib", "rust"): _RUST_DYLIB_SHARED, # Rust proc-macro - ("library", True, "shared", "any", "rust"): _RUST_PROC_MACRO, - ("library", True, "shared", "shared", "rust"): _RUST_PROC_MACRO, - ("library", True, "shared", "static", "rust"): _RUST_PROC_MACRO, - ("library", True, "static", "any", "rust"): _RUST_PROC_MACRO, - ("library", True, "static", "shared", "rust"): _RUST_PROC_MACRO, - ("library", True, "static", "static", "rust"): _RUST_PROC_MACRO, - ("library", True, "static_pic", "any", "rust"): _RUST_PROC_MACRO, - ("library", True, "static_pic", "shared", "rust"): _RUST_PROC_MACRO, - ("library", True, "static_pic", "static", "rust"): _RUST_PROC_MACRO, + 
("library", True, "archive", "rust"): _RUST_PROC_MACRO, + ("library", True, "pic_archive", "rust"): _RUST_PROC_MACRO, + ("library", True, "shared_lib", "rust"): _RUST_PROC_MACRO, # Rust static_pic library - ("library", False, "shared", "static", "rust"): _RUST_STATIC_PIC_LIBRARY, - ("library", False, "static_pic", "any", "rust"): _RUST_STATIC_PIC_LIBRARY, - ("library", False, "static_pic", "static", "rust"): _RUST_STATIC_PIC_LIBRARY, + ("library", False, "pic_archive", "rust"): _RUST_STATIC_PIC_LIBRARY, # Rust static (non-pic) library - ("library", False, "static", "any", "rust"): _RUST_STATIC_NON_PIC_LIBRARY, - ("library", False, "static", "static", "rust"): _RUST_STATIC_NON_PIC_LIBRARY, + ("library", False, "archive", "rust"): _RUST_STATIC_NON_PIC_LIBRARY, # Native linkable static_pic - ("library", False, "shared", "static", "c++"): _NATIVE_LINKABLE_STATIC_PIC, - ("library", False, "static_pic", "any", "c++"): _NATIVE_LINKABLE_STATIC_PIC, - ("library", False, "static_pic", "static", "c++"): _NATIVE_LINKABLE_STATIC_PIC, + ("library", False, "pic_archive", "native"): _NATIVE_LINKABLE_STATIC_PIC, # Native linkable static non-pic - ("library", False, "static", "any", "c++"): _NATIVE_LINKABLE_STATIC_NON_PIC, - ("library", False, "static", "static", "c++"): _NATIVE_LINKABLE_STATIC_NON_PIC, + ("library", False, "archive", "native"): _NATIVE_LINKABLE_STATIC_NON_PIC, + # Native Unbundled static_pic library + ("library", False, "pic_archive", "native-unbundled"): _RUST_STATIC_PIC_LIBRARY, + # Native Unbundled static (non-pic) library + ("library", False, "archive", "native-unbundled"): _RUST_STATIC_NON_PIC_LIBRARY, } # Check types of _INPUTS, writing these out as types is too verbose, but let's make sure we don't have any typos. [ - (RuleType(rule_type), LinkStyle(link_style), Linkage(preferred_linkage), LinkageLang(linkage_lang)) - for (rule_type, _, link_style, preferred_linkage, linkage_lang), _ in _INPUTS.items() + (RuleType(rule_type), LibOutputStyle(lib_output_style) if lib_output_style else None, LinkageLang(linkage_lang)) + for (rule_type, _, lib_output_style, linkage_lang), _ in _INPUTS.items() ] -def _get_flags(build_kind_key: int, target_os_type: OsLookup) -> (RustcFlags, RelocModel): - flags = _BUILD_PARAMS[build_kind_key] - - # On Windows we should always use pic reloc model. +def _get_reloc_model(link_strategy: LinkStrategy, target_os_type: OsLookup) -> RelocModel: if target_os_type.platform == "windows": - return flags, RelocModel("pic") - return flags, flags.reloc_model + return RelocModel("pic") + if link_strategy == LinkStrategy("static"): + return RelocModel("static") + return RelocModel("pic") -# Compute crate type, relocation model and name mapping given what rule we're building, -# whether its a proc-macro, linkage information and language. +# Compute crate type, relocation model and name mapping given what rule we're building, whether its +# a proc-macro, linkage information and language. +# +# Binaries should pass the link strategy and not the lib output style, while libraries should do the +# opposite. +# +# The linking information that's passed here is different from what one might expect in the C++ +# rules. There's a good reason for that, so let's go over it. First, let's recap how C++ handles +# this, as of December 2023 (I say "recap" but I don't think this is actually documented anywhere): +# +# 1. C++ libraries can be built in three different ways: Archives, pic archives, and shared +# libraries. 
Which one of these is used for a given link strategy is determined by the preferred +# linkage using `linking/link_info.bzl:get_lib_output_style`. +# 2. When a C++ library is built as a shared library, the link strategy used for its dependencies +# is determined by the link style attribute on the C++ library. +# 3. When a C++ library is built as an archive (either kind), there's no need to know a link +# strategy for the dependencies. None of the per-link-strategy providers of the dependencies +# need to be accessed. +# +# There are two relevant ways in which Rust differs: +# +# 1. There are more ways of building Rust libraries than are represented by `LibOutputStyle`. The +# Rust analogue is the `BuildParams` type, which implicitly holds a `LibOutputStyle` as well as +# a bunch of additional information - this is why `LibOutputStyle` is relatively rarely used +# directly in the Rust rules. +# 2. Rust does not have the property in point three above, ie building a Rust library into an +# archive does require knowing per-link-strategy properties of the dependencies. This is +# fundamental in cases without native unbundled deps - with native unbundled deps it may be +# fixable, but that's not super clear. def build_params( rule: RuleType, proc_macro: bool, - link_style: LinkStyle, - preferred_linkage: Linkage, + link_strategy: LinkStrategy | None, + lib_output_style: LibOutputStyle | None, lang: LinkageLang, - linker_type: str, + linker_type: LinkerType, target_os_type: OsLookup) -> BuildParams: - if rule == RuleType("binary") and proc_macro: - # It's complicated: this is a rustdoc test for a procedural macro crate. - # We need deps built as if this were a binary, while passing crate-type - # proc_macro to the rustdoc invocation. - crate_type = CrateType("proc-macro") - proc_macro = False + if rule == RuleType("binary"): + expect(link_strategy != None) + expect(lib_output_style == None) else: - crate_type = None + expect(lib_output_style != None) - input = (rule.value, proc_macro, link_style.value, preferred_linkage.value, lang.value) + input = (rule.value, proc_macro, lib_output_style.value if lib_output_style else None, lang.value) expect( input in _INPUTS, - "missing case for rule_type={} proc_macro={} link_style={} preferred_linkage={} lang={}", + "missing case for rule_type={} proc_macro={} lib_output_style={} lang={}", rule, proc_macro, - link_style, - preferred_linkage, + lib_output_style, lang, ) - build_kind_key = _INPUTS[input] - flags, reloc_model = _get_flags(build_kind_key, target_os_type) + flags = _BUILD_PARAMS[_INPUTS[input]] + + # FIXME(JakobDegen): We deal with Rust needing to know the link strategy + # even for building archives by using a default link strategy specifically + # for those cases. I've gone through the code and checked all the places + # where the link strategy is used to determine that this won't do anything + # too bad, but it would be nice to enforce that more strictly or not have + # this at all. 
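# As a rough sketch of the calling convention described in the doc comment
# above (a minimal illustration; the argument values are assumed, not taken
# from any particular call site):
#
#     # Binaries pass a link strategy and no lib output style ...
#     build_params(
#         rule = RuleType("binary"),
#         proc_macro = False,
#         link_strategy = LinkStrategy("static_pic"),
#         lib_output_style = None,
#         lang = LinkageLang("rust"),
#         linker_type = linker_type,
#         target_os_type = target_os_type,
#     )
#
#     # ... while libraries pass a lib output style, and may leave the link
#     # strategy unset, in which case the default from `RustcFlags` is used,
#     # as the line below implements.
#     build_params(
#         rule = RuleType("library"),
#         proc_macro = False,
#         link_strategy = None,
#         lib_output_style = LibOutputStyle("pic_archive"),
#         lang = LinkageLang("rust"),
#         linker_type = linker_type,
#         target_os_type = target_os_type,
#     )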
+ link_strategy = link_strategy or flags.link_strategy + reloc_model = _get_reloc_model(link_strategy, target_os_type) prefix, suffix = flags.platform_to_affix(linker_type, target_os_type) return BuildParams( - crate_type = crate_type or flags.crate_type, + crate_type = flags.crate_type, reloc_model = reloc_model, - dep_link_style = flags.dep_link_style, + dep_link_strategy = link_strategy, prefix = prefix, suffix = suffix, ) diff --git a/prelude/rust/cargo_buildscript.bzl b/prelude/rust/cargo_buildscript.bzl index c3d2970d356a2..52722021fe48e 100644 --- a/prelude/rust/cargo_buildscript.bzl +++ b/prelude/rust/cargo_buildscript.bzl @@ -20,9 +20,76 @@ load("@prelude//:prelude.bzl", "native") load("@prelude//decls:common.bzl", "buck") +load("@prelude//os_lookup:defs.bzl", "OsLookup") load("@prelude//rust:rust_toolchain.bzl", "RustToolchainInfo") load("@prelude//rust:targets.bzl", "targets") load("@prelude//decls/toolchains_common.bzl", "toolchains_common") +load(":build.bzl", "dependency_args") +load(":build_params.bzl", "MetadataKind") +load(":context.bzl", "DepCollectionContext") +load( + ":link_info.bzl", + "DEFAULT_STATIC_LINK_STRATEGY", + "RustProcMacroPlugin", + "gather_explicit_sysroot_deps", + "resolve_rust_deps_inner", +) +load(":rust_toolchain.bzl", "PanicRuntime") + +def _make_rustc_shim(ctx: AnalysisContext, cwd: Artifact) -> cmd_args: + # Build scripts expect to receive a `rustc` which "just works." However, + # our rustc sometimes has no sysroot available, so we need to make a shim + # which supplies the sysroot deps if necessary + toolchain_info = ctx.attrs._rust_toolchain[RustToolchainInfo] + explicit_sysroot_deps = toolchain_info.explicit_sysroot_deps + if explicit_sysroot_deps: + dep_ctx = DepCollectionContext( + advanced_unstable_linking = False, + include_doc_deps = False, + is_proc_macro = False, + explicit_sysroot_deps = explicit_sysroot_deps, + panic_runtime = PanicRuntime("unwind"), # not actually used + ) + deps = gather_explicit_sysroot_deps(dep_ctx) + deps = resolve_rust_deps_inner(ctx, deps) + dep_args, _ = dependency_args( + ctx = ctx, + compile_ctx = None, + toolchain_info = toolchain_info, + deps = deps, + subdir = "any", + dep_link_strategy = DEFAULT_STATIC_LINK_STRATEGY, + dep_metadata_kind = MetadataKind("full"), + is_rustdoc_test = False, + ) + + null_path = "nul" if ctx.attrs._exec_os_type[OsLookup].platform == "windows" else "/dev/null" + dep_args = cmd_args("--sysroot=" + null_path, dep_args, relative_to = cwd) + dep_file, _ = ctx.actions.write("rustc_dep_file", dep_args, allow_args = True) + sysroot_args = cmd_args("@", dep_file, delimiter = "", hidden = dep_args) + else: + sysroot_args = cmd_args() + + if ctx.attrs._exec_os_type[OsLookup].platform == "windows": + shim, _ = ctx.actions.write( + "__rustc_shim.bat", + [ + "@echo off", + cmd_args(toolchain_info.compiler, sysroot_args, "%*", delimiter = " ", relative_to = cwd), + ], + allow_args = True, + ) + else: + shim, _ = ctx.actions.write( + "__rustc_shim.sh", + [ + "#!/usr/bin/env bash", + cmd_args(toolchain_info.compiler, sysroot_args, "\"$@\"\n", delimiter = " ", relative_to = cwd), + ], + is_executable = True, + allow_args = True, + ) + return cmd_args(shim, relative_to = cwd, hidden = [toolchain_info.compiler, sysroot_args]) def _cargo_buildscript_impl(ctx: AnalysisContext) -> list[Provider]: toolchain_info = ctx.attrs._rust_toolchain[RustToolchainInfo] @@ -47,11 +114,15 @@ def _cargo_buildscript_impl(ctx: AnalysisContext) -> list[Provider]: env["CARGO_PKG_NAME"] = ctx.attrs.package_name 
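# For reference, the `RUSTC` shim built by `_make_rustc_shim` above and
# installed into `env` just below is, on a non-Windows exec platform, a script
# of roughly this shape (the compiler name and argfile path are illustrative
# only):
#
#     #!/usr/bin/env bash
#     rustc @buck-out/.../rustc_dep_file "$@"
#
# where the argfile carries `--sysroot=/dev/null` plus the dependency flags
# for the explicit sysroot deps, if the toolchain defines any.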
env["CARGO_PKG_VERSION"] = ctx.attrs.version env["OUT_DIR"] = out_dir.as_output() - env["RUSTC"] = cmd_args(toolchain_info.compiler_standalone).relative_to(cwd) + env["RUSTC"] = _make_rustc_shim(ctx, cwd) env["RUSTC_LINKER"] = "/bin/false" env["RUST_BACKTRACE"] = "1" env["TARGET"] = toolchain_info.rustc_target_triple + # \037 == \x1f == the magic delimiter specified in the environment variable + # reference above. + env["CARGO_ENCODED_RUSTFLAGS"] = cmd_args(toolchain_info.rustc_flags, delimiter = "\037") + host_triple = targets.exec_triple(ctx) if host_triple: env["HOST"] = host_triple @@ -63,7 +134,7 @@ def _cargo_buildscript_impl(ctx: AnalysisContext) -> list[Provider]: # Environment variables specified in the target's attributes get priority # over all the above. for k, v in ctx.attrs.env.items(): - env[k] = cmd_args(v).relative_to(cwd) + env[k] = cmd_args(v, relative_to = cwd) ctx.actions.run( cmd, @@ -90,11 +161,13 @@ _cargo_buildscript_rule = rule( "runner": attrs.default_only(attrs.exec_dep(providers = [RunInfo], default = "prelude//rust/tools:buildscript_run")), # *IMPORTANT* rustc_cfg must be a `dep` and not an `exec_dep` because # we want the `rustc --cfg` for the target platform, not the exec platform. - "rustc_cfg": attrs.default_only(attrs.dep(default = "prelude//rust/tools:rustc_cfg")), + "rustc_cfg": attrs.dep(default = "prelude//rust/tools:rustc_cfg"), "version": attrs.string(), "_exec_os_type": buck.exec_os_type_arg(), "_rust_toolchain": toolchains_common.rust(), }, + # Always empty, but needed to prevent errors + uses_plugins = [RustProcMacroPlugin], ) def buildscript_run( diff --git a/prelude/rust/cargo_package.bzl b/prelude/rust/cargo_package.bzl index d9b50510712ed..b80b51f90cf5e 100644 --- a/prelude/rust/cargo_package.bzl +++ b/prelude/rust/cargo_package.bzl @@ -5,6 +5,9 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +# This file exports utilities for use with with reindeer. +# These are not used anywhere else in prelude and are not exported as prelude globals. + load("@prelude//:prelude.bzl", "native") load("@prelude//utils:selects.bzl", "selects") diff --git a/prelude/rust/clippy_configuration.bzl b/prelude/rust/clippy_configuration.bzl new file mode 100644 index 0000000000000..74f0c3f6d7974 --- /dev/null +++ b/prelude/rust/clippy_configuration.bzl @@ -0,0 +1,58 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//rust:rust_toolchain.bzl", "RustToolchainInfo") +load("@prelude//decls/toolchains_common.bzl", "toolchains_common") + +# Configurations for Clippy runs. 
+ClippyConfiguration = provider( + fields = { + "clippy_toml": provider_field(Artifact), + }, +) + +def _clippy_configuration_impl(ctx: AnalysisContext) -> list[Provider]: + toolchain_ctx = ctx.attrs._rust_toolchain[RustToolchainInfo] + toolchain_clippy_toml = toolchain_ctx.clippy_toml + + if not toolchain_clippy_toml: + clippy_toml = ctx.attrs.clippy_toml_src + else: + toml_merge_tool = ctx.attrs.toml_merge_tool + + clippy_toml = ctx.actions.declare_output("clippy.toml") + ctx.actions.run([ + toml_merge_tool[RunInfo], + cmd_args(clippy_toml.as_output(), format = "--output={}"), + cmd_args(toolchain_clippy_toml, format = "--file={}"), + cmd_args(ctx.attrs.clippy_toml_src, format = "--file={}"), + ], category = "clippy_toml_merge") + + return [ + DefaultInfo( + default_output = clippy_toml, + ), + ClippyConfiguration( + clippy_toml = clippy_toml, + ), + ] + +# Generate a Clippy configuration that is merged with the toolchain-specified +# Clippy configuration (if defined). +clippy_configuration = rule(impl = _clippy_configuration_impl, attrs = { + "clippy_toml_src": attrs.source(), + # TODO(emersonford): figure out how to store this in `_rust_toolchain` + # without causing a circular dependency on the toolchain target when + # `toml_merge_tool` is a `rust_binary`. + # + # Tool used to recursively merge multiple TOML files, e.g. for merging + # clippy.toml files. Must support taking multiple `--file <file>` flags + # as source files to merge and an `--output <file>` flag to write the + # merged TOML table to. + "toml_merge_tool": attrs.exec_dep(providers = [RunInfo]), + "_rust_toolchain": toolchains_common.rust(), +}) diff --git a/prelude/rust/context.bzl b/prelude/rust/context.bzl index 6b7b9d406e63c..8c098bb7df4eb 100644 --- a/prelude/rust/context.bzl +++ b/prelude/rust/context.bzl @@ -6,10 +6,14 @@ # of this source tree. load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxToolchainInfo") -load("@prelude//linking:link_info.bzl", "LinkStyle") -load(":build_params.bzl", "CrateType", "Emit") -load(":link_info.bzl", "CrateName") -load(":rust_toolchain.bzl", "RustToolchainInfo") +load("@prelude//linking:link_info.bzl", "LinkStrategy") +load(":build_params.bzl", "BuildParams", "CrateType", "Emit") +load(":rust_toolchain.bzl", "PanicRuntime", "RustExplicitSysrootDeps", "RustToolchainInfo") + +CrateName = record( + simple = field(str | ResolvedStringWithMacros), + dynamic = field(Artifact | None), +) # Struct for sharing common args between rustc and rustdoc # (rustdoc just relays a bunch of the same args to rustc when trying to gen docs) @@ -17,18 +21,25 @@ CommonArgsInfo = record( args = field(cmd_args), subdir = field(str), tempfile = field(str), - short_cmd = field(str), - is_check = field(bool), + crate_type = field(CrateType), + params = field(BuildParams), + emit = field(Emit), + emit_requires_linking = field(bool), crate_map = field(list[(CrateName, Label)]), ) -ExternArg = record( - flags = str, - lib = field(Artifact), -) - -CrateMapArg = record( - label = field(Label), +# Information that determines how dependencies should be collected +DepCollectionContext = record( + advanced_unstable_linking = field(bool), + include_doc_deps = field(bool), + # Is the target a proc-macro target?
This is ignored if `include_doc_deps` + # is set, since doc tests in proc macro crates are not built with + # `--extern proc_macro` + is_proc_macro = field(bool), + # From the toolchain, if available + explicit_sysroot_deps = field(RustExplicitSysrootDeps | None), + # Only needed if `advanced_unstable_linking` is set + panic_runtime = field(PanicRuntime), ) # Compile info which is reusable between multiple compilation commands performed @@ -36,6 +47,7 @@ CrateMapArg = record( CompileContext = record( toolchain_info = field(RustToolchainInfo), cxx_toolchain_info = field(CxxToolchainInfo), + dep_ctx = field(DepCollectionContext), # Symlink root containing all sources. symlinked_srcs = field(Artifact), # Linker args to pass the linker wrapper to rustc. @@ -43,8 +55,7 @@ CompileContext = record( # Clippy wrapper (wrapping clippy-driver so it has the same CLI as rustc). clippy_wrapper = field(cmd_args), # Memoized common args for reuse. - common_args = field(dict[(CrateType, Emit, LinkStyle), CommonArgsInfo]), - flagfiles_for_extern = field(dict[ExternArg, Artifact]), - flagfiles_for_crate_map = field(dict[CrateMapArg, Artifact]), + common_args = field(dict[(CrateType, Emit, LinkStrategy, bool, bool, bool), CommonArgsInfo]), transitive_dependency_dirs = field(dict[Artifact, None]), + sysroot_args = field(cmd_args), ) diff --git a/prelude/rust/extern.bzl b/prelude/rust/extern.bzl index 8d89dd16bf948..2c19ad4bb134f 100644 --- a/prelude/rust/extern.bzl +++ b/prelude/rust/extern.bzl @@ -5,8 +5,15 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -load(":context.bzl", "CompileContext", "CrateMapArg", "ExternArg") -load(":link_info.bzl", "CrateName") +load(":context.bzl", "CrateName") + +def crate_name_as_cmd_arg(crate: CrateName) -> cmd_args | str | ResolvedStringWithMacros: + if crate.dynamic: + # TODO: consider using `cmd_args(crate.dynamic, quote = "json")` so it + # doesn't fall apart on paths containing ')' + return cmd_args(crate.dynamic, format = "$(cat {})") + else: + return crate.simple # Create `--extern` flag.
For crates with a name computed during analysis: # @@ -14,47 +21,22 @@ load(":link_info.bzl", "CrateName") # # For crates with a name computed during build: # -# --extern @extern/libPROVISIONAL +# --extern=$(cat path/to/REALNAME)=path/to/libPROVISIONAL.rlib # -# where extern/libPROVISIONAL holds a flag containing the real crate name: -# -# REALNAME=path/to/libPROVISIONAL.rlib -# -def extern_arg( - ctx: AnalysisContext, - compile_ctx: CompileContext, - flags: list[str], - crate: CrateName, - lib: Artifact) -> cmd_args: +def extern_arg(flags: list[str], crate: CrateName, lib: Artifact) -> cmd_args: if flags == []: flags = "" else: flags = ",".join(flags) + ":" - if crate.dynamic: - args = ExternArg(flags = flags, lib = lib) - flagfile = compile_ctx.flagfiles_for_extern.get(args, None) - if not flagfile: - flagfile = ctx.actions.declare_output("extern/{}".format(lib.short_path)) - concat_cmd = [ - compile_ctx.toolchain_info.concat_tool, - "--output", - flagfile.as_output(), - "--", - flags, - cmd_args("@", crate.dynamic, delimiter = ""), - "=", - cmd_args(lib).ignore_artifacts(), - ] - ctx.actions.run( - concat_cmd, - category = "concat", - identifier = str(len(compile_ctx.flagfiles_for_extern)), - ) - compile_ctx.flagfiles_for_extern[args] = flagfile - return cmd_args("--extern", cmd_args("@", flagfile, delimiter = "")).hidden(lib) - else: - return cmd_args("--extern=", flags, crate.simple, "=", lib, delimiter = "") + return cmd_args( + "--extern=", + flags, + crate_name_as_cmd_arg(crate), + "=", + lib, + delimiter = "", + ) # Create `--crate-map` flag. For crates with a name computed during analysis: # @@ -62,37 +44,13 @@ def extern_arg( # # For crates with a name computed during build: # -# --crate-map @cratemap/path/to/target -# -# where cratemap/path/to/target holds a flag containing the real crate name: -# -# REALNAME=//path/to:target +# --crate-map=$(cat path/to/REALNAME)=//path/to:target # -def crate_map_arg( - ctx: AnalysisContext, - compile_ctx: CompileContext, - crate: CrateName, - label: Label) -> cmd_args: - if crate.dynamic: - args = CrateMapArg(label = label) - flagfile = compile_ctx.flagfiles_for_crate_map.get(args, None) - if not flagfile: - flagfile = ctx.actions.declare_output("cratemap/{}/{}/{}".format(label.cell, label.package, label.name)) - concat_cmd = [ - compile_ctx.toolchain_info.concat_tool, - "--output", - flagfile.as_output(), - "--", - cmd_args("@", crate.dynamic, delimiter = ""), - "=", - str(label.raw_target()), - ] - ctx.actions.run( - concat_cmd, - category = "cratemap", - identifier = str(len(compile_ctx.flagfiles_for_crate_map)), - ) - compile_ctx.flagfiles_for_crate_map[args] = flagfile - return cmd_args("--crate-map", cmd_args("@", flagfile, delimiter = "")) - else: - return cmd_args("--crate-map=", crate.simple, "=", str(label.raw_target()), delimiter = "") +def crate_map_arg(crate: CrateName, label: Label) -> cmd_args: + return cmd_args( + "--crate-map=", + crate_name_as_cmd_arg(crate), + "=", + str(label.raw_target()), + delimiter = "", + ) diff --git a/prelude/rust/failure_filter.bzl b/prelude/rust/failure_filter.bzl index 7a8fa9ff313c0..67533c1da784b 100644 --- a/prelude/rust/failure_filter.bzl +++ b/prelude/rust/failure_filter.bzl @@ -7,16 +7,6 @@ load(":context.bzl", "CompileContext") -# Inputs to the fail filter -RustFailureFilter = provider(fields = { - # Build status json - "buildstatus": typing.Any, - # Required files - "required": typing.Any, - # stderr - "stderr": typing.Any, -}) - # This creates an action which takes a buildstatus json 
artifact as an input, and a list of other # artifacts. If all those artifacts are present in the buildstatus as successfully generated, then # the action will succeed with those artifacts as outputs. Otherwise it fails. @@ -24,19 +14,16 @@ RustFailureFilter = provider(fields = { def failure_filter( ctx: AnalysisContext, compile_ctx: CompileContext, - prefix: str, - predecl_out: [Artifact, None], - failprov: RustFailureFilter, - short_cmd: str) -> Artifact: + predeclared_output: Artifact | None, + build_status: Artifact, + required: Artifact, + stderr: Artifact, + identifier: str) -> Artifact: toolchain_info = compile_ctx.toolchain_info failure_filter_action = toolchain_info.failure_filter_action - buildstatus = failprov.buildstatus - required = failprov.required - stderr = failprov.stderr - - if predecl_out: - output = predecl_out + if predeclared_output: + output = predeclared_output else: output = ctx.actions.declare_output("out/" + required.short_path) @@ -49,9 +36,9 @@ def failure_filter( required, output.as_output(), "--build-status", - buildstatus, + build_status, ) - ctx.actions.run(cmd, category = "failure_filter", identifier = "{} {}".format(prefix, short_cmd)) + ctx.actions.run(cmd, category = "failure_filter", identifier = identifier) return output diff --git a/prelude/rust/link_info.bzl b/prelude/rust/link_info.bzl index 721bd096ac73c..e955e2b3a1600 100644 --- a/prelude/rust/link_info.bzl +++ b/prelude/rust/link_info.bzl @@ -16,6 +16,7 @@ load( "@prelude//cxx:cxx.bzl", "get_auto_link_group_specs", ) +load("@prelude//cxx:cxx_context.bzl", "get_cxx_toolchain_info") load( "@prelude//cxx:cxx_library_utility.bzl", "cxx_is_gnu", @@ -23,7 +24,6 @@ load( load("@prelude//cxx:cxx_toolchain_types.bzl", "PicBehavior") load( "@prelude//cxx:link_groups.bzl", - "LinkGroupInfo", # @unused Used as a type "LinkGroupLinkInfo", # @unused Used as a type "create_link_groups", "get_filtered_labels_to_links_map", @@ -32,19 +32,29 @@ load( "get_link_group", "get_link_group_info", "get_link_group_preferred_linkage", + "get_public_link_group_nodes", +) +load( + "@prelude//cxx:link_groups_types.bzl", + "LinkGroupInfo", # @unused Used as a type +) +load( + "@prelude//cxx:linker.bzl", + "get_default_shared_library_name", + "get_shared_library_name_for_param", ) load( "@prelude//linking:link_groups.bzl", "LinkGroupLib", # @unused Used as a type + "LinkGroupLibInfo", # @unused Used as a type ) load( "@prelude//linking:link_info.bzl", + "LibOutputStyle", "LinkInfo", - "LinkStyle", - "Linkage", # @unused Used as a type + "LinkStrategy", "MergedLinkInfo", "get_link_args_for_strategy", - "to_link_strategy", "unpack_external_debug_info", ) load( @@ -53,28 +63,40 @@ load( "create_linkable_graph", "get_linkable_graph_node_map_func", ) -load( - "@prelude//linking:linkables.bzl", - "linkables", -) load( "@prelude//linking:shared_libraries.bzl", "SharedLibraryInfo", ) load( - "@prelude//utils:utils.bzl", - "filter_and_map_idx", + "@prelude//linking:types.bzl", + "Linkage", # @unused Used as a type ) +load( + "@prelude//utils:type_defs.bzl", + "is_dict", + "is_string", +) +load( + ":build_params.bzl", + "MetadataKind", # @unused Used as a type +) +load( + ":context.bzl", + "CrateName", # @unused Used as a type + "DepCollectionContext", # @unused Used as a type +) +load(":rust_toolchain.bzl", "PanicRuntime", "RustToolchainInfo") -# Link style for targets which do not set an explicit `link_style` attribute. 
-DEFAULT_STATIC_LINK_STYLE = LinkStyle("static_pic") - -# Override dylib crates to static_pic, so that Rust code is always -# statically linked. -# In v1 we always linked Rust deps statically, even for "shared" link style -# That shouldn't be necessary, but fully shared needs some more debugging, -# so default to v1 behaviour. (Should be controlled with the `rust.force_rlib` option) -FORCE_RLIB = True +# Link strategy for targets which do not set an explicit `link_style` attribute. +# +# These values are also used as the defaults for check/clippy subtargets on +# libraries, and are the only way in which metadata-fast output can be built. +# +# Internally at Meta, these are a good choice for a default because they allow +# sharing work between check builds and dev mode builds, which have shared link +# strategy, and so consume their dependencies as `static_pic`. +DEFAULT_STATIC_LINK_STRATEGY = LinkStrategy("static_pic") +DEFAULT_STATIC_LIB_OUTPUT_STYLE = LibOutputStyle("pic_archive") RustProcMacroPlugin = plugins.kind() @@ -86,29 +108,18 @@ RustProcMacroMarker = provider(fields = { "label": typing.Any, }) -CrateName = record( - simple = field(str), - dynamic = field([Artifact, None]), -) - # Information which is keyed on link_style -RustLinkStyleInfo = record( - # Path to library or binary - rlib = field(Artifact), +RustLinkStrategyInfo = record( + # Path to the rlib, rmeta, dylib, etc. + outputs = field(dict[MetadataKind, Artifact]), # Transitive dependencies which are relevant to the consumer. For crate types which do not # propagate their deps (specifically proc macros), this set is empty # This does not include the proc macros, which are passed separately in `RustLinkInfo` - transitive_deps = field(dict[Artifact, CrateName]), - - # Path for library metadata (used for check or pipelining) - rmeta = field(Artifact), - # Transitive rmeta deps. This is the same dict as `transitive_deps`, except that it has the - # rmeta and not the rlib artifact - transitive_rmeta_deps = field(dict[Artifact, CrateName]), + transitive_deps = field(dict[MetadataKind, dict[Artifact, CrateName]]), transitive_proc_macro_deps = field(dict[RustProcMacroMarker, ()]), # Path to PDB file with Windows debug data. - pdb = field([Artifact, None]), + pdb = field(Artifact | None), # Debug info which is referenced -- but not included -- by the linkable rlib. external_debug_info = field(ArtifactTSet), ) @@ -119,33 +130,82 @@ RustLinkInfo = provider( fields = { # crate - crate name "crate": CrateName, - # styles - information about each LinkStyle as RustLinkStyleInfo - "styles": dict[LinkStyle, RustLinkStyleInfo], - # Propagate non-rust native linkable dependencies through rust libraries. - "non_rust_exported_link_deps": typing.Any, - # Propagate non-rust native linkable info through rust libraries. - "non_rust_link_info": typing.Any, - # Propagate non-rust shared libraries through rust libraries. - "non_rust_shared_libs": typing.Any, + # strategies - information about each LinkStrategy as RustLinkStrategyInfo + "strategies": dict[LinkStrategy, RustLinkStrategyInfo], + # Rust interacts with the native link graph in a non-standard way. + # + # The first difference is in the re-export behavior of Rust compared to C++. The native link + # providers make an assumption that if one node in the link graph references a symbol in + # another node in the link graph, there is also a corresponding edge in the link graph. 
+ # Specifically, the first node must declare a direct dependency on the second; a transitive + # dependency is not enough. For C++, this just means that each library depends in the link + # graph on its direct deps and their exported deps. + # + # For Rust, the situation is different. Because of re-exports and generics causing delayed + # codegen, the generated object files for a Rust library can generate symbol references to + # any of the library's transitive Rust dependencies, as well as to the immediate C++ + # dependencies of those libraries. So to account for that, each Rust library reports direct + # dependencies on all of those libraries in the link graph. The `merged_link_infos` and + # `linkable_graphs` lists are the providers from all of those libraries. + # + # The second difference is unique to the case where `advanced_unstable_linking` is not set + # on the toolchain. Imagine we have a Rust library `:B` whose only dependency is `:A`, + # another Rust library. The Rust rules give Rust -> Rust dependencies special treatment in + # the non-`advanced_unstable_linking` case. As a result, the `MergedLinkInfo` provided from + # `:B` is not a "superset" of the `MergedLinkInfo` provided from `:A` (concrete differences + # discussed below). + # + # This distinction is implemented by effectively having each Rust library provide two sets + # of link providers. The first is the link providers used across Rust -> Rust dependency + # edges - this is what the fields below are. The second set is the one that is used by C++ + # and other non-Rust dependents, and is returned from the rule like normal. The second set + # is a superset of the first; that is, it includes anything that the first link providers + # added. + # + # The concrete difference is that the Rust `MergedLinkInfo` provided by `:A` is only the + # result of merging the `MergedLinkInfo`s from `:A`'s deps, and does not contain anything + # about `:A`. Instead, when `:B` produces the native `MergedLinkInfo`, it will add a single + # static library that bundles all transitive Rust deps, including `:A` (and similarly for + # the DSO case). + # + # With `advanced_unstable_linking`, Rust libraries essentially behave just like C++ + # libraries in the link graph, with the handling of transitive dependencies being the only + # difference. + "merged_link_infos": dict[ConfiguredTargetLabel, MergedLinkInfo], + "linkable_graphs": list[LinkableGraph], + "shared_libs": SharedLibraryInfo, + # LinkGroupLibInfo intentionally omitted because the Rust -> Rust version + # never needs to be different from the Rust -> native version. + # + # Rust currently treats all native dependencies as being exported, in + # the sense of C++ `exported_deps`. However, they are not only exported + # from the Rust library that directly depends on them, they are also + # exported through any further chains of Rust libraries. This list + # tracks those dependencies. + # + # FIXME(JakobDegen): We should not default to treating all native deps + # as exported.
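# For example (hypothetical targets): if rust_library `:b` depends on
# rust_library `:a`, and `:a` depends on cxx_library `:c`, then `:c` appears
# in the `exported_link_deps` of both `:a` and `:b`.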
+ "exported_link_deps": list[Dependency], }, ) -def _adjust_link_style_for_rust_dependencies(dep_link_style: LinkStyle) -> LinkStyle: - if FORCE_RLIB and dep_link_style == LinkStyle("shared"): - return DEFAULT_STATIC_LINK_STYLE +def _adjust_link_strategy_for_rust_dependencies(toolchain_info: RustToolchainInfo, dep_link_strategy: LinkStrategy) -> LinkStrategy: + if dep_link_strategy == LinkStrategy("shared") and not toolchain_info.advanced_unstable_linking: + return DEFAULT_STATIC_LINK_STRATEGY else: - return dep_link_style + return dep_link_strategy -def style_info(info: RustLinkInfo, dep_link_style: LinkStyle) -> RustLinkStyleInfo: - rust_dep_link_style = _adjust_link_style_for_rust_dependencies(dep_link_style) - return info.styles[rust_dep_link_style] +def strategy_info(toolchain_info: RustToolchainInfo, info: RustLinkInfo, dep_link_strategy: LinkStrategy) -> RustLinkStrategyInfo: + rust_dep_link_strategy = _adjust_link_strategy_for_rust_dependencies(toolchain_info, dep_link_strategy) + + return info.strategies[rust_dep_link_strategy] # Any dependency of a Rust crate RustOrNativeDependency = record( # The actual dependency dep = field(Dependency), # The local name, if any (for `named_deps`) - name = field([None, str]), + name = field(None | str | ResolvedStringWithMacros), # Any flags for the dependency (`flagged_deps`), which are passed on to rustc. flags = field(list[str]), ) @@ -153,7 +213,8 @@ RustOrNativeDependency = record( RustDependency = record( info = field(RustLinkInfo), label = field(ConfiguredProvidersLabel), - name = field([None, str]), + dep = field(Dependency), + name = field(None | str | ResolvedStringWithMacros), flags = field(list[str]), proc_macro_marker = field([None, RustProcMacroMarker]), ) @@ -172,60 +233,106 @@ RustCxxLinkGroupInfo = record( link_group_libs = field(dict[str, [LinkGroupLib, None]]), # mapping from target labels to the corresponding link group link_info labels_to_links_map = field(dict[Label, LinkGroupLinkInfo]), - # prefrred linkage mode for link group libraries + # Target to link group name where it was actually linked into + targets_consumed_by_link_groups = field(dict[Label, str]), + # preferred linkage mode for link group libraries link_group_preferred_linkage = field(dict[Label, Linkage]), ) -def enable_link_groups( - ctx: AnalysisContext, - link_style: [LinkStyle, None], - specified_link_style: LinkStyle, - is_binary: bool): - if not (cxx_is_gnu(ctx) and is_binary): - # check minium requirements +def enable_link_groups(ctx: AnalysisContext): + if not cxx_is_gnu(ctx): + # check minimum requirements return False - if link_style == LinkStyle("shared") or link_style != specified_link_style: - # check whether we should run link groups analysis for the given link style - return False - - # check whether link groups is enabled return ctx.attrs.auto_link_groups and ctx.attrs.link_group_map # Returns all first-order dependencies. 
def _do_resolve_deps( deps: list[Dependency], - named_deps: dict[str, Dependency], + named_deps: dict[str, Dependency] | list[(ResolvedStringWithMacros, Dependency)], flagged_deps: list[(Dependency, list[str])] = []) -> list[RustOrNativeDependency]: + named_deps_items = named_deps.items() if is_dict(named_deps) else named_deps + return [ RustOrNativeDependency(name = name, dep = dep, flags = flags) for name, dep, flags in [(None, dep, []) for dep in deps] + - [(name, dep, []) for name, dep in named_deps.items()] + + [(name, dep, []) for name, dep in named_deps_items] + [(None, dep, flags) for dep, flags in flagged_deps] ] +def gather_explicit_sysroot_deps(dep_ctx: DepCollectionContext) -> list[RustOrNativeDependency]: + explicit_sysroot_deps = dep_ctx.explicit_sysroot_deps + if not explicit_sysroot_deps: + return [] + + out = [] + if explicit_sysroot_deps.core: + out.append(RustOrNativeDependency( + dep = explicit_sysroot_deps.core, + name = None, + flags = ["nounused"], + )) + if explicit_sysroot_deps.std: + out.append(RustOrNativeDependency( + dep = explicit_sysroot_deps.std, + name = None, + flags = ["nounused", "force"], + )) + if explicit_sysroot_deps.proc_macro: + flags = ["noprelude"] if not dep_ctx.is_proc_macro or dep_ctx.include_doc_deps else [] + out.append(RustOrNativeDependency( + dep = explicit_sysroot_deps.proc_macro, + name = None, + flags = ["nounused"] + flags, + )) + + # When advanced_unstable_linking is on, we only add the dep that matches the + # panic runtime. Without advanced_unstable_linking, we just let rustc deal + # with it + if explicit_sysroot_deps.panic_unwind: + if not dep_ctx.advanced_unstable_linking or dep_ctx.panic_runtime == PanicRuntime("unwind"): + out.append(RustOrNativeDependency( + dep = explicit_sysroot_deps.panic_unwind, + name = None, + flags = ["nounused"], + )) + if explicit_sysroot_deps.panic_abort: + if not dep_ctx.advanced_unstable_linking or dep_ctx.panic_runtime == PanicRuntime("abort"): + out.append(RustOrNativeDependency( + dep = explicit_sysroot_deps.panic_abort, + name = None, + flags = ["nounused"], + )) + for d in explicit_sysroot_deps.others: + # FIXME(JakobDegen): Ideally we would not be using `noprelude` here but + # instead report these as regular transitive dependencies. However, + # that's a bit harder to get right, so leave it like this for now. + out.append(RustOrNativeDependency( + dep = d, + name = None, + flags = ["noprelude", "nounused"], + )) + return out + def resolve_deps( ctx: AnalysisContext, - include_doc_deps: bool = False) -> list[RustOrNativeDependency]: - # The `getattr`s are needed for when we're operating on - # `prebuilt_rust_library` rules, which don't have those attrs. 
+ dep_ctx: DepCollectionContext) -> list[RustOrNativeDependency]: dependencies = _do_resolve_deps( deps = ctx.attrs.deps, - named_deps = getattr(ctx.attrs, "named_deps", {}), - flagged_deps = getattr(ctx.attrs, "flagged_deps", []), + named_deps = ctx.attrs.named_deps, + flagged_deps = ctx.attrs.flagged_deps, ) - if include_doc_deps: + if dep_ctx.include_doc_deps: dependencies.extend(_do_resolve_deps( - deps = ctx.attrs.doc_deps, + deps = getattr(ctx.attrs, "doc_deps", []), named_deps = getattr(ctx.attrs, "doc_named_deps", {}), )) - return dependencies + return dependencies + gather_explicit_sysroot_deps(dep_ctx) -def resolve_rust_deps( +def resolve_rust_deps_inner( ctx: AnalysisContext, - include_doc_deps: bool = False) -> list[RustDependency]: - all_deps = resolve_deps(ctx, include_doc_deps) + all_deps: list[RustOrNativeDependency]) -> list[RustDependency]: rust_deps = [] available_proc_macros = get_available_proc_macros(ctx) for dep in all_deps: @@ -244,30 +351,26 @@ def resolve_rust_deps( rust_deps.append(RustDependency( info = info, label = label, + dep = dep.dep, name = dep.name, flags = dep.flags, proc_macro_marker = proc_macro_marker, )) return rust_deps +def resolve_rust_deps( + ctx: AnalysisContext, + dep_ctx: DepCollectionContext) -> list[RustDependency]: + all_deps = resolve_deps(ctx, dep_ctx) + return resolve_rust_deps_inner(ctx, all_deps) + def get_available_proc_macros(ctx: AnalysisContext) -> dict[TargetLabel, Dependency]: return {x.label.raw_target(): x for x in ctx.plugins[RustProcMacroPlugin]} -def _non_rust_linkable_graph( - ctx: AnalysisContext, - deps: list[Dependency]) -> LinkableGraph: - linkable_graph = create_linkable_graph( - ctx, - deps = filter(None, ( - [d.linkable_graph for d in linkables(deps)] - )), - ) - return linkable_graph - # Returns native link dependencies. -def _non_rust_link_deps( +def _native_link_dependencies( ctx: AnalysisContext, - include_doc_deps: bool = False) -> list[Dependency]: + dep_ctx: DepCollectionContext) -> list[Dependency]: """ Return all first-order native linkable dependencies of all transitive Rust libraries. @@ -275,104 +378,85 @@ def _non_rust_link_deps( This emulates v1's graph walk, where it traverses through Rust libraries looking for non-Rust native link infos (and terminating the search there). """ - first_order_deps = [dep.dep for dep in resolve_deps(ctx, include_doc_deps)] + first_order_deps = [dep.dep for dep in resolve_deps(ctx, dep_ctx)] + return [ d for d in first_order_deps if RustLinkInfo not in d and MergedLinkInfo in d ] -# Returns native link dependencies. -def _non_rust_link_infos( - ctx: AnalysisContext, - include_doc_deps: bool = False) -> list[MergedLinkInfo]: - """ - Return all first-order native link infos of all transitive Rust libraries. - - This emulates v1's graph walk, where it traverses through Rust libraries - looking for non-Rust native link infos (and terminating the search there). - MergedLinkInfo is a mapping from link style to all the transitive deps - rolled up in a tset. - """ - link_deps = _non_rust_link_deps(ctx, include_doc_deps) - return [d[MergedLinkInfo] for d in link_deps] - -# Returns native link dependencies. -def _non_rust_shared_lib_infos( - ctx: AnalysisContext, - include_doc_deps: bool = False) -> list[SharedLibraryInfo]: - """ - Return all transitive shared libraries for non-Rust native linkabes. - - This emulates v1's graph walk, where it traverses through -- and ignores -- - Rust libraries to collect all transitive shared libraries. 
- """ - first_order_deps = [dep.dep for dep in resolve_deps(ctx, include_doc_deps)] - return [ - d[SharedLibraryInfo] - for d in first_order_deps - if RustLinkInfo not in d and SharedLibraryInfo in d - ] - -# Returns native link dependencies. -def _rust_link_infos( +# Returns the rust link infos for non-proc macro deps. +# +# This is intended to be used to access the Rust -> Rust link providers +def _rust_non_proc_macro_link_infos( ctx: AnalysisContext, - include_doc_deps: bool = False) -> list[RustLinkInfo]: - return [d.info for d in resolve_rust_deps(ctx, include_doc_deps)] + dep_ctx: DepCollectionContext) -> list[RustLinkInfo]: + return [d.info for d in resolve_rust_deps(ctx, dep_ctx) if d.proc_macro_marker == None] -def normalize_crate(label: str) -> str: - return label.replace("-", "_") - -def inherited_non_rust_exported_link_deps(ctx: AnalysisContext) -> list[Dependency]: +def inherited_exported_link_deps(ctx: AnalysisContext, dep_ctx: DepCollectionContext) -> list[Dependency]: deps = {} - for dep in _non_rust_link_deps(ctx): + for dep in _native_link_dependencies(ctx, dep_ctx): deps[dep.label] = dep - for info in _rust_link_infos(ctx): - for dep in info.non_rust_exported_link_deps: + for dep in resolve_rust_deps(ctx, dep_ctx): + if dep.proc_macro_marker != None: + continue + + for dep in dep.info.exported_link_deps: deps[dep.label] = dep + return deps.values() -def inherited_non_rust_link_group_info( +def inherited_rust_cxx_link_group_info( ctx: AnalysisContext, - link_style: [LinkStyle, None] = None) -> RustCxxLinkGroupInfo: - link_deps = inherited_non_rust_exported_link_deps(ctx) + dep_ctx: DepCollectionContext, + link_strategy: [LinkStrategy, None] = None) -> RustCxxLinkGroupInfo: + link_graphs = inherited_linkable_graphs(ctx, dep_ctx) # Assume a rust executable wants to use link groups if a link group map # is present link_group = get_link_group(ctx) - link_group_info = get_link_group_info(ctx, filter_and_map_idx(LinkableGraph, link_deps)) + link_group_info = get_link_group_info(ctx, link_graphs) link_groups = link_group_info.groups link_group_mappings = link_group_info.mappings link_group_preferred_linkage = get_link_group_preferred_linkage(link_groups.values()) auto_link_group_specs = get_auto_link_group_specs(ctx, link_group_info) - linkable_graph = _non_rust_linkable_graph( + linkable_graph = create_linkable_graph( ctx, - link_deps, + deps = link_graphs, ) linkable_graph_node_map = get_linkable_graph_node_map_func(linkable_graph)() executable_deps = [] - for d in link_deps: - if d.label in linkable_graph_node_map: - executable_deps.append(d.label) + for g in link_graphs: + if g.label in linkable_graph_node_map: + executable_deps.append(g.label) else: # handle labels that are mutated by version alias - executable_deps.append(d.get(LinkableGraph).nodes.value.label) + executable_deps.append(g.nodes.value.label) + + public_link_group_nodes = get_public_link_group_nodes( + linkable_graph_node_map, + link_group_mappings, + executable_deps, + link_group, + ) linked_link_groups = create_link_groups( ctx = ctx, link_groups = link_groups, + link_strategy = link_strategy, link_group_mappings = link_group_mappings, link_group_preferred_linkage = link_group_preferred_linkage, executable_deps = executable_deps, linker_flags = [], link_group_specs = auto_link_group_specs, - root_link_group = link_group, linkable_graph_node_map = linkable_graph_node_map, other_roots = [], prefer_stripped_objects = False, # Does Rust ever use stripped objects? 
anonymous = ctx.attrs.anonymous_link_groups, + public_nodes = public_link_group_nodes, ) auto_link_groups = {} @@ -383,18 +467,19 @@ def inherited_non_rust_link_group_info( if linked_link_group.library != None: link_group_libs[name] = linked_link_group.library - labels_to_links_map = get_filtered_labels_to_links_map( + labels_to_links = get_filtered_labels_to_links_map( + public_link_group_nodes, linkable_graph_node_map, link_group, link_groups, link_group_mappings, link_group_preferred_linkage, - pic_behavior = PicBehavior("always_enabled") if link_style == LinkStyle("static_pic") else PicBehavior("supported"), + pic_behavior = PicBehavior("always_enabled") if link_strategy == LinkStrategy("static_pic") else PicBehavior("supported"), link_group_libs = { name: (lib.label, lib.shared_link_infos) for name, lib in link_group_libs.items() }, - link_strategy = to_link_strategy(link_style), + link_strategy = link_strategy, roots = executable_deps, is_executable_link = True, prefer_stripped = False, @@ -402,51 +487,82 @@ def inherited_non_rust_link_group_info( ) return RustCxxLinkGroupInfo( - filtered_links = get_filtered_links(labels_to_links_map), + filtered_links = get_filtered_links(labels_to_links.map), symbol_files_info = LinkInfo( pre_flags = linked_link_groups.symbol_ldflags, ), - filtered_targets = get_filtered_targets(labels_to_links_map), + filtered_targets = get_filtered_targets(labels_to_links.map), link_group_info = link_group_info, link_group_libs = link_group_libs, - labels_to_links_map = labels_to_links_map, + labels_to_links_map = labels_to_links.map, + targets_consumed_by_link_groups = linked_link_groups.targets_consumed_by_link_groups, link_group_preferred_linkage = link_group_preferred_linkage, ) -def inherited_non_rust_link_info( +def inherited_merged_link_infos( ctx: AnalysisContext, - include_doc_deps: bool = False) -> list[MergedLinkInfo]: - infos = [] - infos.extend(_non_rust_link_infos(ctx, include_doc_deps)) - infos.extend([d.non_rust_link_info for d in _rust_link_infos(ctx, include_doc_deps) if d.non_rust_link_info]) + dep_ctx: DepCollectionContext) -> dict[ConfiguredTargetLabel, MergedLinkInfo]: + infos = {} + for d in _native_link_dependencies(ctx, dep_ctx): + g = d.get(MergedLinkInfo) + if g: + infos[d.label.configured_target()] = g + for info in _rust_non_proc_macro_link_infos(ctx, dep_ctx): + infos.update(info.merged_link_infos) return infos -def inherited_non_rust_shared_libs( +def inherited_shared_libs( ctx: AnalysisContext, - include_doc_deps: bool = False) -> list[SharedLibraryInfo]: + dep_ctx: DepCollectionContext) -> list[SharedLibraryInfo]: infos = [] - infos.extend(_non_rust_shared_lib_infos(ctx, include_doc_deps)) - infos.extend([d.non_rust_shared_libs for d in _rust_link_infos(ctx, include_doc_deps)]) + infos.extend([d[SharedLibraryInfo] for d in _native_link_dependencies(ctx, dep_ctx)]) + infos.extend([d.shared_libs for d in _rust_non_proc_macro_link_infos(ctx, dep_ctx)]) return infos -def inherited_external_debug_info( +def inherited_linkable_graphs(ctx: AnalysisContext, dep_ctx: DepCollectionContext) -> list[LinkableGraph]: + deps = {} + for d in _native_link_dependencies(ctx, dep_ctx): + g = d.get(LinkableGraph) + if g: + deps[g.label] = g + for info in _rust_non_proc_macro_link_infos(ctx, dep_ctx): + for g in info.linkable_graphs: + deps[g.label] = g + return deps.values() + +def inherited_link_group_lib_infos(ctx: AnalysisContext, dep_ctx: DepCollectionContext) -> list[LinkGroupLibInfo]: + # There are no special Rust -> Rust versions of this 
provider + deps = {} + for d in resolve_deps(ctx, dep_ctx): + i = d.dep.get(LinkGroupLibInfo) + if i: + deps[d.dep.label] = i + return deps.values() + +def inherited_rust_external_debug_info( ctx: AnalysisContext, - dwo_output_directory: [Artifact, None], - dep_link_style: LinkStyle) -> ArtifactTSet: - rust_dep_link_style = _adjust_link_style_for_rust_dependencies(dep_link_style) - non_rust_dep_link_style = dep_link_style + dep_ctx: DepCollectionContext, + link_strategy: LinkStrategy) -> list[ArtifactTSet]: + toolchain_info = ctx.attrs._rust_toolchain[RustToolchainInfo] + return [strategy_info(toolchain_info, d.info, link_strategy).external_debug_info for d in resolve_rust_deps(ctx, dep_ctx)] +def inherited_external_debug_info( + ctx: AnalysisContext, + dep_ctx: DepCollectionContext, + dwo_output_directory: Artifact | None, + dep_link_strategy: LinkStrategy) -> ArtifactTSet: inherited_debug_infos = [] - inherited_non_rust_link_infos = [] + inherited_link_infos = [] + toolchain_info = ctx.attrs._rust_toolchain[RustToolchainInfo] - for d in resolve_deps(ctx): + for d in resolve_deps(ctx, dep_ctx): if RustLinkInfo in d.dep: - inherited_debug_infos.append(d.dep[RustLinkInfo].styles[rust_dep_link_style].external_debug_info) - inherited_non_rust_link_infos.append(d.dep[RustLinkInfo].non_rust_link_info) + inherited_debug_infos.append(strategy_info(toolchain_info, d.dep[RustLinkInfo], dep_link_strategy).external_debug_info) + inherited_link_infos.extend(d.dep[RustLinkInfo].merged_link_infos.values()) elif MergedLinkInfo in d.dep: - inherited_non_rust_link_infos.append(d.dep[MergedLinkInfo]) + inherited_link_infos.append(d.dep[MergedLinkInfo]) - link_args = get_link_args_for_strategy(ctx, inherited_non_rust_link_infos, to_link_strategy(non_rust_dep_link_style)) + link_args = get_link_args_for_strategy(ctx, inherited_link_infos, dep_link_strategy) inherited_debug_infos.append(unpack_external_debug_info(ctx.actions, link_args)) return make_artifact_tset( @@ -456,6 +572,9 @@ def inherited_external_debug_info( children = inherited_debug_infos, ) +def normalize_crate(label: str | ResolvedStringWithMacros) -> str | ResolvedStringWithMacros: + return label.replace("-", "_") if is_string(label) else label + def attr_simple_crate_for_filenames(ctx: AnalysisContext) -> str: """ A "good enough" identifier to use in filenames. Buck wants to have filenames @@ -487,6 +606,15 @@ def attr_crate(ctx: AnalysisContext) -> CrateName: if dynamic: dynamic = dynamic.get(DefaultInfo).default_outputs[0] return CrateName( - simple = ctx.attrs.crate or normalize_crate(ctx.label.name), + simple = normalize_crate(ctx.attrs.crate or ctx.label.name), dynamic = dynamic, ) + +def attr_soname(ctx: AnalysisContext) -> str: + """ + Get the shared library name to set for the given rust library. + """ + linker_info = get_cxx_toolchain_info(ctx).linker_info + if ctx.attrs.soname != None: + return get_shared_library_name_for_param(linker_info, ctx.attrs.soname) + return get_default_shared_library_name(linker_info, ctx.label) diff --git a/prelude/rust/linkable_symbol.bzl b/prelude/rust/linkable_symbol.bzl new file mode 100644 index 0000000000000..d1e2955487593 --- /dev/null +++ b/prelude/rust/linkable_symbol.bzl @@ -0,0 +1,152 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +""" +Example: + + buck_genrule( + name = "my-generated-data" + bash = "something slow", + ) + + rust_linkable_symbol( + name = "my-generated-data-symbol", + content_str = ":my-generated-data", # or `content_bytes` for non-utf8 + ) + + rust_binary( + name = "whoa", + srcs = ..., + deps = [ + ... + ":my-generated-data-symbol", + ], + ) + +The generated Rust library contains a get() function that returns your symbol's +data as &'static str or &'static [u8], depending on whether you used str or +bytes in the rust_linkable_symbol target. + + fn main() { + let my_generated_data = my_generated_data_symbol::get(); + println!("{:?}", my_generated_data); + } + +The major advantage of rust_linkable_symbol over directly using include_bytes +with a mapped_srcs in your Rust target is that your slow genrule does not have +to get built when you're doing typecheck-only builds of the Rust code. That +applies to all of the following situations: + + - `arc rust-check` a.k.a. `buck2 build :whoa[check]` + + - documentation builds: `buck2 build :whoa[doc]` + + - all building performed by IDE +""" + +load("@prelude//rust:link_info.bzl", "RustLinkInfo") # @oss-enable +load("@prelude//prelude.bzl", prelude = "native") # @oss-enable +# @oss-disable: load("@fbcode//buck2/facebook:autodeps_hacks.bzl", "RustLinkInfo", "prelude") + +def _remove_rust_link_info_impl(ctx: AnalysisContext) -> list[Provider]: + out = [] + for p in ctx.attrs.base.providers: + if not isinstance(p, RustLinkInfo): + out.append(p) + return out + +_remove_rust_link_info = rule( + impl = _remove_rust_link_info_impl, + attrs = { + "base": attrs.dep(), + "labels": attrs.list(attrs.string()), + }, +) + +def rust_linkable_symbol( + name, + content_str = None, + content_bytes = None, + align_bytes = None, + visibility = None, + rust_library_macro = None): + if (content_str == None) == (content_bytes == None): + fail("rust_linkable_symbol requires exactly one of `content_str =` or `content_bytes =` to be passed") + + if align_bytes != None: + if content_bytes == None: + fail("rust_linkable_symbol's align_bytes is only supported when using content_bytes") + if align_bytes not in [2, 4, 8]: + fail("unsupported rust_linkable_symbol alignment") + + kind, content = ("str", content_str) if content_str else ("bytes", content_bytes) + + rust_library_macro = rust_library_macro or prelude.rust_library + + # Rustc shouldn't be the easiest way to accomplish this but here we are. + # + # Background reading: + # https://tratt.net/laurie/blog/2022/whats_the_most_portable_way_to_include_binary_blobs_in_an_executable.html + # + # Maybe use `#embed` eventually (several years from now?). + # https://www.open-std.org/jtc1/sc22/wg14/www/docs/n3017.htm + rust_library_macro( + name = "{}@symbol".format(name), + crate = name, + doctests = False, + env = { + "LINKABLE_SYMBOL": "{}:{}".format(package_name(), name), + }, + labels = [ + "generated", + "rustc_do_not_check", + ], + mapped_srcs = { + "prelude//rust/tools:linkable_symbol.rs": "lib.rs", + content: "content", + }, + rustc_flags = [ + "--cfg=rust_linkable_symbol_content_{}".format(kind), + "--cfg=rust_linkable_symbol_align_bytes=\"{}\"".format(align_bytes or 1), + "@$(location prelude//rust/tools:linkable_symbol_supports_no_std)", + ], + visibility = [], + ) + + # Alias the Rust library with a rule that just removes the `RustLinkInfo`. + # This causes the dependent library to be treated more like a C++ dep than a + # Rust dep, and thereby not be needed during type checking. 
+ _remove_rust_link_info( + name = "{}@link".format(name), + base = ":{}@symbol".format(name), + labels = ["generated"], + ) + + rust_library_macro( + name = name, + deps = [ + ":{}@link".format(name), + ], + doctests = False, + env = { + "LINKABLE_SYMBOL": "{}:{}".format(package_name(), name), + }, + labels = [ + "generated", + ], + mapped_srcs = { + "prelude//rust/tools:linkable_symbol.rs": "lib.rs", + }, + rustc_flags = [ + "--cfg=rust_linkable_symbol_getter_{}".format(kind), + "--cfg=rust_linkable_symbol_align_bytes=\"{}\"".format(align_bytes or 1), + # Setting `no_std` here is unconditionally fine - a panic handler will + # be provided by whatever uses this library. + "--cfg=set_nostd", + ], + visibility = visibility, + ) diff --git a/prelude/rust/named_deps.bzl b/prelude/rust/named_deps.bzl new file mode 100644 index 0000000000000..99ccd74265638 --- /dev/null +++ b/prelude/rust/named_deps.bzl @@ -0,0 +1,35 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//utils:argfile.bzl", "at_argfile") +load("@prelude//utils:type_defs.bzl", "is_list") +load(":context.bzl", "CompileContext") + +# Write a file containing all the dynamically-generated dependency names. This +# isn't used in the course of any Buck builds, but is needed by rust-project to +# supply an accurate dependency graph to rust-analyzer. +def write_named_deps_names( + ctx: AnalysisContext, + compile_ctx: CompileContext) -> Artifact | None: + if not is_list(ctx.attrs.named_deps): + return None + + named_deps_names = ctx.actions.declare_output("named_deps") + ctx.actions.run( + cmd_args( + compile_ctx.toolchain_info.rustc_action, + cmd_args(named_deps_names.as_output(), format = "--echo={}"), + at_argfile( + actions = ctx.actions, + name = "named_deps.args", + args = [name for name, _dep in ctx.attrs.named_deps], + allow_args = True, + ), + ), + category = "named_deps", + ) + return named_deps_names diff --git a/prelude/rust/outputs.bzl b/prelude/rust/outputs.bzl new file mode 100644 index 0000000000000..8b04722e32542 --- /dev/null +++ b/prelude/rust/outputs.bzl @@ -0,0 +1,47 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load( + "@prelude//:artifact_tset.bzl", + "ArtifactTSet", # @unused Used as a type +) + +RustcOutput = record( + output = field(Artifact), + stripped_output = field(Artifact), + diag_txt = field(Artifact), + diag_json = field(Artifact), + pdb = field(Artifact | None), + dwp_output = field(Artifact | None), + # Zero or more Split DWARF debug info files are emitted into this directory + # with unpredictable filenames. + dwo_output_directory = field(Artifact | None), + extra_external_debug_info = field(list[ArtifactTSet]), +) + +def output_as_diag_subtargets(o: RustcOutput, clippy: RustcOutput) -> dict[str, Artifact]: + return { + "check": o.output, + "clippy.json": clippy.diag_json, + "clippy.txt": clippy.diag_txt, + "diag.json": o.diag_json, + "diag.txt": o.diag_txt, + } + +# Access to additional outputs from Rust compilation.
+# +# This provider is intended to be available from all rules that compile Rust +# code. As a result, it must be different from `RustLinkInfo`, since it should +# not exist on a prebuilt Rust library, but should exist on a binary. +RustcExtraOutputsInfo = provider( + fields = { + "clippy": RustcOutput, + "clippy_incr": RustcOutput, + "metadata": RustcOutput, + "metadata_incr": RustcOutput, + }, +) diff --git a/prelude/rust/resources.bzl b/prelude/rust/resources.bzl index 05ef5fb82d1e5..1b59ba8a87e74 100644 --- a/prelude/rust/resources.bzl +++ b/prelude/rust/resources.bzl @@ -5,11 +5,15 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +load( + "@prelude//:artifacts.bzl", + "ArtifactOutputs", # @unused Used as a type + "single_artifact", +) load("@prelude//:paths.bzl", "paths") -load("@prelude//utils:arglike.bzl", "ArgLike") # @unused Used as a type -load("@prelude//utils:utils.bzl", "expect", "from_named_set") +load("@prelude//utils:utils.bzl", "from_named_set") -def rust_attr_resources(ctx: AnalysisContext) -> dict[str, (Artifact, list[ArgLike])]: +def rust_attr_resources(ctx: AnalysisContext) -> dict[str, ArtifactOutputs]: """ Return the resources provided by this rule, as a map of resource name to a tuple of the resource artifact and any "other" outputs exposed by it. @@ -17,18 +21,6 @@ def rust_attr_resources(ctx: AnalysisContext) -> dict[str, (Artifact, list[ArgLi resources = {} for name, resource in from_named_set(ctx.attrs.resources).items(): - if type(resource) == "artifact": - other = [] - else: - info = resource[DefaultInfo] - expect( - len(info.default_outputs) == 1, - "expected exactly one default output from {} ({})" - .format(resource, info.default_outputs), - ) - [resource] = info.default_outputs - other = info.other_outputs - - resources[paths.join(ctx.label.package, name)] = (resource, other) + resources[paths.join(ctx.label.package, name)] = single_artifact(resource) return resources diff --git a/prelude/rust/rust-analyzer/check.bxl b/prelude/rust/rust-analyzer/check.bxl index 252d6a5fb3d4e..ab972d5a6ad52 100644 --- a/prelude/rust/rust-analyzer/check.bxl +++ b/prelude/rust/rust-analyzer/check.bxl @@ -5,32 +5,46 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
-def check_targets_impl(ctx): - target_universe = ctx.uquery().owner(ctx.cli_args.file) - owners = ctx.cquery().owner(ctx.cli_args.file, target_universe) +load("@prelude//rust:outputs.bzl", "RustcExtraOutputsInfo") + +def _check_targets_impl(ctx: bxl.Context) -> list[bxl.EnsuredArtifact]: + uquery_owners = ctx.uquery().owner(ctx.cli_args.file) + target_universe = ctx.target_universe(uquery_owners) + owners = ctx.cquery().owner(ctx.cli_args.file, target_universe.target_set()) nodes = ctx.cquery().kind("^(rust_binary|rust_library|rust_test)$", owners) if len(nodes) == 0: - return + return [] + + analysis = ctx.analysis(nodes).values() - diag_kind = "clippy.json" if ctx.cli_args.use_clippy else "diag.json" - build_result = ctx.build([ - node.label.with_sub_target(diag_kind) - for node in nodes - ]) + artifacts = [] + for a in analysis: + o = a.providers()[RustcExtraOutputsInfo] + if ctx.cli_args.use_clippy: + artifacts.append(o.clippy_incr.diag_json) + else: + artifacts.append(o.metadata_incr.diag_json) - dict_output = ctx.output.ensure_multiple(build_result) + art_output = ctx.output.ensure_multiple(artifacts) - out = [ - artifacts[0].abs_path() - for artifacts in dict_output.values() - if len(artifacts) == 1 + return [ + artifact.abs_path() + for artifact in art_output ] +def _run(ctx: bxl.Context) -> None: + diagnostic_paths = _check_targets_impl(ctx) + + out = { + "diagnostic_paths": diagnostic_paths, + "project_root": ctx.root(), + } + ctx.output.print_json(out) check = bxl_main( - impl = check_targets_impl, + impl = _run, cli_args = { "file": cli_args.string(), "use-clippy": cli_args.bool(), diff --git a/prelude/rust/rust-analyzer/provider.bzl b/prelude/rust/rust-analyzer/provider.bzl new file mode 100644 index 0000000000000..b764c5829ee1b --- /dev/null +++ b/prelude/rust/rust-analyzer/provider.bzl @@ -0,0 +1,79 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//rust:build.bzl", "crate_root", "process_env") +load( + "@prelude//rust:context.bzl", + "CompileContext", # @unused Used as a type + "DepCollectionContext", # @unused Used as a type +) +load("@prelude//rust:link_info.bzl", "get_available_proc_macros", "resolve_rust_deps") + +RustAnalyzerInfo = provider( + fields = { + # The root source for the rust target (typically lib.rs, main.rs), relative to the buck target file. + "crate_root": str, + # The processed env as produced by the buck build prelude. Some env vars like `OUT_DIR` and `CARGO_MANIFEST_DIR` + # will be made into absolute paths. + "env": dict[str, cmd_args], + # The list of rust deps needed for RustAnalyzer to function. Namely, this excludes things like + # exec deps used as inputs to genrules and other non-rust dependencies. + "rust_deps": list[Dependency], + # The list of recursive rust dependencies for this target, including proc macros. Useful for + # identifying the targets needing to be collected into Rust Analyzer's crate graph. Notably, + # excludes rust dependencies that are used in build tools (e.g. build scripts). 
+ "transitive_target_set": set[TargetLabel], + }, +) + +def _compute_rust_deps( + ctx: AnalysisContext, + dep_ctx: DepCollectionContext) -> list[Dependency]: + dep_ctx = DepCollectionContext( + advanced_unstable_linking = dep_ctx.advanced_unstable_linking, + # Include doc deps here for any doctests that may be present in the target. + include_doc_deps = True, + is_proc_macro = dep_ctx.is_proc_macro, + # Rust Analyzer handles the sysroot separately. We omit the sysroot deps here and will + # instead pass a path to the sysroot as a separate config. + explicit_sysroot_deps = None, + panic_runtime = dep_ctx.panic_runtime, + ) + + first_order_deps = resolve_rust_deps(ctx, dep_ctx) + available_proc_macros = get_available_proc_macros(ctx) + + return [dep.dep for dep in first_order_deps] + available_proc_macros.values() + +def _compute_transitive_target_set( + ctx: AnalysisContext, + first_order_deps: list[Dependency]) -> set[TargetLabel]: + transitive_targets = set([ctx.label.raw_target()]) + for dep in first_order_deps: + target_sets = dep.get(RustAnalyzerInfo).transitive_target_set + for target_set in target_sets: + transitive_targets.add(target_set) + return transitive_targets + +def _compute_env( + ctx: AnalysisContext, + compile_ctx: CompileContext) -> dict[str, cmd_args]: + # Disable rustc_action processing, as rust-project will handle windows + any escaping necessary. + plain_env, path_env = process_env(compile_ctx, ctx.attrs.env, False, False) + return plain_env | path_env + +def rust_analyzer_provider( + ctx: AnalysisContext, + compile_ctx: CompileContext, + default_roots: list[str]) -> RustAnalyzerInfo: + rust_deps = _compute_rust_deps(ctx, compile_ctx.dep_ctx) + return RustAnalyzerInfo( + crate_root = crate_root(ctx, default_roots), + env = _compute_env(ctx, compile_ctx), + rust_deps = rust_deps, + transitive_target_set = _compute_transitive_target_set(ctx, rust_deps), + ) diff --git a/prelude/rust/rust-analyzer/resolve_deps.bxl b/prelude/rust/rust-analyzer/resolve_deps.bxl index 4e9b88b5e1bf7..ac94d3201464f 100644 --- a/prelude/rust/rust-analyzer/resolve_deps.bxl +++ b/prelude/rust/rust-analyzer/resolve_deps.bxl @@ -5,14 +5,44 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
-def materialize(ctx, target): +load("@prelude//linking:link_info.bzl", "LinkStrategy") +load("@prelude//rust:build_params.bzl", "MetadataKind") +load("@prelude//rust:link_info.bzl", "RustLinkInfo") +load("@prelude//rust/rust-analyzer:provider.bzl", "RustAnalyzerInfo") +load("@prelude//utils:type_defs.bzl", "is_list") + +TargetInfo = dict[str, typing.Any] + +MacroOutput = record( + actual = TargetLabel, + dylib = Artifact, +) + +ExpandedAndResolved = record( + expanded_targets = list[TargetLabel], + queried_proc_macros = dict[TargetLabel, MacroOutput], + resolved_deps = dict[TargetLabel, TargetInfo], +) + +def materialize( + ctx: bxl.Context, + target: bxl.ConfiguredTargetNode) -> Artifact: analysis = ctx.analysis(target) sources = analysis.providers()[DefaultInfo].sub_targets["sources"][DefaultInfo].default_outputs[0] + return sources + +def _get_nullable_attr(attrs, key: str) -> typing.Any: + nullable = getattr(attrs, key, None) + return nullable.value() if nullable != None else None - # Ensures the srcs folder will be present - return ctx.output.ensure(sources).abs_path() +def _process_target_config( + ctx: bxl.Context, + target: bxl.ConfiguredTargetNode, + analysis: bxl.AnalysisResult, + in_workspace: bool) -> TargetInfo: + providers = analysis.providers() + ra_info = providers[RustAnalyzerInfo] -def _process_target_config(ctx, target, in_workspace): # convert all source paths to absolute paths resolved_attrs = target.resolved_attrs_eager(ctx) @@ -20,118 +50,165 @@ def _process_target_config(ctx, target, in_workspace): # For example, this is used in cxx powered crates internally srcs = [] for src in resolved_attrs.srcs: - srcs.append(ctx.output.ensure(src).abs_path()) + srcs.append(src) # remove the configured platform from the deps. for example, # `fbsource//third-party/rust:tracing (ovr_config//platform/linux:x86_64-fbcode-platform010-clang-9f23200ddcddc3cb)` # becomes `fbsource//third-party/rust:tracing`. - deps = [] - for dep in resolved_attrs.deps: - deps.append(dep.label.raw_target()) + deps = [dep.label.raw_target() for dep in ra_info.rust_deps] # Grab only the values that the gen-rules are being mapped to. mapped_srcs = {} for key, v in resolved_attrs.mapped_srcs.items(): - mapped_srcs[v] = ctx.output.ensure(key).abs_path() + mapped_srcs[v] = key # remove the configured platform from named deps.
- named_deps = {} - for dep, alias in resolved_attrs.named_deps.items(): - named_deps[dep] = alias.label.raw_target() + if is_list(resolved_attrs.named_deps): + named_deps_names = providers[DefaultInfo].sub_targets["named_deps"][DefaultInfo].default_outputs[0] + named_deps = [named_deps_names] + for _alias, dep in resolved_attrs.named_deps: + named_deps.append(dep.label.raw_target()) + else: + named_deps = {} + for dep, alias in resolved_attrs.named_deps.items(): + named_deps[dep] = alias.label.raw_target() # remove the configured platform for tests tests = [] for test in resolved_attrs.tests: tests.append(test.raw_target()) + # materialize a file containing the dynamic crate name + crate_dynamic = getattr(resolved_attrs, "crate_dynamic", None) + if crate_dynamic: + crate_dynamic = crate_dynamic.get(DefaultInfo).default_outputs[0] + + env = {k: cmd_args(v, delimiter = "") for k, v in ra_info.env.items()} + # copy over the absolute paths and raw targets into the output - copy = {} attrs = target.attrs_eager() - for k in dir(attrs): - if k == "srcs": - copy["srcs"] = srcs - elif k == "deps": - copy["deps"] = deps - elif k == "mapped_srcs": - copy["mapped_srcs"] = mapped_srcs - elif k == "named_deps": - copy["named_deps"] = named_deps - elif k == "tests": - copy["tests"] = tests - else: - copy[k] = getattr(attrs, k) - - # Always generate the source folder. Let rust-project resolve whether or not to use it - copy["source_folder"] = materialize(ctx, target) - copy["label"] = target.label.raw_target() - copy["kind"] = target.rule_type - copy["in_workspace"] = in_workspace - return copy - -def cquery_deps(ctx, top_targets, workspaces): - # the set that we have at home. - targets = [target for top_target in top_targets for target in ctx.cquery().deps(top_target)] - outputs = ctx.cquery().kind("^(rust_binary|rust_library|rust_test)$", targets) + return { + "crate": _get_nullable_attr(attrs, "crate"), + "crate_dynamic": crate_dynamic, + "crate_root": ra_info.crate_root, + "deps": deps, + "edition": _get_nullable_attr(attrs, "edition"), + "env": env, + "features": resolved_attrs.features, + "in_workspace": in_workspace, + "kind": target.rule_type, + "label": target.label.raw_target(), + "mapped_srcs": mapped_srcs, + "name": resolved_attrs.name, + "named_deps": named_deps, + "proc_macro": _get_nullable_attr(attrs, "proc_macro"), + "project_relative_buildfile": ctx.fs.project_rel_path(target.buildfile_path), + "rustc_flags": _get_nullable_attr(attrs, "rustc_flags"), + "source_folder": materialize(ctx, target), # Always generate the source folder. 
Let rust-project resolve whether or not to use it + "srcs": srcs, + "tests": tests, + } + +def cquery_deps( + ctx: bxl.Context, + top_targets: list[TargetLabel], + workspaces: list[TargetLabel]) -> dict[TargetLabel, TargetInfo]: + targets = set() + target_universe = ctx.target_universe(top_targets).target_set() + analysis_set = ctx.analysis(target_universe) + for _target, analysis in analysis_set.items(): + info = analysis.providers().get(RustAnalyzerInfo) + if info: + for target_set in info.transitive_target_set: + targets.add(target_set) + + # TODO(romanp): support set as target_universe arg + outputs = ctx.target_universe(list(targets)).target_set() out = {} # Eagerly analyze targets - ctx.analysis(outputs) + analysis = ctx.analysis(outputs) for target in outputs: + attrs = target.attrs_lazy() + in_workspace = target.label.raw_target() in top_targets - for candidate_workspace in target.attrs_lazy().get("_workspaces").value(): - if candidate_workspace.raw_target() in workspaces: - in_workspace = True - out[target.label.raw_target()] = _process_target_config(ctx, target, in_workspace) - return out + candidate_workspaces = attrs.get("_workspaces") + if candidate_workspaces: + for candidate_workspace in candidate_workspaces.value(): + if candidate_workspace.raw_target() in workspaces: + in_workspace = True + break -def expand_proc_macros(ctx, targets): - targets = ctx.cquery().deps(targets) - outputs = ctx.cquery().kind("^(configured_alias)$", targets) + target_info = _process_target_config( + ctx = ctx, + target = target, + analysis = analysis[target.label.with_sub_target()], + in_workspace = in_workspace, + ) - out = {} - for alias in outputs: - cfg_actual = alias.resolved_attrs_eager(ctx).actual - actual = ctx.cquery().kind("rust_library", cfg_actual.raw_target()) + out[target.label.raw_target()] = target_info - # if a `configured_alias` has a single `configured_actual` that corresponds to - # a rust library, then that is probably the resolved library, but we'll - # double-check to make sure it's a proc macro.
- if len(actual) == 1: - if actual[0].attrs_eager().proc_macro: - out[alias.label] = actual[0] + return out - macros = {} - for (alias, actual) in out.items(): - analysis = ctx.analysis(actual.label.raw_target()) - so = analysis.providers()[DefaultInfo].sub_targets["shared"][DefaultInfo].default_outputs[0] - macros[alias] = {"actual": actual.label, "dylib": ctx.output.ensure(so).abs_path()} +def expand_proc_macros( + ctx: bxl.Context, + targets: list[TargetLabel]) -> dict[TargetLabel, MacroOutput]: + target_universe = ctx.target_universe(targets).target_set() + targets = ctx.cquery().deps(target_universe) + targets = ctx.cquery().kind("^(rust_binary|rust_library)$", targets) - return macros + out = {} + for target in targets: + attrs = target.resolved_attrs_eager(ctx) + proc_macro = getattr(attrs, "proc_macro", False) + if proc_macro: + analysis = ctx.analysis(target) + rlib = analysis.providers()[RustLinkInfo].strategies[LinkStrategy("shared")].outputs[MetadataKind("link")] + label = target.label.raw_target() + out[label] = MacroOutput( + actual = label, + dylib = rlib, + ) + return out # Returns a list of all the expanded targets including any workspaces, followed by just the workspaces -def expand_targets(ctx, targets): - kind_target_list = ctx.cquery().kind("^(rust_binary|rust_library|rust_test|alias)$", targets) +def expand_targets( + ctx: bxl.Context, + targets: list[TargetLabel], + exclude_workspaces: bool) -> (list[TargetLabel], list[TargetLabel]): + target_universe = ctx.target_universe(targets).target_set() + kind_target_list = ctx.cquery().kind("^(rust_binary|rust_library|rust_test|alias)$", target_universe) # Allow targets to opt-in to being treated as rust-analyzer-compatible. # This is used for cross-compilation targets that apply Buck transitions to Rust rules. - labeled_target_list = ctx.cquery().attrfilter("labels", "rust_analyzer_target", targets) + labeled_target_list = ctx.cquery().attrfilter("labels", "rust_analyzer_target", target_universe) expanded_targets = {t.label.raw_target(): t for t in kind_target_list + labeled_target_list} # Map of potential workspaces to a list of the targets that name these as potential workspaces possible_workspaces = {} - for label, t in expanded_targets.items(): - workspaces = t.attrs_lazy().get("_workspaces") - if workspaces: - for workspace in workspaces.value(): - possible_workspaces.setdefault(workspace.raw_target(), []).append(label) + if not exclude_workspaces: + for label, t in expanded_targets.items(): + workspaces = t.attrs_lazy().get("_workspaces") + if workspaces: + for workspace in workspaces.value(): + if not ctx.target_exists(str(workspace.raw_target())): + continue + + possible_workspaces.setdefault(workspace.raw_target(), []).append(label) + + workspace_analysis = ctx.analysis(ctx.target_universe(possible_workspaces.keys()).target_set()) active_workspaces = {} - for workspace, candidate_deps in possible_workspaces.items(): - # FIXME: Using `cquery deps` here is not right. 
It will transparently look through - # dependency edges of all types, meaning that eg build tools written in Rust and built - # from source will show up too - workspace_deps = {d.label.raw_target(): () for d in ctx.cquery().deps(workspace)} + for workspace_label, analysis in workspace_analysis.items(): + workspace = workspace_label.raw_target() + candidate_deps = possible_workspaces[workspace] + workspace_info = analysis.providers().get(RustAnalyzerInfo) + if workspace_info: + workspace_deps = {t: () for t in workspace_info.transitive_target_set} + else: + workspace_deps = {} + for d in candidate_deps: if d in workspace_deps: active_workspaces[workspace] = () @@ -145,24 +222,115 @@ def expand_targets(ctx, targets): # in the prelude are a bit hard in general expanded_targets.pop(d, None) - return dedupe(sorted(expanded_targets.keys() + active_workspaces.keys())), sorted(active_workspaces.keys()) + return dedupe(sorted(expanded_targets.keys() + active_workspaces.keys())), sorted(possible_workspaces.keys()) -def expand_and_resolve_impl(ctx): +def resolve_targets_impl(ctx: bxl.Context) -> None: # equivalent of `flat_map`ing targets = [target for sublist in ctx.cli_args.targets for target in sublist] - expanded_targets, workspaces = expand_targets(ctx, targets) + actions = ctx.bxl_actions().actions + + expanded_targets, workspaces = expand_targets(ctx, targets, ctx.cli_args.exclude_workspaces) queried_proc_macros = expand_proc_macros(ctx, expanded_targets) resolved_deps = cquery_deps(ctx, expanded_targets, workspaces) - ctx.output.print_json({ - "expanded_targets": expanded_targets, - "queried_proc_macros": queried_proc_macros, - "resolved_deps": resolved_deps, - }) + artifact = actions.declare_output("resolve_targets.json") + artifacts = actions.write_json( + artifact, + ExpandedAndResolved( + expanded_targets = expanded_targets, + queried_proc_macros = queried_proc_macros, + resolved_deps = resolved_deps, + ), + with_inputs = True, + absolute = True, + pretty = ctx.cli_args.pretty, + ) + ctx.output.ensure_multiple(artifacts) + ctx.output.print(ctx.output.ensure(artifact).abs_path()) + +def resolve_owning_buildfile_impl(ctx: bxl.Context) -> None: + # depending on the input, determine the initial set of targets + if ctx.cli_args.files: + targets = ctx.uquery().owner(ctx.cli_args.files) + elif ctx.cli_args.buildfiles: + targets = [ctx.uquery().targets_in_buildfile(buildfile) for buildfile in ctx.cli_args.buildfiles] + + # equivalent of `flat_map`ing + targets = [target for sublist in targets for target in sublist] + targets = ctx.uquery().kind("^(rust_binary|rust_library|rust_test)$", targets) + elif ctx.cli_args.targets: + # equivalent of `flat_map`ing + targets = [target for sublist in ctx.cli_args.targets for target in sublist] + targets = ctx.unconfigured_targets(targets) + else: + fail("Neither `--files`, `--targets`, nor `--buildfiles` were specified; this is a bug") + + # group targets by their buildfile + targets_by_buildfile = {} + for target in targets: + buildfile_path = ctx.fs.abs_path_unsafe(target.buildfile_path) + + if buildfile_path not in targets_by_buildfile: + targets_by_buildfile[buildfile_path] = utarget_set() + targets_by_buildfile[buildfile_path] += utarget_set([target]) + + # collect extra targets from each buildfile + extra_targets_by_buildfile = {} + for buildfile_path in targets_by_buildfile: + extra_targets = ctx.uquery().targets_in_buildfile("{}".format(buildfile_path)) + extra_targets = ctx.uquery().kind("^(rust_binary|rust_library|rust_test)$", extra_targets) + + # 
Exclude targets with the rustc_do_not_check label from the extra targets. This + # label is used for foo@symbol targets (generated by rust_linkable_symbols), which + # are slow to build and never direct dependencies of rust targets. + extra_targets -= ctx.uquery().attrfilter( + "labels", + "rustc_do_not_check", + extra_targets, + ) + + # explicitly included targets aren't "extra" + extra_targets -= targets_by_buildfile[buildfile_path] -expand_and_resolve = bxl_main( - impl = expand_and_resolve_impl, + extra_targets_by_buildfile[buildfile_path] = extra_targets + + # add as many extra targets as we can according to max_extra_targets. + # note that which extra targets we add is arbitrary since it depends on the + # iteration order of the dict and the target_set. + remaining_extra_targets = ctx.cli_args.max_extra_targets + for buildfile_path, extra_targets in extra_targets_by_buildfile.items(): + extra_targets = utarget_set(list(extra_targets)[:remaining_extra_targets]) + targets_by_buildfile[buildfile_path] += extra_targets + + remaining_extra_targets -= len(extra_targets) + if remaining_extra_targets <= 0: + break + + # output just the target labels by buildfile + out = {} + for buildfile_path, targets in targets_by_buildfile.items(): + out[buildfile_path] = [target.label for target in targets] + ctx.output.print_json(out) + +# Writes a json file as an artifact and prints the absolute path of that artifact to stdout. +resolve_targets = bxl_main( + impl = resolve_targets_impl, cli_args = { + "exclude_workspaces": cli_args.bool(default = False), + "pretty": cli_args.bool(default = False), "targets": cli_args.list(cli_args.target_expr()), }, ) + +resolve_owning_buildfile = bxl_main( + impl = resolve_owning_buildfile_impl, + cli_args = { + # while buildfiles, files, and targets can all be passed, only files will be used. + # this file is driven primarily by rust-project's needs and is a private implementation + # detail. + "buildfiles": cli_args.option(cli_args.list(cli_args.string())), + "files": cli_args.option(cli_args.list(cli_args.string())), + "max_extra_targets": cli_args.int(), + "targets": cli_args.option(cli_args.list(cli_args.target_expr())), + }, +) diff --git a/prelude/rust/rust_binary.bzl b/prelude/rust/rust_binary.bzl index 28ba7287cf521..eaeeadc49c659 100644 --- a/prelude/rust/rust_binary.bzl +++ b/prelude/rust/rust_binary.bzl @@ -5,6 +5,10 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree.
+load( + "@prelude//:artifact_tset.bzl", + "project_artifacts", +) load( "@prelude//:resources.bzl", "create_resource_db", @@ -15,20 +19,24 @@ load( "cxx_attr_deps", ) load("@prelude//cxx:cxx_link_utility.bzl", "executable_shared_lib_arguments") +load("@prelude//cxx:cxx_utility.bzl", "cxx_attrs_get_allow_cache_upload") load( "@prelude//cxx:link_groups.bzl", "LINK_GROUP_MAPPINGS_FILENAME_SUFFIX", "LINK_GROUP_MAPPINGS_SUB_TARGET", "LINK_GROUP_MAP_DATABASE_SUB_TARGET", "LinkGroupContext", + "build_shared_libs_for_symlink_tree", "get_link_group_map_json", - "is_link_group_shlib", ) load("@prelude//cxx:linker.bzl", "DUMPBIN_SUB_TARGET", "PDB_SUB_TARGET", "get_dumpbin_providers", "get_pdb_providers") +load( + "@prelude//dist:dist_info.bzl", + "DistInfo", +) load( "@prelude//linking:link_info.bzl", - "LinkStyle", - "Linkage", + "LinkStrategy", ) load( "@prelude//linking:shared_libraries.bzl", @@ -36,11 +44,11 @@ load( "traverse_shared_library_info", ) load("@prelude//os_lookup:defs.bzl", "OsLookup") +load("@prelude//rust/rust-analyzer:provider.bzl", "rust_analyzer_provider") load( "@prelude//tests:re_utils.bzl", - "get_re_executor_from_props", + "get_re_executors_from_props", ) -load("@prelude//utils:arglike.bzl", "ArgLike") # @unused Used as a type load("@prelude//utils:utils.bzl", "flatten_dict") load("@prelude//test/inject_test_run_info.bzl", "inject_test_run_info") load( @@ -48,10 +56,10 @@ load( "compile_context", "generate_rustdoc", "rust_compile", - "rust_compile_multi", ) load( ":build_params.bzl", + "BuildParams", # @unused Used as a type "Emit", "LinkageLang", "RuleType", @@ -61,42 +69,47 @@ load( load(":context.bzl", "CompileContext") load( ":link_info.bzl", - "DEFAULT_STATIC_LINK_STYLE", + "DEFAULT_STATIC_LINK_STRATEGY", "attr_simple_crate_for_filenames", "enable_link_groups", - "inherited_non_rust_link_group_info", - "inherited_non_rust_shared_libs", + "inherited_external_debug_info", + "inherited_rust_cxx_link_group_info", + "inherited_shared_libs", ) +load(":named_deps.bzl", "write_named_deps_names") +load(":outputs.bzl", "RustcExtraOutputsInfo", "output_as_diag_subtargets") load(":resources.bzl", "rust_attr_resources") -_CompileOutputs = record( - link = field(Artifact), - args = field(ArgLike), - extra_targets = field(list[(str, Artifact)]), - runtime_files = field(list[ArgLike]), - sub_targets = field(dict[str, list[DefaultInfo]]), -) +def _strategy_params( + ctx: AnalysisContext, + compile_ctx: CompileContext) -> dict[LinkStrategy, BuildParams]: + target_os_type = ctx.attrs._target_os_type[OsLookup] + linker_type = compile_ctx.cxx_toolchain_info.linker_info.type + + params = {} + for link_strategy in LinkStrategy: + params[link_strategy] = build_params( + rule = RuleType("binary"), + proc_macro = False, + link_strategy = link_strategy, + lib_output_style = None, + lang = LinkageLang("rust"), + linker_type = linker_type, + target_os_type = target_os_type, + ) + return params def _rust_binary_common( ctx: AnalysisContext, compile_ctx: CompileContext, default_roots: list[str], extra_flags: list[str], - allow_cache_upload: bool) -> (list[[DefaultInfo, RunInfo]], cmd_args): + allow_cache_upload: bool) -> (list[Provider], cmd_args): toolchain_info = compile_ctx.toolchain_info simple_crate = attr_simple_crate_for_filenames(ctx) - styles = {} - dwp_target = None - pdb = None - style_param = {} # style -> param - sub_targets = {} - - specified_link_style = LinkStyle(ctx.attrs.link_style) if ctx.attrs.link_style else DEFAULT_STATIC_LINK_STYLE - - target_os_type = 
ctx.attrs._target_os_type[OsLookup] - linker_type = compile_ctx.cxx_toolchain_info.linker_info.type + link_strategy = LinkStrategy(ctx.attrs.link_style) if ctx.attrs.link_style else DEFAULT_STATIC_LINK_STRATEGY resources = flatten_dict(gather_resources( label = ctx.label, @@ -104,232 +117,289 @@ def _rust_binary_common( deps = cxx_attr_deps(ctx), ).values()) - for link_style in LinkStyle: - # Unlike for libraries, there's no possibility of different link styles - # resulting in the same build params, so no need to deduplicate. - params = build_params( - rule = RuleType("binary"), - proc_macro = False, - link_style = link_style, - preferred_linkage = Linkage("any"), - lang = LinkageLang("rust"), - linker_type = linker_type, - target_os_type = target_os_type, - ) - style_param[link_style] = params - name = link_style.value + "/" + output_filename(simple_crate, Emit("link"), params) - output = ctx.actions.declare_output(name) - - # Gather and setup symlink tree of transitive shared library deps. - shared_libs = {} - - rust_cxx_link_group_info = None - link_group_mappings = {} - link_group_libs = {} - link_group_preferred_linkage = {} - labels_to_links_map = {} - filtered_targets = [] - - if enable_link_groups(ctx, link_style, specified_link_style, is_binary = True): - rust_cxx_link_group_info = inherited_non_rust_link_group_info( - ctx, - link_style = link_style, - ) - link_group_mappings = rust_cxx_link_group_info.link_group_info.mappings - link_group_libs = rust_cxx_link_group_info.link_group_libs - link_group_preferred_linkage = rust_cxx_link_group_info.link_group_preferred_linkage - labels_to_links_map = rust_cxx_link_group_info.labels_to_links_map - filtered_targets = rust_cxx_link_group_info.filtered_targets - - # As per v1, we only setup a shared library symlink tree for the shared - # link style. - # XXX need link tree for dylib crates - if link_style == LinkStyle("shared") or rust_cxx_link_group_info != None: - shlib_info = merge_shared_libraries( - ctx.actions, - deps = inherited_non_rust_shared_libs(ctx), - ) - - link_group_ctx = LinkGroupContext( - link_group_mappings = link_group_mappings, - link_group_libs = link_group_libs, - link_group_preferred_linkage = link_group_preferred_linkage, - labels_to_links_map = labels_to_links_map, - ) - for soname, shared_lib in traverse_shared_library_info(shlib_info).items(): - label = shared_lib.label - if rust_cxx_link_group_info == None or is_link_group_shlib(label, link_group_ctx): - shared_libs[soname] = shared_lib.lib - - if rust_cxx_link_group_info: - # When there are no matches for a pattern based link group, - # `link_group_mappings` will not have an entry associated with the lib. - for _name, link_group_lib in link_group_libs.items(): - shared_libs.update(link_group_lib.shared_libs) - - # link groups shared libraries link args are directly added to the link command, - # we don't have to add them here - extra_link_args, runtime_files, shared_libs_symlink_tree = executable_shared_lib_arguments( - ctx.actions, - compile_ctx.cxx_toolchain_info, - output, - shared_libs, - ) + extra_flags = toolchain_info.rustc_binary_flags + (extra_flags or []) - extra_flags = toolchain_info.rustc_binary_flags + (extra_flags or []) + strategy_param = _strategy_params(ctx, compile_ctx) - # Compile rust binary. 
- link, meta = rust_compile_multi( - ctx = ctx, - compile_ctx = compile_ctx, - emits = [Emit("link"), Emit("metadata")], - params = params, - dep_link_style = link_style, - default_roots = default_roots, - extra_link_args = extra_link_args, - predeclared_outputs = {Emit("link"): output}, - extra_flags = extra_flags, - is_binary = True, - allow_cache_upload = allow_cache_upload, - rust_cxx_link_group_info = rust_cxx_link_group_info, + params = strategy_param[link_strategy] + name = output_filename(simple_crate, Emit("link"), params) + output = ctx.actions.declare_output(name) + + rust_cxx_link_group_info = None + link_group_mappings = {} + link_group_libs = {} + link_group_preferred_linkage = {} + labels_to_links_map = {} + targets_consumed_by_link_groups = {} + filtered_targets = [] + + if enable_link_groups(ctx): + rust_cxx_link_group_info = inherited_rust_cxx_link_group_info( + ctx, + compile_ctx.dep_ctx, + link_strategy = link_strategy, ) + link_group_mappings = rust_cxx_link_group_info.link_group_info.mappings + link_group_libs = rust_cxx_link_group_info.link_group_libs + link_group_preferred_linkage = rust_cxx_link_group_info.link_group_preferred_linkage + labels_to_links_map = rust_cxx_link_group_info.labels_to_links_map + targets_consumed_by_link_groups = rust_cxx_link_group_info.targets_consumed_by_link_groups + filtered_targets = rust_cxx_link_group_info.filtered_targets + + shlib_deps = [] + if link_strategy == LinkStrategy("shared") or rust_cxx_link_group_info != None: + shlib_deps = inherited_shared_libs(ctx, compile_ctx.dep_ctx) + + shlib_info = merge_shared_libraries(ctx.actions, deps = shlib_deps) + + link_group_ctx = LinkGroupContext( + link_group_mappings = link_group_mappings, + link_group_libs = link_group_libs, + link_group_preferred_linkage = link_group_preferred_linkage, + labels_to_links_map = labels_to_links_map, + targets_consumed_by_link_groups = targets_consumed_by_link_groups, + ) - args = cmd_args(link.output).hidden(runtime_files) - extra_targets = [("check", meta.output)] + meta.diag.items() - - # If we have some resources, write it to the resources JSON file and add - # it and all resources to "runtime_files" so that we make to materialize - # them with the final binary. - if resources: - resources_hidden = [create_resource_db( - ctx = ctx, - name = name + ".resources.json", - binary = output, - resources = resources, - )] - for resource, other in resources.values(): - resources_hidden.append(resource) - resources_hidden.extend(other) - args.hidden(resources_hidden) - runtime_files.extend(resources_hidden) + # Gather and setup symlink tree of transitive shared library deps. 
+ shared_libs = build_shared_libs_for_symlink_tree( + use_link_groups = rust_cxx_link_group_info != None, + link_group_ctx = link_group_ctx, + link_strategy = link_strategy, + shared_libraries = traverse_shared_library_info(shlib_info), + extra_shared_libraries = [], + ) - sub_targets_for_link_style = {} + # link groups shared libraries link args are directly added to the link command, + # we don't have to add them here + executable_args = executable_shared_lib_arguments( + ctx, + compile_ctx.cxx_toolchain_info, + output, + shared_libs, + ) - sub_targets_for_link_style["shared-libraries"] = [DefaultInfo( - default_output = ctx.actions.write_json( - name + ".shared-libraries.json", - { - "libraries": ["{}:{}[shared-libraries][{}]".format(ctx.label.path, ctx.label.name, name) for name in shared_libs.keys()], - "librariesdwp": ["{}:{}[shared-libraries][{}][dwp]".format(ctx.label.path, ctx.label.name, name) for name, lib in shared_libs.items() if lib.dwp], - "rpathtree": ["{}:{}[rpath-tree]".format(ctx.label.path, ctx.label.name)] if shared_libs_symlink_tree else [], - }, - ), - sub_targets = { - name: [DefaultInfo( - default_output = lib.output, - sub_targets = {"dwp": [DefaultInfo(default_output = lib.dwp)]} if lib.dwp else {}, - )] - for name, lib in shared_libs.items() - }, + # Compile rust binary. + link = rust_compile( + ctx = ctx, + compile_ctx = compile_ctx, + emit = Emit("link"), + params = params, + default_roots = default_roots, + extra_link_args = compile_ctx.cxx_toolchain_info.linker_info.binary_linker_flags + executable_args.extra_link_args, + predeclared_output = output, + extra_flags = extra_flags, + allow_cache_upload = allow_cache_upload, + rust_cxx_link_group_info = rust_cxx_link_group_info, + incremental_enabled = ctx.attrs.incremental_enabled, + ) + + args = cmd_args(link.output, hidden = executable_args.runtime_files) + external_debug_info = project_artifacts( + actions = ctx.actions, + tsets = [inherited_external_debug_info( + ctx, + compile_ctx.dep_ctx, + link.dwo_output_directory, + link_strategy, + )], + ) + + # If we have some resources, write it to the resources JSON file and add + # it and all resources to "runtime_files" so that we make sure to materialize + # them with the final binary. + runtime_files = list(executable_args.runtime_files) + if resources: + resources_hidden = [create_resource_db( + ctx = ctx, + name = name + ".resources.json", + binary = output, + resources = resources, )] + for resource in resources.values(): + resources_hidden.append(resource.default_output) + resources_hidden.extend(resource.other_outputs) + args.add(cmd_args(hidden = resources_hidden)) + runtime_files.extend(resources_hidden) + + # A simple dict of sub-target key to artifact, which we'll convert to + # DefaultInfo providers at the end + extra_compiled_targets = { + "sources": compile_ctx.symlinked_srcs, + } + sub_targets = {} - if isinstance(shared_libs_symlink_tree, Artifact): - sub_targets_for_link_style["rpath-tree"] = [DefaultInfo( - default_output = shared_libs_symlink_tree, - other_outputs = [ - lib.output - for lib in shared_libs.values() - ] + [ - lib.dwp - for lib in shared_libs.values() - if lib.dwp + # TODO(agallagher) There appear to be pre-existing soname conflicts + # when building this (when using link groups), which prevents using + # `with_unique_str_sonames`.
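+ # Index the shared libraries by string soname so each one can be exposed + # as a named sub-target below; libraries whose soname is not a plain + # string are skipped.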
+ str_soname_shlibs = { + shlib.soname.ensure_str(): shlib + for shlib in shared_libs + if shlib.soname.is_str + } + sub_targets["shared-libraries"] = [DefaultInfo( + default_output = ctx.actions.write_json( + name + ".shared-libraries.json", + { + "libraries": [ + "{}:{}[shared-libraries][{}]".format(ctx.label.path, ctx.label.name, soname) + for soname in str_soname_shlibs ], + "librariesdwp": [ + "{}:{}[shared-libraries][{}][dwp]".format(ctx.label.path, ctx.label.name, soname) + for soname, shlib in str_soname_shlibs.items() + if shlib.lib.dwp + ], + "rpathtree": ["{}:{}[rpath-tree]".format(ctx.label.path, ctx.label.name)] if executable_args.shared_libs_symlink_tree else [], + }, + ), + sub_targets = { + soname: [DefaultInfo( + default_output = shlib.lib.output, + sub_targets = {"dwp": [DefaultInfo(default_output = shlib.lib.dwp)]} if shlib.lib.dwp else {}, )] + for soname, shlib in str_soname_shlibs.items() + }, + )] + + if isinstance(executable_args.shared_libs_symlink_tree, Artifact): + sub_targets["rpath-tree"] = [DefaultInfo( + default_output = executable_args.shared_libs_symlink_tree, + other_outputs = [ + shlib.lib.output + for shlib in shared_libs + ] + [ + shlib.lib.dwp + for shlib in shared_libs + if shlib.lib.dwp + ], + )] - if rust_cxx_link_group_info: - sub_targets_for_link_style[LINK_GROUP_MAP_DATABASE_SUB_TARGET] = [get_link_group_map_json(ctx, filtered_targets)] - readable_mappings = {} - for node, group in link_group_mappings.items(): - readable_mappings[group] = readable_mappings.get(group, []) + ["{}//{}:{}".format(node.cell, node.package, node.name)] - sub_targets_for_link_style[LINK_GROUP_MAPPINGS_SUB_TARGET] = [DefaultInfo( - default_output = ctx.actions.write_json( - name + LINK_GROUP_MAPPINGS_FILENAME_SUFFIX, - readable_mappings, - ), - )] + if rust_cxx_link_group_info: + sub_targets[LINK_GROUP_MAP_DATABASE_SUB_TARGET] = [get_link_group_map_json(ctx, filtered_targets)] + readable_mappings = {} + for node, group in link_group_mappings.items(): + readable_mappings[group] = readable_mappings.get(group, []) + ["{}//{}:{}".format(node.cell, node.package, node.name)] + sub_targets[LINK_GROUP_MAPPINGS_SUB_TARGET] = [DefaultInfo( + default_output = ctx.actions.write_json( + name + LINK_GROUP_MAPPINGS_FILENAME_SUFFIX, + readable_mappings, + ), + )] - styles[link_style] = _CompileOutputs( - link = link.output, - args = args, - extra_targets = extra_targets, - runtime_files = runtime_files, - sub_targets = sub_targets_for_link_style, + # `infallible_diagnostics` allows us to circumvent compilation failures and + # treat the resulting rustc action as a success, even if a metadata + # artifact was not generated. This allows us to generate diagnostics + # even when the target has bugs. 
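+ # Build both incremental and non-incremental variants of the diagnostics: + # `RustcExtraOutputsInfo` exposes all of them, and the default diag + # subtargets follow `incremental_enabled` below.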
+ diag_artifacts = {} + clippy_artifacts = {} + for incr in (True, False): + diag_artifacts[incr] = rust_compile( + ctx = ctx, + compile_ctx = compile_ctx, + emit = Emit("metadata-fast"), + params = strategy_param[DEFAULT_STATIC_LINK_STRATEGY], + default_roots = default_roots, + extra_flags = extra_flags, + infallible_diagnostics = True, + incremental_enabled = incr, + ) + clippy_artifacts[incr] = rust_compile( + ctx = ctx, + compile_ctx = compile_ctx, + emit = Emit("clippy"), + params = strategy_param[DEFAULT_STATIC_LINK_STRATEGY], + default_roots = default_roots, + extra_flags = extra_flags, + infallible_diagnostics = True, + incremental_enabled = incr, ) - if link_style == specified_link_style and link.dwp_output: - dwp_target = link.dwp_output - if link_style == specified_link_style and link.pdb: - pdb = link.pdb + providers = [RustcExtraOutputsInfo( + metadata = diag_artifacts[False], + metadata_incr = diag_artifacts[True], + clippy = clippy_artifacts[False], + clippy_incr = clippy_artifacts[True], + )] - expand = rust_compile( + incr_enabled = ctx.attrs.incremental_enabled + extra_compiled_targets.update(output_as_diag_subtargets(diag_artifacts[incr_enabled], clippy_artifacts[incr_enabled])) + + extra_compiled_targets["expand"] = rust_compile( ctx = ctx, compile_ctx = compile_ctx, emit = Emit("expand"), - params = style_param[DEFAULT_STATIC_LINK_STYLE], - dep_link_style = DEFAULT_STATIC_LINK_STYLE, + params = strategy_param[DEFAULT_STATIC_LINK_STRATEGY], + default_roots = default_roots, + extra_flags = extra_flags, + incremental_enabled = ctx.attrs.incremental_enabled, + ).output + + extra_compiled_targets["llvm_ir"] = rust_compile( + ctx = ctx, + compile_ctx = compile_ctx, + emit = Emit("llvm-ir"), + params = params, default_roots = default_roots, extra_flags = extra_flags, + incremental_enabled = ctx.attrs.incremental_enabled, + ).output + + doc_output = generate_rustdoc( + ctx = ctx, + compile_ctx = compile_ctx, + params = strategy_param[DEFAULT_STATIC_LINK_STRATEGY], + default_roots = default_roots, + document_private_items = True, ) + extra_compiled_targets["doc"] = doc_output - compiled_outputs = styles[specified_link_style] - extra_compiled_targets = (compiled_outputs.extra_targets + [ - ("doc", generate_rustdoc( - ctx = ctx, - compile_ctx = compile_ctx, - params = style_param[DEFAULT_STATIC_LINK_STYLE], - default_roots = default_roots, - document_private_items = True, - )), - ("expand", expand.output), - ("sources", compile_ctx.symlinked_srcs), - ]) - sub_targets.update({k: [DefaultInfo(default_output = v)] for k, v in extra_compiled_targets}) - sub_targets.update(compiled_outputs.sub_targets) - for (k, sub_compiled_outputs) in styles.items(): - sub_targets[k.value] = [ - DefaultInfo( - default_output = sub_compiled_outputs.link, - other_outputs = sub_compiled_outputs.runtime_files, - # Check/save-analysis for each link style? 
- sub_targets = sub_compiled_outputs.sub_targets, - ), - RunInfo(args = sub_compiled_outputs.args), - ] + named_deps_names = write_named_deps_names(ctx, compile_ctx) + if named_deps_names: + extra_compiled_targets["named_deps"] = named_deps_names - if dwp_target: + if link.dwp_output: sub_targets["dwp"] = [ DefaultInfo( - default_output = dwp_target, + default_output = link.dwp_output, + other_outputs = [ + shlib.lib.dwp + for shlib in shared_libs + if shlib.lib.dwp + ], ), ] - if pdb: - sub_targets[PDB_SUB_TARGET] = get_pdb_providers(pdb = pdb, binary = compiled_outputs.link) + if link.pdb: + sub_targets[PDB_SUB_TARGET] = get_pdb_providers(pdb = link.pdb, binary = link.output) dupmbin_toolchain = compile_ctx.cxx_toolchain_info.dumpbin_toolchain_path if dupmbin_toolchain: - sub_targets[DUMPBIN_SUB_TARGET] = get_dumpbin_providers(ctx, compiled_outputs.link, dupmbin_toolchain) + sub_targets[DUMPBIN_SUB_TARGET] = get_dumpbin_providers(ctx, link.output, dupmbin_toolchain) - providers = [ + sub_targets.update({ + k: [DefaultInfo(default_output = v)] + for (k, v) in extra_compiled_targets.items() + }) + + providers += [ DefaultInfo( - default_output = compiled_outputs.link, - other_outputs = compiled_outputs.runtime_files, + default_output = link.output, + other_outputs = runtime_files + executable_args.external_debug_info + external_debug_info, sub_targets = sub_targets, ), + DistInfo( + shared_libs = shlib_info.set, + nondebug_runtime_files = runtime_files, + ), ] - return (providers, compiled_outputs.args) + providers.append(rust_analyzer_provider( + ctx = ctx, + compile_ctx = compile_ctx, + default_roots = default_roots, + )) + return (providers, args) -def rust_binary_impl(ctx: AnalysisContext) -> list[[DefaultInfo, RunInfo]]: +def rust_binary_impl(ctx: AnalysisContext) -> list[Provider]: compile_ctx = compile_context(ctx) providers, args = _rust_binary_common( @@ -337,12 +407,12 @@ def rust_binary_impl(ctx: AnalysisContext) -> list[[DefaultInfo, RunInfo]]: compile_ctx = compile_ctx, default_roots = ["main.rs"], extra_flags = [], - allow_cache_upload = ctx.attrs.allow_cache_upload, + allow_cache_upload = cxx_attrs_get_allow_cache_upload(ctx.attrs), ) return providers + [RunInfo(args = args)] -def rust_test_impl(ctx: AnalysisContext) -> list[[DefaultInfo, RunInfo, ExternalRunnerTestInfo]]: +def rust_test_impl(ctx: AnalysisContext) -> list[Provider]: compile_ctx = compile_context(ctx) toolchain_info = compile_ctx.toolchain_info @@ -353,23 +423,27 @@ def rust_test_impl(ctx: AnalysisContext) -> list[[DefaultInfo, RunInfo, External providers, args = _rust_binary_common( ctx = ctx, compile_ctx = compile_ctx, - default_roots = ["main.rs", "lib.rs"], + # Unless default_roots are provided, it is ambiguous whether this test rule is invoked + # to test a binary, or to test a library. As such, we must consider both main.rs and + # lib.rs as potential candidates. + default_roots = ctx.attrs.default_roots or ["main.rs", "lib.rs"], extra_flags = extra_flags, allow_cache_upload = False, ) - # Setup a RE executor based on the `remote_execution` param. - re_executor = get_re_executor_from_props(ctx) + # Setup RE executors based on the `remote_execution` param. 
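+ # `executor_overrides` maps override names to executor configs, letting + # individual test cases run on a non-default executor.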
+ re_executor, executor_overrides = get_re_executors_from_props(ctx) return inject_test_run_info( ctx, ExternalRunnerTestInfo( type = "rust", command = [args], - env = ctx.attrs.env, + env = ctx.attrs.env | ctx.attrs.run_env, labels = ctx.attrs.labels, contacts = ctx.attrs.contacts, default_executor = re_executor, + executor_overrides = executor_overrides, run_from_project_root = True, use_project_relative_paths = True, ), diff --git a/prelude/rust/rust_library.bzl b/prelude/rust/rust_library.bzl index 3996e135999e0..c9a714e2615b7 100644 --- a/prelude/rust/rust_library.bzl +++ b/prelude/rust/rust_library.bzl @@ -10,21 +10,14 @@ load( "ArtifactTSet", "make_artifact_tset", ) -load("@prelude//:paths.bzl", "paths") load("@prelude//:resources.bzl", "ResourceInfo", "gather_resources") load( "@prelude//android:android_providers.bzl", "merge_android_packageable_info", ) -load( - "@prelude//cxx:cxx_context.bzl", - "get_cxx_toolchain_info", -) -load("@prelude//cxx:cxx_toolchain_types.bzl", "PicBehavior") load( "@prelude//cxx:linker.bzl", "PDB_SUB_TARGET", - "get_default_shared_library_name", "get_pdb_providers", ) load( @@ -43,174 +36,86 @@ load( "LinkInfo", "LinkInfos", "LinkStrategy", - "LinkStyle", - "Linkage", "LinkedObject", - "MergedLinkInfo", + "MergedLinkInfo", # @unused Used as a type "SharedLibLinkable", "create_merged_link_info", - "create_merged_link_info_for_propagation", "get_lib_output_style", "legacy_output_style_to_link_style", + "set_link_info_link_whole", ) load( "@prelude//linking:linkable_graph.bzl", "DlopenableLibraryInfo", + "LinkableGraph", # @unused Used as a type "create_linkable_graph", "create_linkable_graph_node", "create_linkable_node", ) load( "@prelude//linking:shared_libraries.bzl", + "SharedLibraryInfo", # @unused Used as a type "create_shared_libraries", "merge_shared_libraries", ) -load("@prelude//linking:strip.bzl", "strip_debug_info") +load("@prelude//linking:types.bzl", "Linkage") load("@prelude//os_lookup:defs.bzl", "OsLookup") +load("@prelude//rust/rust-analyzer:provider.bzl", "rust_analyzer_provider") +load("@prelude//unix:providers.bzl", "UnixEnv", "create_unix_env_info") load( ":build.bzl", - "RustcOutput", # @unused Used as a type "compile_context", "generate_rustdoc", + "generate_rustdoc_coverage", "generate_rustdoc_test", "rust_compile", - "rust_compile_multi", ) load( ":build_params.bzl", "BuildParams", # @unused Used as a type "Emit", "LinkageLang", + "MetadataKind", "RuleType", "build_params", - "crate_type_transitive_deps", ) load( ":context.bzl", "CompileContext", # @unused Used as a type + "CrateName", # @unused Used as a type + "DepCollectionContext", ) load( ":link_info.bzl", - "CrateName", # @unused Used as a type - "DEFAULT_STATIC_LINK_STYLE", + "DEFAULT_STATIC_LIB_OUTPUT_STYLE", + "DEFAULT_STATIC_LINK_STRATEGY", "RustLinkInfo", - "RustLinkStyleInfo", + "RustLinkStrategyInfo", "RustProcMacroMarker", # @unused Used as a type "attr_crate", - "inherited_external_debug_info", - "inherited_non_rust_exported_link_deps", - "inherited_non_rust_link_info", - "inherited_non_rust_shared_libs", + "attr_soname", + "inherited_exported_link_deps", + "inherited_link_group_lib_infos", + "inherited_linkable_graphs", + "inherited_merged_link_infos", + "inherited_shared_libs", "resolve_deps", "resolve_rust_deps", - "style_info", + "strategy_info", +) +load(":named_deps.bzl", "write_named_deps_names") +load( + ":outputs.bzl", + "RustcExtraOutputsInfo", + "RustcOutput", # @unused Used as a type + "output_as_diag_subtargets", ) load(":proc_macro_alias.bzl", 
"rust_proc_macro_alias") load(":resources.bzl", "rust_attr_resources") +load(":rust_toolchain.bzl", "RustToolchainInfo") load(":targets.bzl", "targets") -def prebuilt_rust_library_impl(ctx: AnalysisContext) -> list[Provider]: - providers = [] - - # Default output. - providers.append( - DefaultInfo( - default_output = ctx.attrs.rlib, - ), - ) - - # Rust link provider. - crate = attr_crate(ctx) - styles = {} - for style in LinkStyle: - dep_link_style = style - tdeps, tmetadeps, external_debug_info, tprocmacrodeps = _compute_transitive_deps(ctx, dep_link_style) - external_debug_info = make_artifact_tset( - actions = ctx.actions, - children = external_debug_info, - ) - styles[style] = RustLinkStyleInfo( - rlib = ctx.attrs.rlib, - transitive_deps = tdeps, - rmeta = ctx.attrs.rlib, - transitive_rmeta_deps = tmetadeps, - transitive_proc_macro_deps = tprocmacrodeps, - pdb = None, - external_debug_info = external_debug_info, - ) - providers.append( - RustLinkInfo( - crate = crate, - styles = styles, - non_rust_exported_link_deps = inherited_non_rust_exported_link_deps(ctx), - non_rust_link_info = create_merged_link_info_for_propagation(ctx, inherited_non_rust_link_info(ctx)), - non_rust_shared_libs = merge_shared_libraries( - ctx.actions, - deps = inherited_non_rust_shared_libs(ctx), - ), - ), - ) - - # Native link provier. - link = LinkInfos( - default = LinkInfo( - linkables = [ - ArchiveLinkable( - archive = Archive(artifact = ctx.attrs.rlib), - linker_type = "unknown", - ), - ], - ), - stripped = LinkInfo( - linkables = [ - ArchiveLinkable( - archive = Archive( - artifact = strip_debug_info( - ctx = ctx, - out = ctx.attrs.rlib.short_path, - obj = ctx.attrs.rlib, - ), - ), - linker_type = "unknown", - ), - ], - ), - ) - providers.append( - create_merged_link_info( - ctx, - PicBehavior("supported"), - {output_style: link for output_style in LibOutputStyle}, - exported_deps = [d[MergedLinkInfo] for d in ctx.attrs.deps], - # TODO(agallagher): This matches v1 behavior, but some of these libs - # have prebuilt DSOs which might be usable. - preferred_linkage = Linkage("static"), - ), - ) - - # Native link graph setup. - linker_info = get_cxx_toolchain_info(ctx).linker_info - linkable_graph = create_linkable_graph( - ctx, - node = create_linkable_graph_node( - ctx, - linkable_node = create_linkable_node( - ctx = ctx, - preferred_linkage = Linkage("static"), - exported_deps = ctx.attrs.deps, - link_infos = {output_style: link for output_style in LibOutputStyle}, - default_soname = get_default_shared_library_name(linker_info, ctx.label), - ), - ), - deps = ctx.attrs.deps, - ) - providers.append(linkable_graph) - - providers.append(merge_link_group_lib_info(deps = ctx.attrs.deps)) - - providers.append(merge_android_packageable_info(ctx.label, ctx.actions, ctx.attrs.deps)) - - return providers +_DEFAULT_ROOTS = ["lib.rs"] def rust_library_impl(ctx: AnalysisContext) -> list[Provider]: compile_ctx = compile_context(ctx) @@ -221,114 +126,236 @@ def rust_library_impl(ctx: AnalysisContext) -> list[Provider]: # distinct kinds of build we actually need to deal with. param_lang, lang_style_param = _build_params_for_styles(ctx, compile_ctx) - artifacts = _build_library_artifacts(ctx, compile_ctx, param_lang.keys()) + # Grab the artifacts to use for the check subtargets. 
Picking a good + # `LibOutputStyle` ensures that the subtarget shares work with the main + # build if possible + meta_params = lang_style_param[(LinkageLang("rust"), DEFAULT_STATIC_LIB_OUTPUT_STYLE)] + + meta_fast = rust_compile( + ctx = ctx, + compile_ctx = compile_ctx, + emit = Emit("metadata-fast"), + params = meta_params, + default_roots = _DEFAULT_ROOTS, + incremental_enabled = ctx.attrs.incremental_enabled, + ) + # Generate the actions to build various output artifacts. Given the set of + # parameters we need, populate maps to the linkable and metadata + # artifacts by linkage lang. rust_param_artifact = {} + rust_param_subtargets = {} native_param_artifact = {} - check_artifacts = None - - for params, (link, meta) in artifacts.items(): - if LinkageLang("rust") in param_lang[params]: - # Grab the check output for all kinds of builds to use - # in the check subtarget. The link style doesn't matter - # so pick the first. - if check_artifacts == None: - check_artifacts = {"check": meta.output} - check_artifacts.update(meta.diag) - - rust_param_artifact[params] = _handle_rust_artifact(ctx, params, link, meta) - if LinkageLang("c++") in param_lang[params]: + for params, langs in param_lang.items(): + link = rust_compile( + ctx = ctx, + compile_ctx = compile_ctx, + emit = Emit("link"), + params = params, + default_roots = _DEFAULT_ROOTS, + incremental_enabled = ctx.attrs.incremental_enabled, + ) + + if LinkageLang("rust") in langs: + rust_param_artifact[params] = { + MetadataKind("link"): link, + MetadataKind("full"): rust_compile( + ctx = ctx, + compile_ctx = compile_ctx, + emit = Emit("metadata-full"), + params = params, + default_roots = _DEFAULT_ROOTS, + incremental_enabled = ctx.attrs.incremental_enabled, + ), + MetadataKind("fast"): meta_fast, + } + + rust_param_subtargets[params] = { + "llvm-ir": rust_compile( + ctx = ctx, + compile_ctx = compile_ctx, + emit = Emit("llvm-ir"), + params = params, + default_roots = _DEFAULT_ROOTS, + incremental_enabled = ctx.attrs.incremental_enabled, + ), + } + + if LinkageLang("native") in langs or LinkageLang("native-unbundled") in langs: native_param_artifact[params] = link - # Among {rustdoc, doctests, macro expand}, doctests are the only one which - # cares about linkage. So if there is a required link style set for the - # doctests, reuse those same dependency artifacts for the other build - # outputs where static vs static_pic does not make a difference. + rust_artifacts = _rust_artifacts( + ctx = ctx, + compile_ctx = compile_ctx, + lang_style_param = lang_style_param, + rust_param_artifact = rust_param_artifact, + ) + + link_infos = _link_infos( + ctx = ctx, + compile_ctx = compile_ctx, + lang_style_param = lang_style_param, + param_artifact = native_param_artifact, + ) + + # For doctests, we need to know two things to know how to link them. The + # first is that we need a link strategy, which affects how deps of this + # target are handled + if ctx.attrs.doc_link_style: + doc_link_strategy = LinkStrategy(ctx.attrs.doc_link_style) + else: + # FIXME(JakobDegen): In this position, a binary would just fall back to + # the default link style. However, we have a little bit of additional + # information in the form of the preferred linkage that we can use to + # make a different decision. 
There's nothing technically wrong with + # that, but a comment explaining why we want to do it would be nice. + doc_link_strategy = { + "any": LinkStrategy("shared"), + "shared": LinkStrategy("shared"), + "static": DEFAULT_STATIC_LINK_STRATEGY, + }[ctx.attrs.preferred_linkage] + + # The second thing we need is a lib output style of the regular, non-doctest + # version of this target that we want. Rustdoc does not handle this library + # being built in a "shared" way well, so this must be a static output style. if ctx.attrs.doc_link_style: - static_link_style = { - "shared": DEFAULT_STATIC_LINK_STYLE, - "static": LinkStyle("static"), - "static_pic": LinkStyle("static_pic"), + doc_output_style = { + "shared": DEFAULT_STATIC_LIB_OUTPUT_STYLE, + "static": LibOutputStyle("archive"), + "static_pic": LibOutputStyle("pic_archive"), }[ctx.attrs.doc_link_style] else: - static_link_style = DEFAULT_STATIC_LINK_STYLE + doc_output_style = DEFAULT_STATIC_LIB_OUTPUT_STYLE + static_library_params = lang_style_param[(LinkageLang("rust"), doc_output_style)] - static_library_params = lang_style_param[(LinkageLang("rust"), static_link_style)] - default_roots = ["lib.rs"] + # Among {rustdoc, doctests, macro expand}, doctests are the only one which + # cares about linkage. So whatever build params we picked for the doctests, + # reuse them for the other two as well rustdoc = generate_rustdoc( ctx = ctx, compile_ctx = compile_ctx, params = static_library_params, - default_roots = default_roots, + default_roots = _DEFAULT_ROOTS, document_private_items = False, ) - # If doctests=True or False is set on the individual target, respect that. - # Otherwise look at the global setting on the toolchain. - doctests_enabled = ctx.attrs.doctests if ctx.attrs.doctests != None else toolchain_info.doctests - - rustdoc_test = None - if doctests_enabled and toolchain_info.rustc_target_triple == targets.exec_triple(ctx): - if ctx.attrs.doc_link_style: - doc_link_style = LinkStyle(ctx.attrs.doc_link_style) - else: - doc_link_style = { - "any": LinkStyle("shared"), - "shared": LinkStyle("shared"), - "static": DEFAULT_STATIC_LINK_STYLE, - }[ctx.attrs.preferred_linkage] - rustdoc_test_params = build_params( - rule = RuleType("binary"), - proc_macro = ctx.attrs.proc_macro, - link_style = doc_link_style, - preferred_linkage = Linkage(ctx.attrs.preferred_linkage), - lang = LinkageLang("rust"), - linker_type = compile_ctx.cxx_toolchain_info.linker_info.type, - target_os_type = ctx.attrs._target_os_type[OsLookup], - ) - rustdoc_test = generate_rustdoc_test( - ctx = ctx, - compile_ctx = compile_ctx, - link_style = rustdoc_test_params.dep_link_style, - library = rust_param_artifact[static_library_params], - params = rustdoc_test_params, - default_roots = default_roots, - ) + rustdoc_coverage = generate_rustdoc_coverage( + ctx = ctx, + compile_ctx = compile_ctx, + params = static_library_params, + default_roots = _DEFAULT_ROOTS, + ) expand = rust_compile( ctx = ctx, compile_ctx = compile_ctx, emit = Emit("expand"), params = static_library_params, - dep_link_style = DEFAULT_STATIC_LINK_STYLE, - default_roots = default_roots, + default_roots = _DEFAULT_ROOTS, + # This is needed because rustc can successfully generate expanded sources + # for code that does not fully compile, while still reporting an error. + # TODO(pickett): Handle this at the rustc action level, we shouldn't + # need to pass a special arg here, expand should just work.
+ infallible_diagnostics = True, + incremental_enabled = ctx.attrs.incremental_enabled, ) - providers = [] + # If doctests=True or False is set on the individual target, respect that. + # Otherwise look at the global setting on the toolchain. + doctests_enabled = \ + (ctx.attrs.doctests if ctx.attrs.doctests != None else toolchain_info.doctests) and \ + toolchain_info.rustc_target_triple == targets.exec_triple(ctx) + + rustdoc_test_params = build_params( + rule = RuleType("binary"), + proc_macro = ctx.attrs.proc_macro, + link_strategy = doc_link_strategy, + lib_output_style = None, + lang = LinkageLang("rust"), + linker_type = compile_ctx.cxx_toolchain_info.linker_info.type, + target_os_type = ctx.attrs._target_os_type[OsLookup], + ) + rustdoc_test = generate_rustdoc_test( + ctx = ctx, + compile_ctx = compile_ctx, + rlib = rust_param_artifact[static_library_params][MetadataKind("link")].output, + link_infos = link_infos, + params = rustdoc_test_params, + default_roots = _DEFAULT_ROOTS, + ) + # infallible_diagnostics allows us to circumvent compilation failures and + # treat the resulting rustc action as a success, even if a metadata + # artifact was not generated. This allows us to generate diagnostics + # even when the target has bugs. + diag_artifacts = {} + clippy_artifacts = {} + for incr in (True, False): + diag_artifacts[incr] = rust_compile( + ctx = ctx, + compile_ctx = compile_ctx, + emit = Emit("metadata-fast"), + params = meta_params, + default_roots = _DEFAULT_ROOTS, + infallible_diagnostics = True, + incremental_enabled = incr, + ) + clippy_artifacts[incr] = rust_compile( + ctx = ctx, + compile_ctx = compile_ctx, + emit = Emit("clippy"), + params = meta_params, + default_roots = _DEFAULT_ROOTS, + infallible_diagnostics = True, + incremental_enabled = incr, + ) + + incr_enabled = ctx.attrs.incremental_enabled + providers = [] providers += _default_providers( - ctx = ctx, lang_style_param = lang_style_param, - param_artifact = rust_param_artifact, + rust_param_artifact = rust_param_artifact, + rust_param_subtargets = rust_param_subtargets, + native_param_artifact = native_param_artifact, rustdoc = rustdoc, rustdoc_test = rustdoc_test, - check_artifacts = check_artifacts, + doctests_enabled = doctests_enabled, + check_artifacts = output_as_diag_subtargets(diag_artifacts[incr_enabled], clippy_artifacts[incr_enabled]), expand = expand.output, sources = compile_ctx.symlinked_srcs, + rustdoc_coverage = rustdoc_coverage, + named_deps_names = write_named_deps_names(ctx, compile_ctx), ) - providers += _rust_providers( - ctx = ctx, - lang_style_param = lang_style_param, - param_artifact = rust_param_artifact, - ) - providers += _native_providers( - ctx = ctx, - compile_ctx = compile_ctx, - lang_style_param = lang_style_param, - param_artifact = native_param_artifact, + providers += _rust_metadata_providers( + diag_artifacts = diag_artifacts, + clippy_artifacts = clippy_artifacts, ) - deps = [dep.dep for dep in resolve_deps(ctx)] + if ctx.attrs.proc_macro: + providers += _proc_macro_link_providers( + ctx = ctx, + rust_artifacts = rust_artifacts, + ) + elif toolchain_info.advanced_unstable_linking: + providers += _advanced_unstable_link_providers( + ctx = ctx, + compile_ctx = compile_ctx, + lang_style_param = lang_style_param, + rust_artifacts = rust_artifacts, + native_param_artifact = native_param_artifact, + link_infos = link_infos, + ) + else: + providers += _stable_link_providers( + ctx = ctx, + compile_ctx = compile_ctx, + lang_style_param = lang_style_param, + rust_artifacts = 
+            native_param_artifact = native_param_artifact,
+            link_infos = link_infos,
+        )
+
+    deps = [dep.dep for dep in resolve_deps(ctx, compile_ctx.dep_ctx)]
     providers.append(ResourceInfo(resources = gather_resources(
         label = ctx.label,
         resources = rust_attr_resources(ctx),
@@ -337,13 +364,19 @@ def rust_library_impl(ctx: AnalysisContext) -> list[Provider]:

     providers.append(merge_android_packageable_info(ctx.label, ctx.actions, deps))

+    providers.append(rust_analyzer_provider(
+        ctx = ctx,
+        compile_ctx = compile_ctx,
+        default_roots = _DEFAULT_ROOTS,
+    ))
+
     return providers

 def _build_params_for_styles(
         ctx: AnalysisContext,
         compile_ctx: CompileContext) -> (
     dict[BuildParams, list[LinkageLang]],
-    dict[(LinkageLang, LinkStyle), BuildParams],
+    dict[(LinkageLang, LibOutputStyle), BuildParams],
 ):
     """
     For a given rule, return two things:
@@ -365,16 +398,16 @@ def _build_params_for_styles(

     # Styles+lang linkage to params
     for linkage_lang in LinkageLang:
-        # Skip proc_macro + c++ combination
-        if ctx.attrs.proc_macro and linkage_lang == LinkageLang("c++"):
+        # Skip proc_macro + non-rust combinations
+        if ctx.attrs.proc_macro and linkage_lang != LinkageLang("rust"):
             continue

-        for link_style in LinkStyle:
+        for lib_output_style in LibOutputStyle:
             params = build_params(
                 rule = RuleType("library"),
                 proc_macro = ctx.attrs.proc_macro,
-                link_style = link_style,
-                preferred_linkage = Linkage(ctx.attrs.preferred_linkage),
+                link_strategy = None,
+                lib_output_style = lib_output_style,
                 lang = linkage_lang,
                 linker_type = linker_type,
                 target_os_type = target_os_type,
@@ -382,140 +415,195 @@
             if params not in param_lang:
                 param_lang[params] = []
             param_lang[params] = param_lang[params] + [linkage_lang]
-            style_param[(linkage_lang, link_style)] = params
+            style_param[(linkage_lang, lib_output_style)] = params

     return (param_lang, style_param)

-def _build_library_artifacts(
+def _link_infos(
         ctx: AnalysisContext,
         compile_ctx: CompileContext,
-        params: list[BuildParams]) -> dict[BuildParams, (RustcOutput, RustcOutput)]:
-    """
-    Generate the actual actions to build various output artifacts. Given the set
-    parameters we need, return a mapping to the linkable and metadata artifacts.
-    """
-    param_artifact = {}
+        lang_style_param: dict[(LinkageLang, LibOutputStyle), BuildParams],
+        param_artifact: dict[BuildParams, RustcOutput]) -> dict[LibOutputStyle, LinkInfos]:
+    if ctx.attrs.proc_macro:
+        # Don't need any of this for proc macros
+        return {}

-    for params in params:
-        dep_link_style = params.dep_link_style
+    advanced_unstable_linking = compile_ctx.toolchain_info.advanced_unstable_linking
+    lang = LinkageLang("native-unbundled") if advanced_unstable_linking else LinkageLang("native")
+    linker_type = compile_ctx.cxx_toolchain_info.linker_info.type

-        # Separate actions for each emit type
-        #
-        # In principle we don't really need metadata for C++-only artifacts, but I don't think it hurts
-        link, meta = rust_compile_multi(
-            ctx = ctx,
-            compile_ctx = compile_ctx,
-            emits = [Emit("link"), Emit("metadata")],
-            params = params,
-            dep_link_style = dep_link_style,
-            default_roots = ["lib.rs"],
+    link_infos = {}
+    for output_style in LibOutputStyle:
+        lib = param_artifact[lang_style_param[(lang, output_style)]]
+        external_debug_info = make_artifact_tset(
+            actions = ctx.actions,
+            label = ctx.label,
+            artifacts = filter(None, [lib.dwo_output_directory]),
+            children = lib.extra_external_debug_info,
         )
+        if output_style == LibOutputStyle("shared_lib"):
+            link_infos[output_style] = LinkInfos(
+                default = LinkInfo(
+                    linkables = [SharedLibLinkable(lib = lib.output)],
+                    external_debug_info = external_debug_info,
+                    pre_flags = ctx.attrs.exported_linker_flags,
+                    post_flags = ctx.attrs.exported_post_linker_flags,
+                ),
+                stripped = LinkInfo(
+                    linkables = [SharedLibLinkable(lib = lib.stripped_output)],
+                    external_debug_info = external_debug_info,
+                    pre_flags = ctx.attrs.exported_linker_flags,
+                    post_flags = ctx.attrs.exported_post_linker_flags,
+                ),
+            )
+        else:
+            link_infos[output_style] = LinkInfos(
+                default = LinkInfo(
+                    linkables = [ArchiveLinkable(
+                        archive = Archive(artifact = lib.output),
+                        linker_type = linker_type,
+                    )],
+                    external_debug_info = external_debug_info,
+                    pre_flags = ctx.attrs.exported_linker_flags,
+                    post_flags = ctx.attrs.exported_post_linker_flags,
+                ),
+                stripped = LinkInfo(
+                    linkables = [ArchiveLinkable(
+                        archive = Archive(artifact = lib.stripped_output),
+                        linker_type = linker_type,
+                    )],
+                    pre_flags = ctx.attrs.exported_linker_flags,
+                    post_flags = ctx.attrs.exported_post_linker_flags,
+                ),
+            )
+    return link_infos

-        param_artifact[params] = (link, meta)
+def _rust_artifacts(
+        ctx: AnalysisContext,
+        compile_ctx: CompileContext,
+        lang_style_param: dict[(LinkageLang, LibOutputStyle), BuildParams],
+        rust_param_artifact: dict[BuildParams, dict[MetadataKind, RustcOutput]]) -> dict[LinkStrategy, RustLinkStrategyInfo]:
+    pic_behavior = compile_ctx.cxx_toolchain_info.pic_behavior
+    preferred_linkage = Linkage(ctx.attrs.preferred_linkage)

-    return param_artifact
+    rust_artifacts = {}
+    for link_strategy in LinkStrategy:
+        params = lang_style_param[(LinkageLang("rust"), get_lib_output_style(link_strategy, preferred_linkage, pic_behavior))]
+        rust_artifacts[link_strategy] = _handle_rust_artifact(ctx, compile_ctx.dep_ctx, link_strategy, rust_param_artifact[params])
+    return rust_artifacts

 def _handle_rust_artifact(
         ctx: AnalysisContext,
-        params: BuildParams,
-        link: RustcOutput,
-        meta: RustcOutput) -> RustLinkStyleInfo:
+        dep_ctx: DepCollectionContext,
+        link_strategy: LinkStrategy,
+        outputs: dict[MetadataKind, RustcOutput]) -> RustLinkStrategyInfo:
     """
-    Return the RustLinkInfo for a given set of artifacts. The main consideration
+    Return the RustLinkStrategyInfo for a given set of artifacts. The main consideration
     is computing the right set of dependencies.
     """

-    dep_link_style = params.dep_link_style
-
     # If we're a crate where our consumers should care about transitive deps,
     # then compute them (specifically, not proc-macro).
-    if crate_type_transitive_deps(params.crate_type):
-        tdeps, tmetadeps, external_debug_info, tprocmacrodeps = _compute_transitive_deps(ctx, dep_link_style)
-    else:
-        tdeps, tmetadeps, external_debug_info, tprocmacrodeps = {}, {}, [], {}
-
+    link_output = outputs[MetadataKind("link")]
     if not ctx.attrs.proc_macro:
+        tdeps, external_debug_info, tprocmacrodeps = _compute_transitive_deps(ctx, dep_ctx, link_strategy)
         external_debug_info = make_artifact_tset(
             actions = ctx.actions,
             label = ctx.label,
-            artifacts = filter(None, [link.dwo_output_directory]),
+            artifacts = filter(None, [link_output.dwo_output_directory]),
             children = external_debug_info,
         )
-        return RustLinkStyleInfo(
-            rlib = link.output,
+        return RustLinkStrategyInfo(
+            outputs = {m: x.output for m, x in outputs.items()},
             transitive_deps = tdeps,
-            rmeta = meta.output,
-            transitive_rmeta_deps = tmetadeps,
             transitive_proc_macro_deps = tprocmacrodeps,
-            pdb = link.pdb,
+            pdb = link_output.pdb,
             external_debug_info = external_debug_info,
         )
     else:
         # Proc macro deps are always the real thing
-        return RustLinkStyleInfo(
-            rlib = link.output,
-            transitive_deps = tdeps,
-            rmeta = link.output,
-            transitive_rmeta_deps = tdeps,
-            transitive_proc_macro_deps = tprocmacrodeps,
-            pdb = link.pdb,
+        return RustLinkStrategyInfo(
+            outputs = {m: link_output.output for m in MetadataKind},
+            transitive_deps = {m: {} for m in MetadataKind},
+            transitive_proc_macro_deps = {},
+            pdb = link_output.pdb,
             external_debug_info = ArtifactTSet(),
         )

 def _default_providers(
-        ctx: AnalysisContext,
-        lang_style_param: dict[(LinkageLang, LinkStyle), BuildParams],
-        param_artifact: dict[BuildParams, RustLinkStyleInfo],
+        lang_style_param: dict[(LinkageLang, LibOutputStyle), BuildParams],
+        rust_param_artifact: dict[BuildParams, dict[MetadataKind, RustcOutput]],
+        native_param_artifact: dict[BuildParams, RustcOutput],
+        rust_param_subtargets: dict[BuildParams, dict[str, RustcOutput]],
         rustdoc: Artifact,
-        rustdoc_test: [cmd_args, None],
-        check_artifacts: dict[str, Artifact],
+        rustdoc_test: cmd_args,
+        doctests_enabled: bool,
+        check_artifacts: dict[str, Artifact | None],
         expand: Artifact,
-        sources: Artifact) -> list[Provider]:
+        sources: Artifact,
+        rustdoc_coverage: Artifact,
+        named_deps_names: Artifact | None) -> list[Provider]:
     targets = {}
     targets.update(check_artifacts)
     targets["sources"] = sources
     targets["expand"] = expand
     targets["doc"] = rustdoc
+    targets["doc-coverage"] = rustdoc_coverage
+    if named_deps_names:
+        targets["named_deps"] = named_deps_names
     sub_targets = {
         k: [DefaultInfo(default_output = v)]
         for (k, v) in targets.items()
     }

-    # Add provider for default output, and for each link-style...
-    for link_style in LinkStyle:
-        link_style_info = param_artifact[lang_style_param[(LinkageLang("rust"), link_style)]]
-        nested_sub_targets = {}
-        if link_style_info.pdb:
-            nested_sub_targets[PDB_SUB_TARGET] = get_pdb_providers(pdb = link_style_info.pdb, binary = link_style_info.rlib)
-        sub_targets[link_style.value] = [DefaultInfo(
-            default_output = link_style_info.rlib,
+    # Add provider for default output, and for each lib output style...
+    # FIXME(JakobDegen): C++ rules only provide some of the output styles,
+    # determined by `get_output_styles_for_linkage` in `linking/link_info.bzl`.
+    # Do we want to do the same?
+    for output_style in LibOutputStyle:
+        param = lang_style_param[(LinkageLang("rust"), output_style)]
+        link = rust_param_artifact[param][MetadataKind("link")]
+        nested_sub_targets = {k: [DefaultInfo(default_output = v.output)] for k, v in rust_param_subtargets[param].items()}
+        if link.pdb:
+            nested_sub_targets[PDB_SUB_TARGET] = get_pdb_providers(pdb = link.pdb, binary = link.output)
+
+        # FIXME(JakobDegen): Ideally we'd use the same
+        # `subtarget_for_output_style` as C++, but that uses `static-pic`
+        # instead of `static_pic`. Would be nice if that were consistent
+        name = legacy_output_style_to_link_style(output_style).value
+        sub_targets[name] = [DefaultInfo(
+            default_output = link.output,
             sub_targets = nested_sub_targets,
         )]

+    lang_style_for_staticlib = (LinkageLang("native"), LibOutputStyle("archive"))
+    if lang_style_for_staticlib in lang_style_param:
+        artifact = native_param_artifact[lang_style_param[lang_style_for_staticlib]]
+        sub_targets["staticlib"] = [DefaultInfo(
+            default_output = artifact.output,
+        )]
+
+    lang_style_for_cdylib = (LinkageLang("native"), LibOutputStyle("shared_lib"))
+    if lang_style_for_cdylib in lang_style_param:
+        artifact = native_param_artifact[lang_style_param[lang_style_for_cdylib]]
+        sub_targets["cdylib"] = [DefaultInfo(
+            default_output = artifact.output,
+        )]
+
     providers = []

-    if rustdoc_test:
-        # Pass everything in env + doc_env, except ones with value None in doc_env.
-        doc_env = dict(ctx.attrs.env)
-        for k, v in ctx.attrs.doc_env.items():
-            if v == None:
-                doc_env.pop(k, None)
-            else:
-                doc_env[k] = v
-        doc_env["RUSTC_BOOTSTRAP"] = "1"  # for `-Zunstable-options`
-
-        rustdoc_test_info = ExternalRunnerTestInfo(
-            type = "rustdoc",
-            command = [rustdoc_test],
-            run_from_project_root = True,
-            env = doc_env,
-        )
+    rustdoc_test_info = ExternalRunnerTestInfo(
+        type = "rustdoc",
+        command = [rustdoc_test],
+        run_from_project_root = True,
+    )

-        # Run doc test as part of `buck2 test :crate`
-        providers.append(rustdoc_test_info)
+    # Always let the user run doctests via `buck2 test :crate[doc]`
+    sub_targets["doc"].append(rustdoc_test_info)

-        # Run doc test as part of `buck2 test :crate[doc]`
-        sub_targets["doc"].append(rustdoc_test_info)
+    # But only run it as a part of `buck2 test :crate` if it's not disabled
+    if doctests_enabled:
+        providers.append(rustdoc_test_info)

     providers.append(DefaultInfo(
         default_output = check_artifacts["check"],
@@ -524,147 +612,233 @@ def _default_providers(
     return providers

-def _rust_providers(
+def _rust_metadata_providers(diag_artifacts: dict[bool, RustcOutput], clippy_artifacts: dict[bool, RustcOutput]) -> list[Provider]:
+    return [
+        RustcExtraOutputsInfo(
+            metadata = diag_artifacts[False],
+            metadata_incr = diag_artifacts[True],
+            clippy = clippy_artifacts[False],
+            clippy_incr = clippy_artifacts[True],
+        ),
+    ]
+
+def _proc_macro_link_providers(
         ctx: AnalysisContext,
-        lang_style_param: dict[(LinkageLang, LinkStyle), BuildParams],
-        param_artifact: dict[BuildParams, RustLinkStyleInfo]) -> list[Provider]:
-    """
-    Return the set of providers for Rust linkage.
-    """
+        rust_artifacts: dict[LinkStrategy, RustLinkStrategyInfo]) -> list[Provider]:
+    # These are never accessed in the case of proc macros, so just return some dummy
+    # values
+    return [RustLinkInfo(
+        crate = attr_crate(ctx),
+        strategies = rust_artifacts,
+        merged_link_infos = {},
+        exported_link_deps = [],
+        shared_libs = merge_shared_libraries(ctx.actions),
+        linkable_graphs = [],
+    )]
+
+def _advanced_unstable_link_providers(
+        ctx: AnalysisContext,
+        compile_ctx: CompileContext,
+        lang_style_param: dict[(LinkageLang, LibOutputStyle), BuildParams],
+        rust_artifacts: dict[LinkStrategy, RustLinkStrategyInfo],
+        native_param_artifact: dict[BuildParams, RustcOutput],
+        link_infos: dict[LibOutputStyle, LinkInfos]) -> list[Provider]:
     crate = attr_crate(ctx)
+    pic_behavior = compile_ctx.cxx_toolchain_info.pic_behavior
+    preferred_linkage = Linkage(ctx.attrs.preferred_linkage)

-    style_info = {
-        link_style: param_artifact[lang_style_param[(LinkageLang("rust"), link_style)]]
-        for link_style in LinkStyle
-    }
+    providers = []

-    # Inherited link input and shared libraries. As in v1, this only includes
-    # non-Rust rules, found by walking through -- and ignoring -- Rust libraries
-    # to find non-Rust native linkables and libraries.
-    if not ctx.attrs.proc_macro:
-        inherited_non_rust_link_deps = inherited_non_rust_exported_link_deps(ctx)
-        inherited_non_rust_link = inherited_non_rust_link_info(ctx)
-        inherited_non_rust_shlibs = inherited_non_rust_shared_libs(ctx)
-    else:
-        # proc-macros are just used by the compiler and shouldn't propagate
-        # their native deps to the link line of the target.
-        inherited_non_rust_link = []
-        inherited_non_rust_shlibs = []
-        inherited_non_rust_link_deps = []
+    dep_ctx = compile_ctx.dep_ctx

-    providers = []
+    inherited_link_infos = inherited_merged_link_infos(ctx, dep_ctx)
+    inherited_shlibs = inherited_shared_libs(ctx, dep_ctx)
+    inherited_graphs = inherited_linkable_graphs(ctx, dep_ctx)
+    inherited_exported_deps = inherited_exported_link_deps(ctx, dep_ctx)
+
+    # Native link provider.
+    merged_link_info = create_merged_link_info(
+        ctx,
+        pic_behavior,
+        link_infos,
+        deps = inherited_link_infos.values(),
+        exported_deps = filter(None, [d.get(MergedLinkInfo) for d in inherited_exported_deps]),
+        preferred_linkage = preferred_linkage,
+    )
+    providers.append(merged_link_info)
+
+    solibs = {}
+
+    # Add the shared library to the list of shared libs.
+    shlib_name = attr_soname(ctx)
+
+    shared_lib_params = lang_style_param[(LinkageLang("native-unbundled"), LibOutputStyle("shared_lib"))]
+    shared_lib_output = native_param_artifact[shared_lib_params].output
+
+    # Only add a shared library if we generated one.
+    # TODO(cjhopman): This is strange. Normally (like in c++) the link_infos passed to create_merged_link_info above would only have
+    # a value for LibOutputStyle("shared_lib") if that were created and we could just check for that key. Given that I intend
+    # to remove the SharedLibraries provider, maybe just wait for that to resolve this.
+    if get_lib_output_style(LinkStrategy("shared"), preferred_linkage, compile_ctx.cxx_toolchain_info.pic_behavior) == LibOutputStyle("shared_lib"):
+        solibs[shlib_name] = LinkedObject(
+            output = shared_lib_output,
+            unstripped_output = shared_lib_output,
+            external_debug_info = link_infos[LibOutputStyle("shared_lib")].default.external_debug_info,
+        )
+
+    # Native shared library provider.
+    shared_libs = create_shared_libraries(ctx, solibs)
+    shared_library_info = merge_shared_libraries(
+        ctx.actions,
+        shared_libs,
+        inherited_shlibs,
+    )
+    providers.append(shared_library_info)
+
+    linkable_graph = create_linkable_graph(
+        ctx,
+        node = create_linkable_graph_node(
+            ctx,
+            linkable_node = create_linkable_node(
+                ctx = ctx,
+                preferred_linkage = preferred_linkage,
+                deps = inherited_graphs,
+                exported_deps = inherited_exported_deps,
+                link_infos = link_infos,
+                shared_libs = shared_libs,
+                default_soname = shlib_name,
+                # Link groups have a heuristic in which they assume that a
+                # preferred_linkage = "static" library needs to be linked
+                # into every single link group, instead of just one.
+                # Applying that same heuristic to Rust seems right, but only
+                # if this target actually requested that. Opt ourselves out
+                # if it didn't.
+                ignore_force_static_follows_dependents = preferred_linkage != Linkage("static"),
+                include_in_android_mergemap = False,  # TODO(pickett): Plumb D54748362 to the macro layer
+            ),
+        ),
+        deps = inherited_graphs + inherited_exported_deps,
+    )
+
+    providers.append(linkable_graph)
+
+    # Omnibus root provider.
+    linkable_root = create_linkable_root(
+        label = ctx.label,
+        name = shlib_name,
+        link_infos = LinkInfos(
+            default = set_link_info_link_whole(link_infos[LibOutputStyle("pic_archive")].default),
+        ),
+        deps = inherited_graphs,
+    )
+    providers.append(linkable_root)
+
+    # Mark libraries that support `dlopen`.
+    if getattr(ctx.attrs, "supports_python_dlopen", False):
+        providers.append(DlopenableLibraryInfo())
+
+    # We never need to add anything to this provider because Rust libraries
+    # cannot act as link group libs, especially given that they only support
+    # auto link groups anyway
+    providers.append(merge_link_group_lib_info(children = inherited_link_group_lib_infos(ctx, compile_ctx.dep_ctx)))

     # Create rust library provider.
     providers.append(RustLinkInfo(
         crate = crate,
-        styles = style_info,
-        non_rust_link_info = create_merged_link_info_for_propagation(ctx, inherited_non_rust_link),
-        non_rust_exported_link_deps = inherited_non_rust_link_deps,
-        non_rust_shared_libs = merge_shared_libraries(
-            ctx.actions,
-            deps = inherited_non_rust_shlibs,
-        ),
+        strategies = rust_artifacts,
+        merged_link_infos = inherited_link_infos | {ctx.label.configured_target(): merged_link_info},
+        exported_link_deps = inherited_exported_deps,
+        shared_libs = shared_library_info,
+        linkable_graphs = inherited_graphs + [linkable_graph],
     ))

     return providers

-def _native_providers(
+def _stable_link_providers(
         ctx: AnalysisContext,
         compile_ctx: CompileContext,
-        lang_style_param: dict[(LinkageLang, LinkStyle), BuildParams],
-        param_artifact: dict[BuildParams, RustcOutput]) -> list[Provider]:
+        lang_style_param: dict[(LinkageLang, LibOutputStyle), BuildParams],
+        native_param_artifact: dict[BuildParams, RustcOutput],
+        rust_artifacts: dict[LinkStrategy, RustLinkStrategyInfo],
+        link_infos: dict[LibOutputStyle, LinkInfos]) -> list[Provider]:
+    providers = []
+
+    crate = attr_crate(ctx)
+
+    merged_link_infos, shared_libs, linkable_graphs, exported_link_deps = _rust_link_providers(ctx, compile_ctx.dep_ctx)
+
+    # Create rust library provider.
+    rust_link_info = RustLinkInfo(
+        crate = crate,
+        strategies = rust_artifacts,
+        merged_link_infos = merged_link_infos,
+        exported_link_deps = exported_link_deps,
+        shared_libs = shared_libs,
+        linkable_graphs = linkable_graphs,
+    )
+
+    providers.append(rust_link_info)
+    providers += _native_link_providers(ctx, compile_ctx, lang_style_param, native_param_artifact, link_infos, rust_link_info)
+    return providers
+
+def _rust_link_providers(
+        ctx: AnalysisContext,
+        dep_ctx: DepCollectionContext) -> (
+    dict[ConfiguredTargetLabel, MergedLinkInfo],
+    SharedLibraryInfo,
+    list[LinkableGraph],
+    list[Dependency],
+):
+    inherited_link_infos = inherited_merged_link_infos(ctx, dep_ctx)
+    inherited_shlibs = inherited_shared_libs(ctx, dep_ctx)
+    inherited_graphs = inherited_linkable_graphs(ctx, dep_ctx)
+    inherited_exported_deps = inherited_exported_link_deps(ctx, dep_ctx)
+
+    shared_libs = merge_shared_libraries(
+        ctx.actions,
+        deps = inherited_shlibs,
+    )
+    return (inherited_link_infos, shared_libs, inherited_graphs, inherited_exported_deps)
+
+def _native_link_providers(
+        ctx: AnalysisContext,
+        compile_ctx: CompileContext,
+        lang_style_param: dict[(LinkageLang, LibOutputStyle), BuildParams],
+        param_artifact: dict[BuildParams, RustcOutput],
+        link_infos: dict[LibOutputStyle, LinkInfos],
+        rust_link_info: RustLinkInfo) -> list[Provider]:
     """
     Return the set of providers needed to link Rust as a dependency for native
     (ie C/C++) code, along with relevant dependencies.
-
-    TODO: This currently assumes `staticlib`/`cdylib` behaviour, where all
-    dependencies are bundled into the Rust crate itself. We need to break out of
-    this mode of operation.
     """
-    inherited_non_rust_link_deps = inherited_non_rust_exported_link_deps(ctx)
-    inherited_non_rust_link = inherited_non_rust_link_info(ctx)
-    inherited_non_rust_shlibs = inherited_non_rust_shared_libs(ctx)
-    linker_info = compile_ctx.cxx_toolchain_info.linker_info
-    linker_type = linker_info.type
+    # We collected transitive deps in the Rust link providers
+    inherited_link_infos = rust_link_info.merged_link_infos
+    inherited_shlibs = [rust_link_info.shared_libs]
+    inherited_link_graphs = rust_link_info.linkable_graphs
+    inherited_exported_deps = rust_link_info.exported_link_deps

     providers = []

-    if ctx.attrs.proc_macro:
-        # Proc-macros never have a native form
-        return providers
-
-    # TODO(cjhopman): This seems to be conflating the link strategy with the lib output style. I tried going through
-    # lang_style_param/BuildParams and make it actually be based on LibOutputStyle, but it goes on to use that for defining
-    # how to consume dependencies and it's used for rust_binary like its own link strategy and it's unclear what's the
-    # correct behavior. For now, this preserves existing behavior without clarifying what concepts its actually
-    # operating on.
-    libraries = {}
-    link_infos = {}
-    external_debug_infos = {}
-    for output_style in LibOutputStyle:
-        legacy_link_style = legacy_output_style_to_link_style(output_style)
-        params = lang_style_param[(LinkageLang("c++"), legacy_link_style)]
-        lib = param_artifact[params]
-        libraries[output_style] = lib
-
-        external_debug_info = inherited_external_debug_info(
-            ctx = ctx,
-            dwo_output_directory = lib.dwo_output_directory,
-            dep_link_style = params.dep_link_style,
-        )
-        external_debug_infos[output_style] = external_debug_info
-
-        if output_style == LibOutputStyle("shared_lib"):
-            link_infos[output_style] = LinkInfos(
-                default = LinkInfo(
-                    linkables = [SharedLibLinkable(lib = lib.output)],
-                    external_debug_info = external_debug_info,
-                ),
-                stripped = LinkInfo(
-                    linkables = [ArchiveLinkable(
-                        archive = Archive(
-                            artifact = strip_debug_info(
-                                ctx,
-                                paths.join(output_style.value, lib.output.short_path),
-                                lib.output,
-                            ),
-                        ),
-                        linker_type = linker_type,
-                    )],
-                ),
-            )
-        else:
-            link_infos[output_style] = LinkInfos(
-                default = LinkInfo(
-                    linkables = [ArchiveLinkable(
-                        archive = Archive(artifact = lib.output),
-                        linker_type = linker_type,
-                    )],
-                    external_debug_info = external_debug_info,
-                ),
-            )
+    shared_lib_params = lang_style_param[(LinkageLang("native"), LibOutputStyle("shared_lib"))]
+    shared_lib_output = param_artifact[shared_lib_params].output

     preferred_linkage = Linkage(ctx.attrs.preferred_linkage)

-    # TODO(cjhopman): This is preserving existing behavior, but it doesn't make sense. These lists can be passed
-    # unmerged to create_merged_link_info below. Potentially that could change link order, so needs to be done more carefully.
-    merged_inherited_non_rust_link = create_merged_link_info_for_propagation(ctx, inherited_non_rust_link)
-
     # Native link provider.
     providers.append(create_merged_link_info(
         ctx,
         compile_ctx.cxx_toolchain_info.pic_behavior,
         link_infos,
-        exported_deps = [merged_inherited_non_rust_link],
+        deps = inherited_link_infos.values(),
+        exported_deps = filter(None, [d.get(MergedLinkInfo) for d in inherited_exported_deps]),
        preferred_linkage = preferred_linkage,
     ))

     solibs = {}

     # Add the shared library to the list of shared libs.
-    linker_info = compile_ctx.cxx_toolchain_info.linker_info
-    shlib_name = get_default_shared_library_name(linker_info, ctx.label)
+    shlib_name = attr_soname(ctx)

     # Only add a shared library if we generated one.
     # TODO(cjhopman): This is strange. Normally (like in c++) the link_infos passed to create_merged_link_info above would only have
@@ -672,34 +846,27 @@ def _native_providers(
     # to remove the SharedLibraries provider, maybe just wait for that to resolve this.
     if get_lib_output_style(LinkStrategy("shared"), preferred_linkage, compile_ctx.cxx_toolchain_info.pic_behavior) == LibOutputStyle("shared_lib"):
         solibs[shlib_name] = LinkedObject(
-            output = libraries[LibOutputStyle("shared_lib")].output,
-            unstripped_output = libraries[LibOutputStyle("shared_lib")].output,
-            external_debug_info = external_debug_infos[LibOutputStyle("shared_lib")],
+            output = shared_lib_output,
+            unstripped_output = shared_lib_output,
+            external_debug_info = link_infos[LibOutputStyle("shared_lib")].default.external_debug_info,
         )

     # Native shared library provider.
+    shared_libs = create_shared_libraries(ctx, solibs)
     providers.append(merge_shared_libraries(
         ctx.actions,
-        create_shared_libraries(ctx, solibs),
-        inherited_non_rust_shlibs,
+        shared_libs,
+        inherited_shlibs,
     ))

     # Omnibus root provider.
     linkable_root = create_linkable_root(
+        label = ctx.label,
         name = shlib_name,
         link_infos = LinkInfos(
-            default = LinkInfo(
-                linkables = [ArchiveLinkable(
-                    archive = Archive(
-                        artifact = libraries[LibOutputStyle("shared_lib")].output,
-                    ),
-                    linker_type = linker_type,
-                    link_whole = True,
-                )],
-                external_debug_info = external_debug_infos[LibOutputStyle("pic_archive")],
-            ),
+            default = set_link_info_link_whole(link_infos[LibOutputStyle("pic_archive")].default),
         ),
-        deps = inherited_non_rust_link_deps,
+        deps = inherited_link_graphs,
     )
     providers.append(linkable_root)

@@ -714,53 +881,69 @@
             linkable_node = create_linkable_node(
                 ctx = ctx,
                 preferred_linkage = preferred_linkage,
-                exported_deps = inherited_non_rust_link_deps,
+                deps = inherited_link_graphs,
+                exported_deps = inherited_exported_deps,
                 link_infos = link_infos,
-                shared_libs = solibs,
+                shared_libs = shared_libs,
                 default_soname = shlib_name,
+                include_in_android_mergemap = False,
             ),
         ),
-        deps = inherited_non_rust_link_deps,
+        deps = inherited_link_graphs + inherited_exported_deps,
     )

     providers.append(linkable_graph)

-    providers.append(merge_link_group_lib_info(deps = inherited_non_rust_link_deps))
+    # We never need to add anything to this provider because Rust libraries
+    # cannot act as link group libs, especially given that they only support
+    # auto link groups anyway
+    providers.append(merge_link_group_lib_info(children = inherited_link_group_lib_infos(ctx, compile_ctx.dep_ctx)))
+
+    providers.append(
+        create_unix_env_info(
+            actions = ctx.actions,
+            env = UnixEnv(
+                label = ctx.label,
+                native_libs = [shared_libs],
+            ),
+            deps = inherited_exported_deps,
+        ),
+    )

     return providers

 # Compute transitive deps. Caller decides whether this is necessary.
 def _compute_transitive_deps(
         ctx: AnalysisContext,
-        dep_link_style: LinkStyle) -> (
-    dict[Artifact, CrateName],
-    dict[Artifact, CrateName],
+        dep_ctx: DepCollectionContext,
+        dep_link_strategy: LinkStrategy) -> (
+    dict[MetadataKind, dict[Artifact, CrateName]],
     list[ArtifactTSet],
     dict[RustProcMacroMarker, ()],
 ):
-    transitive_deps = {}
-    transitive_rmeta_deps = {}
+    toolchain_info = ctx.attrs._rust_toolchain[RustToolchainInfo]
+    transitive_deps = {m: {} for m in MetadataKind}
     external_debug_info = []
     transitive_proc_macro_deps = {}

-    for dep in resolve_rust_deps(ctx):
+    for dep in resolve_rust_deps(ctx, dep_ctx):
         if dep.proc_macro_marker != None:
             transitive_proc_macro_deps[dep.proc_macro_marker] = ()

             # We don't want to propagate proc macros directly, and they have no transitive deps
             continue

-        style = style_info(dep.info, dep_link_style)
-        transitive_deps[style.rlib] = dep.info.crate
-        transitive_deps.update(style.transitive_deps)
-
-        transitive_rmeta_deps[style.rmeta] = dep.info.crate
-        transitive_rmeta_deps.update(style.transitive_rmeta_deps)
+        strategy = strategy_info(toolchain_info, dep.info, dep_link_strategy)
+        for m in MetadataKind:
+            transitive_deps[m][strategy.outputs[m]] = dep.info.crate
+            transitive_deps[m].update(strategy.transitive_deps[m])

-        external_debug_info.append(style.external_debug_info)
+        external_debug_info.append(strategy.external_debug_info)

-        transitive_proc_macro_deps.update(style.transitive_proc_macro_deps)
+        transitive_proc_macro_deps.update(strategy.transitive_proc_macro_deps)

-    return transitive_deps, transitive_rmeta_deps, external_debug_info, transitive_proc_macro_deps
+    return transitive_deps, external_debug_info, transitive_proc_macro_deps

 def rust_library_macro_wrapper(rust_library: typing.Callable) -> typing.Callable:
     def wrapper(**kwargs):
diff --git a/prelude/rust/rust_toolchain.bzl b/prelude/rust/rust_toolchain.bzl
index cf71959376c31..a69e40d69ae4d 100644
--- a/prelude/rust/rust_toolchain.bzl
+++ b/prelude/rust/rust_toolchain.bzl
@@ -5,79 +5,141 @@
 # License, Version 2.0 found in the LICENSE-APACHE file in the root directory
 # of this source tree.

+# Typically, rustc has access to a "sysroot," which is a directory tree with a known layout that
+# contains a number of pre-compiled rlibs that are available by default. This includes, for example,
+# the standard library. If explicit sysroot deps are passed on the toolchain, the sysroot is not
+# made available to rustc. Instead, all crates which would normally be members of the sysroot must
+# be passed explicitly here.
+#
+# Most sysroot deps typically behave as if they were transitive dependencies of the crate being
+# compiled. That means they are not guaranteed to be included in the link, and the user must write
+# `extern crate;` to access them. The crates singled out here behave differently in that regard,
+# which is why they are singled out.
+#
+# Toolchains are free to omit any crates they like from this list; those crates will simply not be
+# available in the compilation.
+RustExplicitSysrootDeps = record(
+    core = Dependency | None,
+    proc_macro = Dependency | None,
+    std = Dependency | None,
+    panic_unwind = Dependency | None,
+    panic_abort = Dependency | None,
+    others = list[Dependency],
+)
+
+PanicRuntime = enum("unwind", "abort", "none")
+
+# FIXME(JakobDegen): These all have default values for historical reasons. Some of them certainly
+# should, but some of them probably shouldn't?
 # @unsorted-dict-items
-_rust_toolchain_attrs = {
+rust_toolchain_attrs = {
     # Report unused dependencies
-    "report_unused_deps": False,
+    "report_unused_deps": provider_field(bool, default = False),
     # Rustc target triple to use
     # https://doc.rust-lang.org/rustc/platform-support.html
-    "rustc_target_triple": None,
+    "rustc_target_triple": provider_field(str | None, default = None),
     # Baseline compiler config
-    "rustc_flags": [],
+    "rustc_flags": provider_field(list[typing.Any], default = []),
+    # Rustc flags, except that they are applied on the command line after the
+    # target's rustc flags
+    "extra_rustc_flags": provider_field(list[typing.Any], default = []),
+    # Flags applied only on check builds
+    "rustc_check_flags": provider_field(list[typing.Any], default = []),
     # Extra flags when building binaries
-    "rustc_binary_flags": [],
-    # Extra flags for doing check builds
-    "rustc_check_flags": [],
+    "rustc_binary_flags": provider_field(list[typing.Any], default = []),
     # Extra flags for building tests
-    "rustc_test_flags": [],
+    "rustc_test_flags": provider_field(list[typing.Any], default = []),
     # Extra flags when coverage is enabled for a target
-    "rustc_coverage_flags": ["-Cinstrument-coverage"],
+    # FIXME(JakobDegen): Can't use `list[str]` here, because then the default is wrong, but can't
+    # use a non-empty list as the default because lists are mutable
+    "rustc_coverage_flags": provider_field(typing.Any, default = ("-Cinstrument-coverage",)),
+    # Extra env variables that should be made available to the rustdoc executable.
+    "rustdoc_env": provider_field(dict[str, typing.Any], default = {}),
     # Extra flags for rustdoc invocations
-    "rustdoc_flags": [],
-    # Use rmeta for lib->lib dependencies, and only block
-    # linking on rlib crates. The hope is that rmeta builds
-    # are quick and this increases effective parallelism.
-    "pipelined": False,
+    "rustdoc_flags": provider_field(list[typing.Any], default = []),
     # When you `buck test` a library, also compile and run example code in its
     # documentation comments.
-    "doctests": False,
-    # Filter out failures when we just need diagnostics. That is,
-    # a rule which fails with a compilation failure will report
-    # success as an RE action, but a "failure filter" action will
-    # report the failure if some downstream action needs one of the
-    # artifacts. If all you need is diagnostics, then it will report
-    # success. This doubles the number of actions, so it should only
-    # be explicitly enabled when needed.
-    "failure_filter": False,
+    "doctests": provider_field(bool, default = False),
     # The Rust compiler (rustc)
-    "compiler": None,
-    # A Rust compiler that can be used "standalone", without special settings
-    # from Rust rules (e.g. this keeps the default sysroot).
-    "compiler_standalone": None,
+    "compiler": provider_field(RunInfo | None, default = None),
     # Rust documentation extractor (rustdoc)
-    "rustdoc": None,
+    "rustdoc": provider_field(RunInfo | None, default = None),
     # Clippy (linter) version of the compiler
-    "clippy_driver": None,
+    "clippy_driver": provider_field(RunInfo | None, default = None),
     # Wrapper for rustc in actions
-    "rustc_action": None,
+    "rustc_action": provider_field(RunInfo | None, default = None),
     # Wrapper for rustdoc-generated test executables
-    "rustdoc_test_with_resources": None,
+    "rustdoc_test_with_resources": provider_field(RunInfo | None, default = None),
+    # Wrapper for rustdoc coverage
+    "rustdoc_coverage": provider_field(RunInfo | None, default = None),
+    # These two scripts are used to implement deferred linking, where the link action
+    # is separate from the rustc invocation action. The benefit here is that we can
+    # decouple the action graph such that rustc can compile libs without waiting for
+    # the link step of shared lib dependencies to complete.
+    "deferred_link_action": provider_field(RunInfo | None, default = None),
+    "extract_link_action": provider_field(RunInfo | None, default = None),
     # Failure filter action
-    "failure_filter_action": None,
+    "failure_filter_action": provider_field(RunInfo | None, default = None),
     # The default edition to use, if not specified.
-    "default_edition": None,
+    "default_edition": provider_field(str | None, default = None),
     # Lints
-    "allow_lints": [],
-    "deny_lints": [],
-    "warn_lints": [],
+    "allow_lints": provider_field(list[typing.Any], default = []),
+    "deny_lints": provider_field(list[typing.Any], default = []),
+    "warn_lints": provider_field(list[typing.Any], default = []),
+    # Deny-on-Check lints are handled differently depending on the build.
+    #
+    # For check builds, e.g. [check], [diag.json], [clippy.json] subtargets, or the default target
+    # for `rust_library` rules, these lints will be applied as Deny Lints. Importantly, this means
+    # that when you call `buck build :rust_lib` or use tools like arc rust-check or rustfix, these
+    # lints will be surfaced as errors.
+    #
+    # However, for "regular" builds, e.g. when building tests or binaries, or building this target
+    # as a dependency of another target, these flags will be surfaced only as warnings. The primary
+    # benefit here is that you can develop + test your code as normal and will not be blocked by
+    # these lints. However, once you run rust check, or submit your code to phabricator, these
+    # lints will prevent you from landing your code. This way we can introduce lints that we'd like
+    # to deny from our codebase without slowing down your inner dev loop, or encouraging you to
+    # --cap-lints=warn for your projects.
+    "deny_on_check_lints": provider_field(list[typing.Any], default = []),
     # Clippy configuration file clippy.toml
-    "clippy_toml": None,
-    # URL prefix (e.g. /path/to/docs) where crates' docs are hosted. Used for
-    # linking types in signatures to their definition in another crate.
-    "extern_html_root_url_prefix": None,
+    "clippy_toml": provider_field(Artifact | None, default = None),
     # Utilities used for building flagfiles containing dynamic crate names
-    "concat_tool": None,
-    "transitive_dependency_symlinks_tool": None,
+    "transitive_dependency_symlinks_tool": provider_field(RunInfo | None, default = None),
+    # Setting this enables additional behaviors that improve linking at the
+    # cost of using unstable implementation details of rustc. At the moment,
+    # this is only used for linking rlibs into C++/C builds, instead of using
+    # staticlibs, but that's expected to change.
+    #
+    # FIXME(JakobDegen): This should require `explicit_sysroot_deps` in the
+    # future.
+    "advanced_unstable_linking": provider_field(bool, default = False),
+    # Override the implicit sysroot with the provided Artifact containing a directory to
+    # a prebuilt sysroot. Will be forwarded to rustc as `--sysroot=`. Only
+    # one of this and `explicit_sysroot_deps` may be set.
+    "sysroot_path": provider_field(Artifact | None, default = None),
+    # See the documentation on the type for details
+    "explicit_sysroot_deps": provider_field(RustExplicitSysrootDeps | None, default = None),
+    # The panic runtime to use. This is a part of the target definition and is
+    # normally inferred by rustc. This field:
+    #
+    # - Should be set to `"none"` on nostd targets
+    # - Must be set correctly if `explicit_sysroot_deps` and
+    #   `advanced_unstable_linking` are used. You can find the correct value
+    #   for a given target triple via `rustc --print target-spec-json`
+    # - Otherwise can typically be safely defaulted to `"unwind"`. It is,
+    #   however, still the preferred way of configuring `-Cpanic=abort`, since
+    #   it makes sure that the flag is consistent across the crate graph.
+    #
+    # It's worth pointing out that the way that rustc handles this is a bit
+    # weird. It requires the panic runtime to be a nostd crate, despite the fact
+    # that it is only ever useful in combination with std. We don't impose such
+    # a requirement.
+    #
+    # FIXME(JakobDegen): Fix `enum` so that we can set `unwind` as the default
+    "panic_runtime": provider_field(PanicRuntime),
+    # Setting this allows Rust rules to use features which are only available
+    # on nightly releases.
+    "nightly_features": provider_field(bool, default = False),
 }

-RustToolchainInfo = provider(fields = _rust_toolchain_attrs.keys())
-
-def ctx_toolchain_info(ctx: AnalysisContext) -> RustToolchainInfo:
-    toolchain_info = ctx.attrs._rust_toolchain[RustToolchainInfo]
-
-    attrs = dict()
-    for k, default in _rust_toolchain_attrs.items():
-        v = getattr(toolchain_info, k)
-        attrs[k] = default if v == None else v
-
-    return RustToolchainInfo(**attrs)
+RustToolchainInfo = provider(fields = rust_toolchain_attrs)
diff --git a/prelude/rust/targets.bzl b/prelude/rust/targets.bzl
index e746d630466d8..169f591a4a3a7 100644
--- a/prelude/rust/targets.bzl
+++ b/prelude/rust/targets.bzl
@@ -6,8 +6,7 @@
 # of this source tree.
 load("@prelude//os_lookup:defs.bzl", "OsLookup")
-load("@prelude//decls/android_rules.bzl", "TargetCpuType")
-load("@prelude//decls/core_rules.bzl", "Platform")
+load("@prelude//decls/core_rules.bzl", "Platform", "TargetCpuType")

 _platform = enum(*Platform)
 _cpu = enum(*TargetCpuType)
diff --git a/prelude/rust/tools/BUCK b/prelude/rust/tools/BUCK
deleted file mode 100644
index d28c450927edf..0000000000000
--- a/prelude/rust/tools/BUCK
+++ /dev/null
@@ -1,50 +0,0 @@
-load(":tool_rules.bzl", "get_rustc_cfg")
-
-prelude = native
-
-get_rustc_cfg(
-    name = "rustc_cfg",
-    visibility = ["PUBLIC"],
-)
-
-prelude.python_bootstrap_binary(
-    name = "rustc_action",
-    main = "rustc_action.py",
-    visibility = ["PUBLIC"],
-)
-
-prelude.python_bootstrap_binary(
-    name = "rustdoc_test_with_resources",
-    main = "rustdoc_test_with_resources.py",
-    visibility = ["PUBLIC"],
-)
-
-prelude.python_bootstrap_binary(
-    name = "failure_filter_action",
-    main = "failure_filter_action.py",
-    visibility = ["PUBLIC"],
-)
-
-prelude.python_bootstrap_binary(
-    name = "concat",
-    main = "concat.py",
-    visibility = ["PUBLIC"],
-)
-
-prelude.python_bootstrap_binary(
-    name = "transitive_dependency_symlinks",
-    main = "transitive_dependency_symlinks.py",
-    visibility = ["PUBLIC"],
-)
-
-prelude.python_bootstrap_binary(
-    name = "get_rustc_cfg",
-    main = "get_rustc_cfg.py",
-    visibility = [],
-)
-
-prelude.python_bootstrap_binary(
-    name = "buildscript_run",
-    main = "buildscript_run.py",
-    visibility = ["PUBLIC"],
-)
diff --git a/prelude/rust/tools/BUCK.v2 b/prelude/rust/tools/BUCK.v2
new file mode 100644
index 0000000000000..03154a325d861
--- /dev/null
+++ b/prelude/rust/tools/BUCK.v2
@@ -0,0 +1,75 @@
+load("@prelude//utils:source_listing.bzl", "source_listing")
+load(
+    ":tool_rules.bzl",
+    "get_rustc_cfg",
+    "linkable_symbol_supports_no_std",
+)
+
+oncall("build_infra")
+
+source_listing()
+
+prelude = native
+
+get_rustc_cfg(
+    name = "rustc_cfg",
+    visibility = ["PUBLIC"],
+)
+
+prelude.python_bootstrap_binary(
+    name = "deferred_link_action",
+    main = "deferred_link_action.py",
+    visibility = ["PUBLIC"],
+)
+
+prelude.python_bootstrap_binary(
+    name = "extract_link_action",
+    main = "extract_link_action.py",
+    visibility = ["PUBLIC"],
+)
+
+prelude.python_bootstrap_binary(
+    name = "rustc_action",
+    main = "rustc_action.py",
+    visibility = ["PUBLIC"],
+)
+
+prelude.python_bootstrap_binary(
+    name = "rustdoc_test_with_resources",
+    main = "rustdoc_test_with_resources.py",
+    visibility = ["PUBLIC"],
+)
+
+prelude.python_bootstrap_binary(
+    name = "failure_filter_action",
+    main = "failure_filter_action.py",
+    visibility = ["PUBLIC"],
+)
+
+prelude.python_bootstrap_binary(
+    name = "transitive_dependency_symlinks",
+    main = "transitive_dependency_symlinks.py",
+    visibility = ["PUBLIC"],
+)
+
+prelude.python_bootstrap_binary(
+    name = "buildscript_run",
+    main = "buildscript_run.py",
+    visibility = ["PUBLIC"],
+)
+
+prelude.python_bootstrap_binary(
+    name = "rustdoc_coverage",
+    main = "rustdoc_coverage.py",
+    visibility = ["PUBLIC"],
+)
+
+prelude.export_file(
+    name = "linkable_symbol.rs",
+    visibility = ["PUBLIC"],
+)
+
+linkable_symbol_supports_no_std(
+    name = "linkable_symbol_supports_no_std",
+    visibility = ["PUBLIC"],
+)
diff --git a/prelude/rust/tools/attrs.bzl b/prelude/rust/tools/attrs.bzl
index 7d4231e8f13bc..2a18fc0c36740 100644
--- a/prelude/rust/tools/attrs.bzl
+++ b/prelude/rust/tools/attrs.bzl
@@ -12,9 +12,11 @@ def _internal_tool(default: str) -> Attr:
 # configurable attributes there. This list of internal tools is distracting and
 # expected to grow.
 internal_tool_attrs = {
-    "concat_tool": _internal_tool("prelude//rust/tools:concat"),
+    "deferred_link_action": _internal_tool("prelude//rust/tools:deferred_link_action"),
+    "extract_link_action": _internal_tool("prelude//rust/tools:extract_link_action"),
     "failure_filter_action": _internal_tool("prelude//rust/tools:failure_filter_action"),
     "rustc_action": _internal_tool("prelude//rust/tools:rustc_action"),
+    "rustdoc_coverage": _internal_tool("prelude//rust/tools:rustdoc_coverage"),
     "rustdoc_test_with_resources": _internal_tool("prelude//rust/tools:rustdoc_test_with_resources"),
     "transitive_dependency_symlinks_tool": _internal_tool("prelude//rust/tools:transitive_dependency_symlinks"),
 }
diff --git a/prelude/rust/tools/buildscript_run.py b/prelude/rust/tools/buildscript_run.py
index 85f6d98bb6b17..69a96f3389d1a 100755
--- a/prelude/rust/tools/buildscript_run.py
+++ b/prelude/rust/tools/buildscript_run.py
@@ -15,7 +15,14 @@
 import subprocess
 import sys
 from pathlib import Path
-from typing import Dict, IO, NamedTuple
+from typing import Any, Dict, IO, NamedTuple
+
+
+IS_WINDOWS: bool = os.name == "nt"
+
+
+def eprint(*args: Any, **kwargs: Any) -> None:
+    print(*args, end="\n", file=sys.stderr, flush=True, **kwargs)


 def cfg_env(rustc_cfg: Path) -> Dict[str, str]:
@@ -111,23 +118,41 @@ def ensure_rustc_available(
     assert rustc is not None, "RUSTC env is missing"
     assert target is not None, "TARGET env is missing"

-    if os.path.dirname(rustc) != "":
-        rustc = os.path.join(cwd, rustc)
-
     # NOTE: `HOST` is optional.
     host = env.get("HOST")

     try:
-        subprocess.check_output([rustc, "--version"])
+        # Run through cmd.exe on Windows so if rustc is a batch script
+        # (like the command_alias trampoline is), it is found relative to
+        # cwd.
+        #
+        # Executing `os.path.join(cwd, rustc)` would also work, but because
+        # of `../` in the path, it's possible to hit path length limits.
+        # Resolving it would remove the `..` but then sometimes things
+        # fail with exit code `3221225725` ("out of stack memory").
+        # I suspect it's some infinite loop brought about by the trampoline
+        # and symlinks.
+        subprocess.check_output(  # noqa: P204
+            [rustc, "--version"],
+            cwd=cwd,
+            shell=IS_WINDOWS,
+        )

         # A multiplexed sysroot may involve another fetch,
         # so pass `--target` to check that too.
         if host != target:
-            subprocess.check_output([rustc, f"--target={target}", "--version"])
+            subprocess.check_output(  # noqa: P204
+                [rustc, f"--target={target}", "--version"],
+                cwd=cwd,
+                shell=IS_WINDOWS,
+            )
     except OSError as ex:
-        print(f"Failed to run {rustc} because {ex}", file=sys.stderr)
+        eprint(f"Failed to run {rustc} because {ex}")
         sys.exit(1)
     except subprocess.CalledProcessError as ex:
-        print(f"Failed to run {ex.cmd}: {ex.stderr}", file=sys.stderr)
+        eprint(f"Command failed with exit code {ex.returncode}")
+        eprint(f"Command: {ex.cmd}")
+        if ex.stdout:
+            eprint(f"Stdout: {ex.stdout}")
         sys.exit(1)

@@ -195,7 +220,7 @@ def main() -> None:  # noqa: C901
             if cargo_rustc_cfg_match:
                 flags += "--cfg={}\n".format(cargo_rustc_cfg_match.group(1))
             else:
-                print(line)
+                print(line, end="\n")

     args.outfile.write(flags)
diff --git a/prelude/rust/tools/concat.py b/prelude/rust/tools/concat.py
deleted file mode 100755
index 6dfb8723fdb57..0000000000000
--- a/prelude/rust/tools/concat.py
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-#
-# This source code is licensed under both the MIT license found in the
-# LICENSE-MIT file in the root directory of this source tree and the Apache
-# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
-# of this source tree.
-
-# A tool to concatenate strings, some of which may be from @files. ¯\_(ツ)_/¯
-#
-# Rustc's command line requires dependencies to be provided as:
-#
-#     --extern cratename=path/to/libcratename.rlib
-#
-# In Buck, sometimes the cratename is computed at build time, for example
-# extracted from a Thrift file. Rustc's "@" support isn't sufficient for this
-# because the following doesn't make sense:
-#
-#     --extern @filecontainingcrate=path/to/libcratename.rlib
-#
-# and the cratename isn't able to be its own argument:
-#
-#     --extern @filecontainingcrate =path/to/libcratename.rlib
-#
-# Instead we use Python to make a single file containing the dynamic cratename
-# and the rlib filepath concatenated together.
-#
-#     concat.py --output $TMP -- @filecontainingcrate = path/to/libcratename.rlib
-#
-# then:
-#
-#     --extern @$TMP
-#
-
-import argparse
-from typing import IO, List, NamedTuple
-
-
-class Args(NamedTuple):
-    output: IO[str]
-    strings: List[str]
-
-
-def main():
-    parser = argparse.ArgumentParser(fromfile_prefix_chars="@")
-    parser.add_argument("--output", type=argparse.FileType("w"))
-    parser.add_argument("strings", nargs="*", type=str)
-    args = Args(**vars(parser.parse_args()))
-
-    args.output.write("".join(args.strings))
-
-
-if __name__ == "__main__":
-    main()
diff --git a/prelude/rust/tools/deferred_link_action.py b/prelude/rust/tools/deferred_link_action.py
new file mode 100644
index 0000000000000..87255d847aadf
--- /dev/null
+++ b/prelude/rust/tools/deferred_link_action.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python3
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# Execute a previously deferred link action. The inputs to this script are expected to come from
+# a previous invocation of `extract_link_action.py`. The main special processing here is to handle
+# the optional version script argument, and pass the objects located in the provided directory
+# as individual inputs to the linker command.
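
The core trick the header above describes can be sketched roughly as follows. This is an illustration only, not part of the tool, and every name in it is hypothetical; it relies on the fact that ld- and clang-style linker drivers re-read arguments from an argsfile passed as "@<path>", which is what lets a long, dynamically assembled command line be handed to the real linker:

    import subprocess
    import tempfile

    def deferred_link(linker: list, objects: list, extra_args: list) -> int:
        # Write one argument per line; the linker re-reads them via "@file".
        with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
            f.write("\n".join(objects + extra_args) + "\n")
            args_file = f.name
        return subprocess.call(linker + ["@" + args_file])

    # e.g. deferred_link(["clang++", "-shared", "-o", "libfoo.so"], ["foo.rcgu.o"], ["-lpthread"])

The real script below additionally handles the optional version script and runs the linker asynchronously.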
+import argparse
+import asyncio
+import os
+import sys
+import tempfile
+from pathlib import Path
+from typing import Any, List, NamedTuple
+
+
+def eprint(*args: Any, **kwargs: Any) -> None:
+    print(*args, end="\n", file=sys.stderr, flush=True, **kwargs)
+
+
+class Args(NamedTuple):
+    objects: Path
+    version_script: Path
+    linker: List[str]
+
+
+def arg_parse() -> Args:
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--objects",
+        type=Path,
+        required=True,
+    )
+    parser.add_argument(
+        "--version-script",
+        type=Path,
+        required=True,
+    )
+    parser.add_argument(
+        "linker",
+        nargs=argparse.REMAINDER,
+        type=str,
+        help="Linker command line",
+    )
+
+    return Args(**vars(parser.parse_args()))
+
+
+def unpack_objects(objects: Path) -> List[str]:
+    return [os.path.join(objects, x) for x in os.listdir(objects) if x.endswith(".o")]
+
+
+async def main() -> int:
+    args = arg_parse()
+
+    linker_cmd = args.linker[:1]
+
+    objects = unpack_objects(args.objects)
+
+    with tempfile.NamedTemporaryFile(
+        mode="wb",
+        prefix="real-linker-args-",
+        suffix=".txt",
+        delete=False,
+    ) as args_file:
+        # Some platforms do not use version-scripts. For those platforms we simply
+        # do not pass the version-script to the linker.
+        if os.path.getsize(args.version_script) > 0:
+            args_file.write(
+                b"-Wl,--version-script=" + str(args.version_script).encode() + b"\n"
+            )
+
+        args_file.write("\n".join(objects).encode() + b"\n")
+        args_file.write("\n".join(args.linker[1:]).encode() + b"\n")
+        args_file.flush()
+
+        proc = await asyncio.create_subprocess_exec(
+            *linker_cmd,
+            "@" + args_file.name,
+            env=os.environ,
+            limit=1_000_000,
+        )
+        res = await proc.wait()
+
+        return res
+
+
+sys.exit(asyncio.run(main()))
diff --git a/prelude/rust/tools/extract_link_action.py b/prelude/rust/tools/extract_link_action.py
new file mode 100644
index 0000000000000..f101c6f509c54
--- /dev/null
+++ b/prelude/rust/tools/extract_link_action.py
@@ -0,0 +1,134 @@
+#!/usr/bin/env python3
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# A "fake" linker command meant to be provided to rustc as `-Clinker={}`. This script will process
+# the arguments passed in from rustc and export the objects, version script, and other arguments
+# as outputs to later be used by an invocation of `deferred_link_action.py`.
+#
+# Some arguments here are stripped out (e.g. -L) to avoid having to persist
+# an artifact between this action and the deferred link action. See the comments in
+# `process_link_args()` for more details.
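
For a sense of how the argument classification described above plays out, here is a simplified, hypothetical sketch (not part of the tool); the real `process_link_args()` below additionally handles `-L`/`-o` flag pairs and persists the version script path:

    def classify(arg: str) -> str:
        if arg.startswith("-Wl,--version-script="):
            return "version-script"  # persisted as its own output artifact
        if arg.endswith(("rcgu.o", "symbols.o")):
            return "object"  # copied into the objects output directory
        if arg.endswith((".rlib", ".rmeta")):
            return "dropped"  # metadata-only inputs, irrelevant to the link
        return "kept"  # forwarded to the deferred link via the argsfile

    assert classify("foo.abc123-cgu.0.rcgu.o") == "object"
    assert classify("libbar.rlib") == "dropped"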
+import argparse
+import os
+import shutil
+import sys
+from pathlib import Path
+from typing import Any, IO, List, NamedTuple, Tuple
+
+
+def eprint(*args: Any, **kwargs: Any) -> None:
+    print(*args, end="\n", file=sys.stderr, flush=True, **kwargs)
+
+
+class Args(NamedTuple):
+    out_argsfile: IO[str]
+    out_version_script: Path
+    out_objects: Path
+    linker: List[str]
+
+
+def arg_parse() -> Args:
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--out_argsfile",
+        type=argparse.FileType("w"),
+        required=True,
+    )
+    parser.add_argument(
+        "--out_version-script",
+        type=Path,
+        required=True,
+    )
+    parser.add_argument(
+        "--out_objects",
+        type=Path,
+        required=True,
+    )
+    parser.add_argument(
+        "linker",
+        nargs=argparse.REMAINDER,
+        type=str,
+        help="Linker command line",
+    )
+
+    return Args(**vars(parser.parse_args()))
+
+
+def process_link_args(args: List[str]) -> Tuple[List[str], Path | None, List[Path]]:
+    new_args = []
+    version_script = None
+    objects = []
+
+    i = 0
+    size = len(args)
+    while i < size:
+        arg = args[i]
+        # We want to extract the version script file as an artifact to pass along to the deferred
+        # link action. rustc by default exports this file to somewhere in the TMP directory, so we
+        # must persist it ourselves between actions via an artifact.
+        if arg.startswith("-Wl,--version-script"):
+            version_script = Path(arg.split("=")[1])
+            i += 1
+            continue
+        # These are the artifacts that rustc generates as inputs to the linker.
+        elif arg.endswith("rcgu.o") or arg.endswith("symbols.o"):
+            objects.append(Path(arg))
+            i += 1
+            continue
+        # We don't need either of these, and omitting them from the deferred link args will save
+        # us from having to pass them to the deferred link action.
+        # The .rlib files here are hollow rlibs, providing only metadata for each dependency. These
+        # files have no impact on the link step; they're only needed by rustc.
+        # The .rmeta file contains the metadata section for this crate being linked. Again, since
+        # rmeta is not used at all for linking, we can omit the section entirely from our link step.
+        elif arg.endswith(".rlib") or arg.endswith(".rmeta"):
+            i += 1
+            continue
+        # The -L flag is used by rustc to pass the sysroot as a linker search path. When compiling
+        # we pass a dummy empty sysroot to rustc, so this path is not needed. The real -L flags for
+        # transitive deps are passed along in a separate args file.
+        # The -o flag here is set by rustc to a temporary output location. In a normal rustc link,
+        # rustc will eventually copy the temporary output file to the final location specified by
+        # --emit=link={}. Since this path is temporary, we can simply omit it and pass the real
+        # path needed by buck directly to the deferred link action.
+        elif arg.startswith("-L") or arg.startswith("-o"):
+            i += 2  # also skip the flag's value, which follows as the next argument
+            continue
+
+        new_args.append(arg)
+        i += 1
+
+    return (new_args, version_script, objects)
+
+
+def unpack_objects(objects: Path) -> List[str]:
+    return [x for x in os.listdir(objects) if x.endswith(".o") or x.endswith(".rmeta")]
+
+
+def main() -> int:
+    args = arg_parse()
+
+    filtered_args, version_script, objects = process_link_args(args.linker[1:])
+    args.out_argsfile.write("\n".join(filtered_args))
+    args.out_argsfile.close()
+
+    if version_script:
+        shutil.copy(version_script, args.out_version_script)
+    else:
+        # Touch the file to make buck2 happy
+        args.out_version_script.touch()
+
+    os.mkdir(args.out_objects)
+    for obj in objects:
+        shutil.copy(obj, args.out_objects)
+
+    return 0
+
+
+sys.exit(main())
diff --git a/prelude/rust/tools/failure_filter_action.py b/prelude/rust/tools/failure_filter_action.py
index 6fd2342ee7bdd..155c93ad5b181 100755
--- a/prelude/rust/tools/failure_filter_action.py
+++ b/prelude/rust/tools/failure_filter_action.py
@@ -16,7 +16,11 @@
 import os
 import shutil
 import sys
-from typing import IO, List, NamedTuple, Optional, Tuple
+from typing import Any, IO, List, NamedTuple, Optional, Tuple
+
+
+def eprint(*args: Any, **kwargs: Any) -> None:
+    print(*args, end="\n", file=sys.stderr, flush=True, **kwargs)


 class Args(NamedTuple):
@@ -69,10 +73,7 @@ def main() -> int:
                 # Fall back to real copy if that doesn't work
                 shutil.copy(inp, out)
         else:
-            print(
-                f"Missing required input file {short} ({inp})",
-                file=sys.stderr,
-            )
+            eprint(f"Missing required input file {short} ({inp})")
             return build_status["status"]

     # If all the required files were present, then success regardless of
diff --git a/prelude/rust/tools/get_rustc_cfg.py b/prelude/rust/tools/get_rustc_cfg.py
deleted file mode 100755
index b2dfa8501715b..0000000000000
--- a/prelude/rust/tools/get_rustc_cfg.py
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-#
-# This source code is licensed under both the MIT license found in the
-# LICENSE-MIT file in the root directory of this source tree and the Apache
-# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
-# of this source tree.
-
-# If https://github.com/rust-lang/rust/pull/113780 is accepted, this wrapper can
-# go away. The `rule` in the bzl code should directly run rustc.
-#
-#     cmd_args(
-#         toolchain_info.compiler,
-#         cmd_args("--print=cfg=", out.as_output(), delimiter = ""),
-#         cmd_args("--target=", toolchain_info.rustc_target_triple, delimiter = ""),
-#     )
-#
-# Alternatively if `ctx.actions.run` learns to redirect stdout. Something like:
Something like:
-#
-# ctx.actions.run(
-#     cmd_args(toolchain_info.compiler, ...),
-#     stdout = out.as_output(),
-# )
-#
-# or:
-#
-# subprocess = ctx.actions.run(
-#     cmd_args(toolchain_info.compiler, ...),
-# )
-# return [DefaultInfo(default_output = subprocess.stdout)]
-
-
-import argparse
-import subprocess
-import sys
-from typing import IO, NamedTuple
-
-
-class Args(NamedTuple):
-    rustc: str
-    target: str
-    out: IO[str]
-
-
-def main():
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--rustc", type=str, required=True)
-    parser.add_argument("--target", type=str, required=True)
-    parser.add_argument("--out", type=argparse.FileType("w"), required=True)
-    args = Args(**vars(parser.parse_args()))
-
-    subprocess.run(
-        [args.rustc, "--print=cfg", f"--target={args.target}"],
-        stdout=args.out,
-        stderr=sys.stderr,
-        encoding="utf-8",
-        check=True,
-    )
-
-
-if __name__ == "__main__":
-    main()
diff --git a/prelude/rust/tools/linkable_symbol.rs b/prelude/rust/tools/linkable_symbol.rs
new file mode 100644
index 0000000000000..861c8334cde6c
--- /dev/null
+++ b/prelude/rust/tools/linkable_symbol.rs
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+#![cfg_attr(set_nostd, no_std)]
+
+#[cfg(any(rust_linkable_symbol_content_bytes, rust_linkable_symbol_getter_bytes))]
+#[repr(C)]
+#[cfg_attr(rust_linkable_symbol_align_bytes = "2", repr(align(2)))]
+#[cfg_attr(rust_linkable_symbol_align_bytes = "4", repr(align(4)))]
+#[cfg_attr(rust_linkable_symbol_align_bytes = "8", repr(align(8)))]
+struct Aligned<Bytes: ?Sized> {
+    bytes: Bytes,
+}
+
+#[cfg(rust_linkable_symbol_content_str)]
+#[used]
+#[export_name = env!("LINKABLE_SYMBOL")]
+pub static LINKABLE_SYMBOL: &str = include_str!("content");
+
+#[cfg(rust_linkable_symbol_content_bytes)]
+#[used]
+#[export_name = env!("LINKABLE_SYMBOL")]
+pub static LINKABLE_SYMBOL: &Aligned<[u8]> = &Aligned {
+    bytes: *include_bytes!("content"),
+};
+
+#[cfg(rust_linkable_symbol_getter_str)]
+pub fn get() -> &'static str {
+    extern "Rust" {
+        #[link_name = env!("LINKABLE_SYMBOL")]
+        static LINKABLE_SYMBOL: &'static str;
+    }
+    unsafe { LINKABLE_SYMBOL }
+}
+
+#[cfg(rust_linkable_symbol_getter_bytes)]
+pub fn get() -> &'static [u8] {
+    extern "Rust" {
+        #[link_name = env!("LINKABLE_SYMBOL")]
+        static LINKABLE_SYMBOL: &'static Aligned<[u8]>;
+    }
+    unsafe { &LINKABLE_SYMBOL.bytes }
+}
diff --git a/prelude/rust/tools/rustc_action.py b/prelude/rust/tools/rustc_action.py
index 6f2498cd18caf..d15971bf969b7 100755
--- a/prelude/rust/tools/rustc_action.py
+++ b/prelude/rust/tools/rustc_action.py
@@ -42,6 +42,7 @@ def eprint(*args: Any, **kwargs: Any) -> None:
 
 
 def key_value_arg(s: str) -> Tuple[str, str]:
+    s = arg_eval(s)
     key_value = s.split("=", maxsplit=1)
     if len(key_value) == 2:
         return (key_value[0], key_value[1])
@@ -58,7 +59,7 @@ class Args(NamedTuple):
     buck_target: Optional[str]
     failure_filter: Optional[IO[bytes]]
     required_output: Optional[List[Tuple[str, str]]]
-    only_artifact: Optional[str]
+    echo: Optional[IO[bytes]]
 
     rustc: List[str]
 
@@ -120,33 +121,49 @@ def arg_parse() -> Args:
         "(and filled with a placeholder on a filtered failure)",
     )
     parser.add_argument(
-        "--only-artifact",
-        metavar="TYPE",
-        help="Terminate rustc after requested artifact type (metadata, link, etc) has been emitted. 
" - "(Assumes compiler is invoked with --error-format=json --json=artifacts)", + "--echo", + type=argparse.FileType("wb"), + help="Write the input command line to this file, without running it", ) parser.add_argument( "rustc", nargs=argparse.REMAINDER, - type=str, + type=arg_eval, help="Compiler command line", ) return Args(**vars(parser.parse_args())) +def arg_eval(arg: str) -> str: + """ + Expand an argument such as --extern=$(cat buck-out/v2/gen/foo.txt)=buck-out/dev/gen/libfoo.rlib + """ + expanded = "" + + while True: + begin = arg.find("$(cat ") + if begin == -1: + return expanded + arg + expanded += arg[:begin] + begin += len("$(cat ") + path, rest = arg[begin:].split(")", maxsplit=1) + with open(path, encoding="utf-8") as f: + expanded += f.read().strip() + arg = rest + + async def handle_output( # noqa: C901 proc: asyncio.subprocess.Process, args: Args, crate_map: Dict[str, str], -) -> Tuple[bool, bool]: +) -> bool: got_error_diag = False - shutdown = False proc_stderr = proc.stderr assert proc_stderr is not None - while not shutdown: + while True: line = await proc_stderr.readline() if line is None or line == b"": @@ -161,12 +178,7 @@ async def handle_output( # noqa: C901 if DEBUG: print(f"diag={repr(diag)}", end="\n") - # We have to sniff the shape of diag record based on what fields it has set. - if "artifact" in diag and "emit" in diag: - if diag["emit"] == args.only_artifact: - shutdown = True - continue - elif "unused_extern_names" in diag: + if "unused_extern_names" in diag: unused_names = diag["unused_extern_names"] # Empty unused_extern_names is just noise. @@ -219,12 +231,16 @@ async def handle_output( # noqa: C901 if args.diag_txt: args.diag_txt.close() - return (got_error_diag, shutdown) + return got_error_diag async def main() -> int: args = arg_parse() + if args.echo: + args.echo.write("".join(arg + "\n" for arg in args.rustc).encode("utf-8")) + return 0 + # Inherit a very limited initial environment, then add the new things env = { k: os.environ[k] @@ -239,6 +255,7 @@ async def main() -> int: "LOCALAPPDATA", "PROGRAMDATA", "TEMP", + "TMP", # TODO(andirauter): Required by RE. 
Remove them when no longer required T119466023 "EXECUTION_ID", "SESSION_ID", @@ -266,15 +283,14 @@ async def main() -> int: {k: v.replace("\\n", "\n").replace("\\\n", "\\n") for k, v in args.env} ) if args.path_env: - env.update({k: str(Path(v).resolve()) for k, v in args.path_env}) + env.update({k: os.path.abspath(v) for k, v in args.path_env}) crate_map = dict(args.crate_map) if args.crate_map else {} if DEBUG: print(f"args {repr(args)} env {env} crate_map {crate_map}", end="\n") - rustc_cmd = args.rustc[:1] - rustc_args = args.rustc[1:] + rustc_cmd, rustc_args = args.rustc[:1], args.rustc[1:] if args.remap_cwd_prefix is not None: rustc_args.append( @@ -304,20 +320,12 @@ async def main() -> int: stderr=subprocess.PIPE, limit=1_000_000, ) - (got_error_diag, shutdown) = await handle_output(proc, args, crate_map) - - if shutdown: - # We got what we want so shut down early - proc.terminate() - await proc.wait() - res = 0 - else: - res = await proc.wait() + got_error_diag = await handle_output(proc, args, crate_map) + res = await proc.wait() if DEBUG: print( f"res={repr(res)} " - f"shutdown={shutdown} " f"got_error_diag={got_error_diag} " f"args.failure_filter {args.failure_filter}", end="\n", @@ -329,7 +337,7 @@ async def main() -> int: # Check for death by signal - this is always considered a failure if res < 0: - cmdline = " ".join(shlex.quote(arg) for arg in args.rustc) + cmdline = " ".join(shlex.quote(arg) for arg in rustc_cmd + rustc_args) eprint(f"Command exited with signal {-res}: command line: {cmdline}") elif args.failure_filter: # If failure filtering is enabled, then getting an error diagnostic is also @@ -365,6 +373,4 @@ async def main() -> int: return res -# There is a bug with asyncio.run() on Windows: -# https://bugs.python.org/issue39232 -sys.exit(asyncio.get_event_loop().run_until_complete(main())) +sys.exit(asyncio.run(main())) diff --git a/prelude/rust/tools/rustdoc_coverage.py b/prelude/rust/tools/rustdoc_coverage.py new file mode 100755 index 0000000000000..adaec1564008f --- /dev/null +++ b/prelude/rust/tools/rustdoc_coverage.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +import argparse +import json +import re +import subprocess +from pathlib import Path +from typing import List, NamedTuple + + +class Args(NamedTuple): + out: Path + cmd: List[str] + + +def arg_parse() -> Args: + parser = argparse.ArgumentParser() + parser.add_argument( + "out", + type=Path, + help="path to output", + ) + parser.add_argument("cmd", nargs=argparse.REMAINDER, help="command to run") + return Args(**vars(parser.parse_args())) + + +_REGEX = re.compile(r"(\d+(?:\.\d+)?)") + + +def main(): + args = arg_parse() + stdout = subprocess.run(args.cmd, capture_output=True, text=True).stdout + + with open(args.out, "w") as f: + # not using json output until https://github.com/rust-lang/rust/issues/117291 is fixed + # stdout looks like... 
+ # +--------+------------+------------+------------+------------+ + # | File | Documented | Percentage | Examples | Percentage | + # +--------+------------+------------+------------+------------+ + # | foo.rs | 1 | 1.0% | 0 | 0.0% | + # | bar.rs | 2 | 2.1% | 0 | 0.0% | + # +--------+------------+------------+------------+------------+ + # | Total | 3 | 3.1% | 0 | 0.0% | + # +--------+------------+------------+------------+------------+ + total_line = stdout.splitlines()[-2] + nums = _REGEX.findall(total_line) + if len(nums) != 4: + raise Exception( + f"using regex `{_REGEX.pattern}`, expected to find 4 numbers, got {len(nums)} " + f"for line: '{total_line}'" + ) + json.dump( + { + "documented": nums[0], + "documented_percentage": nums[1], + "examples": nums[2], + "examples_percentage": nums[3], + }, + f, + ) + + +if __name__ == "__main__": + main() diff --git a/prelude/rust/tools/tool_rules.bzl b/prelude/rust/tools/tool_rules.bzl index 26cd9b391a018..2184cd04c0192 100644 --- a/prelude/rust/tools/tool_rules.bzl +++ b/prelude/rust/tools/tool_rules.bzl @@ -14,10 +14,10 @@ def _get_rustc_cfg_impl(ctx: AnalysisContext) -> list[Provider]: out = ctx.actions.declare_output("rustc.cfg") cmd = [ - ctx.attrs.get_rustc_cfg[RunInfo], - cmd_args("--rustc=", toolchain_info.compiler, delimiter = ""), + toolchain_info.compiler, + cmd_args("--print=cfg=", out.as_output(), delimiter = ""), cmd_args("--target=", toolchain_info.rustc_target_triple, delimiter = ""), - cmd_args("--out=", out.as_output(), delimiter = ""), + cmd_args("--sysroot="), # We do not need a sysroot here, and not all platforms we support have one available (e.g. mips64-unknown-linux-gnuabi64) ] ctx.actions.run(cmd, category = "rustc_cfg") @@ -27,7 +27,23 @@ def _get_rustc_cfg_impl(ctx: AnalysisContext) -> list[Provider]: get_rustc_cfg = rule( impl = _get_rustc_cfg_impl, attrs = { - "get_rustc_cfg": attrs.default_only(attrs.exec_dep(providers = [RunInfo], default = "prelude//rust/tools:get_rustc_cfg")), + "_rust_toolchain": toolchains_common.rust(), + }, +) + +def _linkable_symbol_supports_no_std_impl(ctx: AnalysisContext) -> list[Provider]: + toolchain_info = ctx.attrs._rust_toolchain[RustToolchainInfo] + + # `#[no_std]` requires use of `advanced_unstable_linking` on the toolchain, + # as otherwise the panic handler is missing. + cfg = "--cfg=set_nostd\n" if toolchain_info.advanced_unstable_linking else "" + + flagfile = ctx.actions.write("cfg", cfg) + return [DefaultInfo(default_output = flagfile)] + +linkable_symbol_supports_no_std = rule( + impl = _linkable_symbol_supports_no_std_impl, + attrs = { "_rust_toolchain": toolchains_common.rust(), }, ) diff --git a/prelude/rust/tools/transitive_dependency_symlinks.py b/prelude/rust/tools/transitive_dependency_symlinks.py index 247d683fc3bb9..77959079b63ec 100755 --- a/prelude/rust/tools/transitive_dependency_symlinks.py +++ b/prelude/rust/tools/transitive_dependency_symlinks.py @@ -29,22 +29,31 @@ # # transitive_dependency_symlinks.py \ # --out-dir path/to/out \ -# --artifact path/to/cratename ../../libprovisional.rlib \ -# --artifact ... +# --artifacts path/to/artifacts.json # -# The tool reads the crate name from the file at "path/to/out". Suppose it's +# The input file artifact.json is an array of pairs, each an rlib and a file +# containing a crate name for it. +# +# [ +# ["../../libprovisional.rlib", "path/to/cratename"], +# ... +# ] +# +# The tool reads the crate name from the file at "path/to/cratename". Suppose it's # "thriftgenerated". 
It symlinks the given artifact as "0/libthriftgenerated.rlib" # within the specified output directory. In the event of collisions, there might # be multiple dirs created, just as we do for analysis-time named crates. import argparse +import json +import os from pathlib import Path -from typing import List, NamedTuple, Tuple +from typing import IO, NamedTuple class Args(NamedTuple): out_dir: Path - artifact: List[Tuple[Path, Path]] + artifacts: IO[str] def main(): @@ -55,11 +64,8 @@ def main(): required=True, ) parser.add_argument( - "--artifact", - action="append", - nargs=2, - type=Path, - metavar=("CRATENAME", "ARTIFACT"), + "--artifacts", + type=argparse.FileType(), required=True, ) args = Args(**vars(parser.parse_args())) @@ -69,9 +75,9 @@ def main(): # Add as many -Ldependency dirs as we need to avoid name conflicts deps_dirs = [{}] - for crate_name, artifact in args.artifact: - crate_name = crate_name.read_text().strip() - original_filename = artifact.name + for artifact, crate_name in json.load(args.artifacts): + crate_name = Path(crate_name).read_text().strip() + original_filename = os.path.basename(artifact) new_filename = "lib{}-{}".format( crate_name, original_filename.rsplit("-", 1)[1], diff --git a/prelude/sh_binary.bzl b/prelude/sh_binary.bzl index 344995e57e263..c54dcd5c07bf6 100644 --- a/prelude/sh_binary.bzl +++ b/prelude/sh_binary.bzl @@ -18,9 +18,17 @@ def _derive_link(artifact): return paths.join(artifact.owner.package, artifact.owner.name) -def _generate_script(name: str, main: Artifact, resources: list[Artifact], actions: AnalysisActions, is_windows: bool) -> (Artifact, Artifact): +def _generate_script( + name: str, + main: Artifact, + resources: list[Artifact], + append_script_extension: bool, + actions: AnalysisActions, + is_windows: bool) -> (Artifact, Artifact): main_path = main.short_path - if is_windows: + if not append_script_extension: + main_link = main_path + elif is_windows: main_link = main_path if main_path.endswith(".bat") or main_path.endswith(".cmd") else main_path + ".bat" else: main_link = main_path if main_path.endswith(".sh") else main_path + ".sh" @@ -42,18 +50,9 @@ def _generate_script(name: str, main: Artifact, resources: list[Artifact], actio # construct links directly to things (which buck1 actually also did for its # BUCK_DEFAULT_RUNTIME_RESOURCES). if not is_windows: - script_content = cmd_args([ + script_content = cmd_args( "#!/usr/bin/env bash", "set -e", - # This is awkward for two reasons: args doesn't support format strings - # and will insert a newline between items and so __RESOURCES_ROOT - # is put in a bash array, and we want it to be relative to script's - # dir, not the script itself, but there's no way to do that in - # starlark. To deal with this, we strip the first 3 characters - # (`../`). - "__RESOURCES_ROOT=(", - resources_dir, - ")", # If we access this sh_binary via a unhashed symlink we need to # update the relative source. '__SRC="${BASH_SOURCE[0]}"', @@ -64,7 +63,7 @@ def _generate_script(name: str, main: Artifact, resources: list[Artifact], actio # identify what the right format is. For now, this variable lets # callees disambiguate (see D28960177 for more context). 
"export BUCK_SH_BINARY_VERSION_UNSTABLE=2", - "export BUCK_PROJECT_ROOT=$__SCRIPT_DIR/\"${__RESOURCES_ROOT:3}\"", + cmd_args("export BUCK_PROJECT_ROOT=\"$__SCRIPT_DIR/", resources_dir, "\"", delimiter = ""), # In buck1, the paths for resources that are outputs of rules have # different paths in BUCK_PROJECT_ROOT and # BUCK_DEFAULT_RUNTIME_RESOURCES, but we use the same paths. buck1's @@ -74,13 +73,12 @@ def _generate_script(name: str, main: Artifact, resources: list[Artifact], actio # sources, the paths are the same for both. "export BUCK_DEFAULT_RUNTIME_RESOURCES=\"$BUCK_PROJECT_ROOT\"", "exec \"$BUCK_PROJECT_ROOT/{}\" \"$@\"".format(main_link), - ]).relative_to(script) + relative_to = (script, 1), + ) else: - script_content = cmd_args([ + script_content = cmd_args( "@echo off", "setlocal EnableDelayedExpansion", - "set __RESOURCES_ROOT=^", - resources_dir, # Fully qualified script path. "set __SRC=%~f0", # This is essentially a realpath. @@ -88,11 +86,11 @@ def _generate_script(name: str, main: Artifact, resources: list[Artifact], actio # Get parent folder. 'for %%a in ("%__SRC%") do set "__SCRIPT_DIR=%%~dpa"', "set BUCK_SH_BINARY_VERSION_UNSTABLE=2", - # ':~3' strips the first 3 chars of __RESOURCES_ROOT. - "set BUCK_PROJECT_ROOT=%__SCRIPT_DIR%\\!__RESOURCES_ROOT:~3!", + cmd_args("set BUCK_PROJECT_ROOT=%__SCRIPT_DIR%\\", resources_dir, delimiter = ""), "set BUCK_DEFAULT_RUNTIME_RESOURCES=%BUCK_PROJECT_ROOT%", "%BUCK_PROJECT_ROOT%\\{} %*".format(main_link), - ]).relative_to(script) + relative_to = (script, 1), + ) actions.write( script, script_content, @@ -111,13 +109,22 @@ def sh_binary_impl(ctx): fail("sh_binary deps unsupported. Got `{}`".format(repr(ctx.attrs))) is_windows = ctx.attrs._target_os_type[OsLookup].platform == "windows" - (script, resources_dir) = _generate_script(ctx.label.name, ctx.attrs.main, ctx.attrs.resources, ctx.actions, is_windows) + (script, resources_dir) = _generate_script( + ctx.label.name, + ctx.attrs.main, + ctx.attrs.resources, + ctx.attrs.append_script_extension, + ctx.actions, + is_windows, + ) + + script = script.with_associated_artifacts([resources_dir]) return [ DefaultInfo(default_output = script, other_outputs = [resources_dir]), RunInfo( # TODO(cjhopman): Figure out if we need to specify the link targets # as inputs. We shouldn't need to, but need to verify it. - args = cmd_args(script).hidden(resources_dir), + args = cmd_args(script, hidden = resources_dir), ), ] diff --git a/prelude/sh_test.bzl b/prelude/sh_test.bzl index d5b4c5b4f8855..9bb08fabdd010 100644 --- a/prelude/sh_test.bzl +++ b/prelude/sh_test.bzl @@ -5,7 +5,7 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
-load("@prelude//tests:re_utils.bzl", "get_re_executor_from_props") +load("@prelude//tests:re_utils.bzl", "get_re_executors_from_props") load("@prelude//test/inject_test_run_info.bzl", "inject_test_run_info") def sh_test_impl(ctx: AnalysisContext) -> list[Provider]: @@ -14,22 +14,24 @@ def sh_test_impl(ctx: AnalysisContext) -> list[Provider]: if ctx.attrs.list_args or ctx.attrs.list_env or ctx.attrs.run_args or ctx.attrs.run_env: fail("An unsupported attribute was passed") - args = cmd_args() + args_args = [] + args_hidden = [] if ctx.attrs.test != None: if type(ctx.attrs.test) == "artifact": - args.add(ctx.attrs.test) + args_args.append(ctx.attrs.test) elif isinstance(ctx.attrs.test, Dependency): run_info = ctx.attrs.test.get(RunInfo) if run_info != None: - args.add(run_info.args) + args_args.append(run_info.args) else: info = ctx.attrs.test[DefaultInfo] - args.add(info.default_outputs).hidden(info.other_outputs) + args_args.append(info.default_outputs) + args_hidden.append(info.other_outputs) else: fail("Unexpected type for test attribute") - args.hidden(ctx.attrs.resources) + args_hidden.append(ctx.attrs.resources) deps = [] for dep in ctx.attrs.deps: @@ -37,15 +39,17 @@ def sh_test_impl(ctx: AnalysisContext) -> list[Provider]: deps.extend(info.default_outputs) deps.extend(info.other_outputs) - args.hidden(deps) + args_hidden.append(deps) + + args = cmd_args(args_args, hidden = args_hidden) command = [args] + ctx.attrs.args # Setup a RE executor based on the `remote_execution` param. - re_executor = get_re_executor_from_props(ctx) + re_executor, executor_overrides = get_re_executors_from_props(ctx) # We implicitly make the target run from the project root if remote - # excution options were specified + # execution options were specified run_from_project_root = "buck2_run_from_project_root" in (ctx.attrs.labels or []) or re_executor != None # TODO support default info and runinfo properly by writing a sh script that invokes the command properly @@ -59,6 +63,7 @@ def sh_test_impl(ctx: AnalysisContext) -> list[Provider]: labels = ctx.attrs.labels, contacts = ctx.attrs.contacts, default_executor = re_executor, + executor_overrides = executor_overrides, run_from_project_root = run_from_project_root, use_project_relative_paths = run_from_project_root, ), diff --git a/prelude/test/inject_test_run_info.bzl b/prelude/test/inject_test_run_info.bzl index 811d13a1cc60b..bba28d60fa0d5 100644 --- a/prelude/test/inject_test_run_info.bzl +++ b/prelude/test/inject_test_run_info.bzl @@ -5,11 +5,22 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +load( + "@prelude//tests:re_utils.bzl", + "maybe_add_run_as_bundle_label", +) + def inject_test_run_info(ctx: AnalysisContext, test_info: ExternalRunnerTestInfo) -> list[Provider]: # Access this here so we get failures in CI if we forget to inject it # anywhere, regardless of whether an `env` is used. inject_test_env = ctx.attrs._inject_test_env[RunInfo] + # `if test_info.labels != None` doesn't work because `None` is not of type `list[str]`, + # yet it is None in some cases... this hack lets us check for None without a type error. 
+ if getattr(test_info, "labels", None) != None: + # If forcing RE on tpx, check if the test suite should be run as a bundle + maybe_add_run_as_bundle_label(ctx, test_info.labels) + if (not test_info.env) or _exclude_test_env_from_run_info(ctx): return [test_info, RunInfo(args = test_info.command)] @@ -22,6 +33,7 @@ def inject_test_run_info(ctx: AnalysisContext, test_info: ExternalRunnerTestInfo for (k, v) in test_info.env.items() }, with_inputs = True, + absolute = True, ) return [test_info, RunInfo(args = [inject_test_env, env_file, "--", test_info.command])] diff --git a/prelude/test/tools/BUCK b/prelude/test/tools/BUCK deleted file mode 100644 index 89ed9074211ca..0000000000000 --- a/prelude/test/tools/BUCK +++ /dev/null @@ -1,7 +0,0 @@ -prelude = native - -prelude.python_bootstrap_binary( - name = "inject_test_env", - main = "inject_test_env.py", - visibility = ["PUBLIC"], -) diff --git a/prelude/test/tools/BUCK.v2 b/prelude/test/tools/BUCK.v2 new file mode 100644 index 0000000000000..1c3928706f2e0 --- /dev/null +++ b/prelude/test/tools/BUCK.v2 @@ -0,0 +1,13 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +prelude = native + +prelude.python_bootstrap_binary( + name = "inject_test_env", + main = "inject_test_env.py", + visibility = ["PUBLIC"], +) diff --git a/prelude/tests/re_utils.bzl b/prelude/tests/re_utils.bzl index e2fa39d44fa88..b110a4d091bff 100644 --- a/prelude/tests/re_utils.bzl +++ b/prelude/tests/re_utils.bzl @@ -5,24 +5,81 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -# @starlark-rust: allow_string_literals_in_type_expr - load("@prelude//:build_mode.bzl", "BuildModeInfo") +load("@prelude//tests:remote_test_execution_toolchain.bzl", "RemoteTestExecutionToolchainInfo") +load("@prelude//utils:expect.bzl", "expect_non_none") + +ReArg = record( + re_props = field(dict | None), + default_run_as_bundle = field(bool | None), +) + +def _get_re_arg(ctx: AnalysisContext) -> ReArg: + if not hasattr(ctx.attrs, "remote_execution"): + return ReArg(re_props = None, default_run_as_bundle = False) + + if ctx.attrs.remote_execution != None: + # If this is a string, look up the re_props on the RE toolchain. + if type(ctx.attrs.remote_execution) == type(""): + expect_non_none(ctx.attrs._remote_test_execution_toolchain) + return ReArg( + re_props = + ctx.attrs._remote_test_execution_toolchain[RemoteTestExecutionToolchainInfo].profiles[ctx.attrs.remote_execution], + default_run_as_bundle = + ctx.attrs._remote_test_execution_toolchain[RemoteTestExecutionToolchainInfo].default_run_as_bundle, + ) + + return ReArg(re_props = ctx.attrs.remote_execution, default_run_as_bundle = False) -def get_re_executor_from_props(ctx: AnalysisContext) -> ["command_executor_config", None]: + # Check for a default RE option on the toolchain. 
+ re_toolchain = ctx.attrs._remote_test_execution_toolchain + if re_toolchain != None and re_toolchain[RemoteTestExecutionToolchainInfo].default_profile != None: + return ReArg( + re_props = re_toolchain[RemoteTestExecutionToolchainInfo].default_profile, + default_run_as_bundle = re_toolchain[RemoteTestExecutionToolchainInfo].default_run_as_bundle, + ) + + return ReArg(re_props = None, default_run_as_bundle = False) + +def maybe_add_run_as_bundle_label(ctx: AnalysisContext, labels: list[str]) -> None: + if "re_ignore_force_run_as_bundle" in labels: + return + re_arg = _get_re_arg(ctx) + if re_arg.default_run_as_bundle or read_config("tpx", "force_run_as_bundle") == "True": + labels.extend(["run_as_bundle"]) + +def get_re_executors_from_props(ctx: AnalysisContext) -> ([CommandExecutorConfig, None], dict[str, CommandExecutorConfig]): """ - Convert the `remote_execution` properties param into a `CommandExecutorConfig` - to use with test providers. + Convert the `remote_execution` properties param into `CommandExecutorConfig` objects to use with test providers. + + Returns (default_executor, executor_overrides). """ - re_props = ctx.attrs.remote_execution + re_props = _get_re_arg(ctx).re_props if re_props == None: - return None + # If no RE args are set and an RE config is specified + if read_config("tpx", "force_mac_re_props") == "True": + # In the case we want to force tests on mac RE + re_props = { + "capabilities": { + "platform": "mac", + "subplatform": "any", + }, + "use_case": read_config("remoteexecution", "use_case"), + } + + else: + return None, {} re_props_copy = dict(re_props) capabilities = re_props_copy.pop("capabilities") use_case = re_props_copy.pop("use_case") + listing_capabilities = re_props_copy.pop("listing_capabilities", None) remote_cache_enabled = re_props_copy.pop("remote_cache_enabled", None) + re_dependencies = re_props_copy.pop("dependencies", []) + local_enabled = re_props_copy.pop("local_enabled", False) + local_listing_enabled = re_props_copy.pop("local_listing_enabled", False) + re_resource_units = re_props_copy.pop("resource_units", None) if re_props_copy: unexpected_props = ", ".join(re_props_copy.keys()) fail("found unexpected re props: " + unexpected_props) @@ -32,11 +89,25 @@ def get_re_executor_from_props(ctx: AnalysisContext) -> ["command_executor_confi if build_mode_info != None: remote_execution_action_key = "{}={}".format(build_mode_info.cell, build_mode_info.mode) - return CommandExecutorConfig( - local_enabled = False, + default_executor = CommandExecutorConfig( + local_enabled = local_enabled, remote_enabled = True, remote_execution_properties = capabilities, remote_execution_use_case = use_case or "tpx-default", remote_cache_enabled = remote_cache_enabled, remote_execution_action_key = remote_execution_action_key, + remote_execution_dependencies = re_dependencies, + remote_execution_resource_units = re_resource_units, ) + listing_executor = default_executor + if listing_capabilities: + listing_executor = CommandExecutorConfig( + local_enabled = local_listing_enabled or False, + remote_enabled = True, + remote_execution_properties = listing_capabilities, + remote_execution_use_case = use_case or "tpx-default", + remote_cache_enabled = remote_cache_enabled, + remote_execution_action_key = remote_execution_action_key, + remote_execution_resource_units = re_resource_units, + ) + return default_executor, {"listing": listing_executor} diff --git a/prelude/tests/remote_test_execution_toolchain.bzl b/prelude/tests/remote_test_execution_toolchain.bzl new 
file mode 100644 index 0000000000000..306c898d8c1ca --- /dev/null +++ b/prelude/tests/remote_test_execution_toolchain.bzl @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +RemoteTestExecutionToolchainInfo = provider( + fields = [ + # The profile to use by default. + "default_profile", + # A dictionary of string names to pre-registered profiles. Rules can + # use the profile name to references these. + "profiles", + # A bool indicating whether the test suites executed by this toolchain + # should be run in a bundle. This makes all tests in a suite run in + # a single RE action as opposed to one action per test. + "default_run_as_bundle", + ], +) diff --git a/prelude/tests/tpx_re_legacy.bzl b/prelude/tests/tpx_re_legacy.bzl deleted file mode 100644 index 0c5c38bd000ff..0000000000000 --- a/prelude/tests/tpx_re_legacy.bzl +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under both the MIT license found in the -# LICENSE-MIT file in the root directory of this source tree and the Apache -# License, Version 2.0 found in the LICENSE-APACHE file in the root directory -# of this source tree. - -# @starlark-rust: allow_string_literals_in_type_expr - -load("@prelude//utils:utils.bzl", "expect") - -_RE_ENABLED = "supports_remote_execution" -_RE_OPTS_LABEL_PREFIX = "re_opts_capabilities=" -_RE_OPTS_KEYS = ["platform", "subplatform", "gpu_name"] - -def _parse_re_opts(labels: list[str]) -> [dict[str, str], None]: - """ - Parse out JSON-embedded RE options like: - 're_opts_capabilities={"platform": gpu-remote-execution, "gpu_name": "A100"}' - """ - - for label in labels: - if label.startswith(_RE_OPTS_LABEL_PREFIX): - result = json.decode(label[len(_RE_OPTS_LABEL_PREFIX):]) - for key in result.keys(): - expect(key in _RE_OPTS_KEYS, "unexpected key in RE options label: {}", key) - return result - - return None - -# TODO(agallagher): Parsing RE options via JSON embedded in labels isn't a great -# UI, and we just do it here to support existing use cases. Ideally, though, we'd -# present a better UI (e.g. an `re_opts` param for tests) and use that instead. -# TODO(nga): remove "command_executor_config_builder", this is dead code after the version bump. -def get_re_executor_from_labels(labels: list[str]) -> ["command_executor_config_builder", "command_executor_config", None]: - """ - Parse legacy RE-enablement test labels and use them to configure a test RE - executor to run the test with. - - The UI is best documented at: - https://www.internalfb.com/intern/wiki/Remote_Execution/Users/GPU_RE_Contbuild_Migration/ - """ - - # If the special "RE enabled" label isn't present, abort. - if _RE_ENABLED not in labels: - return None - - # If there's no options found in labels, don't use RE. This diverges from - # v1 behavior, but v2+tpx needs some platform to be set and so we probably - # want to the toolchain tp provide some exec-platform compatible platform. 
-    re_opts = _parse_re_opts(labels)
-    if re_opts == None:
-        return None
-
-    return CommandExecutorConfig(
-        local_enabled = False,
-        remote_enabled = True,
-        remote_execution_properties = re_opts,
-        remote_execution_use_case = "tpx-default",
-    )
diff --git a/prelude/third-party/build.bzl b/prelude/third-party/build.bzl
new file mode 100644
index 0000000000000..b194132adaf92
--- /dev/null
+++ b/prelude/third-party/build.bzl
@@ -0,0 +1,150 @@
+load("@prelude//:artifacts.bzl", "ArtifactExt", "artifact_ext")
+load("@prelude//:paths.bzl", path_utils = "paths")
+load("@prelude//cxx:cxx_context.bzl", "get_cxx_toolchain_info")
+load("@prelude//cxx:preprocessor.bzl", "CPreprocessorInfo")
+load(
+    "@prelude//linking:shared_libraries.bzl",
+    "SharedLibrary",
+    "gen_shared_libs_action",
+)
+load(
+    "@prelude//python:manifest.bzl",
+    "ManifestInfo",  # @unused Used as a type
+)
+load(":providers.bzl", "ThirdPartyBuild", "ThirdPartyBuildInfo", "third_party_build_info")
+
+def project_from_label(label: Label) -> str:
+    """
+    Generate a unique third-party project name for the given label.
+    """
+    return str(label.raw_target())
+
+def prefix_from_label(label: Label, prefix: str = "/usr/local") -> str:
+    """
+    Generate a unique third-party prefix for the given label.
+    """
+    return path_utils.join(prefix, "{}-{}".format(label.name, sha256(str(label.raw_target()))[:7]))
+
+def create_third_party_build_root(
+        ctx: AnalysisContext,
+        out: str = "__third_party_build__",
+        manifests: list[(str, ManifestInfo)] = [],
+        shared_libs: list[SharedLibrary] = [],
+        cxx_headers: list[CPreprocessorInfo] = [],
+        paths: list[(str, Artifact)] = []) -> ArtifactExt:
+    """
+    Installs components into a unix-y install dir which can be used by other
+    third-party builds.
+    """
+
+    cmd = cmd_args()
+    cmd.add(ctx.attrs._create_third_party_build_root[RunInfo])
+
+    for dst, manifest in manifests:
+        cmd.add(
+            "--manifest",
+            dst,
+            cmd_args(manifest.manifest, hidden = [a for a, _ in manifest.artifacts]),
+        )
+
+    for pps in cxx_headers:
+        for pp in pps.set.value:
+            for hdr in pp.headers:
+                cmd.add("--path", path_utils.join("include", hdr.namespace, hdr.name), hdr.artifact)
+
+    for dst, path in paths:
+        cmd.add("--path", dst, path)
+
+    def gen(actions, output, shared_libs):
+        lines = []
+        if shared_libs:
+            sh_ext = get_cxx_toolchain_info(ctx).linker_info.shared_library_name_format.format("")
+            for soname, shared_lib in shared_libs.items():
+                lines.append(cmd_args("--path", path_utils.join("lib", soname), shared_lib.lib.output))
+
+                # Linkers link `-l<name>` dynamically (by default) by looking for `lib<name>.so`,
+                # so make sure this exists by creating it as a symlink (to the versioned name)
+                # if it doesn't already.
+                if sh_ext in soname and not soname.endswith(sh_ext):
+                    idx = soname.index(sh_ext)
+                    link_name = soname[:idx + 3]
+                    lines.append(cmd_args("--symlink", path_utils.join("lib", link_name), soname))
+        return actions.write(output.as_output(), lines)
+
+    # Add shlibs via argsfile.
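The symlink derivation in gen above, extracted into a small sketch (assuming the toolchain's shared library suffix is ".so"):

    # Given a versioned soname, derive the plain linker name that `-l` lookup expects.
    sh_ext = ".so"            # assumed result of shared_library_name_format.format("")
    soname = "libfoo.so.1.2"  # illustrative versioned soname
    if sh_ext in soname and not soname.endswith(sh_ext):
        idx = soname.index(sh_ext)
        link_name = soname[:idx + len(sh_ext)]  # "libfoo.so"
        # emit: --symlink lib/libfoo.so -> libfoo.so.1.2, so that -lfoo resolves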
+ argsfile = gen_shared_libs_action( + actions = ctx.actions, + out = "shared_libs_args.txt", + shared_libs = shared_libs, + gen_action = gen, + ) + cmd.add(cmd_args(argsfile, format = "@{}", hidden = [s.lib.output for s in shared_libs])) + + out = ctx.actions.declare_output(out, dir = True) + cmd.add(out.as_output()) + + ctx.actions.run(cmd, category = "third_party_build_root") + + return artifact_ext(out) + +def create_third_party_build_info( + ctx: AnalysisContext, + project: str | None = None, + prefix: str | None = None, + out: str = "__third_party_build__", + manifests: list[(str, ManifestInfo)] = [], + shared_libs: list[SharedLibrary] = [], + cxx_headers: list[CPreprocessorInfo] = [], + cxx_header_dirs: list[str] = [], + paths: list[(str, Artifact)] = [], + deps: list[Dependency] = []) -> ThirdPartyBuildInfo: + if project == None: + project = project_from_label(ctx.label) + if prefix == None: + prefix = prefix_from_label(ctx.label) + + root = create_third_party_build_root( + ctx = ctx, + out = out, + manifests = manifests, + cxx_headers = cxx_headers, + shared_libs = shared_libs, + paths = paths, + ) + + # Build manifest. + def gen_manifest(actions, output, shared_libs): + manifest = {} + manifest["project"] = project + manifest["prefix"] = prefix + if cxx_header_dirs: + manifest["c_include_paths"] = cxx_header_dirs + manifest["cxx_include_paths"] = cxx_header_dirs + if shared_libs: + manifest["runtime_lib_paths"] = ["lib"] + libs = [] + sh_ext = get_cxx_toolchain_info(ctx).linker_info.shared_library_name_format.format("") + for soname in shared_libs: + if sh_ext in soname: + lib = soname.split(sh_ext)[0].removeprefix("lib") + libs.append("-l{}".format(lib)) + manifest["libs"] = libs + return actions.write_json(output.as_output(), manifest, pretty = True) + + manifest = gen_shared_libs_action( + actions = ctx.actions, + out = out + ".json", + shared_libs = shared_libs, + gen_action = gen_manifest, + ) + + return third_party_build_info( + actions = ctx.actions, + build = ThirdPartyBuild( + project = project, + prefix = prefix, + root = root, + manifest = manifest, + ), + deps = deps, + ) diff --git a/prelude/third-party/hmaptool/BUCK.v2 b/prelude/third-party/hmaptool/BUCK.v2 new file mode 100644 index 0000000000000..4a96edf2e1e0d --- /dev/null +++ b/prelude/third-party/hmaptool/BUCK.v2 @@ -0,0 +1,19 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +prelude = native # Avoid warnings and auto-formatters + +prelude.export_file( + name = "_hmaptool", + src = "hmaptool", + mode = "reference", +) + +prelude.command_alias( + name = "hmaptool", + exe = ":_hmaptool", + visibility = ["PUBLIC"], +) diff --git a/prelude/third-party/hmaptool/METADATA.bzl b/prelude/third-party/hmaptool/METADATA.bzl new file mode 100644 index 0000000000000..aa62f8b1b576a --- /dev/null +++ b/prelude/third-party/hmaptool/METADATA.bzl @@ -0,0 +1,7 @@ +METADATA = { + "maintainers": [ + "build_infra", + ], + "name": "hmaptool", + "owner": "build_infra", +} diff --git a/prelude/third-party/hmaptool/README.md b/prelude/third-party/hmaptool/README.md new file mode 100644 index 0000000000000..c98aab3ce52a1 --- /dev/null +++ b/prelude/third-party/hmaptool/README.md @@ -0,0 +1,22 @@ +# hmaptool + +This tool was copied from llvm-project. See https://github.com/llvm/llvm-project/blob/main/clang/utils/hmaptool/hmaptool + +## About + +Header maps are binary files used by Xcode, which are used to map +header names or paths to other locations. 
Clang has support for +those since its inception, but there's not a lot of header map +testing around. + +Since it's a binary format, testing becomes pretty much brittle +and its hard to even know what's inside if you don't have the +appropriate tools. + +Add a python based tool that allows creating and dumping header +maps based on a json description of those. While here, rewrite +tests to use the tool and remove the binary files from the tree. + +This tool was initially written by Daniel Dunbar. + +Thanks to Stella Stamenova for helping make this work on Windows. diff --git a/prelude/third-party/hmaptool/hmaptool b/prelude/third-party/hmaptool/hmaptool new file mode 100755 index 0000000000000..e6e99058b4e54 --- /dev/null +++ b/prelude/third-party/hmaptool/hmaptool @@ -0,0 +1,328 @@ +#!/usr/bin/env python3 + +# ===----------------------------------------------------------------------=== # +# +# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +# See https://llvm.org/LICENSE.txt for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ===----------------------------------------------------------------------=== # + +from __future__ import absolute_import, division, print_function + +import json +import optparse +import os +import struct +import sys + +### + +k_header_magic_LE = b'pamh' +k_header_magic_BE = b'hmap' + +def hmap_hash(str): + """hash(str) -> int + + Apply the "well-known" headermap hash function. + """ + + return sum((ord(c.lower()) * 13 + for c in str), 0) + +class HeaderMap(object): + @staticmethod + def frompath(path): + with open(path, 'rb') as f: + magic = f.read(4) + if magic == k_header_magic_LE: + endian_code = '<' + elif magic == k_header_magic_BE: + endian_code = '>' + else: + raise SystemExit("error: %s: not a headermap" % ( + path,)) + + # Read the header information. + header_fmt = endian_code + 'HHIIII' + header_size = struct.calcsize(header_fmt) + data = f.read(header_size) + if len(data) != header_size: + raise SystemExit("error: %s: truncated headermap header" % ( + path,)) + + (version, reserved, strtable_offset, num_entries, + num_buckets, max_value_len) = struct.unpack(header_fmt, data) + + if version != 1: + raise SystemExit("error: %s: unknown headermap version: %r" % ( + path, version)) + if reserved != 0: + raise SystemExit("error: %s: invalid reserved value in header" % ( + path,)) + + # The number of buckets must be a power of two. + if num_buckets == 0 or (num_buckets & num_buckets - 1) != 0: + raise SystemExit("error: %s: invalid number of buckets" % ( + path,)) + + # Read all of the buckets. + bucket_fmt = endian_code + 'III' + bucket_size = struct.calcsize(bucket_fmt) + buckets_data = f.read(num_buckets * bucket_size) + if len(buckets_data) != num_buckets * bucket_size: + raise SystemExit("error: %s: truncated headermap buckets" % ( + path,)) + buckets = [struct.unpack(bucket_fmt, + buckets_data[i*bucket_size:(i+1)*bucket_size]) + for i in range(num_buckets)] + + # Read the string table; the format doesn't explicitly communicate the + # size of the string table (which is dumb), so assume it is the rest of + # the file. 
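A quick illustration of the hash defined above: it lower-cases each character, so lookups are case-insensitive, and bucket indices are taken against the power-of-two table size:

    assert hmap_hash("Foo/Bar.h") == hmap_hash("foo/bar.h")
    num_buckets = 8  # always a power of two, as validated above
    idx = hmap_hash("Foo/Bar.h") & (num_buckets - 1)  # bucket index in [0, 8)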
+ f.seek(0, 2) + strtable_size = f.tell() - strtable_offset + f.seek(strtable_offset) + + if strtable_size == 0: + raise SystemExit("error: %s: unable to read zero-sized string table"%( + path,)) + strtable = f.read(strtable_size) + + if len(strtable) != strtable_size: + raise SystemExit("error: %s: unable to read complete string table"%( + path,)) + if strtable[-1] != 0: + raise SystemExit("error: %s: invalid string table in headermap" % ( + path,)) + + return HeaderMap(num_entries, buckets, strtable) + + def __init__(self, num_entries, buckets, strtable): + self.num_entries = num_entries + self.buckets = buckets + self.strtable = strtable + + def get_string(self, idx): + if idx >= len(self.strtable): + raise SystemExit("error: %s: invalid string index" % ( + path,)) + end_idx = self.strtable.index(b'\0', idx) + return self.strtable[idx:end_idx].decode() + + @property + def mappings(self): + for key_idx,prefix_idx,suffix_idx in self.buckets: + if key_idx == 0: + continue + yield (self.get_string(key_idx), + self.get_string(prefix_idx) + self.get_string(suffix_idx)) + +class StringTable: + def __init__(self): + # A string table offset of 0 is interpreted as an empty bucket, so it's + # important we don't assign an actual string to that offset. + self.table = "\0" + # For the same reason we don't want the empty string having a 0 offset. + self.offsets = {} + + def add(self, string): + offset = self.offsets.get(string) + if offset: + return offset + + offset = len(self.table) + self.table += string + "\0" + self.offsets[string] = offset + return offset + +### + +def action_dump(name, args): + "dump a headermap file" + + parser = optparse.OptionParser("%%prog %s [options] " % ( + name,)) + parser.add_option("-v", "--verbose", dest="verbose", + help="show more verbose output [%default]", + action="store_true", default=False) + parser.add_option("--json", dest="json", + help="output as JSON [%default]", + action="store_true", default=False) + (opts, args) = parser.parse_args(args) + + if len(args) != 1: + parser.error("invalid number of arguments") + + path, = args + + hmap = HeaderMap.frompath(path) + + # Dump all of the buckets. + if opts.verbose: + print ('headermap: %r' % (path,)) + print (' num entries: %d' % (hmap.num_entries,)) + print (' num buckets: %d' % (len(hmap.buckets),)) + print (' string table size: %d' % (len(hmap.strtable),)) + for i,bucket in enumerate(hmap.buckets): + key_idx,prefix_idx,suffix_idx = bucket + + if key_idx == 0: + continue + + # Get the strings. 
+ key = hmap.get_string(key_idx) + prefix = hmap.get_string(prefix_idx) + suffix = hmap.get_string(suffix_idx) + + print (" bucket[%d]: %r -> (%r, %r) -- %d" % ( + i, key, prefix, suffix, (hmap_hash(key) & (len(hmap.buckets) - 1)))) + elif opts.json: + print(json.dumps({"mappings": dict(hmap.mappings)}, indent=4)) + else: + print ('Header Map: %s' % (path,)) + mappings = sorted(hmap.mappings) + for key,value in mappings: + print ("%s -> %s" % (key, value)) + print () + +def next_power_of_two(value): + if value < 0: + raise ArgumentError + return 1 if value == 0 else 2**(value - 1).bit_length() + +def action_write(name, args): + "write a headermap file from a JSON definition" + + parser = optparse.OptionParser("%%prog %s [options] " % ( + name,)) + (opts, args) = parser.parse_args(args) + + if len(args) != 2: + parser.error("invalid number of arguments") + + input_path,output_path = args + + with open(input_path, "r") as f: + input_data = json.load(f) + + # Compute the headermap contents, we make a table that is 1/3 full. + mappings = input_data['mappings'] + num_buckets = next_power_of_two(len(mappings) * 3) + + table = [(0, 0, 0) + for i in range(num_buckets)] + max_value_len = 0 + strtable = StringTable() + for key,value in mappings.items(): + if not isinstance(key, str): + key = key.decode('utf-8') + if not isinstance(value, str): + value = value.decode('utf-8') + max_value_len = max(max_value_len, len(value)) + + key_idx = strtable.add(key) + prefix, suffix = os.path.split(value) + # This guarantees that prefix + suffix == value in all cases, including when + # prefix is empty or contains a trailing slash or suffix is empty (hence the use + # of `len(value) - len(suffix)` instead of just `-len(suffix)`. + prefix += value[len(prefix) : len(value) - len(suffix)] + prefix_idx = strtable.add(prefix) + suffix_idx = strtable.add(suffix) + + hash = hmap_hash(key) + for i in range(num_buckets): + idx = (hash + i) % num_buckets + if table[idx][0] == 0: + table[idx] = (key_idx, prefix_idx, suffix_idx) + break + else: + raise RuntimeError + + endian_code = '<' + magic = k_header_magic_LE + magic_size = 4 + header_fmt = endian_code + 'HHIIII' + header_size = struct.calcsize(header_fmt) + bucket_fmt = endian_code + 'III' + bucket_size = struct.calcsize(bucket_fmt) + strtable_offset = magic_size + header_size + num_buckets * bucket_size + header = (1, 0, strtable_offset, len(mappings), + num_buckets, max_value_len) + + # Write out the headermap. + with open(output_path, 'wb') as f: + f.write(magic) + f.write(struct.pack(header_fmt, *header)) + for bucket in table: + f.write(struct.pack(bucket_fmt, *bucket)) + f.write(strtable.table.encode()) + +def action_tovfs(name, args): + "convert a headermap to a VFS layout" + + parser = optparse.OptionParser("%%prog %s [options] " % ( + name,)) + parser.add_option("", "--build-path", dest="build_path", + help="build path prefix", + action="store", type=str) + (opts, args) = parser.parse_args(args) + + if len(args) != 2: + parser.error("invalid number of arguments") + if opts.build_path is None: + parser.error("--build-path is required") + + input_path,output_path = args + + hmap = HeaderMap.frompath(input_path) + + # Create the table for all the objects. + vfs = {} + vfs['version'] = 0 + build_dir_contents = [] + vfs['roots'] = [{ + 'name' : opts.build_path, + 'type' : 'directory', + 'contents' : build_dir_contents }] + + # We assume we are mapping framework paths, so a key of "Foo/Bar.h" maps to + # "/Foo.framework/Headers/Bar.h". 
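In isolation, the framework-style rewrite performed by the loop below looks like this (values are illustrative):

    # A two-component key "Foo/Bar.h" becomes a file entry inside Foo.framework/Headers.
    key, value = "Foo/Bar.h", "/repo/foo/include/Bar.h"  # made-up mapping
    components = key.split("/")
    assert len(components) == 2  # keys without exactly one slash are skipped
    framework_name, header_name = components
    entry = {
        "name": "%s.framework/Headers/%s" % (framework_name, header_name),
        "type": "file",
        "external-contents": value,
    }
    # entry["name"] == "Foo.framework/Headers/Bar.h"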
+ for key,value in hmap.mappings: + # If this isn't a framework style mapping, ignore it. + components = key.split('/') + if len(components) != 2: + continue + framework_name,header_name = components + build_dir_contents.append({ + 'name' : '%s.framework/Headers/%s' % (framework_name, + header_name), + 'type' : 'file', + 'external-contents' : value }) + + with open(output_path, 'w') as f: + json.dump(vfs, f, indent=2) + +commands = dict((name[7:].replace("_","-"), f) + for name,f in locals().items() + if name.startswith('action_')) + +def usage(): + print ("Usage: %s command [options]" % ( + os.path.basename(sys.argv[0])), file=sys.stderr) + print (file=sys.stderr) + print ("Available commands:", file=sys.stderr) + cmds_width = max(map(len, commands)) + for name,func in sorted(commands.items()): + print (" %-*s - %s" % (cmds_width, name, func.__doc__), file=sys.stderr) + sys.exit(1) + +def main(): + if len(sys.argv) < 2 or sys.argv[1] not in commands: + usage() + + cmd = sys.argv[1] + commands[cmd](cmd, sys.argv[2:]) + +if __name__ == '__main__': + main() diff --git a/prelude/third-party/pkgconfig.bzl b/prelude/third-party/pkgconfig.bzl new file mode 100644 index 0000000000000..e9dbc5f122eb9 --- /dev/null +++ b/prelude/third-party/pkgconfig.bzl @@ -0,0 +1,87 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//:prelude.bzl", "native") + +# NB: Meta engineers should not use this! Please use tp2 instead: +# https://fburl.com/wiki/oyy0fi5j +# +# If a system has a package installed and that package provides a `.pc` file +# this rule can be used to make that library visible to other rules. The `name` +# of this rule should be the pkg-config name. For example, if +# `pkg-config --libs gtest` prints out the flags to link against gtest, then +# `external_pkgconfig_library(name = "gtest")` would allow other rules to +# depend on gtest. +# +# WARNING: dependencies are not resolved by pkg-config, so these must be specified +# manually with `deps`. Additionally, ABI/platform differences are not handled +# by this rule so be careful not to cache it in Remote Execution etc to prevent +# different machines from reusing the outputs of these rules. 
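A typical use of the rule defined just below, as it might appear in a BUCK file; the gtest case comes from the comment above, and the zlib fallback flags are hypothetical:

    external_pkgconfig_library(name = "gtest")

    # With a fallback for hosts whose pkg-config does not know the package:
    external_pkgconfig_library(
        name = "zlib",
        fallback = struct(
            preprocessor_flags = [],
            linker_flags = ["-lz"],
        ),
    )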
+def external_pkgconfig_library( + name, + package = None, + visibility = ["PUBLIC"], + labels = [], + default_target_platform = "prelude//platforms:default", + deps = [], + fallback = None): + if package == None: + package = name + + cmd_cflags = "pkg-config --cflags {} > $OUT".format(package) + cmd_libs = "pkg-config --libs {} > $OUT".format(package) + + if fallback != None: + preprocessor_flags = ( + fallback.preprocessor_flags if hasattr(fallback, "preprocessor_flags") else [] + ) + linker_flags = ( + fallback.linker_flags if hasattr(fallback, "linker_flags") else [] + ) + + cmd_cflags = "if pkg-config --exists {}; then {}; else echo {} > $OUT; fi".format( + package, + cmd_cflags, + " ".join(preprocessor_flags), + ) + + cmd_libs = "if pkg-config --exists {}; then {}; else echo {} > $OUT; fi".format( + package, + cmd_libs, + " ".join(linker_flags), + ) + + pkg_config_cflags = name + "__pkg_config_cflags" + native.genrule( + name = pkg_config_cflags, + default_target_platform = default_target_platform, + out = "out", + cmd = cmd_cflags, + remote = False, + ) + + pkg_config_libs = name + "__pkg_config_libs" + native.genrule( + name = pkg_config_libs, + default_target_platform = default_target_platform, + out = "out", + cmd = cmd_libs, + remote = False, + ) + + labels = list(labels) + labels.append("third-party:pkg-config:{}".format(package)) + + native.prebuilt_cxx_library( + name = name, + default_target_platform = default_target_platform, + visibility = visibility, + exported_preprocessor_flags = ["@$(location :{})".format(pkg_config_cflags)], + exported_linker_flags = ["@$(location :{})".format(pkg_config_libs)], + exported_deps = deps, + labels = labels, + ) diff --git a/prelude/third-party/providers.bzl b/prelude/third-party/providers.bzl new file mode 100644 index 0000000000000..27096e1ae3263 --- /dev/null +++ b/prelude/third-party/providers.bzl @@ -0,0 +1,53 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//:artifacts.bzl", "ArtifactExt") + +ThirdPartyBuild = record( + # A logical project name for the project, currently used for logging. + project = field(str), + # The directory containing the build output. + root = field(ArtifactExt), + # The prefix to install the build output. + prefix = field(str, "/"), + # A manifest of build env settings to use to build against this build. + manifest = field(Artifact | None, None), + # Environment variables to set to build against this project. + # TODO(agallagher): Can this move into the manifest? 
+ exported_env = field(dict[str, str], {}), +) + +# Work-around for buck2 bug causing "transitive values must be of the same +# transitive set type" errors: +# https://fb.prod.workplace.com/groups/buck2users/posts/3637287806527574/ +ThirdPartyBuildTSet = transitive_set() +ThirdPartyBuildInfo = provider(fields = { + "build": provider_field(ThirdPartyBuild | None), + "_tset": provider_field(ThirdPartyBuildTSet), +}) + +def third_party_build_info( + actions, + build: [ThirdPartyBuild, None] = None, + children: list[ThirdPartyBuildInfo] = [], + deps: list[Dependency] = []) -> ThirdPartyBuildInfo: + kwargs = {} + if build != None: + kwargs["value"] = build + if deps or children: + kwargs["children"] = [ + child._tset + for child in children + ] + [ + dep[ThirdPartyBuildInfo]._tset + for dep in deps + if ThirdPartyBuildInfo in dep + ] + return ThirdPartyBuildInfo( + build = build, + _tset = actions.tset(ThirdPartyBuildTSet, **kwargs), + ) diff --git a/prelude/third-party/tools/BUCK.v2 b/prelude/third-party/tools/BUCK.v2 new file mode 100644 index 0000000000000..e241143bff893 --- /dev/null +++ b/prelude/third-party/tools/BUCK.v2 @@ -0,0 +1,11 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +source_listing() + +prelude = native + +prelude.python_bootstrap_binary( + name = "create_build", + main = "create_build.py", + visibility = ["PUBLIC"], +) diff --git a/prelude/third-party/tools/create_build.py b/prelude/third-party/tools/create_build.py new file mode 100644 index 0000000000000..7699004c49b1c --- /dev/null +++ b/prelude/third-party/tools/create_build.py @@ -0,0 +1,61 @@ +import argparse +import shutil +import stat +import json +import sys +import os + + +# Copy only file contents and exec permission bit. +def _copy(src, dst, *, follow_symlinks=True): + shutil.copyfile(src, dst, follow_symlinks=False) + src_mode = os.lstat(src).st_mode + dst_mode = os.lstat(dst).st_mode + os.chmod( + dst, + dst_mode | (src_mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)), + follow_symlinks=False, + ) + + +def main(argv): + parser = argparse.ArgumentParser(fromfile_prefix_chars="@") + parser.add_argument( + "--manifest", dest="manifests", nargs=2, action="append", default=[] + ) + parser.add_argument("--path", dest="paths", nargs=2, action="append", default=[]) + parser.add_argument( + "--symlink", dest="symlinks", nargs=2, action="append", default=[] + ) + parser.add_argument("output") + args = parser.parse_args(argv[1:]) + + os.makedirs(args.output) + + all_paths = [] + all_paths.extend(args.paths) + + for bdst, manifest in args.manifests: + with open(manifest) as mf: + manifest = json.load(mf) + for dst, src, _ in manifest: + dst = os.path.join(bdst, dst) + all_paths.append((dst, src)) + + for dst, src in all_paths: + fdst = os.path.join(args.output, dst) + os.makedirs(os.path.dirname(fdst), exist_ok=True) + if os.path.isdir(src): + shutil.copytree( + src, fdst, symlinks=True, dirs_exist_ok=True, copy_function=_copy + ) + else: + shutil.copy(src, fdst) + + for dst, target in args.symlinks: + fdst = os.path.join(args.output, dst) + os.makedirs(os.path.dirname(fdst), exist_ok=True) + os.symlink(target, fdst) + + +sys.exit(main(sys.argv)) diff --git a/prelude/toolchains/apple/xcode_version_checker/.gitignore b/prelude/toolchains/apple/xcode_version_checker/.gitignore new file mode 100644 index 0000000000000..5c94124a7623b --- /dev/null +++ b/prelude/toolchains/apple/xcode_version_checker/.gitignore @@ -0,0 +1,4 @@ +xcode_version_checker.arm64 +xcode_version_checker.x86_64 
+xcode_version_tester +xcode_exec_tester diff --git a/prelude/toolchains/apple/xcode_version_checker/BUCK.v2 b/prelude/toolchains/apple/xcode_version_checker/BUCK.v2 new file mode 100644 index 0000000000000..383dc2dfa00f9 --- /dev/null +++ b/prelude/toolchains/apple/xcode_version_checker/BUCK.v2 @@ -0,0 +1,12 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +export_file( + name = "xcode_version_checker", + labels = ["buck2-only"], + mode = "reference", + visibility = ["PUBLIC"], +) diff --git a/prelude/toolchains/apple/xcode_version_checker/Makefile b/prelude/toolchains/apple/xcode_version_checker/Makefile new file mode 100644 index 0000000000000..bd2ab7ea1a6f5 --- /dev/null +++ b/prelude/toolchains/apple/xcode_version_checker/Makefile @@ -0,0 +1,29 @@ +CFLAGS = -x objective-c -lobjc -fobjc-arc -fobjc-weak -O3 -Wall + +.PHONY: all clean test + +xcode_version_checker: xcode_version_checker.x86_64 xcode_version_checker.arm64 + lipo -create -output xcode_version_checker xcode_version_checker.x86_64 xcode_version_checker.arm64 +xcode_version_checker.x86_64: src/xcode_version_checker.m src/xcode_version_checks.m + $(CC) $(CFLAGS) src/xcode_version_checker.m src/xcode_version_checks.m -o xcode_version_checker.x86_64 -target x86_64-apple-macos +xcode_version_checker.arm64: src/xcode_version_checker.m src/xcode_version_checks.m + $(CC) $(CFLAGS) src/xcode_version_checker.m src/xcode_version_checks.m -o xcode_version_checker.arm64 -target arm64-apple-macos + +all: xcode_version_checker test + +test: xcode_version_tester xcode_exec_tester + ./xcode_version_tester $(abspath test/Xcode_14.2.0_14C18_fb_version.plist) + ./xcode_exec_tester "/usr/bin/true" + ./xcode_exec_tester "/bin/bash" "-c" "! /usr/bin/false" + +xcode_version_tester: src/xcode_version_tester.m src/xcode_version_checks.m + $(CC) $(CFLAGS) src/xcode_version_tester.m src/xcode_version_checks.m -o xcode_version_tester + +xcode_exec_tester: src/xcode_exec_tester.m src/xcode_version_checks.m + $(CC) $(CFLAGS) src/xcode_exec_tester.m src/xcode_version_checks.m -o xcode_exec_tester + +clean: + rm -f xcode_version_checker.x86_64 + rm -f xcode_version_checker.arm64 + rm -f xcode_version_tester + rm -f xcode_exec_tester diff --git a/prelude/toolchains/apple/xcode_version_checker/README b/prelude/toolchains/apple/xcode_version_checker/README new file mode 100644 index 0000000000000..5968c86a17cd1 --- /dev/null +++ b/prelude/toolchains/apple/xcode_version_checker/README @@ -0,0 +1,8 @@ +- Run `make` to compile the `xcode_version_checker` binary. +- Run `make all` to compile the `xcode_version_checker` binary and run all tests. +- Run `make test` to just run tests. +- Run `make clean` to delete tests and intermediate binaries. + +We cannot include this as part of the toolchain because of +bootstrapping issues: i.e., compiling it requires an +`apple_toolchain` but its needed to define an `apple_toolchain`. diff --git a/prelude/toolchains/apple/xcode_version_checker/defs.bzl b/prelude/toolchains/apple/xcode_version_checker/defs.bzl new file mode 100644 index 0000000000000..516924ac565ac --- /dev/null +++ b/prelude/toolchains/apple/xcode_version_checker/defs.bzl @@ -0,0 +1,44 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+
+load("@prelude//:native.bzl", _native = "native")
+
+def xcode_command_alias(name, xcode_version = None, xcode_product_build = None, **kwargs):
+    xcode_version_specified = xcode_version and len(xcode_version) > 0
+    xcode_product_build_specified = xcode_product_build and len(xcode_product_build) > 0
+
+    if not xcode_version_specified and not xcode_product_build_specified:
+        fail("Must specify either Xcode version or Xcode product build")
+    if xcode_version_specified and xcode_product_build_specified:
+        fail("Must specify only one of Xcode version or Xcode product build but not both")
+
+    version_args = ["-n", xcode_version] if xcode_version_specified else ["-b", xcode_product_build]
+    original_args = kwargs.pop("args", [])
+    all_args = version_args + original_args
+
+    env = kwargs.pop("env", {})
+    xcode_cache_seed = read_root_config("apple", "xcode_cache_seed", None)
+    if xcode_cache_seed:
+        # It's possible that Mac RE does not distinguish between Xcode versions,
+        # in which case we can reset the caches by setting a unique env var.
+        # Since the env var is part of the cache key, any actions depending
+        # on the selected Xcode will get recomputed.
+        env["BUCK2_XCODE_CACHE_SEED"] = xcode_cache_seed
+
+    # With this setup, every `xcode_command_alias()` effectively becomes:
+    #   xcode_version_checker VERSION ORIGINAL_COMMAND ORIGINAL_COMMAND_ARGS
+    # `xcode_version_checker` checks the currently selected Xcode and,
+    # if it matches, executes `ORIGINAL_COMMAND` passing
+    # `ORIGINAL_COMMAND_ARGS`.
+    _native.command_alias(
+        name = name,
+        exe = "prelude//toolchains/apple/xcode_version_checker:xcode_version_checker",
+        args = all_args,
+        env = env,
+        visibility = ["PUBLIC"],
+        **kwargs
+    )
diff --git a/prelude/toolchains/apple/xcode_version_checker/src/xcode_exec_tester.m b/prelude/toolchains/apple/xcode_version_checker/src/xcode_exec_tester.m
new file mode 100644
index 0000000000000..0bf6fb4101a9b
--- /dev/null
+++ b/prelude/toolchains/apple/xcode_version_checker/src/xcode_exec_tester.m
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+#import <Foundation/Foundation.h>
+#import <stdio.h>
+
+#import "xcode_version_checks.h"
+
+int main(int argc, char const* argv[]) {
+  @autoreleasepool {
+    const int numberOfArgs = argc - 1;
+    if (numberOfArgs < 1) {
+      fprintf(
+          stderr, "Expected at least one argument: executable, aborting...\n");
+      return 1;
+    }
+
+    execTool(argv + 1, argc - 1);
+    // `execTool` should never return; if it did, it means it failed to `execve`
+    return 1;
+  }
+}
diff --git a/prelude/toolchains/apple/xcode_version_checker/src/xcode_version_checker.m b/prelude/toolchains/apple/xcode_version_checker/src/xcode_version_checker.m
new file mode 100644
index 0000000000000..4d00310392a9c
--- /dev/null
+++ b/prelude/toolchains/apple/xcode_version_checker/src/xcode_version_checker.m
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+#import <Foundation/Foundation.h>
+#import <string.h>
+
+#import "xcode_version_checks.h"
+
+int main(int argc, char const* argv[]) {
+  @autoreleasepool {
+    // This executable gets called with the following arguments:
+    // - Version format (argv[1]) (`-n` for short version, `-b` for product
+    // build)
+    // - Expected Xcode version (argv[2])
+    // - Executable (argv[3])
+    // - Args to forward to the executable
+    //
+    // For example, it will be something like:
+    // xcode_version_checker -n 13.4
+    // /var/db/xcode_select_link/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang
+    // -c file.c -o file.o
+
+    // TODO(T128745718): We need to figure out the fastest way to implement this
+    // forwarder. There are multiple options with varying tradeoffs and we need
+    // to make decisions along the following axes:
+    // - Plist parsing vs memory comparison
+    // - Embedding of expected plist data
+    // - Hashing vs full comparison
+    //
+    // I believe it's important to focus on max performance given this will be
+    // running for _every_ tool invocation which will amount to millions of
+    // executions per _day_.
+
+    const int numberOfArgs = argc - 1;
+    if (numberOfArgs < 3) {
+      fprintf(
+          stderr,
+          "Expected at least three arguments: version format, Xcode version and executable, aborting...\n");
+      return 1;
+    }
+
+    NSString* expectedVersion = [[NSString alloc] initWithUTF8String:argv[2]];
+    const char* versionFormat = argv[1];
+    if (strcmp(versionFormat, "-n") == 0) {
+      if (!checkXcodeShortVersionMatch(expectedVersion)) {
+        return 1;
+      }
+    } else if (strcmp(versionFormat, "-b") == 0) {
+      if (!checkXcodeProductBuildMatch(expectedVersion)) {
+        return 1;
+      }
+    } else {
+      fprintf(
+          stderr,
+          "Found unknown version format `%s`, expected version `%s`, aborting...\n",
+          argv[1],
+          expectedVersion.UTF8String);
+      return 1;
+    }
+
+    const int stripArgsCount = 3; // this binary + version format + version
+    execTool(argv + stripArgsCount, argc - stripArgsCount);
+    // `execTool` should never return; if it did, it means it failed to `execve`
+    return 1;
+  }
+}
diff --git a/prelude/toolchains/apple/xcode_version_checker/src/xcode_version_checks.h b/prelude/toolchains/apple/xcode_version_checker/src/xcode_version_checks.h
new file mode 100644
index 0000000000000..7bbb1e907dd87
--- /dev/null
+++ b/prelude/toolchains/apple/xcode_version_checker/src/xcode_version_checks.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+#pragma once
+
+#import <Foundation/Foundation.h>
+
+BOOL checkVersionsMatch(NSString* shortVersion, NSString* expectedShortVersion);
+void execTool(char const* arguments[], int argumentsCount);
+BOOL checkPlistValueMatch(
+    NSString* plistPath,
+    NSString* plistKey,
+    NSString* expectedValue,
+    BOOL (*comparator)(NSString* value, NSString* expectedValue),
+    BOOL logComparisonFailure);
+
+BOOL checkProductBuildVersionMatch(
+    NSString* productBuild,
+    NSString* expectedProductBuild);
+BOOL checkVersionPlistProductBuildMatch(
+    NSString* plistPath,
+    NSString* expectedProductBuild,
+    BOOL logComparisonFailure);
+BOOL checkXcodeProductBuildMatch(NSString* expectedProductBuild);
+
+BOOL checkVersionPlistShortVersionMatch(
+    NSString* plistPath,
+    NSString* expectedShortVersion,
+    BOOL logComparisonFailure);
+BOOL checkXcodeShortVersionMatch(NSString* expectedShortVersion);
diff --git a/prelude/toolchains/apple/xcode_version_checker/src/xcode_version_checks.m b/prelude/toolchains/apple/xcode_version_checker/src/xcode_version_checks.m
new file mode 100644
index 0000000000000..24912d3c41b45
--- /dev/null
+++ b/prelude/toolchains/apple/xcode_version_checker/src/xcode_version_checks.m
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+#import "xcode_version_checks.h"
+
+#import <stdio.h>
+#import <unistd.h>
+
+extern char** environ;
+
+static BOOL IsInsideRemoteExecutionWorker(void) {
+  // TODO: Remove dependence on ACTION_DIGEST, once D41872225 lands
+  return getenv("INSIDE_RE_WORKER") != NULL || getenv("ACTION_DIGEST") != NULL;
+}
+
+static NSUInteger effectiveVersionComponentsCount(
+    NSArray<NSString*>* versionComponents) {
+  NSUInteger zeroTrailingComponentsCount = 0;
+  for (NSInteger i = [versionComponents count]; i > 0; --i) {
+    NSString* component = [versionComponents objectAtIndex:(i - 1)];
+    if ([component isEqual:@"0"]) {
+      zeroTrailingComponentsCount++;
+    } else {
+      break;
+    }
+  }
+
+  // Ignore trailing zero components, e.g., treat "14.0.0" as "14"
+  return [versionComponents count] - zeroTrailingComponentsCount;
+}
+
+// Looks for an _exact_ match for versions up to all components specified in
+// `expectedShortVersion`
+BOOL checkVersionsMatch(
+    NSString* shortVersion,
+    NSString* expectedShortVersion) {
+  NSArray<NSString*>* expectedShortVersionComponents =
+      [expectedShortVersion componentsSeparatedByString:@"."];
+  NSArray<NSString*>* shortVersionComponents =
+      [shortVersion componentsSeparatedByString:@"."];
+
+  NSUInteger shortVersionComponentsCount =
+      effectiveVersionComponentsCount(shortVersionComponents);
+  NSUInteger expectedShortVersionComponentsCount =
+      effectiveVersionComponentsCount(expectedShortVersionComponents);
+  if (shortVersionComponentsCount < expectedShortVersionComponentsCount) {
+    return NO;
+  }
+
+  for (NSUInteger i = 0; i < expectedShortVersionComponentsCount; ++i) {
+    NSString* expectedComponent =
+        [expectedShortVersionComponents objectAtIndex:i];
+    NSString* actualComponent = [shortVersionComponents objectAtIndex:i];
+    if (![expectedComponent isEqual:actualComponent]) {
+      return NO;
+    }
+  }
+
+  return YES;
+}
+
+BOOL checkPlistValueMatch(
+    NSString* plistPath,
+    NSString* plistKey,
+    NSString* expectedValue,
+    BOOL (*comparator)(NSString* value, NSString* expectedValue),
+    BOOL logComparisonFailure) {
+  NSURL* xcodePlistUrl = [NSURL fileURLWithPath:plistPath];
+  NSError* xcodePlistError = nil;
+  NSData* xcodePlistData = [NSData dataWithContentsOfURL:xcodePlistUrl
+                                                 options:0
+                                                   error:&xcodePlistError];
+  if (xcodePlistData == nil) {
+    fprintf(
+        stderr,
+        "Failed to read Xcode plist file: %s\n",
+        (xcodePlistError.description.UTF8String ?: ""));
+    return NO;
+  }
+
+  NSError* plistError = nil;
+  id xcodePlist =
+      [NSPropertyListSerialization propertyListWithData:xcodePlistData
+                                                options:NSPropertyListImmutable
+                                                 format:nil
+                                                  error:&plistError];
+  if (xcodePlist == nil) {
+    fprintf(
+        stderr,
+        "Failed to parse Xcode plist file: %s\n",
+        (plistError.description.UTF8String ?: ""));
+    return NO;
+  }
+
+  if (![xcodePlist isKindOfClass:[NSDictionary class]]) {
+    fprintf(stderr, "Plist is not a dictionary\n");
+    return NO;
+  }
+
+  NSDictionary* xcodePlistDict = (NSDictionary*)xcodePlist;
+  NSString* value = xcodePlistDict[plistKey];
+  if (![value isKindOfClass:[NSString class]]) {
+    fprintf(stderr, "Version is not of type string\n");
+    return NO;
+  }
+
+  BOOL comparisonResult = comparator(value, expectedValue);
+  if (!comparisonResult && logComparisonFailure) {
+    fprintf(
+        stderr,
+        "Found unexpected Xcode version, expected %s but found %s, aborting...\n",
+        expectedValue.UTF8String,
+        value.UTF8String);
+    if (IsInsideRemoteExecutionWorker()) {
+      fprintf(
+          stderr,
+          "This error was generated inside an RE worker. To debug, check the following:\n"
+          "1) The command being executed\n"
+          "2) The subplatform property of the RE action\n"
+          "3) The execution platform constraints\n"
+          "4) The Xcode installed on the particular worker\n");
+    } else {
+      fprintf(
+          stderr,
+          "This error was generated when running locally. To debug, check the following:\n"
+          "1) The command being executed\n"
+          "2) The selected Xcode version\n"
+          "3) Did you select a different Xcode _after_ a buck2 daemon was launched?\n"
+          "4) The execution platform constraints\n"
+          "5) The tools specified for the `apple_toolchain()`\n");
+    }
+  }
+
+  return comparisonResult;
+}
+
+BOOL checkProductBuildVersionMatch(
+    NSString* productBuild,
+    NSString* expectedProductBuild) {
+  if (productBuild == expectedProductBuild) {
+    return YES;
+  }
+
+  if (productBuild == nil || expectedProductBuild == nil) {
+    // `caseInsensitiveCompare:` is declared taking non-null, so guard against nil
+    return NO;
+  }
+
+  NSComparisonResult cmpResult =
+      [productBuild caseInsensitiveCompare:expectedProductBuild];
+  return (cmpResult == NSOrderedSame);
+}
+
+BOOL checkVersionPlistProductBuildMatch(
+    NSString* plistPath,
+    NSString* expectedProductBuild,
+    BOOL logComparisonFailure) {
+  return checkPlistValueMatch(
+      plistPath,
+      @"ProductBuildVersion",
+      expectedProductBuild,
+      checkProductBuildVersionMatch,
+      logComparisonFailure);
+}
+
+NSString* const VERSION_PLIST_PATH =
+    @"/var/db/xcode_select_link/../version.plist";
+BOOL checkXcodeProductBuildMatch(NSString* expectedProductBuild) {
+  return checkVersionPlistProductBuildMatch(
+      VERSION_PLIST_PATH,
+      expectedProductBuild,
+      /* logComparisonFailure = */ YES);
+}
+
+BOOL checkVersionPlistShortVersionMatch(
+    NSString* plistPath,
+    NSString* expectedShortVersion,
+    BOOL logComparisonFailure) {
+  return checkPlistValueMatch(
+      plistPath,
+      @"CFBundleShortVersionString",
+      expectedShortVersion,
+      checkVersionsMatch,
+      logComparisonFailure);
+}
+
+BOOL checkXcodeShortVersionMatch(NSString* expectedShortVersion) {
+  return checkVersionPlistShortVersionMatch(
+      VERSION_PLIST_PATH,
+      expectedShortVersion,
+      /* logComparisonFailure = */ YES);
+}
+
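For reference, the matching rule that `effectiveVersionComponentsCount` and `checkVersionsMatch` implement above (drop trailing zero components, then require an exact match on every component the expected version specifies) can be restated in a few lines of Python. This is an illustrative sketch, not part of the change:

```python
def effective_count(components):
    # Drop trailing "0" components, e.g. treat "14.0.0" as "14".
    n = len(components)
    while n > 0 and components[n - 1] == "0":
        n -= 1
    return n


def versions_match(actual, expected):
    # Exact match on every component the expected version specifies.
    a = actual.split(".")
    e = expected.split(".")
    ne = effective_count(e)
    if effective_count(a) < ne:
        return False
    return a[:ne] == e[:ne]


# Mirrors a few of the expectations in xcode_version_tester.m below:
assert versions_match("14.2.1", "14.2")
assert versions_match("14", "14.0.0.0")
assert not versions_match("14.1", "14.2")
```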
+void execTool(char const* arguments[], int argumentsCount) {
+  int execArgvSize = argumentsCount + 1 /* NULL terminator */;
+  char** execArgv = malloc(
+      sizeof(char*) *
+      execArgvSize); // no need to free() as we either execve() or exit
+  memcpy(execArgv, arguments, argumentsCount * sizeof(char*));
+  execArgv[execArgvSize - 1] = NULL;
+  execve(arguments[0], execArgv, environ);
+  // execve() never returns on success, so we must have failed if we're here
+  fprintf(
+      stderr,
+      "Failed to execve(), errno %d, description: %s\n",
+      errno,
+      strerror(errno));
+}
diff --git a/prelude/toolchains/apple/xcode_version_checker/src/xcode_version_tester.m b/prelude/toolchains/apple/xcode_version_checker/src/xcode_version_tester.m
new file mode 100644
index 0000000000000..7899dda3825ca
--- /dev/null
+++ b/prelude/toolchains/apple/xcode_version_checker/src/xcode_version_tester.m
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "xcode_version_checks.h"
+
+typedef struct {
+  NSString* actualVersion;
+  NSString* expectedVersion;
+  BOOL versionsMatch;
+} version_check;
+
+typedef struct {
+  NSString* actualBuild;
+  NSString* expectedBuild;
+  BOOL versionsMatch;
+} build_check;
+
+typedef struct {
+  NSString* plistPath;
+  BOOL(*plistFunction)
+  (NSString* plistPath, NSString* expectedValue, BOOL logComparisonFailure);
+  NSString* expectedValue;
+  BOOL versionsMatch;
+} plist_check;
+
+int main(int argc, char const* argv[]) {
+  @autoreleasepool {
+    version_check checks[] = {
+        {
+            .actualVersion = @"14",
+            .expectedVersion = @"14",
+            .versionsMatch = YES,
+        },
+        {
+            .actualVersion = @"14.2",
+            .expectedVersion = @"14.2",
+            .versionsMatch = YES,
+        },
+        {
+            .actualVersion = @"14.2",
+            .expectedVersion = @"14",
+            .versionsMatch = YES,
+        },
+        {
+            .actualVersion = @"14.2.1",
+            .expectedVersion = @"14.2",
+            .versionsMatch = YES,
+        },
+        {
+            .actualVersion = @"13",
+            .expectedVersion = @"14",
+            .versionsMatch = NO,
+        },
+        {
+            .actualVersion = @"15",
+            .expectedVersion = @"14",
+            .versionsMatch = NO,
+        },
+        {
+            .actualVersion = @"14.2.1",
+            .expectedVersion = @"14.2.1",
+            .versionsMatch = YES,
+        },
+        {
+            .actualVersion = @"14.2.3",
+            .expectedVersion = @"14.2.2",
+            .versionsMatch = NO,
+        },
+        {
+            .actualVersion = @"14.2.1",
+            .expectedVersion = @"14.2.2",
+            .versionsMatch = NO,
+        },
+        {
+            .actualVersion = @"14.3",
+            .expectedVersion = @"14.2",
+            .versionsMatch = NO,
+        },
+        {
+            .actualVersion = @"14.1",
+            .expectedVersion = @"14.2",
+            .versionsMatch = NO,
+        },
+        {
+            .actualVersion = @"14",
+            .expectedVersion = @"14.0",
+            .versionsMatch = YES,
+        },
+        {
+            .actualVersion = @"14.0",
+            .expectedVersion = @"14",
+            .versionsMatch = YES,
+        },
+        {
+            .actualVersion = @"14.0.0",
+            .expectedVersion = @"14.0",
+            .versionsMatch = YES,
+        },
+        {
+            .actualVersion = @"14.0",
+            .expectedVersion = @"14.0.0",
+            .versionsMatch = YES,
+        },
+        {
+            .actualVersion = @"14.0.0.0",
+            .expectedVersion = @"14",
+            .versionsMatch = YES,
+        },
+        {
+            .actualVersion = @"14",
+            .expectedVersion = @"14.0.0.0",
+            .versionsMatch = YES,
+        },
+        {
+            .actualVersion = @"14.1.0",
+            .expectedVersion = @"14.1",
+            .versionsMatch = YES,
+        },
+        {
+            .actualVersion = @"14.1",
+            .expectedVersion = @"14.1.0",
+            .versionsMatch = YES,
+        },
+    };
+
+    for (NSUInteger i = 0; i < sizeof(checks) / sizeof(*checks); ++i) {
+      version_check check = checks[i];
+      if (checkVersionsMatch(check.actualVersion, check.expectedVersion) !=
+          check.versionsMatch) {
+        fprintf(
+            stderr,
+            "Version check failed, version: `%s`, expected: `%s`, expected match result: %d...\n",
+            check.actualVersion.UTF8String,
+            check.expectedVersion.UTF8String,
+            check.versionsMatch);
+        return 1;
+      }
+    }
+
+    const int numberOfArgs = argc - 1;
+    if (numberOfArgs < 1) {
+      fprintf(stderr, "Expected path to Info.plist as first argument...\n");
+      return 1;
+    }
+
+    NSString* versionPlistPath = [[NSString alloc] initWithUTF8String:argv[1]];
+    plist_check plist_checks[] = {
+        {
+            .plistPath = versionPlistPath,
+            .plistFunction = checkVersionPlistShortVersionMatch,
+            .expectedValue = @"14.2",
+            .versionsMatch = YES,
+        },
+        {
+            .plistPath = versionPlistPath,
+            .plistFunction = checkVersionPlistShortVersionMatch,
+            .expectedValue = @"14.3",
+            .versionsMatch = NO,
+        },
+        {
+            .plistPath = versionPlistPath,
+            .plistFunction = checkVersionPlistProductBuildMatch,
+            .expectedValue = @"14C18",
+            .versionsMatch = YES,
+        },
+        {
+            .plistPath = versionPlistPath,
+            .plistFunction = checkVersionPlistProductBuildMatch,
+            .expectedValue = @"14B5033e",
+            .versionsMatch = NO,
+        },
+    };
+
+    for (NSUInteger i = 0; i < sizeof(plist_checks) / sizeof(*plist_checks);
+         ++i) {
+      plist_check check = plist_checks[i];
+      if (check.plistFunction(
+              check.plistPath,
+              check.expectedValue,
+              /* logComparisonFailure = */ NO) != check.versionsMatch) {
+        fprintf(
+            stderr,
+            "Version check failed, plist path: `%s`, expected value: `%s`, expected match result: %d...\n",
+            check.plistPath.UTF8String,
+            check.expectedValue.UTF8String,
+            check.versionsMatch);
+        return 1;
+      }
+    }
+
+    build_check build_checks[] = {
+        {
+            .actualBuild = @"14e222b",
+            .expectedBuild = @"14E222b",
+            .versionsMatch = YES,
+        },
+        {
+            .actualBuild = @"14e300b",
+            .expectedBuild = @"14e222b",
+            .versionsMatch = NO,
+        },
+    };
+
+    for (NSUInteger i = 0; i < sizeof(build_checks) / sizeof(*build_checks);
+         ++i) {
+      build_check check = build_checks[i];
+      if (checkProductBuildVersionMatch(
+              check.actualBuild, check.expectedBuild) != check.versionsMatch) {
+        fprintf(
+            stderr,
+            "Build check failed, build: `%s`, expected: `%s`, expected match result: %d...\n",
+            check.actualBuild.UTF8String,
+            check.expectedBuild.UTF8String,
+            check.versionsMatch);
+        return 1;
+      }
+    }
+
+    return 0;
+  }
+}
diff --git a/prelude/toolchains/apple/xcode_version_checker/test/Xcode_14.2.0_14C18_fb_version.plist b/prelude/toolchains/apple/xcode_version_checker/test/Xcode_14.2.0_14C18_fb_version.plist
new file mode 100644
index 0000000000000..f6c1fa7a339fb
--- /dev/null
+++ b/prelude/toolchains/apple/xcode_version_checker/test/Xcode_14.2.0_14C18_fb_version.plist
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+	<key>BuildVersion</key>
+	<string>49</string>
+	<key>CFBundleShortVersionString</key>
+	<string>14.2</string>
+	<key>CFBundleVersion</key>
+	<string>21534</string>
+	<key>ProductBuildVersion</key>
+	<string>14C18</string>
+	<key>ProjectName</key>
+	<string>IDEFrameworks</string>
+	<key>SourceVersion</key>
+	<string>21534000000000000</string>
+</dict>
+</plist>
diff --git a/prelude/toolchains/apple/xcode_version_checker/xcode_version_checker b/prelude/toolchains/apple/xcode_version_checker/xcode_version_checker
new file mode 100755
index 0000000000000..c02dc8a9817c9
Binary files /dev/null and b/prelude/toolchains/apple/xcode_version_checker/xcode_version_checker differ
diff --git a/prelude/toolchains/conan/BUCK b/prelude/toolchains/conan/BUCK
new file mode 100644
index 0000000000000..0edb38d5db9be
--- /dev/null
+++ b/prelude/toolchains/conan/BUCK
@@ -0,0 +1,63 @@
+load("@prelude//utils:source_listing.bzl", "source_listing")
+
+oncall("build_infra")
+
+source_listing()
+
+export_file(
+    name = "buckler",
+    src = "buckler/conanfile.py",
+    visibility = ["PUBLIC"],
+)
+
+python_bootstrap_library(
+    name = "conan_common",
+    srcs = ["conan_common.py"],
+)
+
+python_bootstrap_binary(
+    name = "conan_generate",
+    main = "conan_generate.py",
+    visibility = ["PUBLIC"],
+    deps = [":conan_common"],
+)
+
+python_bootstrap_binary(
+    name = "conan_init",
+    main = "conan_init.py",
+    visibility = ["PUBLIC"],
+    deps = [":conan_common"],
+)
+
+python_bootstrap_binary(
+    name = "conan_lock",
+    main = "conan_lock.py",
+    visibility = ["PUBLIC"],
+    deps = [":conan_common"],
+)
+
+python_bootstrap_binary(
+    name = "conan_package",
+    main = "conan_package.py",
+    visibility = ["PUBLIC"],
+    deps = [":conan_common"],
+)
+
+python_bootstrap_binary(
+    name = "conan_package_extract",
+    main = "conan_package_extract.py",
+    visibility = ["PUBLIC"],
+)
+
+python_bootstrap_binary(
+    name = "conan_update",
+    main = "conan_update.py",
+    visibility = ["PUBLIC"],
+)
+
+python_bootstrap_binary(
+    name = "lock_generate",
+    main = "lock_generate.py",
+    visibility = ["PUBLIC"],
+    deps = [":conan_common"],
+)
diff --git a/prelude/toolchains/conan/buckler/conanfile.py b/prelude/toolchains/conan/buckler/conanfile.py
new file mode 100644
index 0000000000000..d82c964f91955
--- /dev/null
+++ b/prelude/toolchains/conan/buckler/conanfile.py
@@ -0,0 +1,251 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+import collections
+import os
+import re
+
+from conans import ConanFile
+from conans.model import Generator
+
+
+def _rel_to_root(rootpath, path):
+    """Make the given path relative to the given root path."""
+    return os.path.relpath(path, rootpath)
+
+
+def _map_rel_to_root(rootpath, paths):
+    """Make all given paths relative to the given root path."""
+    return [_rel_to_root(rootpath, p) for p in paths]
+
+
+_LibraryFiles = collections.namedtuple("_LibraryFiles", ["static", "shared"])
+
+
+def _find_libs(rootpath, lib_paths, lib_names):
+    """Collect static and shared library files for the given library names.
+
+    Searches for library files under the given library search paths.
+    Retains order as defined in the given library names in case it matters for
+    linking order.
+    """
+    result = collections.OrderedDict(
+        (name, _LibraryFiles([], [])) for name in lib_names
+    )
+
+    name_pattern = "(?P<name>{})".format("|".join(lib_names))
+    ext_pattern = (
+        "(?:(?P<static>a|lib)|(?P<shared>so(?:\\.\\d+(?:\\.\\d+)?)?|dylib|dll))"
+    )
+    regex = re.compile("lib{}\\.{}".format(name_pattern, ext_pattern))
+
+    files = (
+        os.path.join(libdir, filepath)
+        for libdir in lib_paths
+        for filepath in os.listdir(os.path.join(rootpath, libdir))
+        if os.path.isfile(os.path.join(rootpath, libdir, filepath))
+    )
+
+    for filepath in files:
+        m = regex.match(os.path.basename(filepath))
+        if m:
+            name = m.group("name")
+            # TODO[AH] Can we distinguish static and static-pic libs?
+            if m.group("static"):
+                result[name].static.append(filepath)
+            elif m.group("shared"):
+                result[name].shared.append(filepath)
+
+    return result
+
+
+class _Requirement(collections.namedtuple("_Requirement", ["package", "component"])):
+    """Represents a Conan requirement.
+ + Requirements can be + * a relative reference to a component of the current package - `package` None, `component` set, + * an absolute reference to a package - `package` set, `component` None, or + * an absolute reference to another package's component - `package` set, `component` set. + """ + + @classmethod + def parse(cls, requirement): + """Parse a Conan requirement. + + These take the form + * `somecomponent` for a relative component reference, + * `somepackage::somepackage` for an absolute package reference, or + * `somepackage::somecomponent` for an absolute component reference. + """ + if "::" in requirement: + package, component = requirement.split("::", 1) + if package == component: + return cls(package, None) + else: + return cls(package, component) + else: + return cls(None, requirement) + + def to_name(self, current_package): + """Generate the Buck2 target name for a requirement.""" + package = self.package or current_package + component = self.component or package + return "_component_{}_{}".format(package, component) + + def to_label(self, current_package): + """Generate the Buck2 label for a requirement. + + Relative requirements use the given current package's name to refer directly to the component target. + Absolute requirements refer to the package target or sub-target. + """ + if self.package and self.component: + return ":{}[{}]".format(self.package, self.component) + elif self.package: + return ":{}".format(self.package) + else: + return ":{}".format(self.to_name(current_package)) + + +class _BucklerDepCppComponent(object): + """A Conan package component or the package itself if it has no components. + + You can learn more about Conan package components [here][conan-components]. + + [conan-components]: https://docs.conan.io/en/1.53/creating_packages/package_information.html#using-components + """ + + def __init__(self, package_name, component_info): + self.package_name = package_name + self.component_name = component_info.name + self.rootpath = component_info.rootpath + rootpath = self.rootpath + + self.defines = component_info.defines + self.cflags = component_info.cflags + self.cppflags = component_info.cppflags + + self.include_paths = _map_rel_to_root(rootpath, component_info.include_paths) + + lib_paths = _map_rel_to_root(rootpath, component_info.lib_paths) + self.libs = _find_libs(rootpath, lib_paths, component_info.libs) + self.system_libs = component_info.system_libs + + self.requires = [_Requirement.parse(req) for req in component_info.requires] + + def generate(self): + """Generate Buck2 target definitions for the component.""" + name = _Requirement(None, self.component_name).to_name(self.package_name) + deps = [req.to_label(self.package_name) for req in self.requires] + return """\ + +conan_component( + name = {name!r}, + defines = {defines!r}, + cflags = {cflags!r}, + cppflags = {cppflags!r}, + include_paths = {include_paths!r}, + libs = {libs!r}, + static_libs = {static_libs!r}, + shared_libs = {shared_libs!r}, + system_libs = {system_libs!r}, + deps = {deps!r}, + package = {package!r}, +) +""".format( + name=name, + defines=self.defines, + cflags=self.cflags, + cppflags=self.cppflags, + include_paths=self.include_paths, + libs=list(self.libs.keys()), + static_libs={ + name: sorted(libs.static) + for name, libs in self.libs.items() + if libs.static + }, + shared_libs={ + name: sorted(libs.shared) + for name, libs in self.libs.items() + if libs.shared + }, + system_libs=self.system_libs, + deps=deps, + package=":_package_" + self.package_name, + ) + + 
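To make the three requirement forms concrete, here is a small standalone Python sketch of the parsing rule implemented by `_Requirement.parse` above; the package names (`openssl`, `crypto`, `mylib`) are hypothetical, used purely for illustration:

```python
def parse(requirement):
    # Mirrors _Requirement.parse: "pkg::pkg" collapses to a package
    # reference, "pkg::comp" is an absolute component reference, and a
    # bare name is a component of the current package.
    if "::" in requirement:
        package, component = requirement.split("::", 1)
        if package == component:
            return (package, None)
        return (package, component)
    return (None, requirement)


assert parse("crypto") == (None, "crypto")             # relative component
assert parse("openssl::openssl") == ("openssl", None)  # absolute package
assert parse("openssl::ssl") == ("openssl", "ssl")     # absolute component

# For a current package "mylib", to_label would then produce
# ":_component_mylib_crypto", ":openssl", and ":openssl[ssl]" respectively.
```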
+class _BucklerDepCpp(object):
+    """A Conan package."""
+
+    def __init__(self, dep_name, dep_cpp_info, public=False):
+        self.name = dep_name
+        self.public = public
+        self.rootpath = dep_cpp_info.rootpath
+        if dep_cpp_info.components:
+            self.components = collections.OrderedDict(
+                (name, _BucklerDepCppComponent(dep_name, component_info))
+                for (name, component_info) in dep_cpp_info.components.items()
+            )
+        else:
+            self.components = collections.OrderedDict(
+                [(dep_name, _BucklerDepCppComponent(dep_name, dep_cpp_info))]
+            )
+
+    def generate(self):
+        """Generate Buck2 target definitions for the package and its components."""
+        result = """\
+
+conan_dep(
+    name = {name!r},
+    components = {components!r},
+    visibility = {visibility!r},
+)
+""".format(
+            name=self.name,
+            components={
+                name: _Requirement(None, name).to_label(self.name)
+                for name in self.components.keys()
+            },
+            visibility=["PUBLIC"] if self.public else [],
+        )
+
+        for component in self.components.values():
+            result += component.generate()
+
+        return result
+
+
+class BucklerGenerator(Generator):
+    @property
+    def filename(self):
+        return "conan-imports.bzl"
+
+    @property
+    def content(self):
+        result = ""
+
+        for dep_name, dep_cpp_info in self.deps_build_info.dependencies:
+            direct_dep = dep_name in self.conanfile.requires
+            buckler_dep = _BucklerDepCpp(dep_name, dep_cpp_info, public=direct_dep)
+            result += buckler_dep.generate()
+
+        return result
+
+
+class Buckler(ConanFile):
+    name = "buckler"
+    version = "0.1"
+    description = """\
+Buckler - Conan extension for Buck2
+
+This package provides a
+- [Generator][generator] to import Conan built packages into Buck2.
+
+[generator]: https://docs.conan.io/en/latest/reference/generators.html#generators-reference
+"""
+    url = "https://github.com/facebookincubator/buck2"
+    license = "Apache-2.0"
diff --git a/prelude/toolchains/conan/conan_common.py b/prelude/toolchains/conan/conan_common.py
new file mode 100644
index 0000000000000..7f324df0cd996
--- /dev/null
+++ b/prelude/toolchains/conan/conan_common.py
@@ -0,0 +1,183 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+import os
+import shutil
+import subprocess
+
+
+def _none(s):
+    if not s or s == "_":
+        return None
+    else:
+        return s
+
+
+def parse_reference(ref):
+    """Parse a Conan package reference.
+
+    These take the shape `name/version@user/channel#revision`.
+    Omitted values or `_` are read as `None`.
+    """
+    name = None
+    version = None
+    user = None
+    channel = None
+    revision = None
+
+    if "#" in ref:
+        ref, revision = ref.split("#", 1)
+
+    if "@" in ref:
+        ref, user_channel = ref.split("@", 1)
+        if "/" in user_channel:
+            user, channel = user_channel.split("/", 1)
+        else:
+            user = user_channel
+
+    if "/" in ref:
+        name, version = ref.split("/", 1)
+    else:
+        name = ref
+
+    return _none(name), _none(version), _none(user), _none(channel), _none(revision)
+
+
+CONAN_DIR = ".conan"
+GENERATORS_DIR = "generators"
+STORE_DIR = "data"
+PACKAGE_DIR = "package"
+
+
+def conan_dir(user_home):
+    """Conan folder under the Conan user home."""
+    return os.path.join(user_home, CONAN_DIR)
+
+
+def generators_dir(user_home):
+    """Custom generators folder under the Conan user home."""
+    return os.path.join(conan_dir(user_home), GENERATORS_DIR)
+
+
+def store_dir(user_home):
+    """Store folder under the Conan user home."""
+    return os.path.join(conan_dir(user_home), STORE_DIR)
+
+
+def reference_subtree(name, version, user, channel):
+    """Package base directory subtree under the Conan store folder."""
+    return os.path.join(name or "_", version or "_", user or "_", channel or "_")
+
+
+def package_subtree(package_id):
+    """Package directory subtree under the package base directory."""
+    return os.path.join(PACKAGE_DIR, package_id)
+
+
+def reference_dir(user_home, name, version, user, channel):
+    """Package base directory under the Conan store folder."""
+    return os.path.join(
+        store_dir(user_home), reference_subtree(name, version, user, channel)
+    )
+
+
+def package_dir(user_home, name, version, user, channel, package_id):
+    """Package directory under the Conan store folder."""
+    return os.path.join(
+        reference_dir(user_home, name, version, user, channel),
+        package_subtree(package_id),
+    )
+
+
+def _copytree(src, dst):
+    """Recursively copy the source directory tree to the destination.
+
+    Copies symbolic links and ignores dangling symbolic links.
+    """
+    shutil.copytree(src, dst, symlinks=True, ignore_dangling_symlinks=True)
+
+
+def install_user_home(user_home, base_user_home):
+    """Copy the given base user-home to the current user-home."""
+    src = base_user_home
+    dst = user_home
+    _copytree(src, dst)
+
+
+def install_generator(user_home, generator_file):
+    """Copy the given custom generator into the generators path.
+
+    Note that this will overwrite any pre-existing generators.
+    """
+    src = generator_file
+    dstdir = generators_dir(user_home)
+    dst = os.path.join(dstdir, "conanfile.py")
+    os.makedirs(dstdir, exist_ok=True)
+    shutil.copyfile(src, dst)
+
+
+def install_reference(user_home, reference, path):
+    """Copy the cache directory of a given package reference into the store."""
+    name, version, user, channel, _ = parse_reference(reference)
+    src = path
+    dst = reference_dir(user_home, name, version, user, channel)
+    _copytree(src, dst)
+
+
+def extract_reference(user_home, reference, output):
+    """Copy the cache directory of the given package reference out of the store."""
+    name, version, user, channel, _ = parse_reference(reference)
+    src = reference_dir(user_home, name, version, user, channel)
+    dst = output
+    _copytree(src, dst)
+
+
+def extract_package(user_home, reference, package_id, output):
+    """Copy the package directory of the given package out of the store."""
+    name, version, user, channel, _ = parse_reference(reference)
+    src = package_dir(user_home, name, version, user, channel, package_id)
+    dst = output
+    _copytree(src, dst)
+
+
+def conan_env(user_home=None, trace_log=None):
+    """Generate environment variables needed to invoke Conan."""
+    env = dict(os.environ)
+
+    if user_home is not None:
+        # Set the Conan base directory.
+        env["CONAN_USER_HOME"] = os.path.abspath(user_home)
+
+    if trace_log is not None:
+        # Enable Conan debug trace.
+        env["CONAN_TRACE_FILE"] = os.path.abspath(trace_log)
+
+    # TODO[AH] Enable Conan revisions for reproducibility
+    # env["CONAN_REVISIONS_ENABLED"] = "1"
+
+    # Prevent over-allocation.
+    # TODO[AH] Support parallelized package builds and set an appropriate action
+    # weight using the `weight` parameter to `ctx.actions.run`.
+    # Note that not all Conan packages respect the `CONAN_CPU_COUNT` setting.
+    env["CONAN_CPU_COUNT"] = "1"
+
+    # Prevent interactive prompts.
+    env["CONAN_NON_INTERACTIVE"] = "1"
+
+    # Print every `self.run` invocation.
+    # TODO[AH] Remove this debug output.
+    env["CONAN_PRINT_RUN_COMMANDS"] = "1"
+
+    # Disable the short paths feature on Windows.
+    # TODO[AH] Enable if needed with a hermetic short path.
+    env["CONAN_USER_HOME_SHORT"] = "None"
+
+    return env
+
+
+def run_conan(conan, *args, env=None):
+    return subprocess.check_call([conan] + list(args), env=env or {})
diff --git a/prelude/toolchains/conan/conan_generate.py b/prelude/toolchains/conan/conan_generate.py
new file mode 100644
index 0000000000000..830ecf6eed8cd
--- /dev/null
+++ b/prelude/toolchains/conan/conan_generate.py
@@ -0,0 +1,161 @@
+#!/usr/bin/env python3
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
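The `conan_common` helpers above are what keep each of the following wrapper scripts hermetic: every build action seeds a private `CONAN_USER_HOME` and runs Conan against it. A minimal usage sketch (the paths are hypothetical, for illustration only):

```python
import conan_common

# Seed a private, per-action Conan user-home from the output of `conan_init`
# so Conan state never leaks between build actions.
user_home = "buck-out/conan-user-home"  # hypothetical output path
conan_common.install_user_home(user_home, "path/to/conan-init-output")

# All Conan invocations then run against that isolated home.
env = conan_common.conan_env(user_home=user_home, trace_log="conan-trace.log")
conan_common.run_conan("conan", "config", "home", env=env)
```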
+ +import argparse +import os +import shutil + +import conan_common + + +def conan_install( + conan, + conanfile, + lockfile, + install_folder, + output_folder, + user_home, + manifests, + install_info, + trace_log, +): + env = conan_common.conan_env(user_home=user_home, trace_log=trace_log) + + args = ["install"] + args.extend(["--build", "missing"]) + args.extend(["--generator", "BucklerGenerator"]) + args.extend(["--lockfile", lockfile]) + args.extend(["--install-folder", install_folder]) + args.extend(["--output-folder", output_folder]) + args.extend(["--manifests", manifests]) + args.extend(["--json", install_info]) + args.append(conanfile) + + conan_common.run_conan(conan, *args, env=env) + + +def extract_generated(install_folder, targets_out): + src = os.path.join(install_folder, "conan-imports.bzl") + dst = targets_out + shutil.copy(src, dst) + + +def main(): + parser = argparse.ArgumentParser( + prog="conan_generate", + description="Generate Buck2 imports of Conan built packages.", + ) + parser.add_argument( + "--conan", + metavar="FILE", + type=str, + required=True, + help="Path to the Conan executable.", + ) + parser.add_argument( + "--conan-init", + metavar="PATH", + type=str, + required=True, + help="Path to the base Conan user-home.", + ) + parser.add_argument( + "--buckler", + metavar="FILE", + type=str, + required=True, + help="Path to the Buckler generator.", + ) + parser.add_argument( + "--install-folder", + metavar="PATH", + type=str, + required=True, + help="Path to install directory to place generator files into.", + ) + parser.add_argument( + "--output-folder", + metavar="PATH", + type=str, + required=True, + help="Path to the root output folder for generated and built files.", + ) + parser.add_argument( + "--user-home", + metavar="PATH", + type=str, + required=True, + help="Path to the Conan base directory.", + ) + parser.add_argument( + "--manifests", + metavar="PATH", + type=str, + required=True, + help="Write dependency manifests into this directory.", + ) + parser.add_argument( + "--install-info", + metavar="PATH", + type=str, + required=True, + help="Write install information JSON file to this location.", + ) + parser.add_argument( + "--trace-file", + metavar="PATH", + type=str, + required=True, + help="Write Conan trace log to this file.", + ) + parser.add_argument( + "--conanfile", + metavar="FILE", + type=str, + required=True, + help="Path to the Conanfile.", + ) + parser.add_argument( + "--lockfile", + metavar="FILE", + type=str, + required=True, + help="Path to the Conan lock-file.", + ) + parser.add_argument( + "--targets-out", + metavar="PATH", + type=str, + required=True, + help="Write the generated targets to this file.", + ) + args = parser.parse_args() + + conan_common.install_user_home(args.user_home, args.conan_init) + conan_common.install_generator(args.user_home, args.buckler) + + os.mkdir(args.install_folder) + os.mkdir(args.output_folder) + os.mkdir(args.manifests) + + conan_install( + args.conan, + args.conanfile, + args.lockfile, + args.install_folder, + args.output_folder, + args.user_home, + args.manifests, + args.install_info, + args.trace_file, + ) + extract_generated(args.install_folder, args.targets_out) + + +if __name__ == "__main__": + main() diff --git a/prelude/toolchains/conan/conan_init.py b/prelude/toolchains/conan/conan_init.py new file mode 100644 index 0000000000000..fb42852b3a7c1 --- /dev/null +++ b/prelude/toolchains/conan/conan_init.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. 
and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +import argparse + +import conan_common + + +def conan_profile(conan, user_home, trace_log): + env = conan_common.conan_env(user_home=user_home, trace_log=trace_log) + + # TODO[AH] Allow users to define additional remotes. + remotes = [ + ("conancenter", "https://center.conan.io"), + ] + + for name, url in remotes: + conan_common.run_conan(conan, "remote", "add", "-f", name, url, env=env) + + +def main(): + parser = argparse.ArgumentParser( + prog="conan_init", description="Initialise a Conan home directory." + ) + parser.add_argument( + "--conan", + metavar="FILE", + type=str, + required=True, + help="Path to the Conan executable.", + ) + parser.add_argument( + "--user-home", + metavar="PATH", + type=str, + required=True, + help="Path to the Conan base directory.", + ) + parser.add_argument( + "--trace-file", + metavar="PATH", + type=str, + required=True, + help="Write Conan trace log to this file.", + ) + args = parser.parse_args() + + conan_profile(args.conan, args.user_home, args.trace_file) + + +if __name__ == "__main__": + main() diff --git a/prelude/toolchains/conan/conan_lock.py b/prelude/toolchains/conan/conan_lock.py new file mode 100644 index 0000000000000..6e86a90debc56 --- /dev/null +++ b/prelude/toolchains/conan/conan_lock.py @@ -0,0 +1,104 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +import argparse + +import conan_common + + +def conan_lock(conan, profile, conanfile, lockfile_out, lockfile, user_home, trace_log): + env = conan_common.conan_env(user_home=user_home, trace_log=trace_log) + + args = ["lock", "create"] + args.extend(["--profile", profile]) + args.extend(["--build", "missing"]) + if lockfile: + args.extend(["--lockfile", lockfile]) + args.extend(["--lockfile-out", lockfile_out]) + args.append(conanfile) + + conan_common.run_conan(conan, *args, env=env) + + +def main(): + parser = argparse.ArgumentParser( + prog="conan_lock", description="Generate a Conan lock-file." 
+ ) + parser.add_argument( + "--conan", + metavar="FILE", + type=str, + required=True, + help="Path to the Conan executable.", + ) + parser.add_argument( + "--conan-init", + metavar="PATH", + type=str, + required=True, + help="Path to the base Conan user-home.", + ) + parser.add_argument( + "--profile", + metavar="FILE", + type=str, + required=True, + help="Path to the Conan profile.", + ) + parser.add_argument( + "--user-home", + metavar="PATH", + type=str, + required=True, + help="Path to the Conan base directory.", + ) + parser.add_argument( + "--trace-file", + metavar="PATH", + type=str, + required=True, + help="Write Conan trace log to this file.", + ) + parser.add_argument( + "--conanfile", + metavar="FILE", + type=str, + required=True, + help="Path to the Conanfile.", + ) + parser.add_argument( + "--lockfile-out", + metavar="FILE", + type=str, + required=True, + help="Path to the lock-file to generate.", + ) + parser.add_argument( + "--lockfile", + metavar="FILE", + type=str, + required=False, + help="Path to an existing Conan lock-file to base resolution on.", + ) + args = parser.parse_args() + + conan_common.install_user_home(args.user_home, args.conan_init) + + conan_lock( + args.conan, + args.profile, + args.conanfile, + args.lockfile_out, + args.lockfile, + args.user_home, + args.trace_file, + ) + + +if __name__ == "__main__": + main() diff --git a/prelude/toolchains/conan/conan_package.py b/prelude/toolchains/conan/conan_package.py new file mode 100644 index 0000000000000..b64a29fc5ac28 --- /dev/null +++ b/prelude/toolchains/conan/conan_package.py @@ -0,0 +1,242 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+
+import argparse
+import json
+import os
+
+import conan_common
+
+
+def conan_install(
+    conan,
+    reference,
+    lockfile,
+    options,  # unused: package options are already pinned in the lockfile
+    install_folder,
+    output_folder,
+    user_home,
+    manifests,
+    install_info,
+    trace_log,
+):
+    env = conan_common.conan_env(user_home=user_home, trace_log=trace_log)
+
+    args = ["install"]
+    args.extend(["--build", "missing"])
+    args.extend(["--lockfile", lockfile])
+    args.extend(["--install-folder", install_folder])
+    args.extend(["--output-folder", output_folder])
+    args.extend(["--manifests", manifests])
+    args.extend(["--json", install_info])
+    args.append(reference.split("#")[0] + "@")
+
+    conan_common.run_conan(conan, *args, env=env)
+
+
+def verify_build_and_cached_deps(install_info, package, deps):
+    """Verify that the package was built and dependencies were cached."""
+    with open(install_info, "r") as f:
+        info = json.load(f)
+    package_parsed = conan_common.parse_reference(package)
+    deps_parsed = {conan_common.parse_reference(dep) for dep in deps}
+    for installed in info["installed"]:
+        recipe_id = installed["recipe"]["id"]
+        ref = conan_common.parse_reference(recipe_id)
+        is_package = ref == package_parsed
+        is_dep = ref in deps_parsed
+
+        if not is_package and not is_dep:
+            raise RuntimeError(
+                "Unexpected installed package found: {}".format(recipe_id)
+            )
+
+        recipe_downloaded = installed["recipe"]["downloaded"]
+        if is_package and not recipe_downloaded:
+            raise RuntimeError("Cached package to build detected: {}".format(recipe_id))
+        elif is_dep and recipe_downloaded:
+            raise RuntimeError("Downloaded dependency detected: {}".format(recipe_id))
+
+        for package_info in installed["packages"]:
+            package_id = package_info["id"]
+            package_downloaded = package_info["downloaded"]
+            package_built = package_info["built"]
+            if is_package and not (package_downloaded or package_built):
+                raise RuntimeError(
+                    "Cached package to build detected: {}-{}".format(
+                        recipe_id, package_id
+                    )
+                )
+            elif is_dep and (package_downloaded or package_built):
+                raise RuntimeError(
+                    "Downloaded or built dependency detected: {}-{}".format(
+                        recipe_id, package_id
+                    )
+                )
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        prog="conan_package", description="Build a Conan package."
+ ) + parser.add_argument( + "--conan", + metavar="FILE", + type=str, + required=True, + help="Path to the Conan executable.", + ) + parser.add_argument( + "--conan-init", + metavar="PATH", + type=str, + required=True, + help="Path to the base Conan user-home.", + ) + parser.add_argument( + "--lockfile", + metavar="FILE", + type=str, + required=True, + help="Path to the Conan lockfile.", + ) + parser.add_argument( + "--reference", + metavar="STRING", + type=str, + required=True, + help="Reference of the Conan package to build.", + ) + parser.add_argument( + "--package-id", + metavar="STRING", + type=str, + required=True, + help="Package ID of the Conan package to build.", + ) + parser.add_argument( + "--option", + metavar="STRING", + type=str, + required=False, + action="append", + help="Conan options for the package to build.", + ) + parser.add_argument( + "--install-folder", + metavar="PATH", + type=str, + required=True, + help="Path to install directory to place generator files into.", + ) + parser.add_argument( + "--output-folder", + metavar="PATH", + type=str, + required=True, + help="Path to the root output folder for generated and built files.", + ) + parser.add_argument( + "--user-home", + metavar="PATH", + type=str, + required=True, + help="Path to the Conan base directory used for Conan's cache.", + ) + parser.add_argument( + "--manifests", + metavar="PATH", + type=str, + required=True, + help="Write dependency manifests into this directory.", + ) + parser.add_argument( + "--install-info", + metavar="PATH", + type=str, + required=True, + help="Write install information JSON file to this location.", + ) + parser.add_argument( + "--trace-file", + metavar="PATH", + type=str, + required=True, + help="Write Conan trace log to this file.", + ) + parser.add_argument( + "--cache-out", + metavar="PATH", + type=str, + required=True, + help="Copy the package's cache directory to this path.", + ) + parser.add_argument( + "--package-out", + metavar="PATH", + type=str, + required=True, + help="Copy the package directory to this path.", + ) + parser.add_argument( + "--dep-reference", + metavar="STRING", + type=str, + required=False, + action="append", + default=[], + help="Conan package dependency reference. All --dep-* arguments must align.", + ) + parser.add_argument( + "--dep-cache-out", + metavar="PATH", + type=str, + required=False, + action="append", + default=[], + help="Conan package dependency cache output directory. All --dep-* arguments must align.", + ) + # TODO[AH] Remove unused `--manifests` and `--verify` flags and outputs. + # TODO[AH] Should we enable the `--no-imports` flag? + # TODO[AH] Handle packages that are build requirements and set + # `--build-require` in that case. + args = parser.parse_args() + + conan_common.install_user_home(args.user_home, args.conan_init) + assert len(args.dep_reference) == len( + args.dep_cache_out + ), "Mismatching dependency arguments." 
+ for ref, cache_out in zip(args.dep_reference, args.dep_cache_out): + conan_common.install_reference(args.user_home, ref, cache_out) + + os.mkdir(args.install_folder) + os.mkdir(args.output_folder) + os.mkdir(args.manifests) + + conan = args.conan + conan_install( + conan, + args.reference, + args.lockfile, + args.option, + args.install_folder, + args.output_folder, + args.user_home, + args.manifests, + args.install_info, + args.trace_file, + ) + verify_build_and_cached_deps(args.install_info, args.reference, args.dep_reference) + conan_common.extract_reference(args.user_home, args.reference, args.cache_out) + conan_common.extract_package( + args.user_home, args.reference, args.package_id, args.package_out + ) + + +if __name__ == "__main__": + main() diff --git a/prelude/toolchains/conan/conan_package_extract.py b/prelude/toolchains/conan/conan_package_extract.py new file mode 100644 index 0000000000000..51615202021c5 --- /dev/null +++ b/prelude/toolchains/conan/conan_package_extract.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +import argparse +import os +import shutil + + +def extract_file(package, src, dst): + os.makedirs(os.path.dirname(dst), exist_ok=True) + shutil.copyfile(os.path.join(package, src), dst) + + +def extract_directory(package, src, dst): + os.makedirs(os.path.dirname(dst), exist_ok=True) + shutil.copytree(os.path.join(package, src), dst) + + +def main(): + parser = argparse.ArgumentParser( + prog="conan_package_extract", + description="Extract outputs from a Conan package.", + ) + parser.add_argument( + "--package", + metavar="PATH", + type=str, + required=True, + help="Path to the package output directory.", + ) + parser.add_argument( + "--file-from", + metavar="PATH", + type=str, + required=False, + action="append", + default=[], + help="File to extract. All --file-* arguments must align.", + ) + parser.add_argument( + "--file-to", + metavar="PATH", + type=str, + required=False, + action="append", + default=[], + help="Destination to extract the file to. All --file-* arguments must align.", + ) + parser.add_argument( + "--directory-from", + metavar="PATH", + type=str, + required=False, + action="append", + default=[], + help="Directory to extract. All --directory-* arguments must align.", + ) + parser.add_argument( + "--directory-to", + metavar="PATH", + type=str, + required=False, + action="append", + default=[], + help="Destination to extract the directory to. All --directory-* arguments must align.", + ) + args = parser.parse_args() + + assert len(args.file_from) == len(args.file_to), "Mismatching file arguments." + assert len(args.directory_from) == len( + args.directory_to + ), "Mismatching directory arguments." + for src, dst in zip(args.file_from, args.file_to): + extract_file(args.package, src, dst) + for src, dst in zip(args.directory_from, args.directory_to): + extract_directory(args.package, src, dst) + + +if __name__ == "__main__": + main() diff --git a/prelude/toolchains/conan/conan_update.py b/prelude/toolchains/conan/conan_update.py new file mode 100644 index 0000000000000..f52f75beded03 --- /dev/null +++ b/prelude/toolchains/conan/conan_update.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +import argparse +import os +import shutil +import subprocess + + +def find_root(): + """Find the repository root using `buck2 root`.""" + # TODO[AH] This assumes that buck2 is in PATH when executing the script via `buck2 run`. + # Consider making the name/path `buck2` configurable via an environment variable. + return subprocess.check_output(["buck2", "root"], text=True).strip() + + +def write_lockfile(lockfile, lockfile_out): + os.makedirs(os.path.dirname(lockfile_out), exist_ok=True) + shutil.copy(lockfile, lockfile_out) + + +def write_targets(update_label, lock_generate, conan_generate, targets_out): + header = """\ +# {at}generated +# Update using `buck2 run {update_label}` + +load( + "@prelude//toolchains/conan:defs.bzl", + "conan_component", + "conan_dep", + "conan_package", +) +""".format( + at="@", update_label=update_label + ) + os.makedirs(os.path.dirname(targets_out), exist_ok=True) + with open(targets_out, "w") as outf: + outf.write(header) + with open(lock_generate, "r") as inf: + for x in inf: + outf.write(x) + with open(conan_generate, "r") as inf: + for x in inf: + outf.write(x) + + +def main(): + parser = argparse.ArgumentParser( + prog="conan_update", + description="Update the Conan lock-file and the Buck2 package imports.", + ) + parser.add_argument( + "--update-label", + metavar="LABEL", + type=str, + required=True, + help="The label to the target to run this program.", + ) + parser.add_argument( + "--lockfile", + metavar="FILE", + type=str, + required=True, + help="Path to the lockfile to copy to the repository.", + ) + parser.add_argument( + "--lock-targets", + metavar="FILE", + type=str, + required=True, + help="Path to the targets file generated from the lock file.", + ) + parser.add_argument( + "--conan-targets", + metavar="FILE", + type=str, + required=True, + help="Path to the targets file generated by Buckler.", + ) + parser.add_argument( + "--conanfile", + metavar="FILE", + type=str, + required=True, + help="Path to the Conanfile.", + ) + parser.add_argument( + "--lockfile-out", + metavar="FILE", + type=str, + required=True, + help="Name of the Conan lock-file to generate, relative to the Conanfile.", + ) + parser.add_argument( + "--targets-out", + metavar="FILE", + type=str, + required=True, + help="Name of the Starlark file to generate, relative to the Conanfile.", + ) + args = parser.parse_args() + + root = find_root() + conanfile = os.path.join(root, args.conanfile) + package = os.path.dirname(conanfile) + + lockfile_out = os.path.join(package, args.lockfile_out) + targets_out = os.path.join(package, args.targets_out) + + write_lockfile(args.lockfile, lockfile_out) + write_targets(args.update_label, args.lock_targets, args.conan_targets, targets_out) + + +if __name__ == "__main__": + main() diff --git a/prelude/toolchains/conan/defs.bzl b/prelude/toolchains/conan/defs.bzl new file mode 100644 index 0000000000000..8f65db3dface2 --- /dev/null +++ b/prelude/toolchains/conan/defs.bzl @@ -0,0 +1,835 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+
+"""Conan C/C++ Package Manager Toolchain.
+
+Provides a toolchain and rules to use the [Conan package manager][conan] to
+manage and install third-party C/C++ dependencies. Only works with Conan 1.61.1
+(not Conan 2).
+
+[conan]: https://docs.conan.io/en/latest/introduction.html
+
+## Usage
+
+### Toolchain
+
+First you must define a Conan toolchain, profile, and user-home in the
+top-level package of the `toolchains` cell, i.e. `toolchains//:`. For example:
+
+```
+load("@prelude//toolchains/conan:defs.bzl", "conan_init", "conan_profile", "system_conan_toolchain")
+
+system_conan_toolchain(
+    name = "conan",
+    conan_path = "conan",
+    visibility = ["PUBLIC"],
+)
+
+conan_profile(
+    name = "conan-profile",
+    arch = "x86_64",
+    os = "Linux",
+    build_type = "Release",
+    compiler = "gcc",
+    compiler_version = "11.3",
+    compiler_libcxx = "libstdc++",
+)
+
+conan_init(
+    name = "conan-init",
+    profile = ":conan-profile",
+    visibility = ["PUBLIC"],
+)
+```
+
+### Packages
+
+Then you must define your project dependencies in a `conanfile.txt`. E.g.
+
+```
+[requires]
+zlib/1.2.13
+```
+
+Then you must define targets to generate and update the Conan integration
+targets. E.g.
+
+```
+load(
+    "@prelude//toolchains/conan:defs.bzl",
+    "conan_generate",
+    "conan_lock",
+    "conan_update",
+    "lock_generate",
+)
+
+conan_lock(
+    name = "lock",
+    conanfile = "conanfile.txt",
+    visibility = ["//cpp/conan/import:"],
+)
+
+lock_generate(
+    name = "lock-generate",
+    lockfile = ":lock",
+)
+
+conan_generate(
+    name = "conan-generate",
+    conanfile = "conanfile.txt",
+    lockfile = ":lock",
+)
+
+conan_update(
+    name = "update",
+    lockfile = ":lock",
+    lock_generate = ":lock-generate",
+    conan_generate = ":conan-generate",
+    conanfile = "conanfile.txt",
+    lockfile_name = "conan.lock",
+    targets_name = "conan/BUCK",
+)
+```
+
+On first use, or whenever you change a Conan dependency or the toolchain
+configuration, you must regenerate the import targets. For example:
+
+```
+$ buck2 run //:update
+```
+
+Then you can depend on Conan-provided packages defined in the generated file,
+configured with the `targets_name` attribute to `conan_update`. For example:
+
+```
+cxx_binary(
+    name = "main",
+    srcs = ["main.cpp"],
+    deps = ["//conan:zlib"],
+)
+```
+
+Note that only packages that are declared as direct dependencies in the
+`conanfile.txt` will have public visibility. If you wish to depend on a package
+that was a transitive dependency and is currently private, then you must first
+add it to the `conanfile.txt` and update the import targets.
+
+### Example
+
+See `examples/prelude/cpp/conan` in the Buck2 source repository for a full
+working example.
+
+## Motivation
+
+Buck2 has the ability to build C/C++ libraries natively. However, some C/C++
+projects have complex build systems and are difficult to migrate to a native
+Buck2 build. Other programming languages often have established standard
+package managers and such dependencies can be imported into a Buck2 project
+with the help of that package manager. This module provides such an integration
+for C/C++ with the help of the Conan package manager.
+
+Conan offers a relatively large [community package set][conan-center] and is
+compatible with Linux, macOS, and Windows. It also allows for sufficient
+control to support an integration into Buck2, supports toolchain configuration
+and cross-compilation, and provides a Python extension API.
+
+[conan-center]: https://conan.io/center/
+
+## Design Goals
+
+The Buck2 integration of Conan should fulfill the following design goals:
+
+* The overall build should be controlled by Buck2:
+
+  Which packages are built at which point, which compiler toolchain and
+  configuration is used, where build artifacts are stored, and where
+  dependencies are looked up.
+
+  This enables the use of Buck2's own incremental build and caching
+  functionality. It also enables cross-platform and cross-compilation with
+  the help of Buck2's platforms and toolchains.
+
+* Conan should provide transitive dependencies:
+
+  The user should only have to declare the project's direct third-party C/C++
+  dependencies. The transitive dependency graph, package versions, package
+  downloads, and their build definitions - all these should be provided by
+  Conan.
+
+## Integration
+
+Conan provides a number of control and integration points that are relevant to
+the Buck2 integration:
+
+* Conanfile
+
+  A file `conanfile.txt` defines the direct dependencies of the project. This
+  file is provided by the user, and used by the integration and Conan.
+
+* Lockfile
+
+  Conan generates a lockfile, which contains the set of transitive
+  dependencies, their precise versions, and their inter-dependencies. The
+  integration parses this file to generate build targets for individual Conan
+  packages to build in dependency order.
+
+* Command-Line
+
+  Conan's command-line interface can be used to request a build or fetch of
+  an individual package in the context of a given lockfile. Conan will build
+  only this package, provided that the package's dependencies have been built
+  before and are available in Conan's cache directory. The integration uses
+  this capability to build Conan packages in separate Buck2 build actions.
+
+* Install Location
+
+  Conan stores build artifacts and other data underneath the Conan home
+  directory, which is configurable with the `CONAN_USER_HOME` environment
+  variable. Package dependencies, newly built packages, and other resources
+  must be available under this path. The integration configures a Conan home
+  directory under Buck2's output directory, copies needed dependencies
+  into place before the build, and extracts relevant build results into
+  dedicated output paths after the build.
+
+* Profiles
+
+  Conan profiles can configure the operating system and architecture to
+  target or build on, the compiler and its version, and other tools and
+  settings. The integration uses profiles to expose Buck2's own cxx toolchain
+  and other configuration to Conan.
+
+* Generators
+
+  Conan is designed to integrate with other build systems; this is a
+  necessity in the C/C++ ecosystem, as there is no single standard build
+  system used by all projects. Conan generators can access package metadata,
+  such as exposed libraries or header files, and can generate files to be
+  read by another build system to import Conan-built packages. Buckler is a
+  Conan generator that creates Buck2 targets that import Conan-built packages
+  and can be depended upon by native Buck2 C/C++ targets.
+
+"""
+
+# TODO[AH] May prelude modules load the top-level prelude?
+# This module defines a macro that calls prebuilt_cxx_library,
+# which is provided by the prelude. Alternatively, we could change the
+# prelude to make prebuilt_cxx_library directly importable, or replace the
+# macro by a custom rule that directly constructs the relevant providers.
+load("@prelude//:prelude.bzl", "native") +load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxToolchainInfo") +load("@prelude//utils:utils.bzl", "flatten") + +ConanInitInfo = provider(fields = ["profile", "user_home"]) +ConanLockInfo = provider(fields = ["lockfile"]) +ConanPackageInfo = provider(fields = ["reference", "package_id", "cache_out", "package_out", "cache_tset"]) +ConanProfileInfo = provider(fields = ["config", "inputs"]) +ConanToolchainInfo = provider(fields = ["conan"]) + +def _project_conan_package_dep(value: (str, Artifact)) -> cmd_args: + """Generate dependency flags for conan_package.py""" + return cmd_args(["--dep-reference", value[0], "--dep-cache-out", value[1]]) + +ConanPackageCacheTSet = transitive_set( + args_projections = { + "dep-flags": _project_conan_package_dep, + }, +) + +def _conan_package_extract_impl(ctx: AnalysisContext) -> list[Provider]: + conan_package_extract = ctx.attrs._conan_package_extract[RunInfo] + + cmd = cmd_args([conan_package_extract]) + sub_targets = {} + + for filename in ctx.attrs.files: + output = ctx.actions.declare_output(filename) + cmd.add(["--file-from", filename, "--file-to", output.as_output()]) + if filename in sub_targets: + fail("File-name collision: " + filename) + sub_targets[filename] = [DefaultInfo(default_outputs = [output])] + + i = 0 + for dirname in ctx.attrs.directories: + # Some packages provide overlapping include directories, e.g. + # `include`, and `include/jemalloc`. Such overlapping directories + # cannot both be passed to `prebuilt_cxx_library`'s `include_dirs`. + # This adds a counter prefix to avoid the overlap. + prefix = str(i) + "/" + i += 1 + output = ctx.actions.declare_output(prefix + dirname) + cmd.add(["--directory-from", dirname, "--directory-to", output.as_output()]) + if dirname in sub_targets: + fail("Directory-name collision: " + dirname) + sub_targets[dirname] = [DefaultInfo(default_outputs = [output])] + + cmd.add(["--package", ctx.attrs.package[ConanPackageInfo].package_out]) + ctx.actions.run(cmd, category = "conan_extract") + + return [DefaultInfo(default_outputs = [], sub_targets = sub_targets)] + +_conan_package_extract = rule( + impl = _conan_package_extract_impl, + attrs = { + "directories": attrs.list(attrs.string(), doc = "Directories to extract from the package."), + "files": attrs.list(attrs.string(), doc = "Files to extract from the package."), + "package": attrs.dep(providers = [ConanPackageInfo], doc = "The Conan package directory to extract files from."), + "_conan_package_extract": attrs.dep(providers = [RunInfo], default = "prelude//toolchains/conan:conan_package_extract"), + }, + doc = "Extract files and directories from Conan package directory.", +) + +def conan_component( + name: str, + defines: list[str], + cflags: list[str], + cppflags: list[str], + include_paths: list[str], + libs: list[str], + static_libs: dict[str, list[str]], + shared_libs: dict[str, list[str]], + system_libs: list[str], + deps: list[str], + package: str): + """Import a Conan package component. + + Extracts the relevant files from the Conan package directory and exposes + them as a target that can be depended on by native Buck2 C/C++ targets such + as `cxx_library`. 
+ """ + + extract_name = name + "_extract" + extract_tpl = ":" + extract_name + "[{}]" + extract_include_paths = [extract_tpl.format(p) for p in include_paths] + extract_shared_libs = {name: [extract_tpl.format(lib) for lib in libs] for name, libs in shared_libs.items()} + extract_static_libs = {name: [extract_tpl.format(lib) for lib in libs] for name, libs in static_libs.items()} + + _conan_package_extract( + name = extract_name, + package = package, + files = flatten(static_libs.values() + shared_libs.values()), + directories = include_paths, + ) + + # [Note: Conan exported_deps] We cannot distinguish private and public + # dependencies based on the information exposed by Conan. We default to + # public dependencies, to avoid having to manually specify public + # dependencies when headers need to be reexported. + + if len(libs) == 0: + native.prebuilt_cxx_library( + name = name, + exported_deps = deps, # See [Note: Conan exported_deps] + header_dirs = extract_include_paths, + exported_preprocessor_flags = ["-D" + d for d in defines], + exported_lang_preprocessor_flags = { + "c": cflags, + "cxx": cppflags, + }, + exported_post_linker_flags = ["-l" + lib for lib in system_libs], + ) + elif len(libs) == 1: + lib = libs[0] + if lib in shared_libs: + shared_lib = extract_shared_libs[lib][0] + else: + shared_lib = None + if lib in static_libs: + static_lib = extract_static_libs[lib][0] + else: + static_lib = None + native.prebuilt_cxx_library( + name = name, + exported_deps = deps, # See [Note: Conan exported_deps] + header_dirs = extract_include_paths, + exported_preprocessor_flags = ["-D" + d for d in defines], + exported_lang_preprocessor_flags = { + "c": cflags, + "cxx": cppflags, + }, + exported_post_linker_flags = ["-l" + lib for lib in system_libs], + shared_lib = shared_lib, + static_lib = static_lib, + # TODO[AH] Can we set static_pic_lib, some libs seem to end on _pic? + # TODO[AH] Do we need supports_merged_linking? + # TODO[AH] Do we need supports_shared_library_interface? + ) + else: + # TODO[AH] Implement prebuilt_cxx_library_group. 
+ fail("Support for package components with multiple libraries is not yet implemented.") + #"contacts": attrs.list(attrs.string(), default = []), + #"default_host_platform": attrs.option(attrs.configuration_label(), default = None), + #"deps": attrs.list(attrs.dep(), default = []), + #"exported_deps": attrs.list(attrs.dep(), default = []), + #"exported_platform_deps": attrs.list(attrs.tuple(attrs.regex(), attrs.set(attrs.dep(), sorted = True)), default = []), + #"exported_preprocessor_flags": attrs.list(attrs.string(), default = []), + #"import_libs": attrs.dict(key = attrs.string(), value = attrs.source(), sorted = False, default = {}), + #"include_dirs": attrs.list(attrs.source(), default = []), + #"include_in_android_merge_map_output": attrs.bool(), + #"labels": attrs.list(attrs.string(), default = []), + #"licenses": attrs.list(attrs.source(), default = []), + #"provided_shared_libs": attrs.dict(key = attrs.string(), value = attrs.source(), sorted = False, default = {}), + #"shared_libs": attrs.dict(key = attrs.string(), value = attrs.source(), sorted = False, default = {}), + #"shared_link": attrs.list(attrs.string(), default = []), + #"static_libs": attrs.list(attrs.source(), default = []), + #"static_link": attrs.list(attrs.string(), default = []), + #"static_pic_libs": attrs.list(attrs.source(), default = []), + #"static_pic_link": attrs.list(attrs.string(), default = []), + #"supported_platforms_regex": attrs.option(attrs.regex(), default = None), + #"within_view": attrs.option(attrs.list(attrs.string())), + +def _conan_cxx_libraries_impl(ctx: AnalysisContext) -> list[Provider]: + default_info = DefaultInfo( + default_outputs = ctx.attrs.main[DefaultInfo].default_outputs + flatten([c[DefaultInfo].default_outputs for c in ctx.attrs.components.values()]), + sub_targets = {n: c.providers for n, c in ctx.attrs.components.items()}, + ) + providers = [p for p in ctx.attrs.main.providers if type(p) != "DefaultInfo"] + providers.append(default_info) + return providers + +_conan_cxx_libraries = rule( + impl = _conan_cxx_libraries_impl, + attrs = { + "components": attrs.dict(key = attrs.string(), value = attrs.dep(), doc = "The package's components."), + "main": attrs.dep(doc = "The main package target, depends on all components."), + }, + doc = "Helper rule to bundle Conan package components into a single target.", +) + +def conan_dep(name: str, components: dict[str, str], **kwargs): + """Bundle Conan package components into a single target. + + The target itself represents the entire Conan package, including its + sub-components, if any. The individual components are exposed as sub-targets, + e.g. `:openssl` represents the entire openssl package, while `:openssl[crypto]` + represents only the `crypto` component. 
+ """ + native.cxx_library( + name = "_bundle_" + name, + exported_deps = components.values(), + ) + _conan_cxx_libraries( + name = name, + main = ":_bundle_" + name, + components = components, + **kwargs + ) + +def _conan_generate_impl(ctx: AnalysisContext) -> list[Provider]: + conan_toolchain = ctx.attrs._conan_toolchain[ConanToolchainInfo] + conan_init = ctx.attrs._conan_init[ConanInitInfo] + conan_generate = ctx.attrs._conan_generate[RunInfo] + + install_folder = ctx.actions.declare_output("install-folder") + output_folder = ctx.actions.declare_output("output-folder") + user_home = ctx.actions.declare_output("user-home") + manifests = ctx.actions.declare_output("manifests") + install_info = ctx.actions.declare_output("install-info.json") + trace_log = ctx.actions.declare_output("trace.log") + targets_out = ctx.actions.declare_output(ctx.label.name + ".bzl") + + cmd = cmd_args( + [conan_generate] + + ["--conan", conan_toolchain.conan] + + ["--conan-init", conan_init.user_home] + + ["--buckler", ctx.attrs._buckler] + + ["--install-folder", install_folder.as_output()] + + ["--output-folder", output_folder.as_output()] + + ["--user-home", user_home.as_output()] + + ["--manifests", manifests.as_output()] + + ["--install-info", install_info.as_output()] + + ["--trace-file", trace_log.as_output()] + + ["--conanfile", ctx.attrs.conanfile] + + ["--lockfile", ctx.attrs.lockfile] + + ["--targets-out", targets_out.as_output()], + hidden = [ + conan_init.profile.config, # The profile is inlined in the lockfile. + conan_init.profile.inputs, + ], + ) + ctx.actions.run(cmd, category = "conan_build") + + return [ + DefaultInfo( + default_outputs = [targets_out], + other_outputs = [ + install_folder, + output_folder, + user_home, + manifests, + install_info, + trace_log, + ], + ), + ] + +conan_generate = rule( + impl = _conan_generate_impl, + attrs = { + "conanfile": attrs.source(doc = "The conanfile defining the project dependencies."), + "lockfile": attrs.source(doc = "The Conan lockfile pinning the package versions."), + "_buckler": attrs.source(default = "prelude//toolchains/conan:buckler"), + "_conan_generate": attrs.dep(providers = [RunInfo], default = "prelude//toolchains/conan:conan_generate"), + "_conan_init": attrs.dep(providers = [ConanInitInfo], default = "toolchains//:conan-init"), + "_conan_toolchain": attrs.default_only(attrs.toolchain_dep(default = "toolchains//:conan", providers = [ConanToolchainInfo])), + }, + doc = "Generate Buck2 import targets for Conan packages using the Buckler generator.", +) + +def _conan_init_impl(ctx: AnalysisContext) -> list[Provider]: + conan_toolchain = ctx.attrs._conan_toolchain[ConanToolchainInfo] + conan_init = ctx.attrs._conan_init[RunInfo] + + user_home = ctx.actions.declare_output("user-home") + trace_log = ctx.actions.declare_output("trace.log") + + cmd = cmd_args( + [conan_init] + + ["--conan", conan_toolchain.conan] + + ["--user-home", user_home.as_output()] + + ["--trace-file", trace_log.as_output()], + ) + ctx.actions.run(cmd, category = "conan_init") + + return [ + ConanInitInfo( + user_home = user_home, + profile = ctx.attrs.profile[ConanProfileInfo], + ), + DefaultInfo(default_outputs = [ + user_home, + trace_log, + ]), + ] + +conan_init = rule( + impl = _conan_init_impl, + attrs = { + # TODO[AH] Define separate profiles for + # the target platform (`--profile:build`) and + # exec platform (`--profile:host`). + # This will be needed for cross-compilation. 
+ "profile": attrs.dep(providers = [ConanProfileInfo], doc = "The Conan profile to use."), + "_conan_init": attrs.dep(providers = [RunInfo], default = "prelude//toolchains/conan:conan_init"), + "_conan_toolchain": attrs.default_only(attrs.toolchain_dep(default = "toolchains//:conan", providers = [ConanToolchainInfo])), + }, + doc = "Generate a Conan user-home directory.", +) + +def _conan_lock_impl(ctx: AnalysisContext) -> list[Provider]: + conan_toolchain = ctx.attrs._conan_toolchain[ConanToolchainInfo] + conan_init = ctx.attrs._conan_init[ConanInitInfo] + conan_lock = ctx.attrs._conan_lock[RunInfo] + + lockfile_out = ctx.actions.declare_output("conan.lock") + user_home = ctx.actions.declare_output("user-home") + trace_log = ctx.actions.declare_output("trace.log") + + cmd = cmd_args( + [conan_lock] + + ["--conan", conan_toolchain.conan] + + ["--conan-init", conan_init.user_home] + + ["--profile", conan_init.profile.config] + + ["--user-home", user_home.as_output()] + + ["--trace-file", trace_log.as_output()] + + ["--conanfile", ctx.attrs.conanfile] + + ["--lockfile-out", lockfile_out.as_output()] + + (["--lockfile", ctx.attrs.lockfile] if ctx.attrs.lockfile else []), + hidden = conan_init.profile.inputs, + ) + ctx.actions.run(cmd, category = "conan_lock") + + return [ + ConanLockInfo( + lockfile = lockfile_out, + ), + DefaultInfo( + default_outputs = [lockfile_out], + other_outputs = [user_home, trace_log], + ), + ] + +conan_lock = rule( + impl = _conan_lock_impl, + attrs = { + "conanfile": attrs.source(doc = "The conanfile defining the project dependencies."), + "lockfile": attrs.option(attrs.source(doc = "A pre-existing lockfile to base the dependency resolution on."), default = None), + "_conan_init": attrs.dep(providers = [ConanInitInfo], default = "toolchains//:conan-init"), + "_conan_lock": attrs.dep(providers = [RunInfo], default = "prelude//toolchains/conan:conan_lock"), + "_conan_toolchain": attrs.default_only(attrs.toolchain_dep(default = "toolchains//:conan", providers = [ConanToolchainInfo])), + }, + doc = "Generate a Conan lock-file.", +) + +def _conan_package_impl(ctx: AnalysisContext) -> list[Provider]: + conan_toolchain = ctx.attrs._conan_toolchain[ConanToolchainInfo] + conan_init = ctx.attrs._conan_init[ConanInitInfo] + conan_package = ctx.attrs._conan_package[RunInfo] + + install_folder = ctx.actions.declare_output("install-folder") + output_folder = ctx.actions.declare_output("output-folder") + user_home = ctx.actions.declare_output("user-home") + manifests = ctx.actions.declare_output("manifests") + install_info = ctx.actions.declare_output("install-info.json") + trace_log = ctx.actions.declare_output("trace.log") + cache_out = ctx.actions.declare_output("cache-out") + package_out = ctx.actions.declare_output("package") + + cmd = cmd_args( + [conan_package] + + ["--conan", conan_toolchain.conan] + + ["--conan-init", conan_init.user_home] + + ["--lockfile", ctx.attrs.lockfile] + + ["--reference", ctx.attrs.reference] + + ["--package-id", ctx.attrs.package_id] + + ["--install-folder", install_folder.as_output()] + + ["--output-folder", output_folder.as_output()] + + ["--user-home", user_home.as_output()] + + ["--manifests", manifests.as_output()] + + ["--install-info", install_info.as_output()] + + ["--trace-file", trace_log.as_output()] + + ["--cache-out", cache_out.as_output()] + + ["--package-out", package_out.as_output()], + hidden = [ + conan_init.profile.config, # The profile is inlined in the lockfile. 
+            conan_init.profile.inputs,
+        ],
+    )
+
+    # TODO[AH] Do we need to separate deps and build_deps?
+    # This may become necessary for cross-compilation support.
+    deps = ctx.actions.tset(
+        ConanPackageCacheTSet,
+        children = [
+            dep[ConanPackageInfo].cache_tset
+            for dep in ctx.attrs.deps + ctx.attrs.build_deps
+        ],
+    )
+    cmd.add(deps.project_as_args("dep-flags"))
+
+    ctx.actions.run(cmd, category = "conan_build")
+
+    return [
+        ConanPackageInfo(
+            reference = ctx.attrs.reference,
+            package_id = ctx.attrs.package_id,
+            cache_out = cache_out,
+            package_out = package_out,
+            cache_tset = ctx.actions.tset(ConanPackageCacheTSet, value = (ctx.attrs.reference, cache_out), children = [deps]),
+        ),
+        DefaultInfo(
+            default_outputs = [package_out],
+            other_outputs = [
+                install_folder,
+                output_folder,
+                user_home,
+                manifests,
+                install_info,
+                trace_log,
+                cache_out,
+            ],
+        ),
+    ]
+
+conan_package = rule(
+    impl = _conan_package_impl,
+    attrs = {
+        "build_deps": attrs.list(attrs.dep(providers = [ConanPackageInfo], doc = "Conan build dependencies.")),
+        "deps": attrs.list(attrs.dep(providers = [ConanPackageInfo], doc = "Conan package dependencies.")),
+        "lockfile": attrs.source(doc = "The Conan lockfile defining the package and its dependencies."),
+        "package_id": attrs.string(doc = "The Conan package-id."),
+        "reference": attrs.string(doc = "The Conan package reference `name/version#revision`."),
+        "_conan_init": attrs.dep(providers = [ConanInitInfo], default = "toolchains//:conan-init"),
+        "_conan_package": attrs.dep(providers = [RunInfo], default = "prelude//toolchains/conan:conan_package"),
+        "_conan_toolchain": attrs.default_only(attrs.toolchain_dep(default = "toolchains//:conan", providers = [ConanToolchainInfo])),
+    },
+    doc = "Build a single Conan package.",
+)
+
+def _profile_env_var(name, value) -> cmd_args:
+    # TODO[AH] Do we need `quote = "shell"` here?
+    # Setting it causes Buck2 to escape the `$PROFILE_DIR` prefix set at the
+    # very end, which causes failures in Conan package builds.
+    return cmd_args([name, cmd_args(value, delimiter = " ")], delimiter = "=")
+
+def _make_wrapper_script(ctx, name, tool):
+    wrapper = ctx.actions.declare_output(name)
+    return ctx.actions.write(
+        wrapper,
+        cmd_args([
+            "#!/bin/sh",
+            '_SCRIPTDIR=`dirname "$0"`',
+            cmd_args(
+                "exec",
+                tool,
+                '"$@"',
+                delimiter = " ",
+                relative_to = (wrapper, 1),
+                absolute_prefix = '"$_SCRIPTDIR"/',
+            ),
+        ]),
+        allow_args = True,
+        is_executable = True,
+    )
+
+def _profile_env_tool(ctx, name, tool):
+    """Create a wrapper script and assign it to the profile variable.
+
+    Conan configures the build tools it invokes through environment variables.
+    Some build tools don't accept full command-lines in the environment
+    variables configuring the compiler. E.g. CMake expects `CC` to contain the
+    compiler alone, not a command-line such as `zig cc`. This creates a
+    wrapper script around the provided tool to avoid build failures with tools
+    that are configured as full command lines.
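+
+    The generated wrapper has roughly this shape (the tool path is rewritten
+    relative to the script's own location; `path/to/tool` is a placeholder):
+
+    ```
+    #!/bin/sh
+    _SCRIPTDIR=`dirname "$0"`
+    exec "$_SCRIPTDIR"/path/to/tool "$@"
+    ```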
+ """ + wrapper, inputs = _make_wrapper_script(ctx, name, tool) + return cmd_args(_profile_env_var(name, wrapper), hidden = [tool, inputs]) + +def _conan_profile_impl(ctx: AnalysisContext) -> list[Provider]: + cxx = ctx.attrs._cxx_toolchain[CxxToolchainInfo] + + content = [] + + content.append("[settings]") + content.append(cmd_args(ctx.attrs.arch, format = "arch={}")) + content.append(cmd_args(ctx.attrs.os, format = "os={}")) + content.append(cmd_args(ctx.attrs.build_type, format = "build_type={}")) + + # TODO[AH] Auto-generate the compiler setting based on the toolchain. + # Needs a translation of CxxToolProviderType to compiler setting. + content.append(cmd_args(ctx.attrs.compiler, format = "compiler={}")) + content.append(cmd_args(ctx.attrs.compiler_version, format = "compiler.version={}")) + content.append(cmd_args(ctx.attrs.compiler_libcxx, format = "compiler.libcxx={}")) + + content.append("") + content.append("[env]") + content.append(_profile_env_var("CMAKE_FIND_ROOT_PATH", "")) + + # TODO[AH] Define CMAKE_SYSROOT if needed. + # TODO[AH] Define target CHOST for cross-compilation + content.append(_profile_env_tool(ctx, "AR", cxx.linker_info.archiver)) + if cxx.as_compiler_info: + content.append(_profile_env_tool(ctx, "AS", cxx.as_compiler_info.compiler)) + # TODO[AH] Use asm_compiler_info for Windows + + if cxx.binary_utilities_info: + if cxx.binary_utilities_info.nm: + content.append(_profile_env_tool(ctx, "NM", cxx.binary_utilities_info.nm)) + if cxx.binary_utilities_info.ranlib: + content.append(_profile_env_tool(ctx, "RANLIB", cxx.binary_utilities_info.ranlib)) + if cxx.binary_utilities_info.strip: + content.append(_profile_env_tool(ctx, "STRIP", cxx.binary_utilities_info.strip)) + if cxx.c_compiler_info: + content.append(_profile_env_tool(ctx, "CC", cxx.c_compiler_info.compiler)) + content.append(_profile_env_var("CFLAGS", cxx.c_compiler_info.compiler_flags)) + if cxx.cxx_compiler_info: + content.append(_profile_env_tool(ctx, "CXX", cxx.cxx_compiler_info.compiler)) + content.append(_profile_env_var("CXXFLAGS", cxx.cxx_compiler_info.compiler_flags)) + + output = ctx.actions.declare_output(ctx.label.name) + content = cmd_args( + content, + relative_to = (output, 1), + absolute_prefix = "$PROFILE_DIR/", + ) + _, args_inputs = ctx.actions.write(output, content, allow_args = True) + + return [ + DefaultInfo(default_outputs = [output]), + ConanProfileInfo(config = output, inputs = cmd_args(content, hidden = args_inputs)), + ] + +conan_profile = rule( + impl = _conan_profile_impl, + attrs = { + "arch": attrs.string(doc = "The target architecture"), + "build_type": attrs.string(doc = "The Conan build-type, e.g. Release or Debug"), + "compiler": attrs.string(doc = "The name of the C/C++ compiler, e.g. gcc, clang, or Visual Studio."), + "compiler_libcxx": attrs.string(doc = "The C++ standard library, e.g. libstdc++, or libc++"), + "compiler_version": attrs.string(doc = "The version of the C/C++ compiler, e.g. 
12.2 for gcc, 15 for clang, or 17 for Visual Studio."), + "os": attrs.string(doc = "The target operating system"), + "_cxx_toolchain": attrs.default_only(attrs.toolchain_dep(default = "toolchains//:cxx", providers = [CxxToolchainInfo])), + }, + doc = "Defines a Conan profile.", +) + +def _conan_update_impl(ctx: AnalysisContext) -> list[Provider]: + conan_update = ctx.attrs._conan_update[RunInfo] + + cmd = cmd_args( + [conan_update] + + ["--update-label", str(ctx.label.raw_target())] + + ["--lockfile", ctx.attrs.lockfile] + + ["--lock-targets", ctx.attrs.lock_generate] + + ["--conan-targets", ctx.attrs.conan_generate] + + ["--conanfile", ctx.attrs.conanfile] + + ["--lockfile-out", ctx.attrs.lockfile_name] + + ["--targets-out", ctx.attrs.targets_name], + ) + + return [ + DefaultInfo(default_outputs = []), + RunInfo(args = [cmd]), + ] + +conan_update = rule( + impl = _conan_update_impl, + attrs = { + "conan_generate": attrs.source(doc = "The targets generated by Buckler."), + "conanfile": attrs.source(doc = "The Conanfile."), + "lock_generate": attrs.source(doc = "The targets generated from the Conan lockfile."), + "lockfile": attrs.source(doc = "The generated Conan lockfile."), + "lockfile_name": attrs.string(doc = "Generate a lockfile with this name next to the Conanfile."), + "targets_name": attrs.string(doc = "Generate a TARGETS file with this name next to the Conanfile."), + "_conan_update": attrs.dep(providers = [RunInfo], default = "prelude//toolchains/conan:conan_update"), + }, + doc = "Defines a runnable target that will update the Conan lockfile and import targets.", +) + +def _lock_generate_impl(ctx: AnalysisContext) -> list[Provider]: + lock_generate = ctx.attrs._lock_generate[RunInfo] + + targets_out = ctx.actions.declare_output(ctx.label.name + ".bzl") + + cmd = cmd_args( + [lock_generate] + + ["--lockfile", ctx.attrs.lockfile] + + ["--lockfile-label", str(ctx.attrs.lockfile.owner.raw_target())] + + ["--targets-out", targets_out.as_output()], + ) + ctx.actions.run(cmd, category = "conan_generate") + + return [ + DefaultInfo( + default_outputs = [targets_out], + ), + ] + +lock_generate = rule( + impl = _lock_generate_impl, + attrs = { + "lockfile": attrs.source(doc = "The Conan lockfile defining the package and its dependencies."), + "_lock_generate": attrs.dep(providers = [RunInfo], default = "prelude//toolchains/conan:lock_generate"), + }, + doc = "Generate targets to build individual Conan packages in dependency order based on a Conan lock-file.", +) + +def _system_conan_toolchain_impl(ctx: AnalysisContext) -> list[Provider]: + return [ + DefaultInfo(), + ConanToolchainInfo( + conan = RunInfo(args = [ctx.attrs.conan_path]), + ), + ] + +system_conan_toolchain = rule( + impl = _system_conan_toolchain_impl, + attrs = { + "conan_path": attrs.string(doc = "Path to the Conan executable."), + }, + is_toolchain_rule = True, + doc = "Uses a globally installed Conan executable.", +) diff --git a/prelude/toolchains/conan/lock_generate.py b/prelude/toolchains/conan/lock_generate.py new file mode 100644 index 0000000000000..f236c3242ac1d --- /dev/null +++ b/prelude/toolchains/conan/lock_generate.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
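+
+# For reference, `parse_lockfile` below expects a Conan 1.x lockfile of
+# roughly this JSON shape (fields the script does not read are omitted, and
+# all values are illustrative):
+#
+#   {
+#     "version": "0.4",
+#     "graph_lock": {
+#       "nodes": {
+#         "0": {},
+#         "1": {
+#           "ref": "zlib/1.2.13",
+#           "package_id": "abc123",
+#           "options": "shared=False",
+#           "requires": ["2"],
+#           "build_requires": []
+#         }
+#       }
+#     }
+#   }
+#
+# Node "0" is the root package and is skipped.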
+
+import argparse
+import json
+
+import conan_common
+
+
+def parse_lockfile(lockfile):
+    """Parse a Conan lockfile into a package collection."""
+    with open(lockfile) as f:
+        data = json.load(f)
+
+    assert data["version"] == "0.4", "Unsupported Conan lockfile version"
+    graph = data["graph_lock"]
+    # TODO[AH] Enable Conan revisions for reproducibility
+    # assert graph["revisions_enabled"] == True, "Enable revisions for reproducibility"
+    nodes = graph["nodes"]
+
+    pkgs = {}
+    for key, item in nodes.items():
+        if key == "0":
+            # Skip the root package, it just bundles all dependencies.
+            continue
+        ref = item["ref"]
+        name, _, _, _, _ = conan_common.parse_reference(ref)
+        package_id = item["package_id"]
+        options = item["options"]
+        requires = item.get("requires", [])
+        build_requires = item.get("build_requires", [])
+        # context = item["context"] # TODO[AH] Do we need this?
+        pkgs[key] = {
+            "name": name,
+            "reference": ref,
+            "package_id": package_id,
+            "options": options,
+            "requires": requires,
+            "build_requires": build_requires,
+        }
+
+    return pkgs
+
+
+def generate_targets(lockfile_label, pkgs, targets_out):
+    """Write Buck2 targets for the packages to targets_out."""
+    package_template = """\
+
+conan_package(
+    name = {name!r},
+    lockfile = {lockfile!r},
+    reference = {reference!r},
+    package_id = {package_id!r},
+    deps = {deps!r},
+    build_deps = {build_deps!r},
+)
+"""
+    with open(targets_out, "w") as f:
+        for pkg in pkgs.values():
+            name = "_package_" + pkg["name"]
+            reference = pkg["reference"]
+            package_id = pkg["package_id"]
+            deps = [":_package_" + pkgs[key]["name"] for key in pkg["requires"]]
+            build_deps = [
+                ":_package_" + pkgs[key]["name"] for key in pkg["build_requires"]
+            ]
+            f.write(
+                package_template.format(
+                    name=name,
+                    # TODO[AH] Remove this lockfile and generate a minimal one in the rule.
+                    # Using the full lock file means that any change to the set
+                    # of Conan packages will require a rebuild of all Conan
+                    # packages. Generating minimal lock files with only the
+                    # required information per package will only invalidate those
+                    # packages that were affected by a change. Note that the lock
+                    # file also contains the Conan profile, which defines the
+                    # Buck2-provided C/C++ toolchain. This information would need
+                    # to be included in a minimal lockfile.
+                    lockfile=lockfile_label,
+                    reference=reference,
+                    package_id=package_id,
+                    deps=deps,
+                    build_deps=build_deps,
+                )
+            )
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        prog="lock_generate",
+        description="Generate Buck2 build targets for Conan packages.",
+    )
+    parser.add_argument(
+        "--lockfile",
+        metavar="FILE",
+        type=str,
+        required=True,
+        help="Path to the Conan lock-file.",
+    )
+    parser.add_argument(
+        "--lockfile-label",
+        metavar="LABEL",
+        type=str,
+        required=True,
+        help="Buck2 label for the Conan lock-file.",
+    )
+    parser.add_argument(
+        "--targets-out",
+        metavar="FILE",
+        type=str,
+        required=True,
+        help="Write the generated targets to this file.",
+    )
+    args = parser.parse_args()
+
+    pkgs = parse_lockfile(args.lockfile)
+    generate_targets(args.lockfile_label, pkgs, args.targets_out)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/prelude/toolchains/csharp.bzl b/prelude/toolchains/csharp.bzl
index a92547f2bd2b5..a93072ae2b057 100644
--- a/prelude/toolchains/csharp.bzl
+++ b/prelude/toolchains/csharp.bzl
@@ -15,19 +15,34 @@ def _system_csharp_toolchain_impl(ctx):
         DefaultInfo(),
         CSharpToolchainInfo(
             csc = RunInfo(args = ctx.attrs.csc),
+            framework_dirs = {
+                "net35": "C:\\Program Files (x86)\\Reference Assemblies\\Microsoft\\Framework\\.NETFramework\\v3.5\\Profile\\Client",
+                "net40": "C:\\Program Files (x86)\\Reference Assemblies\\Microsoft\\Framework\\.NETFramework\\v4.0",
+                "net45": "C:\\Program Files (x86)\\Reference Assemblies\\Microsoft\\Framework\\.NETFramework\\v4.5",
+                "net46": "C:\\Program Files (x86)\\Reference Assemblies\\Microsoft\\Framework\\.NETFramework\\v4.6",
+            },
         ),
     ]
 
 system_csharp_toolchain = rule(
     impl = _system_csharp_toolchain_impl,
-    doc = """Example system C# toolchain that invokes csc using the current environment path. Usage:
+    doc = """A C# toolchain that invokes the system C# compiler `csc.exe` using the current environment path.
+    This toolchain requires the Microsoft-provided .NET Framework SDKs (3.5, 4.0, 4.5, 4.6). By default these
+    Framework SDKs are expected at their standard install locations; this can be customized by changing
+    the parameters passed to `system_csharp_toolchain`.
+
+    The `csc` and `framework_dirs` attributes can be Buck targets if you would like to check the C# redist bits
+    into your repo.
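+
+    For example, to point one framework version at a non-default install
+    location (the path below is illustrative):
+
+    system_csharp_toolchain(
+        name = "csharp",
+        csc = "csc.exe",
+        framework_dirs = {"net46": "D:\\sdks\\net46"},
+        visibility = ["PUBLIC"],
+    )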
+ + Usage: system_csharp_toolchain( name = "csharp", csc = "csc.exe", visibility = ["PUBLIC"], )""", attrs = { - "csc": attrs.string(default = "csc.exe"), + "csc": attrs.string(default = "csc.exe", doc = "Executable name or a path to the C# compiler frequently referred to as csc.exe"), + "framework_dirs": attrs.dict(key = attrs.string(), value = attrs.one_of(attrs.source(), attrs.string()), doc = "Dictionary of .NET framework assembly directories, where each key is a supported value in `framework_ver` and the value is a path to a directory containing .net assemblies such as System.dll matching the given framework version"), }, is_toolchain_rule = True, ) diff --git a/prelude/toolchains/cxx.bzl b/prelude/toolchains/cxx.bzl index b172704448c0b..93baa02425f83 100644 --- a/prelude/toolchains/cxx.bzl +++ b/prelude/toolchains/cxx.bzl @@ -9,72 +9,117 @@ load( "@prelude//cxx:cxx_toolchain_types.bzl", "BinaryUtilitiesInfo", "CCompilerInfo", + "CvtresCompilerInfo", "CxxCompilerInfo", + "CxxInternalTools", "CxxPlatformInfo", "CxxToolchainInfo", "LinkerInfo", + "LinkerType", "PicBehavior", + "RcCompilerInfo", "ShlibInterfacesMode", ) load("@prelude//cxx:headers.bzl", "HeaderMode") load("@prelude//cxx:linker.bzl", "is_pdb_generated") -load("@prelude//linking:link_info.bzl", "LinkStyle") +load("@prelude//linking:link_info.bzl", "LinkOrdering", "LinkStyle") load("@prelude//linking:lto.bzl", "LtoMode") -load("@prelude//toolchains/msvc:tools.bzl", "VisualStudio") -load("@prelude//utils:cmd_script.bzl", "ScriptOs", "cmd_script") +load("@prelude//os_lookup:defs.bzl", "OsLookup") +load("@prelude//decls/common.bzl", "buck") + +CxxToolsInfo = provider( + fields = { + "archiver": provider_field(typing.Any, default = None), + "archiver_type": provider_field(typing.Any, default = None), + "asm_compiler": provider_field(typing.Any, default = None), + "asm_compiler_type": provider_field(typing.Any, default = None), + "compiler": provider_field(typing.Any, default = None), + "compiler_type": provider_field(typing.Any, default = None), + "cvtres_compiler": provider_field(typing.Any, default = None), + "cxx_compiler": provider_field(typing.Any, default = None), + "linker": provider_field(typing.Any, default = None), + "linker_type": LinkerType, + "rc_compiler": provider_field(typing.Any, default = None), + }, +) + +def _legacy_equivalent_cxx_tools_info_windows(ctx: AnalysisContext, default_toolchain: CxxToolsInfo) -> CxxToolsInfo: + return CxxToolsInfo( + compiler = default_toolchain.compiler if ctx.attrs.compiler == None or ctx.attrs.compiler == "cl.exe" else ctx.attrs.compiler, + compiler_type = default_toolchain.compiler_type if ctx.attrs.compiler_type == None else ctx.attrs.compiler_type, + cxx_compiler = default_toolchain.cxx_compiler if ctx.attrs.compiler == None or ctx.attrs.compiler == "cl.exe" else ctx.attrs.compiler, + asm_compiler = default_toolchain.asm_compiler, + asm_compiler_type = default_toolchain.asm_compiler_type, + rc_compiler = default_toolchain.rc_compiler if ctx.attrs.rc_compiler == None or ctx.attrs.rc_compiler == "rc.exe" else ctx.attrs.rc_compiler, + cvtres_compiler = default_toolchain.cvtres_compiler if ctx.attrs.cvtres_compiler == None or ctx.attrs.cvtres_compiler == "cvtres.exe" else ctx.attrs.cvtres_compiler, + archiver = default_toolchain.archiver, + archiver_type = default_toolchain.archiver_type, + linker = default_toolchain.linker if ctx.attrs.linker == None or ctx.attrs.linker == "link.exe" else ctx.attrs.linker, + linker_type = default_toolchain.linker_type, + ) + +def 
_legacy_equivalent_cxx_tools_info_non_windows(ctx: AnalysisContext, default_toolchain: CxxToolsInfo) -> CxxToolsInfo: + return CxxToolsInfo( + compiler = default_toolchain.compiler if ctx.attrs.compiler == None else ctx.attrs.compiler, + compiler_type = default_toolchain.compiler_type if ctx.attrs.compiler_type == None else ctx.attrs.compiler_type, + cxx_compiler = default_toolchain.cxx_compiler if ctx.attrs.cxx_compiler == None else ctx.attrs.cxx_compiler, + asm_compiler = default_toolchain.asm_compiler if ctx.attrs.compiler == None else ctx.attrs.compiler, + asm_compiler_type = default_toolchain.asm_compiler_type if ctx.attrs.compiler_type == None else ctx.attrs.compiler_type, + rc_compiler = default_toolchain.rc_compiler if ctx.attrs.rc_compiler == None else ctx.attrs.rc_compiler, + cvtres_compiler = default_toolchain.cvtres_compiler if ctx.attrs.cvtres_compiler == None else ctx.attrs.cvtres_compiler, + archiver = default_toolchain.archiver, + archiver_type = default_toolchain.archiver_type, + linker = default_toolchain.linker if ctx.attrs.linker == None else ctx.attrs.linker, + linker_type = default_toolchain.linker_type, + ) def _system_cxx_toolchain_impl(ctx: AnalysisContext): """ A very simple toolchain that is hardcoded to the current environment. """ - archiver_args = ["ar", "rcs"] - archiver_type = "gnu" - archiver_supports_argfiles = True - asm_compiler = ctx.attrs.compiler - asm_compiler_type = ctx.attrs.compiler_type - compiler = ctx.attrs.compiler - cxx_compiler = ctx.attrs.cxx_compiler - linker = ctx.attrs.linker - linker_type = "gnu" - pic_behavior = PicBehavior("supported") - binary_extension = "" - object_file_extension = "o" - static_library_extension = "a" - shared_library_name_default_prefix = "lib" - shared_library_name_format = "{}.so" - shared_library_versioned_name_format = "{}.so.{}" - additional_linker_flags = [] - if host_info().os.is_macos: - archiver_supports_argfiles = False - linker_type = "darwin" - pic_behavior = PicBehavior("always_enabled") - elif host_info().os.is_windows: - msvc_tools = ctx.attrs.msvc_tools[VisualStudio] - archiver_args = [msvc_tools.lib_exe] - archiver_type = "windows" - asm_compiler = msvc_tools.ml64_exe - asm_compiler_type = "windows_ml64" - if compiler == "cl.exe": - compiler = msvc_tools.cl_exe - cxx_compiler = compiler - if linker == "link.exe": - linker = msvc_tools.link_exe - linker = _windows_linker_wrapper(ctx, linker) - linker_type = "windows" + + os = ctx.attrs._target_os_type[OsLookup].platform + arch_name = ctx.attrs._target_os_type[OsLookup].cpu + cxx_tools_info = ctx.attrs._cxx_tools_info[CxxToolsInfo] + cxx_tools_info = _legacy_equivalent_cxx_tools_info_windows(ctx, cxx_tools_info) if os == "windows" else _legacy_equivalent_cxx_tools_info_non_windows(ctx, cxx_tools_info) + target_name = os + if arch_name: + target_name += "-" + arch_name + return _cxx_toolchain_from_cxx_tools_info(ctx, cxx_tools_info, target_name) + +def _cxx_tools_info_toolchain_impl(ctx: AnalysisContext): + return _cxx_toolchain_from_cxx_tools_info(ctx, ctx.attrs.cxx_tools_info[CxxToolsInfo]) + +def _cxx_toolchain_from_cxx_tools_info(ctx: AnalysisContext, cxx_tools_info: CxxToolsInfo, target_name = "x86_64"): + os = ctx.attrs._target_os_type[OsLookup].platform + archiver_supports_argfiles = os != "macos" + additional_linker_flags = ["-fuse-ld=lld"] if os == "linux" and cxx_tools_info.linker != "g++" and cxx_tools_info.cxx_compiler != "g++" else [] + + if os == "windows": + linker_type = LinkerType("windows") binary_extension = "exe" 
object_file_extension = "obj" static_library_extension = "lib" shared_library_name_default_prefix = "" shared_library_name_format = "{}.dll" shared_library_versioned_name_format = "{}.dll" - additional_linker_flags = ["msvcrt.lib"] pic_behavior = PicBehavior("not_supported") - elif ctx.attrs.linker == "g++" or ctx.attrs.cxx_compiler == "g++": - pass else: - additional_linker_flags = ["-fuse-ld=lld"] + binary_extension = "" + object_file_extension = "o" + static_library_extension = "a" + shared_library_name_default_prefix = "lib" + shared_library_name_format = "{}.so" + shared_library_versioned_name_format = "{}.so.{}" - if ctx.attrs.compiler_type == "clang": + if os == "macos": + linker_type = LinkerType("darwin") + pic_behavior = PicBehavior("always_enabled") + else: + linker_type = LinkerType("gnu") + pic_behavior = PicBehavior("supported") + + if cxx_tools_info.compiler_type == "clang": llvm_link = RunInfo(args = ["llvm-link"]) else: llvm_link = None @@ -82,19 +127,21 @@ def _system_cxx_toolchain_impl(ctx: AnalysisContext): return [ DefaultInfo(), CxxToolchainInfo( - mk_comp_db = ctx.attrs.make_comp_db, + internal_tools = ctx.attrs._internal_tools[CxxInternalTools], linker_info = LinkerInfo( - linker = RunInfo(args = linker), + linker = _run_info(cxx_tools_info.linker), linker_flags = additional_linker_flags + ctx.attrs.link_flags, - archiver = RunInfo(args = archiver_args), - archiver_type = archiver_type, + post_linker_flags = ctx.attrs.post_link_flags, + archiver = _run_info(cxx_tools_info.archiver), + archiver_type = cxx_tools_info.archiver_type, archiver_supports_argfiles = archiver_supports_argfiles, generate_linker_maps = False, lto_mode = LtoMode("none"), type = linker_type, link_binaries_locally = True, + link_libraries_locally = True, archive_objects_locally = True, - use_archiver_flags = False, + use_archiver_flags = True, static_dep_runtime_ld_flags = [], static_pic_dep_runtime_ld_flags = [], shared_dep_runtime_ld_flags = [], @@ -110,83 +157,110 @@ def _system_cxx_toolchain_impl(ctx: AnalysisContext): static_library_extension = static_library_extension, force_full_hybrid_if_capable = False, is_pdb_generated = is_pdb_generated(linker_type, ctx.attrs.link_flags), - produce_interface_from_stub_shared_library = True, + link_ordering = ctx.attrs.link_ordering, ), bolt_enabled = False, binary_utilities_info = BinaryUtilitiesInfo( nm = RunInfo(args = ["nm"]), objcopy = RunInfo(args = ["objcopy"]), + objdump = RunInfo(args = ["objdump"]), ranlib = RunInfo(args = ["ranlib"]), strip = RunInfo(args = ["strip"]), dwp = None, bolt_msdk = None, ), cxx_compiler_info = CxxCompilerInfo( - compiler = RunInfo(args = [cxx_compiler]), + compiler = _run_info(cxx_tools_info.cxx_compiler), preprocessor_flags = [], compiler_flags = ctx.attrs.cxx_flags, - compiler_type = ctx.attrs.compiler_type, + compiler_type = cxx_tools_info.compiler_type, ), c_compiler_info = CCompilerInfo( - compiler = RunInfo(args = [compiler]), + compiler = _run_info(cxx_tools_info.compiler), preprocessor_flags = [], compiler_flags = ctx.attrs.c_flags, - compiler_type = ctx.attrs.compiler_type, + compiler_type = cxx_tools_info.compiler_type, ), as_compiler_info = CCompilerInfo( - compiler = RunInfo(args = [compiler]), - compiler_type = ctx.attrs.compiler_type, + compiler = _run_info(cxx_tools_info.compiler), + compiler_type = cxx_tools_info.compiler_type, ), asm_compiler_info = CCompilerInfo( - compiler = RunInfo(args = [asm_compiler]), - compiler_type = asm_compiler_type, + compiler = _run_info(cxx_tools_info.asm_compiler), + 
compiler_type = cxx_tools_info.asm_compiler_type, + ), + cvtres_compiler_info = CvtresCompilerInfo( + compiler = _run_info(cxx_tools_info.cvtres_compiler), + preprocessor_flags = [], + compiler_flags = ctx.attrs.cvtres_flags, + compiler_type = cxx_tools_info.compiler_type, + ), + rc_compiler_info = RcCompilerInfo( + compiler = _run_info(cxx_tools_info.rc_compiler), + preprocessor_flags = [], + compiler_flags = ctx.attrs.rc_flags, + compiler_type = cxx_tools_info.compiler_type, ), header_mode = HeaderMode("symlink_tree_only"), cpp_dep_tracking_mode = ctx.attrs.cpp_dep_tracking_mode, pic_behavior = pic_behavior, llvm_link = llvm_link, ), - CxxPlatformInfo(name = "x86_64"), + CxxPlatformInfo(name = target_name), ] -def _windows_linker_wrapper(ctx: AnalysisContext, linker: cmd_args) -> cmd_args: - # Linkers pretty much all support @file.txt argument syntax to insert - # arguments from the given text file, usually formatted one argument per - # line. - # - # - GNU ld: https://gcc.gnu.org/onlinedocs/gcc/Overall-Options.html - # - lld is command line compatible with GNU ld - # - MSVC link.exe: https://learn.microsoft.com/en-us/cpp/build/reference/linking?view=msvc-170#link-command-files - # - # However, there is inconsistency in whether they support nesting of @file - # arguments inside of another @file. - # - # We wrap the linker to flatten @file arguments down to 1 level of nesting. - return cmd_script( - ctx = ctx, - name = "windows_linker", - cmd = cmd_args( - ctx.attrs.linker_wrapper[RunInfo], - linker, - ), - os = ScriptOs("windows"), - ) +def _run_info(args): + return None if args == None else RunInfo(args = [args]) system_cxx_toolchain = rule( impl = _system_cxx_toolchain_impl, attrs = { "c_flags": attrs.list(attrs.string(), default = []), - "compiler": attrs.string(default = "cl.exe" if host_info().os.is_windows else "clang"), - "compiler_type": attrs.string(default = "windows" if host_info().os.is_windows else "clang"), # one of CxxToolProviderType + "compiler": attrs.option(attrs.string(), default = None), + "compiler_type": attrs.option(attrs.string(), default = None), # one of CxxToolProviderType "cpp_dep_tracking_mode": attrs.string(default = "makefile"), - "cxx_compiler": attrs.string(default = "cl.exe" if host_info().os.is_windows else "clang++"), + "cvtres_compiler": attrs.option(attrs.string(), default = None), + "cvtres_flags": attrs.list(attrs.string(), default = []), + "cxx_compiler": attrs.option(attrs.string(), default = None), "cxx_flags": attrs.list(attrs.string(), default = []), "link_flags": attrs.list(attrs.string(), default = []), + "link_ordering": attrs.option(attrs.enum(LinkOrdering.values()), default = None), "link_style": attrs.string(default = "shared"), - "linker": attrs.string(default = "link.exe" if host_info().os.is_windows else "clang++"), - "linker_wrapper": attrs.default_only(attrs.exec_dep(providers = [RunInfo], default = "prelude//cxx/tools:linker_wrapper")), - "make_comp_db": attrs.default_only(attrs.exec_dep(providers = [RunInfo], default = "prelude//cxx/tools:make_comp_db")), - "msvc_tools": attrs.default_only(attrs.exec_dep(providers = [VisualStudio], default = "prelude//toolchains/msvc:msvc_tools")), + "linker": attrs.option(attrs.string(), default = None), + "post_link_flags": attrs.list(attrs.string(), default = []), + "rc_compiler": attrs.option(attrs.string(), default = None), + "rc_flags": attrs.list(attrs.string(), default = []), + "_cxx_tools_info": attrs.exec_dep(providers = [CxxToolsInfo], default = "prelude//toolchains/msvc:msvc_tools" if 
host_info().os.is_windows else "prelude//toolchains/cxx/clang:path_clang_tools"), + "_internal_tools": attrs.default_only(attrs.exec_dep(providers = [CxxInternalTools], default = "prelude//cxx/tools:internal_tools")), + "_target_os_type": buck.target_os_type_arg(), + }, + is_toolchain_rule = True, +) + +cxx_tools_info_toolchain = rule( + impl = _cxx_tools_info_toolchain_impl, + attrs = { + "c_flags": attrs.list(attrs.string(), default = []), + "cpp_dep_tracking_mode": attrs.string(default = "makefile"), + "cvtres_flags": attrs.list(attrs.string(), default = []), + "cxx_flags": attrs.list(attrs.string(), default = []), + "cxx_tools_info": attrs.exec_dep(providers = [CxxToolsInfo], default = select({ + "DEFAULT": "prelude//toolchains/cxx/clang:path_clang_tools", + "config//os:windows": "prelude//toolchains/msvc:msvc_tools", + })), + "link_flags": attrs.list(attrs.string(), default = []), + "link_ordering": attrs.option(attrs.enum(LinkOrdering.values()), default = None), + "link_style": attrs.enum( + LinkStyle.values(), + default = "shared", + doc = """ + The default value of the `link_style` attribute for rules that use this toolchain. + """, + ), + "post_link_flags": attrs.list(attrs.string(), default = []), + "rc_flags": attrs.list(attrs.string(), default = []), + "_internal_tools": attrs.default_only(attrs.exec_dep(providers = [CxxInternalTools], default = "prelude//cxx/tools:internal_tools")), + "_target_os_type": buck.target_os_type_arg(), }, is_toolchain_rule = True, ) diff --git a/prelude/toolchains/cxx/clang/BUCK b/prelude/toolchains/cxx/clang/BUCK new file mode 100644 index 0000000000000..9a1337a444e86 --- /dev/null +++ b/prelude/toolchains/cxx/clang/BUCK @@ -0,0 +1,11 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") +load(":tools.bzl", "path_clang_tools") + +oncall("build_infra") + +source_listing() + +path_clang_tools( + name = "path_clang_tools", + visibility = ["PUBLIC"], +) diff --git a/prelude/toolchains/cxx/clang/tools.bzl b/prelude/toolchains/cxx/clang/tools.bzl new file mode 100644 index 0000000000000..05f4a6a1b0db0 --- /dev/null +++ b/prelude/toolchains/cxx/clang/tools.bzl @@ -0,0 +1,32 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//cxx:cxx_toolchain_types.bzl", "LinkerType") +load("@prelude//toolchains:cxx.bzl", "CxxToolsInfo") + +def _path_clang_tools_impl(_ctx) -> list[Provider]: + return [ + DefaultInfo(), + CxxToolsInfo( + compiler = "clang", + compiler_type = "clang", + cxx_compiler = "clang++", + asm_compiler = "clang", + asm_compiler_type = "clang", + rc_compiler = None, + cvtres_compiler = None, + archiver = "ar", + archiver_type = "gnu", + linker = "clang++", + linker_type = LinkerType("gnu"), + ), + ] + +path_clang_tools = rule( + impl = _path_clang_tools_impl, + attrs = {}, +) diff --git a/prelude/toolchains/cxx/zig/defs.bzl b/prelude/toolchains/cxx/zig/defs.bzl index 0ea820400f9f1..a8a98ac1aa4ef 100644 --- a/prelude/toolchains/cxx/zig/defs.bzl +++ b/prelude/toolchains/cxx/zig/defs.bzl @@ -21,6 +21,7 @@ the time of writing this is still experimental. If this is a problem for your use-case then you may wish to rely on a system toolchain or define your own. The toolchain is not fully hermetic as it still relies on system tools like nm. 
+It only works on Linux, and to a limited extent on MacOS. [zig-cc-announcement]: https://andrewkelley.me/post/zig-cc-powerful-drop-in-replacement-gcc-clang.html @@ -31,7 +32,7 @@ the toolchain like so: `toolchains//BUILD` ```bzl -load("@prelude//toolchains/cxx:zig.bzl", "download_zig_distribution", "cxx_zig_toolchain") +load("@prelude//toolchains/cxx/zig:defs.bzl", "download_zig_distribution", "cxx_zig_toolchain") download_zig_distribution( name = "zig", @@ -49,7 +50,7 @@ To define toolchains for multiple platforms and configure cross-compilation you can configure the toolchain like so: ```bzl -load("@prelude//toolchains/cxx:zig.bzl", "download_zig_distribution", "cxx_zig_toolchain") +load("@prelude//toolchains/cxx/zig:defs.bzl", "download_zig_distribution", "cxx_zig_toolchain") download_zig_distribution( name = "zig-x86_64-linux", @@ -75,9 +76,9 @@ download_zig_distribution( alias( name = "zig", actual = select({ - "prelude//os:linux": ":zig-x86_64-linux", - "prelude//os:macos": ":zig-x86_64-macos", - "prelude//os:windows": ":zig-x86_64-windows", + "@prelude//os:linux": ":zig-x86_64-linux", + "@prelude//os:macos": ":zig-x86_64-macos", + "@prelude//os:windows": ":zig-x86_64-windows", }), ) @@ -85,9 +86,9 @@ cxx_zig_toolchain( name = "cxx", distribution = ":zig", target = select({ - "prelude//os:linux": "x86_64-linux-gnu", - "prelude//os:macos": "x86_64-macos-gnu", - "prelude//os:windows": "x86_64-windows-gnu", + "@prelude//os:linux": "x86_64-linux-gnu", + "@prelude//os:macos": "x86_64-macos-gnu", + "@prelude//os:windows": "x86_64-windows-gnu", }), visibility = ["PUBLIC"], ) @@ -99,6 +100,7 @@ load( "BinaryUtilitiesInfo", "CCompilerInfo", "CxxCompilerInfo", + "CxxInternalTools", "LinkerInfo", "ShlibInterfacesMode", "StripFlagsInfo", @@ -116,13 +118,16 @@ load( "@prelude//linking:link_info.bzl", "LinkStyle", ) +load( + "@prelude//utils:cmd_script.bzl", + "ScriptOs", + "cmd_script", +) load( ":releases.bzl", "releases", ) -DEFAULT_MAKE_COMP_DB = "prelude//cxx/tools:make_comp_db" - ZigReleaseInfo = provider( # @unsorted-dict-items fields = { @@ -166,11 +171,18 @@ def _zig_distribution_impl(ctx: AnalysisContext) -> list[Provider]: dst = ctx.actions.declare_output("zig") path_tpl = "{}/" + ctx.attrs.prefix + "/zig" + ctx.attrs.suffix src = cmd_args(ctx.attrs.dist[DefaultInfo].default_outputs[0], format = path_tpl) - ctx.actions.run(["ln", "-srf", src, dst.as_output()], category = "cp_compiler") + ctx.actions.run( + ["ln", "-sf", cmd_args(src, relative_to = (dst, 1)), dst.as_output()], + category = "cp_compiler", + ) - compiler = cmd_args([dst]) - compiler.hidden(ctx.attrs.dist[DefaultInfo].default_outputs) - compiler.hidden(ctx.attrs.dist[DefaultInfo].other_outputs) + compiler = cmd_args( + [dst], + hidden = [ + ctx.attrs.dist[DefaultInfo].default_outputs, + ctx.attrs.dist[DefaultInfo].other_outputs, + ], + ) return [ ctx.attrs.dist[DefaultInfo], @@ -216,13 +228,15 @@ def _http_archive_impl(ctx: AnalysisContext) -> list[Provider]: [ cmd_args(output, format = "mkdir -p {}"), cmd_args(output, format = "cd {}"), - cmd_args(flags, archive, delimiter = " ").relative_to(output), + cmd_args(flags, archive, delimiter = " ", relative_to = output), ], is_executable = True, allow_args = True, ) - ctx.actions.run(cmd_args(["/bin/sh", script]) - .hidden([archive, output.as_output()]), category = "http_archive") + ctx.actions.run( + cmd_args(["/bin/sh", script], hidden = [archive, output.as_output()]), + category = "http_archive", + ) return [DefaultInfo(default_output = output)] @@ -294,7 +308,11 @@ def 
_get_linker_type(os: str) -> str:
     if os == "linux":
         return "gnu"
     elif os == "macos" or os == "freebsd":
-        return "darwin"
+        # TODO[AH] return "darwin".
+        # The cc rules emit linker flags on MacOS that are not supported by Zig's linker.
+        # Declaring the linker as GNU style is not entirely correct; however, it works better
+        # than declaring Darwin style at this point. See https://github.com/facebook/buck2/issues/470
+        return "gnu"
     elif os == "windows":
         return "windows"
     else:
@@ -304,28 +322,53 @@ def _cxx_zig_toolchain_impl(ctx: AnalysisContext) -> list[Provider]:
     dist = ctx.attrs.distribution[ZigDistributionInfo]
     zig = ctx.attrs.distribution[RunInfo]
     target = ["-target", ctx.attrs.target] if ctx.attrs.target else []
+    zig_cc = cmd_script(
+        ctx = ctx,
+        name = "zig_cc",
+        cmd = cmd_args(zig, "cc"),
+        os = ScriptOs("windows" if dist.os == "windows" else "unix"),
+    )
+    zig_cxx = cmd_script(
+        ctx = ctx,
+        name = "zig_cxx",
+        cmd = cmd_args(zig, "c++"),
+        os = ScriptOs("windows" if dist.os == "windows" else "unix"),
+    )
+    zig_ar = cmd_script(
+        ctx = ctx,
+        name = "zig_ar",
+        cmd = cmd_args(zig, "ar"),
+        os = ScriptOs("windows" if dist.os == "windows" else "unix"),
+    )
+    zig_ranlib = cmd_script(
+        ctx = ctx,
+        name = "zig_ranlib",
+        cmd = cmd_args(zig, "ranlib"),
+        os = ScriptOs("windows" if dist.os == "windows" else "unix"),
+    )
     return [ctx.attrs.distribution[DefaultInfo]] + cxx_toolchain_infos(
+        internal_tools = ctx.attrs._cxx_internal_tools[CxxInternalTools],
         platform_name = dist.arch,
         c_compiler_info = CCompilerInfo(
-            compiler = RunInfo(args = cmd_args([zig, "cc"])),
+            compiler = RunInfo(args = cmd_args(zig_cc)),
             compiler_type = "clang",
-            compiler_flags = cmd_args(target + ctx.attrs.c_compiler_flags),
+            compiler_flags = cmd_args(target, ctx.attrs.c_compiler_flags),
             #preprocessor = None,
             #preprocessor_type = None,
             preprocessor_flags = cmd_args(ctx.attrs.c_preprocessor_flags),
             #dep_files_processor = None,
         ),
         cxx_compiler_info = CxxCompilerInfo(
-            compiler = RunInfo(args = cmd_args([zig, "c++"])),
+            compiler = RunInfo(args = cmd_args(zig_cxx)),
             compiler_type = "clang",
-            compiler_flags = cmd_args(target + ctx.attrs.cxx_compiler_flags),
+            compiler_flags = cmd_args(target, ctx.attrs.cxx_compiler_flags),
             #preprocessor = None,
             #preprocessor_type = None,
             preprocessor_flags = cmd_args(ctx.attrs.cxx_preprocessor_flags),
             #dep_files_processor = None,
         ),
         linker_info = LinkerInfo(
-            archiver = RunInfo(args = cmd_args([zig, "ar"])),
+            archiver = RunInfo(args = cmd_args(zig_ar)),
             archiver_type = "gnu",
             archiver_supports_argfiles = True,
             #archive_contents = None,
@@ -337,12 +380,11 @@ def _cxx_zig_toolchain_impl(ctx: AnalysisContext) -> list[Provider]:
             link_style = LinkStyle(ctx.attrs.link_style),
             link_weight = 1,
             #link_ordering = None,
-            linker = RunInfo(args = cmd_args([zig, "c++"])),
-            linker_flags = cmd_args(target + ctx.attrs.linker_flags),
+            linker = RunInfo(args = cmd_args(zig_cxx)),
+            linker_flags = cmd_args(target, ctx.attrs.linker_flags),
             #lto_mode = None, # TODO support LTO
             object_file_extension = "o",
             #mk_shlib_intf = None, # not needed if shlib_interfaces = "disabled"
-            produce_interface_from_stub_shared_library = True,
             shlib_interfaces = ShlibInterfacesMode("disabled"),
             shared_dep_runtime_ld_flags = ctx.attrs.shared_dep_runtime_ld_flags,
             shared_library_name_default_prefix = "lib",
@@ -364,7 +406,7 @@ def _cxx_zig_toolchain_impl(ctx: AnalysisContext) -> list[Provider]:
             dwp = None,
             nm = RunInfo(args = ["nm"]), # not included in the zig distribution.
objcopy = RunInfo(args = ["objcopy"]), # not included in the zig distribution. - ranlib = RunInfo(args = cmd_args([zig, "ranlib"])), + ranlib = RunInfo(args = cmd_args(zig_ranlib)), strip = RunInfo(args = ["strip"]), # not included in the zig distribution. ), header_mode = HeaderMode("symlink_tree_only"), # header map modes require mk_hmap @@ -374,7 +416,6 @@ def _cxx_zig_toolchain_impl(ctx: AnalysisContext) -> list[Provider]: #as_compiler_info = None, #hip_compiler_info = None, #cuda_compiler_info = None, - mk_comp_db = ctx.attrs.make_comp_db, #mk_hmap = None, #use_distributed_thinlto = False, #use_dep_files = False, # requires dep_files_processor @@ -396,9 +437,14 @@ cxx_zig_toolchain = rule( "cxx_compiler_flags": attrs.list(attrs.arg(), default = []), "cxx_preprocessor_flags": attrs.list(attrs.arg(), default = []), "distribution": attrs.exec_dep(providers = [RunInfo, ZigDistributionInfo]), - "link_style": attrs.enum(LinkStyle.values(), default = "static"), + "link_style": attrs.enum( + LinkStyle.values(), + default = "static", + doc = """ + The default value of the `link_style` attribute for rules that use this toolchain. + """, + ), "linker_flags": attrs.list(attrs.arg(), default = []), - "make_comp_db": attrs.dep(providers = [RunInfo], default = DEFAULT_MAKE_COMP_DB), "shared_dep_runtime_ld_flags": attrs.list(attrs.arg(), default = []), "shared_library_interface_flags": attrs.list(attrs.string(), default = []), "static_dep_runtime_ld_flags": attrs.list(attrs.arg(), default = []), @@ -407,6 +453,7 @@ cxx_zig_toolchain = rule( "strip_debug_flags": attrs.option(attrs.list(attrs.arg()), default = None), "strip_non_global_flags": attrs.option(attrs.list(attrs.arg()), default = None), "target": attrs.option(attrs.string(), default = None), + "_cxx_internal_tools": attrs.default_only(attrs.dep(providers = [CxxInternalTools], default = "prelude//cxx/tools:internal_tools")), }, is_toolchain_rule = True, ) diff --git a/prelude/toolchains/cxx/zig/releases.bzl b/prelude/toolchains/cxx/zig/releases.bzl index 72421943dbb6d..7d54be6f1cc65 100644 --- a/prelude/toolchains/cxx/zig/releases.bzl +++ b/prelude/toolchains/cxx/zig/releases.bzl @@ -23,6 +23,209 @@ releases = { "tarball": "https://ziglang.org/download/0.1.1/zig-win64-0.1.1.zip", }, }, + "0.10.0": { + "aarch64-linux": { + "shasum": "09ef50c8be73380799804169197820ee78760723b0430fa823f56ed42b06ea0f", + "size": "40387688", + "tarball": "https://ziglang.org/download/0.10.0/zig-linux-aarch64-0.10.0.tar.xz", + }, + "aarch64-macos": { + "shasum": "02f7a7839b6a1e127eeae22ea72c87603fb7298c58bc35822a951479d53c7557", + "size": "40602664", + "tarball": "https://ziglang.org/download/0.10.0/zig-macos-aarch64-0.10.0.tar.xz", + }, + "aarch64-windows": { + "shasum": "1bbda8d123d44f3ae4fa90d0da04b1e9093c3f9ddae3429a4abece1e1c0bf19a", + "size": "69332389", + "tarball": "https://ziglang.org/download/0.10.0/zig-windows-aarch64-0.10.0.zip", + }, + "armv7a-linux": { + "shasum": "7201b2e89cd7cc2dde95d39485fd7d5641ba67dc6a9a58c036cb4c308d2e82de", + "size": "50805936", + "tarball": "https://ziglang.org/download/0.10.0/zig-linux-armv7a-0.10.0.tar.xz", + }, + "bootstrap": { + "shasum": "c13dc70c4ff4c09f749adc0d473cbd3942991dd4d1bd2d860fbf257d8c1bbabf", + "size": "45625516", + "tarball": "https://ziglang.org/download/0.10.0/zig-bootstrap-0.10.0.tar.xz", + }, + "date": "2022-10-31", + "docs": "https://ziglang.org/documentation/0.10.0/", + "i386-linux": { + "shasum": "dac8134f1328c50269f3e50b334298ec7916cb3b0ef76927703ddd1c96fd0115", + "size": "48451732", + "tarball": 
"https://ziglang.org/download/0.10.0/zig-linux-i386-0.10.0.tar.xz", + }, + "notes": "https://ziglang.org/download/0.10.0/release-notes.html", + "riscv64-linux": { + "shasum": "2a126f3401a7a7efc4b454f0a85c133db1af5a9dfee117f172213b7cbd47bfba", + "size": "42272968", + "tarball": "https://ziglang.org/download/0.10.0/zig-linux-riscv64-0.10.0.tar.xz", + }, + "src": { + "shasum": "d8409f7aafc624770dcd050c8fa7e62578be8e6a10956bca3c86e8531c64c136", + "size": "14530912", + "tarball": "https://ziglang.org/download/0.10.0/zig-0.10.0.tar.xz", + }, + "stdDocs": "https://ziglang.org/documentation/0.10.0/std/", + "x86_64-freebsd": { + "shasum": "dd77afa2a8676afbf39f7d6068eda81b0723afd728642adaac43cb2106253d65", + "size": "44056504", + "tarball": "https://ziglang.org/download/0.10.0/zig-freebsd-x86_64-0.10.0.tar.xz", + }, + "x86_64-linux": { + "shasum": "631ec7bcb649cd6795abe40df044d2473b59b44e10be689c15632a0458ddea55", + "size": "44142400", + "tarball": "https://ziglang.org/download/0.10.0/zig-linux-x86_64-0.10.0.tar.xz", + }, + "x86_64-macos": { + "shasum": "3a22cb6c4749884156a94ea9b60f3a28cf4e098a69f08c18fbca81c733ebfeda", + "size": "45175104", + "tarball": "https://ziglang.org/download/0.10.0/zig-macos-x86_64-0.10.0.tar.xz", + }, + "x86_64-windows": { + "shasum": "a66e2ff555c6e48781de1bcb0662ef28ee4b88af3af2a577f7b1950e430897ee", + "size": "73181558", + "tarball": "https://ziglang.org/download/0.10.0/zig-windows-x86_64-0.10.0.zip", + }, + }, + "0.10.1": { + "aarch64-linux": { + "shasum": "db0761664f5f22aa5bbd7442a1617dd696c076d5717ddefcc9d8b95278f71f5d", + "size": "40321280", + "tarball": "https://ziglang.org/download/0.10.1/zig-linux-aarch64-0.10.1.tar.xz", + }, + "aarch64-macos": { + "shasum": "b9b00477ec5fa1f1b89f35a7d2a58688e019910ab80a65eac2a7417162737656", + "size": "40517896", + "tarball": "https://ziglang.org/download/0.10.1/zig-macos-aarch64-0.10.1.tar.xz", + }, + "aarch64-windows": { + "shasum": "ece93b0d77b2ab03c40db99ef7ccbc63e0b6bd658af12b97898960f621305428", + "size": "69417459", + "tarball": "https://ziglang.org/download/0.10.1/zig-windows-aarch64-0.10.1.zip", + }, + "bootstrap": { + "shasum": "9f5781210b9be8f832553d160851635780f9bd71816065351ab29cfd8968f5e9", + "size": "43971816", + "tarball": "https://ziglang.org/download/0.10.1/zig-bootstrap-0.10.1.tar.xz", + }, + "date": "2023-01-19", + "docs": "https://ziglang.org/documentation/0.10.1/", + "i386-linux": { + "shasum": "8c710ca5966b127b0ee3efba7310601ee57aab3dd6052a082ebc446c5efb2316", + "size": "48367388", + "tarball": "https://ziglang.org/download/0.10.1/zig-linux-i386-0.10.1.tar.xz", + }, + "notes": "https://ziglang.org/download/0.10.1/release-notes.html", + "riscv64-linux": { + "shasum": "9db5b59a5112b8beb995094ba800e88b0060e9cf7cfadf4dc3e666c9010dc77b", + "size": "42196008", + "tarball": "https://ziglang.org/download/0.10.1/zig-linux-riscv64-0.10.1.tar.xz", + }, + "src": { + "shasum": "69459bc804333df077d441ef052ffa143d53012b655a51f04cfef1414c04168c", + "size": "15143112", + "tarball": "https://ziglang.org/download/0.10.1/zig-0.10.1.tar.xz", + }, + "stdDocs": "https://ziglang.org/documentation/0.10.1/std/", + "x86_64-linux": { + "shasum": "6699f0e7293081b42428f32c9d9c983854094bd15fee5489f12c4cf4518cc380", + "size": "44085596", + "tarball": "https://ziglang.org/download/0.10.1/zig-linux-x86_64-0.10.1.tar.xz", + }, + "x86_64-macos": { + "shasum": "02483550b89d2a3070c2ed003357fd6e6a3059707b8ee3fbc0c67f83ca898437", + "size": "45119596", + "tarball": "https://ziglang.org/download/0.10.1/zig-macos-x86_64-0.10.1.tar.xz", + }, + 
"x86_64-windows": { + "shasum": "5768004e5e274c7969c3892e891596e51c5df2b422d798865471e05049988125", + "size": "73259729", + "tarball": "https://ziglang.org/download/0.10.1/zig-windows-x86_64-0.10.1.zip", + }, + }, + "0.11.0": { + "aarch64-linux": { + "shasum": "956eb095d8ba44ac6ebd27f7c9956e47d92937c103bf754745d0a39cdaa5d4c6", + "size": "41492432", + "tarball": "https://ziglang.org/download/0.11.0/zig-linux-aarch64-0.11.0.tar.xz", + }, + "aarch64-macos": { + "shasum": "c6ebf927bb13a707d74267474a9f553274e64906fd21bf1c75a20bde8cadf7b2", + "size": "43855096", + "tarball": "https://ziglang.org/download/0.11.0/zig-macos-aarch64-0.11.0.tar.xz", + }, + "aarch64-windows": { + "shasum": "5d4bd13db5ecb0ddc749231e00f125c1d31087d708e9ff9b45c4f4e13e48c661", + "size": "73883137", + "tarball": "https://ziglang.org/download/0.11.0/zig-windows-aarch64-0.11.0.zip", + }, + "armv7a-linux": { + "shasum": "aebe8bbeca39f13f9b7304465f9aee01ab005d243836bd40f4ec808093dccc9b", + "size": "42240664", + "tarball": "https://ziglang.org/download/0.11.0/zig-linux-armv7a-0.11.0.tar.xz", + }, + "bootstrap": { + "shasum": "38dd9e17433c7ce5687c48fa0a757462cbfcbe75d9d5087d14ebbe00efd21fdc", + "size": "43227592", + "tarball": "https://ziglang.org/download/0.11.0/zig-bootstrap-0.11.0.tar.xz", + }, + "date": "2023-08-04", + "docs": "https://ziglang.org/documentation/0.11.0/", + "notes": "https://ziglang.org/download/0.11.0/release-notes.html", + "powerpc-linux": { + "shasum": "70a5f9668a66fb2a91a7c3488b15bcb568e1f9f44b95cd10075c138ad8c42864", + "size": "44539972", + "tarball": "https://ziglang.org/download/0.11.0/zig-linux-powerpc-0.11.0.tar.xz", + }, + "powerpc64le-linux": { + "shasum": "75260e87325e820a278cf9e74f130c7b3d84c0b5197afb2e3c85eff3fcedd48d", + "size": "44656184", + "tarball": "https://ziglang.org/download/0.11.0/zig-linux-powerpc64le-0.11.0.tar.xz", + }, + "riscv64-linux": { + "shasum": "24a478937eddb507e96d60bd4da00de9092b3f0920190eb45c4c99c946b00ed5", + "size": "43532324", + "tarball": "https://ziglang.org/download/0.11.0/zig-linux-riscv64-0.11.0.tar.xz", + }, + "src": { + "shasum": "72014e700e50c0d3528cef3adf80b76b26ab27730133e8202716a187a799e951", + "size": "15275316", + "tarball": "https://ziglang.org/download/0.11.0/zig-0.11.0.tar.xz", + }, + "stdDocs": "https://ziglang.org/documentation/0.11.0/std/", + "x86-linux": { + "shasum": "7b0dc3e0e070ae0e0d2240b1892af6a1f9faac3516cae24e57f7a0e7b04662a8", + "size": "49824456", + "tarball": "https://ziglang.org/download/0.11.0/zig-linux-x86-0.11.0.tar.xz", + }, + "x86-windows": { + "shasum": "e72b362897f28c671633e650aa05289f2e62b154efcca977094456c8dac3aefa", + "size": "81576961", + "tarball": "https://ziglang.org/download/0.11.0/zig-windows-x86-0.11.0.zip", + }, + "x86_64-freebsd": { + "shasum": "ea430327f9178377b79264a1d492868dcff056cd76d43a6fb00719203749e958", + "size": "46432140", + "tarball": "https://ziglang.org/download/0.11.0/zig-freebsd-x86_64-0.11.0.tar.xz", + }, + "x86_64-linux": { + "shasum": "2d00e789fec4f71790a6e7bf83ff91d564943c5ee843c5fd966efc474b423047", + "size": "44961892", + "tarball": "https://ziglang.org/download/0.11.0/zig-linux-x86_64-0.11.0.tar.xz", + }, + "x86_64-macos": { + "shasum": "1c1c6b9a906b42baae73656e24e108fd8444bb50b6e8fd03e9e7a3f8b5f05686", + "size": "47189164", + "tarball": "https://ziglang.org/download/0.11.0/zig-macos-x86_64-0.11.0.tar.xz", + }, + "x86_64-windows": { + "shasum": "142caa3b804d86b4752556c9b6b039b7517a08afa3af842645c7e2dcd125f652", + "size": "77216743", + "tarball": 
"https://ziglang.org/download/0.11.0/zig-windows-x86_64-0.11.0.zip", + }, + }, "0.2.0": { "date": "2018-03-15", "docs": "https://ziglang.org/documentation/0.2.0/", @@ -597,43 +800,78 @@ releases = { }, "master": { "aarch64-linux": { - "shasum": "a90b52a968b9176ab7c2d8fb1b7b84f0e7503dc03d7791d7c5286f1ed9ad5eed", - "size": "38035988", - "tarball": "https://ziglang.org/builds/zig-linux-aarch64-0.10.0-dev.4247+3234e8de3.tar.xz", + "shasum": "0f1cd21441d69d1379e9bb3c76e2039b10156dd7dcd920a08d0e8c998e1fdb62", + "size": "43357640", + "tarball": "https://ziglang.org/builds/zig-linux-aarch64-0.12.0-dev.1298+da06269d7.tar.xz", }, "aarch64-macos": { - "shasum": "d435855e9b62a6aee78e4d707debf137ac3e85a9662ffa47267be56149333f06", - "size": "40986992", - "tarball": "https://ziglang.org/builds/zig-macos-aarch64-0.10.0-dev.4247+3234e8de3.tar.xz", + "shasum": "5edd4f9e88eb6864f1818ecf81d2d489cb10845a4e84e6ebcee06566b205e769", + "size": "45715512", + "tarball": "https://ziglang.org/builds/zig-macos-aarch64-0.12.0-dev.1298+da06269d7.tar.xz", + }, + "aarch64-windows": { + "shasum": "bf3ec738e94a1db17df866590d4e6af4fce8f7150ea71fda0ce5d2656012134c", + "size": "76173076", + "tarball": "https://ziglang.org/builds/zig-windows-aarch64-0.12.0-dev.1298+da06269d7.zip", }, - "date": "2022-10-05", + "armv7a-linux": { + "shasum": "a09495d944a31affb3bc1f9bde36e0924d54fd91d1aebba470368491d9e7451b", + "size": "44075000", + "tarball": "https://ziglang.org/builds/zig-linux-armv7a-0.12.0-dev.1298+da06269d7.tar.xz", + }, + "bootstrap": { + "shasum": "e69bbfe3393a39a4963c462ec1b56cea449671af55584f1eaa8be6dd14e2912f", + "size": "44387500", + "tarball": "https://ziglang.org/builds/zig-bootstrap-0.12.0-dev.1298+da06269d7.tar.xz", + }, + "date": "2023-10-27", "docs": "https://ziglang.org/documentation/master/", + "powerpc-linux": { + "shasum": "4f9bea964887426e5dc4ed2a17c5b283c3afbda01aa6bab1ff5789e555b8e1b0", + "size": "46468592", + "tarball": "https://ziglang.org/builds/zig-linux-powerpc-0.12.0-dev.1298+da06269d7.tar.xz", + }, + "powerpc64le-linux": { + "shasum": "703e302cca1580c55746d5374d160a936261243c5b0ae9712fa7faa6722554da", + "size": "46675648", + "tarball": "https://ziglang.org/builds/zig-linux-powerpc64le-0.12.0-dev.1298+da06269d7.tar.xz", + }, + "riscv64-linux": { + "shasum": "8fee2a5816ac923d7674f96c51bccf047aaa87b572fa6c285cf9f55336554cf5", + "size": "45362356", + "tarball": "https://ziglang.org/builds/zig-linux-riscv64-0.12.0-dev.1298+da06269d7.tar.xz", + }, "src": { - "shasum": "1fe9fb34ef15d433bd1496782d1a645e3a2122455d6aad0294502ae2f416c7e4", - "size": "15824808", - "tarball": "https://ziglang.org/builds/zig-0.10.0-dev.4247+3234e8de3.tar.xz", + "shasum": "2cb26538f672d11a5e8ec5f4ce777dfa275655eb88401cc7a959d07fe226911d", + "size": "15945864", + "tarball": "https://ziglang.org/builds/zig-0.12.0-dev.1298+da06269d7.tar.xz", }, "stdDocs": "https://ziglang.org/documentation/master/std/", - "version": "0.10.0-dev.4247+3234e8de3", - "x86_64-freebsd": { - "shasum": "2555d683f7c8ba903c55c218f64963783f769736b6d6a5a8382e575df82234b5", - "size": "40954156", - "tarball": "https://ziglang.org/builds/zig-freebsd-x86_64-0.10.0-dev.4247+3234e8de3.tar.xz", + "version": "0.12.0-dev.1298+da06269d7", + "x86-linux": { + "shasum": "83feb8c5dcfc93b4d6261458d19804691595211984e2d5bda5793d98303e387f", + "size": "52006108", + "tarball": "https://ziglang.org/builds/zig-linux-x86-0.12.0-dev.1298+da06269d7.tar.xz", + }, + "x86-windows": { + "shasum": "62dd4748a1fa794bf786a4b26496caca87f2fed43f8493920723406c0acaddb0", + "size": "84111935", + 
"tarball": "https://ziglang.org/builds/zig-windows-x86-0.12.0-dev.1298+da06269d7.zip", }, "x86_64-linux": { - "shasum": "5ce6b50eae7a787b7e6e002e3b14cb8a149359df1941cf701e99ded365c9895e", - "size": "44178932", - "tarball": "https://ziglang.org/builds/zig-linux-x86_64-0.10.0-dev.4247+3234e8de3.tar.xz", + "shasum": "3607099807dca909935fb9249c16f4bfed6e58872ec1a7fc1eb65776d6d32111", + "size": "46985312", + "tarball": "https://ziglang.org/builds/zig-linux-x86_64-0.12.0-dev.1298+da06269d7.tar.xz", }, "x86_64-macos": { - "shasum": "81d7a9615b00bce617602fd40fe4e0b3bb962ff0bb4595cf78be067385bce135", - "size": "44180748", - "tarball": "https://ziglang.org/builds/zig-macos-x86_64-0.10.0-dev.4247+3234e8de3.tar.xz", + "shasum": "8d27c61c36454ecffe56ae00055f8354289464c1ea507182cccab585ad31d115", + "size": "49203308", + "tarball": "https://ziglang.org/builds/zig-macos-x86_64-0.12.0-dev.1298+da06269d7.tar.xz", }, "x86_64-windows": { - "shasum": "555ac169c7e35f1dd98ea86bc514e80f7527b242c22d44a34a166c80d4441ceb", - "size": "69140534", - "tarball": "https://ziglang.org/builds/zig-windows-x86_64-0.10.0-dev.4247+3234e8de3.zip", + "shasum": "057e0c412598bcedd941c5444c0439af321647be53ed274eed2a938d4699042f", + "size": "79585336", + "tarball": "https://ziglang.org/builds/zig-windows-x86_64-0.12.0-dev.1298+da06269d7.zip", }, }, } diff --git a/prelude/toolchains/demo.bzl b/prelude/toolchains/demo.bzl index 608602fe6fe2a..7448804a32c5e 100644 --- a/prelude/toolchains/demo.bzl +++ b/prelude/toolchains/demo.bzl @@ -7,9 +7,11 @@ load("@prelude//toolchains:cxx.bzl", "system_cxx_toolchain") load("@prelude//toolchains:genrule.bzl", "system_genrule_toolchain") +load("@prelude//toolchains:go.bzl", "system_go_bootstrap_toolchain", "system_go_toolchain") load("@prelude//toolchains:haskell.bzl", "system_haskell_toolchain") load("@prelude//toolchains:ocaml.bzl", "system_ocaml_toolchain") load("@prelude//toolchains:python.bzl", "system_python_bootstrap_toolchain", "system_python_toolchain") +load("@prelude//toolchains:remote_test_execution.bzl", "remote_test_execution_toolchain") load("@prelude//toolchains:rust.bzl", "system_rust_toolchain") def system_demo_toolchains(): @@ -27,6 +29,16 @@ def system_demo_toolchains(): visibility = ["PUBLIC"], ) + system_go_toolchain( + name = "go", + visibility = ["PUBLIC"], + ) + + system_go_bootstrap_toolchain( + name = "go_bootstrap", + visibility = ["PUBLIC"], + ) + system_haskell_toolchain( name = "haskell", visibility = ["PUBLIC"], @@ -52,3 +64,8 @@ def system_demo_toolchains(): default_edition = "2021", visibility = ["PUBLIC"], ) + + remote_test_execution_toolchain( + name = "remote_test_execution", + visibility = ["PUBLIC"], + ) diff --git a/prelude/toolchains/execution_host.bzl b/prelude/toolchains/execution_host.bzl new file mode 100644 index 0000000000000..331393d4d8949 --- /dev/null +++ b/prelude/toolchains/execution_host.bzl @@ -0,0 +1,12 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//utils:host.bzl", "HostOSTypes") + +_ExecutionHostOSTypes = HostOSTypes + ["fat"] # Fat toolchains are compatible on multiple OSes. 
+ +ExecutionHostOSType = enum(*_ExecutionHostOSTypes) diff --git a/prelude/toolchains/go.bzl b/prelude/toolchains/go.bzl index ad9f97127dcc5..577463bcd57f1 100644 --- a/prelude/toolchains/go.bzl +++ b/prelude/toolchains/go.bzl @@ -6,11 +6,10 @@ # of this source tree. load("@prelude//go:toolchain.bzl", "GoToolchainInfo") +load("@prelude//go_bootstrap:go_bootstrap.bzl", "GoBootstrapToolchainInfo") +load("@prelude//utils:cmd_script.bzl", "ScriptOs", "cmd_script") -def _system_go_toolchain_impl(ctx): - go_root = ctx.attrs.go_root - go_binary = go_root + "/bin/go" - +def go_platform() -> (str, str): arch = host_info().arch if arch.is_aarch64: go_arch = "arm64" @@ -18,35 +17,76 @@ def _system_go_toolchain_impl(ctx): go_arch = "amd64" else: fail("Unsupported go arch: {}".format(arch)) + os = host_info().os if os.is_macos: go_os = "darwin" elif os.is_linux: go_os = "linux" + elif os.is_windows: + go_os = "windows" else: fail("Unsupported go os: {}".format(os)) - get_go_tool = lambda go_tool: "{}/pkg/tool/{}_{}/{}".format(go_root, go_os, go_arch, go_tool) + return go_os, go_arch + +def _system_go_bootstrap_toolchain_impl(ctx): + go_os, go_arch = go_platform() + + script_os = ScriptOs("windows" if go_os == "windows" else "unix") + go = "go.exe" if go_os == "windows" else "go" + + return [ + DefaultInfo(), + GoBootstrapToolchainInfo( + env_go_arch = go_arch, + env_go_os = go_os, + go = RunInfo(cmd_script(ctx, "go", cmd_args(go), script_os)), + go_wrapper = ctx.attrs.go_wrapper[RunInfo], + ), + ] + +system_go_bootstrap_toolchain = rule( + impl = _system_go_bootstrap_toolchain_impl, + doc = """Example system go toolchain rules (WIP). Usage: + system_go_bootstrap_toolchain( + name = "go_bootstrap", + visibility = ["PUBLIC"], + )""", + attrs = { + "go_wrapper": attrs.default_only(attrs.dep(providers = [RunInfo], default = "prelude//go/tools:go_wrapper_py")), + }, + is_toolchain_rule = True, +) + +def _system_go_toolchain_impl(ctx): + go_os, go_arch = go_platform() + + script_os = ScriptOs("windows" if go_os == "windows" else "unix") + go = "go.exe" if go_os == "windows" else "go" + return [ DefaultInfo(), GoToolchainInfo( - assembler = get_go_tool("asm"), - cgo = get_go_tool("cgo"), - cgo_wrapper = ctx.attrs.cgo_wrapper, - compile_wrapper = ctx.attrs.compile_wrapper, - compiler = get_go_tool("compile"), - cover = get_go_tool("cover"), - cover_srcs = ctx.attrs.cover_srcs, - cxx_toolchain_for_linking = None, + assembler = RunInfo(cmd_script(ctx, "asm", cmd_args(go, "tool", "asm"), script_os)), + cgo = RunInfo(cmd_script(ctx, "cgo", cmd_args(go, "tool", "cgo"), script_os)), + cgo_wrapper = ctx.attrs.cgo_wrapper[RunInfo], + concat_files = ctx.attrs.concat_files[RunInfo], + compiler = RunInfo(cmd_script(ctx, "compile", cmd_args(go, "tool", "compile"), script_os)), + cover = RunInfo(cmd_script(ctx, "cover", cmd_args(go, "tool", "cover"), script_os)), + default_cgo_enabled = False, env_go_arch = go_arch, env_go_os = go_os, - env_go_root = go_root, - external_linker_flags = None, - filter_srcs = ctx.attrs.filter_srcs, - go = go_binary, - linker = get_go_tool("link"), - packer = get_go_tool("pack"), + external_linker_flags = [], + gen_stdlib_importcfg = ctx.attrs.gen_stdlib_importcfg[RunInfo], + go = RunInfo(cmd_script(ctx, "go", cmd_args(go), script_os)), + go_wrapper = ctx.attrs.go_wrapper[RunInfo], + linker = RunInfo(cmd_script(ctx, "link", cmd_args(go, "tool", "link"), script_os)), + packer = RunInfo(cmd_script(ctx, "pack", cmd_args(go, "tool", "pack"), script_os)), tags = [], + linker_flags = [], + 
assembler_flags = [], + compiler_flags = [], ), ] @@ -55,15 +95,13 @@ system_go_toolchain = rule( doc = """Example system go toolchain rules (WIP). Usage: system_go_toolchain( name = "go", - go_root = "/opt/homebrew/Cellar/go/1.20.4/libexec", visibility = ["PUBLIC"], )""", attrs = { "cgo_wrapper": attrs.default_only(attrs.dep(providers = [RunInfo], default = "prelude//go/tools:cgo_wrapper")), - "compile_wrapper": attrs.default_only(attrs.dep(providers = [RunInfo], default = "prelude//go/tools:compile_wrapper")), - "cover_srcs": attrs.default_only(attrs.dep(providers = [RunInfo], default = "prelude//go/tools:cover_srcs")), - "filter_srcs": attrs.default_only(attrs.dep(providers = [RunInfo], default = "prelude//go/tools:filter_srcs")), - "go_root": attrs.string(), + "concat_files": attrs.default_only(attrs.dep(providers = [RunInfo], default = "prelude//go_bootstrap/tools:go_concat_files")), + "gen_stdlib_importcfg": attrs.default_only(attrs.dep(providers = [RunInfo], default = "prelude//go/tools:gen_stdlib_importcfg")), + "go_wrapper": attrs.default_only(attrs.dep(providers = [RunInfo], default = "prelude//go_bootstrap/tools:go_go_wrapper")), }, is_toolchain_rule = True, ) diff --git a/prelude/toolchains/haskell.bzl b/prelude/toolchains/haskell.bzl index 9d5b02f16d432..c3e99c382af4e 100644 --- a/prelude/toolchains/haskell.bzl +++ b/prelude/toolchains/haskell.bzl @@ -5,7 +5,7 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -load("@prelude//haskell:haskell.bzl", "HaskellPlatformInfo", "HaskellToolchainInfo") +load("@prelude//haskell:toolchain.bzl", "HaskellPlatformInfo", "HaskellToolchainInfo") def _system_haskell_toolchain(_ctx: AnalysisContext) -> list[Provider]: return [ @@ -14,11 +14,12 @@ def _system_haskell_toolchain(_ctx: AnalysisContext) -> list[Provider]: compiler = "ghc", packager = "ghc-pkg", linker = "ghc", + haddock = "haddock", compiler_flags = [], linker_flags = [], ), HaskellPlatformInfo( - name = "x86_64", + name = host_info().arch, ), ] diff --git a/prelude/toolchains/msvc/BUCK b/prelude/toolchains/msvc/BUCK deleted file mode 100644 index ed74363b496d7..0000000000000 --- a/prelude/toolchains/msvc/BUCK +++ /dev/null @@ -1,18 +0,0 @@ -load(":tools.bzl", "find_msvc_tools") - -python_bootstrap_binary( - name = "vswhere", - main = "vswhere.py", - visibility = [], -) - -python_bootstrap_binary( - name = "run_msvc_tool", - main = "run_msvc_tool.py", - visibility = [], -) - -find_msvc_tools( - name = "msvc_tools", - visibility = ["PUBLIC"], -) diff --git a/prelude/toolchains/msvc/BUCK.v2 b/prelude/toolchains/msvc/BUCK.v2 new file mode 100644 index 0000000000000..42d4db5370d7a --- /dev/null +++ b/prelude/toolchains/msvc/BUCK.v2 @@ -0,0 +1,24 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") +load(":tools.bzl", "find_msvc_tools") + +oncall("build_infra") + +source_listing() + +python_bootstrap_binary( + name = "vswhere", + main = "vswhere.py", + visibility = ["PUBLIC"], +) + +python_bootstrap_binary( + name = "run_msvc_tool", + main = "run_msvc_tool.py", + visibility = ["PUBLIC"], +) + +find_msvc_tools( + name = "msvc_tools", + target_compatible_with = ["config//os:windows"], + visibility = ["PUBLIC"], +) diff --git a/prelude/toolchains/msvc/run_msvc_tool.py b/prelude/toolchains/msvc/run_msvc_tool.py index 6d2f8fc73671b..2fa9a60193a48 100644 --- a/prelude/toolchains/msvc/run_msvc_tool.py +++ b/prelude/toolchains/msvc/run_msvc_tool.py @@ -42,8 +42,12 @@ def main(): prepend_env(env, "PATH", tool.PATH) prepend_env(env, 
"INCLUDE", tool.INCLUDE) - completed_process = subprocess.run([tool.exe, *arguments], env=env) - sys.exit(completed_process.returncode) + if tool.exe is None: + print("Tool not found", file=sys.stderr) + sys.exit(1) + else: + completed_process = subprocess.run([tool.exe, *arguments], env=env) + sys.exit(completed_process.returncode) if __name__ == "__main__": diff --git a/prelude/toolchains/msvc/tools.bzl b/prelude/toolchains/msvc/tools.bzl index 72d294eba99f1..93ffa771d58a6 100644 --- a/prelude/toolchains/msvc/tools.bzl +++ b/prelude/toolchains/msvc/tools.bzl @@ -5,34 +5,26 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +load("@prelude//cxx:cxx_toolchain_types.bzl", "LinkerType") +load("@prelude//toolchains:cxx.bzl", "CxxToolsInfo") load("@prelude//utils:cmd_script.bzl", "ScriptOs", "cmd_script") -VisualStudio = provider( - # @unsorted-dict-items - fields = { - # cl.exe - "cl_exe": provider_field(typing.Any, default = None), - # lib.exe - "lib_exe": provider_field(typing.Any, default = None), - # ml64.exe - "ml64_exe": provider_field(typing.Any, default = None), - # link.exe - "link_exe": provider_field(typing.Any, default = None), - }, -) - def _find_msvc_tools_impl(ctx: AnalysisContext) -> list[Provider]: cl_exe_json = ctx.actions.declare_output("cl.exe.json") + cvtres_exe_json = ctx.actions.declare_output("cvtres.exe.json") lib_exe_json = ctx.actions.declare_output("lib.exe.json") ml64_exe_json = ctx.actions.declare_output("ml64.exe.json") link_exe_json = ctx.actions.declare_output("link.exe.json") + rc_exe_json = ctx.actions.declare_output("rc.exe.json") cmd = [ ctx.attrs.vswhere[RunInfo], cmd_args("--cl=", cl_exe_json.as_output(), delimiter = ""), + cmd_args("--cvtres=", cvtres_exe_json.as_output(), delimiter = ""), cmd_args("--lib=", lib_exe_json.as_output(), delimiter = ""), cmd_args("--ml64=", ml64_exe_json.as_output(), delimiter = ""), cmd_args("--link=", link_exe_json.as_output(), delimiter = ""), + cmd_args("--rc=", rc_exe_json.as_output(), delimiter = ""), ] ctx.actions.run( @@ -42,30 +34,53 @@ def _find_msvc_tools_impl(ctx: AnalysisContext) -> list[Provider]: ) run_msvc_tool = ctx.attrs.run_msvc_tool[RunInfo] - cl_exe_script = cmd_script( - ctx = ctx, - name = "cl", - cmd = cmd_args(run_msvc_tool, cl_exe_json), - os = ScriptOs("windows"), - ) - lib_exe_script = cmd_script( - ctx = ctx, - name = "lib", - cmd = cmd_args(run_msvc_tool, lib_exe_json), - os = ScriptOs("windows"), - ) - ml64_exe_script = cmd_script( - ctx = ctx, - name = "ml64", - cmd = cmd_args(run_msvc_tool, ml64_exe_json), - os = ScriptOs("windows"), - ) - link_exe_script = cmd_script( - ctx = ctx, - name = "link", - cmd = cmd_args(run_msvc_tool, link_exe_json), - os = ScriptOs("windows"), - ) + if ctx.attrs.use_path_compilers: + cl_exe_script = "cl.exe" + ml64_exe_script = "ml64.exe" + rc_exe_script = "rc.exe" + cvtres_exe_script = "cvtres.exe" + else: + cl_exe_script = cmd_script( + ctx = ctx, + name = "cl", + cmd = cmd_args(run_msvc_tool, cl_exe_json), + os = ScriptOs("windows"), + ) + cvtres_exe_script = cmd_script( + ctx = ctx, + name = "cvtres", + cmd = cmd_args(run_msvc_tool, cvtres_exe_json), + os = ScriptOs("windows"), + ) + ml64_exe_script = cmd_script( + ctx = ctx, + name = "ml64", + cmd = cmd_args(run_msvc_tool, ml64_exe_json), + os = ScriptOs("windows"), + ) + rc_exe_script = cmd_script( + ctx = ctx, + name = "rc", + cmd = cmd_args(run_msvc_tool, rc_exe_json), + os = ScriptOs("windows"), + ) + + if ctx.attrs.use_path_linkers: + lib_exe_script 
= "lib.exe" + link_exe_script = "link.exe" + else: + lib_exe_script = cmd_script( + ctx = ctx, + name = "lib", + cmd = cmd_args(run_msvc_tool, lib_exe_json), + os = ScriptOs("windows"), + ) + link_exe_script = cmd_script( + ctx = ctx, + name = "link", + cmd = cmd_args(run_msvc_tool, link_exe_json), + os = ScriptOs("windows"), + ) return [ # Supports `buck2 run prelude//toolchains/msvc:msvc_tools[cl.exe]` @@ -77,6 +92,12 @@ def _find_msvc_tools_impl(ctx: AnalysisContext) -> list[Provider]: "json": [DefaultInfo(default_output = cl_exe_json)], }), ], + "cvtres.exe": [ + RunInfo(args = [cvtres_exe_script]), + DefaultInfo(sub_targets = { + "json": [DefaultInfo(default_output = cvtres_exe_json)], + }), + ], "lib.exe": [ RunInfo(args = [lib_exe_script]), DefaultInfo(sub_targets = { @@ -95,19 +116,58 @@ def _find_msvc_tools_impl(ctx: AnalysisContext) -> list[Provider]: "json": [DefaultInfo(default_output = ml64_exe_json)], }), ], + "rc.exe": [ + RunInfo(args = [rc_exe_script]), + DefaultInfo(sub_targets = { + "json": [DefaultInfo(default_output = rc_exe_json)], + }), + ], }), - VisualStudio( - cl_exe = cl_exe_script, - lib_exe = lib_exe_script, - ml64_exe = ml64_exe_script, - link_exe = link_exe_script, + CxxToolsInfo( + compiler = cl_exe_script, + compiler_type = "windows", + cxx_compiler = cl_exe_script, + asm_compiler = ml64_exe_script, + asm_compiler_type = "windows_ml64", + rc_compiler = rc_exe_script, + cvtres_compiler = cvtres_exe_script, + archiver = lib_exe_script, + archiver_type = "windows", + linker = _windows_linker_wrapper(ctx, link_exe_script), + linker_type = LinkerType("windows"), ), ] +def _windows_linker_wrapper(ctx: AnalysisContext, linker: [cmd_args, str]) -> cmd_args: + # Linkers pretty much all support @file.txt argument syntax to insert + # arguments from the given text file, usually formatted one argument per + # line. + # + # - GNU ld: https://gcc.gnu.org/onlinedocs/gcc/Overall-Options.html + # - lld is command line compatible with GNU ld + # - MSVC link.exe: https://learn.microsoft.com/en-us/cpp/build/reference/linking?view=msvc-170#link-command-files + # + # However, there is inconsistency in whether they support nesting of @file + # arguments inside of another @file. + # + # We wrap the linker to flatten @file arguments down to 1 level of nesting. 
+ return cmd_script( + ctx = ctx, + name = "windows_linker", + cmd = cmd_args( + ctx.attrs.linker_wrapper[RunInfo], + linker, + ), + os = ScriptOs("windows"), + ) + find_msvc_tools = rule( impl = _find_msvc_tools_impl, attrs = { + "linker_wrapper": attrs.default_only(attrs.exec_dep(providers = [RunInfo], default = "prelude//cxx/tools:linker_wrapper")), "run_msvc_tool": attrs.default_only(attrs.dep(providers = [RunInfo], default = "prelude//toolchains/msvc:run_msvc_tool")), + "use_path_compilers": attrs.bool(default = False), + "use_path_linkers": attrs.bool(default = False), "vswhere": attrs.default_only(attrs.dep(providers = [RunInfo], default = "prelude//toolchains/msvc:vswhere")), }, ) diff --git a/prelude/toolchains/msvc/vswhere.py b/prelude/toolchains/msvc/vswhere.py index 0d98a6165e4f5..d56dfa6665cef 100644 --- a/prelude/toolchains/msvc/vswhere.py +++ b/prelude/toolchains/msvc/vswhere.py @@ -15,17 +15,23 @@ import shutil import subprocess import sys +import tempfile import winreg from pathlib import Path from typing import IO, List, NamedTuple +VC_EXE_NAMES = ["cl.exe", "cvtres.exe", "lib.exe", "ml64.exe", "link.exe"] +UCRT_EXE_NAMES = ["rc.exe"] + class OutputJsonFiles(NamedTuple): # We write a Tool instance as JSON into each of these files. cl: IO[str] + cvtres: IO[str] lib: IO[str] ml64: IO[str] link: IO[str] + rc: IO[str] class Tool(NamedTuple): @@ -35,11 +41,14 @@ class Tool(NamedTuple): INCLUDE: List[Path] = [] -def find_in_path(executable): +def find_in_path(executable, is_optional=False): which = shutil.which(executable) if which is None: - print(f"{executable} not found in $PATH", file=sys.stderr) - sys.exit(1) + if is_optional: + return None + else: + print(f"{executable} not found in $PATH", file=sys.stderr) + sys.exit(1) return Tool(which) @@ -66,6 +75,7 @@ def find_with_vswhere_exe(): "Microsoft.VisualStudio.Component.VC.Tools.x86.x64", "-format", "json", + "-utf8", "-nologo", ], encoding="utf-8", @@ -99,8 +109,9 @@ def find_with_vswhere_exe(): lib_path = tools_path / "lib" / "x64" include_path = tools_path / "include" - exe_names = "cl.exe", "lib.exe", "ml64.exe", "link.exe" - if not all(bin_path.joinpath(exe).exists() for exe in exe_names): + vc_exe_paths = [bin_path / exe for exe in VC_EXE_NAMES] + + if not all(exe.exists() for exe in vc_exe_paths): continue PATH = [bin_path] @@ -109,10 +120,16 @@ def find_with_vswhere_exe(): ucrt, ucrt_version = get_ucrt_dir() if ucrt and ucrt_version: - PATH.append(ucrt / "bin" / ucrt_version / "x64") + ucrt_bin_path = ucrt / "bin" / ucrt_version / "x64" + PATH.append(ucrt_bin_path) LIB.append(ucrt / "lib" / ucrt_version / "ucrt" / "x64") INCLUDE.append(ucrt / "include" / ucrt_version / "ucrt") + ucrt_exe_paths = [ucrt_bin_path / exe for exe in UCRT_EXE_NAMES] + ucrt_exe_paths = [exe if exe.exists() else None for exe in ucrt_exe_paths] + else: + ucrt_exe_paths = [None for exe in UCRT_EXE_NAMES] + sdk, sdk_version = get_sdk10_dir() if sdk and sdk_version: PATH.append(sdk / "bin" / "x64") @@ -123,12 +140,13 @@ def find_with_vswhere_exe(): INCLUDE.append(sdk / "include" / sdk_version / "shared") return [ - Tool(exe=bin_path / exe, LIB=LIB, PATH=PATH, INCLUDE=INCLUDE) - for exe in exe_names + Tool(exe=exe, LIB=LIB, PATH=PATH, INCLUDE=INCLUDE) + for exe in vc_exe_paths + ucrt_exe_paths ] print( - "vswhere.exe did not find a suitable MSVC toolchain containing cl.exe, lib.exe, ml64.exe", + "vswhere.exe did not find a suitable MSVC toolchain containing " + + ", ".join(VC_EXE_NAMES), file=sys.stderr, ) sys.exit(1) @@ -172,7 +190,7 @@ def 
get_sdk10_dir(): windows_sdk_dir = os.environ.get("WindowsSdkDir") windows_sdk_version = os.environ.get("WindowsSDKVersion") if windows_sdk_dir is not None and windows_sdk_version is not None: - return windows_sdk_dir, windows_sdk_version.removesuffix("\\") + return Path(windows_sdk_dir), windows_sdk_version.removesuffix("\\") registry = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) key_name = "SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows\\v10.0" @@ -202,27 +220,112 @@ def write_tool_json(out, tool): out.write(j) +# for use with the ewdk to grab the environment strings +def get_ewdk_env(ewdkdir: Path): + """ + Inspiration taken from the following: + http://pythonwise.blogspot.fr/2010/04/sourcing-shell-script.html (Miki Tebeka) + http://stackoverflow.com/questions/3503719/#comment28061110_3505826 (ahal) + """ + + # We need to write the script that will make the important variables available + with tempfile.NamedTemporaryFile( + prefix="VcVarsExtract", suffix=".bat", mode="w", delete=False + ) as tmp: + print("@echo off", file=tmp) + print("call %* > NUL", file=tmp) + print("set", file=tmp) + + env_script = ewdkdir / "BuildEnv" / "SetupBuildEnv.cmd" + cmd = [tmp.name, env_script, "amd64"] + output = subprocess.check_output(cmd).decode("utf-8") + + env = {} + for line in output.split("\r\n"): + if line and "=" in line: + first, second = line.split("=", 1) + env[first] = second + + return env + + +def find_with_ewdk(ewdkdir: Path): + env = get_ewdk_env(ewdkdir) + + installation_path = Path(env["VSINSTALLDIR"]) + vc_tools_version = env["VCToolsVersion"] + tools_path = installation_path / "VC" / "Tools" / "MSVC" / vc_tools_version + bin_path = tools_path / "bin" / "HostX64" / "x64" + lib_path = tools_path / "lib" / "x64" + include_path = tools_path / "include" + + PATH = [bin_path] + LIB = [lib_path] + INCLUDE = [include_path] + + ucrt = Path(env["UCRTContentRoot"]) + ucrt_version = env.get("Version_Number") + + vc_exe_paths = [bin_path / exe for exe in VC_EXE_NAMES] + + if ucrt_version: + ucrt_bin_path = ucrt / "bin" / ucrt_version / "x64" + PATH.append(ucrt_bin_path) + LIB.append(ucrt / "lib" / ucrt_version / "ucrt" / "x64") + INCLUDE.append(ucrt / "include" / ucrt_version / "ucrt") + + ucrt_exe_paths = [ucrt_bin_path / exe for exe in UCRT_EXE_NAMES] + ucrt_exe_paths = [exe if exe.exists() else None for exe in ucrt_exe_paths] + else: + ucrt_exe_paths = [None for exe in UCRT_EXE_NAMES] + + sdk = Path(env["WindowsSdkDir"]) + sdk_version = ucrt_version + if sdk_version: + PATH.append(sdk / "bin" / "x64") + LIB.append(sdk / "lib" / sdk_version / "um" / "x64") + INCLUDE.append(sdk / "include" / sdk_version / "um") + INCLUDE.append(sdk / "include" / sdk_version / "cppwinrt") + INCLUDE.append(sdk / "include" / sdk_version / "winrt") + INCLUDE.append(sdk / "include" / sdk_version / "shared") + + return [ + Tool(exe=bin_path / exe, LIB=LIB, PATH=PATH, INCLUDE=INCLUDE) + for exe in vc_exe_paths + ucrt_exe_paths + ] + + def main(): parser = argparse.ArgumentParser() parser.add_argument("--cl", type=argparse.FileType("w"), required=True) + parser.add_argument("--cvtres", type=argparse.FileType("w"), required=True) parser.add_argument("--lib", type=argparse.FileType("w"), required=True) parser.add_argument("--ml64", type=argparse.FileType("w"), required=True) parser.add_argument("--link", type=argparse.FileType("w"), required=True) + parser.add_argument("--rc", type=argparse.FileType("w"), required=True) output = OutputJsonFiles(**vars(parser.parse_args())) # If vcvars has been run, it puts these 
tools onto $PATH. if "VCINSTALLDIR" in os.environ: - cl_exe = find_in_path("cl.exe") - lib_exe = find_in_path("lib.exe") - ml64_exe = find_in_path("ml64.exe") - link_exe = find_in_path("link.exe") + cl_exe, cvtres_exe, lib_exe, ml64_exe, link_exe = ( + find_in_path(exe) for exe in VC_EXE_NAMES + ) + rc_exe = find_in_path("rc.exe", is_optional=True) + elif "EWDKDIR" in os.environ: + cl_exe, cvtres_exe, lib_exe, ml64_exe, link_exe, rc_exe = find_with_ewdk( + Path(os.environ["EWDKDIR"]) + ) else: - cl_exe, lib_exe, ml64_exe, link_exe = find_with_vswhere_exe() + cl_exe, cvtres_exe, lib_exe, ml64_exe, link_exe, rc_exe = ( + find_with_vswhere_exe() + ) write_tool_json(output.cl, cl_exe) + write_tool_json(output.cvtres, cvtres_exe) write_tool_json(output.lib, lib_exe) write_tool_json(output.ml64, ml64_exe) write_tool_json(output.link, link_exe) + write_tool_json(output.rc, rc_exe) if __name__ == "__main__": diff --git a/prelude/toolchains/python.bzl b/prelude/toolchains/python.bzl index e71fb2bc9705f..16400385b0d4d 100644 --- a/prelude/toolchains/python.bzl +++ b/prelude/toolchains/python.bzl @@ -20,8 +20,7 @@ load( ) _INTERPRETER = select({ - "config//os:linux": "python3", - "config//os:macos": "python3", + "DEFAULT": "python3", "config//os:windows": "python", }) @@ -58,7 +57,10 @@ def _system_python_toolchain_impl(ctx): return [ DefaultInfo(), PythonToolchainInfo( + binary_linker_flags = ctx.attrs.binary_linker_flags, + linker_flags = ctx.attrs.linker_flags, fail_with_message = ctx.attrs.fail_with_message[RunInfo], + generate_static_extension_info = ctx.attrs.generate_static_extension_info, make_source_db = ctx.attrs.make_source_db[RunInfo], make_source_db_no_deps = ctx.attrs.make_source_db_no_deps[RunInfo], host_interpreter = RunInfo(args = [ctx.attrs.interpreter]), @@ -67,6 +69,7 @@ def _system_python_toolchain_impl(ctx): make_py_package_inplace = ctx.attrs.make_py_package_inplace[RunInfo], compile = RunInfo(args = ["echo", "COMPILEINFO"]), package_style = "inplace", + pex_extension = ctx.attrs.pex_extension, native_link_strategy = "separate", runtime_library = ctx.attrs.runtime_library, ), @@ -76,12 +79,16 @@ def _system_python_toolchain_impl(ctx): system_python_toolchain = rule( impl = _system_python_toolchain_impl, attrs = { + "binary_linker_flags": attrs.default_only(attrs.list(attrs.arg(), default = [])), "fail_with_message": attrs.default_only(attrs.dep(providers = [RunInfo], default = "prelude//python/tools:fail_with_message")), + "generate_static_extension_info": attrs.default_only(attrs.dep(providers = [RunInfo], default = "prelude//python/tools:generate_static_extension_info")), "interpreter": attrs.string(default = _INTERPRETER), + "linker_flags": attrs.default_only(attrs.list(attrs.arg(), default = [])), "make_py_package_inplace": attrs.default_only(attrs.dep(providers = [RunInfo], default = "prelude//python/tools:make_py_package_inplace")), "make_py_package_modules": attrs.default_only(attrs.dep(providers = [RunInfo], default = "prelude//python/tools:make_py_package_modules")), "make_source_db": attrs.default_only(attrs.dep(providers = [RunInfo], default = "prelude//python/tools:make_source_db")), "make_source_db_no_deps": attrs.default_only(attrs.dep(providers = [RunInfo], default = "prelude//python/tools:make_source_db_no_deps")), + "pex_extension": attrs.string(default = ".pex"), "runtime_library": attrs.default_only(attrs.dep(providers = [ArtifactGroupInfo], default = "prelude//python/runtime:bootstrap_files")), }, is_toolchain_rule = True, diff --git 
a/prelude/toolchains/remote_test_execution.bzl b/prelude/toolchains/remote_test_execution.bzl new file mode 100644 index 0000000000000..39479f0623c5f --- /dev/null +++ b/prelude/toolchains/remote_test_execution.bzl @@ -0,0 +1,40 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//decls:re_test_common.bzl", "re_test_common") +load("@prelude//tests:remote_test_execution_toolchain.bzl", "RemoteTestExecutionToolchainInfo") +load("@prelude//utils:utils.bzl", "map_val") + +def _impl(ctx: AnalysisContext) -> list[Provider]: + default_profile = map_val(ctx.attrs.profiles.get, ctx.attrs.default_profile) + if ctx.attrs.default_run_as_bundle != None: + default_run_as_bundle = ctx.attrs.default_run_as_bundle + else: + default_run_as_bundle = bool(default_profile) + + return [ + DefaultInfo(), + RemoteTestExecutionToolchainInfo( + default_profile = default_profile, + default_run_as_bundle = default_run_as_bundle, + profiles = ctx.attrs.profiles, + ), + ] + +remote_test_execution_toolchain = rule( + impl = _impl, + is_toolchain_rule = True, + attrs = { + "default_profile": attrs.option(attrs.string(), default = None), + "default_run_as_bundle": attrs.option(attrs.bool(), default = None), + "profiles": attrs.dict( + key = attrs.string(), + value = attrs.option(re_test_common.opts_for_tests_arg()), + default = {}, + ), + }, +) diff --git a/prelude/toolchains/rust.bzl b/prelude/toolchains/rust.bzl index 21ee3d32fd469..3860f821313e0 100644 --- a/prelude/toolchains/rust.bzl +++ b/prelude/toolchains/rust.bzl @@ -5,7 +5,7 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
-load("@prelude//rust:rust_toolchain.bzl", "RustToolchainInfo") +load("@prelude//rust:rust_toolchain.bzl", "PanicRuntime", "RustToolchainInfo") load("@prelude//rust/tools:attrs.bzl", "internal_tool_attrs") _DEFAULT_TRIPLE = select({ @@ -42,24 +42,22 @@ def _system_rust_toolchain_impl(ctx): clippy_driver = RunInfo(args = ["clippy-driver"]), clippy_toml = ctx.attrs.clippy_toml[DefaultInfo].default_outputs[0] if ctx.attrs.clippy_toml else None, compiler = RunInfo(args = ["rustc"]), - compiler_standalone = RunInfo(args = ["rustc"]), - concat_tool = ctx.attrs.concat_tool[RunInfo], default_edition = ctx.attrs.default_edition, + panic_runtime = PanicRuntime("unwind"), deny_lints = ctx.attrs.deny_lints, doctests = ctx.attrs.doctests, - extern_html_root_url_prefix = ctx.attrs.extern_html_root_url_prefix, failure_filter_action = ctx.attrs.failure_filter_action[RunInfo], - pipelined = ctx.attrs.pipelined, + nightly_features = ctx.attrs.nightly_features, report_unused_deps = ctx.attrs.report_unused_deps, rustc_action = ctx.attrs.rustc_action[RunInfo], rustc_binary_flags = ctx.attrs.rustc_binary_flags, - rustc_check_flags = ctx.attrs.rustc_check_flags, rustc_flags = ctx.attrs.rustc_flags, rustc_target_triple = ctx.attrs.rustc_target_triple, rustc_test_flags = ctx.attrs.rustc_test_flags, rustdoc = RunInfo(args = ["rustdoc"]), rustdoc_flags = ctx.attrs.rustdoc_flags, rustdoc_test_with_resources = ctx.attrs.rustdoc_test_with_resources[RunInfo], + rustdoc_coverage = ctx.attrs.rustdoc_coverage[RunInfo], transitive_dependency_symlinks_tool = ctx.attrs.transitive_dependency_symlinks_tool[RunInfo], warn_lints = ctx.attrs.warn_lints, ), @@ -73,11 +71,9 @@ system_rust_toolchain = rule( "default_edition": attrs.option(attrs.string(), default = None), "deny_lints": attrs.list(attrs.string(), default = []), "doctests": attrs.bool(default = False), - "extern_html_root_url_prefix": attrs.option(attrs.string(), default = None), - "pipelined": attrs.bool(default = False), + "nightly_features": attrs.bool(default = False), "report_unused_deps": attrs.bool(default = False), "rustc_binary_flags": attrs.list(attrs.string(), default = []), - "rustc_check_flags": attrs.list(attrs.string(), default = []), "rustc_flags": attrs.list(attrs.string(), default = []), "rustc_target_triple": attrs.string(default = _DEFAULT_TRIPLE), "rustc_test_flags": attrs.list(attrs.string(), default = []), diff --git a/prelude/tools/audit_providers_universe.bxl b/prelude/tools/audit_providers_universe.bxl new file mode 100644 index 0000000000000..6916ed7a32542 --- /dev/null +++ b/prelude/tools/audit_providers_universe.bxl @@ -0,0 +1,18 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +def _impl(ctx): + ts = ctx.target_universe(ctx.cli_args.universe).lookup(ctx.cli_args.target) + ctx.output.print(pstr({t: a.providers() for t, a in ctx.analysis(ts).items()})) + +run = bxl_main( + impl = _impl, + cli_args = { + "target": cli_args.target_label(), + "universe": cli_args.target_label(), + }, +) diff --git a/prelude/transitions/constraint_overrides.bzl b/prelude/transitions/constraint_overrides.bzl index 56067f10179a3..6adadb64f36ae 100644 --- a/prelude/transitions/constraint_overrides.bzl +++ b/prelude/transitions/constraint_overrides.bzl @@ -5,59 +5,108 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -# @oss-disable: load("@prelude//meta_only:product_constraints.bzl", _PRODUCT_CONSTRAINTS = "constraints") -# @oss-disable: load("@prelude//meta_only:third_party_version_constraints.bzl", _VERSION_CONSTRAINTS = "constraints") - -# @oss-disable: _CONSTRAINTS = _PRODUCT_CONSTRAINTS + _VERSION_CONSTRAINTS -_CONSTRAINTS = [] # @oss-enable - -# Apparently, `==` doesn't do value comparison for `ConstraintValueInfo`, so -# impl a hacky eq impl to workaround. -def _constr_eq(a, b): - return a.label == b.label - -def _constraint_overrides_transition_impl( - platform: PlatformInfo, - refs: struct, - attrs: struct) -> PlatformInfo: - # Extract actual constraint value objects. - new_constraints = [ - getattr(refs, constraint)[ConstraintValueInfo] - for constraint in attrs.constraint_overrides - ] - - # Filter out new constraints which are already a part of the platform. - new_constraints = [ - constraint - for constraint in new_constraints - if ( - constraint.setting.label not in platform.configuration.constraints or - not _constr_eq(constraint, platform.configuration.constraints[constraint.setting.label]) - ) - ] - - # Nothing to do. - if not new_constraints: - return platform - - # Generate new constraints. - constraints = {} - constraints.update(platform.configuration.constraints) - for constraint in new_constraints: - constraints[constraint.setting.label] = constraint - - return PlatformInfo( +# NOTE: Currently, constraints can't be propagated via rule attrs and so need to be +# hard-coded here. We use a read_config to avoid hard-coding these repo-specific +# constraints into the prelude. + +def _platform_overrides() -> list[str]: + config = read_root_config("buck2", "platform_overrides", "") + return [override.strip() for override in config.split(",") if override.strip()] + +def _constraint_overrides() -> list[str]: + overrides = read_root_config("buck2", "constraint_overrides", "") + return [override.strip() for override in overrides.split(",") if override.strip()] + +def _constraint_passthroughs() -> list[str]: + passthroughs = read_root_config("buck2", "constraint_passthroughs", "") + return [passthrough.strip() for passthrough in passthroughs.split(",") if passthrough.strip()] + +_PLATFORM_OVERRIDES = _platform_overrides() +_CONSTRAINT_OVERRIDES = _constraint_overrides() +_CONSTRAINT_PASSTHROUGHS = _constraint_passthroughs() + +def _apply( + old_platform: PlatformInfo, + *, + platform: PlatformInfo | None = None, + constraints: list[ConstraintValueInfo] = []) -> PlatformInfo: + # Store passthrough constraint values. + passthrough_constraints = [] + for constraint in _CONSTRAINT_PASSTHROUGHS: + if constraint in old_platform.configuration.constraints: + passthrough_constraints.append( + old_platform.configuration.constraints[constraint], + ) + + # Switch target platform. 
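+    # (Editorial note: `platform` is the optional override; when it is None the
+    # transition keeps `old_platform` and only rewrites its constraint values.)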
+ platform = platform or old_platform + + # Add passthrough constraint values and apply constraint value overrides. + new_constraints = { + label: constraint + for label, constraint in platform.configuration.constraints.items() + } + for constraint in passthrough_constraints: + new_constraints[constraint.setting.label] = constraint + for constraint in constraints: + new_constraints[constraint.setting.label] = constraint + + new_platform = PlatformInfo( label = platform.label, configuration = ConfigurationInfo( - constraints = constraints, + constraints = new_constraints, values = platform.configuration.values, ), ) -constraint_overrides_transition = transition( - impl = _constraint_overrides_transition_impl, - refs = {constraint: constraint for constraint in _CONSTRAINTS}, - attrs = [ - "constraint_overrides", - ], + return new_platform + +def _impl(platform: PlatformInfo, refs: struct, attrs: struct) -> PlatformInfo: + # Resolve target platform override. + override = None + if hasattr(attrs, "platform_override"): + override = getattr(attrs, "platform_override") + platform_override = None + if override: + if not hasattr(refs, override): + fail("Target platform override not supported: {override}".format( + override = override, + )) + ref = getattr(refs, override) + platform_override = ref[PlatformInfo] + + # Resolve constraint value overrides. + overrides = [] + if hasattr(attrs, "constraint_overrides"): + overrides = getattr(attrs, "constraint_overrides", []) + constraint_overrides = [] + for override in overrides: + if not hasattr(refs, override): + fail("Constraint value override not supported: {override}".format( + override = override, + )) + ref = getattr(refs, override) + constraint_overrides.append(ref[ConstraintValueInfo]) + + return _apply( + platform, + platform = platform_override, + constraints = constraint_overrides, + ) + +_attributes = { + "constraint_overrides": attrs.list(attrs.string(), default = []), + "platform_override": attrs.option(attrs.string(), default = None), +} + +_transition = transition( + impl = _impl, + attrs = _attributes.keys(), + refs = {override: override for override in _PLATFORM_OVERRIDES + _CONSTRAINT_OVERRIDES}, +) + +constraint_overrides = struct( + apply = _apply, + transition = _transition, + attributes = _attributes, ) diff --git a/prelude/transitions/utils.bzl b/prelude/transitions/utils.bzl new file mode 100644 index 0000000000000..5899fd1ad8a43 --- /dev/null +++ b/prelude/transitions/utils.bzl @@ -0,0 +1,21 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
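+# Editorial sketch of intended use (hypothetical `os_setting`/`new_os` values): a
+# transition impl can drop a constraint setting and then install its own value, e.g.
+#   constraints = filtered_platform_constraints(platform, [os_setting.label])
+#   constraints[new_os.setting.label] = new_os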
+ +def filtered_platform_constraints(platform: PlatformInfo, constraint_settings_labels_to_remove: list[TargetLabel]) -> dict[TargetLabel, ConstraintValueInfo]: + return { + constraint_setting_label: constraint_setting_value + for (constraint_setting_label, constraint_setting_value) in platform.configuration.constraints.items() + if constraint_setting_label not in constraint_settings_labels_to_remove + } + +def get_constraint_value(platform: PlatformInfo, constraint: ConstraintSettingInfo) -> [None, ConstraintValueInfo]: + return platform.configuration.constraints.get(constraint.label) + +utils = { + "filtered_platform_constraints": filtered_platform_constraints, + "get_constraint_value": get_constraint_value, +} diff --git a/prelude/unix/providers.bzl b/prelude/unix/providers.bzl new file mode 100644 index 0000000000000..5378cc2f218c1 --- /dev/null +++ b/prelude/unix/providers.bzl @@ -0,0 +1,60 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//:artifacts.bzl", "ArtifactExt") +load("@prelude//linking:shared_libraries.bzl", "SharedLibraries") +load("@prelude//python:manifest.bzl", "ManifestInfo") +load("@prelude//python:python.bzl", "PythonLibraryInfo") + +# Provider representing components that can be added to a "unix" env (e.g. +# binaries in `bin/`, native libs in `lib/`, and Python modules under +# `lib/python*/site-packages`). +UnixEnv = record( + label = field(Label), + # Third-party builds to install into the env (non-transitive). + third_party_builds = field(list[ArtifactExt], []), + # Python libraries to install (non-transitive). + python_libs = field(list[PythonLibraryInfo], []), + # Native libs to install (non-transitive). + native_libs = field(list[SharedLibraries], []), + # Binaries to install. + binaries = field(list[ManifestInfo], []), + # Raw paths to install. + paths = field(list[(str, ArtifactExt)], []), + patterns = field(list[(str, ArtifactExt, str)], []), +) + +UnixEnvTSet = transitive_set() + +UnixEnvInfo = provider( + fields = dict( + _tset = provider_field(UnixEnvTSet), + ), +) + +def create_unix_env_info( + actions: AnalysisActions, + env: UnixEnv | None = None, + children: list[UnixEnvInfo] = [], + deps: list[Dependency] = []) -> UnixEnvInfo: + all_children = [] + for child in children: + all_children.append(child._tset) + for dep in deps: + child = dep.get(UnixEnvInfo) + if child != None: + all_children.append(child._tset) + kwargs = {} + if env != None: + kwargs["value"] = env + kwargs["children"] = all_children + return UnixEnvInfo( + _tset = actions.tset( + UnixEnvTSet, + **kwargs + ), + ) diff --git a/prelude/user/all.bzl b/prelude/user/all.bzl index 2fd2dbb095822..d00eb145deacc 100644 --- a/prelude/user/all.bzl +++ b/prelude/user/all.bzl @@ -5,14 +5,21 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. 
+load("@prelude//android/user:android_emulators.bzl", _android_emulators_spec = "registration_spec") +load("@prelude//apple:apple_resource_dedupe_alias.bzl", _apple_resource_dedupe_alias_spec = "registration_spec") +load("@prelude//apple:apple_static_archive.bzl", _apple_static_archive_spec = "registration_spec") +load("@prelude//apple/mockingbird:mockingbird_mock.bzl", _mockingbird_mock_spec = "registration_spec") +load("@prelude//apple/user:apple_ipa_package.bzl", _apple_ipa_package_spec = "registration_spec") +load("@prelude//apple/user:apple_macos_bundle.bzl", _apple_macos_bundle_spec = "registration_spec") load("@prelude//apple/user:apple_resource_bundle.bzl", _apple_resource_bundle_spec = "registration_spec") load("@prelude//apple/user:apple_selective_debugging.bzl", _apple_selective_debugging_spec = "registration_spec") load("@prelude//apple/user:apple_simulators.bzl", _apple_simulators_spec = "registration_spec") load("@prelude//apple/user:apple_toolchain_override.bzl", _apple_toolchain_override_spec = "registration_spec") load("@prelude//apple/user:apple_tools.bzl", _apple_tools_spec = "registration_spec") load("@prelude//apple/user:apple_watchos_bundle.bzl", _apple_watchos_bundle_spec = "registration_spec") +load("@prelude//apple/user:apple_xcframework.bzl", _apple_xcframework_spec = "registration_spec") load("@prelude//apple/user:resource_group_map.bzl", _resource_group_map_spec = "registration_spec") -load("@prelude//cxx/user:cxx_toolchain_override.bzl", _cxx_toolchain_override_inheriting_target_platform_spec = "cxx_toolchain_override_inheriting_target_platform_registration_spec", _cxx_toolchain_override_spec = "cxx_toolchain_override_registration_spec") +load("@prelude//cxx/user:cxx_toolchain_override.bzl", _cxx_toolchain_override_spec = "cxx_toolchain_override_registration_spec") load("@prelude//cxx/user:link_group_map.bzl", _link_group_map_spec = "registration_spec") load(":cxx_headers_bundle.bzl", _cxx_headers_bundle_spec = "registration_spec") load(":extract_archive.bzl", _extract_archive_spec = "registration_spec") @@ -20,18 +27,24 @@ load(":write_file.bzl", _write_file_spec = "registration_spec") _all_specs = [ _extract_archive_spec, + _android_emulators_spec, _apple_tools_spec, _apple_selective_debugging_spec, + _apple_static_archive_spec, _apple_resource_bundle_spec, + _apple_resource_dedupe_alias_spec, + _apple_xcframework_spec, + _apple_ipa_package_spec, _link_group_map_spec, _resource_group_map_spec, _apple_watchos_bundle_spec, + _apple_macos_bundle_spec, _apple_toolchain_override_spec, _cxx_headers_bundle_spec, _cxx_toolchain_override_spec, - _cxx_toolchain_override_inheriting_target_platform_spec, _apple_simulators_spec, _write_file_spec, + _mockingbird_mock_spec, ] rules = { diff --git a/prelude/user/cxx_headers_bundle.bzl b/prelude/user/cxx_headers_bundle.bzl index 0aaaff6369ff8..9c3e771984cbb 100644 --- a/prelude/user/cxx_headers_bundle.bzl +++ b/prelude/user/cxx_headers_bundle.bzl @@ -8,7 +8,7 @@ load("@prelude//:artifacts.bzl", "ArtifactGroupInfo") load("@prelude//:paths.bzl", "paths") load("@prelude//cxx:preprocessor.bzl", "CPreprocessorInfo", "cxx_merge_cpreprocessors") -load("@prelude//utils:utils.bzl", "expect") +load("@prelude//utils:expect.bzl", "expect") load(":rule_spec.bzl", "RuleRegistrationSpec") def _headers(ctx: AnalysisContext, deps: list[Dependency]) -> dict[str, Artifact]: diff --git a/prelude/user/extract_archive.bzl b/prelude/user/extract_archive.bzl index a7c76731e6c72..a65107e05d099 100644 --- a/prelude/user/extract_archive.bzl +++ 
b/prelude/user/extract_archive.bzl @@ -20,13 +20,15 @@ def _impl(ctx: AnalysisContext) -> list[Provider]: [ cmd_args(output, format = "mkdir -p {}"), cmd_args(output, format = "cd {}"), - cmd_args(archive, format = "tar -xzf {}").relative_to(output), + cmd_args(archive, format = "tar -xzf {}", relative_to = output), ], is_executable = True, allow_args = True, ) - ctx.actions.run(cmd_args(["/bin/sh", script]) - .hidden([archive, output.as_output()]), category = "extract_archive") + ctx.actions.run( + cmd_args(["/bin/sh", script], hidden = [archive, output.as_output()]), + category = "extract_archive", + ) return [DefaultInfo(default_output = output)] diff --git a/prelude/user/rule_spec.bzl b/prelude/user/rule_spec.bzl index 426b57d3f7ef9..ad8b45113d2da 100644 --- a/prelude/user/rule_spec.bzl +++ b/prelude/user/rule_spec.bzl @@ -9,7 +9,8 @@ RuleRegistrationSpec = record( name = field(str), impl = field(typing.Callable), attrs = field(dict[str, Attr]), - cfg = field([None, "transition"], None), + # TODO(nga): should be `transition | None`, but `transition` does not work as a type. + cfg = field(typing.Any | None, None), is_toolchain_rule = field(bool, False), doc = field(str, ""), ) diff --git a/prelude/utils/argfile.bzl b/prelude/utils/argfile.bzl new file mode 100644 index 0000000000000..6512a8ed95f7f --- /dev/null +++ b/prelude/utils/argfile.bzl @@ -0,0 +1,43 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# Create an argument file. +# Returns a `cmd_args` that is a single string containing `@path/to/argfile`. +# The returned `cmd_args` carries the given args as hidden artifacts. +def at_argfile( + *, + # ctx.actions + actions, + # name of the argument file + name: str | Artifact, + # the arguments to write to the argument file + args, + # pass to `ctx.actions.write` + allow_args: bool = False) -> cmd_args: + if allow_args: + args_file, _ = actions.write(name, args, allow_args = True, with_inputs = True) + else: + args_file = actions.write(name, args, with_inputs = True) + return cmd_args(args_file, format = "@{}", hidden = args) + +# Write arguments to a file, and return the file path as `cmd_args` +# with the args attached as hidden artifacts. +def argfile( + *, + # ctx.actions + actions, + # name of the argument file + name: str | Artifact, + # the arguments to write to the argument file + args, + # pass to `ctx.actions.write` + allow_args: bool = False) -> cmd_args: + if allow_args: + args_file, _ = actions.write(name, args, allow_args = True, with_inputs = True) + else: + args_file = actions.write(name, args, with_inputs = True) + return cmd_args(args_file, hidden = args) diff --git a/prelude/utils/buckconfig.bzl b/prelude/utils/buckconfig.bzl new file mode 100644 index 0000000000000..8d747bd09d465 --- /dev/null +++ b/prelude/utils/buckconfig.bzl @@ -0,0 +1,191 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree.
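+# NOTE: an illustrative sketch, not part of this diff: the `at_argfile` helper
+# from `prelude//utils/argfile.bzl` above is meant to be used roughly like this
+# (`_impl` and `mytool` are hypothetical names):
+#
+#     def _impl(ctx: AnalysisContext) -> list[Provider]:
+#         out = ctx.actions.declare_output("out.txt")
+#         ctx.actions.run(
+#             cmd_args(
+#                 "mytool",
+#                 # Expands to `@path/to/inputs.args`; the args themselves
+#                 # travel along as hidden inputs of the command.
+#                 at_argfile(actions = ctx.actions, name = "inputs.args", args = ctx.attrs.srcs),
+#                 out.as_output(),
+#             ),
+#             category = "mytool",
+#         )
+#         return [DefaultInfo(default_output = out)]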
+ +"""Provides macros for working with .buckconfig.""" + +load(":expect.bzl", "expect") +load(":lazy.bzl", "lazy") + +def _decode_raw_word(val, start, delimiter = None): + """ + Read characters up to the given delimiter in a string supporting and + stripping quoting (i.e. `"`) and escape characters (i.e. `\\`). + + Args: + val: Input string to evaluate. + start: Where to start in the input string. + delimiter: An optional character to terminate at. If omitted will + continue to the end of the string. + """ + + quotes = ['"', "'"] + word = "" + current_quote_char = None + escaped = False + idx = -1 + + for idx in range(start, len(val)): + c = val[idx] + + if current_quote_char == None and c == delimiter: + break + + if current_quote_char == None and c in quotes: # quote start + current_quote_char = c + elif c == current_quote_char and not escaped: # quote end + current_quote_char = None + elif c == "\\" and not escaped: # handle escape char + expect( + current_quote_char != None, + "escape char outside of quotes at char %d in: %s" % (idx + 1, val), + ) + escaped = True + else: + word += c + escaped = False + + expect(current_quote_char == None, "quote not closed in: %s" % val) + + return idx, word + +def _next_word(val, start, delimiter): + """ + Advance past delimiter characters. + """ + + for idx in range(start, len(val)): + c = val[idx] + if c != delimiter: + return idx + + return -1 + +def read(section, field, default = None, root_cell = False): + """Read a `string` from `.buckconfig`.""" + + read_config_func = read_root_config if root_cell else read_config + return read_config_func(section, field, default) + +# Alias for `read` that's explicit about the type being returned. +read_string = read + +def read_choice(section, field, choices, default = None, required = True, root_cell = False): + """Read a string from `.buckconfig` that must be one `choices`.""" + + val = read(section, field, root_cell = root_cell) + if val != None: + if val in choices: + return val + else: + fail( + "`{}:{}`: must be one of ({}), but was {}".format(section, field, ", ".join(choices), repr(val)), + ) + elif default != None: + return default + elif not required: + return None + else: + fail("`{}:{}`: no value set".format(section, field)) + +def read_bool(section, field, default = None, required = True, root_cell = False): + """Read a `boolean` from `.buckconfig`.""" + + # Treat the empty string as "unset". This allows the user to "override" a + # previous setting by "clearing" it out. 
+ val = read(section, field, root_cell = root_cell) + if val != None and val != "": + # Fast-path string check + if val == "True" or val == "true": + return True + elif val == "False" or val == "false": + return False + + # Else fall back to lower casing + if val.lower() == "true": + return True + elif val.lower() == "false": + return False + else: + fail( + "`{}:{}`: cannot coerce {!r} to bool".format(section, field, val), + ) + elif default != None: + return default + elif not required: + return None + else: + fail("`{}:{}`: no value set".format(section, field)) + +def read_int(section, field, default = None, required = True, root_cell = False): + """Read an `int` from `.buckconfig`.""" + + val = read(section, field, root_cell = root_cell) + if val != None: + if val.isdigit(): + return int(val) + else: + fail( + "`{}:{}`: cannot coerce {!r} to int".format(section, field, val), + ) + elif default != None: + return default + elif not required: + return None + else: + fail("`{}:{}`: no value set".format(section, field)) + +def read_list(section, field, delimiter = ",", default = None, required = True, root_cell = False): + """Read a `list` from `.buckconfig`.""" + val = read(section, field, root_cell = root_cell) + if val != None: + quotes = ["\\", '"', "'"] + if lazy.is_any(lambda x: x in val, quotes): + words = [] + idx = 0 + for _ in range(len(val)): + idx = _next_word(val, idx, delimiter) + if idx == -1: + break + idx, word = _decode_raw_word(val, idx, delimiter) + words.append(word.strip()) + if idx == -1 or idx >= len(val) - 1: + break + return words + else: + return [v.strip() for v in val.split(delimiter) if v] + elif default != None: + return default + elif not required: + return None + else: + fail("`{}:{}`: no value set".format(section, field)) + +def resolve_alias(alias): + """Resolves an alias into a target (recursively). `fail`s if the alias does + not exist. + + Args: + alias (str): The alias or target to resolve. + + Returns: + The target pointed to by the alias (or the input if the caller lied + to us and `alias` is a target) + """ + if "//" in alias: + return alias + + # Starlark doesn't have while loops and doesn't allow recursion, so to + # resolve aliases that point at other aliases we iterate a bounded number of times + for _ in range(1000): + # TODO: set root_cell to true when all aliases come from root cell? + target = read("alias", alias, root_cell = False) + expect(target != None, "Alias {} does not exist".format(alias)) + if "//" in target: + return target + else: + alias = target + fail("This should never happen - either the alias exists or it doesn't") diff --git a/prelude/utils/build_target_pattern.bzl b/prelude/utils/build_target_pattern.bzl index 330e5abd9b168..5d9dd962978f3 100644 --- a/prelude/utils/build_target_pattern.bzl +++ b/prelude/utils/build_target_pattern.bzl @@ -5,9 +5,7 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -load("@prelude//utils:utils.bzl", "expect") - -_ROOT_SYMBOL = "//" +ROOT_SYMBOL = "//" _TARGET_SYMBOL = ":" _RECURSIVE_SYMBOL = "..." _PATH_SYMBOL = "/" @@ -25,13 +23,30 @@ BuildTargetPattern = record( name = field([str, None], None), matches = field(typing.Callable), as_string = field(typing.Callable), + + # Exists purely for optimisation purposes.
+ # Matching a pattern inside a loop over many targets creates a huge amount of + # unnecessary string allocations that we can avoid + _path_with_path_symbol = field(str), ) -def parse_build_target_pattern(pattern: str) -> BuildTargetPattern: - expect(len(pattern) >= len(_ROOT_SYMBOL) + 1, "Invalid build target pattern, pattern too short: {}".format(pattern)) +BuildTargetPatternParseResult = record( + build_target_pattern = field([BuildTargetPattern, None], None), + error = field([str, None], default = None), +) - root_position = pattern.find(_ROOT_SYMBOL) - expect(root_position >= 0, "Invalid build target pattern, pattern should started with `{}` or a cell name followed by `{}`: ".format(_ROOT_SYMBOL, _ROOT_SYMBOL, pattern)) +def try_parse_build_target_pattern(pattern: str) -> BuildTargetPatternParseResult: + """ + This function tries to parse a build target pattern. If parsing fails, it will return the error message. + """ + if not (len(pattern) >= len(ROOT_SYMBOL) + 1): + err_msg = "Invalid build target pattern, pattern too short: {}".format(pattern) + return BuildTargetPatternParseResult(error = err_msg) + + root_position = pattern.find(ROOT_SYMBOL) + if not (root_position >= 0): + err_msg = "Invalid build target pattern, pattern should start with `{}` or a cell name followed by `{}`: {}".format(ROOT_SYMBOL, ROOT_SYMBOL, pattern) + return BuildTargetPatternParseResult(error = err_msg) cell = None if root_position > 0: @@ -44,7 +59,9 @@ def parse_build_target_pattern(pattern: str) -> BuildTargetPattern: elif pattern.endswith(_RECURSIVE_SYMBOL): kind = _BuildTargetPatternKind("recursive") end_of_path_position = len(pattern) - len(_RECURSIVE_SYMBOL) - 1 - expect(pattern[end_of_path_position] == _PATH_SYMBOL, "Invalid build target pattern, `{}` should be preceded by a `{}`: {}".format(_RECURSIVE_SYMBOL, _PATH_SYMBOL, pattern)) + if not (pattern[end_of_path_position] == _PATH_SYMBOL): + err_msg = "Invalid build target pattern, `{}` should be preceded by a `{}`: {}".format(_RECURSIVE_SYMBOL, _PATH_SYMBOL, pattern) + return BuildTargetPatternParseResult(error = err_msg) else: kind = _BuildTargetPatternKind("single") end_of_path_position = pattern.rfind(_TARGET_SYMBOL) @@ -55,19 +72,30 @@ def parse_build_target_pattern(pattern: str) -> BuildTargetPattern: start_of_package = pattern.rfind(_PATH_SYMBOL) name = pattern[start_of_package + len(_PATH_SYMBOL):] elif end_of_path_position < root_position: - fail("Invalid build target pattern, cell name should not contain `{}`: {}".format(_PATH_SYMBOL, pattern)) + err_msg = "Invalid build target pattern, cell name should not contain `{}`: {}".format(_PATH_SYMBOL, pattern) + return BuildTargetPatternParseResult(error = err_msg) else: name = pattern[end_of_path_position + len(_TARGET_SYMBOL):] - start_of_path_position = root_position + len(_ROOT_SYMBOL) + start_of_path_position = root_position + len(ROOT_SYMBOL) - expect(pattern[start_of_path_position] != _PATH_SYMBOL, "Invalid build target pattern, path cannot start with `{}`: {}".format(_PATH_SYMBOL, pattern)) + if not (pattern[start_of_path_position] != _PATH_SYMBOL): + err_msg = "Invalid build target pattern, path cannot start with `{}`: {}".format(_PATH_SYMBOL, pattern) + return BuildTargetPatternParseResult(error = err_msg) path = pattern[start_of_path_position:end_of_path_position] - expect(path.find(_ROOT_SYMBOL) < 0, "Invalid build target pattern, `{}` can only appear once: {}".format(_ROOT_SYMBOL, pattern)) - expect(path.find(_RECURSIVE_SYMBOL) < 0, "Invalid build target pattern, `{}` can only appear once: {}".format(_RECURSIVE_SYMBOL, pattern)) - expect(path.find(_TARGET_SYMBOL) < 0, "Invalid build target pattern, `{}` can only appear once: {}".format(_TARGET_SYMBOL, pattern)) - expect(len(path) == 0 or path[-1:] != _PATH_SYMBOL, "Invalid build target pattern, path cannot end with `{}`: {}".format(_PATH_SYMBOL, pattern)) + if not (path.find(ROOT_SYMBOL) < 0): + err_msg = "Invalid build target pattern, `{}` can only appear once: {}".format(ROOT_SYMBOL, pattern) + return BuildTargetPatternParseResult(error = err_msg) + if not (path.find(_RECURSIVE_SYMBOL) < 0): + err_msg = "Invalid build target pattern, `{}` can only appear once: {}".format(_RECURSIVE_SYMBOL, pattern) + return BuildTargetPatternParseResult(error = err_msg) + if not (path.find(_TARGET_SYMBOL) < 0): + err_msg = "Invalid build target pattern, `{}` can only appear once: {}".format(_TARGET_SYMBOL, pattern) + return BuildTargetPatternParseResult(error = err_msg) + if not (len(path) == 0 or path[-1:] != _PATH_SYMBOL): + err_msg = "Invalid build target pattern, path cannot end with `{}`: {}".format(_PATH_SYMBOL, pattern) + return BuildTargetPatternParseResult(error = err_msg) # buildifier: disable=uninitialized - self is initialized def matches(label: [Label, TargetLabel]) -> bool: @@ -85,7 +113,7 @@ def parse_build_target_pattern(pattern: str) -> BuildTargetPattern: return True elif len(label.package) > path_pattern_length: # pattern cell//package/... matches label cell//package/subpackage:target - return label.package.startswith(self.path + _PATH_SYMBOL) + return label.package.startswith(self._path_with_path_symbol) else: return self.path == label.package else: @@ -99,10 +127,16 @@ def parse_build_target_pattern(pattern: str) -> BuildTargetPattern: elif self.kind == _BuildTargetPatternKind("package"): return "{}//{}:".format(normalized_cell, self.path) elif self.kind == _BuildTargetPatternKind("recursive"): - return "{}//{}...".format(normalized_cell, self.path + _PATH_SYMBOL if self.path else "") + return "{}//{}...".format(normalized_cell, self._path_with_path_symbol) else: fail("Unknown build target pattern kind.") - self = BuildTargetPattern(kind = kind, cell = cell, path = path, name = name, matches = matches, as_string = as_string) + self = BuildTargetPattern(kind = kind, cell = cell, path = path, name = name, matches = matches, as_string = as_string, _path_with_path_symbol = path + _PATH_SYMBOL if path else "") + + return BuildTargetPatternParseResult(build_target_pattern = self) - return self +def parse_build_target_pattern(pattern: str) -> BuildTargetPattern: + parse_res = try_parse_build_target_pattern(pattern) + if parse_res.error != None: + fail(parse_res.error) + return parse_res.build_target_pattern diff --git a/prelude/utils/clear_platform.bzl b/prelude/utils/clear_platform.bzl new file mode 100644 index 0000000000000..6e2c9dda56ccd --- /dev/null +++ b/prelude/utils/clear_platform.bzl @@ -0,0 +1,24 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# We don't want e.g. Apple simulator and Android emulator targets to be configured differently and handled as different resource brokers by buck2 core. +# By clearing the platform we make sure there is only a single configured target for each resource broker which manages resources of a certain type.
+def _transition_impl(platform: PlatformInfo, refs: struct) -> PlatformInfo: + # buildifier: disable=unused-variable + _ = (platform, refs) + return PlatformInfo( + label = "empty_platform", + configuration = ConfigurationInfo( + constraints = {}, + values = {}, + ), + ) + +clear_platform_transition = transition( + impl = _transition_impl, + refs = {}, +) diff --git a/prelude/utils/cmd_script.bzl b/prelude/utils/cmd_script.bzl index 3d8cd25a4bf77..7a6c23ea890f6 100644 --- a/prelude/utils/cmd_script.bzl +++ b/prelude/utils/cmd_script.bzl @@ -52,4 +52,4 @@ def cmd_script( else: fail(os) - return cmd_args(wrapper).hidden(cmd) + return cmd_args(wrapper, hidden = cmd) diff --git a/prelude/utils/dicts.bzl b/prelude/utils/dicts.bzl index 365ef2eda0e44..f5dcab45d014e 100644 --- a/prelude/utils/dicts.bzl +++ b/prelude/utils/dicts.bzl @@ -5,10 +5,7 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -load( - "@prelude//utils:utils.bzl", - "expect", -) +load("@prelude//utils:expect.bzl", "expect") _DEFAULT_FMT = "found different values for key \"{0}\": {} != {}" diff --git a/prelude/utils/directory_fold.bzl b/prelude/utils/directory_fold.bzl new file mode 100644 index 0000000000000..fbf2b31e48718 --- /dev/null +++ b/prelude/utils/directory_fold.bzl @@ -0,0 +1,63 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# Given a list of files, return a tree structure with the shape: +# +# type Tree = dict[component, None | Tree] +# +# Where None indicates a file, and a Tree indicates a directory. +def _build_tree(files): + tree = {} + for file in files: + map = tree + + # For every file, walk the directory components of its path and add each one to the map if it's not there already + components = file.split("/") + for directory_chunk in components[:-1]: + map = map.setdefault(directory_chunk, {}) + map[components[-1]] = None + + return tree + +def _reduce_tree(path, tree, directory): + files = [] + dirs = [] + for k, v in tree.items(): + path2 = path + ("/" if path else "") + k + if v == None: + files.append(path2) + else: + dirs.append(_reduce_tree(path2, v, directory)) + return directory(path, dirs, files) + +# Given a list of files, perform a reduction on the tree structure. +# The `directory` argument is a function that takes a path, a list of subdirectory results, and a list of files. +# For example, given the paths `foo/bar.txt` and `foo/baz.txt` it would be called thusly: +# +# directory("", [directory("foo", [], ["foo/bar.txt", "foo/baz.txt"])], []) +def directory_fold(files, directory): + return _reduce_tree("", _build_tree(files), directory) + +def _test_tree_functions(): + input = ["foo/bar/baz.txt", "foo/bar.txt", "foo.txt", "foo/bar/quux.txt", "foo/baz/quux.txt"] + output = { + "foo": { + "bar": {"baz.txt": None, "quux.txt": None}, + "bar.txt": None, + "baz": {"quux.txt": None}, + }, + "foo.txt": None, + } + result = _build_tree(input) + if result != output: + fail("_build_tree(), unexpected output. Wanted `{output}`, got `{result}`".format(output = output, result = result)) + + original = directory_fold(input, lambda _name, dirs, files: files + [x for xs in dirs for x in xs]) + if sorted(original) != sorted(input): + fail("directory_fold(), unexpected output.
Wanted `{input}`, got `{original}`".format(input = input, original = original)) + +_test_tree_functions() diff --git a/prelude/utils/expect.bzl b/prelude/utils/expect.bzl new file mode 100644 index 0000000000000..889742519e3fe --- /dev/null +++ b/prelude/utils/expect.bzl @@ -0,0 +1,144 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +"""Provides macros for stating program invariant expectations. + +It is a good practice to enforce program invariants in code, so that all +assumptions are explicit and execution fails fast and with a clear message in +case any one of them is violated. +""" + +load( + "@prelude//utils:type_defs.bzl", + "is_bool", + "is_collection", + "is_dict", + "is_list", + "is_number", + "is_string", + "is_struct", +) + +def expect(condition: typing.Any, message: str = "condition not expected", *format_args): + """Fails if provided condition is not truthy. + + Args: + condition: condition that is expected to be truthy + message: an optional error message to display in case provided condition + is not truthy + format_args: optional arguments to format the error message with + """ + if not condition: + formatted_message = message.format(*format_args) + fail(formatted_message) + +def expect_equal(left: typing.Any, right: typing.Any, message: str | None = None, *format_args): + if left != right: + if message == None: + msg = "Expected values to be equal, but got '{}' and '{}' instead.".format(left, right) + fail(msg) + else: + formatted_message = message.format(*format_args) + fail(formatted_message) + +def expect_non_none(val, msg: str = "unexpected none", *fmt_args, **fmt_kwargs): + """ + Require the given value not be `None`. + """ + if val == None: + fail(msg.format(*fmt_args, **fmt_kwargs)) + return val + +def expect_type(name: str, check: typing.Callable[[typing.Any], bool], desc: str, val: typing.Any): + """Fails if check(val) is not truthy. name, desc are used for the error message. + + Usually you shouldn't need to use this directly; prefer the expect_* family of functions + defined in the same file. + + Args: + name: the name of the attribute we're checking + check: a function implementing an invariant check + desc: a description of what we expected to see + val: the value we're checking + + See the below functions, like expect_string, for usage examples. + """ + expect(check(val), 'Buck target requires "{}" to be a {}', name, desc) + +def expect_string(name, val): + expect_type(name, is_string, "string", val) + +def expect_string_starts_with(name, val, prefix): + expect_type(name, is_string, "string", val) + expect(val.startswith(prefix), 'Buck target requires "{}" to start with "{}"', name, prefix) + +def expect_number(name, val): + expect_type(name, is_number, "number", val) + +def expect_bool(name, val): + expect_type(name, is_bool, "bool", val) + +def expect_list(name, val): + expect_type(name, is_list, "list", val) + +def expect_list_of(sub_expect, name, val): + """Check that all items in val satisfy sub_expect + + See expect_list_of_strings for an example.
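+
+    e.g. `expect_list_of(expect_string, "srcs", ["a.c", 1])` fails on the
+    second element, which is not a string.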
+ """ + expect_list(name, val) + for i, val in enumerate(val): + sub_expect("{}[{}] (value: {})".format(name, i, val), val) + +def expect_list_of_strings(name, val): + expect_list_of(expect_string, name, val) + +def expect_dict(name, val): + expect_type(name, is_dict, "dict", val) + +def expect_dict_of(key_expect, value_expect, name, val): + """Verify that all key-value pairs of val satisfy key_expect and value_expect respectively""" + expect_dict(name, val) + for key, val in val.items(): + key_expect("Key '{}' in {}".format(key, name), key) + value_expect("{}[{}] (value={})".format(name, repr(key), val), val) + +def expect_collection(name, val): + expect_type(name, is_collection, "collection", val) + +def expect_contains(name, val, options): + """Verify that val is in options.""" + expect(val in options, "{name} (value: {val}) not in {options}".format(name = name, val = val, options = options)) + +def expect_contains_all(name, val, options): + """Verify all items in val are in the options.""" + + expect_collection(name, val) + for index, val in enumerate(val): + expect_contains("{name}[{index}]".format(name = name, index = index), val, options) + +def expect_struct(name: str, val: struct): + expect_type(name, is_struct, "struct", val) + +# You'll likely want to import this struct for convenience, instead of each method separately +expects = struct( + type = expect_type, + string = expect_string, + string_starts_with = expect_string_starts_with, + number = expect_number, + bool = expect_bool, + list = expect_list, + list_of = expect_list_of, + list_of_strings = expect_list_of_strings, + dict = expect_dict, + dict_of = expect_dict_of, + collection = expect_collection, + contains = expect_contains, + contains_all = expect_contains_all, + equal = expect_equal, + struct = expect_struct, +) diff --git a/prelude/utils/graph_utils.bzl b/prelude/utils/graph_utils.bzl index 700bc4e590cd1..12a1dfda3e068 100644 --- a/prelude/utils/graph_utils.bzl +++ b/prelude/utils/graph_utils.bzl @@ -5,11 +5,14 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -load("@prelude//utils:utils.bzl", "expect") +load("@prelude//utils:expect.bzl", "expect") -def topo_sort(graph: dict[typing.Any, list[typing.Any]]) -> list[typing.Any]: +def pre_order_traversal( + graph: dict[typing.Any, list[typing.Any]], + node_formatter: typing.Callable[[typing.Any], str] = str, + edge_explainer: typing.Callable[[typing.Any, typing.Any], list[str]] = lambda _src, _dest: ["Unknown"]) -> list[typing.Any]: """ - Topo-sort the given graph. 
+ Perform a pre-order (topologically sorted) traversal of `graph` and return the ordered nodes. """ in_degrees = {node: 0 for node in graph} @@ -27,7 +30,7 @@ def topo_sort(graph: dict[typing.Any, list[typing.Any]]) -> list[typing.Any]: for _ in range(len(in_degrees)): if len(queue) == 0: - fail_cycle(graph) + fail_cycle(graph, node_formatter, edge_explainer) node = queue.pop() ordered.append(node) @@ -37,34 +40,34 @@ if in_degrees[dep] == 0: queue.append(dep) - expect(not queue, "finished before processing nodes: {}".format(queue)) + expect(not queue, "finished before processing nodes: {}".format([node_formatter(node) for node in queue])) expect(len(ordered) == len(graph), "missing or duplicate nodes in sort") return ordered -def post_order_traversal(graph: dict[typing.Any, list[typing.Any]]) -> list[typing.Any]: +def post_order_traversal( + graph: dict[typing.Any, list[typing.Any]], + node_formatter: typing.Callable[[typing.Any], str] = str, + edge_explainer: typing.Callable[[typing.Any, typing.Any], list[str]] = lambda _src, _dest: ["Unknown"]) -> list[typing.Any]: """ Performs a post-order traversal of `graph`. """ - out_degrees = {node: 0 for node in graph} + out_degrees = {} rdeps = {node: [] for node in graph} for node, deps in graph.items(): - for dep in dedupe(deps): - out_degrees[node] += 1 + deps = dedupe(deps) + out_degrees[node] = len(deps) + for dep in deps: rdeps[dep].append(node) - queue = [] - - for node, out_degree in out_degrees.items(): - if out_degree == 0: - queue.append(node) + queue = [node for node, out_degree in out_degrees.items() if out_degree == 0] ordered = [] for _ in range(len(out_degrees)): if len(queue) == 0: - fail_cycle(graph) + fail_cycle(graph, node_formatter, edge_explainer) node = queue.pop() ordered.append(node) @@ -74,20 +77,27 @@ def post_order_traversal(graph: dict[typing.Any, list[typing.Any]]) -> list[typi if out_degrees[dep] == 0: queue.append(dep) - expect(not queue, "finished before processing nodes: {}".format(queue)) + expect(not queue, "finished before processing nodes: {}".format([node_formatter(node) for node in queue])) expect(len(ordered) == len(graph), "missing or duplicate nodes in sort") return ordered -def fail_cycle(graph: dict[typing.Any, list[typing.Any]]) -> typing.Never: +def fail_cycle( + graph: dict[typing.Any, list[typing.Any]], + node_formatter: typing.Callable[[typing.Any], str], + edge_explainer: typing.Callable[[typing.Any, typing.Any], list[str]]) -> typing.Never: cycle = find_cycle(graph) if cycle: + errors = [] + for i, c in enumerate(cycle): + indented_number = "\n\n" + (" -> " if i > 0 else " ") + " " * (3 - len(str(i))) + str(i + 1) + ": " + edge_explanation = "" + if i > 0: + edge_explanation = "\n" + " " * 9 + "Reason for edge:" + edge_explanation += "".join(["\n" + " " * 11 + e for e in edge_explainer(cycle[i - 1], c)]) + errors.append(indented_number + node_formatter(c) + edge_explanation) fail( - "cycle in graph detected: {}".format( - " -> ".join( - [str(c) for c in cycle], - ), - ), + "cycle in graph detected:{}\n".format("".join(errors)), ) fail("expected cycle, but found none") @@ -152,62 +162,86 @@ def post_order_traversal_by( ordered.append(node) return ordered -def topo_sort_by( +def pre_order_traversal_by( roots: list[typing.Any], get_nodes_to_traverse_func) -> list[typing.Any]: """ - Returns a topological sorted list of the nodes in the traversal. + Returns a topologically sorted list of the nodes from a pre-order traversal.
- Note this gives a different order from topo_sort above (to simplify the implementation). + Note this gives a different order from `pre_order_traversal` above (to simplify the implementation). """ ordered = post_order_traversal_by(roots, get_nodes_to_traverse_func) return ordered[::-1] -def breadth_first_traversal( +def depth_first_traversal( graph_nodes: dict[typing.Any, list[typing.Any]], roots: list[typing.Any]) -> list[typing.Any]: """ - Like `breadth_first_traversal_by` but the nodes are stored in the graph. + Like `depth_first_traversal_by` but the nodes are stored in the graph. """ def lookup(x): return graph_nodes[x] - return breadth_first_traversal_by(graph_nodes, roots, lookup) + return depth_first_traversal_by(graph_nodes, roots, lookup) -def breadth_first_traversal_by( +# With following graph +# +# A +# / \ +# B C +# / \ / \ +# D E F G +# +# preorder-left-to-right starting from A will go to the left leg first +# A-B-D-E-C-F-G +# +# preorder-right-to-left starting from A will go to the right leg first +# A-C-G-F-B-E-D +# +GraphTraversal = enum( + "preorder-right-to-left", + "preorder-left-to-right", +) + +def depth_first_traversal_by( graph_nodes: [dict[typing.Any, typing.Any], None], roots: list[typing.Any], - get_nodes_to_traverse_func) -> list[typing.Any]: + get_nodes_to_traverse_func: typing.Callable, + traversal: GraphTraversal = GraphTraversal("preorder-right-to-left"), + node_formatter: typing.Callable[[typing.Any], str] = str) -> list[typing.Any]: """ - Performs a breadth first traversal of `graph_nodes`, beginning - with the `roots` and queuing the nodes returned by`get_nodes_to_traverse_func`. + Performs a depth first traversal of `graph_nodes`, beginning + with the `roots` and queuing the nodes returned by `get_nodes_to_traverse_func`. Returns a list of all visited nodes. get_nodes_to_traverse_func(node: '_a') -> ['_a']: Starlark does not offer while loops, so this implementation - must make use of a for loop. We pop from the end of the queue - as a matter of performance. + must make use of a for loop. """ # Dictify for O(1) lookup visited = {k: None for k in roots} + stride = -1 if traversal == GraphTraversal("preorder-left-to-right") else 1 - queue = visited.keys() + stack = [] + for node in visited.keys()[::stride]: + stack.append(node) for _ in range(len(graph_nodes) if graph_nodes else 2000000000): - if not queue: + if not stack: break - node = queue.pop() - if graph_nodes: - expect(node in graph_nodes, "Expected node {} in graph nodes", node) + node = stack.pop() + if graph_nodes and node not in graph_nodes: + fail("Expected node {} in graph nodes".format(node_formatter(node))) nodes_to_visit = get_nodes_to_traverse_func(node) - for node in nodes_to_visit: - if node not in visited: - visited[node] = None - queue.append(node) + if nodes_to_visit: + for node in nodes_to_visit[::stride]: + if node not in visited: + visited[node] = None + stack.append(node) - expect(not queue, "Expected to be done with graph traversal queue.") + expect(not stack, "Expected to be done with graph traversal stack.") return visited.keys() diff --git a/prelude/utils/host.bzl b/prelude/utils/host.bzl new file mode 100644 index 0000000000000..67145fe91db15 --- /dev/null +++ b/prelude/utils/host.bzl @@ -0,0 +1,26 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates.
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +HostOSTypes = ["linux", "macos", "windows"] + +HostOSType = enum(*HostOSTypes) + +def _compute_get_host_os() -> HostOSType: + info = host_info() + if info.os.is_linux: + return HostOSType("linux") + elif info.os.is_macos: + return HostOSType("macos") + elif info.os.is_windows: + return HostOSType("windows") + else: + fail("Unknown host OS") + +_HOST_OS = _compute_get_host_os() + +def get_host_os() -> HostOSType: + return _HOST_OS diff --git a/prelude/utils/lazy.bzl b/prelude/utils/lazy.bzl new file mode 100644 index 0000000000000..4fda71964eb99 --- /dev/null +++ b/prelude/utils/lazy.bzl @@ -0,0 +1,54 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _is_any(predicate, iterable): + """ + This expression lazily iterates the container with 0 new allocations. + In the event that the iterable is empty, it will return False. + + For scenarios like this: + + _ = any([i % 2 == 0 for i in range(100000)]) + + The list comprehension would lead to a new list of 100000 booleans, + and would only end up checking 1. Replacing it with: + + _ = is_any(lambda i: i % 2 == 0, range(100000)) + + would lead to 0 new allocations. + """ + for i in iterable: + if predicate(i): + return True + + return False + +def _is_all(predicate, iterable): + """ + This expression lazily iterates the container with 0 new allocations. + In the event that the iterable is empty, it will return True. + + For scenarios like this: + + _ = all([i % 2 == 0 for i in range(100000)]) + + The list comprehension would lead to a list of 100000 booleans. + Replacing it with: + + _ = is_all(lambda i: i % 2 == 0, range(100000)) + + would lead to 0 new allocations.
+ """ + for i in iterable: + if not predicate(i): + return False + return True + +lazy = struct( + is_any = _is_any, + is_all = _is_all, +) diff --git a/prelude/utils/pick.bzl b/prelude/utils/pick.bzl index ad1310eb4f2cc..d6de059ae4782 100644 --- a/prelude/utils/pick.bzl +++ b/prelude/utils/pick.bzl @@ -12,10 +12,13 @@ def pick_bin(override, underlying): return override[RunInfo] if override != None else underlying def pick_dep(override, underlying): + return pick_raw(override, underlying) + +def pick_raw(override, underlying): return override if override != None else underlying def pick_and_add(override, additional, underlying): - flags = cmd_args(pick(override, underlying)) + flags = [pick(override, underlying)] if additional: - flags.add(additional) - return flags + flags.append(additional) + return cmd_args(flags) diff --git a/prelude/utils/set.bzl b/prelude/utils/set.bzl index 14fb5e2c8b662..d242f45bb557a 100644 --- a/prelude/utils/set.bzl +++ b/prelude/utils/set.bzl @@ -52,9 +52,9 @@ def set(initial_entries: list[typing.Any] = []) -> set_type: def set_add(v: typing.Any) -> bool: if self.contains(v): - return True + return False self._entries[v] = None - return False + return True def set_contains(v: typing.Any) -> bool: return v in self._entries @@ -66,7 +66,7 @@ def set(initial_entries: list[typing.Any] = []) -> set_type: return False def set_update(values: list[typing.Any]) -> list[typing.Any]: - return filter(None, [v for v in values if not self.add(v)]) + return filter(None, [v for v in values if self.add(v)]) def set_size() -> int: return len(self._entries) diff --git a/prelude/utils/source_listing.bzl b/prelude/utils/source_listing.bzl new file mode 100644 index 0000000000000..5ac49d4a96602 --- /dev/null +++ b/prelude/utils/source_listing.bzl @@ -0,0 +1,13 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load(":source_listing_impl.bzl?v2_only", "SourceListingInfoAlias", "source_listing_impl") + +SourceListingInfo = SourceListingInfoAlias + +def source_listing(exclude = None): + source_listing_impl(exclude = exclude or []) diff --git a/prelude/utils/source_listing_impl.bzl b/prelude/utils/source_listing_impl.bzl new file mode 100644 index 0000000000000..fae29ffffdb70 --- /dev/null +++ b/prelude/utils/source_listing_impl.bzl @@ -0,0 +1,48 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +SourceListingInfo = provider(fields = { + "sources": dict[str, Artifact], +}) + +SourceListingInfoAlias = SourceListingInfo + +def _impl(ctx): + sources = {} + for d in ctx.attrs.deps: + package = ctx.label.package + if package != "": + package += "/" + rel_loc = d.label.package.removeprefix(package) + sources.update({rel_loc + "/" + p: art for p, art in d[SourceListingInfo].sources.items()}) + + for s in ctx.attrs.srcs: + sources[s.short_path] = s + return [DefaultInfo(), SourceListingInfo(sources = sources)] + +# This rule acts sort of like a `filegroup`, except that 1) it returns all the +# source artifacts unchanged, and 2) it reports the location of all artifacts +# relative to the current package. We use this for gathering listings of the +# source files for bundled cells. +_source_listing = rule( + impl = _impl, + attrs = { + "deps": attrs.list(attrs.dep()), + "srcs": attrs.list(attrs.source()), + }, +) + +def source_listing_impl(exclude: list[str]): + package = package_name() + if package != "": + package += "/" + _source_listing( + name = "source_listing", + srcs = glob(["**/*", "**/.*"], exclude = exclude), + deps = ["//" + package + s + ":source_listing" for s in __internal__.sub_packages()], + visibility = ["PUBLIC"], + ) diff --git a/prelude/utils/type_defs.bzl b/prelude/utils/type_defs.bzl new file mode 100644 index 0000000000000..3ab5d21b5f9e7 --- /dev/null +++ b/prelude/utils/type_defs.bzl @@ -0,0 +1,152 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +"""Provides macros for querying type information.""" + +_SELECT_TYPE = type(select({"DEFAULT": []})) + +def is_select(thing): + return type(thing) == _SELECT_TYPE + +def is_unicode(arg): + """Checks if provided instance has a unicode type. + + Args: + arg: An instance to check. type: Any + + Returns: + True for unicode instances, False otherwise. rtype: bool + """ + return hasattr(arg, "encode") + +_STRING_TYPE = type("") + +def is_string(arg): + """Checks if provided instance has a string type. + + Args: + arg: An instance to check. type: Any + + Returns: + True for string instances, False otherwise. rtype: bool + """ + return type(arg) == _STRING_TYPE + +_LIST_TYPE = type([]) + +def is_list(arg): + """Checks if provided instance has a list type. + + Args: + arg: An instance to check. type: Any + + Returns: + True for list instances, False otherwise. rtype: bool + """ + return type(arg) == _LIST_TYPE + +_DICT_TYPE = type({}) + +def is_dict(arg): + """Checks if provided instance has a dict type. + + Args: + arg: An instance to check. type: Any + + Returns: + True for dict instances, False otherwise. rtype: bool + """ + return type(arg) == _DICT_TYPE + +_TUPLE_TYPE = type(()) + +def is_tuple(arg): + """Checks if provided instance has a tuple type. + + Args: + arg: An instance to check. type: Any + + Returns: + True for tuple instances, False otherwise. rtype: bool + """ + return type(arg) == _TUPLE_TYPE + +def is_collection(arg): + """Checks if provided instance is a collection subtype. + + This will either be a dict, list, or tuple. + """ + return is_dict(arg) or is_list(arg) or is_tuple(arg) + +_BOOL_TYPE = type(True) + +def is_bool(arg): + """Checks if provided instance is a boolean value. + + Args: + arg: An instance to check.
type: Any + + Returns: + True for boolean values, False otherwise. rtype: bool + """ + return type(arg) == _BOOL_TYPE + +_NUMBER_TYPE = type(1) + +def is_number(arg): + """Checks if provided instance is a number value. + + Args: + arg: An instance to check. type: Any + + Returns: + True for number values, False otherwise. rtype: bool + """ + return type(arg) == _NUMBER_TYPE + +_STRUCT_TYPE = type(struct()) # Starlark returns the same type for all structs + +def is_struct(arg): + """Checks if provided instance is a struct value. + + Args: + arg: An instance to check. type: Any + + Returns: + True for struct values, False otherwise. rtype: bool + """ + return type(arg) == _STRUCT_TYPE + +def _func(): + pass + +_FUNCTION_TYPE = type(_func) + +def is_function(arg): + """Checks if provided instance is a function value. + + Args: + arg: An instance to check. type: Any + + Returns: + True for function values, False otherwise. rtype: bool + """ + return type(arg) == _FUNCTION_TYPE + +type_utils = struct( + is_bool = is_bool, + is_number = is_number, + is_string = is_string, + is_unicode = is_unicode, + is_list = is_list, + is_dict = is_dict, + is_tuple = is_tuple, + is_collection = is_collection, + is_select = is_select, + is_struct = is_struct, + is_function = is_function, ) diff --git a/prelude/utils/utils.bzl b/prelude/utils/utils.bzl index 08be875d0127a..658629fcae715 100644 --- a/prelude/utils/utils.bzl +++ b/prelude/utils/utils.bzl @@ -7,51 +7,17 @@ # General utilities shared between multiple rules. -def is_any(predicate: typing.Callable, iterable: list[typing.Any]) -> bool: - """ - This expression lazily iterates the container with 0 new allocations. - In the event that the iterable is empty, it will return False. - - For scenarios like this: - - _ = any([i % 2 == 0 for i in range(100000)]) - - The list comprehension would lead to a new list of 100000 booleans, - and would only end-up checking 1. Replacing it with: - - _ = is_any(lambda i: i % 2 == 0, range(100000)) - - would lead to 0 new allocations. - """ - for i in iterable: - if predicate(i): - return True - return False - -def is_all(predicate: typing.Callable, iterable: list[typing.Any]) -> bool: - """ - This expression lazily iterates the container with 0 new allocations. - In the event that the iterable is empty, it will return False. - - For scenarios like this: - - _ = all([i % 2 == 0 for i in range(100000)]) - - The list comprehension would lead to a list of 100000 booleans. - Replacing it with: - - _ = is_all(lambda i: i % 2 == 0, range(100000)) - - would lead to 0 new allocations. - """ - for i in iterable: - if not predicate(i): - return False - return True +load("@prelude//utils:expect.bzl", "expect") def value_or(x: [None, typing.Any], default: typing.Any) -> typing.Any: return default if x == None else x +def values_or(*xs: typing.Any | None) -> typing.Any | None: + for x in xs: + if x != None: + return x + return None + # Flatten a list of lists into a list def flatten(xss: list[list[typing.Any]]) -> list[typing.Any]: return [x for xs in xss for x in xs] @@ -60,21 +26,7 @@ def flatten(xss: list[list[typing.Any]]) -> list[typing.Any]: def flatten_dict(xss: list[dict[typing.Any, typing.Any]]) -> dict[typing.Any, typing.Any]: return {k: v for xs in xss for k, v in xs.items()} -# Fail if given condition is not met.
-def expect(x: bool, msg: str = "condition not expected", *fmt): - if not x: - fmt_msg = msg.format(*fmt) - fail(fmt_msg) - -def expect_non_none(val, msg: str = "unexpected none", *fmt_args, **fmt_kwargs): - """ - Require the given value not be `None`. - """ - if val == None: - fail(msg.format(*fmt_args, **fmt_kwargs)) - return val - -def from_named_set(srcs: [dict[str, [Artifact, Dependency]], list[[Artifact, Dependency]]]) -> dict[str, [Artifact, Dependency]]: +def from_named_set(srcs: [dict[str, Artifact | Dependency], list[Artifact | Dependency]]) -> dict[str, Artifact | Dependency]: """ Normalize parameters of optionally named sources to a dictionary mapping names to sources, deriving the name from the short path when it's not @@ -104,9 +56,6 @@ def from_named_set(srcs: [dict[str, [Artifact, Dependency]], list[[Artifact, Dep def map_idx(key: typing.Any, vals: list[typing.Any]) -> list[typing.Any]: return [x[key] for x in vals] -def filter_idx(key: typing.Any, vals: list[typing.Any]) -> list[typing.Any]: - return [x for x in vals if key in x] - def filter_and_map_idx(key: typing.Any, vals: list[typing.Any]) -> list[typing.Any]: return [x[key] for x in vals if key in x] @@ -117,7 +66,7 @@ def idx(x: [typing.Any, None], key: typing.Any) -> [typing.Any, None]: def dedupe_by_value(vals: list[typing.Any]) -> list[typing.Any]: return {val: None for val in vals}.keys() -def map_val(func: typing.Callable, val: [typing.Any, None]) -> [typing.Any, None]: +def map_val(func: typing.Callable[[typing.Any], typing.Any], val: [typing.Any, None]) -> [typing.Any, None]: """ If `val` is `None`, return `None`, else apply `func` to `val` and return the result. diff --git a/prelude/validation/audit_results.bxl b/prelude/validation/audit_results.bxl new file mode 100644 index 0000000000000..a86d5cad74b52 --- /dev/null +++ b/prelude/validation/audit_results.bxl @@ -0,0 +1,66 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(ctx: bxl.Context): + if ctx.cli_args.transitive: + targets = ( + ctx.target_universe(ctx.cli_args.targets).universe_target_set() + ) + else: + targets = ctx.cli_args.targets + + info = {} + for target, analysis_result in ctx.analysis(targets).items(): + providers = analysis_result.providers() + validation_info = providers.get(ValidationInfo) + if not validation_info: + if not ctx.cli_args.trim: + info[target] = {} + continue + + spec_name_to_path = {} + for spec in validation_info.validations: + # Yes, I'm aware this is suboptimal. When running this script on + # large targets at Meta, there is no discernible regression to performance. + # + # Read the big ol' comment block below to understand why. + o = ctx.output.ensure(spec.validation_result) + spec_name_to_path[spec.name] = o.abs_path() + + info[target] = spec_name_to_path + + # We chose to print to stdout because we run into an issue with + # the ctx.bxl_actions().actions.write_json() API + # + # The goal is to output something into a file which looks like this: + # { + # "cell//some:target": { + # "spec_name": "path/to/materialized/output.json" + # + # Unfortunately, if we use the actions.write_json() API, it requires us to pass + # `with_inputs = True` so we can be sure that we materialize the paths to validation + # outputs with the JSON.
+ # + # Unfortunately, `ensured_artifact_group` has a limited API that doesn't allow us to + # only print a subset of the targets. While you can loop through them, there is no + # `.owner` API; you'd have to guess based on the file path. + # + # As a result, we ensure the artifacts as we iterate so we can get the materialized absolute + # path and not run into the invariant that EnsuredArtifacts are not allowed to be frozen. + ctx.output.print_json(info) + +main = bxl_main( + impl = _impl, + cli_args = { + "targets": cli_args.target_expr(), + "transitive": cli_args.bool(False), + "trim": cli_args.bool( + default = True, + doc = "By default, targets with no validations will be stripped from the output.", + ), + }, +) diff --git a/prelude/validation_deps.bzl b/prelude/validation_deps.bzl new file mode 100644 index 0000000000000..a057e353d61aa --- /dev/null +++ b/prelude/validation_deps.bzl @@ -0,0 +1,18 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +VALIDATION_DEPS_ATTR_NAME = "validation_deps" +VALIDATION_DEPS_ATTR_TYPE = attrs.set(attrs.dep(), sorted = True, default = []) + +def get_validation_deps_outputs(ctx: AnalysisContext) -> list[Artifact]: + artifacts = [] + if hasattr(ctx.attrs, VALIDATION_DEPS_ATTR_NAME): + validation_deps = getattr(ctx.attrs, VALIDATION_DEPS_ATTR_NAME) + for dep in validation_deps: + default_info = dep[DefaultInfo] + artifacts += default_info.default_outputs + return artifacts diff --git a/prelude/windows/tools/BUCK b/prelude/windows/tools/BUCK deleted file mode 100644 index bda1136989c48..0000000000000 --- a/prelude/windows/tools/BUCK +++ /dev/null @@ -1,12 +0,0 @@ -prelude = native - -prelude.export_file( - name = "msvc_hermetic_exec.bat", - src = "msvc_hermetic_exec.bat", -) - -prelude.command_alias( - name = "msvc_hermetic_exec", - exe = ":msvc_hermetic_exec.bat", - visibility = ["PUBLIC"], -) diff --git a/prelude/windows/tools/BUCK.v2 b/prelude/windows/tools/BUCK.v2 new file mode 100644 index 0000000000000..a8fc01876479f --- /dev/null +++ b/prelude/windows/tools/BUCK.v2 @@ -0,0 +1,18 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +prelude = native + +prelude.export_file( + name = "msvc_hermetic_exec.bat", + src = "msvc_hermetic_exec.bat", +) + +prelude.command_alias( + name = "msvc_hermetic_exec", + exe = ":msvc_hermetic_exec.bat", + visibility = ["PUBLIC"], +) diff --git a/prelude/worker_tool.bzl b/prelude/worker_tool.bzl index 0006019fe872d..1cac6e325340c 100644 --- a/prelude/worker_tool.bzl +++ b/prelude/worker_tool.bzl @@ -5,7 +5,7 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree.
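+# NOTE: an illustrative sketch, not part of this diff: a rule can make its main
+# action wait on validations by attaching the outputs gathered by
+# `get_validation_deps_outputs` from `prelude//validation_deps.bzl` above as
+# hidden inputs (`_impl` and `mytool` are hypothetical names):
+#
+#     def _impl(ctx: AnalysisContext) -> list[Provider]:
+#         out = ctx.actions.declare_output("out")
+#         ctx.actions.run(
+#             cmd_args(
+#                 "mytool",
+#                 out.as_output(),
+#                 # Validation outputs must be built before this action runs.
+#                 hidden = get_validation_deps_outputs(ctx),
+#             ),
+#             category = "mytool",
+#         )
+#         return [DefaultInfo(default_output = out)]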
-load("@prelude//utils:utils.bzl", "expect") +load("@prelude//utils:expect.bzl", "expect") WorkerToolInfo = provider( fields = { @@ -28,9 +28,9 @@ def worker_tool(ctx: AnalysisContext) -> list[Provider]: expect(worker_tool_run_info != None, "Worker tool executable must have a RunInfo!") worker_tool_runner = ctx.attrs._worker_tool_runner[RunInfo] - worker_tool_cmd = cmd_args(worker_tool_runner) - worker_tool_cmd.add("--worker-tool") - worker_tool_cmd.add(worker_tool_run_info) + worker_tool_cmd = [worker_tool_runner] + worker_tool_cmd.append("--worker-tool") + worker_tool_cmd.append(worker_tool_run_info) worker_args = ctx.attrs.args if worker_args: @@ -40,8 +40,8 @@ def worker_tool(ctx: AnalysisContext) -> list[Provider]: allow_args = True, ) - worker_tool_cmd.add("--worker-args-file") - worker_tool_cmd.add(worker_args_file) + worker_tool_cmd.append("--worker-args-file") + worker_tool_cmd.append(worker_args_file) worker_env = ctx.attrs.env if worker_env: @@ -56,9 +56,10 @@ def worker_tool(ctx: AnalysisContext) -> list[Provider]: allow_args = True, ) - worker_tool_cmd.add("--worker-env-file") - worker_tool_cmd.add(env_args_file) + worker_tool_cmd.append("--worker-env-file") + worker_tool_cmd.append(env_args_file) + worker_tool_cmd = cmd_args(worker_tool_cmd) return [ DefaultInfo(), RunInfo( diff --git a/prelude/zip_file/tools/BUCK b/prelude/zip_file/tools/BUCK deleted file mode 100644 index 36cc2b8b9a6ef..0000000000000 --- a/prelude/zip_file/tools/BUCK +++ /dev/null @@ -1,5 +0,0 @@ -python_bootstrap_binary( - name = "unzip", - main = "unzip.py", - visibility = ["PUBLIC"], -) diff --git a/prelude/zip_file/tools/BUCK.v2 b/prelude/zip_file/tools/BUCK.v2 new file mode 100644 index 0000000000000..560920dfcd84f --- /dev/null +++ b/prelude/zip_file/tools/BUCK.v2 @@ -0,0 +1,11 @@ +load("@prelude//utils:source_listing.bzl", "source_listing") + +oncall("build_infra") + +source_listing() + +python_bootstrap_binary( + name = "unzip", + main = "unzip.py", + visibility = ["PUBLIC"], +) diff --git a/prelude/zip_file/tools/unzip.py b/prelude/zip_file/tools/unzip.py index e571c3987fa58..3ec289156e4eb 100644 --- a/prelude/zip_file/tools/unzip.py +++ b/prelude/zip_file/tools/unzip.py @@ -28,6 +28,11 @@ def do_unzip(archive, output_dir): # That way we don't need to pass `target_is_directory` argument to `os.symlink` function. 
for info in (i for i in z.infolist() if not _is_symlink(i)): z.extract(info, path=output_dir) + if _is_executable(info): + os.chmod( + os.path.join(output_dir, info.filename), + _file_attributes(info) | stat.S_IXUSR, + ) for info in (i for i in z.infolist() if _is_symlink(i)): symlink_path = os.path.join(output_dir, info.filename) symlink_dst = z.read(info).decode("utf-8") @@ -54,6 +59,10 @@ def _is_symlink(zip_info): return stat.S_ISLNK(_file_attributes(zip_info)) +def _is_executable(zip_info): + return stat.S_IMODE(_file_attributes(zip_info)) & stat.S_IXUSR + + def main(): args = _parse_args() print("Source zip is: {}".format(args.src), file=sys.stderr) diff --git a/prelude/zip_file/zip_file.bzl b/prelude/zip_file/zip_file.bzl index 79b8902158551..3cf3c8017c36e 100644 --- a/prelude/zip_file/zip_file.bzl +++ b/prelude/zip_file/zip_file.bzl @@ -8,7 +8,7 @@ load("@prelude//decls/toolchains_common.bzl", "toolchains_common") load(":zip_file_toolchain.bzl", "ZipFileToolchainInfo") -def zip_file_impl(ctx: AnalysisContext) -> list[Provider]: +def _zip_file_impl(ctx: AnalysisContext) -> list[Provider]: """ zip_file() rule implementation @@ -29,42 +29,42 @@ def zip_file_impl(ctx: AnalysisContext) -> list[Provider]: zip_srcs = ctx.attrs.zip_srcs srcs = ctx.attrs.srcs - create_zip_cmd = cmd_args([ + create_zip_cmd = [ create_zip_tool, "--output_path", output.as_output(), "--on_duplicate_entry", on_duplicate_entry if on_duplicate_entry else "overwrite", - ]) + ] if srcs: - srcs_file_cmd = cmd_args() - # add artifact and is_source flag pair - for src in srcs: - srcs_file_cmd.add(src) - srcs_file_cmd.add(src.short_path) - srcs_file_cmd.add(str(src.is_source)) + srcs_file_cmd = cmd_args( + [ + [src, src.short_path, str(src.is_source)] + for src in srcs + ], + ) entries_file = ctx.actions.write("entries", srcs_file_cmd) - create_zip_cmd.add("--entries_file") - create_zip_cmd.add(entries_file) - create_zip_cmd.hidden(srcs) + create_zip_cmd.append("--entries_file") + create_zip_cmd.append(entries_file) + create_zip_cmd.append(cmd_args(hidden = srcs)) if zip_srcs: - create_zip_cmd.add("--zip_sources") - create_zip_cmd.add(zip_srcs) + create_zip_cmd.append("--zip_sources") + create_zip_cmd.append(zip_srcs) if entries_to_exclude: - create_zip_cmd.add("--entries_to_exclude") - create_zip_cmd.add(entries_to_exclude) + create_zip_cmd.append("--entries_to_exclude") + create_zip_cmd.append(entries_to_exclude) - ctx.actions.run(create_zip_cmd, category = "zip") + ctx.actions.run(cmd_args(create_zip_cmd), category = "zip") return [DefaultInfo(default_output = output)] implemented_rules = { - "zip_file": zip_file_impl, + "zip_file": _zip_file_impl, } extra_attributes = { diff --git a/proto_defs.bzl b/proto_defs.bzl index f56b8137428cc..3a7778d6f5f27 100644 --- a/proto_defs.bzl +++ b/proto_defs.bzl @@ -6,7 +6,6 @@ # of this source tree. load("@fbcode//buck2:buck_rust_binary.bzl", "buck_rust_binary") -load("@fbcode_macros//build_defs:export_files.bzl", "export_file") load("@fbcode_macros//build_defs:native_rules.bzl", "buck_genrule") load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") @@ -57,14 +56,16 @@ def rust_protobuf_library( # This is where prost looks for generated .rs files "OUT_DIR": "$(location :{})".format(proto_name), }, + named_deps = { + # "prost" is https://github.com/tokio-rs/prost, which is used + # to generate Rust code from protobuf definitions. 
+ "generated_prost_target": ":{}".format(proto_name), + }, + labels = [ + "generated_protobuf_library_rust", + ], deps = [ "fbsource//third-party/rust:prost", ] + (deps or []), test_deps = test_deps, ) - - # For python tests only - for proto in protos: - export_file( - name = proto, - ) diff --git a/remote_execution/oss/re_grpc/BUCK b/remote_execution/oss/re_grpc/BUCK index 30a860a4cad02..6ecedda34c469 100644 --- a/remote_execution/oss/re_grpc/BUCK +++ b/remote_execution/oss/re_grpc/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -13,6 +12,7 @@ rust_library( "fbsource//third-party/rust:anyhow", "fbsource//third-party/rust:futures", "fbsource//third-party/rust:http", + "fbsource//third-party/rust:lru", "fbsource//third-party/rust:once_cell", "fbsource//third-party/rust:prost", "fbsource//third-party/rust:prost-types", @@ -23,6 +23,7 @@ rust_library( "fbsource//third-party/rust:tracing", "fbsource//third-party/rust:uuid", "//buck2/app/buck2_re_configuration:buck2_re_configuration", + "//buck2/app/buck2_util:buck2_util", "//buck2/gazebo/dupe:dupe", "//buck2/gazebo/gazebo:gazebo", "//buck2/remote_execution/oss/re_grpc_proto:re_grpc_proto", diff --git a/remote_execution/oss/re_grpc/Cargo.toml b/remote_execution/oss/re_grpc/Cargo.toml index 6d6e057eec2fc..1e71767600d32 100644 --- a/remote_execution/oss/re_grpc/Cargo.toml +++ b/remote_execution/oss/re_grpc/Cargo.toml @@ -1,34 +1,31 @@ [package] +description = "A library providing remote execution using gRPC" +edition = "2021" +license = { workspace = true } name = "remote_execution" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "A library providing remote execution using gRPC" [dependencies] anyhow = { workspace = true } dupe = { workspace = true } -gazebo = { workspace = true } futures = { workspace = true } +gazebo = { workspace = true } http = { workspace = true } -thiserror = { workspace = true } -prost-types = { workspace = true } +lru = { workspace = true } +once_cell = { workspace = true } prost = { workspace = true } +prost-types = { workspace = true } regex = { workspace = true } +thiserror = { workspace = true } tokio = { workspace = true } tonic = { workspace = true } tracing = { workspace = true } -once_cell = { workspace = true } uuid = { workspace = true } -gazebo_lint.version = "0.1" -gazebo_lint.optional = true -# @oss-disable: gazebo_lint.path = "../../../gazebo_lint/gazebo_lint" - buck2_re_configuration = { workspace = true } +buck2_util = { workspace = true } re_grpc_proto = { path = "../re_grpc_proto" } [dev-dependencies] tempfile = { workspace = true } - -[features] -# @oss-disable: default = ["gazebo_lint"] diff --git a/remote_execution/oss/re_grpc/src/client.rs b/remote_execution/oss/re_grpc/src/client.rs index ae665ff4582e3..40bf31fa1d885 100644 --- a/remote_execution/oss/re_grpc/src/client.rs +++ b/remote_execution/oss/re_grpc/src/client.rs @@ -9,8 +9,12 @@ use std::collections::HashMap; use std::env::VarError; +use std::num::NonZeroUsize; use std::pin::Pin; use std::sync::Arc; +use std::sync::Mutex; +use std::time::Duration; +use std::time::Instant; use anyhow::Context; use buck2_re_configuration::Buck2OssReConfiguration; @@ -23,6 +27,7 @@ use futures::stream::StreamExt; use futures::stream::TryStreamExt; use futures::Stream; use gazebo::prelude::*; +use lru::LruCache; use once_cell::sync::Lazy; use prost::Message; use 
re_grpc_proto::build::bazel::remote::execution::v2::action_cache_client::ActionCacheClient; @@ -45,7 +50,9 @@ use re_grpc_proto::build::bazel::remote::execution::v2::FindMissingBlobsRequest; use re_grpc_proto::build::bazel::remote::execution::v2::FindMissingBlobsResponse; use re_grpc_proto::build::bazel::remote::execution::v2::GetActionResultRequest; use re_grpc_proto::build::bazel::remote::execution::v2::GetCapabilitiesRequest; +use re_grpc_proto::build::bazel::remote::execution::v2::RequestMetadata; use re_grpc_proto::build::bazel::remote::execution::v2::ResultsCachePolicy; +use re_grpc_proto::build::bazel::remote::execution::v2::ToolDetails; use re_grpc_proto::google::bytestream::byte_stream_client::ByteStreamClient; use re_grpc_proto::google::bytestream::ReadRequest; use re_grpc_proto::google::bytestream::ReadResponse; @@ -74,12 +81,7 @@ use crate::metadata::*; use crate::request::*; use crate::response::*; -// RBE Services (e.g. Buildbarn) may not be robust against having too many files open at -// once. Limit to an arbitrary reasonable number since this information is not expressed -// in the Capabilities message query. -const CONCURRENT_UPLOAD_LIMIT: usize = 64; - -const DEFAULT_MAX_MSG_SIZE: usize = 4 * 1000 * 1000; +const DEFAULT_MAX_TOTAL_BATCH_SIZE: usize = 4 * 1000 * 1000; fn tdigest_to(tdigest: TDigest) -> Digest { Digest { @@ -112,6 +114,7 @@ fn check_status(status: Status) -> Result<(), REClientError> { Err(REClientError { code: TCode(status.code), message: status.message, + group: TCodeReasonGroup::UNKNOWN, }) } @@ -202,11 +205,19 @@ fn prepare_uri(uri: Uri, tls: bool) -> anyhow::Result { pub struct RECapabilities { /// Largest size of a message before being uploaded using bytestream service. /// 0 indicates no limit beyond constraint of underlying transport (which is unknown). - max_msg_size: usize, + max_total_batch_size: usize, /// Does the remote server support execution. exec_enabled: bool, } +/// Contains runtime options for the remote execution client as set under `buck2_re_client` +pub struct RERuntimeOpts { + /// Use the Meta version of the request metadata + use_fbcode_metadata: bool, + /// Maximum number of concurrent upload requests. + max_concurrent_uploads_per_action: Option, +} + struct InstanceName(Option); impl InstanceName { @@ -266,11 +277,52 @@ impl REClientBuilder { let interceptor = InjectHeadersInterceptor::new(&opts.http_headers)?; - let mut grpc_clients = GRPCClients { + let mut capabilities_client = CapabilitiesClient::with_interceptor( + capabilities.context("Error creating Capabilities client")?, + interceptor.dupe(), + ); + + if let Some(max_decoding_message_size) = opts.max_decoding_message_size { + capabilities_client = + capabilities_client.max_decoding_message_size(max_decoding_message_size); + } + + let instance_name = InstanceName(opts.instance_name.clone()); + + let capabilities = if opts.capabilities.unwrap_or(true) { + Self::fetch_rbe_capabilities( + &mut capabilities_client, + &instance_name, + opts.max_total_batch_size, + ) + .await? 
+        } else {
+            RECapabilities {
+                exec_enabled: true,
+                max_total_batch_size: DEFAULT_MAX_TOTAL_BATCH_SIZE,
+            }
+        };
+
+        if !capabilities.exec_enabled {
+            return Err(anyhow::anyhow!("Server has remote execution disabled."));
+        }
+
+        let max_decoding_msg_size = opts
+            .max_decoding_message_size
+            .unwrap_or(capabilities.max_total_batch_size * 2);
+
+        if max_decoding_msg_size < capabilities.max_total_batch_size {
+            return Err(anyhow::anyhow!(
+                "Attribute `max_decoding_message_size` must always be equal to or higher than `max_total_batch_size`"
+            ));
+        }
+
+        let grpc_clients = GRPCClients {
             cas_client: ContentAddressableStorageClient::with_interceptor(
                 cas.context("Error creating CAS client")?,
                 interceptor.dupe(),
-            ),
+            )
+            .max_decoding_message_size(max_decoding_msg_size),
             execution_client: ExecutionClient::with_interceptor(
                 execution.context("Error creating Execution client")?,
                 interceptor.dupe(),
@@ -282,64 +334,61 @@ impl REClientBuilder {
             bytestream_client: ByteStreamClient::with_interceptor(
                 bytestream.context("Error creating Bytestream client")?,
                 interceptor.dupe(),
-            ),
-            capabilities_client: CapabilitiesClient::with_interceptor(
-                capabilities.context("Error creating Capabilities client")?,
-                interceptor.dupe(),
-            ),
-        };
-
-        let instance_name = InstanceName(opts.instance_name.clone());
-
-        let capabilities = if opts.capabilities.unwrap_or(true) {
-            Self::fetch_rbe_capabilities(&mut grpc_clients, &instance_name).await?
-        } else {
-            RECapabilities {
-                exec_enabled: true,
-                max_msg_size: DEFAULT_MAX_MSG_SIZE,
-            }
+            )
+            .max_decoding_message_size(max_decoding_msg_size),
         };
 
-        if !capabilities.exec_enabled {
-            return Err(anyhow::anyhow!("Server has remote execution disabled."));
-        }
-
-        Ok(REClient::new(grpc_clients, capabilities, instance_name))
+        Ok(REClient::new(
+            RERuntimeOpts {
+                use_fbcode_metadata: opts.use_fbcode_metadata,
+                max_concurrent_uploads_per_action: opts.max_concurrent_uploads_per_action,
+            },
+            grpc_clients,
+            capabilities,
+            instance_name,
+        ))
     }
 
     async fn fetch_rbe_capabilities(
-        clients: &mut GRPCClients,
+        client: &mut CapabilitiesClient<GrpcService>,
        instance_name: &InstanceName,
+        max_total_batch_size: Option<usize>,
    ) -> anyhow::Result<RECapabilities> {
        // TODO use more of the capabilities of the remote build executor
-        let resp = clients
-            .capabilities_client
+        let resp = client
            .get_capabilities(GetCapabilitiesRequest {
                instance_name: instance_name.as_str().to_owned(),
            })
            .await
            .context("Failed to query capabilities of remote")?
            .into_inner();
 
-        // Default is a reasonable size for the gRPC transport
-        // with enough room for headers.
-        let mut max_msg_size = DEFAULT_MAX_MSG_SIZE;
+        let mut exec_enabled = true;
 
-        if let Some(cache_cap) = resp.cache_capabilities {
-            let size = cache_cap.max_batch_total_size_bytes as usize;
-            // A value of 0 means no limit is set
-            if size != 0 {
-                max_msg_size = size;
-            }
-        }
+        let max_total_batch_size_from_capabilities: Option<usize> =
+            if let Some(cache_cap) = resp.cache_capabilities {
+                let size = cache_cap.max_batch_total_size_bytes as usize;
+                // A value of 0 means no limit is set
+                if size != 0 { Some(size) } else { None }
+            } else {
+                None
+            };
+
+        let max_total_batch_size =
+            match (max_total_batch_size_from_capabilities, max_total_batch_size) {
+                (Some(cap), Some(config)) => std::cmp::min(cap, config),
+                (Some(cap), None) => cap,
+                (None, Some(config)) => config,
+                (None, None) => DEFAULT_MAX_TOTAL_BATCH_SIZE,
+            };
 
         if let Some(exec_cap) = resp.execution_capabilities {
             exec_enabled = exec_cap.exec_enabled;
         }
 
         Ok(RECapabilities {
-            max_msg_size,
+            max_total_batch_size,
             exec_enabled,
         })
     }
@@ -390,19 +439,53 @@ impl Interceptor for InjectHeadersInterceptor {
     }
 }
 
+type GrpcService = InterceptedService<Channel, InjectHeadersInterceptor>;
+
 pub struct GRPCClients {
-    cas_client:
-        ContentAddressableStorageClient<InterceptedService<Channel, InjectHeadersInterceptor>>,
-    execution_client: ExecutionClient<InterceptedService<Channel, InjectHeadersInterceptor>>,
-    action_cache_client: ActionCacheClient<InterceptedService<Channel, InjectHeadersInterceptor>>,
-    bytestream_client: ByteStreamClient<InterceptedService<Channel, InjectHeadersInterceptor>>,
-    capabilities_client: CapabilitiesClient<InterceptedService<Channel, InjectHeadersInterceptor>>,
+    cas_client: ContentAddressableStorageClient<GrpcService>,
+    execution_client: ExecutionClient<GrpcService>,
+    action_cache_client: ActionCacheClient<GrpcService>,
+    bytestream_client: ByteStreamClient<GrpcService>,
+}
+
+enum DigestRemoteState {
+    ExistsOnRemote,
+    Missing,
+}
+
+struct FindMissingCache {
+    cache: LruCache<TDigest, DigestRemoteState>,
+    /// To avoid a situation where we cache that an artifact is available remotely but the
+    /// artifact then expires, we clear our local cache once every `ttl`.
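The `max_total_batch_size` selection above merges the server-advertised `max_batch_total_size_bytes` with the configured override, taking the smaller value when both exist and falling back to the roughly 4MB default otherwise. A standalone restatement with a quick check; the function and test names are illustrative only:

const DEFAULT_MAX_TOTAL_BATCH_SIZE: usize = 4 * 1000 * 1000;

// Illustrative restatement of the selection logic in fetch_rbe_capabilities.
fn effective_batch_size(from_server: Option<usize>, from_config: Option<usize>) -> usize {
    match (from_server, from_config) {
        (Some(cap), Some(config)) => std::cmp::min(cap, config),
        (Some(cap), None) => cap,
        (None, Some(config)) => config,
        (None, None) => DEFAULT_MAX_TOTAL_BATCH_SIZE,
    }
}

#[test]
fn prefers_the_smaller_of_server_and_config() {
    assert_eq!(effective_batch_size(Some(1 << 20), Some(1 << 16)), 1 << 16);
    assert_eq!(effective_batch_size(None, Some(1 << 16)), 1 << 16);
    assert_eq!(effective_batch_size(None, None), DEFAULT_MAX_TOTAL_BATCH_SIZE);
}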
+    ttl: Duration,
+    last_check: Instant,
+}
+
+impl FindMissingCache {
+    fn clear_if_ttl_expires(&mut self) {
+        if self.last_check.elapsed() > self.ttl {
+            self.cache.clear();
+            self.last_check = Instant::now();
+        }
+    }
+
+    pub fn put(&mut self, digest: TDigest, state: DigestRemoteState) {
+        self.clear_if_ttl_expires();
+        self.cache.put(digest, state);
+    }
+    pub fn get(&mut self, digest: &TDigest) -> Option<&DigestRemoteState> {
+        self.clear_if_ttl_expires();
+        self.cache.get(digest)
+    }
+}
 
 pub struct REClient {
+    runtime_opts: RERuntimeOpts,
     grpc_clients: GRPCClients,
     capabilities: RECapabilities,
     instance_name: InstanceName,
+    // buck2 calls find_missing repeatedly for the same blobs
+    find_missing_cache: Mutex<FindMissingCache>,
 }
 
 impl Drop for REClient {
@@ -462,14 +545,21 @@ impl BatchUploadReqAggregator {
 
 impl REClient {
     fn new(
+        runtime_opts: RERuntimeOpts,
         grpc_clients: GRPCClients,
         capabilities: RECapabilities,
         instance_name: InstanceName,
     ) -> Self {
         REClient {
+            runtime_opts,
             grpc_clients,
             capabilities,
             instance_name,
+            find_missing_cache: Mutex::new(FindMissingCache {
+                cache: LruCache::new(NonZeroUsize::new(50 << 20).unwrap()), // ~50M entries
+                ttl: Duration::from_secs(12 * 60 * 60), // 12 hours. TODO: Tune this parameter
+                last_check: Instant::now(),
+            }),
         }
     }
 
@@ -481,13 +571,14 @@ impl REClient {
         let mut client = self.grpc_clients.action_cache_client.clone();
 
         let res = client
-            .get_action_result(with_internal_metadata(
+            .get_action_result(with_re_metadata(
                 GetActionResultRequest {
                     instance_name: self.instance_name.as_str().to_owned(),
                     action_digest: Some(tdigest_to(request.digest)),
                     ..Default::default()
                 },
                 metadata,
+                self.runtime_opts.use_fbcode_metadata,
             ))
             .await?;
 
@@ -526,7 +617,11 @@ impl REClient {
         };
 
         let stream = client
-            .execute(with_internal_metadata(request, metadata))
+            .execute(with_re_metadata(
+                request,
+                metadata,
+                self.runtime_opts.use_fbcode_metadata,
+            ))
             .await?
.into_inner(); @@ -545,6 +640,7 @@ impl REClient { return Err(REClientError { code: TCode(rpc_status.code), message: rpc_status.message, + group: TCodeReasonGroup::UNKNOWN, } .into()); } @@ -564,7 +660,7 @@ impl REClient { action_result, action_result_digest: TDigest::default(), action_result_ttl: 0, - error: REError { + status: TStatus { code: TCode::OK, ..Default::default() }, @@ -630,12 +726,17 @@ impl REClient { upload_impl( &self.instance_name, request, - self.capabilities.max_msg_size, + self.capabilities.max_total_batch_size, + self.runtime_opts.max_concurrent_uploads_per_action, |re_request| async { let metadata = metadata.clone(); let mut cas_client = self.grpc_clients.cas_client.clone(); let resp = cas_client - .batch_update_blobs(with_internal_metadata(re_request, metadata)) + .batch_update_blobs(with_re_metadata( + re_request, + metadata, + self.runtime_opts.use_fbcode_metadata, + )) .await?; Ok(resp.into_inner()) }, @@ -644,7 +745,11 @@ impl REClient { let mut bytestream_client = self.grpc_clients.bytestream_client.clone(); let requests = futures::stream::iter(segments); let resp = bytestream_client - .write(with_internal_metadata(requests, metadata)) + .write(with_re_metadata( + requests, + metadata, + self.runtime_opts.use_fbcode_metadata, + )) .await?; Ok(resp.into_inner()) @@ -670,12 +775,16 @@ impl REClient { download_impl( &self.instance_name, request, - self.capabilities.max_msg_size, + self.capabilities.max_total_batch_size, |re_request| async { let metadata = metadata.clone(); let mut client = self.grpc_clients.cas_client.clone(); Ok(client - .batch_read_blobs(with_internal_metadata(re_request, metadata)) + .batch_read_blobs(with_re_metadata( + re_request, + metadata, + self.runtime_opts.use_fbcode_metadata, + )) .await? .into_inner()) }, @@ -684,7 +793,11 @@ impl REClient { async move { let mut client = self.grpc_clients.bytestream_client.clone(); let response = client - .read(with_internal_metadata(read_request, metadata)) + .read(with_re_metadata( + read_request, + metadata, + self.runtime_opts.use_fbcode_metadata, + )) .await? .into_inner(); Ok(Box::pin(response.into_stream())) @@ -703,30 +816,50 @@ impl REClient { let mut remote_ttl: HashMap = HashMap::new(); for digest_chunk in request.digests.chunks(100) { - for digest in digest_chunk { - // Assume that all digests are present on the remote because the API - // returns what is *not* present. - remote_ttl.insert( - digest.clone(), - DigestWithTtl { - digest: digest.clone(), - // NOTE: This is an arbitrary number because RBE does not return information - // on the TTL of the remote blob. - ttl: 60, - }, - ); + let mut digest_to_check: Vec = Vec::new(); + { + let mut find_missing_cache = self.find_missing_cache.lock().unwrap(); + for digest in digest_chunk { + // Assume that all digests are present on the remote because the API + // returns what is *not* present. + remote_ttl.insert( + digest.clone(), + DigestWithTtl { + digest: digest.clone(), + // NOTE: This is an arbitrary number because RBE does not return information + // on the TTL of the remote blob. 
+ ttl: 60, + }, + ); + match find_missing_cache.get(digest) { + Some(DigestRemoteState::Missing) | None => { + digest_to_check.push(digest.clone()); + } + _ => {} + } + } + } + + if digest_to_check.is_empty() { + continue; } + let missing_blobs = cas_client - .find_missing_blobs(with_internal_metadata( + .find_missing_blobs(with_re_metadata( FindMissingBlobsRequest { instance_name: self.instance_name.as_str().to_owned(), - blob_digests: digest_chunk.map(|b| tdigest_to(b.clone())), + blob_digests: digest_to_check.map(|b| tdigest_to(b.clone())), }, metadata.clone(), + self.runtime_opts.use_fbcode_metadata, )) .await .context("Failed to request what blobs are not present on remote")?; let resp: FindMissingBlobsResponse = missing_blobs.into_inner(); + let mut find_missing_cache = self.find_missing_cache.lock().unwrap(); + for digest in &digest_to_check { + find_missing_cache.put(digest.clone(), DigestRemoteState::ExistsOnRemote); + } for digest in &resp.missing_blob_digests.map(|d| tdigest_from(d.clone())) { // If it's present in the MissingBlobsResponse, it's expired on the remote and // needs to be refetched. @@ -737,6 +870,7 @@ impl REClient { ttl: 0, }, ); + find_missing_cache.put(digest.clone(), DigestRemoteState::Missing); } } @@ -745,6 +879,15 @@ impl REClient { }) } + pub async fn extend_digest_ttl( + &self, + _metadata: RemoteExecutionMetadata, + _request: ExtendDigestsTtlRequest, + ) -> anyhow::Result { + // TODO(arr) + Err(anyhow::anyhow!("Not implemented (RE extend_digest_ttl)")) + } + pub fn get_execution_client(&self) -> &Self { self } @@ -793,6 +936,16 @@ fn convert_action_result(action_result: ActionResult) -> anyhow::Result anyhow::Result anyhow::Result( instance_name: &InstanceName, request: DownloadRequest, - max_msg_size: usize, + max_total_batch_size: usize, cas_f: impl Fn(BatchReadBlobsRequest) -> Cas, bystream_fut: impl Fn(ReadRequest) -> Byt + Sync + Send + Copy, ) -> anyhow::Result @@ -901,19 +1055,20 @@ where .map(|d| tdigest_to(d.clone())) .filter(|d| d.size_bytes > 0) { - if digest.size_bytes as usize >= max_msg_size { + if digest.size_bytes as usize >= max_total_batch_size { // digest is too big to download in a BatchReadBlobsRequest // need to use the bytstream api continue; } curr_size += digest.size_bytes; - if curr_size >= max_msg_size as i64 { + if curr_size >= max_total_batch_size as i64 { let read_blob_req = BatchReadBlobsRequest { instance_name: instance_name.as_str().to_owned(), digests: std::mem::take(&mut curr_digests), acceptable_compressors: vec![compressor::Value::Identity as i32], }; requests.push(read_blob_req); + curr_size = digest.size_bytes; } curr_digests.push(digest.clone()); } @@ -952,7 +1107,7 @@ where let mut inlined_blobs = vec![]; for digest in inlined_digests { - let data = if digest.size_in_bytes as usize >= max_msg_size { + let data = if digest.size_in_bytes as usize >= max_total_batch_size { let mut accum = vec![]; let mut responses = bystream_fut(digest.clone()).await?; while let Some(resp) = responses.next().await { @@ -993,7 +1148,7 @@ where // If the data is small enough to be transferred in a batch // blob update, write it all at once to the file. Otherwise, it'll // be streamed in chunks as the remote responds. 
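The download batching loop above packs digests greedily into `BatchReadBlobs` requests no larger than `max_total_batch_size`; the `curr_size = digest.size_bytes;` line is the fix this diff makes, restarting the running total at the digest that triggered the flush instead of leaking it into the next batch. The logic isolated as a sketch, using plain sizes instead of the real `Digest` type; the test mirrors `test_download_multiple_batches` added further down:

// Sketch of the batch-splitting loop, returning the indices in each batch.
fn split_into_batches(sizes: &[i64], max_total_batch_size: i64) -> Vec<Vec<usize>> {
    let mut batches = vec![];
    let mut curr: Vec<usize> = vec![];
    let mut curr_size = 0i64;
    for (idx, &size) in sizes.iter().enumerate() {
        if size >= max_total_batch_size {
            continue; // too large for batching; streamed via the bytestream API
        }
        curr_size += size;
        if curr_size >= max_total_batch_size {
            batches.push(std::mem::take(&mut curr));
            curr_size = size; // restart the running total at this digest
        }
        curr.push(idx);
    }
    if !curr.is_empty() {
        batches.push(curr);
    }
    batches
}

#[test]
fn six_three_byte_blobs_with_limit_seven_need_three_batches() {
    assert_eq!(split_into_batches(&[3; 6], 7).len(), 3);
}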
- if req.named_digest.digest.size_in_bytes < max_msg_size as i64 { + if req.named_digest.digest.size_in_bytes < max_total_batch_size as i64 { let data = get(&req.named_digest.digest)?; file.write_all(&data) .await @@ -1020,7 +1175,7 @@ where }) }); - futures::future::try_join_all(writes).await?; + buck2_util::future::try_join_all(writes).await?; Ok(DownloadResponse { inlined_blobs: Some(inlined_blobs), @@ -1031,7 +1186,8 @@ where async fn upload_impl( instance_name: &InstanceName, request: UploadRequest, - max_msg_size: usize, + max_total_batch_size: usize, + max_concurrent_uploads: Option, cas_f: impl Fn(BatchUpdateBlobsRequest) -> Cas + Sync + Send + Copy, bystream_fut: impl Fn(Vec) -> Byt + Sync + Send + Copy, ) -> anyhow::Result @@ -1044,14 +1200,14 @@ where // For small file uploads the client should group them together and call `BatchUpdateBlobs` // https://github.com/bazelbuild/remote-apis/blob/main/build/bazel/remote/execution/v2/remote_execution.proto#L205 - let mut batched_blob_updates = BatchUploadReqAggregator::new(max_msg_size); + let mut batched_blob_updates = BatchUploadReqAggregator::new(max_total_batch_size); // Create futures for any blobs that need uploading. for blob in request.inlined_blobs_with_digest.unwrap_or_default() { let hash = blob.digest.hash.clone(); let size = blob.digest.size_in_bytes; - if size < max_msg_size as i64 { + if size < max_total_batch_size as i64 { batched_blob_updates.push(BatchUploadRequest::Blob(blob)); continue; } @@ -1068,10 +1224,10 @@ where let fut = async move { // Number of complete (non-partial) messages let mut upload_segments = vec![]; - for (i, chunk) in data.chunks(max_msg_size).enumerate() { + for (i, chunk) in data.chunks(max_total_batch_size).enumerate() { upload_segments.push(WriteRequest { resource_name: resource_name.to_owned(), - write_offset: (i * max_msg_size) as i64, + write_offset: (i * max_total_batch_size) as i64, finish_write: false, data: chunk.to_owned(), }); @@ -1095,7 +1251,7 @@ where let hash = file.digest.hash.clone(); let size = file.digest.size_in_bytes; let name = file.name.clone(); - if size < max_msg_size as i64 { + if size < max_total_batch_size as i64 { batched_blob_updates.push(BatchUploadRequest::File(file)); continue; } @@ -1111,7 +1267,7 @@ where let mut file = tokio::fs::File::open(&name) .await .with_context(|| format!("Opening `{name}` for reading failed"))?; - let mut data = vec![0; max_msg_size]; + let mut data = vec![0; max_total_batch_size]; let mut write_offset = 0; let mut upload_segments = Vec::new(); @@ -1215,42 +1371,96 @@ where upload_futures.push(Box::pin(fut)); } - let upload_stream = - futures::stream::iter(upload_futures).buffer_unordered(CONCURRENT_UPLOAD_LIMIT); - let blob_hashes = upload_stream.try_collect::>>().await?; + let blob_hashes = if let Some(concurrency_limit) = max_concurrent_uploads { + futures::stream::iter(upload_futures) + .buffer_unordered(concurrency_limit) + .try_collect::>>() + .await? + } else { + futures::future::try_join_all(upload_futures).await? + }; tracing::debug!("uploaded: {:?}", blob_hashes); Ok(UploadResponse {}) } -fn with_internal_metadata(t: T, metadata: RemoteExecutionMetadata) -> tonic::Request { - // This is pretty ugly, but the protobuf spec that defines this is internal, so considering - // field numbers need to be stable anyway (= low risk), and this is not used in prod (= low - // impact if this goes wrong), we just inline it here. This is a small hack that lets us use - // our internal RE using this GRPC client for testing. 
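The upload path above now honors `max_concurrent_uploads_per_action`: with a limit set, the per-blob futures are funneled through `buffer_unordered` so at most that many are in flight, and without one they are all dispatched at once via `try_join_all`. (The download path uses a `buck2_util` variant of `try_join_all`; its details are not shown here.) The dispatch choice isolated as a sketch, with the futures standing in for the real per-blob upload RPCs:

use futures::stream::{self, StreamExt, TryStreamExt};

// Sketch only: run upload futures with an optional concurrency limit.
async fn run_uploads(
    uploads: Vec<impl std::future::Future<Output = anyhow::Result<String>>>,
    limit: Option<usize>,
) -> anyhow::Result<Vec<String>> {
    match limit {
        Some(limit) => {
            // At most `limit` futures are polled concurrently.
            stream::iter(uploads)
                .buffer_unordered(limit)
                .try_collect()
                .await
        }
        // No limit configured: start everything at once.
        None => futures::future::try_join_all(uploads).await,
    }
}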
+fn with_re_metadata<T>(
+    t: T,
+    metadata: RemoteExecutionMetadata,
+    use_fbcode_metadata: bool,
+) -> tonic::Request<T> {
+    // This creates a new Tonic request with attached metadata for the RE
+    // backend. There are two cases here we need to support:
     //
-    // This is defined in `fbcode/remote_execution/grpc/metadata.proto`.
-    #[derive(prost::Message)]
-    struct Metadata {
-        #[prost(message, optional, tag = "15")]
-        platform: Option<crate::grpc::Platform>,
-        #[prost(string, optional, tag = "18")]
-        use_case_id: Option<String>,
-    }
+    // - Servers that abide by the remote execution APIs defined by Bazel,
+    //   AKA the "OSS RE API", which this package implements
+    // - The internal RE solution used at Meta, which uses a different API,
+    //   but is compatible with the OSS RE API to some extent.
+    //
+    // The second case is supported only through attaching some metadata to the
+    // request, which the fbcode RE service understands; the reason for all of
+    // this is that it allows this OSS client package to be tested inside of
+    // fbcode builds within Meta, so there doesn't need to be a separate CI
+    // check.
+    //
+    // However, we don't need it for FOSS builds of Buck2. And in theory we
+    // could test the OSS Bazel API in the upstream GitHub CI, but doing it this
+    // way is only a little ugly, it's well hidden, and it helps ensure the
+    // internal Meta builds catch those issues earlier.
 
     let mut msg = tonic::Request::new(t);
 
-    // We encode minimal metadata here. This is a bit of a hack to be compatible with internal RE.
+    if use_fbcode_metadata {
+        // This is pretty ugly, but the protobuf spec that defines this is
+        // internal, so considering field numbers need to be stable anyway (=
+        // low risk), and this is not used in prod (= low impact if this goes
+        // wrong), we just inline it here. This is a small hack that lets us use
+        // our internal RE using this GRPC client for testing.
+        //
+        // This is defined in `fbcode/remote_execution/grpc/metadata.proto`.
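For the OSS branch below, the client prost-encodes the standard Bazel RE `RequestMetadata` and attaches it under the well-known `build.bazel.remote.execution.v2.requestmetadata-bin` key. For illustration, the receiving side can be sketched like this, assuming tonic's binary-metadata accessors and that the header value decodes cleanly:

use prost::Message;
use tonic::metadata::MetadataMap;

use re_grpc_proto::build::bazel::remote::execution::v2::RequestMetadata;

// Sketch only: how a server (or a test) can recover the metadata that
// `with_re_metadata` attaches. `get_bin`/`to_bytes` are tonic's accessors
// for `-bin` (binary) metadata entries.
fn read_request_metadata(metadata: &MetadataMap) -> Option<RequestMetadata> {
    let value = metadata.get_bin("build.bazel.remote.execution.v2.requestmetadata-bin")?;
    let bytes = value.to_bytes().ok()?;
    RequestMetadata::decode(bytes).ok()
}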
+ #[derive(prost::Message)] + struct Metadata { + #[prost(message, optional, tag = "15")] + platform: Option, + #[prost(string, optional, tag = "18")] + use_case_id: Option, + } - let mut encoded = Vec::new(); - Metadata { - platform: metadata.platform, - use_case_id: Some(metadata.use_case_id), - } - .encode(&mut encoded) - .expect("Encoding into a Vec cannot not fail"); - msg.metadata_mut() - .insert_bin("re-metadata-bin", MetadataValue::from_bytes(&encoded)); + let mut encoded = Vec::new(); + Metadata { + platform: metadata.platform, + use_case_id: Some(metadata.use_case_id), + } + .encode(&mut encoded) + .expect("Encoding into a Vec cannot not fail"); + + msg.metadata_mut() + .insert_bin("re-metadata-bin", MetadataValue::from_bytes(&encoded)); + } else { + let mut encoded = Vec::new(); + RequestMetadata { + tool_details: Some(ToolDetails { + tool_name: "buck2".to_owned(), + // TODO(#503): Pull the BuckVersion::get_unique_id() from BuckDaemon + tool_version: "0.1.0".to_owned(), + }), + action_id: "".to_owned(), + tool_invocation_id: metadata + .buck_info + .map_or(String::new(), |buck_info| buck_info.build_id), + correlated_invocations_id: "".to_owned(), + action_mnemonic: "".to_owned(), + target_id: "".to_owned(), + configuration_id: "".to_owned(), + } + .encode(&mut encoded) + .expect("Encoding into a Vec cannot not fail"); + + msg.metadata_mut().insert_bin( + "build.bazel.remote.execution.v2.requestmetadata-bin", + MetadataValue::from_bytes(&encoded), + ); + }; msg } @@ -1285,12 +1495,13 @@ fn substitute_env_vars_impl( #[cfg(test)] mod tests { + use core::sync::atomic::Ordering; + use std::sync::atomic::AtomicU16; + use re_grpc_proto::build::bazel::remote::execution::v2::batch_read_blobs_response; use re_grpc_proto::build::bazel::remote::execution::v2::batch_update_blobs_response; use super::*; - use crate::NamedDigest; - use crate::NamedDigestWithPermissions; #[tokio::test] async fn test_download_named() -> anyhow::Result<()> { @@ -1573,6 +1784,87 @@ mod tests { Ok(()) } + #[tokio::test] + async fn test_download_multiple_batches() -> anyhow::Result<()> { + let digest1 = &TDigest { + hash: "aa".to_owned(), + size_in_bytes: 3, + ..Default::default() + }; + + let digest2 = &TDigest { + hash: "bb".to_owned(), + size_in_bytes: 3, + ..Default::default() + }; + + let digest3 = &TDigest { + hash: "cc".to_owned(), + size_in_bytes: 3, + ..Default::default() + }; + + let digest4 = &TDigest { + hash: "dd".to_owned(), + size_in_bytes: 3, + ..Default::default() + }; + + let digest5 = &TDigest { + hash: "dd".to_owned(), + size_in_bytes: 3, + ..Default::default() + }; + + let digest6 = &TDigest { + hash: "dd".to_owned(), + size_in_bytes: 3, + ..Default::default() + }; + + let digests = vec![ + digest1.clone(), + digest2.clone(), + digest3.clone(), + digest4.clone(), + digest5.clone(), + digest6.clone(), + ]; + + let req = DownloadRequest { + inlined_digests: Some(digests.clone()), + ..Default::default() + }; + + let counter = AtomicU16::new(0); + + let res = download_impl( + &InstanceName(None), + req, + 7, + |req| { + counter.fetch_add(1, Ordering::Relaxed); + let res = BatchReadBlobsResponse { + responses: req.digests.map(|d| batch_read_blobs_response::Response { + digest: Some(d.clone()), + data: vec![0, 1, 2], + ..Default::default() + }), + }; + async { Ok(res) } + }, + |_digest| async move { anyhow::Ok(Box::pin(futures::stream::iter(vec![]))) }, + ) + .await?; + + let inlined_blobs = res.inlined_blobs.unwrap(); + + assert_eq!(inlined_blobs.len(), digests.len()); + 
assert_eq!(counter.load(Ordering::Relaxed), 3); + + Ok(()) + } + #[tokio::test] async fn test_download_large_inlined() -> anyhow::Result<()> { let digest1 = &TDigest { @@ -1780,6 +2072,7 @@ mod tests { &InstanceName(None), req, 10000, + None, |req| { let res = res.clone(); let digest1 = digest1.clone(); @@ -1862,6 +2155,7 @@ mod tests { &InstanceName(None), req, 10, // kept small to simulate a large file upload + None, |req| { let res = res.clone(); let digest1 = digest1.clone(); @@ -1935,6 +2229,7 @@ mod tests { &InstanceName(None), req, 10, // kept small to simulate a large inlined upload + None, |req| { let res = res.clone(); let digest1 = digest1.clone(); @@ -1995,6 +2290,7 @@ mod tests { &InstanceName(None), // TODO req, 10, + None, |_req| async move { panic!("This should not be called as there are no blobs to upload in batch"); }, @@ -2055,6 +2351,7 @@ mod tests { &InstanceName(None), req, 3, + None, |_req| async move { panic!("Not called"); }, @@ -2095,6 +2392,7 @@ mod tests { &InstanceName(None), req, 0, + None, |_req| async move { panic!("Not called"); }, @@ -2140,6 +2438,7 @@ mod tests { &InstanceName(Some("instance".to_owned())), req, 1, + None, |_req| async move { panic!("Not called"); }, diff --git a/remote_execution/oss/re_grpc/src/error.rs b/remote_execution/oss/re_grpc/src/error.rs index e2a13d0ae4fc3..6ef83050f6a6d 100644 --- a/remote_execution/oss/re_grpc/src/error.rs +++ b/remote_execution/oss/re_grpc/src/error.rs @@ -18,13 +18,7 @@ use thiserror::Error; pub struct REClientError { pub message: String, pub code: TCode, -} - -#[derive(Debug, Clone, Default)] -pub struct REError { - pub code: TCode, - pub message: String, - pub error_location: ErrorLocation, + pub group: TCodeReasonGroup, } #[derive(Debug, Clone, Dupe, Default)] @@ -36,13 +30,27 @@ impl Display for ErrorLocation { } } -#[derive(Debug, PartialEq, Eq, Clone, Dupe, Default)] +#[derive(Copy, Debug, PartialEq, Eq, Clone, Dupe, Default)] pub struct TCode(pub i32); impl TCode { pub const OK: Self = TCode(0i32); + pub const CANCELLED: Self = TCode(1i32); + pub const UNKNOWN: Self = TCode(2i32); pub const INVALID_ARGUMENT: Self = TCode(3i32); + pub const DEADLINE_EXCEEDED: Self = TCode(4i32); pub const NOT_FOUND: Self = TCode(5i32); + pub const ALREADY_EXISTS: Self = TCode(6i32); + pub const PERMISSION_DENIED: Self = TCode(7i32); + pub const RESOURCE_EXHAUSTED: Self = TCode(8i32); + pub const FAILED_PRECONDITION: Self = TCode(9i32); + pub const ABORTED: Self = TCode(10i32); + pub const OUT_OF_RANGE: Self = TCode(11i32); + pub const UNIMPLEMENTED: Self = TCode(12i32); + pub const INTERNAL: Self = TCode(13i32); + pub const UNAVAILABLE: Self = TCode(14i32); + pub const DATA_LOSS: Self = TCode(15i32); + pub const UNAUTHENTICATED: Self = TCode(16i32); } impl Display for TCode { @@ -56,3 +64,16 @@ impl Display for TCode { } } } + +#[derive(Copy, Debug, PartialEq, Eq, Clone, Dupe, Default)] +pub struct TCodeReasonGroup(pub i32); + +impl TCodeReasonGroup { + pub const UNKNOWN: Self = TCodeReasonGroup(0i32); +} + +impl Display for TCodeReasonGroup { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "UNKNOWN") + } +} diff --git a/remote_execution/oss/re_grpc/src/lib.rs b/remote_execution/oss/re_grpc/src/lib.rs index 26d9bd6bc4c04..5344b555e1c68 100644 --- a/remote_execution/oss/re_grpc/src/lib.rs +++ b/remote_execution/oss/re_grpc/src/lib.rs @@ -7,11 +7,6 @@ * of this source tree. 
*/ -// Plugins -#![cfg_attr(feature = "gazebo_lint", feature(plugin))] -#![cfg_attr(feature = "gazebo_lint", allow(deprecated))] // :( -#![cfg_attr(feature = "gazebo_lint", plugin(gazebo_lint))] - mod client; mod digest; mod error; @@ -20,7 +15,6 @@ mod metadata; mod request; mod response; pub use client::*; -pub use digest::*; pub use error::*; pub use grpc::*; pub use metadata::*; @@ -29,9 +23,5 @@ pub use response::*; pub fn get_network_stats() -> anyhow::Result { // TODO: Support this in this client. - Ok(NetworkStatisticsResponse { - downloaded: 0, - uploaded: 0, - _dot_dot_default: (), - }) + Ok(NetworkStatisticsResponse::default()) } diff --git a/remote_execution/oss/re_grpc/src/metadata.rs b/remote_execution/oss/re_grpc/src/metadata.rs index e2e79be297bca..83f4cb6755c3a 100644 --- a/remote_execution/oss/re_grpc/src/metadata.rs +++ b/remote_execution/oss/re_grpc/src/metadata.rs @@ -27,6 +27,7 @@ pub struct HostResourceRequirements { #[derive(Clone, Default)] pub struct BuckInfo { pub build_id: String, + pub version: String, pub _dot_dot: (), } @@ -34,9 +35,9 @@ pub struct BuckInfo { pub struct RemoteExecutionMetadata { pub action_history_info: Option, pub buck_info: Option, - pub host_resource_requirements: Option, pub platform: Option, pub use_case_id: String, pub do_not_cache: bool, + pub respect_file_symlinks: Option, pub _dot_dot: (), } diff --git a/remote_execution/oss/re_grpc/src/request.rs b/remote_execution/oss/re_grpc/src/request.rs index 267718de6cf1b..eb795177224f2 100644 --- a/remote_execution/oss/re_grpc/src/request.rs +++ b/remote_execution/oss/re_grpc/src/request.rs @@ -8,6 +8,7 @@ */ pub use crate::digest::*; +use crate::grpc::Platform as TPlatform; use crate::response::TActionResult2; #[derive(Default)] @@ -73,17 +74,50 @@ pub struct GetDigestsTtlRequest { pub _dot_dot: (), } +#[derive(Default)] +pub struct ExtendDigestsTtlRequest { + pub digests: Vec, + pub ttl: i64, + pub _dot_dot: (), +} + #[derive(Clone, Default)] pub struct ExecuteRequest { pub action_digest: TDigest, pub skip_cache_lookup: bool, pub execution_policy: Option, + pub host_runtime_requirements: THostRuntimeRequirements, + pub _dot_dot: (), +} + +#[derive(Clone, Default)] +pub struct TDependency { + pub smc_tier: String, + pub id: String, pub _dot_dot: (), } #[derive(Clone, Default)] pub struct TExecutionPolicy { pub priority: i32, + pub affinity_keys: Vec, + pub _dot_dot: (), +} + +#[derive(Clone, Default)] +pub struct THostResourceRequirements { + pub mem_bytes: i64, + pub cpu_units: i64, + pub input_files_bytes: i64, + pub resource_units: i64, + pub _dot_dot: (), +} + +#[derive(Clone, Default)] +pub struct THostRuntimeRequirements { + pub platform: TPlatform, + pub host_resource_requirements: THostResourceRequirements, + pub dependencies: Vec, pub _dot_dot: (), } diff --git a/remote_execution/oss/re_grpc/src/response.rs b/remote_execution/oss/re_grpc/src/response.rs index e715e64b17bc0..7c773c01da514 100644 --- a/remote_execution/oss/re_grpc/src/response.rs +++ b/remote_execution/oss/re_grpc/src/response.rs @@ -98,6 +98,7 @@ pub struct TActionResult2 { pub stderr_digest: Option, pub execution_metadata: TExecutedActionMetadata, pub auxiliary_metadata: Vec, + pub output_symlinks: Vec, // Compatibility with the Thrift structs pub _dot_dot_default: (), } @@ -143,7 +144,7 @@ pub struct DigestWithStatus { pub _dot_dot_default: (), } -#[derive(Clone, Default)] +#[derive(Debug, Clone, Default)] pub struct TStatus { pub code: TCode, pub message: String, @@ -170,7 +171,7 @@ pub struct 
GetDigestsTtlResponse { #[derive(Clone, Default)] pub struct ExecuteResponse { pub action_result: TActionResult2, - pub error: REError, + pub status: TStatus, pub cached_result: bool, pub action_digest: TDigest, pub action_result_digest: TDigest, @@ -202,6 +203,9 @@ impl Stage { pub const MATERIALIZING_INPUT: Self = Stage(100i32); pub const UPLOADING_OUTPUT: Self = Stage(101i32); pub const KEEP_ALIVE: Self = Stage(102i32); + pub const BEFORE_ACTION: Self = Stage(103i32); + pub const AFTER_ACTION: Self = Stage(104i32); + pub const WORKER_RECEIVED: Self = Stage(105i32); } #[derive(Clone, Default)] @@ -234,10 +238,26 @@ pub struct TFile { pub _dot_dot_default: (), } +#[derive(Clone, Default)] +pub struct TSymlink { + pub name: String, + pub target: String, + // Compatibility with the Thrift structs + pub _dot_dot_default: (), +} + #[derive(Clone, Default)] pub struct NetworkStatisticsResponse { pub uploaded: i64, pub downloaded: i64, + pub download_storage_stats: TStorageStats, + pub upload_storage_stats: TStorageStats, + // Compatibility with the Thrift structs + pub _dot_dot_default: (), +} + +#[derive(Clone, Default)] +pub struct TStorageStats { // Compatibility with the Thrift structs pub _dot_dot_default: (), } diff --git a/remote_execution/oss/re_grpc_proto/BUCK b/remote_execution/oss/re_grpc_proto/BUCK index f05dbf0bd554c..1c05ddc203793 100644 --- a/remote_execution/oss/re_grpc_proto/BUCK +++ b/remote_execution/oss/re_grpc_proto/BUCK @@ -1,5 +1,4 @@ load("@fbcode//buck2:proto_defs.bzl", "rust_protobuf_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -7,10 +6,11 @@ rust_protobuf_library( name = "re_grpc_proto", srcs = glob(["src/**/*.rs"]), build_script = "build.rs", - doctests = False, # FIXME protos = glob(["proto/**/*.proto"]), deps = [ "fbsource//third-party/rust:prost-types", + "fbsource//third-party/rust:serde", "fbsource//third-party/rust:tonic", + "//buck2/app/buck2_data:buck2_data", ], ) diff --git a/remote_execution/oss/re_grpc_proto/Cargo.toml b/remote_execution/oss/re_grpc_proto/Cargo.toml index dc55cea4683a7..7c24abc4f2848 100644 --- a/remote_execution/oss/re_grpc_proto/Cargo.toml +++ b/remote_execution/oss/re_grpc_proto/Cargo.toml @@ -1,13 +1,18 @@ [package] +description = "A library providing gRPC client for Bazel remote execution" +edition = "2021" +license = { workspace = true } name = "re_grpc_proto" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "A library providing gRPC client for Bazel remote execution" [dependencies] -prost-types = { workspace = true } prost = { workspace = true } +prost-types = { workspace = true } +serde = { workspace = true, features = ["derive"] } tonic = { workspace = true } +buck2_data = { workspace = true } + [build-dependencies] buck2_protoc_dev = { workspace = true } diff --git a/remote_execution/oss/re_grpc_proto/build.rs b/remote_execution/oss/re_grpc_proto/build.rs index 480c71f77f2ed..a38088d6567c3 100644 --- a/remote_execution/oss/re_grpc_proto/build.rs +++ b/remote_execution/oss/re_grpc_proto/build.rs @@ -24,5 +24,74 @@ fn main() -> io::Result<()> { buck2_protoc_dev::configure() .setup_protoc() + .type_attribute(".", "#[derive(::serde::Serialize, ::serde::Deserialize)]") + .field_attribute( + "build.bazel.remote.execution.v2.Action.timeout", + "#[serde(with = \"::buck2_data::serialize_duration_as_micros\")]", + ) + .field_attribute( + "build.bazel.remote.execution.v2.ExecutedActionMetadata.virtual_execution_duration", + "#[serde(with = 
\"::buck2_data::serialize_duration_as_micros\")]", + ) + .field_attribute( + "google.longrunning.WaitOperationRequest.timeout", + "#[serde(with = \"::buck2_data::serialize_duration_as_micros\")]", + ) + .field_attribute( + "build.bazel.remote.execution.v2.NodeProperties.mtime", + "#[serde(with = \"::buck2_data::serialize_timestamp\")]", + ) + .field_attribute( + "build.bazel.remote.execution.v2.ExecutedActionMetadata.queued_timestamp", + "#[serde(with = \"::buck2_data::serialize_timestamp\")]", + ) + .field_attribute( + "build.bazel.remote.execution.v2.ExecutedActionMetadata.worker_start_timestamp", + "#[serde(with = \"::buck2_data::serialize_timestamp\")]", + ) + .field_attribute( + "build.bazel.remote.execution.v2.ExecutedActionMetadata.worker_completed_timestamp", + "#[serde(with = \"::buck2_data::serialize_timestamp\")]", + ) + .field_attribute( + "build.bazel.remote.execution.v2.ExecutedActionMetadata.input_fetch_start_timestamp", + "#[serde(with = \"::buck2_data::serialize_timestamp\")]", + ) + .field_attribute( + "build.bazel.remote.execution.v2.ExecutedActionMetadata.input_fetch_completed_timestamp", + "#[serde(with = \"::buck2_data::serialize_timestamp\")]", + ) + .field_attribute( + "build.bazel.remote.execution.v2.ExecutedActionMetadata.execution_start_timestamp", + "#[serde(with = \"::buck2_data::serialize_timestamp\")]", + ) + .field_attribute( + "build.bazel.remote.execution.v2.ExecutedActionMetadata.execution_completed_timestamp", + "#[serde(with = \"::buck2_data::serialize_timestamp\")]", + ) + .field_attribute( + "build.bazel.remote.execution.v2.ExecutedActionMetadata.output_upload_start_timestamp", + "#[serde(with = \"::buck2_data::serialize_timestamp\")]", + ) + .field_attribute( + "build.bazel.remote.execution.v2.ExecutedActionMetadata.output_upload_completed_timestamp", + "#[serde(with = \"::buck2_data::serialize_timestamp\")]", + ) + .field_attribute( + "build.bazel.remote.execution.v2.ExecutedActionMetadata.auxiliary_metadata", + "#[serde(with = \"crate::serialize_vec_any\")]", + ) + .field_attribute( + "google.longrunning.Operation.metadata", + "#[serde(with = \"crate::serialize_option_any\")]", + ) + .field_attribute( + "google.longrunning.Operation.result.response", + "#[serde(with = \"crate::serialize_any\")]", + ) + .field_attribute( + "google.rpc.Status.details", + "#[serde(with = \"crate::serialize_vec_any\")]", + ) .compile(proto_files, &["./proto/"]) } diff --git a/remote_execution/oss/re_grpc_proto/src/lib.rs b/remote_execution/oss/re_grpc_proto/src/lib.rs index 547432e617f38..9e80d2f289aae 100644 --- a/remote_execution/oss/re_grpc_proto/src/lib.rs +++ b/remote_execution/oss/re_grpc_proto/src/lib.rs @@ -7,6 +7,7 @@ * of this source tree. 
*/ +#[allow(clippy::doc_lazy_continuation)] pub mod google { pub mod api { tonic::include_proto!("google.api"); @@ -21,6 +22,8 @@ pub mod google { tonic::include_proto!("google.rpc"); } } + +#[allow(clippy::doc_lazy_continuation)] pub mod build { pub mod bazel { pub mod semver { @@ -35,3 +38,88 @@ pub mod build { } } } + +pub mod serialize_vec_any { + use serde::Deserialize; + use serde::Deserializer; + use serde::Serialize; + use serde::Serializer; + + pub fn serialize(value: &[::prost_types::Any], serializer: S) -> Result + where + S: Serializer, + { + let d: Vec<(String, Vec)> = value + .iter() + .map(|v| (v.type_url.clone(), v.value.clone())) + .collect(); + d.serialize(serializer) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> + where + D: Deserializer<'de>, + { + let d: Vec<::prost_types::Any> = Vec::deserialize(deserializer)? + .into_iter() + .map(|(type_url, value)| ::prost_types::Any { type_url, value }) + .collect(); + Ok(d) + } +} + +pub mod serialize_option_any { + use serde::Deserialize; + use serde::Deserializer; + use serde::Serialize; + use serde::Serializer; + + pub fn serialize( + value: &Option<::prost_types::Any>, + serializer: S, + ) -> Result + where + S: Serializer, + { + let d = value + .as_ref() + .map(|v| (v.type_url.clone(), v.value.clone())); + d.serialize(serializer) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> + where + D: Deserializer<'de>, + { + let d = Option::<(String, Vec)>::deserialize(deserializer)? + .map(|(type_url, value)| ::prost_types::Any { type_url, value }); + Ok(d) + } +} + +pub mod serialize_any { + use serde::Deserialize; + use serde::Deserializer; + use serde::Serialize; + use serde::Serializer; + + pub fn serialize(value: &::prost_types::Any, serializer: S) -> Result + where + S: Serializer, + { + let d = (value.type_url.clone(), value.value.clone()); + d.serialize(serializer) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result<::prost_types::Any, D::Error> + where + D: Deserializer<'de>, + { + let d = <(String, Vec)>::deserialize(deserializer)?; + let d = ::prost_types::Any { + type_url: d.0, + value: d.1, + }; + Ok(d) + } +} diff --git a/rust-toolchain b/rust-toolchain index 997f98ceeb396..d3bf011317fa1 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -4,7 +4,11 @@ # * Update the `rustc_version` directive (read by `app/buck2_core/build.rs`). # * Update `HACKING.md` (two instances). # * Update `docs/getting_started.md` (two instances). +# * Update `../common/rust/tools/reindeer/rust-toolchain` (one instance) +# * Update `../common/ocaml/interop/rust-toolchain` (one instance) +# * NOTE: You may have to change this file in a follow up commit as ocamlrep +# has a dependency on buck2 git trunk. 
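The `serialize_vec_any`, `serialize_option_any`, and `serialize_any` modules above exist because `prost_types::Any` implements neither `Serialize` nor `Deserialize`; fields of that type borrow impls via `#[serde(with = ...)]`, round-tripping each `Any` as a `(type_url, value)` pair. A hypothetical struct using one of them, with `serde_json` purely for demonstration:

use serde::{Deserialize, Serialize};

// Illustrative only: a struct shaped loosely like google.longrunning.Operation,
// delegating the Any field's serde impls to the helper module above.
#[derive(Serialize, Deserialize)]
struct OperationLike {
    name: String,
    #[serde(with = "re_grpc_proto::serialize_option_any")]
    metadata: Option<prost_types::Any>,
}

#[test]
fn any_round_trips_through_json() {
    let op = OperationLike {
        name: "operations/1".to_owned(),
        metadata: Some(prost_types::Any {
            type_url: "type.googleapis.com/example.Thing".to_owned(),
            value: vec![1, 2, 3],
        }),
    };
    let json = serde_json::to_string(&op).unwrap();
    let back: OperationLike = serde_json::from_str(&json).unwrap();
    assert_eq!(back.metadata.unwrap().value, vec![1, 2, 3]);
}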
-# @rustc_version: rustc 1.73.0-nightly (1065d876c 2023-07-09) -channel = "nightly-2023-07-10" +# @rustc_version: rustc 1.81.0-nightly (506985649 2024-07-20) +channel = "nightly-2024-07-21" components = ["llvm-tools-preview","rustc-dev","rust-src"] diff --git a/shed/README.md b/shed/README.md index 3d4fa4912e0a6..5f80b804c4449 100644 --- a/shed/README.md +++ b/shed/README.md @@ -1,6 +1,8 @@ # Shed Code which is: -* used by Buck -* generic, knows nothing of Buck -* we would rather not have written and would like to get into a different package + +- used by Buck +- generic, knows nothing of Buck +- we would rather not have written and would like to get into a different + package diff --git a/shed/completion_verify/BUCK b/shed/completion_verify/BUCK new file mode 100644 index 0000000000000..e7ba38e5c2f77 --- /dev/null +++ b/shed/completion_verify/BUCK @@ -0,0 +1,23 @@ +load("@fbcode_macros//build_defs:rust_binary.bzl", "rust_binary") + +oncall("build_infra") + +# Small binary to verify shell completion works correctly. Used in e2e tests of buck2. +rust_binary( + name = "completion_verify", + srcs = glob(["src/**/*.rs"]), + resources = select({ + "DEFAULT": {}, + "ovr_config//os:linux": { + "fish": "//buck2/shed/completion_verify/packages:fish", + "zsh": "//buck2/shed/completion_verify/packages:zsh", + }, + }), + deps = [ + "fbsource//third-party/rust:buck-resources", + "fbsource//third-party/rust:clap", + "fbsource//third-party/rust:ptyprocess", + "fbsource//third-party/rust:tempfile", + "fbsource//third-party/rust:vt100", + ], +) diff --git a/shed/completion_verify/packages/BUCK b/shed/completion_verify/packages/BUCK new file mode 100644 index 0000000000000..d10abf1e2f980 --- /dev/null +++ b/shed/completion_verify/packages/BUCK @@ -0,0 +1,13 @@ +load(":packages.bzl", "download_rpm") + +oncall("build_infra") + +download_rpm( + name = "zsh", + rpm_name = "zsh.x86_64", +) + +download_rpm( + name = "fish", + rpm_name = "fish.x86_64", +) diff --git a/shed/completion_verify/packages/download.sh b/shed/completion_verify/packages/download.sh new file mode 100755 index 0000000000000..7a8f749551922 --- /dev/null +++ b/shed/completion_verify/packages/download.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +set -e + +dnf download "$1" --destdir "$BUCK_SCRATCH_PATH" +rpm=$(echo "$BUCK_SCRATCH_PATH"/*) +mkdir -p "$2" +rpm2archive - < "$rpm" | tar -xvzf - -C "$(realpath "$2")" + +if [[ $1 =~ fish ]]; then + # In order to get fish to behave like it's been installed into a relocatable + # directory, we need to move things out of `usr/` + mv "$2/usr/"* "$2" + rmdir "$2/usr" +fi diff --git a/shed/completion_verify/packages/packages.bzl b/shed/completion_verify/packages/packages.bzl new file mode 100644 index 0000000000000..b2360df50172d --- /dev/null +++ b/shed/completion_verify/packages/packages.bzl @@ -0,0 +1,42 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# @oss-disable: load("@fbcode_macros//build_defs:platform_utils.bzl", "platform_utils") + +def _impl(ctx: AnalysisContext) -> list[Provider]: + out = ctx.actions.declare_output(ctx.attrs.name, dir = True) + ctx.actions.run( + cmd_args( + ctx.attrs.download_tool, + ctx.attrs.rpm_name, + out.as_output(), + ), + category = "download_rpm", + local_only = True, + ) + return [DefaultInfo(default_output = out)] + +download_rpm_impl = rule( + impl = _impl, + attrs = { + "download_tool": attrs.source(), + "rpm_name": attrs.string(), + }, +) + +def download_rpm(**kwargs): + prelude = native + + platform_utils = None # @oss-enable + dtp = platform_utils.get_cxx_platform_for_base_path(prelude.package_name()).target_platform if platform_utils else None + + download_rpm_impl( + download_tool = "download.sh", + default_target_platform = dtp, + visibility = ["PUBLIC"], + **kwargs + ) diff --git a/shed/completion_verify/src/bash.rs b/shed/completion_verify/src/bash.rs new file mode 100644 index 0000000000000..754407d75c218 --- /dev/null +++ b/shed/completion_verify/src/bash.rs @@ -0,0 +1,52 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::io; +use std::path::Path; + +use crate::extract_from_outputs; +use crate::runtime::BashRuntime; + +pub(crate) fn run_bash( + completion_name: &str, + script: &str, + input: &str, + tempdir: &Path, +) -> io::Result> { + let home = tempdir; + + std::fs::write( + home.join(".bashrc"), + "\ +PS1='% ' +", + )?; + std::fs::write( + home.join(".inputrc"), + "# expected empty file to disable loading ~/.inputrc\n", + )?; + let mut r = BashRuntime::with_home(home.to_owned())?; + r.register(completion_name, script)?; + + let one_tab = r.complete(&format!("{}\t", input))?; + // complete_pty turns on echoing in this case to work around a bash bug, so strip that + let one_tab = one_tab.strip_prefix(input).unwrap_or(&one_tab).trim_start(); + + // Depending on situation, bash might need as many as three tabs to produce all completions + extract_from_outputs( + input, + std::iter::once(Ok(one_tab.to_owned())) + .chain(std::iter::once_with(|| { + r.complete(&format!("{}\t\t", input)) + })) + .chain(std::iter::once_with(|| { + r.complete(&format!("{}\t\t\t", input)) + })), + ) +} diff --git a/shed/completion_verify/src/fish.rs b/shed/completion_verify/src/fish.rs new file mode 100644 index 0000000000000..a5cb4fed9eb45 --- /dev/null +++ b/shed/completion_verify/src/fish.rs @@ -0,0 +1,58 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use std::io; +use std::path::Path; + +use crate::extract_from_outputs; +use crate::runtime::FishRuntime; + +fn reconstruct_with_beginning_omitted(out: String, last_input_word: &str) -> String { + let Some(end) = out.strip_prefix('…') else { + return out; + }; + + for (i, _) in end.char_indices().rev() { + if let Some(omited) = last_input_word.strip_suffix(&end[0..i]) { + return format!("{omited}{end}"); + } + } + // Unreachable because of `i == 0` case + unreachable!() +} + +pub(crate) fn run_fish( + completion_name: &str, + script: &str, + input: &str, + tempdir: &Path, +) -> io::Result> { + let home = tempdir; + + let mut r = FishRuntime::new(home.to_owned())?; + r.register(completion_name, script)?; + + let outs = extract_from_outputs( + input, + std::iter::empty() + .chain(std::iter::once_with(|| r.complete(&format!("{}\t", input)))) + .chain(std::iter::once_with(|| { + r.complete(&format!("{}\t\t", input)) + })), + )?; + + let last_input_word = input + .rsplit_once(|c: char| c.is_ascii_whitespace()) + .map_or(input, |x| x.1); + + Ok(outs + .into_iter() + .map(|out| reconstruct_with_beginning_omitted(out, last_input_word)) + .collect()) +} diff --git a/shed/completion_verify/src/main.rs b/shed/completion_verify/src/main.rs new file mode 100644 index 0000000000000..e93ee365e8ab8 --- /dev/null +++ b/shed/completion_verify/src/main.rs @@ -0,0 +1,276 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use std::io; +use std::process::Command; + +use clap::Parser; + +use crate::bash::run_bash; +use crate::fish::run_fish; +use crate::zsh::run_zsh; + +mod bash; +mod fish; +mod runtime; +mod zsh; + +#[derive(clap::ValueEnum, Clone, Copy, Debug)] +#[clap(rename_all = "kebab-case")] +enum Shell { + Bash, + Fish, + Zsh, +} + +impl Shell { + fn find(self) -> io::Result { + match self { + Self::Bash => Ok(Command::new("bash")), + Self::Fish => { + let mut path = buck_resources::get("buck2/shed/completion_verify/fish").unwrap(); + path.push("bin/fish"); + Ok(Command::new(path)) + } + Self::Zsh => { + if cfg!(target_os = "macos") { + Ok(Command::new("zsh")) + } else { + let mut path = buck_resources::get("buck2/shed/completion_verify/zsh").unwrap(); + path.push("usr/bin/zsh"); + Ok(Command::new(path)) + } + } + } + } +} + +fn extract_from_outputs>( + input: &str, + raw_outs: impl IntoIterator>, +) -> io::Result> { + for raw_out in raw_outs { + if let Some(options) = extract_from_single_output(input, raw_out?.as_ref()) { + return Ok(options); + } + } + Ok(Vec::new()) +} + +/// Accepts an output like `% buck2 targets` or `% buck2\ntargets test` and returns +/// the possible completions +fn extract_from_single_output(input: &str, raw_out: &str) -> Option> { + if let Some((_, rest)) = raw_out.split_once('\n') { + // Multiple lines of output indicates there is more than one option. Just naively splitting + // the output by whitespace is unfortunate wrong in hypothetical cases of completions with + // spaces, but those should be uncommon so this is fine. 
+ Some( + rest.split_ascii_whitespace() + .filter(|s| !s.is_empty()) + .map(str::to_owned) + .collect(), + ) + } else { + let raw_out = raw_out.strip_prefix("% ").unwrap_or(raw_out); + + // No outputed completions + if raw_out == input || raw_out.is_empty() { + return None; + } + + if !raw_out.ends_with(|c: char| c.is_ascii_whitespace()) { + // Output does not end with whitespace. This means that the output is a partial + // completion, and so we'll return `None` to indicate that the completion should be + // retried with an additional tab + return None; + } + + // Find the first changed word and copy everything beginning there + let mut last_equal = 0; + for (i, c) in raw_out.char_indices() { + if c.is_ascii_whitespace() && input.len() > i { + // Include this character in the comparison + let i = i + 1; + if raw_out.as_bytes()[..i] == input.as_bytes()[..i] { + last_equal = i; + } else { + break; + } + } + } + Some(vec![raw_out[last_equal..].trim_end().to_owned()]) + } +} + +fn run( + completion_name: &str, + script: &str, + input: &str, + tempdir: &Option, + shell: Shell, +) -> io::Result> { + let real_tempdir; + let tempdir = match tempdir { + Some(tempdir) => tempdir.as_ref(), + None => { + real_tempdir = tempfile::tempdir()?; + real_tempdir.path() + } + }; + + match shell { + Shell::Bash => run_bash(completion_name, script, input, &tempdir), + Shell::Fish => run_fish(completion_name, script, input, &tempdir), + Shell::Zsh => run_zsh(completion_name, script, input, &tempdir), + } +} + +/// Helper binary used to test CLI completions. +/// +/// Other than the args, it accepts a single line of input containing a partial command invocation +/// to be completed and outputs the possible completions, newline delimited. +/// +/// Completion checking from shells are fundamentally racey - to help guard against this, when the +/// completion script is invoked, the test environment sets `COMPLETION_VERIFY_LOCKFILE` to a path. +/// A backing completion impl can create a file at this path to indicate that it is still executing. +#[derive(Debug, clap::Parser)] +#[clap(name = "completion-verify")] +struct CompletionVerify { + /// The path to a directory to use as a tempdir + /// + /// Must be empty prior to each invocation of this binary + #[clap(long, value_name = "DIR")] + tempdir: Option, + /// The command we complete + #[clap(long, value_name = "COMMAND", default_value = "buck2")] + name: String, + /// The shell to test with + shell: Shell, + /// The path of the completion script to load + script: String, +} + +fn main() -> io::Result<()> { + let args = CompletionVerify::parse(); + + let script = std::fs::read_to_string(&args.script)?; + let input = std::io::read_to_string(io::stdin())?; + + for option in run(&args.name, &script, &input, &args.tempdir, args.shell)? 
{ + println!("{}", option); + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use crate::run; + use crate::Shell; + + const BASH_SCRIPT: &str = "complete -W 'car1 cat2' find"; + + // Note: fish requires the command to actually exist + const FISH_SCRIPT: &str = "complete -c find -a 'car1 cat2'"; + + const ZSH_SCRIPT: &str = "\ +#compdef find +_impl() +{ + compadd car1 cat2 +} +compdef _impl find +"; + + fn test_complete(input: &str, expected: &[&'static str]) { + check_shell_available(Shell::Bash); + let actual = run( + "find", + BASH_SCRIPT, + &format!("find {}", input), + &None, + Shell::Bash, + ) + .unwrap(); + assert_eq!(actual, expected, "testing bash"); + + if cfg!(target_os = "linux") { + check_shell_available(Shell::Fish); + let actual = run( + "find", + FISH_SCRIPT, + &format!("find {}", input), + &None, + Shell::Fish, + ) + .unwrap(); + assert_eq!(actual, expected, "testing fish"); + } + + check_shell_available(Shell::Zsh); + let actual = run( + "find", + ZSH_SCRIPT, + &format!("find {}", input), + &None, + Shell::Zsh, + ) + .unwrap(); + assert_eq!(actual, expected, "testing zsh"); + } + + fn check_shell_available(shell: Shell) { + #[allow(clippy::expect_fun_call)] + let output = shell + .find() + .unwrap() + .arg("--version") + .output() + .expect(format!("Failed to run {:?}", shell).as_str()); + assert!( + output.status.success(), + "checking that `{:?}` is available", + shell, + ); + } + + #[test] + fn test_zero() { + test_complete("camp", &[]); + } + + #[test] + fn test_one() { + test_complete("car", &["car1"]); + test_complete("car1", &["car1"]); + } + + #[test] + fn test_two() { + test_complete("ca", &["car1", "cat2"]); + test_complete("c", &["car1", "cat2"]); + } + + #[test] + fn test_long_completion() { + let arg1 = "abcdefghijkl0"; + let arg2 = "abcdefghijkl1"; + let script: &str = &format!("complete -c buck2 -a '{arg1} {arg2}'"); + + if cfg!(target_os = "linux") { + check_shell_available(Shell::Fish); + let actual = run("buck2", script, "buck2 abcdefghijkl", &None, Shell::Fish).unwrap(); + assert_eq!( + actual, + vec![arg1.to_owned(), arg2.to_owned()], + "testing fish" + ); + } + } +} diff --git a/shed/completion_verify/src/runtime/LICENSE-APACHE b/shed/completion_verify/src/runtime/LICENSE-APACHE new file mode 100644 index 0000000000000..8dada3edaf50d --- /dev/null +++ b/shed/completion_verify/src/runtime/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
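(Editor's note, not part of the patch: the tests added at the top of this hunk all reduce to the following shape. `run` is crate-internal and the type of the `&None` argument lies outside this hunk, so treat this as an illustrative sketch rather than a public API.)

// Hypothetical single check, mirroring `test_two` above: feed a completion
// script to a real shell running in a pty, type "find ca<TAB>", and collect
// the candidates the shell offers. Assumes, as the assertions above suggest,
// that `run` yields the candidate list as a Vec<String>.
fn bash_smoke_check() {
    let script = "complete -W 'car1 cat2' find";
    let candidates = run("find", script, "find ca", &None, Shell::Bash).unwrap();
    assert_eq!(candidates, vec!["car1".to_owned(), "cat2".to_owned()]);
}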
diff --git a/shed/completion_verify/src/runtime/LICENSE-MIT b/shed/completion_verify/src/runtime/LICENSE-MIT
new file mode 100644
index 0000000000000..a2d01088b6ce5
--- /dev/null
+++ b/shed/completion_verify/src/runtime/LICENSE-MIT
@@ -0,0 +1,19 @@
+Copyright (c) Individual contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/shed/completion_verify/src/runtime/NOTICE b/shed/completion_verify/src/runtime/NOTICE
new file mode 100644
index 0000000000000..bf58f86bb488c
--- /dev/null
+++ b/shed/completion_verify/src/runtime/NOTICE
@@ -0,0 +1 @@
+The code in this directory is adapted from [`completest_pty`](https://crates.io/crates/completest_pty).
diff --git a/shed/completion_verify/src/runtime/mod.rs b/shed/completion_verify/src/runtime/mod.rs
new file mode 100644
index 0000000000000..384b695996839
--- /dev/null
+++ b/shed/completion_verify/src/runtime/mod.rs
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+// @lint-ignore-every PATTERNLINT LINTIGNORE
+
+// The code in this module is adapted from [`completest_pty`](https://crates.io/crates/completest_pty).
+
+//! Run completions for your program
+//!
+//! # Example
+//!
+//! ```rust,no_run
+//! # #[cfg(unix)] {
+//! # use std::path::Path;
+//! # let bin_root = Path::new("").to_owned();
+//! # let completion_script = "";
+//! # let home = std::env::current_dir().unwrap();
+//! let term = completest_pty::Term::new();
+//!
+//! let mut runtime = completest_pty::BashRuntime::new(bin_root, home).unwrap();
+//! runtime.register("foo", completion_script).unwrap();
+//! let output = runtime.complete("foo \t\t", &term).unwrap();
+//! # }
+//! ```
+
+use std::ffi::OsStr;
+use std::io::Read as _;
+use std::io::Write as _;
+use std::path::Path;
+use std::path::PathBuf;
+use std::process::Command;
+use std::time::Duration;
+
+use ptyprocess::PtyProcess;
+
+use crate::Shell;
+
+/// Zsh runtime
+#[derive(Debug)]
+pub(crate) struct ZshRuntime {
+    home: PathBuf,
+}
+
+impl ZshRuntime {
+    /// Reuse an existing runtime's home
+    pub(crate) fn with_home(home: PathBuf) -> std::io::Result<Self> {
+        Ok(Self { home })
+    }
+
+    /// Register a completion script
+    pub(crate) fn register(&mut self, name: &str, content: &str) -> std::io::Result<()> {
+        let path = self.home.join(format!("zsh/_{name}"));
+        std::fs::create_dir_all(path.parent().expect("path created with parent"))?;
+        std::fs::write(path, content)
+    }
+
+    /// Get the output from typing `input` into the shell
+    pub(crate) fn complete(&self, input: &str) -> std::io::Result<String> {
+        let mut command = Shell::Zsh.find()?;
+        command.arg("--noglobalrcs");
+        command.env("TERM", "xterm").env("ZDOTDIR", &self.home);
+        let echo = false;
+        comptest(command, echo, input, &self.home)
+    }
+}
+
+/// Bash runtime
+#[derive(Debug)]
+pub(crate) struct BashRuntime {
+    home: PathBuf,
+    config: PathBuf,
+}
+
+impl BashRuntime {
+    /// Reuse an existing runtime's home
+    pub(crate) fn with_home(home: PathBuf) -> std::io::Result<Self> {
+        let config_path = home.join(".bashrc");
+
+        Ok(Self {
+            home,
+            config: config_path,
+        })
+    }
+
+    /// Register a completion script
+    pub(crate) fn register(&mut self, _name: &str, content: &str) -> std::io::Result<()> {
+        let mut file = std::fs::OpenOptions::new()
+            .append(true)
+            .open(&self.config)?;
+        writeln!(&mut file, "{content}")?;
+        Ok(())
+    }
+
+    /// Get the output from typing `input` into the shell
+    pub(crate) fn complete(&self, input: &str) -> std::io::Result<String> {
+        let mut command = Shell::Bash.find()?;
+        let inputrc_path = self.home.join(".inputrc");
+        command
+            .env("TERM", "xterm")
+            .env("INPUTRC", &inputrc_path)
+            .env("BASH_SILENCE_DEPRECATION_WARNING", "1")
+            .args([
+                OsStr::new("--noprofile"),
+                OsStr::new("--rcfile"),
+                self.config.as_os_str(),
+            ]);
+        let echo = !input.contains("\t\t");
+        comptest(command, echo, input, &self.home)
+    }
+}
+
+/// Fish runtime
+#[derive(Debug)]
+pub(crate) struct FishRuntime {
+    home: PathBuf,
+}
+
+impl FishRuntime {
+    /// Initialize a new runtime's home
+    pub(crate) fn new(home: PathBuf) -> std::io::Result<Self> {
+        std::fs::create_dir_all(&home)?;
+
+        let config_path = home.join("fish/config.fish");
+        let config = "\
+set -U fish_greeting \"\"
+set -U fish_autosuggestion_enabled 0
+function fish_title
+end
+function fish_prompt
+    printf '%% '
+end;
+"
+        .to_owned();
+        std::fs::create_dir_all(config_path.parent().expect("path created with parent"))?;
+        std::fs::write(config_path, config)?;
+
+        Self::with_home(home)
+    }
+
+    /// Reuse an existing runtime's home
+    pub(crate) fn with_home(home: PathBuf) -> std::io::Result<Self> {
+        Ok(Self { home })
+    }
+
+    /// Register a completion script
+    pub(crate) fn register(&mut self, name: &str, content: &str) -> std::io::Result<()> {
+        let path = self.home.join(format!("fish/completions/{name}.fish"));
+        std::fs::create_dir_all(path.parent().expect("path created with parent"))?;
+        std::fs::write(path, content)
+    }
+
+    /// Get the output from typing `input` into the shell
+    pub(crate) fn complete(&self, input: &str) -> std::io::Result<String> {
+        let mut command = Shell::Fish.find()?;
+        command
+            // fish requires TERM to be set.
+            .env("TERM", "xterm")
+            .env("XDG_CONFIG_HOME", &self.home);
+        let echo = false;
+        comptest(command, echo, input, &self.home)
+    }
+}
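(Editor's note, not part of the patch: each runtime above follows the same register-then-complete shape. A minimal sketch of driving `FishRuntime` directly, assuming crate-internal code since the types are `pub(crate)`; the crate's real entry point is `run` in lib.rs, outside this hunk.)

fn fish_demo(home: std::path::PathBuf) -> std::io::Result<String> {
    // `new` writes a quiet config.fish under `home`.
    let mut rt = FishRuntime::new(home)?;
    // `register` drops the script where fish's completion autoloader looks for it.
    rt.register("find", "complete -c find -a 'car1 cat2'")?;
    // `complete` types the input (with a trailing tab) into a pty and returns
    // the rendered screen contents.
    rt.complete("find ca\t")
}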
+
+const TERM_WIDTH: u16 = 120;
+const TERM_HEIGHT: u16 = 60;
+
+fn comptest(
+    mut command: Command,
+    echo: bool,
+    input: &str,
+    lockfile_dir: &Path,
+) -> std::io::Result<String> {
+    #![allow(clippy::unwrap_used)] // some unwraps need extra investigation
+
+    let lockfile = lockfile_dir.join("completion_verify_lockfile");
+
+    command.env("COMPLETION_VERIFY_LOCKFILE", &lockfile);
+
+    // Spawn a new process and pass it the input.
+    //
+    // Typing the input triggers the shell's completion-loading machinery, which can take some
+    // time, so we let it run for a while.
+    let mut process = PtyProcess::spawn(command)?;
+    process.set_window_size(TERM_WIDTH, TERM_HEIGHT)?;
+    // for some reason bash does not produce anything with echo disabled...
+    process.set_echo(echo, None)?;
+
+    let mut parser = vt100::Parser::new(TERM_HEIGHT, TERM_WIDTH, 0);
+
+    let mut stream = process.get_raw_handle()?;
+    // pass the completion input
+    write!(stream, "{}", input)?;
+    stream.flush()?;
+
+    let (snd, rcv) = std::sync::mpsc::channel();
+
+    let shutdown = std::sync::atomic::AtomicBool::new(false);
+    let shutdown_ref = &shutdown;
+    std::thread::scope(|scope| {
+        scope.spawn(move || {
+            // The lockfile can be created by a completions impl to indicate that it hasn't
+            // finished yet.
+            let check_lockfile = || lockfile.exists();
+
+            // First wait for anything to be produced. This is usually the prompt.
+            rcv.recv().unwrap();
+            // Then, wait for a potentially extended amount of time for the next data to be
+            // produced. This only fails to happen when there are no completions to output.
+            if rcv.recv_timeout(Duration::from_millis(5000)).is_ok() || check_lockfile() {
+                // Finally, wait for shorter intervals until new output stops being produced.
+                while rcv.recv_timeout(Duration::from_millis(1000)).is_ok() || check_lockfile() {}
+            }
+
+            shutdown_ref.store(true, std::sync::atomic::Ordering::SeqCst);
+            process.exit(false).unwrap();
+        });
+
+        let mut buf = [0; 2048];
+        let mut seen_prompt = false;
+        while let Ok(n) = stream.read(&mut buf) {
+            if shutdown.load(std::sync::atomic::Ordering::SeqCst) {
+                // fish clears completions on process teardown
+                break;
+            }
+            let buf = &buf[..n];
+            if buf.is_empty() {
+                break;
+            }
+            parser.process(buf);
+
+            // We know that we will see at least one prompt, so we never need to consider exiting
+            // before that comes through.
+            match seen_prompt {
+                false => {
+                    if buf.contains(&b'%') {
+                        seen_prompt = true;
+                        _ = snd.send(());
+                    }
+                }
+                true => {
+                    _ = snd.send(());
+                }
+            }
+        }
+    });
+
+    let content = parser.screen().contents();
+    Ok(content)
+}
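(Editor's note, not part of the patch: the waiter thread in `comptest` above implements a small "wait for the terminal output to settle" pattern that is easier to see in isolation. A distilled, self-contained sketch with the same thresholds; the lockfile escape hatch and the pty plumbing are omitted.)

use std::sync::mpsc::{channel, Receiver};
use std::thread;
use std::time::Duration;

// One signal per chunk of terminal output; the waiter declares the output
// settled once signals stop arriving for a full quiet interval.
fn wait_until_settled(rcv: Receiver<()>) {
    // The first signal (the prompt) always arrives eventually.
    rcv.recv().unwrap();
    // Completions may take a long time to load on first use.
    if rcv.recv_timeout(Duration::from_millis(5000)).is_ok() {
        // Once output has started, keep draining until a quiet second passes.
        while rcv.recv_timeout(Duration::from_millis(1000)).is_ok() {}
    }
}

fn main() {
    let (snd, rcv) = channel();
    thread::spawn(move || {
        for _ in 0..3 {
            snd.send(()).unwrap(); // stand-in for "more output arrived"
            thread::sleep(Duration::from_millis(50));
        }
    });
    wait_until_settled(rcv);
    println!("terminal output settled");
}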
diff --git a/shed/completion_verify/src/zsh.rs b/shed/completion_verify/src/zsh.rs
new file mode 100644
index 0000000000000..bc71e76d84b69
--- /dev/null
+++ b/shed/completion_verify/src/zsh.rs
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use std::io;
+use std::path::Path;
+
+use crate::extract_from_outputs;
+use crate::runtime::ZshRuntime;
+
+pub(crate) fn run_zsh(
+    completion_name: &str,
+    script: &str,
+    input: &str,
+    tempdir: &Path,
+) -> io::Result<Vec<String>> {
+    let home = tempdir;
+
+    // A couple of locals need to be set based on the zsh install dir, if we installed into
+    // a tempdir as we do on Linux
+    let extra_fpath;
+    let extra_mpath;
+    if cfg!(target_os = "linux") {
+        let base_path = buck_resources::get("buck2/shed/completion_verify/zsh").unwrap();
+        let version_dir = base_path.join("usr/lib64/zsh");
+        let version = std::fs::read_dir(version_dir)?
+            .next()
+            .unwrap()?
+            .file_name()
+            .to_str()
+            .unwrap()
+            .to_owned();
+        let base_path = base_path.to_str().unwrap();
+        extra_fpath = format!("{base_path}/usr/share/zsh/{version}/functions");
+        extra_mpath = format!("module_path={base_path}/usr/lib64/zsh/{version}");
+    } else {
+        extra_fpath = String::new();
+        extra_mpath = String::new();
+    }
+
+    // Copy and paste of `ZshRuntime::new` which works around a zsh bug in which completions are not
+    // autoloaded completely
+    let config_path = home.join(".zshenv");
+    let config = format!(
+        "\
+fpath=($ZDOTDIR/zsh {extra_fpath} $fpath)
+{extra_mpath}
+autoload -U +X compinit && compinit -u # bypass compaudit security checking
+precmd_functions=\"\" # avoid the prompt being overwritten
+PS1='%% '
+PROMPT='%% '
+_{completion_name} >/dev/null 2>/dev/null ; # Force the completion to be loaded
+"
+    );
+    std::fs::write(config_path, config)?;
+
+    let mut r = ZshRuntime::with_home(home.to_owned())?;
+    r.register(completion_name, script)?;
+
+    extract_from_outputs(
+        input,
+        std::iter::empty()
+            .chain(std::iter::once_with(|| r.complete(&format!("{}\t", input))))
+            .chain(std::iter::once_with(|| {
+                r.complete(&format!("{}\t\t", input))
+            })),
+    )
+}
diff --git a/shed/internment_tweaks/BUCK b/shed/internment_tweaks/BUCK
deleted file mode 100644
index cacfed555e0e9..0000000000000
--- a/shed/internment_tweaks/BUCK
+++ /dev/null
@@ -1,16 +0,0 @@
-load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library")
-load("@fbsource//tools/build_defs:glob_defs.bzl", "glob")
-
-oncall("build_infra")
-
-rust_library(
-    name = "internment_tweaks",
-    srcs = glob(["src/**/*.rs"]),
-    crate_root = "src/lib.rs",
-    deps = [
-        "fbsource//third-party/rust:equivalent",
-        "//buck2/allocative/allocative:allocative",
-        "//buck2/gazebo/dupe:dupe",
-        "//buck2/shed/lock_free_hashtable:lock_free_hashtable",
-    ],
-)
diff --git a/shed/internment_tweaks/Cargo.toml b/shed/internment_tweaks/Cargo.toml
deleted file mode 100644
index b7109fd01c714..0000000000000
--- a/shed/internment_tweaks/Cargo.toml
+++ /dev/null
@@ -1,18 +0,0 @@
-[package]
-name = "internment_tweaks"
-version = "0.1.0"
-edition = "2021"
-description = "Similar to `internment` crate, but with interface and performance tweaks"
-
-[dependencies]
-gazebo_lint.version = "0.1"
-gazebo_lint.optional = true
-# @oss-disable: gazebo_lint.path = "../../gazebo_lint/gazebo_lint"
-gazebo = { workspace = true }
-dupe = { workspace = true }
-allocative = { workspace = true }
-lock_free_hashtable = { workspace = true }
-equivalent = { workspace = true }
-
-[features]
-# @oss-disable: default = ["gazebo_lint"]
diff --git a/shed/internment_tweaks/src/lib.rs b/shed/internment_tweaks/src/lib.rs
deleted file mode 100644
index 051a75f5359bb..0000000000000
--- a/shed/internment_tweaks/src/lib.rs
+++ /dev/null
@@ -1,373 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! Intern objects in memory. -//! -//! This is similar to [`internment` crate](https://github.com/droundy/internment) -//! but with changes for performance and flexibility. - -#![cfg_attr(feature = "gazebo_lint", feature(plugin))] -#![cfg_attr(feature = "gazebo_lint", allow(deprecated))] // :( -#![cfg_attr(feature = "gazebo_lint", plugin(gazebo_lint))] -#![feature(offset_of)] - -use std::cmp::Ordering; -use std::collections::hash_map::DefaultHasher; -use std::fmt; -use std::fmt::Display; -use std::fmt::Formatter; -use std::hash::Hash; -use std::hash::Hasher; -use std::marker; -use std::mem; -use std::ops::Deref; -use std::ptr; - -use allocative::Allocative; -use allocative::Visitor; -use dupe::Dupe; -use equivalent::Equivalent; -use lock_free_hashtable::sharded::ShardedLockFreeRawTable; - -pub struct StaticInterner { - table: ShardedLockFreeRawTable>, 64>, - _marker: marker::PhantomData, -} - -/// This structure is similar to `Hashed`, but it is not parameterized by hash function. -#[derive(Debug)] -struct InternedData { - data: T, - hash: u64, -} - -/// An interned pointer. -/// -/// Equality of this type is a pointer comparison. -/// But note, this works correctly only if `Intern` pointers created -/// from the same instance of `StaticInterner`. -#[derive(Debug)] -pub struct Intern { - pointer: &'static InternedData, -} - -// TODO(nga): derive. -impl Allocative for Intern { - fn visit<'a, 'b: 'a>(&self, visitor: &'a mut Visitor<'b>) { - let mut visitor = visitor.enter_self_sized::(); - if mem::size_of::() > 0 { - let visitor = visitor.enter_shared( - allocative::Key::new("pointer"), - mem::size_of::<*const T>(), - &**self as &T as *const T as *const (), - ); - if let Some(mut visitor) = visitor { - (**self).visit(&mut visitor); - visitor.exit(); - } - } - } -} - -impl Copy for Intern {} - -impl Clone for Intern { - #[inline] - fn clone(&self) -> Self { - *self - } -} - -impl Dupe for Intern { - #[inline] - fn dupe(&self) -> Self { - *self - } -} - -impl Deref for Intern { - type Target = T; - - #[inline] - fn deref(&self) -> &T { - &self.pointer.data - } -} - -impl Intern { - #[inline] - pub const fn deref_static(&self) -> &'static T { - &self.pointer.data - } - - /// SAFETY: This may only be called with pointers returned from [`Self::deref_static`] - #[inline] - pub const unsafe fn from_ptr(p: *const T) -> Self { - // SAFETY: `p` is a pointer to the `data` field of an `InternedData` - unsafe { - let p = p - .cast::() - .sub(std::mem::offset_of!(InternedData, data)) - .cast::>(); - Self { pointer: &*p } - } - } -} - -impl Hash for Intern { - fn hash(&self, state: &mut H) { - // We could hash only the pointer, since we only compare the pointers, - // but users may expect hashing to be stable between runs. 
- self.pointer.hash.hash(state); - } -} - -impl PartialEq for Intern { - #[inline] - fn eq(&self, other: &Self) -> bool { - ptr::eq(self.pointer, other.pointer) - } -} - -impl Eq for Intern {} - -impl PartialOrd for Intern { - fn partial_cmp(&self, other: &Self) -> Option { - self.pointer.data.partial_cmp(&other.pointer.data) - } -} - -impl Ord for Intern { - fn cmp(&self, other: &Self) -> Ordering { - self.pointer.data.cmp(&other.pointer.data) - } -} - -impl Display for Intern { - #[inline] - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - Display::fmt(&self.pointer.data, f) - } -} - -/// Hash the value before acquiring the lock. -struct Hashed { - hash: u64, - value: T, - _marker: marker::PhantomData, -} - -impl Hashed { - /// Compute the hash. - fn hash(value: &T) -> u64 { - let mut hasher = H::default(); - value.hash(&mut hasher); - hasher.finish() - } - - fn new(value: T) -> Self { - let hash = Self::hash(&value); - Hashed { - hash, - value, - _marker: marker::PhantomData, - } - } -} - -impl StaticInterner { - /// Create a new interner for given type. - pub const fn new() -> StaticInterner { - StaticInterner { - table: ShardedLockFreeRawTable::new(), - _marker: marker::PhantomData, - } - } -} - -impl StaticInterner { - /// Allocate a value, or return previously allocated one. - pub fn intern(&'static self, value: Q) -> Intern - where - Q: Hash + Equivalent + Into, - T: Eq + Hash, - { - let hashed = Hashed::<_, H>::new(value); - if let Some(pointer) = self - .table - .lookup(hashed.hash, |t| hashed.value.equivalent(&t.data)) - { - return Intern { pointer }; - } - - self.intern_slow(hashed) - } - - #[cold] - fn intern_slow(&'static self, hashed_value: Hashed) -> Intern - where - Q: Hash + Equivalent + Into, - T: Eq + Hash, - { - let pointer = Box::new(InternedData { - data: hashed_value.value.into(), - hash: hashed_value.hash, - }); - let pointer = self - .table - .insert( - hashed_value.hash, - pointer, - |a, b| a.hash == b.hash && a.data == b.data, - |t| t.hash, - ) - .0; - Intern { pointer } - } - - /// Get a value if it has been interned. - pub fn get(&'static self, key: Q) -> Option> - where - Q: Hash + Equivalent, - T: Eq + Hash, - { - let hashed = Hashed::<_, H>::new(key); - self.table - .lookup(hashed.hash, |t| hashed.value.equivalent(&t.data)) - .map(|pointer| Intern { pointer }) - } - - /// Iterate over the interned values. - #[inline] - pub fn iter(&'static self) -> Iter { - Iter { - iter: self.table.iter(), - _marker: marker::PhantomData, - } - } -} - -pub struct Iter { - iter: lock_free_hashtable::sharded::Iter<'static, Box>, 64>, - _marker: marker::PhantomData, -} - -impl Iterator for Iter { - type Item = Intern; - - #[inline] - fn next(&mut self) -> Option { - self.iter.next().map(|pointer| Intern { pointer }) - } -} - -#[cfg(test)] -mod tests { - use std::collections::BTreeSet; - - use equivalent::Equivalent; - - use crate::Intern; - use crate::StaticInterner; - - static STRING_INTERNER: StaticInterner = StaticInterner::new(); - - #[derive(Hash, Eq, PartialEq)] - struct StrRef<'a>(&'a str); - - #[test] - fn test_intern() { - assert_eq!( - STRING_INTERNER.intern("hello".to_owned()), - STRING_INTERNER.intern("hello".to_owned()) - ); - assert_eq!( - STRING_INTERNER.intern("hello".to_owned()), - STRING_INTERNER.intern(StrRef("hello")), - ); - assert_ne!( - STRING_INTERNER.intern("hello".to_owned()), - STRING_INTERNER.intern("world".to_owned()) - ); - } - - // Make sure things work with reallocation. 
- #[test] - fn test_resize() { - let mut interned_strings = Vec::new(); - for i in 0..100000 { - let s = i.to_string(); - let interned = STRING_INTERNER.intern(s.clone()); - assert_eq!(&s, &*interned); - interned_strings.push(interned); - } - - for s in &interned_strings { - let interned = STRING_INTERNER.intern(String::clone(s)); - assert_eq!(*s, interned); - } - } - - impl Equivalent for StrRef<'_> { - fn equivalent(&self, key: &String) -> bool { - self.0 == key - } - } - - impl From> for String { - fn from(value: StrRef<'_>) -> Self { - value.0.to_owned() - } - } - - static TEST_GET_INTERNER: StaticInterner = StaticInterner::new(); - #[test] - fn test_get() { - let interner = &TEST_GET_INTERNER; - assert_eq!(interner.get(StrRef("hello")), None); - assert_eq!(interner.get("hello".to_owned()), None); - - let interned = interner.intern("hello".to_owned()); - assert_eq!(interner.get(StrRef("hello")), Some(interned)); - assert_eq!(interner.get("hello".to_owned()), Some(interned)); - assert_eq!(interner.get(StrRef("world")), None); - } - - static TEST_ITER_INTERNER: StaticInterner<&'static str> = StaticInterner::new(); - #[test] - fn test_iter() { - let interner = &TEST_ITER_INTERNER; - assert_eq!( - interner - .iter() - .map(|v| *v) - .collect::>(), - BTreeSet::from([]) - ); - interner.intern("hello"); - interner.intern("cat"); - interner.intern("world"); - - assert_eq!( - interner - .iter() - .map(|v| *v) - .collect::>(), - BTreeSet::from(["hello", "cat", "world"]) - ); - } - - static TEST_POINTER_INTERNER: StaticInterner<&'static str> = StaticInterner::new(); - #[test] - fn test_pointer_roundtrip() { - let one = TEST_POINTER_INTERNER.intern("one"); - let one_p = one.deref_static() as *const _; - assert_eq!(one, unsafe { Intern::from_ptr(one_p) }); - } -} diff --git a/shed/lock_free_hashtable/BUCK b/shed/lock_free_hashtable/BUCK index 51c9e95f8e9e4..9f965847a12cd 100644 --- a/shed/lock_free_hashtable/BUCK +++ b/shed/lock_free_hashtable/BUCK @@ -1,6 +1,5 @@ load("@fbcode_macros//build_defs:rust_binary.bzl", "rust_binary") load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") diff --git a/shed/lock_free_hashtable/Cargo.toml b/shed/lock_free_hashtable/Cargo.toml index daa2141790468..913549c468b7c 100644 --- a/shed/lock_free_hashtable/Cargo.toml +++ b/shed/lock_free_hashtable/Cargo.toml @@ -1,8 +1,10 @@ [package] +description = "Lock-free (almost) insertion only hashtable" +edition = "2021" +license = { workspace = true } name = "lock_free_hashtable" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Lock-free (almost) insertion only hashtable" [dependencies] allocative = { workspace = true } diff --git a/shed/lock_free_hashtable/examples/stress.rs b/shed/lock_free_hashtable/examples/stress.rs index 7cd533a7dfc85..6df6423970716 100644 --- a/shed/lock_free_hashtable/examples/stress.rs +++ b/shed/lock_free_hashtable/examples/stress.rs @@ -51,6 +51,7 @@ fn hash(key: u32) -> u64 { hasher.finish() } +#[allow(clippy::trivially_copy_pass_by_ref)] fn hash_fn(key: &u32) -> u64 { hash(*key) } diff --git a/shed/lock_free_hashtable/src/sharded.rs b/shed/lock_free_hashtable/src/sharded.rs index 26d97aa47b590..5e35cd26ec31d 100644 --- a/shed/lock_free_hashtable/src/sharded.rs +++ b/shed/lock_free_hashtable/src/sharded.rs @@ -22,6 +22,12 @@ pub struct ShardedLockFreeRawTable { shards: [LockFreeRawTable; SHARDS], } +impl Default for ShardedLockFreeRawTable { + fn default() -> Self 
{ + ShardedLockFreeRawTable::new() + } +} + impl ShardedLockFreeRawTable { const _ASSERTIONS: () = assert!(SHARDS.is_power_of_two()); diff --git a/shed/lock_free_vec/BUCK b/shed/lock_free_vec/BUCK index 0febc8e2c9a83..25f3e6873ead3 100644 --- a/shed/lock_free_vec/BUCK +++ b/shed/lock_free_vec/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") diff --git a/shed/lock_free_vec/Cargo.toml b/shed/lock_free_vec/Cargo.toml index ff941c412d91f..9a5e8b57597fb 100644 --- a/shed/lock_free_vec/Cargo.toml +++ b/shed/lock_free_vec/Cargo.toml @@ -1,8 +1,10 @@ [package] +description = "Data structure" +edition = "2021" +license = { workspace = true } name = "lock_free_vec" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "Data structure" [dependencies] allocative = { workspace = true } diff --git a/shed/lock_free_vec/src/lib.rs b/shed/lock_free_vec/src/lib.rs index 1ac30fa013c02..9293405853a33 100644 --- a/shed/lock_free_vec/src/lib.rs +++ b/shed/lock_free_vec/src/lib.rs @@ -11,7 +11,6 @@ #![deny(missing_docs)] -use std::array; use std::cell::UnsafeCell; use std::cmp; use std::mem; @@ -134,13 +133,11 @@ impl LockFreeVec { }; /// Empty. - // This can be `const fn` when something is stabilized as const, for example: - // * `MaybeUninit::zeroed` - // * `[const {expr}; 10 ]` #[inline] - pub fn new() -> LockFreeVec { + pub const fn new() -> LockFreeVec { LockFreeVec { - buckets: array::from_fn(|_| UnsafeCell::new(ptr::null_mut())), + // SAFETY: we want zeros. + buckets: unsafe { MaybeUninit::zeroed().assume_init() }, size: AtomicUsize::new(0), } } diff --git a/shed/more_futures/BUCK b/shed/more_futures/BUCK deleted file mode 100644 index 4285027aa3145..0000000000000 --- a/shed/more_futures/BUCK +++ /dev/null @@ -1,30 +0,0 @@ -load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") - -oncall("build_infra") - -rust_library( - name = "more_futures", - srcs = glob( - ["src/**/*.rs"], - ), - test_deps = [ - "fbsource//third-party/rust:assert_matches", - ], - deps = [ - "fbsource//third-party/rust:futures", - "fbsource//third-party/rust:once_cell", - "fbsource//third-party/rust:parking_lot", - "fbsource//third-party/rust:pin-project", - "fbsource//third-party/rust:slab", - "fbsource//third-party/rust:take_mut", - "fbsource//third-party/rust:thiserror", - "fbsource//third-party/rust:tokio", - "fbsource//third-party/rust:tracing", - "//buck2/allocative/allocative:allocative", - "//buck2/app/buck2_data:buck2_data", - "//buck2/app/buck2_events:buck2_events", - "//buck2/app/buck2_wrapper_common:buck2_wrapper_common", - "//buck2/gazebo/dupe:dupe", - ], -) diff --git a/shed/more_futures/Cargo.toml b/shed/more_futures/Cargo.toml deleted file mode 100644 index 5472f8af7baf9..0000000000000 --- a/shed/more_futures/Cargo.toml +++ /dev/null @@ -1,30 +0,0 @@ -[package] -name = "more_futures" -version = "0.1.0" -edition = "2021" - -[dependencies] -futures = "0.3" -gazebo = { workspace = true } -dupe = { workspace = true } -gazebo_lint.version = "0.1" -gazebo_lint.optional = true -# @oss-disable: gazebo_lint.path = "../../gazebo_lint/gazebo_lint" -pin-project = "0.4" -tokio = { version = "1.5", features = ["full"]} -tracing = "0.1.22" -buck2_events = { workspace = true } -buck2_data = { workspace = true } -buck2_wrapper_common = { workspace = true } -allocative = { workspace = true } -once_cell = { workspace 
= true } -parking_lot = { workspace = true } -slab = "0.4.7" -take_mut = { workspace = true } -thiserror = { workspace = true } - -[features] -# @oss-disable: default = ["gazebo_lint"] - -[dev-dependencies] -assert_matches = { workspace = true } diff --git a/shed/more_futures/src/cancellation/future.rs b/shed/more_futures/src/cancellation/future.rs deleted file mode 100644 index 0ad52850c1fb8..0000000000000 --- a/shed/more_futures/src/cancellation/future.rs +++ /dev/null @@ -1,1262 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! -//! A future that can be canceled via an explicit `CancellationHandle`. -//! This future is intended to be spawned on tokio-runtime directly, and for its results to be -//! accessed via the joinhandle. -//! It is not intended to be polled directly. -//! - -use std::future::Future; -use std::mem; -use std::pin::Pin; -use std::sync::atomic::AtomicBool; -use std::sync::atomic::AtomicU8; -use std::sync::atomic::Ordering; -use std::sync::Arc; -use std::task::Context; -use std::task::Poll; -use std::task::Waker; - -use dupe::Clone_; -use dupe::Dupe; -use dupe::Dupe_; -use futures::future::BoxFuture; -use futures::task::AtomicWaker; -use parking_lot::Mutex; -use pin_project::pin_project; -use slab::Slab; - -use crate::cancellation::ExplicitCancellationContext; -use crate::maybe_future::MaybeFuture; -use crate::owning_future::OwningFuture; - -pub(crate) fn make_cancellable_future( - f: F, -) -> (ExplicitlyCancellableFuture, CancellationHandle) -where - F: for<'a> FnOnce(&'a ExplicitCancellationContext) -> BoxFuture<'a, T> + Send, -{ - let context = ExecutionContext::new(); - - let fut = { - let context = context.dupe(); - let cancel = ExplicitCancellationContext { inner: context }; - - OwningFuture::new(cancel, |d| f(d)) - }; - - let state = SharedState::new(); - - let fut = ExplicitlyCancellableFuture::new(fut, state.dupe(), context); - let handle = CancellationHandle::new(state); - - (fut, handle) -} - -/// Defines a future that operates with the 'CancellationContext' to provide explicit cancellation. -/// -/// NOTE: this future is intended only to be polled in a consistent tokio runtime, and never moved -/// from one executor to another. -/// The general safe way of using this future is to spawn it directly via `tokio::spawn`. -#[pin_project] -pub struct ExplicitlyCancellableFuture { - #[pin] - fut: MaybeFuture>, -} - -struct ExplicitlyCancellableFutureInner { - shared: SharedState, - - execution: ExecutionContext, - - /// NOTE: this is duplicative of the `SharedState`, but unlike that state this is not behind a - /// lock. This avoids us needing to grab the lock to check if we're Pending every time we poll. 
- started: bool, - - future: Pin>>, -} - -impl ExplicitlyCancellableFuture { - fn new( - future: Pin>>, - shared: SharedState, - execution: ExecutionContext, - ) -> Self { - ExplicitlyCancellableFuture { - fut: MaybeFuture::Fut(ExplicitlyCancellableFutureInner { - shared, - execution, - started: false, - future, - }), - } - } -} - -impl Future for ExplicitlyCancellableFuture { - type Output = Option; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut this = self.project(); - - match this.fut.as_mut().poll(cx) { - Poll::Ready(res) => { - this.fut.take(); - Poll::Ready(res) - } - Poll::Pending => Poll::Pending, - } - } -} - -impl ExplicitlyCancellableFutureInner { - fn poll_inner(self: &mut Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let is_cancelled = self.shared.inner.cancelled.load(Ordering::SeqCst); - - if is_cancelled { - let mut execution = self.execution.shared.lock(); - if execution.can_exit() { - return Poll::Ready(None); - } - execution.notify_cancelled(); - } - - let res = Pin::new(&mut self.future).poll(cx).map(Some); - - // If we were using structured cancellation but just exited the critical section, then we - // should exit now. - if is_cancelled && self.execution.shared.lock().can_exit() { - return Poll::Ready(None); - } - - res - } -} - -impl Future for ExplicitlyCancellableFutureInner { - type Output = Option; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - // Update the state before we check for cancellation so that the cancellation logic can - // observe whether this future has entered `poll` or not. This lets cancellation set the - // termination observer correctly so that the state is picked up. - // Once we start, the `poll_inner` will check whether we are actually canceled and return - // the proper poll value. - if !self.started { - // we only update the Waker once at the beginning of the poll. For the same tokio - // runtime, this is always safe and behaves correctly, as such, this future is - // restricted to be ran on the same tokio executor and never moved from one runtime to - // another - take_mut::take( - &mut *self.shared.inner.state.lock(), - |future| match future { - State::Pending => State::Polled { - waker: cx.waker().clone(), - }, - other => other, - }, - ); - - self.started = true; - } - - let poll = self.poll_inner(cx); - - // When we exit, release our waker to ensure we don't keep create a reference cycle for - // this task. - if poll.is_ready() { - let inner = self.shared.inner.dupe(); - let mut locked_state = inner.state.lock(); - let state = mem::replace(&mut *locked_state, State::Exited); - - match state { - State::Cancelled => { - if self.execution.shared.lock().can_exit() { - return Poll::Ready(None); - } - } - _ => {} - } - } else if self.execution.shared.lock().should_exit() { - // the future itself indicated that we should cancel - - return Poll::Ready(None); - } - - poll - } -} - -pub struct CancellationHandle { - shared_state: SharedState, -} - -impl CancellationHandle { - fn new(shared_state: SharedState) -> Self { - CancellationHandle { shared_state } - } - - /// Attempts to cancel the future this handle is associated with as soon as possible, returning - /// a future that completes when the future is canceled. - pub fn cancel(self) { - // Store to the boolean first before we write to state. - // This is because on `poll`, the future will update the state first then check the boolean. 
- // This ordering ensures that either the `poll` has read our cancellation, and hence will - // later notify the termination observer via the channel we store in `State::Cancelled`, - // or that we will observe the terminated state of the future and directly notify the - // `TerminationObserver` ourselves. - self.shared_state - .inner - .cancelled - .store(true, Ordering::SeqCst); - - match &mut *self.shared_state.inner.state.lock() { - State::Cancelled { .. } => { - unreachable!("We consume the CancellationHandle on cancel, so this isn't possible") - } - State::Exited => { - // Nothing to do, that future is done. - } - state @ State::Pending => { - // we wait for the future to `poll` once even if it has yet to do so. - // Since we always should be spawning the `ExplicitlyCancellableFuture` on tokio, - // it should be polled once. - let _old = std::mem::replace(state, State::Cancelled); - } - state @ State::Polled { .. } => { - let old = std::mem::replace(state, State::Cancelled); - match old { - State::Polled { waker } => waker.wake(), - _ => { - unreachable!() - } - } - } - }; - } -} - -#[derive(Clone_, Dupe_)] -struct SharedState { - inner: Arc, -} - -impl SharedState { - fn new() -> Self { - Self { - inner: Arc::new(SharedStateData { - state: Mutex::new(State::Pending), - cancelled: AtomicBool::new(false), - }), - } - } -} - -struct SharedStateData { - state: Mutex, - - /// When set, this future has been cancelled and should attempt to exit as soon as possible. - cancelled: AtomicBool, -} - -enum State { - /// This future has been constructed, but not polled yet. - Pending, - - /// This future has been polled. A waker is available. - Polled { waker: Waker }, - - /// This future has already been cancelled. - Cancelled, - - /// This future has already finished executing. - Exited, -} - -/// Context relating to execution of the `poll` of the future. This will contain the information -/// required for the `CancellationContext` that the future holds to enter critical sections and -/// structured cancellations. 
-#[derive(Clone, Dupe)] -pub(crate) struct ExecutionContext { - shared: Arc>, -} - -impl ExecutionContext { - fn new() -> Self { - Self { - shared: Arc::new(Mutex::new(ExecutionContextData { - cancellation_notification: { - CancellationNotificationData { - inner: Arc::new(CancellationNotificationDataInner { - notified: Default::default(), - wakers: Mutex::new(Some(Default::default())), - }), - } - }, - prevent_cancellation: 0, - should_exit: false, - })), - } - } - - pub fn testing() -> Self { - Self::new() - } - - pub(crate) fn enter_structured_cancellation( - &self, - ) -> (CancellationNotificationData, CriticalSectionGuard) { - let mut shared = self.shared.lock(); - - let notification = shared.enter_structured_cancellation(); - - (notification, CriticalSectionGuard::new(&self.shared)) - } -} - -pub(crate) struct CriticalSectionGuard<'a> { - shared: Option<&'a Mutex>, -} - -impl<'a> CriticalSectionGuard<'a> { - fn new(shared: &'a Mutex) -> Self { - Self { - shared: Some(shared), - } - } - - pub(crate) fn exit_prevent_cancellation(mut self) -> bool { - self.shared - .take() - .expect("should be set") - .lock() - .exit_prevent_cancellation() - } - - pub(crate) fn try_to_disable_cancellation(mut self) -> bool { - let mut shared = self.shared.take().expect("should be set").lock(); - if shared.try_to_disable_cancellation() { - true - } else { - // couldn't prevent cancellation, so release our hold onto the counter - shared.exit_prevent_cancellation(); - false - } - } -} - -impl<'a> Drop for CriticalSectionGuard<'a> { - fn drop(&mut self) { - if let Some(shared) = self.shared.take() { - // never actually exited during normal poll, but dropping this means we'll never poll - // again, so just release the `prevent_cancellation` - - shared.lock().exit_prevent_cancellation(); - } - } -} - -struct ExecutionContextData { - cancellation_notification: CancellationNotificationData, - - /// How many observers are preventing immediate cancellation. - prevent_cancellation: usize, - - should_exit: bool, -} - -impl ExecutionContextData { - /// Does this future not currently prevent its cancellation? 
- fn can_exit(&self) -> bool { - self.prevent_cancellation == 0 - } - - fn should_exit(&self) -> bool { - self.should_exit - } - - fn enter_structured_cancellation(&mut self) -> CancellationNotificationData { - self.prevent_cancellation += 1; - - self.cancellation_notification.dupe() - } - - fn notify_cancelled(&mut self) { - let updated = self.cancellation_notification.inner.notified.fetch_update( - Ordering::SeqCst, - Ordering::SeqCst, - |old| match CancellationNotificationStatus::from(old) { - CancellationNotificationStatus::Pending => { - Some(CancellationNotificationStatus::Notified.into()) - } - CancellationNotificationStatus::Notified => None, - CancellationNotificationStatus::Disabled => None, - }, - ); - if updated.is_ok() { - if let Some(mut wakers) = self.cancellation_notification.inner.wakers.lock().take() { - wakers.drain().for_each(|waker| waker.wake()); - } - } - } - - fn exit_prevent_cancellation(&mut self) -> bool { - self.prevent_cancellation -= 1; - - self.prevent_cancellation == 0 - } - - fn try_to_disable_cancellation(&mut self) -> bool { - let maybe_updated = self.cancellation_notification.inner.notified.fetch_update( - Ordering::SeqCst, - Ordering::SeqCst, - |old| match CancellationNotificationStatus::from(old) { - CancellationNotificationStatus::Pending => { - Some(CancellationNotificationStatus::Disabled.into()) - } - CancellationNotificationStatus::Notified => None, - CancellationNotificationStatus::Disabled => None, - }, - ); - - match maybe_updated { - Ok(_) => true, - Err(old) => { - let old = CancellationNotificationStatus::from(old); - matches!(old, CancellationNotificationStatus::Disabled) - } - } - } -} - -enum CancellationNotificationStatus { - /// no notifications yet. maps to '0' - Pending, - /// notified, maps to '1' - Notified, - /// disabled notifications, maps to '2' - Disabled, -} - -impl From for CancellationNotificationStatus { - fn from(value: u8) -> Self { - match value { - 0 => CancellationNotificationStatus::Pending, - 1 => CancellationNotificationStatus::Notified, - 2 => CancellationNotificationStatus::Disabled, - _ => panic!("invalid status"), - } - } -} - -impl From for u8 { - fn from(value: CancellationNotificationStatus) -> Self { - match value { - CancellationNotificationStatus::Pending => 0, - CancellationNotificationStatus::Notified => 1, - CancellationNotificationStatus::Disabled => 2, - } - } -} - -#[derive(Clone, Dupe)] -pub(crate) struct CancellationNotificationData { - inner: Arc, -} - -struct CancellationNotificationDataInner { - /// notification status per enum 'CancellationNotificationStatus' - notified: AtomicU8, - wakers: Mutex>>>, -} - -pub(crate) struct CancellationNotificationFuture { - data: CancellationNotificationData, - // index into the waker for this future held by the Slab in 'CancellationNotificationData' - id: Option, - // duplicate of the waker held for us to update the waker on poll without acquiring lock - waker: Arc, -} - -impl CancellationNotificationFuture { - pub(crate) fn new(data: CancellationNotificationData) -> Self { - let waker = Arc::new(AtomicWaker::new()); - let id = data - .inner - .wakers - .lock() - .as_mut() - .map(|wakers| wakers.insert(waker.dupe())); - CancellationNotificationFuture { data, id, waker } - } - - fn remove_waker(&mut self, id: Option) { - if let Some(id) = id { - self.data - .inner - .wakers - .lock() - .as_mut() - .map(|wakers| wakers.remove(id)); - } - } -} - -impl Clone for CancellationNotificationFuture { - fn clone(&self) -> Self { - 
CancellationNotificationFuture::new(self.data.dupe()) - } -} - -impl Dupe for CancellationNotificationFuture {} - -impl Drop for CancellationNotificationFuture { - fn drop(&mut self) { - self.remove_waker(self.id); - } -} - -impl Future for CancellationNotificationFuture { - type Output = (); - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - match CancellationNotificationStatus::from(self.data.inner.notified.load(Ordering::SeqCst)) - { - CancellationNotificationStatus::Notified => { - // take the id so that we don't need to lock the wakers when this future is dropped - // after completion - let id = self.id.take(); - self.remove_waker(id); - Poll::Ready(()) - } - _ => { - self.waker.register(cx.waker()); - Poll::Pending - } - } - } -} - -#[cfg(test)] -mod tests { - use std::future::Future; - use std::pin::Pin; - use std::sync::atomic::AtomicBool; - use std::sync::atomic::Ordering; - use std::sync::Arc; - use std::task::Context; - use std::task::Poll; - use std::time::Duration; - - use assert_matches::assert_matches; - use dupe::Dupe; - use futures::FutureExt; - use parking_lot::Mutex; - use pin_project::pin_project; - use pin_project::pinned_drop; - - use crate::cancellation::future::make_cancellable_future; - use crate::cancellation::future::CancellationHandle; - - struct MaybePanicOnDrop { - panic: bool, - } - - impl Drop for MaybePanicOnDrop { - fn drop(&mut self) { - if self.panic { - panic!() - } - } - } - - #[tokio::test] - async fn test_ready() { - let (fut, _handle) = make_cancellable_future(|_| futures::future::ready(()).boxed()); - futures::pin_mut!(fut); - assert_matches!(futures::poll!(fut), Poll::Ready(Some(()))); - } - - #[tokio::test] - async fn test_cancel() { - let (fut, handle) = make_cancellable_future(|_| futures::future::pending::<()>().boxed()); - - futures::pin_mut!(fut); - - assert_matches!(futures::poll!(&mut fut), Poll::Pending); - - handle.cancel(); - - assert_matches!(futures::poll!(&mut fut), Poll::Ready(None)); - } - - #[tokio::test] - async fn test_cancel_never_polled() { - let (fut, handle) = make_cancellable_future(|_| futures::future::pending::<()>().boxed()); - - futures::pin_mut!(fut); - - handle.cancel(); - - assert_matches!(futures::poll!(&mut fut), Poll::Ready(None)); - } - - #[tokio::test] - async fn test_cancel_already_finished() { - let (fut, handle) = make_cancellable_future(|_| futures::future::ready::<()>(()).boxed()); - - futures::pin_mut!(fut); - assert_matches!(futures::poll!(&mut fut), Poll::Ready(Some(()))); - - handle.cancel(); - // this is okay - } - - #[tokio::test] - async fn test_wakeup() { - let (fut, handle) = make_cancellable_future(|_| futures::future::pending::<()>().boxed()); - - let task = tokio::task::spawn(fut); - futures::pin_mut!(task); - - assert_matches!( - tokio::time::timeout(Duration::from_millis(100), &mut task).await, - Err(..) 
- ); - - handle.cancel(); - - assert_matches!( - tokio::time::timeout(Duration::from_millis(100), &mut task).await, - Ok(Ok(None)) - ); - } - - #[tokio::test] - async fn test_is_dropped() { - let dropped = Arc::new(Mutex::new(false)); - - struct SetOnDrop { - dropped: Arc>, - } - - impl Drop for SetOnDrop { - fn drop(&mut self) { - *self.dropped.lock() = true; - } - } - - impl Future for SetOnDrop { - type Output = (); - - fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { - Poll::Ready(()) - } - } - - let (fut, _handle) = make_cancellable_future({ - let dropped = dropped.dupe(); - |_| SetOnDrop { dropped }.boxed() - }); - - let task = tokio::task::spawn(fut); - - task.await.unwrap(); - assert!(*dropped.lock()); - } - - #[tokio::test] - async fn test_critical_section() { - let (fut, handle) = make_cancellable_future(|cancellations| { - async { - { - cancellations.critical_section(tokio::task::yield_now).await; - } - futures::future::pending::<()>().await - } - .boxed() - }); - futures::pin_mut!(fut); - - // We reach the first yield. At this point there is one guard held by the critical section. - assert_matches!(futures::poll!(&mut fut), Poll::Pending); - - // Cancel, then poll again. Cancellation is checked, *then* the guard in the future - // is dropped and then immediately check for cancellation and yield. - handle.cancel(); - - // Poll again, this time we don't enter the future's poll because it is cancelled. - assert_matches!(futures::poll!(&mut fut), Poll::Ready(None)); - } - - #[tokio::test] - async fn test_critical_section_noop_drop_is_allowed() { - let (fut, _handle) = make_cancellable_future(|cancellations| { - async { - let section = cancellations.critical_section(futures::future::pending::<()>); - drop(section); // Drop it within an ExecutionContext - } - .boxed() - }); - - fut.await; - } - - #[tokio::test] - async fn test_nested_critical_section() { - let (fut, handle) = make_cancellable_future(|cancellations| { - async { - { - cancellations - .critical_section(|| async move { tokio::task::yield_now().await }) - .await; - } - futures::future::pending::<()>().await - } - .boxed() - }); - futures::pin_mut!(fut); - - // We reach the first yield. - assert_matches!(futures::poll!(&mut fut), Poll::Pending); - - handle.cancel(); - let res = fut.await; - - assert_eq!(res, None); - } - - #[tokio::test] - async fn test_critical_section_cancelled_during_poll() { - let handle_slot = Arc::new(Mutex::new(None::)); - - let (fut, handle) = make_cancellable_future({ - let handle_slot = handle_slot.dupe(); - - move |cancellations| { - async move { - { - handle_slot - .lock() - .take() - .expect("Expected the guard to be here by now") - .cancel(); - - cancellations - .critical_section(|| async { - let mut panic = MaybePanicOnDrop { panic: true }; - tokio::task::yield_now().await; - panic.panic = false; - }) - .await; - } - futures::future::pending::<()>().await - } - .boxed() - } - }); - futures::pin_mut!(fut); - - *handle_slot.lock() = Some(handle); - - // Run the future. It'll drop the guard (and cancel itself) after entering the critical - // section while it's being polled, but it'll proceed to the end. 
- fut.await; - } - - // Cases to test: - // - Basic - // - Reentrant - // - Cancel when exiting critical section (with no further wakeups) - - #[tokio::test] - async fn test_structured_cancellation_notifies() { - let (fut, handle) = make_cancellable_future(|cancellations| { - async { - cancellations - .with_structured_cancellation(|observer| observer) - .await; - } - .boxed() - }); - futures::pin_mut!(fut); - - // Proceed all the way to awaiting the observer - assert_matches!(futures::poll!(&mut fut), Poll::Pending); - - // Drop our guard. At this point we'll cancel, and notify the observer. - handle.cancel(); - assert_matches!(futures::poll!(&mut fut), Poll::Ready(..)); - } - - #[tokio::test] - async fn test_structured_cancellation_is_blocking() { - let (fut, handle) = make_cancellable_future(|cancellations| { - async { - cancellations - .with_structured_cancellation(|_observer| async move { - let mut panic = MaybePanicOnDrop { panic: true }; - tokio::task::yield_now().await; - panic.panic = false; - }) - .await; - } - .boxed() - }); - futures::pin_mut!(fut); - - // Proceed all the way to the first pending. - assert_matches!(futures::poll!(&mut fut), Poll::Pending); - - // Drop our guard. We should resume and disarm the guard. - handle.cancel(); - assert_matches!(futures::poll!(&mut fut), Poll::Ready(..)); - } - - #[tokio::test] - async fn test_structured_cancellation_cancels_on_exit() { - let (fut, handle) = make_cancellable_future(|cancellations| { - async { - cancellations - .with_structured_cancellation(|observer| observer) - .await; - futures::future::pending::<()>().await - } - .boxed() - }); - - futures::pin_mut!(fut); - - assert_matches!(futures::poll!(&mut fut), Poll::Pending); - - handle.cancel(); - assert_matches!(futures::poll!(&mut fut), Poll::Ready(None)); - } - - // This is a bit of an implementation detail. - #[tokio::test] - async fn test_structured_cancellation_returns_to_executor() { - let (fut, handle) = make_cancellable_future(|cancellations| { - async { - cancellations - .with_structured_cancellation(|observer| observer) - .await - } - .boxed() - }); - futures::pin_mut!(fut); - - assert_matches!(futures::poll!(&mut fut), Poll::Pending); - - handle.cancel(); - assert_matches!(futures::poll!(&mut fut), Poll::Ready(None)); - } - - #[tokio::test] - async fn test_structured_cancellation_is_reentrant() { - let (fut, handle) = make_cancellable_future(|cancellations| { - { - async move { - cancellations - .with_structured_cancellation(|o1| async move { - cancellations - .with_structured_cancellation(|o2| async move { - o2.await; - o1.await; - }) - .await; - }) - .await; - } - .boxed() - } - }); - futures::pin_mut!(fut); - - assert_matches!(futures::poll!(&mut fut), Poll::Pending); - - handle.cancel(); - assert_matches!(futures::poll!(&mut fut), Poll::Ready(..)); - } - - #[tokio::test] - async fn test_structured_cancellation_with_critical_section() { - let (fut, handle) = make_cancellable_future(|cancellations| { - async move { - cancellations - .critical_section(|| async move { - cancellations - .with_structured_cancellation(|observer| async move { - let mut panic = MaybePanicOnDrop { panic: true }; - tokio::task::yield_now().await; - panic.panic = false; - - // we should get the cancel notification - observer.await; - }) - .await; - }) - .await - } - .boxed() - }); - futures::pin_mut!(fut); - - // Proceed all the way to the first pending. - assert_matches!(futures::poll!(&mut fut), Poll::Pending); - - // Drop our guard. We should resume and disarm the guard. 
- handle.cancel(); - assert_matches!(futures::poll!(&mut fut), Poll::Ready(None)); - } - - #[tokio::test] - async fn test_structured_cancellation_can_be_reentered() { - let (fut, handle) = make_cancellable_future(|cancellations| { - async { - cancellations - .with_structured_cancellation(|_o1| async move {}) - .await; - cancellations - .with_structured_cancellation(|o2| async move { - o2.await; - }) - .await; - } - .boxed() - }); - futures::pin_mut!(fut); - - assert_matches!(futures::poll!(&mut fut), Poll::Pending); - - handle.cancel(); - assert_matches!(futures::poll!(&mut fut), Poll::Ready(..)); - } - - #[tokio::test] - async fn test_structured_cancellation_works_after_cancel() { - let (fut, handle) = make_cancellable_future(|cancellations| { - async move { - cancellations - .with_structured_cancellation(|_o1| async move { - tokio::task::yield_now().await; - // At this point we'll get cancelled. - cancellations - .with_structured_cancellation(|o2| async move { - o2.await; - }) - .await; - }) - .await; - } - .boxed() - }); - futures::pin_mut!(fut); - - assert_matches!(futures::poll!(&mut fut), Poll::Pending); - - handle.cancel(); - assert_matches!(futures::poll!(&mut fut), Poll::Ready(None)); - } - - #[tokio::test] - async fn test_disable_cancellation() { - let (fut, handle) = make_cancellable_future(|cancellations| { - async move { - assert!(cancellations.try_to_keep_going_on_cancellation().is_some()); - tokio::task::yield_now().await; - } - .boxed() - }); - futures::pin_mut!(fut); - - assert_matches!(futures::poll!(&mut fut), Poll::Pending); - - handle.cancel(); - assert_matches!(futures::poll!(&mut fut), Poll::Ready(Some(()))); - } - - #[tokio::test] - async fn test_disable_cancellation_already_canceled() { - let (fut, handle) = make_cancellable_future(|cancellations| { - async move { - assert!(cancellations.try_to_keep_going_on_cancellation().is_none()); - tokio::task::yield_now().await; - panic!("already canceled") - } - .boxed() - }); - futures::pin_mut!(fut); - - handle.cancel(); - assert_matches!(futures::poll!(&mut fut), Poll::Ready(None)); - } - - #[tokio::test] - async fn test_disable_cancellation_synced_with_structured_cancellation_already_cancelled() { - let (fut, handle) = make_cancellable_future(|cancellations| { - async move { - cancellations - .with_structured_cancellation(|obs| async move { - tokio::task::yield_now().await; - futures::pin_mut!(obs); - assert_matches!(futures::poll!(&mut obs), Poll::Ready(())); - - assert!(cancellations.try_to_keep_going_on_cancellation().is_none()); - }) - .await; - } - .boxed() - }); - futures::pin_mut!(fut); - - assert_matches!(futures::poll!(&mut fut), Poll::Pending); - - handle.cancel(); - assert_matches!(futures::poll!(&mut fut), Poll::Ready(None)); - } - - #[tokio::test] - async fn test_disable_cancellation_synced_with_structured_cancellation_not_cancelled() { - let (fut, handle) = make_cancellable_future(|cancellations| { - async move { - assert!(cancellations.try_to_keep_going_on_cancellation().is_some()); - - tokio::task::yield_now().await; - - cancellations - .with_structured_cancellation(|obs| async move { - futures::pin_mut!(obs); - assert_matches!(futures::poll!(&mut obs), Poll::Pending); - - assert!(cancellations.try_to_keep_going_on_cancellation().is_some()); - }) - .await; - } - .boxed() - }); - futures::pin_mut!(fut); - - assert_matches!(futures::poll!(&mut fut), Poll::Pending); - - handle.cancel(); - - assert_matches!(futures::poll!(&mut fut), Poll::Ready(Some(()))); - } - - #[tokio::test] - async fn 
test_finished_future_dropped_when_ready() { - #[pin_project(PinnedDrop)] - struct DropFuture(Arc<AtomicBool>); - - impl Future for DropFuture { - type Output = (); - - fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Self::Output> { - Poll::Ready(()) - } - } - - #[pinned_drop] - impl PinnedDrop for DropFuture { - fn drop(self: Pin<&mut Self>) { - self.0.store(true, Ordering::SeqCst); - } - } - - let is_dropped = Arc::new(AtomicBool::new(false)); - let fut = DropFuture(is_dropped.dupe()); - - let (fut, _handle) = make_cancellable_future(|_cancellations| fut.boxed()); - futures::pin_mut!(fut); - - assert_matches!(futures::poll!(&mut fut), Poll::Ready(Some(()))); - - assert!(is_dropped.load(Ordering::SeqCst)); - } - - #[tokio::test] - async fn test_finished_future_dropped_when_cancelled() { - struct DropFuture(Arc<AtomicBool>); - - impl Future for DropFuture { - type Output = (); - - fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Self::Output> { - Poll::Pending - } - } - - impl Drop for DropFuture { - fn drop(&mut self) { - self.0.store(true, Ordering::SeqCst); - } - } - - let is_dropped = Arc::new(AtomicBool::new(false)); - let fut = DropFuture(is_dropped.dupe()); - - let (fut, handle) = make_cancellable_future(|_cancellations| fut.boxed()); - - futures::pin_mut!(fut); - assert_matches!(futures::poll!(&mut fut), Poll::Pending); - - handle.cancel(); - - assert_matches!(futures::poll!(&mut fut), Poll::Ready(None)); - assert!(is_dropped.load(Ordering::SeqCst)); - } - - #[tokio::test] - async fn test_lambda_is_ran_without_poll() { - let mut panic = MaybePanicOnDrop { panic: true }; - tokio::task::yield_now().await; - panic.panic = false; - - let (fut, handle) = make_cancellable_future(move |_cancellations| { - panic.panic = false; - - async move { - panic!("polled"); - } - .boxed() - }); - futures::pin_mut!(fut); - - // cancel before any polls - handle.cancel(); - assert_matches!(futures::poll!(&mut fut), Poll::Ready(None)); - } - - #[tokio::test] - async fn test_critical_section_via_prevent_cancellation() { - let (fut, handle) = make_cancellable_future(|cancellations| { - async { - { - let prevent_cancellation = cancellations.begin_ignore_cancellation(); - tokio::task::yield_now().await; - - prevent_cancellation.allow_cancellations_again().await; - } - futures::future::pending::<()>().await - } - .boxed() - }); - futures::pin_mut!(fut); - - // We reach the first yield. At this point there is one guard held by the critical section. - assert_matches!(futures::poll!(&mut fut), Poll::Pending); - - // Cancel, then poll again. Cancellation is checked, *then* the guard in the future - // is dropped and then immediately check for cancellation and yield. - handle.cancel(); - - // Poll again, this time we don't enter the future's poll because it is cancelled. - assert_matches!(futures::poll!(&mut fut), Poll::Ready(None)); - } - - #[tokio::test] - async fn test_prevent_cancellation_drop_is_allowed() { - let (fut, handle) = make_cancellable_future(|cancellations| { - async { - let prevent_cancellation = cancellations.begin_ignore_cancellation(); - drop(prevent_cancellation); - - futures::future::pending::<()>().await - } - .boxed() - }); - - futures::pin_mut!(fut); - // We reach the first yield.
- assert_matches!(futures::poll!(&mut fut), Poll::Pending); - - handle.cancel(); - - fut.await; - } - - #[tokio::test] - async fn test_prevent_cancellation_is_reentrant() { - let mut panic = MaybePanicOnDrop { panic: true }; - tokio::task::yield_now().await; - panic.panic = false; - - let (fut, handle) = make_cancellable_future(|cancellations| { - async move { - { - let prevent1 = cancellations.begin_ignore_cancellation(); - let prevent2 = cancellations.begin_ignore_cancellation(); - - tokio::task::yield_now().await; - - prevent1.allow_cancellations_again().await; - - panic.panic = false; - - prevent2.allow_cancellations_again().await; - } - futures::future::pending::<()>().await - } - .boxed() - }); - futures::pin_mut!(fut); - - // We reach the first yield. - assert_matches!(futures::poll!(&mut fut), Poll::Pending); - - handle.cancel(); - let res = fut.await; - - assert_eq!(res, None); - } - - #[tokio::test] - async fn test_prevent_cancellation_cancellation_observer_notifies() { - let (fut, handle) = make_cancellable_future(|cancellations| { - async { - let prevent_cancellation = cancellations.begin_ignore_cancellation(); - prevent_cancellation.cancellation_observer().await; - } - .boxed() - }); - futures::pin_mut!(fut); - - // Proceed all the way to awaiting the observer - assert_matches!(futures::poll!(&mut fut), Poll::Pending); - - // Drop our guard. At this point we'll cancel, and notify the observer. - handle.cancel(); - assert_matches!(futures::poll!(&mut fut), Poll::Ready(..)); - } - - #[tokio::test] - async fn test_cancellation_observer_wakes_up_other_tasks() { - let (fut, handle) = make_cancellable_future(|cancellations| { - async { - let prevent_cancellation = cancellations.begin_ignore_cancellation(); - let observer = prevent_cancellation.cancellation_observer(); - - let _ignore = tokio::spawn(observer).await; - } - .boxed() - }); - futures::pin_mut!(fut); - - // Proceed all the way to awaiting the observer - assert_matches!(futures::poll!(&mut fut), Poll::Pending); - - // Drop our guard. At this point we'll cancel, and notify the observer. - handle.cancel(); - - fut.await; - } -} diff --git a/shed/more_futures/src/lib.rs b/shed/more_futures/src/lib.rs deleted file mode 100644 index 94e774c6977d2..0000000000000 --- a/shed/more_futures/src/lib.rs +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. 
- */ - -#![feature(assert_matches)] -#![feature(pin_deref_mut)] -// Plugins -#![cfg_attr(feature = "gazebo_lint", feature(plugin))] -#![cfg_attr(feature = "gazebo_lint", allow(deprecated))] // :( -#![cfg_attr(feature = "gazebo_lint", plugin(gazebo_lint))] - -pub mod cancellable_future; -pub mod cancellation; -pub mod drop; -pub mod instrumented_shared; -mod maybe_future; -pub mod owning_future; -pub mod spawn; -pub mod spawner; diff --git a/shed/provider/BUCK b/shed/provider/BUCK index 2e3a3099ef310..1793e29d24344 100644 --- a/shed/provider/BUCK +++ b/shed/provider/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") diff --git a/shed/provider/Cargo.toml b/shed/provider/Cargo.toml index 1519b870c131d..7f7e1a3e155c5 100644 --- a/shed/provider/Cargo.toml +++ b/shed/provider/Cargo.toml @@ -1,5 +1,7 @@ [package] +description = "std::any::Provider replacement" +edition = "2021" +license = { workspace = true } name = "provider" +repository = { workspace = true } version = "0.1.0" -edition = "2021" -description = "std::any::Provider replacement" diff --git a/shed/scribe_client/BUCK b/shed/scribe_client/BUCK new file mode 100644 index 0000000000000..8969a9e3e0a84 --- /dev/null +++ b/shed/scribe_client/BUCK @@ -0,0 +1,36 @@ +load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") + +oncall("build_infra") + +# @oss-disable: _is_oss = False +_is_oss = True # @oss-enable + +# buildifier: disable=no-effect +rust_library( + name = "scribe_client", + srcs = glob(["src/**/*.rs"]), + test_deps = [ + "fbsource//third-party/rust:assert_matches", + "//common/rust/shed/fbinit:fbinit-tokio", + "//scribe/api/producer/thrift:producer_service-rust-mocks", + ], + visibility = [ + "//buck2/...", + ], + deps = [ + "fbsource//third-party/rust:anyhow", + "fbsource//third-party/rust:crossbeam", + "fbsource//third-party/rust:thiserror", + "fbsource//third-party/rust:tokio", + "fbsource//third-party/rust:tokio-retry", + "fbsource//third-party/rust:tracing", + "//common/rust/shed/fbinit:fbinit", + "//common/rust/thrift/bareclient:thriftclient", + "//scribe/api/producer/thrift:producer_service-rust", + "//scribe/api/producer/thrift:producer_service-rust-clients", + "//scribe/api/producer/thrift:producer_service-rust-thriftclients", + "//scribe/api/producer/thrift:use_case-rust", + "//scribe/api/thrift:message_metadata-rust", + "//thrift/lib/rust:fbthrift", + ], +) if not _is_oss else None diff --git a/shed/scribe_client/README.md b/shed/scribe_client/README.md new file mode 100644 index 0000000000000..700926850aac2 --- /dev/null +++ b/shed/scribe_client/README.md @@ -0,0 +1,43 @@ +# Buck2 Scribe Client + +This folder houses Buck2's Scribe client, which Buck2 uses to send information +that powers all of our internal tooling around Buck2. Despite this client +serving the needs of Buck2, there is no Buck2-specific logic contained within +this library. + +See +[this post](https://fb.workplace.com/groups/buck2prototyping/posts/2829650903999058) +for justification of why this library exists and why it is here. This library is +intended to be an implementation detail of Buck2; please do not depend directly +on this library without speaking to us first. + +Buck2 writes to Scribe by interfacing directly with the Thrift service running +on port 1456 on all Meta-owned machines. 
In prod, the service listening on port +`1456` is +[`scribed`](https://www.internalfb.com/intern/wiki/Documentation/Scribe/), our +production Scribe daemon. In corp, or in non-Linux prod, the service listening +on this port is +[`scribbled`](https://www.internalfb.com/intern/wiki/Scribe/users/Knowledge_Base/Interacting_with_Scribe_categories/Write_from_Alternative_Environments/Scribble/). +Both services are expected to behave the same as far as this client is +concerned, so this client concerns itself only with using the +[ProducerService Thrift API](https://www.internalfb.com/intern/wiki/Scribe/users/Knowledge_Base/Interacting_with_Scribe_categories/producer/producer-service-thrift-api/) +to send messages to Scribe. + +Why don't we use the already-existing +[Rust wrapper around the ProducerService Thrift API](https://www.internalfb.com/intern/wiki/Scribe/users/Knowledge_Base/Interacting_with_Scribe_categories/producer/producer-service-thrift-api/#producerservice-thrift-c)? +Unfortunately, this library does not provide a few key features that we need in +Buck2: + +1. On Linux, this library + [defaults to using ServiceRouter to construct a client](https://fburl.com/code/15fy5dyk), + which is not acceptable for Buck2 (which often runs in environments where + ServiceRouter cannot function). +2. `ScribeProducer` presents an asynchronous API for pushing messages, which is + not acceptable for Buck2. +3. Buck2 needs functionality that exists in the C++ Scribe client: + specifically, intelligent retries and message buffering. The Rust + ProducerService client does not provide any of these things, and we would + need to implement them on top of the library anyway. + +While this library cannot build in OSS, the code is still available for people +to inspect. diff --git a/shed/scribe_client/src/lib.rs b/shed/scribe_client/src/lib.rs new file mode 100644 index 0000000000000..3be2e390c862c --- /dev/null +++ b/shed/scribe_client/src/lib.rs @@ -0,0 +1,112 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +#![feature(once_cell_try)] + +mod producer; + +use std::sync::Arc; +use std::sync::OnceLock; +use std::time::Duration; + +use fbinit::FacebookInit; +pub use producer::Message; +use tokio::runtime::Builder; + +use crate::producer::ProducerCounters; +use crate::producer::ScribeProducer; + +static PRODUCER: OnceLock<Arc<ScribeProducer>> = OnceLock::new(); + +/// Initializes the Scribe producer that backs all Scribe clients. Returns an error if a connection can't be +/// established to a remote Scribe daemon process. +fn initialize( + fb: FacebookInit, + buffer_size: usize, + retry_backoff: Duration, + retry_attempts: usize, + message_batch_size: Option<usize>, +) -> anyhow::Result<&'static ScribeProducer> { + Ok(&**PRODUCER.get_or_try_init(|| -> anyhow::Result<_> { + let producer = Arc::new(ScribeProducer::new( + fb, + buffer_size, + retry_backoff, + retry_attempts, + message_batch_size, + )?); + + // Instead of relying on any existing runtimes, we bootstrap a new tokio runtime bound to a single thread that + // we spawn here. Running on the same runtime as the rest of the program runs the risk of the producer loop not + // getting polled in a timely fashion, which leads directly to the message queue filling up and messages getting + // dropped.
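// Editor's note (aside, not part of the patch): the pattern used in
// `initialize` is a dedicated OS thread running its own current-thread tokio
// runtime, so the drain loop keeps getting polled even when the main
// runtime's workers are tied up in blocking work. With `SLEEP_INTERVAL` set
// to 500ms in `producer_loop` below, a message handed to `offer` waits at
// most roughly 500ms (plus send and retry time) before a flush is attempted.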
+ // + // We need a separate runtime for now because loading and analysis do large amounts of blocking work on Tokio + // runtime threads. + std::thread::Builder::new() + .name("scribe-producer".to_owned()) + .spawn({ + let producer = producer.clone(); + move || { + let runtime = Builder::new_current_thread().enable_all().build().unwrap(); + runtime.block_on(producer_loop(&producer)); + } + })?; + + Ok(producer) + })?) +} + +/// Task that drives the producer to regularly drain its queue. +async fn producer_loop(producer: &ScribeProducer) { + const SLEEP_INTERVAL: Duration = Duration::from_millis(500); + + loop { + producer.run_once().await; + tokio::time::sleep(SLEEP_INTERVAL).await; + } +} + +/// A Scribe client that sends messages to Scribe via `offer`. +pub struct ScribeClient { + scribe_producer: &'static ScribeProducer, +} + +impl ScribeClient { + pub fn new( + fb: FacebookInit, + buffer_size: usize, + retry_backoff: Duration, + retry_attempts: usize, + message_batch_size: Option<usize>, + ) -> anyhow::Result<ScribeClient> { + let scribe_producer = initialize( + fb, + buffer_size, + retry_backoff, + retry_attempts, + message_batch_size, + )?; + Ok(ScribeClient { scribe_producer }) + } + + pub fn export_counters(&self) -> ProducerCounters { + self.scribe_producer.export_counters() + } + + /// Offers a single message to Scribe. Does not block. + pub fn offer(&self, message: Message) { + self.scribe_producer.offer(message); + } + + /// Sends all messages in `messages` now (bypassing the internal message queue). + pub async fn send_messages_now(&self, messages: Vec<Message>) { + self.scribe_producer.send_messages_now(messages).await + } +} diff --git a/shed/scribe_client/src/producer.rs b/shed/scribe_client/src/producer.rs new file mode 100644 index 0000000000000..92d025b26b42b --- /dev/null +++ b/shed/scribe_client/src/producer.rs @@ -0,0 +1,1179 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! A producer of Scribe messages, which connects to a remote Scribe daemon and sends messages to it. +//! +//! There is expected to be at most one Scribe producer in any given process. The Scribe producer synchronously +//! receives messages from the process, which it places in a queue. As messages queue up, it periodically retires the +//! queue by flushing to the remote Scribe producer service. + +use std::convert::TryInto; +use std::net::IpAddr; +use std::net::Ipv6Addr; +use std::net::SocketAddr; +use std::sync::atomic; +use std::sync::atomic::AtomicU64; +use std::sync::Mutex; +use std::time::Duration; + +use crossbeam::queue::ArrayQueue; +use fbinit::FacebookInit; +use fbthrift::NonthrowingFunctionError; +use scribe_message_metadata::MessageMetadata; +use scribe_producer_service::consts::DEFAULT_PRODUCER_SERVICE_PORT; +use scribe_producer_service::WriteMessage; +use scribe_producer_service::WriteMessageResultCode; +use scribe_producer_service::WriteMessagesRequest; +use scribe_producer_service_clients::ProducerServiceClient; +use scribe_producer_service_thriftclients::build_ProducerService_client; +use scribe_use_cases::UseCase; +use thriftclient::ThriftChannelBuilder; +use thriftclient::TransportType; +use tokio_retry::strategy::FibonacciBackoff; + +/// A message, destined to be delivered to Scribe.
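// Editor's note: a minimal usage sketch of the client defined above, not part
// of the patch. The buffer/retry parameters are made-up values; deriving the
// message key by hashing a trace ID mirrors what the `Message` docs below say
// Buck2 does, with `DefaultHasher` standing in for whatever hash Buck2
// actually uses.
//
//     use std::collections::hash_map::DefaultHasher;
//     use std::hash::{Hash, Hasher};
//     use std::time::Duration;
//
//     fn send_event(fb: fbinit::FacebookInit, trace_id: &str, payload: Vec<u8>) -> anyhow::Result<()> {
//         let client = ScribeClient::new(fb, 10000, Duration::from_millis(500), 5, Some(100))?;
//         // Hash the trace ID into a sharding key, as the `Message` docs describe.
//         let mut hasher = DefaultHasher::new();
//         trace_id.hash(&mut hasher);
//         client.offer(Message {
//             category: "buck2_events".to_owned(),
//             message: payload,
//             message_key: Some(hasher.finish() as i64),
//         });
//         Ok(())
//     }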
+#[derive(Clone, Debug, Default)] +pub struct Message { + /// The category to publish this Scribe message to. + pub category: String, + + /// The message payload itself. + pub message: Vec<u8>, + + /// A message key to provide to Scribe. The message key is a 64-bit integer that Scribe will use when sharding a + /// category. To Scribe, a message key is the atomic unit of partitioning for a stream. All messages with a given + /// message key are guaranteed by Scribe to be processed by the same consumer shard. + /// + /// The number itself is arbitrary; for example, Buck2 hashes the trace ID and uses that as the message key. + pub message_key: Option<i64>, +} + +impl From<Message> for WriteMessage { + fn from(message: Message) -> WriteMessage { + let metadata = MessageMetadata { + messageKey: message.message_key, + ..Default::default() + }; + + WriteMessage { + category: message.category, + message: message.message, + metadata, + // TODO(swgillespie) add a buck2 use case? + useCase: UseCase::DEFAULT_RUST, + ..Default::default() + } + } +} + +struct MessageSendState { + message: Message, + error: Option<WriteMessageError>, +} + +/// These counters are a snapshot of the current state of this Scribe client. +#[derive(Debug, Default, Clone)] +pub struct ProducerCounters { + // Successful submissions to Scribe. + pub successes: u64, + // How many messages failed to be submitted to Scribe, by error type. + pub failures_invalid_request: u64, + pub failures_unauthorized: u64, + pub failures_rate_limited: u64, + pub failures_pushed_back: u64, + pub failures_enqueue_failed: u64, + pub failures_internal_error: u64, + pub failures_timed_out: u64, + pub failures_unknown: u64, + // Depth of the queue, e.g. how many messages need to be processed. + pub queue_depth: u64, + // How many messages were dropped before we even enqueued them (e.g. because the internal buffer is full). + pub dropped: u64, + /// How many bytes were written into this sink. + pub bytes_written: u64, +} + +impl ProducerCounters { + pub fn failures(&self) -> u64 { + let ProducerCounters { + successes: _, + failures_invalid_request, + failures_unauthorized, + failures_rate_limited, + failures_pushed_back, + failures_enqueue_failed, + failures_internal_error, + failures_timed_out, + failures_unknown, + queue_depth: _, + dropped: _, + bytes_written: _, + } = self; + *failures_invalid_request + + *failures_unauthorized + + *failures_rate_limited + + *failures_pushed_back + + *failures_enqueue_failed + + *failures_internal_error + + *failures_timed_out + + *failures_unknown + } +} + +#[derive(Debug, Default)] +struct ProducerCountersData { + successes: AtomicU64, + failures_invalid_request: AtomicU64, + failures_unauthorized: AtomicU64, + failures_rate_limited: AtomicU64, + failures_pushed_back: AtomicU64, + failures_enqueue_failed: AtomicU64, + failures_internal_error: AtomicU64, + failures_timed_out: AtomicU64, + failures_unknown: AtomicU64, + dropped: AtomicU64, + bytes_written: AtomicU64, +} + +// This congestion control has 2 states (phases), each of which changes its behavior depending on whether +// congestion has occurred. The concept of the algorithm is based on TCP Reno, which also consists of +// two states. Our implementation is a much-simplified version, but we try not to diverge from the original +// concept on purpose. +#[derive(Debug)] +enum CongestionControlPhase { + /// Every message batch begins with the EarlyFail phase where, if it fails to queue, + /// the batch is cut by half until the length that is successfully queued is found.
+ EarlyFail, + /// In this phase, the client tries recovering back the batch size linearly so it can send + /// as many messages as possible while keeping from causing congestion. If it fails to push + /// the batch to the queue, it cuts the size down to `(current_cutoff + cliff_bottom) / 2` + /// where `cliff_bottom` is the successful cutoff we memorize when the last cutoff happened. + FastRecovery { step: usize }, +} + +#[derive(Debug)] +struct CongestionControlState { + phase: CongestionControlPhase, + current_cutoff: usize, + + // If we draw a line chart with the send attempt count on x-axis and cutoff size on y-axis, the line will + // be going upward linearly while the traffic isn't congested, and it looks like a cliff when a congestion + // happens in this algorithm. We need to store the top and bottom value of the cliff since they'll be used + // in computing the next cutoff when a congestion happens again. + // https://fburl.com/px/le1398zy and https://fburl.com/gsheet/de3aa4ux are the illustrations that help + // visually understand it using the example of the value changes in `normal_cutoff_computations` test. + cliff_top: usize, + cliff_bottom: usize, +} + +/// The number of steps for a message batch to get recovered to the original length. +/// The smaller it is, the sooner the recovery will be, but the less chance it has to +/// find a proper and stable batch length. +const FAST_RECOVERY_STEPS: usize = 10; + +impl CongestionControlState { + fn new(initial_cutoff: usize) -> Self { + Self { + phase: CongestionControlPhase::EarlyFail, + current_cutoff: initial_cutoff, + cliff_top: 0, + cliff_bottom: 0, + } + } + + /// Update cutoff based on the current conditions + fn update_cutoff(&mut self, congested: bool) { + match self.phase { + CongestionControlPhase::EarlyFail => { + if congested { + // The +1 avoids the situations where 1) cutting down to zero and + // 2) making 1-leftover for odd numbers that adds one redundant iteration + let new_cutoff = self.current_cutoff / 2 + 1; + self.cliff_top = self.current_cutoff; + self.cliff_bottom = new_cutoff; + self.current_cutoff = new_cutoff; + } else { + self.phase = CongestionControlPhase::FastRecovery { + step: self.compute_recovery_amount(), + }; + } + } + CongestionControlPhase::FastRecovery { step } => { + if congested { + let mut new_cutoff = (self.current_cutoff + self.cliff_bottom) / 2 + 1; + if new_cutoff == self.current_cutoff || new_cutoff == self.current_cutoff + 1 { + // Congested but reached the bottom of cliff. Exploring down deeper. + self.cliff_bottom /= 2; + new_cutoff = (self.current_cutoff + self.cliff_bottom) / 2 + 1; + } + self.cliff_top = self.current_cutoff; + self.cliff_bottom = new_cutoff; + self.current_cutoff = new_cutoff; + self.phase = CongestionControlPhase::FastRecovery { + step: self.compute_recovery_amount(), + }; + } else { + self.current_cutoff += step; + } + } + } + } + + /// Compute and return the recovery amount of each step + fn compute_recovery_amount(&self) -> usize { + self.cliff_top.saturating_sub(self.current_cutoff) / FAST_RECOVERY_STEPS + 1 + } + + fn load_last_cutoff(&mut self, last_cutoff: usize, init_cliff_top: usize) { + self.current_cutoff = last_cutoff; + self.cliff_top = init_cliff_top; + self.phase = CongestionControlPhase::FastRecovery { + step: self.compute_recovery_amount(), + }; + } +} + +/// A client of the Scribe ProducerService that buffers, retries, and sends messages to a remote Scribe daemon. 
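// Editor's note (worked example, not part of the patch): a short trace of
// `CongestionControlState::update_cutoff` above for a batch of 8 messages,
// matching the `last_cutoff_is_memorized` test further down:
//
//     new(8)               -> EarlyFail, current_cutoff = 8
//     update_cutoff(true)  -> EarlyFail, current_cutoff = 8/2 + 1 = 5 (cliff_top = 8, cliff_bottom = 5)
//     update_cutoff(false) -> FastRecovery { step: (8 - 5)/10 + 1 = 1 }, cutoff stays at 5
//     update_cutoff(false) -> cutoff 6, then 7, ... (+step per uncongested send)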
+pub(crate) struct ScribeProducer { + fb: FacebookInit, + client: tokio::sync::Mutex<ProducerServiceClient>, + queue: ArrayQueue<Message>, + counters: ProducerCountersData, + retry_backoff: Duration, + retry_attempts: usize, + message_batch_size: Option<usize>, + last_cutoff: Mutex<Option<usize>>, +} + +impl ScribeProducer { + pub(crate) fn new( + fb: FacebookInit, + buffer_size: usize, + retry_backoff: Duration, + retry_attempts: usize, + message_batch_size: Option<usize>, + ) -> anyhow::Result<ScribeProducer> { + let client = connect(fb)?; + let queue = ArrayQueue::new(buffer_size); + Ok(ScribeProducer { + fb, + client: tokio::sync::Mutex::new(client), + queue, + counters: ProducerCountersData::default(), + retry_backoff, + retry_attempts, + message_batch_size, + last_cutoff: Mutex::new(None), + }) + } + + /// Offers a message to this Scribe producer. Does not block. + pub(crate) fn offer(&self, message: Message) { + if self.queue.push(message).is_err() { + tracing::debug!("Scribe producer dropping message due to full buffer"); + self.counters + .dropped + .fetch_add(1, atomic::Ordering::Relaxed); + } + } + + /// Scrape counters for reporting to upstream event logging. + pub(crate) fn export_counters(&self) -> ProducerCounters { + ProducerCounters { + successes: self.counters.successes.load(atomic::Ordering::Relaxed), + failures_invalid_request: self + .counters + .failures_invalid_request + .load(atomic::Ordering::Relaxed), + failures_unauthorized: self + .counters + .failures_unauthorized + .load(atomic::Ordering::Relaxed), + failures_rate_limited: self + .counters + .failures_rate_limited + .load(atomic::Ordering::Relaxed), + failures_pushed_back: self + .counters + .failures_pushed_back + .load(atomic::Ordering::Relaxed), + failures_enqueue_failed: self + .counters + .failures_enqueue_failed + .load(atomic::Ordering::Relaxed), + failures_internal_error: self + .counters + .failures_internal_error + .load(atomic::Ordering::Relaxed), + failures_timed_out: self + .counters + .failures_timed_out + .load(atomic::Ordering::Relaxed), + failures_unknown: self + .counters + .failures_unknown + .load(atomic::Ordering::Relaxed), + dropped: self.counters.dropped.load(atomic::Ordering::Relaxed), + bytes_written: self.counters.bytes_written.load(atomic::Ordering::Relaxed), + // So we get an accurate snapshot of the queue depth when scraping + // metrics, do this here and now rather than in the background. + queue_depth: self.queue.len() as u64, + } + } + + /// Sends all messages in `messages` now (bypassing the internal message queue). + pub(crate) async fn send_messages_now(&self, messages: Vec<Message>) { + self.send_impl(messages).await; + } + + async fn refresh_connection( + &self, + client_: &mut tokio::sync::MutexGuard<'_, ProducerServiceClient>, + ) -> anyhow::Result<()> { + let new_client = connect(self.fb)?; + **client_ = new_client; + Ok(()) + } + + fn make_retry_intervals(&self) -> Vec<Duration> { + // Why not use tokio_retry::Retry? We don't want to wholesale re-run the + // entire request because we need to mutate the request to filter out + // successful messages.
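// Editor's note (illustration, not part of the patch): with
// `retry_backoff = 100ms` and `retry_attempts = 5`, the iterator built below
// yields [0ms, 100ms, 100ms, 200ms, 300ms]: an immediate first attempt
// followed by Fibonacci backoff from `tokio_retry`.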
+ let retry_backoff_ms: u64 = self.retry_backoff.as_millis().try_into().unwrap_or(0); + std::iter::once(Duration::from_millis(0)) + .chain(FibonacciBackoff::from_millis(retry_backoff_ms)) + .take(self.retry_attempts) + .collect() + } + + async fn send_impl(&self, messages: Vec<Message>) { + if messages.is_empty() { + return; + } + + let mut messages: Vec<MessageSendState> = messages + .into_iter() + .map(|message| MessageSendState { + message, + error: None, + }) + .collect(); + + let retry_intervals: Vec<Duration> = self.make_retry_intervals(); + + let mut cc_state = CongestionControlState::new(messages.len()); + let original_len = messages.len(); + let mut retry_count = 0; + + let mut cutoff_len; + let mut success_count; + let mut retryable_error_count; + + if let Some(last_cutoff) = *self.last_cutoff.lock().unwrap() { + cc_state.load_last_cutoff(last_cutoff, messages.len()); + } + + while retry_count < retry_intervals.len() { + cutoff_len = std::cmp::min(messages.len(), cc_state.current_cutoff); + success_count = 0; + retryable_error_count = 0; + + tracing::debug!( + "retry_count={}, interval={:?}, current_cutoff={}, {} remained in batch", + retry_count, + retry_intervals[retry_count], + cc_state.current_cutoff, + messages.len() + ); + + tokio::time::sleep(retry_intervals[retry_count]).await; + + let req = WriteMessagesRequest { + messages: messages[..cutoff_len] + .iter() + .map(|message| message.message.clone().into()) + .collect(), + ..Default::default() + }; + + // Mutex block for `self.client` + let results = { + let mut client_ = self.client.lock().await; + match client_.WriteMessages(&req).await { + Ok(result) => result.results, + Err(e) => { + tracing::debug!( + "scribe_producer: received fatal error from scribe, will retry: {:#}", + e, + ); + match e { + NonthrowingFunctionError::ThriftError(te) => { + // Error on the Thrift transport layer. The channel likely got EOF due to any of: the + // server hitting its connection limit, a connection age timeout, a server connection + // idle timeout, or a server crash that voids the endpoint. + if te + .to_string() + .contains("apache::thrift::transport::TTransportException") + { + tracing::debug!( + "The existing connection reached EOF. Reconnecting." + ); + // If an error happens during re-connection, ignore it because the build + // command has already started and we don't want to bail out. + let _ignore = self.refresh_connection(&mut client_).await; + } + } + NonthrowingFunctionError::ApplicationException(_) => { + // Error on the Thrift application layer. Rarely happens as long as we use an + // official thrift library. No special action is taken; just retry. + } + } + retry_count += 1; + continue; + } + } + }; + + let mut write_result_iter = results.iter(); + messages.retain_mut(|message| { + write_result_iter.next().map_or(true, |result| { + match result_code_into_result(result.code) { + Ok(_) => { + self.counters + .successes + .fetch_add(1, atomic::Ordering::Relaxed); + + // Unwrap safety: an individual message can't be so large that its length + // isn't representable in 64 bits.
+ self.counters.bytes_written.fetch_add( + message.message.message.len().try_into().unwrap(), + atomic::Ordering::Relaxed, + ); + success_count += 1; + false + } + Err(e) => { + if e.is_retryable() { + if message.error.is_none() { + message.error = Some(e); + } + retryable_error_count += 1; + true + } else { + e.inc_counter(&self.counters); + false + } + } + } + }) + }); + if messages.is_empty() { + break; + } + + if cutoff_len == retryable_error_count { + // Congested; all messages were pushed back with retryable errors, which means + // Scribed/Scribble couldn't prepare enough buffer to hold the message vector. + cc_state.update_cutoff(true); + retry_count += 1; + } else if cutoff_len == success_count { + // Success; all the messages up to the cutoff were successfully processed. + cc_state.update_cutoff(false); + } else { + // Partial success; there were some unretryable errors, or partial successes, or both, + // not because of congestion. Try again with a longer interval. + retry_count += 1; + } + tracing::debug!( + "Updated cc_state: {:?}; new cutoff: {}", + cc_state, + cc_state.current_cutoff + ); + } + + // Any messages leftover after exiting the loop are ones we failed to send after exhausting retries and + // should be counted as errors. + if !messages.is_empty() { + tracing::debug!("scribe_producer: failed to send all messages"); + } + for message in &messages { + match &message.error { + None => { + self.counters + .failures_unknown + .fetch_add(1, atomic::Ordering::Relaxed); + } + Some(e) => e.inc_counter(&self.counters), + } + } + + if cc_state.current_cutoff < original_len { + *self.last_cutoff.lock().unwrap() = Some(cc_state.current_cutoff); + } else { + *self.last_cutoff.lock().unwrap() = None; + } + } + + pub(crate) async fn run_once(&self) { + if self.queue.is_empty() { + return; + } + + let mut messages: Vec<Message> = vec![]; + let count = self.message_batch_size.unwrap_or_else(|| self.queue.len()); + for _ in 0..count { + match self.queue.pop() { + Some(msg) => messages.push(msg), + None => break, + } + } + + self.send_impl(messages).await; + } +} + +/// Connect to Scribe producer service (local Scribe daemon) via Thrift interface. +fn connect(fb: FacebookInit) -> anyhow::Result<ProducerServiceClient> { + let addr = SocketAddr::new( + IpAddr::V6(Ipv6Addr::LOCALHOST), + DEFAULT_PRODUCER_SERVICE_PORT as u16, + ); + build_ProducerService_client( + ThriftChannelBuilder::from_sock_addr(fb, addr)? + .with_conn_timeout(1000) + .with_recv_timeout(1000) + .with_channel_pool(false) + .with_transport_type(TransportType::Header) + // By default, ThriftChannelBuilder will initiate a TLS handshake with the target server. This works fine + // on production machines, where Chef has set up certificates that make this successful; on corp machines, + // this is not the case, and we must turn it off otherwise we will fail to connect to the local Scribe + // daemon. + // + // Disabling TLS on a localhost connection is fine anyway since there's no way this traffic ever leaves the + // machine. + .with_secure(false), + ) +} + +// Errors returned from Scribe's Producer API. Each corresponds to a return code +// from WriteMessages. +// From: https://fburl.com/code/2jrnviz0 +#[derive(Debug, thiserror::Error)] +pub enum WriteMessageError { + // Returned if the message is evaluated to be invalid (e.g., invalid message + // size, unregistered category, invalid application bucket).
+ #[error("invalid request")] + InvalidRequest, + // Returned if the Thrift client has attempted to write to a category + // which it lacks permissions for. + #[error("unauthorized")] + Unauthorized, + // Returned if the message is dropped for rate limiting reasons + // (e.g., the category is blocklisted or sampled). + // + // Retrying upon this result code may ingest a message successfully due to + // sampling or if the category is removed from the blocklist in the meantime. + // However, if retry is needed all the time, the pushback write enforcement + // is a better choice. + // + // Please see the following links for more information about the blocking, + // sampling and pushback write enforcement types. + // - https://fburl.com/wiki/6hvwsuir + // - https://fburl.com/wiki/4clxpwm6 + // - https://fburl.com/wiki/7b9vr52e + #[error("ratelimited")] + RateLimited, + // Returned if the request is throttled because the category of the message + // has reached its write rate limit. + // + // It is up to the client to retry its request upon this result code after + // some time. + // + // Please see the following links for more information about the pushback + // write enforcement type: + // - https://fburl.com/wiki/6hvwsuir + // - https://fburl.com/wiki/7b9vr52e + #[error("pushed back")] + PushedBack, + // Returned if the message could not be handled by the Scribe service, + // possibly because clients have been sending too many requests in a short + // period of time and the Scribe service is overloaded. + #[error("enqueue failed")] + EnqueueFailed, + // Returned if an error occurred inside the Scribe service. + #[error("internal error")] + InternalError, + // Returned in case of the request timeout. + #[error("timed out")] + TimedOut, + // Any other errors that arise. 
+ #[error("unknown")] + Unknown, +} + +impl WriteMessageError { + pub fn is_retryable(&self) -> bool { + std::matches!( + self, + Self::RateLimited + | Self::PushedBack + | Self::EnqueueFailed + | Self::InternalError + | Self::TimedOut + | Self::Unknown + ) + } + + fn inc_counter(&self, counter: &ProducerCountersData) { + let counter = match self { + WriteMessageError::InvalidRequest => &counter.failures_invalid_request, + WriteMessageError::Unauthorized => &counter.failures_unauthorized, + WriteMessageError::RateLimited => &counter.failures_rate_limited, + WriteMessageError::PushedBack => &counter.failures_pushed_back, + WriteMessageError::EnqueueFailed => &counter.failures_enqueue_failed, + WriteMessageError::InternalError => &counter.failures_internal_error, + WriteMessageError::TimedOut => &counter.failures_timed_out, + WriteMessageError::Unknown => &counter.failures_unknown, + }; + counter.fetch_add(1, atomic::Ordering::Relaxed); + } +} + +fn result_code_into_result(code: WriteMessageResultCode) -> Result<(), WriteMessageError> { + match code { + WriteMessageResultCode::OK => Ok(()), + WriteMessageResultCode::INVALID_REQUEST => Err(WriteMessageError::InvalidRequest), + WriteMessageResultCode::UNAUTHORIZED => Err(WriteMessageError::Unauthorized), + WriteMessageResultCode::RATE_LIMITED => Err(WriteMessageError::RateLimited), + WriteMessageResultCode::PUSHED_BACK => Err(WriteMessageError::PushedBack), + WriteMessageResultCode::ENQUEUE_FAILED => Err(WriteMessageError::EnqueueFailed), + WriteMessageResultCode::INTERNAL_ERROR => Err(WriteMessageError::InternalError), + WriteMessageResultCode::TIMED_OUT => Err(WriteMessageError::TimedOut), + _ => Err(WriteMessageError::Unknown), + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use assert_matches::assert_matches; + use fbthrift::application_exception::ApplicationException; + use scribe_producer_service::WriteMessageResult as ThriftWriteMessageResult; + use scribe_producer_service::WriteMessagesResponse; + use scribe_producer_service_clients::errors::WriteMessagesError; + use scribe_producer_service_clients::ProducerService; + + use super::*; + + #[allow(non_snake_case)] + fn make_ScribeProducer( + fb: FacebookInit, + client: ProducerServiceClient, + queue_size: usize, + ) -> ScribeProducer { + ScribeProducer { + fb, + client: tokio::sync::Mutex::new(client), + queue: ArrayQueue::new(queue_size), + counters: ProducerCountersData::default(), + retry_backoff: Duration::from_millis(0), + retry_attempts: 5, + message_batch_size: None, + last_cutoff: Mutex::new(None), + } + } + + #[fbinit::test] + async fn success_smoke_test(fb: FacebookInit) { + let client = Arc::new(scribe_producer_service_mocks::new::()); + client.WriteMessages.mock(|req| { + assert_eq!(req.messages.len(), 1); + let msg = &req.messages[0]; + assert_eq!(msg.category, "buck2_events"); + assert_eq!(msg.message, b"hello, world!".to_vec()); + assert_eq!(msg.metadata.messageKey, Some(42)); + WriteMessagesResponse { + results: vec![ThriftWriteMessageResult { + code: WriteMessageResultCode::OK, + ..Default::default() + }], + ..Default::default() + } + }); + + let producer = make_ScribeProducer(fb, client, 5); + + let message = Message { + category: "buck2_events".to_owned(), + message: b"hello, world!".to_vec(), + message_key: Some(42), + }; + + producer.offer(message); + producer.run_once().await; + let counters = producer.export_counters(); + println!("counters: {:?}", counters); + assert_eq!(counters.successes, 1); + assert_eq!(counters.failures(), 0); + } + + // Make a 
mock ProducerService that returns the provided codes in the same order + fn mock_client( + mut mock_return_code_groups: Vec<Vec<WriteMessageResultCode>>, + ) -> ProducerServiceClient { + let client = Arc::new(scribe_producer_service_mocks::new::<dyn ProducerService>()); + mock_return_code_groups.reverse(); + client.WriteMessages.mock(move |req| { + let mock_return_code_group = mock_return_code_groups.pop().unwrap_or_else(|| { + panic!("WriteMessages() was called more than the mock ProducerService expected."); + }); + assert!( + mock_return_code_group.len() == req.messages.len(), + "Mock ProducerService received the different number of messages than it expected.", + ); + + WriteMessagesResponse { + results: mock_return_code_group + .iter() + .map(|code| ThriftWriteMessageResult { + code: *code, + ..Default::default() + }) + .collect(), + ..Default::default() + } + }); + + client + } + + fn message(contents: &'static str) -> Message { + Message { + category: "buck2_events".to_owned(), + message: contents.as_bytes().to_vec(), + ..Default::default() + } + } + + #[fbinit::test] + async fn run_once_retries_and_all_succeed(fb: FacebookInit) { + let codes = vec![ + vec![ + WriteMessageResultCode::OK, + WriteMessageResultCode::OK, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + ], + vec![ + WriteMessageResultCode::OK, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + ], + vec![WriteMessageResultCode::OK, WriteMessageResultCode::OK], + ]; + let client = mock_client(codes.clone()); + let producer = make_ScribeProducer(fb, client, codes[0].len()); + + for _ in codes[0].iter() { + producer.offer(message("hello, world!")); + } + producer.run_once().await; + let counters = producer.export_counters(); + assert_eq!(counters.successes, 5); + assert_eq!(counters.failures(), 0); + } + + #[fbinit::test] + async fn run_once_does_not_retry_terminal_failures(fb: FacebookInit) { + let codes = vec![ + vec![ + WriteMessageResultCode::OK, + WriteMessageResultCode::OK, + WriteMessageResultCode::UNAUTHORIZED, + WriteMessageResultCode::INVALID_REQUEST, + WriteMessageResultCode::ENQUEUE_FAILED, + ], + vec![WriteMessageResultCode::OK], + ]; + let client = mock_client(codes.clone()); + let producer = make_ScribeProducer(fb, client, codes[0].len()); + + for _ in codes[0].iter() { + producer.offer(message("hello, world!")); + } + producer.run_once().await; + let counters = producer.export_counters(); + println!("counters: {:?}", counters); + assert_eq!(counters.successes, 3); + assert_eq!(counters.failures(), 2); + } + + #[fbinit::test] + async fn run_once_does_not_retry_after_max_retries(fb: FacebookInit) { + let codes = vec![ + vec![ + WriteMessageResultCode::OK, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + ], + vec![ + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::OK, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + ], + vec![ + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::OK, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + ], + vec![ + WriteMessageResultCode::OK, + WriteMessageResultCode::ENQUEUE_FAILED, +
WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + ], + vec![ + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::OK, + WriteMessageResultCode::ENQUEUE_FAILED, + ], + ]; + let client = mock_client(codes.clone()); + let producer = make_ScribeProducer(fb, client, codes[0].len()); + for _ in codes[0].iter() { + producer.offer(message("hello, world!")); + } + + producer.run_once().await; + let counters = producer.export_counters(); + assert_eq!(counters.successes, 5); + assert_eq!(counters.failures(), 2); + } + + #[fbinit::test] + async fn all_messages_are_retried_if_write_messages_fails(fb: FacebookInit) { + let client = Arc::new(scribe_producer_service_mocks::new::<dyn ProducerService>()); + client.WriteMessages.mock_result(|_| { + Err(WriteMessagesError::ApplicationException( + ApplicationException::unknown_method(), + )) + }); + + let producer = make_ScribeProducer(fb, client, 5); + for _ in 0..5 { + producer.offer(message("hello, world!")); + } + + producer.run_once().await; + let counters = producer.export_counters(); + assert_eq!(counters.successes, 0); + assert_eq!(counters.failures(), 5); + } + + #[fbinit::test] + async fn send_one_message_with_cutoff_retries_and_succeeds(fb: FacebookInit) { + let codes = vec![ + vec![WriteMessageResultCode::ENQUEUE_FAILED], + // Last one shouldn't be cut off + vec![WriteMessageResultCode::OK], + ]; + let client = mock_client(codes.clone()); + let producer = make_ScribeProducer(fb, client, codes[0].len()); + producer + .send_messages_now(vec![message("hello, world!")]) + .await; + let counters = producer.export_counters(); + assert_eq!(counters.successes, 1); + assert_eq!(counters.failures(), 0); + } + + #[fbinit::test] + async fn run_once_with_cutoff_retries_and_all_succeed(fb: FacebookInit) { + let codes = vec![ + vec![ + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + ], + vec![ + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + ], + vec![ + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + ], + vec![WriteMessageResultCode::OK, WriteMessageResultCode::OK], + vec![WriteMessageResultCode::OK, WriteMessageResultCode::OK], + vec![ + WriteMessageResultCode::OK, + WriteMessageResultCode::OK, + WriteMessageResultCode::OK, + ], + vec![WriteMessageResultCode::OK], + ]; + let client = mock_client(codes.clone()); + let producer = make_ScribeProducer(fb, client, codes[0].len()); + + for _ in codes[0].iter() { + producer.offer(message("hello, world!")); + } + producer.run_once().await; + let counters = producer.export_counters(); + assert_eq!(counters.successes, 8); + assert_eq!(counters.failures(), 0); + } + + #[fbinit::test] + async fn run_once_with_cutoff_does_not_retry_after_max_retries(fb: FacebookInit) { + let codes = vec![ + vec![ + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, +
WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + ], + vec![ + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + ], + vec![ + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + ], + vec![ + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + ], + vec![ + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + ], + ]; + let client = mock_client(codes.clone()); + let producer = make_ScribeProducer(fb, client, codes[0].len()); + for _ in codes[0].iter() { + producer.offer(message("hello, world!")); + } + + producer.run_once().await; + let counters = producer.export_counters(); + assert_eq!(counters.successes, 0); + assert_eq!(counters.failures(), 8); + } + + #[fbinit::test] + async fn last_cutoff_is_memorized(fb: FacebookInit) { + let codes = vec![ + // The first batch begins with cutoff_index = 8 + vec![ + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + WriteMessageResultCode::ENQUEUE_FAILED, + ], + vec![ + WriteMessageResultCode::OK, + WriteMessageResultCode::OK, + WriteMessageResultCode::OK, + WriteMessageResultCode::OK, + WriteMessageResultCode::OK, + ], + vec![ + WriteMessageResultCode::OK, + WriteMessageResultCode::OK, + WriteMessageResultCode::OK, + ], + // The second batch begins with cutoff_index = 5 + vec![ + WriteMessageResultCode::OK, + WriteMessageResultCode::OK, + WriteMessageResultCode::OK, + WriteMessageResultCode::OK, + WriteMessageResultCode::OK, + ], + vec![ + WriteMessageResultCode::OK, + WriteMessageResultCode::OK, + WriteMessageResultCode::OK, + ], + ]; + let client = mock_client(codes.clone()); + let producer = make_ScribeProducer(fb, client, 8); + for _ in 0..8 { + producer.offer(message("hello, world!")); + } + producer.run_once().await; + for _ in 0..8 { + producer.offer(message("hello, world!")); + } + producer.run_once().await; + let counters = producer.export_counters(); + assert_eq!(counters.successes, 16); + assert_eq!(counters.failures(), 0); + } + + #[test] + fn normal_cutoff_computations() { + let mut cc_state = CongestionControlState::new(1000); + + // Congested; 1000 -> 501 + cc_state.update_cutoff(true); + assert_matches!( + cc_state, + CongestionControlState { + phase: CongestionControlPhase::EarlyFail, + current_cutoff: 501, + cliff_top: 1000, + cliff_bottom: 501, + } + ); + + // Congested; 501 -> 251 + cc_state.update_cutoff(true); + assert_matches!( + cc_state, + CongestionControlState { + phase: CongestionControlPhase::EarlyFail, + current_cutoff: 251, + cliff_top: 501, + cliff_bottom: 251, + } + ); + + // Not congested; 251 -> 251; phase shifted to FastRecovery + cc_state.update_cutoff(false); + assert_matches!( + cc_state, + CongestionControlState { + phase: CongestionControlPhase::FastRecovery { step: 26 }, + current_cutoff: 251, + cliff_top: 501, + cliff_bottom: 251, + } + ); + + // Not congested; 251 -> 277 (+26) + cc_state.update_cutoff(false); + assert_matches!( + cc_state, + CongestionControlState { + phase: CongestionControlPhase::FastRecovery { step: 26 }, + current_cutoff: 
277, + cliff_top: 501, + cliff_bottom: 251, + } + ); + + // Not congested; 277 -> 303 (+26) + cc_state.update_cutoff(false); + assert_matches!( + cc_state, + CongestionControlState { + phase: CongestionControlPhase::FastRecovery { step: 26 }, + current_cutoff: 303, + cliff_top: 501, + cliff_bottom: 251, + } + ); + + // Not congested x10; +26 per step; recovers beyond the cliff top + for _ in 0..10 { + cc_state.update_cutoff(false); + } + assert_matches!( + cc_state, + CongestionControlState { + phase: CongestionControlPhase::FastRecovery { step: 26 }, + current_cutoff: 563, + cliff_top: 501, + cliff_bottom: 251, + } + ); + + // Congested; 563 -> 408 (cut down to the mid point of current value and cliff bottom) + cc_state.update_cutoff(true); + assert_matches!( + cc_state, + CongestionControlState { + phase: CongestionControlPhase::FastRecovery { step: 16 }, + current_cutoff: 408, + cliff_top: 563, + cliff_bottom: 408, + } + ); + + // Congested; 408 -> 307 (cut down to below the cliff bottom) + cc_state.update_cutoff(true); + assert_matches!( + cc_state, + CongestionControlState { + phase: CongestionControlPhase::FastRecovery { step: 11 }, + current_cutoff: 307, + cliff_top: 408, + cliff_bottom: 307, + } + ); + + // Not congested; 307 -> 318 (+11) + cc_state.update_cutoff(false); + assert_matches!( + cc_state, + CongestionControlState { + phase: CongestionControlPhase::FastRecovery { step: 11 }, + current_cutoff: 318, + cliff_top: 408, + cliff_bottom: 307, + } + ); + } + + #[test] + fn last_one_is_not_cut_off() { + let mut cc_state = CongestionControlState::new(1); + + cc_state.update_cutoff(true); + assert_eq!(cc_state.current_cutoff, 1); + cc_state.update_cutoff(false); + cc_state.update_cutoff(true); + assert_eq!(cc_state.current_cutoff, 1); + } +} diff --git a/shed/static_interner/BUCK b/shed/static_interner/BUCK new file mode 100644 index 0000000000000..7ec0a26cf503c --- /dev/null +++ b/shed/static_interner/BUCK @@ -0,0 +1,15 @@ +load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") + +oncall("build_infra") + +rust_library( + name = "static_interner", + srcs = glob(["src/**/*.rs"]), + crate_root = "src/lib.rs", + deps = [ + "fbsource//third-party/rust:equivalent", + "//buck2/allocative/allocative:allocative", + "//buck2/gazebo/dupe:dupe", + "//buck2/shed/lock_free_hashtable:lock_free_hashtable", + ], +) diff --git a/shed/static_interner/Cargo.toml b/shed/static_interner/Cargo.toml new file mode 100644 index 0000000000000..86c394b25126a --- /dev/null +++ b/shed/static_interner/Cargo.toml @@ -0,0 +1,13 @@ +[package] +description = "Similar to `internment` crate, but with interface and performance tweaks" +edition = "2021" +license = { workspace = true } +name = "static_interner" +repository = { workspace = true } +version = "0.1.0" + +[dependencies] +allocative = { workspace = true } +dupe = { workspace = true } +equivalent = { workspace = true } +lock_free_hashtable = { workspace = true } diff --git a/shed/static_interner/src/lib.rs b/shed/static_interner/src/lib.rs new file mode 100644 index 0000000000000..c77f662eb1d93 --- /dev/null +++ b/shed/static_interner/src/lib.rs @@ -0,0 +1,368 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! Intern objects in memory. +//! +//! 
This is similar to the [`internment` crate](https://github.com/droundy/internment) +//! but with changes for performance and flexibility. + +use std::cmp::Ordering; +use std::collections::hash_map::DefaultHasher; +use std::fmt; +use std::fmt::Display; +use std::fmt::Formatter; +use std::hash::Hash; +use std::hash::Hasher; +use std::marker::PhantomData; +use std::mem; +use std::ops::Deref; +use std::ptr; + +use allocative::Allocative; +use allocative::Visitor; +use dupe::Dupe; +pub use equivalent::Equivalent; +use lock_free_hashtable::sharded::ShardedLockFreeRawTable; + +pub struct Interner<T: 'static, H = DefaultHasher> { + table: ShardedLockFreeRawTable<Box<InternedData<T>>, 64>, + _marker: PhantomData<H>, +} + +/// This structure is similar to `Hashed`, but it is not parameterized by hash function. +#[derive(Debug)] +struct InternedData<T> { + data: T, + hash: u64, +} + +/// An interned pointer. +/// +/// Equality of this type is a pointer comparison. +/// But note, this works correctly only if the `Intern` pointers were created +/// from the same instance of `Interner`. +#[derive(Debug)] +pub struct Intern<T: 'static> { + pointer: &'static InternedData<T>, +} + +// TODO(nga): derive. +impl<T: Allocative + 'static> Allocative for Intern<T> { + fn visit<'a, 'b: 'a>(&self, visitor: &'a mut Visitor<'b>) { + let mut visitor = visitor.enter_self_sized::<Self>(); + if mem::size_of::<T>() > 0 { + let visitor = visitor.enter_shared( + allocative::Key::new("pointer"), + mem::size_of::<*const T>(), + &**self as &T as *const T as *const (), + ); + if let Some(mut visitor) = visitor { + (**self).visit(&mut visitor); + visitor.exit(); + } + } + } +} + +impl<T: 'static> Copy for Intern<T> {} + +impl<T: 'static> Clone for Intern<T> { + #[inline] + fn clone(&self) -> Self { + *self + } +} + +impl<T: 'static> Dupe for Intern<T> { + #[inline] + fn dupe(&self) -> Self { + *self + } +} + +impl<T: 'static> Deref for Intern<T> { + type Target = T; + + #[inline] + fn deref(&self) -> &T { + &self.pointer.data + } +} + +impl<T: 'static> Intern<T> { + #[inline] + pub const fn deref_static(&self) -> &'static T { + &self.pointer.data + } + + /// SAFETY: This may only be called with pointers returned from [`Self::deref_static`] + #[inline] + pub const unsafe fn from_ptr(p: *const T) -> Self { + // SAFETY: `p` is a pointer to the `data` field of an `InternedData` + unsafe { + let p = p + .cast::<u8>() + .sub(std::mem::offset_of!(InternedData<T>, data)) + .cast::<InternedData<T>>(); + Self { pointer: &*p } + } + } +} + +impl<T: 'static> Hash for Intern<T> { + fn hash<H: Hasher>(&self, state: &mut H) { + // We could hash only the pointer, since we only compare the pointers, + // but users may expect hashing to be stable between runs. + self.pointer.hash.hash(state); + } +} + +impl<T: 'static> PartialEq for Intern<T> { + #[inline] + fn eq(&self, other: &Self) -> bool { + ptr::eq(self.pointer, other.pointer) + } +} + +impl<T: 'static> Eq for Intern<T> {} + +impl<T: PartialOrd + 'static> PartialOrd for Intern<T> { + fn partial_cmp(&self, other: &Self) -> Option<Ordering> { + self.pointer.data.partial_cmp(&other.pointer.data) + } +} + +impl<T: Ord + 'static> Ord for Intern<T> { + fn cmp(&self, other: &Self) -> Ordering { + self.pointer.data.cmp(&other.pointer.data) + } +} + +impl<T: Display + 'static> Display for Intern<T> { + #[inline] + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + Display::fmt(&self.pointer.data, f) + } +} + +/// Hash the value before acquiring the lock. +struct Hashed<T, H> { + hash: u64, + value: T, + _marker: PhantomData<H>, +} + +impl<T: Hash, H: Hasher + Default> Hashed<T, H> { + /// Compute the hash.
+ fn hash(value: &T) -> u64 { + let mut hasher = H::default(); + value.hash(&mut hasher); + hasher.finish() + } + + fn new(value: T) -> Self { + let hash = Self::hash(&value); + Hashed { + hash, + value, + _marker: PhantomData, + } + } +} + +impl<T: 'static, H> Interner<T, H> { + /// Create a new interner for a given type. + pub const fn new() -> Interner<T, H> { + Interner { + table: ShardedLockFreeRawTable::new(), + _marker: PhantomData, + } + } +} + +impl<T: 'static, H: Hasher + Default> Interner<T, H> { + /// Allocate a value, or return previously allocated one. + pub fn intern<Q>(&'static self, value: Q) -> Intern<T> + where + Q: Hash + Equivalent<T> + Into<T>, + T: Eq + Hash, + { + let hashed = Hashed::<_, H>::new(value); + if let Some(pointer) = self + .table + .lookup(hashed.hash, |t| hashed.value.equivalent(&t.data)) + { + return Intern { pointer }; + } + + self.intern_slow(hashed) + } + + #[cold] + fn intern_slow<Q>(&'static self, hashed_value: Hashed<Q, H>) -> Intern<T> + where + Q: Hash + Equivalent<T> + Into<T>, + T: Eq + Hash, + { + let pointer = Box::new(InternedData { + data: hashed_value.value.into(), + hash: hashed_value.hash, + }); + let pointer = self + .table + .insert( + hashed_value.hash, + pointer, + |a, b| a.hash == b.hash && a.data == b.data, + |t| t.hash, + ) + .0; + Intern { pointer } + } + + /// Get a value if it has been interned. + pub fn get<Q>(&'static self, key: Q) -> Option<Intern<T>> + where + Q: Hash + Equivalent<T>, + T: Eq + Hash, + { + let hashed = Hashed::<_, H>::new(key); + self.table + .lookup(hashed.hash, |t| hashed.value.equivalent(&t.data)) + .map(|pointer| Intern { pointer }) + } + + /// Iterate over the interned values. + #[inline] + pub fn iter(&'static self) -> Iter<T> { + Iter { + iter: self.table.iter(), + _marker: PhantomData, + } + } +} + +pub struct Iter<T: 'static> { + iter: lock_free_hashtable::sharded::Iter<'static, Box<InternedData<T>>, 64>, + _marker: PhantomData<T>, +} + +impl<T: 'static> Iterator for Iter<T> { + type Item = Intern<T>; + + #[inline] + fn next(&mut self) -> Option<Self::Item> { + self.iter.next().map(|pointer| Intern { pointer }) + } +} + +#[cfg(test)] +mod tests { + use std::collections::BTreeSet; + + use equivalent::Equivalent; + + use crate::Intern; + use crate::Interner; + + static STRING_INTERNER: Interner<String> = Interner::new(); + + #[derive(Hash, Eq, PartialEq)] + struct StrRef<'a>(&'a str); + + #[test] + fn test_intern() { + assert_eq!( + STRING_INTERNER.intern("hello".to_owned()), + STRING_INTERNER.intern("hello".to_owned()) + ); + assert_eq!( + STRING_INTERNER.intern("hello".to_owned()), + STRING_INTERNER.intern(StrRef("hello")), + ); + assert_ne!( + STRING_INTERNER.intern("hello".to_owned()), + STRING_INTERNER.intern("world".to_owned()) + ); + } + + // Make sure things work with reallocation.
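// Editor's note (sketch, not part of the patch): because `Intern` equality is
// a pointer comparison, re-interning an equal value hands back the same
// allocation:
//
//     let a = STRING_INTERNER.intern("x".to_owned());
//     let b = STRING_INTERNER.intern("x".to_owned());
//     assert_eq!(a, b);
//     assert!(std::ptr::eq(a.deref_static(), b.deref_static()));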
+    #[test]
+    fn test_resize() {
+        let mut interned_strings = Vec::new();
+        for i in 0..100000 {
+            let s = i.to_string();
+            let interned = STRING_INTERNER.intern(s.clone());
+            assert_eq!(&s, &*interned);
+            interned_strings.push(interned);
+        }
+
+        for s in &interned_strings {
+            let interned = STRING_INTERNER.intern(String::clone(s));
+            assert_eq!(*s, interned);
+        }
+    }
+
+    impl Equivalent<String> for StrRef<'_> {
+        fn equivalent(&self, key: &String) -> bool {
+            self.0 == key
+        }
+    }
+
+    impl From<StrRef<'_>> for String {
+        fn from(value: StrRef<'_>) -> Self {
+            value.0.to_owned()
+        }
+    }
+
+    static TEST_GET_INTERNER: Interner<String> = Interner::new();
+    #[test]
+    fn test_get() {
+        let interner = &TEST_GET_INTERNER;
+        assert_eq!(interner.get(StrRef("hello")), None);
+        assert_eq!(interner.get("hello".to_owned()), None);
+
+        let interned = interner.intern("hello".to_owned());
+        assert_eq!(interner.get(StrRef("hello")), Some(interned));
+        assert_eq!(interner.get("hello".to_owned()), Some(interned));
+        assert_eq!(interner.get(StrRef("world")), None);
+    }
+
+    static TEST_ITER_INTERNER: Interner<&'static str> = Interner::new();
+    #[test]
+    fn test_iter() {
+        let interner = &TEST_ITER_INTERNER;
+        assert_eq!(
+            interner
+                .iter()
+                .map(|v| *v)
+                .collect::<BTreeSet<_>>(),
+            BTreeSet::from([])
+        );
+        interner.intern("hello");
+        interner.intern("cat");
+        interner.intern("world");
+
+        assert_eq!(
+            interner
+                .iter()
+                .map(|v| *v)
+                .collect::<BTreeSet<_>>(),
+            BTreeSet::from(["hello", "cat", "world"])
+        );
+    }
+
+    static TEST_POINTER_INTERNER: Interner<&'static str> = Interner::new();
+    #[test]
+    fn test_pointer_roundtrip() {
+        let one = TEST_POINTER_INTERNER.intern("one");
+        let one_p = one.deref_static() as *const _;
+        assert_eq!(one, unsafe { Intern::from_ptr(one_p) });
+    }
+}
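For reference, a minimal sketch of how this interner is meant to be consumed (the names `NAME_INTERNER`, `intern_name`, and `demo` are illustrative, and the crate path `static_interner` is an assumption; the pattern mirrors the tests above). Interners are declared as statics so that `intern`, which takes `&'static self`, can hand out `Intern<T>` handles backed by `'static` allocations:

    // Illustrative sketch only; names and the crate path are assumptions.
    use static_interner::Intern;
    use static_interner::Interner;

    static NAME_INTERNER: Interner<String> = Interner::new();

    fn intern_name(name: String) -> Intern<String> {
        // Equal values intern to the same 'static allocation, so comparing
        // the returned handles is a cheap pointer comparison.
        NAME_INTERNER.intern(name)
    }

    fn demo() {
        let a = intern_name("hello".to_owned());
        let b = intern_name("hello".to_owned());
        assert!(a == b); // same underlying pointer
        let s: &'static str = a.deref_static().as_str();
        assert_eq!(s, "hello");
    }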
diff --git a/shed/three_billion_instructions/BUCK b/shed/three_billion_instructions/BUCK
index 8157e6afdcc7a..8eff5a487e8bf 100644
--- a/shed/three_billion_instructions/BUCK
+++ b/shed/three_billion_instructions/BUCK
@@ -1,11 +1,23 @@
 load("@fbcode_macros//build_defs:rust_binary.bzl", "rust_binary")
-load("@fbsource//tools/build_defs:glob_defs.bzl", "glob")
+load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library")
 
 oncall("build_infra")
 
-rust_binary(
+rust_library(
     name = "three_billion_instructions",
     srcs = glob(
         ["src/**/*.rs"],
     ),
+    deps = [
+        "fbsource//third-party/rust:thiserror",
+    ],
+)
+
+rust_binary(
+    name = "three_billion_instructions-bin",
+    srcs = ["bin/three_billion_instructions.rs"],
+    crate_root = "bin/three_billion_instructions.rs",
+    deps = [
+        ":three_billion_instructions",
+    ],
 )
diff --git a/shed/three_billion_instructions/Cargo.toml b/shed/three_billion_instructions/Cargo.toml
index a7a700c345180..e932e52097040 100644
--- a/shed/three_billion_instructions/Cargo.toml
+++ b/shed/three_billion_instructions/Cargo.toml
@@ -1,7 +1,11 @@
 [package]
+authors = ["Meta"]
+description = "Run 3B instructions"
+edition = "2021"
+license = { workspace = true }
 name = "three_billion_instructions"
+repository = { workspace = true }
 version = "0.1.0"
-edition = "2021"
-description = "Run 3B instructions"
-license = "MIT OR Apache-2.0"
-authors = ["Meta"]
+
+[dependencies]
+thiserror = { workspace = true }
diff --git a/shed/three_billion_instructions/bin/three_billion_instructions.rs b/shed/three_billion_instructions/bin/three_billion_instructions.rs
new file mode 100644
index 0000000000000..380449ce16752
--- /dev/null
+++ b/shed/three_billion_instructions/bin/three_billion_instructions.rs
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+use three_billion_instructions::three_billion_instructions;
+
+/// Run 3B instructions.
+fn main() -> Result<(), three_billion_instructions::Error> {
+    three_billion_instructions()
+}
diff --git a/shed/three_billion_instructions/src/lib.rs b/shed/three_billion_instructions/src/lib.rs
new file mode 100644
index 0000000000000..1b2843dcab75b
--- /dev/null
+++ b/shed/three_billion_instructions/src/lib.rs
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+#[derive(thiserror::Error, Debug)]
+#[error("Unsupported architecture")]
+pub struct Error(());
+
+/// Run 3B instructions.
+pub fn three_billion_instructions() -> Result<(), Error> {
+    #[allow(unused_mut)]
+    let mut x = 1_000_000_000u64;
+
+    #[cfg(target_arch = "x86_64")]
+    unsafe {
+        std::arch::asm!(
+            "2:",
+            "sub {0:r}, 1",
+            "cmp {0:r}, 0",
+            "jne 2b",
+            inout(reg) x,
+        );
+    }
+
+    #[cfg(target_arch = "aarch64")]
+    unsafe {
+        std::arch::asm!(
+            "2:",
+            "sub {0:x}, {0:x}, 1",
+            "cmp {0:x}, 0",
+            "bne 2b",
+            inout(reg) x,
+        );
+    }
+
+    if !cfg!(any(target_arch = "x86_64", target_arch = "aarch64")) {
+        return Err(Error(()));
+    }
+
+    assert_eq!(x, 0);
+    Ok(())
+}
diff --git a/shed/three_billion_instructions/src/main.rs b/shed/three_billion_instructions/src/main.rs
deleted file mode 100644
index c03078fa9b6e6..0000000000000
--- a/shed/three_billion_instructions/src/main.rs
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */
-
-/// Run 3B instructions.
-fn main() {
-    let mut x = 1_000_000_000u64;
-
-    #[cfg(target_arch = "x86_64")]
-    unsafe {
-        std::arch::asm!(
-            "2:",
-            "sub {0:r}, 1",
-            "cmp {0:r}, 0",
-            "jne 2b",
-            inout(reg) x,
-        );
-    }
-
-    #[cfg(target_arch = "aarch64")]
-    unsafe {
-        std::arch::asm!(
-            "2:",
-            "sub {0:x}, {0:x}, 1",
-            "cmp {0:x}, 0",
-            "bne 2b",
-            inout(reg) x,
-        );
-    }
-
-    assert_eq!(x, 0);
-}
diff --git a/shim/.buckconfig b/shim/.buckconfig
index e69de29bb2d1d..d6f453f72b545 100644
--- a/shim/.buckconfig
+++ b/shim/.buckconfig
@@ -0,0 +1,38 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+[cells]
+gh_facebook_buck2_shims_meta = .
+
+[cell_aliases]
+root = gh_facebook_buck2_shims_meta
+
+# This is a duplicate of .buckconfig.d/common.buckconfig, but due to the way
+# we vendor the shim directory, common.buckconfig won't show up in the
+# vendored .buckconfig.d directory.
+# When the shim cell is no longer vendored in individual projects, this
+# duplication can be removed.
+[cells] +prelude = prelude +none = none + +[cell_aliases] +config = prelude +ovr_config = prelude +bazel_skylib = gh_facebook_buck2_shims_meta +buck = gh_facebook_buck2_shims_meta +fbcode = gh_facebook_buck2_shims_meta +fbcode_macros = gh_facebook_buck2_shims_meta +fbsource = gh_facebook_buck2_shims_meta +shim = gh_facebook_buck2_shims_meta +toolchains = gh_facebook_buck2_shims_meta + +[external_cells] +prelude = bundled + +[parser] +target_platform_detector_spec = target:root//...->prelude//platforms:default target:shim//...->prelude//platforms:default diff --git a/shim/.buckroot b/shim/.buckroot new file mode 100644 index 0000000000000..a869e838b4c7c --- /dev/null +++ b/shim/.buckroot @@ -0,0 +1,6 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. diff --git a/shim/.gitignore b/shim/.gitignore index a1412f7fa8e00..ca95b0e4631fb 100644 --- a/shim/.gitignore +++ b/shim/.gitignore @@ -1,3 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + # We currently expect end users to run reindeer vendor themselves # so mark these things as to ignore /third-party/rust/.cargo/ diff --git a/shim/BUCK b/shim/BUCK index 9e740dde55dac..42230090524af 100644 --- a/shim/BUCK +++ b/shim/BUCK @@ -1,5 +1,75 @@ -load("@prelude//toolchains:demo.bzl", "system_demo_toolchains") +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. -# All the default toolchains, suitable for a quick demo or early prototyping. -# Most real projects should copy/paste the implementation to configure them. 
-system_demo_toolchains() +load("@prelude//toolchains:cxx.bzl", "system_cxx_toolchain") +load("@prelude//toolchains:genrule.bzl", "system_genrule_toolchain") +load("@prelude//toolchains:go.bzl", "system_go_bootstrap_toolchain", "system_go_toolchain") +load("@prelude//toolchains:haskell.bzl", "system_haskell_toolchain") +load("@prelude//toolchains:ocaml.bzl", "system_ocaml_toolchain") +load("@prelude//toolchains:python.bzl", "system_python_bootstrap_toolchain", "system_python_toolchain") +load("@prelude//toolchains:remote_test_execution.bzl", "remote_test_execution_toolchain") +load("@prelude//toolchains:rust.bzl", "system_rust_toolchain") + +oncall("open_source") + +system_cxx_toolchain( + name = "cxx", + cxx_flags = ["-std=c++20"], + link_flags = select({ + "DEFAULT": [], + "prelude//os:linux": [ + "-latomic", + ], + }), + visibility = ["PUBLIC"], +) + +system_genrule_toolchain( + name = "genrule", + visibility = ["PUBLIC"], +) + +system_go_toolchain( + name = "go", + visibility = ["PUBLIC"], +) + +system_go_bootstrap_toolchain( + name = "go_bootstrap", + visibility = ["PUBLIC"], +) + +system_haskell_toolchain( + name = "haskell", + visibility = ["PUBLIC"], +) + +system_ocaml_toolchain( + name = "ocaml", + visibility = ["PUBLIC"], +) + +system_python_toolchain( + name = "python", + visibility = ["PUBLIC"], +) + +system_python_bootstrap_toolchain( + name = "python_bootstrap", + visibility = ["PUBLIC"], +) + +system_rust_toolchain( + name = "rust", + default_edition = "2021", + visibility = ["PUBLIC"], +) + +remote_test_execution_toolchain( + name = "remote_test_execution", + visibility = ["PUBLIC"], +) diff --git a/shim/CODE_OF_CONDUCT.md b/shim/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000..3232ed665566e --- /dev/null +++ b/shim/CODE_OF_CONDUCT.md @@ -0,0 +1,80 @@ +# Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to make participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic +address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a +professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. 
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies within all project spaces, and it also applies when
+an individual is representing the project or its community in public spaces.
+Examples of representing a project or community include using an official
+project e-mail address, posting via an official social media account, or acting
+as an appointed representative at an online or offline event. Representation of
+a project may be further defined and clarified by project maintainers.
+
+This Code of Conduct also applies outside the project spaces when there is a
+reasonable belief that an individual's behavior may have a negative impact on
+the project or its community.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at <opensource-conduct@fb.com>. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see
+https://www.contributor-covenant.org/faq
diff --git a/shim/CONTRIBUTING.md b/shim/CONTRIBUTING.md
new file mode 100644
index 0000000000000..541779c332e6a
--- /dev/null
+++ b/shim/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing
+We want to make contributing to this project as easy and transparent as
+possible.
+
+## Pull Requests
+We actively welcome your pull requests.
+
+1. Fork the repo and create your branch from `main`.
+2. If you've added code that should be tested, add tests.
+3. If you've changed APIs, update the documentation.
+4. Ensure the test suite passes.
+5. Make sure your code lints.
+6. If you haven't already, complete the Contributor License Agreement ("CLA").
+
+## Contributor License Agreement ("CLA")
+In order to accept your pull request, we need you to submit a CLA. You only need
+to do this once to work on any of Facebook's open source projects.
+
+Complete your CLA here: <https://code.facebook.com/cla>
+
+## Issues
+We use GitHub issues to track public bugs. Please ensure your description is
+clear and has sufficient instructions to be able to reproduce the issue.
+
+Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe
+disclosure of security bugs. In those cases, please go through the process
+outlined on that page and do not file a public issue.
+
+## License
+By contributing to this repo, you agree that your contributions will be licensed
+under the LICENSE file in the root directory of this source tree.
diff --git a/shim/LICENSE-APACHE b/shim/LICENSE-APACHE new file mode 100644 index 0000000000000..b09cd7856d585 --- /dev/null +++ b/shim/LICENSE-APACHE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/shim/LICENSE-MIT b/shim/LICENSE-MIT new file mode 100644 index 0000000000000..b93be90515ccd --- /dev/null +++ b/shim/LICENSE-MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Meta Platforms, Inc. and affiliates. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/shim/PACKAGE b/shim/PACKAGE new file mode 100644 index 0000000000000..782e6f913ff4e --- /dev/null +++ b/shim/PACKAGE @@ -0,0 +1,15 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load(":cfg.bzl", "SHIM_ALIASES", "set_cfg_constructor", "get_shim_modifiers") +load("@prelude//cfg/modifier:set_cfg_modifiers.bzl", "set_cfg_modifiers") + +# Activate cfg modifiers from CLI / PACKAGE / targets +set_cfg_constructor(SHIM_ALIASES) + +modifiers = get_shim_modifiers() +set_cfg_modifiers(modifiers) diff --git a/shim/README.md b/shim/README.md index 6b3b272221541..72345d6ba0568 100644 --- a/shim/README.md +++ b/shim/README.md @@ -1,3 +1,18 @@ -# Open Source Shim +# Buck2 Shims for Meta -These files are a shim that allow us to build Buck2 with Buck2 outside Meta in the open source world. +These files implement shims for Meta internal buck2 cells, macros, and targets. + +Via these shims, the buck2 experience when building Meta open source projects +should be nearly identical to the internal buck2 experience. + +## These shims are not recommended for non-Meta projects!!! + +Prefer to use [rules from the buck2 prelude](https://buck2.build/docs/prelude/globals/) +and the [buck2 build apis](https://buck2.build/docs/api/build/globals/) + +## License + +This source code is licensed under both the MIT license found in the +LICENSE-MIT file in the root directory of this source tree and the Apache +License, Version 2.0 found in the LICENSE-APACHE file in the root directory +of this source tree. 
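To make the shimming concrete, here is a hedged sketch of what a BUCK file in a Meta open source project looks like when built through these shims (the target name is illustrative; the `rust_library` macro and the `fbsource//third-party/rust:thiserror` dependency resolve through the shim cell aliases defined in shim/.buckconfig, in the same way as the shed/three_billion_instructions change earlier in this patch):

    load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library")

    oncall("build_infra")

    rust_library(
        name = "example",  # illustrative target name
        srcs = glob(["src/**/*.rs"]),
        deps = [
            # `fbsource` is aliased to the shim cell, which maps
            # third-party/rust targets onto crates vendored with Reindeer.
            "fbsource//third-party/rust:thiserror",
        ],
    )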
diff --git a/shim/antlir/fbpkg/fbpkg.bzl b/shim/antlir/fbpkg/fbpkg.bzl
new file mode 100644
index 0000000000000..21c0104326e78
--- /dev/null
+++ b/shim/antlir/fbpkg/fbpkg.bzl
@@ -0,0 +1,17 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+def _builder(**_):
+    pass
+
+def _buck_opts(**_):
+    pass
+
+fbpkg = struct(
+    builder = _builder,
+    buck_opts = _buck_opts,
+)
diff --git a/shim/buck2/buck_rust_binary.bzl b/shim/buck2/buck_rust_binary.bzl
index aad0fdab72b92..ebd00026216f0 100644
--- a/shim/buck2/buck_rust_binary.bzl
+++ b/shim/buck2/buck_rust_binary.bzl
@@ -6,7 +6,7 @@
 # of this source tree.
 
 load(
-    "//:shims.bzl",
+    "@shim//:shims.bzl",
     _rust_binary = "rust_binary",
 )
 
diff --git a/shim/buck2/proto_defs.bzl b/shim/buck2/proto_defs.bzl
index 5cafa19acc391..468a3be310fc6 100644
--- a/shim/buck2/proto_defs.bzl
+++ b/shim/buck2/proto_defs.bzl
@@ -6,7 +6,7 @@
 # of this source tree.
 
 load(
-    "//:shims.bzl",
+    "@shim//:shims.bzl",
     _rust_protobuf_library = "rust_protobuf_library",
 )
 
diff --git a/shim/build_defs/auto_headers.bzl b/shim/build_defs/auto_headers.bzl
new file mode 100644
index 0000000000000..3c1324a6eea8a
--- /dev/null
+++ b/shim/build_defs/auto_headers.bzl
@@ -0,0 +1,36 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+load("@prelude//utils:buckconfig.bzl", "read_choice")
+
+AutoHeaders = struct(
+    NONE = "none",
+    # Infer headers from sources of the rule.
+    SOURCES = "sources",
+)
+
+_VALUES = [
+    AutoHeaders.NONE,
+    AutoHeaders.SOURCES,
+]
+
+def get_auto_headers(auto_headers):
+    """
+    Returns the level of auto-headers to apply to a rule.
+
+    Args:
+        auto_headers: One of the values in `AutoHeaders`
+
+    Returns:
+        The value passed in as auto_headers, or the value from configuration if
+        `auto_headers` is None
+    """
+    if auto_headers != None:
+        if auto_headers not in _VALUES:
+            fail("unexpected `auto_headers` value: {}".format(repr(auto_headers)))
+        return auto_headers
+    return read_choice("cxx", "auto_headers", _VALUES, AutoHeaders.SOURCES)
diff --git a/shim/build_defs/config.bzl b/shim/build_defs/config.bzl
new file mode 100644
index 0000000000000..cddd1a978905d
--- /dev/null
+++ b/shim/build_defs/config.bzl
@@ -0,0 +1,13 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+def _get_build_mode():
+    return ""
+
+config = struct(
+    get_build_mode = _get_build_mode,
+)
diff --git a/shim/build_defs/cpp_benchmark.bzl b/shim/build_defs/cpp_benchmark.bzl
new file mode 100644
index 0000000000000..928f62d2c555e
--- /dev/null
+++ b/shim/build_defs/cpp_benchmark.bzl
@@ -0,0 +1,9 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+def cpp_benchmark(**_):
+    pass
diff --git a/shim/build_defs/cpp_binary.bzl b/shim/build_defs/cpp_binary.bzl
new file mode 100644
index 0000000000000..89bf161e7b23b
--- /dev/null
+++ b/shim/build_defs/cpp_binary.bzl
@@ -0,0 +1,10 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+load("@shim//:shims.bzl", _cpp_binary = "cpp_binary")
+
+cpp_binary = _cpp_binary
diff --git a/shim/build_defs/cpp_library.bzl b/shim/build_defs/cpp_library.bzl
new file mode 100644
index 0000000000000..d69787b947c83
--- /dev/null
+++ b/shim/build_defs/cpp_library.bzl
@@ -0,0 +1,10 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+load("@shim//:shims.bzl", _cpp_library = "cpp_library")
+
+cpp_library = _cpp_library
diff --git a/shim/build_defs/cpp_unittest.bzl b/shim/build_defs/cpp_unittest.bzl
new file mode 100644
index 0000000000000..2c4103ebd8bb6
--- /dev/null
+++ b/shim/build_defs/cpp_unittest.bzl
@@ -0,0 +1,10 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+load("@shim//:shims.bzl", _cpp_unittest = "cpp_unittest")
+
+cpp_unittest = _cpp_unittest
diff --git a/shim/build_defs/custom_rule.bzl b/shim/build_defs/custom_rule.bzl
new file mode 100644
index 0000000000000..03aac5904f5ca
--- /dev/null
+++ b/shim/build_defs/custom_rule.bzl
@@ -0,0 +1,9 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+def custom_rule(**_):
+    pass
diff --git a/shim/build_defs/custom_unittest.bzl b/shim/build_defs/custom_unittest.bzl
new file mode 100644
index 0000000000000..f5aa4cf8f06e5
--- /dev/null
+++ b/shim/build_defs/custom_unittest.bzl
@@ -0,0 +1,9 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+def custom_unittest(**_):
+    pass
diff --git a/shim/build_defs/cython_library.bzl b/shim/build_defs/cython_library.bzl
new file mode 100644
index 0000000000000..dac5b8bcae441
--- /dev/null
+++ b/shim/build_defs/cython_library.bzl
@@ -0,0 +1,11 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@shim//build_defs:python_library.bzl", "python_library") + +def cython_library(name, visibility = ["PUBLIC"], **_): + python_library(name = name, visibility = visibility) diff --git a/shim/build_defs/export_files.bzl b/shim/build_defs/export_files.bzl index 01a9fa37fedce..25ae03f6288e3 100644 --- a/shim/build_defs/export_files.bzl +++ b/shim/build_defs/export_files.bzl @@ -6,4 +6,10 @@ # of this source tree. def export_file(visibility = ["PUBLIC"], **kwargs): + # @lint-ignore BUCKLINT: avoid "native is forbidden in fbcode" native.export_file(visibility = visibility, **kwargs) + +def export_files(files, visibility = ["PUBLIC"], **kwargs): + # @lint-ignore BUCKLINT: avoid "native is forbidden in fbcode" + for file in files: + native.export_file(name = file, visibility = visibility, **kwargs) diff --git a/shim/build_defs/lib/oss.bzl b/shim/build_defs/lib/oss.bzl new file mode 100644 index 0000000000000..9e386e2ab4923 --- /dev/null +++ b/shim/build_defs/lib/oss.bzl @@ -0,0 +1,236 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _filter_empty_strings(string_list): + return filter(lambda d: d != "", string_list) + +def _parse_prefix_mappings(raw_rules): + rules = [] + for raw_rule in raw_rules: + (match, replace) = raw_rule.split("->", 1) + + (cell, root_dir) = match.split("//") + match = struct(cell = cell, root_dir = root_dir) + + (cell, root_dir) = replace.split("//") + replace = struct(cell = cell, root_dir = root_dir) + + rules.append(struct(match = match, replace = replace)) + + return rules + +def _strip_third_party_rust_version(target: str) -> str: + # When upgrading libraries we either suffix them as `-old` or with a version, e.g. `-1-08` + # Strip those so we grab the right one in open source. + if target.endswith(":md-5"): # md-5 is the one exception + return target + xs = target.split("-") + for i in reversed(range(len(xs))): + s = xs[i] + if s == "old" or s.isdigit(): + xs.pop(i) + else: + break + return "-".join(xs) + +# Cell the BUCK file being processed belongs to +ACTIVE_CELL = native.get_cell_name() + +# The root cell of this project, extracted from the "root" alias. +# Targets that explicitly reference this cell will not be rewritten, and +# targets that do not end up referencing a cell will be replaced with targets +# that reference this cell +ROOT_CELL = read_config("cell_aliases", "root", "root") + +# The cell this file and the rest of the shim directory belong to, generally +# "shim" and does not need to be set. +SHIM_CELL = read_config("oss", "shim_cell", "shim") + +# The internal cell this project originally belonged to. +# +# When applying rewrites, the cell of the target is often considered. Targets +# that do not explicitly specify a cell (eg: "//foo:bar") will be considered +# to belong to INTERNAL_CELL. +INTERNAL_CELL = read_config("oss", "internal_cell", "fbcode") + +# There can be situations where a target specifies a cell explicitly and the +# path is part of the local checkout, rather than potentially needing to be +# shimmed. 
In this case, we want to rewrite the target to use the root cell.
+#
+# If a target's cell is unspecified or matches the internal cell, and the path
+# starts with an entry in this list, the cell is replaced with the ROOT_CELL.
+#
+# Entries are separated by spaces, and evaluated in order. Once a match is
+# found, the rewrite is complete and the following entries will not be
+# evaluated.
+#
+# Examples:
+#   internal_cell//oss_project/foo:bar -> root//oss_project/foo:bar
+PROJECT_DIRS = _filter_empty_strings(read_config("oss", "project_dirs", "").split(" "))
+
+# There are some situations where a prefix of the internal directory structure
+# is removed from the public filepaths, such as rewriting "internal/foo/bar/baz"
+# to "oss/baz". When this happens, the BUCK files are not converted to reflect
+# the public directory structure, and targets need to be rewritten to account
+# for the discrepancy.
+#
+# Entries behave similarly to PROJECT_DIRS, except that the root directory will
+# also be removed from the path in the rewritten target. This setting is
+# applied after PROJECT_DIRS.
+#
+# Entries are separated by spaces and evaluated in order. Once a match is
+# found, the rewrite is complete and the following entries will not be
+# evaluated.
+#
+# Examples:
+#   //oss_project/foo:bar -> root//foo:bar
+#   internal_cell//oss_project/foo:bar -> root//foo:bar
+STRIPPED_ROOT_DIRS = _filter_empty_strings(read_config("oss", "stripped_root_dirs", "").split(" "))
+
+# Internally, most code shares the same cell in a monorepo, but public projects
+# only contain a subset, importing dependencies via git submodules or other
+# mechanisms. When this happens, the dependency may end up in a different
+# filepath, or may have its own buck2 configuration and should be treated as
+# an on-disk external cell.
+#
+# If the target's cell matches (or, if unspecified, INTERNAL_CELL matches),
+# and the target's path is within the root directory, both the cell and the
+# root directory prefix are replaced with the new values.
+#
+# Entries are in the form "MATCH->REPLACEMENT". Both MATCH and REPLACEMENT
+# must be in the format "CELL//DIR_PREFIX".
+#
+# Entries are separated by spaces and evaluated in order. Once a match is
+# found, the rewrite is complete and the following entries will not be
+# evaluated.
+#
+# Examples:
+#   internal//foo->foo//foo; internal//foo/bar:baz -> foo//foo/bar:baz
+PREFIX_MAPPINGS = _parse_prefix_mappings(
+    _filter_empty_strings(read_config("oss", "prefix_mappings", "").split(" ")),
+)
+
+# Hardcoded rewrite rules that apply to many projects and only produce targets
+# within the shim cell. They are applied after the rules from .buckconfig, and
+# will not be applied if any other rules match.
+IMPLICIT_REWRITE_RULES = { + "fbcode": struct( + exact = { + "common/rust/shed/fbinit:fbinit": "third-party/rust:fbinit", + "common/rust/shed/sorted_vector_map:sorted_vector_map": "third-party/rust:sorted_vector_map", + "watchman/rust/watchman_client:watchman_client": "third-party/rust:watchman_client", + }, + dirs = [ + ("third-party-buck/platform010/build/supercaml", "third-party/ocaml"), + ("third-party-buck/platform010/build", "third-party"), + ], + ), + "fbsource": struct( + dirs = [ + ("third-party", "third-party"), + ], + dynamic = [ + ("third-party/rust", _strip_third_party_rust_version), + ], + ), + "third-party": struct( + dirs = [ + ("", "third-party"), + ], + dynamic = [ + ("rust", lambda path: "third-party/" + _strip_third_party_rust_version(path)), + ], + ), +} + +DEFAULT_REWRITE_CTX = struct( + cells = struct( + active = ACTIVE_CELL, + root = ROOT_CELL, + shim = SHIM_CELL, + internal = INTERNAL_CELL, + ), + project_dirs = PROJECT_DIRS, + stripped_root_dirs = STRIPPED_ROOT_DIRS, + prefix_mappings = PREFIX_MAPPINGS, + implicit_rewrite_rules = IMPLICIT_REWRITE_RULES, +) + +""" +Rewrite an internal target string to one that is compatible with this OSS +project. + +Some example use cases for this: +- Map dependency targets to shim targets in this dir +- Handle mismatching buck roots between internal and oss + (eg: internal/oss-project/... is exposed externally as oss-project/...) +- Handle submodules that result in filepaths that do not match internal + (eg: internal/my_library/... and oss-project/my_library/my_library/...) +""" + +def translate_target( + target: str, + ctx = DEFAULT_REWRITE_CTX) -> str: + if "//" not in target: + # This is a local target, aka ":foo". Don't touch + return target + + (cell, path) = target.split("//", 1) + + if cell == ctx.cells.root: + # This cell is explicitly root. 
Don't touch + return target + + resolved_cell = ctx.cells.active if cell == "" else cell + internal_cell = ctx.cells.internal if resolved_cell == ctx.cells.root else resolved_cell + + if internal_cell == ctx.cells.internal: + for d in ctx.project_dirs: + if _path_rooted_in_dir(path, d): + return ctx.cells.root + "//" + path + + for d in ctx.stripped_root_dirs: + if _path_rooted_in_dir(path, d): + return ctx.cells.root + "//" + _strip_root_dir_from_path(path, d) + + for rule in ctx.prefix_mappings: + if internal_cell == rule.match.cell and _path_rooted_in_dir(path, rule.match.root_dir): + return rule.replace.cell + "//" + _swap_root_dir_for_path(path, rule.match.root_dir, rule.replace.root_dir) + + rules = ctx.implicit_rewrite_rules.get(internal_cell) + + if rules == None: + # No implicit rewrite rules + return target + + exact = getattr(rules, "exact", {}).get(path) + if exact != None: + return ctx.cells.shim + "//" + exact + + for (match_root_dir, replace_root_dir) in getattr(rules, "dirs", []): + if _path_rooted_in_dir(path, match_root_dir): + return ctx.cells.shim + "//" + _swap_root_dir_for_path(path, match_root_dir, replace_root_dir) + + for (match_root_dir, fn) in getattr(rules, "dynamic", []): + if _path_rooted_in_dir(path, match_root_dir): + return ctx.cells.shim + "//" + fn(path) + + return target + +def _path_rooted_in_dir(path: str, d: str) -> bool: + return d == "" or path == d or path.startswith(d + "/") or path.startswith(d + ":") + +def _strip_root_dir_from_path(path: str, d: str) -> str: + return path.removeprefix(d).removeprefix("/") + +def _swap_root_dir_for_path(path: str, root_dir: str, new_root_dir) -> str: + suffix = _strip_root_dir_from_path(path, root_dir) + if not suffix.startswith(":"): + suffix = "/" + suffix + replace_path = new_root_dir.removesuffix("/") + suffix + return replace_path.removeprefix("/") diff --git a/shim/build_defs/lib/test/BUCK b/shim/build_defs/lib/test/BUCK new file mode 100644 index 0000000000000..a2f6ef3a1c64a --- /dev/null +++ b/shim/build_defs/lib/test/BUCK @@ -0,0 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@shim//build_defs/lib/test:oss.bzl", "test_translate_target") + +test_translate_target() diff --git a/shim/build_defs/lib/test/oss.bzl b/shim/build_defs/lib/test/oss.bzl new file mode 100644 index 0000000000000..edae83732a032 --- /dev/null +++ b/shim/build_defs/lib/test/oss.bzl @@ -0,0 +1,73 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+
+load("@shim//build_defs/lib:oss.bzl", "translate_target")
+
+TEST_CTX = struct(
+    cells = struct(
+        active = "root",
+        root = "root",
+        shim = "shim",
+        internal = "internal",
+    ),
+    project_dirs = ["project"],
+    stripped_root_dirs = ["root_dir"],
+    prefix_mappings = [
+        struct(
+            match = struct(cell = "internal", root_dir = "dep"),
+            replace = struct(cell = "dep", root_dir = "dep_rename"),
+        ),
+    ],
+    implicit_rewrite_rules = {
+        "internal": struct(
+            exact = {
+                "exact:exact": "foo/shimmed:exact",
+            },
+            dirs = [
+                ("third-party", "third_party"),
+            ],
+            dynamic = [
+                ("dynamic", lambda path: path.upper()),
+            ],
+        ),
+    },
+)
+
+def _test_target(target: str, expected: str):
+    actual = translate_target(target, TEST_CTX)
+
+    if actual != expected:
+        fail("Expected {} == {}".format(actual, expected))
+
+def test_translate_target():
+    _test_target("//:foo", "//:foo")
+    _test_target("root//:foo", "root//:foo")
+    _test_target("other//:foo", "other//:foo")
+
+    _test_target("//project/foo:bar", "root//project/foo:bar")
+    _test_target("internal//project/foo:bar", "root//project/foo:bar")
+    _test_target("internal//project2/foo:bar", "internal//project2/foo:bar")
+
+    _test_target("//root_dir/foo:bar", "root//foo:bar")
+    _test_target("//root_dir/with/subdir/foo:bar", "root//with/subdir/foo:bar")
+    _test_target("internal//root_dir/foo:bar", "root//foo:bar")
+
+    _test_target("//dep:foo", "dep//dep_rename:foo")
+    _test_target("//dep/with/subdir:foo", "dep//dep_rename/with/subdir:foo")
+    _test_target("internal//dep:foo", "dep//dep_rename:foo")
+    _test_target("other//dep:foo", "other//dep:foo")
+
+    _test_target("//exact:exact", "shim//foo/shimmed:exact")
+    _test_target("internal//exact:exact", "shim//foo/shimmed:exact")
+    _test_target("other//exact:exact", "other//exact:exact")
+
+    _test_target("//third-party/lib/foo:bar", "shim//third_party/lib/foo:bar")
+    _test_target("internal//third-party/lib/foo:bar", "shim//third_party/lib/foo:bar")
+
+    _test_target("//dynamic:foo", "shim//DYNAMIC:FOO")
+    _test_target("internal//dynamic:foo", "shim//DYNAMIC:FOO")
+    _test_target("other//dynamic:foo", "other//dynamic:foo")
diff --git a/shim/build_defs/native_rules.bzl b/shim/build_defs/native_rules.bzl
index 7219580281c47..a3e3a7039b0f3 100644
--- a/shim/build_defs/native_rules.bzl
+++ b/shim/build_defs/native_rules.bzl
@@ -6,12 +6,21 @@
 # of this source tree.
 
 def buck_genrule(visibility = ["PUBLIC"], **kwargs):
+    # @lint-ignore BUCKLINT: avoid "native is forbidden in fbcode"
     native.genrule(visibility = visibility, **kwargs)
 
+def buck_command_alias(**_):
+    pass
+
 def buck_filegroup(visibility = ["PUBLIC"], **kwargs):
+    # @lint-ignore BUCKLINT: avoid "native is forbidden in fbcode"
     native.filegroup(visibility = visibility, **kwargs)
 
 def alias(actual, visibility = ["PUBLIC"], **kwargs):
     if actual.startswith("//buck2/"):
         actual = "root//" + actual.removeprefix("//buck2/")
     native.alias(actual = actual, visibility = visibility, **kwargs)
+
+def buck_sh_binary(visibility = ["PUBLIC"], **kwargs):
+    # @lint-ignore BUCKLINT: avoid "native is forbidden in fbcode"
+    native.sh_binary(visibility = visibility, **kwargs)
diff --git a/shim/build_defs/ocaml_binary.bzl b/shim/build_defs/ocaml_binary.bzl
new file mode 100644
index 0000000000000..1530848e3dbe5
--- /dev/null
+++ b/shim/build_defs/ocaml_binary.bzl
@@ -0,0 +1,10 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@shim//:shims.bzl", _ocaml_binary = "ocaml_binary") + +ocaml_binary = _ocaml_binary diff --git a/shim/build_defs/package_local_utils.bzl b/shim/build_defs/package_local_utils.bzl deleted file mode 100644 index b914516934240..0000000000000 --- a/shim/build_defs/package_local_utils.bzl +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under both the MIT license found in the -# LICENSE-MIT file in the root directory of this source tree and the Apache -# License, Version 2.0 found in the LICENSE-APACHE file in the root directory -# of this source tree. - -def _set_clang_version(_version, _overwrite = False): - pass - -package_local_utils = struct( - set_clang_version = _set_clang_version, -) diff --git a/shim/build_defs/prebuilt_cpp_library.bzl b/shim/build_defs/prebuilt_cpp_library.bzl new file mode 100644 index 0000000000000..97a0d8cf2e9a8 --- /dev/null +++ b/shim/build_defs/prebuilt_cpp_library.bzl @@ -0,0 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@shim//:shims.bzl", _prebuilt_cpp_library = "prebuilt_cpp_library") + +prebuilt_cpp_library = _prebuilt_cpp_library diff --git a/shim/build_defs/python_binary.bzl b/shim/build_defs/python_binary.bzl index 29bb388e9266f..5805fc30bd256 100644 --- a/shim/build_defs/python_binary.bzl +++ b/shim/build_defs/python_binary.bzl @@ -7,4 +7,6 @@ def python_binary(srcs = [], **kwargs): _unused = srcs # @unused + + # @lint-ignore BUCKLINT: avoid "Direct usage of native rules is not allowed." native.python_binary(**kwargs) diff --git a/shim/build_defs/python_library.bzl b/shim/build_defs/python_library.bzl new file mode 100644 index 0000000000000..3e62a5e9105ce --- /dev/null +++ b/shim/build_defs/python_library.bzl @@ -0,0 +1,12 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def python_library(srcs = [], visibility = ["PUBLIC"], **kwargs): + _unused = srcs # @unused + + # @lint-ignore BUCKLINT: avoid "Direct usage of native rules is not allowed." + native.python_library(visibility = visibility, **kwargs) diff --git a/shim/build_defs/python_unittest.bzl b/shim/build_defs/python_unittest.bzl new file mode 100644 index 0000000000000..76bc3589b2ba0 --- /dev/null +++ b/shim/build_defs/python_unittest.bzl @@ -0,0 +1,12 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def python_unittest(srcs = [], **kwargs): + _unused = srcs # @unused + + # @lint-ignore BUCKLINT: avoid "Direct usage of native rules is not allowed." 
+ native.python_test(**kwargs) diff --git a/shim/build_defs/roar.bzl b/shim/build_defs/roar.bzl new file mode 100644 index 0000000000000..0af590c1330c6 --- /dev/null +++ b/shim/build_defs/roar.bzl @@ -0,0 +1,14 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@fbsource//tools/build_defs:buckconfig.bzl", "read_bool") + +def roar_no_jit(): + use_roar_jit = read_bool("fbcode", "use_roar_jit", required = False) + if use_roar_jit: + return ["-fforce-no-jit"] + return [] diff --git a/shim/build_defs/rust_binary.bzl b/shim/build_defs/rust_binary.bzl index 188df9aa23fb4..d763f446cb534 100644 --- a/shim/build_defs/rust_binary.bzl +++ b/shim/build_defs/rust_binary.bzl @@ -5,6 +5,6 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -load("//:shims.bzl", _rust_binary = "rust_binary") +load("@shim//:shims.bzl", _rust_binary = "rust_binary") rust_binary = _rust_binary diff --git a/shim/build_defs/rust_library.bzl b/shim/build_defs/rust_library.bzl index da9da2f0cd0af..fcd27bcf803d4 100644 --- a/shim/build_defs/rust_library.bzl +++ b/shim/build_defs/rust_library.bzl @@ -5,6 +5,6 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -load("//:shims.bzl", _rust_library = "rust_library") +load("@shim//:shims.bzl", _rust_library = "rust_library") rust_library = _rust_library diff --git a/shim/build_defs/rust_linkable_symbol.bzl b/shim/build_defs/rust_linkable_symbol.bzl new file mode 100644 index 0000000000000..c29faa3968f14 --- /dev/null +++ b/shim/build_defs/rust_linkable_symbol.bzl @@ -0,0 +1,18 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//rust:linkable_symbol.bzl", prelude_rust_linkable_symbol = "rust_linkable_symbol") +load("@shim//:shims.bzl", _rust_library = "rust_library") + +def rust_linkable_symbol( + visibility = ["PUBLIC"], + **kwargs): + prelude_rust_linkable_symbol( + visibility = visibility, + rust_library_macro = _rust_library, + **kwargs + ) diff --git a/shim/build_defs/rust_unittest.bzl b/shim/build_defs/rust_unittest.bzl index 17f03052fd1d6..8b0822fa8233d 100644 --- a/shim/build_defs/rust_unittest.bzl +++ b/shim/build_defs/rust_unittest.bzl @@ -5,6 +5,6 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -load("//:shims.bzl", _rust_unittest = "rust_unittest") +load("@shim//:shims.bzl", _rust_unittest = "rust_unittest") rust_unittest = _rust_unittest diff --git a/shim/cfg.bzl b/shim/cfg.bzl new file mode 100644 index 0000000000000..9b63ae110f292 --- /dev/null +++ b/shim/cfg.bzl @@ -0,0 +1,39 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +load("@prelude//cfg/modifier:cfg_constructor.bzl?v2_only", "cfg_constructor_post_constraint_analysis", "cfg_constructor_pre_constraint_analysis") +load("@prelude//cfg/modifier:common.bzl?v2_only", "MODIFIER_METADATA_KEY") + +SHIM_ALIASES = { + "fedora": "shim//os/linux/distro/constraints:fedora", + "ubuntu": "shim//os/linux/distro/constraints:ubuntu", +} + +def set_cfg_constructor(aliases = dict()): + project_root_cell = read_root_config("cell_aliases", "root") + current_root_cell = read_config("cell_aliases", "root") + if project_root_cell == current_root_cell: + native.set_cfg_constructor( + stage0 = cfg_constructor_pre_constraint_analysis, + stage1 = cfg_constructor_post_constraint_analysis, + key = MODIFIER_METADATA_KEY, + aliases = struct(**aliases), + extra_data = struct(), + ) + +def get_shim_modifiers(): + modifiers = [] + + linux_distro = read_config("linux", "distro") + + if linux_distro: + modifiers.append("shim//os/linux/distro/constraints:{}".format(linux_distro)) + + known_broken = read_config("oss", "known_broken", "disable") + modifiers.append("shim//opensource/macros/broken_in_oss/constraints:{}".format(known_broken)) + + return modifiers diff --git a/shim/common/ocaml/interop/defs.bzl b/shim/common/ocaml/interop/defs.bzl new file mode 100644 index 0000000000000..18e488e23d29f --- /dev/null +++ b/shim/common/ocaml/interop/defs.bzl @@ -0,0 +1,13 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# Try to keep in sync with all the client projects, like hack +RUST_FLAGS_2018 = [ + "-Drust-2018-idioms", + "-Dwarnings", + "-Dunused-crate-dependencies", +] diff --git a/shim/folly/io/async/test/certs/defs.bzl b/shim/folly/io/async/test/certs/defs.bzl new file mode 100644 index 0000000000000..1a645059e25c4 --- /dev/null +++ b/shim/folly/io/async/test/certs/defs.bzl @@ -0,0 +1,23 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@fbcode_macros//build_defs:native_rules.bzl", "alias") + +def alias_pem(pems: list[str]): + for pem in pems: + alias( + name = pem, + actual = "//folly/io/async/test/certs:{pem}".format(pem = pem), + ) + +def alias_pem_for_xplat(pems: list[str]): + # in xplat these pem files are exported in //xplat/folly/io/async/test + for pem in pems: + alias( + name = pem, + actual = "//xplat/folly/io/async/test:certs/{pem}".format(pem = pem), + ) diff --git a/shim/lib/dicts.bzl b/shim/lib/dicts.bzl new file mode 100644 index 0000000000000..f702fa92c59e6 --- /dev/null +++ b/shim/lib/dicts.bzl @@ -0,0 +1,41 @@ +# Copyright 2017 The Bazel Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Skylib module containing functions that operate on dictionaries.""" + +def _add(*dictionaries): + """Returns a new `dict` that has all the entries of the given dictionaries. + + If the same key is present in more than one of the input dictionaries, the + last of them in the argument list overrides any earlier ones. + + This function is designed to take zero or one arguments as well as multiple + dictionaries, so that it follows arithmetic identities and callers can avoid + special cases for their inputs: the sum of zero dictionaries is the empty + dictionary, and the sum of a single dictionary is a copy of itself. + + Args: + *dictionaries: Zero or more dictionaries to be added. + + Returns: + A new `dict` that has all the entries of the given dictionaries. + """ + result = {} + for d in dictionaries: + result.update(d) + return result + +dicts = struct( + add = _add, +) diff --git a/shim/lib/paths.bzl b/shim/lib/paths.bzl new file mode 100644 index 0000000000000..8e3fcaea5161e --- /dev/null +++ b/shim/lib/paths.bzl @@ -0,0 +1,242 @@ +# Copyright 2017 The Bazel Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Skylib module containing file path manipulation functions. + +NOTE: The functions in this module currently only support paths with Unix-style +path separators (forward slash, "/"); they do not handle Windows-style paths +with backslash separators or drive letters. +""" + +def _basename(p): + """Returns the basename (i.e., the file portion) of a path. + + Note that if `p` ends with a slash, this function returns an empty string. + This matches the behavior of Python's `os.path.basename`, but differs from + the Unix `basename` command (which would return the path segment preceding + the final slash). + + Args: + p: The path whose basename should be returned. + + Returns: + The basename of the path, which includes the extension. + """ + return p.rpartition("/")[-1] + +def _dirname(p): + """Returns the dirname of a path. + + The dirname is the portion of `p` up to but not including the file portion + (i.e., the basename). Any slashes immediately preceding the basename are not + included, unless omitting them would make the dirname empty. + + Args: + p: The path whose dirname should be returned. + + Returns: + The dirname of the path. + """ + prefix, sep, _ = p.rpartition("/") + if not prefix: + return sep + else: + # If there are multiple consecutive slashes, strip them all out as Python's + # os.path.dirname does. + return prefix.rstrip("/") + +def _is_absolute(path): + """Returns `True` if `path` is an absolute path. + + Args: + path: A path (which is a string). + + Returns: + `True` if `path` is an absolute path. + """ + return path.startswith("/") or (len(path) > 2 and path[1] == ":") + +def _join(path, *others): + """Joins one or more path components intelligently. 
+
+    This function mimics the behavior of Python's `os.path.join` function on
+    POSIX platforms. It returns the concatenation of `path` and any members of
+    `others`, inserting directory separators before each component except the
+    first. The separator is not inserted if the path up until that point is
+    either empty or already ends in a separator.
+
+    If any component is an absolute path, all previous components are discarded.
+
+    Args:
+      path: A path segment.
+      *others: Additional path segments.
+
+    Returns:
+      A string containing the joined paths.
+    """
+    result = path
+
+    for p in others:
+        if _is_absolute(p):
+            result = p
+        elif not result or result.endswith("/"):
+            result += p
+        else:
+            result += "/" + p
+
+    return result
+
+def _normalize(path):
+    """Normalizes a path, eliminating double slashes and other redundant segments.
+
+    This function mimics the behavior of Python's `os.path.normpath` function on
+    POSIX platforms; specifically:
+
+    - If the entire path is empty, "." is returned.
+    - All "." segments are removed, unless the path consists solely of a single
+      "." segment.
+    - Trailing slashes are removed, unless the path consists solely of slashes.
+    - ".." segments are removed as long as there are corresponding segments
+      earlier in the path to remove; otherwise, they are retained as leading ".."
+      segments.
+    - Single and double leading slashes are preserved, but three or more leading
+      slashes are collapsed into a single leading slash.
+    - Multiple adjacent internal slashes are collapsed into a single slash.
+
+    Args:
+      path: A path.
+
+    Returns:
+      The normalized path.
+    """
+    if not path:
+        return "."
+
+    if path.startswith("//") and not path.startswith("///"):
+        initial_slashes = 2
+    elif path.startswith("/"):
+        initial_slashes = 1
+    else:
+        initial_slashes = 0
+    is_relative = (initial_slashes == 0)
+
+    components = path.split("/")
+    new_components = []
+
+    for component in components:
+        if component in ("", "."):
+            continue
+        if component == "..":
+            if new_components and new_components[-1] != "..":
+                # Only pop the last segment if it isn't another "..".
+                new_components.pop()
+            elif is_relative:
+                # Preserve leading ".." segments for relative paths.
+                new_components.append(component)
+        else:
+            new_components.append(component)
+
+    path = "/".join(new_components)
+    if not is_relative:
+        path = ("/" * initial_slashes) + path
+
+    return path or "."
+
+def _relativize(path, start):
+    """Returns the portion of `path` that is relative to `start`.
+
+    Because we do not have access to the underlying file system, this
+    implementation differs slightly from Python's `os.path.relpath` in that it
+    will fail if `path` is not beneath `start` (rather than use parent segments
+    to walk up to the common file system root).
+
+    Relativizing paths that start with parent directory references only works if
+    both paths start with the same initial parent references.
+
+    Args:
+      path: The path to relativize.
+      start: The ancestor path against which to relativize.
+
+    Returns:
+      The portion of `path` that is relative to `start`.
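+
+    For example (illustrative): `paths.relativize("a/b/c/d", "a/b")` returns
+    "c/d", while `paths.relativize("a/b", "c")` fails because "a/b" is not
+    beneath "c".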
+ """ + segments = _normalize(path).split("/") + start_segments = _normalize(start).split("/") + if start_segments == ["."]: + start_segments = [] + start_length = len(start_segments) + + if (path.startswith("/") != start.startswith("/") or + len(segments) < start_length): + fail("Path '%s' is not beneath '%s'" % (path, start)) + + for ancestor_segment, segment in zip(start_segments, segments): + if ancestor_segment != segment: + fail("Path '%s' is not beneath '%s'" % (path, start)) + + length = len(segments) - start_length + result_segments = segments[-length:] + return "/".join(result_segments) + +def _replace_extension(p, new_extension): + """Replaces the extension of the file at the end of a path. + + If the path has no extension, the new extension is added to it. + + Args: + p: The path whose extension should be replaced. + new_extension: The new extension for the file. The new extension should + begin with a dot if you want the new filename to have one. + + Returns: + The path with the extension replaced (or added, if it did not have one). + """ + return _split_extension(p)[0] + new_extension + +def _split_extension(p): + """Splits the path `p` into a tuple containing the root and extension. + + Leading periods on the basename are ignored, so + `path.split_extension(".bashrc")` returns `(".bashrc", "")`. + + Args: + p: The path whose root and extension should be split. + + Returns: + A tuple `(root, ext)` such that the root is the path without the file + extension, and `ext` is the file extension (which, if non-empty, contains + the leading dot). The returned tuple always satisfies the relationship + `root + ext == p`. + """ + b = _basename(p) + last_dot_in_basename = b.rfind(".") + + # If there is no dot or the only dot in the basename is at the front, then + # there is no extension. + if last_dot_in_basename <= 0: + return (p, "") + + dot_distance_from_end = len(b) - last_dot_in_basename + return (p[:-dot_distance_from_end], p[-dot_distance_from_end:]) + +paths = struct( + basename = _basename, + dirname = _dirname, + is_absolute = _is_absolute, + join = _join, + normalize = _normalize, + relativize = _relativize, + replace_extension = _replace_extension, + split_extension = _split_extension, +) diff --git a/shim/lib/shell.bzl b/shim/lib/shell.bzl new file mode 100644 index 0000000000000..3e22ffaeb7ef2 --- /dev/null +++ b/shim/lib/shell.bzl @@ -0,0 +1,63 @@ +# Copyright 2017 The Bazel Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Skylib module containing shell utility functions.""" + +def _array_literal(iterable): + """Creates a string from a sequence that can be used as a shell array. + + For example, `shell.array_literal(["a", "b", "c"])` would return the string + `("a" "b" "c")`, which can be used in a shell script wherever an array + literal is needed. + + Note that all elements in the array are quoted (using `shell.quote`) for + safety, even if they do not need to be. + + Args: + iterable: A sequence of elements. 
Elements that are not strings will be
+        converted to strings first, by calling `str()`.
+
+    Returns:
+      A string that represents the sequence as a shell array; that is,
+      parentheses containing the quoted elements.
+    """
+    return "(" + " ".join([_quote(str(i)) for i in iterable]) + ")"
+
+def _quote(s):
+    """Quotes the given string for use in a shell command.
+
+    This function quotes the given string (in case it contains spaces or other
+    shell metacharacters).
+
+    Args:
+      s: The string to quote.
+
+    Returns:
+      A quoted version of the string that can be passed to a shell command.
+    """
+    return "'" + s.replace("'", "'\\''") + "'"
+
+def _powershell_quote(s):
+    """Quotes a multiline string for PowerShell.
+    References:
+    1. https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_special_characters?view=powershell-7.4
+    2. https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_quoting_rules?view=powershell-7.4
+    """
+    return s.replace("`", "``").replace("\n", "`n").replace('"', '""').replace("$", "`$")
+
+shell = struct(
+    array_literal = _array_literal,
+    quote = _quote,
+    powershell_quote = _powershell_quote,
+)
diff --git a/shim/opensource/macros/BUCK b/shim/opensource/macros/BUCK
new file mode 100644
index 0000000000000..4e4acfd2090d6
--- /dev/null
+++ b/shim/opensource/macros/BUCK
@@ -0,0 +1,14 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+config_setting(
+    name = "broken-in-oss",
+    constraint_values = [
+        "//opensource/macros/broken_in_oss/constraints:enable",
+    ],
+    visibility = ["PUBLIC"],
+)
diff --git a/shim/opensource/macros/broken_in_oss/constraints/BUCK b/shim/opensource/macros/broken_in_oss/constraints/BUCK
new file mode 100644
index 0000000000000..c49f2e67cf60d
--- /dev/null
+++ b/shim/opensource/macros/broken_in_oss/constraints/BUCK
@@ -0,0 +1,23 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+constraint_setting(
+    name = "broken_in_oss",
+    visibility = ["PUBLIC"],
+)
+
+constraint_value(
+    name = "enable",
+    constraint_setting = ":broken_in_oss",
+    visibility = ["PUBLIC"],
+)
+
+constraint_value(
+    name = "disable",
+    constraint_setting = ":broken_in_oss",
+    visibility = ["PUBLIC"],
+)
diff --git a/shim/os/BUCK b/shim/os/BUCK
new file mode 100644
index 0000000000000..e98de580759ab
--- /dev/null
+++ b/shim/os/BUCK
@@ -0,0 +1,32 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
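+
+# config_settings consumed by select()s elsewhere in this cell (notably the
+# third-party package maps). The Linux settings pair the prelude's OS
+# constraint with a distro constraint; macos-homebrew currently matches any
+# macOS configuration.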
+ +config_setting( + name = "linux-fedora", + constraint_values = [ + "prelude//os/constraints:linux", + "//os/linux/distro/constraints:fedora", + ], + visibility = ["PUBLIC"], +) + +config_setting( + name = "linux-ubuntu", + constraint_values = [ + "prelude//os/constraints:linux", + "//os/linux/distro/constraints:ubuntu", + ], + visibility = ["PUBLIC"], +) + +config_setting( + name = "macos-homebrew", + constraint_values = [ + "prelude//os/constraints:macos", + ], + visibility = ["PUBLIC"], +) diff --git a/shim/os/linux/distro/constraints/BUCK b/shim/os/linux/distro/constraints/BUCK new file mode 100644 index 0000000000000..9abe02eb55493 --- /dev/null +++ b/shim/os/linux/distro/constraints/BUCK @@ -0,0 +1,23 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +constraint_setting( + name = "distro", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "fedora", + constraint_setting = ":distro", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "ubuntu", + constraint_setting = ":distro", + visibility = ["PUBLIC"], +) diff --git a/shim/shims.bzl b/shim/shims.bzl index 8d9f60183764c..6a73cfe2a8166 100644 --- a/shim/shims.bzl +++ b/shim/shims.bzl @@ -5,12 +5,238 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. -# @lint-ignore FBCODEBZLADDLOADS +load("@bazel_skylib//lib:paths.bzl", "paths") +load("@prelude//utils:selects.bzl", "selects") +# @lint-ignore-every FBCODEBZLADDLOADS -_SELECT_TYPE = type(select({"DEFAULT": []})) +load("@prelude//utils:type_defs.bzl", "is_dict", "is_list", "is_select", "is_tuple") +load("@shim//build_defs:auto_headers.bzl", "AutoHeaders", "get_auto_headers") +load("@shim//build_defs/lib:oss.bzl", "translate_target") -def is_select(thing): - return type(thing) == _SELECT_TYPE +prelude = native + +_C_SOURCE_EXTS = ( + ".c", +) + +_CPP_SOURCE_EXTS = ( + ".cc", + ".cpp", +) + +_SOURCE_EXTS = _C_SOURCE_EXTS + _CPP_SOURCE_EXTS + +# These header suffixes are used to logically group C/C++ source (e.g. +# `foo/Bar.cpp`) with headers with the following suffixes (e.g. `foo/Bar.h` and +# `foo/Bar-inl.tcc`), such that the source provides all implementation for +# methods/classes declared in the headers. +# +# This is important for a couple reasons: +# 1) Automatic dependencies: Tooling can use this property to automatically +# manage TARGETS dependencies by extracting `#include` references in sources +# and looking up the rules which "provide" them. +# 2) Modules: This logical group can be combined into a standalone C/C++ module +# (when such support is available). +_HEADER_SUFFIXES = ( + ".h", + ".hpp", + ".tcc", + "-inl.h", + "-inl.hpp", + "-inl.tcc", + "-defs.h", + "-defs.hpp", + "-defs.tcc", +) + +CPP_UNITTEST_DEPS = [ + "shim//third-party/googletest:cpp_unittest_main", +] +CPP_FOLLY_UNITTEST_DEPS = [ + "gh_facebook_folly//folly/test/common:test_main_lib", + "gh_facebook_folly//folly/ext/buck2:test_ext", +] + +def _get_headers_from_sources(srcs): + """ + Return the headers likely associated with the given sources + + Args: + srcs: A list of strings representing files or build targets + + Returns: + A list of header files corresponding to the list of sources. 
These files are
+        validated to exist based on glob()
+    """
+    split_srcs = [
+        paths.split_extension(src_filename)
+        for src_filename in [_get_src_filename(src) for src in srcs]
+        if "//" not in src_filename and not src_filename.startswith(":")
+    ]
+
+    # For e.g. foo.cpp grab a glob on foo.h, foo-inl.h, etc
+    headers = [
+        base + header_ext
+        for base, ext in split_srcs
+        if ext in _SOURCE_EXTS
+        for header_ext in _HEADER_SUFFIXES
+    ]
+
+    # Avoid a warning for an empty glob pattern if there are no headers.
+    return glob(headers) if headers else []
+
+def _get_src_filename(src):
+    """
+    Return the filename from a potentially tuple-valued entry in a srcs attribute
+    """
+
+    if is_tuple(src):
+        s, _ = src
+        return s
+    return src
+
+def _update_headers_with_src_headers(src_headers, out_headers):
+    """
+    Helper function to update raw headers with headers from srcs
+    """
+    src_headers = list(src_headers.difference(out_headers))
+
+    # Looks simple, right? But if a header is explicitly added in, say, a
+    # dictionary mapping, we want to make sure to keep the original mapping
+    # and drop the F -> F mapping
+    if is_list(out_headers):
+        out_headers.extend(sorted(src_headers))
+    else:
+        # Let it throw AttributeError if update() can't be found
+        out_headers.update({k: k for k in src_headers})
+    return out_headers
+
+def prebuilt_cpp_library(
+        headers = None,
+        linker_flags = None,
+        private_linker_flags = None,
+        **kwargs):
+    prelude.prebuilt_cxx_library(
+        exported_headers = headers,
+        exported_linker_flags = linker_flags,
+        linker_flags = private_linker_flags,
+        **kwargs
+    )
+
+def cpp_library(
+        name,
+        deps = [],
+        srcs = [],
+        external_deps = [],
+        exported_deps = [],
+        exported_external_deps = [],
+        undefined_symbols = None,
+        visibility = ["PUBLIC"],
+        auto_headers = None,
+        arch_preprocessor_flags = None,
+        modular_headers = None,
+        os_deps = [],
+        arch_compiler_flags = None,
+        tags = None,
+        linker_flags = None,
+        private_linker_flags = None,
+        exported_linker_flags = None,
+        headers = None,
+        private_headers = None,
+        propagated_pp_flags = (),
+        **kwargs):
+    base_path = native.package_name()
+    oss_depends_on_folly = read_config("oss_depends_on", "folly", False)
+    header_base_path = base_path
+    if oss_depends_on_folly and header_base_path.startswith("folly"):
+        header_base_path = header_base_path.replace("folly/", "", 1)
+
+    _unused = (undefined_symbols, arch_preprocessor_flags, modular_headers, arch_compiler_flags, tags, propagated_pp_flags)  # @unused
+    if os_deps:
+        deps += _select_os_deps(_fix_dict_deps(os_deps))
+    if headers == None:
+        headers = []
+    if tags != None and "oss_dependency" in tags:
+        if oss_depends_on_folly:
+            headers = [item.replace("//:", "//folly:") if item == "//:folly-config.h" else item for item in headers]
+    if is_select(srcs) and auto_headers == AutoHeaders.SOURCES:
+        # Validate `srcs` and `auto_headers` before the config check
+        fail(
+            "//{}:{}: `select` srcs cannot support AutoHeaders.SOURCES".format(base_path, name),
+        )
+    auto_headers = get_auto_headers(auto_headers)
+    if auto_headers == AutoHeaders.SOURCES and not is_select(srcs):
+        src_headers = set(_get_headers_from_sources(srcs))
+        if private_headers:
+            src_headers = src_headers.difference(set(private_headers))
+
+        headers = selects.apply(
+            headers,
+            partial(_update_headers_with_src_headers, src_headers),
+        )
+    if not is_select(linker_flags):
+        linker_flags = linker_flags or []
+        linker_flags = list(linker_flags)
+        if exported_linker_flags != None:
+            linker_flags += exported_linker_flags
+
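+    # The attribute munging above maps fbcode-style arguments onto the
+    # prelude rule below: `headers` become exported headers, and both
+    # regular and external deps are translated to open-source targets.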
prelude.cxx_library( + name = name, + srcs = srcs, + deps = _fix_deps(deps + external_deps_to_targets(external_deps)), + exported_deps = _fix_deps(exported_deps + external_deps_to_targets(exported_external_deps)), + visibility = visibility, + preferred_linkage = "static", + exported_headers = headers, + headers = private_headers, + exported_linker_flags = linker_flags, + linker_flags = private_linker_flags, + header_namespace = header_base_path, + **kwargs + ) + +def cpp_unittest( + deps = [], + external_deps = [], + visibility = ["PUBLIC"], + supports_static_listing = None, + allocator = None, + owner = None, + tags = None, + emails = None, + extract_helper_lib = None, + compiler_specific_flags = None, + default_strip_mode = None, + resources = {}, + **kwargs): + _unused = (supports_static_listing, allocator, owner, tags, emails, extract_helper_lib, compiler_specific_flags, default_strip_mode) # @unused + if read_config("oss", "folly_cxx_tests", True): + deps = deps + CPP_FOLLY_UNITTEST_DEPS + else: + deps = deps + CPP_UNITTEST_DEPS + + prelude.cxx_test( + deps = _fix_deps(deps + external_deps_to_targets(external_deps)), + visibility = visibility, + resources = _fix_resources(resources), + **kwargs + ) + +def cpp_binary( + deps = [], + external_deps = [], + visibility = ["PUBLIC"], + dlopen_enabled = None, + compiler_specific_flags = None, + os_linker_flags = None, + allocator = None, + modules = None, + **kwargs): + _unused = (dlopen_enabled, compiler_specific_flags, os_linker_flags, allocator, modules) # @unused + prelude.cxx_binary( + deps = _fix_deps(deps + external_deps_to_targets(external_deps)), + visibility = visibility, + **kwargs + ) def rust_library( rustc_flags = [], @@ -19,11 +245,14 @@ def rust_library( os_deps = None, test_deps = None, test_env = None, + test_os_deps = None, + autocargo = None, + unittests = None, mapped_srcs = {}, visibility = ["PUBLIC"], **kwargs): - _unused = (test_deps, test_env, named_deps, visibility) # @unused - deps = _maybe_select_map(deps, _fix_deps) + _unused = (test_deps, test_env, test_os_deps, named_deps, autocargo, unittests, visibility) # @unused + deps = _fix_deps(deps) mapped_srcs = _maybe_select_map(mapped_srcs, _fix_mapped_srcs) if os_deps: deps += _select_os_deps(_fix_dict_deps(os_deps)) @@ -31,8 +260,8 @@ def rust_library( # Reset visibility because internal and external paths are different. visibility = ["PUBLIC"] - native.rust_library( - rustc_flags = rustc_flags + [_CFG_BUCK_OSS_BUILD], + prelude.rust_library( + rustc_flags = rustc_flags + [_CFG_BUCK_BUILD], deps = deps, visibility = visibility, mapped_srcs = mapped_srcs, @@ -42,15 +271,18 @@ def rust_library( def rust_binary( rustc_flags = [], deps = [], + autocargo = None, unittests = None, allocator = None, default_strip_mode = None, visibility = ["PUBLIC"], **kwargs): - _unused = (unittests, allocator, default_strip_mode) # @unused - deps = _maybe_select_map(deps, _fix_deps) - native.rust_binary( - rustc_flags = rustc_flags + [_CFG_BUCK_OSS_BUILD], + _unused = (unittests, allocator, default_strip_mode, autocargo) # @unused + deps = _fix_deps(deps) + + # @lint-ignore BUCKLINT: avoid "Direct usage of native rules is not allowed." 
+ prelude.rust_binary( + rustc_flags = rustc_flags + [_CFG_BUCK_BUILD], deps = deps, visibility = visibility, **kwargs @@ -61,9 +293,10 @@ def rust_unittest( deps = [], visibility = ["PUBLIC"], **kwargs): - deps = _maybe_select_map(deps, _fix_deps) - native.rust_test( - rustc_flags = rustc_flags + [_CFG_BUCK_OSS_BUILD], + deps = _fix_deps(deps) + + prelude.rust_test( + rustc_flags = rustc_flags + [_CFG_BUCK_BUILD], deps = deps, visibility = visibility, **kwargs @@ -78,13 +311,6 @@ def rust_protobuf_library( deps = [], test_deps = None, doctests = True): - deps = _maybe_select_map(deps, _fix_deps) - if build_env: - build_env = { - k: _fix_dep_in_string(v) - for k, v in build_env.items() - } - build_name = name + "-build" proto_name = name + "-proto" @@ -101,15 +327,15 @@ def rust_protobuf_library( build_env = build_env or {} build_env.update( { - "PROTOC": "$(exe buck//third-party/proto:protoc)", - "PROTOC_INCLUDE": "$(location buck//third-party/proto:google_protobuf)", + "PROTOC": "$(exe shim//third-party/proto:protoc)", + "PROTOC_INCLUDE": "$(location shim//third-party/proto:google_protobuf)", }, ) - native.genrule( + prelude.genrule( name = proto_name, srcs = protos + [ - "buck//third-party/proto:google_protobuf", + "shim//third-party/proto:google_protobuf", ], out = ".", cmd = "$(exe :" + build_name + ")", @@ -124,34 +350,33 @@ def rust_protobuf_library( # This is where prost looks for generated .rs files "OUT_DIR": "$(location :{})".format(proto_name), }, + test_deps = test_deps, deps = [ "fbsource//third-party/rust:prost", "fbsource//third-party/rust:prost-types", ] + (deps or []), - test_deps = test_deps, ) - # For python tests only - for proto in protos: - native.export_file( - name = proto, - visibility = ["PUBLIC"], - ) +def ocaml_binary( + deps = [], + visibility = ["PUBLIC"], + **kwargs): + deps = _fix_deps(deps) -# Configuration that is used when building open source using Buck2 as the build system. -# E.g. not applied either internally, or when using Cargo to build the open source code. -# At the moment of writing, mostly used to disable jemalloc. -_CFG_BUCK_OSS_BUILD = "--cfg=buck_oss_build" + prelude.ocaml_binary( + deps = deps, + visibility = visibility, + **kwargs + ) + +_CFG_BUCK_BUILD = "--cfg=buck_build" def _maybe_select_map(v, mapper): if is_select(v): return select_map(v, mapper) return mapper(v) -def _select_os_deps(xss: list[( - str, - list[str], -)]) -> Select: +def _select_os_deps(xss) -> Select: d = { "prelude//os:" + os: xs for os, xs in xss @@ -159,13 +384,7 @@ def _select_os_deps(xss: list[( d["DEFAULT"] = [] return select(d) -def _fix_dict_deps(xss: list[( - str, - list[str], -)]) -> list[( - str, - list[str], -)]: +def _fix_dict_deps(xss): return [ (k, _fix_deps(xs)) for k, xs in xss @@ -174,32 +393,30 @@ def _fix_dict_deps(xss: list[( def _fix_mapped_srcs(xs: dict[str, str]): # For reasons, this is source -> file path, which is the opposite of what # it should be. 
- return {_fix_dep(k): v for (k, v) in xs.items()} - -def _fix_deps(xs: list[str]) -> list[str]: - return filter(None, map(_fix_dep, xs)) - -def _fix_dep(x: str) -> [ - None, - str, -]: - if x == "fbsource//third-party/blake3:blake3-rust": - x = "fbsource//third-party/rust:blake3" - - if x == "//common/rust/folly/logging:logging": - return None - elif x == "//watchman/rust/watchman_client:watchman_client": - return "fbsource//third-party/rust:watchman_client" - elif x.startswith("//common/rust/shed/"): - return "fbsource//third-party/rust:" + x.removeprefix("//common/rust/shed/").split(":")[0] - elif x.startswith("//common/rust/") or x.startswith("//buck2/facebook/") or x.startswith("//eden/") or x.startswith("//remote_execution/"): - return None - elif x.startswith("//buck2/"): - return "root//" + x.removeprefix("//buck2/") + return {translate_target(k): v for (k, v) in xs.items()} + +def _fix_deps(xs): + if is_select(xs): + return select_map(xs, lambda child_targets: _fix_deps(child_targets)) + return map(translate_target, xs) + +def _fix_resources(resources): + if is_list(resources): + return [translate_target(r) for r in resources] + + if is_dict(resources): + return {k: translate_target(v) for k, v in resources.items()} + + fail("Unexpected type {} for resources".format(type(resources))) + +# Do a nasty conversion of e.g. ("supercaml", None, "ocaml-dev") to +# 'fbcode//third-party-buck/platform010/build/supercaml:ocaml-dev' +# (which will then get mapped to `shim//third-party/ocaml:ocaml-dev`). +def external_dep_to_target(t): + if type(t) == type(()): + return "fbcode//third-party-buck/platform010/build/{}:{}".format(t[0], t[2]) else: - return x + return "fbcode//third-party-buck/platform010/build/{}:{}".format(t, t) -def _fix_dep_in_string(x: str) -> str: - """Replace internal labels in string values such as env-vars.""" - return (x - .replace("//buck2/", "root//")) +def external_deps_to_targets(ts): + return [external_dep_to_target(t) for t in ts] diff --git a/shim/target_determinator/macros/ci.bzl b/shim/target_determinator/macros/ci.bzl index 85ca974dcef44..0c02e6fc3ab46 100644 --- a/shim/target_determinator/macros/ci.bzl +++ b/shim/target_determinator/macros/ci.bzl @@ -5,11 +5,16 @@ # License, Version 2.0 found in the LICENSE-APACHE file in the root directory # of this source tree. +# https://internalfb.com/code/fbsource/fbcode/target_determinator/macros/README.md + def _lbl(*_args): return "" -def _package(_values, overwrite = False): - _ = overwrite +def _package( + _values, + # starlark-lint-disable unused-argument + overwrite = False): # @unused + pass ci = struct( package = _package, diff --git a/shim/third-party/binutils/BUCK b/shim/third-party/binutils/BUCK new file mode 100644 index 0000000000000..8f25070f895b0 --- /dev/null +++ b/shim/third-party/binutils/BUCK @@ -0,0 +1,19 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
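+
+# libiberty does not ship pkg-config metadata, so this uses the plain
+# system_library helper: record the distro packages and export -liberty
+# directly.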
+ +load("@//third-party:defs.bzl", "system_library") + +oncall("open_source") + +system_library( + name = "iberty", + exported_linker_flags = ["-liberty"], + packages = { + "//os:linux-fedora": ["binutils-devel"], + "//os:linux-ubuntu": ["libiberty-dev"], + }, +) diff --git a/shim/third-party/boost/BUCK b/shim/third-party/boost/BUCK new file mode 100644 index 0000000000000..a0d3bbf4bcca6 --- /dev/null +++ b/shim/third-party/boost/BUCK @@ -0,0 +1,32 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@//third-party/boost:boost.bzl", "boost_libs") + +oncall("open_source") + +boost_libs( + header_only = [ + "algorithm", + "interprocess", + "lexical_cast", + "multi_index", + "preprocessor", + "program_options", + "range", + "sort", + "variant", + ], + libraries = [ + "container", + "context", + "filesystem", + "random", + "regex", + "thread", + ], +) diff --git a/shim/third-party/boost/boost.bzl b/shim/third-party/boost/boost.bzl new file mode 100644 index 0000000000000..7b2836318953d --- /dev/null +++ b/shim/third-party/boost/boost.bzl @@ -0,0 +1,41 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@//third-party:defs.bzl", "system_library") + +HOMEBREW_BREW = "boost" + +def boost_libs(libraries, header_only): + system_library( + name = "boost", + packages = { + "//os:linux-fedora": ["boost-devel"], + "//os:linux-ubuntu": ["libboost-all-dev"], + "//os:macos-homebrew": ["boost"], + }, + ) + + for library in libraries: + boost_library(library, False) + + for library in header_only: + boost_library(library, True) + +def boost_library(library: str, header_only: bool): + exported_linker_flags = [] if header_only else ["-lboost_{}".format(library)] + + system_library( + name = "boost_{}".format(library), + packages = { + "//os:linux-fedora": ["boost-devel"], + "//os:linux-ubuntu": [ + "libboost-dev" if header_only else "libboost-{}-dev".format(library), + ], + "//os:macos-homebrew": ["boost"], + }, + exported_linker_flags = exported_linker_flags, + ) diff --git a/shim/third-party/brotli/BUCK b/shim/third-party/brotli/BUCK new file mode 100644 index 0000000000000..fd7de2d707ad1 --- /dev/null +++ b/shim/third-party/brotli/BUCK @@ -0,0 +1,28 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +load("@//third-party:defs.bzl", "pkgconfig_system_library") + +oncall("open_source") + +PACKAGES = { + "//os:linux-fedora": ["brotli-devel"], + "//os:linux-ubuntu": ["libbrotli-dev"], + "//os:macos-homebrew": ["brotli"], +} + +pkgconfig_system_library( + name = "brotli_decode", + packages = PACKAGES, + pkgconfig_name = "libbrotlidec", +) + +pkgconfig_system_library( + name = "brotli_encode", + packages = PACKAGES, + pkgconfig_name = "libbrotlienc", +) diff --git a/shim/third-party/bzip2/BUCK b/shim/third-party/bzip2/BUCK new file mode 100644 index 0000000000000..1ee23b853b5dc --- /dev/null +++ b/shim/third-party/bzip2/BUCK @@ -0,0 +1,35 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@//third-party:defs.bzl", "pkgconfig_system_library") + +oncall("open_source") + +prebuilt_cxx_library( + name = "pkgconfig_unsupported", + exported_linker_flags = select({ + "//os:linux-ubuntu": ["-lbz2"], + "DEFAULT": [], + }), + target_compatible_with = [ + "//os:linux-ubuntu", + ], + visibility = [], +) + +pkgconfig_system_library( + name = "bz2", + packages = { + "//os:linux-fedora": ["bzip2-devel"], + "//os:linux-ubuntu": ["libbz2-dev"], + "//os:macos-homebrew": ["bzip2"], + }, + pkgconfig_name = "bzip2", + unsupported = { + "//os:linux-ubuntu": [":pkgconfig_unsupported"], + }, +) diff --git a/shim/third-party/defs.bzl b/shim/third-party/defs.bzl new file mode 100644 index 0000000000000..b35a0b3688d28 --- /dev/null +++ b/shim/third-party/defs.bzl @@ -0,0 +1,157 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +load("@prelude//third-party:pkgconfig.bzl", "external_pkgconfig_library") + +HOMEBREW_CONSTRAINT = "//os:macos-homebrew" + +def system_library( + name: str, + packages = None, + visibility = ["PUBLIC"], + deps = [], + exported_deps = [], + **kwargs): + system_packages_target_name = "__{}_system_pkgs".format(name) + packages = packages or dict() + packages["DEFAULT"] = [] + system_packages( + name = system_packages_target_name, + packages = select(packages), + ) + deps = deps + [":" + system_packages_target_name] + + brews = packages.get(HOMEBREW_CONSTRAINT) + if brews != None: + exported_deps = exported_deps + select({ + HOMEBREW_CONSTRAINT: _system_homebrew_targets(name, brews), + "DEFAULT": [], + }) + + native.prebuilt_cxx_library( + name = name, + visibility = visibility, + deps = deps, + exported_deps = exported_deps, + **kwargs + ) + +def pkgconfig_system_library( + name: str, + pkgconfig_name = None, + packages = None, + visibility = ["PUBLIC"], + deps = [], + exported_deps = [], + unsupported = dict(), + **kwargs): + system_packages_target_name = "__{}_system_pkgs".format(name) + packages = packages or dict() + packages["DEFAULT"] = [] + system_packages( + name = system_packages_target_name, + packages = select(packages), + ) + + deps = exported_deps + deps + + if len(unsupported) == 0: + external_pkgconfig_library( + name = name, + package = pkgconfig_name, + visibility = visibility, + deps = deps + [":" + system_packages_target_name], + **kwargs + ) + else: + exported_deps_select_map = {} + for constraint, constraint_exported_deps in unsupported.items(): + if constraint == HOMEBREW_CONSTRAINT: + brews = packages.get(constraint, []) + constraint_exported_deps = constraint_exported_deps + _system_homebrew_targets(name, brews) + + exported_deps_select_map[constraint] = constraint_exported_deps + + pkgconfig_target_name = "__{}_pkgconfig".format(name) + external_pkgconfig_library( + name = pkgconfig_target_name, + package = pkgconfig_name, + visibility = [], + deps = deps, + **kwargs + ) + exported_deps_select_map["DEFAULT"] = [":" + pkgconfig_target_name] + + native.prebuilt_cxx_library( + name = name, + visibility = visibility, + deps = [":" + system_packages_target_name], + exported_deps = select(exported_deps_select_map), + ) + +def _system_homebrew_targets( + name: str, + brews): + deps = [] + for brew in brews: + homebrew_target_name = "__{}_homebrew_{}".format(name, brew) + homebrew_library( + name = homebrew_target_name, + brew = brew, + ) + deps.append(":" + homebrew_target_name) + + return deps + +def _system_packages_impl(ctx: AnalysisContext) -> list[Provider]: + return [DefaultInfo()] + +system_packages = rule( + impl = lambda _ctx: [DefaultInfo()], + attrs = { + "deps": attrs.list(attrs.dep(), default = []), + "packages": attrs.list(attrs.string()), + }, +) + +def homebrew_library( + name: str, + brew: str, + homebrew_header_path = "include", + exported_preprocessor_flags = [], + exported_linker_flags = [], + target_compatible_with = ["//os:macos-homebrew"], + **kwargs): + preproc_flags_rule_name = "__{}__{}__preproc_flags".format(name, brew) + native.genrule( + name = preproc_flags_rule_name, + type = "homebrew_library_preproc_flags", + out = "out", + cmd = "echo \"-I`brew --prefix {}`/{}\" > $OUT".format(brew, homebrew_header_path), + target_compatible_with = target_compatible_with, + ) + + linker_flags_rule_name = "__{}__{}__linker_flags".format(name, brew) + native.genrule( + name = linker_flags_rule_name, + type = "homebrew_library_linker_flags", + out = "out", + 
cmd = "echo \"-L`brew --prefix {}`/lib\" > $OUT".format(brew), + target_compatible_with = target_compatible_with, + ) + + native.prebuilt_cxx_library( + name = name, + exported_preprocessor_flags = exported_preprocessor_flags + [ + "@$(location :{})/preproc_flags.txt".format(preproc_flags_rule_name), + ], + exported_linker_flags = exported_linker_flags + [ + "@$(location :{})/linker_flags.txt".format(linker_flags_rule_name), + ], + target_compatible_with = target_compatible_with, + **kwargs + ) diff --git a/shim/third-party/double_conversion/BUCK b/shim/third-party/double_conversion/BUCK new file mode 100644 index 0000000000000..750949b1c8c64 --- /dev/null +++ b/shim/third-party/double_conversion/BUCK @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@//third-party:defs.bzl", "system_library") + +oncall("open_source") + +system_library( + name = "double_conversion", + exported_linker_flags = ["-ldouble-conversion"], + packages = { + "//os:linux-fedora": ["double-conversion-devel"], + "//os:linux-ubuntu": ["libdouble-conversion-dev"], + "//os:macos-homebrew": ["double-conversion"], + }, +) diff --git a/shim/third-party/fast_float/BUCK b/shim/third-party/fast_float/BUCK new file mode 100644 index 0000000000000..35ce9db12848b --- /dev/null +++ b/shim/third-party/fast_float/BUCK @@ -0,0 +1,19 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@//third-party:defs.bzl", "system_library") + +oncall("open_source") + +system_library( + name = "fast_float", + packages = { + "//os:linux-fedora": ["fast_float-devel"], + "//os:linux-ubuntu": ["libfast-float-dev"], + "//os:macos-homebrew": ["fast_float"], + }, +) diff --git a/shim/third-party/fmt/BUCK b/shim/third-party/fmt/BUCK new file mode 100644 index 0000000000000..024783475bc8d --- /dev/null +++ b/shim/third-party/fmt/BUCK @@ -0,0 +1,19 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@//third-party:defs.bzl", "pkgconfig_system_library") + +oncall("open_source") + +pkgconfig_system_library( + name = "fmt", + packages = { + "//os:linux-fedora": ["fmt-devel"], + "//os:linux-ubuntu": ["libfmt-dev"], + "//os:macos-homebrew": ["fmt"], + }, +) diff --git a/shim/third-party/gflags/BUCK b/shim/third-party/gflags/BUCK new file mode 100644 index 0000000000000..d6dac8a0d08ae --- /dev/null +++ b/shim/third-party/gflags/BUCK @@ -0,0 +1,19 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +load("@//third-party:defs.bzl", "pkgconfig_system_library") + +oncall("open_source") + +pkgconfig_system_library( + name = "gflags", + packages = { + "//os:linux-fedora": ["gflags-devel"], + "//os:linux-ubuntu": ["libgflags-dev"], + "//os:macos-homebrew": ["gflags"], + }, +) diff --git a/shim/third-party/glibc/BUCK b/shim/third-party/glibc/BUCK new file mode 100644 index 0000000000000..eb46aa0cb7d12 --- /dev/null +++ b/shim/third-party/glibc/BUCK @@ -0,0 +1,36 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@shim//build_defs:prebuilt_cpp_library.bzl", "prebuilt_cpp_library") + +oncall("open_source") + +prebuilt_cpp_library(name = "glibc") + +alias( + name = "rt", + actual = ":glibc", + visibility = ["PUBLIC"], +) + +alias( + name = "ct", + actual = ":glibc", + visibility = ["PUBLIC"], +) + +alias( + name = "dl", + actual = ":glibc", + visibility = ["PUBLIC"], +) + +alias( + name = "pthread", + actual = ":glibc", + visibility = ["PUBLIC"], +) diff --git a/shim/third-party/glog/BUCK b/shim/third-party/glog/BUCK new file mode 100644 index 0000000000000..830b1dd11ba42 --- /dev/null +++ b/shim/third-party/glog/BUCK @@ -0,0 +1,23 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@//third-party:defs.bzl", "pkgconfig_system_library") + +oncall("open_source") + +pkgconfig_system_library( + name = "glog", + packages = { + "//os:linux-fedora": ["glog-devel"], + "//os:linux-ubuntu": ["libgoogle-glog-dev"], + "//os:macos-homebrew": ["glog"], + }, + pkgconfig_name = "libglog", + exported_deps = [ + "//third-party/gflags:gflags", + ], +) diff --git a/shim/third-party/googletest/BUCK b/shim/third-party/googletest/BUCK new file mode 100644 index 0000000000000..89fed880abc42 --- /dev/null +++ b/shim/third-party/googletest/BUCK @@ -0,0 +1,35 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@//third-party:defs.bzl", "pkgconfig_system_library") + +oncall("open_source") + +pkgconfig_system_library( + name = "gtest", + packages = { + "//os:linux-fedora": ["gtest-devel"], + "//os:linux-ubuntu": ["libgtest-dev"], + "//os:macos-homebrew": ["googletest"], + }, +) + +pkgconfig_system_library( + name = "gmock", + packages = { + "//os:linux-fedora": ["gmock-devel"], + "//os:linux-ubuntu": ["libgmock-dev"], + "//os:macos-homebrew": ["googletest"], + }, +) + +cxx_library( + name = "cpp_unittest_main", + srcs = ["gtest_main.cpp"], + visibility = ["PUBLIC"], + deps = [":gtest"], +) diff --git a/shim/third-party/googletest/gtest_main.cpp b/shim/third-party/googletest/gtest_main.cpp new file mode 100644 index 0000000000000..8141caf4ca086 --- /dev/null +++ b/shim/third-party/googletest/gtest_main.cpp @@ -0,0 +1,66 @@ +// Copyright 2006, Google Inc. +// All rights reserved. 
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <cstdio>
+
+#include "gtest/gtest.h"
+
+#if defined(GTEST_OS_ESP8266) || defined(GTEST_OS_ESP32) || \
+    (defined(GTEST_OS_NRF52) && defined(ARDUINO))
+// Arduino-like platforms: program entry points are setup/loop instead of main.
+
+#ifdef GTEST_OS_ESP8266
+extern "C" {
+#endif
+
+void setup() { testing::InitGoogleTest(); }
+
+void loop() { RUN_ALL_TESTS(); }
+
+#ifdef GTEST_OS_ESP8266
+}
+#endif
+
+#elif defined(GTEST_OS_QURT)
+// QuRT: program entry point is main, but argc/argv are unusable.
+
+GTEST_API_ int main() {
+  printf("Running main() from %s\n", __FILE__);
+  testing::InitGoogleTest();
+  return RUN_ALL_TESTS();
+}
+#else
+// Normal platforms: program entry point is main, argc/argv are initialized.
+
+GTEST_API_ int main(int argc, char **argv) {
+  printf("Running main() from %s\n", __FILE__);
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
+#endif
diff --git a/shim/third-party/jemalloc/BUCK b/shim/third-party/jemalloc/BUCK
new file mode 100644
index 0000000000000..04cdb0c6c0465
--- /dev/null
+++ b/shim/third-party/jemalloc/BUCK
@@ -0,0 +1,20 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+load("@//third-party:defs.bzl", "pkgconfig_system_library")
+
+oncall("open_source")
+
+pkgconfig_system_library(
+    name = "headers",
+    packages = {
+        "//os:linux-fedora": ["jemalloc-devel"],
+        "//os:linux-ubuntu": ["libjemalloc-dev"],
+        "//os:macos-homebrew": ["jemalloc"],
+    },
+    pkgconfig_name = "jemalloc",
+)
diff --git a/shim/third-party/jvm/BUCK b/shim/third-party/jvm/BUCK
new file mode 100644
index 0000000000000..88188c51c7fe9
--- /dev/null
+++ b/shim/third-party/jvm/BUCK
@@ -0,0 +1,17 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@//third-party:defs.bzl", "system_library") + +oncall("open_source") + +system_library( + name = "jvm", + packages = { + "//os:macos-homebrew": ["openjdk"], + }, +) diff --git a/shim/third-party/libaegis/BUCK b/shim/third-party/libaegis/BUCK new file mode 100644 index 0000000000000..4d49fa66e261c --- /dev/null +++ b/shim/third-party/libaegis/BUCK @@ -0,0 +1,30 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +oncall("open_source") + +git_fetch( + name = "libaegis.git", + repo = "https://github.com/jedisct1/libaegis.git", + rev = "9c7677d742aaae312e09b1574998acba620188d8", # tag 0.1.23 +) + +genrule( + name = "libaegis-cmake", + out = "out", + cmd = "cmake -DCMAKE_INSTALL_PREFIX=$OUT $(location :libaegis.git) && make install", +) + +prebuilt_cxx_library( + name = "aegis", + exported_linker_flags = [ + "-L$(location :libaegis-cmake)/lib64", + "-laegis", + ], + exported_preprocessor_flags = ["-I$(location :libaegis-cmake)/include"], + visibility = ["PUBLIC"], +) diff --git a/shim/third-party/libaio/BUCK b/shim/third-party/libaio/BUCK new file mode 100644 index 0000000000000..65aef8962145d --- /dev/null +++ b/shim/third-party/libaio/BUCK @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@//third-party:defs.bzl", "system_library") + +oncall("open_source") + +system_library( + name = "aio", + exported_linker_flags = ["-laio"], + packages = { + "//os:linux-fedora": ["libaio-devel"], + "//os:linux-ubuntu": ["libaio-dev"], + "//os:macos-homebrew": ["libaio"], + }, +) diff --git a/shim/third-party/libdwarf/BUCK b/shim/third-party/libdwarf/BUCK new file mode 100644 index 0000000000000..07efec9cd0105 --- /dev/null +++ b/shim/third-party/libdwarf/BUCK @@ -0,0 +1,35 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
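+
+# Ubuntu's libdwarf packaging does not provide a usable .pc file, so the
+# pkg-config path is marked unsupported there and the include directory is
+# wired up manually via the prebuilt_cxx_library below.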
+ +load("@//third-party:defs.bzl", "pkgconfig_system_library") + +oncall("open_source") + +prebuilt_cxx_library( + name = "pkgconfig_unsupported", + exported_preprocessor_flags = select({ + "//os:linux-ubuntu": ["-I/usr/include/libdwarf"], + "DEFAULT": [], + }), + target_compatible_with = [ + "//os:linux-ubuntu", + ], + visibility = [], +) + +pkgconfig_system_library( + name = "dwarf", + packages = { + "//os:linux-fedora": ["libdwarf-devel"], + "//os:linux-ubuntu": ["libdwarf-dev"], + "//os:macos-homebrew": ["libdwarf"], + }, + pkgconfig_name = "libdwarf", + unsupported = { + "//os:linux-ubuntu": [":pkgconfig_unsupported"], + }, +) diff --git a/shim/third-party/libevent/BUCK b/shim/third-party/libevent/BUCK new file mode 100644 index 0000000000000..b69201e42c0b7 --- /dev/null +++ b/shim/third-party/libevent/BUCK @@ -0,0 +1,19 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@//third-party:defs.bzl", "pkgconfig_system_library") + +oncall("open_source") + +pkgconfig_system_library( + name = "libevent", + packages = { + "//os:linux-fedora": ["libevent-devel"], + "//os:linux-ubuntu": ["libevent-dev"], + "//os:macos-homebrew": ["libevent"], + }, +) diff --git a/shim/third-party/libgcc/BUCK b/shim/third-party/libgcc/BUCK new file mode 100644 index 0000000000000..c95eb4209e61c --- /dev/null +++ b/shim/third-party/libgcc/BUCK @@ -0,0 +1,19 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@//third-party:defs.bzl", "system_library") + +oncall("open_source") + +system_library( + name = "stdc++fs", + packages = { + "//os:linux-fedora": ["gcc"], + "//os:linux-ubuntu": ["gcc"], + "//os:macos-homebrew": ["gcc"], + }, +) diff --git a/shim/third-party/liboqs/BUCK b/shim/third-party/liboqs/BUCK new file mode 100644 index 0000000000000..74e4d23c70d19 --- /dev/null +++ b/shim/third-party/liboqs/BUCK @@ -0,0 +1,30 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@//third-party:defs.bzl", "pkgconfig_system_library") + +oncall("open_source") + +prebuilt_cxx_library( + name = "pkgconfig_unsupported", + target_compatible_with = [ + "//os:linux-ubuntu", + ], + visibility = [], +) + +pkgconfig_system_library( + name = "oqs", + packages = { + "//os:linux-fedora": ["liboqs-devel"], + "//os:macos-homebrew": ["liboqs"], + }, + pkgconfig_name = "liboqs", + unsupported = { + "//os:linux-ubuntu": [":pkgconfig_unsupported"], + }, +) diff --git a/shim/third-party/libsodium/BUCK b/shim/third-party/libsodium/BUCK new file mode 100644 index 0000000000000..b2d8fe4f354a2 --- /dev/null +++ b/shim/third-party/libsodium/BUCK @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@//third-party:defs.bzl", "pkgconfig_system_library") + +oncall("open_source") + +pkgconfig_system_library( + name = "sodium", + packages = { + "//os:linux-fedora": ["libsodium-devel"], + "//os:linux-ubuntu": ["libsodium-dev"], + "//os:macos-homebrew": ["libsodium"], + }, + pkgconfig_name = "libsodium", +) diff --git a/shim/third-party/libunwind/BUCK b/shim/third-party/libunwind/BUCK new file mode 100644 index 0000000000000..67375ea8bc1d1 --- /dev/null +++ b/shim/third-party/libunwind/BUCK @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@//third-party:defs.bzl", "pkgconfig_system_library") + +oncall("open_source") + +pkgconfig_system_library( + name = "unwind", + packages = { + "//os:linux-fedora": ["libunwind-devel"], + "//os:linux-ubuntu": ["libunwind-dev"], + "//os:macos-homebrew": ["libunwind"], + }, + pkgconfig_name = "libunwind", +) diff --git a/shim/third-party/liburing/BUCK b/shim/third-party/liburing/BUCK new file mode 100644 index 0000000000000..a2810945e1f19 --- /dev/null +++ b/shim/third-party/liburing/BUCK @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@//third-party:defs.bzl", "pkgconfig_system_library") + +oncall("open_source") + +pkgconfig_system_library( + name = "uring", + packages = { + "//os:linux-fedora": ["liburing-devel"], + "//os:linux-ubuntu": ["liburing-dev"], + "//os:macos-homebrew": ["liburing"], + }, + pkgconfig_name = "liburing", +) diff --git a/shim/third-party/lz4/BUCK b/shim/third-party/lz4/BUCK new file mode 100644 index 0000000000000..553bbd086bf59 --- /dev/null +++ b/shim/third-party/lz4/BUCK @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@//third-party:defs.bzl", "pkgconfig_system_library") + +oncall("open_source") + +pkgconfig_system_library( + name = "lz4", + packages = { + "//os:linux-fedora": ["lz4-devel"], + "//os:linux-ubuntu": ["liblz4-dev"], + "//os:macos-homebrew": ["lz4"], + }, + pkgconfig_name = "liblz4", +) diff --git a/shim/third-party/macros/rust_third_party.bzl b/shim/third-party/macros/rust_third_party.bzl index f6a0bd3d227ca..d00ddce82274c 100644 --- a/shim/third-party/macros/rust_third_party.bzl +++ b/shim/third-party/macros/rust_third_party.bzl @@ -1,3 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + # @nolint def third_party_rust_prebuilt_cxx_library(name, **kwargs): diff --git a/shim/third-party/ocaml/BUCK b/shim/third-party/ocaml/BUCK new file mode 100644 index 0000000000000..1b62b09b57723 --- /dev/null +++ b/shim/third-party/ocaml/BUCK @@ -0,0 +1,14 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# @lint-ignore BUCKLINT: avoid "Direct usage of native rules is not allowed." +prebuilt_cxx_library( + name = "ocaml-dev", + header_dirs = ["opam/lib/ocaml"], + header_only = True, + visibility = ["PUBLIC"], +) diff --git a/shim/third-party/ocaml/opam/lib/ocaml b/shim/third-party/ocaml/opam/lib/ocaml new file mode 100644 index 0000000000000..971daaba53aad --- /dev/null +++ b/shim/third-party/ocaml/opam/lib/ocaml @@ -0,0 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# Fake file so that buck2 can run in non-ocamlrep repos diff --git a/shim/third-party/openssl/BUCK b/shim/third-party/openssl/BUCK new file mode 100644 index 0000000000000..beefbd19d2f6c --- /dev/null +++ b/shim/third-party/openssl/BUCK @@ -0,0 +1,26 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@//third-party:defs.bzl", "pkgconfig_system_library") + +oncall("open_source") + +pkgconfig_system_library( + name = "ssl", + packages = { + "//os:linux-fedora": ["openssl-devel"], + "//os:linux-ubuntu": ["libssl-dev"], + "//os:macos-homebrew": ["openssl"], + }, + pkgconfig_name = "openssl", +) + +alias( + name = "crypto", + actual = ":ssl", + visibility = ["PUBLIC"], +) diff --git a/shim/third-party/proto/BUCK b/shim/third-party/proto/BUCK index 14c88fc35d91f..aefe5570a8e11 100644 --- a/shim/third-party/proto/BUCK +++ b/shim/third-party/proto/BUCK @@ -1,3 +1,11 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# @lint-ignore BUCKLINT: avoid "Direct usage of native rules is not allowed." load(":defs.bzl", "protoc_distribution") protoc_distribution( @@ -5,12 +13,14 @@ protoc_distribution( version = "21.4", ) +# @lint-ignore BUCKLINT: avoid "Direct usage of native rules is not allowed." alias( name = "protoc", actual = ":distribution[protoc]", visibility = ["PUBLIC"], ) +# @lint-ignore BUCKLINT: avoid "Direct usage of native rules is not allowed." 
alias( name = "google_protobuf", actual = ":distribution[google_protobuf]", diff --git a/shim/third-party/proto/defs.bzl b/shim/third-party/proto/defs.bzl index 00991b93a29e7..9f8d50608b8f5 100644 --- a/shim/third-party/proto/defs.bzl +++ b/shim/third-party/proto/defs.bzl @@ -1,3 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + load("@prelude//http_archive/exec_deps.bzl", "HttpArchiveExecDeps") load(":releases.bzl", "releases") @@ -49,6 +56,7 @@ def _turn_http_archive_into_protoc_distribution( def _download_protoc_distribution_impl(ctx: AnalysisContext) -> Promise: protoc_filename = "bin/protoc" + ctx.attrs.exe_extension + # @lint-ignore BUCKLINT: avoid "Direct usage of native rules is not allowed." return ctx.actions.anon_target(native.http_archive, { "exec_deps": ctx.attrs._http_archive_exec_deps, "sha256": ctx.attrs.sha256, @@ -57,7 +65,7 @@ def _download_protoc_distribution_impl(ctx: AnalysisContext) -> Promise: "include", ], "urls": [ctx.attrs.url], - }, with_artifacts = True).promise.map(lambda providers: _turn_http_archive_into_protoc_distribution( + }).promise.map(lambda providers: _turn_http_archive_into_protoc_distribution( providers = providers, protoc_filename = protoc_filename, )) diff --git a/shim/third-party/proto/releases.bzl b/shim/third-party/proto/releases.bzl index 153ca496e4c99..2feb48e70e6b2 100644 --- a/shim/third-party/proto/releases.bzl +++ b/shim/third-party/proto/releases.bzl @@ -1,3 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + # @generated # Update with ./update.py > releases.bzl releases = { diff --git a/shim/third-party/proto/update.py b/shim/third-party/proto/update.py index cfee489f671ea..c3ac3a631a102 100755 --- a/shim/third-party/proto/update.py +++ b/shim/third-party/proto/update.py @@ -1,4 +1,11 @@ #!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + """Query recent Github release artifacts for protobuf. Use this script to update the releases.bzl file that contains metadata about @@ -36,6 +43,14 @@ } } """ +COPYRIGHT_HEADER = """\ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+""" async def query_releases(): async with aiohttp.ClientSession(raise_for_status=True) as session: @@ -91,6 +106,7 @@ async def main(): releases = await query_releases() formatted = format_releases(releases) with_sha256 = await hash_releases(formatted) + print(COPYRIGHT_HEADER) print("# @" + "generated") print("# Update with ./update.py > releases.bzl") print("releases = ", json.dumps(with_sha256, indent=4)) diff --git a/shim/third-party/python/BUCK b/shim/third-party/python/BUCK new file mode 100644 index 0000000000000..d87d251b4f712 --- /dev/null +++ b/shim/third-party/python/BUCK @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@//third-party:defs.bzl", "pkgconfig_system_library") + +oncall("open_source") + +pkgconfig_system_library( + name = "python", + packages = { + "//os:linux-fedora": ["python3-devel"], + "//os:linux-ubuntu": ["python3-dev"], + "//os:macos-homebrew": ["python3"], + }, + pkgconfig_name = "python3", +) diff --git a/shim/third-party/range-v3/BUCK b/shim/third-party/range-v3/BUCK new file mode 100644 index 0000000000000..b81a4de31cc3b --- /dev/null +++ b/shim/third-party/range-v3/BUCK @@ -0,0 +1,19 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@//third-party:defs.bzl", "system_library") + +oncall("open_source") + +system_library( + name = "range-v3", + packages = { + "//os:linux-fedora": ["range-v3-devel"], + "//os:linux-ubuntu": ["librange-v3-dev"], + "//os:macos-homebrew": ["range-v3"], + }, +) diff --git a/shim/third-party/rust/.gitignore b/shim/third-party/rust/.gitignore index c18f3dc437a24..95e346d623f6d 100644 --- a/shim/third-party/rust/.gitignore +++ b/shim/third-party/rust/.gitignore @@ -1,3 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + # Ignore Cargo-related stuff .cargo/registry .cargo/git diff --git a/shim/third-party/rust/Cargo.toml b/shim/third-party/rust/Cargo.toml index 4a50a7914fa21..b569ecfaaa4d1 100644 --- a/shim/third-party/rust/Cargo.toml +++ b/shim/third-party/rust/Cargo.toml @@ -1,3 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + # Definitions of third-party libraries used for buck2 build of buck2. # This file is **not** used by cargo build of buck2. # See the /docs/bootstrapping.md for more details. 
@@ -21,8 +28,8 @@ path = "top/main.rs" [dependencies] gazebo = {version = "0.8.1", features = ["str_pattern_extensions"]} -fbinit = "0.1" -sorted_vector_map = "0.1" +fbinit = "0.2" +sorted_vector_map = "0.2" watchman_client = "0.8.0" annotate-snippets = { version = "0.9.0", features = ["color"] }
@@ -34,20 +41,23 @@ assert_matches = "1.5" async-compression = { version = "0.4.1", features = ["tokio", "gzip", "zstd"] } async-condvar-fair = { version = "0.2.2", features = ["parking_lot_0_11", "tokio"] } async-recursion = "1.0" -async-scoped = { version = "0.7.1", features = ["use-tokio"] } +async-scoped = { version = "0.8", features = ["use-tokio"] } async-trait = "0.1.24" atomic = "0.5.1" backtrace = "0.3.51" -base64 = "0.13.0" +base64 = "0.21.7" bincode = "1.3.3" +bitflags = "2.4" blake3 = { version = "1.3.1", features = [ "default", "digest", "rayon", "std", "traits-preview" ] } -bumpalo = "=3.11.1" +bstr = { version = "1.4.0", features = ["serde", "std", "unicode"] } +bumpalo = { version = "3.14.0", features = ["allocator_api", "collections"] } byteorder = "1.4.3" bytes = "1.0" bytesize = "1.1.0" chrono = "0.4.28" -clap = { package = "clap", version = "4.0.7", features = ["derive", "env"] } -clap-3 = { package = "clap", version = "3.2.24", features = ["derive", "env"] } +clap = { package = "clap", version = "4.5.4", features = ["derive", "env", "string"] } +clap-3 = { package = "clap", version = "3.2.24", features = ["derive", "env", "regex", "unicode", "wrap_help"] } +clap_complete = { package = "clap_complete", version = "4.5.1" } common-path = "1.0.0" compact_str = "0.6.1" constant_time_eq = "0.2.4"
@@ -59,14 +69,15 @@ crossbeam-epoch = "0.9.7" crossterm = "0.27" csv = "1.1" ctor = "0.1.16" -dashmap = "4.0.2" +dashmap = "5.5.3" debugserver-types = "0.5.0" derivative = "2.2" -derive_more = "0.99.3" +derive_more = { version = "1.0.0", features = ["full"] } digest = "0.10" dirs = "3.0.1" dunce = "1.0.2" either = "1.8" +elf = "0.7.4" enum-iterator = "1.4.1" enum-map = "0.6.3" env_logger = "0.9.0"
@@ -79,9 +90,10 @@ fnv = "1.0.7" fs4 = { version = "0.6", features = ["sync"] } futures = { version = "0.3.28", features = ["async-await", "compat"] } futures-intrusive = "0.4" +fxhash = "0.2.1" glob = "0.3.0" globset = "0.4.10" -hashbrown = { version = "0.12.3", features = ["raw"] } +hashbrown = { version = "0.14.3", features = ["raw"] } hex = "0.4.3" higher-order-closure = "0.0.5" hostname = "0.3.1"
@@ -96,24 +108,28 @@ hyper-timeout = "0.4" hyper-unix-connector = "0.2" indent_write = "2.2.0" indenter = "0.3.3" -indexmap = { version = "1.9.1", features = ["serde-1"] } +indexmap = { version = "2.1.0", features = ["arbitrary", "rayon", "serde"] } indoc = "1.0.3" inferno = { version = "0.11.11", default-features = false } internment = { version = "0.7", features = ["arc"] } inventory = "0.3.8" -ipnetwork = "0.15" +ipnetwork = "0.20.0" is_proc_translated = "0.1.1" -itertools = "0.10.3" +itertools = "0.13.0" jemallocator = { version = "0.5.0", features = ["profiling"] } lalrpop = { version = "0.19.7", artifact = "bin", features = ["pico-args"] } lalrpop-util = "0.19.7" libc = "0.2.132" linked-hash-map = { version = "0.5", features = ["serde_impl"] } +linkme = { version = "0.3.17", features = ["used_linker"] } log = "0.4" logos = "0.12" +lru = "0.12.3" lsp-server = "0.7.2" lsp-types = "0.94.1" maplit = "1.0.2" +mappable-rc = { version = "0.1.1", features = ["std"] } +md-5 = "0.10" memchr = "2.4.1" memmap2 = "0.5.0" memoffset = "0.6.4"
@@ -126,16 +142,18 @@ num-traits = "0.2" num_cpus = "1.11" num_enum = "0.5" object = "0.29.0" +oid-registry = "0.6.1" once_cell = "1.8" -os_str_bytes = "6.0" +os_str_bytes = { version = "6.6.0", features = ["conversions"] } parking_lot = { version = "0.11.2", features = ["send_guard"] } paste = "1.0" +pathdiff = "0.2" perf-event = "0.4" perf-event-open-sys = "4.0" pin-project = "0.4.29" plist = "0.5" pretty_assertions = "1.2.1" -proc-macro2 = "1.0" +proc-macro2 = { version = "1.0.70", features = ["span-locations"] } prost = "0.11.9" prost-build = "0.11.9" prost-derive = "0.11.9"
@@ -149,18 +167,21 @@ rand_distr = "0.4" ref-cast = "1.0.0" regex = "1.5.4" relative-path = { version = "1.7.0", features = ["serde"] } +ring = "=0.17.5" # Upgrading this is possible, but a pain, so we don't want to pick up every new minor version rusqlite = { version = "0.29.0", features = ["bundled"] } +rustc-hash = "1.1.0" rustls = "0.21.0" rustls-native-certs = { package = "rustls-native-certs", version = "0.6.2" } rustls-pemfile = { package = "rustls-pemfile", version = "1.0.0" } rustyline = "11.0" scopeguard = "1.0.0" sequence_trie = "0.3.6" -serde = { version = "1.0.173", features = ["derive"] } -serde_json = "1.0.48" +serde = { version = "1.0.173", features = ["derive", "rc"] } +serde_json = { version = "1.0.48", features = ["raw_value"] } sha1 = "0.10" sha2 = "0.10" -shlex = "1.0" +shlex = "1.3" +similar = { version = "2.2.0", features = ["inline"] } siphasher = "0.3.3" slab = "0.4.7" slog = "2.7.0"
@@ -168,10 +189,13 @@ smallvec = { version = "1.10", features = ["const_generics", "const_new", "serde static_assertions = "1.1.0" strsim = "0.10.0" structopt = "0.3.23" +strum = { version = "0.26.2", features = ["derive", "strum_macros"] } syn = { version = "2", features = ["extra-traits", "full", "visit"] } +syn1 = { package = "syn", version = "1.0.109", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } +synstructure = "0.12" sync_wrapper = "0.1.0" sys-info = "0.9.1" -sysinfo = "0.26.8" +sysinfo = "0.30.11" take_mut = "0.2.2" tar = "0.4.38" tempfile = "3.1.0"
@@ -193,21 +217,29 @@ tower = "0.4" tower-layer = "0.3.1" tower-service = "0.3.2" tracing = "0.1.22" -tracing-subscriber = { version = "0.3", features = ["env-filter"] } -triomphe = "0.1.8" +tracing-core = "0.1.32" +tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] } +triomphe = "0.1.11" trybuild = "1.0.56" +typed-arena = "2.0" twox-hash = "1.6.1" unicode-segmentation = "1.7" uuid = { version = "1.2", features = ["v4"] } walkdir = "2.3.2" which = "4.3.0" +whoami = "1.5.1" windows_x86_64_msvc = "=0.48.0" # our fixup only works if we are on precisely 0.48.0 -winapi = { version = "0.3", features = ["everything"] } +winapi = { version = "0.3", features = ["everything", "std"] } +x509-parser = { version = "0.14.0", features = ["verify"] } xattr = "0.2.2" zip = "0.5" -zstd = "=0.11.1" +zstd = "0.13.0" [patch.crates-io] # For https://github.com/jimblandy/perf-event/pull/29 perf-event = { git = "https://github.com/krallin/perf-event.git", rev = "86224a9bc025d5d19f719542f27c8c629a08b167", version = "0.4" } perf-event-open-sys = { git = "https://github.com/krallin/perf-event.git", rev = "86224a9bc025d5d19f719542f27c8c629a08b167", version = "4.0" } + +# Windows-specific dependencies +[target."cfg(windows)".dependencies] +winver = "1"
diff --git a/shim/third-party/rust/defs.bzl b/shim/third-party/rust/defs.bzl index 9b89668baca42..ae25b318287ca 100644 --- a/shim/third-party/rust/defs.bzl +++ b/shim/third-party/rust/defs.bzl @@ -1,5 +1,14 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + def rust_library_from_crates(name): + # @lint-ignore BUCKLINT: avoid "Direct usage of native rules is not allowed." native.export_file(name = name, src = "BUCK", visibility = ["PUBLIC"]) def rust_binary_from_crates(name): + # @lint-ignore BUCKLINT: avoid "Direct usage of native rules is not allowed." native.genrule(name = name, cmd = "exit 1", executable = True, out = "out", visibility = ["PUBLIC"])
diff --git a/shim/third-party/rust/fixups/ahash/fixups.toml b/shim/third-party/rust/fixups/ahash/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/ahash/fixups.toml +++ b/shim/third-party/rust/fixups/ahash/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = []
diff --git a/shim/third-party/rust/fixups/anyhow/fixups.toml b/shim/third-party/rust/fixups/anyhow/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/anyhow/fixups.toml +++ b/shim/third-party/rust/fixups/anyhow/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = []
diff --git a/shim/third-party/rust/fixups/async-trait/fixups.toml b/shim/third-party/rust/fixups/async-trait/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/async-trait/fixups.toml +++ b/shim/third-party/rust/fixups/async-trait/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = []
diff --git a/shim/third-party/rust/fixups/atomic/fixups.toml b/shim/third-party/rust/fixups/atomic/fixups.toml index 5e026f75e0de3..d514599437f6a 100644 --- a/shim/third-party/rust/fixups/atomic/fixups.toml +++ b/shim/third-party/rust/fixups/atomic/fixups.toml @@ -1,2 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + [[buildscript]] [buildscript.rustc_flags]
diff --git a/shim/third-party/rust/fixups/axum-core/fixups.toml b/shim/third-party/rust/fixups/axum-core/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/axum-core/fixups.toml +++ b/shim/third-party/rust/fixups/axum-core/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates.
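Note on the defs.bzl helpers above: both are deliberate stubs for the open-source shim — rust_library_from_crates merely exports the BUCK file so the label resolves, and rust_binary_from_crates materializes as a genrule that fails (exit 1) if anything actually tries to run its output. A usage sketch under that reading (the load path and target name are illustrative):

load("//third-party/rust:defs.bzl", "rust_binary_from_crates")

# The label exists so references resolve, but executing it is an error.
rust_binary_from_crates(name = "some_cargo_tool")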
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] diff --git a/shim/third-party/rust/fixups/axum/fixups.toml b/shim/third-party/rust/fixups/axum/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/axum/fixups.toml +++ b/shim/third-party/rust/fixups/axum/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] diff --git a/shim/third-party/rust/fixups/backtrace/fixups.toml b/shim/third-party/rust/fixups/backtrace/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/backtrace/fixups.toml +++ b/shim/third-party/rust/fixups/backtrace/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] diff --git a/shim/third-party/rust/fixups/blake3/fixups.toml b/shim/third-party/rust/fixups/blake3/fixups.toml index 389fd57acfc70..19d0d8adf8302 100644 --- a/shim/third-party/rust/fixups/blake3/fixups.toml +++ b/shim/third-party/rust/fixups/blake3/fixups.toml @@ -1,3 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] ## The various X86 platform fixups diff --git a/shim/third-party/rust/fixups/bumpalo/fixups.toml b/shim/third-party/rust/fixups/bumpalo/fixups.toml new file mode 100644 index 0000000000000..928a0682c4668 --- /dev/null +++ b/shim/third-party/rust/fixups/bumpalo/fixups.toml @@ -0,0 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +extra_srcs = ["README.md"] diff --git a/shim/third-party/rust/fixups/bzip2-sys/fixups.toml b/shim/third-party/rust/fixups/bzip2-sys/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/bzip2-sys/fixups.toml +++ b/shim/third-party/rust/fixups/bzip2-sys/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ buildscript = [] diff --git a/shim/third-party/rust/fixups/clap/fixups.toml b/shim/third-party/rust/fixups/clap/fixups.toml index ee94fdc2f29cc..41256d384f583 100644 --- a/shim/third-party/rust/fixups/clap/fixups.toml +++ b/shim/third-party/rust/fixups/clap/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + omit_features = ["deprecated"] diff --git a/shim/third-party/rust/fixups/clap_builder/fixups.toml b/shim/third-party/rust/fixups/clap_builder/fixups.toml new file mode 100644 index 0000000000000..928a0682c4668 --- /dev/null +++ b/shim/third-party/rust/fixups/clap_builder/fixups.toml @@ -0,0 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +extra_srcs = ["README.md"] diff --git a/shim/third-party/rust/fixups/clap_derive/fixups.toml b/shim/third-party/rust/fixups/clap_derive/fixups.toml index ee94fdc2f29cc..41256d384f583 100644 --- a/shim/third-party/rust/fixups/clap_derive/fixups.toml +++ b/shim/third-party/rust/fixups/clap_derive/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + omit_features = ["deprecated"] diff --git a/shim/third-party/rust/fixups/core-foundation-sys/fixups.toml b/shim/third-party/rust/fixups/core-foundation-sys/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/core-foundation-sys/fixups.toml +++ b/shim/third-party/rust/fixups/core-foundation-sys/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] diff --git a/shim/third-party/rust/fixups/crc32fast/fixups.toml b/shim/third-party/rust/fixups/crc32fast/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/crc32fast/fixups.toml +++ b/shim/third-party/rust/fixups/crc32fast/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] diff --git a/shim/third-party/rust/fixups/criterion/fixups.toml b/shim/third-party/rust/fixups/criterion/fixups.toml index 41439d3356a5e..1da45ba23144e 100644 --- a/shim/third-party/rust/fixups/criterion/fixups.toml +++ b/shim/third-party/rust/fixups/criterion/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + cargo_env = true diff --git a/shim/third-party/rust/fixups/crossbeam-epoch/fixups.toml b/shim/third-party/rust/fixups/crossbeam-epoch/fixups.toml index 3510928422a31..118f26932835f 100644 --- a/shim/third-party/rust/fixups/crossbeam-epoch/fixups.toml +++ b/shim/third-party/rust/fixups/crossbeam-epoch/fixups.toml @@ -1,2 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] cargo_env = true diff --git a/shim/third-party/rust/fixups/crossbeam-queue/fixups.toml b/shim/third-party/rust/fixups/crossbeam-queue/fixups.toml index 3510928422a31..118f26932835f 100644 --- a/shim/third-party/rust/fixups/crossbeam-queue/fixups.toml +++ b/shim/third-party/rust/fixups/crossbeam-queue/fixups.toml @@ -1,2 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] cargo_env = true diff --git a/shim/third-party/rust/fixups/crossbeam-utils/fixups.toml b/shim/third-party/rust/fixups/crossbeam-utils/fixups.toml index 3510928422a31..118f26932835f 100644 --- a/shim/third-party/rust/fixups/crossbeam-utils/fixups.toml +++ b/shim/third-party/rust/fixups/crossbeam-utils/fixups.toml @@ -1,2 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] cargo_env = true diff --git a/shim/third-party/rust/fixups/crunchy/fixups.toml b/shim/third-party/rust/fixups/crunchy/fixups.toml index ac9ebfb4af71d..309316ac45495 100644 --- a/shim/third-party/rust/fixups/crunchy/fixups.toml +++ b/shim/third-party/rust/fixups/crunchy/fixups.toml @@ -1,2 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + [[buildscript]] [buildscript.gen_srcs] diff --git a/shim/third-party/rust/fixups/darwin-libproc-sys/fixups.toml b/shim/third-party/rust/fixups/darwin-libproc-sys/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/darwin-libproc-sys/fixups.toml +++ b/shim/third-party/rust/fixups/darwin-libproc-sys/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ buildscript = [] diff --git a/shim/third-party/rust/fixups/debugserver-types/fixups.toml b/shim/third-party/rust/fixups/debugserver-types/fixups.toml index 50645274fe787..c740f1218e2b4 100644 --- a/shim/third-party/rust/fixups/debugserver-types/fixups.toml +++ b/shim/third-party/rust/fixups/debugserver-types/fixups.toml @@ -1,2 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + extra_srcs = ["src/schema.json"] cargo_env = true diff --git a/shim/third-party/rust/fixups/derive_more-impl/fixups.toml b/shim/third-party/rust/fixups/derive_more-impl/fixups.toml new file mode 100644 index 0000000000000..190a6c25c4434 --- /dev/null +++ b/shim/third-party/rust/fixups/derive_more-impl/fixups.toml @@ -0,0 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +extra_srcs = ["README.md", "doc/*.md"] diff --git a/shim/third-party/rust/fixups/derive_more/fixups.toml b/shim/third-party/rust/fixups/derive_more/fixups.toml new file mode 100644 index 0000000000000..928a0682c4668 --- /dev/null +++ b/shim/third-party/rust/fixups/derive_more/fixups.toml @@ -0,0 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +extra_srcs = ["README.md"] diff --git a/shim/third-party/rust/fixups/erased-serde/fixups.toml b/shim/third-party/rust/fixups/erased-serde/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/erased-serde/fixups.toml +++ b/shim/third-party/rust/fixups/erased-serde/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] diff --git a/shim/third-party/rust/fixups/fs-err/fixups.toml b/shim/third-party/rust/fixups/fs-err/fixups.toml new file mode 100644 index 0000000000000..d514599437f6a --- /dev/null +++ b/shim/third-party/rust/fixups/fs-err/fixups.toml @@ -0,0 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +[[buildscript]] +[buildscript.rustc_flags] diff --git a/shim/third-party/rust/fixups/fs4/fixups.toml b/shim/third-party/rust/fixups/fs4/fixups.toml index af7edb27e3bb7..9b40a18f4cab3 100644 --- a/shim/third-party/rust/fixups/fs4/fixups.toml +++ b/shim/third-party/rust/fixups/fs4/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + extra_srcs = ["src/**/*.rs"] diff --git a/shim/third-party/rust/fixups/futures-channel/fixups.toml b/shim/third-party/rust/fixups/futures-channel/fixups.toml index 3510928422a31..118f26932835f 100644 --- a/shim/third-party/rust/fixups/futures-channel/fixups.toml +++ b/shim/third-party/rust/fixups/futures-channel/fixups.toml @@ -1,2 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] cargo_env = true diff --git a/shim/third-party/rust/fixups/futures-core/fixups.toml b/shim/third-party/rust/fixups/futures-core/fixups.toml index 3510928422a31..118f26932835f 100644 --- a/shim/third-party/rust/fixups/futures-core/fixups.toml +++ b/shim/third-party/rust/fixups/futures-core/fixups.toml @@ -1,2 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] cargo_env = true diff --git a/shim/third-party/rust/fixups/futures-task/fixups.toml b/shim/third-party/rust/fixups/futures-task/fixups.toml index 3510928422a31..118f26932835f 100644 --- a/shim/third-party/rust/fixups/futures-task/fixups.toml +++ b/shim/third-party/rust/fixups/futures-task/fixups.toml @@ -1,2 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] cargo_env = true diff --git a/shim/third-party/rust/fixups/futures-util/fixups.toml b/shim/third-party/rust/fixups/futures-util/fixups.toml index 3510928422a31..118f26932835f 100644 --- a/shim/third-party/rust/fixups/futures-util/fixups.toml +++ b/shim/third-party/rust/fixups/futures-util/fixups.toml @@ -1,2 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] cargo_env = true diff --git a/shim/third-party/rust/fixups/generic-array/fixups.toml b/shim/third-party/rust/fixups/generic-array/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/generic-array/fixups.toml +++ b/shim/third-party/rust/fixups/generic-array/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ buildscript = [] diff --git a/shim/third-party/rust/fixups/getrandom/fixups.toml b/shim/third-party/rust/fixups/getrandom/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/getrandom/fixups.toml +++ b/shim/third-party/rust/fixups/getrandom/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] diff --git a/shim/third-party/rust/fixups/httparse/fixups.toml b/shim/third-party/rust/fixups/httparse/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/httparse/fixups.toml +++ b/shim/third-party/rust/fixups/httparse/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] diff --git a/shim/third-party/rust/fixups/hyper/fixups.toml b/shim/third-party/rust/fixups/hyper/fixups.toml index 9118e9de767c8..4dc9cd11669f1 100644 --- a/shim/third-party/rust/fixups/hyper/fixups.toml +++ b/shim/third-party/rust/fixups/hyper/fixups.toml @@ -1,3 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + # reindeer cannot see through `cfg_proto!` macros and the like extra_srcs = ["src/**/*.rs"] diff --git a/shim/third-party/rust/fixups/indexmap/fixups.toml b/shim/third-party/rust/fixups/indexmap/fixups.toml index 4aa45eda12c3c..4088726881865 100644 --- a/shim/third-party/rust/fixups/indexmap/fixups.toml +++ b/shim/third-party/rust/fixups/indexmap/fixups.toml @@ -1,2 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] cfgs = ["has_std"] diff --git a/shim/third-party/rust/fixups/io-lifetimes/fixups.toml b/shim/third-party/rust/fixups/io-lifetimes/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/io-lifetimes/fixups.toml +++ b/shim/third-party/rust/fixups/io-lifetimes/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] diff --git a/shim/third-party/rust/fixups/jemalloc-sys/fixups.toml b/shim/third-party/rust/fixups/jemalloc-sys/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/jemalloc-sys/fixups.toml +++ b/shim/third-party/rust/fixups/jemalloc-sys/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] diff --git a/shim/third-party/rust/fixups/lalrpop/fixups.toml b/shim/third-party/rust/fixups/lalrpop/fixups.toml index 41439d3356a5e..1da45ba23144e 100644 --- a/shim/third-party/rust/fixups/lalrpop/fixups.toml +++ b/shim/third-party/rust/fixups/lalrpop/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + cargo_env = true diff --git a/shim/third-party/rust/fixups/lexical-core/fixups.toml b/shim/third-party/rust/fixups/lexical-core/fixups.toml index 5e026f75e0de3..d514599437f6a 100644 --- a/shim/third-party/rust/fixups/lexical-core/fixups.toml +++ b/shim/third-party/rust/fixups/lexical-core/fixups.toml @@ -1,2 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + [[buildscript]] [buildscript.rustc_flags] diff --git a/shim/third-party/rust/fixups/libc/fixups.toml b/shim/third-party/rust/fixups/libc/fixups.toml index 5e026f75e0de3..d514599437f6a 100644 --- a/shim/third-party/rust/fixups/libc/fixups.toml +++ b/shim/third-party/rust/fixups/libc/fixups.toml @@ -1,2 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + [[buildscript]] [buildscript.rustc_flags] diff --git a/shim/third-party/rust/fixups/libm/fixups.toml b/shim/third-party/rust/fixups/libm/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/libm/fixups.toml +++ b/shim/third-party/rust/fixups/libm/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] diff --git a/shim/third-party/rust/fixups/libsqlite3-sys/BUCK b/shim/third-party/rust/fixups/libsqlite3-sys/BUCK new file mode 100644 index 0000000000000..938b0a56d0939 --- /dev/null +++ b/shim/third-party/rust/fixups/libsqlite3-sys/BUCK @@ -0,0 +1,12 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +filegroup( + name = "out_dir", + srcs = ["bindgen.rs"], + visibility = ["//third-party/rust/..."], +) diff --git a/shim/third-party/rust/fixups/libsqlite3-sys/bindgen.rs b/shim/third-party/rust/fixups/libsqlite3-sys/bindgen.rs new file mode 100644 index 0000000000000..fbe2d39fe0be8 --- /dev/null +++ b/shim/third-party/rust/fixups/libsqlite3-sys/bindgen.rs @@ -0,0 +1,3594 @@ +/* automatically generated by rust-bindgen 0.64.0 */ + +pub const SQLITE_VERSION: &[u8; 7usize] = b"3.41.2\0"; +pub const SQLITE_VERSION_NUMBER: i32 = 3041002; +pub const SQLITE_SOURCE_ID: &[u8; 85usize] = + b"2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\0"; +pub const SQLITE_OK: i32 = 0; +pub const SQLITE_ERROR: i32 = 1; +pub const SQLITE_INTERNAL: i32 = 2; +pub const SQLITE_PERM: i32 = 3; +pub const SQLITE_ABORT: i32 = 4; +pub const SQLITE_BUSY: i32 = 5; +pub const SQLITE_LOCKED: i32 = 6; +pub const SQLITE_NOMEM: i32 = 7; +pub const SQLITE_READONLY: i32 = 8; +pub const SQLITE_INTERRUPT: i32 = 9; +pub const SQLITE_IOERR: i32 = 10; +pub const SQLITE_CORRUPT: i32 = 11; +pub const SQLITE_NOTFOUND: i32 = 12; +pub const SQLITE_FULL: i32 = 13; +pub const SQLITE_CANTOPEN: i32 = 14; +pub const SQLITE_PROTOCOL: i32 = 15; +pub const SQLITE_EMPTY: i32 = 16; +pub const SQLITE_SCHEMA: i32 = 17; +pub const SQLITE_TOOBIG: i32 = 18; +pub const SQLITE_CONSTRAINT: i32 = 19; +pub const SQLITE_MISMATCH: i32 = 20; +pub const SQLITE_MISUSE: i32 = 21; +pub const SQLITE_NOLFS: i32 = 22; +pub const SQLITE_AUTH: i32 = 23; +pub const SQLITE_FORMAT: i32 = 24; +pub const SQLITE_RANGE: i32 = 25; +pub const SQLITE_NOTADB: i32 = 26; +pub const SQLITE_NOTICE: i32 = 27; +pub const SQLITE_WARNING: i32 = 28; +pub const SQLITE_ROW: i32 = 100; +pub const SQLITE_DONE: i32 = 101; +pub const SQLITE_ERROR_MISSING_COLLSEQ: i32 = 257; +pub const SQLITE_ERROR_RETRY: i32 = 513; +pub const SQLITE_ERROR_SNAPSHOT: i32 = 769; +pub const SQLITE_IOERR_READ: i32 = 266; +pub const SQLITE_IOERR_SHORT_READ: i32 = 522; +pub const SQLITE_IOERR_WRITE: i32 = 778; +pub const SQLITE_IOERR_FSYNC: i32 = 1034; +pub const SQLITE_IOERR_DIR_FSYNC: i32 = 1290; +pub const SQLITE_IOERR_TRUNCATE: i32 = 1546; +pub const SQLITE_IOERR_FSTAT: i32 = 1802; +pub const SQLITE_IOERR_UNLOCK: i32 = 2058; +pub const SQLITE_IOERR_RDLOCK: i32 = 2314; +pub const SQLITE_IOERR_DELETE: i32 = 2570; +pub const SQLITE_IOERR_BLOCKED: i32 = 2826; +pub const SQLITE_IOERR_NOMEM: i32 = 3082; +pub const SQLITE_IOERR_ACCESS: i32 = 3338; +pub const SQLITE_IOERR_CHECKRESERVEDLOCK: i32 = 3594; +pub const SQLITE_IOERR_LOCK: i32 = 3850; +pub const SQLITE_IOERR_CLOSE: i32 = 4106; +pub const SQLITE_IOERR_DIR_CLOSE: i32 = 4362; +pub const SQLITE_IOERR_SHMOPEN: i32 = 4618; +pub const SQLITE_IOERR_SHMSIZE: i32 = 4874; +pub const SQLITE_IOERR_SHMLOCK: i32 = 5130; +pub const SQLITE_IOERR_SHMMAP: i32 = 5386; +pub const SQLITE_IOERR_SEEK: i32 = 5642; +pub const SQLITE_IOERR_DELETE_NOENT: i32 = 5898; +pub const SQLITE_IOERR_MMAP: i32 = 6154; +pub const SQLITE_IOERR_GETTEMPPATH: i32 = 6410; +pub const SQLITE_IOERR_CONVPATH: i32 = 6666; +pub const SQLITE_IOERR_VNODE: i32 = 6922; +pub const SQLITE_IOERR_AUTH: i32 = 7178; +pub const SQLITE_IOERR_BEGIN_ATOMIC: i32 = 7434; +pub const SQLITE_IOERR_COMMIT_ATOMIC: i32 = 7690; +pub const SQLITE_IOERR_ROLLBACK_ATOMIC: i32 = 7946; +pub const SQLITE_IOERR_DATA: i32 = 8202; +pub const SQLITE_IOERR_CORRUPTFS: i32 = 8458; +pub const SQLITE_LOCKED_SHAREDCACHE: i32 = 262; +pub const SQLITE_LOCKED_VTAB: i32 = 518; +pub const SQLITE_BUSY_RECOVERY: i32 = 261; 
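Note on the libsqlite3-sys fixup above: checking in rust-bindgen's output (the generated constants around this point continue for several thousand lines) lets the vendored rusqlite/libsqlite3-sys build consume ready-made bindings through the out_dir filegroup instead of running bindgen — and therefore needing libclang — at build time. A hedged sketch of one way a target could consume the checked-in file; this is not necessarily the wiring the commit itself performs, which goes through the reindeer fixup (names are illustrative, and the env attribute is assumed to be honored the way cargo's OUT_DIR is):

rust_library(
    name = "libsqlite3-sys-prebound",
    srcs = ["src/lib.rs"],
    env = {
        # Point the crate's include!(concat!(env!("OUT_DIR"), "/bindgen.rs"))
        # at the directory holding the pre-generated bindings.
        "OUT_DIR": "$(location //third-party/rust/fixups/libsqlite3-sys:out_dir)",
    },
    visibility = ["PUBLIC"],
)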
+pub const SQLITE_BUSY_SNAPSHOT: i32 = 517; +pub const SQLITE_BUSY_TIMEOUT: i32 = 773; +pub const SQLITE_CANTOPEN_NOTEMPDIR: i32 = 270; +pub const SQLITE_CANTOPEN_ISDIR: i32 = 526; +pub const SQLITE_CANTOPEN_FULLPATH: i32 = 782; +pub const SQLITE_CANTOPEN_CONVPATH: i32 = 1038; +pub const SQLITE_CANTOPEN_DIRTYWAL: i32 = 1294; +pub const SQLITE_CANTOPEN_SYMLINK: i32 = 1550; +pub const SQLITE_CORRUPT_VTAB: i32 = 267; +pub const SQLITE_CORRUPT_SEQUENCE: i32 = 523; +pub const SQLITE_CORRUPT_INDEX: i32 = 779; +pub const SQLITE_READONLY_RECOVERY: i32 = 264; +pub const SQLITE_READONLY_CANTLOCK: i32 = 520; +pub const SQLITE_READONLY_ROLLBACK: i32 = 776; +pub const SQLITE_READONLY_DBMOVED: i32 = 1032; +pub const SQLITE_READONLY_CANTINIT: i32 = 1288; +pub const SQLITE_READONLY_DIRECTORY: i32 = 1544; +pub const SQLITE_ABORT_ROLLBACK: i32 = 516; +pub const SQLITE_CONSTRAINT_CHECK: i32 = 275; +pub const SQLITE_CONSTRAINT_COMMITHOOK: i32 = 531; +pub const SQLITE_CONSTRAINT_FOREIGNKEY: i32 = 787; +pub const SQLITE_CONSTRAINT_FUNCTION: i32 = 1043; +pub const SQLITE_CONSTRAINT_NOTNULL: i32 = 1299; +pub const SQLITE_CONSTRAINT_PRIMARYKEY: i32 = 1555; +pub const SQLITE_CONSTRAINT_TRIGGER: i32 = 1811; +pub const SQLITE_CONSTRAINT_UNIQUE: i32 = 2067; +pub const SQLITE_CONSTRAINT_VTAB: i32 = 2323; +pub const SQLITE_CONSTRAINT_ROWID: i32 = 2579; +pub const SQLITE_CONSTRAINT_PINNED: i32 = 2835; +pub const SQLITE_CONSTRAINT_DATATYPE: i32 = 3091; +pub const SQLITE_NOTICE_RECOVER_WAL: i32 = 283; +pub const SQLITE_NOTICE_RECOVER_ROLLBACK: i32 = 539; +pub const SQLITE_NOTICE_RBU: i32 = 795; +pub const SQLITE_WARNING_AUTOINDEX: i32 = 284; +pub const SQLITE_AUTH_USER: i32 = 279; +pub const SQLITE_OK_LOAD_PERMANENTLY: i32 = 256; +pub const SQLITE_OK_SYMLINK: i32 = 512; +pub const SQLITE_OPEN_READONLY: i32 = 1; +pub const SQLITE_OPEN_READWRITE: i32 = 2; +pub const SQLITE_OPEN_CREATE: i32 = 4; +pub const SQLITE_OPEN_DELETEONCLOSE: i32 = 8; +pub const SQLITE_OPEN_EXCLUSIVE: i32 = 16; +pub const SQLITE_OPEN_AUTOPROXY: i32 = 32; +pub const SQLITE_OPEN_URI: i32 = 64; +pub const SQLITE_OPEN_MEMORY: i32 = 128; +pub const SQLITE_OPEN_MAIN_DB: i32 = 256; +pub const SQLITE_OPEN_TEMP_DB: i32 = 512; +pub const SQLITE_OPEN_TRANSIENT_DB: i32 = 1024; +pub const SQLITE_OPEN_MAIN_JOURNAL: i32 = 2048; +pub const SQLITE_OPEN_TEMP_JOURNAL: i32 = 4096; +pub const SQLITE_OPEN_SUBJOURNAL: i32 = 8192; +pub const SQLITE_OPEN_SUPER_JOURNAL: i32 = 16384; +pub const SQLITE_OPEN_NOMUTEX: i32 = 32768; +pub const SQLITE_OPEN_FULLMUTEX: i32 = 65536; +pub const SQLITE_OPEN_SHAREDCACHE: i32 = 131072; +pub const SQLITE_OPEN_PRIVATECACHE: i32 = 262144; +pub const SQLITE_OPEN_WAL: i32 = 524288; +pub const SQLITE_OPEN_NOFOLLOW: i32 = 16777216; +pub const SQLITE_OPEN_EXRESCODE: i32 = 33554432; +pub const SQLITE_OPEN_MASTER_JOURNAL: i32 = 16384; +pub const SQLITE_IOCAP_ATOMIC: i32 = 1; +pub const SQLITE_IOCAP_ATOMIC512: i32 = 2; +pub const SQLITE_IOCAP_ATOMIC1K: i32 = 4; +pub const SQLITE_IOCAP_ATOMIC2K: i32 = 8; +pub const SQLITE_IOCAP_ATOMIC4K: i32 = 16; +pub const SQLITE_IOCAP_ATOMIC8K: i32 = 32; +pub const SQLITE_IOCAP_ATOMIC16K: i32 = 64; +pub const SQLITE_IOCAP_ATOMIC32K: i32 = 128; +pub const SQLITE_IOCAP_ATOMIC64K: i32 = 256; +pub const SQLITE_IOCAP_SAFE_APPEND: i32 = 512; +pub const SQLITE_IOCAP_SEQUENTIAL: i32 = 1024; +pub const SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN: i32 = 2048; +pub const SQLITE_IOCAP_POWERSAFE_OVERWRITE: i32 = 4096; +pub const SQLITE_IOCAP_IMMUTABLE: i32 = 8192; +pub const SQLITE_IOCAP_BATCH_ATOMIC: i32 = 16384; +pub const 
SQLITE_LOCK_NONE: i32 = 0; +pub const SQLITE_LOCK_SHARED: i32 = 1; +pub const SQLITE_LOCK_RESERVED: i32 = 2; +pub const SQLITE_LOCK_PENDING: i32 = 3; +pub const SQLITE_LOCK_EXCLUSIVE: i32 = 4; +pub const SQLITE_SYNC_NORMAL: i32 = 2; +pub const SQLITE_SYNC_FULL: i32 = 3; +pub const SQLITE_SYNC_DATAONLY: i32 = 16; +pub const SQLITE_FCNTL_LOCKSTATE: i32 = 1; +pub const SQLITE_FCNTL_GET_LOCKPROXYFILE: i32 = 2; +pub const SQLITE_FCNTL_SET_LOCKPROXYFILE: i32 = 3; +pub const SQLITE_FCNTL_LAST_ERRNO: i32 = 4; +pub const SQLITE_FCNTL_SIZE_HINT: i32 = 5; +pub const SQLITE_FCNTL_CHUNK_SIZE: i32 = 6; +pub const SQLITE_FCNTL_FILE_POINTER: i32 = 7; +pub const SQLITE_FCNTL_SYNC_OMITTED: i32 = 8; +pub const SQLITE_FCNTL_WIN32_AV_RETRY: i32 = 9; +pub const SQLITE_FCNTL_PERSIST_WAL: i32 = 10; +pub const SQLITE_FCNTL_OVERWRITE: i32 = 11; +pub const SQLITE_FCNTL_VFSNAME: i32 = 12; +pub const SQLITE_FCNTL_POWERSAFE_OVERWRITE: i32 = 13; +pub const SQLITE_FCNTL_PRAGMA: i32 = 14; +pub const SQLITE_FCNTL_BUSYHANDLER: i32 = 15; +pub const SQLITE_FCNTL_TEMPFILENAME: i32 = 16; +pub const SQLITE_FCNTL_MMAP_SIZE: i32 = 18; +pub const SQLITE_FCNTL_TRACE: i32 = 19; +pub const SQLITE_FCNTL_HAS_MOVED: i32 = 20; +pub const SQLITE_FCNTL_SYNC: i32 = 21; +pub const SQLITE_FCNTL_COMMIT_PHASETWO: i32 = 22; +pub const SQLITE_FCNTL_WIN32_SET_HANDLE: i32 = 23; +pub const SQLITE_FCNTL_WAL_BLOCK: i32 = 24; +pub const SQLITE_FCNTL_ZIPVFS: i32 = 25; +pub const SQLITE_FCNTL_RBU: i32 = 26; +pub const SQLITE_FCNTL_VFS_POINTER: i32 = 27; +pub const SQLITE_FCNTL_JOURNAL_POINTER: i32 = 28; +pub const SQLITE_FCNTL_WIN32_GET_HANDLE: i32 = 29; +pub const SQLITE_FCNTL_PDB: i32 = 30; +pub const SQLITE_FCNTL_BEGIN_ATOMIC_WRITE: i32 = 31; +pub const SQLITE_FCNTL_COMMIT_ATOMIC_WRITE: i32 = 32; +pub const SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE: i32 = 33; +pub const SQLITE_FCNTL_LOCK_TIMEOUT: i32 = 34; +pub const SQLITE_FCNTL_DATA_VERSION: i32 = 35; +pub const SQLITE_FCNTL_SIZE_LIMIT: i32 = 36; +pub const SQLITE_FCNTL_CKPT_DONE: i32 = 37; +pub const SQLITE_FCNTL_RESERVE_BYTES: i32 = 38; +pub const SQLITE_FCNTL_CKPT_START: i32 = 39; +pub const SQLITE_FCNTL_EXTERNAL_READER: i32 = 40; +pub const SQLITE_FCNTL_CKSM_FILE: i32 = 41; +pub const SQLITE_FCNTL_RESET_CACHE: i32 = 42; +pub const SQLITE_GET_LOCKPROXYFILE: i32 = 2; +pub const SQLITE_SET_LOCKPROXYFILE: i32 = 3; +pub const SQLITE_LAST_ERRNO: i32 = 4; +pub const SQLITE_ACCESS_EXISTS: i32 = 0; +pub const SQLITE_ACCESS_READWRITE: i32 = 1; +pub const SQLITE_ACCESS_READ: i32 = 2; +pub const SQLITE_SHM_UNLOCK: i32 = 1; +pub const SQLITE_SHM_LOCK: i32 = 2; +pub const SQLITE_SHM_SHARED: i32 = 4; +pub const SQLITE_SHM_EXCLUSIVE: i32 = 8; +pub const SQLITE_SHM_NLOCK: i32 = 8; +pub const SQLITE_CONFIG_SINGLETHREAD: i32 = 1; +pub const SQLITE_CONFIG_MULTITHREAD: i32 = 2; +pub const SQLITE_CONFIG_SERIALIZED: i32 = 3; +pub const SQLITE_CONFIG_MALLOC: i32 = 4; +pub const SQLITE_CONFIG_GETMALLOC: i32 = 5; +pub const SQLITE_CONFIG_SCRATCH: i32 = 6; +pub const SQLITE_CONFIG_PAGECACHE: i32 = 7; +pub const SQLITE_CONFIG_HEAP: i32 = 8; +pub const SQLITE_CONFIG_MEMSTATUS: i32 = 9; +pub const SQLITE_CONFIG_MUTEX: i32 = 10; +pub const SQLITE_CONFIG_GETMUTEX: i32 = 11; +pub const SQLITE_CONFIG_LOOKASIDE: i32 = 13; +pub const SQLITE_CONFIG_PCACHE: i32 = 14; +pub const SQLITE_CONFIG_GETPCACHE: i32 = 15; +pub const SQLITE_CONFIG_LOG: i32 = 16; +pub const SQLITE_CONFIG_URI: i32 = 17; +pub const SQLITE_CONFIG_PCACHE2: i32 = 18; +pub const SQLITE_CONFIG_GETPCACHE2: i32 = 19; +pub const SQLITE_CONFIG_COVERING_INDEX_SCAN: i32 = 20; +pub 
const SQLITE_CONFIG_SQLLOG: i32 = 21; +pub const SQLITE_CONFIG_MMAP_SIZE: i32 = 22; +pub const SQLITE_CONFIG_WIN32_HEAPSIZE: i32 = 23; +pub const SQLITE_CONFIG_PCACHE_HDRSZ: i32 = 24; +pub const SQLITE_CONFIG_PMASZ: i32 = 25; +pub const SQLITE_CONFIG_STMTJRNL_SPILL: i32 = 26; +pub const SQLITE_CONFIG_SMALL_MALLOC: i32 = 27; +pub const SQLITE_CONFIG_SORTERREF_SIZE: i32 = 28; +pub const SQLITE_CONFIG_MEMDB_MAXSIZE: i32 = 29; +pub const SQLITE_DBCONFIG_MAINDBNAME: i32 = 1000; +pub const SQLITE_DBCONFIG_LOOKASIDE: i32 = 1001; +pub const SQLITE_DBCONFIG_ENABLE_FKEY: i32 = 1002; +pub const SQLITE_DBCONFIG_ENABLE_TRIGGER: i32 = 1003; +pub const SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER: i32 = 1004; +pub const SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION: i32 = 1005; +pub const SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE: i32 = 1006; +pub const SQLITE_DBCONFIG_ENABLE_QPSG: i32 = 1007; +pub const SQLITE_DBCONFIG_TRIGGER_EQP: i32 = 1008; +pub const SQLITE_DBCONFIG_RESET_DATABASE: i32 = 1009; +pub const SQLITE_DBCONFIG_DEFENSIVE: i32 = 1010; +pub const SQLITE_DBCONFIG_WRITABLE_SCHEMA: i32 = 1011; +pub const SQLITE_DBCONFIG_LEGACY_ALTER_TABLE: i32 = 1012; +pub const SQLITE_DBCONFIG_DQS_DML: i32 = 1013; +pub const SQLITE_DBCONFIG_DQS_DDL: i32 = 1014; +pub const SQLITE_DBCONFIG_ENABLE_VIEW: i32 = 1015; +pub const SQLITE_DBCONFIG_LEGACY_FILE_FORMAT: i32 = 1016; +pub const SQLITE_DBCONFIG_TRUSTED_SCHEMA: i32 = 1017; +pub const SQLITE_DBCONFIG_MAX: i32 = 1017; +pub const SQLITE_DENY: i32 = 1; +pub const SQLITE_IGNORE: i32 = 2; +pub const SQLITE_CREATE_INDEX: i32 = 1; +pub const SQLITE_CREATE_TABLE: i32 = 2; +pub const SQLITE_CREATE_TEMP_INDEX: i32 = 3; +pub const SQLITE_CREATE_TEMP_TABLE: i32 = 4; +pub const SQLITE_CREATE_TEMP_TRIGGER: i32 = 5; +pub const SQLITE_CREATE_TEMP_VIEW: i32 = 6; +pub const SQLITE_CREATE_TRIGGER: i32 = 7; +pub const SQLITE_CREATE_VIEW: i32 = 8; +pub const SQLITE_DELETE: i32 = 9; +pub const SQLITE_DROP_INDEX: i32 = 10; +pub const SQLITE_DROP_TABLE: i32 = 11; +pub const SQLITE_DROP_TEMP_INDEX: i32 = 12; +pub const SQLITE_DROP_TEMP_TABLE: i32 = 13; +pub const SQLITE_DROP_TEMP_TRIGGER: i32 = 14; +pub const SQLITE_DROP_TEMP_VIEW: i32 = 15; +pub const SQLITE_DROP_TRIGGER: i32 = 16; +pub const SQLITE_DROP_VIEW: i32 = 17; +pub const SQLITE_INSERT: i32 = 18; +pub const SQLITE_PRAGMA: i32 = 19; +pub const SQLITE_READ: i32 = 20; +pub const SQLITE_SELECT: i32 = 21; +pub const SQLITE_TRANSACTION: i32 = 22; +pub const SQLITE_UPDATE: i32 = 23; +pub const SQLITE_ATTACH: i32 = 24; +pub const SQLITE_DETACH: i32 = 25; +pub const SQLITE_ALTER_TABLE: i32 = 26; +pub const SQLITE_REINDEX: i32 = 27; +pub const SQLITE_ANALYZE: i32 = 28; +pub const SQLITE_CREATE_VTABLE: i32 = 29; +pub const SQLITE_DROP_VTABLE: i32 = 30; +pub const SQLITE_FUNCTION: i32 = 31; +pub const SQLITE_SAVEPOINT: i32 = 32; +pub const SQLITE_COPY: i32 = 0; +pub const SQLITE_RECURSIVE: i32 = 33; +pub const SQLITE_TRACE_STMT: i32 = 1; +pub const SQLITE_TRACE_PROFILE: i32 = 2; +pub const SQLITE_TRACE_ROW: i32 = 4; +pub const SQLITE_TRACE_CLOSE: i32 = 8; +pub const SQLITE_LIMIT_LENGTH: i32 = 0; +pub const SQLITE_LIMIT_SQL_LENGTH: i32 = 1; +pub const SQLITE_LIMIT_COLUMN: i32 = 2; +pub const SQLITE_LIMIT_EXPR_DEPTH: i32 = 3; +pub const SQLITE_LIMIT_COMPOUND_SELECT: i32 = 4; +pub const SQLITE_LIMIT_VDBE_OP: i32 = 5; +pub const SQLITE_LIMIT_FUNCTION_ARG: i32 = 6; +pub const SQLITE_LIMIT_ATTACHED: i32 = 7; +pub const SQLITE_LIMIT_LIKE_PATTERN_LENGTH: i32 = 8; +pub const SQLITE_LIMIT_VARIABLE_NUMBER: i32 = 9; +pub const SQLITE_LIMIT_TRIGGER_DEPTH: i32 = 10; +pub 
const SQLITE_LIMIT_WORKER_THREADS: i32 = 11; +pub const SQLITE_PREPARE_PERSISTENT: i32 = 1; +pub const SQLITE_PREPARE_NORMALIZE: i32 = 2; +pub const SQLITE_PREPARE_NO_VTAB: i32 = 4; +pub const SQLITE_INTEGER: i32 = 1; +pub const SQLITE_FLOAT: i32 = 2; +pub const SQLITE_BLOB: i32 = 4; +pub const SQLITE_NULL: i32 = 5; +pub const SQLITE_TEXT: i32 = 3; +pub const SQLITE3_TEXT: i32 = 3; +pub const SQLITE_UTF8: i32 = 1; +pub const SQLITE_UTF16LE: i32 = 2; +pub const SQLITE_UTF16BE: i32 = 3; +pub const SQLITE_UTF16: i32 = 4; +pub const SQLITE_ANY: i32 = 5; +pub const SQLITE_UTF16_ALIGNED: i32 = 8; +pub const SQLITE_DETERMINISTIC: i32 = 2048; +pub const SQLITE_DIRECTONLY: i32 = 524288; +pub const SQLITE_SUBTYPE: i32 = 1048576; +pub const SQLITE_INNOCUOUS: i32 = 2097152; +pub const SQLITE_WIN32_DATA_DIRECTORY_TYPE: i32 = 1; +pub const SQLITE_WIN32_TEMP_DIRECTORY_TYPE: i32 = 2; +pub const SQLITE_TXN_NONE: i32 = 0; +pub const SQLITE_TXN_READ: i32 = 1; +pub const SQLITE_TXN_WRITE: i32 = 2; +pub const SQLITE_INDEX_SCAN_UNIQUE: i32 = 1; +pub const SQLITE_INDEX_CONSTRAINT_EQ: i32 = 2; +pub const SQLITE_INDEX_CONSTRAINT_GT: i32 = 4; +pub const SQLITE_INDEX_CONSTRAINT_LE: i32 = 8; +pub const SQLITE_INDEX_CONSTRAINT_LT: i32 = 16; +pub const SQLITE_INDEX_CONSTRAINT_GE: i32 = 32; +pub const SQLITE_INDEX_CONSTRAINT_MATCH: i32 = 64; +pub const SQLITE_INDEX_CONSTRAINT_LIKE: i32 = 65; +pub const SQLITE_INDEX_CONSTRAINT_GLOB: i32 = 66; +pub const SQLITE_INDEX_CONSTRAINT_REGEXP: i32 = 67; +pub const SQLITE_INDEX_CONSTRAINT_NE: i32 = 68; +pub const SQLITE_INDEX_CONSTRAINT_ISNOT: i32 = 69; +pub const SQLITE_INDEX_CONSTRAINT_ISNOTNULL: i32 = 70; +pub const SQLITE_INDEX_CONSTRAINT_ISNULL: i32 = 71; +pub const SQLITE_INDEX_CONSTRAINT_IS: i32 = 72; +pub const SQLITE_INDEX_CONSTRAINT_LIMIT: i32 = 73; +pub const SQLITE_INDEX_CONSTRAINT_OFFSET: i32 = 74; +pub const SQLITE_INDEX_CONSTRAINT_FUNCTION: i32 = 150; +pub const SQLITE_MUTEX_FAST: i32 = 0; +pub const SQLITE_MUTEX_RECURSIVE: i32 = 1; +pub const SQLITE_MUTEX_STATIC_MAIN: i32 = 2; +pub const SQLITE_MUTEX_STATIC_MEM: i32 = 3; +pub const SQLITE_MUTEX_STATIC_MEM2: i32 = 4; +pub const SQLITE_MUTEX_STATIC_OPEN: i32 = 4; +pub const SQLITE_MUTEX_STATIC_PRNG: i32 = 5; +pub const SQLITE_MUTEX_STATIC_LRU: i32 = 6; +pub const SQLITE_MUTEX_STATIC_LRU2: i32 = 7; +pub const SQLITE_MUTEX_STATIC_PMEM: i32 = 7; +pub const SQLITE_MUTEX_STATIC_APP1: i32 = 8; +pub const SQLITE_MUTEX_STATIC_APP2: i32 = 9; +pub const SQLITE_MUTEX_STATIC_APP3: i32 = 10; +pub const SQLITE_MUTEX_STATIC_VFS1: i32 = 11; +pub const SQLITE_MUTEX_STATIC_VFS2: i32 = 12; +pub const SQLITE_MUTEX_STATIC_VFS3: i32 = 13; +pub const SQLITE_MUTEX_STATIC_MASTER: i32 = 2; +pub const SQLITE_TESTCTRL_FIRST: i32 = 5; +pub const SQLITE_TESTCTRL_PRNG_SAVE: i32 = 5; +pub const SQLITE_TESTCTRL_PRNG_RESTORE: i32 = 6; +pub const SQLITE_TESTCTRL_PRNG_RESET: i32 = 7; +pub const SQLITE_TESTCTRL_BITVEC_TEST: i32 = 8; +pub const SQLITE_TESTCTRL_FAULT_INSTALL: i32 = 9; +pub const SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS: i32 = 10; +pub const SQLITE_TESTCTRL_PENDING_BYTE: i32 = 11; +pub const SQLITE_TESTCTRL_ASSERT: i32 = 12; +pub const SQLITE_TESTCTRL_ALWAYS: i32 = 13; +pub const SQLITE_TESTCTRL_RESERVE: i32 = 14; +pub const SQLITE_TESTCTRL_OPTIMIZATIONS: i32 = 15; +pub const SQLITE_TESTCTRL_ISKEYWORD: i32 = 16; +pub const SQLITE_TESTCTRL_SCRATCHMALLOC: i32 = 17; +pub const SQLITE_TESTCTRL_INTERNAL_FUNCTIONS: i32 = 17; +pub const SQLITE_TESTCTRL_LOCALTIME_FAULT: i32 = 18; +pub const SQLITE_TESTCTRL_EXPLAIN_STMT: i32 = 19; +pub const 
SQLITE_TESTCTRL_ONCE_RESET_THRESHOLD: i32 = 19; +pub const SQLITE_TESTCTRL_NEVER_CORRUPT: i32 = 20; +pub const SQLITE_TESTCTRL_VDBE_COVERAGE: i32 = 21; +pub const SQLITE_TESTCTRL_BYTEORDER: i32 = 22; +pub const SQLITE_TESTCTRL_ISINIT: i32 = 23; +pub const SQLITE_TESTCTRL_SORTER_MMAP: i32 = 24; +pub const SQLITE_TESTCTRL_IMPOSTER: i32 = 25; +pub const SQLITE_TESTCTRL_PARSER_COVERAGE: i32 = 26; +pub const SQLITE_TESTCTRL_RESULT_INTREAL: i32 = 27; +pub const SQLITE_TESTCTRL_PRNG_SEED: i32 = 28; +pub const SQLITE_TESTCTRL_EXTRA_SCHEMA_CHECKS: i32 = 29; +pub const SQLITE_TESTCTRL_SEEK_COUNT: i32 = 30; +pub const SQLITE_TESTCTRL_TRACEFLAGS: i32 = 31; +pub const SQLITE_TESTCTRL_TUNE: i32 = 32; +pub const SQLITE_TESTCTRL_LOGEST: i32 = 33; +pub const SQLITE_TESTCTRL_LAST: i32 = 33; +pub const SQLITE_STATUS_MEMORY_USED: i32 = 0; +pub const SQLITE_STATUS_PAGECACHE_USED: i32 = 1; +pub const SQLITE_STATUS_PAGECACHE_OVERFLOW: i32 = 2; +pub const SQLITE_STATUS_SCRATCH_USED: i32 = 3; +pub const SQLITE_STATUS_SCRATCH_OVERFLOW: i32 = 4; +pub const SQLITE_STATUS_MALLOC_SIZE: i32 = 5; +pub const SQLITE_STATUS_PARSER_STACK: i32 = 6; +pub const SQLITE_STATUS_PAGECACHE_SIZE: i32 = 7; +pub const SQLITE_STATUS_SCRATCH_SIZE: i32 = 8; +pub const SQLITE_STATUS_MALLOC_COUNT: i32 = 9; +pub const SQLITE_DBSTATUS_LOOKASIDE_USED: i32 = 0; +pub const SQLITE_DBSTATUS_CACHE_USED: i32 = 1; +pub const SQLITE_DBSTATUS_SCHEMA_USED: i32 = 2; +pub const SQLITE_DBSTATUS_STMT_USED: i32 = 3; +pub const SQLITE_DBSTATUS_LOOKASIDE_HIT: i32 = 4; +pub const SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE: i32 = 5; +pub const SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL: i32 = 6; +pub const SQLITE_DBSTATUS_CACHE_HIT: i32 = 7; +pub const SQLITE_DBSTATUS_CACHE_MISS: i32 = 8; +pub const SQLITE_DBSTATUS_CACHE_WRITE: i32 = 9; +pub const SQLITE_DBSTATUS_DEFERRED_FKS: i32 = 10; +pub const SQLITE_DBSTATUS_CACHE_USED_SHARED: i32 = 11; +pub const SQLITE_DBSTATUS_CACHE_SPILL: i32 = 12; +pub const SQLITE_DBSTATUS_MAX: i32 = 12; +pub const SQLITE_STMTSTATUS_FULLSCAN_STEP: i32 = 1; +pub const SQLITE_STMTSTATUS_SORT: i32 = 2; +pub const SQLITE_STMTSTATUS_AUTOINDEX: i32 = 3; +pub const SQLITE_STMTSTATUS_VM_STEP: i32 = 4; +pub const SQLITE_STMTSTATUS_REPREPARE: i32 = 5; +pub const SQLITE_STMTSTATUS_RUN: i32 = 6; +pub const SQLITE_STMTSTATUS_FILTER_MISS: i32 = 7; +pub const SQLITE_STMTSTATUS_FILTER_HIT: i32 = 8; +pub const SQLITE_STMTSTATUS_MEMUSED: i32 = 99; +pub const SQLITE_CHECKPOINT_PASSIVE: i32 = 0; +pub const SQLITE_CHECKPOINT_FULL: i32 = 1; +pub const SQLITE_CHECKPOINT_RESTART: i32 = 2; +pub const SQLITE_CHECKPOINT_TRUNCATE: i32 = 3; +pub const SQLITE_VTAB_CONSTRAINT_SUPPORT: i32 = 1; +pub const SQLITE_VTAB_INNOCUOUS: i32 = 2; +pub const SQLITE_VTAB_DIRECTONLY: i32 = 3; +pub const SQLITE_ROLLBACK: i32 = 1; +pub const SQLITE_FAIL: i32 = 3; +pub const SQLITE_REPLACE: i32 = 5; +pub const SQLITE_SCANSTAT_NLOOP: i32 = 0; +pub const SQLITE_SCANSTAT_NVISIT: i32 = 1; +pub const SQLITE_SCANSTAT_EST: i32 = 2; +pub const SQLITE_SCANSTAT_NAME: i32 = 3; +pub const SQLITE_SCANSTAT_EXPLAIN: i32 = 4; +pub const SQLITE_SCANSTAT_SELECTID: i32 = 5; +pub const SQLITE_SCANSTAT_PARENTID: i32 = 6; +pub const SQLITE_SCANSTAT_NCYCLE: i32 = 7; +pub const SQLITE_SCANSTAT_COMPLEX: i32 = 1; +pub const SQLITE_SERIALIZE_NOCOPY: i32 = 1; +pub const SQLITE_DESERIALIZE_FREEONCLOSE: i32 = 1; +pub const SQLITE_DESERIALIZE_RESIZEABLE: i32 = 2; +pub const SQLITE_DESERIALIZE_READONLY: i32 = 4; +pub const NOT_WITHIN: i32 = 0; +pub const PARTLY_WITHIN: i32 = 1; +pub const FULLY_WITHIN: i32 = 2; +pub const 
__SQLITESESSION_H_: i32 = 1; +pub const SQLITE_SESSION_OBJCONFIG_SIZE: i32 = 1; +pub const SQLITE_CHANGESETSTART_INVERT: i32 = 2; +pub const SQLITE_CHANGESETAPPLY_NOSAVEPOINT: i32 = 1; +pub const SQLITE_CHANGESETAPPLY_INVERT: i32 = 2; +pub const SQLITE_CHANGESET_DATA: i32 = 1; +pub const SQLITE_CHANGESET_NOTFOUND: i32 = 2; +pub const SQLITE_CHANGESET_CONFLICT: i32 = 3; +pub const SQLITE_CHANGESET_CONSTRAINT: i32 = 4; +pub const SQLITE_CHANGESET_FOREIGN_KEY: i32 = 5; +pub const SQLITE_CHANGESET_OMIT: i32 = 0; +pub const SQLITE_CHANGESET_REPLACE: i32 = 1; +pub const SQLITE_CHANGESET_ABORT: i32 = 2; +pub const SQLITE_SESSION_CONFIG_STRMSIZE: i32 = 1; +pub const FTS5_TOKENIZE_QUERY: i32 = 1; +pub const FTS5_TOKENIZE_PREFIX: i32 = 2; +pub const FTS5_TOKENIZE_DOCUMENT: i32 = 4; +pub const FTS5_TOKENIZE_AUX: i32 = 8; +pub const FTS5_TOKEN_COLOCATED: i32 = 1; +extern "C" { + pub static sqlite3_version: [::std::os::raw::c_char; 0usize]; +} +extern "C" { + pub fn sqlite3_libversion() -> *const ::std::os::raw::c_char; +} +extern "C" { + pub fn sqlite3_sourceid() -> *const ::std::os::raw::c_char; +} +extern "C" { + pub fn sqlite3_libversion_number() -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_compileoption_used( + zOptName: *const ::std::os::raw::c_char, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_compileoption_get(N: ::std::os::raw::c_int) -> *const ::std::os::raw::c_char; +} +extern "C" { + pub fn sqlite3_threadsafe() -> ::std::os::raw::c_int; +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct sqlite3 { + _unused: [u8; 0], +} +pub type sqlite_int64 = ::std::os::raw::c_longlong; +pub type sqlite_uint64 = ::std::os::raw::c_ulonglong; +pub type sqlite3_int64 = sqlite_int64; +pub type sqlite3_uint64 = sqlite_uint64; +extern "C" { + pub fn sqlite3_close(arg1: *mut sqlite3) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_close_v2(arg1: *mut sqlite3) -> ::std::os::raw::c_int; +} +pub type sqlite3_callback = ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut ::std::os::raw::c_void, + arg2: ::std::os::raw::c_int, + arg3: *mut *mut ::std::os::raw::c_char, + arg4: *mut *mut ::std::os::raw::c_char, + ) -> ::std::os::raw::c_int, +>; +extern "C" { + pub fn sqlite3_exec( + arg1: *mut sqlite3, + sql: *const ::std::os::raw::c_char, + callback: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut ::std::os::raw::c_void, + arg2: ::std::os::raw::c_int, + arg3: *mut *mut ::std::os::raw::c_char, + arg4: *mut *mut ::std::os::raw::c_char, + ) -> ::std::os::raw::c_int, + >, + arg2: *mut ::std::os::raw::c_void, + errmsg: *mut *mut ::std::os::raw::c_char, + ) -> ::std::os::raw::c_int; +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct sqlite3_file { + pub pMethods: *const sqlite3_io_methods, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct sqlite3_io_methods { + pub iVersion: ::std::os::raw::c_int, + pub xClose: ::std::option::Option< + unsafe extern "C" fn(arg1: *mut sqlite3_file) -> ::std::os::raw::c_int, + >, + pub xRead: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_file, + arg2: *mut ::std::os::raw::c_void, + iAmt: ::std::os::raw::c_int, + iOfst: sqlite3_int64, + ) -> ::std::os::raw::c_int, + >, + pub xWrite: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_file, + arg2: *const ::std::os::raw::c_void, + iAmt: ::std::os::raw::c_int, + iOfst: sqlite3_int64, + ) -> ::std::os::raw::c_int, + >, + pub xTruncate: ::std::option::Option< + unsafe extern "C" fn(arg1: *mut sqlite3_file, size: 
sqlite3_int64) -> ::std::os::raw::c_int, + >, + pub xSync: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_file, + flags: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int, + >, + pub xFileSize: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_file, + pSize: *mut sqlite3_int64, + ) -> ::std::os::raw::c_int, + >, + pub xLock: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_file, + arg2: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int, + >, + pub xUnlock: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_file, + arg2: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int, + >, + pub xCheckReservedLock: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_file, + pResOut: *mut ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int, + >, + pub xFileControl: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_file, + op: ::std::os::raw::c_int, + pArg: *mut ::std::os::raw::c_void, + ) -> ::std::os::raw::c_int, + >, + pub xSectorSize: ::std::option::Option< + unsafe extern "C" fn(arg1: *mut sqlite3_file) -> ::std::os::raw::c_int, + >, + pub xDeviceCharacteristics: ::std::option::Option< + unsafe extern "C" fn(arg1: *mut sqlite3_file) -> ::std::os::raw::c_int, + >, + pub xShmMap: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_file, + iPg: ::std::os::raw::c_int, + pgsz: ::std::os::raw::c_int, + arg2: ::std::os::raw::c_int, + arg3: *mut *mut ::std::os::raw::c_void, + ) -> ::std::os::raw::c_int, + >, + pub xShmLock: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_file, + offset: ::std::os::raw::c_int, + n: ::std::os::raw::c_int, + flags: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int, + >, + pub xShmBarrier: ::std::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_file)>, + pub xShmUnmap: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_file, + deleteFlag: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int, + >, + pub xFetch: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_file, + iOfst: sqlite3_int64, + iAmt: ::std::os::raw::c_int, + pp: *mut *mut ::std::os::raw::c_void, + ) -> ::std::os::raw::c_int, + >, + pub xUnfetch: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_file, + iOfst: sqlite3_int64, + p: *mut ::std::os::raw::c_void, + ) -> ::std::os::raw::c_int, + >, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct sqlite3_mutex { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct sqlite3_api_routines { + _unused: [u8; 0], +} +pub type sqlite3_filename = *const ::std::os::raw::c_char; +pub type sqlite3_syscall_ptr = ::std::option::Option<unsafe extern "C" fn()>; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct sqlite3_vfs { + pub iVersion: ::std::os::raw::c_int, + pub szOsFile: ::std::os::raw::c_int, + pub mxPathname: ::std::os::raw::c_int, + pub pNext: *mut sqlite3_vfs, + pub zName: *const ::std::os::raw::c_char, + pub pAppData: *mut ::std::os::raw::c_void, + pub xOpen: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_vfs, + zName: sqlite3_filename, + arg2: *mut sqlite3_file, + flags: ::std::os::raw::c_int, + pOutFlags: *mut ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int, + >, + pub xDelete: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_vfs, + zName: *const ::std::os::raw::c_char, + syncDir: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int, + >, + pub xAccess: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_vfs, + zName: *const
::std::os::raw::c_char, + flags: ::std::os::raw::c_int, + pResOut: *mut ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int, + >, + pub xFullPathname: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_vfs, + zName: *const ::std::os::raw::c_char, + nOut: ::std::os::raw::c_int, + zOut: *mut ::std::os::raw::c_char, + ) -> ::std::os::raw::c_int, + >, + pub xDlOpen: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_vfs, + zFilename: *const ::std::os::raw::c_char, + ) -> *mut ::std::os::raw::c_void, + >, + pub xDlError: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_vfs, + nByte: ::std::os::raw::c_int, + zErrMsg: *mut ::std::os::raw::c_char, + ), + >, + pub xDlSym: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_vfs, + arg2: *mut ::std::os::raw::c_void, + zSymbol: *const ::std::os::raw::c_char, + ) -> ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_vfs, + arg2: *mut ::std::os::raw::c_void, + zSymbol: *const ::std::os::raw::c_char, + ), + >, + >, + pub xDlClose: ::std::option::Option< + unsafe extern "C" fn(arg1: *mut sqlite3_vfs, arg2: *mut ::std::os::raw::c_void), + >, + pub xRandomness: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_vfs, + nByte: ::std::os::raw::c_int, + zOut: *mut ::std::os::raw::c_char, + ) -> ::std::os::raw::c_int, + >, + pub xSleep: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_vfs, + microseconds: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int, + >, + pub xCurrentTime: ::std::option::Option< + unsafe extern "C" fn(arg1: *mut sqlite3_vfs, arg2: *mut f64) -> ::std::os::raw::c_int, + >, + pub xGetLastError: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_vfs, + arg2: ::std::os::raw::c_int, + arg3: *mut ::std::os::raw::c_char, + ) -> ::std::os::raw::c_int, + >, + pub xCurrentTimeInt64: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_vfs, + arg2: *mut sqlite3_int64, + ) -> ::std::os::raw::c_int, + >, + pub xSetSystemCall: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_vfs, + zName: *const ::std::os::raw::c_char, + arg2: sqlite3_syscall_ptr, + ) -> ::std::os::raw::c_int, + >, + pub xGetSystemCall: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_vfs, + zName: *const ::std::os::raw::c_char, + ) -> sqlite3_syscall_ptr, + >, + pub xNextSystemCall: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_vfs, + zName: *const ::std::os::raw::c_char, + ) -> *const ::std::os::raw::c_char, + >, +} +extern "C" { + pub fn sqlite3_initialize() -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_shutdown() -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_os_init() -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_os_end() -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_config(arg1: ::std::os::raw::c_int, ...) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_db_config( + arg1: *mut sqlite3, + op: ::std::os::raw::c_int, + ... 
+ ) -> ::std::os::raw::c_int; +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct sqlite3_mem_methods { + pub xMalloc: ::std::option::Option< + unsafe extern "C" fn(arg1: ::std::os::raw::c_int) -> *mut ::std::os::raw::c_void, + >, + pub xFree: ::std::option::Option<unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void)>, + pub xRealloc: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut ::std::os::raw::c_void, + arg2: ::std::os::raw::c_int, + ) -> *mut ::std::os::raw::c_void, + >, + pub xSize: ::std::option::Option< + unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void) -> ::std::os::raw::c_int, + >, + pub xRoundup: ::std::option::Option< + unsafe extern "C" fn(arg1: ::std::os::raw::c_int) -> ::std::os::raw::c_int, + >, + pub xInit: ::std::option::Option< + unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void) -> ::std::os::raw::c_int, + >, + pub xShutdown: ::std::option::Option<unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void)>, + pub pAppData: *mut ::std::os::raw::c_void, +} +extern "C" { + pub fn sqlite3_extended_result_codes( + arg1: *mut sqlite3, + onoff: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_last_insert_rowid(arg1: *mut sqlite3) -> sqlite3_int64; +} +extern "C" { + pub fn sqlite3_set_last_insert_rowid(arg1: *mut sqlite3, arg2: sqlite3_int64); +} +extern "C" { + pub fn sqlite3_changes(arg1: *mut sqlite3) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_changes64(arg1: *mut sqlite3) -> sqlite3_int64; +} +extern "C" { + pub fn sqlite3_total_changes(arg1: *mut sqlite3) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_total_changes64(arg1: *mut sqlite3) -> sqlite3_int64; +} +extern "C" { + pub fn sqlite3_interrupt(arg1: *mut sqlite3); +} +extern "C" { + pub fn sqlite3_is_interrupted(arg1: *mut sqlite3) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_complete(sql: *const ::std::os::raw::c_char) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_complete16(sql: *const ::std::os::raw::c_void) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_busy_handler( + arg1: *mut sqlite3, + arg2: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut ::std::os::raw::c_void, + arg2: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int, + >, + arg3: *mut ::std::os::raw::c_void, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_busy_timeout( + arg1: *mut sqlite3, + ms: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_get_table( + db: *mut sqlite3, + zSql: *const ::std::os::raw::c_char, + pazResult: *mut *mut *mut ::std::os::raw::c_char, + pnRow: *mut ::std::os::raw::c_int, + pnColumn: *mut ::std::os::raw::c_int, + pzErrmsg: *mut *mut ::std::os::raw::c_char, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_free_table(result: *mut *mut ::std::os::raw::c_char); +} +extern "C" { + pub fn sqlite3_mprintf(arg1: *const ::std::os::raw::c_char, ...) + -> *mut ::std::os::raw::c_char; +} +extern "C" { + pub fn sqlite3_snprintf( + arg1: ::std::os::raw::c_int, + arg2: *mut ::std::os::raw::c_char, + arg3: *const ::std::os::raw::c_char, + ...
+ ) -> *mut ::std::os::raw::c_char; +} +extern "C" { + pub fn sqlite3_malloc(arg1: ::std::os::raw::c_int) -> *mut ::std::os::raw::c_void; +} +extern "C" { + pub fn sqlite3_malloc64(arg1: sqlite3_uint64) -> *mut ::std::os::raw::c_void; +} +extern "C" { + pub fn sqlite3_realloc( + arg1: *mut ::std::os::raw::c_void, + arg2: ::std::os::raw::c_int, + ) -> *mut ::std::os::raw::c_void; +} +extern "C" { + pub fn sqlite3_realloc64( + arg1: *mut ::std::os::raw::c_void, + arg2: sqlite3_uint64, + ) -> *mut ::std::os::raw::c_void; +} +extern "C" { + pub fn sqlite3_free(arg1: *mut ::std::os::raw::c_void); +} +extern "C" { + pub fn sqlite3_msize(arg1: *mut ::std::os::raw::c_void) -> sqlite3_uint64; +} +extern "C" { + pub fn sqlite3_memory_used() -> sqlite3_int64; +} +extern "C" { + pub fn sqlite3_memory_highwater(resetFlag: ::std::os::raw::c_int) -> sqlite3_int64; +} +extern "C" { + pub fn sqlite3_randomness(N: ::std::os::raw::c_int, P: *mut ::std::os::raw::c_void); +} +extern "C" { + pub fn sqlite3_set_authorizer( + arg1: *mut sqlite3, + xAuth: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut ::std::os::raw::c_void, + arg2: ::std::os::raw::c_int, + arg3: *const ::std::os::raw::c_char, + arg4: *const ::std::os::raw::c_char, + arg5: *const ::std::os::raw::c_char, + arg6: *const ::std::os::raw::c_char, + ) -> ::std::os::raw::c_int, + >, + pUserData: *mut ::std::os::raw::c_void, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_trace( + arg1: *mut sqlite3, + xTrace: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut ::std::os::raw::c_void, + arg2: *const ::std::os::raw::c_char, + ), + >, + arg2: *mut ::std::os::raw::c_void, + ) -> *mut ::std::os::raw::c_void; +} +extern "C" { + pub fn sqlite3_profile( + arg1: *mut sqlite3, + xProfile: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut ::std::os::raw::c_void, + arg2: *const ::std::os::raw::c_char, + arg3: sqlite3_uint64, + ), + >, + arg2: *mut ::std::os::raw::c_void, + ) -> *mut ::std::os::raw::c_void; +} +extern "C" { + pub fn sqlite3_trace_v2( + arg1: *mut sqlite3, + uMask: ::std::os::raw::c_uint, + xCallback: ::std::option::Option< + unsafe extern "C" fn( + arg1: ::std::os::raw::c_uint, + arg2: *mut ::std::os::raw::c_void, + arg3: *mut ::std::os::raw::c_void, + arg4: *mut ::std::os::raw::c_void, + ) -> ::std::os::raw::c_int, + >, + pCtx: *mut ::std::os::raw::c_void, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_progress_handler( + arg1: *mut sqlite3, + arg2: ::std::os::raw::c_int, + arg3: ::std::option::Option< + unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void) -> ::std::os::raw::c_int, + >, + arg4: *mut ::std::os::raw::c_void, + ); +} +extern "C" { + pub fn sqlite3_open( + filename: *const ::std::os::raw::c_char, + ppDb: *mut *mut sqlite3, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_open16( + filename: *const ::std::os::raw::c_void, + ppDb: *mut *mut sqlite3, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_open_v2( + filename: *const ::std::os::raw::c_char, + ppDb: *mut *mut sqlite3, + flags: ::std::os::raw::c_int, + zVfs: *const ::std::os::raw::c_char, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_uri_parameter( + z: sqlite3_filename, + zParam: *const ::std::os::raw::c_char, + ) -> *const ::std::os::raw::c_char; +} +extern "C" { + pub fn sqlite3_uri_boolean( + z: sqlite3_filename, + zParam: *const ::std::os::raw::c_char, + bDefault: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_uri_int64( + 
arg1: sqlite3_filename, + arg2: *const ::std::os::raw::c_char, + arg3: sqlite3_int64, + ) -> sqlite3_int64; +} +extern "C" { + pub fn sqlite3_uri_key( + z: sqlite3_filename, + N: ::std::os::raw::c_int, + ) -> *const ::std::os::raw::c_char; +} +extern "C" { + pub fn sqlite3_filename_database(arg1: sqlite3_filename) -> *const ::std::os::raw::c_char; +} +extern "C" { + pub fn sqlite3_filename_journal(arg1: sqlite3_filename) -> *const ::std::os::raw::c_char; +} +extern "C" { + pub fn sqlite3_filename_wal(arg1: sqlite3_filename) -> *const ::std::os::raw::c_char; +} +extern "C" { + pub fn sqlite3_database_file_object(arg1: *const ::std::os::raw::c_char) -> *mut sqlite3_file; +} +extern "C" { + pub fn sqlite3_create_filename( + zDatabase: *const ::std::os::raw::c_char, + zJournal: *const ::std::os::raw::c_char, + zWal: *const ::std::os::raw::c_char, + nParam: ::std::os::raw::c_int, + azParam: *mut *const ::std::os::raw::c_char, + ) -> sqlite3_filename; +} +extern "C" { + pub fn sqlite3_free_filename(arg1: sqlite3_filename); +} +extern "C" { + pub fn sqlite3_errcode(db: *mut sqlite3) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_extended_errcode(db: *mut sqlite3) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_errmsg(arg1: *mut sqlite3) -> *const ::std::os::raw::c_char; +} +extern "C" { + pub fn sqlite3_errmsg16(arg1: *mut sqlite3) -> *const ::std::os::raw::c_void; +} +extern "C" { + pub fn sqlite3_errstr(arg1: ::std::os::raw::c_int) -> *const ::std::os::raw::c_char; +} +extern "C" { + pub fn sqlite3_error_offset(db: *mut sqlite3) -> ::std::os::raw::c_int; +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct sqlite3_stmt { + _unused: [u8; 0], +} +extern "C" { + pub fn sqlite3_limit( + arg1: *mut sqlite3, + id: ::std::os::raw::c_int, + newVal: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_prepare( + db: *mut sqlite3, + zSql: *const ::std::os::raw::c_char, + nByte: ::std::os::raw::c_int, + ppStmt: *mut *mut sqlite3_stmt, + pzTail: *mut *const ::std::os::raw::c_char, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_prepare_v2( + db: *mut sqlite3, + zSql: *const ::std::os::raw::c_char, + nByte: ::std::os::raw::c_int, + ppStmt: *mut *mut sqlite3_stmt, + pzTail: *mut *const ::std::os::raw::c_char, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_prepare_v3( + db: *mut sqlite3, + zSql: *const ::std::os::raw::c_char, + nByte: ::std::os::raw::c_int, + prepFlags: ::std::os::raw::c_uint, + ppStmt: *mut *mut sqlite3_stmt, + pzTail: *mut *const ::std::os::raw::c_char, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_prepare16( + db: *mut sqlite3, + zSql: *const ::std::os::raw::c_void, + nByte: ::std::os::raw::c_int, + ppStmt: *mut *mut sqlite3_stmt, + pzTail: *mut *const ::std::os::raw::c_void, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_prepare16_v2( + db: *mut sqlite3, + zSql: *const ::std::os::raw::c_void, + nByte: ::std::os::raw::c_int, + ppStmt: *mut *mut sqlite3_stmt, + pzTail: *mut *const ::std::os::raw::c_void, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_prepare16_v3( + db: *mut sqlite3, + zSql: *const ::std::os::raw::c_void, + nByte: ::std::os::raw::c_int, + prepFlags: ::std::os::raw::c_uint, + ppStmt: *mut *mut sqlite3_stmt, + pzTail: *mut *const ::std::os::raw::c_void, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_sql(pStmt: *mut sqlite3_stmt) -> *const ::std::os::raw::c_char; +} +extern "C" { + pub fn 
sqlite3_expanded_sql(pStmt: *mut sqlite3_stmt) -> *mut ::std::os::raw::c_char; +} +extern "C" { + pub fn sqlite3_stmt_readonly(pStmt: *mut sqlite3_stmt) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_stmt_isexplain(pStmt: *mut sqlite3_stmt) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_stmt_busy(arg1: *mut sqlite3_stmt) -> ::std::os::raw::c_int; +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct sqlite3_value { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct sqlite3_context { + _unused: [u8; 0], +} +extern "C" { + pub fn sqlite3_bind_blob( + arg1: *mut sqlite3_stmt, + arg2: ::std::os::raw::c_int, + arg3: *const ::std::os::raw::c_void, + n: ::std::os::raw::c_int, + arg4: ::std::option::Option<unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void)>, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_bind_blob64( + arg1: *mut sqlite3_stmt, + arg2: ::std::os::raw::c_int, + arg3: *const ::std::os::raw::c_void, + arg4: sqlite3_uint64, + arg5: ::std::option::Option<unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void)>, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_bind_double( + arg1: *mut sqlite3_stmt, + arg2: ::std::os::raw::c_int, + arg3: f64, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_bind_int( + arg1: *mut sqlite3_stmt, + arg2: ::std::os::raw::c_int, + arg3: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_bind_int64( + arg1: *mut sqlite3_stmt, + arg2: ::std::os::raw::c_int, + arg3: sqlite3_int64, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_bind_null( + arg1: *mut sqlite3_stmt, + arg2: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_bind_text( + arg1: *mut sqlite3_stmt, + arg2: ::std::os::raw::c_int, + arg3: *const ::std::os::raw::c_char, + arg4: ::std::os::raw::c_int, + arg5: ::std::option::Option<unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void)>, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_bind_text16( + arg1: *mut sqlite3_stmt, + arg2: ::std::os::raw::c_int, + arg3: *const ::std::os::raw::c_void, + arg4: ::std::os::raw::c_int, + arg5: ::std::option::Option<unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void)>, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_bind_text64( + arg1: *mut sqlite3_stmt, + arg2: ::std::os::raw::c_int, + arg3: *const ::std::os::raw::c_char, + arg4: sqlite3_uint64, + arg5: ::std::option::Option<unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void)>, + encoding: ::std::os::raw::c_uchar, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_bind_value( + arg1: *mut sqlite3_stmt, + arg2: ::std::os::raw::c_int, + arg3: *const sqlite3_value, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_bind_pointer( + arg1: *mut sqlite3_stmt, + arg2: ::std::os::raw::c_int, + arg3: *mut ::std::os::raw::c_void, + arg4: *const ::std::os::raw::c_char, + arg5: ::std::option::Option<unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void)>, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_bind_zeroblob( + arg1: *mut sqlite3_stmt, + arg2: ::std::os::raw::c_int, + n: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_bind_zeroblob64( + arg1: *mut sqlite3_stmt, + arg2: ::std::os::raw::c_int, + arg3: sqlite3_uint64, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_bind_parameter_count(arg1: *mut sqlite3_stmt) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_bind_parameter_name( + arg1: *mut sqlite3_stmt, + arg2: ::std::os::raw::c_int, + ) -> *const ::std::os::raw::c_char; +} +extern "C" { + pub fn sqlite3_bind_parameter_index( + arg1: *mut sqlite3_stmt, + zName: *const ::std::os::raw::c_char, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn
sqlite3_clear_bindings(arg1: *mut sqlite3_stmt) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_column_count(pStmt: *mut sqlite3_stmt) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_column_name( + arg1: *mut sqlite3_stmt, + N: ::std::os::raw::c_int, + ) -> *const ::std::os::raw::c_char; +} +extern "C" { + pub fn sqlite3_column_name16( + arg1: *mut sqlite3_stmt, + N: ::std::os::raw::c_int, + ) -> *const ::std::os::raw::c_void; +} +extern "C" { + pub fn sqlite3_column_database_name( + arg1: *mut sqlite3_stmt, + arg2: ::std::os::raw::c_int, + ) -> *const ::std::os::raw::c_char; +} +extern "C" { + pub fn sqlite3_column_database_name16( + arg1: *mut sqlite3_stmt, + arg2: ::std::os::raw::c_int, + ) -> *const ::std::os::raw::c_void; +} +extern "C" { + pub fn sqlite3_column_table_name( + arg1: *mut sqlite3_stmt, + arg2: ::std::os::raw::c_int, + ) -> *const ::std::os::raw::c_char; +} +extern "C" { + pub fn sqlite3_column_table_name16( + arg1: *mut sqlite3_stmt, + arg2: ::std::os::raw::c_int, + ) -> *const ::std::os::raw::c_void; +} +extern "C" { + pub fn sqlite3_column_origin_name( + arg1: *mut sqlite3_stmt, + arg2: ::std::os::raw::c_int, + ) -> *const ::std::os::raw::c_char; +} +extern "C" { + pub fn sqlite3_column_origin_name16( + arg1: *mut sqlite3_stmt, + arg2: ::std::os::raw::c_int, + ) -> *const ::std::os::raw::c_void; +} +extern "C" { + pub fn sqlite3_column_decltype( + arg1: *mut sqlite3_stmt, + arg2: ::std::os::raw::c_int, + ) -> *const ::std::os::raw::c_char; +} +extern "C" { + pub fn sqlite3_column_decltype16( + arg1: *mut sqlite3_stmt, + arg2: ::std::os::raw::c_int, + ) -> *const ::std::os::raw::c_void; +} +extern "C" { + pub fn sqlite3_step(arg1: *mut sqlite3_stmt) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_data_count(pStmt: *mut sqlite3_stmt) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_column_blob( + arg1: *mut sqlite3_stmt, + iCol: ::std::os::raw::c_int, + ) -> *const ::std::os::raw::c_void; +} +extern "C" { + pub fn sqlite3_column_double(arg1: *mut sqlite3_stmt, iCol: ::std::os::raw::c_int) -> f64; +} +extern "C" { + pub fn sqlite3_column_int( + arg1: *mut sqlite3_stmt, + iCol: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_column_int64( + arg1: *mut sqlite3_stmt, + iCol: ::std::os::raw::c_int, + ) -> sqlite3_int64; +} +extern "C" { + pub fn sqlite3_column_text( + arg1: *mut sqlite3_stmt, + iCol: ::std::os::raw::c_int, + ) -> *const ::std::os::raw::c_uchar; +} +extern "C" { + pub fn sqlite3_column_text16( + arg1: *mut sqlite3_stmt, + iCol: ::std::os::raw::c_int, + ) -> *const ::std::os::raw::c_void; +} +extern "C" { + pub fn sqlite3_column_value( + arg1: *mut sqlite3_stmt, + iCol: ::std::os::raw::c_int, + ) -> *mut sqlite3_value; +} +extern "C" { + pub fn sqlite3_column_bytes( + arg1: *mut sqlite3_stmt, + iCol: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_column_bytes16( + arg1: *mut sqlite3_stmt, + iCol: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_column_type( + arg1: *mut sqlite3_stmt, + iCol: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_finalize(pStmt: *mut sqlite3_stmt) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_reset(pStmt: *mut sqlite3_stmt) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_create_function( + db: *mut sqlite3, + zFunctionName: *const ::std::os::raw::c_char, + nArg: ::std::os::raw::c_int, + eTextRep: 
::std::os::raw::c_int, + pApp: *mut ::std::os::raw::c_void, + xFunc: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_context, + arg2: ::std::os::raw::c_int, + arg3: *mut *mut sqlite3_value, + ), + >, + xStep: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_context, + arg2: ::std::os::raw::c_int, + arg3: *mut *mut sqlite3_value, + ), + >, + xFinal: ::std::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_context)>, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_create_function16( + db: *mut sqlite3, + zFunctionName: *const ::std::os::raw::c_void, + nArg: ::std::os::raw::c_int, + eTextRep: ::std::os::raw::c_int, + pApp: *mut ::std::os::raw::c_void, + xFunc: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_context, + arg2: ::std::os::raw::c_int, + arg3: *mut *mut sqlite3_value, + ), + >, + xStep: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_context, + arg2: ::std::os::raw::c_int, + arg3: *mut *mut sqlite3_value, + ), + >, + xFinal: ::std::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_context)>, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_create_function_v2( + db: *mut sqlite3, + zFunctionName: *const ::std::os::raw::c_char, + nArg: ::std::os::raw::c_int, + eTextRep: ::std::os::raw::c_int, + pApp: *mut ::std::os::raw::c_void, + xFunc: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_context, + arg2: ::std::os::raw::c_int, + arg3: *mut *mut sqlite3_value, + ), + >, + xStep: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_context, + arg2: ::std::os::raw::c_int, + arg3: *mut *mut sqlite3_value, + ), + >, + xFinal: ::std::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_context)>, + xDestroy: ::std::option::Option<unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void)>, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_create_window_function( + db: *mut sqlite3, + zFunctionName: *const ::std::os::raw::c_char, + nArg: ::std::os::raw::c_int, + eTextRep: ::std::os::raw::c_int, + pApp: *mut ::std::os::raw::c_void, + xStep: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_context, + arg2: ::std::os::raw::c_int, + arg3: *mut *mut sqlite3_value, + ), + >, + xFinal: ::std::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_context)>, + xValue: ::std::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_context)>, + xInverse: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_context, + arg2: ::std::os::raw::c_int, + arg3: *mut *mut sqlite3_value, + ), + >, + xDestroy: ::std::option::Option<unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void)>, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_aggregate_count(arg1: *mut sqlite3_context) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_expired(arg1: *mut sqlite3_stmt) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_transfer_bindings( + arg1: *mut sqlite3_stmt, + arg2: *mut sqlite3_stmt, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_global_recover() -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_thread_cleanup(); +} +extern "C" { + pub fn sqlite3_memory_alarm( + arg1: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut ::std::os::raw::c_void, + arg2: sqlite3_int64, + arg3: ::std::os::raw::c_int, + ), + >, + arg2: *mut ::std::os::raw::c_void, + arg3: sqlite3_int64, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_value_blob(arg1: *mut sqlite3_value) -> *const ::std::os::raw::c_void; +} +extern "C" { + pub fn sqlite3_value_double(arg1: *mut sqlite3_value) -> f64; +} +extern "C" { + pub fn sqlite3_value_int(arg1: *mut sqlite3_value) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_value_int64(arg1: *mut sqlite3_value) -> sqlite3_int64; +} +extern "C" { + pub fn sqlite3_value_pointer(
+ arg1: *mut sqlite3_value, + arg2: *const ::std::os::raw::c_char, + ) -> *mut ::std::os::raw::c_void; +} +extern "C" { + pub fn sqlite3_value_text(arg1: *mut sqlite3_value) -> *const ::std::os::raw::c_uchar; +} +extern "C" { + pub fn sqlite3_value_text16(arg1: *mut sqlite3_value) -> *const ::std::os::raw::c_void; +} +extern "C" { + pub fn sqlite3_value_text16le(arg1: *mut sqlite3_value) -> *const ::std::os::raw::c_void; +} +extern "C" { + pub fn sqlite3_value_text16be(arg1: *mut sqlite3_value) -> *const ::std::os::raw::c_void; +} +extern "C" { + pub fn sqlite3_value_bytes(arg1: *mut sqlite3_value) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_value_bytes16(arg1: *mut sqlite3_value) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_value_type(arg1: *mut sqlite3_value) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_value_numeric_type(arg1: *mut sqlite3_value) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_value_nochange(arg1: *mut sqlite3_value) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_value_frombind(arg1: *mut sqlite3_value) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_value_encoding(arg1: *mut sqlite3_value) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_value_subtype(arg1: *mut sqlite3_value) -> ::std::os::raw::c_uint; +} +extern "C" { + pub fn sqlite3_value_dup(arg1: *const sqlite3_value) -> *mut sqlite3_value; +} +extern "C" { + pub fn sqlite3_value_free(arg1: *mut sqlite3_value); +} +extern "C" { + pub fn sqlite3_aggregate_context( + arg1: *mut sqlite3_context, + nBytes: ::std::os::raw::c_int, + ) -> *mut ::std::os::raw::c_void; +} +extern "C" { + pub fn sqlite3_user_data(arg1: *mut sqlite3_context) -> *mut ::std::os::raw::c_void; +} +extern "C" { + pub fn sqlite3_context_db_handle(arg1: *mut sqlite3_context) -> *mut sqlite3; +} +extern "C" { + pub fn sqlite3_get_auxdata( + arg1: *mut sqlite3_context, + N: ::std::os::raw::c_int, + ) -> *mut ::std::os::raw::c_void; +} +extern "C" { + pub fn sqlite3_set_auxdata( + arg1: *mut sqlite3_context, + N: ::std::os::raw::c_int, + arg2: *mut ::std::os::raw::c_void, + arg3: ::std::option::Option<unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void)>, + ); +} +pub type sqlite3_destructor_type = + ::std::option::Option<unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void)>; +extern "C" { + pub fn sqlite3_result_blob( + arg1: *mut sqlite3_context, + arg2: *const ::std::os::raw::c_void, + arg3: ::std::os::raw::c_int, + arg4: ::std::option::Option<unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void)>, + ); +} +extern "C" { + pub fn sqlite3_result_blob64( + arg1: *mut sqlite3_context, + arg2: *const ::std::os::raw::c_void, + arg3: sqlite3_uint64, + arg4: ::std::option::Option<unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void)>, + ); +} +extern "C" { + pub fn sqlite3_result_double(arg1: *mut sqlite3_context, arg2: f64); +} +extern "C" { + pub fn sqlite3_result_error( + arg1: *mut sqlite3_context, + arg2: *const ::std::os::raw::c_char, + arg3: ::std::os::raw::c_int, + ); +} +extern "C" { + pub fn sqlite3_result_error16( + arg1: *mut sqlite3_context, + arg2: *const ::std::os::raw::c_void, + arg3: ::std::os::raw::c_int, + ); +} +extern "C" { + pub fn sqlite3_result_error_toobig(arg1: *mut sqlite3_context); +} +extern "C" { + pub fn sqlite3_result_error_nomem(arg1: *mut sqlite3_context); +} +extern "C" { + pub fn sqlite3_result_error_code(arg1: *mut sqlite3_context, arg2: ::std::os::raw::c_int); +} +extern "C" { + pub fn sqlite3_result_int(arg1: *mut sqlite3_context, arg2: ::std::os::raw::c_int); +} +extern "C" { + pub fn sqlite3_result_int64(arg1: *mut sqlite3_context, arg2: sqlite3_int64); +} +extern "C" { + pub fn sqlite3_result_null(arg1: *mut sqlite3_context); +}
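// Editor's note: a minimal usage sketch, not part of the vendored bindgen diff
// above. It exercises only functions declared in this file (sqlite3_open,
// sqlite3_prepare_v2, sqlite3_step, sqlite3_column_int, sqlite3_finalize,
// sqlite3_close). SQLITE_OK, SQLITE_ROW and SQLITE_DONE are assumed to be the
// result-code constants emitted earlier in these bindings (0, 100 and 101 in
// sqlite3.h). Everything here is raw unsafe FFI; callers normally layer safe
// RAII wrappers on top rather than using these declarations directly.
#[cfg(test)]
mod raw_binding_sketch {
    use super::*;

    #[test]
    fn select_one_plus_two() {
        unsafe {
            let mut db: *mut sqlite3 = std::ptr::null_mut();
            // ":memory:" opens a private in-memory database.
            assert_eq!(sqlite3_open(b":memory:\0".as_ptr().cast(), &mut db), SQLITE_OK);

            let mut stmt: *mut sqlite3_stmt = std::ptr::null_mut();
            // nByte = -1: read the SQL text up to its NUL terminator.
            let rc = sqlite3_prepare_v2(
                db,
                b"SELECT 1 + 2\0".as_ptr().cast(),
                -1,
                &mut stmt,
                std::ptr::null_mut(),
            );
            assert_eq!(rc, SQLITE_OK);

            assert_eq!(sqlite3_step(stmt), SQLITE_ROW);
            assert_eq!(sqlite3_column_int(stmt, 0), 3);
            assert_eq!(sqlite3_step(stmt), SQLITE_DONE);

            // Statements must be finalized before the connection is closed.
            assert_eq!(sqlite3_finalize(stmt), SQLITE_OK);
            assert_eq!(sqlite3_close(db), SQLITE_OK);
        }
    }
}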
+extern "C" { + pub fn sqlite3_result_text( + arg1: *mut sqlite3_context, + arg2: *const ::std::os::raw::c_char, + arg3: ::std::os::raw::c_int, + arg4: ::std::option::Option, + ); +} +extern "C" { + pub fn sqlite3_result_text64( + arg1: *mut sqlite3_context, + arg2: *const ::std::os::raw::c_char, + arg3: sqlite3_uint64, + arg4: ::std::option::Option, + encoding: ::std::os::raw::c_uchar, + ); +} +extern "C" { + pub fn sqlite3_result_text16( + arg1: *mut sqlite3_context, + arg2: *const ::std::os::raw::c_void, + arg3: ::std::os::raw::c_int, + arg4: ::std::option::Option, + ); +} +extern "C" { + pub fn sqlite3_result_text16le( + arg1: *mut sqlite3_context, + arg2: *const ::std::os::raw::c_void, + arg3: ::std::os::raw::c_int, + arg4: ::std::option::Option, + ); +} +extern "C" { + pub fn sqlite3_result_text16be( + arg1: *mut sqlite3_context, + arg2: *const ::std::os::raw::c_void, + arg3: ::std::os::raw::c_int, + arg4: ::std::option::Option, + ); +} +extern "C" { + pub fn sqlite3_result_value(arg1: *mut sqlite3_context, arg2: *mut sqlite3_value); +} +extern "C" { + pub fn sqlite3_result_pointer( + arg1: *mut sqlite3_context, + arg2: *mut ::std::os::raw::c_void, + arg3: *const ::std::os::raw::c_char, + arg4: ::std::option::Option, + ); +} +extern "C" { + pub fn sqlite3_result_zeroblob(arg1: *mut sqlite3_context, n: ::std::os::raw::c_int); +} +extern "C" { + pub fn sqlite3_result_zeroblob64( + arg1: *mut sqlite3_context, + n: sqlite3_uint64, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_result_subtype(arg1: *mut sqlite3_context, arg2: ::std::os::raw::c_uint); +} +extern "C" { + pub fn sqlite3_create_collation( + arg1: *mut sqlite3, + zName: *const ::std::os::raw::c_char, + eTextRep: ::std::os::raw::c_int, + pArg: *mut ::std::os::raw::c_void, + xCompare: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut ::std::os::raw::c_void, + arg2: ::std::os::raw::c_int, + arg3: *const ::std::os::raw::c_void, + arg4: ::std::os::raw::c_int, + arg5: *const ::std::os::raw::c_void, + ) -> ::std::os::raw::c_int, + >, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_create_collation_v2( + arg1: *mut sqlite3, + zName: *const ::std::os::raw::c_char, + eTextRep: ::std::os::raw::c_int, + pArg: *mut ::std::os::raw::c_void, + xCompare: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut ::std::os::raw::c_void, + arg2: ::std::os::raw::c_int, + arg3: *const ::std::os::raw::c_void, + arg4: ::std::os::raw::c_int, + arg5: *const ::std::os::raw::c_void, + ) -> ::std::os::raw::c_int, + >, + xDestroy: ::std::option::Option, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_create_collation16( + arg1: *mut sqlite3, + zName: *const ::std::os::raw::c_void, + eTextRep: ::std::os::raw::c_int, + pArg: *mut ::std::os::raw::c_void, + xCompare: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut ::std::os::raw::c_void, + arg2: ::std::os::raw::c_int, + arg3: *const ::std::os::raw::c_void, + arg4: ::std::os::raw::c_int, + arg5: *const ::std::os::raw::c_void, + ) -> ::std::os::raw::c_int, + >, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_collation_needed( + arg1: *mut sqlite3, + arg2: *mut ::std::os::raw::c_void, + arg3: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut ::std::os::raw::c_void, + arg2: *mut sqlite3, + eTextRep: ::std::os::raw::c_int, + arg3: *const ::std::os::raw::c_char, + ), + >, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_collation_needed16( + arg1: *mut sqlite3, + arg2: *mut ::std::os::raw::c_void, + 
arg3: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut ::std::os::raw::c_void, + arg2: *mut sqlite3, + eTextRep: ::std::os::raw::c_int, + arg3: *const ::std::os::raw::c_void, + ), + >, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_sleep(arg1: ::std::os::raw::c_int) -> ::std::os::raw::c_int; +} +extern "C" { + pub static mut sqlite3_temp_directory: *mut ::std::os::raw::c_char; +} +extern "C" { + pub static mut sqlite3_data_directory: *mut ::std::os::raw::c_char; +} +extern "C" { + pub fn sqlite3_win32_set_directory( + type_: ::std::os::raw::c_ulong, + zValue: *mut ::std::os::raw::c_void, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_win32_set_directory8( + type_: ::std::os::raw::c_ulong, + zValue: *const ::std::os::raw::c_char, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_win32_set_directory16( + type_: ::std::os::raw::c_ulong, + zValue: *const ::std::os::raw::c_void, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_get_autocommit(arg1: *mut sqlite3) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_db_handle(arg1: *mut sqlite3_stmt) -> *mut sqlite3; +} +extern "C" { + pub fn sqlite3_db_name( + db: *mut sqlite3, + N: ::std::os::raw::c_int, + ) -> *const ::std::os::raw::c_char; +} +extern "C" { + pub fn sqlite3_db_filename( + db: *mut sqlite3, + zDbName: *const ::std::os::raw::c_char, + ) -> sqlite3_filename; +} +extern "C" { + pub fn sqlite3_db_readonly( + db: *mut sqlite3, + zDbName: *const ::std::os::raw::c_char, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_txn_state( + arg1: *mut sqlite3, + zSchema: *const ::std::os::raw::c_char, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_next_stmt(pDb: *mut sqlite3, pStmt: *mut sqlite3_stmt) -> *mut sqlite3_stmt; +} +extern "C" { + pub fn sqlite3_commit_hook( + arg1: *mut sqlite3, + arg2: ::std::option::Option< + unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void) -> ::std::os::raw::c_int, + >, + arg3: *mut ::std::os::raw::c_void, + ) -> *mut ::std::os::raw::c_void; +} +extern "C" { + pub fn sqlite3_rollback_hook( + arg1: *mut sqlite3, + arg2: ::std::option::Option<unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void)>, + arg3: *mut ::std::os::raw::c_void, + ) -> *mut ::std::os::raw::c_void; +} +extern "C" { + pub fn sqlite3_autovacuum_pages( + db: *mut sqlite3, + arg1: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut ::std::os::raw::c_void, + arg2: *const ::std::os::raw::c_char, + arg3: ::std::os::raw::c_uint, + arg4: ::std::os::raw::c_uint, + arg5: ::std::os::raw::c_uint, + ) -> ::std::os::raw::c_uint, + >, + arg2: *mut ::std::os::raw::c_void, + arg3: ::std::option::Option<unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void)>, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_update_hook( + arg1: *mut sqlite3, + arg2: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut ::std::os::raw::c_void, + arg2: ::std::os::raw::c_int, + arg3: *const ::std::os::raw::c_char, + arg4: *const ::std::os::raw::c_char, + arg5: sqlite3_int64, + ), + >, + arg3: *mut ::std::os::raw::c_void, + ) -> *mut ::std::os::raw::c_void; +} +extern "C" { + pub fn sqlite3_enable_shared_cache(arg1: ::std::os::raw::c_int) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_release_memory(arg1: ::std::os::raw::c_int) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_db_release_memory(arg1: *mut sqlite3) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_soft_heap_limit64(N: sqlite3_int64) -> sqlite3_int64; +} +extern "C" { + pub fn sqlite3_hard_heap_limit64(N: sqlite3_int64) -> sqlite3_int64; +} +extern
"C" { + pub fn sqlite3_soft_heap_limit(N: ::std::os::raw::c_int); +} +extern "C" { + pub fn sqlite3_table_column_metadata( + db: *mut sqlite3, + zDbName: *const ::std::os::raw::c_char, + zTableName: *const ::std::os::raw::c_char, + zColumnName: *const ::std::os::raw::c_char, + pzDataType: *mut *const ::std::os::raw::c_char, + pzCollSeq: *mut *const ::std::os::raw::c_char, + pNotNull: *mut ::std::os::raw::c_int, + pPrimaryKey: *mut ::std::os::raw::c_int, + pAutoinc: *mut ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_load_extension( + db: *mut sqlite3, + zFile: *const ::std::os::raw::c_char, + zProc: *const ::std::os::raw::c_char, + pzErrMsg: *mut *mut ::std::os::raw::c_char, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_enable_load_extension( + db: *mut sqlite3, + onoff: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_auto_extension( + xEntryPoint: ::std::option::Option, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_cancel_auto_extension( + xEntryPoint: ::std::option::Option, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_reset_auto_extension(); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct sqlite3_module { + pub iVersion: ::std::os::raw::c_int, + pub xCreate: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3, + pAux: *mut ::std::os::raw::c_void, + argc: ::std::os::raw::c_int, + argv: *const *const ::std::os::raw::c_char, + ppVTab: *mut *mut sqlite3_vtab, + arg2: *mut *mut ::std::os::raw::c_char, + ) -> ::std::os::raw::c_int, + >, + pub xConnect: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3, + pAux: *mut ::std::os::raw::c_void, + argc: ::std::os::raw::c_int, + argv: *const *const ::std::os::raw::c_char, + ppVTab: *mut *mut sqlite3_vtab, + arg2: *mut *mut ::std::os::raw::c_char, + ) -> ::std::os::raw::c_int, + >, + pub xBestIndex: ::std::option::Option< + unsafe extern "C" fn( + pVTab: *mut sqlite3_vtab, + arg1: *mut sqlite3_index_info, + ) -> ::std::os::raw::c_int, + >, + pub xDisconnect: ::std::option::Option< + unsafe extern "C" fn(pVTab: *mut sqlite3_vtab) -> ::std::os::raw::c_int, + >, + pub xDestroy: ::std::option::Option< + unsafe extern "C" fn(pVTab: *mut sqlite3_vtab) -> ::std::os::raw::c_int, + >, + pub xOpen: ::std::option::Option< + unsafe extern "C" fn( + pVTab: *mut sqlite3_vtab, + ppCursor: *mut *mut sqlite3_vtab_cursor, + ) -> ::std::os::raw::c_int, + >, + pub xClose: ::std::option::Option< + unsafe extern "C" fn(arg1: *mut sqlite3_vtab_cursor) -> ::std::os::raw::c_int, + >, + pub xFilter: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_vtab_cursor, + idxNum: ::std::os::raw::c_int, + idxStr: *const ::std::os::raw::c_char, + argc: ::std::os::raw::c_int, + argv: *mut *mut sqlite3_value, + ) -> ::std::os::raw::c_int, + >, + pub xNext: ::std::option::Option< + unsafe extern "C" fn(arg1: *mut sqlite3_vtab_cursor) -> ::std::os::raw::c_int, + >, + pub xEof: ::std::option::Option< + unsafe extern "C" fn(arg1: *mut sqlite3_vtab_cursor) -> ::std::os::raw::c_int, + >, + pub xColumn: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_vtab_cursor, + arg2: *mut sqlite3_context, + arg3: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int, + >, + pub xRowid: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_vtab_cursor, + pRowid: *mut sqlite3_int64, + ) -> ::std::os::raw::c_int, + >, + pub xUpdate: ::std::option::Option< + unsafe extern "C" fn( + arg1: 
*mut sqlite3_vtab, + arg2: ::std::os::raw::c_int, + arg3: *mut *mut sqlite3_value, + arg4: *mut sqlite3_int64, + ) -> ::std::os::raw::c_int, + >, + pub xBegin: ::std::option::Option< + unsafe extern "C" fn(pVTab: *mut sqlite3_vtab) -> ::std::os::raw::c_int, + >, + pub xSync: ::std::option::Option< + unsafe extern "C" fn(pVTab: *mut sqlite3_vtab) -> ::std::os::raw::c_int, + >, + pub xCommit: ::std::option::Option< + unsafe extern "C" fn(pVTab: *mut sqlite3_vtab) -> ::std::os::raw::c_int, + >, + pub xRollback: ::std::option::Option< + unsafe extern "C" fn(pVTab: *mut sqlite3_vtab) -> ::std::os::raw::c_int, + >, + pub xFindFunction: ::std::option::Option< + unsafe extern "C" fn( + pVtab: *mut sqlite3_vtab, + nArg: ::std::os::raw::c_int, + zName: *const ::std::os::raw::c_char, + pxFunc: *mut ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut sqlite3_context, + arg2: ::std::os::raw::c_int, + arg3: *mut *mut sqlite3_value, + ), + >, + ppArg: *mut *mut ::std::os::raw::c_void, + ) -> ::std::os::raw::c_int, + >, + pub xRename: ::std::option::Option< + unsafe extern "C" fn( + pVtab: *mut sqlite3_vtab, + zNew: *const ::std::os::raw::c_char, + ) -> ::std::os::raw::c_int, + >, + pub xSavepoint: ::std::option::Option< + unsafe extern "C" fn( + pVTab: *mut sqlite3_vtab, + arg1: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int, + >, + pub xRelease: ::std::option::Option< + unsafe extern "C" fn( + pVTab: *mut sqlite3_vtab, + arg1: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int, + >, + pub xRollbackTo: ::std::option::Option< + unsafe extern "C" fn( + pVTab: *mut sqlite3_vtab, + arg1: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int, + >, + pub xShadowName: ::std::option::Option< + unsafe extern "C" fn(arg1: *const ::std::os::raw::c_char) -> ::std::os::raw::c_int, + >, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct sqlite3_index_info { + pub nConstraint: ::std::os::raw::c_int, + pub aConstraint: *mut sqlite3_index_constraint, + pub nOrderBy: ::std::os::raw::c_int, + pub aOrderBy: *mut sqlite3_index_orderby, + pub aConstraintUsage: *mut sqlite3_index_constraint_usage, + pub idxNum: ::std::os::raw::c_int, + pub idxStr: *mut ::std::os::raw::c_char, + pub needToFreeIdxStr: ::std::os::raw::c_int, + pub orderByConsumed: ::std::os::raw::c_int, + pub estimatedCost: f64, + pub estimatedRows: sqlite3_int64, + pub idxFlags: ::std::os::raw::c_int, + pub colUsed: sqlite3_uint64, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct sqlite3_index_constraint { + pub iColumn: ::std::os::raw::c_int, + pub op: ::std::os::raw::c_uchar, + pub usable: ::std::os::raw::c_uchar, + pub iTermOffset: ::std::os::raw::c_int, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct sqlite3_index_orderby { + pub iColumn: ::std::os::raw::c_int, + pub desc: ::std::os::raw::c_uchar, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct sqlite3_index_constraint_usage { + pub argvIndex: ::std::os::raw::c_int, + pub omit: ::std::os::raw::c_uchar, +} +extern "C" { + pub fn sqlite3_create_module( + db: *mut sqlite3, + zName: *const ::std::os::raw::c_char, + p: *const sqlite3_module, + pClientData: *mut ::std::os::raw::c_void, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_create_module_v2( + db: *mut sqlite3, + zName: *const ::std::os::raw::c_char, + p: *const sqlite3_module, + pClientData: *mut ::std::os::raw::c_void, + xDestroy: ::std::option::Option<unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void)>, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3_drop_modules( + db: *mut sqlite3, + azKeep: *mut *const
+extern "C" {
+    pub fn sqlite3_drop_modules(
+        db: *mut sqlite3,
+        azKeep: *mut *const ::std::os::raw::c_char,
+    ) -> ::std::os::raw::c_int;
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct sqlite3_vtab {
+    pub pModule: *const sqlite3_module,
+    pub nRef: ::std::os::raw::c_int,
+    pub zErrMsg: *mut ::std::os::raw::c_char,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct sqlite3_vtab_cursor {
+    pub pVtab: *mut sqlite3_vtab,
+}
+extern "C" {
+    pub fn sqlite3_declare_vtab(
+        arg1: *mut sqlite3,
+        zSQL: *const ::std::os::raw::c_char,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_overload_function(
+        arg1: *mut sqlite3,
+        zFuncName: *const ::std::os::raw::c_char,
+        nArg: ::std::os::raw::c_int,
+    ) -> ::std::os::raw::c_int;
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct sqlite3_blob {
+    pub _unused: [u8; 0],
+}
+extern "C" {
+    pub fn sqlite3_blob_open(
+        arg1: *mut sqlite3,
+        zDb: *const ::std::os::raw::c_char,
+        zTable: *const ::std::os::raw::c_char,
+        zColumn: *const ::std::os::raw::c_char,
+        iRow: sqlite3_int64,
+        flags: ::std::os::raw::c_int,
+        ppBlob: *mut *mut sqlite3_blob,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_blob_reopen(
+        arg1: *mut sqlite3_blob,
+        arg2: sqlite3_int64,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_blob_close(arg1: *mut sqlite3_blob) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_blob_bytes(arg1: *mut sqlite3_blob) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_blob_read(
+        arg1: *mut sqlite3_blob,
+        Z: *mut ::std::os::raw::c_void,
+        N: ::std::os::raw::c_int,
+        iOffset: ::std::os::raw::c_int,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_blob_write(
+        arg1: *mut sqlite3_blob,
+        z: *const ::std::os::raw::c_void,
+        n: ::std::os::raw::c_int,
+        iOffset: ::std::os::raw::c_int,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_vfs_find(zVfsName: *const ::std::os::raw::c_char) -> *mut sqlite3_vfs;
+}
+extern "C" {
+    pub fn sqlite3_vfs_register(
+        arg1: *mut sqlite3_vfs,
+        makeDflt: ::std::os::raw::c_int,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_vfs_unregister(arg1: *mut sqlite3_vfs) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_mutex_alloc(arg1: ::std::os::raw::c_int) -> *mut sqlite3_mutex;
+}
+extern "C" {
+    pub fn sqlite3_mutex_free(arg1: *mut sqlite3_mutex);
+}
+extern "C" {
+    pub fn sqlite3_mutex_enter(arg1: *mut sqlite3_mutex);
+}
+extern "C" {
+    pub fn sqlite3_mutex_try(arg1: *mut sqlite3_mutex) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_mutex_leave(arg1: *mut sqlite3_mutex);
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct sqlite3_mutex_methods {
+    pub xMutexInit: ::std::option::Option<unsafe extern "C" fn() -> ::std::os::raw::c_int>,
+    pub xMutexEnd: ::std::option::Option<unsafe extern "C" fn() -> ::std::os::raw::c_int>,
+    pub xMutexAlloc: ::std::option::Option<
+        unsafe extern "C" fn(arg1: ::std::os::raw::c_int) -> *mut sqlite3_mutex,
+    >,
+    pub xMutexFree: ::std::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_mutex)>,
+    pub xMutexEnter: ::std::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_mutex)>,
+    pub xMutexTry: ::std::option::Option<
+        unsafe extern "C" fn(arg1: *mut sqlite3_mutex) -> ::std::os::raw::c_int,
+    >,
+    pub xMutexLeave: ::std::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_mutex)>,
+    pub xMutexHeld: ::std::option::Option<
+        unsafe extern "C" fn(arg1: *mut sqlite3_mutex) -> ::std::os::raw::c_int,
+    >,
+    pub xMutexNotheld: ::std::option::Option<
+        unsafe extern "C" fn(arg1: *mut sqlite3_mutex) -> ::std::os::raw::c_int,
+    >,
+}
+extern "C" {
+    pub fn sqlite3_mutex_held(arg1: *mut sqlite3_mutex) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_mutex_notheld(arg1: *mut sqlite3_mutex) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_db_mutex(arg1: *mut sqlite3) -> *mut sqlite3_mutex;
+}
+extern "C" {
+    pub fn sqlite3_file_control(
+        arg1: *mut sqlite3,
+        zDbName: *const ::std::os::raw::c_char,
+        op: ::std::os::raw::c_int,
+        arg2: *mut ::std::os::raw::c_void,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_test_control(op: ::std::os::raw::c_int, ...) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_keyword_count() -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_keyword_name(
+        arg1: ::std::os::raw::c_int,
+        arg2: *mut *const ::std::os::raw::c_char,
+        arg3: *mut ::std::os::raw::c_int,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_keyword_check(
+        arg1: *const ::std::os::raw::c_char,
+        arg2: ::std::os::raw::c_int,
+    ) -> ::std::os::raw::c_int;
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct sqlite3_str {
+    pub _unused: [u8; 0],
+}
+extern "C" {
+    pub fn sqlite3_str_new(arg1: *mut sqlite3) -> *mut sqlite3_str;
+}
+extern "C" {
+    pub fn sqlite3_str_finish(arg1: *mut sqlite3_str) -> *mut ::std::os::raw::c_char;
+}
+extern "C" {
+    pub fn sqlite3_str_appendf(arg1: *mut sqlite3_str, zFormat: *const ::std::os::raw::c_char, ...);
+}
+extern "C" {
+    pub fn sqlite3_str_append(
+        arg1: *mut sqlite3_str,
+        zIn: *const ::std::os::raw::c_char,
+        N: ::std::os::raw::c_int,
+    );
+}
+extern "C" {
+    pub fn sqlite3_str_appendall(arg1: *mut sqlite3_str, zIn: *const ::std::os::raw::c_char);
+}
+extern "C" {
+    pub fn sqlite3_str_appendchar(
+        arg1: *mut sqlite3_str,
+        N: ::std::os::raw::c_int,
+        C: ::std::os::raw::c_char,
+    );
+}
+extern "C" {
+    pub fn sqlite3_str_reset(arg1: *mut sqlite3_str);
+}
+extern "C" {
+    pub fn sqlite3_str_errcode(arg1: *mut sqlite3_str) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_str_length(arg1: *mut sqlite3_str) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_str_value(arg1: *mut sqlite3_str) -> *mut ::std::os::raw::c_char;
+}
+extern "C" {
+    pub fn sqlite3_status(
+        op: ::std::os::raw::c_int,
+        pCurrent: *mut ::std::os::raw::c_int,
+        pHighwater: *mut ::std::os::raw::c_int,
+        resetFlag: ::std::os::raw::c_int,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_status64(
+        op: ::std::os::raw::c_int,
+        pCurrent: *mut sqlite3_int64,
+        pHighwater: *mut sqlite3_int64,
+        resetFlag: ::std::os::raw::c_int,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_db_status(
+        arg1: *mut sqlite3,
+        op: ::std::os::raw::c_int,
+        pCur: *mut ::std::os::raw::c_int,
+        pHiwtr: *mut ::std::os::raw::c_int,
+        resetFlg: ::std::os::raw::c_int,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_stmt_status(
+        arg1: *mut sqlite3_stmt,
+        op: ::std::os::raw::c_int,
+        resetFlg: ::std::os::raw::c_int,
+    ) -> ::std::os::raw::c_int;
+}
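For orientation, a minimal sketch of driving the `sqlite3_str` builder through these raw bindings (not from the diff; error handling omitted). A null db pointer is accepted by SQLite here, and the buffer returned by `sqlite3_str_finish` is allocated by SQLite, so it is released with `sqlite3_free`, which is declared earlier in this bindings file.

```rust
unsafe fn build_string() -> String {
    let s = sqlite3_str_new(::std::ptr::null_mut());
    sqlite3_str_appendall(s, c"hello".as_ptr());
    sqlite3_str_appendchar(s, 3, b'!' as ::std::os::raw::c_char);
    let z = sqlite3_str_finish(s);
    if z.is_null() {
        return String::new(); // empty string or OOM
    }
    let out = ::std::ffi::CStr::from_ptr(z).to_string_lossy().into_owned();
    sqlite3_free(z as *mut ::std::os::raw::c_void);
    out
}
```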
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct sqlite3_pcache {
+    pub _unused: [u8; 0],
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct sqlite3_pcache_page {
+    pub pBuf: *mut ::std::os::raw::c_void,
+    pub pExtra: *mut ::std::os::raw::c_void,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct sqlite3_pcache_methods2 {
+    pub iVersion: ::std::os::raw::c_int,
+    pub pArg: *mut ::std::os::raw::c_void,
+    pub xInit: ::std::option::Option<
+        unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void) -> ::std::os::raw::c_int,
+    >,
+    pub xShutdown: ::std::option::Option<unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void)>,
+    pub xCreate: ::std::option::Option<
+        unsafe extern "C" fn(
+            szPage: ::std::os::raw::c_int,
+            szExtra: ::std::os::raw::c_int,
+            bPurgeable: ::std::os::raw::c_int,
+        ) -> *mut sqlite3_pcache,
+    >,
+    pub xCachesize: ::std::option::Option<
+        unsafe extern "C" fn(arg1: *mut sqlite3_pcache, nCachesize: ::std::os::raw::c_int),
+    >,
+    pub xPagecount: ::std::option::Option<
+        unsafe extern "C" fn(arg1: *mut sqlite3_pcache) -> ::std::os::raw::c_int,
+    >,
+    pub xFetch: ::std::option::Option<
+        unsafe extern "C" fn(
+            arg1: *mut sqlite3_pcache,
+            key: ::std::os::raw::c_uint,
+            createFlag: ::std::os::raw::c_int,
+        ) -> *mut sqlite3_pcache_page,
+    >,
+    pub xUnpin: ::std::option::Option<
+        unsafe extern "C" fn(
+            arg1: *mut sqlite3_pcache,
+            arg2: *mut sqlite3_pcache_page,
+            discard: ::std::os::raw::c_int,
+        ),
+    >,
+    pub xRekey: ::std::option::Option<
+        unsafe extern "C" fn(
+            arg1: *mut sqlite3_pcache,
+            arg2: *mut sqlite3_pcache_page,
+            oldKey: ::std::os::raw::c_uint,
+            newKey: ::std::os::raw::c_uint,
+        ),
+    >,
+    pub xTruncate: ::std::option::Option<
+        unsafe extern "C" fn(arg1: *mut sqlite3_pcache, iLimit: ::std::os::raw::c_uint),
+    >,
+    pub xDestroy: ::std::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_pcache)>,
+    pub xShrink: ::std::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_pcache)>,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct sqlite3_pcache_methods {
+    pub pArg: *mut ::std::os::raw::c_void,
+    pub xInit: ::std::option::Option<
+        unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void) -> ::std::os::raw::c_int,
+    >,
+    pub xShutdown: ::std::option::Option<unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void)>,
+    pub xCreate: ::std::option::Option<
+        unsafe extern "C" fn(
+            szPage: ::std::os::raw::c_int,
+            bPurgeable: ::std::os::raw::c_int,
+        ) -> *mut sqlite3_pcache,
+    >,
+    pub xCachesize: ::std::option::Option<
+        unsafe extern "C" fn(arg1: *mut sqlite3_pcache, nCachesize: ::std::os::raw::c_int),
+    >,
+    pub xPagecount: ::std::option::Option<
+        unsafe extern "C" fn(arg1: *mut sqlite3_pcache) -> ::std::os::raw::c_int,
+    >,
+    pub xFetch: ::std::option::Option<
+        unsafe extern "C" fn(
+            arg1: *mut sqlite3_pcache,
+            key: ::std::os::raw::c_uint,
+            createFlag: ::std::os::raw::c_int,
+        ) -> *mut ::std::os::raw::c_void,
+    >,
+    pub xUnpin: ::std::option::Option<
+        unsafe extern "C" fn(
+            arg1: *mut sqlite3_pcache,
+            arg2: *mut ::std::os::raw::c_void,
+            discard: ::std::os::raw::c_int,
+        ),
+    >,
+    pub xRekey: ::std::option::Option<
+        unsafe extern "C" fn(
+            arg1: *mut sqlite3_pcache,
+            arg2: *mut ::std::os::raw::c_void,
+            oldKey: ::std::os::raw::c_uint,
+            newKey: ::std::os::raw::c_uint,
+        ),
+    >,
+    pub xTruncate: ::std::option::Option<
+        unsafe extern "C" fn(arg1: *mut sqlite3_pcache, iLimit: ::std::os::raw::c_uint),
+    >,
+    pub xDestroy: ::std::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_pcache)>,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct sqlite3_backup {
+    pub _unused: [u8; 0],
+}
+extern "C" {
+    pub fn sqlite3_backup_init(
+        pDest: *mut sqlite3,
+        zDestName: *const ::std::os::raw::c_char,
+        pSource: *mut sqlite3,
+        zSourceName: *const ::std::os::raw::c_char,
+    ) -> *mut sqlite3_backup;
+}
+extern "C" {
+    pub fn sqlite3_backup_step(
+        p: *mut sqlite3_backup,
+        nPage: ::std::os::raw::c_int,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_backup_finish(p: *mut sqlite3_backup) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_backup_remaining(p: *mut sqlite3_backup) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_backup_pagecount(p: *mut sqlite3_backup) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_unlock_notify(
+        pBlocked: *mut sqlite3,
+        xNotify: ::std::option::Option<
+            unsafe extern "C" fn(
+                apArg: *mut *mut ::std::os::raw::c_void,
+                nArg: ::std::os::raw::c_int,
+            ),
+        >,
+        pNotifyArg: *mut ::std::os::raw::c_void,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_stricmp(
+        arg1: *const ::std::os::raw::c_char,
+        arg2: *const ::std::os::raw::c_char,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_strnicmp(
+        arg1: *const ::std::os::raw::c_char,
+        arg2: *const ::std::os::raw::c_char,
+        arg3: ::std::os::raw::c_int,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_strglob(
+        zGlob: *const ::std::os::raw::c_char,
+        zStr: *const ::std::os::raw::c_char,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_strlike(
+        zGlob: *const ::std::os::raw::c_char,
+        zStr: *const ::std::os::raw::c_char,
+        cEsc: ::std::os::raw::c_uint,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_log(
+        iErrCode: ::std::os::raw::c_int,
+        zFormat: *const ::std::os::raw::c_char,
+        ...
+    );
+}
+extern "C" {
+    pub fn sqlite3_wal_hook(
+        arg1: *mut sqlite3,
+        arg2: ::std::option::Option<
+            unsafe extern "C" fn(
+                arg1: *mut ::std::os::raw::c_void,
+                arg2: *mut sqlite3,
+                arg3: *const ::std::os::raw::c_char,
+                arg4: ::std::os::raw::c_int,
+            ) -> ::std::os::raw::c_int,
+        >,
+        arg3: *mut ::std::os::raw::c_void,
+    ) -> *mut ::std::os::raw::c_void;
+}
+extern "C" {
+    pub fn sqlite3_wal_autocheckpoint(
+        db: *mut sqlite3,
+        N: ::std::os::raw::c_int,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_wal_checkpoint(
+        db: *mut sqlite3,
+        zDb: *const ::std::os::raw::c_char,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_wal_checkpoint_v2(
+        db: *mut sqlite3,
+        zDb: *const ::std::os::raw::c_char,
+        eMode: ::std::os::raw::c_int,
+        pnLog: *mut ::std::os::raw::c_int,
+        pnCkpt: *mut ::std::os::raw::c_int,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_vtab_config(
+        arg1: *mut sqlite3,
+        op: ::std::os::raw::c_int,
+        ...
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_vtab_on_conflict(arg1: *mut sqlite3) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_vtab_nochange(arg1: *mut sqlite3_context) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_vtab_collation(
+        arg1: *mut sqlite3_index_info,
+        arg2: ::std::os::raw::c_int,
+    ) -> *const ::std::os::raw::c_char;
+}
+extern "C" {
+    pub fn sqlite3_vtab_distinct(arg1: *mut sqlite3_index_info) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_vtab_in(
+        arg1: *mut sqlite3_index_info,
+        iCons: ::std::os::raw::c_int,
+        bHandle: ::std::os::raw::c_int,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_vtab_in_first(
+        pVal: *mut sqlite3_value,
+        ppOut: *mut *mut sqlite3_value,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_vtab_in_next(
+        pVal: *mut sqlite3_value,
+        ppOut: *mut *mut sqlite3_value,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_vtab_rhs_value(
+        arg1: *mut sqlite3_index_info,
+        arg2: ::std::os::raw::c_int,
+        ppVal: *mut *mut sqlite3_value,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_stmt_scanstatus(
+        pStmt: *mut sqlite3_stmt,
+        idx: ::std::os::raw::c_int,
+        iScanStatusOp: ::std::os::raw::c_int,
+        pOut: *mut ::std::os::raw::c_void,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_stmt_scanstatus_v2(
+        pStmt: *mut sqlite3_stmt,
+        idx: ::std::os::raw::c_int,
+        iScanStatusOp: ::std::os::raw::c_int,
+        flags: ::std::os::raw::c_int,
+        pOut: *mut ::std::os::raw::c_void,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_stmt_scanstatus_reset(arg1: *mut sqlite3_stmt);
+}
+extern "C" {
+    pub fn sqlite3_db_cacheflush(arg1: *mut sqlite3) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_preupdate_hook(
+        db: *mut sqlite3,
+        xPreUpdate: ::std::option::Option<
+            unsafe extern "C" fn(
+                pCtx: *mut ::std::os::raw::c_void,
+                db: *mut sqlite3,
+                op: ::std::os::raw::c_int,
+                zDb: *const ::std::os::raw::c_char,
+                zName: *const ::std::os::raw::c_char,
+                iKey1: sqlite3_int64,
+                iKey2: sqlite3_int64,
+            ),
+        >,
+        arg1: *mut ::std::os::raw::c_void,
+    ) -> *mut ::std::os::raw::c_void;
+}
+extern "C" {
+    pub fn sqlite3_preupdate_old(
+        arg1: *mut sqlite3,
+        arg2: ::std::os::raw::c_int,
+        arg3: *mut *mut sqlite3_value,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_preupdate_count(arg1: *mut sqlite3) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_preupdate_depth(arg1: *mut sqlite3) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_preupdate_new(
+        arg1: *mut sqlite3,
+        arg2: ::std::os::raw::c_int,
+        arg3: *mut *mut sqlite3_value,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_preupdate_blobwrite(arg1: *mut sqlite3) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_system_errno(arg1: *mut sqlite3) -> ::std::os::raw::c_int;
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct sqlite3_snapshot {
+    pub hidden: [::std::os::raw::c_uchar; 48usize],
+}
+extern "C" {
+    pub fn sqlite3_snapshot_get(
+        db: *mut sqlite3,
+        zSchema: *const ::std::os::raw::c_char,
+        ppSnapshot: *mut *mut sqlite3_snapshot,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_snapshot_open(
+        db: *mut sqlite3,
+        zSchema: *const ::std::os::raw::c_char,
+        pSnapshot: *mut sqlite3_snapshot,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_snapshot_free(arg1: *mut sqlite3_snapshot);
+}
+extern "C" {
+    pub fn sqlite3_snapshot_cmp(
+        p1: *mut sqlite3_snapshot,
+        p2: *mut sqlite3_snapshot,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_snapshot_recover(
+        db: *mut sqlite3,
+        zDb: *const ::std::os::raw::c_char,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3_serialize(
+        db: *mut sqlite3,
+        zSchema: *const ::std::os::raw::c_char,
+        piSize: *mut sqlite3_int64,
+        mFlags: ::std::os::raw::c_uint,
+    ) -> *mut ::std::os::raw::c_uchar;
+}
+extern "C" {
+    pub fn sqlite3_deserialize(
+        db: *mut sqlite3,
+        zSchema: *const ::std::os::raw::c_char,
+        pData: *mut ::std::os::raw::c_uchar,
+        szDb: sqlite3_int64,
+        szBuf: sqlite3_int64,
+        mFlags: ::std::os::raw::c_uint,
+    ) -> ::std::os::raw::c_int;
+}
+pub type sqlite3_rtree_dbl = f64;
+extern "C" {
+    pub fn sqlite3_rtree_geometry_callback(
+        db: *mut sqlite3,
+        zGeom: *const ::std::os::raw::c_char,
+        xGeom: ::std::option::Option<
+            unsafe extern "C" fn(
+                arg1: *mut sqlite3_rtree_geometry,
+                arg2: ::std::os::raw::c_int,
+                arg3: *mut sqlite3_rtree_dbl,
+                arg4: *mut ::std::os::raw::c_int,
+            ) -> ::std::os::raw::c_int,
+        >,
+        pContext: *mut ::std::os::raw::c_void,
+    ) -> ::std::os::raw::c_int;
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct sqlite3_rtree_geometry {
+    pub pContext: *mut ::std::os::raw::c_void,
+    pub nParam: ::std::os::raw::c_int,
+    pub aParam: *mut sqlite3_rtree_dbl,
+    pub pUser: *mut ::std::os::raw::c_void,
+    pub xDelUser: ::std::option::Option<unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void)>,
+}
+extern "C" {
+    pub fn sqlite3_rtree_query_callback(
+        db: *mut sqlite3,
+        zQueryFunc: *const ::std::os::raw::c_char,
+        xQueryFunc: ::std::option::Option<
+            unsafe extern "C" fn(arg1: *mut sqlite3_rtree_query_info) -> ::std::os::raw::c_int,
+        >,
+        pContext: *mut ::std::os::raw::c_void,
+        xDestructor: ::std::option::Option<unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void)>,
+    ) -> ::std::os::raw::c_int;
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct sqlite3_rtree_query_info {
+    pub pContext: *mut ::std::os::raw::c_void,
+    pub nParam: ::std::os::raw::c_int,
+    pub aParam: *mut sqlite3_rtree_dbl,
+    pub pUser: *mut ::std::os::raw::c_void,
+    pub xDelUser: ::std::option::Option<unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void)>,
+    pub aCoord: *mut sqlite3_rtree_dbl,
+    pub anQueue: *mut ::std::os::raw::c_uint,
+    pub nCoord: ::std::os::raw::c_int,
+    pub iLevel: ::std::os::raw::c_int,
+    pub mxLevel: ::std::os::raw::c_int,
+    pub iRowid: sqlite3_int64,
+    pub rParentScore: sqlite3_rtree_dbl,
+    pub eParentWithin: ::std::os::raw::c_int,
+    pub eWithin: ::std::os::raw::c_int,
+    pub rScore: sqlite3_rtree_dbl,
+    pub apSqlParam: *mut *mut sqlite3_value,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct sqlite3_session {
+    pub _unused: [u8; 0],
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct sqlite3_changeset_iter {
+    pub _unused: [u8; 0],
+}
+extern "C" {
+    pub fn sqlite3session_create(
+        db: *mut sqlite3,
+        zDb: *const ::std::os::raw::c_char,
+        ppSession: *mut *mut sqlite3_session,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3session_delete(pSession: *mut sqlite3_session);
+}
+extern "C" {
+    pub fn sqlite3session_object_config(
+        arg1: *mut sqlite3_session,
+        op: ::std::os::raw::c_int,
+        pArg: *mut ::std::os::raw::c_void,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3session_enable(
+        pSession: *mut sqlite3_session,
+        bEnable: ::std::os::raw::c_int,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3session_indirect(
+        pSession: *mut sqlite3_session,
+        bIndirect: ::std::os::raw::c_int,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3session_attach(
+        pSession: *mut sqlite3_session,
+        zTab: *const ::std::os::raw::c_char,
+    ) -> ::std::os::raw::c_int;
+}
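To make the session API flow concrete, here is a hedged sketch (not from the diff) of recording changes to one table and extracting a changeset blob; the `users` table name is hypothetical and error handling is omitted. `sqlite3_free` is declared earlier in this file.

```rust
unsafe fn snapshot_changes(db: *mut sqlite3) -> Vec<u8> {
    let mut sess: *mut sqlite3_session = ::std::ptr::null_mut();
    sqlite3session_create(db, c"main".as_ptr(), &mut sess);
    sqlite3session_attach(sess, c"users".as_ptr()); // hypothetical table
    // ... run writes through the usual prepare/step calls here ...
    let mut n: ::std::os::raw::c_int = 0;
    let mut p: *mut ::std::os::raw::c_void = ::std::ptr::null_mut();
    sqlite3session_changeset(sess, &mut n, &mut p);
    let out = ::std::slice::from_raw_parts(p as *const u8, n as usize).to_vec();
    sqlite3_free(p); // the changeset buffer is SQLite-allocated
    sqlite3session_delete(sess);
    out
}
```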
"C" { + pub fn sqlite3session_table_filter( + pSession: *mut sqlite3_session, + xFilter: ::std::option::Option< + unsafe extern "C" fn( + pCtx: *mut ::std::os::raw::c_void, + zTab: *const ::std::os::raw::c_char, + ) -> ::std::os::raw::c_int, + >, + pCtx: *mut ::std::os::raw::c_void, + ); +} +extern "C" { + pub fn sqlite3session_changeset( + pSession: *mut sqlite3_session, + pnChangeset: *mut ::std::os::raw::c_int, + ppChangeset: *mut *mut ::std::os::raw::c_void, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3session_changeset_size(pSession: *mut sqlite3_session) -> sqlite3_int64; +} +extern "C" { + pub fn sqlite3session_diff( + pSession: *mut sqlite3_session, + zFromDb: *const ::std::os::raw::c_char, + zTbl: *const ::std::os::raw::c_char, + pzErrMsg: *mut *mut ::std::os::raw::c_char, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3session_patchset( + pSession: *mut sqlite3_session, + pnPatchset: *mut ::std::os::raw::c_int, + ppPatchset: *mut *mut ::std::os::raw::c_void, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3session_isempty(pSession: *mut sqlite3_session) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3session_memory_used(pSession: *mut sqlite3_session) -> sqlite3_int64; +} +extern "C" { + pub fn sqlite3changeset_start( + pp: *mut *mut sqlite3_changeset_iter, + nChangeset: ::std::os::raw::c_int, + pChangeset: *mut ::std::os::raw::c_void, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3changeset_start_v2( + pp: *mut *mut sqlite3_changeset_iter, + nChangeset: ::std::os::raw::c_int, + pChangeset: *mut ::std::os::raw::c_void, + flags: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3changeset_next(pIter: *mut sqlite3_changeset_iter) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3changeset_op( + pIter: *mut sqlite3_changeset_iter, + pzTab: *mut *const ::std::os::raw::c_char, + pnCol: *mut ::std::os::raw::c_int, + pOp: *mut ::std::os::raw::c_int, + pbIndirect: *mut ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3changeset_pk( + pIter: *mut sqlite3_changeset_iter, + pabPK: *mut *mut ::std::os::raw::c_uchar, + pnCol: *mut ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3changeset_old( + pIter: *mut sqlite3_changeset_iter, + iVal: ::std::os::raw::c_int, + ppValue: *mut *mut sqlite3_value, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3changeset_new( + pIter: *mut sqlite3_changeset_iter, + iVal: ::std::os::raw::c_int, + ppValue: *mut *mut sqlite3_value, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3changeset_conflict( + pIter: *mut sqlite3_changeset_iter, + iVal: ::std::os::raw::c_int, + ppValue: *mut *mut sqlite3_value, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3changeset_fk_conflicts( + pIter: *mut sqlite3_changeset_iter, + pnOut: *mut ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3changeset_finalize(pIter: *mut sqlite3_changeset_iter) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3changeset_invert( + nIn: ::std::os::raw::c_int, + pIn: *const ::std::os::raw::c_void, + pnOut: *mut ::std::os::raw::c_int, + ppOut: *mut *mut ::std::os::raw::c_void, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sqlite3changeset_concat( + nA: ::std::os::raw::c_int, + pA: *mut ::std::os::raw::c_void, + nB: ::std::os::raw::c_int, + pB: *mut ::std::os::raw::c_void, + pnOut: *mut ::std::os::raw::c_int, + ppOut: 
+extern "C" {
+    pub fn sqlite3changeset_concat(
+        nA: ::std::os::raw::c_int,
+        pA: *mut ::std::os::raw::c_void,
+        nB: ::std::os::raw::c_int,
+        pB: *mut ::std::os::raw::c_void,
+        pnOut: *mut ::std::os::raw::c_int,
+        ppOut: *mut *mut ::std::os::raw::c_void,
+    ) -> ::std::os::raw::c_int;
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct sqlite3_changegroup {
+    pub _unused: [u8; 0],
+}
+extern "C" {
+    pub fn sqlite3changegroup_new(pp: *mut *mut sqlite3_changegroup) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3changegroup_add(
+        arg1: *mut sqlite3_changegroup,
+        nData: ::std::os::raw::c_int,
+        pData: *mut ::std::os::raw::c_void,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3changegroup_output(
+        arg1: *mut sqlite3_changegroup,
+        pnData: *mut ::std::os::raw::c_int,
+        ppData: *mut *mut ::std::os::raw::c_void,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3changegroup_delete(arg1: *mut sqlite3_changegroup);
+}
+extern "C" {
+    pub fn sqlite3changeset_apply(
+        db: *mut sqlite3,
+        nChangeset: ::std::os::raw::c_int,
+        pChangeset: *mut ::std::os::raw::c_void,
+        xFilter: ::std::option::Option<
+            unsafe extern "C" fn(
+                pCtx: *mut ::std::os::raw::c_void,
+                zTab: *const ::std::os::raw::c_char,
+            ) -> ::std::os::raw::c_int,
+        >,
+        xConflict: ::std::option::Option<
+            unsafe extern "C" fn(
+                pCtx: *mut ::std::os::raw::c_void,
+                eConflict: ::std::os::raw::c_int,
+                p: *mut sqlite3_changeset_iter,
+            ) -> ::std::os::raw::c_int,
+        >,
+        pCtx: *mut ::std::os::raw::c_void,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3changeset_apply_v2(
+        db: *mut sqlite3,
+        nChangeset: ::std::os::raw::c_int,
+        pChangeset: *mut ::std::os::raw::c_void,
+        xFilter: ::std::option::Option<
+            unsafe extern "C" fn(
+                pCtx: *mut ::std::os::raw::c_void,
+                zTab: *const ::std::os::raw::c_char,
+            ) -> ::std::os::raw::c_int,
+        >,
+        xConflict: ::std::option::Option<
+            unsafe extern "C" fn(
+                pCtx: *mut ::std::os::raw::c_void,
+                eConflict: ::std::os::raw::c_int,
+                p: *mut sqlite3_changeset_iter,
+            ) -> ::std::os::raw::c_int,
+        >,
+        pCtx: *mut ::std::os::raw::c_void,
+        ppRebase: *mut *mut ::std::os::raw::c_void,
+        pnRebase: *mut ::std::os::raw::c_int,
+        flags: ::std::os::raw::c_int,
+    ) -> ::std::os::raw::c_int;
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct sqlite3_rebaser {
+    pub _unused: [u8; 0],
+}
+extern "C" {
+    pub fn sqlite3rebaser_create(ppNew: *mut *mut sqlite3_rebaser) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3rebaser_configure(
+        arg1: *mut sqlite3_rebaser,
+        nRebase: ::std::os::raw::c_int,
+        pRebase: *const ::std::os::raw::c_void,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3rebaser_rebase(
+        arg1: *mut sqlite3_rebaser,
+        nIn: ::std::os::raw::c_int,
+        pIn: *const ::std::os::raw::c_void,
+        pnOut: *mut ::std::os::raw::c_int,
+        ppOut: *mut *mut ::std::os::raw::c_void,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3rebaser_delete(p: *mut sqlite3_rebaser);
+}
+extern "C" {
+    pub fn sqlite3changeset_apply_strm(
+        db: *mut sqlite3,
+        xInput: ::std::option::Option<
+            unsafe extern "C" fn(
+                pIn: *mut ::std::os::raw::c_void,
+                pData: *mut ::std::os::raw::c_void,
+                pnData: *mut ::std::os::raw::c_int,
+            ) -> ::std::os::raw::c_int,
+        >,
+        pIn: *mut ::std::os::raw::c_void,
+        xFilter: ::std::option::Option<
+            unsafe extern "C" fn(
+                pCtx: *mut ::std::os::raw::c_void,
+                zTab: *const ::std::os::raw::c_char,
+            ) -> ::std::os::raw::c_int,
+        >,
+        xConflict: ::std::option::Option<
+            unsafe extern "C" fn(
+                pCtx: *mut ::std::os::raw::c_void,
+                eConflict: ::std::os::raw::c_int,
+                p: *mut sqlite3_changeset_iter,
+            ) -> ::std::os::raw::c_int,
+        >,
+        pCtx: *mut ::std::os::raw::c_void,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3changeset_apply_v2_strm(
+        db: *mut sqlite3,
+        xInput: ::std::option::Option<
+            unsafe extern "C" fn(
+                pIn: *mut ::std::os::raw::c_void,
+                pData: *mut ::std::os::raw::c_void,
+                pnData: *mut ::std::os::raw::c_int,
+            ) -> ::std::os::raw::c_int,
+        >,
+        pIn: *mut ::std::os::raw::c_void,
+        xFilter: ::std::option::Option<
+            unsafe extern "C" fn(
+                pCtx: *mut ::std::os::raw::c_void,
+                zTab: *const ::std::os::raw::c_char,
+            ) -> ::std::os::raw::c_int,
+        >,
+        xConflict: ::std::option::Option<
+            unsafe extern "C" fn(
+                pCtx: *mut ::std::os::raw::c_void,
+                eConflict: ::std::os::raw::c_int,
+                p: *mut sqlite3_changeset_iter,
+            ) -> ::std::os::raw::c_int,
+        >,
+        pCtx: *mut ::std::os::raw::c_void,
+        ppRebase: *mut *mut ::std::os::raw::c_void,
+        pnRebase: *mut ::std::os::raw::c_int,
+        flags: ::std::os::raw::c_int,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3changeset_concat_strm(
+        xInputA: ::std::option::Option<
+            unsafe extern "C" fn(
+                pIn: *mut ::std::os::raw::c_void,
+                pData: *mut ::std::os::raw::c_void,
+                pnData: *mut ::std::os::raw::c_int,
+            ) -> ::std::os::raw::c_int,
+        >,
+        pInA: *mut ::std::os::raw::c_void,
+        xInputB: ::std::option::Option<
+            unsafe extern "C" fn(
+                pIn: *mut ::std::os::raw::c_void,
+                pData: *mut ::std::os::raw::c_void,
+                pnData: *mut ::std::os::raw::c_int,
+            ) -> ::std::os::raw::c_int,
+        >,
+        pInB: *mut ::std::os::raw::c_void,
+        xOutput: ::std::option::Option<
+            unsafe extern "C" fn(
+                pOut: *mut ::std::os::raw::c_void,
+                pData: *const ::std::os::raw::c_void,
+                nData: ::std::os::raw::c_int,
+            ) -> ::std::os::raw::c_int,
+        >,
+        pOut: *mut ::std::os::raw::c_void,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3changeset_invert_strm(
+        xInput: ::std::option::Option<
+            unsafe extern "C" fn(
+                pIn: *mut ::std::os::raw::c_void,
+                pData: *mut ::std::os::raw::c_void,
+                pnData: *mut ::std::os::raw::c_int,
+            ) -> ::std::os::raw::c_int,
+        >,
+        pIn: *mut ::std::os::raw::c_void,
+        xOutput: ::std::option::Option<
+            unsafe extern "C" fn(
+                pOut: *mut ::std::os::raw::c_void,
+                pData: *const ::std::os::raw::c_void,
+                nData: ::std::os::raw::c_int,
+            ) -> ::std::os::raw::c_int,
+        >,
+        pOut: *mut ::std::os::raw::c_void,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3changeset_start_strm(
+        pp: *mut *mut sqlite3_changeset_iter,
+        xInput: ::std::option::Option<
+            unsafe extern "C" fn(
+                pIn: *mut ::std::os::raw::c_void,
+                pData: *mut ::std::os::raw::c_void,
+                pnData: *mut ::std::os::raw::c_int,
+            ) -> ::std::os::raw::c_int,
+        >,
+        pIn: *mut ::std::os::raw::c_void,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3changeset_start_v2_strm(
+        pp: *mut *mut sqlite3_changeset_iter,
+        xInput: ::std::option::Option<
+            unsafe extern "C" fn(
+                pIn: *mut ::std::os::raw::c_void,
+                pData: *mut ::std::os::raw::c_void,
+                pnData: *mut ::std::os::raw::c_int,
+            ) -> ::std::os::raw::c_int,
+        >,
+        pIn: *mut ::std::os::raw::c_void,
+        flags: ::std::os::raw::c_int,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3session_changeset_strm(
+        pSession: *mut sqlite3_session,
+        xOutput: ::std::option::Option<
+            unsafe extern "C" fn(
+                pOut: *mut ::std::os::raw::c_void,
+                pData: *const ::std::os::raw::c_void,
+                nData: ::std::os::raw::c_int,
+            ) -> ::std::os::raw::c_int,
+        >,
+        pOut: *mut ::std::os::raw::c_void,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3session_patchset_strm(
+        pSession: *mut sqlite3_session,
+        xOutput: ::std::option::Option<
+            unsafe extern "C" fn(
+                pOut: *mut ::std::os::raw::c_void,
+                pData: *const ::std::os::raw::c_void,
+                nData: ::std::os::raw::c_int,
+            ) -> ::std::os::raw::c_int,
+        >,
+        pOut: *mut ::std::os::raw::c_void,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3changegroup_add_strm(
+        arg1: *mut sqlite3_changegroup,
+        xInput: ::std::option::Option<
+            unsafe extern "C" fn(
+                pIn: *mut ::std::os::raw::c_void,
+                pData: *mut ::std::os::raw::c_void,
+                pnData: *mut ::std::os::raw::c_int,
+            ) -> ::std::os::raw::c_int,
+        >,
+        pIn: *mut ::std::os::raw::c_void,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3changegroup_output_strm(
+        arg1: *mut sqlite3_changegroup,
+        xOutput: ::std::option::Option<
+            unsafe extern "C" fn(
+                pOut: *mut ::std::os::raw::c_void,
+                pData: *const ::std::os::raw::c_void,
+                nData: ::std::os::raw::c_int,
+            ) -> ::std::os::raw::c_int,
+        >,
+        pOut: *mut ::std::os::raw::c_void,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3rebaser_rebase_strm(
+        pRebaser: *mut sqlite3_rebaser,
+        xInput: ::std::option::Option<
+            unsafe extern "C" fn(
+                pIn: *mut ::std::os::raw::c_void,
+                pData: *mut ::std::os::raw::c_void,
+                pnData: *mut ::std::os::raw::c_int,
+            ) -> ::std::os::raw::c_int,
+        >,
+        pIn: *mut ::std::os::raw::c_void,
+        xOutput: ::std::option::Option<
+            unsafe extern "C" fn(
+                pOut: *mut ::std::os::raw::c_void,
+                pData: *const ::std::os::raw::c_void,
+                nData: ::std::os::raw::c_int,
+            ) -> ::std::os::raw::c_int,
+        >,
+        pOut: *mut ::std::os::raw::c_void,
+    ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+    pub fn sqlite3session_config(
+        op: ::std::os::raw::c_int,
+        pArg: *mut ::std::os::raw::c_void,
+    ) -> ::std::os::raw::c_int;
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct Fts5Context {
+    pub _unused: [u8; 0],
+}
+pub type fts5_extension_function = ::std::option::Option<
+    unsafe extern "C" fn(
+        pApi: *const Fts5ExtensionApi,
+        pFts: *mut Fts5Context,
+        pCtx: *mut sqlite3_context,
+        nVal: ::std::os::raw::c_int,
+        apVal: *mut *mut sqlite3_value,
+    ),
+>;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct Fts5PhraseIter {
+    pub a: *const ::std::os::raw::c_uchar,
+    pub b: *const ::std::os::raw::c_uchar,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct Fts5ExtensionApi {
+    pub iVersion: ::std::os::raw::c_int,
+    pub xUserData: ::std::option::Option<
+        unsafe extern "C" fn(arg1: *mut Fts5Context) -> *mut ::std::os::raw::c_void,
+    >,
+    pub xColumnCount: ::std::option::Option<
+        unsafe extern "C" fn(arg1: *mut Fts5Context) -> ::std::os::raw::c_int,
+    >,
+    pub xRowCount: ::std::option::Option<
+        unsafe extern "C" fn(
+            arg1: *mut Fts5Context,
+            pnRow: *mut sqlite3_int64,
+        ) -> ::std::os::raw::c_int,
+    >,
+    pub xColumnTotalSize: ::std::option::Option<
+        unsafe extern "C" fn(
+            arg1: *mut Fts5Context,
+            iCol: ::std::os::raw::c_int,
+            pnToken: *mut sqlite3_int64,
+        ) -> ::std::os::raw::c_int,
+    >,
+    pub xTokenize: ::std::option::Option<
+        unsafe extern "C" fn(
+            arg1: *mut Fts5Context,
+            pText: *const ::std::os::raw::c_char,
+            nText: ::std::os::raw::c_int,
+            pCtx: *mut ::std::os::raw::c_void,
+            xToken: ::std::option::Option<
+                unsafe extern "C" fn(
+                    arg1: *mut ::std::os::raw::c_void,
+                    arg2: ::std::os::raw::c_int,
+                    arg3: *const ::std::os::raw::c_char,
+                    arg4: ::std::os::raw::c_int,
+                    arg5: ::std::os::raw::c_int,
+                    arg6: ::std::os::raw::c_int,
+                ) -> ::std::os::raw::c_int,
+            >,
+        ) -> ::std::os::raw::c_int,
+    >,
+    pub xPhraseCount: ::std::option::Option<
+        unsafe extern "C" fn(arg1: *mut Fts5Context) -> ::std::os::raw::c_int,
+    >,
+    pub xPhraseSize: ::std::option::Option<
+        unsafe extern "C" fn(
+            arg1: *mut Fts5Context,
+            iPhrase: ::std::os::raw::c_int,
+        ) -> ::std::os::raw::c_int,
+    >,
+    pub xInstCount: ::std::option::Option<
+        unsafe extern "C" fn(
+            arg1: *mut Fts5Context,
+            pnInst: *mut ::std::os::raw::c_int,
+        ) -> ::std::os::raw::c_int,
+    >,
+    pub xInst: ::std::option::Option<
+        unsafe extern "C" fn(
+            arg1: *mut Fts5Context,
+            iIdx: ::std::os::raw::c_int,
+            piPhrase: *mut ::std::os::raw::c_int,
+            piCol: *mut ::std::os::raw::c_int,
+            piOff: *mut ::std::os::raw::c_int,
+        ) -> ::std::os::raw::c_int,
+    >,
+    pub xRowid:
+        ::std::option::Option<unsafe extern "C" fn(arg1: *mut Fts5Context) -> sqlite3_int64>,
+    pub xColumnText: ::std::option::Option<
+        unsafe extern "C" fn(
+            arg1: *mut Fts5Context,
+            iCol: ::std::os::raw::c_int,
+            pz: *mut *const ::std::os::raw::c_char,
+            pn: *mut ::std::os::raw::c_int,
+        ) -> ::std::os::raw::c_int,
+    >,
+    pub xColumnSize: ::std::option::Option<
+        unsafe extern "C" fn(
+            arg1: *mut Fts5Context,
+            iCol: ::std::os::raw::c_int,
+            pnToken: *mut ::std::os::raw::c_int,
+        ) -> ::std::os::raw::c_int,
+    >,
+    pub xQueryPhrase: ::std::option::Option<
+        unsafe extern "C" fn(
+            arg1: *mut Fts5Context,
+            iPhrase: ::std::os::raw::c_int,
+            pUserData: *mut ::std::os::raw::c_void,
+            arg2: ::std::option::Option<
+                unsafe extern "C" fn(
+                    arg1: *const Fts5ExtensionApi,
+                    arg2: *mut Fts5Context,
+                    arg3: *mut ::std::os::raw::c_void,
+                ) -> ::std::os::raw::c_int,
+            >,
+        ) -> ::std::os::raw::c_int,
+    >,
+    pub xSetAuxdata: ::std::option::Option<
+        unsafe extern "C" fn(
+            arg1: *mut Fts5Context,
+            pAux: *mut ::std::os::raw::c_void,
+            xDelete: ::std::option::Option<unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void)>,
+        ) -> ::std::os::raw::c_int,
+    >,
+    pub xGetAuxdata: ::std::option::Option<
+        unsafe extern "C" fn(
+            arg1: *mut Fts5Context,
+            bClear: ::std::os::raw::c_int,
+        ) -> *mut ::std::os::raw::c_void,
+    >,
+    pub xPhraseFirst: ::std::option::Option<
+        unsafe extern "C" fn(
+            arg1: *mut Fts5Context,
+            iPhrase: ::std::os::raw::c_int,
+            arg2: *mut Fts5PhraseIter,
+            arg3: *mut ::std::os::raw::c_int,
+            arg4: *mut ::std::os::raw::c_int,
+        ) -> ::std::os::raw::c_int,
+    >,
+    pub xPhraseNext: ::std::option::Option<
+        unsafe extern "C" fn(
+            arg1: *mut Fts5Context,
+            arg2: *mut Fts5PhraseIter,
+            piCol: *mut ::std::os::raw::c_int,
+            piOff: *mut ::std::os::raw::c_int,
+        ),
+    >,
+    pub xPhraseFirstColumn: ::std::option::Option<
+        unsafe extern "C" fn(
+            arg1: *mut Fts5Context,
+            iPhrase: ::std::os::raw::c_int,
+            arg2: *mut Fts5PhraseIter,
+            arg3: *mut ::std::os::raw::c_int,
+        ) -> ::std::os::raw::c_int,
+    >,
+    pub xPhraseNextColumn: ::std::option::Option<
+        unsafe extern "C" fn(
+            arg1: *mut Fts5Context,
+            arg2: *mut Fts5PhraseIter,
+            piCol: *mut ::std::os::raw::c_int,
+        ),
+    >,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct Fts5Tokenizer {
+    pub _unused: [u8; 0],
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct fts5_tokenizer {
+    pub xCreate: ::std::option::Option<
+        unsafe extern "C" fn(
+            arg1: *mut ::std::os::raw::c_void,
+            azArg: *mut *const ::std::os::raw::c_char,
+            nArg: ::std::os::raw::c_int,
+            ppOut: *mut *mut Fts5Tokenizer,
+        ) -> ::std::os::raw::c_int,
+    >,
+    pub xDelete: ::std::option::Option<unsafe extern "C" fn(arg1: *mut Fts5Tokenizer)>,
+    pub xTokenize: ::std::option::Option<
+        unsafe extern "C" fn(
+            arg1: *mut Fts5Tokenizer,
+            pCtx: *mut ::std::os::raw::c_void,
+            flags: ::std::os::raw::c_int,
+            pText: *const ::std::os::raw::c_char,
+            nText: ::std::os::raw::c_int,
+            xToken: ::std::option::Option<
+                unsafe extern "C" fn(
+                    pCtx: *mut ::std::os::raw::c_void,
+                    tflags: ::std::os::raw::c_int,
+                    pToken: *const ::std::os::raw::c_char,
+                    nToken: ::std::os::raw::c_int,
+                    iStart: ::std::os::raw::c_int,
+                    iEnd: ::std::os::raw::c_int,
+                ) -> ::std::os::raw::c_int,
+            >,
+        ) -> ::std::os::raw::c_int,
+    >,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct fts5_api {
+    pub iVersion: ::std::os::raw::c_int,
+    pub xCreateTokenizer: ::std::option::Option<
+        unsafe extern "C" fn(
+            pApi: *mut fts5_api,
+            zName: *const ::std::os::raw::c_char,
+            pContext: *mut ::std::os::raw::c_void,
+            pTokenizer: *mut fts5_tokenizer,
+            xDestroy: ::std::option::Option<unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void)>,
+        ) -> ::std::os::raw::c_int,
+    >,
+    pub xFindTokenizer: ::std::option::Option<
+        unsafe extern "C" fn(
+            pApi: *mut fts5_api,
+            zName: *const ::std::os::raw::c_char,
+            ppContext: *mut *mut ::std::os::raw::c_void,
+            pTokenizer: *mut fts5_tokenizer,
+        ) -> ::std::os::raw::c_int,
+    >,
+    pub xCreateFunction: ::std::option::Option<
+        unsafe extern "C" fn(
+            pApi: *mut fts5_api,
+            zName: *const ::std::os::raw::c_char,
+            pContext: *mut ::std::os::raw::c_void,
+            xFunction: fts5_extension_function,
+            xDestroy: ::std::option::Option<unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void)>,
+        ) -> ::std::os::raw::c_int,
+    >,
+}
diff --git a/shim/third-party/rust/fixups/libsqlite3-sys/fixups.toml b/shim/third-party/rust/fixups/libsqlite3-sys/fixups.toml
index b82bb848a2209..139464b1cab3c 100644
--- a/shim/third-party/rust/fixups/libsqlite3-sys/fixups.toml
+++ b/shim/third-party/rust/fixups/libsqlite3-sys/fixups.toml
@@ -1,11 +1,15 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
 # libsqlite3-sys uses a bindgen binding to libsqlite.
 # We can't easily import bindgen because of its libclang dependency,
 # so in the meantime we need to use pre-generated bindgen files.
-extra_mapped_srcs = {"sqlite3/bindgen_bundled_version.rs" = "src/bindgen.rs"}
-
-[env]
-OUT_DIR = "."
+env = { "OUT_DIR" = "$(location //third-party/rust/fixups/libsqlite3-sys:out_dir)" }
 
 [[buildscript]]
 [buildscript.cxx_library]
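Why the `OUT_DIR` remapping above matters: instead of mapping the pre-generated file into `src/`, the fixup now points `OUT_DIR` at a Buck-provided directory, because libsqlite3-sys pulls its bindings in with a compile-time include along these lines (paraphrased, not verbatim from the crate):

```rust
// The crate expects OUT_DIR/bindgen.rs to exist at compile time, whether it
// was produced by a real build script or, as here, by a Buck location macro.
include!(concat!(env!("OUT_DIR"), "/bindgen.rs"));
```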
diff --git a/shim/third-party/rust/fixups/lock_api/fixups.toml b/shim/third-party/rust/fixups/lock_api/fixups.toml
index 6c1a3d45a5a33..4d082640b5813 100644
--- a/shim/third-party/rust/fixups/lock_api/fixups.toml
+++ b/shim/third-party/rust/fixups/lock_api/fixups.toml
@@ -1,3 +1,10 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
 # the build script for lock_api is a version check for rust 1.61.
 [[buildscript]]
 [buildscript.rustc_flags]
diff --git a/shim/third-party/rust/fixups/log/fixups.toml b/shim/third-party/rust/fixups/log/fixups.toml
index 5e026f75e0de3..d514599437f6a 100644
--- a/shim/third-party/rust/fixups/log/fixups.toml
+++ b/shim/third-party/rust/fixups/log/fixups.toml
@@ -1,2 +1,9 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
 [[buildscript]]
 [buildscript.rustc_flags]
diff --git a/shim/third-party/rust/fixups/memchr/fixups.toml b/shim/third-party/rust/fixups/memchr/fixups.toml
index db40d72cb2eaf..554cc0dae2b2b 100644
--- a/shim/third-party/rust/fixups/memchr/fixups.toml
+++ b/shim/third-party/rust/fixups/memchr/fixups.toml
@@ -1 +1,8 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
 buildscript = []
diff --git a/shim/third-party/rust/fixups/memoffset/fixups.toml b/shim/third-party/rust/fixups/memoffset/fixups.toml
index db40d72cb2eaf..554cc0dae2b2b 100644
--- a/shim/third-party/rust/fixups/memoffset/fixups.toml
+++ b/shim/third-party/rust/fixups/memoffset/fixups.toml
@@ -1 +1,8 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
 buildscript = []
diff --git a/shim/third-party/rust/fixups/mio/fixups.toml b/shim/third-party/rust/fixups/mio/fixups.toml
index af7edb27e3bb7..9b40a18f4cab3 100644
--- a/shim/third-party/rust/fixups/mio/fixups.toml
+++ b/shim/third-party/rust/fixups/mio/fixups.toml
@@ -1 +1,8 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
 extra_srcs = ["src/**/*.rs"]
diff --git a/shim/third-party/rust/fixups/native-tls/fixups.toml b/shim/third-party/rust/fixups/native-tls/fixups.toml
index db40d72cb2eaf..554cc0dae2b2b 100644
--- a/shim/third-party/rust/fixups/native-tls/fixups.toml
+++ b/shim/third-party/rust/fixups/native-tls/fixups.toml
@@ -1 +1,8 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
 buildscript = []
diff --git a/shim/third-party/rust/fixups/nix/fixups.toml b/shim/third-party/rust/fixups/nix/fixups.toml
index 1849ea069d254..c269a19b413eb 100644
--- a/shim/third-party/rust/fixups/nix/fixups.toml
+++ b/shim/third-party/rust/fixups/nix/fixups.toml
@@ -1,2 +1,9 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
 buildscript = []
 extra_srcs = ["src/**/*.rs"]
diff --git a/shim/third-party/rust/fixups/nom/fixups.toml b/shim/third-party/rust/fixups/nom/fixups.toml
index db40d72cb2eaf..554cc0dae2b2b 100644
--- a/shim/third-party/rust/fixups/nom/fixups.toml
+++ b/shim/third-party/rust/fixups/nom/fixups.toml
@@ -1 +1,8 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
 buildscript = []
diff --git a/shim/third-party/rust/fixups/ntapi/fixups.toml b/shim/third-party/rust/fixups/ntapi/fixups.toml
index db40d72cb2eaf..554cc0dae2b2b 100644
--- a/shim/third-party/rust/fixups/ntapi/fixups.toml
+++ b/shim/third-party/rust/fixups/ntapi/fixups.toml
@@ -1 +1,8 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
 buildscript = []
diff --git a/shim/third-party/rust/fixups/num-bigint/fixups.toml b/shim/third-party/rust/fixups/num-bigint/fixups.toml
index df0b424f49700..55e01177fd86d 100644
--- a/shim/third-party/rust/fixups/num-bigint/fixups.toml
+++ b/shim/third-party/rust/fixups/num-bigint/fixups.toml
@@ -1,3 +1,10 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
 [[buildscript]]
 [buildscript.rustc_flags]
diff --git a/shim/third-party/rust/fixups/num-integer/fixups.toml b/shim/third-party/rust/fixups/num-integer/fixups.toml
index 5e026f75e0de3..d514599437f6a 100644
--- a/shim/third-party/rust/fixups/num-integer/fixups.toml
+++ b/shim/third-party/rust/fixups/num-integer/fixups.toml
@@ -1,2 +1,9 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
 [[buildscript]]
 [buildscript.rustc_flags]
diff --git a/shim/third-party/rust/fixups/num-traits/fixups.toml b/shim/third-party/rust/fixups/num-traits/fixups.toml
index 5e026f75e0de3..d514599437f6a 100644
--- a/shim/third-party/rust/fixups/num-traits/fixups.toml
+++ b/shim/third-party/rust/fixups/num-traits/fixups.toml
@@ -1,2 +1,9 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
 [[buildscript]]
 [buildscript.rustc_flags]
diff --git a/shim/third-party/rust/fixups/oid-registry/fixups.toml b/shim/third-party/rust/fixups/oid-registry/fixups.toml
new file mode 100644
index 0000000000000..45a72665bb10f
--- /dev/null
+++ b/shim/third-party/rust/fixups/oid-registry/fixups.toml
@@ -0,0 +1,14 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+cargo_env = [
+    "CARGO_PKG_NAME", # Needed by _buck1_handle_manifest_dir
+    "CARGO_MANIFEST_DIR",
+]
+
+[[buildscript]]
+[buildscript.gen_srcs]
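A note on `cargo_env` entries like the oid-registry one above: they exist because some crates expand cargo-provided variables at compile time, so a Buck-driven rustc invocation has to supply them just as cargo would. A hedged sketch of the pattern such a crate relies on (illustrative, not code from oid-registry):

```rust
// env! is resolved when the crate is compiled, so CARGO_PKG_NAME and
// CARGO_MANIFEST_DIR must be present in the build environment.
const NAME: &str = env!("CARGO_PKG_NAME");

fn asset_dir() -> std::path::PathBuf {
    std::path::Path::new(env!("CARGO_MANIFEST_DIR")).join("assets")
}
```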
diff --git a/shim/third-party/rust/fixups/parking_lot/fixups.toml b/shim/third-party/rust/fixups/parking_lot/fixups.toml
index 5e026f75e0de3..d514599437f6a 100644
--- a/shim/third-party/rust/fixups/parking_lot/fixups.toml
+++ b/shim/third-party/rust/fixups/parking_lot/fixups.toml
@@ -1,2 +1,9 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
 [[buildscript]]
 [buildscript.rustc_flags]
diff --git a/shim/third-party/rust/fixups/parking_lot_core/fixups.toml b/shim/third-party/rust/fixups/parking_lot_core/fixups.toml
index 5e026f75e0de3..d514599437f6a 100644
--- a/shim/third-party/rust/fixups/parking_lot_core/fixups.toml
+++ b/shim/third-party/rust/fixups/parking_lot_core/fixups.toml
@@ -1,2 +1,9 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
 [[buildscript]]
 [buildscript.rustc_flags]
diff --git a/shim/third-party/rust/fixups/paste/fixups.toml b/shim/third-party/rust/fixups/paste/fixups.toml
index db40d72cb2eaf..554cc0dae2b2b 100644
--- a/shim/third-party/rust/fixups/paste/fixups.toml
+++ b/shim/third-party/rust/fixups/paste/fixups.toml
@@ -1 +1,8 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
 buildscript = []
diff --git a/shim/third-party/rust/fixups/pest/fixups.toml b/shim/third-party/rust/fixups/pest/fixups.toml
index af7edb27e3bb7..9b40a18f4cab3 100644
--- a/shim/third-party/rust/fixups/pest/fixups.toml
+++ b/shim/third-party/rust/fixups/pest/fixups.toml
@@ -1 +1,8 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
 extra_srcs = ["src/**/*.rs"]
diff --git a/shim/third-party/rust/fixups/pin-project-internal/fixups.toml b/shim/third-party/rust/fixups/pin-project-internal/fixups.toml
index db40d72cb2eaf..554cc0dae2b2b 100644
--- a/shim/third-party/rust/fixups/pin-project-internal/fixups.toml
+++ b/shim/third-party/rust/fixups/pin-project-internal/fixups.toml
@@ -1 +1,8 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
 buildscript = []
diff --git a/shim/third-party/rust/fixups/platforms/fixups.toml b/shim/third-party/rust/fixups/platforms/fixups.toml
index e086791cd2f5b..beb28c099061b 100644
--- a/shim/third-party/rust/fixups/platforms/fixups.toml
+++ b/shim/third-party/rust/fixups/platforms/fixups.toml
@@ -1,3 +1,10 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
 # Buildscript sets TARGET in environment
 buildscript = []
diff --git a/shim/third-party/rust/fixups/prettyplease/fixups.toml b/shim/third-party/rust/fixups/prettyplease/fixups.toml
index 3510928422a31..118f26932835f 100644
--- a/shim/third-party/rust/fixups/prettyplease/fixups.toml
+++ b/shim/third-party/rust/fixups/prettyplease/fixups.toml
@@ -1,2 +1,9 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
 buildscript = []
 cargo_env = true
diff --git a/shim/third-party/rust/fixups/proc-macro-error-attr/fixups.toml b/shim/third-party/rust/fixups/proc-macro-error-attr/fixups.toml
index db40d72cb2eaf..554cc0dae2b2b 100644
--- a/shim/third-party/rust/fixups/proc-macro-error-attr/fixups.toml
+++ b/shim/third-party/rust/fixups/proc-macro-error-attr/fixups.toml
@@ -1 +1,8 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
 buildscript = []
diff --git a/shim/third-party/rust/fixups/proc-macro-error/fixups.toml b/shim/third-party/rust/fixups/proc-macro-error/fixups.toml
index db40d72cb2eaf..554cc0dae2b2b 100644
--- a/shim/third-party/rust/fixups/proc-macro-error/fixups.toml
+++ b/shim/third-party/rust/fixups/proc-macro-error/fixups.toml
@@ -1 +1,8 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
 buildscript = []
diff --git a/shim/third-party/rust/fixups/proc-macro-hack/fixups.toml b/shim/third-party/rust/fixups/proc-macro-hack/fixups.toml
index db40d72cb2eaf..554cc0dae2b2b 100644
--- a/shim/third-party/rust/fixups/proc-macro-hack/fixups.toml
+++ b/shim/third-party/rust/fixups/proc-macro-hack/fixups.toml
@@ -1 +1,8 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
 buildscript = []
diff --git a/shim/third-party/rust/fixups/proc-macro2/fixups.toml b/shim/third-party/rust/fixups/proc-macro2/fixups.toml
index 5e026f75e0de3..d514599437f6a 100644
--- a/shim/third-party/rust/fixups/proc-macro2/fixups.toml
+++ b/shim/third-party/rust/fixups/proc-macro2/fixups.toml
@@ -1,2 +1,9 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
 [[buildscript]]
 [buildscript.rustc_flags]
diff --git a/shim/third-party/rust/fixups/prost-build/fixups.toml b/shim/third-party/rust/fixups/prost-build/fixups.toml
index 5865e69048455..61a5cb67428b6 100644
--- a/shim/third-party/rust/fixups/prost-build/fixups.toml
+++ b/shim/third-party/rust/fixups/prost-build/fixups.toml
@@ -1,3 +1,10 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
 buildscript = []
 
 # These are here because the crate uses the env! macro
diff --git a/shim/third-party/rust/fixups/pulldown-cmark/fixups.toml b/shim/third-party/rust/fixups/pulldown-cmark/fixups.toml
index db40d72cb2eaf..554cc0dae2b2b 100644
--- a/shim/third-party/rust/fixups/pulldown-cmark/fixups.toml
+++ b/shim/third-party/rust/fixups/pulldown-cmark/fixups.toml
@@ -1 +1,8 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
 buildscript = []
diff --git a/shim/third-party/rust/fixups/quote/fixups.toml b/shim/third-party/rust/fixups/quote/fixups.toml
index db40d72cb2eaf..554cc0dae2b2b 100644
--- a/shim/third-party/rust/fixups/quote/fixups.toml
+++ b/shim/third-party/rust/fixups/quote/fixups.toml
@@ -1 +1,8 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
 buildscript = []
diff --git a/shim/third-party/rust/fixups/radium/fixups.toml b/shim/third-party/rust/fixups/radium/fixups.toml
index db40d72cb2eaf..554cc0dae2b2b 100644
--- a/shim/third-party/rust/fixups/radium/fixups.toml
+++ b/shim/third-party/rust/fixups/radium/fixups.toml
@@ -1 +1,8 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
 buildscript = []
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] diff --git a/shim/third-party/rust/fixups/ref-cast/fixups.toml b/shim/third-party/rust/fixups/ref-cast/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/ref-cast/fixups.toml +++ b/shim/third-party/rust/fixups/ref-cast/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] diff --git a/shim/third-party/rust/fixups/reqwest/fixups.toml b/shim/third-party/rust/fixups/reqwest/fixups.toml index af7edb27e3bb7..9b40a18f4cab3 100644 --- a/shim/third-party/rust/fixups/reqwest/fixups.toml +++ b/shim/third-party/rust/fixups/reqwest/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + extra_srcs = ["src/**/*.rs"] diff --git a/shim/third-party/rust/fixups/ring/fixups.toml b/shim/third-party/rust/fixups/ring/fixups.toml index 87574b6ff1b9d..8e7020eb716be 100644 --- a/shim/third-party/rust/fixups/ring/fixups.toml +++ b/shim/third-party/rust/fixups/ring/fixups.toml @@ -1,3 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + # Copied from fbsource fixup. 
buildscript = [] @@ -14,6 +21,22 @@ exclude = [ "crypto/fipsmodule/aes/aes.c" ] include_paths = ["include"] compiler_flags = ["-Wno-error"] +# The redundant `any` wrapper is needed to give this v0.17.5-specific config a distinct table key +[platform_fixup.'cfg(any(all(target_arch = "x86_64", target_os = "linux")))'] +version = "=0.17.5" +[platform_fixup.'cfg(any(all(target_arch = "x86_64", target_os = "linux")))'.env] +RING_CORE_PREFIX = "ring_core_0_17_5_" +[[platform_fixup.'cfg(any(all(target_arch = "x86_64", target_os = "linux")))'.buildscript]] +[platform_fixup.'cfg(any(all(target_arch = "x86_64", target_os = "linux")))'.buildscript.cxx_library] +name = "ring-c-asm-elf-x86_84" +srcs = [ "crypto/**/*.c", "pregenerated/*x86_64*-elf.S", "third_party/fiat/asm/fiat_curve25519_adx_mul.S", "third_party/fiat/asm/fiat_curve25519_adx_square.S" ] +headers = [ "include/**/*.h", "crypto/**/*.h", "third_party/**/*.h", "crypto/**/*.inl" ] +# Exclude C AES because we've got the x86_64 one +exclude = [ "crypto/fipsmodule/aes/aes.c" ] +include_paths = ["include"] +compiler_flags = ["-Wno-error"] +fixup_include_paths = ["include"] + [[platform_fixup.'cfg(all(target_arch = "aarch64", target_os = "linux"))'.buildscript]] [platform_fixup.'cfg(all(target_arch = "aarch64", target_os = "linux"))'.buildscript.cxx_library] name = "ring-c-asm-elf-aarch64" @@ -23,6 +46,20 @@ exclude = [ "crypto/cpu-intel.c" ] include_paths = ["include"] compiler_flags = ["-Wno-error"] +[platform_fixup.'cfg(any(all(target_arch = "aarch64", target_os = "linux")))'] +version = "=0.17.5" +[platform_fixup.'cfg(any(all(target_arch = "aarch64", target_os = "linux")))'.env] +RING_CORE_PREFIX = "ring_core_0_17_5_" +[[platform_fixup.'cfg(any(all(target_arch = "aarch64", target_os = "linux")))'.buildscript]] +[platform_fixup.'cfg(any(all(target_arch = "aarch64", target_os = "linux")))'.buildscript.cxx_library] +name = "ring-c-asm-elf-aarch64" +srcs = [ "crypto/**/*.c", "pregenerated/*armv8*-linux64.S", "pregenerated/*armx*-linux64.S" ] +headers = [ "include/**/*.h", "crypto/**/*.h", "third_party/**/*.h", "crypto/**/*.inl" ] +exclude = [ "crypto/cpu-intel.c" ] +include_paths = ["include"] +compiler_flags = ["-Wno-error"] +fixup_include_paths = ["include"] + [[platform_fixup.'cfg(all(target_arch = "x86_64", target_os = "macos"))'.buildscript]] [platform_fixup.'cfg(all(target_arch = "x86_64", target_os = "macos"))'.buildscript.cxx_library] name = "ring-c-asm-macos-x86_64" @@ -33,6 +70,21 @@ exclude = [ "crypto/fipsmodule/aes/aes.c" ] include_paths = ["include"] compiler_flags = ["-Wno-error"] +[platform_fixup.'cfg(any(all(target_arch = "x86_64", target_os = "macos")))'] +version = "=0.17.5" +[platform_fixup.'cfg(any(all(target_arch = "x86_64", target_os = "macos")))'.env] +RING_CORE_PREFIX = "ring_core_0_17_5_" +[[platform_fixup.'cfg(any(all(target_arch = "x86_64", target_os = "macos")))'.buildscript]] +[platform_fixup.'cfg(any(all(target_arch = "x86_64", target_os = "macos")))'.buildscript.cxx_library] +name = "ring-c-asm-macos-x86_64" +srcs = [ "crypto/**/*.c", "pregenerated/*x86_64*-macosx.S", "third_party/fiat/asm/fiat_curve25519_adx_mul.S", "third_party/fiat/asm/fiat_curve25519_adx_square.S" ] +headers = [ "include/**/*.h", "crypto/**/*.h", "third_party/**/*.h", "crypto/**/*.inl" ] +# Exclude C AES because we've got the x86_64 one +exclude = [ "crypto/fipsmodule/aes/aes.c" ] +include_paths = ["include"] +compiler_flags = ["-Wno-error"] +fixup_include_paths = ["include"] + [[platform_fixup.'cfg(all(target_arch = "aarch64", target_os = 
"macos"))'.buildscript]] [platform_fixup.'cfg(all(target_arch = "aarch64", target_os = "macos"))'.buildscript.cxx_library] name = "ring-c-asm-macos-arm64" @@ -42,6 +94,20 @@ exclude = [ "crypto/cpu-intel.c" ] include_paths = ["include"] compiler_flags = ["-Wno-error"] +[platform_fixup.'cfg(any(all(target_arch = "aarch64", target_os = "macos")))'] +version = "=0.17.5" +[platform_fixup.'cfg(any(all(target_arch = "aarch64", target_os = "macos")))'.env] +RING_CORE_PREFIX = "ring_core_0_17_5_" +[[platform_fixup.'cfg(any(all(target_arch = "aarch64", target_os = "macos")))'.buildscript]] +[platform_fixup.'cfg(any(all(target_arch = "aarch64", target_os = "macos")))'.buildscript.cxx_library] +name = "ring-c-asm-macos-arm64" +srcs = [ "crypto/**/*.c", "pregenerated/*armv8*-ios64.S", "pregenerated/*armx*-ios64.S" ] +headers = [ "include/**/*.h", "crypto/**/*.h", "third_party/**/*.h", "crypto/**/*.inl" ] +exclude = [ "crypto/cpu-intel.c" ] +include_paths = ["include"] +compiler_flags = ["-Wno-error"] +fixup_include_paths = ["include"] + [[platform_fixup.'cfg(all(target_arch = "x86_64", target_os = "windows", target_env = "gnu"))'.buildscript]] [platform_fixup.'cfg(all(target_arch = "x86_64", target_os = "windows", target_env = "gnu"))'.buildscript.cxx_library] name = "ring-c-win-x86_84" @@ -52,6 +118,21 @@ exclude = [ "crypto/fipsmodule/aes/aes.c" ] include_paths = ["include"] compiler_flags = ["-Wno-error"] +[platform_fixup.'cfg(any(all(target_arch = "x86_64", target_os = "windows", target_env = "gnu")))'] +version = "=0.17.5" +[platform_fixup.'cfg(any(all(target_arch = "x86_64", target_os = "windows", target_env = "gnu")))'.env] +RING_CORE_PREFIX = "ring_core_0_17_5_" +[[platform_fixup.'cfg(any(all(target_arch = "x86_64", target_os = "windows", target_env = "gnu")))'.buildscript]] +[platform_fixup.'cfg(any(all(target_arch = "x86_64", target_os = "windows", target_env = "gnu")))'.buildscript.cxx_library] +name = "ring-c-win-x86_84" +srcs = [ "crypto/**/*.c" ] +headers = [ "include/**/*.h", "crypto/**/*.h", "third_party/**/*.h", "crypto/**/*.inl" ] +# Exclude C AES because we've got the x86_64 one +exclude = [ "crypto/fipsmodule/aes/aes.c" ] +include_paths = ["include"] +compiler_flags = ["-Wno-error"] +fixup_include_paths = ["include"] + [[platform_fixup.'cfg(all(target_arch = "x86_64", target_os = "windows", target_env = "msvc"))'.buildscript]] [platform_fixup.'cfg(all(target_arch = "x86_64", target_os = "windows", target_env = "msvc"))'.buildscript.cxx_library] name = "ring-c-win-msvc-x86_84" @@ -61,7 +142,28 @@ headers = [ "include/**/*.h", "crypto/**/*.h", "third_party/**/*.h", "crypto/**/ exclude = [ "crypto/fipsmodule/aes/aes.c" ] include_paths = ["include"] +[platform_fixup.'cfg(any(all(target_arch = "x86_64", target_os = "windows", target_env = "msvc")))'] +version = "=0.17.5" +[platform_fixup.'cfg(any(all(target_arch = "x86_64", target_os = "windows", target_env = "msvc")))'.env] +RING_CORE_PREFIX = "ring_core_0_17_5_" +[[platform_fixup.'cfg(any(all(target_arch = "x86_64", target_os = "windows", target_env = "msvc")))'.buildscript]] +[platform_fixup.'cfg(any(all(target_arch = "x86_64", target_os = "windows", target_env = "msvc")))'.buildscript.cxx_library] +name = "ring-c-win-msvc-x86_84" +srcs = [ "crypto/**/*.c" ] +headers = [ "include/**/*.h", "crypto/**/*.h", "third_party/**/*.h", "crypto/**/*.inl" ] +# Exclude C AES because we've got the x86_64 one +exclude = [ "crypto/fipsmodule/aes/aes.c" ] +include_paths = ["include"] +fixup_include_paths = ["include"] + 
[[platform_fixup.'cfg(all(target_arch = "x86_64", target_os = "windows"))'.buildscript]] [platform_fixup.'cfg(all(target_arch = "x86_64", target_os = "windows"))'.buildscript.prebuilt_cxx_library] name = "ring-asm-windows-x86_84" static_libs = [ "pregenerated/*x86_64*-nasm.obj" ] + +[platform_fixup.'cfg(any(all(target_arch = "x86_64", target_os = "windows")))'] +version = "=0.17.5" +[[platform_fixup.'cfg(any(all(target_arch = "x86_64", target_os = "windows")))'.buildscript]] +[platform_fixup.'cfg(any(all(target_arch = "x86_64", target_os = "windows")))'.buildscript.prebuilt_cxx_library] +name = "ring-asm-windows-x86_84" +static_libs = [ "pregenerated/*x86_64*-nasm.o" ] diff --git a/shim/third-party/rust/fixups/ring/include/ring_core_generated/prefix_symbols.h b/shim/third-party/rust/fixups/ring/include/ring_core_generated/prefix_symbols.h new file mode 100644 index 0000000000000..9ee40055efdaf --- /dev/null +++ b/shim/third-party/rust/fixups/ring/include/ring_core_generated/prefix_symbols.h @@ -0,0 +1,119 @@ + +#ifndef ring_core_generated_PREFIX_SYMBOLS_H +#define ring_core_generated_PREFIX_SYMBOLS_H + +#define ecp_nistz256_point_double p256_point_double +#define ecp_nistz256_point_add p256_point_add +#define ecp_nistz256_point_add_affine p256_point_add_affine +#define ecp_nistz256_ord_mul_mont p256_scalar_mul_mont +#define ecp_nistz256_ord_sqr_mont p256_scalar_sqr_rep_mont +#define ecp_nistz256_mul_mont p256_mul_mont +#define ecp_nistz256_sqr_mont p256_sqr_mont +#define CRYPTO_memcmp ring_core_0_17_5_CRYPTO_memcmp +#define CRYPTO_poly1305_finish ring_core_0_17_5_CRYPTO_poly1305_finish +#define CRYPTO_poly1305_finish_neon ring_core_0_17_5_CRYPTO_poly1305_finish_neon +#define CRYPTO_poly1305_init ring_core_0_17_5_CRYPTO_poly1305_init +#define CRYPTO_poly1305_init_neon ring_core_0_17_5_CRYPTO_poly1305_init_neon +#define CRYPTO_poly1305_update ring_core_0_17_5_CRYPTO_poly1305_update +#define CRYPTO_poly1305_update_neon ring_core_0_17_5_CRYPTO_poly1305_update_neon +#define ChaCha20_ctr32 ring_core_0_17_5_ChaCha20_ctr32 +#define LIMBS_add_mod ring_core_0_17_5_LIMBS_add_mod +#define LIMBS_are_even ring_core_0_17_5_LIMBS_are_even +#define LIMBS_are_zero ring_core_0_17_5_LIMBS_are_zero +#define LIMBS_equal ring_core_0_17_5_LIMBS_equal +#define LIMBS_equal_limb ring_core_0_17_5_LIMBS_equal_limb +#define LIMBS_less_than ring_core_0_17_5_LIMBS_less_than +#define LIMBS_less_than_limb ring_core_0_17_5_LIMBS_less_than_limb +#define LIMBS_reduce_once ring_core_0_17_5_LIMBS_reduce_once +#define LIMBS_select_512_32 ring_core_0_17_5_LIMBS_select_512_32 +#define LIMBS_shl_mod ring_core_0_17_5_LIMBS_shl_mod +#define LIMBS_sub_mod ring_core_0_17_5_LIMBS_sub_mod +#define LIMBS_window5_split_window ring_core_0_17_5_LIMBS_window5_split_window +#define LIMBS_window5_unsplit_window ring_core_0_17_5_LIMBS_window5_unsplit_window +#define LIMB_shr ring_core_0_17_5_LIMB_shr +#define OPENSSL_armcap_P ring_core_0_17_5_OPENSSL_armcap_P +#define OPENSSL_cpuid_setup ring_core_0_17_5_OPENSSL_cpuid_setup +#define OPENSSL_ia32cap_P ring_core_0_17_5_OPENSSL_ia32cap_P +#define aes_hw_ctr32_encrypt_blocks ring_core_0_17_5_aes_hw_ctr32_encrypt_blocks +#define aes_hw_encrypt ring_core_0_17_5_aes_hw_encrypt +#define aes_hw_set_encrypt_key ring_core_0_17_5_aes_hw_set_encrypt_key +#define aes_nohw_ctr32_encrypt_blocks ring_core_0_17_5_aes_nohw_ctr32_encrypt_blocks +#define aes_nohw_encrypt ring_core_0_17_5_aes_nohw_encrypt +#define aes_nohw_set_encrypt_key ring_core_0_17_5_aes_nohw_set_encrypt_key +#define aesni_gcm_decrypt 
ring_core_0_17_5_aesni_gcm_decrypt +#define aesni_gcm_encrypt ring_core_0_17_5_aesni_gcm_encrypt +#define bn_from_montgomery_in_place ring_core_0_17_5_bn_from_montgomery_in_place +#define bn_gather5 ring_core_0_17_5_bn_gather5 +#define bn_mul_mont ring_core_0_17_5_bn_mul_mont +#define bn_mul_mont_gather5 ring_core_0_17_5_bn_mul_mont_gather5 +#define bn_neg_inv_mod_r_u64 ring_core_0_17_5_bn_neg_inv_mod_r_u64 +#define bn_power5 ring_core_0_17_5_bn_power5 +#define bn_scatter5 ring_core_0_17_5_bn_scatter5 +#define bn_sqr8x_internal ring_core_0_17_5_bn_sqr8x_internal +#define bn_sqrx8x_internal ring_core_0_17_5_bn_sqrx8x_internal +#define bsaes_ctr32_encrypt_blocks ring_core_0_17_5_bsaes_ctr32_encrypt_blocks +#define bssl_constant_time_test_conditional_memcpy ring_core_0_17_5_bssl_constant_time_test_conditional_memcpy +#define bssl_constant_time_test_conditional_memxor ring_core_0_17_5_bssl_constant_time_test_conditional_memxor +#define bssl_constant_time_test_main ring_core_0_17_5_bssl_constant_time_test_main +#define chacha20_poly1305_open ring_core_0_17_5_chacha20_poly1305_open +#define chacha20_poly1305_seal ring_core_0_17_5_chacha20_poly1305_seal +#define fiat_curve25519_adx_mul ring_core_0_17_5_fiat_curve25519_adx_mul +#define fiat_curve25519_adx_square ring_core_0_17_5_fiat_curve25519_adx_square +#define gcm_ghash_avx ring_core_0_17_5_gcm_ghash_avx +#define gcm_ghash_clmul ring_core_0_17_5_gcm_ghash_clmul +#define gcm_ghash_neon ring_core_0_17_5_gcm_ghash_neon +#define gcm_gmult_clmul ring_core_0_17_5_gcm_gmult_clmul +#define gcm_gmult_neon ring_core_0_17_5_gcm_gmult_neon +#define gcm_init_avx ring_core_0_17_5_gcm_init_avx +#define gcm_init_clmul ring_core_0_17_5_gcm_init_clmul +#define gcm_init_neon ring_core_0_17_5_gcm_init_neon +#define k25519Precomp ring_core_0_17_5_k25519Precomp +#define limbs_mul_add_limb ring_core_0_17_5_limbs_mul_add_limb +#define little_endian_bytes_from_scalar ring_core_0_17_5_little_endian_bytes_from_scalar +#define ecp_nistz256_neg ring_core_0_17_5_ecp_nistz256_neg +#define ecp_nistz256_select_w5 ring_core_0_17_5_ecp_nistz256_select_w5 +#define ecp_nistz256_select_w7 ring_core_0_17_5_ecp_nistz256_select_w7 +#define p256_mul_mont ring_core_0_17_5_p256_mul_mont +#define p256_point_add ring_core_0_17_5_p256_point_add +#define p256_point_add_affine ring_core_0_17_5_p256_point_add_affine +#define p256_point_double ring_core_0_17_5_p256_point_double +#define p256_point_mul ring_core_0_17_5_p256_point_mul +#define p256_point_mul_base ring_core_0_17_5_p256_point_mul_base +#define p256_point_mul_base_vartime ring_core_0_17_5_p256_point_mul_base_vartime +#define p256_scalar_mul_mont ring_core_0_17_5_p256_scalar_mul_mont +#define p256_scalar_sqr_rep_mont ring_core_0_17_5_p256_scalar_sqr_rep_mont +#define p256_sqr_mont ring_core_0_17_5_p256_sqr_mont +#define p384_elem_div_by_2 ring_core_0_17_5_p384_elem_div_by_2 +#define p384_elem_mul_mont ring_core_0_17_5_p384_elem_mul_mont +#define p384_elem_neg ring_core_0_17_5_p384_elem_neg +#define p384_elem_sub ring_core_0_17_5_p384_elem_sub +#define p384_point_add ring_core_0_17_5_p384_point_add +#define p384_point_double ring_core_0_17_5_p384_point_double +#define p384_point_mul ring_core_0_17_5_p384_point_mul +#define p384_scalar_mul_mont ring_core_0_17_5_p384_scalar_mul_mont +#define openssl_poly1305_neon2_addmulmod ring_core_0_17_5_openssl_poly1305_neon2_addmulmod +#define openssl_poly1305_neon2_blocks ring_core_0_17_5_openssl_poly1305_neon2_blocks +#define sha256_block_data_order ring_core_0_17_5_sha256_block_data_order 
+#define sha512_block_data_order ring_core_0_17_5_sha512_block_data_order +#define vpaes_ctr32_encrypt_blocks ring_core_0_17_5_vpaes_ctr32_encrypt_blocks +#define vpaes_encrypt ring_core_0_17_5_vpaes_encrypt +#define vpaes_encrypt_key_to_bsaes ring_core_0_17_5_vpaes_encrypt_key_to_bsaes +#define vpaes_set_encrypt_key ring_core_0_17_5_vpaes_set_encrypt_key +#define x25519_NEON ring_core_0_17_5_x25519_NEON +#define x25519_fe_invert ring_core_0_17_5_x25519_fe_invert +#define x25519_fe_isnegative ring_core_0_17_5_x25519_fe_isnegative +#define x25519_fe_mul_ttt ring_core_0_17_5_x25519_fe_mul_ttt +#define x25519_fe_neg ring_core_0_17_5_x25519_fe_neg +#define x25519_fe_tobytes ring_core_0_17_5_x25519_fe_tobytes +#define x25519_ge_double_scalarmult_vartime ring_core_0_17_5_x25519_ge_double_scalarmult_vartime +#define x25519_ge_frombytes_vartime ring_core_0_17_5_x25519_ge_frombytes_vartime +#define x25519_ge_scalarmult_base ring_core_0_17_5_x25519_ge_scalarmult_base +#define x25519_ge_scalarmult_base_adx ring_core_0_17_5_x25519_ge_scalarmult_base_adx +#define x25519_public_from_private_generic_masked ring_core_0_17_5_x25519_public_from_private_generic_masked +#define x25519_sc_mask ring_core_0_17_5_x25519_sc_mask +#define x25519_sc_muladd ring_core_0_17_5_x25519_sc_muladd +#define x25519_sc_reduce ring_core_0_17_5_x25519_sc_reduce +#define x25519_scalar_mult_adx ring_core_0_17_5_x25519_scalar_mult_adx +#define x25519_scalar_mult_generic_masked ring_core_0_17_5_x25519_scalar_mult_generic_masked + +#endif diff --git a/shim/third-party/rust/fixups/ring/include/ring_core_generated/prefix_symbols_asm.h b/shim/third-party/rust/fixups/ring/include/ring_core_generated/prefix_symbols_asm.h new file mode 100644 index 0000000000000..84e487091503a --- /dev/null +++ b/shim/third-party/rust/fixups/ring/include/ring_core_generated/prefix_symbols_asm.h @@ -0,0 +1,236 @@ + +#ifndef ring_core_generated_PREFIX_SYMBOLS_ASM_H +#define ring_core_generated_PREFIX_SYMBOLS_ASM_H + +#if defined(__APPLE__) +#define _ecp_nistz256_point_double _p256_point_double +#define _ecp_nistz256_point_add _p256_point_add +#define _ecp_nistz256_point_add_affine _p256_point_add_affine +#define _ecp_nistz256_ord_mul_mont _p256_scalar_mul_mont +#define _ecp_nistz256_ord_sqr_mont _p256_scalar_sqr_rep_mont +#define _ecp_nistz256_mul_mont _p256_mul_mont +#define _ecp_nistz256_sqr_mont _p256_sqr_mont +#define _CRYPTO_memcmp _ring_core_0_17_5_CRYPTO_memcmp +#define _CRYPTO_poly1305_finish _ring_core_0_17_5_CRYPTO_poly1305_finish +#define _CRYPTO_poly1305_finish_neon _ring_core_0_17_5_CRYPTO_poly1305_finish_neon +#define _CRYPTO_poly1305_init _ring_core_0_17_5_CRYPTO_poly1305_init +#define _CRYPTO_poly1305_init_neon _ring_core_0_17_5_CRYPTO_poly1305_init_neon +#define _CRYPTO_poly1305_update _ring_core_0_17_5_CRYPTO_poly1305_update +#define _CRYPTO_poly1305_update_neon _ring_core_0_17_5_CRYPTO_poly1305_update_neon +#define _ChaCha20_ctr32 _ring_core_0_17_5_ChaCha20_ctr32 +#define _LIMBS_add_mod _ring_core_0_17_5_LIMBS_add_mod +#define _LIMBS_are_even _ring_core_0_17_5_LIMBS_are_even +#define _LIMBS_are_zero _ring_core_0_17_5_LIMBS_are_zero +#define _LIMBS_equal _ring_core_0_17_5_LIMBS_equal +#define _LIMBS_equal_limb _ring_core_0_17_5_LIMBS_equal_limb +#define _LIMBS_less_than _ring_core_0_17_5_LIMBS_less_than +#define _LIMBS_less_than_limb _ring_core_0_17_5_LIMBS_less_than_limb +#define _LIMBS_reduce_once _ring_core_0_17_5_LIMBS_reduce_once +#define _LIMBS_select_512_32 _ring_core_0_17_5_LIMBS_select_512_32 +#define _LIMBS_shl_mod 
_ring_core_0_17_5_LIMBS_shl_mod +#define _LIMBS_sub_mod _ring_core_0_17_5_LIMBS_sub_mod +#define _LIMBS_window5_split_window _ring_core_0_17_5_LIMBS_window5_split_window +#define _LIMBS_window5_unsplit_window _ring_core_0_17_5_LIMBS_window5_unsplit_window +#define _LIMB_shr _ring_core_0_17_5_LIMB_shr +#define _OPENSSL_armcap_P _ring_core_0_17_5_OPENSSL_armcap_P +#define _OPENSSL_cpuid_setup _ring_core_0_17_5_OPENSSL_cpuid_setup +#define _OPENSSL_ia32cap_P _ring_core_0_17_5_OPENSSL_ia32cap_P +#define _aes_hw_ctr32_encrypt_blocks _ring_core_0_17_5_aes_hw_ctr32_encrypt_blocks +#define _aes_hw_encrypt _ring_core_0_17_5_aes_hw_encrypt +#define _aes_hw_set_encrypt_key _ring_core_0_17_5_aes_hw_set_encrypt_key +#define _aes_nohw_ctr32_encrypt_blocks _ring_core_0_17_5_aes_nohw_ctr32_encrypt_blocks +#define _aes_nohw_encrypt _ring_core_0_17_5_aes_nohw_encrypt +#define _aes_nohw_set_encrypt_key _ring_core_0_17_5_aes_nohw_set_encrypt_key +#define _aesni_gcm_decrypt _ring_core_0_17_5_aesni_gcm_decrypt +#define _aesni_gcm_encrypt _ring_core_0_17_5_aesni_gcm_encrypt +#define _bn_from_montgomery_in_place _ring_core_0_17_5_bn_from_montgomery_in_place +#define _bn_gather5 _ring_core_0_17_5_bn_gather5 +#define _bn_mul_mont _ring_core_0_17_5_bn_mul_mont +#define _bn_mul_mont_gather5 _ring_core_0_17_5_bn_mul_mont_gather5 +#define _bn_neg_inv_mod_r_u64 _ring_core_0_17_5_bn_neg_inv_mod_r_u64 +#define _bn_power5 _ring_core_0_17_5_bn_power5 +#define _bn_scatter5 _ring_core_0_17_5_bn_scatter5 +#define _bn_sqr8x_internal _ring_core_0_17_5_bn_sqr8x_internal +#define _bn_sqrx8x_internal _ring_core_0_17_5_bn_sqrx8x_internal +#define _bsaes_ctr32_encrypt_blocks _ring_core_0_17_5_bsaes_ctr32_encrypt_blocks +#define _bssl_constant_time_test_conditional_memcpy _ring_core_0_17_5_bssl_constant_time_test_conditional_memcpy +#define _bssl_constant_time_test_conditional_memxor _ring_core_0_17_5_bssl_constant_time_test_conditional_memxor +#define _bssl_constant_time_test_main _ring_core_0_17_5_bssl_constant_time_test_main +#define _chacha20_poly1305_open _ring_core_0_17_5_chacha20_poly1305_open +#define _chacha20_poly1305_seal _ring_core_0_17_5_chacha20_poly1305_seal +#define _fiat_curve25519_adx_mul _ring_core_0_17_5_fiat_curve25519_adx_mul +#define _fiat_curve25519_adx_square _ring_core_0_17_5_fiat_curve25519_adx_square +#define _gcm_ghash_avx _ring_core_0_17_5_gcm_ghash_avx +#define _gcm_ghash_clmul _ring_core_0_17_5_gcm_ghash_clmul +#define _gcm_ghash_neon _ring_core_0_17_5_gcm_ghash_neon +#define _gcm_gmult_clmul _ring_core_0_17_5_gcm_gmult_clmul +#define _gcm_gmult_neon _ring_core_0_17_5_gcm_gmult_neon +#define _gcm_init_avx _ring_core_0_17_5_gcm_init_avx +#define _gcm_init_clmul _ring_core_0_17_5_gcm_init_clmul +#define _gcm_init_neon _ring_core_0_17_5_gcm_init_neon +#define _k25519Precomp _ring_core_0_17_5_k25519Precomp +#define _limbs_mul_add_limb _ring_core_0_17_5_limbs_mul_add_limb +#define _little_endian_bytes_from_scalar _ring_core_0_17_5_little_endian_bytes_from_scalar +#define _ecp_nistz256_neg _ring_core_0_17_5_ecp_nistz256_neg +#define _ecp_nistz256_select_w5 _ring_core_0_17_5_ecp_nistz256_select_w5 +#define _ecp_nistz256_select_w7 _ring_core_0_17_5_ecp_nistz256_select_w7 +#define _p256_mul_mont _ring_core_0_17_5_p256_mul_mont +#define _p256_point_add _ring_core_0_17_5_p256_point_add +#define _p256_point_add_affine _ring_core_0_17_5_p256_point_add_affine +#define _p256_point_double _ring_core_0_17_5_p256_point_double +#define _p256_point_mul _ring_core_0_17_5_p256_point_mul +#define _p256_point_mul_base 
_ring_core_0_17_5_p256_point_mul_base +#define _p256_point_mul_base_vartime _ring_core_0_17_5_p256_point_mul_base_vartime +#define _p256_scalar_mul_mont _ring_core_0_17_5_p256_scalar_mul_mont +#define _p256_scalar_sqr_rep_mont _ring_core_0_17_5_p256_scalar_sqr_rep_mont +#define _p256_sqr_mont _ring_core_0_17_5_p256_sqr_mont +#define _p384_elem_div_by_2 _ring_core_0_17_5_p384_elem_div_by_2 +#define _p384_elem_mul_mont _ring_core_0_17_5_p384_elem_mul_mont +#define _p384_elem_neg _ring_core_0_17_5_p384_elem_neg +#define _p384_elem_sub _ring_core_0_17_5_p384_elem_sub +#define _p384_point_add _ring_core_0_17_5_p384_point_add +#define _p384_point_double _ring_core_0_17_5_p384_point_double +#define _p384_point_mul _ring_core_0_17_5_p384_point_mul +#define _p384_scalar_mul_mont _ring_core_0_17_5_p384_scalar_mul_mont +#define _openssl_poly1305_neon2_addmulmod _ring_core_0_17_5_openssl_poly1305_neon2_addmulmod +#define _openssl_poly1305_neon2_blocks _ring_core_0_17_5_openssl_poly1305_neon2_blocks +#define _sha256_block_data_order _ring_core_0_17_5_sha256_block_data_order +#define _sha512_block_data_order _ring_core_0_17_5_sha512_block_data_order +#define _vpaes_ctr32_encrypt_blocks _ring_core_0_17_5_vpaes_ctr32_encrypt_blocks +#define _vpaes_encrypt _ring_core_0_17_5_vpaes_encrypt +#define _vpaes_encrypt_key_to_bsaes _ring_core_0_17_5_vpaes_encrypt_key_to_bsaes +#define _vpaes_set_encrypt_key _ring_core_0_17_5_vpaes_set_encrypt_key +#define _x25519_NEON _ring_core_0_17_5_x25519_NEON +#define _x25519_fe_invert _ring_core_0_17_5_x25519_fe_invert +#define _x25519_fe_isnegative _ring_core_0_17_5_x25519_fe_isnegative +#define _x25519_fe_mul_ttt _ring_core_0_17_5_x25519_fe_mul_ttt +#define _x25519_fe_neg _ring_core_0_17_5_x25519_fe_neg +#define _x25519_fe_tobytes _ring_core_0_17_5_x25519_fe_tobytes +#define _x25519_ge_double_scalarmult_vartime _ring_core_0_17_5_x25519_ge_double_scalarmult_vartime +#define _x25519_ge_frombytes_vartime _ring_core_0_17_5_x25519_ge_frombytes_vartime +#define _x25519_ge_scalarmult_base _ring_core_0_17_5_x25519_ge_scalarmult_base +#define _x25519_ge_scalarmult_base_adx _ring_core_0_17_5_x25519_ge_scalarmult_base_adx +#define _x25519_public_from_private_generic_masked _ring_core_0_17_5_x25519_public_from_private_generic_masked +#define _x25519_sc_mask _ring_core_0_17_5_x25519_sc_mask +#define _x25519_sc_muladd _ring_core_0_17_5_x25519_sc_muladd +#define _x25519_sc_reduce _ring_core_0_17_5_x25519_sc_reduce +#define _x25519_scalar_mult_adx _ring_core_0_17_5_x25519_scalar_mult_adx +#define _x25519_scalar_mult_generic_masked _ring_core_0_17_5_x25519_scalar_mult_generic_masked + +#else +#define ecp_nistz256_point_double p256_point_double +#define ecp_nistz256_point_add p256_point_add +#define ecp_nistz256_point_add_affine p256_point_add_affine +#define ecp_nistz256_ord_mul_mont p256_scalar_mul_mont +#define ecp_nistz256_ord_sqr_mont p256_scalar_sqr_rep_mont +#define ecp_nistz256_mul_mont p256_mul_mont +#define ecp_nistz256_sqr_mont p256_sqr_mont +#define CRYPTO_memcmp ring_core_0_17_5_CRYPTO_memcmp +#define CRYPTO_poly1305_finish ring_core_0_17_5_CRYPTO_poly1305_finish +#define CRYPTO_poly1305_finish_neon ring_core_0_17_5_CRYPTO_poly1305_finish_neon +#define CRYPTO_poly1305_init ring_core_0_17_5_CRYPTO_poly1305_init +#define CRYPTO_poly1305_init_neon ring_core_0_17_5_CRYPTO_poly1305_init_neon +#define CRYPTO_poly1305_update ring_core_0_17_5_CRYPTO_poly1305_update +#define CRYPTO_poly1305_update_neon ring_core_0_17_5_CRYPTO_poly1305_update_neon +#define ChaCha20_ctr32 
ring_core_0_17_5_ChaCha20_ctr32 +#define LIMBS_add_mod ring_core_0_17_5_LIMBS_add_mod +#define LIMBS_are_even ring_core_0_17_5_LIMBS_are_even +#define LIMBS_are_zero ring_core_0_17_5_LIMBS_are_zero +#define LIMBS_equal ring_core_0_17_5_LIMBS_equal +#define LIMBS_equal_limb ring_core_0_17_5_LIMBS_equal_limb +#define LIMBS_less_than ring_core_0_17_5_LIMBS_less_than +#define LIMBS_less_than_limb ring_core_0_17_5_LIMBS_less_than_limb +#define LIMBS_reduce_once ring_core_0_17_5_LIMBS_reduce_once +#define LIMBS_select_512_32 ring_core_0_17_5_LIMBS_select_512_32 +#define LIMBS_shl_mod ring_core_0_17_5_LIMBS_shl_mod +#define LIMBS_sub_mod ring_core_0_17_5_LIMBS_sub_mod +#define LIMBS_window5_split_window ring_core_0_17_5_LIMBS_window5_split_window +#define LIMBS_window5_unsplit_window ring_core_0_17_5_LIMBS_window5_unsplit_window +#define LIMB_shr ring_core_0_17_5_LIMB_shr +#define OPENSSL_armcap_P ring_core_0_17_5_OPENSSL_armcap_P +#define OPENSSL_cpuid_setup ring_core_0_17_5_OPENSSL_cpuid_setup +#define OPENSSL_ia32cap_P ring_core_0_17_5_OPENSSL_ia32cap_P +#define aes_hw_ctr32_encrypt_blocks ring_core_0_17_5_aes_hw_ctr32_encrypt_blocks +#define aes_hw_encrypt ring_core_0_17_5_aes_hw_encrypt +#define aes_hw_set_encrypt_key ring_core_0_17_5_aes_hw_set_encrypt_key +#define aes_nohw_ctr32_encrypt_blocks ring_core_0_17_5_aes_nohw_ctr32_encrypt_blocks +#define aes_nohw_encrypt ring_core_0_17_5_aes_nohw_encrypt +#define aes_nohw_set_encrypt_key ring_core_0_17_5_aes_nohw_set_encrypt_key +#define aesni_gcm_decrypt ring_core_0_17_5_aesni_gcm_decrypt +#define aesni_gcm_encrypt ring_core_0_17_5_aesni_gcm_encrypt +#define bn_from_montgomery_in_place ring_core_0_17_5_bn_from_montgomery_in_place +#define bn_gather5 ring_core_0_17_5_bn_gather5 +#define bn_mul_mont ring_core_0_17_5_bn_mul_mont +#define bn_mul_mont_gather5 ring_core_0_17_5_bn_mul_mont_gather5 +#define bn_neg_inv_mod_r_u64 ring_core_0_17_5_bn_neg_inv_mod_r_u64 +#define bn_power5 ring_core_0_17_5_bn_power5 +#define bn_scatter5 ring_core_0_17_5_bn_scatter5 +#define bn_sqr8x_internal ring_core_0_17_5_bn_sqr8x_internal +#define bn_sqrx8x_internal ring_core_0_17_5_bn_sqrx8x_internal +#define bsaes_ctr32_encrypt_blocks ring_core_0_17_5_bsaes_ctr32_encrypt_blocks +#define bssl_constant_time_test_conditional_memcpy ring_core_0_17_5_bssl_constant_time_test_conditional_memcpy +#define bssl_constant_time_test_conditional_memxor ring_core_0_17_5_bssl_constant_time_test_conditional_memxor +#define bssl_constant_time_test_main ring_core_0_17_5_bssl_constant_time_test_main +#define chacha20_poly1305_open ring_core_0_17_5_chacha20_poly1305_open +#define chacha20_poly1305_seal ring_core_0_17_5_chacha20_poly1305_seal +#define fiat_curve25519_adx_mul ring_core_0_17_5_fiat_curve25519_adx_mul +#define fiat_curve25519_adx_square ring_core_0_17_5_fiat_curve25519_adx_square +#define gcm_ghash_avx ring_core_0_17_5_gcm_ghash_avx +#define gcm_ghash_clmul ring_core_0_17_5_gcm_ghash_clmul +#define gcm_ghash_neon ring_core_0_17_5_gcm_ghash_neon +#define gcm_gmult_clmul ring_core_0_17_5_gcm_gmult_clmul +#define gcm_gmult_neon ring_core_0_17_5_gcm_gmult_neon +#define gcm_init_avx ring_core_0_17_5_gcm_init_avx +#define gcm_init_clmul ring_core_0_17_5_gcm_init_clmul +#define gcm_init_neon ring_core_0_17_5_gcm_init_neon +#define k25519Precomp ring_core_0_17_5_k25519Precomp +#define limbs_mul_add_limb ring_core_0_17_5_limbs_mul_add_limb +#define little_endian_bytes_from_scalar ring_core_0_17_5_little_endian_bytes_from_scalar +#define ecp_nistz256_neg 
ring_core_0_17_5_ecp_nistz256_neg +#define ecp_nistz256_select_w5 ring_core_0_17_5_ecp_nistz256_select_w5 +#define ecp_nistz256_select_w7 ring_core_0_17_5_ecp_nistz256_select_w7 +#define p256_mul_mont ring_core_0_17_5_p256_mul_mont +#define p256_point_add ring_core_0_17_5_p256_point_add +#define p256_point_add_affine ring_core_0_17_5_p256_point_add_affine +#define p256_point_double ring_core_0_17_5_p256_point_double +#define p256_point_mul ring_core_0_17_5_p256_point_mul +#define p256_point_mul_base ring_core_0_17_5_p256_point_mul_base +#define p256_point_mul_base_vartime ring_core_0_17_5_p256_point_mul_base_vartime +#define p256_scalar_mul_mont ring_core_0_17_5_p256_scalar_mul_mont +#define p256_scalar_sqr_rep_mont ring_core_0_17_5_p256_scalar_sqr_rep_mont +#define p256_sqr_mont ring_core_0_17_5_p256_sqr_mont +#define p384_elem_div_by_2 ring_core_0_17_5_p384_elem_div_by_2 +#define p384_elem_mul_mont ring_core_0_17_5_p384_elem_mul_mont +#define p384_elem_neg ring_core_0_17_5_p384_elem_neg +#define p384_elem_sub ring_core_0_17_5_p384_elem_sub +#define p384_point_add ring_core_0_17_5_p384_point_add +#define p384_point_double ring_core_0_17_5_p384_point_double +#define p384_point_mul ring_core_0_17_5_p384_point_mul +#define p384_scalar_mul_mont ring_core_0_17_5_p384_scalar_mul_mont +#define openssl_poly1305_neon2_addmulmod ring_core_0_17_5_openssl_poly1305_neon2_addmulmod +#define openssl_poly1305_neon2_blocks ring_core_0_17_5_openssl_poly1305_neon2_blocks +#define sha256_block_data_order ring_core_0_17_5_sha256_block_data_order +#define sha512_block_data_order ring_core_0_17_5_sha512_block_data_order +#define vpaes_ctr32_encrypt_blocks ring_core_0_17_5_vpaes_ctr32_encrypt_blocks +#define vpaes_encrypt ring_core_0_17_5_vpaes_encrypt +#define vpaes_encrypt_key_to_bsaes ring_core_0_17_5_vpaes_encrypt_key_to_bsaes +#define vpaes_set_encrypt_key ring_core_0_17_5_vpaes_set_encrypt_key +#define x25519_NEON ring_core_0_17_5_x25519_NEON +#define x25519_fe_invert ring_core_0_17_5_x25519_fe_invert +#define x25519_fe_isnegative ring_core_0_17_5_x25519_fe_isnegative +#define x25519_fe_mul_ttt ring_core_0_17_5_x25519_fe_mul_ttt +#define x25519_fe_neg ring_core_0_17_5_x25519_fe_neg +#define x25519_fe_tobytes ring_core_0_17_5_x25519_fe_tobytes +#define x25519_ge_double_scalarmult_vartime ring_core_0_17_5_x25519_ge_double_scalarmult_vartime +#define x25519_ge_frombytes_vartime ring_core_0_17_5_x25519_ge_frombytes_vartime +#define x25519_ge_scalarmult_base ring_core_0_17_5_x25519_ge_scalarmult_base +#define x25519_ge_scalarmult_base_adx ring_core_0_17_5_x25519_ge_scalarmult_base_adx +#define x25519_public_from_private_generic_masked ring_core_0_17_5_x25519_public_from_private_generic_masked +#define x25519_sc_mask ring_core_0_17_5_x25519_sc_mask +#define x25519_sc_muladd ring_core_0_17_5_x25519_sc_muladd +#define x25519_sc_reduce ring_core_0_17_5_x25519_sc_reduce +#define x25519_scalar_mult_adx ring_core_0_17_5_x25519_scalar_mult_adx +#define x25519_scalar_mult_generic_masked ring_core_0_17_5_x25519_scalar_mult_generic_masked + +#endif +#endif diff --git a/shim/third-party/rust/fixups/ring/include/ring_core_generated/prefix_symbols_nasm.inc b/shim/third-party/rust/fixups/ring/include/ring_core_generated/prefix_symbols_nasm.inc new file mode 100644 index 0000000000000..50e986a6a6aeb --- /dev/null +++ b/shim/third-party/rust/fixups/ring/include/ring_core_generated/prefix_symbols_nasm.inc @@ -0,0 +1,236 @@ + +%ifndef ring_core_generated_PREFIX_SYMBOLS_NASM_INC +%define ring_core_generated_PREFIX_SYMBOLS_NASM_INC 
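These checked-in headers (prefix_symbols.h, prefix_symbols_asm.h, and this NASM include) stand in for files that ring's build script would normally generate: each renames every exported C or assembly symbol to a version-prefixed form, which is what allows two versions of ring to be linked into one binary without duplicate-symbol errors. The Apple and win32 branches add a leading underscore because those object formats prefix C symbol names. A minimal, self-contained C illustration of the mechanism follows; `my_hash_block` is a hypothetical name that does not appear in ring.

    #include <stdio.h>

    /* After this #define, the preprocessor rewrites every occurrence of
     * my_hash_block below, so the object file only ever contains the
     * version-prefixed symbol. */
    #define my_hash_block ring_core_0_17_5_my_hash_block

    static void my_hash_block(void) { puts("prefixed symbol called"); }

    int main(void) {
        my_hash_block(); /* resolves to ring_core_0_17_5_my_hash_block */
        return 0;
    }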
+ +%ifidn __OUTPUT_FORMAT__,win32 +%define _ecp_nistz256_point_double _p256_point_double +%define _ecp_nistz256_point_add _p256_point_add +%define _ecp_nistz256_point_add_affine _p256_point_add_affine +%define _ecp_nistz256_ord_mul_mont _p256_scalar_mul_mont +%define _ecp_nistz256_ord_sqr_mont _p256_scalar_sqr_rep_mont +%define _ecp_nistz256_mul_mont _p256_mul_mont +%define _ecp_nistz256_sqr_mont _p256_sqr_mont +%define _CRYPTO_memcmp _ring_core_0_17_5_CRYPTO_memcmp +%define _CRYPTO_poly1305_finish _ring_core_0_17_5_CRYPTO_poly1305_finish +%define _CRYPTO_poly1305_finish_neon _ring_core_0_17_5_CRYPTO_poly1305_finish_neon +%define _CRYPTO_poly1305_init _ring_core_0_17_5_CRYPTO_poly1305_init +%define _CRYPTO_poly1305_init_neon _ring_core_0_17_5_CRYPTO_poly1305_init_neon +%define _CRYPTO_poly1305_update _ring_core_0_17_5_CRYPTO_poly1305_update +%define _CRYPTO_poly1305_update_neon _ring_core_0_17_5_CRYPTO_poly1305_update_neon +%define _ChaCha20_ctr32 _ring_core_0_17_5_ChaCha20_ctr32 +%define _LIMBS_add_mod _ring_core_0_17_5_LIMBS_add_mod +%define _LIMBS_are_even _ring_core_0_17_5_LIMBS_are_even +%define _LIMBS_are_zero _ring_core_0_17_5_LIMBS_are_zero +%define _LIMBS_equal _ring_core_0_17_5_LIMBS_equal +%define _LIMBS_equal_limb _ring_core_0_17_5_LIMBS_equal_limb +%define _LIMBS_less_than _ring_core_0_17_5_LIMBS_less_than +%define _LIMBS_less_than_limb _ring_core_0_17_5_LIMBS_less_than_limb +%define _LIMBS_reduce_once _ring_core_0_17_5_LIMBS_reduce_once +%define _LIMBS_select_512_32 _ring_core_0_17_5_LIMBS_select_512_32 +%define _LIMBS_shl_mod _ring_core_0_17_5_LIMBS_shl_mod +%define _LIMBS_sub_mod _ring_core_0_17_5_LIMBS_sub_mod +%define _LIMBS_window5_split_window _ring_core_0_17_5_LIMBS_window5_split_window +%define _LIMBS_window5_unsplit_window _ring_core_0_17_5_LIMBS_window5_unsplit_window +%define _LIMB_shr _ring_core_0_17_5_LIMB_shr +%define _OPENSSL_armcap_P _ring_core_0_17_5_OPENSSL_armcap_P +%define _OPENSSL_cpuid_setup _ring_core_0_17_5_OPENSSL_cpuid_setup +%define _OPENSSL_ia32cap_P _ring_core_0_17_5_OPENSSL_ia32cap_P +%define _aes_hw_ctr32_encrypt_blocks _ring_core_0_17_5_aes_hw_ctr32_encrypt_blocks +%define _aes_hw_encrypt _ring_core_0_17_5_aes_hw_encrypt +%define _aes_hw_set_encrypt_key _ring_core_0_17_5_aes_hw_set_encrypt_key +%define _aes_nohw_ctr32_encrypt_blocks _ring_core_0_17_5_aes_nohw_ctr32_encrypt_blocks +%define _aes_nohw_encrypt _ring_core_0_17_5_aes_nohw_encrypt +%define _aes_nohw_set_encrypt_key _ring_core_0_17_5_aes_nohw_set_encrypt_key +%define _aesni_gcm_decrypt _ring_core_0_17_5_aesni_gcm_decrypt +%define _aesni_gcm_encrypt _ring_core_0_17_5_aesni_gcm_encrypt +%define _bn_from_montgomery_in_place _ring_core_0_17_5_bn_from_montgomery_in_place +%define _bn_gather5 _ring_core_0_17_5_bn_gather5 +%define _bn_mul_mont _ring_core_0_17_5_bn_mul_mont +%define _bn_mul_mont_gather5 _ring_core_0_17_5_bn_mul_mont_gather5 +%define _bn_neg_inv_mod_r_u64 _ring_core_0_17_5_bn_neg_inv_mod_r_u64 +%define _bn_power5 _ring_core_0_17_5_bn_power5 +%define _bn_scatter5 _ring_core_0_17_5_bn_scatter5 +%define _bn_sqr8x_internal _ring_core_0_17_5_bn_sqr8x_internal +%define _bn_sqrx8x_internal _ring_core_0_17_5_bn_sqrx8x_internal +%define _bsaes_ctr32_encrypt_blocks _ring_core_0_17_5_bsaes_ctr32_encrypt_blocks +%define _bssl_constant_time_test_conditional_memcpy _ring_core_0_17_5_bssl_constant_time_test_conditional_memcpy +%define _bssl_constant_time_test_conditional_memxor _ring_core_0_17_5_bssl_constant_time_test_conditional_memxor +%define _bssl_constant_time_test_main 
_ring_core_0_17_5_bssl_constant_time_test_main +%define _chacha20_poly1305_open _ring_core_0_17_5_chacha20_poly1305_open +%define _chacha20_poly1305_seal _ring_core_0_17_5_chacha20_poly1305_seal +%define _fiat_curve25519_adx_mul _ring_core_0_17_5_fiat_curve25519_adx_mul +%define _fiat_curve25519_adx_square _ring_core_0_17_5_fiat_curve25519_adx_square +%define _gcm_ghash_avx _ring_core_0_17_5_gcm_ghash_avx +%define _gcm_ghash_clmul _ring_core_0_17_5_gcm_ghash_clmul +%define _gcm_ghash_neon _ring_core_0_17_5_gcm_ghash_neon +%define _gcm_gmult_clmul _ring_core_0_17_5_gcm_gmult_clmul +%define _gcm_gmult_neon _ring_core_0_17_5_gcm_gmult_neon +%define _gcm_init_avx _ring_core_0_17_5_gcm_init_avx +%define _gcm_init_clmul _ring_core_0_17_5_gcm_init_clmul +%define _gcm_init_neon _ring_core_0_17_5_gcm_init_neon +%define _k25519Precomp _ring_core_0_17_5_k25519Precomp +%define _limbs_mul_add_limb _ring_core_0_17_5_limbs_mul_add_limb +%define _little_endian_bytes_from_scalar _ring_core_0_17_5_little_endian_bytes_from_scalar +%define _ecp_nistz256_neg _ring_core_0_17_5_ecp_nistz256_neg +%define _ecp_nistz256_select_w5 _ring_core_0_17_5_ecp_nistz256_select_w5 +%define _ecp_nistz256_select_w7 _ring_core_0_17_5_ecp_nistz256_select_w7 +%define _p256_mul_mont _ring_core_0_17_5_p256_mul_mont +%define _p256_point_add _ring_core_0_17_5_p256_point_add +%define _p256_point_add_affine _ring_core_0_17_5_p256_point_add_affine +%define _p256_point_double _ring_core_0_17_5_p256_point_double +%define _p256_point_mul _ring_core_0_17_5_p256_point_mul +%define _p256_point_mul_base _ring_core_0_17_5_p256_point_mul_base +%define _p256_point_mul_base_vartime _ring_core_0_17_5_p256_point_mul_base_vartime +%define _p256_scalar_mul_mont _ring_core_0_17_5_p256_scalar_mul_mont +%define _p256_scalar_sqr_rep_mont _ring_core_0_17_5_p256_scalar_sqr_rep_mont +%define _p256_sqr_mont _ring_core_0_17_5_p256_sqr_mont +%define _p384_elem_div_by_2 _ring_core_0_17_5_p384_elem_div_by_2 +%define _p384_elem_mul_mont _ring_core_0_17_5_p384_elem_mul_mont +%define _p384_elem_neg _ring_core_0_17_5_p384_elem_neg +%define _p384_elem_sub _ring_core_0_17_5_p384_elem_sub +%define _p384_point_add _ring_core_0_17_5_p384_point_add +%define _p384_point_double _ring_core_0_17_5_p384_point_double +%define _p384_point_mul _ring_core_0_17_5_p384_point_mul +%define _p384_scalar_mul_mont _ring_core_0_17_5_p384_scalar_mul_mont +%define _openssl_poly1305_neon2_addmulmod _ring_core_0_17_5_openssl_poly1305_neon2_addmulmod +%define _openssl_poly1305_neon2_blocks _ring_core_0_17_5_openssl_poly1305_neon2_blocks +%define _sha256_block_data_order _ring_core_0_17_5_sha256_block_data_order +%define _sha512_block_data_order _ring_core_0_17_5_sha512_block_data_order +%define _vpaes_ctr32_encrypt_blocks _ring_core_0_17_5_vpaes_ctr32_encrypt_blocks +%define _vpaes_encrypt _ring_core_0_17_5_vpaes_encrypt +%define _vpaes_encrypt_key_to_bsaes _ring_core_0_17_5_vpaes_encrypt_key_to_bsaes +%define _vpaes_set_encrypt_key _ring_core_0_17_5_vpaes_set_encrypt_key +%define _x25519_NEON _ring_core_0_17_5_x25519_NEON +%define _x25519_fe_invert _ring_core_0_17_5_x25519_fe_invert +%define _x25519_fe_isnegative _ring_core_0_17_5_x25519_fe_isnegative +%define _x25519_fe_mul_ttt _ring_core_0_17_5_x25519_fe_mul_ttt +%define _x25519_fe_neg _ring_core_0_17_5_x25519_fe_neg +%define _x25519_fe_tobytes _ring_core_0_17_5_x25519_fe_tobytes +%define _x25519_ge_double_scalarmult_vartime _ring_core_0_17_5_x25519_ge_double_scalarmult_vartime +%define _x25519_ge_frombytes_vartime 
_ring_core_0_17_5_x25519_ge_frombytes_vartime +%define _x25519_ge_scalarmult_base _ring_core_0_17_5_x25519_ge_scalarmult_base +%define _x25519_ge_scalarmult_base_adx _ring_core_0_17_5_x25519_ge_scalarmult_base_adx +%define _x25519_public_from_private_generic_masked _ring_core_0_17_5_x25519_public_from_private_generic_masked +%define _x25519_sc_mask _ring_core_0_17_5_x25519_sc_mask +%define _x25519_sc_muladd _ring_core_0_17_5_x25519_sc_muladd +%define _x25519_sc_reduce _ring_core_0_17_5_x25519_sc_reduce +%define _x25519_scalar_mult_adx _ring_core_0_17_5_x25519_scalar_mult_adx +%define _x25519_scalar_mult_generic_masked _ring_core_0_17_5_x25519_scalar_mult_generic_masked + +%else +%define ecp_nistz256_point_double p256_point_double +%define ecp_nistz256_point_add p256_point_add +%define ecp_nistz256_point_add_affine p256_point_add_affine +%define ecp_nistz256_ord_mul_mont p256_scalar_mul_mont +%define ecp_nistz256_ord_sqr_mont p256_scalar_sqr_rep_mont +%define ecp_nistz256_mul_mont p256_mul_mont +%define ecp_nistz256_sqr_mont p256_sqr_mont +%define CRYPTO_memcmp ring_core_0_17_5_CRYPTO_memcmp +%define CRYPTO_poly1305_finish ring_core_0_17_5_CRYPTO_poly1305_finish +%define CRYPTO_poly1305_finish_neon ring_core_0_17_5_CRYPTO_poly1305_finish_neon +%define CRYPTO_poly1305_init ring_core_0_17_5_CRYPTO_poly1305_init +%define CRYPTO_poly1305_init_neon ring_core_0_17_5_CRYPTO_poly1305_init_neon +%define CRYPTO_poly1305_update ring_core_0_17_5_CRYPTO_poly1305_update +%define CRYPTO_poly1305_update_neon ring_core_0_17_5_CRYPTO_poly1305_update_neon +%define ChaCha20_ctr32 ring_core_0_17_5_ChaCha20_ctr32 +%define LIMBS_add_mod ring_core_0_17_5_LIMBS_add_mod +%define LIMBS_are_even ring_core_0_17_5_LIMBS_are_even +%define LIMBS_are_zero ring_core_0_17_5_LIMBS_are_zero +%define LIMBS_equal ring_core_0_17_5_LIMBS_equal +%define LIMBS_equal_limb ring_core_0_17_5_LIMBS_equal_limb +%define LIMBS_less_than ring_core_0_17_5_LIMBS_less_than +%define LIMBS_less_than_limb ring_core_0_17_5_LIMBS_less_than_limb +%define LIMBS_reduce_once ring_core_0_17_5_LIMBS_reduce_once +%define LIMBS_select_512_32 ring_core_0_17_5_LIMBS_select_512_32 +%define LIMBS_shl_mod ring_core_0_17_5_LIMBS_shl_mod +%define LIMBS_sub_mod ring_core_0_17_5_LIMBS_sub_mod +%define LIMBS_window5_split_window ring_core_0_17_5_LIMBS_window5_split_window +%define LIMBS_window5_unsplit_window ring_core_0_17_5_LIMBS_window5_unsplit_window +%define LIMB_shr ring_core_0_17_5_LIMB_shr +%define OPENSSL_armcap_P ring_core_0_17_5_OPENSSL_armcap_P +%define OPENSSL_cpuid_setup ring_core_0_17_5_OPENSSL_cpuid_setup +%define OPENSSL_ia32cap_P ring_core_0_17_5_OPENSSL_ia32cap_P +%define aes_hw_ctr32_encrypt_blocks ring_core_0_17_5_aes_hw_ctr32_encrypt_blocks +%define aes_hw_encrypt ring_core_0_17_5_aes_hw_encrypt +%define aes_hw_set_encrypt_key ring_core_0_17_5_aes_hw_set_encrypt_key +%define aes_nohw_ctr32_encrypt_blocks ring_core_0_17_5_aes_nohw_ctr32_encrypt_blocks +%define aes_nohw_encrypt ring_core_0_17_5_aes_nohw_encrypt +%define aes_nohw_set_encrypt_key ring_core_0_17_5_aes_nohw_set_encrypt_key +%define aesni_gcm_decrypt ring_core_0_17_5_aesni_gcm_decrypt +%define aesni_gcm_encrypt ring_core_0_17_5_aesni_gcm_encrypt +%define bn_from_montgomery_in_place ring_core_0_17_5_bn_from_montgomery_in_place +%define bn_gather5 ring_core_0_17_5_bn_gather5 +%define bn_mul_mont ring_core_0_17_5_bn_mul_mont +%define bn_mul_mont_gather5 ring_core_0_17_5_bn_mul_mont_gather5 +%define bn_neg_inv_mod_r_u64 ring_core_0_17_5_bn_neg_inv_mod_r_u64 +%define bn_power5 
ring_core_0_17_5_bn_power5 +%define bn_scatter5 ring_core_0_17_5_bn_scatter5 +%define bn_sqr8x_internal ring_core_0_17_5_bn_sqr8x_internal +%define bn_sqrx8x_internal ring_core_0_17_5_bn_sqrx8x_internal +%define bsaes_ctr32_encrypt_blocks ring_core_0_17_5_bsaes_ctr32_encrypt_blocks +%define bssl_constant_time_test_conditional_memcpy ring_core_0_17_5_bssl_constant_time_test_conditional_memcpy +%define bssl_constant_time_test_conditional_memxor ring_core_0_17_5_bssl_constant_time_test_conditional_memxor +%define bssl_constant_time_test_main ring_core_0_17_5_bssl_constant_time_test_main +%define chacha20_poly1305_open ring_core_0_17_5_chacha20_poly1305_open +%define chacha20_poly1305_seal ring_core_0_17_5_chacha20_poly1305_seal +%define fiat_curve25519_adx_mul ring_core_0_17_5_fiat_curve25519_adx_mul +%define fiat_curve25519_adx_square ring_core_0_17_5_fiat_curve25519_adx_square +%define gcm_ghash_avx ring_core_0_17_5_gcm_ghash_avx +%define gcm_ghash_clmul ring_core_0_17_5_gcm_ghash_clmul +%define gcm_ghash_neon ring_core_0_17_5_gcm_ghash_neon +%define gcm_gmult_clmul ring_core_0_17_5_gcm_gmult_clmul +%define gcm_gmult_neon ring_core_0_17_5_gcm_gmult_neon +%define gcm_init_avx ring_core_0_17_5_gcm_init_avx +%define gcm_init_clmul ring_core_0_17_5_gcm_init_clmul +%define gcm_init_neon ring_core_0_17_5_gcm_init_neon +%define k25519Precomp ring_core_0_17_5_k25519Precomp +%define limbs_mul_add_limb ring_core_0_17_5_limbs_mul_add_limb +%define little_endian_bytes_from_scalar ring_core_0_17_5_little_endian_bytes_from_scalar +%define ecp_nistz256_neg ring_core_0_17_5_ecp_nistz256_neg +%define ecp_nistz256_select_w5 ring_core_0_17_5_ecp_nistz256_select_w5 +%define ecp_nistz256_select_w7 ring_core_0_17_5_ecp_nistz256_select_w7 +%define p256_mul_mont ring_core_0_17_5_p256_mul_mont +%define p256_point_add ring_core_0_17_5_p256_point_add +%define p256_point_add_affine ring_core_0_17_5_p256_point_add_affine +%define p256_point_double ring_core_0_17_5_p256_point_double +%define p256_point_mul ring_core_0_17_5_p256_point_mul +%define p256_point_mul_base ring_core_0_17_5_p256_point_mul_base +%define p256_point_mul_base_vartime ring_core_0_17_5_p256_point_mul_base_vartime +%define p256_scalar_mul_mont ring_core_0_17_5_p256_scalar_mul_mont +%define p256_scalar_sqr_rep_mont ring_core_0_17_5_p256_scalar_sqr_rep_mont +%define p256_sqr_mont ring_core_0_17_5_p256_sqr_mont +%define p384_elem_div_by_2 ring_core_0_17_5_p384_elem_div_by_2 +%define p384_elem_mul_mont ring_core_0_17_5_p384_elem_mul_mont +%define p384_elem_neg ring_core_0_17_5_p384_elem_neg +%define p384_elem_sub ring_core_0_17_5_p384_elem_sub +%define p384_point_add ring_core_0_17_5_p384_point_add +%define p384_point_double ring_core_0_17_5_p384_point_double +%define p384_point_mul ring_core_0_17_5_p384_point_mul +%define p384_scalar_mul_mont ring_core_0_17_5_p384_scalar_mul_mont +%define openssl_poly1305_neon2_addmulmod ring_core_0_17_5_openssl_poly1305_neon2_addmulmod +%define openssl_poly1305_neon2_blocks ring_core_0_17_5_openssl_poly1305_neon2_blocks +%define sha256_block_data_order ring_core_0_17_5_sha256_block_data_order +%define sha512_block_data_order ring_core_0_17_5_sha512_block_data_order +%define vpaes_ctr32_encrypt_blocks ring_core_0_17_5_vpaes_ctr32_encrypt_blocks +%define vpaes_encrypt ring_core_0_17_5_vpaes_encrypt +%define vpaes_encrypt_key_to_bsaes ring_core_0_17_5_vpaes_encrypt_key_to_bsaes +%define vpaes_set_encrypt_key ring_core_0_17_5_vpaes_set_encrypt_key +%define x25519_NEON ring_core_0_17_5_x25519_NEON +%define x25519_fe_invert 
ring_core_0_17_5_x25519_fe_invert +%define x25519_fe_isnegative ring_core_0_17_5_x25519_fe_isnegative +%define x25519_fe_mul_ttt ring_core_0_17_5_x25519_fe_mul_ttt +%define x25519_fe_neg ring_core_0_17_5_x25519_fe_neg +%define x25519_fe_tobytes ring_core_0_17_5_x25519_fe_tobytes +%define x25519_ge_double_scalarmult_vartime ring_core_0_17_5_x25519_ge_double_scalarmult_vartime +%define x25519_ge_frombytes_vartime ring_core_0_17_5_x25519_ge_frombytes_vartime +%define x25519_ge_scalarmult_base ring_core_0_17_5_x25519_ge_scalarmult_base +%define x25519_ge_scalarmult_base_adx ring_core_0_17_5_x25519_ge_scalarmult_base_adx +%define x25519_public_from_private_generic_masked ring_core_0_17_5_x25519_public_from_private_generic_masked +%define x25519_sc_mask ring_core_0_17_5_x25519_sc_mask +%define x25519_sc_muladd ring_core_0_17_5_x25519_sc_muladd +%define x25519_sc_reduce ring_core_0_17_5_x25519_sc_reduce +%define x25519_scalar_mult_adx ring_core_0_17_5_x25519_scalar_mult_adx +%define x25519_scalar_mult_generic_masked ring_core_0_17_5_x25519_scalar_mult_generic_masked + +%endif +%endif diff --git a/shim/third-party/rust/fixups/rustix/fixups.toml b/shim/third-party/rust/fixups/rustix/fixups.toml index 5e026f75e0de3..d514599437f6a 100644 --- a/shim/third-party/rust/fixups/rustix/fixups.toml +++ b/shim/third-party/rust/fixups/rustix/fixups.toml @@ -1,2 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + [[buildscript]] [buildscript.rustc_flags] diff --git a/shim/third-party/rust/fixups/rustls/fixups.toml b/shim/third-party/rust/fixups/rustls/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/rustls/fixups.toml +++ b/shim/third-party/rust/fixups/rustls/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] diff --git a/shim/third-party/rust/fixups/rustversion/fixups.toml b/shim/third-party/rust/fixups/rustversion/fixups.toml index ac9ebfb4af71d..309316ac45495 100644 --- a/shim/third-party/rust/fixups/rustversion/fixups.toml +++ b/shim/third-party/rust/fixups/rustversion/fixups.toml @@ -1,2 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + [[buildscript]] [buildscript.gen_srcs] diff --git a/shim/third-party/rust/fixups/serde/fixups.toml b/shim/third-party/rust/fixups/serde/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/serde/fixups.toml +++ b/shim/third-party/rust/fixups/serde/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ buildscript = [] diff --git a/shim/third-party/rust/fixups/serde_derive/fixups.toml b/shim/third-party/rust/fixups/serde_derive/fixups.toml index 7de4ebb3e1996..015a575a2f772 100644 --- a/shim/third-party/rust/fixups/serde_derive/fixups.toml +++ b/shim/third-party/rust/fixups/serde_derive/fixups.toml @@ -1,3 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + # To set `CARGO_MANIFEST_DIR` cargo_env = true buildscript = [] diff --git a/shim/third-party/rust/fixups/serde_json/fixups.toml b/shim/third-party/rust/fixups/serde_json/fixups.toml index db40d72cb2eaf..d514599437f6a 100644 --- a/shim/third-party/rust/fixups/serde_json/fixups.toml +++ b/shim/third-party/rust/fixups/serde_json/fixups.toml @@ -1 +1,9 @@ -buildscript = [] +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +[[buildscript]] +[buildscript.rustc_flags] diff --git a/shim/third-party/rust/fixups/signal-hook/fixups.toml b/shim/third-party/rust/fixups/signal-hook/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/signal-hook/fixups.toml +++ b/shim/third-party/rust/fixups/signal-hook/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] diff --git a/shim/third-party/rust/fixups/slab/fixups.toml b/shim/third-party/rust/fixups/slab/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/slab/fixups.toml +++ b/shim/third-party/rust/fixups/slab/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] diff --git a/shim/third-party/rust/fixups/slog/fixups.toml b/shim/third-party/rust/fixups/slog/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/slog/fixups.toml +++ b/shim/third-party/rust/fixups/slog/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] diff --git a/shim/third-party/rust/fixups/syn/fixups.toml b/shim/third-party/rust/fixups/syn/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/syn/fixups.toml +++ b/shim/third-party/rust/fixups/syn/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] diff --git a/shim/third-party/rust/fixups/sys-info/fixups.toml b/shim/third-party/rust/fixups/sys-info/fixups.toml index 5d124b3ad7ad2..39a5fdc37278f 100644 --- a/shim/third-party/rust/fixups/sys-info/fixups.toml +++ b/shim/third-party/rust/fixups/sys-info/fixups.toml @@ -1,3 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] [[platform_fixup.'cfg(target_os = "linux")'.buildscript]] diff --git a/shim/third-party/rust/fixups/sysinfo/fixups.toml b/shim/third-party/rust/fixups/sysinfo/fixups.toml index 8518b1897006d..bcf5ac4924828 100644 --- a/shim/third-party/rust/fixups/sysinfo/fixups.toml +++ b/shim/third-party/rust/fixups/sysinfo/fixups.toml @@ -1,3 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + extra_srcs = ["README.md", "md_doc/**/*.md"] buildscript = [] diff --git a/shim/third-party/rust/fixups/tempfile/fixups.toml b/shim/third-party/rust/fixups/tempfile/fixups.toml index 5e026f75e0de3..d514599437f6a 100644 --- a/shim/third-party/rust/fixups/tempfile/fixups.toml +++ b/shim/third-party/rust/fixups/tempfile/fixups.toml @@ -1,2 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + [[buildscript]] [buildscript.rustc_flags] diff --git a/shim/third-party/rust/fixups/terminfo/fixups.toml b/shim/third-party/rust/fixups/terminfo/fixups.toml index ac9ebfb4af71d..309316ac45495 100644 --- a/shim/third-party/rust/fixups/terminfo/fixups.toml +++ b/shim/third-party/rust/fixups/terminfo/fixups.toml @@ -1,2 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + [[buildscript]] [buildscript.gen_srcs] diff --git a/shim/third-party/rust/fixups/termwiz/fixups.toml b/shim/third-party/rust/fixups/termwiz/fixups.toml index fdba1eb21b86a..478b0154e80be 100644 --- a/shim/third-party/rust/fixups/termwiz/fixups.toml +++ b/shim/third-party/rust/fixups/termwiz/fixups.toml @@ -1,2 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
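The `extra_srcs` entries in this stretch of the diff all work around the same limitation: Reindeer appears to discover sources by following `mod` declarations, so files reached only through macros (as the tokio-stream and tracing-subscriber comments below note) or non-Rust assets such as termwiz's `.pest` grammars are missed by its scan, and must be globbed in explicitly. Likewise `cargo_env = true` restores CARGO_* environment variables such as `CARGO_MANIFEST_DIR` for crates that read them at build time, as the serde_derive fixup above notes. A condensed sketch mirroring the two termwiz lines that follow immediately below:

    # Provide CARGO_* env vars (e.g. CARGO_MANIFEST_DIR) at build time.
    cargo_env = true
    # Glob in sources that the mod-based scan cannot find.
    extra_srcs = ["src/**/*.pest"]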
+ cargo_env = true extra_srcs = ["src/**/*.pest"] diff --git a/shim/third-party/rust/fixups/test-case/fixups.toml b/shim/third-party/rust/fixups/test-case/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/test-case/fixups.toml +++ b/shim/third-party/rust/fixups/test-case/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] diff --git a/shim/third-party/rust/fixups/thiserror/fixups.toml b/shim/third-party/rust/fixups/thiserror/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/thiserror/fixups.toml +++ b/shim/third-party/rust/fixups/thiserror/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] diff --git a/shim/third-party/rust/fixups/tiny-keccak/fixups.toml b/shim/third-party/rust/fixups/tiny-keccak/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/tiny-keccak/fixups.toml +++ b/shim/third-party/rust/fixups/tiny-keccak/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] diff --git a/shim/third-party/rust/fixups/tokio-stream/fixups.toml b/shim/third-party/rust/fixups/tokio-stream/fixups.toml index a1f4f0e7091d5..3c20ec4086b2a 100644 --- a/shim/third-party/rust/fixups/tokio-stream/fixups.toml +++ b/shim/third-party/rust/fixups/tokio-stream/fixups.toml @@ -1,2 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + # reindeer cannot see through `cfg_sync!` macros and the like extra_srcs = ["src/**/*.rs"] diff --git a/shim/third-party/rust/fixups/tokio-util/fixups.toml b/shim/third-party/rust/fixups/tokio-util/fixups.toml index af7edb27e3bb7..9b40a18f4cab3 100644 --- a/shim/third-party/rust/fixups/tokio-util/fixups.toml +++ b/shim/third-party/rust/fixups/tokio-util/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + extra_srcs = ["src/**/*.rs"] diff --git a/shim/third-party/rust/fixups/tokio/fixups.toml b/shim/third-party/rust/fixups/tokio/fixups.toml index d69dcefddb329..1446deebebdcd 100644 --- a/shim/third-party/rust/fixups/tokio/fixups.toml +++ b/shim/third-party/rust/fixups/tokio/fixups.toml @@ -1,3 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + cfgs = ["tokio_unstable", "tokio_track_caller"] extra_srcs = ["src/**/*.rs"] diff --git a/shim/third-party/rust/fixups/tonic/fixups.toml b/shim/third-party/rust/fixups/tonic/fixups.toml index 41439d3356a5e..1da45ba23144e 100644 --- a/shim/third-party/rust/fixups/tonic/fixups.toml +++ b/shim/third-party/rust/fixups/tonic/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + cargo_env = true diff --git a/shim/third-party/rust/fixups/tracing-subscriber/fixups.toml b/shim/third-party/rust/fixups/tracing-subscriber/fixups.toml index 28683dbe3cbbf..bb8e8892b738a 100644 --- a/shim/third-party/rust/fixups/tracing-subscriber/fixups.toml +++ b/shim/third-party/rust/fixups/tracing-subscriber/fixups.toml @@ -1,2 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + # Reindeer can't find the dependencies due to being confused by the feature! macro. extra_srcs = ["src/**/*.rs"] diff --git a/shim/third-party/rust/fixups/trybuild/fixups.toml b/shim/third-party/rust/fixups/trybuild/fixups.toml index ac9ebfb4af71d..309316ac45495 100644 --- a/shim/third-party/rust/fixups/trybuild/fixups.toml +++ b/shim/third-party/rust/fixups/trybuild/fixups.toml @@ -1,2 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + [[buildscript]] [buildscript.gen_srcs] diff --git a/shim/third-party/rust/fixups/typenum/fixups.toml b/shim/third-party/rust/fixups/typenum/fixups.toml index 7ab02e4fd17cf..6f947d94b63de 100644 --- a/shim/third-party/rust/fixups/typenum/fixups.toml +++ b/shim/third-party/rust/fixups/typenum/fixups.toml @@ -1,3 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + features = ["force_unix_path_separator"] [[buildscript]] diff --git a/shim/third-party/rust/fixups/unicase/fixups.toml b/shim/third-party/rust/fixups/unicase/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/unicase/fixups.toml +++ b/shim/third-party/rust/fixups/unicase/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] diff --git a/shim/third-party/rust/fixups/winapi-x86_64-pc-windows-gnu/fixups.toml b/shim/third-party/rust/fixups/winapi-x86_64-pc-windows-gnu/fixups.toml index 07c79834d924f..a5a969de56ac5 100644 --- a/shim/third-party/rust/fixups/winapi-x86_64-pc-windows-gnu/fixups.toml +++ b/shim/third-party/rust/fixups/winapi-x86_64-pc-windows-gnu/fixups.toml @@ -1,3 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] [[platform_fixup.'cfg(target_os = "windows")'.buildscript]] diff --git a/shim/third-party/rust/fixups/winapi/fixups.toml b/shim/third-party/rust/fixups/winapi/fixups.toml index ec4f86e500d0a..295d056aba7bf 100644 --- a/shim/third-party/rust/fixups/winapi/fixups.toml +++ b/shim/third-party/rust/fixups/winapi/fixups.toml @@ -1,3 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] [[platform_fixup.'cfg(target_os = "windows")'.buildscript]] diff --git a/shim/third-party/rust/fixups/windows-targets/fixups.toml b/shim/third-party/rust/fixups/windows-targets/fixups.toml index b3555c7795e3d..9268eebf6b8fe 100644 --- a/shim/third-party/rust/fixups/windows-targets/fixups.toml +++ b/shim/third-party/rust/fixups/windows-targets/fixups.toml @@ -1,3 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + [platform_fixup.'cfg(any(target_os = "windows", target_os = "linux"))'] version = ">=0.48.0" cfgs = ["windows_raw_dylib"] diff --git a/shim/third-party/rust/fixups/windows_aarch64_gnullvm/fixups.toml b/shim/third-party/rust/fixups/windows_aarch64_gnullvm/fixups.toml index f40e9f9cbe6b1..c924883f5b0b5 100644 --- a/shim/third-party/rust/fixups/windows_aarch64_gnullvm/fixups.toml +++ b/shim/third-party/rust/fixups/windows_aarch64_gnullvm/fixups.toml @@ -1,3 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] [platform_fixup.'cfg(all(target_os = "linux", target_arch = "aarch64"))'] diff --git a/shim/third-party/rust/fixups/windows_x86_64_gnu/fixups.toml b/shim/third-party/rust/fixups/windows_x86_64_gnu/fixups.toml index b25e7f05fa57c..aa5cd0bf4e70d 100644 --- a/shim/third-party/rust/fixups/windows_x86_64_gnu/fixups.toml +++ b/shim/third-party/rust/fixups/windows_x86_64_gnu/fixups.toml @@ -1,3 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] [platform_fixup.'cfg(all(target_os = "windows", target_env = "gnu"))'] diff --git a/shim/third-party/rust/fixups/windows_x86_64_gnullvm/fixups.toml b/shim/third-party/rust/fixups/windows_x86_64_gnullvm/fixups.toml index a5579b6930ec1..568edc03a17d5 100644 --- a/shim/third-party/rust/fixups/windows_x86_64_gnullvm/fixups.toml +++ b/shim/third-party/rust/fixups/windows_x86_64_gnullvm/fixups.toml @@ -1,3 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] [platform_fixup.'cfg(all(target_os = "linux", target_arch = "x86_64"))'] diff --git a/shim/third-party/rust/fixups/windows_x86_64_msvc/fixups.toml b/shim/third-party/rust/fixups/windows_x86_64_msvc/fixups.toml index 8e8dea3516505..541a4b122ebac 100644 --- a/shim/third-party/rust/fixups/windows_x86_64_msvc/fixups.toml +++ b/shim/third-party/rust/fixups/windows_x86_64_msvc/fixups.toml @@ -1,3 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] [platform_fixup.'cfg(all(target_os = "windows", target_env = "msvc"))'] diff --git a/shim/third-party/rust/fixups/winreg/fixups.toml b/shim/third-party/rust/fixups/winreg/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/winreg/fixups.toml +++ b/shim/third-party/rust/fixups/winreg/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] diff --git a/shim/third-party/rust/fixups/zerocopy/fixups.toml b/shim/third-party/rust/fixups/zerocopy/fixups.toml new file mode 100644 index 0000000000000..1da45ba23144e --- /dev/null +++ b/shim/third-party/rust/fixups/zerocopy/fixups.toml @@ -0,0 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +cargo_env = true diff --git a/shim/third-party/rust/fixups/zstd-safe/fixups.toml b/shim/third-party/rust/fixups/zstd-safe/fixups.toml index db40d72cb2eaf..554cc0dae2b2b 100644 --- a/shim/third-party/rust/fixups/zstd-safe/fixups.toml +++ b/shim/third-party/rust/fixups/zstd-safe/fixups.toml @@ -1 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] diff --git a/shim/third-party/rust/fixups/zstd-sys/fixups.toml b/shim/third-party/rust/fixups/zstd-sys/fixups.toml index 164b6a7237b0f..c627d9be88ba6 100644 --- a/shim/third-party/rust/fixups/zstd-sys/fixups.toml +++ b/shim/third-party/rust/fixups/zstd-sys/fixups.toml @@ -1,3 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + buildscript = [] [[platform_fixup.'cfg(not(all(target_os = "windows", target_env = "msvc")))'.buildscript]] @@ -21,6 +28,8 @@ srcs = [ headers = [ "zdict.h", "zstd.h", + "zstd/lib/common/allocations.h", + "zstd/lib/common/bits.h", "zstd/lib/common/bitstream.h", "zstd/lib/common/compiler.h", "zstd/lib/common/cpu.h", @@ -32,7 +41,7 @@ headers = [ "zstd/lib/common/pool.h", "zstd/lib/common/portability_macros.h", "zstd/lib/common/threading.h", - #"zstd/lib/common/xxhash.h", + "zstd/lib/common/xxhash.h", "zstd/lib/common/zstd_deps.h", "zstd/lib/common/zstd_internal.h", "zstd/lib/common/zstd_trace.h", @@ -78,6 +87,8 @@ srcs = [ headers = [ "zdict.h", "zstd.h", + "zstd/lib/common/allocations.h", + "zstd/lib/common/bits.h", "zstd/lib/common/bitstream.h", "zstd/lib/common/compiler.h", "zstd/lib/common/cpu.h", @@ -89,7 +100,7 @@ headers = [ "zstd/lib/common/pool.h", "zstd/lib/common/portability_macros.h", "zstd/lib/common/threading.h", - #"zstd/lib/common/xxhash.h", + "zstd/lib/common/xxhash.h", "zstd/lib/common/zstd_deps.h", "zstd/lib/common/zstd_internal.h", "zstd/lib/common/zstd_trace.h", diff --git a/shim/third-party/rust/reindeer.toml b/shim/third-party/rust/reindeer.toml index 4aa852e04e129..13d929bbbe93e 100644 --- a/shim/third-party/rust/reindeer.toml +++ b/shim/third-party/rust/reindeer.toml @@ -1,3 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + # Configuration for Reindeer to generate Buck targets from Cargo.toml # # Overview of available options: @@ -25,5 +32,5 @@ prebuilt_cxx_library = "third_party_rust_prebuilt_cxx_library" buckfile_imports = """ load("@prelude//rust:cargo_buildscript.bzl", "buildscript_run") load("@prelude//rust:cargo_package.bzl", "cargo") -load("//third-party/macros:rust_third_party.bzl", "third_party_rust_prebuilt_cxx_library") +load("@shim//third-party/macros:rust_third_party.bzl", "third_party_rust_prebuilt_cxx_library") """ diff --git a/shim/third-party/rust/top/main.rs b/shim/third-party/rust/top/main.rs index 211a33685e9a9..7dde67dd19b09 100644 --- a/shim/third-party/rust/top/main.rs +++ b/shim/third-party/rust/top/main.rs @@ -1,3 +1,8 @@ -// Dummy source to keep Cargo happy +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. 
+ +#![allow(unused_crate_dependencies)] fn main() {} diff --git a/shim/third-party/snappy/BUCK b/shim/third-party/snappy/BUCK new file mode 100644 index 0000000000000..295d5a6c2b78c --- /dev/null +++ b/shim/third-party/snappy/BUCK @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@//third-party:defs.bzl", "pkgconfig_system_library") + +oncall("open_source") + +pkgconfig_system_library( + name = "snappy", + packages = { + "//os:linux-fedora": ["snappy-devel"], + "//os:linux-ubuntu": ["libsnappy-dev"], + "//os:macos-homebrew": ["snappy"], + }, + pkgconfig_name = "snappy", +) diff --git a/shim/third-party/xz/BUCK b/shim/third-party/xz/BUCK new file mode 100644 index 0000000000000..5b427a9753902 --- /dev/null +++ b/shim/third-party/xz/BUCK @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@//third-party:defs.bzl", "pkgconfig_system_library") + +oncall("open_source") + +pkgconfig_system_library( + name = "lzma", + packages = { + "//os:linux-fedora": ["xz-devel"], + "//os:linux-ubuntu": ["liblzma-dev"], + "//os:macos-homebrew": ["liblzma"], + }, + pkgconfig_name = "liblzma", +) diff --git a/shim/third-party/zlib/BUCK b/shim/third-party/zlib/BUCK new file mode 100644 index 0000000000000..5d79750e8b297 --- /dev/null +++ b/shim/third-party/zlib/BUCK @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@//third-party:defs.bzl", "pkgconfig_system_library") + +oncall("open_source") + +pkgconfig_system_library( + name = "z", + packages = { + "//os:linux-fedora": ["zlib-devel"], + "//os:linux-ubuntu": ["zlib1g-dev"], + "//os:macos-homebrew": ["zlib"], + }, + pkgconfig_name = "zlib", +) diff --git a/shim/third-party/zstd/BUCK b/shim/third-party/zstd/BUCK new file mode 100644 index 0000000000000..4bc3bbb5776d8 --- /dev/null +++ b/shim/third-party/zstd/BUCK @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@//third-party:defs.bzl", "pkgconfig_system_library") + +oncall("open_source") + +pkgconfig_system_library( + name = "zstd", + packages = { + "//os:linux-fedora": ["libzstd-devel"], + "//os:linux-ubuntu": ["libzstd-dev"], + "//os:macos-homebrew": ["zstd"], + }, + pkgconfig_name = "libzstd", +) diff --git a/shim/tools/build_defs/buckconfig.bzl b/shim/tools/build_defs/buckconfig.bzl new file mode 100644 index 0000000000000..d9c29d252f9dd --- /dev/null +++ b/shim/tools/build_defs/buckconfig.bzl @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates.
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//utils:buckconfig.bzl", _read = "read", _read_bool = "read_bool", _read_choice = "read_choice", _read_int = "read_int", _read_list = "read_list", _read_string = "read_string", _resolve_alias = "resolve_alias") + +read = _read +read_string = _read_string +read_choice = _read_choice +read_bool = _read_bool +read_int = _read_int +read_list = _read_list +resolve_alias = _resolve_alias diff --git a/shim/tools/build_defs/default_platform_defs.bzl b/shim/tools/build_defs/default_platform_defs.bzl new file mode 100644 index 0000000000000..3f860ae49df06 --- /dev/null +++ b/shim/tools/build_defs/default_platform_defs.bzl @@ -0,0 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +DEVSERVER_PLATFORM_REGEX = "UNUSED" diff --git a/shim/tools/build_defs/fb_native_wrapper.bzl b/shim/tools/build_defs/fb_native_wrapper.bzl new file mode 100644 index 0000000000000..f6795d7ff4313 --- /dev/null +++ b/shim/tools/build_defs/fb_native_wrapper.bzl @@ -0,0 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +fb_native = native diff --git a/shim/xplat/executorch/kernels/optimized/lib_defs.bzl b/shim/xplat/executorch/kernels/optimized/lib_defs.bzl new file mode 100644 index 0000000000000..af62407eb079c --- /dev/null +++ b/shim/xplat/executorch/kernels/optimized/lib_defs.bzl @@ -0,0 +1,149 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@fbsource//tools/build_defs:default_platform_defs.bzl", "DEVSERVER_PLATFORM_REGEX") +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +# Because vec exists as a collection of header files, compile and preprocessor +# flags applied to the vec target do not have any effect, since no compilation +# actually occurs for the target. +# +# Targets using the vec library must therefore call the get_vec_*_flags +# functions to declare the compiler flags required to access CPU vector +# intrinsics, as the sketch below illustrates.
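To make the comment above concrete, a consuming target might apply these helpers roughly as follows. This is an illustrative sketch, not part of the patch: the target and source names are invented, and the `cxx_platform_preprocessor_flags` attribute is assumed by analogy with the platform-flag attributes used elsewhere in this file.

```python
# Hypothetical consumer of the header-only vec library (names invented).
load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
load(
    "@fbsource//xplat/executorch/kernels/optimized:lib_defs.bzl",
    "get_vec_android_preprocessor_flags",
    "get_vec_cxx_preprocessor_flags",
)

def define_my_vec_kernel():
    runtime.cxx_library(
        name = "my_vec_kernel",  # invented target name
        srcs = ["my_vec_kernel.cpp"],  # invented source file
        # The flags must be declared on the consumer: the vec target itself is
        # header-only, so flags attached to it would never reach a compiler.
        cxx_platform_preprocessor_flags = get_vec_cxx_preprocessor_flags(),
        fbandroid_platform_preprocessor_flags = get_vec_android_preprocessor_flags(),
        deps = ["//executorch/kernels/optimized:libvec"],
    )
```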
+ +def get_vec_android_preprocessor_flags(): + preprocessor_flags = [ + ( + "^android-arm64.*$", + [ + "-DET_BUILD_ARM_VEC256_WITH_SLEEF", + ], + ), + ] + return preprocessor_flags + +def get_vec_cxx_preprocessor_flags(): + preprocessor_flags = [ + ( + DEVSERVER_PLATFORM_REGEX, + [ + "-DCPU_CAPABILITY_AVX2", + ], + ), + ] + return preprocessor_flags + +def get_vec_fbcode_preprocessor_flags(): + preprocessor_flags = [ + "-DCPU_CAPABILITY_AVX2", + ] + return preprocessor_flags + +# Currently, having a dependency on fbsource//third-party/sleef:sleef may cause +# duplicate symbol errors when linking fbcode targets in opt mode that also +# depend on ATen. This is because ATen accesses sleef via the third-party folder +# in caffe2 (caffe2/third-party//sleef:sleef). +# TODO(ssjia): Enable -DCPU_CAPABILITY_AVX2 in fbcode, which requires sleef. +def define_libs(): + runtime.cxx_library( + name = "libvec", + srcs = [], + exported_headers = native.glob([ + "vec/**/*.h", + ]), + header_namespace = "executorch/kernels/optimized", + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + cxx_platform_deps = select({ + "DEFAULT": [ + ( + DEVSERVER_PLATFORM_REGEX, + [ + "fbsource//third-party/sleef:sleef", + ], + ), + ], + "ovr_config//cpu:arm64": [ + ( + DEVSERVER_PLATFORM_REGEX, + [ + "fbsource//third-party/sleef:sleef_arm", + ], + ), + ], + }), + fbandroid_platform_deps = [ + ( + "^android-arm64.*$", + [ + "fbsource//third-party/sleef:sleef_arm", + ], + ), + ], + ) + + runtime.cxx_library( + name = "libutils", + srcs = [], + exported_headers = native.glob([ + "utils/**/*.h", + ]), + header_namespace = "executorch/kernels/optimized", + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + exported_deps = [ + # Needed to access the __ET_INLINE macro + "//executorch/runtime/platform:compiler", + ], + ) + + runtime.cxx_library( + name = "libblas", + srcs = native.glob([ + "blas/**/*.cpp", + ]), + exported_headers = native.glob([ + "blas/**/*.h", + ]), + header_namespace = "executorch/kernels/optimized", + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + fbandroid_platform_preprocessor_flags = [ + ( + "^android-arm64.*$", + [ + "-DET_BUILD_WITH_BLAS", + ], + ), + ], + fbandroid_platform_deps = [ + ( + "^android-arm64.*$", + [ + "fbsource//third-party/openblas:openblas", + ], + ), + ], + fbobjc_exported_preprocessor_flags = [ + "-DET_BUILD_WITH_BLAS", + "-DET_BUILD_FOR_APPLE", + ], + fbobjc_frameworks = [ + "Accelerate", + ], + exported_deps = [ + "//executorch/kernels/optimized:libutils", + "//executorch/runtime/core/exec_aten:lib", + ], + ) diff --git a/shim/xplat/executorch/kernels/optimized/op_registration_util.bzl b/shim/xplat/executorch/kernels/optimized/op_registration_util.bzl new file mode 100644 index 0000000000000..c9fe4ec912db4 --- /dev/null +++ b/shim/xplat/executorch/kernels/optimized/op_registration_util.bzl @@ -0,0 +1,138 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") +load("@fbsource//xplat/executorch/build:selects.bzl", "selects") +load( + "@fbsource//xplat/executorch/kernels/optimized:lib_defs.bzl", + "get_vec_android_preprocessor_flags", +) + +def op_target(name, deps = []): + """Registers an optimized implementation for an operator overload group. + + An operator overload group is a set of operator overloads with a common + operator name. That common operator name should be the base name of this + target. + + E.g., the "add" operator overload group, named "op_add" in this target, + might implement: + - add.Tensor + - add_.Tensor + - add.out + - add.Scalar + + If an op target would like to share a header/sources with a different op + target (e.g., helpers/utilities), it should declare a separate cxx_library + and add it as a dep. + + Args: + name: The name of the operator overload group; e.g., + "op_add". This directory must contain a source file named + ".cpp"; e.g., "op_add.cpp". + deps: Optional extra deps to add to the cxx_library(). Note: + - op targets may not depend on other op targets, to keep the + dependencies manageable. If two op targets would like to share + code, define a separate runtime.cxx_library that they both depend + on. + """ + + # Note that this doesn't actually define the target, but helps register + # it in a table that's used to define the target. + return { + "deps": deps, + "name": name, + } + +def _enforce_deps(deps, name): + """Fails if any of the deps are not allowed. + + Args: + deps: A list of build target strings. + name: The name of the target; e.g., "op_add" + """ + for dep in deps: + if dep.startswith(":op_"): + # op targets may not depend on other op targets, to keep the + # dependencies manageable. If two op targets would like to share + # code, define a separate runtime.cxx_library that they both depend + # on. + fail("op_target {} may not depend on other op_target {}".format( + name, + dep, + )) + +def define_op_library(name, deps): + """Defines a cxx_library target for the named operator overload group. + + Args: + name: The name of the target; e.g., "op_add" + deps: List of deps for the target. + """ + selects.apply(obj = deps, function = native.partial(_enforce_deps, name = name)) + + augmented_deps = deps + [ + "//executorch/kernels/optimized:libvec", + "//executorch/kernels/optimized:libutils", + ] + + runtime.cxx_library( + name = "{}".format(name), + srcs = [ + "{}.cpp".format(name), + ], + visibility = [ + "//executorch/kernels/portable/test/...", + "//executorch/kernels/quantized/test/...", + "//executorch/kernels/optimized/test/...", + "//executorch/kernels/test/...", + "@EXECUTORCH_CLIENTS", + ], + # kernels often have helpers with no prototypes just disabling the warning here as the headers + # are codegend and linked in later + compiler_flags = ["-Wno-missing-prototypes"], + deps = [ + "//executorch/runtime/kernel:kernel_includes", + ] + augmented_deps, + fbandroid_platform_preprocessor_flags = get_vec_android_preprocessor_flags(), + # sleef needs to be added as a direct dependency of the operator target when building for Android, + # or a linker error may occur. Not sure why this happens; it seems that fbandroid_platform_deps of + # dependencies are not transitive + fbandroid_platform_deps = [ + ( + "^android-arm64.*$", + [ + "fbsource//third-party/sleef:sleef_arm", + ], + ), + ], + # link_whole is necessary because the operators register themselves + # via static initializers that run at program startup. 
+ # @lint-ignore BUCKLINT link_whole + link_whole = True, + ) + +def define_op_target(name, deps): + """Possibly defines cxx_library targets for the named operator group. + + Args: + name: The base name of the target; e.g., "op_add" + deps: List of deps for the targets. + """ + + # When building in ATen mode, ATen-compatible (non-custom) operators will + # use the implementations provided by ATen, so we should not build the + # versions defined here. + define_op_library( + name = name, + deps = deps, + ) + +def is_op_disabled(name): + # TODO (gjcomer) Enable ops with sleef dependency in OSS + disabled_ops = ["op_gelu", "op_log_softmax"] + return name in disabled_ops diff --git a/starlark-rust/README.md b/starlark-rust/README.md index 1c53f58201c52..5094d350de00a 100644 --- a/starlark-rust/README.md +++ b/starlark-rust/README.md @@ -1,16 +1,30 @@ # Starlark in Rust [![Support Ukraine](https://img.shields.io/badge/Support-Ukraine-FFD500?style=flat&labelColor=005BBB)](https://opensource.fb.com/support-ukraine) -[![GitHub link](https://img.shields.io/badge/GitHub-facebookexperimental%2Fstarlark--rust-blue.svg)](https://github.com/facebookexperimental/starlark-rust) +[![GitHub link](https://img.shields.io/badge/GitHub-facebook%2Fstarlark--rust-blue.svg)](https://github.com/facebook/starlark-rust) [![crates.io version](https://img.shields.io/crates/v/starlark.svg)](https://crates.io/crates/starlark) [![docs.rs availability](https://img.shields.io/docsrs/starlark?label=docs.rs)](https://docs.rs/starlark/) -[![Build status](https://img.shields.io/github/actions/workflow/status/facebookexperimental/starlark-rust/ci.yml?branch=main)](https://github.com/facebookexperimental/starlark-rust/actions) - -There are several copies of this repo on GitHub, [facebookexperimental/starlark-rust](https://github.com/facebookexperimental/starlark-rust) is the canonical one. - -This project provides a Rust implementation of the [Starlark language](https://github.com/bazelbuild/starlark/blob/master/spec.md). Starlark (formerly codenamed Skylark) is a deterministic language inspired by Python3, used for configuration in the build systems [Bazel](https://bazel.build), [Buck](https://buck.build) and [Buck2](https://buck2.build), of which Buck2 depends on this library. This project was originally developed [in this repo](https://github.com/google/starlark-rust), which contains a more extensive history. - -There are at least three implementations of Starlark, [one in Java](https://github.com/bazelbuild/starlark), [one in Go](https://github.com/google/starlark-go), and this one in Rust. We mostly follow the Starlark standard. If you are interested in trying out Rust Starlark, you can clone this repo and run: +[![Build status](https://img.shields.io/github/actions/workflow/status/facebook/starlark-rust/ci.yml?branch=main)](https://github.com/facebook/starlark-rust/actions) + +There are several copies of this repo on GitHub, +[facebook/starlark-rust](https://github.com/facebook/starlark-rust) is the +canonical one. + +This project provides a Rust implementation of the +[Starlark language](https://github.com/bazelbuild/starlark/blob/master/spec.md). +Starlark (formerly codenamed Skylark) is a deterministic language inspired by +Python3, used for configuration in the build systems +[Bazel](https://bazel.build), [Buck](https://buck.build) and +[Buck2](https://buck2.build), of which Buck2 depends on this library. 
This +project was originally developed +[in this repo](https://github.com/google/starlark-rust), which contains a more +extensive history. + +There are at least three implementations of Starlark, +[one in Java](https://github.com/bazelbuild/starlark), +[one in Go](https://github.com/google/starlark-go), and this one in Rust. We +mostly follow the Starlark standard. If you are interested in trying out Rust +Starlark, you can clone this repo and run: ```shell $ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh @@ -19,58 +33,109 @@ $> 1+2 3 ``` -This project was started by [Damien Martin-Guillerez](https://github.com/damienmg). Version 0.4.0 of this library changed ownership [from Google](https://github.com/google/starlark-rust) to Facebook. +This project was started by +[Damien Martin-Guillerez](https://github.com/damienmg). Version 0.4.0 of this +library changed ownership [from Google](https://github.com/google/starlark-rust) +to Facebook. ## Learn More -Read [this blog post](https://developers.facebook.com/blog/post/2021/04/08/rust-starlark-library/) for an overview of the library, the reasons behind Starlark, and how it might fit in to your project. There is also a [2 minute introductory video](https://www.youtube.com/watch?v=3kHER3KIPj4). +Read +[this blog post](https://developers.facebook.com/blog/post/2021/04/08/rust-starlark-library/) +for an overview of the library, the reasons behind Starlark, and how it might +fit in to your project. There is also a +[2 minute introductory video](https://www.youtube.com/watch?v=3kHER3KIPj4). ## Features This project features: -* Easy interoperability between Rust types and Starlark. -* Rust-friendly types, so frozen values are `Send`/`Sync`, while non-frozen values aren't. -* [Garbage collected](docs/gc.md) values allocated on [a heap](docs/heaps.md). -* Optional runtime-checked [types](docs/types.md). -* A linter, to detect code issues in Starlark. -* IDE integration in the form of [LSP](https://microsoft.github.io/language-server-protocol/). - +- Easy interoperability between Rust types and Starlark. +- Rust-friendly types, so frozen values are `Send`/`Sync`, while non-frozen + values aren't. +- [Garbage collected](docs/gc.md) values allocated on [a heap](docs/heaps.md). +- Optional runtime-checked [types](docs/types.md). +- A linter, to detect code issues in Starlark. +- IDE integration in the form of + [LSP](https://microsoft.github.io/language-server-protocol/). +- Extensive testing, including + [fuzz testing](https://github.com/google/oss-fuzz/tree/master/projects/starlark-rust). +- [DAP](https://microsoft.github.io/debug-adapter-protocol/) support. This project also has three non-goals: -* We do _not_ aim for API stability between releases, preferring to iterate quickly and refine the API as much as possible. But we do [follow SemVer](https://doc.rust-lang.org/cargo/reference/semver.html). -* We do _not_ aim for minimal dependencies, preferring to keep one package with lots of power. But if some dependencies prove tricky, we might add feature flags. +- We do _not_ aim for API stability between releases, preferring to iterate + quickly and refine the API as much as possible. But we do + [follow SemVer](https://doc.rust-lang.org/cargo/reference/semver.html). +- We do _not_ aim for minimal dependencies, preferring to keep one package with + lots of power. But if some dependencies prove tricky, we might add feature + flags. 
## Components -There are three components: - -* `starlark_derive`, a proc-macro crate that defines the necessary macros for Starlark. This library is a dependency of `starlark` the library, which reexports all the relevant pieces, and should not be used directly. -* `starlark` the library, a library that defines the parser, evaluator and standard library. Projects wishing to embed Starlark in their environment (with additional types, library functions and features) will make use of this library. -* `starlark` the binary, which provides interactive evaluation, IDE features and linter, exposed through a command line. Useful if you want to use vanilla Starlark (but if you do, consider Python3 instead) or as a test-bed for experimenting. Most projects will end up implementing some of this functionality themselves over the `starlark` library, incorporating their specific extra types etc. - -In particular the `starlark` binary _can_ be effectively used as a linter. But for the REPL, evaluator and IDE features the `starlark` binary is only aware of standard Starlark. Most Starlark embeddings supply extra functions and data types to work with domain-specific concerns, and the lack of these bindings will cause the REPL/evaluator to fail if they are used, and will give a subpar IDE experience. In most cases you should write your own binary depending on the `starlark` library, integrating your domain-specific pieces, and then using the bundled LSP functions to produce your own IDE/REPL/evaluator on top of those. You should still be able to use the [VS Code extension](vscode/README.md). +There are six components: + +- `starlark_derive`, a proc-macro crate that defines the necessary macros for + Starlark. This library is a dependency of `starlark` the library, which + reexports all the relevant pieces, and should not be used directly. +- `starlark_map`, a library with memory-efficient ordered/unordered maps/sets + and various other data structures useful in Starlark. +- `starlark_syntax`, a library with the AST of Starlark and parsing functions. + Only use if you want to manipulate the AST directly. +- `starlark` the main library, with evaluator, standard library, debugger + support and lots of other pieces. Projects wishing to embed Starlark in their + environment (with additional types, library functions and features) will make + use of this library. This library reexports the relevant pieces of + `starlark_derive`, `starlark_map` and most of `starlark_syntax`. +- `starlark_lsp`, a library providing an + [LSP](https://microsoft.github.io/language-server-protocol/). +- `starlark_bin` the binary, which provides interactive evaluation, IDE features + and linter, exposed through a command line. Useful if you want to use vanilla + Starlark (but if you do, consider Python3 instead) or as a test-bed for + experimenting. Most projects will end up implementing some of this + functionality themselves over the `starlark` and `starlark_lsp` libraries, + incorporating their specific extra types etc. + +In particular the `starlark_bin` binary _can_ be effectively used as a linter. +But for the REPL, evaluator and IDE features the `starlark_bin` binary is only +aware of standard Starlark. Most Starlark embeddings supply extra functions and +data types to work with domain-specific concerns, and the lack of these bindings +will cause the REPL/evaluator to fail if they are used, and will give a subpar +IDE experience. 
In most cases you should write your own binary depending on the +`starlark` library, integrating your domain-specific pieces, and then using the +bundled LSP functions to produce your own IDE/REPL/evaluator on top of those. +You should still be able to use the [VS Code extension](vscode/README.md). ## Compatibility -In this section we outline where we don't comply with the [Starlark spec](https://github.com/bazelbuild/starlark/blob/master/spec.md). +In this section we outline where we don't comply with the +[Starlark spec](https://github.com/bazelbuild/starlark/blob/master/spec.md). -* We have plenty of extensions, e.g. type annotations, recursion, top-level `for`. -* We don't yet support later additions to Starlark, such as [bytes](https://github.com/facebookexperimental/starlark-rust/issues/4). -* In some cases creating circular data structures may lead to stack overflows. +- We have plenty of extensions, e.g. type annotations, recursion, top-level + `for`. +- We don't yet support later additions to Starlark, such as + [bytes](https://github.com/facebook/starlark-rust/issues/4). +- In some cases creating circular data structures may lead to stack overflows. ## Making a release -1. Check the [GitHub Actions](https://github.com/facebookexperimental/starlark-rust/actions) are green. -2. Update `CHANGELOG.md` with the changes since the last release. [This link](https://github.com/facebookexperimental/starlark-rust/compare/v0.4.0...main) can help (update to compare against the last release). -3. Update the version numbers of the two `Cargo.toml` files. Bump them by 0.0.1 if there are no incompatible changes, or 0.1.0 if there are. Bump the dependency in `starlark` to point at the latest `starlark_derive` version. -4. Copy the files `CHANGELOG.md`, `LICENSE` and `README.md` into each `starlark` and `starlark_derive` subdirectory. -5. Run `cargo publish --allow-dirty --dry-run`, then without the `--dry-run`, first in `starlark_derive` and then `starlark` directories. -6. Create a [GitHub release](https://github.com/facebookexperimental/starlark-rust/releases/new) with `v0.X.Y`, using the `starlark` version as the name. +1. Check the [GitHub Actions](https://github.com/facebook/starlark-rust/actions) + are green. +2. Update `CHANGELOG.md` with the changes since the last release. + [This link](https://github.com/facebook/starlark-rust/compare/v0.4.0...main) + can help (update to compare against the last release). +3. Update the version numbers of the two `Cargo.toml` files. Bump them by 0.0.1 + if there are no incompatible changes, or 0.1.0 if there are. Bump the + dependency in `starlark` to point at the latest `starlark_derive` version. +4. Copy the files `CHANGELOG.md`, `LICENSE` and `README.md` into each + subdirectory. +5. Run `cargo publish --allow-dirty --dry-run`, then without the `--dry-run`, in + each of the component directories in the [order above](#components). +6. Create a + [GitHub release](https://github.com/facebook/starlark-rust/releases/new) with + `v0.X.Y`, using the `starlark` version as the name. ## License -Starlark Rust is Apache License, Version 2.0 licensed, as found in the [LICENSE](LICENSE) file. +Starlark Rust is Apache License, Version 2.0 licensed, as found in the +[LICENSE](LICENSE) file. 
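As a companion to the Components section above, the following is a minimal sketch of embedding the `starlark` library, based on the crate's documented parse/eval API; exact signatures and error types vary between released versions, so treat this as illustrative rather than definitive.

```rust
use starlark::environment::{Globals, Module};
use starlark::eval::Evaluator;
use starlark::syntax::{AstModule, Dialect};
use starlark::values::Value;

fn main() -> anyhow::Result<()> {
    // Parse program text into an AST using the standard dialect.
    let ast = AstModule::parse("demo.star", "x = 5\nx + 37".to_owned(), &Dialect::Standard)?;
    // Globals supply the standard library (len, str, and so on).
    let globals = Globals::standard();
    // The Module holds the mutable top-level environment for this evaluation.
    let module = Module::new();
    let mut eval = Evaluator::new(&module);
    let result: Value = eval.eval_module(ast, &globals)?;
    assert_eq!(result.to_string(), "42");
    Ok(())
}
```

A real embedding would extend `Globals` with domain-specific functions and types, which is exactly the gap the README describes between `starlark_bin` and a project-specific binary.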
diff --git a/starlark-rust/docs/environment.md b/starlark-rust/docs/environment.md index 9bd80b5d95897..d527d482eda2e 100644 --- a/starlark-rust/docs/environment.md +++ b/starlark-rust/docs/environment.md @@ -1,10 +1,11 @@ # Environments -:::warning -Some of the information within this page is outdated. However, the explanation of the problem, and thought process behind it, remains useful. The storage of values is similar but implemented using different types. -::: +:::warning +Some of the information within this page is outdated. However, the explanation +of the problem, and thought process behind it, remains useful. The storage of +values is similar but implemented using different types. +::: -Starlark (with a nested `def`) has a series of environments that may be active during an evaluation, as illustrated in the following example: +Starlark (with a nested `def`) has a series of environments that may be active +during an evaluation, as illustrated in the following example: ```python x = [] @@ -17,21 +18,30 @@ def foo(): The above example features the following environments: -* Global environment - defining things like `list.append` -* Module environment - defining `x` -* Environment of `foo` - defining `y` -* Environment of `bar` - defining `z` +- Global environment - defining things like `list.append` +- Module environment - defining `x` +- Environment of `foo` - defining `y` +- Environment of `bar` - defining `z` -A scope can *access* variables defined above it, and often *mutate* them, but not *assign* them. +A scope can _access_ variables defined above it, and often _mutate_ them, but +not _assign_ them. To unpack that: -* From the statements inside `bar`, you can access `list.append`, `x`, `y`, and `z`. -* From inside `bar`, you can mutate the variables to be accessed with statements like `list.append(x, 1)` (which may also be termed `x.append(1)`). - * However, before this module is imported by another module, all of its exports become *frozen*, which means it isn't possible to mutate a global list, and if `foo` is called from a different module, then `x` can't be modified. -* If `bar` does `x = 1` that defines a local variable `x` in the function `bar`, shadowing the global `x`. As a consequence, you cannot assign to variables defined in an outer scope. - -Note that assignment *after*, or even *in* non-executed conditional branches, introduces a local variable. +- From the statements inside `bar`, you can access `list.append`, `x`, `y`, and + `z`. +- From inside `bar`, you can mutate the variables to be accessed with statements + like `list.append(x, 1)` (which may also be termed `x.append(1)`). + - However, before this module is imported by another module, all of its + exports become _frozen_, which means it isn't possible to mutate a global + list, and if `foo` is called from a different module, then `x` can't be + modified. +- If `bar` does `x = 1` that defines a local variable `x` in the function `bar`, + shadowing the global `x`. As a consequence, you cannot assign to variables + defined in an outer scope. + +Note that assignment _after_, or even _in_ non-executed conditional branches, +introduces a local variable. For example: @@ -43,21 +53,28 @@ def f(): x = 2 ``` -In the above code, on executing `f()`, it would complain that `x` is referenced before assignment, as the assignment `x = 2` makes `x` a local variable. +In the above code, on executing `f()`, it would complain that `x` is referenced +before assignment, as the assignment `x = 2` makes `x` a local variable.
-The rest of this document outlines the various types of environments, how they are accessed, and how they are updated. +The rest of this document outlines the various types of environments, how they +are accessed, and how they are updated. ## Global Environment -The global environment is always frozen and consists of *functions* and *type-values*. All things in the global environment are accessed by name. +The global environment is always frozen and consists of _functions_ and +_type-values_. All things in the global environment are accessed by name. -Type-values are things like `list.append`, which is used when you do either `list.append(xs, 1)` or `xs.append(1)`, assuming `xs` is of type `list`. The available methods for a type can be queried (for example, `dir(list)`). +Type-values are things like `list.append`, which is used when you do either +`list.append(xs, 1)` or `xs.append(1)`, assuming `xs` is of type `list`. The +available methods for a type can be queried (for example, `dir(list)`). There are also global functions, such as `len`, `range`, and `str`. ## Slots -To optimise evaluation, all variables are accessed by integers, which are known as 'slots'. Many variables can be converted to slots statically during compilation, and those which can't have their slot looked up by name at runtime. +To optimise evaluation, all variables are accessed by integers, which are known +as 'slots'. Many variables can be converted to slots statically during +compilation, and those which can't have their slot looked up by name at runtime. The `Slots` data type is defined as: @@ -72,25 +89,39 @@ struct FrozenSlots(Arc<Vec<Option<FrozenValue>>>); As featured in the above code: -* A set of slots are either `Frozen`, which came from another module behind `Arc` or just normal `Slots`, which can be manipulated by the current scope (behind a `Rc`/`RefCell` for single-threaded use and mutation). -* `Vec` is accessed by the slot index. -* `Option` refers to whether the slot has been assigned yet (to detect variables referenced before assignment). +- A set of slots are either `Frozen`, which came from another module behind + `Arc` or just normal `Slots`, which can be manipulated by the current scope + (behind a `Rc`/`RefCell` for single-threaded use and mutation). +- `Vec` is accessed by the slot index. +- `Option` refers to whether the slot has been assigned yet (to detect variables + referenced before assignment). ## Module Environment -The module environment is where the module executes, namely where `x` is defined above. The module environment can have values added in the following standards-conforming ways: +The module environment is where the module executes, namely where `x` is defined +above. The module environment can have values added in the following +standards-conforming ways: -* Assignment statements (such as `x = 1` or `x += 1`). -* `For` loops (such as the `x` in `for x in []:`). -* Via the `load("a.bzl", "foo")`, which imports `foo` frozen. -* Via `def foo():`, which defines `foo` in the module environment. Whether a `def` is frozen or not, when it's executed, its local variables are not frozen. +- Assignment statements (such as `x = 1` or `x += 1`). +- `For` loops (such as the `x` in `for x in []:`). +- Via the `load("a.bzl", "foo")`, which imports `foo` frozen. +- Via `def foo():`, which defines `foo` in the module environment. Whether a + `def` is frozen or not, when it's executed, its local variables are not + frozen.
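To make the list above concrete, here is a tiny module exercising each standards-conforming binding form; this is illustrative only, with `a.bzl` and `foo` reused as placeholders from the text above.

```python
load("a.bzl", "foo")  # binds `foo` in this module, already frozen

x = 1   # assignment statement binds `x`
x += 1  # augmented assignment rebinds `x`

for y in [1, 2, 3]:  # the loop variable `y` becomes a module binding
    pass

def bar():  # binds `bar`; its locals are not frozen while it executes
    return x
```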
-In addition, two non-standards-conforming ways of defining variables are supported: +In addition, two non-standards-conforming ways of defining variables are +supported: -* Some modules can be injected as bindings in advance. Given a module `foo` that is injected, all the bindings of `foo` will be inserted in this module as frozen. -* The function `load_symbols` injects a dictionary of bindings into the module environment. +- Some modules can be injected as bindings in advance. Given a module `foo` that + is injected, all the bindings of `foo` will be inserted in this module as + frozen. +- The function `load_symbols` injects a dictionary of bindings into the module + environment. -Note that a module has a fixed set of variables (from the standards-conforming ways), a pre-execution set (from the injections) and yet more variables at runtime (via `load_symbols`). To support that structure, the mapping from name to slot index is tracked in a struct: +Note that a module has a fixed set of variables (from the standards-conforming +ways), a pre-execution set (from the injections) and yet more variables at +runtime (via `load_symbols`). To support that structure, the mapping from name +to slot index is tracked in a struct: ```rust enum Names { @@ -100,15 +131,23 @@ struct FrozenNames(Arc<HashMap<String, usize>>); ``` -Each name is given an entry in the map with an increasing slot index. A name will only be assigned a slot once, reusing it thereafter. A corresponding `Slots` data type provides the values associated with those names. +Each name is given an entry in the map with an increasing slot index. A name +will only be assigned a slot once, reusing it thereafter. A corresponding +`Slots` data type provides the values associated with those names. -Importantly, the `Slots` can be extended at runtime by the `load_symbols` function. As with `Slots`, you can either share things behind an `Arc` or mutate them behind an `Rc`/`RefCell`. +Importantly, the `Slots` can be extended at runtime by the `load_symbols` +function. As with `Slots`, you can either share things behind an `Arc` or mutate +them behind an `Rc`/`RefCell`. ## Function Environment -A function can have variables introduced via assignments, `for` loops, and parameters. No additional variables can be discovered at runtime, so all names can be erased at compile time. +A function can have variables introduced via assignments, `for` loops, and +parameters. No additional variables can be discovered at runtime, so all names +can be erased at compile time. -A function can also access variables from the functions it is statically nested within, and from the variables at the root of the module. To support this structure, at runtime we pass around the context, defined as: +A function can also access variables from the functions it is statically nested +within, and from the variables at the root of the module. To support this +structure, at runtime we pass around the context, defined as: ```rust struct Context { @@ -117,13 +156,15 @@ } -The above code contains the mapping of names for the module and the slots for the module and each function. +The above code contains the mapping of names for the module and the slots for +the module and each function. -When executed, the inner-most `Slots` (at the end of `slots:`) will never be frozen, as that represents the local variables: but any other may be.
+When executed, the inner-most `Slots` (at the end of `slots:`) will never be +frozen, as that represents the local variables: but any other may be. When a function value is captured in a frozen module, use `FrozenContext`: ```rust struct FrozenContext { names: FrozenNames, slots: Vec<FrozenSlots>, } @@ -135,21 +176,34 @@ A list comprehension can be defined as: ```python [x for x in [1,2,3]] ``` In the above code: -* The statement defines a variable `x` that is immediately initialised and shadows any other variables `x` in scope. -* The variable `x` cannot be assigned to, other than in the list comprehension, as it only lives inside the comprehension and the comprehension does not permit assignment statements (only expressions). Such names are not available at the top-level, even when defined in the root of a module. +- The statement defines a variable `x` that is immediately initialised and + shadows any other variables `x` in scope. +- The variable `x` cannot be assigned to, other than in the list comprehension, + as it only lives inside the comprehension and the comprehension does not + permit assignment statements (only expressions). Such names are not available + at the top-level, even when defined in the root of a module. -List comprehensions are implemented by adding additional entries into the `Slots` data type. Even when added at the root of a module, such names are not added to `Names`. +List comprehensions are implemented by adding additional entries into the +`Slots` data type. Even when added at the root of a module, such names are not +added to `Names`. ## Optimisations There are a number of optimisations made to the scheme: -* When freezing a `Names` or `Slots` structure, it's important to only freeze a particular mutable variant once, or you duplicate memory unnecessarily. Therefore, the `Slots` to be `Rc<RefCell<(Vec<Option<Value>>, Option<FrozenSlots>)>>` are augmented, and, similarly, the `Names`. - * When `freeze` is called, the original value is consumed, and the `Some` variant is added. - * **Note**: it is unsafe to ever access the slots after the `freeze`. -* Programs can only assign to the inner-most `Slots`, and that slots must always be mutable. Therefore, define a local `Slots` that is always mutable, and a separate AST node for referring to it. - * For modules, it is important that this mutable local `Slots` is *also* in scope since the scope is used to retrieve unknown variables. +- When freezing a `Names` or `Slots` structure, it's important to only freeze a + particular mutable variant once, or you duplicate memory unnecessarily. + Therefore, the `Slots` to be `Rc<RefCell<(Vec<Option<Value>>, Option<FrozenSlots>)>>` are + augmented, and, similarly, the `Names`. + - When `freeze` is called, the original value is consumed, and the `Some` + variant is added. + - **Note**: it is unsafe to ever access the slots after the `freeze`. +- Programs can only assign to the inner-most `Slots`, and those slots must always + be mutable. Therefore, define a local `Slots` that is always mutable, and a + separate AST node for referring to it. + - For modules, it is important that this mutable local `Slots` is _also_ in + scope since the scope is used to retrieve unknown variables. diff --git a/starlark-rust/docs/gc.md b/starlark-rust/docs/gc.md index 324a3a5a82b62..847d4835489ff 100644 --- a/starlark-rust/docs/gc.md +++ b/starlark-rust/docs/gc.md @@ -2,7 +2,14 @@ This page describes a two-space garbage collector that can deal with cycles. -In Starlark, this pattern is used both when doing a real garbage collection, and when freezing.
 ## Optimisations

 There are a number of optimisations made to the scheme:

-* When freezing a `Names` or `Slots` structure, it's important to only freeze a particular mutable variant once, or you duplicate memory unnecessarily. Therefore, the `Slots` are augmented to be `Rc<RefCell<(Slots, Option<FrozenSlots>)>>`, and, similarly, the `Names`.
-  * When `freeze` is called, the original value is consumed, and the `Some` variant is added.
-  * **Note**: it is unsafe to ever access the slots after the `freeze`.
-* Programs can only assign to the inner-most `Slots`, and that slots must always be mutable. Therefore, define a local `Slots` that is always mutable, and a separate AST node for referring to it.
-  * For modules, it is important that this mutable local `Slots` is *also* in scope since the scope is used to retrieve unknown variables.
+- When freezing a `Names` or `Slots` structure, it's important to only freeze a
+  particular mutable variant once, or you duplicate memory unnecessarily.
+  Therefore, the `Slots` are augmented to be
+  `Rc<RefCell<(Slots, Option<FrozenSlots>)>>`, and, similarly, the `Names`.
+  - When `freeze` is called, the original value is consumed, and the `Some`
+    variant is added.
+  - **Note**: it is unsafe to ever access the slots after the `freeze`.
+- Programs can only assign to the inner-most `Slots`, and that `Slots` must
+  always be mutable. Therefore, define a local `Slots` that is always mutable,
+  and a separate AST node for referring to it.
+  - For modules, it is important that this mutable local `Slots` is _also_ in
+    scope since the scope is used to retrieve unknown variables.
diff --git a/starlark-rust/docs/gc.md b/starlark-rust/docs/gc.md
index 324a3a5a82b62..847d4835489ff 100644
--- a/starlark-rust/docs/gc.md
+++ b/starlark-rust/docs/gc.md
@@ -2,7 +2,14 @@

 This page describes a two-space garbage collector that can deal with cycles.

-In Starlark, this pattern is used both when doing a real garbage collection, and when freezing. For both cases, it starts out with a memory block, which has pointers referring to things inside it, and ends up with a new memory block with equivalent pointers inside it. However, only pointers reachable from outside the original memory block are available in the new memory block. The garbage collector can deal with cyclic data structures and the time spent is proportional to the amount of live data in the heap (memory that is dropped is not even visited).
+In Starlark, this pattern is used both when doing a real garbage collection, and
+when freezing. For both cases, it starts out with a memory block, which has
+pointers referring to things inside it, and ends up with a new memory block with
+equivalent pointers inside it. However, only pointers reachable from outside the
+original memory block are available in the new memory block. The garbage
+collector can deal with cyclic data structures and the time spent is
+proportional to the amount of live data in the heap (memory that is dropped is
+not even visited).

 ## A worked example

@@ -14,53 +21,67 @@
 Y := Data("hello", X, Y)
 Z := Data("universe")
 ```

-All of `X`, `Y` and `Z` are memory locations. The `Y` memory location has both some data of its own (`"hello"`) and two pointers (`X` and `Y` itself).
+All of `X`, `Y` and `Z` are memory locations. The `Y` memory location has both
+some data of its own (`"hello"`) and two pointers (`X` and `Y` itself).

-The pointers from outside the heap into the heap are known as *roots*.
+The pointers from outside the heap into the heap are known as _roots_.

-Assuming, in the above example, that `Y` is the only root, then, since `Y` is used from outside, `Y` must be moved to the new memory block. Consequently, the data `X` needs to be copied, but `Z` can be dropped.
+Assuming, in the above example, that `Y` is the only root, then, since `Y` is
+used from outside, `Y` must be moved to the new memory block. Consequently, the
+data `X` needs to be copied, but `Z` can be dropped.

 Following are the required steps for using a garbage collector:

-1. To copy `Y`, allocate a value in the new heap `A` with a sentinel value in it (that that sentinel is called a `Blackhole`). Then, turn `Y` into a `Forward(A)` pointer, so that if anyone else in this cycle tries to collect `Y` they immediately "forward" to the new value and the data from `Y` is grabbed so its pointers can be traversed. That results in the following:
+1. To copy `Y`, allocate a value in the new heap `A` with a sentinel value in it
+   (that sentinel is called a `Blackhole`). Then, turn `Y` into a
+   `Forward(A)` pointer, so that if anyone else in this cycle tries to collect
+   `Y` they immediately "forward" to the new value and the data from `Y` is
+   grabbed so its pointers can be traversed. That results in the following:

-   ```bash
-   X := Data("world")
-   Y := Forward(A)
-   Z := Data("universe")
-
-   A := Blackhole
-   ```
+   ```bash
+   X := Data("world")
+   Y := Forward(A)
+   Z := Data("universe")
+
+   A := Blackhole
+   ```

-   With `Data("hello", X, Y)` as the current item being processed.
+   With `Data("hello", X, Y)` as the current item being processed.
-2. Walk the pointers of the current value, performing a garbage collection on each of them. To copy `Y`, it can be seen that `Y` points at a `Forward(A)` node, so there's no need to do anything. To copy `X`, follow the process starting at step 1, but for `X` (which ends up at `B`). Performing that move leads to the following:
+2. Walk the pointers of the current value, performing a garbage collection on
+   each of them. To copy `Y`, it can be seen that `Y` points at a `Forward(A)`
+   node, so there's no need to do anything. To copy `X`, follow the process
+   starting at step 1, but for `X` (which ends up at `B`). Performing that move
+   leads to the following:

-   ```bash
-   X := Forward(B)
-   Y := Forward(A)
-   Z := Data("universe")
-
-   A := Blackhole
-   B := Data("world")
-   ```
+   ```bash
+   X := Forward(B)
+   Y := Forward(A)
+   Z := Data("universe")
+
+   A := Blackhole
+   B := Data("world")
+   ```

-3. Replace all the pointers with the forwarded value, and write it back over the `Blackhole` in `A`. This gives the following:
+3. Replace all the pointers with the forwarded value, and write it back over the
+   `Blackhole` in `A`. This gives the following:

-   ```bash
-   X := Forward(B)
-   Y := Forward(A)
-   Z := Data("universe")
-
-   A := Data("hello", B, A)
-   B := Data("world")
-   ```
+   ```bash
+   X := Forward(B)
+   Y := Forward(A)
+   Z := Data("universe")
+
+   A := Data("hello", B, A)
+   B := Data("world")
+   ```

-4. Adjust any roots pointing at `Y` to point at `A` and throw away the original heap, which produces the following:
+4. Adjust any roots pointing at `Y` to point at `A` and throw away the original
+   heap, which produces the following:

-   ```bash
-   A := Data("hello", B, A)
-   B := Data("world")
-   ```
+   ```bash
+   A := Data("hello", B, A)
+   B := Data("world")
+   ```

-These above four steps successfully garbage collects a cyclic data structure, while preserving the cycles and getting rid of the unused data.
+These four steps successfully garbage collect a cyclic data structure, while
+preserving the cycles and getting rid of the unused data.
diff --git a/starlark-rust/docs/heaps.md b/starlark-rust/docs/heaps.md
index 22cf8497b1578..0af4f4ebc7b9a 100644
--- a/starlark-rust/docs/heaps.md
+++ b/starlark-rust/docs/heaps.md
@@ -4,24 +4,36 @@

 In Starlark, there are three interesting heap-related points of interest:

-* A `Heap` has `Value`'s allocated on it and cannot be cloned or shared.
-* A `FrozenHeap` has `FrozenValue`'s allocated on it and cannot be cloned or shared.
-* A `FrozenHeapRef` is a `FrozenHeap` that is now read-only and can now be cloned and shared.
+- A `Heap` has `Value`'s allocated on it and cannot be cloned or shared.
+- A `FrozenHeap` has `FrozenValue`'s allocated on it and cannot be cloned or
+  shared.
+- A `FrozenHeapRef` is a `FrozenHeap` that is now read-only and can now be
+  cloned and shared.

-A `FrozenHeapRef` keeps a heap alive. While you have a `FrozenValue`, it is important that you have either the `FrozenHeap` itself, or more usually, a `FrozenHeapRef` to it. A `FrozenHeap` may contains a set of `FrozenHeapRef`'s to keep the `FrozenHeap`s it references alive.
+A `FrozenHeapRef` keeps a heap alive. While you have a `FrozenValue`, it is
+important that you have either the `FrozenHeap` itself, or more usually, a
+`FrozenHeapRef` to it. A `FrozenHeap` may contain a set of `FrozenHeapRef`'s to
+keep the `FrozenHeap`s it references alive.

 ## Heap Containers

 Heaps are included in other data types:

-* A `Module` contains a `Heap` (where normal values are allocated) and a `FrozenHeap` (stores references to other frozen heaps and has compilation constants allocated on it). The `Heap` portion is garbage collected. At the end, when you call `freeze`, `Value`'s referenced by name in the `Module` are moved to the `FrozenHeap` and then then `FrozenHeap` is sealed to produce a `FrozenHeapRef`.
-* A `FrozenModule` contains a `FrozenHeapRef`.
-* A `GlobalsBuilder` contains a `FrozenHeap` onto which values are allocated.
-* A `Globals` contains a `FrozenHeapRef`.
+- A `Module` contains a `Heap` (where normal values are allocated) and a
+  `FrozenHeap` (stores references to other frozen heaps and has compilation
+  constants allocated on it). The `Heap` portion is garbage collected. At the
+  end, when you call `freeze`, `Value`'s referenced by name in the `Module` are
+  moved to the `FrozenHeap` and then the `FrozenHeap` is sealed to produce a
+  `FrozenHeapRef`.
+- A `FrozenModule` contains a `FrozenHeapRef`.
+- A `GlobalsBuilder` contains a `FrozenHeap` onto which values are allocated.
+- A `Globals` contains a `FrozenHeapRef`.

 ## Heap References

-It is important that when a `FrozenValue` X is referenced by a `Value` or `FrozenValue` (for example, included in a list), the heap where X originates is added as a reference to the heap where the new value is being created.
+It is important that when a `FrozenValue` X is referenced by a `Value` or
+`FrozenValue` (for example, included in a list), the heap where X originates is
+added as a reference to the heap where the new value is being created.

 As a concrete example in pseudo-code:

@@ -40,19 +52,29 @@ In the above code, the following steps are taken:

 1. Create a `FrozenHeap` then allocate something in it.
 1. Turn the heap into a reference.
 1. Use the allocated value `s` from `h1` when constructing a value in `h2`.
-1. For that to be legal, and for the heap `h1` to not disappear while it is being allocated, it is important to call `add_reference`.
+1. For that to be legal, and for the heap `h1` to not disappear while it is
+   being allocated, it is important to call `add_reference`.

-Note that this API can only point at a `FrozenValue` from another heap, and only after that heap has been turned into a reference, so it will not be allocated in anymore. These restrictions are deliberate and mean that most programs only have one 'active heap' at a time.
+Note that this API can only point at a `FrozenValue` from another heap, and only
+after that heap has been turned into a reference, so nothing further will be
+allocated in it. These restrictions are deliberate and mean that most programs
+only have one 'active heap' at a time.

 Following are some places where heap references are added by Starlark:

-* Before evaluation is started, a reference is added to the `Globals` from the `Module`, so it can access the global functions.
-* When evaluating a `load` statement, a reference is added to the `FrozenModule` that is being loaded.
-* When freezing a module, the `FrozenHeap`, in the `Module`, is moved to the `FrozenModule`, preserving the references that were added.
+- Before evaluation is started, a reference is added to the `Globals` from the
+  `Module`, so it can access the global functions.
+- When evaluating a `load` statement, a reference is added to the `FrozenModule`
+  that is being loaded.
+- When freezing a module, the `FrozenHeap`, in the `Module`, is moved to the
+  `FrozenModule`, preserving the references that were added.

 ## `OwnedFrozenValue`

-When you get a value from a `FrozenModule`, it will be a `OwnedFrozenValue`. This structure is a pair of a `FrozenHeapRef` and a `FrozenValue`, where the ref keeps the value alive. You can move that `OwnedFrozenValue` into the value of a module with code such as:
+When you get a value from a `FrozenModule`, it will be an `OwnedFrozenValue`.
+This structure is a pair of a `FrozenHeapRef` and a `FrozenValue`, where the ref
+keeps the value alive. You can move that `OwnedFrozenValue` into the value of a
+module with code such as:

 ```rust
 fn move<'v>(from: &FrozenModule, to: &'v Module) {
@@ -64,9 +86,16 @@ fn move<'v>(from: &FrozenModule, to: &'v Module) {

 In general, you can use the `OwnedFrozenValue` in one of three ways:

-* **Operate on it directly** - with methods like `unpack_i32` or `to_str`.
-* **Extract it safely** - using methods like `owned_frozen_value`, which takes a `FrozenHeap` to which the heap reference is added and returns a naked `FrozenValue`. After that, it is then safe for the `FrozenHeap` you passed in to use the `FrozenValue`.
-  * With `owned_value`, there is lifetime checking that the right heap is passed, but with `FrozenValue`, there isn't.
-  * Be careful to pass the right heap, although given most programs only have one active heap at a time, it should mostly work out.
-* **Extract it unsafely** - using methods `unchecked_frozen_value`, which gives you the underlying `FrozenValue` without adding any references.
-  * Be careful to make sure there is a good reason the `FrozenValue` remains valid.
+- **Operate on it directly** - with methods like `unpack_i32` or `to_str`.
+- **Extract it safely** - using methods like `owned_frozen_value`, which takes a
+  `FrozenHeap` to which the heap reference is added and returns a naked
+  `FrozenValue`. After that, it is then safe for the `FrozenHeap` you passed in
+  to use the `FrozenValue`.
+  - With `owned_value`, there is lifetime checking that the right heap is
+    passed, but with `FrozenValue`, there isn't.
+  - Be careful to pass the right heap, although given most programs only have
+    one active heap at a time, it should mostly work out.
+- **Extract it unsafely** - using methods such as `unchecked_frozen_value`,
+  which gives you the underlying `FrozenValue` without adding any references.
+  - Be careful to make sure there is a good reason the `FrozenValue` remains
+    valid.
diff --git a/starlark-rust/docs/spec.md b/starlark-rust/docs/spec.md
new file mode 100644
index 0000000000000..85b72ee9bc968
--- /dev/null
+++ b/starlark-rust/docs/spec.md
@@ -0,0 +1,4 @@
+# Starlark Language Specification
+
+The Starlark language spec can be found in the
+[Bazel GitHub repository](https://github.com/bazelbuild/starlark/blob/master/spec.md).
diff --git a/starlark-rust/docs/types.md b/starlark-rust/docs/types.md
index f71ea3d8e3464..c50b20efabc88 100644
--- a/starlark-rust/docs/types.md
+++ b/starlark-rust/docs/types.md
@@ -1,51 +1,81 @@
 # Starlark Types

-The Starlark 'types' extension is highly experimental and likely to be modified in the future.
+The Starlark 'types' extension is highly experimental and likely to be modified
+in the future.

 Types can be added to function arguments, or function return types.

 For example:

 ```python
-def fib(i: int.type) -> int.type:
+def fib(i: int) -> int:
     ...
 ```

-These types are checked *at runtime*. Currently, there is no static checking or linting for them.
+There are several moments at which types can be checked:

-The rest of this document lays out what types mean and what type-supporting objects have been written using them.
+1. At runtime, as a function is executed, when a value of the appropriate type
+   is available.
+2. Statically, without executing anything.
+3. At compile time, when the definitions of all symbols imported using `load`
+   are available.

-## What does a type mean?
-
-A type is just an arbitrary expression that evaluates to a value; that value is then treated as a type, which is matched against values:
-
-* When `fib(3)` is called, the *value* `3` is passed to `fib` as parameter `i`.
-* When the execution of `fib` is started, the *expression* `int.type` is evaluated to `"int"`.
-* A check is then made that the value `3` matches the type represented by `"int"`.
+Currently, runtime checking is the normal mode, but other systems built on
+Starlark (e.g. Buck2) may also perform additional kinds of checking. In all
+cases the meaning of the types is the same.

-If the value doesn't match, it is a runtime error. Similarly, on `return` statements, or the end of the function, a check is made that result type matches `int.type`.
+The rest of this document lays out what types mean and what type-supporting
+values are available (records and enums).

-Types match using the following rules:
+## What does a type mean?

-* The type `""` means anything.
-* The type `"foo"` means any value of type `foo`, where the type of `x` is computed by doing `type(x)`. That means that `"int"`, `"bool"` and `"string"` are common types.
-* Most constructor functions provide a `.type` property to obtain the type they produce, allowing `int.type`, `bool.type` and `str.type` etc.
-* Any string starting with an underscore `_` (for example, `"_a"` means anything) but the name is often used as a hint to say where types go in polymorphic functions.
-* The type `None` means the result must be `None`.
-* The singleton list `[t]` means a list where each element must be of type `t`. If you want a list of any types, use `[""]`.
-* Multiple element lists `[t1,t2]` are OR types, where the value must be either type `t1` OR type `t2`.
-* A tuple `(t1, t2, t3)` matches tuples of the same length (3 in this case), where each element of the value must match the corresponding element of the tuple.
-* A singleton dictionary `{k: v}` means a dictionary where all the keys have type `k`, and all the values have type `v`.
-* It is possible to define functions that return types. For example, `def StrDict(t): return {str.type: t}` would mean `StrDict(int.type)` was a valid type.
+A type is a Starlark expression that has a meaning as a type:
+
+- When `fib(3)` is called, the _value_ `3` is passed to `fib` as parameter `i`.
+- When the execution of `fib` is started, the _expression_ `int` is evaluated to
+  the value of the `int` function.
+- A check is then made that the value `3` matches the type represented by `int`.
+
+If the value doesn't match, it is a runtime error. Similarly, on `return`
+statements, or the end of the function, a check is made that the result type
+matches `int`.
+
+As some examples of types:
+
+- The type `typing.Any` matches any value, with no restrictions.
+- The types `int`, `bool`, `str` all represent the values produced by the
+  respective functions.
+- The type `None` represents the value `None`.
+- The type `list[int]` represents a list of `int` values, e.g. `list[typing.Any]`
+  represents a list whose elements can be of any type.
+- The type `dict[int, bool]` represents a dictionary with `int` keys and `bool`
+  values.
+- The type `tuple[int, bool, str]` represents a tuple of arity 3 with components
+  being `int`, `bool` and `str`.
+- The type `tuple[int, ...]` represents a tuple of unknown arity where all the
+  components are of type `int`.
+- The type `int | bool` represents a value that is either an `int` or a `bool`.
+- The type `typing.Callable` represents something that can be called as a
+  function.
+- The type `typing.Iterable` represents something that can be iterated on.
+- The type `typing.Never` represents a type with no valid values, e.g. the
+  result of `fail` is `typing.Never`, as the return value of `fail` can never be
+  observed, given that the program terminates.
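+
+As an illustrative sketch of how these forms combine (assuming the default
+runtime checking described above):
+
+```python
+def first_or(xs: list[int], default: int | None = None) -> int | None:
+    if len(xs) != 0:
+        return xs[0]
+    return default
+
+first_or([1, 2, 3])      # == 1
+first_or([], default=7)  # == 7
+first_or("not a list")   # runtime error: the argument does not match `list[int]`
+```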
 The goals of this type system are:

-* Reuse the existing machinery of Starlark as much as possible, avoiding inventing a special class of type values. As a consequence, any optimisations for values like string/list are reused.
-* Provide a pleasing syntax.
-* Some degree of compatibility with Python, which allows types as expressions in the same places Buck2 allows them (but with different meaning and different checking).
-* And finally, a non-goal is to provide a complete type system capable of representing every type invariant: it's intended to be a lossy approximation.
+- Reuse the existing machinery of Starlark as much as possible, avoiding
+  inventing a special class of type values. As a consequence, any optimisations
+  for values like string/list are reused.
+- Provide a pleasing syntax.
+- Some degree of compatibility with Python, which allows types as expressions in
+  the same places Buck2 allows them (but with different meaning and different
+  checking).
+- And finally, a non-goal is to provide a complete type system capable of
+  representing every type invariant: it's intended to be a lossy approximation.

-In addition to these built-in types, records and enumerations are provided as special concepts.
+In addition to these built-in types, records and enumerations are provided as
+special concepts.

 ## Record types

@@ -54,28 +84,37 @@ A `record` type represents a set of named values, each with their own type.

 For example:

 ```python
-MyRecord = record(host=str.type, port=int.type)
+MyRecord = record(host=str, port=int)
 ```

-This above statement defines a record `MyRecord` with 2 fields, the first named `host` that must be of type `str.type`, and the second named `port` that must be of type `int.type`.
+The above statement defines a record `MyRecord` with 2 fields, the first named
+`host` that must be of type `str`, and the second named `port` that must be of
+type `int`.

 Now `MyRecord` is defined, it's possible to do the following:

-* Create values of this type with `MyRecord(host="localhost", port=80)`. It is a runtime error if any arguments are missed, of the wrong type, or if any unexpected arguments are given.
-* Get the type of the record suitable for a type annotation with `MyRecord.type`.
-* Get the fields of the record. For example, `v = MyRecord(host="localhost", port=80)` will provide `v.host == "localhost"` and `v.port == 80`. Similarly, `dir(v) == ["host", "port"]`.
+- Create values of this type with `MyRecord(host="localhost", port=80)`. It is a
+  runtime error if any arguments are missed, of the wrong type, or if any
+  unexpected arguments are given.
+- Get the type of the record suitable for a type annotation with `MyRecord`.
+- Get the fields of the record. For example,
+  `v = MyRecord(host="localhost", port=80)` will provide `v.host == "localhost"`
+  and `v.port == 80`. Similarly, `dir(v) == ["host", "port"]`.

-It is also possible to specify default values for parameters using the `field` function.
+It is also possible to specify default values for parameters using the `field`
+function.

 For example:

 ```python
-MyRecord = record(host=str.type, port=field(int.type, 80))
+MyRecord = record(host=str, port=field(int, 80))
 ```

-Now the `port` field can be omitted, defaulting to `80` is not present (for example, `MyRecord(host="localhost").port == 80`).
+Now the `port` field can be omitted, defaulting to `80` if not present (for
+example, `MyRecord(host="localhost").port == 80`).

-Records are stored deduplicating their field names, making them more memory efficient than dictionaries.
+Records are stored with their field names deduplicated, making them more
+memory-efficient than dictionaries.

 ## Enum types

@@ -84,17 +123,25 @@ The `enum` type represents one value picked from a set of values.

 For example:

 ```python
-MyEnum = enum("option1", "option2", True)
+MyEnum = enum("option1", "option2", "option3")
 ```

-This statement defines an enumeration `MyEnum` that consists of the three values `"option1"`, `"option2"` and `True`.
+This statement defines an enumeration `MyEnum` that consists of the three values
+`"option1"`, `"option2"` and `"option3"`.

 Now `MyEnum` is defined, it's possible to do the following:

-* Create values of this type with `MyEnum("option2")`. It is a runtime error if the argument is not one of the predeclared values of the enumeration.
-* Get the type of the enum suitable for a type annotation with `MyEnum.type`.
-* Given a value of the enum (for example, `v = MyEnum("option2")`), get the underlying value `v.value == "option2"` or the index in the enumeration `v.index = 1`.
-* Get a list of the values that make up the array with `MyEnum.values() == ["option1", "option2", True]`.
-* Treat `MyEnum` a bit like an array, with `len(MyEnum) == 3`, `MyEnum[1] == MyEnum("option2")` and iteration over enums `[x.value for x in MyEnum] == ["option1", "option2", True]`.
-
-Enumeration types store each value once, which are then efficiently referenced by enumeration values.
+- Create values of this type with `MyEnum("option2")`. It is a runtime error if
+  the argument is not one of the predeclared values of the enumeration.
+- Get the type of the enum suitable for a type annotation with `MyEnum`.
+- Given a value of the enum (for example, `v = MyEnum("option2")`), get the
+  underlying value `v.value == "option2"` or the index in the enumeration
+  `v.index == 1`.
+- Get a list of the values that make up the array with
+  `MyEnum.values() == ["option1", "option2", "option3"]`.
+- Treat `MyEnum` a bit like an array, with `len(MyEnum) == 3`,
+  `MyEnum[1] == MyEnum("option2")` and iteration over enums
+  `[x.value for x in MyEnum] == ["option1", "option2", "option3"]`.
+
+Enumeration types store each value once, which are then efficiently referenced
+by enumeration values.
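+
+As a short sketch combining the record and enum definitions above (the function
+and the port arithmetic are made up for illustration):
+
+```python
+MyRecord = record(host=str, port=field(int, 80))
+MyEnum = enum("option1", "option2", "option3")
+
+def make(host: str, opt: MyEnum) -> MyRecord:
+    # opt.index is 0, 1 or 2, following the declaration order of the enum
+    return MyRecord(host=host, port=8000 + opt.index)
+
+r = make("localhost", MyEnum("option2"))
+# r.host == "localhost" and r.port == 8001
+```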
diff --git a/starlark-rust/docs/values.md b/starlark-rust/docs/values.md
index 7938faa1e09e1..6bc7ae0a54e03 100644
--- a/starlark-rust/docs/values.md
+++ b/starlark-rust/docs/values.md
@@ -1,21 +1,29 @@
 # Value Representation

-:::warning
-Some of the information in this page is outdated. However, the explanation of the problem, and thought process behind it, remains useful. Of particular note is that a garbage collected heap is now used for `Value`.
-:::
+:::warning
+Some of the information in this page is outdated. However, the explanation of
+the problem, and thought process behind it, remains useful. Of particular note
+is that a garbage collected heap is now used for `Value`.
+:::

-This page explains how values are represented in the Starlark interpreter, ignoring some incidental details.
+This page explains how values are represented in the Starlark interpreter,
+ignoring some incidental details.

-Importantly, in Starlark, any identifiers from modules that you import are 'frozen', which means that, if you have a module that defines a list, then once you have imported the module, the list is now immutable. This design means that you can safely share imports with multiple users, without any expensive copying, and use the imports in parallel.
+Importantly, in Starlark, any identifiers from modules that you import are
+'frozen', which means that, if you have a module that defines a list, then once
+you have imported the module, the list is now immutable. This design means that
+you can safely share imports with multiple users, without any expensive copying,
+and use the imports in parallel.

 ## Frozen vs unfrozen values

 Values that are frozen are segregated from those that are not:

-* Frozen values are those you import, and (assuming no GC) are to be ref-counted atomically (so they can be shared by multiple threads) and never changed.
-* Unfrozen values are those which are local to the module, and, since modules execute single threaded, can be non-atomically ref-counted and mutated.
+- Frozen values are those you import, and (assuming no GC) are to be ref-counted
+  atomically (so they can be shared by multiple threads) and never changed.
+- Unfrozen values are those which are local to the module, and, since modules
+  execute single threaded, can be non-atomically ref-counted and mutated.

-Once a module has finished executing, it's values are frozen and can be reused freely.
+Once a module has finished executing, its values are frozen and can be reused
+freely.
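+
+As a concrete sketch of the user-visible effect of freezing (the module names
+here are illustrative):
+
+```python
+# lib.star
+xs = [1, 2, 3]
+
+# main.star
+load("lib.star", "xs")
+
+def touch():
+    xs.append(4)  # error: `xs` was frozen when `lib.star` finished executing
+```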
 ## Thaw-on-write

@@ -28,11 +36,19 @@ def my_list(x):
     return ([1,2,3], x)
 ```

-This above code returns the unfrozen list `[1,2,3]`. But while the list is unfrozen, and could be mutated by the caller, it probably won't be. To optimise this pattern, construct a frozen list when compiling `my_list` and insert a shared reference to it in the result. If anyone tries to mutate the list, it's explicitly unfrozen by copying it into a mutable variant (known as thawing the value).
+The above code returns the unfrozen list `[1,2,3]`. But while the list is
+unfrozen, and could be mutated by the caller, it probably won't be. To optimise
+this pattern, construct a frozen list when compiling `my_list` and insert a
+shared reference to it in the result. If anyone tries to mutate the list, it's
+explicitly unfrozen by copying it into a mutable variant (known as thawing the
+value).

 ## Immutable containers of mutable data

-There are some data types (such as functions and tuples) that are themselves immutable but contain mutable data. Importantly, all types that can be invoked as functions (for example, `lambda`, `def`, and `a.b()`) fall into this category. These types can be non-atomically ref-counted but can't be mutated.
+There are some data types (such as functions and tuples) that are themselves
+immutable but contain mutable data. Importantly, all types that can be invoked
+as functions (for example, `lambda`, `def`, and `a.b()`) fall into this
+category. These types can be non-atomically ref-counted but can't be mutated.

 ## Implementation in Rust

@@ -58,16 +74,18 @@ enum Mutable {
 }
 ```

-In the above code, both of the traits `dyn SimpleValue` `and dyn ComplexValue` enable you to convert to the other and have shared general value-like methods.
+In the above code, both of the traits `dyn SimpleValue` and `dyn ComplexValue`
+enable you to convert to the other and have shared general value-like methods.

 There are four types of value:

-* `Immutable`
-* `Pseudo` - immutable containers of mutable values.
-* `Mutable`/`Mutable`
-* `Mutable`/`ThawOnWrite` - immutable now but can be replaced with `Mutable`/`Mutable` if needed.
+- `Immutable`
+- `Pseudo` - immutable containers of mutable values.
+- `Mutable`/`Mutable`
+- `Mutable`/`ThawOnWrite` - immutable now but can be replaced with
+  `Mutable`/`Mutable` if needed.

 There are two root types:

-* `FrozenValue` - imported.
-* `Value` - defined locally.
+- `FrozenValue` - imported.
+- `Value` - defined locally.
diff --git a/starlark-rust/starlark/BUCK b/starlark-rust/starlark/BUCK
index bfeee4642926e..f3fb6d1ae4283 100644
--- a/starlark-rust/starlark/BUCK
+++ b/starlark-rust/starlark/BUCK
@@ -1,6 +1,5 @@
 load("@fbcode_macros//build_defs:native_rules.bzl", "alias", "buck_filegroup")
 load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library")
-load("@fbsource//tools/build_defs:glob_defs.bzl", "glob")

 oncall("build_infra")

@@ -12,16 +11,16 @@ alias(
 buck_filegroup(
     name = "testcases",
     srcs = glob([
-        "src/**/golden/*",
+        "src/**/golden/**",
         "src/**/*.golden",
         "testcases/**",
     ]),
+    copy = False,
 )

 rust_library(
     name = "starlark",
     srcs = glob(["src/**/*.rs"]),
-    doctests = False,  # FIXME
     rustc_flags = [
         "--cfg=rust_nightly",
     ],
@@ -45,7 +44,6 @@ rust_library(
         "fbsource//third-party/rust:derive_more",
         "fbsource//third-party/rust:either",
         "fbsource//third-party/rust:erased-serde",
-        "fbsource//third-party/rust:fancy-regex",
         "fbsource//third-party/rust:hashbrown",
         "fbsource//third-party/rust:inventory",
         "fbsource//third-party/rust:itertools",
@@ -55,6 +53,7 @@ rust_library(
         "fbsource//third-party/rust:num-traits",
         "fbsource//third-party/rust:once_cell",
         "fbsource//third-party/rust:paste",
+        "fbsource//third-party/rust:ref-cast",
         "fbsource//third-party/rust:regex",
         "fbsource//third-party/rust:rustyline",
         "fbsource//third-party/rust:serde",
diff --git a/starlark-rust/starlark/Cargo.toml b/starlark-rust/starlark/Cargo.toml
index 6e70f67a3860d..893de22c9ed56 100644
--- a/starlark-rust/starlark/Cargo.toml
+++ b/starlark-rust/starlark/Cargo.toml
@@ -1,56 +1,59 @@
 [package]
-name = "starlark"
-edition = "2021"
-version = "0.9.0"
-license = "Apache-2.0"
-description = "An implementation of the Starlark language in Rust."
-documentation = "https://docs.rs/starlark"
-repository = "https://github.com/facebookexperimental/starlark-rust"
 authors = [
     "Damien Martin-Guillerez ",
     "Stepan Koltsov ",
     "Facebook",
 ]
 build = "build.rs"
-keywords = ["starlark", "skylark", "bazel", "language", "interpreter"]
 categories = ["parser-implementations", "development-tools"]
+description = "An implementation of the Starlark language in Rust."
+documentation = "https://docs.rs/starlark" +edition = "2021" +keywords = ["starlark", "skylark", "bazel", "language", "interpreter"] +license = "Apache-2.0" +name = "starlark" +repository = "https://github.com/facebook/starlark-rust" +version = "0.12.0" [dependencies] anyhow = "1.0.65" +bumpalo = "3.8" +debugserver-types = "0.5.0" derivative = "2.2" -derive_more = "0.99" +derive_more.workspace = true display_container = { workspace = true } dupe = { workspace = true } +either = "1.8" erased-serde = "0.3.12" -itertools = "0.10" +hashbrown = { version = "0.14.3", features = ["raw"] } +inventory = "0.3.8" +itertools = "0.13.0" +maplit = "1.0.2" +memoffset = "0.6.4" +num-bigint = "0.4.3" +num-traits = "0.2" once_cell = "1.8" -bumpalo = "3.8" paste = "1.0" -either = "1.8" -static_assertions = "1.1.0" -memoffset = "0.6.4" -thiserror = "1.0.36" -starlark_derive = { version = "0.9.0", path = "../starlark_derive" } -starlark_map = { version = "0.9.0", path = "../starlark_map" } -starlark_syntax = { version = "0.9.0", path = "../starlark_syntax" } +ref-cast = "1.0.18" +regex = "1.5.4" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" -maplit = "1.0.2" -debugserver-types = "0.5.0" -hashbrown = { version = "0.12.3", features = ["raw"] } -textwrap = "0.11" -fancy-regex = "0.10.0" -regex = "1.5.4" +starlark_derive = { version = "0.12.0", path = "../starlark_derive" } +starlark_map = { version = "0.12.0", path = "../starlark_map" } +starlark_syntax = { version = "0.12.0", path = "../starlark_syntax" } +static_assertions = "1.1.0" strsim = "0.10.0" -num-bigint = "0.4.3" -num-traits = "0.2" -inventory = "0.3.8" +textwrap = "0.11" +thiserror = "1.0.36" allocative = { workspace = true, features = ["bumpalo", "num-bigint"] } cmp_any = { workspace = true } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] -rustyline = "11.0" +rustyline = "14.0" [dev-dependencies] rand = { version = "0.8.4", features = ["small_rng"] } + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ["cfg(rust_nightly)"] } diff --git a/starlark-rust/starlark/fuzz/BUCK b/starlark-rust/starlark/fuzz/BUCK index 395c7e710b6fa..99215ddb1ab5a 100644 --- a/starlark-rust/starlark/fuzz/BUCK +++ b/starlark-rust/starlark/fuzz/BUCK @@ -10,7 +10,6 @@ rust_library( crate_root = "fuzz_targets/starlark.rs", unittests = False, # There is no main deps = [ - "fbsource//third-party/rust:anyhow", "fbsource//third-party/rust:libfuzzer-sys", "//buck2/starlark-rust/starlark:starlark", ], diff --git a/starlark-rust/starlark/fuzz/Cargo.toml b/starlark-rust/starlark/fuzz/Cargo.toml index d077a8e70f4a9..5bf46d5162a33 100644 --- a/starlark-rust/starlark/fuzz/Cargo.toml +++ b/starlark-rust/starlark/fuzz/Cargo.toml @@ -1,14 +1,13 @@ [package] +edition = "2021" name = "starlark-fuzz" -version = "0.0.0" publish = false -edition = "2021" +version = "0.0.0" [package.metadata] cargo-fuzz = true [dependencies] -anyhow = "1.0.69" libfuzzer-sys = "0.4" starlark.path = ".." 
@@ -20,7 +19,7 @@ members = ["."]
 debug = 1

 [[bin]]
+doc = false
 name = "starlark"
 path = "fuzz_targets/starlark.rs"
 test = false
-doc = false
diff --git a/starlark-rust/starlark/fuzz/fuzz_targets/starlark.rs b/starlark-rust/starlark/fuzz/fuzz_targets/starlark.rs
index 375cd5849ce8d..df5a75242e98b 100644
--- a/starlark-rust/starlark/fuzz/fuzz_targets/starlark.rs
+++ b/starlark-rust/starlark/fuzz/fuzz_targets/starlark.rs
@@ -24,7 +24,7 @@ use starlark::eval::Evaluator;
 use starlark::syntax::AstModule;
 use starlark::syntax::Dialect;

-fn run_arbitrary_starlark_err(content: &str) -> anyhow::Result<String> {
+fn run_arbitrary_starlark_err(content: &str) -> starlark::Result<String> {
     let ast: AstModule =
         AstModule::parse("hello_world.star", content.to_owned(), &Dialect::Standard)?;
     let globals: Globals = Globals::standard();
@@ -51,5 +51,5 @@ fn run_arbitrary_starlark(content: &str) -> String {
 }

 fuzz_target!(|content: &str| {
-    let _ = run_arbitrary_starlark(content);
+    let _ignore = run_arbitrary_starlark(content);
 });
diff --git a/starlark-rust/starlark/src/__derive_refs.rs b/starlark-rust/starlark/src/__derive_refs.rs
new file mode 100644
index 0000000000000..6355a95f7ae7e
--- /dev/null
+++ b/starlark-rust/starlark/src/__derive_refs.rs
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2018 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#![doc(hidden)]
+
+/// __derive_refs allows us to reference other crates in starlark_derive without users needing to be
+/// aware of those dependencies. We make them public here and then can reference them like
+/// `starlark::__derive_refs::foo`.
+
+pub mod serde {
+    pub use serde::ser::Error;
+    pub use serde::Serialize;
+    pub use serde::Serializer;
+}
+pub use inventory;
+pub mod components;
+pub mod invoke_macro_error;
+pub mod param_spec;
+pub mod parse_args;
+pub mod sig;
diff --git a/starlark-rust/starlark/src/__derive_refs/components.rs b/starlark-rust/starlark/src/__derive_refs/components.rs
new file mode 100644
index 0000000000000..42f130ce3d298
--- /dev/null
+++ b/starlark-rust/starlark/src/__derive_refs/components.rs
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2018 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +use dupe::Dupe; + +use crate::__derive_refs::param_spec::NativeCallableParam; +use crate::__derive_refs::param_spec::NativeCallableParamDefaultValue; +use crate::__derive_refs::param_spec::NativeCallableParamSpec; +use crate::docs::DocFunction; +use crate::docs::DocItem; +use crate::docs::DocMember; +use crate::docs::DocParam; +use crate::docs::DocParams; +use crate::docs::DocStringKind; +use crate::docs::DocType; +use crate::eval::runtime::params::display::PARAM_FMT_OPTIONAL; +use crate::typing::Ty; + +/// A wrapper for the parameters to `GlobalsBuilder::set_function` and `MethodBuilder::set_method` +pub struct NativeCallableComponents { + pub speculative_exec_safe: bool, + pub rust_docstring: Option<&'static str>, + pub param_spec: NativeCallableParamSpec, + pub return_type: Ty, +} + +impl NativeCallableComponents { + fn doc_params(&self) -> DocParams { + fn doc_param(p: &NativeCallableParam) -> DocParam { + let NativeCallableParam { name, ty, required } = p; + DocParam { + name: (*name).to_owned(), + docs: None, + typ: ty.dupe(), + default_value: match required { + None => None, + Some(NativeCallableParamDefaultValue::Optional) => { + Some(PARAM_FMT_OPTIONAL.to_owned()) + } + Some(NativeCallableParamDefaultValue::Value(v)) => Some(v.to_value().to_repr()), + }, + } + } + + DocParams { + pos_only: self.param_spec.pos_only.iter().map(doc_param).collect(), + pos_or_named: self.param_spec.pos_or_named.iter().map(doc_param).collect(), + args: self.param_spec.args.as_ref().map(doc_param), + named_only: self.param_spec.named_only.iter().map(doc_param).collect(), + kwargs: self.param_spec.kwargs.as_ref().map(doc_param), + } + } + + pub(crate) fn into_docs(self, as_type: Option<(Ty, DocType)>) -> DocItem { + let func_docs = DocFunction::from_docstring( + DocStringKind::Rust, + self.doc_params(), + self.return_type.clone(), + self.rust_docstring, + ); + match as_type { + Some((_, ty_docs)) => DocItem::Type(DocType { + constructor: Some(func_docs), + ..ty_docs + }), + None => DocItem::Member(DocMember::Function(func_docs)), + } + } +} diff --git a/starlark-rust/starlark/src/__derive_refs/invoke_macro_error.rs b/starlark-rust/starlark/src/__derive_refs/invoke_macro_error.rs new file mode 100644 index 0000000000000..caf9073fafbf8 --- /dev/null +++ b/starlark-rust/starlark/src/__derive_refs/invoke_macro_error.rs @@ -0,0 +1,38 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/// Trait used to convert error returned from native function into `starlark::Error`. +pub trait InvokeMacroError { + fn into_starlark_error(self) -> crate::Error; +} + +/// This implementation should not be used by starlark itself: +/// starlark native functions should not return `anyhow::Error`, +/// and should not convert to `ErrorKind::Native`. 
+impl InvokeMacroError for anyhow::Error {
+    #[cold]
+    fn into_starlark_error(self) -> crate::Error {
+        crate::Error::new_native(self)
+    }
+}
+
+impl InvokeMacroError for crate::Error {
+    #[cold]
+    fn into_starlark_error(self) -> crate::Error {
+        self
+    }
+}
diff --git a/starlark-rust/starlark/src/__derive_refs/param_spec.rs b/starlark-rust/starlark/src/__derive_refs/param_spec.rs
new file mode 100644
index 0000000000000..f1cf12e18ee98
--- /dev/null
+++ b/starlark-rust/starlark/src/__derive_refs/param_spec.rs
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2018 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use dupe::Dupe;
+
+use crate::typing::callable_param::ParamIsRequired;
+use crate::typing::macro_support::unpack_args_item_ty;
+use crate::typing::macro_support::unpack_kwargs_value_ty;
+use crate::typing::ParamSpec;
+use crate::typing::Ty;
+use crate::util::arc_str::ArcStr;
+use crate::values::FrozenValue;
+
+pub enum NativeCallableParamDefaultValue {
+    /// Value is used for documentation only, not when the function is called.
+    Value(FrozenValue),
+    Optional,
+}
+
+pub struct NativeCallableParam {
+    pub name: &'static str,
+    /// Type of the parameter.
+    /// For `*args` is the type of the element, and for `**kwargs` is the type of the value.
+    pub ty: Ty,
+    /// `None` means the parameter is required.
+    pub required: Option<NativeCallableParamDefaultValue>,
+}
+
+impl NativeCallableParam {
+    pub fn args(name: &'static str, param_ty: Ty) -> NativeCallableParam {
+        NativeCallableParam {
+            name,
+            ty: unpack_args_item_ty(param_ty),
+            required: None,
+        }
+    }
+
+    pub fn kwargs(name: &'static str, param_ty: Ty) -> NativeCallableParam {
+        NativeCallableParam {
+            name,
+            ty: unpack_kwargs_value_ty(param_ty),
+            required: None,
+        }
+    }
+
+    fn is_required(&self) -> ParamIsRequired {
+        match self.required {
+            None => ParamIsRequired::Yes,
+            Some(_) => ParamIsRequired::No,
+        }
+    }
+}
+
+pub struct NativeCallableParamSpec {
+    pub pos_only: Vec<NativeCallableParam>,
+    pub pos_or_named: Vec<NativeCallableParam>,
+    pub args: Option<NativeCallableParam>,
+    pub named_only: Vec<NativeCallableParam>,
+    pub kwargs: Option<NativeCallableParam>,
+}
+
+impl NativeCallableParamSpec {
+    /// For a function accepting raw `&Arguments`.
+    pub fn for_arguments() -> NativeCallableParamSpec {
+        NativeCallableParamSpec {
+            pos_only: Vec::new(),
+            pos_or_named: Vec::new(),
+            args: Some(NativeCallableParam::args("args", Ty::any())),
+            named_only: Vec::new(),
+            kwargs: Some(NativeCallableParam::kwargs("kwargs", Ty::any())),
+        }
+    }
+
+    pub(crate) fn param_spec(&self) -> ParamSpec {
+        ParamSpec::new_parts(
+            self.pos_only.iter().map(|p| (p.is_required(), p.ty.dupe())),
+            self.pos_or_named
+                .iter()
+                .map(|p| (ArcStr::new_static(p.name), p.is_required(), p.ty.dupe())),
+            self.args.as_ref().map(|p| p.ty.dupe()),
+            self.named_only
+                .iter()
+                .map(|p| (ArcStr::new_static(p.name), p.is_required(), p.ty.dupe())),
+            self.kwargs.as_ref().map(|p| p.ty.dupe()),
+        )
+        .unwrap()
+    }
+}
diff --git a/starlark-rust/starlark/src/__derive_refs/parse_args.rs b/starlark-rust/starlark/src/__derive_refs/parse_args.rs
new file mode 100644
index 0000000000000..8a944d75f95bd
--- /dev/null
+++ b/starlark-rust/starlark/src/__derive_refs/parse_args.rs
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2018 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use crate::eval::Arguments;
+use crate::eval::ParametersSpec;
+use crate::values::FrozenValue;
+use crate::values::Heap;
+use crate::values::UnpackValue;
+use crate::values::Value;
+use crate::values::ValueError;
+
+/// Collect `N` arguments.
+///
+/// This function is called by generated code.
+#[inline]
+pub fn parse_signature<'v, const N: usize>(
+    parser: &ParametersSpec<FrozenValue>,
+    args: &Arguments<'v, '_>,
+    heap: &'v Heap,
+) -> crate::Result<[Option<Value<'v>>; N]> {
+    parser.collect_into(args, heap)
+}
+
+/// Parse positional-only arguments, required and optional.
+#[inline(always)]
+pub fn parse_positional<'v, const R: usize, const O: usize>(
+    args: &Arguments<'v, '_>,
+    heap: &'v Heap,
+) -> crate::Result<([Value<'v>; R], [Option<Value<'v>>; O])> {
+    args.no_named_args()?;
+    args.optional(heap)
+}
+
+#[inline(always)]
+pub fn parse_positional_kwargs_alloc<'v, 'a, const R: usize, const O: usize>(
+    args: &'a Arguments<'v, 'a>,
+    heap: &'v Heap,
+) -> crate::Result<([Value<'v>; R], [Option<Value<'v>>; O], Value<'v>)> {
+    let (required, optional) = args.optional(heap)?;
+    let kwargs = args.names_map()?;
+    let kwargs = heap.alloc(kwargs);
+    Ok((required, optional, kwargs))
+}
+
+/// Utility for checking a `this` parameter matches what you expect.
+#[inline]
+pub fn check_this<'v, T: UnpackValue<'v>>(this: Value<'v>) -> anyhow::Result<T> {
+    T::unpack_named_param(this, "this")
+}
+
+/// Utility for checking a required parameter matches what you expect.
+#[inline]
+pub fn check_required<'v, T: UnpackValue<'v>>(
+    name: &str,
+    x: Option<Value<'v>>,
+) -> anyhow::Result<T> {
+    let x = x.ok_or_else(|| ValueError::MissingRequired(name.to_owned()))?;
+    T::unpack_named_param(x, name)
+}
+
+/// Utility for checking an optional parameter matches what you expect.
+#[inline]
+pub fn check_optional<'v, T: UnpackValue<'v>>(
+    name: &str,
+    x: Option<Value<'v>>,
+) -> anyhow::Result<Option<T>> {
+    match x {
+        None => Ok(None),
+        Some(x) => Ok(Some(T::unpack_named_param(x, name)?)),
+    }
+}
+
+#[inline]
+pub fn check_defaulted<'v, T: UnpackValue<'v>>(
+    name: &str,
+    x: Option<Value<'v>>,
+    default: impl FnOnce() -> T,
+) -> anyhow::Result<T> {
+    Ok(check_optional(name, x)?.unwrap_or_else(default))
+}
+
+/// We already know the parameter is set, so we just unpack it.
+#[inline]
+pub fn check_unpack<'v, T: UnpackValue<'v>>(name: &str, x: Value<'v>) -> anyhow::Result<T> {
+    T::unpack_named_param(x, name)
+}
diff --git a/starlark-rust/starlark/src/__derive_refs/sig.rs b/starlark-rust/starlark/src/__derive_refs/sig.rs
new file mode 100644
index 0000000000000..287a14d1c2cc7
--- /dev/null
+++ b/starlark-rust/starlark/src/__derive_refs/sig.rs
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2018 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use crate::eval::ParametersSpec;
+use crate::eval::ParametersSpecParam;
+use crate::values::FrozenValue;
+
+pub enum NativeSigArg {
+    Required(&'static str),
+    Optional(&'static str),
+    Defaulted(&'static str, FrozenValue),
+}
+
+impl NativeSigArg {
+    fn param(&self) -> (&str, ParametersSpecParam<FrozenValue>) {
+        match self {
+            NativeSigArg::Required(name) => (name, ParametersSpecParam::Required),
+            NativeSigArg::Optional(name) => (name, ParametersSpecParam::Optional),
+            NativeSigArg::Defaulted(name, value) => (name, ParametersSpecParam::Defaulted(*value)),
+        }
+    }
+}
+
+pub fn parameter_spec(
+    name: &'static str,
+    pos_only: &[NativeSigArg],
+    pos_or_named: &[NativeSigArg],
+    args: bool,
+    named_only: &[NativeSigArg],
+    kwargs: bool,
+) -> ParametersSpec<FrozenValue> {
+    ParametersSpec::new_parts(
+        name,
+        pos_only.iter().map(NativeSigArg::param),
+        pos_or_named.iter().map(NativeSigArg::param),
+        args,
+        named_only.iter().map(NativeSigArg::param),
+        kwargs,
+    )
+}
+
+/// `ParametersSpec` for a function which accepts `&Arguments`.
+pub fn parameter_spec_for_arguments(name: &'static str) -> ParametersSpec<FrozenValue> {
+    parameter_spec(name, &[], &[], true, &[], true)
+}
diff --git a/starlark-rust/starlark/src/analysis.rs b/starlark-rust/starlark/src/analysis.rs
new file mode 100644
index 0000000000000..0559658a5adae
--- /dev/null
+++ b/starlark-rust/starlark/src/analysis.rs
@@ -0,0 +1,310 @@
+/*
+ * Copyright 2019 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Linter.
+
+use std::collections::HashSet;
+
+pub use lint_message::LintMessage;
+pub use types::EvalMessage;
+pub use types::EvalSeverity;
+pub use types::Lint;
+pub use unused_loads::remove::remove_unused_loads;
+
+use crate::analysis::types::LintT;
+use crate::syntax::AstModule;
+
+mod dubious;
+pub mod find_call_name;
+mod flow;
+mod incompatible;
+mod lint_message;
+mod names;
+mod performance;
+mod types;
+mod underscore;
+mod unused_loads;
+
+/// Run the linter.
+pub trait AstModuleLint {
+    /// Run a static linter over the module. If the complete set of global variables is known,
+    /// it can be passed as the `globals` argument, resulting in name-resolution lint errors.
+    /// The precise checks run by the linter are not considered stable between versions.
+    fn lint(&self, globals: Option<&HashSet<String>>) -> Vec<Lint>;
+}
+
+impl AstModuleLint for AstModule {
+    fn lint(&self, globals: Option<&HashSet<String>>) -> Vec<Lint> {
+        let mut res = Vec::new();
+        res.extend(flow::lint(self).into_iter().map(LintT::erase));
+        res.extend(incompatible::lint(self).into_iter().map(LintT::erase));
+        res.extend(dubious::lint(self).into_iter().map(LintT::erase));
+        res.extend(names::lint(self, globals).into_iter().map(LintT::erase));
+        res.extend(underscore::lint(self).into_iter().map(LintT::erase));
+        res.extend(performance::lint(self).into_iter().map(LintT::erase));
+        res.retain(|issue| !self.is_suppressed(&issue.short_name, issue.location.span));
+        res
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::codemap::Pos;
+    use crate::syntax::Dialect;
+
+    fn module(x: &str) -> AstModule {
+        AstModule::parse("X", x.to_owned(), &Dialect::AllOptionsInternal).unwrap()
+    }
+
+    #[test]
+    fn test_lint_suppressions_keyword_matching() {
+        let m = module(
+            r#"
+def good1() -> str: #starlark-lint-disable missing-return
+    pass
+def bad1() -> str: # invalid suppression starlark-lint-disable missing-return
+    pass
+def bad2() -> str: #starlark-lint-disable-also-invalid missing-return
+    pass
+def good2() -> str:
+    pass # starlark-lint-disable ,,missing-return, misplaced-load , missing-return ,,
+def bad3() -> str:
+    pass # # starlark-lint-disable missing-return # invalid prefix
+"#,
+        );
+        let res = m.lint(None);
+        assert_eq!(res.len(), 3);
+        assert!(res[0].problem.contains("bad1"));
+        assert!(res[1].problem.contains("bad2"));
+        assert!(res[2].problem.contains("bad3"));
+    }
+
+    #[test]
+    fn test_lint_suppressions_fn_with_many_issues() {
+        let m = module(
+            r#"
+def bad1(items):
+    a = all(items)
+    b = all({"a": a for a in []})
+    c = any(list({}))
+
+# suppressing issues fn-wide doesnt work
+# starlark-lint-disable unused-assign, eager-and-inefficient-bool-check
+def bad2(items):
+    d = all(items)
+    e = all({"e": e for e in []})
+    f = any(list({}))
+
+def good1(items):
+    g = all(items) # starlark-lint-disable unused-assign
+    # starlark-lint-disable unused-assign
+    # starlark-lint-disable eager-and-inefficient-bool-check
+    h = all({"h": h for h in []})
+    # starlark-lint-disable inefficient-bool-check
+    i = any(list({})) # starlark-lint-disable unused-assign
+"#,
+        );
+        let res = m.lint(None);
+        assert_eq!(res.len(), 10);
+        assert!(res[0].problem.contains("Unused assignment of `a`"));
+        assert!(res[1].problem.contains("`b`"));
+        assert!(res[2].problem.contains("`c`"));
+        assert!(res[3].problem.contains("`d`"));
+        assert!(res[4].problem.contains("`e`"));
+        assert!(res[5].problem.contains("`f`"));
+        
assert!(res[6].original.contains("all({\"a\": a for a in []})")); + assert!( + res[7] + .problem + .contains("`any(list({}))` allocates a new list") + ); + assert!(res[8].original.contains("all({\"e\": e for e in []})")); + assert!( + res[9] + .problem + .contains("`any(list({}))` allocates a new list") + ); + } + + #[test] + fn test_lint_suppressions_preceding_whitespace() { + let m = module( + r#" +def bad(): + a = 1 + +def good(): + # starlark-lint-disable unused-assign + # extra comment + b = 1 +"#, + ); + let res = m.lint(None); + assert_eq!(res.len(), 1); + assert!(res[0].problem.contains("Unused assignment of `a`")); + } + + #[test] + fn test_lint_suppressions_with_space_separator() { + let m = module( + r#" +def good(): + # starlark-lint-disable unused-assign FIXME + b = 1 +"#, + ); + let res = m.lint(None); + assert!(res.is_empty()); + } + + #[test] + fn test_lint_suppressions_multiline_span() { + let m = module( + r#" +def bad() -> str: + pass +def good() -> str: + pass # starlark-lint-disable missing-return +"#, + ); + let res = m.lint(None); + assert_eq!(res.len(), 1); + assert!(res[0].problem.contains("bad")); + } + + #[test] + fn test_lint_suppressions_small_span() { + let m = module( + r#" +load("@cell//t:rust_library.bzl", "rust_library") # starlark-lint-disable unused-load + +def bad() -> str: + pass +"#, + ); + let res = m.lint(None); + assert_eq!(res.len(), 1); + assert!(res[0].problem.contains("bad")); + } + + #[test] + fn test_lint_suppressions_data() { + let m = module( + r#" +{no3: 1, no4: 2, yes: 3, no3: 3} + +# starlark-lint-disable duplicate-key +{no3: 1, no4: 2, yes: 3, no3: 3} + +{no3: 1, no4: 2, yes: 3, no3: 3} # starlark-lint-disable duplicate-key + +{ no3: 1, + no4: 2, + yes: 3, + # inline data suppression of one key doesnt work + # starlark-lint-disable duplicate-key + no3: 3 +} + +{ no3: 1, # starlark-lint-disable duplicate-key + no4: 2, + yes: 3, + # each offender has to be disabled + # starlark-lint-disable duplicate-key + no3: 3 +} + +# starlark-lint-disable duplicate-key +{ no3: 1, + no4: 2, + yes: 3, + no3: 3 +} +"#, + ); + let res = m.lint(None); + assert_eq!(res.len(), 2); + assert_eq!(res[0].location.span.begin(), Pos::new(2)); + assert_eq!(res[1].location.span.begin(), Pos::new(183)); + } + + #[test] + fn test_lint_suppressions_line_before() { + let m = module( + r#" +# starlark-lint-disable unused-load +load("@cell//buck/lib:rust_library.bzl", "rust_library") +load("@cell//buck/lib:rust_binary.bzl", "rust_binary") + +def bad1() -> str: + pass + +# starlark-lint-disable missing-return +def good1() -> str: + pass + +# starlark-lint-disable missing-return +# must not be on the last line of a block of comments +def good2() -> str: + pass + +# suppressions accumulate in a block of comments, +# starlark-lint-disable missing-return, unreachable +# and you can put other comments between +# starlark-lint-disable unused-load +def good3() -> str: + pass +"#, + ); + let res = m.lint(None); + assert_eq!(res.len(), 2); + assert!(res[0].problem.contains("bad1")); + assert!(res[1].problem.contains("rust_binary")); + } + + #[test] + fn test_lint_suppressions_line_before_windows_newlines() { + let src = module( + "\ + # starlark-lint-disable unused-load\r\n\ + load('@cell//buck/lib:rust_library.bzl', 'rust_library')", + ); + let res = src.lint(None); + assert!(res.is_empty()); + } + + #[test] + fn test_lint_suppressions_inside_fn() { + let m = module( + r#" +def bad1() -> str: + pass + +def good1() -> str: + # starlark-lint-disable missing-return + pass + +def 
good2() -> str: + pass # starlark-lint-disable missing-return +"#, + ); + let res = m.lint(None); + assert_eq!(res.len(), 1); + assert!(res[0].problem.contains("bad1")); + } +} diff --git a/starlark-rust/starlark/src/analysis/dubious.rs b/starlark-rust/starlark/src/analysis/dubious.rs index 1fedc2f09fd53..3a2a9c9ed0fa8 100644 --- a/starlark-rust/starlark/src/analysis/dubious.rs +++ b/starlark-rust/starlark/src/analysis/dubious.rs @@ -32,8 +32,8 @@ use crate::codemap::CodeMap; use crate::codemap::FileSpan; use crate::codemap::Span; use crate::syntax::AstModule; -use crate::values::num::value::NumRef; -use crate::values::types::int_or_big::StarlarkInt; +use crate::values::types::int::int_or_big::StarlarkInt; +use crate::values::types::num::value::NumRef; #[derive(Error, Debug)] pub(crate) enum Dubious { @@ -151,7 +151,7 @@ mod tests { use crate::syntax::Dialect; fn module(x: &str) -> AstModule { - AstModule::parse("X", x.to_owned(), &Dialect::Extended).unwrap() + AstModule::parse("X", x.to_owned(), &Dialect::AllOptionsInternal).unwrap() } impl Dubious { diff --git a/starlark-rust/starlark/src/analysis/find_call_name.rs b/starlark-rust/starlark/src/analysis/find_call_name.rs index fa3c34933f55e..3ee9d12ff18f7 100644 --- a/starlark-rust/starlark/src/analysis/find_call_name.rs +++ b/starlark-rust/starlark/src/analysis/find_call_name.rs @@ -53,16 +53,22 @@ impl AstModuleFindCallName for AstModule { .. } => { if let Expr::Identifier(_) = &identifier.node { - let found = arguments.iter().find_map(|argument| match &argument.node { - Argument::Named( - arg_name, - Spanned { - node: Expr::Literal(AstLiteral::String(s)), - .. - }, - ) if arg_name.node == "name" && s.node == name => Some(identifier.span), - _ => None, - }); + let found = + arguments + .args + .iter() + .find_map(|argument| match &argument.node { + Argument::Named( + arg_name, + Spanned { + node: Expr::Literal(AstLiteral::String(s)), + .. 
+ }, + ) if arg_name.node == "name" && s.node == name => { + Some(identifier.span) + } + _ => None, + }); if found.is_some() { *ret = found; } @@ -79,7 +85,7 @@ impl AstModuleFindCallName for AstModule { } #[cfg(test)] -mod test { +mod tests { use starlark_syntax::syntax::module::AstModuleFields; use crate::analysis::find_call_name::AstModuleFindCallName; @@ -99,7 +105,12 @@ def x(name = "foo_name"): pass "#; - let module = AstModule::parse("foo.star", contents.to_owned(), &Dialect::Extended).unwrap(); + let module = AstModule::parse( + "foo.star", + contents.to_owned(), + &Dialect::AllOptionsInternal, + ) + .unwrap(); assert_eq!( Some(ResolvedSpan { diff --git a/starlark-rust/starlark/src/analysis/flow.rs b/starlark-rust/starlark/src/analysis/flow.rs index a6eb95e38b184..1e4dcf3bbd690 100644 --- a/starlark-rust/starlark/src/analysis/flow.rs +++ b/starlark-rust/starlark/src/analysis/flow.rs @@ -334,7 +334,7 @@ mod tests { use crate::syntax::Dialect; fn module(x: &str) -> AstModule { - AstModule::parse("X", x.to_owned(), &Dialect::Extended).unwrap() + AstModule::parse("X", x.to_owned(), &Dialect::AllOptionsInternal).unwrap() } impl FlowIssue { diff --git a/starlark-rust/starlark/src/analysis/incompatible.rs b/starlark-rust/starlark/src/analysis/incompatible.rs index 50fa910a93062..ca1d42b5dcced 100644 --- a/starlark-rust/starlark/src/analysis/incompatible.rs +++ b/starlark-rust/starlark/src/analysis/incompatible.rs @@ -87,7 +87,7 @@ fn match_bad_type_equality( // Return true if this expression matches `type($x)` fn is_type_call(x: &AstExpr) -> bool { match &**x { - Expr::Call(fun, args) if args.len() == 1 => match &***fun { + Expr::Call(fun, args) if args.args.len() == 1 => match &***fun { Expr::Identifier(x) => x.node.ident == "type", _ => false, }, @@ -216,7 +216,7 @@ mod tests { use crate::syntax::Dialect; fn module(x: &str) -> AstModule { - AstModule::parse("bad.py", x.to_owned(), &Dialect::Extended).unwrap() + AstModule::parse("bad.py", x.to_owned(), &Dialect::AllOptionsInternal).unwrap() } #[test] diff --git a/starlark-rust/starlark/src/analysis/mod.rs b/starlark-rust/starlark/src/analysis/mod.rs deleted file mode 100644 index fbfc572596b55..0000000000000 --- a/starlark-rust/starlark/src/analysis/mod.rs +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! Linter. - -use std::collections::HashSet; - -pub use lint_message::LintMessage; -pub use types::EvalMessage; -pub use types::EvalSeverity; -pub use types::Lint; -pub use unused_loads::remove::remove_unused_loads; - -use crate::analysis::types::LintT; -use crate::syntax::AstModule; - -mod dubious; -pub mod find_call_name; -mod flow; -mod incompatible; -mod lint_message; -mod names; -mod performance; -mod types; -mod underscore; -mod unused_loads; - -/// Run the linter. -pub trait AstModuleLint { - /// Run a static linter over the module. 
If the complete set of global variables is known, - /// it can be passed as the `globals` argument, resulting in name-resolution lint errors. - /// The precise checks run by the linter are not considered stable between versions. - fn lint(&self, globals: Option<&HashSet<String>>) -> Vec<Lint>; -} - -impl AstModuleLint for AstModule { - fn lint(&self, globals: Option<&HashSet<String>>) -> Vec<Lint> { - let mut res = Vec::new(); - res.extend(flow::lint(self).into_iter().map(LintT::erase)); - res.extend(incompatible::lint(self).into_iter().map(LintT::erase)); - res.extend(dubious::lint(self).into_iter().map(LintT::erase)); - res.extend(names::lint(self, globals).into_iter().map(LintT::erase)); - res.extend(underscore::lint(self).into_iter().map(LintT::erase)); - res.extend(performance::lint(self).into_iter().map(LintT::erase)); - res - } -} diff --git a/starlark-rust/starlark/src/analysis/names.rs b/starlark-rust/starlark/src/analysis/names.rs index 0de189a3fe90d..2c60ca4bf99d3 100644 --- a/starlark-rust/starlark/src/analysis/names.rs +++ b/starlark-rust/starlark/src/analysis/names.rs @@ -579,7 +579,7 @@ mod tests { } fn module(x: &str) -> AstModule { - AstModule::parse("X", x.to_owned(), &Dialect::Extended).unwrap() + AstModule::parse("X", x.to_owned(), &Dialect::AllOptionsInternal).unwrap() } #[test] diff --git a/starlark-rust/starlark/src/analysis/performance.rs b/starlark-rust/starlark/src/analysis/performance.rs index 711816c512fd4..13aa39eea75bc 100644 --- a/starlark-rust/starlark/src/analysis/performance.rs +++ b/starlark-rust/starlark/src/analysis/performance.rs @@ -58,7 +58,7 @@ impl LintWarning for Performance { fn match_dict_copy(codemap: &CodeMap, x: &AstExpr, res: &mut Vec<LintT<Performance>>) { // If we see `dict(**x)` suggest `dict(x)` match &**x { - Expr::Call(fun, args) if args.len() == 1 => match (&***fun, &*args[0]) { + Expr::Call(fun, args) if args.args.len() == 1 => match (&***fun, &*args.args[0]) { (Expr::Identifier(f), Argument::KwArgs(arg)) if f.node.ident == "dict" => { res.push(LintT::new( codemap, @@ -74,7 +74,7 @@ fn match_dict_copy(codemap: &CodeMap, x: &AstExpr, res: &mut Vec<LintT<Performance>>) { match &**x { - Expr::Call(fun, args) if args.len() == 1 => match (&***fun, &*args[0]) { + Expr::Call(fun, args) if args.args.len() == 1 => match (&***fun, &*args.args[0]) { (Expr::Identifier(f), Argument::Positional(arg)) if f.node.ident == "any" || f.node.ident == "all" => { @@ -136,7 +136,7 @@ mod tests { use crate::syntax::Dialect; fn module(x: &str) -> AstModule { - AstModule::parse("bad.bzl", x.to_owned(), &Dialect::Extended).unwrap() + AstModule::parse("bad.bzl", x.to_owned(), &Dialect::AllOptionsInternal).unwrap() } #[test] diff --git a/starlark-rust/starlark/src/analysis/types.rs b/starlark-rust/starlark/src/analysis/types.rs index 32bc3c3588e4e..c42b12ab3e156 100644 --- a/starlark-rust/starlark/src/analysis/types.rs +++ b/starlark-rust/starlark/src/analysis/types.rs @@ -21,7 +21,6 @@ use std::path::Path; use dupe::Dupe; use serde::Serialize; -use starlark_syntax::diagnostic::Diagnostic; use crate::codemap::CodeMap; use crate::codemap::FileSpan; @@ -146,37 +145,44 @@ impl Display for EvalMessage { } impl EvalMessage { - /// Convert from an `anyhow::Error`, including some type checking, to an `EvalMessage` - pub fn from_anyhow(file: &Path, x: &anyhow::Error) -> Self { - match x.downcast_ref::<Diagnostic>() { - Some( - d @ Diagnostic { - message, - span: Some(span), .. 
- }, - ) => { - let original = span.source_span().to_owned(); - let resolved_span = span.resolve_span(); - Self { - path: span.filename().to_owned(), - span: Some(resolved_span), - severity: EvalSeverity::Error, - name: "error".to_owned(), - description: format!("{:#}", message), - full_error_with_span: Some(d.to_string()), - original: Some(original), - } - } - _ => Self { - path: file.display().to_string(), - span: None, - severity: EvalSeverity::Error, - name: "error".to_owned(), - description: format!("{:#}", x), - full_error_with_span: None, - original: None, - }, + /// Produce an `EvalMessage` from a `starlark::Error` + pub fn from_error(file: &Path, err: &crate::Error) -> Self { + if let Some(span) = err.span() { + return Self::from_diagnostic(span, err.without_diagnostic(), err); + } + Self::from_any_error(file, err) + } + + /// Create an `EvalMessage` from any kind of error + /// + /// Prefer to use `from_error` if at all possible. + pub fn from_any_error(file: &Path, x: &impl std::fmt::Display) -> Self { + Self { + path: file.display().to_string(), + span: None, + severity: EvalSeverity::Error, + name: "error".to_owned(), + description: format!("{:#}", x), + full_error_with_span: None, + original: None, + } + } + + fn from_diagnostic( + span: &FileSpan, + message: impl std::fmt::Display, + full_error: impl std::fmt::Display, + ) -> Self { + let original = span.source_span().to_owned(); + let resolved_span = span.resolve_span(); + Self { + path: span.filename().to_owned(), + span: Some(resolved_span), + severity: EvalSeverity::Error, + name: "error".to_owned(), + description: format!("{:#}", message), + full_error_with_span: Some(full_error.to_string()), + original: Some(original), } } } diff --git a/starlark-rust/starlark/src/analysis/underscore.rs b/starlark-rust/starlark/src/analysis/underscore.rs index bccce0f99407c..9af0c76223308 100644 --- a/starlark-rust/starlark/src/analysis/underscore.rs +++ b/starlark-rust/starlark/src/analysis/underscore.rs @@ -160,7 +160,6 @@ fn use_ignored(codemap: &CodeMap, x: &AstStmt, res: &mut Vec AstModule { - AstModule::parse("X", x.to_owned(), &Dialect::Extended).unwrap() + AstModule::parse("X", x.to_owned(), &Dialect::AllOptionsInternal).unwrap() } #[test] diff --git a/starlark-rust/starlark/src/analysis/unused_loads/mod.rs b/starlark-rust/starlark/src/analysis/unused_loads.rs similarity index 100% rename from starlark-rust/starlark/src/analysis/unused_loads/mod.rs rename to starlark-rust/starlark/src/analysis/unused_loads.rs diff --git a/starlark-rust/starlark/src/analysis/unused_loads/find.rs b/starlark-rust/starlark/src/analysis/unused_loads/find.rs index 3f4b10f9e2f30..99cc433e80713 100644 --- a/starlark-rust/starlark/src/analysis/unused_loads/find.rs +++ b/starlark-rust/starlark/src/analysis/unused_loads/find.rs @@ -17,7 +17,6 @@ use std::collections::HashMap; -use anyhow::Context; use dupe::Dupe; use starlark_syntax::codemap::CodeMap; use starlark_syntax::codemap::FileSpanRef; @@ -72,12 +71,12 @@ fn has_unused_marker_in_range(span: FileSpanRef) -> bool { pub(crate) fn find_unused_loads( name: &str, program: &str, -) -> anyhow::Result<(CodeMap, Vec<UnusedLoad>)> { - let module = AstModule::parse(name, program.to_owned(), &Dialect::Extended)?; +) -> crate::Result<(CodeMap, Vec<UnusedLoad>)> { + let module = AstModule::parse(name, program.to_owned(), &Dialect::AllOptionsInternal)?; let names = MutableNames::new(); let heap = FrozenHeap::new(); let (codemap, statement, dialect, ..) 
= module.into_parts(); - let codemap = heap.alloc_any_display_from_type_name(codemap); + let codemap = heap.alloc_any(codemap); let module_scopes = ModuleScopes::check_module_err( &names, &heap, @@ -112,7 +111,10 @@ pub(crate) fn find_unused_loads( let args = load.args.try_map(|arg| { anyhow::Ok(LoadSymbol { arg, - binding_id: arg.local.payload.context("payload is not set")?, + binding_id: arg + .local + .payload + .ok_or_else(|| anyhow::anyhow!("payload is not set"))?, used: false, }) })?; @@ -131,7 +133,7 @@ pub(crate) fn find_unused_loads( println!("visit ident: {:?}", ident); let ResolvedIdent::Slot(Slot::Module(_), binding_id) = ident .payload - .context("ident is not resolved (internal error)")? + .ok_or_else(|| anyhow::anyhow!("ident is not resolved (internal error)"))? else { return Ok(()); }; diff --git a/starlark-rust/starlark/src/analysis/unused_loads/find/one_of_two_unused.golden b/starlark-rust/starlark/src/analysis/unused_loads/find/one_of_two_unused.golden index 40f067c75b7a0..585f0f9901dd5 100644 --- a/starlark-rust/starlark/src/analysis/unused_loads/find/one_of_two_unused.golden +++ b/starlark-rust/starlark/src/analysis/unused_loads/find/one_of_two_unused.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program: diff --git a/starlark-rust/starlark/src/analysis/unused_loads/find/simple.golden b/starlark-rust/starlark/src/analysis/unused_loads/find/simple.golden index 586eab9fdb0c5..41413f0808170 100644 --- a/starlark-rust/starlark/src/analysis/unused_loads/find/simple.golden +++ b/starlark-rust/starlark/src/analysis/unused_loads/find/simple.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program: diff --git a/starlark-rust/starlark/src/analysis/unused_loads/find/unused_annotation_on_arg.golden b/starlark-rust/starlark/src/analysis/unused_loads/find/unused_annotation_on_arg.golden index 2a14646b427fa..d91b3ab68e280 100644 --- a/starlark-rust/starlark/src/analysis/unused_loads/find/unused_annotation_on_arg.golden +++ b/starlark-rust/starlark/src/analysis/unused_loads/find/unused_annotation_on_arg.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program: diff --git a/starlark-rust/starlark/src/analysis/unused_loads/find/used_in_top_level_assignment.golden b/starlark-rust/starlark/src/analysis/unused_loads/find/used_in_top_level_assignment.golden index fe317c1c0b091..226d47e38ac2b 100644 --- a/starlark-rust/starlark/src/analysis/unused_loads/find/used_in_top_level_assignment.golden +++ b/starlark-rust/starlark/src/analysis/unused_loads/find/used_in_top_level_assignment.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program: diff --git a/starlark-rust/starlark/src/analysis/unused_loads/find/used_in_type_expr.golden b/starlark-rust/starlark/src/analysis/unused_loads/find/used_in_type_expr.golden index 125a0cc427086..74cf7bacc3d95 100644 --- 
a/starlark-rust/starlark/src/analysis/unused_loads/find/used_in_type_expr.golden +++ b/starlark-rust/starlark/src/analysis/unused_loads/find/used_in_type_expr.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program: diff --git a/starlark-rust/starlark/src/analysis/unused_loads/find/with_rename.golden b/starlark-rust/starlark/src/analysis/unused_loads/find/with_rename.golden index 72722c41b269b..81cca90a87310 100644 --- a/starlark-rust/starlark/src/analysis/unused_loads/find/with_rename.golden +++ b/starlark-rust/starlark/src/analysis/unused_loads/find/with_rename.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program: diff --git a/starlark-rust/starlark/src/analysis/unused_loads/remove.rs b/starlark-rust/starlark/src/analysis/unused_loads/remove.rs index 4b87fe132e3d4..d62912136cc44 100644 --- a/starlark-rust/starlark/src/analysis/unused_loads/remove.rs +++ b/starlark-rust/starlark/src/analysis/unused_loads/remove.rs @@ -50,7 +50,7 @@ impl<'a> Out<'a> { } /// Return `None` if there are no unused loads. -pub fn remove_unused_loads(name: &str, program: &str) -> anyhow::Result<Option<String>> { +pub fn remove_unused_loads(name: &str, program: &str) -> crate::Result<Option<String>> { let (codemap, unused_loads) = find_unused_loads(name, program)?; if unused_loads.is_empty() { return Ok(None); diff --git a/starlark-rust/starlark/src/analysis/unused_loads/remove/remove_all.golden b/starlark-rust/starlark/src/analysis/unused_loads/remove/remove_all.golden index 100da14347912..282d8ade659a0 100644 --- a/starlark-rust/starlark/src/analysis/unused_loads/remove/remove_all.golden +++ b/starlark-rust/starlark/src/analysis/unused_loads/remove/remove_all.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program: diff --git a/starlark-rust/starlark/src/analysis/unused_loads/remove/remove_first_of_two.golden b/starlark-rust/starlark/src/analysis/unused_loads/remove/remove_first_of_two.golden index cddd70e0968b0..c72ef91227214 100644 --- a/starlark-rust/starlark/src/analysis/unused_loads/remove/remove_first_of_two.golden +++ b/starlark-rust/starlark/src/analysis/unused_loads/remove/remove_first_of_two.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program: diff --git a/starlark-rust/starlark/src/analysis/unused_loads/remove/remove_second_of_two.golden b/starlark-rust/starlark/src/analysis/unused_loads/remove/remove_second_of_two.golden index 1f9e53dd0a7df..a16a9f34abb9f 100644 --- a/starlark-rust/starlark/src/analysis/unused_loads/remove/remove_second_of_two.golden +++ b/starlark-rust/starlark/src/analysis/unused_loads/remove/remove_second_of_two.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program: diff --git a/starlark-rust/starlark/src/any.rs index 
654a9365f9283..519e2186e1788 100644 --- a/starlark-rust/starlark/src/any.rs +++ b/starlark-rust/starlark/src/any.rs @@ -86,9 +86,9 @@ unsafe impl<'a, T: ProvidesStaticType<'a> + 'a + ?Sized> AnyLifetime<'a> for T { /// struct Baz<T: Display>(T); /// # // TODO: `#[derive(ProvidesStaticType)]` should learn to handle this case too. /// unsafe impl<'a, T> ProvidesStaticType<'a> for Baz<T> -/// where -/// T: ProvidesStaticType<'a> + Display, -/// T::StaticType: Display + Sized, +/// where +/// T: ProvidesStaticType<'a> + Display, +/// T::StaticType: Display + Sized, /// { /// type StaticType = Baz<T::StaticType>; /// } @@ -114,7 +114,7 @@ impl<'a> dyn AnyLifetime<'a> { self.static_type_of() == T::static_type_id() } - /// Downcast a reference to type `T`, or return [`None`](None) if it is not the + /// Downcast a reference to type `T`, or return [`None`] if it is not the /// right type. pub fn downcast_ref<T: AnyLifetime<'a>>(&self) -> Option<&T> { if self.is::<T>() { @@ -125,7 +125,7 @@ impl<'a> dyn AnyLifetime<'a> { } } - /// Downcast a mutable reference to type `T`, or return [`None`](None) if it is not + /// Downcast a mutable reference to type `T`, or return [`None`] if it is not /// the right type. pub fn downcast_mut<T: AnyLifetime<'a>>(&mut self) -> Option<&mut T> { if self.is::<T>() { @@ -259,6 +259,7 @@ mod tests { struct Value<'a>(&'a str); #[derive(ProvidesStaticType)] + #[allow(dead_code)] // field `0` is never read struct Value2<'a>(&'a str); // Changing the return type to `Value<'static>` causes a compile error. @@ -299,6 +300,7 @@ test::(TypeId::of::()); #[derive(ProvidesStaticType)] + #[allow(dead_code)] // field `0` is never read struct Bbb<'a>(&'a str); test::<Bbb>(TypeId::of::<Bbb<'static>>()); @@ -328,6 +330,7 @@ trait My<'a> {} #[derive(ProvidesStaticType)] + #[allow(dead_code)] // field `0` is never read struct FooBar<'x, P: My<'x>>(&'x P); } } diff --git a/starlark-rust/starlark/src/assert.rs b/starlark-rust/starlark/src/assert.rs new file mode 100644 index 0000000000000..e2b01d34f12d0 --- /dev/null +++ b/starlark-rust/starlark/src/assert.rs @@ -0,0 +1,46 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Utilities to test Starlark code execution, using the [`Assert`] type and top-level functions. +//! +//! There are two general approaches. You can either use the functions in this module directly, e.g.: +//! +//! ``` +//! use starlark::assert; +//! assert::eq("1+2", "3"); +//! ``` +//! +//! Or create an [`Assert`] object, which supports the same assertions, but also lets you modify the +//! environment in which the tests are run, e.g.: +//! +//! ``` +//! use starlark::assert::Assert; +//! use starlark::syntax::Dialect; +//! +//! let mut a = Assert::new(); +//! a.dialect(&Dialect::Standard); // Use standard Starlark +//! a.eq("1+2", "3"); +//! ``` +//! +//! The tests in question may be run multiple times, in different modes, to maximise test coverage. +//! 
For example, execution tests are run at different garbage collection settings. Parsing tests are run +//! with both Unix and Windows newlines. + +mod assert; +mod conformance; + +pub use assert::*; diff --git a/starlark-rust/starlark/src/assert/assert.rs b/starlark-rust/starlark/src/assert/assert.rs index 31a8dbad3d7bc..a20cb851dfe71 100644 --- a/starlark-rust/starlark/src/assert/assert.rs +++ b/starlark-rust/starlark/src/assert/assert.rs @@ -28,7 +28,6 @@ use dupe::Dupe; use maplit::hashmap; use once_cell::sync::Lazy; use starlark_derive::starlark_module; -use starlark_syntax::diagnostic::Diagnostic; use crate as starlark; use crate::codemap::FileSpanRef; @@ -43,6 +42,7 @@ use crate::syntax::AstModule; use crate::syntax::Dialect; use crate::values::none::NoneType; use crate::values::structs::AllocStruct; +use crate::values::tuple::UnpackTuple; use crate::values::typing::type_compiled::compiled::TypeCompiled; use crate::values::AllocValue; use crate::values::Heap; @@ -57,7 +57,7 @@ static GLOBALS: Lazy<Globals> = Lazy::new(|| mk_environment().build()); static ASSERTS_STAR: Lazy<FrozenModule> = Lazy::new(|| { let g = GlobalsBuilder::new() - .with_struct("asserts", asserts_star) + .with_namespace("asserts", asserts_star) .build(); let m = Module::new(); m.frozen_heap().add_reference(g.heap()); @@ -70,25 +70,25 @@ static ASSERTS_STAR: Lazy<FrozenModule> = Lazy::new(|| { m.freeze().unwrap() }); -fn assert_equals<'v>(a: Value<'v>, b: Value<'v>) -> anyhow::Result<NoneType> { +fn assert_equals<'v>(a: Value<'v>, b: Value<'v>) -> starlark::Result<NoneType> { if !a.equals(b)? { - Err(anyhow::anyhow!("assert_eq: expected {}, got {}", a, b)) + Err(anyhow::anyhow!("assert_eq: expected {}, got {}", a, b).into()) } else { Ok(NoneType) } } -fn assert_different<'v>(a: Value<'v>, b: Value<'v>) -> anyhow::Result<NoneType> { +fn assert_different<'v>(a: Value<'v>, b: Value<'v>) -> starlark::Result<NoneType> { if a.equals(b)? { - Err(anyhow::anyhow!("assert_ne: but {} == {}", a, b)) + Err(anyhow::anyhow!("assert_ne: but {} == {}", a, b).into()) } else { Ok(NoneType) } } -fn assert_less_than<'v>(a: Value<'v>, b: Value<'v>) -> anyhow::Result<NoneType> { +fn assert_less_than<'v>(a: Value<'v>, b: Value<'v>) -> starlark::Result<NoneType> { if a.compare(b)? != std::cmp::Ordering::Less { - Err(anyhow::anyhow!("assert_lt: but {} >= {}", a, b)) + Err(anyhow::anyhow!("assert_lt: but {} >= {}", a, b).into()) } else { Ok(NoneType) } @@ -107,31 +107,27 @@ enum GcStrategy { #[starlark_module] // Deliberately qualify the GlobalsBuilder type to test that we can fn asserts_star(builder: &mut crate::environment::GlobalsBuilder) { - fn eq<'v>(a: Value<'v>, b: Value<'v>) -> anyhow::Result<NoneType> { + fn eq<'v>(a: Value<'v>, b: Value<'v>) -> starlark::Result<NoneType> { assert_equals(a, b) } - fn ne<'v>(a: Value<'v>, b: Value<'v>) -> anyhow::Result<NoneType> { + fn ne<'v>(a: Value<'v>, b: Value<'v>) -> starlark::Result<NoneType> { assert_different(a, b) } - fn lt<'v>(a: Value<'v>, b: Value<'v>) -> anyhow::Result<NoneType> { + fn lt<'v>(a: Value<'v>, b: Value<'v>) -> starlark::Result<NoneType> { assert_less_than(a, b) } - fn contains<'v>(xs: Value<'v>, x: Value<'v>) -> anyhow::Result<NoneType> { + fn contains<'v>(xs: Value<'v>, x: Value<'v>) -> starlark::Result<NoneType> { if !xs.is_in(x)? 
{ - Err(anyhow::anyhow!( - "assert.contains: expected {} to be in {}", - x, - xs - )) + Err(anyhow::anyhow!("assert.contains: expected {} to be in {}", x, xs).into()) } else { Ok(NoneType) } } - fn r#true(x: Value) -> anyhow::Result<NoneType> { + fn r#true(x: Value) -> starlark::Result<NoneType> { assert_equals(Value::new_bool(x.to_bool()), Value::new_bool(true)) } @@ -143,7 +139,7 @@ fn asserts_star(builder: &mut crate::environment::GlobalsBuilder) { fn fails<'v>( f: Value<'v>, msg: &str, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result<NoneType> { let _ = msg; match f.invoke_pos(&[], eval) { @@ -163,20 +159,15 @@ pub(crate) fn test_functions(builder: &mut GlobalsBuilder) { Ok(AllocStruct::EMPTY) } - // Approximate version of a method used by the Go test suite - fn set<'v>(xs: Value<'v>) -> anyhow::Result<Value<'v>> { - Ok(xs) - } - - fn assert_eq<'v>(a: Value<'v>, b: Value<'v>) -> anyhow::Result<NoneType> { + fn assert_eq<'v>(a: Value<'v>, b: Value<'v>) -> starlark::Result<NoneType> { assert_equals(a, b) } - fn assert_ne<'v>(a: Value<'v>, b: Value<'v>) -> anyhow::Result<NoneType> { + fn assert_ne<'v>(a: Value<'v>, b: Value<'v>) -> starlark::Result<NoneType> { assert_different(a, b) } - fn assert_lt<'v>(a: Value<'v>, b: Value<'v>) -> anyhow::Result<NoneType> { + fn assert_lt<'v>(a: Value<'v>, b: Value<'v>) -> starlark::Result<NoneType> { assert_less_than(a, b) } @@ -202,7 +193,7 @@ pub(crate) fn test_functions(builder: &mut GlobalsBuilder) { Ok(NoneType) } - fn assert_type<'v>(v: Value<'v>, ty: Value<'v>, heap: &'v Heap) -> anyhow::Result<NoneType> { + fn assert_type<'v>(v: Value<'v>, ty: Value<'v>, heap: &'v Heap) -> starlark::Result<NoneType> { TypeCompiled::new(ty, heap)?.check_type(v, Some("v"))?; Ok(NoneType) } @@ -211,11 +202,11 @@ pub(crate) fn test_functions(builder: &mut GlobalsBuilder) { /// /// This function is unknown to the optimizer, so it can be used in optimizer tests. fn noop<'v>( - #[starlark(args)] args: Vec<Value<'v>>, + #[starlark(args)] args: UnpackTuple<Value<'v>>, #[starlark(kwargs)] kwargs: Value<'v>, ) -> anyhow::Result<Value<'v>> { let _ = kwargs; - Ok(args.into_iter().next().unwrap_or(Value::new_none())) + Ok(args.items.into_iter().next().unwrap_or(Value::new_none())) } } @@ -235,13 +226,13 @@ pub struct Assert<'a> { /// Construction and state management. impl<'a> Assert<'a> { /// Create a new assert object, which will by default use - [`Dialect::Extended`] and all library extensions, + extended dialect and all library extensions, /// plus some additional global functions like `assert_eq`. /// The usual pattern is to create a `mut` `Assert`, modify some properties /// and then execute some tests. 
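/// A hedged usage sketch (these methods all appear in this file; the programs are illustrative): /// /// ``` /// use starlark::assert::Assert; /// /// let mut a = Assert::new(); /// a.is_true("len([1, 2]) == 2"); /// a.fail("fail('boom')", "boom"); /// ``` 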
pub fn new() -> Self { Self { - dialect: Dialect::Extended, + dialect: Dialect::AllOptionsInternal, modules: hashmap!["asserts.star".to_owned() => Lazy::force(&ASSERTS_STAR).dupe()], globals: Lazy::force(&GLOBALS).dupe(), gc_strategy: None, @@ -291,7 +282,7 @@ impl<'a> Assert<'a> { program: &str, module: &'v Module, gc: GcStrategy, - ) -> anyhow::Result<Value<'v>> { + ) -> crate::Result<Value<'v>> { let mut modules = HashMap::with_capacity(self.modules.len()); for (k, v) in &self.modules { modules.insert(k.as_str(), v); @@ -314,7 +305,7 @@ impl<'a> Assert<'a> { GcStrategy::Always => eval.before_stmt_fn(&gc_always), } eval.set_loader(&loader); - eval.eval_module(ast, &self.globals) + eval.eval_module(ast, &self.globals).map_err(Into::into) } fn execute_fail<'v>( @@ -323,7 +314,7 @@ impl<'a> Assert<'a> { program: &str, module: &'v Module, gc: GcStrategy, - ) -> anyhow::Error { + ) -> crate::Error { match self.execute("assert.bzl", program, module, gc) { Ok(v) => panic!( "starlark::assert::{}, didn't fail!\nCode:\n{}\nResult:\n{}\n", @@ -344,7 +335,7 @@ impl<'a> Assert<'a> { match self.execute(path, program, module, gc) { Ok(v) => v, Err(err) => { - Diagnostic::eprint(&err); + err.eprint(); panic!( "starlark::assert::{}, failed to execute!\nCode:\n{}\nGot error: {}", func, program, err @@ -437,7 +428,7 @@ impl<'a> Assert<'a> { self.globals(mk_environment().with(f).build()) } - fn fails_with_name(&self, func: &str, program: &str, msgs: &[&str]) -> anyhow::Error { + fn fails_with_name(&self, func: &str, program: &str, msgs: &[&str]) -> crate::Error { self.with_gc(|gc| { let module_env = Module::new(); let original = self.execute_fail(func, program, &module_env, gc); @@ -445,19 +436,18 @@ impl<'a> Assert<'a> { // fail("bad") # error: magic // Then when we print the source code, magic is contained in the error message. // Therefore, find the internals. 
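// (`without_diagnostic` below yields the bare error value, with the span and quoted-source decoration stripped, so the substring checks cannot match text that merely appears in the quoted source line.)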
- let inner = original - .downcast_ref::<Diagnostic>() - .map_or(&original, |d| &d.message); + let inner = original.without_diagnostic(); let err_msg = format!("{:#}", inner); for msg in msgs { if !err_msg.contains(msg) { - Diagnostic::eprint(&original); + original.eprint(); panic!( - "starlark::assert::{}, failed with the wrong message!\nCode:\n{}\nError:\n{}\nMissing:\n{}\nExpected:\n{:?}", + "starlark::assert::{}, failed with the wrong message!\nCode:\n{}\nError:\n{:#}\nMissing:\n{}\nExpected:\n{:?}", func, program, inner, msg, msgs ) } } + drop(inner); original }) } @@ -474,7 +464,7 @@ impl<'a> Assert<'a> { /// # use starlark::assert::Assert; /// Assert::new().fail("fail('hello')", "ello"); /// ``` - pub fn fail(&self, program: &str, msg: &str) -> anyhow::Error { + pub fn fail(&self, program: &str, msg: &str) -> crate::Error { self.fails_with_name("fail", program, &[msg]) } @@ -488,7 +478,7 @@ impl<'a> Assert<'a> { /// # use starlark::assert::Assert; /// Assert::new().fails("fail('hello')", &["fail", "ello"]); /// ``` - pub fn fails(&self, program: &str, msgs: &[&str]) -> anyhow::Error { + pub fn fails(&self, program: &str, msgs: &[&str]) -> crate::Error { self.fails_with_name("fails", program, msgs) } @@ -525,10 +515,12 @@ impl<'a> Assert<'a> { /// /// ``` /// # use starlark::assert::Assert; - /// Assert::new().is_true(r#" + /// Assert::new().is_true( + /// r#" /// x = 1 + 1 /// x == 2 - /// "#); + /// "#, + /// ); /// ``` pub fn is_true(&self, program: &str) { self.with_gc(|gc| { @@ -549,11 +541,13 @@ impl<'a> Assert<'a> { /// /// ``` /// # use starlark::assert::Assert; - /// Assert::new().all_true(r#" + /// Assert::new().all_true( + /// r#" /// 1 == 1 /// /// 2 == 1 + 1 - /// "#); + /// "#, + /// ); /// ``` pub fn all_true(&self, program: &str) { self.with_gc(|gc| { @@ -595,24 +589,33 @@ pub fn eq(lhs: &str, rhs: &str) { } /// See [`Assert::fail`]. -pub fn fail(program: &str, msg: &str) -> anyhow::Error { +pub fn fail(program: &str, msg: &str) -> crate::Error { Assert::new().fail(program, msg) } #[cfg(test)] -pub(crate) fn fail_skip_typecheck(program: &str, msg: &str) -> anyhow::Error { +pub(crate) fn fail_golden(path: &str, program: &str) -> crate::Error { + let program = program.trim(); + let e = fails(program, &[]); + let output = format!("Program:\n\n{program}\n\nError:\n\n{e:?}\n"); + starlark_syntax::golden_test_template::golden_test_template(path, &output); + e +} + +#[cfg(test)] +pub(crate) fn fail_skip_typecheck(program: &str, msg: &str) -> crate::Error { + let mut a = Assert::new(); + a.disable_static_typechecking(); + a.fail(program, msg) +} + /// See [`Assert::fails`]. 
-pub fn fails(program: &str, msgs: &[&str]) -> anyhow::Error { +pub fn fails(program: &str, msgs: &[&str]) -> crate::Error { Assert::new().fails(program, msgs) } #[cfg(test)] -pub(crate) fn fails_skip_typecheck(program: &str, msgs: &[&str]) -> anyhow::Error { +pub(crate) fn fails_skip_typecheck(program: &str, msgs: &[&str]) -> crate::Error { let mut a = Assert::new(); a.disable_static_typechecking(); a.fails(program, msgs) diff --git a/starlark-rust/starlark/src/assert/conformance.rs b/starlark-rust/starlark/src/assert/conformance.rs index dc5accbe6cb64..e70f55f128805 100644 --- a/starlark-rust/starlark/src/assert/conformance.rs +++ b/starlark-rust/starlark/src/assert/conformance.rs @@ -24,7 +24,6 @@ #![allow(clippy::if_then_panic)] use itertools::Itertools; -use starlark_syntax::diagnostic::Diagnostic; use crate::assert::assert::Assert; @@ -88,13 +87,8 @@ impl ConformanceTest { } fn test(&self, assert: &Assert) { - fn get_line(err: &anyhow::Error) -> Option<usize> { - match err.downcast_ref::<Diagnostic>() { - Some(Diagnostic { - span: Some(span), .. - }) => Some(span.resolve_span().begin.line + 1), - _ => None, - } + fn get_line(err: &crate::Error) -> Option<usize> { + err.span().map(|span| span.resolve_span().begin.line + 1) } match &self.error { diff --git a/starlark-rust/starlark/src/assert/mod.rs b/starlark-rust/starlark/src/assert/mod.rs deleted file mode 100644 index e3da74c30c077..0000000000000 --- a/starlark-rust/starlark/src/assert/mod.rs +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! Utilities to test Starlark code execution, using the [`Assert`] type and top-level functions. -//! -//! There are two general approaches. You can either use the functions in this module directly, e.g.: -//! -//! ``` -//! use starlark::assert; -//! assert::eq("1+2", "3"); -//! ``` -//! -//! Or create an [`Assert`] object, which supports the same assertions, but also lets you modify the -//! environment in which the tests are run, e.g.: -//! -//! ``` -//! use starlark::assert::Assert; -//! use starlark::syntax::Dialect; -//! -//! let mut a = Assert::new(); -//! a.dialect(&Dialect::Standard); // Use standard Starlark -//! a.eq("1+2", "3"); -//! ``` -//! -//! The tests in question may be run multiple times, in different modes, to maximise test coverage. -//! For example, execution tests are run at different garbage collection settings. Parsing tests are run -//! with both Unix and Windows newlines. 
- -#[allow(clippy::module_inception)] // This seems a perfectly reasonable thing to do -mod assert; -mod conformance; - -pub use assert::*; -pub use conformance::*; diff --git a/starlark-rust/starlark/src/coerce.rs b/starlark-rust/starlark/src/coerce.rs index a146641c8edc6..0308bbeb2ff39 100644 --- a/starlark-rust/starlark/src/coerce.rs +++ b/starlark-rust/starlark/src/coerce.rs @@ -27,6 +27,7 @@ use std::ptr; pub use starlark_derive::Coerce; use starlark_map::small_map::SmallMap; +use starlark_map::small_set::SmallSet; /// A marker trait such that the existence of `From: Coerce<To>` implies /// that `From` can be treated as `To` without any data manipulation. @@ -39,41 +40,6 @@ use starlark_map::small_map::SmallMap; /// and it must be safe for the `From` to be treated as `To`, namely same (or less restrictive) alignment, /// no additional invariants, value can be dropped as `To`. /// -/// One use of `Coerce` is around newtype wrappers: -/// -/// ``` -/// use starlark::coerce::{Coerce, coerce}; -/// #[repr(transparent)] -/// #[derive(Debug, Coerce)] -/// struct Wrapper(String); -/// -/// let value = vec![Wrapper("hello".to_owned()), Wrapper("world".to_owned())]; -/// assert_eq!( -/// coerce::<_, &Vec<String>>(&value).join(" "), -/// "hello world" -/// ); -/// let mut value = coerce::<_, Vec<String>>(value); -/// assert_eq!(value.pop(), Some("world".to_owned())); -/// ``` -/// -/// Another involves containers: -/// -/// ``` -/// use starlark::coerce::{Coerce, coerce}; -/// # #[derive(Coerce)] -/// # #[repr(transparent)] -/// # struct Wrapper(String); -/// #[derive(Coerce)] -/// #[repr(C)] -/// struct Container<T>(i32, T); -/// -/// let value = Container(20, Wrapper("twenty".to_owned())); -/// assert_eq!( -/// coerce::<_, &Container<String>>(&value).1, -/// "twenty" -/// ); -/// ``` -/// /// If you only need [`coerce`] on newtype references, /// then the [`ref-cast` crate](https://crates.io/crates/ref-cast) /// provides that, along with automatic derivations (no `unsafe` required). @@ -144,6 +110,8 @@ where { } +unsafe impl<From, To> Coerce<SmallSet<To>> for SmallSet<From> where From: Coerce<To> {} + /// Safely convert between types which have a `Coerce` relationship. /// Often the second type argument will need to be given explicitly, /// e.g. `coerce::<_, ToType>(x)`. @@ -174,22 +142,10 @@ mod tests { assert_eq!(f(("test",)), (x.as_str(),)) } - #[test] - fn test_coerce_lifetime() { - #[derive(Coerce)] - #[repr(transparent)] - struct NewtypeWithLifetime<'v>(&'v [usize]); - - let newtype = NewtypeWithLifetime(&[1, 2]); - assert_eq!(&[1, 2], coerce(newtype)) - } - #[test] fn test_coerce_type_and_lifetime_params() { - #[derive(Coerce)] #[repr(C)] struct Aaa<'a>(&'a u32); - #[derive(Coerce)] #[repr(C)] struct Bbb<'a>(&'a u32); @@ -216,10 +172,11 @@ mod tests { fn test_coerce_is_unsound() { // TODO(nga): fix it. - #[derive(Coerce)] #[repr(transparent)] struct Newtype(u8); + unsafe impl Coerce for Newtype {} + #[derive(Coerce)] #[repr(transparent)] struct Struct(T::Assoc); diff --git a/starlark-rust/starlark/src/collections.rs b/starlark-rust/starlark/src/collections.rs new file mode 100644 index 0000000000000..e6a7fce7b4b1b --- /dev/null +++ b/starlark-rust/starlark/src/collections.rs @@ -0,0 +1,37 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Defines [`SmallMap`] and [`SmallSet`] - collections with deterministic iteration and small memory footprint. +//! +//! These structures use vector-backed storage if there are only a few elements, and an index +//! for larger collections. The API mirrors standard Rust collections. + +pub use starlark_map::small_map::IntoIter; +pub use starlark_map::small_map::Iter; +pub use starlark_map::small_map::IterMut; +pub use starlark_map::small_map::SmallMap; +pub use starlark_map::small_set::SmallSet; +pub use starlark_map::Equivalent; +pub use starlark_map::Hashed; +pub use starlark_map::StarlarkHashValue; +pub use starlark_map::StarlarkHasher; + +pub(crate) mod aligned_padded_str; +pub(crate) mod alloca; +pub(crate) mod maybe_uninit_backport; +pub(crate) mod string_pool; +pub(crate) mod symbol; diff --git a/starlark-rust/starlark/src/collections/mod.rs b/starlark-rust/starlark/src/collections/mod.rs deleted file mode 100644 index e545b1f76e52c..0000000000000 --- a/starlark-rust/starlark/src/collections/mod.rs +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! Defines [`SmallMap`] and [`SmallSet`] - collections with deterministic iteration and small memory footprint. -//! -//! These structures use vector-backed storage if there are only a few elements, and an index -//! for larger collections. The API mirrors standard Rust collections. - -pub use starlark_map::small_map::IntoIter; -pub use starlark_map::small_map::Iter; -pub use starlark_map::small_map::IterMut; -pub use starlark_map::small_map::SmallMap; -pub use starlark_map::small_set::SmallSet; -pub use starlark_map::Equivalent; -pub use starlark_map::Hashed; -pub use starlark_map::StarlarkHashValue; -pub use starlark_map::StarlarkHasher; - -pub(crate) mod aligned_padded_str; -pub(crate) mod alloca; -pub(crate) mod maybe_uninit_backport; -pub(crate) mod string_pool; -pub(crate) mod symbol_map; diff --git a/starlark-rust/starlark/src/collections/symbol.rs b/starlark-rust/starlark/src/collections/symbol.rs new file mode 100644 index 0000000000000..b7f92ff4521d7 --- /dev/null +++ b/starlark-rust/starlark/src/collections/symbol.rs @@ -0,0 +1,19 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +pub(crate) mod map; +pub(crate) mod symbol; diff --git a/starlark-rust/starlark/src/collections/symbol/map.rs b/starlark-rust/starlark/src/collections/symbol/map.rs new file mode 100644 index 0000000000000..545656981b345 --- /dev/null +++ b/starlark-rust/starlark/src/collections/symbol/map.rs @@ -0,0 +1,119 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! An optimised string HashMap which goes even faster when the keys can +//! be pre-hashed or otherwise precomputed. +//! +//! The two bottlenecks in our use of these hash tables are computing the hashes and comparing +//! the resulting keys for equality. We precompute the hashes. We also use `[usize]` to do faster +//! comparison when possible. We use the Starlark SmallHash hashes, promoted by IdHasher, +//! so we can reuse a SmallMap hash. +//! +//! Benchmarks on which the `[usize]` choice was made (mac/linux, all in ns): +//! 8 bytes 32 bytes 64 bytes +//! slice equality (memcmp) 3.5/3.8 3.5/ 3.0 4.5/ 4.7 +//! usize equality loop 1.0/1.4 2.7/ 3.5 3.5/ 6.0 +//! u8 equality loop 3.4/5.7 13.7/19.7 22.6/44.8 +//! +//! Measuring some sample strings, the P50 = 21 bytes, P75 = 27, P95 = 35, +//! so we can reasonably expect to hit the smaller cases most often. + +use std::fmt; +use std::fmt::Debug; +use std::mem; + +use allocative::Allocative; +use hashbrown::HashTable; +use starlark_derive::Trace; +use starlark_map::Hashed; + +use crate as starlark; +use crate::collections::symbol::symbol::Symbol; +use crate::values::StringValue; + +// We use a hashbrown `HashTable` (the thing that underlies `HashMap`) so we can look up efficiently +// and easily by Symbol and str, without being limited by `Borrow` traits. 
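+// A note on the lookup contract: every query below passes a precomputed 64-bit hash plus an +// equality closure. `get_str`/`get_hashed_str` compare by `&str` equality, while `get` (by +// `Symbol`) and `get_hashed_string_value` compare whole `usize` words at a time via the padded +// payload, which is the fast path the benchmarks above motivate.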
+#[derive(Clone, Trace, Allocative)] +pub(crate) struct SymbolMap<T>(HashTable<(Symbol, T)>); + +impl<T: Debug> Debug for SymbolMap<T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_map() + .entries(self.iter().map(|x| (&x.0, &x.1))) + .finish() + } +} + +impl<T> SymbolMap<T> { + pub(crate) fn new() -> Self { + SymbolMap::with_capacity(0) + } + + pub(crate) fn with_capacity(capacity: usize) -> Self { + SymbolMap(HashTable::with_capacity(capacity)) + } + + pub(crate) fn insert(&mut self, key: &str, value: T) -> Option<T> { + let s = Symbol::new(key); + if let Some((_, item)) = self.0.find_mut(s.hash(), |x| s == x.0) { + Some(mem::replace(item, value)) + } else { + // This insert doesn't remove old values, so do that manually first + self.0.insert_unique(s.hash(), (s, value), |x| x.0.hash()); + None + } + } + + #[inline] + pub(crate) fn get(&self, key: &Symbol) -> Option<&T> { + self.0.find(key.hash(), |x| key == &x.0).map(|x| &x.1) + } + + pub(crate) fn get_str(&self, key: &str) -> Option<&T> { + self.get_hashed_str(Hashed::new(key)) + } + + pub(crate) fn get_hashed_str(&self, key: Hashed<&str>) -> Option<&T> { + self.0 + .find(key.hash().promote(), |x| x.0.as_str() == *key.key()) + .map(|x| &x.1) + } + + pub(crate) fn get_hashed_string_value(&self, key: Hashed<StringValue>) -> Option<&T> { + self.0 + .find(key.hash().promote(), |x| { + x.0.as_aligned_padded_str() == key.key().as_aligned_padded_str() + }) + .map(|x| &x.1) + } + + pub(crate) fn len(&self) -> usize { + self.0.len() + } + + pub(crate) fn iter<'a>(&'a self) -> impl ExactSizeIterator<Item = &'a (Symbol, T)> + 'a { + self.0.iter() + } + + pub(crate) fn keys<'a>(&'a self) -> impl ExactSizeIterator<Item = &'a Symbol> + 'a { + self.iter().map(|x| &x.0) + } + + pub(crate) fn values<'a>(&'a self) -> impl ExactSizeIterator<Item = &'a T> + 'a { + self.iter().map(|x| &x.1) + } +} diff --git a/starlark-rust/starlark/src/collections/symbol/symbol.rs b/starlark-rust/starlark/src/collections/symbol/symbol.rs new file mode 100644 index 0000000000000..13f403afd6cbe --- /dev/null +++ b/starlark-rust/starlark/src/collections/symbol/symbol.rs @@ -0,0 +1,119 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::fmt; +use std::fmt::Debug; +use std::intrinsics::copy_nonoverlapping; +use std::mem; +use std::slice; +use std::str; + +use allocative::Allocative; +use starlark_derive::Trace; +use starlark_map::Hashed; +use starlark_map::StarlarkHashValue; + +use crate as starlark; +use crate::coerce::Coerce; +use crate::collections::aligned_padded_str::AlignedPaddedStr; + +/// A pre-hashed string used for efficient dictionary lookup. 
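+/// Layout sketch: `len` is the byte length of the original string, while `payload` holds the +/// bytes zero-padded into whole `usize` words so that equality can compare a word at a time; +/// `hash` caches the promoted 64-bit hash and `small_hash` the `StarlarkHashValue` it came from.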
+#[derive(Clone, Trace, Allocative)] +pub(crate) struct Symbol { + hash: u64, + len: u32, + payload: Box<[usize]>, + small_hash: StarlarkHashValue, +} + +unsafe impl Coerce<Symbol> for Symbol {} + +impl Debug for Symbol { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.as_str().fmt(f) + } +} + +impl PartialEq for Symbol { + fn eq(&self, other: &Self) -> bool { + if self.len != other.len { + return false; + } + + let p1 = &*self.payload; + let p2 = &*other.payload; + // Important to use the payload len, which is in `usize` words, rather than `len` which is in bytes + for i in 0..self.payload.len() { + // Safe because we checked the lengths at the start + if unsafe { p1.get_unchecked(i) != p2.get_unchecked(i) } { + return false; + } + } + true + } +} + +impl Eq for Symbol {} + +impl Symbol { + pub(crate) fn new(x: &str) -> Self { + Self::new_hashed(Hashed::new(x)) + } + + pub(crate) fn new_hashed(x: Hashed<&str>) -> Self { + let small_hash = x.hash(); + let hash = small_hash.promote(); + let len = x.key().len(); + let len_words = (len + mem::size_of::<usize>() - 1) / mem::size_of::<usize>(); + let mut payload = vec![0; len_words]; // 0 pad it at the end + unsafe { + copy_nonoverlapping(x.key().as_ptr(), payload.as_mut_ptr() as *mut u8, len); + } + Self { + hash, + len: len.try_into().unwrap(), + payload: payload.into_boxed_slice(), + small_hash, + } + } + + #[inline] + pub(crate) fn hash(&self) -> u64 { + self.hash + } + + pub(crate) fn as_str(&self) -> &str { + // All safe because we promise we started out with a str + unsafe { + let s = slice::from_raw_parts(self.payload.as_ptr() as *const u8, self.len as usize); + str::from_utf8_unchecked(s) + } + } + + #[inline] + pub(crate) fn as_aligned_padded_str(&self) -> AlignedPaddedStr { + unsafe { AlignedPaddedStr::new(self.len as usize, self.payload.as_ptr()) } + } + + pub(crate) fn as_str_hashed(&self) -> Hashed<&str> { + Hashed::new_unchecked(self.small_hash, self.as_str()) + } + + pub(crate) fn small_hash(&self) -> StarlarkHashValue { + self.small_hash + } +} diff --git a/starlark-rust/starlark/src/collections/symbol_map.rs b/starlark-rust/starlark/src/collections/symbol_map.rs deleted file mode 100644 index d9b5a56ddd30d..0000000000000 --- a/starlark-rust/starlark/src/collections/symbol_map.rs +++ /dev/null @@ -1,198 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! An optimised string HashMap which goes even faster when the keys can -//! be pre-hashed or otherwise precomputed. -//! -//! The two bottlenecks in our use of these hash tables are computing the hashes and comparing -//! the resulting keys for equality. We precompute the hashes. We also use `[usize]` to do faster -//! comparison when possible. We use the Starlark SmallHash hashes, promoted by IdHasher, -//! so we can reuse a SmallMap hash. -//! -//! Benchmarks on which the `[usize]` choice was made (mac/linux, all in ns): -//! 8 bytes 32 bytes 64 bytes -//! 
slice equality (memcmp) 3.5/3.8 3.5/ 3.0 4.5/ 4.7 -//! usize equality loop 1.0/1.4 2.7/ 3.5 3.5/ 6.0 -//! u8 equality loop 3.4/5.7 13.7/19.7 22.6/44.8 -//! -//! Measuring some sample strings, the P50 = 21 bytes, P75 = 27, P95 = 35, -//! so we can reasonably expect to hit the smaller cases most often. - -use std::fmt; -use std::fmt::Debug; -use std::intrinsics::copy_nonoverlapping; -use std::mem; -use std::slice; -use std::str; - -use allocative::Allocative; -use hashbrown::raw::RawTable; - -use crate as starlark; -use crate::coerce::Coerce; -use crate::collections::Hashed; -use crate::collections::StarlarkHashValue; -use crate::values::StringValue; -use crate::values::Trace; - -// We use a RawTable (the thing that underlies HashMap) so we can look up efficiently -// and easily by Symbol and str, without being limited by `Borrow` traits. -#[derive(Clone, Trace, Allocative)] -pub(crate) struct SymbolMap<T>(RawTable<(Symbol, T)>); - -impl<T: Debug> Debug for SymbolMap<T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_map() - .entries(self.iter().map(|x| (&x.0, &x.1))) - .finish() - } -} - -/// A pre-hashed string used for efficient dictionary lookup. -#[derive(Clone, Trace, Allocative)] -pub(crate) struct Symbol { - hash: u64, - len: u32, - payload: Box<[usize]>, - small_hash: StarlarkHashValue, -} - -unsafe impl Coerce<Symbol> for Symbol {} - -impl Debug for Symbol { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.as_str().fmt(f) - } -} - -impl PartialEq for Symbol { - fn eq(&self, other: &Self) -> bool { - if self.len != other.len { - return false; - } - - let p1 = &*self.payload; - let p2 = &*other.payload; - // Important to use the payload len, which is in `usize` words, rather than `len` which is in bytes - for i in 0..self.payload.len() { - // Safe because we checked the lengths at the start - if unsafe { p1.get_unchecked(i) != p2.get_unchecked(i) } { - return false; - } - } - true - } -} - -impl Eq for Symbol {} - -impl Symbol { - pub fn new(x: &str) -> Self { - Self::new_hashed(Hashed::new(x)) - } - - pub fn new_hashed(x: Hashed<&str>) -> Self { - let small_hash = x.hash(); - let hash = small_hash.promote(); - let len = x.key().len(); - let len_words = (len + mem::size_of::<usize>() - 1) / mem::size_of::<usize>(); - let mut payload = vec![0; len_words]; // 0 pad it at the end - unsafe { - copy_nonoverlapping(x.key().as_ptr(), payload.as_mut_ptr() as *mut u8, len); - } - Self { - hash, - len: len.try_into().unwrap(), - payload: payload.into_boxed_slice(), - small_hash, - } - } - - pub fn as_str(&self) -> &str { - // All safe because we promise we started out with a str - unsafe { - let s = slice::from_raw_parts(self.payload.as_ptr() as *const u8, self.len as usize); - str::from_utf8_unchecked(s) - } - } - - pub(crate) fn as_str_hashed(&self) -> Hashed<&str> { - Hashed::new_unchecked(self.small_hash, self.as_str()) - } - - pub fn small_hash(&self) -> StarlarkHashValue { - self.small_hash - } -} - -impl<T> SymbolMap<T> { - pub fn new() -> Self { - Self(RawTable::new()) - } - - pub fn with_capacity(capacity: usize) -> Self { - Self(RawTable::with_capacity(capacity)) - } - - pub fn insert(&mut self, key: &str, value: T) -> Option<T> { - let s = Symbol::new(key); - if let Some((_, item)) = self.0.get_mut(s.hash, |x| s == x.0) { - Some(mem::replace(item, value)) - } else { - // This insert doesn't remove old values, so do that manually first - self.0.insert(s.hash, (s, value), |x| x.0.hash); - None - } - } - - pub fn get(&self, key: &Symbol) -> Option<&T> { - self.0.get(key.hash, |x| key == 
&x.0).map(|x| &x.1) - } - - pub fn get_str(&self, key: &str) -> Option<&T> { - self.get_hashed_str(Hashed::new(key)) - } - - pub fn get_hashed_str(&self, key: Hashed<&str>) -> Option<&T> { - self.0 - .get(key.hash().promote(), |x| x.0.as_str() == *key.key()) - .map(|x| &x.1) - } - - pub(crate) fn get_hashed_string_value(&self, key: Hashed<StringValue>) -> Option<&T> { - self.0 - .get(key.hash().promote(), |x| x.0.as_str() == key.key().as_str()) - .map(|x| &x.1) - } - - pub fn len(&self) -> usize { - self.0.len() - } - - pub fn iter<'a>(&'a self) -> impl ExactSizeIterator<Item = &'a (Symbol, T)> + 'a { - // Unsafe because it doesn't have a lifetime, but we added one in the type signature - unsafe { self.0.iter().map(|x| x.as_ref()) } - } - - pub fn keys<'a>(&'a self) -> impl ExactSizeIterator<Item = &'a Symbol> + 'a { - self.iter().map(|x| &x.0) - } - - pub fn values<'a>(&'a self) -> impl ExactSizeIterator<Item = &'a T> + 'a { - self.iter().map(|x| &x.1) - } -} diff --git a/starlark-rust/starlark/src/debug/mod.rs b/starlark-rust/starlark/src/debug.rs similarity index 100% rename from starlark-rust/starlark/src/debug/mod.rs rename to starlark-rust/starlark/src/debug.rs diff --git a/starlark-rust/starlark/src/debug/adapter.rs b/starlark-rust/starlark/src/debug/adapter.rs new file mode 100644 index 0000000000000..8c6e00ba18b64 --- /dev/null +++ b/starlark-rust/starlark/src/debug/adapter.rs @@ -0,0 +1,454 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Provides utilities useful for implementation of the debug adapter protocol (DAP, see +//! <https://microsoft.github.io/debug-adapter-protocol/>), primarily the DapAdapter/DapAdapterEvalHook +//! that provide for debugging a Starlark evaluation. + +use std::fmt::Debug; +use std::fmt::Display; + +use debugserver_types::*; +use dupe::Dupe; + +use crate::codemap::FileSpan; +use crate::eval::Evaluator; +use crate::syntax::AstModule; +use crate::values::dict::DictRef; +use crate::values::layout::heap::heap_type::Heap; +use crate::values::layout::value::Value; + +mod implementation; +mod tests; + +/// The DapAdapterClient is implemented by the user and provides functionality required by the DapAdapter. +pub trait DapAdapterClient: Debug + Send + Sync + 'static { + /// Indicates that the evaluation stopped at a breakpoint. + fn event_stopped(&self) -> crate::Result<()>; +} + +/// Information about the variable scopes. +pub struct ScopesInfo { + /// Number of local variables. + pub num_locals: usize, +} + +/// Information about a "structural variable" inspected by a debugger. +/// This currently follows DAP semantics: every complex object returned +/// by the debugger from the stack or from the heap can be broken down into "variables", +/// which is how the debugger manages structured data. +/// Something similar to LLDB's SBValue. +pub struct Variable { + /// Name of the variable. + pub name: PathSegment, + /// The value as a String. + pub value: String, + /// The variable's type. 
+
+/// Represents the scope of a variable.
+#[derive(Clone, Debug)]
+pub enum Scope {
+    /// A local variable's scope, identified by its name.
+    Local(String),
+    /// A scope determined by a particular expression.
+    #[allow(dead_code)]
+    Expr(String),
+}
+
+/// Represents a variable's "access path" for a local variable or watch expression.
+///
+/// # Examples
+///
+/// - For path `var1.field1[0]`, the scope is `Local("var1")` and the access path is `["field1", 0]`.
+/// - For path `someObject.method().something`, the scope is `Expr("someObject.method().something")`. The access path
+///   includes segments inside the evaluated result of `someObject.method().something` if it returns a complex object.
+#[derive(Clone, Debug)]
+pub struct VariablePath {
+    scope: Scope,
+    access_path: Vec<PathSegment>,
+}
+
+impl VariablePath {
+    /// creates new instance of VariablePath from a given expression
+    pub fn new_expression(expr: impl Into<String>) -> VariablePath {
+        VariablePath {
+            scope: Scope::Expr(expr.into()),
+            access_path: vec![],
+        }
+    }
+
+    /// creates new instance of VariablePath from a given local variable
+    pub fn new_local(scope: impl Into<String>) -> VariablePath {
+        VariablePath {
+            scope: Scope::Local(scope.into()),
+            access_path: vec![],
+        }
+    }
+
+    /// creates a child segment of given access path
+    pub fn make_child(&self, path: PathSegment) -> VariablePath {
+        // TODO(vmakaev): figure out if need to optimize memory usage and build persistent data structure
+        let mut access_path = self.access_path.clone();
+        access_path.push(path);
+
+        Self {
+            scope: self.scope.clone(),
+            access_path,
+        }
+    }
+}
+
+/// Represents a segment in an access expression.
+///
+/// For the given expression `name.field1.array\[0\]`, the segments are "field1", "array", and "0".
+#[derive(Clone, Debug)]
+pub enum PathSegment {
+    /// Represents a path segment that accesses array-like types (i.e., types indexable by numbers).
+    Index(i32),
+    /// Represents a path segment that accesses object-like types (i.e., types keyed by strings).
+    Attr(String),
+    /// Represents a path segment that accesses dict items by key.
+    Key(String),
+}
+
+impl Display for PathSegment {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            PathSegment::Index(x) => write!(f, "{}", x),
+            PathSegment::Attr(x) => f.write_str(x),
+            PathSegment::Key(x) => write!(f, "\"{}\"", x),
+        }
+    }
+}
+
+impl PathSegment {
+    fn get<'v>(&self, v: &Value<'v>, heap: &'v Heap) -> crate::Result<Value<'v>> {
+        match self {
+            PathSegment::Index(i) => v.at(heap.alloc(*i), heap).map_err(Into::into),
+            PathSegment::Attr(key) => v.get_attr_error(key.as_str(), heap),
+            PathSegment::Key(i) => v.at(heap.alloc(i.to_owned()), heap).map_err(Into::into),
+        }
+    }
+}
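Tying the two types together, the `var1.field1[0]` example from the doc comment above is assembled like so (sketch):

    let path = VariablePath::new_local("var1")
        .make_child(PathSegment::Attr("field1".to_owned()))
        .make_child(PathSegment::Index(0));
    // Watch expressions use the same shape, with an expression at the root:
    let watch = VariablePath::new_expression("someObject.method().something");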
+
+impl Variable {
+    /// Helper to convert to the DAP Variable type.
+    pub fn to_dap(self) -> debugserver_types::Variable {
+        debugserver_types::Variable {
+            name: self.name.to_string(),
+            value: self.value,
+            type_: Some(self.type_),
+            evaluate_name: None,
+            indexed_variables: None,
+            named_variables: None,
+            presentation_hint: None,
+            variables_reference: 0,
+        }
+    }
+
+    fn tuple_value_as_str<'v>(v: Value<'v>) -> String {
+        match v.length() {
+            Ok(size) if size > 0 => format!("<tuple, size={}>", size),
+            _ => "()".to_owned(),
+        }
+    }
+
+    fn list_value_as_str<'v>(v: Value<'v>) -> String {
+        match v.length() {
+            Ok(size) if size > 0 => format!("<list, size={}>", size),
+            _ => "[]".to_owned(),
+        }
+    }
+
+    fn dict_value_as_str<'v>(v: Value<'v>) -> String {
+        match v.length() {
+            Ok(size) if size > 0 => format!("<dict, size={}>", size),
+            _ => "{}".to_owned(),
+        }
+    }
+
+    fn struct_like_value_as_str<'v>(v: Value<'v>) -> String {
+        let attrs = v.dir_attr();
+        format!("<{}, attrs={}>", v.get_type(), attrs.len())
+    }
+
+    pub(crate) fn truncate_string(mut str_value: String, mut max_len: usize) -> String {
+        if str_value.len() > max_len {
+            // Find a valid UTF-8 cut-off point that's within our max length
+            while max_len > 0 && !str_value.is_char_boundary(max_len) {
+                max_len -= 1;
+            }
+            if max_len > 0 {
+                str_value.truncate(max_len);
+                str_value.push_str("...(truncated)");
+            }
+        }
+        str_value
+    }
+
+    pub(crate) fn value_as_str<'v>(v: &Value<'v>) -> String {
+        if Self::has_children(v) {
+            match v.get_type() {
+                "list" => Self::list_value_as_str(*v),
+                "tuple" => Self::tuple_value_as_str(*v),
+                "dict" => Self::dict_value_as_str(*v),
+                _ => Self::struct_like_value_as_str(*v),
+            }
+        } else {
+            match v.get_type() {
+                "function" => "<function>".to_owned(),
+                _ => {
+                    const MAX_STR_LEN: usize = 10000;
+                    Self::truncate_string(v.to_str(), MAX_STR_LEN)
+                }
+            }
+        }
+    }
+
+    /// creates a new instance of Variable from a given starlark value
+    pub fn from_value<'v>(name: PathSegment, v: Value<'v>) -> Self {
+        Self {
+            name,
+            value: Self::value_as_str(&v),
+            type_: v.get_type().to_owned(),
+            has_children: Self::has_children(&v),
+        }
+    }
+
+    pub(crate) fn has_children<'v>(v: &Value<'v>) -> bool {
+        match v.get_type() {
+            "function" | "never" | "NoneType" | "bool" | "int" | "float" | "string" => false,
+            "list" | "tuple" | "dict" => match v.length() {
+                Ok(length) => length > 0,
+                _ => false,
+            },
+            _ => true,
+        }
+    }
+}
+
+/// The kind of debugger step, used for next/stepin/stepout requests.
+#[derive(Debug, Clone, Dupe, Copy)]
+pub enum StepKind {
+    /// Step "into" the statement. This is generally used on a function call to stop in the
+    /// function call. In practice, this will stop on the next statement.
+    Into,
+    /// Step "over" the statement. This will stop on the next statement in the current function
+    /// after the current one (so will step "over" a function call).
+    Over,
+    /// Step "out" of the current function. This will stop on the next statement after this
+    /// function returns.
+    Out,
+}
+
+/// Information about variables in scope.
+pub struct VariablesInfo {
+    /// Local variables.
+    pub locals: Vec<Variable>,
+}
+
+/// Information about variable child "sub-values"
+#[derive(Default)]
+pub struct InspectVariableInfo {
+    /// Child variables.
+    pub sub_values: Vec<Variable>,
+}
+
+/// Information about expression evaluation result
+pub struct EvaluateExprInfo {
+    /// The value as a String.
+    pub result: String,
+    /// The variables type.
+    pub type_: String,
+    /// Indicates whether there are children available for a given variable.
+    pub has_children: bool,
+}
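The char-boundary walk in truncate_string matters for multi-byte strings. These two cases, taken from the tests later in this diff, show the cut backing up to a UTF-8 boundary:

    // "Hello, " is 7 bytes; '世' occupies bytes 7..10 and '界' bytes 10..13.
    assert_eq!(
        "Hello, ...(truncated)",
        // 8 would split '世', so max_len backs up to the boundary at 7.
        Variable::truncate_string("Hello, 世界".to_owned(), 8),
    );
    assert_eq!(
        "Hello, 世...(truncated)",
        // 10 is exactly the boundary after '世', so one CJK char survives.
        Variable::truncate_string("Hello, 世界".to_owned(), 10),
    );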
+
+impl InspectVariableInfo {
+    fn try_from_dict<'v>(value_dict: DictRef<'v>) -> crate::Result<Self> {
+        let key_segments = value_dict
+            .iter()
+            .map(|(key, value)| (PathSegment::Key(key.to_str()), value))
+            .collect::<Vec<_>>();
+
+        Ok(Self {
+            sub_values: key_segments
+                .into_iter()
+                .map(|(path_segment, value)| Variable::from_value(path_segment, value))
+                .collect::<Vec<_>>(),
+        })
+    }
+
+    fn try_from_struct_like<'v>(v: Value<'v>, heap: &'v Heap) -> crate::Result<Self> {
+        Ok(Self {
+            sub_values: v
+                .dir_attr()
+                .into_iter()
+                .map(|child_name| {
+                    let child_value = v.get_attr_error(&child_name, heap)?;
+                    let segment = PathSegment::Attr(child_name);
+                    Ok(Variable::from_value(segment, child_value))
+                })
+                .collect::<crate::Result<Vec<_>>>()?,
+        })
+    }
+
+    fn try_from_array_like<'v>(v: Value<'v>, heap: &'v Heap) -> crate::Result<Self> {
+        let len = v.length()?;
+        Ok(Self {
+            sub_values: (0..len)
+                .map(|i| {
+                    let index = heap.alloc(i);
+                    v.at(index, heap)
+                        .map(|v| Variable::from_value(PathSegment::Index(i), v))
+                })
+                .collect::<crate::Result<Vec<_>>>()?,
+        })
+    }
+
+    /// Tries to create InspectVariableInfo from a given starlark value.
+    pub fn try_from_value<'v>(v: Value<'v>, heap: &'v Heap) -> crate::Result<Self> {
+        match v.get_type() {
+            "dict" => Self::try_from_dict(
+                DictRef::from_value(v).ok_or(anyhow::Error::msg("not a dictionary"))?,
+            ),
+            "struct" => Self::try_from_struct_like(v, heap),
+            "list" | "tuple" => Self::try_from_array_like(v, heap),
+            "bool" | "int" | "float" | "string" => Ok(Default::default()),
+            "function" | "never" | "NoneType" => Ok(Default::default()),
+            // this branch will catch Ty::basic(name)
+            _ => Self::try_from_struct_like(v, heap),
+        }
+    }
+}
+
+impl EvaluateExprInfo {
+    /// Creates EvaluateExprInfo from a given starlark value.
+    pub fn from_value(v: &Value) -> Self {
+        Self {
+            result: Variable::value_as_str(v),
+            type_: v.get_type().to_owned(),
+            has_children: Variable::has_children(v),
+        }
+    }
+}
+
+/// The DapAdapter accepts DAP requests and updates the hooks in the running evaluator.
+pub trait DapAdapter: Debug + Send + 'static {
+    /// Sets multiple breakpoints for a file (and clears existing ones).
+    ///
+    /// See <https://microsoft.github.io/debug-adapter-protocol/specification#Requests_SetBreakpoints>
+    fn set_breakpoints(
+        &self,
+        source: &str,
+        breakpoints: &ResolvedBreakpoints,
+    ) -> anyhow::Result<()>;
+
+    /// Gets the top stack frame, may be None if entered from native.
+    fn top_frame(&self) -> anyhow::Result<Option<StackFrame>>;
+
+    /// Gets a stacktrace from the current execution state.
+    ///
+    /// See <https://microsoft.github.io/debug-adapter-protocol/specification#Requests_StackTrace>
+    fn stack_trace(&self, args: StackTraceArguments) -> anyhow::Result<StackTraceResponseBody>;
+
+    /// Gets the variables scope for a frame.
+    ///
+    /// See <https://microsoft.github.io/debug-adapter-protocol/specification#Requests_Scopes>
+    fn scopes(&self) -> anyhow::Result<ScopesInfo>;
+
+    /// Gets variables for the current scope
+    ///
+    /// See <https://microsoft.github.io/debug-adapter-protocol/specification#Requests_Variables>
+    fn variables(&self) -> anyhow::Result<VariablesInfo>;
+
+    /// Gets all child variables for the given access path
+    ///
+    /// See <https://microsoft.github.io/debug-adapter-protocol/specification#Requests_Variables>
+    fn inspect_variable(&self, path: VariablePath) -> anyhow::Result<InspectVariableInfo>;
+
+    /// Resumes execution.
+    ///
+    /// See <https://microsoft.github.io/debug-adapter-protocol/specification#Requests_Continue>
+    fn continue_(&self) -> anyhow::Result<()>;
+
+    /// Continues execution until some condition.
+    ///
+    /// See <https://microsoft.github.io/debug-adapter-protocol/specification#Requests_Next>
+    /// <https://microsoft.github.io/debug-adapter-protocol/specification#Requests_StepIn>
+    /// <https://microsoft.github.io/debug-adapter-protocol/specification#Requests_StepOut>
+    fn step(&self, kind: StepKind) -> anyhow::Result<()>;
+    /// Evaluates an expression in the context of the top-most frame.
+    ///
+    /// See <https://microsoft.github.io/debug-adapter-protocol/specification#Requests_Evaluate>
+    fn evaluate(&self, expr: &str) -> anyhow::Result<EvaluateExprInfo>;
+}
+
+#[derive(Debug, Clone, Hash, Eq, PartialEq)]
+pub(crate) struct Breakpoint {
+    span: FileSpan,
+    condition: Option<String>,
+}
+
+/// Breakpoints resolved to their spans.
+#[derive(Debug)]
+pub struct ResolvedBreakpoints(Vec<Option<Breakpoint>>);
+
+impl ResolvedBreakpoints {
+    /// Converts resolved breakpoints to a SetBreakpointsResponseBody.
The breakpoints should've been resolved from the corresponding SetBreakpointsRequest. + pub fn to_response(&self) -> SetBreakpointsResponseBody { + implementation::resolved_breakpoints_to_dap(self) + } +} + +/// Resolves the breakpoints to their FileSpan if possible. +pub fn resolve_breakpoints( + args: &SetBreakpointsArguments, + ast: &AstModule, +) -> anyhow::Result { + implementation::resolve_breakpoints(args, ast) +} + +/// This is sort of the evaluation side of the DapAdapter. It's expected that these are on different threads +/// (the starlark evaluation is single-threaded, so certainly the DapAdapter itself doesn't do interesting +/// things there). +pub trait DapAdapterEvalHook: Debug + Send + 'static { + /// Hooks the evaluator for this DapAdapter. + fn add_dap_hooks(self: Box, eval: &mut Evaluator<'_, '_, '_>); +} + +/// The DAP capabilities that the adapter supports. +pub fn dap_capabilities() -> Capabilities { + Capabilities { + supports_configuration_done_request: Some(true), + supports_evaluate_for_hovers: Some(true), + supports_set_variable: Some(true), + supports_step_in_targets_request: Some(true), + supports_conditional_breakpoints: Some(true), + ..Capabilities::default() + } +} + +/// Creates a DapAdapter and corresponding DapAdapterEvalHook. +pub fn prepare_dap_adapter( + client: Box, +) -> (impl DapAdapter, impl DapAdapterEvalHook) { + implementation::prepare_dap_adapter(client) +} diff --git a/starlark-rust/starlark/src/debug/adapter/implementation.rs b/starlark-rust/starlark/src/debug/adapter/implementation.rs index 81977be968122..558380b3d506d 100644 --- a/starlark-rust/starlark/src/debug/adapter/implementation.rs +++ b/starlark-rust/starlark/src/debug/adapter/implementation.rs @@ -27,8 +27,13 @@ use std::sync::Mutex; use debugserver_types::*; use dupe::Dupe; +use starlark_syntax::error::StarlarkResultExt; use starlark_syntax::slice_vec_ext::SliceExt; +use super::EvaluateExprInfo; +use super::InspectVariableInfo; +use super::PathSegment; +use super::VariablePath; use crate::codemap::FileSpan; use crate::codemap::FileSpanRef; use crate::codemap::Span; @@ -83,21 +88,31 @@ struct DapAdapterEvalHookImpl { fn evaluate_expr<'v>( state: &SharedAdapterState, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, expr: String, ) -> anyhow::Result> { // We don't want to trigger breakpoints during an evaluate, // not least because we currently don't allow reenterant evaluate state.disable_breakpoints.fetch_add(1, Ordering::SeqCst); // Don't use `?`, we need to reset disable_breakpoints. - let ast = AstModule::parse("interactive", expr, &Dialect::Extended); - let res = ast.and_then(|ast| eval.eval_statements(ast)); + let ast = AstModule::parse("interactive", expr, &Dialect::AllOptionsInternal); + // This technically loses structured access to the diagnostic information. However, it's + // completely unused, so there's not much point in converting all of this code to using + // `starlark::Error`, only for buck2 to then go and blindly turn it into a `anyhow::Error` + // anyway. 
+ let res = ast + .and_then(|ast| eval.eval_statements(ast)) + .into_anyhow_result(); state.disable_breakpoints.fetch_sub(1, Ordering::SeqCst); res } -impl<'a> BeforeStmtFuncDyn<'a> for DapAdapterEvalHookImpl { - fn call<'v>(&mut self, span_loc: FileSpanRef, eval: &mut Evaluator<'v, 'a>) { +impl<'a, 'e: 'a> BeforeStmtFuncDyn<'a, 'e> for DapAdapterEvalHookImpl { + fn call<'v>( + &mut self, + span_loc: FileSpanRef, + eval: &mut Evaluator<'v, 'a, 'e>, + ) -> crate::Result<()> { let stop = if self.state.disable_breakpoints.load(Ordering::SeqCst) > 0 { false } else { @@ -109,7 +124,11 @@ impl<'a> BeforeStmtFuncDyn<'a> for DapAdapterEvalHookImpl { .. }) => match evaluate_expr(&self.state, eval, condition.to_owned()) { Ok(v) => v.to_bool(), - _ => true, + Err(_) => { + // If failed to evaluate the condition, stop. + // TODO(nga): print the error. + true + } }, Some(..) => true, None => false, @@ -128,7 +147,7 @@ impl<'a> BeforeStmtFuncDyn<'a> for DapAdapterEvalHookImpl { if stop || step_stop { self.step = None; - self.state.client.event_stopped(); + self.state.client.event_stopped()?; loop { let msg = self.receiver.recv(); match msg.map(|msg| msg(span_loc, eval)) { @@ -145,6 +164,7 @@ impl<'a> BeforeStmtFuncDyn<'a> for DapAdapterEvalHookImpl { } } } + Ok(()) } } @@ -165,7 +185,7 @@ impl DapAdapterEvalHookImpl { } impl DapAdapterEvalHook for DapAdapterEvalHookImpl { - fn add_dap_hooks<'v, 'a>(self: Box, eval: &mut Evaluator<'v, 'a>) { + fn add_dap_hooks(self: Box, eval: &mut Evaluator<'_, '_, '_>) { eval.before_stmt_for_dap((self as Box).into()); } } @@ -310,16 +330,35 @@ impl DapAdapter for DapAdapterImpl { Ok(VariablesInfo { locals: vars .into_iter() - .map(|(name, value)| Variable { - name, - value: value.to_string(), - type_: value.get_type().to_owned(), - }) + .map(|(name, value)| Variable::from_value(PathSegment::Attr(name), value)) .collect(), }) })) } + fn inspect_variable(&self, path: VariablePath) -> anyhow::Result { + let state = self.state.dupe(); + self.with_ctx(Box::new(move |_span, eval| { + let access_path = &path.access_path; + let mut value = match &path.scope { + super::Scope::Local(name) => { + let mut vars = eval.local_variables(); + // since vars is owned within this closure scope we can just remove value from the map + // obtaining owned variable as the rest of the map will be dropped anyway + vars.shift_remove(name).ok_or_else(|| { + anyhow::Error::msg(format!("Local variable {} not found", name)) + }) + } + super::Scope::Expr(expr) => evaluate_expr(&state, eval, expr.to_owned()), + }?; + + for p in access_path.iter() { + value = p.get(&value, eval.heap()).into_anyhow_result()?; + } + InspectVariableInfo::try_from_value(value, eval.heap()).into_anyhow_result() + })) + } + fn continue_(&self) -> anyhow::Result<()> { self.inject_next(Next::Continue); Ok(()) @@ -330,22 +369,14 @@ impl DapAdapter for DapAdapterImpl { Ok(()) } - fn evaluate(&self, expr: &str) -> anyhow::Result { + fn evaluate(&self, expr: &str) -> anyhow::Result { let state = self.state.dupe(); let expression = expr.to_owned(); self.with_ctx(Box::new(move |_, eval| { - let s = match evaluate_expr(&state, eval, expression.clone()) { - Err(e) => format!("{:#}", e), - Ok(v) => v.to_string(), - }; - Ok(EvaluateResponseBody { - indexed_variables: None, - named_variables: None, - presentation_hint: None, - result: s, - type_: None, - variables_reference: 0.0, - }) + match evaluate_expr(&state, eval, expression.clone()) { + Err(e) => Err(e), + Ok(v) => Ok(EvaluateExprInfo::from_value(&v)), + } })) } } diff --git 
a/starlark-rust/starlark/src/debug/adapter/mod.rs b/starlark-rust/starlark/src/debug/adapter/mod.rs deleted file mode 100644 index 7da2ea8102935..0000000000000 --- a/starlark-rust/starlark/src/debug/adapter/mod.rs +++ /dev/null @@ -1,188 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! Provides utilities useful for implementation of the debug adapter protocol (DAP, see -//! ), primarily the DapAdapter/DapAdapterEvalHook -//! that provide for debugging a starlark Evaluation. - -use std::fmt::Debug; - -use debugserver_types::*; -use dupe::Dupe; - -use crate::codemap::FileSpan; -use crate::eval::Evaluator; -use crate::syntax::AstModule; - -mod implementation; -mod tests; - -/// The DapAdapterClient is implemented by the user and provides functionality required by the DapAdapter. -pub trait DapAdapterClient: Debug + Send + Sync + 'static { - /// Indicates that the evaluation stopped at a breakpoint. - fn event_stopped(&self); -} - -/// Information about the variables scopes -pub struct ScopesInfo { - /// Number of local variables. - pub num_locals: usize, -} - -/// Information about a variable. -pub struct Variable { - /// Name of the variable. - pub name: String, - /// The value as a String. - pub value: String, - /// The variables type. - pub type_: String, -} - -impl Variable { - /// Helper to convert to the DAP Variable type. - pub fn to_dap(self) -> debugserver_types::Variable { - debugserver_types::Variable { - name: self.name, - value: self.value, - type_: Some(self.type_), - evaluate_name: None, - indexed_variables: None, - named_variables: None, - presentation_hint: None, - variables_reference: 0, - } - } -} - -/// The kind of debugger step, used for next/stepin/stepout requests. -#[derive(Debug, Clone, Dupe, Copy)] -pub enum StepKind { - /// Step "into" the statement. This is generally used on a function call to stop in the - /// function call. In practice, this will stop on the next statement. - Into, - /// Step "over" the statement. This will stop on the next statement in the current function - /// after the current one (so will step "over" a function call). - Over, - /// Step "out" of the current function. This will stop on the next statement after this - /// function returns. - Out, -} - -/// Information about variables in scope. -pub struct VariablesInfo { - /// Local variables. - pub locals: Vec, -} - -/// The DapAdapter accepts DAP requests and updates the hooks in the running evaluator. -pub trait DapAdapter: Debug + Send + 'static { - /// Sets multiple breakpoints for a file (and clears existing ones). - /// - /// See - fn set_breakpoints( - &self, - source: &str, - breakpoints: &ResolvedBreakpoints, - ) -> anyhow::Result<()>; - - /// Gets the top stack frame, may be None if entered from native. - fn top_frame(&self) -> anyhow::Result>; - - /// Gets a stacktrace from the current execution state. 
- /// - /// See - fn stack_trace(&self, args: StackTraceArguments) -> anyhow::Result; - - /// Gets the variables scope for a frame. - /// - /// See - fn scopes(&self) -> anyhow::Result; - - /// Gets child variables for a variable reference. - /// - /// See - fn variables(&self) -> anyhow::Result; - - /// Resumes execution. - /// - /// See - fn continue_(&self) -> anyhow::Result<()>; - - /// Continues execution until some condition. - /// - /// See - /// - /// - fn step(&self, kind: StepKind) -> anyhow::Result<()>; - /// Evaluates in expression in the context of the top-most frame. - /// - /// See - fn evaluate(&self, expr: &str) -> anyhow::Result; -} - -#[derive(Debug, Clone, Hash, Eq, PartialEq)] -pub(crate) struct Breakpoint { - span: FileSpan, - condition: Option, -} - -/// Breakpoints resolved to their spans. -#[derive(Debug)] -pub struct ResolvedBreakpoints(Vec>); - -impl ResolvedBreakpoints { - /// Converts resolved breakpoints to a SetBreakpointsResponseBody. The breakpoints should've been resolved from the corresponding SetBreakpointsRequest. - pub fn to_response(&self) -> SetBreakpointsResponseBody { - implementation::resolved_breakpoints_to_dap(self) - } -} - -/// Resolves the breakpoints to their FileSpan if possible. -pub fn resolve_breakpoints( - args: &SetBreakpointsArguments, - ast: &AstModule, -) -> anyhow::Result { - implementation::resolve_breakpoints(args, ast) -} - -/// This is sort of the evaluation side of the DapAdapter. It's expected that these are on different threads -/// (the starlark evaluation is single-threaded, so certainly the DapAdapter itself doesn't do interesting -/// things there). -pub trait DapAdapterEvalHook: Debug + Send + 'static { - /// Hooks the evaluator for this DapAdapter. - fn add_dap_hooks<'v, 'a>(self: Box, eval: &mut Evaluator<'v, 'a>); -} - -/// The DAP capabilities that the adapter supports. -pub fn dap_capabilities() -> Capabilities { - Capabilities { - supports_configuration_done_request: Some(true), - supports_evaluate_for_hovers: Some(true), - supports_set_variable: Some(true), - supports_step_in_targets_request: Some(true), - supports_conditional_breakpoints: Some(true), - ..Capabilities::default() - } -} - -/// Creates a DapAdapter and corresponding DapAdapterEvalHook. 
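Taken together, the new adapter.rs surface above is wired up roughly as follows. This is a minimal sketch, with `Client` and `run` being hypothetical names; it assumes the types are re-exported from `starlark::debug` and that `starlark::Result` is the crate's result alias:

    use std::thread;

    use starlark::debug::{prepare_dap_adapter, DapAdapterClient, DapAdapterEvalHook};
    use starlark::environment::{Globals, Module};
    use starlark::eval::Evaluator;
    use starlark::syntax::{AstModule, Dialect};

    #[derive(Debug)]
    struct Client; // hypothetical: a real client forwards "stopped" events to the IDE

    impl DapAdapterClient for Client {
        fn event_stopped(&self) -> starlark::Result<()> {
            Ok(()) // returning Err cancels the paused evaluation
        }
    }

    fn run() -> anyhow::Result<()> {
        let (adapter, eval_hook) = prepare_dap_adapter(Box::new(Client));
        let ast = AstModule::parse(
            "test.bzl",
            "x = [1, 2, 3]\nprint(x)\n".to_owned(),
            &Dialect::Standard,
        )?;
        // In a real server, breakpoints arrive as SetBreakpointsArguments and are
        // mapped to spans with starlark::debug::resolve_breakpoints before calling
        // adapter.set_breakpoints("test.bzl", &resolved).
        thread::spawn(move || {
            // The evaluation runs on its own thread; the hook parks it at breakpoints.
            let module = Module::new();
            let globals = Globals::standard();
            let mut eval = Evaluator::new(&module);
            Box::new(eval_hook).add_dap_hooks(&mut eval);
            let _ = eval.eval_module(ast, &globals);
        });
        // ...service DAP requests through `adapter`: stack_trace, variables,
        // evaluate, continue_, step, ...
        let _ = &adapter;
        Ok(())
    }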
-pub fn prepare_dap_adapter( - client: Box, -) -> (impl DapAdapter, impl DapAdapterEvalHook) { - implementation::prepare_dap_adapter(client) -} diff --git a/starlark-rust/starlark/src/debug/adapter/tests.rs b/starlark-rust/starlark/src/debug/adapter/tests.rs index c3fd12249b080..c47e92bfbad57 100644 --- a/starlark-rust/starlark/src/debug/adapter/tests.rs +++ b/starlark-rust/starlark/src/debug/adapter/tests.rs @@ -18,9 +18,11 @@ #[cfg(test)] mod t { use std::collections::HashMap; + use std::hint; use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering; use std::sync::Arc; + use std::thread; use std::thread::ScopedJoinHandle; use std::time::Duration; use std::time::Instant; @@ -35,6 +37,7 @@ mod t { use crate::debug::DapAdapterClient; use crate::debug::DapAdapterEvalHook; use crate::debug::StepKind; + use crate::debug::VariablePath; use crate::environment::GlobalsBuilder; use crate::environment::Module; use crate::eval::Evaluator; @@ -46,23 +49,25 @@ mod t { #[derive(Debug)] struct Client { - breakpoints_hit: Arc, + controller: BreakpointController, } impl Client { - pub fn new(breakpoints_hit: Arc) -> Self { - Self { breakpoints_hit } + pub fn new(controller: BreakpointController) -> Self { + Self { controller } } } impl DapAdapterClient for Client { - fn event_stopped(&self) { + fn event_stopped(&self) -> crate::Result<()> { println!("stopped!"); - self.breakpoints_hit.fetch_add(1, Ordering::SeqCst); + self.controller.eval_stopped() } } + #[derive(Debug, Clone, Dupe)] struct BreakpointController { + /// The number of breakpoint hits or 999999 if cancelled. breakpoints_hit: Arc, } @@ -74,20 +79,58 @@ mod t { } fn get_client(&self) -> Box { - Box::new(Client::new(self.breakpoints_hit.dupe())) + Box::new(Client::new(self.dupe())) + } + + fn eval_stopped(&self) -> crate::Result<()> { + loop { + let breakpoints_hit = self.breakpoints_hit.load(Ordering::SeqCst); + if breakpoints_hit == 999999 { + eprintln!("eval_stopped: cancelled"); + return Err(anyhow::anyhow!("cancelled").into()); + } + if self.breakpoints_hit.compare_exchange( + breakpoints_hit, + breakpoints_hit + 1, + Ordering::SeqCst, + Ordering::SeqCst, + ) == Ok(breakpoints_hit) + { + return Ok(()); + } + } } fn wait_for_eval_stopped(&self, breakpoint_count: usize, timeout: Duration) { let now = Instant::now(); - while self.breakpoints_hit.load(Ordering::SeqCst) != breakpoint_count { + loop { + let breakpoints_hit = self.breakpoints_hit.load(Ordering::SeqCst); + assert_ne!(breakpoints_hit, 999999, "cancelled"); + assert!(breakpoints_hit <= breakpoint_count); + if breakpoints_hit == breakpoint_count { + break; + } if now.elapsed() > timeout { panic!("didn't hit expected breakpoint"); } - std::hint::spin_loop(); + hint::spin_loop(); } } } + struct BreakpointControllerDropGuard { + controller: BreakpointController, + } + + impl Drop for BreakpointControllerDropGuard { + fn drop(&mut self) { + eprintln!("dropping controller"); + self.controller + .breakpoints_hit + .store(999999, Ordering::SeqCst); + } + } + fn breakpoint(line: i64, condition: Option<&str>) -> SourceBreakpoint { SourceBreakpoint { column: None, @@ -123,15 +166,15 @@ mod t { fn eval_with_hook( ast: AstModule, - hook: impl DapAdapterEvalHook, - ) -> anyhow::Result { + hook: Box, + ) -> crate::Result { let modules = HashMap::new(); let loader = ReturnFileLoader { modules: &modules }; let globals = GlobalsBuilder::extended().with(test_functions).build(); let env = Module::new(); let res = { let mut eval = Evaluator::new(&env); - Box::new(hook).add_dap_hooks(&mut 
eval); + hook.add_dap_hooks(&mut eval); eval.set_loader(&loader); eval.eval_module(ast, &globals)? }; @@ -156,26 +199,47 @@ mod t { static TIMEOUT: Duration = Duration::from_secs(10); + fn dap_test_template<'env, F, R>(f: F) -> crate::Result + where + F: for<'scope> FnOnce( + &'scope thread::Scope<'scope, 'env>, + BreakpointController, + Box, + Box, + ) -> crate::Result, + { + let controller = BreakpointController::new(); + + let _guard = BreakpointControllerDropGuard { + controller: controller.dupe(), + }; + + let (adapter, eval_hook) = prepare_dap_adapter(controller.get_client()); + thread::scope(|s| f(s, controller, Box::new(adapter), Box::new(eval_hook))) + } + #[test] - fn test_breakpoint() -> anyhow::Result<()> { + fn test_breakpoint() -> crate::Result<()> { if is_wasm() { // `thread::scope` doesn't work in wasm. return Ok(()); } - let controller = BreakpointController::new(); - let (adapter, eval_hook) = prepare_dap_adapter(controller.get_client()); let file_contents = " x = [1, 2, 3] print(x) "; - std::thread::scope(|s| { - let ast = AstModule::parse("test.bzl", file_contents.to_owned(), &Dialect::Extended)?; + dap_test_template(|s, controller, adapter, eval_hook| { + let ast = AstModule::parse( + "test.bzl", + file_contents.to_owned(), + &Dialect::AllOptionsInternal, + )?; let breakpoints = resolve_breakpoints(&breakpoints_args("test.bzl", &[(3, None)]), &ast)?; adapter.set_breakpoints("test.bzl", &breakpoints)?; let eval_result = - s.spawn(move || -> anyhow::Result<_> { eval_with_hook(ast, eval_hook) }); + s.spawn(move || -> crate::Result<_> { eval_with_hook(ast, eval_hook) }); controller.wait_for_eval_stopped(1, TIMEOUT); // TODO(cjhopman): we currently hit breakpoints on top-level statements twice (once for the gc bytecode, once for the actual statement). 
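            // While stopped, the evaluator thread is parked inside the before-statement
            // hook, blocking on the adapter's request channel; requests made against
            // `adapter` here are serviced from that parked frame, and continue_() below
            // resumes execution.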
adapter.continue_()?; @@ -189,48 +253,52 @@ print(x) } #[test] - fn test_breakpoint_with_failing_condition() -> anyhow::Result<()> { + fn test_breakpoint_with_failing_condition() -> crate::Result<()> { if is_wasm() { return Ok(()); } - let controller = BreakpointController::new(); - let (adapter, eval_hook) = prepare_dap_adapter(controller.get_client()); let file_contents = " x = [1, 2, 3] print(x) "; - std::thread::scope(|s| { - let ast = AstModule::parse("test.bzl", file_contents.to_owned(), &Dialect::Extended)?; + dap_test_template(|s, _, adapter, eval_hook| { + let ast = AstModule::parse( + "test.bzl", + file_contents.to_owned(), + &Dialect::AllOptionsInternal, + )?; let breakpoints = resolve_breakpoints(&breakpoints_args("test.bzl", &[(3, Some("5 in x"))]), &ast)?; adapter.set_breakpoints("test.bzl", &breakpoints)?; let eval_result = - s.spawn(move || -> anyhow::Result<_> { eval_with_hook(ast, eval_hook) }); + s.spawn(move || -> crate::Result<_> { eval_with_hook(ast, eval_hook) }); join_timeout(eval_result, TIMEOUT)?; Ok(()) }) } #[test] - fn test_breakpoint_with_passing_condition() -> anyhow::Result<()> { + fn test_breakpoint_with_passing_condition() -> crate::Result<()> { if is_wasm() { return Ok(()); } - let controller = BreakpointController::new(); - let (adapter, eval_hook) = prepare_dap_adapter(controller.get_client()); let file_contents = " x = [1, 2, 3] print(x) "; - std::thread::scope(|s| { - let ast = AstModule::parse("test.bzl", file_contents.to_owned(), &Dialect::Extended)?; + dap_test_template(|s, controller, adapter, eval_hook| { + let ast = AstModule::parse( + "test.bzl", + file_contents.to_owned(), + &Dialect::AllOptionsInternal, + )?; let breakpoints = resolve_breakpoints(&breakpoints_args("test.bzl", &[(3, Some("2 in x"))]), &ast)?; adapter.set_breakpoints("test.bzl", &breakpoints)?; let eval_result = - s.spawn(move || -> anyhow::Result<_> { eval_with_hook(ast, eval_hook) }); + s.spawn(move || -> crate::Result<_> { eval_with_hook(ast, eval_hook) }); controller.wait_for_eval_stopped(1, TIMEOUT); adapter.continue_()?; // TODO(cjhopman): we currently hit breakpoints on top-level statements twice (once for the gc bytecode, once for the actual statement). @@ -243,13 +311,11 @@ print(x) } #[test] - fn test_step_over() -> anyhow::Result<()> { + fn test_step_over() -> crate::Result<()> { if is_wasm() { return Ok(()); } - let controller = BreakpointController::new(); - let (adapter, eval_hook) = prepare_dap_adapter(controller.get_client()); let file_contents = " def adjust(y): y[0] += 1 @@ -260,29 +326,39 @@ adjust(x) # line 7 adjust(x) print(x) "; - std::thread::scope(|s| { - let ast = AstModule::parse("test.bzl", file_contents.to_owned(), &Dialect::Extended)?; + dap_test_template(|s, controller, adapter, eval_hook| { + let ast = AstModule::parse( + "test.bzl", + file_contents.to_owned(), + &Dialect::AllOptionsInternal, + )?; let breakpoints = resolve_breakpoints(&breakpoints_args("test.bzl", &[(7, None)]), &ast)?; adapter.set_breakpoints("test.bzl", &breakpoints)?; let eval_result = - s.spawn(move || -> anyhow::Result<_> { eval_with_hook(ast, eval_hook) }); + s.spawn(move || -> crate::Result<_> { eval_with_hook(ast, eval_hook) }); controller.wait_for_eval_stopped(1, TIMEOUT); // TODO(cjhopman): we currently hit breakpoints on top-level statements twice (once for the gc bytecode, once for the actual statement). 
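            // Note: adapter.evaluate("x[0]") parses the expression as a one-off module
            // and evaluates it in the paused frame; an int renders as plain "1" with
            // has_children == false, hence the element-wise assertions below.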
adapter.continue_()?; controller.wait_for_eval_stopped(2, TIMEOUT); - assert_eq!("[1, 2, 3]", adapter.evaluate("x")?.result); + assert_eq!("1", adapter.evaluate("x[0]")?.result); + assert_eq!("2", adapter.evaluate("x[1]")?.result); + assert_eq!("3", adapter.evaluate("x[2]")?.result); adapter.step(StepKind::Over)?; controller.wait_for_eval_stopped(3, TIMEOUT); - assert_eq!("[2, 3, 4]", adapter.evaluate("x")?.result); + assert_eq!("2", adapter.evaluate("x[0]")?.result); + assert_eq!("3", adapter.evaluate("x[1]")?.result); + assert_eq!("4", adapter.evaluate("x[2]")?.result); // TODO(cjhopman): we currently hit breakpoints on top-level statements twice (once for the gc bytecode, once for the actual statement). adapter.step(StepKind::Over)?; controller.wait_for_eval_stopped(4, TIMEOUT); adapter.step(StepKind::Over)?; controller.wait_for_eval_stopped(5, TIMEOUT); - assert_eq!("[3, 4, 5]", adapter.evaluate("x")?.result); + assert_eq!("3", adapter.evaluate("x[0]")?.result); + assert_eq!("4", adapter.evaluate("x[1]")?.result); + assert_eq!("5", adapter.evaluate("x[2]")?.result); adapter.continue_()?; join_timeout(eval_result, TIMEOUT)?; Ok(()) @@ -290,13 +366,11 @@ print(x) } #[test] - fn test_step_into() -> anyhow::Result<()> { + fn test_step_into() -> crate::Result<()> { if is_wasm() { return Ok(()); } - let controller = BreakpointController::new(); - let (adapter, eval_hook) = prepare_dap_adapter(controller.get_client()); let file_contents = " def adjust(y): y[0] += 1 @@ -307,35 +381,48 @@ adjust(x) # line 7 adjust(x) print(x) "; - std::thread::scope(|s| { - let ast = AstModule::parse("test.bzl", file_contents.to_owned(), &Dialect::Extended)?; + dap_test_template(|s, controller, adapter, eval_hook| { + let ast = AstModule::parse( + "test.bzl", + file_contents.to_owned(), + &Dialect::AllOptionsInternal, + )?; let breakpoints = resolve_breakpoints(&breakpoints_args("test.bzl", &[(7, None)]), &ast)?; adapter.set_breakpoints("test.bzl", &breakpoints)?; let eval_result = - s.spawn(move || -> anyhow::Result<_> { eval_with_hook(ast, eval_hook) }); + s.spawn(move || -> crate::Result<_> { eval_with_hook(ast, eval_hook) }); controller.wait_for_eval_stopped(1, TIMEOUT); // TODO(cjhopman): we currently hit breakpoints on top-level statements twice (once for the gc bytecode, once for the actual statement). 
adapter.continue_()?; controller.wait_for_eval_stopped(2, TIMEOUT); - assert_eq!("[1, 2, 3]", adapter.evaluate("x")?.result); + assert_eq!("1", adapter.evaluate("x[0]")?.result); + assert_eq!("2", adapter.evaluate("x[1]")?.result); + assert_eq!("3", adapter.evaluate("x[2]")?.result); // into adjust adapter.step(StepKind::Into)?; controller.wait_for_eval_stopped(3, TIMEOUT); - assert_eq!("[1, 2, 3]", adapter.evaluate("y")?.result); + assert_eq!("1", adapter.evaluate("y[0]")?.result); + assert_eq!("2", adapter.evaluate("y[1]")?.result); + assert_eq!("3", adapter.evaluate("y[2]")?.result); // into should go to next line adapter.step(StepKind::Into)?; controller.wait_for_eval_stopped(4, TIMEOUT); - assert_eq!("[2, 2, 3]", adapter.evaluate("y")?.result); + assert_eq!("2", adapter.evaluate("y[0]")?.result); + assert_eq!("2", adapter.evaluate("y[1]")?.result); + assert_eq!("3", adapter.evaluate("y[2]")?.result); + // two more intos should get us out of the function call adapter.step(StepKind::Into)?; controller.wait_for_eval_stopped(5, TIMEOUT); adapter.step(StepKind::Into)?; controller.wait_for_eval_stopped(6, TIMEOUT); - assert_eq!("[2, 3, 4]", adapter.evaluate("x")?.result); + assert_eq!("2", adapter.evaluate("x[0]")?.result); + assert_eq!("3", adapter.evaluate("x[1]")?.result); + assert_eq!("4", adapter.evaluate("x[2]")?.result); // and once more back into the function adapter.step(StepKind::Into)?; @@ -345,7 +432,9 @@ print(x) adapter.step(StepKind::Into)?; controller.wait_for_eval_stopped(8, TIMEOUT); - assert_eq!("[2, 3, 4]", adapter.evaluate("y")?.result); + assert_eq!("2", adapter.evaluate("y[0]")?.result); + assert_eq!("3", adapter.evaluate("y[1]")?.result); + assert_eq!("4", adapter.evaluate("y[2]")?.result); adapter.continue_()?; join_timeout(eval_result, TIMEOUT)?; @@ -354,13 +443,11 @@ print(x) } #[test] - fn test_step_out() -> anyhow::Result<()> { + fn test_step_out() -> crate::Result<()> { if is_wasm() { return Ok(()); } - let controller = BreakpointController::new(); - let (adapter, eval_hook) = prepare_dap_adapter(controller.get_client()); let file_contents = " def adjust(y): y[0] += 1 @@ -371,31 +458,43 @@ adjust(x) # line 7 adjust(x) print(x) "; - std::thread::scope(|s| { - let ast = AstModule::parse("test.bzl", file_contents.to_owned(), &Dialect::Extended)?; + dap_test_template(|s, controller, adapter, eval_hook| { + let ast = AstModule::parse( + "test.bzl", + file_contents.to_owned(), + &Dialect::AllOptionsInternal, + )?; let breakpoints = resolve_breakpoints(&breakpoints_args("test.bzl", &[(4, None)]), &ast)?; adapter.set_breakpoints("test.bzl", &breakpoints)?; let eval_result = - s.spawn(move || -> anyhow::Result<_> { eval_with_hook(ast, eval_hook) }); + s.spawn(move || -> crate::Result<_> { eval_with_hook(ast, eval_hook) }); // should break on the first time hitting line 4 controller.wait_for_eval_stopped(1, TIMEOUT); - assert_eq!("[2, 2, 3]", adapter.evaluate("y")?.result); + assert_eq!("2", adapter.evaluate("y[0]")?.result); + assert_eq!("2", adapter.evaluate("y[1]")?.result); + assert_eq!("3", adapter.evaluate("y[2]")?.result); // step out should take us to line 8 adapter.step(StepKind::Out)?; controller.wait_for_eval_stopped(2, TIMEOUT); - assert_eq!("[2, 3, 4]", adapter.evaluate("x")?.result); + assert_eq!("2", adapter.evaluate("x[0]")?.result); + assert_eq!("3", adapter.evaluate("x[1]")?.result); + assert_eq!("4", adapter.evaluate("x[2]")?.result); // step out should actually hit the breakpoint at 4 first (before getting out) adapter.step(StepKind::Out)?; 
controller.wait_for_eval_stopped(3, TIMEOUT); - assert_eq!("[3, 3, 4]", adapter.evaluate("y")?.result); + assert_eq!("3", adapter.evaluate("y[0]")?.result); + assert_eq!("3", adapter.evaluate("y[1]")?.result); + assert_eq!("4", adapter.evaluate("y[2]")?.result); // step out should get out to the print adapter.step(StepKind::Out)?; controller.wait_for_eval_stopped(4, TIMEOUT); - assert_eq!("[3, 4, 5]", adapter.evaluate("x")?.result); + assert_eq!("3", adapter.evaluate("x[0]")?.result); + assert_eq!("4", adapter.evaluate("x[1]")?.result); + assert_eq!("5", adapter.evaluate("x[2]")?.result); // one more out should be equivalent to continue adapter.step(StepKind::Out)?; @@ -403,4 +502,234 @@ print(x) Ok(()) }) } + + #[test] + fn test_local_variables() -> crate::Result<()> { + if is_wasm() { + return Ok(()); + } + + let file_contents = " +def do(): + a = struct( + f1 = \"1\", + f2 = 123, + ) + arr = [1, 2, 3, 4, 6, \"234\", 123.32] + t = (1, 2) + d = dict(a = 1, b = \"2\") + empty_dict = {} + empty_list = [] + empty_tuple = () + return d # line 13 +print(do()) + "; + let result = dap_test_template(|s, controller, adapter, eval_hook| { + let ast = AstModule::parse( + "test.bzl", + file_contents.to_owned(), + &Dialect::AllOptionsInternal, + )?; + let breakpoints = + resolve_breakpoints(&breakpoints_args("test.bzl", &[(13, None)]), &ast)?; + adapter.set_breakpoints("test.bzl", &breakpoints)?; + let eval_result = + s.spawn(move || -> crate::Result<_> { eval_with_hook(ast, eval_hook) }); + controller.wait_for_eval_stopped(1, TIMEOUT); + let result = adapter.variables(); + adapter.continue_()?; + join_timeout(eval_result, TIMEOUT)?; + result.map_err(crate::Error::from) + })?; + // It's easier to handle errors outside of thread::scope block as the test is quite flaky + // and hangs in case error propagates + assert_eq!( + vec![ + ("a".to_owned(), String::from(""), true), + ("arr".to_owned(), String::from(""), true), + ("t".to_owned(), String::from(""), true), + ("d".to_owned(), String::from(""), true), + ("empty_dict".to_owned(), String::from("{}"), false), + ("empty_list".to_owned(), String::from("[]"), false), + ("empty_tuple".to_owned(), String::from("()"), false), + ], + result + .locals + .into_iter() + .map(|v| (v.name.to_string(), v.value, v.has_children)) + .collect::>() + ); + + Ok(()) + } + + #[test] + fn test_inspect_variables() -> crate::Result<()> { + if is_wasm() { + return Ok(()); + } + + let file_contents = " +def do(): + a = struct( + f1 = \"1\", + f2 = 123, + ) + arr = [1, 2, 3, 4, 6, \"234\", 123.32] + t = (1, 2) + d = dict(a = 1, b = \"2\") + empty_dict = {} + empty_list = [] + empty_tuple = () + return d # line 13 +print(do()) + "; + let result = dap_test_template(|s, controller, adapter, eval_hook| { + let mut result = Vec::new(); + let ast = AstModule::parse( + "test.bzl", + file_contents.to_owned(), + &Dialect::AllOptionsInternal, + )?; + let breakpoints = + resolve_breakpoints(&breakpoints_args("test.bzl", &[(13, None)]), &ast)?; + adapter.set_breakpoints("test.bzl", &breakpoints)?; + let eval_result = + s.spawn(move || -> crate::Result<_> { eval_with_hook(ast, eval_hook) }); + controller.wait_for_eval_stopped(1, TIMEOUT); + result.extend([ + adapter.inspect_variable(VariablePath::new_local("a")), + adapter.inspect_variable(VariablePath::new_local("arr")), + adapter.inspect_variable(VariablePath::new_local("t")), + adapter.inspect_variable(VariablePath::new_local("d")), + ]); + adapter.continue_()?; + join_timeout(eval_result, TIMEOUT)?; + crate::Result::Ok(result) + })? 
+ .into_iter() + .collect::>>()?; + + // It's easier to handle errors outside of thread::scope block as the test is quite flaky + // and hangs in case error propagates + + assert_variable("f1", "1", false, &result[0].sub_values[0]); + assert_variable("f2", "123", false, &result[0].sub_values[1]); + assert_variable("0", "1", false, &result[1].sub_values[0]); + assert_variable("5", "234", false, &result[1].sub_values[5]); + assert_variable("0", "1", false, &result[2].sub_values[0]); + assert_variable("1", "2", false, &result[2].sub_values[1]); + assert_variable("\"a\"", "1", false, &result[3].sub_values[0]); + assert_variable("\"b\"", "2", false, &result[3].sub_values[1]); + Ok(()) + } + + #[test] + fn test_evaluate_expression() -> crate::Result<()> { + if is_wasm() { + return Ok(()); + } + + let file_contents = " +def do(): + s = struct( + inner = struct( + inner = struct( + value = \"more_inner\" + ), + value = \"inner\", + arr = [dict(a = 1, b = \"2\"), 1337] + ) + ) + return s # line 12 +print(do()) + "; + let result = dap_test_template(|s, controller, adapter, eval_hook| { + let mut result = Vec::new(); + let ast = AstModule::parse( + "test.bzl", + file_contents.to_owned(), + &Dialect::AllOptionsInternal, + )?; + let breakpoints = + resolve_breakpoints(&breakpoints_args("test.bzl", &[(12, None)]), &ast)?; + adapter.set_breakpoints("test.bzl", &breakpoints)?; + let eval_result = + s.spawn(move || -> crate::Result<_> { eval_with_hook(ast, eval_hook) }); + controller.wait_for_eval_stopped(1, TIMEOUT); + result.extend([ + adapter.evaluate("s.inner.value"), + adapter.evaluate("s.inner.inner.value"), + adapter.evaluate("s.inner.arr[0]"), + adapter.evaluate("s.inner.arr[0][\"a\"]"), + adapter.evaluate("s.inner.arr[1]"), + ]); + adapter.continue_()?; + join_timeout(eval_result, TIMEOUT)?; + crate::Result::Ok(result) + })? 
+ .into_iter() + .collect::>>()?; + + // It's easier to handle errors outside of thread::scope block as the test is quite flaky + // and hangs in case error propagates + assert_eq!( + vec![ + ("inner", false), + ("more_inner", false), + ("", true), + ("1", false), + ("1337", false), + ], + result + .iter() + .map(|v| (v.result.as_str(), v.has_children)) + .collect::>() + ); + + Ok(()) + } + + fn assert_variable( + name: &str, + value: &str, + has_children: bool, + var: &crate::debug::adapter::Variable, + ) { + assert_eq!( + (name.to_owned(), value, has_children), + (var.name.to_string(), var.value.as_str(), var.has_children) + ); + } + + #[test] + pub fn test_truncate_string() { + assert_eq!( + "Hello", + crate::debug::adapter::Variable::truncate_string("Hello".to_owned(), 10) + ); + assert_eq!( + "Hello", + crate::debug::adapter::Variable::truncate_string("Hello".to_owned(), 5) + ); + assert_eq!( + "Hello, ...(truncated)", + // A string that should be truncated at a character boundary + crate::debug::adapter::Variable::truncate_string("Hello, 世界".to_owned(), 7) + ); + assert_eq!( + "Hello, ...(truncated)", + // A string that would be truncated within a multi-byte character + crate::debug::adapter::Variable::truncate_string("Hello, 世界".to_owned(), 8) + ); + assert_eq!( + "Hello, ...(truncated)", + // A string that should be truncated just before a multi-byte character + crate::debug::adapter::Variable::truncate_string("Hello, 世界".to_owned(), 9) + ); + assert_eq!( + "Hello, 世...(truncated)", + crate::debug::adapter::Variable::truncate_string("Hello, 世界".to_owned(), 10) + ); + } } diff --git a/starlark-rust/starlark/src/debug/evaluate.rs b/starlark-rust/starlark/src/debug/evaluate.rs index edc05796f7b51..c78cbe1e30a25 100644 --- a/starlark-rust/starlark/src/debug/evaluate.rs +++ b/starlark-rust/starlark/src/debug/evaluate.rs @@ -23,14 +23,14 @@ use crate::syntax::AstModule; use crate::values::FrozenStringValue; use crate::values::Value; -impl<'v, 'a> Evaluator<'v, 'a> { +impl<'v> Evaluator<'v, '_, '_> { /// Evaluate statements in the existing context. This function is designed for debugging, /// not production use. /// /// There are lots of health warnings on this code. Might not work with frozen modules, unassigned variables, /// nested definitions etc. It would be a bad idea to rely on the results of continued execution /// after evaluating stuff randomly. - pub fn eval_statements(&mut self, statements: AstModule) -> anyhow::Result> { + pub fn eval_statements(&mut self, statements: AstModule) -> crate::Result> { // We are doing a lot of funky stuff here. It's amazing anything works, so let's not push our luck with GC. self.disable_gc(); @@ -52,7 +52,7 @@ impl<'v, 'a> Evaluator<'v, 'a> { .collect(); // Push all the frozen variables into the module - if let Some(frozen) = &self.module_variables { + if let Some(frozen) = self.top_frame_def_frozen_module(true)? 
{ for (name, slot) in frozen.names.symbols() { if let Some(value) = frozen.get_slot(slot) { self.module_env.set(&name, value.to_value()) @@ -78,10 +78,8 @@ impl<'v, 'a> Evaluator<'v, 'a> { } } - let orig_module_variables = self.module_variables.take(); let globals = self.top_frame_def_info_for_debugger()?.globals; let res = self.eval_module(statements, &globals); - self.module_variables = orig_module_variables; // Now put the Module back how it was before we started, as best we can // and move things into locals if that makes sense @@ -109,6 +107,7 @@ impl<'v, 'a> Evaluator<'v, 'a> { mod tests { use itertools::Itertools; use starlark_derive::starlark_module; + use starlark_syntax::error::StarlarkResultExt; use super::*; use crate as starlark; @@ -121,10 +120,11 @@ mod tests { fn debugger(builder: &mut GlobalsBuilder) { fn debug_evaluate<'v>( code: String, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result> { - let ast = AstModule::parse("interactive", code, &Dialect::Extended)?; - eval.eval_statements(ast) + let ast = AstModule::parse("interactive", code, &Dialect::AllOptionsInternal) + .into_anyhow_result()?; + eval.eval_statements(ast).into_anyhow_result() } } diff --git a/starlark-rust/starlark/src/debug/inspect.rs b/starlark-rust/starlark/src/debug/inspect.rs index c4ddc9f0e5620..e3719c1ca4fa5 100644 --- a/starlark-rust/starlark/src/debug/inspect.rs +++ b/starlark-rust/starlark/src/debug/inspect.rs @@ -33,7 +33,7 @@ pub(crate) fn to_scope_names_by_local_slot_id<'v>(x: Value<'v>) -> Option<&'v [F } } -impl<'v, 'a> Evaluator<'v, 'a> { +impl<'v> Evaluator<'v, '_, '_> { /// Obtain the local variables currently in scope. When at top-level these will be /// [`Module`](crate::environment::Module) variables, otherwise local definitions. The precise number of variables /// may change over time due to optimisation. The only legitimate use of this function is for debugging. @@ -42,7 +42,9 @@ impl<'v, 'a> Evaluator<'v, 'a> { } } -fn inspect_local_variables<'v>(eval: &Evaluator<'v, '_>) -> Option>> { +fn inspect_local_variables<'v>( + eval: &Evaluator<'v, '_, '_>, +) -> Option>> { // First we find the first entry on the call_stack which contains a Def (and thus has locals) let xs = eval.call_stack.to_function_values(); let names = xs @@ -62,7 +64,7 @@ fn inspect_local_variables<'v>(eval: &Evaluator<'v, '_>) -> Option(eval: &Evaluator<'v, '_>) -> SmallMap> { +fn inspect_module_variables<'v>(eval: &Evaluator<'v, '_, '_>) -> SmallMap> { let mut res = SmallMap::new(); for (name, slot) in eval.module_env.mutable_names().all_names_and_slots() { if let Some(v) = eval.module_env.slots().get_slot(slot) { @@ -91,7 +93,9 @@ mod tests { Ok(eval.call_stack().into_frames().map(ToString::to_string)) } - fn debug_inspect_variables<'v>(eval: &mut Evaluator<'v, '_>) -> anyhow::Result> { + fn debug_inspect_variables<'v>( + eval: &mut Evaluator<'v, '_, '_>, + ) -> anyhow::Result> { let mut sm = SmallMap::new(); for (k, v) in eval.local_variables() { sm.insert_hashed(eval.heap().alloc_str(&k).get_hashed(), v); diff --git a/starlark-rust/starlark/src/docs.rs b/starlark-rust/starlark/src/docs.rs new file mode 100644 index 0000000000000..40449874ccfb0 --- /dev/null +++ b/starlark-rust/starlark/src/docs.rs @@ -0,0 +1,308 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Types supporting documentation for code written in or for Starlark. + +// TODO(nga): document it +#![allow(missing_docs)] + +pub mod code; +pub mod markdown; +pub mod multipage; +mod parse; +#[cfg(test)] +mod tests; + +use std::iter; + +use allocative::Allocative; +pub use parse::DocStringKind; +use starlark_map::small_map::SmallMap; + +use crate as starlark; +use crate::eval::runtime::params::display::iter_fmt_param_spec; +pub use crate::eval::runtime::params::display::FmtParam; +use crate::typing::Ty; +use crate::values::type_repr::StarlarkTypeRepr; +use crate::values::StarlarkValue; +use crate::values::Trace; +use crate::values::Value; + +/// The documentation provided by a user for a specific module, object, function, etc. +#[derive(Debug, Clone, PartialEq, Trace, Default, Allocative)] +pub struct DocString { + /// The first line of a doc string. This has whitespace trimmed from it. + pub summary: String, + /// The contents of a doc string that follow the summary, and a single blank line. + /// This also has whitespace trimmed from it, and it is dedented. + pub details: Option, +} + +/// The documentation for a module/namespace. +/// +/// See the docs on [`DocType`] for the distinction between that type and this one. +#[derive(Debug, Clone, PartialEq, Default, Allocative)] +pub struct DocModule { + pub docs: Option, + pub members: SmallMap, +} + +/// Documents a single function. +#[derive(Debug, Clone, PartialEq, Default, Allocative)] +pub struct DocFunction { + /// Documentation for the function. If parsed, this should generally be the first statement + /// of a function's body if that statement is a string literal. Any sections like "Args:", + /// "Returns", etc are kept intact. It is up to the consumer to remove these sections if + /// they are present. + pub docs: Option, + /// The parameters that this function takes. Docs for these parameters should generally be + /// extracted from the main docstring's details. + pub params: DocParams, + /// Details about what this function returns. + pub ret: DocReturn, +} + +impl DocFunction { + /// Used by LSP. Return starred name and the doc. + pub fn find_param_with_name(&self, param_name: &str) -> Option<(String, &DocParam)> { + self.params + .doc_params_with_starred_names() + .find(|(_, p)| p.name == param_name) + } +} + +/// Function parameters. +#[derive(Debug, Clone, PartialEq, Default, Allocative)] +pub struct DocParams { + pub pos_only: Vec, + pub pos_or_named: Vec, + pub args: Option, + pub named_only: Vec, + pub kwargs: Option, +} + +impl DocParams { + /// Iterate parameters ignoring information about positional-only, named-only. 
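+    /// For example, for `def f(a, /, b, *args, c, **kwargs)` this yields the
+    /// `DocParam`s for `a`, `b`, `args`, `c`, `kwargs`, in that order.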
+ pub(crate) fn doc_params(&self) -> impl Iterator { + iter::empty() + .chain(&self.pos_only) + .chain(&self.pos_or_named) + .chain(&self.args) + .chain(&self.named_only) + .chain(&self.kwargs) + } + + pub(crate) fn doc_params_with_starred_names( + &self, + ) -> impl Iterator { + iter::empty() + .chain(self.pos_only.iter().map(|p| (p.name.clone(), p))) + .chain(self.pos_or_named.iter().map(|p| (p.name.clone(), p))) + .chain(self.args.iter().map(|p| (format!("*{}", p.name), p))) + .chain(self.named_only.iter().map(|p| (p.name.clone(), p))) + .chain(self.kwargs.iter().map(|p| (format!("**{}", p.name), p))) + } + + pub(crate) fn doc_params_mut(&mut self) -> impl Iterator { + iter::empty() + .chain(&mut self.pos_only) + .chain(&mut self.pos_or_named) + .chain(&mut self.args) + .chain(&mut self.named_only) + .chain(&mut self.kwargs) + } + + /// Non-star parameters. + pub fn regular_params(&self) -> impl Iterator { + iter::empty() + .chain(&self.pos_only) + .chain(&self.pos_or_named) + .chain(&self.named_only) + } + + /// Iterate params with `/` and `*` markers to output function signature. + pub fn fmt_params(&self) -> impl Iterator> { + iter_fmt_param_spec( + &self.pos_only, + &self.pos_or_named, + self.args.as_ref(), + &self.named_only, + self.kwargs.as_ref(), + ) + } +} + +/// A single parameter of a function. +#[derive(Debug, Clone, PartialEq, Allocative)] +pub struct DocParam { + /// Does not include `*` or `**`. + pub name: String, + pub docs: Option, + /// Element type for `*args` and value type for `**kwargs`. + pub typ: Ty, + pub default_value: Option, +} + +/// Details about the return value of a function. +#[derive(Debug, Clone, PartialEq, Allocative)] +pub struct DocReturn { + /// Extra semantic details around the returned value's meaning. + pub docs: Option, + pub typ: Ty, +} + +impl Default for DocReturn { + fn default() -> Self { + DocReturn { + docs: None, + typ: Ty::any(), + } + } +} + +/// A single property of an object. These are explicitly not functions (see [`DocMember`]). +#[derive(Debug, Clone, PartialEq, Allocative)] +pub struct DocProperty { + pub docs: Option, + pub typ: Ty, +} + +/// A named member of an object. +#[derive(Debug, Clone, PartialEq, Allocative)] +pub enum DocMember { + Property(DocProperty), + Function(DocFunction), +} + +impl DocMember { + pub(crate) fn from_value(value: Value) -> Self { + // If we have a value which is a complex type, the right type to put in the docs is not the type + // it represents, but it's just a property we should point at + match value.documentation() { + DocItem::Member(x) => x, + _ => DocMember::Property(DocProperty { + docs: None, + typ: value.get_type_starlark_repr(), + }), + } + } +} + +/// The documentation for a type +/// +/// This is distinct from a module since, well, types and modules are different things, but more +/// importantly because the members here are expected to be attributes on *values* of the type, not +/// on the type itself. In other words, if I have a global `FooRecord`, and its documentation says +/// that it's a type with member `x`, then the expectation is not that `FooRecord.x` works, but +/// rather that `foo.x` works, where `foo` is of type `FooRecord`. On the other hand, if there's a +/// global `m`, and `m`'s documentation says it's a module with member `x`, then `m.x` should work. +#[derive(Debug, Clone, PartialEq, Allocative)] +pub struct DocType { + pub docs: Option, + /// Name and details of each attr/function that can be accessed on this type. 
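+    /// For the `FooRecord` example above, this would hold an entry for `x`
+    /// describing the attribute on values of type `FooRecord`.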
+ pub members: SmallMap, + pub ty: Ty, + pub constructor: Option, +} + +impl DocType { + pub fn from_starlark_value>() -> DocType { + let ty = T::starlark_type_repr(); + match T::get_methods() { + Some(methods) => methods.documentation(ty), + None => DocType { + docs: None, + members: SmallMap::new(), + ty, + constructor: None, + }, + } + } +} + +#[derive(Debug, Clone, PartialEq, Allocative)] +pub enum DocItem { + Module(DocModule), + Type(DocType), + Member(DocMember), +} + +impl DocItem { + /// Get the underlying [`DocString`] for this item, if it exists. + pub fn get_doc_string(&self) -> Option<&DocString> { + match self { + DocItem::Module(m) => m.docs.as_ref(), + DocItem::Type(o) => o.docs.as_ref(), + DocItem::Member(DocMember::Function(f)) => f.docs.as_ref(), + DocItem::Member(DocMember::Property(p)) => p.docs.as_ref(), + } + } + + /// Get the summary of the underlying [`DocString`] for this item, if it exists. + pub fn get_doc_summary(&self) -> Option<&str> { + self.get_doc_string().map(|ds| ds.summary.as_str()) + } + + /// Converts to a doc member, if possible. + /// + /// This conversion is trivial, except in the case of objects - those are flattened into a + /// single property that just indicates their type + pub fn try_as_member_with_collapsed_object(&self) -> Result { + match self { + DocItem::Module(m) => Err(m), + DocItem::Member(m) => Ok(m.clone()), + DocItem::Type(o) => Ok(DocMember::Property(DocProperty { + docs: o.docs.clone(), + typ: o.ty.clone(), + })), + } + } + + pub fn try_as_member(&self) -> Option { + match self { + DocItem::Member(m) => Some(m.clone()), + _ => None, + } + } +} + +impl DocMember { + /// Get the underlying [`DocString`] for this item, if it exists. + pub fn get_doc_string(&self) -> Option<&DocString> { + match self { + DocMember::Function(f) => f.docs.as_ref(), + DocMember::Property(p) => p.docs.as_ref(), + } + } + + /// Get the summary of the underlying [`DocString`] for this item, if it exists. + pub fn get_doc_summary(&self) -> Option<&str> { + self.get_doc_string().map(|ds| ds.summary.as_str()) + } +} + +impl DocParam { + /// Get the underlying [`DocString`] for this item, if it exists. + pub fn get_doc_string(&self) -> Option<&DocString> { + self.docs.as_ref() + } + + /// Get the summary of the underlying [`DocString`] for this item, if it exists. + pub fn get_doc_summary(&self) -> Option<&str> { + self.get_doc_string().map(|ds| ds.summary.as_str()) + } +} diff --git a/starlark-rust/starlark/src/docs/code.rs b/starlark-rust/starlark/src/docs/code.rs new file mode 100644 index 0000000000000..05e7704cdcb53 --- /dev/null +++ b/starlark-rust/starlark/src/docs/code.rs @@ -0,0 +1,288 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +use std::fmt::Display; + +use itertools::Itertools; + +use crate::docs::DocFunction; +use crate::docs::DocItem; +use crate::docs::DocMember; +use crate::docs::DocModule; +use crate::docs::DocParam; +use crate::docs::DocParams; +use crate::docs::DocProperty; +use crate::docs::DocReturn; +use crate::docs::DocString; +use crate::docs::DocType; +use crate::eval::runtime::params::display::fmt_param_spec_maybe_multiline; +use crate::eval::runtime::params::display::ParamFmt; +use crate::typing::ty::TypeRenderConfig; +use crate::typing::Ty; + +/// There have been bugs around line endings in the textwrap crate. Just join +/// into a single string, and trim the line endings. +fn wrap_trimmed(s: &str, width: usize) -> String { + textwrap::wrap(s, width).join("\n").trim_end().to_owned() +} + +/// There have been bugs around line endings in the textwrap crate. Just trim the line endings. +fn indent_trimmed(s: &str, prefix: &str) -> String { + textwrap::indent(s, prefix).trim_end().to_owned() +} + +impl DocString { + /// Render this docstring as a "starlark" docstring. + fn render_as_code(&self) -> String { + let s = match &self.details { + Some(details) => { + format!("{}\n\n{}", self.summary, details) + } + None => self.summary.clone(), + }; + wrap_trimmed(&s, 80) + } + + /// Render the docstring as in `render_as_code`, but surround it in triple quotes, + /// a common convention in starlark docstrings. + fn render_as_quoted_code(&self) -> String { + format!("\"\"\"\n{}\n\"\"\"", self.render_as_code()) + } +} + +impl DocModule { + pub fn render_as_code(&self) -> String { + let mut res = self + .docs + .as_ref() + .map(DocString::render_as_quoted_code) + .unwrap_or_default(); + for (k, v) in &self.members { + if let Ok(v) = v.try_as_member_with_collapsed_object() { + res.push('\n'); + match v { + DocMember::Property(p) => res.push_str(&p.render_as_code(k)), + DocMember::Function(f) => res.push_str(&f.render_as_code(k)), + } + res.push('\n'); + } + } + res + } +} + +impl DocFunction { + fn starlark_docstring(&self) -> Option { + let mut docs = String::new(); + if let Some(main_docs) = self.docs.as_ref().map(DocString::render_as_code) { + docs.push_str(&main_docs); + } + + let args_indentation_count = self + .params + .doc_params() + .map(|p| p.name.len() + 2) + .max() + .unwrap_or_default(); + let args_indentation = " ".repeat(args_indentation_count); + + let args_docs = self + .params + .doc_params() + .filter_map(|p| p.starlark_docstring(&args_indentation)) + .join("\n"); + if !args_docs.is_empty() { + let indented = indent_trimmed(&args_docs, " "); + docs.push_str(&format!("\n\nArgs:\n{}", indented)); + } + + if let Some(ret_docs) = self.ret.starlark_docstring() { + let indented = indent_trimmed(&ret_docs, " "); + docs.push_str(&format!("\n\nRet:\n{}", indented)); + } + if docs.is_empty() { + None + } else { + Some(indent_trimmed( + &format!("\"\"\"\n{}\n\"\"\"", docs.trim_start()), + " ", + )) + } + } + + pub fn render_as_code(&self, name: &str) -> String { + let params_one_line = self.params.render_code(None, &TypeRenderConfig::Default); + + let params = if params_one_line.len() > 60 { + format!( + "(\n{})", + self.params + .render_code(Some(" "), &TypeRenderConfig::Default), + ) + } else { + format!("({})", params_one_line) + }; + let docstring = self + .starlark_docstring() + .map(|mut ds| { + ds.push('\n'); + ds + }) + .unwrap_or_default(); + let ret = Some(&self.ret.typ) + .filter(|t| t != &&Ty::any()) + .map(|t| format!(" -> {}", t)) + .unwrap_or_default(); + + format!("def {}{}{}:\n{} 
pass", name, params, ret, docstring) + } +} + +impl DocParam { + fn starlark_docstring(&self, max_indentation: &str) -> Option { + let DocParam { name, docs, .. } = self; + let rendered_docs = docs.as_ref()?.render_as_code(); + let mut indented = indent_trimmed(&rendered_docs, max_indentation); + indented.replace_range(..name.len() + 2, &format!("{}: ", name)); + Some(indented) + } + + fn fmt_param<'a>( + &'a self, + render_config: &'a TypeRenderConfig, + ) -> ParamFmt<'a, impl Display + 'a, impl Display + 'a> { + let DocParam { + name, + docs: _, + typ, + default_value, + } = self; + let ty = if typ.is_any() { + None + } else { + Some(typ.display_with(render_config)) + }; + ParamFmt { + name, + ty, + default: default_value.as_ref(), + } + } +} + +impl DocParams { + /// Render multiline if `indent` is `Some`. + pub(crate) fn render_code( + &self, + indent: Option<&str>, + render_config: &TypeRenderConfig, + ) -> String { + let mut s = String::new(); + fmt_param_spec_maybe_multiline( + &mut s, + indent, + self.pos_only.iter().map(|p| p.fmt_param(render_config)), + self.pos_or_named.iter().map(|p| p.fmt_param(render_config)), + self.args.as_ref().map(|p| p.fmt_param(render_config)), + self.named_only.iter().map(|p| p.fmt_param(render_config)), + self.kwargs.as_ref().map(|p| p.fmt_param(render_config)), + ) + .unwrap(); + s + } +} + +impl DocReturn { + fn starlark_docstring(&self) -> Option { + self.docs.as_ref().map(DocString::render_as_code) + } +} + +impl DocProperty { + pub fn render_as_code(&self, name: &str) -> String { + match ( + &self.typ, + self.docs.as_ref().map(DocString::render_as_quoted_code), + ) { + // TODO(nmj): The starlark syntax needs to be updated to support type + // annotations on values as python does. Afterward, use these + // format strings. 
+ // (Some(t), Some(ds)) => { + // format!("{}\n_{}: {} = None", ds, name, t.raw_type) + // } + // (Some(t), None) => format!(r#"_{}: {} = None"#, name, t.raw_type), + (t, Some(ds)) if t.is_any() => format!("{}\n_{} = None", ds, name), + (t, None) if t.is_any() => format!("_{} = None", name), + (t, Some(ds)) => { + format!("{}\n# type: {}\n_{} = None", ds, t, name) + } + (t, None) => format!("# type: {}\n_{} = None", t, name), + } + } +} + +impl DocType { + fn render_as_code(&self, name: &str) -> String { + let summary = self + .docs + .as_ref() + .map(|ds| { + let mut s = ds.render_as_quoted_code(); + s.push('\n'); + s + }) + .unwrap_or_default(); + + let member_docs = self + .members + .iter() + .map(|(name, member)| match member { + DocMember::Property(p) => p.render_as_code(name), + DocMember::Function(f) => f.render_as_code(&format!("_{}", name)), + }) + .join("\n\n"); + + let exported_struct_members = self + .members + .iter() + .map(|(name, _)| format!(" {} = _{},", name, name)) + .join("\n"); + let exported_struct = if !exported_struct_members.is_empty() { + format!( + "{}{} = struct(\n{}\n)", + summary, name, exported_struct_members + ) + } else { + String::new() + }; + + format!("{}\n\n{}", member_docs, exported_struct) + .trim() + .to_owned() + } +} + +impl DocItem { + pub fn render_as_code(&self, name: &str) -> String { + match self { + DocItem::Module(m) => m.render_as_code(), + DocItem::Type(o) => o.render_as_code(&name), + DocItem::Member(DocMember::Function(f)) => f.render_as_code(&name), + DocItem::Member(DocMember::Property(p)) => p.render_as_code(&name), + } + } +} diff --git a/starlark-rust/starlark/src/docs/markdown.rs b/starlark-rust/starlark/src/docs/markdown.rs index 31f60b150ce4e..ec321a3718469 100644 --- a/starlark-rust/starlark/src/docs/markdown.rs +++ b/starlark-rust/starlark/src/docs/markdown.rs @@ -15,60 +15,22 @@ * limitations under the License. */ -use std::slice; +use std::fmt::Write; +use std::iter; -use dupe::Dupe; use itertools::Itertools; -use starlark_map::small_map::SmallMap; -use crate::docs::Doc; use crate::docs::DocFunction; use crate::docs::DocItem; use crate::docs::DocMember; use crate::docs::DocModule; -use crate::docs::DocObject; use crate::docs::DocParam; use crate::docs::DocProperty; use crate::docs::DocString; +use crate::docs::DocType; +use crate::typing::ty::TypeRenderConfig; use crate::typing::Ty; -/// The style of output that is being generated -#[derive(Copy, Clone, Dupe)] -pub enum MarkdownFlavor { - /// A file that is written out to disk for a website or in repo. - /// - /// These pages are generally slightly more detailed (e.g. module summary tables at the top - /// of the page) and have different formatting due differing use cases. - DocFile, - /// A summary that can be shown in the "Hover" event in the LSP. - LspSummary, -} - -/// This object can potentially generate markdown documentation about itself. -pub trait RenderMarkdown { - /// Generate markdown of the given flavor if possible. For some types, there may not be - /// any useful documentation available. 
- fn render_markdown_opt(&self, flavor: MarkdownFlavor) -> Option; - - /// Convenience method that invokes `RenderMarkdown::render_markdown_opt`, and returns an - /// empty string if that is `None` - fn render_markdown(&self, flavor: MarkdownFlavor) -> String { - self.render_markdown_opt(flavor).unwrap_or_default() - } -} - -impl RenderMarkdown for String { - fn render_markdown_opt(&self, _flavor: MarkdownFlavor) -> Option { - Some(self.clone()) - } -} - -impl RenderMarkdown for str { - fn render_markdown_opt(&self, _flavor: MarkdownFlavor) -> Option { - Some(self.to_owned()) - } -} - /// What to render from a [`DocString`]. enum DSOpts { /// Just the summary. @@ -96,11 +58,11 @@ fn escape_name(name: &str) -> String { name.replace('_', "\\_") } -fn render_property(name: &str, property: &DocProperty) -> String { - let prototype = render_code_block(&format!( - "{name}: {}", - TypeRenderer::Type(&property.typ).render_markdown(MarkdownFlavor::DocFile) - )); +fn render_property(name: &str, property: &DocProperty, render_config: &TypeRenderConfig) -> String { + let prototype = render_code_block( + &format!("{name}: {}", &property.typ.display_with(render_config)), + render_config, + ); let header = format!("## {}\n\n{prototype}", escape_name(name)); let summary = render_doc_string(DSOpts::Summary, &property.docs); let details = render_doc_string(DSOpts::Details, &property.docs); @@ -119,51 +81,55 @@ fn render_property(name: &str, property: &DocProperty) -> String { } /// If there are any parameter docs to render, render them as a list. -fn render_function_parameters(params: &[DocParam]) -> Option { - // Filter out parameters without docs - let has_docs: Vec<_> = params - .iter() - .filter(|p| match p { - DocParam::Arg { docs, .. } => docs.is_some(), - DocParam::NoArgs | DocParam::OnlyPosBefore => false, - DocParam::Args { docs, .. } => docs.is_some(), - DocParam::Kwargs { docs, .. } => docs.is_some(), - }) - .collect(); - - if has_docs.is_empty() { - return None; +fn render_function_parameters<'a>( + params: impl IntoIterator, +) -> Option { + let mut param_list: Option = None; + for (name, p) in params { + let DocParam { docs, .. } = p; + + if docs.is_none() { + continue; + } + + let param_list = param_list.get_or_insert_with(String::new); + + let docs = render_doc_string(DSOpts::Combined, docs).unwrap_or_default(); + + let mut lines_iter = docs.lines(); + if let Some(first_line) = lines_iter.next() { + let _ = writeln!(param_list, "* `{name}`: {first_line}"); + for line in lines_iter { + let _ = writeln!(param_list, " {line}"); + } + } else { + let _ = writeln!(param_list, "* `{name}`"); + } } - let param_list: String = has_docs - .iter() - .filter_map(|p| match p { - DocParam::Arg { name, docs, .. } => Some((name, docs)), - DocParam::NoArgs | DocParam::OnlyPosBefore => None, - DocParam::Args { name, docs, .. } => Some((name, docs)), - DocParam::Kwargs { name, docs, .. 
} => Some((name, docs)), - }) - .map(|(name, docs)| { - let docs = render_doc_string(DSOpts::Combined, docs).unwrap_or_default(); - format!("* `{name}`: {docs}\n") - }) - .collect(); - Some(param_list) + param_list } -fn render_function(name: &str, function: &DocFunction) -> String { +fn render_function( + name: &str, + function: &DocFunction, + include_header: bool, + render_config: &TypeRenderConfig, +) -> String { let prototype = render_code_block( - &(TypeRenderer::Function { - function_name: name, - f: function, - } - .render_markdown(MarkdownFlavor::DocFile)), + &render_function_prototype(name, function, render_config), + render_config, ); - let header = format!("## {}\n\n{prototype}", escape_name(name)); + let header = if include_header { + format!("## {}\n\n{prototype}", escape_name(name)) + } else { + prototype + }; let summary = render_doc_string(DSOpts::Summary, &function.docs); let details = render_doc_string(DSOpts::Details, &function.docs); - let parameter_docs = render_function_parameters(&function.params); + let parameter_docs = + render_function_parameters(function.params.doc_params_with_starred_names()); let return_docs = render_doc_string(DSOpts::Combined, &function.ret.docs); let mut body = header; @@ -179,12 +145,8 @@ fn render_function(name: &str, function: &DocFunction) -> String { body.push_str("\n\n#### Returns\n\n"); body.push_str(returns); } - if let Some(dot_type) = function.as_type.as_ref().and_then(|t| t.as_name()) { - body.push_str("\n\n#### `.type` attribute\n\n"); - body.push_str(&format!("Produces `{dot_type:?}`")); - } if let Some(details) = &details { - if parameter_docs.is_some() || return_docs.is_some() || function.as_type.is_some() { + if parameter_docs.is_some() || return_docs.is_some() { body.push_str("\n\n#### Details\n\n"); } else { // No need to aggressively separate the defaults from the summary if there @@ -197,86 +159,92 @@ fn render_function(name: &str, function: &DocFunction) -> String { body } -fn render_members( +pub(super) fn render_members<'a>( name: &str, - object: bool, docs: &Option, - members: &SmallMap, + prefix: &str, + members: impl IntoIterator, + after_summary: Option, + render_config: &TypeRenderConfig, ) -> String { - // If this is a native, top level object, render it with a larger - // header. Sub objects will be listed along side members, so use - // smaller headers there. - let title = if object { - format!("# `{name}` type") - } else { - format!("# {name}") - }; let summary = render_doc_string(DSOpts::Combined, docs) .map(|s| format!("\n\n{}", s)) .unwrap_or_default(); - let prefix = if object { - format!("{name}.") - } else { - String::new() - }; - - let member_details: Vec = members - .iter() + let member_details = members + .into_iter() .sorted_by(|(l_m, _), (r_m, _)| l_m.cmp(r_m)) - .map(|(child, member)| render_member(&format!("{prefix}{child}"), member)) - .collect(); + .map(|(child, member)| { + render_doc_member(&format!("{prefix}{child}"), &member, render_config) + }); + let member_details: Vec<_> = after_summary.into_iter().chain(member_details).collect(); let members_details = member_details.join("\n\n---\n\n"); - format!("{title}{summary}\n\n{members_details}") + format!("# {name}{summary}\n\n{members_details}") } -/// Render a top level module. 
-fn render_module(name: &str, module: &DocModule) -> String { - render_members(name, false, &module.docs, &module.members) +pub(super) fn render_doc_type( + name: &str, + prefix: &str, + t: &DocType, + render_config: &TypeRenderConfig, +) -> String { + let constructor = t + .constructor + .as_ref() + .map(|c| render_function(name, c, false, render_config)); + render_members( + &name, + &t.docs, + &prefix, + t.members.iter().map(|(n, m)| (&**n, m.clone())), + constructor, + render_config, + ) } -fn render_object(name: &str, object: &DocObject) -> String { - render_members(name, true, &object.docs, &object.members) +/// Used by LSP. +/// It will not render the type signatures with link to types +pub fn render_doc_item_no_link(name: &str, item: &DocItem) -> String { + render_doc_item(name, item, &TypeRenderConfig::Default) } -/// Used by LSP. -pub fn render_doc_item(name: &str, item: &DocItem) -> String { +pub fn render_doc_item(name: &str, item: &DocItem, render_config: &TypeRenderConfig) -> String { match item { - DocItem::Module(m) => render_module(name, m), - DocItem::Object(o) => render_object(name, o), - DocItem::Function(f) => render_function(name, f), - DocItem::Property(p) => render_property(name, p), + DocItem::Module(m) => render_members( + name, + &m.docs, + "", + m.members.iter().filter_map(|(n, m)| { + m.try_as_member_with_collapsed_object() + .ok() + .map(|m| (&**n, m)) + }), + None, + render_config, + ), + DocItem::Type(o) => render_doc_type( + &format!("`{name}` type"), + &format!("{name}."), + o, + render_config, + ), + DocItem::Member(DocMember::Function(f)) => render_function(name, f, true, render_config), + DocItem::Member(DocMember::Property(p)) => render_property(name, p, render_config), } } /// Used by LSP. -pub fn render_doc_member(name: &str, item: &DocMember) -> String { +pub fn render_doc_member(name: &str, item: &DocMember, render_config: &TypeRenderConfig) -> String { match item { - DocMember::Function(f) => render_function(name, f), - DocMember::Property(p) => render_property(name, p), + DocMember::Function(f) => render_function(name, f, true, render_config), + DocMember::Property(p) => render_property(name, p, render_config), } } /// Used by LSP. -pub fn render_doc_param(item: &DocParam) -> String { - render_function_parameters(slice::from_ref(item)).unwrap_or_default() -} - -impl RenderMarkdown for Doc { - fn render_markdown_opt(&self, flavor: MarkdownFlavor) -> Option { - match flavor { - MarkdownFlavor::DocFile => Some(render_doc_item(&self.id.name, &self.item)), - MarkdownFlavor::LspSummary => None, - } - } -} - -fn render_member(name: &str, member: &DocMember) -> String { - match member { - DocMember::Property(p) => render_property(name, p), - DocMember::Function(f) => render_function(name, f), - } +pub fn render_doc_param(starred_name: String, item: &DocParam) -> String { + render_function_parameters(iter::once((starred_name, item))).unwrap_or_default() } /// Any functions with more parameters than this will have @@ -287,83 +255,76 @@ const MAX_ARGS_BEFORE_MULTILINE: usize = 3; /// If the prototype ends up longer than this length, we'll split it anyway const MAX_LENGTH_BEFORE_MULTILINE: usize = 80; -/// Render a "type". This is either a [`Type`] object, or details about a function to -/// produce a function prototype. -enum TypeRenderer<'a> { - /// A general "type". - Type(&'a Ty), - /// A function, with some extra formatting options. - Function { - /// The function name in the prototype as well. 
- function_name: &'a str, - f: &'a DocFunction, - }, +fn raw_type_prefix(prefix: &str, t: &Ty, render_config: &TypeRenderConfig) -> String { + if t.is_any() { + String::new() + } else { + format!("{prefix}{}", t.display_with(render_config)) + } } -impl<'a> RenderMarkdown for TypeRenderer<'a> { - fn render_markdown_opt(&self, flavor: MarkdownFlavor) -> Option<String> { - fn raw_type(t: &Ty) -> String { - t.to_string() - } +fn render_function_prototype( + function_name: &str, + f: &DocFunction, + render_config: &TypeRenderConfig, +) -> String { + let ret_type = raw_type_prefix(" -> ", &f.ret.typ, render_config); + let prefix = format!("def {}", function_name); + let one_line_params = f.params.render_code(None, render_config); + let single_line_result = format!("{}({}){}", prefix, one_line_params, ret_type); + + if f.params.doc_params().count() > MAX_ARGS_BEFORE_MULTILINE + || single_line_result.len() > MAX_LENGTH_BEFORE_MULTILINE + { + let chunked_params = f.params.render_code(Some(" "), render_config); + format!("{}(\n{}){}", prefix, chunked_params, ret_type) + } else { + single_line_result + } +} - fn raw_type_prefix(prefix: &str, t: &Ty) -> String { - if t.is_any() { - String::new() - } else { - format!("{prefix}{}", raw_type(t)) - } +// For LinkedType rendering in markdown: a fenced ``` ``` code block cannot contain links, +// so we need to use an HTML block here, +// example: +// <pre class="language-python">
+//   <code>
+//     def some_function() -> Artifact
+//   </code>
+// </pre>
+fn render_code_block(contents: &str, render_config: &TypeRenderConfig) -> String { + match render_config { + TypeRenderConfig::Default => format!("```python\n{contents}\n```"), + TypeRenderConfig::LinkedType { ty_to_path_map: _ } => { + format!(r#"<pre class="language-python"><code>{contents}</code></pre>
    "#) } + } +} - match flavor { - MarkdownFlavor::DocFile => match self { - TypeRenderer::Type(t) => Some(raw_type(t)), - TypeRenderer::Function { function_name, f } => { - let mut params = f.params.iter().map(|p| match p { - DocParam::Arg { - typ, - name, - default_value, - .. - } => { - let type_string = raw_type_prefix(": ", typ); - match default_value { - Some(v) => format!("{}{} = {}", name, type_string, v), - None => format!("{}{}", name, type_string), - } - } - DocParam::NoArgs => "*".to_owned(), - DocParam::OnlyPosBefore => "/".to_owned(), - DocParam::Args { typ, name, .. } => { - format!("{}{}", name, raw_type_prefix(": ", typ)) - } - DocParam::Kwargs { typ, name, .. } => { - format!("{}{}", name, raw_type_prefix(": ", typ)) - } - }); - - let ret_type = raw_type_prefix(" -> ", &f.ret.typ); - let prefix = format!("def {}", function_name); - let single_line_result = - format!("{}({}){}", prefix, params.clone().join(", "), ret_type); - - if f.params.len() > MAX_ARGS_BEFORE_MULTILINE - || single_line_result.len() > MAX_LENGTH_BEFORE_MULTILINE - { - let chunked_params = params.join(",\n "); - Some(format!( - "{}(\n {}\n){}", - prefix, chunked_params, ret_type - )) - } else { - Some(single_line_result) - } - } - }, - MarkdownFlavor::LspSummary => None, - } +impl DocModule { + pub(super) fn render_markdown_page_for_multipage_render( + &self, + name: &str, + render_config: &TypeRenderConfig, + ) -> String { + render_members( + name, + &self.docs, + "", + self.members + .iter() + .filter_map(|(n, m)| m.try_as_member().map(|m| (&**n, m))), + None, + render_config, + ) } } -fn render_code_block(contents: &str) -> String { - format!("```python\n{contents}\n```") +impl DocType { + pub(super) fn render_markdown_page_for_multipage_render( + &self, + name: &str, + render_config: &TypeRenderConfig, + ) -> String { + render_doc_type(&name, &format!("{name}."), self, render_config) + } } diff --git a/starlark-rust/starlark/src/docs/mod.rs b/starlark-rust/starlark/src/docs/mod.rs deleted file mode 100644 index 2c52c580d4788..0000000000000 --- a/starlark-rust/starlark/src/docs/mod.rs +++ /dev/null @@ -1,1361 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! Types supporting documentation for code written in or for Starlark. 
- -// TODO(nga): document it -#![allow(missing_docs)] - -pub mod markdown; - -use std::collections::HashMap; - -use allocative::Allocative; -use dupe::Dupe; -use itertools::Itertools; -pub use markdown::MarkdownFlavor; -pub use markdown::RenderMarkdown; -use once_cell::sync::Lazy; -use regex::Regex; -use regex::RegexBuilder; -use serde::Serialize; -pub use starlark_derive::StarlarkDocs; -use starlark_map::small_map::SmallMap; -use starlark_syntax::syntax::ast::AstLiteral; -use starlark_syntax::syntax::ast::AstPayload; -use starlark_syntax::syntax::ast::AstStmtP; -use starlark_syntax::syntax::ast::ExprP; -use starlark_syntax::syntax::ast::StmtP; - -use crate as starlark; -use crate::codemap::Spanned; -use crate::typing::Ty; -use crate::values::StarlarkValue; -use crate::values::Trace; -use crate::values::Value; - -/// There have been bugs around line endings in the textwrap crate. Just join -/// into a single string, and trim the line endings. -fn wrap_trimmed(s: &str, width: usize) -> String { - textwrap::wrap(s, width).join("\n").trim_end().to_owned() -} - -/// There have been bugs around line endings in the textwrap crate. Just trim the line endings. -fn indent_trimmed(s: &str, prefix: &str) -> String { - textwrap::indent(s, prefix).trim_end().to_owned() -} - -/// The documentation provided by a user for a specific module, object, function, etc. -#[derive(Debug, Clone, PartialEq, Serialize, Trace, Default, Allocative)] -pub struct DocString { - /// The first line of a doc string. This has whitespace trimmed from it. - pub summary: String, - /// The contents of a doc string that follow the summary, and a single blank line. - /// This also has whitespace trimmed from it, and it is dedented. - pub details: Option, -} - -impl DocString { - /// Render this docstring as a "starlark" docstring. - fn render_as_code(&self) -> String { - let s = match &self.details { - Some(details) => { - format!("{}\n\n{}", self.summary, details) - } - None => self.summary.clone(), - }; - wrap_trimmed(&s, 80) - } - - /// Render the docstring as in `render_as_code`, but surround it in triple quotes, - /// a common convention in starlark docstrings. - fn render_as_quoted_code(&self) -> String { - format!("\"\"\"\n{}\n\"\"\"", self.render_as_code()) - } -} - -/// Controls the formatting to use when parsing `DocString`s from raw docstrings -#[derive(Copy, Clone, Dupe)] -pub enum DocStringKind { - /// Docstrings provided by users in starlark files, following python-y documentation style. - /// - /// For functions, they are the piece in `"""` that come right after the `def foo():` line, - /// and they have sections for additional details. An example from a starlark file might be: - /// - /// ```starlark - /// """ Module level docs here """ - /// - /// def some_function(val: "string") -> "string": - /// """ This function takes a string and returns it. - /// - /// This is where an explanation might go, but I have none - /// - /// Args: - /// val: This is the value that gets returned - /// - /// Returns: - /// The original value, because identity functions are fun. - /// ``` - Starlark, - /// Docstrings used with `#[starlark_module]` in rust. - /// - /// These are the documentation strings prefixed by `///` (like these docs) on - /// `#[starlark_module]`, and the functions / attributes within it. It supports - /// a section `# Arguments`, and `# Returns`, and removes some lines from code - /// blocks that are valid for rustdoc, but not useful for people using these - /// functions via starlark. 
An example might be something like: - /// - /// ``` - /// # use starlark::starlark_module; - /// # use starlark::environment::MethodsBuilder; - /// # use starlark::values::Value; - /// - /// /// These are where the module / object level docs go - /// #[starlark_module] - /// fn add_some_value(builder: &mut MethodsBuilder) { - /// /// attr1 is an attribute that does nothing interesting. - /// #[starlark(attribute)] - /// fn attr1<'v>(this: Value<'v>) -> anyhow::Result { - /// let _ = this; - /// Ok("attr1".to_owned()) - /// } - /// /// Copies a string - /// /// - /// /// This is where details would be, if this were - /// /// a more interesting function. - /// /// - /// /// # Arguments - /// /// * `s`: This is string that is returned. - /// /// - /// /// # Returns - /// /// The a copy of the original string. - /// fn copy_string<'v>(this: Value<'v>, s: &str) -> anyhow::Result { - /// let _ = this; - /// Ok(s.to_owned()) - /// } - /// } - /// ``` - Rust, -} - -impl DocString { - /// Extracts the docstring from a function or module body, iff the first - /// statement is a string literal. - pub(crate) fn extract_raw_starlark_docstring( - body: &AstStmtP
<P>
    , - ) -> Option { - if let StmtP::Statements(stmts) = &body.node { - if let Some(Spanned { - node: - StmtP::Expression(Spanned { - node: ExprP::Literal(AstLiteral::String(s)), - .. - }), - .. - }) = stmts.first() - { - return Some(s.node.to_owned()); - } - }; - None - } - - fn split_summary_details(s: &str) -> Option<(&str, &str)> { - let mut summary_len = 0; - for line in s.split_inclusive('\n') { - if line.trim().is_empty() { - let details_start = summary_len + line.len(); - return Some((s[..summary_len].trim(), &s[details_start..])); - } else { - summary_len += line.len(); - } - } - None - } - - // Remove any newlines (and surrounding whitespace) in the summary, and - // replace them with a single space. - fn normalize_summary(summary: &str) -> String { - let mut res = String::with_capacity(summary.len()); - for line in summary.lines() { - if !res.is_empty() { - res.push(' '); - } - res.push_str(line.trim()); - } - res - } - - /// Do common work to parse a docstring (dedenting, splitting summary and details, etc) - pub fn from_docstring(kind: DocStringKind, user_docstring: &str) -> Option { - let trimmed_docs = user_docstring.trim(); - if trimmed_docs.is_empty() { - None - } else { - let split: Option<(&str, &str)> = Self::split_summary_details(trimmed_docs); - let (summary, details) = match split { - Some((summary, details)) if !summary.is_empty() && !details.is_empty() => { - // Dedent the details separately so that people can have the summary on the - // same line as the opening quotes, and the details indented on subsequent - // lines. - let details = match kind { - DocStringKind::Starlark => textwrap::dedent(details).trim().to_owned(), - DocStringKind::Rust => { - Self::remove_rust_comments(textwrap::dedent(details).trim()) - } - }; - (summary, Some(details)) - } - _ => (trimmed_docs, None), - }; - - let summary = Self::normalize_summary(summary); - - Some(DocString { summary, details }) - } - } - - /// Removes rustdoc-style commented out lines from code blocks. - fn remove_rust_comments(details: &str) -> String { - static CODEBLOCK_RE: Lazy = Lazy::new(|| { - RegexBuilder::new(r"```(\w*)\n.*?```") - .dot_matches_new_line(true) - .build() - .expect("regex to compile") - }); - static COMMENT_RE: Lazy = Lazy::new(|| { - RegexBuilder::new(r"^# .*$\n") - .multi_line(true) - .build() - .expect("regex to compile") - }); - CODEBLOCK_RE - .replace_all(details, |caps: ®ex::Captures| { - match caps.get(1).expect("language group").as_str() { - "" | "rust" => COMMENT_RE - .replace_all(caps.get(0).expect("$0 to exist").as_str(), "") - .to_string(), - _ => caps.get(0).expect("$0 to exist").as_str().to_owned(), - } - }) - .to_string() - } - - /// Join lines up, dedent them, and trim them - fn join_and_dedent_lines(lines: &[String]) -> String { - textwrap::dedent(&lines.join("\n")).trim().to_owned() - } - - /// Parse the sections out of a docstring's `details` text, and remove the requested sections from the text. - /// - /// "sections" are the various things in doc strings like "Arguments:", "Returns:", etc - /// - /// # Returns - /// - A new instance of `DocString`, with the requested sections, if found, removed. - /// - A mapping of section name, converted to lower case, to the cleaned up section text - /// i.e. dedented, section header not present, etc for any found sections. 
- fn parse_and_remove_sections( - self, - kind: DocStringKind, - requested_sections: &[&str], - ) -> (Self, HashMap) { - let mut sections = HashMap::new(); - - let mut finish_section = - |current_section: &mut Option, current_section_text: &mut Vec| { - if let Some(s) = current_section.take() { - sections.insert(s, DocString::join_and_dedent_lines(current_section_text)); - current_section_text.clear(); - } - }; - - static STARLARK_SECTION_RE: Lazy = - Lazy::new(|| Regex::new(r"^([\w -]+):\s*$").unwrap()); - static STARLARK_INDENTED_RE: Lazy = Lazy::new(|| Regex::new(r"^(?:\s|$)").unwrap()); - static RUST_SECTION_RE: Lazy = - Lazy::new(|| Regex::new(r"^# ([\w -]+)\s*$").unwrap()); - static RUST_INDENTED_RE: Lazy = Lazy::new(|| Regex::new(r"^.*").unwrap()); - - let (section_re, indented_re) = match kind { - DocStringKind::Starlark => (&STARLARK_SECTION_RE, &STARLARK_INDENTED_RE), - DocStringKind::Rust => (&RUST_SECTION_RE, &RUST_INDENTED_RE), - }; - - if let Some(details) = self.details { - let mut new_details = vec![]; - let mut current_section = None; - let mut current_section_text = vec![]; - - for line in details.lines() { - if let Some(matches) = section_re.captures(line) { - finish_section(&mut current_section, &mut current_section_text); - - let found_section = matches.get(1).unwrap().as_str().to_ascii_lowercase(); - if requested_sections.contains(&found_section.as_str()) { - current_section = Some(found_section); - } else { - new_details.push(line.to_owned()); - } - } else if current_section.is_some() && indented_re.is_match(line) { - current_section_text.push(line.to_owned()); - } else { - new_details.push(line.to_owned()); - finish_section(&mut current_section, &mut current_section_text); - } - } - - finish_section(&mut current_section, &mut current_section_text); - - let joined_details = new_details.join("\n").trim().to_owned(); - let details = match joined_details.is_empty() { - true => None, - false => Some(joined_details), - }; - ( - Self { - summary: self.summary, - details, - }, - sections, - ) - } else { - (self, sections) - } - } -} - -/// The file a symbol resides in, and if available its location within that file. -#[derive(Debug, Clone, PartialEq, Serialize, Default)] -pub struct Location { - /// `path` is a string that can be passed into `load()` statements. - pub path: String, -} - -/// The main identifier for a symbol. -#[derive(Debug, Clone, PartialEq, Serialize, Default)] -pub struct Identifier { - /// The name of the symbol (e.g. the function name, a name or path for a module, etc). - pub name: String, - /// Where the symbol is located, or absent if it is a built-in symbol. - pub location: Option, -} - -/// Documents a full module. -#[derive(Debug, Clone, PartialEq, Serialize, Default, Allocative)] -pub struct DocModule { - /// In general, this should be the first statement of a loaded file, if that statement is - /// a string literal. - pub docs: Option, - /// A mapping of top level symbols to their documentation, if any. - pub members: SmallMap, -} - -impl DocModule { - pub(crate) fn render_as_code(&self) -> String { - let mut res = self - .docs - .as_ref() - .map(DocString::render_as_quoted_code) - .unwrap_or_default(); - for (k, v) in &self.members { - res.push('\n'); - res.push_str(&(Doc::named_item(k.clone(), v.clone().to_doc_item())).render_as_code()); - res.push('\n'); - } - res - } -} - -/// Documents a single function. -#[derive(Debug, Clone, PartialEq, Default, Serialize, Allocative)] -pub struct DocFunction { - /// Documentation for the function. 
If parsed, this should generally be the first statement - /// of a function's body if that statement is a string literal. Any sections like "Args:", - /// "Returns", etc are kept intact. It is up to the consumer to remove these sections if - /// they are present. - pub docs: Option, - /// The parameters that this function takes. Docs for these parameters should generally be - /// extracted from the main docstring's details. - pub params: Vec, - /// Details about what this function returns. - pub ret: DocReturn, - /// Does this function act as type? - pub as_type: Option, -} - -impl DocFunction { - fn starlark_docstring(&self) -> Option { - let mut docs = String::new(); - if let Some(main_docs) = self.docs.as_ref().map(DocString::render_as_code) { - docs.push_str(&main_docs); - } - - let args_indentation_count = self - .params - .iter() - .map(|p| match p { - DocParam::NoArgs | DocParam::OnlyPosBefore => 0, - DocParam::Arg { name, .. } - | DocParam::Args { name, .. } - | DocParam::Kwargs { name, .. } => name.len() + 2, - }) - .max() - .unwrap_or_default(); - let args_indentation = " ".repeat(args_indentation_count); - - let args_docs = self - .params - .iter() - .filter_map(|p| p.starlark_docstring(&args_indentation)) - .join("\n"); - if !args_docs.is_empty() { - let indented = indent_trimmed(&args_docs, " "); - docs.push_str(&format!("\n\nArgs:\n{}", indented)); - } - - if let Some(ret_docs) = self.ret.starlark_docstring() { - let indented = indent_trimmed(&ret_docs, " "); - docs.push_str(&format!("\n\nRet:\n{}", indented)); - } - if docs.is_empty() { - None - } else { - Some(indent_trimmed( - &format!("\"\"\"\n{}\n\"\"\"", docs.trim_start()), - " ", - )) - } - } - - pub(crate) fn render_as_code(&self, name: &str) -> String { - let params: Vec<_> = self.params.iter().map(DocParam::render_as_code).collect(); - let spacer_len = if params.is_empty() { - 0 - } else { - (params.len() - 1) * 2 - }; - let params_len = params.iter().map(|a| a.len()).sum::() + spacer_len; - let params = if params_len > 60 { - format!("(\n{}\n)", indent_trimmed(¶ms.join(",\n"), " ")) - } else { - format!("({})", params.join(", ")) - }; - let docstring = self - .starlark_docstring() - .map(|mut ds| { - ds.push('\n'); - ds - }) - .unwrap_or_default(); - let ret = Some(&self.ret.typ) - .filter(|t| t != &&Ty::any()) - .map(|t| format!(" -> {}", t)) - .unwrap_or_default(); - - format!("def {}{}{}:\n{} pass", name, params, ret, docstring) - } - - /// Used by LSP. - pub fn find_param_with_name(&self, param_name: &str) -> Option<&DocParam> { - self.params.iter().find(|p| match p { - DocParam::Arg { name, .. } - | DocParam::Args { name, .. } - | DocParam::Kwargs { name, .. } - if name == param_name => - { - true - } - _ => false, - }) - } - - /// Parses function documentation out of a docstring - /// - /// # Arguments - /// * `kind`: The kind of docstring. This determines the formatting that is parsed. - /// * `params`: The parameters of the function. - /// * `return_type`: The return type. This is pulled from typing info / directly from users, - /// so it cannot be inferred generically. - /// * `raw_docstring`: The raw docstring to be parsed and potentially modified, - /// removing the sections detailing arguments and return values. - /// The format is determined by `kind`. 
- pub fn from_docstring( - kind: DocStringKind, - mut params: Vec, - return_type: Ty, - raw_docstring: Option<&str>, - as_type: Option, - ) -> Self { - match raw_docstring.and_then(|raw| DocString::from_docstring(kind, raw)) { - Some(ds) => { - let (function_docstring, sections) = - ds.parse_and_remove_sections(kind, &["arguments", "args", "returns", "return"]); - - match sections.get("arguments").or_else(|| sections.get("args")) { - Some(args) => { - let entries = Self::parse_params(kind, args); - for x in &mut params { - match x { - DocParam::Arg { name, docs, .. } - | DocParam::Args { name, docs, .. } - | DocParam::Kwargs { name, docs, .. } => match entries.get(name) { - Some(raw) => *docs = DocString::from_docstring(kind, raw), - _ => (), - }, - _ => (), - } - } - } - _ => (), - } - - let return_docs = sections - .get("return") - .or_else(|| sections.get("returns")) - .and_then(|raw| DocString::from_docstring(kind, raw)); - - DocFunction { - docs: Some(function_docstring), - params, - ret: DocReturn { - docs: return_docs, - typ: return_type, - }, - as_type, - } - } - None => DocFunction { - docs: None, - params, - ret: DocReturn { - docs: None, - typ: return_type, - }, - as_type, - }, - } - } - - /// Parse out parameter docs from an "Args:" section of a docstring - /// - /// `args_section` should be dedented, and generally should just be the `args` key of - /// the `DocString::parse_and_remove_sections()` function call. This is done as a - /// separate function to reduce the number of times that sections are parsed out of - /// docstring (e.g. if a user wants both the `Args:` and `Returns:` sections) - fn parse_params(kind: DocStringKind, args_section: &str) -> HashMap { - static STARLARK_ARG_RE: Lazy = - Lazy::new(|| Regex::new(r"^(\*{0,2}\w+):\s*(.*)").unwrap()); - static RUST_ARG_RE: Lazy = - Lazy::new(|| Regex::new(r"^(?:\* )?`(\w+)`:?\s*(.*)").unwrap()); - - static INDENTED_RE: Lazy = Lazy::new(|| Regex::new(r"^(?:\s|$)").unwrap()); - - let arg_re = match kind { - DocStringKind::Starlark => &STARLARK_ARG_RE, - DocStringKind::Rust => &RUST_ARG_RE, - }; - - let mut ret = HashMap::new(); - let mut current_arg = None; - let mut current_text = vec![]; - - for line in args_section.lines() { - if let Some(matches) = arg_re.captures(line) { - if let Some(a) = current_arg.take() { - ret.insert(a, DocString::join_and_dedent_lines(¤t_text)); - } - - current_arg = Some(matches.get(1).unwrap().as_str().to_owned()); - - let doc_match = matches.get(2).unwrap(); - current_text = vec![format!( - "{}{}", - " ".repeat(doc_match.start()), - doc_match.as_str() - )]; - } else if current_arg.is_some() && INDENTED_RE.is_match(line) { - current_text.push(line.to_owned()); - } - } - - if let Some(a) = current_arg.take() { - ret.insert(a, DocString::join_and_dedent_lines(¤t_text)); - } - - ret - } -} - -/// A single parameter of a function. -#[derive(Debug, Clone, PartialEq, Serialize, Allocative)] -#[serde(tag = "kind", rename_all = "snake_case")] -pub enum DocParam { - /// A regular parameter that may or may not have a default value. - Arg { - name: String, - docs: Option, - #[serde(rename = "type")] - typ: Ty, - /// If present, this parameter has a default value. This is the `repr()` of that value. - default_value: Option, - }, - /// Represents the "*" argument. - NoArgs, - /// Represents the "/" argument from [PEP 570](https://peps.python.org/pep-0570/). - OnlyPosBefore, - /// Represents the "*args" style of argument. 
- Args { - name: String, - docs: Option, - #[serde(rename = "type")] - typ: Ty, - }, - /// Represents the "**kwargs" style of argument. - Kwargs { - name: String, - docs: Option, - #[serde(rename = "type")] - typ: Ty, - }, -} - -impl DocParam { - fn starlark_docstring(&self, max_indentation: &str) -> Option { - let (name, docs) = match self { - DocParam::Arg { name, docs, .. } => Some((name, docs)), - DocParam::NoArgs | DocParam::OnlyPosBefore => None, - DocParam::Args { name, docs, .. } => Some((name, docs)), - DocParam::Kwargs { name, docs, .. } => Some((name, docs)), - }?; - let rendered_docs = docs.as_ref()?.render_as_code(); - let mut indented = indent_trimmed(&rendered_docs, max_indentation); - indented.replace_range(..name.len() + 2, &format!("{}: ", name)); - Some(indented) - } - - fn render_as_code(&self) -> String { - match self { - DocParam::Arg { - name, - typ, - default_value, - .. - } => match (typ, default_value.as_ref()) { - (t, Some(default)) if t.is_any() => format!("{} = {}", name, default), - (t, None) if t.is_any() => name.clone(), - (t, Some(default)) => format!("{}: {} = {}", name, t, default), - (t, None) => format!("{}: {}", name, t), - }, - DocParam::NoArgs => "*".to_owned(), - DocParam::OnlyPosBefore => "/".to_owned(), - DocParam::Args { name, typ, .. } | DocParam::Kwargs { name, typ, .. } => match typ { - t if t.is_any() => name.clone(), - typ => format!("{}: {}", name, typ), - }, - } - } -} - -/// Details about the return value of a function. -#[derive(Debug, Clone, PartialEq, Serialize, Allocative)] -pub struct DocReturn { - /// Extra semantic details around the returned value's meaning. - pub docs: Option, - #[serde(rename = "type")] - pub typ: Ty, -} - -impl Default for DocReturn { - fn default() -> Self { - DocReturn { - docs: None, - typ: Ty::any(), - } - } -} - -impl DocReturn { - fn starlark_docstring(&self) -> Option { - self.docs.as_ref().map(DocString::render_as_code) - } -} - -/// A single property of an object. These are explicitly not functions (see [`DocMember`]). -#[derive(Debug, Clone, PartialEq, Serialize, Allocative)] -pub struct DocProperty { - pub docs: Option, - #[serde(rename = "type")] - pub typ: Ty, -} - -impl DocProperty { - pub(crate) fn render_as_code(&self, name: &str) -> String { - match ( - &self.typ, - self.docs.as_ref().map(DocString::render_as_quoted_code), - ) { - // TODO(nmj): The starlark syntax needs to be updated to support type - // annotations on values as python does. Afterward, use these - // format strings. - // (Some(t), Some(ds)) => { - // format!("{}\n_{}: {} = None", ds, name, t.raw_type) - // } - // (Some(t), None) => format!(r#"_{}: {} = None"#, name, t.raw_type), - (t, Some(ds)) if t.is_any() => format!("{}\n_{} = None", ds, name), - (t, None) if t.is_any() => format!("_{} = None", name), - (t, Some(ds)) => { - format!("{}\n# type: {}\n_{} = None", ds, t, name) - } - (t, None) => format!("# type: {}\n_{} = None", t, name), - } - } -} - -/// A named member of an object. 
-#[derive(Debug, Clone, PartialEq, Serialize, Allocative)] -#[serde(tag = "kind", rename_all = "snake_case")] -pub enum DocMember { - Property(DocProperty), - Function(DocFunction), -} - -impl DocMember { - pub(crate) fn from_value(value: Value) -> Self { - // If we have a value which is a complex type, the right type to put in the docs is not the type - // it represents, but it's just a property we should point at - match value.documentation() { - Some(DocItem::Function(x)) => DocMember::Function(x), - Some(DocItem::Property(x)) => DocMember::Property(x), - _ => DocMember::Property(DocProperty { - docs: None, - typ: value.get_type_starlark_repr(), - }), - } - } - - pub fn to_doc_item(self) -> DocItem { - match self { - DocMember::Property(x) => DocItem::Property(x), - DocMember::Function(x) => DocItem::Function(x), - } - } -} - -/// An object with named functions/properties. -#[derive(Debug, Clone, PartialEq, Serialize, Default, Allocative)] -pub struct DocObject { - pub docs: Option, - /// Name and details of each member of this object. - pub members: SmallMap, -} - -impl DocObject { - pub(crate) fn render_as_code(&self, name: &str) -> String { - let summary = self - .docs - .as_ref() - .map(|ds| { - let mut s = ds.render_as_quoted_code(); - s.push('\n'); - s - }) - .unwrap_or_default(); - - let member_docs = self - .members - .iter() - .map(|(name, member)| match member { - DocMember::Property(p) => p.render_as_code(name), - DocMember::Function(f) => f.render_as_code(&format!("_{}", name)), - }) - .join("\n\n"); - - let exported_struct_members = self - .members - .iter() - .map(|(name, _)| format!(" {} = _{},", name, name)) - .join("\n"); - let exported_struct = if !exported_struct_members.is_empty() { - format!( - "{}{} = struct(\n{}\n)", - summary, name, exported_struct_members - ) - } else { - String::new() - }; - - format!("{}\n\n{}", member_docs, exported_struct) - .trim() - .to_owned() - } -} - -#[derive(Debug, Clone, PartialEq, Serialize, Allocative)] -#[serde(tag = "kind", rename_all = "snake_case")] -pub enum DocItem { - Module(DocModule), - Object(DocObject), - Function(DocFunction), - Property(DocProperty), -} - -impl DocItem { - /// Get the underlying [`DocString`] for this item, if it exists. - pub fn get_doc_string(&self) -> Option<&DocString> { - match self { - DocItem::Module(m) => m.docs.as_ref(), - DocItem::Object(o) => o.docs.as_ref(), - DocItem::Function(f) => f.docs.as_ref(), - DocItem::Property(p) => p.docs.as_ref(), - } - } - - /// Get the summary of the underlying [`DocString`] for this item, if it exists. - pub fn get_doc_summary(&self) -> Option<&str> { - self.get_doc_string().map(|ds| ds.summary.as_str()) - } -} - -impl DocMember { - /// Get the underlying [`DocString`] for this item, if it exists. - pub fn get_doc_string(&self) -> Option<&DocString> { - match self { - DocMember::Function(f) => f.docs.as_ref(), - DocMember::Property(p) => p.docs.as_ref(), - } - } - - /// Get the summary of the underlying [`DocString`] for this item, if it exists. - pub fn get_doc_summary(&self) -> Option<&str> { - self.get_doc_string().map(|ds| ds.summary.as_str()) - } -} - -impl DocParam { - /// Get the underlying [`DocString`] for this item, if it exists. - pub fn get_doc_string(&self) -> Option<&DocString> { - match self { - DocParam::Arg { docs, .. } - | DocParam::Args { docs, .. } - | DocParam::Kwargs { docs, .. } => docs.as_ref(), - _ => None, - } - } - - /// Get the summary of the underlying [`DocString`] for this item, if it exists. 
- pub fn get_doc_summary(&self) -> Option<&str> { - self.get_doc_string().map(|ds| ds.summary.as_str()) - } -} - -/// The main structure that represents the documentation for a given symbol / module. -#[derive(Debug, Clone, PartialEq, Serialize)] -pub struct Doc { - pub id: Identifier, - pub item: DocItem, - /// Custom key-value pairs that are not interpreted directly by starlark, and can be - /// used as arbitrary data for documentation tooling. - pub custom_attrs: HashMap, -} - -impl Doc { - pub fn named_item(name: String, item: DocItem) -> Self { - Doc { - id: Identifier { - name, - location: None, - }, - item, - custom_attrs: HashMap::new(), - } - } - - /// Render a starlark code representation of this documentation object. - /// - /// Function bodies for these consist of a single "pass" statement, and objects - /// are represented as structs. - pub fn render_as_code(&self) -> String { - match &self.item { - DocItem::Module(m) => m.render_as_code(), - DocItem::Object(o) => o.render_as_code(&self.id.name), - DocItem::Function(f) => f.render_as_code(&self.id.name), - DocItem::Property(p) => p.render_as_code(&self.id.name), - } - } -} - -/// Render a series of [`Doc`] objects into a "starlark" file. -/// -/// Function bodies for these consist of a single "pass" statement, and objects -/// are represented as structs. -/// -/// The returned array may not be in the same order as the originally provided docs. -/// They are in the order that they should appear in the rendered starlark file. -pub fn render_docs_as_code(docs: &[Doc]) -> String { - let (modules, non_modules): (Vec<_>, Vec<_>) = docs - .iter() - .partition(|d| matches!(d.item, DocItem::Module(_))); - modules - .into_iter() - .chain(non_modules) - .map(|d| d.render_as_code()) - .join("\n\n") -} - -/// Get documentation for all items registered with `#[derive(StarlarkDocs)]` -/// -/// Note: Because `StarlarkDocs` uses the inventory crate under the hood, in statically linked -/// binaries, documentation from all compiled crates in the binary will be included. -/// -/// For dynamically linked binaries, documentation will only be able to retrieved after the crate's -/// library is `dlopen()`ed. -pub fn get_registered_starlark_docs() -> Vec { - inventory::iter:: - .into_iter() - .filter_map(|d| (d.getter)()) - .collect() -} - -#[doc(hidden)] -pub struct RegisteredDoc { - pub getter: fn() -> Option, -} - -inventory::collect!(RegisteredDoc); - -impl RegisteredDoc { - /// This function is called from generated code. 
- pub fn for_type<'v, T: StarlarkValue<'v>>(custom_attrs: &[(&str, &str)]) -> Option { - let name = T::TYPE.to_owned(); - let id = Identifier { - name, - location: None, - }; - let item = DocItem::Object(T::get_methods()?.documentation()); - let custom_attrs = custom_attrs - .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())) - .collect(); - Some(Doc { - id, - item, - custom_attrs, - }) - } -} - -#[cfg(test)] -mod tests { - - use super::*; - - #[test] - fn parses_starlark_docstring() { - assert_eq!( - DocString::from_docstring(DocStringKind::Starlark, " "), - None - ); - assert_eq!( - DocString::from_docstring( - DocStringKind::Starlark, - " \n\nThis should be the summary\n\n" - ), - Some(DocString { - summary: "This should be the summary".to_owned(), - details: None, - }) - ); - assert_eq!( - DocString::from_docstring( - DocStringKind::Starlark, - " \n\nThis should be the summary\n\n " - ), - Some(DocString { - summary: "This should be the summary".to_owned(), - details: None, - }) - ); - assert_eq!( - DocString::from_docstring( - DocStringKind::Starlark, - "Summary line here\n \nDetails after some spaces\n\nand some more newlines" - ), - Some(DocString { - summary: "Summary line here".to_owned(), - details: Some("Details after some spaces\n\nand some more newlines".to_owned()), - }) - ); - assert_eq!( - DocString::from_docstring( - DocStringKind::Starlark, - r#" - This is the summary. - It has multiple lines and some spaces, and should be collapsed - - This should be a multiline set of details. - It should be: - - Dedented - - Trimmed - - Split properly from the summary - -"# - ), - Some(DocString { - summary: "This is the summary. It has multiple lines and some spaces, and should be collapsed".to_owned(), - details: Some( - concat!( - "This should be a multiline set of details.\n", - "It should be:\n", - " - Dedented\n", - " - Trimmed\n", - " - Split properly from the summary" - ) - .to_owned() - ), - }) - ); - assert_eq!( - DocString::from_docstring( - DocStringKind::Starlark, - r#"This is a summary line that is not dedented like the 'details' - - Typing the first line right after the """ in python docstrings is common, - while putting the rest of the docstring indented. Just support both so it - doesn't surprise anyone. - "# - ), - Some(DocString { - summary: "This is a summary line that is not dedented like the 'details'" - .to_owned(), - details: Some( - concat!( - "Typing the first line right after the \"\"\" in python docstrings is common,\n", - "while putting the rest of the docstring indented. Just support both so it\n", - "doesn't surprise anyone." - ) - .to_owned() - ), - }) - ); - } - - #[test] - fn parses_rust_docstring() { - let raw = r#" - This is the summary line - that sometimes is split on two lines - - This is the second part. It has some code blocks - - ``` - # foo() { - "bar" - # } - ``` - - ```python - # This is a python comment. Leave it be - print(1) - ``` - - ```rust - # other_foo() { - "other_bar" - # } - ``` - "#; - - let parsed = DocString::from_docstring(DocStringKind::Rust, raw).unwrap(); - assert_eq!( - "This is the summary line that sometimes is split on two lines", - parsed.summary - ); - assert_eq!( - concat!( - "This is the second part. It has some code blocks\n\n", - "```\n", - "\"bar\"\n", - "```\n\n", - "```python\n", - "# This is a python comment. 
Leave it be\n", - "print(1)\n", - "```\n\n", - "```rust\n", - "\"other_bar\"", - "\n```" - ), - parsed.details.unwrap() - ); - } - - #[test] - fn parses_and_removes_sections_from_starlark_docstring() { - let raw_docs = r#"This is an example docstring - - We have some details up here that should not be parsed - - Some empty section: - Example: - First line of the section - - A newline with no space after it before the second one, - and a third that's indented further. - This is not in the example section - - Last: - This is something in the last section - "#; - let expected_docstring = DocString::from_docstring( - DocStringKind::Starlark, - r#"This is an example docstring - - We have some details up here that should not be parsed - - Some empty section: - This is not in the example section - - Last: - This is something in the last section - "#, - ) - .unwrap(); - - let expected_sections = HashMap::from([( - "example".to_owned(), - concat!( - "First line of the section\n\n", - "A newline with no space after it before the second one,\n", - " and a third that's indented further." - ) - .to_owned(), - )]); - - let ds = DocString::from_docstring(DocStringKind::Starlark, raw_docs).unwrap(); - let (new_ds, sections) = - ds.parse_and_remove_sections(DocStringKind::Starlark, &["example"]); - - assert_eq!(new_ds, expected_docstring); - assert_eq!(sections, expected_sections); - } - - #[test] - fn parses_and_removes_sections_from_rust_docstring() { - let raw_docs = r#"This is an example docstring - - We have some details up here that should not be parsed - - # Some Section - - ``` - # This is a commented out line in a codeblock - fn some_func() {} - ``` - - # Example - First line of the section - - Note that, unlike starlark doc strings, - we don't require indentation. The end of a - section is either a new section appearing, - or the end of the string. - - # Last - This is something in the last section - "#; - let expected_docstring = DocString::from_docstring( - DocStringKind::Rust, - r#"This is an example docstring - - We have some details up here that should not be parsed - - # Some Section - - ``` - fn some_func() {} - ``` - - # Last - This is something in the last section - "#, - ) - .unwrap(); - - let expected_sections = HashMap::from([( - "example".to_owned(), - concat!( - "First line of the section\n\n", - "Note that, unlike starlark doc strings,\n", - "we don't require indentation. The end of a\n", - "section is either a new section appearing,\n", - "or the end of the string.", - ) - .to_owned(), - )]); - - let ds = DocString::from_docstring(DocStringKind::Rust, raw_docs).unwrap(); - let (new_ds, sections) = ds.parse_and_remove_sections(DocStringKind::Rust, &["example"]); - - assert_eq!(new_ds, expected_docstring); - assert_eq!(sections, expected_sections); - } - - fn arg(name: &str) -> DocParam { - DocParam::Arg { - name: name.to_owned(), - docs: None, - typ: Ty::any(), - default_value: None, - } - } - - #[test] - fn parses_starlark_function_docstring() { - let docstring = r#"This is an example docstring - - Details here - - Args: - arg_foo: The argument named foo - arg_bar: The argument named bar. 
It has - a longer doc string that spans - over three lines - *args: Docs for args - **kwargs: Docs for kwargs - - Returns: - A value - "#; - - let kind = DocStringKind::Starlark; - let return_type = Ty::int(); - let expected = DocFunction { - docs: DocString::from_docstring(kind, "This is an example docstring\n\nDetails here"), - params: vec![ - DocParam::Arg { - name: "**kwargs".to_owned(), - docs: DocString::from_docstring(kind, "Docs for kwargs"), - typ: Ty::any(), - default_value: None, - }, - DocParam::Arg { - name: "*args".to_owned(), - docs: DocString::from_docstring(kind, "Docs for args"), - typ: Ty::any(), - default_value: None, - }, - DocParam::Arg { - name: "arg_bar".to_owned(), - docs: DocString::from_docstring( - kind, - concat!( - "The argument named bar. It has\n", - "a longer doc string that spans\n", - "over three lines" - ), - ), - typ: Ty::any(), - default_value: None, - }, - DocParam::Arg { - name: "arg_foo".to_owned(), - docs: DocString::from_docstring(kind, "The argument named foo"), - typ: Ty::any(), - default_value: None, - }, - ], - ret: DocReturn { - docs: DocString::from_docstring(kind, "A value"), - typ: return_type.clone(), - }, - as_type: None, - }; - - let function_docs = DocFunction::from_docstring( - kind, - vec![ - arg("**kwargs"), - arg("*args"), - arg("arg_bar"), - arg("arg_foo"), - ], - return_type, - Some(docstring), - None, - ); - - assert_eq!(expected, function_docs); - } - - #[test] - fn parses_rust_function_docstring() { - let docstring = r#"This is an example docstring - - Details here - - # Arguments - * `arg_foo`: The argument named foo - `arg_bar`: The argument named bar. It has - a longer doc string that spans - over three lines - - # Returns - A value - "#; - - let kind = DocStringKind::Rust; - let return_type = Ty::int(); - let expected = DocFunction { - docs: DocString::from_docstring(kind, "This is an example docstring\n\nDetails here"), - params: vec![ - DocParam::Arg { - name: "arg_bar".to_owned(), - docs: DocString::from_docstring( - kind, - concat!( - "The argument named bar. It has\n", - "a longer doc string that spans\n", - "over three lines" - ), - ), - typ: Ty::any(), - default_value: None, - }, - DocParam::Arg { - name: "arg_foo".to_owned(), - docs: DocString::from_docstring(kind, "The argument named foo"), - typ: Ty::any(), - default_value: None, - }, - ], - ret: DocReturn { - docs: DocString::from_docstring(kind, "A value"), - typ: return_type.clone(), - }, - as_type: None, - }; - - let function_docs = DocFunction::from_docstring( - kind, - vec![arg("arg_bar"), arg("arg_foo")], - return_type, - Some(docstring), - None, - ); - - assert_eq!(expected, function_docs); - } -} diff --git a/starlark-rust/starlark/src/docs/multipage.rs b/starlark-rust/starlark/src/docs/multipage.rs new file mode 100644 index 0000000000000..543d1cb98b9e2 --- /dev/null +++ b/starlark-rust/starlark/src/docs/multipage.rs @@ -0,0 +1,172 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use std::collections::HashMap;
+
+use dupe::Dupe;
+
+use crate::docs::DocItem;
+use crate::docs::DocModule;
+use crate::docs::DocType;
+use crate::typing::ty::TypeRenderConfig;
+use crate::typing::Ty;
+
+pub struct DocModuleInfo<'a> {
+    pub module: &'a DocModule,
+    pub name: String,
+    /// A prefix to attach to all of the pages rendered from this module
+    pub page_path: String,
+}
+
+impl<'a> DocModuleInfo<'a> {
+    fn into_page_renders(&self) -> Vec<PageRender<'a>> {
+        Self::traverse_inner(&self.module, &self.name, &self.page_path)
+    }
+
+    fn traverse_inner(
+        docs: &'a DocModule,
+        module_name: &str,
+        base_path: &str,
+    ) -> Vec<PageRender<'a>> {
+        let mut result = vec![];
+
+        result.push(PageRender {
+            page: DocPageRef::Module(docs),
+            path: base_path.to_owned(),
+            name: module_name.to_owned(),
+            ty: None,
+        });
+
+        for (name, doc) in &docs.members {
+            let path = if base_path.is_empty() {
+                name.to_owned()
+            } else {
+                format!("{}/{}", base_path, name)
+            };
+            match doc {
+                DocItem::Module(doc_module) => {
+                    result.extend(Self::traverse_inner(&doc_module, &name, &path))
+                }
+                DocItem::Type(doc_type) => result.push(PageRender {
+                    page: DocPageRef::Type(doc_type),
+                    path,
+                    name: name.to_owned(),
+                    ty: Some(doc_type.ty.dupe()),
+                }),
+
+                DocItem::Member(_) => (),
+            }
+        }
+
+        result
+    }
+}
+
+/// A reference to a page to render
+/// DocsRender will have all the PageRender it needs to render the docs
+/// Since types and some modules are owned by other modules, we need to use the reference here
+enum DocPageRef<'a> {
+    Module(&'a DocModule),
+    Type(&'a DocType),
+}
+
+/// A single page to render
+struct PageRender<'a> {
+    page: DocPageRef<'a>,
+    path: String,
+    name: String,
+    /// The type of the page, if it is a type page. This is used to get the link to the type.
+    ty: Option<Ty>,
+}
+
+impl<'a> PageRender<'a> {
+    fn render_markdown(&self, render_config: &TypeRenderConfig) -> String {
+        let content = match self.page {
+            DocPageRef::Module(doc_module) => {
+                doc_module.render_markdown_page_for_multipage_render(&self.name, render_config)
+            }
+            DocPageRef::Type(doc_type) => {
+                doc_type.render_markdown_page_for_multipage_render(&self.name, render_config)
+            }
+        };
+        match render_config {
+            TypeRenderConfig::Default => content,
+            TypeRenderConfig::LinkedType { ty_to_path_map: _ } => {
+                format!("{}\n\n{}", "import Link from '@docusaurus/Link';", content)
+            }
+        }
+    }
+}
+
+/// Renders the contents into a multi-page tree structure
+///
+/// The output will contain page-paths like ``, `type1`, `mod1`, and `mod1/type2`, each mapped to
+/// the contents of that page. That means that some of the paths may be prefixes of each other,
+/// which will need consideration if this is to be materialized to a filesystem.
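+///
+/// As an illustrative sketch (module and type names here are hypothetical), a
+/// root module containing a type `Foo` and a submodule `sub` would produce a
+/// map along the lines of:
+///
+/// ```text
+/// ""    -> root module page
+/// "Foo" -> type page for Foo
+/// "sub" -> module page for sub
+/// ```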
+struct MultipageRender<'a> {
+    page_renders: Vec<PageRender<'a>>,
+    // used for the linkable type in the markdown
+    render_config: TypeRenderConfig,
+}
+
+impl<'a> MultipageRender<'a> {
+    /// Create a new MultipageRender from a list of DocModuleInfo, and an optional function to map a type path to a linkable path
+    /// If the function is not provided, the type will not be linkable
+    fn new(docs: Vec<DocModuleInfo<'a>>, ty_path_mapper: Option<&dyn Fn(&str) -> String>) -> Self {
+        let mut res = vec![];
+        for doc in docs {
+            res.extend(doc.into_page_renders());
+        }
+        let mut render_config = TypeRenderConfig::Default;
+        if let Some(path_mapper) = ty_path_mapper {
+            let mut ty_to_path_map = HashMap::new();
+            for page in res.iter() {
+                if let Some(ty) = &page.ty {
+                    ty_to_path_map.insert(ty.dupe(), path_mapper(&page.path));
+                }
+            }
+            render_config = TypeRenderConfig::LinkedType { ty_to_path_map };
+        }
+        Self {
+            page_renders: res,
+            render_config,
+        }
+    }
+
+    /// Render the docs into a map of markdown paths to markdown content
+    fn render_markdown_pages(&self) -> HashMap<String, String> {
+        self.page_renders
+            .iter()
+            .map(|page| (page.path.clone(), page.render_markdown(&self.render_config)))
+            .collect()
+    }
+}
+
+/// Renders the contents into a multi-page tree structure
+///
+/// The output will contain page-paths like ``, `type1`, `mod1`, and `mod1/type2`, each mapped to
+/// the contents of that page. That means that some of the paths may be prefixes of each other,
+/// which will need consideration if this is to be materialized to a filesystem.
+///
+/// It accepts a list of DocModuleInfo, and an optional function to map a type path to a linkable path
+pub fn render_markdown_multipage(
+    modules_infos: Vec<DocModuleInfo>,
+    ty_path_mapper: Option<&dyn Fn(&str) -> String>,
+) -> HashMap<String, String> {
+    let multipage_render = MultipageRender::new(modules_infos, ty_path_mapper);
+    multipage_render.render_markdown_pages()
+}
diff --git a/starlark-rust/starlark/src/docs/parse.rs b/starlark-rust/starlark/src/docs/parse.rs
new file mode 100644
index 0000000000000..72e3d3b7ae7de
--- /dev/null
+++ b/starlark-rust/starlark/src/docs/parse.rs
@@ -0,0 +1,808 @@
+/*
+ * Copyright 2019 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ */ + +use std::collections::HashMap; + +use dupe::Dupe; +use once_cell::sync::Lazy; +use regex::Regex; +use regex::RegexBuilder; +use starlark_syntax::syntax::ast::AstLiteral; +use starlark_syntax::syntax::ast::AstPayload; +use starlark_syntax::syntax::ast::AstStmtP; +use starlark_syntax::syntax::ast::ExprP; +use starlark_syntax::syntax::ast::StmtP; + +use crate::codemap::Spanned; +use crate::docs::DocFunction; +use crate::docs::DocParam; +use crate::docs::DocParams; +use crate::docs::DocReturn; +use crate::docs::DocString; +use crate::typing::Ty; + +/// Controls the formatting to use when parsing `DocString`s from raw docstrings +#[derive(Copy, Clone, Dupe)] +pub enum DocStringKind { + /// Docstrings provided by users in starlark files, following python-y documentation style. + /// + /// For functions, they are the piece in `"""` that come right after the `def foo():` line, + /// and they have sections for additional details. An example from a starlark file might be: + /// + /// ```starlark + /// """ Module level docs here """ + /// + /// def some_function(val: "string") -> "string": + /// """ This function takes a string and returns it. + /// + /// This is where an explanation might go, but I have none + /// + /// Args: + /// val: This is the value that gets returned + /// + /// Returns: + /// The original value, because identity functions are fun. + /// ``` + Starlark, + /// Docstrings used with `#[starlark_module]` in rust. + /// + /// These are the documentation strings prefixed by `///` (like these docs) on + /// `#[starlark_module]`, and the functions / attributes within it. It supports + /// a section `# Arguments`, and `# Returns`, and removes some lines from code + /// blocks that are valid for rustdoc, but not useful for people using these + /// functions via starlark. An example might be something like: + /// + /// ``` + /// # use starlark::starlark_module; + /// # use starlark::environment::MethodsBuilder; + /// # use starlark::values::Value; + /// + /// /// These are where the module / object level docs go + /// #[starlark_module] + /// fn add_some_value(builder: &mut MethodsBuilder) { + /// /// attr1 is an attribute that does nothing interesting. + /// #[starlark(attribute)] + /// fn attr1<'v>(this: Value<'v>) -> anyhow::Result { + /// let _ = this; + /// Ok("attr1".to_owned()) + /// } + /// /// Copies a string + /// /// + /// /// This is where details would be, if this were + /// /// a more interesting function. + /// /// + /// /// # Arguments + /// /// * `s`: This is string that is returned. + /// /// + /// /// # Returns + /// /// The a copy of the original string. + /// fn copy_string<'v>(this: Value<'v>, s: &str) -> anyhow::Result { + /// let _ = this; + /// Ok(s.to_owned()) + /// } + /// } + /// ``` + Rust, +} + +impl DocString { + /// Extracts the docstring from a function or module body, iff the first + /// statement is a string literal. + pub(crate) fn extract_raw_starlark_docstring( + body: &AstStmtP
<P>,
+    ) -> Option<String> {
+        if let StmtP::Statements(stmts) = &body.node {
+            if let Some(Spanned {
+                node:
+                    StmtP::Expression(Spanned {
+                        node: ExprP::Literal(AstLiteral::String(s)),
+                        ..
+                    }),
+                ..
+            }) = stmts.first()
+            {
+                return Some(s.node.to_owned());
+            }
+        };
+        None
+    }
+
+    fn split_summary_details(s: &str) -> Option<(&str, &str)> {
+        let mut summary_len = 0;
+        for line in s.split_inclusive('\n') {
+            if line.trim().is_empty() {
+                let details_start = summary_len + line.len();
+                return Some((s[..summary_len].trim(), &s[details_start..]));
+            } else {
+                summary_len += line.len();
+            }
+        }
+        None
+    }
+
+    // Remove any newlines (and surrounding whitespace) in the summary, and
+    // replace them with a single space.
+    fn normalize_summary(summary: &str) -> String {
+        let mut res = String::with_capacity(summary.len());
+        for line in summary.lines() {
+            if !res.is_empty() {
+                res.push(' ');
+            }
+            res.push_str(line.trim());
+        }
+        res
+    }
+
+    /// Do common work to parse a docstring (dedenting, splitting summary and details, etc)
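+    ///
+    /// For example (a sketch of the observable behavior, not a doctest run in CI):
+    ///
+    /// ```ignore
+    /// let ds = DocString::from_docstring(DocStringKind::Starlark, "Summary\n\nDetails").unwrap();
+    /// assert_eq!(ds.summary, "Summary");
+    /// assert_eq!(ds.details.as_deref(), Some("Details"));
+    /// ```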
+    pub fn from_docstring(kind: DocStringKind, user_docstring: &str) -> Option<DocString> {
+        let trimmed_docs = user_docstring.trim();
+        if trimmed_docs.is_empty() {
+            None
+        } else {
+            let split: Option<(&str, &str)> = Self::split_summary_details(trimmed_docs);
+            let (summary, details) = match split {
+                Some((summary, details)) if !summary.is_empty() && !details.is_empty() => {
+                    // Dedent the details separately so that people can have the summary on the
+                    // same line as the opening quotes, and the details indented on subsequent
+                    // lines.
+                    let details = match kind {
+                        DocStringKind::Starlark => textwrap::dedent(details).trim().to_owned(),
+                        DocStringKind::Rust => {
+                            Self::remove_rust_comments(textwrap::dedent(details).trim())
+                        }
+                    };
+                    (summary, Some(details))
+                }
+                _ => (trimmed_docs, None),
+            };
+
+            let summary = Self::normalize_summary(summary);
+
+            Some(DocString { summary, details })
+        }
+    }
+
+    /// Removes rustdoc-style commented out lines from code blocks.
+    fn remove_rust_comments(details: &str) -> String {
+        static CODEBLOCK_RE: Lazy<Regex> = Lazy::new(|| {
+            RegexBuilder::new(r"```(\w*)\n.*?```")
+                .dot_matches_new_line(true)
+                .build()
+                .expect("regex to compile")
+        });
+        static COMMENT_RE: Lazy<Regex> = Lazy::new(|| {
+            RegexBuilder::new(r"^# .*$\n")
+                .multi_line(true)
+                .build()
+                .expect("regex to compile")
+        });
+        CODEBLOCK_RE
+            .replace_all(details, |caps: &regex::Captures| {
+                match caps.get(1).expect("language group").as_str() {
+                    "" | "rust" => COMMENT_RE
+                        .replace_all(caps.get(0).expect("$0 to exist").as_str(), "")
+                        .to_string(),
+                    _ => caps.get(0).expect("$0 to exist").as_str().to_owned(),
+                }
+            })
+            .to_string()
+    }
+
+    /// Join lines up, dedent them, and trim them
+    fn join_and_dedent_lines(lines: &[String]) -> String {
+        textwrap::dedent(&lines.join("\n")).trim().to_owned()
+    }
+
+    /// Parse the sections out of a docstring's `details` text, and remove the requested sections from the text.
+    ///
+    /// "sections" are the various things in doc strings like "Arguments:", "Returns:", etc
+    ///
+    /// # Returns
+    /// - A new instance of `DocString`, with the requested sections, if found, removed.
+    /// - A mapping of section name, converted to lower case, to the cleaned up section text
+    ///   i.e. dedented, section header not present, etc for any found sections.
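+    ///
+    /// For instance (illustrative, not a doctest): given details of
+    ///
+    /// ```text
+    /// Args:
+    ///     x: docs for x
+    ///
+    /// Other:
+    ///     kept
+    /// ```
+    ///
+    /// requesting `&["args"]` yields the map `{"args": "x: docs for x"}` and a
+    /// `DocString` whose details keep only the `Other:` block.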
+    fn parse_and_remove_sections(
+        self,
+        kind: DocStringKind,
+        requested_sections: &[&str],
+    ) -> (Self, HashMap<String, String>) {
+        let mut sections = HashMap::new();
+
+        let mut finish_section =
+            |current_section: &mut Option<String>, current_section_text: &mut Vec<String>| {
+                if let Some(s) = current_section.take() {
+                    sections.insert(s, DocString::join_and_dedent_lines(current_section_text));
+                    current_section_text.clear();
+                }
+            };
+
+        static STARLARK_SECTION_RE: Lazy<Regex> =
+            Lazy::new(|| Regex::new(r"^([\w -]+):\s*$").unwrap());
+        static STARLARK_INDENTED_RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"^(?:\s|$)").unwrap());
+        static RUST_SECTION_RE: Lazy<Regex> =
+            Lazy::new(|| Regex::new(r"^# ([\w -]+)\s*$").unwrap());
+        static RUST_INDENTED_RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"^.*").unwrap());
+
+        let (section_re, indented_re) = match kind {
+            DocStringKind::Starlark => (&STARLARK_SECTION_RE, &STARLARK_INDENTED_RE),
+            DocStringKind::Rust => (&RUST_SECTION_RE, &RUST_INDENTED_RE),
+        };
+
+        if let Some(details) = self.details {
+            let mut new_details = vec![];
+            let mut current_section = None;
+            let mut current_section_text = vec![];
+
+            for line in details.lines() {
+                if let Some(matches) = section_re.captures(line) {
+                    finish_section(&mut current_section, &mut current_section_text);
+
+                    let found_section = matches.get(1).unwrap().as_str().to_ascii_lowercase();
+                    if requested_sections.contains(&found_section.as_str()) {
+                        current_section = Some(found_section);
+                    } else {
+                        new_details.push(line.to_owned());
+                    }
+                } else if current_section.is_some() && indented_re.is_match(line) {
+                    current_section_text.push(line.to_owned());
+                } else {
+                    new_details.push(line.to_owned());
+                    finish_section(&mut current_section, &mut current_section_text);
+                }
+            }
+
+            finish_section(&mut current_section, &mut current_section_text);
+
+            let joined_details = new_details.join("\n").trim().to_owned();
+            let details = match joined_details.is_empty() {
+                true => None,
+                false => Some(joined_details),
+            };
+            (
+                Self {
+                    summary: self.summary,
+                    details,
+                },
+                sections,
+            )
+        } else {
+            (self, sections)
+        }
+    }
+}
+
+impl DocFunction {
+    /// Parses function documentation out of a docstring
+    ///
+    /// # Arguments
+    /// * `kind`: The kind of docstring. This determines the formatting that is parsed.
+    /// * `params`: The parameters of the function.
+    /// * `return_type`: The return type. This is pulled from typing info / directly from users,
+    ///   so it cannot be inferred generically.
+    /// * `raw_docstring`: The raw docstring to be parsed and potentially modified,
+    ///   removing the sections detailing arguments and return values.
+    ///   The format is determined by `kind`.
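+    ///
+    /// Illustrative call (hypothetical `params` value; not a doctest run in CI):
+    ///
+    /// ```ignore
+    /// let f = DocFunction::from_docstring(
+    ///     DocStringKind::Starlark,
+    ///     params,    // `DocParams` whose entries receive docs parsed from the "Args:" section
+    ///     Ty::any(), // the return type is supplied by the caller, not parsed
+    ///     Some("Summary\n\nArgs:\n    x: docs for x\n\nReturns:\n    a value\n"),
+    /// );
+    /// ```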
+    pub fn from_docstring(
+        kind: DocStringKind,
+        mut params: DocParams,
+        return_type: Ty,
+        raw_docstring: Option<&str>,
+    ) -> Self {
+        match raw_docstring.and_then(|raw| DocString::from_docstring(kind, raw)) {
+            Some(ds) => {
+                let (function_docstring, sections) =
+                    ds.parse_and_remove_sections(kind, &["arguments", "args", "returns", "return"]);
+
+                match sections.get("arguments").or_else(|| sections.get("args")) {
+                    Some(args) => {
+                        let entries = Self::parse_params(kind, args);
+                        for x in &mut params.doc_params_mut() {
+                            let DocParam { name, docs, .. } = x;
+                            match entries.get(name) {
+                                Some(raw) => *docs = DocString::from_docstring(kind, raw),
+                                None => {}
+                            }
+                        }
+                    }
+                    _ => (),
+                }
+
+                let return_docs = sections
+                    .get("return")
+                    .or_else(|| sections.get("returns"))
+                    .and_then(|raw| DocString::from_docstring(kind, raw));
+
+                DocFunction {
+                    docs: Some(function_docstring),
+                    params,
+                    ret: DocReturn {
+                        docs: return_docs,
+                        typ: return_type,
+                    },
+                }
+            }
+            None => DocFunction {
+                docs: None,
+                params,
+                ret: DocReturn {
+                    docs: None,
+                    typ: return_type,
+                },
+            },
+        }
+    }
+
+    /// Parse out parameter docs from an "Args:" section of a docstring
+    ///
+    /// `args_section` should be dedented, and generally should just be the `args` key of
+    /// the `DocString::parse_and_remove_sections()` function call. This is done as a
+    /// separate function to reduce the number of times that sections are parsed out of
+    /// a docstring (e.g. if a user wants both the `Args:` and `Returns:` sections)
+    fn parse_params(kind: DocStringKind, args_section: &str) -> HashMap<String, String> {
+        static STARLARK_ARG_RE: Lazy<Regex> =
+            Lazy::new(|| Regex::new(r"^\*{0,2}(\w+):\s*(.*)").unwrap());
+        static RUST_ARG_RE: Lazy<Regex> =
+            Lazy::new(|| Regex::new(r"^(?:\* )?`(\w+)`:?\s*(.*)").unwrap());
+
+        static INDENTED_RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"^(?:\s|$)").unwrap());
+
+        let arg_re = match kind {
+            DocStringKind::Starlark => &STARLARK_ARG_RE,
+            DocStringKind::Rust => &RUST_ARG_RE,
+        };
+
+        let mut ret = HashMap::new();
+        let mut current_arg = None;
+        let mut current_text = vec![];
+
+        for line in args_section.lines() {
+            if let Some(matches) = arg_re.captures(line) {
+                if let Some(a) = current_arg.take() {
+                    ret.insert(a, DocString::join_and_dedent_lines(&current_text));
+                }
+
+                current_arg = Some(matches.get(1).unwrap().as_str().to_owned());
+
+                let doc_match = matches.get(2).unwrap();
+                current_text = vec![format!(
+                    "{}{}",
+                    " ".repeat(doc_match.start()),
+                    doc_match.as_str()
+                )];
+            } else if current_arg.is_some() && INDENTED_RE.is_match(line) {
+                current_text.push(line.to_owned());
+            }
+        }
+
+        if let Some(a) = current_arg.take() {
+            ret.insert(a, DocString::join_and_dedent_lines(&current_text));
+        }
+
+        ret
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn parses_starlark_docstring() {
+        assert_eq!(
+            DocString::from_docstring(DocStringKind::Starlark, " "),
+            None
+        );
+        assert_eq!(
+            DocString::from_docstring(
+                DocStringKind::Starlark,
+                " \n\nThis should be the summary\n\n"
+            ),
+            Some(DocString {
+                summary: "This should be the summary".to_owned(),
+                details: None,
+            })
+        );
+        assert_eq!(
+            DocString::from_docstring(
+                DocStringKind::Starlark,
+                " \n\nThis should be the summary\n\n "
+            ),
+            Some(DocString {
+                summary: "This should be the summary".to_owned(),
+                details: None,
+            })
+        );
+        assert_eq!(
+            DocString::from_docstring(
+                DocStringKind::Starlark,
+                "Summary line here\n \nDetails after some spaces\n\nand some more newlines"
+            ),
+            Some(DocString {
+                summary: "Summary line here".to_owned(),
+                details: Some("Details after some spaces\n\nand some more newlines".to_owned()),
+            })
+        );
+        assert_eq!(
+            DocString::from_docstring(
+                DocStringKind::Starlark,
+                r#"
+            This is the summary.
+              It has multiple lines and some spaces, and should be collapsed
+
+            This should be a multiline set of details.
+            It should be:
+              - Dedented
+              - Trimmed
+              - Split properly from the summary
+
+"#
+            ),
+            Some(DocString {
+                summary: "This is the summary.
It has multiple lines and some spaces, and should be collapsed".to_owned(), + details: Some( + concat!( + "This should be a multiline set of details.\n", + "It should be:\n", + " - Dedented\n", + " - Trimmed\n", + " - Split properly from the summary" + ) + .to_owned() + ), + }) + ); + assert_eq!( + DocString::from_docstring( + DocStringKind::Starlark, + r#"This is a summary line that is not dedented like the 'details' + + Typing the first line right after the """ in python docstrings is common, + while putting the rest of the docstring indented. Just support both so it + doesn't surprise anyone. + "# + ), + Some(DocString { + summary: "This is a summary line that is not dedented like the 'details'" + .to_owned(), + details: Some( + concat!( + "Typing the first line right after the \"\"\" in python docstrings is common,\n", + "while putting the rest of the docstring indented. Just support both so it\n", + "doesn't surprise anyone." + ) + .to_owned() + ), + }) + ); + } + + #[test] + fn parses_rust_docstring() { + let raw = r#" + This is the summary line + that sometimes is split on two lines + + This is the second part. It has some code blocks + + ``` + # foo() { + "bar" + # } + ``` + + ```python + # This is a python comment. Leave it be + print(1) + ``` + + ```rust + # other_foo() { + "other_bar" + # } + ``` + "#; + + let parsed = DocString::from_docstring(DocStringKind::Rust, raw).unwrap(); + assert_eq!( + "This is the summary line that sometimes is split on two lines", + parsed.summary + ); + assert_eq!( + concat!( + "This is the second part. It has some code blocks\n\n", + "```\n", + "\"bar\"\n", + "```\n\n", + "```python\n", + "# This is a python comment. Leave it be\n", + "print(1)\n", + "```\n\n", + "```rust\n", + "\"other_bar\"", + "\n```" + ), + parsed.details.unwrap() + ); + } + + #[test] + fn parses_and_removes_sections_from_starlark_docstring() { + let raw_docs = r#"This is an example docstring + + We have some details up here that should not be parsed + + Some empty section: + Example: + First line of the section + + A newline with no space after it before the second one, + and a third that's indented further. + This is not in the example section + + Last: + This is something in the last section + "#; + let expected_docstring = DocString::from_docstring( + DocStringKind::Starlark, + r#"This is an example docstring + + We have some details up here that should not be parsed + + Some empty section: + This is not in the example section + + Last: + This is something in the last section + "#, + ) + .unwrap(); + + let expected_sections = HashMap::from([( + "example".to_owned(), + concat!( + "First line of the section\n\n", + "A newline with no space after it before the second one,\n", + " and a third that's indented further." + ) + .to_owned(), + )]); + + let ds = DocString::from_docstring(DocStringKind::Starlark, raw_docs).unwrap(); + let (new_ds, sections) = + ds.parse_and_remove_sections(DocStringKind::Starlark, &["example"]); + + assert_eq!(new_ds, expected_docstring); + assert_eq!(sections, expected_sections); + } + + #[test] + fn parses_and_removes_sections_from_rust_docstring() { + let raw_docs = r#"This is an example docstring + + We have some details up here that should not be parsed + + # Some Section + + ``` + # This is a commented out line in a codeblock + fn some_func() {} + ``` + + # Example + First line of the section + + Note that, unlike starlark doc strings, + we don't require indentation. 
The end of a + section is either a new section appearing, + or the end of the string. + + # Last + This is something in the last section + "#; + let expected_docstring = DocString::from_docstring( + DocStringKind::Rust, + r#"This is an example docstring + + We have some details up here that should not be parsed + + # Some Section + + ``` + fn some_func() {} + ``` + + # Last + This is something in the last section + "#, + ) + .unwrap(); + + let expected_sections = HashMap::from([( + "example".to_owned(), + concat!( + "First line of the section\n\n", + "Note that, unlike starlark doc strings,\n", + "we don't require indentation. The end of a\n", + "section is either a new section appearing,\n", + "or the end of the string.", + ) + .to_owned(), + )]); + + let ds = DocString::from_docstring(DocStringKind::Rust, raw_docs).unwrap(); + let (new_ds, sections) = ds.parse_and_remove_sections(DocStringKind::Rust, &["example"]); + + assert_eq!(new_ds, expected_docstring); + assert_eq!(sections, expected_sections); + } + + fn arg(name: &str) -> DocParam { + DocParam { + name: name.to_owned(), + docs: None, + typ: Ty::any(), + default_value: None, + } + } + + #[test] + fn parses_starlark_function_docstring() { + let docstring = r#"This is an example docstring + + Details here + + Args: + arg_foo: The argument named foo + arg_bar: The argument named bar. It has + a longer doc string that spans + over three lines + *args: Docs for args + **kwargs: Docs for kwargs + + Returns: + A value + "#; + + let kind = DocStringKind::Starlark; + let return_type = Ty::int(); + let expected = DocFunction { + docs: DocString::from_docstring(kind, "This is an example docstring\n\nDetails here"), + params: DocParams { + kwargs: Some(DocParam { + name: "kwargs".to_owned(), + docs: DocString::from_docstring(kind, "Docs for kwargs"), + typ: Ty::any(), + default_value: None, + }), + args: Some(DocParam { + name: "args".to_owned(), + docs: DocString::from_docstring(kind, "Docs for args"), + typ: Ty::any(), + default_value: None, + }), + pos_or_named: vec![ + DocParam { + name: "arg_bar".to_owned(), + docs: DocString::from_docstring( + kind, + concat!( + "The argument named bar. It has\n", + "a longer doc string that spans\n", + "over three lines" + ), + ), + typ: Ty::any(), + default_value: None, + }, + DocParam { + name: "arg_foo".to_owned(), + docs: DocString::from_docstring(kind, "The argument named foo"), + typ: Ty::any(), + default_value: None, + }, + ], + pos_only: Vec::new(), + named_only: Vec::new(), + }, + ret: DocReturn { + docs: DocString::from_docstring(kind, "A value"), + typ: return_type.clone(), + }, + }; + + let function_docs = DocFunction::from_docstring( + kind, + DocParams { + kwargs: Some(arg("kwargs")), + args: Some(arg("args")), + pos_or_named: vec![arg("arg_bar"), arg("arg_foo")], + pos_only: Vec::new(), + named_only: Vec::new(), + }, + return_type, + Some(docstring), + ); + + assert_eq!(expected, function_docs); + } + + #[test] + fn parses_rust_function_docstring() { + let docstring = r#"This is an example docstring + + Details here + + # Arguments + * `arg_foo`: The argument named foo + `arg_bar`: The argument named bar. 
It has + a longer doc string that spans + over three lines + + # Returns + A value + "#; + + let kind = DocStringKind::Rust; + let return_type = Ty::int(); + let expected = DocFunction { + docs: DocString::from_docstring(kind, "This is an example docstring\n\nDetails here"), + params: DocParams { + pos_or_named: vec![ + DocParam { + name: "arg_bar".to_owned(), + docs: DocString::from_docstring( + kind, + concat!( + "The argument named bar. It has\n", + "a longer doc string that spans\n", + "over three lines" + ), + ), + typ: Ty::any(), + default_value: None, + }, + DocParam { + name: "arg_foo".to_owned(), + docs: DocString::from_docstring(kind, "The argument named foo"), + typ: Ty::any(), + default_value: None, + }, + ], + kwargs: None, + args: None, + pos_only: Vec::new(), + named_only: Vec::new(), + }, + ret: DocReturn { + docs: DocString::from_docstring(kind, "A value"), + typ: return_type.clone(), + }, + }; + + let function_docs = DocFunction::from_docstring( + kind, + DocParams { + pos_or_named: vec![arg("arg_bar"), arg("arg_foo")], + ..DocParams::default() + }, + return_type, + Some(docstring), + ); + + assert_eq!(expected, function_docs); + } +} diff --git a/starlark-rust/starlark/src/docs/tests.rs b/starlark-rust/starlark/src/docs/tests.rs new file mode 100644 index 0000000000000..b5786caeecabd --- /dev/null +++ b/starlark-rust/starlark/src/docs/tests.rs @@ -0,0 +1,19 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +mod markdown; +mod rustdocs; diff --git a/starlark-rust/starlark/src/docs/tests/golden/multipage/Magic.golden.md b/starlark-rust/starlark/src/docs/tests/golden/multipage/Magic.golden.md new file mode 100644 index 0000000000000..fe1aa1cbf2bf6 --- /dev/null +++ b/starlark-rust/starlark/src/docs/tests/golden/multipage/Magic.golden.md @@ -0,0 +1,25 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +# Magic + +```python +def Magic(a1: int, a2: int = ..., step: int = 1, /) -> str +``` + +A function with only positional arguments. + +And a slightly longer description. With some example code: + +```python +Magic(1) +``` + +And some assertions: + +```rust +1 == 1 +``` diff --git a/starlark-rust/starlark/src/docs/tests/golden/multipage/Obj.golden.md b/starlark-rust/starlark/src/docs/tests/golden/multipage/Obj.golden.md new file mode 100644 index 0000000000000..296712966ee71 --- /dev/null +++ b/starlark-rust/starlark/src/docs/tests/golden/multipage/Obj.golden.md @@ -0,0 +1,62 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +# Obj + +These are where the module docs go + +## Obj.\_\_exported\_\_ + +```python +def Obj.__exported__() -> None +``` + +Needs to be escaped when rendered in markdown. 
+ +--- + +## Obj.attr1 + +```python +Obj.attr1: str +``` + +Docs for attr1 + +--- + +## Obj.attr2 + +```python +Obj.attr2: str +``` + +--- + +## Obj.func1 + +```python +def Obj.func1(foo: str) -> str +``` + +Docs for func1 + +#### Parameters + +* `foo`: Docs for foo + + +#### Returns + +The string 'func1' + +--- + +## Obj.func2 + +```python +def Obj.func2() -> str +``` diff --git a/starlark-rust/starlark/src/docs/tests/golden/multipage/globals.golden.md b/starlark-rust/starlark/src/docs/tests/golden/multipage/globals.golden.md new file mode 100644 index 0000000000000..dd5d6fe818af4 --- /dev/null +++ b/starlark-rust/starlark/src/docs/tests/golden/multipage/globals.golden.md @@ -0,0 +1,60 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +# globals + +## MAGIC + +```python +MAGIC: int +``` + +--- + +## func1 + +```python +def func1(foo: str) -> str +``` + +Docs for func1 + +#### Parameters + +* `foo`: Docs for foo + + +#### Returns + +The string 'func1' + +--- + +## func2 + +```python +def func2() -> str +``` + +--- + +## pos\_either\_named + +```python +def pos_either_named(a: int, /, b: int, *, c: int) -> magic +``` + +--- + +## with\_defaults + +```python +def with_defaults( + explicit_default: list[str] = [], + hidden_default: list[str] = ..., + string_default: str = "my_default", +) -> None +``` diff --git a/starlark-rust/starlark/src/docs/tests/golden/multipage/submod.golden.md b/starlark-rust/starlark/src/docs/tests/golden/multipage/submod.golden.md new file mode 100644 index 0000000000000..6f8434d43e96e --- /dev/null +++ b/starlark-rust/starlark/src/docs/tests/golden/multipage/submod.golden.md @@ -0,0 +1,37 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +# submod + +## new\_obj + +```python +def new_obj() -> obj +``` + +--- + +## notypes + +```python +def notypes(a) +``` + +--- + +## starlark\_args + +```python +def starlark_args(*args: str) -> None +``` + +--- + +## starlark\_kwargs + +```python +def starlark_kwargs(**kwargs: int) -> None +``` diff --git a/starlark-rust/starlark/src/docs/tests/golden/multipage_linked_type/Magic.golden.md b/starlark-rust/starlark/src/docs/tests/golden/multipage_linked_type/Magic.golden.md new file mode 100644 index 0000000000000..0fe585213078e --- /dev/null +++ b/starlark-rust/starlark/src/docs/tests/golden/multipage_linked_type/Magic.golden.md @@ -0,0 +1,25 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +import Link from '@docusaurus/Link'; + +# Magic + +
+def Magic(a1: int, a2: int = ..., step: int = 1, /) -> str
    + +A function with only positional arguments. + +And a slightly longer description. With some example code: + +```python +Magic(1) +``` + +And some assertions: + +```rust +1 == 1 +``` diff --git a/starlark-rust/starlark/src/docs/tests/golden/multipage_linked_type/Obj.golden.md b/starlark-rust/starlark/src/docs/tests/golden/multipage_linked_type/Obj.golden.md new file mode 100644 index 0000000000000..ac7152977be2d --- /dev/null +++ b/starlark-rust/starlark/src/docs/tests/golden/multipage_linked_type/Obj.golden.md @@ -0,0 +1,54 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +import Link from '@docusaurus/Link'; + +# Obj + +These are where the module docs go + +## Obj.\_\_exported\_\_ + +
+def Obj.__exported__() -> None
    + +Needs to be escaped when rendered in markdown. + +--- + +## Obj.attr1 + +
+Obj.attr1: str
    + +Docs for attr1 + +--- + +## Obj.attr2 + +
+Obj.attr2: str
    + +--- + +## Obj.func1 + +
+def Obj.func1(foo: str) -> str
    + +Docs for func1 + +#### Parameters + +* `foo`: Docs for foo + + +#### Returns + +The string 'func1' + +--- + +## Obj.func2 + +
+def Obj.func2() -> str
    diff --git a/starlark-rust/starlark/src/docs/tests/golden/multipage_linked_type/globals.golden.md b/starlark-rust/starlark/src/docs/tests/golden/multipage_linked_type/globals.golden.md new file mode 100644 index 0000000000000..45a640232dcdc --- /dev/null +++ b/starlark-rust/starlark/src/docs/tests/golden/multipage_linked_type/globals.golden.md @@ -0,0 +1,58 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +import Link from '@docusaurus/Link'; + +# globals + +## MAGIC + +
+MAGIC: int
    + +--- + +## func1 + +
+def func1(foo: str) -> str
    + +Docs for func1 + +#### Parameters + +* `foo`: Docs for foo + + +#### Returns + +The string 'func1' + +--- + +## func2 + +
+def func2() -> str
    + +--- + +## pos\_either\_named + +
+def pos_either_named(
    +    a: int,
    +    /,
    +    b: int,
    +    *,
    +    c: int,
    +) -> magic
    + +--- + +## with\_defaults + +
+def with_defaults(
    +    explicit_default: list[str] = [],
    +    hidden_default: list[str] = ...,
    +    string_default: str = "my_default",
    +) -> None
    diff --git a/starlark-rust/starlark/src/docs/tests/golden/multipage_linked_type/submod.golden.md b/starlark-rust/starlark/src/docs/tests/golden/multipage_linked_type/submod.golden.md new file mode 100644 index 0000000000000..f4b864b18ee61 --- /dev/null +++ b/starlark-rust/starlark/src/docs/tests/golden/multipage_linked_type/submod.golden.md @@ -0,0 +1,31 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +import Link from '@docusaurus/Link'; + +# submod + +## new\_obj + +
+def new_obj() -> obj
    + +--- + +## notypes + +
+def notypes(a)
    + +--- + +## starlark\_args + +
+def starlark_args(*args: str) -> None
    + +--- + +## starlark\_kwargs + +
+def starlark_kwargs(**kwargs: int) -> None
    diff --git a/starlark-rust/starlark/src/docs/tests/golden/native.golden.md b/starlark-rust/starlark/src/docs/tests/golden/native.golden.md new file mode 100644 index 0000000000000..92fecb0d78726 --- /dev/null +++ b/starlark-rust/starlark/src/docs/tests/golden/native.golden.md @@ -0,0 +1,78 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +# name + +## MAGIC + +```python +MAGIC: int +``` + +--- + +## Magic + +```python +Magic: magic +``` + +--- + +## Obj + +```python +Obj: obj +``` + +These are where the module docs go + +--- + +## func1 + +```python +def func1(foo: str) -> str +``` + +Docs for func1 + +#### Parameters + +* `foo`: Docs for foo + + +#### Returns + +The string 'func1' + +--- + +## func2 + +```python +def func2() -> str +``` + +--- + +## pos\_either\_named + +```python +def pos_either_named(a: int, /, b: int, *, c: int) -> magic +``` + +--- + +## with\_defaults + +```python +def with_defaults( + explicit_default: list[str] = [], + hidden_default: list[str] = ..., + string_default: str = "my_default", +) -> None +``` diff --git a/starlark-rust/starlark/src/tests/docs/golden/object.golden.md b/starlark-rust/starlark/src/docs/tests/golden/object.golden.md similarity index 97% rename from starlark-rust/starlark/src/tests/docs/golden/object.golden.md rename to starlark-rust/starlark/src/docs/tests/golden/object.golden.md index c9b27a33efa54..09c03eb86793b 100644 --- a/starlark-rust/starlark/src/tests/docs/golden/object.golden.md +++ b/starlark-rust/starlark/src/docs/tests/golden/object.golden.md @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` # `name` type diff --git a/starlark-rust/starlark/src/tests/docs/golden/starlark.golden.md b/starlark-rust/starlark/src/docs/tests/golden/starlark.golden.md similarity index 97% rename from starlark-rust/starlark/src/tests/docs/golden/starlark.golden.md rename to starlark-rust/starlark/src/docs/tests/golden/starlark.golden.md index 726164024a388..343cbb2cf0fb7 100644 --- a/starlark-rust/starlark/src/tests/docs/golden/starlark.golden.md +++ b/starlark-rust/starlark/src/docs/tests/golden/starlark.golden.md @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` # name @@ -20,7 +20,7 @@ def f1( c: int = 5, *, d: str = "some string", - **kwargs + **kwargs, ) -> list[str] ``` diff --git a/starlark-rust/starlark/src/docs/tests/markdown.rs b/starlark-rust/starlark/src/docs/tests/markdown.rs new file mode 100644 index 0000000000000..6a52a2dd62818 --- /dev/null +++ b/starlark-rust/starlark/src/docs/tests/markdown.rs @@ -0,0 +1,332 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use allocative::Allocative; +use derive_more::Display; +use itertools::Itertools; +use serde::Serialize; +use starlark::starlark_simple_value; +use starlark_derive::starlark_module; +use starlark_derive::starlark_value; +use starlark_derive::NoSerialize; +use starlark_map::small_map::SmallMap; +use starlark_syntax::golden_test_template::golden_test_template; + +use crate as starlark; +use crate::any::ProvidesStaticType; +use crate::assert; +use crate::docs::markdown::render_doc_item_no_link; +use crate::docs::multipage::render_markdown_multipage; +use crate::docs::multipage::DocModuleInfo; +use crate::docs::DocItem; +use crate::docs::DocType; +use crate::environment::Globals; +use crate::environment::GlobalsBuilder; +use crate::environment::Methods; +use crate::environment::MethodsBuilder; +use crate::environment::MethodsStatic; +use crate::values::list::UnpackList; +use crate::values::none::NoneType; +use crate::values::starlark_value_as_type::StarlarkValueAsType; +use crate::values::tuple::UnpackTuple; +use crate::values::StarlarkValue; +use crate::values::Value; + +fn docs_golden_test(test_file_name: &str, doc: DocItem) -> String { + assert!(test_file_name.ends_with(".golden.md")); + assert!(!test_file_name.contains('/')); + + let output = render_doc_item_no_link("name", &doc); + + golden_test_template(&format!("src/docs/tests/golden/{test_file_name}"), &output); + + output +} + +const STARLARK_CODE: &str = r#" +""" +This is the summary of the module's docs + +Some extra details can go here, + and indentation is kept as expected +""" + +def f1(a, b: str, c: int = 5, *, d: str = "some string", **kwargs) -> list[str]: + """ + Summary line goes here + + Args: + a: The docs for a + b: The docs for b + c: The docs for c, but these + go onto two lines + **kwargs: Docs for the keyword args + + Returns: + A string repr of the args + """ + return [str((a, b, c, d, repr(kwargs)))] + +def f2(a, *args: list[str]): + """ + This is a function with *args, and no return type + + Args: + *args: Only doc this arg + """ + return None + +def f3(a: str) -> str: + return a + +def f4(a: str) -> str: + """ This is a docstring with no 'Args:' section """ + return a + +# Not public, so shouldn't show up +def _do_not_export(): + pass +"#; + +#[derive( + Debug, + derive_more::Display, + ProvidesStaticType, + Allocative, + NoSerialize +)] +#[display("magic")] +struct Magic; + +starlark_simple_value!(Magic); + +#[starlark_value(type = "magic")] +impl<'v> StarlarkValue<'v> for Magic {} + +/// These are where the module docs go +#[starlark_module] +fn module(builder: &mut GlobalsBuilder) { + const MAGIC: i32 = 42; + + const Obj: StarlarkValueAsType = StarlarkValueAsType::new(); + + /// Docs for func1 + /// + /// # Arguments + /// * `foo`: Docs for foo + /// + /// # Returns + /// The string 'func1' + fn func1(foo: String) -> anyhow::Result { + let _ignore = foo; + Ok("func1".to_owned()) + } + + fn func2() -> anyhow::Result { + Ok("func2".to_owned()) + } + + /// A function with only positional arguments. + /// + /// And a slightly longer description. 
With some example code: + /// + /// ```python + /// Magic(1) + /// ``` + /// + /// And some assertions: + /// + /// ```rust + /// # starlark::assert::all_true(r#" + /// 1 == 1 + /// # "#); + /// ``` + #[starlark(as_type = Magic)] + fn Magic( + #[starlark(require = pos)] a1: i32, + #[starlark(require = pos)] a2: Option, + #[starlark(require = pos, default = 1)] step: i32, + ) -> anyhow::Result { + let _ = (a1, a2, step); + Ok("func3".to_owned()) + } + + fn with_defaults<'v>( + #[starlark(default=UnpackList::default())] explicit_default: UnpackList, + hidden_default: Option>, + #[starlark(default = "my_default")] string_default: &str, + ) -> anyhow::Result { + let _unused = (explicit_default, hidden_default, string_default); + Ok(NoneType) + } + + fn pos_either_named( + #[starlark(require = pos)] a: i32, + b: i32, + #[starlark(require = named)] c: i32, + ) -> anyhow::Result { + let _unused = (a, b, c); + Ok(Magic) + } +} + +#[starlark_module] +fn submodule(builder: &mut GlobalsBuilder) { + fn notypes<'v>(a: Value<'v>) -> anyhow::Result> { + Ok(a) + } + + fn starlark_args(#[starlark(args)] args: UnpackTuple) -> anyhow::Result { + let _ignore = args; + Ok(NoneType) + } + + fn starlark_kwargs( + #[starlark(kwargs)] kwargs: SmallMap, + ) -> anyhow::Result { + let _ignore = kwargs; + Ok(NoneType) + } + + fn new_obj() -> anyhow::Result { + Ok(Obj) + } +} + +fn get_globals() -> Globals { + GlobalsBuilder::new() + .with(module) + .with_namespace("submod", submodule) + .build() +} + +#[derive(ProvidesStaticType, Debug, Display, Allocative, Serialize)] +#[display("obj")] +struct Obj; + +starlark_simple_value!(Obj); + +#[starlark_value(type = "obj")] +impl<'v> StarlarkValue<'v> for Obj { + fn get_methods() -> Option<&'static Methods> { + static RES: MethodsStatic = MethodsStatic::new(); + RES.methods(object) + } +} + +/// These are where the module docs go +#[starlark_module] +fn object(builder: &mut MethodsBuilder) { + /// Docs for attr1 + #[starlark(attribute)] + fn attr1<'v>(this: Value<'v>) -> anyhow::Result { + Ok("attr1".to_owned()) + } + + #[starlark(attribute)] + fn attr2<'v>(this: Value<'v>) -> anyhow::Result { + Ok("attr2".to_owned()) + } + + /// Docs for func1 + /// + /// # Arguments + /// * `foo`: Docs for foo + /// + /// # Returns + /// The string 'func1' + fn func1<'v>(this: Value<'v>, foo: String) -> anyhow::Result { + let _ignore = (this, foo); + Ok("func1".to_owned()) + } + + fn func2<'v>(this: Value<'v>) -> anyhow::Result { + let _ = this; + Ok("func2".to_owned()) + } + + /// Needs to be escaped when rendered in markdown. 
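+    /// (The double underscores get backslash-escaped in the rendered golden
+    /// markdown; `golden_docs_object` below asserts exactly that.)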
+ fn __exported__<'v>(this: Value<'v>) -> anyhow::Result { + let _ = this; + Ok(NoneType) + } +} + +#[test] +fn golden_docs_starlark() { + let res = docs_golden_test( + "starlark.golden.md", + DocItem::Module(assert::pass_module(STARLARK_CODE).documentation()), + ); + assert!(!res.contains("_do_not_export")); +} + +#[test] +fn native_docs_module() { + let res = docs_golden_test( + "native.golden.md", + DocItem::Module(get_globals().documentation()), + ); + assert!(!res.contains("starlark::assert::all_true")); + assert!(res.contains(r#"string_default: str = "my_default"#)); +} + +fn test_globals_docs_render(with_linked_type: bool) { + let global = get_globals().documentation(); + let modules_info = DocModuleInfo { + module: &global, + name: "globals".to_owned(), + page_path: "".to_owned(), + }; + let path_mapper = |p: &str| format!("/path/to/{}", p); + let res = if with_linked_type { + render_markdown_multipage(vec![modules_info], Some(&path_mapper)) + } else { + render_markdown_multipage(vec![modules_info], None) + }; + let subfolder_name = if with_linked_type { + "multipage_linked_type" + } else { + "multipage" + }; + let expected_keys = vec!["", "Magic", "Obj", "submod"]; + assert_eq!(&res.keys().sorted().collect::>(), &expected_keys); + for (k, v) in res { + let k = if k.is_empty() { "globals" } else { &k }; + golden_test_template( + &format!("src/docs/tests/golden/{subfolder_name}/{}.golden.md", k), + &v, + ); + } +} + +#[test] +fn globals_docs_render() { + test_globals_docs_render(false); +} + +#[test] +fn globals_docs_render_with_linked_type() { + test_globals_docs_render(true); +} + +#[test] +fn golden_docs_object() { + let docs = DocType::from_starlark_value::(); + let res = docs_golden_test("object.golden.md", DocItem::Type(docs)); + assert!(res.contains(r#"name.\_\_exported\_\_"#)); +} diff --git a/starlark-rust/starlark/src/docs/tests/rustdocs.rs b/starlark-rust/starlark/src/docs/tests/rustdocs.rs new file mode 100644 index 0000000000000..3f53e2f5e2f60 --- /dev/null +++ b/starlark-rust/starlark/src/docs/tests/rustdocs.rs @@ -0,0 +1,236 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +use allocative::Allocative; +use derive_more::Display; +use serde::Serialize; +use starlark_derive::starlark_module; +use starlark_derive::starlark_value; +use starlark_derive::NoSerialize; +use starlark_derive::ProvidesStaticType; +use starlark_map::small_map::SmallMap; + +use crate as starlark; +use crate::assert::Assert; +use crate::docs::DocItem; +use crate::docs::DocMember; +use crate::docs::DocParam; +use crate::environment::GlobalsBuilder; +use crate::environment::Methods; +use crate::environment::MethodsBuilder; +use crate::environment::MethodsStatic; +use crate::eval::runtime::params::display::PARAM_FMT_OPTIONAL; +use crate::eval::Arguments; +use crate::eval::Evaluator; +use crate::values::list::UnpackList; +use crate::values::none::NoneType; +use crate::values::starlark_value_as_type::StarlarkValueAsType; +use crate::values::tuple::UnpackTuple; +use crate::values::Heap; +use crate::values::StarlarkValue; +use crate::values::StringValue; +use crate::values::Value; +use crate::values::ValueOfUnchecked; + +#[derive( + Debug, + derive_more::Display, + Allocative, + NoSerialize, + ProvidesStaticType +)] +#[display("input")] +struct InputTypeRepr; +#[derive( + Debug, + derive_more::Display, + Allocative, + NoSerialize, + ProvidesStaticType +)] +#[display("output")] +struct OutputTypeRepr; + +#[starlark_value(type = "input")] +impl<'v> StarlarkValue<'v> for InputTypeRepr {} + +#[starlark_value(type = "output")] +impl<'v> StarlarkValue<'v> for OutputTypeRepr {} + +#[starlark_module] +#[allow(unused_variables)] // Since this is for a test +fn globals(builder: &mut GlobalsBuilder) { + const Input: StarlarkValueAsType = StarlarkValueAsType::new(); + const Output: StarlarkValueAsType = StarlarkValueAsType::new(); + + fn simple( + arg_int: i32, + arg_bool: bool, + arg_vec: UnpackList<&str>, + arg_dict: SmallMap, + ) -> anyhow::Result { + unimplemented!() + } + + fn default_arg<'v>( + arg1: Option>, + #[starlark(default = NoneType)] arg2: Value<'v>, + eval: &mut Evaluator<'v, '_, '_>, + ) -> anyhow::Result> { + unimplemented!() + } + + fn args_kwargs<'v>( + #[starlark(args)] args: UnpackTuple>, + #[starlark(kwargs)] kwargs: Value<'v>, + ) -> anyhow::Result { + unimplemented!() + } + + fn custom_types<'v>( + arg1: StringValue<'v>, + arg2: ValueOfUnchecked<'v, InputTypeRepr>, + heap: &'v Heap, + ) -> anyhow::Result> { + unimplemented!() + } + + fn pos_named(arg1: i32, #[starlark(require = named)] arg2: i32) -> anyhow::Result { + unimplemented!() + } + + fn with_arguments(args: &Arguments) -> anyhow::Result { + unimplemented!() + } +} + +/// Test that a Rust starlark_module produces the right documentation. 
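+///
+/// The approach, per the body below: declare an equivalent module in pure
+/// Starlark, document both, and compare the resulting `DocItem`s one member at
+/// a time, which produces more useful error messages than comparing the maps
+/// wholesale.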
+#[test] +fn test_rustdoc() { + let got = GlobalsBuilder::new().with(globals).build(); + let mut a = Assert::new(); + a.globals_add(globals); + let expected = a.pass_module(r#" +def args_kwargs(*args, **kwargs: typing.Any) -> None: pass +def custom_types(arg1: str, arg2: Input) -> Output: pass +def default_arg(arg1 = "_", arg2: typing.Any = None) -> list[str]: pass +def pos_named(arg1: int, *, arg2: int) -> int: pass +def simple(arg_int: int, arg_bool: bool, arg_vec: list[str], arg_dict: dict[str, (bool, int)]) -> None: pass +def with_arguments(*args, **kwargs) -> int: pass +"#); + + let expected = expected.documentation().members; + let mut got = got.documentation().members; + + got.shift_remove("Input"); + got.shift_remove("Output"); + + assert_eq!(expected.len(), got.len()); + for (name, mut expected) in expected { + if &name == "default_arg" { + // `Option` args in native functions are special magic and have behavior that can't + // be replicated with normal functions + let DocItem::Member(DocMember::Function(expected)) = &mut expected else { + unreachable!() + }; + let DocParam { default_value, .. } = expected.params.doc_params_mut().next().unwrap(); + *default_value = Some(PARAM_FMT_OPTIONAL.to_owned()); + } + // Comparing one at a time produces more useful error messages + assert_eq!(&expected, got.get(&name).unwrap()); + } +} + +#[derive(ProvidesStaticType, Debug, Display, Allocative, Serialize)] +#[display("obj")] +struct Obj; + +#[starlark_value(type = "obj")] +impl<'v> StarlarkValue<'v> for Obj { + fn get_methods() -> Option<&'static Methods> { + static RES: MethodsStatic = MethodsStatic::new(); + RES.methods(object) + } +} + +/// These are where the module docs go +#[starlark_module] +fn object(builder: &mut MethodsBuilder) { + /// Docs for func1 + /// + /// # Arguments + /// * `foo`: Docs for foo + /// + /// # Returns + /// The string 'func1' + fn func1<'v>(this: Value<'v>, foo: String) -> anyhow::Result { + let _ignore = (this, foo); + Ok("func1".to_owned()) + } +} + +#[test] +fn inner_object_functions_have_docs() { + let heap = Heap::new(); + let obj = heap.alloc_simple(Obj); + let item = obj + .get_attr("func1", &heap) + .unwrap() + .unwrap() + .documentation(); + + match item { + DocItem::Member(DocMember::Function(item)) => { + assert_eq!(item.docs.unwrap().summary, "Docs for func1"); + } + _ => panic!("Expected function: {:#?}", item), + } +} + +#[starlark_module] +fn module(builder: &mut GlobalsBuilder) { + const MAGIC: i32 = 42; + + /// Docs for func1 + /// + /// # Arguments + /// * `foo`: Docs for foo + /// + /// # Returns + /// The string 'func1' + fn func1(foo: String) -> anyhow::Result { + let _ignore = foo; + Ok("func1".to_owned()) + } +} + +#[test] +fn inner_module_functions_have_docs() { + let item = GlobalsBuilder::new() + .with(module) + .build() + .get("func1") + .unwrap() + .documentation(); + + match item { + DocItem::Member(DocMember::Function(item)) => { + assert_eq!(item.docs.unwrap().summary, "Docs for func1"); + } + _ => panic!("Expected function: {:#?}", item), + } +} diff --git a/starlark-rust/starlark/src/environment.rs b/starlark-rust/starlark/src/environment.rs new file mode 100644 index 0000000000000..71b1e8c41f870 --- /dev/null +++ b/starlark-rust/starlark/src/environment.rs @@ -0,0 +1,48 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Types representing Starlark modules ([`Module`] and [`FrozenModule`]) and global variables ([`Globals`]). +//! +//! Global functions and values are stored in [`Globals`], which are typically +//! built using [`GlobalsBuilder`]. +//! User executions store their values in a [`Module`], which have to be converted to a +//! [`FrozenModule`] using [`freeze`](Module::freeze) before they can be `load()`'d as a dependency. + +mod globals; +mod methods; +mod module_dump; +mod modules; +pub(crate) mod names; +pub(crate) mod slots; + +pub use globals::*; +pub use methods::*; +pub use modules::*; +use thiserror::Error; + +#[derive(Debug, Error)] +enum EnvironmentError { + /// Cannot import private symbol, i.e. underscore prefixed + #[error("Cannot import private symbol `{0}`")] + CannotImportPrivateSymbol(String), + #[error("Module has no symbol `{0}`")] + ModuleHasNoSymbol(String), + #[error("Module has no symbol `{0}`, did you mean `{1}`?")] + ModuleHasNoSymbolDidYouMean(String, String), + #[error("Module symbol `{0}` is not exported")] + ModuleSymbolIsNotExported(String), +} diff --git a/starlark-rust/starlark/src/environment/globals.rs b/starlark-rust/starlark/src/environment/globals.rs index 90d3b9252beb2..becdb45fa1e50 100644 --- a/starlark-rust/starlark/src/environment/globals.rs +++ b/starlark-rust/starlark/src/environment/globals.rs @@ -23,49 +23,32 @@ use itertools::Itertools; use once_cell::sync::Lazy; use once_cell::sync::OnceCell; -use crate::collections::symbol_map::Symbol; -use crate::collections::symbol_map::SymbolMap; -use crate::collections::Hashed; +use crate::__derive_refs::components::NativeCallableComponents; +use crate::collections::symbol::map::SymbolMap; use crate::collections::SmallMap; -use crate::docs::DocMember; +use crate::docs::DocItem; use crate::docs::DocModule; -use crate::docs::DocObject; use crate::docs::DocString; use crate::docs::DocStringKind; +use crate::docs::DocType; use crate::stdlib; pub use crate::stdlib::LibraryExtension; use crate::typing::Ty; -use crate::values::function::NativeAttribute; -use crate::values::function::NativeCallableRawDocs; use crate::values::function::NativeFunc; -use crate::values::function::NativeMeth; use crate::values::function::SpecialBuiltinFunction; -use crate::values::layout::value_not_special::FrozenValueNotSpecial; -use crate::values::structs::AllocStruct; +use crate::values::namespace::FrozenNamespace; use crate::values::types::function::NativeFunction; -use crate::values::types::function::NativeMethod; use crate::values::AllocFrozenValue; use crate::values::FrozenHeap; use crate::values::FrozenHeapRef; use crate::values::FrozenStringValue; use crate::values::FrozenValue; -use crate::values::Heap; use crate::values::Value; /// The global values available during execution. #[derive(Clone, Dupe, Debug, Allocative)] pub struct Globals(Arc); -/// Methods of an object. -#[derive(Clone, Debug)] -pub struct Methods { - /// This field holds the objects referenced in `members`. 
- #[allow(dead_code)] - heap: FrozenHeapRef, - members: SymbolMap, - docstring: Option, -} - #[derive(Debug, Allocative)] struct GlobalsData { heap: FrozenHeapRef, @@ -82,19 +65,11 @@ pub struct GlobalsBuilder { // Normal top-level variables, e.g. True/hash variables: SymbolMap, // The list of struct fields, pushed to the end - struct_fields: Vec>, - // The raw docstring for this module - docstring: Option, -} - -/// Used to build a [`Methods`] value. -#[derive(Debug)] -pub struct MethodsBuilder { - /// The heap everything is allocated in. - heap: FrozenHeap, - /// Members, either `NativeMethod` or `NativeAttribute`. - members: SymbolMap, - /// The raw docstring for the main object. + namespace_fields: Vec>, + /// The raw docstring for this module + /// + /// FIXME(JakobDegen): This should probably be removed. Having a docstring on a `GlobalsBuilder` + /// doesn't really make sense, because there's no way good way to combine multiple docstrings. docstring: Option, } @@ -168,51 +143,21 @@ impl Globals { .join("\n") } + /// Get the documentation for the object itself + pub fn docstring(&self) -> Option<&str> { + self.0.docstring.as_deref() + } + /// Get the documentation for both the object itself, and its members. pub fn documentation(&self) -> DocModule { - let DocObject { docs, members } = common_documentation( + let (docs, members) = common_documentation( &self.0.docstring, self.0.variables.iter().map(|(n, v)| (n.as_str(), *v)), ); - DocModule { docs, members } - } -} - -impl Methods { - pub(crate) fn get<'v>(&'v self, name: &str) -> Option> { - self.get_frozen(name).map(FrozenValueNotSpecial::to_value) - } - - pub(crate) fn get_frozen(&self, name: &str) -> Option { - self.members.get_str(name).copied() - } - - pub(crate) fn get_hashed(&self, name: Hashed<&str>) -> Option { - self.members.get_hashed_str(name).copied() - } - - pub(crate) fn get_frozen_symbol(&self, name: &Symbol) -> Option { - self.members.get(name).copied() - } - - pub(crate) fn names(&self) -> Vec { - self.members.keys().map(|x| x.as_str().to_owned()).collect() - } - - pub(crate) fn members(&self) -> impl Iterator { - self.members - .iter() - .map(|(k, v)| (k.as_str(), v.to_frozen_value())) - } - - /// Fetch the documentation. - pub fn documentation(&self) -> DocObject { - common_documentation( - &self.docstring, - self.members - .iter() - .map(|(n, v)| (n.as_str(), v.to_frozen_value())), - ) + DocModule { + docs, + members: members.collect(), + } } } @@ -222,7 +167,7 @@ impl GlobalsBuilder { Self { heap: FrozenHeap::new(), variables: SymbolMap::new(), - struct_fields: Vec::new(), + namespace_fields: Vec::new(), docstring: None, } } @@ -249,13 +194,13 @@ impl GlobalsBuilder { res } - /// Add a nested struct to the builder. If `f` adds the definition `foo`, - /// it will end up on a struct `name`, accessible as `name.foo`. - pub fn struct_(&mut self, name: &str, f: impl FnOnce(&mut GlobalsBuilder)) { - self.struct_fields.push(SmallMap::new()); + /// Add a nested namespace to the builder. If `f` adds the definition `foo`, + /// it will end up on a namespace `name`, accessible as `name.foo`. + pub fn namespace(&mut self, name: &str, f: impl FnOnce(&mut GlobalsBuilder)) { + self.namespace_fields.push(SmallMap::new()); f(self); - let fields = self.struct_fields.pop().unwrap(); - self.set(name, AllocStruct(fields)); + let fields = self.namespace_fields.pop().unwrap(); + self.set(name, self.heap.alloc(FrozenNamespace::new(fields))); } /// A fluent API for modifying [`GlobalsBuilder`] and returning the result. 
@@ -264,9 +209,9 @@ impl GlobalsBuilder {
         self
     }
 
-    /// A fluent API for modifying [`GlobalsBuilder`] using [`struct_`](GlobalsBuilder::struct_).
-    pub fn with_struct(mut self, name: &str, f: impl Fn(&mut GlobalsBuilder)) -> Self {
-        self.struct_(name, f);
+    /// A fluent API for modifying [`GlobalsBuilder`] using [`namespace`](GlobalsBuilder::namespace).
+    pub fn with_namespace(mut self, name: &str, f: impl Fn(&mut GlobalsBuilder)) -> Self {
+        self.namespace(name, f);
         self
     }
 
@@ -289,7 +234,7 @@ impl GlobalsBuilder {
     /// Set a value in the [`GlobalsBuilder`].
     pub fn set<'v, V: AllocFrozenValue>(&'v mut self, name: &str, value: V) {
         let value = value.alloc_frozen_value(&self.heap);
-        match self.struct_fields.last_mut() {
+        match self.namespace_fields.last_mut() {
             None => {
                 // TODO(nga): do not quietly ignore redefinitions.
                 self.variables.insert(name, value)
@@ -306,9 +251,8 @@ impl GlobalsBuilder {
     pub fn set_function<F>(
         &mut self,
         name: &str,
-        speculative_exec_safe: bool,
-        raw_docs: NativeCallableRawDocs,
-        type_attr: Option<Ty>,
+        components: NativeCallableComponents,
+        as_type: Option<(Ty, DocType)>,
         ty: Option<Ty>,
         special_builtin_function: Option<SpecialBuiltinFunction>,
         f: F,
@@ -320,10 +264,16 @@
             NativeFunction {
                 function: Box::new(f),
                 name: name.to_owned(),
-                speculative_exec_safe,
-                type_attr,
-                ty: Some(ty.unwrap_or_else(|| Ty::from_docs_function(&raw_docs.documentation()))),
-                raw_docs: Some(raw_docs),
+                speculative_exec_safe: components.speculative_exec_safe,
+                as_type: as_type.as_ref().map(|x| x.0.dupe()),
+                ty: ty.unwrap_or_else(|| {
+                    Ty::from_native_callable_components(
+                        &components,
+                        as_type.as_ref().map(|x| x.0.dupe()),
+                    )
+                    .unwrap() // TODO(nga): do not unwrap.
+                }),
+                docs: components.into_docs(as_type),
                 special_builtin_function,
             },
         )
@@ -350,144 +300,9 @@ impl GlobalsBuilder {
     }
 }
 
-impl Methods {
-    /// Create an empty [`Globals`], with no functions in scope.
-    pub fn new() -> Self {
-        MethodsBuilder::new().build()
-    }
-}
-
-impl MethodsBuilder {
-    /// Create an empty [`MethodsBuilder`], with no functions in scope.
-    pub fn new() -> Self {
-        MethodsBuilder {
-            heap: FrozenHeap::new(),
-            members: SymbolMap::new(),
-            docstring: None,
-        }
-    }
-
-    /// Called at the end to build a [`Methods`].
-    pub fn build(self) -> Methods {
-        Methods {
-            heap: self.heap.into_ref(),
-            members: self.members,
-            docstring: self.docstring,
-        }
-    }
-
-    /// A fluent API for modifying [`MethodsBuilder`] and returning the result.
-    pub fn with(mut self, f: impl FnOnce(&mut Self)) -> Self {
-        f(&mut self);
-        self
-    }
-
-    /// Set the raw docstring for this object.
-    pub fn set_docstring(&mut self, docstring: &str) {
-        self.docstring = Some(docstring.to_owned());
-    }
-
-    /// Set a constant value in the [`MethodsBuilder`] that will be suitable for use with
-    /// [`StarlarkValue::get_methods`](crate::values::StarlarkValue::get_methods).
-    pub fn set_attribute<'v, V: AllocFrozenValue>(
-        &'v mut self,
-        name: &str,
-        value: V,
-        docstring: Option<String>,
-    ) {
-        // We want to build an attribute, that ignores its self argument, and does no subsequent allocation.
-        let value = self.heap.alloc(value);
-        self.set_attribute_fn(
-            name,
-            true,
-            docstring,
-            V::starlark_type_repr(),
-            move |_, _| Ok(value.to_value()),
-        );
-    }
-
-    /// Set an attribute. This function is usually called from code
-    /// generated by `starlark_derive` and rarely needs to be called manually.
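// ---------------------------------------------------------------------------
// In practice `set_function` (and the methods-side equivalents below) are
// reached through the `#[starlark_module]` proc macro rather than called by
// hand. A rough sketch of that usual route; `example_globals` and `greet`
// are made-up names, not part of this patch.
use starlark::environment::GlobalsBuilder;
use starlark::starlark_module;

#[starlark_module]
fn example_globals(builder: &mut GlobalsBuilder) {
    // The macro derives the native-callable components for `greet` and
    // registers it on the builder under the hood.
    fn greet(name: &str) -> anyhow::Result<String> {
        Ok(format!("Hello, {name}!"))
    }
}
// ---------------------------------------------------------------------------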
- pub fn set_attribute_fn( - &mut self, - name: &str, - speculative_exec_safe: bool, - docstring: Option, - typ: Ty, - f: F, - ) where - F: for<'v> Fn(Value<'v>, &'v Heap) -> anyhow::Result> + Send + Sync + 'static, - { - self.members.insert( - name, - FrozenValueNotSpecial::new(self.heap.alloc(NativeAttribute { - function: Box::new(f), - speculative_exec_safe, - docstring, - typ, - })) - .unwrap(), - ); - } - - /// Set a method. This function is usually called from code - /// generated by `starlark_derive` and rarely needs to be called manually. - pub fn set_method( - &mut self, - name: &str, - speculative_exec_safe: bool, - raw_docs: NativeCallableRawDocs, - f: F, - ) where - F: NativeMeth, - { - let ty = Ty::from_docs_function(&raw_docs.documentation()); - - self.members.insert( - name, - FrozenValueNotSpecial::new(self.heap.alloc(NativeMethod { - function: Box::new(f), - name: name.to_owned(), - speculative_exec_safe, - raw_docs, - ty, - })) - .unwrap(), - ); - } - - /// Allocate a value using the same underlying heap as the [`MethodsBuilder`] - pub fn alloc<'v, V: AllocFrozenValue>(&'v self, value: V) -> FrozenValue { - value.alloc_frozen_value(&self.heap) - } -} - -/// Used to create methods for a [`StarlarkValue`](crate::values::StarlarkValue). -/// -/// To define a method `foo()` on your type, define -/// usually written as: -/// -/// ```ignore -/// fn my_methods(builder: &mut GlobalsBuilder) { -/// fn foo(me: ARef) -> anyhow::Result { -/// ... -/// } -/// } -/// -/// impl StarlarkValue<'_> for Foo { -/// ... -/// fn get_methods(&self) -> Option<&'static Globals> { -/// static RES: GlobalsStatic = GlobalsStatic::new(); -/// RES.methods(module_creator) -/// } -/// ... -/// } -/// ``` +/// Used to create globals. pub struct GlobalsStatic(OnceCell); -/// Similar to [`GlobalsStatic`], but for methods. -pub struct MethodsStatic(OnceCell); - impl GlobalsStatic { /// Create a new [`GlobalsStatic`]. pub const fn new() -> Self { @@ -525,60 +340,24 @@ impl GlobalsStatic { } } -impl MethodsStatic { - /// Create a new [`MethodsStatic`]. - pub const fn new() -> Self { - Self(OnceCell::new()) - } - - /// Populate the globals with a builder function. Always returns `Some`, but using this API - /// to be a better fit for [`StarlarkValue.get_methods`](crate::values::StarlarkValue::get_methods). - pub fn methods(&'static self, x: impl FnOnce(&mut MethodsBuilder)) -> Option<&'static Methods> { - Some(self.0.get_or_init(|| MethodsBuilder::new().with(x).build())) - } - - /// Move all the globals in this [`GlobalsBuilder`] into a new one. All variables will - /// only be allocated once (ensuring things like function comparison works properly). 
- pub fn populate(&'static self, x: impl FnOnce(&mut MethodsBuilder), out: &mut MethodsBuilder) { - let methods = self.methods(x).unwrap(); - for (name, value) in methods.members.iter() { - out.members.insert(name.as_str(), *value); - } - out.docstring = methods.docstring.clone(); - } -} - -fn common_documentation<'a>( +pub(crate) fn common_documentation<'a>( docstring: &Option, members: impl IntoIterator, -) -> DocObject { +) -> (Option, impl Iterator) { let main_docs = docstring .as_ref() .and_then(|ds| DocString::from_docstring(DocStringKind::Rust, ds)); let member_docs = members .into_iter() - .map(|(name, val)| (name.to_owned(), DocMember::from_value(val.to_value()))) - .sorted_by(|(l, _), (r, _)| Ord::cmp(l, r)) - .collect(); + .map(|(name, val)| (name.to_owned(), val.to_value().documentation())) + .sorted_by(|(l, _), (r, _)| Ord::cmp(l, r)); - DocObject { - docs: main_docs, - members: member_docs, - } + (main_docs, member_docs) } #[cfg(test)] mod tests { - use derive_more::Display; - use starlark_derive::starlark_value; - use super::*; - use crate as starlark; - use crate::any::ProvidesStaticType; - use crate::assert::Assert; - use crate::starlark_simple_value; - use crate::values::NoSerialize; - use crate::values::StarlarkValue; #[test] fn test_send_sync() @@ -586,31 +365,4 @@ mod tests { Globals: Send + Sync, { } - - #[test] - fn test_set_attribute() { - #[derive(Debug, Display, ProvidesStaticType, NoSerialize, Allocative)] - #[display(fmt = "Magic")] - struct Magic; - starlark_simple_value!(Magic); - - #[starlark_value(type = "magic")] - impl<'v> StarlarkValue<'v> for Magic { - fn get_methods() -> Option<&'static Methods> { - static RES: MethodsStatic = MethodsStatic::new(); - RES.methods(|x| { - x.set_attribute("my_type", "magic", None); - x.set_attribute("my_value", 42, None); - }) - } - } - - let mut a = Assert::new(); - a.globals_add(|x| x.set("magic", Magic)); - a.pass( - r#" -assert_eq(magic.my_type, "magic") -assert_eq(magic.my_value, 42)"#, - ); - } } diff --git a/starlark-rust/starlark/src/environment/methods.rs b/starlark-rust/starlark/src/environment/methods.rs new file mode 100644 index 0000000000000..d138c7ecbcd01 --- /dev/null +++ b/starlark-rust/starlark/src/environment/methods.rs @@ -0,0 +1,332 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +use dupe::Dupe; +use once_cell::sync::OnceCell; +use starlark_map::Hashed; + +use crate::__derive_refs::components::NativeCallableComponents; +use crate::collections::symbol::map::SymbolMap; +use crate::collections::symbol::symbol::Symbol; +use crate::docs::DocType; +use crate::environment::common_documentation; +use crate::typing::Ty; +use crate::values::function::NativeAttr; +use crate::values::function::NativeAttribute; +use crate::values::function::NativeMeth; +use crate::values::function::NativeMethod; +use crate::values::types::unbound::UnboundValue; +use crate::values::AllocFrozenValue; +use crate::values::FrozenHeap; +use crate::values::FrozenHeapRef; +use crate::values::FrozenRef; +use crate::values::FrozenValue; +use crate::values::FrozenValueTyped; +use crate::values::Heap; +use crate::values::Value; + +/// Methods of an object. +#[derive(Clone, Debug)] +pub struct Methods { + /// This field holds the objects referenced in `members`. + #[allow(dead_code)] + heap: FrozenHeapRef, + members: SymbolMap, + docstring: Option, +} + +/// Used to build a [`Methods`] value. +#[derive(Debug)] +pub struct MethodsBuilder { + /// The heap everything is allocated in. + heap: FrozenHeap, + /// Members, either `NativeMethod` or `NativeAttribute`. + members: SymbolMap, + /// The raw docstring for the main object. + /// + /// FIXME(JakobDegen): This should probably be removed. Not only can these docstrings not be + /// combined with each other, but having the main documentation for the object on the methods + /// instead of on the object type directly is extraordinarily confusing. + docstring: Option, +} + +impl Methods { + pub(crate) fn get<'v>(&'v self, name: &str) -> Option> { + Some(self.members.get_str(name)?.to_frozen_value().to_value()) + } + + /// Gets the type of the member + /// + /// In the case of an attribute, this is the type the attribute evaluates to, while in the case + /// of a method, this is the `TyCallable` + pub(crate) fn get_ty(&self, name: &str) -> Option { + match self.members.get_str(name)? { + UnboundValue::Attr(attr, _) => Some(attr.typ.dupe()), + UnboundValue::Method(method, _) => Some(method.ty.dupe()), + } + } + + #[inline] + pub(crate) fn get_hashed(&self, name: Hashed<&str>) -> Option<&UnboundValue> { + self.members.get_hashed_str(name) + } + + #[inline] + pub(crate) fn get_frozen_symbol(&self, name: &Symbol) -> Option<&UnboundValue> { + self.members.get(name) + } + + pub(crate) fn names(&self) -> Vec { + self.members.keys().map(|x| x.as_str().to_owned()).collect() + } + + pub(crate) fn members(&self) -> impl Iterator { + self.members + .iter() + .map(|(k, v)| (k.as_str(), v.to_frozen_value())) + } + + /// Fetch the documentation. + pub fn documentation(&self, ty: Ty) -> DocType { + let (docs, members) = common_documentation( + &self.docstring, + self.members + .iter() + .map(|(n, v)| (n.as_str(), v.to_frozen_value())), + ); + + DocType { + docs, + members: members + .filter_map(|(n, item)| { + // This is only `None` if the item is a module, but types shouldn't really have + // modules in them anyway, so that seems ok + Some((n, item.try_as_member_with_collapsed_object().ok()?)) + }) + .collect(), + ty, + constructor: None, + } + } +} + +impl Methods { + /// Create an empty [`Methods`], with no functions in scope. + pub fn new() -> Self { + MethodsBuilder::new().build() + } +} + +impl MethodsBuilder { + /// Create an empty [`MethodsBuilder`], with no functions in scope. 
+    pub fn new() -> Self {
+        MethodsBuilder {
+            heap: FrozenHeap::new(),
+            members: SymbolMap::new(),
+            docstring: None,
+        }
+    }
+
+    /// Called at the end to build a [`Methods`].
+    pub fn build(self) -> Methods {
+        Methods {
+            heap: self.heap.into_ref(),
+            members: self.members,
+            docstring: self.docstring,
+        }
+    }
+
+    /// A fluent API for modifying [`MethodsBuilder`] and returning the result.
+    pub fn with(mut self, f: impl FnOnce(&mut Self)) -> Self {
+        f(&mut self);
+        self
+    }
+
+    /// Set the raw docstring for this object.
+    pub fn set_docstring(&mut self, docstring: &str) {
+        self.docstring = Some(docstring.to_owned());
+    }
+
+    /// Set a constant value in the [`MethodsBuilder`] that will be suitable for use with
+    /// [`StarlarkValue::get_methods`](crate::values::StarlarkValue::get_methods).
+    pub fn set_attribute<'v, V: AllocFrozenValue>(
+        &'v mut self,
+        name: &str,
+        value: V,
+        docstring: Option<String>,
+    ) {
+        // We want to build an attribute, that ignores its self argument, and does no subsequent allocation.
+        let value = self.heap.alloc(value);
+        self.set_attribute_fn(
+            name,
+            true,
+            docstring,
+            V::starlark_type_repr(),
+            move |_, _| Ok(value.to_value()),
+        );
+    }
+
+    /// Set an attribute. This function is usually called from code
+    /// generated by `starlark_derive` and rarely needs to be called manually.
+    pub fn set_attribute_fn<F>(
+        &mut self,
+        name: &str,
+        speculative_exec_safe: bool,
+        docstring: Option<String>,
+        typ: Ty,
+        f: F,
+    ) where
+        F: for<'v> Fn(Value<'v>, &'v Heap) -> crate::Result<Value<'v>> + Send + Sync + 'static,
+    {
+        self.members.insert(
+            name,
+            UnboundValue::Attr(
+                FrozenValueTyped::new(self.heap.alloc(NativeAttribute {
+                    speculative_exec_safe,
+                    docstring,
+                    typ,
+                }))
+                .unwrap(),
+                FrozenRef::<dyn NativeAttr>::new(
+                    self.heap.alloc_any_debug_type_name(f).as_ref(),
+                ),
+            ),
+        );
+    }
+
+    /// Set a method. This function is usually called from code
+    /// generated by `starlark_derive` and rarely needs to be called manually.
+    pub fn set_method<F>(&mut self, name: &str, components: NativeCallableComponents, f: F)
+    where
+        F: NativeMeth,
+    {
+        // TODO(nga): do not unwrap.
+        let ty = Ty::from_native_callable_components(&components, None).unwrap();
+
+        let function = FrozenRef::<dyn NativeMeth>::new(
+            self.heap.alloc_any_debug_type_name(f).as_ref(),
+        );
+        self.members.insert(
+            name,
+            UnboundValue::Method(
+                FrozenValueTyped::new(self.heap.alloc(NativeMethod {
+                    function,
+                    name: name.to_owned(),
+                    speculative_exec_safe: components.speculative_exec_safe,
+                    docs: components.into_docs(None),
+                    ty,
+                }))
+                .unwrap(),
+                function,
+            ),
+        );
+    }
+
+    /// Allocate a value using the same underlying heap as the [`MethodsBuilder`]
+    pub fn alloc<'v, V: AllocFrozenValue>(&'v self, value: V) -> FrozenValue {
+        value.alloc_frozen_value(&self.heap)
+    }
+}
+
+/// Used to create methods for a [`StarlarkValue`](crate::values::StarlarkValue).
+///
+/// To define a method `foo()` on your type, the builder function is
+/// usually written as:
+///
+/// ```ignore
+/// fn my_methods(builder: &mut MethodsBuilder) {
+///     fn foo(me: ARef<Foo>) -> anyhow::Result<NoneType> {
+///         ...
+///     }
+/// }
+///
+/// impl StarlarkValue<'_> for Foo {
+///     ...
+///     fn get_methods() -> Option<&'static Methods> {
+///         static RES: MethodsStatic = MethodsStatic::new();
+///         RES.methods(my_methods)
+///     }
+///     ...
+/// }
+/// ```
+pub struct MethodsStatic(OnceCell<Methods>);
+
+impl MethodsStatic {
+    /// Create a new [`MethodsStatic`].
+    pub const fn new() -> Self {
+        Self(OnceCell::new())
+    }
+
+    /// Populate the globals with a builder function. Always returns `Some`; the
+    /// `Option` return type simply makes this a better fit for
+    /// [`StarlarkValue::get_methods`](crate::values::StarlarkValue::get_methods).
+    pub fn methods(&'static self, x: impl FnOnce(&mut MethodsBuilder)) -> Option<&'static Methods> {
+        Some(self.0.get_or_init(|| MethodsBuilder::new().with(x).build()))
+    }
+
+    /// Copy all the methods in this [`MethodsBuilder`] into a new one. All variables will
+    /// only be allocated once (ensuring things like function comparison work properly).
+    pub fn populate(&'static self, x: impl FnOnce(&mut MethodsBuilder), out: &mut MethodsBuilder) {
+        let methods = self.methods(x).unwrap();
+        for (name, value) in methods.members.iter() {
+            out.members.insert(name.as_str(), value.clone());
+        }
+        out.docstring = methods.docstring.clone();
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use allocative::Allocative;
+    use derive_more::Display;
+    use starlark_derive::starlark_value;
+    use starlark_derive::NoSerialize;
+    use starlark_derive::ProvidesStaticType;
+
+    use crate as starlark;
+    use crate::assert::Assert;
+    use crate::environment::Methods;
+    use crate::environment::MethodsStatic;
+    use crate::starlark_simple_value;
+    use crate::values::StarlarkValue;
+
+    #[test]
+    fn test_set_attribute() {
+        #[derive(Debug, Display, ProvidesStaticType, NoSerialize, Allocative)]
+        #[display("Magic")]
+        struct Magic;
+        starlark_simple_value!(Magic);
+
+        #[starlark_value(type = "magic")]
+        impl<'v> StarlarkValue<'v> for Magic {
+            fn get_methods() -> Option<&'static Methods> {
+                static RES: MethodsStatic = MethodsStatic::new();
+                RES.methods(|x| {
+                    x.set_attribute("my_type", "magic", None);
+                    x.set_attribute("my_value", 42, None);
+                })
+            }
+        }
+
+        let mut a = Assert::new();
+        a.globals_add(|x| x.set("magic", Magic));
+        a.pass(
+            r#"
+assert_eq(magic.my_type, "magic")
+assert_eq(magic.my_value, 42)"#,
+        );
+    }
+}
diff --git a/starlark-rust/starlark/src/environment/mod.rs b/starlark-rust/starlark/src/environment/mod.rs
deleted file mode 100644
index 34aad13b41483..0000000000000
--- a/starlark-rust/starlark/src/environment/mod.rs
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright 2018 The Starlark in Rust Authors.
- * Copyright (c) Facebook, Inc. and its affiliates.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//! Types representing Starlark modules ([`Module`] and [`FrozenModule`]) and global variables ([`Globals`]).
-//!
-//! Global functions and values are stored in [`Globals`], which are typically
-//! built using [`GlobalsBuilder`].
-//! User executions store their values in a [`Module`], which have to be converted to a
-//! [`FrozenModule`] using [`freeze`](Module::freeze) before they can be `load()`'d as a dependency.
-
-mod globals;
-mod module_dump;
-mod modules;
-pub(crate) mod names;
-pub(crate) mod slots;
-
-pub use globals::*;
-pub use modules::*;
-use thiserror::Error;
-
-#[derive(Debug, Error)]
-enum EnvironmentError {
-    /// Cannot import private symbol, i.e.
underscore prefixed - #[error("Cannot import private symbol `{0}`")] - CannotImportPrivateSymbol(String), - #[error("Module has no symbol `{0}`")] - ModuleHasNoSymbol(String), - #[error("Module has no symbol `{0}`, did you mean `{1}`?")] - ModuleHasNoSymbolDidYouMean(String, String), - #[error("Module symbol `{0}` is not exported")] - ModuleSymbolIsNotExported(String), -} diff --git a/starlark-rust/starlark/src/environment/modules.rs b/starlark-rust/starlark/src/environment/modules.rs index 2e55db51422cb..af0ca8ecf6e8e 100644 --- a/starlark-rust/starlark/src/environment/modules.rs +++ b/starlark-rust/starlark/src/environment/modules.rs @@ -16,7 +16,7 @@ */ //! The environment, called "Module" in [this spec]( -//! https://github.com/google/skylark/blob/a0e5de7e63b47e716cca7226662a4c95d47bf873/doc/spec.md) +//! https://github.com/bazelbuild/starlark/blob/master/spec.md) //! is the list of variable in the current scope. It can be frozen, after which //! all values from this environment become immutable. @@ -33,6 +33,7 @@ use starlark_syntax::syntax::ast::Visibility; use crate::cast::transmute; use crate::collections::Hashed; +use crate::docs::DocItem; use crate::docs::DocMember; use crate::docs::DocModule; use crate::docs::DocString; @@ -67,6 +68,8 @@ use crate::values::Value; enum ModuleError { #[error("Retained memory profiling is not enabled")] RetainedMemoryProfileNotEnabled, + #[error("Extra value already set to a value of type `{}`", .0)] + ExtraValueAlreadySet(&'static str), } /// The result of freezing a [`Module`], making it and its contained values immutable. @@ -145,6 +148,10 @@ impl FrozenModule { module.set(name, value.to_value()); } + if let Some(docstring) = globals.docstring() { + module.set_docstring(String::from(docstring)); + } + module.freeze() } @@ -228,7 +235,13 @@ impl FrozenModule { let members = self .all_items() .filter(|n| Module::default_visibility(n.0.as_str()) == Visibility::Public) - .map(|(k, v)| (k.as_str().to_owned(), DocMember::from_value(v.to_value()))) + // FIXME(JakobDegen): Throws out information + .map(|(k, v)| { + ( + k.as_str().to_owned(), + DocItem::Member(DocMember::from_value(v.to_value())), + ) + }) .collect(); DocModule { @@ -237,14 +250,6 @@ impl FrozenModule { } } - /// Retained memory info, or error if not enabled. - pub fn aggregated_heap_profile_info(&self) -> anyhow::Result<&AggregateHeapProfileInfo> { - match &self.module.heap_profile { - None => Err(ModuleError::RetainedMemoryProfileNotEnabled.into()), - Some(p) => Ok(&p.info), - } - } - /// Retained memory info, or error if not enabled. 
    pub fn heap_profile(&self) -> anyhow::Result<ProfileData> {
         match &self.module.heap_profile {
@@ -331,7 +336,7 @@ impl Module {
         }
     }
 
-    pub(crate) fn enable_heap_profile(&self, mode: RetainedHeapProfileMode) {
+    pub(crate) fn enable_retained_heap_profile(&self, mode: RetainedHeapProfileMode) {
         self.heap_profile_on_freeze.set(Some(mode));
     }
 
@@ -437,7 +442,7 @@ impl Module {
             docstring: docstring.into_inner(),
             heap_profile: stacks,
         };
-        let frozen_module_ref = freezer.heap.alloc_any_display_from_debug(rest);
+        let frozen_module_ref = freezer.heap.alloc_any(rest);
         for frozen_def in freezer.frozen_defs.borrow().as_slice() {
             frozen_def.post_freeze(frozen_module_ref, &heap, &freezer.heap);
         }
@@ -445,14 +450,6 @@
         // but can now be dropped
         mem::drop(heap);
 
-        if let Some(stacks) = &frozen_module_ref.heap_profile {
-            assert_eq!(stacks.info.unused_capacity.get(), 0, "sanity check");
-            stacks
-                .info
-                .unused_capacity
-                .set(freezer.heap.unused_capacity());
-        }
-
         Ok(FrozenModule {
             heap: freezer.into_ref(),
             module: frozen_module_ref,
@@ -520,6 +517,7 @@ impl Module {
         self.docstring.replace(Some(docstring));
     }
 
+    #[cfg(not(target_arch = "wasm32"))]
     pub(crate) fn add_eval_duration(&self, duration: Duration) {
         self.eval_duration.set(self.eval_duration.get() + duration);
     }
@@ -532,6 +530,8 @@ impl Module {
             extra_value.trace(tracer);
             self.set_extra_value(extra_value);
         }
+
+        self.heap().trace_interner(tracer);
     }
 
     /// Field that can be used for any purpose you want.
@@ -541,6 +541,15 @@ impl Module {
         self.extra_value.set(Some(v));
     }
 
+    /// Set extra value, but fail if it's already set.
+    pub fn set_extra_value_no_overwrite<'v>(&'v self, v: Value<'v>) -> anyhow::Result<()> {
+        if let Some(existing) = self.extra_value() {
+            return Err(ModuleError::ExtraValueAlreadySet(existing.get_type()).into());
+        }
+        self.set_extra_value(v);
+        Ok(())
+    }
+
     /// Field that can be used for any purpose you want.
     pub fn extra_value<'v>(&'v self) -> Option<Value<'v>> {
         // Cast lifetime.
@@ -564,8 +573,8 @@ mod tests {
     use crate::environment::Globals;
     use crate::environment::GlobalsBuilder;
     use crate::environment::Module;
+    use crate::eval::runtime::profile::mode::ProfileMode;
     use crate::eval::Evaluator;
-    use crate::eval::ProfileMode;
     use crate::syntax::AstModule;
     use crate::syntax::Dialect;
     use crate::values::list::ListRef;
@@ -587,7 +596,7 @@ def f(x):
 x = f(1)
 "
                 .to_owned(),
-                &Dialect::Extended,
+                &Dialect::AllOptionsInternal,
             )
             .unwrap(),
             &Globals::standard(),
         )
         .unwrap();
     }
     let module = module.freeze().unwrap();
-    let profile_info = module.aggregated_heap_profile_info().unwrap();
-    let heap_summary = profile_info.gen_summary_csv();
+    let heap_summary = module.heap_profile().unwrap().gen().unwrap();
     // Smoke test.
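// ---------------------------------------------------------------------------
// Illustration of the `set_extra_value_no_overwrite` method added in the hunk
// above: unlike `set_extra_value`, a second call reports
// `ExtraValueAlreadySet` instead of silently replacing the first value.
// Hypothetical usage, not part of the patch itself.
use starlark::environment::Module;

fn extra_value_demo() -> anyhow::Result<()> {
    let module = Module::new();
    module.set_extra_value_no_overwrite(module.heap().alloc("config"))?;
    // The slot is already occupied, so this second call fails.
    assert!(
        module
            .set_extra_value_no_overwrite(module.heap().alloc("other"))
            .is_err()
    );
    Ok(())
}
// ---------------------------------------------------------------------------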
- assert!(profile_info.unused_capacity.get() > 0); assert!(heap_summary.contains("\"x.star.f\""), "{:?}", heap_summary); } diff --git a/starlark-rust/starlark/src/environment/names.rs b/starlark-rust/starlark/src/environment/names.rs index baa3889ef6fb6..1119a97a6044d 100644 --- a/starlark-rust/starlark/src/environment/names.rs +++ b/starlark-rust/starlark/src/environment/names.rs @@ -101,7 +101,7 @@ impl MutableNames { } pub(crate) fn hide_name(&self, name: &str) { - self.0.borrow_mut().remove(name); + self.0.borrow_mut().shift_remove(name); } pub(crate) fn all_names_and_slots(&self) -> Vec<(FrozenStringValue, ModuleSlotId)> { diff --git a/starlark-rust/starlark/src/errors.rs b/starlark-rust/starlark/src/errors.rs new file mode 100644 index 0000000000000..e5661e24eee3c --- /dev/null +++ b/starlark-rust/starlark/src/errors.rs @@ -0,0 +1,26 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Error types used by Starlark. + +pub use starlark_syntax::frame::Frame; + +pub use crate::analysis::EvalMessage; +pub use crate::analysis::EvalSeverity; +pub use crate::analysis::Lint; + +pub(crate) mod did_you_mean; diff --git a/starlark-rust/starlark/src/errors/mod.rs b/starlark-rust/starlark/src/errors/mod.rs deleted file mode 100644 index 6c115ddb154e7..0000000000000 --- a/starlark-rust/starlark/src/errors/mod.rs +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! Error types used by Starlark, mostly [`Diagnostic`]. - -pub use starlark_syntax::diagnostic::Diagnostic; -pub use starlark_syntax::frame::Frame; - -pub use crate::analysis::EvalMessage; -pub use crate::analysis::EvalSeverity; -pub use crate::analysis::Lint; - -pub(crate) mod did_you_mean; diff --git a/starlark-rust/starlark/src/eval.rs b/starlark-rust/starlark/src/eval.rs new file mode 100644 index 0000000000000..16e52140f2362 --- /dev/null +++ b/starlark-rust/starlark/src/eval.rs @@ -0,0 +1,176 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Evaluate some code, typically done by creating an [`Evaluator`], then calling +//! [`eval_module`](Evaluator::eval_module). + +pub(crate) mod bc; +pub(crate) mod compiler; +mod params; +pub(crate) mod runtime; +pub(crate) mod soft_error; + +use std::collections::HashMap; +use std::mem; +#[cfg(not(target_arch = "wasm32"))] +use std::time::Instant; + +use dupe::Dupe; +pub use runtime::arguments::Arguments; +pub use runtime::before_stmt::BeforeStmtFuncDyn; +pub use runtime::evaluator::Evaluator; +pub use runtime::file_loader::FileLoader; +pub use runtime::file_loader::ReturnFileLoader; +pub use runtime::params::parser::ParametersParser; +pub use runtime::params::spec::ParametersSpec; +pub use runtime::params::spec::ParametersSpecParam; +pub use runtime::profile::data::ProfileData; +pub use runtime::profile::mode::ProfileMode; +pub use soft_error::SoftErrorHandler; +pub use starlark_syntax::call_stack::CallStack; +use starlark_syntax::slice_vec_ext::SliceExt; +use starlark_syntax::syntax::module::AstModule; +use starlark_syntax::syntax::module::AstModuleFields; + +use crate::collections::symbol::symbol::Symbol; +use crate::docs::DocString; +use crate::environment::Globals; +use crate::eval::compiler::def::DefInfo; +use crate::eval::compiler::scope::scope_resolver_globals::ScopeResolverGlobals; +use crate::eval::compiler::scope::ModuleScopes; +use crate::eval::compiler::scope::ScopeId; +use crate::eval::compiler::Compiler; +pub use crate::eval::params::param_specs; +use crate::eval::runtime::arguments::ArgNames; +use crate::eval::runtime::arguments::ArgumentsFull; +use crate::eval::runtime::evaluator; +use crate::syntax::DialectTypes; +use crate::values::Value; + +impl<'v, 'a, 'e> Evaluator<'v, 'a, 'e> { + /// Evaluate an [`AstModule`] with this [`Evaluator`], modifying the in-scope + /// [`Module`](crate::environment::Module) as appropriate. 
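// ---------------------------------------------------------------------------
// A rough sketch of driving `eval_module` end to end through the public API.
// The file name and snippet are invented, and the exact error types returned
// by `AstModule::parse` in this version are an assumption, so conversion is
// left to `anyhow`.
use starlark::environment::{Globals, Module};
use starlark::eval::Evaluator;
use starlark::syntax::{AstModule, Dialect};

fn run_snippet() -> anyhow::Result<String> {
    let ast = AstModule::parse("demo.star", "x = 1 + 2\nx".to_owned(), &Dialect::Standard)?;
    let globals = Globals::standard();
    let module = Module::new();
    let mut eval = Evaluator::new(&module);
    // Evaluates the module and yields the value of the trailing expression.
    let value = eval.eval_module(ast, &globals)?;
    Ok(value.to_string())
}
// ---------------------------------------------------------------------------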
+    pub fn eval_module(&mut self, ast: AstModule, globals: &Globals) -> crate::Result<Value<'v>> {
+        #[cfg(not(target_arch = "wasm32"))]
+        let start = Instant::now();
+
+        let (codemap, statement, dialect, typecheck) = ast.into_parts();
+
+        let codemap = self.module_env.frozen_heap().alloc_any(codemap.dupe());
+
+        let globals = self.module_env.frozen_heap().alloc_any(globals.dupe());
+
+        if let Some(docstring) = DocString::extract_raw_starlark_docstring(&statement) {
+            self.module_env.set_docstring(docstring)
+        }
+
+        let ModuleScopes {
+            cst,
+            module_slot_count,
+            scope_data,
+            top_level_stmt_count,
+        } = ModuleScopes::check_module_err(
+            self.module_env.mutable_names(),
+            self.module_env.frozen_heap(),
+            &HashMap::new(),
+            statement,
+            ScopeResolverGlobals {
+                globals: Some(globals),
+            },
+            codemap,
+            &dialect,
+        )?;
+
+        let scope_names = scope_data.get_scope(ScopeId::module());
+        let local_names = self.frozen_heap().alloc_any_slice(&scope_names.used);
+
+        self.module_env.slots().ensure_slots(module_slot_count);
+        let old_def_info = mem::replace(
+            &mut self.module_def_info,
+            self.module_env.frozen_heap().alloc_any(DefInfo::for_module(
+                codemap,
+                local_names,
+                self.module_env
+                    .frozen_heap()
+                    .alloc_any_slice(&scope_names.parent),
+                globals,
+            )),
+        );
+
+        self.call_stack.alloc_if_needed(
+            self.max_callstack_size
+                .unwrap_or(evaluator::DEFAULT_STACK_SIZE),
+        )?;
+
+        // Set up the world to allow evaluation (do NOT use ? from now on)
+
+        self.call_stack.push(Value::new_none(), None).unwrap();
+
+        // Evaluation
+        let mut compiler = Compiler {
+            scope_data,
+            locals: Vec::new(),
+            globals,
+            codemap,
+            eval: self,
+            check_types: dialect.enable_types == DialectTypes::Enable,
+            top_level_stmt_count,
+            typecheck,
+        };
+
+        let res = compiler.eval_module(cst, local_names);
+
+        // Clean up the world, putting everything back
+        self.call_stack.pop();
+
+        self.module_def_info = old_def_info;
+
+        #[cfg(not(target_arch = "wasm32"))]
+        self.module_env.add_eval_duration(start.elapsed());
+
+        // Return the result of evaluation
+        res.map_err(|e| e.into_error())
+    }
+
+    /// Evaluate a function stored in a [`Value`], passing in `positional` and `named` arguments.
+    pub fn eval_function(
+        &mut self,
+        function: Value<'v>,
+        positional: &[Value<'v>],
+        named: &[(&str, Value<'v>)],
+    ) -> crate::Result<Value<'v>> {
+        let names = named.map(|(s, _)| (Symbol::new(s), self.heap().alloc_str(s)));
+        let named = named.map(|x| x.1);
+        let params = Arguments(ArgumentsFull {
+            pos: positional,
+            named: &named,
+            names: ArgNames::new_check_unique(&names)?,
+            args: None,
+            kwargs: None,
+        });
+        self.call_stack.alloc_if_needed(
+            self.max_callstack_size
+                .unwrap_or(evaluator::DEFAULT_STACK_SIZE),
+        )?;
+        // eval_module pushes an "empty" call stack frame. Other places expect that first frame to be ignorable, and
+        // so we push an empty frame too (otherwise things would ignore this function's own frame).
+        self.with_call_stack(Value::new_none(), None, |this| {
+            function.invoke(&params, this)
+        })
+        .map_err(Into::into)
+    }
+}
diff --git a/starlark-rust/starlark/src/eval/bc.rs b/starlark-rust/starlark/src/eval/bc.rs
new file mode 100644
index 0000000000000..448b2bc93280d
--- /dev/null
+++ b/starlark-rust/starlark/src/eval/bc.rs
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2019 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Bytecode interpreter. + +pub(crate) mod addr; +pub(crate) mod bytecode; +pub(crate) mod call; +pub(crate) mod compiler; +pub(crate) mod definitely_assigned; +pub(crate) mod for_loop; +pub(crate) mod frame; +pub(crate) mod if_debug; +pub(crate) mod instr; +pub(crate) mod instr_arg; +pub(crate) mod instr_impl; +pub(crate) mod instrs; +pub(crate) mod native_function; +pub(crate) mod opcode; +pub(crate) mod repr; +pub(crate) mod slow_arg; +pub(crate) mod stack_ptr; +pub(crate) mod writer; diff --git a/starlark-rust/starlark/src/eval/bc/addr.rs b/starlark-rust/starlark/src/eval/bc/addr.rs index 40ebe489ed123..ef4a619b6c849 100644 --- a/starlark-rust/starlark/src/eval/bc/addr.rs +++ b/starlark-rust/starlark/src/eval/bc/addr.rs @@ -37,7 +37,7 @@ use crate::eval::bc::repr::BC_INSTR_ALIGN; #[derive( Eq, PartialEq, Copy, Clone, Dupe, Debug, PartialOrd, Ord, Display, Hash, Default )] -#[display(fmt = "@{}", _0)] +#[display("@{}", _0)] pub(crate) struct BcAddr(pub(crate) u32); impl BcAddr { diff --git a/starlark-rust/starlark/src/eval/bc/bytecode.rs b/starlark-rust/starlark/src/eval/bc/bytecode.rs index 7adb0f727984b..31a255095b273 100644 --- a/starlark-rust/starlark/src/eval/bc/bytecode.rs +++ b/starlark-rust/starlark/src/eval/bc/bytecode.rs @@ -81,7 +81,7 @@ impl Bc { #[inline(never)] pub(crate) fn wrap_error_for_instr_ptr( ptr: BcPtrAddr, - e: anyhow::Error, + e: crate::Error, eval: &Evaluator, ) -> EvalException { let span = Self::slow_arg_at_ptr(ptr).span; @@ -94,7 +94,7 @@ impl Bc { #[inline(always)] pub(crate) fn run<'v, EC: EvaluationCallbacks>( &self, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ec: &mut EC, ) -> Result, EvalException> { debug_assert!(eval.current_frame.is_inititalized()); @@ -117,7 +117,7 @@ impl Bc { /// Execute one instruction. #[cfg_attr(not(debug_assertions), inline(always))] fn step<'v, 'b, EC: EvaluationCallbacks>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ec: &mut EC, frame: BcFramePtr<'v>, ip: BcPtrAddr<'b>, @@ -125,13 +125,13 @@ fn step<'v, 'b, EC: EvaluationCallbacks>( let opcode = ip.get_opcode(); // println!("{}: {:?}", self.current_ip, opcode); - struct HandlerImpl<'v, 'a, 'y, 'b> { - eval: &'y mut Evaluator<'v, 'a>, + struct HandlerImpl<'v, 'a, 'e, 'y, 'b> { + eval: &'y mut Evaluator<'v, 'a, 'e>, frame: BcFramePtr<'v>, ip: BcPtrAddr<'b>, } - impl<'v, 'a, 'y, 'b> BcOpcodeHandler> for HandlerImpl<'v, 'a, 'y, 'b> { + impl<'v, 'a, 'e, 'y, 'b> BcOpcodeHandler> for HandlerImpl<'v, 'a, 'e, 'y, 'b> { #[cfg_attr(not(debug_assertions), inline(always))] fn handle(self) -> InstrControl<'v, 'b> { let HandlerImpl { eval, frame, ip } = self; @@ -140,14 +140,16 @@ fn step<'v, 'b, EC: EvaluationCallbacks>( } } - ec.before_instr(eval, ip, opcode); + match ec.before_instr(eval, ip, opcode) { + Ok(()) => {} + Err(e) => return InstrControl::Err(e), + } opcode.dispatch(HandlerImpl { eval, frame, ip }) } -/// Execute the code block, either a module, a function body or a loop body. -// Do not inline this function because it is called from two places: function and loop. 
+/// Execute the code block, either a module or a function body. pub(crate) fn run_block<'v, EC: EvaluationCallbacks>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, ec: &mut EC, mut ip: BcPtrAddr, ) -> Result, EvalException> { diff --git a/starlark-rust/starlark/src/eval/bc/call.rs b/starlark-rust/starlark/src/eval/bc/call.rs index 703b9b15f6489..fa4c4c2e83e51 100644 --- a/starlark-rust/starlark/src/eval/bc/call.rs +++ b/starlark-rust/starlark/src/eval/bc/call.rs @@ -25,7 +25,7 @@ use std::marker::PhantomData; use starlark_syntax::slice_vec_ext::VecExt; use crate::coerce::coerce; -use crate::collections::symbol_map::Symbol; +use crate::collections::symbol::symbol::Symbol; use crate::eval::bc::frame::BcFramePtr; use crate::eval::bc::instr_arg::BcInstrArg; use crate::eval::bc::stack_ptr::BcSlotIn; @@ -99,33 +99,28 @@ impl BcCallArgsFull { impl Display for BcCallArgsFull { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - let mut first = true; - let mut write_sep = |f: &mut Formatter| { - if !first { - write!(f, " ")?; - } - first = false; - Ok(()) - }; + let BcCallArgsFull { + pos_named, + names, + args, + kwargs, + } = self; + write!(f, "{}", pos_named)?; // Number of positional arguments. if self.pos() != 0 { - write_sep(f)?; - write!(f, "{}", self.pos())?; + write!(f, " {}", self.pos())?; } // Named arguments. - for (_, name) in &*self.names { - write_sep(f)?; - write!(f, "{}", name.as_str())?; + for (_, name) in &**names { + write!(f, " {}", name.as_str())?; } // Star argument? - if self.args.is_some() { - write_sep(f)?; - write!(f, "*")?; + if let Some(args) = args { + write!(f, " *{args}")?; } // Star-star argument? - if self.kwargs.is_some() { - write_sep(f)?; - write!(f, "**")?; + if let Some(kwargs) = kwargs { + write!(f, " **{kwargs}")?; } Ok(()) } @@ -141,7 +136,7 @@ impl BcCallArgs for BcCallArgsFull { ArgumentsFull { pos, named, - names: ArgNames::new(coerce(&self.names)), + names: ArgNames::new_unique(coerce(&self.names)), args, kwargs, } @@ -155,7 +150,7 @@ impl BcCallArgs for BcCallArgsPos { ArgumentsFull { pos, named: &[], - names: ArgNames::new(&[]), + names: ArgNames::new_unique(&[]), args: None, kwargs: None, } @@ -180,7 +175,7 @@ impl BcCallArgsForDef for BcCallArgsFull { ArgumentsFull { pos, named, - names: ArgNames::new(coerce(&self.names)), + names: ArgNames::new_unique(coerce(&self.names)), args, kwargs, } diff --git a/starlark-rust/starlark/src/eval/bc/compiler/mod.rs b/starlark-rust/starlark/src/eval/bc/compiler.rs similarity index 100% rename from starlark-rust/starlark/src/eval/bc/compiler/mod.rs rename to starlark-rust/starlark/src/eval/bc/compiler.rs diff --git a/starlark-rust/starlark/src/eval/bc/compiler/assign.rs b/starlark-rust/starlark/src/eval/bc/compiler/assign.rs index 2c49d7ca67dff..a67a63d941502 100644 --- a/starlark-rust/starlark/src/eval/bc/compiler/assign.rs +++ b/starlark-rust/starlark/src/eval/bc/compiler/assign.rs @@ -19,7 +19,7 @@ use starlark_syntax::slice_vec_ext::SliceExt; -use crate::collections::symbol_map::Symbol; +use crate::collections::symbol::symbol::Symbol; use crate::eval::bc::compiler::expr::write_n_exprs; use crate::eval::bc::instr_impl::InstrSetArrayIndex; use crate::eval::bc::instr_impl::InstrSetObjectField; @@ -86,12 +86,12 @@ impl IrSpanned { }) .ok(); if let Some(all_local) = all_local { - let args = bc.heap.alloc_any_slice_display_from_debug(&all_local); + let args = bc.heap.alloc_any_slice(&all_local); bc.write_instr::(span, (value, args)); } else { bc.alloc_slots(xs.len() as u32, |slots, bc| { 
let args: Vec = slots.iter().map(|s| s.to_out()).collect(); - let args = bc.heap.alloc_any_slice_display_from_debug(&args); + let args = bc.heap.alloc_any_slice(&args); bc.write_instr::(span, (value, args)); for (x, slot) in xs.iter().zip(slots.iter()) { diff --git a/starlark-rust/starlark/src/eval/bc/compiler/assign_modify.rs b/starlark-rust/starlark/src/eval/bc/compiler/assign_modify.rs index adbb671e13e19..588e34b2e2c9f 100644 --- a/starlark-rust/starlark/src/eval/bc/compiler/assign_modify.rs +++ b/starlark-rust/starlark/src/eval/bc/compiler/assign_modify.rs @@ -19,7 +19,7 @@ use starlark_syntax::syntax::ast::AssignOp; -use crate::collections::symbol_map::Symbol; +use crate::collections::symbol::symbol::Symbol; use crate::eval::bc::compiler::expr::write_n_exprs; use crate::eval::bc::instr_impl::InstrAddAssign; use crate::eval::bc::instr_impl::InstrArrayIndex; diff --git a/starlark-rust/starlark/src/eval/bc/compiler/call.rs b/starlark-rust/starlark/src/eval/bc/compiler/call.rs index 26fb63538e692..aa7cd156c5200 100644 --- a/starlark-rust/starlark/src/eval/bc/compiler/call.rs +++ b/starlark-rust/starlark/src/eval/bc/compiler/call.rs @@ -19,7 +19,7 @@ use either::Either; -use crate::collections::symbol_map::Symbol; +use crate::collections::symbol::symbol::Symbol; use crate::eval::bc::call::BcCallArgsFull; use crate::eval::bc::call::BcCallArgsPos; use crate::eval::bc::compiler::expr::write_expr_opt; diff --git a/starlark-rust/starlark/src/eval/bc/compiler/def.rs b/starlark-rust/starlark/src/eval/bc/compiler/def.rs index 53b120d480827..4d2c97e1cfa34 100644 --- a/starlark-rust/starlark/src/eval/bc/compiler/def.rs +++ b/starlark-rust/starlark/src/eval/bc/compiler/def.rs @@ -36,21 +36,24 @@ impl DefCompiled { pub(crate) fn write_bc(&self, span: FrameSpan, target: BcSlotOut, bc: &mut BcWriter) { let DefCompiled { - ref function_name, - ref params, + function_name, + params, return_type, info, - } = *self; + } = self; let function_name = function_name.clone(); - let num_positional = params.num_positional; + let ParametersCompiled { + params: param_list, + indices, + } = params; let how_many_slots_we_need = params.count_exprs(); bc.alloc_slots(how_many_slots_we_need, |slots, bc| { let mut slots_i = slots.iter(); let mut value_count = 0; - let params = params.params.map(|p| { + let params = param_list.map(|p| { p.map(|p| { p.map_expr(|e| { e.write_bc(slots_i.next().unwrap().to_out(), bc); @@ -62,13 +65,13 @@ impl DefCompiled { let params = ParametersCompiled { params, - num_positional, + indices: *indices, }; let instr_def_data = InstrDefData { function_name, params, - return_type, - info, + return_type: *return_type, + info: *info, }; assert!(slots_i.next().is_none()); diff --git a/starlark-rust/starlark/src/eval/bc/frame.rs b/starlark-rust/starlark/src/eval/bc/frame.rs index 4b4bb0399d3c7..1d94f39213b60 100644 --- a/starlark-rust/starlark/src/eval/bc/frame.rs +++ b/starlark-rust/starlark/src/eval/bc/frame.rs @@ -17,7 +17,6 @@ //! Local variables and stack, in single allocation. 
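// ---------------------------------------------------------------------------
// Conceptual sketch (not crate API) of the single-allocation layout the
// frame module above implements: local slots and the operand stack live in
// one contiguous buffer, so creating a frame is one allocation instead of
// two. The types are toy stand-ins for the real `Value` slots.
struct ToyFrame {
    // `local_count` local slots first, then the operand stack.
    slots: Box<[Option<u64>]>,
    local_count: usize,
}

impl ToyFrame {
    fn new(local_count: usize, max_stack_size: usize) -> ToyFrame {
        ToyFrame {
            slots: vec![None; local_count + max_stack_size].into_boxed_slice(),
            local_count,
        }
    }

    fn locals(&self) -> &[Option<u64>] {
        &self.slots[..self.local_count]
    }

    fn stack(&self) -> &[Option<u64>] {
        &self.slots[self.local_count..]
    }
}
// ---------------------------------------------------------------------------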
-use std::cell::Cell; use std::mem; use std::mem::MaybeUninit; use std::ptr; @@ -96,7 +95,7 @@ impl<'v> BcFramePtr<'v> { } #[inline(always)] - fn frame_mut(&mut self) -> &mut BcFrame<'v> { + fn frame_mut<'a>(self) -> &'a mut BcFrame<'v> { debug_assert!(self.is_inititalized()); unsafe { let frame = (self.slots_ptr as *mut u8).sub(BcFrame::offset_of_slots()) as *mut BcFrame; @@ -120,7 +119,7 @@ impl<'v> BcFramePtr<'v> { } #[inline(always)] - pub(crate) fn set_slot(mut self, slot: LocalSlotIdCapturedOrNot, value: Value<'v>) { + pub(crate) fn set_slot(self, slot: LocalSlotIdCapturedOrNot, value: Value<'v>) { self.frame_mut().set_slot(slot, value) } @@ -130,7 +129,7 @@ impl<'v> BcFramePtr<'v> { } #[inline(always)] - pub(crate) fn set_bc_slot(mut self, slot: BcSlotOut, value: Value<'v>) { + pub(crate) fn set_bc_slot(self, slot: BcSlotOut, value: Value<'v>) { self.frame_mut().set_bc_slot(slot, value) } @@ -145,7 +144,7 @@ impl<'v> BcFramePtr<'v> { } #[inline(always)] - pub(crate) fn set_iter_index(mut self, loop_depth: LoopDepth, index: usize) { + pub(crate) fn set_iter_index(self, loop_depth: LoopDepth, index: usize) { self.frame_mut().set_iter_index(loop_depth, index) } @@ -154,8 +153,8 @@ impl<'v> BcFramePtr<'v> { } #[inline(always)] - pub(crate) fn locals(&self) -> &[Cell>>] { - self.frame().locals() + pub(crate) unsafe fn locals_mut<'a>(self) -> &'a mut [Option>] { + self.frame_mut().locals_mut() } } @@ -176,16 +175,6 @@ impl<'v> BcFrame<'v> { } } - #[inline(always)] - fn locals(&self) -> &[Cell>>] { - unsafe { - slice::from_raw_parts( - self.slots.as_ptr() as *const Cell>, - self.local_count as usize, - ) - } - } - #[inline(always)] fn locals_mut(&mut self) -> &mut [Option>] { unsafe { slice::from_raw_parts_mut(self.slots.as_mut_ptr(), self.local_count as usize) } @@ -319,12 +308,12 @@ unsafe impl<'v> Trace<'v> for BcFramePtr<'v> { } #[inline(always)] -fn alloca_raw<'v, 'a, R>( - eval: &mut Evaluator<'v, 'a>, +fn alloca_raw<'v, 'a, 'e, R>( + eval: &mut Evaluator<'v, 'a, 'e>, local_count: u32, max_stack_size: u32, max_loop_depth: LoopDepth, - k: impl FnOnce(&mut Evaluator<'v, 'a>, BcFramePtr<'v>) -> R, + k: impl FnOnce(&mut Evaluator<'v, 'a, 'e>, BcFramePtr<'v>) -> R, ) -> R { assert_eq!(mem::align_of::() % mem::size_of::(), 0); assert_eq!(mem::size_of::(), mem::size_of::()); @@ -349,19 +338,19 @@ fn alloca_raw<'v, 'a, R>( /// /// After callback finishes, previous frame is restored. #[inline(always)] -pub(crate) fn alloca_frame<'v, 'a, R>( - eval: &mut Evaluator<'v, 'a>, +pub(crate) fn alloca_frame<'v, 'a, 'e, R>( + eval: &mut Evaluator<'v, 'a, 'e>, local_count: u32, max_stack_size: u32, loop_depth: LoopDepth, - k: impl FnOnce(&mut Evaluator<'v, 'a>) -> R, + k: impl FnOnce(&mut Evaluator<'v, 'a, 'e>) -> R, ) -> R { alloca_raw( eval, local_count, max_stack_size, loop_depth, - |eval, mut frame| { + |eval, frame| { // TODO(nga): no need to fill the slots for parameters. frame.frame_mut().init(); let old_frame = mem::replace(&mut eval.current_frame, frame); diff --git a/starlark-rust/starlark/src/eval/bc/if_debug.rs b/starlark-rust/starlark/src/eval/bc/if_debug.rs index 80bd891c12258..c735016b84177 100644 --- a/starlark-rust/starlark/src/eval/bc/if_debug.rs +++ b/starlark-rust/starlark/src/eval/bc/if_debug.rs @@ -38,7 +38,6 @@ use dupe::Dupe; #[derive(Debug, Default, Copy, Clone, Dupe)] // In release build this structure is DST, // so gazebo suggests implementing `Dupe` for any ``. T102920913. 
-#[cfg_attr(feature = "gazebo_lint", allow(gazebo_lint_impl_dupe))] pub(crate) struct IfDebug { #[cfg(debug_assertions)] value: T, diff --git a/starlark-rust/starlark/src/eval/bc/instr.rs b/starlark-rust/starlark/src/eval/bc/instr.rs index b6d03722a572f..88f5e40dec398 100644 --- a/starlark-rust/starlark/src/eval/bc/instr.rs +++ b/starlark-rust/starlark/src/eval/bc/instr.rs @@ -34,7 +34,7 @@ pub(crate) enum InstrControl<'v, 'b> { Return(Value<'v>), /// Error. This can be either any `anyhow::Error` or `Diagnostics`. /// If it is the former, error span will be added from instruction metadata. - Err(anyhow::Error), + Err(crate::Error), } pub(crate) trait BcInstr: Sized + 'static { @@ -44,7 +44,7 @@ pub(crate) trait BcInstr: Sized + 'static { /// Execute the instruction. fn run<'v, 'b>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, ip: BcPtrAddr<'b>, arg: &Self::Arg, diff --git a/starlark-rust/starlark/src/eval/bc/instr_arg.rs b/starlark-rust/starlark/src/eval/bc/instr_arg.rs index 8e8cbc69103ae..5b680feee8bd1 100644 --- a/starlark-rust/starlark/src/eval/bc/instr_arg.rs +++ b/starlark-rust/starlark/src/eval/bc/instr_arg.rs @@ -24,7 +24,7 @@ use std::fmt::Write; use itertools::Itertools; -use crate::collections::symbol_map::Symbol; +use crate::collections::symbol::symbol::Symbol; use crate::collections::Hashed; use crate::collections::SmallMap; use crate::environment::slots::ModuleSlotId; @@ -486,7 +486,7 @@ impl BcInstrArg for BcSlotOut { end_arg: Option<&BcInstrEndArg>, f: &mut dyn Write, ) -> fmt::Result { - write!(f, " {}", BcSlotDisplay(param.get(), end_arg)) + write!(f, " ->{}", BcSlotDisplay(param.get(), end_arg)) } fn visit_jump_addr(_param: &Self, _ip: BcAddr, _consumer: &mut dyn FnMut(BcAddr)) {} diff --git a/starlark-rust/starlark/src/eval/bc/instr_impl.rs b/starlark-rust/starlark/src/eval/bc/instr_impl.rs index c2740d29d2be4..0453fa760c565 100644 --- a/starlark-rust/starlark/src/eval/bc/instr_impl.rs +++ b/starlark-rust/starlark/src/eval/bc/instr_impl.rs @@ -20,12 +20,11 @@ use std::cmp::Ordering; use std::marker; use std::ptr; -use std::time::Instant; use starlark_syntax::eval_exception::EvalException; use crate::coerce::coerce; -use crate::collections::symbol_map::Symbol; +use crate::collections::symbol::symbol::Symbol; use crate::collections::Hashed; use crate::collections::SmallMap; use crate::const_frozen_string; @@ -57,14 +56,14 @@ use crate::eval::compiler::def::ParametersCompiled; use crate::eval::compiler::expr::get_attr_hashed_bind; use crate::eval::compiler::expr::get_attr_hashed_raw; use crate::eval::compiler::expr::EvalError; -use crate::eval::compiler::expr::MemberOrValue; -use crate::eval::compiler::expr_throw; +use crate::eval::compiler::expr_throw_starlark_result; use crate::eval::compiler::stmt::add_assign; use crate::eval::compiler::stmt::bit_or_assign; use crate::eval::compiler::stmt::possible_gc; use crate::eval::compiler::stmt::AssignError; use crate::eval::runtime::arguments::ResolvedArgName; use crate::eval::runtime::frame_span::FrameSpan; +use crate::eval::runtime::profile::instant::ProfilerInstant; use crate::eval::runtime::slots::LocalCapturedSlotId; use crate::eval::runtime::slots::LocalSlotId; use crate::eval::Arguments; @@ -72,7 +71,7 @@ use crate::eval::DefInfo; use crate::eval::Evaluator; use crate::eval::ParametersSpec; use crate::values::dict::Dict; -use crate::values::int::PointerI32; +use crate::values::int::pointer_i32::PointerI32; use crate::values::layout::value_not_special::FrozenValueNotSpecial; use 
crate::values::string::dot_format::format_one; use crate::values::string::interpolation::percent_s_one; @@ -95,11 +94,11 @@ pub(crate) trait InstrNoFlowImpl: 'static { type Arg: BcInstrArg; fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, ip: BcPtrAddr, arg: &Self::Arg, - ) -> anyhow::Result<()>; + ) -> crate::Result<()>; } pub(crate) struct InstrNoFlow(marker::PhantomData); @@ -109,7 +108,7 @@ impl BcInstr for InstrNoFlow { #[inline(always)] fn run<'v, 'b>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, ip: BcPtrAddr<'b>, arg: &Self::Arg, @@ -129,11 +128,11 @@ impl InstrNoFlowImpl for InstrConstImpl { #[inline(always)] fn run_with_args<'v>( - _eval: &mut Evaluator<'v, '_>, + _eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (constant, target): &(FrozenValue, BcSlotOut), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { frame.set_bc_slot(*target, constant.to_value()); Ok(()) } @@ -151,7 +150,6 @@ pub(crate) struct InstrArrayIndexImpl; pub(crate) struct InstrSetArrayIndexImpl; pub(crate) struct InstrArrayIndexSetImpl; pub(crate) struct InstrObjectFieldImpl; -pub(crate) struct InstrObjectFieldRawImpl; pub(crate) struct InstrSetObjectFieldImpl; pub(crate) struct InstrSliceImpl; pub(crate) struct InstrArrayIndex2Impl; @@ -168,7 +166,6 @@ pub(crate) type InstrArrayIndex = InstrNoFlow; pub(crate) type InstrSetArrayIndex = InstrNoFlow; pub(crate) type InstrArrayIndexSet = InstrNoFlow; pub(crate) type InstrObjectField = InstrNoFlow; -pub(crate) type InstrObjectFieldRaw = InstrNoFlow; pub(crate) type InstrSetObjectField = InstrNoFlow; pub(crate) type InstrSlice = InstrNoFlow; pub(crate) type InstrArrayIndex2 = InstrNoFlow; @@ -178,11 +175,11 @@ impl InstrNoFlowImpl for InstrLoadLocalImpl { #[inline(always)] fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (source, target): &(LocalSlotId, BcSlotOut), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let value = eval.get_slot_local(frame, *source)?; frame.set_bc_slot(*target, value); Ok(()) @@ -194,11 +191,11 @@ impl InstrNoFlowImpl for InstrLoadLocalCapturedImpl { #[inline(always)] fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (source, target): &(LocalCapturedSlotId, BcSlotOut), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let value = eval.get_slot_local_captured(*source)?; frame.set_bc_slot(*target, value); Ok(()) @@ -210,11 +207,11 @@ impl InstrNoFlowImpl for InstrLoadModuleImpl { #[inline(always)] fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (source, target): &(ModuleSlotId, BcSlotOut), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let value = eval.get_slot_module(*source)?; frame.set_bc_slot(*target, value); Ok(()) @@ -226,11 +223,11 @@ impl InstrNoFlowImpl for InstrMovImpl { #[inline(always)] fn run_with_args<'v>( - _eval: &mut Evaluator<'v, '_>, + _eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (source, target): &(BcSlotIn, BcSlotOut), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let v = frame.get_bc_slot(*source); frame.set_bc_slot(*target, v); Ok(()) @@ -241,11 +238,11 @@ impl InstrNoFlowImpl for InstrStoreLocalCapturedImpl { type Arg = (BcSlotIn, LocalCapturedSlotId); fn run_with_args<'v>( - eval: &mut 
Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (source, target): &(BcSlotIn, LocalCapturedSlotId), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let v = frame.get_bc_slot(*source); eval.set_slot_local_captured(*target, v); Ok(()) @@ -256,11 +253,11 @@ impl InstrNoFlowImpl for InstrStoreModuleAndExportImpl { type Arg = (BcSlotIn, ModuleSlotId, String); fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (source, slot, name): &(BcSlotIn, ModuleSlotId, String), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let v = frame.get_bc_slot(*source); v.export_as(name.as_str(), eval)?; eval.set_slot_module(*slot, v); @@ -272,11 +269,11 @@ impl InstrNoFlowImpl for InstrStoreModuleImpl { type Arg = (BcSlotIn, ModuleSlotId); fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (source, target): &(BcSlotIn, ModuleSlotId), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let v = frame.get_bc_slot(*source); eval.set_slot_module(*target, v); Ok(()) @@ -288,17 +285,17 @@ impl InstrNoFlowImpl for InstrUnpackImpl { #[inline(always)] fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (source, target): &(BcSlotIn, FrozenRef<'static, [BcSlotOut]>), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let v = frame.get_bc_slot(*source); let nvl = v.length()?; if nvl != target.len() as i32 { - return Err( - AssignError::IncorrectNumberOfValueToUnpack(target.len() as i32, nvl).into(), - ); + return Err(crate::Error::new_other( + AssignError::IncorrectNumberOfValueToUnpack(target.len() as i32, nvl), + )); } let mut i = 0; for item in v.iterate(eval.heap())? 
{ @@ -318,11 +315,11 @@ impl InstrNoFlowImpl for InstrArrayIndexImpl { #[inline(always)] fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (array, index, target): &(BcSlotIn, BcSlotIn, BcSlotOut), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let array = frame.get_bc_slot(*array); let index = frame.get_bc_slot(*index); let value = array.at(index, eval.heap())?; @@ -336,11 +333,11 @@ impl InstrNoFlowImpl for InstrSetArrayIndexImpl { #[inline(always)] fn run_with_args<'v>( - _eval: &mut Evaluator<'v, '_>, + _eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (source, array, index): &(BcSlotIn, BcSlotIn, BcSlotIn), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let value = frame.get_bc_slot(*source); let array = frame.get_bc_slot(*array); let index = frame.get_bc_slot(*index); @@ -353,11 +350,11 @@ impl InstrNoFlowImpl for InstrArrayIndexSetImpl { #[inline(always)] fn run_with_args<'v>( - _eval: &mut Evaluator<'v, '_>, + _eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (array, index, source): &(BcSlotIn, BcSlotIn, BcSlotIn), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let value = frame.get_bc_slot(*source); let array = frame.get_bc_slot(*array); let index = frame.get_bc_slot(*index); @@ -370,11 +367,11 @@ impl InstrNoFlowImpl for InstrObjectFieldImpl { #[inline(always)] fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (object, field, target): &(BcSlotIn, Symbol, BcSlotOut), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let object = frame.get_bc_slot(*object); let value = get_attr_hashed_bind(object, field, eval.heap())?; frame.set_bc_slot(*target, value); @@ -382,36 +379,15 @@ impl InstrNoFlowImpl for InstrObjectFieldImpl { } } -/// Get raw field. -/// -/// For regular field, get the field. For methods, get the raw unbound method. -/// -/// This instruction is used for call profiling, where we don't need to bind the methods. 
-impl InstrNoFlowImpl for InstrObjectFieldRawImpl { - type Arg = (BcSlotIn, Symbol, BcSlotOut); - - fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, - frame: BcFramePtr<'v>, - _ip: BcPtrAddr, - (object, field, target): &(BcSlotIn, Symbol, BcSlotOut), - ) -> anyhow::Result<()> { - let object = frame.get_bc_slot(*object); - let value = get_attr_hashed_raw(object, field, eval.heap())?; - frame.set_bc_slot(*target, value.to_value()); - Ok(()) - } -} - impl InstrNoFlowImpl for InstrSetObjectFieldImpl { type Arg = (BcSlotIn, BcSlotIn, Symbol); fn run_with_args<'v>( - _eval: &mut Evaluator<'v, '_>, + _eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (source, object, field): &(BcSlotIn, BcSlotIn, Symbol), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let v = frame.get_bc_slot(*source); let object = frame.get_bc_slot(*object); object.set_attr(field.as_str(), v) @@ -429,7 +405,7 @@ impl InstrNoFlowImpl for InstrSliceImpl { #[inline(always)] fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (list, start, stop, step, target): &( @@ -439,7 +415,7 @@ impl InstrNoFlowImpl for InstrSliceImpl { Option<BcSlotIn>, BcSlotOut, ), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let list = frame.get_bc_slot(*list); let start = start.map(|s| frame.get_bc_slot(s)); let stop = stop.map(|s| frame.get_bc_slot(s)); @@ -455,11 +431,11 @@ impl InstrNoFlowImpl for InstrArrayIndex2Impl { #[cold] fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (array, index0, index1, target): &(BcSlotIn, BcSlotIn, BcSlotIn, BcSlotOut), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let array = frame.get_bc_slot(*array); let index0 = frame.get_bc_slot(*index0); let index1 = frame.get_bc_slot(*index1); @@ -483,7 +459,7 @@ pub(crate) type InstrEqInt = InstrNoFlow<InstrEqIntImpl>; impl InstrBinOpImpl for InstrEqImpl { #[inline(always)] - fn eval<'v>(v0: Value<'v>, v1: Value<'v>, _heap: &'v Heap) -> anyhow::Result<Value<'v>> { + fn eval<'v>(v0: Value<'v>, v1: Value<'v>, _heap: &'v Heap) -> crate::Result<Value<'v>> { v0.equals(v1).map(Value::new_bool) } } @@ -493,11 +469,11 @@ impl InstrNoFlowImpl for InstrEqConstImpl { #[inline(always)] fn run_with_args<'v>( - _eval: &mut Evaluator<'v, '_>, + _eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (a, b, target): &(BcSlotIn, FrozenValueNotSpecial, BcSlotOut), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let a = frame.get_bc_slot(*a); let r = b.equals(a)?; frame.set_bc_slot(*target, Value::new_bool(r)); @@ -510,11 +486,11 @@ impl InstrNoFlowImpl for InstrEqPtrImpl { #[inline(always)] fn run_with_args<'v>( - _eval: &mut Evaluator<'v, '_>, + _eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (a, b, target): &(BcSlotIn, FrozenValue, BcSlotOut), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let a = frame.get_bc_slot(*a); let r = a.ptr_eq(b.to_value()); frame.set_bc_slot(*target, Value::new_bool(r)); @@ -527,11 +503,11 @@ impl InstrNoFlowImpl for InstrEqIntImpl { #[inline(always)] fn run_with_args<'v>( - _eval: &mut Evaluator<'v, '_>, + _eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (a, b, target): &(BcSlotIn, FrozenValueTyped<'static, PointerI32>, BcSlotOut), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let a = frame.get_bc_slot(*a); let r = if let Some(a) = a.unpack_int_value() { a.as_ref() == b.as_ref() @@ -548,11 +524,11 @@ impl
InstrNoFlowImpl for InstrEqStrImpl { #[inline(always)] fn run_with_args<'v>( - _eval: &mut Evaluator<'v, '_>, + _eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (a, b, target): &(BcSlotIn, FrozenStringValue, BcSlotOut), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let a = frame.get_bc_slot(*a); let r = if let Some(a) = StringValue::new(a) { a == b.to_string_value() @@ -576,38 +552,38 @@ pub(crate) type InstrBitNot = InstrUnOp<InstrBitNotImpl>; impl InstrUnOpImpl for InstrNotImpl { #[inline(always)] - fn eval<'v>(v: Value<'v>, _heap: &'v Heap) -> anyhow::Result<Value<'v>> { + fn eval<'v>(v: Value<'v>, _heap: &'v Heap) -> crate::Result<Value<'v>> { Ok(Value::new_bool(!v.to_bool())) } } impl InstrUnOpImpl for InstrPlusImpl { #[inline(always)] - fn eval<'v>(v: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { + fn eval<'v>(v: Value<'v>, heap: &'v Heap) -> crate::Result<Value<'v>> { v.plus(heap) } } impl InstrUnOpImpl for InstrMinusImpl { #[inline(always)] - fn eval<'v>(v: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { + fn eval<'v>(v: Value<'v>, heap: &'v Heap) -> crate::Result<Value<'v>> { v.minus(heap) } } impl InstrUnOpImpl for InstrBitNotImpl { #[inline(always)] - fn eval<'v>(v: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { + fn eval<'v>(v: Value<'v>, heap: &'v Heap) -> crate::Result<Value<'v>> { v.bit_not(heap) } } pub(crate) trait InstrBinOpImpl: 'static { - fn eval<'v>(v0: Value<'v>, v1: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>>; + fn eval<'v>(v0: Value<'v>, v1: Value<'v>, heap: &'v Heap) -> crate::Result<Value<'v>>; } pub(crate) trait InstrUnOpImpl: 'static { - fn eval<'v>(v: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>>; + fn eval<'v>(v: Value<'v>, heap: &'v Heap) -> crate::Result<Value<'v>>; } pub(crate) struct InstrBinOpWrapper<I: InstrBinOpImpl>(marker::PhantomData<I>); @@ -620,11 +596,11 @@ impl<I: InstrBinOpImpl> InstrNoFlowImpl for InstrBinOpWrapper<I> { #[inline(always)] fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (v0, v1, target): &(BcSlotIn, BcSlotIn, BcSlotOut), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let v0 = frame.get_bc_slot(*v0); let v1 = frame.get_bc_slot(*v1); let v = I::eval(v0, v1, eval.heap())?; @@ -638,11 +614,11 @@ impl<I: InstrUnOpImpl> InstrNoFlowImpl for InstrUnOpWrapper<I> { #[inline(always)] fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (source, target): &(BcSlotIn, BcSlotOut), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let source = frame.get_bc_slot(*source); let value = I::eval(source, eval.heap())?; frame.set_bc_slot(*target, value); @@ -682,98 +658,98 @@ pub(crate) type InstrIn = InstrBinOp<InstrInImpl>; impl InstrBinOpImpl for InstrAddImpl { #[inline(always)] - fn eval<'v>(l: Value<'v>, r: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { + fn eval<'v>(l: Value<'v>, r: Value<'v>, heap: &'v Heap) -> crate::Result<Value<'v>> { l.add(r, heap) } } impl InstrBinOpImpl for InstrAddAssignImpl { #[inline(always)] - fn eval<'v>(v0: Value<'v>, v1: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { + fn eval<'v>(v0: Value<'v>, v1: Value<'v>, heap: &'v Heap) -> crate::Result<Value<'v>> { add_assign(v0, v1, heap) } } impl InstrBinOpImpl for InstrSubImpl { #[inline(always)] - fn eval<'v>(v0: Value<'v>, v1: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { + fn eval<'v>(v0: Value<'v>, v1: Value<'v>, heap: &'v Heap) -> crate::Result<Value<'v>> { v0.sub(v1, heap) } } impl InstrBinOpImpl for InstrMultiplyImpl { #[inline(always)] - fn eval<'v>(v0: Value<'v>, v1: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { + fn eval<'v>(v0: Value<'v>,
v1: Value<'v>, heap: &'v Heap) -> crate::Result<Value<'v>> { v0.mul(v1, heap) } } impl InstrBinOpImpl for InstrPercentImpl { #[inline(always)] - fn eval<'v>(v0: Value<'v>, v1: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { + fn eval<'v>(v0: Value<'v>, v1: Value<'v>, heap: &'v Heap) -> crate::Result<Value<'v>> { v0.percent(v1, heap) } } impl InstrBinOpImpl for InstrFloorDivideImpl { #[inline(always)] - fn eval<'v>(v0: Value<'v>, v1: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { + fn eval<'v>(v0: Value<'v>, v1: Value<'v>, heap: &'v Heap) -> crate::Result<Value<'v>> { v0.floor_div(v1, heap) } } impl InstrBinOpImpl for InstrDivideImpl { #[inline(always)] - fn eval<'v>(v0: Value<'v>, v1: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { + fn eval<'v>(v0: Value<'v>, v1: Value<'v>, heap: &'v Heap) -> crate::Result<Value<'v>> { v0.div(v1, heap) } } impl InstrBinOpImpl for InstrBitAndImpl { #[inline(always)] - fn eval<'v>(v0: Value<'v>, v1: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { + fn eval<'v>(v0: Value<'v>, v1: Value<'v>, heap: &'v Heap) -> crate::Result<Value<'v>> { v0.bit_and(v1, heap) } } impl InstrBinOpImpl for InstrBitOrImpl { #[inline(always)] - fn eval<'v>(v0: Value<'v>, v1: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { + fn eval<'v>(v0: Value<'v>, v1: Value<'v>, heap: &'v Heap) -> crate::Result<Value<'v>> { v0.bit_or(v1, heap) } } impl InstrBinOpImpl for InstrBitOrAssignImpl { #[inline(always)] - fn eval<'v>(v0: Value<'v>, v1: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { + fn eval<'v>(v0: Value<'v>, v1: Value<'v>, heap: &'v Heap) -> crate::Result<Value<'v>> { bit_or_assign(v0, v1, heap) } } impl InstrBinOpImpl for InstrBitXorImpl { #[inline(always)] - fn eval<'v>(v0: Value<'v>, v1: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { + fn eval<'v>(v0: Value<'v>, v1: Value<'v>, heap: &'v Heap) -> crate::Result<Value<'v>> { v0.bit_xor(v1, heap) } } impl InstrBinOpImpl for InstrLeftShiftImpl { #[inline(always)] - fn eval<'v>(v0: Value<'v>, v1: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { + fn eval<'v>(v0: Value<'v>, v1: Value<'v>, heap: &'v Heap) -> crate::Result<Value<'v>> { v0.left_shift(v1, heap) } } impl InstrBinOpImpl for InstrRightShiftImpl { #[inline(always)] - fn eval<'v>(v0: Value<'v>, v1: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { + fn eval<'v>(v0: Value<'v>, v1: Value<'v>, heap: &'v Heap) -> crate::Result<Value<'v>> { v0.right_shift(v1, heap) } } impl InstrBinOpImpl for InstrInImpl { #[inline(always)] - fn eval<'v>(v0: Value<'v>, v1: Value<'v>, _heap: &'v Heap) -> anyhow::Result<Value<'v>> { + fn eval<'v>(v0: Value<'v>, v1: Value<'v>, _heap: &'v Heap) -> crate::Result<Value<'v>> { Ok(Value::new_bool(v1.is_in(v0)?)) } } @@ -788,11 +764,11 @@ impl InstrNoFlowImpl for InstrPercentSOneImpl { #[inline(always)] fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (before, arg, after, target): &(FrozenStringValue, BcSlotIn, FrozenStringValue, BcSlotOut), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let arg = frame.get_bc_slot(*arg); let r = percent_s_one(before.as_str(), arg, after.as_str(), eval.heap())?; frame.set_bc_slot(*target, r.to_value()); @@ -805,11 +781,11 @@ impl InstrNoFlowImpl for InstrFormatOneImpl { #[inline(always)] fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (before, arg, after, target): &(FrozenStringValue, BcSlotIn, FrozenStringValue, BcSlotOut), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let arg = frame.get_bc_slot(*arg); let r = format_one(before.as_str(), arg, after.as_str(),
eval.heap()); frame.set_bc_slot(*target, r.to_value()); @@ -825,7 +801,7 @@ pub(crate) struct InstrCompare<I: InstrCompareImpl>(marker::PhantomData<I>); impl<I: InstrCompareImpl> InstrBinOpImpl for InstrCompare<I> { #[inline(always)] - fn eval<'v>(v0: Value<'v>, v1: Value<'v>, _heap: &'v Heap) -> anyhow::Result<Value<'v>> { + fn eval<'v>(v0: Value<'v>, v1: Value<'v>, _heap: &'v Heap) -> crate::Result<Value<'v>> { Ok(Value::new_bool(I::eval_compare(v0.compare(v1)?))) } } @@ -873,7 +849,7 @@ pub(crate) type InstrType = InstrUnOp<InstrTypeImpl>; impl InstrUnOpImpl for InstrTypeImpl { #[inline(always)] - fn eval<'v>(v: Value<'v>, _heap: &'v Heap) -> anyhow::Result<Value<'v>> { + fn eval<'v>(v: Value<'v>, _heap: &'v Heap) -> crate::Result<Value<'v>> { Ok(v.get_type_value().to_frozen_value().to_value()) } } @@ -886,11 +862,11 @@ impl InstrNoFlowImpl for InstrTypeIsImpl { #[inline(always)] fn run_with_args<'v>( - _eval: &mut Evaluator<'v, '_>, + _eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _: BcPtrAddr, (arg, t, target): &(BcSlotIn, FrozenStringValue, BcSlotOut), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let arg = frame.get_bc_slot(*arg); let r = arg.get_type_value() == *t; frame.set_bc_slot(*target, Value::new_bool(r)); @@ -906,11 +882,11 @@ impl InstrNoFlowImpl for InstrIsInstanceImpl { #[inline(always)] fn run_with_args<'v>( - _eval: &mut Evaluator<'v, '_>, + _eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _: BcPtrAddr, (arg, t, target): &(BcSlotIn, TypeCompiled<FrozenValue>, BcSlotOut), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let arg = frame.get_bc_slot(*arg); let r = t.matches(arg); frame.set_bc_slot(*target, Value::new_bool(r)); @@ -923,7 +899,7 @@ pub(crate) type InstrLen = InstrUnOp<InstrLenImpl>; impl InstrUnOpImpl for InstrLenImpl { #[inline(always)] - fn eval<'v>(v: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { + fn eval<'v>(v: Value<'v>, heap: &'v Heap) -> crate::Result<Value<'v>> { Ok(heap.alloc(v.length()?)) } } @@ -951,11 +927,11 @@ impl InstrNoFlowImpl for InstrTupleNPopImpl { #[inline(always)] fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _: BcPtrAddr, (values, target): &(BcSlotInRange, BcSlotOut), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let items = frame.get_bc_slot_range(*values); let value = eval.heap().alloc_tuple(items); frame.set_bc_slot(*target, value); @@ -968,11 +944,11 @@ impl InstrNoFlowImpl for InstrListNPopImpl { #[inline(always)] fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _: BcPtrAddr, (values, target): &(BcSlotInRange, BcSlotOut), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let items = frame.get_bc_slot_range(*values); let value = eval.heap().alloc_list(items); frame.set_bc_slot(*target, value); @@ -985,11 +961,11 @@ impl InstrNoFlowImpl for InstrListOfConstsImpl { #[inline(always)] fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _: BcPtrAddr, (values, target): &(Box<[FrozenValue]>, BcSlotOut), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let list = eval.heap().alloc_list(coerce(&values)); frame.set_bc_slot(*target, list); Ok(()) @@ -1001,11 +977,11 @@ impl InstrNoFlowImpl for InstrDictOfConstsImpl { #[inline(always)] fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _: BcPtrAddr, (values, target): &(SmallMap<FrozenValue, FrozenValue>, BcSlotOut), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let dict = eval.heap().alloc(Dict::new((*coerce(values)).clone()));
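The `InstrEqConst`/`InstrEqPtr`/`InstrEqInt`/`InstrEqStr` instructions above are specializations that let comparisons against compile-time constants skip the generic (potentially user-defined) `equals` protocol. A standalone sketch of the idea, with a simplified enum standing in for starlark's `Value`:

// Simplified stand-in for starlark's `Value`; the real fast paths operate on
// tagged pointers, but the dispatch logic is the same in spirit.
#[derive(Copy, Clone)]
enum SimpleValue<'v> {
    Int(i32),
    Str(&'v str),
}

// Mirrors `InstrEqInt`: a value that is not an int can never equal an int
// constant, so no fallback to the generic protocol is required.
fn eq_int(a: SimpleValue, b: i32) -> bool {
    matches!(a, SimpleValue::Int(x) if x == b)
}

// Mirrors `InstrEqStr`: the same shortcut for string constants.
fn eq_str(a: SimpleValue, b: &str) -> bool {
    matches!(a, SimpleValue::Str(s) if s == b)
}

fn main() {
    assert!(eq_int(SimpleValue::Int(3), 3));
    assert!(!eq_str(SimpleValue::Int(3), "3"));
}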
frame.set_bc_slot(*target, dict); Ok(()) @@ -1016,11 +992,11 @@ impl InstrNoFlowImpl for InstrDictNPopImpl { type Arg = (BcSlotInRange, BcSlotOut); fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, ip: BcPtrAddr, (npops, target): &(BcSlotInRange, BcSlotOut), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let items = frame.get_bc_slot_range(*npops); debug_assert!(items.len() % 2 == 0); let mut dict = SmallMap::with_capacity(items.len() / 2); @@ -1031,14 +1007,15 @@ impl InstrNoFlowImpl for InstrDictNPopImpl { Ok(k) => k, Err(e) => { let spans = &Bc::slow_arg_at_ptr(ip).spans; - return Err(add_span_to_expr_error(e, spans[i], eval).into_anyhow()); + return Err(add_span_to_expr_error(e, spans[i], eval).into_error()); } }; let prev = dict.insert_hashed(k, v); if prev.is_some() { - let e = EvalError::DuplicateDictionaryKey(k.key().to_string()).into(); + let e = + crate::Error::new_other(EvalError::DuplicateDictionaryKey(k.key().to_string())); let spans = &Bc::slow_arg_at_ptr(ip).spans; - return Err(add_span_to_expr_error(e, spans[i], eval).into_anyhow()); + return Err(add_span_to_expr_error(e, spans[i], eval).into_error()); } } let dict = eval.heap().alloc(Dict::new(dict)); @@ -1051,11 +1028,11 @@ impl InstrNoFlowImpl for InstrDictConstKeysImpl { type Arg = (Box<[Hashed<FrozenValue>]>, BcSlotInRangeFrom, BcSlotOut); fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _: BcPtrAddr, (keys, values, target): &(Box<[Hashed<FrozenValue>]>, BcSlotInRangeFrom, BcSlotOut), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let values = frame.get_bc_slot_range(values.to_range(keys.len() as u32)); let mut dict = SmallMap::with_capacity(keys.len()); for (k, v) in keys.iter().zip(values) { @@ -1073,11 +1050,11 @@ impl InstrNoFlowImpl for InstrListNewImpl { #[inline(always)] fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _: BcPtrAddr, target: &BcSlotOut, - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let list = eval.heap().alloc_list(&[]); frame.set_bc_slot(*target, list); Ok(()) @@ -1089,11 +1066,11 @@ impl InstrNoFlowImpl for InstrDictNewImpl { #[inline(always)] fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _: BcPtrAddr, target: &BcSlotOut, - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let dict = eval.heap().alloc(Dict::default()); frame.set_bc_slot(*target, dict); Ok(()) @@ -1108,7 +1085,7 @@ impl BcInstr for InstrComprListAppend { #[inline(always)] fn run<'v, 'b>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, ip: BcPtrAddr<'b>, (list, item): &(BcSlotIn, BcSlotIn), @@ -1128,7 +1105,7 @@ impl BcInstr for InstrComprDictInsert { #[inline(always)] fn run<'v, 'b>( - _eval: &mut Evaluator<'v, '_>, + _eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr<'b>, (dict, key, value): &(BcSlotIn, BcSlotIn, BcSlotIn), @@ -1156,14 +1133,14 @@ impl InstrNoFlowImpl for InstrCheckTypeImpl { #[inline(always)] fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (expr, ty): &(BcSlotIn, TypeCompiled<FrozenValue>), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let expr = frame.get_bc_slot(*expr); let start = if eval.typecheck_profile.enabled { - Some(Instant::now()) + Some(ProfilerInstant::now()) } else { None }; @@ -1172,7 +1149,7 @@ impl
InstrNoFlowImpl for InstrCheckTypeImpl { let name = const_frozen_string!("assignment"); eval.typecheck_profile.add(name, start.elapsed()); } - res + res.map_err(Into::into) } } @@ -1185,7 +1162,7 @@ impl BcInstr for InstrBr { #[inline(always)] fn run<'v, 'b>( - _eval: &mut Evaluator<'v, '_>, + _eval: &mut Evaluator<'v, '_, '_>, _frame: BcFramePtr<'v>, ip: BcPtrAddr<'b>, target: &BcAddrOffset, @@ -1199,7 +1176,7 @@ impl BcInstr for InstrIfBr { #[inline(always)] fn run<'v, 'b>( - _eval: &mut Evaluator<'v, '_>, + _eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, ip: BcPtrAddr<'b>, (cond, target): &(BcSlotIn, BcAddrOffset), @@ -1218,7 +1195,7 @@ impl BcInstr for InstrIfNotBr { #[inline(always)] fn run<'v, 'b>( - _eval: &mut Evaluator<'v, '_>, + _eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, ip: BcPtrAddr<'b>, (cond, target): &(BcSlotIn, BcAddrOffset), @@ -1246,7 +1223,7 @@ impl BcInstr for InstrIter { #[inline(always)] fn run<'v, 'b>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, ip: BcPtrAddr<'b>, (over, loop_depth, iter_slot, var, end): &( @@ -1288,7 +1265,7 @@ impl BcInstr for InstrContinue { #[inline(always)] fn run<'v, 'b>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, ip: BcPtrAddr<'b>, (iter, loop_depth, var, begin, end): &( @@ -1321,7 +1298,7 @@ impl BcInstr for InstrBreak { #[inline(always)] fn run<'v, 'b>( - _eval: &mut Evaluator<'v, '_>, + _eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, ip: BcPtrAddr<'b>, (iter, end): &(BcSlotIn, BcAddrOffset), @@ -1337,7 +1314,7 @@ impl BcInstr for InstrIterStop { #[inline(always)] fn run<'v, 'b>( - _eval: &mut Evaluator<'v, '_>, + _eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, ip: BcPtrAddr<'b>, iter: &BcSlotIn, @@ -1356,7 +1333,7 @@ impl BcInstr for InstrReturnConst { type Arg = FrozenValue; fn run<'v, 'b>( - _eval: &mut Evaluator<'v, '_>, + _eval: &mut Evaluator<'v, '_, '_>, _frame: BcFramePtr<'v>, _ip: BcPtrAddr<'b>, value: &FrozenValue, @@ -1370,7 +1347,7 @@ impl BcInstr for InstrReturn { #[inline(always)] fn run<'v, 'b>( - _eval: &mut Evaluator<'v, '_>, + _eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr<'b>, &slot: &BcSlotIn, @@ -1385,14 +1362,14 @@ impl BcInstr for InstrReturnCheckType { #[inline(always)] fn run<'v, 'b>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr<'b>, &slot: &BcSlotIn, ) -> InstrControl<'v, 'b> { let v = frame.get_bc_slot(slot); if let Err(e) = eval.check_return_type(v) { - return InstrControl::Err(e); + return InstrControl::Err(e.into()); } InstrControl::Return(v) } @@ -1413,18 +1390,17 @@ impl InstrNoFlowImpl for InstrDefImpl { type Arg = (BcSlotInRange, InstrDefData, BcSlotOut); fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (pops, def_data, target): &(BcSlotInRange, InstrDefData, BcSlotOut), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let pop = frame.get_bc_slot_range(*pops); let mut parameters = ParametersSpec::with_capacity( def_data.function_name.clone(), def_data.params.params.len(), ); - parameters.no_more_positional_only_args(); let mut parameter_types = Vec::new(); let mut pop_index = 0; @@ -1432,7 +1408,11 @@ impl InstrNoFlowImpl for InstrDefImpl { for (i, x) in def_data.params.params.iter().enumerate() { let i = i as u32; - if i == def_data.params.num_positional && !x.is_star_or_star_star() { + if i == 
def_data.params.indices.num_positional_only && !x.is_star_or_star_star() { + parameters.no_more_positional_only_args(); + } + + if i == def_data.params.indices.num_positional && !x.is_star_or_star_star() { parameters.no_more_positional_args(); } @@ -1441,8 +1421,8 @@ impl InstrNoFlowImpl for InstrDefImpl { } match &x.node { - ParameterCompiled::Normal(n, _) => parameters.required(&n.name), - ParameterCompiled::WithDefaultValue(n, ty, v) => { + ParameterCompiled::Normal(n, _, None) => parameters.required(&n.name), + ParameterCompiled::Normal(n, ty, Some(v)) => { assert!(*v == pop_index); let value = pop[pop_index as usize]; pop_index += 1; @@ -1450,8 +1430,12 @@ impl InstrNoFlowImpl for InstrDefImpl { if ty.is_some() { // Check the type of the default let (_, _, ty_compiled) = parameter_types.last().unwrap(); - expr_throw(ty_compiled.check_type(value, Some(&n.name)), x.span, eval) - .map_err(EvalException::into_anyhow)?; + expr_throw_starlark_result( + ty_compiled.check_type(value, Some(&n.name)), + x.span, + eval, + ) + .map_err(EvalException::into_error)?; } parameters.defaulted(&n.name, value); } @@ -1467,7 +1451,7 @@ impl InstrNoFlowImpl for InstrDefImpl { return_type, def_data.info, eval, - )); + )?); frame.set_bc_slot(*target, def); Ok(()) } @@ -1479,8 +1463,8 @@ pub(crate) trait BcFrozenCallable: BcInstrArg + Copy { self, location: FrozenRef<'static, FrameSpan>, args: &Arguments<'v, '_>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result<Value<'v>>; + eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result<Value<'v>>; } impl BcFrozenCallable for FrozenValue { @@ -1489,8 +1473,8 @@ impl BcFrozenCallable for FrozenValue { self, location: FrozenRef<'static, FrameSpan>, args: &Arguments<'v, '_>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result<Value<'v>> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result<Value<'v>> { self.to_value().invoke_with_loc(Some(location), args, eval) } } @@ -1501,8 +1485,8 @@ impl BcFrozenCallable for FrozenValueTyped<'static, FrozenDef> { self, location: FrozenRef<'static, FrameSpan>, args: &Arguments<'v, '_>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result<Value<'v>> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result<Value<'v>> { eval.with_call_stack(self.to_value(), Some(location), |eval| { self.as_ref().invoke(self.to_value(), args, eval) }) @@ -1515,8 +1499,8 @@ impl BcFrozenCallable for BcNativeFunction { self, location: FrozenRef<'static, FrameSpan>, args: &Arguments<'v, '_>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result<Value<'v>> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result<Value<'v>> { eval.with_call_stack(self.to_value(), Some(location), |eval| { self.invoke(args, eval) }) @@ -1556,11 +1540,11 @@ impl<A: BcCallArgs<Symbol>> InstrNoFlowImpl for InstrCallImpl<A>
{ #[inline(always)] fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (this, args, span, target): &(BcSlotIn, A, FrozenRef<'static, FrameSpan>, BcSlotOut), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let f = frame.get_bc_slot(*this); let arguments = Arguments(args.pop_from_stack(frame)); let r = f.invoke_with_loc(Some(*span), &arguments, eval)?; @@ -1576,11 +1560,11 @@ impl<F: BcFrozenCallable, A: BcCallArgs<Symbol>> InstrNoFlowImpl #[inline(always)] fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (fun, args, span, target): &(F, A, FrozenRef<'static, FrameSpan>, BcSlotOut), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let arguments = Arguments(args.pop_from_stack(frame)); let r = fun.bc_invoke(*span, &arguments, eval)?; frame.set_bc_slot(*target, r); @@ -1598,7 +1582,7 @@ impl<A: BcCallArgsForDef> InstrNoFlowImpl for InstrCallFrozenDefImpl<A> { #[inline(always)] fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (fun, args, span, target): &( @@ -1607,7 +1591,7 @@ impl<A: BcCallArgsForDef> InstrNoFlowImpl for InstrCallFrozenDefImpl<A> { FrozenRef<'static, FrameSpan>, BcSlotOut, ), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let arguments = args.pop_from_stack(frame); let r = eval.with_call_stack(fun.to_value(), Some(*span), |eval| { fun.as_ref() @@ -1621,20 +1605,17 @@ impl<A: BcCallArgsForDef> InstrNoFlowImpl for InstrCallFrozenDefImpl<A> { /// Common part of method invocation instructions. #[inline(always)] fn call_method_common<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, this: Value<'v>, symbol: &Symbol, arguments: &Arguments<'v, '_>, span: FrozenRef<'static, FrameSpan>, target: BcSlotOut, -) -> anyhow::Result<()> { +) -> crate::Result<()> { // TODO: wrong span: should be span of `object.method`, not of the whole expression let method = get_attr_hashed_raw(this, symbol, eval.heap())?; - let r = match method { - MemberOrValue::Member(member) => member.invoke_method(this, span, arguments, eval)?, - MemberOrValue::Value(value) => value.invoke_with_loc(Some(span), arguments, eval)?, - }; + let r = method.invoke(this, span, arguments, eval)?; frame.set_bc_slot(target, r); Ok(()) } @@ -1642,7 +1623,7 @@ fn call_method_common<'v>( /// Common part of method invocation instructions where the method is likely a stdlib method. #[inline(always)] fn call_maybe_known_method_common<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, this: Value<'v>, symbol: &Symbol, @@ -1650,7 +1631,7 @@ fn call_maybe_known_method_common<'v>( arguments: &Arguments<'v, '_>, span: FrozenRef<'static, FrameSpan>, target: BcSlotOut, -) -> anyhow::Result<()> { +) -> crate::Result<()> { if let Some(methods) = this.vtable().methods() { // Instead of method lookup by name, we compare `Methods` pointers.
// If pointers are equal, getattr would return the same method @@ -1678,7 +1659,7 @@ impl<A: BcCallArgs<Symbol>> InstrNoFlowImpl for InstrCallMethodImpl<A> { #[inline(always)] fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (this, symbol, args, span, target): &( @@ -1688,7 +1669,7 @@ impl<A: BcCallArgs<Symbol>> InstrNoFlowImpl for InstrCallMethodImpl<A> { FrozenRef<'static, FrameSpan>, BcSlotOut, ), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let this = frame.get_bc_slot(*this); let arguments = Arguments(args.pop_from_stack(frame)); call_method_common(eval, frame, this, symbol, &arguments, *span, *target) @@ -1707,7 +1688,7 @@ impl<A: BcCallArgs<Symbol>> InstrNoFlowImpl for InstrCallMaybeKnownMethodImpl<A> #[inline(always)] fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, frame: BcFramePtr<'v>, _ip: BcPtrAddr, (this, symbol, known_method, args, span, target): &( @@ -1718,7 +1699,7 @@ impl<A: BcCallArgs<Symbol>> InstrNoFlowImpl for InstrCallMaybeKnownMethodImpl<A> FrozenRef<'static, FrameSpan>, BcSlotOut, ), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { let this = frame.get_bc_slot(*this); let arguments = Arguments(args.pop_from_stack(frame)); call_maybe_known_method_common( @@ -1742,11 +1723,11 @@ impl InstrNoFlowImpl for InstrPossibleGcImpl { type Arg = (); fn run_with_args<'v>( - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, _frame: BcFramePtr<'v>, _ip: BcPtrAddr, (): &(), - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { possible_gc(eval); Ok(()) } @@ -1757,14 +1738,14 @@ impl InstrNoFlowImpl for InstrPossibleGcImpl { /// we don't have a pointer to the bytecode object. To obtain spans by IP, we scan /// through the instructions until we encounter this pseudo-instruction. /// * as a safety against memory overruns. Function block must terminate with return instruction, -/// but if return was missed, this instruction is executed and it panics. +/// but if return was missed, this instruction is executed and it panics.
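A minimal sketch of the "scan forward to the end marker" technique that the `InstrEnd` comment above describes; the opcode layout here is invented for illustration and is not starlark-rust's actual bytecode representation:

// Hypothetical opcode layout: the `End` pseudo-instruction carries the
// per-function metadata (here, spans indexed by instruction pointer).
#[derive(Debug)]
enum Opcode {
    Const,
    Return,
    End { spans: &'static [(u32, u32)] },
}

/// Given only an instruction pointer (index), walk forward to the `End`
/// marker to find the metadata table, then look up the span for `ip`.
fn span_for_ip(code: &[Opcode], ip: usize) -> Option<(u32, u32)> {
    for instr in &code[ip..] {
        if let Opcode::End { spans } = instr {
            return spans.get(ip).copied();
        }
    }
    None // a well-formed function always terminates with `End`
}

fn main() {
    let code = [
        Opcode::Const,
        Opcode::Return,
        Opcode::End { spans: &[(0, 5), (5, 9)] },
    ];
    assert_eq!(span_for_ip(&code, 1), Some((5, 9)));
}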
pub(crate) struct InstrEnd; impl BcInstr for InstrEnd { type Arg = BcInstrEndArg; fn run<'v, 'b>( - _eval: &mut Evaluator<'v, '_>, + _eval: &mut Evaluator<'v, '_, '_>, _frame: BcFramePtr<'v>, _ip: BcPtrAddr<'b>, _: &Self::Arg, diff --git a/starlark-rust/starlark/src/eval/bc/instrs.rs b/starlark-rust/starlark/src/eval/bc/instrs.rs index 74883ab61deaa..ea6b5729dc03c 100644 --- a/starlark-rust/starlark/src/eval/bc/instrs.rs +++ b/starlark-rust/starlark/src/eval/bc/instrs.rs @@ -390,7 +390,7 @@ mod tests { fn display() { let heap = FrozenHeap::new(); let local_names = heap - .alloc_any_display_from_debug(vec![const_frozen_string!("abc")]) + .alloc_any(vec![const_frozen_string!("abc")]) .map(|s| s.as_slice()); let mut bc = BcInstrsWriter::new(); bc.write::<InstrConst>((FrozenValue::new_bool(true), BcSlot(0).to_out())); let bc = bc.finish(Vec::new(), BcStatementLocations::new(), local_names); if mem::size_of::<usize>() == 8 { assert_eq!( - "0: Const True &abc; 24: Return &abc; 32: End", + "0: Const True ->&abc; 24: Return &abc; 32: End", bc.to_string() ); assert_eq!( - "0: Const True &abc\n24: Return &abc\n32: End\n", + "0: Const True ->&abc\n24: Return &abc\n32: End\n", bc.dump_debug() ); } else if mem::size_of::<usize>() == 4 { diff --git a/starlark-rust/starlark/src/eval/bc/mod.rs b/starlark-rust/starlark/src/eval/bc/mod.rs deleted file mode 100644 index f82a3f27c5e65..0000000000000 --- a/starlark-rust/starlark/src/eval/bc/mod.rs +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! Bytecode interpreter.
- -pub(crate) mod addr; -pub(crate) mod bytecode; -pub(crate) mod call; -pub(crate) mod compiler; -pub(crate) mod definitely_assigned; -pub(crate) mod for_loop; -pub(crate) mod frame; -pub(crate) mod if_debug; -pub(crate) mod instr; -pub(crate) mod instr_arg; -pub(crate) mod instr_impl; -pub(crate) mod instrs; -pub(crate) mod native_function; -pub(crate) mod opcode; -pub(crate) mod repr; -pub(crate) mod slow_arg; -pub(crate) mod stack_ptr; -pub(crate) mod stack_values; -pub(crate) mod writer; diff --git a/starlark-rust/starlark/src/eval/bc/native_function.rs b/starlark-rust/starlark/src/eval/bc/native_function.rs index ee63caf445a09..6c5cd89fa19ba 100644 --- a/starlark-rust/starlark/src/eval/bc/native_function.rs +++ b/starlark-rust/starlark/src/eval/bc/native_function.rs @@ -54,8 +54,8 @@ impl BcNativeFunction { pub(crate) fn invoke<'v>( &self, args: &Arguments<'v, '_>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result> { self.imp.invoke(eval, args) } } diff --git a/starlark-rust/starlark/src/eval/bc/opcode.rs b/starlark-rust/starlark/src/eval/bc/opcode.rs index 40eb51c54e95b..40335e1f69469 100644 --- a/starlark-rust/starlark/src/eval/bc/opcode.rs +++ b/starlark-rust/starlark/src/eval/bc/opcode.rs @@ -46,7 +46,6 @@ pub(crate) enum BcOpcode { ArrayIndexSet, Slice, ObjectField, - ObjectFieldRaw, SetObjectField, Eq, EqConst, diff --git a/starlark-rust/starlark/src/eval/bc/stack_ptr.rs b/starlark-rust/starlark/src/eval/bc/stack_ptr.rs index 1efa546f709af..20b470255d672 100644 --- a/starlark-rust/starlark/src/eval/bc/stack_ptr.rs +++ b/starlark-rust/starlark/src/eval/bc/stack_ptr.rs @@ -36,7 +36,7 @@ use dupe::Dupe; Hash, derive_more::Display )] -#[display(fmt = "&{}", _0)] +#[display("&{}", _0)] pub(crate) struct BcSlot(pub(crate) u32); impl BcSlot { @@ -79,7 +79,7 @@ impl BcSlotsN { } #[derive(Copy, Clone, Dupe, Debug, derive_more::Display)] -#[display(fmt = "{}..{}", start, end)] +#[display("{}..{}", start, end)] pub(crate) struct BcSlotRange { pub(crate) start: BcSlot, pub(crate) end: BcSlot, @@ -103,9 +103,6 @@ impl BcSlotRange { } } -#[derive(Copy, Clone, Dupe, Debug)] -pub(crate) struct BcSlotRangeFrom(pub(crate) BcSlot); - /// Slot containing a value. /// /// The slot may be a local variable, so this slot cannot be used to store a temporary value. @@ -132,7 +129,7 @@ impl BcSlotIn { } #[derive(Copy, Clone, Dupe, Debug, derive_more::Display)] -#[display(fmt = "{}..{}", start, end)] +#[display("{}..{}", start, end)] pub(crate) struct BcSlotInRange { pub(crate) start: BcSlotIn, pub(crate) end: BcSlotIn, diff --git a/starlark-rust/starlark/src/eval/bc/stack_values.rs b/starlark-rust/starlark/src/eval/bc/stack_values.rs deleted file mode 100644 index 0f5bbe8ec4db2..0000000000000 --- a/starlark-rust/starlark/src/eval/bc/stack_values.rs +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
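The `#[display(fmt = ...)]` → `#[display(...)]` rewrites in the stack_ptr.rs hunks above follow derive_more's attribute change: its 1.x releases drop the `fmt =` key. A standalone example of the new syntax (assumes the `derive_more` crate with the `display` feature enabled):

use derive_more::Display;

// Same attribute shape as the `BcSlot` change above: positional format
// arguments, no `fmt =` key.
#[derive(Display)]
#[display("&{}", _0)]
struct Slot(u32);

fn main() {
    assert_eq!(Slot(7).to_string(), "&7");
}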
- */ - -//! Instruction arguments for working with stack. - -use crate::values::Value; - -/// Instruction parameter for values popped from stack or pushed to stack. -pub(crate) trait BcStackValues<'v>: Copy { - /// How many values. - const N: u32; -} - -impl<'v> BcStackValues<'v> for () { - const N: u32 = 0; -} - -impl<'v> BcStackValues<'v> for Value<'v> { - const N: u32 = 1; -} - -impl<'v, const N: usize> BcStackValues<'v> for [Value<'v>; N] { - const N: u32 = N as u32; -} diff --git a/starlark-rust/starlark/src/eval/compiler.rs b/starlark-rust/starlark/src/eval/compiler.rs new file mode 100644 index 0000000000000..5ec92cb50499a --- /dev/null +++ b/starlark-rust/starlark/src/eval/compiler.rs @@ -0,0 +1,109 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +pub(crate) mod args; +pub(crate) mod call; +pub(crate) mod compr; +pub(crate) mod constants; +pub(crate) mod def; +pub(crate) mod def_inline; +pub(crate) mod error; +pub(crate) mod expr; +pub(crate) mod expr_bool; +pub(crate) mod known; +pub(crate) mod module; +pub(crate) mod opt_ctx; +pub(crate) mod scope; +pub(crate) mod small_vec_1; +pub(crate) mod span; +pub(crate) mod stmt; +pub(crate) mod types; + +use starlark_syntax::eval_exception::EvalException; + +use crate::codemap::CodeMap; +use crate::environment::Globals; +use crate::eval::compiler::scope::ModuleScopeData; +use crate::eval::compiler::scope::ScopeId; +use crate::eval::compiler::scope::ScopeNames; +use crate::eval::runtime::frame_span::FrameSpan; +use crate::eval::Evaluator; +use crate::values::FrozenRef; + +#[cold] +#[inline(never)] +pub(crate) fn add_span_to_expr_error( + e: crate::Error, + span: FrameSpan, + eval: &Evaluator, +) -> EvalException { + EvalException::new_with_callstack(e, span.span.span(), &span.span.file(), || { + eval.call_stack.to_diagnostic_frames(span.inlined_frames) + }) +} + +/// Convert syntax error to spanned evaluation exception +#[inline(always)] +pub(crate) fn expr_throw<'v, T>( + r: anyhow::Result<T>, + span: FrameSpan, + eval: &Evaluator<'v, '_, '_>, +) -> Result<T, EvalException> { + match r { + Ok(v) => Ok(v), + Err(e) => Err(add_span_to_expr_error(e.into(), span, eval)), + } +} + +/// Convert syntax error to spanned evaluation exception +#[inline(always)] +pub(crate) fn expr_throw_starlark_result<'v, T>( + r: crate::Result<T>, + span: FrameSpan, + eval: &Evaluator<'v, '_, '_>, +) -> Result<T, EvalException> { + match r { + Ok(v) => Ok(v), + Err(e) => Err(add_span_to_expr_error(e, span, eval)), + } +} + +pub(crate) struct Compiler<'v, 'a, 'e, 'x> { + pub(crate) eval: &'x mut Evaluator<'v, 'a, 'e>, + pub(crate) scope_data: ModuleScopeData<'v>, + pub(crate) locals: Vec<ScopeId>, + pub(crate) globals: FrozenRef<'static, Globals>, + pub(crate) codemap: FrozenRef<'static, CodeMap>, + pub(crate) check_types: bool, + pub(crate) top_level_stmt_count: usize, + /// Set with `@starlark-rust: typecheck`.
+ pub(crate) typecheck: bool, +} + +impl Compiler<'_, '_, '_, '_> { + pub(crate) fn enter_scope(&mut self, scope_id: ScopeId) { + self.locals.push(scope_id); + } + + pub(crate) fn exit_scope(&mut self) -> ScopeId { + self.locals.pop().unwrap() + } + + pub(crate) fn current_scope(&self) -> &ScopeNames { + self.scope_data.get_scope(*self.locals.last().unwrap()) + } +} diff --git a/starlark-rust/starlark/src/eval/compiler/args.rs b/starlark-rust/starlark/src/eval/compiler/args.rs index ca03aa764a2aa..8e88d7b115c07 100644 --- a/starlark-rust/starlark/src/eval/compiler/args.rs +++ b/starlark-rust/starlark/src/eval/compiler/args.rs @@ -18,12 +18,14 @@ use starlark_derive::VisitSpanMut; use starlark_syntax::slice_vec_ext::SliceExt; use starlark_syntax::syntax::ast::ArgumentP; +use starlark_syntax::syntax::ast::CallArgsP; use crate::coerce::coerce; -use crate::collections::symbol_map::Symbol; +use crate::collections::symbol::symbol::Symbol; +use crate::eval::compiler::error::CompilerInternalError; use crate::eval::compiler::expr::ExprCompiled; use crate::eval::compiler::opt_ctx::OptCtx; -use crate::eval::compiler::scope::payload::CstArgument; +use crate::eval::compiler::scope::payload::CstPayload; use crate::eval::compiler::span::IrSpanned; use crate::eval::compiler::Compiler; use crate::eval::runtime::arguments::ArgNames; @@ -120,7 +122,7 @@ impl ArgsCompiledValue { Some(handler(&Arguments(ArgumentsFull { pos: &pos, named: &named, - names: ArgNames::new(coerce(&self.names)), + names: ArgNames::new_unique(coerce(&self.names)), args, kwargs, }))) @@ -163,25 +165,28 @@ impl ArgsCompiledValue { } } -impl Compiler<'_, '_, '_> { - pub(crate) fn args(&mut self, args: &[CstArgument]) -> ArgsCompiledValue { +impl Compiler<'_, '_, '_, '_> { + pub(crate) fn args( + &mut self, + args: &CallArgsP<CstPayload>, + ) -> Result<ArgsCompiledValue, CompilerInternalError> { let mut res = ArgsCompiledValue::default(); - for x in args { + for x in &args.args { match &x.node { - ArgumentP::Positional(x) => res.pos_named.push(self.expr(x)), + ArgumentP::Positional(x) => res.pos_named.push(self.expr(x)?), ArgumentP::Named(name, value) => { let fv = self .eval .module_env .frozen_heap() - .alloc_str(name.node.as_str()); + .alloc_str_intern(name.node.as_str()); res.names.push((Symbol::new(&name.node), fv)); - res.pos_named.push(self.expr(value)); + res.pos_named.push(self.expr(value)?); } - ArgumentP::Args(x) => res.args = Some(self.expr(x)), - ArgumentP::KwArgs(x) => res.kwargs = Some(self.expr(x)), + ArgumentP::Args(x) => res.args = Some(self.expr(x)?), + ArgumentP::KwArgs(x) => res.kwargs = Some(self.expr(x)?), } } - res + Ok(res) } } diff --git a/starlark-rust/starlark/src/eval/compiler/call.rs b/starlark-rust/starlark/src/eval/compiler/call.rs index 1374f5e6fd19f..d656a706718fd 100644 --- a/starlark-rust/starlark/src/eval/compiler/call.rs +++ b/starlark-rust/starlark/src/eval/compiler/call.rs @@ -17,12 +17,10 @@ //! Compile function calls.
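The switch from `alloc_str` to `alloc_str_intern` in the args.rs hunk above makes repeated argument names share one frozen allocation. A rough sketch of interning on a frozen heap, with a deliberately simplified heap type (leaking stands in for the arena's "frozen forever" lifetime; this is not starlark-rust's actual implementation):

use std::collections::HashMap;

#[derive(Default)]
struct FrozenHeap {
    interned: HashMap<String, &'static str>,
}

impl FrozenHeap {
    // Returns the same allocation for repeated strings, so identical
    // argument names compiled at different call sites share storage.
    fn alloc_str_intern(&mut self, s: &str) -> &'static str {
        if let Some(&existing) = self.interned.get(s) {
            return existing;
        }
        let leaked: &'static str = Box::leak(s.to_owned().into_boxed_str());
        self.interned.insert(s.to_owned(), leaked);
        leaked
    }
}

fn main() {
    let mut heap = FrozenHeap::default();
    let a = heap.alloc_str_intern("name");
    let b = heap.alloc_str_intern("name");
    // Same pointer for the same contents.
    assert!(std::ptr::eq(a, b));
}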
-use std::cell::Cell; - use starlark_derive::VisitSpanMut; use starlark_syntax::slice_vec_ext::VecExt; -use crate::collections::symbol_map::Symbol; +use crate::collections::symbol::symbol::Symbol; use crate::eval::compiler::args::ArgsCompiledValue; use crate::eval::compiler::def_inline::local_as_value::local_as_value; use crate::eval::compiler::def_inline::InlineDefBody; @@ -178,15 +176,15 @@ impl CallCompiled { }; args.all_values_generic(expr_to_value, |arguments| { - let slots = vec![Cell::new(None); fun.parameters.len()]; + let mut slots = vec![None; fun.parameters.len()]; fun.parameters - .collect(arguments.frozen_to_v(), &slots, ctx.heap()) + .collect(arguments.frozen_to_v(), &mut slots, ctx.heap()) .ok()?; let slots = slots .into_try_map(|value| { // Value must be set, but better ignore optimization here than panic. - let value = value.get().ok_or(())?; + let value = value.ok_or(())?; // Everything should be frozen here, but if not, // it is safer to abandon optimization. value.unpack_frozen().ok_or(()) @@ -255,8 +253,8 @@ impl CallCompiled { let (before, after) = parse_format_one(&format)?; - let before = ctx.frozen_heap().alloc_str(&before); - let after = ctx.frozen_heap().alloc_str(&after); + let before = ctx.frozen_heap().alloc_str_intern(&before); + let after = ctx.frozen_heap().alloc_str_intern(&after); Some(ExprCompiled::format_one(before, arg.clone(), after, ctx)) } diff --git a/starlark-rust/starlark/src/eval/compiler/compr.rs b/starlark-rust/starlark/src/eval/compiler/compr.rs index 3c0a75b678a0f..7c6307d8a89da 100644 --- a/starlark-rust/starlark/src/eval/compiler/compr.rs +++ b/starlark-rust/starlark/src/eval/compiler/compr.rs @@ -22,6 +22,7 @@ use starlark_syntax::slice_vec_ext::SliceExt; use starlark_syntax::syntax::ast::ClauseP; use starlark_syntax::syntax::ast::ForClauseP; +use crate::eval::compiler::error::CompilerInternalError; use crate::eval::compiler::expr::ExprCompiled; use crate::eval::compiler::expr_bool::ExprCompiledBool; use crate::eval::compiler::known::list_to_tuple; @@ -32,16 +33,19 @@ use crate::eval::compiler::span::IrSpanned; use crate::eval::compiler::stmt::AssignCompiledValue; use crate::eval::compiler::Compiler; -impl Compiler<'_, '_, '_> { +impl Compiler<'_, '_, '_, '_> { pub fn list_comprehension( &mut self, x: &CstExpr, for_: &ForClauseP, clauses: &[ClauseP], - ) -> ExprCompiled { - let clauses = self.compile_clauses(for_, clauses); - let x = self.expr(x); - ExprCompiled::compr(ComprCompiled::List(Box::new(x), clauses)) + ) -> Result { + let clauses = self.compile_clauses(for_, clauses)?; + let x = self.expr(x)?; + Ok(ExprCompiled::compr(ComprCompiled::List( + Box::new(x), + clauses, + ))) } pub fn dict_comprehension( @@ -50,27 +54,31 @@ impl Compiler<'_, '_, '_> { v: &CstExpr, for_: &ForClauseP, clauses: &[ClauseP], - ) -> ExprCompiled { - let clauses = self.compile_clauses(for_, clauses); - let k = self.expr(k); - let v = self.expr(v); - ExprCompiled::compr(ComprCompiled::Dict(Box::new((k, v)), clauses)) + ) -> Result { + let clauses = self.compile_clauses(for_, clauses)?; + let k = self.expr(k)?; + let v = self.expr(v)?; + Ok(ExprCompiled::compr(ComprCompiled::Dict( + Box::new((k, v)), + clauses, + ))) } /// Peel the final if's from clauses, and return them (in the order they started), plus the next for you get to fn compile_ifs( &mut self, clauses: &mut Vec>, - ) -> (Option>, Vec>) { + ) -> Result<(Option>, Vec>), CompilerInternalError> + { let mut ifs = Vec::new(); while let Some(x) = clauses.pop() { match x { ClauseP::For(f) => { 
ifs.reverse(); - return (Some(f), ifs); + return Ok((Some(f), ifs)); } ClauseP::If(x) => { - let x = self.expr_truth(&x); + let x = self.expr_truth(&x)?; if let ExprCompiledBool::Const(true) = &x.node { // If the condition is always true, skip the clause. continue; @@ -81,16 +89,16 @@ impl Compiler<'_, '_, '_> { } } ifs.reverse(); - (None, ifs) + Ok((None, ifs)) } fn compile_clauses( &mut self, for_: &ForClauseP, clauses: &[ClauseP], - ) -> ClausesCompiled { + ) -> Result { // The first for.over is scoped before we enter the list comp - let over = self.expr(&list_to_tuple(&for_.over)); + let over = self.expr(&list_to_tuple(&for_.over))?; // TODO(nga): unnecessary clone. let mut clauses = clauses.to_vec(); @@ -99,20 +107,20 @@ impl Compiler<'_, '_, '_> { // The evaluator wants to use pop to consume them, so reverse the order. let mut res = Vec::new(); loop { - let (next_for, ifs) = self.compile_ifs(&mut clauses); + let (next_for, ifs) = self.compile_ifs(&mut clauses)?; match next_for { None => { let last = ClauseCompiled { - var: self.assign_target(&for_.var), + var: self.assign_target(&for_.var)?, over, ifs, }; - return ClausesCompiled::new(res, last); + return Ok(ClausesCompiled::new(res, last)); } Some(f) => { res.push(ClauseCompiled { - over: self.expr(&f.over), - var: self.assign_target(&f.var), + over: self.expr(&f.over)?, + var: self.assign_target(&f.var)?, ifs, }); } diff --git a/starlark-rust/starlark/src/eval/compiler/constants.rs b/starlark-rust/starlark/src/eval/compiler/constants.rs index 1923e3b3a8f65..817c3acb3eea2 100644 --- a/starlark-rust/starlark/src/eval/compiler/constants.rs +++ b/starlark-rust/starlark/src/eval/compiler/constants.rs @@ -19,6 +19,7 @@ use dupe::Dupe; use once_cell::sync::Lazy; use crate::environment::Globals; +use crate::values::namespace::FrozenNamespace; use crate::values::FrozenValue; #[derive(Copy, Clone, Dupe, Debug)] @@ -46,6 +47,9 @@ pub(crate) struct Constants { pub(crate) fn_dict: BuiltinFn, pub(crate) fn_tuple: BuiltinFn, pub(crate) fn_isinstance: BuiltinFn, + pub(crate) fn_set: BuiltinFn, + // Technically, this is not a function. 
+ pub(crate) typing_callable: BuiltinFn, } impl Constants { @@ -59,6 +63,15 @@ impl Constants { fn_dict: BuiltinFn(g.get_frozen("dict").unwrap()), fn_tuple: BuiltinFn(g.get_frozen("tuple").unwrap()), fn_isinstance: BuiltinFn(g.get_frozen("isinstance").unwrap()), + fn_set: BuiltinFn(g.get_frozen("set").unwrap()), + typing_callable: { + let typing = g + .get_frozen("typing") + .unwrap() + .downcast_frozen_ref::() + .unwrap(); + BuiltinFn(typing.as_ref().get("Callable").unwrap()) + }, } }); Lazy::force(&RES) diff --git a/starlark-rust/starlark/src/eval/compiler/def.rs b/starlark-rust/starlark/src/eval/compiler/def.rs index 3a4874a6a1d9e..051e774eaff73 100644 --- a/starlark-rust/starlark/src/eval/compiler/def.rs +++ b/starlark-rust/starlark/src/eval/compiler/def.rs @@ -20,10 +20,8 @@ use std::cell::UnsafeCell; use std::collections::HashMap; use std::fmt; -use std::fmt::Display; use std::fmt::Write; use std::ptr; -use std::time::Instant; use allocative::Allocative; use derivative::Derivative; @@ -33,9 +31,11 @@ use once_cell::sync::Lazy; use starlark_derive::starlark_value; use starlark_derive::NoSerialize; use starlark_derive::VisitSpanMut; +use starlark_map::StarlarkHasher; use starlark_syntax::eval_exception::EvalException; use starlark_syntax::slice_vec_ext::SliceExt; use starlark_syntax::syntax::def::DefParam; +use starlark_syntax::syntax::def::DefParamIndices; use starlark_syntax::syntax::def::DefParamKind; use starlark_syntax::syntax::def::DefParams; @@ -47,6 +47,7 @@ use crate::collections::Hashed; use crate::const_frozen_string; use crate::docs::DocFunction; use crate::docs::DocItem; +use crate::docs::DocMember; use crate::docs::DocString; use crate::docs::DocStringKind; use crate::environment::FrozenModuleData; @@ -55,6 +56,7 @@ use crate::eval::bc::bytecode::Bc; use crate::eval::bc::frame::alloca_frame; use crate::eval::compiler::def_inline::inline_def_body; use crate::eval::compiler::def_inline::InlineDefBody; +use crate::eval::compiler::error::CompilerInternalError; use crate::eval::compiler::expr::ExprCompiled; use crate::eval::compiler::opt_ctx::OptCtx; use crate::eval::compiler::scope::payload::CstAssignIdent; @@ -74,13 +76,16 @@ use crate::eval::runtime::arguments::ResolvedArgName; use crate::eval::runtime::evaluator::Evaluator; use crate::eval::runtime::frame_span::FrameSpan; use crate::eval::runtime::frozen_file_span::FrozenFileSpan; -use crate::eval::runtime::params::ParametersSpec; +use crate::eval::runtime::params::spec::ParametersSpec; +use crate::eval::runtime::profile::instant::ProfilerInstant; use crate::eval::runtime::slots::LocalSlotId; use crate::eval::runtime::slots::LocalSlotIdCapturedOrNot; use crate::eval::Arguments; use crate::starlark_complex_values; -use crate::typing::Param; +use crate::typing::callable_param::ParamIsRequired; +use crate::typing::ParamSpec; use crate::typing::Ty; +use crate::util::arc_str::ArcStr; use crate::values::frozen_ref::AtomicFrozenRefOption; use crate::values::function::FUNCTION_TYPE; use crate::values::typing::type_compiled::compiled::TypeCompiled; @@ -144,18 +149,23 @@ pub(crate) struct ParameterName { #[derive(Clone, Debug, VisitSpanMut)] pub(crate) enum ParameterCompiled { - Normal(ParameterName, Option>), - WithDefaultValue(ParameterName, Option>, T), + Normal( + /// Name. + ParameterName, + /// Type. + Option>, + /// Default value. 
+ Option, + ), Args(ParameterName, Option>), KwArgs(ParameterName, Option>), } impl ParameterCompiled { - pub(crate) fn map_expr(&self, mut f: impl FnMut(&T) -> U) -> ParameterCompiled { + pub(crate) fn map_expr(&self, f: impl FnMut(&T) -> U) -> ParameterCompiled { match self { - ParameterCompiled::Normal(n, o) => ParameterCompiled::Normal(n.clone(), *o), - ParameterCompiled::WithDefaultValue(n, o, t) => { - ParameterCompiled::WithDefaultValue(n.clone(), *o, f(t)) + ParameterCompiled::Normal(n, o, t) => { + ParameterCompiled::Normal(n.clone(), *o, t.as_ref().map(f)) } ParameterCompiled::Args(n, o) => ParameterCompiled::Args(n.clone(), *o), ParameterCompiled::KwArgs(n, o) => ParameterCompiled::KwArgs(n.clone(), *o), @@ -164,8 +174,7 @@ impl ParameterCompiled { pub(crate) fn accepts_positional(&self) -> bool { match self { - ParameterCompiled::Normal(_, _) => true, - ParameterCompiled::WithDefaultValue(_, _, _) => true, + ParameterCompiled::Normal(..) => true, _ => false, } } @@ -176,8 +185,7 @@ impl ParameterCompiled { pub(crate) fn name_ty(&self) -> (&ParameterName, Option>) { match self { - Self::Normal(n, t) => (n, *t), - Self::WithDefaultValue(n, t, _) => (n, *t), + Self::Normal(n, t, _) => (n, *t), Self::Args(n, t) => (n, *t), Self::KwArgs(n, t) => (n, *t), } @@ -197,6 +205,15 @@ impl ParameterCompiled { } } + pub(crate) fn required(&self) -> ParamIsRequired { + match self { + ParameterCompiled::Normal(_, _, None) => ParamIsRequired::Yes, + ParameterCompiled::Normal(_, _, Some(_)) => ParamIsRequired::No, + ParameterCompiled::Args(..) => ParamIsRequired::No, + ParameterCompiled::KwArgs(..) => ParamIsRequired::No, + } + } + pub(crate) fn is_star_or_star_star(&self) -> bool { matches!( self, @@ -208,9 +225,7 @@ impl ParameterCompiled { #[derive(Debug, Clone, VisitSpanMut)] pub(crate) struct ParametersCompiled { pub(crate) params: Vec>>, - /// Number of parameters which can be filled positionally. - /// That is, number of parameters before first `*`, `*args` or `**kwargs`. - pub(crate) num_positional: u32, + pub(crate) indices: DefParamIndices, } impl ParametersCompiled { @@ -257,32 +272,39 @@ impl ParametersCompiled { .collect() } - pub(crate) fn to_ty_params(&self) -> Vec { - self.params - .iter() - .enumerate() - .map(|(i, p)| { - let ty = p.ty(); - match &p.node { - ParameterCompiled::Normal(name, ..) => { - if i < self.num_positional as usize { - Param::pos_or_name(&name.name, ty) - } else { - Param::name_only(&name.name, ty) - } - } - ParameterCompiled::WithDefaultValue(name, ..) => { - if i < self.num_positional as usize { - Param::pos_or_name(&name.name, ty).optional() - } else { - Param::name_only(&name.name, ty).optional() - } - } - ParameterCompiled::Args(..) => Param::args(ty), - ParameterCompiled::KwArgs(..) => Param::kwargs(ty), - } - }) - .collect() + pub(crate) fn to_ty_params(&self) -> ParamSpec { + ParamSpec::new_parts( + self.indices.pos_only().map(|i| { + let p = &self.params[i].node; + (p.required(), p.ty()) + }), + self.indices.pos_or_named().map(|i| { + let p = &self.params[i].node; + ( + ArcStr::from(p.name_ty().0.name.as_str()), + p.required(), + p.ty(), + ) + }), + self.indices.args.map(|i| { + let p = &self.params[i as usize].node; + p.ty() + }), + self.indices.named_only(self.params.len()).map(|i| { + let p = &self.params[i].node; + ( + ArcStr::from(p.name_ty().0.name.as_str()), + p.required(), + p.ty(), + ) + }), + self.indices.kwargs.map(|i| { + let p = &self.params[i as usize].node; + p.ty() + }), + ) + // TODO(nga): do not unwrap. 
+ .unwrap() } } @@ -298,7 +320,7 @@ pub(crate) struct CopySlotFromParent { /// Static info for `def`, `lambda` or module. #[derive(Derivative, Display)] #[derivative(Debug)] -#[display(fmt = "DefInfo")] +#[display("DefInfo")] pub(crate) struct DefInfo { pub(crate) name: FrozenStringValue, /// Span of function signature. @@ -385,7 +407,7 @@ pub(crate) struct DefCompiled { pub(crate) info: FrozenRef<'static, DefInfo>, } -impl Compiler<'_, '_, '_> { +impl Compiler<'_, '_, '_, '_> { fn parameter_name(&mut self, ident: &CstAssignIdent) -> ParameterName { let binding_id = ident.payload.expect("no binding for parameter"); let binding = self.scope_data.get_binding(binding_id); @@ -395,24 +417,19 @@ impl Compiler<'_, '_, '_> { } } - /// Compile a parameter. Return `None` for `*` pseudo parameter. fn parameter( &mut self, x: &Spanned>, - ) -> IrSpanned>> { + ) -> Result>>, CompilerInternalError> { let span = FrameSpan::new(FrozenFileSpan::new(self.codemap, x.span)); let parameter_name = self.parameter_name(x.ident); - IrSpanned { + Ok(IrSpanned { span, node: match &x.node.kind { - DefParamKind::Regular(None) => ParameterCompiled::Normal( + DefParamKind::Regular(_mode, default_value) => ParameterCompiled::Normal( parameter_name, self.expr_for_type(x.ty).map(|t| t.node), - ), - DefParamKind::Regular(Some(default_value)) => ParameterCompiled::WithDefaultValue( - parameter_name, - self.expr_for_type(x.ty).map(|t| t.node), - self.expr(default_value), + default_value.as_ref().map(|d| self.expr(d)).transpose()?, ), DefParamKind::Args => ParameterCompiled::Args( parameter_name, @@ -423,7 +440,7 @@ impl Compiler<'_, '_, '_> { self.expr_for_type(x.ty).map(|t| t.node), ), }, - } + }) } pub fn function( @@ -434,20 +451,23 @@ impl Compiler<'_, '_, '_> { params: &[CstParameter], return_type: Option<&CstTypeExpr>, suite: &CstStmt, - ) -> ExprCompiled { + ) -> Result { let file = self.codemap.file_span(suite.span); let function_name = format!("{}.{}", file.file.filename(), name); - let name = self.eval.frozen_heap().alloc_str(name); + let name = self.eval.frozen_heap().alloc_str_intern(name); - let def_params = DefParams::unpack(params, &self.codemap).expect("verified at parse time"); + let DefParams { params, indices } = match DefParams::unpack(params, &self.codemap) { + Ok(def_params) => def_params, + Err(e) => return Err(CompilerInternalError::from_eval_exception(e)), + }; // The parameters run in the scope of the parent, so compile them with the outer // scope - let params = def_params.params.map(|x| self.parameter(x)); - let params = ParametersCompiled { - params, - num_positional: def_params.num_positional, - }; + let params: Vec<_> = params + .iter() + .map(|x| self.parameter(x)) + .collect::>()?; + let params = ParametersCompiled { params, indices }; let return_type = self.expr_for_type(return_type).map(|t| t.node); let ty = Ty::function( @@ -458,7 +478,7 @@ impl Compiler<'_, '_, '_> { self.enter_scope(scope_id); let docstring = DocString::extract_raw_starlark_docstring(suite); - let body = self.stmt(suite, false); + let body = self.stmt(suite, false)?; let scope_id = self.exit_scope(); let scope_names = self.scope_data.get_scope(scope_id); @@ -473,25 +493,19 @@ impl Compiler<'_, '_, '_> { let param_count = params.count_param_variables(); - let used = self - .eval - .frozen_heap() - .alloc_any_slice_display_from_debug(&scope_names.used); + let used = self.eval.frozen_heap().alloc_any_slice(&scope_names.used); let info = self.eval.module_env.frozen_heap().alloc_any(DefInfo { name, signature_span, 
parameter_captures: self .eval .frozen_heap() - .alloc_any_slice_display_from_debug(¶ms.parameter_captures()), + .alloc_any_slice(¶ms.parameter_captures()), ty, codemap: self.codemap, docstring, used, - parent: self - .eval - .frozen_heap() - .alloc_any_slice_display_from_debug(&scope_names.parent), + parent: self.eval.frozen_heap().alloc_any_slice(&scope_names.parent), stmt_compiled: body.as_bc( &self.compile_context(return_type.is_some()), used, @@ -504,12 +518,12 @@ impl Compiler<'_, '_, '_> { globals: self.globals, }); - ExprCompiled::Def(DefCompiled { + Ok(ExprCompiled::Def(DefCompiled { function_name, params, return_type, info, - }) + })) } } @@ -540,7 +554,7 @@ pub(crate) struct DefGen { /// When the module is not frozen yet, this field contains `None`, and function's module /// can be accessed from evaluator's module. #[allocative(skip)] - module: AtomicFrozenRefOption, + pub(crate) module: AtomicFrozenRefOption, /// This field is only used in `FrozenDef`. It is populated in `post_freeze`. #[derivative(Debug = "ignore")] #[allocative(skip)] @@ -564,22 +578,22 @@ impl<'v> Def<'v> { parameter_types: Vec<(LocalSlotId, String, TypeCompiled)>, return_type: Option>, stmt: FrozenRef<'static, DefInfo>, - eval: &mut Evaluator<'v, '_>, - ) -> Value<'v> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> anyhow::Result> { let captured = stmt .parent .as_ref() .map(|copy| eval.clone_slot_capture(copy, &stmt)); - eval.heap().alloc(Self { + Ok(eval.heap().alloc(Self { parameters, parameter_captures: stmt.parameter_captures, parameter_types, return_type, captured, - module: AtomicFrozenRefOption::new(eval.module_variables), + module: AtomicFrozenRefOption::new(eval.top_frame_def_frozen_module(false)?), optimized_on_freeze_stmt: StmtCompiledCell::new(), def_info: stmt, - }) + })) } } @@ -618,7 +632,7 @@ impl<'v> DefLike<'v> for DefGen { } #[starlark_value(type = FUNCTION_TYPE)] -impl<'v, V: ValueLike<'v> + 'v> StarlarkValue<'v> for DefGen +impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for DefGen where Self: ProvidesStaticType<'v> + DefLike<'v>, { @@ -630,12 +644,12 @@ where &self, me: Value<'v>, args: &Arguments<'v, '_>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result> { self.invoke_impl(me, &args.0, eval) } - fn documentation(&self) -> Option { + fn documentation(&self) -> DocItem { let mut parameter_types = vec![Ty::any(); self.parameters.len()]; for (idx, _, ty) in &self.parameter_types { // Local slot number for parameter is the same as parameter index. @@ -650,15 +664,19 @@ where .documentation(parameter_types, HashMap::new()), return_type, self.def_info.docstring.as_ref().map(String::as_ref), - None, ); - Some(DocItem::Function(function_docs)) + DocItem::Member(DocMember::Function(function_docs)) } fn typechecker_ty(&self) -> Option { Some(self.def_info.ty.clone()) } + + fn write_hash(&self, hasher: &mut StarlarkHasher) -> crate::Result<()> { + // It's hard to come up with a good hash here, but let's at least make an effort. 
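+        // Colliding hashes for same-named functions are fine: `write_hash` only
+        // has to agree with equality, it does not have to be injective.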
+ self.def_info.name.write_hash(hasher) + } } impl<'v, V: ValueLike<'v>> DefGen @@ -673,9 +691,9 @@ where } } - fn check_parameter_types(&self, eval: &mut Evaluator<'v, '_>) -> anyhow::Result<()> { + fn check_parameter_types(&self, eval: &mut Evaluator<'v, '_, '_>) -> crate::Result<()> { let start = if eval.typecheck_profile.enabled { - Some(Instant::now()) + Some(ProfilerInstant::now()) } else { None }; @@ -697,12 +715,13 @@ where pub(crate) fn check_return_type( &self, ret: Value<'v>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result<()> { - let return_type_ty: TypeCompiled = - self.return_type.ok_or(DefError::CheckReturnTypeNoType)?; + eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result<()> { + let return_type_ty: TypeCompiled = self + .return_type + .ok_or_else(|| crate::Error::new_other(DefError::CheckReturnTypeNoType))?; let start = if eval.typecheck_profile.enabled { - Some(Instant::now()) + Some(ProfilerInstant::now()) } else { None }; @@ -719,8 +738,8 @@ where &self, me: Value<'v>, args: &A, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> + eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result> where 'v: 'a, { @@ -731,7 +750,11 @@ where bc.max_stack_size, bc.max_loop_depth, |eval| { - let slots = eval.current_frame.locals(); + // SAFETY: `slots` is unique: `alloca_frame` just allocated the frame, + // so there are no references to the frame except `eval.current_frame`. + // We use `slots` only in `collect_inline`, + // which does not have access to `eval` thus cannot access the frame indirectly. + let slots = unsafe { eval.current_frame.locals_mut() }; self.parameters.collect_inline(args, slots, eval.heap())?; self.invoke_raw(me, eval) }, @@ -742,8 +765,8 @@ where &self, me: Value<'v>, args: &A, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> + eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result> where 'v: 'a, { @@ -757,7 +780,11 @@ where /// * the frame has been allocated and stored in `eval.current_frame` /// * the arguments have been collected into the frame #[inline(always)] - fn invoke_raw(&self, me: Value<'v>, eval: &mut Evaluator<'v, '_>) -> anyhow::Result> { + fn invoke_raw( + &self, + me: Value<'v>, + eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result> { // println!("invoking {}", self.def.stmt.name.node); if !self.parameter_types.is_empty() { @@ -783,9 +810,9 @@ where if Self::FROZEN { debug_assert!(self.module.load_relaxed().is_some()); } - let res = eval.with_function_context(me, self.module.load_relaxed(), self.bc()); - res.map_err(EvalException::into_anyhow) + eval.eval_bc(me, self.bc()) + .map_err(EvalException::into_error) } pub(crate) fn resolve_arg_name(&self, name: Hashed<&str>) -> ResolvedArgName { diff --git a/starlark-rust/starlark/src/eval/compiler/def_inline.rs b/starlark-rust/starlark/src/eval/compiler/def_inline.rs new file mode 100644 index 0000000000000..ccdb8af80d772 --- /dev/null +++ b/starlark-rust/starlark/src/eval/compiler/def_inline.rs @@ -0,0 +1,356 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Inline functions. + +pub(crate) mod local_as_value; + +use crate::eval::compiler::args::ArgsCompiledValue; +use crate::eval::compiler::call::CallCompiled; +use crate::eval::compiler::def::ParametersCompiled; +use crate::eval::compiler::def_inline::local_as_value::LocalAsValue; +use crate::eval::compiler::expr::Builtin1; +use crate::eval::compiler::expr::Builtin2; +use crate::eval::compiler::expr::ExprCompiled; +use crate::eval::compiler::expr::ExprLogicalBinOp; +use crate::eval::compiler::opt_ctx::OptCtx; +use crate::eval::compiler::span::IrSpanned; +use crate::eval::compiler::stmt::StmtCompiled; +use crate::eval::compiler::stmt::StmtsCompiled; +use crate::eval::runtime::frame_span::FrameSpan; +use crate::eval::runtime::slots::LocalSlotId; +use crate::values::FrozenStringValue; +use crate::values::FrozenValue; +use crate::values::FrozenValueTyped; + +/// Function body suitable for inlining. +#[derive(Debug)] +pub(crate) enum InlineDefBody { + /// Function body is `return type(x) == "y"` + ReturnTypeIs(FrozenStringValue), + /// Any expression which can be safely inlined. + /// + /// See the function where this enum variant is computed for the definition + /// of a safe-to-inline expression. + ReturnSafeToInlineExpr(IrSpanned), +} + +/// If a statement is `return type(x) == "y"` where `x` is the first slot. +fn is_return_type_is(stmt: &StmtsCompiled) -> Option { + let (x, t) = stmt.first()?.as_return()?.as_type_is()?; + match &x.node { + // Slot 0 is a slot for the first function parameter. + ExprCompiled::Local(LocalSlotId(0)) => Some(t), + _ => None, + } +} + +struct IsSafeToInlineExpr { + /// Function parameter count. + param_count: u32, + /// How many expressions we visited already. + counter: u32, +} + +impl IsSafeToInlineExpr { + fn new(param_count: u32) -> IsSafeToInlineExpr { + Self { + param_count, + counter: 0, + } + } + + fn is_safe_to_inline_opt_expr(&mut self, expr: &Option>) -> bool { + if let Some(expr) = expr { + self.is_safe_to_inline_expr(expr) + } else { + true + } + } + + /// Expression which has no access to locals or globals. + fn is_safe_to_inline_expr(&mut self, expr: &ExprCompiled) -> bool { + // Do not inline too large functions. + if self.counter > 100 { + return false; + } + self.counter += 1; + match expr { + ExprCompiled::Value(..) => true, + ExprCompiled::LocalCaptured(..) | ExprCompiled::Module(..) | ExprCompiled::Def(..) => { + false + } + ExprCompiled::Local(l) => { + // `l >= param_count` should be unreachable, but it is safer this way. + l.0 < self.param_count + } + ExprCompiled::Call(call) => { + self.is_safe_to_inline_expr(&call.fun) + && call + .args + .arg_exprs() + .all(|e| self.is_safe_to_inline_expr(e)) + } + ExprCompiled::Compr(..) => { + // TODO: some comprehensions are safe to inline.
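+                // (E.g. a comprehension introduces fresh local slots for its loop
+                // variables, which this inliner cannot yet remap into the caller.)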
+ false + } + ExprCompiled::Slice(a_b_c_d) => { + let (a, b, c, d) = &**a_b_c_d; + self.is_safe_to_inline_expr(a) + && self.is_safe_to_inline_opt_expr(b) + && self.is_safe_to_inline_opt_expr(c) + && self.is_safe_to_inline_opt_expr(d) + } + ExprCompiled::Builtin2(bin_op, a_b) => { + let (a, b) = &**a_b; + let _: &Builtin2 = bin_op; + self.is_safe_to_inline_expr(a) && self.is_safe_to_inline_expr(b) + } + ExprCompiled::Index2(a_i0_i1) => { + let (a, i0, i1) = &**a_i0_i1; + self.is_safe_to_inline_expr(a) + && self.is_safe_to_inline_expr(i0) + && self.is_safe_to_inline_expr(i1) + } + ExprCompiled::Builtin1(un_op, arg) => { + let _: &Builtin1 = un_op; + self.is_safe_to_inline_expr(arg) + } + ExprCompiled::Tuple(xs) | ExprCompiled::List(xs) => { + xs.iter().all(|x| self.is_safe_to_inline_expr(x)) + } + ExprCompiled::Dict(xs) => xs + .iter() + .all(|(x, y)| self.is_safe_to_inline_expr(x) && self.is_safe_to_inline_expr(y)), + ExprCompiled::If(c_t_f) => { + let (c, t, f) = &**c_t_f; + self.is_safe_to_inline_expr(c) + && self.is_safe_to_inline_expr(t) + && self.is_safe_to_inline_expr(f) + } + ExprCompiled::LogicalBinOp(op, x_y) => { + let (x, y) = &**x_y; + let _: &ExprLogicalBinOp = op; + self.is_safe_to_inline_expr(x) && self.is_safe_to_inline_expr(y) + } + ExprCompiled::Seq(x_y) => { + let (x, y) = &**x_y; + self.is_safe_to_inline_expr(x) && self.is_safe_to_inline_expr(y) + } + } + } +} + +/// Function body is a `return` of a safe-to-inline expression (as defined above). +fn is_return_safe_to_inline_expr( + stmts: &StmtsCompiled, + param_count: u32, +) -> Option> { + match stmts.first() { + None => { + // Empty function is equivalent to `return None`. + Some(IrSpanned { + span: FrameSpan::default(), + node: ExprCompiled::Value(FrozenValue::new_none()), + }) + } + Some(stmt) => match &stmt.node { + StmtCompiled::Return(expr) + if IsSafeToInlineExpr::new(param_count).is_safe_to_inline_expr(expr) => + { + Some(expr.clone()) + } + _ => None, + }, + } +} + +pub(crate) fn inline_def_body( + params: &ParametersCompiled>, + body: &StmtsCompiled, +) -> Option { + if params.params.len() == 1 && params.params[0].accepts_positional() { + if let Some(t) = is_return_type_is(body) { + return Some(InlineDefBody::ReturnTypeIs(t)); + } + } + if !params.has_args_or_kwargs() { + // It is sometimes possible to inline functions with `*args` or `**kwargs`, + // but let's postpone that for now. + let param_count = params.count_param_variables(); + if let Some(expr) = is_return_safe_to_inline_expr(body, param_count) { + return Some(InlineDefBody::ReturnSafeToInlineExpr(expr)); + } + } + None +} + +pub(crate) struct CannotInline; + +/// Utility to inline function body at call site. +pub(crate) struct InlineDefCallSite<'s, 'v, 'a, 'e, 'x> { + pub(crate) ctx: &'s mut OptCtx<'v, 'a, 'e, 'x>, + // Values in the slots are either real frozen values + // or `LocalAsValue`, which marks parameters to be substituted with caller locals.
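+    // E.g. inlining `def f(x): return x + 1` at a call site `f(y)` puts a
+    // `LocalAsValue` for the caller's `y` slot into slot 0; `f(3)` puts `3` there.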
+ pub(crate) slots: &'s [FrozenValue], +} + +impl InlineDefCallSite<'_, '_, '_, '_, '_> { + fn inline_opt( + &mut self, + expr: Option<&IrSpanned>, + ) -> Result>, CannotInline> { + match expr { + None => Ok(None), + Some(expr) => Ok(Some(self.inline(expr)?)), + } + } + + fn inline_args(&mut self, args: &ArgsCompiledValue) -> Result { + args.map_exprs(|expr| self.inline(expr)) + } + + fn inline_call( + &mut self, + call: &IrSpanned, + ) -> Result, CannotInline> { + let span = call.span; + let CallCompiled { fun, args } = &call.node; + let fun = self.inline(fun)?; + let args = self.inline_args(args)?; + Ok(IrSpanned { + span, + node: CallCompiled::call(span, fun, args, self.ctx), + }) + } + + pub(crate) fn inline( + &mut self, + expr: &IrSpanned, + ) -> Result, CannotInline> { + let span = expr.span; + Ok(match &expr.node { + e @ ExprCompiled::Value(..) => IrSpanned { + span, + node: e.clone(), + }, + ExprCompiled::Local(local) => { + let value = self.slots[local.0 as usize]; + let expr = if let Some(local) = FrozenValueTyped::::new(value) { + ExprCompiled::Local(local.local) + } else { + ExprCompiled::Value(value) + }; + IrSpanned { span, node: expr } + } + ExprCompiled::If(c_t_f) => { + let (c, t, f) = &**c_t_f; + let c = self.inline(c)?; + let t = self.inline(t)?; + let f = self.inline(f)?; + ExprCompiled::if_expr(c, t, f) + } + ExprCompiled::LogicalBinOp(op, l_r) => { + let (l, r) = &**l_r; + let l = self.inline(l)?; + let r = self.inline(r)?; + ExprCompiled::logical_bin_op(*op, l, r) + } + ExprCompiled::List(xs) => { + let xs = xs + .iter() + .map(|x| self.inline(x)) + .collect::, CannotInline>>()?; + IrSpanned { + span, + node: ExprCompiled::List(xs), + } + } + ExprCompiled::Tuple(xs) => { + let xs = xs + .iter() + .map(|x| self.inline(x)) + .collect::, CannotInline>>()?; + IrSpanned { + span, + node: ExprCompiled::tuple(xs, self.ctx.frozen_heap()), + } + } + ExprCompiled::Dict(xs) => { + let xs = xs + .iter() + .map(|(x, y)| Ok((self.inline(x)?, self.inline(y)?))) + .collect::, CannotInline>>()?; + IrSpanned { + span, + node: ExprCompiled::Dict(xs), + } + } + ExprCompiled::Builtin2(op, l_r) => { + let (l, r) = &**l_r; + let l = self.inline(l)?; + let r = self.inline(r)?; + IrSpanned { + span, + node: ExprCompiled::bin_op(*op, l, r, self.ctx), + } + } + ExprCompiled::Index2(a_i0_i1) => { + let (a, i0, i1) = &**a_i0_i1; + let a = self.inline(a)?; + let i0 = self.inline(i0)?; + let i1 = self.inline(i1)?; + IrSpanned { + span, + node: ExprCompiled::Index2(Box::new((a, i0, i1))), + } + } + ExprCompiled::Builtin1(op, x) => { + let x = self.inline(x)?; + IrSpanned { + span, + node: ExprCompiled::un_op(span, op, x, self.ctx), + } + } + ExprCompiled::Slice(l_a_b_c) => { + let (l, a, b, c) = &**l_a_b_c; + let l = self.inline(l)?; + let a = self.inline_opt(a.as_ref())?; + let b = self.inline_opt(b.as_ref())?; + let c = self.inline_opt(c.as_ref())?; + IrSpanned { + span, + node: ExprCompiled::Slice(Box::new((l, a, b, c))), + } + } + ExprCompiled::Seq(a_b) => { + let (a, b) = &**a_b; + let a = self.inline(a)?; + let b = self.inline(b)?; + ExprCompiled::seq(a, b) + } + ExprCompiled::Call(call) => return self.inline_call(call), + // These should be unreachable, but it is safer + // to do unnecessary work in compiler than crash. + ExprCompiled::LocalCaptured(..) + | ExprCompiled::Module(..) + | ExprCompiled::Compr(..) + | ExprCompiled::Def(..) 
=> return Err(CannotInline), + }) + } +} diff --git a/starlark-rust/starlark/src/eval/compiler/def_inline/local_as_value.rs b/starlark-rust/starlark/src/eval/compiler/def_inline/local_as_value.rs index 73fa6b30dde69..726e4b391427b 100644 --- a/starlark-rust/starlark/src/eval/compiler/def_inline/local_as_value.rs +++ b/starlark-rust/starlark/src/eval/compiler/def_inline/local_as_value.rs @@ -44,7 +44,7 @@ use crate::values::StarlarkValue; NoSerialize, Allocative )] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] pub(crate) struct LocalAsValue { #[allocative(skip)] pub(crate) local: LocalSlotId, diff --git a/starlark-rust/starlark/src/eval/compiler/def_inline/mod.rs b/starlark-rust/starlark/src/eval/compiler/def_inline/mod.rs deleted file mode 100644 index c2938e5cbecb2..0000000000000 --- a/starlark-rust/starlark/src/eval/compiler/def_inline/mod.rs +++ /dev/null @@ -1,356 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! Inline functions. - -pub(crate) mod local_as_value; - -use crate::eval::compiler::args::ArgsCompiledValue; -use crate::eval::compiler::call::CallCompiled; -use crate::eval::compiler::def::ParametersCompiled; -use crate::eval::compiler::def_inline::local_as_value::LocalAsValue; -use crate::eval::compiler::expr::Builtin1; -use crate::eval::compiler::expr::Builtin2; -use crate::eval::compiler::expr::ExprCompiled; -use crate::eval::compiler::expr::ExprLogicalBinOp; -use crate::eval::compiler::opt_ctx::OptCtx; -use crate::eval::compiler::span::IrSpanned; -use crate::eval::compiler::stmt::StmtCompiled; -use crate::eval::compiler::stmt::StmtsCompiled; -use crate::eval::runtime::frame_span::FrameSpan; -use crate::eval::runtime::slots::LocalSlotId; -use crate::values::FrozenStringValue; -use crate::values::FrozenValue; -use crate::values::FrozenValueTyped; - -/// Function body suitable for inlining. -#[derive(Debug)] -pub(crate) enum InlineDefBody { - /// Function body is `return type(x) == "y"` - ReturnTypeIs(FrozenStringValue), - /// Any expression which can be safely inlined. - /// - /// See the function where this enum variant is computed for the definition - /// of safe to inline expression. - ReturnSafeToInlineExpr(IrSpanned), -} - -/// If a statement is `return type(x) == "y"` where `x` is a first slot. -fn is_return_type_is(stmt: &StmtsCompiled) -> Option { - let (x, t) = stmt.first()?.as_return()?.as_type_is()?; - match &x.node { - // Slot 0 is a slot for the first function parameter. - ExprCompiled::Local(LocalSlotId(0)) => Some(t), - _ => None, - } -} - -struct IsSafeToInlineExpr { - /// Function parameter count. - param_count: u32, - /// How many expressions we visited already. 
- counter: u32, -} - -impl IsSafeToInlineExpr { - fn new(param_count: u32) -> IsSafeToInlineExpr { - Self { - param_count, - counter: 0, - } - } - - fn is_safe_to_inline_opt_expr(&mut self, expr: &Option>) -> bool { - if let Some(expr) = expr { - self.is_safe_to_inline_expr(expr) - } else { - true - } - } - - /// Expression which is has no access to locals or globals. - fn is_safe_to_inline_expr(&mut self, expr: &ExprCompiled) -> bool { - // Do not inline too large functions. - if self.counter > 100 { - return false; - } - self.counter += 1; - match expr { - ExprCompiled::Value(..) => true, - ExprCompiled::LocalCaptured(..) | ExprCompiled::Module(..) | ExprCompiled::Def(..) => { - false - } - ExprCompiled::Local(l) => { - // `l >= param_count` should be unreachable, but it is safer this way. - l.0 < self.param_count - } - ExprCompiled::Call(call) => { - self.is_safe_to_inline_expr(&call.fun) - && call - .args - .arg_exprs() - .all(|e| self.is_safe_to_inline_expr(e)) - } - ExprCompiled::Compr(..) => { - // TODO: some comprehensions are safe to inline. - false - } - ExprCompiled::Slice(a_b_c_d) => { - let (a, b, c, d) = &**a_b_c_d; - self.is_safe_to_inline_expr(a) - && self.is_safe_to_inline_opt_expr(b) - && self.is_safe_to_inline_opt_expr(c) - && self.is_safe_to_inline_opt_expr(d) - } - ExprCompiled::Builtin2(bin_op, a_b) => { - let (a, b) = &**a_b; - let _: &Builtin2 = bin_op; - self.is_safe_to_inline_expr(a) && self.is_safe_to_inline_expr(b) - } - ExprCompiled::Index2(a_i0_i1) => { - let (a, i0, i1) = &**a_i0_i1; - self.is_safe_to_inline_expr(a) - && self.is_safe_to_inline_expr(i0) - && self.is_safe_to_inline_expr(i1) - } - ExprCompiled::Builtin1(un_op, arg) => { - let _: &Builtin1 = un_op; - self.is_safe_to_inline_expr(arg) - } - ExprCompiled::Tuple(xs) | ExprCompiled::List(xs) => { - xs.iter().all(|x| self.is_safe_to_inline_expr(x)) - } - ExprCompiled::Dict(xs) => xs - .iter() - .all(|(x, y)| self.is_safe_to_inline_expr(x) && self.is_safe_to_inline_expr(y)), - ExprCompiled::If(c_t_f) => { - let (c, t, f) = &**c_t_f; - self.is_safe_to_inline_expr(c) - && self.is_safe_to_inline_expr(t) - && self.is_safe_to_inline_expr(f) - } - ExprCompiled::LogicalBinOp(op, x_y) => { - let (x, y) = &**x_y; - let _: &ExprLogicalBinOp = op; - self.is_safe_to_inline_expr(x) && self.is_safe_to_inline_expr(y) - } - ExprCompiled::Seq(x_y) => { - let (x, y) = &**x_y; - self.is_safe_to_inline_expr(x) && self.is_safe_to_inline_expr(y) - } - } - } -} - -/// Function body is a `return` safe to inline expression (as defined above). -fn is_return_safe_to_inline_expr( - stmts: &StmtsCompiled, - param_count: u32, -) -> Option> { - match stmts.first() { - None => { - // Empty function is equivalent to `return None`. - Some(IrSpanned { - span: FrameSpan::default(), - node: ExprCompiled::Value(FrozenValue::new_none()), - }) - } - Some(stmt) => match &stmt.node { - StmtCompiled::Return(expr) - if IsSafeToInlineExpr::new(param_count).is_safe_to_inline_expr(expr) => - { - Some(expr.clone()) - } - _ => None, - }, - } -} - -pub(crate) fn inline_def_body( - params: &ParametersCompiled>, - body: &StmtsCompiled, -) -> Option { - if params.params.len() == 1 && params.params[0].accepts_positional() { - if let Some(t) = is_return_type_is(body) { - return Some(InlineDefBody::ReturnTypeIs(t)); - } - } - if !params.has_args_or_kwargs() { - // It is possible to sometimes inline functions with `*args` or `**kwargs`, - // but let's postpone that for now. 
- let param_count = params.count_param_variables(); - if let Some(expr) = is_return_safe_to_inline_expr(body, param_count) { - return Some(InlineDefBody::ReturnSafeToInlineExpr(expr)); - } - } - None -} - -pub(crate) struct CannotInline; - -/// Utility to inline function body at call site. -pub(crate) struct InlineDefCallSite<'s, 'v, 'a, 'e> { - pub(crate) ctx: &'s mut OptCtx<'v, 'a, 'e>, - // Values in the slots are either real frozen values - // or `LocalAsValue` which are the parameters to be substituted with caller locals. - pub(crate) slots: &'s [FrozenValue], -} - -impl<'s, 'v, 'a, 'e> InlineDefCallSite<'s, 'v, 'a, 'e> { - fn inline_opt( - &mut self, - expr: Option<&IrSpanned>, - ) -> Result>, CannotInline> { - match expr { - None => Ok(None), - Some(expr) => Ok(Some(self.inline(expr)?)), - } - } - - fn inline_args(&mut self, args: &ArgsCompiledValue) -> Result { - args.map_exprs(|expr| self.inline(expr)) - } - - fn inline_call( - &mut self, - call: &IrSpanned, - ) -> Result, CannotInline> { - let span = call.span; - let CallCompiled { fun, args } = &call.node; - let fun = self.inline(fun)?; - let args = self.inline_args(args)?; - Ok(IrSpanned { - span, - node: CallCompiled::call(span, fun, args, self.ctx), - }) - } - - pub(crate) fn inline( - &mut self, - expr: &IrSpanned, - ) -> Result, CannotInline> { - let span = expr.span; - Ok(match &expr.node { - e @ ExprCompiled::Value(..) => IrSpanned { - span, - node: e.clone(), - }, - ExprCompiled::Local(local) => { - let value = self.slots[local.0 as usize]; - let expr = if let Some(local) = FrozenValueTyped::::new(value) { - ExprCompiled::Local(local.local) - } else { - ExprCompiled::Value(value) - }; - IrSpanned { span, node: expr } - } - ExprCompiled::If(c_t_f) => { - let (c, t, f) = &**c_t_f; - let c = self.inline(c)?; - let t = self.inline(t)?; - let f = self.inline(f)?; - ExprCompiled::if_expr(c, t, f) - } - ExprCompiled::LogicalBinOp(op, l_r) => { - let (l, r) = &**l_r; - let l = self.inline(l)?; - let r = self.inline(r)?; - ExprCompiled::logical_bin_op(*op, l, r) - } - ExprCompiled::List(xs) => { - let xs = xs - .iter() - .map(|x| self.inline(x)) - .collect::, CannotInline>>()?; - IrSpanned { - span, - node: ExprCompiled::List(xs), - } - } - ExprCompiled::Tuple(xs) => { - let xs = xs - .iter() - .map(|x| self.inline(x)) - .collect::, CannotInline>>()?; - IrSpanned { - span, - node: ExprCompiled::tuple(xs, self.ctx.frozen_heap()), - } - } - ExprCompiled::Dict(xs) => { - let xs = xs - .iter() - .map(|(x, y)| Ok((self.inline(x)?, self.inline(y)?))) - .collect::, CannotInline>>()?; - IrSpanned { - span, - node: ExprCompiled::Dict(xs), - } - } - ExprCompiled::Builtin2(op, l_r) => { - let (l, r) = &**l_r; - let l = self.inline(l)?; - let r = self.inline(r)?; - IrSpanned { - span, - node: ExprCompiled::bin_op(*op, l, r, self.ctx), - } - } - ExprCompiled::Index2(a_i0_i1) => { - let (a, i0, i1) = &**a_i0_i1; - let a = self.inline(a)?; - let i0 = self.inline(i0)?; - let i1 = self.inline(i1)?; - IrSpanned { - span, - node: ExprCompiled::Index2(Box::new((a, i0, i1))), - } - } - ExprCompiled::Builtin1(op, x) => { - let x = self.inline(x)?; - IrSpanned { - span, - node: ExprCompiled::un_op(span, op, x, self.ctx), - } - } - ExprCompiled::Slice(l_a_b_c) => { - let (l, a, b, c) = &**l_a_b_c; - let l = self.inline(l)?; - let a = self.inline_opt(a.as_ref())?; - let b = self.inline_opt(b.as_ref())?; - let c = self.inline_opt(c.as_ref())?; - IrSpanned { - span, - node: ExprCompiled::Slice(Box::new((l, a, b, c))), - } - } - ExprCompiled::Seq(a_b) => { 
- let (a, b) = &**a_b; - let a = self.inline(a)?; - let b = self.inline(b)?; - ExprCompiled::seq(a, b) - } - ExprCompiled::Call(call) => return self.inline_call(call), - // These should be unreachable, but it is safer - // to do unnecessary work in compiler than crash. - ExprCompiled::LocalCaptured(..) - | ExprCompiled::Module(..) - | ExprCompiled::Compr(..) - | ExprCompiled::Def(..) => return Err(CannotInline), - }) - } -} diff --git a/starlark-rust/starlark/src/eval/compiler/error.rs b/starlark-rust/starlark/src/eval/compiler/error.rs new file mode 100644 index 0000000000000..883210442f3f5 --- /dev/null +++ b/starlark-rust/starlark/src/eval/compiler/error.rs @@ -0,0 +1,32 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use starlark_syntax::eval_exception::EvalException; + +pub(crate) struct CompilerInternalError(EvalException); + +impl CompilerInternalError { + #[cold] + pub(crate) fn from_eval_exception(e: EvalException) -> CompilerInternalError { + CompilerInternalError(e.into_internal_error()) + } + + #[cold] + pub(crate) fn into_eval_exception(self) -> EvalException { + self.0 + } +} diff --git a/starlark-rust/starlark/src/eval/compiler/expr.rs b/starlark-rust/starlark/src/eval/compiler/expr.rs index 777eed0864d11..cf874a507089c 100644 --- a/starlark-rust/starlark/src/eval/compiler/expr.rs +++ b/starlark-rust/starlark/src/eval/compiler/expr.rs @@ -34,7 +34,7 @@ use starlark_syntax::syntax::ast::StmtP; use thiserror::Error; use crate::codemap::Spanned; -use crate::collections::symbol_map::Symbol; +use crate::collections::symbol::symbol::Symbol; use crate::environment::slots::ModuleSlotId; use crate::errors::did_you_mean::did_you_mean; use crate::eval::compiler::args::ArgsCompiledValue; @@ -43,6 +43,7 @@ use crate::eval::compiler::compr::ComprCompiled; use crate::eval::compiler::constants::Constants; use crate::eval::compiler::def::DefCompiled; use crate::eval::compiler::def::FrozenDef; +use crate::eval::compiler::error::CompilerInternalError; use crate::eval::compiler::expr_bool::ExprCompiledBool; use crate::eval::compiler::known::list_to_tuple; use crate::eval::compiler::opt_ctx::OptCtx; @@ -58,25 +59,27 @@ use crate::eval::runtime::frame_span::FrameSpan; use crate::eval::runtime::frozen_file_span::FrozenFileSpan; use crate::eval::runtime::slots::LocalCapturedSlotId; use crate::eval::runtime::slots::LocalSlotId; +use crate::eval::Arguments; +use crate::eval::Evaluator; +use crate::values::bool::StarlarkBool; use crate::values::function::BoundMethodGen; use crate::values::function::FrozenBoundMethod; -use crate::values::layout::value_not_special::FrozenValueNotSpecial; use crate::values::list::ListRef; +use crate::values::range::Range; use crate::values::string::interpolation::parse_percent_s_one; -use crate::values::types::bool::StarlarkBool; use crate::values::types::dict::Dict; use crate::values::types::ellipsis::Ellipsis; use 
crate::values::types::float::StarlarkFloat; -use crate::values::types::inline_int::InlineInt; -use crate::values::types::int_or_big::StarlarkInt; +use crate::values::types::int::inline_int::InlineInt; +use crate::values::types::int::int_or_big::StarlarkInt; use crate::values::types::list::value::FrozenListData; use crate::values::types::list::value::ListData; -use crate::values::types::range::Range; use crate::values::types::string::dot_format::format_one; use crate::values::types::string::interpolation::percent_s_one; use crate::values::types::tuple::value::Tuple; -use crate::values::types::unbound::MaybeUnboundValue; +use crate::values::types::unbound::UnboundValue; use crate::values::FrozenHeap; +use crate::values::FrozenRef; use crate::values::FrozenStringValue; use crate::values::FrozenValue; use crate::values::FrozenValueTyped; @@ -143,7 +146,7 @@ pub(crate) enum Builtin1 { } impl Builtin1 { - fn eval<'v>(&self, v: FrozenValue, ctx: &mut OptCtx<'v, '_, '_>) -> Option> { + fn eval<'v>(&self, v: FrozenValue, ctx: &mut OptCtx<'v, '_, '_, '_>) -> Option> { match self { Builtin1::Minus => v.to_value().minus(ctx.heap()).ok(), Builtin1::Plus => v.to_value().plus(ctx.heap()).ok(), @@ -201,7 +204,7 @@ pub(crate) enum Builtin2 { } impl Builtin2 { - fn eval<'v>(self, a: Value<'v>, b: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + fn eval<'v>(self, a: Value<'v>, b: Value<'v>, heap: &'v Heap) -> crate::Result> { match self { Builtin2::Equals => a.equals(b).map(Value::new_bool), Builtin2::Compare(cmp) => a.compare(b).map(|c| Value::new_bool(cmp.apply(c))), @@ -354,26 +357,6 @@ impl ExprCompiled { FrozenStringValue::new(self.as_value()?) } - /// Try to extract `[c0, c1, ..., cn]` from this expression. - pub(crate) fn as_short_list_of_consts(&self) -> Option> { - // Prevent exponential explosion during optimization. - const MAX_LEN: usize = 1000; - match self { - ExprCompiled::List(xs) if xs.len() <= MAX_LEN => { - xs.try_map(|x| x.as_value().ok_or(())).ok() - } - ExprCompiled::Value(v) => { - let list = FrozenListData::from_frozen_value(v)?; - if list.len() <= MAX_LEN { - Some(list.content().to_owned()) - } else { - None - } - } - _ => None, - } - } - /// Iterable produced by this expression results in empty. pub(crate) fn is_iterable_empty(&self) -> bool { match self { @@ -464,7 +447,49 @@ impl ExprCompiled { } } +enum ExprShortList<'a> { + Exprs(&'a [IrSpanned]), + Constants(&'a [FrozenValue]), +} + +impl<'a> IrSpanned> { + fn as_exprs(&self) -> Vec> { + match &self.node { + ExprShortList::Exprs(exprs) => exprs.to_vec(), + ExprShortList::Constants(constants) => constants + .iter() + .map(|c| IrSpanned { + node: ExprCompiled::Value(*c), + span: self.span, + }) + .collect(), + } + } +} + impl IrSpanned { + /// Try to extract `[e0, e1, ..., en]` from this expression. + fn as_short_list(&self) -> Option> { + // Prevent exponential explosion during optimization. 
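+        // (Folding `l + r` below copies both operand lists into a fresh literal,
+        // so without a cap, repeated folds over long lists could blow up the IR.)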
+ const MAX_LEN: usize = 1000; + match &self.node { + ExprCompiled::List(xs) if xs.len() <= MAX_LEN => Some(ExprShortList::Exprs(xs)), + ExprCompiled::Value(v) => { + let list = FrozenListData::from_frozen_value(v)?; + if list.len() <= MAX_LEN { + Some(ExprShortList::Constants(list.content())) + } else { + None + } + } + _ => None, + } + .map(|node| IrSpanned { + node, + span: self.span, + }) + } + pub(crate) fn optimize(&self, ctx: &mut OptCtx) -> IrSpanned { let span = self.span; let expr = match &self.node { @@ -633,8 +658,8 @@ impl ExprCompiled { ) -> ExprCompiled { if let Some(v) = l.as_string() { if let Some((before, after)) = parse_percent_s_one(&v) { - let before = ctx.frozen_heap().alloc_str(&before); - let after = ctx.frozen_heap().alloc_str(&after); + let before = ctx.frozen_heap().alloc_str_intern(&before); + let after = ctx.frozen_heap().alloc_str_intern(&after); return ExprCompiled::percent_s_one(before, r, after, ctx); } } @@ -651,7 +676,7 @@ impl ExprCompiled { if let Ok(value) = percent_s_one(before.as_str(), arg.to_value(), after.as_str(), ctx.heap()) { - let value = ctx.frozen_heap().alloc_str(value.as_str()); + let value = ctx.frozen_heap().alloc_str_intern(value.as_str()); return ExprCompiled::Value(value.to_frozen_value()); } } @@ -667,7 +692,7 @@ impl ExprCompiled { ) -> ExprCompiled { if let Some(arg) = arg.as_value() { let value = format_one(&before, arg.to_value(), &after, ctx.heap()); - let value = ctx.frozen_heap().alloc_str(value.as_str()); + let value = ctx.frozen_heap().alloc_str_intern(value.as_str()); return ExprCompiled::Value(value.to_frozen_value()); } @@ -675,17 +700,8 @@ impl ExprCompiled { } fn add(l: IrSpanned, r: IrSpanned) -> ExprCompiled { - let span = l.span.merge(&r.span); - if let (Some(l), Some(r)) = (l.as_short_list_of_consts(), r.as_short_list_of_consts()) { - let lr = l - .iter() - .chain(r.iter()) - .map(|x| IrSpanned { - node: ExprCompiled::Value(*x), - span, - }) - .collect(); - return ExprCompiled::List(lr); + if let (Some(l), Some(r)) = (l.as_short_list(), r.as_short_list()) { + return ExprCompiled::List(l.as_exprs().into_iter().chain(r.as_exprs()).collect()); } ExprCompiled::Builtin2(Builtin2::Add, Box::new((l, r))) } @@ -796,7 +812,9 @@ impl ExprCompiled { } else if let Some(v) = v.unpack_str() { if v.len() <= 1000 { // If string, copy it to frozen heap. - Some(ExprCompiled::Value(heap.alloc_str(v).to_frozen_value())) + Some(ExprCompiled::Value( + heap.alloc_str_intern(v).to_frozen_value(), + )) } else { // Long strings may lead to exponential explosion in the optimizer, // so skips optimizations for them. @@ -856,11 +874,12 @@ impl ExprCompiled { // We assume `getattr` has no side effects. let v = get_attr_hashed_raw(left.to_value(), attr, ctx.heap()).ok()?; match v { - MemberOrValue::Member(m) => match MaybeUnboundValue::new(m) { - MaybeUnboundValue::Method(m) => { - Some(ctx.frozen_heap().alloc_simple(BoundMethodGen::new(left, m))) - } - MaybeUnboundValue::Attr(..) => None, + MemberOrValue::Member(m) => match m { + UnboundValue::Method(m, _) => Some( + ctx.frozen_heap() + .alloc_simple(BoundMethodGen::new(left, *m)), + ), + UnboundValue::Attr(..) => None, }, MemberOrValue::Value(v) => v.unpack_frozen(), } @@ -1089,7 +1108,7 @@ impl CompilerExprUtil

<P> for ExprP<P> { … }

[The remainder of this expr.rs hunk, and the markup of a new HTML page for the starlark_js_example web demo, were lost in extraction. Recoverable text from the page: a "Starlark evaluator" heading and the note "Using starlark-rust compiled to web assembly. Change the input to see it update."]
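A minimal sketch of a host-side decoder for the buffer layout that `evaluate_buffers` in the next hunk produces; `decode_result` is a hypothetical helper for illustration (a real host would do this in JavaScript), not part of the crate:

    // Layout produced by `evaluate_buffers`: byte 0 = success flag, bytes 1..4 =
    // padding, bytes 4..8 = little-endian u32 message length, then the message.
    fn decode_result(buf: &[u8]) -> Result<String, String> {
        let ok = buf[0] == 1;
        let len = u32::from_le_bytes(buf[4..8].try_into().unwrap()) as usize;
        let msg = String::from_utf8(buf[8..8 + len].to_vec()).expect("message is UTF-8");
        if ok { Ok(msg) } else { Err(msg) }
    }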
    + + + + + diff --git a/starlark-rust/starlark_js_example/src/lib.rs b/starlark-rust/starlark_js_example/src/lib.rs new file mode 100644 index 0000000000000..e1c1c68a771d5 --- /dev/null +++ b/starlark-rust/starlark_js_example/src/lib.rs @@ -0,0 +1,64 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::mem; +use std::slice; +use std::str; + +use starlark::environment::Globals; +use starlark::environment::Module; +use starlark::eval::Evaluator; +use starlark::syntax::AstModule; +use starlark::syntax::Dialect; +use starlark::values::Value; + +#[no_mangle] +pub extern "C" fn allocation(n: usize) -> *mut u8 { + mem::ManuallyDrop::new(Vec::with_capacity(n)).as_mut_ptr() +} + +#[no_mangle] +pub unsafe extern "C" fn evaluate(s: *const u8) -> *mut u8 { + let length = u32::from_le_bytes(*(s as *const [u8; 4])) as usize; + let input = slice::from_raw_parts(s.offset(4), length); + let output = evaluate_buffers(input); + mem::ManuallyDrop::new(output).as_mut_ptr() +} + +fn evaluate_buffers(input: &[u8]) -> Vec { + let contents = str::from_utf8(input).unwrap(); + let result = evaluate_starlark(contents); + let success = result.is_ok(); + let message = result.unwrap_or_else(|e| e.into_anyhow().to_string()); + let len = message.len(); + let mut buffer = Vec::with_capacity(len + 8); + buffer.push(if success { 1 } else { 0 }); + buffer.extend(vec![0; 3]); + buffer.extend_from_slice(&(len as u32).to_le_bytes()); + buffer.extend_from_slice(message.as_bytes()); + buffer +} + +fn evaluate_starlark(content: &str) -> Result { + let ast: AstModule = + AstModule::parse("hello_world.star", content.to_owned(), &Dialect::Standard)?; + let globals = Globals::standard(); + let module: Module = Module::new(); + let mut eval: Evaluator = Evaluator::new(&module); + let res: Value = eval.eval_module(ast, &globals)?; + Ok(res.to_string()) +} diff --git a/starlark-rust/starlark_lsp/BUCK b/starlark-rust/starlark_lsp/BUCK index 62e265394c2f2..ef378fae15f82 100644 --- a/starlark-rust/starlark_lsp/BUCK +++ b/starlark-rust/starlark_lsp/BUCK @@ -5,15 +5,27 @@ oncall("build_infra") rust_library( name = "starlark_lsp", srcs = glob(["src/**/*.rs"]), - doctests = False, # FIXME rustc_flags = [ "--cfg=rust_nightly", ], test_deps = [ - "fbsource//third-party/rust:maplit", "fbsource//third-party/rust:regex", "fbsource//third-party/rust:textwrap", ], + test_os_deps = [ + ( + "linux", + [ + "fbsource//third-party/rust:maplit", + ], + ), + ( + "macos", + [ + "fbsource//third-party/rust:maplit", + ], + ), + ], deps = [ "fbsource//third-party/rust:anyhow", "fbsource//third-party/rust:derivative", diff --git a/starlark-rust/starlark_lsp/Cargo.toml b/starlark-rust/starlark_lsp/Cargo.toml index ef1e1463457f6..940103dea876f 100644 --- a/starlark-rust/starlark_lsp/Cargo.toml +++ b/starlark-rust/starlark_lsp/Cargo.toml @@ -1,34 +1,36 @@ [package] -name = "starlark_lsp" -edition = "2021" -version = "0.9.0" -license = 
"Apache-2.0" -description = "LSP bindings for starlark" -documentation = "https://docs.rs/starlark" -repository = "https://github.com/facebookexperimental/starlark-rust" authors = [ "Damien Martin-Guillerez ", "Facebook", ] -keywords = ["starlark", "skylark", "language", "interpreter"] categories = ["parser-implementations", "development-tools"] +description = "LSP bindings for starlark" +documentation = "https://docs.rs/starlark" +edition = "2021" +keywords = ["starlark", "skylark", "language", "interpreter"] +license = "Apache-2.0" +name = "starlark_lsp" +repository = "https://github.com/facebook/starlark-rust" +version = "0.12.0" [dependencies] anyhow = "1.0.65" derivative = "2.2" -derive_more = "0.99" +derive_more.workspace = true dupe = { workspace = true } -itertools = "0.10" -thiserror = "1.0.36" -serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0" +itertools = "0.13.0" lsp-server = "0.7.2" lsp-types = "0.94.1" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +thiserror = "1.0.36" -starlark_syntax = { version = "0.9.0", path = "../starlark_syntax" } -starlark = { version = "0.9.0", path = "../starlark" } +starlark = { version = "0.12.0", path = "../starlark" } +starlark_syntax = { version = "0.12.0", path = "../starlark_syntax" } [dev-dependencies] -maplit = "1.0.2" regex = "1.5.4" textwrap = "0.11" + +[target.'cfg(not(windows))'.dev-dependencies] +maplit = "1.0.2" diff --git a/starlark-rust/starlark_lsp/src/bind.rs b/starlark-rust/starlark_lsp/src/bind.rs index 40e4ee1ff273e..0b5d994562855 100644 --- a/starlark-rust/starlark_lsp/src/bind.rs +++ b/starlark-rust/starlark_lsp/src/bind.rs @@ -183,10 +183,10 @@ fn dot_access<'a>(lhs: &'a AstExpr, attribute: &'a AstString, res: &mut Vec { + Expr::Call(name, args) => { f(name, attributes, res); // make sure that if someone does a(b).c, 'b' is bound and considered used. 
- for parameter in parameters { + for parameter in &args.args { expr(parameter.expr(), res); } } @@ -343,12 +343,9 @@ pub(crate) fn scope(module: &AstModule) -> Scope { } #[cfg(test)] -mod test { +mod tests { use std::iter; - use starlark::codemap::Pos; - use starlark::codemap::Span; - use starlark::syntax::AstModule; use starlark::syntax::Dialect; use starlark_syntax::slice_vec_ext::SliceExt; use starlark_syntax::slice_vec_ext::VecExt; @@ -356,7 +353,7 @@ mod test { use super::*; #[test] - fn dotted_access_is_correct() -> anyhow::Result<()> { + fn dotted_access_is_correct() -> starlark::Result<()> { let contents = "x1.y\nx1.y().z\nx2().y\nx2().y.z"; let expected = vec![ @@ -367,7 +364,11 @@ mod test { ] .into_map(|names| names.into_map(String::from)); - let module = AstModule::parse("foo.star", contents.to_owned(), &Dialect::Extended)?; + let module = AstModule::parse( + "foo.star", + contents.to_owned(), + &Dialect::AllOptionsInternal, + )?; let scope = scope(&module); let found_bindings = scope @@ -388,9 +389,13 @@ mod test { } #[test] - fn dotted_contains_is_correct() -> anyhow::Result<()> { + fn dotted_contains_is_correct() -> starlark::Result<()> { let contents = "x1.y1.z1\nx2.y2.z2"; - let module = AstModule::parse("foo.star", contents.to_owned(), &Dialect::Extended)?; + let module = AstModule::parse( + "foo.star", + contents.to_owned(), + &Dialect::AllOptionsInternal, + )?; let scope = scope(&module); let get = scope diff --git a/starlark-rust/starlark_lsp/src/completion.rs b/starlark-rust/starlark_lsp/src/completion.rs index da2e0fc1650f4..d082f2ae4bb97 100644 --- a/starlark-rust/starlark_lsp/src/completion.rs +++ b/starlark-rust/starlark_lsp/src/completion.rs @@ -29,11 +29,10 @@ use lsp_types::MarkupKind; use lsp_types::Range; use lsp_types::TextEdit; use starlark::codemap::ResolvedSpan; -use starlark::docs::markdown::render_doc_item; +use starlark::docs::markdown::render_doc_item_no_link; use starlark::docs::markdown::render_doc_param; use starlark::docs::DocItem; use starlark::docs::DocMember; -use starlark::docs::DocParam; use starlark_syntax::codemap::ResolvedPos; use starlark_syntax::syntax::ast::StmtP; use starlark_syntax::syntax::module::AstModuleFields; @@ -106,14 +105,14 @@ impl Backend { .map(|doc| { Documentation::MarkupContent(MarkupContent { kind: MarkupKind::Markdown, - value: render_doc_item(&value.name, &doc), + value: render_doc_item_no_link(&value.name, &doc), }) }) .or_else(|| { - value.param.map(|doc| { + value.param.map(|(starred_name, doc)| { Documentation::MarkupContent(MarkupContent { kind: MarkupKind::Markdown, - value: render_doc_param(&doc), + value: render_doc_param(starred_name, &doc), }) }) }), @@ -262,17 +261,13 @@ impl Backend { SymbolKind::Variable => None, }) .and_then(|docs| match docs { - DocItem::Function(doc_function) => Some( + DocItem::Member(DocMember::Function(doc_function)) => Some( doc_function .params - .into_iter() - .filter_map(|param| match param { - DocParam::Arg { name, .. 
} => Some(name), - _ => None, - }) - .filter(|name| !previously_used_named_parameters.contains(name)) - .map(|name| CompletionItem { - label: name, + .regular_params() + .filter(|p| !previously_used_named_parameters.contains(&p.name)) + .map(|p| CompletionItem { + label: p.name.to_owned(), kind: Some(CompletionItemKind::PROPERTY), ..Default::default() }) @@ -309,17 +304,14 @@ impl Backend { .find(|symbol| &symbol.0 == name) { Some(symbol) => match symbol.1 { - DocMember::Function(doc_function) => Some( + DocItem::Member(DocMember::Function(doc_function)) => Some( doc_function .params - .into_iter() - .filter_map(|param| match param { - DocParam::Arg { name, .. } => Some(CompletionItem { - label: name, - kind: Some(CompletionItemKind::PROPERTY), - ..Default::default() - }), - _ => None, + .regular_params() + .map(|param| CompletionItem { + label: param.name.to_owned(), + kind: Some(CompletionItemKind::PROPERTY), + ..Default::default() }) .collect(), ), diff --git a/starlark-rust/starlark_lsp/src/definition.rs b/starlark-rust/starlark_lsp/src/definition.rs index 0a4413d420171..ad0ce30531546 100644 --- a/starlark-rust/starlark_lsp/src/definition.rs +++ b/starlark-rust/starlark_lsp/src/definition.rs @@ -508,7 +508,7 @@ impl LspModule { } } - for arg in args { + for arg in &args.args { if let ArgumentP::Named(arg_name, arg_expr) = &arg.node { if arg_name.node != member { continue; @@ -608,11 +608,6 @@ pub(crate) mod helpers { use std::collections::hash_map::Entry; use std::collections::HashMap; - use starlark::codemap::CodeMap; - use starlark::codemap::Pos; - use starlark::codemap::ResolvedSpan; - use starlark::codemap::Span; - use starlark::syntax::AstModule; use starlark::syntax::Dialect; use starlark_syntax::codemap::ResolvedPos; use textwrap::dedent; @@ -732,11 +727,12 @@ pub(crate) mod helpers { .expect("identifier to be present") } - pub(crate) fn module(&self) -> anyhow::Result { + pub(crate) fn module(&self) -> starlark::Result { Ok(LspModule::new(AstModule::parse( &self.filename, self.program.clone(), - &Dialect::Extended, + // TODO(nga): use dialect of current module. 
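+                // (`AllOptionsInternal` enables every parser extension, so these
+                // test fixtures parse a superset of what any real dialect accepts.)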
+ &Dialect::AllOptionsInternal, )?)) } @@ -807,14 +803,14 @@ pub(crate) mod helpers { } #[cfg(test)] -mod test { +mod tests { use textwrap::dedent; use super::helpers::*; use super::*; #[test] - fn find_definition_loaded_symbol() -> anyhow::Result<()> { + fn find_definition_loaded_symbol() -> starlark::Result<()> { let contents = dedent( r#" load("bar.star", print = "other_print"); @@ -861,7 +857,7 @@ mod test { } #[test] - fn find_definition_function_calls() -> anyhow::Result<()> { + fn find_definition_function_calls() -> starlark::Result<()> { let contents = dedent( r#" load("bar.star", "print"); @@ -925,7 +921,7 @@ mod test { } #[test] - fn find_definition_function_params() -> anyhow::Result<()> { + fn find_definition_function_params() -> starlark::Result<()> { let contents = dedent( r#" load("bar.star", "print"); @@ -964,7 +960,7 @@ mod test { } #[test] - fn find_definition_scopes_locals() -> anyhow::Result<()> { + fn find_definition_scopes_locals() -> starlark::Result<()> { let contents = dedent( r#" load("bar.star", "print"); @@ -1036,7 +1032,7 @@ mod test { } #[test] - fn find_definition_unknown_clicks() -> anyhow::Result<()> { + fn find_definition_unknown_clicks() -> starlark::Result<()> { let contents = dedent( r#" load("bar.star", "print"); @@ -1072,7 +1068,7 @@ mod test { } #[test] - fn finds_definition_in_strings() -> anyhow::Result<()> { + fn finds_definition_in_strings() -> starlark::Result<()> { let contents = dedent( r#" "foo1" @@ -1155,7 +1151,7 @@ mod test { } #[test] - fn find_definition_dot_access_unresolved_root() -> anyhow::Result<()> { + fn find_definition_dot_access_unresolved_root() -> starlark::Result<()> { let contents = dedent( r#" foo.bar.baz().quz @@ -1208,7 +1204,7 @@ mod test { } #[test] - fn find_definition_dot_access_loaded_root() -> anyhow::Result<()> { + fn find_definition_dot_access_loaded_root() -> starlark::Result<()> { let contents = dedent( r#" load("defs.bzl", "foo"); @@ -1265,7 +1261,7 @@ mod test { } #[test] - fn find_definition_dot_access_local_root() -> anyhow::Result<()> { + fn find_definition_dot_access_local_root() -> starlark::Result<()> { let contents = dedent( r#" def func_1(foo): diff --git a/starlark-rust/starlark_lsp/src/docs.rs b/starlark-rust/starlark_lsp/src/docs.rs index 17d2b1fe2694b..c814695d6c4d4 100644 --- a/starlark-rust/starlark_lsp/src/docs.rs +++ b/starlark-rust/starlark_lsp/src/docs.rs @@ -17,47 +17,54 @@ use starlark::docs::DocFunction; use starlark::docs::DocParam; +use starlark::docs::DocParams; use starlark::docs::DocProperty; use starlark::docs::DocString; use starlark::docs::DocStringKind; use starlark::typing::Ty; +use starlark_syntax::codemap::CodeMap; use starlark_syntax::syntax::ast::AstAssignTargetP; use starlark_syntax::syntax::ast::AstLiteral; use starlark_syntax::syntax::ast::AstPayload; use starlark_syntax::syntax::ast::AstStmtP; use starlark_syntax::syntax::ast::DefP; use starlark_syntax::syntax::ast::ExprP; -use starlark_syntax::syntax::ast::ParameterP; use starlark_syntax::syntax::ast::StmtP; +use starlark_syntax::syntax::def::DefParams; /// Given the AST node for a `def` statement, return a `DocFunction` if the /// `def` statement has a docstring as its first statement. -pub(crate) fn get_doc_item_for_def(def: &DefP

<P>) -> Option<DocFunction> { +pub(crate) fn get_doc_item_for_def<P: AstPayload>( + def: &DefP<P>
    , + codemap: &CodeMap, +) -> Option { if let Some(doc_string) = peek_docstring(&def.body) { - let args: Vec<_> = def - .params - .iter() - .filter_map(|param| match ¶m.node { - ParameterP::Normal(p, _) - | ParameterP::WithDefaultValue(p, _, _) - | ParameterP::Args(p, _) - | ParameterP::KwArgs(p, _) => Some(DocParam::Arg { - name: p.ident.to_owned(), - docs: None, - typ: Ty::any(), - default_value: None, - }), - _ => None, - }) - .collect(); + // TODO(nga): do not unwrap. + let def = DefParams::unpack(&def.params, codemap).unwrap(); + let dp = |i: usize| -> DocParam { + let param = &def.params[i]; + DocParam { + name: param.ident.ident.clone(), + docs: None, + typ: Ty::any(), + default_value: None, + } + }; + + let doc_params = DocParams { + pos_only: def.indices.pos_only().map(dp).collect(), + pos_or_named: def.indices.pos_or_named().map(dp).collect(), + args: def.indices.args.map(|a| a as usize).map(dp), + named_only: def.indices.named_only(def.params.len()).map(dp).collect(), + kwargs: def.indices.kwargs.map(|a| a as usize).map(dp), + }; let doc_function = DocFunction::from_docstring( DocStringKind::Starlark, - args, + doc_params, // TODO: Figure out how to get a `Ty` from the `def.return_type`. Ty::any(), Some(doc_string), - None, ); Some(doc_function) } else { diff --git a/starlark-rust/starlark_lsp/src/exported.rs b/starlark-rust/starlark_lsp/src/exported.rs index c1eacc5e2700d..7331c3c385cd7 100644 --- a/starlark-rust/starlark_lsp/src/exported.rs +++ b/starlark-rust/starlark_lsp/src/exported.rs @@ -22,8 +22,9 @@ use lsp_types::MarkupContent; use lsp_types::MarkupKind; use starlark::codemap::FileSpan; use starlark::collections::SmallMap; -use starlark::docs::markdown::render_doc_item; +use starlark::docs::markdown::render_doc_item_no_link; use starlark::docs::DocItem; +use starlark::docs::DocMember; use starlark::syntax::AstModule; use starlark_syntax::syntax::ast::AstAssignIdent; use starlark_syntax::syntax::ast::Expr; @@ -87,7 +88,7 @@ impl From for CompletionItem { let documentation = value.docs.map(|docs| { Documentation::MarkupContent(MarkupContent { kind: MarkupKind::Markdown, - value: render_doc_item(&value.name, &docs), + value: render_doc_item_no_link(&value.name, &docs), }) }); Self { @@ -137,7 +138,7 @@ impl AstModuleExportedSymbols for AstModule { add(self, &mut result, name, kind, || { last_node .and_then(|last| get_doc_item_for_assign(last, &assign.lhs)) - .map(DocItem::Property) + .map(|x| DocItem::Member(DocMember::Property(x))) }); }); } @@ -146,7 +147,7 @@ impl AstModuleExportedSymbols for AstModule { add(self, &mut result, name, SymbolKind::Any, || { last_node .and_then(|last| get_doc_item_for_assign(last, dest)) - .map(DocItem::Property) + .map(|x| DocItem::Member(DocMember::Property(x))) }); }); } @@ -162,7 +163,10 @@ impl AstModuleExportedSymbols for AstModule { .filter_map(|param| param.split().0.map(|name| name.to_string())) .collect(), }, - || get_doc_item_for_def(def).map(DocItem::Function), + || { + get_doc_item_for_def(def, self.codemap()) + .map(|x| DocItem::Member(DocMember::Function(x))) + }, ); } _ => {} @@ -181,7 +185,7 @@ mod tests { use super::*; fn module(x: &str) -> AstModule { - AstModule::parse("X", x.to_owned(), &Dialect::Extended).unwrap() + AstModule::parse("X", x.to_owned(), &Dialect::AllOptionsInternal).unwrap() } #[test] diff --git a/starlark-rust/starlark_lsp/src/inspect.rs b/starlark-rust/starlark_lsp/src/inspect.rs index ce342a6a95382..c586213ea1034 100644 --- a/starlark-rust/starlark_lsp/src/inspect.rs +++ 
b/starlark-rust/starlark_lsp/src/inspect.rs @@ -184,12 +184,12 @@ impl AstModuleInspect for AstModule { continue; } match &arg.node { - ParameterP::Normal(_, Some(type_)) => { + ParameterP::Normal(_, Some(type_), None) => { if type_.span.contains(position) { return Some(AutocompleteType::Type); } } - ParameterP::WithDefaultValue(_, type_, expr) => { + ParameterP::Normal(_, type_, Some(expr)) => { if let Some(type_) = type_ { if type_.span.contains(position) { return Some(AutocompleteType::Type); @@ -228,7 +228,8 @@ impl AstModuleInspect for AstModule { return Some(AutocompleteType::Default); } let get_previously_used_argument_names = || { - args.iter() + args.args + .iter() .filter_map(|arg| match arg { AstArgumentP { node: ArgumentP::Named(name, _), @@ -238,7 +239,7 @@ impl AstModuleInspect for AstModule { }) .collect() }; - for arg in args { + for arg in &args.args { if !arg.span.contains(position) { continue; } diff --git a/starlark-rust/starlark_lsp/src/lib.rs b/starlark-rust/starlark_lsp/src/lib.rs index afe390571d571..496a94f93368d 100644 --- a/starlark-rust/starlark_lsp/src/lib.rs +++ b/starlark-rust/starlark_lsp/src/lib.rs @@ -18,6 +18,9 @@ //! The server that allows IDEs to evaluate and interpret starlark code according //! to the [Language Server Protocol](https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/). +// Lints that don't necessarily make sense +#[allow(clippy::needless_lifetimes)] +#[allow(clippy::type_complexity)] mod bind; pub mod completion; mod definition; diff --git a/starlark-rust/starlark_lsp/src/loaded.rs b/starlark-rust/starlark_lsp/src/loaded.rs index 64fda557657e6..636f5b815bb11 100644 --- a/starlark-rust/starlark_lsp/src/loaded.rs +++ b/starlark-rust/starlark_lsp/src/loaded.rs @@ -38,7 +38,7 @@ pub(crate) trait AstModuleLoadedSymbols { } impl AstModuleLoadedSymbols for AstModule { - fn loaded_symbols<'a>(&'a self) -> Vec> { + fn loaded_symbols(&self) -> Vec> { top_level_stmts(self.statement()) .into_iter() .filter_map(|x| match &x.node { @@ -63,7 +63,7 @@ mod tests { use super::*; fn module(x: &str) -> AstModule { - AstModule::parse("X", x.to_owned(), &Dialect::Extended).unwrap() + AstModule::parse("X", x.to_owned(), &Dialect::AllOptionsInternal).unwrap() } #[test] diff --git a/starlark-rust/starlark_lsp/src/server.rs b/starlark-rust/starlark_lsp/src/server.rs index abe57eed6c1c3..19cf574682cd9 100644 --- a/starlark-rust/starlark_lsp/src/server.rs +++ b/starlark-rust/starlark_lsp/src/server.rs @@ -88,9 +88,9 @@ use serde::Serialize; use serde::Serializer; use starlark::codemap::ResolvedSpan; use starlark::codemap::Span; -use starlark::docs::markdown::render_doc_item; -use starlark::docs::markdown::render_doc_member; +use starlark::docs::markdown::render_doc_item_no_link; use starlark::docs::markdown::render_doc_param; +use starlark::docs::DocItem; use starlark::docs::DocMember; use starlark::docs::DocModule; use starlark::syntax::AstModule; @@ -148,14 +148,14 @@ pub enum LspUrlError { #[derive(Clone, Debug, Hash, Eq, PartialEq, Display)] pub enum LspUrl { /// A "file://" url with a path sent from the LSP client. - #[display(fmt = "file://{}", "_0.display()")] + #[display("file://{}", _0.display())] File(PathBuf), /// A "starlark:" url. This is mostly used for native types that don't actually /// exist on the filesystem. The path component always has a leading slash. - #[display(fmt = "starlark:{}", "_0.display()")] + #[display("starlark:{}", _0.display())] Starlark(PathBuf), /// Any other type. 
Often should just be ignored, or return an error. - #[display(fmt = "{}", "_0")] + #[display("{}", _0)] Other(Url), } @@ -406,27 +406,7 @@ impl Backend { ServerCapabilities { text_document_sync: Some(TextDocumentSyncCapability::Kind(TextDocumentSyncKind::FULL)), definition_provider, - completion_provider: Some(CompletionOptions { - trigger_characters: Some(vec![ - // e.g. function call - "(".to_owned(), - // e.g. list creation, function call - ",".to_owned(), - // e.g. when typing a load path - "/".to_owned(), - // e.g. dict creation - ":".to_owned(), - // e.g. variable assignment - "=".to_owned(), - // e.g. list creation - "[".to_owned(), - // e.g. string literal (load path, target name) - "\"".to_owned(), - // don't lose autocomplete when typing a space, e.g. after a comma - " ".to_owned(), - ]), - ..Default::default() - }), + completion_provider: Some(CompletionOptions::default()), hover_provider: Some(HoverProviderCapability::Simple(true)), ..ServerCapabilities::default() } @@ -929,13 +909,13 @@ impl Backend { .map(|(symbol, documentation)| CompletionItem { label: symbol.clone(), kind: Some(match &documentation { - DocMember::Function { .. } => CompletionItemKind::FUNCTION, + DocItem::Member(DocMember::Function { .. }) => CompletionItemKind::FUNCTION, _ => CompletionItemKind::CONSTANT, }), detail: documentation.get_doc_summary().map(|str| str.to_owned()), documentation: Some(Documentation::MarkupContent(MarkupContent { kind: MarkupKind::Markdown, - value: render_doc_member(&symbol, &documentation), + value: render_doc_item_no_link(&symbol, &documentation), })), ..Default::default() }) @@ -1008,10 +988,7 @@ impl Backend { [ // Actual keywords "and", "else", "load", "break", "for", "not", "continue", "if", "or", "def", "in", - "pass", "elif", "return", "lambda", // - // Reserved words - "as", "import", "is", "class", "nonlocal", "del", "raise", "except", "try", "finally", - "while", "from", "with", "global", "yield", + "pass", "elif", "return", "lambda", ] .into_iter() .map(|keyword| CompletionItem { @@ -1104,14 +1081,14 @@ impl Backend { .doc .map(|docs| Hover { contents: HoverContents::Array(vec![MarkedString::String( - render_doc_item(&symbol.name, &docs), + render_doc_item_no_link(&symbol.name, &docs), )]), range: Some(source.into()), }) .or_else(|| { - symbol.param.map(|docs| Hover { + symbol.param.map(|(starred_name, doc)| Hover { contents: HoverContents::Array(vec![MarkedString::String( - render_doc_param(&docs), + render_doc_param(starred_name, &doc), )]), range: Some(source.into()), }) @@ -1128,7 +1105,7 @@ impl Backend { ast.find_exported_symbol(&name).and_then(|symbol| { symbol.docs.map(|docs| Hover { contents: HoverContents::Array(vec![MarkedString::String( - render_doc_item(&symbol.name, &docs), + render_doc_item_no_link(&symbol.name, &docs), )]), range: Some(source.into()), }) @@ -1180,7 +1157,7 @@ impl Backend { .find(|symbol| symbol.0 == name) .map(|symbol| Hover { contents: HoverContents::Array(vec![MarkedString::String( - render_doc_member(&symbol.0, &symbol.1), + render_doc_item_no_link(&symbol.0, &symbol.1), )]), range: Some(source.into()), }) @@ -1387,7 +1364,7 @@ where // TODO(nmj): Some of the windows tests get a bit flaky, especially around // some paths. Revisit later. 
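// A minimal sketch, assuming the `LspUrl` attribute change above tracks the
// derive_more 1.x syntax migration: the old `fmt = "..."` key with
// stringified arguments became a plain format string taking real expressions.
// The enum below is illustrative only, not part of the change:
#[derive(derive_more::Display)]
enum ExampleUrl {
    // was: #[display(fmt = "file://{}", "_0.display()")]
    #[display("file://{}", _0.display())]
    File(std::path::PathBuf),
    // was: #[display(fmt = "{}", "_0")]
    #[display("{}", _0)]
    Other(String),
}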
#[cfg(all(test, not(windows)))] -mod test { +mod tests { use std::path::Path; use std::path::PathBuf; @@ -2306,6 +2283,29 @@ mod test { Ok(()) } + fn resolve_range_in_string(s: &str, r: Range) -> &str { + let byte_of_pos = |p: Position| { + let l = if p.line == 0 { + 0 + } else { + s.char_indices() + .filter(|(_, c)| *c == '\n') + .nth((p.line - 1).try_into().unwrap()) + .unwrap() + .0 + + 1 + }; + l + s[l..] + .char_indices() + .nth((p.character).try_into().unwrap()) + .unwrap() + .0 + }; + let start = byte_of_pos(r.start); + let end = byte_of_pos(r.end); + &s[start..end] + } + #[test] fn goto_works_for_native_symbols() -> anyhow::Result<()> { if is_wasm() { @@ -2327,29 +2327,11 @@ mod test { ) .trim() .to_owned(); - let native_contents = dedent( - r#" - def native_function1(): - pass - - def native_function2(): - pass - "#, - ) - .trim() - .to_owned(); let foo = FixtureWithRanges::from_fixture(foo_uri.path(), &foo_contents)?; - let native = FixtureWithRanges::from_fixture(native_uri.path(), &native_contents)?; server.open_file(foo_uri.clone(), foo.program())?; - let expected_n1_location = expected_location_link_from_spans( - native_uri, - foo.resolved_span("click_n1"), - native.resolved_span("n1_loc"), - ); - let goto_definition = goto_definition_request( &mut server, foo_uri.clone(), @@ -2359,7 +2341,16 @@ mod test { let request_id = server.send_request(goto_definition)?; let n1_location = goto_definition_response_location(&mut server, request_id)?; - assert_eq!(expected_n1_location, n1_location); + assert_eq!( + n1_location.origin_selection_range, + Some(foo.resolved_span("click_n1").into()) + ); + assert_eq!(n1_location.target_uri, native_uri); + let native_gen_code = server + .docs_as_code(&native_uri.try_into().unwrap()) + .unwrap(); + let target_str = resolve_range_in_string(&native_gen_code, n1_location.target_range); + assert_eq!(target_str, "native_function1"); let expected_n2_location = expected_location_link_from_spans( foo_uri.clone(), diff --git a/starlark-rust/starlark_lsp/src/symbols.rs b/starlark-rust/starlark_lsp/src/symbols.rs index 0f63052c9e285..e727c648a623e 100644 --- a/starlark-rust/starlark_lsp/src/symbols.rs +++ b/starlark-rust/starlark_lsp/src/symbols.rs @@ -21,6 +21,7 @@ use std::collections::HashMap; use starlark::codemap::CodeMap; use starlark::docs::DocItem; +use starlark::docs::DocMember; use starlark::docs::DocParam; use starlark_syntax::codemap::ResolvedPos; use starlark_syntax::syntax::ast::AssignP; @@ -46,7 +47,8 @@ pub(crate) struct Symbol { pub(crate) detail: Option, pub(crate) kind: SymbolKind, pub(crate) doc: Option, - pub(crate) param: Option, + /// Name with `*` prefixes, the param. + pub(crate) param: Option<(String, DocParam)>, } /// Walk the AST recursively and discover symbols. @@ -101,14 +103,14 @@ pub(crate) fn find_symbols_at_location( } StmtP::Def(def) => { // Peek into the function definition to find the docstring. 
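// A usage sketch for the `resolve_range_in_string` test helper added above.
// It assumes ASCII fixtures: the helper counts `char`s, while LSP positions
// default to UTF-16 code units, so the two only agree when the text has no
// multi-unit characters. Lines and characters are zero-based.
let src = "def f():\n    pass\n";
let range = Range::new(Position::new(1, 4), Position::new(1, 8));
assert_eq!(resolve_range_in_string(src, range), "pass");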
- let doc = get_doc_item_for_def(def); + let doc = get_doc_item_for_def(def, codemap); symbols .entry(def.name.ident.clone()) .or_insert_with(|| Symbol { name: def.name.ident.clone(), kind: SymbolKind::Method, detail: None, - doc: doc.clone().map(DocItem::Function), + doc: doc.clone().map(|x| DocItem::Member(DocMember::Function(x))), param: None, }); @@ -118,20 +120,19 @@ pub(crate) fn find_symbols_at_location( .contains(cursor_position) { symbols.extend(def.params.iter().filter_map(|param| match ¶m.node { - ParameterP::Normal(p, _) | ParameterP::WithDefaultValue(p, _, _) => { - Some(( - p.ident.clone(), - Symbol { - name: p.ident.clone(), - kind: SymbolKind::Variable, - detail: None, - doc: None, - param: doc.as_ref().and_then(|doc| { - doc.find_param_with_name(&p.ident).cloned() - }), - }, - )) - } + ParameterP::Normal(p, ..) => Some(( + p.ident.clone(), + Symbol { + name: p.ident.clone(), + kind: SymbolKind::Variable, + detail: None, + doc: None, + param: doc.as_ref().and_then(|doc| { + doc.find_param_with_name(&p.ident) + .map(|(name, doc)| (name, doc.clone())) + }), + }, + )), _ => None, })); walk(codemap, &def.body, cursor_position, symbols); diff --git a/starlark-rust/starlark_lsp/src/test.rs b/starlark-rust/starlark_lsp/src/test.rs index dbcc0b385b3fd..36f53d346e23e 100644 --- a/starlark-rust/starlark_lsp/src/test.rs +++ b/starlark-rust/starlark_lsp/src/test.rs @@ -57,14 +57,10 @@ use serde::de::DeserializeOwned; use starlark::analysis::AstModuleLint; use starlark::codemap::Pos; use starlark::codemap::Span; -use starlark::docs::render_docs_as_code; -use starlark::docs::Doc; use starlark::docs::DocFunction; use starlark::docs::DocItem; use starlark::docs::DocMember; use starlark::docs::DocModule; -use starlark::docs::Identifier; -use starlark::docs::Location; use starlark::errors::EvalMessage; use starlark::syntax::AstModule; use starlark::syntax::Dialect; @@ -138,7 +134,11 @@ impl LspContext for TestServerContext { fn parse_file_with_contents(&self, uri: &LspUrl, content: String) -> LspEvalResult { match uri { LspUrl::File(path) | LspUrl::Starlark(path) => { - match AstModule::parse(&path.to_string_lossy(), content, &Dialect::Extended) { + match AstModule::parse( + &path.to_string_lossy(), + content, + &Dialect::AllOptionsInternal, + ) { Ok(ast) => { let diagnostics = ast .lint(None) @@ -150,7 +150,7 @@ impl LspContext for TestServerContext { } Err(e) => { let diagnostics = vec![eval_message_to_lsp_diagnostic( - EvalMessage::from_anyhow(path, &e), + EvalMessage::from_error(path, &e), )]; LspEvalResult { diagnostics, @@ -296,7 +296,12 @@ impl LspContext for TestServerContext { members: self .builtin_symbols .keys() - .map(|name| (name.clone(), DocMember::Function(DocFunction::default()))) + .map(|name| { + ( + name.clone(), + DocItem::Member(DocMember::Function(DocFunction::default())), + ) + }) .collect(), } } @@ -363,25 +368,22 @@ impl TestServer { } /// A static set of "builtins" to use for testing - fn testing_builtins(root: &Path) -> anyhow::Result>> { + fn testing_builtins(root: &Path) -> anyhow::Result> { let prelude_path = root.join("dir/prelude.bzl"); let ret = hashmap! { - LspUrl::try_from(Url::parse("starlark:/native/builtin.bzl")?)? => vec![ - Doc::named_item("native_function1".to_owned(), DocItem::Function(DocFunction::default())), - Doc::named_item("native_function2".to_owned(),DocItem::Function(DocFunction::default())), - ], - LspUrl::try_from(Url::from_file_path(prelude_path).unwrap())? 
=> vec![ - Doc { - id: Identifier { - name: "prelude_function".to_owned(), - location: Some(Location { - path: "//dir/prelude.bzl".to_owned(), - }), - }, - item: DocItem::Function(DocFunction::default()), - custom_attrs: Default::default(), - }, - ] + LspUrl::try_from(Url::parse("starlark:/native/builtin.bzl")?)? => DocModule { + docs: None, + members: [ + ("native_function1".to_owned(), DocItem::Member(DocMember::Function(DocFunction::default()))), + ("native_function2".to_owned(), DocItem::Member(DocMember::Function(DocFunction::default()))), + ].into_iter().collect(), + }, + LspUrl::try_from(Url::from_file_path(prelude_path).unwrap())? => DocModule { + docs: None, + members: [ + ("prelude_function".to_owned(), DocItem::Member(DocMember::Function(DocFunction::default()))), + ].into_iter().collect() + }, }; Ok(ret) } @@ -417,9 +419,9 @@ impl TestServer { let mut builtin_symbols = HashMap::new(); for (u, ds) in builtin { - builtin_docs.insert(u.clone(), render_docs_as_code(&ds)); - for d in ds { - builtin_symbols.insert(d.id.name, u.clone()); + builtin_docs.insert(u.clone(), ds.render_as_code()); + for (sym, _) in ds.members { + builtin_symbols.insert(sym, u.clone()); } } diff --git a/starlark-rust/starlark_map/BUCK b/starlark-rust/starlark_map/BUCK index 05c2a063acbc4..aa8f66ebf7dd3 100644 --- a/starlark-rust/starlark_map/BUCK +++ b/starlark-rust/starlark_map/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") @@ -14,7 +13,7 @@ rust_library( ], deps = [ "fbsource//third-party/rust:equivalent", - "fbsource//third-party/rust:fnv", + "fbsource//third-party/rust:fxhash", "fbsource//third-party/rust:hashbrown", "fbsource//third-party/rust:serde", "//buck2/allocative/allocative:allocative", diff --git a/starlark-rust/starlark_map/Cargo.toml b/starlark-rust/starlark_map/Cargo.toml index bff2d4950df65..ff6d791280c42 100644 --- a/starlark-rust/starlark_map/Cargo.toml +++ b/starlark-rust/starlark_map/Cargo.toml @@ -1,23 +1,26 @@ [package] -name = "starlark_map" -edition = "2021" -version = "0.9.0" -license = "Apache-2.0" -description = "Map implementation with starlark-rust specific optimizations" -documentation = "https://docs.rs/starlark_map" -repository = "https://github.com/facebookexperimental/starlark-rust" authors = [ - "Facebook" + "Facebook", ] +description = "Map implementation with starlark-rust specific optimizations" +documentation = "https://docs.rs/starlark_map" +edition = "2021" +license = "Apache-2.0" +name = "starlark_map" +repository = "https://github.com/facebook/starlark-rust" +version = "0.12.0" [dependencies] -dupe = { workspace = true } allocative = { workspace = true, features = ["hashbrown"] } +dupe = { workspace = true } -fnv = "1.0.7" -hashbrown = { version = "0.12.3", features = ["raw"] } equivalent = { workspace = true } +fxhash = "0.2.1" +hashbrown = { version = "0.14.3", features = ["raw"] } serde = { version = "1.0", features = ["derive"] } [dev-dependencies] serde_json = "1.0.48" + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ["cfg(rust_nightly)"] } diff --git a/starlark-rust/starlark_map/src/hasher.rs b/starlark-rust/starlark_map/src/hasher.rs index a85422637d741..b48aac4213c3a 100644 --- a/starlark-rust/starlark_map/src/hasher.rs +++ b/starlark-rust/starlark_map/src/hasher.rs @@ -19,7 +19,7 @@ use std::hash::BuildHasher; use std::hash::Hasher; use dupe::Dupe; -use fnv::FnvHasher; +use fxhash::FxHasher64; use 
crate::hash_value::StarlarkHashValue; @@ -27,7 +27,10 @@ use crate::hash_value::StarlarkHashValue; /// /// Starlark relies on stable hashing, and this is the hasher. #[derive(Default)] -pub struct StarlarkHasher(FnvHasher); +pub struct StarlarkHasher( + // TODO(nga): `FxHasher64` is endian-dependent, this is not right. + FxHasher64, +); impl StarlarkHasher { /// Creates a new hasher. @@ -38,7 +41,7 @@ impl StarlarkHasher { /// Finish the hash computation and return the result. #[inline] - pub fn finish_small(self) -> StarlarkHashValue { + pub fn finish_small(&self) -> StarlarkHashValue { // NOTE: Here we throw away half the key material we are given, // taking only the lower 32 bits. // Not a problem because `DefaultHasher` produces well-swizzled bits. @@ -56,6 +59,36 @@ impl Hasher for StarlarkHasher { fn write(&mut self, bytes: &[u8]) { self.0.write(bytes) } + + #[inline] + fn write_u8(&mut self, i: u8) { + self.0.write_u8(i) + } + + #[inline] + fn write_u16(&mut self, i: u16) { + self.0.write_u16(i) + } + + #[inline] + fn write_u32(&mut self, i: u32) { + self.0.write_u32(i) + } + + #[inline] + fn write_u64(&mut self, i: u64) { + self.0.write_u64(i) + } + + #[inline] + fn write_u128(&mut self, i: u128) { + self.0.write_u128(i) + } + + #[inline] + fn write_usize(&mut self, i: usize) { + self.0.write_usize(i) + } } /// [`BuildHasher`] implementation which produces [`StarlarkHasher`]. diff --git a/starlark-rust/starlark_map/src/lib.rs b/starlark-rust/starlark_map/src/lib.rs index e5b9b74cdc0c5..5c5e12a7b90aa 100644 --- a/starlark-rust/starlark_map/src/lib.rs +++ b/starlark-rust/starlark_map/src/lib.rs @@ -18,12 +18,13 @@ //! Ordered map optimized for starlark-rust use cases. // Hints we disagree with -#![allow(clippy::from_iter_instead_of_collect)] #![allow(clippy::missing_safety_doc)] #![deny(missing_docs)] #![deny(rustdoc::broken_intra_doc_links)] #![cfg_attr(rust_nightly, feature(core_intrinsics))] #![cfg_attr(rust_nightly, feature(portable_simd))] +#![cfg_attr(rust_nightly, feature(cfg_version))] +#![cfg_attr(rust_nightly, allow(internal_features))] mod hash_value; mod hashed; diff --git a/starlark-rust/starlark_map/src/ordered_map.rs b/starlark-rust/starlark_map/src/ordered_map.rs index 0237183ebfd94..108436ee74d77 100644 --- a/starlark-rust/starlark_map/src/ordered_map.rs +++ b/starlark-rust/starlark_map/src/ordered_map.rs @@ -89,7 +89,7 @@ impl OrderedMap { /// Get a reference to the value associated with the given key. #[inline] - pub fn get(&self, k: &Q) -> Option<&V> + pub fn get<'a, Q>(&'a self, k: &Q) -> Option<&'a V> where Q: Hash + Equivalent + ?Sized, { @@ -98,7 +98,7 @@ impl OrderedMap { /// Get a mutable reference to the value associated with the given key. #[inline] - pub fn get_mut(&mut self, k: &Q) -> Option<&mut V> + pub fn get_mut<'a, Q>(&'a mut self, k: &Q) -> Option<&'a mut V> where Q: Hash + Equivalent + ?Sized, { @@ -144,7 +144,7 @@ impl OrderedMap { where Q: Hash + Equivalent + ?Sized, { - self.0.remove(k) + self.0.shift_remove(k) } /// Clear the map. diff --git a/starlark-rust/starlark_map/src/ordered_set.rs b/starlark-rust/starlark_map/src/ordered_set.rs index db778e84bd7f2..7c6a21d251f8d 100644 --- a/starlark-rust/starlark_map/src/ordered_set.rs +++ b/starlark-rust/starlark_map/src/ordered_set.rs @@ -113,7 +113,7 @@ impl OrderedSet { /// Iterate over the elements. 
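// Sketch of why `StarlarkHasher` now forwards `write_u8`..`write_usize`
// above: `Hasher`'s provided defaults route every integer through
// `write(&i.to_ne_bytes())`, so without the forwarding `FxHasher64`'s fast
// integer paths would never be taken. Call sites are unchanged:
use std::hash::Hash;
let mut hasher = StarlarkHasher::new();
42u64.hash(&mut hasher); // now hits FxHasher64::write_u64 directly
let _hash = hasher.finish_small(); // lower 32 bits as a StarlarkHashValue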
#[inline] - pub fn iter(&self) -> small_set::Iter { + pub fn iter(&self) -> Iter { self.0.iter() } @@ -138,6 +138,15 @@ impl OrderedSet { self.0.insert(value) } + /// Insert an element into the set assuming it is not already present. + #[inline] + pub fn insert_unique_unchecked(&mut self, value: T) + where + T: Hash, + { + self.0.insert_unique_unchecked(value) + } + /// Insert an element if element is not present in the set, /// otherwise return the element. #[inline] @@ -191,6 +200,11 @@ impl OrderedSet { { self.0.union(&other.0) } + + /// Reverse the iteration order of the set. + pub fn reverse(&mut self) { + self.0.reverse(); + } } impl Default for OrderedSet { @@ -230,9 +244,14 @@ impl Hash for OrderedSet { } } +/// Iterator returned by `iter`. +pub type Iter<'a, T> = small_set::Iter<'a, T>; +/// Iterator returned by `into_iter`. +pub type IntoIter = small_set::IntoIter; + impl IntoIterator for OrderedSet { type Item = T; - type IntoIter = small_set::IntoIter; + type IntoIter = IntoIter; #[inline] fn into_iter(self) -> Self::IntoIter { @@ -242,7 +261,7 @@ impl IntoIterator for OrderedSet { impl<'a, T> IntoIterator for &'a OrderedSet { type Item = &'a T; - type IntoIter = small_set::Iter<'a, T>; + type IntoIter = Iter<'a, T>; #[inline] fn into_iter(self) -> Self::IntoIter { @@ -267,6 +286,16 @@ where } } +impl Extend for OrderedSet +where + T: Eq + Hash, +{ + #[inline] + fn extend>(&mut self, iter: I) { + self.0.extend(iter) + } +} + #[cfg(test)] mod tests { use std::cell::Cell; diff --git a/starlark-rust/starlark_map/src/small_map.rs b/starlark-rust/starlark_map/src/small_map.rs new file mode 100644 index 0000000000000..31723deb66bd0 --- /dev/null +++ b/starlark-rust/starlark_map/src/small_map.rs @@ -0,0 +1,1482 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! A Map with deterministic iteration order that specializes its storage based on the number of +//! entries to optimize memory. This is essentially `IndexMap` with two changes: +//! * no index is created for small maps +//! 
* short hashes are stored next to keys + +use std::fmt; +use std::fmt::Debug; +use std::hash::Hash; +use std::hash::Hasher; +use std::marker::PhantomData; +use std::mem; + +use allocative::Allocative; +use equivalent::Equivalent; +use hashbrown::HashTable; +use serde::Deserialize; +use serde::Serialize; + +use crate::hashed::Hashed; +pub use crate::small_map::iter::IntoIter; +pub use crate::small_map::iter::IntoIterHashed; +pub use crate::small_map::iter::IntoKeys; +pub use crate::small_map::iter::IntoValues; +pub use crate::small_map::iter::Iter; +pub use crate::small_map::iter::IterHashed; +pub use crate::small_map::iter::IterMut; +pub use crate::small_map::iter::IterMutUnchecked; +pub use crate::small_map::iter::Keys; +pub use crate::small_map::iter::Values; +pub use crate::small_map::iter::ValuesMut; +use crate::vec_map::VecMap; +use crate::StarlarkHashValue; + +mod iter; + +/// Max size of a map when we do not create an index. +/// 32 is the value where `buck2 cquery some-target` is the fastest and consumes the least memory. +/// Note the test was performed for buck2-specific patterns. +/// On nightly we use SIMD to speed up the search, so use 16 on stable to be safe. +#[cfg(rust_nightly)] +const NO_INDEX_THRESHOLD: usize = 32; +#[cfg(not(rust_nightly))] +const NO_INDEX_THRESHOLD: usize = 16; + +/// A map with deterministic iteration order. +/// +/// This map is similar to [`indexmap::IndexMap`](https://docs.rs/indexmap) +/// with the following differences: +/// - [Small hashes](StarlarkHashValue) are stored next to keys +/// - Index is not created for small maps +#[repr(C)] +#[derive(Clone, Allocative)] +pub struct SmallMap { + entries: VecMap, + /// Map a key to the index in `entries`. + /// This field is initialized when the size of the map exceeds `NO_INDEX_THRESHOLD`. + index: Option>>, +} + +impl Default for SmallMap { + #[inline] + fn default() -> Self { + Self::new() + } +} + +impl Debug for SmallMap { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_map().entries(self.iter()).finish() + } +} + +impl SmallMap { + /// Empty map. + #[inline] + pub const fn new() -> Self { + Self { + entries: VecMap::new(), + index: None, + } + } + + /// Create an empty map with specified capacity. + #[inline] + pub fn with_capacity(n: usize) -> Self { + if n <= NO_INDEX_THRESHOLD { + SmallMap { + entries: VecMap::with_capacity(n), + index: None, + } + } else { + SmallMap { + entries: VecMap::with_capacity(n), + index: Some(Box::new(HashTable::with_capacity(n))), + } + } + } + + /// Verify that the map is internally consistent. + #[cfg(test)] + fn assert_invariants(&self) + where + K: Eq, + { + if let Some(index) = &self.index { + assert_eq!(index.len(), self.entries.len()); + for (i, (k, _)) in self.entries.iter_hashed().enumerate() { + let j = *index + .find(k.hash().promote(), |j| { + &self.entries.get_index(*j).unwrap().0 == k.key() + }) + .unwrap(); + assert_eq!(i, j); + } + } else { + assert!(self.entries.len() <= NO_INDEX_THRESHOLD); + } + } + + /// Drop the index if the map is too small, and the index is not really needed. + /// + /// We don't allocate the index prematurely when we add entries to the map, + /// but we keep it allocated when we remove entries from the map. + /// + /// This function allows reclaiming memory after some entries are removed. + pub fn maybe_drop_index(&mut self) { + if self.entries.len() <= NO_INDEX_THRESHOLD { + self.index = None; + } + } + + /// Key references iterator. 
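// Illustrative sketch of the two storage regimes described above: below
// NO_INDEX_THRESHOLD (16 on stable, 32 on nightly) lookups are a linear scan
// over the entry vector and no index is allocated; crossing the threshold
// materializes the hash index.
let mut m: SmallMap<u32, &str> = SmallMap::new();
for i in 0..16 {
    m.insert(i, "below the stable threshold: index is None");
}
m.insert(16, "17th entry: index is created (on stable)");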
+ #[inline] + pub fn keys(&self) -> Keys { + Keys { + iter: self.entries.keys(), + } + } + + /// Value references iterator. + #[inline] + pub fn values(&self) -> Values { + Values { + iter: self.entries.values(), + } + } + + /// Key owned iterator. + #[inline] + pub fn into_keys(self) -> IntoKeys { + IntoKeys { + iter: self.entries.into_iter(), + } + } + + /// Value owned iterator. + #[inline] + pub fn into_values(self) -> IntoValues { + IntoValues { + iter: self.entries.into_iter(), + } + } + + /// Mutable value references iterator. + #[inline] + pub fn values_mut(&mut self) -> ValuesMut { + ValuesMut { + iter: self.entries.values_mut(), + } + } + + /// Entry references iterator. + #[inline] + pub fn iter(&self) -> Iter<'_, K, V> { + Iter { + iter: self.entries.iter(), + } + } + + /// Entry references with hashes iterator. + #[inline] + pub fn iter_hashed(&self) -> IterHashed { + IterHashed { + iter: self.entries.iter_hashed(), + } + } + + /// Entries with hashes iterator. + #[inline] + pub fn into_iter_hashed(self) -> IntoIterHashed { + IntoIterHashed { + iter: self.entries.into_iter_hashed(), + } + } + + /// Mutable entry references iterator. + #[inline] + pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { + IterMut { + iter: self.entries.iter_mut(), + } + } + + /// Mutable entry references iterator, with mutable key references. + /// + /// This operation is memory safe, but otherwise there are no guarantees + /// if keys are mutated inconsistently (hash or equality changes). + #[inline] + pub fn iter_mut_unchecked(&mut self) -> IterMutUnchecked<'_, K, V> { + IterMutUnchecked { + iter: self.entries.iter_mut_unchecked(), + } + } + + /// Entries iterator. + #[inline] + fn into_iter(self) -> IntoIter { + IntoIter { + iter: self.entries.into_iter(), + } + } + + /// Query the map by a prehashed key. + #[inline] + pub fn get_hashed(&self, key: Hashed<&Q>) -> Option<&V> + where + Q: Equivalent + ?Sized, + { + self.get_index_of_hashed(key) + .map(|index| unsafe { self.entries.get_unchecked(index).1 }) + } + + /// Same as `get_hashed`, but takes the key by value instead of by reference. + /// Sometimes it generates slightly better code for small values. + #[inline] + pub fn get_hashed_by_value(&self, key: Hashed) -> Option<&V> + where + Q: Equivalent, + { + self.get_index_of_hashed_by_value(key) + .map(|index| unsafe { self.entries.get_unchecked(index).1 }) + } + + /// Query the map by a given key. + #[inline] + pub fn get(&self, key: &Q) -> Option<&V> + where + Q: Hash + Equivalent + ?Sized, + { + self.get_hashed(Hashed::new(key)) + } + + /// Query the map by a given key, return an index of the entry + /// along with the entry key and value. + #[inline] + pub fn get_full(&self, key: &Q) -> Option<(usize, &K, &V)> + where + Q: Hash + Equivalent + ?Sized, + { + self.get_full_hashed(Hashed::new(key)) + } + + /// Query the map by a given key, return an index of the entry + /// along with the entry key and value. 
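// A minimal sketch of the prehashed query path above: `Hashed::new` computes
// the `StarlarkHashValue` once, so the probe does not rehash the key.
let map: SmallMap<String, i32> = smallmap! { "a".to_owned() => 1 };
let key = Hashed::new("a");
assert_eq!(map.get_hashed(key), Some(&1));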
+ #[inline] + pub fn get_full_hashed(&self, key: Hashed<&Q>) -> Option<(usize, &K, &V)> + where + Q: Equivalent + ?Sized, + { + self.get_index_of_hashed(key).map(|index| { + let (key, value) = unsafe { self.entries.get_unchecked(index) }; + (index, *key.key(), value) + }) + } + + #[inline] + fn get_index_of_hashed_raw_with_index( + &self, + hash: StarlarkHashValue, + mut eq: impl FnMut(&K) -> bool, + index: &HashTable, + ) -> Option { + index + .find(hash.promote(), |&index| unsafe { + eq(self.entries.get_unchecked(index).0.key()) + }) + .copied() + } + + #[inline] + pub(crate) fn get_index_of_hashed_raw( + &self, + hash: StarlarkHashValue, + eq: impl FnMut(&K) -> bool, + ) -> Option { + match &self.index { + None => self.entries.get_index_of_hashed_raw(hash, eq), + Some(index) => self.get_index_of_hashed_raw_with_index(hash, eq, index), + } + } + + /// Find the index of the given hashed key. + #[inline] + pub fn get_index_of_hashed(&self, key: Hashed<&Q>) -> Option + where + Q: Equivalent + ?Sized, + { + self.get_index_of_hashed_raw(key.hash(), |k| key.key().equivalent(k)) + } + + /// Get the index of the entry given a hashed key. + #[inline] + pub fn get_index_of_hashed_by_value(&self, key: Hashed) -> Option + where + Q: Equivalent, + { + self.get_index_of_hashed_raw(key.hash(), |k| key.key().equivalent(k)) + } + + /// Find an entry by an index. + #[inline] + pub fn get_index(&self, index: usize) -> Option<(&K, &V)> { + self.entries.get_index(index) + } + + /// Find the entry index by a given key. + #[inline] + pub fn get_index_of(&self, key: &Q) -> Option + where + Q: Hash + Equivalent + ?Sized, + { + self.get_index_of_hashed(Hashed::new(key)) + } + + /// Find a mutable value by a hashed key. + #[inline] + pub fn get_mut_hashed(&mut self, key: Hashed<&Q>) -> Option<&mut V> + where + Q: Equivalent + ?Sized, + { + let i = self.get_index_of_hashed(key)?; + debug_assert!(i < self.entries.len()); + Some(unsafe { self.entries.get_unchecked_mut(i).1 }) + } + + /// Find the entry by a given key. + #[inline] + pub fn get_mut(&mut self, key: &Q) -> Option<&mut V> + where + Q: Hash + Equivalent + ?Sized, + { + self.get_mut_hashed(Hashed::new(key)) + } + + /// Find if an entry by a given prehashed key exists. + #[inline] + pub fn contains_key_hashed(&self, key: Hashed<&Q>) -> bool + where + Q: Equivalent + ?Sized, + { + self.get_index_of_hashed(key).is_some() + } + + /// Find if an entry by a given hashed key exists. + #[inline] + pub fn contains_key_hashed_by_value(&self, key: Hashed) -> bool + where + Q: Equivalent, + { + self.get_index_of_hashed_by_value(key).is_some() + } + + /// Find if an entry by a given key exists. + #[inline] + pub fn contains_key(&self, key: &Q) -> bool + where + Q: Hash + Equivalent + ?Sized, + { + self.contains_key_hashed(Hashed::new(key)) + } + + /// Reserve capacity for at least `additional` more elements to be inserted. + #[inline] + pub fn reserve(&mut self, additional: usize) + where + K: Eq, + { + self.entries.reserve(additional); + if let Some(index) = &mut self.index { + index.reserve(additional, Self::hasher(&self.entries)); + } else if self.len() + additional > NO_INDEX_THRESHOLD { + self.create_index(self.len() + additional); + } + } + + /// Current map capacity. + #[inline] + pub fn capacity(&self) -> usize { + self.entries.capacity() + } + + /// Returns a reference to the first key-value pair. + pub fn first(&self) -> Option<(&K, &V)> { + self.iter().next() + } + + /// Returns a reference to the last key-value pair. 
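// Illustrative: `reserve` above also builds the index eagerly when
// `len() + additional` exceeds NO_INDEX_THRESHOLD, so a bulk insert does not
// pay for the vector-to-indexed migration partway through.
let mut m: SmallMap<String, u32> = SmallMap::new();
m.reserve(1000); // index allocated up front
assert!(m.is_empty());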
+ pub fn last(&self) -> Option<(&K, &V)> { + self.iter().next_back() + } + + #[cold] + fn create_index(&mut self, capacity: usize) { + debug_assert!(self.index.is_none()); + debug_assert!(capacity >= self.entries.len()); + let mut index = HashTable::with_capacity(capacity); + for (i, (k, _)) in self.entries.iter_hashed().enumerate() { + index.insert_unique(k.hash().promote(), i, |_| { + unreachable!("Must have enough capacity") + }); + } + self.index = Some(Box::new(index)); + } + + /// Rebuild the index after entries are reordered or removed. + fn rebuild_index(&mut self) { + if let Some(index) = &mut self.index { + index.clear(); + for (i, (k, _)) in self.entries.iter_hashed().enumerate() { + index.insert_unique(k.hash().promote(), i, |_| { + unreachable!("Must have enough capacity") + }); + } + } + } + + /// Hasher for index resize. + #[inline(always)] + fn hasher(entries: &VecMap) -> impl Fn(&usize) -> u64 + '_ { + move |&index| { + debug_assert!(index < entries.len()); + unsafe { entries.get_unchecked(index).0.hash().promote() } + } + } + + /// Insert an entry into the map without checking for a duplicate key. + #[inline] + pub fn insert_hashed_unique_unchecked(&mut self, key: Hashed, val: V) -> (&K, &mut V) { + let hash = key.hash(); + let entry_index = self.entries.len(); + self.entries.insert_hashed_unique_unchecked(key, val); + if let Some(index) = &mut self.index { + index.insert_unique(hash.promote(), entry_index, Self::hasher(&self.entries)); + } else if self.entries.len() == NO_INDEX_THRESHOLD + 1 { + self.create_index(self.entries.len()); + } else { + debug_assert!(self.entries.len() < NO_INDEX_THRESHOLD + 1); + } + // SAFETY: We've just inserted an entry, so we know entries is not empty. + unsafe { + let (key, value) = self.entries.get_unchecked_mut(self.entries.len() - 1); + (key.key(), value) + } + } + + /// Insert a key-value pair into the map. + #[inline] + pub fn insert_hashed(&mut self, key: Hashed, val: V) -> Option + where + K: Eq, + { + match self.get_index_of_hashed_raw(key.hash(), |k| key.key().equivalent(k)) { + None => { + self.insert_hashed_unique_unchecked(key, val); + None + } + Some(i) => unsafe { + debug_assert!(i < self.entries.len()); + Some(mem::replace(self.entries.get_unchecked_mut(i).1, val)) + }, + } + } + + /// Insert a key-value pair into the map. + #[inline] + pub fn insert(&mut self, key: K, val: V) -> Option + where + K: Hash + Eq, + { + self.insert_hashed(Hashed::new(key), val) + } + + /// Insert a key-value pair into the map without checking for a duplicate key. + #[inline] + pub fn insert_unique_unchecked(&mut self, key: K, val: V) -> (&K, &mut V) + where + K: Hash, + { + self.insert_hashed_unique_unchecked(Hashed::new(key), val) + } + + /// Remove the entry for the key. + /// + /// Time complexity of this operation is *O(N)* where *N* is the number of entries in the map. + pub fn shift_remove_hashed(&mut self, key: Hashed<&Q>) -> Option + where + Q: ?Sized + Equivalent, + { + self.shift_remove_hashed_entry(key).map(|(_k, v)| v) + } + + /// Remove the entry for the key. + /// + /// Time complexity of this operation is *O(N)* where *N* is the number of entries in the map. 
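// Sketch of the contract stated above: `shift_remove` keeps the relative
// order of the remaining entries (hence the O(N) cost), unlike a swap-remove.
let mut m = smallmap! { "a" => 1, "b" => 2, "c" => 3 };
m.shift_remove("b");
assert_eq!(m.keys().collect::<Vec<_>>(), vec![&"a", &"c"]);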
+ pub fn shift_remove_hashed_entry(&mut self, key: Hashed<&Q>) -> Option<(K, V)> + where + Q: ?Sized + Equivalent, + { + let hash = key.hash(); + if let Some(index) = &mut self.index { + let entries = &self.entries; + let i = match index.find_entry(hash.promote(), |&i| unsafe { + key.key().equivalent(entries.get_unchecked(i).0.key()) + }) { + Ok(found) => found.remove().0, + Err(_) => return None, + }; + // No need to update the index when the last entry is removed. + if i != self.entries.len() - 1 { + for bucket in index.iter_mut() { + debug_assert!(*bucket != i); + if *bucket > i { + *bucket -= 1; + } + } + } + let (key, value) = self.entries.remove(i); + Some((key.into_key(), value)) + } else { + self.entries.remove_hashed_entry(key) + } + } + + /// Remove the entry by index. This is *O(N)* operation. + pub fn shift_remove_index_hashed(&mut self, i: usize) -> Option<(Hashed, V)> { + if i >= self.len() { + return None; + } + if let Some(index) = &mut self.index { + let mut removed = false; + index.retain(|j| { + if *j == i { + debug_assert!(!removed); + removed = true; + false + } else if *j > i { + *j -= 1; + true + } else { + true + } + }); + debug_assert!(removed); + } + Some(self.entries.remove(i)) + } + + /// Remove the entry by index. This is *O(N)* operation. + pub fn shift_remove_index(&mut self, i: usize) -> Option<(K, V)> { + let (key, value) = self.shift_remove_index_hashed(i)?; + Some((key.into_key(), value)) + } + + /// Remove the entry for the key. + /// + /// Time complexity of this operation is *O(N)* where *N* is the number of entries in the map. + pub fn shift_remove(&mut self, key: &Q) -> Option + where + Q: ?Sized + Hash + Equivalent, + { + self.shift_remove_hashed(Hashed::new(key)) + } + + /// Remove the entry for the key. + /// + /// Time complexity of this operation is *O(N)* where *N* is the number of entries in the map. + pub fn shift_remove_entry(&mut self, key: &Q) -> Option<(K, V)> + where + Q: ?Sized + Hash + Equivalent, + { + self.shift_remove_hashed_entry(Hashed::new(key)) + } + + /// Get the entry (occupied or not) for the key. + #[inline] + pub fn entry_hashed(&mut self, key: Hashed) -> Entry<'_, K, V> + where + K: Eq, + { + match self.get_index_of_hashed_raw(key.hash(), |k| key.key().equivalent(k)) { + Some(i) => { + let (key, value) = unsafe { self.entries.get_unchecked_mut(i) }; + Entry::Occupied(OccupiedEntry { + key: key.key(), + value, + }) + } + None => Entry::Vacant(VacantEntry { key, map: self }), + } + } + + /// Remove the last element. + pub fn pop(&mut self) -> Option<(K, V)> { + match self.entries.pop() { + None => None, + Some((key, value)) => { + if let Some(index) = &mut self.index { + match index.find_entry(key.hash().promote(), |&i| i == self.entries.len()) { + Ok(found) => { + let removed = found.remove().0; + debug_assert!(removed == self.entries.len()); + } + Err(_) => { + if cfg!(debug_assertions) { + unreachable!("The entry must be in the index") + } + } + } + } + Some((key.into_key(), value)) + } + } + } + + /// Get the entry (occupied or not) for the key. + #[inline] + pub fn entry(&mut self, key: K) -> Entry<'_, K, V> + where + K: Eq + Hash, + { + self.entry_hashed(Hashed::new(key)) + } + + /// Is the map empty? + #[inline] + pub fn is_empty(&self) -> bool { + self.entries.is_empty() + } + + /// Get the number of elements in the map. + #[inline] + pub fn len(&self) -> usize { + self.entries.len() + } + + /// Remove all elements from the map. + /// + /// Retain the capacity. 
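// A minimal usage sketch of the `Entry` API defined below: insert or update
// with a single key lookup.
let mut counts: SmallMap<&str, u32> = SmallMap::new();
for word in ["a", "b", "a"] {
    *counts.entry(word).or_insert(0) += 1;
}
assert_eq!(counts.get("a"), Some(&2));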
+ #[inline] + pub fn clear(&mut self) { + self.entries.clear(); + if let Some(index) = &mut self.index { + // Note we are keeping the `index` object initialized here. + // So next insert will have to update the index. + // Which is probably suboptimal (hard to say), + // but `clear` is a rare operation anyway. + index.clear(); + } + } + + /// Basic check that the map invariants hold. + #[cfg(test)] + fn state_check(&self) { + if let Some(index) = &self.index { + assert_eq!(self.entries.len(), index.len()); + let mut set_fields = vec![false; self.entries.len()]; + for bucket in index.iter() { + let i = *bucket; + let prev = mem::replace(&mut set_fields[i], true); + assert!(!prev); + } + } else { + assert!(self.entries.len() <= NO_INDEX_THRESHOLD); + } + } + + fn is_sorted_by_key(&self) -> bool + where + K: Ord, + { + self.entries.is_sorted_by_key() + } + + /// Sort entries by key. + pub fn sort_keys(&mut self) + where + K: Ord, + { + // Check if sorted first, otherwise we may need to rebuild the index + // even if the map is already sorted. + if self.is_sorted_by_key() { + return; + } + + // Rebuild index on drop to make this code panic-safe. + struct RebuildIndexOnDrop<'a, K, V> { + map: &'a mut SmallMap, + } + + impl<'a, K, V> Drop for RebuildIndexOnDrop<'a, K, V> { + fn drop(&mut self) { + self.map.rebuild_index(); + } + } + + let map = RebuildIndexOnDrop { map: self }; + map.map.entries.sort_keys(); + } + + /// Equal if the keys and values are equal in the iteration order. + pub fn eq_ordered(&self, other: &Self) -> bool + where + K: PartialEq, + V: PartialEq, + { + self.entries.eq_ordered(&other.entries) + } + + /// Hash entries in the iteration order. + /// + /// Note, keys are not hashed, but previously computed hashes are hashed instead. + pub fn hash_ordered(&self, state: &mut H) + where + K: Hash, + V: Hash, + { + self.entries.hash_ordered(state) + } + + /// Reverse the iteration order of the map. + pub fn reverse(&mut self) { + self.entries.reverse(); + if let Some(index) = &mut self.index { + let len = self.entries.len(); + for entry in index.iter_mut() { + *entry = len - 1 - *entry; + } + } + } + + /// Retains only the elements specified by the predicate. + pub fn retain(&mut self, f: F) + where + F: FnMut(&K, &mut V) -> bool, + { + struct RebuildIndexOnDrop<'a, K, V> { + original_len: usize, + map: &'a mut SmallMap, + } + + impl<'a, K, V> Drop for RebuildIndexOnDrop<'a, K, V> { + fn drop(&mut self) { + debug_assert!(self.map.entries.len() <= self.original_len); + if self.map.len() < self.original_len { + self.map.rebuild_index(); + } + } + } + + let work = RebuildIndexOnDrop { + original_len: self.len(), + map: self, + }; + + work.map.entries.retain(f); + } +} + +/// Reference to the actual entry in the map. +pub struct OccupiedEntry<'a, K, V> { + /// Pointer to the key in the map. + key: &'a K, + /// Pointer to the value in the map. + value: &'a mut V, +} + +/// Reference to a vacant entry in the map. +/// +/// This can be used to insert an entry into the map. +pub struct VacantEntry<'a, K, V> { + key: Hashed, + map: &'a mut SmallMap, +} + +/// Occupied or vacant entry. +pub enum Entry<'a, K, V> { + /// Occupied entry. + Occupied(OccupiedEntry<'a, K, V>), + /// No entry for given key. + Vacant(VacantEntry<'a, K, V>), +} + +impl<'a, K, V> OccupiedEntry<'a, K, V> { + /// Key for this entry. + #[inline] + pub fn key(&self) -> &K { + self.key + } + + /// Value for this entry. 
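// Quick behavioral sketch of `reverse` above: iteration order flips while
// lookups keep working, because each stored index i is remapped to
// len - 1 - i.
let mut m = smallmap! { 1 => "a", 2 => "b" };
m.reverse();
assert_eq!(m.first(), Some((&2, &"b")));
assert_eq!(m.get(&1), Some(&"a"));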
+ #[inline] + pub fn get(&self) -> &V { + self.value + } + + /// Mutable reference to the value in the entry. + #[inline] + pub fn get_mut(&mut self) -> &mut V { + self.value + } + + /// Get a reference to the value in the entry with map lifetime. + #[inline] + pub fn into_mut(self) -> &'a mut V { + self.value + } + + #[inline] + pub(crate) fn into_mut_entry(self) -> (&'a K, &'a mut V) { + (self.key, self.value) + } +} + +impl<'a, K, V> VacantEntry<'a, K, V> +where + K: Eq, +{ + /// Key for this entry. + #[inline] + pub fn key(&self) -> &K { + self.key.key() + } + + /// Insert the value into the entry. + #[inline] + pub fn insert(self, value: V) -> &'a mut V { + self.insert_entry(value).1 + } + + #[inline] + pub(crate) fn insert_entry(self, value: V) -> (&'a K, &'a mut V) { + self.map.insert_hashed_unique_unchecked(self.key, value) + } +} + +impl<'a, K, V> Entry<'a, K, V> +where + K: Eq, +{ + /// Key for this entry. + #[inline] + pub fn key(&self) -> &K { + match self { + Entry::Occupied(e) => e.key(), + Entry::Vacant(e) => e.key(), + } + } + + /// Insert if vacant. + #[inline] + pub fn or_insert(self, default: V) -> &'a mut V { + self.or_insert_with(|| default) + } + + /// Insert if vacant. + #[inline] + pub fn or_insert_with(self, default: impl FnOnce() -> V) -> &'a mut V { + self.or_insert_entry_with(default).1 + } + + /// Insert if vacant. + #[inline] + pub fn or_default(self) -> &'a mut V + where + V: Default, + { + #[allow(clippy::unwrap_or_default)] // defining or_default + self.or_insert_with(V::default) + } + + #[inline] + pub(crate) fn or_insert_entry_with(self, default: impl FnOnce() -> V) -> (&'a K, &'a mut V) { + match self { + Entry::Occupied(e) => e.into_mut_entry(), + Entry::Vacant(e) => e.insert_entry(default()), + } + } +} + +impl FromIterator<(K, V)> for SmallMap +where + K: Hash + Eq, +{ + fn from_iter>(iter: I) -> Self { + let iter = iter.into_iter(); + let mut mp = Self::with_capacity(iter.size_hint().0); + for (k, v) in iter { + mp.insert(k, v); + } + mp + } +} + +impl FromIterator<(Hashed, V)> for SmallMap +where + K: Eq, +{ + fn from_iter, V)>>(iter: I) -> Self { + let iter = iter.into_iter(); + let mut mp = Self::with_capacity(iter.size_hint().0); + for (k, v) in iter { + mp.insert_hashed(k, v); + } + mp + } +} + +impl IntoIterator for SmallMap { + type Item = (K, V); + type IntoIter = IntoIter; + + #[inline] + fn into_iter(self) -> Self::IntoIter { + self.into_iter() + } +} + +impl<'a, K, V> IntoIterator for &'a SmallMap { + type Item = (&'a K, &'a V); + type IntoIter = Iter<'a, K, V>; + + #[inline] + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl<'a, K, V> IntoIterator for &'a mut SmallMap { + type Item = (&'a K, &'a mut V); + type IntoIter = IterMut<'a, K, V>; + + #[inline] + fn into_iter(self) -> Self::IntoIter { + self.iter_mut() + } +} + +impl PartialEq for SmallMap { + fn eq(&self, other: &Self) -> bool { + self.len() == other.len() + && self + .iter_hashed() + .all(|(k, v)| other.get_hashed(k) == Some(v)) + } +} + +impl Eq for SmallMap {} + +impl Extend<(K, V)> for SmallMap +where + K: Hash + Eq, +{ + fn extend>(&mut self, iter: T) { + for (k, v) in iter { + self.insert(k, v); + } + } +} + +/// Create a [`SmallMap`](SmallMap) from a list of key-value pairs. +/// +/// ## Example +/// +/// ``` +/// use starlark_map::smallmap; +/// +/// let map = smallmap! 
{ +/// "a" => 1, +/// "b" => 2, +/// }; +/// assert_eq!(map.get("a"), Some(&1)); +/// assert_eq!(map.get("b"), Some(&2)); +/// assert_eq!(map.get("c"), None); +/// ``` +#[macro_export] +macro_rules! smallmap { + (@single $($x:tt)*) => (()); + (@count $($rest:expr),*) => (<[()]>::len(&[$(smallmap!(@single $rest)),*])); + + ($($key:expr => $value:expr,)+) => { smallmap!($($key => $value),+) }; + ($($key:expr => $value:expr),*) => { + { + let cap = smallmap!(@count $($key),*); + #[allow(unused_mut)] + let mut map = $crate::small_map::SmallMap::with_capacity(cap); + $( + map.insert($key, $value); + )* + map + } + }; +} + +impl Serialize for SmallMap { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + serializer.collect_map(self.iter()) + } +} + +impl<'de, K, V> Deserialize<'de> for SmallMap +where + K: Deserialize<'de> + Hash + Eq, + V: Deserialize<'de>, +{ + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + struct MapVisitor { + marker: PhantomData>, + } + + impl<'de, K, V> serde::de::Visitor<'de> for MapVisitor + where + K: Deserialize<'de> + Hash + Eq, + V: Deserialize<'de>, + { + type Value = SmallMap; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a map") + } + + #[inline] + fn visit_map(self, mut map: A) -> Result + where + A: serde::de::MapAccess<'de>, + { + let mut values = SmallMap::with_capacity(map.size_hint().unwrap_or(0)); + while let Some((key, value)) = map.next_entry()? { + values.insert(key, value); + } + Ok(values) + } + } + + let visitor = MapVisitor { + marker: PhantomData, + }; + deserializer.deserialize_map(visitor) + } +} + +#[cfg(test)] +mod tests { + use std::cmp::Ordering; + use std::panic::catch_unwind; + use std::panic::AssertUnwindSafe; + + use super::*; + + #[test] + fn empty_map() { + let m = SmallMap::::new(); + assert_eq!(m.is_empty(), true); + assert_eq!(m.len(), 0); + assert_eq!(m.iter().next(), None); + } + + #[test] + #[allow(clippy::map_identity)] + fn few_entries() { + let entries1 = [(0, 'a'), (1, 'b')]; + let m1 = entries1.iter().copied().collect::>(); + + let entries2 = [(1, 'b'), (0, 'a')]; + let m2 = entries2.iter().copied().collect::>(); + assert_eq!(m1.is_empty(), false); + assert_eq!(m1.len(), 2); + assert_eq!(m2.is_empty(), false); + assert_eq!(m2.len(), 2); + + assert_eq!(m1.iter().eq(entries1.iter().map(|(k, v)| (k, v))), true); + assert_eq!(m2.iter().eq(entries2.iter().map(|(k, v)| (k, v))), true); + assert_eq!(m1.iter().eq(m2.iter()), false); + assert_eq!(m1.eq(&m1), true); + assert_eq!(m2.eq(&m2), true); + assert_eq!(m1, m2); + + assert_eq!(m1.get(&0), Some(&'a')); + assert_eq!(m1.get(&3), None); + assert_eq!(m2.get(&1), Some(&'b')); + assert_eq!(m2.get(&3), None); + + assert_eq!(m1.get_index(0), Some((&0, &'a'))); + assert_eq!(m1.get_index(1), Some((&1, &'b'))); + assert_eq!(m1.get_index(2), None); + + assert_ne!(m1, smallmap! 
{ 0 => 'a', 1 => 'c' }); + + let iter = m1.iter(); + let (values1, values2): (Vec<_>, Vec<_>) = (iter.clone().collect(), iter.collect()); + assert_eq!(values1, values2); + } + + #[test] + fn many_entries() { + let numbers = 0..26; + let letters = 'a'..='z'; + + let entries1 = numbers.zip(letters); + let m1 = entries1.clone().collect::>(); + + let numbers = (0..26).rev(); + let letters = ('a'..='z').rev(); + let entries2 = numbers.zip(letters); + let m2 = entries2.clone().collect::>(); + assert_eq!(m1.is_empty(), false); + assert_eq!(m1.len(), 26); + assert_eq!(m2.is_empty(), false); + assert_eq!(m2.len(), 26); + + assert_eq!(m1.clone().into_iter().eq(entries1), true); + assert_eq!(m2.clone().into_iter().eq(entries2), true); + assert_eq!(m1.iter().eq(m2.iter()), false); + assert_eq!(m1.eq(&m1), true); + assert_eq!(m2.eq(&m2), true); + assert_eq!(m1, m2); + + assert_eq!(m1.get(&1), Some(&'b')); + assert_eq!(m1.get(&30), None); + assert_eq!(m2.get(&0), Some(&'a')); + assert_eq!(m2.get(&30), None); + assert_eq!(m2.get_full(&0), Some((25, &0, &'a'))); + assert_eq!(m2.get_full(&25), Some((0, &25, &'z'))); + assert_eq!(m2.get_full(&29), None); + + let not_m1 = { + let mut m = m1.clone(); + m.shift_remove(&1); + m + }; + assert_ne!(m1, not_m1); + + let iter = m1.iter(); + let (values1, values2): (Vec<_>, Vec<_>) = (iter.clone().collect(), iter.collect()); + assert_eq!(values1, values2); + } + + #[test] + fn test_smallmap_macro() { + let map = smallmap![1 => "a", 3 => "b"]; + let mut i = map.into_iter(); + assert_eq!(i.next(), Some((1, "a"))); + assert_eq!(i.next(), Some((3, "b"))); + assert_eq!(i.next(), None); + } + + #[test] + fn test_clone() { + let map = smallmap![1 => "a", 3 => "b"]; + let iter = map.iter(); + let values1: Vec<_> = iter.clone().collect(); + let values2: Vec<_> = iter.collect(); + assert_eq!(vec![(&1, &"a"), (&3, &"b")], values1); + assert_eq!(values1, values2); + + let iter = map.keys(); + let values1: Vec<_> = iter.clone().collect(); + let values2: Vec<_> = iter.collect(); + assert_eq!(vec![&1, &3], values1); + assert_eq!(values1, values2); + + let iter = map.values(); + let values1: Vec<_> = iter.clone().collect(); + let values2: Vec<_> = iter.collect(); + assert_eq!(vec![&"a", &"b"], values1); + assert_eq!(values1, values2); + } + + #[test] + fn test_duplicate_hashes() { + // A type which always gives hash collisions + #[derive(PartialEq, Eq, Debug)] + struct K(i32); + #[allow(clippy::derived_hash_with_manual_eq)] + impl Hash for K { + fn hash(&self, _state: &mut H) {} + } + + let mut map = smallmap![K(1) => "test", K(3) => "more"]; + assert_eq!(map.get(&K(1)), Some(&"test")); + assert_eq!(map.get(&K(2)), None); + assert_eq!(map.get(&K(3)), Some(&"more")); + + assert_eq!(map.insert(K(2), "magic"), None); + assert_eq!(map.get(&K(2)), Some(&"magic")); + + assert_eq!(map.shift_remove(&K(1)), Some("test")); + assert_eq!(map.get(&K(1)), None); + assert_eq!(map.keys().collect::>(), vec![&K(3), &K(2)]); + } + + #[test] + fn test_smallmap_debug() { + let s = format!("{:?}", smallmap![1 => "test", 2 => "more"]); + assert_eq!(s, "{1: \"test\", 2: \"more\"}") + } + + #[test] + fn entry() { + let mut map = SmallMap::new(); + for i in 0..100 { + match map.entry(i) { + Entry::Vacant(e) => { + e.insert(i * 2); + } + Entry::Occupied(..) => panic!(), + } + match map.entry(i) { + Entry::Occupied(..) => {} + Entry::Vacant(..) 
=> panic!(), + } + } + } + + #[test] + fn test_pop_small() { + let mut map = SmallMap::new(); + for i in 0..=5 { + map.insert(i, i * 10); + } + for i in (0..=5).rev() { + assert_eq!((i, i * 10), map.pop().unwrap()); + map.state_check(); + } + assert!(map.is_empty()); + } + + #[test] + fn test_pop_large() { + let mut map = SmallMap::new(); + for i in 0..=500 { + map.insert(i, i * 10); + } + for i in (0..=500).rev() { + assert_eq!((i, i * 10), map.pop().unwrap()); + if i % 100 == 0 { + map.state_check(); + } + } + assert!(map.is_empty()); + } + + #[test] + fn test_first() { + let mut map = SmallMap::new(); + map.insert(1, 10); + assert_eq!(map.first(), Some((&1, &10))); + map.insert(2, 20); + assert_eq!(map.first(), Some((&1, &10))); + map.shift_remove(&1); + assert_eq!(map.first(), Some((&2, &20))); + } + + #[test] + fn test_last() { + let mut map = SmallMap::new(); + map.insert(1, 10); + assert_eq!(map.last(), Some((&1, &10))); + map.insert(2, 20); + assert_eq!(map.last(), Some((&2, &20))); + map.insert(1, 100); + assert_eq!(map.last(), Some((&2, &20))); + } + + #[test] + fn test_sort_keys_no_index() { + let mut map = SmallMap::new(); + map.insert(2, 20); + map.insert(1, 10); + map.insert(3, 30); + map.sort_keys(); + assert_eq!( + vec![(&1, &10), (&2, &20), (&3, &30)], + map.iter().collect::>() + ); + assert_eq!(&10, map.get(&1).unwrap()); + assert_eq!(&20, map.get(&2).unwrap()); + assert_eq!(&30, map.get(&3).unwrap()); + } + + #[test] + fn test_sort_keys_with_index() { + let mut map = SmallMap::new(); + for i in 1..=100 { + map.insert(i, i * 10); + } + map.sort_keys(); + assert_eq!( + (1..=100).map(|i| (i, i * 10)).collect::>(), + map.iter().map(|(k, v)| (*k, *v)).collect::>() + ); + for i in 1..=100 { + assert_eq!(i * 10, *map.get(&i).unwrap()); + } + } + + #[test] + fn test_sort_keys_updates_index_on_panic() { + #[derive(Hash, PartialEq, Eq, Debug)] + struct Key(u32); + + impl PartialOrd for Key { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } + } + + impl Ord for Key { + fn cmp(&self, other: &Self) -> Ordering { + if self.0 < 10 && other.0 < 10 { + panic!("panic in Ord::cmp") + } + self.0.cmp(&other.0) + } + } + + let mut map = SmallMap::new(); + for i in (1..=100).rev() { + map.insert(Key(i), i * 10); + } + catch_unwind(AssertUnwindSafe(|| map.sort_keys())).unwrap_err(); + // If index is not updated on panic, the following assertion will fail. + map.assert_invariants(); + } + + #[test] + fn test_eq_ordered() { + let m0 = SmallMap::from_iter([(1, 2), (3, 4)]); + let m1 = SmallMap::from_iter([(1, 2), (3, 4)]); + let m2 = SmallMap::from_iter([(3, 4), (1, 2)]); + let m3 = SmallMap::from_iter([(3, 4)]); + assert!(m0.eq_ordered(&m0)); + assert!(m0.eq_ordered(&m1)); + assert!(!m0.eq_ordered(&m2)); + assert!(!m0.eq_ordered(&m3)); + } + + #[test] + fn test_shift_remove() { + // Large enough so the index is used. + let mut m = (0..100).map(|i| (i, i * 10)).collect::>(); + assert_eq!(Some((1, 10)), m.shift_remove_entry(&1)); + assert_eq!(Some(&30), m.get(&3)); + m.assert_invariants(); + } + + #[test] + fn test_shift_remove_last() { + // Large enough so the index is used. 
+ let mut m = (0..100).map(|i| (i, i * 10)).collect::>(); + assert_eq!(Some((99, 990)), m.shift_remove_entry(&99)); + assert_eq!(Some(&980), m.get(&98)); + m.assert_invariants(); + } + + #[test] + fn test_shift_remove_index() { + let mut m = (0..100).map(|i| (i, i * 10)).collect::>(); + m.shift_remove_index(5); + assert_eq!(Some(&40), m.get(&4)); + assert_eq!(None, m.get(&5)); + assert_eq!(Some(&60), m.get(&6)); + m.assert_invariants(); + } + + #[test] + fn test_json() { + let mp = smallmap! {"a".to_owned() => 1, "b".to_owned() => 2}; + let expected = serde_json::json!({ + "a": 1, + "b": 2, + }); + assert_eq!(serde_json::to_value(&mp).unwrap(), expected); + assert_eq!( + serde_json::from_value::>(expected).unwrap(), + mp + ); + } + + #[test] + fn test_reverse_small() { + let mut map = SmallMap::new(); + map.insert("a".to_owned(), "b".to_owned()); + map.insert("c".to_owned(), "d".to_owned()); + map.reverse(); + + assert_eq!(Some("b"), map.get("a").map(|s| s.as_str())); + assert_eq!(Some("d"), map.get("c").map(|s| s.as_str())); + assert_eq!( + vec![ + ("c".to_owned(), "d".to_owned()), + ("a".to_owned(), "b".to_owned()) + ], + map.into_iter().collect::>() + ); + } + + #[test] + fn test_reverse_large() { + let mut map = SmallMap::new(); + for i in 0..100 { + map.insert(i.to_string(), (i * 10).to_string()); + } + + let expected = map + .iter() + .rev() + .map(|(k, v)| (k.clone(), v.clone())) + .collect::>(); + + map.reverse(); + + for i in 0..100 { + assert_eq!(Some(&(i * 10).to_string()), map.get(&i.to_string())); + } + + assert_eq!( + expected, + map.iter() + .map(|(k, v)| (k.clone(), v.clone())) + .collect::>() + ); + } + + #[test] + fn test_retain() { + let mut map = SmallMap::new(); + for i in 0..100 { + map.insert(i.to_string(), i); + } + map.retain(|_, v| { + let res = *v % 2 == 0; + *v += 3; + res + }); + assert_eq!(map.len(), 50); + assert_eq!(map.get("7"), None); + assert_eq!(map.get("8"), Some(&11)); + } +} diff --git a/starlark-rust/starlark_map/src/small_map/mod.rs b/starlark-rust/starlark_map/src/small_map/mod.rs deleted file mode 100644 index a75ceb381cf70..0000000000000 --- a/starlark-rust/starlark_map/src/small_map/mod.rs +++ /dev/null @@ -1,1328 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! A Map with deterministic iteration order that specializes its storage based on the number of -//! entries to optimize memory. This is essentially `IndexMap` with two changes: -//! * no index is created for small maps -//! 
* short hashes are stored next to keys - -use std::fmt; -use std::fmt::Debug; -use std::hash::Hash; -use std::hash::Hasher; -use std::marker::PhantomData; -use std::mem; - -use allocative::Allocative; -use equivalent::Equivalent; -use hashbrown::raw::RawTable; -use serde::Deserialize; -use serde::Serialize; - -use crate::hashed::Hashed; -pub use crate::small_map::iter::IntoIter; -pub use crate::small_map::iter::IntoIterHashed; -pub use crate::small_map::iter::IntoKeys; -pub use crate::small_map::iter::IntoValues; -pub use crate::small_map::iter::Iter; -pub use crate::small_map::iter::IterHashed; -pub use crate::small_map::iter::IterMut; -pub use crate::small_map::iter::IterMutUnchecked; -pub use crate::small_map::iter::Keys; -pub use crate::small_map::iter::Values; -pub use crate::small_map::iter::ValuesMut; -use crate::vec_map::VecMap; -use crate::StarlarkHashValue; - -mod iter; - -/// Max size of a map when we do not create an index. -/// 32 is the value where `buck2 cquery some-target` is the fastest and consumes the least memory. -/// Note the test was performed for buck2-specific patterns. -/// On nightly we use SIMD to speed up the search, so use 16 on stable to be safe. -#[cfg(rust_nightly)] -const NO_INDEX_THRESHOLD: usize = 32; -#[cfg(not(rust_nightly))] -const NO_INDEX_THRESHOLD: usize = 16; - -/// An memory-efficient key-value map with deterministic order. -/// -/// Provides the standard container operations, modelled most closely on `indexmap::IndexMap`, plus: -/// -/// * Variants which take an already hashed value, e.g. [`get_hashed`](SmallMap::get_hashed). -/// -/// * Functions which work with the position, e.g. [`get_index_of`](SmallMap::get_index_of). -#[repr(C)] -#[derive(Clone, Allocative)] -pub struct SmallMap { - entries: VecMap, - /// Map a key to the index in `entries`. - /// This field is initialized when the size of the map exceeds `NO_INDEX_THRESHOLD`. - index: Option>>, -} - -impl Default for SmallMap { - #[inline] - fn default() -> Self { - Self::new() - } -} - -impl Debug for SmallMap { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_map().entries(self.iter()).finish() - } -} - -impl SmallMap { - /// Empty map. - #[inline] - pub const fn new() -> Self { - Self { - entries: VecMap::new(), - index: None, - } - } - - /// Create an empty map with specified capacity. - #[inline] - pub fn with_capacity(n: usize) -> Self { - if n <= NO_INDEX_THRESHOLD { - SmallMap { - entries: VecMap::with_capacity(n), - index: None, - } - } else { - SmallMap { - entries: VecMap::with_capacity(n), - index: Some(Box::new(RawTable::with_capacity(n))), - } - } - } - - /// Verify that the map is internally consistent. - #[cfg(test)] - fn assert_invariants(&self) - where - K: Eq, - { - if let Some(index) = &self.index { - assert_eq!(index.len(), self.entries.len()); - for (i, (k, _)) in self.entries.iter_hashed().enumerate() { - let j = *index - .get(k.hash().promote(), |j| { - &self.entries.get_index(*j).unwrap().0 == k.key() - }) - .unwrap(); - assert_eq!(i, j); - } - } else { - assert!(self.entries.len() <= NO_INDEX_THRESHOLD); - } - } - - /// Drop the index if the map is too small, and the index is not really needed. - /// - /// We don't allocate index prematurely when we add entries the map, - /// but we keep it allocated when we remove entries from the map. - /// - /// This function allows to reclaim memory after some entries are removed. 
- pub fn maybe_drop_index(&mut self) { - if self.entries.len() <= NO_INDEX_THRESHOLD { - self.index = None; - } - } - - /// Key references iterator. - #[inline] - pub fn keys(&self) -> Keys { - Keys { - iter: self.entries.keys(), - } - } - - /// Value references iterator. - #[inline] - pub fn values(&self) -> Values { - Values { - iter: self.entries.values(), - } - } - - /// Key owned iterator. - #[inline] - pub fn into_keys(self) -> IntoKeys { - IntoKeys { - iter: self.entries.into_iter(), - } - } - - /// Value owned iterator. - #[inline] - pub fn into_values(self) -> IntoValues { - IntoValues { - iter: self.entries.into_iter(), - } - } - - /// Mutable value references iterator. - #[inline] - pub fn values_mut(&mut self) -> ValuesMut { - ValuesMut { - iter: self.entries.values_mut(), - } - } - - /// Entry references iterator. - #[inline] - pub fn iter(&self) -> Iter<'_, K, V> { - Iter { - iter: self.entries.iter(), - } - } - - /// Entry references with hashes iterator. - #[inline] - pub fn iter_hashed(&self) -> IterHashed { - IterHashed { - iter: self.entries.iter_hashed(), - } - } - - /// Entries with hashes iterator. - #[inline] - pub fn into_iter_hashed(self) -> IntoIterHashed { - IntoIterHashed { - iter: self.entries.into_iter_hashed(), - } - } - - /// Mutable entry references iterator. - #[inline] - pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { - IterMut { - iter: self.entries.iter_mut(), - } - } - - /// Mutable entry references iterator, with mutable key references. - /// - /// This operation is memory safe, but otherwise no guarantees - /// if keys are mutated inconsistently (hash or equality changes). - #[inline] - pub fn iter_mut_unchecked(&mut self) -> IterMutUnchecked<'_, K, V> { - IterMutUnchecked { - iter: self.entries.iter_mut_unchecked(), - } - } - - /// Entries iterator. - #[inline] - fn into_iter(self) -> IntoIter { - IntoIter { - iter: self.entries.into_iter(), - } - } - - /// Query the map by a prehashed key. - #[inline] - pub fn get_hashed(&self, key: Hashed<&Q>) -> Option<&V> - where - Q: Equivalent + ?Sized, - { - self.get_index_of_hashed(key) - .map(|index| unsafe { self.entries.get_unchecked(index).1 }) - } - - /// Same as `get_hashed`, byt takes key by value instead of by reference. - /// Sometimes it generates slightly better code for small values. - #[inline] - pub fn get_hashed_by_value(&self, key: Hashed) -> Option<&V> - where - Q: Equivalent, - { - self.get_index_of_hashed_by_value(key) - .map(|index| unsafe { self.entries.get_unchecked(index).1 }) - } - - /// Query the map by a given key. - #[inline] - pub fn get(&self, key: &Q) -> Option<&V> - where - Q: Hash + Equivalent + ?Sized, - { - self.get_hashed(Hashed::new(key)) - } - - /// Query the map by a given key, return an index of the entry - /// along with the entry key and value. - #[inline] - pub fn get_full(&self, key: &Q) -> Option<(usize, &K, &V)> - where - Q: Hash + Equivalent + ?Sized, - { - self.get_full_hashed(Hashed::new(key)) - } - - /// Query the map by a given key, return an index of the entry - /// along with the entry key and value. 
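The `*_hashed` accessors exist so a caller can pay for hashing once and reuse the result. A small sketch (assuming `starlark-map` as a dependency; `Hashed` is exported at the crate root, as the `unordered_map` change at the end of this diff also relies on):

```rust
use starlark_map::small_map::SmallMap;
use starlark_map::Hashed;

fn main() {
    let mut map: SmallMap<String, u32> = SmallMap::new();
    map.insert("key".to_owned(), 1);

    // Hash the lookup key once up front...
    let key: Hashed<&str> = Hashed::new("key");
    // ...then query by the precomputed hash; no rehashing happens inside.
    assert_eq!(Some(&1), map.get_hashed(key));
}
```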
- #[inline] - pub fn get_full_hashed(&self, key: Hashed<&Q>) -> Option<(usize, &K, &V)> - where - Q: Equivalent + ?Sized, - { - self.get_index_of_hashed(key).map(|index| { - let (key, value) = unsafe { self.entries.get_unchecked(index) }; - (index, *key.key(), value) - }) - } - - #[inline] - fn get_index_of_hashed_raw_with_index( - &self, - hash: StarlarkHashValue, - mut eq: impl FnMut(&K) -> bool, - index: &RawTable, - ) -> Option { - index - .get(hash.promote(), |&index| unsafe { - eq(self.entries.get_unchecked(index).0.key()) - }) - .copied() - } - - #[inline] - pub(crate) fn get_index_of_hashed_raw( - &self, - hash: StarlarkHashValue, - eq: impl FnMut(&K) -> bool, - ) -> Option { - match &self.index { - None => self.entries.get_index_of_hashed_raw(hash, eq), - Some(index) => self.get_index_of_hashed_raw_with_index(hash, eq, index), - } - } - - /// Find the index of the given hashed key. - #[inline] - pub fn get_index_of_hashed(&self, key: Hashed<&Q>) -> Option - where - Q: Equivalent + ?Sized, - { - self.get_index_of_hashed_raw(key.hash(), |k| key.key().equivalent(k)) - } - - /// Get the index of the entry given a hashed key. - #[inline] - pub fn get_index_of_hashed_by_value(&self, key: Hashed) -> Option - where - Q: Equivalent, - { - self.get_index_of_hashed_raw(key.hash(), |k| key.key().equivalent(k)) - } - - /// Find an entry by an index. - #[inline] - pub fn get_index(&self, index: usize) -> Option<(&K, &V)> { - self.entries.get_index(index) - } - - /// The an entry index by a given key. - #[inline] - pub fn get_index_of(&self, key: &Q) -> Option - where - Q: Hash + Equivalent + ?Sized, - { - self.get_index_of_hashed(Hashed::new(key)) - } - - /// Find a mutable value by a hashed key. - #[inline] - pub fn get_mut_hashed(&mut self, key: Hashed<&Q>) -> Option<&mut V> - where - Q: Equivalent + ?Sized, - { - let i = self.get_index_of_hashed(key)?; - debug_assert!(i < self.entries.len()); - Some(unsafe { self.entries.get_unchecked_mut(i).1 }) - } - - /// Find the entry by a given key. - #[inline] - pub fn get_mut(&mut self, key: &Q) -> Option<&mut V> - where - Q: Hash + Equivalent + ?Sized, - { - self.get_mut_hashed(Hashed::new(key)) - } - - /// Find if an entry by a given prehashed key exists. - #[inline] - pub fn contains_key_hashed(&self, key: Hashed<&Q>) -> bool - where - Q: Equivalent + ?Sized, - { - self.get_index_of_hashed(key).is_some() - } - - /// Find if an entry by a given hashed key exists. - #[inline] - pub fn contains_key_hashed_by_value(&self, key: Hashed) -> bool - where - Q: Equivalent, - { - self.get_index_of_hashed_by_value(key).is_some() - } - - /// Find if an entry by a given key exists. - #[inline] - pub fn contains_key(&self, key: &Q) -> bool - where - Q: Hash + Equivalent + ?Sized, - { - self.contains_key_hashed(Hashed::new(key)) - } - - /// Reserve capacity for at least `additional` more elements to be inserted. - #[inline] - pub fn reserve(&mut self, additional: usize) - where - K: Eq, - { - self.entries.reserve(additional); - if let Some(index) = &mut self.index { - index.reserve(additional, Self::hasher(&self.entries)); - } else if self.len() + additional > NO_INDEX_THRESHOLD { - self.create_index(self.len() + additional); - } - } - - /// Current map capacity. - #[inline] - pub fn capacity(&self) -> usize { - self.entries.capacity() - } - - /// Returns a reference to the first key-value pair. - pub fn first(&self) -> Option<(&K, &V)> { - self.iter().next() - } - - /// Returns a reference to the last key-value pair. 
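One consequence of `reserve` above: announcing a large batch up front lets the map build its hash index once, rather than discovering mid-stream that it crossed the small-map threshold. A sketch (assuming `starlark-map` as a dependency):

```rust
use starlark_map::small_map::SmallMap;

fn main() {
    let mut m: SmallMap<u64, u64> = SmallMap::with_capacity(4);
    // Announce the batch: the index is created here, sized for 1000 entries.
    m.reserve(1000);
    for i in 0..1000 {
        m.insert(i, i);
    }
    assert_eq!(1000, m.len());
}
```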
- pub fn last(&self) -> Option<(&K, &V)> { - self.iter().next_back() - } - - #[cold] - fn create_index(&mut self, capacity: usize) { - debug_assert!(self.index.is_none()); - debug_assert!(capacity >= self.entries.len()); - let mut index = RawTable::with_capacity(capacity); - for (i, (k, _)) in self.entries.iter_hashed().enumerate() { - // SAFETY: capacity >= self.entries.len() - unsafe { index.insert_no_grow(k.hash().promote(), i) }; - } - self.index = Some(Box::new(index)); - } - - /// Hasher for index resize. - #[inline(always)] - fn hasher(entries: &VecMap) -> impl Fn(&usize) -> u64 + '_ { - move |&index| { - debug_assert!(index < entries.len()); - unsafe { entries.get_unchecked(index).0.hash().promote() } - } - } - - /// Insert an entry into the map without checking for a duplicate key. - #[inline] - pub fn insert_hashed_unique_unchecked(&mut self, key: Hashed, val: V) -> (&K, &mut V) { - let hash = key.hash(); - let entry_index = self.entries.len(); - self.entries.insert_hashed_unique_unchecked(key, val); - if let Some(index) = &mut self.index { - index.insert(hash.promote(), entry_index, Self::hasher(&self.entries)); - } else if self.entries.len() == NO_INDEX_THRESHOLD + 1 { - self.create_index(self.entries.len()); - } else { - debug_assert!(self.entries.len() < NO_INDEX_THRESHOLD + 1); - } - // SAFETY: We've just inserted an entry, so we know entries is not empty. - unsafe { - let (key, value) = self.entries.get_unchecked_mut(self.entries.len() - 1); - (key.key(), value) - } - } - - /// Insert a key-value pair into the map. - #[inline] - pub fn insert_hashed(&mut self, key: Hashed, val: V) -> Option - where - K: Eq, - { - match self.get_index_of_hashed_raw(key.hash(), |k| key.key().equivalent(k)) { - None => { - self.insert_hashed_unique_unchecked(key, val); - None - } - Some(i) => unsafe { - debug_assert!(i < self.entries.len()); - Some(mem::replace(self.entries.get_unchecked_mut(i).1, val)) - }, - } - } - - /// Insert a key-value pair into the map. - #[inline] - pub fn insert(&mut self, key: K, val: V) -> Option - where - K: Hash + Eq, - { - self.insert_hashed(Hashed::new(key), val) - } - - /// Insert a key-value pair into the map without checking for a duplicate key. - #[inline] - pub fn insert_unique_unchecked(&mut self, key: K, val: V) -> (&K, &mut V) - where - K: Hash, - { - self.insert_hashed_unique_unchecked(Hashed::new(key), val) - } - - /// Remove the entry for the key. - /// - /// Time complexity of this operation is *O(N)* where *N* is the number of entries in the map. - pub fn remove_hashed(&mut self, key: Hashed<&Q>) -> Option - where - Q: ?Sized + Equivalent, - { - self.remove_hashed_entry(key).map(|(_k, v)| v) - } - - /// Remove the entry for the key. - /// - /// Time complexity of this operation is *O(N)* where *N* is the number of entries in the map. - pub fn remove_hashed_entry(&mut self, key: Hashed<&Q>) -> Option<(K, V)> - where - Q: ?Sized + Equivalent, - { - let hash = key.hash(); - if let Some(index) = &mut self.index { - let entries = &self.entries; - let i = index.remove_entry(hash.promote(), |&i| unsafe { - key.key().equivalent(entries.get_unchecked(i).0.key()) - })?; - unsafe { - // No need to update the index when the last entry is removed. 
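The branch that follows is the interesting part of removal: deleting entry `i` shifts every later entry left by one, so every position stored in the index that is greater than `i` must be decremented, which is what makes mid-map removal *O(N)*. A toy model of that invariant (plain `Vec` plus `HashMap`, not the crate's `RawTable`-based index):

```rust
use std::collections::HashMap;

// Entries live in a vector; the index maps each key to its position.
// Removing position `i` shifts the tail left, so every stored position
// greater than `i` has to be decremented to stay in sync.
fn shift_remove(
    entries: &mut Vec<(String, u32)>,
    index: &mut HashMap<String, usize>,
    key: &str,
) {
    if let Some(i) = index.remove(key) {
        entries.remove(i); // O(N): shifts the tail left by one
        for pos in index.values_mut() {
            if *pos > i {
                *pos -= 1;
            }
        }
    }
}

fn main() {
    let mut entries = vec![("a".to_owned(), 1), ("b".to_owned(), 2), ("c".to_owned(), 3)];
    let mut index: HashMap<String, usize> = entries
        .iter()
        .enumerate()
        .map(|(i, (k, _))| (k.clone(), i))
        .collect();
    shift_remove(&mut entries, &mut index, "a");
    assert_eq!(3, entries[index["c"]].1); // index still matches positions
}
```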
- if i != self.entries.len() - 1 { - for bucket in index.iter() { - debug_assert!(*bucket.as_ref() != i); - if *bucket.as_mut() > i { - *bucket.as_mut() -= 1; - } - } - } - } - let (key, value) = self.entries.remove(i); - Some((key.into_key(), value)) - } else { - self.entries.remove_hashed_entry(key) - } - } - - /// Remove the entry for the key. - /// - /// Time complexity of this operation is *O(N)* where *N* is the number of entries in the map. - pub fn remove(&mut self, key: &Q) -> Option - where - Q: ?Sized + Hash + Equivalent, - { - self.remove_hashed(Hashed::new(key)) - } - - /// Remove the entry for the key. - /// - /// Time complexity of this operation is *O(N)* where *N* is the number of entries in the map. - pub fn remove_entry(&mut self, key: &Q) -> Option<(K, V)> - where - Q: ?Sized + Hash + Equivalent, - { - self.remove_hashed_entry(Hashed::new(key)) - } - - /// Get the entry (occupied or not) for the key. - #[inline] - pub fn entry_hashed(&mut self, key: Hashed) -> Entry<'_, K, V> - where - K: Eq, - { - match self.get_index_of_hashed_raw(key.hash(), |k| key.key().equivalent(k)) { - Some(i) => { - let (key, value) = unsafe { self.entries.get_unchecked_mut(i) }; - Entry::Occupied(OccupiedEntry { - key: key.key(), - value, - }) - } - None => Entry::Vacant(VacantEntry { key, map: self }), - } - } - - /// Remove the last element. - pub fn pop(&mut self) -> Option<(K, V)> { - match self.entries.pop() { - None => None, - Some((key, value)) => { - if let Some(index) = &mut self.index { - let removed = - index.remove_entry(key.hash().promote(), |&i| i == self.entries.len()); - debug_assert!(removed.unwrap() == self.entries.len()); - } - Some((key.into_key(), value)) - } - } - } - - /// Get the entry (occupied or not) for the key. - #[inline] - pub fn entry(&mut self, key: K) -> Entry<'_, K, V> - where - K: Eq + Hash, - { - self.entry_hashed(Hashed::new(key)) - } - - /// Is the map empty? - #[inline] - pub fn is_empty(&self) -> bool { - self.entries.is_empty() - } - - /// Get the number of elements in the map. - #[inline] - pub fn len(&self) -> usize { - self.entries.len() - } - - /// Remove all elements from the map. - /// - /// Retain the capacity. - #[inline] - pub fn clear(&mut self) { - self.entries.clear(); - if let Some(index) = &mut self.index { - // Note we are keeping the `index` object initialized here. - // So next insert will have to update the index. - // Which is probably suboptimal (hard to say), - // but `clear` is rare operation anyway. - index.clear(); - } - } - - /// Basic check the map invariants are hold. - #[cfg(test)] - fn state_check(&self) { - if let Some(index) = &self.index { - assert_eq!(self.entries.len(), index.len()); - let mut set_fields = vec![false; self.entries.len()]; - unsafe { - for bucket in index.iter() { - let i = *bucket.as_ref(); - let prev = mem::replace(&mut set_fields[i], true); - assert!(!prev); - } - } - } else { - assert!(self.entries.len() <= NO_INDEX_THRESHOLD); - } - } - - fn is_sorted_by_key(&self) -> bool - where - K: Ord, - { - self.entries.is_sorted_by_key() - } - - /// Sort entries by key. - pub fn sort_keys(&mut self) - where - K: Ord, - { - // Check if sorted first, otherwise we may need to rebuild the index - // even if the map is already sorted. - if self.is_sorted_by_key() { - return; - } - - // Rebuild index on drop to make this code panic-safe. 
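The guard declared just below performs its fixup in `Drop`, which runs both on normal return and while unwinding, so a panicking comparator cannot leave the index stale. The same pattern in a self-contained sketch:

```rust
// Do fallible work while a guard is alive; its Drop re-establishes the
// invariant ("the vector is sorted") even if that work panics.
fn mutate_then_sort(data: &mut Vec<u32>, f: impl FnOnce(&mut [u32])) {
    struct Guard<'a>(&'a mut Vec<u32>);
    impl Drop for Guard<'_> {
        fn drop(&mut self) {
            // Runs on return *and* on unwind.
            self.0.sort_unstable();
        }
    }
    let guard = Guard(data);
    f(&mut *guard.0); // may panic; the guard still restores sortedness
}

fn main() {
    let mut v = vec![1, 2, 3];
    mutate_then_sort(&mut v, |s| s.reverse());
    assert_eq!(vec![1, 2, 3], v);
}
```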
- struct RebuildIndexOnDrop<'a, K, V> { - map: &'a mut SmallMap, - } - - impl<'a, K, V> Drop for RebuildIndexOnDrop<'a, K, V> { - fn drop(&mut self) { - if let Some(index) = &mut self.map.index { - index.clear(); - for (i, (k, _)) in self.map.entries.iter_hashed().enumerate() { - // SAFETY: capacity >= self.entries.len() - unsafe { index.insert_no_grow(k.hash().promote(), i) }; - } - } - } - } - - let map = RebuildIndexOnDrop { map: self }; - map.map.entries.sort_keys(); - } - - /// Equal if the keys and values are equal in the iteration order. - pub fn eq_ordered(&self, other: &Self) -> bool - where - K: PartialEq, - V: PartialEq, - { - self.entries.eq_ordered(&other.entries) - } - - /// Hash entries in the iteration order. - /// - /// Note, keys are not hashed, but previously computed hashes are hashed instead. - pub fn hash_ordered(&self, state: &mut H) - where - K: Hash, - V: Hash, - { - self.entries.hash_ordered(state) - } -} - -/// Reference to the actual entry in the map. -pub struct OccupiedEntry<'a, K, V> { - /// Pointer to the key in the map. - key: &'a K, - /// Pointer to the value in the map. - value: &'a mut V, -} - -/// Reference to a vacant entry in the map. -/// -/// This can be used to insert an entry into the map. -pub struct VacantEntry<'a, K, V> { - key: Hashed, - map: &'a mut SmallMap, -} - -/// Occupied or vacant entry. -pub enum Entry<'a, K, V> { - /// Occupied entry. - Occupied(OccupiedEntry<'a, K, V>), - /// No entry for given key. - Vacant(VacantEntry<'a, K, V>), -} - -impl<'a, K, V> OccupiedEntry<'a, K, V> { - /// Key for this entry. - #[inline] - pub fn key(&self) -> &K { - self.key - } - - /// Value for this entry. - #[inline] - pub fn get(&self) -> &V { - self.value - } - - /// Mutable reference to the value in the entry. - #[inline] - pub fn get_mut(&mut self) -> &mut V { - self.value - } - - /// Get a reference to the value in the entry with map lifetime. - #[inline] - pub fn into_mut(self) -> &'a mut V { - self.value - } - - #[inline] - pub(crate) fn into_mut_entry(self) -> (&'a K, &'a mut V) { - (self.key, self.value) - } -} - -impl<'a, K, V> VacantEntry<'a, K, V> -where - K: Eq, -{ - /// Key for this entry. - #[inline] - pub fn key(&self) -> &K { - self.key.key() - } - - /// Insert the value into the entry. - #[inline] - pub fn insert(self, value: V) -> &'a mut V { - self.insert_entry(value).1 - } - - #[inline] - pub(crate) fn insert_entry(self, value: V) -> (&'a K, &'a mut V) { - self.map.insert_hashed_unique_unchecked(self.key, value) - } -} - -impl<'a, K, V> Entry<'a, K, V> -where - K: Eq, -{ - /// Key for this entry. - #[inline] - pub fn key(&self) -> &K { - match self { - Entry::Occupied(e) => e.key(), - Entry::Vacant(e) => e.key(), - } - } - - /// Insert if vacant. - #[inline] - pub fn or_insert(self, default: V) -> &'a mut V { - self.or_insert_with(|| default) - } - - /// Insert if vacant. - #[inline] - pub fn or_insert_with(self, default: impl FnOnce() -> V) -> &'a mut V { - self.or_insert_entry_with(default).1 - } - - /// Insert if vacant. 
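The `Entry` API above mirrors `std::collections::hash_map::Entry`: one lookup decides between the occupied and vacant arms, so an insert after a miss does not hash the key a second time. A short usage sketch (assuming `starlark-map` as a dependency):

```rust
use starlark_map::small_map::{Entry, SmallMap};

fn main() {
    let mut counts: SmallMap<String, u32> = SmallMap::new();
    for word in ["a", "b", "a"] {
        // A single lookup, then branch on whether the key was present.
        match counts.entry(word.to_owned()) {
            Entry::Occupied(mut e) => *e.get_mut() += 1,
            Entry::Vacant(e) => {
                e.insert(1);
            }
        }
    }
    assert_eq!(Some(&2), counts.get("a"));

    // The `or_insert` family packages the same match more compactly.
    *counts.entry("c".to_owned()).or_insert(0) += 1;
    assert_eq!(Some(&1), counts.get("c"));
}
```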
- #[inline] - pub fn or_default(self) -> &'a mut V - where - V: Default, - { - self.or_insert_with(V::default) - } - - #[inline] - pub(crate) fn or_insert_entry_with(self, default: impl FnOnce() -> V) -> (&'a K, &'a mut V) { - match self { - Entry::Occupied(e) => e.into_mut_entry(), - Entry::Vacant(e) => e.insert_entry(default()), - } - } -} - -impl FromIterator<(K, V)> for SmallMap -where - K: Hash + Eq, -{ - fn from_iter>(iter: I) -> Self { - let iter = iter.into_iter(); - let mut mp = Self::with_capacity(iter.size_hint().0); - for (k, v) in iter { - mp.insert(k, v); - } - mp - } -} - -impl FromIterator<(Hashed, V)> for SmallMap -where - K: Eq, -{ - fn from_iter, V)>>(iter: I) -> Self { - let iter = iter.into_iter(); - let mut mp = Self::with_capacity(iter.size_hint().0); - for (k, v) in iter { - mp.insert_hashed(k, v); - } - mp - } -} - -impl IntoIterator for SmallMap { - type Item = (K, V); - type IntoIter = IntoIter; - - #[inline] - fn into_iter(self) -> Self::IntoIter { - self.into_iter() - } -} - -impl<'a, K, V> IntoIterator for &'a SmallMap { - type Item = (&'a K, &'a V); - type IntoIter = Iter<'a, K, V>; - - #[inline] - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl<'a, K, V> IntoIterator for &'a mut SmallMap { - type Item = (&'a K, &'a mut V); - type IntoIter = IterMut<'a, K, V>; - - #[inline] - fn into_iter(self) -> Self::IntoIter { - self.iter_mut() - } -} - -impl PartialEq for SmallMap { - fn eq(&self, other: &Self) -> bool { - self.len() == other.len() - && self - .iter_hashed() - .all(|(k, v)| other.get_hashed(k) == Some(v)) - } -} - -impl Eq for SmallMap {} - -impl Extend<(K, V)> for SmallMap -where - K: Hash + Eq, -{ - fn extend>(&mut self, iter: T) { - for (k, v) in iter { - self.insert(k, v); - } - } -} - -/// Create a [`SmallMap`](SmallMap) from a list of key-value pairs. -/// -/// ## Example -/// -/// ``` -/// use starlark_map::smallmap; -/// -/// let map = smallmap!{ -/// "a" => 1, -/// "b" => 2, -/// }; -/// assert_eq!(map.get("a"), Some(&1)); -/// assert_eq!(map.get("b"), Some(&2)); -/// assert_eq!(map.get("c"), None); -/// ``` -#[macro_export] -macro_rules! smallmap { - (@single $($x:tt)*) => (()); - (@count $($rest:expr),*) => (<[()]>::len(&[$(smallmap!(@single $rest)),*])); - - ($($key:expr => $value:expr,)+) => { smallmap!($($key => $value),+) }; - ($($key:expr => $value:expr),*) => { - { - let cap = smallmap!(@count $($key),*); - #[allow(unused_mut)] - let mut map = $crate::small_map::SmallMap::with_capacity(cap); - $( - map.insert($key, $value); - )* - map - } - }; -} - -impl Serialize for SmallMap { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - serializer.collect_map(self.iter()) - } -} - -impl<'de, K, V> Deserialize<'de> for SmallMap -where - K: Deserialize<'de> + Hash + Eq, - V: Deserialize<'de>, -{ - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - struct MapVisitor { - marker: PhantomData>, - } - - impl<'de, K, V> serde::de::Visitor<'de> for MapVisitor - where - K: Deserialize<'de> + Hash + Eq, - V: Deserialize<'de>, - { - type Value = SmallMap; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a map") - } - - #[inline] - fn visit_map(self, mut map: A) -> Result - where - A: serde::de::MapAccess<'de>, - { - let mut values = SmallMap::with_capacity(map.size_hint().unwrap_or(0)); - while let Some((key, value)) = map.next_entry()? 
{ - values.insert(key, value); - } - Ok(values) - } - } - - let visitor = MapVisitor { - marker: PhantomData, - }; - deserializer.deserialize_map(visitor) - } -} - -#[cfg(test)] -mod tests { - use std::cmp::Ordering; - use std::panic::catch_unwind; - use std::panic::AssertUnwindSafe; - - use super::*; - - #[test] - fn empty_map() { - let m = SmallMap::::new(); - assert_eq!(m.is_empty(), true); - assert_eq!(m.len(), 0); - assert_eq!(m.iter().next(), None); - } - - #[test] - fn few_entries() { - let entries1 = [(0, 'a'), (1, 'b')]; - let m1 = entries1.iter().copied().collect::>(); - - let entries2 = [(1, 'b'), (0, 'a')]; - let m2 = entries2.iter().copied().collect::>(); - assert_eq!(m1.is_empty(), false); - assert_eq!(m1.len(), 2); - assert_eq!(m2.is_empty(), false); - assert_eq!(m2.len(), 2); - - assert_eq!(m1.iter().eq(entries1.iter().map(|(k, v)| (k, v))), true); - assert_eq!(m2.iter().eq(entries2.iter().map(|(k, v)| (k, v))), true); - assert_eq!(m1.iter().eq(m2.iter()), false); - assert_eq!(m1.eq(&m1), true); - assert_eq!(m2.eq(&m2), true); - assert_eq!(m1, m2); - - assert_eq!(m1.get(&0), Some(&'a')); - assert_eq!(m1.get(&3), None); - assert_eq!(m2.get(&1), Some(&'b')); - assert_eq!(m2.get(&3), None); - - assert_eq!(m1.get_index(0), Some((&0, &'a'))); - assert_eq!(m1.get_index(1), Some((&1, &'b'))); - assert_eq!(m1.get_index(2), None); - - assert_ne!(m1, smallmap! { 0 => 'a', 1 => 'c' }); - - let iter = m1.iter(); - let (values1, values2): (Vec<_>, Vec<_>) = (iter.clone().collect(), iter.collect()); - assert_eq!(values1, values2); - } - - #[test] - fn many_entries() { - let numbers = 0..26; - let letters = 'a'..='z'; - - let entries1 = numbers.zip(letters); - let m1 = entries1.clone().collect::>(); - - let numbers = (0..26).rev(); - let letters = ('a'..='z').rev(); - let entries2 = numbers.zip(letters); - let m2 = entries2.clone().collect::>(); - assert_eq!(m1.is_empty(), false); - assert_eq!(m1.len(), 26); - assert_eq!(m2.is_empty(), false); - assert_eq!(m2.len(), 26); - - assert_eq!(m1.clone().into_iter().eq(entries1), true); - assert_eq!(m2.clone().into_iter().eq(entries2), true); - assert_eq!(m1.iter().eq(m2.iter()), false); - assert_eq!(m1.eq(&m1), true); - assert_eq!(m2.eq(&m2), true); - assert_eq!(m1, m2); - - assert_eq!(m1.get(&1), Some(&'b')); - assert_eq!(m1.get(&30), None); - assert_eq!(m2.get(&0), Some(&'a')); - assert_eq!(m2.get(&30), None); - assert_eq!(m2.get_full(&0), Some((25, &0, &'a'))); - assert_eq!(m2.get_full(&25), Some((0, &25, &'z'))); - assert_eq!(m2.get_full(&29), None); - - let not_m1 = { - let mut m = m1.clone(); - m.remove(&1); - m - }; - assert_ne!(m1, not_m1); - - let iter = m1.iter(); - let (values1, values2): (Vec<_>, Vec<_>) = (iter.clone().collect(), iter.collect()); - assert_eq!(values1, values2); - } - - #[test] - fn test_smallmap_macro() { - let map = smallmap![1 => "a", 3 => "b"]; - let mut i = map.into_iter(); - assert_eq!(i.next(), Some((1, "a"))); - assert_eq!(i.next(), Some((3, "b"))); - assert_eq!(i.next(), None); - } - - #[test] - fn test_clone() { - let map = smallmap![1 => "a", 3 => "b"]; - let iter = map.iter(); - let values1: Vec<_> = iter.clone().collect(); - let values2: Vec<_> = iter.collect(); - assert_eq!(vec![(&1, &"a"), (&3, &"b")], values1); - assert_eq!(values1, values2); - - let iter = map.keys(); - let values1: Vec<_> = iter.clone().collect(); - let values2: Vec<_> = iter.collect(); - assert_eq!(vec![&1, &3], values1); - assert_eq!(values1, values2); - - let iter = map.values(); - let values1: Vec<_> = iter.clone().collect(); - 
let values2: Vec<_> = iter.collect(); - assert_eq!(vec![&"a", &"b"], values1); - assert_eq!(values1, values2); - } - - #[test] - fn test_duplicate_hashes() { - // A type which always gives hash collisions - #[derive(PartialEq, Eq, Debug)] - struct K(i32); - #[allow(clippy::derived_hash_with_manual_eq)] - impl Hash for K { - fn hash(&self, _state: &mut H) {} - } - - let mut map = smallmap![K(1) => "test", K(3) => "more"]; - assert_eq!(map.get(&K(1)), Some(&"test")); - assert_eq!(map.get(&K(2)), None); - assert_eq!(map.get(&K(3)), Some(&"more")); - - assert_eq!(map.insert(K(2), "magic"), None); - assert_eq!(map.get(&K(2)), Some(&"magic")); - - assert_eq!(map.remove(&K(1)), Some("test")); - assert_eq!(map.get(&K(1)), None); - assert_eq!(map.keys().collect::>(), vec![&K(3), &K(2)]); - } - - #[test] - fn test_smallmap_debug() { - let s = format!("{:?}", smallmap![1 => "test", 2 => "more"]); - assert_eq!(s, "{1: \"test\", 2: \"more\"}") - } - - #[test] - fn entry() { - let mut map = SmallMap::new(); - for i in 0..100 { - match map.entry(i) { - Entry::Vacant(e) => { - e.insert(i * 2); - } - Entry::Occupied(..) => panic!(), - } - match map.entry(i) { - Entry::Occupied(..) => {} - Entry::Vacant(..) => panic!(), - } - } - } - - #[test] - fn test_pop_small() { - let mut map = SmallMap::new(); - for i in 0..=5 { - map.insert(i, i * 10); - } - for i in (0..=5).rev() { - assert_eq!((i, i * 10), map.pop().unwrap()); - map.state_check(); - } - assert!(map.is_empty()); - } - - #[test] - fn test_pop_large() { - let mut map = SmallMap::new(); - for i in 0..=500 { - map.insert(i, i * 10); - } - for i in (0..=500).rev() { - assert_eq!((i, i * 10), map.pop().unwrap()); - if i % 100 == 0 { - map.state_check(); - } - } - assert!(map.is_empty()); - } - - #[test] - fn test_first() { - let mut map = SmallMap::new(); - map.insert(1, 10); - assert_eq!(map.first(), Some((&1, &10))); - map.insert(2, 20); - assert_eq!(map.first(), Some((&1, &10))); - map.remove(&1); - assert_eq!(map.first(), Some((&2, &20))); - } - - #[test] - fn test_last() { - let mut map = SmallMap::new(); - map.insert(1, 10); - assert_eq!(map.last(), Some((&1, &10))); - map.insert(2, 20); - assert_eq!(map.last(), Some((&2, &20))); - map.insert(1, 100); - assert_eq!(map.last(), Some((&2, &20))); - } - - #[test] - fn test_sort_keys_no_index() { - let mut map = SmallMap::new(); - map.insert(2, 20); - map.insert(1, 10); - map.insert(3, 30); - map.sort_keys(); - assert_eq!( - vec![(&1, &10), (&2, &20), (&3, &30)], - map.iter().collect::>() - ); - assert_eq!(&10, map.get(&1).unwrap()); - assert_eq!(&20, map.get(&2).unwrap()); - assert_eq!(&30, map.get(&3).unwrap()); - } - - #[test] - fn test_sort_keys_with_index() { - let mut map = SmallMap::new(); - for i in 1..=100 { - map.insert(i, i * 10); - } - map.sort_keys(); - assert_eq!( - (1..=100).map(|i| (i, i * 10)).collect::>(), - map.iter().map(|(k, v)| (*k, *v)).collect::>() - ); - for i in 1..=100 { - assert_eq!(i * 10, *map.get(&i).unwrap()); - } - } - - #[test] - fn test_sort_keys_updates_index_on_panic() { - #[derive(Hash, PartialEq, Eq, Debug)] - struct Key(u32); - - impl PartialOrd for Key { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } - } - - impl Ord for Key { - fn cmp(&self, other: &Self) -> Ordering { - if self.0 < 10 && other.0 < 10 { - panic!("panic in Ord::cmp") - } - self.0.cmp(&other.0) - } - } - - let mut map = SmallMap::new(); - for i in (1..=100).rev() { - map.insert(Key(i), i * 10); - } - catch_unwind(AssertUnwindSafe(|| map.sort_keys())).unwrap_err(); - 
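The `catch_unwind(AssertUnwindSafe(..))` call above is the standard way to test panic safety: the panic is converted into an `Err`, and the structure stays observable afterwards so its invariants can be checked. In miniature:

```rust
use std::panic::{catch_unwind, AssertUnwindSafe};

fn main() {
    let mut v = vec![1, 2, 3];
    // AssertUnwindSafe is our promise that observing `v` after the
    // caught panic is acceptable for this test.
    let result = catch_unwind(AssertUnwindSafe(|| {
        v.push(4);
        panic!("boom");
    }));
    assert!(result.is_err());
    // The value is still usable after the caught panic.
    assert_eq!(vec![1, 2, 3, 4], v);
}
```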
// If index is not updated on panic, the following assertion will fail.
-        map.assert_invariants();
-    }
-
-    #[test]
-    fn test_eq_ordered() {
-        let m0 = SmallMap::from_iter([(1, 2), (3, 4)]);
-        let m1 = SmallMap::from_iter([(1, 2), (3, 4)]);
-        let m2 = SmallMap::from_iter([(3, 4), (1, 2)]);
-        let m3 = SmallMap::from_iter([(3, 4)]);
-        assert!(m0.eq_ordered(&m0));
-        assert!(m0.eq_ordered(&m1));
-        assert!(!m0.eq_ordered(&m2));
-        assert!(!m0.eq_ordered(&m3));
-    }
-
-    #[test]
-    fn test_remove() {
-        // Large enough so the index is used.
-        let mut m = (0..100).map(|i| (i, i * 10)).collect::<SmallMap<_, _>>();
-        assert_eq!(Some((1, 10)), m.remove_entry(&1));
-        assert_eq!(Some(&30), m.get(&3));
-        m.assert_invariants();
-    }
-
-    #[test]
-    fn test_remove_last() {
-        // Large enough so the index is used.
-        let mut m = (0..100).map(|i| (i, i * 10)).collect::<SmallMap<_, _>>();
-        assert_eq!(Some((99, 990)), m.remove_entry(&99));
-        assert_eq!(Some(&980), m.get(&98));
-        m.assert_invariants();
-    }
-
-    #[test]
-    fn test_json() {
-        let mp = smallmap! {"a".to_owned() => 1, "b".to_owned() => 2};
-        let expected = serde_json::json!({
-            "a": 1,
-            "b": 2,
-        });
-        assert_eq!(serde_json::to_value(&mp).unwrap(), expected);
-        assert_eq!(
-            serde_json::from_value::<SmallMap<String, i32>>(expected).unwrap(),
-            mp
-        );
-    }
-}
diff --git a/starlark-rust/starlark_map/src/small_set.rs b/starlark-rust/starlark_map/src/small_set.rs
new file mode 100644
index 0000000000000..9406e2b197157
--- /dev/null
+++ b/starlark-rust/starlark_map/src/small_set.rs
@@ -0,0 +1,791 @@
+/*
+ * Copyright 2019 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Ordered set.
+
+mod iter;
+
+use std::fmt;
+use std::fmt::Debug;
+use std::hash::Hash;
+use std::hash::Hasher;
+use std::marker::PhantomData;
+
+use allocative::Allocative;
+use equivalent::Equivalent;
+use serde::Deserialize;
+use serde::Serialize;
+
+use crate::hashed::Hashed;
+use crate::small_map::SmallMap;
+pub use crate::small_set::iter::IntoIter;
+pub use crate::small_set::iter::IntoIterHashed;
+pub use crate::small_set::iter::Iter;
+pub use crate::small_set::iter::IterHashed;
+pub use crate::small_set::iter::IterMutUnchecked;
+
+/// A memory-efficient set with deterministic order, based on [`SmallMap`].
+#[derive(Clone, Allocative)] +pub struct SmallSet(SmallMap); + +impl Default for SmallSet { + #[inline] + fn default() -> Self { + Self::new() + } +} + +impl Debug for SmallSet { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_set().entries(self.iter()).finish() + } +} + +impl Eq for SmallSet where T: Eq {} + +impl PartialEq for SmallSet +where + T: Eq, +{ + fn eq(&self, other: &Self) -> bool { + self.0.eq(&other.0) + } +} + +impl FromIterator for SmallSet +where + T: Hash + Eq, +{ + fn from_iter>(iter: I) -> Self { + let iter = iter.into_iter(); + let mut smallset = Self::with_capacity(iter.size_hint().0); + for t in iter { + smallset.insert(t); + } + smallset + } +} + +impl SmallSet { + /// Creates an empty `SmallSet`. + #[inline] + pub const fn new() -> Self { + SmallSet(SmallMap::new()) + } + + /// Empty small set with preallocated capacity. + #[inline] + pub fn with_capacity(n: usize) -> Self { + Self(SmallMap::with_capacity(n)) + } + + /// Current capacity of the set. + #[inline] + pub fn capacity(&self) -> usize { + self.0.capacity() + } + + /// Iterate the element references. + #[inline] + pub fn iter(&self) -> Iter { + self.into_iter() + } + + /// Iterate the hashed element references. + #[inline] + pub fn iter_hashed(&self) -> IterHashed { + IterHashed { + iter: self.0.iter_hashed(), + } + } + + /// Iterate the mutable element references. + /// + /// This operation is memory safe, but otherwise no guarantees + /// if keys are mutated inconsistently (hash or equality changes). + #[inline] + pub fn iter_mut_unchecked(&mut self) -> IterMutUnchecked { + IterMutUnchecked { + iter: self.0.iter_mut_unchecked(), + } + } + + /// Into hashed entries. + #[inline] + pub fn into_iter_hashed(self) -> IntoIterHashed { + IntoIterHashed { + iter: self.0.into_iter_hashed(), + } + } + + /// Insert the element into the set. + /// + /// Return `true` iff the element was inserted. + #[inline] + pub fn insert(&mut self, key: T) -> bool + where + T: Hash + Eq, + { + self.0.insert(key, ()).is_none() + } + + /// Insert the element into the set without checking for a duplicate entry. + #[inline] + pub fn insert_unique_unchecked(&mut self, key: T) + where + T: Hash, + { + self.0.insert_unique_unchecked(key, ()); + } + + /// Insert the element into the set. + /// + /// Return `true` iff the element was inserted. + #[inline] + pub fn insert_hashed(&mut self, key: Hashed) -> bool + where + T: Eq, + { + self.0.insert_hashed(key, ()).is_none() + } + + /// Insert an entry into the set without checking for a duplicate key. + #[inline] + pub fn insert_hashed_unique_unchecked(&mut self, key: Hashed) { + self.0.insert_hashed_unique_unchecked(key, ()); + } + + /// Return a reference to the value stored in the set, if it is present, + /// else `None`. + /// + /// Computes in **O(1)** time (average). + #[inline] + pub fn get(&self, value: &Q) -> Option<&T> + where + Q: Hash + Equivalent + ?Sized, + T: Eq, + { + self.0.get_full(value).map(|(_, t, _)| t) + } + + /// Query the set by a prehashed value. + #[inline] + pub fn get_hashed(&self, value: Hashed<&Q>) -> Option<&T> + where + Q: Equivalent + ?Sized, + T: Eq, + { + self.0.get_full_hashed(value).map(|(_, t, _)| t) + } + + /// Find an entry by an index. 
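Because `SmallSet` is a thin wrapper over `SmallMap<T, ()>`, it inherits deterministic (insertion) iteration order, and `insert` reports whether the element was new. A small sketch (assuming `starlark-map` as a dependency):

```rust
use starlark_map::small_set::SmallSet;

fn main() {
    let mut set: SmallSet<String> = SmallSet::new();
    assert!(set.insert("a".to_owned())); // newly added
    assert!(!set.insert("a".to_owned())); // already present
    set.insert("b".to_owned());

    // Iteration order is insertion order, not hash order.
    let items: Vec<&str> = set.iter().map(|s| s.as_str()).collect();
    assert_eq!(vec!["a", "b"], items);
}
```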
+ #[inline] + pub fn get_index(&self, index: usize) -> Option<&T> { + self.0.get_index(index).map(|(k, _)| k) + } + + /// Return item index, if it exists in the set + #[inline] + pub fn get_index_of(&self, value: &Q) -> Option + where + Q: Hash + Equivalent + ?Sized, + T: Eq, + { + self.0.get_index_of(value) + } + + /// Find the index of the given hashed value. + #[inline] + pub fn get_index_of_hashed(&self, value: Hashed<&Q>) -> Option + where + Q: Equivalent + ?Sized, + T: Eq, + { + self.0.get_index_of_hashed(value) + } + + /// Find the index of the given hashed value. + /// + /// This operations is similar to [`get_index_of_hashed`](Self::get_index_of_hashed), + /// but it takes the key by value, instead of by reference + /// which sometimes generates better code. + #[inline] + pub fn get_index_of_hashed_by_value(&self, value: Hashed) -> Option + where + Q: Equivalent, + T: Eq, + { + self.0.get_index_of_hashed_by_value(value) + } + + /// Remove the element from the set if it is present. + /// + /// Time complexity of this operation is *O(N)* where *N* is the number of entries in the set. + #[inline] + pub fn shift_remove(&mut self, key: &Q) -> bool + where + Q: ?Sized + Hash + Equivalent, + T: Eq, + { + self.0.shift_remove(key).is_some() + } + + /// Remove the element by index. This is *O(N)* operation. + #[inline] + pub fn shift_remove_index_hashed(&mut self, i: usize) -> Option> { + Some(self.0.shift_remove_index_hashed(i)?.0) + } + + /// Remove the element by index. This is *O(N)* operation. + #[inline] + pub fn shift_remove_index(&mut self, i: usize) -> Option { + Some(self.shift_remove_index_hashed(i)?.into_key()) + } + + /// Remove the entry for the key. + /// + /// Time complexity of this operation is *O(N)* where *N* is the number of entries in the set. + pub fn shift_remove_hashed(&mut self, key: Hashed<&Q>) -> bool + where + Q: ?Sized + Equivalent, + { + self.0.shift_remove_hashed(key).is_some() + } + + /// Insert entry if it doesn't exist. + /// + /// Return the resulting entry in the map. + #[inline] + pub fn get_or_insert(&mut self, value: T) -> &T + where + T: Hash + Eq, + { + let value = Hashed::new(value); + match self + .0 + .get_index_of_hashed_raw(value.hash(), |v| value.key().equivalent(v)) + { + Some(index) => self.0.get_index(index).unwrap().0, + None => self.0.insert_hashed_unique_unchecked(value, ()).0, + } + } + + /// Insert entry if it doesn't exist. + /// + /// Return the resulting entry in the map. + #[inline] + pub fn get_or_insert_owned(&mut self, value: &Q) -> &T + where + Q: Hash + Equivalent + ToOwned + ?Sized, + T: Eq, + { + let value = Hashed::new(value); + match self.0.get_index_of_hashed(value) { + Some(index) => self.0.get_index(index).unwrap().0, + None => self.0.insert_hashed_unique_unchecked(value.owned(), ()).0, + } + } + + /// Remove the element from the set if it is present, + /// + /// and return the removed element. + #[inline] + pub fn take(&mut self, key: &Q) -> Option + where + Q: ?Sized + Hash + Equivalent, + T: Eq, + { + self.0.shift_remove_entry(key).map(|(k, _)| k) + } + + /// Remove the last element from the set. + #[inline] + pub fn pop(&mut self) -> Option + where + T: Eq, + { + self.0.pop().map(|(k, ())| k) + } + + /// Is the set empty? + #[inline] + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Number of elements in the set. + #[inline] + pub fn len(&self) -> usize { + self.0.len() + } + + /// Check if the set contains an element. 
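`get_or_insert` above returns a reference to the element already stored when an equal one exists, which makes the set usable as a small interner; the `Rc::ptr_eq` tests further down exercise exactly this. A sketch (assuming `starlark-map` as a dependency):

```rust
use std::rc::Rc;

use starlark_map::small_set::SmallSet;

fn main() {
    let mut interner: SmallSet<Rc<str>> = SmallSet::new();
    // The second call hands back the Rc stored by the first one.
    let a1 = interner.get_or_insert(Rc::from("alpha")).clone();
    let a2 = interner.get_or_insert(Rc::from("alpha")).clone();
    assert!(Rc::ptr_eq(&a1, &a2)); // same allocation, one copy stored
    assert_eq!(1, interner.len());
}
```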
+ #[inline] + pub fn contains(&self, key: &Q) -> bool + where + Q: Hash + Equivalent + ?Sized, + T: Eq, + { + self.0.contains_key(key) + } + + /// Check if the set contains an element. + #[inline] + pub fn contains_hashed(&self, key: Hashed<&Q>) -> bool + where + Q: Equivalent + ?Sized, + T: Eq, + { + self.0.contains_key_hashed(key) + } + + /// Remove all elements from the set. + /// + /// Retain the capacity. + #[inline] + pub fn clear(&mut self) { + self.0.clear() + } + + /// Returns a reference to the first item. + #[inline] + pub fn first(&self) -> Option<&T> { + self.0.first().map(|(k, ())| k) + } + + /// Returns a reference to the last item. + #[inline] + pub fn last(&self) -> Option<&T> { + self.0.last().map(|(k, ())| k) + } + + /// Iterator over elements of this set which are not in the other set. + pub fn difference<'a>(&'a self, other: &'a Self) -> Difference<'a, T> + where + T: Eq + Hash, + { + Difference { + iter: self.iter(), + other, + } + } + + /// Iterator over union of two sets. + /// + /// Iteration order is: elements of this set followed by elements in the other set + /// not present in this set. + pub fn union<'a>(&'a self, other: &'a Self) -> Union<'a, T> + where + T: Eq + Hash, + { + Union { + iter: self.iter().chain(other.difference(self)), + } + } + + /// Sort entries. + pub fn sort(&mut self) + where + T: Ord, + { + self.0.sort_keys(); + } + + /// Equal if entries are equal in iteration order. + pub fn eq_ordered(&self, other: &Self) -> bool + where + T: PartialEq, + { + self.0.eq_ordered(&other.0) + } + + /// Hash entries in iteration order. + /// + /// Note, entries are not hashed, but previously computed hashes are hashed instead. + pub fn hash_ordered(&self, state: &mut H) + where + T: Hash, + { + self.0.hash_ordered(state) + } + + /// Reverse the iteration order of the set. + pub fn reverse(&mut self) { + self.0.reverse(); + } + + /// Retains only the elements specified by the predicate. + pub fn retain(&mut self, mut f: F) + where + F: FnMut(&T) -> bool, + { + self.0.retain(|k, _| f(k)) + } +} + +impl<'a, T> IntoIterator for &'a SmallSet { + type Item = &'a T; + type IntoIter = Iter<'a, T>; + + #[inline] + fn into_iter(self) -> Self::IntoIter { + Iter { + iter: self.0.iter(), + } + } +} + +impl IntoIterator for SmallSet { + type Item = T; + type IntoIter = IntoIter; + + #[inline] + fn into_iter(self) -> Self::IntoIter { + IntoIter { + iter: self.0.into_iter(), + } + } +} + +impl Extend for SmallSet +where + T: Eq + Hash, +{ + fn extend>(&mut self, iter: I) { + self.0.extend(iter.into_iter().map(|v| (v, ()))); + } +} + +/// Iterator over the difference of two sets. +pub struct Difference<'a, T: 'a> { + iter: Iter<'a, T>, + other: &'a SmallSet, +} + +impl<'a, T: 'a> Iterator for Difference<'a, T> +where + T: Hash + Eq, +{ + type Item = &'a T; + + #[allow(clippy::while_let_on_iterator)] + fn next(&mut self) -> Option { + while let Some(item) = self.iter.next() { + if !self.other.contains(item) { + return Some(item); + } + } + None + } + + fn size_hint(&self) -> (usize, Option) { + ( + self.iter.len().saturating_sub(self.other.len()), + Some(self.iter.len()), + ) + } +} + +/// Iterator over a union of two sets. 
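Since both operands iterate in insertion order, `difference` and `union` above are fully deterministic: `difference` walks `self` and filters, while `union` chains `self` with `other.difference(self)`. For example:

```rust
use starlark_map::small_set::SmallSet;

fn main() {
    let a: SmallSet<u32> = SmallSet::from_iter([1, 2, 3]);
    let b: SmallSet<u32> = SmallSet::from_iter([2, 4]);

    // Elements of `a` not in `b`, in `a`'s insertion order.
    assert_eq!(vec![1, 3], a.difference(&b).copied().collect::<Vec<_>>());

    // All of `a`, then whatever `b` adds; independent of hash values.
    assert_eq!(vec![1, 2, 3, 4], a.union(&b).copied().collect::<Vec<_>>());
}
```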
+pub struct Union<'a, T: 'a> { + iter: std::iter::Chain, Difference<'a, T>>, +} + +impl<'a, T: 'a> Iterator for Union<'a, T> +where + T: Hash + Eq, +{ + type Item = &'a T; + + fn next(&mut self) -> Option { + self.iter.next() + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +/// Create a [`SmallSet`](SmallSet) from a list of values. +/// +/// ## Example +/// +/// ``` +/// use starlark_map::smallset; +/// +/// let set = smallset! {"a", "b"}; +/// assert_eq!(set.contains("a"), true); +/// assert_eq!(set.len(), 2); +/// assert_eq!(set.contains("c"), false); +/// ``` +#[macro_export] +macro_rules! smallset { + (@single $($x:tt)*) => (()); + (@count $($rest:expr),*) => (<[()]>::len(&[$(smallset!(@single $rest)),*])); + + ($($key:expr,)+) => { smallset!($($key),+) }; + ($($key:expr),*) => { + { + let cap = smallset!(@count $($key),*); + let mut set = $crate::small_set::SmallSet::with_capacity(cap); + $( + set.insert($key); + )* + set + } + }; +} + +impl Serialize for SmallSet { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + serializer.collect_seq(self.iter()) + } +} + +impl<'de, T> Deserialize<'de> for SmallSet +where + T: Deserialize<'de> + Hash + Eq, +{ + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + struct SeqVisitor { + marker: PhantomData>, + } + + impl<'de, T> serde::de::Visitor<'de> for SeqVisitor + where + T: serde::de::Deserialize<'de> + Hash + Eq, + { + type Value = SmallSet; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a sequence") + } + + #[inline] + fn visit_seq(self, mut set: A) -> Result + where + A: serde::de::SeqAccess<'de>, + { + let mut values = SmallSet::with_capacity(set.size_hint().unwrap_or(0)); + while let Some(value) = set.next_element()? 
{ + values.insert(value); + } + Ok(values) + } + } + + let visitor = SeqVisitor { + marker: PhantomData, + }; + deserializer.deserialize_seq(visitor) + } +} + +#[cfg(test)] +mod tests { + use std::collections::HashSet; + use std::rc::Rc; + + use dupe::Dupe; + use dupe::IterDupedExt; + + use super::*; + + #[test] + fn empty_set() { + let m = SmallSet::::new(); + assert_eq!(m.is_empty(), true); + assert_eq!(m.len(), 0); + assert_eq!(m.iter().next(), None); + } + + #[test] + fn few_entries() { + let entries1 = [(0), (1)]; + let m1 = entries1.iter().duped().collect::>(); + + let entries2 = [(1), (0)]; + let m2 = entries2.iter().duped().collect::>(); + assert_eq!(m1.is_empty(), false); + assert_eq!(m1.len(), 2); + assert_eq!(m2.is_empty(), false); + assert_eq!(m2.len(), 2); + + assert_eq!(m1.iter().eq(entries1.iter()), true); + assert_eq!(m2.iter().eq(entries2.iter()), true); + assert_eq!(m1.iter().eq(m2.iter()), false); + assert_eq!(m1.eq(&m1), true); + assert_eq!(m2.eq(&m2), true); + assert_eq!(m1, m2); + + assert_ne!(m1, smallset![1]) + } + + #[test] + fn many_entries() { + let letters = 'a'..='z'; + + let entries1 = letters; + let m1 = entries1.clone().collect::>(); + + assert_eq!(m1.get(&'b'), Some(&'b')); + assert_eq!(m1.get_index_of(&'b'), Some(1)); + + assert_eq!(m1.get(&'!'), None); + assert_eq!(m1.get_index_of(&'!'), None); + + let letters = ('a'..='z').rev(); + let entries2 = letters; + let m2 = entries2.clone().collect::>(); + assert_eq!(m1.is_empty(), false); + assert_eq!(m1.len(), 26); + assert_eq!(m2.is_empty(), false); + assert_eq!(m2.len(), 26); + + assert_eq!(m1.clone().into_iter().eq(entries1), true); + assert_eq!(m2.clone().into_iter().eq(entries2), true); + assert_eq!(m1.iter().eq(m2.iter()), false); + assert_eq!(m1.eq(&m1), true); + assert_eq!(m2.eq(&m2), true); + assert_eq!(m1, m2); + + let not_m1 = { + let mut s = m1.clone(); + s.shift_remove(&'a'); + s + }; + assert_ne!(m1, not_m1); + } + + #[test] + fn small_set_macros() { + let s = smallset![1, 4, 2]; + let mut i = s.into_iter(); + assert_eq!(i.next(), Some(1)); + assert_eq!(i.next(), Some(4)); + assert_eq!(i.next(), Some(2)); + assert_eq!(i.next(), None); + } + + #[test] + fn small_set_inserts() { + let mut s = SmallSet::new(); + assert_eq!(s.insert(2), true); + assert_eq!(s.insert(5), true); + + assert_eq!(s.insert(5), false); + } + + #[test] + fn get_or_insert() { + let mut set = SmallSet::new(); + let x = set.get_or_insert(Rc::new(1)).dupe(); + let x1 = set.get_or_insert(Rc::new(1)); + assert!(Rc::ptr_eq(&x, x1)); + } + + #[test] + fn get_or_insert_owned() { + let mut set = SmallSet::new(); + let x = set.get_or_insert_owned(&Rc::new(1)).dupe(); + let x1 = set.get_or_insert_owned(&Rc::new(1)); + assert!(Rc::ptr_eq(&x, x1)); + } + + #[test] + fn test_first() { + let mut s = SmallSet::new(); + s.insert(1); + assert_eq!(s.first(), Some(&1)); + s.insert(2); + assert_eq!(s.first(), Some(&1)); + s.shift_remove(&1); + assert_eq!(s.first(), Some(&2)); + } + + #[test] + fn test_last() { + let mut s = SmallSet::new(); + s.insert(1); + assert_eq!(s.last(), Some(&1)); + s.insert(2); + assert_eq!(s.last(), Some(&2)); + } + + #[test] + fn test_shift_remove() { + let mut h: HashSet = HashSet::from_iter([17]); + let mut s: SmallSet = SmallSet::from_iter([17]); + assert!(h.remove(&17)); + assert!(s.shift_remove(&17)); + assert!(!h.remove(&17)); + assert!(!s.shift_remove(&17)); + } + + #[test] + fn test_difference() { + let a = SmallSet::from_iter([1, 2, 3]); + let b = SmallSet::from_iter([2, 4, 1]); + let d = 
Vec::from_iter(a.difference(&b).copied()); + assert_eq!(vec![3], d); + } + + #[test] + fn test_union() { + let a = SmallSet::from_iter([1, 2, 3]); + let b = SmallSet::from_iter([2, 4, 1]); + let d = Vec::from_iter(a.union(&b).copied()); + assert_eq!(vec![1, 2, 3, 4], d); + } + + #[test] + fn test_sort() { + let mut a = SmallSet::from_iter([1, 3, 2]); + a.sort(); + assert_eq!(vec![1, 2, 3], Vec::from_iter(a)); + } + + #[test] + fn test_difference_size_hint() { + let a = SmallSet::from_iter([1, 2, 3]); + let b = SmallSet::from_iter([2]); + let mut iter = a.difference(&b); + assert_eq!((2, Some(3)), iter.size_hint()); + assert_eq!(Some(&1), iter.next()); + assert_eq!((1, Some(2)), iter.size_hint()); + assert_eq!(Some(&3), iter.next()); + assert_eq!((0, Some(0)), iter.size_hint()); + assert_eq!(None, iter.next()); + } + + #[test] + fn test_json() { + let mp = smallset! {"a".to_owned() , "b".to_owned() }; + let expected = serde_json::json!(["a", "b"]); + assert_eq!(serde_json::to_value(&mp).unwrap(), expected); + assert_eq!( + serde_json::from_value::>(expected).unwrap(), + mp + ); + } +} diff --git a/starlark-rust/starlark_map/src/small_set/mod.rs b/starlark-rust/starlark_map/src/small_set/mod.rs deleted file mode 100644 index 6998fc95944fc..0000000000000 --- a/starlark-rust/starlark_map/src/small_set/mod.rs +++ /dev/null @@ -1,756 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! Ordered set. - -mod iter; - -use std::fmt; -use std::fmt::Debug; -use std::hash::Hash; -use std::hash::Hasher; -use std::marker::PhantomData; - -use allocative::Allocative; -use equivalent::Equivalent; -use serde::Deserialize; -use serde::Serialize; - -use crate::hashed::Hashed; -use crate::small_map::SmallMap; -pub use crate::small_set::iter::IntoIter; -pub use crate::small_set::iter::IntoIterHashed; -pub use crate::small_set::iter::Iter; -pub use crate::small_set::iter::IterHashed; -pub use crate::small_set::iter::IterMutUnchecked; - -/// An memory-efficient set with deterministic order, based on [`SmallMap`]. -#[derive(Clone, Allocative)] -pub struct SmallSet(SmallMap); - -impl Default for SmallSet { - #[inline] - fn default() -> Self { - Self::new() - } -} - -impl Debug for SmallSet { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_set().entries(self.iter()).finish() - } -} - -impl Eq for SmallSet where T: Eq {} - -impl PartialEq for SmallSet -where - T: Eq, -{ - fn eq(&self, other: &Self) -> bool { - self.0.eq(&other.0) - } -} - -impl FromIterator for SmallSet -where - T: Hash + Eq, -{ - fn from_iter>(iter: I) -> Self { - let iter = iter.into_iter(); - let mut smallset = Self::with_capacity(iter.size_hint().0); - for t in iter { - smallset.insert(t); - } - smallset - } -} - -impl SmallSet { - /// Creates an empty `SmallSet`. - #[inline] - pub const fn new() -> Self { - SmallSet(SmallMap::new()) - } - - /// Empty small set with preallocated capacity. 
- #[inline] - pub fn with_capacity(n: usize) -> Self { - Self(SmallMap::with_capacity(n)) - } - - /// Current capacity of the set. - #[inline] - pub fn capacity(&self) -> usize { - self.0.capacity() - } - - /// Iterate the element references. - #[inline] - pub fn iter(&self) -> Iter { - self.into_iter() - } - - /// Iterate the hashed element references. - #[inline] - pub fn iter_hashed(&self) -> IterHashed { - IterHashed { - iter: self.0.iter_hashed(), - } - } - - /// Iterate the mutable element references. - /// - /// This operation is memory safe, but otherwise no guarantees - /// if keys are mutated inconsistently (hash or equality changes). - #[inline] - pub fn iter_mut_unchecked(&mut self) -> IterMutUnchecked { - IterMutUnchecked { - iter: self.0.iter_mut_unchecked(), - } - } - - /// Into hashed entries. - #[inline] - pub fn into_iter_hashed(self) -> IntoIterHashed { - IntoIterHashed { - iter: self.0.into_iter_hashed(), - } - } - - /// Insert the element into the set. - /// - /// Return `true` iff the element was inserted. - #[inline] - pub fn insert(&mut self, key: T) -> bool - where - T: Hash + Eq, - { - self.0.insert(key, ()).is_none() - } - - /// Insert the element into the set without checking for a duplicate entry. - #[inline] - pub fn insert_unique_unchecked(&mut self, key: T) - where - T: Hash + Eq, - { - self.0.insert_unique_unchecked(key, ()); - } - - /// Insert the element into the set. - /// - /// Return `true` iff the element was inserted. - #[inline] - pub fn insert_hashed(&mut self, key: Hashed) -> bool - where - T: Eq, - { - self.0.insert_hashed(key, ()).is_none() - } - - /// Insert an entry into the set without checking for a duplicate key. - #[inline] - pub fn insert_hashed_unique_unchecked(&mut self, key: Hashed) { - self.0.insert_hashed_unique_unchecked(key, ()); - } - - /// Return a reference to the value stored in the set, if it is present, - /// else `None`. - /// - /// Computes in **O(1)** time (average). - #[inline] - pub fn get(&self, value: &Q) -> Option<&T> - where - Q: Hash + Equivalent + ?Sized, - T: Eq, - { - self.0.get_full(value).map(|(_, t, _)| t) - } - - /// Query the set by a prehashed value. - #[inline] - pub fn get_hashed(&self, value: Hashed<&Q>) -> Option<&T> - where - Q: Equivalent + ?Sized, - T: Eq, - { - self.0.get_full_hashed(value).map(|(_, t, _)| t) - } - - /// Find an entry by an index. - #[inline] - pub fn get_index(&self, index: usize) -> Option<&T> { - self.0.get_index(index).map(|(k, _)| k) - } - - /// Return item index, if it exists in the set - #[inline] - pub fn get_index_of(&self, value: &Q) -> Option - where - Q: Hash + Equivalent + ?Sized, - T: Eq, - { - self.0.get_index_of(value) - } - - /// Find the index of the given hashed value. - #[inline] - pub fn get_index_of_hashed(&self, value: Hashed<&Q>) -> Option - where - Q: Equivalent + ?Sized, - T: Eq, - { - self.0.get_index_of_hashed(value) - } - - /// Find the index of the given hashed value. - /// - /// This operations is similar to [`get_index_of_hashed`](Self::get_index_of_hashed), - /// but it takes the key by value, instead of by reference - /// which sometimes generates better code. - #[inline] - pub fn get_index_of_hashed_by_value(&self, value: Hashed) -> Option - where - Q: Equivalent, - T: Eq, - { - self.0.get_index_of_hashed_by_value(value) - } - - /// Remove the element from the set if it is present. - /// - /// Time complexity of this operation is *O(N)* where *N* is the number of entries in the set. 
- #[inline] - pub fn remove(&mut self, key: &Q) -> bool - where - Q: ?Sized + Hash + Equivalent, - T: Eq, - { - self.0.remove(key).is_some() - } - - /// Insert entry if it doesn't exist. - /// - /// Return the resulting entry in the map. - #[inline] - pub fn get_or_insert(&mut self, value: T) -> &T - where - T: Hash + Eq, - { - let value = Hashed::new(value); - match self - .0 - .get_index_of_hashed_raw(value.hash(), |v| value.key().equivalent(v)) - { - Some(index) => self.0.get_index(index).unwrap().0, - None => self.0.insert_hashed_unique_unchecked(value, ()).0, - } - } - - /// Insert entry if it doesn't exist. - /// - /// Return the resulting entry in the map. - #[inline] - pub fn get_or_insert_owned(&mut self, value: &Q) -> &T - where - Q: Hash + Equivalent + ToOwned + ?Sized, - T: Eq, - { - let value = Hashed::new(value); - match self.0.get_index_of_hashed(value) { - Some(index) => self.0.get_index(index).unwrap().0, - None => self.0.insert_hashed_unique_unchecked(value.owned(), ()).0, - } - } - - /// Remove the element from the set if it is present, - /// - /// and return the removed element. - #[inline] - pub fn take(&mut self, key: &Q) -> Option - where - Q: ?Sized + Hash + Equivalent, - T: Eq, - { - self.0.remove_entry(key).map(|(k, _)| k) - } - - /// Remove the last element from the set. - #[inline] - pub fn pop(&mut self) -> Option - where - T: Eq, - { - self.0.pop().map(|(k, ())| k) - } - - /// Is the set empty? - #[inline] - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } - - /// Number of elements in the set. - #[inline] - pub fn len(&self) -> usize { - self.0.len() - } - - /// Check if the set contains an element. - #[inline] - pub fn contains(&self, key: &Q) -> bool - where - Q: Hash + Equivalent + ?Sized, - T: Eq, - { - self.0.contains_key(key) - } - - /// Check if the set contains an element. - #[inline] - pub fn contains_hashed(&self, key: Hashed<&Q>) -> bool - where - Q: Equivalent + ?Sized, - T: Eq, - { - self.0.contains_key_hashed(key) - } - - /// Remove all elements from the set. - /// - /// Retain the capacity. - #[inline] - pub fn clear(&mut self) { - self.0.clear() - } - - /// Returns a reference to the first item. - #[inline] - pub fn first(&self) -> Option<&T> { - self.0.first().map(|(k, ())| k) - } - - /// Returns a reference to the last item. - #[inline] - pub fn last(&self) -> Option<&T> { - self.0.last().map(|(k, ())| k) - } - - /// Iterator over elements of this set which are not in the other set. - pub fn difference<'a>(&'a self, other: &'a Self) -> Difference<'a, T> - where - T: Eq + Hash, - { - Difference { - iter: self.iter(), - other, - } - } - - /// Iterator over union of two sets. - /// - /// Iteration order is: elements of this set followed by elements in the other set - /// not present in this set. - pub fn union<'a>(&'a self, other: &'a Self) -> Union<'a, T> - where - T: Eq + Hash, - { - Union { - iter: self.iter().chain(other.difference(self)), - } - } - - /// Sort entries. - pub fn sort(&mut self) - where - T: Ord, - { - self.0.sort_keys(); - } - - /// Equal if entries are equal in iteration order. - pub fn eq_ordered(&self, other: &Self) -> bool - where - T: PartialEq, - { - self.0.eq_ordered(&other.0) - } - - /// Hash entries in iteration order. - /// - /// Note, entries are not hashed, but previously computed hashes are hashed instead. 
- pub fn hash_ordered(&self, state: &mut H) - where - T: Hash, - { - self.0.hash_ordered(state) - } -} - -impl<'a, T> IntoIterator for &'a SmallSet { - type Item = &'a T; - type IntoIter = Iter<'a, T>; - - #[inline] - fn into_iter(self) -> Self::IntoIter { - Iter { - iter: self.0.iter(), - } - } -} - -impl IntoIterator for SmallSet { - type Item = T; - type IntoIter = IntoIter; - - #[inline] - fn into_iter(self) -> Self::IntoIter { - IntoIter { - iter: self.0.into_iter(), - } - } -} - -impl Extend for SmallSet -where - T: Eq + Hash, -{ - fn extend>(&mut self, iter: I) { - self.0.extend(iter.into_iter().map(|v| (v, ()))); - } -} - -/// Iterator over the difference of two sets. -pub struct Difference<'a, T: 'a> { - iter: Iter<'a, T>, - other: &'a SmallSet, -} - -impl<'a, T: 'a> Iterator for Difference<'a, T> -where - T: Hash + Eq, -{ - type Item = &'a T; - - #[allow(clippy::while_let_on_iterator)] - fn next(&mut self) -> Option { - while let Some(item) = self.iter.next() { - if !self.other.contains(item) { - return Some(item); - } - } - None - } - - fn size_hint(&self) -> (usize, Option) { - ( - self.iter.len().saturating_sub(self.other.len()), - Some(self.iter.len()), - ) - } -} - -/// Iterator over a union of two sets. -pub struct Union<'a, T: 'a> { - iter: std::iter::Chain, Difference<'a, T>>, -} - -impl<'a, T: 'a> Iterator for Union<'a, T> -where - T: Hash + Eq, -{ - type Item = &'a T; - - fn next(&mut self) -> Option { - self.iter.next() - } - - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } -} - -/// Create a [`SmallSet`](SmallSet) from a list of values. -/// -/// ## Example -/// -/// ``` -/// use starlark_map::smallset; -/// -/// let set = smallset!{"a", "b"}; -/// assert_eq!(set.contains("a"), true); -/// assert_eq!(set.len(), 2); -/// assert_eq!(set.contains("c"), false); -/// ``` -#[macro_export] -macro_rules! smallset { - (@single $($x:tt)*) => (()); - (@count $($rest:expr),*) => (<[()]>::len(&[$(smallset!(@single $rest)),*])); - - ($($key:expr,)+) => { smallset!($($key),+) }; - ($($key:expr),*) => { - { - let cap = smallset!(@count $($key),*); - let mut set = $crate::small_set::SmallSet::with_capacity(cap); - $( - set.insert($key); - )* - set - } - }; -} - -impl Serialize for SmallSet { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - serializer.collect_seq(self.iter()) - } -} - -impl<'de, T> Deserialize<'de> for SmallSet -where - T: Deserialize<'de> + Hash + Eq, -{ - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - struct SeqVisitor { - marker: PhantomData>, - } - - impl<'de, T> serde::de::Visitor<'de> for SeqVisitor - where - T: serde::de::Deserialize<'de> + Hash + Eq, - { - type Value = SmallSet; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a sequence") - } - - #[inline] - fn visit_seq(self, mut set: A) -> Result - where - A: serde::de::SeqAccess<'de>, - { - let mut values = SmallSet::with_capacity(set.size_hint().unwrap_or(0)); - while let Some(value) = set.next_element()? 
{ - values.insert(value); - } - Ok(values) - } - } - - let visitor = SeqVisitor { - marker: PhantomData, - }; - deserializer.deserialize_seq(visitor) - } -} - -#[cfg(test)] -mod tests { - use std::collections::HashSet; - use std::rc::Rc; - - use dupe::Dupe; - use dupe::IterDupedExt; - - use super::*; - - #[test] - fn empty_set() { - let m = SmallSet::::new(); - assert_eq!(m.is_empty(), true); - assert_eq!(m.len(), 0); - assert_eq!(m.iter().next(), None); - } - - #[test] - fn few_entries() { - let entries1 = [(0), (1)]; - let m1 = entries1.iter().duped().collect::>(); - - let entries2 = [(1), (0)]; - let m2 = entries2.iter().duped().collect::>(); - assert_eq!(m1.is_empty(), false); - assert_eq!(m1.len(), 2); - assert_eq!(m2.is_empty(), false); - assert_eq!(m2.len(), 2); - - assert_eq!(m1.iter().eq(entries1.iter()), true); - assert_eq!(m2.iter().eq(entries2.iter()), true); - assert_eq!(m1.iter().eq(m2.iter()), false); - assert_eq!(m1.eq(&m1), true); - assert_eq!(m2.eq(&m2), true); - assert_eq!(m1, m2); - - assert_ne!(m1, smallset![1]) - } - - #[test] - fn many_entries() { - let letters = 'a'..='z'; - - let entries1 = letters; - let m1 = entries1.clone().collect::>(); - - assert_eq!(m1.get(&'b'), Some(&'b')); - assert_eq!(m1.get_index_of(&'b'), Some(1)); - - assert_eq!(m1.get(&'!'), None); - assert_eq!(m1.get_index_of(&'!'), None); - - let letters = ('a'..='z').rev(); - let entries2 = letters; - let m2 = entries2.clone().collect::>(); - assert_eq!(m1.is_empty(), false); - assert_eq!(m1.len(), 26); - assert_eq!(m2.is_empty(), false); - assert_eq!(m2.len(), 26); - - assert_eq!(m1.clone().into_iter().eq(entries1), true); - assert_eq!(m2.clone().into_iter().eq(entries2), true); - assert_eq!(m1.iter().eq(m2.iter()), false); - assert_eq!(m1.eq(&m1), true); - assert_eq!(m2.eq(&m2), true); - assert_eq!(m1, m2); - - let not_m1 = { - let mut s = m1.clone(); - s.remove(&'a'); - s - }; - assert_ne!(m1, not_m1); - } - - #[test] - fn small_set_macros() { - let s = smallset![1, 4, 2]; - let mut i = s.into_iter(); - assert_eq!(i.next(), Some(1)); - assert_eq!(i.next(), Some(4)); - assert_eq!(i.next(), Some(2)); - assert_eq!(i.next(), None); - } - - #[test] - fn small_set_inserts() { - let mut s = SmallSet::new(); - assert_eq!(s.insert(2), true); - assert_eq!(s.insert(5), true); - - assert_eq!(s.insert(5), false); - } - - #[test] - fn get_or_insert() { - let mut set = SmallSet::new(); - let x = set.get_or_insert(Rc::new(1)).dupe(); - let x1 = set.get_or_insert(Rc::new(1)); - assert!(Rc::ptr_eq(&x, x1)); - } - - #[test] - fn get_or_insert_owned() { - let mut set = SmallSet::new(); - let x = set.get_or_insert_owned(&Rc::new(1)).dupe(); - let x1 = set.get_or_insert_owned(&Rc::new(1)); - assert!(Rc::ptr_eq(&x, x1)); - } - - #[test] - fn test_first() { - let mut s = SmallSet::new(); - s.insert(1); - assert_eq!(s.first(), Some(&1)); - s.insert(2); - assert_eq!(s.first(), Some(&1)); - s.remove(&1); - assert_eq!(s.first(), Some(&2)); - } - - #[test] - fn test_last() { - let mut s = SmallSet::new(); - s.insert(1); - assert_eq!(s.last(), Some(&1)); - s.insert(2); - assert_eq!(s.last(), Some(&2)); - } - - #[test] - fn test_remove() { - let mut h: HashSet = HashSet::from_iter([17]); - let mut s: SmallSet = SmallSet::from_iter([17]); - assert!(h.remove(&17)); - assert!(s.remove(&17)); - assert!(!h.remove(&17)); - assert!(!s.remove(&17)); - } - - #[test] - fn test_difference() { - let a = SmallSet::from_iter([1, 2, 3]); - let b = SmallSet::from_iter([2, 4, 1]); - let d = Vec::from_iter(a.difference(&b).copied()); - 
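// `difference` keeps `a`'s insertion order and skips anything found in `b`,
// so of [1, 2, 3] only 3 survives.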
assert_eq!(vec![3], d); - } - - #[test] - fn test_union() { - let a = SmallSet::from_iter([1, 2, 3]); - let b = SmallSet::from_iter([2, 4, 1]); - let d = Vec::from_iter(a.union(&b).copied()); - assert_eq!(vec![1, 2, 3, 4], d); - } - - #[test] - fn test_sort() { - let mut a = SmallSet::from_iter([1, 3, 2]); - a.sort(); - assert_eq!(vec![1, 2, 3], Vec::from_iter(a)); - } - - #[test] - fn test_difference_size_hint() { - let a = SmallSet::from_iter([1, 2, 3]); - let b = SmallSet::from_iter([2]); - let mut iter = a.difference(&b); - assert_eq!((2, Some(3)), iter.size_hint()); - assert_eq!(Some(&1), iter.next()); - assert_eq!((1, Some(2)), iter.size_hint()); - assert_eq!(Some(&3), iter.next()); - assert_eq!((0, Some(0)), iter.size_hint()); - assert_eq!(None, iter.next()); - } - - #[test] - fn test_json() { - let mp = smallset! {"a".to_owned() , "b".to_owned() }; - let expected = serde_json::json!(["a", "b"]); - assert_eq!(serde_json::to_value(&mp).unwrap(), expected); - assert_eq!( - serde_json::from_value::>(expected).unwrap(), - mp - ); - } -} diff --git a/starlark-rust/starlark_map/src/sorted_map.rs b/starlark-rust/starlark_map/src/sorted_map.rs index d5acb317449e1..4fddf183702a0 100644 --- a/starlark-rust/starlark_map/src/sorted_map.rs +++ b/starlark-rust/starlark_map/src/sorted_map.rs @@ -20,9 +20,12 @@ use std::hash::Hash; use allocative::Allocative; +use serde::Deserialize; +use serde::Serialize; use crate::ordered_map::OrderedMap; use crate::small_map; +use crate::small_map::SmallMap; use crate::Equivalent; /// `IndexMap` but with keys sorted. @@ -120,6 +123,12 @@ where { self.map.contains_key(k) } + + /// Iterate over the map with hashes. + #[inline] + pub fn iter_hashed(&self) -> small_map::IterHashed { + self.map.iter_hashed() + } } impl FromIterator<(K, V)> for SortedMap { @@ -138,6 +147,14 @@ impl From> for SortedMap { } } +impl From> for SortedMap { + #[inline] + fn from(map: SmallMap) -> SortedMap { + // `OrderedMap: From` is trivial, so this does not do any extra work + SortedMap::from(OrderedMap::from(map)) + } +} + impl IntoIterator for SortedMap { type Item = (K, V); type IntoIter = small_map::IntoIter; @@ -168,6 +185,30 @@ impl<'a, K: Ord + Hash, V> IntoIterator for &'a mut SortedMap { } } +impl Serialize for SortedMap { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + self.map.serialize(serializer) + } +} + +impl<'de, K, V> Deserialize<'de> for SortedMap +where + K: Deserialize<'de> + Hash + Eq, + V: Deserialize<'de>, +{ + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + Ok(Self { + map: OrderedMap::deserialize(deserializer)?, + }) + } +} + #[cfg(test)] mod tests { use crate::sorted_map::SortedMap; diff --git a/starlark-rust/starlark_map/src/sorting/mod.rs b/starlark-rust/starlark_map/src/sorting.rs similarity index 100% rename from starlark-rust/starlark_map/src/sorting/mod.rs rename to starlark-rust/starlark_map/src/sorting.rs diff --git a/starlark-rust/starlark_map/src/unordered_map.rs b/starlark-rust/starlark_map/src/unordered_map.rs index a2fc2928f671b..0cd3aa91d39c4 100644 --- a/starlark-rust/starlark_map/src/unordered_map.rs +++ b/starlark-rust/starlark_map/src/unordered_map.rs @@ -28,15 +28,10 @@ use hashbrown::raw::Bucket; use hashbrown::raw::RawTable; use crate::Equivalent; +use crate::Hashed; +use crate::StarlarkHashValue; use crate::StarlarkHasher; -#[inline] -fn compute_hash(k: &Q) -> u64 { - let mut hasher = StarlarkHasher::new(); - k.hash(&mut hasher); - hasher.finish() -} - /// Hash map 
which does not expose any insertion order-specific behavior /// (except `Debug`). #[derive(Clone, Allocative)] @@ -80,9 +75,19 @@ impl UnorderedMap { where Q: Hash + Equivalent + ?Sized, { - let hash = compute_hash(k); + let k = Hashed::new(k); + self.get_hashed(k) + } + + /// Get a reference to the value associated with the given key. + #[inline] + pub fn get_hashed(&self, key: Hashed<&Q>) -> Option<&V> + where + Q: Equivalent + ?Sized, + { + let hash = key.hash().promote(); self.0 - .get(hash, |(next_k, _v)| k.equivalent(next_k)) + .get(hash, |(next_k, _v)| key.key().equivalent(next_k)) .map(|(_, v)| v) } @@ -92,7 +97,7 @@ impl UnorderedMap { where Q: Hash + Equivalent + ?Sized, { - let hash = compute_hash(k); + let hash = StarlarkHashValue::new(k).promote(); self.0 .get_mut(hash, |(next_k, _v)| k.equivalent(next_k)) .map(|(_, v)| v) @@ -107,20 +112,31 @@ impl UnorderedMap { self.get(k).is_some() } + /// Does the map contain the specified key? + #[inline] + pub fn contains_key_hashed(&self, key: Hashed<&Q>) -> bool + where + Q: Equivalent + ?Sized, + { + self.get_hashed(key).is_some() + } + /// Insert an entry into the map. #[inline] pub fn insert(&mut self, k: K, v: V) -> Option where K: Hash + Eq, { - let hash = compute_hash(&k); - if let Some((_k, existing_value)) = - self.0.get_mut(hash, |(next_k, _v)| k.equivalent(next_k)) - { - Some(mem::replace(existing_value, v)) - } else { - self.0.insert(hash, (k, v), |(k, _v)| compute_hash(k)); - None + let k = Hashed::new(k); + match self.raw_entry_mut().from_key_hashed(k.as_ref()) { + RawEntryMut::Occupied(mut e) => { + let old = e.insert(v); + Some(old) + } + RawEntryMut::Vacant(e) => { + e.insert_hashed(k, v); + None + } } } @@ -130,10 +146,30 @@ impl UnorderedMap { where Q: Hash + Equivalent + ?Sized, { - let hash = compute_hash(k); - self.0 - .remove_entry(hash, |(next_k, _v)| k.equivalent(next_k)) - .map(|(_, v)| v) + match self.raw_entry_mut().from_key(k) { + RawEntryMut::Occupied(e) => Some(e.remove()), + RawEntryMut::Vacant(_) => None, + } + } + + /// Preserve only the elements specified by the predicate. + pub fn retain(&mut self, mut f: F) + where + F: FnMut(&K, &mut V) -> bool, + { + // TODO(nga): update hashbrown and use safe `HashTable` instead of this heavily unsafe code: + // https://docs.rs/hashbrown/latest/hashbrown/struct.HashTable.html + + // Unsafe code is copy-paste from `hashbrown` crate: + // https://github.com/rust-lang/hashbrown/blob/f2e62124cd947b5e2309dd6a24c7e422932aae97/src/map.rs#L923 + unsafe { + for item in self.0.iter() { + let (k, v) = item.as_mut(); + if !f(k, v) { + self.0.erase(item); + } + } + } } /// Get an entry in the map for in-place manipulation. @@ -142,7 +178,7 @@ impl UnorderedMap { where K: Hash + Eq, { - let hash = compute_hash(&k); + let hash = StarlarkHashValue::new(&k).promote(); if let Some(bucket) = self.0.find(hash, |(next_k, _v)| k.equivalent(next_k)) { Entry::Occupied(OccupiedEntry { _map: self, bucket }) } else { @@ -154,20 +190,51 @@ impl UnorderedMap { } } + /// Lower-level access to the entry API. + #[inline] + pub fn raw_entry_mut(&mut self) -> RawEntryBuilderMut { + RawEntryBuilderMut { map: self } + } + /// Clear the map, removing all entries. #[inline] pub fn clear(&mut self) { self.0.clear(); } - /// This function is private. + /// Entries in the map, in arbitrary order. 
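/// (A sketch of typical use, not part of the original patch; assumes the
/// `unordered_map` module is public. Impose an order explicitly when one is
/// needed.)
///
/// ```
/// use starlark_map::unordered_map::UnorderedMap;
///
/// let mut map = UnorderedMap::new();
/// map.insert("b", 2);
/// map.insert("a", 1);
/// let mut entries: Vec<_> = map.entries_unordered().collect();
/// entries.sort(); // the iteration order above is unspecified
/// assert_eq!(vec![(&"a", &1), (&"b", &2)], entries);
/// ```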
#[inline] - pub(crate) fn iter(&self) -> impl ExactSizeIterator { + pub fn entries_unordered(&self) -> impl ExactSizeIterator { unsafe { self.0.iter().map(|e| (&e.as_ref().0, &e.as_ref().1)) } } - /// This function is private. - pub(crate) fn into_iter(self) -> impl ExactSizeIterator { + /// Entries in the map, in arbitrary order. + #[inline] + pub fn entries_unordered_mut(&mut self) -> impl ExactSizeIterator { + unsafe { self.0.iter().map(|e| (&e.as_ref().0, &mut e.as_mut().1)) } + } + + /// Keys in the map, in arbitrary order. + #[inline] + pub fn keys_unordered(&self) -> impl ExactSizeIterator { + self.entries_unordered().map(|(k, _v)| k) + } + + /// Values in the map, in arbitrary order. + #[inline] + pub fn values_unordered(&self) -> impl ExactSizeIterator { + self.entries_unordered().map(|(_k, v)| v) + } + + /// Values in the map, in arbitrary order. + #[inline] + pub fn values_unordered_mut(&mut self) -> impl ExactSizeIterator { + self.entries_unordered_mut().map(|(_k, v)| v) + } + + /// Into entries, in arbitrary order. + #[inline] + pub(crate) fn into_entries_unordered(self) -> impl ExactSizeIterator { self.0.into_iter() } @@ -176,7 +243,7 @@ impl UnorderedMap { where K: Ord, { - let mut entries = Vec::from_iter(self.iter()); + let mut entries = Vec::from_iter(self.entries_unordered()); entries.sort_by(|(k1, _), (k2, _)| k1.cmp(k2)); entries } @@ -186,7 +253,7 @@ impl UnorderedMap { where K: Hash + Eq, { - self.into_iter().collect() + self.into_entries_unordered().collect() } /// Apply the function to value. @@ -195,7 +262,7 @@ impl UnorderedMap { K: Hash + Eq, { let mut map = UnorderedMap::with_capacity(self.len()); - for (k, v) in self.into_iter() { + for (k, v) in self.into_entries_unordered() { map.insert(k, f(v)); } map @@ -213,14 +280,17 @@ impl + Hash> Index<&Q> for UnorderedMap { impl Debug for UnorderedMap { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_map().entries(self.iter()).finish() + f.debug_map().entries(self.entries_unordered()).finish() } } impl PartialEq for UnorderedMap { #[inline] fn eq(&self, other: &Self) -> bool { - self.len() == other.len() && self.iter().all(|(k, v)| other.get(k) == Some(v)) + self.len() == other.len() + && self + .entries_unordered() + .all(|(k, v)| other.get(k) == Some(v)) } } @@ -230,7 +300,7 @@ impl Hash for UnorderedMap { fn hash(&self, state: &mut H) { self.len().hash(state); let mut sum: u64 = 0; - for (k, v) in self.iter() { + for (k, v) in self.entries_unordered() { let mut hasher = StarlarkHasher::new(); (k, v).hash(&mut hasher); sum = sum.wrapping_add(hasher.finish()); @@ -273,28 +343,167 @@ pub enum Entry<'a, K, V> { impl<'a, K: Eq + Hash, V> VacantEntry<'a, K, V> { /// Insert a value into the map. + #[inline] pub fn insert(self, value: V) { - self.map - .0 - .insert(self.hash, (self.key, value), |(k, _v)| compute_hash(k)); + self.map.0.insert(self.hash, (self.key, value), |(k, _v)| { + StarlarkHashValue::new(k).promote() + }); } } impl<'a, K, V> OccupiedEntry<'a, K, V> { /// Remove the entry from the map. + #[inline] pub fn get(&self) -> &V { unsafe { &self.bucket.as_ref().1 } } /// Get a reference to the value associated with the entry. + #[inline] pub fn get_mut(&mut self) -> &mut V { unsafe { &mut self.bucket.as_mut().1 } } /// Replace the value associated with the entry. + #[inline] + pub fn insert(&mut self, value: V) -> V { + mem::replace(self.get_mut(), value) + } +} + +/// Builder for [`RawEntryMut`]. 
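/// (An illustrative sketch, not part of the original patch: probe once, then
/// branch on the occupied/vacant result.)
///
/// ```
/// use starlark_map::unordered_map::RawEntryMut;
/// use starlark_map::unordered_map::UnorderedMap;
///
/// let mut map: UnorderedMap<String, u32> = UnorderedMap::new();
/// match map.raw_entry_mut().from_key("hits") {
///     RawEntryMut::Occupied(mut e) => *e.get_mut() += 1,
///     RawEntryMut::Vacant(e) => {
///         e.insert("hits".to_owned(), 1);
///     }
/// }
/// assert_eq!(Some(&1), map.get("hits"));
/// ```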
+pub struct RawEntryBuilderMut<'a, K, V> {
+    map: &'a mut UnorderedMap<K, V>,
+}
+
+impl<'a, K, V> RawEntryBuilderMut<'a, K, V> {
+    /// Find an entry by key.
+    #[inline]
+    pub fn from_key<Q>(self, k: &Q) -> RawEntryMut<'a, K, V>
+    where
+        Q: Hash + Equivalent<K> + ?Sized,
+    {
+        let k = Hashed::new(k);
+        self.from_key_hashed(k)
+    }
+
+    /// Find an entry by hashed key.
+    #[inline]
+    pub fn from_key_hashed<Q>(self, k: Hashed<&Q>) -> RawEntryMut<'a, K, V>
+    where
+        Q: Equivalent<K> + ?Sized,
+    {
+        self.from_hash(k.hash(), |next_k| k.key().equivalent(next_k))
+    }
+
+    /// Find an entry by hash and equality function.
+    #[inline]
+    pub fn from_hash<F>(self, hash: StarlarkHashValue, mut is_match: F) -> RawEntryMut<'a, K, V>
+    where
+        F: for<'b> FnMut(&'b K) -> bool,
+    {
+        let hash = hash.promote();
+        if let Some(bucket) = self.map.0.find(hash, |(next_k, _v)| is_match(next_k)) {
+            RawEntryMut::Occupied(RawOccupiedEntryMut {
+                map: self.map,
+                bucket,
+            })
+        } else {
+            RawEntryMut::Vacant(RawVacantEntryMut { map: self.map })
+        }
+    }
+}
+
+/// Occupied entry.
+pub struct RawOccupiedEntryMut<'a, K, V> {
+    map: &'a mut UnorderedMap<K, V>,
+    bucket: Bucket<(K, V)>,
+}
+
+/// Vacant entry.
+pub struct RawVacantEntryMut<'a, K, V> {
+    map: &'a mut UnorderedMap<K, V>,
+}
+
+/// Raw entry.
+pub enum RawEntryMut<'a, K, V> {
+    /// Occupied entry.
+    Occupied(RawOccupiedEntryMut<'a, K, V>),
+    /// Vacant entry.
+    Vacant(RawVacantEntryMut<'a, K, V>),
+}
+
+impl<'a, K, V> RawOccupiedEntryMut<'a, K, V> {
+    /// Replace the value associated with the entry.
+    #[inline]
+    pub fn insert(&mut self, value: V) -> V {
+        mem::replace(self.get_mut(), value)
+    }
+
+    /// Replace the key associated with the entry.
+    #[inline]
+    pub fn insert_key(&mut self, key: K) -> K {
+        mem::replace(self.key_mut(), key)
+    }
+
+    /// Get a reference to the value associated with the entry.
+    #[inline]
+    pub fn get(&self) -> &V {
+        unsafe { &self.bucket.as_ref().1 }
+    }
+
+    /// Get a mutable reference to the value associated with the entry.
+    #[inline]
+    pub fn get_mut(&mut self) -> &mut V {
+        unsafe { &mut self.bucket.as_mut().1 }
+    }
+
+    /// Get a mutable reference to the key associated with the entry.
+    #[inline]
+    pub fn key_mut(&mut self) -> &mut K {
+        unsafe { &mut self.bucket.as_mut().0 }
+    }
+
+    /// Remove the entry, return the value.
+    #[inline]
+    pub fn remove(self) -> V {
+        self.remove_entry().1
+    }
+
+    /// Remove the entry, return the key and value.
+    #[inline]
+    pub fn remove_entry(self) -> (K, V) {
+        unsafe { self.map.0.remove(self.bucket).0 }
+    }
+}
+
+impl<'a, K, V> RawVacantEntryMut<'a, K, V> {
+    /// Insert entry.
+    ///
+    /// Note: this function computes the hash of the key.
+    #[inline]
+    pub fn insert(self, key: K, value: V) -> (&'a mut K, &'a mut V)
+    where
+        K: Hash,
+    {
+        let key = Hashed::new(key);
+        self.insert_hashed(key, value)
+    }
+
+    /// Insert entry.
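/// (Sketch, not part of the original patch: the hash carried by `Hashed` is
/// computed once and reused here rather than recomputed.)
///
/// ```
/// use starlark_map::unordered_map::RawEntryMut;
/// use starlark_map::unordered_map::UnorderedMap;
/// use starlark_map::Hashed;
///
/// let mut map: UnorderedMap<String, u32> = UnorderedMap::new();
/// let key = Hashed::new("answer".to_owned());
/// if let RawEntryMut::Vacant(e) = map.raw_entry_mut().from_key_hashed(key.as_ref()) {
///     e.insert_hashed(key, 42);
/// }
/// assert_eq!(Some(&42), map.get("answer"));
/// ```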
+ #[inline] + pub fn insert_hashed(self, key: Hashed, value: V) -> (&'a mut K, &'a mut V) + where + K: Hash, + { + let (k, v) = + self.map + .0 + .insert_entry(key.hash().promote(), (key.into_key(), value), |(k, _v)| { + StarlarkHashValue::new(k).promote() + }); + (k, v) + } } #[cfg(test)] @@ -349,4 +558,26 @@ mod tests { map.insert(3, 4); assert_eq!(map.entries_sorted(), vec![(&1, &2), (&3, &4), (&5, &6)]); } + + #[test] + fn test_retain() { + let mut map = UnorderedMap::new(); + for i in 0..1000 { + map.insert(format!("key{}", i), format!("value{}", i)); + } + + map.retain(|k, v| { + v.push('x'); + k.ends_with('0') + }); + + assert_eq!(100, map.len()); + for i in 0..1000 { + if i % 10 == 0 { + assert_eq!(format!("value{}x", i), map[&format!("key{}", i)]); + } else { + assert!(!map.contains_key(&format!("key{}", i))); + } + } + } } diff --git a/starlark-rust/starlark_map/src/unordered_set.rs b/starlark-rust/starlark_map/src/unordered_set.rs index 1945d56049fe0..631b62320f3b3 100644 --- a/starlark-rust/starlark_map/src/unordered_set.rs +++ b/starlark-rust/starlark_map/src/unordered_set.rs @@ -21,15 +21,25 @@ use std::hash::Hash; use allocative::Allocative; +use crate::unordered_map; use crate::unordered_map::UnorderedMap; use crate::Equivalent; +use crate::Hashed; +use crate::StarlarkHashValue; /// `HashSet` that does not expose insertion order. -#[derive(Clone, Allocative, Debug, Default)] +#[derive(Clone, Allocative, Debug)] pub struct UnorderedSet { map: UnorderedMap, } +impl Default for UnorderedSet { + #[inline] + fn default() -> UnorderedSet { + UnorderedSet::new() + } +} + impl UnorderedSet { /// Create a new empty set. #[inline] @@ -83,9 +93,26 @@ impl UnorderedSet { self.map.contains_key(value) } + /// Does the set contain the specified value? + #[inline] + pub fn contains_hashed(&self, value: Hashed<&Q>) -> bool + where + Q: Equivalent + ?Sized, + { + self.map.contains_key_hashed(value) + } + + /// Lower-level access to the underlying map. + #[inline] + pub fn raw_entry_mut(&mut self) -> RawEntryBuilderMut { + RawEntryBuilderMut { + entry: self.map.raw_entry_mut(), + } + } + /// This function is private. fn iter(&self) -> impl Iterator { - self.map.iter().map(|(k, _)| k) + self.map.entries_unordered().map(|(k, _)| k) } /// Get the entries in the set, sorted. @@ -117,6 +144,101 @@ impl FromIterator for UnorderedSet { } } +/// Builder for [`RawEntryMut`]. +pub struct RawEntryBuilderMut<'a, T> { + entry: unordered_map::RawEntryBuilderMut<'a, T, ()>, +} + +impl<'a, T> RawEntryBuilderMut<'a, T> { + /// Find the entry for a key. + #[inline] + pub fn from_entry(self, entry: &Q) -> RawEntryMut<'a, T> + where + Q: Hash + Equivalent + ?Sized, + { + let entry = Hashed::new(entry); + self.from_entry_hashed(entry) + } + + /// Find the entry for a key. + #[inline] + pub fn from_entry_hashed(self, entry: Hashed<&Q>) -> RawEntryMut<'a, T> + where + Q: ?Sized + Equivalent, + { + self.from_hash(entry.hash(), |k| entry.key().equivalent(k)) + } + + /// Find the entry by hash and equality function. + #[inline] + pub fn from_hash(self, hash: StarlarkHashValue, is_match: F) -> RawEntryMut<'a, T> + where + F: for<'b> FnMut(&'b T) -> bool, + { + match self.entry.from_hash(hash, is_match) { + unordered_map::RawEntryMut::Occupied(e) => { + RawEntryMut::Occupied(RawOccupiedEntryMut { entry: e }) + } + unordered_map::RawEntryMut::Vacant(e) => { + RawEntryMut::Vacant(RawVacantEntryMut { entry: e }) + } + } + } +} + +/// Reference to an occupied entry in a [`UnorderedSet`]. 
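/// (Sketch, not part of the original patch; assumes the `unordered_set` module
/// is public. The raw-entry API supports an interning pattern where the probe
/// and the insert share one hash lookup.)
///
/// ```
/// use starlark_map::unordered_set::RawEntryMut;
/// use starlark_map::unordered_set::UnorderedSet;
///
/// let mut set: UnorderedSet<String> = UnorderedSet::new();
/// match set.raw_entry_mut().from_entry("x") {
///     RawEntryMut::Occupied(_) => {}
///     RawEntryMut::Vacant(e) => e.insert("x".to_owned()),
/// }
/// assert!(set.contains("x"));
/// ```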
+pub struct RawOccupiedEntryMut<'a, T> { + entry: unordered_map::RawOccupiedEntryMut<'a, T, ()>, +} + +/// Reference to a vacant entry in a [`UnorderedSet`]. +pub struct RawVacantEntryMut<'a, T> { + entry: unordered_map::RawVacantEntryMut<'a, T, ()>, +} + +/// Reference to an entry in a [`UnorderedSet`]. +pub enum RawEntryMut<'a, T> { + /// Occupied entry. + Occupied(RawOccupiedEntryMut<'a, T>), + /// Vacant entry. + Vacant(RawVacantEntryMut<'a, T>), +} + +impl<'a, T> RawOccupiedEntryMut<'a, T> { + /// Remove the entry. + #[inline] + pub fn remove(self) -> T { + self.entry.remove_entry().0 + } + + /// Replace the entry. + #[inline] + pub fn insert(&mut self, value: T) -> T { + self.entry.insert_key(value) + } +} + +impl<'a, T> RawVacantEntryMut<'a, T> { + /// Insert an entry to the set. This function computes the hash of the key. + #[inline] + pub fn insert(self, value: T) + where + T: Hash, + { + let value = Hashed::new(value); + self.insert_hashed(value); + } + + /// Insert an entry to the set. + #[inline] + pub fn insert_hashed(self, value: Hashed) + where + T: Hash, + { + self.entry.insert_hashed(value, ()); + } +} + #[cfg(test)] mod tests { use crate::unordered_set::UnorderedSet; diff --git a/starlark-rust/starlark_map/src/vec2.rs b/starlark-rust/starlark_map/src/vec2.rs new file mode 100644 index 0000000000000..81366d9ba6f9f --- /dev/null +++ b/starlark-rust/starlark_map/src/vec2.rs @@ -0,0 +1,786 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! A `Vec<(A, B)>` like object which stores `A` and `B` separately. 
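//!
//! (An illustrative sketch of why split storage helps: a `(u8, u64)` tuple is
//! padded to 16 bytes for alignment, while separate runs of `u8` and `u64`
//! cost only 9 bytes per element.)
//!
//! ```
//! assert_eq!(16, std::mem::size_of::<(u8, u64)>());
//! assert_eq!(1 + 8, std::mem::size_of::<u8>() + std::mem::size_of::<u64>());
//! ```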
+ +use std::alloc; +use std::alloc::Layout; +use std::alloc::LayoutError; +use std::cmp; +use std::cmp::Ordering; +use std::fmt::Debug; +use std::hash::Hash; +use std::hash::Hasher; +use std::marker::PhantomData; +use std::mem; +use std::mem::MaybeUninit; +use std::ptr; +use std::ptr::NonNull; +use std::slice; + +use allocative::Allocative; +use allocative::Visitor; + +use crate::sorting::insertion::insertion_sort; +use crate::sorting::insertion::slice_swap_shift; +pub use crate::vec2::iter::IntoIter; +pub use crate::vec2::iter::Iter; + +mod iter; + +#[derive(Eq, PartialEq, Debug)] +struct Vec2Layout { + layout: Layout, + offset_of_bbb: usize, + _marker: PhantomData<*mut (A, B)>, +} + +impl Vec2Layout { + fn new(cap: usize) -> Vec2Layout { + Self::new_checked(cap).unwrap_or_else(|err| { + panic!( + "Vec2Layout failed with {:?} when allocating capacity of {}", + err, cap + ) + }) + } + + fn new_checked(cap: usize) -> Result, LayoutError> { + debug_assert!(cap != 0); + let a = Layout::array::(cap)?; + let b = Layout::array::(cap)?; + let (layout, offset_of_bbb) = a.extend(b)?; + + debug_assert!(offset_of_bbb <= layout.size()); + debug_assert!(layout.align() >= a.align()); + debug_assert!(layout.align() >= b.align()); + debug_assert!(offset_of_bbb % a.align() == 0); + + Ok(Vec2Layout { + layout, + offset_of_bbb, + _marker: PhantomData, + }) + } + + unsafe fn alloc(&self) -> NonNull { + let ptr: *mut u8 = alloc::alloc(self.layout); + let bbb_ptr: *mut B = ptr.add(self.offset_of_bbb).cast(); + NonNull::new_unchecked(bbb_ptr) + } + + unsafe fn dealloc(&self, bbb_ptr: NonNull) { + let ptr: *mut u8 = bbb_ptr.as_ptr().cast::().sub(self.offset_of_bbb); + alloc::dealloc(ptr, self.layout) + } +} + +/// Array of pairs `(A, B)`, where `A` and `B` are stored separately. +/// This reduces memory consumption when `A` and `B` have different alignments. +pub struct Vec2 { + // Layout is `[padding, A, A, ..., A, B, B, ..., B]` + bbb_ptr: NonNull, + len: usize, + cap: usize, + _marker: PhantomData<(A, B)>, +} + +unsafe impl Send for Vec2 {} +unsafe impl Sync for Vec2 {} + +impl Default for Vec2 { + #[inline] + fn default() -> Vec2 { + Vec2::new() + } +} + +impl Debug for Vec2 { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_list().entries(self.iter()).finish() + } +} + +impl Clone for Vec2 { + fn clone(&self) -> Vec2 { + let mut r = Vec2::with_capacity(self.len()); + for (a, b) in self.iter() { + r.push(a.clone(), b.clone()); + } + r + } +} + +impl Vec2 { + /// Empty vec. + #[inline] + pub const fn new() -> Vec2 { + Vec2 { + // Provide a dangling pointer aligned to both A and B, so that aaa_ptr() + // returns a properly aligned pointer + bbb_ptr: NonNull::<(A, B)>::dangling().cast(), + len: 0, + cap: 0, + _marker: PhantomData, + } + } + + /// New instance with given capacity. + #[inline] + pub fn with_capacity(cap: usize) -> Vec2 { + if cap == 0 { + Vec2::new() + } else { + let bbb_ptr = unsafe { Vec2Layout::::new(cap).alloc() }; + Vec2 { + bbb_ptr, + len: 0, + cap, + _marker: PhantomData, + } + } + } + + /// Number of elements. + #[inline] + pub fn len(&self) -> usize { + self.len + } + + /// Capacity. + #[inline] + pub fn capacity(&self) -> usize { + self.cap + } + + /// Is empty. 
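/// (Sketch, not part of the original patch; assumes the `vec2` module is
/// reachable from the crate root.)
///
/// ```
/// use starlark_map::vec2::Vec2;
///
/// let mut v: Vec2<u8, u64> = Vec2::new();
/// assert!(v.is_empty());
/// v.push(1, 2);
/// assert!(!v.is_empty());
/// ```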
+ #[inline] + pub fn is_empty(&self) -> bool { + self.len == 0 + } + + #[inline] + fn aaa_ptr(&self) -> NonNull { + unsafe { NonNull::new_unchecked(self.bbb_ptr.cast::().as_ptr().sub(self.cap)) } + } + + #[inline] + fn bbb_ptr(&self) -> NonNull { + self.bbb_ptr + } + + #[inline] + pub(crate) fn aaa(&self) -> &[A] { + unsafe { slice::from_raw_parts(self.aaa_ptr().as_ptr(), self.len) } + } + + #[inline] + pub(crate) fn aaa_mut(&mut self) -> &mut [A] { + unsafe { slice::from_raw_parts_mut(self.aaa_ptr().as_ptr(), self.len) } + } + + #[inline] + fn aaa_uninit(&mut self) -> &mut [MaybeUninit] { + unsafe { slice::from_raw_parts_mut(self.aaa_ptr().as_ptr() as *mut _, self.cap) } + } + + #[inline] + pub(crate) fn bbb(&self) -> &[B] { + unsafe { slice::from_raw_parts(self.bbb_ptr().as_ptr(), self.len) } + } + + #[inline] + pub(crate) fn bbb_mut(&mut self) -> &mut [B] { + unsafe { slice::from_raw_parts_mut(self.bbb_ptr().as_ptr(), self.len) } + } + + #[inline] + fn bbb_uninit(&mut self) -> &mut [MaybeUninit] { + unsafe { slice::from_raw_parts_mut(self.bbb_ptr().as_ptr() as *mut _, self.cap) } + } + + // This is what `Vec` does. + const MIN_NON_ZERO_CAP: usize = if mem::size_of::<(A, B)>() == 1 { + 8 + } else if mem::size_of::<(A, B)>() <= 1024 { + 4 + } else { + 1 + }; + + #[cold] + fn reserve_slow(&mut self, additional: usize) { + debug_assert!(self.cap - self.len < additional); + + let required_cap = self.len.checked_add(additional).expect("capacity overflow"); + let new_cap = cmp::max(required_cap, Self::MIN_NON_ZERO_CAP); + let new_cap = cmp::max(new_cap, self.cap * 2); + let new = Self::with_capacity(new_cap); + unsafe { + ptr::copy_nonoverlapping(self.aaa_ptr().as_ptr(), new.aaa_ptr().as_ptr(), self.len); + ptr::copy_nonoverlapping(self.bbb_ptr().as_ptr(), new.bbb_ptr().as_ptr(), self.len); + self.dealloc(); + self.bbb_ptr = new.bbb_ptr; + mem::forget(new); + self.cap = new_cap; + } + } + + /// Reserve capacity for `additional` elements. + #[inline] + pub fn reserve(&mut self, additional: usize) { + if self.cap - self.len < additional { + self.reserve_slow(additional); + } + } + + #[inline] + unsafe fn dealloc_impl(data: NonNull, cap: usize) { + if cap != 0 { + Vec2Layout::::new(cap).dealloc(data); + } + } + + /// Deallocate, but do not call destructors. + #[inline] + unsafe fn dealloc(&mut self) { + Self::dealloc_impl(self.bbb_ptr, self.cap); + } + + unsafe fn drop_in_place(&mut self) { + ptr::drop_in_place::<[A]>(self.aaa_mut()); + ptr::drop_in_place::<[B]>(self.bbb_mut()); + } + + /// Push an element. + #[inline] + pub fn push(&mut self, a: A, b: B) { + self.reserve(1); + let len = self.len; + unsafe { + self.aaa_uninit().get_unchecked_mut(len).write(a); + self.bbb_uninit().get_unchecked_mut(len).write(b); + } + self.len += 1; + } + + /// Get an element reference by index. + #[inline] + pub fn get(&self, index: usize) -> Option<(&A, &B)> { + if index < self.len { + Some(unsafe { self.get_unchecked(index) }) + } else { + None + } + } + + /// Get an element reference by index skipping bounds check. + #[inline] + pub unsafe fn get_unchecked(&self, index: usize) -> (&A, &B) { + debug_assert!(index < self.len); + ( + self.aaa().get_unchecked(index), + self.bbb().get_unchecked(index), + ) + } + + /// Get an element mutable reference by index. + #[inline] + pub fn get_mut(&mut self, index: usize) -> Option<(&mut A, &mut B)> { + if index < self.len { + Some(unsafe { self.get_unchecked_mut(index) }) + } else { + None + } + } + + /// Get an element mutable reference by index. 
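///
/// # Safety
///
/// `index` must be less than `self.len()`; out-of-range indices are only
/// caught by the `debug_assert!` below.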
+ #[inline] + pub unsafe fn get_unchecked_mut(&mut self, index: usize) -> (&mut A, &mut B) { + debug_assert!(index < self.len); + let k_ptr = self.aaa_ptr().as_ptr(); + let v_ptr = self.bbb_ptr().as_ptr(); + (&mut *k_ptr.add(index), &mut *v_ptr.add(index)) + } + + #[inline] + unsafe fn read(&self, index: usize) -> (A, B) { + debug_assert!(index < self.len); + let (a, b) = self.get_unchecked(index); + (ptr::read(a), ptr::read(b)) + } + + /// Remove an element by index. + /// + /// This is an `O(n)` operation. + pub fn remove(&mut self, index: usize) -> (A, B) { + assert!(index < self.len); + unsafe { + let (a, b) = self.read(index); + ptr::copy( + self.aaa_ptr().as_ptr().add(index + 1), + self.aaa_ptr().as_ptr().add(index), + self.len - index - 1, + ); + ptr::copy( + self.bbb_ptr().as_ptr().add(index + 1), + self.bbb_ptr().as_ptr().add(index), + self.len - index - 1, + ); + self.len -= 1; + (a, b) + } + } + + /// Remove all elements. + #[inline] + pub fn clear(&mut self) { + unsafe { + self.drop_in_place(); + self.len = 0; + } + } + + /// Remove the last element. + #[inline] + pub fn pop(&mut self) -> Option<(A, B)> { + let new_len = self.len.checked_sub(1)?; + let (a, b) = unsafe { self.read(new_len) }; + self.len = new_len; + Some((a, b)) + } + + /// Get the first element reference. + #[inline] + pub fn first(&self) -> Option<(&A, &B)> { + self.get(0) + } + + /// Get the last element reference. + #[inline] + pub fn last(&self) -> Option<(&A, &B)> { + self.get(self.len.checked_sub(1)?) + } + + /// If capacity exceeds length, shrink capacity to length. + pub fn shrink_to_fit(&mut self) { + if self.len() < self.capacity() { + let mut new_vec = Vec2::with_capacity(self.len()); + for (a, b) in mem::take(self).into_iter() { + new_vec.push(a, b); + } + *self = new_vec; + } else { + debug_assert!(self.len() == self.capacity()); + } + } + + /// Truncate the vector to the given length. + /// + /// If the vector is already shorter than the given length, do nothing. + pub fn truncate(&mut self, len: usize) { + let Some(drop_len) = self.len().checked_sub(len) else { + return; + }; + unsafe { + let drop_a = ptr::slice_from_raw_parts_mut(self.aaa_ptr().as_ptr().add(len), drop_len); + let drop_b = ptr::slice_from_raw_parts_mut(self.bbb_ptr().as_ptr().add(len), drop_len); + self.len = len; + + struct DropInPlace(*mut [X]); + + impl Drop for DropInPlace { + fn drop(&mut self) { + unsafe { + ptr::drop_in_place(self.0); + } + } + } + + // Drop with `Drop` implementation to continue panicking if `drop_a` panics. + let _drop_a = DropInPlace(drop_a); + let _drop_b = DropInPlace(drop_b); + } + } + + /// Retains only the elements specified by the predicate. + pub fn retain(&mut self, mut f: F) + where + F: FnMut(&mut A, &mut B) -> bool, + { + struct Retain<'a, A, B> { + /// Data in `vec` is valid in ranges `[0, written)` and `[next, vec.len)`. + vec: &'a mut Vec2, + /// Processed and retained element count. + written: usize, + /// Next element to check. + next: usize, + } + + impl<'a, A, B> Drop for Retain<'a, A, B> { + fn drop(&mut self) { + debug_assert!(self.written <= self.next); + debug_assert!(self.next <= self.vec.len); + unsafe { + // Copy remaining elements to the beginning. + // Copy occurs only if `f` or `{A,B}::drop` panics. 
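// On normal completion `next == vec.len`, so both `ptr::copy` calls
// below copy zero elements and only the length update takes effect.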
+ ptr::copy( + self.vec.aaa_ptr().as_ptr().add(self.next), + self.vec.aaa_ptr().as_ptr().add(self.written), + self.vec.len - self.next, + ); + ptr::copy( + self.vec.bbb_ptr().as_ptr().add(self.next), + self.vec.bbb_ptr().as_ptr().add(self.written), + self.vec.len - self.next, + ); + + // Set correct length. + self.vec.len = self.written + self.vec.len - self.next; + } + } + } + + let mut retain = Retain { + vec: self, + next: 0, + written: 0, + }; + + unsafe { + while retain.next < retain.vec.len { + let (a, b) = retain.vec.get_unchecked_mut(retain.next); + let retain_elem = f(a, b); + let a = ptr::read(a); + let b = ptr::read(b); + retain.next += 1; + if retain_elem { + ptr::write(retain.vec.aaa_ptr().as_ptr().add(retain.written), a); + ptr::write(retain.vec.bbb_ptr().as_ptr().add(retain.written), b); + retain.written += 1; + } else { + drop((a, b)); + } + } + } + } + + /// Iterate over the elements. + #[inline] + pub fn iter(&self) -> Iter<'_, A, B> { + Iter { + aaa: self.aaa().iter(), + bbb: self.bbb_ptr(), + _marker: PhantomData, + } + } + + pub(crate) fn sort_insertion_by(&mut self, mut compare: F) + where + F: FnMut((&A, &B), (&A, &B)) -> Ordering, + { + insertion_sort( + self, + self.len, + |vec2, i, j| unsafe { + compare(vec2.get_unchecked(i), vec2.get_unchecked(j)) == Ordering::Less + }, + |vec2, a, b| { + slice_swap_shift(vec2.aaa_mut(), a, b); + slice_swap_shift(vec2.bbb_mut(), a, b); + }, + ); + } + + /// Sort the elements using given comparator. + pub fn sort_by(&mut self, mut compare: F) + where + F: FnMut((&A, &B), (&A, &B)) -> Ordering, + { + // Constant from rust stdlib. + const MAX_INSERTION: usize = 20; + if self.len() <= MAX_INSERTION { + self.sort_insertion_by(compare); + return; + } + + // TODO: sort without allocation. + // TODO: drain. 
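// `mem::take` swaps `self` with an empty `Vec2`, so the old allocation
// is consumed by `into_iter` and the pushes below re-reserve from zero.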
+ let mut entries: Vec<(A, B)> = mem::take(self).into_iter().collect(); + entries.sort_by(|(xa, xb), (ya, yb)| compare((xa, xb), (ya, yb))); + for (a, b) in entries { + self.push(a, b); + } + } +} + +impl Drop for Vec2 { + #[inline] + fn drop(&mut self) { + unsafe { + if self.cap != 0 { + self.drop_in_place(); + self.dealloc(); + } + } + } +} + +impl<'s, A, B> IntoIterator for &'s Vec2 { + type Item = (&'s A, &'s B); + type IntoIter = Iter<'s, A, B>; + + #[inline] + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl IntoIterator for Vec2 { + type Item = (A, B); + type IntoIter = IntoIter; + + #[inline] + fn into_iter(self) -> IntoIter { + let iter = IntoIter { + aaa_begin: self.aaa_ptr(), + bbb_begin: self.bbb_ptr(), + bbb_end: unsafe { NonNull::new_unchecked(self.bbb_ptr().as_ptr().add(self.len)) }, + bbb_ptr: self.bbb_ptr, + cap: self.cap, + }; + mem::forget(self); + iter + } +} + +impl PartialEq for Vec2 { + #[inline] + fn eq(&self, other: &Self) -> bool { + self.len == other.len && self.iter().eq(other.iter()) + } +} + +impl Eq for Vec2 {} + +impl Hash for Vec2 { + fn hash(&self, state: &mut H) { + self.len.hash(state); + for (a, b) in self.iter() { + a.hash(state); + b.hash(state); + } + } +} + +impl Allocative for Vec2 { + fn visit<'a, 'b: 'a>(&self, visitor: &'a mut Visitor<'b>) { + let mut visitor = visitor.enter_self_sized::(); + if self.cap != 0 { + let mut visitor = + visitor.enter_unique(allocative::Key::new("ptr"), mem::size_of::<*const ()>()); + { + let mut visitor = visitor.enter( + allocative::Key::new("data"), + Vec2Layout::::new(self.cap).layout.size(), + ); + for (a, b) in self { + a.visit(&mut visitor); + b.visit(&mut visitor); + } + visitor.exit(); + } + visitor.exit(); + } + visitor.exit(); + } +} + +#[cfg(test)] +mod tests { + use std::alloc::Layout; + use std::marker::PhantomData; + use std::rc::Rc; + + use dupe::Dupe; + + use crate::vec2::Vec2; + use crate::vec2::Vec2Layout; + + #[test] + fn test_layout_for() { + assert_eq!( + Vec2Layout { + offset_of_bbb: 4, + layout: Layout::from_size_align(8, 4).unwrap(), + _marker: PhantomData, + }, + Vec2Layout::<[u8; 3], u32>::new(1) + ); + } + + #[test] + fn test_alloc_dealloc() { + unsafe { + let layout = Vec2Layout::<[u8; 3], u32>::new(100); + let data = layout.alloc(); + layout.dealloc(data); + } + } + + #[test] + fn test_push() { + let mut v = Vec2::new(); + v.push(1, 2); + assert_eq!(1, v.len()); + assert_eq!(Some((&1, &2)), v.get(0)); + } + + #[test] + fn test_push_many() { + let mut v = Vec2::new(); + for i in 0..100 { + v.push(i.to_string(), i * 2); + } + assert_eq!(100, v.len()); + for i in 0..100 { + assert_eq!(Some((&i.to_string(), &(i * 2))), v.get(i)); + } + } + + #[test] + fn test_into_iter() { + let mut v = Vec2::new(); + for i in 0..100 { + v.push(i.to_string(), i * 2); + } + for (i, (a, b)) in v.into_iter().enumerate() { + assert_eq!(i.to_string(), a); + assert_eq!(i * 2, b); + } + } + + #[test] + fn test_sort_insertion_by() { + let mut v = Vec2::new(); + v.push(1, 2); + v.push(3, 4); + v.push(2, 3); + v.push(3, 2); + v.sort_insertion_by(|(xa, xb), (ya, yb)| (xa, xb).cmp(&(ya, yb))); + assert_eq!(Some((&1, &2)), v.get(0)); + assert_eq!(Some((&2, &3)), v.get(1)); + assert_eq!(Some((&3, &2)), v.get(2)); + assert_eq!(Some((&3, &4)), v.get(3)); + } + + #[test] + fn test_shrink_to_fit() { + let mut v = Vec2::with_capacity(10); + v.push("a".to_owned(), "b".to_owned()); + v.push("c".to_owned(), "d".to_owned()); + v.shrink_to_fit(); + for _ in 0..2 { + assert_eq!(2, v.len()); + assert_eq!(2, 
v.capacity()); + assert_eq!( + vec![("a", "b"), ("c", "d")], + v.iter() + .map(|(a, b)| (a.as_str(), b.as_str())) + .collect::>() + ); + } + } + + #[test] + fn test_truncate() { + let mut v = Vec2::new(); + let rs = (0..6).map(|i| Rc::new(i * 100)).collect::>(); + v.push(rs[0].dupe(), rs[1].dupe()); + v.push(rs[2].dupe(), rs[3].dupe()); + v.push(rs[4].dupe(), rs[5].dupe()); + v.truncate(1); + assert_eq!(Rc::strong_count(&rs[0]), 2); + assert_eq!(Rc::strong_count(&rs[1]), 2); + assert_eq!(Rc::strong_count(&rs[2]), 1); + assert_eq!(Rc::strong_count(&rs[3]), 1); + assert_eq!(Rc::strong_count(&rs[4]), 1); + assert_eq!(Rc::strong_count(&rs[5]), 1); + } + + #[test] + fn test_retain() { + let mut v = Vec2::new(); + v.push(1, 2); + v.push(2, 3); + v.push(3, 4); + v.retain(|a, b| { + if *a == 2 { + assert_eq!(b, &3); + false + } else { + true + } + }); + assert_eq!(2, v.len()); + assert_eq!(Some((&1, &2)), v.get(0)); + assert_eq!(Some((&3, &4)), v.get(1)); + } + + #[test] + fn test_first() { + let mut v: Vec2 = Vec2::new(); + assert_eq!(None, v.first()); + v.push(1, 2); + assert_eq!(Some((&1, &2)), v.first()); + v.push(3, 4); + assert_eq!(Some((&1, &2)), v.first()); + } + + #[test] + fn test_last() { + let mut v: Vec2 = Vec2::new(); + assert_eq!(None, v.last()); + v.push(1, 2); + assert_eq!(Some((&1, &2)), v.last()); + v.push(3, 4); + assert_eq!(Some((&3, &4)), v.last()); + } + + #[repr(align(16))] + struct Align16; + + #[repr(align(8))] + struct Align8; + + #[test] + fn test_alignment() { + let v: Vec2 = Vec2::new(); + assert_eq!( + v.aaa_ptr() + .as_ptr() + .align_offset(std::mem::align_of::()), + 0 + ); + assert_eq!( + v.bbb_ptr() + .as_ptr() + .align_offset(std::mem::align_of::()), + 0 + ); + } +} diff --git a/starlark-rust/starlark_map/src/vec2/mod.rs b/starlark-rust/starlark_map/src/vec2/mod.rs deleted file mode 100644 index 645fe23726413..0000000000000 --- a/starlark-rust/starlark_map/src/vec2/mod.rs +++ /dev/null @@ -1,652 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! A `Vec<(A, B)>` like object which stores `A` and `B` separately. 
- -use std::alloc; -use std::alloc::Layout; -use std::alloc::LayoutError; -use std::cmp; -use std::cmp::Ordering; -use std::fmt::Debug; -use std::hash::Hash; -use std::hash::Hasher; -use std::marker::PhantomData; -use std::mem; -use std::mem::MaybeUninit; -use std::ptr; -use std::ptr::NonNull; -use std::slice; - -use allocative::Allocative; -use allocative::Visitor; - -use crate::sorting::insertion::insertion_sort; -use crate::sorting::insertion::slice_swap_shift; -pub use crate::vec2::iter::IntoIter; -pub use crate::vec2::iter::Iter; - -mod iter; - -#[derive(Eq, PartialEq, Debug)] -struct Vec2Layout { - layout: Layout, - offset_of_bbb: usize, - _marker: PhantomData<*mut (A, B)>, -} - -impl Vec2Layout { - fn new(cap: usize) -> Vec2Layout { - Self::new_checked(cap).unwrap_or_else(|err| { - panic!( - "Vec2Layout failed with {:?} when allocating capacity of {}", - err, cap - ) - }) - } - - fn new_checked(cap: usize) -> Result, LayoutError> { - debug_assert!(cap != 0); - let a = Layout::array::(cap)?; - let b = Layout::array::(cap)?; - let (layout, offset_of_bbb) = a.extend(b)?; - - debug_assert!(offset_of_bbb <= layout.size()); - debug_assert!(layout.align() >= a.align()); - debug_assert!(layout.align() >= b.align()); - debug_assert!(offset_of_bbb % a.align() == 0); - - Ok(Vec2Layout { - layout, - offset_of_bbb, - _marker: PhantomData, - }) - } - - unsafe fn alloc(&self) -> NonNull { - let ptr: *mut u8 = alloc::alloc(self.layout); - let bbb_ptr: *mut B = ptr.add(self.offset_of_bbb).cast(); - NonNull::new_unchecked(bbb_ptr) - } - - unsafe fn dealloc(&self, bbb_ptr: NonNull) { - let ptr: *mut u8 = bbb_ptr.as_ptr().cast::().sub(self.offset_of_bbb); - alloc::dealloc(ptr, self.layout) - } -} - -/// Array of pairs `(A, B)`, where `A` and `B` are stored separately. -/// This reduces memory consumption when `A` and `B` have different alignments. -pub struct Vec2 { - // Layout is `[padding, A, A, ..., A, B, B, ..., B]` - bbb_ptr: NonNull, - len: usize, - cap: usize, - _marker: PhantomData<(A, B)>, -} - -unsafe impl Send for Vec2 {} -unsafe impl Sync for Vec2 {} - -impl Default for Vec2 { - #[inline] - fn default() -> Vec2 { - Vec2::new() - } -} - -impl Debug for Vec2 { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_list().entries(self.iter()).finish() - } -} - -impl Clone for Vec2 { - fn clone(&self) -> Vec2 { - let mut r = Vec2::with_capacity(self.len()); - for (a, b) in self.iter() { - r.push(a.clone(), b.clone()); - } - r - } -} - -impl Vec2 { - /// Empty vec. - #[inline] - pub const fn new() -> Vec2 { - Vec2 { - // Provide a dangling pointer aligned to both A and B, so that aaa_ptr() - // returns a properly aligned pointer - bbb_ptr: NonNull::<(A, B)>::dangling().cast(), - len: 0, - cap: 0, - _marker: PhantomData, - } - } - - /// New instance with given capacity. - #[inline] - pub fn with_capacity(cap: usize) -> Vec2 { - if cap == 0 { - Vec2::new() - } else { - let bbb_ptr = unsafe { Vec2Layout::::new(cap).alloc() }; - Vec2 { - bbb_ptr, - len: 0, - cap, - _marker: PhantomData, - } - } - } - - /// Number of elements. - #[inline] - pub fn len(&self) -> usize { - self.len - } - - /// Capacity. - #[inline] - pub fn capacity(&self) -> usize { - self.cap - } - - /// Is empty. 
- #[inline] - pub fn is_empty(&self) -> bool { - self.len == 0 - } - - #[inline] - fn aaa_ptr(&self) -> NonNull { - unsafe { NonNull::new_unchecked(self.bbb_ptr.cast::().as_ptr().sub(self.cap)) } - } - - #[inline] - fn bbb_ptr(&self) -> NonNull { - self.bbb_ptr - } - - #[inline] - pub(crate) fn aaa(&self) -> &[A] { - unsafe { slice::from_raw_parts(self.aaa_ptr().as_ptr(), self.len) } - } - - #[inline] - pub(crate) fn aaa_mut(&mut self) -> &mut [A] { - unsafe { slice::from_raw_parts_mut(self.aaa_ptr().as_ptr(), self.len) } - } - - #[inline] - fn aaa_uninit(&mut self) -> &mut [MaybeUninit] { - unsafe { slice::from_raw_parts_mut(self.aaa_ptr().as_ptr() as *mut _, self.cap) } - } - - #[inline] - pub(crate) fn bbb(&self) -> &[B] { - unsafe { slice::from_raw_parts(self.bbb_ptr().as_ptr(), self.len) } - } - - #[inline] - fn bbb_mut(&mut self) -> &mut [B] { - unsafe { slice::from_raw_parts_mut(self.bbb_ptr().as_ptr(), self.len) } - } - - #[inline] - fn bbb_uninit(&mut self) -> &mut [MaybeUninit] { - unsafe { slice::from_raw_parts_mut(self.bbb_ptr().as_ptr() as *mut _, self.cap) } - } - - // This is what `Vec` does. - const MIN_NON_ZERO_CAP: usize = if mem::size_of::<(A, B)>() == 1 { - 8 - } else if mem::size_of::<(A, B)>() <= 1024 { - 4 - } else { - 1 - }; - - #[cold] - fn reserve_slow(&mut self, additional: usize) { - debug_assert!(self.cap - self.len < additional); - - let required_cap = self.len.checked_add(additional).expect("capacity overflow"); - let new_cap = cmp::max(required_cap, Self::MIN_NON_ZERO_CAP); - let new_cap = cmp::max(new_cap, self.cap * 2); - let new = Self::with_capacity(new_cap); - unsafe { - ptr::copy_nonoverlapping(self.aaa_ptr().as_ptr(), new.aaa_ptr().as_ptr(), self.len); - ptr::copy_nonoverlapping(self.bbb_ptr().as_ptr(), new.bbb_ptr().as_ptr(), self.len); - self.dealloc(); - self.bbb_ptr = new.bbb_ptr; - mem::forget(new); - self.cap = new_cap; - } - } - - /// Reserve capacity for `additional` elements. - #[inline] - pub fn reserve(&mut self, additional: usize) { - if self.cap - self.len < additional { - self.reserve_slow(additional); - } - } - - #[inline] - unsafe fn dealloc_impl(data: NonNull, cap: usize) { - if cap != 0 { - Vec2Layout::::new(cap).dealloc(data); - } - } - - /// Deallocate, but do not call destructors. - #[inline] - unsafe fn dealloc(&mut self) { - Self::dealloc_impl(self.bbb_ptr, self.cap); - } - - unsafe fn drop_in_place(&mut self) { - ptr::drop_in_place::<[A]>(self.aaa_mut()); - ptr::drop_in_place::<[B]>(self.bbb_mut()); - } - - /// Push an element. - #[inline] - pub fn push(&mut self, a: A, b: B) { - self.reserve(1); - let len = self.len; - unsafe { - self.aaa_uninit().get_unchecked_mut(len).write(a); - self.bbb_uninit().get_unchecked_mut(len).write(b); - } - self.len += 1; - } - - /// Get an element reference by index. - #[inline] - pub fn get(&self, index: usize) -> Option<(&A, &B)> { - if index < self.len { - unsafe { - let a = self.aaa().get_unchecked(index); - let b = self.bbb().get_unchecked(index); - Some((a, b)) - } - } else { - None - } - } - - /// Get an element reference by index skipping bounds check. - #[inline] - pub unsafe fn get_unchecked(&self, index: usize) -> (&A, &B) { - debug_assert!(index < self.len); - ( - self.aaa().get_unchecked(index), - self.bbb().get_unchecked(index), - ) - } - - /// Get an element mutable reference by index. 
- #[inline] - pub unsafe fn get_unchecked_mut(&mut self, index: usize) -> (&mut A, &mut B) { - debug_assert!(index < self.len); - let k_ptr = self.aaa_ptr().as_ptr(); - let v_ptr = self.bbb_ptr().as_ptr(); - (&mut *k_ptr.add(index), &mut *v_ptr.add(index)) - } - - #[inline] - unsafe fn read(&self, index: usize) -> (A, B) { - debug_assert!(index < self.len); - let (a, b) = self.get_unchecked(index); - (ptr::read(a), ptr::read(b)) - } - - /// Remove an element by index. - /// - /// This is an `O(n)` operation. - pub fn remove(&mut self, index: usize) -> (A, B) { - assert!(index < self.len); - unsafe { - let (a, b) = self.read(index); - ptr::copy( - self.aaa_ptr().as_ptr().add(index + 1), - self.aaa_ptr().as_ptr().add(index), - self.len - index - 1, - ); - ptr::copy( - self.bbb_ptr().as_ptr().add(index + 1), - self.bbb_ptr().as_ptr().add(index), - self.len - index - 1, - ); - self.len -= 1; - (a, b) - } - } - - /// Remove all elements. - #[inline] - pub fn clear(&mut self) { - unsafe { - self.drop_in_place(); - self.len = 0; - } - } - - /// Remove the last element. - #[inline] - pub fn pop(&mut self) -> Option<(A, B)> { - let new_len = self.len.checked_sub(1)?; - let (a, b) = unsafe { self.read(new_len) }; - self.len = new_len; - Some((a, b)) - } - - /// Get the first element reference. - #[inline] - pub fn first(&self) -> Option<(&A, &B)> { - self.get(0) - } - - /// Get the last element reference. - #[inline] - pub fn last(&self) -> Option<(&A, &B)> { - self.get(self.len.checked_sub(1)?) - } - - /// If capacity exceeds length, shrink capacity to length. - pub fn shrink_to_fit(&mut self) { - if self.len() < self.capacity() { - let mut new_vec = Vec2::with_capacity(self.len()); - for (a, b) in mem::take(self).into_iter() { - new_vec.push(a, b); - } - *self = new_vec; - } else { - debug_assert!(self.len() == self.capacity()); - } - } - - /// Iterate over the elements. - #[inline] - pub fn iter(&self) -> Iter<'_, A, B> { - Iter { - aaa: self.aaa().iter(), - bbb: self.bbb_ptr(), - _marker: PhantomData, - } - } - - pub(crate) fn sort_insertion_by(&mut self, mut compare: F) - where - F: FnMut((&A, &B), (&A, &B)) -> Ordering, - { - insertion_sort( - self, - self.len, - |vec2, i, j| unsafe { - compare(vec2.get_unchecked(i), vec2.get_unchecked(j)) == Ordering::Less - }, - |vec2, a, b| { - slice_swap_shift(vec2.aaa_mut(), a, b); - slice_swap_shift(vec2.bbb_mut(), a, b); - }, - ); - } - - /// Sort the elements using given comparator. - pub fn sort_by(&mut self, mut compare: F) - where - F: FnMut((&A, &B), (&A, &B)) -> Ordering, - { - // Constant from rust stdlib. - const MAX_INSERTION: usize = 20; - if self.len() <= MAX_INSERTION { - self.sort_insertion_by(compare); - return; - } - - // TODO: sort without allocation. - // TODO: drain. 
- let mut entries: Vec<(A, B)> = mem::take(self).into_iter().collect(); - entries.sort_by(|(xa, xb), (ya, yb)| compare((xa, xb), (ya, yb))); - for (a, b) in entries { - self.push(a, b); - } - } -} - -impl Drop for Vec2 { - #[inline] - fn drop(&mut self) { - unsafe { - if self.cap != 0 { - self.drop_in_place(); - self.dealloc(); - } - } - } -} - -impl<'s, A, B> IntoIterator for &'s Vec2 { - type Item = (&'s A, &'s B); - type IntoIter = Iter<'s, A, B>; - - #[inline] - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl IntoIterator for Vec2 { - type Item = (A, B); - type IntoIter = IntoIter; - - #[inline] - fn into_iter(self) -> IntoIter { - let iter = IntoIter { - aaa_begin: self.aaa_ptr(), - bbb_begin: self.bbb_ptr(), - bbb_end: unsafe { NonNull::new_unchecked(self.bbb_ptr().as_ptr().add(self.len)) }, - bbb_ptr: self.bbb_ptr, - cap: self.cap, - }; - mem::forget(self); - iter - } -} - -impl PartialEq for Vec2 { - #[inline] - fn eq(&self, other: &Self) -> bool { - self.len == other.len && self.iter().eq(other.iter()) - } -} - -impl Eq for Vec2 {} - -impl Hash for Vec2 { - fn hash(&self, state: &mut H) { - self.len.hash(state); - for (a, b) in self.iter() { - a.hash(state); - b.hash(state); - } - } -} - -impl Allocative for Vec2 { - fn visit<'a, 'b: 'a>(&self, visitor: &'a mut Visitor<'b>) { - let mut visitor = visitor.enter_self_sized::(); - if self.cap != 0 { - let mut visitor = - visitor.enter_unique(allocative::Key::new("ptr"), mem::size_of::<*const ()>()); - { - let mut visitor = visitor.enter( - allocative::Key::new("data"), - Vec2Layout::::new(self.cap).layout.size(), - ); - for (a, b) in self { - a.visit(&mut visitor); - b.visit(&mut visitor); - } - visitor.exit(); - } - visitor.exit(); - } - visitor.exit(); - } -} - -#[cfg(test)] -mod tests { - use std::alloc::Layout; - use std::marker::PhantomData; - - use crate::vec2::Vec2; - use crate::vec2::Vec2Layout; - - #[test] - fn test_layout_for() { - assert_eq!( - Vec2Layout { - offset_of_bbb: 4, - layout: Layout::from_size_align(8, 4).unwrap(), - _marker: PhantomData, - }, - Vec2Layout::<[u8; 3], u32>::new(1) - ); - } - - #[test] - fn test_alloc_dealloc() { - unsafe { - let layout = Vec2Layout::<[u8; 3], u32>::new(100); - let data = layout.alloc(); - layout.dealloc(data); - } - } - - #[test] - fn test_push() { - let mut v = Vec2::new(); - v.push(1, 2); - assert_eq!(1, v.len()); - assert_eq!(Some((&1, &2)), v.get(0)); - } - - #[test] - fn test_push_many() { - let mut v = Vec2::new(); - for i in 0..100 { - v.push(i.to_string(), i * 2); - } - assert_eq!(100, v.len()); - for i in 0..100 { - assert_eq!(Some((&i.to_string(), &(i * 2))), v.get(i)); - } - } - - #[test] - fn test_into_iter() { - let mut v = Vec2::new(); - for i in 0..100 { - v.push(i.to_string(), i * 2); - } - for (i, (a, b)) in v.into_iter().enumerate() { - assert_eq!(i.to_string(), a); - assert_eq!(i * 2, b); - } - } - - #[test] - fn test_sort_insertion_by() { - let mut v = Vec2::new(); - v.push(1, 2); - v.push(3, 4); - v.push(2, 3); - v.push(3, 2); - v.sort_insertion_by(|(xa, xb), (ya, yb)| (xa, xb).cmp(&(ya, yb))); - assert_eq!(Some((&1, &2)), v.get(0)); - assert_eq!(Some((&2, &3)), v.get(1)); - assert_eq!(Some((&3, &2)), v.get(2)); - assert_eq!(Some((&3, &4)), v.get(3)); - } - - #[test] - fn test_shrink_to_fit() { - let mut v = Vec2::with_capacity(10); - v.push("a".to_owned(), "b".to_owned()); - v.push("c".to_owned(), "d".to_owned()); - v.shrink_to_fit(); - for _ in 0..2 { - assert_eq!(2, v.len()); - assert_eq!(2, v.capacity()); - assert_eq!( - vec![("a", 
"b"), ("c", "d")], - v.iter() - .map(|(a, b)| (a.as_str(), b.as_str())) - .collect::>() - ); - } - } - - #[test] - fn test_first() { - let mut v: Vec2 = Vec2::new(); - assert_eq!(None, v.first()); - v.push(1, 2); - assert_eq!(Some((&1, &2)), v.first()); - v.push(3, 4); - assert_eq!(Some((&1, &2)), v.first()); - } - - #[test] - fn test_last() { - let mut v: Vec2 = Vec2::new(); - assert_eq!(None, v.last()); - v.push(1, 2); - assert_eq!(Some((&1, &2)), v.last()); - v.push(3, 4); - assert_eq!(Some((&3, &4)), v.last()); - } - - #[repr(align(16))] - struct Align16; - - #[repr(align(8))] - struct Align8; - - #[test] - fn test_alignment() { - let v: Vec2 = Vec2::new(); - assert_eq!( - v.aaa_ptr() - .as_ptr() - .align_offset(std::mem::align_of::()), - 0 - ); - assert_eq!( - v.bbb_ptr() - .as_ptr() - .align_offset(std::mem::align_of::()), - 0 - ); - } -} diff --git a/starlark-rust/starlark_map/src/vec_map.rs b/starlark-rust/starlark_map/src/vec_map.rs new file mode 100644 index 0000000000000..561541aebf38e --- /dev/null +++ b/starlark-rust/starlark_map/src/vec_map.rs @@ -0,0 +1,284 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +mod hint; +mod iter; +mod simd; + +use std::hash::Hash; +use std::hash::Hasher; +use std::mem; + +use allocative::Allocative; +use equivalent::Equivalent; + +use crate::hash_value::StarlarkHashValue; +use crate::hashed::Hashed; +pub(crate) use crate::vec2::Vec2; +use crate::vec_map::hint::likely; +pub(crate) use crate::vec_map::iter::IntoIter; +pub(crate) use crate::vec_map::iter::IntoIterHashed; +pub(crate) use crate::vec_map::iter::Iter; +pub(crate) use crate::vec_map::iter::IterHashed; +pub(crate) use crate::vec_map::iter::IterMut; +pub(crate) use crate::vec_map::iter::IterMutUnchecked; +pub(crate) use crate::vec_map::iter::Keys; +pub(crate) use crate::vec_map::iter::Values; +pub(crate) use crate::vec_map::iter::ValuesMut; +use crate::vec_map::simd::find_hash_in_array; + +#[derive(Debug, Clone, Allocative)] +pub(crate) struct VecMap { + buckets: Vec2<(K, V), StarlarkHashValue>, +} + +impl Default for VecMap { + #[inline] + fn default() -> Self { + Self::new() + } +} + +impl VecMap { + #[inline] + pub(crate) const fn new() -> Self { + VecMap { + buckets: Vec2::new(), + } + } + + #[inline] + pub(crate) fn with_capacity(n: usize) -> Self { + VecMap { + buckets: Vec2::with_capacity(n), + } + } + + pub(crate) fn reserve(&mut self, additional: usize) { + self.buckets.reserve(additional); + } + + #[inline] + pub(crate) fn capacity(&self) -> usize { + self.buckets.capacity() + } + + #[inline] + pub(crate) fn get_index_of_hashed_raw( + &self, + hash: StarlarkHashValue, + mut eq: impl FnMut(&K) -> bool, + ) -> Option { + const _: () = assert!(mem::size_of::() == mem::size_of::()); + let hashes_ints = + unsafe { &*(self.buckets.bbb() as *const [StarlarkHashValue] as *const [u32]) }; + let mut i = 0; + while i < hashes_ints.len() { + i += 
+    #[inline]
+    pub(crate) fn get_index_of_hashed_raw(
+        &self,
+        hash: StarlarkHashValue,
+        mut eq: impl FnMut(&K) -> bool,
+    ) -> Option<usize> {
+        const _: () = assert!(mem::size_of::<StarlarkHashValue>() == mem::size_of::<u32>());
+        let hashes_ints =
+            unsafe { &*(self.buckets.bbb() as *const [StarlarkHashValue] as *const [u32]) };
+        let mut i = 0;
+        while i < hashes_ints.len() {
+            i += find_hash_in_array(&hashes_ints[i..], hash.get())?;
+            let k = unsafe { &self.buckets.aaa().get_unchecked(i).0 };
+            if likely(eq(k)) {
+                return Some(i);
+            }
+            i += 1;
+        }
+        debug_assert!(i == hashes_ints.len());
+        None
+    }
+
+    #[inline]
+    pub(crate) fn get_index_of_hashed<Q>(&self, key: Hashed<&Q>) -> Option<usize>
+    where
+        Q: ?Sized + Equivalent<K>,
+    {
+        self.get_index_of_hashed_raw(key.hash(), |k| key.key().equivalent(k))
+    }
+
+    #[inline]
+    pub(crate) fn get_index(&self, index: usize) -> Option<(&K, &V)> {
+        let ((k, v), _hash) = self.buckets.get(index)?;
+        Some((k, v))
+    }
+
+    #[inline]
+    pub(crate) unsafe fn get_unchecked(&self, index: usize) -> (Hashed<&K>, &V) {
+        debug_assert!(index < self.buckets.len());
+        let ((key, value), hash) = self.buckets.get_unchecked(index);
+        (Hashed::new_unchecked(*hash, key), value)
+    }
+
+    #[inline]
+    pub(crate) unsafe fn get_unchecked_mut(&mut self, index: usize) -> (Hashed<&K>, &mut V) {
+        debug_assert!(index < self.buckets.len());
+        let ((key, value), hash) = self.buckets.get_unchecked_mut(index);
+        (Hashed::new_unchecked(*hash, key), value)
+    }
+
+    #[inline]
+    pub(crate) fn insert_hashed_unique_unchecked(&mut self, key: Hashed<K>, value: V) {
+        let hash = key.hash();
+        self.buckets.push((key.into_key(), value), hash);
+    }
+
+    pub(crate) fn remove_hashed_entry<Q>(&mut self, key: Hashed<&Q>) -> Option<(K, V)>
+    where
+        Q: ?Sized + Equivalent<K>,
+    {
+        if let Some(index) = self.get_index_of_hashed(key) {
+            let (k, v) = self.remove(index);
+            Some((k.into_key(), v))
+        } else {
+            None
+        }
+    }
+
+    #[inline]
+    pub(crate) fn remove(&mut self, index: usize) -> (Hashed<K>, V) {
+        let ((key, value), hash) = self.buckets.remove(index);
+        (Hashed::new_unchecked(hash, key), value)
+    }
+
+    #[inline]
+    pub(crate) fn pop(&mut self) -> Option<(Hashed<K>, V)> {
+        let ((key, value), hash) = self.buckets.pop()?;
+        Some((Hashed::new_unchecked(hash, key), value))
+    }
+
+    #[inline]
+    pub(crate) fn len(&self) -> usize {
+        self.buckets.len()
+    }
+
+    #[inline]
+    pub(crate) fn is_empty(&self) -> bool {
+        self.buckets.is_empty()
+    }
+
+    pub(crate) fn clear(&mut self) {
+        self.buckets.clear();
+    }
+
+    #[inline]
+    pub(crate) fn values(&self) -> Values<K, V> {
+        Values { iter: self.iter() }
+    }
+
+    #[inline]
+    pub(crate) fn values_mut(&mut self) -> ValuesMut<K, V> {
+        ValuesMut {
+            iter: self.iter_mut(),
+        }
+    }
+
+    #[inline]
+    pub(crate) fn keys(&self) -> Keys<K, V> {
+        Keys { iter: self.iter() }
+    }
+
+    #[inline]
+    pub(crate) fn into_iter(self) -> IntoIter<K, V> {
+        IntoIter {
+            iter: self.into_iter_hashed(),
+        }
+    }
+
+    #[inline]
+    pub(crate) fn iter(&self) -> Iter<K, V> {
+        Iter {
+            iter: self.buckets.aaa().iter(),
+        }
+    }
+
+    #[inline]
+    pub(crate) fn iter_hashed(&self) -> IterHashed<K, V> {
+        IterHashed {
+            // Values go first since they terminate first and we can short-circuit
+            iter: self.buckets.iter(),
+        }
+    }
+
+    #[inline]
+    pub(crate) fn into_iter_hashed(self) -> IntoIterHashed<K, V> {
+        // See the comments on VMIntoIterHash for why this one looks different
+        IntoIterHashed {
+            iter: self.buckets.into_iter(),
+        }
+    }
+
+    #[inline]
+    pub(crate) fn iter_mut(&mut self) -> IterMut<K, V> {
+        IterMut {
+            iter: self.buckets.aaa_mut().iter_mut(),
+        }
+    }
+
+    #[inline]
+    pub(crate) fn iter_mut_unchecked(&mut self) -> IterMutUnchecked<K, V> {
+        IterMutUnchecked {
+            iter: self.buckets.aaa_mut().iter_mut(),
+        }
+    }
+
+    pub(crate) fn sort_keys(&mut self)
+    where
+        K: Ord,
+    {
+        self.buckets.sort_by(|(a, _ah), (b, _bh)| a.0.cmp(&b.0));
+    }
+
+    pub(crate) fn is_sorted_by_key(&self) -> bool
+    where
+        K: Ord,
+    {
+        self.buckets.aaa().windows(2).all(|w| w[0].0 <= w[1].0)
+    }
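As a side note on `is_sorted_by_key` above: the `windows(2)` pattern is the standard way to test sortedness, since a slice is sorted exactly when every adjacent pair is non-decreasing. A tiny standalone example:

    fn is_sorted<T: Ord>(xs: &[T]) -> bool {
        // `windows(2)` yields every adjacent pair; all pairs must be in order.
        xs.windows(2).all(|w| w[0] <= w[1])
    }

    fn main() {
        assert!(is_sorted(&[1, 2, 2, 3]));
        assert!(!is_sorted(&[2, 1, 3]));
        assert!(is_sorted::<i32>(&[])); // empty and single-element slices are sorted
    }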
+
+    /// Equal if entries are equal in the iterator order.
+    pub(crate) fn eq_ordered(&self, other: &Self) -> bool
+    where
+        K: PartialEq,
+        V: PartialEq,
+    {
+        // We compare hashes before comparing keys and values because it is faster
+        // (fewer branches, and no comparison of the rest if at least one hash is different).
+        self.buckets.bbb() == other.buckets.bbb() && self.buckets.aaa() == other.buckets.aaa()
+    }
+
+    /// Hash entries in the iterator order.
+    ///
+    /// Note, keys are not hashed, but previously computed hashes are hashed instead.
+    pub(crate) fn hash_ordered<H: Hasher>(&self, state: &mut H)
+    where
+        K: Hash,
+        V: Hash,
+    {
+        for e in self.iter_hashed() {
+            e.hash(state);
+        }
+    }
+
+    pub(crate) fn reverse(&mut self) {
+        self.buckets.aaa_mut().reverse();
+        self.buckets.bbb_mut().reverse();
+    }
+
+    pub(crate) fn retain<F>(&mut self, mut f: F)
+    where
+        F: FnMut(&K, &mut V) -> bool,
+    {
+        self.buckets.retain(|(k, v), _| f(k, v));
+    }
+}
diff --git a/starlark-rust/starlark_map/src/vec_map/mod.rs b/starlark-rust/starlark_map/src/vec_map/mod.rs
deleted file mode 100644
index b7bbb145c723c..0000000000000
--- a/starlark-rust/starlark_map/src/vec_map/mod.rs
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * Copyright 2019 The Starlark in Rust Authors.
- * Copyright (c) Facebook, Inc. and its affiliates.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-mod hint;
-mod iter;
-mod simd;
-
-use std::hash::Hash;
-use std::hash::Hasher;
-use std::mem;
-
-use allocative::Allocative;
-use equivalent::Equivalent;
-
-use crate::hash_value::StarlarkHashValue;
-use crate::hashed::Hashed;
-pub(crate) use crate::vec2::Vec2;
-use crate::vec_map::hint::likely;
-pub(crate) use crate::vec_map::iter::IntoIter;
-pub(crate) use crate::vec_map::iter::IntoIterHashed;
-pub(crate) use crate::vec_map::iter::Iter;
-pub(crate) use crate::vec_map::iter::IterHashed;
-pub(crate) use crate::vec_map::iter::IterMut;
-pub(crate) use crate::vec_map::iter::IterMutUnchecked;
-pub(crate) use crate::vec_map::iter::Keys;
-pub(crate) use crate::vec_map::iter::Values;
-pub(crate) use crate::vec_map::iter::ValuesMut;
-use crate::vec_map::simd::find_hash_in_array;
-
-/// Bucket in [`VecMap`].
-#[derive(Debug, Clone, Eq, PartialEq, Allocative)]
-pub(crate) struct Bucket<K, V> {
-    hash: StarlarkHashValue,
-    key: K,
-    value: V,
-}
-
-#[allow(clippy::derived_hash_with_manual_eq)]
-impl<K, V: Hash> Hash for Bucket<K, V> {
-    fn hash<S: Hasher>(&self, state: &mut S) {
-        self.hash.hash(state);
-        // Ignore the key, because `hash` is already the hash of the key,
-        // although maybe not as good hash as what is requested.
-        self.value.hash(state);
-    }
-}
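The comment in the deleted `Bucket` hash impl above is worth unpacking: hashing the stored hash value, instead of rehashing the key, is sound because equal keys always carry equal precomputed hashes, so the `Eq`/`Hash` contract is preserved; the trade-off is a potentially weaker hash distribution. A standalone sketch with a hypothetical `Entry` type:

    use std::hash::{Hash, Hasher};

    #[derive(PartialEq, Eq)]
    struct Entry {
        key_hash: u32, // precomputed hash of `key`
        key: String,
    }

    impl Hash for Entry {
        fn hash<H: Hasher>(&self, state: &mut H) {
            // Equal entries have equal `key_hash`, so this upholds the
            // Eq/Hash contract while skipping a full rehash of `key`.
            self.key_hash.hash(state);
        }
    }

    fn main() {
        let e = Entry { key_hash: 42, key: "k".to_owned() };
        let mut h = std::collections::hash_map::DefaultHasher::new();
        e.hash(&mut h); // only `key_hash` feeds the hasher
        let _ = h.finish();
    }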
-
-#[derive(Debug, Clone, Allocative)]
-pub(crate) struct VecMap<K, V> {
-    buckets: Vec2<(K, V), StarlarkHashValue>,
-}
-
-impl<K, V> Default for VecMap<K, V> {
-    #[inline]
-    fn default() -> Self {
-        Self::new()
-    }
-}
-
-impl<K, V> VecMap<K, V> {
-    #[inline]
-    pub(crate) const fn new() -> Self {
-        VecMap {
-            buckets: Vec2::new(),
-        }
-    }
-
-    #[inline]
-    pub(crate) fn with_capacity(n: usize) -> Self {
-        VecMap {
-            buckets: Vec2::with_capacity(n),
-        }
-    }
-
-    pub(crate) fn reserve(&mut self, additional: usize) {
-        self.buckets.reserve(additional);
-    }
-
-    #[inline]
-    pub(crate) fn capacity(&self) -> usize {
-        self.buckets.capacity()
-    }
-
-    #[inline]
-    pub(crate) fn get_index_of_hashed_raw(
-        &self,
-        hash: StarlarkHashValue,
-        mut eq: impl FnMut(&K) -> bool,
-    ) -> Option<usize> {
-        const _: () = assert!(mem::size_of::<StarlarkHashValue>() == mem::size_of::<u32>());
-        let hashes_ints =
-            unsafe { &*(self.buckets.bbb() as *const [StarlarkHashValue] as *const [u32]) };
-        let mut i = 0;
-        while i < hashes_ints.len() {
-            i += find_hash_in_array(&hashes_ints[i..], hash.get())?;
-            let k = unsafe { &self.buckets.aaa().get_unchecked(i).0 };
-            if likely(eq(k)) {
-                return Some(i);
-            }
-            i += 1;
-        }
-        debug_assert!(i == hashes_ints.len());
-        None
-    }
-
-    #[inline]
-    pub(crate) fn get_index_of_hashed<Q>(&self, key: Hashed<&Q>) -> Option<usize>
-    where
-        Q: ?Sized + Equivalent<K>,
-    {
-        self.get_index_of_hashed_raw(key.hash(), |k| key.key().equivalent(k))
-    }
-
-    #[inline]
-    pub(crate) fn get_index(&self, index: usize) -> Option<(&K, &V)> {
-        let ((k, v), _hash) = self.buckets.get(index)?;
-        Some((k, v))
-    }
-
-    #[inline]
-    pub(crate) unsafe fn get_unchecked(&self, index: usize) -> (Hashed<&K>, &V) {
-        debug_assert!(index < self.buckets.len());
-        let ((key, value), hash) = self.buckets.get_unchecked(index);
-        (Hashed::new_unchecked(*hash, key), value)
-    }
-
-    #[inline]
-    pub(crate) unsafe fn get_unchecked_mut(&mut self, index: usize) -> (Hashed<&K>, &mut V) {
-        debug_assert!(index < self.buckets.len());
-        let ((key, value), hash) = self.buckets.get_unchecked_mut(index);
-        (Hashed::new_unchecked(*hash, key), value)
-    }
-
-    #[inline]
-    pub(crate) fn insert_hashed_unique_unchecked(&mut self, key: Hashed<K>, value: V) {
-        let hash = key.hash();
-        self.buckets.push((key.into_key(), value), hash);
-    }
-
-    pub(crate) fn remove_hashed_entry<Q>(&mut self, key: Hashed<&Q>) -> Option<(K, V)>
-    where
-        Q: ?Sized + Equivalent<K>,
-    {
-        if let Some(index) = self.get_index_of_hashed(key) {
-            let (k, v) = self.remove(index);
-            Some((k.into_key(), v))
-        } else {
-            None
-        }
-    }
-
-    #[inline]
-    pub(crate) fn remove(&mut self, index: usize) -> (Hashed<K>, V) {
-        let ((key, value), hash) = self.buckets.remove(index);
-        (Hashed::new_unchecked(hash, key), value)
-    }
-
-    #[inline]
-    pub(crate) fn pop(&mut self) -> Option<(Hashed<K>, V)> {
-        let ((key, value), hash) = self.buckets.pop()?;
-        Some((Hashed::new_unchecked(hash, key), value))
-    }
-
-    #[inline]
-    pub(crate) fn len(&self) -> usize {
-        self.buckets.len()
-    }
-
-    #[inline]
-    pub(crate) fn is_empty(&self) -> bool {
-        self.buckets.is_empty()
-    }
-
-    pub(crate) fn clear(&mut self) {
-        self.buckets.clear();
-    }
-
-    #[inline]
-    pub(crate) fn values(&self) -> Values<K, V> {
-        Values { iter: self.iter() }
-    }
-
-    #[inline]
-    pub(crate) fn values_mut(&mut self) -> ValuesMut<K, V> {
-        ValuesMut {
-            iter: self.iter_mut(),
-        }
-    }
-
-    #[inline]
-    pub(crate) fn keys(&self) -> Keys<K, V> {
-        Keys { iter: self.iter() }
-    }
-
-    #[inline]
-    pub(crate) fn into_iter(self) -> IntoIter<K, V> {
-        IntoIter {
-            iter: self.into_iter_hashed(),
-        }
-    }
-
-    #[inline]
-    pub(crate) fn iter(&self) -> Iter<K, V> {
-        Iter {
-            iter: self.buckets.aaa().iter(),
-        }
-    }
-
-    #[inline]
-    pub(crate) fn iter_hashed(&self) -> IterHashed<K, V> {
-        IterHashed {
-            // Values go first since they terminate first and we can short-circuit
-            iter: self.buckets.iter(),
-        }
-    }
-
-    #[inline]
-    pub(crate) fn into_iter_hashed(self) -> IntoIterHashed<K, V> {
-        // See the comments on VMIntoIterHash for why this one looks different
-        IntoIterHashed {
-            iter: self.buckets.into_iter(),
-        }
-    }
-
-    #[inline]
-    pub(crate) fn iter_mut(&mut self) -> IterMut<K, V> {
-        IterMut {
-            iter: self.buckets.aaa_mut().iter_mut(),
-        }
-    }
-
-    #[inline]
-    pub(crate) fn iter_mut_unchecked(&mut self) -> IterMutUnchecked<K, V> {
-        IterMutUnchecked {
-            iter: self.buckets.aaa_mut().iter_mut(),
-        }
-    }
-
-    pub(crate) fn sort_keys(&mut self)
-    where
-        K: Ord,
-    {
-        self.buckets.sort_by(|(a, _ah), (b, _bh)| a.0.cmp(&b.0));
-    }
-
-    pub(crate) fn is_sorted_by_key(&self) -> bool
-    where
-        K: Ord,
-    {
-        self.buckets.aaa().windows(2).all(|w| w[0].0 <= w[1].0)
-    }
-
-    /// Equal if entries are equal in the iterator order.
-    pub(crate) fn eq_ordered(&self, other: &Self) -> bool
-    where
-        K: PartialEq,
-        V: PartialEq,
-    {
-        // We compare hashes before comparing keys and values because it is faster
-        // (fewer branches, and no comparison of the rest it at lest one hash is different).
-        self.buckets.bbb() == other.buckets.bbb() && self.buckets.aaa() == other.buckets.aaa()
-    }
-
-    /// Hash entries in the iterator order.
-    ///
-    /// Note, keys are not hashed, but previously computed hashes are hashed instead.
-    pub(crate) fn hash_ordered<H: Hasher>(&self, state: &mut H)
-    where
-        K: Hash,
-        V: Hash,
-    {
-        for e in self.iter_hashed() {
-            e.hash(state);
-        }
-    }
-}
diff --git a/starlark-rust/starlark_map/src/vec_map/simd.rs b/starlark-rust/starlark_map/src/vec_map/simd.rs
index 951b5a6bfa070..307b4fada7f92 100644
--- a/starlark-rust/starlark_map/src/vec_map/simd.rs
+++ b/starlark-rust/starlark_map/src/vec_map/simd.rs
@@ -31,35 +31,43 @@ pub(crate) fn find_hash_in_array_without_simd(array: &[u32], hash: u32) -> Option<usize> {
 pub(crate) fn find_hash_in_array(array: &[u32], hash: u32) -> Option<usize> {
     #[cfg(rust_nightly)]
     unsafe {
+        // TODO(yurysamkevich): remove conditional compilation after updating rust toolchain
+        #[cfg(version("1.76"))]
+        use std::simd::cmp::SimdPartialEq;
         use std::simd::*;
 
         // 128-bit SIMD is available on x86_64 and aarch64.
        // Also shorter SIMD works better for shorter arrays.
        type T = Simd<u32, 4>;
 
-        if array.len() < T::LANES {
+        #[cfg(version("1.76"))]
+        const LANES: usize = T::LEN;
+        #[cfg(not(version("1.76")))]
+        const LANES: usize = T::LANES;
+
+        if array.len() < LANES {
             find_hash_in_array_without_simd(array, hash)
         } else {
             let mut i = 0;
             let hash = T::splat(hash);
             // Process 4 elements at a time except last <= 4 elements.
-            while i + T::LANES < array.len() {
-                let next_hashes = T::from_slice(array.get_unchecked(i..i + T::LANES));
+            while i + LANES < array.len() {
+                let next_hashes = T::from_slice(array.get_unchecked(i..i + LANES));
                 let eq = next_hashes.simd_eq(hash);
                 if eq.any() {
                     return Some(i + eq.to_bitmask().trailing_zeros() as usize);
                 }
-                i += T::LANES;
+                i += LANES;
             }
             // Process last <= 4 elements.
-            debug_assert!(i >= array.len() - T::LANES);
+            debug_assert!(i >= array.len() - LANES);
             debug_assert!(i < array.len());
-            let next_hashes = T::from_slice(array.get_unchecked(array.len() - T::LANES..));
+            let next_hashes = T::from_slice(array.get_unchecked(array.len() - LANES..));
             let eq = next_hashes.simd_eq(hash);
             if eq.any() {
-                Some(array.len() - T::LANES + eq.to_bitmask().trailing_zeros() as usize)
+                Some(array.len() - LANES + eq.to_bitmask().trailing_zeros() as usize)
             } else {
                 None
             }
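A note on the tail handling in the hunk above: rather than falling back to a scalar loop, the last `LANES` elements are searched with one final, possibly overlapping window, which may re-inspect a few already-checked (non-matching) elements but can never miss one. The same control flow in stable, scalar Rust, with a chunk size of 4 standing in for the SIMD width (illustrative only):

    fn find_u32(array: &[u32], needle: u32) -> Option<usize> {
        const LANES: usize = 4; // stand-in for the SIMD vector width
        if array.len() < LANES {
            return array.iter().position(|&x| x == needle);
        }
        let mut i = 0;
        // Full windows of LANES elements.
        while i + LANES < array.len() {
            if let Some(j) = array[i..i + LANES].iter().position(|&x| x == needle) {
                return Some(i + j);
            }
            i += LANES;
        }
        // The final window overlaps the last full one instead of shrinking:
        // it may re-test elements, but they were already known not to match.
        let start = array.len() - LANES;
        array[start..]
            .iter()
            .position(|&x| x == needle)
            .map(|j| start + j)
    }

    fn main() {
        assert_eq!(find_u32(&[5, 6, 7, 8, 9], 9), Some(4));
        assert_eq!(find_u32(&[5, 6], 6), Some(1));
        assert_eq!(find_u32(&[1, 2, 3, 4, 5, 6], 42), None);
    }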
diff --git a/starlark-rust/starlark_syntax/BUCK b/starlark-rust/starlark_syntax/BUCK
index b22706e4083ae..5fc98ce3b3c2a 100644
--- a/starlark-rust/starlark_syntax/BUCK
+++ b/starlark-rust/starlark_syntax/BUCK
@@ -1,6 +1,5 @@
 load("@fbcode_macros//build_defs:native_rules.bzl", "buck_filegroup", "buck_genrule")
 load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library")
-load("@fbsource//tools/build_defs:glob_defs.bzl", "glob")
 
 oncall("build_infra")
 
@@ -47,6 +46,9 @@ rust_library(
         # please implement it inside the `starlark` crate.
         # For codemods, use libCST.
         "//buck2/starlark-rust/...",
+        # For faster incremental compilation of buck2,
+        # we pull `starlark::Error` from this crate rather than from `starlark`.
+        "//buck2/app/buck2_error/...",
     ],
     deps = [
         "fbsource//third-party/rust:annotate-snippets",
diff --git a/starlark-rust/starlark_syntax/Cargo.toml b/starlark-rust/starlark_syntax/Cargo.toml
index 0734b3b257388..778a3ced7dca4 100644
--- a/starlark-rust/starlark_syntax/Cargo.toml
+++ b/starlark-rust/starlark_syntax/Cargo.toml
@@ -1,17 +1,17 @@
 [package]
-name = "starlark_syntax"
-edition = "2021"
-version = "0.9.0"
-license = "Apache-2.0"
-description = "Starlark language AST"
-documentation = "https://docs.rs/starlark"
-repository = "https://github.com/facebookexperimental/starlark-rust"
 authors = [
     "Damien Martin-Guillerez <dmarting@google.com>",
     "Facebook",
 ]
-keywords = ["starlark", "skylark", "bazel", "language", "interpreter"]
 categories = ["parser-implementations", "development-tools"]
+description = "Starlark language AST"
+documentation = "https://docs.rs/starlark"
+edition = "2021"
+keywords = ["starlark", "skylark", "bazel", "language", "interpreter"]
+license = "Apache-2.0"
+name = "starlark_syntax"
+repository = "https://github.com/facebook/starlark-rust"
+version = "0.12.0"
 
 [build-dependencies]
 lalrpop = "0.19.7"
@@ -21,18 +21,18 @@ annotate-snippets = { version = "0.9.0", features = [] }
 anyhow = { workspace = true }
 derivative = { workspace = true }
 derive_more = { workspace = true }
-logos = "0.12"
-once_cell = "1.8"
 lalrpop-util = "0.19.7"
+logos = "0.12"
 lsp-types = "0.94.1"
 memchr = { workspace = true }
 num-bigint = "0.4.3"
 num-traits = "0.2"
+once_cell = "1.8"
 thiserror = "1.0.36"
 
 allocative = { workspace = true }
 dupe = { workspace = true }
-starlark_map = { version = "0.9.0", path = "../starlark_map" }
+starlark_map = { version = "0.12.0", path = "../starlark_map" }
 
 [dev-dependencies]
 serde_json = "1.0"
diff --git a/starlark-rust/starlark_syntax/src/call_stack.rs b/starlark-rust/starlark_syntax/src/call_stack.rs
index 6cb8bb1763065..d0ed4bc019bde 100644
--- a/starlark-rust/starlark_syntax/src/call_stack.rs
+++ b/starlark-rust/starlark_syntax/src/call_stack.rs
@@ -30,6 +30,8 @@ use std::fmt::Display;
 
 use crate::frame::Frame;
 
+pub const CALL_STACK_TRACEBACK_PREFIX: &str = "Traceback (most recent call last):";
+
 /// Owned call stack.
 #[derive(Debug, Default, Clone, PartialEq, Eq, Hash)]
 pub struct CallStack {
@@ -53,7 +55,7 @@ impl Display for CallStack {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         if !self.frames.is_empty() {
             // Match Python output.
-            writeln!(f, "Traceback (most recent call last):")?;
+            writeln!(f, "{}", CALL_STACK_TRACEBACK_PREFIX)?;
             // TODO(nga): use real module name.
             let mut prev = "";
             for x in &self.frames {
diff --git a/starlark-rust/starlark_syntax/src/codemap.rs b/starlark-rust/starlark_syntax/src/codemap.rs
index 1a54e7af98669..c1437b789d9ed 100644
--- a/starlark-rust/starlark_syntax/src/codemap.rs
+++ b/starlark-rust/starlark_syntax/src/codemap.rs
@@ -22,7 +22,11 @@
 //! source code will not exceed 4GiB. The `CodeMap` can look up the source file, line, and column
 //! of a `Pos` or `Span`, as well as provide source code snippets for error reporting.
 use std::cmp;
+use std::cmp::Ordering;
+use std::collections::hash_map::Entry;
+use std::collections::HashMap;
 use std::fmt;
+use std::fmt::Debug;
 use std::fmt::Display;
 use std::hash::Hash;
 use std::hash::Hasher;
@@ -30,6 +34,7 @@ use std::ops::Add;
 use std::ops::AddAssign;
 use std::ops::Deref;
 use std::ops::DerefMut;
+use std::ops::Sub;
 use std::ptr;
 use std::sync::Arc;
 
@@ -37,6 +42,8 @@ use allocative::Allocative;
 use dupe::Dupe;
 use once_cell::sync::Lazy;
 
+use crate::fast_string;
+
 /// A small, `Copy`, value representing a position in a `CodeMap`'s file.
 #[derive(
     Copy, Clone, Dupe, Hash, Eq, PartialEq, PartialOrd, Ord, Debug, Default, Allocative
@@ -62,6 +69,13 @@ impl Add<u32> for Pos {
     }
 }
 
+impl Sub<u32> for Pos {
+    type Output = Pos;
+    fn sub(self, other: u32) -> Pos {
+        Pos(self.0 - other)
+    }
+}
+
 impl AddAssign<u32> for Pos {
     fn add_assign(&mut self, other: u32) {
         self.0 += other;
@@ -69,7 +83,9 @@ }
 
 /// A range of text within a CodeMap.
-#[derive(Copy, Dupe, Clone, Hash, Eq, PartialEq, Debug, Default, Allocative)]
+#[derive(
+    Copy, Dupe, Clone, Hash, Eq, PartialEq, Ord, PartialOrd, Debug, Default, Allocative
+)]
 pub struct Span {
     /// The position in the codemap representing the first byte of the span.
     begin: Pos,
@@ -125,10 +141,16 @@ impl Span {
     pub fn contains(self, pos: Pos) -> bool {
         self.begin <= pos && pos <= self.end
     }
+
+    /// Determines whether a `span` intersects with this span.
+    /// End of range is inclusive.
+    pub fn intersects(self, span: Span) -> bool {
+        self.contains(span.begin) || self.contains(span.end) || span.contains(self.begin)
+    }
 }
 
 /// Associate a Span with a value of arbitrary type (e.g. an AST node).
-#[derive(Clone, PartialEq, Eq, Hash, Debug, Copy)]
+#[derive(Clone, Copy, Dupe, PartialEq, Eq, Hash, Debug)]
 pub struct Spanned<T> {
     /// Data in the node.
     pub node: T,
@@ -169,8 +191,11 @@ impl<T> DerefMut for Spanned<T> {
 // A cheap unowned unique identifier per file/CodeMap,
 // somewhat delving into internal details.
 // Remains unique because we take a reference to the CodeMap.
-#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy, Dupe)]
-pub struct CodeMapId(*const ());
+#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy, Dupe, Allocative)]
+pub struct CodeMapId(#[allocative(skip)] *const ());
+
+unsafe impl Send for CodeMapId {}
+unsafe impl Sync for CodeMapId {}
 
 impl CodeMapId {
     pub const EMPTY: CodeMapId = CodeMapId(ptr::null());
@@ -187,6 +212,36 @@ enum CodeMapImpl {
 #[derive(Clone, Dupe, Allocative)]
 pub struct CodeMap(CodeMapImpl);
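The `CodeMaps` collection introduced just below keys its map by `CodeMapId`, which is derived from pointer identity, so re-adding the same `CodeMap` is a no-op. A hedged usage sketch (crate-internal visibility aside; the `CodeMap::new(filename, source)` constructor appears later in this diff, and `id()` is assumed to be the accessor used by the `Ord` impl below):

    let codemap = CodeMap::new("f.star".to_owned(), "x = 1\n".to_owned());
    let mut maps = CodeMaps::default();
    maps.add(&codemap);
    maps.add(&codemap); // same id: the entry is not duplicated
    assert!(maps.get(codemap.id()).is_some());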
+/// Multiple [`CodeMap`].
+#[derive(Clone, Default, Debug, PartialEq, Allocative)]
+pub struct CodeMaps {
+    codemaps: HashMap<CodeMapId, CodeMap>,
+}
+
+impl CodeMaps {
+    /// Lookup by id.
+    pub fn get(&self, id: CodeMapId) -> Option<&CodeMap> {
+        self.codemaps.get(&id)
+    }
+
+    /// Add codemap if not already present.
+    pub fn add(&mut self, codemap: &CodeMap) {
+        match self.codemaps.entry(codemap.id()) {
+            Entry::Occupied(_) => {}
+            Entry::Vacant(e) => {
+                e.insert(codemap.dupe());
+            }
+        }
+    }
+
+    /// Add all codemaps.
+    pub fn add_all(&mut self, codemaps: &CodeMaps) {
+        for codemap in codemaps.codemaps.values() {
+            self.add(codemap);
+        }
+    }
+}
+
 /// A `CodeMap`'s record of a source file.
 #[derive(Allocative)]
 struct CodeMapData {
@@ -335,9 +390,7 @@ impl CodeMap {
         let line = self.find_line(pos);
         let line_span = self.line_span(line);
         let byte_col = pos.0 - line_span.begin.0;
-        let column = self.source_span(line_span)[..byte_col as usize]
-            .chars()
-            .count();
+        let column = fast_string::len(&self.source_span(line_span)[..byte_col as usize]).0;
         ResolvedPos { line, column }
     }
 
@@ -369,6 +422,18 @@ impl CodeMap {
             .unwrap_or_else(|| panic!("Line {} is out of range for {:?}", line, self))
     }
 
+    /// Trim trailing newline if any, including windows, from the line span.
+    pub fn line_span_trim_newline(&self, line: usize) -> Span {
+        let mut span = self.line_span(line);
+        if self.source_span(span).ends_with('\n') {
+            span.end.0 -= 1;
+        }
+        if self.source_span(span).ends_with('\r') {
+            span.end.0 -= 1;
+        }
+        span
+    }
+
     /// Gets the span representing a line by line number.
     ///
     /// The line number is 0-indexed (first line is numbered 0). The returned span includes the
@@ -411,7 +476,9 @@ impl CodeMap {
 }
 
 /// All are 0-based, but print out with 1-based.
-#[derive(Copy, Clone, Dupe, Hash, Eq, PartialEq, Debug, Default)]
+#[derive(
+    Copy, Clone, Dupe, Hash, Eq, PartialEq, Ord, PartialOrd, Debug, Default
+)]
 pub struct ResolvedPos {
     /// The line number within the file (0-indexed).
     pub line: usize,
@@ -450,6 +517,21 @@ pub struct FileSpan {
     pub span: Span,
 }
 
+impl PartialOrd for FileSpan {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for FileSpan {
+    fn cmp(&self, that: &Self) -> Ordering {
+        Ord::cmp(
+            &(self.filename(), self.span, self.file.id().0 as usize),
+            &(that.filename(), that.span, that.file.id().0 as usize),
+        )
+    }
+}
+
 impl<'a> fmt::Display for FileSpanRef<'a> {
     /// Formats the span as `filename:start_line:start_column: end_line:end_column`,
     /// or if the span is zero-length, `filename:line:column`, with a 1-indexed line and column.
@@ -533,7 +615,9 @@ impl FileSpan {
 
 /// The locations of values within a span.
 /// All are 0-based, but print out with 1-based.
-#[derive(Debug, Dupe, Clone, Copy, PartialEq, Eq, Hash, Default)]
+#[derive(
+    Debug, Dupe, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Hash, Default
+)]
 pub struct ResolvedSpan {
     /// Beginning of the span.
     pub begin: ResolvedPos,
@@ -607,7 +691,7 @@ impl ResolvedSpan {
 
 /// File and line number.
 #[derive(Debug, PartialEq, Eq, Hash, Clone, derive_more::Display)]
-#[display(fmt = "{}:{}", file, "line + 1")]
+#[display("{}:{}", file, line + 1)]
 pub struct ResolvedFileLine {
     /// File name.
     pub file: String,
@@ -616,7 +700,7 @@ pub struct ResolvedFileLine {
 }
 
 /// File name and line and column pairs for a span.
-#[derive(Debug, PartialEq, Eq, Hash, Clone)]
+#[derive(Debug, PartialEq, Eq, Ord, PartialOrd, Hash, Clone)]
 pub struct ResolvedFileSpan {
     /// File name.
     pub file: String,
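One subtlety exercised by the `test_span_intersects` cases below: `Span::contains` treats `end` as inclusive, so spans that merely touch at an endpoint still intersect. A standalone sketch of the same predicate:

    #[derive(Clone, Copy)]
    struct S {
        begin: u32,
        end: u32, // inclusive, matching Span::contains above
    }

    impl S {
        fn contains(self, p: u32) -> bool {
            self.begin <= p && p <= self.end
        }
        fn intersects(self, o: S) -> bool {
            self.contains(o.begin) || self.contains(o.end) || o.contains(self.begin)
        }
    }

    fn main() {
        let a = S { begin: 2, end: 4 };
        assert!(a.intersects(S { begin: 4, end: 6 })); // touching endpoints count
        assert!(!a.intersects(S { begin: 5, end: 7 }));
    }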
@@ -807,6 +891,75 @@
         assert!(!span.contains(ResolvedPos { line: 5, column: 0 }));
     }
 
+    #[test]
+    fn test_span_intersects() {
+        let span = Span {
+            begin: Pos(2),
+            end: Pos(4),
+        };
+        // s:   |---|
+        // o:      |---|
+        assert!(!span.intersects(Span {
+            begin: Pos(5),
+            end: Pos(7),
+        }));
+
+        // s:   |---|
+        // o:     |---|
+        assert!(span.intersects(Span {
+            begin: Pos(4),
+            end: Pos(6),
+        }));
+
+        // s:   |---|
+        // o:    |---|
+        assert!(span.intersects(Span {
+            begin: Pos(3),
+            end: Pos(5),
+        }));
+
+        // s:   |---|
+        // o:   |---|
+        assert!(span.intersects(Span {
+            begin: Pos(2),
+            end: Pos(4),
+        }));
+
+        // s:   |---|
+        // o:  |---|
+        assert!(span.intersects(Span {
+            begin: Pos(1),
+            end: Pos(3),
+        }));
+
+        // s:   |---|
+        // o: |---|
+        assert!(span.intersects(Span {
+            begin: Pos(0),
+            end: Pos(2),
+        }));
+
+        // s:   |---|
+        // o: |--|
+        assert!(!span.intersects(Span {
+            begin: Pos(0),
+            end: Pos(1),
+        }));
+
+        let large_span = Span {
+            begin: Pos(2),
+            end: Pos(8),
+        };
+
+        // s: |-------|
+        // o:   |---|
+        assert!(large_span.intersects(span));
+
+        // s:   |---|
+        // o: |-------|
+        assert!(span.intersects(large_span));
+    }
+
     #[test]
     fn test_resolved_file_span_to_begin_resolved_file_line() {
         let span = ResolvedFileSpan {
diff --git a/starlark-rust/starlark_syntax/src/diagnostic.rs b/starlark-rust/starlark_syntax/src/diagnostic.rs
index 54b0dace778c3..c1192add2a51b 100644
--- a/starlark-rust/starlark_syntax/src/diagnostic.rs
+++ b/starlark-rust/starlark_syntax/src/diagnostic.rs
@@ -15,10 +15,8 @@
  * limitations under the License.
  */
 
-use std::error::Error;
+use std::error::Error as StdError;
 use std::fmt;
-use std::fmt::Display;
-use std::fmt::Formatter;
 
 use crate::call_stack::CallStack;
 use crate::codemap::CodeMap;
@@ -26,99 +24,123 @@ use crate::codemap::FileSpan;
 use crate::codemap::Span;
 use crate::span_display::span_display;
 
-/// An error plus its origination location and call stack.
+/// A value of type `T`, together with some diagnostic information.
 ///
-/// The underlying [`message`](Diagnostic::message) is an [`anyhow::Error`].
-/// The [`Diagnostic`] structure itself usually stored within an [`anyhow::Error`].
-#[derive(Debug)]
-pub struct Diagnostic {
-    /// Underlying error for the [`Diagnostic`].
-    /// Should _never_ be of type [`Diagnostic`] itself.
-    pub message: anyhow::Error,
-
-    /// Location where the error originated.
-    pub span: Option<FileSpan>,
+/// Most code in starlark should be using `starlark::Error` as the error type. However, some code
+/// may want to have strongly typed errors, while still being able to have diagnostics.
+/// `WithDiagnostic` is the tool for that. `WithDiagnostic` is always one word in size,
+/// and so can be used as an error type in performance sensitive code.
+///
+/// `WithDiagnostic` is `pub`, but only within the starlark crates, it's not a part of the API.
+///
+/// Returning a `WithDiagnostic` value guarantees that a diagnostic is actually present, ie the
+/// diagnostic is not optional.
+pub struct WithDiagnostic<T>(Box<WithDiagnosticInner<T>>);
 
-    /// Call stack where the error originated.
-    pub call_stack: CallStack,
+struct WithDiagnosticInner<T> {
+    t: T,
+    diagnostic: Diagnostic,
 }
 
-impl Error for Diagnostic {
-    fn source(&self) -> Option<&(dyn Error + 'static)> {
-        // We do have an underlying source (namely `self.message`), but if we return
-        // it then `anyhow` will print it with `{:#}`, and we already print it in our
-        // `Display`, which would cause it to appear twice.
-        // Therefore, we say we have no source.
-        None
+impl<T> WithDiagnostic<T> {
+    pub fn new_spanned(t: T, span: Span, codemap: &CodeMap) -> Self {
+        Self(Box::new(WithDiagnosticInner {
+            t,
+            diagnostic: Diagnostic {
+                span: Some(codemap.file_span(span)),
+                call_stack: CallStack::default(),
+            },
+        }))
     }
 
-    // TODO(nga): figure out how to do it with unstable rust.
-    // fn backtrace(&self) -> Option<&std::backtrace::Backtrace> {
-    //     Some(self.message.backtrace())
-    // }
-}
+    /// The contract of this type is normally that it actually contains diagnostic information.
+    /// However, `starlark::Error` doesn't guarantee that, but it'd be convenient to use this type
+    /// for it anyway. So we make an exception. Don't use this function for anything else.
+    pub(crate) fn new_empty(t: T) -> Self {
+        Self(Box::new(WithDiagnosticInner {
+            t,
+            diagnostic: Diagnostic::default(),
+        }))
+    }
 
-impl Diagnostic {
-    /// Create a new [`Diagnostic`] containing an underlying error and span.
-    /// If the given `message` is already a [`Diagnostic`] with a [`Span`],
-    /// the new span will be ignored and the original `message` returned.
-    pub fn new(message: impl Into<anyhow::Error>, span: Span, codemap: &CodeMap) -> anyhow::Error {
-        Self::modify(message.into(), |d| d.set_span(span, codemap))
+    pub fn inner(&self) -> &T {
+        &self.0.t
     }
 
-    /// Modify an error by attaching diagnostic information to it - e.g. `span`/`call_stack`.
-    /// If given an [`anyhow::Error`] which is a [`Diagnostic`], it will add the information to the
-    /// existing [`Diagnostic`]. If not, it will wrap the error in [`Diagnostic`].
-    #[cold]
-    pub fn modify(mut err: anyhow::Error, f: impl FnOnce(&mut Diagnostic)) -> anyhow::Error {
-        match err.downcast_mut::<Diagnostic>() {
-            Some(diag) => {
-                f(diag);
-                err
-            }
-            _ => {
-                let mut err = Self {
-                    message: err,
-                    span: None,
-                    call_stack: CallStack::default(),
-                };
-                f(&mut err);
-                err.into()
-            }
-        }
+    pub fn into_inner(self) -> T {
+        self.0.t
     }
 
-    /// Set the [`Diagnostic::span`] field, unless it's already been set.
+    pub fn map<U>(self, f: impl FnOnce(T) -> U) -> WithDiagnostic<U> {
+        WithDiagnostic(Box::new(WithDiagnosticInner {
+            t: f(self.0.t),
+            diagnostic: self.0.diagnostic,
+        }))
+    }
+
+    pub fn span(&self) -> Option<&FileSpan> {
+        self.0.diagnostic.span.as_ref()
+    }
+
+    pub fn call_stack(&self) -> &CallStack {
+        &self.0.diagnostic.call_stack
+    }
+
+    /// Set the span, unless it's already been set.
     pub fn set_span(&mut self, span: Span, codemap: &CodeMap) {
-        if self.span.is_none() {
-            // We want the best span, which is likely the first person to set it
-            self.span = Some(codemap.file_span(span));
+        if self.0.diagnostic.span.is_none() {
+            self.0.diagnostic.span = Some(codemap.file_span(span));
         }
     }
 
-    /// Set the [`Diagnostic::call_stack`] field, unless it's already been set.
+    /// Set the `call_stack` field, unless it's already been set.
     pub fn set_call_stack(&mut self, call_stack: impl FnOnce() -> CallStack) {
-        if self.call_stack.is_empty() {
-            // We want the best call stack, which is likely the first person to set it
-            self.call_stack = call_stack();
+        if self.0.diagnostic.call_stack.is_empty() {
+            self.0.diagnostic.call_stack = call_stack();
         }
     }
+}
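Why the `Box` in `WithDiagnostic` matters: with all diagnostic state behind a single non-null pointer, the wrapper stays one machine word regardless of the payload, which is what the size assertion in `error.rs` later in this diff relies on. A minimal sketch with hypothetical types:

    use std::mem;

    struct Inner<T> {
        t: T,
        extra: [u64; 8], // stand-in for span + call stack
    }

    struct Wrapper<T>(Box<Inner<T>>);

    fn main() {
        // One word, independent of the payload behind the Box.
        assert_eq!(mem::size_of::<Wrapper<String>>(), mem::size_of::<usize>());
    }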
 
-    /// Print an error to the stderr stream. If the error is a [`Diagnostic`] it will use
-    /// color-codes when printing.
-    ///
-    /// Note that this function doesn't print any context information if the error is a
-    /// [`Diagnostic`], so you might prefer to use `eprintln!("{:#}"), err)`
-    /// if you suspect there is useful context (although you won't get pretty colors).
-    pub fn eprint(err: &anyhow::Error) {
-        match err.downcast_ref::<Diagnostic>() {
-            None => eprintln!("{:#}", err),
-            Some(diag) => diagnostic_stderr(diag),
-        }
+impl<T: StdError> fmt::Display for WithDiagnostic<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // Not showing the context trace without `{:#}` or `{:?}` is the same thing that anyhow does
+        let with_context = f.alternate() && self.0.t.source().is_some();
+        diagnostic_display(self, false, f, with_context)
     }
+}
 
+impl<T: StdError> fmt::Debug for WithDiagnostic<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        diagnostic_display(self, false, f, /* with_context */ true)
+    }
+}
+
+impl<T: Into<crate::error::ErrorKind>> From<WithDiagnostic<T>> for crate::Error {
+    fn from(e: WithDiagnostic<T>) -> Self {
+        let diagnostic = e.0.diagnostic;
+        let mut e: crate::Error = e.0.t.into();
+        e.0.0.diagnostic = diagnostic;
+        e
+    }
+}
+
+/// A description of where in starlark execution the error happened.
+#[derive(Debug, Default)]
+struct Diagnostic {
+    /// Location where the error originated.
+    span: Option<FileSpan>,
+
+    /// Call stack where the error originated.
+    call_stack: CallStack,
+}
+
+impl Diagnostic {
     /// Gets annotated snippets for a [`Diagnostic`].
-    fn get_display_list<'a>(&'a self, annotation_label: &'a str, color: bool) -> impl Display + 'a {
+    fn get_display_list<'a>(
+        &'a self,
+        annotation_label: &'a str,
+        color: bool,
+    ) -> impl fmt::Display + 'a {
         span_display(
             self.span.as_ref().map(|s| s.as_ref()),
             annotation_label,
@@ -127,36 +149,29 @@ impl Diagnostic {
     }
 }
 
-impl Display for Diagnostic {
-    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
-        diagnostic_display(self, false, f)
-    }
-}
-
 /////////////////////////////////////////////////////////////////////
 // DISPLAY RELATED UTILITIES
 // Since formatting these types is difficult, we reuse the Rust compiler
 // variants by doing a conversion using annotate-snippets
 // (https://github.com/rust-lang/annotate-snippets-rs)
 
-fn diagnostic_display(diagnostic: &Diagnostic, color: bool, f: &mut dyn fmt::Write) -> fmt::Result {
-    write!(f, "{}", diagnostic.call_stack)?;
-    let annotation_label = format!("{}", diagnostic.message);
+pub(crate) fn diagnostic_display(
+    d: &WithDiagnostic<impl fmt::Display>,
+    color: bool,
+    f: &mut dyn fmt::Write,
+    with_context: bool,
+) -> fmt::Result {
+    write!(f, "{}", d.call_stack())?;
+    let annotation_label = format!("{}", d.inner());
     // I set color to false here to make the comparison easier with tests (coloring
     // adds in pretty strange unicode chars).
-    let display_list = diagnostic.get_display_list(&annotation_label, color);
+    let display_list = d.0.diagnostic.get_display_list(&annotation_label, color);
     writeln!(f, "{}", display_list)?;
     // Print out the `Caused by:` trace (if exists) and rust backtrace (if enabled).
     // The trace printed comes from an [`anyhow::Error`] that is not a [`Diagnostic`].
-    if diagnostic.message.source().is_some() {
-        writeln!(f, "\n\n{:?}", diagnostic.message)?;
+    if with_context {
+        writeln!(f, "\n\n{:?}", d.inner())?;
     }
     Ok(())
 }
-
-fn diagnostic_stderr(diagnostic: &Diagnostic) {
-    let mut stderr = String::new();
-    diagnostic_display(diagnostic, true, &mut stderr).unwrap();
-    eprint!("{}", stderr);
-}
diff --git a/starlark-rust/starlark_syntax/src/dialect.rs b/starlark-rust/starlark_syntax/src/dialect.rs
index 3fa0ea7be1601..8805a0b01597d 100644
--- a/starlark-rust/starlark_syntax/src/dialect.rs
+++ b/starlark-rust/starlark_syntax/src/dialect.rs
@@ -35,29 +35,33 @@ pub enum DialectTypes {
 #[derive(Debug, Clone, Eq, PartialEq, Hash)]
 pub struct Dialect {
     /// Are `def` statements permitted.
- /// Enabled in both [`Standard`](Dialect::Standard) and [`Extended`](Dialect::Extended). + /// Enabled by default. pub enable_def: bool, /// Are `lambda` expressions permitted. - /// Enabled in both [`Standard`](Dialect::Standard) and [`Extended`](Dialect::Extended). + /// Enabled by default. pub enable_lambda: bool, /// Are `load` statements permitted. - /// Enabled in both [`Standard`](Dialect::Standard) and [`Extended`](Dialect::Extended). + /// Enabled by default. pub enable_load: bool, /// Are `*` keyword-only arguments allowed as per [PEP 3102](https://www.python.org/dev/peps/pep-3102/). - /// Only enabled in [`Extended`](Dialect::Extended). + /// Disabled by default. pub enable_keyword_only_arguments: bool, + /// Are `/` for positional-only arguments allowed. + pub enable_positional_only_arguments: bool, /// Are expressions allowed in type positions as per [PEP 484](https://www.python.org/dev/peps/pep-0484/). - /// Only enabled in [`Extended`](Dialect::Extended). + /// Disabled by default. pub enable_types: DialectTypes, /// Do `load()` statements reexport their definition. - /// Enabled in both [`Standard`](Dialect::Standard) and [`Extended`](Dialect::Extended), + /// Enabled by default, /// but may change in future definitions of the standard. pub enable_load_reexport: bool, /// Are `for`, `if` and other statements allowed at the top level. - /// Only enabled in [`Extended`](Dialect::Extended). + /// Disabled by default. pub enable_top_level_stmt: bool, /// Are `f"{expression}"` strings supported? - /// Disabled in all dialects by default. + /// Disabled by default. + /// + /// [Starlark spec proposal](https://github.com/bazelbuild/starlark/issues/91). pub enable_f_strings: bool, /// Like `#[non_exhaustive]`, but allows struct expression. /// @@ -83,6 +87,7 @@ impl Dialect { enable_lambda: true, enable_load: true, enable_keyword_only_arguments: false, + enable_positional_only_arguments: false, enable_types: DialectTypes::Disable, enable_load_reexport: true, // But they plan to change it enable_top_level_stmt: false, @@ -90,16 +95,33 @@ impl Dialect { _non_exhaustive: (), }; - /// A superset of [`Standard`](Dialect::Standard), including extra features (types, top-level statements etc). + /// This option is deprecated. Extend `Standard` instead. + #[doc(hidden)] pub const Extended: Self = Self { enable_def: true, enable_lambda: true, enable_load: true, enable_keyword_only_arguments: true, + enable_positional_only_arguments: false, enable_types: DialectTypes::Enable, enable_load_reexport: true, enable_top_level_stmt: true, enable_f_strings: false, _non_exhaustive: (), }; + + /// Only for starlark-rust self tests. + #[doc(hidden)] + pub const AllOptionsInternal: Self = Self { + enable_def: true, + enable_lambda: true, + enable_load: true, + enable_keyword_only_arguments: true, + enable_positional_only_arguments: true, + enable_types: DialectTypes::Enable, + enable_load_reexport: true, + enable_top_level_stmt: true, + enable_f_strings: true, + _non_exhaustive: (), + }; } diff --git a/starlark-rust/starlark_syntax/src/dot_format_parser.rs b/starlark-rust/starlark_syntax/src/dot_format_parser.rs index aafee8446f9f0..da9621aae9574 100644 --- a/starlark-rust/starlark_syntax/src/dot_format_parser.rs +++ b/starlark-rust/starlark_syntax/src/dot_format_parser.rs @@ -18,21 +18,32 @@ use std::mem; use std::ops::Deref; +use dupe::Dupe; + /// Parser for `.format()` arguments. pub struct FormatParser<'a> { view: StringView<'a>, } +/// Output the capture as `str` or `repr`. 
+#[derive(Debug, PartialEq, Copy, Clone, Dupe)] +pub enum FormatConv { + Str, + Repr, +} + /// Token in the format string. #[derive(Debug, PartialEq)] pub enum FormatToken<'a> { /// Text to copy verbatim to the output. Text(&'a str), Capture { - /// Format part inside curly braces. + /// Format part inside curly braces before the conversion. capture: &'a str, /// The position of this capture. This does not include the curly braces. pos: usize, + /// The conversion to apply to this capture. + conv: FormatConv, }, Escape(EscapeCurlyBrace), } @@ -82,22 +93,48 @@ impl<'a> FormatParser<'a> { } b'{' => { assert!(i == 0); - if self.view.starts_with("{{") { - self.view.eat(2); - return Ok(Some(FormatToken::Escape(EscapeCurlyBrace::Open))); - } + // Position of the identifier relative to the start of the format string. + let pos = self.view.pos() + 1; i = 1; while i < self.view.len() { match self.view.as_bytes()[i] { b'}' => { - let pos = self.view.pos(); - let capture = self.view.eat(i + 1); // Grab the closing brace. + let capture = &self.view.eat(i + 1)[1..i]; return Ok(Some(FormatToken::Capture { - capture: &capture[1..i], - pos: pos + 1, + capture, + pos, + conv: FormatConv::Str, })); } + b'!' => { + let capture = &self.view.eat(i + 1)[1..i]; + let conv = if self.view.rem().starts_with('r') { + FormatConv::Repr + } else if self.view.rem().starts_with('s') { + FormatConv::Str + } else if self.view.rem().starts_with('}') { + return Err(anyhow::anyhow!( + "Missing conversion character in format string `{}`", + self.view.original() + )); + } else { + return Err(anyhow::anyhow!( + "Invalid conversion in format string `{}`", + self.view.original() + )); + }; + self.view.eat(1); // `r` or `s` after the exclamation mark. + if !self.view.starts_with('}') { + break; + } + self.view.eat(1); // Closing brace. 
+ return Ok(Some(FormatToken::Capture { capture, pos, conv })); + } b'{' => { + if i == 1 { + self.view.eat(2); + return Ok(Some(FormatToken::Escape(EscapeCurlyBrace::Open))); + } break; } _ => i += 1, @@ -177,12 +214,13 @@ impl<'a> Deref for StringView<'a> { #[cfg(test)] mod tests { + use crate::dot_format_parser::FormatConv; use crate::dot_format_parser::FormatParser; use crate::dot_format_parser::FormatToken; #[test] fn test_parser_position() { - let s = "foo{x}bar{yz}baz"; + let s = "foo{x}bar{yz}baz{w!s}qux{v!r}quux"; let mut parser = FormatParser::new(s); assert_eq!(parser.next().unwrap(), Some(FormatToken::Text("foo"))); assert_eq!( @@ -190,6 +228,7 @@ mod tests { Some(FormatToken::Capture { capture: "x", pos: 4, + conv: FormatConv::Str, }) ); assert_eq!(parser.next().unwrap(), Some(FormatToken::Text("bar"))); @@ -198,9 +237,36 @@ mod tests { Some(FormatToken::Capture { capture: "yz", pos: 10, + conv: FormatConv::Str, }) ); assert_eq!(parser.next().unwrap(), Some(FormatToken::Text("baz"))); + assert_eq!( + parser.next().unwrap(), + Some(FormatToken::Capture { + capture: "w", + pos: 17, + conv: FormatConv::Str, + }) + ); + assert_eq!(parser.next().unwrap(), Some(FormatToken::Text("qux"))); + assert_eq!( + parser.next().unwrap(), + Some(FormatToken::Capture { + capture: "v", + pos: 25, + conv: FormatConv::Repr, + }) + ); + assert_eq!(parser.next().unwrap(), Some(FormatToken::Text("quux"))); assert_eq!(parser.next().unwrap(), None); } + + #[test] + fn test_failure() { + let s = "}foo"; + let mut parser = FormatParser::new(s); + let error_msg = parser.next().unwrap_err().to_string(); + assert_eq!(error_msg, "Standalone '}' in format string `}foo`"); + } } diff --git a/starlark-rust/starlark_syntax/src/error.rs b/starlark-rust/starlark_syntax/src/error.rs new file mode 100644 index 0000000000000..79d8b0be0d68b --- /dev/null +++ b/starlark-rust/starlark_syntax/src/error.rs @@ -0,0 +1,359 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::fmt; +use std::mem; + +use crate::call_stack::CallStack; +use crate::codemap::CodeMap; +use crate::codemap::FileSpan; +use crate::codemap::Span; +use crate::diagnostic::diagnostic_display; +use crate::diagnostic::WithDiagnostic; + +/// An error produced by starlark. +/// +/// This error is composed of an error kind, together with some diagnostic information indicating +/// where it occurred. +/// +/// In order to prevent accidental conversions to `anyhow::Error`, this type intentionally does not +/// implement `std::error::Error`. That should probably change in the future. 
+pub struct Error(pub(crate) WithDiagnostic<ErrorKind>);
+
+const _: () = assert!(mem::size_of::<Error>() == mem::size_of::<usize>());
+
+impl Error {
+    /// Create a new error
+    #[cold]
+    pub fn new_kind(kind: ErrorKind) -> Self {
+        Self(WithDiagnostic::new_empty(kind))
+    }
+
+    /// Create a new error with a span
+    #[cold]
+    pub fn new_spanned(kind: ErrorKind, span: Span, codemap: &CodeMap) -> Self {
+        Self(WithDiagnostic::new_spanned(kind, span, codemap))
+    }
+
+    /// Create a new error with no diagnostic and of kind [`ErrorKind::Other`]
+    #[cold]
+    pub fn new_other(e: impl Into<anyhow::Error>) -> Self {
+        Self(WithDiagnostic::new_empty(ErrorKind::Other(e.into())))
+    }
+
+    /// Create a new error with no diagnostic and of kind [`ErrorKind::Native`]
+    #[cold]
+    pub fn new_native(e: impl Into<anyhow::Error>) -> Self {
+        Self(WithDiagnostic::new_empty(ErrorKind::Native(e.into())))
+    }
+
+    /// Create a new error with no diagnostic and of kind [`ErrorKind::Value`]
+    #[cold]
+    pub fn new_value(e: impl Into<anyhow::Error>) -> Self {
+        Self(WithDiagnostic::new_empty(ErrorKind::Value(e.into())))
+    }
+
+    /// The kind of this error
+    pub fn kind(&self) -> &ErrorKind {
+        self.0.inner()
+    }
+
+    /// Convert the error into the underlying kind
+    pub fn into_kind(self) -> ErrorKind {
+        self.0.into_inner()
+    }
+
+    pub fn has_diagnostic(&self) -> bool {
+        self.0.span().is_some() || !self.0.call_stack().is_empty()
+    }
+
+    /// Convert this error into an `anyhow::Error`
+    #[cold]
+    pub fn into_anyhow(self) -> anyhow::Error {
+        struct Wrapped(Error);
+
+        impl fmt::Display for Wrapped {
+            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+                fmt::Display::fmt(&self.0, f)
+            }
+        }
+
+        impl fmt::Debug for Wrapped {
+            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+                fmt::Debug::fmt(&self.0, f)
+            }
+        }
+
+        impl std::error::Error for Wrapped {
+            fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
+                self.0.kind().source()
+            }
+        }
+
+        anyhow::Error::new(Wrapped(self))
+    }
+
+    /// Returns a value that can be used to format this error without including the diagnostic
+    /// information
+    ///
+    /// This is the same as [`kind`](crate::Error::kind), just a bit more explicit.
+    pub fn without_diagnostic<'a>(&'a self) -> impl fmt::Debug + fmt::Display + 'a {
+        self.0.inner()
+    }
+
+    pub fn span(&self) -> Option<&FileSpan> {
+        self.0.span()
+    }
+
+    pub fn call_stack(&self) -> &CallStack {
+        self.0.call_stack()
+    }
+
+    /// Set the span, unless it's already been set.
+    pub fn set_span(&mut self, span: Span, codemap: &CodeMap) {
+        self.0.set_span(span, codemap);
+    }
+
+    /// Set the `call_stack` field, unless it's already been set.
+    pub fn set_call_stack(&mut self, call_stack: impl FnOnce() -> CallStack) {
+        self.0.set_call_stack(call_stack);
+    }
+
+    /// Print an error to the stderr stream. If the error has diagnostic information it will use
+    /// color-codes when printing.
+    ///
+    /// Note that this function doesn't print any context information if the error is a diagnostic,
+    /// so you might prefer to use `eprintln!("{:#}", err)` if you suspect there is useful context
+    /// (although you won't get pretty colors).
+    pub fn eprint(&self) {
+        if self.has_diagnostic() {
+            let mut stderr = String::new();
+            diagnostic_display(&self.0, true, &mut stderr, true).unwrap();
+            eprint!("{}", stderr);
+        } else {
+            eprintln!("{:#}", self)
+        }
+    }
+
+    /// Change error kind to internal error.
+ pub fn into_internal_error(self) -> Error { + if let ErrorKind::Internal(_) = self.kind() { + self + } else { + Error(self.0.map(ErrorKind::into_internal_error)) + } + } +} + +fn fmt_impl(this: &Error, is_debug: bool, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if this.has_diagnostic() { + // Not showing the context trace without `{:#}` or `{:?}` is the same thing that anyhow does + let with_context = (f.alternate() || is_debug) && this.kind().source().is_some(); + diagnostic_display(&this.0, false, f, with_context) + } else { + fmt::Display::fmt(&this.without_diagnostic(), f) + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt_impl(self, false, f) + } +} + +impl fmt::Debug for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt_impl(self, true, f) + } +} + +/// The different kinds of errors that can be produced by starlark +#[non_exhaustive] +pub enum ErrorKind { + /// An explicit `fail` invocation + Fail(anyhow::Error), + /// Starlark call stack overflow. + StackOverflow(anyhow::Error), + /// An error approximately associated with a value. + /// + /// Includes unsupported operations, missing attributes, things of that sort. + Value(anyhow::Error), + /// Errors relating to the way a function is called (wrong number of args, etc.) + Function(anyhow::Error), + /// Out of scope variables and similar + Scope(anyhow::Error), + /// Syntax error. + Parser(anyhow::Error), + /// Indicates a logic bug in starlark + Internal(anyhow::Error), + /// Error from user provided native function + /// (but not from native functions provided by starlark crate). + /// When a native function declares `anyhow::Result<_>` + /// return type, it is automatically converted to this variant. + Native(anyhow::Error), + /// Fallback option + /// + /// For errors produced by starlark which have not yet been assigned their own kind + Other(anyhow::Error), +} + +impl ErrorKind { + /// The source of the error, akin to `[std::error::Error::source]` + pub fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + Self::Fail(_) => None, + Self::StackOverflow(_) => None, + Self::Value(_) => None, + Self::Function(_) => None, + Self::Scope(_) => None, + Self::Parser(_) => None, + Self::Internal(_) => None, + Self::Native(e) => e.source(), + Self::Other(e) => e.source(), + } + } + + /// Change type to `Internal`. 
+    pub(crate) fn into_internal_error(self) -> ErrorKind {
+        match self {
+            ErrorKind::Internal(e)
+            | ErrorKind::Fail(e)
+            | ErrorKind::Value(e)
+            | ErrorKind::Function(e)
+            | ErrorKind::Scope(e)
+            | ErrorKind::Parser(e)
+            | ErrorKind::StackOverflow(e)
+            | ErrorKind::Native(e)
+            | ErrorKind::Other(e) => ErrorKind::Internal(e),
+        }
+    }
+}
+
+impl fmt::Debug for ErrorKind {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::Fail(s) => write!(f, "fail:{}", s),
+            Self::Value(e) => fmt::Debug::fmt(e, f),
+            Self::StackOverflow(e) => fmt::Debug::fmt(e, f),
+            Self::Function(e) => fmt::Debug::fmt(e, f),
+            Self::Scope(e) => fmt::Debug::fmt(e, f),
+            Self::Parser(e) => fmt::Debug::fmt(e, f),
+            Self::Internal(e) => write!(f, "Internal error: {}", e),
+            Self::Native(e) => fmt::Debug::fmt(e, f),
+            Self::Other(e) => fmt::Debug::fmt(e, f),
+        }
+    }
+}
+
+impl fmt::Display for ErrorKind {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::Fail(s) => write!(f, "fail:{}", s),
+            Self::StackOverflow(e) => fmt::Display::fmt(e, f),
+            Self::Value(e) => fmt::Display::fmt(e, f),
+            Self::Function(e) => fmt::Display::fmt(e, f),
+            Self::Scope(e) => fmt::Display::fmt(e, f),
+            Self::Parser(e) => fmt::Display::fmt(e, f),
+            Self::Internal(e) => write!(f, "Internal error: {}", e),
+            Self::Native(e) => fmt::Display::fmt(e, f),
+            Self::Other(e) => fmt::Display::fmt(e, f),
+        }
+    }
+}
+
+impl From<anyhow::Error> for Error {
+    #[cold]
+    fn from(e: anyhow::Error) -> Self {
+        Self(WithDiagnostic::new_empty(ErrorKind::Other(e)))
+    }
+}
+
+pub trait StarlarkResultExt<T> {
+    fn into_anyhow_result(self) -> anyhow::Result<T>;
+}
+
+impl<T> StarlarkResultExt<T> for crate::Result<T> {
+    #[inline]
+    fn into_anyhow_result(self) -> anyhow::Result<T> {
+        self.map_err(Error::into_anyhow)
+    }
+}
+
+#[doc(hidden)]
+#[cold]
+pub fn internal_error_impl(args: fmt::Arguments<'_>) -> Error {
+    Error::new_kind(ErrorKind::Internal(anyhow::anyhow!("{}", args)))
+}
+
+#[doc(hidden)]
+#[cold]
+pub fn other_error_impl(args: fmt::Arguments<'_>) -> Error {
+    Error::new_kind(ErrorKind::Other(anyhow::anyhow!("{}", args)))
+}
+
+#[doc(hidden)]
+#[cold]
+pub fn value_error_impl(args: fmt::Arguments<'_>) -> Error {
+    Error::new_kind(ErrorKind::Value(anyhow::anyhow!("{}", args)))
+}
+
+#[doc(hidden)]
+#[cold]
+pub fn function_error_impl(args: fmt::Arguments<'_>) -> Error {
+    Error::new_kind(ErrorKind::Function(anyhow::anyhow!("{}", args)))
+}
+
+/// Internal error of starlark.
+#[macro_export]
+macro_rules! internal_error {
+    ($format:literal) => {
+        internal_error!($format,)
+    };
+    ($format:literal, $($args:tt)*) => {
+        $crate::error::internal_error_impl(format_args!($format, $($args)*))
+    };
+}
+
+#[macro_export]
+macro_rules! other_error {
+    ($format:literal) => {
+        other_error!($format,)
+    };
+    ($format:literal, $($args:tt)*) => {
+        $crate::error::other_error_impl(format_args!($format, $($args)*))
+    };
+}
+
+#[macro_export]
+macro_rules! value_error {
+    ($format:literal) => {
+        value_error!($format,)
+    };
+    ($format:literal, $($args:tt)*) => {
+        $crate::error::value_error_impl(format_args!($format, $($args)*))
+    };
+}
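Each of these macros forwards `format_args!` to the matching `*_error_impl` constructor, so call sites read like `anyhow::anyhow!` but land in a specific `ErrorKind`. The two arms exist so that a call with no arguments still routes through `format_args!`: the no-argument arm simply recurses with a trailing comma. The same trampoline pattern, standalone (illustrative names, returning `String` instead of `Error`):

    use std::fmt;

    fn my_error_impl(args: fmt::Arguments<'_>) -> String {
        format!("error: {}", args)
    }

    macro_rules! my_error {
        ($format:literal) => {
            my_error!($format,) // recurse into the variadic arm
        };
        ($format:literal, $($args:tt)*) => {
            my_error_impl(format_args!($format, $($args)*))
        };
    }

    fn main() {
        assert_eq!(my_error!("boom"), "error: boom");
        assert_eq!(my_error!("bad {}", 7), "error: bad 7");
    }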
+#[macro_export]
+macro_rules! function_error {
+    ($format:literal) => {
+        function_error!($format,)
+    };
+    ($format:literal, $($args:tt)*) => {
+        $crate::error::function_error_impl(format_args!($format, $($args)*))
+    };
+}
diff --git a/starlark-rust/starlark_syntax/src/eval_exception.rs b/starlark-rust/starlark_syntax/src/eval_exception.rs
index ef1690333e634..3e6b010a07c3a 100644
--- a/starlark-rust/starlark_syntax/src/eval_exception.rs
+++ b/starlark-rust/starlark_syntax/src/eval_exception.rs
@@ -15,41 +15,95 @@
  * limitations under the License.
  */
 
+use std::fmt::Display;
+
+use crate::call_stack::CallStack;
 use crate::codemap::CodeMap;
 use crate::codemap::Span;
-use crate::diagnostic::Diagnostic;
+use crate::diagnostic::WithDiagnostic;
+use crate::internal_error;
 
 /// Error with location.
 #[derive(Debug, derive_more::Display)]
 pub struct EvalException(
-    /// Error is `Diagnostic`, but stored as `anyhow::Error` for smaller size.
-    anyhow::Error,
+    /// Error is guaranteed to have a diagnostic
+    crate::Error,
 );
 
 impl EvalException {
-    /// Error must be `Diagnostic`.
     #[cold]
-    pub fn unchecked_new(error: anyhow::Error) -> EvalException {
+    pub fn into_error(self) -> crate::Error {
+        self.0
+    }
+
+    #[cold]
+    pub fn into_internal_error(self) -> Self {
+        EvalException(self.0.into_internal_error())
+    }
+
+    #[cold]
+    pub fn new(mut error: crate::Error, span: Span, codemap: &CodeMap) -> EvalException {
+        error.set_span(span, codemap);
         EvalException(error)
     }
 
+    /// `EvalException` is meant to provide type-safe guard against missing span.
+    /// Sometimes we need to construct `EvalException`, but span is not available,
+    /// so this function can be used. Avoid this function if possible.
     #[cold]
-    pub fn into_anyhow(self) -> anyhow::Error {
-        self.0
+    pub fn new_unknown_span(error: crate::Error) -> EvalException {
+        EvalException(error)
     }
 
     #[cold]
-    pub fn new(error: anyhow::Error, span: Span, codemap: &CodeMap) -> EvalException {
-        EvalException(Diagnostic::new(error, span, codemap))
+    pub fn new_with_callstack(
+        mut error: crate::Error,
+        span: Span,
+        codemap: &CodeMap,
+        call_stack: impl FnOnce() -> CallStack,
+    ) -> EvalException {
+        error.set_span(span, codemap);
+        error.set_call_stack(call_stack);
+        EvalException(error)
     }
 
-    pub fn _testing_loc(mut err: &anyhow::Error) -> crate::codemap::ResolvedFileSpan {
-        if let Some(eval_exc) = err.downcast_ref::<EvalException>() {
-            err = &eval_exc.0;
-        }
-        match err.downcast_ref::<Diagnostic>() {
-            Some(d) => d.span.as_ref().unwrap().resolve(),
-            None => panic!("Expected Diagnostic, got {:#?}", err),
+    #[cold]
+    pub fn new_anyhow(error: anyhow::Error, span: Span, codemap: &CodeMap) -> EvalException {
+        EvalException(crate::Error::new_spanned(
+            crate::ErrorKind::Other(error),
+            span,
+            codemap,
+        ))
+    }
+
+    #[cold]
+    pub fn internal_error(error: impl Display, span: Span, codemap: &CodeMap) -> EvalException {
+        Self::new(internal_error!("{}", error), span, codemap)
+    }
+
+    #[cold]
+    pub(crate) fn parser_error(
+        error: impl Display,
+        span: Span,
+        codemap: &CodeMap,
+    ) -> EvalException {
+        EvalException(crate::Error::new_spanned(
+            crate::ErrorKind::Parser(anyhow::anyhow!("{error}")),
+            span,
+            codemap,
+        ))
+    }
+
+    pub fn _testing_loc(err: &crate::Error) -> crate::codemap::ResolvedFileSpan {
+        match err.span() {
+            Some(d) => d.resolve(),
+            None => panic!("Expected error with diagnostic, got {:#?}", err),
         }
     }
 }
+
+impl<T: Into<crate::error::ErrorKind>> From<WithDiagnostic<T>> for EvalException {
+    fn from(e: WithDiagnostic<T>) -> Self {
+        Self(e.into())
+    }
+}
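Taken together, the `eval_exception.rs` changes above make "has a span" a property enforced at construction time: every constructor except `new_unknown_span` attaches one. A hedged sketch of the intended flow, using the constructors shown in this diff (it assumes a `CodeMap::full_span()` accessor, which is not shown here):

    let codemap = CodeMap::new("f.star".to_owned(), "boom()\n".to_owned());
    let err = crate::Error::new_other(anyhow::anyhow!("boom"));
    let exc = EvalException::new(err, codemap.full_span(), &codemap);
    // The resulting error is guaranteed to carry a diagnostic span.
    assert!(exc.into_error().span().is_some());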
diff --git a/starlark-rust/starlark_syntax/src/golden_test_template.rs b/starlark-rust/starlark_syntax/src/golden_test_template.rs
index c3c9d78b58946..0250d468b9e33 100644
--- a/starlark-rust/starlark_syntax/src/golden_test_template.rs
+++ b/starlark-rust/starlark_syntax/src/golden_test_template.rs
@@ -31,7 +31,8 @@ fn make_golden(output: &str) -> String {
     writeln!(golden, "# ```").unwrap();
     writeln!(
         golden,
-        "# {REGENERATE_VAR_NAME}=1 cargo test -p starlark --lib tests"
+        // TODO(nga): fix instruction for `starlark_syntax` crate.
+        "# {REGENERATE_VAR_NAME}=1 cargo test -p starlark --lib"
     )
     .unwrap();
     writeln!(golden, "# ```").unwrap();
diff --git a/starlark-rust/starlark_syntax/src/lexer.rs b/starlark-rust/starlark_syntax/src/lexer.rs
index a1c0b65ac23bd..73320434ee16b 100644
--- a/starlark-rust/starlark_syntax/src/lexer.rs
+++ b/starlark-rust/starlark_syntax/src/lexer.rs
@@ -59,6 +59,12 @@ pub enum LexemeError {
     CannotParse(String, u32),
 }
 
+impl From<LexemeError> for crate::error::Error {
+    fn from(e: LexemeError) -> Self {
+        crate::error::Error::new_kind(crate::error::ErrorKind::Parser(anyhow::Error::new(e)))
+    }
+}
+
 type LexemeT<T> = Result<(usize, T, usize), EvalException>;
 type Lexeme = LexemeT<Token>;
 
@@ -591,7 +597,7 @@ pub enum TokenInt {
 }
 
 impl TokenInt {
-    pub fn from_str_radix(s: &str, base: u32) -> anyhow::Result<TokenInt> {
+    pub fn from_str_radix(s: &str, base: u32) -> crate::Result<TokenInt> {
         if let Ok(i) = i32::from_str_radix(s, base) {
             Ok(TokenInt::I32(i))
         } else {
@@ -960,7 +966,7 @@ pub fn lex_exactly_one_identifier(s: &str) -> Option<String> {
 }
 
 #[cfg(test)]
-mod test {
+mod tests {
     use crate::lexer::lex_exactly_one_identifier;
 
     #[test]
diff --git a/starlark-rust/starlark_syntax/src/lexer_tests.rs b/starlark-rust/starlark_syntax/src/lexer_tests.rs
index 6c5437c0f3549..6d836cce40f91 100644
--- a/starlark-rust/starlark_syntax/src/lexer_tests.rs
+++ b/starlark-rust/starlark_syntax/src/lexer_tests.rs
@@ -53,13 +53,13 @@ fn lex_tokens(program: &str) -> Vec<(usize, Token, usize)> {
         }
     }
 
-    let orig = tokens(&Dialect::Extended, program);
+    let orig = tokens(&Dialect::AllOptionsInternal, program);
     check_spans(&orig);
 
     // In Starlark Windows newline characters shouldn't change the lex tokens (only the positions), so run that test too.
     // First convert \r\n to \n, in case we started with Windows newlines, so we don't get \r\r\n.
diff --git a/starlark-rust/starlark_syntax/src/lexer_tests.rs b/starlark-rust/starlark_syntax/src/lexer_tests.rs index 6c5437c0f3549..6d836cce40f91 100644 --- a/starlark-rust/starlark_syntax/src/lexer_tests.rs +++ b/starlark-rust/starlark_syntax/src/lexer_tests.rs
@@ -53,13 +53,13 @@ fn lex_tokens(program: &str) -> Vec<(usize, Token, usize)> { } } - let orig = tokens(&Dialect::Extended, program); + let orig = tokens(&Dialect::AllOptionsInternal, program); check_spans(&orig); // In Starlark Windows newline characters shouldn't change the lex tokens (only the positions), so run that test too. // First convert \r\n to \n, in case we started with Windows newlines, so we don't get \r\r\n. let with_r = tokens( - &Dialect::Extended, + &Dialect::AllOptionsInternal, &program.replace("\r\n", "\n").replace('\n', "\r\n"), ); check_spans(&with_r);
@@ -113,7 +113,7 @@ fn lexer_fail_golden_test(name: &str, programs: &[&str]) { let e = Lexer::new( program, - &Dialect::Extended, + &Dialect::AllOptionsInternal, CodeMap::new("x".to_owned(), program.to_owned()), ) .collect::<Result<Vec<_>, _>>()
diff --git a/starlark-rust/starlark_syntax/src/lexer_tests/comment.golden b/starlark-rust/starlark_syntax/src/lexer_tests/comment.golden index 532b62860b883..31986e07271a4 100644 --- a/starlark-rust/starlark_syntax/src/lexer_tests/comment.golden +++ b/starlark-rust/starlark_syntax/src/lexer_tests/comment.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program:
diff --git a/starlark-rust/starlark_syntax/src/lexer_tests/dedent.golden b/starlark-rust/starlark_syntax/src/lexer_tests/dedent.golden index 60c3db69945d1..8353345b43903 100644 --- a/starlark-rust/starlark_syntax/src/lexer_tests/dedent.golden +++ b/starlark-rust/starlark_syntax/src/lexer_tests/dedent.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program:
diff --git a/starlark-rust/starlark_syntax/src/lexer_tests/error_messages.fail.golden b/starlark-rust/starlark_syntax/src/lexer_tests/error_messages.fail.golden index e88c2f170064d..e2ebc6bd7567d 100644 --- a/starlark-rust/starlark_syntax/src/lexer_tests/error_messages.fail.golden +++ b/starlark-rust/starlark_syntax/src/lexer_tests/error_messages.fail.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program:
diff --git a/starlark-rust/starlark_syntax/src/lexer_tests/escape_newline.golden b/starlark-rust/starlark_syntax/src/lexer_tests/escape_newline.golden index 022a9465cfdf2..8e57f0650347f 100644 --- a/starlark-rust/starlark_syntax/src/lexer_tests/escape_newline.golden +++ b/starlark-rust/starlark_syntax/src/lexer_tests/escape_newline.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program:
diff --git a/starlark-rust/starlark_syntax/src/lexer_tests/f_string.golden b/starlark-rust/starlark_syntax/src/lexer_tests/f_string.golden index f650750b77066..0db94f26d0f51 100644 --- a/starlark-rust/starlark_syntax/src/lexer_tests/f_string.golden +++ b/starlark-rust/starlark_syntax/src/lexer_tests/f_string.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program:
diff --git a/starlark-rust/starlark_syntax/src/lexer_tests/final_comment.golden b/starlark-rust/starlark_syntax/src/lexer_tests/final_comment.golden index 7a8f9d2725b7e..6729c7403c35f 100644 --- a/starlark-rust/starlark_syntax/src/lexer_tests/final_comment.golden +++ b/starlark-rust/starlark_syntax/src/lexer_tests/final_comment.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -#
STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program: diff --git a/starlark-rust/starlark_syntax/src/lexer_tests/float_lit.golden b/starlark-rust/starlark_syntax/src/lexer_tests/float_lit.golden index 4a5f5795f2e11..23362b9f12e46 100644 --- a/starlark-rust/starlark_syntax/src/lexer_tests/float_lit.golden +++ b/starlark-rust/starlark_syntax/src/lexer_tests/float_lit.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program: diff --git a/starlark-rust/starlark_syntax/src/lexer_tests/identifier.golden b/starlark-rust/starlark_syntax/src/lexer_tests/identifier.golden index 143fa5df6857c..199a479a62d85 100644 --- a/starlark-rust/starlark_syntax/src/lexer_tests/identifier.golden +++ b/starlark-rust/starlark_syntax/src/lexer_tests/identifier.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program: diff --git a/starlark-rust/starlark_syntax/src/lexer_tests/indentation.golden b/starlark-rust/starlark_syntax/src/lexer_tests/indentation.golden index ffc7453a56c02..687df9bea2d5f 100644 --- a/starlark-rust/starlark_syntax/src/lexer_tests/indentation.golden +++ b/starlark-rust/starlark_syntax/src/lexer_tests/indentation.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program: diff --git a/starlark-rust/starlark_syntax/src/lexer_tests/int_lit.fail.golden b/starlark-rust/starlark_syntax/src/lexer_tests/int_lit.fail.golden index c10cd76bcf3c4..5f5515d080b5f 100644 --- a/starlark-rust/starlark_syntax/src/lexer_tests/int_lit.fail.golden +++ b/starlark-rust/starlark_syntax/src/lexer_tests/int_lit.fail.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program: diff --git a/starlark-rust/starlark_syntax/src/lexer_tests/int_lit.golden b/starlark-rust/starlark_syntax/src/lexer_tests/int_lit.golden index 07cfdd57b073b..7252661b0b752 100644 --- a/starlark-rust/starlark_syntax/src/lexer_tests/int_lit.golden +++ b/starlark-rust/starlark_syntax/src/lexer_tests/int_lit.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program: diff --git a/starlark-rust/starlark_syntax/src/lexer_tests/keywords.golden b/starlark-rust/starlark_syntax/src/lexer_tests/keywords.golden index 86601ed1c11cf..9ce0a2650c859 100644 --- a/starlark-rust/starlark_syntax/src/lexer_tests/keywords.golden +++ b/starlark-rust/starlark_syntax/src/lexer_tests/keywords.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program: diff --git a/starlark-rust/starlark_syntax/src/lexer_tests/multiline_triple.golden 
b/starlark-rust/starlark_syntax/src/lexer_tests/multiline_triple.golden index 2b794611683f7..fb235e6106210 100644 --- a/starlark-rust/starlark_syntax/src/lexer_tests/multiline_triple.golden +++ b/starlark-rust/starlark_syntax/src/lexer_tests/multiline_triple.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program: diff --git a/starlark-rust/starlark_syntax/src/lexer_tests/number_collated_with_keywords_or_identifier.golden b/starlark-rust/starlark_syntax/src/lexer_tests/number_collated_with_keywords_or_identifier.golden index 04bac6c46d855..ef636116eb886 100644 --- a/starlark-rust/starlark_syntax/src/lexer_tests/number_collated_with_keywords_or_identifier.golden +++ b/starlark-rust/starlark_syntax/src/lexer_tests/number_collated_with_keywords_or_identifier.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program: diff --git a/starlark-rust/starlark_syntax/src/lexer_tests/operators.golden b/starlark-rust/starlark_syntax/src/lexer_tests/operators.golden index 30e09965c02f8..dbfc5e2434ae5 100644 --- a/starlark-rust/starlark_syntax/src/lexer_tests/operators.golden +++ b/starlark-rust/starlark_syntax/src/lexer_tests/operators.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program: diff --git a/starlark-rust/starlark_syntax/src/lexer_tests/reserved.fail.golden b/starlark-rust/starlark_syntax/src/lexer_tests/reserved.fail.golden index 0f73d7acd282f..571ff8529c88e 100644 --- a/starlark-rust/starlark_syntax/src/lexer_tests/reserved.fail.golden +++ b/starlark-rust/starlark_syntax/src/lexer_tests/reserved.fail.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program: diff --git a/starlark-rust/starlark_syntax/src/lexer_tests/simple_example.golden b/starlark-rust/starlark_syntax/src/lexer_tests/simple_example.golden index 19a8c378f2804..cbe0c23fb767f 100644 --- a/starlark-rust/starlark_syntax/src/lexer_tests/simple_example.golden +++ b/starlark-rust/starlark_syntax/src/lexer_tests/simple_example.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program: diff --git a/starlark-rust/starlark_syntax/src/lexer_tests/string_escape.fail.golden b/starlark-rust/starlark_syntax/src/lexer_tests/string_escape.fail.golden index a0b04e641059a..47dd3242165e3 100644 --- a/starlark-rust/starlark_syntax/src/lexer_tests/string_escape.fail.golden +++ b/starlark-rust/starlark_syntax/src/lexer_tests/string_escape.fail.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program: diff --git a/starlark-rust/starlark_syntax/src/lexer_tests/string_escape.golden 
b/starlark-rust/starlark_syntax/src/lexer_tests/string_escape.golden index 701ea2b79781e..83ff76bf9bc37 100644 --- a/starlark-rust/starlark_syntax/src/lexer_tests/string_escape.golden +++ b/starlark-rust/starlark_syntax/src/lexer_tests/string_escape.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program:
diff --git a/starlark-rust/starlark_syntax/src/lexer_tests/string_lit.fail.golden b/starlark-rust/starlark_syntax/src/lexer_tests/string_lit.fail.golden index 7c09ebbe00160..b790f6eb775ee 100644 --- a/starlark-rust/starlark_syntax/src/lexer_tests/string_lit.fail.golden +++ b/starlark-rust/starlark_syntax/src/lexer_tests/string_lit.fail.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program:
diff --git a/starlark-rust/starlark_syntax/src/lexer_tests/symbols.golden b/starlark-rust/starlark_syntax/src/lexer_tests/symbols.golden index 17d34d665316e..1a8fe2c9fb4f0 100644 --- a/starlark-rust/starlark_syntax/src/lexer_tests/symbols.golden +++ b/starlark-rust/starlark_syntax/src/lexer_tests/symbols.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program:
diff --git a/starlark-rust/starlark_syntax/src/lib.rs b/starlark-rust/starlark_syntax/src/lib.rs index 7b14b14b463b5..3768ef5163a4e 100644 --- a/starlark-rust/starlark_syntax/src/lib.rs +++ b/starlark-rust/starlark_syntax/src/lib.rs
@@ -20,9 +20,16 @@ #![allow(clippy::comparison_chain)] #![allow(clippy::comparison_to_empty)] #![allow(clippy::len_without_is_empty)] +#![allow(clippy::needless_lifetimes)] #![allow(clippy::new_ret_no_self)] #![allow(clippy::should_implement_trait)] +pub use crate::error::Error; +pub use crate::error::ErrorKind; +pub use crate::error::StarlarkResultExt; + +pub type Result<T> = std::result::Result<T, crate::Error>; + pub mod call_stack; pub mod codemap; pub mod convert_indices;
@@ -30,6 +37,7 @@ pub(crate) mod cursors; pub mod diagnostic; pub mod dialect; pub mod dot_format_parser; +pub mod error; pub mod eval_exception; pub mod fast_string; pub mod frame;
diff --git a/starlark-rust/starlark_syntax/src/slice_vec_ext.rs b/starlark-rust/starlark_syntax/src/slice_vec_ext.rs index 5ee69bfba3fc0..2e486b09af29e 100644 --- a/starlark-rust/starlark_syntax/src/slice_vec_ext.rs +++ b/starlark-rust/starlark_syntax/src/slice_vec_ext.rs
@@ -38,7 +38,7 @@ fn collect_result<T, E>(mut it: impl ExactSizeIterator<Item = Result<T, E>>) -> Result<Vec<T>, E> { } } -/// Extension traits on slices/[`Vec`](Vec). +/// Extension traits on slices/[`Vec`]. pub trait SliceExt { type Item;
@@ -71,7 +71,7 @@ impl<T> SliceExt for [T] { } } -/// Extension traits on [`Vec`](Vec). +/// Extension traits on [`Vec`]. pub trait VecExt { type Item;
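Between the re-exports and the alias, error handling gets a single spelling across the crate. A short sketch of my own, not from the patch; it assumes `Error::new_kind` is public, mirroring how the `From<LexemeError>` impl above builds errors:

```rust
use starlark_syntax::Error;
use starlark_syntax::ErrorKind;

// The new alias pins the error type: `starlark_syntax::Result<T>` is
// `Result<T, starlark_syntax::Error>`. Building a parser error by hand
// follows the `ErrorKind::Parser` pattern used for `LexemeError`.
fn require_identifier(s: &str) -> starlark_syntax::Result<String> {
    match starlark_syntax::lexer::lex_exactly_one_identifier(s) {
        Some(ident) => Ok(ident),
        None => Err(Error::new_kind(ErrorKind::Parser(anyhow::anyhow!(
            "not exactly one identifier: `{s}`"
        )))),
    }
}
```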
diff --git a/starlark-rust/starlark_syntax/src/syntax.rs b/starlark-rust/starlark_syntax/src/syntax.rs new file mode 100644 index 0000000000000..dc5186a1af5ee --- /dev/null +++ b/starlark-rust/starlark_syntax/src/syntax.rs @@ -0,0 +1,58 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! The AST of Starlark as [`AstModule`], along with a [`parse`](AstModule::parse) function. + +pub use module::AstModule; +pub use parser::AstLoad; + +pub use crate::dialect::Dialect; +pub use crate::dialect::DialectTypes; + +pub mod ast; +pub mod call; +pub mod def; +#[cfg(test)] +mod grammar_tests; +pub mod grammar_util; +mod lint_suppressions; +pub mod module; +pub mod parser; +pub mod payload_map; +pub(crate) mod state; +#[cfg(test)] +mod testcases; +pub mod top_level_stmts; +pub mod type_expr; +pub mod uniplate; +pub mod validate; + +#[allow(clippy::all)] +// Things we explicitly turn on need to be explicitly turned off +#[allow(clippy::inefficient_to_string)] +#[allow(clippy::trivially_copy_pass_by_ref)] +#[allow(clippy::too_many_arguments)] +#[allow(clippy::cloned_instead_of_copied)] +#[allow(clippy::type_complexity)] +#[allow(clippy::needless_lifetimes)] +#[allow(clippy::single_match)] +#[allow(unused_extern_crates)] +#[allow(unused_braces)] + +mod grammar { + include!(concat!(env!("OUT_DIR"), "/syntax/grammar.rs")); +}
diff --git a/starlark-rust/starlark_syntax/src/syntax/ast.rs b/starlark-rust/starlark_syntax/src/syntax/ast.rs index 31b8ab780112b..9da96bba4c78e 100644 --- a/starlark-rust/starlark_syntax/src/syntax/ast.rs +++ b/starlark-rust/starlark_syntax/src/syntax/ast.rs
@@ -115,12 +115,17 @@ pub enum ArgumentP<P: AstPayload> { #[derive(Debug, Clone)] pub enum ParameterP<P: AstPayload> { - Normal(AstAssignIdentP<P>, Option<Box<AstTypeExprP<P>>>), - WithDefaultValue( + /// `/` marker. + Slash, + Normal( + /// Name. AstAssignIdentP<P>, + /// Type. Option<Box<AstTypeExprP<P>>>, - Box<AstExprP<P>>, + /// Default value. + Option<Box<AstExprP<P>>>, ), + /// `*` marker. NoArgs, Args(AstAssignIdentP<P>, Option<Box<AstTypeExprP<P>>>), KwArgs(AstAssignIdentP<P>, Option<Box<AstTypeExprP<P>>>),
@@ -129,11 +134,10 @@ pub enum ParameterP<P: AstPayload> { impl<P: AstPayload> ParameterP<P> { pub fn ident(&self) -> Option<&AstAssignIdentP<P>> { match self { - ParameterP::Normal(x, _) - | ParameterP::WithDefaultValue(x, _, _) - | ParameterP::Args(x, _) - | ParameterP::KwArgs(x, _) => Some(x), - ParameterP::NoArgs => None, + ParameterP::Normal(x, _, _) | ParameterP::Args(x, _) | ParameterP::KwArgs(x, _) => { + Some(x) + } + ParameterP::NoArgs | ParameterP::Slash => None, } } }
@@ -166,11 +170,16 @@ impl<P: AstPayload> LambdaP<P> { } } +#[derive(Debug, Clone)] +pub struct CallArgsP<P: AstPayload> { + pub args: Vec<AstArgumentP<P>>, +} + #[derive(Debug, Clone)] pub enum ExprP<P: AstPayload> { Tuple(Vec<AstExprP<P>>), Dot(Box<AstExprP<P>>, AstString), - Call(Box<AstExprP<P>>, Vec<AstArgumentP<P>>), + Call(Box<AstExprP<P>>, CallArgsP<P>), Index(Box<(AstExprP<P>, AstExprP<P>)>), Index2(Box<(AstExprP<P>, AstExprP<P>, AstExprP<P>)>), Slice(
@@ -334,7 +343,7 @@ pub enum Visibility { Public, } -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct DefP<P: AstPayload> { pub name: AstAssignIdentP<P>, pub params: Vec<AstParameterP<P>>,
@@ -356,7 +365,7 @@ impl<P: AstPayload> DefP<P> { } } -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct ForP<P: AstPayload> { pub var: AstAssignTargetP<P>, pub over: AstExprP<P>,
@@ -371,7 +380,7 @@ pub struct FStringP<P: AstPayload> { pub expressions: Vec<AstExprP<P>>, } -#[derive(Debug)] +#[derive(Debug, Clone)] pub enum StmtP<P: AstPayload> { Break, Continue,
@@ -406,6 +415,14 @@ impl<P: AstPayload> ArgumentP<P> { ArgumentP::KwArgs(x) => x, } } + + /// Argument name if it is named. + pub fn name(&self) -> Option<&str> { + match self { + ArgumentP::Named(name, _) => Some(&name.node), + _ => None, + } + } } impl Display for BinOp {
@@ -522,7 +539,7 @@ impl Display for Expr { } Expr::Call(e, args) => { write!(f, "{}(", e.node)?; - for (i, x) in args.iter().enumerate() { + for (i, x) in args.args.iter().enumerate() { if i != 0 { f.write_str(", ")?; }
@@ -651,8 +668,8 @@ impl Display for Argument { impl Display for Parameter { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { let (prefix, name, typ, default) = match self { - Parameter::Normal(s, t) => ("", s, t, None), - Parameter::WithDefaultValue(s, t, e) => ("", s, t, Some(e)), + Parameter::Slash => return write!(f, "/"), + Parameter::Normal(s, t, e) => ("", s, t, e.as_ref()), Parameter::NoArgs => return write!(f, "*"), Parameter::Args(s, t) => ("*", s, t, None), Parameter::KwArgs(s, t) => ("**", s, t, None),
diff --git a/starlark-rust/starlark_syntax/src/syntax/call.rs b/starlark-rust/starlark_syntax/src/syntax/call.rs new file mode 100644 index 0000000000000..99ee8d421ea25 --- /dev/null +++ b/starlark-rust/starlark_syntax/src/syntax/call.rs @@ -0,0 +1,126 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::collections::HashSet; + +use crate::codemap::CodeMap; +use crate::codemap::Span; +use crate::eval_exception::EvalException; +use crate::syntax::ast::ArgumentP; +use crate::syntax::ast::AstArgumentP; +use crate::syntax::ast::AstPayload; +use crate::syntax::ast::CallArgsP; + +/// Validated call arguments. +pub struct CallArgsUnpack<'a, P: AstPayload> { + pub pos: &'a [AstArgumentP<P>], + pub named: &'a [AstArgumentP<P>], + pub star: Option<&'a AstArgumentP<P>>, + pub star_star: Option<&'a AstArgumentP<P>>, +} + +#[derive(Eq, PartialEq, PartialOrd, Ord)] +enum ArgsStage { + Positional, + Named, + Args, + Kwargs, +} + +impl<'a, P: AstPayload> CallArgsUnpack<'a, P> { + pub fn unpack(args: &'a CallArgsP<P>, codemap: &CodeMap) -> Result<CallArgsUnpack<'a, P>, EvalException> { + let err = |span, msg: &str| Err(EvalException::parser_error(msg, span, codemap)); + + let args = &args.args; + + let mut stage = ArgsStage::Positional; + let mut named_args = HashSet::new(); + let mut num_pos = 0; + let mut num_named = 0; + let mut star = None; + let mut star_star = None; + for arg in args { + match &arg.node { + ArgumentP::Positional(_) => { + if stage != ArgsStage::Positional { + return err(arg.span, "positional argument after non positional"); + } else { + num_pos += 1; + } + } + ArgumentP::Named(n, _) => { + if stage > ArgsStage::Named { + return err(arg.span, "named argument after *args or **kwargs"); + } else if !named_args.insert(&n.node) { + // Check the names are distinct + return err(n.span, "repeated named argument"); + } else { + stage = ArgsStage::Named; + num_named += 1; + } + } + ArgumentP::Args(_) => { + if stage > ArgsStage::Named { + return err(arg.span, "Args array after another args or kwargs"); + } else { + stage = ArgsStage::Args; + if star.is_some() { + return Err(EvalException::internal_error( + "Multiple *args in arguments", + arg.span, + codemap, + )); + } + star = Some(arg); + } + } + ArgumentP::KwArgs(_) => { + if stage == ArgsStage::Kwargs { + return err(arg.span, "Multiple kwargs dictionary in arguments"); + } else { + stage = ArgsStage::Kwargs; + if star_star.is_some() { + return Err(EvalException::internal_error( + "Multiple **kwargs in arguments", + arg.span, + codemap, + )); + } + star_star = Some(arg); + } + } + } + } + + if num_pos + num_named + (star.is_some() as usize) + (star_star.is_some() as usize) + != args.len() + { + return Err(EvalException::internal_error( + "Argument count mismatch", + Span::merge_all(args.iter().map(|x| x.span)), + codemap, + )); + } + + Ok(CallArgsUnpack { + pos: &args[..num_pos], + named: &args[num_pos..num_pos + num_named], + star, + star_star, + }) + } +}
diff --git a/starlark-rust/starlark_syntax/src/syntax/def.rs b/starlark-rust/starlark_syntax/src/syntax/def.rs index 672df7782a26c..5f7ccf1329d6b 100644 --- a/starlark-rust/starlark_syntax/src/syntax/def.rs +++ b/starlark-rust/starlark_syntax/src/syntax/def.rs
@@ -16,8 +16,13 @@ */ use std::collections::HashSet; +use std::ops::Range; + +use allocative::Allocative; +use dupe::Dupe; use crate::codemap::CodeMap; +use crate::codemap::Span; use crate::codemap::Spanned; use crate::eval_exception::EvalException; use crate::syntax::ast::AstAssignIdentP;
@@ -27,22 +32,16 @@ use crate::syntax::ast::AstExprP; use crate::syntax::ast::AstPayload; use crate::syntax::ast::AstTypeExprP; use crate::syntax::ast::ParameterP; -#[derive(Debug, thiserror::Error)] -enum DefError { - #[error("duplicated parameter name")] - DuplicateParameterName, - #[error("positional parameter after non positional")] - PositionalThenNonPositional, - #[error("Default parameter after args array or kwargs dictionary")] - DefaultParameterAfterStars, - #[error("Args parameter after another args or kwargs parameter")] - ArgsParameterAfterStars, - #[error("Multiple kwargs dictionary in parameters")] - MultipleKwargs, +#[derive(Debug, Clone, Copy, Dupe, PartialEq, Eq)] +pub enum DefRegularParamMode { + PosOnly, + PosOrName, + NameOnly, } pub enum DefParamKind<'a, P: AstPayload> { Regular( + DefRegularParamMode, /// Default value. Option<&'a AstExprP<P>>, ),
@@ -56,15 +55,51 @@ pub struct DefParam<'a, P: AstPayload> { pub ty: Option<&'a AstTypeExprP<P>>, } +/// Parameters internally in starlark-rust are commonly represented as a flat list of parameters, +/// with markers `/` and `*` omitted. +/// This struct contains sizes and indices to split the list into parts. +#[derive( + Copy, Clone, Dupe, Debug, Eq, PartialEq, Hash, Ord, PartialOrd, Allocative +)] +pub struct DefParamIndices { + /// Number of parameters which can be filled positionally. + /// That is, number of parameters before first `*`, `*args` or `**kwargs`. + pub num_positional: u32, + /// Number of parameters which can only be filled positionally. + /// Always less or equal to `num_positional`. + pub num_positional_only: u32, + /// Index of `*args` parameter, if any. + /// If present, equal to `num_positional`. + pub args: Option<u32>, + /// Index of `**kwargs` parameter, if any. + /// If present, equal to the number of parameters minus 1. + pub kwargs: Option<u32>, +} + +impl DefParamIndices { + pub fn pos_only(&self) -> Range<usize> { + 0..self.num_positional_only as usize + } + + pub fn pos_or_named(&self) -> Range<usize> { + self.num_positional_only as usize..self.num_positional as usize + } + + pub fn named_only(&self, param_count: usize) -> Range<usize> { + self.args + .map(|a| a as usize + 1) + .unwrap_or(self.num_positional as usize) + ..self.kwargs.unwrap_or(param_count as u32) as usize + } +} + /// Post-processed AST for function parameters. /// /// * Validated /// * `*` parameter replaced with `num_positional` field pub struct DefParams<'a, P: AstPayload> { pub params: Vec<Spanned<DefParam<'a, P>>>, - /// Number of parameters which can be filled positionally. - /// That is, number of parameters before first `*`, `*args` or `**kwargs`. - pub num_positional: u32, + pub indices: DefParamIndices, } fn check_param_name<'a, P: AstPayload, T>(
@@ -74,8 +109,8 @@ codemap: &CodeMap, ) -> Result<(), EvalException> { if !argset.insert(n.node.ident.as_str()) { - return Err(EvalException::new( - DefError::DuplicateParameterName.into(), + return Err(EvalException::parser_error( + "duplicated parameter name", arg.span, codemap, ));
@@ -88,78 +123,156 @@ impl<'a, P: AstPayload> DefParams<'a, P> { ast_params: &'a [AstParameterP<P>], codemap: &CodeMap, ) -> Result<DefParams<'a, P>, EvalException> { + #[derive(Ord, PartialOrd, Eq, PartialEq)] + enum State { + Normal, + /// After `/`. + SeenSlash, + /// After `*` or `*args`. + SeenStar, + /// After `**kwargs`. + SeenStarStar, + } + // you can't repeat argument names let mut argset = HashSet::new(); // You can't have more than one *args/*, **kwargs // **kwargs must be last // You can't have a required `x` after an optional `y=1` - let mut seen_args = false; - let mut seen_kwargs = false; let mut seen_optional = false; let mut params = Vec::with_capacity(ast_params.len()); - let mut num_positional = None; + let mut num_positional = 0; + let mut args = None; + let mut kwargs = None; + + // Index of `*` parameter, if any. + let mut index_of_star = None; + + let num_positional_only = match ast_params + .iter() + .position(|p| matches!(p.node, ParameterP::Slash)) + { + None => 0, + Some(0) => { + return Err(EvalException::parser_error( + "`/` cannot be first parameter", + ast_params[0].span, + codemap, + )); + } + Some(n) => match n.try_into() { + Ok(n) => n, + Err(_) => { + return Err(EvalException::parser_error( + format_args!("Too many parameters: {}", ast_params.len()), + Span::merge_all(ast_params.iter().map(|p| p.span)), + codemap, + )); + } + }, + }; + + let mut state = if num_positional_only == 0 { + State::SeenSlash + } else { + State::Normal + }; for (i, param) in ast_params.iter().enumerate() { let span = param.span; + + if let Some(name) = param.ident() { + check_param_name(&mut argset, name, param, codemap)?; + } + match &param.node { - ParameterP::Normal(n, ty) => { - if seen_kwargs || seen_optional { - return Err(EvalException::new( - DefError::PositionalThenNonPositional.into(), + ParameterP::Normal(n, ty, default_value) => { + if state >= State::SeenStarStar { + return Err(EvalException::parser_error( + "Parameter after kwargs", param.span, codemap, )); } - check_param_name(&mut argset, n, param, codemap)?; + match default_value { + None => { + if seen_optional && state < State::SeenStar { + return Err(EvalException::parser_error( + "positional parameter after non positional", + param.span, + codemap, + )); + } + } + Some(_default_value) => { + seen_optional = true; + } + } + if state < State::SeenStar { + num_positional += 1; + } + let mode = if state < State::SeenSlash { + DefRegularParamMode::PosOnly + } else if state < State::SeenStar { + DefRegularParamMode::PosOrName + } else { + DefRegularParamMode::NameOnly + }; params.push(Spanned { span, node: DefParam { ident: n, - kind: DefParamKind::Regular(None), + kind: DefParamKind::Regular(mode, default_value.as_deref()), ty: ty.as_deref(), }, }); } - ParameterP::WithDefaultValue(n, ty, default_value) => { - if seen_kwargs { - return Err(EvalException::new( - DefError::DefaultParameterAfterStars.into(), + ParameterP::NoArgs => { + if state >= State::SeenStar { + return Err(EvalException::parser_error( + "Args parameter after another args or kwargs parameter", param.span, codemap, )); } - seen_optional = true; - check_param_name(&mut argset, n, param, codemap)?; - params.push(Spanned { - span, - node: DefParam { - ident: n, - kind: DefParamKind::Regular(Some(default_value)), - ty: ty.as_deref(), - }, - }); + state = State::SeenStar; + if index_of_star.is_some() { + return Err(EvalException::internal_error( + "Multiple `*` in parameters, must have been caught earlier", + param.span, + codemap, + )); + } + index_of_star = Some(i); } - ParameterP::NoArgs => { - if seen_args || seen_kwargs { - return Err(EvalException::new( -
DefError::ArgsParameterAfterStars.into(), + ParameterP::Slash => { + if state >= State::SeenSlash { + return Err(EvalException::parser_error( + "Multiple `/` in parameters", param.span, codemap, )); } - seen_args = true; + state = State::SeenSlash; } ParameterP::Args(n, ty) => { - if seen_args || seen_kwargs { - return Err(EvalException::new( - DefError::ArgsParameterAfterStars.into(), + if state >= State::SeenStar { + return Err(EvalException::parser_error( + "Args parameter after another args or kwargs parameter", param.span, codemap, )); } - seen_args = true; - check_param_name(&mut argset, n, param, codemap)?; + state = State::SeenStar; + if args.is_some() { + return Err(EvalException::internal_error( + "Multiple *args", + param.span, + codemap, + )); + } + args = Some(params.len().try_into().unwrap()); params.push(Spanned { span, node: DefParam { @@ -170,15 +283,22 @@ impl<'a, P: AstPayload> DefParams<'a, P> { }); } ParameterP::KwArgs(n, ty) => { - if seen_kwargs { - return Err(EvalException::new( - DefError::MultipleKwargs.into(), + if state >= State::SeenStarStar { + return Err(EvalException::parser_error( + "Multiple kwargs dictionary in parameters", + param.span, + codemap, + )); + } + if kwargs.is_some() { + return Err(EvalException::internal_error( + "Multiple **kwargs", param.span, codemap, )); } - seen_kwargs = true; - check_param_name(&mut argset, n, param, codemap)?; + kwargs = Some(params.len().try_into().unwrap()); + state = State::SeenStarStar; params.push(Spanned { span, node: DefParam { @@ -189,19 +309,157 @@ impl<'a, P: AstPayload> DefParams<'a, P> { }); } } + } - if matches!( - param.node, - ParameterP::Args(..) | ParameterP::KwArgs(..) | ParameterP::NoArgs - ) { - if num_positional.is_none() { - num_positional = Some(i); + if let Some(index_of_star) = index_of_star { + let Some(next) = ast_params.get(index_of_star + 1) else { + return Err(EvalException::parser_error( + "`*` parameter must not be last", + ast_params[index_of_star].span, + codemap, + )); + }; + match &next.node { + ParameterP::Normal(..) => {} + ParameterP::KwArgs(_, _) + | ParameterP::Args(_, _) + | ParameterP::NoArgs + | ParameterP::Slash => { + // We get here only for `**kwargs`, the rest is handled above. 
+ return Err(EvalException::parser_error( + "`*` must be followed by named parameter", + next.span, + codemap, + )); } } } + Ok(DefParams { - num_positional: u32::try_from(num_positional.unwrap_or(params.len())).unwrap(), params, + indices: DefParamIndices { + num_positional: u32::try_from(num_positional).unwrap(), + num_positional_only, + args, + kwargs, + }, }) } } + +#[cfg(test)] +mod tests { + use crate::golden_test_template::golden_test_template; + use crate::syntax::AstModule; + use crate::syntax::Dialect; + + fn fails_dialect(test_name: &str, program: &str, dialect: &Dialect) { + let e = AstModule::parse("test.star", program.to_owned(), dialect).unwrap_err(); + let text = format!("Program:\n{program}\n\nError: {e}\n"); + golden_test_template(&format!("src/syntax/def_tests/{test_name}.golden"), &text); + } + + fn fails(test_name: &str, program: &str) { + fails_dialect(test_name, program, &Dialect::AllOptionsInternal); + } + + fn passes(program: &str) { + AstModule::parse( + "test.star", + program.to_owned(), + &Dialect::AllOptionsInternal, + ) + .unwrap(); + } + + #[test] + fn test_params_unpack() { + fails("dup_name", "def test(x, y, x): pass"); + fails("pos_after_default", "def test(x=1, y): pass"); + fails("default_after_kwargs", "def test(**kwargs, y=1): pass"); + fails("args_args", "def test(*x, *y): pass"); + fails("kwargs_args", "def test(**x, *y): pass"); + fails("kwargs_kwargs", "def test(**x, **y): pass"); + + passes("def test(x, y, z=1, *args, **kwargs): pass"); + } + + #[test] + fn test_params_noargs() { + fails("star_star", "def test(*, *): pass"); + fails("normal_after_default", "def test(x, y=1, z): pass"); + + passes("def test(*args, x): pass"); + passes("def test(*args, x=1): pass"); + passes("def test(*args, x, y=1): pass"); + passes("def test(x=1, *args, y): pass"); + passes("def test(*args, x, y=1, z): pass"); + passes("def test(*, x, y=1, z): pass"); + } + + #[test] + fn test_star_cannot_be_last() { + fails("star_cannot_be_last", "def test(x, *): pass"); + } + + #[test] + fn test_star_then_args() { + fails("star_then_args", "def test(x, *, *args): pass"); + } + + #[test] + fn test_star_then_kwargs() { + fails("star_then_kwargs", "def test(x, *, **kwargs): pass"); + } + + #[test] + fn test_positional_only() { + passes("def test(x, /): pass"); + } + + #[test] + fn test_positional_only_cannot_be_first() { + fails("positional_only_cannot_be_first", "def test(/, x): pass"); + } + + #[test] + fn test_slash_slash() { + fails("slash_slash", "def test(x, /, y, /): pass"); + } + + #[test] + fn test_named_only_in_standard_dialect_def() { + fails_dialect( + "named_only_in_standard_dialect_def", + "def test(*, x): pass", + &Dialect::Standard, + ); + } + + #[test] + fn test_named_only_in_standard_dialect_lambda() { + fails_dialect( + "named_only_in_standard_dialect_lambda", + "lambda *, x: 17", + &Dialect::Standard, + ); + } + + #[test] + fn test_positional_only_in_standard_dialect_def() { + fails_dialect( + "positional_only_in_standard_dialect_def", + "def test(/, x): pass", + &Dialect::Standard, + ); + } + + #[test] + fn test_positional_only_in_standard_dialect_lambda() { + fails_dialect( + "positional_only_in_standard_dialect_lambda", + "lambda /, x: 17", + &Dialect::Standard, + ); + } +}
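The `DefParamIndices` arithmetic above is easier to check against a concrete signature. A worked sketch follows; the signature and numbers are my own illustration, not from the patch. For `def f(a, b, /, c, d=1, *args, e, **kwargs)` the flat parameter list with the `/` and `*` markers dropped is `[a, b, c, d, args, e, kwargs]`, giving:

```rust
use starlark_syntax::syntax::def::DefParamIndices;

fn def_param_indices_demo() {
    // def f(a, b, /, c, d=1, *args, e, **kwargs)
    // flat params: [a, b, c, d, args, e, kwargs]
    let indices = DefParamIndices {
        num_positional: 4,      // a, b, c, d may be filled positionally
        num_positional_only: 2, // a, b sit before the `/` marker
        args: Some(4),          // `*args` index, equal to num_positional
        kwargs: Some(6),        // `**kwargs` index, number of params minus 1
    };
    assert_eq!(indices.pos_only(), 0..2); // a, b
    assert_eq!(indices.pos_or_named(), 2..4); // c, d
    assert_eq!(indices.named_only(7), 5..6); // e
}
```

Note that `named_only` starts right after `*args` when one is present, and falls back to `num_positional` for a bare `*` marker, matching the invariants in the field comments.

diff --git a/starlark-rust/starlark_syntax/src/syntax/def_tests/args_args.golden b/starlark-rust/starlark_syntax/src/syntax/def_tests/args_args.golden new file mode 100644 index 0000000000000..cf6c090ce2e6c --- /dev/null +++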
b/starlark-rust/starlark_syntax/src/syntax/def_tests/args_args.golden @@ -0,0 +1,15 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Program: +def test(*x, *y): pass + +Error: error: Args parameter after another args or kwargs parameter + --> test.star:1:14 + | +1 | def test(*x, *y): pass + | ^^ + | diff --git a/starlark-rust/starlark_syntax/src/syntax/def_tests/default_after_kwargs.golden b/starlark-rust/starlark_syntax/src/syntax/def_tests/default_after_kwargs.golden new file mode 100644 index 0000000000000..39143e6a5ea3b --- /dev/null +++ b/starlark-rust/starlark_syntax/src/syntax/def_tests/default_after_kwargs.golden @@ -0,0 +1,15 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Program: +def test(**kwargs, y=1): pass + +Error: error: Parameter after kwargs + --> test.star:1:20 + | +1 | def test(**kwargs, y=1): pass + | ^^^ + | diff --git a/starlark-rust/starlark_syntax/src/syntax/def_tests/dup_name.golden b/starlark-rust/starlark_syntax/src/syntax/def_tests/dup_name.golden new file mode 100644 index 0000000000000..0e8c5a69be1a6 --- /dev/null +++ b/starlark-rust/starlark_syntax/src/syntax/def_tests/dup_name.golden @@ -0,0 +1,15 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Program: +def test(x, y, x): pass + +Error: error: duplicated parameter name + --> test.star:1:16 + | +1 | def test(x, y, x): pass + | ^ + | diff --git a/starlark-rust/starlark_syntax/src/syntax/def_tests/kwargs_args.golden b/starlark-rust/starlark_syntax/src/syntax/def_tests/kwargs_args.golden new file mode 100644 index 0000000000000..b98674dc841a9 --- /dev/null +++ b/starlark-rust/starlark_syntax/src/syntax/def_tests/kwargs_args.golden @@ -0,0 +1,15 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Program: +def test(**x, *y): pass + +Error: error: Args parameter after another args or kwargs parameter + --> test.star:1:15 + | +1 | def test(**x, *y): pass + | ^^ + | diff --git a/starlark-rust/starlark_syntax/src/syntax/def_tests/kwargs_kwargs.golden b/starlark-rust/starlark_syntax/src/syntax/def_tests/kwargs_kwargs.golden new file mode 100644 index 0000000000000..55ca0e1224eba --- /dev/null +++ b/starlark-rust/starlark_syntax/src/syntax/def_tests/kwargs_kwargs.golden @@ -0,0 +1,15 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Program: +def test(**x, **y): pass + +Error: error: Multiple kwargs dictionary in parameters + --> test.star:1:15 + | +1 | def test(**x, **y): pass + | ^^^ + | diff --git a/starlark-rust/starlark_syntax/src/syntax/def_tests/named_only_in_standard_dialect_def.golden b/starlark-rust/starlark_syntax/src/syntax/def_tests/named_only_in_standard_dialect_def.golden new file mode 100644 index 0000000000000..7ec9f6c114501 --- /dev/null +++ b/starlark-rust/starlark_syntax/src/syntax/def_tests/named_only_in_standard_dialect_def.golden @@ -0,0 +1,15 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Program: +def test(*, x): pass + +Error: error: * keyword-only-arguments is not allowed in this dialect + --> test.star:1:10 + | +1 | def test(*, x): pass + | ^ + | diff --git 
a/starlark-rust/starlark_syntax/src/syntax/def_tests/named_only_in_standard_dialect_lambda.golden b/starlark-rust/starlark_syntax/src/syntax/def_tests/named_only_in_standard_dialect_lambda.golden new file mode 100644 index 0000000000000..3c00c195c0e45 --- /dev/null +++ b/starlark-rust/starlark_syntax/src/syntax/def_tests/named_only_in_standard_dialect_lambda.golden @@ -0,0 +1,15 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Program: +lambda *, x: 17 + +Error: error: * keyword-only-arguments is not allowed in this dialect + --> test.star:1:8 + | +1 | lambda *, x: 17 + | ^ + | diff --git a/starlark-rust/starlark_syntax/src/syntax/def_tests/normal_after_default.golden b/starlark-rust/starlark_syntax/src/syntax/def_tests/normal_after_default.golden new file mode 100644 index 0000000000000..0714c6843f80f --- /dev/null +++ b/starlark-rust/starlark_syntax/src/syntax/def_tests/normal_after_default.golden @@ -0,0 +1,15 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Program: +def test(x, y=1, z): pass + +Error: error: positional parameter after non positional + --> test.star:1:18 + | +1 | def test(x, y=1, z): pass + | ^ + | diff --git a/starlark-rust/starlark_syntax/src/syntax/def_tests/pos_after_default.golden b/starlark-rust/starlark_syntax/src/syntax/def_tests/pos_after_default.golden new file mode 100644 index 0000000000000..94141c0bc69f6 --- /dev/null +++ b/starlark-rust/starlark_syntax/src/syntax/def_tests/pos_after_default.golden @@ -0,0 +1,15 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Program: +def test(x=1, y): pass + +Error: error: positional parameter after non positional + --> test.star:1:15 + | +1 | def test(x=1, y): pass + | ^ + | diff --git a/starlark-rust/starlark_syntax/src/syntax/def_tests/positional_only_cannot_be_first.golden b/starlark-rust/starlark_syntax/src/syntax/def_tests/positional_only_cannot_be_first.golden new file mode 100644 index 0000000000000..54f81e45ee6a7 --- /dev/null +++ b/starlark-rust/starlark_syntax/src/syntax/def_tests/positional_only_cannot_be_first.golden @@ -0,0 +1,15 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Program: +def test(/, x): pass + +Error: error: `/` cannot be first parameter + --> test.star:1:10 + | +1 | def test(/, x): pass + | ^ + | diff --git a/starlark-rust/starlark_syntax/src/syntax/def_tests/positional_only_in_standard_dialect_def.golden b/starlark-rust/starlark_syntax/src/syntax/def_tests/positional_only_in_standard_dialect_def.golden new file mode 100644 index 0000000000000..2ca86c7b28dc0 --- /dev/null +++ b/starlark-rust/starlark_syntax/src/syntax/def_tests/positional_only_in_standard_dialect_def.golden @@ -0,0 +1,15 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Program: +def test(/, x): pass + +Error: error: / positional-only-arguments is not allowed in this dialect + --> test.star:1:10 + | +1 | def test(/, x): pass + | ^ + | diff --git a/starlark-rust/starlark_syntax/src/syntax/def_tests/positional_only_in_standard_dialect_lambda.golden b/starlark-rust/starlark_syntax/src/syntax/def_tests/positional_only_in_standard_dialect_lambda.golden new file mode 100644 index 0000000000000..21f4a772b3aec --- /dev/null +++ 
b/starlark-rust/starlark_syntax/src/syntax/def_tests/positional_only_in_standard_dialect_lambda.golden @@ -0,0 +1,15 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Program: +lambda /, x: 17 + +Error: error: / positional-only-arguments is not allowed in this dialect + --> test.star:1:8 + | +1 | lambda /, x: 17 + | ^ + | diff --git a/starlark-rust/starlark_syntax/src/syntax/def_tests/slash_slash.golden b/starlark-rust/starlark_syntax/src/syntax/def_tests/slash_slash.golden new file mode 100644 index 0000000000000..02cd0f8bde6ea --- /dev/null +++ b/starlark-rust/starlark_syntax/src/syntax/def_tests/slash_slash.golden @@ -0,0 +1,15 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Program: +def test(x, /, y, /): pass + +Error: error: Multiple `/` in parameters + --> test.star:1:19 + | +1 | def test(x, /, y, /): pass + | ^ + | diff --git a/starlark-rust/starlark_syntax/src/syntax/def_tests/star_cannot_be_last.golden b/starlark-rust/starlark_syntax/src/syntax/def_tests/star_cannot_be_last.golden new file mode 100644 index 0000000000000..99282eff6297d --- /dev/null +++ b/starlark-rust/starlark_syntax/src/syntax/def_tests/star_cannot_be_last.golden @@ -0,0 +1,15 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Program: +def test(x, *): pass + +Error: error: `*` parameter must not be last + --> test.star:1:13 + | +1 | def test(x, *): pass + | ^ + | diff --git a/starlark-rust/starlark_syntax/src/syntax/def_tests/star_star.golden b/starlark-rust/starlark_syntax/src/syntax/def_tests/star_star.golden new file mode 100644 index 0000000000000..822feb37b997a --- /dev/null +++ b/starlark-rust/starlark_syntax/src/syntax/def_tests/star_star.golden @@ -0,0 +1,15 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Program: +def test(*, *): pass + +Error: error: Args parameter after another args or kwargs parameter + --> test.star:1:13 + | +1 | def test(*, *): pass + | ^ + | diff --git a/starlark-rust/starlark_syntax/src/syntax/def_tests/star_then_args.golden b/starlark-rust/starlark_syntax/src/syntax/def_tests/star_then_args.golden new file mode 100644 index 0000000000000..feb92312557b6 --- /dev/null +++ b/starlark-rust/starlark_syntax/src/syntax/def_tests/star_then_args.golden @@ -0,0 +1,15 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Program: +def test(x, *, *args): pass + +Error: error: Args parameter after another args or kwargs parameter + --> test.star:1:16 + | +1 | def test(x, *, *args): pass + | ^^^^^ + | diff --git a/starlark-rust/starlark_syntax/src/syntax/def_tests/star_then_kwargs.golden b/starlark-rust/starlark_syntax/src/syntax/def_tests/star_then_kwargs.golden new file mode 100644 index 0000000000000..ee5c624d95b7d --- /dev/null +++ b/starlark-rust/starlark_syntax/src/syntax/def_tests/star_then_kwargs.golden @@ -0,0 +1,15 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Program: +def test(x, *, **kwargs): pass + +Error: error: `*` must be followed by named parameter + --> test.star:1:16 + | +1 | def test(x, *, **kwargs): pass + | ^^^^^^^^ + | diff --git a/starlark-rust/starlark_syntax/src/syntax/grammar.lalrpop 
b/starlark-rust/starlark_syntax/src/syntax/grammar.lalrpop index 6aad2fce67bff..3df2df1d7a08c 100644 --- a/starlark-rust/starlark_syntax/src/syntax/grammar.lalrpop +++ b/starlark-rust/starlark_syntax/src/syntax/grammar.lalrpop
@@ -74,31 +74,40 @@ COMMA<E>: Vec<E> = pub(crate) Starlark: AstStmt = <l:@L> "\n"* <s:(<Stmt> "\n"*)*> <r:@R> => grammar_util::statements(s, l, r); -DefStmt: AstStmt = ASTS<DefStmt_> =>? Ok(grammar_util::dialect_check_def(state.dialect, state.codemap, <>)?); +DefStmt: AstStmt = ASTS<DefStmt_>; DefStmt_: Stmt = - "def" <Identifier> "(" <COMMA<ParameterTyped>> ")" <ReturnType> ":" <Suite> - => grammar_util::check_def(<>, state); + "def" <name:AssignIdent> "(" <params:COMMA<DefParameter>> ")" <return_type:ReturnType> ":" <stmts:Suite> + => StmtP::Def(DefP { + name, + params, + return_type, + body: Box::new(stmts), + payload: (), + }); ReturnType: Option<Box<AstTypeExpr>> = { "->" <TypeExpr> => Some(Box::new(<>)), => None, } -Parameter: AstParameter = ASTP<Parameter_>; -Parameter_: Parameter = { - <n:AssignIdent> "=" <e:Test> => Parameter::WithDefaultValue(n, None, Box::new(e)), - <AssignIdent> => Parameter::Normal(<>, None), +// Lambda parameter cannot have type annotations. +LambdaParameter: AstParameter = ASTP<LambdaParameter_>; +LambdaParameter_: Parameter = { + "/" => Parameter::Slash, + <n:AssignIdent> "=" <e:Test> => Parameter::Normal(n, None, Some(Box::new(e))), + <AssignIdent> => Parameter::Normal(<>, None, None), "*" <AssignIdent> => Parameter::Args(<>, None), - <l:@L> "*" <r:@R> =>? Ok(grammar_util::dialect_check_keyword_only_arguments(state.dialect, state.codemap, l, r, Parameter::NoArgs)?), + "*" => Parameter::NoArgs, "**" <AssignIdent> => Parameter::KwArgs(<>, None), }; -ParameterTyped: AstParameter = ASTP<ParameterTyped_>; -ParameterTyped_: Parameter = { - <n:AssignIdent> <t:TypeAnnotation?> "=" <e:Test> => Parameter::WithDefaultValue(n, t, Box::new(e)), - <AssignIdent> <TypeAnnotation?> => Parameter::Normal(<>), +DefParameter: AstParameter = ASTP<DefParameter_>; +DefParameter_: Parameter = { + "/" => Parameter::Slash, + <n:AssignIdent> <t:TypeAnnotation?> "=" <e:Test> => Parameter::Normal(n, t, Some(Box::new(e))), + <AssignIdent> <TypeAnnotation?> => Parameter::Normal(<>, None), "*" <AssignIdent> <TypeAnnotation?> => Parameter::Args(<>), - <l:@L> "*" <r:@R> =>? Ok(grammar_util::dialect_check_keyword_only_arguments(state.dialect, state.codemap, l, r, Parameter::NoArgs)?), + "*" => Parameter::NoArgs, "**" <AssignIdent> <TypeAnnotation?> => Parameter::KwArgs(<>), };
@@ -231,7 +240,7 @@ PrimaryExpr: AstExpr = { <l:@L> <e:PrimaryExpr> "." <i:Identifier> <r:@R> => Expr::Dot(Box::new(e), i).ast(l, r), <l:@L> <e:PrimaryExpr> "(" <a:COMMA<Argument>> ")" <r:@R> - =>? Ok(Expr::check_call(e, a, state.codemap)?.ast(l, r)), + =>? Ok(Expr::check_call(e, a, state).ast(l, r)), <l:@L> <e:PrimaryExpr> "[" <i1:Test?> ":" <i2:Test?> <i3:(":" <Test?>)?> "]" <r:@R> => { Expr::Slice(Box::new(e), i1.map(|x| Box::new(x)), i2.map(|x| Box::new(x)), i3.unwrap_or(None).map(|x| Box::new(x))) }
@@ -312,10 +321,14 @@ Test: AstExpr = { LambDef }; -LambDef: AstExpr = { - <l:@L> "lambda" <p:COMMA<Parameter>> ":" <e:Test> <r:@R> - =>? Ok(grammar_util::dialect_check_lambda(state.dialect, state.codemap, grammar_util::check_lambda(p, e, state).ast(l, r))?), -} +LambDef: AstExpr = ASTE<LambDef_>; +LambDef_: Expr = "lambda" <p:COMMA<LambdaParameter>> ":" <e:Test> => { + Expr::Lambda(LambdaP { + params: p, + body: Box::new(e), + payload: (), + }) +}; // Binary operators OrTest: AstExpr = {
diff --git a/starlark-rust/starlark_syntax/src/syntax/grammar_tests.rs b/starlark-rust/starlark_syntax/src/syntax/grammar_tests.rs index 37703f8303ee9..4716e5f429dfa 100644 --- a/starlark-rust/starlark_syntax/src/syntax/grammar_tests.rs +++ b/starlark-rust/starlark_syntax/src/syntax/grammar_tests.rs
@@ -51,11 +51,11 @@ fn parse_fail_with_dialect(name: &str, dialect: &Dialect, program: &str) { } fn parse_fail(name: &str, program: &str) { - parse_fail_with_dialect(name, &Dialect::Extended, program); + parse_fail_with_dialect(name, &Dialect::AllOptionsInternal, program); } fn parse_fails(name: &str, programs: &[&str]) { - parse_fails_with_dialect(name, &Dialect::Extended, programs); + parse_fails_with_dialect(name, &Dialect::AllOptionsInternal, programs); } #[test]
@@ -130,7 +130,7 @@ fn test_top_level_def() { "top_level_def", &Dialect { enable_def: false, - ..Dialect::Extended + ..Dialect::AllOptionsInternal }, "def toto():\n pass", );
@@ -151,7 +151,7 @@ fn test_top_level_statements() { let no_top_leve_stmt = Dialect { enable_top_level_stmt: false, - ..Dialect::Extended + ..Dialect::AllOptionsInternal }; parse_fails_with_dialect( "top_level_statements",
@@ -288,7 +288,7 @@ fn test_lambda() { "lambda", &Dialect { enable_lambda: false, - ..Dialect::Extended + ..Dialect::AllOptionsInternal }, "x = lambda y: y + 1", );
@@ -321,7 +321,7 @@ fn test_ellipsis() { "ellipsis", &Dialect { enable_types: DialectTypes::Disable, - ..Dialect::Extended + ..Dialect::AllOptionsInternal }, &["x = ..."], );
@@ -347,7 +347,8 @@ fn test_op_associativity() { assert_eq!(parse("1 * 2 * 3"), "((1 * 2) * 3)\n"); // Comparisons are not associative // TODO - create a better error message for this case - let err = AstModule::parse("x", "0 <= 1 < 2".to_owned(), &Dialect::Extended).unwrap_err(); + let err = + AstModule::parse("x", "0 <= 1 < 2".to_owned(), &Dialect::AllOptionsInternal).unwrap_err(); assert!(err.to_string().contains("Parse error"), "{}", err); }
@@ -377,7 +378,7 @@ pub fn parse(program: &str) -> String { } pub fn parse_ast(program: &str) -> AstModule { - parse_ast_with_dialect(program, &Dialect::Extended) + parse_ast_with_dialect(program, &Dialect::AllOptionsInternal) } fn parse_with_dialect(program: &str, dialect: &Dialect) -> String {
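With the grammar refactor, `/` and `*` are ordinary parameter tokens and the dialect and parameter-order checks move out of grammar actions into `def.rs`. A small sketch of the observable behavior, assuming only the public `AstModule::parse` entry point exercised by the tests above:

```rust
use starlark_syntax::syntax::AstModule;
use starlark_syntax::syntax::Dialect;

fn markers_demo() {
    // The extended dialect accepts positional-only (`/`) and named-only (`*`)
    // markers in one signature.
    AstModule::parse(
        "demo.star",
        "def f(x, /, y, *, z): pass".to_owned(),
        &Dialect::AllOptionsInternal,
    )
    .unwrap();

    // `Dialect::Standard` still rejects the `/` marker, per the
    // `positional_only_in_standard_dialect_def` golden test.
    assert!(
        AstModule::parse(
            "demo.star",
            "def f(x, /, y): pass".to_owned(),
            &Dialect::Standard,
        )
        .is_err()
    );
}
```

diff --git a/starlark-rust/starlark_syntax/src/syntax/grammar_tests/assignment_type_annotation.golden b/starlark-rust/starlark_syntax/src/syntax/grammar_tests/assignment_type_annotation.golden index 0cbce7e3cd7dc..049a7e8af5151 100644 --- a/starlark-rust/starlark_syntax/src/syntax/grammar_tests/assignment_type_annotation.golden +++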
b/starlark-rust/starlark_syntax/src/syntax/grammar_tests/bad_assignment.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program: diff --git a/starlark-rust/starlark_syntax/src/syntax/grammar_tests/ellipsis.golden b/starlark-rust/starlark_syntax/src/syntax/grammar_tests/ellipsis.golden index f244309ec8cb6..bb6f4c7d60a9c 100644 --- a/starlark-rust/starlark_syntax/src/syntax/grammar_tests/ellipsis.golden +++ b/starlark-rust/starlark_syntax/src/syntax/grammar_tests/ellipsis.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program: diff --git a/starlark-rust/starlark_syntax/src/syntax/grammar_tests/lambda.golden b/starlark-rust/starlark_syntax/src/syntax/grammar_tests/lambda.golden index fdc9f90fd2258..95d3ac58a59fd 100644 --- a/starlark-rust/starlark_syntax/src/syntax/grammar_tests/lambda.golden +++ b/starlark-rust/starlark_syntax/src/syntax/grammar_tests/lambda.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program: diff --git a/starlark-rust/starlark_syntax/src/syntax/grammar_tests/list_in_index_expr.golden b/starlark-rust/starlark_syntax/src/syntax/grammar_tests/list_in_index_expr.golden index 3720db7601773..2326aca9a0616 100644 --- a/starlark-rust/starlark_syntax/src/syntax/grammar_tests/list_in_index_expr.golden +++ b/starlark-rust/starlark_syntax/src/syntax/grammar_tests/list_in_index_expr.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program: diff --git a/starlark-rust/starlark_syntax/src/syntax/grammar_tests/top_level_def.golden b/starlark-rust/starlark_syntax/src/syntax/grammar_tests/top_level_def.golden index d08a551d514fb..e2481916a77b7 100644 --- a/starlark-rust/starlark_syntax/src/syntax/grammar_tests/top_level_def.golden +++ b/starlark-rust/starlark_syntax/src/syntax/grammar_tests/top_level_def.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program: diff --git a/starlark-rust/starlark_syntax/src/syntax/grammar_tests/top_level_statements.golden b/starlark-rust/starlark_syntax/src/syntax/grammar_tests/top_level_statements.golden index 3a24495f00aea..8e6bca5175cc8 100644 --- a/starlark-rust/starlark_syntax/src/syntax/grammar_tests/top_level_statements.golden +++ b/starlark-rust/starlark_syntax/src/syntax/grammar_tests/top_level_statements.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Program: diff --git a/starlark-rust/starlark_syntax/src/syntax/grammar_util.rs b/starlark-rust/starlark_syntax/src/syntax/grammar_util.rs index be49d6baca703..d875c1a525d74 100644 --- a/starlark-rust/starlark_syntax/src/syntax/grammar_util.rs +++ 
b/starlark-rust/starlark_syntax/src/syntax/grammar_util.rs @@ -21,6 +21,7 @@ use crate::codemap::CodeMap; use crate::codemap::Pos; use crate::codemap::Span; use crate::codemap::Spanned; +use crate::dot_format_parser::FormatConv; use crate::dot_format_parser::FormatParser; use crate::dot_format_parser::FormatToken; use crate::eval_exception::EvalException; @@ -36,17 +37,14 @@ use crate::syntax::ast::AstAssignIdent; use crate::syntax::ast::AstAssignTarget; use crate::syntax::ast::AstExpr; use crate::syntax::ast::AstFString; -use crate::syntax::ast::AstParameter; use crate::syntax::ast::AstStmt; use crate::syntax::ast::AstString; use crate::syntax::ast::AstTypeExpr; use crate::syntax::ast::Comma; -use crate::syntax::ast::DefP; use crate::syntax::ast::Expr; use crate::syntax::ast::ExprP; use crate::syntax::ast::FStringP; use crate::syntax::ast::IdentP; -use crate::syntax::ast::LambdaP; use crate::syntax::ast::LoadArgP; use crate::syntax::ast::LoadP; use crate::syntax::ast::Stmt; @@ -54,10 +52,8 @@ use crate::syntax::ast::StmtP; use crate::syntax::ast::ToAst; use crate::syntax::ast::TypeExpr; use crate::syntax::ast::TypeExprP; -use crate::syntax::def::DefParams; use crate::syntax::state::ParserState; use crate::syntax::type_expr::TypeExprUnpackP; -use crate::syntax::Dialect; use crate::syntax::DialectTypes; #[derive(Debug, thiserror::Error)] @@ -97,7 +93,7 @@ pub fn check_assign(codemap: &CodeMap, x: AstExpr) -> Result { - return Err(EvalException::new( + return Err(EvalException::new_anyhow( GrammarUtilError::InvalidLhs.into(), x.span, codemap, @@ -118,7 +114,7 @@ pub fn check_assignment( // for augmented assignment, Starlark doesn't allow tuple/list match &lhs.node { Expr::Tuple(_) | Expr::List(_) => { - return Err(EvalException::new( + return Err(EvalException::new_anyhow( GrammarUtilError::InvalidModifyLhs.into(), lhs.span, codemap, @@ -137,7 +133,7 @@ pub fn check_assignment( None }; if let Some(err) = err { - return Err(EvalException::new(err.into(), ty.span, codemap)); + return Err(EvalException::new_anyhow(err.into(), ty.span, codemap)); } } Ok(match op { @@ -150,48 +146,8 @@ pub fn check_assignment( }) } -fn check_parameters<'a>(parameters: &[AstParameter], parser_state: &mut ParserState<'a>) { - if let Err(e) = DefParams::unpack(parameters, parser_state.codemap) { - parser_state.errors.push(e); - } -} - -pub fn check_lambda( - params: Vec, - body: AstExpr, - parser_state: &mut ParserState, -) -> Expr { - check_parameters(¶ms, parser_state); - Expr::Lambda(LambdaP { - params, - body: Box::new(body), - payload: (), - }) -} - -pub fn check_def( - name: AstString, - params: Vec, - return_type: Option>, - stmts: AstStmt, - parser_state: &mut ParserState, -) -> Stmt { - check_parameters(¶ms, parser_state); - let name = name.map(|s| AssignIdentP { - ident: s, - payload: (), - }); - Stmt::Def(DefP { - name, - params, - return_type, - body: Box::new(stmts), - payload: (), - }) -} - pub(crate) fn check_load_0(module: AstString, parser_state: &mut ParserState) -> Stmt { - parser_state.errors.push(EvalException::new( + parser_state.errors.push(EvalException::new_anyhow( GrammarUtilError::LoadRequiresAtLeastTwoArguments.into(), module.span, parser_state.codemap, @@ -234,20 +190,7 @@ pub(crate) fn check_load( }) } -#[derive(thiserror::Error, Debug)] -enum FStringError { - #[error("Not a valid identifier: `{}`", .capture)] - InvalidIdentifier { capture: String }, - - // Always render the causes for this, but don't expose the error when traversing sources. 
- #[error("Invalid format: {:#}", .inner)] - InvalidFormat { inner: anyhow::Error }, - - #[error("Your Starlark dialect must enable f-strings to use them")] - NotEnabled, -} - -pub fn fstring( +pub(crate) fn fstring( fstring: TokenFString, begin: usize, end: usize, @@ -256,7 +199,7 @@ pub fn fstring( if !parser_state.dialect.enable_f_strings { parser_state.error( Span::new(Pos::new(begin as _), Pos::new(end as _)), - FStringError::NotEnabled, + "Your Starlark dialect must enable f-strings to use them", ); } @@ -276,7 +219,7 @@ pub fn fstring( // We are producing a format string here so we need to escape this back! format.push_str(e.back_to_escape()) } - Ok(FormatToken::Capture { capture, pos }) => { + Ok(FormatToken::Capture { capture, pos, conv }) => { let capture_begin = begin + content_start_offset + pos; let capture_end = capture_begin + capture.len(); @@ -285,9 +228,7 @@ pub fn fstring( None => { parser_state.error( Span::new(Pos::new(capture_begin as _), Pos::new(capture_end as _)), - FStringError::InvalidIdentifier { - capture: capture.to_owned(), - }, + format_args!("Not a valid identifier: `{capture}`"), ); // Might as well keep going here. This doesn't compromise the parsing of // the rest of the format string. @@ -300,13 +241,17 @@ pub fn fstring( ) .ast(capture_begin, capture_end); expressions.push(expr); - format.push_str("{}"); // Positional format. + // Positional format. + match conv { + FormatConv::Str => format.push_str("{}"), + FormatConv::Repr => format.push_str("{!r}"), + } } Err(inner) => { // TODO: Reporting the exact position of the error would be better. parser_state.error( Span::new(Pos::new(begin as _), Pos::new(end as _)), - FStringError::InvalidFormat { inner }, + format_args!("Invalid format: {inner:#}"), ); break; } @@ -324,60 +269,15 @@ pub fn fstring( #[derive(thiserror::Error, Debug)] enum DialectError { - #[error("`def` is not allowed in this dialect")] - Def, - #[error("`lambda` is not allowed in this dialect")] - Lambda, - #[error("* keyword-only-arguments is not allowed in this dialect")] - KeywordOnlyArguments, #[error("type annotations are not allowed in this dialect")] Types, } fn err(codemap: &CodeMap, span: Span, err: DialectError) -> Result { - Err(EvalException::new(err.into(), span, codemap)) -} - -pub fn dialect_check_lambda( - dialect: &Dialect, - codemap: &CodeMap, - x: Spanned, -) -> Result, EvalException> { - if dialect.enable_lambda { - Ok(x) - } else { - err(codemap, x.span, DialectError::Lambda) - } -} - -pub fn dialect_check_def( - dialect: &Dialect, - codemap: &CodeMap, - x: Spanned, -) -> Result, EvalException> { - if dialect.enable_def { - Ok(x) - } else { - err(codemap, x.span, DialectError::Def) - } -} - -pub fn dialect_check_keyword_only_arguments( - dialect: &Dialect, - codemap: &CodeMap, - begin: usize, - end: usize, - x: T, -) -> Result { - let span = Span::new(Pos::new(begin as u32), Pos::new(end as u32)); - if dialect.enable_keyword_only_arguments { - Ok(x) - } else { - err(codemap, span, DialectError::KeywordOnlyArguments) - } + Err(EvalException::new_anyhow(err.into(), span, codemap)) } -pub fn dialect_check_type( +pub(crate) fn dialect_check_type( state: &ParserState, x: Spanned, ) -> Result, EvalException> { @@ -386,7 +286,7 @@ pub fn dialect_check_type( return err(state.codemap, x.span, DialectError::Types); } - TypeExprUnpackP::unpack(&x, state.codemap, state.allow_string_literals_in_type_expr)?; + TypeExprUnpackP::unpack(&x, state.codemap).map_err(EvalException::from)?; Ok(x.map(|node| TypeExprP { expr: Spanned { 
node, span }, diff --git a/starlark-rust/starlark_syntax/src/syntax/lint_suppressions.rs b/starlark-rust/starlark_syntax/src/syntax/lint_suppressions.rs new file mode 100644 index 0000000000000..2f55dc4e16aff --- /dev/null +++ b/starlark-rust/starlark_syntax/src/syntax/lint_suppressions.rs @@ -0,0 +1,189 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::collections::HashMap; +use std::collections::HashSet; + +use crate::codemap::CodeMap; +use crate::codemap::Pos; +use crate::codemap::Span; + +static LINT_SUPPRESISON_PREFIX: &str = "starlark-lint-disable "; + +#[derive(Debug, Clone)] +struct SuppressionInfo { + /// The original span of the comment token containing the suppression + token_span: Span, + /// The span that this suppression affects + effective_span: Span, + /// Does the suppression cover the next line? + suppress_next_line: bool, +} +#[derive(Debug, Clone)] +pub(crate) struct LintSuppressions { + /// A map from lint short names to spans where they are suppressed + suppressions: HashMap<String, Vec<SuppressionInfo>>, +} + +impl LintSuppressions { + /// Check if a given lint short_name and span is suppressed + pub(crate) fn is_suppressed(&self, issue_short_name: &str, issue_span: Span) -> bool { + self.suppressions + .get(issue_short_name) + .map(|suppression_spans| { + suppression_spans.iter().any( + |SuppressionInfo { + token_span, + effective_span, + suppress_next_line, + }| { + // is this suppression the last thing in a suppress_next_line issue span? ... + if *suppress_next_line && + // (issue_span includes line terminator) + (issue_span.end() - 1) == token_span.end() + { + // ... 
then issue is not suppressed + false + } else { + issue_span.intersects(*effective_span) + } + }, + ) + }) + == Some(true) + } +} + +/// State needed for parsing a block of comments +#[derive(Default)] +struct ParseState { + token_spans: Vec, + effective_spans: Vec, + short_names: HashSet, + last_line: usize, +} + +impl ParseState { + fn is_empty(&self) -> bool { + self.token_spans.is_empty() + && self.effective_spans.is_empty() + && self.short_names.is_empty() + } +} + +/// Parse lint suppressions for a module and build a LintSuppressions struct +pub(crate) struct LintSuppressionsBuilder { + state: ParseState, + suppressions: LintSuppressions, +} + +impl LintSuppressionsBuilder { + pub(crate) fn new() -> Self { + Self { + state: ParseState::default(), + suppressions: LintSuppressions { + suppressions: HashMap::new(), + }, + } + } + + /// Call for each comment in a block of comments + pub(crate) fn parse_comment( + &mut self, + codemap: &CodeMap, + comment: &str, + start: usize, + end: usize, + ) { + let parsed_short_names = parse_lint_suppressions(comment); + if !parsed_short_names.is_empty() || !self.state.short_names.is_empty() { + if let (Ok(start_pos), Ok(end_pos)) = (start.try_into(), end.try_into()) { + let token_span = Span::new(Pos::new(start_pos), Pos::new(end_pos)); + let line = codemap.find_line(Pos::new(start_pos)); + let effective_span = codemap.line_span_trim_newline(line); + self.state.short_names.extend(parsed_short_names); + self.state.token_spans.push(token_span); + self.state.effective_spans.push(effective_span); + self.state.last_line = line; + } + } + } + + /// Call after the last comment in a block of comments + pub(crate) fn end_of_comment_block(&mut self, codemap: &CodeMap) { + if !self.state.short_names.is_empty() { + self.update_lint_suppressions(codemap); + } + } + + pub(crate) fn build(self) -> LintSuppressions { + assert!(self.state.is_empty()); + self.suppressions + } + + /// Update the line_suppressions hashmap with parsed lint suppressions for a block of comment + /// Consumes and clears the ParseState + fn update_lint_suppressions(&mut self, codemap: &CodeMap) { + let state = std::mem::take(&mut self.state); + let number_of_tokens = state.token_spans.len(); + let token_span = Span::merge_all(state.token_spans.into_iter()); + let mut effective_span = Span::merge_all(state.effective_spans.into_iter()); + // In case the suppression comment has preceding whitespace + let source_before_token = + codemap.source_span(Span::new(effective_span.begin(), token_span.begin())); + let suppress_next_line = number_of_tokens > 1 + || effective_span == token_span + || (effective_span.end() == token_span.end() && source_before_token.trim().is_empty()); + if suppress_next_line { + // Expand the span to include the next line, + // in case suppression was put on the line before the issue + if let Some(next_line_span) = codemap.line_span_opt(state.last_line + 1) { + effective_span = effective_span.merge(next_line_span); + } + } + + for name in state.short_names { + self.suppressions + .suppressions + .entry(name) + .or_default() + .push(SuppressionInfo { + token_span, + effective_span, + suppress_next_line, + }); + } + } +} + +/// Parse a single comment line and extract any lint suppressions. 
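As a usage sketch of the comment syntax this new file recognises: the lint names below (`no-effect`, `unused-load`) are invented for illustration, and a real lint driver would pass the span of its own diagnostic to `AstModule::is_suppressed` (added to `module.rs` later in this diff) rather than discard the AST as this sketch does.

```rust
use starlark_syntax::syntax::AstModule;
use starlark_syntax::syntax::Dialect;

fn main() {
    // A trailing comment suppresses issues on its own line; a stand-alone
    // comment also covers the line that follows it (`suppress_next_line`).
    let program = r#"
x = noop()  # starlark-lint-disable no-effect
# starlark-lint-disable unused-load
load("//:defs.bzl", "rule")
"#;
    // Parsing records the suppressions inside the returned AST; a linter
    // would consult `ast.is_suppressed("no-effect", issue_span)` before
    // reporting an issue.
    let _ast =
        AstModule::parse("demo.star", program.to_owned(), &Dialect::Extended).unwrap();
}
```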
+fn parse_lint_suppressions(comment_line: &str) -> Vec { + let mut res = Vec::new(); + if let Some(short_names) = comment_line + .trim_start() + .strip_prefix(LINT_SUPPRESISON_PREFIX) + { + for name in short_names.split([' ', ',']) { + let trimmed = name.trim(); + if !trimmed.is_empty() { + res.push(trimmed.to_owned()); + } + } + } + + res +} diff --git a/starlark-rust/starlark_syntax/src/syntax/mod.rs b/starlark-rust/starlark_syntax/src/syntax/mod.rs deleted file mode 100644 index 43455650c1e08..0000000000000 --- a/starlark-rust/starlark_syntax/src/syntax/mod.rs +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2018 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! The AST of Starlark as [`AstModule`], along with a [`parse`](AstModule::parse) function. - -pub use module::AstModule; -pub use parser::AstLoad; - -pub use crate::dialect::Dialect; -pub use crate::dialect::DialectTypes; - -pub mod ast; -pub mod def; -#[cfg(test)] -mod grammar_tests; -pub mod grammar_util; -pub mod module; -pub mod parser; -pub mod payload_map; -pub mod state; -#[cfg(test)] -mod testcases; -pub mod top_level_stmts; -pub mod type_expr; -pub mod uniplate; -pub mod validate; - -#[allow(clippy::all)] -// Things we explicitly turn on need to be explicitly turned off -#[allow(clippy::inefficient_to_string)] -#[allow(clippy::trivially_copy_pass_by_ref)] -#[allow(clippy::too_many_arguments)] -#[allow(clippy::cloned_instead_of_copied)] -#[allow(unused_extern_crates)] -#[allow(unused_braces)] -#[cfg_attr(feature = "gazebo_lint", allow(gazebo_lint_use_dupe))] -mod grammar { - include!(concat!(env!("OUT_DIR"), "/syntax/grammar.rs")); -} diff --git a/starlark-rust/starlark_syntax/src/syntax/module.rs b/starlark-rust/starlark_syntax/src/syntax/module.rs index db3f1d2556193..7dd0978456f90 100644 --- a/starlark-rust/starlark_syntax/src/syntax/module.rs +++ b/starlark-rust/starlark_syntax/src/syntax/module.rs @@ -30,19 +30,22 @@ use crate::codemap::FileSpan; use crate::codemap::Pos; use crate::codemap::Span; use crate::codemap::Spanned; -use crate::diagnostic::Diagnostic; use crate::eval_exception::EvalException; use crate::lexer::Lexer; use crate::lexer::Token; use crate::syntax::ast::ArgumentP; use crate::syntax::ast::AstExpr; use crate::syntax::ast::AstStmt; +use crate::syntax::ast::CallArgsP; use crate::syntax::ast::ExprP; use crate::syntax::ast::IdentP; use crate::syntax::ast::LoadArgP; use crate::syntax::ast::Stmt; use crate::syntax::grammar::StarlarkParser; +use crate::syntax::lint_suppressions::LintSuppressions; +use crate::syntax::lint_suppressions::LintSuppressionsBuilder; use crate::syntax::state::ParserState; +use crate::syntax::validate::validate_module; use crate::syntax::AstLoad; use crate::syntax::Dialect; @@ -68,7 +71,7 @@ fn parse_error_add_span( err: lu::ParseError, pos: usize, codemap: &CodeMap, -) -> anyhow::Error { +) -> crate::Error { let (message, span) = match err { lu::ParseError::InvalidToken { location 
} => ( "Parse error: invalid token".to_owned(), @@ -93,10 +96,14 @@ fn parse_error_add_span( format!("Parse error: extraneous token {}", t), Span::new(Pos::new(x as u32), Pos::new(y as u32)), ), - lu::ParseError::User { error } => return error.into_anyhow(), + lu::ParseError::User { error } => return error.into_error(), }; - Diagnostic::new(anyhow::anyhow!(message), span, codemap) + crate::Error::new_spanned( + crate::ErrorKind::Parser(anyhow::anyhow!(message)), + span, + codemap, + ) } /// A representation of a Starlark module abstract syntax tree. @@ -107,18 +114,18 @@ fn parse_error_add_span( /// The internal details (statements/expressions) are deliberately omitted, as they change /// more regularly. A few methods to obtain information about the AST are provided. #[derive(Derivative)] -#[derivative(Debug)] +#[derivative(Debug, Clone)] pub struct AstModule { #[derivative(Debug = "ignore")] pub(crate) codemap: CodeMap, pub(crate) statement: AstStmt, pub(crate) dialect: Dialect, - /// Temporary option to allow string literals in type expressions. - /// Specified with `@starlark-rust: allow_string_literals_in_type_expr`. - pub(crate) allow_string_literals_in_type_expr: bool, /// Opt-in typecheck. /// Specified with `@starlark-rust: typecheck`. pub(crate) typecheck: bool, + /// Lint issues suppressed in this module using inline comments of shape + /// # starlark-lint-disable , , ... + lint_suppressions: LintSuppressions, } /// This trait is not exported as public API of starlark. @@ -129,9 +136,7 @@ pub trait AstModuleFields: Sized { fn dialect(&self) -> &Dialect; - fn allow_string_literals_in_type_expr(&self) -> bool; - - fn into_parts(self) -> (CodeMap, AstStmt, Dialect, bool, bool); + fn into_parts(self) -> (CodeMap, AstStmt, Dialect, bool); } impl AstModuleFields for AstModule { @@ -147,18 +152,8 @@ impl AstModuleFields for AstModule { &self.dialect } - fn allow_string_literals_in_type_expr(&self) -> bool { - self.allow_string_literals_in_type_expr - } - - fn into_parts(self) -> (CodeMap, AstStmt, Dialect, bool, bool) { - ( - self.codemap, - self.statement, - self.dialect, - self.allow_string_literals_in_type_expr, - self.typecheck, - ) + fn into_parts(self) -> (CodeMap, AstStmt, Dialect, bool) { + (self.codemap, self.statement, self.dialect, self.typecheck) } } @@ -167,22 +162,34 @@ impl AstModule { codemap: CodeMap, statement: AstStmt, dialect: &Dialect, - allow_string_literals_in_type_expr: bool, typecheck: bool, - ) -> anyhow::Result { - Stmt::validate(&codemap, &statement, dialect).map_err(EvalException::into_anyhow)?; + lint_suppressions: LintSuppressions, + ) -> crate::Result { + let mut errors = Vec::new(); + validate_module( + &statement, + &mut ParserState { + codemap: &codemap, + dialect, + errors: &mut errors, + }, + ); + // We need the first error, so we don't use `.pop()`. + if let Some(err) = errors.into_iter().next() { + return Err(err.into_error()); + } Ok(AstModule { codemap, statement, dialect: dialect.clone(), - allow_string_literals_in_type_expr, typecheck, + lint_suppressions, }) } /// Parse a file stored on disk. For details see [`parse`](AstModule::parse). 
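A brief, hedged usage sketch of `parse_file` together with the span-carrying error type introduced above (the path is a placeholder, and the exact `Display` output is not guaranteed):

```rust
use std::path::Path;

use starlark_syntax::syntax::AstModule;
use starlark_syntax::syntax::Dialect;

fn main() {
    match AstModule::parse_file(Path::new("example.star"), &Dialect::Standard) {
        Ok(_ast) => println!("parsed ok"),
        // I/O failures and syntax errors both surface as `starlark_syntax::Error`;
        // syntax errors usually carry a `FileSpan` locating the problem.
        Err(e) => match e.span() {
            Some(span) => eprintln!("{}: {}", span, e),
            None => eprintln!("{}", e),
        },
    }
}
```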
- pub fn parse_file(path: &Path, dialect: &Dialect) -> anyhow::Result { - let content = fs::read_to_string(path)?; + pub fn parse_file(path: &Path, dialect: &Dialect) -> crate::Result { + let content = fs::read_to_string(path).map_err(anyhow::Error::new)?; Self::parse(&path.to_string_lossy(), content, dialect) } @@ -190,45 +197,59 @@ impl AstModule { /// The `filename` is for error messages only, and does not have to be a valid file. /// The [`Dialect`] selects which Starlark constructs are valid. /// - /// Errors will be reported using the [`Diagnostic`] type. For example: + /// The returned error may contain diagnostic information. For example: /// /// ``` - /// use starlark_syntax::syntax::{AstModule, Dialect}; - /// use starlark_syntax::diagnostic::Diagnostic; + /// use starlark_syntax::codemap::FileSpan; + /// use starlark_syntax::syntax::AstModule; + /// use starlark_syntax::syntax::Dialect; /// - /// let err: anyhow::Error = AstModule::parse("filename", "\n(unmatched".to_owned(), &Dialect::Standard).unwrap_err(); - /// let err: Diagnostic = err.downcast::().unwrap(); - /// assert_eq!(err.span.unwrap().to_string(), "filename:2:11"); + /// let err: starlark_syntax::Error = + /// AstModule::parse("filename", "\n(unmatched".to_owned(), &Dialect::Standard).unwrap_err(); + /// let span: &FileSpan = err.span().unwrap(); + /// assert_eq!(span.to_string(), "filename:2:11"); /// ``` - pub fn parse(filename: &str, content: String, dialect: &Dialect) -> anyhow::Result { - let allow_string_literals_in_type_expr = - content.contains("@starlark-rust: allow_string_literals_in_type_expr"); + pub fn parse(filename: &str, content: String, dialect: &Dialect) -> crate::Result { let typecheck = content.contains("@starlark-rust: typecheck"); let codemap = CodeMap::new(filename.to_owned(), content); let lexer = Lexer::new(codemap.source(), dialect, codemap.dupe()); + // Store lint suppressions found during parsing + let mut lint_suppressions_builder = LintSuppressionsBuilder::new(); + // Keep track of block of comments, used for accumulating lint suppressions + let mut in_comment_block = false; let mut errors = Vec::new(); match StarlarkParser::new().parse( &mut ParserState { codemap: &codemap, dialect, - allow_string_literals_in_type_expr, errors: &mut errors, }, - lexer.filter(|t| match t { - Ok((_, Token::Comment(_), _)) => false, - _ => true, + lexer.filter(|token| match token { + // Filter out comment tokens and accumulate lint suppressions + Ok((start, Token::Comment(comment), end)) => { + lint_suppressions_builder.parse_comment(&codemap, comment, *start, *end); + in_comment_block = true; + false + } + _ => { + if in_comment_block { + lint_suppressions_builder.end_of_comment_block(&codemap); + in_comment_block = false; + } + true + } }), ) { Ok(v) => { if let Some(err) = errors.into_iter().next() { - return Err(err.into_anyhow()); + return Err(err.into_error()); } Ok(AstModule::create( codemap, v, dialect, - allow_string_literals_in_type_expr, typecheck, + lint_suppressions_builder.build(), )?) 
} Err(p) => Err(parse_error_add_span(p, codemap.source().len(), &codemap)), @@ -315,16 +336,18 @@ impl AstModule { }, }), }), - vec![ - Spanned { - span: lhs.span, - node: ArgumentP::Positional(*lhs), - }, - Spanned { - span: rhs.span, - node: ArgumentP::Positional(*rhs), - }, - ], + CallArgsP { + args: vec![ + Spanned { + span: lhs.span, + node: ArgumentP::Positional(*lhs), + }, + Spanned { + span: rhs.span, + node: ArgumentP::Positional(*rhs), + }, + ], + }, ), None => ExprP::Op(lhs, op, rhs), }, @@ -336,6 +359,12 @@ impl AstModule { self.statement.visit_expr_mut(|x| f(x, replace)); } + + /// Check if a given Lint short_name and span is suppressed in this module + pub fn is_suppressed(&self, issue_short_name: &str, issue_span: Span) -> bool { + self.lint_suppressions + .is_suppressed(issue_short_name, issue_span) + } } #[cfg(test)] diff --git a/starlark-rust/starlark_syntax/src/syntax/payload_map.rs b/starlark-rust/starlark_syntax/src/syntax/payload_map.rs index 9b42c5c515b0a..6eac40910b3b4 100644 --- a/starlark-rust/starlark_syntax/src/syntax/payload_map.rs +++ b/starlark-rust/starlark_syntax/src/syntax/payload_map.rs @@ -24,6 +24,7 @@ use crate::syntax::ast::AssignIdentP; use crate::syntax::ast::AssignP; use crate::syntax::ast::AssignTargetP; use crate::syntax::ast::AstPayload; +use crate::syntax::ast::CallArgsP; use crate::syntax::ast::ClauseP; use crate::syntax::ast::DefP; use crate::syntax::ast::ExprP; @@ -172,7 +173,9 @@ impl ExprP { ExprP::Dot(object, field) => ExprP::Dot(Box::new(object.into_map_payload(f)), field), ExprP::Call(ca, args) => ExprP::Call( Box::new(ca.into_map_payload(f)), - args.into_map(|a| a.into_map_payload(f)), + CallArgsP { + args: args.args.into_map(|a| a.into_map_payload(f)), + }, ), ExprP::Index(array_index) => { let (array, index) = *array_index; @@ -322,16 +325,13 @@ impl ParameterP { f: &mut impl AstPayloadFunction, ) -> ParameterP { match self { - ParameterP::Normal(name, ty) => ParameterP::Normal( + ParameterP::Normal(name, ty, defa) => ParameterP::Normal( name.into_map_payload(f), ty.map(|defa| Box::new(defa.into_map_payload(f))), - ), - ParameterP::WithDefaultValue(name, ty, defa) => ParameterP::WithDefaultValue( - name.into_map_payload(f), - ty.map(|defa| Box::new(defa.into_map_payload(f))), - Box::new(defa.into_map_payload(f)), + defa.map(|defa| Box::new(defa.into_map_payload(f))), ), ParameterP::NoArgs => ParameterP::NoArgs, + ParameterP::Slash => ParameterP::Slash, ParameterP::Args(name, ty) => ParameterP::Args( name.into_map_payload(f), ty.map(|defa| Box::new(defa.into_map_payload(f))), diff --git a/starlark-rust/starlark_syntax/src/syntax/state.rs b/starlark-rust/starlark_syntax/src/syntax/state.rs index d014a224073dc..f9eb0c323458f 100644 --- a/starlark-rust/starlark_syntax/src/syntax/state.rs +++ b/starlark-rust/starlark_syntax/src/syntax/state.rs @@ -15,23 +15,24 @@ * limitations under the License. */ +use std::fmt::Display; + use crate::codemap::CodeMap; use crate::codemap::Span; use crate::eval_exception::EvalException; use crate::syntax::Dialect; -pub struct ParserState<'a> { - pub dialect: &'a Dialect, - pub codemap: &'a CodeMap, - pub(crate) allow_string_literals_in_type_expr: bool, +pub(crate) struct ParserState<'a> { + pub(crate) dialect: &'a Dialect, + pub(crate) codemap: &'a CodeMap, /// Recoverable errors. - pub errors: &'a mut Vec, + pub(crate) errors: &'a mut Vec, } impl<'a> ParserState<'a> { /// Add recoverable error. 
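The error-handling shape above (a `Vec` of recoverable errors, of which `AstModule::create` reports the first) is observable from the outside; a minimal sketch, with an arbitrary file name:

```rust
use starlark_syntax::syntax::AstModule;
use starlark_syntax::syntax::Dialect;

fn main() {
    // Both statements are invalid outside a `for`/`def`; the parser records
    // both as recoverable errors and surfaces the first.
    let src = "break\ncontinue\n".to_owned();
    let err = AstModule::parse("demo.star", src, &Dialect::Extended).unwrap_err();
    assert!(err.to_string().contains("`break`"));
}
```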
- pub fn error(&mut self, span: Span, error: impl Into) { + pub(crate) fn error(&mut self, span: Span, error: impl Display) { self.errors - .push(EvalException::new(error.into(), span, self.codemap)); + .push(EvalException::parser_error(error, span, self.codemap)); } } diff --git a/starlark-rust/starlark_syntax/src/syntax/type_expr.rs b/starlark-rust/starlark_syntax/src/syntax/type_expr.rs index b9ffa428a7c9b..cf1f5ec9ef4ae 100644 --- a/starlark-rust/starlark_syntax/src/syntax/type_expr.rs +++ b/starlark-rust/starlark_syntax/src/syntax/type_expr.rs @@ -17,7 +17,7 @@ use crate::codemap::CodeMap; use crate::codemap::Spanned; -use crate::eval_exception::EvalException; +use crate::diagnostic::WithDiagnostic; use crate::slice_vec_ext::SliceExt; use crate::syntax::ast::AstExprP; use crate::syntax::ast::AstIdentP; @@ -27,117 +27,71 @@ use crate::syntax::ast::BinOp; use crate::syntax::ast::ExprP; #[derive(Debug, thiserror::Error)] -enum TypeExprUnpackError { +pub enum TypeExprUnpackError { #[error("{0} expression is not allowed in type expression")] InvalidType(&'static str), #[error("Empty list is not allowed in type expression")] EmptyListInType, #[error("Only dot expression of form `ident.ident` is allowed in type expression")] DotInType, - #[error(r#"`""` or `"_xxx"` is not allowed in type expression, use `typing.Any` instead"#)] - EmptyStrInType, - #[error(r#"`"{0}"` is not allowed in type expression, use `{1}` instead"#)] - StrBanReplace(&'static str, &'static str), + #[error("Expecting path like `a.b.c`")] + ExpectingPath, #[error(r#"`{0}.type` is not allowed in type expression, use `{0}` instead"#)] DotTypeBan(String), } +impl From for crate::Error { + fn from(e: TypeExprUnpackError) -> Self { + crate::Error::new_other(e) + } +} + /// Types that are `""` or start with `"_"` are wildcard - they match everything /// (also deprecated). pub fn type_str_literal_is_wildcard(s: &str) -> bool { s == "" || s.starts_with('_') } +/// Path component of type. +#[derive(Debug)] +pub struct TypePathP<'a, P: AstPayload> { + pub first: &'a AstIdentP

<P>, + pub rem: Vec<Spanned<&'a str>>, +} + /// This type should be used instead of `TypeExprP`, but a lot of code needs to be updated. #[derive(Debug)] pub enum TypeExprUnpackP<'a, P: AstPayload> { - Path(&'a AstIdentP

<P>, Vec<Spanned<&'a str>>), + Ellipsis, + Path(TypePathP<'a, P>), /// `list[str]`. Index(&'a AstIdentP

<P>, Box<Spanned<TypeExprUnpackP<'a, P>>>), - /// `dict[str, int]`. + /// `dict[str, int]` or `typing.Callable[[int], str]`. Index2( - &'a AstIdentP

<P>, + Spanned<TypePathP<'a, P>>, Box<Spanned<TypeExprUnpackP<'a, P>>>, Box<Spanned<TypeExprUnpackP<'a, P>>>, ), - /// `tuple[str, ...]`. - Index2Ellipsis(&'a AstIdentP

<P>, Box<Spanned<TypeExprUnpackP<'a, P>>>), + /// List argument in `typing.Callable[[int], str]`. + List(Vec<Spanned<TypeExprUnpackP<'a, P>>>), Union(Vec<Spanned<TypeExprUnpackP<'a, P>>>), Tuple(Vec<Spanned<TypeExprUnpackP<'a, P>>>), - Literal(Spanned<&'a str>), } -/// List of builtin types which are converted to proper types. -/// First is the type name, second is the symbol. -const BAN_REPLACE_TYPES: &[(&str, &str)] = &[ - ("str", "str"), - ("string", "str"), - ("int", "int"), - ("float", "float"), - ("bool", "bool"), - ("list", "list"), - ("dict", "dict"), - ("tuple", "tuple"), - ("range", "range"), - ("struct", "struct"), - ("NoneType", "None"), - ("None", "None"), - ("function", "typing.Callable"), - // Following do not belong to starlark, but this code will go away - // after we finish migration from string-based types. - ("actions", "AnalysisActions"), - ("context", "AnalysisContext"), - ("artifact", "Artifact"), - ("artifact_tag", "ArtifactTag"), - ("dependency", "Dependency"), - ("provider", "Provider"), - ("selector", "Select"), - ("transitive_set", "TransitiveSet"), - ( - "transitive_set_args_projection", - "TransitiveSetArgsProjection", - ), - ("transitive_set_definition", "TransitiveSetDefinition"), - ( - "transitive_set_json_projection", - "TransitiveSetJsonProjection", - ), - ("transitive_set_iterator", "TransitiveSetIterator"), - ( - "transitive_set_args_projection_iterator", - "TransitiveSetArgsProjectionIterator", - ), - ("target_label", "TargetLabel"), - ("configured_target_label", "ConfiguredTargetLabel"), - ("providers_label", "ProvidersLabel"), - ("label", "ConfiguredProvidersLabel"), -]; - impl<'a, P: AstPayload> TypeExprUnpackP<'a, P> { - pub fn unpack( + fn unpack_path( expr: &'a AstExprP
<P>
    , codemap: &CodeMap, - allow_string_literals_in_type_expr: bool, - ) -> Result>, EvalException> { + ) -> Result>, WithDiagnostic> { let span = expr.span; - let err = |t| { - Err(EvalException::new( - TypeExprUnpackError::InvalidType(t).into(), - expr.span, - codemap, - )) - }; - match &expr.node { - ExprP::Tuple(xs) => { - let xs = xs.try_map(|x| { - TypeExprUnpackP::unpack(x, codemap, allow_string_literals_in_type_expr) - })?; - Ok(Spanned { - span, - node: TypeExprUnpackP::Tuple(xs), - }) - } + ExprP::Identifier(ident) => Ok(Spanned { + span, + node: TypePathP { + first: ident, + rem: Vec::new(), + }, + }), ExprP::Dot(object, field) => { let mut current: &AstExprP
<P>
    = object; let mut rem: Vec> = vec![field.as_ref().map(|x| x.as_str())]; @@ -157,8 +111,8 @@ impl<'a, P: AstPayload> TypeExprUnpackP<'a, P> { } // TODO(nga): allow it after we prohibit // string constants as types. - return Err(EvalException::new( - TypeExprUnpackError::DotTypeBan(full_path).into(), + return Err(WithDiagnostic::new_spanned( + TypeExprUnpackError::DotTypeBan(full_path), current.span, codemap, )); @@ -166,33 +120,78 @@ impl<'a, P: AstPayload> TypeExprUnpackP<'a, P> { } return Ok(Spanned { span, - node: TypeExprUnpackP::Path(i, rem), + node: TypePathP { first: i, rem }, }); } _ => { - return Err(EvalException::new( - TypeExprUnpackError::DotInType.into(), + return Err(WithDiagnostic::new_spanned( + TypeExprUnpackError::DotInType, current.span, codemap, )); } } } - // We would also want to ban expressions like `x.y` where `x` is not `type`, - // or `x.y.z` but these are used now. - // Try `xbgs metalos.ProvisioningConfig`. - // That expression has type string which is the type name. + } + _ => Err(WithDiagnostic::new_spanned( + TypeExprUnpackError::ExpectingPath, + expr.span, + codemap, + )), + } + } + + fn unpack_argument( + expr: &'a AstExprP

<P>, + codemap: &CodeMap, + ) -> Result<Spanned<TypeExprUnpackP<'a, P>>, WithDiagnostic<TypeExprUnpackError>> { + let span = expr.span; + match &expr.node { + ExprP::List(items) => { + let items = items.try_map(|x| TypeExprUnpackP::unpack_argument(x, codemap))?; + Ok(Spanned { + span, + node: TypeExprUnpackP::List(items), + }) + } + _ => TypeExprUnpackP::unpack(expr, codemap), + } + } + + pub fn unpack( + expr: &'a AstExprP
<P>
    , + codemap: &CodeMap, + ) -> Result>, WithDiagnostic> { + let span = expr.span; + let err = |t| { + Err(WithDiagnostic::new_spanned( + TypeExprUnpackError::InvalidType(t), + expr.span, + codemap, + )) + }; + + match &expr.node { + ExprP::Tuple(xs) => { + let xs = xs.try_map(|x| TypeExprUnpackP::unpack(x, codemap))?; + Ok(Spanned { + span, + node: TypeExprUnpackP::Tuple(xs), + }) + } + ExprP::Dot(..) => { + let path = Self::unpack_path(expr, codemap)?; + Ok(Spanned { + span, + node: TypeExprUnpackP::Path(path.node), + }) } ExprP::Call(..) => err("call"), ExprP::Index(a_i) => { let (a, i) = &**a_i; match &a.node { ExprP::Identifier(ident) => { - let i = TypeExprUnpackP::unpack( - i, - codemap, - allow_string_literals_in_type_expr, - )?; + let i = TypeExprUnpackP::unpack(i, codemap)?; Ok(Spanned { span, node: TypeExprUnpackP::Index(ident, Box::new(i)), @@ -203,68 +202,41 @@ impl<'a, P: AstPayload> TypeExprUnpackP<'a, P> { } ExprP::Index2(a_i0_i1) => { let (a, i0, i1) = &**a_i0_i1; - let ExprP::Identifier(ident) = &a.node else { - return err("array indirection 2 where array is not an identifier"); - }; - if let ExprP::Literal(AstLiteral::Ellipsis) = &i1.node { - let i0 = - TypeExprUnpackP::unpack(i0, codemap, allow_string_literals_in_type_expr)?; - Ok(Spanned { - span, - node: TypeExprUnpackP::Index2Ellipsis(ident, Box::new(i0)), - }) - } else { - let i0 = - TypeExprUnpackP::unpack(i0, codemap, allow_string_literals_in_type_expr)?; - let i1 = - TypeExprUnpackP::unpack(i1, codemap, allow_string_literals_in_type_expr)?; - Ok(Spanned { - span, - node: TypeExprUnpackP::Index2(ident, Box::new(i0), Box::new(i1)), - }) - } + let path = Self::unpack_path(a, codemap)?; + let i0 = TypeExprUnpackP::unpack_argument(i0, codemap)?; + let i1 = TypeExprUnpackP::unpack_argument(i1, codemap)?; + Ok(Spanned { + span, + node: TypeExprUnpackP::Index2(path, Box::new(i0), Box::new(i1)), + }) } ExprP::Slice(..) => err("slice"), - ExprP::Identifier(ident) => Ok(Spanned { - span, - node: TypeExprUnpackP::Path(ident, Vec::new()), - }), - ExprP::Lambda(..) => err("lambda"), - ExprP::Literal(AstLiteral::String(s)) => { - if !allow_string_literals_in_type_expr { - return err("string literal"); - } - if type_str_literal_is_wildcard(s) { - return Err(EvalException::new( - TypeExprUnpackError::EmptyStrInType.into(), - expr.span, - codemap, - )); - } - for (ban, replace) in BAN_REPLACE_TYPES { - if s.as_str() == *ban { - return Err(EvalException::new( - TypeExprUnpackError::StrBanReplace(ban, replace).into(), - expr.span, - codemap, - )); - } - } + ExprP::Identifier(..) => { + let path = Self::unpack_path(expr, codemap)?; Ok(Spanned { span, - node: TypeExprUnpackP::Literal(s.as_ref().map(|x| x.as_str())), + node: TypeExprUnpackP::Path(path.node), }) } + ExprP::Lambda(..) => err("lambda"), + ExprP::Literal(AstLiteral::String(_)) => { + // TODO(nga): eventually this should be allowed for self-referential types: + // https://www.internalfb.com/tasks/?t=184482361 + err("string literal") + } ExprP::Literal(AstLiteral::Int(_)) => err("int"), ExprP::Literal(AstLiteral::Float(_)) => err("float"), - ExprP::Literal(AstLiteral::Ellipsis) => err("ellipsis"), + ExprP::Literal(AstLiteral::Ellipsis) => Ok(Spanned { + span, + node: TypeExprUnpackP::Ellipsis, + }), ExprP::Not(..) => err("not"), ExprP::Minus(..) => err("minus"), ExprP::Plus(..) => err("plus"), ExprP::BitNot(..) 
=> err("bit not"), ExprP::Op(a, BinOp::BitOr, b) => { - let a = TypeExprUnpackP::unpack(a, codemap, allow_string_literals_in_type_expr)?; - let b = TypeExprUnpackP::unpack(b, codemap, allow_string_literals_in_type_expr)?; + let a = TypeExprUnpackP::unpack(a, codemap)?; + let b = TypeExprUnpackP::unpack(b, codemap)?; Ok(Spanned { span, node: TypeExprUnpackP::Union(vec![a, b]), @@ -274,17 +246,15 @@ impl<'a, P: AstPayload> TypeExprUnpackP<'a, P> { ExprP::If(..) => err("if"), ExprP::List(xs) => { if xs.is_empty() { - Err(EvalException::new( - TypeExprUnpackError::EmptyListInType.into(), + Err(WithDiagnostic::new_spanned( + TypeExprUnpackError::EmptyListInType, expr.span, codemap, )) } else if xs.len() == 1 { err("list of 1 element") } else { - let xs = xs.try_map(|x| { - TypeExprUnpackP::unpack(x, codemap, allow_string_literals_in_type_expr) - })?; + let xs = xs.try_map(|x| TypeExprUnpackP::unpack(x, codemap))?; Ok(Spanned { span, node: TypeExprUnpackP::Union(xs), diff --git a/starlark-rust/starlark_syntax/src/syntax/uniplate.rs b/starlark-rust/starlark_syntax/src/syntax/uniplate.rs index 9952d72955aa7..892a6f8d8ab73 100644 --- a/starlark-rust/starlark_syntax/src/syntax/uniplate.rs +++ b/starlark-rust/starlark_syntax/src/syntax/uniplate.rs @@ -335,13 +335,11 @@ impl ParameterP

<P> { Option<&AstExprP

<P>>, ) { match self { - ParameterP::Normal(a, b) | ParameterP::Args(a, b) | ParameterP::KwArgs(a, b) => { + ParameterP::Normal(a, b, None) | ParameterP::Args(a, b) | ParameterP::KwArgs(a, b) => { (Some(a), b.as_ref().map(|x| &**x), None) } - ParameterP::WithDefaultValue(a, b, c) => { - (Some(a), b.as_ref().map(|x| &**x), Some(&**c)) - } - ParameterP::NoArgs => (None, None, None), + ParameterP::Normal(a, b, Some(c)) => (Some(a), b.as_ref().map(|x| &**x), Some(&**c)), + ParameterP::NoArgs | ParameterP::Slash => (None, None, None), } } @@ -354,13 +352,13 @@ impl ParameterP

<P> { Option<&mut AstExprP

<P>>, ) { match self { - ParameterP::Normal(a, b) | ParameterP::Args(a, b) | ParameterP::KwArgs(a, b) => { + ParameterP::Normal(a, b, None) | ParameterP::Args(a, b) | ParameterP::KwArgs(a, b) => { (Some(a), b.as_mut().map(|x| &mut **x), None) } - ParameterP::WithDefaultValue(a, b, c) => { + ParameterP::Normal(a, b, Some(c)) => { (Some(a), b.as_mut().map(|x| &mut **x), Some(&mut **c)) } - ParameterP::NoArgs => (None, None, None), + ParameterP::NoArgs | ParameterP::Slash => (None, None, None), } } @@ -388,7 +386,7 @@ impl ExprP

<P> { ExprP::Dot(x, _) => f(x), ExprP::Call(a, b) => { f(a); - b.iter().for_each(|x| f(x.expr())); + b.args.iter().for_each(|x| f(x.expr())); } ExprP::Index(a_b) => { let (a, b) = &**a_b; @@ -488,7 +486,7 @@ impl ExprP

<P> { ExprP::Dot(x, _) => f(x), ExprP::Call(a, b) => { f(a); - b.iter_mut().for_each(|x| f(x.expr_mut())); + b.args.iter_mut().for_each(|x| f(x.expr_mut())); } ExprP::Index(a_b) => { let (a, b) = &mut **a_b; @@ -559,15 +557,12 @@ impl ExprP

<P> { &mut self, f: &mut impl FnMut(&mut AstTypeExprP
<P>
    ) -> Result<(), E>, ) -> Result<(), E> { - match self { - ExprP::Lambda(lambda) => { - for param in &mut lambda.params { - if let (_, Some(ty), _) = param.split_mut() { - f(ty)?; - } + if let ExprP::Lambda(lambda) = self { + for param in &mut lambda.params { + if let (_, Some(ty), _) = param.split_mut() { + f(ty)?; } } - _ => {} } self.visit_expr_err_mut(|expr| expr.visit_type_expr_err_mut(f)) } diff --git a/starlark-rust/starlark_syntax/src/syntax/validate.rs b/starlark-rust/starlark_syntax/src/syntax/validate.rs index fe20c4c875eb5..703bc39982b9c 100644 --- a/starlark-rust/starlark_syntax/src/syntax/validate.rs +++ b/starlark-rust/starlark_syntax/src/syntax/validate.rs @@ -17,66 +17,23 @@ //! AST for parsed starlark files. -use std::collections::HashSet; - -use thiserror::Error; - -use crate::codemap::CodeMap; -use crate::eval_exception::EvalException; -use crate::syntax::ast::Argument; use crate::syntax::ast::AstArgument; use crate::syntax::ast::AstExpr; use crate::syntax::ast::AstLiteral; +use crate::syntax::ast::AstParameter; use crate::syntax::ast::AstStmt; +use crate::syntax::ast::CallArgsP; use crate::syntax::ast::DefP; use crate::syntax::ast::Expr; use crate::syntax::ast::ForP; +use crate::syntax::ast::LambdaP; +use crate::syntax::ast::ParameterP; use crate::syntax::ast::Stmt; -use crate::syntax::Dialect; +use crate::syntax::call::CallArgsUnpack; +use crate::syntax::def::DefParams; +use crate::syntax::state::ParserState; use crate::syntax::DialectTypes; -#[derive(Error, Debug)] -enum ValidateError { - #[error("`break` cannot be used outside of a `for` loop")] - BreakOutsideLoop, - #[error("`continue` cannot be used outside of a `for` loop")] - ContinueOutsideLoop, - #[error("`return` cannot be used outside of a `def` function")] - ReturnOutsideDef, - #[error("`load` must only occur at the top of a module")] - LoadNotTop, - #[error("`if` cannot be used outside `def` in this dialect")] - NoTopLevelIf, - #[error("`for` cannot be used outside `def` in this dialect")] - NoTopLevelFor, - #[error("`load` is not allowed in this dialect")] - Load, - #[error("`...` is not allowed in this dialect")] - Ellipsis, -} - -#[derive(Eq, PartialEq, PartialOrd, Ord)] -enum ArgsStage { - Positional, - Named, - Args, - Kwargs, -} - -#[derive(Error, Debug)] -enum ArgumentDefinitionOrderError { - #[error("positional argument after non positional")] - PositionalThenNonPositional, - #[error("named argument after *args or **kwargs")] - NamedArgumentAfterStars, - #[error("repeated named argument")] - RepeatedNamed, - #[error("Args array after another args or kwargs")] - ArgsArrayAfterArgsOrKwargs, - #[error("Multiple kwargs dictionary in arguments")] - MultipleKwargs, -} - impl Expr { /// We want to check a function call is well-formed. /// Our eventual plan is to follow the Python invariants, but for now, we are closer @@ -90,137 +47,127 @@ impl Expr { /// multiple **kwargs. /// /// We allow at most one **kwargs. 
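To make the ordering invariant concrete, a small sketch (names are arbitrary; with this change `check_call` records violations as recoverable parse errors via `CallArgsUnpack`):

```rust
use starlark_syntax::syntax::AstModule;
use starlark_syntax::syntax::Dialect;

fn main() {
    // Positional, then named, then *args, then **kwargs: well-formed.
    let ok = "f(1, x=2, *rest, **extra)".to_owned();
    assert!(AstModule::parse("ok.star", ok, &Dialect::Extended).is_ok());

    // A positional argument after a named one is rejected.
    let bad = "f(x=2, 1)".to_owned();
    assert!(AstModule::parse("bad.star", bad, &Dialect::Extended).is_err());
}
```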
- pub fn check_call( + pub(crate) fn check_call( f: AstExpr, args: Vec, - codemap: &CodeMap, - ) -> Result { - let err = |span, msg: ArgumentDefinitionOrderError| { - Err(EvalException::new(msg.into(), span, codemap)) - }; + parser_state: &mut ParserState<'_>, + ) -> Expr { + let args = CallArgsP { args }; - let mut stage = ArgsStage::Positional; - let mut named_args = HashSet::new(); - for arg in &args { - match &arg.node { - Argument::Positional(_) => { - if stage != ArgsStage::Positional { - return err( - arg.span, - ArgumentDefinitionOrderError::PositionalThenNonPositional, - ); - } - } - Argument::Named(n, _) => { - if stage > ArgsStage::Named { - return err( - arg.span, - ArgumentDefinitionOrderError::NamedArgumentAfterStars, - ); - } else if !named_args.insert(&n.node) { - // Check the names are distinct - return err(n.span, ArgumentDefinitionOrderError::RepeatedNamed); - } else { - stage = ArgsStage::Named; - } - } - Argument::Args(_) => { - if stage > ArgsStage::Named { - return err( - arg.span, - ArgumentDefinitionOrderError::ArgsArrayAfterArgsOrKwargs, - ); - } else { - stage = ArgsStage::Args; - } + if let Err(e) = CallArgsUnpack::unpack(&args, parser_state.codemap) { + parser_state.errors.push(e); + } + + Expr::Call(Box::new(f), args) + } +} + +/// Validate all statements only occur where they are allowed to. +pub(crate) fn validate_module(stmt: &AstStmt, parser_state: &mut ParserState) { + fn validate_params(params: &[AstParameter], parser_state: &mut ParserState) { + if !parser_state.dialect.enable_keyword_only_arguments { + for param in params { + if let ParameterP::NoArgs = ¶m.node { + parser_state.error( + param.span, + "* keyword-only-arguments is not allowed in this dialect", + ); } - Argument::KwArgs(_) => { - if stage == ArgsStage::Kwargs { - return err(arg.span, ArgumentDefinitionOrderError::MultipleKwargs); - } else { - stage = ArgsStage::Kwargs; - } + } + } + if !parser_state.dialect.enable_positional_only_arguments { + for param in params { + if let ParameterP::Slash = ¶m.node { + parser_state.error( + param.span, + "/ positional-only-arguments is not allowed in this dialect", + ); } } } - Ok(Expr::Call(Box::new(f), args)) + if let Err(e) = DefParams::unpack(params, parser_state.codemap) { + parser_state.errors.push(e); + } } -} -impl Stmt { - /// Validate all statements only occur where they are allowed to. - pub fn validate( - codemap: &CodeMap, + // Inside a for, we allow continue/break, unless we go beneath a def. + // Inside a def, we allow return. + // All load's must occur at the top-level. + // At the top-level we only allow for/if when the dialect permits it. + fn f( stmt: &AstStmt, - dialect: &Dialect, - ) -> Result<(), EvalException> { - // Inside a for, we allow continue/break, unless we go beneath a def. - // Inside a def, we allow return. - // All load's must occur at the top-level. - // At the top-level we only allow for/if when the dialect permits it. - fn f( - codemap: &CodeMap, - dialect: &Dialect, - stmt: &AstStmt, - top_level: bool, - inside_for: bool, - inside_def: bool, - ) -> Result<(), EvalException> { - let err = |x: anyhow::Error| Err(EvalException::new(x, stmt.span, codemap)); + parser_state: &mut ParserState, + top_level: bool, + inside_for: bool, + inside_def: bool, + ) { + let span = stmt.span; - match &stmt.node { - Stmt::Def(DefP { body, .. }) => f(codemap, dialect, body, false, false, true), - Stmt::For(ForP { body, .. 
}) => { - if top_level && !dialect.enable_top_level_stmt { - err(ValidateError::NoTopLevelFor.into()) - } else { - f(codemap, dialect, body, false, true, inside_def) - } + match &stmt.node { + Stmt::Def(DefP { params, body, .. }) => { + if !parser_state.dialect.enable_def { + parser_state.error(span, "`def` is not allowed in this dialect"); + } + validate_params(params, parser_state); + f(body, parser_state, false, false, true) + } + Stmt::For(ForP { body, .. }) => { + if top_level && !parser_state.dialect.enable_top_level_stmt { + parser_state.error(span, "`for` cannot be used outside `def` in this dialect") + } else { + f(body, parser_state, false, true, inside_def) } - Stmt::If(..) | Stmt::IfElse(..) => { - if top_level && !dialect.enable_top_level_stmt { - err(ValidateError::NoTopLevelIf.into()) - } else { - stmt.node.visit_stmt_result(|x| { - f(codemap, dialect, x, false, inside_for, inside_def) - }) - } + } + Stmt::If(..) | Stmt::IfElse(..) => { + if top_level && !parser_state.dialect.enable_top_level_stmt { + parser_state.error(span, "`if` cannot be used outside `def` in this dialect") + } else { + stmt.node + .visit_stmt(|x| f(x, parser_state, false, inside_for, inside_def)) + } + } + Stmt::Break if !inside_for => { + parser_state.error(span, "`break` cannot be used outside of a `for` loop") + } + Stmt::Continue if !inside_for => { + parser_state.error(span, "`continue` cannot be used outside of a `for` loop") + } + Stmt::Return(_) if !inside_def => { + parser_state.error(span, "`return` cannot be used outside of a `def` function") + } + Stmt::Load(..) => { + if !top_level { + parser_state.error(span, "`load` must only occur at the top of a module"); } - Stmt::Break if !inside_for => err(ValidateError::BreakOutsideLoop.into()), - Stmt::Continue if !inside_for => err(ValidateError::ContinueOutsideLoop.into()), - Stmt::Return(_) if !inside_def => err(ValidateError::ReturnOutsideDef.into()), - Stmt::Load(..) => { - if !top_level { - return err(ValidateError::LoadNotTop.into()); - } - if !dialect.enable_load { - return err(ValidateError::Load.into()); - } - Ok(()) + if !parser_state.dialect.enable_load { + parser_state.error(span, "`load` is not allowed in this dialect"); } - _ => stmt.node.visit_stmt_result(|x| { - f(codemap, dialect, x, top_level, inside_for, inside_def) - }), } + _ => stmt + .node + .visit_stmt(|x| f(x, parser_state, top_level, inside_for, inside_def)), } + } - fn expr(expr: &AstExpr, dialect: &Dialect, codemap: &CodeMap) -> Result<(), EvalException> { - if let Expr::Literal(AstLiteral::Ellipsis) = &expr.node { - if dialect.enable_types == DialectTypes::Disable { - return Err(EvalException::new( - ValidateError::Ellipsis.into(), - expr.span, - codemap, - )); + fn expr(x: &AstExpr, parser_state: &mut ParserState) { + match &x.node { + Expr::Literal(AstLiteral::Ellipsis) => { + if parser_state.dialect.enable_types == DialectTypes::Disable { + parser_state.error(x.span, "`...` is not allowed in this dialect"); + } + } + Expr::Lambda(LambdaP { params, .. 
}) => { + if !parser_state.dialect.enable_lambda { + parser_state.error(x.span, "`lambda` is not allowed in this dialect"); } + validate_params(params, parser_state); } - Ok(()) + _ => {} } + x.node.visit_expr(|x| expr(x, parser_state)); + } - f(codemap, dialect, stmt, true, false, false)?; - - stmt.visit_expr_result(|x| expr(x, dialect, codemap))?; + f(stmt, parser_state, true, false, false); - Ok(()) - } + stmt.visit_expr(|x| expr(x, parser_state)); } diff --git a/starlark-rust/starlark_syntax/testcases/parse/README.md b/starlark-rust/starlark_syntax/testcases/parse/README.md index 32c0b39254d2a..7f59d8c1bcd0a 100644 --- a/starlark-rust/starlark_syntax/testcases/parse/README.md +++ b/starlark-rust/starlark_syntax/testcases/parse/README.md @@ -1,6 +1,7 @@ # Parsing test cases -A set of `.bzl` files taken from various open-source projects to test the Starlark parser against real world cases. +A set of `.bzl` files taken from various open-source projects to test the +Starlark parser against real world cases. -Files are marked generated, that was done to mute linters. There's -no automation to regenerate these files. +Files are marked generated, that was done to mute linters. There's no automation +to regenerate these files. diff --git a/starlark-rust/vscode/README.md b/starlark-rust/vscode/README.md index c665bbf342aa3..06714851fe935 100644 --- a/starlark-rust/vscode/README.md +++ b/starlark-rust/vscode/README.md @@ -1,18 +1,27 @@ # Starlark VS Code LSP extension -A VSCode LSP extension that talks over stdin/stdout to a binary. This can either be the starlark binary itself, or any binary that has implemented `starlark::lsp::server::LspContext` and runs `starlark::lsp::server::stdio_server()`. +A VSCode LSP extension that talks over stdin/stdout to a binary. This can either +be the starlark binary itself, or any binary that has implemented +`starlark::lsp::server::LspContext` and runs +`starlark::lsp::server::stdio_server()`. -If using another binary, the settings to be aware of are `starlark.lspPath` (the binary path) and `starlark.lspArguments` (the arguments to that binary). These are available in the VSCode extension settings UI. +If using another binary, the settings to be aware of are `starlark.lspPath` (the +binary path) and `starlark.lspArguments` (the arguments to that binary). These +are available in the VSCode extension settings UI. Based on a combination of: -* Tutorial at https://code.visualstudio.com/api/language-extensions/language-server-extension-guide -* Code for the tutorial at https://github.com/microsoft/vscode-extension-samples/tree/master/lsp-sample -* Syntax files from https://github.com/phgn0/vscode-starlark (which are the Microsoft Python ones with minor tweaks) +- Tutorial at + https://code.visualstudio.com/api/language-extensions/language-server-extension-guide +- Code for the tutorial at + https://github.com/microsoft/vscode-extension-samples/tree/master/lsp-sample +- Syntax files from https://github.com/phgn0/vscode-starlark (which are the + Microsoft Python ones with minor tweaks) ## Pre-requisites -You need to have npm v7+ installed. Afterwards, run `npm install` in this folder and in `client`. +You need to have npm v7+ installed. Afterwards, run `npm install` in this folder +and in `client`. ## Debugging @@ -28,16 +37,22 @@ You need to have npm v7+ installed. Afterwards, run `npm install` in this folder - Follow steps in Pre-requisites section. - Run `npm install vsce` - Run `npm exec vsce package` -- In VS Code, go to Extensions, click on the "..." 
button in the Extensions bar, select "Install from VSIX" and then select the `starlark-1.0.0.vsix` file that was produced. -- Build the starlark binary with `cargo build --bin=starlark` and then do one of: - - Put it on your `$PATH`, e.g. `cp $CARGO_TARGET_DIR/debug/starlark ~/.cargo/bin/starlark`. - - Configure the setting `starlark.lspPath` for this extension to point to the starlark binary. e.g. `$CARGO_TARGET_DIR/debug/starlark`. +- In VS Code, go to Extensions, click on the "..." button in the Extensions bar, + select "Install from VSIX" and then select the `starlark-1.0.0.vsix` file that + was produced. +- Build the starlark binary with `cargo build --bin=starlark` and then do one + of: + - Put it on your `$PATH`, e.g. + `cp $CARGO_TARGET_DIR/debug/starlark ~/.cargo/bin/starlark`. + - Configure the setting `starlark.lspPath` for this extension to point to the + starlark binary. e.g. `$CARGO_TARGET_DIR/debug/starlark`. ## Updating -Every few months security advisories will arrive about pinned versions of packages. +Every few months security advisories will arrive about pinned versions of +packages. -* `npm audit` to see which packages have security updates. -* `npm audit fix` to fix those issues. -* Try `npm audit`, if it still has issues run `npm update`. -* `npm exec vsce package` to confirm everything still works. +- `npm audit` to see which packages have security updates. +- `npm audit fix` to fix those issues. +- Try `npm audit`, if it still has issues run `npm update`. +- `npm exec vsce package` to confirm everything still works. diff --git a/starlark-rust/vscode/package.json b/starlark-rust/vscode/package.json index 0366c2eb9d349..c0188fa583e4e 100644 --- a/starlark-rust/vscode/package.json +++ b/starlark-rust/vscode/package.json @@ -9,7 +9,7 @@ "version": "1.0.0", "repository": { "type": "git", - "url": "https://github.com/facebookexperimental/starlark-rust" + "url": "https://github.com/facebook/starlark-rust" }, "publisher": "facebook", "categories": [], diff --git a/superconsole/BUCK b/superconsole/BUCK index 6a66ee02ee740..4b781919770de 100644 --- a/superconsole/BUCK +++ b/superconsole/BUCK @@ -1,6 +1,5 @@ load("@fbcode_macros//build_defs:rust_binary.bzl", "rust_binary") load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") diff --git a/superconsole/Cargo.toml b/superconsole/Cargo.toml index e7de179cd9899..1e70813d3b3f3 100644 --- a/superconsole/Cargo.toml +++ b/superconsole/Cargo.toml @@ -1,25 +1,24 @@ [package] -name = "superconsole" -version = "0.2.0" -edition = "2021" -license = "MIT OR Apache-2.0" authors = ["Meta"] +categories = ["command-line-interface"] description = "A simple but powerful Text-based User Interface (TUI) framework" -repository = "https://github.com/facebookincubator/superconsole" documentation = "https://docs.rs/superconsole" -categories = ["command-line-interface"] +edition = "2021" keywords = ["TUI"] +license = "MIT OR Apache-2.0" +name = "superconsole" +repository = "https://github.com/facebookincubator/superconsole" +version = "0.2.0" [dependencies] anyhow = "1.0.65" +crossbeam-channel = "0.5" crossterm = "0.27" -itertools = "0.10" -unicode-segmentation = "1.7" -thiserror = "1.0.36" +itertools = "0.13.0" termwiz = "0.18" -crossbeam-channel = "0.5" -crossbeam-epoch = "0.9.7" +thiserror = "1.0.36" +unicode-segmentation = "1.7" [dev-dependencies] -tokio = { version = "1.5", features = ["macros", "rt-multi-thread", "time"]} -derive_more = "0.99" 
+derive_more = { version = "1.0.0", features = ["full"] } +tokio = { version = "1.5", features = ["macros", "rt-multi-thread", "time"] } diff --git a/superconsole/README.md b/superconsole/README.md index e1474515d0790..81b465af0bdf0 100644 --- a/superconsole/README.md +++ b/superconsole/README.md @@ -1,14 +1,25 @@ # A component-based framework for building Rust Text-based User Interfaces (TUIs) -There are several copies of this repo on GitHub, [facebookincubator/superconsole](https://github.com/facebookincubator/superconsole) is the canonical one. +There are several copies of this repo on GitHub, +[facebookincubator/superconsole](https://github.com/facebookincubator/superconsole) +is the canonical one. -The superconsole framework provides a powerful line based abstraction over text based rendering to the terminal. It also provides basic building blocks like line manipulation, and a higher level of composable components. A base set of "batteries" components are included to help developers create Text-based User Interfaces (TUIs) as quickly as possible. +The superconsole framework provides a powerful line based abstraction over text +based rendering to the terminal. It also provides basic building blocks like +line manipulation, and a higher level of composable components. A base set of +"batteries" components are included to help developers create Text-based User +Interfaces (TUIs) as quickly as possible. -The design choices that underly superconsole are selected to prioritize testability, ease of composition, and flexibility. +The design choices that underly superconsole are selected to prioritize +testability, ease of composition, and flexibility. -Superconsole also offers stylization, including italics, underlining, bolding, and coloring text. Furthermore, relying on crossterm ensures that it is compatible with Windows, Unix, and MacOS. +Superconsole also offers stylization, including italics, underlining, bolding, +and coloring text. Furthermore, relying on crossterm ensures that it is +compatible with Windows, Unix, and MacOS. -Finally, superconsole delineates between rendering logic and program state - each render call accepts an immutable reference to state, which components may use to inject state into their otherwise immutable rendering logic. +Finally, superconsole delineates between rendering logic and program state - +each render call accepts an immutable reference to state, which components may +use to inject state into their otherwise immutable rendering logic. ## Demo @@ -17,7 +28,6 @@ Finally, superconsole delineates between rendering logic and program state - eac ## Examples ```rust -use std::convert::TryInto; use superconsole::components::bordering::{Bordered, BorderedSpec}; use superconsole::{Component, Dimensions, DrawMode, Lines, SuperConsole}; @@ -46,4 +56,5 @@ See the [CONTRIBUTING](CONTRIBUTING.md) file for how to help out. ## License -Superconsole is both MIT and Apache License, Version 2.0 licensed, as found in the [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) files. +Superconsole is both MIT and Apache License, Version 2.0 licensed, as found in +the [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) files. 
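Beyond the `Bordered` snippet in the README, a rough end-to-end sketch against the 0.2-style API (the `Greeter` component is invented; check the trait and constructor signatures against the crate before relying on them):

```rust
use superconsole::Component;
use superconsole::Dimensions;
use superconsole::DrawMode;
use superconsole::Line;
use superconsole::Lines;
use superconsole::SuperConsole;

/// A toy component that renders one fixed line each frame.
struct Greeter;

impl Component for Greeter {
    fn draw_unchecked(&self, _dimensions: Dimensions, _mode: DrawMode) -> anyhow::Result<Lines> {
        Ok(Lines(vec![Line::unstyled("hello from superconsole")?]))
    }
}

fn main() -> anyhow::Result<()> {
    // `SuperConsole::new` returns `None` when the terminal is not compatible.
    let mut console = SuperConsole::new().ok_or_else(|| anyhow::anyhow!("not a TTY"))?;
    console.render(&Greeter)?;
    console.finalize(&Greeter)?; // draw the final frame and release the terminal
    Ok(())
}
```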
diff --git a/superconsole/examples/hello_world.rs b/superconsole/examples/hello_world.rs index ada7ca2397909..b33aa858c6a99 100644 --- a/superconsole/examples/hello_world.rs +++ b/superconsole/examples/hello_world.rs @@ -66,9 +66,6 @@ async fn task_that_takes_some_time() -> String { #[tokio::main] async fn main() { - // set up state to be used for rendering - tokio::time::sleep(std::time::Duration::from_secs(1)).await; - // set up future to periodically cause re-render let delay = Duration::from_secs(1); let mut interval = time::interval(delay); diff --git a/superconsole/examples/readme.rs b/superconsole/examples/readme.rs index 94b18b3a6ddf9..340b62ecc7bf5 100644 --- a/superconsole/examples/readme.rs +++ b/superconsole/examples/readme.rs @@ -9,8 +9,6 @@ // If this code needs fixing, make sure you fix the README.md too! -use std::convert::TryInto; - use superconsole::components::bordering::Bordered; use superconsole::components::bordering::BorderedSpec; use superconsole::Component; diff --git a/superconsole/oss/.github/workflows/ci.yml b/superconsole/oss/.github/workflows/ci.yml index f450c48522f41..b4a37cf402071 100644 --- a/superconsole/oss/.github/workflows/ci.yml +++ b/superconsole/oss/.github/workflows/ci.yml @@ -14,7 +14,7 @@ jobs: os: [ubuntu-latest, windows-latest, macOS-latest] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable with: components: clippy, rustfmt diff --git a/superconsole/oss/CHANGELOG.md b/superconsole/oss/CHANGELOG.md index 181ce2114362c..1a9232718f385 100644 --- a/superconsole/oss/CHANGELOG.md +++ b/superconsole/oss/CHANGELOG.md @@ -2,13 +2,16 @@ ## 0.2.0 (June 5, 2023) -The major change from the last release is that `State` has been deleted in favor of `render` taking a reference to the components. In addition there were a number of API adjustments, including: +The major change from the last release is that `State` has been deleted in favor +of `render` taking a reference to the components. In addition there were a +number of API adjustments, including: -* Add the type `Lines` wrapping `Vec`. -* Add `Span::new_colored` and `Span::new_colored_lossy`. -* Remove the `line!` macro and add more utilities to the `Line` type, making its internals private. -* Rename `x`/`y` to `width`/`height` where it makes sense. +- Add the type `Lines` wrapping `Vec`. +- Add `Span::new_colored` and `Span::new_colored_lossy`. +- Remove the `line!` macro and add more utilities to the `Line` type, making its + internals private. +- Rename `x`/`y` to `width`/`height` where it makes sense. ## 0.1.0 (February 3, 2022) -* Initial version. +- Initial version. diff --git a/superconsole/oss/CONTRIBUTING.md b/superconsole/oss/CONTRIBUTING.md index 495aa3d935eb3..361b7c7099ed5 100644 --- a/superconsole/oss/CONTRIBUTING.md +++ b/superconsole/oss/CONTRIBUTING.md @@ -1,12 +1,13 @@ # Contributing to Superconsole -We want to make contributing to this project as easy and transparent as possible. +We want to make contributing to this project as easy and transparent as +possible. ## Our Development Process -Superconsole is currently developed in Facebook's internal repositories and then exported -out to GitHub by a Facebook team member; however, we invite you to submit pull -requests as described below. +Superconsole is currently developed in Facebook's internal repositories and then +exported out to GitHub by a Facebook team member; however, we invite you to +submit pull requests as described below. 
## Pull Requests @@ -42,5 +43,6 @@ Follow the automatic `rust fmt` configuration. ## License By contributing to Superconsole, you agree that your contributions will be -licensed under both the [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) -files in the root directory of this source tree. +licensed under both the [LICENSE-MIT](LICENSE-MIT) and +[LICENSE-APACHE](LICENSE-APACHE) files in the root directory of this source +tree. diff --git a/superconsole/src/builder.rs b/superconsole/src/builder.rs index af8a0237c9ba8..f241d4eb123fe 100644 --- a/superconsole/src/builder.rs +++ b/superconsole/src/builder.rs @@ -62,7 +62,7 @@ impl Builder { } fn build_inner(self, fallback_size: Option<Dimensions>) -> anyhow::Result<SuperConsole> { - Ok(SuperConsole::new_internal(fallback_size, self.output()?)) + Ok(SuperConsole::new_with_output(fallback_size, self.output()?)) } fn output(self) -> anyhow::Result<Box<dyn SuperConsoleOutput>> { diff --git a/superconsole/src/components.rs b/superconsole/src/components.rs index 05c4bcdbb1825..415a3df0b1c9b 100644 --- a/superconsole/src/components.rs +++ b/superconsole/src/components.rs @@ -16,7 +16,6 @@ pub use alignment::Aligned; pub use blank::Blank; pub use bordering::Bordered; pub use bounding::Bounded; -pub(crate) use canvas::Canvas; pub use padding::Padded; pub use splitting::Split; @@ -29,7 +28,6 @@ pub mod alignment; mod blank; pub mod bordering; mod bounding; -mod canvas; mod draw_horizontal; mod draw_vertical; pub(crate) mod echo; diff --git a/superconsole/src/components/alignment.rs b/superconsole/src/components/alignment.rs index 8ebee014b7cf2..1839b3b921c6a 100644 --- a/superconsole/src/components/alignment.rs +++ b/superconsole/src/components/alignment.rs @@ -41,9 +41,9 @@ pub enum HorizontalAlignmentKind { Right, } -/// The [`Aligned`](Aligned) [`Component`](Component) can be used to specify in which part of the view the content should live. -/// The [`HorizontalAlignmentKind`](HorizontalAlignmentKind) enum specifies the location relative to the x-axis. -/// The [`VerticalAlignmentKind`](VerticalAlignmentKind) enum specifies the location relative to the y-axis. +/// The [`Aligned`] [`Component`] can be used to specify in which part of the view the content should live. +/// The [`HorizontalAlignmentKind`] enum specifies the location relative to the x-axis. +/// The [`VerticalAlignmentKind`] enum specifies the location relative to the y-axis.
#[derive(Debug)] pub struct Aligned<C: Component = Box<dyn Component>> { pub child: C, @@ -136,6 +136,7 @@ mod tests { use crate::Lines; #[derive(AsRef, Debug)] + #[allow(dead_code)] struct Msg(Lines); #[test] diff --git a/superconsole/src/components/blank.rs b/superconsole/src/components/blank.rs index bc375281e7206..55d68f9f076b7 100644 --- a/superconsole/src/components/blank.rs +++ b/superconsole/src/components/blank.rs @@ -36,6 +36,7 @@ mod tests { use crate::Lines; #[derive(AsRef, Debug)] + #[allow(dead_code)] struct EchoMsg(Lines); #[test] diff --git a/superconsole/src/components/bordering.rs b/superconsole/src/components/bordering.rs index a6a2d13296c75..dc48ecdcc8456 100644 --- a/superconsole/src/components/bordering.rs +++ b/superconsole/src/components/bordering.rs @@ -131,7 +131,7 @@ impl Component for Bordered { } if let Some(bottom) = &self.border.bottom { let lines = construct_vertical_padding(bottom.clone(), output.max_line_length()); - output.0.extend(lines); + output.extend(lines); } Ok(output) @@ -146,6 +146,7 @@ mod tests { use crate::components::echo::Echo; #[derive(AsRef, Debug)] + #[allow(dead_code)] struct Msg(Lines); #[test] diff --git a/superconsole/src/components/bounding.rs b/superconsole/src/components/bounding.rs index 67af524637048..f048bf1a1d76e 100644 --- a/superconsole/src/components/bounding.rs +++ b/superconsole/src/components/bounding.rs @@ -48,6 +48,7 @@ mod tests { use crate::Span; #[derive(AsRef, Debug)] + #[allow(dead_code)] struct Msg(Lines); #[test] diff --git a/superconsole/src/components/canvas.rs b/superconsole/src/components/canvas.rs deleted file mode 100644 index 45ffaf1da35ed..0000000000000 --- a/superconsole/src/components/canvas.rs +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! The root component which manages all other components. -//! This is a fake component since it 1) manages state and 2) cannot be made by users and 3) is heavily coupled with [`superconsole`](crate::SuperConsole). -//! Do not make this somewhere down the component hierarchy unless you have a good reason for it. - -use std::cell::Cell; - -use crossterm::cursor::MoveToColumn; -use crossterm::cursor::MoveUp; -use crossterm::terminal::Clear; -use crossterm::terminal::ClearType; -use crossterm::QueueableCommand; - -use crate::components::Dimensions; -use crate::components::DrawMode; -use crate::Component; -use crate::Lines; - -/// The root component which manages all other components. -#[derive(Debug, Default)] -pub(crate) struct Canvas { - // used to overwrite previous canvas buffer - last_lines: Cell<u16>, -} - -impl Canvas { - /// A passthrough method that resizes the Canvas to reflect the size of the root. - /// Allows dynamic resizing. - /// Cuts off any lines that are too long for a single row - pub(crate) fn draw( - &self, - root: &dyn Component, - - dimensions: Dimensions, - mode: DrawMode, - ) -> anyhow::Result<Lines> { - let mut output = root.draw(dimensions, mode)?; - // We don't trust the child to not truncate the result. - output.shrink_lines_to_dimensions(dimensions); - self.last_lines.set(output.len().try_into()?); - Ok(output) - } -} - -impl Canvas { - /// Canvas only has a single child.
- /// It essentially functions as a passthrough - an invisible window which handles sizing and re-drawing correctly. - pub(crate) fn new() -> Self { - Self { - ..Default::default() - } - } - - /// The first half of drawing. It moves the buffer up to be overwritten and sets the length to 0. - /// This is used to clear the scratch area so that any possibly emitted messages can write over it. - pub(crate) fn move_up(&self, writer: &mut Vec<u8>) -> anyhow::Result<()> { - let len = self.last_lines.take(); - if len != 0 { - writer.queue(MoveUp(len))?; - } - writer.queue(MoveToColumn(0))?; - - Ok(()) - } - - /// Clears the canvas. - pub fn clear(&self, writer: &mut Vec<u8>) -> anyhow::Result<()> { - self.move_up(writer)?; - writer.queue(Clear(ClearType::FromCursorDown))?; - - Ok(()) - } -} diff --git a/superconsole/src/components/draw_vertical.rs b/superconsole/src/components/draw_vertical.rs index eaef1dca43473..39fa6262e09a0 100644 --- a/superconsole/src/components/draw_vertical.rs +++ b/superconsole/src/components/draw_vertical.rs @@ -33,14 +33,14 @@ impl DrawVertical { /// New component `draw` is called with remaining dimensions. pub fn draw(&mut self, component: &dyn Component, mode: DrawMode) -> anyhow::Result<()> { // We call `draw` even if no space is left, but maybe we should not. - let mut output = component.draw( + let output = component.draw( Dimensions { width: self.dim.width, height: self.dim.height.saturating_sub(self.lines.0.len()), }, mode, )?; - self.lines.0.append(&mut output.0); + self.lines.extend(output); Ok(()) } diff --git a/superconsole/src/components/padding.rs b/superconsole/src/components/padding.rs index 03167e741d5ae..8fbe6379205a7 100644 --- a/superconsole/src/components/padding.rs +++ b/superconsole/src/components/padding.rs @@ -13,7 +13,7 @@ use crate::components::DrawMode; use crate::Component; use crate::Lines; -/// The `Padded` [`Component`](Component) wraps its child by padding left, right, above, and below its content. +/// The `Padded` [`Component`] wraps its child by padding left, right, above, and below its content. /// This can be used to shift the content to a different location and ensure that following content comes after a certain distance. /// It is worth noting that this component will also *truncate* any content that is too long to fit in the given window at draw time. /// However, components are expected to constrain themselves to the given window, anyway.
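A recurring theme in the hunks above and below is replacing direct pokes at the `Lines` tuple field (`output.0.extend(...)`, `output.0.append(...)`) with the new `Lines::extend` helper introduced later in `content/lines.rs`. A minimal sketch of the resulting call style, using only helpers visible in this diff (`extend` and `pad_lines_bottom`; `len` being public is an assumption here):

```rust
use superconsole::Lines;

fn build_buffer() -> anyhow::Result<Lines> {
    let mut output = Lines(vec![vec!["header"].try_into()?]);
    // The helper keeps callers from reaching into the tuple field `output.0`;
    // `Lines` itself is an iterator of `Line`, so it can be passed directly.
    output.extend(Lines(vec![vec!["body"].try_into()?]));
    // Pad to a fixed height with empty lines at the bottom.
    output.pad_lines_bottom(2);
    assert_eq!(output.len(), 4); // header + body + 2 blank lines
    Ok(output)
}
```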
@@ -85,6 +85,7 @@ mod tests { use crate::Lines; #[derive(Debug, AsRef)] + #[allow(dead_code)] struct Msg(Lines); #[test] diff --git a/superconsole/src/components/splitting.rs b/superconsole/src/components/splitting.rs index 9b026506fe141..e934e8994384e 100644 --- a/superconsole/src/components/splitting.rs +++ b/superconsole/src/components/splitting.rs @@ -175,11 +175,14 @@ mod tests { use crate::Lines; #[derive(AsRef, Debug)] + #[allow(dead_code)] struct Echo1(Lines); #[derive(AsRef, Debug)] + #[allow(dead_code)] struct Echo2(Lines); #[derive(AsRef, Debug)] + #[allow(dead_code)] struct Echo3(Lines); mod horizontal { @@ -337,7 +340,7 @@ mod tests { vec!["Line 1"].try_into().unwrap(), vec!["Line 2222"].try_into().unwrap(), ]); - let mut bottom = Lines(vec![ + let bottom = Lines(vec![ vec!["Line 11"].try_into().unwrap(), vec!["Line 12"].try_into().unwrap(), vec!["Last line just kiddi"].try_into().unwrap(), @@ -349,9 +352,9 @@ ); let mut output = top; - output.0.extend(iter::repeat(Line::default()).take(8)); - output.0.append(&mut bottom.0); - output.0.extend(iter::repeat(Line::default()).take(7)); + output.extend(iter::repeat(Line::default()).take(8)); + output.extend(bottom); + output.extend(iter::repeat(Line::default()).take(7)); let drawn = splitter .draw(Dimensions::new(20, 20), DrawMode::Normal) @@ -369,7 +372,7 @@ vec!["Line 1"].try_into().unwrap(), vec!["Line 2222"].try_into().unwrap(), ]); - let mut bottom = Lines(vec![ + let bottom = Lines(vec![ vec!["Line 11"].try_into().unwrap(), vec!["Line 12"].try_into().unwrap(), vec!["Last line just kiddi"].try_into().unwrap(), @@ -381,7 +384,7 @@ ); let mut output = top; - output.0.append(&mut bottom.0); + output.extend(bottom); let drawn = splitter .draw(Dimensions::new(20, 20), DrawMode::Normal) @@ -445,7 +448,7 @@ #[test] fn test_no_children() { - let lines = Split::::new(vec![], Direction::Horizontal, SplitKind::Equal) + let lines = Split::::new(Vec::new(), Direction::Horizontal, SplitKind::Equal) .draw(Dimensions::new(20, 20), DrawMode::Normal) .unwrap(); assert!(lines.is_empty()); diff --git a/superconsole/src/content/lines.rs b/superconsole/src/content/lines.rs index 68012ea9c9513..e708f38cb6ec0 100644 --- a/superconsole/src/content/lines.rs +++ b/superconsole/src/content/lines.rs @@ -154,6 +154,10 @@ impl Lines { self.0.push(line); } + pub fn extend(&mut self, lines: impl IntoIterator<Item = Line>) { + self.0.extend(lines); + } + pub fn iter(&self) -> impl ExactSizeIterator<Item = &Line> { self.0.iter() } @@ -227,8 +231,8 @@ impl Lines { /// Extends the Lines list by the given length, adding empty lines at the bottom pub fn pad_lines_bottom(&mut self, amount: usize) { - let mut extender = iter::repeat(Line::default()).take(amount); - self.0.extend(&mut extender); + let extender = iter::repeat(Line::default()).take(amount); + self.extend(extender); } /// Same functionality as `pad_lines_bottom` but on the top. @@ -263,12 +267,11 @@ impl Lines { self.truncate_lines_bottom(dimensions.height); } - /// Formats and renders all lines to `stdout`. - /// Notably, this *queues* the lines for rendering. You must flush the buffer. + /// Like `render`, but with a limit. /// If a limit is specified, no more than that amount will be drained. /// The limit is on the number of *lines*, **NOT** the number of *bytes*. /// Care should be taken with calling a limit of 0 - this will cause no lines to render and the buffer to never be drained.
- pub(crate) fn render( + pub(crate) fn render_with_limit( &mut self, writer: &mut Vec<u8>, limit: Option<usize>, @@ -278,10 +281,30 @@ impl Lines { for line in self.0.drain(..amt) { line.render_with_clear_and_nl(writer)?; } + Ok(()) + } + /// Formats and renders all lines to `stdout`. + /// Notably, this *queues* the lines for rendering. You must flush the buffer. + pub(crate) fn render_from_line( + &self, + writer: &mut Vec<u8>, + start: usize, + ) -> anyhow::Result<()> { + for line in self.0.iter().skip(start) { + line.render_with_clear_and_nl(writer)?; + } Ok(()) } + pub(crate) fn lines_equal(&self, other: &Self) -> usize { + self.0 + .iter() + .zip(other.0.iter()) + .take_while(|(l1, l2)| l1 == l2) + .count() + } + /// Returns the maximum line width and the number of lines. /// This corresponds to how much space a justified version of the output would take. pub fn dimensions(&self) -> anyhow::Result<Dimensions> { @@ -356,9 +379,6 @@ impl IntoIterator for Lines { #[cfg(test)] mod tests { - use crossterm::style::Attribute; - use crossterm::style::Color; - use super::*; #[test] @@ -531,7 +551,6 @@ mod tests { assert_eq!(test, expected); } - #[allow(clippy::from_iter_instead_of_collect)] // More readable this way. #[test] fn test_colored_from_multiline_string() { // Lots of little things we check in here, including that we persist state diff --git a/superconsole/src/dimensions.rs b/superconsole/src/dimensions.rs index 9dd85ec6e938e..1e172a9f65a2f 100644 --- a/superconsole/src/dimensions.rs +++ b/superconsole/src/dimensions.rs @@ -92,7 +92,6 @@ impl Dimensions { /// Finds the size of a [`Component`](crate::Component)'s output in a given dimension. /// Truncates the size to at most `u16::MAX`. // ptr args allowed because line trait only implemented on `Vec<Line>` - #[allow(clippy::ptr_arg)] pub fn dimension_from_output_truncated(output: &Lines, direction: Direction) -> usize { match direction { Direction::Horizontal => output.max_line_length(), diff --git a/superconsole/src/lib.rs b/superconsole/src/lib.rs index fb052c47474ad..5ef8f780259dd 100644 --- a/superconsole/src/lib.rs +++ b/superconsole/src/lib.rs @@ -8,18 +8,21 @@ */ //! The superconsole crate provides a handler and building blocks for powerful, yet minimally intrusive TUIs. -//! Built on top of [`crossterm`](crossterm), it cross-compiles on Windows 7+, Linux, and macOS. +//! Built on top of [`crossterm`], it cross-compiles on Windows 7+, Linux, and macOS. //! -//! Rendering is handled by [`SuperConsole`](SuperConsole), which draws to [`stdout`](std::io::stdout). +//! Rendering is handled by [`SuperConsole`], which draws to [`stdout`](std::io::stdout). //! The caller is responsible for re-rendering whenever necessary. //! User input will cause aberrations in output; similarly, one should also not produce output from other sources while superconsole is active. //! //! The rendering can be divided into two principal components: +//! //! * In the *scratch* area, the previous content is overwritten at each render. +//! //! * In the *emitted* area, lines scroll away above the scratch with various diagnostic output. +//! //! Components live in the scratch area. //! -//! A set of pre-baked composition and testing oriented components are provided in the [`components`](components) module. +//! A set of pre-baked composition and testing oriented components are provided in the [`components`] module.
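The `lines_equal`/`render_from_line` pair added above is the heart of the new damage-limited rendering: count how many leading canvas lines are unchanged, skip repainting them, and redraw only from the first changed row. A standalone sketch of the prefix count, mirroring the `zip`/`take_while`/`count` logic (plain `&str` rows stand in for `Line`):

```rust
/// Standalone illustration of the common-prefix count `lines_equal` performs.
fn lines_equal(previous: &[&str], next: &[&str]) -> usize {
    previous
        .iter()
        .zip(next.iter())
        .take_while(|(a, b)| a == b)
        .count()
}

fn main() {
    let previous = ["http://example.com/ link", "number 1"];
    let next = ["http://example.com/ link", "number 2"];
    // One leading line matches, so the renderer only moves the cursor up past
    // the single changed row and redraws starting at index 1, leaving the URL
    // untouched (no flicker in terminals that linkify it).
    assert_eq!(lines_equal(&previous, &next), 1);
}
```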
pub use components::Component; pub use components::DrawMode; diff --git a/superconsole/src/output.rs b/superconsole/src/output.rs index 21dce4201008f..0e5aae71ce0fb 100644 --- a/superconsole/src/output.rs +++ b/superconsole/src/output.rs @@ -183,9 +183,7 @@ impl SuperConsoleOutput for NonBlockingSuperConsoleOutput { } #[cfg(test)] -mod test { - use crossbeam_channel::Receiver; - +mod tests { use super::*; /// A test writer that just sends into a channel. Lets us block / unblock the output to test @@ -245,7 +243,7 @@ } // Likewise, we expect that sending output and finalizing would fail. - assert!(output.output(vec![]).is_err()); + assert!(output.output(Vec::new()).is_err()); assert!(Box::new(output).finalize().is_err()); Ok(()) diff --git a/superconsole/src/superconsole.rs b/superconsole/src/superconsole.rs index 0ec0c3751c99f..96a6c11c5c54c 100644 --- a/superconsole/src/superconsole.rs +++ b/superconsole/src/superconsole.rs @@ -11,13 +11,14 @@ use std::cmp; use std::env; use std::io; +use crossterm::cursor::MoveToColumn; +use crossterm::cursor::MoveUp; use crossterm::terminal::Clear; use crossterm::terminal::ClearType; use crossterm::tty::IsTty; use crossterm::QueueableCommand; use crate::ansi_support::enable_ansi_support; -use crate::components::Canvas; use crate::components::Component; use crate::components::DrawMode; use crate::content::Line; @@ -35,12 +36,16 @@ const MAX_GRAPHEME_BUFFER: usize = 1000000; /// while a log area of emitted messages is produced above. /// Producing output from sources other than SuperConsole will break the TUI. pub struct SuperConsole { + /// The lines that were used to render the canvas last time. + canvas_contents: Lines, + /// Buffer storing the lines we should emit next time we render. to_emit: Lines, - // A default screen size to use if the size cannot be fetched - // from the terminal. This generally is only used for testing - // situations. + /// A default screen size to use if the size cannot be fetched + /// from the terminal. This generally is only used for testing + /// situations. fallback_size: Option<Dimensions>, + /// The terminal handle to write a buffer to the screen. + /// All IO goes through this handle. pub(crate) output: Box<dyn SuperConsoleOutput>, } @@ -48,7 +53,7 @@ impl SuperConsole { /// Build a new SuperConsole with a root component. pub fn new() -> Option<Self> { Self::compatible().then(|| { - Self::new_internal( + Self::new_with_output( None, Box::new(BlockingSuperConsoleOutput::new(Box::new(io::stderr()))), ) @@ -58,18 +63,18 @@ impl SuperConsole { /// Force a new SuperConsole to be built with a root component, regardless of /// whether the tty is compatible pub fn forced_new(fallback_size: Dimensions) -> Self { - Self::new_internal( + Self::new_with_output( Some(fallback_size), Box::new(BlockingSuperConsoleOutput::new(Box::new(io::stderr()))), ) } - pub(crate) fn new_internal( + pub(crate) fn new_with_output( fallback_size: Option<Dimensions>, output: Box<dyn SuperConsoleOutput>, ) -> Self { Self { - root: Canvas::new(), + canvas_contents: Lines::new(), to_emit: Lines::new(), fallback_size, output, } } @@ -120,7 +125,6 @@ impl SuperConsole { pub fn finalize_with_mode( mut self, root: &dyn Component, - mode: DrawMode, ) -> anyhow::Result<()> { self.render_with_mode(root, mode)?; @@ -140,8 +144,8 @@ impl SuperConsole { /// Queues the passed lines to be drawn on the next render. /// The lines *will not* appear until the next render is called.
- pub fn emit(&mut self, mut lines: Lines) { - self.to_emit.0.append(&mut lines.0); + pub fn emit(&mut self, lines: Lines) { + self.to_emit.extend(lines); } fn size(&self) -> anyhow::Result<Dimensions> { @@ -154,10 +158,32 @@ } } + /// The first step of drawing. It moves the cursor up so the old canvas can be overwritten. + /// This is used to clear the scratch area so that any possibly emitted messages can write over it. + pub(crate) fn clear_canvas_pre(writer: &mut Vec<u8>, mut height: usize) -> anyhow::Result<()> { + while height > 0 { + // We can only move up at most u16 at a time, so repeat until we move up enough + let step = height.try_into().unwrap_or(u16::MAX); + writer.queue(MoveUp(step))?; + height -= step as usize; + } + writer.queue(MoveToColumn(0))?; + Ok(()) + } + + /// The last step of drawing. Ensures there is nothing else below on the console. + /// Important in case the new canvas was smaller than the last. + pub(crate) fn clear_canvas_post(writer: &mut Vec<u8>) -> anyhow::Result<()> { + writer.queue(Clear(ClearType::FromCursorDown))?; + Ok(()) + } + /// Clears the canvas portion of the superconsole. pub fn clear(&mut self) -> anyhow::Result<()> { - let mut buffer = vec![]; - self.root.clear(&mut buffer)?; + let mut buffer = Vec::new(); + Self::clear_canvas_pre(&mut buffer, self.canvas_contents.len())?; + self.canvas_contents = Lines::new(); + Self::clear_canvas_post(&mut buffer)?; self.output.output(buffer) } @@ -180,38 +206,45 @@ &mut self, buffer: &mut Vec<u8>, root: &dyn Component, - mode: DrawMode, size: Dimensions, ) -> anyhow::Result<()> { /// Heuristic to determine if a buffer is too large to buffer. /// Can be tuned, but is currently set to 1000000 graphemes. fn is_big(buf: &Lines) -> bool { let len: usize = buf.iter().map(Line::len).sum(); len > MAX_GRAPHEME_BUFFER } - // Go to the beginning of the canvas. - self.root.move_up(buffer)?; - // Pre-draw the frame *and then* start rendering emitted messages. - let mut frame = self.root.draw(root, size, mode)?; + let mut canvas = root.draw(size, mode)?; + // We don't trust the child to not truncate the result. + canvas.shrink_lines_to_dimensions(size); + // Render at most a single frame if this is not the last render. // Does not buffer if there is a ridiculous amount of data. let limit = match mode { DrawMode::Normal if !is_big(&self.to_emit) => { - let limit = size.height.saturating_sub(frame.len()); + let limit = size.height.saturating_sub(canvas.len()); // arbitrary value picked so we don't starve `emit` on small terminal sizes. Some(cmp::max(limit, MINIMUM_EMIT)) } _ => None, }; - self.to_emit.render(buffer, limit)?; - frame.render(buffer, None)?; - // clear any residue from the previous render. - buffer.queue(Clear(ClearType::FromCursorDown))?; + // How much of the canvas hasn't changed, so I can avoid overwriting + // and thus avoid flickering things like URLs in the VS Code terminal.
+ let reuse_prefix = if self.to_emit.is_empty() { + self.canvas_contents.lines_equal(&canvas) + } else { + 0 + }; + + Self::clear_canvas_pre(buffer, self.canvas_contents.len() - reuse_prefix)?; + self.to_emit.render_with_limit(buffer, limit)?; + canvas.render_from_line(buffer, reuse_prefix)?; + Self::clear_canvas_post(buffer)?; + self.canvas_contents = canvas; Ok(()) } @@ -227,9 +260,9 @@ mod tests { use crate::testing::frame_contains; use crate::testing::test_console; use crate::testing::SuperConsoleTestingExt; - use crate::Lines; #[derive(AsRef, Debug)] + #[allow(dead_code)] struct Msg(Lines); #[test] @@ -262,7 +295,7 @@ mod tests { vec!["line 1"].try_into()?; MAX_GRAPHEME_BUFFER * 2 ])); - let root = Echo(Lines(vec![vec!["line"].try_into()?; 1])); + let root = Echo(Lines(vec![vec!["line"].try_into()?])); let mut buffer = Vec::new(); // Even though we have more messages than fit on the screen in the `to_emit` buffer @@ -284,7 +317,7 @@ mod tests { fn test_block_render() -> anyhow::Result<()> { let mut console = test_console(); - let root = Echo(Lines(vec![vec!["state"].try_into()?; 1])); + let root = Echo(Lines(vec![vec!["state"].try_into()?])); console.render(&root)?; assert_eq!(console.test_output()?.frames.len(), 1); @@ -306,7 +339,7 @@ mod tests { fn test_block_lines() -> anyhow::Result<()> { let mut console = test_console(); - let root = Echo(Lines(vec![vec!["state"].try_into()?; 1])); + let root = Echo(Lines(vec![vec!["state"].try_into()?])); console.test_output_mut()?.should_render = false; console.emit(Lines(vec![vec!["line 1"].try_into()?])); @@ -335,7 +368,7 @@ mod tests { fn test_block_finalize() -> anyhow::Result<()> { let mut console = test_console(); - let root = Echo(Lines(vec![vec!["state"].try_into()?; 1])); + let root = Echo(Lines(vec![vec!["state"].try_into()?])); console.test_output_mut()?.should_render = false; console.emit(Lines(vec![vec!["line 1"].try_into()?])); @@ -354,4 +387,42 @@ mod tests { Ok(()) } + + #[test] + fn test_reuse_buffer() -> anyhow::Result<()> { + let mut console = test_console(); + + console.render(&Echo(Lines(vec![ + vec!["http://example.com/ link"].try_into()?, + vec!["number 1, special 1"].try_into()?, + ])))?; + console.render(&Echo(Lines(vec![ + vec!["http://example.com/ link"].try_into()?, + vec!["number 2, special 2"].try_into()?, + ])))?; + console.emit(Lines(vec![vec!["special 3"].try_into()?])); + console.render(&Echo(Lines(vec![ + vec!["http://example.com/ link"].try_into()?, + vec!["number 3"].try_into()?, + ])))?; + console.render(&Echo(Lines(vec![ + vec!["http://example.com/ link"].try_into()?, + vec!["special 4"].try_into()?, + vec!["number 4"].try_into()?, + ])))?; + + let frames = &console.test_output()?.frames; + assert_eq!(frames.len(), 4); + // We expect the URL to be omitted on some frames, because it didn't change. 
+ let expect_url = [0, 2]; + for (i, frame) in frames.iter().enumerate() { + assert_eq!( + frame_contains(frame, "http://example.com/"), + expect_url.contains(&i), + ); + assert!(frame_contains(frame, format!("number {}", i + 1))); + assert!(frame_contains(frame, format!("special {}", i + 1))); + } + Ok(()) + } } diff --git a/superconsole/src/testing.rs b/superconsole/src/testing.rs index ee4a6336111de..6da7b34723e26 100644 --- a/superconsole/src/testing.rs +++ b/superconsole/src/testing.rs @@ -79,7 +79,7 @@ pub fn test_console() -> SuperConsole { width: 80, height: 80, }; - SuperConsole::new_internal( + SuperConsole::new_with_output( Some(size), Box::new(TestOutput { should_render: true, @@ -98,3 +98,14 @@ pub fn frame_contains(frame: &[u8], needle: impl AsRef<[u8]>) -> bool { } false } + +#[track_caller] +pub fn assert_frame_contains(frame: &[u8], needle: impl AsRef<[u8]>) { + if !frame_contains(frame, needle.as_ref()) { + panic!( + "Expected frame to contain `{}`, but was:\n{}", + String::from_utf8_lossy(needle.as_ref()), + String::from_utf8_lossy(frame) + ); + } +} diff --git a/test.py b/test.py index 0081ed425251c..5c48c31b3199a 100755 --- a/test.py +++ b/test.py @@ -11,6 +11,7 @@ """ import argparse +import importlib.machinery import json import os import signal @@ -21,7 +22,26 @@ from contextlib import contextmanager from enum import Enum from pathlib import Path -from typing import Dict, Iterable, List, Optional +from typing import Dict, Generator, Iterable, List, Optional + +# To prevent the next line from creating a pycache dir +sys.dont_write_bytecode = True +lint_levels = importlib.machinery.SourceFileLoader( + "lint_levels", str(Path(__file__).parent / "lint_levels.bzl") +).load_module() + + +def is_opensource() -> bool: + # @oss-disable: return False + return True # @oss-enable + + +def is_macos() -> bool: + return sys.platform == "darwin" + + +def is_windows() -> bool: + return sys.platform == "win32" class Colors(Enum): @@ -48,8 +68,19 @@ def print_error(msg: str) -> None: ) +def print_warn(msg: str) -> None: + print( + Colors.WARNING.value + + Colors.BOLD.value + + "WARNING: " + + msg + + Colors.ENDC.value, + file=sys.stderr, + ) + + @contextmanager -def timing() -> None: +def timing() -> Generator: start = time.time() yield duration = time.time() - start @@ -60,7 +91,7 @@ def run( args: Iterable[str], capture_output: bool = False, env: Optional[Dict[str, str]] = None, - error: Optional[str] = None, + timeout: Optional[int] = None, ) -> subprocess.CompletedProcess: """ Runs a command (args) in a new process. @@ -78,19 +109,17 @@ def run( # We'd like to use the capture_output argument, # but that isn't available in Python 3.6 which we use on Windows stdout=subprocess.PIPE if capture_output else sys.stdout, - stderr=subprocess.PIPE if capture_output else sys.stderr, + stderr=sys.stderr, check=True, encoding="utf-8", env=env or os.environ.copy(), + timeout=timeout, ) return result except subprocess.CalledProcessError as e: # Print the console info if we were capturing it if capture_output: print(e.stdout, file=sys.stdout) - print(e.stderr, file=sys.stderr) - if error: - print_error(error) sys.exit(1) @@ -107,9 +136,10 @@ def check_no_changes(git: bool): status = run(status_cmd, capture_output=True) if status.stdout.strip(): + run(status_cmd) run(diff_cmd) print_error( - "File changed from commit. This means you need to run cargo-fmt locally and amend this commit." + "Files changed! Caused either by formatting or by tests creating stray files."
) sys.exit(1) @@ -124,11 +154,8 @@ def list_starlark_files(git: bool): ] excludes = [ "starlark-rust/starlark/testcases/", - "tests/e2e/test_starlark_data/bad_warning.bzl", - "tests/e2e/test_lsp_data/bad_syntax.bzl", - "tests/e2e/test_lsp_data/query.bxl", - "tests/e2e/test_lsp_data/globals.bzl", - "tests/e2e/test_lsp_data/cell/sub/defs.bzl", + "tests/core/**/test_*_data/**", + "tests/e2e/**/test_*_data/**", "**.rs", "**.fixture", "**.buckconfig", @@ -197,99 +224,11 @@ def rustfmt(buck2_dir: Path, ci: bool, git: bool) -> None: check_no_changes(git) -CLIPPY_ALLOW = [ - "clippy::arc-with-non-send-sync", # Needs triage, see 'dashmap_directory_interner.rs:39:20' (`DashMap` is not `Send` or `Sync`) - "clippy::useless_conversion", # Removed all obvious but there are some reports I'm unclear how to fix - "clippy::needless_raw_string_hashes", # False positives - "clippy::disallowed_names", # Not using foo, bar, baz in test data is silly - "clippy::bool-to-int-with-if", # Using if branches to return 1 or 0 is valid, but this complains that we should use `int::from`, which is arguably less clear - "clippy::cognitive_complexity", # This is an arbitrary linter - "clippy::collapsible-if", # Sometimes nesting better expresses intent - "clippy::collapsible-else-if", # Sometimes nesting better expresses intent - "clippy::comparison_chain", # Generates worse code and harder to read - "clippy::comparison_to_empty", # x == "" is clearer than x.is_empty() - "clippy::derive_partial_eq_without_eq", # In generated protobuf code - "clippy::implicit-hasher", # Makes code more complex for little benefit - "clippy::len-without-is-empty", # len() == 0 is perfectly clear - "clippy::manual-range-contains", # a <= b && b <= c is way clearer than (a..=c).contains(&b) - "clippy::many_single_char_names", # match(a,b,c,d,e) sometimes makes sense - "clippy::match-like-matches-macro", # Using matches! is sometimes clearer, sometimes not - "clippy::match-wild-err-arm", # Seems reasonable to panic on Err(_) - "clippy::missing-safety-doc", # Documentation should be tailored to the reader, not the linter - "clippy::mut_from_ref", # Tries to check soundness, which Rust already does - "clippy::naive-bytecount", # Requires an extra dependency for marginal gains. - "clippy::needless_collect", # False positives: doesn't understand lifetimes, or e.g. DoubleEndedIterator. - "clippy::needless_lifetimes", # This is throwing false positives - "clippy::new_without_default", # Default is not always useful - "clippy::single_match", # Sometimes a single match looks good - "clippy::too_many_arguments", # This is an arbitrary limit set on number of arguments and not always useful - "clippy::type_complexity", # This is an arbitrary limit set on number of type parameterizations and not always useful - "clippy::unnecessary-wraps", # Sometimes unnecessary wraps provide the right API - "clippy::wrong_self_convention", # These rules are useless pedantry - "clippy::bool-assert-comparison", # Sometimes more clear to write it this way - "clippy::unwrap-or-else-default", # Defaults aren't always more clear as it removes the type information when reading code - "clippy::enum-variant-names", # Sometimes you do want the same prefixes - "clippy::needless_update", # Our RE structs have slightly different definitions in internal and OSS. 
- "clippy::almost-swapped", # Triggered by Clap v3, perhaps remove when we move to v4 -] - -CLIPPY_DENY = [ - "clippy::all", - "clippy::await_holding_lock", - "clippy::await_holding_refcell_ref", - "clippy::dbg_macro", - "clippy::debug_assert_with_mut_call", - "clippy::empty_enum", - "clippy::filter_map_next", - "clippy::flat_map_option", - "clippy::from_iter_instead_of_collect", - "clippy::large_stack_arrays", - "clippy::linkedlist", - "clippy::macro_use_imports", - "clippy::maybe_infinite_iter", - "clippy::mut_mut", - "clippy::needless_continue", - "clippy::needless_range_loop", - "clippy::nonstandard_macro_braces", - "clippy::rc_mutex", - "clippy::ref_option_ref", - "clippy::rest_pat_in_fully_bound_structs", - "clippy::same_functions_in_if_condition", - "clippy::str_to_string", - "clippy::string_to_string", - "clippy::todo", - "clippy::trivially_copy_pass_by_ref", - "clippy::useless_transmute", - "clippy::verbose_file_reads", - "clippy::wildcard_dependencies", - "clippy::useless-vec", # TBD if this should be CLIPPY_ALLOW - "clippy::unnecessary-literal-unwrap", # TBD if this should be CLIPPY_ALLOW - "clippy::needless_borrow", - "clippy::tuple_array_conversions", - "let_underscore_drop", - "unused_extern_crates", -] - - -CLIPPY_AUTOFIX = [ - # Only add machine-fixable warnings in this list, or we'll see them all - # the time in CI. - "clippy::cloned_instead_of_copied", - "clippy::inconsistent_struct_constructor", - "clippy::inefficient_to_string", - "clippy::let_unit_value", - "clippy::map_flatten", - "clippy::map_unwrap_or", - "clippy::needless_bitwise_bool", - "clippy::needless_borrow", - "clippy::range_minus_one", - "clippy::unwrap_or_else_default", - "clippy::useless-conversion", -] - RUSTC_ALLOW = { - # This needs a feature + # These are not in the shared-with-buck2 lists because they only appear in third-party deps. + # Normally cargo would suppress those, but we do vendored builds and so it doesn't. "unfulfilled-lint-expectations", + "unknown-lints", # This is not *actually* a warning but rather a warning level. 
"warnings", } @@ -339,12 +278,12 @@ def clippy(package_args: List[str], fix: bool) -> None: clippy_fix_args = ["--fix"] if fix else [] - clippy_deny_lints = [*CLIPPY_DENY, *rustc_default_warnings] - clippy_allow_lints = CLIPPY_ALLOW + clippy_deny_lints = [*lint_levels.CLIPPY_DENY, *rustc_default_warnings] + clippy_allow_lints = lint_levels.CLIPPY_ALLOW if fix: - clippy_deny_lints.extend(CLIPPY_AUTOFIX) + clippy_deny_lints.extend(lint_levels.CLIPPY_AUTOFIX) else: - clippy_allow_lints.extend(CLIPPY_AUTOFIX) + clippy_allow_lints.extend(lint_levels.CLIPPY_AUTOFIX) clippy_deny_args = [f"--deny={c}" for c in clippy_deny_lints] clippy_allow_args = [f"--allow={c}" for c in clippy_allow_lints] @@ -367,6 +306,10 @@ def clippy(package_args: List[str], fix: bool) -> None: def starlark_linter(buck2: str, git: bool) -> None: + if git: + print_warn("Skipping starlark linter on git") + return + print_running("starlark linter") starlark_files = list_starlark_files(git) with tempfile.NamedTemporaryFile(mode="w+t") as fp: @@ -445,8 +388,16 @@ def rustdoc(package_args: List[str]) -> None: def test(package_args: List[str]) -> None: - print_running("cargo test") - run(["cargo", "test", *package_args]) + print_running("cargo test --lib") + extra_args = [] + # Limit number of parallel jobs to prevent OOMs + if is_windows(): + extra_args = ["--jobs", str(os.cpu_count() // 2)] + # Hour should be enough for all tests to run + timeout_sec = 60 * 60 + run(["cargo", "test", "--lib", *extra_args, *package_args], timeout=timeout_sec) + print_running("cargo test --doc") + run(["cargo", "test", "--doc", *extra_args, *package_args], timeout=timeout_sec) def main() -> None: @@ -526,8 +477,12 @@ def main() -> None: starlark_linter(args.buck2, args.git) if not (args.rustfmt_only or args.lint_starlark_only): - with timing(): - clippy(package_args, args.clippy_fix) + if args.ci and is_opensource() and is_macos(): + # TODO(nga): re-enable with next rust version bump (current is nightly-2024-02-01) + print_error("Clippy crashes on macOS; skipping") + else: + with timing(): + clippy(package_args, args.clippy_fix) if not args.lint_starlark_only: with timing(): diff --git a/tests/.gitignore b/tests/.gitignore new file mode 100644 index 0000000000000..ce07be6844203 --- /dev/null +++ b/tests/.gitignore @@ -0,0 +1 @@ +buck-out/ diff --git a/tests/README.md b/tests/README.md new file mode 100644 index 0000000000000..e66849e143ae4 --- /dev/null +++ b/tests/README.md @@ -0,0 +1,7 @@ +# buck2 e2e tests + +This directory contains end-to-end tests for buck2. Private tests are not +exported to GitHub. Prelude tests are not exported to GitHub yet (2024-08). + +Currently (2024-08), these tests only work internally in Meta and are not +executed from GitHub Actions. diff --git a/tests/assert_dependencies_test.bxl b/tests/assert_dependencies_test.bxl new file mode 100644 index 0000000000000..4363a89798780 --- /dev/null +++ b/tests/assert_dependencies_test.bxl @@ -0,0 +1,38 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//utils:set.bzl", "set") + +_MAX_DEPTH = 2147483647 + +def _assert_dependencies_test(ctx: bxl.Context, expected_deps: list[TargetLabel], target: TargetLabel): + """Checks the dependencies of a given rule looking for expected dependencies. 
+ + Test passes if each of expected_deps is a dependency of the target. + """ + + cquery = ctx.cquery() + transitive_deps = cquery.deps( + ctx.target_universe(target).target_set(), + _MAX_DEPTH, + filter = "target_deps()", + ) + result = set([target.label.raw_target() for target in transitive_deps]) + not_found = [item for item in expected_deps if not result.contains(item)] + if len(not_found) > 0: + fail("Expected dependencies not found: {}".format(not_found)) + +def _impl_assert_dep_test(ctx: bxl.Context): + _assert_dependencies_test(ctx, ctx.cli_args.deps, ctx.cli_args.target) + +test = bxl_main( + cli_args = { + "deps": cli_args.list(cli_args.target_label()), + "target": cli_args.target_label(), + }, + impl = _impl_assert_dep_test, +) diff --git a/tests/audit_dependents_test.bxl b/tests/audit_dependents_test.bxl new file mode 100644 index 0000000000000..841a381f785b2 --- /dev/null +++ b/tests/audit_dependents_test.bxl @@ -0,0 +1,38 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load(":dependencies_test_util.bzl", "rule_list_regex") + +def _audit_dependents_test(ctx: bxl.Context, source_target: TargetLabel, target: TargetLabel, allowlist_patterns: list[str]): + """Checks dependents of a given target in a dependency graph of a source target. + + Test passes if only targets that match allowlist_patterns have a direct dependency on target (in source_target's target universe). + The logic for the non-bxl version: https://fburl.com/code/b7sbezop + """ + + cquery = ctx.cquery() + reverse_deps = cquery.eval( + "nattrfilter(labels, codegen_rule, rdeps({}, {}, 1) - set({} {}))".format(source_target, target, source_target, target), + target_universe = [str(source_target)], + ) + allowlist_regex = rule_list_regex(allowlist_patterns) + blocklisted = filter(lambda target: not regex_match(allowlist_regex, str(target.label.raw_target())), reverse_deps) + if len(blocklisted) > 0: + res = set([str(target.label.raw_target()) for target in blocklisted]) + fail("Disallowed rules were found between {} and {}: {}!".format(source_target, target, res)) + +def _impl_audit_dependents_test(ctx: bxl.Context): + _audit_dependents_test(ctx, ctx.cli_args.source_target, ctx.cli_args.target, ctx.cli_args.allowlist_patterns) + +test = bxl_main( + cli_args = { + "allowlist_patterns": cli_args.list(cli_args.string()), + "source_target": cli_args.target_label(), + "target": cli_args.target_label(), + }, + impl = _impl_audit_dependents_test, +) diff --git a/tests/buck_e2e.bzl b/tests/buck_e2e.bzl new file mode 100644 index 0000000000000..090464027d03a --- /dev/null +++ b/tests/buck_e2e.bzl @@ -0,0 +1,303 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree.
+ +load("@fbcode//buck2/app:modifier.bzl", "buck2_modifiers") +load("@fbcode//target_determinator/macros:ci.bzl", "ci") +load("@fbcode//target_determinator/macros:ci_hint.bzl", "ci_hint") +load("@fbcode_macros//build_defs:native_rules.bzl", "buck_filegroup") +load("@fbcode_macros//build_defs:python_pytest.bzl", "python_pytest") + +def buck_e2e_test( + name, + executable, + use_buck_api = True, + contacts = None, + base_module = None, + data = None, + data_dir = None, + srcs = (), + tags = (), + deps = (), + env = None, + resources = None, + skip_for_os = (), + pytest_config = None, + pytest_marks = None, + pytest_expr = None, + pytest_confcutdir = None, + serialize_test_cases = None, + require_nano_prelude = None, + cfg_modifiers = (), + ci_srcs = [], + ci_deps = [], + compatible_with = None): + """ + Custom macro for buck2/buckaemon end-to-end tests using pytest. + """ + for s in skip_for_os: + if s not in ["darwin", "windows"]: + fail("Skipped os must be one of darwin or windows, not {}".format(s)) + tags = list(tags) + [ + # Running multiple bucks are expensive. This limits tpx to parallelism of 4. + "heavyweight", + ] + env = env or {} + env["RUST_BACKTRACE"] = "1" + env["TEST_EXECUTABLE"] = executable + + # Having it enabled has significant negative impact on Starlark evaluation performance. + env["RUST_LIB_BACKTRACE"] = "0" + + # Flags passed to pytest. + # -vv shows full assertion output on failures. + # ---tb=native shows python native traceback instead of default pytest traceback with source code. + # --no-header disables headers printed after "test session starts" on output + # --no-summary disables pytest summary printed after each test run on output + env["PYTEST_ADDOPTS"] = "-vv --tb=native --no-header --no-summary" + + # For autodeps + read_package_value = getattr(native, "read_package_value", None) + e2e_flavor = read_package_value and read_package_value("buck2_e2e_test.flavor") + if e2e_flavor == "isolated": + env["BUCK2_E2E_TEST_FLAVOR"] = "isolated" + serialize_test_cases = serialize_test_cases or False + else: + env["BUCK2_E2E_TEST_FLAVOR"] = "any" + serialize_test_cases = serialize_test_cases if serialize_test_cases != None else True + + if serialize_test_cases: + # This lets us pass stress runs by making all test cases inside of a test file serial + # Test cases in different files can still run in parallel. + tags.append("serialize_test_cases") + + if data and data_dir: + fail("`data` and `data_dir` cannot be used together") + + # If there's `_data` dir next to test, use it as test data directory. 
+ if data_dir: + if not data_dir.startswith("test_") or not data_dir.endswith("_data"): + fail("Data dirs must be of the form `test_.*_data`, not {}".format(data_dir)) + buck_filegroup( + name = data_dir, + srcs = [data_dir], + copy = False, + ) + env["TEST_REPO_DATA"] = "$(location :{d})/{d}".format(d = data_dir) + + if data: + env["TEST_REPO_DATA"] = "$(location {})".format(data) + + # Add nano_prelude unconditionally for isolated tests + if require_nano_prelude == None: + require_nano_prelude = data_dir != None + if require_nano_prelude: + env["NANO_PRELUDE"] = "$(location fbcode//buck2/tests/e2e_util/nano_prelude:nano_prelude)" + + if type(deps) == "tuple": + deps = list(deps) + + deps = [ + "fbsource//third-party/pypi/pytest:pytest", + "fbsource//third-party/pypi/pytest-asyncio:pytest-asyncio", + "fbcode//buck2/tests/e2e_util:utilities", + ] + deps + if use_buck_api: + deps.append("fbcode//buck2/tests/e2e_util/api:api") + resources = resources or {} + + # Let users of the macro define their own configuration for pytest. This allows reusing all + # the fixture code for tools building e2e tests that also need a working buck environment. + if not "conftest.py" in resources.values(): + resources["fbcode//buck2/tests/e2e_util:conftest.py"] = "conftest.py" + + labels = [] + if "darwin" in skip_for_os: + labels += ci.remove_labels(ci.mac(ci.aarch64(ci.opt()))) + if "windows" in skip_for_os: + labels += ci.remove_labels(ci.windows(ci.opt())) + + metadata = {} + metadata["buck.cfg_modifiers"] = cfg_modifiers + + python_pytest( + name = name, + base_module = base_module, + srcs = srcs, + tags = tags, + deps = deps, + env = env, + emails = contacts, + resources = resources, + skip_on_mode_mac = "darwin" in skip_for_os, + skip_on_mode_win = "windows" in skip_for_os, + pytest_config = pytest_config, + pytest_marks = pytest_marks, + pytest_expr = pytest_expr, + pytest_confcutdir = pytest_confcutdir, + labels = labels, + metadata = metadata, + compatible_with = compatible_with, + ) + + if e2e_flavor == "buck2_non_isolated": + # These are buck2's own non-isolated e2e tests. Add a ci hint indicating + # that they depend on many of the macros in the repo. Intentionally + # don't do this for other users of `buck2_e2e_test` in the repo + BUCK2_E2E_TEST_CI_SRCS = [ + "fbandroid/buck2/**", + "fbcode/buck2/cfg/**", + "fbcode/buck2/prelude/**", + "fbcode/buck2/platform/**", + "fbcode/buck2/toolchains/**", + "fbcode/buck2/tests/targets/**", + "fbobjc/buck2/**", + "xplat/buck2/**", + "xplat/toolchains/**", + "fbcode/hermetic_infra/fdb/**", + "tools/build_defs/**", + "arvr/tools/build_defs/config/**", + ".buckconfig", + "tools/buckconfigs/**", + ] + ci_srcs = ci_srcs + BUCK2_E2E_TEST_CI_SRCS + if ci_srcs or ci_deps: + ci_hint( + ci_srcs = ci_srcs, + ci_deps = ci_deps, + reason = "Non isolated buck2 e2e tests depend heavily on macros", + target = name, + ) + +def buck2_e2e_test( + name, + test_with_compiled_buck2 = True, + test_with_deployed_buck2 = False, + test_with_reverted_buck2 = False, + use_compiled_buck2_client_and_tpx = False, + deps = (), + env = None, + skip_for_os = (), + use_buck_api = True, + contacts = None, + base_module = None, + data = None, + data_dir = None, + srcs = (), + tags = (), + resources = None, + pytest_config = None, + pytest_marks = None, + pytest_expr = None, + pytest_confcutdir = None, + serialize_test_cases = None, + require_nano_prelude = None, + ci_srcs = [], + ci_deps = [], + compatible_with = None): + """ + Custom macro for buck2 end-to-end tests using pytest.
All tests are run against buck2 compiled in-repo (compiled buck2). + + test_with_compiled_buck2: + A boolean for whether to run tests with the compiled buck2. + Default is True. + Should typically be unset when testing things that are not expected to be disproportionately + sensitive to buck2 core changes. Unsetting this also simplifies the CI setup, as testing + with buck2 core requires always using opt mode. + test_with_deployed_buck2: + A boolean for whether to run tests with the deployed buck2. + Default is False. + Should typically be set for tests of UDRs and other things that are not "core buck2 functionality" + test_with_reverted_buck2: + Like `test_with_deployed_buck2`, but for the previous version + use_compiled_buck2_client_and_tpx: + A full prod archive is distinct from a normal build of buck2 in that it uses a client-only + binary and additionally makes TPX available. Needed if you want to be able to `buck.test` + Default is False. + """ + kwargs = { + "base_module": base_module, + "ci_deps": ci_deps, + "ci_srcs": ci_srcs, + "compatible_with": compatible_with, + "contacts": contacts, + "data": data, + "data_dir": data_dir, + "pytest_confcutdir": pytest_confcutdir, + "pytest_config": pytest_config, + "pytest_expr": pytest_expr, + "pytest_marks": pytest_marks, + "require_nano_prelude": require_nano_prelude, + "resources": resources, + "serialize_test_cases": serialize_test_cases, + "srcs": srcs, + "tags": tags, + "use_buck_api": use_buck_api, + } + + env = env or {} + if not test_with_compiled_buck2 and not test_with_deployed_buck2: + fail("Must set one of `test_with_compiled_buck2` or `test_with_deployed_buck2` for " + name) + + # soft errors should always be allowed on tests with deployed buck2, or with reverted buck2 + deployed_env = dict(env) + deployed_env["BUCK2_HARD_ERROR"] = "false" + + if test_with_compiled_buck2: + compiled_env = dict(env) + compiled_env["BUCK2_HARD_ERROR"] = "true" + compiled_env["BUCK2_TPX"] = "$BUCK2_BINARY_DIR/buck2-tpx" + + if use_compiled_buck2_client_and_tpx: + base_exe = "$(location fbcode//buck2:symlinked_buck2_and_tpx)/buck2" + exe = select({ + "DEFAULT": base_exe, + "ovr_config//os:windows": base_exe + ".exe", + }) + else: + exe = "$(location fbcode//buck2:buck2)" + + buck_e2e_test( + # deployed buck2 test target retains the original target name so that when a user runs `buck test <target>`, + # it only runs the deployed buck2 tests and not the compiled buck2 tests. + # This will make it much quicker for rule writers to run their tests locally. + name = name + ("_with_compiled_buck2" if test_with_deployed_buck2 else ""), + env = compiled_env, + executable = exe, + skip_for_os = skip_for_os, + deps = deps, + cfg_modifiers = buck2_modifiers() + [ + # Always run these tests under rust opt build + "ovr_config//build_mode:opt", + ], + **kwargs + ) + + if test_with_deployed_buck2: + deps = deps or [] + + # Add a buck2 version file as dep so we can run deployed buck2 tests on version bumps.
+ deps += ["fbsource//tools/buck2-versions:stable"] + buck_e2e_test( + name = name, + env = deployed_env, + executable = "buck2", + skip_for_os = skip_for_os, + deps = deps, + **kwargs + ) + + if test_with_reverted_buck2: + previous_env = dict(deployed_env) + previous_env["BUCK2_CHANNEL"] = "previous" + buck_e2e_test( + name = name + "_with_reverted_buck2", + env = previous_env, + executable = "buck2", + skip_for_os = skip_for_os, + deps = deps, + **kwargs + ) diff --git a/tests/bxl_test.bzl b/tests/bxl_test.bzl new file mode 100644 index 0000000000000..2790f03454e5f --- /dev/null +++ b/tests/bxl_test.bzl @@ -0,0 +1,51 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") +load("@fbcode_macros//build_defs:export_files.bzl", "export_file") + +def bxl_test(src, name = None, labels = None, **kwargs): + """ + Creates a test target from a buck2 bxl script. BXL script must use "test" as entry + point. + + Parameters: + src: source path of BXL script. This cannot be a target since bxl + can only be invoked from the repo and not from buck-out. + name: Name of the test target. If unspecified, use src as the name. + """ + + if ":" in src: + fail("`src` cannot be a target. Found `{}` for `src`".format(src)) + if not src.endswith(".bxl"): + fail("`src` must end in '.bxl'. Found `{}` for `src`".format(src)) + export_file_name = src + ".export_file" + export_file(name = export_file_name, src = src, mode = "reference") + + # This is ugly but needed for buck1 compatibility + cell = native.repository_name()[1:] + base_path = native.package_name() + bxl_main = "{}//{}/{}:test".format(cell, base_path, src) + + if not name: + name = src + + buck2_e2e_test( + name = name, + srcs = {"fbcode//buck2/tests/e2e_util:test_bxl_template.py": "test_bxl_template.py"}, + env = { + "BXL_MAIN": bxl_main, + # This env var is used to properly declare a dep on the src file. + # I didn't use `resources` or `deps` because attaching to an env var makes debugging easier if needed. + "_BXL_SRC": "$(location :{})".format(export_file_name), + }, + # fbcode_macros uses tags instead of labels + tags = ["bxl_test"] + (labels if labels else []), + test_with_compiled_buck2 = False, + test_with_deployed_buck2 = True, + **kwargs + ) diff --git a/tests/check_dependencies_test.bxl b/tests/check_dependencies_test.bxl new file mode 100644 index 0000000000000..a45850b25947a --- /dev/null +++ b/tests/check_dependencies_test.bxl @@ -0,0 +1,68 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +"""BXL tests for checking dependencies.""" + +load(":dependencies_test_util.bzl", "rule_list_regex") + +_MAX_DEPTH = 2147483647 + +def _impl_check_dependencies_test( + ctx: bxl.Context, + allowlist_patterns: list[str] | None, + blocklist_patterns: list[str] | None, + target: TargetLabel): + """Test the dependencies of a given rule. + + If allowlist_patterns is set, only dependencies which match a pattern + in the allowlist will be allowed. 
In this mode, blocklist_patterns can + be used to remove targets from the set of allowlisted items. + If allowlist_patterns is not set, then all targets are assumed to be + allowed unless they match a pattern in the blocklist_patterns. + Each pattern list may contain target names (e.g. //foo/bar:bar) + or regex patterns (e.g. //foo/.*) + """ + if not allowlist_patterns and not blocklist_patterns: + fail("Self-check and self-documentation: must provide allow or block list") + cquery = ctx.cquery() + transitive_deps = cquery.deps( + ctx.target_universe(target).target_set(), + _MAX_DEPTH, + filter = "target_deps()", + ) + + blocklist_regex = rule_list_regex(blocklist_patterns) + if allowlist_patterns != None and len(allowlist_patterns) > 0: + allowlist_regex = rule_list_regex(allowlist_patterns) + blocklisted = filter(lambda target: regex_match(blocklist_regex, str(target.label.raw_target())), transitive_deps) + allowlisted = filter(lambda target: regex_match(allowlist_regex, str(target.label.raw_target())), transitive_deps) + allowlisted_deps = ctx.target_universe(allowlisted).target_set() + blocklisted_deps = ctx.target_universe(blocklisted).target_set() + + # We expect transitive_deps = allowlisted_deps - blocklisted_deps following line means the same. + res = transitive_deps - allowlisted_deps + blocklisted_deps + + # TODO(ezgi): Add shortest path from target to the banned/blocklisted targets + if len(res) > 0: + fail("Found banned targets: {}! ".format(res)) + elif blocklist_patterns != None and len(blocklist_patterns) > 0: + blocklisted = filter(lambda target: regex_match(blocklist_regex, str(target.label)), transitive_deps) + res = ctx.target_universe(blocklisted).target_set() + if len(res) > 0: + fail("Found blocklisted targets: {}!".format(res)) + +def _impl(ctx: bxl.Context): + _impl_check_dependencies_test(ctx, ctx.cli_args.allowlist_patterns, ctx.cli_args.blocklist_patterns, ctx.cli_args.target) + +test = bxl_main( + cli_args = { + "allowlist_patterns": cli_args.option(cli_args.list(cli_args.string())), + "blocklist_patterns": cli_args.option(cli_args.list(cli_args.string())), + "target": cli_args.target_label(), + }, + impl = _impl, +) diff --git a/tests/check_dependencies_test.bzl b/tests/check_dependencies_test.bzl new file mode 100644 index 0000000000000..37ae282527366 --- /dev/null +++ b/tests/check_dependencies_test.bzl @@ -0,0 +1,158 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +# This is meant to be Open-source friendly. In our e2e tests, we invoke a variant from +# tools/build_defs/check_dependencies_test.bzl that passes additional arguments for meta specific allowlist. + +def check_dependencies_test( + name, + target, + contacts, + allowlist_patterns = None, + blocklist_patterns = None, + expect_failure_msg = None, + env = None, + deps = None, + **kwargs): + """ + Creates a test target from a buck2 bxl script. BXL script must use "test" as entry + point. + + Parameters: + name: Name of the test target. + contacts: List of oncalls for the test. 
+ + target: The target to check dependencies for + allowlist_patterns: a regex of patterns that should be allowed in transitive deps of the target + blocklist_patterns: a regex of patterns that should be blocked in transitive deps of the target + expect_failure_msg: the test is expected to fail with this message regex + env: additional environment variables to pass to the checking script + """ + bxl_main = "fbcode//buck2/tests/check_dependencies_test.bxl:test" + allowlist_patterns = ",".join(allowlist_patterns) if allowlist_patterns else "" + blocklist_patterns = ",".join(blocklist_patterns) if blocklist_patterns else "" + if not (expect_failure_msg == None or len(expect_failure_msg) > 0): + fail("Expected failure message can only be None or non-empty string") + + buck2_e2e_test( + contacts = contacts, + name = name, + srcs = {"fbcode//buck2/tests/e2e_util:test_bxl_check_dependencies_template.py": "test_bxl_check_dependencies_template.py"}, + env = { + "ALLOWLIST": allowlist_patterns, + "BLOCKLIST": blocklist_patterns, + "BXL_MAIN": bxl_main, + "EXPECT_FAILURE_MSG": expect_failure_msg or "", + "TARGET": target, + } | (env or {}), + # fbcode_macros uses tags instead of labels + tags = ["check_dependencies_test"], + test_with_compiled_buck2 = False, + test_with_deployed_buck2 = True, + use_buck_api = False, + # In order for target determinator to trigger this test when the `target` specified has changed, we need to introduce a dep on `target`. + # However, we cannot introduce a configured dep, because the `target` may not be compatible with the platform of the dependencies test. + # This adds a dep on `target` in a select arm that is never satisfied. This will work for TD because TD only looks at deps on unconfigured + # target graph. + deps = (deps or []) + select({ + "DEFAULT": [], + "ovr_config//:none": [target], + }), + **kwargs + ) + +def assert_dependencies_test( + name, + target, + contacts, + expected_deps, + expect_failure_msg = None, + deps = None, + **kwargs): + """ + Creates a test target from the fbcode//buck2/tests/assert_dependencies_test.bxl:test bxl script. + + Parameters: + name: Name of the test target. + contacts: List of oncalls for the test. + target: The target to check dependencies for + expected_deps: list of expected deps + """ + buck2_e2e_test( + name = name, + contacts = contacts, + srcs = {"fbcode//buck2/tests/e2e_util:test_bxl_assert_dependencies_template.py": "test_bxl_assert_dependencies_template.py"}, + env = { + "BXL_MAIN": "fbcode//buck2/tests/assert_dependencies_test.bxl:test", + "DEPS": ",".join(expected_deps), + "EXPECT_FAILURE_MSG": expect_failure_msg or "", + "TARGET": target, + }, + # fbcode_macros uses tags instead of labels + tags = ["assert_dependencies_test"], + test_with_compiled_buck2 = False, + test_with_deployed_buck2 = True, + use_buck_api = False, + # In order for target determinator to trigger this test when the `target` specified has changed, we need to introduce a dep on `target`. + # However, we cannot introduce a configured dep, because the `target` may not be compatible with the platform of the dependencies test. + # This adds a dep on `target` in a select arm that is never satisfied. This will work for TD because TD only looks at deps on unconfigured + # target graph.
+        deps = (deps or []) + select({
+            "DEFAULT": [],
+            "ovr_config//:none": [target],
+        }),
+        **kwargs
+    )
+
+def audit_dependents_test(
+        name,
+        target,
+        contacts,
+        source_target,
+        allowlist_patterns,
+        expect_failure_msg = None,
+        deps = None,
+        **kwargs):
+    """
+    Creates a test target from a buck2 bxl script. The BXL script must use "test" as its
+    entry point.
+
+    Parameters:
+        name: Name of the test target.
+        contacts: List of oncalls for the test.
+        target: The target to check direct dependents for.
+        source_target: The target universe in which to search for direct dependents.
+        allowlist_patterns: a regex of patterns that should be allowed for direct dependents of the target.
+        expect_failure_msg: the test is expected to fail with this message regex.
+    """
+    buck2_e2e_test(
+        name = name,
+        contacts = contacts,
+        srcs = {"fbcode//buck2/tests/e2e_util:test_bxl_audit_dependents_template.py": "test_bxl_audit_dependents_template.py"},
+        env = {
+            "ALLOWLIST": ",".join(allowlist_patterns) if allowlist_patterns else "",
+            "BXL_MAIN": "fbcode//buck2/tests/audit_dependents_test.bxl:test",
+            "EXPECT_FAILURE_MSG": expect_failure_msg or "",
+            "SOURCE_TARGET": source_target,
+            "TARGET": target,
+        },
+        # fbcode_macros uses tags instead of labels
+        tags = ["audit_dependents_test"],
+        test_with_compiled_buck2 = False,
+        test_with_deployed_buck2 = True,
+        use_buck_api = False,
+        # In order for the target determinator to trigger this test when the specified `target` has changed, we need to introduce a dep on `target`.
+        # However, we cannot introduce a configured dep, because the `target` may not be compatible with the platform of the dependencies test.
+        # This adds a dep on `target` in a select arm that is never satisfied. This works for TD because TD only looks at deps on the
+        # unconfigured target graph.
+        deps = (deps or []) + select({
+            "DEFAULT": [],
+            "ovr_config//:none": [target],
+        }),
+        **kwargs
+    )
diff --git a/tests/core/analysis/BUCK b/tests/core/analysis/BUCK
new file mode 100644
index 0000000000000..7626a205b87f8
--- /dev/null
+++ b/tests/core/analysis/BUCK
@@ -0,0 +1,18 @@
+load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test")
+
+oncall("build_infra")
+
+buck2_e2e_test(
+    name = "test_analysis_queries",
+    srcs = ["test_analysis_queries.py"],
+    data_dir = "test_analysis_queries_data",
+    deps = [
+        "//buck2/tests/e2e_util:golden",
+    ],
+)
+
+buck2_e2e_test(
+    name = "test_analysis_action_ids_unique",
+    srcs = ["test_analysis_action_ids_unique.py"],
+    data_dir = "test_analysis_action_ids_unique_data",
+)
diff --git a/tests/core/analysis/test_analysis_action_ids_unique.py b/tests/core/analysis/test_analysis_action_ids_unique.py
new file mode 100644
index 0000000000000..485df9fd69420
--- /dev/null
+++ b/tests/core/analysis/test_analysis_action_ids_unique.py
@@ -0,0 +1,31 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+ +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(data_dir="identifier") +async def test_analysis_action_ids_unique_identifier_within_category( + buck: Buck, +) -> None: + await expect_failure( + buck.audit("providers", "//:yyy"), + stderr_regex="Action category `foo` contains duplicate identifier `x`", + ) + + +@buck_test(data_dir="category") +async def test_analysis_action_ids_unique_singleton_category(buck: Buck) -> None: + await expect_failure( + buck.audit("providers", "//:zzz"), + stderr_regex="Analysis produced multiple actions with category `foo` and at least one of them had no identifier", + ) diff --git a/tests/core/analysis/test_analysis_action_ids_unique_data/.buckroot b/tests/core/analysis/test_analysis_action_ids_unique_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/analysis/test_analysis_action_ids_unique_data/category/.buckconfig b/tests/core/analysis/test_analysis_action_ids_unique_data/category/.buckconfig new file mode 100644 index 0000000000000..fc9c93d30eddd --- /dev/null +++ b/tests/core/analysis/test_analysis_action_ids_unique_data/category/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . +prelude = . diff --git a/tests/core/analysis/test_analysis_action_ids_unique_data/category/.buckroot b/tests/core/analysis/test_analysis_action_ids_unique_data/category/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/analysis/test_analysis_action_ids_unique_data/category/TARGETS.fixture b/tests/core/analysis/test_analysis_action_ids_unique_data/category/TARGETS.fixture new file mode 100644 index 0000000000000..0d87776daacdc --- /dev/null +++ b/tests/core/analysis/test_analysis_action_ids_unique_data/category/TARGETS.fixture @@ -0,0 +1 @@ +my_rule(name = "zzz") diff --git a/tests/core/analysis/test_analysis_action_ids_unique_data/category/prelude.bzl b/tests/core/analysis/test_analysis_action_ids_unique_data/category/prelude.bzl new file mode 100644 index 0000000000000..26c1b1f4d4ceb --- /dev/null +++ b/tests/core/analysis/test_analysis_action_ids_unique_data/category/prelude.bzl @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _my_rule_impl(ctx): + a = ctx.actions.declare_output("a.txt") + b = ctx.actions.declare_output("b.txt") + ctx.actions.run(cmd_args("write_to", a.as_output()), category = "foo", identifier = "x") + ctx.actions.run(cmd_args("write_to", b.as_output()), category = "foo") + return [DefaultInfo( + default_outputs = [a, b], + )] + +my_rule = rule( + impl = _my_rule_impl, + attrs = {}, +) diff --git a/tests/core/analysis/test_analysis_action_ids_unique_data/identifier/.buckconfig b/tests/core/analysis/test_analysis_action_ids_unique_data/identifier/.buckconfig new file mode 100644 index 0000000000000..fc9c93d30eddd --- /dev/null +++ b/tests/core/analysis/test_analysis_action_ids_unique_data/identifier/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . +prelude = . 
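The two failure messages asserted in test_analysis_action_ids_unique.py above encode buck2's action-key rule: within a single category, identifiers must be unique, and a category holding more than one action may not contain an unidentified action. A minimal Python sketch of that rule, for orientation only (the helper name and the (category, identifier) data shape are illustrative, not buck2 internals):

from collections import defaultdict
from typing import List, Optional, Tuple

def check_action_ids(actions: List[Tuple[str, Optional[str]]]) -> None:
    # Group declared actions by category, keeping identifier-or-None.
    by_category = defaultdict(list)
    for category, identifier in actions:
        by_category[category].append(identifier)
    for category, identifiers in by_category.items():
        # A category with several actions must identify every one of them.
        if None in identifiers and len(identifiers) > 1:
            raise ValueError(
                f"Analysis produced multiple actions with category `{category}` "
                "and at least one of them had no identifier"
            )
        # Identifiers must be unique within a category.
        if len(set(identifiers)) != len(identifiers):
            raise ValueError(f"Action category `{category}` contains duplicate identifier")

The `identifier` fixture corresponds to [("foo", "x"), ("foo", "x")] and the `category` fixture to [("foo", "x"), ("foo", None)]; both raise, matching the stderr patterns the tests expect.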
diff --git a/tests/core/analysis/test_analysis_action_ids_unique_data/identifier/.buckroot b/tests/core/analysis/test_analysis_action_ids_unique_data/identifier/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/analysis/test_analysis_action_ids_unique_data/identifier/TARGETS.fixture b/tests/core/analysis/test_analysis_action_ids_unique_data/identifier/TARGETS.fixture new file mode 100644 index 0000000000000..fff00dda2936c --- /dev/null +++ b/tests/core/analysis/test_analysis_action_ids_unique_data/identifier/TARGETS.fixture @@ -0,0 +1 @@ +my_rule(name = "yyy") diff --git a/tests/core/analysis/test_analysis_action_ids_unique_data/identifier/prelude.bzl b/tests/core/analysis/test_analysis_action_ids_unique_data/identifier/prelude.bzl new file mode 100644 index 0000000000000..4143e1d9b48b3 --- /dev/null +++ b/tests/core/analysis/test_analysis_action_ids_unique_data/identifier/prelude.bzl @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _my_rule_impl(ctx): + a = ctx.actions.declare_output("a.txt") + b = ctx.actions.declare_output("b.txt") + ctx.actions.run(cmd_args("write_to", a.as_output()), category = "foo", identifier = "x") + ctx.actions.run(cmd_args("write_to", b.as_output()), category = "foo", identifier = "x") + return [DefaultInfo( + default_outputs = [a, b], + )] + +my_rule = rule( + impl = _my_rule_impl, + attrs = {}, +) diff --git a/tests/core/analysis/test_analysis_queries.py b/tests/core/analysis/test_analysis_queries.py new file mode 100644 index 0000000000000..7661f6e77281d --- /dev/null +++ b/tests/core/analysis/test_analysis_queries.py @@ -0,0 +1,94 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.golden import golden + + +async def _test_analysis_query_invalidation_impl(buck: Buck, name: str) -> None: + linux = await buck.build_without_report( + ":root", "-c", "test.configuration=linux", "--out=-" + ) + macos = await buck.build_without_report( + ":root", "-c", "test.configuration=macos", "--out=-" + ) + + golden( + output=linux.stdout, + rel_path=f"{name}/linux.txt.golden", + ) + golden( + output=macos.stdout, + rel_path=f"{name}/macos.txt.golden", + ) + + # Mostly here to really be safe but in practice this fails with an + # incompatible target earlier if we have a bug. + assert "linux-select-dep" in linux.stdout + assert "macos-select-dep" in macos.stdout + + +@buck_test(data_dir="analysis_query_invalidation") +async def test_analysis_query_invalidation_deps(buck: Buck) -> None: + """ + This is a regression test for T133069783. 
+ """ + await _test_analysis_query_invalidation_impl( + buck, name="analysis_query_invalidation" + ) + + +@buck_test( + data_dir="analysis_query_invalidation_classpath", +) +async def test_analysis_query_invalidation_classpath(buck: Buck) -> None: + """ + Equivalent of T133069783 for `classpath()` instead of `deps()` queries. + """ + await _test_analysis_query_invalidation_impl( + buck, name="analysis_query_invalidation_classpath" + ) + + +@buck_test(data_dir="analysis_query_deps") +async def test_analysis_query_deps(buck: Buck) -> None: + deps = await buck.build_without_report(":deps", "--out=-") + golden( + output=deps.stdout, + rel_path="analysis_query_deps/deps.txt.golden", + ) + assert ":foo" in deps.stdout + assert ":bar" in deps.stdout + assert ":baz" in deps.stdout + assert ":qux" in deps.stdout + + +@buck_test(data_dir="analysis_query_deps") +async def test_analysis_query_deps_with_depth(buck: Buck) -> None: + deps = await buck.build_without_report(":deps1", "--out=-") + golden(output=deps.stdout, rel_path="analysis_query_deps/deps1.txt.golden") + assert ":foo" in deps.stdout + assert ":bar" in deps.stdout + assert ":baz" in deps.stdout + assert ":qux" not in deps.stdout + + +@buck_test(setup_eden=True, data_dir="analysis_query_deps") +async def test_analysis_query_target_deps(buck: Buck) -> None: + deps = await buck.build_without_report(":target_deps", "--out=-") + golden( + output=deps.stdout, + rel_path="analysis_query_deps/target_deps.txt.golden", + ) + assert ":foo" in deps.stdout + assert ":bar" in deps.stdout + assert ":baz" not in deps.stdout + assert ":qux" not in deps.stdout diff --git a/tests/core/analysis/test_analysis_queries_data/.buckroot b/tests/core/analysis/test_analysis_queries_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/analysis/test_analysis_queries_data/analysis_query_deps/.buckconfig b/tests/core/analysis/test_analysis_queries_data/analysis_query_deps/.buckconfig new file mode 100644 index 0000000000000..fc9c93d30eddd --- /dev/null +++ b/tests/core/analysis/test_analysis_queries_data/analysis_query_deps/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . +prelude = . 
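The assertions above pin down the semantics of `deps()` with a depth bound and an edge filter: depth 1 stops before :qux, and target_deps() additionally drops the $(exe :baz) edge. A rough Python model of that traversal over the fixture's shape; treating $(exe ...) as the edge kind that target_deps() filters out is an assumption consistent with the goldens, not a description of buck2's query engine:

from collections import deque
from typing import Callable, Dict, List, Set, Tuple

Edge = Tuple[str, str]  # (kind, dep), where kind is "target" or "exec"

def deps(graph: Dict[str, List[Edge]], root: str, depth: int,
         keep: Callable[[str], bool]) -> Set[str]:
    # Breadth-first walk, cut off at `depth`, following only kept edge kinds.
    seen, queue = {root}, deque([(root, 0)])
    while queue:
        node, d = queue.popleft()
        if d == depth:
            continue
        for kind, dep in graph.get(node, []):
            if keep(kind) and dep not in seen:
                seen.add(dep)
                queue.append((dep, d + 1))
    return seen

# Shape of the fixture defined below: :foo -> :bar via $(location ...),
# :foo -> :baz via $(exe ...), and :baz -> :qux via $(location ...).
graph = {
    ":foo": [("target", ":bar"), ("exec", ":baz")],
    ":baz": [("target", ":qux")],
}
assert deps(graph, ":foo", 1, lambda k: True) == {":foo", ":bar", ":baz"}
assert deps(graph, ":foo", 100000, lambda k: k == "target") == {":foo", ":bar"}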
diff --git a/tests/core/analysis/test_analysis_queries_data/analysis_query_deps/TARGETS.fixture b/tests/core/analysis/test_analysis_queries_data/analysis_query_deps/TARGETS.fixture new file mode 100644 index 0000000000000..d925343f11a14 --- /dev/null +++ b/tests/core/analysis/test_analysis_queries_data/analysis_query_deps/TARGETS.fixture @@ -0,0 +1 @@ +defs() diff --git a/tests/core/analysis/test_analysis_queries_data/analysis_query_deps/deps.txt.golden b/tests/core/analysis/test_analysis_queries_data/analysis_query_deps/deps.txt.golden new file mode 100644 index 0000000000000..1012b186b4f82 --- /dev/null +++ b/tests/core/analysis/test_analysis_queries_data/analysis_query_deps/deps.txt.golden @@ -0,0 +1,3 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +prelude//:qux prelude//:baz prelude//:bar prelude//:foo \ No newline at end of file diff --git a/tests/core/analysis/test_analysis_queries_data/analysis_query_deps/deps1.txt.golden b/tests/core/analysis/test_analysis_queries_data/analysis_query_deps/deps1.txt.golden new file mode 100644 index 0000000000000..5f6fed107ad73 --- /dev/null +++ b/tests/core/analysis/test_analysis_queries_data/analysis_query_deps/deps1.txt.golden @@ -0,0 +1,3 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +prelude//:foo prelude//:bar prelude//:baz \ No newline at end of file diff --git a/tests/core/analysis/test_analysis_queries_data/analysis_query_deps/prelude.bzl b/tests/core/analysis/test_analysis_queries_data/analysis_query_deps/prelude.bzl new file mode 100644 index 0000000000000..4b930ddb5059d --- /dev/null +++ b/tests/core/analysis/test_analysis_queries_data/analysis_query_deps/prelude.bzl @@ -0,0 +1,38 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +target = rule( + impl = lambda ctx: [ + DefaultInfo(default_output = ctx.actions.write("out", ctx.attrs.arg or "", allow_args = True)[0]), + RunInfo(), + ], + attrs = {"arg": attrs.option(attrs.arg(), default = None)}, +) + +def defs(): + # Targets we want to run deps queries from + target( + name = "deps", + arg = "$(query_targets deps(:foo))", + ) + target( + name = "deps1", + arg = "$(query_targets deps(:foo, 1))", + ) + target( + name = "target_deps", + arg = "$(query_targets deps(:foo, 100000, target_deps()))", + ) + + # Targets that are deps of targets we want to run deps queries from. 
+ target( + name = "foo", + arg = "$(location :bar) $(exe :baz)", + ) + target(name = "bar") + target(name = "baz", arg = "$(location :qux)") + target(name = "qux") diff --git a/tests/core/analysis/test_analysis_queries_data/analysis_query_deps/target_deps.txt.golden b/tests/core/analysis/test_analysis_queries_data/analysis_query_deps/target_deps.txt.golden new file mode 100644 index 0000000000000..f16af9ce9dfc4 --- /dev/null +++ b/tests/core/analysis/test_analysis_queries_data/analysis_query_deps/target_deps.txt.golden @@ -0,0 +1,3 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +prelude//:foo prelude//:bar \ No newline at end of file diff --git a/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation/.buckconfig b/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation/.buckconfig new file mode 100644 index 0000000000000..fc9c93d30eddd --- /dev/null +++ b/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . +prelude = . diff --git a/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation/TARGETS.fixture b/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation/TARGETS.fixture new file mode 100644 index 0000000000000..d925343f11a14 --- /dev/null +++ b/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation/TARGETS.fixture @@ -0,0 +1 @@ +defs() diff --git a/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation/linux.txt.golden b/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation/linux.txt.golden new file mode 100644 index 0000000000000..83be9683a149b --- /dev/null +++ b/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation/linux.txt.golden @@ -0,0 +1,3 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +prelude//:linux-select-dep prelude//:select-dep prelude//:dep \ No newline at end of file diff --git a/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation/macos.txt.golden b/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation/macos.txt.golden new file mode 100644 index 0000000000000..b4acbe9d3bb3c --- /dev/null +++ b/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation/macos.txt.golden @@ -0,0 +1,3 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +prelude//:macos-select-dep prelude//:select-dep prelude//:dep \ No newline at end of file diff --git a/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation/prelude.bzl b/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation/prelude.bzl new file mode 100644 index 0000000000000..dda19795dbfef --- /dev/null +++ b/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation/prelude.bzl @@ -0,0 +1,98 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +target = rule( + impl = lambda ctx: [ + DefaultInfo(default_output = ctx.actions.write("out", ctx.attrs.arg or "", allow_args = True)[0]), + RunInfo(), + ], + attrs = {"arg": attrs.option(attrs.arg(), default = None)}, +) + +proxy_target = rule( + impl = lambda ctx: ctx.attrs.dep.providers, + attrs = {"dep": attrs.dep()}, +) + +def _platform(ctx): + # Configuration that reads from buckconfig + configuration = ConfigurationInfo( + constraints = {}, + values = ctx.attrs.values, + ) + + return [ + DefaultInfo(), + configuration, + ] + +config = rule( + impl = _platform, + attrs = { + "setting": attrs.configuration_label(), + "values": attrs.dict( + key = attrs.string(), + value = attrs.string(), + sorted = False, + default = {}, + ), + }, +) + +def _config_setting(ctx): + return [DefaultInfo(), ConstraintSettingInfo(label = ctx.label.raw_target())] + +config_setting = rule( + impl = _config_setting, + attrs = {}, +) + +def _target_platform_impl(ctx): + return [ + DefaultInfo(), + PlatformInfo( + label = str(ctx.label.raw_target()), + configuration = ConfigurationInfo(constraints = {}, values = {}), + ), + ] + +target_platform = rule(impl = _target_platform_impl, attrs = {}) + +def defs(): + target( + name = "root", + arg = ("$(query_targets deps(:dep))"), + default_target_platform = ":target_platform", + ) + + proxy_target(name = "dep", dep = ":select-dep") + + proxy_target( + name = "select-dep", + dep = select( + { + ":linux": ":linux-select-dep", + ":macos": ":macos-select-dep", + }, + ), + ) + + target( + name = "macos-select-dep", + target_compatible_with = [":macos"], + ) + + target( + name = "linux-select-dep", + target_compatible_with = [":linux"], + ) + + config_setting(name = "setting") + config(name = "linux", setting = ":setting", values = {"test.configuration": "linux"}) + config(name = "macos", setting = ":setting", values = {"test.configuration": "macos"}) + + target_platform(name = "target_platform") diff --git a/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation_classpath/.buckconfig b/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation_classpath/.buckconfig new file mode 100644 index 0000000000000..fc9c93d30eddd --- /dev/null +++ b/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation_classpath/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . +prelude = . 
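The invalidation fixture above routes a select() through config rules whose `values` are matched against buckconfig, so `-c test.configuration=...` picks the arm. A simplified Python model of arm matching: an arm is taken when all of its required values are present in the resolved configuration. Real resolution also involves constraint values and refinement, so treat this as orientation only:

from typing import Dict, Optional

def resolve_select(arms: Dict[str, Dict[str, str]],
                   configuration: Dict[str, str]) -> Optional[str]:
    # The first arm whose required buckconfig values all match wins.
    for arm, required in arms.items():
        if all(configuration.get(key) == value for key, value in required.items()):
            return arm
    return None

arms = {
    ":linux-select-dep": {"test.configuration": "linux"},
    ":macos-select-dep": {"test.configuration": "macos"},
}
assert resolve_select(arms, {"test.configuration": "linux"}) == ":linux-select-dep"
assert resolve_select(arms, {"test.configuration": "macos"}) == ":macos-select-dep"

This is also why the test builds the same target twice with different -c flags: each flag flips which dep the select resolves to, and the golden files record the resulting deps() output.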
diff --git a/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation_classpath/TARGETS.fixture b/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation_classpath/TARGETS.fixture new file mode 100644 index 0000000000000..d925343f11a14 --- /dev/null +++ b/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation_classpath/TARGETS.fixture @@ -0,0 +1 @@ +defs() diff --git a/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation_classpath/linux.txt.golden b/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation_classpath/linux.txt.golden new file mode 100644 index 0000000000000..687b64773d3f0 --- /dev/null +++ b/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation_classpath/linux.txt.golden @@ -0,0 +1,3 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +prelude//:dep prelude//:select-dep prelude//:linux-select-dep \ No newline at end of file diff --git a/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation_classpath/macos.txt.golden b/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation_classpath/macos.txt.golden new file mode 100644 index 0000000000000..4eebbe00d67c8 --- /dev/null +++ b/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation_classpath/macos.txt.golden @@ -0,0 +1,3 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +prelude//:dep prelude//:select-dep prelude//:macos-select-dep \ No newline at end of file diff --git a/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation_classpath/prelude.bzl b/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation_classpath/prelude.bzl new file mode 100644 index 0000000000000..b506f07ff23e5 --- /dev/null +++ b/tests/core/analysis/test_analysis_queries_data/analysis_query_invalidation_classpath/prelude.bzl @@ -0,0 +1,108 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +CLASSPATH_KEY = "classpath_including_targets_with_no_output" + +def _target_impl(ctx): + out = ctx.actions.write("out", ctx.attrs.arg or "", allow_args = True)[0] + classpaths = [out] + if ctx.attrs.dep: + classpaths.append(ctx.attrs.dep[TemplatePlaceholderInfo].keyed_variables[CLASSPATH_KEY]) + return [ + DefaultInfo(default_output = out), + RunInfo(), + TemplatePlaceholderInfo(keyed_variables = { + CLASSPATH_KEY: cmd_args(classpaths), + }), + ] + +target = rule( + impl = _target_impl, + attrs = { + "arg": attrs.option(attrs.arg(), default = None), + "dep": attrs.option(attrs.dep(), default = None), + }, +) + +def _platform(ctx): + # Configuration that reads from buckconfig + configuration = ConfigurationInfo( + constraints = {}, + values = ctx.attrs.values, + ) + + return [ + DefaultInfo(), + configuration, + ] + +config = rule( + impl = _platform, + attrs = { + "setting": attrs.configuration_label(), + "values": attrs.dict( + key = attrs.string(), + value = attrs.string(), + sorted = False, + default = {}, + ), + }, +) + +def _config_setting(ctx): + return [DefaultInfo(), ConstraintSettingInfo(label = ctx.label.raw_target())] + +config_setting = rule( + impl = _config_setting, + attrs = {}, +) + +def _target_platform_impl(ctx): + return [ + DefaultInfo(), + PlatformInfo( + label = str(ctx.label.raw_target()), + configuration = ConfigurationInfo(constraints = {}, values = {}), + ), + ] + +target_platform = rule(impl = _target_platform_impl, attrs = {}) + +def defs(): + target( + name = "root", + arg = ("$(query_targets classpath(:dep))"), + default_target_platform = ":target_platform", + ) + + target(name = "dep", dep = ":select-dep") + + target( + name = "select-dep", + dep = select( + { + ":linux": ":linux-select-dep", + ":macos": ":macos-select-dep", + }, + ), + ) + + target( + name = "macos-select-dep", + target_compatible_with = [":macos"], + ) + + target( + name = "linux-select-dep", + target_compatible_with = [":linux"], + ) + + config_setting(name = "setting") + config(name = "linux", setting = ":setting", values = {"test.configuration": "linux"}) + config(name = "macos", setting = ":setting", values = {"test.configuration": "macos"}) + + target_platform(name = "target_platform") diff --git a/tests/core/audit/BUCK b/tests/core/audit/BUCK new file mode 100644 index 0000000000000..8e3804c5b673c --- /dev/null +++ b/tests/core/audit/BUCK @@ -0,0 +1,77 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_audit_cells", + srcs = ["test_audit_cells.py"], + data_dir = "test_audit_cells_data", +) + +buck2_e2e_test( + name = "test_audit_deferred_materializer", + srcs = ["test_audit_deferred_materializer.py"], + data_dir = "test_audit_deferred_materializer_data", +) + +buck2_e2e_test( + name = "test_audit_includes", + srcs = ["test_audit_includes.py"], + data_dir = "test_audit_includes_data", +) + +buck2_e2e_test( + name = "test_audit_configurations", + srcs = ["test_audit_configurations.py"], + data_dir = "test_audit_configurations_data", +) + +buck2_e2e_test( + name = "test_audit_execution_platform_resolution", + srcs = ["test_audit_execution_platform_resolution.py"], + data_dir = "test_audit_execution_platform_resolution_data", + deps = ["fbcode//buck2/tests/e2e_util:golden"], +) + +buck2_e2e_test( + name = "test_audit_providers", + srcs = ["test_audit_providers.py"], + data_dir = "test_audit_providers_data", +) + +buck2_e2e_test( + name = "test_audit_subtargets", + srcs = ["test_audit_subtargets.py"], + data_dir = 
"test_audit_subtargets_data", + deps = ["fbcode//buck2/tests/e2e_util:golden"], +) + +buck2_e2e_test( + name = "test_audit_config", + srcs = ["test_audit_config.py"], + data_dir = "test_audit_config_data", +) + +buck2_e2e_test( + name = "test_audit_common_opts", + srcs = ["test_audit_common_opts.py"], + data_dir = "test_audit_common_opts_data", +) + +buck2_e2e_test( + name = "test_audit_output", + srcs = ["test_audit_output.py"], + data_dir = "test_audit_output_data", +) + +buck2_e2e_test( + name = "test_audit_visibility", + srcs = ["test_audit_visibility.py"], + data_dir = "test_audit_visibility_data", +) + +buck2_e2e_test( + name = "test_audit_parse", + srcs = ["test_audit_parse.py"], + data_dir = "test_audit_parse_data", +) diff --git a/tests/core/audit/test_audit_cells.py b/tests/core/audit/test_audit_cells.py new file mode 100644 index 0000000000000..b639dad099c51 --- /dev/null +++ b/tests/core/audit/test_audit_cells.py @@ -0,0 +1,44 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +import json + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_cell_ordering(buck: Buck) -> None: + res = await buck.audit("cell") + # The repository should be in the list, not the alias + assert "source:" in res.stdout + assert "a:" not in res.stdout + assert "z:" not in res.stdout + + res = await buck.audit("cell", "--aliases") + assert "source:" in res.stdout + assert "a:" in res.stdout + assert "z:" in res.stdout + + +@buck_test() +async def test_bxl_audit_cell(buck: Buck) -> None: + result = await buck.bxl("//test_audit.bxl:audit_cell") + + # specify single cell + outputs = result.stdout.splitlines() + single_result = json.loads(outputs[0]) + assert single_result["source"] == str(buck.cwd / "fbs") + + # don't specify cell - should return all cell aliases + all_result = json.loads(outputs[1]) + assert all_result["a"] == str(buck.cwd / "fbs") + assert all_result["z"] == str(buck.cwd / "fbc") + assert all_result["code"] == str(buck.cwd / "fbc") + assert all_result["source"] == str(buck.cwd / "fbs") diff --git a/tests/core/audit/test_audit_cells_data/.buckconfig b/tests/core/audit/test_audit_cells_data/.buckconfig new file mode 100644 index 0000000000000..1931c177e4107 --- /dev/null +++ b/tests/core/audit/test_audit_cells_data/.buckconfig @@ -0,0 +1,8 @@ +[cells] + source = fbs + code = fbc + root = . + +[cell_aliases] + a = source + z = code diff --git a/tests/core/audit/test_audit_cells_data/.buckroot b/tests/core/audit/test_audit_cells_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/audit/test_audit_cells_data/test_audit.bxl b/tests/core/audit/test_audit_cells_data/test_audit.bxl new file mode 100644 index 0000000000000..33b4ca0e8a4a3 --- /dev/null +++ b/tests/core/audit/test_audit_cells_data/test_audit.bxl @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+
+def _audit_cell(ctx):
+    ctx.output.print(ctx.audit().cell(["source"]))
+    ctx.output.print(ctx.audit().cell(aliases = True))
+
+audit_cell = bxl_main(
+    impl = _audit_cell,
+    cli_args = {
+    },
+)
diff --git a/tests/core/audit/test_audit_common_opts.py b/tests/core/audit/test_audit_common_opts.py
new file mode 100644
index 0000000000000..e2376e3200bba
--- /dev/null
+++ b/tests/core/audit/test_audit_common_opts.py
@@ -0,0 +1,54 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+
+import pytest
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.buck_workspace import buck_test
+
+# TODO(iguridi) or TODO(raulgarcia4):
+# New `audit` commands have been added since these tests were created.
+# Test them if necessary.
+
+
+@buck_test()
+@pytest.mark.parametrize(  # type: ignore
+    "cmd",
+    [
+        "audit_visibility",
+        "audit_configurations",
+        "audit_config",
+    ],
+)
+async def test_pass_common_opts_func(buck: Buck, cmd: str) -> None:
+    cmd_call = getattr(buck, cmd)
+    await cmd_call("--config", "client.id=placeholder_id")
+
+
+@buck_test()
+@pytest.mark.parametrize(  # type: ignore
+    "cmd",
+    [
+        "analysis-queries",
+        "cell",
+        "execution-platform-resolution",
+        "includes",
+        "prelude",
+        "providers",
+        "subtargets",
+    ],
+)
+async def test_pass_common_opts(buck: Buck, cmd: str) -> None:
+    commands_requiring_target_pattern_arg_value = {"providers", "subtargets"}
+    arg = "//:dummy"
+
+    if cmd in commands_requiring_target_pattern_arg_value:
+        await buck.audit(cmd, arg, "--config", "client.id=placeholder_id")
+    else:
+        await buck.audit(cmd, "--config", "client.id=placeholder_id")
diff --git a/tests/core/audit/test_audit_common_opts_data/.buckconfig b/tests/core/audit/test_audit_common_opts_data/.buckconfig
new file mode 100644
index 0000000000000..814d1d7cd8e34
--- /dev/null
+++ b/tests/core/audit/test_audit_common_opts_data/.buckconfig
@@ -0,0 +1,6 @@
+[cells]
+ root = .
+ prelude = .
+
+[buildfile]
+ name = TARGETS.fixture
diff --git a/tests/core/audit/test_audit_common_opts_data/.buckroot b/tests/core/audit/test_audit_common_opts_data/.buckroot
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/audit/test_audit_common_opts_data/TARGETS.fixture b/tests/core/audit/test_audit_common_opts_data/TARGETS.fixture
new file mode 100644
index 0000000000000..607128ce43188
--- /dev/null
+++ b/tests/core/audit/test_audit_common_opts_data/TARGETS.fixture
@@ -0,0 +1,3 @@
+load(":defs.bzl", "simple")
+
+simple(name = "dummy")
diff --git a/tests/core/audit/test_audit_common_opts_data/defs.bzl b/tests/core/audit/test_audit_common_opts_data/defs.bzl
new file mode 100644
index 0000000000000..bab2041feeb2d
--- /dev/null
+++ b/tests/core/audit/test_audit_common_opts_data/defs.bzl
@@ -0,0 +1,11 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+ +def _simple_impl(_ctx): + return [DefaultInfo()] + +simple = rule(impl = _simple_impl, attrs = {}) diff --git a/tests/core/audit/test_audit_common_opts_data/prelude.bzl b/tests/core/audit/test_audit_common_opts_data/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/audit/test_audit_config.py b/tests/core/audit/test_audit_config.py new file mode 100644 index 0000000000000..df170e803c8bd --- /dev/null +++ b/tests/core/audit/test_audit_config.py @@ -0,0 +1,197 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test( + extra_buck_config={ + "test": { + "foo": "bar", + } + }, +) +async def test_extra_buck_config(buck: Buck) -> None: + """ + Assert that our testing framework works as expected. + """ + + cfg = (await buck.audit_config("--style=json")).get_json() + assert cfg.get("test.foo") == "bar" + + +@buck_test() +async def test_audit_config_json(buck: Buck) -> None: + result = await buck.audit_config("--style=json") + result_json = result.get_json() + assert result_json is not None + + +@buck_test() +async def test_audit_config_cell_json(buck: Buck) -> None: + out = await buck.audit_config( + "--style", + "json", + ) + out_json = out.get_json() or {} + assert out_json.get("test.is_root") == "yes" + assert out_json.get("test.is_code") is None + + out = await buck.audit_config("--style", "json", "--cell", "code") + out_json = out.get_json() or {} + assert out_json.get("test.is_code") == "yes" + assert out_json.get("test.is_root") is None + + out = await buck.audit_config( + "--style", + "json", + rel_cwd=Path("code"), + ) + out_json = out.get_json() or {} + assert out_json.get("test.is_code") == "yes" + assert out_json.get("test.is_root") is None + + +@buck_test(setup_eden=True) +async def test_audit_config_all_cells(buck: Buck) -> None: + out = await buck.audit_config( + "--all-cells", + "--style", + "json", + ) + out_json = out.get_json() or {} + print(out_json) + assert out_json.get("code//bar.a") == "2" + assert out_json.get("source//bar.a") == "1" + assert out_json.get("root//bar.a") == "1" + assert out_json.get("b//bar.a") is None + + out = await buck.audit_config( + "--all-cells", + "--style", + "json", + "code//bar.a", + ) + out_json = out.get_json() or {} + assert out_json.get("code//bar.a") == "2" + assert out_json.get("source//bar.a") is None + + out = await buck.audit_config( + "--all-cells", + ) + assert "# Cell: source\n[bar]\n a = 1\n" in out.stdout + + +@buck_test() +async def test_audit_config_with_config_value(buck: Buck) -> None: + result_config = await buck.audit_config( + "python", + "--style", + "json", + "-cpython.helpers=true", + ) + result_config_json = result_config.get_json() + + assert result_config_json.get("python.helpers") == "true" + + +@buck_test() +async def test_audit_config_with_config_file(buck: Buck, tmp_path: Path) -> None: + configfile = tmp_path / "config.bcfg" + configfile.write_text("[python]\n helpers = true\n") + + result_file = await buck.audit_config( + "--config-file", + str(configfile), + "--style", + "json", + ) + + assert result_file.get_json().get("python.helpers") == "true" + + 
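The config-file test above writes an INI-style .bcfg and expects `audit config --style=json` to surface its contents as flat section.key entries. A toy parser covering just the fixture's shape; real buckconfig parsing additionally handles comments, includes, and the $(config ...) references used by the data files below:

from typing import Dict

def parse_bcfg(text: str) -> Dict[str, str]:
    # Flatten "[section]" / "key = value" lines into {"section.key": "value"}.
    flat: Dict[str, str] = {}
    section = ""
    for raw in text.splitlines():
        line = raw.strip()
        if not line:
            continue
        if line.startswith("[") and line.endswith("]"):
            section = line[1:-1]
        else:
            key, _, value = line.partition("=")
            flat[f"{section}.{key.strip()}"] = value.strip()
    return flat

assert parse_bcfg("[python]\n helpers = true\n") == {"python.helpers": "true"}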
+@buck_test() +async def test_audit_config_location_extended(buck: Buck) -> None: + result = await buck.audit_config( + "bar.a", + "--location=extended", + ) + assert "a = 1" in result.stdout + assert "included.bcfg:2" in result.stdout + + +@buck_test() +async def test_audit_config_with_cell_syntax(buck: Buck) -> None: + result_file = await buck.audit_config( + "code//test.is_code", + "--style", + "json", + ) + result_file_json = result_file.get_json() + + assert result_file_json.get("code//test.is_code") == "yes" + + +@buck_test() +async def test_cell_relative_configs(buck: Buck) -> None: + result_root_cell = await buck.audit_config( + "--config", + "root//bar.a=5", + "--style", + "json", + ) + result_root_cell_json = result_root_cell.get_json() + + assert result_root_cell_json is not None + assert result_root_cell_json.get("foo.b") == "5" + + result_nonroot_cell = await buck.audit_config( + "foo", + "--config", + "code//bar.a=5", + "--style", + "json", + "--cell", + "code", + ) + result_nonroot_cell_json = result_nonroot_cell.get_json() + + assert result_nonroot_cell_json is not None + assert result_nonroot_cell_json.get("foo.b") == "5" + + result_diff_cell = await buck.audit_config( + "foo", + "--config", + "code//bar.a=5", + "--style", + "json", + "--cell", + "source", + ) + result_diff_cell_json = result_diff_cell.get_json() + + assert result_diff_cell_json is not None + assert result_diff_cell_json.get("foo.b") == "1" + + result_all_cell = await buck.audit_config( + "foo", + "--config", + "bar.a=5", + "--style", + "json", + "--cell", + "source", + ) + result_all_cell_json = result_all_cell.get_json() + + assert result_all_cell_json is not None + assert result_all_cell_json.get("foo.b") == "5" diff --git a/tests/core/audit/test_audit_config_data/.buckconfig b/tests/core/audit/test_audit_config_data/.buckconfig new file mode 100644 index 0000000000000..e5d8a5de7756b --- /dev/null +++ b/tests/core/audit/test_audit_config_data/.buckconfig @@ -0,0 +1,13 @@ +[foo] + a = $(config bar.a) + b = $(config foo.a) + +[test] + is_root = yes + +[cells] + root = . 
+ code = code + source = source + + diff --git a/tests/core/audit/test_audit_config_data/.buckroot b/tests/core/audit/test_audit_config_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/audit/test_audit_config_data/code/.buckconfig b/tests/core/audit/test_audit_config_data/code/.buckconfig new file mode 100644 index 0000000000000..e1a933b0529a3 --- /dev/null +++ b/tests/core/audit/test_audit_config_data/code/.buckconfig @@ -0,0 +1,9 @@ +[bar] + a = 2 + +[foo] + a = $(config bar.a) + b = $(config foo.a) + +[test] + is_code = yes diff --git a/tests/core/audit/test_audit_config_data/included.bcfg b/tests/core/audit/test_audit_config_data/included.bcfg new file mode 100644 index 0000000000000..e6c1997a1a7c6 --- /dev/null +++ b/tests/core/audit/test_audit_config_data/included.bcfg @@ -0,0 +1,2 @@ +[bar] + a = 1 diff --git a/tests/core/audit/test_audit_config_data/source/.buckconfig b/tests/core/audit/test_audit_config_data/source/.buckconfig new file mode 100644 index 0000000000000..a33c5cdc2d4bd --- /dev/null +++ b/tests/core/audit/test_audit_config_data/source/.buckconfig @@ -0,0 +1,6 @@ +[bar] + a = 1 + +[foo] + a = $(config bar.a) + b = $(config foo.a) diff --git a/tests/core/audit/test_audit_configurations.py b/tests/core/audit/test_audit_configurations.py new file mode 100644 index 0000000000000..73415bc0c73e6 --- /dev/null +++ b/tests/core/audit/test_audit_configurations.py @@ -0,0 +1,49 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import re +from typing import List + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +def _replace_hash(s: str) -> str: + return re.sub(r"\b[0-9a-f]{16}\b", "", s) + + +def _parse_audit_configurations(output: str) -> List[str]: + return [x.rstrip(":") for x in output.splitlines() if not x.startswith(" ")] + + +@buck_test() +async def test_audit_configurations_all(buck: Buck) -> None: + # Evaluate a target to make sure configuration is loaded. + await buck.cquery("//:genrule") + + result = await buck.audit("configurations") + configurations = _parse_audit_configurations(result.stdout) + configurations = [_replace_hash(x) for x in configurations] + assert "root//:p#" in configurations + + +@buck_test() +async def test_audit_configurations_specific(buck: Buck) -> None: + # Evaluate a target to make sure configuration is loaded. + await buck.cquery("//:genrule") + + # Load configurations so we can learn the hash. + result = await buck.audit("configurations") + configurations = _parse_audit_configurations(result.stdout) + [configuration] = [c for c in configurations if c.startswith("root//:p#")] + + # Now audit the specific configuration. + result = await buck.audit("configurations", configuration) + assert [configuration] == _parse_audit_configurations(result.stdout) diff --git a/tests/core/audit/test_audit_configurations_data/.buckconfig b/tests/core/audit/test_audit_configurations_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/audit/test_audit_configurations_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . 
+ nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/audit/test_audit_configurations_data/.buckroot b/tests/core/audit/test_audit_configurations_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/audit/test_audit_configurations_data/TARGETS.fixture b/tests/core/audit/test_audit_configurations_data/TARGETS.fixture new file mode 100644 index 0000000000000..fd238c66930d5 --- /dev/null +++ b/tests/core/audit/test_audit_configurations_data/TARGETS.fixture @@ -0,0 +1,9 @@ +platform( + name = "p", + constraint_values = [], +) + +trivial_build( + name = "genrule", + default_target_platform = ":p", +) diff --git a/tests/core/audit/test_audit_deferred_materializer.py b/tests/core/audit/test_audit_deferred_materializer.py new file mode 100644 index 0000000000000..3fea2ad10f9cf --- /dev/null +++ b/tests/core/audit/test_audit_deferred_materializer.py @@ -0,0 +1,22 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_audit_deferred_materializer_list(buck: Buck) -> None: + res = await buck.audit("deferred-materializer", "list") + assert res.stdout.strip() == "" + + await buck.build("//:simple") + + res = await buck.audit("deferred-materializer", "list") + assert "__simple__" in res.stdout.strip() diff --git a/tests/core/audit/test_audit_deferred_materializer_data/.buckconfig b/tests/core/audit/test_audit_deferred_materializer_data/.buckconfig new file mode 100644 index 0000000000000..86593e627bfda --- /dev/null +++ b/tests/core/audit/test_audit_deferred_materializer_data/.buckconfig @@ -0,0 +1,12 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored + +[repositories] +root = . +prelude = prelude + +[buck2] + materializations = deferred diff --git a/tests/core/audit/test_audit_deferred_materializer_data/.buckroot b/tests/core/audit/test_audit_deferred_materializer_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/audit/test_audit_deferred_materializer_data/TARGETS.fixture b/tests/core/audit/test_audit_deferred_materializer_data/TARGETS.fixture new file mode 100644 index 0000000000000..f94ce0ac7bd14 --- /dev/null +++ b/tests/core/audit/test_audit_deferred_materializer_data/TARGETS.fixture @@ -0,0 +1,5 @@ +load(":defs.bzl", "simple") + +simple( + name = "simple", +) diff --git a/tests/core/audit/test_audit_deferred_materializer_data/defs.bzl b/tests/core/audit/test_audit_deferred_materializer_data/defs.bzl new file mode 100644 index 0000000000000..3666c01273785 --- /dev/null +++ b/tests/core/audit/test_audit_deferred_materializer_data/defs.bzl @@ -0,0 +1,30 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+
+def _simple_impl(ctx):
+    out = ctx.actions.declare_output("out")
+
+    ctx.actions.run(
+        [
+            "python3",
+            "-c",
+            "import sys; open(sys.argv[1], 'w')",
+            out.as_output(),
+        ],
+        category = "write",
+    )
+
+    return [
+        DefaultInfo(
+            default_output = out,
+        ),
+    ]
+
+simple = rule(
+    attrs = {},
+    impl = _simple_impl,
+)
diff --git a/tests/core/audit/test_audit_deferred_materializer_data/prelude/prelude.bzl b/tests/core/audit/test_audit_deferred_materializer_data/prelude/prelude.bzl
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/audit/test_audit_execution_platform_resolution.py b/tests/core/audit/test_audit_execution_platform_resolution.py
new file mode 100644
index 0000000000000..9e2cc212ad3ba
--- /dev/null
+++ b/tests/core/audit/test_audit_execution_platform_resolution.py
@@ -0,0 +1,19 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.buck_workspace import buck_test
+from buck2.tests.e2e_util.helper.golden import golden_replace_cfg_hash
+
+
+@buck_test()
+async def test_audit_execution_platform_resolution(buck: Buck) -> None:
+    result = await buck.audit("execution-platform-resolution", "//:target")
+    golden_replace_cfg_hash(output=result.stdout, rel_path="out.txt.golden")
diff --git a/tests/core/audit/test_audit_execution_platform_resolution_data/.buckconfig b/tests/core/audit/test_audit_execution_platform_resolution_data/.buckconfig
new file mode 100644
index 0000000000000..1dcc7a75e1ea2
--- /dev/null
+++ b/tests/core/audit/test_audit_execution_platform_resolution_data/.buckconfig
@@ -0,0 +1,15 @@
+[buildfile]
+ name = TARGETS.fixture
+
+[cells]
+ root = .
+ nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[build] + execution_platforms = //execution_platforms:execution_platforms diff --git a/tests/core/audit/test_audit_execution_platform_resolution_data/.buckroot b/tests/core/audit/test_audit_execution_platform_resolution_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/audit/test_audit_execution_platform_resolution_data/TARGETS.fixture b/tests/core/audit/test_audit_execution_platform_resolution_data/TARGETS.fixture new file mode 100644 index 0000000000000..fd92e92815a3e --- /dev/null +++ b/tests/core/audit/test_audit_execution_platform_resolution_data/TARGETS.fixture @@ -0,0 +1,9 @@ +platform(name = "p") + +stub( + name = "target", + exec_compatible_with = [ + "//config:minix", + ], + default_target_platform = ":p", +) diff --git a/tests/core/audit/test_audit_execution_platform_resolution_data/config/TARGETS.fixture b/tests/core/audit/test_audit_execution_platform_resolution_data/config/TARGETS.fixture new file mode 100644 index 0000000000000..f4893d5d0b25b --- /dev/null +++ b/tests/core/audit/test_audit_execution_platform_resolution_data/config/TARGETS.fixture @@ -0,0 +1,25 @@ +constraint_setting(name = "os") + +constraint_value( + name = "haiku", + constraint_setting = ":os", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "hurd", + constraint_setting = ":os", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "minix", + constraint_setting = ":os", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "sunos", + constraint_setting = ":os", + visibility = ["PUBLIC"], +) diff --git a/tests/core/audit/test_audit_execution_platform_resolution_data/execution_platforms/TARGETS.fixture b/tests/core/audit/test_audit_execution_platform_resolution_data/execution_platforms/TARGETS.fixture new file mode 100644 index 0000000000000..3e5e052000c67 --- /dev/null +++ b/tests/core/audit/test_audit_execution_platform_resolution_data/execution_platforms/TARGETS.fixture @@ -0,0 +1,16 @@ +load(":defs.bzl", "execution_platform", "execution_platforms") + +execution_platform(name = "haiku", os_configuration = "//config:haiku") +execution_platform(name = "hurd", os_configuration = "//config:hurd") +execution_platform(name = "sunos", os_configuration = "//config:sunos") +execution_platform(name = "minix", os_configuration = "//config:minix") + +execution_platforms( + name = "execution_platforms", + platforms = [ + ":haiku", + ":hurd", + ":minix", + ":sunos", + ], +) diff --git a/tests/core/audit/test_audit_execution_platform_resolution_data/execution_platforms/defs.bzl b/tests/core/audit/test_audit_execution_platform_resolution_data/execution_platforms/defs.bzl new file mode 100644 index 0000000000000..705b5aeea61c5 --- /dev/null +++ b/tests/core/audit/test_audit_execution_platform_resolution_data/execution_platforms/defs.bzl @@ -0,0 +1,36 @@ +# @nolint + +def _execution_platform(ctx): + return [ + DefaultInfo(), + ExecutionPlatformInfo( + label = ctx.label.raw_target(), + configuration = ctx.attrs.os_configuration[ConfigurationInfo], + executor_config = CommandExecutorConfig( + local_enabled = True, + remote_enabled = False, + ), + ), + ] + +execution_platform = rule( + impl = _execution_platform, + attrs = { + "os_configuration": attrs.dep(providers = [ConfigurationInfo]), + }, +) + +def _execution_platforms(ctx): + return [ + DefaultInfo(), + ExecutionPlatformRegistrationInfo( + platforms = [p[ExecutionPlatformInfo] for p 
in ctx.attrs.platforms],
+        ),
+    ]
+
+execution_platforms = rule(
+    impl = _execution_platforms,
+    attrs = {
+        "platforms": attrs.list(attrs.dep(providers = [ExecutionPlatformInfo])),
+    },
+)
\ No newline at end of file
diff --git a/tests/core/audit/test_audit_execution_platform_resolution_data/out.txt.golden b/tests/core/audit/test_audit_execution_platform_resolution_data/out.txt.golden
new file mode 100644
index 0000000000000..a23e90ddd37b2
--- /dev/null
+++ b/tests/core/audit/test_audit_execution_platform_resolution_data/out.txt.golden
@@ -0,0 +1,13 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+Checking each target against execution platforms defined by root//execution_platforms:execution_platforms
+root//:target (root//:p#):
+  Execution platform: root//execution_platforms:minix
+  Execution platform configuration: root//execution_platforms:minix#
+  Execution deps:
+  Toolchain deps:
+  Configuration deps:
+  Skipped root//execution_platforms:haiku
+    exec_compatible_with requires `root//config:minix` but it was not satisfied
+  Skipped root//execution_platforms:hurd
+    exec_compatible_with requires `root//config:minix` but it was not satisfied
diff --git a/tests/core/audit/test_audit_includes.py b/tests/core/audit/test_audit_includes.py
new file mode 100644
index 0000000000000..db104f82f55f2
--- /dev/null
+++ b/tests/core/audit/test_audit_includes.py
@@ -0,0 +1,50 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+
+import os
+import re
+from pathlib import Path
+from typing import List
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.api.buck_result import BuckResult
+from buck2.tests.e2e_util.buck_workspace import buck_test
+
+
+def _includes(output: BuckResult) -> List[str]:
+    return sorted(
+        [
+            re.sub(".*[/\\\\]", "", line)
+            for line in output.stdout.splitlines()
+            if line.endswith(".bzl")
+        ]
+    )
+
+
+@buck_test()
+async def test_audit_includes(buck: Buck, tmp_path: Path) -> None:
+    # Using project relative path.
+    output = await buck.audit("includes", "TARGETS.fixture")
+    assert _includes(output) == ["incl.bzl", "prelude.bzl"]
+
+    # Using project relative path when in a subdirectory.
+    output = await buck.audit("includes", "TARGETS.fixture", rel_cwd=Path("dir"))
+    assert _includes(output) == ["incl.bzl", "prelude.bzl"]
+
+    # Using absolute path.
+    output = await buck.audit("includes", f"{buck.cwd}/TARGETS.fixture")
+    assert _includes(output) == ["incl.bzl", "prelude.bzl"]
+
+    if os.name != "nt":
+        # Create symlink to the project root in a temporary directory.
+        (tmp_path / "symlink").symlink_to(buck.cwd)
+
+        output = await buck.audit("includes", f"{tmp_path}/symlink/TARGETS.fixture")
+        assert _includes(output) == ["incl.bzl", "prelude.bzl"]
diff --git a/tests/core/audit/test_audit_includes_data/.buckconfig b/tests/core/audit/test_audit_includes_data/.buckconfig
new file mode 100644
index 0000000000000..2955e4491d052
--- /dev/null
+++ b/tests/core/audit/test_audit_includes_data/.buckconfig
@@ -0,0 +1,8 @@
+[buildfile]
+ name = TARGETS.fixture
+
+[repositories]
+ root = .
+ +[repository_aliases] + prelude = root diff --git a/tests/core/audit/test_audit_includes_data/.buckroot b/tests/core/audit/test_audit_includes_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/audit/test_audit_includes_data/TARGETS.fixture b/tests/core/audit/test_audit_includes_data/TARGETS.fixture new file mode 100644 index 0000000000000..f69e7d054a388 --- /dev/null +++ b/tests/core/audit/test_audit_includes_data/TARGETS.fixture @@ -0,0 +1,3 @@ +load("incl.bzl", "x") + +print(x) # buildifier: disable=print diff --git a/tests/core/audit/test_audit_includes_data/dir/keep-me b/tests/core/audit/test_audit_includes_data/dir/keep-me new file mode 100644 index 0000000000000..648a51a3266e8 --- /dev/null +++ b/tests/core/audit/test_audit_includes_data/dir/keep-me @@ -0,0 +1 @@ +Keep the directory. diff --git a/tests/core/audit/test_audit_includes_data/incl.bzl b/tests/core/audit/test_audit_includes_data/incl.bzl new file mode 100644 index 0000000000000..ff75a93011381 --- /dev/null +++ b/tests/core/audit/test_audit_includes_data/incl.bzl @@ -0,0 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +x = 1 diff --git a/tests/core/audit/test_audit_includes_data/prelude.bzl b/tests/core/audit/test_audit_includes_data/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/audit/test_audit_output.py b/tests/core/audit/test_audit_output.py new file mode 100644 index 0000000000000..c13f76791c689 --- /dev/null +++ b/tests/core/audit/test_audit_output.py @@ -0,0 +1,202 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + + +import json +import platform + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_audit_output_malformed_path(buck: Buck) -> None: + await expect_failure( + buck.audit_output( + "blah", + ), + stderr_regex="Malformed buck-out path", + ) + + +@buck_test() +async def test_audit_output_scratch_path_unsupported(buck: Buck) -> None: + # pick a random target, we just want the config hash + config_hash = await _get_config_hash(buck, "root//:dummy") + await expect_failure( + buck.audit_output( + f"buck-out/v2/tmp/cell1/{config_hash}/path/to/target/__target__/output", + ), + stderr_regex="not supported for audit output", + ) + + +@buck_test() +async def test_audit_output_bxl_unsupported(buck: Buck) -> None: + # pick a random target, we just want the config hash + config_hash = await _get_config_hash(buck, "root//:dummy") + await expect_failure( + buck.audit_output( + f"buck-out/v2/gen-bxl/cell1/{config_hash}/path/to/function.bxl/__function__/output", + ), + stderr_regex="not supported for audit output", + ) + + +@buck_test() +async def test_audit_output_anon_targets_unsupported(buck: Buck) -> None: + # pick a random target, we just want the config hash + config_hash = await _get_config_hash(buck, "root//:dummy") + await expect_failure( + buck.audit_output( + f"buck-out/v2/gen-anon/cell1/{config_hash}/path/to/target/rule_hash/__target__/output", + ), + stderr_regex="not supported for audit output", + ) + + +@buck_test() +async def test_audit_output_invalid_prefix(buck: Buck) -> None: + # invalid prefix (i.e. not gen, gen-anon, gen-bxl, temp, or test) + # pick a random target, we just want the config hash + config_hash = await _get_config_hash(buck, "root//:dummy") + await expect_failure( + buck.audit_output( + f"buck-out/v2/not_gen/cell1/{config_hash}/path/to/target/rule_hash/__target__/output", + ), + stderr_regex="Malformed buck-out path", + ) + + +@buck_test() +async def test_audit_output_nonexistent_cell(buck: Buck) -> None: + # pick a random target, we just want the config hash + config_hash = await _get_config_hash(buck, "root//:dummy") + await expect_failure( + buck.audit_output( + f"buck-out/v2/gen/made_up_cell/{config_hash}/path/to/target/rule_hash/__target__/output", + ), + stderr_regex="unknown cell name", + ) + + +@buck_test() +async def test_audit_output_in_root_directory(buck: Buck) -> None: + target = "root//:dummy" + config_hash = await _get_config_hash(buck, target) + result = await buck.audit_output( + f"buck-out/v2/gen/root/{config_hash}/__dummy__/foo.txt", + "--output-all-attributes", + ) + + action = json.loads(result.stdout) + assert len(action.keys()) == 1 + action_key = list(action.keys())[0] + assert target in action_key + + +@buck_test() +async def test_non_root_cell(buck: Buck) -> None: + target = "cell1//:dummy2" + config_hash = await _get_config_hash(buck, target) + result = await buck.audit_output( + f"buck-out/v2/gen/cell1/{config_hash}/__dummy2__/foo.txt", + "--output-all-attributes", + ) + + action = json.loads(result.stdout) + assert len(action.keys()) == 1 + action_key = list(action.keys())[0] + assert target in action_key + + +@buck_test() +async def test_fixed_target_platform(buck: Buck) -> None: + target_platform = "root//:linux_platform" + target_platforms_arg = f"--target-platforms={target_platform}" + + target = "root//directory:dummy" + config_hash = await _get_config_hash(buck, target, 
target_platforms_arg) + result = await buck.audit_output( + f"buck-out/v2/gen/root/{config_hash}/directory/__dummy__/foo.txt", + target_platforms_arg, + ) + + action = result.stdout + assert target in action + assert target_platform in action + assert "id" in action + + +@buck_test() +async def test_dynamic_outputs(buck: Buck) -> None: + target = "root//dynamic_output:dynamic_output" + config_hash = await _get_config_hash(buck, target) + + result = await buck.audit_output( + f"buck-out/v2/gen/root/{config_hash}/dynamic_output/__dynamic_output__/bound_dynamic.txt", + ) + action = result.stdout + assert target in action + assert "id" in action + + result = await buck.audit_output( + f"buck-out/v2/gen/root/{config_hash}/dynamic_output/__dynamic_output__/defined_dynamic.txt", + ) + # FIXME(JakobDegen): Why isn't this an error? + assert "Failed to find an action that produced the output path" in result.stdout + + +@buck_test() +async def test_wrong_config_hash(buck: Buck) -> None: + # Should return the unconfigured target label + target_platform = "root//:linux_platform" + target_platforms_arg = f"--target-platforms={target_platform}" + result = await buck.audit_output( + "buck-out/v2/gen/root/wrong_config_hash/directory/__dummy__/foo.txt", + target_platforms_arg, + ) + + output = result.stdout + # unconfigured target label should be in the output + assert "root//directory:dummy" in output + # configuration platform should not be in the output + assert target_platform not in output + assert "Platform configuration" in output + assert "did not match" in output + + +@buck_test() +async def test_output_directory(buck: Buck) -> None: + # Test a rule that outputs to a directory + target = "root//directory:empty_dir" + config_hash = await _get_config_hash(buck, target) + result = await buck.audit_output( + f"buck-out/v2/gen/root/{config_hash}/directory/__empty_dir__/outputdir", + ) + + action = result.stdout + assert target in action + assert "id" in action + + +# TODO(@wendyy) - remove this config hash hack +# Config hash might change, so let's build a target with linux target platform +# and get the current config hash, which is the 4th index in the buck-out path. +async def _get_config_hash(buck: Buck, target: str, *args: str) -> str: + result = await buck.build(target, *args) + delim = "/" + if platform.system() == "Windows": + delim = "\\" + config_hash = str( + result.get_build_report().output_for_target(target, rel_path=True) + ).split(delim)[4] + + return config_hash diff --git a/tests/core/audit/test_audit_output_data/.buckconfig b/tests/core/audit/test_audit_output_data/.buckconfig new file mode 100644 index 0000000000000..a75cad917b64e --- /dev/null +++ b/tests/core/audit/test_audit_output_data/.buckconfig @@ -0,0 +1,13 @@ +[cells] + root = .
+ cell1 = cell1 + nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/audit/test_audit_output_data/.buckroot b/tests/core/audit/test_audit_output_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/audit/test_audit_output_data/BUCK b/tests/core/audit/test_audit_output_data/BUCK new file mode 100644 index 0000000000000..bfdf355022eae --- /dev/null +++ b/tests/core/audit/test_audit_output_data/BUCK @@ -0,0 +1,9 @@ +load("@fbcode_macros//build_defs:native_rules.bzl", "buck_genrule") + +oncall("build_infra") + +buck_genrule( + name = "dummy", + out = "dummy.txt", + cmd = "echo dummy > $OUT", +) diff --git a/tests/core/audit/test_audit_output_data/TARGETS.fixture b/tests/core/audit/test_audit_output_data/TARGETS.fixture new file mode 100644 index 0000000000000..a7d665ad7fe79 --- /dev/null +++ b/tests/core/audit/test_audit_output_data/TARGETS.fixture @@ -0,0 +1,13 @@ +trivial_build(name = "dummy") + +constraint_setting(name = "os") + +constraint_value( + name = "linux", + constraint_setting = ":os", +) + +platform( + name = "linux_platform", + constraint_values = [":linux"], +) diff --git a/tests/core/audit/test_audit_output_data/cell1/.buckconfig b/tests/core/audit/test_audit_output_data/cell1/.buckconfig new file mode 100644 index 0000000000000..9f36f679bda9c --- /dev/null +++ b/tests/core/audit/test_audit_output_data/cell1/.buckconfig @@ -0,0 +1,2 @@ +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/audit/test_audit_output_data/cell1/TARGETS.fixture b/tests/core/audit/test_audit_output_data/cell1/TARGETS.fixture new file mode 100644 index 0000000000000..f98179bedcda4 --- /dev/null +++ b/tests/core/audit/test_audit_output_data/cell1/TARGETS.fixture @@ -0,0 +1 @@ +trivial_build(name = "dummy2") diff --git a/tests/core/audit/test_audit_output_data/directory/TARGETS.fixture b/tests/core/audit/test_audit_output_data/directory/TARGETS.fixture new file mode 100644 index 0000000000000..41b803e9a3990 --- /dev/null +++ b/tests/core/audit/test_audit_output_data/directory/TARGETS.fixture @@ -0,0 +1,5 @@ +load(":defs.bzl", "empty_dir") + +empty_dir(name = "empty_dir") + +trivial_build(name = "dummy") diff --git a/tests/core/audit/test_audit_output_data/directory/defs.bzl b/tests/core/audit/test_audit_output_data/directory/defs.bzl new file mode 100644 index 0000000000000..9b7ad4d460598 --- /dev/null +++ b/tests/core/audit/test_audit_output_data/directory/defs.bzl @@ -0,0 +1,15 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
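
A note on the `_get_config_hash` helper above: it takes the 4th segment of the artifact's project-relative output path (`buck-out/v2/gen/<cell>/<config-hash>/...`), branching on the platform to pick a path separator. A separator-agnostic sketch of the same extraction, assuming that layout:

    from pathlib import PurePath

    def config_hash_from_rel_path(rel_path: str) -> str:
        # PurePath splits on the host platform's separator, so no explicit
        # platform.system() branch is needed to handle "\\" on Windows.
        parts = PurePath(rel_path).parts
        assert parts[:2] == ("buck-out", "v2"), rel_path
        return parts[4]
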
+ +def _impl(ctx): + out = ctx.actions.copied_dir("outputdir", {}) + return [DefaultInfo(default_output = out)] + +empty_dir = rule( + impl = _impl, + attrs = {}, +) diff --git a/tests/core/audit/test_audit_output_data/dynamic_output/TARGETS.fixture b/tests/core/audit/test_audit_output_data/dynamic_output/TARGETS.fixture new file mode 100644 index 0000000000000..fd7f326bb5a9f --- /dev/null +++ b/tests/core/audit/test_audit_output_data/dynamic_output/TARGETS.fixture @@ -0,0 +1,3 @@ +load(":defs.bzl", "dynamic_output") + +dynamic_output(name = "dynamic_output") diff --git a/tests/core/audit/test_audit_output_data/dynamic_output/defs.bzl b/tests/core/audit/test_audit_output_data/dynamic_output/defs.bzl new file mode 100644 index 0000000000000..96f35203e4335 --- /dev/null +++ b/tests/core/audit/test_audit_output_data/dynamic_output/defs.bzl @@ -0,0 +1,24 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(ctx): + out = ctx.actions.declare_output("bound_dynamic.txt") + + def dynamic(ctx, out): + defined_dynamic = ctx.actions.write("defined_dynamic.txt", "abcd") + ctx.actions.copy_file(out.as_output(), defined_dynamic) + + f = lambda ctx, _dyn, outputs: dynamic(ctx, outputs[out]) + + ctx.actions.dynamic_output(dynamic = [], inputs = [], outputs = [out.as_output()], f = f) + + return [DefaultInfo(default_output = out)] + +dynamic_output = rule( + impl = _impl, + attrs = {}, +) diff --git a/tests/core/audit/test_audit_parse.py b/tests/core/audit/test_audit_parse.py new file mode 100644 index 0000000000000..eee33e65df99e --- /dev/null +++ b/tests/core/audit/test_audit_parse.py @@ -0,0 +1,146 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
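
The parse tests below assert which attributes `buck2 audit parse` reports for each buck-out flavor. Summarized from the assertions in this file (derived from the tests themselves, not from separate documentation), the per-prefix attribute sets look like:

    # Attribute sets asserted per buck-out prefix in the tests below.
    EXPECTED_ATTRS = {
        "gen": {"cell_path", "target_label", "short_artifact_path",
                "config_hash", "full_artifact_path_no_hash"},
        "tmp": {"cell_path", "target_label", "config_hash",
                "full_artifact_path_no_hash"},
        "gen-bxl": {"bxl_function_label", "config_hash",
                    "full_artifact_path_no_hash"},
        "gen-anon": {"cell_path", "target_label", "config_hash",
                     "attr_hash", "full_artifact_path_no_hash"},
        "test": {"cell_path", "config_hash", "full_artifact_path_no_hash"},
    }

    def check_parsed(prefix: str, parsed: dict) -> None:
        # Fails if any attribute the tests assert on is missing
        # from the parsed JSON output.
        missing = EXPECTED_ATTRS[prefix] - parsed.keys()
        assert not missing, f"missing attributes for {prefix}: {missing}"
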
+ +# pyre-strict + + +import json + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_audit_parse(buck: Buck) -> None: + # random config hash + config_hash = "3f794b0267173c8e" + + # rule + # json + result = await buck.audit( + "parse", + f"buck-out/v2/gen/root/{config_hash}/path/to/target/__target_name__/output", + "--json", + ) + + result = json.loads(result.stdout) + assert result["cell_path"] == "root//path/to/target" + assert result["target_label"] == "root//path/to/target:target_name" + assert result["short_artifact_path"] == "output" + assert result["config_hash"] == config_hash + assert ( + result["full_artifact_path_no_hash"] + == "root/path/to/target/__target_name__/output" + ) + + # not json + result = await buck.audit( + "parse", + f"buck-out/v2/gen/root/{config_hash}/path/to/target/__target_name__/output", + ) + + result = result.stdout.splitlines() + assert result[0] == "root//path/to/target" + assert result[1] == "root//path/to/target:target_name" + assert result[2] == "output" + assert result[3] == config_hash + assert result[4] == "root/path/to/target/__target_name__/output" + + # output attribute + result = await buck.audit( + "parse", + f"buck-out/v2/gen/root/{config_hash}/path/to/target/__target_name__/output", + "--output-attribute", + "config_hash", + "--output-attribute", + "full_artifact_path_no_hash", + ) + + result = result.stdout.splitlines() + assert result[0] == config_hash + assert result[1] == "root/path/to/target/__target_name__/output" + + # output attribute with json + result = await buck.audit( + "parse", + f"buck-out/v2/gen/root/{config_hash}/path/to/target/__target_name__/output", + "--json", + "--output-attribute", + "config_hash", + "--output-attribute", + "full_artifact_path_no_hash", + ) + + result = json.loads(result.stdout) + assert result["config_hash"] == config_hash + assert ( + result["full_artifact_path_no_hash"] + == "root/path/to/target/__target_name__/output" + ) + + # tmp + result = await buck.audit( + "parse", + f"buck-out/v2/tmp/root/{config_hash}/path/to/target/__target_name__/output", + "--json", + ) + + result = json.loads(result.stdout) + assert result["cell_path"] == "root//path/to/target" + assert result["target_label"] == "root//path/to/target:target_name" + assert result["config_hash"] == config_hash + assert ( + result["full_artifact_path_no_hash"] + == "root/path/to/target/__target_name__/output" + ) + + # bxl + result = await buck.audit( + "parse", + f"buck-out/v2/gen-bxl/root/{config_hash}/path/to/function.bxl/__function_name__/output", + "--json", + ) + + result = json.loads(result.stdout) + assert result["bxl_function_label"] == "root//path/to/function.bxl:function_name" + assert result["config_hash"] == config_hash + assert ( + result["full_artifact_path_no_hash"] + == "root/path/to/function.bxl/__function_name__/output" + ) + + # anon + result = await buck.audit( + "parse", + f"buck-out/v2/gen-anon/root/{config_hash}/path/to/target/rule_hash/__target_name__/output", + "--json", + ) + + result = json.loads(result.stdout) + assert result["cell_path"] == "root//path/to/target" + assert result["target_label"] == "root//path/to/target:target_name" + assert result["config_hash"] == config_hash + assert result["attr_hash"] == "rule_hash" + assert ( + result["full_artifact_path_no_hash"] + == "root/path/to/target/rule_hash/__target_name__/output" + ) + + # test + result = await buck.audit( + "parse", + 
f"buck-out/v2/test/root/{config_hash}/path/to/target/__target_name__/output", + "--json", + ) + + result = json.loads(result.stdout) + assert result["cell_path"] == "root//path/to/target" + assert result["config_hash"] == config_hash + assert ( + result["full_artifact_path_no_hash"] + == "root/path/to/target/__target_name__/output" + ) diff --git a/tests/core/audit/test_audit_parse_data/.buckconfig b/tests/core/audit/test_audit_parse_data/.buckconfig new file mode 100644 index 0000000000000..82ff4e5316342 --- /dev/null +++ b/tests/core/audit/test_audit_parse_data/.buckconfig @@ -0,0 +1,2 @@ +[cells] + root = . diff --git a/tests/core/audit/test_audit_parse_data/.buckroot b/tests/core/audit/test_audit_parse_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/audit/test_audit_providers.py b/tests/core/audit/test_audit_providers.py new file mode 100644 index 0000000000000..88ae73f0b6cf4 --- /dev/null +++ b/tests/core/audit/test_audit_providers.py @@ -0,0 +1,46 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import re + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +def _replace_hash(s: str) -> str: + return re.sub(r"\b[0-9a-f]{16}\b", "", s) + + +@buck_test(data_dir="sorted") +async def test_listed_providers_are_sorted(buck: Buck) -> None: + result = await buck.audit("providers", "//:target", "--list") + + # " - DefaultInfo" -> "DefaultInfo" + providers = [ + line.split("-")[1].strip() + for line in result.stdout.split("\n") + if line.strip().startswith("-") + ] + assert providers == [ + "AlphaInfo", + "DefaultInfo", + "ZetaInfo", + ] + + +@buck_test(data_dir="universe") +async def test_audit_providers_universe(buck: Buck) -> None: + result = await buck.audit("providers", "//:aaa", "--quiet") + assert "root//:aaa (root//:p-aaa#)" == _replace_hash(result.stdout.strip()) + + result = await buck.audit( + "providers", "//:aaa", "--target-universe=//:bbb", "--quiet" + ) + assert "root//:aaa (root//:p-bbb#)" == _replace_hash(result.stdout.strip()) diff --git a/tests/core/audit/test_audit_providers_data/.buckroot b/tests/core/audit/test_audit_providers_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/audit/test_audit_providers_data/sorted/.buckconfig b/tests/core/audit/test_audit_providers_data/sorted/.buckconfig new file mode 100644 index 0000000000000..2955e4491d052 --- /dev/null +++ b/tests/core/audit/test_audit_providers_data/sorted/.buckconfig @@ -0,0 +1,8 @@ +[buildfile] + name = TARGETS.fixture + +[repositories] + root = . 
+ +[repository_aliases] + prelude = root diff --git a/tests/core/audit/test_audit_providers_data/sorted/.buckroot b/tests/core/audit/test_audit_providers_data/sorted/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/audit/test_audit_providers_data/sorted/TARGETS.fixture b/tests/core/audit/test_audit_providers_data/sorted/TARGETS.fixture new file mode 100644 index 0000000000000..5552264e11ebc --- /dev/null +++ b/tests/core/audit/test_audit_providers_data/sorted/TARGETS.fixture @@ -0,0 +1,3 @@ +load(":defs.bzl", "provider_test_rule") + +provider_test_rule(name = "target") diff --git a/tests/core/audit/test_audit_providers_data/sorted/defs.bzl b/tests/core/audit/test_audit_providers_data/sorted/defs.bzl new file mode 100644 index 0000000000000..1be0d20a685fb --- /dev/null +++ b/tests/core/audit/test_audit_providers_data/sorted/defs.bzl @@ -0,0 +1,15 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +AlphaInfo = provider(fields = []) +ZetaInfo = provider(fields = []) + +def _impl(_ctx): + return [DefaultInfo(), ZetaInfo(), AlphaInfo()] + +# This bzl file cannot be interpreted with Buck1 because there's no `rule` builtin. +provider_test_rule = rule(impl = _impl, attrs = {}) diff --git a/tests/core/audit/test_audit_providers_data/sorted/prelude.bzl b/tests/core/audit/test_audit_providers_data/sorted/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/audit/test_audit_providers_data/universe/.buckconfig b/tests/core/audit/test_audit_providers_data/universe/.buckconfig new file mode 100644 index 0000000000000..b884d57710a9d --- /dev/null +++ b/tests/core/audit/test_audit_providers_data/universe/.buckconfig @@ -0,0 +1,12 @@ +[buildfile] + name = TARGETS.fixture + +[cells] + root = . + nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled diff --git a/tests/core/audit/test_audit_providers_data/universe/.buckroot b/tests/core/audit/test_audit_providers_data/universe/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/audit/test_audit_providers_data/universe/TARGETS.fixture b/tests/core/audit/test_audit_providers_data/universe/TARGETS.fixture new file mode 100644 index 0000000000000..d8c9e17620eb2 --- /dev/null +++ b/tests/core/audit/test_audit_providers_data/universe/TARGETS.fixture @@ -0,0 +1,12 @@ +platform(name = "p-aaa") +platform(name = "p-bbb") + +stub( + name = "aaa", + default_target_platform = ":p-aaa", +) +stub( + name = "bbb", + deps = [":aaa"], + default_target_platform = ":p-bbb", +) diff --git a/tests/core/audit/test_audit_subtargets.py b/tests/core/audit/test_audit_subtargets.py new file mode 100644 index 0000000000000..911f4b53df156 --- /dev/null +++ b/tests/core/audit/test_audit_subtargets.py @@ -0,0 +1,86 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
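
The subtargets tests that follow check two renderings of the same tree: a flat list of bracketed labels and a nested JSON object (see the golden files further down). A small sketch, assuming the shapes those goldens show, of how the flat form relates to the nested one:

    from typing import Dict, List

    def flatten_subtargets(label: str, tree: Dict[str, dict]) -> List[str]:
        # "root//:foo ()" with {"bar": {}, "baz": {}} becomes
        # ["root//:foo[bar] ()", "root//:foo[baz] ()"], recursing
        # depth-first so nested subtargets keep their bracket chain.
        out: List[str] = []
        base, _, suffix = label.partition(" ")
        for name, children in sorted(tree.items()):
            child = f"{base}[{name}] {suffix}"
            out.append(child)
            out.extend(flatten_subtargets(child, children))
        return out

Run against the golden.deeply_nested_basic.json shape, this produces exactly the five labels that test_audit_subtargets_shallow expects from the non-JSON output.
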
+ +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.golden import golden + + +@buck_test() +async def test_audit_subtargets_basic(buck: Buck) -> None: + result = await buck.audit("subtargets", "//:no_subtargets") + assert result.stdout == "" + + result = await buck.audit("subtargets", "//:foo") + assert [ + "root//:foo[bar] ()", + "root//:foo[baz] ()", + ] == result.stdout.splitlines() + + +@buck_test() +async def test_audit_subtargets_of_subtarget(buck: Buck) -> None: + result = await buck.audit("subtargets", "//:nested[sub1]") + assert [ + "root//:nested[sub1][sub2] ()", + "root//:nested[sub1][sub3] ()", + ] == result.stdout.splitlines() + + result = await buck.audit("subtargets", "//:nested[sub4]") + assert result.stdout == "" + + +@buck_test() +async def test_audit_subtargets_shallow(buck: Buck) -> None: + result = await buck.audit("subtargets", "//:deeply_nested", "--shallow") + assert [ + "root//:deeply_nested[sub1] ()", + "root//:deeply_nested[sub2] ()", + ] == result.stdout.splitlines() + + result = await buck.audit("subtargets", "//:deeply_nested") + assert [ + "root//:deeply_nested[sub1] ()", + "root//:deeply_nested[sub2] ()", + "root//:deeply_nested[sub2][sub3] ()", + "root//:deeply_nested[sub2][sub3][sub4] ()", + "root//:deeply_nested[sub2][sub5] ()", + ] == result.stdout.splitlines() + + result = await buck.audit("subtargets", "//:deeply_nested[sub2]", "--shallow") + assert [ + "root//:deeply_nested[sub2][sub3] ()", + "root//:deeply_nested[sub2][sub5] ()", + ] == result.stdout.splitlines() + + result = await buck.audit("subtargets", "//:deeply_nested[sub2]") + assert [ + "root//:deeply_nested[sub2][sub3] ()", + "root//:deeply_nested[sub2][sub3][sub4] ()", + "root//:deeply_nested[sub2][sub5] ()", + ] == result.stdout.splitlines() + + +@buck_test() +async def test_audit_subtargets_json(buck: Buck) -> None: + result = await buck.audit("subtargets", "//:no_subtargets", "--json") + golden(output=result.stdout, rel_path="json/golden.has_no_subtargets.json") + + result = await buck.audit("subtargets", "//:foo", "--json") + golden(output=result.stdout, rel_path="json/golden.basic.json") + + result = await buck.audit("subtargets", "//:nested[sub1]", "--json") + golden(output=result.stdout, rel_path="json/golden.nested.json") + + result = await buck.audit("subtargets", "//:deeply_nested", "--json") + golden(output=result.stdout, rel_path="json/golden.deeply_nested_basic.json") + + result = await buck.audit("subtargets", "//:deeply_nested[sub2]", "--json") + golden(output=result.stdout, rel_path="json/golden.deeply_nested_subs_of_sub.json") diff --git a/tests/core/audit/test_audit_subtargets_data/.buckconfig b/tests/core/audit/test_audit_subtargets_data/.buckconfig new file mode 100644 index 0000000000000..cb37c990c90cb --- /dev/null +++ b/tests/core/audit/test_audit_subtargets_data/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . 
+prelude = prelude diff --git a/tests/core/audit/test_audit_subtargets_data/.buckroot b/tests/core/audit/test_audit_subtargets_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/audit/test_audit_subtargets_data/TARGETS.fixture b/tests/core/audit/test_audit_subtargets_data/TARGETS.fixture new file mode 100644 index 0000000000000..8338d22f26c16 --- /dev/null +++ b/tests/core/audit/test_audit_subtargets_data/TARGETS.fixture @@ -0,0 +1,15 @@ +no_subtargets( + name = "no_subtargets", +) + +subtargets( + name = "foo", +) + +nested_subtargets( + name = "nested", +) + +deeply_nested_subtargets( + name = "deeply_nested", +) diff --git a/tests/core/audit/test_audit_subtargets_data/json/golden.basic.json b/tests/core/audit/test_audit_subtargets_data/json/golden.basic.json new file mode 100644 index 0000000000000..b17644f8443af --- /dev/null +++ b/tests/core/audit/test_audit_subtargets_data/json/golden.basic.json @@ -0,0 +1,8 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "root//:foo ()": { + "bar": {}, + "baz": {} + } +} \ No newline at end of file diff --git a/tests/core/audit/test_audit_subtargets_data/json/golden.deeply_nested_basic.json b/tests/core/audit/test_audit_subtargets_data/json/golden.deeply_nested_basic.json new file mode 100644 index 0000000000000..8bc0253fbf4ca --- /dev/null +++ b/tests/core/audit/test_audit_subtargets_data/json/golden.deeply_nested_basic.json @@ -0,0 +1,13 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "root//:deeply_nested ()": { + "sub1": {}, + "sub2": { + "sub3": { + "sub4": {} + }, + "sub5": {} + } + } +} \ No newline at end of file diff --git a/tests/core/audit/test_audit_subtargets_data/json/golden.deeply_nested_subs_of_sub.json b/tests/core/audit/test_audit_subtargets_data/json/golden.deeply_nested_subs_of_sub.json new file mode 100644 index 0000000000000..1d2aa4c53fa2a --- /dev/null +++ b/tests/core/audit/test_audit_subtargets_data/json/golden.deeply_nested_subs_of_sub.json @@ -0,0 +1,10 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "root//:deeply_nested[sub2] ()": { + "sub3": { + "sub4": {} + }, + "sub5": {} + } +} \ No newline at end of file diff --git a/tests/core/audit/test_audit_subtargets_data/json/golden.has_no_subtargets.json b/tests/core/audit/test_audit_subtargets_data/json/golden.has_no_subtargets.json new file mode 100644 index 0000000000000..92f24939c69de --- /dev/null +++ b/tests/core/audit/test_audit_subtargets_data/json/golden.has_no_subtargets.json @@ -0,0 +1,5 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "root//:no_subtargets ()": {} +} \ No newline at end of file diff --git a/tests/core/audit/test_audit_subtargets_data/json/golden.nested.json b/tests/core/audit/test_audit_subtargets_data/json/golden.nested.json new file mode 100644 index 0000000000000..d29aaa10553c3 --- /dev/null +++ b/tests/core/audit/test_audit_subtargets_data/json/golden.nested.json @@ -0,0 +1,8 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "root//:nested[sub1] ()": { + "sub2": {}, + "sub3": {} + } +} \ No newline at end of file diff --git 
a/tests/core/audit/test_audit_subtargets_data/prelude/prelude.bzl b/tests/core/audit/test_audit_subtargets_data/prelude/prelude.bzl new file mode 100644 index 0000000000000..0e9c134624382 --- /dev/null +++ b/tests/core/audit/test_audit_subtargets_data/prelude/prelude.bzl @@ -0,0 +1,62 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _no_subtargets_impl(_ctx): + return [DefaultInfo()] + +no_subtargets = rule( + impl = _no_subtargets_impl, + attrs = {}, +) + +def _subtargets_impl(_ctx): + return [DefaultInfo( + sub_targets = { + "bar": [DefaultInfo()], + "baz": [DefaultInfo()], + }, + )] + +subtargets = rule( + impl = _subtargets_impl, + attrs = {}, +) + +def _nested_subtargets_impl(_ctx): + return [DefaultInfo( + sub_targets = { + "sub1": [DefaultInfo( + sub_targets = { + "sub2": [DefaultInfo()], + "sub3": [DefaultInfo()], + }, + )], + "sub4": [DefaultInfo()], + }, + )] + +nested_subtargets = rule( + impl = _nested_subtargets_impl, + attrs = {}, +) + +def _deeply_nested_subtargets_impl(_ctx): + return [DefaultInfo( + sub_targets = { + "sub1": [DefaultInfo()], + "sub2": [DefaultInfo( + sub_targets = {"sub3": [DefaultInfo( + sub_targets = {"sub4": [DefaultInfo()]}, + )], "sub5": [DefaultInfo()]}, + )], + }, + )] + +deeply_nested_subtargets = rule( + impl = _deeply_nested_subtargets_impl, + attrs = {}, +) diff --git a/tests/core/audit/test_audit_visibility.py b/tests/core/audit/test_audit_visibility.py new file mode 100644 index 0000000000000..5356d7634c4a4 --- /dev/null +++ b/tests/core/audit/test_audit_visibility.py @@ -0,0 +1,41 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import pytest +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +@pytest.mark.parametrize( + "rule, passes", + [ + ("self//:pass1", True), + ("self//:pass2", True), + ("self//:pass3", True), + ("self//:pass4", True), + ("self//:fail1", False), + ("self//:fail2", False), + ("self//:fail3", False), + ("self//:fail4", False), + ("self//:fail5", False), + ("self//:fail6", False), + ], +) +async def test_audit_visibility(buck: Buck, rule: str, passes: bool) -> None: + if passes: + out = await buck.audit_visibility(rule) + assert out.stdout == "" + else: + await expect_failure( + buck.audit_visibility(rule), + stderr_regex=f"not visible to `{rule}`", + ) diff --git a/tests/core/audit/test_audit_visibility_data/.buckconfig b/tests/core/audit/test_audit_visibility_data/.buckconfig new file mode 100644 index 0000000000000..a061c298f1fc3 --- /dev/null +++ b/tests/core/audit/test_audit_visibility_data/.buckconfig @@ -0,0 +1,6 @@ +[cells] + self = . 
+ prelude = prelude + +[buildfile] + name=TARGETS.fixture diff --git a/tests/core/audit/test_audit_visibility_data/.buckroot b/tests/core/audit/test_audit_visibility_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/audit/test_audit_visibility_data/TARGETS.fixture b/tests/core/audit/test_audit_visibility_data/TARGETS.fixture new file mode 100644 index 0000000000000..33bc5a4bf2e35 --- /dev/null +++ b/tests/core/audit/test_audit_visibility_data/TARGETS.fixture @@ -0,0 +1,49 @@ +foo_target( + name = "pass1", + actual = "//subdir:public", +) + +foo_target( + name = "pass2", + actual = "//subdir:target", +) + +foo_target( + name = "pass3", + actual = "//subdir:package", +) + +foo_target( + name = "pass4", + actual = "//subdir:recursive", +) + +foo_target( + name = "fail1", + actual = "//subdir:badtarget", +) + +foo_target( + name = "fail2", + actual = "//subdir:badpackage", +) + +foo_target( + name = "fail3", + actual = "//subdir:badrecursive", +) + +foo_target( + name = "fail4", + actual = "//subdir:default", +) + +foo_target( + name = "fail5", + actual = "//subdir:badvisibility", +) + +foo_target( + name = "fail6", + actual = "//subdir:badtransitivevisibility", +) diff --git a/tests/core/audit/test_audit_visibility_data/prelude/prelude.bzl b/tests/core/audit/test_audit_visibility_data/prelude/prelude.bzl new file mode 100644 index 0000000000000..f5106d8ec2bce --- /dev/null +++ b/tests/core/audit/test_audit_visibility_data/prelude/prelude.bzl @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(_ctx): + return [DefaultInfo()] + +foo_target = rule( + impl = _impl, + attrs = { + "actual": attrs.option(attrs.dep(), default = None), + }, +) diff --git a/tests/core/audit/test_audit_visibility_data/subdir/TARGETS.fixture b/tests/core/audit/test_audit_visibility_data/subdir/TARGETS.fixture new file mode 100644 index 0000000000000..98838ba481d3d --- /dev/null +++ b/tests/core/audit/test_audit_visibility_data/subdir/TARGETS.fixture @@ -0,0 +1,49 @@ +foo_target( + name = "public", + visibility = ["PUBLIC"], +) + +foo_target( + name = "default", +) + +foo_target( + name = "target", + visibility = ["//:pass2"], +) + +foo_target( + name = "package", + visibility = ["//:"], +) + +foo_target( + name = "recursive", + visibility = ["//..."], +) + +foo_target( + name = "badtarget", + visibility = ["//:nothing"], +) + +foo_target( + name = "badpackage", + # TODO(cjhopman): This should check a package that's a prefix of the dependent, but buck2 doesn't handle that correctly currently. 
+ visibility = ["//buck2:"], +) + +foo_target( + name = "badrecursive", + visibility = ["//buck2/..."], +) + +foo_target( + name = "badvisibility", + visibility = ["//:pass2"], +) + +foo_target( + name = "badtransitivevisibility", + visibility = ["//subdir:badvisibility"], +) diff --git a/tests/core/build/BUCK b/tests/core/build/BUCK new file mode 100644 index 0000000000000..3050468de984b --- /dev/null +++ b/tests/core/build/BUCK @@ -0,0 +1,240 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_build_output_file_hashes", + srcs = ["test_build_output_file_hashes.py"], + data_dir = "test_build_output_file_hashes_data", + serialize_test_cases = False, +) + +buck2_e2e_test( + name = "test_build_skip_incompatible_targets", + srcs = ["test_build_skip_incompatible_targets.py"], + data_dir = "test_build_skip_incompatible_targets_data", +) + +buck2_e2e_test( + name = "test_build_root_executable", + srcs = ["test_build_root_executable.py"], + data_dir = "test_build_root_executable_data", + serialize_test_cases = False, +) + +buck2_e2e_test( + name = "test_build_configured", + srcs = ["test_build_configured.py"], + data_dir = "test_build_configured_data", +) + +buck2_e2e_test( + name = "test_error_categorization", + srcs = ["test_error_categorization.py"], + data_dir = "test_error_categorization_data", + deps = [ + "//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_modify", + srcs = ["test_modify.py"], + data_dir = "test_modify_data", + deps = [ + "//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_output_cleanup", + srcs = ["test_output_cleanup.py"], + data_dir = "test_output_cleanup_data", +) + +buck2_e2e_test( + name = "test_paranoid", + srcs = ["test_paranoid.py"], + data = "//buck2/tests/targets:isolated_targets", + serialize_test_cases = False, +) + +buck2_e2e_test( + name = "test_skip_missing", + srcs = ["test_skip_missing.py"], + data_dir = "test_skip_missing_data", +) + +buck2_e2e_test( + name = "test_plugins", + srcs = ["test_plugins.py"], + data_dir = "test_plugins_data", + serialize_test_cases = False, +) + +buck2_e2e_test( + name = "test_uncategorized", + srcs = ["test_uncategorized.py"], + data_dir = "test_uncategorized_data", + env = { + "RECLI": "$(location fbsource//xplat/remote_execution/dotslash:recli)", + }, + serialize_test_cases = False, + deps = [ + "fbcode//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_universe", + srcs = ["test_universe.py"], + data_dir = "test_universe_data", +) + +buck2_e2e_test( + name = "test_hash_all_commands", + srcs = ["test_hash_all_commands.py"], + data_dir = "test_hash_all_commands_data", + # These tests heavily depend on watchman, which is flakey on non-Linux systems + skip_for_os = [ + "darwin", + "windows", + ], + deps = [ + "fbcode//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_dep_files", + srcs = ["test_dep_files.py"], + data_dir = "test_dep_files_data", + deps = [ + "fbcode//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_target_aliases", + srcs = ["test_target_aliases.py"], + data_dir = "test_target_aliases_data", +) + +buck2_e2e_test( + name = "test_symlinks", + srcs = ["test_symlinks.py"], + data_dir = "test_symlinks_data", + deps = [ + "fbcode//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_out_flag", + srcs = ["test_out_flag.py"], + data_dir = "test_out_flag_data", +) + +buck2_e2e_test( + name = "test_nested_subtargets", + srcs 
= ["test_nested_subtargets.py"], + data_dir = "test_nested_subtargets_data", +) + +buck2_e2e_test( + name = "test_build_report", + srcs = ["test_build_report.py"], + data_dir = "test_build_report_data", + deps = [ + "//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_build_report_errors", + srcs = ["test_build_report_errors.py"], + data_dir = "test_build_report_errors_data", + deps = [ + "fbcode//buck2/tests/e2e_util:golden", + ], +) + +buck2_e2e_test( + name = "test_build_id_env_var", + srcs = ["test_build_id_env_var.py"], + data_dir = "test_build_id_env_var_data", + deps = [ + "//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_cancellation", + srcs = ["test_cancellation.py"], + data_dir = "test_cancellation_data", + skip_for_os = ["windows"], +) + +buck2_e2e_test( + name = "test_critical_path", + srcs = ["test_critical_path.py"], + data_dir = "test_critical_path_data", + deps = [ + "//buck2/tests/e2e_util:golden", + "//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_build_response", + srcs = ["test_build_response.py"], + data_dir = "test_build_response_data", + deps = [ + "//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_build_rule_type_name_logging", + srcs = ["test_build_rule_type_name_logging.py"], + data_dir = "test_build_rule_type_name_logging_data", + deps = [ + "//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_executor_with_dependencies", + srcs = ["test_executor_with_dependencies.py"], + data_dir = "test_executor_with_dependencies_data", + deps = [ + "//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_action_error_handler_types", + srcs = ["test_action_error_handler_types.py"], + data_dir = "test_action_error_handler_types_data", + deps = [ + "//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_build_system_info", + srcs = ["test_build_system_info.py"], + data_dir = "test_build_system_info_data", + deps = [ + "//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_remote_execution", + srcs = ["test_remote_execution.py"], + data_dir = "test_remote_execution_data", + deps = [ + "//buck2/tests/e2e_util:utils", + ], +) diff --git a/tests/core/build/actions/BUCK b/tests/core/build/actions/BUCK new file mode 100644 index 0000000000000..ee25a5c4ff66e --- /dev/null +++ b/tests/core/build/actions/BUCK @@ -0,0 +1,54 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_actions", + srcs = ["test_actions.py"], + data_dir = "test_actions_data", + env = { + # `cpe::x2p` resolves `localhost` incorrectly, and test fails. 
+ "CPE_RUST_X2P_SUPPORTS_VPNLESS": "0", + }, + serialize_test_cases = False, + deps = [ + "fbcode//buck2/tests/e2e_util:assert_occurrences", + "fbcode//buck2/tests/e2e_util:utils", + "fbsource//third-party/pypi/aiohttp:aiohttp", + ], +) + +buck2_e2e_test( + name = "test_dynamic_value", + srcs = ["test_dynamic_value.py"], + data_dir = "test_dynamic_value_data", +) + +buck2_e2e_test( + name = "test_dynamic_output", + srcs = ["test_dynamic_output.py"], + data_dir = "test_dynamic_output_data", +) + +buck2_e2e_test( + name = "test_output_artifact_twice", + srcs = ["test_output_artifact_twice.py"], + data_dir = "test_output_artifact_twice_data", +) + +buck2_e2e_test( + name = "test_projected_output_artifact", + srcs = ["test_projected_output_artifact.py"], + data_dir = "test_projected_output_artifact_data", +) + +buck2_e2e_test( + name = "test_write", + srcs = ["test_write.py"], + data_dir = "test_write_data", + serialize_test_cases = False, + deps = [ + "fbcode//buck2/tests/e2e_util:assert_occurrences", + "fbcode//buck2/tests/e2e_util:utils", + ], +) diff --git a/tests/core/build/actions/test_actions.py b/tests/core/build/actions/test_actions.py new file mode 100644 index 0000000000000..65c2b0a9007b8 --- /dev/null +++ b/tests/core/build/actions/test_actions.py @@ -0,0 +1,458 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import asyncio +import hashlib +import json +import os +import platform +import socket +from pathlib import Path + +from aiohttp import web +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + +from buck2.tests.e2e_util.helper.utils import filter_events + + +@buck_test(data_dir="actions") +async def test_write_json(buck: Buck) -> None: + result = await buck.build("//write_json:", "-c", "write_json.content=default") + + build_report = result.get_build_report() + output = build_report.output_for_target("//write_json:absolute") + for path in json.loads(output.read_text()): + assert os.path.isabs(path), path + + # we need to test that with_inputs properly flows input dependencies through to consumers + await buck.build("//write_json:with_inputs", "-c", "write_json.content=other") + + +@buck_test(data_dir="actions") +async def test_copies_files(buck: Buck) -> None: + result = await buck.build( + "//copy:file_uses_declared_output", + "//copy:file_uses_declared_output_as_output", + "//copy:file_declares_output", + ) + build_report = result.get_build_report() + + output = build_report.output_for_target("//copy:file_uses_declared_output") + assert output.read_text().rstrip() == "some file" + + output = build_report.output_for_target( + "//copy:file_uses_declared_output_as_output" + ) + assert output.read_text().rstrip() == "some file" + + output = build_report.output_for_target("//copy:file_declares_output") + assert output.read_text().rstrip() == "some file" + + await expect_failure( + buck.build("//copy:fails_on_invalid_src"), + stderr_regex="Type of parameter `src`", + ) + + await expect_failure( + buck.build("//copy:fails_on_invalid_dest"), + stderr_regex="Type of parameter `dest`", + ) + + +@buck_test(data_dir="actions") +async def test_symlink_dir(buck: Buck) -> None: + result = await 
buck.build("//symlinked_dir:") + build_report = result.get_build_report() + output = build_report.output_for_target("//symlinked_dir:out") + + dest1 = output / "dir1" / "dir1_1" / "file1.txt" + dest2 = output / "dep.txt" + dest3 = output / "subdir" / "dir1" / "dir1_1" / "file1.txt.suffix" + dest4 = output / "subdir" / "dep.txt.suffix" + + # Example subdir: buck-out/v2/gen/root/a59b783ba97fcd85891ddb2e62fbfebb/symlinked_dir/__out__/out/dir1/dir1_1 + expected_link1 = "../" * 10 + "symlinked_dir/dir1/dir1_1/file1.txt" + expected_link2 = "../../__dep__/dep.txt" + expected_link3 = "../" * 11 + "symlinked_dir/dir1/dir1_1/file1.txt" + expected_link4 = "../../../__dep__/dep.txt" + + if platform.system() == "Windows": + # In Windows, we convert all symlinks to be absolute and mostly canonical + def get_canonicalized_for_windows(dest: Path, relative_link: str) -> str: + return "\\\\?\\" + os.path.realpath(dest.parent / relative_link) + + expected_link1 = get_canonicalized_for_windows(dest1, expected_link1) + expected_link2 = get_canonicalized_for_windows(dest2, expected_link2) + expected_link3 = get_canonicalized_for_windows(dest3, expected_link3) + expected_link4 = get_canonicalized_for_windows(dest4, expected_link4) + + assert dest1.is_symlink() + assert dest2.is_symlink() + assert dest3.is_symlink() + assert dest4.is_symlink() + + assert os.readlink(dest1) == expected_link1 + assert os.readlink(dest2) == expected_link2 + assert os.readlink(dest3) == expected_link3 + assert os.readlink(dest4) == expected_link4 + + assert dest1.read_text().strip() == "dir1_1 out contents" + assert dest2.read_text().strip() == "dep contents" + assert dest3.read_text().strip() == "dir1_1 out contents" + assert dest4.read_text().strip() == "dep contents" + + +@buck_test(data_dir="actions") +async def test_simple_run(buck: Buck) -> None: + result = await buck.build("//run:runs_simple_script") + output = result.get_build_report().output_for_target("//run:runs_simple_script") + if platform.system() == "Windows": + assert output.read_text() == "foo\nrun\\src.txt\nbar\n" + else: + assert output.read_text() == "foo\nrun/src.txt\nbar\n" + + result = await buck.build("//run:runs_simple_script_as_exe") + output = result.get_build_report().output_for_target( + "//run:runs_simple_script_as_exe" + ) + if platform.system() == "Windows": + assert output.read_text() == "foo\nrun\\src.txt\nbar\n" + else: + assert output.read_text() == "foo\nrun/src.txt\nbar\n" + + result = await buck.build("//run:runs_script_locally") + output = result.get_build_report().output_for_target("//run:runs_script_locally") + assert output.read_text().strip() == socket.gethostname() + + result = await buck.build("//run:runs_script_locally_outputs_symlink") + output = result.get_build_report().output_for_target( + "//run:runs_script_locally_outputs_symlink" + ) + assert output.is_symlink() + + await expect_failure( + buck.build("//run:rejects_zero_outputs"), + stderr_regex="expected at least one output artifact", + ) + + await expect_failure( + buck.build("//run:rejects_bad_args"), + stderr_regex="Type of parameter `arguments` doesn't match", + ) + + +@buck_test(data_dir="actions") +async def test_anon_targets(buck: Buck) -> None: + await buck.build("//anon:") + + await expect_failure( + buck.build("//anon_invalid_defaults/source:default_source_fails"), + stderr_regex="Anon targets do not support default values for `attrs.source\\(\\)`", + ) + + await expect_failure( + buck.build("//anon_invalid_defaults/dep:default_dep_fails"), + stderr_regex="Anon targets 
do not support default values for `attrs.dep\\(\\)`", + ) + + await expect_failure( + buck.build("//anon_invalid_defaults/arg:default_arg_fails"), + stderr_regex="Anon targets do not support default values for `attrs.arg\\(\\)`", + ) + + await expect_failure( + buck.build("//anon_invalid_defaults/arg:arg_not_compatible"), + stderr_regex="Arg attribute must have `anon_target_compatible` set to `True`", + ) + + await expect_failure( + buck.build("//anon_invalid_defaults/promise_artifact:bad_short_path"), + stderr_regex="assert_short_path\\(\\) was called with `short_path = WRONG_PATH`", + ) + + await expect_failure( + buck.build("//anon_invalid_defaults/anon_rule:bad_anon_rule"), + stderr_regex="Attr type `attrs.plugin_dep\\(\\)` is not supported for anon rules", + ) + + +@buck_test(data_dir="actions") +async def test_download_file(buck: Buck) -> None: + routes = web.RouteTableDef() + + attempt = 0 + body: bytes = b"foobar" + sha1 = hashlib.sha1(body).hexdigest() + + @routes.get("/") + async def hello(request: web.Request) -> web.Response: + nonlocal attempt + attempt += 1 + if attempt > 2: + return web.Response(body=body) + if attempt > 1: + return web.Response(status=500) + return web.Response(status=429) + + app = web.Application() + app.add_routes(routes) + + sock = socket.socket() + sock.bind(("localhost", 0)) + + runner = web.AppRunner(app) + await runner.setup() + site = web.SockSite(runner, sock) + await site.start() + + port = sock.getsockname()[1] + url = f"http://localhost:{port}" + await buck.build( + "//download_file:", "-c", f"test.sha1={sha1}", "-c", f"test.url={url}" + ) + + await runner.cleanup() + + assert attempt == 3 + + +@buck_test(data_dir="actions") +async def test_download_file_timeout_after_retries(buck: Buck) -> None: + routes = web.RouteTableDef() + + body: bytes = b"foobar" + sha1 = hashlib.sha1(body).hexdigest() + + @routes.get("/always_times_out") + async def always_times_out(request: web.Request) -> web.Response: + await asyncio.sleep(3) + return web.Response(body=body) + + attempt = 0 + + @routes.get("/times_out_twice") + async def times_out_twice(request: web.Request) -> web.Response: + nonlocal attempt + attempt += 1 + if attempt > 2: + return web.Response(body=body) + await asyncio.sleep(3) + return web.Response(body=body) + + app = web.Application() + app.add_routes(routes) + + sock = socket.socket() + sock.bind(("localhost", 0)) + + runner = web.AppRunner(app) + await runner.setup() + site = web.SockSite(runner, sock) + await site.start() + + port = sock.getsockname()[1] + url = f"http://localhost:{port}" + + # These are daemon startup configs, need these to be written in a buckconfig rather + # than passed as an invocation config. + # + # Add an aggressive read timeout. + with open(buck.cwd / ".buckconfig", "a") as buckconfig: + buckconfig.write("[http]\nread_timeout_ms = 50\n") + + await expect_failure( + buck.build( + "//download_file:", + "-c", + f"test.sha1={sha1}", + "-c", + f"test.url={url}/always_times_out", + ), + stderr_regex="Timed out while making request to", + ) + + result = await buck.build( + "//download_file:", + "-c", + f"test.sha1={sha1}", + "-c", + f"test.url={url}/times_out_twice", + ) + assert "Retrying a HTTP error after" in result.stderr + + await runner.cleanup() + + +@buck_test(data_dir="actions") +async def test_cas_artifact(buck: Buck) -> None: + # The digests in `//cas_artifact:` require the buckconfig. 
+ # NB: cannot use `extra_buck_config` attrib of `@buck_test()` + with open(buck.cwd / ".buckconfig", "a") as buckconfig: + buckconfig.write("[buck2]\n") + buckconfig.write("digest_algorithms = BLAKE3-KEYED,SHA1\n") + + result = await buck.build("//cas_artifact:") + + empty = result.get_build_report().output_for_target("//cas_artifact:empty") + assert empty.read_text() == "" + + tree = result.get_build_report().output_for_target("//cas_artifact:tree") + assert list(tree.iterdir()) == [tree / "b"] + assert (tree / "b").read_text() == "b\n" + + tree = result.get_build_report().output_for_target("//cas_artifact:dir") + assert list(tree.iterdir()) == [tree / "y"] + assert (tree / "y").read_text() == "hi\n" + + +@buck_test(data_dir="actions") +async def test_invalid_command(buck: Buck) -> None: + await expect_failure( + buck.build("//run_bad:run_invalid_command_local"), + stderr_regex="non-zero exit code.*no exit code", + ) + await expect_failure( + buck.build("//run_bad:run_invalid_command_remote"), + stderr_regex="non-zero exit code", + ) + + +@buck_test(data_dir="actions") +async def test_exit_code(buck: Buck) -> None: + await expect_failure( + buck.build("//run_bad:run_odd_exit_code"), + stderr_regex="non-zero exit code 45", + ) + # Linux does not allow negative exit codes + if platform.system() == "Windows": + await expect_failure( + buck.build("//run_bad:run_negative_exit_code"), + stderr_regex="non-zero exit code -65", + ) + + +@buck_test(data_dir="actions") +async def test_artifact_cycle(buck: Buck) -> None: + await expect_failure( + buck.build("//run_invalid:artifact_cycle"), + stderr_regex="Recursion limit exceeded", + ) + + +@buck_test(data_dir="actions") +async def test_associated_artifacts(buck: Buck) -> None: + await buck.build("//associated_artifacts:check") + + +@buck_test(data_dir="actions") +async def test_failure_has_wall_time(buck: Buck) -> None: + await expect_failure( + buck.build("//run_bad:run_odd_exit_code"), + stderr_regex="non-zero exit code 45", + ) + + wall_time = await filter_events( + buck, + "Event", + "data", + "SpanEnd", + "data", + "ActionExecution", + "wall_time_us", + ) + + assert wall_time + for time in wall_time: + assert time > 0 + + +@buck_test(data_dir="actions") +async def test_local_action_has_input_size(buck: Buck) -> None: + await buck.build("//run:runs_script_locally") + input_size = await filter_events( + buck, + "Event", + "data", + "SpanEnd", + "data", + "ActionExecution", + "input_files_bytes", + ) + + assert input_size + + if platform.system() == "Windows": + assert input_size[0] == 370 + else: + assert input_size[0] == 342 + + +@buck_test(data_dir="actions") +async def test_remote_action_has_input_size(buck: Buck) -> None: + await buck.build("//run:runs_simple_script_remote") + input_size = await filter_events( + buck, + "Event", + "data", + "SpanEnd", + "data", + "ActionExecution", + "input_files_bytes", + ) + + assert input_size + + if platform.system() == "Windows": + assert input_size[0] == 370 + else: + assert input_size[0] == 342 + + +@buck_test(data_dir="actions") +async def test_action_invalidation_tracking(buck: Buck) -> None: + + with open(buck.cwd / ".buckconfig", "a") as buckconfig: + buckconfig.write("[buck2]\n") + buckconfig.write("invalidation_tracking_enabled = true\n") + + await buck.build("//run:runs_simple_script") + invalidation_info = await filter_events( + buck, + "Event", + "data", + "SpanEnd", + "data", + "ActionExecution", +
"invalidation_info", + ) + + assert invalidation_info + assert invalidation_info[0]["changed_file"] is None + + with open(buck.cwd / "run" / "src.txt", "a") as srcfile: + srcfile.write("more data\n") + + await buck.build("//run:runs_simple_script") + invalidation_info = await filter_events( + buck, + "Event", + "data", + "SpanEnd", + "data", + "ActionExecution", + "invalidation_info", + ) + + assert invalidation_info + assert invalidation_info[0]["changed_file"] == {} diff --git a/tests/core/build/actions/test_actions_data/.buckroot b/tests/core/build/actions/test_actions_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/actions/test_actions_data/actions/.buckconfig b/tests/core/build/actions/test_actions_data/actions/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . + nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/build/actions/test_actions_data/actions/.buckroot b/tests/core/build/actions/test_actions_data/actions/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/actions/test_actions_data/actions/anon/TARGETS.fixture b/tests/core/build/actions/test_actions_data/actions/anon/TARGETS.fixture new file mode 100644 index 0000000000000..707857b3befdc --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/anon/TARGETS.fixture @@ -0,0 +1,17 @@ +load(":as_artifact.bzl", "as_artifact_test") +load(":attributes.bzl", "attributes_test") +load(":bigint.bzl", "bigint_test") +load(":build.bzl", "build_test") +load(":recursive.bzl", "recursive_test") +load(":shared.bzl", "shared_test") +load(":subtarget.bzl", "subtarget_test") +load(":types.bzl", "types_test") + +build_test() +bigint_test() +shared_test() +attributes_test() +recursive_test() +subtarget_test() +as_artifact_test() +types_test() diff --git a/tests/core/build/actions/test_actions_data/actions/anon/as_artifact.bzl b/tests/core/build/actions/test_actions_data/actions/anon/as_artifact.bzl new file mode 100644 index 0000000000000..6f6f07d1fedbd --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/anon/as_artifact.bzl @@ -0,0 +1,175 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+
+# Tests promise artifacts: artifacts mapped out of anon targets via
+# artifact_promise_mappings and then used as sources, copies, symlinks,
+# and default outputs
+
+def _assert_eq(a, b):
+    if a != b:
+        fail("Expected {} == {}".format(a, b))
+
+HelloInfo = provider(fields = ["output"])
+
+def _builder_impl(ctx: AnalysisContext) -> list[Provider]:
+    hello = ctx.actions.write("dir/hello.out", "hello")
+    return [DefaultInfo(), HelloInfo(output = hello)]
+
+_builder = anon_rule(
+    impl = _builder_impl,
+    attrs = {},
+    artifact_promise_mappings = {
+        "artifact": lambda x: x[HelloInfo].output,
+    },
+)
+
+def _build_impl(ctx: AnalysisContext) -> list[Provider]:
+    anon_target = ctx.actions.anon_target(_builder, {})
+    artifact = anon_target.artifact("artifact")
+    artifact_from_dict = anon_target.artifacts()["artifact"]
+    _assert_eq(artifact, artifact_from_dict)
+    out = ctx.actions.declare_output("output")
+    ctx.actions.run(["cp", artifact, out.as_output()], category = "cp")
+    return [DefaultInfo(default_output = out)]
+
+_build = rule(impl = _build_impl, attrs = {})
+
+def _check_impl(ctx: AnalysisContext) -> list[Provider]:
+    out = ctx.actions.declare_output("output")
+
+    def f(ctx: AnalysisContext, artifacts, outputs):
+        _assert_eq(artifacts[ctx.attrs.src].read_string(), "hello")
+        ctx.actions.write(outputs[out], "")
+
+    ctx.actions.dynamic_output(dynamic = [ctx.attrs.src], inputs = [], outputs = [out.as_output()], f = f)
+    return [DefaultInfo(default_output = out)]
+
+_check = rule(impl = _check_impl, attrs = {"src": attrs.source()})
+
+def _short_path_impl(ctx: AnalysisContext) -> list[Provider]:
+    artifact = ctx.actions.anon_target(_builder, {}).artifact("artifact")
+    artifact_with_path = ctx.actions.assert_short_path(artifact, short_path = "dir/hello.out")
+    _assert_eq(artifact_with_path.short_path, "dir/hello.out")
+    _assert_eq(artifact_with_path.basename, "hello.out")
+    _assert_eq(artifact_with_path.extension, ".out")
+    return [DefaultInfo()]
+
+_short_path = rule(impl = _short_path_impl, attrs = {})
+
+# Test symlinked dir can accept a promise artifact
+
+def _check_symlink_files_impl(ctx):
+    artifact = ctx.actions.anon_target(_builder, {}).artifact("artifact")
+    srcs = {"hello": artifact}
+
+    out = ctx.actions.symlinked_dir("out", srcs)
+    return [DefaultInfo(default_output = out)]
+
+_check_symlink_files = rule(
+    impl = _check_symlink_files_impl,
+    attrs = {},
+)
+
+# Test copy can accept a promise artifact
+
+def _check_copy_impl(ctx):
+    artifact = ctx.actions.anon_target(_builder, {}).artifact("artifact")
+    out = ctx.actions.copy_file("copied", artifact)
+    return [DefaultInfo(default_output = out)]
+
+_check_copy = rule(
+    impl = _check_copy_impl,
+    attrs = {},
+)
+
+# Test passing in a promise artifact to default_outputs
+
+def _default_output_impl(ctx: AnalysisContext) -> list[Provider]:
+    artifact = ctx.actions.anon_target(_builder, {}).artifact("artifact")
+    artifact_with_path = ctx.actions.assert_short_path(artifact, short_path = "dir/hello.out")
+    _assert_eq(artifact_with_path.short_path, "dir/hello.out")
+    return [DefaultInfo(default_outputs = [artifact])]
+
+_default_output = rule(impl = _default_output_impl, attrs = {})
+
+def _check_default_output_impl(ctx: AnalysisContext) -> list[Provider]:
+    _assert_eq(type(ctx.attrs.src), "promise_artifact")
+    _assert_eq(ctx.attrs.src.short_path, "dir/hello.out")
+
+    def check_is_artifact(_artifact: Artifact):
+        pass
+
+    check_is_artifact(ctx.attrs.src)
+
+    return [DefaultInfo()]
+
+_check_default_output = rule(impl = _check_default_output_impl, attrs = {"src": attrs.source()})
+
+# Test promise artifacts when calling
ctx.actions.anon_targets() + +def _anon_rule_impl(ctx: AnalysisContext) -> list[Provider]: + hello = ctx.actions.write("dir/hello.out", ctx.attrs.my_content) + return [DefaultInfo(), HelloInfo(output = hello)] + +_anon_rule1 = anon_rule( + impl = _anon_rule_impl, + attrs = { + "my_content": attrs.string(default = "content1"), + }, + artifact_promise_mappings = { + "artifact": lambda x: x[HelloInfo].output, + }, +) + +_anon_rule2 = anon_rule( + impl = _anon_rule_impl, + attrs = { + "my_content": attrs.string(default = "content2"), + }, + artifact_promise_mappings = { + "artifact": lambda x: x[HelloInfo].output, + }, +) + +def _build_multiple_impl(ctx: AnalysisContext) -> list[Provider]: + all_targets = ctx.actions.anon_targets([(_anon_rule1, {}), (_anon_rule2, {})]) + artifact1 = all_targets.anon_targets[0].artifact("artifact") + artifact2 = all_targets.anon_targets[1].artifact("artifact") + promise = all_targets.promise + _assert_eq(type(promise), "promise") + out1 = ctx.actions.declare_output("output1") + ctx.actions.run(["cp", artifact1, out1.as_output()], category = "cp", identifier = "cp1") + out2 = ctx.actions.declare_output("output2") + ctx.actions.run(["cp", artifact2, out2.as_output()], category = "cp", identifier = "cp2") + return [DefaultInfo(default_outputs = [out1, out2])] + +_build_multiple = rule(impl = _build_multiple_impl, attrs = {}) + +def _check_multiple_impl(ctx: AnalysisContext) -> list[Provider]: + out = ctx.actions.declare_output("output") + + def f(ctx: AnalysisContext, artifacts, outputs): + _assert_eq(artifacts[ctx.attrs.src[0]].read_string(), "content1") + _assert_eq(artifacts[ctx.attrs.src[1]].read_string(), "content2") + ctx.actions.write(outputs[out], "") + + ctx.actions.dynamic_output(dynamic = ctx.attrs.src, inputs = [], outputs = [out.as_output()], f = f) + return [DefaultInfo(default_output = out)] + +_check_multiple = rule(impl = _check_multiple_impl, attrs = {"src": attrs.list(attrs.source())}) + +def as_artifact_test(): + _build(name = "as_artifact_build") + _check(name = "as_artifact_check", src = ":as_artifact_build") + + _short_path(name = "as_artifact_short_path") + _check_symlink_files(name = "symlinked_dir_with_promise_artifact") + _check_copy(name = "copy_with_promise_artifact") + + _default_output(name = "default_output_with_promise_artifact") + _check_default_output(name = "default_output_with_promise_artifact_check", src = ":default_output_with_promise_artifact") + + _build_multiple(name = "as_artifact_build_multiple") + _check_multiple(name = "as_artifact_check_multiple", src = [":as_artifact_build_multiple"]) diff --git a/tests/core/build/actions/test_actions_data/actions/anon/attributes.bzl b/tests/core/build/actions/test_actions_data/actions/anon/attributes.bzl new file mode 100644 index 0000000000000..ecc3b52d4a577 --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/anon/attributes.bzl @@ -0,0 +1,251 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# A test of various types of attribute + +MirrorInfo = provider(fields = ["info"]) + +def _assert_eq(a, b): + if a != b: + fail("Expected {} == {}".format(a, b)) + +# Test primitives + +def _mirror_impl(ctx: AnalysisContext) -> list[Provider]: + return [DefaultInfo(), MirrorInfo(info = ctx.attrs)] + +_mirror = rule(impl = _mirror_impl, attrs = { + "defaulted": attrs.string(default = "a-default"), + "enum": attrs.enum(["red", "green", "blue"]), + "false": attrs.bool(), + "int": attrs.int(), + "list_string": attrs.list(attrs.string()), + "string": attrs.string(), + "true": attrs.bool(), +}) + +def _simple_impl(ctx: AnalysisContext) -> Promise: + def f(providers): + res = providers[MirrorInfo].info + _assert_eq(res.true, True) + _assert_eq(res.false, False) + _assert_eq(res.int, 42) + _assert_eq(res.string, "a-string") + _assert_eq(res.list_string, ["a", "b", "c"]) + _assert_eq(res.defaulted, "a-default") + _assert_eq(res.enum, "red") + return [DefaultInfo()] + + at = { + "enum": "red", + "false": False, + "int": 42, + "list_string": ["a", "b", "c"], + "name": ctx.label, + "string": "a-string", + "true": True, + } + return ctx.actions.anon_target(_mirror, at).promise.map(f) + +_simple = rule(impl = _simple_impl, attrs = {}) + +# Test dep + +_mirror2 = rule(impl = _mirror_impl, attrs = { + "dep": attrs.dep(), +}) + +def _complex_impl(ctx: AnalysisContext) -> Promise: + def f(providers): + res = providers[MirrorInfo].info + _assert_eq(res.dep[DefaultInfo].default_outputs[0].short_path, "my_short_path") + return [DefaultInfo()] + + return ctx.actions.anon_target(_mirror2, {"dep": ctx.attrs.dep}).promise.map(f) + +_complex = rule(impl = _complex_impl, attrs = { + "dep": attrs.dep(default = "//anon:attributes_complex_source"), +}) + +# Test collections + +_mirror3 = rule(impl = _mirror_impl, attrs = { + "deps": attrs.list(attrs.dep()), + "dict": attrs.dict(key = attrs.string(), value = attrs.dep()), + "one_of": attrs.one_of(attrs.dep(), attrs.bool()), + "set": attrs.set(attrs.dep()), + "tuple": attrs.tuple(attrs.dep(), attrs.string()), +}) + +def _complex_collection_impl(ctx: AnalysisContext) -> Promise: + def f(providers): + res = providers[MirrorInfo].info + _assert_eq(res.deps[0][DefaultInfo].default_outputs[0].short_path, "my_short_path") + _assert_eq(res.tuple[0][DefaultInfo].default_outputs[0].short_path, "my_short_path") + _assert_eq(res.tuple[1], "my_string") + _assert_eq(res.dict["my_key"][DefaultInfo].default_outputs[0].short_path, "my_short_path") + _assert_eq(res.one_of[DefaultInfo].default_outputs[0].short_path, "my_short_path") + _assert_eq(res.set[0][DefaultInfo].default_outputs[0].short_path, "my_short_path") + return [DefaultInfo()] + + return ctx.actions.anon_target(_mirror3, { + "deps": ctx.attrs.deps, + "dict": ctx.attrs.dict, + "one_of": ctx.attrs.one_of, + "set": ctx.attrs.set, + "tuple": ctx.attrs.tuple, + }).promise.map(f) + +_complex_collection = rule(impl = _complex_collection_impl, attrs = { + "deps": attrs.list(attrs.dep(), default = ["//anon:attributes_complex_source"]), + "dict": attrs.dict(key = attrs.string(), value = attrs.dep(), default = {"my_key": "//anon:attributes_complex_source"}), + "one_of": attrs.one_of(attrs.dep(), attrs.bool(), default = "//anon:attributes_complex_source"), + "set": attrs.set(attrs.dep(), default = ["//anon:attributes_complex_source"]), + "tuple": attrs.tuple(attrs.dep(), attrs.string(), default = ("//anon:attributes_complex_source", "my_string")), +}) + +def _complex_source_impl(ctx: AnalysisContext) -> list[Provider]: + artifact = 
ctx.actions.write("my_short_path", "") + return [DefaultInfo(default_output = artifact)] + +_complex_source = rule(impl = _complex_source_impl, attrs = {}) + +# Test artifacts + +_artifacts_mirror = rule(impl = _mirror_impl, attrs = { + "build_artifact": attrs.source(), + "declared_artifact": attrs.source(), + "source_artifact": attrs.source(), +}) + +def _complex_artifacts_impl(ctx: AnalysisContext) -> Promise: + def f(providers): + res = providers[MirrorInfo].info + _assert_eq(res.source_artifact.is_source, True) + _assert_eq(res.build_artifact.is_source, False) + _assert_eq(res.declared_artifact.is_source, False) + _assert_eq(res.source_artifact.basename, "my_source") + _assert_eq(res.build_artifact.basename, "my_short_path") + _assert_eq(res.declared_artifact.basename, "my_shorter_path") + return [DefaultInfo()] + + declared_artifact = ctx.actions.write("my_shorter_path", "") + + return ctx.actions.anon_target(_artifacts_mirror, { + "build_artifact": ctx.attrs.build_artifact, + "declared_artifact": declared_artifact, + "source_artifact": ctx.attrs.source_artifact, + }).promise.map(f) + +_complex_artifacts = rule(impl = _complex_artifacts_impl, attrs = { + "build_artifact": attrs.source(default = "//anon:attributes_complex_source"), + "source_artifact": attrs.source(), +}) + +# Test promise_artifacts + +HelloInfo = provider(fields = ["output"]) + +def _builder_impl(ctx: AnalysisContext) -> list[Provider]: + hello = ctx.actions.write("hello.out", "hello") + return [DefaultInfo(), HelloInfo(output = hello)] + +_builder = anon_rule(impl = _builder_impl, artifact_promise_mappings = {"artifact": lambda x: x[HelloInfo].output}, attrs = {}) + +def _promise_artifact_mirror_impl(ctx: AnalysisContext) -> list[Provider]: + out = ctx.actions.declare_output("output") + ctx.actions.run(["cp", ctx.attrs.promise_artifact, out.as_output()], category = "cp") + return [DefaultInfo(default_output = out), MirrorInfo(info = ctx.attrs)] + +_promise_artifact_mirror = rule(impl = _promise_artifact_mirror_impl, attrs = { + "promise_artifact": attrs.source(), +}) + +def _promise_artifact_impl(ctx: AnalysisContext) -> Promise: + def f(providers): + res = providers[MirrorInfo].info + _assert_eq(res.promise_artifact.is_source, False) + return [providers[DefaultInfo]] + + promise_artifact = ctx.actions.anon_target(_builder, {}).artifact("artifact") + + return ctx.actions.anon_target(_promise_artifact_mirror, { + "promise_artifact": promise_artifact, + }).promise.map(f) + +_promise_artifact = rule(impl = _promise_artifact_impl, attrs = {}) + +def _check_impl(ctx: AnalysisContext) -> list[Provider]: + out = ctx.actions.declare_output("output") + + def f(ctx: AnalysisContext, artifacts, outputs): + _assert_eq(artifacts[ctx.attrs.src].read_string(), "hello") + ctx.actions.write(outputs[out], "") + + ctx.actions.dynamic_output(dynamic = [ctx.attrs.src], inputs = [], outputs = [out.as_output()], f = f) + return [DefaultInfo(default_output = out)] + +_check = rule(impl = _check_impl, attrs = {"src": attrs.source()}) + +# Test label + +LabelTestInfo = provider(fields = ["info"]) + +def _rule_with_subtarget_impl(ctx: AnalysisContext) -> list[Provider]: + return [DefaultInfo(), LabelTestInfo(info = ctx.attrs)] + +_rule_with_subtarget = rule(impl = _rule_with_subtarget_impl, attrs = { + "my_string": attrs.string(default = "a-string"), +}) + +_label_mirror = rule(impl = _mirror_impl, attrs = { + "subtarget_label": attrs.label(), + "target_label": attrs.label(), +}) + +def _label_impl(ctx: AnalysisContext) -> Promise: + def 
f(providers): + target_label = providers[MirrorInfo].info.target_label + subtarget_label = providers[MirrorInfo].info.subtarget_label + _assert_eq(type(subtarget_label), "providers_label") + _assert_eq(subtarget_label.cell, "root") + _assert_eq(subtarget_label.name, "rule_with_subtarget") + _assert_eq(type(target_label), "providers_label") + _assert_eq(target_label.cell, "root") + return [DefaultInfo()] + + return ctx.actions.anon_target(_label_mirror, { + # Test that we can pass in an unconfigured subtarget label or an unconfigured target label + # ctx.attrs.label is a configured subtarget label at this point, so we can do some magic here + # to get the underlying unconfigured subtarget label via `with_sub_target()`, and we can also + # call `raw_target()` to get the underlying unconfigured target label. We do not accept + # configured labels for anon targets because anon targets do not support configurations in general. + "subtarget_label": ctx.attrs.label.raw_target().with_sub_target("LabelTestInfo"), + "target_label": ctx.attrs.label.raw_target(), + }).promise.map(f) + +_label = rule(impl = _label_impl, attrs = { + "label": attrs.label(default = "//anon:rule_with_subtarget"), +}) + +# Create targets for the tests + +def attributes_test(): + _simple(name = "attributes_simple") + _complex(name = "attributes_complex") + _complex_collection(name = "attributes_complex_collection") + _complex_source(name = "attributes_complex_source") + _complex_artifacts( + name = "attributes_complex_artifacts", + source_artifact = "my_source", + ) + + _promise_artifact(name = "promise_artifact") + _check(name = "check", src = ":promise_artifact") + + _rule_with_subtarget(name = "rule_with_subtarget") + _label(name = "label") diff --git a/tests/core/build/actions/test_actions_data/actions/anon/bigint.bzl b/tests/core/build/actions/test_actions_data/actions/anon/bigint.bzl new file mode 100644 index 0000000000000..197b979e3e631 --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/anon/bigint.bzl @@ -0,0 +1,83 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# A test to ensure we coerce large numbers to bigint when resolving attrs. 
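+#
+# Three magnitudes are exercised: -42 (fits in a single byte), -3000000000
+# (too large for i32), and -1152921504606846976 == -2^60 (needs the full
+# i64 range). Each value is mirrored through `any`, `int`, and `string`.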
+ +MirrorInfo = provider(fields = ["info"]) + +def _assert_eq(a, b): + if a != b: + fail("Expected {} == {}".format(a, b)) + +def _mirror_impl(ctx: AnalysisContext) -> list[Provider]: + return [DefaultInfo(), MirrorInfo(info = ctx.attrs)] + +_mirror = rule(impl = _mirror_impl, attrs = { + "any": attrs.any(), + "int": attrs.int(), + "string": attrs.string(), +}) + +def _int8_num_impl(ctx: AnalysisContext) -> Promise: + def f(providers): + res = providers[MirrorInfo].info + _assert_eq(res.any, -42) + _assert_eq(res.int, -42) + _assert_eq(res.string, "-42") + return [DefaultInfo()] + + at = { + "any": -21 * 2, + "int": -21 * 2, + "string": str(-21 * 2), + } + return ctx.actions.anon_target(_mirror, at).promise.map(f) + +_int8_num = rule(impl = _int8_num_impl, attrs = {}) + +def _int32_num_impl(ctx: AnalysisContext) -> Promise: + """1500000000 fits in int32, 3000000000 does not""" + + def f(providers): + res = providers[MirrorInfo].info + _assert_eq(res.any, -3000000000) + _assert_eq(res.int, -3000000000) + _assert_eq(res.string, "-3000000000") + return [DefaultInfo()] + + at = { + "any": -1500000000 * 2, + "int": -1500000000 * 2, + "string": str(-1500000000 * 2), + } + return ctx.actions.anon_target(_mirror, at).promise.map(f) + +_int32_num = rule(impl = _int32_num_impl, attrs = {}) + +def _int64_num_impl(ctx: AnalysisContext) -> Promise: + """2^40 = 1099511627776, 2^20 = 1048576, 2^60 = 2^40 * 2^20 = 1152921504606846976""" + + def f(providers): + res = providers[MirrorInfo].info + _assert_eq(res.any, -1152921504606846976) + _assert_eq(res.int, -1152921504606846976) + _assert_eq(res.string, "-1152921504606846976") + return [DefaultInfo()] + + at = { + "any": -1099511627776 * 1048576, + "int": -1099511627776 * 1048576, + "string": str(-1099511627776 * 1048576), + } + return ctx.actions.anon_target(_mirror, at).promise.map(f) + +_int64_num = rule(impl = _int64_num_impl, attrs = {}) + +def bigint_test(): + _int8_num(name = "bigint_int8_num") + _int32_num(name = "bigint_int32_num") + _int64_num(name = "bigint_int64_num") diff --git a/tests/core/build/actions/test_actions_data/actions/anon/build.bzl b/tests/core/build/actions/test_actions_data/actions/anon/build.bzl new file mode 100644 index 0000000000000..95cc2ca844cba --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/anon/build.bzl @@ -0,0 +1,41 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+
+# A basic test that a rule can delegate its build to an anon target,
+# forwarding the anon target's DefaultInfo through promise.map
+
+def _assert_eq(a, b):
+    if a != b:
+        fail("Expected {} == {}".format(a, b))
+
+def _builder_impl(ctx: AnalysisContext) -> list[Provider]:
+    a = ctx.actions.write("input", "hello")
+    b = ctx.actions.declare_output("output")
+    ctx.actions.run(cmd_args("cp", a, b.as_output()), category = "cp")
+    return [DefaultInfo(default_output = b)]
+
+_builder = rule(impl = _builder_impl, attrs = {})
+
+def _build_impl(ctx: AnalysisContext) -> Promise:
+    return ctx.actions.anon_target(_builder, {}).promise.map(lambda x: [x[DefaultInfo]])
+
+_build = rule(impl = _build_impl, attrs = {})
+
+def _check_impl(ctx: AnalysisContext) -> list[Provider]:
+    out = ctx.actions.declare_output("output")
+
+    def f(ctx: AnalysisContext, artifacts, outputs):
+        _assert_eq(artifacts[ctx.attrs.dep].read_string(), "hello")
+        ctx.actions.write(outputs[out], "")
+
+    ctx.actions.dynamic_output(dynamic = [ctx.attrs.dep], inputs = [], outputs = [out.as_output()], f = f)
+    return [DefaultInfo(default_output = out)]
+
+_check = rule(impl = _check_impl, attrs = {"dep": attrs.source()})
+
+def build_test():
+    _build(name = "build")
+    _check(name = "build_check", dep = ":build")
diff --git a/tests/core/build/actions/test_actions_data/actions/anon/my_source b/tests/core/build/actions/test_actions_data/actions/anon/my_source
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/build/actions/test_actions_data/actions/anon/recursive.bzl b/tests/core/build/actions/test_actions_data/actions/anon/recursive.bzl
new file mode 100644
index 0000000000000..48ce265b2f7c8
--- /dev/null
+++ b/tests/core/build/actions/test_actions_data/actions/anon/recursive.bzl
@@ -0,0 +1,64 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+ +# @lint-ignore-every FBCODEBZLADDLOADS + +# A test that you can recursively keep adding more anon targets, +# or use anon_targets + +IntInfo = provider(fields = ["value"]) + +def _assert_eq(a, b): + if a != b: + fail("Expected {} == {}".format(a, b)) + +def _int_impl(ctx: AnalysisContext) -> list[Provider]: + return [DefaultInfo(), IntInfo(value = ctx.attrs.value)] + +_int = rule( + impl = _int_impl, + attrs = {"value": attrs.int()}, +) + +def _recursive_impl(ctx: AnalysisContext) -> Promise: + def f(x1, x2, x3): + _assert_eq(x1[IntInfo].value, 1) + _assert_eq(x2[IntInfo].value, 2) + _assert_eq(x3[IntInfo].value, 1) + return [DefaultInfo()] + + return ctx.actions.anon_target(_int, {"value": 1}).promise.map( + lambda x1: ctx.actions.anon_target(_int, {"value": 2}).promise.map( + lambda x2: ctx.actions.anon_target(_int, {"value": 1}).promise.map( + lambda x3: f(x1, x2, x3), + ), + ), + ) + +_recursive = rule( + impl = _recursive_impl, + attrs = {}, +) + +def _plural_impl(ctx: AnalysisContext) -> Promise: + def f(xs): + x1, x2, x3 = xs + _assert_eq(x1[IntInfo].value, 1) + _assert_eq(x2[IntInfo].value, 2) + _assert_eq(x3[IntInfo].value, 1) + return [DefaultInfo()] + + return ctx.actions.anon_targets([(_int, {"value": 1}), (_int, {"value": 2}), (_int, {"value": 1})]).promise.map(f) + +_plural = rule( + impl = _plural_impl, + attrs = {}, +) + +def recursive_test(): + _recursive(name = "recursive") + _plural(name = "plural") diff --git a/tests/core/build/actions/test_actions_data/actions/anon/shared.bzl b/tests/core/build/actions/test_actions_data/actions/anon/shared.bzl new file mode 100644 index 0000000000000..f3cffe867c744 --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/anon/shared.bzl @@ -0,0 +1,36 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# A very basic test that two things can share a single anon target + +SharedInfo = provider(fields = ["information"]) + +def _assert_eq(a, b): + if a != b: + fail("Expected {} == {}".format(a, b)) + +def _shared_impl(ctx: AnalysisContext) -> list[Provider]: + return [DefaultInfo(), SharedInfo(information = ctx.label.name)] + +_shared = rule(impl = _shared_impl, attrs = {}) + +def _user_impl(ctx: AnalysisContext) -> Promise: + def f(providers): + _assert_eq(providers[SharedInfo].information, ctx.attrs.use) + return [DefaultInfo()] + + return ctx.actions.anon_target(_shared, {"name": "hello//world:" + ctx.attrs.use}).promise.map(f) + +_user = rule(impl = _user_impl, attrs = {"use": attrs.string()}) + +def shared_test(): + _user(name = "shared_a1", use = "a") + _user(name = "shared_a2", use = "a") + _user(name = "shared_b", use = "b") + + # It's also a regular rule + _shared(name = "shared") diff --git a/tests/core/build/actions/test_actions_data/actions/anon/subtarget.bzl b/tests/core/build/actions/test_actions_data/actions/anon/subtarget.bzl new file mode 100644 index 0000000000000..bbb8dd6934b5f --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/anon/subtarget.bzl @@ -0,0 +1,45 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# A test that a subtarget reached via attrs.dep and one reached via
+# sub_target() are equivalent, including when passed to anon targets
+
+SubtargetInfo = provider(fields = ["information"])
+
+def _assert_eq(a, b):
+    if a != b:
+        fail("Expected {} == {}".format(a, b))
+
+def _anon_impl(ctx: AnalysisContext) -> list[Provider]:
+    output = ctx.actions.write("hello.txt", "hello")
+    return [DefaultInfo(default_outputs = [output])]
+
+_anon = rule(impl = _anon_impl, attrs = {"dep": attrs.dep()})
+
+def _subtarget_impl(ctx: AnalysisContext) -> Promise:
+    def f(xs):
+        _assert_eq(xs[0][DefaultInfo].default_outputs, xs[1][DefaultInfo].default_outputs)
+        return [DefaultInfo()]
+
+    child = ctx.attrs.child
+    base_child = ctx.attrs.base.sub_target("child")
+    _assert_eq(base_child[SubtargetInfo].information, child[SubtargetInfo].information)
+    _assert_eq(base_child.label, child.label)
+    return ctx.actions.anon_targets([(_anon, {"dep": child}), (_anon, {"dep": base_child})]).promise.map(f)
+
+_subtarget = rule(impl = _subtarget_impl, attrs = {
+    "base": attrs.dep(),
+    "child": attrs.dep(),
+})
+
+def _base_impl(_ctx: AnalysisContext) -> list[Provider]:
+    return [DefaultInfo(sub_targets = {"child": [DefaultInfo(), SubtargetInfo(information = "hello")]})]
+
+_base = rule(impl = _base_impl, attrs = {})
+
+def subtarget_test():
+    _base(name = "subtarget_base")
+    _subtarget(name = "subtarget", base = ":subtarget_base", child = ":subtarget_base[child]")
diff --git a/tests/core/build/actions/test_actions_data/actions/anon/types.bzl b/tests/core/build/actions/test_actions_data/actions/anon/types.bzl
new file mode 100644
index 0000000000000..43461a30a53c0
--- /dev/null
+++ b/tests/core/build/actions/test_actions_data/actions/anon/types.bzl
@@ -0,0 +1,33 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
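+
+# Checks only the declared return types: ctx.actions.anon_target() yields an
+# AnonTarget and ctx.actions.anon_targets() an AnonTargets; the promises are
+# never resolved.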
+ +def _anon_impl(_ctx: AnalysisContext) -> list[Provider]: + return [DefaultInfo()] + +_anon = rule(impl = _anon_impl, attrs = { + "foo": attrs.string(), +}) + +def _anon_target(ctx: AnalysisContext) -> AnonTarget: + return ctx.actions.anon_target( + _anon, + {"foo": "barbaz"}, + ) + +def _anon_targets(ctx: AnalysisContext) -> AnonTargets: + return ctx.actions.anon_targets([(_anon, {"foo": "bar"}), (_anon, {"foo": "baz"})]) + +# this doesn't do anything except make sure that the anon targets types are valid +def _types_impl(ctx: AnalysisContext) -> list[Provider]: + _anon_target(ctx) + _anon_targets(ctx) + return [DefaultInfo()] + +_types = rule(impl = _types_impl, attrs = {}) + +def types_test(): + _types(name = "types") diff --git a/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/anon_rule/TARGETS.fixture b/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/anon_rule/TARGETS.fixture new file mode 100644 index 0000000000000..8d57636f503a8 --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/anon_rule/TARGETS.fixture @@ -0,0 +1,7 @@ +load(":defs.bzl", "bad_anon_rule", "plugin") + +plugin( + name = "plugin", +) + +bad_anon_rule(name = "bad_anon_rule", attrs = {"bad_attr": ":plugin"}) diff --git a/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/anon_rule/defs.bzl b/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/anon_rule/defs.bzl new file mode 100644 index 0000000000000..ac363c850d6bf --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/anon_rule/defs.bzl @@ -0,0 +1,24 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _nop_impl(_ctx): + return [DefaultInfo()] + +Plugin = plugins.kind() + +plugin = rule( + impl = _nop_impl, + attrs = {}, +) + +bad_anon_rule = anon_rule( + impl = _nop_impl, + attrs = { + "bad_attr": attrs.plugin_dep(kind = Plugin), + }, + artifact_promise_mappings = {}, +) diff --git a/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/arg/TARGETS.fixture b/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/arg/TARGETS.fixture new file mode 100644 index 0000000000000..e02ca431abc74 --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/arg/TARGETS.fixture @@ -0,0 +1,4 @@ +load(":attributes.bzl", "arg_not_compatible", "default_arg_fails") + +default_arg_fails(name = "default_arg_fails") +arg_not_compatible(name = "arg_not_compatible") diff --git a/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/arg/attributes.bzl b/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/arg/attributes.bzl new file mode 100644 index 0000000000000..fd738b81c8079 --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/arg/attributes.bzl @@ -0,0 +1,46 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+
+# Tests that attrs.arg is not compatible with anon targets: both a defaulted
+# arg attribute and a passed-in arg value should fail
+
+MirrorInfo = provider(fields = ["info"])
+
+def _mirror_impl(ctx: AnalysisContext) -> list[Provider]:
+    return [DefaultInfo(), MirrorInfo(info = ctx.attrs)]
+
+_mirror_arg = rule(impl = _mirror_impl, attrs = {
+    "arg": attrs.arg(default = "foo"),
+})
+
+def _default_arg_fails(ctx: AnalysisContext) -> Promise:
+    def f(_providers):
+        return [DefaultInfo()]
+
+    return ctx.actions.anon_target(_mirror_arg, {}).promise.map(f)
+
+default_arg_fails = rule(impl = _default_arg_fails, attrs = {})
+
+def _mirror_no_default_impl(ctx: AnalysisContext) -> list[Provider]:
+    return [DefaultInfo(), MirrorInfo(info = ctx.attrs)]
+
+_mirror_no_default_arg = rule(impl = _mirror_no_default_impl, attrs = {
+    "arg": attrs.arg(),
+})
+
+_python = "import os; out = open(os.getenv('OUT'), 'wb'); out.write(os.urandom(50))"
+
+def _arg_not_compatible_impl(ctx: AnalysisContext) -> Promise:
+    def f(_providers):
+        return [DefaultInfo()]
+
+    return ctx.actions.anon_target(_mirror_no_default_arg, {
+        "arg": ctx.attrs.arg,
+    }).promise.map(f)
+
+arg_not_compatible = rule(impl = _arg_not_compatible_impl, attrs = {
+    "arg": attrs.arg(default = _python),
+})
diff --git a/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/dep/TARGETS.fixture b/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/dep/TARGETS.fixture
new file mode 100644
index 0000000000000..08cf5acd46ae8
--- /dev/null
+++ b/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/dep/TARGETS.fixture
@@ -0,0 +1,4 @@
+load(":attributes.bzl", "default_dep_fails", "source")
+
+source(name = "dep")
+default_dep_fails(name = "default_dep_fails")
diff --git a/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/dep/attributes.bzl b/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/dep/attributes.bzl
new file mode 100644
index 0000000000000..d47dfc70cd2dc
--- /dev/null
+++ b/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/dep/attributes.bzl
@@ -0,0 +1,31 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# Tests that a dep attribute with a default value fails when used on an
+# anon target
+
+MirrorInfo = provider(fields = ["info"])
+
+def _mirror_impl(ctx: AnalysisContext) -> list[Provider]:
+    return [DefaultInfo(), MirrorInfo(info = ctx.attrs)]
+
+def _complex_source_impl(ctx: AnalysisContext) -> list[Provider]:
+    artifact = ctx.actions.write("my_short_path", "")
+    return [DefaultInfo(default_output = artifact)]
+
+source = rule(impl = _complex_source_impl, attrs = {})
+
+_dep_mirror = rule(impl = _mirror_impl, attrs = {
+    "dep": attrs.dep(default = "//anon_invalid_defaults/dep:dep"),
+})
+
+def _complex_dep_impl(ctx: AnalysisContext) -> Promise:
+    def f(_providers):
+        return [DefaultInfo()]
+
+    return ctx.actions.anon_target(_dep_mirror, {}).promise.map(f)
+
+default_dep_fails = rule(impl = _complex_dep_impl, attrs = {})
diff --git a/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/promise_artifact/TARGETS.fixture b/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/promise_artifact/TARGETS.fixture
new file mode 100644
index 0000000000000..a689e8283bc6e
--- /dev/null
+++ b/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/promise_artifact/TARGETS.fixture
@@ -0,0 +1,4 @@
+load(":attributes.bzl", "bad_short_path", "build")
+
+build(name = "promise_artifact")
+bad_short_path(name = "bad_short_path", src = ":promise_artifact")
diff --git a/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/promise_artifact/attributes.bzl b/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/promise_artifact/attributes.bzl
new file mode 100644
index 0000000000000..49adc660abf5c
--- /dev/null
+++ b/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/promise_artifact/attributes.bzl
@@ -0,0 +1,38 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
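+
+# Invalid promise-artifact usage: `build` asserts a short_path that does not
+# match what the anon target actually writes, and `bad_short_path` then
+# consumes the artifact from dynamic_output, so resolution should fail.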
+ +HelloInfo = provider(fields = ["output"]) + +def _builder_impl(ctx: AnalysisContext) -> list[Provider]: + hello = ctx.actions.write("hello.out", "hello") + return [DefaultInfo(), HelloInfo(output = hello)] + +_builder = anon_rule( + impl = _builder_impl, + attrs = {}, + artifact_promise_mappings = { + "artifact": lambda x: x[HelloInfo].output, + }, +) + +def _build_impl(ctx: AnalysisContext) -> list[Provider]: + artifact = ctx.actions.anon_target(_builder, {}).artifact("artifact") + ctx.actions.assert_short_path(artifact, short_path = "WRONG_PATH") + return [DefaultInfo()] + +build = rule(impl = _build_impl, attrs = {}) + +def _bad_short_path_impl(ctx: AnalysisContext) -> list[Provider]: + out = ctx.actions.declare_output("output") + + def f(ctx: AnalysisContext, _artifacts, outputs): + ctx.actions.write(outputs[out], "") + + ctx.actions.dynamic_output(dynamic = [ctx.attrs.src], inputs = [], outputs = [out.as_output()], f = f) + return [DefaultInfo(default_output = out)] + +bad_short_path = rule(impl = _bad_short_path_impl, attrs = {"src": attrs.source()}) diff --git a/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/source/TARGETS.fixture b/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/source/TARGETS.fixture new file mode 100644 index 0000000000000..e4275c3b6eb04 --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/source/TARGETS.fixture @@ -0,0 +1,4 @@ +load(":attributes.bzl", "default_source_fails", "source") + +source(name = "source") +default_source_fails(name = "default_source_fails") diff --git a/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/source/attributes.bzl b/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/source/attributes.bzl new file mode 100644 index 0000000000000..f2fcdad641a49 --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/anon_invalid_defaults/source/attributes.bzl @@ -0,0 +1,31 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+
+# Tests that a source attribute with a default value fails when used on an
+# anon target
+
+MirrorInfo = provider(fields = ["info"])
+
+def _mirror_impl(ctx: AnalysisContext) -> list[Provider]:
+    return [DefaultInfo(), MirrorInfo(info = ctx.attrs)]
+
+def _complex_source_impl(ctx: AnalysisContext) -> list[Provider]:
+    artifact = ctx.actions.write("my_short_path", "")
+    return [DefaultInfo(default_output = artifact)]
+
+source = rule(impl = _complex_source_impl, attrs = {})
+
+_artifacts_mirror = rule(impl = _mirror_impl, attrs = {
+    "source": attrs.source(default = "//anon_invalid_defaults/source:source"),
+})
+
+def _complex_artifacts_impl(ctx: AnalysisContext) -> Promise:
+    def f(_providers):
+        return [DefaultInfo()]
+
+    return ctx.actions.anon_target(_artifacts_mirror, {}).promise.map(f)
+
+default_source_fails = rule(impl = _complex_artifacts_impl, attrs = {})
diff --git a/tests/core/build/actions/test_actions_data/actions/associated_artifacts/TARGETS.fixture b/tests/core/build/actions/test_actions_data/actions/associated_artifacts/TARGETS.fixture
new file mode 100644
index 0000000000000..0c8777101868d
--- /dev/null
+++ b/tests/core/build/actions/test_actions_data/actions/associated_artifacts/TARGETS.fixture
@@ -0,0 +1,3 @@
+load(":defs.bzl", "defs")
+
+defs()
diff --git a/tests/core/build/actions/test_actions_data/actions/associated_artifacts/defs.bzl b/tests/core/build/actions/test_actions_data/actions/associated_artifacts/defs.bzl
new file mode 100644
index 0000000000000..e63c2a1a96416
--- /dev/null
+++ b/tests/core/build/actions/test_actions_data/actions/associated_artifacts/defs.bzl
@@ -0,0 +1,67 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+Artifacts = provider(fields = ["associated", "artifacts"])
+
+def _artifacts(ctx: AnalysisContext) -> list[Provider]:
+    out1 = ctx.actions.write("out1", "")
+    out2 = ctx.actions.write("out2", "")
+    return [
+        DefaultInfo(),
+        Artifacts(
+            associated = out1.with_associated_artifacts([out2]),
+            artifacts = [out1, out2],
+        ),
+    ]
+
+artifacts = rule(impl = _artifacts, attrs = {})
+
+def _check(ctx: AnalysisContext) -> list[Provider]:
+    dep1 = ctx.attrs.dep1[Artifacts]
+    dep2 = ctx.attrs.dep2[Artifacts]
+
+    # Those are the paths we'll check.
+    all_artifacts = []
+    all_artifacts.extend(dep1.artifacts)
+    all_artifacts.extend(dep2.artifacts)
+
+    inputs = []
+
+    # Use associated artifacts on declared artifact from dep1
+    inputs.append(dep1.associated)
+
+    # Use associated artifacts on frozen artifact from dep2
+    (out1, out2) = dep2.artifacts
+    inputs.append(out1.with_associated_artifacts([out2]))
+
+    check = ctx.actions.declare_output("check")
+    ctx.actions.run(
+        [
+            "python3",
+            "-c",
+            ";".join([
+                "import sys, os",
+                "assert all(os.path.exists(f) for f in sys.argv[1:])",
+                "open(os.environ['OUT'], 'w')",
+            ]),
+            # Look for all the artifacts but don't add a dependency here.
+ cmd_args(all_artifacts, ignore_artifacts = True), + # Actually make those artifacts only available + cmd_args(hidden = inputs), + ], + env = {"OUT": check.as_output()}, + category = "check", + ) + + return [DefaultInfo(check)] + +check = rule(impl = _check, attrs = {"dep1": attrs.dep(), "dep2": attrs.dep()}) + +def defs(): + artifacts(name = "dep1") + artifacts(name = "dep2") + check(name = "check", dep1 = ":dep1", dep2 = ":dep2") diff --git a/tests/core/build/actions/test_actions_data/actions/cas_artifact/TARGETS.fixture b/tests/core/build/actions/test_actions_data/actions/cas_artifact/TARGETS.fixture new file mode 100644 index 0000000000000..822b329203ba9 --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/cas_artifact/TARGETS.fixture @@ -0,0 +1,43 @@ +load(":defs.bzl", "cas_artifact") + +# If you need to re-upload the CAS blobs, use the following commands: +# +# ``` +# $ cd artifacts +# $ frecli --use-case apple_build_infra_tools cas upload-blob empty --skip-find-missing +# $ frecli --use-case apple_build_infra_tools cas upload-tree tree --skip-find-missing +# $ frecli --use-case apple_build_infra_tools cas upload-directory dir --skip-find-missing +# ``` +# +# To verify the TTLs, you can use: +# +# ``` +# $ frecli --use-case apple_build_infra_tools cas get-ttl $BLOB_OR_DIR_DIGEST +# $ frecli --use-case apple_build_infra_tools cas get-tree-ttl $TREE_DIGEST +# ``` + +cas_artifact( + name = "empty", + # The empty file is guaranteed to exist in RE. + digest = "da39a3ee5e6b4b0d3255bfef95601890afd80709:0", + use_case = "buck2-testing", + expires_after_timestamp = 0, +) + +cas_artifact( + name = "tree", + # A small tree uploaded in RE with a very large expiration + digest = "0424991a08d1a857d9a4ea858b2f0c9d17f41abbc927d18a8147f0d9e708a77b:77", + use_case = "apple_build_infra_tools", + expires_after_timestamp = 0, + is_tree = True, +) + +cas_artifact( + name = "dir", + # A small directory uploaded in RE with a very large expiration + digest = "a00a566911ba695e1dab11c31fc81110fe98cb7250bf17242f6a048165ce72a8:75", + use_case = "apple_build_infra_tools", + expires_after_timestamp = 0, + is_directory = True, +) diff --git a/tests/core/build/actions/test_actions_data/actions/cas_artifact/artifacts/dir/y b/tests/core/build/actions/test_actions_data/actions/cas_artifact/artifacts/dir/y new file mode 100644 index 0000000000000..45b983be36b73 --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/cas_artifact/artifacts/dir/y @@ -0,0 +1 @@ +hi diff --git a/tests/core/build/actions/test_actions_data/actions/cas_artifact/artifacts/empty b/tests/core/build/actions/test_actions_data/actions/cas_artifact/artifacts/empty new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/actions/test_actions_data/actions/cas_artifact/artifacts/tree/b b/tests/core/build/actions/test_actions_data/actions/cas_artifact/artifacts/tree/b new file mode 100644 index 0000000000000..61780798228d1 --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/cas_artifact/artifacts/tree/b @@ -0,0 +1 @@ +b diff --git a/tests/core/build/actions/test_actions_data/actions/cas_artifact/defs.bzl b/tests/core/build/actions/test_actions_data/actions/cas_artifact/defs.bzl new file mode 100644 index 0000000000000..2b598bd4667ff --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/cas_artifact/defs.bzl @@ -0,0 +1,25 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _cas_artifact_impl(ctx: AnalysisContext): + out = ctx.actions.cas_artifact( + ctx.label.name, + ctx.attrs.digest, + ctx.attrs.use_case, + expires_after_timestamp = ctx.attrs.expires_after_timestamp, + is_tree = ctx.attrs.is_tree, + is_directory = ctx.attrs.is_directory, + ) + return [DefaultInfo(default_output = out)] + +cas_artifact = rule(impl = _cas_artifact_impl, attrs = { + "digest": attrs.string(), + "expires_after_timestamp": attrs.int(), + "is_directory": attrs.bool(default = False), + "is_tree": attrs.bool(default = False), + "use_case": attrs.string(), +}) diff --git a/tests/core/build/actions/test_actions_data/actions/copy/TARGETS.fixture b/tests/core/build/actions/test_actions_data/actions/copy/TARGETS.fixture new file mode 100644 index 0000000000000..8b319fb2fbb6c --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/copy/TARGETS.fixture @@ -0,0 +1,34 @@ +load(":defs.bzl", "copy_file") + +copy_file( + name = "fails_on_invalid_src", + src = "foo/bar/baz/file.txt", + out = "out", +) + +copy_file( + name = "fails_on_invalid_dest", + src = "foo/bar/baz/file.txt", + out = "out", +) + +copy_file( + name = "file_uses_declared_output", + src = "foo/bar/baz/file.txt", + out = "baz/file.txt", + test = "uses_declared_output", +) + +copy_file( + name = "file_uses_declared_output_as_output", + src = "foo/bar/baz/file.txt", + out = "baz/file.txt", + test = "uses_declared_output_as_output", +) + +copy_file( + name = "file_declares_output", + src = "foo/bar/baz/file.txt", + out = "baz/file.txt", + test = "declares_output", +) diff --git a/tests/core/build/actions/test_actions_data/actions/copy/defs.bzl b/tests/core/build/actions/test_actions_data/actions/copy/defs.bzl new file mode 100644 index 0000000000000..ebb38a57a60d9 --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/copy/defs.bzl @@ -0,0 +1,37 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
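+
+# Exercises ctx.actions.copy_file: copying into an already-declared output,
+# into that output's as_output(), into a path declared on the fly, plus two
+# cases that should fail inside copy_file() on invalid src/dest arguments.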
+ +def _copy_file_impl(ctx): + test = ctx.attrs.test or ctx.attrs.name + if test == "uses_declared_output": + declared = ctx.actions.declare_output(ctx.attrs.out) + output = ctx.actions.copy_file(declared, ctx.attrs.src) + return [DefaultInfo(default_output = output)] + elif test == "uses_declared_output_as_output": + declared = ctx.actions.declare_output(ctx.attrs.out) + output = ctx.actions.copy_file(declared.as_output(), ctx.attrs.src) + return [DefaultInfo(default_output = output)] + elif test == "declares_output": + output = ctx.actions.copy_file(ctx.attrs.out, ctx.attrs.src) + return [DefaultInfo(default_output = output)] + elif test == "fails_on_invalid_src": + ctx.actions.copy_file(ctx.attrs.out, []) + fail("should fail in copy() function") + elif test == "fails_on_invalid_dest": + ctx.actions.copy_file([], ctx.attrs.src) + fail("should fail in copy() function") + else: + fail("invalid test") + +copy_file = rule( + impl = _copy_file_impl, + attrs = { + "out": attrs.string(), + "src": attrs.source(), + "test": attrs.option(attrs.string(), default = None), + }, +) diff --git a/tests/core/build/actions/test_actions_data/actions/copy/foo/bar/baz/file.txt b/tests/core/build/actions/test_actions_data/actions/copy/foo/bar/baz/file.txt new file mode 100644 index 0000000000000..7a1c6130c652b --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/copy/foo/bar/baz/file.txt @@ -0,0 +1 @@ +some file diff --git a/tests/core/build/actions/test_actions_data/actions/download_file/TARGETS.fixture b/tests/core/build/actions/test_actions_data/actions/download_file/TARGETS.fixture new file mode 100644 index 0000000000000..9650b9bf1b198 --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/download_file/TARGETS.fixture @@ -0,0 +1,3 @@ +load(":defs.bzl", "test") + +test(name = "test", url = read_config("test", "url"), sha1 = read_config("test", "sha1")) diff --git a/tests/core/build/actions/test_actions_data/actions/download_file/defs.bzl b/tests/core/build/actions/test_actions_data/actions/download_file/defs.bzl new file mode 100644 index 0000000000000..06ab7eb047e92 --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/download_file/defs.bzl @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
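+
+# url and sha1 come from buckconfig (see the TARGETS fixture), so the test
+# can inject them at runtime; `is_deferrable = False` should force an eager
+# download rather than one deferred until the output is materialized.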
+ +def _test_impl(ctx: AnalysisContext): + output = ctx.actions.download_file(ctx.label.name, ctx.attrs.url, sha1 = ctx.attrs.sha1, is_deferrable = False) + return [ + DefaultInfo(default_output = output), + ] + +test = rule( + impl = _test_impl, + attrs = { + "sha1": attrs.string(), + "url": attrs.string(), + }, +) diff --git a/tests/core/build/actions/test_actions_data/actions/run/TARGETS.fixture b/tests/core/build/actions/test_actions_data/actions/run/TARGETS.fixture new file mode 100644 index 0000000000000..7331941745543 --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/run/TARGETS.fixture @@ -0,0 +1,40 @@ +load(":defs.bzl", "run_command") + +is_windows = host_info().os.is_windows + +run_command( + name = "runs_simple_script", + other_src = "src.txt", + script = "echo.bat" if is_windows else "echo.sh", +) + +run_command( + name = "runs_simple_script_as_exe", + other_src = "src.txt", + script = "echo.bat" if is_windows else "echo.sh", +) + +run_command( + name = "rejects_zero_outputs", + script = "echo.bat" if is_windows else "echo.sh", +) + +run_command( + name = "rejects_bad_args", + script = "echo.bat" if is_windows else "echo.sh", +) + +run_command( + name = "runs_script_locally", + script = "hostname.bat" if is_windows else "hostname.sh", +) + +run_command( + name = "runs_script_locally_outputs_symlink", + script = "create_symlink.bat" if is_windows else "create_symlink.sh", +) + +run_command( + name = "runs_simple_script_remote", + script = "hostname.bat" if is_windows else "hostname.sh", +) diff --git a/tests/core/build/actions/test_actions_data/actions/run/create_symlink.bat b/tests/core/build/actions/test_actions_data/actions/run/create_symlink.bat new file mode 100644 index 0000000000000..522bc25c27d5c --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/run/create_symlink.bat @@ -0,0 +1,10 @@ +@REM Copyright (c) Meta Platforms, Inc. and affiliates. +@REM +@REM This source code is licensed under both the MIT license found in the +@REM LICENSE-MIT file in the root directory of this source tree and the Apache +@REM License, Version 2.0 found in the LICENSE-APACHE file in the root directory +@REM of this source tree. + +@echo off + +mklink %1 %~f0 diff --git a/tests/core/build/actions/test_actions_data/actions/run/create_symlink.sh b/tests/core/build/actions/test_actions_data/actions/run/create_symlink.sh new file mode 100755 index 0000000000000..35d421151a25a --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/run/create_symlink.sh @@ -0,0 +1,11 @@ +#!/bin/sh +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# create relative symlink to `create_symlink.sh` +script_path_relative_to_symlink="$(realpath --relative-to="$(dirname "$1")" "$0")" +ln -s "$script_path_relative_to_symlink" "$1" diff --git a/tests/core/build/actions/test_actions_data/actions/run/defs.bzl b/tests/core/build/actions/test_actions_data/actions/run/defs.bzl new file mode 100644 index 0000000000000..3ebe8546e5973 --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/run/defs.bzl @@ -0,0 +1,64 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _platform_args(args): + if host_info().os.is_windows: + return ["cmd.exe", "/c"] + args + else: + return args + +def _run_command_impl(ctx): + test = ctx.attrs.name + if test == "runs_simple_script": + declared = ctx.actions.declare_output(ctx.attrs.out) + args = [ + ctx.attrs.script, + declared.as_output(), + "foo", + ctx.attrs.other_src, + "bar", + ] + ctx.actions.run(_platform_args(args), category = "simple_script") + return [DefaultInfo(default_output = declared)] + if test == "runs_simple_script_as_exe": + declared = ctx.actions.declare_output(ctx.attrs.out) + args = [ + declared.as_output(), + "foo", + ctx.attrs.other_src, + "bar", + ] + exe = RunInfo(args = _platform_args([ctx.attrs.script])) + ctx.actions.run(args, exe = exe, category = "simple_script") + return [DefaultInfo(default_output = declared)] + if test in ("runs_script_locally", "runs_script_locally_outputs_symlink"): + declared = ctx.actions.declare_output(ctx.attrs.out) + ctx.actions.run(_platform_args([ctx.attrs.script, declared.as_output()]), local_only = True, category = "local") + return [DefaultInfo(default_output = declared)] + elif test == "runs_simple_script_remote": + declared = ctx.actions.declare_output(ctx.attrs.out) + ctx.actions.run(_platform_args([ctx.attrs.script, declared.as_output()]), local_only = False, category = "remote") + return [DefaultInfo(default_output = declared)] + elif test == "rejects_zero_outputs": + ctx.actions.run(_platform_args([ctx.attrs.script, "foo"]), category = "rejects_zero_outputs") + elif test == "rejects_bad_args": + def hide_type(x): + return x + + ctx.actions.run(hide_type({}), category = "bad_args") + else: + fail("invalid test") + return None + +run_command = rule( + impl = _run_command_impl, + attrs = { + "other_src": attrs.option(attrs.source(), default = None), + "out": attrs.string(default = "out.txt"), + "script": attrs.source(), + }, +) diff --git a/tests/core/build/actions/test_actions_data/actions/run/echo.bat b/tests/core/build/actions/test_actions_data/actions/run/echo.bat new file mode 100644 index 0000000000000..7251f6850763c --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/run/echo.bat @@ -0,0 +1,18 @@ +@REM Copyright (c) Meta Platforms, Inc. and affiliates. +@REM +@REM This source code is licensed under both the MIT license found in the +@REM LICENSE-MIT file in the root directory of this source tree and the Apache +@REM License, Version 2.0 found in the LICENSE-APACHE file in the root directory +@REM of this source tree. + +@echo off + +set OUT=%1 +shift +set dirname=%~dp1 +if not exist %dirname% md %dirname% +if exist %OUT% del /f %OUT% +:loop +echo %1>>%OUT% +shift +if not "%~1"=="" goto loop diff --git a/tests/core/build/actions/test_actions_data/actions/run/echo.sh b/tests/core/build/actions/test_actions_data/actions/run/echo.sh new file mode 100755 index 0000000000000..a33564c5a4dbc --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/run/echo.sh @@ -0,0 +1,13 @@ +#!/bin/sh +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +OUT="$1" +shift +mkdir -p "$(dirname "$OUT")" +rm -f "$OUT" +for arg in "$@"; do echo "$arg" >> "$OUT"; done diff --git a/tests/core/build/actions/test_actions_data/actions/run/hostname.bat b/tests/core/build/actions/test_actions_data/actions/run/hostname.bat new file mode 100644 index 0000000000000..62f1546296081 --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/run/hostname.bat @@ -0,0 +1,11 @@ +@REM Copyright (c) Meta Platforms, Inc. and affiliates. +@REM +@REM This source code is licensed under both the MIT license found in the +@REM LICENSE-MIT file in the root directory of this source tree and the Apache +@REM License, Version 2.0 found in the LICENSE-APACHE file in the root directory +@REM of this source tree. + +@echo off + +set OUT=%1 +hostname>%OUT% diff --git a/tests/core/build/actions/test_actions_data/actions/run/hostname.sh b/tests/core/build/actions/test_actions_data/actions/run/hostname.sh new file mode 100755 index 0000000000000..c6688725d284b --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/run/hostname.sh @@ -0,0 +1,11 @@ +#!/bin/sh +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +OUT="$1" + +hostname > "$OUT" diff --git a/tests/core/build/actions/test_actions_data/actions/run/src.txt b/tests/core/build/actions/test_actions_data/actions/run/src.txt new file mode 100644 index 0000000000000..426863280eedd --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/run/src.txt @@ -0,0 +1 @@ +some data diff --git a/tests/core/build/actions/test_actions_data/actions/run_bad/TARGETS.fixture b/tests/core/build/actions/test_actions_data/actions/run_bad/TARGETS.fixture new file mode 100644 index 0000000000000..f720f0ec5ff8c --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/run_bad/TARGETS.fixture @@ -0,0 +1,6 @@ +load(":defs.bzl", "run_invalid_command", "run_odd_exit_code") + +run_invalid_command(name = "run_invalid_command_local", local_only = True) +run_invalid_command(name = "run_invalid_command_remote", local_only = False) +run_odd_exit_code(name = "run_odd_exit_code", exit_code = "45") +run_odd_exit_code(name = "run_negative_exit_code", exit_code = "-65") diff --git a/tests/core/build/actions/test_actions_data/actions/run_bad/defs.bzl b/tests/core/build/actions/test_actions_data/actions/run_bad/defs.bzl new file mode 100644 index 0000000000000..9229633fd2954 --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/run_bad/defs.bzl @@ -0,0 +1,27 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
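+
+# Two deliberately failing actions: one invokes a binary that does not exist
+# (so the action dies with no exit code at all), the other exits with the
+# code passed via `exit_code` in the TARGETS fixture.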
+ +def _run_invalid_command_impl(ctx: AnalysisContext) -> list[Provider]: + out = ctx.actions.declare_output("out") + ctx.actions.run( + ["this_binary_does_not_exist", out.as_output()], + category = "test", + local_only = ctx.attrs.local_only, + ) + return [DefaultInfo(default_output = out)] + +run_invalid_command = rule(impl = _run_invalid_command_impl, attrs = {"local_only": attrs.bool()}) + +def _run_odd_exit_code_impl(ctx: AnalysisContext) -> list[Provider]: + out = ctx.actions.declare_output("out") + ctx.actions.run( + cmd_args("fbpython", "-c", "import sys; sys.exit(int(sys.argv[1]))", ctx.attrs.exit_code, out.as_output()), + category = "test", + ) + return [DefaultInfo(default_output = out)] + +run_odd_exit_code = rule(impl = _run_odd_exit_code_impl, attrs = {"exit_code": attrs.string(default = "1")}) diff --git a/tests/core/build/actions/test_actions_data/actions/run_invalid/TARGETS.fixture b/tests/core/build/actions/test_actions_data/actions/run_invalid/TARGETS.fixture new file mode 100644 index 0000000000000..49ccb57212646 --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/run_invalid/TARGETS.fixture @@ -0,0 +1,3 @@ +load(":defs.bzl", "artifact_cycle") + +artifact_cycle(name = "artifact_cycle") diff --git a/tests/core/build/actions/test_actions_data/actions/run_invalid/defs.bzl b/tests/core/build/actions/test_actions_data/actions/run_invalid/defs.bzl new file mode 100644 index 0000000000000..ad823dbb46b3d --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/run_invalid/defs.bzl @@ -0,0 +1,18 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _artifact_cycle(ctx): + out = ctx.actions.declare_output("out") + + # Cycle + cmd = cmd_args(out.as_output()) + cmd.add(cmd_args(hidden = cmd)) + ctx.actions.run(cmd, category = "test") + + return DefaultInfo(out) + +artifact_cycle = rule(impl = _artifact_cycle, attrs = {}) diff --git a/tests/core/build/actions/test_actions_data/actions/symlinked_dir/TARGETS.fixture b/tests/core/build/actions/test_actions_data/actions/symlinked_dir/TARGETS.fixture new file mode 100644 index 0000000000000..d0d6e47c8c3f1 --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/symlinked_dir/TARGETS.fixture @@ -0,0 +1,11 @@ +load(":defs.bzl", "symlink_files", "write_file") + +write_file(name = "dep", out = "dep.txt", contents = "dep contents") + +symlink_files( + name = "out", + srcs = [ + "dir1/dir1_1/file1.txt", + ":dep", + ], +) diff --git a/tests/core/build/actions/test_actions_data/actions/symlinked_dir/defs.bzl b/tests/core/build/actions/test_actions_data/actions/symlinked_dir/defs.bzl new file mode 100644 index 0000000000000..91aa7ac4979d8 --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/symlinked_dir/defs.bzl @@ -0,0 +1,39 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
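+ +# symlink_files_impl maps each src to its short_path, and additionally to +# "subdir/<short_path>.suffix", then materializes the mapping with +# ctx.actions.symlinked_dir.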
+ +def write_file_impl(ctx): + out = ctx.actions.write(ctx.attrs.out, ctx.attrs.contents) + return [DefaultInfo(default_output = out)] + +def symlink_files_impl(ctx): + srcs = { + src.short_path: src + for src in ctx.attrs.srcs + } + + # Also make sure that linking to a new location works properly + srcs.update({ + "subdir/{}.suffix".format(src.short_path): src + for src in ctx.attrs.srcs + }) + out = ctx.actions.symlinked_dir("out", srcs) + return [DefaultInfo(default_output = out)] + +write_file = rule( + impl = write_file_impl, + attrs = { + "contents": attrs.string(), + "out": attrs.string(), + }, +) + +symlink_files = rule( + impl = symlink_files_impl, + attrs = { + "srcs": attrs.list(attrs.source()), + }, +) diff --git a/tests/core/build/actions/test_actions_data/actions/symlinked_dir/dir1/dir1_1/file1.txt b/tests/core/build/actions/test_actions_data/actions/symlinked_dir/dir1/dir1_1/file1.txt new file mode 100644 index 0000000000000..816dba3ddde47 --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/symlinked_dir/dir1/dir1_1/file1.txt @@ -0,0 +1 @@ +dir1_1 out contents diff --git a/tests/core/build/actions/test_actions_data/actions/write_json/TARGETS.fixture b/tests/core/build/actions/test_actions_data/actions/write_json/TARGETS.fixture new file mode 100644 index 0000000000000..9e8accd7b7f9c --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/write_json/TARGETS.fixture @@ -0,0 +1,3 @@ +load(":defs.bzl", "test") + +test() diff --git a/tests/core/build/actions/test_actions_data/actions/write_json/defs.bzl b/tests/core/build/actions/test_actions_data/actions/write_json/defs.bzl new file mode 100644 index 0000000000000..a30b233b2cc04 --- /dev/null +++ b/tests/core/build/actions/test_actions_data/actions/write_json/defs.bzl @@ -0,0 +1,219 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
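+ +# Each entry in tests pairs a value constructor with either the expected JSON +# or a checker function; _write_json_rule_impl round-trips the value through +# write_json and validates the result inside a dynamic_output.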
+ +def _create_artifact(ctx: AnalysisContext): + a = ctx.actions.write("path/test.txt", "") + return (a,) + +def _create_artifact_declared(ctx: AnalysisContext): + a = ctx.actions.declare_output("path/test.txt") + ctx.actions.write(a, "") + return (a,) + +def _create_artifact_as_output(ctx: AnalysisContext): + a = ctx.actions.declare_output("path/test.txt") + ctx.actions.write(a, "") + return (a.as_output(),) + +def _check_artifact(x): + # Want an array + [x] = x + x = x.replace("\\", "/") + if not x.startswith("buck-out/") or not x.endswith("path/test.txt"): + fail("Output is not as expected, got " + repr(x)) + +def _create_cmdargs_artifact(ctx: AnalysisContext): + a = ctx.actions.write("magic/path/test.txt", "") + return cmd_args(["a", cmd_args(a, parent = 1)]) + +def _check_cmdargs_artifact(x): + [a, b] = x + b = b.replace("\\", "/") + if a != "a" or not b.startswith("buck-out/") or not b.endswith("magic/path"): + fail("Output is not as expected, got " + repr(x)) + +def _create_target(ctx: AnalysisContext): + # We don't want to hardcode the label, as different roots may change it, + # so instead check it matches the string representation + return [ctx.label.raw_target(), str(ctx.label.raw_target())] + +def _check_target(x): + [a, b] = x + if a != b: + fail("Targets should match, got " + repr(x)) + +def _create_label(ctx: AnalysisContext): + # We don't want to hardcode the label, as different roots may change it, + # so instead check it matches the string representation + return [ctx.label, str(ctx.label)] + +def _check_label(x): + [a, b] = x + if a != b: + fail("Labels should match, got " + repr(x)) + +def _create_enum_value(_ctx: AnalysisContext): + typ = enum("foo") + return [typ("foo"), "foo"] + +def _check_enum_value(x): + [a, b] = x + if a != b: + fail("Enum values should match, got " + repr(x)) + +TestProvider = provider(fields = ["foo"]) + +def _create_provider_value(ctx: AnalysisContext): + a = ctx.actions.write("path/test.txt", "") + return [TestProvider(foo = a), a] + +def _check_provider_value(x): + [prov, a] = x + if prov["foo"] != a: + fail("Provider values should match, got " + repr(x)) + +_MyRec = record(hello = typing.Any, bye = typing.Any) + +tests = [ + ("atom", lambda _: "test", "test"), + ("simple", lambda _: [1], [1]), + ("nested", lambda _: [42, {"test": True}], [42, {"test": True}]), + ("record", lambda _: _MyRec(hello = [1], bye = {}), {"bye": {}, "hello": [1]}), + ("struct", lambda _: struct(hello = [1], bye = struct()), {"bye": {}, "hello": [1]}), + ("artifact", _create_artifact, _check_artifact), + ("artifact_declared", _create_artifact_declared, _check_artifact), + ("artifact_output", _create_artifact_as_output, _check_artifact), + ("target", _create_target, _check_target), + ("label", _create_label, _check_label), + ("cmdargs", lambda _: {"more": cmd_args(["a", "b", "c"], format = "1{}")}, {"more": ["1a", "1b", "1c"]}), + ("cmdargs_single", lambda _: {"test": cmd_args("abc")}, {"test": ["abc"]}), + ("cmdargs_concat", lambda _: {"test": cmd_args("abc", delimiter = "")}, {"test": "abc"}), + ("cmdargs_artifact", _create_cmdargs_artifact, _check_cmdargs_artifact), + ("enum", _create_enum_value, _check_enum_value), + ("provider", _create_provider_value, _check_provider_value), +] + +def _write_json_rule_impl(ctx: AnalysisContext) -> list[Provider]: + want = ctx.label.name + for name, input, output in tests: + if name == want: + input_file = ctx.actions.write_json("input", input(ctx)) + output_file = ctx.actions.declare_output("output") + + def f(ctx: 
AnalysisContext, artifacts, outputs): + contents = artifacts[input_file].read_json() + if type(output) == "function": + output(contents) + elif contents == output: + pass + else: + fail("JSON divergence in " + name + ": Got " + repr(contents) + ", wanted " + repr(output)) + ctx.actions.write(outputs[output_file], "") + + ctx.actions.dynamic_output(dynamic = [input_file], inputs = [], outputs = [output_file.as_output()], f = f) + return [DefaultInfo(default_output = output_file)] + fail("Test named " + want + " not found") + +write_json_rule = rule(impl = _write_json_rule_impl, attrs = {}) + +def _write_json_pretty_rule_impl(ctx: AnalysisContext) -> list[Provider]: + value = {"key1": [1], "key2": [True, False]} + + # @unsorted-dict-items + tests = { + "default": ( + ctx.actions.write_json("default_input", value), + ctx.actions.declare_output("default_output"), + '{"key1":[1],"key2":[true,false]}', + ), + "compact": ( + ctx.actions.write_json("compact_input", value, pretty = False), + ctx.actions.declare_output("compact_output"), + '{"key1":[1],"key2":[true,false]}', + ), + "pretty": ( + ctx.actions.write_json("pretty_input", value, pretty = True), + ctx.actions.declare_output("pretty_output"), + '{\n "key1": [\n 1\n ],\n "key2": [\n true,\n false\n ]\n}\n', + ), + } + + if tests["default"][2] != tests["compact"][2]: + fail("The default for no 'pretty' must be compact") + + def f(ctx: AnalysisContext, artifacts, outputs): + for k, (input, output, expected) in tests.items(): + actual = artifacts[input].read_string() + if actual != expected: + fail("JSON divergence in '{}': Got {}, wanted {}".format(k, repr(actual), repr(expected))) + ctx.actions.write(outputs[output], "") + + ctx.actions.dynamic_output( + dynamic = [input for input, _, _ in tests.values()], + inputs = [], + outputs = [output.as_output() for _, output, _ in tests.values()], + f = f, + ) + + return [DefaultInfo(default_outputs = [output for _, output, _ in tests.values()])] + +write_json_pretty_rule = rule(impl = _write_json_pretty_rule_impl, attrs = {}) + +def _write_json_with_inputs_rule(ctx: AnalysisContext) -> list[Provider]: + input = ctx.actions.write("input", ctx.attrs.content) + as_json = ctx.actions.write_json("json", input, with_inputs = True) + + output = ctx.actions.declare_output("output") + + # as_json will contain a quoted path and we want to read the contents of that path + script = ctx.actions.write("script.py", ["import sys;p_fp=open(sys.argv[1],'r');p=p_fp.read().replace('\"',\"\");i_fp=open(p,'r');i=i_fp.read();o_fp=open(sys.argv[2],'w');o_fp.write(i)"]) + cmd = cmd_args("python3", script, as_json, output.as_output()) + ctx.actions.run(cmd, category = "cmd") + + marker = ctx.actions.declare_output("marker") + + def f(ctx: AnalysisContext, artifacts, outputs): + expected = artifacts[input].read_string() + actual = artifacts[output].read_string() + if expected != actual: + fail("mismatched output.
expected `{}`, actual `{}`".format(expected, actual)) + ctx.actions.write(outputs[marker], "") + + ctx.actions.dynamic_output(dynamic = [input, output], inputs = [], outputs = [marker.as_output()], f = f) + return [DefaultInfo(default_output = marker)] + +write_json_with_inputs_rule = rule(impl = _write_json_with_inputs_rule, attrs = {"content": attrs.string()}) + +def _write_json_absolute_rule(ctx: AnalysisContext) -> list[Provider]: + src = ctx.attrs.dep[DefaultInfo].default_outputs[0] + + out = ctx.actions.write_json( + "out.json", + [src, cmd_args(src, delimiter = "")], + absolute = True, + ) + + return [DefaultInfo(out)] + +write_json_absolute_rule = rule(impl = _write_json_absolute_rule, attrs = {"dep": attrs.dep()}) + +def test(): + for name, _, _ in tests: + write_json_rule(name = name) + + write_json_pretty_rule( + name = "pretty", + ) + + inputs_content = read_config("write_json", "content") + if inputs_content == None: + fail("config value write_json.content required") + write_json_with_inputs_rule( + name = "with_inputs", + content = inputs_content, + ) + write_json_absolute_rule(name = "absolute", dep = ":atom") diff --git a/tests/core/build/actions/test_dynamic_output.py b/tests/core/build/actions/test_dynamic_output.py new file mode 100644 index 0000000000000..0f70c44733b15 --- /dev/null +++ b/tests/core/build/actions/test_dynamic_output.py @@ -0,0 +1,34 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck + +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(data_dir="everything") +async def test_dynamic_output(buck: Buck) -> None: + await buck.build("root//:") + + +@buck_test(data_dir="everything_new") +async def test_dynamic_output_new(buck: Buck) -> None: + await buck.build("root//:") + + +@buck_test(data_dir="empty_dynamic_list") +async def test_empty_dynamic_list(buck: Buck) -> None: + await buck.build("root//:empty_test") + + +@buck_test(data_dir="artifact_eq_bug") +async def test_artifact_eq_bug(buck: Buck) -> None: + await buck.build("root//:bug") diff --git a/tests/core/build/actions/test_dynamic_output_data/.buckroot b/tests/core/build/actions/test_dynamic_output_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/actions/test_dynamic_output_data/artifact_eq_bug/.buckconfig b/tests/core/build/actions/test_dynamic_output_data/artifact_eq_bug/.buckconfig new file mode 100644 index 0000000000000..1455d7017f540 --- /dev/null +++ b/tests/core/build/actions/test_dynamic_output_data/artifact_eq_bug/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] +name = TARGETS.fixture + +[repositories] +root = . +prelude = . 
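A note on the `with_inputs` fixture above: `write_json(..., with_inputs = True)` returns a command-line-like value that already carries the artifacts referenced by the JSON, so an action consuming it needs no `hidden` inputs. Below is a minimal sketch of that pattern; the rule and file names are illustrative and not part of this patch:

def _uses_json_with_inputs_impl(ctx):
    src = ctx.actions.write("src.txt", "data")
    # The returned value behaves like cmd_args and tracks `src` as an input.
    manifest = ctx.actions.write_json("manifest.json", {"src": src}, with_inputs = True)
    out = ctx.actions.declare_output("out.json")
    # Passing `manifest` as an argument is enough; no hidden dep on `src`.
    ctx.actions.run(
        cmd_args("python3", "-c", "import sys, shutil; shutil.copy(sys.argv[1], sys.argv[2])", manifest, out.as_output()),
        category = "copy_manifest",
    )
    return [DefaultInfo(default_output = out)]

uses_json_with_inputs = rule(impl = _uses_json_with_inputs_impl, attrs = {})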
diff --git a/tests/core/build/actions/test_dynamic_output_data/artifact_eq_bug/.buckroot b/tests/core/build/actions/test_dynamic_output_data/artifact_eq_bug/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/actions/test_dynamic_output_data/artifact_eq_bug/TARGETS.fixture b/tests/core/build/actions/test_dynamic_output_data/artifact_eq_bug/TARGETS.fixture new file mode 100644 index 0000000000000..f780a9ad35eea --- /dev/null +++ b/tests/core/build/actions/test_dynamic_output_data/artifact_eq_bug/TARGETS.fixture @@ -0,0 +1 @@ +bug(name = "bug") diff --git a/tests/core/build/actions/test_dynamic_output_data/artifact_eq_bug/prelude.bzl b/tests/core/build/actions/test_dynamic_output_data/artifact_eq_bug/prelude.bzl new file mode 100644 index 0000000000000..a38016be71066 --- /dev/null +++ b/tests/core/build/actions/test_dynamic_output_data/artifact_eq_bug/prelude.bzl @@ -0,0 +1,31 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# @nolint + +def _impl(ctx: AnalysisContext) -> list[Provider]: + # To trigger the bug we need to specify both `prefix` and `filename` arguments. + dir = ctx.actions.declare_output("one", "two") + + def _dyn(ctx, artifacts, outputs, dir = dir): + # The bug is here: artifacts are not equal, and map lookup fails. + dir = outputs[dir] + ctx.actions.write(dir, "x") + + ctx.actions.dynamic_output( + dynamic = [], + inputs = [], + outputs = [dir.as_output()], + f = _dyn, + ) + + return [DefaultInfo(default_output = dir)] + +bug = rule( + impl = _impl, + attrs = {}, +) diff --git a/tests/core/build/actions/test_dynamic_output_data/empty_dynamic_list/.buckconfig b/tests/core/build/actions/test_dynamic_output_data/empty_dynamic_list/.buckconfig new file mode 100644 index 0000000000000..1455d7017f540 --- /dev/null +++ b/tests/core/build/actions/test_dynamic_output_data/empty_dynamic_list/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] +name = TARGETS.fixture + +[repositories] +root = . +prelude = . diff --git a/tests/core/build/actions/test_dynamic_output_data/empty_dynamic_list/.buckroot b/tests/core/build/actions/test_dynamic_output_data/empty_dynamic_list/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/actions/test_dynamic_output_data/empty_dynamic_list/TARGETS.fixture b/tests/core/build/actions/test_dynamic_output_data/empty_dynamic_list/TARGETS.fixture new file mode 100644 index 0000000000000..eb295817a822e --- /dev/null +++ b/tests/core/build/actions/test_dynamic_output_data/empty_dynamic_list/TARGETS.fixture @@ -0,0 +1,5 @@ +load(":empty_defs.bzl", "test_rule") + +test_rule( + name = "empty_test", +) diff --git a/tests/core/build/actions/test_dynamic_output_data/empty_dynamic_list/empty_defs.bzl b/tests/core/build/actions/test_dynamic_output_data/empty_dynamic_list/empty_defs.bzl new file mode 100644 index 0000000000000..472e684b12e82 --- /dev/null +++ b/tests/core/build/actions/test_dynamic_output_data/empty_dynamic_list/empty_defs.bzl @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(ctx): + out = ctx.actions.declare_output("out.txt") + + def body(ctx, _dynamic_artifacts, outputs): + ctx.actions.write(outputs[out].as_output(), "42") + + ctx.actions.dynamic_output(dynamic = [], inputs = [], outputs = [out.as_output()], f = body) + return [DefaultInfo(default_output = out)] + +test_rule = rule( + impl = _impl, + attrs = {}, +) diff --git a/tests/core/build/actions/test_dynamic_output_data/empty_dynamic_list/prelude.bzl b/tests/core/build/actions/test_dynamic_output_data/empty_dynamic_list/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/actions/test_dynamic_output_data/everything/.buckconfig b/tests/core/build/actions/test_dynamic_output_data/everything/.buckconfig new file mode 100644 index 0000000000000..1455d7017f540 --- /dev/null +++ b/tests/core/build/actions/test_dynamic_output_data/everything/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] +name = TARGETS.fixture + +[repositories] +root = . +prelude = . diff --git a/tests/core/build/actions/test_dynamic_output_data/everything/.buckroot b/tests/core/build/actions/test_dynamic_output_data/everything/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/actions/test_dynamic_output_data/everything/TARGETS.fixture b/tests/core/build/actions/test_dynamic_output_data/everything/TARGETS.fixture new file mode 100644 index 0000000000000..bf7573e852837 --- /dev/null +++ b/tests/core/build/actions/test_dynamic_output_data/everything/TARGETS.fixture @@ -0,0 +1,50 @@ +load(":dynamic.bzl", "assert_output_value", "dynamic_check", "proto_genrule") + +dynamic_check(name = "basic") +dynamic_check(name = "two") +dynamic_check(name = "command") +dynamic_check(name = "create") +dynamic_check(name = "create_duplicate") +dynamic_check(name = "nested") + +assert_output_value( + name = "basic_check", + dep = ":basic", + value = "42", +) + +assert_output_value( + name = "two_output1", + dep = ":two[output1]", + value = "output1_test", +) + +assert_output_value( + name = "two_output2", + dep = ":two[output2]", + value = "output2_test", +) + +assert_output_value( + name = "command_output", + dep = ":command", + value = "Hello world\n", +) + +assert_output_value( + name = "create_check", + dep = ":create", + value = "42", +) + +assert_output_value( + name = "create_duplicate_check", + dep = ":create_duplicate", + value = "42", +) + +proto_genrule( + name = "nested_check", + python = "import os; fp = open(r'$(location :nested)/nested_output'); 'output1_test\\noutput2_test' in fp.read() and open(os.getenv('OUT'), 'w')", + out = "out.txt", +) diff --git a/tests/core/build/actions/test_dynamic_output_data/everything/dynamic.bzl b/tests/core/build/actions/test_dynamic_output_data/everything/dynamic.bzl new file mode 100644 index 0000000000000..a1bb18f43c7b0 --- /dev/null +++ b/tests/core/build/actions/test_dynamic_output_data/everything/dynamic.bzl @@ -0,0 +1,220 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
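+ +# These rules exercise ctx.actions.dynamic_output: each callback reads its +# materialized input artifacts and must write every declared output through +# the outputs[...] map.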
+ +# Basic test +def _basic(ctx: AnalysisContext) -> list[Provider]: + input = ctx.actions.write("input", str(7 * 6)) + output = ctx.actions.declare_output("output") + + def f(ctx: AnalysisContext, artifacts, outputs): + src = artifacts[input].read_string() + assert_eq(src, "42") + ctx.actions.write(outputs[output], src) + + ctx.actions.dynamic_output(dynamic = [input], inputs = [], outputs = [output.as_output()], f = f) + return [DefaultInfo(default_output = output)] + +# Produce two output files +def _two(ctx: AnalysisContext) -> list[Provider]: + input = ctx.actions.write("input", "test") + output1 = ctx.actions.declare_output("output1") + output2 = ctx.actions.declare_output("output2") + + def f(ctx: AnalysisContext, artifacts, outputs): + src = artifacts[input].read_string() + ctx.actions.write(outputs[output1], "output1_" + src) + ctx.actions.write(outputs[output2], "output2_" + src) + + ctx.actions.dynamic_output(dynamic = [input], inputs = [], outputs = [output1.as_output(), output2.as_output()], f = f) + sub_targets = { + "output1": [DefaultInfo(default_output = output1)], + "output2": [DefaultInfo(default_output = output2)], + } + return [DefaultInfo( + sub_targets = sub_targets, + )] + +# Nested dynamic outputs +def _nested(ctx: AnalysisContext) -> list[Provider]: + input = ctx.actions.write("input", "test") + symlinked_dir = ctx.actions.declare_output("output1_symlinked_dir", dir = True) + + def f(ctx: AnalysisContext, artifacts, outputs): + src = artifacts[input].read_string() + output1 = ctx.actions.declare_output("output1") + output2 = ctx.actions.declare_output("output2") + ctx.actions.write(output1, "output1_" + src) + ctx.actions.write(output2, "output2_" + src) + symlink_tree = { + "output1": output1, + "output2": output2, + } + nested_output = ctx.actions.declare_output("nested_output") + + def f2(ctx: AnalysisContext, artifacts, outputs): + nested_src1 = artifacts[output1].read_string() + nested_src2 = artifacts[output2].read_string() + ctx.actions.write(outputs[nested_output], [nested_src1, nested_src2]) + + ctx.actions.dynamic_output( + dynamic = [output1, output2], + inputs = [], + outputs = [nested_output.as_output()], + f = f2, + ) + + symlink_tree["nested_output"] = nested_output + ctx.actions.symlinked_dir(outputs[symlinked_dir], symlink_tree) + + ctx.actions.dynamic_output(dynamic = [input], inputs = [], outputs = [symlinked_dir.as_output()], f = f) + return [DefaultInfo(default_output = symlinked_dir)] + +# Produce two output files, using a command +def _command(ctx: AnalysisContext) -> list[Provider]: + hello = ctx.actions.declare_output("hello.txt") + write_hello = ctx.actions.write( + "hello.py", + [ + cmd_args(["with open(r'", hello, "', 'w') as f:"], delimiter = ""), + " f.write('Hello\\n')", + ], + ) + ctx.actions.run(cmd_args(["python3", write_hello], hidden = hello.as_output()), category = "test_category") + + world = ctx.actions.declare_output("world") + universe = ctx.actions.declare_output("universe") + + script = ctx.actions.write( + "script.py", + [ + "import sys", + "with open(sys.argv[2], 'w') as f:", + " f.write(sys.argv[1] + ' world\\n')", + "with open(sys.argv[3], 'w') as f:", + " f.write(sys.argv[1] + ' universe\\n')", + ], + ) + + def f(ctx: AnalysisContext, artifacts, outputs): + src = artifacts[hello].read_string().strip() + assert_eq(src, "Hello") + ctx.actions.run( + cmd_args(["python3", script, src, outputs[world].as_output(), outputs[universe].as_output()]), + category = "dynamic_check", + ) + + ctx.actions.dynamic_output( + dynamic = 
[hello], + inputs = [script], + outputs = [world.as_output(), universe.as_output()], + f = f, + ) + return [DefaultInfo(default_output = world, other_outputs = [universe])] + +# Create a fresh output inside the dynamic +def _create(ctx: AnalysisContext) -> list[Provider]: + input = ctx.actions.write("input", str(7 * 6)) + output = ctx.actions.declare_output("output") + + def f(ctx: AnalysisContext, artifacts, outputs): + src = artifacts[input].read_string() + new_file = ctx.actions.write("new_file", src) + ctx.actions.copy_file(outputs[output], new_file) + + ctx.actions.dynamic_output(dynamic = [input], inputs = [], outputs = [output.as_output()], f = f) + return [DefaultInfo(default_output = output)] + +# Create a fresh output inside the dynamic, which clashes +def _create_duplicate(ctx: AnalysisContext) -> list[Provider]: + input = ctx.actions.write("input", str(7 * 6)) + output = ctx.actions.declare_output("output") + + def f(ctx: AnalysisContext, artifacts, outputs): + src = artifacts[input].read_string() + + # Deliberately reuse the names input/output + new_output = ctx.actions.write("output", src) + + # We can't have two actions that do copy with "output" as the name + # since then we get conflicting identifiers for category `copy`. + # I.e. the two copy() actions below can't both end up as "output" and outputs[output]. + # We could allow copy to take an explicit identifier, but this is a corner + # case and I don't think it's a good idea to reuse names heavily anyway. + new_input = ctx.actions.copy_file("input", new_output) + ctx.actions.copy_file(outputs[output], new_input) + + ctx.actions.dynamic_output(dynamic = [input], inputs = [], outputs = [output.as_output()], f = f) + return [DefaultInfo(default_output = output)] + +def _impl(ctx: AnalysisContext) -> list[Provider]: + if ctx.label.name == "basic": + return _basic(ctx) + elif ctx.label.name == "two": + return _two(ctx) + elif ctx.label.name == "command": + return _command(ctx) + elif ctx.label.name == "create": + return _create(ctx) + elif ctx.label.name == "create_duplicate": + return _create_duplicate(ctx) + elif ctx.label.name == "nested": + return _nested(ctx) + else: + fail("Unknown test: " + ctx.label.name) + +dynamic_check = rule(impl = _impl, attrs = {}) + +def assert_eq(a, b): + if a != b: + fail("Expected equal, but got", a, b) + +def _assert_output_value_impl(ctx: AnalysisContext) -> list[Provider]: + produced = ctx.attrs.dep[DefaultInfo].default_outputs[0] + value = ctx.actions.write("value", ctx.attrs.value) + output = ctx.actions.declare_output("output") + run = ctx.actions.write( + "run.py", + [ + "import sys", + "with open(sys.argv[1]) as f:", + " value_content = f.read()", + "with open(sys.argv[2]) as f:", + " produced_content = f.read()", + "if value_content != produced_content:", + " print('Content does not match!
Expected:', value_content, 'Got:', produced_content)", + " sys.exit(1)", + "with open(sys.argv[3], 'w') as f:", + " f.write('Success\\n')", + ], + ) + ctx.actions.run(cmd_args(["python3", run, value, produced, output.as_output()]), category = "test_category") + return [DefaultInfo(default_output = output)] + +assert_output_value = rule(impl = _assert_output_value_impl, attrs = { + "dep": attrs.dep(), + "value": attrs.string(), +}) + +def _proto_genrule_impl(ctx): + out_artifact = ctx.actions.declare_output(ctx.attrs.out) + env_vars = { + "OUT": cmd_args(out_artifact.as_output()), + } + ctx.actions.run( + cmd_args(["python3", "-c", ctx.attrs.python]), + env = env_vars, + category = "genrule", + ) + return [DefaultInfo(default_output = out_artifact)] + +proto_genrule = rule( + impl = _proto_genrule_impl, + attrs = { + "out": attrs.string(), + "python": attrs.option(attrs.arg(), default = None), + }, +) diff --git a/tests/core/build/actions/test_dynamic_output_data/everything/prelude.bzl b/tests/core/build/actions/test_dynamic_output_data/everything/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/actions/test_dynamic_output_data/everything_new/.buckconfig b/tests/core/build/actions/test_dynamic_output_data/everything_new/.buckconfig new file mode 100644 index 0000000000000..1455d7017f540 --- /dev/null +++ b/tests/core/build/actions/test_dynamic_output_data/everything_new/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] +name = TARGETS.fixture + +[repositories] +root = . +prelude = . diff --git a/tests/core/build/actions/test_dynamic_output_data/everything_new/.buckroot b/tests/core/build/actions/test_dynamic_output_data/everything_new/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/actions/test_dynamic_output_data/everything_new/TARGETS.fixture b/tests/core/build/actions/test_dynamic_output_data/everything_new/TARGETS.fixture new file mode 100644 index 0000000000000..bf7573e852837 --- /dev/null +++ b/tests/core/build/actions/test_dynamic_output_data/everything_new/TARGETS.fixture @@ -0,0 +1,50 @@ +load(":dynamic.bzl", "assert_output_value", "dynamic_check", "proto_genrule") + +dynamic_check(name = "basic") +dynamic_check(name = "two") +dynamic_check(name = "command") +dynamic_check(name = "create") +dynamic_check(name = "create_duplicate") +dynamic_check(name = "nested") + +assert_output_value( + name = "basic_check", + dep = ":basic", + value = "42", +) + +assert_output_value( + name = "two_output1", + dep = ":two[output1]", + value = "output1_test", +) + +assert_output_value( + name = "two_output2", + dep = ":two[output2]", + value = "output2_test", +) + +assert_output_value( + name = "command_output", + dep = ":command", + value = "Hello world\n", +) + +assert_output_value( + name = "create_check", + dep = ":create", + value = "42", +) + +assert_output_value( + name = "create_duplicate_check", + dep = ":create_duplicate", + value = "42", +) + +proto_genrule( + name = "nested_check", + python = "import os; fp = open(r'$(location :nested)/nested_output'); 'output1_test\\noutput2_test' in fp.read() and open(os.getenv('OUT'), 'w')", + out = "out.txt", +) diff --git a/tests/core/build/actions/test_dynamic_output_data/everything_new/dynamic.bzl b/tests/core/build/actions/test_dynamic_output_data/everything_new/dynamic.bzl new file mode 100644 index 0000000000000..1dc7e034cf48b --- /dev/null +++ b/tests/core/build/actions/test_dynamic_output_data/everything_new/dynamic.bzl @@ -0,0 +1,300 @@ +# Copyright (c) 
Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _basic_f_impl(actions: AnalysisActions, src: ArtifactValue, out: OutputArtifact): + src = src.read_string() + assert_eq(src, "42") + actions.write(out, src) + return [] + +_basic_f = dynamic_actions( + impl = _basic_f_impl, + attrs = { + "out": dynattrs.output(), + "src": dynattrs.artifact_value(), + }, +) + +# Basic test +def _basic(ctx: AnalysisContext) -> list[Provider]: + input = ctx.actions.write("input", str(7 * 6)) + output = ctx.actions.declare_output("output") + + ctx.actions.dynamic_output_new(_basic_f( + src = input, + out = output.as_output(), + )) + return [DefaultInfo(default_output = output)] + +def _two_f_impl(actions: AnalysisActions, outs: tuple, src: ArtifactValue): + src = src.read_string() + [output1, output2] = outs + actions.write(output1, "output1_" + src) + actions.write(output2, "output2_" + src) + return [] + +_two_f = dynamic_actions( + impl = _two_f_impl, + attrs = { + "outs": dynattrs.tuple(dynattrs.output(), dynattrs.output()), + "src": dynattrs.artifact_value(), + }, +) + +# Produce two output files +def _two(ctx: AnalysisContext) -> list[Provider]: + input = ctx.actions.write("input", "test") + output1 = ctx.actions.declare_output("output1") + output2 = ctx.actions.declare_output("output2") + + ctx.actions.dynamic_output_new(_two_f( + src = input, + outs = (output1.as_output(), output2.as_output()), + )) + sub_targets = { + "output1": [DefaultInfo(default_output = output1)], + "output2": [DefaultInfo(default_output = output2)], + } + return [DefaultInfo( + sub_targets = sub_targets, + )] + +def _nested_f_impl(actions: AnalysisActions, input: ArtifactValue, symlinked_dir: OutputArtifact): + src = input.read_string() + output1 = actions.declare_output("output1") + output2 = actions.declare_output("output2") + actions.write(output1, "output1_" + src) + actions.write(output2, "output2_" + src) + symlink_tree = { + "output1": output1, + "output2": output2, + } + nested_output = actions.declare_output("nested_output") + + actions.dynamic_output_new(_nested_f2( + output1 = output1, + output2 = output2, + nested_output = nested_output.as_output(), + )) + + symlink_tree["nested_output"] = nested_output + actions.symlinked_dir(symlinked_dir, symlink_tree) + return [] + +def _nested_f2_impl(actions: AnalysisActions, output1: ArtifactValue, output2: ArtifactValue, nested_output: OutputArtifact): + nested_src1 = output1.read_string() + nested_src2 = output2.read_string() + actions.write(nested_output, [nested_src1, nested_src2]) + return [] + +_nested_f = dynamic_actions( + impl = _nested_f_impl, + attrs = { + "input": dynattrs.artifact_value(), + "symlinked_dir": dynattrs.output(), + }, +) +_nested_f2 = dynamic_actions( + impl = _nested_f2_impl, + attrs = { + "nested_output": dynattrs.output(), + "output1": dynattrs.artifact_value(), + "output2": dynattrs.artifact_value(), + }, +) + +# Nested dynamic outputs +def _nested(ctx: AnalysisContext) -> list[Provider]: + input = ctx.actions.write("input", "test") + symlinked_dir = ctx.actions.declare_output("output1_symlinked_dir", dir = True) + + ctx.actions.dynamic_output_new(_nested_f( + input = input, + symlinked_dir = symlinked_dir.as_output(), + )) + return [DefaultInfo(default_output = symlinked_dir)] + +def 
_command_f_impl(actions: AnalysisActions, hello: ArtifactValue, world: OutputArtifact, universe: OutputArtifact, script: Artifact): + src = hello.read_string().strip() + assert_eq(src, "Hello") + actions.run( + cmd_args(["python3", script, src, world, universe]), + category = "dynamic_check", + ) + return [] + +_command_f = dynamic_actions( + impl = _command_f_impl, + attrs = { + "hello": dynattrs.artifact_value(), + "script": dynattrs.value(Artifact), + "universe": dynattrs.output(), + "world": dynattrs.output(), + }, +) + +# Produce two output files, using a command +def _command(ctx: AnalysisContext) -> list[Provider]: + hello = ctx.actions.declare_output("hello.txt") + write_hello = ctx.actions.write( + "hello.py", + [ + cmd_args(["with open(r'", hello, "', 'w') as f:"], delimiter = ""), + " f.write('Hello\\n')", + ], + ) + ctx.actions.run(cmd_args(["python3", write_hello], hidden = hello.as_output()), category = "test_category") + + world = ctx.actions.declare_output("world") + universe = ctx.actions.declare_output("universe") + + script = ctx.actions.write( + "script.py", + [ + "import sys", + "with open(sys.argv[2], 'w') as f:", + " f.write(sys.argv[1] + ' world\\n')", + "with open(sys.argv[3], 'w') as f:", + " f.write(sys.argv[1] + ' universe\\n')", + ], + ) + + ctx.actions.dynamic_output_new(_command_f( + hello = hello, + script = script, + world = world.as_output(), + universe = universe.as_output(), + )) + return [DefaultInfo(default_output = world, other_outputs = [universe])] + +def _create_f_impl(actions: AnalysisActions, input: ArtifactValue, output: OutputArtifact): + src = input.read_string() + new_file = actions.write("new_file", src) + actions.copy_file(output, new_file) + return [] + +_create_f = dynamic_actions( + impl = _create_f_impl, + attrs = { + "input": dynattrs.artifact_value(), + "output": dynattrs.output(), + }, +) + +# Create a fresh output inside the dynamic +def _create(ctx: AnalysisContext) -> list[Provider]: + input = ctx.actions.write("input", str(7 * 6)) + output = ctx.actions.declare_output("output") + + ctx.actions.dynamic_output_new(_create_f( + input = input, + output = output.as_output(), + )) + return [DefaultInfo(default_output = output)] + +def _create_duplicate_f_impl(actions: AnalysisActions, input: ArtifactValue, output: OutputArtifact): + src = input.read_string() + + # Deliberately reuse the names input/output + new_output = actions.write("output", src) + + # We can't have two actions that do copy with "output" as the name + # since then we get conflicting identifiers for category `copy`. + # I.e. the two copy() actions below can't both end up as "output" and outputs[output]. + # We could allow copy to take an explicit identifier, but this is a corner + # case and I don't think it's a good idea to reuse names heavily anyway.
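+ # (In this dynamic_actions variant there is no outputs[] map: output is + # already an OutputArtifact parameter bound via dynattrs.output().)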
+ new_input = actions.copy_file("input", new_output) + actions.copy_file(output, new_input) + return [] + +_create_duplicate_f = dynamic_actions( + impl = _create_duplicate_f_impl, + attrs = { + "input": dynattrs.artifact_value(), + "output": dynattrs.output(), + }, +) + +# Create a fresh output inside the dynamic, which clashes +def _create_duplicate(ctx: AnalysisContext) -> list[Provider]: + input = ctx.actions.write("input", str(7 * 6)) + output = ctx.actions.declare_output("output") + + ctx.actions.dynamic_output_new(_create_duplicate_f( + input = input, + output = output.as_output(), + )) + return [DefaultInfo(default_output = output)] + +def _impl(ctx: AnalysisContext) -> list[Provider]: + if ctx.label.name == "basic": + return _basic(ctx) + elif ctx.label.name == "two": + return _two(ctx) + elif ctx.label.name == "command": + return _command(ctx) + elif ctx.label.name == "create": + return _create(ctx) + elif ctx.label.name == "create_duplicate": + return _create_duplicate(ctx) + elif ctx.label.name == "nested": + return _nested(ctx) + else: + fail("Unknown test: " + ctx.label.name) + +dynamic_check = rule(impl = _impl, attrs = {}) + +def assert_eq(a, b): + if a != b: + fail("Expected equal, but got", a, b) + +def _assert_output_value_impl(ctx: AnalysisContext) -> list[Provider]: + produced = ctx.attrs.dep[DefaultInfo].default_outputs[0] + value = ctx.actions.write("value", ctx.attrs.value) + output = ctx.actions.declare_output("output") + run = ctx.actions.write( + "run.py", + [ + "import sys", + "with open(sys.argv[1]) as f:", + " value_content = f.read()", + "with open(sys.argv[2]) as f:", + " produced_content = f.read()", + "if value_content != produced_content:", + " print('Content does not match! Expected:', value_content, 'Got:', produced_content)", + " sys.exit(1)", + "with open(sys.argv[3], 'w') as f:", + " f.write('Success\\n')", + ], + ) + ctx.actions.run(cmd_args(["python3", run, value, produced, output.as_output()]), category = "test_category") + return [DefaultInfo(default_output = output)] + +assert_output_value = rule(impl = _assert_output_value_impl, attrs = { + "dep": attrs.dep(), + "value": attrs.string(), +}) + +def _proto_genrule_impl(ctx): + out_artifact = ctx.actions.declare_output(ctx.attrs.out) + env_vars = { + "OUT": cmd_args(out_artifact.as_output()), + } + ctx.actions.run( + cmd_args(["python3", "-c", ctx.attrs.python]), + env = env_vars, + category = "genrule", + ) + return [DefaultInfo(default_output = out_artifact)] + +proto_genrule = rule( + impl = _proto_genrule_impl, + attrs = { + "out": attrs.string(), + "python": attrs.option(attrs.arg(), default = None), + }, +) diff --git a/tests/core/build/actions/test_dynamic_output_data/everything_new/prelude.bzl b/tests/core/build/actions/test_dynamic_output_data/everything_new/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/actions/test_dynamic_value.py b/tests/core/build/actions/test_dynamic_value.py new file mode 100644 index 0000000000000..ce4738d45bd7e --- /dev/null +++ b/tests/core/build/actions/test_dynamic_value.py @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
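+ +# Verifies that a dynamic value produced by one dynamic_actions invocation can +# be consumed by another through ResolvedDynamicValue.providers.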
+ +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_dynamic_value(buck: Buck) -> None: + result = await buck.build("//:test_rule") + out = result.get_build_report().output_for_target("//:test_rule") + with open(out, "r") as f: + assert f.read().strip() == "<<<123>>>" diff --git a/tests/core/build/actions/test_dynamic_value_data/.buckconfig b/tests/core/build/actions/test_dynamic_value_data/.buckconfig new file mode 100644 index 0000000000000..1455d7017f540 --- /dev/null +++ b/tests/core/build/actions/test_dynamic_value_data/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] +name = TARGETS.fixture + +[repositories] +root = . +prelude = . diff --git a/tests/core/build/actions/test_dynamic_value_data/.buckroot b/tests/core/build/actions/test_dynamic_value_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/actions/test_dynamic_value_data/TARGETS.fixture b/tests/core/build/actions/test_dynamic_value_data/TARGETS.fixture new file mode 100644 index 0000000000000..14aa2dbcbd827 --- /dev/null +++ b/tests/core/build/actions/test_dynamic_value_data/TARGETS.fixture @@ -0,0 +1,3 @@ +test_rule( + name = "test_rule", +) diff --git a/tests/core/build/actions/test_dynamic_value_data/prelude.bzl b/tests/core/build/actions/test_dynamic_value_data/prelude.bzl new file mode 100644 index 0000000000000..05cd98807dc28 --- /dev/null +++ b/tests/core/build/actions/test_dynamic_value_data/prelude.bzl @@ -0,0 +1,48 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +MyInfo = provider(fields = {"hgfd": int}) + +def _produce_dynamic_value_impl(actions: AnalysisActions) -> list[Provider]: + _ignore = (actions,) # buildifier: disable=unused-variable + return [ + MyInfo(hgfd = 123), + ] + +_produce_dynamic_value = dynamic_actions( + impl = _produce_dynamic_value_impl, + attrs = {}, +) + +def _consume_dynamic_value_impl(actions, out: OutputArtifact, v: ResolvedDynamicValue) -> list[Provider]: + value = v.providers[MyInfo].hgfd + actions.write(out, "<<<{}>>>".format(value)) + return [] + +_consume_dynamic_value = dynamic_actions( + impl = _consume_dynamic_value_impl, + attrs = { + "out": dynattrs.output(), + "v": dynattrs.dynamic_value(), + }, +) + +def _test_rule(ctx): + v = ctx.actions.dynamic_output_new(_produce_dynamic_value()) + + out = ctx.actions.declare_output("poiuy") + ctx.actions.dynamic_output_new(_consume_dynamic_value( + v = v, + out = out.as_output(), + )) + + return [DefaultInfo(default_output = out)] + +test_rule = rule( + impl = _test_rule, + attrs = {}, +) diff --git a/tests/core/build/actions/test_output_artifact_twice.py b/tests/core/build/actions/test_output_artifact_twice.py new file mode 100644 index 0000000000000..8d28ca27b173b --- /dev/null +++ b/tests/core/build/actions/test_output_artifact_twice.py @@ -0,0 +1,34 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_output_artifact_twice_same(buck: Buck) -> None: + res = await buck.build("root//:test_output_artifact_twice_same") + assert ( + res.get_build_report() + .output_for_target("root//:test_output_artifact_twice_same") + .read_text() + == "green lamp" + ) + + +@buck_test() +async def test_output_artifact_twice_with_projection(buck: Buck) -> None: + res = await buck.build("root//:test_output_artifact_twice_with_projection") + assert ( + res.get_build_report().output_for_target( + "root//:test_output_artifact_twice_with_projection" + ) + / "rel" + ).read_text() == "red alert" diff --git a/tests/core/build/actions/test_output_artifact_twice_data/.buckconfig b/tests/core/build/actions/test_output_artifact_twice_data/.buckconfig new file mode 100644 index 0000000000000..d0700c8abeaec --- /dev/null +++ b/tests/core/build/actions/test_output_artifact_twice_data/.buckconfig @@ -0,0 +1,8 @@ +[buildfile] +name = TARGETS.fixture + +[cells] +root = . + +[cell_aliases] +prelude = root diff --git a/tests/core/build/actions/test_output_artifact_twice_data/.buckroot b/tests/core/build/actions/test_output_artifact_twice_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/actions/test_output_artifact_twice_data/TARGETS.fixture b/tests/core/build/actions/test_output_artifact_twice_data/TARGETS.fixture new file mode 100644 index 0000000000000..5f4fe89d612b7 --- /dev/null +++ b/tests/core/build/actions/test_output_artifact_twice_data/TARGETS.fixture @@ -0,0 +1,2 @@ +test_output_artifact_twice_same(name = "test_output_artifact_twice_same") +test_output_artifact_twice_with_projection(name = "test_output_artifact_twice_with_projection") diff --git a/tests/core/build/actions/test_output_artifact_twice_data/prelude.bzl b/tests/core/build/actions/test_output_artifact_twice_data/prelude.bzl new file mode 100644 index 0000000000000..ce1c8c606a8b2 --- /dev/null +++ b/tests/core/build/actions/test_output_artifact_twice_data/prelude.bzl @@ -0,0 +1,48 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# Pass the same artifact twice to the `run` action. +def _test_output_artifact_twice_same(ctx: AnalysisContext) -> list[Provider]: + a = ctx.actions.declare_output("uuuuuu") + ctx.actions.run(["python3", "-c", """ +import sys +[_, f1, f2] = sys.argv +assert f1 == f2 +with open(f1, "w") as f: + f.write("green lamp") +""", a.as_output(), a.as_output()], category = "ignore") + return [DefaultInfo(default_output = a)] + +test_output_artifact_twice_same = rule( + impl = _test_output_artifact_twice_same, + attrs = {}, +) + +# Pass the same artifact twice to the `run` action, but the second as a projection.
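+# The inline script asserts that the projected output resolves to +# "<parent>/rel", and that the parent directory has to be created before +# writing through the projection.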
+def _test_output_artifact_twice_with_projection(ctx: AnalysisContext) -> list[Provider]: + a = ctx.actions.declare_output("ttttttttt") + b = a.project("rel") + ctx.actions.run(["python3", "-c", r""" +import sys +import os + +[_, f1, f2] = sys.argv +bs = "\\" +assert f"{f1.replace(bs, '/')}/rel" == f2.replace(bs, '/') + +os.mkdir(f1) + +with open(f2, "w") as f: + f.write("red alert") + +""", a.as_output(), b.as_output()], category = "ignore") + return [DefaultInfo(default_output = a)] + +test_output_artifact_twice_with_projection = rule( + impl = _test_output_artifact_twice_with_projection, + attrs = {}, +) diff --git a/tests/core/build/actions/test_projected_output_artifact.py b/tests/core/build/actions/test_projected_output_artifact.py new file mode 100644 index 0000000000000..e4ba64b4cd1b2 --- /dev/null +++ b/tests/core/build/actions/test_projected_output_artifact.py @@ -0,0 +1,32 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_projected_output_artifact_write(buck: Buck) -> None: + res = await buck.build("root//:write") + # TODO(nga): this is a bug: we write into projected artifact, but return original artifact, + # and yet here we read from original non-projected artifact. + assert ( + "ccoonntteenntt" + == res.get_build_report().output_for_target("root//:write").read_text() + ) + + +@buck_test() +async def test_projected_output_artifact_run(buck: Buck) -> None: + res = await buck.build("root//:run") + assert ( + "hello" + == (res.get_build_report().output_for_target("root//:run") / "rel").read_text() + ) diff --git a/tests/core/build/actions/test_projected_output_artifact_data/.buckconfig b/tests/core/build/actions/test_projected_output_artifact_data/.buckconfig new file mode 100644 index 0000000000000..3d42b8fbadaac --- /dev/null +++ b/tests/core/build/actions/test_projected_output_artifact_data/.buckconfig @@ -0,0 +1,5 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . diff --git a/tests/core/build/actions/test_projected_output_artifact_data/.buckroot b/tests/core/build/actions/test_projected_output_artifact_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/actions/test_projected_output_artifact_data/TARGETS.fixture b/tests/core/build/actions/test_projected_output_artifact_data/TARGETS.fixture new file mode 100644 index 0000000000000..fdb5f48019546 --- /dev/null +++ b/tests/core/build/actions/test_projected_output_artifact_data/TARGETS.fixture @@ -0,0 +1,4 @@ +load(":defs.bzl", "run_rel_action", "write_rel_action") + +write_rel_action(name = "write") +run_rel_action(name = "run") diff --git a/tests/core/build/actions/test_projected_output_artifact_data/defs.bzl b/tests/core/build/actions/test_projected_output_artifact_data/defs.bzl new file mode 100644 index 0000000000000..63ff895d83ee5 --- /dev/null +++ b/tests/core/build/actions/test_projected_output_artifact_data/defs.bzl @@ -0,0 +1,39 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _write_rel_action(ctx: AnalysisContext) -> list[Provider]: + a = ctx.actions.declare_output("uuuuuu") + b = a.project("rel") + ctx.actions.write(b.as_output(), "ccoonntteenntt") + return [DefaultInfo(default_output = a)] + +write_rel_action = rule( + impl = _write_rel_action, + attrs = {}, +) + +def _run_rel_action(ctx: AnalysisContext) -> list[Provider]: + a = ctx.actions.declare_output("uuuuuu") + b = a.project("rel") + ctx.actions.run(cmd_args("python3", "-c", """ +import sys +import os + +f = sys.argv[1] + +# Here we also assert that the directory does not exist yet. +os.mkdir(os.path.dirname(f)) + +with open(f, "w") as f: + f.write("hello") +""", b.as_output()), category = "ignore") + return [DefaultInfo(default_output = a)] + +run_rel_action = rule( + impl = _run_rel_action, + attrs = {}, +) diff --git a/tests/core/build/actions/test_write.py b/tests/core/build/actions/test_write.py new file mode 100644 index 0000000000000..44b319bfd7072 --- /dev/null +++ b/tests/core/build/actions/test_write.py @@ -0,0 +1,105 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import os + +from buck2.tests.e2e_util import asserts +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + +from buck2.tests.e2e_util.helper.utils import filter_events + + +@buck_test(data_dir="write") +async def test_write_files(buck: Buck) -> None: + result = await buck.build( + "//:simple", + "//:uses_declared_output", + "//:uses_declared_output_as_output", + "//:declares_output", + "//:is_executable", + "//:writes_array_of_commands", + "//:writes_command_lines", + "//:writes_frozen_command_lines", + "//:with_inputs_and_copy", + "//:writes_absolute", + ) + build_report = result.get_build_report() + + simple = build_report.output_for_target("//:simple", rel_path=True) + + output = build_report.output_for_target("//:uses_declared_output") + assert output.read_text().rstrip() == "some content" + asserts.assert_not_executable(output) + + output = build_report.output_for_target("//:uses_declared_output_as_output") + assert output.read_text().rstrip() == "some content" + asserts.assert_not_executable(output) + + output = build_report.output_for_target("//:declares_output") + assert output.read_text().rstrip() == "some content" + asserts.assert_not_executable(output) + + output = build_report.output_for_target("//:is_executable") + assert output.read_text().rstrip() == "some content" + asserts.assert_executable(output) + + output = build_report.output_for_target("//:writes_array_of_commands") + assert output.read_text().rstrip() == f"{str(simple)}\nsome content" + asserts.assert_not_executable(output) + + output = build_report.output_for_target("//:writes_command_lines") + assert output.read_text().rstrip() == f"{str(simple)}\nsome content" + asserts.assert_not_executable(output) + + output = build_report.output_for_target("//:writes_frozen_command_lines") + assert 
output.read_text().rstrip() == str(simple) + asserts.assert_not_executable(output) + + output = build_report.output_for_target("//:with_inputs_and_copy") + assert output.read_text().rstrip() == "some content" + + output = build_report.output_for_target("//:writes_absolute") + assert os.path.isabs(output.read_text().strip()) + + +@buck_test(data_dir="write_fails") +async def test_write_files_fails_invalid_content(buck: Buck) -> None: + await expect_failure( + buck.build("//:fails_on_invalid_contents"), + stderr_regex="Type of parameter `content`", + ) + + +@buck_test(data_dir="write_fails") +async def test_write_files_fails_invalid_output(buck: Buck) -> None: + await expect_failure( + buck.build("//:fails_on_invalid_output"), + stderr_regex="Type of parameter `output`", + ) + + +@buck_test(data_dir="write") +async def test_output_size(buck: Buck) -> None: + await buck.build("//:simple") + + output_size = await filter_events( + buck, + "Event", + "data", + "SpanEnd", + "data", + "ActionExecution", + "output_size", + ) + + assert output_size + assert output_size[0] == 8 diff --git a/tests/core/build/actions/test_write_data/.buckroot b/tests/core/build/actions/test_write_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/actions/test_write_data/write/.buckconfig b/tests/core/build/actions/test_write_data/write/.buckconfig new file mode 100644 index 0000000000000..fc9c93d30eddd --- /dev/null +++ b/tests/core/build/actions/test_write_data/write/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . +prelude = . diff --git a/tests/core/build/actions/test_write_data/write/.buckroot b/tests/core/build/actions/test_write_data/write/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/actions/test_write_data/write/TARGETS.fixture b/tests/core/build/actions/test_write_data/write/TARGETS.fixture new file mode 100644 index 0000000000000..e9e21c27d7a32 --- /dev/null +++ b/tests/core/build/actions/test_write_data/write/TARGETS.fixture @@ -0,0 +1,43 @@ +load(":defs.bzl", "simple_write", "write_file") + +simple_write(name = "simple") + +write_file(name = "uses_declared_output") + +write_file(name = "uses_declared_output_as_output") + +write_file(name = "declares_output") + +write_file( + name = "is_executable", + exe = True, +) + +write_file( + name = "writes_array_of_commands", + dep = ":simple", +) + +write_file( + name = "writes_command_lines", + dep = ":simple", +) + +write_file( + name = "writes_frozen_command_lines", + dep = ":simple", +) + +write_file(name = "fails_on_invalid_contents") + +write_file(name = "fails_on_invalid_output") + +write_file( + name = "with_inputs_and_copy", + dep = ":simple", +) + +write_file( + name = "writes_absolute", + dep = ":simple", +) diff --git a/tests/core/build/actions/test_write_data/write/defs.bzl b/tests/core/build/actions/test_write_data/write/defs.bzl new file mode 100644 index 0000000000000..97057d0b6c167 --- /dev/null +++ b/tests/core/build/actions/test_write_data/write/defs.bzl @@ -0,0 +1,77 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
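+ +# _write_file_impl dispatches on the target name to cover each write() call +# shape: a declared artifact, its as_output(), or a path string as the +# destination, plus is_executable, command-line content, with_inputs scripts, +# and absolute paths.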
+ +FooInfo = provider(fields = ["args", "out"]) + +def _simple_write_impl(ctx): + out = ctx.actions.write("out.txt", "contents") + args = cmd_args([out]) + return [ + FooInfo(args = args, out = out), + DefaultInfo(default_output = out), + ] + +def _write_file_impl(ctx): + if ctx.attrs.name == "uses_declared_output": + declared = ctx.actions.declare_output(ctx.attrs.out) + output = ctx.actions.write(declared, ctx.attrs.content) + elif ctx.attrs.name == "uses_declared_output_as_output": + declared = ctx.actions.declare_output(ctx.attrs.out) + output = ctx.actions.write(declared.as_output(), ctx.attrs.content) + elif ctx.attrs.name == "declares_output": + output = ctx.actions.write(ctx.attrs.out, ctx.attrs.content) + elif ctx.attrs.name == "is_executable": + output = ctx.actions.write(ctx.attrs.out, ctx.attrs.content, is_executable = True) + elif ctx.attrs.name == "writes_array_of_commands": + cmd = [ctx.attrs.dep[FooInfo].out, ctx.attrs.content] + output = ctx.actions.write(ctx.attrs.out, cmd) + elif ctx.attrs.name == "writes_command_lines": + cmd = [ctx.attrs.dep[FooInfo].out, ctx.attrs.content] + output = ctx.actions.write(ctx.attrs.out, cmd_args(cmd)) + elif ctx.attrs.name == "writes_frozen_command_lines": + output = ctx.actions.write(ctx.attrs.out, ctx.attrs.dep[FooInfo].args) + elif ctx.attrs.name == "with_inputs_and_copy": + output1 = ctx.actions.write("intermediate.txt", ctx.attrs.content) + output2 = ctx.actions.declare_output(ctx.attrs.out) + + # Create script with output1 as its associated artifact + cmd = cmd_args(output1, format = "import sys; fp1=open('{}','r'); all=fp1.read(); fp2=open(sys.argv[1], 'w'); fp2.write(all);") + + # Replace \ with \\ for Windows compatibility + cmd = cmd_args(cmd, replace_regex = ("\\\\\\b", "\\\\")) + script = ctx.actions.write( + "script.py", + [cmd], + with_inputs = True, + ) + + # Read output1 and write back into output2. Output1 should be included as an associated artifact here so we do not need to add it as hidden + cmd = cmd_args(["python3", script, output2.as_output()]) + ctx.actions.run(cmd, category = "test") + return [DefaultInfo(default_output = output2)] + elif ctx.attrs.name == "writes_absolute": + content = [ctx.attrs.dep[FooInfo].out] + output = ctx.actions.write(ctx.attrs.out, content, absolute = True) + else: + fail("invalid test") + return [DefaultInfo(default_output = output)] + +write_file = rule( + impl = _write_file_impl, + attrs = { + "content": attrs.string(default = "some content"), + "dep": attrs.option(attrs.dep(providers = [FooInfo]), default = None), + "exe": attrs.bool(default = False), + "out": attrs.string(default = "out.txt"), + }, +) + +simple_write = rule( + impl = _simple_write_impl, + attrs = { + }, +) diff --git a/tests/core/build/actions/test_write_data/write/prelude.bzl b/tests/core/build/actions/test_write_data/write/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/actions/test_write_data/write_fails/.buckconfig b/tests/core/build/actions/test_write_data/write_fails/.buckconfig new file mode 100644 index 0000000000000..fc9c93d30eddd --- /dev/null +++ b/tests/core/build/actions/test_write_data/write_fails/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . +prelude = . 
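Several tests in this diff follow the same failure-expectation pattern: run a build that should fail and match its stderr against a regex, as `test_write_files_fails_invalid_content` does above. A minimal sketch of such a helper, assuming the awaited command raises an exception whose text carries the captured stderr (the real `expect_failure` lives in `buck2.tests.e2e_util.asserts` and matches a specific failure type):

```python
import re
from typing import Any, Awaitable


async def expect_failure(command: Awaitable[Any], stderr_regex: str) -> None:
    # Hypothetical stand-in: the command must raise, and the failure
    # text must match the expected pattern.
    try:
        await command
    except Exception as e:
        assert re.search(stderr_regex, str(e)), (
            f"failure text did not match {stderr_regex!r}: {e}"
        )
    else:
        raise AssertionError("expected the command to fail, but it succeeded")
```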
diff --git a/tests/core/build/actions/test_write_data/write_fails/.buckroot b/tests/core/build/actions/test_write_data/write_fails/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/actions/test_write_data/write_fails/TARGETS.fixture b/tests/core/build/actions/test_write_data/write_fails/TARGETS.fixture new file mode 100644 index 0000000000000..62c7b5b3f9453 --- /dev/null +++ b/tests/core/build/actions/test_write_data/write_fails/TARGETS.fixture @@ -0,0 +1,3 @@ +fails_on_invalid_output(name = "fails_on_invalid_output") + +fails_on_invalid_contents(name = "fails_on_invalid_contents") diff --git a/tests/core/build/actions/test_write_data/write_fails/prelude.bzl b/tests/core/build/actions/test_write_data/write_fails/prelude.bzl new file mode 100644 index 0000000000000..d51deef2ce761 --- /dev/null +++ b/tests/core/build/actions/test_write_data/write_fails/prelude.bzl @@ -0,0 +1,23 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _fails_on_invalid_contents(ctx): + out = ctx.actions.declare_output("out") + ctx.actions.write(out, {}) + +fails_on_invalid_contents = rule( + impl = _fails_on_invalid_contents, + attrs = {}, +) + +def _fails_on_invalid_output(ctx): + ctx.actions.write([], "") + +fails_on_invalid_output = rule( + impl = _fails_on_invalid_output, + attrs = {}, +) diff --git a/tests/core/build/macros/BUCK b/tests/core/build/macros/BUCK new file mode 100644 index 0000000000000..d98e2af7cbb2e --- /dev/null +++ b/tests/core/build/macros/BUCK @@ -0,0 +1,21 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_macros", + srcs = ["test_macros.py"], + data_dir = "test_macros_data", + deps = [ + "//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_write_to_file_macros", + srcs = ["test_write_to_file_macros.py"], + data_dir = "test_write_to_file_macros_data", + deps = [ + "//buck2/tests/e2e_util:utils", + ], +) diff --git a/tests/core/build/macros/test_macros.py b/tests/core/build/macros/test_macros.py new file mode 100644 index 0000000000000..49fb7c4048ec3 --- /dev/null +++ b/tests/core/build/macros/test_macros.py @@ -0,0 +1,39 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + + +import platform + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_run_with_source_macros(buck: Buck) -> None: + sep = "\\" if platform.system() == "Windows" else "/" + result = await buck.run("//source:echo_file") + assert result.stdout.endswith(f"source{sep}foo.txt\n") + + result = await buck.run("//source:echo_dir") + assert result.stdout.endswith(f"source{sep}bar\n") + + result = await buck.run("//source:cat_file") + assert result.stdout == "foo file\n" + + result = await buck.run("//source:cat_dir") + assert result.stdout == "bar file\n" + + +@buck_test() +async def test_no_dep_in_source(buck: Buck) -> None: + await expect_failure( + buck.build("//dep_as_source:uses_dep"), + stderr_regex="Source file `:trivial` does not exist", + ) diff --git a/tests/core/build/macros/test_macros_data/.buckconfig b/tests/core/build/macros/test_macros_data/.buckconfig new file mode 100644 index 0000000000000..c04f0f2853757 --- /dev/null +++ b/tests/core/build/macros/test_macros_data/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] +name = TARGETS.fixture + +[repositories] +root = . +prelude = prelude diff --git a/tests/core/build/macros/test_macros_data/.buckroot b/tests/core/build/macros/test_macros_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/macros/test_macros_data/dep_as_source/TARGETS.fixture b/tests/core/build/macros/test_macros_data/dep_as_source/TARGETS.fixture new file mode 100644 index 0000000000000..f56ac5100d3e9 --- /dev/null +++ b/tests/core/build/macros/test_macros_data/dep_as_source/TARGETS.fixture @@ -0,0 +1,11 @@ +load("//source:defs.bzl", "echo_rule") +load(":defs.bzl", "trivial") + +trivial( + name = "trivial", +) + +echo_rule( + name = "uses_dep", + arg = "$(source :trivial)", +) diff --git a/tests/core/build/macros/test_macros_data/dep_as_source/defs.bzl b/tests/core/build/macros/test_macros_data/dep_as_source/defs.bzl new file mode 100644 index 0000000000000..670c9f81e0130 --- /dev/null +++ b/tests/core/build/macros/test_macros_data/dep_as_source/defs.bzl @@ -0,0 +1,14 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
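`$(source ...)` resolves a package-relative path to an actual source file and fails when nothing is there, which is exactly what `test_no_dep_in_source` exercises by passing a target label instead of a file. A toy expander capturing that behavior (illustrative only; Buck2's real macro resolution happens during attribute coercion, not via string substitution):

```python
import os
import re


def expand_source_macros(arg: str, package_dir: str) -> str:
    # Replace each "$(source <path>)" with an absolute path under the
    # package, refusing anything that does not exist on disk; target
    # labels like ":trivial" are not files, so they are rejected.
    def resolve(m: "re.Match[str]") -> str:
        rel = m.group(1)
        path = os.path.join(package_dir, rel)
        if not os.path.exists(path):
            raise FileNotFoundError(f"Source file `{rel}` does not exist")
        return os.path.abspath(path)

    return re.sub(r"\$\(source ([^)]+)\)", resolve, arg)
```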
+ +def _impl(_ctx): + return [DefaultInfo()] + +trivial = rule( + impl = _impl, + attrs = {}, +) diff --git a/tests/core/build/macros/test_macros_data/prelude/prelude.bzl b/tests/core/build/macros/test_macros_data/prelude/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/macros/test_macros_data/source/TARGETS.fixture b/tests/core/build/macros/test_macros_data/source/TARGETS.fixture new file mode 100644 index 0000000000000..302ac671d5601 --- /dev/null +++ b/tests/core/build/macros/test_macros_data/source/TARGETS.fixture @@ -0,0 +1,21 @@ +load(":defs.bzl", "cat_rule", "echo_rule") + +echo_rule( + name = "echo_file", + arg = "$(source foo.txt)", +) + +echo_rule( + name = "echo_dir", + arg = "$(source bar)", +) + +cat_rule( + name = "cat_file", + arg = "$(source foo.txt)", +) + +cat_rule( + name = "cat_dir", + arg = "$(source bar)/bar.txt", +) diff --git a/tests/core/build/macros/test_macros_data/source/bar/bar.txt b/tests/core/build/macros/test_macros_data/source/bar/bar.txt new file mode 100644 index 0000000000000..24ec36992e8d4 --- /dev/null +++ b/tests/core/build/macros/test_macros_data/source/bar/bar.txt @@ -0,0 +1 @@ +bar file diff --git a/tests/core/build/macros/test_macros_data/source/defs.bzl b/tests/core/build/macros/test_macros_data/source/defs.bzl new file mode 100644 index 0000000000000..2d9344b98c4ef --- /dev/null +++ b/tests/core/build/macros/test_macros_data/source/defs.bzl @@ -0,0 +1,26 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _echo(ctx): + return [DefaultInfo(), RunInfo(args = cmd_args("echo", ctx.attrs.arg))] + +echo_rule = rule( + impl = _echo, + attrs = { + "arg": attrs.arg(), + }, +) + +def _cat(ctx): + return [DefaultInfo(), RunInfo(args = cmd_args("cat", ctx.attrs.arg))] + +cat_rule = rule( + impl = _cat, + attrs = { + "arg": attrs.arg(), + }, +) diff --git a/tests/core/build/macros/test_macros_data/source/foo.txt b/tests/core/build/macros/test_macros_data/source/foo.txt new file mode 100644 index 0000000000000..3f9a7b101d4fa --- /dev/null +++ b/tests/core/build/macros/test_macros_data/source/foo.txt @@ -0,0 +1 @@ +foo file diff --git a/tests/core/build/macros/test_write_to_file_macros.py b/tests/core/build/macros/test_write_to_file_macros.py new file mode 100644 index 0000000000000..94721fb4dc60f --- /dev/null +++ b/tests/core/build/macros/test_write_to_file_macros.py @@ -0,0 +1,83 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
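The write-to-file-macros test below compares buck-out paths only after scrubbing two kinds of hashed path components: 16-hex-char configuration hashes and 40-hex-char macro directory hashes. A quick illustration of the normalization it applies (the sample hash is taken from the comment inside the test itself):

```python
import re


def normalize_path(p: str) -> str:
    # Same normalization as _normalize_path below: collapse hashed path
    # components to "//" so expectations are stable across machines.
    p = p.replace("\\", "/")
    p = re.sub("/([a-f0-9]{16})/", "//", p)
    p = re.sub("/([a-f0-9]{40})/", "//", p)
    return p


sample = "buck-out/v2/gen/root/6dd044292ff31ae1/__test_rule__/out.txt"
assert normalize_path(sample) == "buck-out/v2/gen/root//__test_rule__/out.txt"
```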
+ +# pyre-unsafe + +import os +import re + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +def _normalize_path(p: str) -> str: + p = p.replace("\\", "/") + p = re.sub("/([a-f0-9]{16})/", "//", p) + p = re.sub("/([a-f0-9]{40})/", "//", p) + return p + + +def _find_file(dir, name: str): + files = [] + for root, _, filenames in os.walk(dir): + for filename in filenames: + if filename == name: + files.append(os.path.join(root, filename)) + [f] = files + return f + + +@buck_test() +async def test_xxx(buck: Buck) -> None: + result = await buck.build("//:test_rule") + out = result.get_build_report().output_for_target("root//:test_rule") + + # Out contents is: + # ``` + # @buck-out/v2/gen/root/6dd044292ff31ae1/__test_rule__/__macros/1e0e364a22c69340e6f02604520fdeb7674264c0/0.macro + # @../__macros/1e0e364a22c69340e6f02604520fdeb7674264c0/1.macro + # @../__macros/1e0e364a22c69340e6f02604520fdeb7674264c0/2.macro + # @../__macros/1e0e364a22c69340e6f02604520fdeb7674264c0/3.macro + # ``` + + with open(out) as f: + [a, b, c, d] = [line.strip() for line in f.readlines()] + + a = a.replace("\\", "/") + b = b.replace("\\", "/") + c = c.replace("\\", "/") + d = d.replace("\\", "/") + + assert ( + "@buck-out/v2/gen/root//__test_rule__/__macros//0.macro" + == _normalize_path(a) + ) + assert "@../__macros//1.macro" == _normalize_path(b) + assert "@../__macros//2.macro" == _normalize_path(c) + assert "@../__macros//3.macro" == _normalize_path(d) + + a_x = _find_file(buck.cwd, "0.macro") + with open(a_x) as f: + a_contents = _normalize_path(f.read()) + assert "buck-out/v2/gen/root//__write_file__/write_file.txt" == a_contents + + # TODO(nga): contents of `{1,2,3}.macro` should be identical. + + b_x = _find_file(buck.cwd, "1.macro") + with open(b_x) as f: + b_contents = _normalize_path(f.read()) + assert "buck-out/v2/gen/root//__write_file__/write_file.txt" == b_contents + + c_x = _find_file(buck.cwd, "2.macro") + with open(c_x) as f: + c_contents = _normalize_path(f.read()) + assert "../../__write_file__/write_file.txt" == c_contents + + d_x = _find_file(buck.cwd, "3.macro") + with open(d_x) as f: + d_contents = _normalize_path(f.read()) + assert "../../__write_file__/write_file.txt" == d_contents diff --git a/tests/core/build/macros/test_write_to_file_macros_data/.buckconfig b/tests/core/build/macros/test_write_to_file_macros_data/.buckconfig new file mode 100644 index 0000000000000..d0cca261b8bee --- /dev/null +++ b/tests/core/build/macros/test_write_to_file_macros_data/.buckconfig @@ -0,0 +1,12 @@ +[buildfile] +name=TARGETS.fixture + +[cells] +root = . 
+nano_prelude = nano_prelude + +[cell_aliases] +prelude = nano_prelude + +[external_cells] +nano_prelude = bundled diff --git a/tests/core/build/macros/test_write_to_file_macros_data/.buckroot b/tests/core/build/macros/test_write_to_file_macros_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/macros/test_write_to_file_macros_data/TARGETS.fixture b/tests/core/build/macros/test_write_to_file_macros_data/TARGETS.fixture new file mode 100644 index 0000000000000..9c45a1f67684e --- /dev/null +++ b/tests/core/build/macros/test_write_to_file_macros_data/TARGETS.fixture @@ -0,0 +1,5 @@ +load(":defs.bzl", "test_rule", "write_file") + +write_file(name = "write_file") + +test_rule(name = "test_rule", arg = "$(@query_outputs :write_file)") diff --git a/tests/core/build/macros/test_write_to_file_macros_data/defs.bzl b/tests/core/build/macros/test_write_to_file_macros_data/defs.bzl new file mode 100644 index 0000000000000..6e29433b2e41b --- /dev/null +++ b/tests/core/build/macros/test_write_to_file_macros_data/defs.bzl @@ -0,0 +1,35 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _write_file(ctx): + f = ctx.actions.write("write_file.txt", "test test test") + return [DefaultInfo(default_output = f)] + +write_file = rule( + impl = _write_file, + attrs = {}, +) + +def _test_rule(ctx): + arg = ctx.attrs.arg + + f = ctx.actions.declare_output("out.txt") + f, _ = ctx.actions.write(f, [ + cmd_args(arg, hidden = arg), + cmd_args(arg, relative_to = f), + cmd_args(cmd_args(arg, relative_to = f)), + cmd_args(cmd_args(arg), relative_to = f), + ], allow_args = True, with_inputs = True) + + return [DefaultInfo(default_output = f)] + +test_rule = rule( + impl = _test_rule, + attrs = { + "arg": attrs.arg(), + }, +) diff --git a/tests/core/build/test_action_error_handler_types.py b/tests/core/build/test_action_error_handler_types.py new file mode 100644 index 0000000000000..00714f4a215fe --- /dev/null +++ b/tests/core/build/test_action_error_handler_types.py @@ -0,0 +1,19 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_action_error_handler_types(buck: Buck) -> None: + await buck.bxl( + "//:test_action_error_handler_types.bxl:test_action_error_handler_types" + ) diff --git a/tests/core/build/test_action_error_handler_types_data/.buckconfig b/tests/core/build/test_action_error_handler_types_data/.buckconfig new file mode 100644 index 0000000000000..f12589f437e0f --- /dev/null +++ b/tests/core/build/test_action_error_handler_types_data/.buckconfig @@ -0,0 +1,9 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . 
+prelude = prelude + +[build] + execution_platforms = root//platforms:platforms diff --git a/tests/core/build/test_action_error_handler_types_data/.buckroot b/tests/core/build/test_action_error_handler_types_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_action_error_handler_types_data/error_handler_impl.bzl b/tests/core/build/test_action_error_handler_types_data/error_handler_impl.bzl new file mode 100644 index 0000000000000..124ea6b190f8e --- /dev/null +++ b/tests/core/build/test_action_error_handler_types_data/error_handler_impl.bzl @@ -0,0 +1,29 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def error_handler_impl(ctx: ActionErrorCtx) -> list[ActionSubError]: + categories = [] + + if "foo" in ctx.stdout: + categories.append(ctx.new_sub_error( + category = "foo_category", + message = "foo message", + locations = [ + ctx.new_error_location(file = "foo_file", line = 1), + ], + )) + + if "bar" in ctx.stderr: + categories.append(ctx.new_sub_error( + category = "bar_category", + message = "bar message", + locations = [ + ctx.new_error_location(file = "bar_file", line = 1), + ], + )) + + return categories diff --git a/tests/core/build/test_action_error_handler_types_data/platforms/TARGETS.fixture b/tests/core/build/test_action_error_handler_types_data/platforms/TARGETS.fixture new file mode 100644 index 0000000000000..c54b9379d3a83 --- /dev/null +++ b/tests/core/build/test_action_error_handler_types_data/platforms/TARGETS.fixture @@ -0,0 +1,10 @@ +load(":defs.bzl", "execution_platforms", "target_platform") + +execution_platforms( + name = "platforms", +) + +target_platform( + name = "platform", + visibility = ["PUBLIC"], +) diff --git a/tests/core/build/test_action_error_handler_types_data/platforms/defs.bzl b/tests/core/build/test_action_error_handler_types_data/platforms/defs.bzl new file mode 100644 index 0000000000000..ffce952274d1d --- /dev/null +++ b/tests/core/build/test_action_error_handler_types_data/platforms/defs.bzl @@ -0,0 +1,46 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
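`error_handler_impl` above is ordinary Starlark: it pattern-matches the failed action's stdout/stderr and returns a list of categorized sub-errors. The same logic in plain Python, with stand-in dataclasses for `ActionSubError` and the location type (the Python types are assumptions for illustration; only the Starlark API is real):

```python
from dataclasses import dataclass, field
from typing import List


@dataclass
class ErrorLocation:
    file: str
    line: int


@dataclass
class ActionSubError:
    category: str
    message: str
    locations: List[ErrorLocation] = field(default_factory=list)


def categorize(stdout: str, stderr: str) -> List[ActionSubError]:
    # Mirror of the Starlark handler: "foo" in stdout and "bar" in
    # stderr each map to one sub-error with a single location.
    subs = []
    if "foo" in stdout:
        subs.append(ActionSubError("foo_category", "foo message", [ErrorLocation("foo_file", 1)]))
    if "bar" in stderr:
        subs.append(ActionSubError("bar_category", "bar message", [ErrorLocation("bar_file", 1)]))
    return subs
```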
+ +def _execution_platform(ctx): + platform = ExecutionPlatformInfo( + label = ctx.label.raw_target(), + configuration = ConfigurationInfo( + constraints = { + }, + values = {}, + ), + executor_config = CommandExecutorConfig( + local_enabled = True, + remote_enabled = True, + remote_cache_enabled = True, + remote_execution_properties = { + "platform": "linux-remote-execution", + }, + remote_execution_use_case = "buck2-testing", + ), + ) + + return [ + DefaultInfo(), + ExecutionPlatformRegistrationInfo(platforms = [platform]), + ] + +execution_platforms = rule(attrs = {}, impl = _execution_platform) + +def _target_platform(ctx): + return [ + DefaultInfo(), + PlatformInfo( + label = str(ctx.label.raw_target()), + configuration = ConfigurationInfo(constraints = {}, values = {}), + ), + ] + +target_platform = rule( + impl = _target_platform, + attrs = {}, +) diff --git a/tests/core/build/test_action_error_handler_types_data/prelude/prelude.bzl b/tests/core/build/test_action_error_handler_types_data/prelude/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_action_error_handler_types_data/test_action_error_handler_types.bxl b/tests/core/build/test_action_error_handler_types_data/test_action_error_handler_types.bxl new file mode 100644 index 0000000000000..6c9423d09c64d --- /dev/null +++ b/tests/core/build/test_action_error_handler_types_data/test_action_error_handler_types.bxl @@ -0,0 +1,63 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load(":error_handler_impl.bzl", "error_handler_impl") + +def _assert_eq(a, b): + if a != b: + fail("Expected {} == {}".format(a, b)) + +def _test_action_error_handler_types(ctx): + action_factory = ctx.bxl_actions().actions + + stdout = action_factory.write("stdout", "foo") + stderr = action_factory.write("stderr", "bar") + unused_out = action_factory.declare_output("out") + + def validate_action_error_handler_impl(ctx, artifacts, outputs): + stdout_content = artifacts[stdout].read_string() + stderr_content = artifacts[stderr].read_string() + + action_error_ctx = __internal__.new_test_action_error_ctx(stderr = stderr_content, stdout = stdout_content) + actual_error_categories = error_handler_impl(action_error_ctx) + + expected_foo_error = action_error_ctx.new_sub_error( + category = "foo_category", + message = "foo message", + locations = [ + action_error_ctx.new_error_location(file = "foo_file", line = 1), + ], + ) + + expected_bar_error = action_error_ctx.new_sub_error( + category = "bar_category", + message = "bar message", + locations = [ + action_error_ctx.new_error_location(file = "bar_file", line = 1), + ], + ) + + expected_error_categories = [expected_foo_error, expected_bar_error] + + _assert_eq(expected_error_categories, actual_error_categories) + + ctx.bxl_actions().actions.write(outputs[unused_out], "") + + action_factory.dynamic_output( + dynamic = [stderr, stdout], + inputs = [], + outputs = [unused_out.as_output()], + f = validate_action_error_handler_impl, + ) + + ctx.output.ensure(unused_out) + +test_action_error_handler_types = bxl_main( + impl = _test_action_error_handler_types, + cli_args = { + }, +) diff --git a/tests/core/build/test_build_configured.py b/tests/core/build/test_build_configured.py new file mode 100644 index 
0000000000000..8529f5d9b0d88 --- /dev/null +++ b/tests/core/build/test_build_configured.py @@ -0,0 +1,80 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json +import re +import typing + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +# Obtain hashes of `<astrologer>` and `<vagabond>` configurations. +async def _obtain_cfg_hashes(buck: Buck) -> typing.Tuple[str, str]: + result = await buck.cquery( + "root//:simple", + "--target-universe", + "root//:universe", + ) + [astrologer, vagabond] = result.stdout.splitlines() + assert astrologer.startswith("root//:simple (<astrologer>#") + assert vagabond.startswith("root//:simple (<vagabond>#") + astrologer_hash = re.sub(r".*#(.*)\)", r"\1", astrologer) + vagabond_hash = re.sub(r".*#(.*)\)", r"\1", vagabond) + assert re.fullmatch("[0-9a-f]{16}", astrologer_hash), astrologer + assert re.fullmatch("[0-9a-f]{16}", vagabond_hash), vagabond + return (astrologer_hash, vagabond_hash) + + +@buck_test() +async def test_build_configured_full_configuration(buck: Buck) -> None: + (astrologer_hash, _) = await _obtain_cfg_hashes(buck) + + result = await buck.build( + f"root//:simple (<astrologer>#{astrologer_hash})", + "--target-universe", + "root//:universe", + ) + out = result.get_build_report().output_for_target("root//:simple").read_text() + assert f"$$$root//:simple (<astrologer>#{astrologer_hash})$$$" == out + + +@buck_test() +async def test_build_configured_no_hash(buck: Buck) -> None: + (_, vagabond_hash) = await _obtain_cfg_hashes(buck) + result = await buck.build( + "root//:simple (<vagabond>)", + "--target-universe", + "root//:universe", + ) + out = result.get_build_report().output_for_target("root//:simple").read_text() + assert f"$$$root//:simple (<vagabond>#{vagabond_hash})$$$" == out + + +@buck_test() +async def test_build_configured_wrong_hash(buck: Buck) -> None: + result = await buck.build( + "root//:simple (<astrologer>#0123456789abcdef)", + "--target-universe", + "root//:universe", + ) + # TODO(nga): this should either fail or emit a warning. + assert "root//:simple" not in json.loads(result.stdout)["results"] + + +@buck_test() +async def test_build_configured_no_universe(buck: Buck) -> None: + await expect_failure( + buck.build( + "root//:simple (<vagabond>)", + ), + stderr_regex="Targets with explicit configuration can only be built when the", + ) diff --git a/tests/core/build/test_build_configured_data/.buckconfig b/tests/core/build/test_build_configured_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/build/test_build_configured_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . 
+ nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/build/test_build_configured_data/.buckroot b/tests/core/build/test_build_configured_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_build_configured_data/TARGETS.fixture b/tests/core/build/test_build_configured_data/TARGETS.fixture new file mode 100644 index 0000000000000..abbaa22e060ec --- /dev/null +++ b/tests/core/build/test_build_configured_data/TARGETS.fixture @@ -0,0 +1,18 @@ +load(":defs.bzl", "simple", "universe") + +# Write the configuration label to the default output. +simple( + name = "simple", + default_target_platform = ":default_plat", +) + +# Build nothing, but depend on two `:simple` targets in different configurations. +universe( + name = "universe", + split_dep = ":simple", + default_target_platform = ":default_plat", +) + +platform( + name = "default_plat", +) diff --git a/tests/core/build/test_build_configured_data/defs.bzl b/tests/core/build/test_build_configured_data/defs.bzl new file mode 100644 index 0000000000000..9e485aa8fb6b7 --- /dev/null +++ b/tests/core/build/test_build_configured_data/defs.bzl @@ -0,0 +1,42 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _my_transition_impl(platform, refs): + _ignore = (platform, refs) # buildifier: disable=unused-variable + return { + "astrologer": PlatformInfo(label = "<astrologer>", configuration = ConfigurationInfo(constraints = {}, values = {})), + "vagabond": PlatformInfo(label = "<vagabond>", configuration = ConfigurationInfo(constraints = {}, values = {})), + } + +my_transition = transition( + impl = _my_transition_impl, + refs = {}, + split = True, +) + +def _universe_impl(ctx): + _ignore = ctx # buildifier: disable=unused-variable + + # Do not build anything, just configure dependencies. + return [DefaultInfo()] + +universe = rule( + impl = _universe_impl, + attrs = { + "split_dep": attrs.split_transition_dep(cfg = my_transition), + }, +) + +def _simple_impl(ctx): + out = ctx.actions.write("out", cmd_args(str(ctx.label), format = "$$${}$$$")) + return [DefaultInfo(default_output = out)] + +simple = rule( + impl = _simple_impl, + attrs = { + }, +) diff --git a/tests/core/build/test_build_id_env_var.py b/tests/core/build/test_build_id_env_var.py new file mode 100644 index 0000000000000..55432c3c69716 --- /dev/null +++ b/tests/core/build/test_build_id_env_var.py @@ -0,0 +1,45 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
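The configured-target tests above pull the 16-hex-digit configuration hash out of cquery lines of the form `root//:simple (<astrologer>#...)` with a single substitution. Standalone, the extraction looks like this (the literal hash is the placeholder value the wrong-hash test uses):

```python
import re

line = "root//:simple (<astrologer>#0123456789abcdef)"
# Keep only the capture group between the last "#" and the closing ")".
cfg_hash = re.sub(r".*#(.*)\)", r"\1", line)
assert cfg_hash == "0123456789abcdef"
assert re.fullmatch("[0-9a-f]{16}", cfg_hash)
```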
+ +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.utils import random_string + + +@buck_test() +async def test_build_id_env_var_is_set_locally(buck: Buck) -> None: + result = await buck.build( + "root//:top", + "--local-only", + "--no-remote-cache", + "-c", + f"test.cache_buster={random_string()}", + ) + + output = result.get_build_report().output_for_target("root//:top") + assert output.exists() + with open(output) as f: + assert f.read().strip() == result.buck_build_id + + +@buck_test() +async def test_build_id_env_var_is_set_remotely(buck: Buck) -> None: + result = await buck.build( + "root//:top", + "--remote-only", + "--no-remote-cache", + "-c", + f"test.cache_buster={random_string()}", + ) + + output = result.get_build_report().output_for_target("root//:top") + assert output.exists() + with open(output) as f: + assert f.read().strip() == result.buck_build_id diff --git a/tests/core/build/test_build_id_env_var_data/.buckconfig b/tests/core/build/test_build_id_env_var_data/.buckconfig new file mode 100644 index 0000000000000..6a2e48bc41d8a --- /dev/null +++ b/tests/core/build/test_build_id_env_var_data/.buckconfig @@ -0,0 +1,9 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored + +[repositories] +root = . +prelude = prelude diff --git a/tests/core/build/test_build_id_env_var_data/.buckroot b/tests/core/build/test_build_id_env_var_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_build_id_env_var_data/TARGETS.fixture b/tests/core/build/test_build_id_env_var_data/TARGETS.fixture new file mode 100644 index 0000000000000..8e416d93d495d --- /dev/null +++ b/tests/core/build/test_build_id_env_var_data/TARGETS.fixture @@ -0,0 +1,3 @@ +load(":defs.bzl", "top") + +top(name = "top") diff --git a/tests/core/build/test_build_id_env_var_data/defs.bzl b/tests/core/build/test_build_id_env_var_data/defs.bzl new file mode 100644 index 0000000000000..b3ae2ccba6fed --- /dev/null +++ b/tests/core/build/test_build_id_env_var_data/defs.bzl @@ -0,0 +1,32 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +cache_buster = read_config("test", "cache_buster", "") + +def _top(ctx): + output = ctx.actions.declare_output("output") + run = ctx.actions.write( + "run.py", + [ + "import os", + "import sys", + "build_id = os.environ[\"BUCK_BUILD_ID\"]", + "with open(sys.argv[1], 'w') as f:", + " f.write(f'{build_id}\\n')", + ], + ) + ctx.actions.run( + cmd_args(["python3", run, output.as_output()]), + category = "test_category", + env = { + "cache_buster": cache_buster, + }, + ) + + return [DefaultInfo(default_output = output)] + +top = rule(impl = _top, attrs = {}) diff --git a/tests/core/build/test_build_id_env_var_data/prelude/prelude.bzl b/tests/core/build/test_build_id_env_var_data/prelude/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_build_output_file_hashes.py b/tests/core/build/test_build_output_file_hashes.py new file mode 100644 index 0000000000000..ef5453152eb24 --- /dev/null +++ b/tests/core/build/test_build_output_file_hashes.py @@ -0,0 +1,39 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_file_output( + buck: Buck, + tmp_path: Path, +) -> None: + hashes_file_path = tmp_path / "hashes" + await buck.build( + "//:file.txt", + f"--output-hashes-file={hashes_file_path}", + ) + # Check json output + with open(hashes_file_path) as f: + data = json.loads(f.read()) + for line in data: + assert line["path"] is not None + assert line["kind"] in {"directory", "file", "symlink", "external_symlink"} + # Check the entry for file.txt is what we expect + file_entry = next(line for line in data if line["path"].endswith("file.txt")) + assert file_entry is not None + assert file_entry["kind"] == "file" + assert file_entry["digest_kind"] == "SHA1" + assert file_entry["digest"] == "fb19d5b1546753df5f7741efbabd0d24dcaacd65:20" diff --git a/tests/core/build/test_build_output_file_hashes_data/.buckconfig b/tests/core/build/test_build_output_file_hashes_data/.buckconfig new file mode 100644 index 0000000000000..08e0b0a3d2118 --- /dev/null +++ b/tests/core/build/test_build_output_file_hashes_data/.buckconfig @@ -0,0 +1,5 @@ +[cells] + root = . + +[buildfile] + name=TARGETS.fixture diff --git a/tests/core/build/test_build_output_file_hashes_data/.buckroot b/tests/core/build/test_build_output_file_hashes_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_build_output_file_hashes_data/TARGETS.fixture b/tests/core/build/test_build_output_file_hashes_data/TARGETS.fixture new file mode 100644 index 0000000000000..d1e998e5e962e --- /dev/null +++ b/tests/core/build/test_build_output_file_hashes_data/TARGETS.fixture @@ -0,0 +1,6 @@ +load(":defs.bzl", "export_file") + +export_file( + name = "file.txt", + src = "file.txt", +) diff --git a/tests/core/build/test_build_output_file_hashes_data/defs.bzl b/tests/core/build/test_build_output_file_hashes_data/defs.bzl new file mode 100644 index 0000000000000..31d9e0c89cc54 --- /dev/null +++ b/tests/core/build/test_build_output_file_hashes_data/defs.bzl @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
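The `--output-hashes-file` assertion above pairs a digest with a byte size, so `fb19...:20` decomposes into a SHA1 over the file bytes plus the length of `my text in the file` with its trailing newline, which is exactly 20 bytes. A sketch that reproduces the format for a file on disk (the format is inferred from that assertion, not from a documented schema):

```python
import hashlib


def output_digest(path: str) -> str:
    # "<sha1-hex>:<byte-size>", the shape asserted for file.txt above.
    with open(path, "rb") as f:
        data = f.read()
    return f"{hashlib.sha1(data).hexdigest()}:{len(data)}"
```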
+ +def _export_file_impl(ctx): + return [DefaultInfo(default_output = ctx.attrs.src)] + +export_file = rule( + impl = _export_file_impl, + attrs = { + "src": attrs.source(allow_directory = True), + }, +) diff --git a/tests/core/build/test_build_output_file_hashes_data/file.txt b/tests/core/build/test_build_output_file_hashes_data/file.txt new file mode 100644 index 0000000000000..9b0f38862f3ea --- /dev/null +++ b/tests/core/build/test_build_output_file_hashes_data/file.txt @@ -0,0 +1 @@ +my text in the file diff --git a/tests/core/build/test_build_output_file_hashes_data/folder/file_in_folder.txt b/tests/core/build/test_build_output_file_hashes_data/folder/file_in_folder.txt new file mode 100644 index 0000000000000..130eaf34e2a82 --- /dev/null +++ b/tests/core/build/test_build_output_file_hashes_data/folder/file_in_folder.txt @@ -0,0 +1 @@ +the file in the folder diff --git a/tests/core/build/test_build_report.py b/tests/core/build/test_build_report.py new file mode 100644 index 0000000000000..20d66ae332147 --- /dev/null +++ b/tests/core/build/test_build_report.py @@ -0,0 +1,131 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.utils import replace_hashes + + +@buck_test() +async def test_build_report_format(buck: Buck) -> None: + await buck.build( + "//:rule1", + "//:rule2", + "--build-report", + "report", + "//:rule2[out1]", + "-c", + "buck2.log_configured_graph_size=true", + ) + with open(buck.cwd / "report") as file: + report = json.load(file) + + assert report["success"] + assert report["failures"] == {} + + results = report["results"] + + rule1 = results["root//:rule1"] + assert replace_hashes(rule1["outputs"]["DEFAULT"]) == [ + "buck-out/v2/gen/root/<HASH>/__rule1__/out.txt" + ] + assert rule1["other_outputs"] == {} + rule1_configured = rule1["configured"]["<unspecified>"] + assert rule1_configured["success"] == "SUCCESS" + assert replace_hashes(rule1_configured["outputs"]["DEFAULT"]) == [ + "buck-out/v2/gen/root/<HASH>/__rule1__/out.txt" + ] + assert rule1_configured["other_outputs"] == {} + + assert rule1["configured_graph_size"] == 2 + assert rule1_configured["configured_graph_size"] == 2 + + rule2 = results["root//:rule2"] + assert rule2["success"] == "SUCCESS" + assert replace_hashes(rule2["outputs"]["DEFAULT"]) == [ + "buck-out/v2/gen/root/<HASH>/__rule2__/out1.txt" + ] + assert replace_hashes(rule2["outputs"]["out1"]) == [ + "buck-out/v2/gen/root/<HASH>/__rule2__/out1.txt" + ] + assert rule2["other_outputs"] == {} + + rule2_configured = rule2["configured"]["<unspecified>"] + assert rule2_configured["success"] == "SUCCESS" + assert replace_hashes(rule2_configured["outputs"]["DEFAULT"]) == [ + "buck-out/v2/gen/root/<HASH>/__rule2__/out1.txt" + ] + assert replace_hashes(rule2_configured["outputs"]["out1"]) == [ + "buck-out/v2/gen/root/<HASH>/__rule2__/out1.txt" + ] + assert rule2_configured["other_outputs"] == {} + + assert rule2["configured_graph_size"] == 3 + assert rule2_configured["configured_graph_size"] == 3 + + +@buck_test() +async def test_build_report_format_skip_unconfigured(buck: Buck) -> None: + await buck.build( + "//:rule1", + "--build-report", + "report", + "-c", 
"build_report.print_unconfigured_section=false", + ) + with open(buck.cwd / "report") as file: + report = json.load(file) + + assert report["success"] + assert report["failures"] == {} + + results = report["results"] + + rule1 = results["root//:rule1"] + assert "success" not in rule1 + assert "outputs" not in rule1 + assert "other_outputs" not in rule1 + rule1_configured = rule1["configured"][""] + assert rule1_configured["success"] == "SUCCESS" + assert replace_hashes(rule1_configured["outputs"]["DEFAULT"]) == [ + "buck-out/v2/gen/root//__rule1__/out.txt" + ] + assert rule1_configured["other_outputs"] == {} + + +@buck_test() +async def test_build_report_package_project_relative_path(buck: Buck) -> None: + await buck.build( + "//:rule1", + "//subdir:rule", + "--build-report", + "report", + ) + + with open(buck.cwd / "report") as file: + results = json.load(file)["results"] + assert "package_project_relative_path" not in results["root//:rule1"] + assert "package_project_relative_path" not in results["root//subdir:rule"] + + await buck.build( + "//:rule1", + "//subdir:rule", + "--build-report", + "report", + "--build-report-options", + "package-project-relative-paths", + ) + + with open(buck.cwd / "report") as file: + results = json.load(file)["results"] + assert results["root//:rule1"]["package_project_relative_path"] == "" + assert results["root//subdir:rule"]["package_project_relative_path"] == "subdir" diff --git a/tests/core/build/test_build_report_data/.buckconfig b/tests/core/build/test_build_report_data/.buckconfig new file mode 100644 index 0000000000000..08e0b0a3d2118 --- /dev/null +++ b/tests/core/build/test_build_report_data/.buckconfig @@ -0,0 +1,5 @@ +[cells] + root = . + +[buildfile] + name=TARGETS.fixture diff --git a/tests/core/build/test_build_report_data/.buckroot b/tests/core/build/test_build_report_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_build_report_data/TARGETS.fixture b/tests/core/build/test_build_report_data/TARGETS.fixture new file mode 100644 index 0000000000000..0845f03b44aa9 --- /dev/null +++ b/tests/core/build/test_build_report_data/TARGETS.fixture @@ -0,0 +1,22 @@ +load(":rules.bzl", "touch_file") + +touch_file( + name = "rule0", + out = "out.txt", +) + +touch_file( + name = "rule1", + out = "out.txt", + deps = [":rule0"], +) + +touch_file( + name = "rule2", + outs = { + "out1": "out1.txt", + "out2": "out2.txt", + }, + default_outs = ["out1.txt"], + deps = [":rule0", ":rule1"], +) diff --git a/tests/core/build/test_build_report_data/rules.bzl b/tests/core/build/test_build_report_data/rules.bzl new file mode 100644 index 0000000000000..c56bd92a7d6a9 --- /dev/null +++ b/tests/core/build/test_build_report_data/rules.bzl @@ -0,0 +1,38 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +def _touch_file_impl(ctx): + if ctx.attrs.out != None: + out = ctx.actions.write(ctx.attrs.out, "") + default_outputs = [out] + named_outputs = {} + elif ctx.attrs.outs != None: + default_outputs = [] + named_outputs = {} + default_out_paths = ctx.attrs.default_outs or [] + for (name, path) in ctx.attrs.outs.items(): + artifact = ctx.actions.write(path, "") + if path in default_out_paths: + default_outputs.append(artifact) + named_outputs[name] = artifact + else: + fail("One of `out` or `outs` should be set.") + providers = [DefaultInfo( + default_outputs = default_outputs, + sub_targets = {k: [DefaultInfo(default_output = v)] for (k, v) in named_outputs.items()}, + )] + return providers + +touch_file = rule( + impl = _touch_file_impl, + attrs = { + "default_outs": attrs.option(attrs.set(attrs.string(), sorted = False), default = None), + "deps": attrs.list(attrs.dep(), default = []), + "out": attrs.option(attrs.string(), default = None), + "outs": attrs.option(attrs.dict(key = attrs.string(), value = attrs.string(), sorted = False), default = None), + }, +) diff --git a/tests/core/build/test_build_report_data/subdir/TARGETS.fixture b/tests/core/build/test_build_report_data/subdir/TARGETS.fixture new file mode 100644 index 0000000000000..7c47dbd58b4f4 --- /dev/null +++ b/tests/core/build/test_build_report_data/subdir/TARGETS.fixture @@ -0,0 +1,6 @@ +load("//:rules.bzl", "touch_file") + +touch_file( + name = "rule", + out = "out.txt", +) diff --git a/tests/core/build/test_build_report_errors.py b/tests/core/build/test_build_report_errors.py new file mode 100644 index 0000000000000..01acb3837d5bd --- /dev/null +++ b/tests/core/build/test_build_report_errors.py @@ -0,0 +1,340 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json +import re +import sys
from pathlib import Path +from typing import List + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.golden import golden + + +def _sanitize(s: str) -> str: + # Remove message hashes + s = re.sub(r"\b[0-9]{16,}\b", "", s) + # Remove configuration hashes + # This is so bad... we don't force these hashes to print as 16 + # characters... and that's hard to fix because we don't allow changes to + # change action digests. 
+ s = re.sub(r"\b[0-9a-f]{12,16}\b", "", s) + # And action digests + return re.sub(r"\b[0-9a-f]{40}:[0-9]{1,3}\b", "", s) + + +def _sanitize_stderr(s: str) -> str: + # Remove all timestamps + s = re.sub(r"\[.{29}\]", "[]", s) + # Remove all UUIDs + s = re.sub( + r"\b[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\b", "", s + ) + # Remove "Commands" line + s = re.sub(r"Commands: .+", "Commands: ", s) + # Remove "Cache hits" percentage + s = re.sub(r"Cache hits: .+", "Cache hits: ", s) + # Remove "Network" line + s = re.sub(r"Network: .+", "Network: ", s) + return _sanitize(s) + + +def build_report_test(name: str, command: List[str]) -> None: + async def impl(buck: Buck, tmp_path: Path) -> None: + report = tmp_path / "build-report.json" + await expect_failure( + buck.build( + "--build-report", + str(report), + "--build-report-options", + "fill-out-failures", + *command + ) + ) + with open(report) as f: + report = json.loads(f.read()) + del report["trace_id"] + del report["project_root"] + + # string cache keys can vary due to differences in platform hashes within the message, + # so do something dumb here to still be able to use golden tests on all platforms: + # + # 1. sort by sanitized values + # 2. create a new dict where the keys are 1 + a large number so that we can + # sanitize it using the message regex above + strings = dict( + sorted( + report["strings"].items(), + key=lambda item: _sanitize(item[1]), + ) + ) + updated_strings = {} + start = 10000000000000000 + for i, v in enumerate(strings.values()): + updated_strings[i + start] = v + + report["strings"] = updated_strings + + golden( + output=_sanitize(json.dumps(report, indent=2, sort_keys=True)), + rel_path="fixtures/" + name + ".golden.json", + ) + pass + + globals()[name] = impl + + return buck_test()(impl) + + +build_report_test( + "test_action_fail_one", + ["//fail_action:fail_one"], +) + +build_report_test( + "test_action_fail_two", + ["//fail_action:fail_two"], +) + +build_report_test( + "test_action_fail_shared_dep", + ["//fail_action:fail_shared_dep"], +) + +build_report_test( + "test_action_fail_shared_across_targets", + ["//fail_action:alias_a", "//fail_action:alias_b"], +) + + +def running_on_windows() -> bool: + return sys.platform == "win32" + + +def running_on_mac() -> bool: + return sys.platform == "darwin" + + +# TODO(@wendyy) - windows adds some extra characters to stdout/stderr. +# Python reports compile errors with the full path on mac as well, which +# breaks golden tests. +# Fix for both os types later. 
+if not running_on_windows() and not running_on_mac(): + build_report_test( + "test_action_fail_with_stdout_stderr", + ["//fail_action:fail_script"], + ) + + build_report_test( + "test_action_fail_one_with_error_handler", + ["//fail_action:fail_one_with_error_handler"], + ) + + build_report_test( + "test_action_fail_many_with_error_handler", + ["//fail_action:fail_many_with_error_handler"], + ) + + build_report_test( + "test_action_fail_one_with_error_handler_no_op", + ["//fail_action:fail_one_with_error_handler_no_op"], + ) + + @buck_test() + async def test_stderr_with_empty_error_diagnostics(buck: Buck) -> None: + result = await expect_failure( + buck.build("//fail_action:fail_one_with_error_handler_no_op") + ) + + golden( + output=_sanitize_stderr(result.stderr), + rel_path="fixtures/test_stderr_with_empty_error_diagnostics.golden.txt", + ) + + @buck_test() + async def test_stderr_with_error_diagnostics(buck: Buck) -> None: + result = await expect_failure( + buck.build("//fail_action:error_handler_produced_multiple_categories") + ) + + golden( + output=_sanitize_stderr(result.stderr), + rel_path="fixtures/test_stderr_with_error_diagnostics.golden.txt", + ) + + @buck_test() + async def test_stderr_with_no_error_diagnostics(buck: Buck) -> None: + result = await expect_failure(buck.build("//fail_action:fail_script")) + + golden( + output=_sanitize_stderr(result.stderr), + rel_path="fixtures/test_stderr_with_no_error_diagnostics.golden.txt", + ) + + @buck_test() + async def test_stderr_could_not_produce_error_diagnostics(buck: Buck) -> None: + result = await expect_failure(buck.build("//fail_action:error_handler_failed")) + + golden( + output=_sanitize_stderr(result.stderr), + rel_path="fixtures/test_stderr_could_not_produce_error_diagnostics.golden.txt", + ) + + +build_report_test( + "test_analysis_fail", + ["//fail_analysis:fail_analysis"], +) + +build_report_test( + "test_analysis_fail_multi", + ["//fail_analysis:fail_analysis", "//fail_analysis:alias"], +) + +build_report_test( + "test_analysis_and_action", + ["//fail_analysis:fail_analysis", "//fail_action:fail_one"], +) + +build_report_test( + "test_configuration_fail", + ["//fail_config:cat_only"], +) + +build_report_test( + "test_missing", + ["//missing:missing"], +) + +build_report_test( + "test_load_fail", + ["//fail_load:first", "//fail_load:second"], +) + +# This does not show up in the build report, because there's nowhere we could put the error +build_report_test( + "test_load_fail_full_package", + ["//fail_load:"], +) + +build_report_test( + "test_partially_missing", + ["//missing:missing", "//missing:available"], +) + +build_report_test( + "test_one_of_each", + [ + "//success:success", + "//missing:missing", + "//fail_load:first", + "//fail_config:cat_only", + "//fail_analysis:fail_analysis", + "//fail_action:fail_one", + ], +) + +build_report_test( + "test_no_terminal_colors", + [ + "//terminal_colors:terminal_colors", + ], +) + + +@buck_test(setup_eden=True) +async def test_two_action_dep_failures(buck: Buck, tmp_path: Path) -> None: + # When we pass `--keep-going`, we should get error reports for both dependencies of the action. + # However, we don't. Instead, we just get one error non-deterministically. This is also why we + # can't use a `build_report_test` for this test. 
+ report = tmp_path / "build-report.json" + await expect_failure( + buck.build( + "--keep-going", "--build-report", str(report), "//fail_action:fail_two_deps" + ), + stderr_regex="Failed to build 'root//fail_action:fail_two_deps", + ) + with open(report) as f: + report = json.loads(f.read()) + errors = list( + report["results"]["root//fail_action:fail_two_deps"]["configured"].values() + )[0]["errors"] + assert len(errors) == 1 + + strings_cache = report["strings"] + error_message_index = str(errors[0]["message_content"]) + + assert strings_cache[error_message_index].startswith( + "Action failed: root//fail_action:fail_two_deps" + ) + assert "fail_two_deps" in errors[0]["action_error"]["key"]["owner"] + + +@buck_test() +async def test_error_handler_failed(buck: Buck, tmp_path: Path) -> None: + # Starlark error messages change across different modes for some reason (ex: opt-asan vs opt). + # We have a fair amount of coverage for other functionalities of error handler/build report, + # so let's just add a simple test here. + report = tmp_path / "build-report.json" + + await expect_failure( + buck.build( + "--build-report", + str(report), + "//fail_action:error_handler_failed", + ), + stderr_regex="Failed to build 'root//fail_action:error_handler_failed", + ) + + with open(report) as f: + report = f.read() + + assert "Error handler failed" in report + assert "fail: something went wrong" in report + + +@buck_test() +async def test_error_handler_wrong_return_type(buck: Buck, tmp_path: Path) -> None: + # Starlark error messages change across different modes for some reason (ex: opt-asan vs opt). + # We have a fair amount of coverage for other functionalities of error handler/build report, + # so let's just add a simple test here. + report = tmp_path / "build-report.json" + + await expect_failure( + buck.build( + "--build-report", + str(report), + "//fail_action:error_handler_wrong_return_type", + ), + stderr_regex="Failed to build 'root//fail_action:error_handler_wrong_return_type", + ) + + with open(report) as f: + report = f.read() + + assert "Error handler failed" in report + assert ( + "Expected return type `list[ActionSubError]`, got value with type `int`" + in report + ) + + +@buck_test() +async def test_missing_report_on_wrong_package(buck: Buck, tmp_path: Path) -> None: + # If we specify a non-existent package, we don't get an error report + report = tmp_path / "build-report.json" + await expect_failure( + buck.build("--build-report", str(report), "//nopackage/..."), + stderr_regex="Error resolving recursive target pattern", + ) + if report.exists(): + raise AssertionError("Expected no report to be written") diff --git a/tests/core/build/test_build_report_errors_data/.buckconfig b/tests/core/build/test_build_report_errors_data/.buckconfig new file mode 100644 index 0000000000000..58907c0b4a100 --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/.buckconfig @@ -0,0 +1,5 @@ +[buildfile] +name = TARGETS.fixture + +[repositories] +root = . 
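A detail worth calling out from `test_two_action_dep_failures` above: error messages in the build report are interned in a top-level `strings` table and referenced by numeric `message_content` keys. Reading one back looks roughly like this (the report shape is as used by that test; the helper name is ours):

```python
import json
from typing import List


def error_messages(report_path: str, target: str) -> List[str]:
    # Resolve each error's message_content index through the interned
    # strings table, for the target's single configured entry.
    with open(report_path) as f:
        report = json.load(f)
    strings = report["strings"]
    [configured] = report["results"][target]["configured"].values()
    return [strings[str(e["message_content"])] for e in configured["errors"]]
```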
diff --git a/tests/core/build/test_build_report_errors_data/.buckroot b/tests/core/build/test_build_report_errors_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_build_report_errors_data/config/TARGETS.fixture b/tests/core/build/test_build_report_errors_data/config/TARGETS.fixture new file mode 100644 index 0000000000000..8503618f07746 --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/config/TARGETS.fixture @@ -0,0 +1,25 @@ +load(":config_defs.bzl", "constraint_setting", "constraint_value", "platform") + +constraint_setting( + name = "animal", +) + +constraint_value( + name = "dog", + setting = ":animal", +) + +platform( + name = "dog_platform", + configuration = ":dog", +) + +constraint_value( + name = "cat", + setting = ":animal", +) + +platform( + name = "cat_platform", + configuration = ":cat", +) diff --git a/tests/core/build/test_build_report_errors_data/config/config_defs.bzl b/tests/core/build/test_build_report_errors_data/config/config_defs.bzl new file mode 100644 index 0000000000000..ed69ff37f09eb --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/config/config_defs.bzl @@ -0,0 +1,43 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _constraint_setting(ctx): + return [DefaultInfo(), ConstraintSettingInfo(label = ctx.label.raw_target())] + +constraint_setting = rule( + impl = _constraint_setting, + attrs = {}, +) + +def _constraint_value(ctx): + constraint_value = ConstraintValueInfo( + setting = ctx.attrs.setting[ConstraintSettingInfo], + label = ctx.label.raw_target(), + ) + return [ + DefaultInfo(), + constraint_value, + # Provide `ConfigurationInfo` from `constraint_value` so it could be used as select key. 
+ ConfigurationInfo(constraints = { + constraint_value.setting.label: constraint_value, + }, values = {}), + ] + +constraint_value = rule( + impl = _constraint_value, + attrs = {"setting": attrs.dep(providers = [ConstraintSettingInfo])}, +) + +def _platform(ctx): + cfg = ConfigurationInfo(constraints = ctx.attrs.configuration[ConfigurationInfo].constraints, values = {}) + + return [DefaultInfo(), PlatformInfo(label = str(ctx.label.raw_target()), configuration = cfg)] + +platform = rule( + impl = _platform, + attrs = {"configuration": attrs.dep(providers = [ConfigurationInfo])}, +) diff --git a/tests/core/build/test_build_report_errors_data/fail_action/TARGETS.fixture b/tests/core/build/test_build_report_errors_data/fail_action/TARGETS.fixture new file mode 100644 index 0000000000000..a684b9cbbe62a --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fail_action/TARGETS.fixture @@ -0,0 +1,63 @@ +load(":fail_action.bzl", "action_alias", "fail_one", "fail_script", "fail_shared_dep", "fail_two", "fail_two_deps") +load(":fail_action_with_error_handler.bzl", "error_handler_failed", "error_handler_produced_multiple_categories", "error_handler_wrong_return_type", "fail_many_with_error_handler", "fail_one_with_error_handler", "fail_one_with_error_handler_no_op") + +fail_one( + name = "fail_one", + default_target_platform = "//config:dog_platform", +) + +fail_two( + name = "fail_two", + default_target_platform = "//config:dog_platform", +) + +fail_shared_dep( + name = "fail_shared_dep", + default_target_platform = "//config:dog_platform", +) + +action_alias( + name = "alias_a", + actual = ":fail_one", + default_target_platform = "//config:dog_platform", +) + +action_alias( + name = "alias_b", + actual = ":fail_one", + default_target_platform = "//config:dog_platform", +) + +fail_two_deps( + name = "fail_two_deps", +) + +fail_script( + name = "fail_script", +) + +fail_one_with_error_handler( + name = "fail_one_with_error_handler", + src = "fail_compile.py", +) + +fail_many_with_error_handler( + name = "fail_many_with_error_handler", + srcs = ["fail_compile.py", "fail_indent.py"], +) + +fail_one_with_error_handler_no_op( + name = "fail_one_with_error_handler_no_op", +) + +error_handler_failed( + name = "error_handler_failed", +) + +error_handler_wrong_return_type( + name = "error_handler_wrong_return_type", +) + +error_handler_produced_multiple_categories( + name = "error_handler_produced_multiple_categories", +) diff --git a/tests/core/build/test_build_report_errors_data/fail_action/fail_action.bzl b/tests/core/build/test_build_report_errors_data/fail_action/fail_action.bzl new file mode 100644 index 0000000000000..4cb84a1c42c89 --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fail_action/fail_action.bzl @@ -0,0 +1,93 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +def _make_failing_action(ctx, name): + out = ctx.actions.declare_output(name) + ctx.actions.run(cmd_args("false", hidden = out.as_output()), category = "fail" + name) + return out + +def _action_alias(ctx, actual, name): + out = ctx.actions.declare_output(name) + ctx.actions.run( + cmd_args("doesntmatter", hidden = [out.as_output(), actual]), + category = "alias" + name, + ) + return out + +def _fail_one_impl(ctx): + return [DefaultInfo(default_outputs = [_make_failing_action(ctx, "name")])] + +def _fail_two_impl(ctx): + return [DefaultInfo(default_outputs = [ + _make_failing_action(ctx, "name_a"), + _make_failing_action(ctx, "name_b"), + ])] + +def _fail_shared_dep_impl(ctx): + fail = _make_failing_action(ctx, "fail") + a = _action_alias(ctx, fail, "a") + b = _action_alias(ctx, fail, "b") + return [DefaultInfo(default_outputs = [a, b])] + +def _action_alias_impl(ctx): + actual = ctx.attrs.actual[DefaultInfo].default_outputs[0] + aliased = _action_alias(ctx, actual, "aliased") + return [DefaultInfo(default_outputs = [aliased])] + +def _fail_two_deps_impl(ctx): + first = _make_failing_action(ctx, "first") + second = _make_failing_action(ctx, "second") + out = ctx.actions.declare_output("out") + ctx.actions.run( + cmd_args("doesntmatter", hidden = [first, second, out.as_output()]), + category = "out", + ) + return [DefaultInfo(default_outputs = [out])] + +def _fail_script_impl(ctx): + out = ctx.actions.declare_output("fail_script") + ctx.actions.run( + [ + "python3", + "-c", + "import sys; print('Some random stdout', file=sys.stdout); print('Some random stderr', file=sys.stderr); sys.exit(1)", + out.as_output(), + ], + category = "fail_script", + ) + + return [DefaultInfo(default_outputs = [out])] + +fail_one = rule( + impl = _fail_one_impl, + attrs = {}, +) + +fail_two = rule( + impl = _fail_two_impl, + attrs = {}, +) + +fail_shared_dep = rule( + impl = _fail_shared_dep_impl, + attrs = {}, +) + +action_alias = rule( + impl = _action_alias_impl, + attrs = {"actual": attrs.dep()}, +) + +fail_two_deps = rule( + impl = _fail_two_deps_impl, + attrs = {}, +) + +fail_script = rule( + impl = _fail_script_impl, + attrs = {}, +) diff --git a/tests/core/build/test_build_report_errors_data/fail_action/fail_action_with_error_handler.bzl b/tests/core/build/test_build_report_errors_data/fail_action/fail_action_with_error_handler.bzl new file mode 100644 index 0000000000000..f65ac89100112 --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fail_action/fail_action_with_error_handler.bzl @@ -0,0 +1,144 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
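+
+# Variants of the failing rules above that attach an `error_handler` to the
+# action: a handler that categorizes Python compile errors, a handler with
+# nothing to match, a handler that itself fails, one with a wrong return
+# type, and one that reports multiple sub-error categories.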
+
+def _error_handler_impl(ctx: ActionErrorCtx) -> list[ActionSubError]:
+    indentation_error = regex(r"IndentationError")
+    syntax_error = regex(r"SyntaxError")
+    categories = []
+
+    if indentation_error.match(ctx.stderr):
+        categories.append(ctx.new_sub_error(
+            category = "indentation",
+            message = "Indentation error!",
+        ))
+
+    if syntax_error.match(ctx.stderr):
+        categories.append(ctx.new_sub_error(
+            category = "syntax",
+            message = "Syntax error!",
+            locations = [
+                # Using regex to find the file is a pain, but let's at least show
+                # that `location` is emitted to the build report as expected
+                ctx.new_error_location(file = "not_really_the_right_file", line = 1),
+            ],
+        ))
+
+    return categories
+
+def _make_failing_action(ctx, src, name):
+    out = ctx.actions.declare_output(src.short_path)
+    ctx.actions.run(
+        [
+            "python3",
+            src,
+            out.as_output(),
+        ],
+        local_only = True,
+        category = name,
+        error_handler = _error_handler_impl,
+    )
+
+    return out
+
+def _fail_one(ctx):
+    return [DefaultInfo(default_outputs = [_make_failing_action(ctx, ctx.attrs.src, ctx.attrs.name)])]
+
+def _fail_many(ctx):
+    return [DefaultInfo(default_outputs = [_make_failing_action(ctx, src, ctx.attrs.name + str(i)) for (i, src) in enumerate(ctx.attrs.srcs)])]
+
+def _make_failing_action_no_source(ctx, error_handler):
+    out = ctx.actions.declare_output(ctx.attrs.name)
+
+    # error handler is invoked, but the action fails with empty stdout/stderr, so there is nothing to inspect
+    ctx.actions.run(
+        cmd_args("false", hidden = out.as_output()),
+        category = ctx.attrs.name,
+        error_handler = error_handler,
+    )
+
+    return out
+
+def _fail_one_no_op(ctx):
+    # error handler is invoked but won't catch anything
+    return [DefaultInfo(default_outputs = [_make_failing_action_no_source(ctx, _error_handler_impl)])]
+
+def _error_handler_failed(ctx):
+    def f(_ctx) -> list[ActionSubError]:
+        fail("something went wrong")
+
+    return [DefaultInfo(default_outputs = [_make_failing_action_no_source(ctx, f)])]
+
+def _error_handler_wrong_return_type(ctx):
+    def f(_ctx) -> int:
+        return 1
+
+    return [DefaultInfo(default_outputs = [_make_failing_action_no_source(ctx, f)])]
+
+def _error_handler_produced_multiple_categories(ctx):
+    def error_handler(ctx: ActionErrorCtx) -> list[ActionSubError]:
+        categories = []
+
+        categories.append(ctx.new_sub_error(
+            category = "category1",
+            message = "Message for category1",
+        ))
+
+        categories.append(ctx.new_sub_error(
+            category = "category2",
+            message = "Message for category2",
+        ))
+
+        return categories
+
+    out = ctx.actions.declare_output(ctx.attrs.name)
+
+    # error handler is invoked and unconditionally reports two sub-error categories
+    ctx.actions.run(
+        cmd_args("false", hidden = out.as_output()),
+        category = ctx.attrs.name,
+        error_handler = error_handler,
+    )
+
+    return [DefaultInfo(default_outputs = [out])]
+
+fail_one_with_error_handler = rule(
+    impl = _fail_one,
+    attrs = {
+        "src": attrs.source(),
+    },
+)
+
+fail_many_with_error_handler = rule(
+    impl = _fail_many,
+    attrs = {
+        "srcs": attrs.list(attrs.source()),
+    },
+)
+
+fail_one_with_error_handler_no_op = rule(
+    impl = _fail_one_no_op,
+    attrs = {
+    },
+)
+
+error_handler_failed = rule(
+    impl = _error_handler_failed,
+    attrs = {
+    },
+)
+
+error_handler_wrong_return_type = rule(
+    impl = _error_handler_wrong_return_type,
+    attrs = {
+    },
+)
+
+error_handler_produced_multiple_categories = rule(
+    impl = _error_handler_produced_multiple_categories,
+    attrs = {
+    },
+)
diff --git a/tests/core/build/test_build_report_errors_data/fail_action/fail_compile.py
b/tests/core/build/test_build_report_errors_data/fail_action/fail_compile.py new file mode 100644 index 0000000000000..e696fc2e5764f --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fail_action/fail_compile.py @@ -0,0 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# @nolint +does not compile diff --git a/tests/core/build/test_build_report_errors_data/fail_action/fail_indent.py b/tests/core/build/test_build_report_errors_data/fail_action/fail_indent.py new file mode 100644 index 0000000000000..ae5e754a79e6a --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fail_action/fail_indent.py @@ -0,0 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# @nolint +i = 1 + j = 2 diff --git a/tests/core/build/test_build_report_errors_data/fail_analysis/TARGETS.fixture b/tests/core/build/test_build_report_errors_data/fail_analysis/TARGETS.fixture new file mode 100644 index 0000000000000..0c4386b138800 --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fail_analysis/TARGETS.fixture @@ -0,0 +1,11 @@ +load("//:utils.bzl", "alias") +load(":fail_analysis.bzl", "fail_analysis") + +fail_analysis( + name = "fail_analysis", +) + +alias( + name = "alias", + actual = ":fail_analysis", +) diff --git a/tests/core/build/test_build_report_errors_data/fail_analysis/fail_analysis.bzl b/tests/core/build/test_build_report_errors_data/fail_analysis/fail_analysis.bzl new file mode 100644 index 0000000000000..183dfc9a271a5 --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fail_analysis/fail_analysis.bzl @@ -0,0 +1,14 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(_ctx): + fail("Failure during analysis") + +fail_analysis = rule( + impl = _impl, + attrs = {}, +) diff --git a/tests/core/build/test_build_report_errors_data/fail_config/TARGETS.fixture b/tests/core/build/test_build_report_errors_data/fail_config/TARGETS.fixture new file mode 100644 index 0000000000000..9c1c30cf8d673 --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fail_config/TARGETS.fixture @@ -0,0 +1,9 @@ +load(":fail_config.bzl", "string_rule") + +string_rule( + name = "cat_only", + str = select({ + "//config:cat": "s", + }), + default_target_platform = "//config:not_a_target_platform", +) diff --git a/tests/core/build/test_build_report_errors_data/fail_config/fail_config.bzl b/tests/core/build/test_build_report_errors_data/fail_config/fail_config.bzl new file mode 100644 index 0000000000000..245e06ddca6e4 --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fail_config/fail_config.bzl @@ -0,0 +1,14 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(_ctx): + fail("Not invoked") + +string_rule = rule( + impl = _impl, + attrs = {"str": attrs.string()}, +) diff --git a/tests/core/build/test_build_report_errors_data/fail_load/TARGETS.fixture b/tests/core/build/test_build_report_errors_data/fail_load/TARGETS.fixture new file mode 100644 index 0000000000000..ae923a8707d23 --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fail_load/TARGETS.fixture @@ -0,0 +1 @@ +fail("Non-specific load failure") diff --git a/tests/core/build/test_build_report_errors_data/fixtures/test_action_fail_many_with_error_handler.golden.json b/tests/core/build/test_build_report_errors_data/fixtures/test_action_fail_many_with_error_handler.golden.json new file mode 100644 index 0000000000000..0cfab92cf48e3 --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fixtures/test_action_fail_many_with_error_handler.golden.json @@ -0,0 +1,95 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "failures": { + "root//fail_action:fail_many_with_error_handler": "Action failed: root//fail_action:fail_many_with_error_handler () (fail_many_with_error_handler1)\nLocal command returned non-zero exit code 1\nLocal command: env -- 'BUCK_SCRATCH_PATH=buck-out/v2/tmp/root//fail_many_with_error_handler1' python3 fail_action/fail_indent.py buck-out/v2/gen/root//fail_action/__fail_many_with_error_handler__/fail_indent.py\nStdout: \nStderr:\n File \"fail_action/fail_indent.py\", line 10\n j = 2\n ^\nIndentationError: unexpected indent\n\nAction sub-errors produced by error handlers:\n- [indentation] Indentation error!\n" + }, + "results": { + "root//fail_action:fail_many_with_error_handler": { + "configured": { + "": { + "configured_graph_size": null, + "errors": [ + { + "action_error": { + "digest": "", + "error_content": "", + "error_diagnostics": { + "sub_errors": [ + { + "category": "syntax", + "locations": [ + { + "file": "not_really_the_right_file", + "line": 1 + } + ], + "message_content": "" + } + ] + }, + "key": { + "owner": "root//fail_action:fail_many_with_error_handler ()" + }, + "name": { + "category": "fail_many_with_error_handler0", + "identifier": "" + }, + "stderr_content": "", + "stdout_content": "" + }, + "cause_index": 0, + "message_content": "" + }, + { + "action_error": { + "digest": "", + "error_content": "", + "error_diagnostics": { + "sub_errors": [ + { + "category": "indentation", + "locations": null, + "message_content": "" + } + ] + }, + "key": { + "owner": "root//fail_action:fail_many_with_error_handler ()" + }, + "name": { + "category": "fail_many_with_error_handler1", + "identifier": "" + }, + "stderr_content": "", + "stdout_content": "" + }, + "cause_index": 1, + "message_content": "" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "configured_graph_size": null, + "errors": [], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "strings": { + "": "", + "": " File \"fail_action/fail_compile.py\", line 9\n does not compile\n ^\nSyntaxError: invalid syntax\n", + "": " File \"fail_action/fail_indent.py\", line 10\n j = 2\n ^\nIndentationError: unexpected indent\n", + "": "Action failed: root//fail_action:fail_many_with_error_handler () 
(fail_many_with_error_handler0)\nLocal command returned non-zero exit code 1\nLocal command: env -- 'BUCK_SCRATCH_PATH=buck-out/v2/tmp/root//fail_many_with_error_handler0' python3 fail_action/fail_compile.py buck-out/v2/gen/root//fail_action/__fail_many_with_error_handler__/fail_compile.py\nStdout: \nStderr:\n File \"fail_action/fail_compile.py\", line 9\n does not compile\n ^\nSyntaxError: invalid syntax\n\nAction sub-errors produced by error handlers:\n- [syntax] Syntax error!\n", + "": "Action failed: root//fail_action:fail_many_with_error_handler () (fail_many_with_error_handler1)\nLocal command returned non-zero exit code 1\nLocal command: env -- 'BUCK_SCRATCH_PATH=buck-out/v2/tmp/root//fail_many_with_error_handler1' python3 fail_action/fail_indent.py buck-out/v2/gen/root//fail_action/__fail_many_with_error_handler__/fail_indent.py\nStdout: \nStderr:\n File \"fail_action/fail_indent.py\", line 10\n j = 2\n ^\nIndentationError: unexpected indent\n\nAction sub-errors produced by error handlers:\n- [indentation] Indentation error!\n", + "": "Indentation error!", + "": "Local command returned non-zero exit code 1", + "": "Syntax error!" + }, + "success": false, + "truncated": false +} \ No newline at end of file diff --git a/tests/core/build/test_build_report_errors_data/fixtures/test_action_fail_one.golden.json b/tests/core/build/test_build_report_errors_data/fixtures/test_action_fail_one.golden.json new file mode 100644 index 0000000000000..b0021076c9f0b --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fixtures/test_action_fail_one.golden.json @@ -0,0 +1,51 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "failures": { + "root//fail_action:fail_one": "Action failed: root//fail_action:fail_one (root//config:dog_platform#) (failname)\nRemote command returned non-zero exit code 1\nRemote action, reproduce with: `frecli cas download-action `\nStdout: \nStderr: \n" + }, + "results": { + "root//fail_action:fail_one": { + "configured": { + "root//config:dog_platform#": { + "configured_graph_size": null, + "errors": [ + { + "action_error": { + "digest": "", + "error_content": "", + "error_diagnostics": null, + "key": { + "owner": "root//fail_action:fail_one (root//config:dog_platform#)" + }, + "name": { + "category": "failname", + "identifier": "" + }, + "stderr_content": "", + "stdout_content": "" + }, + "cause_index": 0, + "message_content": "" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "configured_graph_size": null, + "errors": [], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "strings": { + "": "", + "": "Action failed: root//fail_action:fail_one (root//config:dog_platform#) (failname)\nRemote command returned non-zero exit code 1\nRemote action, reproduce with: `frecli cas download-action `\nStdout: \nStderr: \n", + "": "Remote command returned non-zero exit code 1" + }, + "success": false, + "truncated": false +} \ No newline at end of file diff --git a/tests/core/build/test_build_report_errors_data/fixtures/test_action_fail_one_with_error_handler.golden.json b/tests/core/build/test_build_report_errors_data/fixtures/test_action_fail_one_with_error_handler.golden.json new file mode 100644 index 0000000000000..1def60eb551ea --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fixtures/test_action_fail_one_with_error_handler.golden.json @@ -0,0 +1,66 @@ +# This file is @generated, regenerate by re-running test with 
`-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "failures": { + "root//fail_action:fail_one_with_error_handler": "Action failed: root//fail_action:fail_one_with_error_handler () (fail_one_with_error_handler)\nLocal command returned non-zero exit code 1\nLocal command: env -- 'BUCK_SCRATCH_PATH=buck-out/v2/tmp/root//fail_one_with_error_handler' python3 fail_action/fail_compile.py buck-out/v2/gen/root//fail_action/__fail_one_with_error_handler__/fail_compile.py\nStdout: \nStderr:\n File \"fail_action/fail_compile.py\", line 9\n does not compile\n ^\nSyntaxError: invalid syntax\n\nAction sub-errors produced by error handlers:\n- [syntax] Syntax error!\n" + }, + "results": { + "root//fail_action:fail_one_with_error_handler": { + "configured": { + "": { + "configured_graph_size": null, + "errors": [ + { + "action_error": { + "digest": "", + "error_content": "", + "error_diagnostics": { + "sub_errors": [ + { + "category": "syntax", + "locations": [ + { + "file": "not_really_the_right_file", + "line": 1 + } + ], + "message_content": "" + } + ] + }, + "key": { + "owner": "root//fail_action:fail_one_with_error_handler ()" + }, + "name": { + "category": "fail_one_with_error_handler", + "identifier": "" + }, + "stderr_content": "", + "stdout_content": "" + }, + "cause_index": 0, + "message_content": "" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "configured_graph_size": null, + "errors": [], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "strings": { + "": "", + "": " File \"fail_action/fail_compile.py\", line 9\n does not compile\n ^\nSyntaxError: invalid syntax\n", + "": "Action failed: root//fail_action:fail_one_with_error_handler () (fail_one_with_error_handler)\nLocal command returned non-zero exit code 1\nLocal command: env -- 'BUCK_SCRATCH_PATH=buck-out/v2/tmp/root//fail_one_with_error_handler' python3 fail_action/fail_compile.py buck-out/v2/gen/root//fail_action/__fail_one_with_error_handler__/fail_compile.py\nStdout: \nStderr:\n File \"fail_action/fail_compile.py\", line 9\n does not compile\n ^\nSyntaxError: invalid syntax\n\nAction sub-errors produced by error handlers:\n- [syntax] Syntax error!\n", + "": "Local command returned non-zero exit code 1", + "": "Syntax error!" 
+ }, + "success": false, + "truncated": false +} \ No newline at end of file diff --git a/tests/core/build/test_build_report_errors_data/fixtures/test_action_fail_one_with_error_handler_no_op.golden.json b/tests/core/build/test_build_report_errors_data/fixtures/test_action_fail_one_with_error_handler_no_op.golden.json new file mode 100644 index 0000000000000..cc32d3fa93154 --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fixtures/test_action_fail_one_with_error_handler_no_op.golden.json @@ -0,0 +1,53 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "failures": { + "root//fail_action:fail_one_with_error_handler_no_op": "Action failed: root//fail_action:fail_one_with_error_handler_no_op () (fail_one_with_error_handler_no_op)\nRemote command returned non-zero exit code 1\nRemote action, reproduce with: `frecli cas download-action `\nStdout: \nStderr: \n\nAction sub-errors produced by error handlers: \n" + }, + "results": { + "root//fail_action:fail_one_with_error_handler_no_op": { + "configured": { + "": { + "configured_graph_size": null, + "errors": [ + { + "action_error": { + "digest": "", + "error_content": "", + "error_diagnostics": { + "sub_errors": [] + }, + "key": { + "owner": "root//fail_action:fail_one_with_error_handler_no_op ()" + }, + "name": { + "category": "fail_one_with_error_handler_no_op", + "identifier": "" + }, + "stderr_content": "", + "stdout_content": "" + }, + "cause_index": 0, + "message_content": "" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "configured_graph_size": null, + "errors": [], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "strings": { + "": "", + "": "Action failed: root//fail_action:fail_one_with_error_handler_no_op () (fail_one_with_error_handler_no_op)\nRemote command returned non-zero exit code 1\nRemote action, reproduce with: `frecli cas download-action `\nStdout: \nStderr: \n\nAction sub-errors produced by error handlers: \n", + "": "Remote command returned non-zero exit code 1" + }, + "success": false, + "truncated": false +} \ No newline at end of file diff --git a/tests/core/build/test_build_report_errors_data/fixtures/test_action_fail_shared_across_targets.golden.json b/tests/core/build/test_build_report_errors_data/fixtures/test_action_fail_shared_across_targets.golden.json new file mode 100644 index 0000000000000..c29dc2272e5cd --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fixtures/test_action_fail_shared_across_targets.golden.json @@ -0,0 +1,87 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "failures": { + "root//fail_action:alias_a": "Action failed: root//fail_action:fail_one (root//config:dog_platform#) (failname)\nRemote command returned non-zero exit code 1\nRemote action, reproduce with: `frecli cas download-action `\nStdout: \nStderr: \n", + "root//fail_action:alias_b": "Action failed: root//fail_action:fail_one (root//config:dog_platform#) (failname)\nRemote command returned non-zero exit code 1\nRemote action, reproduce with: `frecli cas download-action `\nStdout: \nStderr: \n" + }, + "results": { + "root//fail_action:alias_a": { + "configured": { + "root//config:dog_platform#": { + "configured_graph_size": null, + "errors": [ + { + "action_error": { + "digest": "", + "error_content": "", + "error_diagnostics": null, + "key": { + "owner": "root//fail_action:fail_one 
(root//config:dog_platform#)" + }, + "name": { + "category": "failname", + "identifier": "" + }, + "stderr_content": "", + "stdout_content": "" + }, + "cause_index": 0, + "message_content": "" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "configured_graph_size": null, + "errors": [], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + }, + "root//fail_action:alias_b": { + "configured": { + "root//config:dog_platform#": { + "configured_graph_size": null, + "errors": [ + { + "action_error": { + "digest": "", + "error_content": "", + "error_diagnostics": null, + "key": { + "owner": "root//fail_action:fail_one (root//config:dog_platform#)" + }, + "name": { + "category": "failname", + "identifier": "" + }, + "stderr_content": "", + "stdout_content": "" + }, + "cause_index": 0, + "message_content": "" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "configured_graph_size": null, + "errors": [], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "strings": { + "": "", + "": "Action failed: root//fail_action:fail_one (root//config:dog_platform#) (failname)\nRemote command returned non-zero exit code 1\nRemote action, reproduce with: `frecli cas download-action `\nStdout: \nStderr: \n", + "": "Remote command returned non-zero exit code 1" + }, + "success": false, + "truncated": false +} \ No newline at end of file diff --git a/tests/core/build/test_build_report_errors_data/fixtures/test_action_fail_shared_dep.golden.json b/tests/core/build/test_build_report_errors_data/fixtures/test_action_fail_shared_dep.golden.json new file mode 100644 index 0000000000000..26abf640146fe --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fixtures/test_action_fail_shared_dep.golden.json @@ -0,0 +1,51 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "failures": { + "root//fail_action:fail_shared_dep": "Action failed: root//fail_action:fail_shared_dep (root//config:dog_platform#) (failfail)\nRemote command returned non-zero exit code 1\nRemote action, reproduce with: `frecli cas download-action `\nStdout: \nStderr: \n" + }, + "results": { + "root//fail_action:fail_shared_dep": { + "configured": { + "root//config:dog_platform#": { + "configured_graph_size": null, + "errors": [ + { + "action_error": { + "digest": "", + "error_content": "", + "error_diagnostics": null, + "key": { + "owner": "root//fail_action:fail_shared_dep (root//config:dog_platform#)" + }, + "name": { + "category": "failfail", + "identifier": "" + }, + "stderr_content": "", + "stdout_content": "" + }, + "cause_index": 0, + "message_content": "" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "configured_graph_size": null, + "errors": [], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "strings": { + "": "", + "": "Action failed: root//fail_action:fail_shared_dep (root//config:dog_platform#) (failfail)\nRemote command returned non-zero exit code 1\nRemote action, reproduce with: `frecli cas download-action `\nStdout: \nStderr: \n", + "": "Remote command returned non-zero exit code 1" + }, + "success": false, + "truncated": false +} \ No newline at end of file diff --git a/tests/core/build/test_build_report_errors_data/fixtures/test_action_fail_two.golden.json b/tests/core/build/test_build_report_errors_data/fixtures/test_action_fail_two.golden.json new file mode 100644 index 
0000000000000..7bce975bc6c17 --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fixtures/test_action_fail_two.golden.json @@ -0,0 +1,70 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "failures": { + "root//fail_action:fail_two": "Action failed: root//fail_action:fail_two (root//config:dog_platform#) (failname_b)\nRemote command returned non-zero exit code 1\nRemote action, reproduce with: `frecli cas download-action `\nStdout: \nStderr: \n" + }, + "results": { + "root//fail_action:fail_two": { + "configured": { + "root//config:dog_platform#": { + "configured_graph_size": null, + "errors": [ + { + "action_error": { + "digest": "", + "error_content": "", + "error_diagnostics": null, + "key": { + "owner": "root//fail_action:fail_two (root//config:dog_platform#)" + }, + "name": { + "category": "failname_a", + "identifier": "" + }, + "stderr_content": "", + "stdout_content": "" + }, + "cause_index": 0, + "message_content": "" + }, + { + "action_error": { + "digest": "", + "error_content": "", + "error_diagnostics": null, + "key": { + "owner": "root//fail_action:fail_two (root//config:dog_platform#)" + }, + "name": { + "category": "failname_b", + "identifier": "" + }, + "stderr_content": "", + "stdout_content": "" + }, + "cause_index": 1, + "message_content": "" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "configured_graph_size": null, + "errors": [], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "strings": { + "": "", + "": "Action failed: root//fail_action:fail_two (root//config:dog_platform#) (failname_a)\nRemote command returned non-zero exit code 1\nRemote action, reproduce with: `frecli cas download-action `\nStdout: \nStderr: \n", + "": "Action failed: root//fail_action:fail_two (root//config:dog_platform#) (failname_b)\nRemote command returned non-zero exit code 1\nRemote action, reproduce with: `frecli cas download-action `\nStdout: \nStderr: \n", + "": "Remote command returned non-zero exit code 1" + }, + "success": false, + "truncated": false +} \ No newline at end of file diff --git a/tests/core/build/test_build_report_errors_data/fixtures/test_action_fail_with_stdout_stderr.golden.json b/tests/core/build/test_build_report_errors_data/fixtures/test_action_fail_with_stdout_stderr.golden.json new file mode 100644 index 0000000000000..6992defde968e --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fixtures/test_action_fail_with_stdout_stderr.golden.json @@ -0,0 +1,52 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "failures": { + "root//fail_action:fail_script": "Action failed: root//fail_action:fail_script () (fail_script)\nRemote command returned non-zero exit code 1\nRemote action, reproduce with: `frecli cas download-action `\nStdout:\nSome random stdout\nStderr:\nSome random stderr\n" + }, + "results": { + "root//fail_action:fail_script": { + "configured": { + "": { + "configured_graph_size": null, + "errors": [ + { + "action_error": { + "digest": "", + "error_content": "", + "error_diagnostics": null, + "key": { + "owner": "root//fail_action:fail_script ()" + }, + "name": { + "category": "fail_script", + "identifier": "" + }, + "stderr_content": "", + "stdout_content": "" + }, + "cause_index": 0, + "message_content": "" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + 
"configured_graph_size": null, + "errors": [], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "strings": { + "": "Action failed: root//fail_action:fail_script () (fail_script)\nRemote command returned non-zero exit code 1\nRemote action, reproduce with: `frecli cas download-action `\nStdout:\nSome random stdout\nStderr:\nSome random stderr\n", + "": "Remote command returned non-zero exit code 1", + "": "Some random stderr\n", + "": "Some random stdout\n" + }, + "success": false, + "truncated": false +} \ No newline at end of file diff --git a/tests/core/build/test_build_report_errors_data/fixtures/test_analysis_and_action.golden.json b/tests/core/build/test_build_report_errors_data/fixtures/test_analysis_and_action.golden.json new file mode 100644 index 0000000000000..78965ffcd0b15 --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fixtures/test_analysis_and_action.golden.json @@ -0,0 +1,75 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "failures": { + "root//fail_action:fail_one": "Action failed: root//fail_action:fail_one (root//config:dog_platform#) (failname)\nRemote command returned non-zero exit code 1\nRemote action, reproduce with: `frecli cas download-action `\nStdout: \nStderr: \n", + "root//fail_analysis:fail_analysis": "Error running analysis for `root//fail_analysis:fail_analysis ()`\n\nCaused by:\n Traceback (most recent call last):\n File , in \n * fail_analysis/fail_analysis.bzl:9, in _impl\n fail(\"Failure during analysis\")\n error: fail: Failure during analysis\n --> fail_analysis/fail_analysis.bzl:9:5\n |\n 9 | fail(\"Failure during analysis\")\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n |\n " + }, + "results": { + "root//fail_action:fail_one": { + "configured": { + "root//config:dog_platform#": { + "configured_graph_size": null, + "errors": [ + { + "action_error": { + "digest": "", + "error_content": "", + "error_diagnostics": null, + "key": { + "owner": "root//fail_action:fail_one (root//config:dog_platform#)" + }, + "name": { + "category": "failname", + "identifier": "" + }, + "stderr_content": "", + "stdout_content": "" + }, + "cause_index": 0, + "message_content": "" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "configured_graph_size": null, + "errors": [], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + }, + "root//fail_analysis:fail_analysis": { + "configured": { + "": { + "configured_graph_size": null, + "errors": [ + { + "action_error": null, + "cause_index": 1, + "message_content": "" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "configured_graph_size": null, + "errors": [], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "strings": { + "": "", + "": "Action failed: root//fail_action:fail_one (root//config:dog_platform#) (failname)\nRemote command returned non-zero exit code 1\nRemote action, reproduce with: `frecli cas download-action `\nStdout: \nStderr: \n", + "": "Error running analysis for `root//fail_analysis:fail_analysis ()`\n\nCaused by:\n Traceback (most recent call last):\n File , in \n * fail_analysis/fail_analysis.bzl:9, in _impl\n fail(\"Failure during analysis\")\n error: fail: Failure during analysis\n --> fail_analysis/fail_analysis.bzl:9:5\n |\n 9 | fail(\"Failure during analysis\")\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n |\n ", + "": "Remote command returned non-zero exit code 1" + }, + "success": false, + "truncated": 
false +} \ No newline at end of file diff --git a/tests/core/build/test_build_report_errors_data/fixtures/test_analysis_fail.golden.json b/tests/core/build/test_build_report_errors_data/fixtures/test_analysis_fail.golden.json new file mode 100644 index 0000000000000..7f07671ea6141 --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fixtures/test_analysis_fail.golden.json @@ -0,0 +1,36 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "failures": { + "root//fail_analysis:fail_analysis": "Error running analysis for `root//fail_analysis:fail_analysis ()`\n\nCaused by:\n Traceback (most recent call last):\n File , in \n * fail_analysis/fail_analysis.bzl:9, in _impl\n fail(\"Failure during analysis\")\n error: fail: Failure during analysis\n --> fail_analysis/fail_analysis.bzl:9:5\n |\n 9 | fail(\"Failure during analysis\")\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n |\n " + }, + "results": { + "root//fail_analysis:fail_analysis": { + "configured": { + "": { + "configured_graph_size": null, + "errors": [ + { + "action_error": null, + "cause_index": 0, + "message_content": "" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "configured_graph_size": null, + "errors": [], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "strings": { + "": "Error running analysis for `root//fail_analysis:fail_analysis ()`\n\nCaused by:\n Traceback (most recent call last):\n File , in \n * fail_analysis/fail_analysis.bzl:9, in _impl\n fail(\"Failure during analysis\")\n error: fail: Failure during analysis\n --> fail_analysis/fail_analysis.bzl:9:5\n |\n 9 | fail(\"Failure during analysis\")\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n |\n " + }, + "success": false, + "truncated": false +} \ No newline at end of file diff --git a/tests/core/build/test_build_report_errors_data/fixtures/test_analysis_fail_multi.golden.json b/tests/core/build/test_build_report_errors_data/fixtures/test_analysis_fail_multi.golden.json new file mode 100644 index 0000000000000..d5b2fbce758cf --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fixtures/test_analysis_fail_multi.golden.json @@ -0,0 +1,60 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "failures": { + "root//fail_analysis:alias": "Error running analysis for `root//fail_analysis:alias ()`\n\nCaused by:\n 0: Error running analysis for `root//fail_analysis:fail_analysis ()`\n 1: Traceback (most recent call last):\n File , in \n * fail_analysis/fail_analysis.bzl:9, in _impl\n fail(\"Failure during analysis\")\n error: fail: Failure during analysis\n --> fail_analysis/fail_analysis.bzl:9:5\n |\n 9 | fail(\"Failure during analysis\")\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n |\n ", + "root//fail_analysis:fail_analysis": "Error running analysis for `root//fail_analysis:fail_analysis ()`\n\nCaused by:\n Traceback (most recent call last):\n File , in \n * fail_analysis/fail_analysis.bzl:9, in _impl\n fail(\"Failure during analysis\")\n error: fail: Failure during analysis\n --> fail_analysis/fail_analysis.bzl:9:5\n |\n 9 | fail(\"Failure during analysis\")\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n |\n " + }, + "results": { + "root//fail_analysis:alias": { + "configured": { + "": { + "configured_graph_size": null, + "errors": [ + { + "action_error": null, + "cause_index": 0, + "message_content": "" + } + ], + "other_outputs": {}, + "outputs": {}, + 
"success": "FAIL" + } + }, + "configured_graph_size": null, + "errors": [], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + }, + "root//fail_analysis:fail_analysis": { + "configured": { + "": { + "configured_graph_size": null, + "errors": [ + { + "action_error": null, + "cause_index": 0, + "message_content": "" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "configured_graph_size": null, + "errors": [], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "strings": { + "": "Error running analysis for `root//fail_analysis:alias ()`\n\nCaused by:\n 0: Error running analysis for `root//fail_analysis:fail_analysis ()`\n 1: Traceback (most recent call last):\n File , in \n * fail_analysis/fail_analysis.bzl:9, in _impl\n fail(\"Failure during analysis\")\n error: fail: Failure during analysis\n --> fail_analysis/fail_analysis.bzl:9:5\n |\n 9 | fail(\"Failure during analysis\")\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n |\n ", + "": "Error running analysis for `root//fail_analysis:fail_analysis ()`\n\nCaused by:\n Traceback (most recent call last):\n File , in \n * fail_analysis/fail_analysis.bzl:9, in _impl\n fail(\"Failure during analysis\")\n error: fail: Failure during analysis\n --> fail_analysis/fail_analysis.bzl:9:5\n |\n 9 | fail(\"Failure during analysis\")\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n |\n " + }, + "success": false, + "truncated": false +} \ No newline at end of file diff --git a/tests/core/build/test_build_report_errors_data/fixtures/test_configuration_fail.golden.json b/tests/core/build/test_build_report_errors_data/fixtures/test_configuration_fail.golden.json new file mode 100644 index 0000000000000..141ca8d4a48aa --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fixtures/test_configuration_fail.golden.json @@ -0,0 +1,28 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "failures": { + "root//fail_config:cat_only": "Error running analysis for `root//config:not_a_target_platform ()`\n\nCaused by:\n 0: Error looking up configured node root//config:not_a_target_platform ()\n 1: looking up unconfigured target node `root//config:not_a_target_platform`\n 2: Unknown target `not_a_target_platform` from package `root//config`.\n Did you mean one of the 5 targets in root//config:TARGETS.fixture?" + }, + "results": { + "root//fail_config:cat_only": { + "configured": {}, + "configured_graph_size": null, + "errors": [ + { + "action_error": null, + "cause_index": 0, + "message_content": "" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "strings": { + "": "Error running analysis for `root//config:not_a_target_platform ()`\n\nCaused by:\n 0: Error looking up configured node root//config:not_a_target_platform ()\n 1: looking up unconfigured target node `root//config:not_a_target_platform`\n 2: Unknown target `not_a_target_platform` from package `root//config`.\n Did you mean one of the 5 targets in root//config:TARGETS.fixture?" 
+ }, + "success": false, + "truncated": false +} \ No newline at end of file diff --git a/tests/core/build/test_build_report_errors_data/fixtures/test_load_fail.golden.json b/tests/core/build/test_build_report_errors_data/fixtures/test_load_fail.golden.json new file mode 100644 index 0000000000000..e47f69a46f705 --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fixtures/test_load_fail.golden.json @@ -0,0 +1,43 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "failures": { + "root//fail_load:first": "Error evaluating build file: `root//fail_load:TARGETS.fixture`\n\nCaused by:\n Traceback (most recent call last):\n * fail_load/TARGETS.fixture:1, in \n fail(\"Non-specific load failure\")\n error: fail: Non-specific load failure\n --> fail_load/TARGETS.fixture:1:1\n |\n 1 | fail(\"Non-specific load failure\")\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n |\n ", + "root//fail_load:second": "Error evaluating build file: `root//fail_load:TARGETS.fixture`\n\nCaused by:\n Traceback (most recent call last):\n * fail_load/TARGETS.fixture:1, in \n fail(\"Non-specific load failure\")\n error: fail: Non-specific load failure\n --> fail_load/TARGETS.fixture:1:1\n |\n 1 | fail(\"Non-specific load failure\")\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n |\n " + }, + "results": { + "root//fail_load:first": { + "configured": {}, + "configured_graph_size": null, + "errors": [ + { + "action_error": null, + "cause_index": 0, + "message_content": "" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + }, + "root//fail_load:second": { + "configured": {}, + "configured_graph_size": null, + "errors": [ + { + "action_error": null, + "cause_index": 0, + "message_content": "" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "strings": { + "": "Error evaluating build file: `root//fail_load:TARGETS.fixture`\n\nCaused by:\n Traceback (most recent call last):\n * fail_load/TARGETS.fixture:1, in \n fail(\"Non-specific load failure\")\n error: fail: Non-specific load failure\n --> fail_load/TARGETS.fixture:1:1\n |\n 1 | fail(\"Non-specific load failure\")\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n |\n " + }, + "success": false, + "truncated": false +} \ No newline at end of file diff --git a/tests/core/build/test_build_report_errors_data/fixtures/test_load_fail_full_package.golden.json b/tests/core/build/test_build_report_errors_data/fixtures/test_load_fail_full_package.golden.json new file mode 100644 index 0000000000000..c959812fbd5f7 --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fixtures/test_load_fail_full_package.golden.json @@ -0,0 +1,9 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "failures": {}, + "results": {}, + "strings": {}, + "success": false, + "truncated": false +} \ No newline at end of file diff --git a/tests/core/build/test_build_report_errors_data/fixtures/test_missing.golden.json b/tests/core/build/test_build_report_errors_data/fixtures/test_missing.golden.json new file mode 100644 index 0000000000000..729b1c6679eb9 --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fixtures/test_missing.golden.json @@ -0,0 +1,28 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "failures": { + "root//missing:missing": "Unknown target `missing` from package `root//missing`.\nDid 
you mean one of the 1 targets in root//missing:TARGETS.fixture?" + }, + "results": { + "root//missing:missing": { + "configured": {}, + "configured_graph_size": null, + "errors": [ + { + "action_error": null, + "cause_index": 0, + "message_content": "" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "strings": { + "": "Unknown target `missing` from package `root//missing`.\nDid you mean one of the 1 targets in root//missing:TARGETS.fixture?" + }, + "success": false, + "truncated": false +} \ No newline at end of file diff --git a/tests/core/build/test_build_report_errors_data/fixtures/test_no_terminal_colors.golden.json b/tests/core/build/test_build_report_errors_data/fixtures/test_no_terminal_colors.golden.json new file mode 100644 index 0000000000000..6532604a6acfa --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fixtures/test_no_terminal_colors.golden.json @@ -0,0 +1,52 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "failures": { + "root//terminal_colors:terminal_colors": "Action failed: root//terminal_colors:terminal_colors () (run)\nRemote command returned non-zero exit code 1\nRemote action, reproduce with: `frecli cas download-action `\nStdout:\nHello\nStderr: \n" + }, + "results": { + "root//terminal_colors:terminal_colors": { + "configured": { + "": { + "configured_graph_size": null, + "errors": [ + { + "action_error": { + "digest": "", + "error_content": "", + "error_diagnostics": null, + "key": { + "owner": "root//terminal_colors:terminal_colors ()" + }, + "name": { + "category": "run", + "identifier": "" + }, + "stderr_content": "", + "stdout_content": "" + }, + "cause_index": 0, + "message_content": "" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "configured_graph_size": null, + "errors": [], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "strings": { + "": "", + "": "\u001b[31mHello\u001b[0m\n", + "": "Action failed: root//terminal_colors:terminal_colors () (run)\nRemote command returned non-zero exit code 1\nRemote action, reproduce with: `frecli cas download-action `\nStdout:\nHello\nStderr: \n", + "": "Remote command returned non-zero exit code 1" + }, + "success": false, + "truncated": false +} \ No newline at end of file diff --git a/tests/core/build/test_build_report_errors_data/fixtures/test_one_of_each.golden.json b/tests/core/build/test_build_report_errors_data/fixtures/test_one_of_each.golden.json new file mode 100644 index 0000000000000..b07cd8735227c --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fixtures/test_one_of_each.golden.json @@ -0,0 +1,139 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "failures": { + "root//fail_action:fail_one": "Action failed: root//fail_action:fail_one (root//config:dog_platform#) (failname)\nRemote command returned non-zero exit code 1\nRemote action, reproduce with: `frecli cas download-action `\nStdout: \nStderr: \n", + "root//fail_analysis:fail_analysis": "Error running analysis for `root//fail_analysis:fail_analysis ()`\n\nCaused by:\n Traceback (most recent call last):\n File , in \n * fail_analysis/fail_analysis.bzl:9, in _impl\n fail(\"Failure during analysis\")\n error: fail: Failure during analysis\n --> fail_analysis/fail_analysis.bzl:9:5\n |\n 9 | fail(\"Failure during analysis\")\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n |\n 
", + "root//fail_config:cat_only": "Error running analysis for `root//config:not_a_target_platform ()`\n\nCaused by:\n 0: Error looking up configured node root//config:not_a_target_platform ()\n 1: looking up unconfigured target node `root//config:not_a_target_platform`\n 2: Unknown target `not_a_target_platform` from package `root//config`.\n Did you mean one of the 5 targets in root//config:TARGETS.fixture?", + "root//fail_load:first": "Error evaluating build file: `root//fail_load:TARGETS.fixture`\n\nCaused by:\n Traceback (most recent call last):\n * fail_load/TARGETS.fixture:1, in \n fail(\"Non-specific load failure\")\n error: fail: Non-specific load failure\n --> fail_load/TARGETS.fixture:1:1\n |\n 1 | fail(\"Non-specific load failure\")\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n |\n ", + "root//missing:missing": "Unknown target `missing` from package `root//missing`.\nDid you mean one of the 1 targets in root//missing:TARGETS.fixture?" + }, + "results": { + "root//fail_action:fail_one": { + "configured": { + "root//config:dog_platform#": { + "configured_graph_size": null, + "errors": [ + { + "action_error": { + "digest": "", + "error_content": "", + "error_diagnostics": null, + "key": { + "owner": "root//fail_action:fail_one (root//config:dog_platform#)" + }, + "name": { + "category": "failname", + "identifier": "" + }, + "stderr_content": "", + "stdout_content": "" + }, + "cause_index": 0, + "message_content": "" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "configured_graph_size": null, + "errors": [], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + }, + "root//fail_analysis:fail_analysis": { + "configured": { + "": { + "configured_graph_size": null, + "errors": [ + { + "action_error": null, + "cause_index": 1, + "message_content": "" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "configured_graph_size": null, + "errors": [], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + }, + "root//fail_config:cat_only": { + "configured": {}, + "configured_graph_size": null, + "errors": [ + { + "action_error": null, + "cause_index": 2, + "message_content": "" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + }, + "root//fail_load:first": { + "configured": {}, + "configured_graph_size": null, + "errors": [ + { + "action_error": null, + "cause_index": 3, + "message_content": "" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + }, + "root//missing:missing": { + "configured": {}, + "configured_graph_size": null, + "errors": [ + { + "action_error": null, + "cause_index": 4, + "message_content": "" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + }, + "root//success:success": { + "configured": { + "": { + "configured_graph_size": null, + "errors": [], + "other_outputs": {}, + "outputs": {}, + "success": "SUCCESS" + } + }, + "configured_graph_size": null, + "errors": [], + "other_outputs": {}, + "outputs": {}, + "success": "SUCCESS" + } + }, + "strings": { + "": "", + "": "Action failed: root//fail_action:fail_one (root//config:dog_platform#) (failname)\nRemote command returned non-zero exit code 1\nRemote action, reproduce with: `frecli cas download-action `\nStdout: \nStderr: \n", + "": "Error evaluating build file: `root//fail_load:TARGETS.fixture`\n\nCaused by:\n Traceback (most recent call last):\n * fail_load/TARGETS.fixture:1, in \n fail(\"Non-specific load failure\")\n error: fail: Non-specific load failure\n --> 
fail_load/TARGETS.fixture:1:1\n |\n 1 | fail(\"Non-specific load failure\")\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n |\n ", + "": "Error running analysis for `root//config:not_a_target_platform ()`\n\nCaused by:\n 0: Error looking up configured node root//config:not_a_target_platform ()\n 1: looking up unconfigured target node `root//config:not_a_target_platform`\n 2: Unknown target `not_a_target_platform` from package `root//config`.\n Did you mean one of the 5 targets in root//config:TARGETS.fixture?", + "": "Error running analysis for `root//fail_analysis:fail_analysis ()`\n\nCaused by:\n Traceback (most recent call last):\n File , in \n * fail_analysis/fail_analysis.bzl:9, in _impl\n fail(\"Failure during analysis\")\n error: fail: Failure during analysis\n --> fail_analysis/fail_analysis.bzl:9:5\n |\n 9 | fail(\"Failure during analysis\")\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n |\n ", + "": "Remote command returned non-zero exit code 1", + "": "Unknown target `missing` from package `root//missing`.\nDid you mean one of the 1 targets in root//missing:TARGETS.fixture?" + }, + "success": false, + "truncated": false +} \ No newline at end of file diff --git a/tests/core/build/test_build_report_errors_data/fixtures/test_partially_missing.golden.json b/tests/core/build/test_build_report_errors_data/fixtures/test_partially_missing.golden.json new file mode 100644 index 0000000000000..160605f7f7dc0 --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fixtures/test_partially_missing.golden.json @@ -0,0 +1,52 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "failures": { + "root//missing:available": "Error running analysis for `root//missing:available ()`\n\nCaused by:\n Traceback (most recent call last):\n File , in \n * fail_analysis/fail_analysis.bzl:9, in _impl\n fail(\"Failure during analysis\")\n error: fail: Failure during analysis\n --> fail_analysis/fail_analysis.bzl:9:5\n |\n 9 | fail(\"Failure during analysis\")\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n |\n ", + "root//missing:missing": "Unknown target `missing` from package `root//missing`.\nDid you mean one of the 1 targets in root//missing:TARGETS.fixture?" + }, + "results": { + "root//missing:available": { + "configured": { + "": { + "configured_graph_size": null, + "errors": [ + { + "action_error": null, + "cause_index": 0, + "message_content": "" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "configured_graph_size": null, + "errors": [], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + }, + "root//missing:missing": { + "configured": {}, + "configured_graph_size": null, + "errors": [ + { + "action_error": null, + "cause_index": 1, + "message_content": "" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "strings": { + "": "Error running analysis for `root//missing:available ()`\n\nCaused by:\n Traceback (most recent call last):\n File , in \n * fail_analysis/fail_analysis.bzl:9, in _impl\n fail(\"Failure during analysis\")\n error: fail: Failure during analysis\n --> fail_analysis/fail_analysis.bzl:9:5\n |\n 9 | fail(\"Failure during analysis\")\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n |\n ", + "": "Unknown target `missing` from package `root//missing`.\nDid you mean one of the 1 targets in root//missing:TARGETS.fixture?" 
+ }, + "success": false, + "truncated": false +} \ No newline at end of file diff --git a/tests/core/build/test_build_report_errors_data/fixtures/test_stderr_could_not_produce_error_diagnostics.golden.txt b/tests/core/build/test_build_report_errors_data/fixtures/test_stderr_could_not_produce_error_diagnostics.golden.txt new file mode 100644 index 0000000000000..40f472d008a91 --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fixtures/test_stderr_could_not_produce_error_diagnostics.golden.txt @@ -0,0 +1,52 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +[] Starting new buck2 daemon... +[] Connected to new buck2 daemon. +[] Buck UI: https://www.internalfb.com/buck2/ +[] Watchman fresh instance: cleared graph state +[] RE Session: reSessionID- +[] Action failed: root//fail_action:error_handler_failed () (error_handler_failed) +[] Remote command returned non-zero exit code 1 +[] Remote action, reproduce with: `frecli cas download-action ` +[] Stdout: +[] Stderr: +[] +[] Could not produce error diagnostics: +Error handler failed: Traceback (most recent call last): + File , in + * fail_action/fail_action_with_error_handler.bzl:71, in f + fail("something went wrong") +error: fail: something went wrong + --> fail_action/fail_action_with_error_handler.bzl:71:9 + | +71 | fail("something went wrong") + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + +[] Cache hits: +[] Commands: +[] Network: +[] +[] BUILD ERRORS (1) +[] The following actions failed during the execution of this command: +[] Action failed: root//fail_action:error_handler_failed () (error_handler_failed) +[] Remote command returned non-zero exit code 1 +[] Remote action, reproduce with: `frecli cas download-action ` +[] Stdout: +[] Stderr: +[] +[] Could not produce error diagnostics: +Error handler failed: Traceback (most recent call last): + File , in + * fail_action/fail_action_with_error_handler.bzl:71, in f + fail("something went wrong") +error: fail: something went wrong + --> fail_action/fail_action_with_error_handler.bzl:71:9 + | +71 | fail("something went wrong") + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + +[] +BUILD FAILED +Failed to build 'root//fail_action:error_handler_failed ()' diff --git a/tests/core/build/test_build_report_errors_data/fixtures/test_stderr_with_empty_error_diagnostics.golden.txt b/tests/core/build/test_build_report_errors_data/fixtures/test_stderr_with_empty_error_diagnostics.golden.txt new file mode 100644 index 0000000000000..b2efa28629c0c --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fixtures/test_stderr_with_empty_error_diagnostics.golden.txt @@ -0,0 +1,32 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +[] Starting new buck2 daemon... +[] Connected to new buck2 daemon. 
+[] Buck UI: https://www.internalfb.com/buck2/ +[] Watchman fresh instance: cleared graph state +[] RE Session: reSessionID- +[] Action failed: root//fail_action:fail_one_with_error_handler_no_op () (fail_one_with_error_handler_no_op) +[] Remote command returned non-zero exit code 1 +[] Remote action, reproduce with: `frecli cas download-action ` +[] Stdout: +[] Stderr: +[] +[] Action sub-errors produced by error handlers: + +[] Cache hits: +[] Commands: +[] Network: +[] +[] BUILD ERRORS (1) +[] The following actions failed during the execution of this command: +[] Action failed: root//fail_action:fail_one_with_error_handler_no_op () (fail_one_with_error_handler_no_op) +[] Remote command returned non-zero exit code 1 +[] Remote action, reproduce with: `frecli cas download-action ` +[] Stdout: +[] Stderr: +[] +[] Action sub-errors produced by error handlers: + +[] +BUILD FAILED +Failed to build 'root//fail_action:fail_one_with_error_handler_no_op ()' diff --git a/tests/core/build/test_build_report_errors_data/fixtures/test_stderr_with_error_diagnostics.golden.txt b/tests/core/build/test_build_report_errors_data/fixtures/test_stderr_with_error_diagnostics.golden.txt new file mode 100644 index 0000000000000..18e58d1f0964b --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fixtures/test_stderr_with_error_diagnostics.golden.txt @@ -0,0 +1,36 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +[] Starting new buck2 daemon... +[] Connected to new buck2 daemon. +[] Buck UI: https://www.internalfb.com/buck2/ +[] Watchman fresh instance: cleared graph state +[] RE Session: reSessionID- +[] Action failed: root//fail_action:error_handler_produced_multiple_categories () (error_handler_produced_multiple_categories) +[] Remote command returned non-zero exit code 1 +[] Remote action, reproduce with: `frecli cas download-action ` +[] Stdout: +[] Stderr: +[] +[] Action sub-errors produced by error handlers: +- [category1] Message for category1 +- [category2] Message for category2 + +[] Cache hits: +[] Commands: +[] Network: +[] +[] BUILD ERRORS (1) +[] The following actions failed during the execution of this command: +[] Action failed: root//fail_action:error_handler_produced_multiple_categories () (error_handler_produced_multiple_categories) +[] Remote command returned non-zero exit code 1 +[] Remote action, reproduce with: `frecli cas download-action ` +[] Stdout: +[] Stderr: +[] +[] Action sub-errors produced by error handlers: +- [category1] Message for category1 +- [category2] Message for category2 + +[] +BUILD FAILED +Failed to build 'root//fail_action:error_handler_produced_multiple_categories ()' diff --git a/tests/core/build/test_build_report_errors_data/fixtures/test_stderr_with_no_error_diagnostics.golden.txt b/tests/core/build/test_build_report_errors_data/fixtures/test_stderr_with_no_error_diagnostics.golden.txt new file mode 100644 index 0000000000000..3152f5a7e1b69 --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/fixtures/test_stderr_with_no_error_diagnostics.golden.txt @@ -0,0 +1,32 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +[] Starting new buck2 daemon... +[] Connected to new buck2 daemon. 
+[] Buck UI: https://www.internalfb.com/buck2/ +[] Watchman fresh instance: cleared graph state +[] RE Session: reSessionID- +[] Action failed: root//fail_action:fail_script () (fail_script) +[] Remote command returned non-zero exit code 1 +[] Remote action, reproduce with: `frecli cas download-action ` +[] Stdout: +Some random stdout +[] Stderr: +Some random stderr + +[] Cache hits: +[] Commands: +[] Network: +[] +[] BUILD ERRORS (1) +[] The following actions failed during the execution of this command: +[] Action failed: root//fail_action:fail_script () (fail_script) +[] Remote command returned non-zero exit code 1 +[] Remote action, reproduce with: `frecli cas download-action ` +[] Stdout: +Some random stdout +[] Stderr: +Some random stderr + +[] +BUILD FAILED +Failed to build 'root//fail_action:fail_script ()' diff --git a/tests/core/build/test_build_report_errors_data/missing/TARGETS.fixture b/tests/core/build/test_build_report_errors_data/missing/TARGETS.fixture new file mode 100644 index 0000000000000..bbab23c7254a1 --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/missing/TARGETS.fixture @@ -0,0 +1,5 @@ +load("//fail_analysis:fail_analysis.bzl", "fail_analysis") + +fail_analysis( + name = "available", +) diff --git a/tests/core/build/test_build_report_errors_data/success/TARGETS.fixture b/tests/core/build/test_build_report_errors_data/success/TARGETS.fixture new file mode 100644 index 0000000000000..4e1d98130355b --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/success/TARGETS.fixture @@ -0,0 +1,5 @@ +load(":success.bzl", "nop") + +nop( + name = "success", +) diff --git a/tests/core/build/test_build_report_errors_data/success/success.bzl b/tests/core/build/test_build_report_errors_data/success/success.bzl new file mode 100644 index 0000000000000..efc30bc8806f5 --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/success/success.bzl @@ -0,0 +1,14 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(_ctx): + return [DefaultInfo()] + +nop = rule( + impl = _impl, + attrs = {}, +) diff --git a/tests/core/build/test_build_report_errors_data/terminal_colors/TARGETS.fixture b/tests/core/build/test_build_report_errors_data/terminal_colors/TARGETS.fixture new file mode 100644 index 0000000000000..e68e6a94bda7a --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/terminal_colors/TARGETS.fixture @@ -0,0 +1,5 @@ +load(":terminal_colors.bzl", "terminal_colors") + +terminal_colors( + name = "terminal_colors", +) diff --git a/tests/core/build/test_build_report_errors_data/terminal_colors/terminal_colors.bzl b/tests/core/build/test_build_report_errors_data/terminal_colors/terminal_colors.bzl new file mode 100644 index 0000000000000..a6e4529919b39 --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/terminal_colors/terminal_colors.bzl @@ -0,0 +1,22 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +def _impl(ctx): + out = ctx.actions.declare_output("out.txt") + ctx.actions.run( + cmd_args( + ["bash", "-c", 'echo -e "\\033[31mHello\\033[0m" && false'], + hidden = out.as_output(), + ), + category = "run", + ) + return [DefaultInfo(default_output = out)] + +terminal_colors = rule( + impl = _impl, + attrs = {}, +) diff --git a/tests/core/build/test_build_report_errors_data/utils.bzl b/tests/core/build/test_build_report_errors_data/utils.bzl new file mode 100644 index 0000000000000..f825f87c62d7f --- /dev/null +++ b/tests/core/build/test_build_report_errors_data/utils.bzl @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _alias(ctx): + return ctx.attrs.actual.providers + +alias = rule( + impl = _alias, + attrs = { + "actual": attrs.dep(), + }, +) diff --git a/tests/core/build/test_build_response.py b/tests/core/build/test_build_response.py new file mode 100644 index 0000000000000..9c1d9875f5e01 --- /dev/null +++ b/tests/core/build/test_build_response.py @@ -0,0 +1,60 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import typing +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.utils import filter_events + + +async def check_targets( + buck: Buck, + expected_target_names: typing.List[str], + expected_error_messages: typing.List[str], +) -> None: + build_response = await filter_events( + buck, + "Result", + "result", + "build_response", + ) + build_response = build_response[0] + build_targets = build_response["build_targets"] + assert len(build_targets) == len(expected_target_names) + for actual, expected in zip(build_targets, expected_target_names): + if expected is not None: + assert actual["target"] == expected + error_messages = build_response["errors"] + assert len(error_messages) == len(expected_error_messages) + for actual_msg, expected in zip(error_messages, expected_error_messages): + if expected is not None: + assert expected in actual_msg["message"] + + +@buck_test() +async def test_build_one_fails(buck: Buck, tmp_path: Path) -> None: + report = tmp_path / "build-report.json" + await expect_failure( + buck.build( + "--build-report", + str(report), + "//:fail", + "//:a_one", + ), + stderr_regex="Failed to build 'root//:fail", + ) + await check_targets( + buck, + ["root//:a_one", "root//:fail"], + ["Failed to build 'root//:fail ()'"], + ) diff --git a/tests/core/build/test_build_response_data/.buckconfig b/tests/core/build/test_build_response_data/.buckconfig new file mode 100644 index 0000000000000..08e0b0a3d2118 --- /dev/null +++ b/tests/core/build/test_build_response_data/.buckconfig @@ -0,0 +1,5 @@ +[cells] + root = . 
+ +[buildfile] + name=TARGETS.fixture diff --git a/tests/core/build/test_build_response_data/.buckroot b/tests/core/build/test_build_response_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_build_response_data/TARGETS.fixture b/tests/core/build/test_build_response_data/TARGETS.fixture new file mode 100644 index 0000000000000..3267ce644a153 --- /dev/null +++ b/tests/core/build/test_build_response_data/TARGETS.fixture @@ -0,0 +1,5 @@ +load(":defs.bzl", "fail", "one") + +fail(name = "fail") + +one(name = "a_one") diff --git a/tests/core/build/test_build_response_data/defs.bzl b/tests/core/build/test_build_response_data/defs.bzl new file mode 100644 index 0000000000000..4a7b03cfd9ae8 --- /dev/null +++ b/tests/core/build/test_build_response_data/defs.bzl @@ -0,0 +1,21 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _fail(ctx): + out = ctx.actions.declare_output("out") + ctx.actions.run(cmd_args("false", hidden = out.as_output()), category = "fail") + return [DefaultInfo(out)] + +fail = rule(attrs = {}, impl = _fail) + +def _one(ctx): + return [DefaultInfo(default_output = ctx.actions.write("out", "one"))] + +one = rule( + impl = _one, + attrs = {}, +) diff --git a/tests/core/build/test_build_root_executable.py b/tests/core/build/test_build_root_executable.py new file mode 100644 index 0000000000000..1a938f85ae3ba --- /dev/null +++ b/tests/core/build/test_build_root_executable.py @@ -0,0 +1,28 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + +""" +Test that when we render paths relative to the repo root, we prefix them with a +`./` to ensure the OS executes the cwd-relative path and doesn't do a $PATH +lookup for them. +""" + + +@buck_test() +async def test_build_root_executable_local(buck: Buck) -> None: + await buck.build(":top", "--local-only") + + +@buck_test() +async def test_build_root_executable_remote(buck: Buck) -> None: + await buck.build(":top", "--remote-only") diff --git a/tests/core/build/test_build_root_executable_data/.buckconfig b/tests/core/build/test_build_root_executable_data/.buckconfig new file mode 100644 index 0000000000000..425a56f43b9c4 --- /dev/null +++ b/tests/core/build/test_build_root_executable_data/.buckconfig @@ -0,0 +1,6 @@ +[repositories] + root = . 
+[repository_aliases] + prelude = root +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/build/test_build_root_executable_data/.buckroot b/tests/core/build/test_build_root_executable_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_build_root_executable_data/TARGETS.fixture b/tests/core/build/test_build_root_executable_data/TARGETS.fixture new file mode 100644 index 0000000000000..0f4a7328f5718 --- /dev/null +++ b/tests/core/build/test_build_root_executable_data/TARGETS.fixture @@ -0,0 +1,3 @@ +load(":defs.bzl", "top") + +top(name = "top", src = "touch.bat" if host_info().os.is_windows else "touch.sh") diff --git a/tests/core/build/test_build_root_executable_data/defs.bzl b/tests/core/build/test_build_root_executable_data/defs.bzl new file mode 100644 index 0000000000000..c5c01c09432f6 --- /dev/null +++ b/tests/core/build/test_build_root_executable_data/defs.bzl @@ -0,0 +1,18 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _top(ctx): + out = ctx.actions.declare_output("out") + + ctx.actions.run( + [ctx.attrs.src, out.as_output()], + category = "test", + ) + + return [DefaultInfo(out)] + +top = rule(impl = _top, attrs = {"src": attrs.source()}) diff --git a/tests/core/build/test_build_root_executable_data/prelude.bzl b/tests/core/build/test_build_root_executable_data/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_build_root_executable_data/touch.bat b/tests/core/build/test_build_root_executable_data/touch.bat new file mode 100644 index 0000000000000..c017c8409d90f --- /dev/null +++ b/tests/core/build/test_build_root_executable_data/touch.bat @@ -0,0 +1,8 @@ +@REM Copyright (c) Meta Platforms, Inc. and affiliates. +@REM +@REM This source code is licensed under both the MIT license found in the +@REM LICENSE-MIT file in the root directory of this source tree and the Apache +@REM License, Version 2.0 found in the LICENSE-APACHE file in the root directory +@REM of this source tree. + +type nul > "%1" diff --git a/tests/core/build/test_build_root_executable_data/touch.sh b/tests/core/build/test_build_root_executable_data/touch.sh new file mode 100755 index 0000000000000..795a4fcb04d2e --- /dev/null +++ b/tests/core/build/test_build_root_executable_data/touch.sh @@ -0,0 +1,9 @@ +#!/bin/sh +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +touch "$1" diff --git a/tests/core/build/test_build_rule_type_name_logging.py b/tests/core/build/test_build_rule_type_name_logging.py new file mode 100644 index 0000000000000..a57453c35fa7d --- /dev/null +++ b/tests/core/build/test_build_rule_type_name_logging.py @@ -0,0 +1,73 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + + +import typing + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.utils import filter_events + + +async def check_rule_type_names( + buck: Buck, expected_rule_type_names: typing.List[typing.Optional[str]] +) -> None: + rule_names = await filter_events( + buck, + "Result", + "result", + "build_response", + "build_targets", + ) + rule_names = rule_names[0] + assert len(rule_names) == len(expected_rule_type_names) + for actual, expected in zip(rule_names, expected_rule_type_names): + if expected is not None: + assert actual["target_rule_type_name"] == expected + + +@buck_test() +async def test_build_nested_subtargets(buck: Buck) -> None: + await buck.build( + "//:nested[sub][nested_sub]", + ) + await check_rule_type_names(buck, ["nested_subtargets"]) + + +@buck_test() +async def test_build_single_dep_touch(buck: Buck) -> None: + await buck.build( + "//:rule1", + ) + await check_rule_type_names(buck, ["one"]) + + +@buck_test() +async def test_build_two_out_of_order(buck: Buck) -> None: + await buck.build( + "//:rule1", + "//:nested[sub][nested_sub]", + ) + await check_rule_type_names(buck, ["nested_subtargets", "one"]) + + +@buck_test() +async def test_build_all_in_target(buck: Buck) -> None: + await buck.build( + "//:", + ) + await check_rule_type_names(buck, ["two", "nested_subtargets", "one", "one"]) + + +@buck_test() +async def test_build_all_recursive(buck: Buck) -> None: + await buck.build( + "//...", + ) + await check_rule_type_names(buck, ["two", "nested_subtargets", "one", "one"]) diff --git a/tests/core/build/test_build_rule_type_name_logging_data/.buckconfig b/tests/core/build/test_build_rule_type_name_logging_data/.buckconfig new file mode 100644 index 0000000000000..08e0b0a3d2118 --- /dev/null +++ b/tests/core/build/test_build_rule_type_name_logging_data/.buckconfig @@ -0,0 +1,5 @@ +[cells] + root = . + +[buildfile] + name=TARGETS.fixture diff --git a/tests/core/build/test_build_rule_type_name_logging_data/.buckroot b/tests/core/build/test_build_rule_type_name_logging_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_build_rule_type_name_logging_data/TARGETS.fixture b/tests/core/build/test_build_rule_type_name_logging_data/TARGETS.fixture new file mode 100644 index 0000000000000..7d69359b571aa --- /dev/null +++ b/tests/core/build/test_build_rule_type_name_logging_data/TARGETS.fixture @@ -0,0 +1,18 @@ +load(":defs.bzl", "nested_subtargets", "one", "two") + +nested_subtargets( + name = "nested", +) + +one( + name = "rule0", +) + +one( + name = "rule1", + deps = [":rule0"], +) + +two( + name = "a_writer", +) diff --git a/tests/core/build/test_build_rule_type_name_logging_data/defs.bzl b/tests/core/build/test_build_rule_type_name_logging_data/defs.bzl new file mode 100644 index 0000000000000..4720c07cf9b7a --- /dev/null +++ b/tests/core/build/test_build_rule_type_name_logging_data/defs.bzl @@ -0,0 +1,42 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +def _one(ctx): + return [DefaultInfo(default_output = ctx.actions.write("out", "one"))] + +one = rule( + impl = _one, + attrs = { + "deps": attrs.list(attrs.dep(), default = []), + }, +) + +def _two(ctx): + return [DefaultInfo(default_output = ctx.actions.write("out", "two"))] + +two = rule( + impl = _two, + attrs = {}, +) + +def _nested_subtargets(ctx): + out = ctx.actions.write("foo", "foo_content") + + nested_info = [DefaultInfo( + sub_targets = {"nested_sub": [ + DefaultInfo(default_output = out), + ]}, + )] + + return [DefaultInfo( + sub_targets = {"sub": nested_info}, + )] + +nested_subtargets = rule( + impl = _nested_subtargets, + attrs = {}, +) diff --git a/tests/core/build/test_build_skip_incompatible_targets.py b/tests/core/build/test_build_skip_incompatible_targets.py new file mode 100644 index 0000000000000..091cbd2eefeb7 --- /dev/null +++ b/tests/core/build/test_build_skip_incompatible_targets.py @@ -0,0 +1,43 @@ +#!/usr/bin/env fbpython +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_build_skip_incompatible(buck: Buck) -> None: + targetA = "root//:compatible-with-A" + targetB = "root//:compatible-with-B" + platformA = "root//:platA" + + await expect_failure( + buck.build( + targetA, + targetB, + f"--target-platforms={platformA}", + ), + stderr_regex=f"{targetB} is incompatible with {platformA}", + ) + + result = await buck.build( + targetA, + targetB, + f"--target-platforms={platformA}", + "--skip-incompatible-targets", + ) + assert f"Skipping target incompatible node `{targetB}" in result.stderr + + report = result.get_build_report() + assert len(report.results) == 2 + assert len(report.results[targetA]["configured"]) == 1 + assert len(report.results[targetB]["configured"]) == 0 diff --git a/tests/core/build/test_build_skip_incompatible_targets_data/.buckconfig b/tests/core/build/test_build_skip_incompatible_targets_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/build/test_build_skip_incompatible_targets_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . 
+ nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/build/test_build_skip_incompatible_targets_data/.buckroot b/tests/core/build/test_build_skip_incompatible_targets_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_build_skip_incompatible_targets_data/TARGETS.fixture b/tests/core/build/test_build_skip_incompatible_targets_data/TARGETS.fixture new file mode 100644 index 0000000000000..048b2d0fc8663 --- /dev/null +++ b/tests/core/build/test_build_skip_incompatible_targets_data/TARGETS.fixture @@ -0,0 +1,24 @@ +constraint_setting(name = "s") + +constraint_value(name = "A", constraint_setting = ":s") +constraint_value(name = "B", constraint_setting = ":s") + +platform( + name = "platA", + constraint_values = [":A"], +) + +platform( + name = "platB", + constraint_values = [":B"], +) + +trivial_build( + name = "compatible-with-A", + target_compatible_with = [":A"], +) + +trivial_build( + name = "compatible-with-B", + target_compatible_with = [":B"], +) diff --git a/tests/core/build/test_build_system_info.py b/tests/core/build/test_build_system_info.py new file mode 100644 index 0000000000000..d470f293e05cb --- /dev/null +++ b/tests/core/build/test_build_system_info.py @@ -0,0 +1,31 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.utils import filter_events + + +@buck_test() +async def test_build_system_info(buck: Buck) -> None: + await buck.build( + "//:test", + ) + + system_info = await filter_events( + buck, + "Event", + "data", + "Instant", + "data", + "SystemInfo", + ) + assert len(system_info) == 1 + assert system_info[0]["system_total_memory_bytes"] > 0 + assert system_info[0]["total_disk_space_bytes"] > 0 diff --git a/tests/core/build/test_build_system_info_data/.buckconfig b/tests/core/build/test_build_system_info_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/build/test_build_system_info_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . + nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/build/test_build_system_info_data/.buckroot b/tests/core/build/test_build_system_info_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_build_system_info_data/TARGETS.fixture b/tests/core/build/test_build_system_info_data/TARGETS.fixture new file mode 100644 index 0000000000000..cf2d114457c4c --- /dev/null +++ b/tests/core/build/test_build_system_info_data/TARGETS.fixture @@ -0,0 +1 @@ +stub(name = "test") diff --git a/tests/core/build/test_cancellation.py b/tests/core/build/test_cancellation.py new file mode 100644 index 0000000000000..fc86094a2463b --- /dev/null +++ b/tests/core/build/test_cancellation.py @@ -0,0 +1,77 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+
+import asyncio
+import os
+import signal
+from pathlib import Path
+from typing import Callable, List
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.api.buck_result import BuckException, BuckResult
+from buck2.tests.e2e_util.api.process import Process
+from buck2.tests.e2e_util.buck_workspace import buck_test
+
+
+async def _test_cancellation_helper(
+    buck: Buck,
+    tmp_path: Path,
+    runner: Callable[[Buck, List[str]], Process[BuckResult, BuckException]],
+) -> None:
+    """
+    This test starts a build whose actions each write their PID to a file and
+    then run for 60 seconds. We test cancellation by sending a CTRL+C as soon
+    as the first action starts. We then check that the processes exited, and
+    that nothing else started (or, if anything did, that it stopped).
+    """
+    opts = ["-c", f"test.pids={tmp_path}", "-c", "test.duration=60"]
+    await buck.audit("providers", ":slow", *opts)
+    command = runner(buck, [*opts, "--local-only"])
+
+    command = await command.start()
+
+    for _i in range(30):
+        await asyncio.sleep(1)
+        pids = os.listdir(tmp_path)
+        if pids:
+            break
+    else:
+        raise Exception("Commands never started")
+
+    command.send_signal(signal.SIGINT)
+    await command.communicate()  # Wait for the command to exit
+
+    # Give stuff time to settle; PIDs don't necessarily disappear
+    # instantly. Also, verify that we are not starting more tests.
+    await asyncio.sleep(5)
+
+    # At this point, nothing should be alive.
+    pids = os.listdir(tmp_path)
+    for pid in pids:
+        try:
+            os.kill(int(pid), 0)  # signal 0: probe for existence, sends nothing
+        except OSError:
+            pass
+        else:
+            raise Exception(f"PID existed: {pid}")
+
+
+@buck_test()
+async def test_cancellation(buck: Buck, tmp_path: Path) -> None:
+    await _test_cancellation_helper(
+        buck, tmp_path, lambda buck, opts: buck.build(*opts, ":slow")
+    )
+
+
+@buck_test()
+async def test_cancellation_bxl(buck: Buck, tmp_path: Path) -> None:
+    await _test_cancellation_helper(
+        buck, tmp_path, lambda buck, opts: buck.bxl(*opts, "//build.bxl:build")
+    )
diff --git a/tests/core/build/test_cancellation_data/.buckconfig b/tests/core/build/test_cancellation_data/.buckconfig
new file mode 100644
index 0000000000000..6a2e48bc41d8a
--- /dev/null
+++ b/tests/core/build/test_cancellation_data/.buckconfig
@@ -0,0 +1,9 @@
+[buildfile]
+name=TARGETS.fixture
+
+[project]
+ignore=ignored
+
+[repositories]
+root = .
+prelude = prelude diff --git a/tests/core/build/test_cancellation_data/.buckroot b/tests/core/build/test_cancellation_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_cancellation_data/TARGETS.fixture b/tests/core/build/test_cancellation_data/TARGETS.fixture new file mode 100644 index 0000000000000..045d0b03616ce --- /dev/null +++ b/tests/core/build/test_cancellation_data/TARGETS.fixture @@ -0,0 +1,9 @@ +load(":defs.bzl", "slow_actions") + +slow_actions( + name = "slow", + pids = read_config("test", "pids"), + duration = read_config("test", "duration"), + count = 10, + src = "slow.py", +) diff --git a/tests/core/build/test_cancellation_data/build.bxl b/tests/core/build/test_cancellation_data/build.bxl new file mode 100644 index 0000000000000..d1e5aa10ad571 --- /dev/null +++ b/tests/core/build/test_cancellation_data/build.bxl @@ -0,0 +1,15 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _build_impl(ctx): + ctx.build("//:slow") + +build = bxl_main( + impl = _build_impl, + cli_args = { + }, +) diff --git a/tests/core/build/test_cancellation_data/defs.bzl b/tests/core/build/test_cancellation_data/defs.bzl new file mode 100644 index 0000000000000..69ef45d17da2f --- /dev/null +++ b/tests/core/build/test_cancellation_data/defs.bzl @@ -0,0 +1,27 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _slow_impl(ctx: AnalysisContext) -> list[Provider]: + outs = {} + for i in range(ctx.attrs.count): + o = ctx.actions.declare_output("out/{}".format(i)) + ctx.actions.run( + ["python3", ctx.attrs.src, ctx.attrs.duration, ctx.attrs.pids, o.as_output()], + category = "test", + identifier = str(i), + ) + outs[str(i)] = o + + out = ctx.actions.symlinked_dir("outs", outs) + return [DefaultInfo(out)] + +slow_actions = rule(impl = _slow_impl, attrs = { + "count": attrs.int(), + "duration": attrs.string(), + "pids": attrs.string(), + "src": attrs.source(), +}) diff --git a/tests/core/build/test_cancellation_data/prelude/prelude.bzl b/tests/core/build/test_cancellation_data/prelude/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_cancellation_data/slow.py b/tests/core/build/test_cancellation_data/slow.py new file mode 100644 index 0000000000000..36ef6b9befc1d --- /dev/null +++ b/tests/core/build/test_cancellation_data/slow.py @@ -0,0 +1,26 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+
+import os
+import sys
+import time
+
+
+def _touch(f):
+    with open(f, "w"):
+        pass
+
+
+def main(args):
+    duration, pids, output = args
+    _touch(os.path.join(pids, str(os.getpid())))
+    time.sleep(int(duration))
+    _touch(output)
+
+
+if __name__ == "__main__":
+    main(sys.argv[1:])
diff --git a/tests/core/build/test_critical_path.py b/tests/core/build/test_critical_path.py
new file mode 100644
index 0000000000000..65219f72714bf
--- /dev/null
+++ b/tests/core/build/test_critical_path.py
@@ -0,0 +1,307 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+
+import json
+import typing
+from dataclasses import dataclass
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.buck_workspace import buck_test
+from buck2.tests.e2e_util.helper.golden import golden
+
+from buck2.tests.e2e_util.helper.utils import filter_events
+
+
+@dataclass
+class critical_path_log:
+    kind: str
+    name: str
+    category: str
+    identifier: str
+    execution_kind: str
+    total_duration: str
+    user_duration: str
+    potential_improvement_duration: str
+
+
+async def do_critical_path(buck: Buck, correct_analysis: bool) -> None:
+    await buck.build("//:step_3", "--no-remote-cache")
+
+    critical_path = (await buck.log("critical-path")).stdout.strip().splitlines()
+    critical_path = [e.split("\t") for e in critical_path]
+
+    trimmed_critical_path = [
+        critical_path_log(e[0], e[1].split(" ")[0], e[2], e[3], e[4], e[5], e[6], e[7])
+        for e in critical_path
+    ]
+
+    # There is now non-determinism in this test since what we get back depends on
+    # where the analysis becomes the longest path. This gets fixed later in
+    # this stack.
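+    # Until that fix lands, when correct_analysis is False we only validate
+    # the tail of the critical path up to the first "analysis" entry (hence
+    # the early `break` in the loop below).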
+
+    assert len(trimmed_critical_path) > 0
+    expected = [
+        ("load", "root//"),
+        ("analysis", "root//:step_0"),
+        ("analysis", "root//:step_1"),
+        ("analysis", "root//:step_2"),
+        ("analysis", "root//:step_3"),
+        ("action", "root//:step_0"),
+        ("action", "root//:step_1"),
+        ("action", "root//:step_2"),
+        ("action", "root//:step_3"),
+        ("materialization", "root//:step_3"),
+        ("compute-critical-path", ""),
+    ]
+
+    for s, e in zip(reversed(trimmed_critical_path), reversed(expected)):
+        if s.kind == "action":
+            assert s.execution_kind != ""
+            assert s.execution_kind != "ACTION_EXECUTION_NOTSET"
+        else:
+            assert s.execution_kind == ""
+
+        if not correct_analysis and s.kind == "analysis":
+            break
+        assert s.kind == e[0]
+        assert s.name == e[1]
+
+
+@buck_test()
+async def test_critical_path(buck: Buck) -> None:
+    await do_critical_path(buck, False)
+
+
+@buck_test()
+async def test_critical_path_longest_path_graph(buck: Buck) -> None:
+    with open(buck.cwd / ".buckconfig", "a") as f:
+        f.write("[buck2]\n")
+        f.write("critical_path_backend2 = longest-path-graph\n")
+    await do_critical_path(buck, True)
+
+
+@buck_test()
+async def test_critical_path_json(buck: Buck) -> None:
+    import json
+
+    await buck.build("//:step_3", "--no-remote-cache")
+    critical_path = (
+        (await buck.log("critical-path", "--format", "json"))
+        .stdout.strip()
+        .splitlines()
+    )
+    critical_path = [json.loads(e) for e in critical_path]
+
+    assert len(critical_path) > 0
+    expected = [
+        ("load", "root//"),
+        ("analysis", "root//:step_0"),
+        ("analysis", "root//:step_1"),
+        ("analysis", "root//:step_2"),
+        ("analysis", "root//:step_3"),
+        ("action", "root//:step_0"),
+        ("action", "root//:step_1"),
+        ("action", "root//:step_2"),
+        ("action", "root//:step_3"),
+        ("materialization", "root//:step_3"),
+        ("compute-critical-path", None),
+    ]
+
+    for critical, exp in zip(reversed(critical_path), reversed(expected)):
+        if critical["kind"] == "analysis":
+            # There is now non-determinism in this test since what we get back depends on
+            # where the analysis becomes the longest path. This gets fixed later in
+            # this stack.
+            break
+
+        assert "kind" in critical
+        assert critical["kind"] == exp[0]
+
+        if critical["kind"] == "compute-critical-path":
+            assert "name" not in critical
+        else:
+            assert "name" in critical
+            name = critical["name"].split(" ")[0]
+            assert name == exp[1]
+
+        if critical["kind"] == "action":
+            assert "execution_kind" in critical
+            assert critical["execution_kind"] != ""
+            assert critical["execution_kind"] != "ACTION_EXECUTION_NOTSET"
+        else:
+            assert "execution_kind" not in critical
+
+
+# Test that verifies the dicekey->node+deps graph that we produce for critical path
+# calculations. It can be much easier to understand bugs and behavior here than
+# by only inspecting the final critical path output (as the other tests do).
+@buck_test()
+async def test_dynamic_input_events(buck: Buck) -> None:
+    with open(buck.cwd / ".buckconfig", "a") as f:
+        f.write("[buck2]\n")
+        f.write("critical_path_backend2 = logging\n")
+
+    await buck.build("//:check_dynamic_input", "--no-remote-cache")
+    events = await filter_events(
+        buck,
+        "Event",
+        "data",
+        "Instant",
+        "data",
+        "UnstableE2eData",
+    )
+
+    events = [
+        json.loads(ev["data"])
+        for ev in events
+        if ev["key"] == "critical_path_logging_node"
+    ]
+
+    golden(
+        output=json.dumps(events, sort_keys=True, indent=2),
+        rel_path="events.golden.json",
+    )
+
+
+# Test that we can compute critical paths that include edges induced by the inputs of dynamic_output actions.
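+# Here `check_dynamic_input` (dynamic_cp2 in the prelude fixture) reads the
+# output of :step_1 inside its dynamic lambda, so the critical path has to
+# include the edge from :step_1's action into the dynamic action.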
+@buck_test() +async def test_dynamic_input(buck: Buck) -> None: + import json + + await buck.build("//:check_dynamic_input", "--no-remote-cache") + critical_path = ( + await buck.log("critical-path", "--format", "json") + ).stdout.splitlines() + critical_path = [json.loads(e) for e in critical_path] + + assert len(critical_path) > 0 + transformed = [] + for critical in critical_path: + assert "kind" in critical + t = critical["kind"] + + if critical["kind"] == "compute-critical-path": + assert "name" not in critical + else: + assert "name" in critical + name = critical["name"].split(" ")[0] + t = "{} {}".format(t, name) + + if critical["kind"] == "action": + assert "execution_kind" in critical + assert critical["execution_kind"] != "" + assert critical["execution_kind"] != "ACTION_EXECUTION_NOTSET" + else: + assert "execution_kind" not in critical + + # there's nondeterminism in critical path here because step1 action + # depends on both step0 action and step1 analysis, both of those depend + # on step0 analysis. + if t in ["analysis root//:step_1", "action root//:step_0"]: + continue + + transformed.append(t) + + golden( + output=json.dumps(transformed, indent=2), + rel_path="dynamic_input.golden.json", + ) + + +@buck_test() +async def test_critical_path_metadata(buck: Buck) -> None: + await buck.build( + "//:step_0", + "--no-remote-cache", + "-c", + "client.id=myclient", + "--oncall=myoncall", + ) + + build_graph_info = await filter_events( + buck, + "Event", + "data", + "Instant", + "data", + "BuildGraphInfo", + ) + + build_graph_info = build_graph_info[0] + assert build_graph_info + assert "username" in build_graph_info["metadata"] + assert build_graph_info["metadata"]["client"] == "myclient" + assert build_graph_info["metadata"]["oncall"] == "myoncall" + + +async def critical_path_helper(buck: Buck) -> typing.List[typing.Dict[str, typing.Any]]: + critical_path_actions = await filter_events( + buck, + "Event", + "data", + "Instant", + "data", + "BuildGraphInfo", + "critical_path2", + ) + + assert len(critical_path_actions) == 1 + return critical_path_actions[0] + + +@buck_test() +async def test_critical_path_execution_kind(buck: Buck) -> None: + await buck.build("//:step_3", "--no-remote-cache") + + critical_path_actions = await critical_path_helper(buck) + + has_action_execution = False + for action in critical_path_actions: + assert action["entry"] + # Every ActionExecution should have an execution kind and it shouldn't be 0 (default) + if "ActionExecution" in action["entry"]: + has_action_execution = True + assert action["entry"]["ActionExecution"]["execution_kind"] + assert action["entry"]["ActionExecution"]["execution_kind"] != 0 + + # Should have at least 1 ActionExecution or something went wrong + assert has_action_execution + + +@buck_test() +async def test_critical_path_rule_type(buck: Buck) -> None: + await buck.build("//:step_0", "--no-remote-cache") + + critical_path_actions = await critical_path_helper(buck) + + for action in critical_path_actions: + assert action["entry"] + + if "ActionExecution" in action["entry"]: + assert action["entry"]["ActionExecution"]["target_rule_type_name"] + assert ( + action["entry"]["ActionExecution"]["target_rule_type_name"] == "write" + ) + + +@buck_test() +async def test_critical_path_action_digest(buck: Buck) -> None: + await buck.build("//:step_3", "--no-remote-cache") + + critical_path_actions = await critical_path_helper(buck) + + has_action_digest = False + for action in critical_path_actions: + assert action["entry"] + if 
"ActionExecution" in action["entry"]: + if "action_digest" in action["entry"]["ActionExecution"]: + has_action_digest = True + + assert has_action_digest diff --git a/tests/core/build/test_critical_path_data/.buckconfig b/tests/core/build/test_critical_path_data/.buckconfig new file mode 100644 index 0000000000000..6a2e48bc41d8a --- /dev/null +++ b/tests/core/build/test_critical_path_data/.buckconfig @@ -0,0 +1,9 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored + +[repositories] +root = . +prelude = prelude diff --git a/tests/core/build/test_critical_path_data/.buckroot b/tests/core/build/test_critical_path_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_critical_path_data/TARGETS.fixture b/tests/core/build/test_critical_path_data/TARGETS.fixture new file mode 100644 index 0000000000000..3f2289face5e1 --- /dev/null +++ b/tests/core/build/test_critical_path_data/TARGETS.fixture @@ -0,0 +1,13 @@ +write(name = "step_0") +cp(name = "step_1", dep = ":step_0") +dynamic_cp(name = "step_2", dep = ":step_1") + +# NOTE: This is a long sleep, but our tests already take 50 seconds anyway so +# let's just sleep for a while to make sure we don't introduce flakyness. Also +# note that this should be unnecessary since there is only one top-level target +# here, but this breaks because critical path calculations are not aware of +# dependencies induced by the *inputs* of `dynamic_output`: they're only aware +# of the inputs used by the actions declared in `dynamic_output`. +cp(name = "step_3", dep = ":step_2", sleep = 5) + +dynamic_cp2(name = "check_dynamic_input", dep = ":step_1") diff --git a/tests/core/build/test_critical_path_data/dynamic_input.golden.json b/tests/core/build/test_critical_path_data/dynamic_input.golden.json new file mode 100644 index 0000000000000..55429b1f4dafe --- /dev/null +++ b/tests/core/build/test_critical_path_data/dynamic_input.golden.json @@ -0,0 +1,11 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +[ + "listing root//", + "load root//", + "analysis root//:step_0", + "action root//:step_1", + "action root//:check_dynamic_input", + "materialization root//:check_dynamic_input", + "compute-critical-path" +] diff --git a/tests/core/build/test_critical_path_data/events.golden.json b/tests/core/build/test_critical_path_data/events.golden.json new file mode 100644 index 0000000000000..c42774646f1e2 --- /dev/null +++ b/tests/core/build/test_critical_path_data/events.golden.json @@ -0,0 +1,97 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +[ + { + "deps": [], + "key": "PackageListingKey(root//)" + }, + { + "deps": [ + "PackageListingKey(root//)" + ], + "key": "InterpreterResultsKey(root//)" + }, + { + "deps": [ + "InterpreterResultsKey(root//)" + ], + "key": "ConfiguredTargetNodeKey(root//:step_0 ())" + }, + { + "deps": [ + "InterpreterResultsKey(root//)", + "ConfiguredTargetNodeKey(root//:step_0 ())" + ], + "key": "ConfiguredTargetNodeKey(root//:step_1 ())" + }, + { + "deps": [ + "InterpreterResultsKey(root//)", + "ConfiguredTargetNodeKey(root//:step_1 ())" + ], + "key": "ConfiguredTargetNodeKey(root//:check_dynamic_input ())" + }, + { + "deps": [ + "ConfiguredTargetNodeKey(root//:step_0 ())" + ], + "key": "AnalysisKey(root//:step_0 ())" + }, + { + "deps": [ + "ConfiguredTargetNodeKey(root//:step_1 ())", + "AnalysisKey(root//:step_0 ())" + ], + 
"key": "AnalysisKey(root//:step_1 ())" + }, + { + "deps": [ + "ConfiguredTargetNodeKey(root//:check_dynamic_input ())", + "AnalysisKey(root//:step_1 ())" + ], + "key": "AnalysisKey(root//:check_dynamic_input ())" + }, + { + "deps": [ + "AnalysisKey(root//:step_0 ())", + "ConfiguredTargetNodeKey(root//:step_0 ())" + ], + "key": "BuildKey((target: `root//:step_0 ()`, id: `0`))" + }, + { + "deps": [ + "AnalysisKey(root//:step_1 ())", + "BuildKey((target: `root//:step_0 ()`, id: `0`))", + "ConfiguredTargetNodeKey(root//:step_1 ())" + ], + "key": "BuildKey((target: `root//:step_1 ()`, id: `0`))" + }, + { + "deps": [ + "AnalysisKey(root//:check_dynamic_input ())", + "BuildKey((target: `root//:step_1 ()`, id: `0`))" + ], + "key": "DynamicLambdaDiceKey(root//:check_dynamic_input ()_0)" + }, + { + "deps": [ + "DynamicLambdaDiceKey(root//:check_dynamic_input ()_0)", + "ConfiguredTargetNodeKey(root//:check_dynamic_input ())" + ], + "key": "BuildKey((target: `root//:check_dynamic_input ()_0`, id: `1`))" + }, + { + "deps": [ + "DynamicLambdaDiceKey(root//:check_dynamic_input ()_0)", + "DynamicLambdaDiceKey(root//:check_dynamic_input ()_0)", + "BuildKey((target: `root//:check_dynamic_input ()_0`, id: `1`))" + ], + "key": "BuildKey((target: `root//:check_dynamic_input ()_0`, id: `0`))" + }, + { + "deps": [ + "BuildKey((target: `root//:check_dynamic_input ()_0`, id: `0`))" + ], + "key": "Materialization(`(root//:check_dynamic_input ())/out`, action: (target: `root//:check_dynamic_input ()_0`, id: `0`))" + } +] diff --git a/tests/core/build/test_critical_path_data/prelude/prelude.bzl b/tests/core/build/test_critical_path_data/prelude/prelude.bzl new file mode 100644 index 0000000000000..dc3e9564169a5 --- /dev/null +++ b/tests/core/build/test_critical_path_data/prelude/prelude.bzl @@ -0,0 +1,70 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _write(ctx: AnalysisContext) -> list[Provider]: + out = ctx.actions.write("out", "test") + return [DefaultInfo(default_output = out)] + +write = rule(impl = _write, attrs = { +}) + +def _cp(ctx: AnalysisContext) -> list[Provider]: + inp = ctx.attrs.dep[DefaultInfo].default_outputs[0] + out = ctx.actions.declare_output("out") + + ctx.actions.run([ + "sh", + "-c", + 'sleep "$1" && cp "$2" "$3"', + "--", + str(ctx.attrs.sleep), + inp, + out.as_output(), + ], category = "cp_action") + return [DefaultInfo(default_output = out)] + +cp = rule(impl = _cp, attrs = { + "dep": attrs.dep(), + "sleep": attrs.int(default = 0), +}) + +def _dynamic_cp(ctx: AnalysisContext) -> list[Provider]: + dummy = ctx.actions.write("dummy", "") + + inp = ctx.attrs.dep[DefaultInfo].default_outputs[0] + out = ctx.actions.declare_output("out") + + def f(ctx: AnalysisContext, _artifacts, outputs): + # NOTE: dummy doesn't show in the critical path calculation at all. 
+ ctx.actions.run([ + "cp", + inp, + outputs[out].as_output(), + ], category = "dynamic_cp_action") + + ctx.actions.dynamic_output(dynamic = [dummy], inputs = [inp], outputs = [out.as_output()], f = f) + return [DefaultInfo(default_output = out)] + +dynamic_cp = rule(impl = _dynamic_cp, attrs = { + "dep": attrs.dep(), +}) + +def _dynamic_cp2(ctx: AnalysisContext) -> list[Provider]: + dummy = ctx.actions.write("dummy", "") + + inp = ctx.attrs.dep[DefaultInfo].default_outputs[0] + out = ctx.actions.declare_output("out") + + def f(ctx: AnalysisContext, artifacts, outputs): + ctx.actions.write(outputs[out].as_output(), artifacts[inp].read_string()) + + ctx.actions.dynamic_output(dynamic = [inp], inputs = [], outputs = [out.as_output()], f = f) + return [DefaultInfo(default_output = out)] + +dynamic_cp2 = rule(impl = _dynamic_cp2, attrs = { + "dep": attrs.dep(), +}) diff --git a/tests/core/build/test_dep_files.py b/tests/core/build/test_dep_files.py new file mode 100644 index 0000000000000..6a21e3f29032f --- /dev/null +++ b/tests/core/build/test_dep_files.py @@ -0,0 +1,697 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import hashlib +import typing +from pathlib import Path +from typing import Any, Dict, List + +import pytest + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.api.buck_result import BuckException +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test, env +from buck2.tests.e2e_util.helper.utils import ( + expect_exec_count, + filter_events, + random_string, + read_invocation_record, + read_what_ran, +) + +# Taken from data.proto +ACTION_EXECUTION_KIND_LOCAL = 1 +ACTION_EXECUTION_KIND_ACTION_CACHE = 3 +ACTION_EXECUTION_KIND_SIMPLE = 4 +ACTION_EXECUTION_KIND_LOCAL_DEP_FILE = 7 +ACTION_EXECUTION_KIND_REMOTE_DEP_FILE_CACHE = 9 + +CACHE_UPLOAD_REASON_LOCAL_EXECUTION = 0 +CACHE_UPLOAD_REASON_DEP_FILE = 1 + + +async def check_execution_kind( + buck: Buck, + expecteds: typing.List[int], + ignored: typing.Optional[typing.List[int]] = None, +) -> None: + ignored = ignored or [] + execution_kinds = await filter_events( + buck, + "Event", + "data", + "SpanEnd", + "data", + "ActionExecution", + "execution_kind", + ) + execution_kinds = [kind for kind in execution_kinds if kind not in ignored] + assert len(execution_kinds) == len(expecteds) + for actual, expected in zip(execution_kinds, expecteds): + assert actual == expected + + +async def check_match_dep_files( + buck: Buck, + expected: typing.List[ + typing.Tuple[bool, bool] # (checking_filtered_inputs, remote_cache) + ], +) -> None: + match_dep_files = await filter_events( + buck, "Event", "data", "SpanStart", "data", "MatchDepFiles" + ) + assert len(match_dep_files) == len(expected) + + for match, (checking_filtered_inputs, remote_cache) in zip( + match_dep_files, expected + ): + assert bool(match["remote_cache"]) == remote_cache + assert bool(match["checking_filtered_inputs"]) == checking_filtered_inputs + + +async def _get_execution_kind(buck: Buck) -> int: + execution_kinds = await filter_events( + buck, + "Event", + "data", + "SpanEnd", + "data", + "ActionExecution", + "execution_kind", + ) + return execution_kinds[0] + + +def touch(buck: Buck, name: str) -> None: + 
""" + Append a random string to the marker in the file + """ + with open(buck.cwd / name, "r", encoding="utf-8") as f: + text = f.read() + + with open(buck.cwd / name, "w", encoding="utf-8") as f: + f.write(text.replace("__MARKER__", "__MARKER__{}".format(random_string()))) + + +# Flaky because of watchman on mac (and maybe windows) +# Skipping on windows due to gcc dependency +@buck_test(data_dir="dep_files", skip_for_os=["darwin", "windows"]) +async def test_dep_files(buck: Buck) -> None: + # We query cache before we query dep file. Disable remote cache to make + # sure that for the last build what-ran doesn't return cached entry. + args = ["app:app", "--no-remote-cache"] + await buck.build(*args) + await expect_exec_count(buck, 1) + + touch(buck, "app/app.h") + await buck.build(*args) + await expect_exec_count(buck, 1) + + touch(buck, "app/app.c") + await buck.build(*args) + await expect_exec_count(buck, 1) + + # //app:app doesn't use other.h and + # using dep file this should build nothing. + touch(buck, "app/other.h") + await buck.build(*args) + await expect_exec_count(buck, 0) + + +async def get_cache_queries(buck: Buck) -> List[Dict[str, Any]]: + return await filter_events( + buck, + "Event", + "data", + "SpanStart", + "data", + "ExecutorStage", + "stage", + "CacheQuery", + ) + + +async def check_no_cache_query(buck: Buck) -> None: + cache_queries = await get_cache_queries(buck) + assert len(cache_queries) == 0 + + +async def check_cache_query(buck: Buck) -> None: + cache_queries = await get_cache_queries(buck) + assert len(cache_queries) == 1 + + +# Skipping on windows due to gcc dependency +@buck_test(setup_eden=True, data_dir="dep_files", skip_for_os=["windows"]) +async def test_dep_file_hit_identical_action(buck: Buck) -> None: + # For actions that have dep files, buck will query the local dep file cache to see + # if an identical action is stored there. Otherwise, it will fall back to an action cache + # look up (if enabled) and then to the full dep file query. + # This test builds a target to build up a dep file cache, then builds the target again + # with a no-op configuration change so that we hit the initial dep file lookup hit case. + dummy1 = "dummy1" + await buck.build( + "app:app_with_dummy_config", + "--local-only", + "--no-remote-cache", # Turn off remote cache query so we execute locally + "-c", + f"test.dummy_config={dummy1}", + ) + await check_execution_kind( + buck, [ACTION_EXECUTION_KIND_SIMPLE, ACTION_EXECUTION_KIND_LOCAL] + ) + + dummy2 = "dummy2" + await buck.build( + "app:app_with_dummy_config", + "--local-only", + "-c", + f"test.dummy_config={dummy2}", + ) + # The result should be served by the local dep file cache BEFORE an action cache lookup + await check_no_cache_query(buck) + # Ignoring any simple actions because there can be either one or two symlink dir actions, + # with the same dice key, + # Not sure why but this feels like a DICE bug triggered by the buckconfig change. + await check_execution_kind( + buck, + [ACTION_EXECUTION_KIND_LOCAL_DEP_FILE], + ignored=[ACTION_EXECUTION_KIND_SIMPLE], + ) + # The MatchDepFilesStart span should indicate we only checked the depfile cache once + await check_match_dep_files(buck, [(False, False)]) + + +# Flaky because of watchman on mac (and maybe windows) +# Skipping on windows due to gcc dependency +# This test tombstones the hash of the dep file produced by this action. 
+@buck_test(data_dir="dep_files", skip_for_os=["darwin", "windows"]) +@env( + "BUCK2_TEST_TOMBSTONED_DIGESTS", + "ed34019d42934db589d9678e6e2d0cdff739e7e2:78", +) +async def test_dep_files_ignore_missing_digests(buck: Buck, tmp_path: Path) -> None: + await buck.build("app:app") + await expect_exec_count(buck, 1) + + with pytest.raises(BuckException): # noqa B908 + dep_file_path = tmp_path / "dep_file" + await buck.build("app:app[dep_file]", f"--out={dep_file_path}") + + # If we get here, that means materialization did not fail. + with open(dep_file_path, "rb") as f: + dep_file = f.read() + dep_file_hash = hashlib.sha1(dep_file).hexdigest() + dep_file_len = len(dep_file) + raise Exception( + f"Misconfigured test, BUCK2_TEST_TOMBSTONED_DIGESTS to {dep_file_hash}:{dep_file_len}", + ) + + touch(buck, "app/other.h") + await buck.build("app:app") + + await expect_exec_count(buck, 1) + + +@buck_test(data_dir="invalid_dep_files") +async def test_invalid_dep_files(buck: Buck) -> None: + await buck.build( + "//:lazy", + ) + # Disable remote cache lookup so we actually check for local dep files + await expect_failure( + buck.build( + "//:lazy", + "-c", + "test.seed=123", + "--no-remote-cache", + ), + stderr_regex="Invalid line encountered in dep file", + ) + + await buck.debug("flush-dep-files") + await buck.build("//:lazy") + + # Disable remote cache lookup so we actually check for local dep files + await expect_failure( + buck.build( + "//:eager", + "--eager-dep-files", + "--no-remote-cache", + ), + stderr_regex="Invalid line encountered in dep file", + ) + + +@buck_test(data_dir="mismatched_outputs_dep_files") +async def test_mismatched_outputs_dep_files(buck: Buck) -> None: + await buck.build("//:test", "-c", "test.prefix=foo", "-c", "test.suffix=bar") + # Different output now, even though the command has not changed. + await buck.build("//:test", "-c", "test.prefix=foo/bar", "-c", "test.suffix=") + + +async def _dep_file_uploads(buck: Buck) -> List[Dict[str, Any]]: + return await filter_events( + buck, "Event", "data", "SpanEnd", "data", "DepFileUpload" + ) + + +async def _action_executions(buck: Buck) -> List[Dict[str, Any]]: + return await filter_events( + buck, "Event", "data", "SpanEnd", "data", "ActionExecution" + ) + + +async def _dep_file_key_from_executions(buck: Buck) -> str: + execs = await _action_executions(buck) + assert len(execs) == 1 + return execs[0]["dep_file_key"] + + +async def _check_uploaded_dep_file_key(buck: Buck, dep_file_key: str) -> None: + # BUCK2_TEST_SKIP_ACTION_CACHE_WRITE causes action result writes for dep files to always pass. + # This is to allow testing without action cache write permission. 
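+    # Because of that, we assert on the remote dep file key recorded on the
+    # successful upload span rather than on any remotely stored result.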
+ dep_file_uploads = [ + upload for upload in await _dep_file_uploads(buck) if upload["success"] + ] + assert len(dep_file_uploads) == 1 + uploaded_key = dep_file_uploads[0]["remote_dep_file_key"] + assert dep_file_key == uploaded_key + + +@buck_test(data_dir="upload_dep_files") +@env("BUCK_LOG", "buck2_execute_impl::executors::caching=debug") +@env("BUCK2_TEST_SKIP_ACTION_CACHE_WRITE", "true") +async def test_re_dep_file_uploads_same_key(buck: Buck) -> None: + # Test all the cases where the remote dep file key should stay the same + target = "root//:dep_files1" + tagged_used_file1 = buck.cwd / "used.1" # Used for depfile 0 + tagged_used_file3 = buck.cwd / "used.3" # Used for depfile 1 + assert tagged_used_file1.exists() + assert tagged_used_file3.exists() + + target = [ + target, + "-c", + "test.allow_dep_file_cache_upload=true", + "-c", + f"test.cache_buster={random_string()}", + "--local-only", + ] + + # Check that building this target results in a dep file cache upload + await buck.build(*target) + + key = await _dep_file_key_from_executions(buck) + await _check_uploaded_dep_file_key(buck, key) + + # Changing a tagged (associated with a dep file) input should not change the key + # The remote dep file key only tracks the untagged inputs. The dep file cache is for checking whether + # the output is the same despite a tagged file changing. + tagged_used_file1.write_text("CHANGE") + tagged_used_file3.write_text("CHANGE") + await buck.build(*target) + key_tagged_input_change = await _dep_file_key_from_executions(buck) + await _check_uploaded_dep_file_key(buck, key_tagged_input_change) + assert key == key_tagged_input_change + + +@buck_test(data_dir="upload_dep_files") +@env("BUCK_LOG", "buck2_execute_impl::executors::caching=debug") +@env("BUCK2_TEST_SKIP_ACTION_CACHE_WRITE", "true") +async def test_re_dep_file_uploads_different_key(buck: Buck) -> None: + # TODO: Mergebase is currently not set in this test. 
+ # Include it so we can test for the case where the mergebase differs + + keys_seen = [] + target = "root//:dep_files1" + untagged_file1 = buck.cwd / "untagged.1" + assert untagged_file1.exists() + targets_file = buck.cwd / "TARGETS.fixture" + assert targets_file.exists() + + target = [ + target, + "-c", + "test.allow_dep_file_cache_upload=true", + "-c", + f"test.cache_buster={random_string()}", + "--local-only", + ] + + # Check that building this target results in a dep file cache upload + await buck.build(*target) + key = await _dep_file_key_from_executions(buck) + await _check_uploaded_dep_file_key(buck, key) + keys_seen.append(key) + + # Modify the depfile name and check the new key is different + targets_file.write_text( + targets_file.read_text().replace( + '"dep_file_name1",', '"dep_file_name1_modified",' + ) + ) + await buck.build(*target) + + key_different_depfile_name = await _dep_file_key_from_executions(buck) + await _check_uploaded_dep_file_key(buck, key_different_depfile_name) + assert key_different_depfile_name not in keys_seen + keys_seen.append(key_different_depfile_name) + + # Modify the output name and check the new key is different + targets_file.write_text( + targets_file.read_text().replace('out_name = "out"', 'out_name = "out_changed"') + ) + await buck.build(*target) + key_different_out_name = await _dep_file_key_from_executions(buck) + await _check_uploaded_dep_file_key(buck, key_different_out_name) + assert key_different_out_name not in keys_seen + keys_seen.append(key_different_out_name) + + # Modify an untagged input and check the new key is different + untagged_file1.write_text("CHANGE") + await buck.build(*target) + key_untagged_input_change = await _dep_file_key_from_executions(buck) + await _check_uploaded_dep_file_key(buck, key_untagged_input_change) + assert key_untagged_input_change not in keys_seen + keys_seen.append(key_untagged_input_change) + + +@buck_test(data_dir="upload_dep_files") +@env("BUCK_LOG", "buck2_execute_impl::executors::caching=debug") +@env("BUCK2_TEST_SKIP_ACTION_CACHE_WRITE", "true") +async def test_dep_file_does_not_upload_when_allow_cache_upload_is_true( + buck: Buck, +) -> None: + target = [ + "root//:dep_files1", + "-c", + "test.allow_dep_file_cache_upload=false", + "-c", + "test.allow_cache_upload=true", + "-c", + f"test.cache_buster={random_string()}", + "--remote-only", + ] + + # Check that we don't do a dep file cache upload when allow_dep_file_cache_upload is false, + # even though allow_cache_upload is true + await buck.build(*target) + uploads = await _dep_file_uploads(buck) + assert len(uploads) == 0 + + +@buck_test(data_dir="upload_dep_files") +@env("BUCK_LOG", "buck2_execute_impl::executors::caching=debug") +@env("BUCK2_TEST_SKIP_ACTION_CACHE_WRITE", "true") +@env("BUCK2_TEST_ONLY_REMOTE_DEP_FILE_CACHE", "true") +async def test_only_do_cache_lookup_when_dep_file_upload_is_enabled( + buck: Buck, +) -> None: + target = [ + "root//:dep_files1", + "-c", + "test.allow_dep_file_cache_upload=false", + "-c", + "test.allow_cache_upload=true", + "-c", + f"test.cache_buster={random_string()}", + "--remote-only", + ] + + # Check that we don't do a dep file cache lookup when allow_dep_file_cache_upload is false + await buck.build(*target) + await check_no_cache_query(buck) + + target = [ + "root//:dep_files1", + "-c", + "test.allow_dep_file_cache_upload=true", + "-c", + "test.allow_cache_upload=true", + "-c", + f"test.cache_buster={random_string()}", + "--remote-only", + ] + + # Check that we do a dep file cache lookup when 
allow_dep_file_cache_upload is true + await buck.build(*target) + await check_cache_query(buck) + + +@buck_test(data_dir="upload_dep_files") +@env("BUCK_LOG", "buck2_execute_impl::executors::caching=debug") +@env("BUCK2_TEST_SKIP_ACTION_CACHE_WRITE", "true") +async def test_re_dep_file_remote_upload(buck: Buck) -> None: + target = [ + "root//:dep_files1", + "-c", + "test.allow_dep_file_cache_upload=true", + "-c", + f"test.cache_buster={random_string()}", + "--remote-only", + ] + + # Check that building on RE results in a dep file cache upload + await buck.build(*target) + key = await _dep_file_key_from_executions(buck) + await _check_uploaded_dep_file_key(buck, key) + + +@buck_test(data_dir="upload_dep_files") +@env("BUCK_LOG", "buck2_action_impl=debug,buck2_execute_impl::executors::caching=debug") +@env("BUCK2_TEST_SKIP_ACTION_CACHE_WRITE", "true") +async def test_re_dep_file_cache_hit_upload(buck: Buck, tmpdir: Path) -> None: + target = [ + "root//:dep_files1", + "--remote-only", + "-c", + # Ensure we don't get a dep file cache hit + "test.remote_dep_file_cache_enabled=false", + ] + + # Build on RE to make sure action cache is populated + await buck.build(*target) + await buck.kill() + + record = tmpdir / "record.json" + # Check for action cache hit and dep file cache upload + await buck.build( + *target, + "-c", + "test.allow_dep_file_cache_upload=true", + "--unstable-write-invocation-record", + str(record), + ) + what_ran = await read_what_ran(buck) + assert what_ran[0]["reproducer"]["executor"] == "Cache" + assert len(what_ran) == 1 + key = await _dep_file_key_from_executions(buck) + await _check_uploaded_dep_file_key(buck, key) + + invocation_record = read_invocation_record(record) + + assert invocation_record["dep_file_upload_count"] == 1 + assert ( + invocation_record["dep_file_upload_count"] + == invocation_record["dep_file_upload_attempt_count"] + ) + + # Simulate 'user' build, with action cache hit from previous build and dep file cache checking enabled. + await buck.clean() + await buck.build( + "root//:dep_files1", + "--remote-only", + "-c", + "test.remote_dep_file_cache_enabled=true", + "-c", + "test.allow_dep_file_cache_upload=false", + ) + await check_execution_kind(buck, [ACTION_EXECUTION_KIND_ACTION_CACHE]) + uploads = await _dep_file_uploads(buck) + # Ensure no dep file uploads are attempted for cache hits with dep file cache checking enabled, but dep file uploads disabled. 
+    assert len(uploads) == 0
+
+
+@buck_test(data_dir="upload_dep_files")
+async def test_re_dep_file_uploads_failed_action(buck: Buck) -> None:
+    # If the action failed, we should not attempt a cache upload, even if the
+    # action is configured to allow one.
+    target = [
+        "root//:dep_files_fail",
+        "-c",
+        "test.allow_dep_file_cache_upload=true",
+    ]
+    await expect_failure(
+        buck.build(
+            *target,
+            "--no-remote-cache",
+            "--local-only",
+        ),
+        stderr_regex="Failing on purpose",
+    )
+    # Assert cache upload was not attempted
+    what_ran = await read_what_ran(buck, "--emit-cache-queries")
+    for what in what_ran:
+        assert "CacheQuery" != what["reproducer"]["executor"]
+
+
+async def check_remote_dep_file_cache_query_took_place(buck: Buck) -> str:
+    what_ran = await read_what_ran(buck, "--emit-cache-queries")
+    assert "CacheQuery" == what_ran[0]["reproducer"]["executor"]
+    return what_ran[0]["reproducer"]["details"]["digest"]
+
+
+@buck_test(data_dir="upload_dep_files")
+@env(
+    "BUCK_LOG",
+    "buck2_execute_impl::executors::caching=debug,buck2_execute_impl::executors::action_cache=debug,buck2_action_impl=debug",
+)
+# Disable the regular action cache query so that we actually hit the remote dep file cache query.
+@env("BUCK2_TEST_ONLY_REMOTE_DEP_FILE_CACHE", "true")
+async def test_re_dep_file_query_change_tagged_unused_file(buck: Buck) -> None:
+    target = "root//:dep_files1"
+    # Tagged for depfile0, and exists in depfile0
+    tagged_used_file1 = buck.cwd / "used.1"
+    # Tagged for depfile0, but does NOT exist in depfile0
+    tagged_unused = buck.cwd / "unused.1"
+    assert tagged_used_file1.exists()
+    assert tagged_unused.exists()
+
+    target_upload_enabled = [
+        target,
+        "-c",
+        "test.allow_dep_file_cache_upload=true",
+        "--local-only",
+    ]
+
+    target_upload_enabled_with_action_definition_change = [
+        target,
+        "-c",
+        "test.allow_dep_file_cache_upload=true",
+        "-c",
+        "test.allow_cache_upload=true",
+        "--local-only",
+    ]
+
+    # Build it once with cache upload (cache upload will fail locally)
+    result = await buck.build(*target_upload_enabled)
+    output = result.get_build_report().output_for_target(target).read_text()
+    assert output == "used1\nused2\nused3\n"
+
+    # Build the target again. This will result in one of
+    # 1. A remote dep file cache hit and a subsequent dep file validation
+    # 2. A remote dep file cache miss, falling back to local execution (the local dep file cache is flushed)
+    await buck.debug("flush-dep-files")
+    result = await buck.build(*target_upload_enabled_with_action_definition_change)
+    output = result.get_build_report().output_for_target(target).read_text()
+    assert output == "used1\nused2\nused3\n"
+
+    await check_remote_dep_file_cache_query_took_place(buck)
+    execution_kind = await _get_execution_kind(buck)
+    was_cache_hit = "Cache hits: 100%" in result.stderr
+    assert (
+        was_cache_hit and execution_kind == ACTION_EXECUTION_KIND_REMOTE_DEP_FILE_CACHE
+    ) or (not was_cache_hit and execution_kind == ACTION_EXECUTION_KIND_LOCAL)
+    expected_dep_file_match = [
+        (False, False),  # Initial local dep file cache lookup for an identical action
+        (True, True),  # Remote dep file cache verification
+    ]
+    if execution_kind == ACTION_EXECUTION_KIND_REMOTE_DEP_FILE_CACHE:
+        # Check the MatchDepFiles events
+        await check_match_dep_files(buck, expected_dep_file_match)
+
+    # Change a file that is tracked by a dep file but shows up as unused. This
+    # will again result in one of
+    # 1. A remote dep file cache hit and a subsequent dep file validation
+    # 2. A remote dep file cache miss, falling back to local execution (the local dep file cache is flushed)
+    tagged_unused.write_text("CHANGE")
+    result = await buck.build(*target_upload_enabled)
+    output = result.get_build_report().output_for_target(target).read_text()
+    assert output == "used1\nused2\nused3\n"
+
+    await check_remote_dep_file_cache_query_took_place(buck)
+    execution_kind = await _get_execution_kind(buck)
+    was_cache_hit = "Cache hits: 100%" in result.stderr
+    assert (
+        was_cache_hit and execution_kind == ACTION_EXECUTION_KIND_REMOTE_DEP_FILE_CACHE
+    ) or (not was_cache_hit and execution_kind == ACTION_EXECUTION_KIND_LOCAL)
+
+    if execution_kind == ACTION_EXECUTION_KIND_REMOTE_DEP_FILE_CACHE:
+        # Check the MatchDepFiles events
+        await check_match_dep_files(buck, expected_dep_file_match)
+
+
+@buck_test(data_dir="upload_dep_files")
+@env(
+    "BUCK_LOG",
+    "buck2_execute_impl::executors::caching=debug,buck2_execute_impl::executors::action_cache=debug,buck2_action_impl=debug",
+)
+# Disable the regular action cache query so that we actually hit the remote dep file cache query.
+@env("BUCK2_TEST_ONLY_REMOTE_DEP_FILE_CACHE", "true")
+async def test_re_dep_file_query_change_tagged_used_file(buck: Buck) -> None:
+    target = "root//:dep_files2"
+    # Tagged for depfile0, and exists in depfile0
+    tagged_used_file1 = buck.cwd / "used.1"
+    # Tagged for depfile0, but does NOT exist in depfile0
+    tagged_unused = buck.cwd / "unused.1"
+    assert tagged_used_file1.exists()
+    assert tagged_unused.exists()
+
+    target_upload_enabled = [
+        target,
+        "-c",
+        "test.allow_dep_file_cache_upload=true",
+        "--local-only",
+    ]
+
+    # Build it once with cache upload (cache upload will fail locally)
+    result = await buck.build(*target_upload_enabled)
+    output = result.get_build_report().output_for_target(target).read_text()
+    assert output == "used1\nused2\nused3\n"
+
+    # Change a file that is tracked by a dep file and shows up as used (ends up listed in the dep file).
+    # Build the target again. This will result in one of
+    # 1. A remote dep file cache hit and a subsequent dep file validation (which fails)
+    # 2. A remote dep file cache miss, falling back to local execution (the local dep file cache is flushed)
+    # Either way, the action should be executed locally.
+    await buck.debug("flush-dep-files")
+    tagged_used_file1.write_text("used1(MODIFIED)\n")
+    result = await buck.build(*target_upload_enabled)
+    await check_remote_dep_file_cache_query_took_place(buck)
+    await check_execution_kind(buck, [ACTION_EXECUTION_KIND_LOCAL])
+    output = result.get_build_report().output_for_target(target).read_text()
+    assert output == "used1(MODIFIED)\nused2\nused3\n"
+
+
+# Flaky because of watchman on mac (and maybe windows)
+# Skipping on windows due to gcc dependency
+@buck_test(data_dir="dep_files", skip_for_os=["darwin", "windows"])
+async def test_flush_dep_files(buck: Buck) -> None:
+    # Make sure that we build locally
+    args = ["app:app", "--no-remote-cache", "--local-only"]
+    await buck.build(*args)
+    await expect_exec_count(buck, 1)
+
+    await buck.debug("flush-dep-files", "--retain-local")
+
+    # //app:app doesn't use other.h, and the dep file is still present
+    # because we retained local dep files, so nothing needs to re-run.
+    touch(buck, "app/other.h")
+    await buck.build(*args)
+    await expect_exec_count(buck, 0)
+
+    await buck.debug("flush-dep-files")
+
+    # All dep files are gone now, so we have to rebuild.
+ touch(buck, "app/other.h") + await buck.build(*args) + await expect_exec_count(buck, 1) diff --git a/tests/core/build/test_dep_files_data/.buckroot b/tests/core/build/test_dep_files_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_dep_files_data/dep_files/.buckconfig b/tests/core/build/test_dep_files_data/dep_files/.buckconfig new file mode 100644 index 0000000000000..86593e627bfda --- /dev/null +++ b/tests/core/build/test_dep_files_data/dep_files/.buckconfig @@ -0,0 +1,12 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored + +[repositories] +root = . +prelude = prelude + +[buck2] + materializations = deferred diff --git a/tests/core/build/test_dep_files_data/dep_files/app/TARGETS.fixture b/tests/core/build/test_dep_files_data/dep_files/app/TARGETS.fixture new file mode 100644 index 0000000000000..133a2c6e29f91 --- /dev/null +++ b/tests/core/build/test_dep_files_data/dep_files/app/TARGETS.fixture @@ -0,0 +1,12 @@ +c_binary( + name = "app", + main = "app.c", + headers = ["app.h", "other.h"], +) + +c_binary( + name = "app_with_dummy_config", + main = "app.c", + headers = ["app.h", "other.h"], + _ignored = read_config("test", "dummy_config"), +) diff --git a/tests/core/build/test_dep_files_data/dep_files/app/app.c b/tests/core/build/test_dep_files_data/dep_files/app/app.c new file mode 100644 index 0000000000000..358e6362a0fe7 --- /dev/null +++ b/tests/core/build/test_dep_files_data/dep_files/app/app.c @@ -0,0 +1,16 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +// __MARKER__ + +#include + +int main() { + return 0; +} diff --git a/tests/core/build/test_dep_files_data/dep_files/app/app.h b/tests/core/build/test_dep_files_data/dep_files/app/app.h new file mode 100644 index 0000000000000..d8e150a1c3cf2 --- /dev/null +++ b/tests/core/build/test_dep_files_data/dep_files/app/app.h @@ -0,0 +1,10 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +// __MARKER__ diff --git a/tests/core/build/test_dep_files_data/dep_files/app/other.h b/tests/core/build/test_dep_files_data/dep_files/app/other.h new file mode 100644 index 0000000000000..d8e150a1c3cf2 --- /dev/null +++ b/tests/core/build/test_dep_files_data/dep_files/app/other.h @@ -0,0 +1,10 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +// __MARKER__ diff --git a/tests/core/build/test_dep_files_data/dep_files/prelude/prelude.bzl b/tests/core/build/test_dep_files_data/dep_files/prelude/prelude.bzl new file mode 100644 index 0000000000000..7f90d5f3cf920 --- /dev/null +++ b/tests/core/build/test_dep_files_data/dep_files/prelude/prelude.bzl @@ -0,0 +1,61 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _c_binary_impl(ctx): + headers = { + "{}/{}".format(ctx.label.package, h.short_path): h + for h in ctx.attrs.headers + } + + headers_tag = ctx.actions.artifact_tag() + + headers_dir = ctx.actions.symlinked_dir("headers", headers) + headers_dir = headers_tag.tag_artifacts(headers_dir) + + dep_file = ctx.actions.declare_output("depfile") + app = ctx.actions.declare_output(ctx.attrs.name) + + cmd = [ + ctx.attrs._cc[RunInfo].args, + ctx.attrs.main, + "-I", + headers_dir, + "-o", + app.as_output(), + "-MMD", + "-MF", + headers_tag.tag_artifacts(dep_file.as_output()), + ] + + ctx.actions.run( + cmd, + category = "cxx_link", + dep_files = {"headers": headers_tag}, + ) + + return [ + DefaultInfo( + default_output = app, + sub_targets = {"dep_file": [DefaultInfo(default_output = dep_file)]}, + ), + RunInfo(args = cmd_args(app)), + ] + +c_binary = rule( + attrs = { + "headers": attrs.list(attrs.source()), + "main": attrs.source(), + "_cc": attrs.dep(default = "root//tools:gcc"), + "_ignored": attrs.string(default = ""), + }, + impl = _c_binary_impl, +) + +def _tool_impl(ctx): + return [DefaultInfo(default_output = ctx.attrs.src), RunInfo(args = cmd_args(ctx.attrs.src))] + +tool = rule(attrs = {"src": attrs.source()}, impl = _tool_impl) diff --git a/tests/core/build/test_dep_files_data/dep_files/tools/TARGETS.fixture b/tests/core/build/test_dep_files_data/dep_files/tools/TARGETS.fixture new file mode 100644 index 0000000000000..afbd1d0787dc8 --- /dev/null +++ b/tests/core/build/test_dep_files_data/dep_files/tools/TARGETS.fixture @@ -0,0 +1,5 @@ +tool( + name = "gcc", + src = "gcc.py", + visibility = ["PUBLIC"], +) diff --git a/tests/core/build/test_dep_files_data/dep_files/tools/gcc.py b/tests/core/build/test_dep_files_data/dep_files/tools/gcc.py new file mode 100755 index 0000000000000..7a5dd618a40ed --- /dev/null +++ b/tests/core/build/test_dep_files_data/dep_files/tools/gcc.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +import subprocess +import sys + + +def rewrite_dep_file(path): + with open(path) as f: + body = f.read() + + target, rest = body.split(": ", 1) + + deps = [] + while rest: + line, rest = rest.split("\n", 1) + line = line.rstrip("\\").strip() + deps.append(line) + + with open(path, "w") as f: + for line in deps: + f.write(line) + f.write("\n") + + +def main(): + subprocess.check_call(["gcc"] + sys.argv[1:]) + + for idx in range(len(sys.argv)): + if sys.argv[idx] == "-MF": + rewrite_dep_file(sys.argv[idx + 1]) + + +if __name__ == "__main__": + main() diff --git a/tests/core/build/test_dep_files_data/invalid_dep_files/.buckconfig b/tests/core/build/test_dep_files_data/invalid_dep_files/.buckconfig new file mode 100644 index 0000000000000..86593e627bfda --- /dev/null +++ b/tests/core/build/test_dep_files_data/invalid_dep_files/.buckconfig @@ -0,0 +1,12 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored + +[repositories] +root = . 
+prelude = prelude + +[buck2] + materializations = deferred diff --git a/tests/core/build/test_dep_files_data/invalid_dep_files/TARGETS.fixture b/tests/core/build/test_dep_files_data/invalid_dep_files/TARGETS.fixture new file mode 100644 index 0000000000000..cfaf012fd385a --- /dev/null +++ b/tests/core/build/test_dep_files_data/invalid_dep_files/TARGETS.fixture @@ -0,0 +1,2 @@ +test(name = "eager") +test(name = "lazy", seed = read_config("test", "seed")) diff --git a/tests/core/build/test_dep_files_data/invalid_dep_files/prelude/prelude.bzl b/tests/core/build/test_dep_files_data/invalid_dep_files/prelude/prelude.bzl new file mode 100644 index 0000000000000..2216516c63759 --- /dev/null +++ b/tests/core/build/test_dep_files_data/invalid_dep_files/prelude/prelude.bzl @@ -0,0 +1,42 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _test_impl(ctx): + tag = ctx.actions.artifact_tag() + + dep_file = ctx.actions.declare_output("depfile") + app = ctx.actions.declare_output("app") + + seed = ctx.actions.write("seed", ctx.attrs.seed) + + ctx.actions.run( + [ + "sh", + "-c", + 'echo "../invalid" > "$1" && touch "$2" && echo "$3"', + "--", + tag.tag_artifacts(dep_file.as_output()), + app.as_output(), + seed, + ], + category = "test", + dep_files = {"deps": tag}, + ) + + return [ + DefaultInfo( + default_output = app, + sub_targets = {"dep_file": [DefaultInfo(default_output = dep_file)]}, + ), + ] + +test = rule( + attrs = { + "seed": attrs.string(default = ""), + }, + impl = _test_impl, +) diff --git a/tests/core/build/test_dep_files_data/mismatched_outputs_dep_files/.buckconfig b/tests/core/build/test_dep_files_data/mismatched_outputs_dep_files/.buckconfig new file mode 100644 index 0000000000000..02b742892cc1e --- /dev/null +++ b/tests/core/build/test_dep_files_data/mismatched_outputs_dep_files/.buckconfig @@ -0,0 +1,13 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored + +[repositories] +root = . +prelude = prelude + +[buck2] + materializations = deferred + hash_all_commands = true diff --git a/tests/core/build/test_dep_files_data/mismatched_outputs_dep_files/TARGETS.fixture b/tests/core/build/test_dep_files_data/mismatched_outputs_dep_files/TARGETS.fixture new file mode 100644 index 0000000000000..e6db3845345fa --- /dev/null +++ b/tests/core/build/test_dep_files_data/mismatched_outputs_dep_files/TARGETS.fixture @@ -0,0 +1 @@ +test(name = "test", prefix = read_config("test", "prefix"), suffix = read_config("test", "suffix")) diff --git a/tests/core/build/test_dep_files_data/mismatched_outputs_dep_files/prelude/prelude.bzl b/tests/core/build/test_dep_files_data/mismatched_outputs_dep_files/prelude/prelude.bzl new file mode 100644 index 0000000000000..7031575c81caf --- /dev/null +++ b/tests/core/build/test_dep_files_data/mismatched_outputs_dep_files/prelude/prelude.bzl @@ -0,0 +1,28 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
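The //:lazy and //:eager fixtures above write the line `../invalid` into their dep file, which test_invalid_dep_files expects to fail with "Invalid line encountered in dep file". Before the mismatched-outputs rule below, a rough sketch of the invariant presumably being exercised: dep file entries should be project-relative paths that stay inside the build root. The exact validation Buck2 applies is not shown in this patch, so treat this as an illustration only.

```python
from pathlib import PurePosixPath

def validate_dep_file_line(line: str) -> None:
    # Dep file entries are expected to be relative paths inside the project.
    # A line like "../invalid" escapes the project root and is rejected.
    path = PurePosixPath(line.strip())
    if path.is_absolute() or ".." in path.parts:
        raise ValueError(f"Invalid line encountered in dep file: {line!r}")

validate_dep_file_line("app/app.h")       # ok
try:
    validate_dep_file_line("../invalid")  # raises, like the //:lazy fixture
except ValueError as e:
    print(e)
```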
+ +def _test_impl(ctx): + out = ctx.actions.declare_output(ctx.attrs.prefix, dir = True) + + ctx.actions.run( + [ + "mkdir", + "-p", + cmd_args(out.as_output(), format = "{{}}/{}".format(ctx.attrs.suffix)), + ], + category = "test", + ) + + return [DefaultInfo(out)] + +test = rule( + attrs = { + "prefix": attrs.string(default = ""), + "suffix": attrs.string(default = ""), + }, + impl = _test_impl, +) diff --git a/tests/core/build/test_dep_files_data/upload_dep_files/.buckconfig b/tests/core/build/test_dep_files_data/upload_dep_files/.buckconfig new file mode 100644 index 0000000000000..8e6f26e08d246 --- /dev/null +++ b/tests/core/build/test_dep_files_data/upload_dep_files/.buckconfig @@ -0,0 +1,12 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored + +[repositories] +root = . +prelude = prelude + +[build] +execution_platforms = root//platforms:platforms diff --git a/tests/core/build/test_dep_files_data/upload_dep_files/TARGETS.fixture b/tests/core/build/test_dep_files_data/upload_dep_files/TARGETS.fixture new file mode 100644 index 0000000000000..37411ee211777 --- /dev/null +++ b/tests/core/build/test_dep_files_data/upload_dep_files/TARGETS.fixture @@ -0,0 +1,63 @@ +with_two_dep_files( + name = "dep_files1", + out_name = "out", + dep_file_contents = [ + ( + "dep_file_name0", + ["used.1", "used.2"], + ["unused.1"], + ), + ( + "dep_file_name1", + ["used.3"], + ["unused.2"], + ), + ], + create_dep_file = "create_dep_file.py", + untagged_files = ["untagged.1"], + default_target_platform = "//platforms:target", + exec_compatible_with = ["//platforms:cache_uploads_with_remote_dep_file_query"], +) + +with_two_dep_files( + name = "dep_files2", + out_name = "out", + dep_file_contents = [ + ( + "dep_file_name0", + ["used.1", "used.2"], + ["unused.1"], + ), + ( + "dep_file_name1", + ["used.3"], + ["unused.2"], + ), + ], + create_dep_file = "create_dep_file.py", + untagged_files = ["untagged.2"], + default_target_platform = "//platforms:target", + exec_compatible_with = ["//platforms:cache_uploads_with_remote_dep_file_query"], +) + +with_two_dep_files( + name = "dep_files_fail", + out_name = "out", + dep_file_contents = [ + ( + "dep_file_name0", + ["used.1", "used.2"], + ["unused.1"], + ), + ( + "dep_file_name1", + ["used.3"], + ["unused.2"], + ), + ], + create_dep_file = "create_dep_file.py", + untagged_files = ["untagged.1"], + default_target_platform = "//platforms:target", + exec_compatible_with = ["//platforms:cache_uploads"], + fail = True, +) diff --git a/tests/core/build/test_dep_files_data/upload_dep_files/create_dep_file.py b/tests/core/build/test_dep_files_data/upload_dep_files/create_dep_file.py new file mode 100644 index 0000000000000..aac3f6c337686 --- /dev/null +++ b/tests/core/build/test_dep_files_data/upload_dep_files/create_dep_file.py @@ -0,0 +1,56 @@ +#!/usr/bin/env fbpython +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
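The with_two_dep_files targets above distinguish three kinds of inputs: tagged files that end up listed in a dep file (used.*), tagged files that do not (unused.*), and untagged files. The upload tests earlier assert that the remote dep file key follows only the untagged inputs plus the action's shape (output name and dep file names), never the tagged inputs. Ahead of the create_dep_file.py helper below, a schematic of that keying rule; the hashing scheme here is invented for illustration and is not Buck2's actual key derivation.

```python
import hashlib
from typing import Dict, List

def remote_dep_file_key(
    untagged_inputs: Dict[str, bytes],  # path -> file contents
    output_names: List[str],
    dep_file_names: List[str],
) -> str:
    # Tagged inputs are intentionally NOT part of the key: the remote dep file
    # cache exists to answer "did the output stay the same even though a
    # tagged file changed?"
    h = hashlib.sha1()
    for path in sorted(untagged_inputs):
        h.update(path.encode())
        h.update(untagged_inputs[path])
    for name in output_names + dep_file_names:
        h.update(name.encode())
    return h.hexdigest()

# Mirrors test_re_dep_file_uploads_*: editing a tagged file leaves the key
# unchanged, while renaming the dep file, renaming the output, or editing
# untagged.1 all produce a new key.
base = remote_dep_file_key({"untagged.1": b"untagged.1\n"}, ["out"], ["dep_file_name0"])
renamed = remote_dep_file_key({"untagged.1": b"untagged.1\n"}, ["out_changed"], ["dep_file_name0"])
assert base != renamed
```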
+
+import argparse
+import sys
+from pathlib import Path
+from typing import List
+
+
+def _write_to_dep_file(dep_file: Path, files: List[Path]):
+    dep_file_text = ""
+    for file in files:
+        assert isinstance(file, Path)
+        file_str = str(file)
+        # Check that backslashes in paths work on windows
+        if sys.platform == "win32":
+            assert "\\" in file_str
+        dep_file_text += file_str + "\n"
+    dep_file.write_text(dep_file_text)
+
+
+# 1. Create a file at dep_file0 / dep_file1 (optional) which looks like "file1\nfile2\n..."
+# 2. Write something to out so that it exists
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--used-files0", type=Path, required=True, nargs="*")
+    parser.add_argument("--used-files1", type=Path, required=False, nargs="*")
+    parser.add_argument("--out", type=Path, required=True)
+    parser.add_argument("--dep-file0", type=Path, required=True)
+    parser.add_argument("--dep-file1", type=Path, required=False, default=None)
+    parser.add_argument("--fail", action="store_true")
+    args = parser.parse_args()
+
+    if args.fail:
+        print("Failing on purpose", file=sys.stderr)
+        sys.exit(1)
+
+    # Just copy the contents of the inputs to the output
+    all_used_files = args.used_files0 + (args.used_files1 or [])
+    with args.out.open("a") as f:
+        for used_file in all_used_files:
+            f.write(Path(used_file).read_text())
+
+    _write_to_dep_file(args.dep_file0, args.used_files0)
+
+    if args.dep_file1:
+        _write_to_dep_file(args.dep_file1, args.used_files1)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tests/core/build/test_dep_files_data/upload_dep_files/platforms/TARGETS.fixture b/tests/core/build/test_dep_files_data/upload_dep_files/platforms/TARGETS.fixture
new file mode 100644
index 0000000000000..425ceb51c08c5
--- /dev/null
+++ b/tests/core/build/test_dep_files_data/upload_dep_files/platforms/TARGETS.fixture
@@ -0,0 +1,26 @@
+load("@root//platforms:rules.bzl", "config_setting", "platform", "platforms", "target_platform")
+
+config_setting(name = "setting")
+
+target_platform(name = "target")
+
+platform(
+    name = "cache_uploads",
+    setting = ":setting",
+    allow_cache_uploads = True,
+)
+
+platform(
+    name = "cache_uploads_with_remote_dep_file_query",
+    setting = ":setting",
+    allow_cache_uploads = True,
+    remote_dep_file_cache_enabled = True,
+)
+
+platforms(
+    name = "platforms",
+    platforms = [
+        ":cache_uploads",
+        ":cache_uploads_with_remote_dep_file_query",
+    ],
+)
diff --git a/tests/core/build/test_dep_files_data/upload_dep_files/platforms/rules.bzl b/tests/core/build/test_dep_files_data/upload_dep_files/platforms/rules.bzl
new file mode 100644
index 0000000000000..2ce7392f89c9c
--- /dev/null
+++ b/tests/core/build/test_dep_files_data/upload_dep_files/platforms/rules.bzl
@@ -0,0 +1,99 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+ExecutorConfigInfo = provider(fields = ["config"])
+
+def _platform(ctx):
+    # We need to introduce a constraint to ensure our different execution
+    # platforms are distinct. This is because exec_compatible_with selects a
+    # ConfigurationInfo (which provides a config), not an ExecutionPlatformInfo
+    # (instead it matches on it).
+ configuration = ConfigurationInfo( + constraints = { + ctx.attrs.setting.label.raw_target(): ConstraintValueInfo( + setting = ctx.attrs.setting[ConstraintSettingInfo], + label = ctx.label.raw_target(), + ), + }, + values = {}, + ) + + platform = ExecutionPlatformInfo( + label = ctx.label.raw_target(), + configuration = configuration, + executor_config = CommandExecutorConfig( + local_enabled = ctx.attrs.local_enabled, + remote_enabled = ctx.attrs.remote_enabled, + remote_execution_properties = { + "platform": "linux-remote-execution", + }, + remote_execution_max_input_files_mebibytes = 1, + use_limited_hybrid = ctx.attrs.use_limited_hybrid, + allow_limited_hybrid_fallbacks = ctx.attrs.allow_hybrid_fallbacks_on_failure, + allow_hybrid_fallbacks_on_failure = ctx.attrs.allow_hybrid_fallbacks_on_failure, + remote_execution_use_case = "buck2-testing", + allow_cache_uploads = ctx.attrs.allow_cache_uploads, + remote_dep_file_cache_enabled = ctx.attrs.remote_dep_file_cache_enabled and read_config("test", "remote_dep_file_cache_enabled", "true") == "true", + max_cache_upload_mebibytes = 1, + ), + ) + + return [ + DefaultInfo(), + platform, + configuration, + ] + +platform = rule( + impl = _platform, + attrs = { + "allow_cache_uploads": attrs.bool(default = False), + "allow_hybrid_fallbacks_on_failure": attrs.bool(default = False), + "local_enabled": attrs.bool(default = True), + "remote_dep_file_cache_enabled": attrs.bool(default = False), + "remote_enabled": attrs.bool(default = True), + "setting": attrs.configuration_label(), + "use_limited_hybrid": attrs.bool(default = True), + }, +) + +def _platforms(ctx): + return [ + DefaultInfo(), + ExecutionPlatformRegistrationInfo( + platforms = [x[ExecutionPlatformInfo] for x in ctx.attrs.platforms], + ), + ] + +platforms = rule( + impl = _platforms, + attrs = { + "platforms": attrs.list(attrs.dep(providers = [ExecutionPlatformInfo])), + }, +) + +def _target_platform(ctx): + return [ + DefaultInfo(), + PlatformInfo( + label = str(ctx.label.raw_target()), + configuration = ConfigurationInfo(constraints = {}, values = {}), + ), + ] + +target_platform = rule( + impl = _target_platform, + attrs = {}, +) + +def _config_setting(ctx): + return [DefaultInfo(), ConstraintSettingInfo(label = ctx.label.raw_target())] + +config_setting = rule( + impl = _config_setting, + attrs = {}, +) diff --git a/tests/core/build/test_dep_files_data/upload_dep_files/prelude/prelude.bzl b/tests/core/build/test_dep_files_data/upload_dep_files/prelude/prelude.bzl new file mode 100644 index 0000000000000..5ba4604508e25 --- /dev/null +++ b/tests/core/build/test_dep_files_data/upload_dep_files/prelude/prelude.bzl @@ -0,0 +1,87 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
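Before the dep file rule definitions below, a toy model of the constraint matching that the _platform comment above alludes to: exec_compatible_with names constraint values, and a platform is eligible only when its configuration carries a matching value for each named setting. This is a simplification for intuition, not Buck2's platform resolver; the Python classes merely borrow the provider names.

```python
from dataclasses import dataclass
from typing import Dict

@dataclass
class ConfigurationInfo:
    # constraint setting label -> constraint value label
    constraints: Dict[str, str]

@dataclass
class ExecutionPlatformInfo:
    label: str
    configuration: ConfigurationInfo

def satisfies(platform: ExecutionPlatformInfo, required: Dict[str, str]) -> bool:
    # Every required (setting, value) pair must be present in the platform's
    # configuration; each test platform introduces its own constraint value,
    # which is what keeps the platforms distinct from each other.
    return all(
        platform.configuration.constraints.get(setting) == value
        for setting, value in required.items()
    )

cache_uploads = ExecutionPlatformInfo(
    "root//platforms:cache_uploads",
    ConfigurationInfo({"root//platforms:setting": "root//platforms:cache_uploads"}),
)
assert satisfies(
    cache_uploads, {"root//platforms:setting": "root//platforms:cache_uploads"}
)
assert not satisfies(
    cache_uploads,
    {"root//platforms:setting": "root//platforms:cache_uploads_with_remote_dep_file_query"},
)
```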
+ +def _tag_files(tag, files): + return [tag.tag_artifacts(f) for f in files] + +def _get_tagged_artifacts(ctx, dep_file: Artifact, used_files: list[Artifact], unused_files: list[Artifact]) -> (ArtifactTag, list[typing.Any], list[typing.Any], typing.Any): + tag = ctx.actions.artifact_tag() + tagged_used_files = _tag_files(tag, used_files) + tagged_unused_files = _tag_files(tag, unused_files) + tagged_dep_file = tag.tag_artifacts(dep_file.as_output()) + return (tag, tagged_used_files, tagged_unused_files, tagged_dep_file) + +def _with_two_dep_files_impl(ctx): + allow_dep_file_cache_upload = read_config("test", "allow_dep_file_cache_upload") in ["true", "True"] + allow_cache_upload = read_config("test", "allow_cache_upload") in ["true", "True"] + + out = ctx.actions.declare_output(ctx.attrs.out_name) + + (dep_file_name0, used_files0, unused_files0) = ctx.attrs.dep_file_contents[0] + dep_file0 = ctx.actions.declare_output(dep_file_name0) + (tag0, tagged_used_files0, tagged_unused_files0, tagged_dep_file0) = _get_tagged_artifacts(ctx, dep_file0, used_files0, unused_files0) + + (dep_file_name1, used_files1, unused_files1) = ctx.attrs.dep_file_contents[1] + dep_file1 = ctx.actions.declare_output(dep_file_name1) + (tag1, tagged_used_files1, tagged_unused_files1, tagged_dep_file1) = _get_tagged_artifacts(ctx, dep_file1, used_files1, unused_files1) + + cmd = [ + "python3", + ctx.attrs.create_dep_file, + "--out", + out.as_output(), + "--dep-file0", + tagged_dep_file0, + "--used-files0", + tagged_used_files0, + "--dep-file1", + tagged_dep_file1, + "--used-files1", + tagged_used_files1, + ] + if ctx.attrs.fail: + cmd = cmd + ["--fail"] + + cmd = cmd_args( + cmd, + # Add them to the command so they are tracked as inputs but don't do anything with them. + hidden = tagged_unused_files0 + tagged_unused_files1 + ctx.attrs.untagged_files, + ) + + ctx.actions.run( + cmd, + category = "create_dep_file", + dep_files = {"dep_file_tag0": tag0, "dep_file_tag1": tag1}, + allow_cache_upload = allow_cache_upload, + allow_dep_file_cache_upload = allow_dep_file_cache_upload, + env = {"cache_buster": ctx.attrs.cache_buster}, + ) + + return [ + DefaultInfo( + default_output = out, + sub_targets = {dep_file_name0: [DefaultInfo(default_output = dep_file0)], dep_file_name1: [DefaultInfo(default_output = dep_file1)]}, + ), + ] + +with_two_dep_files = rule( + attrs = { + "cache_buster": attrs.string(default = read_config("test", "cache_buster", "")), + "create_dep_file": attrs.source(), + "dep_file_contents": attrs.list( + attrs.tuple( + attrs.string(), # dep file name + attrs.list(attrs.source()), # tagged files to show up in the dep file + attrs.list(attrs.source()), # tagged files to not show up in the dep file + ), + ), + "fail": attrs.bool(default = False), + "out_name": attrs.string(), + "untagged_files": attrs.list(attrs.source()), + }, + impl = _with_two_dep_files_impl, +) diff --git a/tests/core/build/test_dep_files_data/upload_dep_files/untagged.1 b/tests/core/build/test_dep_files_data/upload_dep_files/untagged.1 new file mode 100644 index 0000000000000..d4763d9ff639a --- /dev/null +++ b/tests/core/build/test_dep_files_data/upload_dep_files/untagged.1 @@ -0,0 +1 @@ +untagged.1 diff --git a/tests/core/build/test_dep_files_data/upload_dep_files/untagged.2 b/tests/core/build/test_dep_files_data/upload_dep_files/untagged.2 new file mode 100644 index 0000000000000..a9800f73ad098 --- /dev/null +++ b/tests/core/build/test_dep_files_data/upload_dep_files/untagged.2 @@ -0,0 +1 @@ +untagged.2 diff --git 
a/tests/core/build/test_dep_files_data/upload_dep_files/unused.1 b/tests/core/build/test_dep_files_data/upload_dep_files/unused.1 new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_dep_files_data/upload_dep_files/unused.2 b/tests/core/build/test_dep_files_data/upload_dep_files/unused.2 new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_dep_files_data/upload_dep_files/used.1 b/tests/core/build/test_dep_files_data/upload_dep_files/used.1 new file mode 100644 index 0000000000000..26e4f522f0759 --- /dev/null +++ b/tests/core/build/test_dep_files_data/upload_dep_files/used.1 @@ -0,0 +1 @@ +used1 diff --git a/tests/core/build/test_dep_files_data/upload_dep_files/used.2 b/tests/core/build/test_dep_files_data/upload_dep_files/used.2 new file mode 100644 index 0000000000000..207a1faa6172c --- /dev/null +++ b/tests/core/build/test_dep_files_data/upload_dep_files/used.2 @@ -0,0 +1 @@ +used2 diff --git a/tests/core/build/test_dep_files_data/upload_dep_files/used.3 b/tests/core/build/test_dep_files_data/upload_dep_files/used.3 new file mode 100644 index 0000000000000..9b3a11e1ac7b4 --- /dev/null +++ b/tests/core/build/test_dep_files_data/upload_dep_files/used.3 @@ -0,0 +1 @@ +used3 diff --git a/tests/core/build/test_error_categorization.py b/tests/core/build/test_error_categorization.py new file mode 100644 index 0000000000000..a0e75a944eda8 --- /dev/null +++ b/tests/core/build/test_error_categorization.py @@ -0,0 +1,400 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test, env + +from buck2.tests.e2e_util.helper.utils import ( + filter_events, + is_running_on_linux, + is_running_on_windows, + read_invocation_record, +) + +# From `buck2_data` +USER_ERROR = 2 +ENVIRONMENT_ERROR = 3 +ACTION_COMMAND_FAILURE = 2 + +STARLARK_FAIL_TAG = 1 +ANY_STARLARK_EVALUATION_TAG = 2001 + + +@buck_test() +async def test_action_error(buck: Buck) -> None: + await expect_failure( + buck.build("//:action_fail"), stderr_regex="Failed to build 'root//:action_fail" + ) + errors_events = await filter_events( + buck, + "Result", + "result", + "build_response", + "errors", + ) + assert len(errors_events) == 1 + errors = errors_events[0] + assert len(errors) == 1 + assert errors[0]["tier"] == USER_ERROR + # This test is unfortunately liable to break as a result of refactorings, since this is not + # stable. Feel free to delete it if it becomes a problem. 
+ assert ( + errors[0]["source_location"] + == "buck2_build_api/src/actions/error.rs::ActionError" + ) + + +@buck_test() +async def test_missing_outputs(buck: Buck) -> None: + # FIXME(JakobDegen): This doesn't work with non-local-only actions + await expect_failure( + buck.build("//:missing_outputs", "--local-only"), + stderr_regex="Failed to build 'root//:missing_outputs", + ) + errors_events = await filter_events( + buck, + "Result", + "result", + "build_response", + "errors", + ) + assert len(errors_events) == 1 + errors = errors_events[0] + assert len(errors) == 1 + assert errors[0]["tier"] == USER_ERROR + + +@buck_test() +async def test_bad_url(buck: Buck) -> None: + await expect_failure( + buck.build("//:bad_url"), + stderr_regex="Failed to build 'root//:bad_url", + ) + errors_events = await filter_events( + buck, + "Result", + "result", + "build_response", + "errors", + ) + assert len(errors_events) == 1 + errors = errors_events[0] + assert len(errors) == 1 + # Also liable to break as a result of refactorings, feel free to update + assert ( + errors[0]["source_location"] == "buck2_http/src/lib.rs::HttpError::SendRequest" + ) + + +@buck_test() +async def test_attr_coercion(buck: Buck) -> None: + await expect_failure( + buck.build("//attr_coercion:int_rule"), + stderr_regex="evaluating build file: `root//attr_coercion:TARGETS.fixture", + ) + errors_events = await filter_events( + buck, + "Result", + "result", + "build_response", + "errors", + ) + assert len(errors_events) == 1 + errors = errors_events[0] + assert len(errors) == 1 + # Just make sure there's some kind of error metadata + assert "CoercionError::TypeError" in errors[0]["source_location"] + + +@buck_test() +async def test_buck2_fail(buck: Buck) -> None: + await expect_failure( + buck.build("//buck2_fail:foobar"), + stderr_regex="evaluating build file: `root//buck2_fail:TARGETS.fixture`", + ) + errors_events = await filter_events( + buck, + "Result", + "result", + "build_response", + "errors", + ) + assert len(errors_events) == 1 + errors = errors_events[0] + assert len(errors) == 1 + # Just make sure that despite there being no context on the error, we still report the right + # metadata + assert ( + errors[0]["source_location"] + == "buck2_interpreter_for_build/src/interpreter/functions/internals.rs::BuckFail" + ) + + +@buck_test() +async def test_starlark_fail_error_categorization(buck: Buck) -> None: + await expect_failure( + buck.build("//starlark_fail:foobar"), + stderr_regex="evaluating build file: `root//starlark_fail:TARGETS.fixture`", + ) + errors_events = await filter_events( + buck, + "Result", + "result", + "build_response", + "errors", + ) + assert len(errors_events) == 1 + errors = errors_events[0] + assert len(errors) == 1 + assert errors[0]["source_location"].endswith("StarlarkError::Fail") + assert errors[0]["tier"] == USER_ERROR + + +@buck_test() +async def test_starlark_parse_error_categorization(buck: Buck) -> None: + await expect_failure( + buck.build("//starlark_parse_error:starlark_parse_error"), + stderr_regex=".*Parse error:.*", + ) + errors_events = await filter_events( + buck, + "Result", + "result", + "build_response", + "errors", + ) + + assert len(errors_events) == 1 + errors = errors_events[0] + + assert len(errors) == 1 + assert errors[0]["source_location"].endswith("StarlarkError::Parser") + assert errors[0]["category_key"].endswith("ANY_STARLARK_EVALUATION") + assert errors[0]["tier"] == USER_ERROR + + +@buck_test() +async def test_starlark_scope_error_categorization(buck: Buck) -> None: + 
await expect_failure(
+        buck.build("//starlark_scope_error:value_err"),
+        stderr_regex="evaluating build file: .* not found",
+    )
+    errors_events = await filter_events(
+        buck,
+        "Result",
+        "result",
+        "build_response",
+        "errors",
+    )
+
+    assert len(errors_events) == 1
+    errors = errors_events[0]
+
+    assert len(errors) == 1
+    assert errors[0]["source_location"].endswith("StarlarkError::Scope")
+    assert errors[0]["category_key"].endswith("ANY_STARLARK_EVALUATION")
+    assert errors[0]["tier"] == USER_ERROR
+
+
+@buck_test()
+async def test_targets_error_categorization(buck: Buck) -> None:
+    await expect_failure(
+        buck.targets("//starlark_fail:foobar"),
+        stderr_regex="evaluating build file: `root//starlark_fail:TARGETS.fixture`",
+    )
+    errors_events = await filter_events(
+        buck,
+        "Result",
+        "result",
+        "error",
+    )
+    assert len(errors_events) == 1
+    errors = errors_events[0]["errors"]
+    assert len(errors) == 1
+    assert errors[0]["tags"] == [STARLARK_FAIL_TAG]
+    assert errors[0]["tier"] == USER_ERROR
+
+
+@buck_test()
+async def test_daemon_crash(buck: Buck, tmp_path: Path) -> None:
+    await buck.build()
+
+    record = tmp_path / "record.json"
+    await buck.debug(
+        "crash", "panic", "--unstable-write-invocation-record", str(record)
+    )
+    invocation_record = read_invocation_record(record)
+
+    errors = invocation_record["errors"]
+
+    assert len(errors) == 1
+    [error] = errors
+    if is_running_on_windows():
+        assert "transport error" in error["message"]
+    else:
+        assert "stream closed because of a broken pipe" in error["message"]
+
+    assert error["tags"] == ["CLIENT_GRPC", "SERVER_PANICKED"]
+    assert "buckd stderr:\n" in error["message"]
+    assert "panicked at" in error["message"]
+
+    assert invocation_record["best_error_tag"] == "SERVER_PANICKED"
+    category_key = invocation_record["best_error_category_key"].split(":")
+    assert category_key[0:3] == [
+        "buck2_client_ctx/src/daemon/client.rs",
+        "CLIENT_GRPC",
+        "SERVER_PANICKED",
+    ]
+    # TODO dump stack trace on windows
+    if not is_running_on_windows():
+        assert category_key[4].startswith("crash("), category_key[4]
+
+
+@buck_test()
+@env("BUCKD_STARTUP_TIMEOUT", "0")
+async def test_connection_timeout(buck: Buck, tmp_path: Path) -> None:
+    record_path = tmp_path / "record.json"
+    res = await expect_failure(
+        buck.targets(":", "--unstable-write-invocation-record", str(record_path))
+    )
+    assert "timed out before establishing connection to Buck daemon" in res.stderr
+
+    record = read_invocation_record(record_path)
+
+    assert record["command_end"] is None
+    assert record["has_command_result"] is False
+    assert record["has_end_of_stream"] is False
+    assert record["daemon_connection_failure"] is True
+    assert record["daemon_was_started"] is None
+
+    assert record["best_error_tag"] == "DAEMON_CONNECT"
+
+
+@buck_test()
+async def test_daemon_abort(buck: Buck, tmp_path: Path) -> None:
+    await buck.build()
+
+    record = tmp_path / "record.json"
+    await buck.debug(
+        "crash", "abort", "--unstable-write-invocation-record", str(record)
+    )
+    invocation_record = read_invocation_record(record)
+
+    errors = invocation_record["errors"]
+    assert len(errors) == 1
+    [error] = errors
+
+    category_key = invocation_record["best_error_category_key"].split(":")
+
+    if is_running_on_windows():
+        # TODO get windows to dump a stack trace
+        assert "buckd stderr is empty" in error["message"]
+        assert category_key[0:3] == [
+            "buck2_client_ctx/src/daemon/client.rs",
+            "CLIENT_GRPC",
+            "SERVER_STDERR_EMPTY",
+        ]
+        assert invocation_record["best_error_tag"] == "SERVER_STDERR_EMPTY"
+ else: + # Messages from folly's signal handler. + assert "*** Aborted at" in error["message"] + assert "*** Signal 6 (SIGABRT)" in error["message"] + assert category_key[0:3] == [ + "buck2_client_ctx/src/daemon/client.rs", + "CLIENT_GRPC", + "SERVER_STDERR_UNKNOWN", + ] + assert invocation_record["best_error_tag"] == "SERVER_STDERR_UNKNOWN" + + # TODO dump stack trace on mac and windows + if is_running_on_linux(): + assert category_key[3].startswith("crash("), category_key[3] + + +@buck_test() +async def test_build_file_race(buck: Buck, tmp_path: Path) -> None: + target = "//file_busy:file" + # first build + file_path = (await buck.build(target)).get_build_report().output_for_target(target) + + # Open the file for writing and keep it open + f = open(file_path, "w") + # build again, source code has changed, binary must be rebuilt + record = tmp_path / "record.json" + build = buck.build( + target, + "--show-output", + "-c", + "test.cache_buster=2", + "--unstable-write-invocation-record", + str(record), + ) + + if is_running_on_windows(): + await expect_failure(build) + + invocation_record = read_invocation_record(record) + assert invocation_record["best_error_tag"] == "IO_MATERIALIZER_FILE_BUSY" + assert invocation_record["errors"][0]["tier"] == ENVIRONMENT_ERROR + else: + await build + + f.close() + + +@buck_test() +async def test_download_failure(buck: Buck, tmp_path: Path) -> None: + # Upload action if necessary + await buck.build("//:run_action", "--remote-only") + await buck.clean() + record_path = tmp_path / "record.json" + res = await expect_failure( + buck.build( + "//:run_action", + "--unstable-write-invocation-record", + str(record_path), + env={"BUCK2_TEST_FAIL_RE_DOWNLOADS": "true"}, + ) + ) + record = read_invocation_record(record_path) + category_key = record["best_error_category_key"] + assert ( + category_key + == "buck2_execute/src/re/error.rs::RemoteExecutionError:MATERIALIZATION_ERROR:RE_NOT_FOUND:UNKNOWN" + ) + assert ( + "Your build requires materializing an artifact that has expired in the RE CAS and Buck does not have it. This likely happened because your Buck daemon has been online for a long time. This error is currently unrecoverable. To proceed, you should restart Buck using `buck2 killall`." + in res.stderr + ) + + +@buck_test() +async def test_local_incompatible(buck: Buck, tmp_path: Path) -> None: + record_path = tmp_path / "record.json" + res = await expect_failure( + buck.build( + "//:local_run_action", + "--remote-only", + "--no-remote-cache", + "--unstable-write-invocation-record", + str(record_path), + ) + ) + + assert "Incompatible executor preferences" in res.stderr + + record = read_invocation_record(record_path) + assert record["error_category"] == "USER" + assert ( + record["best_error_category_key"] + == "buck2_build_api/src/actions/error.rs::ActionError:ANY_ACTION_EXECUTION" + ) diff --git a/tests/core/build/test_error_categorization_data/.buckconfig b/tests/core/build/test_error_categorization_data/.buckconfig new file mode 100644 index 0000000000000..cb37c990c90cb --- /dev/null +++ b/tests/core/build/test_error_categorization_data/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . 
+prelude = prelude diff --git a/tests/core/build/test_error_categorization_data/.buckroot b/tests/core/build/test_error_categorization_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_error_categorization_data/TARGETS.fixture b/tests/core/build/test_error_categorization_data/TARGETS.fixture new file mode 100644 index 0000000000000..49e899705e640 --- /dev/null +++ b/tests/core/build/test_error_categorization_data/TARGETS.fixture @@ -0,0 +1,22 @@ +load(":defs.bzl", "action_fail", "bad_url", "missing_outputs", "run_action") + +action_fail( + name = "action_fail", +) + +missing_outputs( + name = "missing_outputs", +) + +bad_url( + name = "bad_url", +) + +run_action( + name = "run_action", +) + +run_action( + name = "local_run_action", + local_only = True, +) diff --git a/tests/core/build/test_error_categorization_data/attr_coercion/TARGETS.fixture b/tests/core/build/test_error_categorization_data/attr_coercion/TARGETS.fixture new file mode 100644 index 0000000000000..5a70c0dfea866 --- /dev/null +++ b/tests/core/build/test_error_categorization_data/attr_coercion/TARGETS.fixture @@ -0,0 +1,9 @@ +load(":defs.bzl", "int_rule") + +int_rule( + name = "int_rule", + x = "foobar", +) + +# This test is interesting because it's a scenario in which there is context both above and below +# the starlark diagnostic diff --git a/tests/core/build/test_error_categorization_data/attr_coercion/defs.bzl b/tests/core/build/test_error_categorization_data/attr_coercion/defs.bzl new file mode 100644 index 0000000000000..4ff949ee6b69b --- /dev/null +++ b/tests/core/build/test_error_categorization_data/attr_coercion/defs.bzl @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(_ctx): + fail("unused") + +int_rule = rule( + impl = _impl, + attrs = { + "x": attrs.int(), + }, +) diff --git a/tests/core/build/test_error_categorization_data/buck2_fail/TARGETS.fixture b/tests/core/build/test_error_categorization_data/buck2_fail/TARGETS.fixture new file mode 100644 index 0000000000000..67b2dd4c22514 --- /dev/null +++ b/tests/core/build/test_error_categorization_data/buck2_fail/TARGETS.fixture @@ -0,0 +1 @@ +__internal__.buck2_fail("foobar") diff --git a/tests/core/build/test_error_categorization_data/defs.bzl b/tests/core/build/test_error_categorization_data/defs.bzl new file mode 100644 index 0000000000000..e3e2bfdb5e84a --- /dev/null +++ b/tests/core/build/test_error_categorization_data/defs.bzl @@ -0,0 +1,48 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
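Looking back at the daemon crash and abort tests above, best_error_category_key is inspected by splitting on ":": a source location first, then error tags, then optional trailing components such as crash(...). A tiny helper in the same spirit, purely illustrative and not part of the test utilities in this patch:

```python
from typing import List

def assert_category_key_prefix(category_key: str, expected_prefix: List[str]) -> None:
    # Category keys are colon-joined: source location, then tags, then extras.
    parts = category_key.split(":")
    assert parts[: len(expected_prefix)] == expected_prefix, parts

assert_category_key_prefix(
    "buck2_client_ctx/src/daemon/client.rs:CLIENT_GRPC:SERVER_STDERR_EMPTY",
    ["buck2_client_ctx/src/daemon/client.rs", "CLIENT_GRPC"],
)
```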
+ +def _action_fail(ctx): + out = ctx.actions.declare_output("out.txt") + ctx.actions.run(cmd_args("false", hidden = out.as_output()), category = "run") + return [DefaultInfo(default_outputs = [out])] + +action_fail = rule( + impl = _action_fail, + attrs = {}, +) + +def _action_missing_output(ctx): + out = ctx.actions.declare_output("out") + ctx.actions.run(cmd_args("true", hidden = out.as_output()), category = "run") + return [DefaultInfo(default_outputs = [out])] + +missing_outputs = rule( + impl = _action_missing_output, + attrs = {}, +) + +def _bad_url(ctx): + out = ctx.actions.declare_output("out.txt") + ctx.actions.download_file(out.as_output(), "doesnotexist640693486.com", sha1 = "1" * 40) + return [DefaultInfo(default_output = out)] + +bad_url = rule( + impl = _bad_url, + attrs = {}, +) + +def _run_action(ctx): + out = ctx.actions.declare_output("out") + ctx.actions.run(cmd_args(["sh", "-c", 'echo > "$1"', "--", out.as_output()]), category = "run", local_only = ctx.attrs.local_only) + return [DefaultInfo(default_outputs = [out])] + +run_action = rule( + impl = _run_action, + attrs = { + "local_only": attrs.bool(default = False), + }, +) diff --git a/tests/core/build/test_error_categorization_data/file_busy/TARGETS.fixture b/tests/core/build/test_error_categorization_data/file_busy/TARGETS.fixture new file mode 100644 index 0000000000000..9fe15380a4d91 --- /dev/null +++ b/tests/core/build/test_error_categorization_data/file_busy/TARGETS.fixture @@ -0,0 +1,6 @@ +load(":write.bzl", "write") + +write( + name = "file", + src = read_config("test", "cache_buster", default = "1"), +) diff --git a/tests/core/build/test_error_categorization_data/file_busy/write.bzl b/tests/core/build/test_error_categorization_data/file_busy/write.bzl new file mode 100644 index 0000000000000..31984d9e0ad67 --- /dev/null +++ b/tests/core/build/test_error_categorization_data/file_busy/write.bzl @@ -0,0 +1,17 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
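test_build_file_race above depends on a platform difference: replacing a file that another handle holds open fails on Windows with a sharing violation (which the test sees surfaced as IO_MATERIALIZER_FILE_BUSY), but succeeds on POSIX. A standalone illustration of that difference, not Buck2's materializer code:

```python
import os
import tempfile

# Replacing a file while another handle keeps it open fails on Windows
# (PermissionError / sharing violation) but succeeds on POSIX.
with tempfile.TemporaryDirectory() as d:
    target = os.path.join(d, "out.txt")
    replacement = os.path.join(d, "new.txt")
    with open(target, "w") as f:
        f.write("old")
    with open(replacement, "w") as f:
        f.write("new")

    holder = open(target, "r")  # keep the "built" file open, like the test does
    try:
        os.replace(replacement, target)
        print("replace succeeded (POSIX semantics)")
    except PermissionError:
        print("replace failed: file busy (Windows semantics)")
    finally:
        holder.close()
```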
+ +def _impl(ctx): + out = ctx.actions.write("out.txt", ctx.attrs.src) + return [DefaultInfo(default_output = out)] + +write = rule( + impl = _impl, + attrs = { + "src": attrs.string(), + }, +) diff --git a/tests/core/build/test_error_categorization_data/prelude/prelude.bzl b/tests/core/build/test_error_categorization_data/prelude/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_error_categorization_data/starlark_fail/TARGETS.fixture b/tests/core/build/test_error_categorization_data/starlark_fail/TARGETS.fixture new file mode 100644 index 0000000000000..f4784549b0429 --- /dev/null +++ b/tests/core/build/test_error_categorization_data/starlark_fail/TARGETS.fixture @@ -0,0 +1 @@ +fail("foobar") diff --git a/tests/core/build/test_error_categorization_data/starlark_parse_error/TARGETS.fixture b/tests/core/build/test_error_categorization_data/starlark_parse_error/TARGETS.fixture new file mode 100644 index 0000000000000..92504f6bcbbf3 --- /dev/null +++ b/tests/core/build/test_error_categorization_data/starlark_parse_error/TARGETS.fixture @@ -0,0 +1,2 @@ +# @lint-ignore-every BUCKFORMAT +this targets file is not syntactically valid diff --git a/tests/core/build/test_error_categorization_data/starlark_scope_error/TARGETS.fixture b/tests/core/build/test_error_categorization_data/starlark_scope_error/TARGETS.fixture new file mode 100644 index 0000000000000..7a23e617bb7f2 --- /dev/null +++ b/tests/core/build/test_error_categorization_data/starlark_scope_error/TARGETS.fixture @@ -0,0 +1 @@ +this_doesnt_exist(name = "foobar") diff --git a/tests/core/build/test_executor_with_dependencies.py b/tests/core/build/test_executor_with_dependencies.py new file mode 100644 index 0000000000000..aab48a658260d --- /dev/null +++ b/tests/core/build/test_executor_with_dependencies.py @@ -0,0 +1,78 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
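The three starlark_* fixtures above trip three distinct stages of evaluation, namely parsing, name resolution, and an explicit fail(), which the error categorization tests see as StarlarkError::Parser, ::Scope, and ::Fail respectively. As a loose Python analogy only (Starlark resolves scopes statically, Python at runtime, so the correspondence is illustrative, not exact):

```python
# Stage 1: parse error - the source is not syntactically valid.
try:
    compile("this targets file is not syntactically valid", "<TARGETS>", "exec")
except SyntaxError as e:
    print("parser error:", e.msg)

# Stage 2: scope error - the source parses, but a name does not resolve.
try:
    exec(compile("this_doesnt_exist(name = 'foobar')", "<TARGETS>", "exec"), {})
except NameError as e:
    print("scope error:", e)

# Stage 3: explicit failure - evaluation runs and the file calls fail().
def fail(msg):
    raise RuntimeError(msg)

try:
    fail("foobar")
except RuntimeError as e:
    print("fail():", e)
```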
+
+# pyre-strict
+
+
+import json
+from pathlib import Path
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.asserts import expect_failure
+from buck2.tests.e2e_util.buck_workspace import buck_test
+from buck2.tests.e2e_util.helper.utils import random_string, read_what_ran
+
+
+@buck_test()
+async def test_executor_with_dependencies(buck: Buck) -> None:
+    # Smoke test: run on RE and correctly pass the `remote_execution_dependencies` parameter specified in the platform.
+    # The RE external dependency (https://fburl.com/wiki/e55nloow) is deliberately wrong, as the smc_tier does not exist;
+    # we just want to check that the parameter is passed correctly to RE.
+    await expect_failure(
+        buck.build(
+            ":target_without_dependencies",
+            "-c",
+            "build.execution_platforms=root//platforms:platforms_with_dependencies",
+            "-c",
+            f"test.cache_buster={random_string()}",
+        ),
+        # Full error message looks like this: P1217423393
+        stderr_regex='facebook::remote_execution::scheduler::TaskCancelledException: Error acquiring dependency TaskDependencyRequest { dependency: TDependency { smc_tier: "buck2_smoke_test_tier", id: "dep_a"',
+    )
+
+
+@buck_test()
+async def test_good_target_with_dependencies(buck: Buck) -> None:
+    result = await buck.build(
+        ":good_target_with_dependencies",
+        "-c",
+        "build.execution_platforms=root//platforms:platforms_without_dependencies",
+        "-c",
+        f"test.cache_buster={random_string()}",
+        "--show-full-output",
+    )
+    output_dict = result.get_target_to_build_output()
+
+    for _target, output in output_dict.items():
+        with Path(output).open() as f:
+            deps = json.load(f)
+            assert len(deps) == 1
+            assert deps[0]["smc_tier"] == "noop"
+            assert deps[0]["id"] == "foo"
+            assert deps[0]["reservation_id"] == "noop"
+
+    # Make sure it actually did run on RE.
+    out = await read_what_ran(buck)
+    executors = {line["identity"]: line["reproducer"]["executor"] for line in out}
+    expected = {
+        "root//:good_target_with_dependencies () (cp)": "Re",
+    }
+    assert executors == expected
+
+
+@buck_test()
+async def test_bad_target_with_dependencies(buck: Buck) -> None:
+    await expect_failure(
+        buck.build(
+            ":bad_target_with_dependencies",
+            "-c",
+            "build.execution_platforms=root//platforms:platforms_without_dependencies",
+            "-c",
+            f"test.cache_buster={random_string()}",
+        ),
+        stderr_regex="error: too many fields set for RE dependency: `extra_field, id, smc_tier`",
+    )
diff --git a/tests/core/build/test_executor_with_dependencies_data/.buckconfig b/tests/core/build/test_executor_with_dependencies_data/.buckconfig
new file mode 100644
index 0000000000000..6a2e48bc41d8a
--- /dev/null
+++ b/tests/core/build/test_executor_with_dependencies_data/.buckconfig
@@ -0,0 +1,9 @@
+[buildfile]
+name=TARGETS.fixture
+
+[project]
+ignore=ignored
+
+[repositories]
+root = .
+prelude = prelude diff --git a/tests/core/build/test_executor_with_dependencies_data/.buckroot b/tests/core/build/test_executor_with_dependencies_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_executor_with_dependencies_data/TARGETS.fixture b/tests/core/build/test_executor_with_dependencies_data/TARGETS.fixture new file mode 100644 index 0000000000000..cb8473914fce2 --- /dev/null +++ b/tests/core/build/test_executor_with_dependencies_data/TARGETS.fixture @@ -0,0 +1,13 @@ +load(":defs.bzl", "test") + +test(name = "target_without_dependencies") + +test( + name = "good_target_with_dependencies", + remote_execution_dependencies = [{"id": "foo", "smc_tier": "noop"}], +) + +test( + name = "bad_target_with_dependencies", + remote_execution_dependencies = [{"extra_field": "abc", "id": "foo", "smc_tier": "noop"}], +) diff --git a/tests/core/build/test_executor_with_dependencies_data/defs.bzl b/tests/core/build/test_executor_with_dependencies_data/defs.bzl new file mode 100644 index 0000000000000..e49441622ba23 --- /dev/null +++ b/tests/core/build/test_executor_with_dependencies_data/defs.bzl @@ -0,0 +1,21 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _test_impl(ctx): + out = ctx.actions.declare_output("file") + ctx.actions.run( + ["cp", "/run/re_worker/action_dependencies", out.as_output()], + category = "cp", + env = {"cache_buster": ctx.attrs.cache_buster}, + remote_execution_dependencies = ctx.attrs.remote_execution_dependencies, + ) + return [DefaultInfo(out)] + +test = rule(attrs = { + "cache_buster": attrs.string(default = read_config("test", "cache_buster", "")), + "remote_execution_dependencies": attrs.list(attrs.dict(key = attrs.string(), value = attrs.string()), default = []), +}, impl = _test_impl) diff --git a/tests/core/build/test_executor_with_dependencies_data/platforms/TARGETS.fixture b/tests/core/build/test_executor_with_dependencies_data/platforms/TARGETS.fixture new file mode 100644 index 0000000000000..b64c198dc3e63 --- /dev/null +++ b/tests/core/build/test_executor_with_dependencies_data/platforms/TARGETS.fixture @@ -0,0 +1,10 @@ +load(":defs.bzl", "execution_platforms") + +execution_platforms( + name = "platforms_without_dependencies", +) + +execution_platforms( + name = "platforms_with_dependencies", + remote_execution_dependencies = [{"id": "dep_a", "smc_tier": "buck2_smoke_test_tier"}], +) diff --git a/tests/core/build/test_executor_with_dependencies_data/platforms/defs.bzl b/tests/core/build/test_executor_with_dependencies_data/platforms/defs.bzl new file mode 100644 index 0000000000000..ea8e3af234b77 --- /dev/null +++ b/tests/core/build/test_executor_with_dependencies_data/platforms/defs.bzl @@ -0,0 +1,36 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
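+
+# Shape note: each entry of `remote_execution_dependencies` is a plain string
+# dict, e.g. {"id": "dep_a", "smc_tier": "buck2_smoke_test_tier"} as in
+# platforms/TARGETS.fixture; the rule below forwards the list unmodified into
+# the CommandExecutorConfig of the execution platform it registers.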
+
+def _execution_platform(ctx):
+    configuration = ConfigurationInfo(
+        constraints = {
+        },
+        values = {},
+    )
+
+    platform = ExecutionPlatformInfo(
+        label = ctx.label.raw_target(),
+        configuration = configuration,
+        executor_config = CommandExecutorConfig(
+            local_enabled = False,
+            remote_enabled = True,
+            remote_execution_properties = {
+                "platform": "linux-remote-execution",
+            },
+            remote_execution_use_case = "buck2-testing",
+            remote_execution_dependencies = ctx.attrs.remote_execution_dependencies,
+        ),
+    )
+
+    return [
+        DefaultInfo(),
+        ExecutionPlatformRegistrationInfo(platforms = [platform]),
+    ]
+
+execution_platforms = rule(attrs = {
+    "remote_execution_dependencies": attrs.list(attrs.dict(key = attrs.string(), value = attrs.string()), default = []),
+}, impl = _execution_platform)
diff --git a/tests/core/build/test_executor_with_dependencies_data/prelude/prelude.bzl b/tests/core/build/test_executor_with_dependencies_data/prelude/prelude.bzl
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/build/test_hash_all_commands.py b/tests/core/build/test_hash_all_commands.py
new file mode 100644
index 0000000000000..40650578f0dcc
--- /dev/null
+++ b/tests/core/build/test_hash_all_commands.py
@@ -0,0 +1,126 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+
+import pytest
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.buck_workspace import buck_test, env
+from buck2.tests.e2e_util.helper.utils import expect_exec_count
+
+
+@buck_test()
+@env("BUCK_LOG", "buck2_action_impl::actions::impls::run::dep_files=trace")
+@pytest.mark.parametrize(
+    "local_only",
+    [
+        "true",
+        "false",
+    ],
+)
+async def test_hash_all_commands(buck: Buck, local_only: str) -> None:
+    # Expecting a rebuild since the command wasn't hashed previously.
+    await buck.build(
+        "//:test",
+        "-c",
+        "test.seed=123",
+        "-c",
+        f"test.local_only={local_only}",
+    )
+    await expect_exec_count(buck, 1)
+    # Disable remote cache lookup so we actually utilize the local depfile cache for this test.
+    # No longer expecting a rebuild.
+    res = await buck.build(
+        "//:test",
+        "-c",
+        "test.seed=456",
+        "-c",
+        f"test.local_only={local_only}",
+        "--no-remote-cache",
+    )
+    await expect_exec_count(buck, 0)
+
+    # Check that we're matching on just the directory here
+    assert "Command line and directory have not changed" in res.stderr
+
+
+@buck_test()
+async def test_hash_all_commands_key_change(buck: Buck) -> None:
+    # Expecting a rebuild since the command wasn't hashed previously.
+    await buck.build(
+        "//:test",
+        "-c",
+        "test.param=123",
+        "-c",
+        "test.category=cat1",
+    )
+    await expect_exec_count(buck, 1)
+
+    # Again expecting a rebuild as the action doesn't match and also it's not
+    # even the same action.
+    await buck.build(
+        "//:test",
+        "-c",
+        "test.param=456",
+        "-c",
+        "test.category=cat2",
+    )
+    await expect_exec_count(buck, 1)
+
+    # Again expecting a rebuild as the action once again doesn't match, but
+    # while it mismatches with the 2nd action (which had a different key),
+    # it does match the 1st action (which had the same key). Since the 2nd
+    # action clobbered the output, however, this *must* re-run.
+ await buck.build( + "//:test", + "-c", + "test.param=123", + "-c", + "test.category=cat1", + ) + await expect_exec_count(buck, 1) + + +@buck_test() +async def test_hash_all_commands_key_change_deps(buck: Buck) -> None: + # Expecting a rebuild since the command wasn't hashed previously. + await buck.build( + "//:symlink_test", + "-c", + "test.param=123", + "-c", + "test.category=cat1", + ) + await expect_exec_count(buck, 1) + + # Again expecting a rebuild because the cache key is different anyway. + await buck.build( + "//:symlink_test", + "-c", + "test.param=456", + "-c", + "test.category=cat2", + ) + await expect_exec_count(buck, 1) + + # Not actually expecting a rebuild this time, because the symlink + # output is unchanged. + res = await buck.build( + "//:symlink_test", + "-c", + "test.param=123", + "-c", + "test.category=cat1", + ) + await expect_exec_count(buck, 0) + + # But we should have the right output here + build_report = res.get_build_report() + output = build_report.output_for_target("//:symlink_test") + assert output.read_text().rstrip() == "123" diff --git a/tests/core/build/test_hash_all_commands_data/.buckconfig b/tests/core/build/test_hash_all_commands_data/.buckconfig new file mode 100644 index 0000000000000..d88a7205eccae --- /dev/null +++ b/tests/core/build/test_hash_all_commands_data/.buckconfig @@ -0,0 +1,15 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored + +[repositories] +root = . +prelude = prelude + +[buck2] + materializations = deferred + hash_all_commands = true + declare_match_in_depfiles = true + declare_in_local_executor = true diff --git a/tests/core/build/test_hash_all_commands_data/.buckroot b/tests/core/build/test_hash_all_commands_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_hash_all_commands_data/TARGETS.fixture b/tests/core/build/test_hash_all_commands_data/TARGETS.fixture new file mode 100644 index 0000000000000..bbb0ea20d7258 --- /dev/null +++ b/tests/core/build/test_hash_all_commands_data/TARGETS.fixture @@ -0,0 +1,13 @@ +echo_check( + name = "test", + seed = read_config("test", "seed", ""), + param = read_config("test", "param", ""), + category = read_config("test", "category", "dummy"), + local_only = read_config("test", "local_only", "false"), +) + +symlink_check( + name = "symlink_test", + param = read_config("test", "param", ""), + category = read_config("test", "category", "dummy"), +) diff --git a/tests/core/build/test_hash_all_commands_data/prelude/prelude.bzl b/tests/core/build/test_hash_all_commands_data/prelude/prelude.bzl new file mode 100644 index 0000000000000..952d9340d94fb --- /dev/null +++ b/tests/core/build/test_hash_all_commands_data/prelude/prelude.bzl @@ -0,0 +1,78 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
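+
+# A usage sketch (mirroring TARGETS.fixture): `seed` feeds the action key but
+# never reaches the executed command line or its environment, while `param`
+# does. With `hash_all_commands = true` in .buckconfig, a seed-only change can
+# therefore be satisfied without re-executing the command:
+#
+#     echo_check(
+#         name = "test",
+#         seed = read_config("test", "seed", ""),
+#         param = read_config("test", "param", ""),
+#         category = read_config("test", "category", "dummy"),
+#         local_only = read_config("test", "local_only", "false"),
+#     )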
+ +def _echo_check_impl(ctx): + app = ctx.actions.declare_output("app") + + if ctx.attrs.local_only == "true": + local_only = True + elif ctx.attrs.local_only == "false": + local_only = False + else: + fail("Invalid local only: {}".format(ctx.attrs.local_only)) + + ctx.actions.run( + [ + "sh", + "-c", + 'echo "$1" > "$2"', + "--", + ctx.attrs.param, + app.as_output(), + ], + env = { + "local_only": str(local_only), + }, + category = ctx.attrs.category, + local_only = local_only, + ) + + return [ + DefaultInfo( + default_output = app, + ), + ] + +echo_check = rule( + attrs = { + "category": attrs.string(), + "local_only": attrs.string(), + # NOTE: This does NOT get ignored. + "param": attrs.string(), + # NOTE: This gets ignored. + "seed": attrs.string(), + }, + impl = _echo_check_impl, +) + +def _symlink_check_impl(ctx): + data = ctx.actions.write("data", ctx.attrs.param) + out = ctx.actions.declare_output("out/symlink") + + # NOTE: "data" and "out" will be next to each other here, hence ../data + # NOTE: We use local_only since RE actually returns files for symlinks. + ctx.actions.run( + cmd_args( + ["ln", "-s", "../data", out.as_output()], + hidden = data, + ), + category = ctx.attrs.category, + local_only = True, + ) + + return [ + DefaultInfo(default_output = out), + ] + +symlink_check = rule( + attrs = { + "category": attrs.string(), + # NOTE: This gets ignored by the executed action (but not the data it links to) + "param": attrs.string(), + }, + impl = _symlink_check_impl, +) diff --git a/tests/core/build/test_modify.py b/tests/core/build/test_modify.py new file mode 100644 index 0000000000000..25238c25f468d --- /dev/null +++ b/tests/core/build/test_modify.py @@ -0,0 +1,96 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
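+
+# Note on the notify variant below: the daemon's file watcher is switched by
+# appending
+#
+#     [buck2]
+#     file_watcher = notify
+#
+# to .buckconfig and restarting the daemon (`buck.kill()`), after which the
+# same modification scenario is replayed under the notify-based watcher.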
+ +# pyre-strict + + +import fileinput +import os +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.utils import random_string + + +@buck_test(data_dir="modify") +async def test_modify_genrule(buck: Buck) -> None: + result = await buck.build("//:writer") + output = result.get_build_report().output_for_target("root//:writer") + assert Path(output).read_text() == "HELLO\n" + + # Change "HELLO" in TARGETS to "GOODBYE" + with fileinput.input(buck.cwd / "TARGETS.fixture", inplace=True) as f: + for line in f: + print(line.replace("HELLO", "GOODBYE"), end="") + + result = await buck.build("//:writer") + output = result.get_build_report().output_for_target("root//:writer") + assert Path(output).read_text() == "GOODBYE\n" + + +@buck_test(data_dir="modify") +async def test_modify_src(buck: Buck) -> None: + result = await buck.build("//:mysrcrule") + output = result.get_build_report().output_for_target("root//:mysrcrule") + assert Path(output).read_text() == "HELLO\n" + + (buck.cwd / "src.txt").write_text("GOODBYE\n") + result = await buck.build("//:mysrcrule") + output = result.get_build_report().output_for_target("root//:mysrcrule") + assert Path(output).read_text() == "GOODBYE\n" + + +@buck_test(data_dir="modify") +async def test_modify_genrule_notify(buck: Buck) -> None: + with open(buck.cwd / ".buckconfig", "a") as buckconfig: + buckconfig.write("\n[buck2]\nfile_watcher = notify") + await buck.kill() # Ensure the config gets picked up + await test_modify_genrule(buck) + + +@buck_test(data_dir="modify") +async def test_modify_directory(buck: Buck) -> None: + # Test for the bug reported in T99593442 + os.mkdir(buck.cwd / "a_dir") + with open(buck.cwd / "a_dir" / "test.txt", "w") as file: + file.write("test") + await buck.build("//:writer") + # Remove a directory, and change a file, so the file gets spotted, + # and we'd better note that the directory no longer exists + os.remove(buck.cwd / "a_dir" / "test.txt") + os.rmdir(buck.cwd / "a_dir") + await buck.build("//:writer") + + +@buck_test(data_dir="modify_file_during_build") +async def test_modify_file_during_build(buck: Buck) -> None: + # We need to write some random stuff to the file first so that Buck will + # have to attempt to upload it to RE (which will fail because by that time + # we will have overwritten it with other content). + with open(buck.cwd / "text", "w", encoding="utf-8") as f: + f.write(random_string()) + + await expect_failure( + buck.build("//:check"), + stderr_regex="modified files while the build was in progress", + ) + + +@buck_test(data_dir="modify_file_during_build") +async def test_file_notify(buck: Buck) -> None: + # We need to write some random stuff to the file first so that Buck will + # have to attempt to upload it to RE (which will fail because by that time + # we will have overwritten it with other content). 
+ with open(buck.cwd / "text", "w", encoding="utf-8") as f: + f.write(random_string()) + + await expect_failure( + buck.build("//:check"), + stderr_regex="modified files while the build was in progress", + ) diff --git a/tests/core/build/test_modify_data/.buckroot b/tests/core/build/test_modify_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_modify_data/modify/.buckconfig b/tests/core/build/test_modify_data/modify/.buckconfig new file mode 100644 index 0000000000000..08e0b0a3d2118 --- /dev/null +++ b/tests/core/build/test_modify_data/modify/.buckconfig @@ -0,0 +1,5 @@ +[cells] + root = . + +[buildfile] + name=TARGETS.fixture diff --git a/tests/core/build/test_modify_data/modify/TARGETS.fixture b/tests/core/build/test_modify_data/modify/TARGETS.fixture new file mode 100644 index 0000000000000..f71dd2e4901c6 --- /dev/null +++ b/tests/core/build/test_modify_data/modify/TARGETS.fixture @@ -0,0 +1,13 @@ +load(":rules.bzl", "copy_file", "write_string") + +write_string( + name = "writer", + content = "HELLO\n", + out = "out.txt", +) + +copy_file( + name = "mysrcrule", + src = "src.txt", + out = "out.txt", +) diff --git a/tests/core/build/test_modify_data/modify/rules.bzl b/tests/core/build/test_modify_data/modify/rules.bzl new file mode 100644 index 0000000000000..84e9888991170 --- /dev/null +++ b/tests/core/build/test_modify_data/modify/rules.bzl @@ -0,0 +1,30 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _write_string_impl(ctx): + out = ctx.actions.write(ctx.attrs.out, ctx.attrs.content) + return [DefaultInfo(default_output = out)] + +write_string = rule( + impl = _write_string_impl, + attrs = { + "content": attrs.string(default = ""), + "out": attrs.string(), + }, +) + +def _copy_file_impl(ctx): + out = ctx.actions.copy_file(ctx.attrs.out, ctx.attrs.src) + return [DefaultInfo(default_output = out)] + +copy_file = rule( + impl = _copy_file_impl, + attrs = { + "out": attrs.string(), + "src": attrs.source(), + }, +) diff --git a/tests/core/build/test_modify_data/modify/src.txt b/tests/core/build/test_modify_data/modify/src.txt new file mode 100644 index 0000000000000..e427984d4a2c1 --- /dev/null +++ b/tests/core/build/test_modify_data/modify/src.txt @@ -0,0 +1 @@ +HELLO diff --git a/tests/core/build/test_modify_data/modify_file_during_build/.buckconfig b/tests/core/build/test_modify_data/modify_file_during_build/.buckconfig new file mode 100644 index 0000000000000..b16f1dfe52e41 --- /dev/null +++ b/tests/core/build/test_modify_data/modify_file_during_build/.buckconfig @@ -0,0 +1,8 @@ +[cells] + root = . 
+ +[buildfile] + name=TARGETS.fixture + +[buck2] + materializations = deferred diff --git a/tests/core/build/test_modify_data/modify_file_during_build/TARGETS.fixture b/tests/core/build/test_modify_data/modify_file_during_build/TARGETS.fixture new file mode 100644 index 0000000000000..405a90644eee5 --- /dev/null +++ b/tests/core/build/test_modify_data/modify_file_during_build/TARGETS.fixture @@ -0,0 +1,4 @@ +load(":defs.bzl", "depend_file", "modify_file") + +modify_file(name = "modify_file", text = "text") +depend_file(name = "check", modify_file = ":modify_file", text = "text") diff --git a/tests/core/build/test_modify_data/modify_file_during_build/defs.bzl b/tests/core/build/test_modify_data/modify_file_during_build/defs.bzl new file mode 100644 index 0000000000000..bb2e10a845eae --- /dev/null +++ b/tests/core/build/test_modify_data/modify_file_during_build/defs.bzl @@ -0,0 +1,56 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _modify_file_impl(ctx): + text = ctx.attrs.text + + out = ctx.actions.declare_output("out") + + ctx.actions.run([ + "python3", + "-c", + "import sys; fp=open(sys.argv[1], 'w'); fp.write('REPLACEMENT'); open(sys.argv[2], 'w')", + text, + out.as_output(), + ], local_only = True, category = "test") + + return [DefaultInfo(default_output = out)] + +modify_file = rule( + impl = _modify_file_impl, + attrs = { + "text": attrs.source(), + }, +) + +def _depend_impl(ctx): + text = ctx.attrs.text + modify_file = ctx.attrs.modify_file[DefaultInfo].default_outputs[0] + + out = ctx.actions.declare_output("out") + + ctx.actions.run( + cmd_args( + [ + "cp", + text, + out.as_output(), + ], + hidden = modify_file, + ), + category = "test", + ) + + return [DefaultInfo(default_output = out)] + +depend_file = rule( + impl = _depend_impl, + attrs = { + "modify_file": attrs.dep(), + "text": attrs.source(), + }, +) diff --git a/tests/core/build/test_modify_data/modify_file_during_build/text b/tests/core/build/test_modify_data/modify_file_during_build/text new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_nested_subtargets.py b/tests/core/build/test_nested_subtargets.py new file mode 100644 index 0000000000000..3d85e19f92861 --- /dev/null +++ b/tests/core/build/test_nested_subtargets.py @@ -0,0 +1,44 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
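+
+# Subtarget addressing recap for the tests below: `//:nested[sub]` selects the
+# `sub` entry of the target's `sub_targets` dict, and `//:nested[sub][nested_sub]`
+# indexes one level deeper into the nested DefaultInfo (see defs.bzl in the
+# data directory); an unknown name fails and lists the available subtargets.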
+ +# pyre-strict + + +from buck2.tests.e2e_util import asserts +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_build_nested_subtargets(buck: Buck) -> None: + result = await buck.build( + "//:nested[sub][nested_sub]", + ) + build_report = result.get_build_report() + + output = build_report.output_for_target("//:nested", "sub|nested_sub") + + assert output.read_text().rstrip() == "foo_content" + asserts.assert_not_executable(output) + + +@buck_test() +async def test_build_nested_subtargets_errors(buck: Buck) -> None: + await expect_failure( + buck.build( + "//:nested[bad]", + ), + stderr_regex="Available subtargets are.*sub", + ) + + await expect_failure( + buck.build( + "//:nested[sub][bad]", + ), + stderr_regex="Available subtargets are.*nested_sub", + ) diff --git a/tests/core/build/test_nested_subtargets_data/.buckconfig b/tests/core/build/test_nested_subtargets_data/.buckconfig new file mode 100644 index 0000000000000..08e0b0a3d2118 --- /dev/null +++ b/tests/core/build/test_nested_subtargets_data/.buckconfig @@ -0,0 +1,5 @@ +[cells] + root = . + +[buildfile] + name=TARGETS.fixture diff --git a/tests/core/build/test_nested_subtargets_data/.buckroot b/tests/core/build/test_nested_subtargets_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_nested_subtargets_data/TARGETS.fixture b/tests/core/build/test_nested_subtargets_data/TARGETS.fixture new file mode 100644 index 0000000000000..477fdae37c626 --- /dev/null +++ b/tests/core/build/test_nested_subtargets_data/TARGETS.fixture @@ -0,0 +1,5 @@ +load(":defs.bzl", "nested_subtargets") + +nested_subtargets( + name = "nested", +) diff --git a/tests/core/build/test_nested_subtargets_data/defs.bzl b/tests/core/build/test_nested_subtargets_data/defs.bzl new file mode 100644 index 0000000000000..56b66ac3c1ade --- /dev/null +++ b/tests/core/build/test_nested_subtargets_data/defs.bzl @@ -0,0 +1,24 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _nested_subtargets(ctx): + out = ctx.actions.write("foo", "foo_content") + + nested_info = [DefaultInfo( + sub_targets = {"nested_sub": [ + DefaultInfo(default_output = out), + ]}, + )] + + return [DefaultInfo( + sub_targets = {"sub": nested_info}, + )] + +nested_subtargets = rule( + impl = _nested_subtargets, + attrs = {}, +) diff --git a/tests/core/build/test_out_flag.py b/tests/core/build/test_out_flag.py new file mode 100644 index 0000000000000..56d731eaf2c65 --- /dev/null +++ b/tests/core/build/test_out_flag.py @@ -0,0 +1,112 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
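+
+# Behaviour exercised below, in brief: `--out PATH` copies the single default
+# output to PATH (creating missing parent directories), `--out DIR` drops the
+# output into an existing directory, and `--out -` streams outputs to stdout;
+# zero default outputs, multiple default outputs, multiple top-level targets
+# with a file PATH, and sending a directory output to stdout are all errors.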
+ +# pyre-strict + + +import os +import tempfile +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_out_single_default_output(buck: Buck) -> None: + with tempfile.TemporaryDirectory() as out: + output = os.path.join(out, "output") + await buck.build("//:a", "--out", output) + with open(output) as readable: + assert readable.read() == "a\n" + + +@buck_test() +async def test_out_overwrite(buck: Buck) -> None: + with tempfile.TemporaryDirectory() as out: + output = os.path.join(out, "output") + await buck.build("//:a", "--out", output) + await buck.build("//:a", "--out", output) + + +@buck_test() +async def test_out_parent_not_exist(buck: Buck) -> None: + with tempfile.TemporaryDirectory() as out: + output = os.path.join(out, "notexist", "output") + await buck.build("//:a", "--out", output) + with open(output) as readable: + assert readable.read() == "a\n" + + +@buck_test() +async def test_out_single_default_output_to_dir(buck: Buck) -> None: + with tempfile.TemporaryDirectory() as out: + await buck.build("//:a", "--out", out) + with open(Path(out) / "a.txt") as readable: + assert readable.read() == "a\n" + + +@buck_test() +async def test_out_no_outputs(buck: Buck) -> None: + with tempfile.NamedTemporaryFile("w") as out: + await expect_failure( + buck.build("//:none", "--out", out.name), + stderr_regex="produced zero default outputs", + ) + + +@buck_test() +async def test_out_multiple_outputs(buck: Buck) -> None: + with tempfile.NamedTemporaryFile("w") as out: + await expect_failure( + buck.build("//:ab", "--out", out.name), + stderr_regex="produced 2 outputs", + ) + + +@buck_test() +async def test_out_multiple_targets(buck: Buck) -> None: + with tempfile.NamedTemporaryFile("w") as out: + await expect_failure( + buck.build("//:a", "//:b", "--out", out.name), + stderr_regex="command built multiple top-level targets", + ) + + +@buck_test() +async def test_out_directory(buck: Buck) -> None: + with tempfile.TemporaryDirectory() as out: + await buck.build("//:dir", "--out", out) + assert (Path(out) / "b.txt").exists() + assert (Path(out) / "nested_dir" / "a.txt").exists() + + +@buck_test() +async def test_out_stdout_multiple(buck: Buck) -> None: + result = await buck.build("//:a", "//:b", "--out", "-") + + # The e2e test runner adds a `--build-report` flag in order to be able + # to parse out failures. In normal usage of `--out -` there wouldn't be this + # extra line of JSON on the stdout, we'd _just_ get the requested outputs. + a, b, build_report, trailing = result.stdout.split("\n") + assert (a, b) == ("a", "b") or (a, b) == ("b", "a") + assert build_report.startswith("{") + assert trailing == "" + + +@buck_test() +async def test_out_stdout_none(buck: Buck) -> None: + await buck.build("--out", "-") + + +@buck_test() +async def test_out_stdout_directory(buck: Buck) -> None: + await expect_failure( + buck.build("//:dir", "--out", "-"), + stderr_regex="produces a default output that is a directory, and cannot be sent to stdout", + ) diff --git a/tests/core/build/test_out_flag_data/.buckconfig b/tests/core/build/test_out_flag_data/.buckconfig new file mode 100644 index 0000000000000..08e0b0a3d2118 --- /dev/null +++ b/tests/core/build/test_out_flag_data/.buckconfig @@ -0,0 +1,5 @@ +[cells] + root = . 
+ +[buildfile] + name=TARGETS.fixture diff --git a/tests/core/build/test_out_flag_data/.buckroot b/tests/core/build/test_out_flag_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_out_flag_data/TARGETS.fixture b/tests/core/build/test_out_flag_data/TARGETS.fixture new file mode 100644 index 0000000000000..1284caf2a2212 --- /dev/null +++ b/tests/core/build/test_out_flag_data/TARGETS.fixture @@ -0,0 +1,53 @@ +load(":defs.bzl", "export_file", "out_library") + +export_file( + name = "a.txt", + src = "a.txt", +) + +export_file( + name = "b.txt", + src = "b.txt", +) + +export_file( + name = "directory", + src = "directory", +) + +# A target with no default outputs. +out_library( + name = "none", + outs = [], +) + +# Targets with exactly one default output. +out_library( + name = "a", + outs = [ + ":a.txt", + ], +) +out_library( + name = "b", + outs = [ + ":b.txt", + ], +) + +# A target with two default outputs. +out_library( + name = "ab", + outs = [ + ":a.txt", + ":b.txt", + ], +) + +# A target whose output is a directory. +out_library( + name = "dir", + outs = [ + ":directory", + ], +) diff --git a/tests/core/build/test_out_flag_data/a.txt b/tests/core/build/test_out_flag_data/a.txt new file mode 100644 index 0000000000000..78981922613b2 --- /dev/null +++ b/tests/core/build/test_out_flag_data/a.txt @@ -0,0 +1 @@ +a diff --git a/tests/core/build/test_out_flag_data/b.txt b/tests/core/build/test_out_flag_data/b.txt new file mode 100644 index 0000000000000..61780798228d1 --- /dev/null +++ b/tests/core/build/test_out_flag_data/b.txt @@ -0,0 +1 @@ +b diff --git a/tests/core/build/test_out_flag_data/defs.bzl b/tests/core/build/test_out_flag_data/defs.bzl new file mode 100644 index 0000000000000..b1da9a6417042 --- /dev/null +++ b/tests/core/build/test_out_flag_data/defs.bzl @@ -0,0 +1,32 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _out_library_impl(ctx): + outs = [] + for out in ctx.attrs.outs: + for default_out in out[DefaultInfo].default_outputs: + out_artifact = ctx.actions.copy_file(default_out.basename, default_out) + outs.append(out_artifact) + + return [DefaultInfo(default_outputs = outs)] + +out_library = rule( + impl = _out_library_impl, + attrs = { + "outs": attrs.list(attrs.dep()), + }, +) + +def _export_file_impl(ctx): + return [DefaultInfo(default_output = ctx.attrs.src)] + +export_file = rule( + impl = _export_file_impl, + attrs = { + "src": attrs.source(allow_directory = True), + }, +) diff --git a/tests/core/build/test_out_flag_data/directory/b.txt b/tests/core/build/test_out_flag_data/directory/b.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_out_flag_data/directory/nested_dir/a.txt b/tests/core/build/test_out_flag_data/directory/nested_dir/a.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_output_cleanup.py b/tests/core/build/test_output_cleanup.py new file mode 100644 index 0000000000000..cb00f7a2ede6d --- /dev/null +++ b/tests/core/build/test_output_cleanup.py @@ -0,0 +1,86 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+
+import os
+from pathlib import Path
+from typing import Dict
+
+import pytest
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.buck_workspace import buck_test
+
+output_cleanup_targets = [
+    "local_action",
+    "remote_action",
+    "symlinked_dir",
+    "write",
+    "copy",
+]
+
+
+@buck_test(skip_for_os=["windows"])
+@pytest.mark.parametrize(
+    "materializations",
+    [
+        "deferred",
+        "all",
+    ],
+)
+@pytest.mark.parametrize(
+    "first",
+    output_cleanup_targets,
+)
+@pytest.mark.parametrize(
+    "second",
+    output_cleanup_targets,
+)
+async def test_output_cleanup(
+    buck: Buck, tmp_path: Path, materializations: str, first: str, second: str
+) -> None:
+    def read_dir(d: Path) -> Dict[str, str]:
+        steps = 0
+        out = {}
+
+        for root, dirs, files in os.walk(d, topdown=False):
+            for name in files:
+                path = os.path.join(root, name)
+                out[os.path.relpath(path, d)] = open(path).read()
+                steps += 1
+            for name in dirs:
+                path = os.path.join(root, name)
+                out.setdefault(os.path.relpath(path, d), {})
+                steps += 1
+
+        return out
+
+    with open(buck.cwd / ".buckconfig", "a") as buckconfig:
+        buckconfig.write(f"\n[buck2]\nmaterializations = {materializations}")
+    await buck.kill()  # Ensure the config gets picked up
+
+    rebuild = tmp_path / "rebuild"
+    clean = tmp_path / "clean"
+
+    first = f"{first}-a"
+    second = f"{second}-b"
+
+    await buck.build(":main", "-c", f"test.main={first}")
+    await buck.build(":main", "-c", f"test.main={second}", "--out", str(rebuild))
+
+    await buck.clean()
+    await buck.build(":main", "-c", f"test.main={second}", "--out", str(clean))
+
+    assert read_dir(rebuild) == read_dir(clean)
+
+
+# TODO(marwhal): Add this back once at least one test in this file passes on Windows
+@buck_test()
+async def test_noop(buck: Buck) -> None:
+    return
diff --git a/tests/core/build/test_output_cleanup_data/.buckconfig b/tests/core/build/test_output_cleanup_data/.buckconfig
new file mode 100644
index 0000000000000..d1a34452dcabe
--- /dev/null
+++ b/tests/core/build/test_output_cleanup_data/.buckconfig
@@ -0,0 +1,8 @@
+[cells]
+  root = .
+
+[buildfile]
+  name=TARGETS.fixture
+
+[project]
+  ignore=ignored
diff --git a/tests/core/build/test_output_cleanup_data/.buckroot b/tests/core/build/test_output_cleanup_data/.buckroot
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/build/test_output_cleanup_data/TARGETS.fixture b/tests/core/build/test_output_cleanup_data/TARGETS.fixture
new file mode 100644
index 0000000000000..714fb73c0462e
--- /dev/null
+++ b/tests/core/build/test_output_cleanup_data/TARGETS.fixture
@@ -0,0 +1,3 @@
+load(":defs.bzl", "declare_targets")
+
+declare_targets()
diff --git a/tests/core/build/test_output_cleanup_data/defs.bzl b/tests/core/build/test_output_cleanup_data/defs.bzl
new file mode 100644
index 0000000000000..3884f21f46260
--- /dev/null
+++ b/tests/core/build/test_output_cleanup_data/defs.bzl
@@ -0,0 +1,92 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
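+
+# Selection sketch: `declare_targets()` at the bottom reads `test.main` and
+# declares exactly one `:main` target of the requested flavor, so the test can
+# switch output shapes between builds, e.g.:
+#
+#     buck2 build :main -c test.main=symlinked_dir-a
+#     buck2 build :main -c test.main=write-b --out /tmp/clean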
+ +def _action_impl(ctx): + out = ctx.actions.declare_output("out") + + ctx.actions.run( + [ + "python3", + "-c", + "import sys; open(sys.argv[1], 'w').write(sys.argv[2])", + out.as_output(), + ctx.attrs.seed, + ], + local_only = ctx.attrs.local_only, + category = "write", + ) + + return [DefaultInfo(default_output = out)] + +action = rule( + attrs = { + "local_only": attrs.bool(), + "seed": attrs.string(), + }, + impl = _action_impl, +) + +def _symlinked_dir_impl(ctx): + f = ctx.actions.write("dst/f", "file") + out = ctx.actions.symlinked_dir("out", {ctx.attrs.seed: f}) + return [DefaultInfo(default_output = out)] + +symlinked_dir = rule( + attrs = { + "seed": attrs.string(), + }, + impl = _symlinked_dir_impl, +) + +def _write_impl(ctx): + out = ctx.actions.write("out", ctx.attrs.seed) + return [DefaultInfo(default_output = out)] + +write = rule( + attrs = { + "seed": attrs.string(), + }, + impl = _write_impl, +) + +def _copy_impl(ctx): + f = ctx.actions.write("dst/f", ctx.attrs.seed) + out = ctx.actions.copy_file("out", f) + return [DefaultInfo(default_output = out)] + +copy = rule( + attrs = { + "seed": attrs.string(), + }, + impl = _copy_impl, +) + +def declare_targets(): + target = read_config("test", "main") + + if target == "local_action-a": + action(name = "main", local_only = True, seed = "local-action-a") + elif target == "local_action-b": + action(name = "main", local_only = True, seed = "local-action-b") + elif target == "remote_action-a": + action(name = "main", local_only = False, seed = "remote-action-a") + elif target == "remote_action-b": + action(name = "main", local_only = False, seed = "remote-action-b") + elif target == "symlinked_dir-a": + symlinked_dir(name = "main", seed = "symlinked_dir-a") + elif target == "symlinked_dir-b": + symlinked_dir(name = "main", seed = "symlinked_dir-b") + elif target == "write-a": + write(name = "main", seed = "write-a") + elif target == "write-b": + write(name = "main", seed = "write-b") + elif target == "copy-a": + copy(name = "main", seed = "copy-a") + elif target == "copy-b": + copy(name = "main", seed = "copy-b") + else: + fail("Invalid target: `{}`".format(target)) diff --git a/tests/core/build/test_paranoid.py b/tests/core/build/test_paranoid.py new file mode 100644 index 0000000000000..bcb66a8741dca --- /dev/null +++ b/tests/core/build/test_paranoid.py @@ -0,0 +1,191 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
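+
+# Toggle cheat-sheet for the tests below: paranoid mode can be forced per
+# command via the BUCK_PARANOID environment variable, or toggled at the daemon
+# level with `buck2 debug paranoid enable|disable` (optionally time-limited,
+# e.g. `--ttl 10s`); the change takes effect when the next command restarts
+# the daemon.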
+ +# pyre-strict + + +import asyncio +import json +import random +import string +from pathlib import Path +from typing import Any, Dict, List + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + +# FIXME(JakobDegen): Give these tests their own data dir, instead of sharing one + +# TODO(T184317763): either those tests are flaky or paranoid mode is broken, +# to repro uncomment and run `buck2 test '@fbcode//mode/opt-asan' fbcode//buck2/tests/e2e/build:test_paranoid -- --exact 'buck2/tests/e2e/build:test_paranoid - test_paranoid.py::test_paranoid_allows_fallback_after_re_failure' --run-disabled` +# @buck_test( +# data_dir="execution_platforms", +# skip_for_os=["windows"], +# ) +# @env("BUCK2_TEST_FAIL_RE_DOWNLOADS", "true") +# async def test_paranoid_allows_fallback_after_re_failure( +# buck: Buck, +# ) -> None: +# """ +# Currently, this is only supported in paranoid mode. This is a smaller issue +# than it might seem, since it only enters the picture if RE claims, cancels +# local, and *then* we need a fallback, which is fairly unlikely since most +# of the work is done by then, and also if that fails, there's no reason to +# expect deferred materialization won't fail later, which is a much bigger +# (and much more irrecoverable) problem. +# """ + +# def args(): +# return [ +# "root//executor_race_tests:slower_locally_and_works_on_both", +# "-c", +# f"test.cache_buster={random_string()}", +# ] + +# await expect_failure( +# buck.build(*args()), +# stderr_regex="Injected error", +# ) + +# await buck.build( +# *args(), +# env={"BUCK_PARANOID": "true"}, +# ) + + +@buck_test( + data_dir="execution_platforms", + skip_for_os=["windows"], +) +async def test_paranoid_ignores_preferences( + buck: Buck, +) -> None: + def args() -> List[str]: + return [ + "root//executor_race_tests:fails_slow_on_re_works_locally_prefer_remote", + "-c", + f"test.cache_buster={random_string()}", + ] + + await expect_failure( + buck.build(*args()), + stderr_regex="Remote command returned non-zero exit code 1", + ) + + await buck.build( + *args(), + env={"BUCK_PARANOID": "true"}, + ) + + +# TODO(T184317763): either those tests are flaky or paranoid mode is broken, +# to repro uncomment and run `buck2 test '@fbcode//mode/opt-asan' fbcode//buck2/tests/e2e/build:test_paranoid -- --exact 'buck2/tests/e2e/build:test_paranoid - test_paranoid.py::test_paranoid_forces_fallback_on_failure' --run-disabled` +# @buck_test( +# data_dir="execution_platforms", +# skip_for_os=["windows"], +# ) +# async def test_paranoid_forces_fallback_on_failure( +# buck: Buck, +# ) -> None: +# def args(): +# return [ +# "root//executor_race_tests:slower_and_works_only_locally_local_not_preferred", +# "-c", +# f"test.cache_buster={random_string()}", +# ] + +# await expect_failure( +# buck.build(*args()), +# stderr_regex="Remote command returned non-zero exit code 1", +# ) + +# await buck.build( +# *args(), +# env={"BUCK_PARANOID": "true"}, +# ) + + +@buck_test( + data_dir="execution_platforms", + skip_for_os=["windows"], +) +async def test_paranoid_ignores_low_pass_filter( + buck: Buck, +) -> None: + def args() -> List[str]: + return [ + "root//executor_race_tests:fails_slow_on_re_works_locally_heavyweight", + "-c", + f"test.cache_buster={random_string()}", + "-c", + "test.experimental_low_pass_filter=true", + ] + + await expect_failure( + buck.build(*args()), + stderr_regex="Remote command returned non-zero exit code 1", + ) + + await 
buck.build(
+        *args(),
+        env={"BUCK_PARANOID": "true"},
+    )
+
+
+@buck_test(
+    data_dir="execution_platforms",
+)
+async def test_paranoid_enable_disable(
+    buck: Buck,
+    tmp_path: Path,
+) -> None:
+    env = {"BUCK2_PARANOID_PATH": str(tmp_path / "paranoid.info")}
+
+    # Start the daemon
+    await buck.build(env=env)
+
+    async def config() -> Dict[str, Any]:
+        status = (await buck.status()).stdout
+        config = json.loads(status)["daemon_constraints"]["daemon_startup_config"]
+        print(config)
+        return json.loads(config)
+
+    # It's not paranoid.
+    assert not (await config())["paranoid"]
+
+    # Still not: we didn't run any commands, so no restart.
+    await buck.debug("paranoid", "enable", env=env)
+    assert not (await config())["paranoid"]
+
+    # Run a command, should restart and enable.
+    await buck.build(env=env)
+    assert (await config())["paranoid"]
+
+    # Turn it off
+    await buck.debug("paranoid", "disable", env=env)
+    await buck.build(env=env)
+    assert not (await config())["paranoid"]
+
+    # Turn it back on, but not for long.
+    await buck.debug("paranoid", "enable", "--ttl", "10s", env=env)
+    await buck.build(env=env)
+    assert (await config())["paranoid"]
+
+    # Wait until it turns off.
+    await asyncio.sleep(15)
+    await buck.build(env=env)
+    assert not (await config())["paranoid"]
+
+
+@buck_test(data_dir="execution_platforms")
+async def test_noop(buck: Buck) -> None:
+    return
+
+
+def random_string() -> str:
+    return "".join(random.choice(string.ascii_lowercase) for i in range(256))
diff --git a/tests/core/build/test_plugins.py b/tests/core/build/test_plugins.py
new file mode 100644
index 0000000000000..91d4b2475e98c
--- /dev/null
+++ b/tests/core/build/test_plugins.py
@@ -0,0 +1,149 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
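+
+# Plugin-dep vocabulary used by these tests (see tests/rules.bzl in the data
+# directory):
+#  - plugins.kind() declares a plugin kind (e.g. RustProcMacro);
+#  - attrs.plugin_dep(kind = K) takes a direct dependency on a plugin target;
+#  - attrs.dep(pulls_plugins = [K]) makes a dep's plugins visible to this
+#    target only, while pulls_and_pushes_plugins = [K] also re-exports them;
+#  - rule(..., uses_plugins = [K]) is what lets analysis read ctx.plugins[K].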
+ +# pyre-strict + + +import json + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_deps_in_cquery_not_uquery(buck: Buck) -> None: + # Check that plugin deps appear as deps in uquery but not in cquery + result = await buck.uquery("deps(//tests:reg_a)") + assert "//tests:reg_a_REAL" in result.stdout + result = await buck.cquery("deps(//tests:reg_a)") + assert "//tests:reg_a_REAL" not in result.stdout + # And make sure that the attribute itself is serialized correctly in cquery and uquery + result = await buck.uquery("-a", "actual", "//tests:reg_a") + assert json.loads(result.stdout) == { + "root//tests:reg_a": {"actual": "root//tests:reg_a_REAL"} + } + result = await buck.cquery("-a", "actual", "//tests:reg_a") + assert json.loads(result.stdout) == { + "root//tests:reg_a ()": {"actual": "root//tests:reg_a_REAL"} + } + + +@buck_test() +async def test_cquery(buck: Buck) -> None: + ###### Check that everything is correctly configured as reported by cquery + result = await buck.cquery( + "--json", + "-a", + "buck.deps", + "-a", + "buck.execution_platform", + "-a", + "buck.plugins", + "deps(//tests:b)", + ) + result = json.loads(result.stdout) + + b = next(v for k, v in result.items() if k.startswith("root//tests:b")) + l = next( # noqa: E741 `l` as a variable name is fine + v for k, v in result.items() if k.startswith("root//tests:l") + ) + + assert set(b["buck.plugins"]["RustProcMacro"]) == { + "root//tests:reg_a_REAL", + "root//tests:reg_b_REAL", + "root//tests:doc_a_REAL", + } + assert set(l["buck.plugins"]["RustProcMacro"]) == { + "root//tests:reg_a_REAL", + "root//tests:doc_b_REAL", + } + + assert b["buck.execution_platform"].startswith("root//config:platform_linux") + assert any( + dep.startswith("root//tests:reg_a_REAL (root//config:platform_linux") + for dep in b["buck.deps"] + ) + assert l["buck.execution_platform"].startswith("root//config:platform_windows") + assert any( + dep.startswith("root//tests:reg_a_REAL (root//config:platform_windows") + for dep in l["buck.deps"] + ) + + assert any( + k.startswith("root//tests:reg_a_REAL (root//config:platform_linux") + for k in result.keys() + ) + assert any( + k.startswith("root//tests:reg_a_REAL (root//config:platform_windows") + for k in result.keys() + ) + + +@buck_test() +async def test_analysis(buck: Buck) -> None: + # Check that we can properly identify all the different plugin deps in analysis + result = await buck.build("root//tests:b", "root//tests:l") + + b = json.loads( + result.get_build_report().output_for_target("root//tests:b").read_text() + ) + assert b == { + "indirect": ["Reg A (linux)"], + "direct": ["Reg B (linux)"], + "indirect_doc": ["Doc A (linux)"], + "direct_doc": [], + } + + l = json.loads( # noqa: E741 `l` as a variable name is fine + result.get_build_report().output_for_target("root//tests:l").read_text() + ) + assert l == { # noqa: E741 `l` as a variable name is fine + "indirect": [], + "direct": ["Reg A (windows)"], + "indirect_doc": [], + "direct_doc": ["Doc B (windows)"], + } + + +@buck_test() +async def test_plugin_dep_errors(buck: Buck) -> None: + # Tests are explained in the file + await buck.build("//test_errors:recursive_target_1") + + await buck.build("//test_errors:regular_a") + + result = await buck.uquery("deps(//test_errors:regular_b)") + assert "//test_errors:toolchain" in result.stdout + result = await 
expect_failure(buck.build("//test_errors:regular_b")) + assert ( + "Plugin dep `root//test_errors:toolchain` is a toolchain rule" in result.stderr + ) + + result = await expect_failure(buck.build("//test_errors:wrong_plugin_kind")) + assert "The rule did not declare that it uses plugins of kind A" in result.stderr + + +@buck_test() +async def test_repeated_insertion(buck: Buck) -> None: + result = await buck.cquery( + "-a", "buck.plugins", "//repeated_insertion:different_deps_alias" + ) + assert {"Plugin": ["root//repeated_insertion:plugin"]} == list( + json.loads(result.stdout).values() + )[0]["buck.plugins"] + + +@buck_test() +async def test_visibility(buck: Buck) -> None: + result = await expect_failure(buck.build("//visibility:missing_access")) + assert ( + "`root//visibility/package:hidden` is not visible to `root//visibility:missing_access`" + in result.stderr + ) + + await buck.build("//visibility:has_access") diff --git a/tests/core/build/test_plugins_data/.buckconfig b/tests/core/build/test_plugins_data/.buckconfig new file mode 100644 index 0000000000000..feecbacf451ce --- /dev/null +++ b/tests/core/build/test_plugins_data/.buckconfig @@ -0,0 +1,9 @@ +[buildfile] +name = TARGETS.fixture + +[repositories] +root = . +prelude = prelude + +[build] +execution_platforms = root//config:platforms diff --git a/tests/core/build/test_plugins_data/.buckroot b/tests/core/build/test_plugins_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_plugins_data/config/TARGETS.fixture b/tests/core/build/test_plugins_data/config/TARGETS.fixture new file mode 100644 index 0000000000000..a2f98d8eaa033 --- /dev/null +++ b/tests/core/build/test_plugins_data/config/TARGETS.fixture @@ -0,0 +1,24 @@ +load(":defs.bzl", "constraint_setting", "constraint_value", "platform", "platforms") + +constraint_setting(name = "os") +constraint_value(name = "windows", setting = ":os") +constraint_value(name = "linux", setting = ":os") + +constraint_setting(name = "mode") +constraint_value(name = "release", setting = ":mode") +constraint_value(name = "debug", setting = ":mode") + +platform( + name = "platform_windows", + configuration = [":windows", ":release"], +) + +platform( + name = "platform_linux", + configuration = [":linux", ":release"], +) + +platforms( + name = "platforms", + platforms = [":platform_linux", ":platform_windows"], +) diff --git a/tests/core/build/test_plugins_data/config/defs.bzl b/tests/core/build/test_plugins_data/config/defs.bzl new file mode 100644 index 0000000000000..3b16bfc1e9a68 --- /dev/null +++ b/tests/core/build/test_plugins_data/config/defs.bzl @@ -0,0 +1,70 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _constraint_setting(ctx): + return [DefaultInfo(), ConstraintSettingInfo(label = ctx.label.raw_target())] + +constraint_setting = rule( + impl = _constraint_setting, + attrs = {}, +) + +def _constraint_value(ctx): + constraint_value = ConstraintValueInfo( + setting = ctx.attrs.setting[ConstraintSettingInfo], + label = ctx.label.raw_target(), + ) + return [ + DefaultInfo(), + constraint_value, + # Provide `ConfigurationInfo` from `constraint_value` so it could be used as select key. 
+ ConfigurationInfo(constraints = { + constraint_value.setting.label: constraint_value, + }, values = {}), + ] + +constraint_value = rule( + impl = _constraint_value, + attrs = {"setting": attrs.dep(providers = [ConstraintSettingInfo])}, +) + +def _platform(ctx): + constraints = {} + values = {} + for x in ctx.attrs.configuration: + constraints |= x[ConfigurationInfo].constraints + values |= x[ConfigurationInfo].values + cfg = ConfigurationInfo(constraints = constraints, values = values) + + platform = ExecutionPlatformInfo( + label = ctx.label.raw_target(), + configuration = cfg, + executor_config = CommandExecutorConfig( + local_enabled = True, + remote_enabled = False, + ), + ) + + return [DefaultInfo(), platform, PlatformInfo(label = str(ctx.label.raw_target()), configuration = cfg)] + +platform = rule( + impl = _platform, + attrs = {"configuration": attrs.list(attrs.dep(providers = [ConfigurationInfo]))}, +) + +def _platforms(ctx): + return [ + DefaultInfo(), + ExecutionPlatformRegistrationInfo( + platforms = [x[ExecutionPlatformInfo] for x in ctx.attrs.platforms], + ), + ] + +platforms = rule( + impl = _platforms, + attrs = {"platforms": attrs.list(attrs.dep(providers = [ExecutionPlatformInfo]))}, +) diff --git a/tests/core/build/test_plugins_data/prelude/prelude.bzl b/tests/core/build/test_plugins_data/prelude/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_plugins_data/repeated_insertion/TARGETS.fixture b/tests/core/build/test_plugins_data/repeated_insertion/TARGETS.fixture new file mode 100644 index 0000000000000..7adad6e21f023 --- /dev/null +++ b/tests/core/build/test_plugins_data/repeated_insertion/TARGETS.fixture @@ -0,0 +1,23 @@ +load(":rules.bzl", "alias", "different_deps", "introduce_plugin", "plugin") + +plugin( + name = "plugin", +) + +introduce_plugin( + name = "has_plugin", + plugin = ":plugin", +) + +# `:plugin` appears as a plugin twice for this target. Make sure that it is propagated +different_deps( + name = "different_deps", + direct = ":plugin", + no_propagate = ":has_plugin", +) + +# This one we just use to be able to inspect whether it's propagated or not +alias( + name = "different_deps_alias", + actual = ":different_deps", +) diff --git a/tests/core/build/test_plugins_data/repeated_insertion/rules.bzl b/tests/core/build/test_plugins_data/repeated_insertion/rules.bzl new file mode 100644 index 0000000000000..83db32aabade9 --- /dev/null +++ b/tests/core/build/test_plugins_data/repeated_insertion/rules.bzl @@ -0,0 +1,38 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
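+
+# What this fixture models: `:plugin` reaches `:different_deps` along two
+# routes (a direct `plugin_dep` and a `dep` that pulls it), and the cquery
+# assertion in test_repeated_insertion checks that the plugin list is
+# deduplicated to a single entry rather than recorded twice.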
+ +def _nop_impl(_ctx): + return [DefaultInfo()] + +Plugin = plugins.kind() + +plugin = rule( + impl = _nop_impl, + attrs = {}, +) + +introduce_plugin = rule( + impl = _nop_impl, + attrs = { + "plugin": attrs.plugin_dep(kind = Plugin), + }, +) + +different_deps = rule( + impl = _nop_impl, + attrs = { + "direct": attrs.plugin_dep(kind = Plugin), + "no_propagate": attrs.dep(pulls_plugins = [Plugin]), + }, +) + +alias = rule( + impl = _nop_impl, + attrs = { + "actual": attrs.dep(pulls_plugins = [Plugin]), + }, +) diff --git a/tests/core/build/test_plugins_data/test_errors/TARGETS.fixture b/tests/core/build/test_plugins_data/test_errors/TARGETS.fixture new file mode 100644 index 0000000000000..b430c910aa633 --- /dev/null +++ b/tests/core/build/test_plugins_data/test_errors/TARGETS.fixture @@ -0,0 +1,51 @@ +load(":rules.bzl", "multi_dep_rule", "plugin_dep_rule", "regular_rule", "toolchain_rule", "wrong_plugin_kind_rule") + +# Set up a recursive dependence between plugin targets. This should successfully build because +# there's no actual cycle in the configured graph + +plugin_dep_rule( + name = "recursive_target_1", + dep = ":recursive_target_2", +) + +plugin_dep_rule( + name = "recursive_target_2", + dep = ":recursive_target_1", +) + +# Make sure that plugin deps can appear as regular deps too + +regular_rule( + name = "plugin_a", +) + +multi_dep_rule( + name = "regular_a", + dep = ":plugin_a", + plugin_dep = ":plugin_a", +) + +# Make sure that we error if toolchain rules show up as plugin deps. Unfortunately, this error only +# shows up in cquery, not in uquery (the same thing is true for toolchains) + +toolchain_rule( + name = "toolchain", +) + +plugin_dep_rule( + name = "regular_b", + dep = ":toolchain", +) + +# Test that analysis can't access plugins which are not declared as used, even if they are propagated +wrong_plugin_kind_rule( + name = "wrong_plugin_kind", + dep = ":plugin_a", +) + +# A test rule that references a plugin with insufficient visibility + +plugin_dep_rule( + name = "missing_access", + dep = "//test_errors/package:hidden", +) diff --git a/tests/core/build/test_plugins_data/test_errors/rules.bzl b/tests/core/build/test_plugins_data/test_errors/rules.bzl new file mode 100644 index 0000000000000..8cff7b3d1f9a6 --- /dev/null +++ b/tests/core/build/test_plugins_data/test_errors/rules.bzl @@ -0,0 +1,53 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
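+
+# Error cases wired up here, in the order they appear in TARGETS.fixture:
+# mutually recursive plugin deps build fine (there is no cycle in the
+# configured graph), plugin deps may also appear as regular deps, a toolchain
+# rule used as a plugin dep is rejected at cquery/build time, and reading
+# ctx.plugins[A] from a rule that only declares uses_plugins = [B] fails
+# analysis with "did not declare that it uses plugins of kind A".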
+ +def _nop(_ctx): + return [DefaultInfo()] + +MyKind = plugins.kind() + +plugin_dep_rule = rule( + impl = _nop, + attrs = { + "dep": attrs.plugin_dep(kind = MyKind), + }, +) + +multi_dep_rule = rule( + impl = _nop, + attrs = { + "dep": attrs.dep(), + "plugin_dep": attrs.plugin_dep(kind = MyKind), + }, +) + +regular_rule = rule( + impl = _nop, + attrs = {}, +) + +toolchain_rule = rule( + impl = _nop, + attrs = {}, + is_toolchain_rule = True, +) + +A = plugins.kind() +B = plugins.kind() + +def _wrong_plugin_kind_rule_impl(ctx): + # Need to write it this way to make the linter happy + ctx.plugins[A] # buildifier: disable=no-effect + fail("unreachable") + +wrong_plugin_kind_rule = rule( + impl = _wrong_plugin_kind_rule_impl, + attrs = { + "dep": attrs.dep(pulls_and_pushes_plugins = [A]), + }, + uses_plugins = [B], +) diff --git a/tests/core/build/test_plugins_data/tests/TARGETS.fixture b/tests/core/build/test_plugins_data/tests/TARGETS.fixture new file mode 100644 index 0000000000000..cad5ae330c935 --- /dev/null +++ b/tests/core/build/test_plugins_data/tests/TARGETS.fixture @@ -0,0 +1,64 @@ +load(":rules.bzl", "alias", "rust_binary", "rust_library", "rust_proc_macro") + +rust_proc_macro( + name = "reg_a", + data = select({ + "//config:linux": "Reg A (linux)", + "//config:windows": "Reg A (windows)", + }), +) + +alias( + name = "reg_a_alias_one", + actual = ":reg_a", +) + +alias( + name = "reg_a_alias_two", + actual = ":reg_a_alias_one", +) + +rust_proc_macro( + name = "reg_b", + data = select({ + "//config:linux": "Reg B (linux)", + "//config:windows": "Reg B (windows)", + }), +) + +rust_proc_macro( + name = "doc_a", + data = select({ + "//config:linux": "Doc A (linux)", + "//config:windows": "Doc A (windows)", + }), +) + +rust_proc_macro( + name = "doc_b", + target_compatible_with = ["//config:windows"], + data = select({ + "//config:linux": "Doc B (linux)", + "//config:windows": "Doc B (windows)", + }), +) + +rust_library( + name = "l", + deps = [":reg_a_alias_two"], + doc_deps = [":doc_b"], + default_target_platform = "//config:platform_linux", +) + +rust_library( + name = "doc_only_lib", + deps = [":doc_a"], + doc_deps = [], +) + +rust_binary( + name = "b", + deps = [":l", ":reg_b"], + doc_deps = [":doc_only_lib"], + default_target_platform = "//config:platform_linux", +) diff --git a/tests/core/build/test_plugins_data/tests/rules.bzl b/tests/core/build/test_plugins_data/tests/rules.bzl new file mode 100644 index 0000000000000..303b7cf68b2e3 --- /dev/null +++ b/tests/core/build/test_plugins_data/tests/rules.bzl @@ -0,0 +1,117 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
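+
+# A miniature model of Rust proc macros built on plugins: proc macros are
+# plugins of kind `RustProcMacro`, libraries pull and push them along `deps`
+# (but only pull along `doc_deps`), and each rule writes the macros it can see
+# to a JSON file so the tests can assert on exactly what was propagated.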
+ +# @lint-ignore-every FBCODEBZLADDLOADS + +RustProcMacroMarker = provider(fields = ["target"]) + +RustProcMacroInfo = provider(fields = ["data"]) + +RustLibraryInfo = provider(fields = ["proc_macros"]) + +RustProcMacro = plugins.kind() + +def _proc_macro_alias_impl(ctx): + # Test that the `actual` attribute correctly resolved to a target label + if type(ctx.attrs.actual) != "target_label": + fail("Actual is not a target label: " + type(ctx.attrs.actual)) + return [DefaultInfo(), RustProcMacroMarker(target = ctx.attrs.actual)] + +_rust_proc_macro_alias = rule( + impl = _proc_macro_alias_impl, + attrs = { + "actual": attrs.plugin_dep(kind = RustProcMacro), + }, +) + +def _rust_rule_impl(ctx): + def gather_proc_macros(deps): + direct = [] + indirect = [] + for d in deps: + if RustProcMacroMarker in d: + direct.append(d[RustProcMacroMarker].target) + else: + indirect.extend(d[RustLibraryInfo].proc_macros) + return (direct, indirect) + + direct, indirect = gather_proc_macros(ctx.attrs.deps) + direct_doc, indirect_doc = gather_proc_macros(ctx.attrs.doc_deps) + + proc_macros = {} + for prov in ctx.plugins[RustProcMacro]: + proc_macros[str(prov[RustProcMacroMarker].target)] = prov[RustProcMacroInfo].data + + get_data = lambda x: proc_macros[str(x)] + out = { + "direct": map(get_data, direct), + "direct_doc": map(get_data, direct_doc), + "indirect": map(get_data, indirect), + "indirect_doc": map(get_data, indirect_doc), + } + out_art = ctx.actions.write_json("out.json", out) + return (out_art, dedupe(sorted(direct + indirect))) + +def _rust_library_impl(ctx): + out_art, proc_macros = _rust_rule_impl(ctx) + if ctx.attrs.proc_macro: + return [DefaultInfo(default_output = out_art), RustProcMacroInfo(data = ctx.attrs.data), RustProcMacroMarker(target = ctx.label.raw_target())] + else: + return [DefaultInfo(default_output = out_art), RustLibraryInfo(proc_macros = proc_macros)] + +rust_library = rule( + impl = _rust_library_impl, + attrs = { + # Just a dummy value that allows us to pass some extra information around in tests + "data": attrs.string(default = ""), + "deps": attrs.list( + attrs.dep(pulls_and_pushes_plugins = [RustProcMacro]), + default = [], + ), + "doc_deps": attrs.list( + attrs.dep(pulls_plugins = [RustProcMacro]), + default = [], + ), + "proc_macro": attrs.bool(default = False), + }, + uses_plugins = [RustProcMacro], +) + +def _rust_binary_impl(ctx): + out_art, _proc_macros = _rust_rule_impl(ctx) + return [DefaultInfo(default_output = out_art)] + +rust_binary = rule( + impl = _rust_binary_impl, + attrs = { + "deps": attrs.list(attrs.dep(pulls_and_pushes_plugins = [RustProcMacro])), + "doc_deps": attrs.list(attrs.dep(pulls_plugins = [RustProcMacro])), + }, + uses_plugins = [RustProcMacro], +) + +def rust_proc_macro(name, **kwargs): + rust_library( + name = name + "_REAL", + proc_macro = True, + **kwargs + ) + + _rust_proc_macro_alias( + name = name, + actual = ":" + name + "_REAL", + ) + +def _alias_impl(ctx): + return ctx.attrs.actual.providers + +alias = rule( + impl = _alias_impl, + attrs = { + "actual": attrs.dep(pulls_and_pushes_plugins = plugins.All), + }, +) diff --git a/tests/core/build/test_plugins_data/visibility/TARGETS.fixture b/tests/core/build/test_plugins_data/visibility/TARGETS.fixture new file mode 100644 index 0000000000000..d3160155bf1cf --- /dev/null +++ b/tests/core/build/test_plugins_data/visibility/TARGETS.fixture @@ -0,0 +1,11 @@ +load(":rules.bzl", "plugin_dep", "plugin_user") + +plugin_dep( + name = "missing_access", + actual = "//visibility/package:hidden", +) 
+ +plugin_user( + name = "has_access", + actual = "//visibility/package:unhidden", +) diff --git a/tests/core/build/test_plugins_data/visibility/package/TARGETS.fixture b/tests/core/build/test_plugins_data/visibility/package/TARGETS.fixture new file mode 100644 index 0000000000000..4596e07aa2898 --- /dev/null +++ b/tests/core/build/test_plugins_data/visibility/package/TARGETS.fixture @@ -0,0 +1,11 @@ +load("@root//visibility:rules.bzl", "nop", "plugin_dep") + +nop( + name = "hidden", + visibility = [], +) + +plugin_dep( + name = "unhidden", + actual = ":hidden", +) diff --git a/tests/core/build/test_plugins_data/visibility/rules.bzl b/tests/core/build/test_plugins_data/visibility/rules.bzl new file mode 100644 index 0000000000000..0d43e0bffa13d --- /dev/null +++ b/tests/core/build/test_plugins_data/visibility/rules.bzl @@ -0,0 +1,31 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _nop(_ctx): + return [DefaultInfo()] + +nop = rule( + impl = _nop, + attrs = {}, +) + +MyKind = plugins.kind() + +plugin_dep = rule( + impl = _nop, + attrs = { + "actual": attrs.plugin_dep(kind = MyKind), + }, +) + +plugin_user = rule( + impl = _nop, + attrs = { + "actual": attrs.dep(pulls_plugins = [MyKind]), + }, + uses_plugins = [MyKind], +) diff --git a/tests/core/build/test_remote_execution.py b/tests/core/build/test_remote_execution.py new file mode 100644 index 0000000000000..e2d89a34315e2 --- /dev/null +++ b/tests/core/build/test_remote_execution.py @@ -0,0 +1,140 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
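+
+# Remote execution tests: connection failures should not be retried, and the
+# RE use case can be overridden via --config, .buckconfig.local, or
+# --config-file. Each build goes through four RE stages (Queue,
+# WorkerDownload, Execute, WorkerUpload), hence the `len(use_cases) == 4`
+# assertions below.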
+ +# pyre-strict + +import tempfile + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test, env +from buck2.tests.e2e_util.helper.utils import filter_events, random_string + + +@buck_test() +@env("BUCK2_TEST_FAIL_CONNECT", "true") +async def test_re_connection_failure_no_retry(buck: Buck) -> None: + out = await expect_failure( + buck.build( + "root//:simple", + "--remote-only", + "--no-remote-cache", + ), + ) + + assert "Injected RE Connection error" in out.stderr + assert "retrying after sleeping" not in out.stderr + + +async def filter_re_use_case(buck: Buck) -> list[str]: + use_cases = [] + for action in ["Queue", "WorkerDownload", "Execute", "WorkerUpload"]: + use_cases.extend( + await filter_events( + buck, + "Event", + "data", + "SpanStart", + "data", + "ExecutorStage", + "stage", + "Re", + "stage", + action, + "use_case", + ) + ) + return use_cases + + +@buck_test() +async def test_re_use_case_override_with_arg(buck: Buck) -> None: + # Make sure action is not cached + with open(buck.cwd / "input.txt", "w") as f: + f.write(random_string()) + await buck.build( + "root//:simple", + "--remote-only", + "--no-remote-cache", + ) + use_cases = await filter_re_use_case(buck) + assert len(use_cases) == 4 + assert all(use_case == "buck2-default" for use_case in use_cases) + # Change the target input + with open(buck.cwd / "input.txt", "w") as f: + f.write(random_string()) + await buck.build( + "root//:simple", + "--remote-only", + "--no-remote-cache", + "--config", + "buck2_re_client.override_use_case=buck2-user", + ) + use_cases = await filter_re_use_case(buck) + assert len(use_cases) == 4 + assert all(use_case == "buck2-user" for use_case in use_cases) + + +@buck_test() +async def test_re_use_case_override_with_config(buck: Buck) -> None: + # Make sure action is not cached + with open(buck.cwd / "input.txt", "w") as f: + f.write(random_string()) + await buck.build( + "root//:simple", + "--remote-only", + "--no-remote-cache", + ) + use_cases = await filter_re_use_case(buck) + assert len(use_cases) == 4 + assert all(use_case == "buck2-default" for use_case in use_cases) + # Change the target input + with open(buck.cwd / "input.txt", "w") as f: + f.write(random_string()) + with open(buck.cwd / ".buckconfig.local", "w") as f: + f.write("[buck2_re_client]\n") + f.write("override_use_case = buck2-user\n") + await buck.build( + "root//:simple", + "--remote-only", + "--no-remote-cache", + ) + use_cases = await filter_re_use_case(buck) + assert len(use_cases) == 4 + assert all(use_case == "buck2-user" for use_case in use_cases) + + +@buck_test() +async def test_re_use_case_override_with_external_config(buck: Buck) -> None: + # Make sure action is not cached + with open(buck.cwd / "input.txt", "w") as f: + f.write(random_string()) + await buck.build( + "root//:simple", + "--remote-only", + "--no-remote-cache", + ) + use_cases = await filter_re_use_case(buck) + assert len(use_cases) == 4 + assert all(use_case == "buck2-default" for use_case in use_cases) + # Change the target input + with open(buck.cwd / "input.txt", "w") as f: + f.write(random_string()) + with tempfile.NamedTemporaryFile("w", delete=False) as f: + f.write("[buck2_re_client]\n") + f.write("override_use_case = buck2-user\n") + f.close() + await buck.build( + "root//:simple", + "--remote-only", + "--no-remote-cache", + "--config-file", + f.name, + ) + use_cases = await filter_re_use_case(buck) + assert len(use_cases) == 4 + 
assert all(use_case == "buck2-user" for use_case in use_cases) diff --git a/tests/core/build/test_remote_execution_data/.buckconfig b/tests/core/build/test_remote_execution_data/.buckconfig new file mode 100644 index 0000000000000..22e652e3c984b --- /dev/null +++ b/tests/core/build/test_remote_execution_data/.buckconfig @@ -0,0 +1,5 @@ +[buildfile] +name=TARGETS.fixture + +[cells] +root = . diff --git a/tests/core/build/test_remote_execution_data/.buckroot b/tests/core/build/test_remote_execution_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_remote_execution_data/TARGETS.fixture b/tests/core/build/test_remote_execution_data/TARGETS.fixture new file mode 100644 index 0000000000000..70409cc5de63a --- /dev/null +++ b/tests/core/build/test_remote_execution_data/TARGETS.fixture @@ -0,0 +1,6 @@ +load(":defs.bzl", "simple") + +simple( + name = "simple", + input = "input.txt", +) diff --git a/tests/core/build/test_remote_execution_data/defs.bzl b/tests/core/build/test_remote_execution_data/defs.bzl new file mode 100644 index 0000000000000..42eae87e7f564 --- /dev/null +++ b/tests/core/build/test_remote_execution_data/defs.bzl @@ -0,0 +1,27 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _simple(ctx): + output = ctx.actions.declare_output("output") + run = ctx.actions.write( + "run.py", + [ + "import os", + "import sys", + "build_id = os.environ[\"BUCK_BUILD_ID\"]", + "with open(sys.argv[1], 'w') as f:", + " f.write(f'{build_id}\\n')", + ], + ) + ctx.actions.run( + cmd_args(["python3", run, output.as_output(), ctx.attrs.input]), + category = "test_category", + ) + + return [DefaultInfo(default_output = output)] + +simple = rule(impl = _simple, attrs = {"input": attrs.source()}) diff --git a/tests/core/build/test_remote_execution_data/input.txt b/tests/core/build/test_remote_execution_data/input.txt new file mode 100644 index 0000000000000..d95f3ad14dee6 --- /dev/null +++ b/tests/core/build/test_remote_execution_data/input.txt @@ -0,0 +1 @@ +content diff --git a/tests/core/build/test_skip_missing.py b/tests/core/build/test_skip_missing.py new file mode 100644 index 0000000000000..b80b1d0a03e67 --- /dev/null +++ b/tests/core/build/test_skip_missing.py @@ -0,0 +1,39 @@ +#!/usr/bin/env fbpython +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
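+
+# Tests for `--skip-missing-targets`: a missing target in an existing package
+# is skipped with a warning, while a missing package remains a hard error.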
+
+# pyre-strict
+
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.asserts import expect_failure
+from buck2.tests.e2e_util.buck_workspace import buck_test
+
+
+@buck_test()
+async def test_build_skip_missing(buck: Buck) -> None:
+    result = await buck.build(
+        "//:existing",
+        "//:missing",
+        "--skip-missing-targets",
+    )
+
+    out = result.get_build_report().output_for_target("//:existing").read_text()
+    assert "abcd" == out.strip()
+    assert "Skipped 1 missing targets:" in result.stderr
+
+
+@buck_test()
+async def test_build_skip_missing_fails_on_missing_package(buck: Buck) -> None:
+    await expect_failure(
+        buck.build(
+            "//:existing",
+            "//bad-package:existing",
+            "--skip-missing-targets",
+        ),
+        stderr_regex="`root//bad-package`",
+    )
diff --git a/tests/core/build/test_skip_missing_data/.buckconfig b/tests/core/build/test_skip_missing_data/.buckconfig
new file mode 100644
index 0000000000000..09556211287db
--- /dev/null
+++ b/tests/core/build/test_skip_missing_data/.buckconfig
@@ -0,0 +1,12 @@
+[cells]
+  root = .
+  nano_prelude = nano_prelude
+
+[cell_aliases]
+  prelude = nano_prelude
+
+[external_cells]
+  nano_prelude = bundled
+
+[buildfile]
+  name = TARGETS.fixture
diff --git a/tests/core/build/test_skip_missing_data/.buckroot b/tests/core/build/test_skip_missing_data/.buckroot
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/build/test_skip_missing_data/TARGETS.fixture b/tests/core/build/test_skip_missing_data/TARGETS.fixture
new file mode 100644
index 0000000000000..5372616e867bc
--- /dev/null
+++ b/tests/core/build/test_skip_missing_data/TARGETS.fixture
@@ -0,0 +1,3 @@
+trivial_build(
+    name = "existing",
+)
diff --git a/tests/core/build/test_symlinks.py b/tests/core/build/test_symlinks.py
new file mode 100644
index 0000000000000..94df096175e7b
--- /dev/null
+++ b/tests/core/build/test_symlinks.py
@@ -0,0 +1,109 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+
+import os
+import shutil
+import sys
+import tempfile
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.buck_workspace import buck_test
+from buck2.tests.e2e_util.helper.utils import expect_exec_count
+
+
+@buck_test()
+async def test_symlinks(buck: Buck) -> None:
+    # We want to check in a symlink, but the checked-in file is only a
+    # placeholder (symlinks can't be checked in here), so we first need to
+    # replace it with an actual symlink.
+    symlink_path = os.path.join(buck.cwd, "src", "link")
+
+    if os.path.isdir(symlink_path):
+        shutil.rmtree(symlink_path)
+    else:
+        os.remove(symlink_path)
+
+    src = "..\\dir" if sys.platform == "win32" else "../dir"
+    os.symlink(src, symlink_path, target_is_directory=True)
+
+    await buck.build("//:cp")
+    await expect_exec_count(buck, 1)
+
+    await buck.build("//:cp")
+    await expect_exec_count(buck, 0)
+
+    with open(buck.cwd / "dir/file", "w") as file:
+        file.write("GOODBYE\n")
+
+    # This isn't really behavior we want to guarantee and we'd rather users
+    # don't use symlinks, but this is very observable (and it's not worse than
+    # just reading the files then pretending they are never used!)
+ await buck.build("//:cp") + await expect_exec_count(buck, 1) + + +@buck_test( + # For some reason, this test fails when using filesystem watcher on macos, so explicitly set + extra_buck_config={"buck2": {"file_watcher": "watchman"}}, +) +async def test_symlinks_redirection(buck: Buck) -> None: + symlink_path = os.path.join(buck.cwd, "src", "link") + + if os.path.isdir(symlink_path): + shutil.rmtree(symlink_path) + else: + os.remove(symlink_path) + + src = "..\\dir" if sys.platform == "win32" else "../dir" + os.symlink(src, symlink_path) + + await buck.build("//:cp") + await expect_exec_count(buck, 1) + + await buck.build("//:cp") + await expect_exec_count(buck, 0) + + # We change the symlink which should invalidate all files depending on it + os.remove(symlink_path) + src2 = "..\\dir2" if sys.platform == "win32" else "../dir2" + os.symlink(src2, symlink_path) + + await buck.build("//:cp") + await expect_exec_count(buck, 1) + + +@buck_test( + # For some reason, this test fails when using filesystem watcher on macos, so explicitly set + extra_buck_config={"buck2": {"file_watcher": "watchman"}}, +) +async def test_symlinks_external(buck: Buck) -> None: + symlink_path = os.path.join(buck.cwd, "ext", "link") + shutil.rmtree(symlink_path) + top_level = tempfile.mkdtemp() + + os.mkdir(os.path.join(top_level, "nested1")) + os.mkdir(os.path.join(top_level, "nested2")) + with open(os.path.join(top_level, "nested1", "file"), "w") as f: + f.write("HELLO") + with open(os.path.join(top_level, "nested2", "file"), "w") as f: + f.write("GOODBYE") + + os.symlink(os.path.join(top_level, "nested1"), symlink_path) + + await buck.build("//:ext") + await expect_exec_count(buck, 1) + + await buck.build("//:ext") + await expect_exec_count(buck, 0) + + os.remove(symlink_path) + os.symlink(os.path.join(top_level, "nested2"), symlink_path) + + await buck.build("//:ext") + await expect_exec_count(buck, 1) diff --git a/tests/core/build/test_symlinks_data/.buckconfig b/tests/core/build/test_symlinks_data/.buckconfig new file mode 100644 index 0000000000000..7ef37da18491c --- /dev/null +++ b/tests/core/build/test_symlinks_data/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +self = . +prelude = prelude diff --git a/tests/core/build/test_symlinks_data/.buckroot b/tests/core/build/test_symlinks_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_symlinks_data/TARGETS.fixture b/tests/core/build/test_symlinks_data/TARGETS.fixture new file mode 100644 index 0000000000000..dd1b28590587a --- /dev/null +++ b/tests/core/build/test_symlinks_data/TARGETS.fixture @@ -0,0 +1,5 @@ +load(":defs.bzl", "cp") + +cp(name = "cp", src = "src/link/file") + +cp(name = "ext", src = "ext/link/file") diff --git a/tests/core/build/test_symlinks_data/defs.bzl b/tests/core/build/test_symlinks_data/defs.bzl new file mode 100644 index 0000000000000..0489cdc7e4898 --- /dev/null +++ b/tests/core/build/test_symlinks_data/defs.bzl @@ -0,0 +1,19 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+
+def _cp_impl(ctx: AnalysisContext):
+    out = ctx.actions.declare_output("out")
+    ctx.actions.run(["python3", "-c", "import shutil, sys; from pathlib import Path; shutil.copyfile(Path(sys.argv[1]), Path(sys.argv[2]))", ctx.attrs.src, out.as_output()], category = "cp", local_only = True)
+
+    return [
+        DefaultInfo(default_output = out),
+    ]
+
+cp = rule(
+    impl = _cp_impl,
+    attrs = {"src": attrs.source()},
+)
diff --git a/tests/core/build/test_symlinks_data/dir/file b/tests/core/build/test_symlinks_data/dir/file
new file mode 100644
index 0000000000000..e427984d4a2c1
--- /dev/null
+++ b/tests/core/build/test_symlinks_data/dir/file
@@ -0,0 +1 @@
+HELLO
diff --git a/tests/core/build/test_symlinks_data/dir2/file b/tests/core/build/test_symlinks_data/dir2/file
new file mode 100644
index 0000000000000..f6e10c0b02ef3
--- /dev/null
+++ b/tests/core/build/test_symlinks_data/dir2/file
@@ -0,0 +1 @@
+GOODBYE
diff --git a/tests/core/build/test_symlinks_data/ext/link/file b/tests/core/build/test_symlinks_data/ext/link/file
new file mode 100644
index 0000000000000..9d0fd3239a606
--- /dev/null
+++ b/tests/core/build/test_symlinks_data/ext/link/file
@@ -0,0 +1 @@
+SALUT
diff --git a/tests/core/build/test_symlinks_data/prelude/prelude.bzl b/tests/core/build/test_symlinks_data/prelude/prelude.bzl
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/build/test_symlinks_data/src/link b/tests/core/build/test_symlinks_data/src/link
new file mode 100644
index 0000000000000..32059610c64bb
--- /dev/null
+++ b/tests/core/build/test_symlinks_data/src/link
@@ -0,0 +1 @@
+This gets replaced by a link to ../dir when the test runs
diff --git a/tests/core/build/test_target_aliases.py b/tests/core/build/test_target_aliases.py
new file mode 100644
index 0000000000000..cab3203c934f5
--- /dev/null
+++ b/tests/core/build/test_target_aliases.py
@@ -0,0 +1,111 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+
+import json
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.asserts import expect_failure
+from buck2.tests.e2e_util.buck_workspace import buck_test
+
+
+@buck_test()
+async def test_target_aliases(buck: Buck) -> None:
+    await buck.targets("alias")
+    await buck.cquery("deps(alias)")
+
+    await buck.targets("chain")
+    await buck.cquery("deps(chain)")
+
+    res = await buck.targets("--resolve-alias", "alias", "chain", "//targets:target")
+    assert [line.strip() for line in res.stdout.splitlines()] == [
+        "root//targets:target"
+    ] * 3
+
+    # Following a broken alias should fail
+    await expect_failure(
+        buck.targets("--resolve-alias", "bad"), stderr_regex="Invalid alias: `bad`"
+    )
+
+    # Asking for a non-existent alias / target should also fail. Note that
+    # we're not capable of telling the difference between an alias that doesn't
+    # exist vs. one that is broken.
+    await expect_failure(
+        buck.targets("--resolve-alias", "oops"), stderr_regex="Invalid alias: `oops`"
+    )
+
+    await expect_failure(
+        buck.targets("--resolve-alias", "targets:not_existent"),
+        stderr_regex="Invalid alias:.*Target does not exist in package",
+    )
+    await expect_failure(
+        buck.targets("--resolve-alias", "broken:broken"),
+        stderr_regex="Invalid alias:.*Package cannot be evaluated.*Parse error",
+    )
+    await expect_failure(
+        buck.targets("--resolve-alias", "not_existent:not_existent"),
+        stderr_regex="Invalid alias:.*Package cannot be evaluated.*does not exist",
+    )
+    await expect_failure(
+        buck.targets("--resolve-alias", "..."),
+        stderr_regex="Invalid alias.*does not expand to a single target",
+    )
+
+
+@buck_test()
+async def test_resolve_alias_json(buck: Buck) -> None:
+    res = await buck.targets(
+        "--resolve-alias", "alias", "chain", "//targets:target", "--json"
+    )
+
+    assert json.loads(res.stdout) == [
+        {
+            "alias": "alias",
+            "buck.package": "root//targets",
+            "name": "target",
+        },
+        {
+            "alias": "chain",
+            "buck.package": "root//targets",
+            "name": "target",
+        },
+        {
+            "alias": "//targets:target",
+            "buck.package": "root//targets",
+            "name": "target",
+        },
+    ]
+
+
+@buck_test()
+async def test_resolve_alias_json_lines(buck: Buck) -> None:
+    res = await buck.targets(
+        "--resolve-alias", "alias", "chain", "//targets:target", "--json-lines"
+    )
+
+    lines = [line.strip() for line in res.stdout.splitlines()]
+    lines = [line for line in lines if line]
+
+    assert [json.loads(line) for line in lines] == [
+        {
+            "alias": "alias",
+            "buck.package": "root//targets",
+            "name": "target",
+        },
+        {
+            "alias": "chain",
+            "buck.package": "root//targets",
+            "name": "target",
+        },
+        {
+            "alias": "//targets:target",
+            "buck.package": "root//targets",
+            "name": "target",
+        },
+    ]
diff --git a/tests/core/build/test_target_aliases_data/.buckconfig b/tests/core/build/test_target_aliases_data/.buckconfig
new file mode 100644
index 0000000000000..2427a8d99ccba
--- /dev/null
+++ b/tests/core/build/test_target_aliases_data/.buckconfig
@@ -0,0 +1,11 @@
+[buildfile]
+name=TARGETS.fixture
+
+[repositories]
+root = .
+prelude = prelude
+
+[alias]
+alias = root//targets:target
+chain = alias
+bad = root//bad:bad
diff --git a/tests/core/build/test_target_aliases_data/.buckroot b/tests/core/build/test_target_aliases_data/.buckroot
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/build/test_target_aliases_data/broken/TARGETS.fixture b/tests/core/build/test_target_aliases_data/broken/TARGETS.fixture
new file mode 100644
index 0000000000000..92504f6bcbbf3
--- /dev/null
+++ b/tests/core/build/test_target_aliases_data/broken/TARGETS.fixture
@@ -0,0 +1,2 @@
+# @lint-ignore-every BUCKFORMAT
+this targets file is not syntactically valid
diff --git a/tests/core/build/test_target_aliases_data/prelude/prelude.bzl b/tests/core/build/test_target_aliases_data/prelude/prelude.bzl
new file mode 100644
index 0000000000000..3d09ff469fe96
--- /dev/null
+++ b/tests/core/build/test_target_aliases_data/prelude/prelude.bzl
@@ -0,0 +1,17 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
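+
+# Minimal no-op `test` rule so the alias fixtures have a target to resolve.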
+ +def _test_impl(_ctx): + return [ + DefaultInfo(), + ] + +test = rule( + impl = _test_impl, + attrs = { + }, +) diff --git a/tests/core/build/test_target_aliases_data/targets/TARGETS.fixture b/tests/core/build/test_target_aliases_data/targets/TARGETS.fixture new file mode 100644 index 0000000000000..8a8f34e9784ce --- /dev/null +++ b/tests/core/build/test_target_aliases_data/targets/TARGETS.fixture @@ -0,0 +1 @@ +test(name = "target") diff --git a/tests/core/build/test_uncategorized.py b/tests/core/build/test_uncategorized.py new file mode 100644 index 0000000000000..db1570e4281df --- /dev/null +++ b/tests/core/build/test_uncategorized.py @@ -0,0 +1,499 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json +import os +import platform +import random +import string +import subprocess +import typing +from pathlib import Path +from typing import List, Tuple + +import pytest + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.api.buck_result import BuckException +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.utils import json_get, read_what_ran + + +@buck_test(data_dir="anon_exec_deps") +async def test_anon_target_exec_deps(buck: Buck) -> None: + await buck.build("//tests:exec_dep_good", "--remote-only") + + await expect_failure( + buck.build("//tests:exec_dep_bad", "--local-only"), + stderr_regex="Exec deps and the current anon target must have the same execution platform resolution", + ) + + await expect_failure( + buck.build("//tests:exec_dep_rejects_dep"), + stderr_regex="exec dep is missing the execution platform resolution", + ) + + +@buck_test(data_dir="args") +async def test_args(buck: Buck) -> None: + result = await buck.build("//:bin") + output = result.get_build_report().output_for_target("//:bin") + assert ( + output.read_text().rstrip() + == " -- \nlib1 -- this is lib1\nlib2 -- this is lib2" + ) + + +@buck_test(data_dir="prelude_import") +async def test_prelude_imported_once(buck: Buck) -> None: + # See the comments in the relevant targets files: they explain how this + # test works. 
+ await buck.build("cell1//...", "cell2//...") + + +def read_all_outputs(buck: Buck, report: str) -> typing.List[str]: + ret = [] + + with open(buck.cwd / report) as f: + report = json.load(f) + for _target, state in report["results"].items(): + ret.extend(state["outputs"].get("DEFAULT", [])) + ret.extend(state["other_outputs"].get("DEFAULT", [])) + + return ret + + +@buck_test(data_dir="build_providers") +async def test_build_providers(buck: Buck) -> None: + await buck.build( + "//:target", + "-c", + "build_report.unstable_include_other_outputs=true", + "--build-default-info", + "--skip-run-info", + "--skip-test-info", + "--build-report", + "report", + ) + + outputs = read_all_outputs(buck, "report") + assert any("/build" in o for o in outputs) + assert all("/run" not in o for o in outputs) + assert all("/test" not in o for o in outputs) + + await buck.build( + "//:target", + "-c", + "build_report.unstable_include_other_outputs=true", + "--skip-default-info", + "--build-run-info", + "--skip-test-info", + "--build-report", + "report", + ) + + outputs = read_all_outputs(buck, "report") + assert all("/build" not in o for o in outputs) + assert any("/run" in o for o in outputs) + assert all("/test" not in o for o in outputs) + + await buck.build( + "//:target", + "-c", + "build_report.unstable_include_other_outputs=true", + "--skip-default-info", + "--skip-run-info", + "--build-test-info", + "--build-report", + "report", + ) + + outputs = read_all_outputs(buck, "report") + assert all("/build" not in o for o in outputs) + assert all("/run" not in o for o in outputs) + assert any("/test" in o for o in outputs) + + +@buck_test(data_dir="projected_artifacts") +@pytest.mark.parametrize( + "target", + [ + # Check building the whole thing + "//...", + # Check building just one target, which may reveal bugs if things are + # materialized differently when a projected target uses them. + "//:check_c_b_local", + ], +) +async def test_projected_artifacts(buck: Buck, target: str) -> None: + await buck.build(target) + + +@buck_test(data_dir="upload_all_actions") +async def test_upload_all_actions(buck: Buck) -> None: + with open(buck.cwd / "src", "w") as src: + src.write(random_string()) + + # This action includes `src` and is forced to run locally. This means RE + # can never have seen it (and we'll check that later by asserting there is + # only 1 cache query, excluding local actions). + await buck.build("//:cp", "--upload-all-actions") + + what_ran = await read_what_ran( + buck, "--emit-cache-queries", "--skip-local-executions" + ) + assert len(what_ran) == 1 + + # Now, download the action. This will succeed only if we uploaded it. + digest = what_ran[0]["reproducer"]["details"]["digest"] + subprocess.check_call( + ["dotslash", os.environ["RECLI"], "cas", "download-action", digest] + ) + + +@buck_test(data_dir="buckroot") +async def test_buckroot(buck: Buck) -> None: + # Test that .buckroot files work + await buck.build(":inner", rel_cwd=Path("rooted/cell")) + + +@buck_test(data_dir="cell_delete") +async def test_cell_deletion(buck: Buck) -> None: + """ + This is a regression test for https://github.com/facebook/buck2/pull/43, + including the similar issue with directories that was fixed first. 
+ """ + await buck.targets(":") + (buck.cwd / "hello").mkdir() + await buck.targets(":") + (buck.cwd / "hello").rmdir() + await buck.targets(":") + + +@buck_test( + data_dir="invalid_file_invalidation", + skip_for_os=["windows"], + # For some reason, this test fails when using filesystem watcher on macos, so explicitly set + # watchman file watcher here. + extra_buck_config={"buck2": {"file_watcher": "watchman"}}, +) +async def test_invalid_file_invalidation(buck: Buck) -> None: + """ + This is a regression test for T136963408. + """ + + await buck.build(":root") + + src = buck.cwd / "src" + invalid = src / "\\" + invalid_nested = src / "\\" / "a" + invalid_nested_invalid = src / "\\" / "\\" + + # Create an invalid file. Build should work. + invalid.touch() + output = await buck.build(":root") + assert "is not valid. Add the path to" in output.stderr + + # Delete it, build should work. + invalid.unlink() + await buck.build(":root") + + # Create an invalid dir. Build should still work. + invalid_nested.mkdir(parents=True) + output = await buck.build(":root") + assert "is not valid. Add the path to" in output.stderr + + # And delete it. Things should work. + invalid_nested.rmdir() + invalid.rmdir() + await buck.build(":root") + + # Finally, do an invalid file inside an invalid dir... + invalid_nested_invalid.mkdir(parents=True) + output = await buck.build(":root") + assert "is not valid. Add the path to" in output.stderr + + # And delete it. Things should again. + invalid_nested_invalid.rmdir() + invalid.rmdir() + await buck.build(":root") + + +@buck_test(data_dir="concurrency") +async def test_concurrency(buck: Buck) -> None: + await buck.build("//:weight", "--local-only", "--no-remote-cache") + + # Now, since our commands request 20% of resources, check that a no point + # we had more than 5 running commands. Also check that we found the right + # amount of commands. + log = (await buck.log("show")).stdout.strip().splitlines() + + running_execs = {} + execs_done = 0 + + for line in log: + id = json_get(line, "Event", "span_id") + + is_end = json_get( + line, + "Event", + "data", + "SpanEnd", + ) + + if is_end: + if running_execs.pop(id, None) is not None: + execs_done += 1 + + continue + + is_local_exec = json_get( + line, + "Event", + "data", + "SpanStart", + "data", + "ExecutorStage", + "stage", + "Local", + "stage", + "Execute", + ) + + if is_local_exec: + running_execs[id] = True + + # Check that concurrently running local commands + # don't exceed 5. 
+        assert len(running_execs) <= 5
+
+    assert execs_done == 10
+
+
+@buck_test(data_dir="fail_fast")
+async def test_fail_fast(buck: Buck) -> None:
+    with pytest.raises(BuckException) as exc:
+        await buck.build(
+            "root//:mixed",
+            "root//:slow",
+            "--local-only",
+            "--no-remote-cache",
+        )
+
+    assert "fast_default_output" in exc.value.stderr
+    assert "slow_default_output" in exc.value.stderr
+    assert "slow_other_output" in exc.value.stderr
+
+    with pytest.raises(BuckException) as exc:
+        await buck.build(
+            "root//:mixed",
+            "root//:slow",
+            "--local-only",
+            "--no-remote-cache",
+            "--fail-fast",
+        )
+
+    assert "fast_default_output" in exc.value.stderr
+    assert "slow_default_output" not in exc.value.stderr
+    assert "slow_other_output" not in exc.value.stderr
+
+
+@buck_test(data_dir="keep_going_build")
+async def test_keep_going(buck: Buck) -> None:
+    with pytest.raises(BuckException) as exc:
+        await buck.build(
+            "root//:top",
+            "--local-only",
+            "--no-remote-cache",
+        )
+
+    assert "fast_action" in exc.value.stderr
+    assert "slow_action" not in exc.value.stderr
+
+    # Don't want to re-attach to the ongoing evaluation for slow_action.
+    # Normally that gets cancelled, but even so that's still a race.
+    await buck.kill()
+
+    with pytest.raises(BuckException) as exc:
+        await buck.build(
+            "root//:top", "--local-only", "--no-remote-cache", "--keep-going"
+        )
+
+    assert "fast_action" in exc.value.stderr
+    assert "slow_action" in exc.value.stderr
+
+
+@buck_test(data_dir="cleanup")
+async def test_cleanup(buck: Buck) -> None:
+    # Test for T85589819 - broken cleanup
+    target_pattern = "//:cleanup"
+    result = await buck.build(target_pattern)
+    output = result.get_build_report().output_for_target(target_pattern)
+
+    # The output should be something like path/__cleanup__/out/dir1/dir2/output.txt
+    # We want to ensure that if we make a file dir1 or dir1/dir2, cleanup still works
+    output.unlink()
+    output.parent.rmdir()
+    output.parent.write_text("File that must be deleted")
+    await buck.kill()
+    await buck.build(target_pattern)
+
+    output.unlink()
+    output.parent.rmdir()
+    output.parent.parent.rmdir()
+    output.parent.parent.write_text("File that must be deleted")
+    await buck.build(target_pattern)
+
+
+@buck_test(data_dir="log_action_keys")
+async def test_log_action_keys(buck: Buck) -> None:
+    async def read_action_keys() -> List[Tuple[str, str]]:
+        out = await read_what_ran(buck)
+        return [
+            (
+                line["reproducer"]["executor"],
+                line["reproducer"]["details"]["action_key"],
+            )
+            for line in out
+        ]
+
+    seed = random_string()
+    action_key = "executor root//:test () touch"
+
+    # Run on RE
+    await buck.build(
+        ":test", "-c", f"test.seed={seed}", "-c", "buck2.log_action_keys=true"
+    )
+    assert await read_action_keys() == [("Re", action_key)]
+
+    await buck.kill()
+
+    # Run on RE again, get a cache hit this time
+    await buck.build(
+        ":test", "-c", f"test.seed={seed}", "-c", "buck2.log_action_keys=true"
+    )
+
+    assert await read_action_keys() == [("Cache", action_key)]
+
+
+@buck_test(data_dir="roots")
+async def test_roots(buck: Buck) -> None:
+    res = await buck.build("root//:test", "other//:test")
+
+    is_windows: bool = platform.system() == "Windows"
+
+    def platformify(path: str) -> str:
+        if is_windows:
+            return path.replace("/", "\\")
+        return path
+
+    output = res.get_build_report().output_for_target("root//:test")
+    with open(output) as f:
+        j = json.load(f)
+    print(j)
+    assert (buck.cwd / j["fixture_relative_to_cell"]).exists()
+    assert (buck.cwd / j["fixture_relative_to_project"]).exists()
+
+    assert j["cell_relative_to_fixture"] == platformify("../../../../../../..")
+    assert j["project_relative_to_fixture"] == platformify("../../../../../../..")
+
+    output = res.get_build_report().output_for_target("other//:test")
+    with open(output) as f:
+        j = json.load(f)
+    assert (buck.cwd / "other" / j["fixture_relative_to_cell"]).exists()
+    assert (buck.cwd / j["fixture_relative_to_project"]).exists()
+
+    assert j["cell_relative_to_fixture"] == platformify(
+        "../../../../../../../other"
+    )
+    assert j["project_relative_to_fixture"] == platformify("../../../../../../..")
+
+
+@buck_test(data_dir="tmpdir")
+async def test_tmpdir(buck: Buck) -> None:
+    await buck.build("root//:")
+
+
+@buck_test(data_dir="materialize_inputs_for_failed_actions")
+async def test_materialize_inputs_for_failed_actions(buck: Buck) -> None:
+    await expect_failure(
+        buck.build(
+            "//:action_fail",
+            "--remote-only",
+            "--no-remote-cache",
+            "--materialize-failed-inputs",
+            "-c",
+            f"test.cache_buster={random_string()}",
+        ),
+    )
+
+    log = (await buck.log("show")).stdout.strip().splitlines()
+
+    found_action_error = False
+    found_materialize_failed_inputs_span = False
+
+    for line in log:
+        # Look for MaterializeFailedInputs ReStage event
+        if "MaterializeFailedInputs" in line:
+            found_materialize_failed_inputs_span = True
+
+        # Inspect failed input
+        materialized_inputs_for_failed = json_get(
+            line,
+            "Event",
+            "data",
+            "Instant",
+            "data",
+            "ActionError",
+            "last_command",
+            "details",
+            "command_kind",
+            "command",
+            "RemoteCommand",
+            "materialized_inputs_for_failed",
+        )
+
+        if materialized_inputs_for_failed:
+            found_action_error = True
+            assert len(materialized_inputs_for_failed) == 1
+            input = materialized_inputs_for_failed[0]
+            with open(Path(buck.cwd / input), "r") as materialized_input_path:
+                contents = materialized_input_path.read()
+                assert contents == "yay!"
+
+    if not found_action_error:
+        raise AssertionError("Did not find relevant ActionError")
+    if not found_materialize_failed_inputs_span:
+        raise AssertionError("Did not find relevant MaterializeFailedInputs span")
+
+
+def random_string() -> str:
+    return "".join(random.choice(string.ascii_lowercase) for i in range(256))
+
+
+@buck_test(data_dir="artifact_consistency")
+async def test_artifact_consistency(buck: Buck) -> None:
+    out = await buck.build_without_report(
+        ":gen[file3]",
+        "--local-only",
+        "--out=-",
+    )
+
+    assert out.stdout == "This is file3"
+
+    out = await buck.build_without_report(
+        "-c",
+        "gen.idx=2",
+        ":gen[file3]",
+        "--local-only",
+        "--out=-",
+    )
+    assert out.stdout == "This is file3"
diff --git a/tests/core/build/test_uncategorized_data/.buckroot b/tests/core/build/test_uncategorized_data/.buckroot
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/build/test_uncategorized_data/action_digest/.buckconfig b/tests/core/build/test_uncategorized_data/action_digest/.buckconfig
new file mode 100644
index 0000000000000..1df397532e03e
--- /dev/null
+++ b/tests/core/build/test_uncategorized_data/action_digest/.buckconfig
@@ -0,0 +1,11 @@
+[buildfile]
+name=TARGETS.fixture
+
+[project]
+ignore=ignored
+
+[repositories]
+root = .
+ +[repository_aliases] + prelude = root diff --git a/tests/core/build/test_uncategorized_data/action_digest/.buckroot b/tests/core/build/test_uncategorized_data/action_digest/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_uncategorized_data/action_digest/TARGETS.fixture b/tests/core/build/test_uncategorized_data/action_digest/TARGETS.fixture new file mode 100644 index 0000000000000..d03be96476689 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/action_digest/TARGETS.fixture @@ -0,0 +1,3 @@ +load(":defs.bzl", "test") + +test(name = "test", script = "test.py") diff --git a/tests/core/build/test_uncategorized_data/action_digest/defs.bzl b/tests/core/build/test_uncategorized_data/action_digest/defs.bzl new file mode 100644 index 0000000000000..d1ef0870f706c --- /dev/null +++ b/tests/core/build/test_uncategorized_data/action_digest/defs.bzl @@ -0,0 +1,23 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _test(ctx: AnalysisContext): + out = ctx.actions.declare_output("out") + + ctx.actions.run( + [ + "python3", + ctx.attrs.script, + out.as_output(), + ], + category = "check", + identifier = "out", + ) + + return [DefaultInfo(out)] + +test = rule(attrs = {"script": attrs.source()}, impl = _test) diff --git a/tests/core/build/test_uncategorized_data/action_digest/prelude.bzl b/tests/core/build/test_uncategorized_data/action_digest/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_uncategorized_data/action_digest/test.py b/tests/core/build/test_uncategorized_data/action_digest/test.py new file mode 100644 index 0000000000000..c94587e2816c0 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/action_digest/test.py @@ -0,0 +1,19 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +import sys + + +def main(): + (out,) = sys.argv[1:] + + with open(out, "w"): + pass + + +if __name__ == "__main__": + main() diff --git a/tests/core/build/test_uncategorized_data/anon_exec_deps/.buckconfig b/tests/core/build/test_uncategorized_data/anon_exec_deps/.buckconfig new file mode 100644 index 0000000000000..b2a50a06c92c8 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/anon_exec_deps/.buckconfig @@ -0,0 +1,12 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored + +[repositories] +root = . 
+prelude = prelude + +[build] + execution_platforms = root//platforms:platforms diff --git a/tests/core/build/test_uncategorized_data/anon_exec_deps/platforms/TARGETS.fixture b/tests/core/build/test_uncategorized_data/anon_exec_deps/platforms/TARGETS.fixture new file mode 100644 index 0000000000000..2c02c7644ea8f --- /dev/null +++ b/tests/core/build/test_uncategorized_data/anon_exec_deps/platforms/TARGETS.fixture @@ -0,0 +1,28 @@ +load("//rules:rules.bzl", "config_setting", "platform", "platforms", "target_platform") + +config_setting(name = "remote_setting") + +config_setting(name = "local_setting") + +target_platform(name = "target") + +platform( + name = "local_only", + setting = ":local_setting", + local_enabled = True, + remote_enabled = False, + allow_hybrid_fallbacks_on_failure = False, +) + +platform( + name = "remote_only", + setting = ":remote_setting", + local_enabled = False, + remote_enabled = True, + allow_hybrid_fallbacks_on_failure = False, +) + +platforms( + name = "platforms", + platforms = [":local_only", ":remote_only"], +) diff --git a/tests/core/build/test_uncategorized_data/anon_exec_deps/prelude/prelude.bzl b/tests/core/build/test_uncategorized_data/anon_exec_deps/prelude/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_uncategorized_data/anon_exec_deps/rules/exec_deps.bzl b/tests/core/build/test_uncategorized_data/anon_exec_deps/rules/exec_deps.bzl new file mode 100644 index 0000000000000..9b0fced83495a --- /dev/null +++ b/tests/core/build/test_uncategorized_data/anon_exec_deps/rules/exec_deps.bzl @@ -0,0 +1,71 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
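+
+# Helpers for the anon target exec dep tests: `passthrough` captures an
+# exec_dep in a provider, `_mirror_exec_dep` is the anon target that receives
+# it, and the `exec_dep_*` rules check that the exec dep resolves to the same
+# execution platform as the anon target (or errors when it cannot).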
+
+MirrorInfo = provider(fields = ["info"])
+
+ExecDepInfo = provider(fields = ["info"])
+
+def _passthrough_impl(ctx):
+    return [DefaultInfo(), ExecDepInfo(info = ctx.attrs.exec_dep)]
+
+passthrough = rule(
+    impl = _passthrough_impl,
+    attrs = {
+        "exec_dep": attrs.exec_dep(),
+    },
+)
+
+def _assert_eq(a, b):
+    if a != b:
+        fail("Expected {} == {}".format(a, b))
+
+def _mirror_impl(ctx: AnalysisContext) -> list[Provider]:
+    return [DefaultInfo(), MirrorInfo(info = ctx.attrs)]
+
+_mirror_exec_dep = rule(impl = _mirror_impl, attrs = {
+    "exec_dep": attrs.exec_dep(),
+})
+
+def _exec_dep_good_impl(ctx: AnalysisContext) -> Promise:
+    def f(providers):
+        res = providers[MirrorInfo].info
+        _assert_eq(res.exec_dep.label.configured_target().name, "remote_only")
+        return [DefaultInfo()]
+
+    return ctx.actions.anon_target(_mirror_exec_dep, {
+        "exec_dep": ctx.attrs.dep[ExecDepInfo].info,
+    }).promise.map(f)
+
+exec_dep_good = rule(impl = _exec_dep_good_impl, attrs = {
+    "dep": attrs.dep(),
+})
+
+def _exec_dep_bad_impl(ctx: AnalysisContext) -> Promise:
+    def f(providers):
+        res = providers[MirrorInfo].info
+        _assert_eq(res.exec_dep, "remote_only")
+        return [DefaultInfo()]
+
+    return ctx.actions.anon_target(_mirror_exec_dep, {
+        "exec_dep": ctx.attrs.dep[ExecDepInfo].info,
+    }).promise.map(f)
+
+exec_dep_bad = rule(impl = _exec_dep_bad_impl, attrs = {
+    "dep": attrs.dep(),
+})
+
+def _exec_dep_rejects_dep_impl(ctx: AnalysisContext) -> Promise:
+    def f(_providers):
+        return [DefaultInfo()]
+
+    return ctx.actions.anon_target(_mirror_exec_dep, {
+        "exec_dep": ctx.attrs.dep,
+    }).promise.map(f)
+
+exec_dep_rejects_dep = rule(impl = _exec_dep_rejects_dep_impl, attrs = {
+    "dep": attrs.dep(),
+})
diff --git a/tests/core/build/test_uncategorized_data/anon_exec_deps/rules/rules.bzl b/tests/core/build/test_uncategorized_data/anon_exec_deps/rules/rules.bzl
new file mode 100644
index 0000000000000..6d4c9db81e7b6
--- /dev/null
+++ b/tests/core/build/test_uncategorized_data/anon_exec_deps/rules/rules.bzl
@@ -0,0 +1,117 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+ExecutorConfigInfo = provider(fields = ["config"])
+
+def _platform(ctx):
+    # We need to introduce a constraint to ensure our different execution
+    # platforms are distinct. This is because exec_compatible_with selects a
+    # ConfigurationInfo (which provides a config), not an ExecutionPlatformInfo
+    # (instead it matches on it).
+ configuration = ConfigurationInfo( + constraints = { + ctx.attrs.setting.label.raw_target(): ConstraintValueInfo( + setting = ctx.attrs.setting[ConstraintSettingInfo], + label = ctx.label.raw_target(), + ), + }, + values = {}, + ) + + platform = ExecutionPlatformInfo( + label = ctx.label.raw_target(), + configuration = configuration, + executor_config = CommandExecutorConfig( + local_enabled = ctx.attrs.local_enabled, + remote_enabled = ctx.attrs.remote_enabled, + remote_execution_properties = { + "platform": "linux-remote-execution", + }, + remote_execution_max_input_files_mebibytes = 1, + use_limited_hybrid = ctx.attrs.use_limited_hybrid, + allow_limited_hybrid_fallbacks = ctx.attrs.allow_hybrid_fallbacks_on_failure, + allow_hybrid_fallbacks_on_failure = ctx.attrs.allow_hybrid_fallbacks_on_failure, + remote_execution_use_case = "buck2-testing", + allow_cache_uploads = ctx.attrs.allow_cache_uploads, + experimental_low_pass_filter = ctx.attrs.experimental_low_pass_filter, + max_cache_upload_mebibytes = 1, + ), + ) + + return [ + DefaultInfo(), + platform, + configuration, + ] + +platform = rule( + impl = _platform, + attrs = { + "allow_cache_uploads": attrs.bool(default = False), + "allow_hybrid_fallbacks_on_failure": attrs.bool(default = False), + "experimental_low_pass_filter": attrs.bool( + default = read_config("test", "experimental_low_pass_filter", "") in ["true", "True"], + ), + "local_enabled": attrs.bool(), + "remote_enabled": attrs.bool(), + "setting": attrs.configuration_label(), + "use_limited_hybrid": attrs.bool(default = True), + }, +) + +def _platforms(ctx): + return [ + DefaultInfo(), + ExecutionPlatformRegistrationInfo( + platforms = [x[ExecutionPlatformInfo] for x in ctx.attrs.platforms], + ), + ] + +platforms = rule( + impl = _platforms, + attrs = { + "platforms": attrs.list(attrs.dep(providers = [ExecutionPlatformInfo])), + }, +) + +def _target_platform(ctx): + return [ + DefaultInfo(), + PlatformInfo( + label = str(ctx.label.raw_target()), + configuration = ConfigurationInfo(constraints = {}, values = {}), + ), + ] + +target_platform = rule( + impl = _target_platform, + attrs = {}, +) + +def _config_setting(ctx): + return [DefaultInfo(), ConstraintSettingInfo(label = ctx.label.raw_target())] + +config_setting = rule( + impl = _config_setting, + attrs = {}, +) + +def _command(ctx): + return [DefaultInfo(default_output = ctx.attrs.command), RunInfo(args = cmd_args(ctx.attrs.command))] + +command = rule( + impl = _command, + attrs = { + "cache_buster": attrs.string(default = read_config("test", "cache_buster", "")), + "command": attrs.source(), + "force_full_hybrid_if_capable": attrs.bool(default = False), + "local_only": attrs.bool(default = False), + "prefer_local": attrs.bool(default = False), + "prefer_remote": attrs.bool(default = False), + "weight": attrs.int(default = 1), + }, +) diff --git a/tests/core/build/test_uncategorized_data/anon_exec_deps/tests/TARGETS.fixture b/tests/core/build/test_uncategorized_data/anon_exec_deps/tests/TARGETS.fixture new file mode 100644 index 0000000000000..9b087561c6b70 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/anon_exec_deps/tests/TARGETS.fixture @@ -0,0 +1,44 @@ +load("//rules:exec_deps.bzl", "exec_dep_bad", "exec_dep_good", "exec_dep_rejects_dep", "passthrough") +load("//rules:rules.bzl", "command") + +command( + name = "local_only", + command = "local_only.py", + local_only = True, + default_target_platform = "//platforms:target", + target_compatible_with = ["//platforms:local_only"], +) + +command( + name = 
"remote_only", + command = "remote_only.py", + prefer_remote = True, + default_target_platform = "//platforms:target", + target_compatible_with = ["//platforms:remote_only"], +) + +passthrough( + name = "passthrough_remote", + exec_compatible_with = ["//platforms:remote_only"], + exec_dep = ":remote_only", +) + +exec_dep_good( + name = "exec_dep_good", + default_target_platform = "//platforms:target", + exec_compatible_with = ["//platforms:remote_only"], + dep = ":passthrough_remote", +) + +exec_dep_bad( + name = "exec_dep_bad", + default_target_platform = "//platforms:target", + exec_compatible_with = ["//platforms:local_only"], + dep = ":passthrough_remote", +) + +exec_dep_rejects_dep( + name = "exec_dep_rejects_dep", + default_target_platform = "//platforms:target", + dep = ":passthrough_remote", +) diff --git a/tests/core/build/test_uncategorized_data/anon_exec_deps/tests/local_only.py b/tests/core/build/test_uncategorized_data/anon_exec_deps/tests/local_only.py new file mode 100644 index 0000000000000..eca503194cb99 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/anon_exec_deps/tests/local_only.py @@ -0,0 +1,17 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +import pathlib +import sys + +re_worker_path = "/run/re_worker/beacon" +if pathlib.Path(re_worker_path).exists(): + print("This only runs on local", file=sys.stderr) + sys.exit(1) + +out = sys.argv[1] +pathlib.Path(out).touch() diff --git a/tests/core/build/test_uncategorized_data/anon_exec_deps/tests/remote_only.py b/tests/core/build/test_uncategorized_data/anon_exec_deps/tests/remote_only.py new file mode 100644 index 0000000000000..082fcd998eea1 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/anon_exec_deps/tests/remote_only.py @@ -0,0 +1,17 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +import pathlib +import sys + +re_worker_path = "/run/re_worker/beacon" +if not pathlib.Path(re_worker_path).exists(): + print("This only runs on RE", file=sys.stderr) + sys.exit(1) + +out = sys.argv[1] +pathlib.Path(out).touch() diff --git a/tests/core/build/test_uncategorized_data/args/.buckconfig b/tests/core/build/test_uncategorized_data/args/.buckconfig new file mode 100644 index 0000000000000..7ef37da18491c --- /dev/null +++ b/tests/core/build/test_uncategorized_data/args/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +self = . 
+prelude = prelude diff --git a/tests/core/build/test_uncategorized_data/args/TARGETS.fixture b/tests/core/build/test_uncategorized_data/args/TARGETS.fixture new file mode 100644 index 0000000000000..559e7e0280ce2 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/args/TARGETS.fixture @@ -0,0 +1,16 @@ +load(":defs.bzl", "foo_binary", "foo_library", "foo_toolchain") + +foo_toolchain(name = "toolchain") + +foo_library(name = "lib1", flags = ["this", "is", "lib1"]) + +foo_library(name = "lib2", flags = ["this", "is", "lib2"]) + +foo_binary( + name = "bin", + flags = [ + "$(FOO) -- $(FOO_FLAGS)", + "$(NAME :lib1) -- $(LIB_FLAGS :lib1)", + "$(NAME :lib2) -- $(LIB_FLAGS :lib2)", + ], +) diff --git a/tests/core/build/test_uncategorized_data/args/defs.bzl b/tests/core/build/test_uncategorized_data/args/defs.bzl new file mode 100644 index 0000000000000..e6e0673eb137f --- /dev/null +++ b/tests/core/build/test_uncategorized_data/args/defs.bzl @@ -0,0 +1,56 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _toolchain_impl(_ctx): + return [ + DefaultInfo(), + TemplatePlaceholderInfo( + unkeyed_variables = { + "FOO": "", + "FOO_FLAGS": "", + }, + ), + ] + +def _library_impl(ctx): + return [ + DefaultInfo(), + TemplatePlaceholderInfo( + keyed_variables = { + "LIB_FLAGS": cmd_args(ctx.attrs.flags), + "NAME": ctx.label.name, + }, + ), + ] + +def _binary_impl(ctx): + output, _ = ctx.actions.write("out", ctx.attrs.flags, allow_args = True) + return [ + DefaultInfo( + default_output = output, + ), + ] + +foo_toolchain = rule( + impl = _toolchain_impl, + attrs = {}, +) + +foo_library = rule( + impl = _library_impl, + attrs = { + "flags": attrs.list(attrs.arg()), + }, +) + +foo_binary = rule( + impl = _binary_impl, + attrs = { + "flags": attrs.list(attrs.arg()), + "_toolchains": attrs.list(attrs.dep(), default = ["//:toolchain"]), + }, +) diff --git a/tests/core/build/test_uncategorized_data/args/prelude/prelude.bzl b/tests/core/build/test_uncategorized_data/args/prelude/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_uncategorized_data/artifact_consistency/.buckconfig b/tests/core/build/test_uncategorized_data/artifact_consistency/.buckconfig new file mode 100644 index 0000000000000..2d982316051eb --- /dev/null +++ b/tests/core/build/test_uncategorized_data/artifact_consistency/.buckconfig @@ -0,0 +1,8 @@ +[repositories] +root = . 
+prelude = prelude +[buck2] +allow_eden_io = false +dice = modern +[buildfile] +name = TARGETS.fixture diff --git a/tests/core/build/test_uncategorized_data/artifact_consistency/.buckroot b/tests/core/build/test_uncategorized_data/artifact_consistency/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_uncategorized_data/artifact_consistency/TARGETS.fixture b/tests/core/build/test_uncategorized_data/artifact_consistency/TARGETS.fixture new file mode 100644 index 0000000000000..b4671cfb6222f --- /dev/null +++ b/tests/core/build/test_uncategorized_data/artifact_consistency/TARGETS.fixture @@ -0,0 +1,6 @@ +gen_files( + name = "gen", + script = "gen.py", + files = ["file1", "file2", "file3"], + action_index = int(read_config("gen", "idx", 0)), +) diff --git a/tests/core/build/test_uncategorized_data/artifact_consistency/gen.py b/tests/core/build/test_uncategorized_data/artifact_consistency/gen.py new file mode 100644 index 0000000000000..7193a67e64b8c --- /dev/null +++ b/tests/core/build/test_uncategorized_data/artifact_consistency/gen.py @@ -0,0 +1,15 @@ +#!/usr/bin/env fbpython +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +import sys +from pathlib import Path + +out = Path(sys.argv[1]) +Path.mkdir(out) +for f in sys.argv[2:]: + Path(out / f).write_text("This is " + f) diff --git a/tests/core/build/test_uncategorized_data/artifact_consistency/prelude/prelude.bzl b/tests/core/build/test_uncategorized_data/artifact_consistency/prelude/prelude.bzl new file mode 100644 index 0000000000000..c4381867bfacd --- /dev/null +++ b/tests/core/build/test_uncategorized_data/artifact_consistency/prelude/prelude.bzl @@ -0,0 +1,33 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
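+# Test-fixture rule (description added for clarity): `gen_files` pads the
+# action graph with `action_index` throwaway write actions before running
+# `script` to populate an output directory whose entries are also exposed
+# as projected sub-targets.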
+ +def _gen_files(ctx): + for i in range(0, ctx.attrs.action_index): + ctx.actions.write("action-{}".format(i), "nonsense data") + + output = ctx.actions.declare_output("out_dir", dir = True) + subouts = [] + subtargets = {} + for f in ctx.attrs.files: + out = output.project(f) + subouts.append(out.as_output()) + subtargets[f] = [DefaultInfo(default_outputs = [out])] + args = cmd_args( + ["python3", ctx.attrs.script, output.as_output()] + ctx.attrs.files, + hidden = subouts, + ) + ctx.actions.run(args, category = "gen") + return [DefaultInfo(sub_targets = subtargets)] + +gen_files = rule( + impl = _gen_files, + attrs = { + "action_index": attrs.int(), + "files": attrs.list(attrs.string()), + "script": attrs.source(), + }, +) diff --git a/tests/core/build/test_uncategorized_data/buckroot/.buckconfig b/tests/core/build/test_uncategorized_data/buckroot/.buckconfig new file mode 100644 index 0000000000000..0c1083b55c5a0 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/buckroot/.buckconfig @@ -0,0 +1 @@ +[ BROKEN diff --git a/tests/core/build/test_uncategorized_data/buckroot/rooted/.buckconfig b/tests/core/build/test_uncategorized_data/buckroot/rooted/.buckconfig new file mode 100644 index 0000000000000..a29bb4dd9273d --- /dev/null +++ b/tests/core/build/test_uncategorized_data/buckroot/rooted/.buckconfig @@ -0,0 +1,7 @@ +[repositories] +cell = cell +root = . +prelude = cell + +[buildfile] +name = OUTER diff --git a/tests/core/build/test_uncategorized_data/buckroot/rooted/.buckroot b/tests/core/build/test_uncategorized_data/buckroot/rooted/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_uncategorized_data/buckroot/rooted/OUTER b/tests/core/build/test_uncategorized_data/buckroot/rooted/OUTER new file mode 100644 index 0000000000000..4013a0355b217 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/buckroot/rooted/OUTER @@ -0,0 +1,5 @@ + +target( + name = "outer", + visibility = ["PUBLIC"], +) diff --git a/tests/core/build/test_uncategorized_data/buckroot/rooted/cell/.buckconfig b/tests/core/build/test_uncategorized_data/buckroot/rooted/cell/.buckconfig new file mode 100644 index 0000000000000..478b62bc6ac3d --- /dev/null +++ b/tests/core/build/test_uncategorized_data/buckroot/rooted/cell/.buckconfig @@ -0,0 +1,7 @@ +[repositories] +cell = . +root = .. +prelude = . + +[buildfile] +name = INNER diff --git a/tests/core/build/test_uncategorized_data/buckroot/rooted/cell/INNER b/tests/core/build/test_uncategorized_data/buckroot/rooted/cell/INNER new file mode 100644 index 0000000000000..9cac1008fa905 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/buckroot/rooted/cell/INNER @@ -0,0 +1,5 @@ + +target( + name = "inner", + deps = ["root//:outer"], +) diff --git a/tests/core/build/test_uncategorized_data/buckroot/rooted/cell/prelude.bzl b/tests/core/build/test_uncategorized_data/buckroot/rooted/cell/prelude.bzl new file mode 100644 index 0000000000000..6dee9b4790dab --- /dev/null +++ b/tests/core/build/test_uncategorized_data/buckroot/rooted/cell/prelude.bzl @@ -0,0 +1,11 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
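+# Minimal stand-in prelude for the buckroot fixtures: `target` accepts an
+# optional `deps` list and builds nothing, which is all the OUTER/INNER
+# build files here use.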
+ +def _impl(_ctx): + return [DefaultInfo()] + +target = rule(impl = _impl, attrs = {"deps": attrs.list(attrs.dep(), default = [])}) diff --git a/tests/core/build/test_uncategorized_data/build_providers/.buckconfig b/tests/core/build/test_uncategorized_data/build_providers/.buckconfig new file mode 100644 index 0000000000000..cb37c990c90cb --- /dev/null +++ b/tests/core/build/test_uncategorized_data/build_providers/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . +prelude = prelude diff --git a/tests/core/build/test_uncategorized_data/build_providers/TARGETS.fixture b/tests/core/build/test_uncategorized_data/build_providers/TARGETS.fixture new file mode 100644 index 0000000000000..bf632f4009eca --- /dev/null +++ b/tests/core/build/test_uncategorized_data/build_providers/TARGETS.fixture @@ -0,0 +1 @@ +simple_test(name = "target") diff --git a/tests/core/build/test_uncategorized_data/build_providers/build b/tests/core/build/test_uncategorized_data/build_providers/build new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_uncategorized_data/build_providers/prelude/prelude.bzl b/tests/core/build/test_uncategorized_data/build_providers/prelude/prelude.bzl new file mode 100644 index 0000000000000..2ee7f0fdd7b40 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/build_providers/prelude/prelude.bzl @@ -0,0 +1,28 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _simple_test_impl(ctx): + return [ + DefaultInfo(default_output = ctx.actions.write("build", "")), + RunInfo(args = cmd_args([ + ctx.actions.write("run", ""), + ])), + ExternalRunnerTestInfo( + type = "custom", + command = [cmd_args([ + ctx.actions.write("test", ""), + ])], + env = {}, + labels = [], + contacts = [], + ), + ] + +simple_test = rule( + impl = _simple_test_impl, + attrs = {}, +) diff --git a/tests/core/build/test_uncategorized_data/build_providers/run b/tests/core/build/test_uncategorized_data/build_providers/run new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_uncategorized_data/build_providers/test b/tests/core/build/test_uncategorized_data/build_providers/test new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_uncategorized_data/cell_delete/.buckconfig b/tests/core/build/test_uncategorized_data/cell_delete/.buckconfig new file mode 100644 index 0000000000000..98302783f13f6 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/cell_delete/.buckconfig @@ -0,0 +1,7 @@ +[repositories] +hello = hello +root = . 
+prelude = prelude + +[buildfile] +name=TARGETS.fixture diff --git a/tests/core/build/test_uncategorized_data/cell_delete/TARGETS.fixture b/tests/core/build/test_uncategorized_data/cell_delete/TARGETS.fixture new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_uncategorized_data/cell_delete/prelude/prelude.bzl b/tests/core/build/test_uncategorized_data/cell_delete/prelude/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_uncategorized_data/cleanup/.buckconfig b/tests/core/build/test_uncategorized_data/cleanup/.buckconfig new file mode 100644 index 0000000000000..cb37c990c90cb --- /dev/null +++ b/tests/core/build/test_uncategorized_data/cleanup/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . +prelude = prelude diff --git a/tests/core/build/test_uncategorized_data/cleanup/TARGETS.fixture b/tests/core/build/test_uncategorized_data/cleanup/TARGETS.fixture new file mode 100644 index 0000000000000..82949477a51aa --- /dev/null +++ b/tests/core/build/test_uncategorized_data/cleanup/TARGETS.fixture @@ -0,0 +1,3 @@ +load(":defs.bzl", "test_cleanup") + +test_cleanup(name = "cleanup") diff --git a/tests/core/build/test_uncategorized_data/cleanup/defs.bzl b/tests/core/build/test_uncategorized_data/cleanup/defs.bzl new file mode 100644 index 0000000000000..466af8ada44ee --- /dev/null +++ b/tests/core/build/test_uncategorized_data/cleanup/defs.bzl @@ -0,0 +1,12 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _test_cleanup_impl(ctx): + out = ctx.actions.write("path/to/output", "") + return [DefaultInfo(out)] + +test_cleanup = rule(impl = _test_cleanup_impl, attrs = {}) diff --git a/tests/core/build/test_uncategorized_data/cleanup/prelude/prelude.bzl b/tests/core/build/test_uncategorized_data/cleanup/prelude/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_uncategorized_data/concurrency/.buckconfig b/tests/core/build/test_uncategorized_data/concurrency/.buckconfig new file mode 100644 index 0000000000000..6a2e48bc41d8a --- /dev/null +++ b/tests/core/build/test_uncategorized_data/concurrency/.buckconfig @@ -0,0 +1,9 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored + +[repositories] +root = . +prelude = prelude diff --git a/tests/core/build/test_uncategorized_data/concurrency/TARGETS.fixture b/tests/core/build/test_uncategorized_data/concurrency/TARGETS.fixture new file mode 100644 index 0000000000000..4d22aae1bbc0b --- /dev/null +++ b/tests/core/build/test_uncategorized_data/concurrency/TARGETS.fixture @@ -0,0 +1,3 @@ +load(":defs.bzl", "test") + +test(name = "weight") diff --git a/tests/core/build/test_uncategorized_data/concurrency/defs.bzl b/tests/core/build/test_uncategorized_data/concurrency/defs.bzl new file mode 100644 index 0000000000000..53d71dcffd869 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/concurrency/defs.bzl @@ -0,0 +1,24 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _test_impl(ctx: AnalysisContext) -> list[Provider]: + outs = {} + for i in range(10): + o = ctx.actions.declare_output("out/{}".format(i)) + ctx.actions.run( + ["python3", "-c", "import time, sys; time.sleep(2); open(sys.argv[1],'w')", o.as_output()], + category = "test", + identifier = str(i), + weight_percentage = 20, + ) + outs[str(i)] = o + + out = ctx.actions.symlinked_dir("outs", outs) + + return [DefaultInfo(out)] + +test = rule(attrs = {}, impl = _test_impl) diff --git a/tests/core/build/test_uncategorized_data/concurrency/prelude/prelude.bzl b/tests/core/build/test_uncategorized_data/concurrency/prelude/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_uncategorized_data/fail_fast/.buckconfig b/tests/core/build/test_uncategorized_data/fail_fast/.buckconfig new file mode 100644 index 0000000000000..6a2e48bc41d8a --- /dev/null +++ b/tests/core/build/test_uncategorized_data/fail_fast/.buckconfig @@ -0,0 +1,9 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored + +[repositories] +root = . +prelude = prelude diff --git a/tests/core/build/test_uncategorized_data/fail_fast/TARGETS.fixture b/tests/core/build/test_uncategorized_data/fail_fast/TARGETS.fixture new file mode 100644 index 0000000000000..f246328e47195 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/fail_fast/TARGETS.fixture @@ -0,0 +1,4 @@ +load(":defs.bzl", "mixed", "slow") + +slow(name = "slow") +mixed(name = "mixed") diff --git a/tests/core/build/test_uncategorized_data/fail_fast/defs.bzl b/tests/core/build/test_uncategorized_data/fail_fast/defs.bzl new file mode 100644 index 0000000000000..0a8ce433b630b --- /dev/null +++ b/tests/core/build/test_uncategorized_data/fail_fast/defs.bzl @@ -0,0 +1,35 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
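+# Every action below fails on purpose: `slow` sleeps 10s before exiting 1,
+# while `mixed` pairs an immediately failing default output with a
+# slow-failing other_output, so the fail-fast tests can observe which
+# failure is reported first.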
+ +def _slow(ctx): + slow = ctx.actions.declare_output("slow") + + ctx.actions.run( + ["python3", "-c", "import time, sys; time.sleep(10); sys.exit(1)", slow.as_output()], + category = "slow_default_output", + ) + + return [DefaultInfo(slow)] + +def _mixed(ctx): + fast = ctx.actions.declare_output("fast") + slow = ctx.actions.declare_output("slow") + + ctx.actions.run( + ["python3", "-c", "import sys; sys.exit(1)", fast.as_output()], + category = "fast_default_output", + ) + + ctx.actions.run( + ["python3", "-c", "import time, sys; time.sleep(10); sys.exit(1)", slow.as_output()], + category = "slow_other_output", + ) + + return [DefaultInfo(default_output = fast, other_outputs = [slow])] + +slow = rule(impl = _slow, attrs = {}) +mixed = rule(impl = _mixed, attrs = {}) diff --git a/tests/core/build/test_uncategorized_data/fail_fast/prelude/prelude.bzl b/tests/core/build/test_uncategorized_data/fail_fast/prelude/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_uncategorized_data/invalid_file_invalidation/.buckconfig b/tests/core/build/test_uncategorized_data/invalid_file_invalidation/.buckconfig new file mode 100644 index 0000000000000..cb37c990c90cb --- /dev/null +++ b/tests/core/build/test_uncategorized_data/invalid_file_invalidation/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . +prelude = prelude diff --git a/tests/core/build/test_uncategorized_data/invalid_file_invalidation/TARGETS.fixture b/tests/core/build/test_uncategorized_data/invalid_file_invalidation/TARGETS.fixture new file mode 100644 index 0000000000000..42d58c778f5ec --- /dev/null +++ b/tests/core/build/test_uncategorized_data/invalid_file_invalidation/TARGETS.fixture @@ -0,0 +1,6 @@ +load( + ":defs.bzl", + "defs", +) + +defs() diff --git a/tests/core/build/test_uncategorized_data/invalid_file_invalidation/defs.bzl b/tests/core/build/test_uncategorized_data/invalid_file_invalidation/defs.bzl new file mode 100644 index 0000000000000..7c92b5cdd54a2 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/invalid_file_invalidation/defs.bzl @@ -0,0 +1,18 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
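+# Symlinks the directory source into the output; the invalidation tests can
+# then modify files under `src` and check that the build is re-run.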
+ +def _test_impl(ctx): + out = ctx.actions.symlinked_dir("out", {"in": ctx.attrs.src}) + return [DefaultInfo(default_output = out)] + +test = rule( + impl = _test_impl, + attrs = {"src": attrs.source(allow_directory = True)}, +) + +def defs(): + test(name = "root", src = "src") diff --git a/tests/core/build/test_uncategorized_data/invalid_file_invalidation/prelude/prelude.bzl b/tests/core/build/test_uncategorized_data/invalid_file_invalidation/prelude/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_uncategorized_data/invalid_file_invalidation/src/a b/tests/core/build/test_uncategorized_data/invalid_file_invalidation/src/a new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_uncategorized_data/keep_going_build/.buckconfig b/tests/core/build/test_uncategorized_data/keep_going_build/.buckconfig new file mode 100644 index 0000000000000..6a2e48bc41d8a --- /dev/null +++ b/tests/core/build/test_uncategorized_data/keep_going_build/.buckconfig @@ -0,0 +1,9 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored + +[repositories] +root = . +prelude = prelude diff --git a/tests/core/build/test_uncategorized_data/keep_going_build/TARGETS.fixture b/tests/core/build/test_uncategorized_data/keep_going_build/TARGETS.fixture new file mode 100644 index 0000000000000..8e416d93d495d --- /dev/null +++ b/tests/core/build/test_uncategorized_data/keep_going_build/TARGETS.fixture @@ -0,0 +1,3 @@ +load(":defs.bzl", "top") + +top(name = "top") diff --git a/tests/core/build/test_uncategorized_data/keep_going_build/defs.bzl b/tests/core/build/test_uncategorized_data/keep_going_build/defs.bzl new file mode 100644 index 0000000000000..649219a0aea07 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/keep_going_build/defs.bzl @@ -0,0 +1,31 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _top(ctx): + fast = ctx.actions.declare_output("fast") + slow = ctx.actions.declare_output("slow") + out = ctx.actions.declare_output("out") + + ctx.actions.run( + ["python3", "-c", "import sys; sys.exit(1)", fast.as_output()], + category = "fast_action", + ) + + ctx.actions.run( + ["python3", "-c", "import time, sys; time.sleep(10); sys.exit(1)", slow.as_output()], + category = "slow_action", + ) + + # Won't actually run because the others will fail + ctx.actions.run( + ["bugbugbug", fast, slow, out.as_output()], + category = "noop", + ) + + return [DefaultInfo(default_output = out)] + +top = rule(impl = _top, attrs = {}) diff --git a/tests/core/build/test_uncategorized_data/keep_going_build/prelude/prelude.bzl b/tests/core/build/test_uncategorized_data/keep_going_build/prelude/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_uncategorized_data/log_action_keys/.buckconfig b/tests/core/build/test_uncategorized_data/log_action_keys/.buckconfig new file mode 100644 index 0000000000000..b2a50a06c92c8 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/log_action_keys/.buckconfig @@ -0,0 +1,12 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored + +[repositories] +root = . 
+prelude = prelude + +[build] + execution_platforms = root//platforms:platforms diff --git a/tests/core/build/test_uncategorized_data/log_action_keys/TARGETS.fixture b/tests/core/build/test_uncategorized_data/log_action_keys/TARGETS.fixture new file mode 100644 index 0000000000000..a0624200c9ee9 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/log_action_keys/TARGETS.fixture @@ -0,0 +1,3 @@ +load(":defs.bzl", "test") + +test(name = "test", seed = read_config("test", "seed")) diff --git a/tests/core/build/test_uncategorized_data/log_action_keys/defs.bzl b/tests/core/build/test_uncategorized_data/log_action_keys/defs.bzl new file mode 100644 index 0000000000000..8f21e8308432d --- /dev/null +++ b/tests/core/build/test_uncategorized_data/log_action_keys/defs.bzl @@ -0,0 +1,17 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _test_impl(ctx): + out = ctx.actions.declare_output("file") + ctx.actions.run( + ["touch", out.as_output()], + category = "touch", + env = {"seed": ctx.attrs.seed}, + ) + return [DefaultInfo(out)] + +test = rule(attrs = {"seed": attrs.string()}, impl = _test_impl) diff --git a/tests/core/build/test_uncategorized_data/log_action_keys/platforms/TARGETS.fixture b/tests/core/build/test_uncategorized_data/log_action_keys/platforms/TARGETS.fixture new file mode 100644 index 0000000000000..80533d33c2a4b --- /dev/null +++ b/tests/core/build/test_uncategorized_data/log_action_keys/platforms/TARGETS.fixture @@ -0,0 +1,5 @@ +load(":defs.bzl", "execution_platforms") + +execution_platforms( + name = "platforms", +) diff --git a/tests/core/build/test_uncategorized_data/log_action_keys/platforms/defs.bzl b/tests/core/build/test_uncategorized_data/log_action_keys/platforms/defs.bzl new file mode 100644 index 0000000000000..76070567fe0ff --- /dev/null +++ b/tests/core/build/test_uncategorized_data/log_action_keys/platforms/defs.bzl @@ -0,0 +1,33 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
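+# Registers a single remote-only execution platform with a fixed
+# `remote_execution_action_key` ("executor"), which the action-key logging
+# tests inspect.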
+ +def _execution_platform(ctx): + platform = ExecutionPlatformInfo( + label = ctx.label.raw_target(), + configuration = ConfigurationInfo( + constraints = { + }, + values = {}, + ), + executor_config = CommandExecutorConfig( + local_enabled = False, + remote_enabled = True, + remote_cache_enabled = True, + remote_execution_properties = { + "platform": "linux-remote-execution", + }, + remote_execution_use_case = "buck2-testing", + remote_execution_action_key = "executor", + ), + ) + + return [ + DefaultInfo(), + ExecutionPlatformRegistrationInfo(platforms = [platform]), + ] + +execution_platforms = rule(attrs = {}, impl = _execution_platform) diff --git a/tests/core/build/test_uncategorized_data/log_action_keys/prelude/prelude.bzl b/tests/core/build/test_uncategorized_data/log_action_keys/prelude/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_uncategorized_data/materialize_inputs_for_failed_actions/.buckconfig b/tests/core/build/test_uncategorized_data/materialize_inputs_for_failed_actions/.buckconfig new file mode 100644 index 0000000000000..b2a50a06c92c8 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/materialize_inputs_for_failed_actions/.buckconfig @@ -0,0 +1,12 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored + +[repositories] +root = . +prelude = prelude + +[build] + execution_platforms = root//platforms:platforms diff --git a/tests/core/build/test_uncategorized_data/materialize_inputs_for_failed_actions/TARGETS.fixture b/tests/core/build/test_uncategorized_data/materialize_inputs_for_failed_actions/TARGETS.fixture new file mode 100644 index 0000000000000..48b31e926bceb --- /dev/null +++ b/tests/core/build/test_uncategorized_data/materialize_inputs_for_failed_actions/TARGETS.fixture @@ -0,0 +1,5 @@ +load(":defs.bzl", "action_fail", "dep") + +dep(name = "dep", script = "remote_only.py") + +action_fail(name = "action_fail", dep = ":dep") diff --git a/tests/core/build/test_uncategorized_data/materialize_inputs_for_failed_actions/defs.bzl b/tests/core/build/test_uncategorized_data/materialize_inputs_for_failed_actions/defs.bzl new file mode 100644 index 0000000000000..9f66bbdcbb7d4 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/materialize_inputs_for_failed_actions/defs.bzl @@ -0,0 +1,51 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
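+# `dep` produces an artifact (remotely, via remote_only.py); `action_fail`
+# then fails while taking that artifact as a hidden input, so the tests can
+# verify that inputs of failed actions still get materialized.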
+ +def _dep_impl(ctx): + out = ctx.actions.declare_output("dep") + ctx.actions.run( + [ + "python3", + ctx.attrs.script, + out.as_output(), + ], + env = {"cache_buster": ctx.attrs.cache_buster}, + category = "test", + ) + return [DefaultInfo(default_output = out)] + +dep = rule( + impl = _dep_impl, + attrs = { + "cache_buster": attrs.string(default = read_config("test", "cache_buster", "")), + "script": attrs.source(), + }, +) + +def _action_fail(ctx): + dep = ctx.attrs.dep[DefaultInfo].default_outputs[0] + out = ctx.actions.declare_output("failed_action") + ctx.actions.run( + cmd_args( + "python3", + "-c", + "import sys; sys.exit(1)", + out.as_output(), + hidden = dep, + ), + env = {"cache_buster": ctx.attrs.cache_buster}, + category = "test", + ) + return [DefaultInfo(default_outputs = [out])] + +action_fail = rule( + impl = _action_fail, + attrs = { + "cache_buster": attrs.string(default = read_config("test", "cache_buster", "")), + "dep": attrs.dep(), + }, +) diff --git a/tests/core/build/test_uncategorized_data/materialize_inputs_for_failed_actions/platforms/TARGETS.fixture b/tests/core/build/test_uncategorized_data/materialize_inputs_for_failed_actions/platforms/TARGETS.fixture new file mode 100644 index 0000000000000..80533d33c2a4b --- /dev/null +++ b/tests/core/build/test_uncategorized_data/materialize_inputs_for_failed_actions/platforms/TARGETS.fixture @@ -0,0 +1,5 @@ +load(":defs.bzl", "execution_platforms") + +execution_platforms( + name = "platforms", +) diff --git a/tests/core/build/test_uncategorized_data/materialize_inputs_for_failed_actions/platforms/defs.bzl b/tests/core/build/test_uncategorized_data/materialize_inputs_for_failed_actions/platforms/defs.bzl new file mode 100644 index 0000000000000..c51233d90aa0c --- /dev/null +++ b/tests/core/build/test_uncategorized_data/materialize_inputs_for_failed_actions/platforms/defs.bzl @@ -0,0 +1,33 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _execution_platform(ctx): + platform = ExecutionPlatformInfo( + label = ctx.label.raw_target(), + configuration = ConfigurationInfo( + constraints = { + }, + values = {}, + ), + executor_config = CommandExecutorConfig( + local_enabled = False, + remote_enabled = True, + remote_cache_enabled = False, + remote_execution_properties = { + "platform": "linux-remote-execution", + }, + remote_execution_use_case = "buck2-testing", + remote_execution_action_key = "executor", + ), + ) + + return [ + DefaultInfo(), + ExecutionPlatformRegistrationInfo(platforms = [platform]), + ] + +execution_platforms = rule(attrs = {}, impl = _execution_platform) diff --git a/tests/core/build/test_uncategorized_data/materialize_inputs_for_failed_actions/prelude/prelude.bzl b/tests/core/build/test_uncategorized_data/materialize_inputs_for_failed_actions/prelude/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_uncategorized_data/materialize_inputs_for_failed_actions/remote_only.py b/tests/core/build/test_uncategorized_data/materialize_inputs_for_failed_actions/remote_only.py new file mode 100644 index 0000000000000..dec207524c059 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/materialize_inputs_for_failed_actions/remote_only.py @@ -0,0 +1,17 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +import pathlib +import sys + +re_worker_path = "/run/re_worker/beacon" +if not pathlib.Path(re_worker_path).exists(): + print("This only runs on RE", file=sys.stderr) + sys.exit(1) + +with open(sys.argv[1], "w") as f: + f.write("yay!") diff --git a/tests/core/build/test_uncategorized_data/prelude_import/.buckconfig b/tests/core/build/test_uncategorized_data/prelude_import/.buckconfig new file mode 100644 index 0000000000000..1fd05961cda7e --- /dev/null +++ b/tests/core/build/test_uncategorized_data/prelude_import/.buckconfig @@ -0,0 +1,11 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored + +[repositories] +root = . +prelude = prelude +cell1 = cell1 +cell2 = cell2 diff --git a/tests/core/build/test_uncategorized_data/prelude_import/cell1/.buckconfig b/tests/core/build/test_uncategorized_data/prelude_import/cell1/.buckconfig new file mode 100644 index 0000000000000..4de800be90d4e --- /dev/null +++ b/tests/core/build/test_uncategorized_data/prelude_import/cell1/.buckconfig @@ -0,0 +1,5 @@ +[buildfile] +name=TARGETS.fixture + +[test] +config = cell1 diff --git a/tests/core/build/test_uncategorized_data/prelude_import/cell1/TARGETS.fixture b/tests/core/build/test_uncategorized_data/prelude_import/cell1/TARGETS.fixture new file mode 100644 index 0000000000000..32fd0b1d9c56c --- /dev/null +++ b/tests/core/build/test_uncategorized_data/prelude_import/cell1/TARGETS.fixture @@ -0,0 +1,3 @@ +# This calls check_config from the prelude, so this will have the function +# exported by the prelude, which will use the prelude cell's config. +check_config(name = "cell1") diff --git a/tests/core/build/test_uncategorized_data/prelude_import/cell2/.buckconfig b/tests/core/build/test_uncategorized_data/prelude_import/cell2/.buckconfig new file mode 100644 index 0000000000000..c9f8966c500d0 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/prelude_import/cell2/.buckconfig @@ -0,0 +1,5 @@ +[buildfile] +name=TARGETS.fixture + +[test] +config = cell2 diff --git a/tests/core/build/test_uncategorized_data/prelude_import/cell2/TARGETS.fixture b/tests/core/build/test_uncategorized_data/prelude_import/cell2/TARGETS.fixture new file mode 100644 index 0000000000000..d3fa11fdb2b10 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/prelude_import/cell2/TARGETS.fixture @@ -0,0 +1,8 @@ +load("@prelude//:prelude.bzl", "check_config") + +# This does not use check_config from the prelude, and instead loads it +# explicitly. If we let the config of this build file be used when loading +# imports from the prelude, we'll load it with the "wrong" config, +# which is our test case. 
+ +check_config(name = "cell2") diff --git a/tests/core/build/test_uncategorized_data/prelude_import/prelude/.buckconfig b/tests/core/build/test_uncategorized_data/prelude_import/prelude/.buckconfig new file mode 100644 index 0000000000000..d6ca04d3ce312 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/prelude_import/prelude/.buckconfig @@ -0,0 +1,5 @@ +[buildfile] +name=TARGETS.fixture + +[test] +config = prelude diff --git a/tests/core/build/test_uncategorized_data/prelude_import/prelude/prelude.bzl b/tests/core/build/test_uncategorized_data/prelude_import/prelude/prelude.bzl new file mode 100644 index 0000000000000..197febcad4b81 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/prelude_import/prelude/prelude.bzl @@ -0,0 +1,17 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +config = read_config("test", "config") + +def _check_config_impl(_ctx): + # This checks that the config we read is the one that was read in the + # prelude cell (our other cells have different configs). + if config != "prelude": + fail("Unexpected config!") + return [DefaultInfo()] + +check_config = rule(attrs = {}, impl = _check_config_impl) diff --git a/tests/core/build/test_uncategorized_data/projected_artifacts/.buckconfig b/tests/core/build/test_uncategorized_data/projected_artifacts/.buckconfig new file mode 100644 index 0000000000000..86593e627bfda --- /dev/null +++ b/tests/core/build/test_uncategorized_data/projected_artifacts/.buckconfig @@ -0,0 +1,12 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored + +[repositories] +root = . 
+prelude = prelude + +[buck2] + materializations = deferred diff --git a/tests/core/build/test_uncategorized_data/projected_artifacts/TARGETS.fixture b/tests/core/build/test_uncategorized_data/projected_artifacts/TARGETS.fixture new file mode 100644 index 0000000000000..16f79f18b4bf5 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/projected_artifacts/TARGETS.fixture @@ -0,0 +1,57 @@ +declare_sub_targets( + name = "fixture", + sub_targets = ["a", "b", "b/b", "c", "c/b"], + command = "commands/gen_fixture.py", +) + +exists( + name = "check_b", + command = "commands/exists.py", + paths = ["$(location :fixture[b])/b"], +) + +exists( + name = "check_b_local", + command = "commands/exists.py", + paths = ["$(location :fixture[b])/b"], + local = True, +) + +exists( + name = "check_b_b", + command = "commands/exists.py", + paths = ["$(location :fixture[b/b])"], +) + +exists( + name = "check_b_b_local", + command = "commands/exists.py", + paths = ["$(location :fixture[b/b])"], + local = True, +) + +exists( + name = "check_c", + command = "commands/exists.py", + paths = ["$(location :fixture[c])/b", "$(location :fixture[c])/b"], +) + +exists( + name = "check_c_local", + command = "commands/exists.py", + paths = ["$(location :fixture[c])/b", "$(location :fixture[c])/b"], + local = True, +) + +exists( + name = "check_c_b", + command = "commands/exists.py", + paths = ["$(location :fixture[c/b])"], +) + +exists( + name = "check_c_b_local", + command = "commands/exists.py", + paths = ["$(location :fixture[c/b])"], + local = True, +) diff --git a/tests/core/build/test_uncategorized_data/projected_artifacts/commands/exists.py b/tests/core/build/test_uncategorized_data/projected_artifacts/commands/exists.py new file mode 100644 index 0000000000000..27b963e20e723 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/projected_artifacts/commands/exists.py @@ -0,0 +1,21 @@ +#!/usr/bin/env fbpython +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +import os +import sys +from pathlib import Path + +out = sys.argv[1] + +for i in sys.argv[2:]: + print("check {}".format(i), file=sys.stderr) + if not os.path.exists(i): + exit(1) + + +Path(out).touch() diff --git a/tests/core/build/test_uncategorized_data/projected_artifacts/commands/gen_fixture.py b/tests/core/build/test_uncategorized_data/projected_artifacts/commands/gen_fixture.py new file mode 100644 index 0000000000000..14ac608be8cc5 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/projected_artifacts/commands/gen_fixture.py @@ -0,0 +1,24 @@ +#!/usr/bin/env fbpython +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
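+# Lays out the fixture tree: a regular file "a", "b/b", "c/c", and a
+# relative symlink "c/b" that points back at "b/b".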
+ +import os +import sys +from pathlib import Path + +out = Path(sys.argv[1]) +Path.mkdir(out) + +Path(out / "a").touch() + +Path.mkdir(out / "b") +Path(out / "b" / "b").write_text("This is b") + +Path.mkdir(out / "c") +Path(out / "c" / "c").write_text("This is c") + +os.symlink(Path("..") / "b" / "b", out / "c" / "b") diff --git a/tests/core/build/test_uncategorized_data/projected_artifacts/prelude/prelude.bzl b/tests/core/build/test_uncategorized_data/projected_artifacts/prelude/prelude.bzl new file mode 100644 index 0000000000000..55105f3a72dcd --- /dev/null +++ b/tests/core/build/test_uncategorized_data/projected_artifacts/prelude/prelude.bzl @@ -0,0 +1,34 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _declare_sub_targets(ctx: AnalysisContext) -> list[Provider]: + out_dir = ctx.actions.declare_output("out_dir", dir = True) + sub_targets = { + name: [DefaultInfo(default_output = out_dir.project(name))] + for name in ctx.attrs.sub_targets + } + ctx.actions.run(["python3", ctx.attrs.command, out_dir.as_output()], category = "mkdirs") + return [DefaultInfo(default_output = out_dir, sub_targets = sub_targets)] + +declare_sub_targets = rule( + impl = _declare_sub_targets, + attrs = {"command": attrs.source(), "sub_targets": attrs.list(attrs.string())}, +) + +def _exists(ctx: AnalysisContext) -> list[Provider]: + out = ctx.actions.declare_output("check") + ctx.actions.run( + ["python3", ctx.attrs.command, out.as_output(), ctx.attrs.paths], + category = "check", + local_only = ctx.attrs.local, + ) + return [DefaultInfo(default_output = out)] + +exists = rule( + impl = _exists, + attrs = {"command": attrs.source(), "local": attrs.bool(default = False), "paths": attrs.list(attrs.arg())}, +) diff --git a/tests/core/build/test_uncategorized_data/roots/.buckconfig b/tests/core/build/test_uncategorized_data/roots/.buckconfig new file mode 100644 index 0000000000000..a59c6036e8210 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/roots/.buckconfig @@ -0,0 +1,12 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored + +[repositories] +root = . 
+other = other + +[repository_aliases] + prelude = root diff --git a/tests/core/build/test_uncategorized_data/roots/TARGETS.fixture b/tests/core/build/test_uncategorized_data/roots/TARGETS.fixture new file mode 100644 index 0000000000000..2671e2ab41071 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/roots/TARGETS.fixture @@ -0,0 +1 @@ +test(name = "test") diff --git a/tests/core/build/test_uncategorized_data/roots/defs.bzl b/tests/core/build/test_uncategorized_data/roots/defs.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_uncategorized_data/roots/other/.buckconfig b/tests/core/build/test_uncategorized_data/roots/other/.buckconfig new file mode 100644 index 0000000000000..5c40fffb821ac --- /dev/null +++ b/tests/core/build/test_uncategorized_data/roots/other/.buckconfig @@ -0,0 +1,2 @@ +[buildfile] +name=TARGETS.fixture diff --git a/tests/core/build/test_uncategorized_data/roots/other/TARGETS.fixture b/tests/core/build/test_uncategorized_data/roots/other/TARGETS.fixture new file mode 100644 index 0000000000000..2671e2ab41071 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/roots/other/TARGETS.fixture @@ -0,0 +1 @@ +test(name = "test") diff --git a/tests/core/build/test_uncategorized_data/roots/prelude.bzl b/tests/core/build/test_uncategorized_data/roots/prelude.bzl new file mode 100644 index 0000000000000..ab529592d01e9 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/roots/prelude.bzl @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _test(ctx: AnalysisContext): + fixture = ctx.actions.write("fixture", "") + + out = ctx.actions.write_json("out.json", { + "cell_relative_to_fixture": cmd_args(ctx.label.cell_root, delimiter = "", relative_to = fixture), + "fixture_relative_to_cell": cmd_args(fixture, delimiter = "", relative_to = ctx.label.cell_root), + "fixture_relative_to_project": cmd_args(fixture, delimiter = "", relative_to = ctx.label.project_root), + "project_relative_to_fixture": cmd_args(ctx.label.project_root, delimiter = "", relative_to = fixture), + }) + + return [DefaultInfo(out, other_outputs = [fixture])] + +test = rule(impl = _test, attrs = {}) diff --git a/tests/core/build/test_uncategorized_data/tmpdir/.buckconfig b/tests/core/build/test_uncategorized_data/tmpdir/.buckconfig new file mode 100644 index 0000000000000..1df397532e03e --- /dev/null +++ b/tests/core/build/test_uncategorized_data/tmpdir/.buckconfig @@ -0,0 +1,11 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored + +[repositories] +root = . 
+ +[repository_aliases] + prelude = root diff --git a/tests/core/build/test_uncategorized_data/tmpdir/.buckroot b/tests/core/build/test_uncategorized_data/tmpdir/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_uncategorized_data/tmpdir/TARGETS.fixture b/tests/core/build/test_uncategorized_data/tmpdir/TARGETS.fixture new file mode 100644 index 0000000000000..d03be96476689 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/tmpdir/TARGETS.fixture @@ -0,0 +1,3 @@ +load(":defs.bzl", "test") + +test(name = "test", script = "test.py") diff --git a/tests/core/build/test_uncategorized_data/tmpdir/defs.bzl b/tests/core/build/test_uncategorized_data/tmpdir/defs.bzl new file mode 100644 index 0000000000000..8c708defba002 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/tmpdir/defs.bzl @@ -0,0 +1,38 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _test(ctx: AnalysisContext): + local = ctx.actions.declare_output("local") + remote = ctx.actions.declare_output("remote") + + ctx.actions.run( + [ + "python3", + ctx.attrs.script, + local.as_output(), + "local", + ], + category = "check", + identifier = "local", + prefer_local = True, + ) + + ctx.actions.run( + [ + "python3", + ctx.attrs.script, + remote.as_output(), + "remote", + ], + category = "check", + identifier = "remote", + prefer_remote = True, + ) + + return [DefaultInfo(other_outputs = [local, remote])] + +test = rule(attrs = {"script": attrs.source()}, impl = _test) diff --git a/tests/core/build/test_uncategorized_data/tmpdir/prelude.bzl b/tests/core/build/test_uncategorized_data/tmpdir/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_uncategorized_data/tmpdir/test.py b/tests/core/build/test_uncategorized_data/tmpdir/test.py new file mode 100644 index 0000000000000..20f08ea9d2a76 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/tmpdir/test.py @@ -0,0 +1,48 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
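+# Validates the temp-dir environment Buck sets up for actions:
+# BUCK_SCRATCH_PATH must be a relative path under buck-out, and TMPDIR
+# (TEMP/TMP on Windows) must be absolute; for local runs it must also
+# resolve back to the scratch path.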
+ +import os +import platform +import sys + + +def main(): + (out, location) = sys.argv[1:] + + if platform.system() == "Windows": + check = ["TEMP", "TMP"] + buck_out = "buck-out\\v2" + else: + check = ["TMPDIR"] + buck_out = "buck-out/v2" + + scratch = os.environ["BUCK_SCRATCH_PATH"] + assert not os.path.isabs(scratch), scratch + assert buck_out in scratch, scratch + assert os.path.isdir(scratch), scratch + + for c in check: + v = os.environ[c] + assert os.path.isabs(v), v + + if location == "local": + # Check the path is "ours" + assert buck_out in v, v + # Check the path is the same as BUCK_SCRATCH_PATH + rel = os.path.relpath(os.path.normpath(v)) + assert rel == scratch, rel + elif location == "remote": + pass + else: + raise Exception("invalid location: %s" % location) + + with open(out, "w"): + pass + + +if __name__ == "__main__": + main() diff --git a/tests/core/build/test_uncategorized_data/upload_all_actions/.buckconfig b/tests/core/build/test_uncategorized_data/upload_all_actions/.buckconfig new file mode 100644 index 0000000000000..7ef37da18491c --- /dev/null +++ b/tests/core/build/test_uncategorized_data/upload_all_actions/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +self = . +prelude = prelude diff --git a/tests/core/build/test_uncategorized_data/upload_all_actions/TARGETS.fixture b/tests/core/build/test_uncategorized_data/upload_all_actions/TARGETS.fixture new file mode 100644 index 0000000000000..4623a109912a2 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/upload_all_actions/TARGETS.fixture @@ -0,0 +1,3 @@ +load(":defs.bzl", "cp") + +cp(name = "cp", src = "src") diff --git a/tests/core/build/test_uncategorized_data/upload_all_actions/defs.bzl b/tests/core/build/test_uncategorized_data/upload_all_actions/defs.bzl new file mode 100644 index 0000000000000..0489cdc7e4898 --- /dev/null +++ b/tests/core/build/test_uncategorized_data/upload_all_actions/defs.bzl @@ -0,0 +1,19 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _cp_impl(ctx: AnalysisContext): + out = ctx.actions.declare_output("out") + ctx.actions.run(["python3", "-c", "import shutil, sys; from pathlib import Path; shutil.copyfile(Path(sys.argv[1]), Path(sys.argv[2]))", ctx.attrs.src, out.as_output()], category = "cp", local_only = True) + + return [ + DefaultInfo(default_output = out), + ] + +cp = rule( + impl = _cp_impl, + attrs = {"src": attrs.source()}, +) diff --git a/tests/core/build/test_uncategorized_data/upload_all_actions/prelude/prelude.bzl b/tests/core/build/test_uncategorized_data/upload_all_actions/prelude/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_universe.py b/tests/core/build/test_universe.py new file mode 100644 index 0000000000000..945bc790bfc06 --- /dev/null +++ b/tests/core/build/test_universe.py @@ -0,0 +1,31 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
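+# Builds //:test with and without --target-universe //:universe and asserts
+# that the select() resolves differently: "default" outside the universe,
+# "cat" inside it.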
+ +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_build_universe(buck: Buck) -> None: + # Run the build without universe. + result = await buck.build("//:test") + build_report = result.get_build_report() + output = build_report.output_for_target("//:test") + assert output.read_text().rstrip() == "default" + + # Now build the same target, but with the universe. + result = await buck.build( + "//:test", + "--target-universe", + "//:universe", + ) + build_report = result.get_build_report() + output = build_report.output_for_target("//:test") + assert output.read_text().rstrip() == "cat" diff --git a/tests/core/build/test_universe_data/.buckconfig b/tests/core/build/test_universe_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/build/test_universe_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . + nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/build/test_universe_data/.buckroot b/tests/core/build/test_universe_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/build/test_universe_data/TARGETS.fixture b/tests/core/build/test_universe_data/TARGETS.fixture new file mode 100644 index 0000000000000..2a71f11fc039b --- /dev/null +++ b/tests/core/build/test_universe_data/TARGETS.fixture @@ -0,0 +1,37 @@ +load(":write.bzl", "write") + +write( + name = "test", + data = select({ + ":cat": "cat", + "DEFAULT": "default", + }), + default_target_platform = ":p_default", +) + +stub( + name = "universe", + deps = [":test"], + default_target_platform = ":p_cat", +) + +constraint_setting( + name = "animal", +) + +constraint_value( + name = "cat", + constraint_setting = ":animal", +) + +platform( + name = "p_cat", + constraint_values = [ + ":cat", + ], +) + +platform( + name = "p_default", + constraint_values = [], +) diff --git a/tests/core/build/test_universe_data/write.bzl b/tests/core/build/test_universe_data/write.bzl new file mode 100644 index 0000000000000..548818261895e --- /dev/null +++ b/tests/core/build/test_universe_data/write.bzl @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
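+# Materializes the (select()-resolved) `data` string so the test can read
+# back which branch of the select() was chosen.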
+ +def _impl(ctx): + return [DefaultInfo(default_output = ctx.actions.write("out.txt", ctx.attrs.data))] + +write = rule( + impl = _impl, + attrs = { + "data": attrs.string(), + }, +) diff --git a/tests/core/bxl/BUCK b/tests/core/bxl/BUCK new file mode 100644 index 0000000000000..35e2fb3aff5a0 --- /dev/null +++ b/tests/core/bxl/BUCK @@ -0,0 +1,64 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_output", + srcs = ["test_output.py"], + data_dir = "test_output_data", + deps = [ + "//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_dynamic", + srcs = ["test_dynamic.py"], + data_dir = "test_dynamic_data", + deps = [ + "//buck2/tests/e2e_util:golden", + "//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_typecheck", + srcs = ["test_typecheck.py"], + data_dir = "test_typecheck_data", +) + +buck2_e2e_test( + name = "test_not_bxl", + srcs = ["test_not_bxl.py"], + data_dir = "test_not_bxl_data", + deps = [ + "//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_target_universe", + srcs = ["test_target_universe.py"], + data_dir = "test_target_universe_data", + deps = [ + "//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_configured_target", + srcs = ["test_configured_target.py"], + data_dir = "test_configured_target_data", + deps = [ + "//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_type_names_and_symbols", + srcs = ["test_type_names_and_symbols.py"], + data_dir = "test_type_names_and_symbols_data", + deps = [ + "//buck2/tests/e2e_util:utils", + ], +) diff --git a/tests/core/bxl/test_configured_target.py b/tests/core/bxl/test_configured_target.py new file mode 100644 index 0000000000000..fc2e48bfae037 --- /dev/null +++ b/tests/core/bxl/test_configured_target.py @@ -0,0 +1,29 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(data_dir="") +async def test_unwrap_forward(buck: Buck) -> None: + await buck.bxl("//bxl/configured_target.bxl:unwrap_forward") + + +@buck_test(data_dir="") +async def test_configured_targets_with_modifiers(buck: Buck) -> None: + result = await buck.bxl( + "//bxl/configured_target.bxl:configured_targets_with_modifiers" + ) + configurations = [line.strip() for line in result.stdout.splitlines()] + linux_cfg = await buck.audit_configurations(configurations[0]) + assert "root//:linux" in linux_cfg.stdout + macos_cfg = await buck.audit_configurations(configurations[1]) + assert "root//:macos" in macos_cfg.stdout diff --git a/tests/core/bxl/test_configured_target_data/.buckconfig b/tests/core/bxl/test_configured_target_data/.buckconfig new file mode 100644 index 0000000000000..0ba88b3812ead --- /dev/null +++ b/tests/core/bxl/test_configured_target_data/.buckconfig @@ -0,0 +1,17 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . 
+prelude = prelude + +[cell_aliases] +fbsource = root +fbcode = root +buck = root +config = prelude +ovr_config = prelude +toolchains = prelude + +[external_cells] +prelude = bundled diff --git a/tests/core/bxl/test_configured_target_data/.buckroot b/tests/core/bxl/test_configured_target_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/bxl/test_configured_target_data/TARGETS.fixture b/tests/core/bxl/test_configured_target_data/TARGETS.fixture new file mode 100644 index 0000000000000..996ccf61791c8 --- /dev/null +++ b/tests/core/bxl/test_configured_target_data/TARGETS.fixture @@ -0,0 +1,20 @@ +load(":defs.bzl", "dummy") + +constraint_setting( + name = "os", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "linux", + constraint_setting = ":os", + visibility = ["PUBLIC"], +) + +constraint_value( + name = "macos", + constraint_setting = ":os", + visibility = ["PUBLIC"], +) + +dummy(name = "dummy") diff --git a/tests/core/bxl/test_configured_target_data/bxl/configured_target.bxl b/tests/core/bxl/test_configured_target_data/bxl/configured_target.bxl new file mode 100644 index 0000000000000..429d903fa4fb7 --- /dev/null +++ b/tests/core/bxl/test_configured_target_data/bxl/configured_target.bxl @@ -0,0 +1,36 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _assert_eq(a, b): + if a != b: + fail("Expected {} == {}".format(a, b)) + +def _assert_not_eq(a, b): + if a == b: + fail("Expected {} != {}".format(a, b)) + +def _unwrap_forward_impl(ctx): + node = ctx.configured_targets("//transition:rainbow") + _assert_eq(node.rule_type, "forward") + actual_node = node.unwrap_forward() + _assert_not_eq(actual_node.rule_type, "forward") + +unwrap_forward = bxl_main( + impl = _unwrap_forward_impl, + cli_args = {}, +) + +def _configured_targets_with_modifiers_impl(ctx): + node = ctx.configured_targets("root//:dummy", modifiers = ["root//:linux"]) + ctx.output.print(node.label.config()) + node = ctx.configured_targets("root//:dummy", modifiers = ["root//:macos"]) + ctx.output.print(node.label.config()) + +configured_targets_with_modifiers = bxl_main( + impl = _configured_targets_with_modifiers_impl, + cli_args = {}, +) diff --git a/tests/core/bxl/test_configured_target_data/defs.bzl b/tests/core/bxl/test_configured_target_data/defs.bzl new file mode 100644 index 0000000000000..2065a03dc9cf2 --- /dev/null +++ b/tests/core/bxl/test_configured_target_data/defs.bzl @@ -0,0 +1,14 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
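+# No-op rule; the BXL tests only need a target they can configure with
+# different modifiers.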
+ +def _impl(_ctx: AnalysisContext) -> list[Provider]: + return [DefaultInfo()] + +dummy = rule( + attrs = {}, + impl = _impl, +) diff --git a/tests/core/bxl/test_configured_target_data/transition/TARGETS.fixture b/tests/core/bxl/test_configured_target_data/transition/TARGETS.fixture new file mode 100644 index 0000000000000..b3b315c791533 --- /dev/null +++ b/tests/core/bxl/test_configured_target_data/transition/TARGETS.fixture @@ -0,0 +1,11 @@ +load(":defs.bzl", "unicorn_library") + +platform( + name = "p", + constraint_values = [], +) + +unicorn_library( + name = "rainbow", + default_target_platform = ":p", +) diff --git a/tests/core/bxl/test_configured_target_data/transition/defs.bzl b/tests/core/bxl/test_configured_target_data/transition/defs.bzl new file mode 100644 index 0000000000000..9143a378603fa --- /dev/null +++ b/tests/core/bxl/test_configured_target_data/transition/defs.bzl @@ -0,0 +1,25 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _unicorn_transition_impl(platform, refs): + _ignore = (platform, refs) # buildifier: disable=unused-variable + return PlatformInfo(label = "", configuration = ConfigurationInfo(constraints = {}, values = {})) + +unicorn_transition = transition( + impl = _unicorn_transition_impl, + refs = {}, +) + +def _unicorn_library_impl(ctx): + _ignore = ctx # buildifier: disable=unused-variable + return [DefaultInfo()] + +unicorn_library = rule( + impl = _unicorn_library_impl, + attrs = {}, + cfg = unicorn_transition, +) diff --git a/tests/core/bxl/test_dynamic.py b/tests/core/bxl/test_dynamic.py new file mode 100644 index 0000000000000..f1f4ae45e00a2 --- /dev/null +++ b/tests/core/bxl/test_dynamic.py @@ -0,0 +1,73 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
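+
+# These tests drive the `dynamic_output` fixtures in
+# test_dynamic_data/dynamic.bxl. As a rough sketch of the API they exercise
+# (names here are illustrative, not part of the fixture):
+#
+#     def _impl(ctx):
+#         actions = ctx.bxl_actions().actions
+#         src = actions.write("src", "hello")    # input artifact
+#         out = actions.declare_output("out")    # produced later
+#
+#         def _deferred(ctx, artifacts, outputs):
+#             # Runs once `src` is materialized; must write every output.
+#             data = artifacts[src].read_string()
+#             ctx.bxl_actions().actions.write(outputs[out], data)
+#
+#         actions.dynamic_output(dynamic = [src], inputs = [], outputs = [out.as_output()], f = _deferred)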
+
+# pyre-strict
+
+
+import json
+import re
+from pathlib import Path
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.buck_workspace import buck_test
+
+from buck2.tests.e2e_util.helper.golden import golden
+
+
+def _replace_hash(s: str) -> str:
+    return re.sub(r"\b[0-9a-f]{16}\b", "", s)
+
+
+@buck_test()
+async def test_bxl_dynamic_action(buck: Buck) -> None:
+
+    result = await buck.bxl(
+        "//:dynamic.bxl:dynamic_test",
+    )
+    outputs = result.stdout.strip()
+    assert Path(outputs).read_text() == "content"
+
+
+@buck_test()
+async def test_bxl_dynamic_with_bxl_ctx(buck: Buck) -> None:
+
+    result = await buck.bxl(
+        "//:dynamic.bxl:dynamic_test_with_bxl_ctx",
+    )
+
+    outputs = json.loads(result.stdout)
+    golden_result = {}
+    for k, v in outputs.items():
+        golden_result.update({k: _replace_hash(Path(v).read_text())})
+
+    golden(
+        output=json.dumps(golden_result, indent=2),
+        rel_path="happy_path_dynamic_ctx.golden.json",
+    )
+
+
+# Very simple test that the exec_deps/toolchains get propagated to the dynamic bxl_ctx correctly
+@buck_test()
+async def test_bxl_dynamic_execution_resolution(buck: Buck) -> None:
+
+    result = await buck.bxl(
+        "//:dynamic.bxl:dynamic_test_execution_resolution",
+    )
+
+    outputs = json.loads(result.stdout)
+
+    assert Path(outputs["dynamic"]).read_text() == Path(outputs["root"]).read_text()
+
+
+@buck_test()
+async def test_bxl_dynamic_incompatible_targets(buck: Buck) -> None:
+
+    result = await buck.bxl(
+        "//:dynamic.bxl:dynamic_test_incompatible_targets",
+    )
+
+    assert "Skipped 1 incompatible targets" in result.stderr
diff --git a/tests/core/bxl/test_dynamic_data/.buckconfig b/tests/core/bxl/test_dynamic_data/.buckconfig
new file mode 100644
index 0000000000000..1e1022e9e90af
--- /dev/null
+++ b/tests/core/bxl/test_dynamic_data/.buckconfig
@@ -0,0 +1,20 @@
+[buildfile]
+name=TARGETS.fixture
+
+[project]
+ignore=ignored
+
+[repositories]
+root = .
+prelude = prelude
+config = config
+
+[buck2]
+materializations = deferred
+enable_local_caching_of_re_artifacts = true
+sqlite_materializer_state = true
+sqlite_materializer_state_version = 0
+defer_write_actions = true
+
+[build]
+ execution_platforms = root//platforms:platforms
diff --git a/tests/core/bxl/test_dynamic_data/.buckroot b/tests/core/bxl/test_dynamic_data/.buckroot
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/bxl/test_dynamic_data/TARGETS.fixture b/tests/core/bxl/test_dynamic_data/TARGETS.fixture
new file mode 100644
index 0000000000000..e99e56f132f5c
--- /dev/null
+++ b/tests/core/bxl/test_dynamic_data/TARGETS.fixture
@@ -0,0 +1,35 @@
+load(":rules.bzl", "constraint_setting", "constraint_value", "foo_rule", "foo_runnable", "foo_toolchain", "rule_with_output")
+
+rule_with_output(
+    name = "my_output",
+    content = "my_content",
+)
+
+foo_rule(
+    name = "rule1",
+    env = {
+        "OUTPUT": "$(location :my_output)",
+    },
+)
+
+foo_runnable(
+    name = "runnable",
+)
+
+foo_toolchain(
+    name = "toolchain",
+)
+
+constraint_setting(
+    name = "constraint",
+)
+
+constraint_value(
+    name = "disable",
+    setting = ":constraint",
+)
+
+foo_rule(
+    name = "incompatible",
+    target_compatible_with = ["//:disable"],
+)
diff --git a/tests/core/bxl/test_dynamic_data/dynamic.bxl b/tests/core/bxl/test_dynamic_data/dynamic.bxl
new file mode 100644
index 0000000000000..0a4d025d65dcf
--- /dev/null
+++ b/tests/core/bxl/test_dynamic_data/dynamic.bxl
@@ -0,0 +1,163 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+def _dynamic(ctx):
+    action_factory = ctx.bxl_actions().actions
+
+    foo = action_factory.write("foo", "content")
+    dynamic = action_factory.declare_output("dynamic")
+
+    def my_deferred(ctx, artifacts, outputs):
+        content = artifacts[foo].read_string()
+        ctx.bxl_actions().actions.write(outputs[dynamic], content)
+
+    action_factory.dynamic_output(dynamic = [foo], inputs = [], outputs = [dynamic.as_output()], f = my_deferred)
+
+    ctx.output.print(ctx.output.ensure(dynamic).abs_path())
+
+dynamic_test = bxl_main(
+    impl = _dynamic,
+    cli_args = {
+    },
+)
+
+def _dynamic_test_with_bxl_ctx(ctx):
+    action_factory = ctx.bxl_actions().actions
+
+    query_params = action_factory.write_json("query_params", {"rule_type": "foo_rule", "universe": "root//..."})
+
+    configured_targets_output = action_factory.declare_output("configured_targets")
+    configured_targets_different_platform_output = action_factory.declare_output("configured_targets_different_platform")
+    providers_output = action_factory.declare_output("providers")
+    resolved_attrs_lazy_output = action_factory.declare_output("resolved_attrs_lazy")
+    resolved_attrs_eager_output = action_factory.declare_output("resolved_attrs_eager")
+
+    def my_deferred(ctx, artifacts, outputs):
+        params = artifacts[query_params].read_json()
+
+        # BXL-specific operations:
+
+        # Run cquery with platform1. Then, run analysis and get the providers and resolved attrs
+        target1 = ctx.cquery(target_platform = "root//platforms:platform1").kind(params["rule_type"], params["universe"])[0]
+        node1 = ctx.configured_targets(target1.label)
+        providers_collection = ctx.analysis(node1).providers()
+        lazy_attrs = node1.resolved_attrs_lazy(ctx)
+        eager_attrs = node1.resolved_attrs_eager(ctx)
+
+        # Run cquery with platform2
+        target2 = ctx.cquery(target_platform = "root//platforms:platform2").kind(params["rule_type"], params["universe"])[0]
+        node2 = ctx.configured_targets(target2.label)
+
+        actions = ctx.bxl_actions().actions
+        actions.write(outputs[configured_targets_output], str(node1))
+        actions.write(outputs[configured_targets_different_platform_output], str(node2))
+        actions.write(outputs[providers_output], str(providers_collection))
+        actions.write(outputs[resolved_attrs_eager_output], str(eager_attrs.env))
+        actions.write(outputs[resolved_attrs_lazy_output], str(lazy_attrs.get("env")))
+
+    action_factory.dynamic_output(
+        dynamic = [query_params],
+        inputs = [],
+        outputs = [
+            configured_targets_output.as_output(),
+            configured_targets_different_platform_output.as_output(),
+            providers_output.as_output(),
+            resolved_attrs_eager_output.as_output(),
+            resolved_attrs_lazy_output.as_output(),
+        ],
+        f = my_deferred,
+    )
+
+    output = {}
+    output.update({"node1": ctx.output.ensure(configured_targets_output).abs_path()})
+    output.update({"node2": ctx.output.ensure(configured_targets_different_platform_output).abs_path()})
+    output.update({"node1_providers": ctx.output.ensure(providers_output).abs_path()})
+    output.update({"node1_resolved_env_attr_eager": ctx.output.ensure(resolved_attrs_eager_output).abs_path()})
+    output.update({"node1_resolved_env_attr_lazy": ctx.output.ensure(resolved_attrs_lazy_output).abs_path()})
+
+    ctx.output.print_json(output)
+
+dynamic_test_with_bxl_ctx = bxl_main(
+    impl = 
_dynamic_test_with_bxl_ctx, + cli_args = { + }, +) + +def _dynamic_test_execution_resolution(ctx): + root_bxl_actions = ctx.bxl_actions(exec_deps = ["//:runnable"], toolchains = ["//:toolchain"]) + action_factory = root_bxl_actions.actions + + dynamic = action_factory.declare_output("dynamic") + + # unused - just used as a placeholder for `dynamic` in the dynamic_output call + foo = action_factory.write("foo", "content") + + def my_deferred(ctx, _artifacts, outputs): + bxl_actions = ctx.bxl_actions() + actions = bxl_actions.actions + + dynamic_output = {} + dynamic_output.update({"exec_dep_label": str(bxl_actions.exec_deps.keys()[0])}) + dynamic_output.update({"exec_dep": str(bxl_actions.exec_deps.values()[0])}) + dynamic_output.update({"toolchains_label": str(bxl_actions.toolchains.keys()[0])}) + dynamic_output.update({"toolchain_dep": str(bxl_actions.toolchains.values()[0])}) + + actions.write_json(outputs[dynamic], dynamic_output) + + action_factory.dynamic_output( + dynamic = [foo], + inputs = [], + outputs = [dynamic.as_output()], + f = my_deferred, + ) + + root_output = {} + root_output.update({"exec_dep_label": str(root_bxl_actions.exec_deps.keys()[0])}) + root_output.update({"exec_dep": str(root_bxl_actions.exec_deps.values()[0])}) + root_output.update({"toolchains_label": str(root_bxl_actions.toolchains.keys()[0])}) + root_output.update({"toolchain_dep": str(root_bxl_actions.toolchains.values()[0])}) + + root = action_factory.write_json("root", root_output) + + outputs = {} + outputs.update({"dynamic": ctx.output.ensure(dynamic).abs_path()}) + outputs.update({"root": ctx.output.ensure(root).abs_path()}) + + ctx.output.print_json(outputs) + +dynamic_test_execution_resolution = bxl_main( + impl = _dynamic_test_execution_resolution, + cli_args = { + }, +) + +def _dynamic_test_incompatible_targets(ctx): + action_factory = ctx.bxl_actions().actions + + output = action_factory.declare_output("output") + + # unused - just used as a placeholder for `dynamic` in the dynamic_output call + foo = action_factory.write("foo", "content") + + def my_deferred(ctx, _artifacts, outputs): + result = ctx.cquery(target_platform = "//platforms:platform1").deps("//:incompatible", 1) + ctx.bxl_actions().actions.write(outputs[output], str(result)) + + action_factory.dynamic_output( + dynamic = [foo], + inputs = [], + outputs = [output.as_output()], + f = my_deferred, + ) + + ctx.output.print(ctx.output.ensure(output).abs_path()) + +dynamic_test_incompatible_targets = bxl_main( + impl = _dynamic_test_incompatible_targets, + cli_args = { + }, +) diff --git a/tests/core/bxl/test_dynamic_data/happy_path_dynamic_ctx.golden.json b/tests/core/bxl/test_dynamic_data/happy_path_dynamic_ctx.golden.json new file mode 100644 index 0000000000000..cecf277ff3b12 --- /dev/null +++ b/tests/core/bxl/test_dynamic_data/happy_path_dynamic_ctx.golden.json @@ -0,0 +1,9 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "node1": "configured_target_node(name = root//:rule1 (root//platforms:platform1#), ...)", + "node2": "configured_target_node(name = root//:rule1 (root//platforms:platform2#), ...)", + "node1_providers": "Providers([DefaultInfo(sub_targets={}, default_outputs=[], other_outputs=[]), FooInfo(name=\"rule1_foo\", env=\"$(location ...)\")])", + "node1_resolved_env_attr_eager": "{\"OUTPUT\": \"$(location ...)\"}", + "node1_resolved_env_attr_lazy": "{\"OUTPUT\": \"$(location ...)\"}" +} \ No newline at end of file diff --git 
a/tests/core/bxl/test_dynamic_data/platforms/TARGETS.fixture b/tests/core/bxl/test_dynamic_data/platforms/TARGETS.fixture new file mode 100644 index 0000000000000..e1839ac21017c --- /dev/null +++ b/tests/core/bxl/test_dynamic_data/platforms/TARGETS.fixture @@ -0,0 +1,15 @@ +load(":defs.bzl", "execution_platforms", "target_platform") + +execution_platforms( + name = "platforms", +) + +target_platform( + name = "platform1", + visibility = ["PUBLIC"], +) + +target_platform( + name = "platform2", + visibility = ["PUBLIC"], +) diff --git a/tests/core/bxl/test_dynamic_data/platforms/defs.bzl b/tests/core/bxl/test_dynamic_data/platforms/defs.bzl new file mode 100644 index 0000000000000..ffce952274d1d --- /dev/null +++ b/tests/core/bxl/test_dynamic_data/platforms/defs.bzl @@ -0,0 +1,46 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _execution_platform(ctx): + platform = ExecutionPlatformInfo( + label = ctx.label.raw_target(), + configuration = ConfigurationInfo( + constraints = { + }, + values = {}, + ), + executor_config = CommandExecutorConfig( + local_enabled = True, + remote_enabled = True, + remote_cache_enabled = True, + remote_execution_properties = { + "platform": "linux-remote-execution", + }, + remote_execution_use_case = "buck2-testing", + ), + ) + + return [ + DefaultInfo(), + ExecutionPlatformRegistrationInfo(platforms = [platform]), + ] + +execution_platforms = rule(attrs = {}, impl = _execution_platform) + +def _target_platform(ctx): + return [ + DefaultInfo(), + PlatformInfo( + label = str(ctx.label.raw_target()), + configuration = ConfigurationInfo(constraints = {}, values = {}), + ), + ] + +target_platform = rule( + impl = _target_platform, + attrs = {}, +) diff --git a/tests/core/bxl/test_dynamic_data/prelude/prelude.bzl b/tests/core/bxl/test_dynamic_data/prelude/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/bxl/test_dynamic_data/rules.bzl b/tests/core/bxl/test_dynamic_data/rules.bzl new file mode 100644 index 0000000000000..53ebc7dcb92a0 --- /dev/null +++ b/tests/core/bxl/test_dynamic_data/rules.bzl @@ -0,0 +1,71 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
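+
+# Fixture rules for the dynamic tests above. Note that `foo_rule.env` values
+# are `attrs.arg()`, so macros such as "$(location :my_output)" in
+# TARGETS.fixture are resolved at analysis time; the resolved-attr files
+# written by dynamic.bxl capture that resolved form.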
+ +FooInfo = provider(fields = ["name", "env"]) + +def _impl_rule_with_output(ctx): + out = ctx.actions.write(ctx.attrs.name, ctx.attrs.content) + return [DefaultInfo(default_output = out)] + +rule_with_output = rule( + impl = _impl_rule_with_output, + attrs = { + "content": attrs.string(), + }, +) + +def _impl_foo_rule(ctx): + return [DefaultInfo(), FooInfo(name = ctx.attrs.name + "_foo", env = ctx.attrs.env.get("OUTPUT", None))] + +foo_rule = rule( + impl = _impl_foo_rule, + attrs = { + "env": attrs.dict(key = attrs.string(), value = attrs.arg(), sorted = False, default = {}), + }, +) + +def _impl_foo_runnable(_ctx): + return [ + DefaultInfo(), + RunInfo([ + "python3", + "-c", + "import sys; sys.exit(0)", + ]), + ] + +foo_runnable = rule(impl = _impl_foo_runnable, attrs = {}) + +# not really a toolchain rule, but just used to validate exec platform is propagated correctly +foo_toolchain = rule(impl = _impl_foo_runnable, attrs = {}, is_toolchain_rule = True) + +def _constraint_setting(ctx): + return [DefaultInfo(), ConstraintSettingInfo(label = ctx.label.raw_target())] + +constraint_setting = rule( + impl = _constraint_setting, + attrs = {}, +) + +def _constraint_value(ctx): + constraint_value = ConstraintValueInfo( + setting = ctx.attrs.setting[ConstraintSettingInfo], + label = ctx.label.raw_target(), + ) + return [ + DefaultInfo(), + constraint_value, + # Provide `ConfigurationInfo` from `constraint_value` so it could be used as select key. + ConfigurationInfo(constraints = { + constraint_value.setting.label: constraint_value, + }, values = {}), + ] + +constraint_value = rule( + impl = _constraint_value, + attrs = {"setting": attrs.dep(providers = [ConstraintSettingInfo])}, +) diff --git a/tests/core/bxl/test_not_bxl.py b/tests/core/bxl/test_not_bxl.py new file mode 100644 index 0000000000000..fb7e02f568e70 --- /dev/null +++ b/tests/core/bxl/test_not_bxl.py @@ -0,0 +1,31 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_not_bxl(buck: Buck) -> None: + await expect_failure( + buck.bxl( + "//not_bxl.bxl:not_bxl", + ), + stderr_regex="Expected value of type `bxl` but got `function", + ) + + +@buck_test() +async def test_not_allowed_now(buck: Buck) -> None: + await expect_failure( + buck.build(":"), + stderr_regex="This function can only be called from Bxl", + ) diff --git a/tests/core/bxl/test_not_bxl_data/.buckconfig b/tests/core/bxl/test_not_bxl_data/.buckconfig new file mode 100644 index 0000000000000..08e0b0a3d2118 --- /dev/null +++ b/tests/core/bxl/test_not_bxl_data/.buckconfig @@ -0,0 +1,5 @@ +[cells] + root = . 
+ +[buildfile] + name=TARGETS.fixture diff --git a/tests/core/bxl/test_not_bxl_data/.buckroot b/tests/core/bxl/test_not_bxl_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/bxl/test_not_bxl_data/TARGETS.fixture b/tests/core/bxl/test_not_bxl_data/TARGETS.fixture new file mode 100644 index 0000000000000..555488dfc52a5 --- /dev/null +++ b/tests/core/bxl/test_not_bxl_data/TARGETS.fixture @@ -0,0 +1 @@ +now() diff --git a/tests/core/bxl/test_not_bxl_data/not_bxl.bxl b/tests/core/bxl/test_not_bxl_data/not_bxl.bxl new file mode 100644 index 0000000000000..b4ea8cd82876f --- /dev/null +++ b/tests/core/bxl/test_not_bxl_data/not_bxl.bxl @@ -0,0 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def not_bxl(_ctx): + pass diff --git a/tests/core/bxl/test_output.py b/tests/core/bxl/test_output.py new file mode 100644 index 0000000000000..278b78cd0a31a --- /dev/null +++ b/tests/core/bxl/test_output.py @@ -0,0 +1,89 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_bxl_caching(buck: Buck) -> None: + result = await buck.bxl( + "//caching.bxl:print_caching", + ) + + assert "ran me" in result.stderr + assert "result print" in result.stdout + + result = await buck.bxl( + "//caching.bxl:print_caching", + ) + + assert "ran me" not in result.stderr + assert "result print" in result.stdout + + +@buck_test() +async def test_bxl_caching_with_target_platforms_specified(buck: Buck) -> None: + # run with platform1, result should be cached afterwards + result = await buck.bxl( + "//caching.bxl:caching_with_target_platforms", + "--target-platforms", + "root//:platform1", + ) + + assert "ran me" in result.stderr + assert "root//:platform1" in result.stdout + + # run with platform2, DICE should be invalidated and updated results should be + # cached afterwards + result = await buck.bxl( + "//caching.bxl:caching_with_target_platforms", + "--target-platforms", + "root//:platform2", + ) + + assert "ran me" in result.stderr + assert "root//:platform2" in result.stdout + + # run with platform1 again, we should already have cached results + result = await buck.bxl( + "//caching.bxl:caching_with_target_platforms", + "--target-platforms", + "root//:platform1", + ) + + assert "ran me" not in result.stderr + assert "root//:platform1" in result.stdout + + +@buck_test() +async def test_bxl_error_caching(buck: Buck) -> None: + result = await buck.bxl("//caching.bxl:print_error_caching") + assert "ran me" in result.stderr + assert "Skipped 1 incompatible targets" in result.stderr + assert "root//:incompatible" in result.stderr + + # output stream that writes to stderr should be cached, but regular stdlib print + # statements (which also write to stderr) will not be cached. 
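+    # In other words, on this second (cache-hit) invocation the
+    # incompatible-target messages below are replayed from the cached output
+    # stream, while the stdlib print in the bxl body does not run again.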
+ result = await buck.bxl("//caching.bxl:print_error_caching") + assert "ran me" not in result.stderr + assert "Skipped 1 incompatible targets" in result.stderr + assert "root//:incompatible" in result.stderr + + +@buck_test() +async def test_bxl_print_with_no_buckd(buck: Buck) -> None: + result = await buck.bxl( + "//caching.bxl:print_caching", + "--no-buckd", + ) + + assert "ran me" in result.stderr + assert "result print" in result.stdout diff --git a/tests/core/bxl/test_output_data/.buckconfig b/tests/core/bxl/test_output_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/bxl/test_output_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . + nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/bxl/test_output_data/.buckroot b/tests/core/bxl/test_output_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/bxl/test_output_data/TARGETS.fixture b/tests/core/bxl/test_output_data/TARGETS.fixture new file mode 100644 index 0000000000000..589f1060878e8 --- /dev/null +++ b/tests/core/bxl/test_output_data/TARGETS.fixture @@ -0,0 +1,30 @@ +stub(name = "the_binary") + +platform( + name = "platform", +) + +constraint_setting( + name = "constraint", +) + +constraint_value( + name = "disable", + constraint_setting = ":constraint", +) + +stub( + name = "incompatible", + default_target_platform = ":platform", + target_compatible_with = [":disable"], +) + +platform( + name = "platform1", + visibility = ["PUBLIC"], +) + +platform( + name = "platform2", + visibility = ["PUBLIC"], +) diff --git a/tests/core/bxl/test_output_data/caching.bxl b/tests/core/bxl/test_output_data/caching.bxl new file mode 100644 index 0000000000000..2dca07372612f --- /dev/null +++ b/tests/core/bxl/test_output_data/caching.bxl @@ -0,0 +1,37 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(ctx): + print("ran me") # buildifier: disable=print + + ctx.output.print("result print") + +print_caching = bxl_main( + impl = _impl, + cli_args = { + }, +) + +def _print_error_caching_impl(ctx): + print("ran me") # buildifier: disable=print + ctx.configured_targets("//:incompatible") + +print_error_caching = bxl_main( + impl = _print_error_caching_impl, + cli_args = {}, +) + +def _impl_caching_with_target_platforms(ctx): + print("ran me") # buildifier: disable=print + + ctx.output.print(ctx.configured_targets("root//:the_binary")) + +caching_with_target_platforms = bxl_main( + impl = _impl_caching_with_target_platforms, + cli_args = { + }, +) diff --git a/tests/core/bxl/test_target_universe.py b/tests/core/bxl/test_target_universe.py new file mode 100644 index 0000000000000..99c37f3176864 --- /dev/null +++ b/tests/core/bxl/test_target_universe.py @@ -0,0 +1,44 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
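+
+# Tests for bxl's target_universe API. With `keep_going = True`, targets whose
+# dependencies fail to load or resolve are dropped from the universe instead
+# of failing the whole computation, and the flag is only accepted for a single
+# target-pattern string literal (see the invalid-input test below).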
+ +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_bxl_target_universe_keep_going_no_errors(buck: Buck) -> None: + await buck.bxl( + "//target_universe.bxl:target_universe_keep_going_no_errors", + ) + + +@buck_test() +async def test_bxl_target_universe_universe_target_set(buck: Buck) -> None: + await buck.bxl( + "//target_universe.bxl:target_universe_universe_target_set", + ) + + +@buck_test() +async def test_bxl_target_universe_keep_going_with_errors(buck: Buck) -> None: + await buck.bxl( + "//keep_going.bxl:target_universe_keep_going_with_errors", + ) + + +@buck_test() +async def test_bxl_target_universe_keep_going_invalid_input(buck: Buck) -> None: + await expect_failure( + buck.bxl( + "//keep_going_invalid_input.bxl:invalid_input", + ), + stderr_regex="`keep_going` is currently only implemented for a single target pattern as a string literal", + ) diff --git a/tests/core/bxl/test_target_universe_data/.buckconfig b/tests/core/bxl/test_target_universe_data/.buckconfig new file mode 100644 index 0000000000000..32420b2f34a1e --- /dev/null +++ b/tests/core/bxl/test_target_universe_data/.buckconfig @@ -0,0 +1,13 @@ +[cells] + root = . + some_cell = some_cell + nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name=TARGETS.fixture diff --git a/tests/core/bxl/test_target_universe_data/.buckroot b/tests/core/bxl/test_target_universe_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/bxl/test_target_universe_data/bad_buildfile/TARGETS.fixture b/tests/core/bxl/test_target_universe_data/bad_buildfile/TARGETS.fixture new file mode 100644 index 0000000000000..37da7206bbb0b --- /dev/null +++ b/tests/core/bxl/test_target_universe_data/bad_buildfile/TARGETS.fixture @@ -0,0 +1,2 @@ +# buildifier: disable=no-effect +this is bad diff --git a/tests/core/bxl/test_target_universe_data/bad_targets/TARGETS.fixture b/tests/core/bxl/test_target_universe_data/bad_targets/TARGETS.fixture new file mode 100644 index 0000000000000..1d7cad2a3508b --- /dev/null +++ b/tests/core/bxl/test_target_universe_data/bad_targets/TARGETS.fixture @@ -0,0 +1,29 @@ +stub( + name = "declared_dep", + deps = [], +) + +stub( + name = "has_a_missing_dep", + deps = [":declared_dep", ":undeclared_dep"], +) + +stub( + name = "has_a_dep_with_package_listing_err", + deps = [":declared_dep", "//missing:package"], +) + +stub( + name = "has_a_dep_from_nonexistent_buildfile", + deps = ["root//missing_buildfile:nonexistent"], +) + +stub( + name = "has_a_dep_from_malformed_buildfile", + deps = ["root//bad_buildfile:bad"], +) + +stub( + name = "has_a_dep_not_visible", + deps = ["root//not_visible:not_visible"], +) diff --git a/tests/core/bxl/test_target_universe_data/good_targets/TARGETS.fixture b/tests/core/bxl/test_target_universe_data/good_targets/TARGETS.fixture new file mode 100644 index 0000000000000..1a2ab52ee53b4 --- /dev/null +++ b/tests/core/bxl/test_target_universe_data/good_targets/TARGETS.fixture @@ -0,0 +1,14 @@ +stub( + name = "declared_dep", +) + +stub( + name = "target1", + deps = [":declared_dep"], +) + +stub( + name = "target2", + deps = [":target1"], + visibility = ["PUBLIC"], +) diff --git a/tests/core/bxl/test_target_universe_data/keep_going.bxl b/tests/core/bxl/test_target_universe_data/keep_going.bxl new file mode 100644 index 
0000000000000..d446cfa1150bf --- /dev/null +++ b/tests/core/bxl/test_target_universe_data/keep_going.bxl @@ -0,0 +1,25 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _assert_eq(a, b): + if a != b: + fail("Expected {} == {}".format(a, b)) + +def _target_universe_keep_going_with_errors(ctx): + # recursive target pattern + result = ctx.target_universe("//bad_targets/...", keep_going = True).target_set() + _assert_eq(len(result), 1) + _assert_eq("root//bad_targets:declared_dep", str(result[0].label.raw_target())) + + # single target + result = ctx.target_universe("//bad_targets:has_a_missing_dep", keep_going = True).target_set() + _assert_eq(len(result), 0) + +target_universe_keep_going_with_errors = bxl_main( + impl = _target_universe_keep_going_with_errors, + cli_args = {}, +) diff --git a/tests/core/bxl/test_target_universe_data/keep_going_invalid_input.bxl b/tests/core/bxl/test_target_universe_data/keep_going_invalid_input.bxl new file mode 100644 index 0000000000000..6473678f0f78a --- /dev/null +++ b/tests/core/bxl/test_target_universe_data/keep_going_invalid_input.bxl @@ -0,0 +1,14 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _invalid_input(ctx): + ctx.target_universe(["//bad_targets/..."], keep_going = True) + +invalid_input = bxl_main( + impl = _invalid_input, + cli_args = {}, +) diff --git a/tests/core/bxl/test_target_universe_data/not_visible/TARGETS.fixture b/tests/core/bxl/test_target_universe_data/not_visible/TARGETS.fixture new file mode 100644 index 0000000000000..0f8ab375d1a23 --- /dev/null +++ b/tests/core/bxl/test_target_universe_data/not_visible/TARGETS.fixture @@ -0,0 +1,4 @@ +stub( + name = "not_visible", + visibility = ["//not_visible/..."], +) diff --git a/tests/core/bxl/test_target_universe_data/platforms/TARGETS.fixture b/tests/core/bxl/test_target_universe_data/platforms/TARGETS.fixture new file mode 100644 index 0000000000000..0115bdc9a0a0d --- /dev/null +++ b/tests/core/bxl/test_target_universe_data/platforms/TARGETS.fixture @@ -0,0 +1,4 @@ +platform( + name = "platform1", + visibility = ["PUBLIC"], +) diff --git a/tests/core/bxl/test_target_universe_data/some_cell/.buckconfig b/tests/core/bxl/test_target_universe_data/some_cell/.buckconfig new file mode 100644 index 0000000000000..236bdaafdd49c --- /dev/null +++ b/tests/core/bxl/test_target_universe_data/some_cell/.buckconfig @@ -0,0 +1,2 @@ +[buildfile] + name=TARGETS.fixture diff --git a/tests/core/bxl/test_target_universe_data/some_cell/TARGETS.fixture b/tests/core/bxl/test_target_universe_data/some_cell/TARGETS.fixture new file mode 100644 index 0000000000000..1afdf9b70ec25 --- /dev/null +++ b/tests/core/bxl/test_target_universe_data/some_cell/TARGETS.fixture @@ -0,0 +1,8 @@ +stub( + name = "inner", + deps = ["root//good_targets:target2", ":inner_declared_dep"], +) + +stub( + name = "inner_declared_dep", +) diff --git a/tests/core/bxl/test_target_universe_data/target_universe.bxl b/tests/core/bxl/test_target_universe_data/target_universe.bxl new file mode 100644 index 
0000000000000..86d221ae90b24 --- /dev/null +++ b/tests/core/bxl/test_target_universe_data/target_universe.bxl @@ -0,0 +1,47 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _assert_eq(a, b): + if a != b: + fail("Expected {} == {}".format(a, b)) + +def _target_universe_keep_going_no_errors(ctx): + pattern = "//good_targets/..." + target_universe = ctx.target_universe(pattern).target_set() + kind = ctx.cquery().kind(".*1", target_universe) + + target_universe_keep_going = ctx.target_universe(pattern, keep_going = True).target_set() + kind_with_keep_going = ctx.cquery().kind(".*1", target_universe_keep_going) + + _assert_eq(kind, kind_with_keep_going) + +target_universe_keep_going_no_errors = bxl_main( + impl = _target_universe_keep_going_no_errors, + cli_args = {}, +) + +def _target_universe_universe_target_set(ctx): + pattern = "some_cell//:inner" + + target_universe = ctx.target_universe(pattern) + direct_target_set = target_universe.target_set() + universe_target_set = target_universe.universe_target_set() + + # should include just the direct targets used to construct the universe + _assert_eq(direct_target_set, ctx.configured_targets(["some_cell//:inner"])) + + # should include all targets in the universe, including the ones outside of + # the direct targets' cells + expected_universe_set = ctarget_set() + expected_universe_set += ctx.configured_targets("root//good_targets/...") + expected_universe_set += ctx.configured_targets("some_cell//...") + _assert_eq(universe_target_set, expected_universe_set) + +target_universe_universe_target_set = bxl_main( + impl = _target_universe_universe_target_set, + cli_args = {}, +) diff --git a/tests/core/bxl/test_type_names_and_symbols.py b/tests/core/bxl/test_type_names_and_symbols.py new file mode 100644 index 0000000000000..2f60942f0e279 --- /dev/null +++ b/tests/core/bxl/test_type_names_and_symbols.py @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_cquery_ctx(buck: Buck) -> None: + await buck.bxl("//bxl/check_type_names_and_symbols.bxl:cquery_ctx") diff --git a/tests/core/bxl/test_type_names_and_symbols_data/.buckconfig b/tests/core/bxl/test_type_names_and_symbols_data/.buckconfig new file mode 100644 index 0000000000000..84ac86849c77c --- /dev/null +++ b/tests/core/bxl/test_type_names_and_symbols_data/.buckconfig @@ -0,0 +1,5 @@ +[buildfile] +name=TARGETS.fixture + +[cells] + root = . 
diff --git a/tests/core/bxl/test_type_names_and_symbols_data/.buckroot b/tests/core/bxl/test_type_names_and_symbols_data/.buckroot
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/bxl/test_type_names_and_symbols_data/TARGETS.fixture b/tests/core/bxl/test_type_names_and_symbols_data/TARGETS.fixture
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/bxl/test_type_names_and_symbols_data/bxl/check_type_names_and_symbols.bxl b/tests/core/bxl/test_type_names_and_symbols_data/bxl/check_type_names_and_symbols.bxl
new file mode 100644
index 0000000000000..b87c7a6885f64
--- /dev/null
+++ b/tests/core/bxl/test_type_names_and_symbols_data/bxl/check_type_names_and_symbols.bxl
@@ -0,0 +1,23 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+def _assert_eq(a, b):
+    if a != b:
+        fail("Expected {} == {}".format(a, b))
+
+def _pass_cquery_ctx(cquery_ctx: bxl.CqueryContext):
+    print(cquery_ctx)
+
+def _check_cquery_context(ctx: bxl.Context):
+    cquery_ctx = ctx.cquery()
+    _assert_eq(type(cquery_ctx), "bxl.CqueryContext")
+    _pass_cquery_ctx(cquery_ctx)
+
+cquery_ctx = bxl_main(
+    impl = _check_cquery_context,
+    cli_args = {},
+)
diff --git a/tests/core/bxl/test_type_names_and_symbols_data/prelude.bzl b/tests/core/bxl/test_type_names_and_symbols_data/prelude.bzl
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/bxl/test_typecheck.py b/tests/core/bxl/test_typecheck.py
new file mode 100644
index 0000000000000..836ced1b27a15
--- /dev/null
+++ b/tests/core/bxl/test_typecheck.py
@@ -0,0 +1,23 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.asserts import expect_failure
+from buck2.tests.e2e_util.buck_workspace import buck_test
+
+
+@buck_test()
+async def test_typecheck(buck: Buck) -> None:
+    result = await expect_failure(
+        buck.bxl(
+            "//:test_typecheck.bxl:main",
+        )
+    )
+    assert "Expected type `int` but got `str`" in result.stderr
diff --git a/tests/core/bxl/test_typecheck_data/.buckconfig b/tests/core/bxl/test_typecheck_data/.buckconfig
new file mode 100644
index 0000000000000..d6657868aa992
--- /dev/null
+++ b/tests/core/bxl/test_typecheck_data/.buckconfig
@@ -0,0 +1,6 @@
+[buildfile]
+name=TARGETS.fixture
+
+[repositories]
+prelude = .
+config = config
diff --git a/tests/core/bxl/test_typecheck_data/.buckroot b/tests/core/bxl/test_typecheck_data/.buckroot
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/bxl/test_typecheck_data/prelude.bzl b/tests/core/bxl/test_typecheck_data/prelude.bzl
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/bxl/test_typecheck_data/test_typecheck.bxl b/tests/core/bxl/test_typecheck_data/test_typecheck.bxl
new file mode 100644
index 0000000000000..c924d83cf5f76
--- /dev/null
+++ b/tests/core/bxl/test_typecheck_data/test_typecheck.bxl
@@ -0,0 +1,20 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl( + # starlark-lint-disable unused-argument + ctx): # @unused + fail("not needed in test") + +main = bxl_main( + impl = _impl, + cli_args = { + }, +) + +def wrong_types(x: str) -> int: + return x diff --git a/tests/core/cells/BUCK b/tests/core/cells/BUCK new file mode 100644 index 0000000000000..4d03342f05422 --- /dev/null +++ b/tests/core/cells/BUCK @@ -0,0 +1,44 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_cell_aliases", + srcs = ["test_cell_aliases.py"], + data_dir = "test_cell_aliases_data", + deps = [ + "fbcode//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_file_watcher_resolution", + srcs = ["test_file_watcher_resolution.py"], + data_dir = "test_file_watcher_resolution_data", +) + +buck2_e2e_test( + name = "test_buckconfig_paths", + srcs = ["test_buckconfig_paths.py"], + data_dir = "test_buckconfig_paths_data", +) + +buck2_e2e_test( + name = "test_reuse_current_config", + srcs = ["test_reuse_current_config.py"], + data_dir = "test_reuse_current_config_data", + deps = ["//buck2/tests/e2e_util:assert_occurrences"], +) + +buck2_e2e_test( + name = "test_empty_buckconfig", + srcs = ["test_empty_buckconfig.py"], + data_dir = "test_empty_buckconfig_data", +) + +buck2_e2e_test( + name = "test_ignore_state_invalidation", + srcs = ["test_ignore_state_invalidation.py"], + data_dir = "test_ignore_state_invalidation_data", + deps = ["//buck2/tests/e2e_util:utils"], +) diff --git a/tests/core/cells/test_buckconfig_paths.py b/tests/core/cells/test_buckconfig_paths.py new file mode 100644 index 0000000000000..9fab3d266959e --- /dev/null +++ b/tests/core/cells/test_buckconfig_paths.py @@ -0,0 +1,71 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
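+
+# These tests exercise buckconfig file includes (the `<file:path>` directive)
+# and how their relative paths interact with symlinks and with files that
+# live outside the repo.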
+
+# pyre-strict
+
+from pathlib import Path
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.asserts import expect_failure
+from buck2.tests.e2e_util.buck_workspace import buck_test
+
+
+@buck_test(data_dir="include_external")
+async def test_include_external_file(buck: Buck) -> None:
+    # Note that the repo is inside a tempdir
+    (buck.cwd.parent / "extra").write_text("[abc]\ndef=x", encoding="utf-8")
+    await expect_failure(
+        buck.audit_config("--cell", "root"),
+        stderr_regex="Improperly include directive path",
+    )
+
+
+@buck_test(data_dir="empty", skip_for_os=["windows"])
+async def test_external_symlink_resolution(buck: Buck, tmp_path: Path) -> None:
+    base = tmp_path / "base"
+    (base / "b" / "bb").mkdir(parents=True)
+    (base / "a").mkdir()
+    (base / "a" / "aa").symlink_to("../b/bb")
+    (base / "b" / "included").write_text("[sec]\nval = physical", encoding="utf-8")
+    (base / "a" / "included").write_text("[sec]\nval = logical", encoding="utf-8")
+
+    (base / "b" / "bb" / "config").write_text("<file:../included>", encoding="utf-8")
+
+    config_via_symlink = base / "a" / "aa" / "config"
+
+    res = await buck.audit_config(
+        "--cell", "root", "--config-file", str(config_via_symlink)
+    )
+    assert "val = physical" in res.stdout
+
+
+@buck_test(data_dir="empty")
+async def test_changing_external_include(buck: Buck) -> None:
+    extra = buck.cwd.parent / "extra"
+    extra.write_text("[abc]\n def = 1", encoding="utf-8")
+
+    # Start the daemon and build once
+    await buck.audit_config(
+        "--all-cells", env={"BUCK2_TEST_EXTRA_EXTERNAL_CONFIG": str(extra)}
+    )
+
+    # Change the file and build again
+    extra.write_text("[abc]\n def = 2", encoding="utf-8")
+
+    res = await buck.audit_config("--cell", "root", "abc.def")
+    assert "[abc]\n def = 2" in res.stdout
+    res = await buck.audit_config("--cell", "cell", "abc.def")
+    assert "[abc]\n def = 2" in res.stdout
+
+
+@buck_test(data_dir="include_through_symlink")
+async def test_external_symlink_source_file(buck: Buck) -> None:
+    external_dir = buck.cwd.parent / "extra"
+    external_dir.mkdir()
+    (buck.cwd / "repo_dir").symlink_to(external_dir)
+
+    await buck.audit_config("--cell", "root", "abc.def")
diff --git a/tests/core/cells/test_buckconfig_paths_data/.buckroot b/tests/core/cells/test_buckconfig_paths_data/.buckroot
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/cells/test_buckconfig_paths_data/empty/.buckconfig b/tests/core/cells/test_buckconfig_paths_data/empty/.buckconfig
new file mode 100644
index 0000000000000..faff4c07c2d58
--- /dev/null
+++ b/tests/core/cells/test_buckconfig_paths_data/empty/.buckconfig
@@ -0,0 +1,6 @@
+[cells]
+ root = .
+ cell = cell
+
+[buildfile]
+ name = TARGETS.fixture
diff --git a/tests/core/cells/test_buckconfig_paths_data/empty/.buckroot b/tests/core/cells/test_buckconfig_paths_data/empty/.buckroot
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/cells/test_buckconfig_paths_data/empty/cell/.buckconfig b/tests/core/cells/test_buckconfig_paths_data/empty/cell/.buckconfig
new file mode 100644
index 0000000000000..9f36f679bda9c
--- /dev/null
+++ b/tests/core/cells/test_buckconfig_paths_data/empty/cell/.buckconfig
@@ -0,0 +1,2 @@
+[buildfile]
+ name = TARGETS.fixture
diff --git a/tests/core/cells/test_buckconfig_paths_data/include_external/.buckconfig b/tests/core/cells/test_buckconfig_paths_data/include_external/.buckconfig
new file mode 100644
index 0000000000000..905ec683356e2
--- /dev/null
+++ b/tests/core/cells/test_buckconfig_paths_data/include_external/.buckconfig
@@ -0,0 +1,4 @@
+[cells]
+ root = .
+
+<file:../extra>
diff --git a/tests/core/cells/test_buckconfig_paths_data/include_external/.buckroot b/tests/core/cells/test_buckconfig_paths_data/include_external/.buckroot
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/cells/test_buckconfig_paths_data/include_through_symlink/.buckconfig b/tests/core/cells/test_buckconfig_paths_data/include_through_symlink/.buckconfig
new file mode 100644
index 0000000000000..b898eb827fc77
--- /dev/null
+++ b/tests/core/cells/test_buckconfig_paths_data/include_through_symlink/.buckconfig
@@ -0,0 +1,4 @@
+[cells]
+ root = .
+
+
diff --git a/tests/core/cells/test_buckconfig_paths_data/include_through_symlink/.buckroot b/tests/core/cells/test_buckconfig_paths_data/include_through_symlink/.buckroot
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/cells/test_cell_aliases.py b/tests/core/cells/test_cell_aliases.py
new file mode 100644
index 0000000000000..3fcaf57504722
--- /dev/null
+++ b/tests/core/cells/test_cell_aliases.py
@@ -0,0 +1,17 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.buck_workspace import buck_test
+
+
+@buck_test()
+async def test_target_aliases(buck: Buck) -> None:
+    await buck.uquery("foo//:t")
diff --git a/tests/core/cells/test_cell_aliases_data/.buckconfig b/tests/core/cells/test_cell_aliases_data/.buckconfig
new file mode 100644
index 0000000000000..f2f06d50f55d8
--- /dev/null
+++ b/tests/core/cells/test_cell_aliases_data/.buckconfig
@@ -0,0 +1,5 @@
+[repositories]
+ root = .
+ prelude = .
+ foo = foo + bar = bar diff --git a/tests/core/cells/test_cell_aliases_data/.buckroot b/tests/core/cells/test_cell_aliases_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/cells/test_cell_aliases_data/bar/.buckconfig b/tests/core/cells/test_cell_aliases_data/bar/.buckconfig new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/cells/test_cell_aliases_data/foo/.buckconfig b/tests/core/cells/test_cell_aliases_data/foo/.buckconfig new file mode 100644 index 0000000000000..28d6148458e0b --- /dev/null +++ b/tests/core/cells/test_cell_aliases_data/foo/.buckconfig @@ -0,0 +1,5 @@ +[repository_aliases] + bar_alias = bar + +[buildfile] +name=TARGETS.fixture diff --git a/tests/core/cells/test_cell_aliases_data/foo/TARGETS.fixture b/tests/core/cells/test_cell_aliases_data/foo/TARGETS.fixture new file mode 100644 index 0000000000000..e8e38f7c810d5 --- /dev/null +++ b/tests/core/cells/test_cell_aliases_data/foo/TARGETS.fixture @@ -0,0 +1,6 @@ +load(":defs.bzl", "r") + +r( + name = "t", + dep = "bar_alias//:fake_target", # uquery only test +) diff --git a/tests/core/cells/test_cell_aliases_data/foo/defs.bzl b/tests/core/cells/test_cell_aliases_data/foo/defs.bzl new file mode 100644 index 0000000000000..30eecc9b97c04 --- /dev/null +++ b/tests/core/cells/test_cell_aliases_data/foo/defs.bzl @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(_ctx): + pass + +r = rule( + impl = _impl, + attrs = { + "dep": attrs.dep(), + }, +) diff --git a/tests/core/cells/test_cell_aliases_data/prelude.bzl b/tests/core/cells/test_cell_aliases_data/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/cells/test_empty_buckconfig.py b/tests/core/cells/test_empty_buckconfig.py new file mode 100644 index 0000000000000..58e3e082bb45b --- /dev/null +++ b/tests/core/cells/test_empty_buckconfig.py @@ -0,0 +1,21 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_empty_buckconfig(buck: Buck) -> None: + await expect_failure( + buck.uquery("//..."), + stderr_regex="No cell name for the root path", + ) diff --git a/tests/core/cells/test_empty_buckconfig_data/.buckconfig b/tests/core/cells/test_empty_buckconfig_data/.buckconfig new file mode 100644 index 0000000000000..aeb7c9c295eec --- /dev/null +++ b/tests/core/cells/test_empty_buckconfig_data/.buckconfig @@ -0,0 +1,2 @@ +# buckconfig is empty, but buck2 should still create a root cell, +# and report missing prelude cell. 
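+# (test_empty_buckconfig expects uquery here to fail with
+# "No cell name for the root path".)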
diff --git a/tests/core/cells/test_empty_buckconfig_data/.buckroot b/tests/core/cells/test_empty_buckconfig_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/cells/test_file_watcher_resolution.py b/tests/core/cells/test_file_watcher_resolution.py new file mode 100644 index 0000000000000..b60271942cb57 --- /dev/null +++ b/tests/core/cells/test_file_watcher_resolution.py @@ -0,0 +1,38 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_changing_cell_location_bug(buck: Buck) -> None: + await buck.targets("foo//:", "bar//:") + + # Switch the location of the 2 cells + (buck.cwd / ".buckconfig").write_text( + "[cells]\nfoo=bar\nbar=foo\nroot=.\nprelude=.\n" + ) + + # Make sure buck picks up the `CellResolver` updates + await buck.targets("foo//:", "bar//:") + + (buck.cwd / "foo" / "TARGETS.fixture").write_text("fail('error')") + + # FIXME(JakobDegen): The change to the `TARGETS.fixture` file does not get picked up by buck. + # The cause is that the file watcher always invalidates injected keys computed from `CellPath`s, + # but the `CellResolver` that it uses to map `ProjectRelativePath`s to `CellPath`s is computed + # once at daemon startup and never updated. So concretely, the file update above results in the + # cell path `bar//TARGETS.fixture` being invalidated, which means the targets in `foo//:` are + # never recomputed. + # + # This is just one example, there's a thousand other ways that you can change the `CellResolver` + # to create similar bugs. + await buck.targets("foo//:", "bar//:") diff --git a/tests/core/cells/test_file_watcher_resolution_data/.buckconfig b/tests/core/cells/test_file_watcher_resolution_data/.buckconfig new file mode 100644 index 0000000000000..7bf7ae1eded46 --- /dev/null +++ b/tests/core/cells/test_file_watcher_resolution_data/.buckconfig @@ -0,0 +1,5 @@ +[cells] + foo = foo + bar = bar + root = . + prelude = . 
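+# test_changing_cell_location_bug rewrites this file at runtime to swap the
+# foo and bar cell roots; see the FIXME in test_file_watcher_resolution.py.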
diff --git a/tests/core/cells/test_file_watcher_resolution_data/.buckroot b/tests/core/cells/test_file_watcher_resolution_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/cells/test_file_watcher_resolution_data/bar/.buckconfig b/tests/core/cells/test_file_watcher_resolution_data/bar/.buckconfig new file mode 100644 index 0000000000000..9f36f679bda9c --- /dev/null +++ b/tests/core/cells/test_file_watcher_resolution_data/bar/.buckconfig @@ -0,0 +1,2 @@ +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/cells/test_file_watcher_resolution_data/bar/TARGETS.fixture b/tests/core/cells/test_file_watcher_resolution_data/bar/TARGETS.fixture new file mode 100644 index 0000000000000..bb7b160deb370 --- /dev/null +++ b/tests/core/cells/test_file_watcher_resolution_data/bar/TARGETS.fixture @@ -0,0 +1 @@ +# Intentionally empty diff --git a/tests/core/cells/test_file_watcher_resolution_data/foo/.buckconfig b/tests/core/cells/test_file_watcher_resolution_data/foo/.buckconfig new file mode 100644 index 0000000000000..9f36f679bda9c --- /dev/null +++ b/tests/core/cells/test_file_watcher_resolution_data/foo/.buckconfig @@ -0,0 +1,2 @@ +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/cells/test_file_watcher_resolution_data/foo/TARGETS.fixture b/tests/core/cells/test_file_watcher_resolution_data/foo/TARGETS.fixture new file mode 100644 index 0000000000000..bb7b160deb370 --- /dev/null +++ b/tests/core/cells/test_file_watcher_resolution_data/foo/TARGETS.fixture @@ -0,0 +1 @@ +# Intentionally empty diff --git a/tests/core/cells/test_file_watcher_resolution_data/prelude.bzl b/tests/core/cells/test_file_watcher_resolution_data/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/cells/test_ignore_state_invalidation.py b/tests/core/cells/test_ignore_state_invalidation.py new file mode 100644 index 0000000000000..12859905f86b9 --- /dev/null +++ b/tests/core/cells/test_ignore_state_invalidation.py @@ -0,0 +1,103 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
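+
+# "Ignore state invalidation" here means: changing
+# buck2_re_client.override_use_case (via --config, .buckconfig.local, or
+# --config-file) should not invalidate the daemon's DICE state. The helpers
+# below check this through instant events in the event log:
+# DiceEqualityCheck.is_equal for commands that reuse the previous state
+# wholesale, and CellConfigDiff.config_diff_count == 0 for config changes
+# recognized as irrelevant.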
+ +# pyre-strict + + +import tempfile + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.utils import filter_events + + +async def check_dice_equality(buck: Buck) -> None: + dice_equal = await filter_events( + buck, + "Event", + "data", + "Instant", + "data", + "DiceEqualityCheck", + "is_equal", + ) + assert len(dice_equal) == 1 + assert dice_equal[0] is True + + +async def check_config_is_the_same(buck: Buck) -> None: + diff_count = await filter_events( + buck, + "Event", + "data", + "Instant", + "data", + "CellConfigDiff", + "config_diff_count", + ) + assert len(diff_count) == 1 + assert diff_count[0] == 0 + + +@buck_test() +async def test_ignore_state_invalidation_with_re_override_in_arg(buck: Buck) -> None: + # Add arg to switch to buck2-user + await buck.build( + "root//:simple", + "--config", + "buck2_re_client.override_use_case=buck2-user", + ) + # No arg, default is buck2-default + await buck.build("root//:simple") + await check_dice_equality(buck) + # Add arg to switch to buck2-user again + await buck.build( + "root//:simple", + "--config", + "buck2_re_client.override_use_case=buck2-user", + ) + await check_dice_equality(buck) + + +@buck_test() +async def test_ignore_state_invalidation_with_re_override_in_config(buck: Buck) -> None: + # Default is buck2-default + await buck.build("root//:simple") + # Add config to switch to buck2-user + with open(buck.cwd / ".buckconfig.local", "w") as f: + f.write("[buck2_re_client]\n") + f.write("override_use_case = buck2-user\n") + await buck.build("root//:simple") + await check_config_is_the_same(buck) + # Add config to return to buck2-default + with open(buck.cwd / ".buckconfig.local", "w") as f: + f.write("[buck2_re_client]\n") + f.write("override_use_case = buck2-default\n") + await buck.build("root//:simple") + await check_config_is_the_same(buck) + + +@buck_test() +async def test_ignore_state_invalidation_with_re_override_in_external_config( + buck: Buck, +) -> None: + # Default is buck2-default + await buck.build("root//:simple") + # Add config to switch to buck2-user + with tempfile.NamedTemporaryFile("w", delete=False) as f: + f.write("[buck2_re_client]\n") + f.write("override_use_case = buck2-user\n") + f.close() + await buck.build("root//:simple", "--config-file", f.name) + await check_config_is_the_same(buck) + # Add config to return to buck2-default + with tempfile.NamedTemporaryFile("w", delete=False) as f: + f.write("[buck2_re_client]\n") + f.write("override_use_case = buck2-default\n") + f.close() + await buck.build("root//:simple", "--config-file", f.name) + await check_config_is_the_same(buck) diff --git a/tests/core/cells/test_ignore_state_invalidation_data/.buckconfig b/tests/core/cells/test_ignore_state_invalidation_data/.buckconfig new file mode 100644 index 0000000000000..22e652e3c984b --- /dev/null +++ b/tests/core/cells/test_ignore_state_invalidation_data/.buckconfig @@ -0,0 +1,5 @@ +[buildfile] +name=TARGETS.fixture + +[cells] +root = . 
diff --git a/tests/core/cells/test_ignore_state_invalidation_data/.buckroot b/tests/core/cells/test_ignore_state_invalidation_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/cells/test_ignore_state_invalidation_data/TARGETS.fixture b/tests/core/cells/test_ignore_state_invalidation_data/TARGETS.fixture new file mode 100644 index 0000000000000..cae9c67462cac --- /dev/null +++ b/tests/core/cells/test_ignore_state_invalidation_data/TARGETS.fixture @@ -0,0 +1,3 @@ +load(":defs.bzl", "simple") + +simple(name = "simple") diff --git a/tests/core/cells/test_ignore_state_invalidation_data/defs.bzl b/tests/core/cells/test_ignore_state_invalidation_data/defs.bzl new file mode 100644 index 0000000000000..7c46e6860a90d --- /dev/null +++ b/tests/core/cells/test_ignore_state_invalidation_data/defs.bzl @@ -0,0 +1,30 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _simple(ctx): + re_use_case = read_config("buck2_re_client", "override_use_case") + if re_use_case != None: + fail("RE use case is set to: {}".format(re_use_case)) + output = ctx.actions.declare_output("output") + run = ctx.actions.write( + "run.py", + [ + "import os", + "import sys", + "build_id = os.environ[\"BUCK_BUILD_ID\"]", + "with open(sys.argv[1], 'w') as f:", + " f.write(f'{build_id}\\n')", + ], + ) + ctx.actions.run( + cmd_args(["python3", run, output.as_output()]), + category = "test_category", + ) + + return [DefaultInfo(default_output = output)] + +simple = rule(impl = _simple, attrs = {}) diff --git a/tests/core/cells/test_reuse_current_config.py b/tests/core/cells/test_reuse_current_config.py new file mode 100644 index 0000000000000..490287d44a7b6 --- /dev/null +++ b/tests/core/cells/test_reuse_current_config.py @@ -0,0 +1,71 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_reuse_current_config_with_config_overrides_and_previous_invocation( + buck: Buck, + tmp_path: Path, +) -> None: + result_file = await buck.audit_config( + "test.key", + "--style", + "json", + ) + + assert result_file.get_json().get("test.key") == "val" + + config_override = tmp_path / "config_override.bcfg" + config_override.write_text("[test]\n key = override\n") + + result_file = await buck.audit_config( + "--config-file", + str(config_override), + "--config", + "test.key2=override2", + "--reuse-current-config", + "--style", + "json", + ) + + assert result_file.get_json().get("test.key") == "val" + assert result_file.get_json().get("test.key2") is None + assert "using current config instead" in result_file.stderr + + +@buck_test() +async def test_reuse_current_config_with_config_overrides_and_no_previous_invocation( + buck: Buck, +) -> None: + result_file = await buck.audit_config( + "--config", + "test.key=override", + "--style", + "json", + "--reuse-current-config", + ) + assert result_file.get_json().get("test.key") == "override" + assert "Ignoring --reuse-current-config flag" in result_file.stderr + + +@buck_test() +async def test_reuse_current_config_with_no_previous_invocation(buck: Buck) -> None: + result_file = await buck.audit_config( + "test.key", + "--style", + "json", + "--reuse-current-config", + ) + assert result_file.get_json().get("test.key") == "val" + assert "Ignoring --reuse-current-config flag" in result_file.stderr diff --git a/tests/core/cells/test_reuse_current_config_data/.buckconfig b/tests/core/cells/test_reuse_current_config_data/.buckconfig new file mode 100644 index 0000000000000..8221b1b3df380 --- /dev/null +++ b/tests/core/cells/test_reuse_current_config_data/.buckconfig @@ -0,0 +1,5 @@ +[cells] + root = . + +[test] + key = val diff --git a/tests/core/cells/test_reuse_current_config_data/.buckroot b/tests/core/cells/test_reuse_current_config_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/clean/BUCK b/tests/core/clean/BUCK new file mode 100644 index 0000000000000..967cdbba04c55 --- /dev/null +++ b/tests/core/clean/BUCK @@ -0,0 +1,9 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_clean", + srcs = ["test_clean.py"], + data_dir = "test_clean_data", +) diff --git a/tests/core/clean/test_clean.py b/tests/core/clean/test_clean.py new file mode 100644 index 0000000000000..562bf278e7843 --- /dev/null +++ b/tests/core/clean/test_clean.py @@ -0,0 +1,97 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
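Read together, the three tests above pin down the flag's precedence: with a previous invocation, `--reuse-current-config` discards fresh overrides and warns; with no previous invocation, the flag itself is ignored. A toy model of that decision, inferred only from the asserted stderr strings (this is not buck2's client code):

# Toy model of --reuse-current-config precedence; logic is assumed.
from typing import Optional

def effective_config(
    previous: Optional[dict],
    overrides: dict,
    reuse_current: bool,
) -> dict:
    if reuse_current:
        if previous is not None:
            # Fresh overrides are dropped, matching "using current config instead".
            print("using current config instead")
            return dict(previous)
        # Nothing to reuse, matching "Ignoring --reuse-current-config flag".
        print("Ignoring --reuse-current-config flag")
    return {**(previous or {}), **overrides}

assert effective_config({"test.key": "val"}, {"test.key2": "override2"}, True) == {"test.key": "val"}
assert effective_config(None, {"test.key": "override"}, True) == {"test.key": "override"}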
+ +# pyre-strict + + +import os +import platform +from typing import Iterable + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_clean(buck: Buck) -> None: + build_result = await buck.build("root//:trivial_build") + build_report = build_result.get_build_report() + build_report_outputs = [ + str(output) + for output in build_report.outputs_for_target("root//:trivial_build") + ] + + clean_result = await buck.clean() + clean_paths = tuple(filter(None, clean_result.stderr.split("\n"))) + + for output in build_report_outputs: + assert output.startswith(clean_paths) + + _assert_all_paths_do_not_exist(build_report_outputs) + + +@buck_test() +async def test_clean_dry_run(buck: Buck) -> None: + build_result = await buck.build("root//:trivial_build", "--show-output") + build_report = build_result.get_build_report() + build_report_outputs = [ + str(output) + for output in build_report.outputs_for_target("root//:trivial_build") + ] + + dry_clean_result = await buck.clean("--dry-run") + + dry_clean_paths = set( + filter( + is_buck_path, + dry_clean_result.stderr.split("\n"), + ) + ) + _assert_all_paths_exist(dry_clean_paths) + + dry_clean_paths_tuple = tuple(dry_clean_paths) + for output in build_report_outputs: + assert output.startswith(dry_clean_paths_tuple) + + _assert_all_paths_exist(build_report_outputs) + + # Run clean without dry-run and make sure all files are removed now + clean_result = await buck.clean() + clean_paths = set( + filter( + is_buck_path, + clean_result.stderr.split("\n"), + ) + ) + # dry_clean_paths and clean_paths should be the same + assert clean_paths == dry_clean_paths, ( + "clean and dry-run clean disagree on: " + f"{sorted(clean_paths ^ dry_clean_paths)}" + ) + + _assert_all_paths_do_not_exist(clean_paths) + + +def is_buck_path(x: str) -> bool: + if platform.system() == "Windows": + return "\\.buck\\buckd\\" in x or "\\buck-out\\" in x + else: + return "/.buck/buckd/" in x or "/buck-out/" in x + + +def _assert_all_paths_exist(paths: Iterable[str]) -> None: + for path in paths: + assert os.path.exists(path) + + +def _assert_all_paths_do_not_exist(paths: Iterable[str]) -> None: + for path in paths: + if os.path.exists(f"{path}/buckd.lifecycle"): + # Clean keeps lifecycle file in daemon dir. + assert os.listdir(path) == ["buckd.lifecycle"] + else: + assert not os.path.exists(path) diff --git a/tests/core/clean/test_clean_data/.buckconfig b/tests/core/clean/test_clean_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/clean/test_clean_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = .
+ nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/clean/test_clean_data/.buckroot b/tests/core/clean/test_clean_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/clean/test_clean_data/TARGETS.fixture b/tests/core/clean/test_clean_data/TARGETS.fixture new file mode 100644 index 0000000000000..ee25ee52a7a58 --- /dev/null +++ b/tests/core/clean/test_clean_data/TARGETS.fixture @@ -0,0 +1 @@ +trivial_build(name = "trivial_build") diff --git a/tests/core/client/BUCK b/tests/core/client/BUCK new file mode 100644 index 0000000000000..6946d13e5cea2 --- /dev/null +++ b/tests/core/client/BUCK @@ -0,0 +1,15 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_argfiles", + srcs = ["test_argfiles.py"], + data_dir = "test_argfiles_data", +) + +buck2_e2e_test( + name = "test_common_opts", + srcs = ["test_common_opts.py"], + data_dir = "test_common_opts_data", +) diff --git a/tests/core/client/test_argfiles.py b/tests/core/client/test_argfiles.py new file mode 100644 index 0000000000000..8be653df748ee --- /dev/null +++ b/tests/core/client/test_argfiles.py @@ -0,0 +1,51 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_argfile_with_cell(buck: Buck) -> None: + res = await buck.audit_config("@cell1//argfile", "--cell", "root", "foo.bar") + assert "bar = 1" in res.stdout + + +@buck_test() +async def test_argfile_from_cwd_cell(buck: Buck) -> None: + res = await buck.audit_config( + "@//argfile", + "--cell", + "root", + "foo.bar", + rel_cwd=Path("cell1"), + ) + assert "bar = 1" in res.stdout + + +@buck_test() +async def test_executable_argfile(buck: Buck) -> None: + res = await buck.audit_config( + "@//exec_argfile.py#iphonesimulator-x86_64", "--cell", "root", "foo.bar" + ) + assert "bar = 1" in res.stdout + + +@buck_test() +async def test_stdin_argfile(buck: Buck) -> None: + res = await buck.audit_config( + "@-", + "--cell", + "root", + "foo.bar", + input=str.encode("--config=foo.bar=1"), + ) + assert "bar = 1" in res.stdout diff --git a/tests/core/client/test_argfiles_data/.buckconfig b/tests/core/client/test_argfiles_data/.buckconfig new file mode 100644 index 0000000000000..0716628cbb382 --- /dev/null +++ b/tests/core/client/test_argfiles_data/.buckconfig @@ -0,0 +1,6 @@ +[cells] + root = . 
+ cell1 = cell1 + +[foo] + bar = 0 diff --git a/tests/core/client/test_argfiles_data/.buckroot b/tests/core/client/test_argfiles_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/client/test_argfiles_data/cell1/argfile b/tests/core/client/test_argfiles_data/cell1/argfile new file mode 100644 index 0000000000000..7613364c16631 --- /dev/null +++ b/tests/core/client/test_argfiles_data/cell1/argfile @@ -0,0 +1 @@ +--config=foo.bar=1 diff --git a/tests/core/client/test_argfiles_data/exec_argfile.py b/tests/core/client/test_argfiles_data/exec_argfile.py new file mode 100644 index 0000000000000..99f3a6df95cf9 --- /dev/null +++ b/tests/core/client/test_argfiles_data/exec_argfile.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +import argparse +import os + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--flavors", default=None) + ns = parser.parse_args() + + assert ns.flavors == "iphonesimulator-x86_64" + assert os.environ["BUCK2_ARG_FILE"] == "1" + + print("--config=foo.bar=1") diff --git a/tests/core/client/test_common_opts.py b/tests/core/client/test_common_opts.py new file mode 100644 index 0000000000000..c6bcc51db389c --- /dev/null +++ b/tests/core/client/test_common_opts.py @@ -0,0 +1,41 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import tempfile + +import pytest + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +@pytest.mark.parametrize( # type: ignore + "cmd", + ["build", "targets", "cquery", "bxl", "uquery"], +) +async def test_write_uuid(buck: Buck, cmd: str) -> None: + with tempfile.NamedTemporaryFile() as file: + cmd_call = getattr(buck, cmd) + await expect_failure(cmd_call("--write-build-id", file.name, "a")) + + assert len(file.read()) > 0 + + +@buck_test() +@pytest.mark.parametrize( # type: ignore + "cmd", + ["build", "targets", "cquery", "bxl", "uquery"], +) +async def test_ban_cell_override(buck: Buck, cmd: str) -> None: + cmd_call = getattr(buck, cmd) + await expect_failure(cmd_call("--config", "repositories.foo=bar", "a")) + await expect_failure(cmd_call("--config", "cells.foo=bar", "a")) diff --git a/tests/core/client/test_common_opts_data/.buckconfig b/tests/core/client/test_common_opts_data/.buckconfig new file mode 100644 index 0000000000000..82ff4e5316342 --- /dev/null +++ b/tests/core/client/test_common_opts_data/.buckconfig @@ -0,0 +1,2 @@ +[cells] + root = .
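test_ban_cell_override above exercises one narrow client-side rule: `--config` may not rewrite the cell mapping. A minimal sketch of the invariant the test pins down (illustrative only; the section names and the rejection mechanism are assumptions, not buck2's implementation):

# Sketch of the invariant test_ban_cell_override checks; not buck2 client code.
BANNED_SECTIONS = {"repositories", "cells"}  # assumed section names

def validate_config_override(override: str) -> None:
    # `override` looks like "section.key=value"
    section = override.split(".", 1)[0]
    if section in BANNED_SECTIONS:
        raise ValueError(
            f"--config cannot modify cell mapping section '{section}'"
        )

validate_config_override("foo.bar=1")        # accepted
# validate_config_override("cells.foo=bar")  # would raise ValueError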
diff --git a/tests/core/client/test_common_opts_data/.buckroot b/tests/core/client/test_common_opts_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/completion/BUCK b/tests/core/completion/BUCK new file mode 100644 index 0000000000000..26232a04ad2e0 --- /dev/null +++ b/tests/core/completion/BUCK @@ -0,0 +1,14 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_completion", + srcs = ["test_completion.py"], + data_dir = "test_completion_data", + env = { + "BUCK2_COMPLETION_VERIFY": "$(location //buck2/shed/completion_verify:completion_verify)", + }, + serialize_test_cases = False, + skip_for_os = ["windows"], +) diff --git a/tests/core/completion/test_completion.py b/tests/core/completion/test_completion.py new file mode 100644 index 0000000000000..a67d6f51b6a3d --- /dev/null +++ b/tests/core/completion/test_completion.py @@ -0,0 +1,225 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +import os +import platform +import subprocess
import typing +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + +IS_LINUX: bool = platform.system() == "Linux" + +# Downloading fish on Mac is not straightforward, so only test it on Linux +SHELLS = ["bash", "fish", "zsh"] if IS_LINUX else ["bash", "zsh"] + + +def completion_test( + name: str, + input: str, + expected: list[str] | typing.Callable[[list[str]], bool], + shells: list[str] = SHELLS, + options_only: bool = False, + cwd: str = "", + bin: str = "buck2", +) -> None: + for shell in shells: + if shell == "fish" and not IS_LINUX: + # As above, not supported on Mac + continue + + # shell=shell is a trick to get the variable captured by value + async def impl(buck: Buck, shell: str = shell) -> None: + tmp_path = Path(buck.cwd).parent / "tmp" + tmp_path.mkdir(exist_ok=True) + + verify_bin = Path(os.environ["BUCK2_COMPLETION_VERIFY"]) + + get_completions = await buck.completion( + shell, *(["--options-only"] if options_only else []) + ) + completions_path = tmp_path / f"completion.{shell}" + completions_path.write_text(get_completions.stdout) + + shell_home = (tmp_path / f"{shell}_tmp").absolute() + shell_home.mkdir(exist_ok=True) + + # Write this to a script to make it easier to debug with `BUCK_E2E_KEEP_TMP=1` + script = "\n".join( + [ + "#!/bin/bash", + "shopt -s dotglob", + f'export PATH="{buck.path_to_executable.parent.absolute()}:$PATH"', + "export BUCK2_COMPLETION_TIMEOUT=30000", + f"if [ -n \"$( ls -A '{shell_home}' )\" ]; then", + f" rm -r -- {shell_home}/*", + "fi", + f"{verify_bin.absolute()} --tempdir {shell_home} --name {bin} {shell} {completions_path.absolute()}", + ] + ) + script_path = tmp_path / f"test_{shell}.sh" + script_path.write_text(script) + script_path.chmod(0o755) + + # Because shells don't report when they're done generating completions, these tests are + # fundamentally racy. Improve on that a little bit by "warming up" the daemon before + # doing the actual test.
+ await buck.uquery("//...") + + actual = subprocess.check_output( + script_path.absolute(), + input=f"{bin} {input}", + text=True, + cwd=buck.cwd.joinpath(cwd), + ) + actual = actual.splitlines() + if isinstance(expected, list): + expected_ = expected + if shell == "bash": + # Bash treats `:` as a separator for the purpose of + # completions as long as it's not at the end, so adjust for + # that + expected_ = [] + for i in expected: + if i.endswith(":"): + before = i[:-1] + expected_.append(before.split(":")[-1] + ":") + else: + expected_.append(i.split(":")[-1]) + assert actual == expected_, "testing shell: " + shell + else: + assert expected(actual), "testing shell: " + shell + + globals()[name + "_" + shell] = buck_test()(impl) + + +completion_test( + name="test_command_name", + input="t", + # FIXME(JakobDegen): Should probably not be inconsistent + expected=["test", "targets"] if IS_LINUX else ["targets", "test"], + options_only=True, + # Skip this on zsh and fish because they have fancy formatting with help messages for commands + shells=["bash"], +) + +completion_test( + name="test_build_flags", + # Use `--p` so that we don't get too many outputs, which the test framework doesn't handle well + # on zsh + input="build --p", + options_only=True, + expected=lambda actual: ( + "--prefer-local" in actual and "--prefer-remote" in actual + ), +) + +completion_test( + name="test_build_flags_buck_bin", + # Use `--p` so that we don't get too many outputs, which the test framework doesn't handle well + # on zsh + input="build --p", + options_only=True, + expected=lambda actual: ( + "--prefer-local" in actual and "--prefer-remote" in actual + ), + bin="buck", +) + +completion_test( + name="test_completes_simple_partial_directory", + input="build d", + expected=["dir1/", "dir1:", "dir2/"], +) + +completion_test( + name="test_completes_simple_directory", + input="build dir", + expected=["dir1/", "dir1:", "dir2/"], +) + +completion_test( + name="test_completes_dir_from_trailing_slash", + input="build other/", + # FIXME(JakobDegen): Bug + # expected=["other/far/", "other/far:", "other/foo/", "other/foo:"], + expected=[], + shells=["bash", "zsh"], +) + +completion_test( + name="test_completes_dir_from_trailing_slash", + input="build other/", + # FIXME(JakobDegen): Bug + # expected=["other/far/", "other/far:", "other/foo/", "other/foo:"], + expected=[ + "other//other/far/", + "other//other/far:", + "other//other/foo/", + "other//other/foo:", + ], + shells=["fish"], +) + +completion_test( + name="test_completes_simple_cells", + input="build cell", + expected=["cell2a//", "cell2a//:", "cell3//", "cell3//:"], +) + +completion_test( + name="test_completes_rule", + input="build dir1:target1", + expected=["dir1:target1a", "dir1:target1b"], +) + +completion_test( + name="test_starts_with_colon", + input="build :tar", + expected=[":target1a", ":target1b"], + cwd="dir1", +) + +completion_test( + name="test_colon_only_arg", + input="build :", + expected=[":target1a", ":target1b"], + cwd="dir1", +) + +completion_test( + name="test_does_not_complete_files_in_target_position", + input="build TARG", + expected=[], + cwd="dir1", + shells=["fish", "zsh"], +) + +completion_test( + name="test_does_not_complete_files_in_target_position", + input="build TARG", + # FIXME(JakobDegen): Bug: expected=[], + expected=["TARGETS.fixture"], + cwd="dir1", + shells=["bash"], +) + +completion_test( + name="test_no_targets_for_required_option_param", + input="build --console ", + expected=["auto", "none", "simple", "simplenotty", 
"simpletty", "super"], +) + +completion_test( + name="test_default_completions_after_run_dash_dash", + input="run :sometarget -- d", + expected=["dir1/", "dir2/"], +) diff --git a/tests/core/completion/test_completion_data/.buckconfig b/tests/core/completion/test_completion_data/.buckconfig new file mode 100644 index 0000000000000..aae31f06f9fd3 --- /dev/null +++ b/tests/core/completion/test_completion_data/.buckconfig @@ -0,0 +1,8 @@ +[repositories] + root = . + prelude = dir1/prelude + cell2a = dir2/cell2a + cell3 = cell3 + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/completion/test_completion_data/.buckroot b/tests/core/completion/test_completion_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/completion/test_completion_data/.gitignore b/tests/core/completion/test_completion_data/.gitignore new file mode 100644 index 0000000000000..1e4d4c40e39d8 --- /dev/null +++ b/tests/core/completion/test_completion_data/.gitignore @@ -0,0 +1 @@ +/buck-out/ diff --git a/tests/core/completion/test_completion_data/cell3/.buckconfig b/tests/core/completion/test_completion_data/cell3/.buckconfig new file mode 100644 index 0000000000000..f395fd9dba07e --- /dev/null +++ b/tests/core/completion/test_completion_data/cell3/.buckconfig @@ -0,0 +1,8 @@ +[repositories] + cell3 = . + root = .. + prelude = ../dir1/prelude + cell2a = ../dir2/cell2a + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/completion/test_completion_data/cell3/TARGETS.fixture b/tests/core/completion/test_completion_data/cell3/TARGETS.fixture new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/completion/test_completion_data/dir1/TARGETS.fixture b/tests/core/completion/test_completion_data/dir1/TARGETS.fixture new file mode 100644 index 0000000000000..1f70776500550 --- /dev/null +++ b/tests/core/completion/test_completion_data/dir1/TARGETS.fixture @@ -0,0 +1,9 @@ +fake_rule( + name = "target1a", + visibility = ["PUBLIC"], +) + +fake_rule( + name = "target1b", + visibility = ["PUBLIC"], +) diff --git a/tests/core/completion/test_completion_data/dir1/prelude/.buckconfig b/tests/core/completion/test_completion_data/dir1/prelude/.buckconfig new file mode 100644 index 0000000000000..98fd33ecaebdc --- /dev/null +++ b/tests/core/completion/test_completion_data/dir1/prelude/.buckconfig @@ -0,0 +1,8 @@ +[repositories] + prelude = . + root = ../../ + cell2a = ../../dir2/cell2a + cell3 = ../../cell3 + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/completion/test_completion_data/dir1/prelude/prelude.bzl b/tests/core/completion/test_completion_data/dir1/prelude/prelude.bzl new file mode 100644 index 0000000000000..440788fe5e58d --- /dev/null +++ b/tests/core/completion/test_completion_data/dir1/prelude/prelude.bzl @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +def _fake_impl(ctx): + return [ + DefaultInfo(default_output = ctx.attrs.src), + ] + +fake_rule = rule( + impl = _fake_impl, + attrs = {"src": attrs.option(attrs.source(), default = None)}, +) diff --git a/tests/core/completion/test_completion_data/dir2/cell2a/.buckconfig b/tests/core/completion/test_completion_data/dir2/cell2a/.buckconfig new file mode 100644 index 0000000000000..1ea9aad85a741 --- /dev/null +++ b/tests/core/completion/test_completion_data/dir2/cell2a/.buckconfig @@ -0,0 +1,8 @@ +[repositories] + cell2a = . + root = ../../ + prelude = ../../dir1/prelude + cell3 = ../../cell3 + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/completion/test_completion_data/dir2/cell2a/TARGETS.fixture b/tests/core/completion/test_completion_data/dir2/cell2a/TARGETS.fixture new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/completion/test_completion_data/other/far/TARGETS.fixture b/tests/core/completion/test_completion_data/other/far/TARGETS.fixture new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/completion/test_completion_data/other/foo/TARGETS.fixture b/tests/core/completion/test_completion_data/other/foo/TARGETS.fixture new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/configurations/configurations/BUCK b/tests/core/configurations/configurations/BUCK new file mode 100644 index 0000000000000..803eddc273c0c --- /dev/null +++ b/tests/core/configurations/configurations/BUCK @@ -0,0 +1,63 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_compatible_with_before_select", + srcs = ["test_compatible_with_before_select.py"], + data_dir = "test_compatible_with_before_select_data", +) + +buck2_e2e_test( + name = "test_configuration_rule_unbound", + srcs = ["test_configuration_rule_unbound.py"], + data_dir = "test_configuration_rule_unbound_data", +) + +buck2_e2e_test( + name = "test_select_buckconfig", + srcs = ["test_select_buckconfig.py"], + data_dir = "test_select_buckconfig_data", +) + +buck2_e2e_test( + name = "test_select_refine", + srcs = ["test_select_refine.py"], + data_dir = "test_select_refine_data", +) + +buck2_e2e_test( + name = "test_target_incompatible", + srcs = ["test_target_incompatible.py"], + data_dir = "test_target_incompatible_data", +) + +buck2_e2e_test( + name = "test_target_platforms_arg", + srcs = ["test_target_platforms_arg.py"], + data_dir = "test_target_platforms_arg_data", +) + +buck2_e2e_test( + name = "test_platform_via_alias", + srcs = ["test_platform_via_alias.py"], + data_dir = "test_platform_via_alias_data", +) + +buck2_e2e_test( + name = "test_platform_wrong_label", + srcs = ["test_platform_wrong_label.py"], + data_dir = "test_platform_wrong_label_data", +) + +buck2_e2e_test( + name = "test_toolchain_overconfiguration", + srcs = ["test_toolchain_overconfiguration.py"], + data_dir = "test_toolchain_overconfiguration_data", +) + +buck2_e2e_test( + name = "test_select_concat", + srcs = ["test_select_concat.py"], + data_dir = "test_select_concat_data", +) diff --git a/tests/core/configurations/configurations/test_compatible_with_before_select.py b/tests/core/configurations/configurations/test_compatible_with_before_select.py new file mode 100644 index 0000000000000..dd220a0f1208d --- /dev/null +++ b/tests/core/configurations/configurations/test_compatible_with_before_select.py @@ -0,0 +1,37 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates.
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +# Test that compatible_with is evaluated before select, +# and that if the target is incompatible, the select is not evaluated at all. + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_compatible_with_before_select(buck: Buck) -> None: + result = await buck.cquery( + "--target-platforms=root//:p-linux", + "root//:windows-only", + ) + # Check it does not fail. + result.check_returncode() + + result = await buck.cquery( + "--target-platforms=root//:p-linux", + "root//:windows-only-deps", + ) + result.check_returncode() + + result = await buck.cquery( + "--target-platforms=root//:p-linux", + "deps(root//:windows-only-exec-deps)", + ) + assert "root//:windows-only (root//:p-exec-windows" in result.stdout diff --git a/tests/core/configurations/configurations/test_compatible_with_before_select_data/.buckconfig b/tests/core/configurations/configurations/test_compatible_with_before_select_data/.buckconfig new file mode 100644 index 0000000000000..3ed8f55924bd1 --- /dev/null +++ b/tests/core/configurations/configurations/test_compatible_with_before_select_data/.buckconfig @@ -0,0 +1,15 @@ +[cells] + root = . + nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name=TARGETS.fixture + +[build] + execution_platforms = root//:platforms diff --git a/tests/core/configurations/configurations/test_compatible_with_before_select_data/.buckroot b/tests/core/configurations/configurations/test_compatible_with_before_select_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/configurations/configurations/test_compatible_with_before_select_data/TARGETS.fixture b/tests/core/configurations/configurations/test_compatible_with_before_select_data/TARGETS.fixture new file mode 100644 index 0000000000000..ba235b9ddbe06 --- /dev/null +++ b/tests/core/configurations/configurations/test_compatible_with_before_select_data/TARGETS.fixture @@ -0,0 +1,79 @@ +constraint_setting( + name = "os", +) + +constraint_value( + name = "windows", + constraint_setting = ":os", +) + +constraint_value( + name = "linux", + constraint_setting = ":os", +) + +platform( + name = "p-linux", + constraint_values = [ + ":linux", + ], +) + +platform( + name = "p-windows", + constraint_values = [ + ":windows", + ], +) + +execution_platform( + name = "p-exec-windows", + platform = ":p-windows", +) + +execution_platform( + name = "p-exec-linux", + platform = ":p-linux", +) + +execution_platforms( + name = "platforms", + platforms = [":p-exec-windows", ":p-exec-linux"], +) + +stub( + name = "windows-only", + srcs = select({ + # In this test we run the query with the linux target platform. + # This select has no DEFAULT entry, + # but it must not fail, because it is never evaluated at all + # thanks to the `compatible_with` check.
+ ":windows": [], + }), + compatible_with = [":windows"], +) + +stub( + name = "windows-only-undeclared", + srcs = select({ + # This rule is only compatible with windows, however we don't declare that + ":windows": [], + }), +) + +stub( + name = "windows-only-deps", + deps = [ + ":windows-only-undeclared", + ":windows-only", + ], + compatible_with = [":windows"], +) + +stub( + name = "windows-only-exec-deps", + exec_deps = [ + ":windows-only-undeclared", + ":windows-only", + ], +) diff --git a/tests/core/configurations/configurations/test_configuration_rule_unbound.py b/tests/core/configurations/configurations/test_configuration_rule_unbound.py new file mode 100644 index 0000000000000..cf1620283f5d8 --- /dev/null +++ b/tests/core/configurations/configurations/test_configuration_rule_unbound.py @@ -0,0 +1,23 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_configuration_rule_unbound(buck: Buck) -> None: + result = await buck.cquery( + # platform argument is ignored + "--target-platforms=root//:p", + "root//:the-test", + ) + # Note configuration is unbound here. + assert "root//:the-test ()\n" == result.stdout diff --git a/tests/core/configurations/configurations/test_configuration_rule_unbound_data/.buckconfig b/tests/core/configurations/configurations/test_configuration_rule_unbound_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/configurations/configurations/test_configuration_rule_unbound_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . + nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/configurations/configurations/test_configuration_rule_unbound_data/.buckroot b/tests/core/configurations/configurations/test_configuration_rule_unbound_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/configurations/configurations/test_configuration_rule_unbound_data/TARGETS.fixture b/tests/core/configurations/configurations/test_configuration_rule_unbound_data/TARGETS.fixture new file mode 100644 index 0000000000000..f42d26a411e1c --- /dev/null +++ b/tests/core/configurations/configurations/test_configuration_rule_unbound_data/TARGETS.fixture @@ -0,0 +1,9 @@ +platform( + name = "p", + constraint_values = [], +) + +config_setting( + name = "the-test", + constraint_values = [], +) diff --git a/tests/core/configurations/configurations/test_platform_via_alias.py b/tests/core/configurations/configurations/test_platform_via_alias.py new file mode 100644 index 0000000000000..9e3b6d17168fc --- /dev/null +++ b/tests/core/configurations/configurations/test_platform_via_alias.py @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +# Test platform can be loaded via `alias` rule. +@buck_test() +async def test_platform_via_alias(buck: Buck) -> None: + await buck.build( + "root//:gr", + ) diff --git a/tests/core/configurations/configurations/test_platform_via_alias_data/.buckconfig b/tests/core/configurations/configurations/test_platform_via_alias_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/configurations/configurations/test_platform_via_alias_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . + nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/configurations/configurations/test_platform_via_alias_data/.buckroot b/tests/core/configurations/configurations/test_platform_via_alias_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/configurations/configurations/test_platform_via_alias_data/TARGETS.fixture b/tests/core/configurations/configurations/test_platform_via_alias_data/TARGETS.fixture new file mode 100644 index 0000000000000..914cec8c002ef --- /dev/null +++ b/tests/core/configurations/configurations/test_platform_via_alias_data/TARGETS.fixture @@ -0,0 +1,16 @@ +load(":alias.bzl", "alias") + +platform( + name = "ppp", + constraint_values = [], +) + +alias( + name = "ppp-alias", + actual = ":ppp", +) + +trivial_build( + name = "gr", + default_target_platform = ":ppp-alias", +) diff --git a/tests/core/configurations/configurations/test_platform_via_alias_data/alias.bzl b/tests/core/configurations/configurations/test_platform_via_alias_data/alias.bzl new file mode 100644 index 0000000000000..dff4d8205e07d --- /dev/null +++ b/tests/core/configurations/configurations/test_platform_via_alias_data/alias.bzl @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(ctx): + return ctx.attrs.actual.providers + +alias = rule( + impl = _impl, + attrs = { + "actual": attrs.dep(), + }, +) diff --git a/tests/core/configurations/configurations/test_platform_wrong_label.py b/tests/core/configurations/configurations/test_platform_wrong_label.py new file mode 100644 index 0000000000000..78b7f5b555c81 --- /dev/null +++ b/tests/core/configurations/configurations/test_platform_wrong_label.py @@ -0,0 +1,23 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_platform_wrong_label(buck: Buck) -> None: + await expect_failure( + buck.build( + "//...", + ), + stderr_regex=r"Platform target `.*` evaluation returned `ProviderInfo` label `.*` which resolved to an unequal configuration", + ) diff --git a/tests/core/configurations/configurations/test_platform_wrong_label_data/.buckconfig b/tests/core/configurations/configurations/test_platform_wrong_label_data/.buckconfig new file mode 100644 index 0000000000000..425a56f43b9c4 --- /dev/null +++ b/tests/core/configurations/configurations/test_platform_wrong_label_data/.buckconfig @@ -0,0 +1,6 @@ +[repositories] + root = . +[repository_aliases] + prelude = root +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/configurations/configurations/test_platform_wrong_label_data/.buckroot b/tests/core/configurations/configurations/test_platform_wrong_label_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/configurations/configurations/test_platform_wrong_label_data/TARGETS.fixture b/tests/core/configurations/configurations/test_platform_wrong_label_data/TARGETS.fixture new file mode 100644 index 0000000000000..cd66ea711d2a2 --- /dev/null +++ b/tests/core/configurations/configurations/test_platform_wrong_label_data/TARGETS.fixture @@ -0,0 +1,14 @@ +load(":rules.bzl", "proper_platform", "useless", "wrong_platform") + +proper_platform( + name = "proper_platform", +) + +wrong_platform( + name = "wrong_platform", +) + +useless( + name = "useless", + default_target_platform = ":wrong_platform", +) diff --git a/tests/core/configurations/configurations/test_platform_wrong_label_data/prelude.bzl b/tests/core/configurations/configurations/test_platform_wrong_label_data/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/configurations/configurations/test_platform_wrong_label_data/rules.bzl b/tests/core/configurations/configurations/test_platform_wrong_label_data/rules.bzl new file mode 100644 index 0000000000000..3e0824fab6110 --- /dev/null +++ b/tests/core/configurations/configurations/test_platform_wrong_label_data/rules.bzl @@ -0,0 +1,59 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# This is how a proper platform rule should be implemented. + +def _proper_platform_impl(ctx): + return [ + DefaultInfo(), + PlatformInfo( + label = str(ctx.label.raw_target()), + configuration = ConfigurationInfo( + constraints = {}, + values = {}, + ), + ), + ] + +proper_platform = rule( + impl = _proper_platform_impl, + attrs = {}, +) + +# This rule returns a platform with the wrong label. + +def _wrong_platform_impl(ctx): + _unused = ctx # buildifier: disable=unused-variable + return [ + DefaultInfo(), + PlatformInfo( + # A proper `platform` implementation should use `str(ctx.label.raw_target())`. + label = "//:proper_platform", + configuration = ConfigurationInfo( + constraints = {}, + values = {}, + ), + ), + ] + +wrong_platform = rule( + impl = _wrong_platform_impl, + attrs = {}, +) + +# Some rule + +def _useless(ctx): + _unused = ctx # buildifier: disable=unused-variable + return [ + DefaultInfo(), + ] + +useless = rule( + impl = _useless, + attrs = {}, +) diff --git a/tests/core/configurations/configurations/test_select_buckconfig.py b/tests/core/configurations/configurations/test_select_buckconfig.py new file mode 100644 index 0000000000000..3e04f84850835 --- /dev/null +++ b/tests/core/configurations/configurations/test_select_buckconfig.py @@ -0,0 +1,36 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +# Test select works with buckconfig. +@buck_test() +async def test_select_buckconfig(buck: Buck) -> None: + out = await buck.cquery( + "root//:the-test", + "--output-attribute=labels", + ) + q = json.loads(out.stdout) + assert len(q) == 1 + assert list(q.values())[0]["labels"] == ["NO"] + + out = await buck.cquery( + "root//:the-test", + "--output-attribute=labels", + "-c", + "aaa.bbb=ccc", + ) + q = json.loads(out.stdout) + assert len(q) == 1 + assert list(q.values())[0]["labels"] == ["YES"] diff --git a/tests/core/configurations/configurations/test_select_buckconfig_data/.buckconfig b/tests/core/configurations/configurations/test_select_buckconfig_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/configurations/configurations/test_select_buckconfig_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . + nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/configurations/configurations/test_select_buckconfig_data/.buckroot b/tests/core/configurations/configurations/test_select_buckconfig_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/configurations/configurations/test_select_buckconfig_data/TARGETS.fixture b/tests/core/configurations/configurations/test_select_buckconfig_data/TARGETS.fixture new file mode 100644 index 0000000000000..a075b72a1fcc9 --- /dev/null +++ b/tests/core/configurations/configurations/test_select_buckconfig_data/TARGETS.fixture @@ -0,0 +1,14 @@ +config_setting( + name = "cs", + values = { + "aaa.bbb": "ccc", + }, +) + +stub( + name = "the-test", + labels = select({ + ":cs": ["YES"], + "DEFAULT": ["NO"], + }), +) diff --git a/tests/core/configurations/configurations/test_select_concat.py b/tests/core/configurations/configurations/test_select_concat.py new file mode 100644 index 0000000000000..44e0bbb7123c2 --- /dev/null +++ b/tests/core/configurations/configurations/test_select_concat.py @@ -0,0 +1,18 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_oneof_concat(buck: Buck) -> None: + res = await buck.cquery("//:foo") + assert res.stdout.startswith("root//:foo ") diff --git a/tests/core/configurations/configurations/test_select_concat_data/.buckconfig b/tests/core/configurations/configurations/test_select_concat_data/.buckconfig new file mode 100644 index 0000000000000..1045f38b01481 --- /dev/null +++ b/tests/core/configurations/configurations/test_select_concat_data/.buckconfig @@ -0,0 +1,9 @@ +[repositories] + root = . + nano_prelude = nano_prelude +[buildfile] + name = TARGETS.fixture +[cell_aliases] + prelude = nano_prelude +[external_cells] + nano_prelude = bundled diff --git a/tests/core/configurations/configurations/test_select_concat_data/.buckroot b/tests/core/configurations/configurations/test_select_concat_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/configurations/configurations/test_select_concat_data/TARGETS.fixture b/tests/core/configurations/configurations/test_select_concat_data/TARGETS.fixture new file mode 100644 index 0000000000000..f82f2765b3f41 --- /dev/null +++ b/tests/core/configurations/configurations/test_select_concat_data/TARGETS.fixture @@ -0,0 +1,13 @@ +load(":defs.bzl", "my_rule") + +my_rule( + name = "foo", + bar = [] + select({ + "DEFAULT": [] + select({"DEFAULT": []}), + }), + default_target_platform = ":my_platform", +) + +platform( + name = "my_platform", +) diff --git a/tests/core/configurations/configurations/test_select_concat_data/defs.bzl b/tests/core/configurations/configurations/test_select_concat_data/defs.bzl new file mode 100644 index 0000000000000..f9cc6943b0117 --- /dev/null +++ b/tests/core/configurations/configurations/test_select_concat_data/defs.bzl @@ -0,0 +1,13 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(_ctx): + return [DefaultInfo()] + +my_rule = rule(impl = _impl, attrs = { + "bar": attrs.one_of(attrs.list(attrs.string()), attrs.int()), +}) diff --git a/tests/core/configurations/configurations/test_select_refine.py b/tests/core/configurations/configurations/test_select_refine.py new file mode 100644 index 0000000000000..060ce6e6f695e --- /dev/null +++ b/tests/core/configurations/configurations/test_select_refine.py @@ -0,0 +1,28 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_select_refine(buck: Buck) -> None: + # Smoke test for select refinement: + # the most specific option is picked even if it is not listed first.
+ out = await buck.cquery( + "--target-platforms=//:p-good-domestic", + "-a=labels", + "//:the-test", + ) + q = json.loads(out.stdout) + assert len(q) == 1 + assert list(q.values())[0]["labels"] == ["good-domestic"] diff --git a/tests/core/configurations/configurations/test_select_refine_data/.buckconfig b/tests/core/configurations/configurations/test_select_refine_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/configurations/configurations/test_select_refine_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . + nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/configurations/configurations/test_select_refine_data/.buckroot b/tests/core/configurations/configurations/test_select_refine_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/configurations/configurations/test_select_refine_data/TARGETS.fixture b/tests/core/configurations/configurations/test_select_refine_data/TARGETS.fixture new file mode 100644 index 0000000000000..cda23bfb42d54 --- /dev/null +++ b/tests/core/configurations/configurations/test_select_refine_data/TARGETS.fixture @@ -0,0 +1,60 @@ +constraint_setting( + name = "quality", +) + +constraint_value( + name = "good", + constraint_setting = ":quality", +) + +constraint_value( + name = "bad", + constraint_setting = ":quality", +) + +constraint_setting( + name = "origin", +) + +constraint_value( + name = "domestic", + constraint_setting = ":origin", +) + +constraint_value( + name = "imported", + constraint_setting = ":origin", +) + +config_setting( + name = "c-good-domestic", + constraint_values = [ + ":domestic", + ":good", + ], +) + +config_setting( + name = "c-good", + constraint_values = [ + ":good", + ], +) + +platform( + name = "p-good-domestic", + constraint_values = [ + ":domestic", + ":good", + ], +) + +stub( + name = "the-test", + labels = select({ + ":c-good": ["good"], + # This option is picked in the test even though the previous option also matches. + ":c-good-domestic": ["good-domestic"], + "DEFAULT": ["default"], + }), +) diff --git a/tests/core/configurations/configurations/test_target_incompatible.py b/tests/core/configurations/configurations/test_target_incompatible.py new file mode 100644 index 0000000000000..61715e889562b --- /dev/null +++ b/tests/core/configurations/configurations/test_target_incompatible.py @@ -0,0 +1,123 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree.
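The refinement fixture above relies on select() picking the most specific matching key: among matching config_settings, the one whose constraint set contains another's wins, regardless of listing order, and DEFAULT only applies when nothing matches. A small model of that rule (the requirement sets below mirror the fixture and are assumptions, not buck2 internals):

# Model of select() refinement; an approximation for illustration only.
def resolve_select(options: dict[str, str], enabled: frozenset) -> str:
    # Constraint sets each config_setting requires (assumed, mirroring the fixture).
    requirements = {
        ":c-good": frozenset({"good"}),
        ":c-good-domestic": frozenset({"good", "domestic"}),
    }
    matches = {k: r for k, r in requirements.items() if k in options and r <= enabled}
    if not matches:
        return options["DEFAULT"]
    # Treat the largest requirement set as the most specific; buck2 instead
    # compares by set containment and errors on incomparable matches.
    best = max(matches, key=lambda k: len(matches[k]))
    return options[best]

opts = {":c-good": "good", ":c-good-domestic": "good-domestic", "DEFAULT": "default"}
assert resolve_select(opts, frozenset({"good", "domestic"})) == "good-domestic"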
+ +# pyre-strict + + +import re + +import pytest + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_incompatible_target_skipping(buck: Buck) -> None: + # an incompatible target should be skipped when matched by a package pattern + result = await buck.build("//:") + assert "Skipping target incompatible node `root//:incompatible (" in result.stderr + # when explicitly requested, it should be a failure + await expect_failure(buck.build("//:incompatible")) + # should be a failure if it's both explicitly requested and part of a package/recursive pattern + # TODO(cjhopman): this doesn't work correctly yet + # await expect_failure( + # buck.build("//:", "//:incompatible") + # ) + + +INCOMPATIBLE_ERROR = "root//:incompatible is incompatible with" + + +@buck_test() +@pytest.mark.parametrize( # type: ignore + "target_pattern", + [ + "//dep_incompatible:", + "//dep_incompatible:dep_incompatible", + "//dep_incompatible:transitive_dep_incompatible", + ], +) +async def test_dep_incompatible_target(buck: Buck, target_pattern: str) -> None: + # a compatible target with incompatible deps should always fail no matter what. + await expect_failure( + buck.cquery(target_pattern), + stderr_regex=INCOMPATIBLE_ERROR, + ) + await expect_failure( + buck.build(target_pattern, "--skip-incompatible-targets"), + stderr_regex=INCOMPATIBLE_ERROR, + ) + + +@buck_test() +async def test_incompatible_target_with_incompatible_dep(buck: Buck) -> None: + target = "//dep_incompatible:target_and_dep_incompatible" + await buck.cquery(target) + await buck.build(target, "--skip-incompatible-targets") + await expect_failure( + buck.build(target), + stderr_regex=f"{target} is incompatible with", + ) + + +@buck_test() +async def test_exec_dep_transitive_incompatible(buck: Buck) -> None: + await buck.cquery( + "//exec_dep:one_exec_platform_transitive_incompatible", + ) + + +@buck_test() +async def test_exec_dep_transitive_incompatible_post_transition(buck: Buck) -> None: + await buck.cquery( + "//exec_dep:one_exec_platform_transitive_incompatible_post_transition", + ) + + +@pytest.mark.parametrize( + "target_pattern, soft_error", + [ + ("//dep_incompatible:", False), + ("//dep_incompatible/...", False), + ("//...", False), + # target pattern doesn't match //dep_incompatible:dep_incompatible + ( + "//dep_incompatible:dep_incompatible2", + True, + ), + ], +) +@buck_test(allow_soft_errors=True) +async def test_error_on_dep_only_incompatible( + buck: Buck, target_pattern: str, soft_error: bool +) -> None: + args = [ + "-c", + f"buck2.error_on_dep_only_incompatible=//some/...,{target_pattern}", + "//dep_incompatible:dep_incompatible", + ] + if soft_error: + result = await buck.cquery(*args) + assert re.search(INCOMPATIBLE_ERROR, result.stderr, re.DOTALL | re.IGNORECASE) + else: + await expect_failure( + buck.cquery(*args), + stderr_regex=INCOMPATIBLE_ERROR, + ) + + +@buck_test() +async def test_error_on_dep_only_incompatible_conf(buck: Buck) -> None: + args = [ + "//dep_incompatible:dep_incompatible_conf2", + ] + await expect_failure( + buck.cquery(*args), + stderr_regex=INCOMPATIBLE_ERROR, + ) diff --git a/tests/core/configurations/configurations/test_target_incompatible_data/.buckconfig b/tests/core/configurations/configurations/test_target_incompatible_data/.buckconfig new file mode 100644 index 0000000000000..d3a7aaade5341 --- /dev/null +++
b/tests/core/configurations/configurations/test_target_incompatible_data/.buckconfig @@ -0,0 +1,20 @@ +[buildfile] +name=TARGETS.fixture + +[cells] +root = . +nano_prelude = nano_prelude + +[cell_aliases] +prelude = nano_prelude + +[external_cells] +nano_prelude = bundled + +[build] +execution_platforms = root//exec_dep:execution_platforms + +[buck2] +error_on_dep_only_incompatible = \ + //dep_incompatible:dep_incompatible_conf1, \ + //dep_incompatible:dep_incompatible_conf2 diff --git a/tests/core/configurations/configurations/test_target_incompatible_data/.buckroot b/tests/core/configurations/configurations/test_target_incompatible_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/configurations/configurations/test_target_incompatible_data/TARGETS.fixture b/tests/core/configurations/configurations/test_target_incompatible_data/TARGETS.fixture new file mode 100644 index 0000000000000..f55fc2bffef1b --- /dev/null +++ b/tests/core/configurations/configurations/test_target_incompatible_data/TARGETS.fixture @@ -0,0 +1,23 @@ +platform( + name = "platform", +) + +constraint_setting( + name = "constraint", +) + +constraint_value( + name = "disable", + constraint_setting = ":constraint", +) + +platform( + name = "disabled", + constraint_values = [":disable"], +) + +trivial_build( + name = "incompatible", + default_target_platform = ":platform", + target_compatible_with = ["//:disable"], +) diff --git a/tests/core/configurations/configurations/test_target_incompatible_data/defs.bzl b/tests/core/configurations/configurations/test_target_incompatible_data/defs.bzl new file mode 100644 index 0000000000000..6eb250cf969bf --- /dev/null +++ b/tests/core/configurations/configurations/test_target_incompatible_data/defs.bzl @@ -0,0 +1,18 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +_transition = transition( + refs = {}, + impl = lambda platform, refs: PlatformInfo(configuration = platform.configuration, label = ""), +) + +transitioned_stub = rule( + attrs = { + "stub": attrs.transition_dep(cfg = _transition), + }, + impl = lambda ctx: ctx.attrs.stub, +) diff --git a/tests/core/configurations/configurations/test_target_incompatible_data/dep_incompatible/TARGETS.fixture b/tests/core/configurations/configurations/test_target_incompatible_data/dep_incompatible/TARGETS.fixture new file mode 100644 index 0000000000000..3411f357bb22e --- /dev/null +++ b/tests/core/configurations/configurations/test_target_incompatible_data/dep_incompatible/TARGETS.fixture @@ -0,0 +1,30 @@ +stub( + name = "dep_incompatible", + default_target_platform = "//:platform", + deps = ["//:incompatible"], +) + +stub( + name = "dep_incompatible_conf1", + default_target_platform = "//:platform", + deps = ["//:incompatible"], +) + +stub( + name = "dep_incompatible_conf2", + default_target_platform = "//:platform", + deps = ["//:incompatible"], +) + +stub( + name = "transitive_dep_incompatible", + default_target_platform = "//:platform", + deps = [":dep_incompatible"], +) + +stub( + name = "target_and_dep_incompatible", + default_target_platform = "//:platform", + target_compatible_with = ["//:disable"], + deps = [":dep_incompatible"], +) diff --git a/tests/core/configurations/configurations/test_target_incompatible_data/exec_dep/TARGETS.fixture b/tests/core/configurations/configurations/test_target_incompatible_data/exec_dep/TARGETS.fixture new file mode 100644 index 0000000000000..54d2e16a188ed --- /dev/null +++ b/tests/core/configurations/configurations/test_target_incompatible_data/exec_dep/TARGETS.fixture @@ -0,0 +1,63 @@ +load("@//:defs.bzl", "transitioned_stub") + +constraint_setting(name = "exec_setting") + +constraint_value( + name = "A", + constraint_setting = ":exec_setting", +) + +constraint_value( + name = "B", + constraint_setting = ":exec_setting", +) + +platform( + name = "platformA", + constraint_values = [":A"], +) + +platform( + name = "platformB", + constraint_values = [":B"], +) + +execution_platform( + name = "execA", + platform = ":platformA", +) + +execution_platform( + name = "execB", + platform = ":platformB", +) + +execution_platforms( + name = "execution_platforms", + platforms = [ + ":execA", + ":execB", + ], +) + +stub( + name = "exec_compatible_B", + target_compatible_with = [":B"], +) + +stub( + name = "one_exec_platform_transitive_incompatible", + default_target_platform = "//:platform", + exec_deps = [":exec_compatible_B"], +) + +transitioned_stub( + name = "exec_compatible_B_transitioned", + stub = ":exec_compatible_B", +) + +stub( + name = "one_exec_platform_transitive_incompatible_post_transition", + default_target_platform = "//:platform", + exec_deps = [":exec_compatible_B_transitioned"], +) diff --git a/tests/core/configurations/configurations/test_target_platforms_arg.py b/tests/core/configurations/configurations/test_target_platforms_arg.py new file mode 100644 index 0000000000000..d7c083b1f0df0 --- /dev/null +++ b/tests/core/configurations/configurations/test_target_platforms_arg.py @@ -0,0 +1,33 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + + +import re +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_target_platforms_arg(buck: Buck) -> None: + out = await buck.cquery( + # Specifying platform without cell to make sure it is resolved against current cell + "--target-platforms=//:p-clouds", + "deps(//:the-test, 1)", + rel_cwd=Path("subcell"), + ) + stdout = re.sub(":p-clouds#[a-f0-9]+\\)", ":p-clouds#HASH)", out.stdout) + assert ( + stdout + == """\ +subcell//:the-test (subcell//:p-clouds#HASH) +subcell//:t-clouds (subcell//:p-clouds#HASH) +""" + ) diff --git a/tests/core/configurations/configurations/test_target_platforms_arg_data/.buckconfig b/tests/core/configurations/configurations/test_target_platforms_arg_data/.buckconfig new file mode 100644 index 0000000000000..23c65ba48af27 --- /dev/null +++ b/tests/core/configurations/configurations/test_target_platforms_arg_data/.buckconfig @@ -0,0 +1,13 @@ +[cells] + root = . + subcell = subcell + nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/configurations/configurations/test_target_platforms_arg_data/.buckroot b/tests/core/configurations/configurations/test_target_platforms_arg_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/configurations/configurations/test_target_platforms_arg_data/subcell/.buckconfig b/tests/core/configurations/configurations/test_target_platforms_arg_data/subcell/.buckconfig new file mode 100644 index 0000000000000..9f36f679bda9c --- /dev/null +++ b/tests/core/configurations/configurations/test_target_platforms_arg_data/subcell/.buckconfig @@ -0,0 +1,2 @@ +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/configurations/configurations/test_target_platforms_arg_data/subcell/TARGETS.fixture b/tests/core/configurations/configurations/test_target_platforms_arg_data/subcell/TARGETS.fixture new file mode 100644 index 0000000000000..91ba2c016aed4 --- /dev/null +++ b/tests/core/configurations/configurations/test_target_platforms_arg_data/subcell/TARGETS.fixture @@ -0,0 +1,51 @@ +constraint_setting( + name = "weather", +) + +constraint_value( + name = "rain", + constraint_setting = ":weather", +) + +constraint_value( + name = "clouds", + constraint_setting = ":weather", +) + +config_setting( + name = "c-clouds", + constraint_values = [ + ":clouds", + ], +) + +platform( + name = "p-rain", + constraint_values = [ + ":rain", + ], +) + +platform( + name = "p-clouds", + constraint_values = [ + ":clouds", + ], +) + +stub( + name = "t-clouds", +) + +stub( + name = "t-rain", +) + +stub( + name = "the-test", + deps = [select({ + ":c-clouds": ":t-clouds", + "DEFAULT": ":t-rain", + })], + default_target_platform = ":p-rain", +) diff --git a/tests/core/configurations/configurations/test_toolchain_overconfiguration.py b/tests/core/configurations/configurations/test_toolchain_overconfiguration.py new file mode 100644 index 0000000000000..01bee7bffc5d9 --- /dev/null +++ b/tests/core/configurations/configurations/test_toolchain_overconfiguration.py @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.buck_workspace import buck_test
+
+
+@buck_test()
+async def test_toolchain_overconfiguration(buck: Buck) -> None:
+    await buck.cquery(
+        "--target-platforms=root//config:cat_platform",
+        "root//:top",
+    )
diff --git a/tests/core/configurations/configurations/test_toolchain_overconfiguration_data/.buckconfig b/tests/core/configurations/configurations/test_toolchain_overconfiguration_data/.buckconfig
new file mode 100644
index 0000000000000..b1d7df6f9ae59
--- /dev/null
+++ b/tests/core/configurations/configurations/test_toolchain_overconfiguration_data/.buckconfig
@@ -0,0 +1,9 @@
+[buildfile]
+name = TARGETS.fixture
+
+[repositories]
+root = .
+prelude = .
+
+[build]
+execution_platforms = root//config:platforms
diff --git a/tests/core/configurations/configurations/test_toolchain_overconfiguration_data/.buckroot b/tests/core/configurations/configurations/test_toolchain_overconfiguration_data/.buckroot
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/configurations/configurations/test_toolchain_overconfiguration_data/TARGETS.fixture b/tests/core/configurations/configurations/test_toolchain_overconfiguration_data/TARGETS.fixture
new file mode 100644
index 0000000000000..63445d44b79d4
--- /dev/null
+++ b/tests/core/configurations/configurations/test_toolchain_overconfiguration_data/TARGETS.fixture
@@ -0,0 +1,33 @@
+load(":rules.bzl", "needs_toolchain", "optional_dep", "toolchain")
+
+# This target is built in the `cat` platform. It gets a dependency chain like
+#
+# top (cat)
+#   toolchain (cat) (dog)
+#     base (dog)
+#
+# In the past, a limitation of the toolchain deps implementation would configure the toolchain deps
+# of `:top` in all exec platforms, even if the first one (`:dog` in this case) sufficed.
This +# resulted in an incorrectly reported cycle like this: +# +# top (cat) +# toolchain (cat) (cat) +# base (cat) +# top (cat) +needs_toolchain( + name = "top", + toolchain = ":toolchain", +) + +toolchain( + name = "toolchain", + exec_dep = ":base", +) + +optional_dep( + name = "base", + dep = select({ + "//config:cat": ":top", + "//config:dog": None, + }), +) diff --git a/tests/core/configurations/configurations/test_toolchain_overconfiguration_data/config/TARGETS.fixture b/tests/core/configurations/configurations/test_toolchain_overconfiguration_data/config/TARGETS.fixture new file mode 100644 index 0000000000000..4085e711cadac --- /dev/null +++ b/tests/core/configurations/configurations/test_toolchain_overconfiguration_data/config/TARGETS.fixture @@ -0,0 +1,20 @@ +load(":defs.bzl", "constraint_setting", "constraint_value", "platform", "platforms") + +constraint_setting(name = "animal") +constraint_value(name = "cat", setting = ":animal") +constraint_value(name = "dog", setting = ":animal") + +platform( + name = "dog_platform", + configuration = [":dog"], +) + +platform( + name = "cat_platform", + configuration = [":cat"], +) + +platforms( + name = "platforms", + platforms = [":dog_platform", ":cat_platform"], +) diff --git a/tests/core/configurations/configurations/test_toolchain_overconfiguration_data/config/defs.bzl b/tests/core/configurations/configurations/test_toolchain_overconfiguration_data/config/defs.bzl new file mode 100644 index 0000000000000..3b16bfc1e9a68 --- /dev/null +++ b/tests/core/configurations/configurations/test_toolchain_overconfiguration_data/config/defs.bzl @@ -0,0 +1,70 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _constraint_setting(ctx): + return [DefaultInfo(), ConstraintSettingInfo(label = ctx.label.raw_target())] + +constraint_setting = rule( + impl = _constraint_setting, + attrs = {}, +) + +def _constraint_value(ctx): + constraint_value = ConstraintValueInfo( + setting = ctx.attrs.setting[ConstraintSettingInfo], + label = ctx.label.raw_target(), + ) + return [ + DefaultInfo(), + constraint_value, + # Provide `ConfigurationInfo` from `constraint_value` so it could be used as select key. 
+ ConfigurationInfo(constraints = { + constraint_value.setting.label: constraint_value, + }, values = {}), + ] + +constraint_value = rule( + impl = _constraint_value, + attrs = {"setting": attrs.dep(providers = [ConstraintSettingInfo])}, +) + +def _platform(ctx): + constraints = {} + values = {} + for x in ctx.attrs.configuration: + constraints |= x[ConfigurationInfo].constraints + values |= x[ConfigurationInfo].values + cfg = ConfigurationInfo(constraints = constraints, values = values) + + platform = ExecutionPlatformInfo( + label = ctx.label.raw_target(), + configuration = cfg, + executor_config = CommandExecutorConfig( + local_enabled = True, + remote_enabled = False, + ), + ) + + return [DefaultInfo(), platform, PlatformInfo(label = str(ctx.label.raw_target()), configuration = cfg)] + +platform = rule( + impl = _platform, + attrs = {"configuration": attrs.list(attrs.dep(providers = [ConfigurationInfo]))}, +) + +def _platforms(ctx): + return [ + DefaultInfo(), + ExecutionPlatformRegistrationInfo( + platforms = [x[ExecutionPlatformInfo] for x in ctx.attrs.platforms], + ), + ] + +platforms = rule( + impl = _platforms, + attrs = {"platforms": attrs.list(attrs.dep(providers = [ExecutionPlatformInfo]))}, +) diff --git a/tests/core/configurations/configurations/test_toolchain_overconfiguration_data/prelude.bzl b/tests/core/configurations/configurations/test_toolchain_overconfiguration_data/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/configurations/configurations/test_toolchain_overconfiguration_data/rules.bzl b/tests/core/configurations/configurations/test_toolchain_overconfiguration_data/rules.bzl new file mode 100644 index 0000000000000..12e5cdab8a04b --- /dev/null +++ b/tests/core/configurations/configurations/test_toolchain_overconfiguration_data/rules.bzl @@ -0,0 +1,31 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
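+
+# These rules are only inspected through cquery; none of them is ever built,
+# so every implementation simply fails.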
+ +def _impl(_ctx): + fail("cquery only test") + +needs_toolchain = rule( + impl = _impl, + attrs = { + "toolchain": attrs.toolchain_dep(), + }, +) + +toolchain = rule( + impl = _impl, + attrs = { + "exec_dep": attrs.exec_dep(), + }, + is_toolchain_rule = True, +) + +optional_dep = rule( + impl = _impl, + attrs = { + "dep": attrs.option(attrs.dep()), + }, +) diff --git a/tests/core/configurations/configurations/transition/BUCK b/tests/core/configurations/configurations/transition/BUCK new file mode 100644 index 0000000000000..cd4b64298db8c --- /dev/null +++ b/tests/core/configurations/configurations/transition/BUCK @@ -0,0 +1,45 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_access_attr", + srcs = ["test_access_attr.py"], + data_dir = "test_access_attr_data", +) + +buck2_e2e_test( + name = "test_attr", + srcs = ["test_attr.py"], + data_dir = "test_attr_data", +) + +buck2_e2e_test( + name = "test_attr_split", + srcs = ["test_attr_split.py"], + data_dir = "test_attr_split_data", +) + +buck2_e2e_test( + name = "test_constructor_validation", + srcs = ["test_constructor_validation.py"], + data_dir = "test_constructor_validation_data", +) + +buck2_e2e_test( + name = "test_rule", + srcs = ["test_rule.py"], + data_dir = "test_rule_data", +) + +buck2_e2e_test( + name = "test_rule_infinite_bug", + srcs = ["test_rule_infinite_bug.py"], + data_dir = "test_rule_infinite_bug_data", +) + +buck2_e2e_test( + name = "test_select_in_transition_attr", + srcs = ["test_select_in_transition_attr.py"], + data_dir = "test_select_in_transition_attr_data", +) diff --git a/tests/core/configurations/configurations/transition/test_access_attr.py b/tests/core/configurations/configurations/transition/test_access_attr.py new file mode 100644 index 0000000000000..df4f09e771d73 --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_access_attr.py @@ -0,0 +1,18 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_configuration_transition_access_attr(buck: Buck) -> None: + # Trigger assertions in transition function implementation. + await buck.cquery("root//:faithful") diff --git a/tests/core/configurations/configurations/transition/test_access_attr_data/.buckconfig b/tests/core/configurations/configurations/transition/test_access_attr_data/.buckconfig new file mode 100644 index 0000000000000..3540a7f78619a --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_access_attr_data/.buckconfig @@ -0,0 +1,12 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . 
+nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled diff --git a/tests/core/configurations/configurations/transition/test_access_attr_data/.buckroot b/tests/core/configurations/configurations/transition/test_access_attr_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/configurations/configurations/transition/test_access_attr_data/TARGETS.fixture b/tests/core/configurations/configurations/transition/test_access_attr_data/TARGETS.fixture new file mode 100644 index 0000000000000..e85a6f6c2cc73 --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_access_attr_data/TARGETS.fixture @@ -0,0 +1,11 @@ +load(":rules.bzl", "my_java_library") + +platform( + name = "default_p", +) + +my_java_library( + name = "faithful", + java_version = 14, + default_target_platform = ":default_p", +) diff --git a/tests/core/configurations/configurations/transition/test_access_attr_data/rules.bzl b/tests/core/configurations/configurations/transition/test_access_attr_data/rules.bzl new file mode 100644 index 0000000000000..4eb58c6c52cbb --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_access_attr_data/rules.bzl @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load(":tr.bzl", "tr") + +def _my_java_library(ctx): + _ = ctx # buildifier: disable=unused-variable + fail("we don't build it in test") + +my_java_library = rule( + impl = _my_java_library, + attrs = { + "java_version": attrs.int(), + }, + cfg = tr, +) diff --git a/tests/core/configurations/configurations/transition/test_access_attr_data/tr.bzl b/tests/core/configurations/configurations/transition/test_access_attr_data/tr.bzl new file mode 100644 index 0000000000000..ee368de3a3dce --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_access_attr_data/tr.bzl @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _tr(platform, refs, attrs): + if attrs.java_version != 14: + fail("java_version must be 14 in this test") + _ = refs # buildifier: disable=unused-variable + return platform + +tr = transition( + impl = _tr, + refs = {}, + attrs = [ + "java_version", + ], +) diff --git a/tests/core/configurations/configurations/transition/test_attr.py b/tests/core/configurations/configurations/transition/test_attr.py new file mode 100644 index 0000000000000..347d379eb5502 --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_attr.py @@ -0,0 +1,21 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_configuration_transition_attr(buck: Buck) -> None: + result = await buck.cquery("deps(root//:the-test)") + result.check_returncode() + # Default configuration is iphoneos and it should be transitioned to watchos + assert ":watchos_resource" in result.stdout + assert ":default_resource" not in result.stdout diff --git a/tests/core/configurations/configurations/transition/test_attr_data/.buckconfig b/tests/core/configurations/configurations/transition/test_attr_data/.buckconfig new file mode 100644 index 0000000000000..3540a7f78619a --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_attr_data/.buckconfig @@ -0,0 +1,12 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . +nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled diff --git a/tests/core/configurations/configurations/transition/test_attr_data/.buckroot b/tests/core/configurations/configurations/transition/test_attr_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/configurations/configurations/transition/test_attr_data/TARGETS.fixture b/tests/core/configurations/configurations/transition/test_attr_data/TARGETS.fixture new file mode 100644 index 0000000000000..5ee0a1cc83797 --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_attr_data/TARGETS.fixture @@ -0,0 +1,43 @@ +load(":rules.bzl", "my_alias", "my_little_iphone_binary", "my_resource") + +my_resource(name = "watchos_resource") + +my_resource(name = "default_resource") + +constraint_setting( + name = "os", +) + +constraint_value( + name = "watchos", + constraint_setting = ":os", +) + +constraint_value( + name = "iphoneos", + constraint_setting = ":os", +) + +platform( + name = "iphoneos-p", + constraint_values = [ + ":iphoneos", + ], +) + +my_alias( + name = "resource", + to = select({ + ":watchos": ":watchos_resource", + "DEFAULT": ":default_resource", + }), +) + +my_little_iphone_binary( + name = "the-test", + default_target_platform = ":iphoneos-p", + # `resource` should be transitioned to watchos, + # but selects are not resolved in attributes, + # so we have a separate alias rule for the test. + watch_resource = ":resource", +) diff --git a/tests/core/configurations/configurations/transition/test_attr_data/rules.bzl b/tests/core/configurations/configurations/transition/test_attr_data/rules.bzl new file mode 100644 index 0000000000000..e3f6fb2f3332b --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_attr_data/rules.bzl @@ -0,0 +1,21 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
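+
+# cquery-only rules: `watch_resource` is declared as a `transition_dep`, so
+# the dependency it points to is configured with `iphone_to_watch_transition`.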
+ +load(":tr.bzl", "iphone_to_watch_transition") + +def _nop_op(*_args, **_kwargs): + fail("this is cquery only test, no rules are executed") + +my_little_iphone_binary = rule(impl = _nop_op, attrs = { + "watch_resource": attrs.transition_dep(cfg = iphone_to_watch_transition), +}) + +my_resource = rule(impl = _nop_op, attrs = {}) + +my_alias = rule(impl = _nop_op, attrs = { + "to": attrs.dep(), +}) diff --git a/tests/core/configurations/configurations/transition/test_attr_data/tr.bzl b/tests/core/configurations/configurations/transition/test_attr_data/tr.bzl new file mode 100644 index 0000000000000..781c51092cddd --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_attr_data/tr.bzl @@ -0,0 +1,28 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(platform, refs): + watchos = refs.watchos[ConstraintValueInfo] + constraints = { + s: v + for (s, v) in platform.configuration.constraints.items() + if s != refs.os[ConstraintSettingInfo].label + } + constraints[watchos.setting.label] = watchos + new_cfg = ConfigurationInfo( + constraints = constraints, + values = platform.configuration.values, + ) + return PlatformInfo( + label = "", + configuration = new_cfg, + ) + +iphone_to_watch_transition = transition(impl = _impl, refs = { + "os": "root//:os", + "watchos": "root//:watchos", +}) diff --git a/tests/core/configurations/configurations/transition/test_attr_split.py b/tests/core/configurations/configurations/transition/test_attr_split.py new file mode 100644 index 0000000000000..48070729aff53 --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_attr_split.py @@ -0,0 +1,28 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_configuration_transition_attr_split_cquery(buck: Buck) -> None: + result = await buck.cquery("deps(root//:bb)") + result.check_returncode() + # Check both transitioned deps are present. + assert "root//:code (arm64#" in result.stdout + assert "root//:code (arm32#" in result.stdout + + +@buck_test() +async def test_configuration_transition_attr_split_build(buck: Buck) -> None: + result = await buck.build("root//:bb") + result.check_returncode() + # Rule implementations do the assertions. diff --git a/tests/core/configurations/configurations/transition/test_attr_split_data/.buckconfig b/tests/core/configurations/configurations/transition/test_attr_split_data/.buckconfig new file mode 100644 index 0000000000000..3540a7f78619a --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_attr_split_data/.buckconfig @@ -0,0 +1,12 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . 
+nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled diff --git a/tests/core/configurations/configurations/transition/test_attr_split_data/.buckroot b/tests/core/configurations/configurations/transition/test_attr_split_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/configurations/configurations/transition/test_attr_split_data/TARGETS.fixture b/tests/core/configurations/configurations/transition/test_attr_split_data/TARGETS.fixture new file mode 100644 index 0000000000000..13dec5924a57f --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_attr_split_data/TARGETS.fixture @@ -0,0 +1,31 @@ +load(":rules.bzl", "my_android_binary", "my_cxx_library") + +constraint_setting( + name = "cpu", +) + +constraint_value( + name = "arm64", + constraint_setting = ":cpu", +) + +platform( + name = "default_p", + constraint_values = [ + ], +) + +constraint_value( + name = "arm32", + constraint_setting = ":cpu", +) + +my_cxx_library( + name = "code", +) + +my_android_binary( + name = "bb", + native_deps = [":code"], + default_target_platform = ":default_p", +) diff --git a/tests/core/configurations/configurations/transition/test_attr_split_data/rules.bzl b/tests/core/configurations/configurations/transition/test_attr_split_data/rules.bzl new file mode 100644 index 0000000000000..5da03b0f533f2 --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_attr_split_data/rules.bzl @@ -0,0 +1,43 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load(":tr.bzl", "cpu_split_transition") + +MyArtifact = provider(fields = ["test_data"]) + +def _assert(cond): + if not cond: + fail("assertion failed") + +def _binary_impl(ctx): + # Check deps come as dict from split transition label to providers. + [lib_deps] = ctx.attrs.native_deps + _assert(type(lib_deps) == type({})) + _assert("a64" == lib_deps["arm64"][MyArtifact].test_data) + _assert("a32" == lib_deps["arm32"][MyArtifact].test_data) + return [ + DefaultInfo(), + ] + +def _library_impl(ctx): + # This is not how configurations are meant to be used, we are testing internals here. + if "//:code (arm32#" in str(ctx.label): + test_data = "a32" + elif "//:code (arm64#" in str(ctx.label): + test_data = "a64" + else: + fail("unknown configuration") + return [ + DefaultInfo(), + MyArtifact(test_data = test_data), + ] + +my_android_binary = rule(impl = _binary_impl, attrs = { + "native_deps": attrs.list(attrs.split_transition_dep(cfg = cpu_split_transition)), +}) + +my_cxx_library = rule(impl = _library_impl, attrs = {}) diff --git a/tests/core/configurations/configurations/transition/test_attr_split_data/tr.bzl b/tests/core/configurations/configurations/transition/test_attr_split_data/tr.bzl new file mode 100644 index 0000000000000..0acbeda3e4eb8 --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_attr_split_data/tr.bzl @@ -0,0 +1,26 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
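+
+# Split transition: instead of a single `PlatformInfo`, the implementation
+# returns a dict mapping each split label ("arm32", "arm64") to the platform
+# used to configure the corresponding copy of the dependency.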
+ +def _impl(platform, refs): + _ = platform # buildifier: disable=unused-variable + cpu = refs.cpu + arm64 = refs.arm64 + arm32 = refs.arm32 + return { + "arm32": PlatformInfo(label = "arm32", configuration = ConfigurationInfo(constraints = { + cpu[ConstraintSettingInfo].label: arm32[ConstraintValueInfo], + }, values = {})), + "arm64": PlatformInfo(label = "arm64", configuration = ConfigurationInfo(constraints = { + cpu[ConstraintSettingInfo].label: arm64[ConstraintValueInfo], + }, values = {})), + } + +cpu_split_transition = transition(impl = _impl, refs = { + "arm32": "root//:arm32", + "arm64": "root//:arm64", + "cpu": "root//:cpu", +}, split = True) diff --git a/tests/core/configurations/configurations/transition/test_constructor_validation.py b/tests/core/configurations/configurations/transition/test_constructor_validation.py new file mode 100644 index 0000000000000..fa303fb34c5f4 --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_constructor_validation.py @@ -0,0 +1,42 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_construction_validation_good(buck: Buck) -> None: + await buck.targets("//good:") + + +@buck_test() +async def test_construction_validation_bad(buck: Buck) -> None: + await expect_failure( + buck.targets("//bad:"), + stderr_regex=r"`impl` function signature is incorrect", + ) + + +@buck_test() +async def test_construction_validation_bad_param_types(buck: Buck) -> None: + await expect_failure( + buck.targets("//bad_param_types:"), + stderr_regex=r"`impl` function signature is incorrect", + ) + + +@buck_test() +async def test_construction_validation_bad_return_type(buck: Buck) -> None: + await expect_failure( + buck.targets("//bad_return_type:"), + stderr_regex=r"`impl` function signature is incorrect", + ) diff --git a/tests/core/configurations/configurations/transition/test_constructor_validation_data/.buckconfig b/tests/core/configurations/configurations/transition/test_constructor_validation_data/.buckconfig new file mode 100644 index 0000000000000..bfad0ca2dc2ed --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_constructor_validation_data/.buckconfig @@ -0,0 +1,12 @@ +[buildfile] +name=TARGETS.fixture + +[cells] +root = . 
+nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled diff --git a/tests/core/configurations/configurations/transition/test_constructor_validation_data/.buckroot b/tests/core/configurations/configurations/transition/test_constructor_validation_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/configurations/configurations/transition/test_constructor_validation_data/bad/TARGETS.fixture b/tests/core/configurations/configurations/transition/test_constructor_validation_data/bad/TARGETS.fixture new file mode 100644 index 0000000000000..43c519b37ac57 --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_constructor_validation_data/bad/TARGETS.fixture @@ -0,0 +1,5 @@ +# @nolint + +load(":bad.bzl", "bad") + +_keep_the_import = bad diff --git a/tests/core/configurations/configurations/transition/test_constructor_validation_data/bad/bad.bzl b/tests/core/configurations/configurations/transition/test_constructor_validation_data/bad/bad.bzl new file mode 100644 index 0000000000000..df805236169bd --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_constructor_validation_data/bad/bad.bzl @@ -0,0 +1,17 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# @nolint + +def _impl(foo, bar): + _ignore = (foo, bar) + pass + +bad = transition( + impl = _impl, + refs = {}, +) diff --git a/tests/core/configurations/configurations/transition/test_constructor_validation_data/bad_param_types/TARGETS.fixture b/tests/core/configurations/configurations/transition/test_constructor_validation_data/bad_param_types/TARGETS.fixture new file mode 100644 index 0000000000000..6d6f4a255f8b7 --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_constructor_validation_data/bad_param_types/TARGETS.fixture @@ -0,0 +1,5 @@ +# @nolint + +load(":bad_param_types.bzl", "bad_param_types") + +_keep_the_import = bad_param_types diff --git a/tests/core/configurations/configurations/transition/test_constructor_validation_data/bad_param_types/bad_param_types.bzl b/tests/core/configurations/configurations/transition/test_constructor_validation_data/bad_param_types/bad_param_types.bzl new file mode 100644 index 0000000000000..ef4edcbab186c --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_constructor_validation_data/bad_param_types/bad_param_types.bzl @@ -0,0 +1,18 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# @nolint + +# Type of `platform` must be `PlatformInfo`. 
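+# Annotating it as `str` makes the signature invalid, which this fixture relies on.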
+def _impl(platform: str, refs): + _ignore = (platform, refs) + pass + +bad_param_types = transition( + impl = _impl, + refs = {}, +) diff --git a/tests/core/configurations/configurations/transition/test_constructor_validation_data/bad_return_type/TARGETS.fixture b/tests/core/configurations/configurations/transition/test_constructor_validation_data/bad_return_type/TARGETS.fixture new file mode 100644 index 0000000000000..c63c6c456acdd --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_constructor_validation_data/bad_return_type/TARGETS.fixture @@ -0,0 +1,5 @@ +# @nolint + +load(":bad_return_type.bzl", "bad_return_type") + +_keep_the_import = bad_return_type diff --git a/tests/core/configurations/configurations/transition/test_constructor_validation_data/bad_return_type/bad_return_type.bzl b/tests/core/configurations/configurations/transition/test_constructor_validation_data/bad_return_type/bad_return_type.bzl new file mode 100644 index 0000000000000..477f3409cc9bc --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_constructor_validation_data/bad_return_type/bad_return_type.bzl @@ -0,0 +1,19 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# @nolint + +# This is split transition, return type should be `dict[str, PlatformInfo]` +def _impl(platform: PlatformInfo, refs) -> PlatformInfo: + _ignore = (platform, refs) + fail() + +bad_return_type = transition( + impl = _impl, + refs = {}, + split = True, +) diff --git a/tests/core/configurations/configurations/transition/test_constructor_validation_data/good/TARGETS.fixture b/tests/core/configurations/configurations/transition/test_constructor_validation_data/good/TARGETS.fixture new file mode 100644 index 0000000000000..11c7fbc8804eb --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_constructor_validation_data/good/TARGETS.fixture @@ -0,0 +1,5 @@ +# @nolint + +load(":good.bzl", "good") + +_keep_the_import = good diff --git a/tests/core/configurations/configurations/transition/test_constructor_validation_data/good/good.bzl b/tests/core/configurations/configurations/transition/test_constructor_validation_data/good/good.bzl new file mode 100644 index 0000000000000..cc5ad2dbf2896 --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_constructor_validation_data/good/good.bzl @@ -0,0 +1,17 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# @nolint + +def _impl(platform, refs): + _ignore = (platform, refs) + pass + +good = transition( + impl = _impl, + refs = {}, +) diff --git a/tests/core/configurations/configurations/transition/test_rule.py b/tests/core/configurations/configurations/transition/test_rule.py new file mode 100644 index 0000000000000..17108804c966b --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_rule.py @@ -0,0 +1,66 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json +import re + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +def _replace_hash(s: str) -> str: + return re.sub(r"\b[0-9a-f]{16}\b", "", s) + + +@buck_test() +async def test_configuration_transition_rule_cquery(buck: Buck) -> None: + # For the reference, cquery output is: P467297091. Note the "forward" node. + result = await buck.cquery("deps(root//:the-test)") + result.check_returncode() + # Watchos resource should be present twice: as forward and as transitioned. + assert result.stdout.count(":watchos-resource") == 2 + # No transition for default resource, so it appears once in cquery output. + assert result.stdout.count(":default-resource") == 1 + + +@buck_test() +async def test_configuration_transition_rule_cquery_actual_attr(buck: Buck) -> None: + result = await buck.cquery( + "--target-platforms=root//:iphoneos-p", + "root//:watchos-resource", + "--output-attribute=actual", + ) + result.check_returncode() + q = json.loads(result.stdout) + + # Each key in the JSON output is a different configuration of the same rule `watchos-resource` + configuration_default = "root//:watchos-resource (#)" + configuration_transition = "root//:watchos-resource (root//:iphoneos-p#)" + configurations = [_replace_hash(c) for c in q.keys()] + assert configuration_default in configurations + assert configuration_transition in configurations + + config_default_has_attribute_actual = False + config_transition_has_no_attributes = False + for config in q.keys(): + if q[config].get("actual"): + config_default_has_attribute_actual = True + if not q[config].values(): + config_transition_has_no_attributes = True + + assert config_default_has_attribute_actual + assert config_transition_has_no_attributes + + +@buck_test() +async def test_configuration_transition_rule_build(buck: Buck) -> None: + # Rule implementations do the assertions. + result = await buck.build("root//:the-test") + result.check_returncode() diff --git a/tests/core/configurations/configurations/transition/test_rule_data/.buckconfig b/tests/core/configurations/configurations/transition/test_rule_data/.buckconfig new file mode 100644 index 0000000000000..3540a7f78619a --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_rule_data/.buckconfig @@ -0,0 +1,12 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . 
+nano_prelude = nano_prelude
+
+[cell_aliases]
+ prelude = nano_prelude
+
+[external_cells]
+ nano_prelude = bundled
diff --git a/tests/core/configurations/configurations/transition/test_rule_data/.buckroot b/tests/core/configurations/configurations/transition/test_rule_data/.buckroot
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/configurations/configurations/transition/test_rule_data/TARGETS.fixture b/tests/core/configurations/configurations/transition/test_rule_data/TARGETS.fixture
new file mode 100644
index 0000000000000..583cd3159efe6
--- /dev/null
+++ b/tests/core/configurations/configurations/transition/test_rule_data/TARGETS.fixture
@@ -0,0 +1,49 @@
+load(":rules.bzl", "my_default_resource", "my_little_iphone_binary", "my_watchos_resource")
+
+constraint_setting(
+    name = "os",
+)
+
+constraint_value(
+    name = "watchos",
+    constraint_setting = ":os",
+)
+
+constraint_value(
+    name = "iphoneos",
+    constraint_setting = ":os",
+)
+
+platform(
+    name = "iphoneos-p",
+    constraint_values = [
+        ":iphoneos",
+    ],
+)
+
+my_watchos_resource(
+    name = "watchos-resource",
+    # This target should be transitioned to watchos, because that is what the rule does.
+    resource_value = select({
+        ":watchos": "watchos",
+        "DEFAULT": "DEFAULT",
+    }),
+)
+
+my_default_resource(
+    name = "default-resource",
+    # This rule has no transition, so the select should resolve to DEFAULT.
+    resource_value = select({
+        ":watchos": "watchos",
+        "DEFAULT": "DEFAULT",
+    }),
+)
+
+my_little_iphone_binary(
+    name = "the-test",
+    default_target_platform = ":iphoneos-p",
+    resources = [
+        ":default-resource",
+        ":watchos-resource",
+    ],
+)
diff --git a/tests/core/configurations/configurations/transition/test_rule_data/rules.bzl b/tests/core/configurations/configurations/transition/test_rule_data/rules.bzl
new file mode 100644
index 0000000000000..a684fe93f93bb
--- /dev/null
+++ b/tests/core/configurations/configurations/transition/test_rule_data/rules.bzl
@@ -0,0 +1,54 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+load(":tr.bzl", "iphone_to_watch_transition")
+
+def _my_little_iphone_binary_impl(ctx):
+    [_default_resource, _watchos_resource] = ctx.attrs.resources
+    return [
+        DefaultInfo(),
+    ]
+
+my_little_iphone_binary = rule(
+    impl = _my_little_iphone_binary_impl,
+    attrs = {
+        "resources": attrs.list(attrs.dep()),
+    },
+)
+
+def _my_watchos_resource_impl(ctx):
+    # Assert that the configuration is indeed transitioned, and that the select
+    # is resolved in the correct configuration.
+    if ctx.attrs.resource_value != "watchos":
+        fail("Expected resource value to be watchos, got: {}".format(ctx.attrs.resource_value))
+    return [
+        DefaultInfo(),
+    ]
+
+my_watchos_resource = rule(
+    impl = _my_watchos_resource_impl,
+    # Resource with transition to watchOS.
+    cfg = iphone_to_watch_transition,
+    attrs = {
+        "resource_value": attrs.string(),
+    },
+)
+
+def _my_default_resource_impl(ctx):
+    if ctx.attrs.resource_value != "DEFAULT":
+        fail("Expected resource value to be DEFAULT, got: {}".format(ctx.attrs.resource_value))
+    return [
+        DefaultInfo(),
+    ]
+
+my_default_resource = rule(
+    impl = _my_default_resource_impl,
+    # No transition here.
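+    # The select on `resource_value` is resolved in the untransitioned configuration.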
+ attrs = { + "resource_value": attrs.string(), + }, +) diff --git a/tests/core/configurations/configurations/transition/test_rule_data/tr.bzl b/tests/core/configurations/configurations/transition/test_rule_data/tr.bzl new file mode 100644 index 0000000000000..5c4bbe88eb97d --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_rule_data/tr.bzl @@ -0,0 +1,28 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(platform: PlatformInfo, refs: struct) -> PlatformInfo: + watchos = refs.watchos[ConstraintValueInfo] + constraints = { + s: v + for (s, v) in platform.configuration.constraints.items() + if s != refs.os[ConstraintSettingInfo].label + } + constraints[watchos.setting.label] = watchos + new_cfg = ConfigurationInfo( + constraints = constraints, + values = platform.configuration.values, + ) + return PlatformInfo( + label = "", + configuration = new_cfg, + ) + +iphone_to_watch_transition = transition(impl = _impl, refs = { + "os": "root//:os", + "watchos": "root//:watchos", +}) diff --git a/tests/core/configurations/configurations/transition/test_rule_infinite_bug.py b/tests/core/configurations/configurations/transition/test_rule_infinite_bug.py new file mode 100644 index 0000000000000..d605c2676e0b4 --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_rule_infinite_bug.py @@ -0,0 +1,23 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_configuration_transition_rule_infinite_bug(buck: Buck) -> None: + result = await expect_failure( + buck.cquery( + "deps(root//:xx)", + ) + ) + assert "did not produce identical" in result.stderr diff --git a/tests/core/configurations/configurations/transition/test_rule_infinite_bug_data/.buckconfig b/tests/core/configurations/configurations/transition/test_rule_infinite_bug_data/.buckconfig new file mode 100644 index 0000000000000..3540a7f78619a --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_rule_infinite_bug_data/.buckconfig @@ -0,0 +1,12 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . 
+nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled diff --git a/tests/core/configurations/configurations/transition/test_rule_infinite_bug_data/.buckroot b/tests/core/configurations/configurations/transition/test_rule_infinite_bug_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/configurations/configurations/transition/test_rule_infinite_bug_data/TARGETS.fixture b/tests/core/configurations/configurations/transition/test_rule_infinite_bug_data/TARGETS.fixture new file mode 100644 index 0000000000000..964d1ad418721 --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_rule_infinite_bug_data/TARGETS.fixture @@ -0,0 +1,8 @@ +load(":rules.bzl", "my_rule") + +platform(name = "p") + +my_rule( + name = "xx", + default_target_platform = ":p", +) diff --git a/tests/core/configurations/configurations/transition/test_rule_infinite_bug_data/rules.bzl b/tests/core/configurations/configurations/transition/test_rule_infinite_bug_data/rules.bzl new file mode 100644 index 0000000000000..1778fc75243bc --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_rule_infinite_bug_data/rules.bzl @@ -0,0 +1,14 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load(":tr.bzl", "transition_increase_label_len") + +def _impl(ctx): + _ = ctx # buildifier: disable=unused-variable + fail("Don't care") + +my_rule = rule(impl = _impl, attrs = {}, cfg = transition_increase_label_len) diff --git a/tests/core/configurations/configurations/transition/test_rule_infinite_bug_data/tr.bzl b/tests/core/configurations/configurations/transition/test_rule_infinite_bug_data/tr.bzl new file mode 100644 index 0000000000000..777f5e6991956 --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_rule_infinite_bug_data/tr.bzl @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(platform, refs): + _ = refs # buildifier: disable=unused-variable + return PlatformInfo( + # Increase the length of the label, so it overflows (incorrectly). + label = platform.label + "!hello!", + configuration = platform.configuration, + ) + +transition_increase_label_len = transition(impl = _impl, refs = {}) diff --git a/tests/core/configurations/configurations/transition/test_select_in_transition_attr.py b/tests/core/configurations/configurations/transition/test_select_in_transition_attr.py new file mode 100644 index 0000000000000..7dc2e1b7cc3b1 --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_select_in_transition_attr.py @@ -0,0 +1,53 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_transition_success_if_attr_value_has_not_changed(buck: Buck) -> None: + await buck.build("root//:target_where_transition_does_not_change_attr") + + +@buck_test() +async def test_transition_dep_success_if_attr_value_has_not_changed(buck: Buck) -> None: + await buck.build("root//:target_with_transition_dep") + + +@buck_test() +async def test_transition_failed_if_attr_value_has_changed(buck: Buck) -> None: + err_msg = ( + r"Target root//:target_where_transition_changes_attr configuration transitioned\n" + r"\s+old: root//:iphone#.*\n" + r"\s+new: #.*\n" + r"\s+but attribute: extra\n" + r"\s+resolved with old configuration to: \"phone\"\n" + r"\s+resolved with new configuration to: \"watch\"" + ) + + await expect_failure( + buck.build("root//:target_where_transition_changes_attr"), + stderr_regex=err_msg, + ) + + +@buck_test() +async def test_transition_failed_if_attr_value_cycle(buck: Buck) -> None: + err_msg = ( + r"Configured target cycle detected \(`->` means \"depends on\"\):\n" + r"\s+root//:target_where_transition_cycles_via_changed_attrs \(#.*\) ->.*\n" + r"\s+root//:target_where_transition_cycles_via_changed_attrs \(#.*\) ->.*\n" + ) + + await expect_failure( + buck.build("root//:target_where_transition_cycles_via_changed_attrs"), + stderr_regex=err_msg, + ) diff --git a/tests/core/configurations/configurations/transition/test_select_in_transition_attr_data/.buckconfig b/tests/core/configurations/configurations/transition/test_select_in_transition_attr_data/.buckconfig new file mode 100644 index 0000000000000..3540a7f78619a --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_select_in_transition_attr_data/.buckconfig @@ -0,0 +1,12 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . 
+nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled diff --git a/tests/core/configurations/configurations/transition/test_select_in_transition_attr_data/.buckroot b/tests/core/configurations/configurations/transition/test_select_in_transition_attr_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/configurations/configurations/transition/test_select_in_transition_attr_data/TARGETS.fixture b/tests/core/configurations/configurations/transition/test_select_in_transition_attr_data/TARGETS.fixture new file mode 100644 index 0000000000000..99d505fc55c07 --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_select_in_transition_attr_data/TARGETS.fixture @@ -0,0 +1,64 @@ +load(":rules.bzl", "any_rule", "test_rule", "test_rule_with_transition_dep") + +constraint_setting( + name = "os", +) + +constraint_value( + name = "watchos", + constraint_setting = ":os", +) + +constraint_value( + name = "iphoneos", + constraint_setting = ":os", +) + +test_rule( + name = "target_where_transition_does_not_change_attr", + default_target_platform = ":iwatch", + device = select({ + ":iphoneos": "phone", + ":watchos": "watch", + }), +) + +test_rule( + name = "target_where_transition_changes_attr", + default_target_platform = ":iphone", + device = "watch", + extra = select({ + ":iphoneos": "phone", + ":watchos": "watch", + }), +) + +test_rule( + name = "target_where_transition_cycles_via_changed_attrs", + default_target_platform = ":iphone", + device = select({ + ":iphoneos": "watch", + ":watchos": "phone", + }), +) + +test_rule_with_transition_dep( + name = "target_with_transition_dep", + default_target_platform = ":iphone", + attr_with_transition = ":foo", + device = select({ + ":iphoneos": "phone", + ":watchos": "watch", + }), +) + +any_rule( + name = "foo", + another_device = select({ + ":iphoneos": "phone", + ":watchos": "watch", + }), +) + +platform(name = "iphone", constraint_values = [":iphoneos"]) +platform(name = "iwatch", constraint_values = [":watchos"]) diff --git a/tests/core/configurations/configurations/transition/test_select_in_transition_attr_data/rules.bzl b/tests/core/configurations/configurations/transition/test_select_in_transition_attr_data/rules.bzl new file mode 100644 index 0000000000000..cd6744cb677c5 --- /dev/null +++ b/tests/core/configurations/configurations/transition/test_select_in_transition_attr_data/rules.bzl @@ -0,0 +1,52 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
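+
+# Attr-aware transition: `attrs` carries the values of the declared `device`
+# and `extra` attributes, resolved against the pre-transition configuration;
+# the implementation picks the target OS constraint from `attrs.device`.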
+
+def _impl_tr(platform, refs, attrs):
+    constraints = {
+        s: v
+        for (s, v) in platform.configuration.constraints.items()
+        if s != refs.os[ConstraintSettingInfo].label
+    }
+
+    watchos = refs.watchos[ConstraintValueInfo]
+    iphoneos = refs.iphoneos[ConstraintValueInfo]
+    if attrs.device == "watch":
+        constraints[watchos.setting.label] = watchos
+    elif attrs.device == "phone":
+        constraints[iphoneos.setting.label] = iphoneos
+    else:
+        fail()
+    new_cfg = ConfigurationInfo(
+        constraints = constraints,
+        values = platform.configuration.values,
+    )
+    return PlatformInfo(
+        label = "",
+        configuration = new_cfg,
+    )
+
+iphone_or_watch_transition = transition(
+    impl = _impl_tr,
+    refs = {
+        "iphoneos": "root//:iphoneos",
+        "os": "root//:os",
+        "watchos": "root//:watchos",
+    },
+    attrs = [
+        "device",
+        "extra",
+    ],
+)
+
+def _impl(_ctx):
+    return [DefaultInfo()]
+
+test_rule = rule(impl = _impl, attrs = {"device": attrs.string(), "extra": attrs.string(default = "")}, cfg = iphone_or_watch_transition)
+
+test_rule_with_transition_dep = rule(impl = _impl, attrs = {"attr_with_transition": attrs.transition_dep(cfg = iphone_or_watch_transition), "device": attrs.string(), "extra": attrs.string(default = "")})
+
+any_rule = rule(impl = _impl, attrs = {"another_device": attrs.string()})
diff --git a/tests/core/console/BUCK b/tests/core/console/BUCK
new file mode 100644
index 0000000000000..d8b36cb0a859c
--- /dev/null
+++ b/tests/core/console/BUCK
@@ -0,0 +1,29 @@
+load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test")
+load("@fbcode_macros//build_defs:native_rules.bzl", "buck_filegroup")
+
+oncall("build_infra")
+
+buck2_e2e_test(
+    name = "test_emit_console_preferences",
+    srcs = ["test_emit_console_preferences.py"],
+    data_dir = "test_emit_console_preferences_data",
+    deps = [
+        "fbcode//buck2/tests/e2e_util:utils",
+    ],
+)
+
+buck2_e2e_test(
+    name = "test_console",
+    srcs = ["test_console.py"],
+    data_dir = "test_console_data",
+    env = {
+        "FIXTURES": "$(location :fixtures)",
+    },
+)
+
+buck_filegroup(
+    name = "fixtures",
+    srcs = glob(
+        ["fixtures/**"],
+    ),
+)
diff --git a/tests/core/console/fixtures/README b/tests/core/console/fixtures/README
new file mode 100644
index 0000000000000..da14adca0acda
--- /dev/null
+++ b/tests/core/console/fixtures/README
@@ -0,0 +1,31 @@
+These are fixtures used for CLI tests. The idea is not to regenerate them very
+often; we should only need to do so when our daemon / CLI protocol changes.
+
+The fixtures are event logs, so it should be possible to re-generate them by
+just re-running the corresponding build.
+
+Regenerate `my_genrule0` by running the following from the `buck2` path:
+
+```
+rm "$(hg root)/fbcode/buck2/tests/core/console/fixtures/my_genrule0.proto" && \
+./buck2.sh build fbcode//buck2/tests/targets/rules/genrule:my_genrule0 --prefer-remote && \
+./buck2.sh kill && \
+./buck2.sh build fbcode//buck2/tests/targets/rules/genrule:my_genrule0 --prefer-remote --event-log "$(hg root)/fbcode/buck2/tests/core/console/fixtures/my_genrule0.proto"
+```
+
+(This runs Buck twice to ensure we get a cache hit.)
+
+You also need to update the action digests in `test_whatran` and `test_whatran_no_repo` in test_console.py.
+To find the new digest, run: + +``` +./buck2.sh log whatran "$(hg root)/fbcode/buck2/tests/core/console/fixtures/my_genrule0.proto" +``` + +and you will see a line like: + +``` +build fbcode//buck2/tests/targets/rules/genrule:my_genrule0 (ovr_config//platform/linux:x86_64-fbcode-platform010-clang#da4de3c780a17bfa) (genrule) cache 87eeee88c133dfa39711399a81f500147275cfeeb1f06b6b4805f2c0588615d1:145 +``` + +The last part after "cache" is the action digest. diff --git a/tests/core/console/fixtures/my_genrule0.proto b/tests/core/console/fixtures/my_genrule0.proto new file mode 100644 index 0000000000000..78dec2c9d16bb Binary files /dev/null and b/tests/core/console/fixtures/my_genrule0.proto differ diff --git a/tests/core/console/test_console.py b/tests/core/console/test_console.py new file mode 100644 index 0000000000000..75a07af20ac42 --- /dev/null +++ b/tests/core/console/test_console.py @@ -0,0 +1,166 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import collections +import json +import os +import re +from pathlib import Path +from typing import Any, Dict + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +def fixture(name: str) -> str: + p = Path(os.environ["FIXTURES"]) / "fixtures" / f"{name}.proto" + return str(p.absolute()) + + +@buck_test() +async def test_console_facts(buck: Buck) -> None: + res = await buck.log( + "replay", fixture("my_genrule0"), "--", "build", "--console", "simple" + ) + assert re.search("Network: .*([0-9.]+)([KMG]?)B", res.stderr) is not None + assert "Cache hits: 100%" in res.stderr + assert "Commands: 1 (cached: 1, remote: 0, local: 0)" in res.stderr + + +@buck_test() +async def test_console_facts_no_repo(buck: Buck) -> None: + res = await buck.log( + "replay", + fixture("my_genrule0"), + "--", + "build", + "--console", + "simple", + rel_cwd=Path(os.path.relpath("/", buck.cwd)), + ) + assert re.search("Network: .*([0-9.]+)([KMG]?)B", res.stderr) is not None + assert "Cache hits: 100%" in res.stderr + assert "Commands: 1 (cached: 1, remote: 0, local: 0)" in res.stderr + + +@buck_test() +async def test_super_console_facts(buck: Buck) -> None: + res = await buck.log( + "replay", fixture("my_genrule0"), "--", "build", "--console", "super" + ) + assert re.search("Network: .*([0-9.]+)([KMG]?)B", res.stderr) is not None + assert "Cache hits: 100%" in res.stderr + assert "Commands: 1" in res.stderr + + +@buck_test() +async def test_whatran(buck: Buck) -> None: + res = await buck.log( + "what-ran", + fixture("my_genrule0"), + ) + assert "cache" in res.stdout + assert ( + "87eeee88c133dfa39711399a81f500147275cfeeb1f06b6b4805f2c0588615d1:145" + in res.stdout + ) + + +@buck_test() +async def test_whatran_no_repo(buck: Buck) -> None: + res = await buck.log( + "what-ran", + fixture("my_genrule0"), + rel_cwd=Path(os.path.relpath("/", buck.cwd)), + ) + assert "cache" in res.stdout + assert ( + "87eeee88c133dfa39711399a81f500147275cfeeb1f06b6b4805f2c0588615d1:145" + in res.stdout + ) + + +@buck_test() +async def test_file_watcher_span_depth(buck: Buck) -> None: + """ + We show spans up to depth 2 in the console. We should make sure that spans + whose runtime depends on external tools (i.e. the file watcher) are + displayed. 
+ """ + await buck.build() + log = await buck.log("show") + + depths = collections.defaultdict(int) + file_watcher_span = None + + for line in log.stdout.splitlines(): + line = json.loads(line) + event = line.get("Event") + if event is None: + continue + + span = _get(event, "data", "SpanStart", "data") + if span is None: + continue + + # This event is relevant to us, but it's also not shown, so it means + # its children are roots. + if "DiceCriticalSection" in span: + depth = -1 + else: + depth = depths[event["parent_id"]] + 1 + + depths[event["span_id"]] = depth + + if _get(event, "data", "SpanStart", "data", "FileWatcher"): + file_watcher_span = event + + assert file_watcher_span is not None, "Did not find FileWatcher span" + assert depths[file_watcher_span["span_id"]] <= 2 + + +@buck_test() +async def test_stale_snapshot(buck: Buck, tmp_path: Path) -> None: + original = fixture("my_genrule0") + log = (await buck.log("show", original)).stdout + + # Now we're going to make a new log where we just delay the last event by + # some amount of time. + lines = log.splitlines() + + # Last event (last line is command result). + last = lines[-2] + last = json.loads(last) + last["Event"]["timestamp"][0] += 20 + lines[-2] = json.dumps(last) + + logfile = tmp_path / "test.json-lines" + + with open(logfile, "w") as f: + f.write("\n".join(lines)) + + stale_message = "Resource usage: " + + # Check it's there. + res = await buck.log("replay", str(logfile), "--console", "simple") + assert stale_message in res.stderr + + # Check it's not in the original one. + res = await buck.log("replay", original, "--console", "simple") + assert stale_message not in res.stderr + + +def _get(data: Dict[str, Any], *key: str) -> Dict[str, Any]: + for k in key: + data = data.get(k) + if data is None: + break + + return data diff --git a/tests/core/console/test_console_data/.buckconfig b/tests/core/console/test_console_data/.buckconfig new file mode 100644 index 0000000000000..82ff4e5316342 --- /dev/null +++ b/tests/core/console/test_console_data/.buckconfig @@ -0,0 +1,2 @@ +[cells] + root = . diff --git a/tests/core/console/test_console_data/.buckroot b/tests/core/console/test_console_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/console/test_emit_console_preferences.py b/tests/core/console/test_emit_console_preferences.py new file mode 100644 index 0000000000000..a082c73744fa5 --- /dev/null +++ b/tests/core/console/test_emit_console_preferences.py @@ -0,0 +1,30 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.utils import filter_events + + +@buck_test() +async def test_emit_console_preferences(buck: Buck) -> None: + await buck.build("-c", "ui.thread_line_limit=30") + + max_lines = await filter_events( + buck, + "Event", + "data", + "Instant", + "data", + "ConsolePreferences", + "max_lines", + ) + + assert max_lines + assert max_lines[0] == 30 diff --git a/tests/core/console/test_emit_console_preferences_data/.buckconfig b/tests/core/console/test_emit_console_preferences_data/.buckconfig new file mode 100644 index 0000000000000..7078304680646 --- /dev/null +++ b/tests/core/console/test_emit_console_preferences_data/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] + name = TARGETS.fixture + +[repositories] + root = . + prelude = . diff --git a/tests/core/console/test_emit_console_preferences_data/.buckroot b/tests/core/console/test_emit_console_preferences_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/console/test_emit_console_preferences_data/prelude.bzl b/tests/core/console/test_emit_console_preferences_data/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/ctargets_command/BUCK b/tests/core/ctargets_command/BUCK new file mode 100644 index 0000000000000..ff762148c185f --- /dev/null +++ b/tests/core/ctargets_command/BUCK @@ -0,0 +1,29 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_ctargets_basic", + srcs = ["test_ctargets_basic.py"], + data_dir = "test_ctargets_basic_data", +) + +buck2_e2e_test( + name = "test_ctargets_transition", + srcs = ["test_ctargets_transition.py"], + data_dir = "test_ctargets_transition_data", +) + +buck2_e2e_test( + name = "test_ctargets_incompatible", + srcs = ["test_ctargets_incompatible.py"], + data_dir = "test_ctargets_incompatible_data", +) + +buck2_e2e_test( + name = "test_ctargets_skip_missing_targets", + srcs = ["test_ctargets_skip_missing_targets.py"], + data_dir = "test_ctargets_skip_missing_targets_data", +) + +# Test for `--stack` lives in `test_target_call_stacks.py`. diff --git a/tests/core/ctargets_command/test_ctargets_basic.py b/tests/core/ctargets_command/test_ctargets_basic.py new file mode 100644 index 0000000000000..9af2ea1c7a545 --- /dev/null +++ b/tests/core/ctargets_command/test_ctargets_basic.py @@ -0,0 +1,29 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
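A note on the `filter_events` helper used in `test_emit_console_preferences` above: it is imported from `e2e_util`, so the real implementation lives elsewhere, but its behavior can be pictured as walking every event-log line and collecting whatever sits under a chain of nested keys, much like the `_get` helper in test_console.py. A rough, self-contained sketch under that assumption (`filter_events_sketch` is hypothetical, not the real helper):

```
import json
from typing import Any, List

def filter_events_sketch(log_lines: List[str], *keys: str) -> List[Any]:
    # For each JSON event-log line, drill through the given chain of
    # nested dictionary keys and collect the value when the path exists.
    results = []
    for line in log_lines:
        data: Any = json.loads(line)
        for k in keys:
            if not isinstance(data, dict) or k not in data:
                data = None
                break
            data = data[k]
        if data is not None:
            results.append(data)
    return results
```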
+ +# pyre-strict + + +import re + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +def _replace_hash(s: str) -> str: + return re.sub(r"\b[0-9a-f]{16}\b", "", s) + + +@buck_test() +async def test_ctargets_basic(buck: Buck) -> None: + result = await buck.ctargets( + "root//:gum", + "--target-platforms=root//:p", + ) + [line] = result.stdout.splitlines() + line = _replace_hash(line) + assert line == "root//:gum (root//:p#)" diff --git a/tests/core/ctargets_command/test_ctargets_basic_data/.buckconfig b/tests/core/ctargets_command/test_ctargets_basic_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/ctargets_command/test_ctargets_basic_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . + nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/ctargets_command/test_ctargets_basic_data/.buckroot b/tests/core/ctargets_command/test_ctargets_basic_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/ctargets_command/test_ctargets_basic_data/TARGETS.fixture b/tests/core/ctargets_command/test_ctargets_basic_data/TARGETS.fixture new file mode 100644 index 0000000000000..c124e68ea3d05 --- /dev/null +++ b/tests/core/ctargets_command/test_ctargets_basic_data/TARGETS.fixture @@ -0,0 +1,9 @@ +platform( + name = "p", + constraint_values = [], +) + +# Just a simple target. +trivial_build( + name = "gum", +) diff --git a/tests/core/ctargets_command/test_ctargets_incompatible.py b/tests/core/ctargets_command/test_ctargets_incompatible.py new file mode 100644 index 0000000000000..a94fdc406e21d --- /dev/null +++ b/tests/core/ctargets_command/test_ctargets_incompatible.py @@ -0,0 +1,35 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import re + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +def _replace_hash(s: str) -> str: + return re.sub(r"\b[0-9a-f]{16}\b", "", s) + + +@buck_test() +async def test_ctargets_incompatible(buck: Buck) -> None: + result = await buck.ctargets( + # This one will be omitted from the output because it is not compatible. + "root//:triangle", + # This one will be output. + "root//:square", + "--target-platforms=root//:rectangular", + ) + stdout = _replace_hash(result.stdout) + [line] = stdout.splitlines() + assert line == "root//:square (root//:rectangular#)" + + assert "Skipped 1 incompatible targets" in result.stderr + assert "root//:triangle (root//:rectangular#" in result.stderr diff --git a/tests/core/ctargets_command/test_ctargets_incompatible_data/.buckconfig b/tests/core/ctargets_command/test_ctargets_incompatible_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/ctargets_command/test_ctargets_incompatible_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . 
+ nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/ctargets_command/test_ctargets_incompatible_data/.buckroot b/tests/core/ctargets_command/test_ctargets_incompatible_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/ctargets_command/test_ctargets_incompatible_data/TARGETS.fixture b/tests/core/ctargets_command/test_ctargets_incompatible_data/TARGETS.fixture new file mode 100644 index 0000000000000..cfdf192efde27 --- /dev/null +++ b/tests/core/ctargets_command/test_ctargets_incompatible_data/TARGETS.fixture @@ -0,0 +1,28 @@ +constraint_setting( + name = "number-of-corners", +) + +constraint_value( + name = "three", + constraint_setting = ":number-of-corners", +) + +constraint_value( + name = "four", + constraint_setting = ":number-of-corners", +) + +platform( + name = "rectangular", + constraint_values = [":four"], +) + +trivial_build( + name = "triangle", + compatible_with = [":three"], +) + +trivial_build( + name = "square", + compatible_with = [":four"], +) diff --git a/tests/core/ctargets_command/test_ctargets_skip_missing_targets.py b/tests/core/ctargets_command/test_ctargets_skip_missing_targets.py new file mode 100644 index 0000000000000..93f5ca9d4379d --- /dev/null +++ b/tests/core/ctargets_command/test_ctargets_skip_missing_targets.py @@ -0,0 +1,44 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import re + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +def _replace_hash(s: str) -> str: + return re.sub(r"\b[0-9a-f]{16}\b", "", s) + + +@buck_test() +async def test_ctargets_skip_missing_targets(buck: Buck) -> None: + await expect_failure( + buck.ctargets( + "root//:existing", + "root//:nonexistent", + "--target-platforms=root//:p", + ), + stderr_regex="Unknown target `nonexistent` from package", + ) + + result = await buck.ctargets( + "root//:existing", + "root//:nonexistent", + "--target-platforms=root//:p", + "--skip-missing-targets", + ) + [line] = result.stdout.splitlines() + line = _replace_hash(line) + assert line == "root//:existing (root//:p#)" + + assert "Skipped 1 missing targets:" in result.stderr + assert "root//:nonexistent" in result.stderr diff --git a/tests/core/ctargets_command/test_ctargets_skip_missing_targets_data/.buckconfig b/tests/core/ctargets_command/test_ctargets_skip_missing_targets_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/ctargets_command/test_ctargets_skip_missing_targets_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . 
+ nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/ctargets_command/test_ctargets_skip_missing_targets_data/.buckroot b/tests/core/ctargets_command/test_ctargets_skip_missing_targets_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/ctargets_command/test_ctargets_skip_missing_targets_data/TARGETS.fixture b/tests/core/ctargets_command/test_ctargets_skip_missing_targets_data/TARGETS.fixture new file mode 100644 index 0000000000000..37dab875b2806 --- /dev/null +++ b/tests/core/ctargets_command/test_ctargets_skip_missing_targets_data/TARGETS.fixture @@ -0,0 +1,8 @@ +platform( + name = "p", + constraint_values = [], +) + +trivial_build( + name = "existing", +) diff --git a/tests/core/ctargets_command/test_ctargets_transition.py b/tests/core/ctargets_command/test_ctargets_transition.py new file mode 100644 index 0000000000000..2078f361201e2 --- /dev/null +++ b/tests/core/ctargets_command/test_ctargets_transition.py @@ -0,0 +1,36 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import re + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +def _replace_hash(s: str) -> str: + return re.sub(r"\b[0-9a-f]{16}\b", "", s) + + +@buck_test() +async def test_ctargets_transition(buck: Buck) -> None: + # This target does self-transition, and `ctargets` outputs both + # forward node and forward target node. + + result = await buck.ctargets( + "root//:candy", + "--target-platforms=root//:p", + ) + [line1, line2] = result.stdout.splitlines() + line1 = _replace_hash(line1) + line2 = _replace_hash(line2) + assert [line1, line2] == [ + "root//:candy (root//:p#)", + "root//:candy (#)", + ] diff --git a/tests/core/ctargets_command/test_ctargets_transition_data/.buckconfig b/tests/core/ctargets_command/test_ctargets_transition_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/ctargets_command/test_ctargets_transition_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . 
+ nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/ctargets_command/test_ctargets_transition_data/.buckroot b/tests/core/ctargets_command/test_ctargets_transition_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/ctargets_command/test_ctargets_transition_data/TARGETS.fixture b/tests/core/ctargets_command/test_ctargets_transition_data/TARGETS.fixture new file mode 100644 index 0000000000000..3e4547810b08d --- /dev/null +++ b/tests/core/ctargets_command/test_ctargets_transition_data/TARGETS.fixture @@ -0,0 +1,10 @@ +load(":defs.bzl", "clay_library") + +platform( + name = "p", + constraint_values = [], +) + +clay_library( + name = "candy", +) diff --git a/tests/core/ctargets_command/test_ctargets_transition_data/defs.bzl b/tests/core/ctargets_command/test_ctargets_transition_data/defs.bzl new file mode 100644 index 0000000000000..faeb037a42678 --- /dev/null +++ b/tests/core/ctargets_command/test_ctargets_transition_data/defs.bzl @@ -0,0 +1,25 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _clay_transition_impl(platform, refs): + _ignore = (platform, refs) # buildifier: disable=unused-variable + return PlatformInfo(label = "", configuration = ConfigurationInfo(constraints = {}, values = {})) + +clay_transition = transition( + impl = _clay_transition_impl, + refs = {}, +) + +def _clay_library_impl(ctx): + _ignore = ctx # buildifier: disable=unused-variable + return [DefaultInfo()] + +clay_library = rule( + impl = _clay_library_impl, + attrs = {}, + cfg = clay_transition, +) diff --git a/tests/core/cycle_detection/BUCK b/tests/core/cycle_detection/BUCK new file mode 100644 index 0000000000000..bcdde470d2da0 --- /dev/null +++ b/tests/core/cycle_detection/BUCK @@ -0,0 +1,9 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_cycle_detection", + srcs = ["test_cycle_detection.py"], + data_dir = "test_cycle_detection_data", +) diff --git a/tests/core/cycle_detection/test_cycle_detection.py b/tests/core/cycle_detection/test_cycle_detection.py new file mode 100644 index 0000000000000..a748bddba7b60 --- /dev/null +++ b/tests/core/cycle_detection/test_cycle_detection.py @@ -0,0 +1,111 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +def check_load_cycle_stderr(stderr: str) -> None: + # We're not sure in which order these will appear. + assert r"root//load_cycle/3.bzl ->" in stderr + assert r"root//load_cycle/2.bzl ->" in stderr + assert r"root//load_cycle/1.bzl ->" in stderr + + +def check_cfg_graph_cycle_stderr(stderr: str) -> None: + # We're not sure in which order these will appear.
+ assert r"root//:cycle_bot () ->" in stderr + assert r"root//:cycle_mid () ->" in stderr + assert r"root//:cycle_top () ->" in stderr + + +def check_cfg_toolchain_graph_cycle_stderr(stderr: str) -> None: + # We're not sure in which order these will appear. + assert r"root//:toolchain_cycle_top" in stderr + # toolchain_cycle_mid is the toolchain rule, and it doesn't appear in the error. Ideally we'd fix that, but + # for performance/memory reasons we aggregate the exec_deps out of toolchain rules. + # assert r"root//:toolchain_cycle_mid" in stderr + assert r"root//:toolchain_cycle_bot" in stderr + assert r"Resolving execution platform" in stderr + + +@buck_test() +async def test_detect_load_cycle(buck: Buck) -> None: + failure = await expect_failure( + buck.cquery( + "//:top", + "-c", + "cycles.load=yes", + ), + ) + check_load_cycle_stderr(failure.stderr) + + +@buck_test() +async def test_detect_configured_graph_cycles(buck: Buck) -> None: + failure = await expect_failure( + buck.cquery( + "//:top", + "-c", + "cycles.cfg_graph=yes", + ), + ) + check_cfg_graph_cycle_stderr(failure.stderr) + + +@buck_test() +async def test_detect_configured_graph_cycles_on_recompute(buck: Buck) -> None: + await buck.cquery("//:top") + + failure = await expect_failure( + buck.cquery( + "//:top", + "-c", + "cycles.cfg_graph=yes", + ), + ) + + check_cfg_graph_cycle_stderr(failure.stderr) + + +@buck_test() +async def test_detect_configured_graph_cycles_2(buck: Buck) -> None: + failure = await expect_failure( + buck.cquery( + "//:top", + "-c", + "cycles.cfg_toolchain=yes", + ), + ) + check_cfg_toolchain_graph_cycle_stderr(failure.stderr) + + +@buck_test() +async def test_more_recompute_cases(buck: Buck) -> None: + await buck.cquery("//:top") + + failure = await expect_failure( + buck.cquery( + "//:top", + "-c", + "cycles.load=yes", + ), + ) + check_load_cycle_stderr(failure.stderr) + + failure = await expect_failure( + buck.cquery( + "//:top", + "-c", + "cycles.cfg_graph=yes", + ), + ) + check_cfg_graph_cycle_stderr(failure.stderr) diff --git a/tests/core/cycle_detection/test_cycle_detection_data/.buckconfig b/tests/core/cycle_detection/test_cycle_detection_data/.buckconfig new file mode 100644 index 0000000000000..223c558db45e7 --- /dev/null +++ b/tests/core/cycle_detection/test_cycle_detection_data/.buckconfig @@ -0,0 +1,16 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored + +[repositories] +root = .
+prelude = prelude + +[build] + lazy_cycle_detector = true + execution_platforms = //:execution_platforms + +[buck2] + detect_cycles = disabled diff --git a/tests/core/cycle_detection/test_cycle_detection_data/.buckroot b/tests/core/cycle_detection/test_cycle_detection_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/cycle_detection/test_cycle_detection_data/TARGETS.fixture b/tests/core/cycle_detection/test_cycle_detection_data/TARGETS.fixture new file mode 100644 index 0000000000000..3e35fb3eb9b57 --- /dev/null +++ b/tests/core/cycle_detection/test_cycle_detection_data/TARGETS.fixture @@ -0,0 +1,50 @@ +load("//defs.bzl", "execution_platforms", "suite", "toolchain") + +suite( + name = "top", + deps = [] + ( + ["//load_cycle:target"] if read_config("cycles", "load") == "yes" else [] + ) + ( + ["//:cycle_top"] if read_config("cycles", "cfg_graph") == "yes" else [] + ) + ( + ["//:toolchain_cycle_top"] if read_config("cycles", "cfg_toolchain") == "yes" else [] + ), +) + +suite( + name = "cycle_top", + deps = [ + ":cycle_mid", + ], +) + +suite( + name = "cycle_mid", + deps = [ + ":cycle_bot", + ], +) + +suite( + name = "cycle_bot", + deps = [ + ":cycle_top", + ], +) + +suite( + name = "toolchain_cycle_top", + toolchain = ":toolchain_cycle_mid", +) + +toolchain( + name = "toolchain_cycle_mid", + exec_deps = [":toolchain_cycle_bot"], +) + +suite( + name = "toolchain_cycle_bot", + deps = [":toolchain_cycle_top"], +) + +execution_platforms(name = "execution_platforms") diff --git a/tests/core/cycle_detection/test_cycle_detection_data/defs.bzl b/tests/core/cycle_detection/test_cycle_detection_data/defs.bzl new file mode 100644 index 0000000000000..1674214be20cc --- /dev/null +++ b/tests/core/cycle_detection/test_cycle_detection_data/defs.bzl @@ -0,0 +1,44 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(_ctx): + fail() + +suite = rule( + impl = _impl, + attrs = { + "deps": attrs.list(attrs.dep(), default = []), + "toolchain": attrs.option(attrs.toolchain_dep(), default = None), + }, +) + +toolchain = rule( + impl = _impl, + is_toolchain_rule = True, + attrs = { + "exec_deps": attrs.list(attrs.exec_dep()), + }, +) + +def exec_platforms_impl(ctx): + return [ + DefaultInfo(), + ExecutionPlatformRegistrationInfo( + platforms = [ + ExecutionPlatformInfo( + label = ctx.label.raw_target(), + configuration = ConfigurationInfo(constraints = {}, values = {}), + executor_config = CommandExecutorConfig(local_enabled = True, remote_enabled = False), + ), + ], + ), + ] + +execution_platforms = rule( + impl = exec_platforms_impl, + attrs = {}, +) diff --git a/tests/core/cycle_detection/test_cycle_detection_data/load_cycle/1.bzl b/tests/core/cycle_detection/test_cycle_detection_data/load_cycle/1.bzl new file mode 100644 index 0000000000000..134081bd5b1f0 --- /dev/null +++ b/tests/core/cycle_detection/test_cycle_detection_data/load_cycle/1.bzl @@ -0,0 +1,11 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +load(":2.bzl", "two") + +two() +# the load above is a cycle so it doesn't matter what comes next diff --git a/tests/core/cycle_detection/test_cycle_detection_data/load_cycle/2.bzl b/tests/core/cycle_detection/test_cycle_detection_data/load_cycle/2.bzl new file mode 100644 index 0000000000000..f025c91ec9327 --- /dev/null +++ b/tests/core/cycle_detection/test_cycle_detection_data/load_cycle/2.bzl @@ -0,0 +1,11 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load(":3.bzl", "three") + +three() +# the load above is a cycle so it doesn't matter what comes next diff --git a/tests/core/cycle_detection/test_cycle_detection_data/load_cycle/3.bzl b/tests/core/cycle_detection/test_cycle_detection_data/load_cycle/3.bzl new file mode 100644 index 0000000000000..b71b0c5ee3003 --- /dev/null +++ b/tests/core/cycle_detection/test_cycle_detection_data/load_cycle/3.bzl @@ -0,0 +1,11 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load(":1.bzl", "one") + +one() +# the load above is a cycle so it doesn't matter what comes next diff --git a/tests/core/cycle_detection/test_cycle_detection_data/load_cycle/TARGETS.fixture b/tests/core/cycle_detection/test_cycle_detection_data/load_cycle/TARGETS.fixture new file mode 100644 index 0000000000000..377aa198e8245 --- /dev/null +++ b/tests/core/cycle_detection/test_cycle_detection_data/load_cycle/TARGETS.fixture @@ -0,0 +1,4 @@ +load(":1.bzl", "one") + +one() +# the load above is a cycle so it doesn't matter what comes next diff --git a/tests/core/cycle_detection/test_cycle_detection_data/prelude/prelude.bzl b/tests/core/cycle_detection/test_cycle_detection_data/prelude/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/daemon/BUCK b/tests/core/daemon/BUCK new file mode 100644 index 0000000000000..ca8bceb7b2b96 --- /dev/null +++ b/tests/core/daemon/BUCK @@ -0,0 +1,21 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_daemon", + srcs = ["test_daemon.py"], + data_dir = "test_daemon_data", +) + +buck2_e2e_test( + name = "test_daemon_buster", + srcs = ["test_daemon_buster.py"], + data_dir = "test_daemon_buster_data", +) + +buck2_e2e_test( + name = "test_nested_invocations", + srcs = ["test_nested_invocations.py"], + data_dir = "test_nested_invocations_data", +) diff --git a/tests/core/daemon/test_daemon.py b/tests/core/daemon/test_daemon.py new file mode 100644 index 0000000000000..f0f2f29147fa8 --- /dev/null +++ b/tests/core/daemon/test_daemon.py @@ -0,0 +1,136 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree.
+ +# pyre-strict + + +import json +import platform +import subprocess +import time +from pathlib import Path + +import pytest + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test, env + + +@buck_test() +@env("BUCK2_TESTING_INACTIVITY_TIMEOUT", "true") +async def test_inactivity_timeout(buck: Buck) -> None: + ####################################################### + # Recommend running this test in opt mode. + # Otherwise the command that is run here + # could take longer than 1 second to finish, + # causing this test to be flaky. + ####################################################### + + # this will start the daemon + await buck.targets("//:rule") + + time.sleep(1) # 1 sec timeout + + # check it's dead + for _ in range(20): + time.sleep(1) + result = await buck.status() + if result.stderr.splitlines()[-1] == "no buckd running": + return + + raise AssertionError("Server did not die in 20 seconds") + + +@buck_test() +@pytest.mark.parametrize( + "corrupt", + ["not-json", '{"valid-json", "but-not-valid-data"}'], +) +async def test_corrupted_buckd_info(buck: Buck, corrupt: str) -> None: + await buck.targets("//:rule") + + daemon_dir_result = await buck.debug("daemon-dir") + daemon_dir = daemon_dir_result.stdout.strip() + with open(f"{daemon_dir}/buckd.info") as f: + # Check the file exists and is valid. + json.load(f) + + # Kill that daemon now to avoid making a mess and leaving 2 daemons + # around. + await buck.kill() + + with open(f"{daemon_dir}/buckd.info", "w") as f: + f.write(corrupt) + + await buck.targets("//:rule") + + +@buck_test() +async def test_process_title(buck: Buck) -> None: + await buck.build()  # Start the daemon + status = await buck.status() + status = json.loads(status.stdout) + pid = status["process_info"]["pid"] + + if platform.system() == "Darwin": + out = subprocess.check_output(["ps", "-o", "comm=", str(pid)]).strip() + assert out.startswith(b"buck2d[") + elif platform.system() == "Linux": + out = subprocess.check_output(["ps", "-o", "cmd=", str(pid)]).strip() + assert out.startswith(b"buck2d[") + elif platform.system() == "Windows": + # We don't guarantee any particular value there.
+ pass + else: + raise Exception("Unknown platform") + + +@buck_test() +async def test_status_fields(buck: Buck) -> None: + await buck.build() # Start the daemon + status = await buck.status() + status = json.loads(status.stdout) + assert status["valid_working_directory"] + assert status["valid_buck_out_mount"] + + +@buck_test() +async def test_status_all(buck: Buck) -> None: + # this will start the daemons + await buck.server() + + status = await buck.status() + status = json.loads(status.stdout) + pid = status["process_info"]["pid"] + + status_all = await buck.status("--all") + status_all = json.loads(status_all.stdout) + for status in status_all: + if status["process_info"]["pid"] == pid: + return + raise Exception( + f"buckd status for pid {pid} not found in {json.dumps(status_all, indent=2)}" + ) + + +@buck_test() +@env("BUCK_LOG", "buck2_client_ctx::daemon::client::kill=debug") +async def test_no_buckd_kills_existing_daemon(buck: Buck) -> None: + await buck.audit("cell") # Start the daemon + result = await buck.audit("cell", "--no-buckd") # Kill the existing daemon + assert "Killing daemon with PID" in result.stderr + + +@buck_test() +async def test_buck_out_is_cache_dir(buck: Buck) -> None: + await buck.targets(":") # Start a daemon + root = await buck.root() + assert ( + (Path(root.stdout.strip()) / "buck-out" / "v2" / "CACHEDIR.TAG") + .read_text(encoding="utf-8") + .startswith("Signature: 8a477f597d28d172789f06886806bc55") + ) diff --git a/tests/core/daemon/test_daemon_buster.py b/tests/core/daemon/test_daemon_buster.py new file mode 100644 index 0000000000000..5dbcb3de6945a --- /dev/null +++ b/tests/core/daemon/test_daemon_buster.py @@ -0,0 +1,61 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_daemon_buster(buck: Buck) -> None: + async def pid() -> int: + return json.loads((await buck.status()).stdout)["process_info"]["pid"] + + await buck.build(":") + pid0 = await pid() + + await buck.build(":") + pid1 = await pid() + assert pid1 == pid0 + + with open(buck.cwd / ".buckconfig", "a") as f: + f.write("[buck2]\n") + f.write("daemon_buster = 1\n") + + await buck.build(":") + pid2 = await pid() + assert pid2 != pid1 + + await buck.build(":") + pid3 = await pid() + assert pid3 == pid2 + + with open(buck.cwd / ".buckconfig", "a") as f: + f.write("[buck2]\n") + f.write("daemon_buster = 2\n") + + await buck.build(":") + pid4 = await pid() + assert pid4 != pid3 + + with open(buck.cwd / ".buckconfig", "r") as f: + config = f.read() + + with open(buck.cwd / ".buckconfig", "w") as f: + f.write( + "\n".join( + line for line in config.splitlines() if "daemon_buster" not in line + ) + ) + + await buck.build(":") + pid5 = await pid() + assert pid5 != pid4 diff --git a/tests/core/daemon/test_daemon_buster_data/.buckconfig b/tests/core/daemon/test_daemon_buster_data/.buckconfig new file mode 100644 index 0000000000000..425a56f43b9c4 --- /dev/null +++ b/tests/core/daemon/test_daemon_buster_data/.buckconfig @@ -0,0 +1,6 @@ +[repositories] + root = . 
+[repository_aliases] + prelude = root +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/daemon/test_daemon_buster_data/.buckroot b/tests/core/daemon/test_daemon_buster_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/daemon/test_daemon_buster_data/TARGETS.fixture b/tests/core/daemon/test_daemon_buster_data/TARGETS.fixture new file mode 100644 index 0000000000000..fdee4019368a9 --- /dev/null +++ b/tests/core/daemon/test_daemon_buster_data/TARGETS.fixture @@ -0,0 +1,3 @@ +load(":defs.bzl", "noop") + +noop(name = "noop") diff --git a/tests/core/daemon/test_daemon_buster_data/defs.bzl b/tests/core/daemon/test_daemon_buster_data/defs.bzl new file mode 100644 index 0000000000000..8f2f08b1ce38c --- /dev/null +++ b/tests/core/daemon/test_daemon_buster_data/defs.bzl @@ -0,0 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +noop = rule(attrs = {}, impl = lambda _ctx: [DefaultInfo()]) diff --git a/tests/core/daemon/test_daemon_buster_data/prelude.bzl b/tests/core/daemon/test_daemon_buster_data/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/daemon/test_daemon_buster_data/src b/tests/core/daemon/test_daemon_buster_data/src new file mode 100644 index 0000000000000..85504b36b9ed7 --- /dev/null +++ b/tests/core/daemon/test_daemon_buster_data/src @@ -0,0 +1 @@ +some src file diff --git a/tests/core/daemon/test_daemon_data/.buckconfig b/tests/core/daemon/test_daemon_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/daemon/test_daemon_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . + nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/daemon/test_daemon_data/.buckroot b/tests/core/daemon/test_daemon_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/daemon/test_daemon_data/TARGETS.fixture b/tests/core/daemon/test_daemon_data/TARGETS.fixture new file mode 100644 index 0000000000000..29a7e8e5a3563 --- /dev/null +++ b/tests/core/daemon/test_daemon_data/TARGETS.fixture @@ -0,0 +1 @@ +trivial_build(name = "rule") diff --git a/tests/core/daemon/test_nested_invocations.py b/tests/core/daemon/test_nested_invocations.py new file mode 100644 index 0000000000000..1fcba3b52abd3 --- /dev/null +++ b/tests/core/daemon/test_nested_invocations.py @@ -0,0 +1,90 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-unsafe + + +import typing +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +def nested_buck2_args(buck: Buck) -> typing.List[str]: + return [ + "-c", + f"nested.buck2_path={buck.path_to_executable}", + ] + + +@buck_test(allow_soft_errors=True) +async def test_same_state(buck: Buck) -> None: + await buck.build( + "root//:nested_normal", *nested_buck2_args(buck), env={"SANDCASTLE_ID": ""} + ) + + +@buck_test(allow_soft_errors=True) +async def test_different_state_error(buck: Buck, tmp_path: Path) -> None: + # FIXME(JakobDegen): Nested invocations seem to have buggy behavior around writing the event + # logs, so `log show` and friends don't work without this + log = tmp_path / "logfile.json-lines" + await expect_failure( + buck.build( + "-c", + "some.config=Val", + "root//:nested_normal", + "--event-log", + str(log), + *nested_buck2_args(buck), + env={"SANDCASTLE_ID": ""}, + ), + stderr_regex="Failed to build 'root//:nested_normal", + ) + res = await buck.log("what-ran", "--failed", "--show-std-err", str(log)) + assert "Recursive invocation of Buck, with a different state" in res.stdout + + +@buck_test(allow_soft_errors=True) +async def test_different_user_version_and_state(buck: Buck, tmp_path: Path) -> None: + log = tmp_path / "logfile.json-lines" + await expect_failure( + buck.build( + "-c", + "some.config=Val", + "root//:nested_normal", + "--event-log", + str(log), + *nested_buck2_args(buck), + # Set a `SANDCASTLE_ID`; this affects the daemon constraints + env={"SANDCASTLE_ID": "12345"}, + ), + stderr_regex="Failed to build 'root//:nested_normal", + ) + res = await buck.log("what-ran", "--failed", "--show-std-err", str(log)) + assert "Recursive invocation of Buck, with a different state" in res.stdout + + +@buck_test(allow_soft_errors=True) +async def test_trace_io_mismatch(buck: Buck, tmp_path: Path) -> None: + log = tmp_path / "logfile.json-lines" + await expect_failure( + buck.build( + "root//:nested_trace", + "--event-log", + str(log), + *nested_buck2_args(buck), + ), + stderr_regex="Failed to build 'root//:nested_trace", + ) + res = await buck.log("what-ran", "--failed", "--show-std-err", str(log)) + assert ( + "daemon constraint mismatch during nested invocation: Trace IO mismatch" + in res.stdout + ) diff --git a/tests/core/daemon/test_nested_invocations_data/.buckconfig b/tests/core/daemon/test_nested_invocations_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/daemon/test_nested_invocations_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . 
+ nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/daemon/test_nested_invocations_data/.buckroot b/tests/core/daemon/test_nested_invocations_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/daemon/test_nested_invocations_data/TARGETS.fixture b/tests/core/daemon/test_nested_invocations_data/TARGETS.fixture new file mode 100644 index 0000000000000..116a99fef6c58 --- /dev/null +++ b/tests/core/daemon/test_nested_invocations_data/TARGETS.fixture @@ -0,0 +1,13 @@ +load(":defs.bzl", "normal_nested_invocation", "trace_nested_invocation") + +trivial_build(name = "trivial") + +normal_nested_invocation( + name = "nested_normal", + buck2_path = read_root_config("nested", "buck2_path"), +) + +trace_nested_invocation( + name = "nested_trace", + buck2_path = read_root_config("nested", "buck2_path"), +) diff --git a/tests/core/daemon/test_nested_invocations_data/defs.bzl b/tests/core/daemon/test_nested_invocations_data/defs.bzl new file mode 100644 index 0000000000000..7bfebff32fede --- /dev/null +++ b/tests/core/daemon/test_nested_invocations_data/defs.bzl @@ -0,0 +1,82 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _normal_impl(ctx): + out = ctx.actions.declare_output("out.txt") + + cmd = cmd_args( + ctx.attrs.buck2_path, + "build", + "root//:trivial", + "-c", + "nested.buck2_path=" + ctx.attrs.buck2_path, + "--out", + out.as_output(), + ) + ctx.actions.run( + cmd, + # Unset the user version. 
Actions will only sometimes do this; tests always will + # + # Unfortunately, there's no good way to ask for it to be unset, so we do this instead + env = {"SANDCASTLE_ID": ""}, + local_only = True, + category = "run", + ) + return [DefaultInfo(default_output = out)] + +def _trace_impl(ctx): + trace_out = ctx.actions.declare_output("trace_out.txt") + nested_out = ctx.actions.declare_output("nested_out.txt") + + script = ctx.actions.write("script.py", """ +import subprocess +import sys + +buck_path = sys.argv[1] +subprocess.run([buck_path, "debug", "trace-io", "enable"]) + +# Write the declared output so this action actually produces it. +with open(sys.argv[2], "w") as f: + f.write("enabled") + """) + ctx.actions.run( + [ + "python3", + script, + ctx.attrs.buck2_path, + trace_out.as_output(), + ], + local_only = True, + category = "trace", + ) + + nested_cmd = cmd_args( + ctx.attrs.buck2_path, + "build", + "root//:trivial", + "-c", + "nested.buck2_path=" + ctx.attrs.buck2_path, + "--out", + nested_out.as_output(), + hidden = trace_out, + ) + ctx.actions.run( + nested_cmd, + local_only = True, + category = "run", + ) + return [DefaultInfo(default_output = nested_out)] + +normal_nested_invocation = rule( + impl = _normal_impl, + attrs = { + "buck2_path": attrs.string(), + }, +) + +trace_nested_invocation = rule( + impl = _trace_impl, + attrs = { + "buck2_path": attrs.string(), + }, +) diff --git a/tests/core/debug/BUCK b/tests/core/debug/BUCK new file mode 100644 index 0000000000000..cc7a7eb95fac5 --- /dev/null +++ b/tests/core/debug/BUCK @@ -0,0 +1,21 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_debug", + srcs = ["test_debug.py"], + data_dir = "test_debug_data", +) + +buck2_e2e_test( + name = "test_debug_chrome_trace", + srcs = ["test_debug_chrome_trace.py"], + data_dir = "test_debug_chrome_trace_data", +) + +buck2_e2e_test( + name = "test_debug_eval", + srcs = ["test_debug_eval.py"], + data_dir = "test_debug_eval_data", +) diff --git a/tests/core/debug/test_debug.py b/tests/core/debug/test_debug.py new file mode 100644 index 0000000000000..d6de69cc8365d --- /dev/null +++ b/tests/core/debug/test_debug.py @@ -0,0 +1,73 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import os.path +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_debug_crash(buck: Buck) -> None: + # If the first operation immediately does a panic, then we fail to connect. + # While that's not great, having some panics is better than none, so test once after we spawn. + await buck.build() + result = await buck.debug("crash", "panic") + assert "explicitly requested panic" in result.stderr + # Our crash output should include a stack trace. + assert "stack backtrace:" in result.stderr + + +@buck_test() +async def test_debug_exe(buck: Buck) -> None: + result = await buck.debug("exe") + path = result.stdout.strip() + assert os.path.exists(path) + + +@buck_test() +async def test_debug_allocative(buck: Buck, tmp_path: Path) -> None: + # Start the server.
+ await buck.uquery("root//:") + + file_path = tmp_path / "profile" + + output = await buck.debug("allocative", "--output", str(file_path)) + assert os.path.exists(f"{file_path}/flamegraph.src") + assert os.path.exists(f"{file_path}/flamegraph.svg") + assert "Profile written" in output.stderr + + await buck.debug("allocative") + assert os.path.exists(buck.cwd / "allocative-out" / "flamegraph.src") + assert os.path.exists(buck.cwd / "allocative-out" / "flamegraph.svg") + + +@buck_test() +async def test_debug_filestatus(buck: Buck) -> None: + # Start the server. + await buck.uquery("root//:") + # FIXME(JakobDegen): `.` is an error + output = await buck.debug("file-status", "TARGETS.fixture") + assert "No mismatches detected" in output.stderr + + +@buck_test(skip_for_os=["windows", "darwin"]) +async def test_thread_dump(buck: Buck) -> None: + # Make sure we don't start a daemon if there isn't one + await expect_failure( + buck.debug("thread-dump"), + stderr_regex="No running buck daemon", + ) + # Start the daemon + await buck.uquery("root//:") + output = await buck.debug("thread-dump") + assert "frame #0" in output.stdout diff --git a/tests/core/debug/test_debug_chrome_trace.py b/tests/core/debug/test_debug_chrome_trace.py new file mode 100644 index 0000000000000..a7e57529138c5 --- /dev/null +++ b/tests/core/debug/test_debug_chrome_trace.py @@ -0,0 +1,37 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import os.path +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_chrome_trace(buck: Buck, tmp_path: Path) -> None: + # Just check it at least runs. + await buck.build("//...") + await buck.debug("chrome-trace", "--trace-path", str(tmp_path / "trace.json")) + + +@buck_test() +async def test_chrome_trace_no_repo(buck: Buck, tmp_path: Path) -> None: + # Check that it runs from a path that is not in the repo. + await buck.build("//...") + log_path = (await buck.log("last")).stdout.strip() + await buck.debug( + "chrome-trace", + "--trace-path", + str(tmp_path / "trace.json"), + "--path", + log_path, + rel_cwd=Path(os.path.relpath("/", buck.cwd)), + ) diff --git a/tests/core/debug/test_debug_chrome_trace_data/.buckconfig b/tests/core/debug/test_debug_chrome_trace_data/.buckconfig new file mode 100644 index 0000000000000..2988e9614be50 --- /dev/null +++ b/tests/core/debug/test_debug_chrome_trace_data/.buckconfig @@ -0,0 +1,5 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +prelude = . 
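`test_chrome_trace` above only checks that the command runs. Chrome's trace format is JSON, either a bare array of events or an object with a `traceEvents` list, so a stricter check, sketched here as a hypothetical helper rather than part of the test suite, could look like:

```
import json
from pathlib import Path

def assert_looks_like_chrome_trace(path: Path) -> None:
    # Chrome traces are either a JSON array of events or an object
    # holding the events under the "traceEvents" key.
    data = json.loads(path.read_text(encoding="utf-8"))
    events = data if isinstance(data, list) else data["traceEvents"]
    assert isinstance(events, list)
    assert len(events) > 0
```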
diff --git a/tests/core/debug/test_debug_chrome_trace_data/.buckroot b/tests/core/debug/test_debug_chrome_trace_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/debug/test_debug_chrome_trace_data/TARGETS.fixture b/tests/core/debug/test_debug_chrome_trace_data/TARGETS.fixture new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/debug/test_debug_chrome_trace_data/prelude.bzl b/tests/core/debug/test_debug_chrome_trace_data/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/debug/test_debug_data/.buckconfig b/tests/core/debug/test_debug_data/.buckconfig new file mode 100644 index 0000000000000..df06a02c03ca2 --- /dev/null +++ b/tests/core/debug/test_debug_data/.buckconfig @@ -0,0 +1,5 @@ +[cells] + root = . + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/debug/test_debug_data/.buckroot b/tests/core/debug/test_debug_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/debug/test_debug_data/TARGETS.fixture b/tests/core/debug/test_debug_data/TARGETS.fixture new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/debug/test_debug_eval.py b/tests/core/debug/test_debug_eval.py new file mode 100644 index 0000000000000..a3db80f221eb9 --- /dev/null +++ b/tests/core/debug/test_debug_eval.py @@ -0,0 +1,44 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_debug_eval_good(buck: Buck) -> None: + await buck.debug( + "eval", + "./good.bzl", + "./good.bxl", + ) + + +@buck_test() +async def test_debug_eval_bad_bzl(buck: Buck) -> None: + await expect_failure( + buck.debug( + "eval", + "./bad.bzl", + ), + stderr_regex="fail: bad bzl", + ) + + +@buck_test() +async def test_debug_eval_bad_bxl(buck: Buck) -> None: + await expect_failure( + buck.debug( + "eval", + "./bad.bxl", + ), + stderr_regex="fail: bad bxl", + ) diff --git a/tests/core/debug/test_debug_eval_data/.buckconfig b/tests/core/debug/test_debug_eval_data/.buckconfig new file mode 100644 index 0000000000000..7b86b4be53592 --- /dev/null +++ b/tests/core/debug/test_debug_eval_data/.buckconfig @@ -0,0 +1,4 @@ +[repositories] + prelude = . +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/debug/test_debug_eval_data/.buckroot b/tests/core/debug/test_debug_eval_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/debug/test_debug_eval_data/TARGETS.fixture b/tests/core/debug/test_debug_eval_data/TARGETS.fixture new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/debug/test_debug_eval_data/bad.bxl b/tests/core/debug/test_debug_eval_data/bad.bxl new file mode 100644 index 0000000000000..2951c59aeeecc --- /dev/null +++ b/tests/core/debug/test_debug_eval_data/bad.bxl @@ -0,0 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +fail("bad bxl") diff --git a/tests/core/debug/test_debug_eval_data/bad.bzl b/tests/core/debug/test_debug_eval_data/bad.bzl new file mode 100644 index 0000000000000..0015dfec0ca6f --- /dev/null +++ b/tests/core/debug/test_debug_eval_data/bad.bzl @@ -0,0 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +fail("bad bzl") diff --git a/tests/core/debug/test_debug_eval_data/good.bxl b/tests/core/debug/test_debug_eval_data/good.bxl new file mode 100644 index 0000000000000..fe0012031ccde --- /dev/null +++ b/tests/core/debug/test_debug_eval_data/good.bxl @@ -0,0 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def bar(): + pass diff --git a/tests/core/debug/test_debug_eval_data/good.bzl b/tests/core/debug/test_debug_eval_data/good.bzl new file mode 100644 index 0000000000000..a4d77fe2b50ec --- /dev/null +++ b/tests/core/debug/test_debug_eval_data/good.bzl @@ -0,0 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def foo(): + pass diff --git a/tests/core/debug/test_debug_eval_data/prelude.bzl b/tests/core/debug/test_debug_eval_data/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/dice_dump/BUCK b/tests/core/dice_dump/BUCK new file mode 100644 index 0000000000000..d964894a085b6 --- /dev/null +++ b/tests/core/dice_dump/BUCK @@ -0,0 +1,10 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_dump", + srcs = ["test_dump.py"], + data_dir = "test_dump_data", + deps = ["fbcode//buck2/tests/e2e_util:golden"], +) diff --git a/tests/core/dice_dump/test_dump.py b/tests/core/dice_dump/test_dump.py new file mode 100644 index 0000000000000..8ca13abe932d3 --- /dev/null +++ b/tests/core/dice_dump/test_dump.py @@ -0,0 +1,42 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + + +import gzip +import os.path +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_debug_legacy_dice_dump(buck: Buck, tmp_path: Path) -> None: + file_path = tmp_path / "dump" + + await buck.uquery("//...") + await buck.debug("dice-dump", "--path", str(file_path)) + + assert os.path.exists(f"{file_path}/nodes.gz") + assert os.path.exists(f"{file_path}/edges.gz") + assert os.path.exists(f"{file_path}/nodes_currently_running.gz") + + nodes = gzip.open(f"{file_path}/nodes.gz", "r").read().decode() + assert "BuildDataKey" in nodes + assert "FileOpsKey" in nodes + + edges = gzip.open(f"{file_path}/edges.gz", "r").read().decode() + print(edges) + assert edges # check not empty + + nodes_currently_running = ( + gzip.open(f"{file_path}/nodes_currently_running.gz", "r").read().decode() + ) + print(nodes_currently_running) + assert nodes_currently_running == "" diff --git a/tests/core/dice_dump/test_dump_data/.buckconfig b/tests/core/dice_dump/test_dump_data/.buckconfig new file mode 100644 index 0000000000000..924321258f883 --- /dev/null +++ b/tests/core/dice_dump/test_dump_data/.buckconfig @@ -0,0 +1,3 @@ +[repositories] + root = . + prelude = . diff --git a/tests/core/dice_dump/test_dump_data/.buckroot b/tests/core/dice_dump/test_dump_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/dice_dump/test_dump_data/TARGETS.fixture b/tests/core/dice_dump/test_dump_data/TARGETS.fixture new file mode 100644 index 0000000000000..5a8679d669388 --- /dev/null +++ b/tests/core/dice_dump/test_dump_data/TARGETS.fixture @@ -0,0 +1,17 @@ +myr( + name = "a0", + deps = [":a1", ":a2"], +) + +myr( + name = "a1", + deps = [":a2"], +) + +myr( + name = "a2", +) + +myr( + name = "a3", +) diff --git a/tests/core/dice_dump/test_dump_data/prelude.bzl b/tests/core/dice_dump/test_dump_data/prelude.bzl new file mode 100644 index 0000000000000..0d1802aa3fef0 --- /dev/null +++ b/tests/core/dice_dump/test_dump_data/prelude.bzl @@ -0,0 +1,17 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _myr(ctx): + _ = ctx # @unused + return [DefaultInfo()] + +myr = rule( + impl = _myr, + attrs = { + "deps": attrs.list(attrs.dep(), default = []), + }, +) diff --git a/tests/core/digest/BUCK b/tests/core/digest/BUCK new file mode 100644 index 0000000000000..2f7f9bec36ac2 --- /dev/null +++ b/tests/core/digest/BUCK @@ -0,0 +1,12 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_digest", + srcs = ["test_digest.py"], + data_dir = "test_digest_data", + serialize_test_cases = False, + deps = [ + ], +) diff --git a/tests/core/digest/test_digest.py b/tests/core/digest/test_digest.py new file mode 100644 index 0000000000000..73b002f214205 --- /dev/null +++ b/tests/core/digest/test_digest.py @@ -0,0 +1,17 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_blake3(buck: Buck) -> None: + await buck.build("//:cp") diff --git a/tests/core/digest/test_digest_data/.buckconfig b/tests/core/digest/test_digest_data/.buckconfig new file mode 100644 index 0000000000000..bd0c070efa2b6 --- /dev/null +++ b/tests/core/digest/test_digest_data/.buckconfig @@ -0,0 +1,11 @@ +[repositories] + root = . +[repository_aliases] + prelude = root +[buildfile] + name = TARGETS.fixture +[buck2] + materializations = deferred + digest_algorithms = BLAKE3-KEYED,SHA1 +[build] + execution_platforms = root//:exec_platforms diff --git a/tests/core/digest/test_digest_data/.buckroot b/tests/core/digest/test_digest_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/digest/test_digest_data/TARGETS.fixture b/tests/core/digest/test_digest_data/TARGETS.fixture new file mode 100644 index 0000000000000..09c83995c7f9d --- /dev/null +++ b/tests/core/digest/test_digest_data/TARGETS.fixture @@ -0,0 +1,5 @@ +load(":defs.bzl", "cp", "exec_platforms") + +cp(name = "cp", src = "src") + +exec_platforms(name = "exec_platforms") diff --git a/tests/core/digest/test_digest_data/defs.bzl b/tests/core/digest/test_digest_data/defs.bzl new file mode 100644 index 0000000000000..fa02ec0772e0f --- /dev/null +++ b/tests/core/digest/test_digest_data/defs.bzl @@ -0,0 +1,38 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
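Context for the `digest_algorithms = BLAKE3-KEYED,SHA1` setting above: Buck2 digests, like the `…:145` value in the console fixtures earlier, are a content hash plus a byte size, written `<hash>:<size>`. A minimal sketch of computing the SHA1 flavor for a file's contents; the keyed BLAKE3 variant would additionally need a key and a third-party blake3 implementation:

```
import hashlib
from pathlib import Path

def sha1_file_digest(path: Path) -> str:
    # "<hex hash>:<byte size>", the shape Buck2 prints for digests.
    data = path.read_bytes()
    return f"{hashlib.sha1(data).hexdigest()}:{len(data)}"
```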
+
+def _impl_cp(ctx):
+    out = ctx.actions.declare_output("out")
+    ctx.actions.run(["cp", ctx.attrs.src, out.as_output()], category = "cp")
+    return [DefaultInfo(out)]
+
+cp = rule(attrs = {"src": attrs.source()}, impl = _impl_cp)
+
+def _impl_exec_platforms(ctx):
+    configuration = ConfigurationInfo(constraints = {}, values = {})
+
+    platform = ExecutionPlatformInfo(
+        label = ctx.label.raw_target(),
+        configuration = configuration,
+        executor_config = CommandExecutorConfig(
+            local_enabled = False,
+            remote_enabled = True,
+            remote_execution_properties = {
+                "platform": "linux-remote-execution",
+            },
+            remote_execution_use_case = "buck2-testing",
+        ),
+    )
+
+    return [
+        DefaultInfo(),
+        configuration,
+        platform,
+        ExecutionPlatformRegistrationInfo(platforms = [platform]),
+    ]
+
+exec_platforms = rule(attrs = {}, impl = _impl_exec_platforms)
diff --git a/tests/core/digest/test_digest_data/prelude.bzl b/tests/core/digest/test_digest_data/prelude.bzl
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/digest/test_digest_data/src b/tests/core/digest/test_digest_data/src
new file mode 100644
index 0000000000000..85504b36b9ed7
--- /dev/null
+++ b/tests/core/digest/test_digest_data/src
@@ -0,0 +1 @@
+some src file
diff --git a/tests/core/docs/BUCK b/tests/core/docs/BUCK
new file mode 100644
index 0000000000000..d62ce1ee6c9e7
--- /dev/null
+++ b/tests/core/docs/BUCK
@@ -0,0 +1,17 @@
+load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test")
+
+oncall("build_infra")
+
+buck2_e2e_test(
+    name = "test_docs",
+    srcs = ["test_docs.py"],
+    data_dir = "test_docs_data",
+    deps = ["fbcode//buck2/tests/e2e_util:golden"],
+)
+
+buck2_e2e_test(
+    name = "test_builtin_docs",
+    srcs = ["test_builtin_docs.py"],
+    data_dir = "test_builtin_docs_data",
+    deps = ["fbcode//buck2/tests/e2e_util:golden"],
+)
diff --git a/tests/core/docs/test_builtin_docs.py b/tests/core/docs/test_builtin_docs.py
new file mode 100644
index 0000000000000..bcbf1bcb75db1
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs.py
@@ -0,0 +1,29 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.buck_workspace import buck_test
+from buck2.tests.e2e_util.helper.golden import golden_dir
+
+
+@buck_test()
+async def test_builtin_docs_golden(buck: Buck) -> None:
+    output = buck.cwd.parent / "output"
+    await buck.docs("starlark-builtins", "--output-dir", str(output))
+
+    outputs = {}
+    for file in output.glob("**/*.md"):
+        lines = file.read_text(encoding="utf-8").splitlines()
+        lines = filter(lambda x: x.startswith("# ") or x.startswith("## "), lines)
+        s = "\n".join(lines)
+
+        rel_path = file.relative_to(output)
+        outputs[str(rel_path)] = s
+
+    golden_dir(output=outputs, rel_path="buck2-golden-docs")
diff --git a/tests/core/docs/test_builtin_docs_data/.buckconfig b/tests/core/docs/test_builtin_docs_data/.buckconfig
new file mode 100644
index 0000000000000..df06a02c03ca2
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/.buckconfig
@@ -0,0 +1,5 @@
+[cells]
+ root = .
+
+[buildfile]
+ name = TARGETS.fixture
diff --git a/tests/core/docs/test_builtin_docs_data/.buckroot b/tests/core/docs/test_builtin_docs_data/.buckroot
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ActionErrorCtx.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ActionErrorCtx.md
new file mode 100644
index 0000000000000..45b8e6cb7e872
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ActionErrorCtx.md
@@ -0,0 +1,7 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# ActionErrorCtx
+## ActionErrorCtx.new\_error\_location
+## ActionErrorCtx.new\_sub\_error
+## ActionErrorCtx.stderr
+## ActionErrorCtx.stdout
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ActionErrorLocation.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ActionErrorLocation.md
new file mode 100644
index 0000000000000..5b34144f4d534
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ActionErrorLocation.md
@@ -0,0 +1,5 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# ActionErrorLocation
+## ActionErrorLocation.file
+## ActionErrorLocation.line
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ActionSubError.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ActionSubError.md
new file mode 100644
index 0000000000000..9cc32675be6e7
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ActionSubError.md
@@ -0,0 +1,6 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# ActionSubError
+## ActionSubError.category
+## ActionSubError.locations
+## ActionSubError.message
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/AnalysisActions.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/AnalysisActions.md
new file mode 100644
index 0000000000000..327ecd5e9ebc8
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/AnalysisActions.md
@@ -0,0 +1,22 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# AnalysisActions
+## AnalysisActions.anon\_target
+## AnalysisActions.anon\_targets
+## AnalysisActions.artifact\_tag
+## AnalysisActions.assert\_short\_path
+## AnalysisActions.cas\_artifact
+## AnalysisActions.copied\_dir
+## AnalysisActions.copy\_dir
+## AnalysisActions.copy\_file
+## AnalysisActions.declare\_output
+## AnalysisActions.digest\_config
+## AnalysisActions.download\_file
+## AnalysisActions.dynamic\_output
+## AnalysisActions.dynamic\_output\_new
+## AnalysisActions.run
+## AnalysisActions.symlink\_file
+## AnalysisActions.symlinked\_dir
+## AnalysisActions.tset
+## AnalysisActions.write
+## AnalysisActions.write\_json
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/AnalysisContext.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/AnalysisContext.md
new file mode 100644
index 0000000000000..92f846b6b7827
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/AnalysisContext.md
@@ -0,0 +1,7 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# AnalysisContext
+## AnalysisContext.actions
+## AnalysisContext.attrs
+## AnalysisContext.label
+## AnalysisContext.plugins
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/AnonTarget.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/AnonTarget.md
new file mode 100644
index 0000000000000..e111d9a5fca91
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/AnonTarget.md
@@ -0,0 +1,6 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# AnonTarget
+## AnonTarget.artifact
+## AnonTarget.artifacts
+## AnonTarget.promise
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/AnonTargets.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/AnonTargets.md
new file mode 100644
index 0000000000000..2ddfc3b6a84fd
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/AnonTargets.md
@@ -0,0 +1,5 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# AnonTargets
+## AnonTargets.anon\_targets
+## AnonTargets.promise
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/Artifact.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/Artifact.md
new file mode 100644
index 0000000000000..e0ebb484efd1d
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/Artifact.md
@@ -0,0 +1,12 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# Artifact
+## Artifact.as\_output
+## Artifact.basename
+## Artifact.extension
+## Artifact.is\_source
+## Artifact.owner
+## Artifact.project
+## Artifact.short\_path
+## Artifact.with\_associated\_artifacts
+## Artifact.without\_associated\_artifacts
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ArtifactTag.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ArtifactTag.md
new file mode 100644
index 0000000000000..799cdfa6103eb
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ArtifactTag.md
@@ -0,0 +1,5 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# ArtifactTag
+## ArtifactTag.tag\_artifacts
+## ArtifactTag.tag\_inputs
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ArtifactValue.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ArtifactValue.md
new file mode 100644
index 0000000000000..21aa125dde587
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ArtifactValue.md
@@ -0,0 +1,5 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# ArtifactValue
+## ArtifactValue.read\_json
+## ArtifactValue.read\_string
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/Attr.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/Attr.md
new file mode 100644
index 0000000000000..dbb622633d3c4
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/Attr.md
@@ -0,0 +1,3 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# Attr
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/CellPath.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/CellPath.md
new file mode 100644
index 0000000000000..fcccf83a10298
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/CellPath.md
@@ -0,0 +1,4 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# CellPath
+## CellPath.add
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/CellRoot.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/CellRoot.md
new file mode 100644
index 0000000000000..74ab872957fdd
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/CellRoot.md
@@ -0,0 +1,3 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# CellRoot
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/CommandExecutorConfig.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/CommandExecutorConfig.md
new file mode 100644
index 0000000000000..b0c11cbd0f29a
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/CommandExecutorConfig.md
@@ -0,0 +1,3 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# CommandExecutorConfig
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ConfigurationInfo.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ConfigurationInfo.md
new file mode 100644
index 0000000000000..3b13aa07e703c
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ConfigurationInfo.md
@@ -0,0 +1,5 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# ConfigurationInfo
+## ConfigurationInfo.constraints
+## ConfigurationInfo.values
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ConfiguredProvidersLabel.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ConfiguredProvidersLabel.md
new file mode 100644
index 0000000000000..106946b18d3f4
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ConfiguredProvidersLabel.md
@@ -0,0 +1,12 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# ConfiguredProvidersLabel
+## ConfiguredProvidersLabel.cell
+## ConfiguredProvidersLabel.cell\_root
+## ConfiguredProvidersLabel.configured\_target
+## ConfiguredProvidersLabel.name
+## ConfiguredProvidersLabel.package
+## ConfiguredProvidersLabel.path
+## ConfiguredProvidersLabel.project\_root
+## ConfiguredProvidersLabel.raw\_target
+## ConfiguredProvidersLabel.sub\_target
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ConfiguredTargetLabel.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ConfiguredTargetLabel.md
new file mode 100644
index 0000000000000..a8c4deac0f8fd
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ConfiguredTargetLabel.md
@@ -0,0 +1,10 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# ConfiguredTargetLabel
+## ConfiguredTargetLabel.cell
+## ConfiguredTargetLabel.config
+## ConfiguredTargetLabel.name
+## ConfiguredTargetLabel.package
+## ConfiguredTargetLabel.path
+## ConfiguredTargetLabel.raw\_target
+## ConfiguredTargetLabel.with\_sub\_target
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ConstraintSettingInfo.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ConstraintSettingInfo.md
new file mode 100644
index 0000000000000..c2d6def7d4134
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ConstraintSettingInfo.md
@@ -0,0 +1,4 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# ConstraintSettingInfo
+## ConstraintSettingInfo.label
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ConstraintValueInfo.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ConstraintValueInfo.md
new file mode 100644
index 0000000000000..f77d8d1b75361
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ConstraintValueInfo.md
@@ -0,0 +1,5 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# ConstraintValueInfo
+## ConstraintValueInfo.label
+## ConstraintValueInfo.setting
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/DefaultInfo.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/DefaultInfo.md
new file mode 100644
index 0000000000000..ff2133e07d567
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/DefaultInfo.md
@@ -0,0 +1,12 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# DefaultInfo
+# //foo_binary.bzl
+# //subdir/BUCK
+# ":gen_stuff" pulls the default_outputs for //subdir:gen_stuff
+# Builds just 'foo' binary. The strip command is never invoked.
+# builds the 'foo' binary, because it is needed by the 'strip' command. Ensures that
+# both the stripped binary and the debug symbols are built.
+## DefaultInfo.default\_outputs
+## DefaultInfo.other\_outputs
+## DefaultInfo.sub\_targets
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/Dependency.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/Dependency.md
new file mode 100644
index 0000000000000..0a8003034bbe4
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/Dependency.md
@@ -0,0 +1,7 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# Dependency
+## Dependency.get
+## Dependency.label
+## Dependency.providers
+## Dependency.sub\_target
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/DynamicActions.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/DynamicActions.md
new file mode 100644
index 0000000000000..580fb4c632fe3
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/DynamicActions.md
@@ -0,0 +1,3 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# DynamicActions
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/DynamicActionsCallable.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/DynamicActionsCallable.md
new file mode 100644
index 0000000000000..f6288dd197b6e
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/DynamicActionsCallable.md
@@ -0,0 +1,3 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# DynamicActionsCallable
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/DynamicValue.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/DynamicValue.md
new file mode 100644
index 0000000000000..71368c4b90d59
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/DynamicValue.md
@@ -0,0 +1,3 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# DynamicValue
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ExternalRunnerTestInfo.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ExternalRunnerTestInfo.md
new file mode 100644
index 0000000000000..0ce6bff7ac783
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ExternalRunnerTestInfo.md
@@ -0,0 +1,15 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# ExternalRunnerTestInfo
+## ExternalRunnerTestInfo.command
+## ExternalRunnerTestInfo.contacts
+## ExternalRunnerTestInfo.default\_executor
+## ExternalRunnerTestInfo.env
+## ExternalRunnerTestInfo.executor\_overrides
+## ExternalRunnerTestInfo.labels
+## ExternalRunnerTestInfo.local\_resources
+## ExternalRunnerTestInfo.required\_local\_resources
+## ExternalRunnerTestInfo.run\_from\_project\_root
+## ExternalRunnerTestInfo.test\_type
+## ExternalRunnerTestInfo.use\_project\_relative\_paths
+## ExternalRunnerTestInfo.worker
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/Label.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/Label.md
new file mode 100644
index 0000000000000..8411a9c71509a
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/Label.md
@@ -0,0 +1,12 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# Label
+## Label.cell
+## Label.cell\_root
+## Label.configured\_target
+## Label.name
+## Label.package
+## Label.path
+## Label.project\_root
+## Label.raw\_target
+## Label.sub\_target
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/LocalResourceInfo.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/LocalResourceInfo.md
new file mode 100644
index 0000000000000..c3022b311d9e1
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/LocalResourceInfo.md
@@ -0,0 +1,6 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# LocalResourceInfo
+## LocalResourceInfo.resource\_env\_vars
+## LocalResourceInfo.setup
+## LocalResourceInfo.setup\_timeout\_seconds
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/OutputArtifact.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/OutputArtifact.md
new file mode 100644
index 0000000000000..5322fcfa3199c
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/OutputArtifact.md
@@ -0,0 +1,3 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# OutputArtifact
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/PlatformInfo.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/PlatformInfo.md
new file mode 100644
index 0000000000000..9507284ac4537
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/PlatformInfo.md
@@ -0,0 +1,5 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# PlatformInfo
+## PlatformInfo.configuration
+## PlatformInfo.label
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ProjectRoot.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ProjectRoot.md
new file mode 100644
index 0000000000000..c04b6d626880a
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ProjectRoot.md
@@ -0,0 +1,3 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# ProjectRoot
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/Promise.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/Promise.md
new file mode 100644
index 0000000000000..c305806e25314
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/Promise.md
@@ -0,0 +1,5 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# Promise
+## Promise.join
+## Promise.map
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ProviderCollection.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ProviderCollection.md
new file mode 100644
index 0000000000000..263b074aa37d7
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ProviderCollection.md
@@ -0,0 +1,4 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# ProviderCollection
+## ProviderCollection.get
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ProvidersLabel.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ProvidersLabel.md
new file mode 100644
index 0000000000000..5f71b381b45e0
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ProvidersLabel.md
@@ -0,0 +1,8 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# ProvidersLabel
+## ProvidersLabel.cell
+## ProvidersLabel.name
+## ProvidersLabel.path
+## ProvidersLabel.raw\_target
+## ProvidersLabel.sub\_target
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/RequiredTestLocalResource.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/RequiredTestLocalResource.md
new file mode 100644
index 0000000000000..fdd64498b7714
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/RequiredTestLocalResource.md
@@ -0,0 +1,6 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# RequiredTestLocalResource
+## RequiredTestLocalResource.execution
+## RequiredTestLocalResource.listing
+## RequiredTestLocalResource.name
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ResolvedDynamicValue.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ResolvedDynamicValue.md
new file mode 100644
index 0000000000000..07f77c761888c
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ResolvedDynamicValue.md
@@ -0,0 +1,4 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# ResolvedDynamicValue
+## ResolvedDynamicValue.providers
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ResolvedStringWithMacros.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ResolvedStringWithMacros.md
new file mode 100644
index 0000000000000..52815f0680670
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ResolvedStringWithMacros.md
@@ -0,0 +1,3 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# ResolvedStringWithMacros
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/RunInfo.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/RunInfo.md
new file mode 100644
index 0000000000000..8da6cf1160a6a
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/RunInfo.md
@@ -0,0 +1,4 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# RunInfo
+## RunInfo.args
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/Select.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/Select.md
new file mode 100644
index 0000000000000..19852fc6eabec
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/Select.md
@@ -0,0 +1,3 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# Select
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/TargetLabel.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/TargetLabel.md
new file mode 100644
index 0000000000000..e773c3645703b
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/TargetLabel.md
@@ -0,0 +1,8 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# TargetLabel
+## TargetLabel.cell
+## TargetLabel.name
+## TargetLabel.package
+## TargetLabel.path
+## TargetLabel.with\_sub\_target
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/TemplatePlaceholderInfo.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/TemplatePlaceholderInfo.md
new file mode 100644
index 0000000000000..16a135e20d17f
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/TemplatePlaceholderInfo.md
@@ -0,0 +1,5 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# TemplatePlaceholderInfo
+## TemplatePlaceholderInfo.keyed\_variables
+## TemplatePlaceholderInfo.unkeyed\_variables
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/TransitiveSet.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/TransitiveSet.md
new file mode 100644
index 0000000000000..39a3caa53fcd8
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/TransitiveSet.md
@@ -0,0 +1,10 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# TransitiveSet
+## TransitiveSet.children
+## TransitiveSet.definition
+## TransitiveSet.project\_as\_args
+## TransitiveSet.project\_as\_json
+## TransitiveSet.reduce
+## TransitiveSet.traverse
+## TransitiveSet.value
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/TransitiveSetArgsProjection.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/TransitiveSetArgsProjection.md
new file mode 100644
index 0000000000000..e02d66dd86a76
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/TransitiveSetArgsProjection.md
@@ -0,0 +1,6 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# TransitiveSetArgsProjection
+## TransitiveSetArgsProjection.projection\_name
+## TransitiveSetArgsProjection.transitive\_set
+## TransitiveSetArgsProjection.traverse
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/TransitiveSetArgsProjectionIterator.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/TransitiveSetArgsProjectionIterator.md
new file mode 100644
index 0000000000000..541fb22e4b4d4
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/TransitiveSetArgsProjectionIterator.md
@@ -0,0 +1,3 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# TransitiveSetArgsProjectionIterator
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/TransitiveSetDefinition.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/TransitiveSetDefinition.md
new file mode 100644
index 0000000000000..9bbffa120c888
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/TransitiveSetDefinition.md
@@ -0,0 +1,3 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# TransitiveSetDefinition
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/TransitiveSetIterator.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/TransitiveSetIterator.md
new file mode 100644
index 0000000000000..3332ea520b58f
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/TransitiveSetIterator.md
@@ -0,0 +1,3 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# TransitiveSetIterator
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/TransitiveSetJsonProjection.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/TransitiveSetJsonProjection.md
new file mode 100644
index 0000000000000..1ef7ab2b28a18
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/TransitiveSetJsonProjection.md
@@ -0,0 +1,6 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# TransitiveSetJsonProjection
+## TransitiveSetJsonProjection.projection\_name
+## TransitiveSetJsonProjection.transitive\_set
+## TransitiveSetJsonProjection.traverse
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ValidationInfo.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ValidationInfo.md
new file mode 100644
index 0000000000000..ec70af68a59ea
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ValidationInfo.md
@@ -0,0 +1,4 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# ValidationInfo
+## ValidationInfo.validations
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ValidationSpec.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ValidationSpec.md
new file mode 100644
index 0000000000000..f8ff3240217c9
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/ValidationSpec.md
@@ -0,0 +1,6 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# ValidationSpec
+## ValidationSpec.name
+## ValidationSpec.optional
+## ValidationSpec.validation\_result
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/WorkerInfo.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/WorkerInfo.md
new file mode 100644
index 0000000000000..eefaee49c223b
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/WorkerInfo.md
@@ -0,0 +1,5 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# WorkerInfo
+## WorkerInfo.concurrency
+## WorkerInfo.exe
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/WorkerRunInfo.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/WorkerRunInfo.md
new file mode 100644
index 0000000000000..5896708b22e1c
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/WorkerRunInfo.md
@@ -0,0 +1,5 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# WorkerRunInfo
+## WorkerRunInfo.exe
+## WorkerRunInfo.worker
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/cmd_args.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/cmd_args.md
new file mode 100644
index 0000000000000..668fd0037c6ac
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/cmd_args.md
@@ -0,0 +1,8 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# cmd_args
+## cmd\_args.add
+## cmd\_args.copy
+## cmd\_args.inputs
+## cmd\_args.outputs
+## cmd\_args.relative\_to
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/dynattrs/DynamicAttrType.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/dynattrs/DynamicAttrType.md
new file mode 100644
index 0000000000000..4dde5a9e0c909
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/dynattrs/DynamicAttrType.md
@@ -0,0 +1,3 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# DynamicAttrType
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/dynattrs/index.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/dynattrs/index.md
new file mode 100644
index 0000000000000..cd31aae36c385
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/dynattrs/index.md
@@ -0,0 +1,11 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# dynattrs
+## artifact\_value
+## dict
+## dynamic\_value
+## list
+## option
+## output
+## tuple
+## value
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/index.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/index.md
new file mode 100644
index 0000000000000..b8f020f175c19
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/index.md
@@ -0,0 +1,43 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# Build APIs
+## ExecutionPlatformInfo
+## ExecutionPlatformRegistrationInfo
+## InstallInfo
+## Provider
+## anon\_rule
+## attrs
+## dedupe
+## dynamic\_actions
+## get\_base\_path
+## get\_cell\_name
+## glob
+## host\_info
+## implicit\_package\_symbol
+## load\_symbols
+## oncall
+## package
+## package\_name
+## provider
+## provider\_field
+## read\_config
+## read\_oncall
+## read\_package\_value
+## read\_parent\_package\_value
+## read\_root\_config
+## regex\_match
+## repository\_name
+## rule
+## rule\_exists
+## select
+## select\_equal\_internal
+## select\_map
+## select\_test
+## set\_cfg\_constructor
+## set\_starlark\_peak\_allocated\_byte\_limit
+## sha256
+## soft\_error
+## transition
+## transitive\_set
+## warning
+## write\_package\_value
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/plugins/PluginKind.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/plugins/PluginKind.md
new file mode 100644
index 0000000000000..f5ec9a2564d80
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/plugins/PluginKind.md
@@ -0,0 +1,3 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# PluginKind
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/plugins/index.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/plugins/index.md
new file mode 100644
index 0000000000000..281a987c35017
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/plugins/index.md
@@ -0,0 +1,5 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# plugins
+## All
+## kind
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/regex.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/regex.md
new file mode 100644
index 0000000000000..2d8dfab1a5fc7
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/build/regex.md
@@ -0,0 +1,4 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# regex
+## regex.match
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/ActionQueryNode.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/ActionQueryNode.md
new file mode 100644
index 0000000000000..3c0ab3a189f20
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/ActionQueryNode.md
@@ -0,0 +1,7 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# ActionQueryNode
+## ActionQueryNode.action
+## ActionQueryNode.analysis
+## ActionQueryNode.attrs
+## ActionQueryNode.rule\_type
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/Actions.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/Actions.md
new file mode 100644
index 0000000000000..d80bb72959d8e
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/Actions.md
@@ -0,0 +1,6 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# Actions
+## Actions.actions
+## Actions.exec\_deps
+## Actions.toolchains
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/AnalysisResult.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/AnalysisResult.md
new file mode 100644
index 0000000000000..3bd13bf033c3b
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/AnalysisResult.md
@@ -0,0 +1,5 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# AnalysisResult
+## AnalysisResult.as\_dependency
+## AnalysisResult.providers
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/AqueryContext.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/AqueryContext.md
new file mode 100644
index 0000000000000..c66090c2779e4
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/AqueryContext.md
@@ -0,0 +1,8 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# AqueryContext
+## AqueryContext.all\_actions
+## AqueryContext.all\_outputs
+## AqueryContext.attrfilter
+## AqueryContext.deps
+## AqueryContext.eval
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/AuditContext.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/AuditContext.md
new file mode 100644
index 0000000000000..bf07bc901d74e
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/AuditContext.md
@@ -0,0 +1,5 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# AuditContext
+## AuditContext.cell
+## AuditContext.output
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/BuildResult.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/BuildResult.md
new file mode 100644
index 0000000000000..d847645472e8a
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/BuildResult.md
@@ -0,0 +1,5 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# BuildResult
+## BuildResult.artifacts
+## BuildResult.failures
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/CliArgs.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/CliArgs.md
new file mode 100644
index 0000000000000..acd91f12d991d
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/CliArgs.md
@@ -0,0 +1,3 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# CliArgs
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/ConfiguredTargetNode.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/ConfiguredTargetNode.md
new file mode 100644
index 0000000000000..d935e396a5e8e
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/ConfiguredTargetNode.md
@@ -0,0 +1,19 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# ConfiguredTargetNode
+## ConfiguredTargetNode.attrs\_eager
+## ConfiguredTargetNode.attrs\_lazy
+## ConfiguredTargetNode.buildfile\_path
+## ConfiguredTargetNode.deps
+## ConfiguredTargetNode.get\_attr
+## ConfiguredTargetNode.get\_attrs
+## ConfiguredTargetNode.get\_source
+## ConfiguredTargetNode.has\_attr
+## ConfiguredTargetNode.label
+## ConfiguredTargetNode.oncall
+## ConfiguredTargetNode.resolved\_attrs\_eager
+## ConfiguredTargetNode.resolved\_attrs\_lazy
+## ConfiguredTargetNode.rule\_kind
+## ConfiguredTargetNode.rule\_type
+## ConfiguredTargetNode.sources
+## ConfiguredTargetNode.unwrap\_forward
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/ConfiguredTargetSet.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/ConfiguredTargetSet.md
new file mode 100644
index 0000000000000..95637b4782995
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/ConfiguredTargetSet.md
@@ -0,0 +1,3 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# ConfiguredTargetSet
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/Context.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/Context.md
new file mode 100644
index 0000000000000..1f4d946176487
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/Context.md
@@ -0,0 +1,25 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# Context
+## Context.analysis
+## Context.aquery
+## Context.audit
+## Context.build
+## Context.bxl\_actions
+## Context.cell\_root
+## Context.cli\_args
+## Context.configured\_targets
+## Context.cquery
+## Context.fs
+## Context.instant\_event
+## Context.lazy
+## Context.modifiers
+## Context.output
+## Context.resolve
+## Context.root
+## Context.target\_exists
+## Context.target\_platform
+## Context.target\_universe
+## Context.unconfigured\_sub\_targets
+## Context.unconfigured\_targets
+## Context.uquery
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/CqueryContext.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/CqueryContext.md
new file mode 100644
index 0000000000000..cabbee095efea
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/CqueryContext.md
@@ -0,0 +1,18 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# CqueryContext
+## CqueryContext.allpaths
+## CqueryContext.attrfilter
+## CqueryContext.attrregexfilter
+## CqueryContext.buildfile
+## CqueryContext.deps
+## CqueryContext.eval
+## CqueryContext.filter
+## CqueryContext.inputs
+## CqueryContext.kind
+## CqueryContext.nattrfilter
+## CqueryContext.owner
+## CqueryContext.rdeps
+## CqueryContext.somepath
+## CqueryContext.testsof
+## CqueryContext.testsof\_with\_default\_target\_platform
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/EnsuredArtifact.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/EnsuredArtifact.md
new file mode 100644
index 0000000000000..44d1583086775
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/EnsuredArtifact.md
@@ -0,0 +1,5 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# EnsuredArtifact
+## EnsuredArtifact.abs\_path
+## EnsuredArtifact.rel\_path
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/Error.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/Error.md
new file mode 100644
index 0000000000000..6964c8e769b84
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/Error.md
@@ -0,0 +1,4 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# Error
+## Error.message
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/FileNode.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/FileNode.md
new file mode 100644
index 0000000000000..8f2917ee85f4f
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/FileNode.md
@@ -0,0 +1,5 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# FileNode
+## FileNode.cell
+## FileNode.path
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/Filesystem.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/Filesystem.md
new file mode 100644
index 0000000000000..3714efb624d8b
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/Filesystem.md
@@ -0,0 +1,10 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# Filesystem
+## Filesystem.abs\_path\_unsafe
+## Filesystem.exists
+## Filesystem.is\_dir
+## Filesystem.is\_file
+## Filesystem.list
+## Filesystem.project\_rel\_path
+## Filesystem.source
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/Lazy.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/Lazy.md
new file mode 100644
index 0000000000000..538934cfbe981
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/Lazy.md
@@ -0,0 +1,5 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# Lazy
+## Lazy.catch
+## Lazy.resolve
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/LazyContext.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/LazyContext.md
new file mode 100644
index 0000000000000..2335e1f0ac1c9
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/LazyContext.md
@@ -0,0 +1,8 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# LazyContext
+## LazyContext.analysis
+## LazyContext.configured\_target\_node
+## LazyContext.join
+## LazyContext.join\_all
+## LazyContext.unconfigured\_target\_node
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/LazyResolvedAttrs.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/LazyResolvedAttrs.md
new file mode 100644
index 0000000000000..527191f8a2273
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/LazyResolvedAttrs.md
@@ -0,0 +1,4 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# LazyResolvedAttrs
+## LazyResolvedAttrs.get
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/OutputStream.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/OutputStream.md
new file mode 100644
index 0000000000000..00471434e3b46
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/OutputStream.md
@@ -0,0 +1,7 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# OutputStream
+## OutputStream.ensure
+## OutputStream.ensure\_multiple
+## OutputStream.print
+## OutputStream.print\_json
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/Result.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/Result.md
new file mode 100644
index 0000000000000..8bf7b2177dd43
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/Result.md
@@ -0,0 +1,6 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# Result
+## Result.is\_ok
+## Result.unwrap
+## Result.unwrap\_err
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/TargetUniverse.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/TargetUniverse.md
new file mode 100644
index 0000000000000..996dfbc449371
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/TargetUniverse.md
@@ -0,0 +1,6 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# TargetUniverse
+## TargetUniverse.lookup
+## TargetUniverse.target\_set
+## TargetUniverse.universe\_target\_set
diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/UnconfiguredTargetNode.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/UnconfiguredTargetNode.md
new file mode 100644
index 0000000000000..8104992d6e6b6
--- /dev/null
+++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/UnconfiguredTargetNode.md
@@ -0,0 +1,13 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+# UnconfiguredTargetNode
+## UnconfiguredTargetNode.attrs
+## UnconfiguredTargetNode.buildfile\_path
+## UnconfiguredTargetNode.deps
+## UnconfiguredTargetNode.get\_attr +## UnconfiguredTargetNode.get\_attrs +## UnconfiguredTargetNode.has\_attr +## UnconfiguredTargetNode.label +## UnconfiguredTargetNode.oncall +## UnconfiguredTargetNode.rule\_kind +## UnconfiguredTargetNode.rule\_type diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/UnconfiguredTargetSet.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/UnconfiguredTargetSet.md new file mode 100644 index 0000000000000..143ed677a14a9 --- /dev/null +++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/UnconfiguredTargetSet.md @@ -0,0 +1,3 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +# UnconfiguredTargetSet diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/UqueryContext.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/UqueryContext.md new file mode 100644 index 0000000000000..39aa415796813 --- /dev/null +++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/UqueryContext.md @@ -0,0 +1,17 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +# UqueryContext +## UqueryContext.allpaths +## UqueryContext.attrfilter +## UqueryContext.attrregexfilter +## UqueryContext.buildfile +## UqueryContext.deps +## UqueryContext.eval +## UqueryContext.filter +## UqueryContext.inputs +## UqueryContext.kind +## UqueryContext.owner +## UqueryContext.rdeps +## UqueryContext.somepath +## UqueryContext.targets\_in\_buildfile +## UqueryContext.testsof diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/cli_args.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/cli_args.md new file mode 100644 index 0000000000000..6b01e9b053887 --- /dev/null +++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/cli_args.md @@ -0,0 +1,15 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +# cli_args +## bool +## enum +## float +## int +## json +## list +## option +## string +## sub\_target +## sub\_target\_expr +## target\_expr +## target\_label diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/index.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/index.md new file mode 100644 index 0000000000000..1243489c44510 --- /dev/null +++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/bxl/index.md @@ -0,0 +1,11 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +# Bxl APIs +## ctarget\_set +## fail\_no\_stacktrace +## file\_set +## get\_path\_without\_materialization +## get\_paths\_without\_materialization +## main +## now +## utarget\_set diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/bool.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/bool.md new file mode 100644 index 0000000000000..1daf46e54ada3 --- /dev/null +++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/bool.md @@ -0,0 +1,3 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +# bool diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/dict.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/dict.md new file mode 100644 index 
0000000000000..b90bc04036346 --- /dev/null +++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/dict.md @@ -0,0 +1,12 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +# dict +## dict.clear +## dict.get +## dict.items +## dict.keys +## dict.pop +## dict.popitem +## dict.setdefault +## dict.update +## dict.values diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/float.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/float.md new file mode 100644 index 0000000000000..1a2fd876811c8 --- /dev/null +++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/float.md @@ -0,0 +1,3 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +# float diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/index.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/index.md new file mode 100644 index 0000000000000..878699066ad66 --- /dev/null +++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/index.md @@ -0,0 +1,41 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +# Starlark APIs +## False +## None +## True +## abs +## all +## any +## breakpoint +## call\_stack +## call\_stack\_frame +## chr +## debug +## dir +## enum +## enumerate +## eval\_type +## fail +## field +## filter +## getattr +## hasattr +## hash +## isinstance +## len +## map +## max +## min +## ord +## partial +## pprint +## prepr +## print +## pstr +## record +## repr +## reversed +## set +## sorted +## zip diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/int.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/int.md new file mode 100644 index 0000000000000..9b1f3fb103923 --- /dev/null +++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/int.md @@ -0,0 +1,3 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +# int diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/json.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/json.md new file mode 100644 index 0000000000000..a02c6a31cd4e5 --- /dev/null +++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/json.md @@ -0,0 +1,5 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +# json +## decode +## encode diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/list.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/list.md new file mode 100644 index 0000000000000..4fa41964ea5d5 --- /dev/null +++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/list.md @@ -0,0 +1,10 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +# list +## list.append +## list.clear +## list.extend +## list.index +## list.insert +## list.pop +## list.remove diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/range.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/range.md new file mode 100644 index 0000000000000..f3f2129dc3907 --- /dev/null +++ 
b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/range.md @@ -0,0 +1,3 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +# range diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/starlark_rust_internal.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/starlark_rust_internal.md new file mode 100644 index 0000000000000..3ae848b0b5b1c --- /dev/null +++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/starlark_rust_internal.md @@ -0,0 +1,4 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +# starlark_rust_internal +## ty\_of\_value\_debug diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/str.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/str.md new file mode 100644 index 0000000000000..c01f3c8b6ecc3 --- /dev/null +++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/str.md @@ -0,0 +1,36 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +# str +## str.capitalize +## str.codepoints +## str.count +## str.elems +## str.endswith +## str.find +## str.format +## str.index +## str.isalnum +## str.isalpha +## str.isdigit +## str.islower +## str.isspace +## str.istitle +## str.isupper +## str.join +## str.lower +## str.lstrip +## str.partition +## str.removeprefix +## str.removesuffix +## str.replace +## str.rfind +## str.rindex +## str.rpartition +## str.rsplit +## str.rstrip +## str.split +## str.splitlines +## str.startswith +## str.strip +## str.title +## str.upper diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/struct.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/struct.md new file mode 100644 index 0000000000000..cb0800a7add03 --- /dev/null +++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/struct.md @@ -0,0 +1,3 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +# struct diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/tuple.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/tuple.md new file mode 100644 index 0000000000000..ae58c63b2a4b7 --- /dev/null +++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/tuple.md @@ -0,0 +1,3 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +# tuple diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/type.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/type.md new file mode 100644 index 0000000000000..b3d4593eeab3e --- /dev/null +++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/type.md @@ -0,0 +1,3 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +# type diff --git a/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/typing.md b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/typing.md new file mode 100644 index 0000000000000..33abbab65e73f --- /dev/null +++ b/tests/core/docs/test_builtin_docs_data/buck2-golden-docs/starlark/typing.md @@ -0,0 +1,7 @@ +# This file is @generated, 
regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +# typing +## Any +## Callable +## Iterable +## Never diff --git a/tests/core/docs/test_docs.py b/tests/core/docs/test_docs.py new file mode 100644 index 0000000000000..4d070f410be8d --- /dev/null +++ b/tests/core/docs/test_docs.py @@ -0,0 +1,55 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json + +import pytest + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.golden import golden + + +""" +Tests to ensure that the `buck docs` command works as expected +""" + + +@buck_test() +async def test_docs_returns(buck: Buck) -> None: + result = await buck.docs("starlark") + result.check_returncode() + decoded = json.loads(result.stdout) + assert decoded == [] + + +@buck_test() +async def test_prelude_docs(buck: Buck) -> None: + result = await buck.docs("starlark", "prelude//:prelude.bzl") + result.check_returncode() + decoded = json.loads(result.stdout) + golden( + output=json.dumps(decoded, indent=2), + rel_path="prelude_docs.golden.json", + ) + + +@pytest.mark.xfail(reason="until we ban non .bzl paths, this would be valid") +@buck_test() +async def test_docs_fail_with_invalid_patterns(buck: Buck) -> None: + await expect_failure( + buck.docs("starlark", "not_an_import_path"), + stderr_regex="Expected a cell path to a `.bzl` file, but got `root//not_an_import_path`", + ) + await expect_failure( + buck.docs("starlark", "//cell"), + stderr_regex="Expected a cell path to a `.bzl` file, but got `root//cell`", + ) diff --git a/tests/core/docs/test_docs_data/.buckconfig b/tests/core/docs/test_docs_data/.buckconfig new file mode 100644 index 0000000000000..860942bca063f --- /dev/null +++ b/tests/core/docs/test_docs_data/.buckconfig @@ -0,0 +1,16 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored + +[repositories] +root = . +cell = cell +prelude = prelude +fbcode = fbcode +fbsource = fbsource +buck = buck +config = config +toolchains = toolchains +special = special diff --git a/tests/core/docs/test_docs_data/.buckroot b/tests/core/docs/test_docs_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/docs/test_docs_data/cell/.buckconfig b/tests/core/docs/test_docs_data/cell/.buckconfig new file mode 100644 index 0000000000000..e77bcaa60a90f --- /dev/null +++ b/tests/core/docs/test_docs_data/cell/.buckconfig @@ -0,0 +1,14 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored + +[repositories] +cell = . +root = .. +fbcode = ../fbcode +fbsource = ../fbsource +buck = ../buck +special = ../special +config = ../config diff --git a/tests/core/docs/test_docs_data/cell/dir/defs.bzl b/tests/core/docs/test_docs_data/cell/dir/defs.bzl new file mode 100644 index 0000000000000..c97736bdf6bf3 --- /dev/null +++ b/tests/core/docs/test_docs_data/cell/dir/defs.bzl @@ -0,0 +1,37 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates.
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +""" +This is the summary for the module + +And these are its details +""" + +def bar(a, b: str, *, c: str, d: str = "some_default") -> list[str]: + """ + This is the summary line for 'bar' + + These are the details that go below. + We'll query for this symbol, and verify it matches json as expected + Don't document 'd' + + Args: + a: Docs for a + b: Docs for b + c: Docs for c + """ + return ["a={a}, b={b}, c={c}, d={d}".format(a = a, b = b, c = c, d = d)] + +def baz(): + """ Simple docstring for baz """ + pass + +def quz(): + # No docstring + pass + +undocumented_variable = 5 diff --git a/tests/core/docs/test_docs_data/prelude/prelude.bzl b/tests/core/docs/test_docs_data/prelude/prelude.bzl new file mode 100644 index 0000000000000..a4d77fe2b50ec --- /dev/null +++ b/tests/core/docs/test_docs_data/prelude/prelude.bzl @@ -0,0 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def foo(): + pass diff --git a/tests/core/docs/test_docs_data/prelude_docs.golden.json b/tests/core/docs/test_docs_data/prelude_docs.golden.json new file mode 100644 index 0000000000000..28661a041455a --- /dev/null +++ b/tests/core/docs/test_docs_data/prelude_docs.golden.json @@ -0,0 +1,23 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +[ + { + "id": { + "name": "foo", + "location": { + "path": "prelude//:prelude.bzl" + } + }, + "item": { + "kind": "function", + "docs": null, + "params": [], + "ret": { + "docs": null, + "type": "typing.Any" + }, + "as_type": null + }, + "custom_attrs": {} + } +] diff --git a/tests/core/docs/test_docs_data/providers.bzl b/tests/core/docs/test_docs_data/providers.bzl new file mode 100644 index 0000000000000..07e8c38da7683 --- /dev/null +++ b/tests/core/docs/test_docs_data/providers.bzl @@ -0,0 +1,29 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +SummaryInfo = provider( + doc = "Summary for SummaryInfo", + fields = ["a", "b", "c"], +) + +NoDocstringInfo = provider( + fields = ["a", "b", "c"], +) + +SimpleDocumentedInfo = provider( + doc = "Summary for SimpleDocumentedInfo\n\nDetails for SimpleDocumentedInfo", + fields = ["a", "b", "c"], +) + +SimpleSummaryInfo = provider( + doc = "Summary for SimpleSummaryInfo", + fields = ["a", "b", "c"], +) + +SimpleNoDocstringInfo = provider( + fields = ["a", "b", "c"], +) diff --git a/tests/core/errors/BUCK b/tests/core/errors/BUCK new file mode 100644 index 0000000000000..238f11ec8b4bf --- /dev/null +++ b/tests/core/errors/BUCK @@ -0,0 +1,32 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_errors", + srcs = ["test_errors.py"], + data_dir = "test_errors_data", + deps = [ + "//buck2/tests/e2e_util:golden", + ], +) + +buck2_e2e_test( + name = "test_exit_code", + srcs = ["test_exit_code.py"], + data_dir = "test_exit_code_data", +) + +buck2_e2e_test( + name = "test_command_report", + srcs = ["test_command_report.py"], + data_dir = "test_command_report_data", + deps = ["fbcode//buck2/tests/e2e_util:golden"], +) + +buck2_e2e_test( + name = "test_formatting", + srcs = ["test_formatting.py"], + data_dir = "test_formatting_data", + deps = ["fbcode//buck2/tests/e2e_util:golden"], +) diff --git a/tests/core/errors/test_command_report.py b/tests/core/errors/test_command_report.py new file mode 100644 index 0000000000000..502eacba9b5eb --- /dev/null +++ b/tests/core/errors/test_command_report.py @@ -0,0 +1,128 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import asyncio +import json +from pathlib import Path +from typing import List + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test, env +from buck2.tests.e2e_util.helper.golden import golden + +# From `exit_result.rs/ExitCode` +Success = 0 +UnknownFailure = 1 +InfraError = 2 +UserError = 3 +ConnectError = 11 + + +def command_report_test(name: str, command: List[str]) -> None: + async def impl(buck: Buck, tmp_path: Path) -> None: + report = tmp_path / "command_report.json" + await expect_failure(buck.build("--command-report-path", str(report), *command)) + + with open(report) as f: + report = json.loads(f.read()) + del report["trace_id"] + + golden( + output=json.dumps(report, indent=2, sort_keys=True), + rel_path="fixtures/" + name + ".golden.json", + ) + + globals()[name] = impl + + buck_test()(impl) + + +# Test build with a couple of build errors +command_report_test("test_command_report_build_errors", [":fail1", ":fail2"]) + + +# Set Watchman timeout to 0 to mimic a Watchman Timeout error.
+@buck_test(extra_buck_config={"buck2": {"file_watcher": "watchman"}}) +@env("BUCK2_WATCHMAN_TIMEOUT", "0") +async def test_command_report_watchman_error(buck: Buck, tmp_path: Path) -> None: + report = tmp_path / "command_report.json" + await expect_failure( + buck.build("--command-report-path", str(report), ":build_success") + ) + + with open(report) as f: + report = json.loads(f.read()) + + assert report["exit_code"] == UserError + assert "SyncableQueryHandler returned an error" in report["error_messages"][0] + + +# Early client error that doesn't show up in invocation records +@buck_test() +@env("BUCK2_TEST_INIT_DAEMON_ERROR", "true") +async def test_command_report_init_daemon_error(buck: Buck, tmp_path: Path) -> None: + report = tmp_path / "command_report.json" + await expect_failure( + buck.build("--command-report-path", str(report), ":build_success") + ) + + with open(report) as f: + report = json.loads(f.read()) + + assert report["exit_code"] == InfraError + assert "Injected init daemon error" in report["error_messages"][0] + + +# Deliberately cause a daemon connection failure. +@buck_test() +@env("BUCK2_TEST_FAIL_BUCKD_AUTH", "true") +# This test case spawns a loose daemon that we can't connect to. On Windows +# this loose daemon will keep holding onto buck-out files after the test case finishes +# and prevent other processes from changing them, so set a termination timeout +# of 15 seconds so that this loose daemon gets killed before the test case finishes. +@env("BUCK2_TERMINATE_AFTER", "15") +async def test_exit_result_connection_error(buck: Buck, tmp_path: Path) -> None: + report = tmp_path / "command_report.json" + await expect_failure( + buck.build("--command-report-path", str(report), ":build_success") + ) + + with open(report) as f: + report = json.loads(f.read()) + + assert report["exit_code"] == ConnectError + assert "injected auth error" in report["error_messages"][0] + + await asyncio.sleep( + 20 + ) # Makes sure the daemon terminates before test case finishes + + +# Late client error takes precedence over action errors +@buck_test() +@env("BUCK2_TEST_BUILD_ERROR", "true") +# Ideally both the action error and the client error would be included in the exit result, +# but this is difficult with the current design; it might be easier to just rewrite +# and merge the exit result logic into the invocation record so everything is logged to Scuba. +async def test_command_report_post_build_client_error( + buck: Buck, tmp_path: Path +) -> None: + report = tmp_path / "command_report.json" + # Failed build that should have some action errors + await expect_failure(buck.build("--command-report-path", str(report), ":fail1")) + + with open(report) as f: + report = json.loads(f.read()) + + # There's only 1 error message and it's the late client error that's injected + assert len(report["error_messages"]) == 1 + assert report["exit_code"] == UnknownFailure + assert "Injected Build Response Error" in report["error_messages"][0] diff --git a/tests/core/errors/test_command_report_data/.buckconfig b/tests/core/errors/test_command_report_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/errors/test_command_report_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = .
+ nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/errors/test_command_report_data/TARGETS.fixture b/tests/core/errors/test_command_report_data/TARGETS.fixture new file mode 100644 index 0000000000000..30b7fd95c9008 --- /dev/null +++ b/tests/core/errors/test_command_report_data/TARGETS.fixture @@ -0,0 +1,6 @@ +load(":defs.bzl", "build_fail") + +trivial_build(name = "build_success") + +build_fail(name = "fail1") +build_fail(name = "fail2") diff --git a/tests/core/errors/test_command_report_data/defs.bzl b/tests/core/errors/test_command_report_data/defs.bzl new file mode 100644 index 0000000000000..e0f569f81ad61 --- /dev/null +++ b/tests/core/errors/test_command_report_data/defs.bzl @@ -0,0 +1,19 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _build_fail(ctx): + out = ctx.actions.declare_output("out.txt") + ctx.actions.run( + cmd_args("exit", "1", hidden = out.as_output()), + category = "run", + ) + return [DefaultInfo(default_output = out)] + +build_fail = rule( + impl = _build_fail, + attrs = {}, +) diff --git a/tests/core/errors/test_command_report_data/fixtures/test_command_report_build_errors.golden.json b/tests/core/errors/test_command_report_data/fixtures/test_command_report_build_errors.golden.json new file mode 100644 index 0000000000000..a10a2f67b25ea --- /dev/null +++ b/tests/core/errors/test_command_report_data/fixtures/test_command_report_build_errors.golden.json @@ -0,0 +1,9 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "error_messages": [ + "Failed to build 'root//:fail1 ()'", + "Failed to build 'root//:fail2 ()'" + ], + "exit_code": 3 +} diff --git a/tests/core/errors/test_errors.py b/tests/core/errors/test_errors.py new file mode 100644 index 0000000000000..5fddefe3e85a5 --- /dev/null +++ b/tests/core/errors/test_errors.py @@ -0,0 +1,85 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict +import re + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test, env +from buck2.tests.e2e_util.helper.golden import golden + + +@buck_test() +async def test_soft_error(buck: Buck) -> None: + await expect_failure( + buck.targets(":"), stderr_regex="starlark_raised_soft_error.*Will be reported" + ) + + +@buck_test() +@env("BUCK2_HARD_ERROR", "false") +async def test_soft_error_quiet(buck: Buck) -> None: + res = await buck.targets("quiet:", ":") + assert "starlark_raised_soft_error" in res.stderr + assert "starlark_quiet_soft_error" not in res.stderr + + +@buck_test() +@env("BUCK2_HARD_ERROR", "false") +async def test_soft_error_no_stack(buck: Buck) -> None: + res = await buck.targets(":") + assert "Traceback" in res.stderr + + res = await buck.targets("no_stack:") + assert "Traceback" not in res.stderr + + +@buck_test( + # windows errors are slightly different, just skip for now + skip_for_os=["windows"], +) +@env("BUCK2_HARD_ERROR", "false") +async def test_package_listing_errors(buck: Buck) -> None: + outs = [] + for target in [ + # //package_listing/missing does not exist + "//package_listing/missing/foo/x/y/lmnop:target", + # //package_listing/ignored is ignored + "//package_listing/ignored/foo/x/y/lmnop:target", + # //package_listing/cell is a cell + "//package_listing/cell/foo/x/y/lmnop:target", + # //package_listing/missing_targets_file has no TARGETS file + "//package_listing/missing_targets_file:target", + # //package_listing/data.file is a file + "//package_listing/data.file:target", + "//package_listing/data.file/subdir:target", + ]: + out = await expect_failure(buck.uquery(target, "-v=0", "--console=none")) + stripped_stderr = re.sub( + "read_dir(.*)", "read_dir()", out.stderr + ) + # version extraction failed message fails to respect "-v=0" + stripped_stderr = re.sub( + r"(?m)^version extraction failed.*\n?", "", stripped_stderr + ) + + outs.append(stripped_stderr) + + golden(output="\n\n\n".join(outs), rel_path="package_listing/expected.golden.out") + + +@buck_test( + # windows errors are slightly different, just skip for now + skip_for_os=["windows"], +) +async def test_configured_graph_deps_collapsed_in_errors(buck: Buck) -> None: + out = await expect_failure( + buck.cquery("//deps_collapsed:top", "-v=0", "--console=none") + ) + stderr = re.sub("#[a-f0-9]*\\)", "#00000000)", out.stderr) + golden(output=stderr, rel_path="deps_collapsed/expected.golden.out") diff --git a/tests/core/errors/test_errors_data/.buckconfig b/tests/core/errors/test_errors_data/.buckconfig new file mode 100644 index 0000000000000..14eeaa10da813 --- /dev/null +++ b/tests/core/errors/test_errors_data/.buckconfig @@ -0,0 +1,14 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . 
+toolchains = toolchains +prelude = prelude +cell = package_listing/cell + +[project] +ignore = package_listing/*red/** + +[buck2] +allow_eden_io = false diff --git a/tests/core/errors/test_errors_data/.buckroot b/tests/core/errors/test_errors_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/errors/test_errors_data/TARGETS.fixture b/tests/core/errors/test_errors_data/TARGETS.fixture new file mode 100644 index 0000000000000..2723c746ef72c --- /dev/null +++ b/tests/core/errors/test_errors_data/TARGETS.fixture @@ -0,0 +1 @@ +soft_error("starlark_raised_soft_error", "Will be reported") diff --git a/tests/core/errors/test_errors_data/deps_collapsed/TARGETS.fixture b/tests/core/errors/test_errors_data/deps_collapsed/TARGETS.fixture new file mode 100644 index 0000000000000..72163199d233d --- /dev/null +++ b/tests/core/errors/test_errors_data/deps_collapsed/TARGETS.fixture @@ -0,0 +1,48 @@ +platform( + name = "target", +) + +platform( + name = "exec", +) + +simple_rule( + name = "top", + default_target_platform = ":target", + deps = [":mid1"], +) + +simple_rule( + name = "mid1", + deps = [":mid2"], +) + +simple_rule( + name = "mid2", + deps = [":mid3"], +) + +simple_rule( + name = "mid3", + deps = [(":next1", ":exec")], +) + +simple_rule( + name = "next1", + deps = [":next2"], +) + +simple_rule( + name = "next2", + deps = [":next3"], +) + +simple_rule( + name = "next3", + deps = [":broken"], +) + +simple_rule( + name = "broken", + deps = [":missing"], +) diff --git a/tests/core/errors/test_errors_data/deps_collapsed/expected.golden.out b/tests/core/errors/test_errors_data/deps_collapsed/expected.golden.out new file mode 100644 index 0000000000000..38c55bd30fc82 --- /dev/null +++ b/tests/core/errors/test_errors_data/deps_collapsed/expected.golden.out @@ -0,0 +1,23 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Command failed: +Error in configured node dependency, dependency chain follows (-> indicates depends on, ^ indicates same configuration as previous): + root//deps_collapsed:top (root//deps_collapsed:target#00000000) + -> root//deps_collapsed:mid1 (^) + -> root//deps_collapsed:mid2 (^) + -> root//deps_collapsed:mid3 (^) + -> root//deps_collapsed:next1 (root//deps_collapsed:exec#00000000) + -> root//deps_collapsed:next2 (^) + -> root//deps_collapsed:next3 (^) + -> root//deps_collapsed:broken (^) + -> root//deps_collapsed:missing (^) + + +Caused by: + 0: looking up unconfigured target node `root//deps_collapsed:missing` + 1: Unknown target `missing` from package `root//deps_collapsed`. + Did you mean one of the 10 targets in root//deps_collapsed:TARGETS.fixture? + Maybe you meant one of these similar targets? 
+ root//deps_collapsed:mid1 + root//deps_collapsed:mid2 + root//deps_collapsed:mid3 diff --git a/tests/core/errors/test_errors_data/no_stack/TARGETS.fixture b/tests/core/errors/test_errors_data/no_stack/TARGETS.fixture new file mode 100644 index 0000000000000..f20bb59c41e0a --- /dev/null +++ b/tests/core/errors/test_errors_data/no_stack/TARGETS.fixture @@ -0,0 +1 @@ +soft_error("starlark_no_stack_soft_error", "Blah", stack = False) diff --git a/tests/core/errors/test_errors_data/package_listing/cell/README b/tests/core/errors/test_errors_data/package_listing/cell/README new file mode 100644 index 0000000000000..a7888252961f0 --- /dev/null +++ b/tests/core/errors/test_errors_data/package_listing/cell/README @@ -0,0 +1 @@ +directory is used for package listing errors tests, intentionally empty(-ish) diff --git a/tests/core/errors/test_errors_data/package_listing/data.file b/tests/core/errors/test_errors_data/package_listing/data.file new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/errors/test_errors_data/package_listing/expected.golden.out b/tests/core/errors/test_errors_data/package_listing/expected.golden.out new file mode 100644 index 0000000000000..e7d710af4e092 --- /dev/null +++ b/tests/core/errors/test_errors_data/package_listing/expected.golden.out @@ -0,0 +1,76 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Command failed: +Error evaluating expression: + //package_listing/missing/foo/x/y/lmnop:target + ^--------------------------------------------^ + + +Caused by: + package `root//package_listing/missing/foo/x/y/lmnop:` does not exist + ^---------------------------^ + dir `root//package_listing/missing` does not exist + + + +Command failed: +Error evaluating expression: + //package_listing/ignored/foo/x/y/lmnop:target + ^--------------------------------------------^ + + +Caused by: + package `root//package_listing/ignored/foo/x/y/lmnop:` does not exist + ^-------------------------------^ + dir `root//package_listing/ignored/foo` does not exist (project.ignore contains `package_listing/*red/**`) + + + +Command failed: +Error evaluating expression: + //package_listing/cell/foo/x/y/lmnop:target + ^-----------------------------------------^ + + +Caused by: + package `root//package_listing/cell/foo/x/y/lmnop:` does not exist + ^------------------------^ + this package is using the wrong cell, use `cell//foo/x/y/lmnop` instead + + + +Command failed: +Error evaluating expression: + //package_listing/missing_targets_file:target + ^-------------------------------------------^ + + +Caused by: + package `root//package_listing/missing_targets_file:` does not exist + missing `TARGETS.fixture` file (also missing alternatives `TARGETS.fixture.v2`, `TARGETS.fixture`) + + + +Command failed: +Error evaluating expression: + //package_listing/data.file:target + ^--------------------------------^ + + +Caused by: + package `root//package_listing/data.file:` does not exist + ^-----------------------------^ + path `root//package_listing/data.file` is a file, not a directory + + + +Command failed: +Error evaluating expression: + //package_listing/data.file/subdir:target + ^---------------------------------------^ + + +Caused by: + package `root//package_listing/data.file/subdir:` does not exist + ^-----------------------------^ + path `root//package_listing/data.file` is a file, not a directory diff --git a/tests/core/errors/test_errors_data/package_listing/ignored/README 
b/tests/core/errors/test_errors_data/package_listing/ignored/README new file mode 100644 index 0000000000000..a7888252961f0 --- /dev/null +++ b/tests/core/errors/test_errors_data/package_listing/ignored/README @@ -0,0 +1 @@ +directory is used for package listing errors tests, intentionally empty(-ish) diff --git a/tests/core/errors/test_errors_data/package_listing/missing_targets_file/README b/tests/core/errors/test_errors_data/package_listing/missing_targets_file/README new file mode 100644 index 0000000000000..a7888252961f0 --- /dev/null +++ b/tests/core/errors/test_errors_data/package_listing/missing_targets_file/README @@ -0,0 +1 @@ +directory is used for package listing errors tests, intentionally empty(-ish) diff --git a/tests/core/errors/test_errors_data/prelude/prelude.bzl b/tests/core/errors/test_errors_data/prelude/prelude.bzl new file mode 100644 index 0000000000000..b1fc19bda7129 --- /dev/null +++ b/tests/core/errors/test_errors_data/prelude/prelude.bzl @@ -0,0 +1,24 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(_ctx): + return [DefaultInfo()] + +simple_rule = rule( + impl = _impl, + attrs = { + "deps": attrs.list(attrs.one_of(attrs.dep(), attrs.configured_dep()), default = []), + }, +) + +def _platform_impl(ctx): + return [DefaultInfo(), PlatformInfo(label = str(ctx.label.raw_target()), configuration = ConfigurationInfo(constraints = {}, values = {}))] + +platform = rule( + impl = _platform_impl, + attrs = {}, +) diff --git a/tests/core/errors/test_errors_data/quiet/TARGETS.fixture b/tests/core/errors/test_errors_data/quiet/TARGETS.fixture new file mode 100644 index 0000000000000..382684d922ff9 --- /dev/null +++ b/tests/core/errors/test_errors_data/quiet/TARGETS.fixture @@ -0,0 +1 @@ +soft_error("starlark_quiet_soft_error", "Blah", quiet = True) diff --git a/tests/core/errors/test_exit_code.py b/tests/core/errors/test_exit_code.py new file mode 100644 index 0000000000000..d948af6b9de3e --- /dev/null +++ b/tests/core/errors/test_exit_code.py @@ -0,0 +1,44 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import asyncio + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.api.buck_result import ExitCodeV2 +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test, env + + +@buck_test() +async def test_exit_code_build_success(buck: Buck) -> None: + result = await buck.build(":build_success") + assert result.process.returncode == ExitCodeV2.SUCCESS.value + + +@buck_test() +async def test_exit_code_build_fail(buck: Buck) -> None: + await expect_failure(buck.build(":build_fail"), exit_code=ExitCodeV2.USER_ERROR) + + +# Deliberately cause a daemon connection failure. +@buck_test() +@env("BUCK2_TEST_FAIL_BUCKD_AUTH", "true") +# This test case spawns a loose daemon that we can't connect to. 
On Windows +# this loose daemon will keep holding onto buck-out files after the test case finishes +# and prevent other processes from changing them, so set a termination timeout +# of 15 seconds so that this loose daemon gets killed before the test case finishes. +@env("BUCK2_TERMINATE_AFTER", "15") +async def test_exit_code_fail_buckd_auth_for_unknown_reason(buck: Buck) -> None: + await expect_failure( + buck.build(":build_success"), exit_code=ExitCodeV2.DAEMON_CONNECTION_FAILURE + ) + await asyncio.sleep( + 20 + ) # Makes sure the daemon terminates before test case finishes diff --git a/tests/core/errors/test_exit_code_data/.buckconfig b/tests/core/errors/test_exit_code_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/errors/test_exit_code_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . + nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/errors/test_exit_code_data/.buckroot b/tests/core/errors/test_exit_code_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/errors/test_exit_code_data/TARGETS.fixture b/tests/core/errors/test_exit_code_data/TARGETS.fixture new file mode 100644 index 0000000000000..d4371397fbb58 --- /dev/null +++ b/tests/core/errors/test_exit_code_data/TARGETS.fixture @@ -0,0 +1,5 @@ +load(":defs.bzl", "build_fail") + +trivial_build(name = "build_success") + +build_fail(name = "build_fail") diff --git a/tests/core/errors/test_exit_code_data/defs.bzl b/tests/core/errors/test_exit_code_data/defs.bzl new file mode 100644 index 0000000000000..e0f569f81ad61 --- /dev/null +++ b/tests/core/errors/test_exit_code_data/defs.bzl @@ -0,0 +1,19 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _build_fail(ctx): + out = ctx.actions.declare_output("out.txt") + ctx.actions.run( + cmd_args("exit", "1", hidden = out.as_output()), + category = "run", + ) + return [DefaultInfo(default_output = out)] + +build_fail = rule( + impl = _build_fail, + attrs = {}, +) diff --git a/tests/core/errors/test_formatting.py b/tests/core/errors/test_formatting.py new file mode 100644 index 0000000000000..05713b4e1265f --- /dev/null +++ b/tests/core/errors/test_formatting.py @@ -0,0 +1,118 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import re +from typing import List + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.golden import golden + + +def _sanitize(s: str) -> str: + # Remove configuration hashes + s = re.sub(r"\b[0-9a-f]{16}\b", "", s) + # And action digests + s = re.sub(r"\b[0-9a-f]{40}:[0-9]{1,3}\b", "", s) + # And version extraction errors + return s.replace( + "version extraction failed.
This indicates an issue with the buck2 release, will fallback to binary hash\n", + "", + ) + + +def error_formatting_test( + name: str, command: List[str], command_name: str = "build" +) -> None: + async def impl(buck: Buck) -> None: + func = getattr(buck, command_name) + res = await expect_failure(func("--console=none", *command)) + golden( + output=_sanitize(res.stderr), + rel_path="fixtures/" + name + ".golden.stderr", + ) + + globals()[name] = impl + + buck_test()(impl) + + +error_formatting_test(name="test_action_fail", command=["//:action_fail"]) + +error_formatting_test( + name="test_missing_dep", + command=["//:missing_dep"], +) + +error_formatting_test( + name="test_missing_dep_cquery", + command=["//:missing_dep"], + command_name="cquery", +) + +error_formatting_test( + name="test_attr_coercion", + command=["//attr_coercion:int_rule"], +) + +error_formatting_test( + name="test_during_load", + command=["//during_load:whatever"], +) + +error_formatting_test( + name="test_during_load_via_dep", + command=["//during_load/via_dep:via_dep"], +) + +error_formatting_test( + name="test_during_parse", + command=["//during_parse:whatever"], +) + +error_formatting_test( + name="test_during_select_map", + command=["//during_select:map"], +) + +error_formatting_test( + name="test_bxl_no_stacktrace", + command=["//fail_no_stacktrace.bxl:fail_no_stacktrace_test"], + command_name="bxl", +) + +error_formatting_test( + name="test_bxl_no_stacktrace_verbose", + command=["//fail_no_stacktrace.bxl:fail_no_stacktrace_test", "-v5"], + command_name="bxl", +) + +error_formatting_test( + name="test_bxl_with_stacktrace", + command=["//fail_no_stacktrace.bxl:fail_with_stacktrace_test"], + command_name="bxl", +) + +error_formatting_test( + name="test_bxl_attr_coercion", + command=["//fail_attr_coercion.bxl:int_rule"], + command_name="bxl", +) + +error_formatting_test( + name="test_duplicate_target", + command=["//duplicate_target:foo"], +) + +error_formatting_test( + name="test_duplicate_target_with_stacktrace", + command=["//duplicate_target:foo", "--stack"], +) diff --git a/tests/core/errors/test_formatting_data/.buckconfig b/tests/core/errors/test_formatting_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/errors/test_formatting_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . 
+ nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/errors/test_formatting_data/.buckroot b/tests/core/errors/test_formatting_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/errors/test_formatting_data/TARGETS.fixture b/tests/core/errors/test_formatting_data/TARGETS.fixture new file mode 100644 index 0000000000000..4d4a3130f31ba --- /dev/null +++ b/tests/core/errors/test_formatting_data/TARGETS.fixture @@ -0,0 +1,10 @@ +load(":defs.bzl", "action_fail") + +action_fail( + name = "action_fail", +) + +stub( + name = "missing_dep", + deps = [":xxx"], +) diff --git a/tests/core/errors/test_formatting_data/attr_coercion/TARGETS.fixture b/tests/core/errors/test_formatting_data/attr_coercion/TARGETS.fixture new file mode 100644 index 0000000000000..5a70c0dfea866 --- /dev/null +++ b/tests/core/errors/test_formatting_data/attr_coercion/TARGETS.fixture @@ -0,0 +1,9 @@ +load(":defs.bzl", "int_rule") + +int_rule( + name = "int_rule", + x = "foobar", +) + +# This test is interesting because it's a scenario in which there is context both above and below +# the starlark diagnostic diff --git a/tests/core/errors/test_formatting_data/attr_coercion/defs.bzl b/tests/core/errors/test_formatting_data/attr_coercion/defs.bzl new file mode 100644 index 0000000000000..4ff949ee6b69b --- /dev/null +++ b/tests/core/errors/test_formatting_data/attr_coercion/defs.bzl @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(_ctx): + fail("unused") + +int_rule = rule( + impl = _impl, + attrs = { + "x": attrs.int(), + }, +) diff --git a/tests/core/errors/test_formatting_data/defs.bzl b/tests/core/errors/test_formatting_data/defs.bzl new file mode 100644 index 0000000000000..1e2673d94455c --- /dev/null +++ b/tests/core/errors/test_formatting_data/defs.bzl @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +def _action_fail(ctx): + out = ctx.actions.declare_output("out") + ctx.actions.run(cmd_args("false", hidden = out.as_output()), category = "run") + return [DefaultInfo(default_outputs = [out])] + +action_fail = rule( + impl = _action_fail, + attrs = {}, +) diff --git a/tests/core/errors/test_formatting_data/duplicate_target/TARGETS.fixture b/tests/core/errors/test_formatting_data/duplicate_target/TARGETS.fixture new file mode 100644 index 0000000000000..a9c336273f65a --- /dev/null +++ b/tests/core/errors/test_formatting_data/duplicate_target/TARGETS.fixture @@ -0,0 +1,5 @@ +load(":make_target.bzl", "make_target") + +make_target("foo") + +make_target("foo") diff --git a/tests/core/errors/test_formatting_data/duplicate_target/make_target.bzl b/tests/core/errors/test_formatting_data/duplicate_target/make_target.bzl new file mode 100644 index 0000000000000..80996d80c9703 --- /dev/null +++ b/tests/core/errors/test_formatting_data/duplicate_target/make_target.bzl @@ -0,0 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def make_target(name): + stub(name = name) diff --git a/tests/core/errors/test_formatting_data/during_load/TARGETS.fixture b/tests/core/errors/test_formatting_data/during_load/TARGETS.fixture new file mode 100644 index 0000000000000..5f65ba6f7eea2 --- /dev/null +++ b/tests/core/errors/test_formatting_data/during_load/TARGETS.fixture @@ -0,0 +1,3 @@ +load(":access.bzl", "get") + +get([1, 2, 3]) diff --git a/tests/core/errors/test_formatting_data/during_load/access.bzl b/tests/core/errors/test_formatting_data/during_load/access.bzl new file mode 100644 index 0000000000000..18478aeab1672 --- /dev/null +++ b/tests/core/errors/test_formatting_data/during_load/access.bzl @@ -0,0 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +def get(ls): + return ls[5] diff --git a/tests/core/errors/test_formatting_data/during_load/via_dep/TARGETS.fixture b/tests/core/errors/test_formatting_data/during_load/via_dep/TARGETS.fixture new file mode 100644 index 0000000000000..1cd5e11cbcc75 --- /dev/null +++ b/tests/core/errors/test_formatting_data/during_load/via_dep/TARGETS.fixture @@ -0,0 +1,4 @@ +stub( + name = "via_dep", + deps = ["//during_load:get"], +) diff --git a/tests/core/errors/test_formatting_data/during_parse/TARGETS.fixture b/tests/core/errors/test_formatting_data/during_parse/TARGETS.fixture new file mode 100644 index 0000000000000..4b2123c3700c0 --- /dev/null +++ b/tests/core/errors/test_formatting_data/during_parse/TARGETS.fixture @@ -0,0 +1,2 @@ +# @nolint +not starlark code diff --git a/tests/core/errors/test_formatting_data/during_select/TARGETS.fixture b/tests/core/errors/test_formatting_data/during_select/TARGETS.fixture new file mode 100644 index 0000000000000..76679f5b5b41d --- /dev/null +++ b/tests/core/errors/test_formatting_data/during_select/TARGETS.fixture @@ -0,0 +1,3 @@ +load(":select.bzl", "map") + +map() diff --git a/tests/core/errors/test_formatting_data/during_select/select.bzl b/tests/core/errors/test_formatting_data/during_select/select.bzl new file mode 100644 index 0000000000000..b7cf3822cb3bc --- /dev/null +++ b/tests/core/errors/test_formatting_data/during_select/select.bzl @@ -0,0 +1,13 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _fail(_ctx): + fail("unused") + +def map(): + str_select = select({"config/windows:x86_64": "disable"}) + select_map(str_select, _fail) diff --git a/tests/core/errors/test_formatting_data/fail_attr_coercion.bxl b/tests/core/errors/test_formatting_data/fail_attr_coercion.bxl new file mode 100644 index 0000000000000..1f9841a71f481 --- /dev/null +++ b/tests/core/errors/test_formatting_data/fail_attr_coercion.bxl @@ -0,0 +1,14 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _int_rule(ctx): + ctx.unconfigured_targets("//attr_coercion:int_rule") + +int_rule = bxl_main( + impl = _int_rule, + cli_args = {}, +) diff --git a/tests/core/errors/test_formatting_data/fail_no_stacktrace.bxl b/tests/core/errors/test_formatting_data/fail_no_stacktrace.bxl new file mode 100644 index 0000000000000..e2f1671b2d012 --- /dev/null +++ b/tests/core/errors/test_formatting_data/fail_no_stacktrace.bxl @@ -0,0 +1,24 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +def _fail_no_stacktrace_impl(_ctx): + fail_no_stacktrace("failing with no stacktrace") + +fail_no_stacktrace_test = bxl_main( + impl = _fail_no_stacktrace_impl, + cli_args = { + }, +) + +def _fail_with_stacktrace_impl(_ctx): + fail("failing with stacktrace") + +fail_with_stacktrace_test = bxl_main( + impl = _fail_with_stacktrace_impl, + cli_args = { + }, +) diff --git a/tests/core/errors/test_formatting_data/fixtures/test_action_fail.golden.stderr b/tests/core/errors/test_formatting_data/fixtures/test_action_fail.golden.stderr new file mode 100644 index 0000000000000..37fdee14bd591 --- /dev/null +++ b/tests/core/errors/test_formatting_data/fixtures/test_action_fail.golden.stderr @@ -0,0 +1,4 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +BUILD FAILED +Failed to build 'root//:action_fail ()' diff --git a/tests/core/errors/test_formatting_data/fixtures/test_attr_coercion.golden.stderr b/tests/core/errors/test_formatting_data/fixtures/test_attr_coercion.golden.stderr new file mode 100644 index 0000000000000..47ad92d33c37a --- /dev/null +++ b/tests/core/errors/test_formatting_data/fixtures/test_attr_coercion.golden.stderr @@ -0,0 +1,22 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +BUILD FAILED +Error evaluating build file: `root//attr_coercion:TARGETS.fixture` + +Caused by: + 0: Traceback (most recent call last): + * attr_coercion/TARGETS.fixture:3, in + int_rule( + error: Error coercing attribute `x` of `root//attr_coercion:int_rule` + --> attr_coercion/TARGETS.fixture:3:1 + | + 3 | / int_rule( + 4 | | name = "int_rule", + 5 | | x = "foobar", + 6 | | ) + | |_^ + | + + 1: Error coercing attribute `x` of type `attrs.int()` + 2: Error coercing "foobar" + 3: Expected value of type `int`, got value with type `string` (value was `"foobar"`) diff --git a/tests/core/errors/test_formatting_data/fixtures/test_bxl_attr_coercion.golden.stderr b/tests/core/errors/test_formatting_data/fixtures/test_bxl_attr_coercion.golden.stderr new file mode 100644 index 0000000000000..4d05f3a360cef --- /dev/null +++ b/tests/core/errors/test_formatting_data/fixtures/test_bxl_attr_coercion.golden.stderr @@ -0,0 +1,34 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Command failed: +Traceback (most recent call last): + File , in + * fail_attr_coercion.bxl:9, in _int_rule + ctx.unconfigured_targets("//attr_coercion:int_rule") +error: Error loading targets in package `root//attr_coercion` for target `root//attr_coercion:int_rule` + --> fail_attr_coercion.bxl:9:5 + | +9 | ctx.unconfigured_targets("//attr_coercion:int_rule") + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + + +Caused by: + 0: Error evaluating build file: `root//attr_coercion:TARGETS.fixture` + 1: Traceback (most recent call last): + * attr_coercion/TARGETS.fixture:3, in + int_rule( + error: Error coercing attribute `x` of `root//attr_coercion:int_rule` + --> attr_coercion/TARGETS.fixture:3:1 + | + 3 | / int_rule( + 4 | | name = "int_rule", + 5 | | x = "foobar", + 6 | | ) + | |_^ + | + + 2: Error coercing attribute `x` of type `attrs.int()` + 3: Error coercing "foobar" + 4: Expected value of type `int`, got value with type `string` (value was `"foobar"`) +BXL FAILED diff --git a/tests/core/errors/test_formatting_data/fixtures/test_bxl_no_stacktrace.golden.stderr 
b/tests/core/errors/test_formatting_data/fixtures/test_bxl_no_stacktrace.golden.stderr new file mode 100644 index 0000000000000..a20948be1e5cf --- /dev/null +++ b/tests/core/errors/test_formatting_data/fixtures/test_bxl_no_stacktrace.golden.stderr @@ -0,0 +1,5 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Command failed: +fail: failing with no stacktrace +BXL FAILED diff --git a/tests/core/errors/test_formatting_data/fixtures/test_bxl_no_stacktrace_verbose.golden.stderr b/tests/core/errors/test_formatting_data/fixtures/test_bxl_no_stacktrace_verbose.golden.stderr new file mode 100644 index 0000000000000..82010a764acc2 --- /dev/null +++ b/tests/core/errors/test_formatting_data/fixtures/test_bxl_no_stacktrace_verbose.golden.stderr @@ -0,0 +1,15 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Command failed: +Traceback (most recent call last): + File , in + * fail_no_stacktrace.bxl:9, in _fail_no_stacktrace_impl + fail_no_stacktrace("failing with no stacktrace") +error: fail: failing with no stacktrace + --> fail_no_stacktrace.bxl:9:5 + | +9 | fail_no_stacktrace("failing with no stacktrace") + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + +BXL FAILED diff --git a/tests/core/errors/test_formatting_data/fixtures/test_bxl_with_stacktrace.golden.stderr b/tests/core/errors/test_formatting_data/fixtures/test_bxl_with_stacktrace.golden.stderr new file mode 100644 index 0000000000000..09eca5ee542bf --- /dev/null +++ b/tests/core/errors/test_formatting_data/fixtures/test_bxl_with_stacktrace.golden.stderr @@ -0,0 +1,15 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Command failed: +Traceback (most recent call last): + File , in + * fail_no_stacktrace.bxl:18, in _fail_with_stacktrace_impl + fail("failing with stacktrace") +error: fail: failing with stacktrace + --> fail_no_stacktrace.bxl:18:5 + | +18 | fail("failing with stacktrace") + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + +BXL FAILED diff --git a/tests/core/errors/test_formatting_data/fixtures/test_duplicate_target.golden.stderr b/tests/core/errors/test_formatting_data/fixtures/test_duplicate_target.golden.stderr new file mode 100644 index 0000000000000..beb60c7bab975 --- /dev/null +++ b/tests/core/errors/test_formatting_data/fixtures/test_duplicate_target.golden.stderr @@ -0,0 +1,18 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +BUILD FAILED +Error evaluating build file: `root//duplicate_target:TARGETS.fixture` + +Caused by: + Traceback (most recent call last): + * duplicate_target/TARGETS.fixture:5, in + make_target("foo") + * duplicate_target/make_target.bzl:9, in make_target + stub(name = name) + error: Attempted to register target root//duplicate_target:foo twice, re-run the command with `--stack` to obtain a call stack of the first registration + --> duplicate_target/make_target.bzl:9:5 + | + 9 | stub(name = name) + | ^^^^^^^^^^^^^^^^^ + | + diff --git a/tests/core/errors/test_formatting_data/fixtures/test_duplicate_target_with_stacktrace.golden.stderr b/tests/core/errors/test_formatting_data/fixtures/test_duplicate_target_with_stacktrace.golden.stderr new file mode 100644 index 0000000000000..5f957ea8397d7 --- /dev/null +++ 
b/tests/core/errors/test_formatting_data/fixtures/test_duplicate_target_with_stacktrace.golden.stderr @@ -0,0 +1,24 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +BUILD FAILED +Error evaluating build file: `root//duplicate_target:TARGETS.fixture` + +Caused by: + Traceback (most recent call last): + * duplicate_target/TARGETS.fixture:5, in + make_target("foo") + * duplicate_target/make_target.bzl:9, in make_target + stub(name = name) + error: Attempted to register target root//duplicate_target:foo twice, first registered at: + Traceback (most recent call last): + * duplicate_target/TARGETS.fixture:3, in + make_target("foo") + * duplicate_target/make_target.bzl:9, in make_target + stub(name = name) + + --> duplicate_target/make_target.bzl:9:5 + | + 9 | stub(name = name) + | ^^^^^^^^^^^^^^^^^ + | + diff --git a/tests/core/errors/test_formatting_data/fixtures/test_during_load.golden.stderr b/tests/core/errors/test_formatting_data/fixtures/test_during_load.golden.stderr new file mode 100644 index 0000000000000..f031db448598d --- /dev/null +++ b/tests/core/errors/test_formatting_data/fixtures/test_during_load.golden.stderr @@ -0,0 +1,16 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +BUILD FAILED +Error evaluating build file: `root//during_load:TARGETS.fixture` + +Caused by: + Traceback (most recent call last): + * during_load/TARGETS.fixture:3, in + get([1, 2, 3]) + error: Index `5` is out of bound + --> during_load/access.bzl:9:12 + | + 9 | return ls[5] + | ^^^^^ + | + diff --git a/tests/core/errors/test_formatting_data/fixtures/test_during_load_via_dep.golden.stderr b/tests/core/errors/test_formatting_data/fixtures/test_during_load_via_dep.golden.stderr new file mode 100644 index 0000000000000..b91411cbcaed4 --- /dev/null +++ b/tests/core/errors/test_formatting_data/fixtures/test_during_load_via_dep.golden.stderr @@ -0,0 +1,23 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +BUILD FAILED +Error running analysis for `root//during_load/via_dep:via_dep ()` + +Caused by: + 0: Error in configured node dependency, dependency chain follows (-> indicates depends on, ^ indicates same configuration as previous): + root//during_load/via_dep:via_dep () + -> root//during_load:get (^) + + 1: looking up unconfigured target node `root//during_load:get` + 2: Error loading targets in package `root//during_load` for target `root//during_load:get` + 3: Error evaluating build file: `root//during_load:TARGETS.fixture` + 4: Traceback (most recent call last): + * during_load/TARGETS.fixture:3, in + get([1, 2, 3]) + error: Index `5` is out of bound + --> during_load/access.bzl:9:12 + | + 9 | return ls[5] + | ^^^^^ + | + diff --git a/tests/core/errors/test_formatting_data/fixtures/test_during_parse.golden.stderr b/tests/core/errors/test_formatting_data/fixtures/test_during_parse.golden.stderr new file mode 100644 index 0000000000000..cde8b3c3e7ff9 --- /dev/null +++ b/tests/core/errors/test_formatting_data/fixtures/test_during_parse.golden.stderr @@ -0,0 +1,13 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +BUILD FAILED +Error parsing: `root//during_parse:TARGETS.fixture` + +Caused by: + error: Parse error: unexpected identifier 'code' here, expected one of "\n", "!=", "%", "%=", "&", "&=", 
"(", ")", "*", "*=", "+", "+=", ",", "-", "-=", ".", "/", "//", "//=", "/=", ":", ";", "<", "<<", "<<=", "<=", "=", "==", ">", ">=", ">>", ">>=", "[", "]", "^", "^=", "and", "else", "for", "if", "in", "not", "or", "|", "|=" or "}" + --> during_parse/TARGETS.fixture:2:14 + | + 2 | not starlark code + | ^^^^ + | + diff --git a/tests/core/errors/test_formatting_data/fixtures/test_during_select_map.golden.stderr b/tests/core/errors/test_formatting_data/fixtures/test_during_select_map.golden.stderr new file mode 100644 index 0000000000000..a5f8248bf5f7e --- /dev/null +++ b/tests/core/errors/test_formatting_data/fixtures/test_during_select_map.golden.stderr @@ -0,0 +1,22 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +BUILD FAILED +Error evaluating build file: `root//during_select:TARGETS.fixture` + +Caused by: + Traceback (most recent call last): + * during_select/TARGETS.fixture:3, in + map() + * during_select/select.bzl:13, in map + select_map(str_select, _fail) + File , in select_map + File , in None + * during_select/select.bzl:9, in _fail + fail("unused") + error: fail: unused + --> during_select/select.bzl:9:5 + | + 9 | fail("unused") + | ^^^^^^^^^^^^^^ + | + diff --git a/tests/core/errors/test_formatting_data/fixtures/test_missing_dep.golden.stderr b/tests/core/errors/test_formatting_data/fixtures/test_missing_dep.golden.stderr new file mode 100644 index 0000000000000..e1c27b1c2ff28 --- /dev/null +++ b/tests/core/errors/test_formatting_data/fixtures/test_missing_dep.golden.stderr @@ -0,0 +1,13 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +BUILD FAILED +Error running analysis for `root//:missing_dep ()` + +Caused by: + 0: Error in configured node dependency, dependency chain follows (-> indicates depends on, ^ indicates same configuration as previous): + root//:missing_dep () + -> root//:xxx (^) + + 1: looking up unconfigured target node `root//:xxx` + 2: Unknown target `xxx` from package `root//`. + Did you mean one of the 2 targets in root//:TARGETS.fixture? diff --git a/tests/core/errors/test_formatting_data/fixtures/test_missing_dep_cquery.golden.stderr b/tests/core/errors/test_formatting_data/fixtures/test_missing_dep_cquery.golden.stderr new file mode 100644 index 0000000000000..bfc41da462de9 --- /dev/null +++ b/tests/core/errors/test_formatting_data/fixtures/test_missing_dep_cquery.golden.stderr @@ -0,0 +1,12 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Command failed: +Error in configured node dependency, dependency chain follows (-> indicates depends on, ^ indicates same configuration as previous): + root//:missing_dep () + -> root//:xxx (^) + + +Caused by: + 0: looking up unconfigured target node `root//:xxx` + 1: Unknown target `xxx` from package `root//`. + Did you mean one of the 2 targets in root//:TARGETS.fixture? 
diff --git a/tests/core/explain/BUCK b/tests/core/explain/BUCK
new file mode 100644
index 0000000000000..8f13c7bc04f93
--- /dev/null
+++ b/tests/core/explain/BUCK
@@ -0,0 +1,12 @@
+load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test")
+
+oncall("build_infra")
+
+buck2_e2e_test(
+    name = "test_explain",
+    srcs = ["test_explain.py"],
+    data_dir = "test_explain_data",
+    deps = [
+        "//manifold/clients/python:manifold_client_deprecated",
+    ],
+)
diff --git a/tests/core/explain/test_explain.py b/tests/core/explain/test_explain.py
new file mode 100644
index 0000000000000..275d0193b4b56
--- /dev/null
+++ b/tests/core/explain/test_explain.py
@@ -0,0 +1,111 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+
+import tempfile
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.asserts import expect_failure
+from buck2.tests.e2e_util.buck_workspace import buck_test, env
+
+from manifold.clients.python.manifold_client_deprecated import Client as ManifoldClient
+
+BUCKET_CONFIG = {"bucket": "buck2_logs", "apikey": "buck2_logs-key"}
+
+
+async def manifold_exists(path: str) -> bool:
+    with ManifoldClient(BUCKET_CONFIG) as client:
+        return client.exists(bucket="buck2_logs", path=path)
+
+
+@buck_test()
+async def test_dummy_to_make_this_file_not_empty_on_windows(buck: Buck) -> None:
+    pass
+
+
+@buck_test(skip_for_os=["windows"])
+async def test_explain(buck: Buck) -> None:
+    with tempfile.TemporaryDirectory() as tmpdirname:
+        output = f"{tmpdirname}/index.html"
+        await buck.build("//:simple")
+        res = await buck.explain("--output", output)
+        assert "Using last build invocation `buck2 build //:simple" in res.stderr
+
+        # check we wrote something
+        with open(output, "rb") as f:
+            assert len(f.read(10)) == 10
+
+
+@buck_test(skip_for_os=["windows"])
+async def test_explain_no_output_arg(buck: Buck) -> None:
+    await expect_failure(
+        buck.explain(),
+        stderr_regex="the following required arguments were not provided",
+    )
+
+
+@buck_test(skip_for_os=["windows"])
+async def test_explain_alias(buck: Buck) -> None:
+    with tempfile.TemporaryDirectory() as tmpdirname:
+        output = f"{tmpdirname}/index.html"
+        await buck.build("other_alias")
+        await buck.explain("--output", output)
+
+
+@buck_test(skip_for_os=["windows"])
+async def test_explain_no_cell(buck: Buck) -> None:
+    with tempfile.TemporaryDirectory() as tmpdirname:
+        output = f"{tmpdirname}/index.html"
+        await buck.build(":simple")
+        await buck.explain("--output", output)
+
+
+@buck_test(skip_for_os=["windows"])
+async def test_explain_universe(buck: Buck) -> None:
+    with tempfile.TemporaryDirectory() as tmpdirname:
+        output = f"{tmpdirname}/index.html"
+        # check no universe fails for both
+        await expect_failure(
+            buck.build("//:doesnt_exist"),
+            stderr_regex="Unknown target `doesnt_exist` from package",
+        )
+        await expect_failure(
+            buck.explain("--output", output),
+            stderr_regex="Unknown target `doesnt_exist` from package",
+        )
+
+        # both don't fail with universe
+        await buck.build("//:doesnt_exist", "-u", ":simple")
+        await buck.explain("--output", output)
+
+
+@buck_test(skip_for_os=["windows"])
+async def test_explain_only_builds(buck: Buck) -> None:
+    with tempfile.TemporaryDirectory() as tmpdirname:
+        output = f"{tmpdirname}/index.html"
+
+        await buck.uquery("//:simple")
+        await expect_failure(
+            buck.explain("--output", output),
+            stderr_regex="No recent build commands found",
+        )
+
+        await buck.build("//:simple")
+        await buck.explain("--output", output)
+        await buck.explain("--output", output)
+
+
+@buck_test(skip_for_os=["windows"])
+@env("BUCK2_TEST_MANIFOLD_TTL_S", str(84_000))  # just under 1 day
+async def test_explain_upload(buck: Buck) -> None:
+    uuid = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"
+    await buck.build("//:simple")
+    await buck.explain("--upload", env={"BUCK_WRAPPER_UUID": uuid})
+
+    assert await manifold_exists(path=f"flat/{uuid}-explain.html") is True
diff --git a/tests/core/explain/test_explain_data/.buckconfig b/tests/core/explain/test_explain_data/.buckconfig
new file mode 100644
index 0000000000000..84fd6b615fc10
--- /dev/null
+++ b/tests/core/explain/test_explain_data/.buckconfig
@@ -0,0 +1,15 @@
+[buildfile]
+name=TARGETS.fixture
+
+[cells]
+root = .
+nano_prelude = nano_prelude
+
+[cell_aliases]
+prelude = nano_prelude
+
+[external_cells]
+nano_prelude = bundled
+
+[alias]
+other_alias = //:simple
diff --git a/tests/core/explain/test_explain_data/.buckroot b/tests/core/explain/test_explain_data/.buckroot
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/explain/test_explain_data/TARGETS.fixture b/tests/core/explain/test_explain_data/TARGETS.fixture
new file mode 100644
index 0000000000000..e6b75f11e4ae0
--- /dev/null
+++ b/tests/core/explain/test_explain_data/TARGETS.fixture
@@ -0,0 +1 @@
+trivial_build(name = "simple")
diff --git a/tests/core/external_cells/BUCK b/tests/core/external_cells/BUCK
new file mode 100644
index 0000000000000..8c39b5e47caf3
--- /dev/null
+++ b/tests/core/external_cells/BUCK
@@ -0,0 +1,27 @@
+load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test")
+
+oncall("build_infra")
+
+buck2_e2e_test(
+    name = "test_bundled",
+    srcs = ["test_bundled.py"],
+    data_dir = "test_bundled_data",
+)
+
+buck2_e2e_test(
+    name = "test_prelude",
+    srcs = ["test_prelude.py"],
+    data_dir = "test_prelude_data",
+)
+
+buck2_e2e_test(
+    name = "test_git",
+    srcs = ["test_git.py"],
+    data_dir = "test_git_data",
+)
+
+buck2_e2e_test(
+    name = "test_in_subdir",
+    srcs = ["test_in_subdir.py"],
+    data_dir = "test_in_subdir_data",
+)
diff --git a/tests/core/external_cells/test_bundled.py b/tests/core/external_cells/test_bundled.py
new file mode 100644
index 0000000000000..1a0caecc1da89
--- /dev/null
+++ b/tests/core/external_cells/test_bundled.py
@@ -0,0 +1,85 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
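+
+# These tests cover the `bundled` flavor of external cells: the
+# `test_bundled_cell` cell is materialized from data baked into the buck2
+# binary (see `[external_cells] test_bundled_cell = bundled` in this test's
+# .buckconfig) rather than from files checked into the repo.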
+ +# pyre-strict + + +import os +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_buckconfig_works_in_external_cells(buck: Buck) -> None: + result = await buck.audit( + "config", "--cell", "test_bundled_cell", "user_section.key" + ) + assert "key = value" in result.stdout + + +@buck_test() +async def test_uquery(buck: Buck) -> None: + result = await buck.uquery("deps(other//:other_alias)") + assert result.stdout.strip().split() == [ + "test_bundled_cell//dir:test_hidden", + "test_bundled_cell//dir:test", + "other//:other_alias", + ] + result = await buck.uquery( + "deps(test_bundled_cell//dir:test)", rel_cwd=Path("other") + ) + assert result.stdout.strip().split() == [ + "test_bundled_cell//dir:test_hidden", + "test_bundled_cell//dir:test", + ] + + +@buck_test() +async def test_build_local(buck: Buck) -> None: + result = await buck.build_without_report( + "--show-full-simple-output", "--local-only", "other//:other_alias" + ) + p = Path(result.stdout.strip()) + assert p.read_text().strip() == "\n".join(["value", "6", "foobar", "foobar2"]) + + +@buck_test() +async def test_build_remote(buck: Buck) -> None: + result = await buck.build_without_report( + "--show-full-simple-output", "--remote-only", "other//:other_alias" + ) + p = Path(result.stdout.strip()) + assert p.read_text().strip() == "\n".join(["value", "6", "foobar", "foobar2"]) + + +@buck_test() +async def test_materialize_source_directly(buck: Buck) -> None: + result = await buck.build_without_report( + "--show-full-simple-output", "test_bundled_cell//dir:exported" + ) + p = Path(result.stdout.strip()) + assert f"external_cells{os.path.sep}bundled" in str(p) + assert str(p).endswith("src.txt") + assert p.read_text().strip() == "foobar" + + +@buck_test() +async def test_expand_external_cell(buck: Buck) -> None: + await buck.expand_external_cell("test_bundled_cell") + assert (buck.cwd / "test_bundled_cell" / ".buckconfig").exists() + + # Remove the external cell declaration + (buck.cwd / ".buckconfig_no_external").replace(buck.cwd / ".buckconfig") + (buck.cwd / "test_bundled_cell" / "dir" / "src.txt").write_text("foobar3\n") + + result = await buck.build_without_report( + "--show-full-simple-output", "other//:other_alias" + ) + p = Path(result.stdout.strip()) + assert p.read_text().strip() == "\n".join(["value", "6", "foobar3", "foobar2"]) diff --git a/tests/core/external_cells/test_bundled_data/.buckconfig b/tests/core/external_cells/test_bundled_data/.buckconfig new file mode 100644 index 0000000000000..fc6764dfe1e6d --- /dev/null +++ b/tests/core/external_cells/test_bundled_data/.buckconfig @@ -0,0 +1,11 @@ +[cells] + root = . + test_bundled_cell = test_bundled_cell + prelude = prelude + other = other + +[external_cells] + test_bundled_cell = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/external_cells/test_bundled_data/.buckconfig_no_external b/tests/core/external_cells/test_bundled_data/.buckconfig_no_external new file mode 100644 index 0000000000000..e47ce87562cb2 --- /dev/null +++ b/tests/core/external_cells/test_bundled_data/.buckconfig_no_external @@ -0,0 +1,8 @@ +[cells] + root = . 
+ test_bundled_cell = test_bundled_cell + prelude = prelude + other = other + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/external_cells/test_bundled_data/.buckroot b/tests/core/external_cells/test_bundled_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/external_cells/test_bundled_data/other/.buckconfig b/tests/core/external_cells/test_bundled_data/other/.buckconfig new file mode 100644 index 0000000000000..9f36f679bda9c --- /dev/null +++ b/tests/core/external_cells/test_bundled_data/other/.buckconfig @@ -0,0 +1,2 @@ +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/external_cells/test_bundled_data/other/TARGETS.fixture b/tests/core/external_cells/test_bundled_data/other/TARGETS.fixture new file mode 100644 index 0000000000000..079184d9b2401 --- /dev/null +++ b/tests/core/external_cells/test_bundled_data/other/TARGETS.fixture @@ -0,0 +1,6 @@ +load("@prelude//:alias.bzl", "alias") + +alias( + name = "other_alias", + actual = "test_bundled_cell//dir:test", +) diff --git a/tests/core/external_cells/test_bundled_data/prelude/alias.bzl b/tests/core/external_cells/test_bundled_data/prelude/alias.bzl new file mode 100644 index 0000000000000..f825f87c62d7f --- /dev/null +++ b/tests/core/external_cells/test_bundled_data/prelude/alias.bzl @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _alias(ctx): + return ctx.attrs.actual.providers + +alias = rule( + impl = _alias, + attrs = { + "actual": attrs.dep(), + }, +) diff --git a/tests/core/external_cells/test_bundled_data/prelude/prelude.bzl b/tests/core/external_cells/test_bundled_data/prelude/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/external_cells/test_git.py b/tests/core/external_cells/test_git.py new file mode 100644 index 0000000000000..5de6e6f503d0f --- /dev/null +++ b/tests/core/external_cells/test_git.py @@ -0,0 +1,115 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
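+
+# Each test here builds a throwaway git repository next to the project root
+# (`cwd.parent / "external"`) and rewrites the `[external_cell_libfoo]`
+# section of .buckconfig so that `git_origin` and `commit_hash` point at it
+# before buck2 is invoked.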
+ +# pyre-strict + +import shutil +import subprocess +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +def _repo(cwd: Path) -> Path: + return (cwd.parent / "external").absolute() + + +def _git(args: list[str], cwd: Path) -> str: + print("Running " + " ".join(args)) + out = subprocess.check_output(["git"] + args, cwd=_repo(cwd)).decode().strip() + print(f"Git output: {out}") + return out + + +def _git_commit(cwd: Path) -> str: + _git(["add", "."], cwd=cwd) + _git(["commit", "-m", "Commit name"], cwd=cwd) + return _git(["log", "--format=format:%H%n", "-1", "-r", "."], cwd=cwd) + + +def _set_revision(rev: str, cwd: Path) -> None: + p = cwd / ".buckconfig" + data = p.read_text().splitlines()[:-2] + data.append(f" git_origin = file://{_repo(cwd)}") + data.append(f" commit_hash = {rev}") + p.write_text("\n".join(data)) + + +def _init_repo(cwd: Path) -> None: + _repo(cwd).mkdir(parents=True, exist_ok=True) + _git(["init"], cwd=cwd) + _git(["config", "user.name", "notarealuser"], cwd=cwd) + _git(["config", "user.email", "notarealuser@fb.com"], cwd=cwd) + shutil.copytree(cwd / "template", _repo(cwd), dirs_exist_ok=True) + rev = _git_commit(cwd=cwd) + _set_revision(rev, cwd=cwd) + + +@buck_test() +async def test_expand_external(buck: Buck) -> None: + _init_repo(cwd=buck.cwd) + await buck.expand_external_cell("libfoo") + assert (buck.cwd / "libfoo" / "src.txt").exists() + assert "buildfile" in (buck.cwd / "libfoo" / ".buckconfig").read_text() + + +@buck_test() +async def test_non_master_ancestor(buck: Buck) -> None: + _init_repo(cwd=buck.cwd) + + _git(["switch", "-c", "other"], cwd=buck.cwd) + (_repo(cwd=buck.cwd) / "src.txt").write_text("change") + rev = _git_commit(cwd=buck.cwd) + _set_revision(rev, cwd=buck.cwd) + + _git(["switch", "master"], cwd=buck.cwd) + (_repo(cwd=buck.cwd) / "src.txt").write_text("change2") + _git_commit(cwd=buck.cwd) + + res = await buck.build_without_report("libfoo//:t", "--show-full-simple-output") + assert Path(res.stdout.strip()).read_text().strip() == "change" + + +@buck_test() +async def test_changing_commit(buck: Buck) -> None: + _init_repo(cwd=buck.cwd) + + res = await buck.build_without_report("libfoo//:t", "--show-full-simple-output") + assert Path(res.stdout.strip()).read_text().strip() == "" + + (_repo(cwd=buck.cwd) / "src.txt").write_text("change") + rev = _git_commit(cwd=buck.cwd) + _set_revision(rev, cwd=buck.cwd) + + res = await buck.build_without_report("libfoo//:t", "--show-full-simple-output") + assert Path(res.stdout.strip()).read_text().strip() == "change" + + +@buck_test() +async def test_full_clean_cycle(buck: Buck) -> None: + _init_repo(cwd=buck.cwd) + + res = await buck.build_without_report( + "libfoo//:t[src]", "--show-full-simple-output" + ) + src_path = Path(res.stdout.strip()) + + await buck.clean() + + assert not Path(src_path).exists() + + +@buck_test() +async def test_no_refetch_on_restart(buck: Buck) -> None: + _init_repo(cwd=buck.cwd) + + await buck.build("libfoo//:t") + await buck.kill() + + shutil.rmtree(_repo(cwd=buck.cwd)) + await buck.build("libfoo//:t") diff --git a/tests/core/external_cells/test_git_data/.buckconfig b/tests/core/external_cells/test_git_data/.buckconfig new file mode 100644 index 0000000000000..cfbefbc2651f0 --- /dev/null +++ b/tests/core/external_cells/test_git_data/.buckconfig @@ -0,0 +1,23 @@ +[cells] + root = . 
+ nano_prelude = nano_prelude + libfoo = libfoo + +[cell_aliases] + prelude = nano_prelude + +[buildfile] + name = TARGETS.fixture + +[buck2] + materializations = deferred + sqlite_materializer_state = true + +[external_cells] + nano_prelude = bundled + libfoo = git + +# Written by each test before invoking buck +[external_cell_libfoo] + git_origin = + commit_hash = diff --git a/tests/core/external_cells/test_git_data/.buckroot b/tests/core/external_cells/test_git_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/external_cells/test_git_data/defs.bzl b/tests/core/external_cells/test_git_data/defs.bzl new file mode 100644 index 0000000000000..0d71ae39e968e --- /dev/null +++ b/tests/core/external_cells/test_git_data/defs.bzl @@ -0,0 +1,21 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(ctx): + out = ctx.actions.declare_output("out.txt") + ctx.actions.run( + cmd_args("cp", ctx.attrs.src, out.as_output()), + category = "run", + ) + return [DefaultInfo(default_output = out, sub_targets = {"src": [DefaultInfo(default_output = ctx.attrs.src)]})] + +copy_src = rule( + impl = _impl, + attrs = { + "src": attrs.source(), + }, +) diff --git a/tests/core/external_cells/test_git_data/template/.buckconfig b/tests/core/external_cells/test_git_data/template/.buckconfig new file mode 100644 index 0000000000000..9f36f679bda9c --- /dev/null +++ b/tests/core/external_cells/test_git_data/template/.buckconfig @@ -0,0 +1,2 @@ +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/external_cells/test_git_data/template/TARGETS.fixture b/tests/core/external_cells/test_git_data/template/TARGETS.fixture new file mode 100644 index 0000000000000..c8ed575a59e9d --- /dev/null +++ b/tests/core/external_cells/test_git_data/template/TARGETS.fixture @@ -0,0 +1,6 @@ +load("@root//:defs.bzl", "copy_src") + +copy_src( + name = "t", + src = "src.txt", +) diff --git a/tests/core/external_cells/test_git_data/template/src.txt b/tests/core/external_cells/test_git_data/template/src.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/external_cells/test_in_subdir.py b/tests/core/external_cells/test_in_subdir.py new file mode 100644 index 0000000000000..71aea81d2fb3a --- /dev/null +++ b/tests/core/external_cells/test_in_subdir.py @@ -0,0 +1,35 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
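+
+# Here the external cell is mounted at `somedir/test_bundled_cell`, but
+# `somedir` does not exist on disk when the test starts; this pins down how
+# buck2 behaves before and after the parent directory is created.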
+ +# pyre-strict + + +import platform + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_in_subdir(buck: Buck) -> None: + if platform.system() == "Windows": + err = "The system cannot find the path specified" + else: + err = "No such file or directory" + await expect_failure( + buck.targets("test_bundled_cell//dir:"), + stderr_regex=err, + ) + await expect_failure( + buck.cquery("root//:"), + stderr_regex=err, + ) + # FIXME(JakobDegen): Decide if this is a bug or not + (buck.cwd / "somedir").mkdir() + await buck.targets("test_bundled_cell//dir:") + await buck.cquery("root//:") diff --git a/tests/core/external_cells/test_in_subdir_data/.buckconfig b/tests/core/external_cells/test_in_subdir_data/.buckconfig new file mode 100644 index 0000000000000..44925a3957fb4 --- /dev/null +++ b/tests/core/external_cells/test_in_subdir_data/.buckconfig @@ -0,0 +1,10 @@ +[cells] + root = . + prelude = prelude + test_bundled_cell = somedir/test_bundled_cell + +[external_cells] + test_bundled_cell = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/external_cells/test_in_subdir_data/.buckroot b/tests/core/external_cells/test_in_subdir_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/external_cells/test_in_subdir_data/TARGETS.fixture b/tests/core/external_cells/test_in_subdir_data/TARGETS.fixture new file mode 100644 index 0000000000000..695adc12eff36 --- /dev/null +++ b/tests/core/external_cells/test_in_subdir_data/TARGETS.fixture @@ -0,0 +1,6 @@ +load("@prelude//:alias.bzl", "alias") + +alias( + name = "stub", + actual = "test_bundled_cell//dir:test", +) diff --git a/tests/core/external_cells/test_in_subdir_data/prelude/alias.bzl b/tests/core/external_cells/test_in_subdir_data/prelude/alias.bzl new file mode 100644 index 0000000000000..f825f87c62d7f --- /dev/null +++ b/tests/core/external_cells/test_in_subdir_data/prelude/alias.bzl @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _alias(ctx): + return ctx.attrs.actual.providers + +alias = rule( + impl = _alias, + attrs = { + "actual": attrs.dep(), + }, +) diff --git a/tests/core/external_cells/test_in_subdir_data/prelude/prelude.bzl b/tests/core/external_cells/test_in_subdir_data/prelude/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/external_cells/test_prelude.py b/tests/core/external_cells/test_prelude.py new file mode 100644 index 0000000000000..7b15a73aadde3 --- /dev/null +++ b/tests/core/external_cells/test_prelude.py @@ -0,0 +1,17 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
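+
+# Smoke test for the bundled prelude: `prelude = bundled` in .buckconfig, so
+# building `root//:` evaluates `export_file` and `alias` from the prelude
+# shipped inside the buck2 binary.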
+ +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_build(buck: Buck) -> None: + await buck.build("root//:") diff --git a/tests/core/external_cells/test_prelude_data/.buckconfig b/tests/core/external_cells/test_prelude_data/.buckconfig new file mode 100644 index 0000000000000..6b99d638581cf --- /dev/null +++ b/tests/core/external_cells/test_prelude_data/.buckconfig @@ -0,0 +1,17 @@ +[cells] + root = . + prelude = prelude + +[cell_aliases] + ovr_config = prelude + # Just have to exist, not actually used for this test + toolchains = root + fbsource = root + fbcode = root + buck = root + +[external_cells] + prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/external_cells/test_prelude_data/.buckroot b/tests/core/external_cells/test_prelude_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/external_cells/test_prelude_data/TARGETS.fixture b/tests/core/external_cells/test_prelude_data/TARGETS.fixture new file mode 100644 index 0000000000000..eec92925025f5 --- /dev/null +++ b/tests/core/external_cells/test_prelude_data/TARGETS.fixture @@ -0,0 +1,10 @@ +export_file( + name = "gen", + src = "src.txt", + mode = "copy", +) + +alias( + name = "gen_alias", + actual = ":gen", +) diff --git a/tests/core/external_cells/test_prelude_data/src.txt b/tests/core/external_cells/test_prelude_data/src.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/help/BUCK b/tests/core/help/BUCK new file mode 100644 index 0000000000000..4e8dd23e885f5 --- /dev/null +++ b/tests/core/help/BUCK @@ -0,0 +1,24 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_help", + srcs = ["test_help.py"], + data_dir = "test_help_data", + deps = ["fbcode//buck2/tests/e2e_util:golden"], +) + +buck2_e2e_test( + name = "test_help_env", + srcs = ["test_help_env.py"], + data_dir = "test_help_env_data", + use_compiled_buck2_client_and_tpx = True, + deps = ["fbcode//buck2/tests/e2e_util:golden"], +) + +buck2_e2e_test( + name = "test_no_repo", + srcs = ["test_no_repo.py"], + data_dir = "test_no_repo_data", +) diff --git a/tests/core/help/test_help.py b/tests/core/help/test_help.py new file mode 100644 index 0000000000000..c53df8b7e8764 --- /dev/null +++ b/tests/core/help/test_help.py @@ -0,0 +1,63 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
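+
+# Recursively invokes `buck2 help` for every subcommand discovered in the
+# help output (at most 10 concurrently) and snapshots the normalized text
+# against the *.golden.txt files in this directory. `golden()` compares its
+# `output` against the fixture at `rel_path`; re-running the test with
+# `-- --env BUCK2_UPDATE_GOLDEN=1` rewrites the fixtures instead of checking
+# them. A rough sketch of that contract (the real helper lives in
+# buck2.tests.e2e_util.helper.golden and may differ):
+#
+#     if os.environ.get("BUCK2_UPDATE_GOLDEN"):
+#         Path(rel_path).write_text(generated_header + output)
+#     else:
+#         assert Path(rel_path).read_text() == generated_header + output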
+ +# pyre-strict + + +import asyncio +import re +from typing import List + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + +from buck2.tests.e2e_util.helper.golden import golden + + +def _normalize(s: str) -> str: + s = re.sub( + r"buck2 [a-z0-9]{16,64} (|)", + "buck2 ", + s, + ) + s = re.sub(r"buck2\.exe", "buck2", s) + return "\n".join([x.rstrip() for x in s.splitlines()]) + "\n" + + +def _find_subcommands(help: str) -> List[str]: + help = re.sub(r".*SUBCOMMANDS:", "", help, flags=re.DOTALL) + result = re.findall(r"^ ([a-z][a-z0-9_-]*)", help, flags=re.MULTILINE) + result = list(result) + return result + + +semaphore = asyncio.Semaphore(10) + + +async def _test_help(buck: Buck, command_stack: List[str]) -> int: + async with semaphore: + result = await buck.help(*command_stack) + + name = "-".join(["help", *command_stack]) + golden( + output=_normalize(result.stdout), + rel_path=f"buck2-{name}.golden.txt", + ) + + subcommands = _find_subcommands(result.stdout) + subtasks = [ + _test_help(buck, command_stack + [subcommand]) for subcommand in subcommands + ] + subresults = await asyncio.gather(*subtasks) + + return sum(subresults) + 1 + + +@buck_test() +async def test_help(buck: Buck) -> None: + total = await _test_help(buck, []) + assert total > 4 diff --git a/tests/core/help/test_help_data/.buckconfig b/tests/core/help/test_help_data/.buckconfig new file mode 100644 index 0000000000000..7078304680646 --- /dev/null +++ b/tests/core/help/test_help_data/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] + name = TARGETS.fixture + +[repositories] + root = . + prelude = . diff --git a/tests/core/help/test_help_data/.buckroot b/tests/core/help/test_help_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/help/test_help_data/buck2-help-aquery.golden.txt b/tests/core/help/test_help_data/buck2-help-aquery.golden.txt new file mode 100644 index 0000000000000..9e336b4dcba9f --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-aquery.golden.txt @@ -0,0 +1,222 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Perform queries on the action graph (experimental) + +The action graph consists of all the declared actions for a build, +with dependencies when one action consumes the outputs of another +action. + +Run `buck2 docs aquery` or +https://www.internalfb.com/intern/staticdocs/buck2/docs/users/query/aquery/ +for more documentation about the functions available in aquery +expressions. + +Examples: + +Print the action producing a target's default output + +`buck2 aquery //java/com/example/app:amazing` + +List all the commands for run actions for building a target + +`buck2 aquery 'kind(run, deps("//java/com/example/app:amazing+more"))' --output-attribute=cmd` + +Dynamic outputs (`ctx.actions.dynamic_output`): + +Currently, aquery interacts poorly with dynamic outputs. It may +return incorrect results or otherwise behave unexpectedly. + +Usage: buck2 aquery [OPTIONS] [QUERY_ARGS]... + +Arguments: + + the query to evaluate + + [QUERY_ARGS]... + list of literals for a multi-query (one containing `%s` or `%Ss`) + +Options: + -A, --output-all-attributes + Output all attributes, equivalent of --output-attribute ''. + + Avoid using this flag in automation because it may be expensive to produce certain + attributes, and because it makes harder to track which special attributes are used. 
+ + -B, --output-basic-attributes + Output basic attributes, namely those the user can supply, plus rule type and package name + + -a, --output-attribute + Regular expressions to match attributes. Regular expressions are used in "search" mode, so + for example empty string matches all attributes including special attributes. + + When using in automation, please specify the regular expression to match the attribute + precisely, for example `--output-attribute '^headers$'` to make it easier to track which + special attributes are used. + + --output-attributes ... + Deprecated: Use `--output-attribute` instead. + + List of space-separated attributes to output, --output-attributes attr1 attr2. + + --json + Output in JSON format + + --dot + Output in Graphviz Dot format + + --dot-compact + Output in a more compact format than Graphviz Dot + + --output-format + Output format (default: list). + + dot - dot graph format. + + dot_compact - compact alternative to dot format. + + json - JSON format. + + starlark - targets are printed like starlark code that would produce them. + + + [possible values: dot, json, dot_compact, starlark] + + -h, --help + Print help (see a summary with '-h') + +Target Configuration Options: + --target-platforms + Configuration target (one) to use to configure targets + + -m, --modifier + A configuration modifier to configure all targets on the command line. This may be a + constraint value target. + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. 
+ + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-audit-analysis-queries.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-analysis-queries.golden.txt new file mode 100644 index 0000000000000..02cfb13eae8c8 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-audit-analysis-queries.golden.txt @@ -0,0 +1,166 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +buck audit analysis resolving query attrs + +Usage: buck2 audit analysis-queries [OPTIONS] [TARGET_PATTERNS]... + +Arguments: + [TARGET_PATTERNS]... + Patterns to evaluate. The query attributes for targets matching these patterns will be + evaluated + +Options: + --include-outputs + Enable to print the outputs for the targets in the resolved queries + + -h, --help + Print help (see a summary with '-h') + +Target Configuration Options: + -u, --target-universe + Comma separated list of targets to construct a configured target universe. + + When the option is specified, command targets are be resolved in this universe. + Additionally, `--target-platforms=` and `--modifier=` flags are be used to configure the + universe targets, not the command targets. + + This argument is particularly recommended on most non-trivial cqueries. 
In the absence of + this argument, buck2 will use the target literals in your cquery expression as the value + for + this argument, which may not be what you want. + + --target-platforms + Configuration target (one) to use to configure targets + + -m, --modifier + A configuration modifier to configure all targets on the command line. This may be a + constraint value target. + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. 
+ + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-audit-cell.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-cell.golden.txt new file mode 100644 index 0000000000000..db34d6bbd3da4 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-audit-cell.golden.txt @@ -0,0 +1,155 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Query information about the [cells] list in .buckconfig. + +Usage: buck2 audit cell [OPTIONS] [CELL_ALIASES]... + +Arguments: + [CELL_ALIASES]... + Cell aliases to query. These aliases will be resolved in the working directory cell. + +Options: + --json + Output in JSON format + + --paths-only + Don't include the cell name in the output + + --aliases + If enabled and no explicit aliases are passed, will query for all aliases in the working + directory cell. + + --modifier + This option is not used + + -h, --help + Print help (see a summary with '-h') + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. 
However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. 
Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-audit-classpath.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-classpath.golden.txt new file mode 100644 index 0000000000000..e9408e16e1faf --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-audit-classpath.golden.txt @@ -0,0 +1,156 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Prints out a target's classpaths if it has one. + This command is deprecated and currently available for compatibility with buck1. + We will replace this command with something that can audit the entire `TemplatePlaceholderInfo` + in the future. + +Usage: buck2 audit classpath [OPTIONS] [TARGET_PATTERNS]... + +Arguments: + [TARGET_PATTERNS]... + Target patterns to audit + +Options: + --json + Output in JSON format + + -h, --help + Print help (see a summary with '-h') + +Target Configuration Options: + --target-platforms + Configuration target (one) to use to configure targets + + -m, --modifier + A configuration modifier to configure all targets on the command line. This may be a + constraint value target. + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. 
+ + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-audit-config.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-config.golden.txt new file mode 100644 index 0000000000000..be376a1a7b743 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-audit-config.golden.txt @@ -0,0 +1,167 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +buck audit config + +Usage: buck2 audit config [OPTIONS] [SPECS]... + +Arguments: + [SPECS]... + config section/key specs of the form `section` or `section.key`. 
If any specs are + provided, only values matching a spec will be printed (section headers will be printed + only for sections with a key matching the spec) + +Options: + --cell + + + --all-cells + Produce information for all cells that Buck2 knows about + + --output-format + [possible values: simple, json] + + --json + + + --location + [default: none] + [possible values: none, direct, extended] + + --value + [default: resolved] + [possible values: resolved, raw, both] + + --modifier + This option is not used + + -h, --help + Print help (see a summary with '-h') + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. 
+ + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-audit-configurations.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-configurations.golden.txt new file mode 100644 index 0000000000000..ac866253effec --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-audit-configurations.golden.txt @@ -0,0 +1,146 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +prints the constraints for configuration IDs + +Usage: buck2 audit configurations [OPTIONS] [configurations]... + +Arguments: + [configurations]... + configurations to audit (example: `cell//package:target-105fe3389fc7e436`). If none + provided, will print information about all known configurations. + +Options: + --modifier + This option is not used + + -h, --help + Print help (see a summary with '-h') + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. 
The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. 
Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-audit-deferred-materializer-flush-access-times.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-deferred-materializer-flush-access-times.golden.txt new file mode 100644 index 0000000000000..ea7258f4a81e1 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-audit-deferred-materializer-flush-access-times.golden.txt @@ -0,0 +1,28 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Usage: buck2 audit deferred-materializer flush-access-times [OPTIONS] + +Options: + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-audit-deferred-materializer-fsck.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-deferred-materializer-fsck.golden.txt new file mode 100644 index 0000000000000..24fb47ff828fb --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-audit-deferred-materializer-fsck.golden.txt @@ -0,0 +1,28 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Usage: buck2 audit deferred-materializer fsck [OPTIONS] + +Options: + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. 
Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-audit-deferred-materializer-get-refresh-log.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-deferred-materializer-get-refresh-log.golden.txt new file mode 100644 index 0000000000000..b0ac24d2540e2 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-audit-deferred-materializer-get-refresh-log.golden.txt @@ -0,0 +1,30 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Get the log for TTL refreshes + +Usage: buck2 audit deferred-materializer get-refresh-log [OPTIONS] + +Options: + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-audit-deferred-materializer-help.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-deferred-materializer-help.golden.txt new file mode 100644 index 0000000000000..c511ad88890a0 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-audit-deferred-materializer-help.golden.txt @@ -0,0 +1,8 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Print this message or the help of the given subcommand(s) + +Usage: buck2 audit deferred-materializer help [COMMAND]... + +Arguments: + [COMMAND]... Print help for the subcommand(s) diff --git a/tests/core/help/test_help_data/buck2-help-audit-deferred-materializer-list-subscriptions.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-deferred-materializer-list-subscriptions.golden.txt new file mode 100644 index 0000000000000..eec8056038408 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-audit-deferred-materializer-list-subscriptions.golden.txt @@ -0,0 +1,28 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Usage: buck2 audit deferred-materializer list-subscriptions [OPTIONS] + +Options: + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. 
Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-audit-deferred-materializer-list.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-deferred-materializer-list.golden.txt new file mode 100644 index 0000000000000..a766465878268 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-audit-deferred-materializer-list.golden.txt @@ -0,0 +1,28 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Usage: buck2 audit deferred-materializer list [OPTIONS] + +Options: + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-audit-deferred-materializer-refresh.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-deferred-materializer-refresh.golden.txt new file mode 100644 index 0000000000000..3837985b0f027 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-audit-deferred-materializer-refresh.golden.txt @@ -0,0 +1,32 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Usage: buck2 audit deferred-materializer refresh [OPTIONS] + +Arguments: + + Minimum TTL to require for actions + +Options: + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. 
Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-audit-deferred-materializer-test-iter.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-deferred-materializer-test-iter.golden.txt new file mode 100644 index 0000000000000..8c16e8bf1ac6c --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-audit-deferred-materializer-test-iter.golden.txt @@ -0,0 +1,31 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Usage: buck2 audit deferred-materializer test-iter [OPTIONS] + +Options: + --count + [default: 1] + + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-audit-deferred-materializer.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-deferred-materializer.golden.txt new file mode 100644 index 0000000000000..e8cbb49bcb8cf --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-audit-deferred-materializer.golden.txt @@ -0,0 +1,151 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Access and interact with the deferred materializer + +Usage: buck2 audit deferred-materializer [OPTIONS] + +Commands: + list + list-subscriptions + fsck + refresh + get-refresh-log Get the log for TTL refreshes + test-iter + flush-access-times + help Print this message or the help of the given subcommand(s) + +Options: + --modifier + This option is not used + + -h, --help + Print help (see a summary with '-h') + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. 
However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. 
Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-audit-dep-files.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-dep-files.golden.txt new file mode 100644 index 0000000000000..1ffa97229042d --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-audit-dep-files.golden.txt @@ -0,0 +1,156 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +prints out the select files for a command + +Usage: buck2 audit dep-files [OPTIONS] [IDENTIFIER] + +Arguments: + + Target to query dep files for + + + Action category + + [IDENTIFIER] + Action identifier + +Options: + -h, --help + Print help (see a summary with '-h') + +Target Configuration Options: + --target-platforms + Configuration target (one) to use to configure targets + + -m, --modifier + A configuration modifier to configure all targets on the command line. This may be a + constraint value target. + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. 
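+ +# Example (hypothetical invocation shown for illustration; the target, action category, and identifier below are placeholders, not generated output): +# buck2 audit dep-files root//example:lib cxx_compile src/lib.cpp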
+ +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-audit-execution-platform-resolution.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-execution-platform-resolution.golden.txt new file mode 100644 index 0000000000000..35de9c9e64257 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-audit-execution-platform-resolution.golden.txt @@ -0,0 +1,162 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +prints out information about execution platform resolution + +Usage: buck2 audit execution-platform-resolution [OPTIONS] [TARGET_PATTERNS]... + +Arguments: + [TARGET_PATTERNS]... + Patterns to analyze + +Options: + -h, --help + Print help (see a summary with '-h') + +Target Configuration Options: + -u, --target-universe + Comma-separated list of targets to construct a configured target universe. + + When the option is specified, command targets are resolved in this universe. + Additionally, the `--target-platforms=` and `--modifier=` flags are used to configure the + universe targets, not the command targets. + + This argument is particularly recommended on most non-trivial cqueries. In the absence of + this argument, buck2 will use the target literals in your cquery expression as the value + for this argument, which may not be what you want. + + --target-platforms + Configuration target (one) to use to configure targets + + -m, --modifier + A configuration modifier to configure all targets on the command line. This may be a + constraint value target.
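+ +# Example (hypothetical invocation shown for illustration; the target pattern and platform label below are placeholders, not generated output): +# buck2 audit execution-platform-resolution root//example:bin --target-platforms root//platforms:linux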
+ +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. 
A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-audit-help.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-help.golden.txt new file mode 100644 index 0000000000000..c3714acc95569 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-audit-help.golden.txt @@ -0,0 +1,8 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Print this message or the help of the given subcommand(s) + +Usage: buck2 audit help [COMMAND]... + +Arguments: + [COMMAND]... Print help for the subcommand(s) diff --git a/tests/core/help/test_help_data/buck2-help-audit-includes.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-includes.golden.txt new file mode 100644 index 0000000000000..963ee04349116 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-audit-includes.golden.txt @@ -0,0 +1,148 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +list build file extensions imported at parse time. + +Usage: buck2 audit includes [OPTIONS] [BUILD_FILES]... + +Arguments: + [BUILD_FILES]... + Build files to audit. These are expected to be relative paths from the working dir cell. + +Options: + --json + Print json representation of outputs + + --modifier + This option is not used + + -h, --help + Print help (see a summary with '-h') + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. 
The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-audit-output.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-output.golden.txt new file mode 100644 index 0000000000000..659630b21d40f --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-audit-output.golden.txt @@ -0,0 +1,178 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Query the action that produced the output artifact. Does not support BXL, test, scratch, or anon +artifacts. 
If the configuration hash of the output path does not match the current platform +configuration, the unconfigured target label will be returned. + +Usage: buck2 audit output [OPTIONS] + +Arguments: + + The buck-out path to the build artifact, starting with `buck-out` and including the + configuration platform. + +Options: + --json + + + -A, --output-all-attributes + Output all attributes, the equivalent of --output-attribute ''. + + Avoid using this flag in automation because it may be expensive to produce certain + attributes, and because it makes it harder to track which special attributes are used. + + -B, --output-basic-attributes + Output basic attributes, namely those the user can supply, plus rule type and package name + + -a, --output-attribute + Regular expressions to match attributes. Regular expressions are used in "search" mode, so + for example an empty string matches all attributes including special attributes. + + When using this flag in automation, please specify the regular expression to match the + attribute precisely, for example `--output-attribute '^headers$'` to make it easier to + track which special attributes are used. + + --output-attributes ... + Deprecated: Use `--output-attribute` instead. + + List of space-separated attributes to output, --output-attributes attr1 attr2. + + -h, --help + Print help (see a summary with '-h') + +Target Configuration Options: + --target-platforms + Configuration target (one) to use to configure targets + + -m, --modifier + A configuration modifier to configure all targets on the command line. This may be a + constraint value target. + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter.
+ + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-audit-package-values.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-package-values.golden.txt new file mode 100644 index 0000000000000..c7081244851c4 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-audit-package-values.golden.txt @@ -0,0 +1,147 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Inspect package values. + +Package values are the values set with the `write_package_value` function from `PACKAGE` files. + +Usage: buck2 audit package-values [OPTIONS] [PACKAGES]... + +Arguments: + [PACKAGES]...
+ Package names to inspect (like `//foo/bar`, no trailing colon) + +Options: + --modifier + This option is not used + + -h, --help + Print help (see a summary with '-h') + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. 
No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-audit-parse.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-parse.golden.txt new file mode 100644 index 0000000000000..8146e57836cc3 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-audit-parse.golden.txt @@ -0,0 +1,151 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Parses the buck-out path into parts that may be useful (ex: config hash, file path to artifact). + +Usage: buck2 audit parse [OPTIONS] + +Options: + -h, --help + Print help (see a summary with '-h') + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. 
+ + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + + --modifier + This option is not used + + --json + + + --output-attribute + + + + The buck-out path to the build artifact, starting with `buck-out` and including the + configuration platform. + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. 
Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-audit-prelude.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-prelude.golden.txt new file mode 100644 index 0000000000000..bce9c99897461 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-audit-prelude.golden.txt @@ -0,0 +1,141 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +print the interpreter prelude to stdout + +Usage: buck2 audit prelude [OPTIONS] + +Options: + --modifier + This option is not used + + -h, --help + Print help (see a summary with '-h') + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. 
Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-audit-providers.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-providers.golden.txt new file mode 100644 index 0000000000000..28ab0059426ea --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-audit-providers.golden.txt @@ -0,0 +1,171 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +prints out the providers for a target pattern + +Usage: buck2 audit providers [OPTIONS] ... + +Arguments: + ... + Patterns to analyze + +Options: + --quiet + + + -l, --list + List the available providers + + --print-debug + Print the providers using debug format (very verbose) + + -h, --help + Print help (see a summary with '-h') + +Target Configuration Options: + -u, --target-universe + Comma-separated list of targets to construct a configured target universe. + + When the option is specified, command targets are resolved in this universe. + Additionally, the `--target-platforms=` and `--modifier=` flags are used to configure the + universe targets, not the command targets. + + This argument is particularly recommended on most non-trivial cqueries. In the absence of + this argument, buck2 will use the target literals in your cquery expression as the value + for this argument, which may not be what you want. + + --target-platforms + Configuration target (one) to use to configure targets + + -m, --modifier + A configuration modifier to configure all targets on the command line. This may be a + constraint value target.
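+ +# Example (hypothetical invocation shown for illustration; the target pattern below is a placeholder, not generated output): +# buck2 audit providers root//example:lib --list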
+ +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. 
A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-audit-starlark-help.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-starlark-help.golden.txt new file mode 100644 index 0000000000000..57ec1d0326fe2 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-audit-starlark-help.golden.txt @@ -0,0 +1,8 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Print this message or the help of the given subcommand(s) + +Usage: buck2 audit starlark help [COMMAND]... + +Arguments: + [COMMAND]... Print help for the subcommand(s) diff --git a/tests/core/help/test_help_data/buck2-help-audit-starlark-module.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-starlark-module.golden.txt new file mode 100644 index 0000000000000..86e3cbacba738 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-audit-starlark-module.golden.txt @@ -0,0 +1,145 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Inspect Starlark module by fully qualified import string like foo//bar:baz.bzl + +Usage: buck2 audit starlark module [OPTIONS] + +Arguments: + + Module import path + +Options: + --modifier + This option is not used + + -h, --help + Print help (see a summary with '-h') + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. 
+ + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. 
Client metadata must be of the
+ form `key=value`, where `key` is a snake_case identifier, and will be sent to backend
+ datasets
diff --git a/tests/core/help/test_help_data/buck2-help-audit-starlark-package-deps.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-starlark-package-deps.golden.txt
new file mode 100644
index 0000000000000..be380f09ca354
--- /dev/null
+++ b/tests/core/help/test_help_data/buck2-help-audit-starlark-package-deps.golden.txt
@@ -0,0 +1,145 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+Inspect all bzl dependencies of a Starlark package file by package name like foo//bar/baz
+
+Usage: buck2 audit starlark package-deps [OPTIONS]
+
+Arguments:
+
+ Package
+
+Options:
+ --modifier
+ This option is not used
+
+ -h, --help
+ Print help (see a summary with '-h')
+
+Buckconfig Options:
+ -c, --config
+ List of config options
+
+ --config-file
+ List of config file paths
+
+ --fake-host
+ [possible values: default, linux, macos, windows]
+
+ --fake-arch
+ [possible values: default, aarch64, x8664]
+
+ --fake-xcode-version
+ Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b)
+
+ --reuse-current-config
+ Re-uses any `--config` values (inline or via modefiles) if there's a previous command,
+ otherwise the flag is ignored.
+
+ If there is a previous command and `--reuse-current-config` is set, then the old config is
+ used, ignoring any overrides.
+
+ If there is no previous command but the flag was set, then the flag is ignored, the
+ command behaves as if the flag was not set at all.
+
+ --exit-when-different-state
+ Used for exiting a concurrent command when a different state is detected
+
+ --preemptible
+ Used to configure when this command could be preempted by another command for the same
+ isolation dir.
+
+ Normally, when you run two commands - from different terminals, say - buck2 will attempt
+ to run them in parallel. However, if the two commands are based on different state, that
+ is they either have different configs or different filesystem states, buck2 cannot run
+ them in parallel. The default behavior in this case is to block the second command until
+ the first completes.
+
+ Possible values:
+ - never: (default) When another command starts that cannot run in parallel with
+ this one, block that command
+ - always: When another command starts, interrupt this command, *even if they
+ could run in parallel*. There is no good reason to use this other than that it provides
+ slightly nicer superconsole output
+ - ondifferentstate: When another command starts that cannot run in parallel with this one,
+ interrupt this command
+
+Starlark Options:
+ --disable-starlark-types
+ Disable runtime type checking in Starlark interpreter.
+
+ This option is not stable, and can be used only locally to diagnose evaluation performance
+ problems.
+
+ --stack
+ Record or show target call stacks.
+
+ Starlark call stacks will be included in duplicate targets error.
+
+ If a command outputs targets (like `targets` command), starlark call stacks will be
+ printed after the targets.
+
+Console Options:
+ --console
+ Which console to use for this command
+
+ [env: BUCK_CONSOLE=]
+ [default: auto]
+ [possible values: auto, none, simple, simplenotty, simpletty, super]
+
+ --ui ...
+ Configure additional superconsole ui components.
+
+ Accepts a comma-separated list of superconsole components to add.
Possible values are:
+
+ dice - shows information about evaluated dice nodes debugevents - shows information about
+ the flow of events from buckd
+
+ These components can be turned on/off interactively. Press 'h' for help when superconsole
+ is active.
+
+ Possible values:
+ - dice
+ - debugevents
+ - io: I/O panel
+ - re: RE panel
+
+ --no-interactive-console
+ Disable console interactions
+
+ [env: BUCK_NO_INTERACTIVE_CONSOLE=]
+
+Event Log Options:
+ --event-log
+ Write events to this log file
+
+ --write-build-id
+ Write command invocation id into this file
+
+ --unstable-write-invocation-record
+ Write the invocation record (as JSON) to this path. No guarantees whatsoever are made
+ regarding the stability of the format
+
+ --command-report-path
+ Write the command report to this path. A command report is always written to
+ `buck-out/v2//command_report` even without this flag
+
+Universal Options:
+ -v, --verbose
+ How verbose buck should be while logging.
+
+ Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 =
+ more info about everything; 4 = more info about everything + stderr;
+
+ It can be combined with specific log items (stderr, full_failed_command, commands,
+ actions, status, stats, success) to fine-tune the verbosity of the log. Example usage
+ "-v=1,stderr"
+
+ [default: 1]
+
+ --oncall
+ The oncall executing this command
+
+ --client-metadata
+ Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the
+ form `key=value`, where `key` is a snake_case identifier, and will be sent to backend
+ datasets
diff --git a/tests/core/help/test_help_data/buck2-help-audit-starlark.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-starlark.golden.txt
new file mode 100644
index 0000000000000..dad29305d7b59
--- /dev/null
+++ b/tests/core/help/test_help_data/buck2-help-audit-starlark.golden.txt
@@ -0,0 +1,35 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+Debug Starlark interpreter
+
+Usage: buck2 audit starlark [OPTIONS]
+
+Commands:
+ module Inspect Starlark module by fully qualified import string like foo//bar:baz.bzl
+ package-deps Inspect all bzl dependencies of a Starlark package file by package name like foo//bar/baz
+ help Print this message or the help of the given subcommand(s)
+
+Options:
+ -h, --help
+ Print help (see a summary with '-h')
+
+Universal Options:
+ -v, --verbose
+ How verbose buck should be while logging.
+
+ Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 =
+ more info about everything; 4 = more info about everything + stderr;
+
+ It can be combined with specific log items (stderr, full_failed_command, commands,
+ actions, status, stats, success) to fine-tune the verbosity of the log. Example usage
+ "-v=1,stderr"
+
+ [default: 1]
+
+ --oncall
+ The oncall executing this command
+
+ --client-metadata
+ Metadata key-value pairs to inject into Buck2's logging.
Client metadata must be of the
+ form `key=value`, where `key` is a snake_case identifier, and will be sent to backend
+ datasets
diff --git a/tests/core/help/test_help_data/buck2-help-audit-subtargets.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-subtargets.golden.txt
new file mode 100644
index 0000000000000..6e4932ca7a677
--- /dev/null
+++ b/tests/core/help/test_help_data/buck2-help-audit-subtargets.golden.txt
@@ -0,0 +1,169 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+Print all subtargets
+
+Usage: buck2 audit subtargets [OPTIONS] ...
+
+Arguments:
+ ...
+ Patterns to analyze
+
+Options:
+ --shallow
+ Do not recursively print all nested subtargets; print only the first level. This is set to
+ false by default
+
+ --json
+ Print subtargets as JSON
+
+ -h, --help
+ Print help (see a summary with '-h')
+
+Target Configuration Options:
+ -u, --target-universe
+ Comma separated list of targets to construct a configured target universe.
+
+ When the option is specified, command targets are resolved in this universe.
+ Additionally, `--target-platforms=` and `--modifier=` flags are used to configure the
+ universe targets, not the command targets.
+
+ This argument is particularly recommended on most non-trivial cqueries. In the absence of
+ this argument, buck2 will use the target literals in your cquery expression as the value
+ for
+ this argument, which may not be what you want.
+
+ --target-platforms
+ Configuration target (one) to use to configure targets
+
+ -m, --modifier
+ A configuration modifier to configure all targets on the command line. This may be a
+ constraint value target.
+
+Buckconfig Options:
+ -c, --config
+ List of config options
+
+ --config-file
+ List of config file paths
+
+ --fake-host
+ [possible values: default, linux, macos, windows]
+
+ --fake-arch
+ [possible values: default, aarch64, x8664]
+
+ --fake-xcode-version
+ Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b)
+
+ --reuse-current-config
+ Re-uses any `--config` values (inline or via modefiles) if there's a previous command,
+ otherwise the flag is ignored.
+
+ If there is a previous command and `--reuse-current-config` is set, then the old config is
+ used, ignoring any overrides.
+
+ If there is no previous command but the flag was set, then the flag is ignored, the
+ command behaves as if the flag was not set at all.
+
+ --exit-when-different-state
+ Used for exiting a concurrent command when a different state is detected
+
+ --preemptible
+ Used to configure when this command could be preempted by another command for the same
+ isolation dir.
+
+ Normally, when you run two commands - from different terminals, say - buck2 will attempt
+ to run them in parallel. However, if the two commands are based on different state, that
+ is they either have different configs or different filesystem states, buck2 cannot run
+ them in parallel. The default behavior in this case is to block the second command until
+ the first completes.
+
+ Possible values:
+ - never: (default) When another command starts that cannot run in parallel with
+ this one, block that command
+ - always: When another command starts, interrupt this command, *even if they
+ could run in parallel*.
There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-audit-visibility.golden.txt b/tests/core/help/test_help_data/buck2-help-audit-visibility.golden.txt new file mode 100644 index 0000000000000..6109bf7fd7b73 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-audit-visibility.golden.txt @@ -0,0 +1,146 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Verify the visibility for transitive deps of the specified target(s) on the unconfigured target +graph + +Usage: buck2 audit visibility [OPTIONS] [TARGET_PATTERNS]... + +Arguments: + [TARGET_PATTERNS]... + Target pattern(s) to analyze. 
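+
+ As an illustrative sketch (the package name is invented), checking every target
+ under one directory might look like:
+
+ `buck2 audit visibility //mylib/...`
+
+ which verifies visibility for the transitive deps of each matching target on the
+ unconfigured target graph.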
+ +Options: + --modifier + This option is not used + + -h, --help + Print help (see a summary with '-h') + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. 
A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-audit.golden.txt b/tests/core/help/test_help_data/buck2-help-audit.golden.txt new file mode 100644 index 0000000000000..79dffbcbea440 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-audit.golden.txt @@ -0,0 +1,59 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Perform lower level queries + +Usage: buck2 audit [OPTIONS] + +Commands: + cell Query information about the [cells] list in .buckconfig. + classpath Prints out a target's classpaths if it has one. + This command is deprecated and currently available for + compatibility with buck1. + We will replace this command with something that can audit the + entire `TemplatePlaceholderInfo` in the future. + config buck audit config + configurations prints the constraints for configuration IDs + includes list build file extensions imported at parse time. + prelude print the interpreter prelude to stdout + providers prints out the providers for a target pattern + subtargets Print all subtargets + analysis-queries buck audit analysis resolving query attrs + execution-platform-resolution prints out information about execution platform resolution + visibility Verify the visibility for transitive deps of the specified + target(s) on the unconfigured target graph + starlark Debug Starlark interpreter + dep-files prints out the select files for a command + deferred-materializer Access and interact with the deferred materializer + output Query the action that produced the output artifact. Does not + support BXL, test, scratch, or anon artifacts. If the configuration + hash of the output path does not match the current platform + configuration, the unconfigured target label will be returned. + parse Parses the buck-out path into parts that may be useful (ex: config + hash, file path to artifact). + package-values Inspect package values + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. 
Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-build.golden.txt b/tests/core/help/test_help_data/buck2-help-build.golden.txt new file mode 100644 index 0000000000000..ab5b889383121 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-build.golden.txt @@ -0,0 +1,313 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Build the specified targets + +Usage: buck2 build [OPTIONS] [TARGET_PATTERNS]... + +Arguments: + [TARGET_PATTERNS]... + Patterns to build + +Options: + --show-output + Print the path to the output for each of the rules relative to the project root + + --show-full-output + Print the absolute path to the output for each of the rules + + --show-simple-output + Print only the path to the output for each of the rules relative to the project root + + --show-full-simple-output + Print only the absolute path to the output for each of the rules + + --show-json-output + Print the output paths relative to the project root, in JSON format + + --show-full-json-output + Print the output absolute paths, in JSON format + + -M, --materializations + Materialize (or skip) the final artifacts, bypassing buckconfig. + + [possible values: all, none] + + --build-default-info + Build default info (this is the default) + + --skip-default-info + Do not build default info (this is not the default) + + --build-run-info + Build runtime dependencies (this is the default) + + --skip-run-info + Do not build runtime dependencies (this is not the default) + + --build-test-info + Build tests (this is not the default) + + --skip-test-info + Do not build tests (this is the default) + + --out + Copy the output of the built target to this path (`-` to stdout) + + --output-hashes-file + Experimental: Path to a file where the Buck2 daemon should write a list of produced + artifacts in json format + + --build-report + Print a build report + + `--build-report=-` will print the build report to stdout `--build-report=` will + write the build report to the file + + --enable-optional-validations + Comma separated list of validation names to run that are marked optional. + + By default, validations marked as optional are skipped. This option overrides the + behaviour and executes those validations. + + --build-report-options + Comma separated list of build report options. + + The following options are supported: + + `fill-out-failures`: fill out failures the same way Buck1 would. + + `package-project-relative-paths`: emit the project-relative path of packages for the + targets that were built. + + -j, --num-threads + Number of threads to use during execution (default is # cores) + + --local-only + Enable only local execution. Will reject actions that cannot execute locally + + [env: BUCK_OFFLINE_BUILD=] + + --remote-only + Enable only remote execution. Will reject actions that cannot execute remotely + + --prefer-local + Enable hybrid execution. Will prefer executing actions that can execute locally on the + local host + + --prefer-remote + Enable hybrid execution. Will prefer executing actions that can execute remotely on RE and + will avoid racing local and remote execution + + --unstable-no-execution + Experimental: Disable all execution + + --no-remote-cache + Do not perform remote cache queries or cache writes. 
If remote execution is enabled, the
+ RE service might still deduplicate actions, so for e.g. benchmarking, using a random
+ isolation dir is preferred
+
+ [env: BUCK_OFFLINE_BUILD=]
+
+ --write-to-cache-anyway
+ Could be used to enable the action cache writes on the RE worker when no_remote_cache is
+ specified
+
+ --eager-dep-files
+ Process dep files when they are generated (i.e. after running a command that produces dep
+ files), rather than when they are used (i.e. before re-running a command that previously
+ produced dep files). Use this when debugging commands that produce dep files. Note that
+ commands that previously produced dep files will not re-run: only dep files produced
+ during this command will be eagerly loaded
+
+ --upload-all-actions
+ Uploads every action to the RE service, regardless of whether the action needs to execute
+ on RE.
+
+ This is useful when debugging builds and trying to inspect actions which executed
+ remotely. It's possible that the action result is cached but the action itself has
+ expired. In this case, downloading the action itself would fail. Enabling this option
+ would unconditionally upload all actions, thus you will not hit any expiration issues.
+
+ --fail-fast
+ If Buck hits an error, do as little work as possible before exiting.
+
+ To illustrate the effect of this flag, consider an invocation of `build :foo :bar`. The
+ default behavior of buck is to do enough work to get a result for the builds of each of
+ `:foo` and `:bar`, and no more. This means that buck will continue to complete the build
+ of `:bar` after the build of `:foo` has failed; however, once one dependency of `:foo` has
+ failed, other dependencies will be cancelled unless they are needed by `:bar`.
+
+ This flag changes the behavior of buck to not wait on `:bar` to complete once `:foo` has
+ failed. Generally, this flag only has an effect on builds that specify multiple targets.
+
+ `--keep-going` changes the behavior of buck to not only wait on `:bar` once one dependency
+ of `:foo` has failed, but to additionally attempt to build other dependencies of `:foo` if
+ possible.
+
+ --keep-going
+ If Buck hits an error, continue doing as much work as possible before exiting.
+
+ See `--fail-fast` for more details.
+
+ --skip-missing-targets
+ If target is missing, then skip building instead of throwing error
+
+ --skip-incompatible-targets
+ If target is incompatible with the specified configuration, skip building instead of
+ throwing error. This does not apply to targets specified with glob patterns `/...` or `:`
+ which are skipped unconditionally
+
+ --materialize-failed-inputs
+ Materializes inputs for failed actions which ran on RE
+
+ -h, --help
+ Print help (see a summary with '-h')
+
+Target Configuration Options:
+ -u, --target-universe
+ Comma separated list of targets to construct a configured target universe.
+
+ When the option is specified, command targets are resolved in this universe.
+ Additionally, `--target-platforms=` and `--modifier=` flags are used to configure the
+ universe targets, not the command targets.
+
+ This argument is particularly recommended on most non-trivial cqueries. In the absence of
+ this argument, buck2 will use the target literals in your cquery expression as the value
+ for
+ this argument, which may not be what you want.
+
+ --target-platforms
+ Configuration target (one) to use to configure targets
+
+ -m, --modifier
+ A configuration modifier to configure all targets on the command line.
This may be a + constraint value target. + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. 
A command report is always written to
+ `buck-out/v2//command_report` even without this flag
+
+Universal Options:
+ -v, --verbose
+ How verbose buck should be while logging.
+
+ Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 =
+ more info about everything; 4 = more info about everything + stderr;
+
+ It can be combined with specific log items (stderr, full_failed_command, commands,
+ actions, status, stats, success) to fine-tune the verbosity of the log. Example usage
+ "-v=1,stderr"
+
+ [default: 1]
+
+ --oncall
+ The oncall executing this command
+
+ --client-metadata
+ Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the
+ form `key=value`, where `key` is a snake_case identifier, and will be sent to backend
+ datasets
diff --git a/tests/core/help/test_help_data/buck2-help-bxl.golden.txt b/tests/core/help/test_help_data/buck2-help-bxl.golden.txt
new file mode 100644
index 0000000000000..cbeaba594201f
--- /dev/null
+++ b/tests/core/help/test_help_data/buck2-help-bxl.golden.txt
@@ -0,0 +1,270 @@
+# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command
+
+Run BXL scripts
+
+Usage: buck2 bxl [OPTIONS] [-- ...]
+
+Arguments:
+
+ The bxl function to execute as defined by the label of form
+ `//path/file.bxl:`
+
+ [BXL INPUT ARGS]...
+ Arguments passed to the bxl script
+
+Options:
+ -M, --materializations
+ Materialize (or skip) the final artifacts, bypassing buckconfig.
+
+ [possible values: all, none]
+
+ --user-event-log
+ Write user events to this log file. Both user and internal events are written to main
+ event log. If this flag is specified, user events are additionally written to user event
+ log. Log format is JSONL, uncompressed if no known extensions are detected, or you can
+ explicitly specify the compression via the file extension (ex: `.json-lines.gz` would be
+ gzip compressed, `.json-lines.zst` would be zstd compressed). Resulting log is
+ compatible with `buck2 log show-user`
+
+ --build-report
+ Print a build report
+
+ `--build-report=-` will print the build report to stdout `--build-report=` will
+ write the build report to the file
+
+ --enable-optional-validations
+ Comma separated list of validation names to run that are marked optional.
+
+ By default, validations marked as optional are skipped. This option overrides the
+ behaviour and executes those validations.
+
+ --build-report-options
+ Comma separated list of build report options.
+
+ The following options are supported:
+
+ `fill-out-failures`: fill out failures the same way Buck1 would.
+
+ `package-project-relative-paths`: emit the project-relative path of packages for the
+ targets that were built.
+
+ -j, --num-threads
+ Number of threads to use during execution (default is # cores)
+
+ --local-only
+ Enable only local execution. Will reject actions that cannot execute locally
+
+ [env: BUCK_OFFLINE_BUILD=]
+
+ --remote-only
+ Enable only remote execution. Will reject actions that cannot execute remotely
+
+ --prefer-local
+ Enable hybrid execution. Will prefer executing actions that can execute locally on the
+ local host
+
+ --prefer-remote
+ Enable hybrid execution. Will prefer executing actions that can execute remotely on RE and
+ will avoid racing local and remote execution
+
+ --unstable-no-execution
+ Experimental: Disable all execution
+
+ --no-remote-cache
+ Do not perform remote cache queries or cache writes.
If remote execution is enabled, the + RE service might still deduplicate actions, so for e.g. benchmarking, using a random + isolation dir is preferred + + [env: BUCK_OFFLINE_BUILD=] + + --write-to-cache-anyway + Could be used to enable the action cache writes on the RE worker when no_remote_cache is + specified + + --eager-dep-files + Process dep files when they are generated (i.e. after running a command that produces dep + files), rather than when they are used (i.e. before re-running a command that previously + produced dep files). Use this when debugging commands that produce dep files. Note that + commands that previously produced dep files will not re-run: only dep files produced + during this command will be eagerly loaded + + --upload-all-actions + Uploads every action to the RE service, regardless of whether the action needs to execute + on RE. + + This is useful when debugging builds and trying to inspect actions which executed + remotely. It's possible that the action result is cached but the action itself has + expired. In this case, downloading the action itself would fail. Enabling this option + would unconditionally upload all actions, thus you will not hit any expiration issues. + + --fail-fast + If Buck hits an error, do as little work as possible before exiting. + + To illustrate the effect of this flag, consider an invocation of `build :foo :bar`. The + default behavior of buck is to do enough work to get a result for the builds of each of + `:foo` and `:bar`, and no more. This means that buck will continue to complete the build + of `:bar` after the build of `:foo` has failed; however, once one dependency of `:foo` has + failed, other dependencies will be cancelled unless they are needed by `:bar`. + + This flag changes the behavior of buck to not wait on `:bar` to complete once `:foo` has + failed. Generally, this flag only has an effect on builds that specify multiple targets. + + `--keep-going` changes the behavior of buck to not only wait on `:bar` once one dependency + of `:foo` has failed, but to additionally attempt to build other dependencies of `:foo` if + possible. + + --keep-going + If Buck hits an error, continue doing as much work as possible before exiting. + + See `--fail-fast` for more details. + + --skip-missing-targets + If target is missing, then skip building instead of throwing error + + --skip-incompatible-targets + If target is incompatible with the specified configuration, skip building instead of + throwing error. This does not apply to targets specified with glob patterns `/...` or `:` + which are skipped unconditionally + + --materialize-failed-inputs + Materializes inputs for failed actions which ran on RE + + -h, --help + Print help (see a summary with '-h') + +Target Configuration Options: + --target-platforms + Configuration target (one) to use to configure targets + + -m, --modifier + A configuration modifier to configure all targets on the command line. This may be a + constraint value target. + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. 
+ + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. 
Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-clean.golden.txt b/tests/core/help/test_help_data/buck2-help-clean.golden.txt new file mode 100644 index 0000000000000..958c29f0ebba7 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-clean.golden.txt @@ -0,0 +1,157 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Delete generated files and caches. + +The command also kills the buck2 daemon. + +Usage: buck2 clean [OPTIONS] + +Options: + --dry-run + Performs a dry-run and prints the paths that would be removed. + + --stale [] + Delete artifacts from buck-out older than 1 week or older than + the specified duration, without killing the daemon + + --tracked-only + Only considers tracked artifacts for cleanup. + + `buck-out` can contain untracked artifacts for different reasons: - Outputs from aborted + actions - State getting deleted (e.g., new buckversion that changes the on-disk state + format) - Writing to `buck-out` without being expected by Buck + + --modifier + This option is not used + + -h, --help + Print help (see a summary with '-h') + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. 
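+
+ As a hypothetical illustration of that diagnostic use (the command and pattern
+ are invented, and the flag must be supported by the command in question), one
+ might compare evaluation time with and without runtime type checking:
+
+ `buck2 targets //... --disable-starlark-types`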
+ + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-completion.golden.txt b/tests/core/help/test_help_data/buck2-help-completion.golden.txt new file mode 100644 index 0000000000000..b77a51d7ee7a4 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-completion.golden.txt @@ -0,0 +1,40 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Print completion configuration for shell + +For a one-time setup, run one of the following commands: + source <(buck2 completion bash) + source <(buck2 completion zsh) + +Usage: buck2 completion [OPTIONS] + +Arguments: + + shell for which to generate completion script + + [possible values: bash, fish, zsh] + +Options: + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. 
Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-cquery.golden.txt b/tests/core/help/test_help_data/buck2-help-cquery.golden.txt new file mode 100644 index 0000000000000..80271e40b5860 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-cquery.golden.txt @@ -0,0 +1,257 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Perform queries on the configured target graph + +The configured target graph includes information about the configuration +(platforms) and transitions involved in building targets. In the +configured graph, `selects` are fully resolved. The same target may +appear in multiple different configurations (when printed, the +configuration is after the target in parentheses). + +A user can specify a `--target-universe` flag to control how literals +are resolved. When provided, any literals will resolve to all +matching targets within the universe (which includes the targets +passed as the universe and all transitive deps of them). When not +provided, we implicitly set the universe to be rooted at every +target literal in the `cquery`. + +Run `buck2 docs cquery` or +https://www.internalfb.com/intern/staticdocs/buck2/docs/users/query/cquery/ +for more documentation about the functions available in cquery +expressions. + +Examples: + +Print all the attributes of a target + +`buck2 cquery //java/com/example/app:amazing --output-all-attributes` + +List the deps of a target (special characters in a target will +require quotes): + +`buck2 cquery 'deps("//java/com/example/app:amazing+more")'` + +Usage: buck2 cquery [OPTIONS] [QUERY_ARGS]... + +Arguments: + + the query to evaluate + + [QUERY_ARGS]... + list of literals for a multi-query (one containing `%s` or `%Ss`) + +Options: + -A, --output-all-attributes + Output all attributes, equivalent of --output-attribute ''. + + Avoid using this flag in automation because it may be expensive to produce certain + attributes, and because it makes it harder to track which special attributes are used. + + -B, --output-basic-attributes + Output basic attributes, namely those the user can supply, plus rule type and package name + + -a, --output-attribute + Regular expressions to match attributes. Regular expressions are used in "search" mode, so + for example empty string matches all attributes including special attributes. + + When using in automation, please specify the regular expression to match the attribute + precisely, for example `--output-attribute '^headers$'` to make it easier to track which + special attributes are used. + + --output-attributes ... + Deprecated: Use `--output-attribute` instead. + + List of space-separated attributes to output, --output-attributes attr1 attr2. + + --json + Output in JSON format + + --dot + Output in Graphviz Dot format + + --dot-compact + Output in a more compact format than Graphviz Dot + + --output-format + Output format (default: list). + + dot - dot graph format. + + dot_compact - compact alternative to dot format. + + json - JSON format. + + starlark - targets are printed like starlark code that would produce them.
+ + + [possible values: dot, json, dot_compact, starlark] + + --show-providers + Show the providers of the query result instead of the attributes and labels + + -h, --help + Print help (see a summary with '-h') + +Target Configuration Options: + -u, --target-universe + Comma separated list of targets to construct a configured target universe. + + When the option is specified, command targets are resolved in this universe. + Additionally, `--target-platforms=` and `--modifier=` flags are used to configure the + universe targets, not the command targets. + + This argument is particularly recommended on most non-trivial cqueries. In the absence of + this argument, buck2 will use the target literals in your cquery expression as the value + for + this argument, which may not be what you want. + + --target-platforms + Configuration target (one) to use to configure targets + + -m, --modifier + A configuration modifier to configure all targets on the command line. This may be a + constraint value target. + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components.
+ + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Starlark Profiling Options: + --profile-mode + Profile target loading. + + When this option is enabled, Buck will profile every `BUCK` file loaded during the query + and merge the results into a single profile. The command may return cached profile data if + `BUCK` files were not invalidated. + + [possible values: time-flame, heap-flame-allocated, heap-flame-retained, + heap-summary-allocated, heap-summary-retained, statement, bytecode, bytecode-pairs, + typecheck, coverage] + + --profile-output + Where to write profile output + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-ctargets.golden.txt b/tests/core/help/test_help_data/buck2-help-ctargets.golden.txt new file mode 100644 index 0000000000000..5a85e0512ee04 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-ctargets.golden.txt @@ -0,0 +1,154 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Resolve target patterns to configured targets + +Usage: buck2 ctargets [OPTIONS] [TARGET_PATTERNS]... + +Arguments: + [TARGET_PATTERNS]... + Patterns to interpret + +Options: + --skip-missing-targets + Skip missing targets from `BUCK` files when non-glob pattern is specified. This option + does not skip missing packages and does not ignore errors of `BUCK` file evaluation + + -h, --help + Print help (see a summary with '-h') + +Target Configuration Options: + --target-platforms + Configuration target (one) to use to configure targets + + -m, --modifier + A configuration modifier to configure all targets on the command line. This may be a + constraint value target. 
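For illustration, the configuration flags above combine like this (a sketch, not generated output; the target, platform, and constraint labels are hypothetical, not targets in this repo):

    buck2 ctargets //app/... --target-platforms //platforms:linux-x86_64
    buck2 ctargets //app:main --modifier //constraints:debug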
+ +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. 
A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-docs-aquery.golden.txt b/tests/core/help/test_help_data/buck2-help-docs-aquery.golden.txt new file mode 100644 index 0000000000000..3153e05ae16a7 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-docs-aquery.golden.txt @@ -0,0 +1,36 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Print documentation for aquery + +Usage: buck2 docs aquery [OPTIONS] + +Options: + --format + How to format the documentation + + [default: rendered] + [possible values: markdown, rendered] + + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-docs-cquery.golden.txt b/tests/core/help/test_help_data/buck2-help-docs-cquery.golden.txt new file mode 100644 index 0000000000000..0d210bbb731f4 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-docs-cquery.golden.txt @@ -0,0 +1,36 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Print documentation for cquery + +Usage: buck2 docs cquery [OPTIONS] + +Options: + --format + How to format the documentation + + [default: rendered] + [possible values: markdown, rendered] + + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. 
Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-docs-help.golden.txt b/tests/core/help/test_help_data/buck2-help-docs-help.golden.txt new file mode 100644 index 0000000000000..1b7179d66796a --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-docs-help.golden.txt @@ -0,0 +1,8 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Print this message or the help of the given subcommand(s) + +Usage: buck2 docs help [COMMAND]... + +Arguments: + [COMMAND]... Print help for the subcommand(s) diff --git a/tests/core/help/test_help_data/buck2-help-docs-query.golden.txt b/tests/core/help/test_help_data/buck2-help-docs-query.golden.txt new file mode 100644 index 0000000000000..67df57e9451f8 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-docs-query.golden.txt @@ -0,0 +1,36 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Print documentation for uquery + +Usage: buck2 docs query [OPTIONS] + +Options: + --format + How to format the documentation + + [default: rendered] + [possible values: markdown, rendered] + + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-docs-starlark-builtins.golden.txt b/tests/core/help/test_help_data/buck2-help-docs-starlark-builtins.golden.txt new file mode 100644 index 0000000000000..7368233947983 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-docs-starlark-builtins.golden.txt @@ -0,0 +1,143 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Generate documentation for starlark builtins. + +This command is designed to support buck2's doc generation and does not have stable output. + +Usage: buck2 docs starlark-builtins [OPTIONS] --output-dir + +Options: + --output-dir + The directory to output files to + + -h, --help + Print help (see a summary with '-h') + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. 
+ + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. 
Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-docs-starlark.golden.txt b/tests/core/help/test_help_data/buck2-help-docs-starlark.golden.txt new file mode 100644 index 0000000000000..d397b0671e015 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-docs-starlark.golden.txt @@ -0,0 +1,152 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Print documentation of user-defined starlark symbols + +Usage: buck2 docs starlark [OPTIONS] [SYMBOL_PATTERNS]... + +Arguments: + [SYMBOL_PATTERNS]... + Patterns to interpret. //foo:bar.bzl is 'every symbol in //foo:bar.bzl', //foo:bar.bzl:baz + only returns the documentation for the symbol 'baz' in //foo:bar.bzl + +Options: + --output-dir + Directory to write markdown files to. Required if format is markdown_files. + + --format + How to format the returned documentation + + [default: json] + [possible values: json, markdown_files] + + -h, --help + Print help (see a summary with '-h') + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets.
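For illustration, the symbol patterns and format options above are used like this (a sketch, not generated output; `//foo:bar.bzl` is the placeholder path from the argument docs and `out/docs` is a hypothetical output directory):

    buck2 docs starlark //foo:bar.bzl          # every symbol in //foo:bar.bzl, as JSON
    buck2 docs starlark //foo:bar.bzl:baz      # only the symbol 'baz'
    buck2 docs starlark //foo:bar.bzl --format markdown_files --output-dir out/docs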
+ +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-docs-uquery.golden.txt b/tests/core/help/test_help_data/buck2-help-docs-uquery.golden.txt new file mode 100644 index 0000000000000..f2948753c9611 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-docs-uquery.golden.txt @@ -0,0 +1,36 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Print documentation for query/uquery + +Usage: buck2 docs uquery [OPTIONS] + +Options: + --format + How to format the documentation + + [default: rendered] + [possible values: markdown, rendered] + + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. 
Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-docs.golden.txt b/tests/core/help/test_help_data/buck2-help-docs.golden.txt new file mode 100644 index 0000000000000..c010111b5ea90 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-docs.golden.txt @@ -0,0 +1,38 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Print documentation of specified symbols + +Usage: buck2 docs [OPTIONS] + +Commands: + starlark Print documentation of user-defined starlark symbols + starlark-builtins Generate documentation for starlark builtins + uquery Print documentation for query/uquery + cquery Print documentation for cquery + aquery Print documentation for aquery + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-expand-external-cell.golden.txt b/tests/core/help/test_help_data/buck2-help-expand-external-cell.golden.txt new file mode 100644 index 0000000000000..e0ebf69fae443 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-expand-external-cell.golden.txt @@ -0,0 +1,42 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Expand the contents of an external cell into the repo. + +The contents are placed at the path you specified for this cell in your buckconfig. + +If you additionally remove the entry from the `external_cells` section of your buckconfig, you can +edit the files directly in the repo and see those edits reflected in your build. + +Note that this creates a point-in-time snapshot. The files in the repo will not be updated if you e.g. +change the git commit of the cell in the future. + +Usage: buck2 expand-external-cell [OPTIONS] + +Arguments: + + + +Options: + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging.
Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-help-env.golden.txt b/tests/core/help/test_help_data/buck2-help-help-env.golden.txt new file mode 100644 index 0000000000000..13518124d49ea --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-help-env.golden.txt @@ -0,0 +1,35 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Print help for environment variables used by buck2 + +Usage: buck2 help-env [OPTIONS] + +Options: + --self-testing + Also print those environment variables that are only used for buck2 integration tests. + + These are all unstable and not meant to be used by most users. + + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-help.golden.txt b/tests/core/help/test_help_data/buck2-help-help.golden.txt new file mode 100644 index 0000000000000..9a1dcfd5c7399 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-help.golden.txt @@ -0,0 +1,8 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Print this message or the help of the given subcommand(s) + +Usage: buck2 help [COMMAND]... + +Arguments: + [COMMAND]... Print help for the subcommand(s) diff --git a/tests/core/help/test_help_data/buck2-help-init.golden.txt b/tests/core/help/test_help_data/buck2-help-init.golden.txt new file mode 100644 index 0000000000000..9811a66276411 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-init.golden.txt @@ -0,0 +1,76 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Initialize a buck2 project + +Usage: buck2 init [OPTIONS] [PATH] + +Arguments: + [PATH] + The path to initialize the project in. The folder does not need to exist + + [default: .] + +Options: + --no-prelude + Don't include the standard prelude or generate toolchain definitions + + --allow-dirty + Initialize the project even if the git repo at \[PATH\] has uncommitted changes + + --git + Also initialize a git repository at the given path, and set up an appropriate `.gitignore` + file + + -h, --help + Print help (see a summary with '-h') + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. 
Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-install.golden.txt b/tests/core/help/test_help_data/buck2-help-install.golden.txt new file mode 100644 index 0000000000000..a94a65fcf727b --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-install.golden.txt @@ -0,0 +1,299 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Build and install an application + +Usage: buck2 install [OPTIONS] [TARGET]... [-- ...] + +Arguments: + [TARGET]... + Target to build and install + + [INSTALL_ARGS]... + Additional arguments passed to the install when running it + +Options: + --installer-debug + Prints installer output to stderr. It might break superconsole + + -r, --run + Run an Android activity. Here for compatibility with buck1 - it is automatically forwarded + to the installer + + -e, --emulator + Use this option to use emulators only on Android. Here for compatibility with buck1 - it + is automatically forwarded to the installer + + -d, --device + Use this option to use real devices only on Android. Here for compatibility with buck1 - + it is automatically forwarded to the installer + + -s, --serial + Use Android device or emulator with specific serial or UDID number. Here for compatibility + with buck1 - it is automatically forwarded to the installer + + -x, --all-devices + Use all connected Android devices and/or emulators (multi-install mode). Here for + compatibility with buck1 - it is automatically forwarded to the installer + + -a, --activity + Android activity to launch e.g. com.facebook/.LoginActivity. Implies -r. Here for + compatibility with buck1 - it is automatically forwarded to the installer + + -i, --intent-uri + Android Intent URI to launch e.g. fb://profile. Implies -r. Here for compatibility with + buck1 - it is automatically forwarded to the installer + + -w, --wait-for-debugger + Have the launched Android process wait for the debugger. Here for compatibility with buck1 + - it is automatically forwarded to the installer + + -u, --uninstall + Use this option to uninstall an installed app before installing again. Here for + compatibility with buck1 - it is automatically forwarded to the installer + + -k, --keep + Use this option to keep user data when uninstalling.
Here for compatibility with buck1 - + it is automatically forwarded to the installer + + --build-report + Print a build report + + `--build-report=-` will print the build report to stdout `--build-report=` will + write the build report to the file + + --enable-optional-validations + Comma separated list of validation names to run that are marked optional. + + By default, validations marked as optional are skipped. This option overrides the + behaviour and executes those validations. + + --build-report-options + Comma separated list of build report options. + + The following options are supported: + + `fill-out-failures`: fill out failures the same way Buck1 would. + + `package-project-relative-paths`: emit the project-relative path of packages for the + targets that were built. + + -j, --num-threads + Number of threads to use during execution (default is # cores) + + --local-only + Enable only local execution. Will reject actions that cannot execute locally + + [env: BUCK_OFFLINE_BUILD=] + + --remote-only + Enable only remote execution. Will reject actions that cannot execute remotely + + --prefer-local + Enable hybrid execution. Will prefer executing actions that can execute locally on the + local host + + --prefer-remote + Enable hybrid execution. Will prefer executing actions that can execute remotely on RE and + will avoid racing local and remote execution + + --unstable-no-execution + Experimental: Disable all execution + + --no-remote-cache + Do not perform remote cache queries or cache writes. If remote execution is enabled, the + RE service might still deduplicate actions, so for e.g. benchmarking, using a random + isolation dir is preferred + + [env: BUCK_OFFLINE_BUILD=] + + --write-to-cache-anyway + Could be used to enable the action cache writes on the RE worker when no_remote_cache is + specified + + --eager-dep-files + Process dep files when they are generated (i.e. after running a command that produces dep + files), rather than when they are used (i.e. before re-running a command that previously + produced dep files). Use this when debugging commands that produce dep files. Note that + commands that previously produced dep files will not re-run: only dep files produced + during this command will be eagerly loaded + + --upload-all-actions + Uploads every action to the RE service, regardless of whether the action needs to execute + on RE. + + This is useful when debugging builds and trying to inspect actions which executed + remotely. It's possible that the action result is cached but the action itself has + expired. In this case, downloading the action itself would fail. Enabling this option + would unconditionally upload all actions, thus you will not hit any expiration issues. + + --fail-fast + If Buck hits an error, do as little work as possible before exiting. + + To illustrate the effect of this flag, consider an invocation of `build :foo :bar`. The + default behavior of buck is to do enough work to get a result for the builds of each of + `:foo` and `:bar`, and no more. This means that buck will continue to complete the build + of `:bar` after the build of `:foo` has failed; however, once one dependency of `:foo` has + failed, other dependencies will be cancelled unless they are needed by `:bar`. + + This flag changes the behavior of buck to not wait on `:bar` to complete once `:foo` has + failed. Generally, this flag only has an effect on builds that specify multiple targets. 
+ + `--keep-going` changes the behavior of buck to not only wait on `:bar` once one dependency + of `:foo` has failed, but to additionally attempt to build other dependencies of `:foo` if + possible. + + --keep-going + If Buck hits an error, continue doing as much work as possible before exiting. + + See `--fail-fast` for more details. + + --skip-missing-targets + If target is missing, then skip building instead of throwing error + + --skip-incompatible-targets + If target is incompatible with the specified configuration, skip building instead of + throwing error. This does not apply to targets specified with glob patterns `/...` or `:` + which are skipped unconditionally + + --materialize-failed-inputs + Materializes inputs for failed actions which ran on RE + + -h, --help + Print help (see a summary with '-h') + +Target Configuration Options: + --target-platforms + Configuration target (one) to use to configure targets + + -m, --modifier + A configuration modifier to configure all targets on the command line. This may be a + constraint value target. + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... 
+ Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-kill.golden.txt b/tests/core/help/test_help_data/buck2-help-kill.golden.txt new file mode 100644 index 0000000000000..746946317b007 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-kill.golden.txt @@ -0,0 +1,51 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Kill the buck daemon. + +Note there's also `buck2 killall` and `buck2 clean`. + +`buck2 killall` kills all the buck2 processes on the machine. + +`buck2 clean` kills the buck2 daemon and also deletes the buck2 state files. + +Usage: buck2 kill [OPTIONS] + +Options: + -h, --help + Print help (see a summary with '-h') + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. 
Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-killall.golden.txt b/tests/core/help/test_help_data/buck2-help-killall.golden.txt new file mode 100644 index 0000000000000..7736889ad0f9a --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-killall.golden.txt @@ -0,0 +1,45 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Kill all buck2 processes on the machine + +Usage: buck2 killall [OPTIONS] + +Options: + -h, --help + Print help (see a summary with '-h') + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-log-cmd.golden.txt b/tests/core/help/test_help_data/buck2-help-log-cmd.golden.txt new file mode 100644 index 0000000000000..9623a0efcf057 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-log-cmd.golden.txt @@ -0,0 +1,51 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Show buck command line arguments from selected invocation. + +This command output is not machine readable. Robots, please use `buck2 log show`. + +Usage: buck2 log cmd [OPTIONS] [PATH] + +Arguments: + [PATH] + A path to an event-log file to read from + +Options: + --recent + Open the event-log file from a recent command + + --trace-id + Show log by trace id + + --allow-remote + This option does nothing + + --no-remote + Do not allow downloading the log from manifold if it's not found locally + + --expand + Show @-expanded command line arguments instead of the original command line + + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. 
Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-log-critical-path.golden.txt b/tests/core/help/test_help_data/buck2-help-log-critical-path.golden.txt new file mode 100644 index 0000000000000..1c60325f25577 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-log-critical-path.golden.txt @@ -0,0 +1,60 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Show the critical path for a selected build. + +This produces tab-delimited output listing every node on the critical path. + +It includes the kind of node, its name, category and identifier, as well as total duration (runtime +of this node), user duration (duration the user can improve) and potential improvement before this +node stops being on the critical path. + +All durations are in microseconds. + +Usage: buck2 log critical-path [OPTIONS] [PATH] + +Arguments: + [PATH] + A path to an event-log file to read from + +Options: + --recent + Open the event-log file from a recent command + + --trace-id + Show log by trace id + + --allow-remote + This option does nothing + + --no-remote + Do not allow downloading the log from manifold if it's not found locally + + --format + Which output format to use for this command + + [default: tabulated] + [possible values: tabulated, json, csv] + + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-log-diff-action-divergence.golden.txt b/tests/core/help/test_help_data/buck2-help-log-diff-action-divergence.golden.txt new file mode 100644 index 0000000000000..4b0d4f23f36ba --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-log-diff-action-divergence.golden.txt @@ -0,0 +1,49 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Identifies the first divergent action between two builds. Divergence is identified by the same +action having differing outputs. Useful for identifying non-determinism + +Usage: buck2 log diff action-divergence [OPTIONS] <--path1 |--trace-id1 |--recent1 > <--path2 |--trace-id2 |--recent2 > + +Options: + --path1 + A path to an event-log file of the first build + + --trace-id1 + Trace id of the first build + + --recent1 + Open the event-log file from a recent command for the first build + + --path2 + A path to an event-log file of the second build + + --trace-id2 + Trace id of the second build + + --recent2 + Open the event-log file from a recent command for the second build + + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging.
+ + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-log-diff-configs.golden.txt b/tests/core/help/test_help_data/buck2-help-log-diff-configs.golden.txt new file mode 100644 index 0000000000000..048404b404ccd --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-log-diff-configs.golden.txt @@ -0,0 +1,28 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Usage: buck2 log diff configs [OPTIONS] + +Options: + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-log-diff-help.golden.txt b/tests/core/help/test_help_data/buck2-help-log-diff-help.golden.txt new file mode 100644 index 0000000000000..a2b5a3715635a --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-log-diff-help.golden.txt @@ -0,0 +1,8 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Print this message or the help of the given subcommand(s) + +Usage: buck2 log diff help [COMMAND]... + +Arguments: + [COMMAND]... Print help for the subcommand(s) diff --git a/tests/core/help/test_help_data/buck2-help-log-diff.golden.txt b/tests/core/help/test_help_data/buck2-help-log-diff.golden.txt new file mode 100644 index 0000000000000..17247f9932a00 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-log-diff.golden.txt @@ -0,0 +1,37 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Subcommands for diff'ing two buck2 commands + +Usage: buck2 log diff [OPTIONS] + +Commands: + action-divergence Identifies the first divergent action between two builds. Divergence is + identified by the same action having differing outputs. Useful for identifying + non-determinism + configs + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. 
Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-log-help.golden.txt b/tests/core/help/test_help_data/buck2-help-log-help.golden.txt new file mode 100644 index 0000000000000..1b36408f8fb16 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-log-help.golden.txt @@ -0,0 +1,8 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Print this message or the help of the given subcommand(s) + +Usage: buck2 log help [COMMAND]... + +Arguments: + [COMMAND]... Print help for the subcommand(s) diff --git a/tests/core/help/test_help_data/buck2-help-log-path.golden.txt b/tests/core/help/test_help_data/buck2-help-log-path.golden.txt new file mode 100644 index 0000000000000..06152ea111018 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-log-path.golden.txt @@ -0,0 +1,49 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Output the path to the selected log + +Usage: buck2 log path [OPTIONS] [PATH] + +Arguments: + [PATH] + A path to an event-log file to read from + +Options: + --recent + Open the event-log file from a recent command + + --trace-id + Show log by trace id + + --allow-remote + This option does nothing + + --no-remote + Do not allow downloading the log from manifold if it's not found locally + + --all + List all the logs + + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-log-replay.golden.txt b/tests/core/help/test_help_data/buck2-help-log-replay.golden.txt new file mode 100644 index 0000000000000..f9797932e67f2 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-log-replay.golden.txt @@ -0,0 +1,87 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Replay an event log. + +This command allows visualizing an existing event log in a Superconsole. + +Usage: buck2 log replay [OPTIONS] [PATH] [OVERRIDE_ARGS]... + +Arguments: + [PATH] + A path to an event-log file to read from + + [OVERRIDE_ARGS]... 
+ Override the arguments + +Options: + --recent + Open the event-log file from a recent command + + --trace-id + Show log by trace id + + --allow-remote + This option does nothing + + --no-remote + Do not allow downloading the log from manifold if it's not found locally + + --speed + Control the playback speed using a float (i.e. 0.5, 2, etc) + + --preload + Preload the event log. This is typically only useful for benchmarking + + -h, --help + Print help (see a summary with '-h') + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-log-show-user.golden.txt b/tests/core/help/test_help_data/buck2-help-log-show-user.golden.txt new file mode 100644 index 0000000000000..deae9d53b2a26 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-log-show-user.golden.txt @@ -0,0 +1,46 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Converts the event log from a selected invocation into a user event log, in JSONL format + +Usage: buck2 log show-user [OPTIONS] [PATH] + +Arguments: + [PATH] + A path to an event-log file to read from + +Options: + --recent + Open the event-log file from a recent command + + --trace-id + Show log by trace id + + --allow-remote + This option does nothing + + --no-remote + Do not allow downloading the log from manifold if it's not found locally + + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. 
Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-log-show.golden.txt b/tests/core/help/test_help_data/buck2-help-log-show.golden.txt new file mode 100644 index 0000000000000..6879ccb5ffa98 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-log-show.golden.txt @@ -0,0 +1,46 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Outputs the log in JSON format from selected invocation + +Usage: buck2 log show [OPTIONS] [PATH] + +Arguments: + [PATH] + A path to an event-log file to read from + +Options: + --recent + Open the event-log file from a recent command + + --trace-id + Show log by trace id + + --allow-remote + This option does nothing + + --no-remote + Do not allow downloading the log from manifold if it's not found locally + + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-log-summary.golden.txt b/tests/core/help/test_help_data/buck2-help-log-summary.golden.txt new file mode 100644 index 0000000000000..ebda63b858125 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-log-summary.golden.txt @@ -0,0 +1,46 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Outputs high level statistics about the build + +Usage: buck2 log summary [OPTIONS] [PATH] + +Arguments: + [PATH] + A path to an event-log file to read from + +Options: + --recent + Open the event-log file from a recent command + + --trace-id + Show log by trace id + + --allow-remote + This option does nothing + + --no-remote + Do not allow downloading the log from manifold if it's not found locally + + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. 
Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-log-what-failed.golden.txt b/tests/core/help/test_help_data/buck2-help-log-what-failed.golden.txt new file mode 100644 index 0000000000000..7d49971054a17 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-log-what-failed.golden.txt @@ -0,0 +1,71 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Outputs every command that failed in the selected invocation. + +Look at the help for what-ran to understand the output format. + +Usage: buck2 log what-failed [OPTIONS] [PATH] + +Arguments: + [PATH] + A path to an event-log file to read from + +Options: + --recent + Open the event-log file from a recent command + + --trace-id + Show log by trace id + + --allow-remote + This option does nothing + + --no-remote + Do not allow downloading the log from manifold if it's not found locally + + --format + Which output format to use for this command + + [default: tabulated] + [possible values: tabulated, json, csv] + + --emit-cache-queries + + + --skip-cache-hits + + + --skip-remote-executions + + + --skip-local-executions + + + --filter-category + Regular expression to filter commands by given action category (i.e. type of actions + that are similar but operate on different inputs, such as invocations of a C++ compiler + (whose category would be `cxx_compile`)). Matches by full string + + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-log-what-materialized.golden.txt b/tests/core/help/test_help_data/buck2-help-log-what-materialized.golden.txt new file mode 100644 index 0000000000000..d782d7c21a59d --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-log-what-materialized.golden.txt @@ -0,0 +1,61 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Outputs materializations from selected invocation. + +The output is a tab-separated list containing the path, the materialization method, the file count, +and the total size (after decompression).
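The tab-separated format described above lends itself to simple post-processing. As a rough illustration only (this sketch is not part of the generated fixture, and the field order is assumed from the description above), a Python filter over `buck2 log what-materialized` output might look like:

import sys

# Assumed record layout, per the help text above: path, materialization
# method, file count, total size (after decompression).
for line in sys.stdin:
    path, method, file_count, total_size = line.rstrip("\n").split("\t")
    print(f"{path}: {method}, {file_count} file(s), {total_size} bytes")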
+ +Usage: buck2 log what-materialized [OPTIONS] [PATH] + +Arguments: + [PATH] + A path to an event-log file to read from + +Options: + --recent + Open the event-log file from a recent command + + --trace-id + Show log by trace id + + --allow-remote + This option does nothing + + --no-remote + Do not allow downloading the log from manifold if it's not found locally + + -s, --sort-by-size + Sort the output by total bytes in ascending order + + --aggregate-by-ext + Aggregates the output by file extension + + --format + Which output format to use for this command + + [default: tabulated] + [possible values: tabulated, json, csv] + + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-log-what-ran.golden.txt b/tests/core/help/test_help_data/buck2-help-log-what-ran.golden.txt new file mode 100644 index 0000000000000..240660d187986 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-log-what-ran.golden.txt @@ -0,0 +1,104 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Output everything Buck2 ran from selected invocation. + +The output is presented as a series of tab-delimited records with the following structure: + +The reason for executing a given command. That's either to build or to test. + +The identity of this command. This will include the target that required it. + +The executor for this command. This will either be RE or local. + +Details to reproduce it. For RE, that's the action digest. For local, the command. + +To reproduce an action that ran on RE, use the following command then follow the instructions. The +DIGEST is of the form `hash:size`. + +frecli cas download-action DIGEST + +To reproduce an action that ran locally, make sure your working directory is the project root (if +unsure, use `buck2 root --kind project` to find it), then run the command. The command is already +shell-quoted. + +Usage: buck2 log what-ran [OPTIONS] [PATH] + +Arguments: + [PATH] + A path to an event-log file to read from + +Options: + --recent + Open the event-log file from a recent command + + --trace-id + Show log by trace id + + --allow-remote + This option does nothing + + --no-remote + Do not allow downloading the log from manifold if it's not found locally + + --format + Which output format to use for this command + + [default: tabulated] + [possible values: tabulated, json, csv] + + --emit-cache-queries + + + --skip-cache-hits + + + --skip-remote-executions + + + --skip-local-executions + + + --filter-category + Regular expression to filter commands by given action category (i.e. type of actions + that are similar but operate on different inputs, such as invocations of a C++ compiler + (whose category would be `cxx_compile`)).
Matches by full string + + --failed + Show only commands that failed + + --incomplete + Show only commands that were not completed. That is, commands that were running when the + buck2 process was killed, or commands currently running if a buck2 build is in progress now + + --show-std-err + Also show std_err from commands that are run. If the command fails before completing, we + display "". If it finishes but there is no error, we + display "". Otherwise, std_err is shown. For JSON, we show raw values and + null for non-completion + + --omit-empty-std-err + Omit commands if their std_err is empty + + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-log-what-up.golden.txt b/tests/core/help/test_help_data/buck2-help-log-what-up.golden.txt new file mode 100644 index 0000000000000..b65b9d0c7c6da --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-log-what-up.golden.txt @@ -0,0 +1,49 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Show the spans that were open when the log ended + +Usage: buck2 log what-up [OPTIONS] [PATH] + +Arguments: + [PATH] + A path to an event-log file to read from + +Options: + --recent + Open the event-log file from a recent command + + --trace-id + Show log by trace id + + --allow-remote + This option does nothing + + --no-remote + Do not allow downloading the log from manifold if it's not found locally + + --after + Print the actions that were open after a certain amount of milliseconds + + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging.
Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-log-what-uploaded.golden.txt b/tests/core/help/test_help_data/buck2-help-log-what-uploaded.golden.txt new file mode 100644 index 0000000000000..915ae845524a6 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-log-what-uploaded.golden.txt @@ -0,0 +1,55 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Outputs stats about uploads to RE from the selected invocation + +Usage: buck2 log what-uploaded [OPTIONS] [PATH] + +Arguments: + [PATH] + A path to an event-log file to read from + +Options: + --recent + Open the event-log file from a recent command + + --trace-id + Show log by trace id + + --allow-remote + This option does nothing + + --no-remote + Do not allow downloading the log from manifold if it's not found locally + + --format + Which output format to use for this command + + [default: tabulated] + [possible values: tabulated, json, csv] + + --aggregate-by-ext + Aggregates the output by file extension + + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-log.golden.txt b/tests/core/help/test_help_data/buck2-help-log.golden.txt new file mode 100644 index 0000000000000..30c4a29cf498b --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-log.golden.txt @@ -0,0 +1,47 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Commands for interacting with buck2 logs + +Usage: buck2 log [OPTIONS] + +Commands: + what-ran Output everything Buck2 ran from selected invocation + what-failed Outputs every command that failed in the selected invocation + path Output the path to the selected log + show Outputs the log in JSON format from selected invocation + cmd Show buck command line arguments from selected invocation + what-up Show the spans that were open when the log ended + what-materialized Outputs materializations from selected invocation + what-uploaded Outputs stats about uploads to RE from the selected invocation + critical-path Show the critical path for a selected build + replay Replay an event log + show-user Converts the event log from a selected invocation into a user event log, in + JSONL format + summary Outputs high level statistics about the build + diff Subcommands for diff'ing two buck2 commands + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. 
Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-lsp.golden.txt b/tests/core/help/test_help_data/buck2-help-lsp.golden.txt new file mode 100644 index 0000000000000..70aafd8dea4d3 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-lsp.golden.txt @@ -0,0 +1,108 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Start an LSP server for starlark files + +Usage: buck2 lsp [OPTIONS] + +Options: + -h, --help + Print help (see a summary with '-h') + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. 
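Every fixture in this directory carries the same `@generated` header naming `BUCK2_UPDATE_GOLDEN=1`. The compare-or-update contract that header implies can be sketched in Python; the helper below is hypothetical (the real logic lives in buck2's test framework), and only the environment variable name is taken from the header:

import os
from pathlib import Path

def assert_golden(actual: str, golden: Path) -> None:
    # Hypothetical sketch of the golden-file contract described in the header.
    if os.environ.get("BUCK2_UPDATE_GOLDEN") == "1":
        golden.write_text(actual)  # regenerate the fixture in place
        return
    if actual != golden.read_text():
        raise AssertionError(f"{golden} is stale; re-run with BUCK2_UPDATE_GOLDEN=1")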
+ +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-profile-analysis.golden.txt b/tests/core/help/test_help_data/buck2-help-profile-analysis.golden.txt new file mode 100644 index 0000000000000..19e7100ed3177 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-profile-analysis.golden.txt @@ -0,0 +1,186 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Profile analysis + +Usage: buck2 profile analysis [OPTIONS] --output --mode [TARGET_PATTERNS]... + +Arguments: + [TARGET_PATTERNS]... + + +Options: + -r, --recursive + In analysis profiling, capture the profile of the target and its dependencies, and output + the merged profile + + -o, --output + Output file path for profile data. + + File will be created if it does not exist, and overwritten if it does. + + --mode + Profile mode. + + Memory profiling modes have suffixes either `-allocated` or `-retained`. + + `-retained` means memory kept in frozen starlark heap after analysis completes. `-retained` + does not work when profiling loading, because no memory is retained after loading and + frozen heap is not even created. This is probably what you want when profiling analysis. + + `-allocated` means allocated memory, including memory which is later garbage collected. + + [possible values: time-flame, heap-flame-allocated, heap-flame-retained, + heap-summary-allocated, heap-summary-retained, statement, bytecode, bytecode-pairs, + typecheck, coverage] + + -h, --help + Print help (see a summary with '-h') + +Target Configuration Options: + -u, --target-universe + Comma separated list of targets to construct a configured target universe. + + When the option is specified, command targets are resolved in this universe. + Additionally, `--target-platforms=` and `--modifier=` flags are used to configure the + universe targets, not the command targets. + + This argument is particularly recommended on most non-trivial cqueries. In the absence of + this argument, buck2 will use the target literals in your cquery expression as the value + for + this argument, which may not be what you want. + + --target-platforms + Configuration target (one) to use to configure targets + + -m, --modifier + A configuration modifier to configure all targets on the command line. This may be a + constraint value target.
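Per the Usage line above, `--output` and `--mode` are required for `buck2 profile analysis`. A minimal driver, sketched in Python (the flag spellings and the `heap-flame-retained` mode come from this help text; the output path and target pattern are hypothetical):

import subprocess
from pathlib import Path

out = Path("analysis-profile.out")  # hypothetical output location
subprocess.run(
    [
        "buck2", "profile", "analysis",
        "--mode", "heap-flame-retained",  # a `-retained` mode, as suggested above for analysis
        "--output", str(out),
        "root//example:target",  # hypothetical target pattern
    ],
    check=True,
)
print(f"wrote {out.stat().st_size} bytes of profile data")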
+ +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. 
A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-profile-bxl.golden.txt b/tests/core/help/test_help_data/buck2-help-profile-bxl.golden.txt new file mode 100644 index 0000000000000..30a839330ea6f --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-profile-bxl.golden.txt @@ -0,0 +1,302 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Profile BXL script + +Usage: buck2 profile bxl [OPTIONS] --output --mode [-- ...] + +Arguments: + + The bxl function to execute as defined by the label of form + `//path/file.bxl:` + + [BXL INPUT ARGS]... + Arguments passed to the bxl script + +Options: + -M, --materializations + Materialize (or skip) the final artifacts, bypassing buckconfig. + + [possible values: all, none] + + --user-event-log + Write user events to this log file. Both user and internal events are written to main + event log. If this flag is specified, user events are additionally written to user event + log. Log format is JSONL, uncompressed if no known extensions are detected, or you can + explicitly specify the compression via the file extension (ex: `.json-lines.gz` would be + gzip compressed, `.json-lines.zst` would be zstd compressed). Resulting log is + compatible with `buck2 log show-user` + + --build-report + Print a build report + + `--build-report=-` will print the build report to stdout `--build-report=` will + write the build report to the file + + --enable-optional-validations + Comma separated list of validation names to run that are marked optional. + + By default, validations marked as optional are skipped. This option overrides the + behaviour and executes those validations. + + --build-report-options + Comma separated list of build report options. + + The following options are supported: + + `fill-out-failures`: fill out failures the same way Buck1 would. + + `package-project-relative-paths`: emit the project-relative path of packages for the + targets that were built. + + -j, --num-threads + Number of threads to use during execution (default is # cores) + + --local-only + Enable only local execution. Will reject actions that cannot execute locally + + [env: BUCK_OFFLINE_BUILD=] + + --remote-only + Enable only remote execution. Will reject actions that cannot execute remotely + + --prefer-local + Enable hybrid execution. Will prefer executing actions that can execute locally on the + local host + + --prefer-remote + Enable hybrid execution.
Will prefer executing actions that can execute remotely on RE and + will avoid racing local and remote execution + + --unstable-no-execution + Experimental: Disable all execution + + --no-remote-cache + Do not perform remote cache queries or cache writes. If remote execution is enabled, the + RE service might still deduplicate actions, so for e.g. benchmarking, using a random + isolation dir is preferred + + [env: BUCK_OFFLINE_BUILD=] + + --write-to-cache-anyway + Could be used to enable the action cache writes on the RE worker when no_remote_cache is + specified + + --eager-dep-files + Process dep files when they are generated (i.e. after running a command that produces dep + files), rather than when they are used (i.e. before re-running a command that previously + produced dep files). Use this when debugging commands that produce dep files. Note that + commands that previously produced dep files will not re-run: only dep files produced + during this command will be eagerly loaded + + --upload-all-actions + Uploads every action to the RE service, regardless of whether the action needs to execute + on RE. + + This is useful when debugging builds and trying to inspect actions which executed + remotely. It's possible that the action result is cached but the action itself has + expired. In this case, downloading the action itself would fail. Enabling this option + would unconditionally upload all actions, thus you will not hit any expiration issues. + + --fail-fast + If Buck hits an error, do as little work as possible before exiting. + + To illustrate the effect of this flag, consider an invocation of `build :foo :bar`. The + default behavior of buck is to do enough work to get a result for the builds of each of + `:foo` and `:bar`, and no more. This means that buck will continue to complete the build + of `:bar` after the build of `:foo` has failed; however, once one dependency of `:foo` has + failed, other dependencies will be cancelled unless they are needed by `:bar`. + + This flag changes the behavior of buck to not wait on `:bar` to complete once `:foo` has + failed. Generally, this flag only has an effect on builds that specify multiple targets. + + `--keep-going` changes the behavior of buck to not only wait on `:bar` once one dependency + of `:foo` has failed, but to additionally attempt to build other dependencies of `:foo` if + possible. + + --keep-going + If Buck hits an error, continue doing as much work as possible before exiting. + + See `--fail-fast` for more details. + + --skip-missing-targets + If target is missing, then skip building instead of throwing error + + --skip-incompatible-targets + If target is incompatible with the specified configuration, skip building instead of + throwing error. This does not apply to targets specified with glob patterns `/...` or `:` + which are skipped unconditionally + + --materialize-failed-inputs + Materializes inputs for failed actions which ran on RE + + -o, --output + Output file path for profile data. + + File will be created if it does not exist, and overwritten if it does. + + --mode + Profile mode. + + Memory profiling modes have suffixes either `-allocated` or `-retained`. + + `-retained` means memory kept in frozen starlark heap after analysis completes. `-retained` + does not work when profiling loading, because no memory is retained after loading and + frozen heap is not even created. This is probably what you want when profiling analysis.
+ + `-allocated` means allocated memory, including memory which is later garbage collected. + + [possible values: time-flame, heap-flame-allocated, heap-flame-retained, + heap-summary-allocated, heap-summary-retained, statement, bytecode, bytecode-pairs, + typecheck, coverage] + + -h, --help + Print help (see a summary with '-h') + +Target Configuration Options: + -u, --target-universe + Comma separated list of targets to construct a configured target universe. + + When the option is specified, command targets are resolved in this universe. + Additionally, `--target-platforms=` and `--modifier=` flags are used to configure the + universe targets, not the command targets. + + This argument is particularly recommended on most non-trivial cqueries. In the absence of + this argument, buck2 will use the target literals in your cquery expression as the value + for + this argument, which may not be what you want. + + --target-platforms + Configuration target (one) to use to configure targets + + -m, --modifier + A configuration modifier to configure all targets on the command line. This may be a + constraint value target. + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets.
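The `--client-metadata` contract repeated throughout these help texts is `key=value` with a snake_case `key`. A small validator makes the rule concrete; note the regex below is an assumption about what counts as snake_case, not the buck2 client's actual parser:

import re

# Assumption: snake_case means lowercase letters, digits and underscores,
# starting with a letter.
_KEY_RE = re.compile(r"^[a-z][a-z0-9_]*$")

def parse_client_metadata(entry: str) -> tuple[str, str]:
    key, sep, value = entry.partition("=")
    if not sep or not _KEY_RE.match(key):
        raise ValueError(f"expected key=value with a snake_case key, got {entry!r}")
    return key, value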
+ +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-profile-help.golden.txt b/tests/core/help/test_help_data/buck2-help-profile-help.golden.txt new file mode 100644 index 0000000000000..e9f6b0c02f6e5 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-profile-help.golden.txt @@ -0,0 +1,8 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Print this message or the help of the given subcommand(s) + +Usage: buck2 profile help [COMMAND]... + +Arguments: + [COMMAND]... Print help for the subcommand(s) diff --git a/tests/core/help/test_help_data/buck2-help-profile-loading.golden.txt b/tests/core/help/test_help_data/buck2-help-profile-loading.golden.txt new file mode 100644 index 0000000000000..a053fe6e92447 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-profile-loading.golden.txt @@ -0,0 +1,186 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Profile `BUCK` file evaluation + +Usage: buck2 profile loading [OPTIONS] --output --mode [TARGET_PATTERNS]... + +Arguments: + [TARGET_PATTERNS]... + + +Options: + -r, --recursive + In analysis profiling, capture the profile of the target and its dependencies, and output + the merged profile + + -o, --output + Output file path for profile data. + + File will be created if it does not exist, and overwritten if it does. + + --mode + Profile mode. + + Memory profiling modes have suffixes either `-allocated` or `-retained`. 
+ + `-retained` means memory kept in frozen starlark heap after analysis completes. `-retained` + does not work when profiling loading, because no memory is retained after loading and + frozen heap is not even created. This is probably what you want when profiling analysis. + + `-allocated` means allocated memory, including memory which is later garbage collected. + + [possible values: time-flame, heap-flame-allocated, heap-flame-retained, + heap-summary-allocated, heap-summary-retained, statement, bytecode, bytecode-pairs, + typecheck, coverage] + + -h, --help + Print help (see a summary with '-h') + +Target Configuration Options: + -u, --target-universe + Comma separated list of targets to construct a configured target universe. + + When the option is specified, command targets are resolved in this universe. + Additionally, `--target-platforms=` and `--modifier=` flags are used to configure the + universe targets, not the command targets. + + This argument is particularly recommended on most non-trivial cqueries. In the absence of + this argument, buck2 will use the target literals in your cquery expression as the value + for + this argument, which may not be what you want. + + --target-platforms + Configuration target (one) to use to configure targets + + -m, --modifier + A configuration modifier to configure all targets on the command line. This may be a + constraint value target. + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks.
+ + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-profile.golden.txt b/tests/core/help/test_help_data/buck2-help-profile.golden.txt new file mode 100644 index 0000000000000..4196f027df828 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-profile.golden.txt @@ -0,0 +1,36 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Run starlark profiler + +Usage: buck2 profile [OPTIONS] + +Commands: + analysis Profile analysis + loading Profile `BUCK` file evaluation + bxl Profile BXL script + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. 
Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-query.golden.txt b/tests/core/help/test_help_data/buck2-help-query.golden.txt new file mode 100644 index 0000000000000..455c5e8b6c774 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-query.golden.txt @@ -0,0 +1,193 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Alias for `uquery` + +Usage: buck2 query [OPTIONS] [QUERY_ARGS]... + +Arguments: + + the query to evaluate + + [QUERY_ARGS]... + list of literals for a multi-query (one containing `%s` or `%Ss`) + +Options: + -A, --output-all-attributes + Output all attributes, equivalent of --output-attribute ''. + + Avoid using this flag in automation because it may be expensive to produce certain + attributes, and because it makes it harder to track which special attributes are used. + + -B, --output-basic-attributes + Output basic attributes, namely those the user can supply, plus rule type and package name + + -a, --output-attribute + Regular expressions to match attributes. Regular expressions are used in "search" mode, so + for example empty string matches all attributes including special attributes. + + When using in automation, please specify the regular expression to match the attribute + precisely, for example `--output-attribute '^headers$'` to make it easier to track which + special attributes are used. + + --output-attributes ... + Deprecated: Use `--output-attribute` instead. + + List of space-separated attributes to output, --output-attributes attr1 attr2. + + --json + Output in JSON format + + --dot + Output in Graphviz Dot format + + --dot-compact + Output in a more compact format than Graphviz Dot + + --output-format + Output format (default: list). + + dot - dot graph format. + + dot_compact - compact alternative to dot format. + + json - JSON format. + + starlark - targets are printed like starlark code that would produce them. + + + [possible values: dot, json, dot_compact, starlark] + + --modifier + This option is not used + + -h, --help + Print help (see a summary with '-h') + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel.
The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. 
Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-rage.golden.txt b/tests/core/help/test_help_data/buck2-help-rage.golden.txt new file mode 100644 index 0000000000000..da07f93e73af3 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-rage.golden.txt @@ -0,0 +1,48 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Record information about the previous failed buck2 command + +Usage: buck2 rage [OPTIONS] + +Options: + --timeout + Stop collecting information after `` seconds + + [default: 120] + + --invocation-offset + Use value 0 to select the last invocation, 1 to select the second to last, and so on + + --invocation-id + Select an invocation directly using the invocation's UUID + + --no-invocation + Collect a rage report about buck2 in general, not about a specific invocation + + --no-paste + We may want to omit the paste if this is not a user or buck2 is called on a machine with no pastry + command + + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-root.golden.txt b/tests/core/help/test_help_data/buck2-help-root.golden.txt new file mode 100644 index 0000000000000..2090776b98578 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-root.golden.txt @@ -0,0 +1,40 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Find buck cell, project or package root + +Usage: buck2 root [OPTIONS] + +Options: + -k, --kind + which root to print + + [default: cell] + [possible values: package, cell, project, daemon] + + --dir + determine the root for a specific directory (if not provided, finds the root for the + current directory) + + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging.
Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-run.golden.txt b/tests/core/help/test_help_data/buck2-help-run.golden.txt new file mode 100644 index 0000000000000..024251ad2e9f2 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-run.golden.txt @@ -0,0 +1,269 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Build and run the selected target. + +The Build ID for the underlying build execution is made available to the target in the +`BUCK_RUN_BUILD_ID` environment variable. + +Usage: buck2 run [OPTIONS] [TARGET_ARGS]... + +Arguments: + + Target to build and run + + [TARGET_ARGS]... + Additional arguments passed to the target when running it + +Options: + --command-args-file + Write the command to a file instead of executing it. + + --chdir + Set the current working directory of the executable being run + + --emit-shell + Instead of running the command, print out the command formatted for shell interpolation, + use as: $(buck2 run --emit-shell ...) + + --build-report + Print a build report + + `--build-report=-` will print the build report to stdout `--build-report=` will + write the build report to the file + + --enable-optional-validations + Comma separated list of validation names to run that are marked optional. + + By default, validations marked as optional are skipped. This option overrides the + behaviour and executes those validations. + + --build-report-options + Comma separated list of build report options. + + The following options are supported: + + `fill-out-failures`: fill out failures the same way Buck1 would. + + `package-project-relative-paths`: emit the project-relative path of packages for the + targets that were built. + + -j, --num-threads + Number of threads to use during execution (default is # cores) + + --local-only + Enable only local execution. Will reject actions that cannot execute locally + + [env: BUCK_OFFLINE_BUILD=] + + --remote-only + Enable only remote execution. Will reject actions that cannot execute remotely + + --prefer-local + Enable hybrid execution. Will prefer executing actions that can execute locally on the + local host + + --prefer-remote + Enable hybrid execution. Will prefer executing actions that can execute remotely on RE and + will avoid racing local and remote execution + + --unstable-no-execution + Experimental: Disable all execution + + --no-remote-cache + Do not perform remote cache queries or cache writes. If remote execution is enabled, the + RE service might still deduplicate actions, so for e.g. benchmarking, using a random + isolation dir is preferred + + [env: BUCK_OFFLINE_BUILD=] + + --write-to-cache-anyway + Could be used to enable the action cache writes on the RE worker when no_remote_cache is + specified + + --eager-dep-files + Process dep files when they are generated (i.e. after running a command that produces dep + files), rather than when they are used (i.e. before re-running a command that previously + produced dep files). Use this when debugging commands that produce dep files. Note that + commands that previously produced dep files will not re-run: only dep files produced + during this command will be eagerly loaded + + --upload-all-actions + Uploads every action to the RE service, regardless of whether the action needs to execute + on RE. 
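As a sketch of how this might look in practice (the target is hypothetical):

    buck2 run --upload-all-actions //foo:bar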
+ + This is useful when debugging builds and trying to inspect actions which executed + remotely. It's possible that the action result is cached but the action itself has + expired. In this case, downloading the action itself would fail. Enabling this option + would unconditionally upload all actions, thus you will not hit any expiration issues. + + --fail-fast + If Buck hits an error, do as little work as possible before exiting. + + To illustrate the effect of this flag, consider an invocation of `build :foo :bar`. The + default behavior of buck is to do enough work to get a result for the builds of each of + `:foo` and `:bar`, and no more. This means that buck will continue to complete the build + of `:bar` after the build of `:foo` has failed; however, once one dependency of `:foo` has + failed, other dependencies will be cancelled unless they are needed by `:bar`. + + This flag changes the behavior of buck to not wait on `:bar` to complete once `:foo` has + failed. Generally, this flag only has an effect on builds that specify multiple targets. + + `--keep-going` changes the behavior of buck to not only wait on `:bar` once one dependency + of `:foo` has failed, but to additionally attempt to build other dependencies of `:foo` if + possible. + + --keep-going + If Buck hits an error, continue doing as much work as possible before exiting. + + See `--fail-fast` for more details. + + --skip-missing-targets + If target is missing, then skip building instead of throwing error + + --skip-incompatible-targets + If target is incompatible with the specified configuration, skip building instead of + throwing error. This does not apply to targets specified with glob patterns `/...` or `:` + which are skipped unconditionally + + --materialize-failed-inputs + Materializes inputs for failed actions which ran on RE + + -h, --help + Print help (see a summary with '-h') + +Target Configuration Options: + --target-platforms + Configuration target (one) to use to configure targets + + -m, --modifier + A configuration modifier to configure all targets on the command line. This may be a + constraint value target. + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. 
+ + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-server.golden.txt b/tests/core/help/test_help_data/buck2-help-server.golden.txt new file mode 100644 index 0000000000000..b378c5046f91a --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-server.golden.txt @@ -0,0 +1,30 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Start, query, and control the http server + +Usage: buck2 server [OPTIONS] + +Options: + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. 
Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-starlark-debug-attach.golden.txt b/tests/core/help/test_help_data/buck2-help-starlark-debug-attach.golden.txt new file mode 100644 index 0000000000000..c2f9ca48d9b78 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-starlark-debug-attach.golden.txt @@ -0,0 +1,111 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Run the starlark debug adapter protocol server + +This forwards requests received on stdin to a debug server running in the buck daemon. DAP events +and responses are returned from the daemon and sent to this command's stdout. + +Usage: buck2 starlark debug-attach [OPTIONS] + +Options: + -h, --help + Print help (see a summary with '-h') + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. 
+ + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-starlark-help.golden.txt b/tests/core/help/test_help_data/buck2-help-starlark-help.golden.txt new file mode 100644 index 0000000000000..c706c7d3577cf --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-starlark-help.golden.txt @@ -0,0 +1,8 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Print this message or the help of the given subcommand(s) + +Usage: buck2 starlark help [COMMAND]... + +Arguments: + [COMMAND]... Print help for the subcommand(s) diff --git a/tests/core/help/test_help_data/buck2-help-starlark-lint.golden.txt b/tests/core/help/test_help_data/buck2-help-starlark-lint.golden.txt new file mode 100644 index 0000000000000..7ab7ad680d485 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-starlark-lint.golden.txt @@ -0,0 +1,141 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Run the Starlark linter. + +Usage: buck2 starlark lint [OPTIONS] ... + +Options: + -h, --help + Print help (see a summary with '-h') + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. 
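For instance (file path hypothetical), a long lint run that should yield to subsequent commands could be started as:

    buck2 starlark lint --preemptible=ondifferentstate tools/defs.bzl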
+ + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + + ... + + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. 
Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-starlark-typecheck.golden.txt b/tests/core/help/test_help_data/buck2-help-starlark-typecheck.golden.txt new file mode 100644 index 0000000000000..baa26d1d3ab28 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-starlark-typecheck.golden.txt @@ -0,0 +1,141 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Run the Starlark typechecker. + +Usage: buck2 starlark typecheck [OPTIONS] ... + +Options: + -h, --help + Print help (see a summary with '-h') + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. 
Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + + ... + + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-starlark.golden.txt b/tests/core/help/test_help_data/buck2-help-starlark.golden.txt new file mode 100644 index 0000000000000..418b83b792ee5 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-starlark.golden.txt @@ -0,0 +1,36 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Run Starlark operations + +Usage: buck2 starlark [OPTIONS] + +Commands: + lint Run the Starlark linter. + typecheck Run the Starlark typechecker. + debug-attach Run the starlark debug adapter protocol server + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-status.golden.txt b/tests/core/help/test_help_data/buck2-help-status.golden.txt new file mode 100644 index 0000000000000..b62bbfec79fe4 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-status.golden.txt @@ -0,0 +1,36 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Buckd status + +Usage: buck2 status [OPTIONS] + +Options: + --snapshot + Whether to include a state snapshot in the output. 
+ + --all + Enable printing status for all running buckd + + -h, --help + Print help (see a summary with '-h') + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-subscribe.golden.txt b/tests/core/help/test_help_data/buck2-help-subscribe.golden.txt new file mode 100644 index 0000000000000..ae1f2c2745b7a --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-subscribe.golden.txt @@ -0,0 +1,126 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Open a subscription channel to the Buck2 daemon. This allows you to interact with the Buck2 daemon +via the `stdin` and `stdout` of this command: you send requests to the daemon by writing to `stdin`, +and you get responses via `stdout`. + +The protocol used by this command is length-prefixed protobuf. This format is a repeated series of a +varint followed by a record of the length indicated by said varint. + +The protobuf spec for those records is described in `buck2_subscription_proto/subscription.proto`. +The client writes `SubscriptionRequest` and reads `SubscriptionResponse`. See the documentation in +`subscription.proto` to discover available APIs. + +This API does not (currently) allow invalid requests and will error out when one is sent. + +Usage: buck2 subscribe [OPTIONS] + +Options: + --active-commands + Whether to request command snapshots + + --unstable-json + Whether to get output as JSON. The JSON format is deemed unstable so this should only be + used for debugging + + -h, --help + Print help (see a summary with '-h') + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. 
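As a concrete sketch of the length-prefixed framing documented above for `buck2 subscribe`, the following Python fragment reads varint-delimited records from a stream; it assumes the command's stdout is piped to this script's stdin, and decoding the `SubscriptionResponse` protobuf itself is elided:

    import sys

    def read_varint(stream):
        # Protobuf-style varint: 7 payload bits per byte, with the high bit
        # set on every byte except the last.
        shift, value = 0, 0
        while True:
            b = stream.read(1)
            if not b:
                return None  # clean EOF between records
            value |= (b[0] & 0x7F) << shift
            if not b[0] & 0x80:
                return value
            shift += 7

    stdin = sys.stdin.buffer
    while True:
        length = read_varint(stdin)
        if length is None:
            break
        record = stdin.read(length)
        # `record` is one serialized SubscriptionResponse; decode it with
        # bindings generated from buck2_subscription_proto/subscription.proto.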
However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-targets.golden.txt b/tests/core/help/test_help_data/buck2-help-targets.golden.txt new file mode 100644 index 0000000000000..e32078312157b --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-targets.golden.txt @@ -0,0 +1,282 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Alias for `utargets` + +Usage: buck2 targets [OPTIONS] [TARGET_PATTERNS]... + +Arguments: + [TARGET_PATTERNS]... + Patterns to interpret + +Options: + --json + Print targets as JSON + + --json-lines + Print targets as JSON-lines + + --stats + Print statistics of how many entries were processed + + --resolve-alias + Print the fully-qualified build target for the specified aliases + + --show-target-hash + Print a stable hash of each target after the target name + + --show-unconfigured-target-hash + Print a stable unconfigured hash of each target after the target name + + --target-hash-file-mode + Modifies computation of target hashes. If set to `PATHS_AND_CONTENTS` (the default), the + contents of all files referenced from the targets will be used to compute the target hash. 
+ If set to `PATHS_ONLY`, only files' paths contribute to the hash. If set to `NONE`, no + files will be used. See also --target-hash-modified-paths + + --target-hash-modified-paths ... + Modifies computation of target hashes. Only effective when --target-hash-file-mode is set + to `PATHS_ONLY`. If a target or its dependencies reference a file from this set, the + target's hash will be different than if this option was omitted. Otherwise, the target's + hash will be the same as if this option was omitted + + --target-hash-function + Selects either the "fast" or the "strong" target hash function to be used for computing + target hashes. While we don't specify the exact algorithm, the "strong" algorithm should + be a reasonable cryptographic hash (ex. blake3) while the "fast" function will likely be a + non-crypto hash. Both functions are guaranteed to be deterministic and to have the same + value across different platforms/architectures + + [default: fast] + [possible values: sha1, sha256, murmur-hash3, fast, strong] + + --target-hash-recursive + When true, emit the hash of the target node and all dependencies recursively. When false, hash + only the target node + + [default: true] + [possible values: true, false] + + -A, --output-all-attributes + Output all attributes, equivalent of --output-attribute ''. + + Avoid using this flag in automation because it may be expensive to produce certain + attributes, and because it makes it harder to track which special attributes are used. + + -B, --output-basic-attributes + Output basic attributes, namely those the user can supply, plus rule type and package name + + -a, --output-attribute + Regular expressions to match attributes. Regular expressions are used in "search" mode, so + for example empty string matches all attributes including special attributes. + + When using in automation, please specify the regular expression to match the attribute + precisely, for example `--output-attribute '^headers$'` to make it easier to track which + special attributes are used. + + --output-attributes ... + Deprecated: Use `--output-attribute` instead. + + List of space-separated attributes to output, --output-attributes attr1 attr2. + + --include-defaults + Enables printing of default attributes. This would be attributes in a target that aren't + explicitly set in the target but instead use the default set in the rule declaration + + --show-output + Print the path to the output for each of the rules relative to the project root + + --show-full-output + Print the absolute path to the output for each of the rules + + --show-simple-output + Print only the path to the output for each of the rules relative to the project root + + --show-full-simple-output + Print only the absolute path to the output for each of the rules + + --show-json-output + Print the output paths relative to the project root, in JSON format + + --show-full-json-output + Print the output absolute paths, in JSON format + + --keep-going + On loading errors, put buck.error in the output stream and continue + + --streaming + Write output as soon as it is available. The order of the output items is + non-deterministic and if multiple patterns cover the same target, may have duplicates + + --no-cache + Don't cache the target information on the build graph + + --imports + Show the imports of each package/import. Shows an additional output per package/import + (not per target), including implicit dependencies (e.g.
the prelude) but only direct + dependencies (not the transitive closure) + + --package-values + Show the package values. Produces an additional attribute representing all the package + values for the package containing the target + + --package-values-regex + Regular expressions to match package values. Produces an additional attribute representing + package values for the package containing the target. Regular expressions are used in + "search" mode so, for example, empty string matches all package values + + -o, --output + File to put the output in, rather than sending to stdout. + + File will be created if it does not exist, and overwritten if it does. + + --compression + Compress the output + + [default: none] + [possible values: none, gzip, zstd] + + -j, --num-threads + Number of threads to use during execution (default is # cores) + + -h, --help + Print help (see a summary with '-h') + +Target Configuration Options: + --target-platforms + Configuration target (one) to use to configure targets + + -m, --modifier + A configuration modifier to configure all targets on the command line. This may be a + constraint value target. + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. 
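As an example of combining the output controls above (the package pattern is hypothetical):

    buck2 targets //foo/... --json --output-attribute '^deps$' --keep-going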
+ +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-test.golden.txt b/tests/core/help/test_help_data/buck2-help-test.golden.txt new file mode 100644 index 0000000000000..33b89d1b24500 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-test.golden.txt @@ -0,0 +1,326 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Build and test the specified targets + +Usage: buck2 test [OPTIONS] [TARGET_PATTERNS]... [-- ...] + +Arguments: + [TARGET_PATTERNS]... + Patterns to test + + [TEST_EXECUTOR_ARGS]... + Additional arguments passed to the test executor. + + Test executor is expected to have `--env` flag to pass environment variables. Can be used + like this: + + buck2 test //foo:bar -- --env PRIVATE_KEY=123 + +Options: + --exclude ... + Labels on targets to exclude from tests + + --include ... + Labels on targets to include from tests. Prefixing with `!` means to exclude. First match + wins unless overridden by `always-exclude` flag. + If include patterns are present, regardless of whether exclude patterns are present, then + all targets are by default excluded unless explicitly included. + + --always-exclude + Whether to always exclude if the label appears in `exclude`, regardless of which appears + first + + --build-filtered + Whether to build tests that are excluded via labels. 
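For example (label names hypothetical), the include/exclude flags above compose like this:

    buck2 test //foo/... --include unit '!slow' --exclude flaky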
+ + --unstable-allow-compatible-tests-on-re + Will allow tests that are compatible with RE (set up to run from the repo root and use + relative paths) to run from RE + + --unstable-allow-all-tests-on-re + Will run tests on RE even if they are missing required settings (running from the root + + relative paths). Those required settings just get overridden + + --overall-timeout + How long to execute tests for. If the timeout is exceeded, Buck2 will exit as quickly as + possible and not run further tests. In-flight tests will be cancelled. The test + orchestrator will be allowed to shut down gracefully. + + The exit code is controlled by the test orchestrator (which normally should report zero + for this). + + The format is a concatenation of time spans (separated by spaces). Each time span is an + integer number and a suffix. + + Relevant supported suffixes: seconds, second, sec, s, minutes, minute, min, m, hours, + hour, hr, h + + For example: `5m 10s`, `500s`. + + --test-executor-stdout + Writes the test executor stdout to the provided path + + --test-executor-stdout=- will write to stdout + + --test-executor-stdout=FILEPATH will write to the provided filepath, overwriting the + current file if it exists + + By default the test executor's stdout stream is captured + + --ignore-tests-attribute + Normally testing will follow the `tests` attribute of all targets, to find their + associated tests. When passed, this flag will disable that, and only run the directly + supplied targets + + --test-executor-stderr + Writes the test executor stderr to the provided path + + --test-executor-stderr=- will write to stderr + + --test-executor-stderr=FILEPATH will write to the provided filepath, overwriting the + current file if it exists + + By default the test executor's stderr stream is captured + + --build-report + Print a build report + + `--build-report=-` will print the build report to stdout `--build-report=` will + write the build report to the file + + --enable-optional-validations + Comma separated list of validation names to run that are marked optional. + + By default, validations marked as optional are skipped. This option overrides the + behaviour and executes those validations. + + --build-report-options + Comma separated list of build report options. + + The following options are supported: + + `fill-out-failures`: fill out failures the same way Buck1 would. + + `package-project-relative-paths`: emit the project-relative path of packages for the + targets that were built. + + -j, --num-threads + Number of threads to use during execution (default is # cores) + + --local-only + Enable only local execution. Will reject actions that cannot execute locally + + [env: BUCK_OFFLINE_BUILD=] + + --remote-only + Enable only remote execution. Will reject actions that cannot execute remotely + + --prefer-local + Enable hybrid execution. Will prefer executing actions that can execute locally on the + local host + + --prefer-remote + Enable hybrid execution. Will prefer executing actions that can execute remotely on RE and + will avoid racing local and remote execution + + --unstable-no-execution + Experimental: Disable all execution + + --no-remote-cache + Do not perform remote cache queries or cache writes. If remote execution is enabled, the + RE service might still deduplicate actions, so for e.g.
benchmarking, using a random + isolation dir is preferred + + [env: BUCK_OFFLINE_BUILD=] + + --write-to-cache-anyway + Could be used to enable the action cache writes on the RE worker when no_remote_cache is + specified + + --eager-dep-files + Process dep files when they are generated (i.e. after running a command that produces dep + files), rather than when they are used (i.e. before re-running a command that previously + produced dep files). Use this when debugging commands that produce dep files. Note that + commands that previously produced dep files will not re-run: only dep files produced + during this command will be eagerly loaded + + --upload-all-actions + Uploads every action to the RE service, regardless of whether the action needs to execute + on RE. + + This is useful when debugging builds and trying to inspect actions which executed + remotely. It's possible that the action result is cached but the action itself has + expired. In this case, downloading the action itself would fail. Enabling this option + would unconditionally upload all actions, thus you will not hit any expiration issues. + + --fail-fast + If Buck hits an error, do as little work as possible before exiting. + + To illustrate the effect of this flag, consider an invocation of `build :foo :bar`. The + default behavior of buck is to do enough work to get a result for the builds of each of + `:foo` and `:bar`, and no more. This means that buck will continue to complete the build + of `:bar` after the build of `:foo` has failed; however, once one dependency of `:foo` has + failed, other dependencies will be cancelled unless they are needed by `:bar`. + + This flag changes the behavior of buck to not wait on `:bar` to complete once `:foo` has + failed. Generally, this flag only has an effect on builds that specify multiple targets. + + `--keep-going` changes the behavior of buck to not only wait on `:bar` once one dependency + of `:foo` has failed, but to additionally attempt to build other dependencies of `:foo` if + possible. + + --keep-going + If Buck hits an error, continue doing as much work as possible before exiting. + + See `--fail-fast` for more details. + + --skip-missing-targets + If target is missing, then skip building instead of throwing error + + --skip-incompatible-targets + If target is incompatible with the specified configuration, skip building instead of + throwing error. This does not apply to targets specified with glob patterns `/...` or `:` + which are skipped unconditionally + + --materialize-failed-inputs + Materializes inputs for failed actions which ran on RE + + -h, --help + Print help (see a summary with '-h') + +Target Configuration Options: + --target-platforms + Configuration target (one) to use to configure targets + + -m, --modifier + A configuration modifier to configure all targets on the command line. This may be a + constraint value target. + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. 
+ + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. 
Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-uquery.golden.txt b/tests/core/help/test_help_data/buck2-help-uquery.golden.txt new file mode 100644 index 0000000000000..35895aa9ea8bf --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-uquery.golden.txt @@ -0,0 +1,225 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Perform queries on the unconfigured target graph + +The unconfigured target graph consists of the targets as they are +defined in the build files. In this graph, each target appears +exactly once and `select()`s are in the unresolved form. For large +queries, the unconfigured graph may be much smaller than the +configured graph and queries can be much more efficiently performed +there. + +When querying the unconfigured graph, dependencies appearing in all +branches of `select()` dictionaries will be treated as dependencies. + +Run `buck2 docs uquery` or +https://www.internalfb.com/intern/staticdocs/buck2/docs/users/query/uquery/ +for more documentation about the functions available in uquery +expressions. + +Examples: + +Print all the attributes of a target + +`buck2 uquery //java/com/example/app:amazing --output-all-attributes` + +List the deps of a target (special characters in a target will require quotes): +`buck2 uquery 'deps("//java/com/example/app:amazing+more")'` + +select() encoding: + +When printed, values with `select()`s use a special json encoding. + +`1 + select({"//:a": 1, "DEFAULT": 2})` will be encoded as: + +`{"__type": "concat", "items": [1, {"__type": "selector", "entries": {"//:a": 1, "DEFAULT": 2}}]}` + +Usage: buck2 uquery [OPTIONS] [QUERY_ARGS]... + +Arguments: + + the query to evaluate + + [QUERY_ARGS]... + list of literals for a multi-query (one containing `%s` or `%Ss`) + +Options: + -A, --output-all-attributes + Output all attributes, equivalent of --output-attribute ''. + + Avoid using this flag in automation because it may be expensive to produce certain + attributes, and because it makes it harder to track which special attributes are used. + + -B, --output-basic-attributes + Output basic attributes, namely those the user can supply, plus rule type and package name + + -a, --output-attribute + Regular expressions to match attributes. Regular expressions are used in "search" mode, so + for example empty string matches all attributes including special attributes. + + When using in automation, please specify the regular expression to match the attribute + precisely, for example `--output-attribute '^headers$'` to make it easier to track which + special attributes are used. + + --output-attributes ... + Deprecated: Use `--output-attribute` instead. + + List of space-separated attributes to output, --output-attributes attr1 attr2. + + --json + Output in JSON format + + --dot + Output in Graphviz Dot format + + --dot-compact + Output in a more compact format than Graphviz Dot + + --output-format + Output format (default: list). + + dot - dot graph format. + + dot_compact - compact alternative to dot format. + + json - JSON format. + + starlark - targets are printed like starlark code that would produce them.
+ + + [possible values: dot, json, dot_compact, starlark] + + --modifier + This option is not used + + -h, --help + Print help (see a summary with '-h') + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. + + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. 
No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help-utargets.golden.txt b/tests/core/help/test_help_data/buck2-help-utargets.golden.txt new file mode 100644 index 0000000000000..0f23ab24bcdf1 --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help-utargets.golden.txt @@ -0,0 +1,285 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Show details about the specified targets. + +This command is meant to only handle unconfigured targets, but for historical reasons, with certain +flags it can also work with configured targets. + +Usage: buck2 utargets [OPTIONS] [TARGET_PATTERNS]... + +Arguments: + [TARGET_PATTERNS]... + Patterns to interpret + +Options: + --json + Print targets as JSON + + --json-lines + Print targets as JSON-lines + + --stats + Print statistics of how many entries were processed + + --resolve-alias + Print the fully-qualified build target for the specified aliases + + --show-target-hash + Print a stable hash of each target after the target name + + --show-unconfigured-target-hash + Print a stable unconfigured hash of each target after the target name + + --target-hash-file-mode + Modifies computation of target hashes. If set to `PATHS_AND_CONTENTS` (the default), the + contents of all files referenced from the targets will be used to compute the target hash. + If set to `PATHS_ONLY`, only files' paths contribute to the hash. If set to `NONE` no + files will be used. See also --target-hash-modified-paths + + [default: paths_and_contents] + [possible values: paths_only, paths_and_contents, none] + + --target-hash-modified-paths ... + Modifies computation of target hashes. Only effective when --target-hash-file-mode is set + to `PATHS_ONLY`. If a target or its dependencies reference a file from this set, the + target's hash will be different than if this option was omitted. Otherwise, the target's + hash will be the same as if this option was omitted + + --target-hash-function + Selects either the "fast" or the "strong" target hash function to be used for computing + target hashes. While we don't specify the exact algorithm, the "strong" algorithm should + be a reasonable cryptographic hash (ex. blake3) while the "fast" function will likely be a + non-crypto hash. 
Both functions are guaranteed to be deterministic and to have the same + value across different platforms/architectures + + [default: fast] + [possible values: sha1, sha256, murmur-hash3, fast, strong] + + --target-hash-recursive + When true, emit the hash of the target node and all dependencies recursively. When false, hash + only the target node + + [default: true] + [possible values: true, false] + + -A, --output-all-attributes + Output all attributes, equivalent of --output-attribute ''. + + Avoid using this flag in automation because it may be expensive to produce certain + attributes, and because it makes it harder to track which special attributes are used. + + -B, --output-basic-attributes + Output basic attributes, namely those the user can supply, plus rule type and package name + + -a, --output-attribute + Regular expressions to match attributes. Regular expressions are used in "search" mode, so + for example empty string matches all attributes including special attributes. + + When using in automation, please specify the regular expression to match the attribute + precisely, for example `--output-attribute '^headers$'` to make it easier to track which + special attributes are used. + + --output-attributes ... + Deprecated: Use `--output-attribute` instead. + + List of space-separated attributes to output, --output-attributes attr1 attr2. + + --include-defaults + Enables printing of default attributes. This would be attributes in a target that aren't + explicitly set in the target but instead use the default set in the rule declaration + + --show-output + Print the path to the output for each of the rules relative to the project root + + --show-full-output + Print the absolute path to the output for each of the rules + + --show-simple-output + Print only the path to the output for each of the rules relative to the project root + + --show-full-simple-output + Print only the absolute path to the output for each of the rules + + --show-json-output + Print the output paths relative to the project root, in JSON format + + --show-full-json-output + Print the output absolute paths, in JSON format + + --keep-going + On loading errors, put buck.error in the output stream and continue + + --streaming + Write output as soon as it is available. The order of the output items is + non-deterministic and, if multiple patterns cover the same target, it may contain duplicates + + --no-cache + Don't cache the target information on the build graph + + --imports + Show the imports of each package/import. Shows an additional output per package/import + (not per target), including implicit dependencies (e.g. the prelude) but only direct + dependencies (not the transitive closure) + + --package-values + Show the package values. Produces an additional attribute representing all the package + values for the package containing the target + + --package-values-regex + Regular expressions to match package values. Produces an additional attribute representing + package values for the package containing the target. Regular expressions are used in + "search" mode so, for example, empty string matches all package values + + -o, --output + File to put the output in, rather than sending to stdout. + + File will be created if it does not exist, and overwritten if it does.
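(A hedged sketch of the `-o, --output` flag documented above, in the same e2e style as the tests in this diff; the `//...` pattern, the test name, and the temporary path are illustrative assumptions.)

    from pathlib import Path

    from buck2.tests.e2e_util.api.buck import Buck
    from buck2.tests.e2e_util.buck_workspace import buck_test

    @buck_test()
    async def test_targets_output_file(buck: Buck, tmp_path: Path) -> None:
        out = tmp_path / "targets.json"
        # `--output` writes the results to the file (creating or overwriting
        # it) rather than printing them to stdout.
        await buck.targets("--json", "--output", str(out), "//...")
        assert out.read_text() != ""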
+ + --compression + Compress the output + + [default: none] + [possible values: none, gzip, zstd] + + -j, --num-threads + Number of threads to use during execution (default is # cores) + + -h, --help + Print help (see a summary with '-h') + +Target Configuration Options: + --target-platforms + Configuration target (one) to use to configure targets + + -m, --modifier + A configuration modifier to configure all targets on the command line. This may be a + constraint value target. + +Buckconfig Options: + -c, --config + List of config options + + --config-file + List of config file paths + + --fake-host + [possible values: default, linux, macos, windows] + + --fake-arch + [possible values: default, aarch64, x8664] + + --fake-xcode-version + Value must be formatted as: version-build (e.g., 14.3.0-14C18 or 14.1-14B47b) + + --reuse-current-config + Re-uses any `--config` values (inline or via modefiles) if there's a previous command, + otherwise the flag is ignored. + + If there is a previous command and `--reuse-current-config` is set, then the old config is + used, ignoring any overrides. + + If there is no previous command but the flag was set, then the flag is ignored, the + command behaves as if the flag was not set at all. + + --exit-when-different-state + Used for exiting a concurrent command when a different state is detected + + --preemptible + Used to configure when this command could be preempted by another command for the same + isolation dir. + + Normally, when you run two commands - from different terminals, say - buck2 will attempt + to run them in parallel. However, if the two commands are based on different state, that + is they either have different configs or different filesystem states, buck2 cannot run + them in parallel. The default behavior in this case is to block the second command until + the first completes. + + Possible values: + - never: (default) When another command starts that cannot run in parallel with + this one, block that command + - always: When another command starts, interrupt this command, *even if they + could run in parallel*. There is no good reason to use this other than that it provides + slightly nicer superconsole output + - ondifferentstate: When another command starts that cannot run in parallel with this one, + interrupt this command + +Starlark Options: + --disable-starlark-types + Disable runtime type checking in Starlark interpreter. + + This option is not stable, and can be used only locally to diagnose evaluation performance + problems. + + --stack + Record or show target call stacks. + + Starlark call stacks will be included in duplicate targets error. + + If a command outputs targets (like `targets` command), starlark call stacks will be + printed after the targets. + +Console Options: + --console + Which console to use for this command + + [env: BUCK_CONSOLE=] + [default: auto] + [possible values: auto, none, simple, simplenotty, simpletty, super] + + --ui ... + Configure additional superconsole ui components. + + Accepts a comma-separated list of superconsole components to add. Possible values are: + + dice - shows information about evaluated dice nodes debugevents - shows information about + the flow of events from buckd + + These components can be turned on/off interactively. Press 'h' for help when superconsole + is active. 
+ + Possible values: + - dice + - debugevents + - io: I/O panel + - re: RE panel + + --no-interactive-console + Disable console interactions + + [env: BUCK_NO_INTERACTIVE_CONSOLE=] + +Event Log Options: + --event-log + Write events to this log file + + --write-build-id + Write command invocation id into this file + + --unstable-write-invocation-record + Write the invocation record (as JSON) to this path. No guarantees whatsoever are made + regarding the stability of the format + + --command-report-path + Write the command report to this path. A command report is always written to + `buck-out/v2//command_report` even without this flag + +Universal Options: + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets diff --git a/tests/core/help/test_help_data/buck2-help.golden.txt b/tests/core/help/test_help_data/buck2-help.golden.txt new file mode 100644 index 0000000000000..4ab9b0680395f --- /dev/null +++ b/tests/core/help/test_help_data/buck2-help.golden.txt @@ -0,0 +1,83 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +A build system + +Documentation: https://internalfb.com/intern/staticdocs/buck2/docs/ + +Usage: buck2 [OPTIONS] + +Commands: + audit Perform lower level queries + aquery Perform queries on the action graph (experimental) + build Build the specified targets + bxl Run BXL scripts + help-env Print help for environment variables used by buck2 + test Build and test the specified targets + cquery Perform queries on the configured target graph + init Initialize a buck2 project + expand-external-cell Expand the contents of an external cell into the repo + install Build and install an application + kill Kill the buck daemon + killall Kill all buck2 processes on the machine + root Find buck cell, project or package root + query Alias for `uquery` + run Build and run the selected target + server Start, query, and control the http server + status Buckd status + starlark Run Starlark operations + targets Alias for `utargets` + utargets Show details about the specified targets + ctargets Resolve target patterns to configured targets + uquery Perform queries on the unconfigured target graph + completion Print completion configuration for shell + docs Print documentation of specified symbols + profile Run starlark profiler + rage Record information about the previous failed buck2 command + clean Delete generated files and caches + log Commands for interacting with buck2 logs + lsp Start an LSP server for starlark files + subscribe Subscribe to updates from the Buck2 daemon + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help + Print help (see a summary with '-h') + + -V, --version + Print version + +Universal Options: + --isolation-dir + The name of the directory that Buck2 creates within buck-out for writing outputs and + daemon information. 
If one is not provided, Buck2 creates a directory with the default + name. + + Instances of Buck2 share a daemon if and only if their isolation directory is identical. + The isolation directory also influences the output paths provided by Buck2, and as a + result using a non-default isolation dir will cause cache misses (and slower builds). + + [env: BUCK_ISOLATION_DIR=] + [default: v2] + + -v, --verbose + How verbose buck should be while logging. + + Values: 0 = Quiet, errors only; 1 = Show status. Default; 2 = more info about errors; 3 = + more info about everything; 4 = more info about everything + stderr; + + It can be combined with specific log items (stderr, full_failed_command, commands, + actions, status, stats, success) to fine-tune the verbosity of the log. Example usage + "-v=1,stderr" + + [default: 1] + + --oncall + The oncall executing this command + + --client-metadata + Metadata key-value pairs to inject into Buck2's logging. Client metadata must be of the + form `key=value`, where `key` is a snake_case identifier, and will be sent to backend + datasets + + --help-wrapper + Print buck wrapper help diff --git a/tests/core/help/test_help_env.py b/tests/core/help/test_help_env.py new file mode 100644 index 0000000000000..c283b39871486 --- /dev/null +++ b/tests/core/help/test_help_env.py @@ -0,0 +1,28 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + +from buck2.tests.e2e_util.helper.golden import golden + + +@buck_test() +async def test_help(buck: Buck) -> None: + result = await buck.help_env() + golden( + output=result.stdout, + rel_path="buck2-help-env.golden.txt", + ) + result = await buck.help_env("--self-testing") + golden( + output=result.stdout, + rel_path="buck2-help-env-testing.golden.txt", + ) diff --git a/tests/core/help/test_help_env_data/.buckconfig b/tests/core/help/test_help_env_data/.buckconfig new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/help/test_help_env_data/.buckroot b/tests/core/help/test_help_env_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/help/test_help_env_data/buck2-help-env-testing.golden.txt b/tests/core/help/test_help_env_data/buck2-help-env-testing.golden.txt new file mode 100644 index 0000000000000..dee1bb5470f2b --- /dev/null +++ b/tests/core/help/test_help_env_data/buck2-help-env-testing.golden.txt @@ -0,0 +1,68 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Name Type Default +BUCK2_ARG0 String +BUCK2_DEBUG_RAWOUTPUT_CHUNK_SIZE usize DEFAULT_CHUNK_SIZE +BUCK2_DICE_DUMP_ON_PANIC bool false +BUCK2_DICE_SNAPSHOT_INTERVAL_MS u64 500 +BUCK2_DISABLE_FILE_ATTR bool false +BUCK2_DISABLE_MACOS_QOS bool false +BUCK2_EDEN_SEMAPHORE usize 2048 +BUCK2_HARD_ERROR String +BUCK2_IO_SEMAPHORE usize num_cpus::get() +BUCK2_IO_THREADS usize 4 +BUCK2_KEEP_DEP_FILE_DIRECTORIES bool false +BUCK2_LIB_BACKTRACE String +BUCK2_MAX_BLOCKING_THREADS usize +BUCK2_PARANOID_PATH String +BUCK2_RE_DOWNLOAD_CONCURRENCY usize 256 +BUCK2_RUNTIME_THREADS usize +BUCK2_SCRIBE_CATEGORY String +BUCK2_TERMINATE_AFTER u64 
+BUCK2_TESTING_INACTIVITY_TIMEOUT bool false +BUCK2_TEST_BLOCK_ON_UPLOAD bool false +BUCK2_TEST_BUILD_ERROR bool false +BUCK2_TEST_DISABLE_CACHING bool +BUCK2_TEST_DISABLE_LOG_UPLOAD bool false +BUCK2_TEST_ERROR_ON_CACHE_UPLOAD bool false +BUCK2_TEST_EXTRA_EXTERNAL_CONFIG String +BUCK2_TEST_FAIL_BUCKD_AUTH bool false +BUCK2_TEST_FAIL_CONNECT bool false +BUCK2_TEST_FAIL_RE_DOWNLOADS bool false +BUCK2_TEST_FORCE_CACHE_UPLOAD bool false +BUCK2_TEST_FORCE_DECLARE_MISMATCH bool false +BUCK2_TEST_INIT_DAEMON_ERROR bool false +BUCK2_TEST_INJECTED_MISSING_DIGESTS Vec +BUCK2_TEST_MANIFOLD_CHUNK_BYTES u64 +BUCK2_TEST_MANIFOLD_TTL_S u64 +BUCK2_TEST_ONLY_REMOTE_DEP_FILE_CACHE bool false +BUCK2_TEST_RESOURCE_CONTROL_CONFIG String +BUCK2_TEST_SKIP_ACTION_CACHE_WRITE bool false +BUCK2_TEST_SKIP_DEFAULT_EXTERNAL_CONFIG bool false +BUCK2_TEST_STDIN_BUFFER_SIZE usize +BUCK2_TEST_TOMBSTONED_DIGESTS HashSet +BUCK2_TEST_TPX_USE_TCP bool false +BUCK2_WATCHMAN_TIMEOUT u64 57 +BUCKD_STARTUP_TIMEOUT u64 +BUCK_ACCESS_TIME_UPDATE_MAX_BUFFER_SIZE usize 5000 +BUCK_CONSOLE String +BUCK_DAEMON_LOG_TO_FILE u8 +BUCK_DEFAULT_DIGEST_ALGORITHM DigestAlgorithmFamily +BUCK_LOG String +BUCK_NO_INTERACTIVE_CONSOLE String +BUCK_OFFLINE_BUILD String +BUCK_PARANOID bool +CI bool false +COMPLETION_VERIFY_LOCKFILE String +DICE_DETECT_CYCLES_UNSTABLE DetectCycles +ENFORCE_DISPATCHER_SET bool false +FORCE_WANT_RESTART bool false +SANDCASTLE String +SANDCASTLE_ALIAS String +SANDCASTLE_ID String +SANDCASTLE_JOB_INFO String +SANDCASTLE_SCHEDULE_TYPE String +SCHEDULE_TYPE String +SKYCASTLE_WORKFLOW_ALIAS String +SKYCASTLE_WORKFLOW_RUN_ID String +WHICH_DICE_UNSTABLE WhichDice diff --git a/tests/core/help/test_help_env_data/buck2-help-env.golden.txt b/tests/core/help/test_help_env_data/buck2-help-env.golden.txt new file mode 100644 index 0000000000000..7209617c77c6a --- /dev/null +++ b/tests/core/help/test_help_env_data/buck2-help-env.golden.txt @@ -0,0 +1,45 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Name Type Default +BUCK2_ARG0 String +BUCK2_DEBUG_RAWOUTPUT_CHUNK_SIZE usize DEFAULT_CHUNK_SIZE +BUCK2_DICE_DUMP_ON_PANIC bool false +BUCK2_DICE_SNAPSHOT_INTERVAL_MS u64 500 +BUCK2_DISABLE_FILE_ATTR bool false +BUCK2_DISABLE_MACOS_QOS bool false +BUCK2_EDEN_SEMAPHORE usize 2048 +BUCK2_HARD_ERROR String +BUCK2_IO_SEMAPHORE usize num_cpus::get() +BUCK2_IO_THREADS usize 4 +BUCK2_KEEP_DEP_FILE_DIRECTORIES bool false +BUCK2_LIB_BACKTRACE String +BUCK2_MAX_BLOCKING_THREADS usize +BUCK2_PARANOID_PATH String +BUCK2_RE_DOWNLOAD_CONCURRENCY usize 256 +BUCK2_RUNTIME_THREADS usize +BUCK2_SCRIBE_CATEGORY String +BUCK2_TEST_BLOCK_ON_UPLOAD bool false +BUCK2_TEST_TPX_USE_TCP bool false +BUCK2_WATCHMAN_TIMEOUT u64 57 +BUCKD_STARTUP_TIMEOUT u64 +BUCK_ACCESS_TIME_UPDATE_MAX_BUFFER_SIZE usize 5000 +BUCK_CONSOLE String +BUCK_DAEMON_LOG_TO_FILE u8 +BUCK_DEFAULT_DIGEST_ALGORITHM DigestAlgorithmFamily +BUCK_LOG String +BUCK_NO_INTERACTIVE_CONSOLE String +BUCK_OFFLINE_BUILD String +BUCK_PARANOID bool +CI bool false +DICE_DETECT_CYCLES_UNSTABLE DetectCycles +ENFORCE_DISPATCHER_SET bool false +FORCE_WANT_RESTART bool false +SANDCASTLE String +SANDCASTLE_ALIAS String +SANDCASTLE_ID String +SANDCASTLE_JOB_INFO String +SANDCASTLE_SCHEDULE_TYPE String +SCHEDULE_TYPE String +SKYCASTLE_WORKFLOW_ALIAS String +SKYCASTLE_WORKFLOW_RUN_ID String +WHICH_DICE_UNSTABLE WhichDice diff --git a/tests/core/help/test_no_repo.py b/tests/core/help/test_no_repo.py new file mode 100644 index 
0000000000000..961ecb603ad5d --- /dev/null +++ b/tests/core/help/test_no_repo.py @@ -0,0 +1,22 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(skip_final_kill=True) +async def test_no_repo(buck: Buck, tmp_path: Path) -> None: + await buck.help() + # And make sure this also works with absolute argfiles + arg_path = tmp_path / "argsfile.txt" + arg_path.write_text("--help", encoding="utf-8") + await buck.run_buck_command(f"@{arg_path}") diff --git a/tests/core/help/test_no_repo_data/.buckroot b/tests/core/help/test_no_repo_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/help/test_no_repo_data/file.txt b/tests/core/help/test_no_repo_data/file.txt new file mode 100644 index 0000000000000..0c821cfba7b0b --- /dev/null +++ b/tests/core/help/test_no_repo_data/file.txt @@ -0,0 +1 @@ +A file with data because sapling handles empty directories poorly diff --git a/tests/core/http2/BUCK b/tests/core/http2/BUCK new file mode 100644 index 0000000000000..d5c43d56d6caa --- /dev/null +++ b/tests/core/http2/BUCK @@ -0,0 +1,12 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_http2", + srcs = ["test_http2.py"], + data_dir = "test_http2_data", + deps = [ + "fbsource//third-party/pypi/requests:requests", + ], +) diff --git a/tests/core/http2/test_http2.py b/tests/core/http2/test_http2.py new file mode 100644 index 0000000000000..46a394f688ec8 --- /dev/null +++ b/tests/core/http2/test_http2.py @@ -0,0 +1,35 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from __future__ import annotations + +import json + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_http2_enabled(buck: Buck) -> None: + # Get a daemon to start + await buck.build() + result = await buck.status() + status = json.loads(result.stdout) + assert status["http2"] is True, "http2 is enabled by default" + + # Insert necessary buckconfig to pick up http2 configuration. + with open(f"{buck.cwd}/.buckconfig", "a") as buckconfig: + buckconfig.writelines(["[http]\n", "http2 = false\n"]) + + # Get a daemon to start + await buck.build() + result = await buck.status() + status = json.loads(result.stdout) + assert status["http2"] is False, "http2 was disabled by buckconfig" diff --git a/tests/core/http2/test_http2_data/.buckconfig b/tests/core/http2/test_http2_data/.buckconfig new file mode 100644 index 0000000000000..6a2e48bc41d8a --- /dev/null +++ b/tests/core/http2/test_http2_data/.buckconfig @@ -0,0 +1,9 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored + +[repositories] +root = . 
+prelude = prelude diff --git a/tests/core/http2/test_http2_data/.buckroot b/tests/core/http2/test_http2_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/http2/test_http2_data/prelude/prelude.bzl b/tests/core/http2/test_http2_data/prelude/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/init/BUCK b/tests/core/init/BUCK new file mode 100644 index 0000000000000..95563e5530196 --- /dev/null +++ b/tests/core/init/BUCK @@ -0,0 +1,9 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_init", + srcs = ["test_init.py"], + data_dir = "test_init_data", +) diff --git a/tests/core/init/test_init.py b/tests/core/init/test_init.py new file mode 100644 index 0000000000000..302fd97c8e00d --- /dev/null +++ b/tests/core/init/test_init.py @@ -0,0 +1,17 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_init_builds(buck: Buck) -> None: + await buck.init() + await buck.build("root//...") diff --git a/tests/core/init/test_init_data/.buckroot b/tests/core/init/test_init_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/init/test_init_data/file.txt b/tests/core/init/test_init_data/file.txt new file mode 100644 index 0000000000000..9c5eb792ee8da --- /dev/null +++ b/tests/core/init/test_init_data/file.txt @@ -0,0 +1 @@ +A file because sapling handles empty directories poorly diff --git a/tests/core/interpreter/BUCK b/tests/core/interpreter/BUCK new file mode 100644 index 0000000000000..1d4c27235cb53 --- /dev/null +++ b/tests/core/interpreter/BUCK @@ -0,0 +1,131 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_v2_only", + srcs = ["test_v2_only.py"], + data_dir = "test_v2_only_data", +) + +buck2_e2e_test( + name = "test_read_root_config", + srcs = ["test_read_root_config.py"], + data_dir = "test_read_root_config_data", +) + +buck2_e2e_test( + name = "test_package_file_alt_name", + srcs = ["test_package_file_alt_name.py"], + data_dir = "test_package_file_alt_name_data", + serialize_test_cases = False, +) + +buck2_e2e_test( + name = "test_package_file_package_values", + srcs = ["test_package_file_package_values.py"], + data_dir = "test_package_file_package_values_data", + serialize_test_cases = False, + deps = [ + "//buck2/tests/e2e_util:golden", + ], +) + +buck2_e2e_test( + name = "test_package_values_missing_buck_file", + srcs = ["test_package_values_missing_buck_file.py"], + data_dir = "test_package_values_missing_buck_file_data", + serialize_test_cases = False, + deps = [ + "//buck2/tests/e2e_util:golden", + ], +) + +buck2_e2e_test( + name = "test_unstable_typecheck", + srcs = ["test_unstable_typecheck.py"], + data_dir = "test_unstable_typecheck_data", +) + +buck2_e2e_test( + name = "test_prelude_typecheck", + srcs = ["test_prelude_typecheck.py"], + data_dir = "test_prelude_typecheck_data", +) + +buck2_e2e_test( + name = "test_peak_allocated_bytes", + srcs = ["test_peak_allocated_bytes.py"], + data_dir = 
"test_peak_allocated_bytes_data", + deps = [ + "//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_peak_allocated_bytes_exceeds_limit", + srcs = ["test_peak_allocated_bytes_exceeds_limit.py"], + data_dir = "test_peak_allocated_bytes_exceeds_limit_data", + deps = [ + "//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_peak_allocated_bytes_exceeds_soft_limit", + srcs = ["test_peak_allocated_bytes_exceeds_soft_limit.py"], + data_dir = "test_peak_allocated_bytes_exceeds_soft_limit_data", + deps = [ + "//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_cpu_instruction_count", + srcs = ["test_cpu_instruction_count.py"], + data_dir = "test_cpu_instruction_count_data", + deps = [ + "//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_package_values_cross_cell", + srcs = ["test_package_values_cross_cell.py"], + data_dir = "test_package_values_cross_cell_data", + serialize_test_cases = False, + deps = [ + "//buck2/tests/e2e_util:golden", + ], +) + +buck2_e2e_test( + name = "test_callstack_size", + srcs = ["test_callstack_size.py"], + data_dir = "test_callstack_size_data", + serialize_test_cases = False, +) + +buck2_e2e_test( + name = "test_sub_packages", + srcs = ["test_sub_packages.py"], + data_dir = "test_sub_packages_data", +) + +buck2_e2e_test( + name = "test_missing_source_file", + srcs = ["test_missing_source_file.py"], + data_dir = "test_missing_source_file_data", +) + +buck2_e2e_test( + name = "test_no_package_call_does_not_reset_visibility", + srcs = ["test_package_file_visibility.py"], + data_dir = "test_package_file_visibility_data", +) + +buck2_e2e_test( + name = "test_print", + srcs = ["test_print.py"], + data_dir = "test_print_data", +) diff --git a/tests/core/interpreter/test_callstack_size.py b/tests/core/interpreter/test_callstack_size.py new file mode 100644 index 0000000000000..214f476f84472 --- /dev/null +++ b/tests/core/interpreter/test_callstack_size.py @@ -0,0 +1,26 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_stack_overflow(buck: Buck) -> None: + await expect_failure( + buck.uquery("bad//:"), stderr_regex="Starlark call stack overflow" + ) + + +@buck_test() +async def test_callstack_size(buck: Buck) -> None: + output = await buck.uquery("good//:") + assert "TEST PASSED" in output.stderr diff --git a/tests/core/interpreter/test_callstack_size_data/.buckconfig b/tests/core/interpreter/test_callstack_size_data/.buckconfig new file mode 100644 index 0000000000000..a0db676b9e29f --- /dev/null +++ b/tests/core/interpreter/test_callstack_size_data/.buckconfig @@ -0,0 +1,8 @@ +[repositories] + root = . 
+ bad = bad + good = good +[repository_aliases] + prelude = root +[buck2] + starlark_max_callstack_size = 10 diff --git a/tests/core/interpreter/test_callstack_size_data/.buckroot b/tests/core/interpreter/test_callstack_size_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/interpreter/test_callstack_size_data/bad/.buckconfig b/tests/core/interpreter/test_callstack_size_data/bad/.buckconfig new file mode 100644 index 0000000000000..dff2e8b2a4443 --- /dev/null +++ b/tests/core/interpreter/test_callstack_size_data/bad/.buckconfig @@ -0,0 +1,4 @@ +[repositories] + root = .. +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/interpreter/test_callstack_size_data/bad/TARGETS.fixture b/tests/core/interpreter/test_callstack_size_data/bad/TARGETS.fixture new file mode 100644 index 0000000000000..6e0b333a9981b --- /dev/null +++ b/tests/core/interpreter/test_callstack_size_data/bad/TARGETS.fixture @@ -0,0 +1,9 @@ +load("@root//:defs.bzl", "test_target") +load("@root//:util.bzl", "foo") + +# Recurse far deeper than the configured starlark_max_callstack_size of 10; one "empty" call stack frame is always pushed at the start of evaluation +max_stack_size = 49 + +foo(max_stack_size) + +test_target(name = "test_target") diff --git a/tests/core/interpreter/test_callstack_size_data/defs.bzl b/tests/core/interpreter/test_callstack_size_data/defs.bzl new file mode 100644 index 0000000000000..21815d67cb1c6 --- /dev/null +++ b/tests/core/interpreter/test_callstack_size_data/defs.bzl @@ -0,0 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +test_target = rule(impl = lambda _ctx: [DefaultInfo()], attrs = {}) diff --git a/tests/core/interpreter/test_callstack_size_data/good/.buckconfig b/tests/core/interpreter/test_callstack_size_data/good/.buckconfig new file mode 100644 index 0000000000000..dff2e8b2a4443 --- /dev/null +++ b/tests/core/interpreter/test_callstack_size_data/good/.buckconfig @@ -0,0 +1,4 @@ +[repositories] + root = .. +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/interpreter/test_callstack_size_data/good/TARGETS.fixture b/tests/core/interpreter/test_callstack_size_data/good/TARGETS.fixture new file mode 100644 index 0000000000000..b4d402fc4f199 --- /dev/null +++ b/tests/core/interpreter/test_callstack_size_data/good/TARGETS.fixture @@ -0,0 +1,11 @@ +load("@root//:defs.bzl", "test_target") +load("@root//:util.bzl", "foo") + +# 10 minus the 1 "empty" call stack frame that is always pushed at the start of evaluation +max_stack_size = 9 + +foo(max_stack_size - 1) + +print("TEST PASSED") # buildifier: disable=print + +test_target(name = "test_target") diff --git a/tests/core/interpreter/test_callstack_size_data/prelude.bzl b/tests/core/interpreter/test_callstack_size_data/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/interpreter/test_callstack_size_data/util.bzl b/tests/core/interpreter/test_callstack_size_data/util.bzl new file mode 100644 index 0000000000000..d3ad883f2170e --- /dev/null +++ b/tests/core/interpreter/test_callstack_size_data/util.bzl @@ -0,0 +1,11 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates.
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def foo(i): + if i == 0: + return + foo(i - 1) diff --git a/tests/core/interpreter/test_cpu_instruction_count.py b/tests/core/interpreter/test_cpu_instruction_count.py new file mode 100644 index 0000000000000..acdbb613040ac --- /dev/null +++ b/tests/core/interpreter/test_cpu_instruction_count.py @@ -0,0 +1,37 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import platform + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.utils import filter_events + + +@buck_test() +async def test_cpu_instruction_count(buck: Buck) -> None: + await buck.uquery("//:") + span_end_load_event = await filter_events( + buck, + "Event", + "data", + "SpanEnd", + "data", + "Load", + ) + assert len(span_end_load_event) == 1 + + cpu_instruction_count = span_end_load_event[0]["cpu_instruction_count"] + + # We only populate counters on Linux + if platform.system() == "Linux": + assert cpu_instruction_count >= 1000 + else: + assert cpu_instruction_count is None diff --git a/tests/core/interpreter/test_cpu_instruction_count_data/.buckconfig b/tests/core/interpreter/test_cpu_instruction_count_data/.buckconfig new file mode 100644 index 0000000000000..2988e9614be50 --- /dev/null +++ b/tests/core/interpreter/test_cpu_instruction_count_data/.buckconfig @@ -0,0 +1,5 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +prelude = . diff --git a/tests/core/interpreter/test_cpu_instruction_count_data/.buckroot b/tests/core/interpreter/test_cpu_instruction_count_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/interpreter/test_cpu_instruction_count_data/TARGETS.fixture b/tests/core/interpreter/test_cpu_instruction_count_data/TARGETS.fixture new file mode 100644 index 0000000000000..70966db49437f --- /dev/null +++ b/tests/core/interpreter/test_cpu_instruction_count_data/TARGETS.fixture @@ -0,0 +1,3 @@ +# @nolint + +print("test") diff --git a/tests/core/interpreter/test_cpu_instruction_count_data/prelude.bzl b/tests/core/interpreter/test_cpu_instruction_count_data/prelude.bzl new file mode 100644 index 0000000000000..a869e838b4c7c --- /dev/null +++ b/tests/core/interpreter/test_cpu_instruction_count_data/prelude.bzl @@ -0,0 +1,6 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. diff --git a/tests/core/interpreter/test_missing_source_file.py b/tests/core/interpreter/test_missing_source_file.py new file mode 100644 index 0000000000000..82de889e86aa7 --- /dev/null +++ b/tests/core/interpreter/test_missing_source_file.py @@ -0,0 +1,21 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_missing_source_file(buck: Buck) -> None: + await expect_failure( + buck.uquery("//:dummy_target"), + stderr_regex="Source file .* does not exist as a member of package", + ) diff --git a/tests/core/interpreter/test_missing_source_file_data/.buckconfig b/tests/core/interpreter/test_missing_source_file_data/.buckconfig new file mode 100644 index 0000000000000..2988e9614be50 --- /dev/null +++ b/tests/core/interpreter/test_missing_source_file_data/.buckconfig @@ -0,0 +1,5 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +prelude = . diff --git a/tests/core/interpreter/test_missing_source_file_data/.buckroot b/tests/core/interpreter/test_missing_source_file_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/interpreter/test_missing_source_file_data/TARGETS.fixture b/tests/core/interpreter/test_missing_source_file_data/TARGETS.fixture new file mode 100644 index 0000000000000..0a31ea08b6fdc --- /dev/null +++ b/tests/core/interpreter/test_missing_source_file_data/TARGETS.fixture @@ -0,0 +1 @@ +test_target(name = "dummy_target", srcs = ["non_existent_source_file.txt"]) diff --git a/tests/core/interpreter/test_missing_source_file_data/prelude.bzl b/tests/core/interpreter/test_missing_source_file_data/prelude.bzl new file mode 100644 index 0000000000000..b7eb9a91f0999 --- /dev/null +++ b/tests/core/interpreter/test_missing_source_file_data/prelude.bzl @@ -0,0 +1,13 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(_ctx): + return [DefaultInfo()] + +test_target = rule(impl = _impl, attrs = { + "srcs": attrs.list(attrs.source(), default = []), +}) diff --git a/tests/core/interpreter/test_package_file_alt_name.py b/tests/core/interpreter/test_package_file_alt_name.py new file mode 100644 index 0000000000000..bb3cf0caea70f --- /dev/null +++ b/tests/core/interpreter/test_package_file_alt_name.py @@ -0,0 +1,27 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
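+# BUCK_TREE is accepted as an alternative name for a PACKAGE file. The
+# assertions below check precedence: while BUCK_TREE exists its value wins
+# over PACKAGE, and once BUCK_TREE is deleted the PACKAGE file takes effect.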
+ +# pyre-strict + + +import os + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_package_file_alt_name(buck: Buck) -> None: + output = await buck.build("//:") + assert "AAA from BUCK_TREE" in output.stderr + assert "AAA from PACKAGE" not in output.stderr + + os.unlink(buck.cwd / "BUCK_TREE") + + output = await buck.build("//:") + assert "AAA from BUCK_TREE" not in output.stderr + assert "AAA from PACKAGE" in output.stderr diff --git a/tests/core/interpreter/test_package_file_alt_name_data/.buckconfig b/tests/core/interpreter/test_package_file_alt_name_data/.buckconfig new file mode 100644 index 0000000000000..2988e9614be50 --- /dev/null +++ b/tests/core/interpreter/test_package_file_alt_name_data/.buckconfig @@ -0,0 +1,5 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +prelude = . diff --git a/tests/core/interpreter/test_package_file_alt_name_data/.buckroot b/tests/core/interpreter/test_package_file_alt_name_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/interpreter/test_package_file_alt_name_data/TARGETS.fixture b/tests/core/interpreter/test_package_file_alt_name_data/TARGETS.fixture new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/interpreter/test_package_file_alt_name_data/prelude.bzl b/tests/core/interpreter/test_package_file_alt_name_data/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/interpreter/test_package_file_package_values.py b/tests/core/interpreter/test_package_file_package_values.py new file mode 100644 index 0000000000000..75fa49171f4c5 --- /dev/null +++ b/tests/core/interpreter/test_package_file_package_values.py @@ -0,0 +1,93 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.golden import golden + + +@buck_test() +async def test_package_file_package_values(buck: Buck) -> None: + # Build file does all the assertions. + output = await buck.build("//:") + assert "TEST PASSED" in output.stderr + + +@buck_test() +async def test_audit_package_values(buck: Buck) -> None: + stdout = (await buck.audit("package-values", "//")).stdout + golden( + output=stdout, + rel_path="audit-package-values.golden.json", + ) + + +@buck_test() +async def test_targets_package_values(buck: Buck) -> None: + stdout = (await buck.targets("--package-values", "//...")).stdout + golden( + output=stdout, + rel_path="targets-package-values.golden.json", + ) + + +@buck_test() +async def test_targets_package_values_regex(buck: Buck) -> None: + # Empty string as regex. 
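+    # `--package-values-regex` matches in "search" mode (per its help text):
+    # the empty pattern matches every key, and an unanchored pattern such as
+    # "xxx" matches "xxx.yyy".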
+ out = (await buck.targets("--package-values-regex", "", "//...")).stdout + json_result = json.loads(out)[0] + expected = {"aaa.bbb": "ccc", "xxx.yyy": "zzz"} + assert json_result["buck.package_values"] == expected + + out = (await buck.targets("--package-values-regex", "aaa.bbb", "//...")).stdout + json_result = json.loads(out)[0] + expected = {"aaa.bbb": "ccc"} + assert json_result["buck.package_values"] == expected + + out = (await buck.targets("--package-values-regex", "xxx", "//...")).stdout + json_result = json.loads(out)[0] + expected = {"xxx.yyy": "zzz"} + assert json_result["buck.package_values"] == expected + + out = ( + await buck.targets( + "--package-values-regex", + "aaa.bbb", + "--package-values-regex", + "xxx.yyy", + "//...", + ) + ).stdout + json_result = json.loads(out)[0] + expected = {"aaa.bbb": "ccc", "xxx.yyy": "zzz"} + assert json_result["buck.package_values"] == expected + + out = (await buck.targets("--package-values-regex", "non_existent", "//...")).stdout + json_result = json.loads(out)[0] + expected = {} + assert json_result["buck.package_values"] == expected + + args = ["allow", "only", "one", "arg", "per", "flag", "occurrence"] + await expect_failure( + buck.targets("--package-values-regex", *args, "//..."), + stderr_regex="Error parsing root//arg", + ) + + +@buck_test() +async def test_targets_streaming_package_values(buck: Buck) -> None: + stdout = (await buck.targets("--streaming", "--package-values", "//...")).stdout + golden( + output=stdout, + rel_path="targets-streaming-package-values.golden.json", + ) diff --git a/tests/core/interpreter/test_package_file_package_values_data/.buckconfig b/tests/core/interpreter/test_package_file_package_values_data/.buckconfig new file mode 100644 index 0000000000000..425a56f43b9c4 --- /dev/null +++ b/tests/core/interpreter/test_package_file_package_values_data/.buckconfig @@ -0,0 +1,6 @@ +[repositories] + root = . 
+[repository_aliases] + prelude = root +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/interpreter/test_package_file_package_values_data/.buckroot b/tests/core/interpreter/test_package_file_package_values_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/interpreter/test_package_file_package_values_data/TARGETS.fixture b/tests/core/interpreter/test_package_file_package_values_data/TARGETS.fixture new file mode 100644 index 0000000000000..e8b482d1b17d1 --- /dev/null +++ b/tests/core/interpreter/test_package_file_package_values_data/TARGETS.fixture @@ -0,0 +1,12 @@ +load(":defs.bzl", "test_target") +load(":util.bzl", "read") + +# buildifier: disable=no-effect +read_package_value("aaa.bbb") == "ccc" or fail() + +# buildifier: disable=no-effect +read() == "zzz" or fail() + +print("TEST PASSED") # buildifier: disable=print + +test_target(name = "test_target") diff --git a/tests/core/interpreter/test_package_file_package_values_data/audit-package-values.golden.json b/tests/core/interpreter/test_package_file_package_values_data/audit-package-values.golden.json new file mode 100644 index 0000000000000..8be4257979010 --- /dev/null +++ b/tests/core/interpreter/test_package_file_package_values_data/audit-package-values.golden.json @@ -0,0 +1,8 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "root//": { + "xxx.yyy": "zzz", + "aaa.bbb": "ccc" + } +} diff --git a/tests/core/interpreter/test_package_file_package_values_data/defs.bzl b/tests/core/interpreter/test_package_file_package_values_data/defs.bzl new file mode 100644 index 0000000000000..21815d67cb1c6 --- /dev/null +++ b/tests/core/interpreter/test_package_file_package_values_data/defs.bzl @@ -0,0 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
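+ +# Minimal fixture rule: no attrs and no outputs; the test only needs a loadable target.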
+ +test_target = rule(impl = lambda _ctx: [DefaultInfo()], attrs = {}) diff --git a/tests/core/interpreter/test_package_file_package_values_data/prelude.bzl b/tests/core/interpreter/test_package_file_package_values_data/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/interpreter/test_package_file_package_values_data/targets-package-values.golden.json b/tests/core/interpreter/test_package_file_package_values_data/targets-package-values.golden.json new file mode 100644 index 0000000000000..2b115e18d6da7 --- /dev/null +++ b/tests/core/interpreter/test_package_file_package_values_data/targets-package-values.golden.json @@ -0,0 +1,14 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +[ + { + "buck.type":"root//defs.bzl:test_target", + "buck.deps":[], + "buck.inputs":[], + "buck.package":"root//", + "buck.package_values":{"aaa.bbb":"ccc","xxx.yyy":"zzz"}, + "name":"test_target", + "visibility":[], + "within_view":["PUBLIC"] + } +] diff --git a/tests/core/interpreter/test_package_file_package_values_data/targets-streaming-package-values.golden.json b/tests/core/interpreter/test_package_file_package_values_data/targets-streaming-package-values.golden.json new file mode 100644 index 0000000000000..2b115e18d6da7 --- /dev/null +++ b/tests/core/interpreter/test_package_file_package_values_data/targets-streaming-package-values.golden.json @@ -0,0 +1,14 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +[ + { + "buck.type":"root//defs.bzl:test_target", + "buck.deps":[], + "buck.inputs":[], + "buck.package":"root//", + "buck.package_values":{"aaa.bbb":"ccc","xxx.yyy":"zzz"}, + "name":"test_target", + "visibility":[], + "within_view":["PUBLIC"] + } +] diff --git a/tests/core/interpreter/test_package_file_package_values_data/util.bzl b/tests/core/interpreter/test_package_file_package_values_data/util.bzl new file mode 100644 index 0000000000000..292a1b078a896 --- /dev/null +++ b/tests/core/interpreter/test_package_file_package_values_data/util.bzl @@ -0,0 +1,12 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def write(): + write_package_value("xxx.yyy", "zzz") + +def read(): + return read_package_value("xxx.yyy") diff --git a/tests/core/interpreter/test_package_file_visibility.py b/tests/core/interpreter/test_package_file_visibility.py new file mode 100644 index 0000000000000..287f21e5fe4fb --- /dev/null +++ b/tests/core/interpreter/test_package_file_visibility.py @@ -0,0 +1,19 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_no_package_call_does_not_reset_visibility(buck: Buck) -> None: + # Test that PACKAGE file without package() call does not reset visibility inherited from parent PACKAGE file. 
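+    # root//b:top depends on //a/nested_package:bottom, so the build succeeds only if the visibility inherited from the parent PACKAGE file still applies.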
+ + await buck.build("root//b:top") diff --git a/tests/core/interpreter/test_package_file_visibility_data/.buckconfig b/tests/core/interpreter/test_package_file_visibility_data/.buckconfig new file mode 100644 index 0000000000000..b884d57710a9d --- /dev/null +++ b/tests/core/interpreter/test_package_file_visibility_data/.buckconfig @@ -0,0 +1,12 @@ +[buildfile] + name = TARGETS.fixture + +[cells] + root = . + nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled diff --git a/tests/core/interpreter/test_package_file_visibility_data/.buckroot b/tests/core/interpreter/test_package_file_visibility_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/interpreter/test_package_file_visibility_data/a/nested_package/TARGETS.fixture b/tests/core/interpreter/test_package_file_visibility_data/a/nested_package/TARGETS.fixture new file mode 100644 index 0000000000000..b0ab975f1d54f --- /dev/null +++ b/tests/core/interpreter/test_package_file_visibility_data/a/nested_package/TARGETS.fixture @@ -0,0 +1 @@ +stub(name = "bottom") diff --git a/tests/core/interpreter/test_package_file_visibility_data/b/TARGETS.fixture b/tests/core/interpreter/test_package_file_visibility_data/b/TARGETS.fixture new file mode 100644 index 0000000000000..bc9cd42042f50 --- /dev/null +++ b/tests/core/interpreter/test_package_file_visibility_data/b/TARGETS.fixture @@ -0,0 +1 @@ +stub(name = "top", deps = ["//a/nested_package:bottom"]) diff --git a/tests/core/interpreter/test_package_file_visibility_data/prelude.bzl b/tests/core/interpreter/test_package_file_visibility_data/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/interpreter/test_package_values_cross_cell.py b/tests/core/interpreter/test_package_values_cross_cell.py new file mode 100644 index 0000000000000..e519f7d6c8dd2 --- /dev/null +++ b/tests/core/interpreter/test_package_values_cross_cell.py @@ -0,0 +1,22 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.golden import golden + + +@buck_test() +async def test_audit_package_values_cross_cell(buck: Buck) -> None: + stdout = (await buck.audit("package-values", "other//")).stdout + golden( + output=stdout, + rel_path="audit-package-values-cross-cell.golden.json", + ) diff --git a/tests/core/interpreter/test_package_values_cross_cell_data/.buckconfig b/tests/core/interpreter/test_package_values_cross_cell_data/.buckconfig new file mode 100644 index 0000000000000..9cd98d201e0f5 --- /dev/null +++ b/tests/core/interpreter/test_package_values_cross_cell_data/.buckconfig @@ -0,0 +1,5 @@ +[repositories] + root = . 
+ other = other +[repository_aliases] + prelude = root diff --git a/tests/core/interpreter/test_package_values_cross_cell_data/.buckroot b/tests/core/interpreter/test_package_values_cross_cell_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/interpreter/test_package_values_cross_cell_data/audit-package-values-cross-cell.golden.json b/tests/core/interpreter/test_package_values_cross_cell_data/audit-package-values-cross-cell.golden.json new file mode 100644 index 0000000000000..44def0a167045 --- /dev/null +++ b/tests/core/interpreter/test_package_values_cross_cell_data/audit-package-values-cross-cell.golden.json @@ -0,0 +1,8 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "other//": { + "aaa.bbb": "ccc", + "xxx.yyy": "zzz" + } +} diff --git a/tests/core/interpreter/test_package_values_cross_cell_data/other/.buckconfig b/tests/core/interpreter/test_package_values_cross_cell_data/other/.buckconfig new file mode 100644 index 0000000000000..b73bf5d5e41d2 --- /dev/null +++ b/tests/core/interpreter/test_package_values_cross_cell_data/other/.buckconfig @@ -0,0 +1,2 @@ +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/interpreter/test_package_values_cross_cell_data/other/TARGETS.fixture b/tests/core/interpreter/test_package_values_cross_cell_data/other/TARGETS.fixture new file mode 100644 index 0000000000000..641f9821d86ce --- /dev/null +++ b/tests/core/interpreter/test_package_values_cross_cell_data/other/TARGETS.fixture @@ -0,0 +1 @@ +eee(name = "EEE") diff --git a/tests/core/interpreter/test_package_values_cross_cell_data/prelude.bzl b/tests/core/interpreter/test_package_values_cross_cell_data/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/interpreter/test_package_values_missing_buck_file.py b/tests/core/interpreter/test_package_values_missing_buck_file.py new file mode 100644 index 0000000000000..c8a77292c3d2e --- /dev/null +++ b/tests/core/interpreter/test_package_values_missing_buck_file.py @@ -0,0 +1,22 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.golden import golden + + +@buck_test() +async def test_package_values_missing_buck_file(buck: Buck) -> None: + stdout = (await buck.audit("package-values", "//")).stdout + golden( + output=stdout, + rel_path="audit-package-values-missing-buck-file.golden.json", + ) diff --git a/tests/core/interpreter/test_package_values_missing_buck_file_data/.buckconfig b/tests/core/interpreter/test_package_values_missing_buck_file_data/.buckconfig new file mode 100644 index 0000000000000..2cab1f88897fa --- /dev/null +++ b/tests/core/interpreter/test_package_values_missing_buck_file_data/.buckconfig @@ -0,0 +1,2 @@ +[repositories] + root = . 
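+# No [buildfile] name override and no build file in this fixture; the test audits package values for a package without a build file.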
diff --git a/tests/core/interpreter/test_package_values_missing_buck_file_data/.buckroot b/tests/core/interpreter/test_package_values_missing_buck_file_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/interpreter/test_package_values_missing_buck_file_data/audit-package-values-missing-buck-file.golden.json b/tests/core/interpreter/test_package_values_missing_buck_file_data/audit-package-values-missing-buck-file.golden.json new file mode 100644 index 0000000000000..5b1958bf269d6 --- /dev/null +++ b/tests/core/interpreter/test_package_values_missing_buck_file_data/audit-package-values-missing-buck-file.golden.json @@ -0,0 +1,7 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "root//": { + "aaa.bbb": "ccc" + } +} diff --git a/tests/core/interpreter/test_peak_allocated_bytes.py b/tests/core/interpreter/test_peak_allocated_bytes.py new file mode 100644 index 0000000000000..290eeb165b8e3 --- /dev/null +++ b/tests/core/interpreter/test_peak_allocated_bytes.py @@ -0,0 +1,34 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.utils import filter_events + + +@buck_test() +async def test_peak_allocated_bytes(buck: Buck) -> None: + await buck.uquery("//:EEE") + span_end_load_event = await filter_events( + buck, + "Event", + "data", + "SpanEnd", + "data", + "Load", + ) + assert len(span_end_load_event) == 1 + starlark_peak_allocated_bytes = span_end_load_event[0][ + "starlark_peak_allocated_bytes" + ] + # the list occupies pointer size (8 bytes) * number of elements (~10M elements), plus some bookkeeping overhead + assert starlark_peak_allocated_bytes >= (8 * 10 * 1 << 20) + # check that the peak is no more than 10% above that + assert starlark_peak_allocated_bytes < (8 * 11 * 1 << 20) diff --git a/tests/core/interpreter/test_peak_allocated_bytes_data/.buckconfig b/tests/core/interpreter/test_peak_allocated_bytes_data/.buckconfig new file mode 100644 index 0000000000000..eab02938ca636 --- /dev/null +++ b/tests/core/interpreter/test_peak_allocated_bytes_data/.buckconfig @@ -0,0 +1,8 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +prelude = . 
+ +[buck2] +check_starlark_peak_memory = true diff --git a/tests/core/interpreter/test_peak_allocated_bytes_data/.buckroot b/tests/core/interpreter/test_peak_allocated_bytes_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/interpreter/test_peak_allocated_bytes_data/TARGETS.fixture b/tests/core/interpreter/test_peak_allocated_bytes_data/TARGETS.fixture new file mode 100644 index 0000000000000..5dcf9f611dcae --- /dev/null +++ b/tests/core/interpreter/test_peak_allocated_bytes_data/TARGETS.fixture @@ -0,0 +1,4 @@ +eee(name = "EEE") +set_starlark_peak_allocated_byte_limit(1 << 30) +thousand_list = [1 for i in range(1 << 10)] +ten_million_list = 10 * thousand_list * (1 << 10) diff --git a/tests/core/interpreter/test_peak_allocated_bytes_data/prelude.bzl b/tests/core/interpreter/test_peak_allocated_bytes_data/prelude.bzl new file mode 100644 index 0000000000000..622401727afff --- /dev/null +++ b/tests/core/interpreter/test_peak_allocated_bytes_data/prelude.bzl @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _eee( + # starlark-lint-disable unused-argument + ctx): # @unused + return [DefaultInfo()] + +eee = rule( + impl = _eee, + attrs = {}, +) diff --git a/tests/core/interpreter/test_peak_allocated_bytes_exceeds_limit.py b/tests/core/interpreter/test_peak_allocated_bytes_exceeds_limit.py new file mode 100644 index 0000000000000..94f765e7c5cbd --- /dev/null +++ b/tests/core/interpreter/test_peak_allocated_bytes_exceeds_limit.py @@ -0,0 +1,21 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_peak_allocated_bytes_exceeds_limit(buck: Buck) -> None: + await expect_failure( + buck.uquery("//:EEE"), + stderr_regex="Starlark peak memory usage for prelude//:TARGETS.fixture is .*MiB which exceeds the limit 1.0KiB!", + ) diff --git a/tests/core/interpreter/test_peak_allocated_bytes_exceeds_limit_data/.buckconfig b/tests/core/interpreter/test_peak_allocated_bytes_exceeds_limit_data/.buckconfig new file mode 100644 index 0000000000000..eab02938ca636 --- /dev/null +++ b/tests/core/interpreter/test_peak_allocated_bytes_exceeds_limit_data/.buckconfig @@ -0,0 +1,8 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +prelude = . 
+ +[buck2] +check_starlark_peak_memory = true diff --git a/tests/core/interpreter/test_peak_allocated_bytes_exceeds_limit_data/.buckroot b/tests/core/interpreter/test_peak_allocated_bytes_exceeds_limit_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/interpreter/test_peak_allocated_bytes_exceeds_limit_data/TARGETS.fixture b/tests/core/interpreter/test_peak_allocated_bytes_exceeds_limit_data/TARGETS.fixture new file mode 100644 index 0000000000000..b5286203da07a --- /dev/null +++ b/tests/core/interpreter/test_peak_allocated_bytes_exceeds_limit_data/TARGETS.fixture @@ -0,0 +1,4 @@ +eee(name = "EEE") +set_starlark_peak_allocated_byte_limit(1 << 10) +thousand_list = [1 for i in range(1 << 10)] +ten_million_list = 10 * thousand_list * (1 << 10) diff --git a/tests/core/interpreter/test_peak_allocated_bytes_exceeds_limit_data/prelude.bzl b/tests/core/interpreter/test_peak_allocated_bytes_exceeds_limit_data/prelude.bzl new file mode 100644 index 0000000000000..622401727afff --- /dev/null +++ b/tests/core/interpreter/test_peak_allocated_bytes_exceeds_limit_data/prelude.bzl @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _eee( + # starlark-lint-disable unused-argument + ctx): # @unused + return [DefaultInfo()] + +eee = rule( + impl = _eee, + attrs = {}, +) diff --git a/tests/core/interpreter/test_peak_allocated_bytes_exceeds_soft_limit.py b/tests/core/interpreter/test_peak_allocated_bytes_exceeds_soft_limit.py new file mode 100644 index 0000000000000..862da26665b02 --- /dev/null +++ b/tests/core/interpreter/test_peak_allocated_bytes_exceeds_soft_limit.py @@ -0,0 +1,21 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_peak_allocated_bytes_exceeds_soft_limit(buck: Buck) -> None: + await expect_failure( + buck.uquery("//:EEE"), + stderr_regex="Starlark peak memory usage for prelude//:TARGETS.fixture is .*MiB which is over 50% of the limit 10MiB! Consider investigating what takes too much memory.", + ) diff --git a/tests/core/interpreter/test_peak_allocated_bytes_exceeds_soft_limit_data/.buckconfig b/tests/core/interpreter/test_peak_allocated_bytes_exceeds_soft_limit_data/.buckconfig new file mode 100644 index 0000000000000..eab02938ca636 --- /dev/null +++ b/tests/core/interpreter/test_peak_allocated_bytes_exceeds_soft_limit_data/.buckconfig @@ -0,0 +1,8 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +prelude = . 
+ +[buck2] +check_starlark_peak_memory = true diff --git a/tests/core/interpreter/test_peak_allocated_bytes_exceeds_soft_limit_data/.buckroot b/tests/core/interpreter/test_peak_allocated_bytes_exceeds_soft_limit_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/interpreter/test_peak_allocated_bytes_exceeds_soft_limit_data/TARGETS.fixture b/tests/core/interpreter/test_peak_allocated_bytes_exceeds_soft_limit_data/TARGETS.fixture new file mode 100644 index 0000000000000..44eccba5084df --- /dev/null +++ b/tests/core/interpreter/test_peak_allocated_bytes_exceeds_soft_limit_data/TARGETS.fixture @@ -0,0 +1,4 @@ +eee(name = "EEE") +set_starlark_peak_allocated_byte_limit(10 * 1 << 20) +thousand_list = [1 for i in range(1 << 10)] +million_list = thousand_list * (1 << 10) diff --git a/tests/core/interpreter/test_peak_allocated_bytes_exceeds_soft_limit_data/prelude.bzl b/tests/core/interpreter/test_peak_allocated_bytes_exceeds_soft_limit_data/prelude.bzl new file mode 100644 index 0000000000000..622401727afff --- /dev/null +++ b/tests/core/interpreter/test_peak_allocated_bytes_exceeds_soft_limit_data/prelude.bzl @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _eee( + # starlark-lint-disable unused-argument + ctx): # @unused + return [DefaultInfo()] + +eee = rule( + impl = _eee, + attrs = {}, +) diff --git a/tests/core/interpreter/test_prelude_typecheck.py b/tests/core/interpreter/test_prelude_typecheck.py new file mode 100644 index 0000000000000..b0e339a7648f4 --- /dev/null +++ b/tests/core/interpreter/test_prelude_typecheck.py @@ -0,0 +1,22 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +# Test prelude is typechecked unconditionally. +@buck_test() +async def test_prelude_typecheck(buck: Buck) -> None: + await expect_failure( + buck.uquery("//:"), + stderr_regex="Expected type `str` but got `int`", + ) diff --git a/tests/core/interpreter/test_prelude_typecheck_data/.buckconfig b/tests/core/interpreter/test_prelude_typecheck_data/.buckconfig new file mode 100644 index 0000000000000..7b86b4be53592 --- /dev/null +++ b/tests/core/interpreter/test_prelude_typecheck_data/.buckconfig @@ -0,0 +1,4 @@ +[repositories] + prelude = . 
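+# The root cell doubles as the prelude, so prelude.bzl (and the bad.bzl it loads) is typechecked on every command.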
+[buildfile] + name = TARGETS.fixture diff --git a/tests/core/interpreter/test_prelude_typecheck_data/.buckroot b/tests/core/interpreter/test_prelude_typecheck_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/interpreter/test_prelude_typecheck_data/TARGETS.fixture b/tests/core/interpreter/test_prelude_typecheck_data/TARGETS.fixture new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/interpreter/test_prelude_typecheck_data/bad.bzl b/tests/core/interpreter/test_prelude_typecheck_data/bad.bzl new file mode 100644 index 0000000000000..f7c54b7c65b78 --- /dev/null +++ b/tests/core/interpreter/test_prelude_typecheck_data/bad.bzl @@ -0,0 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def bad(x: int) -> str: + return x diff --git a/tests/core/interpreter/test_prelude_typecheck_data/prelude.bzl b/tests/core/interpreter/test_prelude_typecheck_data/prelude.bzl new file mode 100644 index 0000000000000..c8b69799a6f0d --- /dev/null +++ b/tests/core/interpreter/test_prelude_typecheck_data/prelude.bzl @@ -0,0 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load(":bad.bzl", _bad = "bad") + +bad = _bad diff --git a/tests/core/interpreter/test_print.py b/tests/core/interpreter/test_print.py new file mode 100644 index 0000000000000..831d88534b09d --- /dev/null +++ b/tests/core/interpreter/test_print.py @@ -0,0 +1,23 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_print(buck: Buck) -> None: + result = await buck.targets("root//:") + assert "print me" in result.stderr + assert "print me" not in result.stdout + + result = await buck.build("root//:", "--no-buckd") + assert "print me" in result.stderr + assert "print me" not in result.stdout diff --git a/tests/core/interpreter/test_print_data/.buckconfig b/tests/core/interpreter/test_print_data/.buckconfig new file mode 100644 index 0000000000000..df06a02c03ca2 --- /dev/null +++ b/tests/core/interpreter/test_print_data/.buckconfig @@ -0,0 +1,5 @@ +[cells] + root = . 
+ +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/interpreter/test_print_data/.buckroot b/tests/core/interpreter/test_print_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/interpreter/test_print_data/TARGETS.fixture b/tests/core/interpreter/test_print_data/TARGETS.fixture new file mode 100644 index 0000000000000..8017d31f699d4 --- /dev/null +++ b/tests/core/interpreter/test_print_data/TARGETS.fixture @@ -0,0 +1,2 @@ +# buildifier: disable=print +print("print me") diff --git a/tests/core/interpreter/test_read_root_config.py b/tests/core/interpreter/test_read_root_config.py new file mode 100644 index 0000000000000..81c7ad2371644 --- /dev/null +++ b/tests/core/interpreter/test_read_root_config.py @@ -0,0 +1,27 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_read_root_config(buck: Buck) -> None: + output = await buck.build("//:") + assert "<<root=regular>>" in output.stderr + assert "<<root_ignore_default=regular>>" in output.stderr + assert "<<root_use_default=predict>>" in output.stderr + assert "<<local=regular>>" in output.stderr + + output = await buck.build("other//:") + assert "{{root=regular}}" in output.stderr + assert "{{root_ignore_default=regular}}" in output.stderr + assert "{{root_use_default=quantity}}" in output.stderr + assert "{{local=guerrilla}}" in output.stderr diff --git a/tests/core/interpreter/test_read_root_config_data/.buckconfig b/tests/core/interpreter/test_read_root_config_data/.buckconfig new file mode 100644 index 0000000000000..3e23f14ab95a5 --- /dev/null +++ b/tests/core/interpreter/test_read_root_config_data/.buckconfig @@ -0,0 +1,8 @@ +[repositories] + test_read_root_config = . + prelude = . 
+ other = other +[buildfile] + name = TARGETS.fixture +[unlike] + harsh = regular diff --git a/tests/core/interpreter/test_read_root_config_data/.buckroot b/tests/core/interpreter/test_read_root_config_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/interpreter/test_read_root_config_data/TARGETS.fixture b/tests/core/interpreter/test_read_root_config_data/TARGETS.fixture new file mode 100644 index 0000000000000..e14482311837a --- /dev/null +++ b/tests/core/interpreter/test_read_root_config_data/TARGETS.fixture @@ -0,0 +1,5 @@ +print("<<root=%s>>" % read_root_config("unlike", "harsh")) # buildifier: disable=print +print("<<root_ignore_default=%s>>" % read_root_config("unlike", "harsh", "release")) # buildifier: disable=print +print("<<root_use_default=%s>>" % read_root_config("unlike", "powder", "predict")) # buildifier: disable=print + +print("<<local=%s>>" % read_config("unlike", "harsh")) # buildifier: disable=print diff --git a/tests/core/interpreter/test_read_root_config_data/other/.buckconfig b/tests/core/interpreter/test_read_root_config_data/other/.buckconfig new file mode 100644 index 0000000000000..f9542c6333868 --- /dev/null +++ b/tests/core/interpreter/test_read_root_config_data/other/.buckconfig @@ -0,0 +1,4 @@ +[buildfile] + name = TARGETS.fixture +[unlike] + harsh = guerrilla diff --git a/tests/core/interpreter/test_read_root_config_data/other/TARGETS.fixture b/tests/core/interpreter/test_read_root_config_data/other/TARGETS.fixture new file mode 100644 index 0000000000000..ec7ecb7f8c525 --- /dev/null +++ b/tests/core/interpreter/test_read_root_config_data/other/TARGETS.fixture @@ -0,0 +1,5 @@ +print("{{root=%s}}" % read_root_config("unlike", "harsh")) # buildifier: disable=print +print("{{root_ignore_default=%s}}" % read_root_config("unlike", "harsh", "honor")) # buildifier: disable=print +print("{{root_use_default=%s}}" % read_root_config("unlike", "powder", "quantity")) # buildifier: disable=print + +print("{{local=%s}}" % read_config("unlike", "harsh")) # buildifier: disable=print diff --git a/tests/core/interpreter/test_read_root_config_data/prelude.bzl b/tests/core/interpreter/test_read_root_config_data/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/interpreter/test_sub_packages.py b/tests/core/interpreter/test_sub_packages.py new file mode 100644 index 0000000000000..a1b2d3f312cd8 --- /dev/null +++ b/tests/core/interpreter/test_sub_packages.py @@ -0,0 +1,22 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_sub_packages(buck: Buck) -> None: + res = await buck.targets("root//:") + assert 'Pkgs: ["cat/x", "dog"]' in res.stderr + res = await buck.targets("root//dog:") + assert 'Pkgs: ["y"]' in res.stderr + res = await buck.targets("root//cat/x:") + assert "Pkgs: []" in res.stderr diff --git a/tests/core/interpreter/test_sub_packages_data/.buckconfig b/tests/core/interpreter/test_sub_packages_data/.buckconfig new file mode 100644 index 0000000000000..814d1d7cd8e34 --- /dev/null +++ b/tests/core/interpreter/test_sub_packages_data/.buckconfig @@ -0,0 +1,6 @@ +[cells] + root = . + prelude = . 
+ +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/interpreter/test_sub_packages_data/.buckroot b/tests/core/interpreter/test_sub_packages_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/interpreter/test_sub_packages_data/TARGETS.fixture b/tests/core/interpreter/test_sub_packages_data/TARGETS.fixture new file mode 100644 index 0000000000000..5873cbfe33970 --- /dev/null +++ b/tests/core/interpreter/test_sub_packages_data/TARGETS.fixture @@ -0,0 +1,2 @@ +# buildifier: disable=print +print("Pkgs: " + str(__internal__.sub_packages())) diff --git a/tests/core/interpreter/test_sub_packages_data/cat/x/TARGETS.fixture b/tests/core/interpreter/test_sub_packages_data/cat/x/TARGETS.fixture new file mode 100644 index 0000000000000..5873cbfe33970 --- /dev/null +++ b/tests/core/interpreter/test_sub_packages_data/cat/x/TARGETS.fixture @@ -0,0 +1,2 @@ +# buildifier: disable=print +print("Pkgs: " + str(__internal__.sub_packages())) diff --git a/tests/core/interpreter/test_sub_packages_data/dog/TARGETS.fixture b/tests/core/interpreter/test_sub_packages_data/dog/TARGETS.fixture new file mode 100644 index 0000000000000..5873cbfe33970 --- /dev/null +++ b/tests/core/interpreter/test_sub_packages_data/dog/TARGETS.fixture @@ -0,0 +1,2 @@ +# buildifier: disable=print +print("Pkgs: " + str(__internal__.sub_packages())) diff --git a/tests/core/interpreter/test_sub_packages_data/dog/y/TARGETS.fixture b/tests/core/interpreter/test_sub_packages_data/dog/y/TARGETS.fixture new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/interpreter/test_sub_packages_data/prelude.bzl b/tests/core/interpreter/test_sub_packages_data/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/interpreter/test_unstable_typecheck.py b/tests/core/interpreter/test_unstable_typecheck.py new file mode 100644 index 0000000000000..fac0022daa333 --- /dev/null +++ b/tests/core/interpreter/test_unstable_typecheck.py @@ -0,0 +1,22 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_unstable_typecheck(buck: Buck) -> None: + await buck.cquery("//:x") + await expect_failure( + buck.cquery("//:x", "--unstable-typecheck"), + stderr_regex="Expected type `int` but got `str`", + ) diff --git a/tests/core/interpreter/test_unstable_typecheck_data/.buckconfig b/tests/core/interpreter/test_unstable_typecheck_data/.buckconfig new file mode 100644 index 0000000000000..025979aa26374 --- /dev/null +++ b/tests/core/interpreter/test_unstable_typecheck_data/.buckconfig @@ -0,0 +1,5 @@ +[repositories] + root = . 
+ prelude = prelude +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/interpreter/test_unstable_typecheck_data/.buckroot b/tests/core/interpreter/test_unstable_typecheck_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/interpreter/test_unstable_typecheck_data/TARGETS.fixture b/tests/core/interpreter/test_unstable_typecheck_data/TARGETS.fixture new file mode 100644 index 0000000000000..ed9614500c746 --- /dev/null +++ b/tests/core/interpreter/test_unstable_typecheck_data/TARGETS.fixture @@ -0,0 +1,3 @@ +load("bad.bzl", "x") + +ttt(name = x) diff --git a/tests/core/interpreter/test_unstable_typecheck_data/bad.bzl b/tests/core/interpreter/test_unstable_typecheck_data/bad.bzl new file mode 100644 index 0000000000000..8fbd40508c6b8 --- /dev/null +++ b/tests/core/interpreter/test_unstable_typecheck_data/bad.bzl @@ -0,0 +1,11 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +x = "x" + +def bad() -> int: + return "s" diff --git a/tests/core/interpreter/test_unstable_typecheck_data/prelude/prelude.bzl b/tests/core/interpreter/test_unstable_typecheck_data/prelude/prelude.bzl new file mode 100644 index 0000000000000..c26b14adb2126 --- /dev/null +++ b/tests/core/interpreter/test_unstable_typecheck_data/prelude/prelude.bzl @@ -0,0 +1,12 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(ctx): + _unused = ctx # buildifier: disable=unused-variable + return [DefaultInfo()] + +ttt = rule(impl = _impl, attrs = {}) diff --git a/tests/core/interpreter/test_v2_only.py b/tests/core/interpreter/test_v2_only.py new file mode 100644 index 0000000000000..639ad3078c33e --- /dev/null +++ b/tests/core/interpreter/test_v2_only.py @@ -0,0 +1,18 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_v2_only(buck: Buck) -> None: + # Just check it works. + await buck.build("root//:") diff --git a/tests/core/interpreter/test_v2_only_data/.buckconfig b/tests/core/interpreter/test_v2_only_data/.buckconfig new file mode 100644 index 0000000000000..df06a02c03ca2 --- /dev/null +++ b/tests/core/interpreter/test_v2_only_data/.buckconfig @@ -0,0 +1,5 @@ +[cells] + root = . 
+ +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/interpreter/test_v2_only_data/.buckroot b/tests/core/interpreter/test_v2_only_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/interpreter/test_v2_only_data/TARGETS.fixture b/tests/core/interpreter/test_v2_only_data/TARGETS.fixture new file mode 100644 index 0000000000000..0e669d3b66481 --- /dev/null +++ b/tests/core/interpreter/test_v2_only_data/TARGETS.fixture @@ -0,0 +1,3 @@ +load(":defs.bzl?v2_only", "my_rule") + +my_rule(name = "my") diff --git a/tests/core/interpreter/test_v2_only_data/defs.bzl b/tests/core/interpreter/test_v2_only_data/defs.bzl new file mode 100644 index 0000000000000..3c50d323e15d9 --- /dev/null +++ b/tests/core/interpreter/test_v2_only_data/defs.bzl @@ -0,0 +1,13 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(ctx): + _ignore = ctx # buildifier: disable=unused-variable + return [DefaultInfo()] + +# This bzl file cannot be interpreted with Buck1 because there's no `rule` builtin. +my_rule = rule(impl = _impl, attrs = {}) diff --git a/tests/core/invalidation/BUCK b/tests/core/invalidation/BUCK new file mode 100644 index 0000000000000..c73b45df059c8 --- /dev/null +++ b/tests/core/invalidation/BUCK @@ -0,0 +1,24 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_root_directory", + srcs = ["test_root_directory.py"], + data_dir = "test_root_directory_data", + deps = [ + "fbcode//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_forward_node", + srcs = ["test_forward_node.py"], + data_dir = "test_forward_node_data", + deps = [ + "fbsource//third-party/pypi/pyre-extensions:pyre-extensions", + "//buck2/tests/e2e_util:utilities", + "//buck2/tests/e2e_util:utils", + "//buck2/tests/e2e_util/api:api", + ], +) diff --git a/tests/core/invalidation/test_forward_node.py b/tests/core/invalidation/test_forward_node.py new file mode 100644 index 0000000000000..1ac6ba22aa749 --- /dev/null +++ b/tests/core/invalidation/test_forward_node.py @@ -0,0 +1,36 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.utils import filter_events + + +@buck_test() +async def test_forward_node_supports_cutoff(buck: Buck) -> None: + await buck.targets("--show-output", "root//:main") + # Add a file to the root directory + with open(buck.cwd / "TARGETS.fixture", "a") as targetsfile: + targetsfile.write("\n# a comment\n") + await buck.targets("--show-output", "root//:main") + + events = await filter_events(buck, "Event", "data", "SpanEnd", "data") + loads = [] + analyses = [] + + for ev in events: + if "Load" in ev: + loads.append(ev) + if "Analysis" in ev: + analyses.append(ev) + + assert len(loads) > 0 + # TODO(cjhopman): fix + assert len(analyses) == 0, "should not have analysed anything" diff --git a/tests/core/invalidation/test_forward_node_data/.buckconfig b/tests/core/invalidation/test_forward_node_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/invalidation/test_forward_node_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . + nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/invalidation/test_forward_node_data/.buckroot b/tests/core/invalidation/test_forward_node_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/invalidation/test_forward_node_data/TARGETS.fixture b/tests/core/invalidation/test_forward_node_data/TARGETS.fixture new file mode 100644 index 0000000000000..cc819e4075332 --- /dev/null +++ b/tests/core/invalidation/test_forward_node_data/TARGETS.fixture @@ -0,0 +1,25 @@ +load(":defs.bzl", "alias", "self_transitioned_build") + +self_transitioned_build( + name = "target", +) + +alias( + name = "main", + actual = ":target", + default_target_platform = ":default", +) + +constraint_setting( + name = "for_transition", +) + +constraint_value( + name = "value", + constraint_setting = ":for_transition", +) + +platform( + name = "default", + constraint_values = [], +) diff --git a/tests/core/invalidation/test_forward_node_data/defs.bzl b/tests/core/invalidation/test_forward_node_data/defs.bzl new file mode 100644 index 0000000000000..0f0c01794b98d --- /dev/null +++ b/tests/core/invalidation/test_forward_node_data/defs.bzl @@ -0,0 +1,46 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# Rule with no attrs that produces an output. 
Useful if you want to be able to +# build literally anything +def _impl(ctx): + return [DefaultInfo()] + +def _transition_impl(platform, refs): + _ignore = (platform) + constraint = refs.value[ConstraintValueInfo] + return PlatformInfo( + label = "", + configuration = ConfigurationInfo( + constraints = { + constraint.setting.label: constraint, + }, + values = {}, + ), + ) + +_tr = transition( + impl = _transition_impl, + refs = {"value": "//:value"}, +) + +self_transitioned_build = rule( + impl = _impl, + cfg = _tr, + attrs = { + }, +) + +def _alias_impl(ctx): + return ctx.attrs.actual.providers + +alias = rule( + impl = _alias_impl, + attrs = { + "actual": attrs.dep(), + }, +) diff --git a/tests/core/invalidation/test_root_directory.py b/tests/core/invalidation/test_root_directory.py new file mode 100644 index 0000000000000..f11b961974a41 --- /dev/null +++ b/tests/core/invalidation/test_root_directory.py @@ -0,0 +1,36 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.utils import json_get + + +@buck_test() +async def test_no_dice_invalidation_on_root_directory_changes(buck: Buck) -> None: + await buck.build("root//dir:") + + # Add a file to the root directory + (buck.cwd / "file.txt").write_text("hello world") + + await buck.build("root//dir:") + + log = (await buck.log("show")).stdout.splitlines() + + for line in log: + e = json_get( + line, + "Event", + "data", + "SpanEnd", + "data", + "Load", + ) + assert e is None, "Should not have loaded anything" diff --git a/tests/core/invalidation/test_root_directory_data/.buckconfig b/tests/core/invalidation/test_root_directory_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/invalidation/test_root_directory_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . 
+ nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/invalidation/test_root_directory_data/.buckroot b/tests/core/invalidation/test_root_directory_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/invalidation/test_root_directory_data/dir/TARGETS.fixture b/tests/core/invalidation/test_root_directory_data/dir/TARGETS.fixture new file mode 100644 index 0000000000000..61d7737875d4d --- /dev/null +++ b/tests/core/invalidation/test_root_directory_data/dir/TARGETS.fixture @@ -0,0 +1,3 @@ +trivial_build( + name = "target", +) diff --git a/tests/core/invocation_record/BUCK b/tests/core/invocation_record/BUCK new file mode 100644 index 0000000000000..661291f930888 --- /dev/null +++ b/tests/core/invocation_record/BUCK @@ -0,0 +1,13 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_invocation_record", + srcs = ["test_invocation_record.py"], + data_dir = "test_invocation_record_data", + serialize_test_cases = False, + deps = [ + "//buck2/tests/e2e_util:utils", + ], +) diff --git a/tests/core/invocation_record/test_invocation_record.py b/tests/core/invocation_record/test_invocation_record.py new file mode 100644 index 0000000000000..7d0862c3f98aa --- /dev/null +++ b/tests/core/invocation_record/test_invocation_record.py @@ -0,0 +1,290 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + + +import asyncio +import json +import signal +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.utils import read_invocation_record + +# FIXME(JakobDegen): Flaky in CI +if False: + + @buck_test(skip_for_os=["windows"]) # TODO(T154836875) + async def test_has_end_of_stream_false(buck: Buck, tmp_path: Path) -> None: + hang_path = tmp_path / "hang_path" + record = tmp_path / "record.json" + + cmd = await buck.build( + ":hang", + "-c", + f"test.hang_path={hang_path}", + "--unstable-write-invocation-record", + str(record), + "--local-only", + "--no-remote-cache", + ).start() + + for _ in range(10): + if hang_path.exists(): + break + await asyncio.sleep(1) + else: + print(await cmd.communicate()) + raise Exception(f"Signal file never created: {hang_path}") + + cmd.send_signal(signal.SIGINT) + await cmd.communicate() + + record = read_invocation_record(record) + + assert not record["has_end_of_stream"] + assert not record["has_command_result"] + + +@buck_test() +async def test_has_end_of_stream_true(buck: Buck, tmp_path: Path) -> None: + record = tmp_path / "record.json" + + await buck.build(":pass", "--unstable-write-invocation-record", str(record)) + + record = read_invocation_record(record) + + assert record["has_end_of_stream"] + assert record["has_command_result"] + + +@buck_test(skip_for_os=["windows"]) # TODO(T154836632) +async def test_has_no_command_result(buck: Buck, tmp_path: Path) -> None: + record = tmp_path / "record.json" + + # Start the daemon + await buck.build() + + status = json.loads((await buck.status()).stdout) + pid = status["process_info"]["pid"] + + await expect_failure( + buck.build( + ":kill", + "-c", + f"test.pid={pid}", + "--unstable-write-invocation-record", + str(record), + "--local-only", + "--no-remote-cache", + ), + stderr_regex="Buck daemon event bus encountered an error", + ) + + record = read_invocation_record(record) + + assert record["has_end_of_stream"] + assert not record["has_command_result"] + + +@buck_test(skip_for_os=["windows"]) # TODO(T154836632) +async def test_metadata(buck: Buck, tmp_path: Path) -> None: + record = tmp_path / "record.json" + + # Start the daemon + await buck.build("--unstable-write-invocation-record", str(record)) + + record = read_invocation_record(record) + + assert "username" in record["metadata"]["strings"] + + +@buck_test(skip_for_os=["windows"]) # TODO(T154836632) +async def test_client_metadata(buck: Buck, tmp_path: Path) -> None: + record = tmp_path / "record.json" + + # Start the daemon + await buck.build( + "--client-metadata=foo=bar", + "--client-metadata=id=baz", + "--unstable-write-invocation-record", + str(record), + ) + + record = read_invocation_record(record) + + assert record["client_metadata"] == [ + {"key": "foo", "value": "bar"}, + {"key": "id", "value": "baz"}, + ] + + assert record["metadata"]["strings"]["client"] == "baz" + + +@buck_test(skip_for_os=["windows"]) # TODO(T154836632) +async def test_client_metadata_clean(buck: Buck, tmp_path: Path) -> None: + record = tmp_path / "record.json" + + # Start the daemon + await buck.clean( + "--client-metadata=foo=bar", + "--client-metadata=id=baz", + "--unstable-write-invocation-record", + str(record), + ) + + record = read_invocation_record(record) + + assert record["client_metadata"] == [ + {"key": "foo", "value": "bar"}, + {"key": "id", "value": "baz"}, + ] + + assert 
record["metadata"]["strings"]["client"] == "baz" + + +@buck_test() +async def test_action_error_message_in_record(buck: Buck, tmp_path: Path) -> None: + record = tmp_path / "record.json" + await expect_failure( + buck.build(":fail", "--unstable-write-invocation-record", str(record)) + ) + + record = read_invocation_record(record) + + assert len(record["command_end"]["errors"]) == 0 + assert len(record["errors"]) == 1 + assert ( + record["errors"][0]["message"] + == "Failed to build 'root//:fail (<unspecified>)'" + ) + assert "Hi from stderr!" in record["errors"][0]["telemetry_message"] + + +@buck_test() +async def test_non_action_error_message_in_record(buck: Buck, tmp_path: Path) -> None: + record = tmp_path / "record.json" + await expect_failure( + buck.build(":missing_target", "--unstable-write-invocation-record", str(record)) + ) + + record = read_invocation_record(record) + + assert len(record["errors"]) == 1 + assert record["errors"][0]["message"].startswith( + "Unknown target `missing_target` from package `root//`" + ) + assert len(record["command_end"]["errors"]) == 0 + + +@buck_test(skip_for_os=["windows"]) # TODO(T154836632) +async def test_rule_type_names_ci(buck: Buck, tmp_path: Path) -> None: + record = tmp_path / "record.json" + + # Start the daemon + await buck.build( + ":duplicate", + ":and_a_two", + ":last_three", + ":a_one", + "--unstable-write-invocation-record", + str(record), + env={"CI": "true"}, + ) + + record = read_invocation_record(record) + + assert record["target_rule_type_names"] == [ + "one", + "pass_", + "two", + ] + + +@buck_test(skip_for_os=["windows"]) # TODO(T154836632) +async def test_rule_type_names_sandcastle(buck: Buck, tmp_path: Path) -> None: + record = tmp_path / "record.json" + + # Start the daemon + await buck.build( + ":duplicate", + ":and_a_two", + ":last_three", + ":a_one", + "--unstable-write-invocation-record", + str(record), + env={"SANDCASTLE": "my_fake_job"}, + ) + + record = read_invocation_record(record) + + assert record["target_rule_type_names"] == [ + "one", + "pass_", + "two", + ] + + +@buck_test(skip_for_os=["windows"]) # TODO(T154836632) +async def test_rule_type_names_user(buck: Buck, tmp_path: Path) -> None: + record = tmp_path / "record.json" + + # Start the daemon + await buck.build( + ":and_a_two", + ":last_three", + ":a_one", + "--unstable-write-invocation-record", + str(record), + ) + + record = read_invocation_record(record) + + assert record["target_rule_type_names"] == [ + "one", + "pass_", + "two", + ] + + +@buck_test(skip_for_os=["windows"]) # TODO(T154836632) +async def test_rule_type_names_on_failure(buck: Buck, tmp_path: Path) -> None: + record = tmp_path / "record.json" + + # Start the daemon + await expect_failure( + buck.build( + ":fail", + ":last_three", + ":a_one", + "--unstable-write-invocation-record", + str(record), + ) + ) + + record = read_invocation_record(record) + + assert record["target_rule_type_names"] == [ + "fail", + "one", + "pass_", + ] + + +@buck_test() +async def test_active_networks_kinds(buck: Buck, tmp_path: Path) -> None: + record = tmp_path / "record.json" + + # Start the daemon + await buck.build("--unstable-write-invocation-record", str(record)) + + record = read_invocation_record(record) + + assert "active_networks_kinds" in record diff --git a/tests/core/invocation_record/test_invocation_record_data/.buckconfig b/tests/core/invocation_record/test_invocation_record_data/.buckconfig new file mode 100644 index 0000000000000..5cbd2899e0231 --- /dev/null +++ 
b/tests/core/invocation_record/test_invocation_record_data/.buckconfig @@ -0,0 +1,8 @@ +[repositories] + root = . +[repository_aliases] + prelude = root +[buildfile] + name = TARGETS.fixture +[buck2] + materializations = deferred diff --git a/tests/core/invocation_record/test_invocation_record_data/.buckroot b/tests/core/invocation_record/test_invocation_record_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/invocation_record/test_invocation_record_data/TARGETS.fixture b/tests/core/invocation_record/test_invocation_record_data/TARGETS.fixture new file mode 100644 index 0000000000000..4677905970cf8 --- /dev/null +++ b/tests/core/invocation_record/test_invocation_record_data/TARGETS.fixture @@ -0,0 +1,12 @@ +load(":defs.bzl", "fail", "hang", "kill", "one", "pass_", "two") + +hang(name = "hang", touch = read_config("test", "hang_path", "")) +pass_(name = "pass") +kill(name = "kill", pid = read_config("test", "pid", "")) + +fail(name = "fail") + +one(name = "a_one") +two(name = "and_a_two") +pass_(name = "last_three") +two(name = "duplicate") diff --git a/tests/core/invocation_record/test_invocation_record_data/defs.bzl b/tests/core/invocation_record/test_invocation_record_data/defs.bzl new file mode 100644 index 0000000000000..4f6be6c29e1d9 --- /dev/null +++ b/tests/core/invocation_record/test_invocation_record_data/defs.bzl @@ -0,0 +1,66 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _hang(ctx): + out = ctx.actions.declare_output("out") + ctx.actions.run( + ["python3", "-c", 'import os, time; open(os.environ["TOUCH"], "w"); time.sleep(100)'], + env = {"OUT": out.as_output(), "TOUCH": ctx.attrs.touch}, + category = "hang", + ) + return [DefaultInfo(out)] + +# Touch a file to signal, then hang. +hang = rule(attrs = {"touch": attrs.string()}, impl = _hang) + +def _pass(ctx): + out = ctx.actions.write("out", "") + return [DefaultInfo(out)] + +pass_ = rule(attrs = {}, impl = _pass) + +def _kill(ctx): + out = ctx.actions.declare_output("out") + ctx.actions.run( + ["python3", "-c", 'import os, signal; os.kill(int(os.environ["PID"]), signal.SIGKILL)'], + env = {"OUT": out.as_output(), "PID": ctx.attrs.pid}, + category = "kill", + ) + return [DefaultInfo(out)] + +kill = rule(attrs = {"pid": attrs.string()}, impl = _kill) + +def _fail(ctx): + out = ctx.actions.declare_output("out") + ctx.actions.run( + cmd_args( + "sh", + "-c", + 'echo "Hi from stderr!" 
>&2 && false', + hidden = out.as_output(), + ), + category = "fail", + ) + return [DefaultInfo(out)] + +fail = rule(attrs = {}, impl = _fail) + +def _one(ctx): + return [DefaultInfo(default_output = ctx.actions.write("out", "one"))] + +one = rule( + impl = _one, + attrs = {}, +) + +def _two(ctx): + return [DefaultInfo(default_output = ctx.actions.write("out", "two"))] + +two = rule( + impl = _two, + attrs = {}, +) diff --git a/tests/core/invocation_record/test_invocation_record_data/prelude.bzl b/tests/core/invocation_record/test_invocation_record_data/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/io/BUCK b/tests/core/io/BUCK new file mode 100644 index 0000000000000..faceb2f9ea971 --- /dev/null +++ b/tests/core/io/BUCK @@ -0,0 +1,22 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_compare_providers", + srcs = ["test_compare_providers.py"], + data_dir = "test_compare_providers_data", + deps = ["fbcode//buck2/tests/e2e_util:golden"], +) + +buck2_e2e_test( + name = "test_allow_eden", + srcs = ["test_allow_eden.py"], + data_dir = "test_allow_eden_data", +) + +buck2_e2e_test( + name = "test_modify_eden", + srcs = ["test_modify_eden.py"], + data_dir = "test_modify_eden_data", +) diff --git a/tests/core/io/test_allow_eden.py b/tests/core/io/test_allow_eden.py new file mode 100644 index 0000000000000..7dbfcfeb9bc61 --- /dev/null +++ b/tests/core/io/test_allow_eden.py @@ -0,0 +1,72 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +import json + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + +# This file acts both as a test of `buck2.allow_eden_io` and as a self-test +# of the `setup_eden` logic in the test runner + + +async def _check_io_provider(buck: Buck, name: str) -> None: + await buck.server() + out = await buck.status() + status = json.loads(out.stdout.strip()) + io_provider = status["io_provider"] + assert io_provider == name + + +@buck_test( + setup_eden=False, + extra_buck_config={ + "buck2": { + "allow_eden_io": "false", + } + }, +) +async def test_no_eden(buck: Buck) -> None: + await _check_io_provider(buck, "fs") + + +@buck_test( + setup_eden=False, + extra_buck_config={ + "buck2": { + "allow_eden_io": "true", + } + }, +) +async def test_allow_eden_io_ignored_on_fs_io(buck: Buck) -> None: + await _check_io_provider(buck, "fs") + + +@buck_test( + setup_eden=True, + extra_buck_config={ + "buck2": { + "allow_eden_io": "false", + } + }, +) +async def test_allow_eden_io_respected(buck: Buck) -> None: + await _check_io_provider(buck, "fs") + + +@buck_test( + setup_eden=True, + extra_buck_config={ + "buck2": { + "allow_eden_io": "true", + } + }, +) +async def test_eden_io(buck: Buck) -> None: + await _check_io_provider(buck, "eden") diff --git a/tests/core/io/test_allow_eden_data/.buckconfig b/tests/core/io/test_allow_eden_data/.buckconfig new file mode 100644 index 0000000000000..82ff4e5316342 --- /dev/null +++ b/tests/core/io/test_allow_eden_data/.buckconfig @@ -0,0 +1,2 @@ +[cells] + root = . 
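Note: the `io_provider` check above is just JSON parsing of `buck2 status` output. A minimal standalone sketch of the same probe, assuming only a `buck2` binary on PATH and a repo root to run in (the `io_provider` field is the one the tests assert on):

import json
import subprocess

def current_io_provider(repo_root: str) -> str:
    # `buck2 status` prints a JSON description of the running daemon;
    # the tests above assert on its "io_provider" field ("fs" or "eden").
    out = subprocess.run(
        ["buck2", "status"],
        cwd=repo_root,
        check=True,
        capture_output=True,
        text=True,
    )
    return json.loads(out.stdout.strip())["io_provider"]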
diff --git a/tests/core/io/test_allow_eden_data/.buckroot b/tests/core/io/test_allow_eden_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/io/test_compare_providers.py b/tests/core/io/test_compare_providers.py new file mode 100644 index 0000000000000..139f09ed82a3f --- /dev/null +++ b/tests/core/io/test_compare_providers.py @@ -0,0 +1,95 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import sys + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + +from buck2.tests.e2e_util.helper.golden import golden + + +async def _run_test(buck: Buck, name: str) -> None: + if sys.platform == "win32": + kind = "win" + targets = [ + "file_metadata/nested", + ] + else: + # Set up some symlinks. Do this here to avoid relying on the test runner + # copying these correctly. + symlink_dir = buck.cwd / "file_metadata" / "symlinks" + symlink_dir.mkdir() + (symlink_dir / "internal").symlink_to("../file") + (symlink_dir / "external").symlink_to("/absolute") + + kind = "unix" + targets = [ + "file_metadata", + "file_metadata/symlinks/internal/traverse", + "file_metadata/symlinks/external/traverse", + ] + + await buck.build() # Start Buck2 + + res = await buck.debug("file-status", "--show-matches", *targets) + golden(output=res.stdout, rel_path=f"golden/{name}.{kind}.out") + assert "MISMATCH" not in res.stdout + + +@buck_test( + setup_eden=True, + extra_buck_config={ + "buck2": { + "allow_eden_io": "false", + "source_digest_algorithm": "SHA1", + } + }, +) +async def test_default(buck: Buck) -> None: + await _run_test(buck, "default") + + +@buck_test( + setup_eden=True, + extra_buck_config={ + "buck2": { + "allow_eden_io": "true", + "source_digest_algorithm": "SHA1", + } + }, +) +async def test_eden(buck: Buck) -> None: + await _run_test(buck, "default") + + +@buck_test( + setup_eden=True, + extra_buck_config={ + "buck2": { + "allow_eden_io": "false", + "source_digest_algorithm": "BLAKE3-KEYED", + } + }, +) +async def test_blake3(buck: Buck) -> None: + await _run_test(buck, "blake3") + + +@buck_test( + setup_eden=True, + extra_buck_config={ + "buck2": { + "source_digest_algorithm": "BLAKE3-KEYED", + } + }, +) +async def test_eden_blake3(buck: Buck) -> None: + await _run_test(buck, "blake3") diff --git a/tests/core/io/test_compare_providers_data/.buckconfig b/tests/core/io/test_compare_providers_data/.buckconfig new file mode 100644 index 0000000000000..8a71b6d8a6877 --- /dev/null +++ b/tests/core/io/test_compare_providers_data/.buckconfig @@ -0,0 +1,5 @@ +[cells] + root = . 
+ +[buck2] + digest_algorithms = BLAKE3-KEYED,SHA1 diff --git a/tests/core/io/test_compare_providers_data/.buckroot b/tests/core/io/test_compare_providers_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/io/test_compare_providers_data/file_metadata/file b/tests/core/io/test_compare_providers_data/file_metadata/file new file mode 100644 index 0000000000000..7a1c6130c652b --- /dev/null +++ b/tests/core/io/test_compare_providers_data/file_metadata/file @@ -0,0 +1 @@ +some file diff --git a/tests/core/io/test_compare_providers_data/file_metadata/nested/empty b/tests/core/io/test_compare_providers_data/file_metadata/nested/empty new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/io/test_compare_providers_data/file_metadata/nested/file b/tests/core/io/test_compare_providers_data/file_metadata/nested/file new file mode 100644 index 0000000000000..f73f3093ff865 --- /dev/null +++ b/tests/core/io/test_compare_providers_data/file_metadata/nested/file @@ -0,0 +1 @@ +file diff --git a/tests/core/io/test_compare_providers_data/golden/blake3.unix.out b/tests/core/io/test_compare_providers_data/golden/blake3.unix.out new file mode 100644 index 0000000000000..1067182be4b46 --- /dev/null +++ b/tests/core/io/test_compare_providers_data/golden/blake3.unix.out @@ -0,0 +1,30 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Match: entry kind at file_metadata: Directory +Match: directory contents at file_metadata: file, nested, symlinks +Match: entry kind at file_metadata/file: File +Match: file metadata at file_metadata/file: File(digest=f69ccfdc3d8ec821134049bf9a569861876422615905fd0cb846e497c50bddf5:10, is_executable=false) +Match: entry kind at file_metadata/nested: Directory +Match: directory contents at file_metadata/nested: empty, file +Match: entry kind at file_metadata/nested/empty: File +Match: file metadata at file_metadata/nested/empty: File(digest=72bca042841f347a7b5671af8902e4615550c66bcc670223d86611102d0a0168:0, is_executable=false) +Match: entry kind at file_metadata/nested/file: File +Match: file metadata at file_metadata/nested/file: File(digest=e2086e0a751b10df5222435cc0e0a2c64d24e59dbd78d0ba0d2edd95799dae04:5, is_executable=false) +Match: entry kind at file_metadata/symlinks: Directory +Match: directory contents at file_metadata/symlinks: external, internal +Match: entry kind at file_metadata/symlinks/external: Symlink +Match: symlink component location at file_metadata/symlinks/external: file_metadata/symlinks/external +Match: symlink destination kind at file_metadata/symlinks/external: External +Match: external symlink destination at file_metadata/symlinks/external: /absolute +Match: entry kind at file_metadata/symlinks/internal: Symlink +Match: symlink component location at file_metadata/symlinks/internal: file_metadata/symlinks/internal +Match: symlink destination kind at file_metadata/symlinks/internal: Relative +Match: relative symlink destination at file_metadata/symlinks/internal: file_metadata/file +Match: entry kind at file_metadata/symlinks/internal/traverse: Symlink +Match: symlink component location at file_metadata/symlinks/internal/traverse: file_metadata/symlinks/internal +Match: symlink destination kind at file_metadata/symlinks/internal/traverse: Relative +Match: relative symlink destination at file_metadata/symlinks/internal/traverse: file_metadata/file/traverse +Match: entry kind at 
file_metadata/symlinks/external/traverse: Symlink +Match: symlink component location at file_metadata/symlinks/external/traverse: file_metadata/symlinks/external +Match: symlink destination kind at file_metadata/symlinks/external/traverse: External +Match: external symlink destination at file_metadata/symlinks/external/traverse: /absolute/traverse diff --git a/tests/core/io/test_compare_providers_data/golden/blake3.win.out b/tests/core/io/test_compare_providers_data/golden/blake3.win.out new file mode 100644 index 0000000000000..e727d8880e1b7 --- /dev/null +++ b/tests/core/io/test_compare_providers_data/golden/blake3.win.out @@ -0,0 +1,8 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Match: entry kind at file_metadata/nested: Directory +Match: directory contents at file_metadata/nested: empty, file +Match: entry kind at file_metadata/nested/empty: File +Match: file metadata at file_metadata/nested/empty: File(digest=72bca042841f347a7b5671af8902e4615550c66bcc670223d86611102d0a0168:0, is_executable=false) +Match: entry kind at file_metadata/nested/file: File +Match: file metadata at file_metadata/nested/file: File(digest=e2086e0a751b10df5222435cc0e0a2c64d24e59dbd78d0ba0d2edd95799dae04:5, is_executable=false) diff --git a/tests/core/io/test_compare_providers_data/golden/default.unix.out b/tests/core/io/test_compare_providers_data/golden/default.unix.out new file mode 100644 index 0000000000000..1ac2efdc86928 --- /dev/null +++ b/tests/core/io/test_compare_providers_data/golden/default.unix.out @@ -0,0 +1,30 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Match: entry kind at file_metadata: Directory +Match: directory contents at file_metadata: file, nested, symlinks +Match: entry kind at file_metadata/file: File +Match: file metadata at file_metadata/file: File(digest=0e08b5e8c10abc3e455b75286ba4a1fbd56e18a5:10, is_executable=false) +Match: entry kind at file_metadata/nested: Directory +Match: directory contents at file_metadata/nested: empty, file +Match: entry kind at file_metadata/nested/empty: File +Match: file metadata at file_metadata/nested/empty: File(digest=da39a3ee5e6b4b0d3255bfef95601890afd80709:0, is_executable=false) +Match: entry kind at file_metadata/nested/file: File +Match: file metadata at file_metadata/nested/file: File(digest=046c168df2244d3a13985f042a50e479fe56455e:5, is_executable=false) +Match: entry kind at file_metadata/symlinks: Directory +Match: directory contents at file_metadata/symlinks: external, internal +Match: entry kind at file_metadata/symlinks/external: Symlink +Match: symlink component location at file_metadata/symlinks/external: file_metadata/symlinks/external +Match: symlink destination kind at file_metadata/symlinks/external: External +Match: external symlink destination at file_metadata/symlinks/external: /absolute +Match: entry kind at file_metadata/symlinks/internal: Symlink +Match: symlink component location at file_metadata/symlinks/internal: file_metadata/symlinks/internal +Match: symlink destination kind at file_metadata/symlinks/internal: Relative +Match: relative symlink destination at file_metadata/symlinks/internal: file_metadata/file +Match: entry kind at file_metadata/symlinks/internal/traverse: Symlink +Match: symlink component location at file_metadata/symlinks/internal/traverse: file_metadata/symlinks/internal +Match: symlink destination kind at 
file_metadata/symlinks/internal/traverse: Relative +Match: relative symlink destination at file_metadata/symlinks/internal/traverse: file_metadata/file/traverse +Match: entry kind at file_metadata/symlinks/external/traverse: Symlink +Match: symlink component location at file_metadata/symlinks/external/traverse: file_metadata/symlinks/external +Match: symlink destination kind at file_metadata/symlinks/external/traverse: External +Match: external symlink destination at file_metadata/symlinks/external/traverse: /absolute/traverse diff --git a/tests/core/io/test_compare_providers_data/golden/default.win.out b/tests/core/io/test_compare_providers_data/golden/default.win.out new file mode 100644 index 0000000000000..323cb7904b0ac --- /dev/null +++ b/tests/core/io/test_compare_providers_data/golden/default.win.out @@ -0,0 +1,8 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Match: entry kind at file_metadata/nested: Directory +Match: directory contents at file_metadata/nested: empty, file +Match: entry kind at file_metadata/nested/empty: File +Match: file metadata at file_metadata/nested/empty: File(digest=da39a3ee5e6b4b0d3255bfef95601890afd80709:0, is_executable=false) +Match: entry kind at file_metadata/nested/file: File +Match: file metadata at file_metadata/nested/file: File(digest=046c168df2244d3a13985f042a50e479fe56455e:5, is_executable=false) diff --git a/tests/core/io/test_modify_eden.py b/tests/core/io/test_modify_eden.py new file mode 100644 index 0000000000000..8b797a405c21d --- /dev/null +++ b/tests/core/io/test_modify_eden.py @@ -0,0 +1,29 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(setup_eden=True) +async def test_modify_src_eden(buck: Buck) -> None: + path = buck.cwd / "src.txt" + + path.write_text("HELLO\n") + result = await buck.build("root//:copy_file") + output = result.get_build_report().output_for_target("root//:copy_file") + assert Path(output).read_text() == "HELLO\n" + + path.write_text("GOODBYE\n") + result = await buck.build("root//:copy_file") + output = result.get_build_report().output_for_target("root//:copy_file") + assert Path(output).read_text() == "GOODBYE\n" diff --git a/tests/core/io/test_modify_eden_data/.buckconfig b/tests/core/io/test_modify_eden_data/.buckconfig new file mode 100644 index 0000000000000..df06a02c03ca2 --- /dev/null +++ b/tests/core/io/test_modify_eden_data/.buckconfig @@ -0,0 +1,5 @@ +[cells] + root = . 
+ +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/io/test_modify_eden_data/.buckroot b/tests/core/io/test_modify_eden_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/io/test_modify_eden_data/TARGETS.fixture b/tests/core/io/test_modify_eden_data/TARGETS.fixture new file mode 100644 index 0000000000000..b9543e1908d17 --- /dev/null +++ b/tests/core/io/test_modify_eden_data/TARGETS.fixture @@ -0,0 +1,6 @@ +load(":copy_file.bzl", "copy_file") + +copy_file( + name = "copy_file", + src = "src.txt", +) diff --git a/tests/core/io/test_modify_eden_data/copy_file.bzl b/tests/core/io/test_modify_eden_data/copy_file.bzl new file mode 100644 index 0000000000000..8cd8e6b74d93e --- /dev/null +++ b/tests/core/io/test_modify_eden_data/copy_file.bzl @@ -0,0 +1,17 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(ctx): + out = ctx.actions.copy_file("out.txt", ctx.attrs.src) + return [DefaultInfo(default_output = out)] + +copy_file = rule( + impl = _impl, + attrs = { + "src": attrs.source(), + }, +) diff --git a/tests/core/io/test_modify_eden_data/src.txt b/tests/core/io/test_modify_eden_data/src.txt new file mode 100644 index 0000000000000..5da849b5c6f00 --- /dev/null +++ b/tests/core/io/test_modify_eden_data/src.txt @@ -0,0 +1 @@ +ABC diff --git a/tests/core/kill/BUCK b/tests/core/kill/BUCK new file mode 100644 index 0000000000000..1258132c7a10e --- /dev/null +++ b/tests/core/kill/BUCK @@ -0,0 +1,10 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_kill", + srcs = ["test_kill.py"], + data_dir = "test_kill_data", + serialize_test_cases = False, +) diff --git a/tests/core/kill/test_kill.py b/tests/core/kill/test_kill.py new file mode 100644 index 0000000000000..f5d9fb3403af8 --- /dev/null +++ b/tests/core/kill/test_kill.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test, env + + +@buck_test() +@env("BUCK2_TEST_FAIL_BUCKD_AUTH", "true") +async def test_kill_error(buck: Buck) -> None: + # Performing a build should fail, since we will not be able to authenticate to the + # buck daemon + await expect_failure(buck.build("//:abc"), stderr_regex="injected auth error") + + # Kill should succeed, even though we cannot authenticate to the daemon + await buck.kill() + + +@buck_test() +@env("BUCK2_TEST_FAIL_BUCKD_AUTH", "true") +async def test_clean_error(buck: Buck) -> None: + # Performing a build should fail, since we will not be able to authenticate to the + # buck daemon + await expect_failure(buck.build("//:abc"), stderr_regex="injected auth error") + + # Clean should succeed, even though we cannot authenticate to the daemon + await buck.clean() diff --git a/tests/core/kill/test_kill_data/.buckconfig b/tests/core/kill/test_kill_data/.buckconfig new file mode 100644 index 0000000000000..2988e9614be50 --- /dev/null +++ b/tests/core/kill/test_kill_data/.buckconfig @@ -0,0 +1,5 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +prelude = . diff --git a/tests/core/kill/test_kill_data/.buckroot b/tests/core/kill/test_kill_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/kill/test_kill_data/TARGETS.fixture b/tests/core/kill/test_kill_data/TARGETS.fixture new file mode 100644 index 0000000000000..ba572ee21bb0e --- /dev/null +++ b/tests/core/kill/test_kill_data/TARGETS.fixture @@ -0,0 +1 @@ +rule_for_test_kill(name = "abc") diff --git a/tests/core/kill/test_kill_data/prelude.bzl b/tests/core/kill/test_kill_data/prelude.bzl new file mode 100644 index 0000000000000..3e4681d5ec4fb --- /dev/null +++ b/tests/core/kill/test_kill_data/prelude.bzl @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +def _rule_for_test_kill_impl( + # starlark-lint-disable unused-argument + ctx): # @unused + return [DefaultInfo()] + +rule_for_test_kill = rule( + impl = _rule_for_test_kill_impl, + attrs = {}, +) diff --git a/tests/core/log/BUCK b/tests/core/log/BUCK new file mode 100644 index 0000000000000..b4a8a2ad18829 --- /dev/null +++ b/tests/core/log/BUCK @@ -0,0 +1,58 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_log", + srcs = ["test_log.py"], + data_dir = "test_log_data", + deps = [ + "//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_user_event_log", + srcs = ["test_user_event_log.py"], + data_dir = "test_user_event_log_data", + deps = [ + "//buck2/tests/e2e_util:golden", + ], +) + +buck2_e2e_test( + name = "test_summary", + srcs = ["test_summary.py"], + data_dir = "test_summary_data", +) + +buck2_e2e_test( + name = "test_what_ran_incomplete", + srcs = ["test_what_ran_incomplete.py"], + data_dir = "test_what_ran_incomplete_data", +) + +buck2_e2e_test( + name = "test_what_materialized", + srcs = ["test_what_materialized.py"], + data_dir = "test_what_materialized_data", + use_compiled_buck2_client_and_tpx = True, +) + +buck2_e2e_test( + name = "test_what_uploaded", + srcs = ["test_what_uploaded.py"], + data_dir = "test_what_uploaded_data", +) + +buck2_e2e_test( + name = "test_whatup", + srcs = ["test_whatup.py"], + data_dir = "test_whatup_data", +) + +buck2_e2e_test( + name = "test_diff", + srcs = ["test_diff.py"], + data_dir = "test_diff_data", +) diff --git a/tests/core/log/test_diff.py b/tests/core/log/test_diff.py new file mode 100644 index 0000000000000..316ee560ed44b --- /dev/null +++ b/tests/core/log/test_diff.py @@ -0,0 +1,49 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +import typing + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +def with_buck2_args(output: str) -> typing.List[str]: + return [ + "-c", + f"test.buck2_output={output}", + ] + + +@buck_test() +async def test_no_action_divergence_command(buck: Buck) -> None: + await buck.build("//:simple", *with_buck2_args("foo")) + out1 = await buck.log("last") + path1 = out1.stdout.strip() + await buck.build("//:simple", *with_buck2_args("foo")) + out2 = await buck.log("last") + path2 = out2.stdout.strip() + out = await buck.log( + "diff", "action-divergence", "--path1", path1, "--path2", path2 + ) + + assert "No divergent actions found." in out.stdout + + +@buck_test() +async def test_action_divergence_command(buck: Buck) -> None: + await buck.build("//:non_det", *with_buck2_args("foo")) + await buck.build("//:non_det", *with_buck2_args("bar")) + out = await buck.log( + "diff", "action-divergence", "--recent1", "0", "--recent2", "1" + ) + + assert ( + "Present in both builds with differing output digests\nprelude//:non_det () (write foo.txt)" + in out.stdout + ) diff --git a/tests/core/log/test_diff_data/.buckconfig b/tests/core/log/test_diff_data/.buckconfig new file mode 100644 index 0000000000000..2988e9614be50 --- /dev/null +++ b/tests/core/log/test_diff_data/.buckconfig @@ -0,0 +1,5 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +prelude = . 
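For reference, `test_action_divergence_command` below drives `buck2 log diff action-divergence` through the test API; a sketch of the equivalent direct invocation, assuming a `buck2` binary on PATH and using only flags the tests themselves pass:

import subprocess

def diff_last_two_builds() -> str:
    # Compare action output digests between the two most recent event logs;
    # prints "No divergent actions found." when nothing diverged.
    out = subprocess.run(
        ["buck2", "log", "diff", "action-divergence", "--recent1", "0", "--recent2", "1"],
        check=True,
        capture_output=True,
        text=True,
    )
    return out.stdout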
diff --git a/tests/core/log/test_diff_data/.buckroot b/tests/core/log/test_diff_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/log/test_diff_data/TARGETS.fixture b/tests/core/log/test_diff_data/TARGETS.fixture new file mode 100644 index 0000000000000..167a509b2e5a3 --- /dev/null +++ b/tests/core/log/test_diff_data/TARGETS.fixture @@ -0,0 +1,8 @@ +trivial_build( + name = "simple", +) + +non_det_build( + name = "non_det", + buck2_output = read_root_config("test", "buck2_output"), +) diff --git a/tests/core/log/test_diff_data/prelude.bzl b/tests/core/log/test_diff_data/prelude.bzl new file mode 100644 index 0000000000000..d89e42f48b534 --- /dev/null +++ b/tests/core/log/test_diff_data/prelude.bzl @@ -0,0 +1,26 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# Rule that produces its output non-deterministically: it writes the value provided via buckconfig, so different configs yield different outputs +def _non_det_build(ctx): + return [DefaultInfo(default_output = ctx.actions.write("foo.txt", ctx.attrs.buck2_output))] + +non_det_build = rule( + impl = _non_det_build, + attrs = { + "buck2_output": attrs.string(), + }, +) + +# Rule with no attrs that produces an output deterministically +def _trivial_build(ctx): + return [DefaultInfo(default_output = ctx.actions.write("foo.txt", "abcd"))] + +trivial_build = rule( + impl = _trivial_build, + attrs = {}, +) diff --git a/tests/core/log/test_log.py b/tests/core/log/test_log.py new file mode 100644 index 0000000000000..64069e9256758 --- /dev/null +++ b/tests/core/log/test_log.py @@ -0,0 +1,117 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json +import os.path
from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + +from buck2.tests.e2e_util.helper.utils import ( + is_running_on_windows, + read_invocation_record, +) + + +@buck_test() +async def test_log_show_invocation_record(buck: Buck, tmp_path: Path) -> None: + mode_file = tmp_path / "mode" + mode_file.write_text("-c\naa.bb=cc\n-c\ndd.ee=ff\n") + + # Any simple command would do. 
+ await buck.uquery(f"@{mode_file}", "//:EEE") + + result = await buck.log("show") + invocation = json.loads(result.stdout.splitlines()[0]) + command_line_args = invocation["command_line_args"] + expanded_command_line_args = invocation["expanded_command_line_args"] + assert f"@{mode_file}" in command_line_args + assert f"@{mode_file}" not in expanded_command_line_args + assert "aa.bb=cc" in expanded_command_line_args + assert "aa.bb=cc" not in command_line_args + + +@buck_test() +async def test_log_size_logging(buck: Buck, tmp_path: Path) -> None: + record_file = tmp_path / "record.json" + await buck.cquery( + "//:EEE", + "--unstable-write-invocation-record", + str(record_file), + ) + + out = await buck.log("last") + path = out.stdout.strip() + with open(path, "rb") as f: + log_size_in_disk = len(f.read()) + + logged_size = read_invocation_record(record_file)["compressed_event_log_size_bytes"] + + assert logged_size == log_size_in_disk + + +@buck_test() +async def test_replay(buck: Buck) -> None: + await buck.build("//:EEE") + replay = await buck.log("replay", "-v2") + assert "//:EEE" in replay.stderr + + +@buck_test() +async def test_last_log(buck: Buck) -> None: + await buck.build("//:EEE") + out = await buck.log("last") + path = out.stdout.strip() + assert os.path.exists(path) + assert "/log/" in path or "\\log\\" in path + out2 = await buck.log("path") + assert path == out2.stdout.strip() + + +@buck_test() +async def test_last_log_all(buck: Buck) -> None: + await buck.build("//:EEE") + out = await buck.log("last", "--all") + paths = list(out.stdout.splitlines()) + assert len(paths) > 0 + for path in paths: + assert os.path.exists(path) + assert "/log/" in path or "\\log\\" in path + + +@buck_test() +async def test_log_command_with_trace_id(buck: Buck, tmp_path: Path) -> None: + build_file_path = tmp_path / "b" + await buck.uquery("//:", f"--write-build-id={build_file_path}") + build_id = build_file_path.read_text("utf-8").strip() + await buck.log("show", f"--trace-id={build_id}") + log = (await buck.log("show", f"--trace-id={build_id}")).stdout.strip().splitlines() + # Check it looks like a log. + assert len(log) >= 1 + for line in log: + json.loads(line) + + +@buck_test() +async def test_what_buck(buck: Buck, tmp_path: Path) -> None: + mode_path = tmp_path / "mode" + mode_path.write_text("-c\nxx.yy=zz\n") + + await buck.uquery("//:", f"@{mode_path}") + + out = await buck.log("what-cmd") + assert "uquery //: " in out.stdout + if not is_running_on_windows(): + # Path is quoted on Windows. + assert f"uquery //: @{mode_path}" in out.stdout + + out = await buck.log("what-cmd", "--expand") + assert "uquery //: -c" in out.stdout diff --git a/tests/core/log/test_log_data/.buckconfig b/tests/core/log/test_log_data/.buckconfig new file mode 100644 index 0000000000000..2988e9614be50 --- /dev/null +++ b/tests/core/log/test_log_data/.buckconfig @@ -0,0 +1,5 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +prelude = . 
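`read_invocation_record` comes from `buck2.tests.e2e_util.helper.utils` and its definition is not part of this diff. For following the assertions above, a hypothetical minimal stand-in is simply "parse the JSON written by `--unstable-write-invocation-record`"; the real helper may unwrap additional nesting:

import json
from pathlib import Path
from typing import Any, Dict

def read_invocation_record_sketch(path: Path) -> Dict[str, Any]:
    # Hypothetical stand-in: the flag writes a JSON document whose fields
    # (e.g. "errors", "compressed_event_log_size_bytes") the tests assert on.
    return json.loads(path.read_text())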
diff --git a/tests/core/log/test_log_data/.buckroot b/tests/core/log/test_log_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/log/test_log_data/TARGETS.fixture b/tests/core/log/test_log_data/TARGETS.fixture new file mode 100644 index 0000000000000..641f9821d86ce --- /dev/null +++ b/tests/core/log/test_log_data/TARGETS.fixture @@ -0,0 +1 @@ +eee(name = "EEE") diff --git a/tests/core/log/test_log_data/prelude.bzl b/tests/core/log/test_log_data/prelude.bzl new file mode 100644 index 0000000000000..622401727afff --- /dev/null +++ b/tests/core/log/test_log_data/prelude.bzl @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _eee( + # starlark-lint-disable unused-argument + ctx): # @unused + return [DefaultInfo()] + +eee = rule( + impl = _eee, + attrs = {}, +) diff --git a/tests/core/log/test_summary.py b/tests/core/log/test_summary.py new file mode 100644 index 0000000000000..a9efea0c4fb9f --- /dev/null +++ b/tests/core/log/test_summary.py @@ -0,0 +1,21 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_summary_command(buck: Buck) -> None: + await buck.build("//:my_rule") + out = await buck.log("summary") + + assert "Showing summary from:" in out.stderr + assert "targets analysed: 1" in out.stderr diff --git a/tests/core/log/test_summary_data/.buckconfig b/tests/core/log/test_summary_data/.buckconfig new file mode 100644 index 0000000000000..2988e9614be50 --- /dev/null +++ b/tests/core/log/test_summary_data/.buckconfig @@ -0,0 +1,5 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +prelude = . diff --git a/tests/core/log/test_summary_data/.buckroot b/tests/core/log/test_summary_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/log/test_summary_data/TARGETS.fixture b/tests/core/log/test_summary_data/TARGETS.fixture new file mode 100644 index 0000000000000..30f53136138c7 --- /dev/null +++ b/tests/core/log/test_summary_data/TARGETS.fixture @@ -0,0 +1,5 @@ +load(":prelude.bzl", "my_rule") + +my_rule( + name = "my_rule", +) diff --git a/tests/core/log/test_summary_data/prelude.bzl b/tests/core/log/test_summary_data/prelude.bzl new file mode 100644 index 0000000000000..beaf75dd20cbb --- /dev/null +++ b/tests/core/log/test_summary_data/prelude.bzl @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +def _action(ctx): + out = ctx.actions.declare_output("out") + ctx.actions.run(cmd_args(["sh", "-c", 'echo > "$1"', "--", out.as_output()]), category = "run") + return [DefaultInfo(default_outputs = [out])] + +my_rule = rule( + impl = _action, + attrs = {}, +) diff --git a/tests/core/log/test_user_event_log.py b/tests/core/log/test_user_event_log.py new file mode 100644 index 0000000000000..faf7a85de069a --- /dev/null +++ b/tests/core/log/test_user_event_log.py @@ -0,0 +1,131 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json +import re +from pathlib import Path + +import pytest + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.golden import golden + + +def _replace_timestamp(s: str) -> str: + return re.sub(r"\b[0-9]+\b", "", s) + + +@buck_test(skip_for_os=["windows"]) +async def test_user_event_log_custom_output(buck: Buck, tmp_path: Path) -> None: + local_log = tmp_path / "test.json" + + await buck.bxl( + "root//:test.bxl:instant_event", + "--user-event-log", + str(local_log), + ) + + assert Path(local_log).exists() + + # do some basic validation - golden tests take care of better validation + with open(local_log, "r") as f: + results = f.read().splitlines() + # assert these events can be loaded + json.loads(results[0])["command_line_args"] + json.loads(results[1])["StarlarkUserEvent"] + json.loads(results[2])["StarlarkUserEvent"] + + +@buck_test(skip_for_os=["windows"]) +async def test_user_event_log_with_actions(buck: Buck, tmp_path: Path) -> None: + local_log = tmp_path / "test.json-lines" + + await buck.bxl( + "root//:test.bxl:action", + "--event-log", + str(local_log), + ) + + results = ( + (await buck.log("show-user", str(Path(local_log).absolute()))) + .stdout.strip() + .splitlines()[1:] + ) + + # Remove any durations + a = json.loads(results[0]) + a["ActionExecutionEvent"]["duration_millis"] = "" + a["ActionExecutionEvent"]["input_materialization_duration_millis"] = "" + b = json.loads(results[1]) + b["BxlEnsureArtifactsEvent"]["duration_millis"] = "" + + results = _replace_timestamp(f"{json.dumps(a)}\n{json.dumps(b)}") + + # Just validate the user events, let's skip the invocation record + golden( + output=results, + rel_path="action_event.golden.json", + ) + + +@buck_test(skip_for_os=["windows"]) +async def test_user_event_with_log_show_user(buck: Buck) -> None: + await buck.bxl( + "root//:test.bxl:instant_event", + ) + + results = (await buck.log("show-user")).stdout.strip().splitlines()[1:] + + results = _replace_timestamp("\n".join(results)) + + # Just validate the user events, let's skip the invocation record + golden( + output=results, + rel_path="instant_event.golden.json", + ) + + +@buck_test(skip_for_os=["windows"]) +@pytest.mark.parametrize( + "file_extension", [".json-lines", ".json-lines.gz", ".json-lines.zst"] +) +async def test_user_event_log_with_log_show_user_compatibility( + buck: Buck, + tmp_path: Path, + file_extension: str, +) -> None: + local_log = tmp_path / f"test{file_extension}" + + await buck.bxl( + "root//:test.bxl:instant_event", + "--event-log", + str(local_log), + ) + + results = ( + (await buck.log("show-user", str(Path(local_log).absolute()))) + .stdout.strip() 
.splitlines()[1:] + ) + + results = _replace_timestamp("\n".join(results)) + + # Just validate the user events, let's skip the invocation record + golden( + output=results, + rel_path="instant_event.golden.json", + ) + + +# Placeholder for tests to be listed successfully on Windows. +@buck_test() +async def test_noop(buck: Buck) -> None: + return diff --git a/tests/core/log/test_user_event_log_data/.buckconfig b/tests/core/log/test_user_event_log_data/.buckconfig new file mode 100644 index 0000000000000..6fa80f43ca420 --- /dev/null +++ b/tests/core/log/test_user_event_log_data/.buckconfig @@ -0,0 +1,15 @@ +[buildfile] +name = TARGETS.fixture + +[cells] +root = . +nano_prelude = nano_prelude + +[cell_aliases] +prelude = nano_prelude + +[external_cells] +nano_prelude = bundled + +[build] +execution_platforms = root//:execution_platforms diff --git a/tests/core/log/test_user_event_log_data/.buckroot b/tests/core/log/test_user_event_log_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/log/test_user_event_log_data/TARGETS.fixture b/tests/core/log/test_user_event_log_data/TARGETS.fixture new file mode 100644 index 0000000000000..f472b7ff0ed74 --- /dev/null +++ b/tests/core/log/test_user_event_log_data/TARGETS.fixture @@ -0,0 +1 @@ +execution_platforms(name = "execution_platforms") diff --git a/tests/core/log/test_user_event_log_data/action_event.golden.json b/tests/core/log/test_user_event_log_data/action_event.golden.json new file mode 100644 index 0000000000000..b881d0266d579 --- /dev/null +++ b/tests/core/log/test_user_event_log_data/action_event.golden.json @@ -0,0 +1,4 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{"ActionExecutionEvent": {"kind": "Write", "name": {"category": "write", "identifier": "my_output"}, "duration_millis": "", "output_size": , "input_materialization_duration_millis": "", "execution_kind": "Simple", "owner": "root//test.bxl:action"}, "epoch_millis": } +{"BxlEnsureArtifactsEvent": {"duration_millis": ""}, "epoch_millis": } \ No newline at end of file diff --git a/tests/core/log/test_user_event_log_data/instant_event.golden.json b/tests/core/log/test_user_event_log_data/instant_event.golden.json new file mode 100644 index 0000000000000..2c9fd1f288ef7 --- /dev/null +++ b/tests/core/log/test_user_event_log_data/instant_event.golden.json @@ -0,0 +1,6 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{"StarlarkUserEvent":{"id":"foo","metadata":{"bool_value":true}},"epoch_millis":} +{"StarlarkUserEvent":{"id":"bar","metadata":{"bool_value":false}},"epoch_millis":} +{"StarlarkUserEvent":{"id":"metadata_with_dict","metadata":{"dict_value":{"foo":"bar"}}},"epoch_millis":} +{"StarlarkUserEvent":{"id":"metadata_with_list","metadata":{"list_value":["a","b","c"]}},"epoch_millis":} \ No newline at end of file diff --git a/tests/core/log/test_user_event_log_data/test.bxl b/tests/core/log/test_user_event_log_data/test.bxl new file mode 100644 index 0000000000000..99c32dc55ee25 --- /dev/null +++ b/tests/core/log/test_user_event_log_data/test.bxl @@ -0,0 +1,45 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +def _impl_instant_event(ctx): + metadata = { + "bool_value": True, + } + ctx.instant_event(id = "foo", metadata = metadata) + + metadata = { + "bool_value": False, + } + ctx.instant_event(id = "bar", metadata = metadata) + + my_dict = {"foo": "bar"} + metadata = { + "dict_value": my_dict, + } + ctx.instant_event(id = "metadata_with_dict", metadata = metadata) + + my_list = ["a", "b", "c"] + metadata = { + "list_value": my_list, + } + ctx.instant_event(id = "metadata_with_list", metadata = metadata) + +instant_event = bxl_main( + impl = _impl_instant_event, + cli_args = {}, +) + +def _impl_action(ctx): + actions = ctx.bxl_actions().actions + output = actions.write("my_output", "my_content") + ensured = ctx.output.ensure(output) + ctx.output.print(ensured) + +action = bxl_main( + impl = _impl_action, + cli_args = {}, +) diff --git a/tests/core/log/test_what_materialized.py b/tests/core/log/test_what_materialized.py new file mode 100644 index 0000000000000..10f20f9ef7b36 --- /dev/null +++ b/tests/core/log/test_what_materialized.py @@ -0,0 +1,55 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +import csv +import json + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_what_materialized_csv(buck: Buck) -> None: + await buck.build("//:my_rule") + out = await buck.log("what-materialized", "--format", "csv") + header = ["path", "method", "file_count", "total_bytes"] + out = [ + dict(zip(header, record)) + for record in csv.reader(out.stdout.splitlines()) + if record + ] + assert len(out) > 0, "out should have some materializations" + assert out[0] == dict( + zip(header, header) + ), "ensure that first entry in csv is the header" + assert any( + line["path"].endswith("__my_rule__/out") for line in out + ), "should have materialized main test file" + + +@buck_test() +async def test_what_materialized_sorted(buck: Buck) -> None: + await buck.build("//:my_rule") + out = await buck.log("what-materialized", "--format", "json", "--sort-by-size") + out = [json.loads(line) for line in out.stdout.splitlines() if line] + assert len(out) > 0, "out should have some materializations" + assert all( + out[i]["total_bytes"] <= out[i + 1]["total_bytes"] for i in range(len(out) - 1) + ), "should be sorted by size" + + +@buck_test() +async def test_what_materialized_aggregated(buck: Buck) -> None: + await buck.build("//:my_rule") + # buck2 log what-materialized --aggregate-by-ext has the following output: + # cas 1 1 + out = await buck.log("what-materialized", "--aggregate-by-ext") + out = [line.split() for line in out.stdout.splitlines() if line] + assert len(out) > 0, "out should have some materializations" + assert out[0][0] == "" diff --git a/tests/core/log/test_what_materialized_data/.buckconfig b/tests/core/log/test_what_materialized_data/.buckconfig new file mode 100644 index 0000000000000..2988e9614be50 --- /dev/null +++ b/tests/core/log/test_what_materialized_data/.buckconfig @@ -0,0 +1,5 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +prelude = . 
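The CSV shape checked in `test_what_materialized_csv` above (`path,method,file_count,total_bytes`) also lends itself to ad-hoc analysis; a sketch that totals bytes per materialization method, assuming the stdout of `buck2 log what-materialized --format csv` as input:

import csv
from collections import defaultdict
from typing import Dict

def bytes_by_method(csv_text: str) -> Dict[str, int]:
    # DictReader consumes the header row the test verifies is present.
    totals: Dict[str, int] = defaultdict(int)
    for row in csv.DictReader(csv_text.splitlines()):
        totals[row["method"]] += int(row["total_bytes"])
    return dict(totals)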
diff --git a/tests/core/log/test_what_materialized_data/.buckroot b/tests/core/log/test_what_materialized_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/log/test_what_materialized_data/TARGETS.fixture b/tests/core/log/test_what_materialized_data/TARGETS.fixture new file mode 100644 index 0000000000000..cce5b172901e6 --- /dev/null +++ b/tests/core/log/test_what_materialized_data/TARGETS.fixture @@ -0,0 +1,3 @@ +my_rule( + name = "my_rule", +) diff --git a/tests/core/log/test_what_materialized_data/prelude.bzl b/tests/core/log/test_what_materialized_data/prelude.bzl new file mode 100644 index 0000000000000..beaf75dd20cbb --- /dev/null +++ b/tests/core/log/test_what_materialized_data/prelude.bzl @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _action(ctx): + out = ctx.actions.declare_output("out") + ctx.actions.run(cmd_args(["sh", "-c", 'echo > "$1"', "--", out.as_output()]), category = "run") + return [DefaultInfo(default_outputs = [out])] + +my_rule = rule( + impl = _action, + attrs = {}, +) diff --git a/tests/core/log/test_what_ran_incomplete.py b/tests/core/log/test_what_ran_incomplete.py new file mode 100644 index 0000000000000..0469d2efe603a --- /dev/null +++ b/tests/core/log/test_what_ran_incomplete.py @@ -0,0 +1,45 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import tempfile + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_what_ran_incomplete(buck: Buck) -> None: + await buck.build("//:my_rule") + + log = (await buck.log("show")).stdout.strip() + log_file = tempfile.NamedTemporaryFile( + suffix=".json-lines", mode="w+", delete=False + ) + + # Truncate log + with log_file as f: + lines = log.splitlines() + for line in lines: + if "SpanEnd" in line and "ActionExecution" in line: + break + f.write(line + "\n") + f.close() + + target = "build\tprelude//:my_rule ()" + + what_ran = await buck.log("what-ran", "--incomplete", log_file.name) + assert "Showing commands from:" in what_ran.stderr + assert target in what_ran.stdout + + what_failed = await buck.log("what-failed", log_file.name) + assert target not in what_failed.stdout + + what_ran = await buck.log("what-ran", "--show-std-err", log_file.name) + assert "" in what_ran.stdout diff --git a/tests/core/log/test_what_ran_incomplete_data/.buckconfig b/tests/core/log/test_what_ran_incomplete_data/.buckconfig new file mode 100644 index 0000000000000..2988e9614be50 --- /dev/null +++ b/tests/core/log/test_what_ran_incomplete_data/.buckconfig @@ -0,0 +1,5 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +prelude = . 
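`test_what_ran_incomplete` above simulates a build that died mid-action by cutting the json-lines event log just before the first ActionExecution `SpanEnd`; the truncation logic, factored out as a sketch under the same substring-matching assumption the test uses:

from typing import Iterable, List

def truncate_before_first_action_end(lines: Iterable[str]) -> List[str]:
    # Keep events up to, but not including, the first ActionExecution SpanEnd,
    # so the action appears as still running when the log is inspected.
    kept: List[str] = []
    for line in lines:
        if "SpanEnd" in line and "ActionExecution" in line:
            break
        kept.append(line)
    return kept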
diff --git a/tests/core/log/test_what_ran_incomplete_data/.buckroot b/tests/core/log/test_what_ran_incomplete_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/log/test_what_ran_incomplete_data/TARGETS.fixture b/tests/core/log/test_what_ran_incomplete_data/TARGETS.fixture new file mode 100644 index 0000000000000..30f53136138c7 --- /dev/null +++ b/tests/core/log/test_what_ran_incomplete_data/TARGETS.fixture @@ -0,0 +1,5 @@ +load(":prelude.bzl", "my_rule") + +my_rule( + name = "my_rule", +) diff --git a/tests/core/log/test_what_ran_incomplete_data/prelude.bzl b/tests/core/log/test_what_ran_incomplete_data/prelude.bzl new file mode 100644 index 0000000000000..beaf75dd20cbb --- /dev/null +++ b/tests/core/log/test_what_ran_incomplete_data/prelude.bzl @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _action(ctx): + out = ctx.actions.declare_output("out") + ctx.actions.run(cmd_args(["sh", "-c", 'echo > "$1"', "--", out.as_output()]), category = "run") + return [DefaultInfo(default_outputs = [out])] + +my_rule = rule( + impl = _action, + attrs = {}, +) diff --git a/tests/core/log/test_what_uploaded.py b/tests/core/log/test_what_uploaded.py new file mode 100644 index 0000000000000..14cd7f82b54d7 --- /dev/null +++ b/tests/core/log/test_what_uploaded.py @@ -0,0 +1,45 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + +import csv +import random +import string + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_what_uploaded_csv(buck: Buck) -> None: + # Use random content on every test invocation to make sure we actually get uploads + content = "".join(random.choices(string.ascii_uppercase + string.digits, k=20)) + await buck.build("//:upload_rule", "--remote-only", "-c", "test.content=" + content) + out = await buck.log("what-uploaded", "--format", "csv") + header = ["action", "digests_uploaded", "bytes_uploaded"] + out = [ + dict(zip(header, record)) + for record in csv.reader(out.stdout.splitlines()) + if record + ] + assert len(out) > 0, "out should have some uploads" + assert out[0] == dict( + zip(header, header) + ), "ensure that first entry in csv is the header" + assert int(out[1]["digests_uploaded"]) > 0, "second entry should be upload digests" + + +@buck_test() +async def test_what_uploaded_aggregated(buck: Buck) -> None: + # Use random content on every test invocation to make sure we actually get uploads + content = "".join(random.choices(string.ascii_uppercase + string.digits, k=20)) + await buck.build("//:upload_rule", "--remote-only", "-c", "test.content=" + content) + out = await buck.log("what-uploaded", "--aggregate-by-ext") + out = [line.split() for line in out.stdout.splitlines() if line] + assert len(out) > 0, "out should have some uploads" + assert out[0] == ["txt", "1", "20"], f"unexpected output: {out}" diff --git a/tests/core/log/test_what_uploaded_data/.buckconfig b/tests/core/log/test_what_uploaded_data/.buckconfig new file mode 100644 index 0000000000000..2988e9614be50 --- /dev/null +++ b/tests/core/log/test_what_uploaded_data/.buckconfig @@ -0,0 +1,5 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +prelude = . diff --git a/tests/core/log/test_what_uploaded_data/.buckroot b/tests/core/log/test_what_uploaded_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/log/test_what_uploaded_data/TARGETS.fixture b/tests/core/log/test_what_uploaded_data/TARGETS.fixture new file mode 100644 index 0000000000000..23061ef83bdd7 --- /dev/null +++ b/tests/core/log/test_what_uploaded_data/TARGETS.fixture @@ -0,0 +1 @@ +upload_rule(name = "upload_rule") diff --git a/tests/core/log/test_what_uploaded_data/prelude.bzl b/tests/core/log/test_what_uploaded_data/prelude.bzl new file mode 100644 index 0000000000000..0601d5a2d685c --- /dev/null +++ b/tests/core/log/test_what_uploaded_data/prelude.bzl @@ -0,0 +1,21 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _upload_action(ctx): + val = read_config("test", "content", "") + tmp = ctx.actions.write("tmp.txt", val) + out = ctx.actions.declare_output("out") + ctx.actions.run( + cmd_args(["sh", "-c", 'echo > "$1"', "--", out.as_output()], hidden = [tmp]), + category = "run", + ) + return [DefaultInfo(default_outputs = [out])] + +upload_rule = rule( + impl = _upload_action, + attrs = {}, +) diff --git a/tests/core/log/test_whatup.py b/tests/core/log/test_whatup.py new file mode 100644 index 0000000000000..19a77ff2be63d --- /dev/null +++ b/tests/core/log/test_whatup.py @@ -0,0 +1,66 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json +import tempfile + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_whatup_command(buck: Buck) -> None: + await buck.build("//:long_build") + + log = (await buck.log("show")).stdout.strip() + log_file = tempfile.NamedTemporaryFile( + suffix=".json-lines", mode="w+", delete=False + ) + # Truncate the log at the point where analysis started + with log_file as f: + lines = log.splitlines() + for line in lines: + f.write(line + "\n") + if "AnalysisStage" in line: + break + f.close() + + ext = await buck.log("whatup", log_file.name) + assert "running analysis" in ext.stderr + + +@buck_test() +async def test_whatup_after_command(buck: Buck) -> None: + await buck.build("//:long_build", "--local-only", "--no-remote-cache") + + # Get event log + log = (await buck.log("show")).stdout.strip() + elapsed = [0, 0] + + # Get first timestamp + lines = log.splitlines() + first_event = json.loads(lines[1]) + first_timestamp = first_event["Event"]["timestamp"] + # Get timestamp where rule execution starts + for line in lines: + if "Execute" in line: + event = json.loads(line) + # Calculate elapsed seconds; add 1 to give some padding in order to catch the open span + elapsed[0] = (event["Event"]["timestamp"][0] - first_timestamp[0]) + 1 + # Calculate elapsed milliseconds + elapsed[1] = ( + event["Event"]["timestamp"][1] - first_timestamp[1] + ) // 1000000 + break + + # Verify rule execution appears when running whatup at that timestamp + action_start = (elapsed[0] * 1000) + abs(elapsed[1]) + ext = (await buck.log("whatup", "--after", str(action_start))).stderr.strip() + assert "action (run_python)" in ext diff --git a/tests/core/log/test_whatup_data/.buckconfig b/tests/core/log/test_whatup_data/.buckconfig new file mode 100644 index 0000000000000..08e0b0a3d2118 --- /dev/null +++ b/tests/core/log/test_whatup_data/.buckconfig @@ -0,0 +1,5 @@ +[cells] + root = . + +[buildfile] + name=TARGETS.fixture diff --git a/tests/core/log/test_whatup_data/.buckroot b/tests/core/log/test_whatup_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/log/test_whatup_data/TARGETS.fixture b/tests/core/log/test_whatup_data/TARGETS.fixture new file mode 100644 index 0000000000000..1a8357f9773bd --- /dev/null +++ b/tests/core/log/test_whatup_data/TARGETS.fixture @@ -0,0 +1,5 @@ +load(":long_action.bzl", "long_action") + +long_action( + name = "long_build", +) diff --git a/tests/core/log/test_whatup_data/long_action.bzl b/tests/core/log/test_whatup_data/long_action.bzl new file mode 100644 index 0000000000000..ca13a10376dac --- /dev/null +++ b/tests/core/log/test_whatup_data/long_action.bzl @@ -0,0 +1,26 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +def _impl(ctx): + out = ctx.actions.declare_output("out.txt") + py = cmd_args( + "import time; time.sleep(3); ", + "import sys; open(sys.argv[1], 'w').write('')", + delimiter = "", + ) + + ctx.actions.run( + cmd_args("python3", "-c", py, out.as_output()), + category = "run_python", + ) + + return [DefaultInfo(default_output = out)] + +long_action = rule( + impl = _impl, + attrs = {}, +) diff --git a/tests/core/lsp/BUCK b/tests/core/lsp/BUCK new file mode 100644 index 0000000000000..eddf7c70d0eb7 --- /dev/null +++ b/tests/core/lsp/BUCK @@ -0,0 +1,9 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_lsp", + srcs = ["test_lsp.py"], + data_dir = "test_lsp_data", +) diff --git a/tests/core/lsp/test_lsp.py b/tests/core/lsp/test_lsp.py new file mode 100644 index 0000000000000..d348da9f8bb48 --- /dev/null +++ b/tests/core/lsp/test_lsp.py @@ -0,0 +1,238 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from pathlib import Path +from typing import Any, Dict, List, Optional + +import pytest + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.api.fixtures import Fixture, Span +from buck2.tests.e2e_util.api.lsp import LSPResponseError +from buck2.tests.e2e_util.buck_workspace import buck_test + + +def _assert_range(range: Dict[str, Any], expected: Optional[Span]) -> None: + """Assert that this Span is equal to an LSP range dict""" + if expected is None: + expected = Span(0, 0, 0, 0) + assert range["start"]["line"] == expected.start_line + assert range["start"]["character"] == expected.start_col + assert range["end"]["line"] == expected.end_line + assert range["end"]["character"] == expected.end_col + + +def _assert_goto_result( + res: List[Dict[str, Any]], + expected_src: Span, + expected_dest_path: Path, + expected_dest: Optional[Span], +) -> None: + assert len(res) == 1 + _assert_range(res[0]["originSelectionRange"], expected_src) + _assert_range(res[0]["targetRange"], expected_dest) + _assert_range(res[0]["targetSelectionRange"], expected_dest) + assert res[0]["targetUri"] == expected_dest_path.as_uri() + + +def fixture(buck: Buck, path: Path) -> Fixture: + abs_path = buck.cwd / path + fixture = Fixture(abs_path.read_text()) + abs_path.write_text(fixture.content) + return fixture + + +@buck_test() +async def test_lsp_starts(buck: Buck) -> None: + async with await buck.lsp() as lsp: + # Will fail if the initialize response is not received + await lsp.init_connection() + + +# TODO(marwhal): Fix and enable on Windows +@buck_test(skip_for_os=["windows"]) +async def test_lints_on_open(buck: Buck) -> None: + async with await buck.lsp() as lsp: + await lsp.init_connection() + diags = await lsp.open_file(Path("clean_lint.bzl")) + assert diags is not None + assert len(diags["diagnostics"]) == 0 + + diags = await lsp.open_file(Path("bad_syntax.bzl")) + assert diags is not None + assert len(diags["diagnostics"]) == 1 + + +# TODO(marwhal): Fix and enable on Windows +@buck_test(skip_for_os=["windows"]) +async def test_goto_definition(buck: Buck) -> None: + src_targets_path = Path("dir/TARGETS.fixture") + dest_targets_path = Path("cell/sub/TARGETS.fixture") + dest_bzl_path = Path("cell/sub/defs.bzl") + + src_targets = fixture(buck, 
src_targets_path) + dest_targets = fixture(buck, dest_targets_path) + dest_bzl = fixture(buck, dest_bzl_path) + + async with await buck.lsp() as lsp: + await lsp.init_connection() + diags = await lsp.open_file(src_targets_path) + assert len(diags["diagnostics"]) == 0 + + res = await lsp.goto_definition( + src_targets_path, + src_targets.start_line("load_click"), + src_targets.start_col("load_click"), + ) + _assert_goto_result( + res, src_targets.spans["load"], buck.cwd / dest_bzl_path, None + ) + + res = await lsp.goto_definition( + src_targets_path, + src_targets.start_line("dummy_click"), + src_targets.start_col("dummy_click"), + ) + _assert_goto_result( + res, + src_targets.spans["dummy"], + buck.cwd / dest_bzl_path, + dest_bzl.spans["rule"], + ) + + res = await lsp.goto_definition( + src_targets_path, + src_targets.start_line("missing_click"), + src_targets.start_col("missing_click"), + ) + assert len(res) == 0 + + res = await lsp.goto_definition( + src_targets_path, + src_targets.start_line("missing_foo_click"), + src_targets.start_col("missing_foo_click"), + ) + _assert_goto_result( + res, src_targets.spans["missing_foo"], buck.cwd / dest_targets_path, None + ) + + res = await lsp.goto_definition( + src_targets_path, + src_targets.start_line("rule_click"), + src_targets.start_col("rule_click"), + ) + _assert_goto_result( + res, + src_targets.spans["rule"], + buck.cwd / dest_bzl_path, + dest_bzl.spans["rule"], + ) + + res = await lsp.goto_definition( + src_targets_path, + src_targets.start_line("baz_click"), + src_targets.start_col("baz_click"), + ) + _assert_goto_result( + res, + src_targets.spans["baz"], + buck.cwd / dest_targets_path, + dest_targets.spans["baz"], + ) + + +@buck_test() +async def test_returns_file_contents_for_starlark_types(buck: Buck) -> None: + async with await buck.lsp() as lsp: + await lsp.init_connection() + + res = await lsp.file_contents("starlark:/native/DefaultInfo.bzl") + assert res["contents"] is not None + + res = await lsp.file_contents("starlark:/native/NonExistent.bzl") + assert res["contents"] is None + + with pytest.raises(LSPResponseError): + await lsp.file_contents(f"file:{lsp.cwd / '.buckconfig'}") + + +# TODO(marwhal): Fix and enable on Windows +@buck_test(skip_for_os=["windows"]) +async def test_goto_definition_for_globals(buck: Buck) -> None: + globals_bzl_path = Path("globals.bzl") + + globals_bzl = fixture(buck, globals_bzl_path) + async with await buck.lsp() as lsp: + await lsp.init_connection() + diags = await lsp.open_file(globals_bzl_path) + assert len(diags["diagnostics"]) == 0 + + res = await lsp.goto_definition( + globals_bzl_path, + globals_bzl.start_line("func2_click"), + globals_bzl.start_col("func2_click"), + ) + + assert len(res) == 1 + _assert_range(res[0]["originSelectionRange"], globals_bzl.spans["func2"]) + assert res[0]["targetRange"]["start"]["line"] != 0 + assert res[0]["targetSelectionRange"]["start"]["line"] != 0 + assert res[0]["targetUri"] == (buck.cwd / "prelude" / "prelude.bzl").as_uri() + + res = await lsp.goto_definition( + globals_bzl_path, + globals_bzl.start_line("info_click"), + globals_bzl.start_col("info_click"), + ) + + assert len(res) == 1 + _assert_range(res[0]["originSelectionRange"], globals_bzl.spans["info"]) + assert res[0]["targetUri"] == "starlark:/native/DefaultInfo.bzl" + + res = await lsp.goto_definition( + globals_bzl_path, + globals_bzl.start_line("invalid_click"), + globals_bzl.start_col("invalid_click"), + ) + assert len(res) == 0 + + +# TODO(marwhal): Fix and enable on Windows 
+@buck_test(skip_for_os=["windows"]) +async def test_supports_bxl_files(buck: Buck) -> None: + src_bxl_path = Path("query.bxl") + + src_bxl = fixture(buck, src_bxl_path) + + async with await buck.lsp() as lsp: + await lsp.init_connection() + diags = await lsp.open_file(src_bxl_path) + assert len(diags["diagnostics"]) == 0 + + res = await lsp.goto_definition( + src_bxl_path, + src_bxl.start_line("foo_click"), + src_bxl.start_col("foo_click"), + ) + _assert_goto_result( + res, + src_bxl.spans["foo"], + buck.cwd / src_bxl_path, + src_bxl.spans["dest_foo"], + ) + + res = await lsp.goto_definition( + src_bxl_path, + src_bxl.start_line("f_click"), + src_bxl.start_col("f_click"), + ) + _assert_goto_result( + res, src_bxl.spans["f"], buck.cwd / src_bxl_path, src_bxl.spans["dest_f"] + ) diff --git a/tests/core/lsp/test_lsp_data/.buckconfig b/tests/core/lsp/test_lsp_data/.buckconfig new file mode 100644 index 0000000000000..28a6db38c28a2 --- /dev/null +++ b/tests/core/lsp/test_lsp_data/.buckconfig @@ -0,0 +1,7 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . +cell = cell +prelude = prelude diff --git a/tests/core/lsp/test_lsp_data/.buckroot b/tests/core/lsp/test_lsp_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/lsp/test_lsp_data/bad_syntax.bzl b/tests/core/lsp/test_lsp_data/bad_syntax.bzl new file mode 100644 index 0000000000000..0610d5b434fd9 --- /dev/null +++ b/tests/core/lsp/test_lsp_data/bad_syntax.bzl @@ -0,0 +1,11 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# @nolint + +def f oo(bar: "string") -> "string": + return bar diff --git a/tests/core/lsp/test_lsp_data/cell/.buckconfig b/tests/core/lsp/test_lsp_data/cell/.buckconfig new file mode 100644 index 0000000000000..da206accf8bd6 --- /dev/null +++ b/tests/core/lsp/test_lsp_data/cell/.buckconfig @@ -0,0 +1,7 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = ../ +cell = . +prelude = ../prelude diff --git a/tests/core/lsp/test_lsp_data/cell/sub/TARGETS.fixture b/tests/core/lsp/test_lsp_data/cell/sub/TARGETS.fixture new file mode 100644 index 0000000000000..e969ae08ce8a7 --- /dev/null +++ b/tests/core/lsp/test_lsp_data/cell/sub/TARGETS.fixture @@ -0,0 +1,4 @@ +# @lint-ignore-every BUCKFORMAT + +dummy_binary(name = "bar") +dummy_binary(name = "baz") diff --git a/tests/core/lsp/test_lsp_data/cell/sub/defs.bzl b/tests/core/lsp/test_lsp_data/cell/sub/defs.bzl new file mode 100644 index 0000000000000..b10d63ed24fef --- /dev/null +++ b/tests/core/lsp/test_lsp_data/cell/sub/defs.bzl @@ -0,0 +1,19 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
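+ +# Goto-definition target for test_lsp.py: the test expects its "rule" span +# to resolve into this file, presumably at the `dummy_binary` definition below.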
+ +# @nolint + +def _dummy_binary_impl(ctx): + out = ctx.write("out.txt", ctx.attrs.name) + return [DefaultInfo(default_output=out)] + +dummy_binary = rule( + impl=_dummy_binary_impl, + attrs={ + "deps": attrs.list(attrs.dep(), default=[]) + } +) diff --git a/tests/core/lsp/test_lsp_data/clean_lint.bzl b/tests/core/lsp/test_lsp_data/clean_lint.bzl new file mode 100644 index 0000000000000..55025c25cd800 --- /dev/null +++ b/tests/core/lsp/test_lsp_data/clean_lint.bzl @@ -0,0 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def foo(bar: str) -> str: + return bar diff --git a/tests/core/lsp/test_lsp_data/dir/TARGETS.fixture b/tests/core/lsp/test_lsp_data/dir/TARGETS.fixture new file mode 100644 index 0000000000000..4b32368b6cc9c --- /dev/null +++ b/tests/core/lsp/test_lsp_data/dir/TARGETS.fixture @@ -0,0 +1,14 @@ +# @lint-ignore-every BUCKFORMAT + +load("@cell//sub:defs.bzl", "dummy_binary") + +"cell//nonexistent:foo" +# Does not exist +"cell//sub:foo" + +dummy_binary( + name="foo", + deps=[ + "cell//sub:baz", + ], +) diff --git a/tests/core/lsp/test_lsp_data/globals.bzl b/tests/core/lsp/test_lsp_data/globals.bzl new file mode 100644 index 0000000000000..8e8cfea9bbde3 --- /dev/null +++ b/tests/core/lsp/test_lsp_data/globals.bzl @@ -0,0 +1,13 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# @nolint + +# Test that goto definition works for global symbols +x = my_func2 +y = DefaultInfo +z = invalid_symbol diff --git a/tests/core/lsp/test_lsp_data/prelude/.buckconfig b/tests/core/lsp/test_lsp_data/prelude/.buckconfig new file mode 100644 index 0000000000000..6b8b3c97cef28 --- /dev/null +++ b/tests/core/lsp/test_lsp_data/prelude/.buckconfig @@ -0,0 +1,7 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = ../ +prelude = . +cell = ../cell diff --git a/tests/core/lsp/test_lsp_data/prelude/prelude.bzl b/tests/core/lsp/test_lsp_data/prelude/prelude.bzl new file mode 100644 index 0000000000000..30e5986396f99 --- /dev/null +++ b/tests/core/lsp/test_lsp_data/prelude/prelude.bzl @@ -0,0 +1,12 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def my_func1(): + pass + +def my_func2(): + pass diff --git a/tests/core/lsp/test_lsp_data/query.bxl b/tests/core/lsp/test_lsp_data/query.bxl new file mode 100644 index 0000000000000..1da130a91953e --- /dev/null +++ b/tests/core/lsp/test_lsp_data/query.bxl @@ -0,0 +1,13 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree.
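+ +# BXL fixture exercised by test_supports_bxl_files above; both goto-definition +# requests in that test are expected to resolve to spans within this same file.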
+ +# @nolint + +def some_function(foo): + return foo + 5 + +some_function(3) diff --git a/tests/core/materializer/BUCK b/tests/core/materializer/BUCK new file mode 100644 index 0000000000000..13e7672a2db4c --- /dev/null +++ b/tests/core/materializer/BUCK @@ -0,0 +1,36 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_materializer", + srcs = ["test_materializer.py"], + data_dir = "test_materializer_data", + serialize_test_cases = False, +) + +buck2_e2e_test( + name = "test_clean_stale", + srcs = ["test_clean_stale.py"], + data_dir = "test_clean_stale_data", + serialize_test_cases = False, +) + +buck2_e2e_test( + name = "test_clean_stale_bxl", + srcs = ["test_clean_stale_bxl.py"], + data_dir = "test_clean_stale_bxl_data", + serialize_test_cases = False, +) + +buck2_e2e_test( + name = "test_symlink_to_parent_bug", + srcs = ["test_symlink_to_parent_bug.py"], + data_dir = "test_symlink_to_parent_bug_data", +) + +buck2_e2e_test( + name = "test_symlink_local_remote_bug", + srcs = ["test_symlink_local_remote_bug.py"], + data_dir = "test_symlink_local_remote_bug_data", +) diff --git a/tests/core/materializer/test_clean_stale.py b/tests/core/materializer/test_clean_stale.py new file mode 100644 index 0000000000000..04b64622b5253 --- /dev/null +++ b/tests/core/materializer/test_clean_stale.py @@ -0,0 +1,283 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import re +import shutil +import time +from datetime import datetime, timedelta +from pathlib import Path +from typing import List + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test, env + + +def modify_access_times_updates(buck: Buck, new_status: str) -> None: + config_file = buck.cwd / ".buckconfig" + replace_in_file( + "update_access_times = full", + "update_access_times = {}".format(new_status), + file=config_file, + ) + + +def replace_in_file(old: str, new: str, file: Path, encoding: str = "utf-8") -> None: + with open(file, encoding=encoding) as f: + file_content = f.read() + file_content = file_content.replace(old, new) + with open(file, "w", encoding=encoding) as f: + f.write(file_content) + + +@buck_test() +@env("BUCK_LOG", "buck2_execute_impl::materializers=trace") +async def test_artifact_access_time(buck: Buck) -> None: + # drop microseconds to match 1s precision from materializer + start = datetime.utcnow().replace(microsecond=0) + target = "root//:copy" + result = await buck.build(target) + assert result.get_build_report().output_for_target(target).exists() + + async def audit_materialized() -> List[str]: + return list( + filter( + lambda x: "\tmaterialized" in x, + (await buck.audit("deferred-materializer", "list")) + .stdout.strip() + .splitlines(), + ) + ) + + def parse_entry_ts(entry: str) -> datetime: + match = re.search("\tmaterialized \\(ts=([^ ,]*)", entry) + assert match + timestamp = datetime.strptime(match.group(1), "%Y-%m-%dT%H:%M:%SZ") + assert timestamp, match.group(1) + return timestamp + + materialized_entries = await audit_materialized() + assert len(materialized_entries) == 1 + materialized_time = parse_entry_ts(materialized_entries[0]) + assert materialized_time >= start + + # Check that the access time is still set after
a daemon restart + await buck.kill() + materialized_entries = await audit_materialized() + assert len(materialized_entries) == 1 + materialized_time = parse_entry_ts(materialized_entries[0]) + assert materialized_time >= start + + # Check that the access time is updated following a build + time.sleep(1) + await buck.build(target) + + materialized_entries = await audit_materialized() + + assert len(materialized_entries) == 1 + access_time = parse_entry_ts(materialized_entries[0]) + assert access_time > materialized_time + + +@buck_test() +@env("BUCK_ACCESS_TIME_UPDATE_MAX_BUFFER_SIZE", "0") +async def test_artifact_access_time_flushing(buck: Buck) -> None: + # Create artifact + await buck.build("root//:copy") + # Access artifact to trigger an update; because the buffer size is 0, + # flushing should happen instantly + await buck.build("root//:copy") + # Force empty flush + flush = await buck.audit("deferred-materializer", "flush-access-times") + # Validate that there was nothing to flush + assert re.search("Finished flushing \\d+ entries in \\d+ ms", flush.stdout) + data = re.findall("\\d+", flush.stdout) + assert len(data) == 2 + assert data[0] == "0" + + +@buck_test() +@env("BUCK_ACCESS_TIME_UPDATE_MAX_BUFFER_SIZE", "0") +async def test_artifact_access_time_flushing_disabled(buck: Buck) -> None: + modify_access_times_updates(buck, "disabled") + # Create artifact + await buck.build("root//:copy") + # Access artifact to trigger an update + await buck.build("root//:copy") + # Force flush + flush = await buck.audit("deferred-materializer", "flush-access-times") + # Validate the update didn't happen since access time updates are disabled + assert ( + "Access time updates are disabled. Consider removing `update_access_times = false` from your .buckconfig" + in flush.stdout + ) + + +@buck_test() +@env("BUCK_ACCESS_TIME_UPDATE_MAX_BUFFER_SIZE", "2") +async def test_artifact_access_time_flushing_partial(buck: Buck) -> None: + modify_access_times_updates(buck, "partial") + # Create artifact + await buck.build("root//:copy") + # Access artifact to trigger an update;
the buffer now holds a single entry (max size is 2), so no flushing should happen yet + await buck.build("root//:copy") + + # Wait a bit longer than the normal periodic flush interval (5 secs as indicated here https://fburl.com/code/ot5944b2) + time.sleep(10) + # Force flush + flush = await buck.audit("deferred-materializer", "flush-access-times") + + # Validate the buffer was not flushed since the periodic flush was not triggered + assert re.search("Finished flushing \\d+ entries in \\d+ ms", flush.stdout) + data = re.findall("\\d+", flush.stdout) + assert data[0] == "1" + + +@buck_test() +@env("BUCK_LOG", "buck2_execute_impl::materializers=trace") +@env("BUCK_ACCESS_TIME_UPDATE_MAX_BUFFER_SIZE", "0") +async def test_clean_stale_artifacts(buck: Buck) -> None: + target_1 = "root//:copy" + result_1 = await buck.build(target_1) + output_1 = result_1.get_build_report().output_for_target(target_1) + + # ensure timestamp is after first materialization and before second + # (resolution for access timestamps is 1 second) + time.sleep(1) + after_first_build = int(time.time()) + time.sleep(1) + + target_2 = "root//:copy_2" + result_2 = await buck.build(target_2) + output_2 = result_2.get_build_report().output_for_target(target_2) + + # Check outputs are correctly materialized + assert output_1.exists() + assert output_2.exists() + + await buck.clean(f"--keep-since-time={after_first_build}") + # Check output_1 is still materialized; it's stale, but it was built by the running daemon + assert output_1.exists() + + await buck.kill() + res = await buck.clean(f"--keep-since-time={after_first_build}") + # Check output_1 was cleaned because it's stale and not declared by a running daemon + assert "1 stale artifact" in res.stderr and "4 bytes cleaned" in res.stderr + assert not output_1.exists() + assert output_2.exists() + + future_time = int((datetime.now() + timedelta(weeks=7)).timestamp()) + + # Check that a previously materialized output re-declared by a new daemon is not cleaned + await buck.build(target_2) + await buck.clean(f"--keep-since-time={future_time}") + assert output_2.exists() + + # Check that setting keep-since-time in the future cleans non-active artifacts + await buck.kill() + res = await buck.clean(f"--keep-since-time={future_time}") + assert "1 stale artifact" in res.stderr and "4 bytes cleaned" in res.stderr + assert not output_2.exists() + + +@buck_test() +@env("BUCK_LOG", "buck2_execute_impl::materializers=trace") +async def test_clean_stale_artifact_dir(buck: Buck) -> None: + target_1 = "root//:copy_dir" + result_1 = await buck.build(target_1) + output_1 = result_1.get_build_report().output_for_target(target_1) + assert output_1.exists() + await buck.kill() + future_time = int((datetime.now() + timedelta(weeks=7)).timestamp()) + res = await buck.clean(f"--keep-since-time={future_time}") + assert "4 bytes cleaned" in res.stderr + assert not output_1.exists() + # NOTE: Currently we require clean twice to delete empty dirs, which is ... + # probably fine.
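+    # (presumably the first pass removes the stale files and the directories +    # only become empty afterwards, hence the second clean below)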
+ await buck.clean(f"--keep-since-time={future_time}") + output_parent = output_1.parent + while not output_parent.exists(): + output_parent = output_parent.parent + assert output_parent.parts[-3:] == ("buck-out", "v2", "gen") + + +@buck_test() +@env("BUCK_ACCESS_TIME_UPDATE_MAX_BUFFER_SIZE", "0") +async def test_clean_stale_buck_out_empty(buck: Buck) -> None: + output = await buck.clean("--stale") + assert "Nothing to clean" in output.stderr + + +@buck_test() +@env("BUCK_LOG", "buck2_execute_impl::materializers=trace") +@env("BUCK_ACCESS_TIME_UPDATE_MAX_BUFFER_SIZE", "0") +async def test_clean_stale_actions(buck: Buck) -> None: + query_res = await buck.cquery("root//...") + targets = [ + target.split(" ")[0] for target in query_res.stdout.split("\n") if target + ] + + outputs = [] + for target in targets: + res = await buck.build(target) + output = res.get_build_report().outputs_for_target(target) + outputs += output + + assert len(outputs) >= len(targets) + for output in outputs: + assert output.exists() + + await buck.clean("--stale") + for output in outputs: + assert output.exists() + + +@buck_test() +async def test_clean_stale_declared(buck: Buck) -> None: + await buck.build("//declared:declared") + await buck.kill() + + # Drop the state. The path exists on disk. + shutil.rmtree(buck.cwd / "buck-out/v2/cache/materializer_state") + + # Build again, start by declaring, then clean, then require locally. + await buck.build("//declared:remote") + await buck.clean("--stale") + await buck.build("//declared:local") + + +@buck_test() +async def test_clean_stale_scheduled(buck: Buck) -> None: + # Need to write to .buckconfig instead of passing cmd line args because + # the config used when creating daemon state does not include cmd line args (but maybe it should). + config_file = buck.cwd / ".buckconfig.local" + with open(config_file, "w") as f: + f.write( + """ +[buck2] +clean_stale_enabled = true +clean_stale_artifact_ttl_hours = 0 +clean_stale_start_offset_hours = 0 +# 0.0001h = 360ms +clean_stale_period_hours = 0.0001 + """ + ) + + # Just test that a clean runs if enabled via config. + # Build a target, output is stale immediately but won't be cleaned until restart. + result = await buck.build("root//:copy") + output = result.get_build_report().output_for_target("root//:copy") + assert output.exists() + await buck.kill() + # Create a new daemon and build something else (could be any command that starts a daemon). + await buck.build("//declared:declared") + # Wait for at least one clean to run (but should have finished multiple cleans). + time.sleep(3) + # Original output should be cleaned. + assert not output.exists() diff --git a/tests/core/materializer/test_clean_stale_bxl.py b/tests/core/materializer/test_clean_stale_bxl.py new file mode 100644 index 0000000000000..828d05ea34ede --- /dev/null +++ b/tests/core/materializer/test_clean_stale_bxl.py @@ -0,0 +1,38 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree.
+ +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_clean_stale_bxl(buck: Buck) -> None: + await buck.bxl("//clean_stale/build.bxl:build_test") + + gen_files = [path.name for path in (buck.cwd / "buck-out/v2/gen").glob("**/*")] + assert "out.json" in gen_files + + # Check that artifacts written to gen by bxl are not deleted + await buck.clean("--stale") + gen_files = [path.name for path in (buck.cwd / "buck-out/v2/gen").glob("**/*")] + assert "out.json" in gen_files + + # Force clean of tracked artifacts, check that gen is deleted but not bxl + await buck.kill() + await buck.clean("--stale=0s") + + gen_files = [path.name for path in (buck.cwd / "buck-out/v2/gen").glob("**/*")] + assert "out.json" not in gen_files + + # TODO these should probably be tracked and cleaned too (write to gen instead?) + gen_bxl_files = [ + path.name for path in (buck.cwd / "buck-out/v2/gen-bxl").glob("**/*") + ] + assert "foo_out" in gen_bxl_files diff --git a/tests/core/materializer/test_clean_stale_bxl_data/.buckconfig b/tests/core/materializer/test_clean_stale_bxl_data/.buckconfig new file mode 100644 index 0000000000000..f3f8089161d97 --- /dev/null +++ b/tests/core/materializer/test_clean_stale_bxl_data/.buckconfig @@ -0,0 +1,19 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored + +[repositories] +root = . +prelude = prelude + +[buck2] +materializations = deferred +enable_local_caching_of_re_artifacts = true +sqlite_materializer_state = true +sqlite_materializer_state_version = 0 +defer_write_actions = true + +[build] + execution_platforms = root//platforms:platforms diff --git a/tests/core/materializer/test_clean_stale_bxl_data/.buckroot b/tests/core/materializer/test_clean_stale_bxl_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/materializer/test_clean_stale_bxl_data/clean_stale/TARGETS.fixture b/tests/core/materializer/test_clean_stale_bxl_data/clean_stale/TARGETS.fixture new file mode 100644 index 0000000000000..6ccb1c9bc119e --- /dev/null +++ b/tests/core/materializer/test_clean_stale_bxl_data/clean_stale/TARGETS.fixture @@ -0,0 +1,3 @@ +load(":defs.bzl", "write_json") + +write_json(name = "write_json") diff --git a/tests/core/materializer/test_clean_stale_bxl_data/clean_stale/build.bxl b/tests/core/materializer/test_clean_stale_bxl_data/clean_stale/build.bxl new file mode 100644 index 0000000000000..bd73a29c048d1 --- /dev/null +++ b/tests/core/materializer/test_clean_stale_bxl_data/clean_stale/build.bxl @@ -0,0 +1,25 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
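+ +# BXL script driven by test_clean_stale_bxl above: it writes one artifact +# directly from BXL actions (materialized under buck-out/v2/gen-bxl) and also +# builds //clean_stale:write_json so a tracked artifact lands under +# buck-out/v2/gen.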
+ +def _impl(ctx): + ctx.output.print("123") + action_factory = ctx.bxl_actions().actions + out = action_factory.declare_output("foo_out") + action_factory.write(out, "foo") + + ctx.output.print(ctx.output.ensure(out).abs_path()) + + build = ctx.build("//clean_stale:write_json") + + for values in build.values(): + for v in values.artifacts(): + ctx.output.print(ctx.output.ensure(v).abs_path()) + +build_test = bxl_main( + impl = _impl, + cli_args = {}, +) diff --git a/tests/core/materializer/test_clean_stale_bxl_data/clean_stale/defs.bzl b/tests/core/materializer/test_clean_stale_bxl_data/clean_stale/defs.bzl new file mode 100644 index 0000000000000..33af0b9daaff7 --- /dev/null +++ b/tests/core/materializer/test_clean_stale_bxl_data/clean_stale/defs.bzl @@ -0,0 +1,17 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _write_json_impl(ctx: AnalysisContext) -> list[Provider]: + out = ctx.actions.write_json("out.json", ctx.attrs.content) + return [DefaultInfo(default_output = out)] + +write_json = rule( + impl = _write_json_impl, + attrs = { + "content": attrs.string(default = "text"), + }, +) diff --git a/tests/core/materializer/test_clean_stale_bxl_data/platforms/TARGETS.fixture b/tests/core/materializer/test_clean_stale_bxl_data/platforms/TARGETS.fixture new file mode 100644 index 0000000000000..80533d33c2a4b --- /dev/null +++ b/tests/core/materializer/test_clean_stale_bxl_data/platforms/TARGETS.fixture @@ -0,0 +1,5 @@ +load(":defs.bzl", "execution_platforms") + +execution_platforms( + name = "platforms", +) diff --git a/tests/core/materializer/test_clean_stale_bxl_data/platforms/defs.bzl b/tests/core/materializer/test_clean_stale_bxl_data/platforms/defs.bzl new file mode 100644 index 0000000000000..39c597a7cb3f7 --- /dev/null +++ b/tests/core/materializer/test_clean_stale_bxl_data/platforms/defs.bzl @@ -0,0 +1,32 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _execution_platform(ctx): + platform = ExecutionPlatformInfo( + label = ctx.label.raw_target(), + configuration = ConfigurationInfo( + constraints = { + }, + values = {}, + ), + executor_config = CommandExecutorConfig( + local_enabled = True, + remote_enabled = True, + remote_cache_enabled = True, + remote_execution_properties = { + "platform": "linux-remote-execution", + }, + remote_execution_use_case = "buck2-testing", + ), + ) + + return [ + DefaultInfo(), + ExecutionPlatformRegistrationInfo(platforms = [platform]), + ] + +execution_platforms = rule(attrs = {}, impl = _execution_platform) diff --git a/tests/core/materializer/test_clean_stale_bxl_data/prelude/prelude.bzl b/tests/core/materializer/test_clean_stale_bxl_data/prelude/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/materializer/test_clean_stale_data/.buckconfig b/tests/core/materializer/test_clean_stale_data/.buckconfig new file mode 100644 index 0000000000000..b262fa1a7fa0c --- /dev/null +++ b/tests/core/materializer/test_clean_stale_data/.buckconfig @@ -0,0 +1,13 @@ +[cells] + root = . 
+ +[buildfile] + name=TARGETS.fixture + +[buck2] + materializations = deferred + enable_local_caching_of_re_artifacts = true + sqlite_materializer_state = true + sqlite_materializer_state_version = 0 + defer_write_actions = true + update_access_times = full diff --git a/tests/core/materializer/test_clean_stale_data/.buckroot b/tests/core/materializer/test_clean_stale_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/materializer/test_clean_stale_data/TARGETS.fixture b/tests/core/materializer/test_clean_stale_data/TARGETS.fixture new file mode 100644 index 0000000000000..3d11363713699 --- /dev/null +++ b/tests/core/materializer/test_clean_stale_data/TARGETS.fixture @@ -0,0 +1,22 @@ +load(":defs.bzl", "copy", "copy_to_dir", "download", "symlink_files", "write_json", "write_string") + +copy(name = "copy", src = "src") +copy(name = "copy_2", src = "src") + +copy_to_dir(name = "copy_dir", src = "src") + +write_string(name = "write", out = "out.txt", content = "HELLO") +copy(name = "copy_dep", src = "src", dep = ":write") + +download(name = "download_deferred", deferrable = True) +download(name = "download_immediate", deferrable = False) + +symlink_files( + name = "out", + srcs = [ + "dir1/dir1_1/file1.txt", + ":write", + ], +) + +write_json(name = "write_json") diff --git a/tests/core/materializer/test_clean_stale_data/declared/TARGETS.fixture b/tests/core/materializer/test_clean_stale_data/declared/TARGETS.fixture new file mode 100644 index 0000000000000..0c8777101868d --- /dev/null +++ b/tests/core/materializer/test_clean_stale_data/declared/TARGETS.fixture @@ -0,0 +1,3 @@ +load(":defs.bzl", "defs") + +defs() diff --git a/tests/core/materializer/test_clean_stale_data/declared/defs.bzl b/tests/core/materializer/test_clean_stale_data/declared/defs.bzl new file mode 100644 index 0000000000000..5bebf7d572544 --- /dev/null +++ b/tests/core/materializer/test_clean_stale_data/declared/defs.bzl @@ -0,0 +1,25 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _cp(ctx): + out = ctx.actions.declare_output("__objects__/out") + + ctx.actions.run( + ["cp", ctx.attrs.src, out.as_output()], + category = "cp", + prefer_remote = ctx.attrs.remote, + local_only = not ctx.attrs.remote, + ) + + return [DefaultInfo(out)] + +cp = rule(impl = _cp, attrs = {"remote": attrs.bool(), "src": attrs.source()}) + +def defs(): + cp(name = "declared", src = "src", remote = True) + cp(name = "remote", src = ":declared", remote = True) + cp(name = "local", src = ":declared", remote = False) diff --git a/tests/core/materializer/test_clean_stale_data/declared/src b/tests/core/materializer/test_clean_stale_data/declared/src new file mode 100644 index 0000000000000..ce013625030ba --- /dev/null +++ b/tests/core/materializer/test_clean_stale_data/declared/src @@ -0,0 +1 @@ +hello diff --git a/tests/core/materializer/test_clean_stale_data/defs.bzl b/tests/core/materializer/test_clean_stale_data/defs.bzl new file mode 100644 index 0000000000000..406a863c30a60 --- /dev/null +++ b/tests/core/materializer/test_clean_stale_data/defs.bzl @@ -0,0 +1,120 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _write_string_impl(ctx): + out = ctx.actions.write(ctx.attrs.out, ctx.attrs.content) + return [DefaultInfo(default_output = out)] + +write_string = rule( + impl = _write_string_impl, + attrs = { + "content": attrs.string(default = ""), + "out": attrs.string(), + }, +) + +def _copy_impl(ctx): + out = ctx.actions.declare_output("action_output") + ctx.actions.run( + cmd_args(["cp", ctx.attrs.src, out.as_output()]), + category = "cp", + ) + return [DefaultInfo(default_output = out)] + +copy = rule( + impl = _copy_impl, + attrs = { + "dep": attrs.option(attrs.dep(), default = None), + "src": attrs.source(), + }, +) + +def _copy_to_dir_impl(ctx): + out = ctx.actions.declare_output("action_output", dir = True) + ctx.actions.run( + cmd_args([ + "sh", + "-c", + 'mkdir "$1" && cp "$2" "$1"/"$2"', + "--", + out.as_output(), + ctx.attrs.src, + ]), + category = "cp_to_dir", + ) + return [DefaultInfo(default_output = out)] + +copy_to_dir = rule( + impl = _copy_to_dir_impl, + attrs = { + "src": attrs.source(), + }, +) + +def _download(ctx: AnalysisContext): + url = "https://interncache-all.fbcdn.net/manifold/buck_build_test/tree/buck2_test/http_archive/test.tgz" + sha1 = "1a45666759704bf08fc670aa96118a0415c470fc" + download = ctx.actions.download_file("download", url, sha1 = sha1, is_deferrable = ctx.attrs.deferrable) + return [ + DefaultInfo(default_output = download), + ] + +download = rule( + impl = _download, + attrs = { + "deferrable": attrs.bool(), + }, +) + +def _cas_artifact_impl(ctx: AnalysisContext): + out = ctx.actions.cas_artifact( + ctx.label.name, + ctx.attrs.digest, + ctx.attrs.use_case, + expires_after_timestamp = ctx.attrs.expires_after_timestamp, + is_tree = ctx.attrs.is_tree, + is_directory = ctx.attrs.is_directory, + ) + return [DefaultInfo(default_output = out)] + +cas_artifact = rule(impl = _cas_artifact_impl, attrs = { + "digest": attrs.string(), + "expires_after_timestamp": attrs.int(default = 0), + "is_directory": attrs.bool(default = False), + "is_tree": attrs.bool(default = False), + "use_case": attrs.string(default = "buck2-testing"), +}) + +def symlink_files_impl(ctx): + srcs = { + src.short_path: src + for src in ctx.attrs.srcs + } + srcs.update({ + "subdir/{}.suffix".format(src.short_path): src + for src in ctx.attrs.srcs + }) + out = ctx.actions.symlinked_dir("out", srcs) + return [DefaultInfo(default_output = out)] + +symlink_files = rule( + impl = symlink_files_impl, + attrs = { + "srcs": attrs.list(attrs.source()), + }, +) + +def _write_json_impl(ctx: AnalysisContext) -> list[Provider]: + out = ctx.actions.write_json("out.json", ctx.attrs.content) + return [DefaultInfo(default_output = out)] + +write_json = rule( + impl = _write_json_impl, + attrs = { + "content": attrs.string(default = "text"), + }, +) diff --git a/tests/core/materializer/test_clean_stale_data/dir1/dir1_1/file1.txt b/tests/core/materializer/test_clean_stale_data/dir1/dir1_1/file1.txt new file mode 100644 index 0000000000000..816dba3ddde47 --- /dev/null +++ b/tests/core/materializer/test_clean_stale_data/dir1/dir1_1/file1.txt @@ -0,0 +1 @@ +dir1_1 out contents diff --git a/tests/core/materializer/test_clean_stale_data/src b/tests/core/materializer/test_clean_stale_data/src new file mode 100644 index 0000000000000..5303a60cb37f8 --- /dev/null +++ 
b/tests/core/materializer/test_clean_stale_data/src @@ -0,0 +1 @@ +SRC diff --git a/tests/core/materializer/test_materializer.py b/tests/core/materializer/test_materializer.py new file mode 100644 index 0000000000000..b5c74a0318ced --- /dev/null +++ b/tests/core/materializer/test_materializer.py @@ -0,0 +1,259 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import sys +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test, env + + +def watchman_dependency_linux_only() -> bool: + return sys.platform == "linux" + + +def replace_in_file(old: str, new: str, file: Path, encoding: str = "utf-8") -> None: + with open(file, encoding=encoding) as f: + file_content = f.read() + file_content = file_content.replace(old, new) + with open(file, "w", encoding=encoding) as f: + f.write(file_content) + + +@buck_test(data_dir="modify_deferred_materialization") +async def test_modify_input_source(buck: Buck) -> None: + await buck.build("//:urandom_dep") + + targets_file = buck.cwd / "TARGETS.fixture" + + # Change the label in Targets. + replace_in_file("__NOT_A_REAL_LABEL__", "buck2_test_local_exec", file=targets_file) + + await buck.build("//:urandom_dep") + + +@buck_test( + data_dir="modify_deferred_materialization_deps", + skip_for_os=["windows"], # TODO(marwhal): Fix and enable on Windows +) +async def test_modify_dep_materialization(buck: Buck) -> None: + target = "//:check" + + # Build, expect the symlink to work. We'll materialize the first time. + + result = await buck.build(target) + with open(result.get_build_report().output_for_target(target)) as f: + assert f.read().strip() == "TEXT" + + # Build again, expect the symlink to work. We'll materialize just deps. + + with open(buck.cwd / "text", "w", encoding="utf-8") as f: + f.write("TEXT2") + + result = await buck.build(target) + with open(result.get_build_report().output_for_target(target)) as f: + assert f.read().strip() == "TEXT2" + + # Build again, expect the symlink to work. We'll materialize just deps + # again. However this time our state is a little different since the + # previous future was a check-deps only future. 
+ + with open(buck.cwd / "text", "w", encoding="utf-8") as f: + f.write("TEXT3") + + result = await buck.build(target) + with open(result.get_build_report().output_for_target(target)) as f: + assert f.read().strip() == "TEXT3" + + +@buck_test( + data_dir="deferred_materializer_matching_artifact_optimization", +) +@env("BUCK_LOG", "buck2_execute_impl::materializers=trace") +async def test_matching_artifact_optimization(buck: Buck) -> None: + target = "root//:copy" + result = await buck.build(target) + # Check output is correctly materialized + assert result.get_build_report().output_for_target(target).exists() + + # In this case, modifying `hidden` does not change the output, so the output should not + # need to be rematerialized + with open(buck.cwd / "hidden", "w", encoding="utf-8") as f: + f.write("HIDDEN2") + + result = await buck.build(target) + # Check output still exists + assert result.get_build_report().output_for_target(target).exists() + # Check that materializer did not report any rematerialization + assert "already materialized, updating deps only" in result.stderr + assert "materialize artifact" not in result.stderr + + # In this case, modifying `src` changes the output, so the output should be rematerialized + with open(buck.cwd / "src", "w", encoding="utf-8") as f: + f.write("SRC2") + + result = await buck.build(target) + # Check output still exists + output = result.get_build_report().output_for_target(target) + assert output.exists() + with open(output) as f: + assert f.read().strip() == "SRC2" + + +@buck_test( + data_dir="deferred_materializer_matching_artifact_optimization", +) +async def test_cache_directory_cleanup(buck: Buck) -> None: + # sqlite materializer state is already enabled + cache_dir = Path(buck.cwd, "buck-out", "v2", "cache") + materializer_state_dir = cache_dir / "materializer_state" + command_hashes_dir = cache_dir / "command_hashes" + materializer_state_dir.mkdir(parents=True) + command_hashes_dir.mkdir(parents=True) + + # Need to run a command to start the daemon.
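+    # `audit_config` is a cheap way to do that: it starts the daemon (which +    # performs the cache directory cleanup being tested) without building anything.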
+ await buck.audit_config() + + cache_dir_listing = list(cache_dir.iterdir()) + assert cache_dir_listing == [materializer_state_dir] + + await buck.kill() + disable_sqlite_materializer_state(buck) + await buck.audit_config() + + cache_dir_listing = list(cache_dir.iterdir()) + assert cache_dir_listing == [] + + +@buck_test( + data_dir="deferred_materializer_matching_artifact_optimization", +) +@env("BUCK_LOG", "buck2_execute_impl::materializers=trace") +async def test_sqlite_materializer_state_matching_artifact_optimization( + buck: Buck, +) -> None: + # sqlite materializer state is already enabled + target = "root//:copy" + res = await buck.build(target) + # Check output is correctly materialized + assert res.get_build_report().output_for_target(target).exists() + + await buck.kill() + + res = await buck.build(target) + # Check that materializer did not report any rematerialization + assert "already materialized, updating deps only" in res.stderr, res.stderr + assert "materialize artifact" not in res.stderr + + await buck.kill() + + # In this case, modifying `src` changes the output, so the output should be rematerialized + with open(buck.cwd / "src", "w", encoding="utf-8") as f: + f.write("SRC2") + + res = await buck.build(target) + # Check output still exists + output = res.get_build_report().output_for_target(target) + assert output.exists() + with open(output) as f: + assert f.read().strip() == "SRC2" + + +@buck_test( + data_dir="deferred_materializer_matching_artifact_optimization", +) +@env("BUCK_LOG", "buck2_execute_impl::materializers=trace") +async def test_download_file_sqlite_matching_artifact_optimization( + buck: Buck, +) -> None: + # sqlite materializer state is already enabled + target = "root//:download" + res = await buck.build(target) + # Check output is correctly materialized + assert res.get_build_report().output_for_target(target).exists() + + await buck.kill() + + res = await buck.build(target) + # Check that materializer did not report any rematerialization + assert "already materialized, updating deps only" in res.stderr, res.stderr + assert "materialize artifact" not in res.stderr + + +@buck_test( + data_dir="deferred_materializer_matching_artifact_optimization", +) +@env("BUCK_LOG", "buck2_execute_impl::materializers=trace") +async def test_sqlite_materializer_state_disabled( + buck: Buck, +) -> None: + disable_sqlite_materializer_state(buck) + + target = "root//:copy" + result = await buck.build(target) + # Check output is correctly materialized + assert result.get_build_report().output_for_target(target).exists() + + await buck.kill() + + result = await buck.build(target) + # Check that materializer did have to rematerialize the same artifact + assert "already materialized, updating deps only" not in result.stderr + assert "materialize artifact" in result.stderr + + +@buck_test( + data_dir="deferred_materializer_matching_artifact_optimization", +) +@env("BUCK_LOG", "buck2_execute_impl::materializers=trace") +async def test_sqlite_materializer_state_buckconfig_version_change( + buck: Buck, +) -> None: + # sqlite materializer state is already enabled + target = "root//:copy" + result = await buck.build(target) + # Check output is correctly materialized + assert result.get_build_report().output_for_target(target).exists() + + await buck.kill() + + # Bump the buckconfig version of sqlite materializer state to invalidate the existing sqlite db + replace_in_file( + "sqlite_materializer_state_version = 0", + "sqlite_materializer_state_version = 1", + buck.cwd / 
".buckconfig", + ) + + # just starting the buck2 daemon should delete the sqlite materializer state + await buck.audit_config() + + +def disable_sqlite_materializer_state(buck: Buck) -> None: + config_file = buck.cwd / ".buckconfig" + replace_in_file( + "sqlite_materializer_state = true", + "sqlite_materializer_state = false", + file=config_file, + ) + + +@buck_test( + data_dir="modify_deferred_materialization_deps", + skip_for_os=["windows"], # TODO(marwhal): Fix and enable on Windows +) +async def test_debug_materialize(buck: Buck) -> None: + result = await buck.build("//:remote_text", "--materializations=None") + out = result.get_build_report().output_for_target( + "root//:remote_text", rel_path=True + ) + assert not Path(buck.cwd, out).exists() + + await buck.debug("materialize", str(out)) + assert Path(buck.cwd, out).exists() diff --git a/tests/core/materializer/test_materializer_data/.buckroot b/tests/core/materializer/test_materializer_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/materializer/test_materializer_data/deferred_materializer_matching_artifact_optimization/.buckconfig b/tests/core/materializer/test_materializer_data/deferred_materializer_matching_artifact_optimization/.buckconfig new file mode 100644 index 0000000000000..5e46c9eb703f4 --- /dev/null +++ b/tests/core/materializer/test_materializer_data/deferred_materializer_matching_artifact_optimization/.buckconfig @@ -0,0 +1,11 @@ +[cells] + root = . + +[buildfile] + name=TARGETS.fixture + +[buck2] + materializations = deferred + enable_local_caching_of_re_artifacts = true + sqlite_materializer_state = true + sqlite_materializer_state_version = 0 diff --git a/tests/core/materializer/test_materializer_data/deferred_materializer_matching_artifact_optimization/TARGETS.fixture b/tests/core/materializer/test_materializer_data/deferred_materializer_matching_artifact_optimization/TARGETS.fixture new file mode 100644 index 0000000000000..431c195e41dc4 --- /dev/null +++ b/tests/core/materializer/test_materializer_data/deferred_materializer_matching_artifact_optimization/TARGETS.fixture @@ -0,0 +1,5 @@ +load(":defs.bzl", "copy", "download") + +copy(name = "copy", src = "src", hidden = "hidden") + +download(name = "download") diff --git a/tests/core/materializer/test_materializer_data/deferred_materializer_matching_artifact_optimization/defs.bzl b/tests/core/materializer/test_materializer_data/deferred_materializer_matching_artifact_optimization/defs.bzl new file mode 100644 index 0000000000000..20e1f2c6e71a6 --- /dev/null +++ b/tests/core/materializer/test_materializer_data/deferred_materializer_matching_artifact_optimization/defs.bzl @@ -0,0 +1,43 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +def _copy_impl(ctx): + out = ctx.actions.declare_output("action_output") + ctx.actions.run( + cmd_args( + ["cp", ctx.attrs.src, out.as_output()], + hidden = ctx.attrs.hidden, + ), + category = "cp", + ) + + return [DefaultInfo(default_output = out)] + +copy = rule( + impl = _copy_impl, + attrs = { + "hidden": attrs.source(), + "src": attrs.source(), + }, +) + +def _download(ctx: AnalysisContext): + url = "https://interncache-all.fbcdn.net/manifold/buck_build_test/tree/buck2_test/http_archive/test.tgz" + sha1 = "1a45666759704bf08fc670aa96118a0415c470fc" + + download = ctx.actions.declare_output("download") + ctx.actions.download_file(download, url, sha1 = sha1, is_deferrable = True) + + return [ + DefaultInfo(default_output = download), + ] + +download = rule( + impl = _download, + attrs = { + }, +) diff --git a/tests/core/materializer/test_materializer_data/deferred_materializer_matching_artifact_optimization/hidden b/tests/core/materializer/test_materializer_data/deferred_materializer_matching_artifact_optimization/hidden new file mode 100644 index 0000000000000..834d4bf14527d --- /dev/null +++ b/tests/core/materializer/test_materializer_data/deferred_materializer_matching_artifact_optimization/hidden @@ -0,0 +1 @@ +HIDDEN diff --git a/tests/core/materializer/test_materializer_data/deferred_materializer_matching_artifact_optimization/src b/tests/core/materializer/test_materializer_data/deferred_materializer_matching_artifact_optimization/src new file mode 100644 index 0000000000000..5303a60cb37f8 --- /dev/null +++ b/tests/core/materializer/test_materializer_data/deferred_materializer_matching_artifact_optimization/src @@ -0,0 +1 @@ +SRC diff --git a/tests/core/materializer/test_materializer_data/modify_deferred_materialization/.buckconfig b/tests/core/materializer/test_materializer_data/modify_deferred_materialization/.buckconfig new file mode 100644 index 0000000000000..b16f1dfe52e41 --- /dev/null +++ b/tests/core/materializer/test_materializer_data/modify_deferred_materialization/.buckconfig @@ -0,0 +1,8 @@ +[cells] + root = . + +[buildfile] + name=TARGETS.fixture + +[buck2] + materializations = deferred diff --git a/tests/core/materializer/test_materializer_data/modify_deferred_materialization/TARGETS.fixture b/tests/core/materializer/test_materializer_data/modify_deferred_materialization/TARGETS.fixture new file mode 100644 index 0000000000000..dc3325ef4549d --- /dev/null +++ b/tests/core/materializer/test_materializer_data/modify_deferred_materialization/TARGETS.fixture @@ -0,0 +1,14 @@ +load(":rules.bzl", "proto_genrule") + +proto_genrule( + name = "urandom", + out = "output.txt", + python = "import os; out = open(os.getenv('OUT'), 'wb'); out.write(os.urandom(50))", + labels = ["__NOT_A_REAL_LABEL__"], +) + +proto_genrule( + name = "urandom_dep", + out = "output.txt", + python = "import os; inp = open(r'$(location :urandom)', 'rb'); out = open(os.getenv('OUT'), 'w'); print(len(inp.read()), file=out)", +) diff --git a/tests/core/materializer/test_materializer_data/modify_deferred_materialization/rules.bzl b/tests/core/materializer/test_materializer_data/modify_deferred_materialization/rules.bzl new file mode 100644 index 0000000000000..3e85698588c63 --- /dev/null +++ b/tests/core/materializer/test_materializer_data/modify_deferred_materialization/rules.bzl @@ -0,0 +1,34 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _requires_local(ctx): + for label in ctx.attrs.labels: + if label == "buck2_test_local_exec": + return True + return False + +def _proto_genrule_impl(ctx): + out_artifact = ctx.actions.declare_output(ctx.attrs.out) + env_vars = { + "OUT": cmd_args(out_artifact.as_output()), + } + ctx.actions.run( + cmd_args(["python3", "-c", ctx.attrs.python]), + env = env_vars, + prefer_local = _requires_local(ctx), + category = "genrule", + ) + return [DefaultInfo(default_output = out_artifact)] + +proto_genrule = rule( + impl = _proto_genrule_impl, + attrs = { + "labels": attrs.list(attrs.string(), default = []), + "out": attrs.string(), + "python": attrs.option(attrs.arg(), default = None), + }, +) diff --git a/tests/core/materializer/test_materializer_data/modify_deferred_materialization_deps/.buckconfig b/tests/core/materializer/test_materializer_data/modify_deferred_materialization_deps/.buckconfig new file mode 100644 index 0000000000000..b16f1dfe52e41 --- /dev/null +++ b/tests/core/materializer/test_materializer_data/modify_deferred_materialization_deps/.buckconfig @@ -0,0 +1,8 @@ +[cells] + root = . + +[buildfile] + name=TARGETS.fixture + +[buck2] + materializations = deferred diff --git a/tests/core/materializer/test_materializer_data/modify_deferred_materialization_deps/TARGETS.fixture b/tests/core/materializer/test_materializer_data/modify_deferred_materialization_deps/TARGETS.fixture new file mode 100644 index 0000000000000..4348f00cfd786 --- /dev/null +++ b/tests/core/materializer/test_materializer_data/modify_deferred_materialization_deps/TARGETS.fixture @@ -0,0 +1,7 @@ +load(":defs.bzl", "check", "remote_text", "symlink_dir") + +remote_text(name = "remote_text", text = "text") + +symlink_dir(name = "symlink_dir", remote_text = ":remote_text") + +check(name = "check", symlink_dir = ":symlink_dir", text = "text") diff --git a/tests/core/materializer/test_materializer_data/modify_deferred_materialization_deps/defs.bzl b/tests/core/materializer/test_materializer_data/modify_deferred_materialization_deps/defs.bzl new file mode 100644 index 0000000000000..ea84f21bb6b94 --- /dev/null +++ b/tests/core/materializer/test_materializer_data/modify_deferred_materialization_deps/defs.bzl @@ -0,0 +1,65 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
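+ +# Rule chain for the dep-materialization tests above: `remote_text` copies a +# source file, `symlink_dir` produces a symlink pointing at that output, and +# `check` copies through the symlink, with `text` as a hidden input so edits +# to it invalidate the action.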
+ +def _remote_text_impl(ctx): + text = ctx.attrs.text + + out = ctx.actions.declare_output("action_output") + ctx.actions.run( + cmd_args(["cp", text, out.as_output()]), + category = "touch", + ) + + return [DefaultInfo(default_output = out)] + +remote_text = rule( + impl = _remote_text_impl, + attrs = { + "text": attrs.source(), + }, +) + +def _symlink_dir_impl(ctx): + remote_text = ctx.attrs.remote_text[DefaultInfo].default_outputs[0] + link = ctx.actions.symlinked_dir(ctx.label.name, {"link": remote_text}) + return [DefaultInfo(default_output = link)] + +symlink_dir = rule( + impl = _symlink_dir_impl, + attrs = { + "remote_text": attrs.dep(), + }, +) + +def _check_impl(ctx): + text = ctx.attrs.text + symlink_dir = ctx.attrs.symlink_dir[DefaultInfo].default_outputs[0] + + out = ctx.actions.declare_output("out") + + ctx.actions.run( + cmd_args( + [ + "cp", + cmd_args(symlink_dir, format = "{}/link"), + out.as_output(), + ], + hidden = text, # Invalidate this action when `text` changes. + ), + category = "test", + local_only = True, + ) + + return [DefaultInfo(default_output = out)] + +check = rule( + impl = _check_impl, + attrs = { + "symlink_dir": attrs.dep(), + "text": attrs.source(), + }, +) diff --git a/tests/core/materializer/test_materializer_data/modify_deferred_materialization_deps/text b/tests/core/materializer/test_materializer_data/modify_deferred_materialization_deps/text new file mode 100644 index 0000000000000..e8b5482c2bf83 --- /dev/null +++ b/tests/core/materializer/test_materializer_data/modify_deferred_materialization_deps/text @@ -0,0 +1 @@ +TEXT diff --git a/tests/core/materializer/test_symlink_local_remote_bug.py b/tests/core/materializer/test_symlink_local_remote_bug.py new file mode 100644 index 0000000000000..d43a114bd4a0f --- /dev/null +++ b/tests/core/materializer/test_symlink_local_remote_bug.py @@ -0,0 +1,38 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-unsafe + +import os + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + +# TODO(nga): Local and remote execution of `//:dog_and_bone` must produce identical output. +# This is a known limitation of at least our RE implementation. It reads through symlinks. + + +@buck_test(skip_for_os=["windows"]) +async def test_symlink_preserves_empty_directory_local(buck: Buck) -> None: + result = await buck.build("//:dog_and_bone", "--prefer-local", "--no-remote-cache") + out = result.get_build_report().output_for_target("//:dog_and_bone") + assert os.path.islink(out) + assert os.path.isfile(out) + + +@buck_test(skip_for_os=["windows"]) +async def test_symlink_preserves_empty_directory_remote(buck: Buck) -> None: + result = await buck.build("//:dog_and_bone", "--prefer-remote") + out = result.get_build_report().output_for_target("//:dog_and_bone") + # This is incorrect, should be a symlink. 
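+    # (see the TODO at the top of this file: the RE implementation reads +    # through symlinks, so the output comes back as a regular file)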
+ assert not os.path.islink(out) + assert os.path.isfile(out) + + +@buck_test() +async def test_noop(buck: Buck) -> None: + return diff --git a/tests/core/materializer/test_symlink_local_remote_bug_data/.buckconfig b/tests/core/materializer/test_symlink_local_remote_bug_data/.buckconfig new file mode 100644 index 0000000000000..34f01b339e098 --- /dev/null +++ b/tests/core/materializer/test_symlink_local_remote_bug_data/.buckconfig @@ -0,0 +1,9 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . +prelude = . + +[buck2] +materializations = deferred diff --git a/tests/core/materializer/test_symlink_local_remote_bug_data/.buckroot b/tests/core/materializer/test_symlink_local_remote_bug_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/materializer/test_symlink_local_remote_bug_data/TARGETS.fixture b/tests/core/materializer/test_symlink_local_remote_bug_data/TARGETS.fixture new file mode 100644 index 0000000000000..d5637428e359f --- /dev/null +++ b/tests/core/materializer/test_symlink_local_remote_bug_data/TARGETS.fixture @@ -0,0 +1 @@ +dog_and_bone(name = "dog_and_bone") diff --git a/tests/core/materializer/test_symlink_local_remote_bug_data/prelude.bzl b/tests/core/materializer/test_symlink_local_remote_bug_data/prelude.bzl new file mode 100644 index 0000000000000..56f4062c84161 --- /dev/null +++ b/tests/core/materializer/test_symlink_local_remote_bug_data/prelude.bzl @@ -0,0 +1,23 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _dog_and_bone(ctx): + dir = ctx.actions.declare_output("dog") + ctx.actions.run(["touch", dir.as_output()], category = "mkdir") + + out = ctx.actions.declare_output("bone") + ctx.actions.run( + ["ln", "-s", cmd_args(dir, relative_to = (out, 1)), out.as_output()], + category = "symlink", + ) + + return [DefaultInfo(default_output = out)] + +dog_and_bone = rule( + impl = _dog_and_bone, + attrs = {}, +) diff --git a/tests/core/materializer/test_symlink_to_parent_bug.py b/tests/core/materializer/test_symlink_to_parent_bug.py new file mode 100644 index 0000000000000..345e4243a9c2f --- /dev/null +++ b/tests/core/materializer/test_symlink_to_parent_bug.py @@ -0,0 +1,36 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +import os + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(skip_for_os=["windows"]) +async def test_symlink_to_parent_bug(buck: Buck) -> None: + result = await buck.build("//:whistle", "--prefer-local", "--no-remote-cache") + out = result.get_build_report().output_for_target("//:whistle") + assert str(out).endswith("/whistle") + # Check the link was actually materialized. 
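+    # (`whistle` is created with `ln -s .`, i.e. a symlink pointing at its own +    # parent directory; see prelude.bzl below)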
+ assert os.path.islink(out) + + +@buck_test(skip_for_os=["windows"]) +async def test_symlink_to_self(buck: Buck) -> None: + result = await buck.build("//:flute", "--prefer-local", "--no-remote-cache") + out = result.get_build_report().output_for_target("//:flute") + assert str(out).endswith("/flute") + # Check the link was actually materialized. + assert os.path.islink(out) + + +@buck_test() +async def test_noop(buck: Buck) -> None: + return diff --git a/tests/core/materializer/test_symlink_to_parent_bug_data/.buckconfig b/tests/core/materializer/test_symlink_to_parent_bug_data/.buckconfig new file mode 100644 index 0000000000000..19057aa6009ab --- /dev/null +++ b/tests/core/materializer/test_symlink_to_parent_bug_data/.buckconfig @@ -0,0 +1,9 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . +prelude = prelude + +[buck2] +materializations = deferred diff --git a/tests/core/materializer/test_symlink_to_parent_bug_data/.buckroot b/tests/core/materializer/test_symlink_to_parent_bug_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/materializer/test_symlink_to_parent_bug_data/TARGETS.fixture b/tests/core/materializer/test_symlink_to_parent_bug_data/TARGETS.fixture new file mode 100644 index 0000000000000..61660dde451e3 --- /dev/null +++ b/tests/core/materializer/test_symlink_to_parent_bug_data/TARGETS.fixture @@ -0,0 +1,7 @@ +whistle( + name = "whistle", +) + +flute( + name = "flute", +) diff --git a/tests/core/materializer/test_symlink_to_parent_bug_data/prelude/prelude.bzl b/tests/core/materializer/test_symlink_to_parent_bug_data/prelude/prelude.bzl new file mode 100644 index 0000000000000..dad665ce22f9b --- /dev/null +++ b/tests/core/materializer/test_symlink_to_parent_bug_data/prelude/prelude.bzl @@ -0,0 +1,30 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _whistle(ctx): + whistle = ctx.actions.declare_output("whistle") + ctx.actions.run([ + "ln", + "-s", + ".", + whistle.as_output(), + ], category = "test") + return [DefaultInfo(default_output = whistle)] + +whistle = rule(impl = _whistle, attrs = {}) + +def _flute(ctx): + flute = ctx.actions.declare_output("flute") + ctx.actions.run([ + "ln", + "-s", + "flute", + flute.as_output(), + ], category = "test") + return [DefaultInfo(default_output = flute)] + +flute = rule(impl = _flute, attrs = {}) diff --git a/tests/core/profile/BUCK b/tests/core/profile/BUCK new file mode 100644 index 0000000000000..42c092691196b --- /dev/null +++ b/tests/core/profile/BUCK @@ -0,0 +1,9 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_profile", + srcs = ["test_profile.py"], + data_dir = "test_profile_data", +) diff --git a/tests/core/profile/test_profile.py b/tests/core/profile/test_profile.py new file mode 100644 index 0000000000000..c4b8d3060dd0d --- /dev/null +++ b/tests/core/profile/test_profile.py @@ -0,0 +1,354 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
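+
+# Covers `buck2 profile` for analysis, loading, and bxl, plus `--profile-mode` on
+# cquery, across every profiler mode listed in PROFILERS below.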
+ +# pyre-strict + +import os +from pathlib import Path + +import pytest +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.api.buck_result import BuckException, BuckResult +from buck2.tests.e2e_util.api.process import Process +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + +PROFILERS = [ + "heap-flame-allocated", + "heap-flame-retained", + "heap-summary-allocated", + "heap-summary-retained", + "time-flame", + "statement", + "bytecode", + "bytecode-pairs", + "typecheck", + "coverage", +] + + +@buck_test() +@pytest.mark.parametrize( + "profiler", + PROFILERS, +) +async def test_profile_analysis_for_self_transition( + buck: Buck, tmp_path: Path, profiler: str +) -> None: + file_path = tmp_path / "profile" + + await buck.profile( + "analysis", + "--target-platforms=//self_transition:p", + "--mode", + profiler, + "//self_transition:zzz", + "--output", + str(file_path), + ) + + assert os.path.exists(file_path) + + +@buck_test() +@pytest.mark.parametrize( + "profiler", + PROFILERS, +) +async def test_profile_analysis_last(buck: Buck, tmp_path: Path, profiler: str) -> None: + file_path = tmp_path / "profile" + + await buck.profile( + "analysis", + "--mode", + profiler, + "//simple:test", + "--output", + str(file_path), + ) + + assert os.path.exists(file_path) + + +@buck_test() +@pytest.mark.parametrize( + "profiler", + PROFILERS, +) +async def test_profile_analysis_recursive( + buck: Buck, tmp_path: Path, profiler: str +) -> None: + file_path = tmp_path / "profile" + + command = buck.profile( + "analysis", + "--mode", + profiler, + "//simple:test", + "--output", + str(file_path), + "--recursive", + ) + await assert_flame_outputs(command, file_path, profiler) + + +@buck_test() +@pytest.mark.parametrize( + "profiler", + PROFILERS, +) +async def test_profile_analysis_recursive_transition( + buck: Buck, tmp_path: Path, profiler: str +) -> None: + file_path = tmp_path / "profile" + + command = buck.profile( + "analysis", + "--mode", + profiler, + "//recursive_transition:ccc", + "--output", + str(file_path), + "--recursive", + ) + + await assert_flame_outputs(command, file_path, profiler) + + +@buck_test() +@pytest.mark.parametrize( + "profiler", + PROFILERS, +) +async def test_profile_loading_last(buck: Buck, tmp_path: Path, profiler: str) -> None: + file_path = tmp_path / "profile" + + command = buck.profile( + "loading", + "--mode", + profiler, + "//simple:", + "--output", + str(file_path), + ) + + await _assertions_for_profile_without_frozen_module(command, file_path, profiler) + + +@buck_test() +@pytest.mark.parametrize( + "profiler", + PROFILERS, +) +async def test_query_profile(buck: Buck, tmp_path: Path, profiler: str) -> None: + file_path = tmp_path / "profile" + + command = buck.cquery( + "--profile-mode", + profiler, + "deps(//query/a:a)", + "--profile-output", + str(file_path), + ) + + await _assertions_for_profile_without_frozen_module(command, file_path, profiler) + + if not profiler.endswith("-retained"): + with open(buck.cwd / file_path / "targets.txt", "r") as f: + lines = [x.rstrip() for x in sorted(f.readlines())] + assert [ + "loading:root//query/a", + "loading:root//query/b", + ] == lines + else: + assert not os.path.exists(buck.cwd / file_path) + + +@buck_test() +async def test_profile_loading_last_single_target(buck: Buck, tmp_path: Path) -> None: + file_path = tmp_path / "profile" + + profiler = "statement" + + command = buck.profile( + "loading", + "--mode", + profiler, + "//simple:a", 
+ "--output", + str(file_path), + ) + + await _assertions_for_profile_without_frozen_module(command, file_path, profiler) + + +@buck_test() +@pytest.mark.parametrize( + "profiler", + PROFILERS, +) +@pytest.mark.parametrize( + "recursive", + [True, False], +) +async def test_profile_analysis_pattern( + buck: Buck, tmp_path: Path, profiler: str, recursive: bool +) -> None: + file_path = tmp_path / "profile" + + command = buck.profile( + "analysis", + "--mode", + profiler, + "//simple/...", # We test this. + "--output", + str(file_path), + *(["--recursive"] if recursive else []), + ) + + await assert_flame_outputs(command, file_path, profiler) + + +@buck_test() +@pytest.mark.parametrize( + "profiler", + PROFILERS, +) +async def test_profile_loading_recursive( + buck: Buck, tmp_path: Path, profiler: str +) -> None: + file_path = tmp_path / "profile" + + command = buck.profile( + "loading", + "--mode", + profiler, + "//simple:", + "--output", + str(file_path), + "--recursive", + ) + + await expect_failure( + command, + stderr_regex="Recursive profiling is not supported for loading profiling", + ) + + +@buck_test() +@pytest.mark.parametrize( + "profiler", + PROFILERS, +) +async def test_profile_bxl_with_actions( + buck: Buck, tmp_path: Path, profiler: str +) -> None: + file_path = tmp_path / "profile" + + command = buck.profile( + "bxl", + "--mode", + profiler, + "//bxl/profile.bxl:profile_with_actions", + "--output", + str(file_path), + ) + + await assert_flame_outputs(command, file_path, profiler) + + +@buck_test() +@pytest.mark.parametrize( + "profiler", + PROFILERS, +) +async def test_profile_bxl_without_actions( + buck: Buck, tmp_path: Path, profiler: str +) -> None: + file_path = tmp_path / "profile" + + command = buck.profile( + "bxl", + "--mode", + profiler, + "//bxl/profile.bxl:profile_without_actions", + "--output", + str(file_path), + ) + + await assert_flame_outputs(command, file_path, profiler) + + +@buck_test(setup_eden=True) +async def test_profile_no_buckd( + buck: Buck, + tmp_path: Path, +) -> None: + file_path = tmp_path / "profile" + + command = await buck.profile( + "loading", + "--mode", + "statement", + "//simple:", + "--output", + str(file_path), + "--no-buckd", + ) + + assert "Total retained bytes:" in command.stdout + assert os.path.exists(file_path) + + +@buck_test() +async def test_profile_loading_recursive_target_pattern( + buck: Buck, tmp_path: Path +) -> None: + file_path = tmp_path / "profile" + profiler = "time-flame" + + command = buck.profile( + "loading", + "--mode", + profiler, + "//simple/...", + "--output", + str(file_path), + ) + + await _assertions_for_profile_without_frozen_module(command, file_path, profiler) + + +async def _assertions_for_profile_without_frozen_module( + command: Process[BuckResult, BuckException], + file_path: Path, + profiler: str, +) -> None: + if profiler.endswith("-retained"): + await expect_failure( + command, + stderr_regex="Retained memory profiling is available only for analysis profile", + ) + else: + await assert_flame_outputs(command, file_path, profiler) + + +async def assert_flame_outputs( + command: Process[BuckResult, BuckException], + file_path: Path, + profiler: str, +) -> None: + await command + + assert os.path.exists(file_path) + if "flame" in profiler: + assert os.path.exists(file_path / "flame.src") + assert os.path.exists(file_path / "flame.svg") + else: + assert os.path.exists(file_path / "profile.txt") + + assert os.path.exists(file_path / "targets.txt") diff --git 
a/tests/core/profile/test_profile_data/.buckconfig b/tests/core/profile/test_profile_data/.buckconfig
new file mode 100644
index 0000000000000..86f95442eda6a
--- /dev/null
+++ b/tests/core/profile/test_profile_data/.buckconfig
@@ -0,0 +1,15 @@
+[cells]
+  root = .
+  nano_prelude = nano_prelude
+
+[cell_aliases]
+  prelude = nano_prelude
+
+[external_cells]
+  nano_prelude = bundled
+
+[buildfile]
+  name = TARGETS.fixture
+
+[build]
+execution_platforms=root//:execution_platforms
diff --git a/tests/core/profile/test_profile_data/.buckroot b/tests/core/profile/test_profile_data/.buckroot
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/profile/test_profile_data/TARGETS.fixture b/tests/core/profile/test_profile_data/TARGETS.fixture
new file mode 100644
index 0000000000000..f472b7ff0ed74
--- /dev/null
+++ b/tests/core/profile/test_profile_data/TARGETS.fixture
@@ -0,0 +1 @@
+execution_platforms(name = "execution_platforms")
diff --git a/tests/core/profile/test_profile_data/bxl/TARGETS.fixture b/tests/core/profile/test_profile_data/bxl/TARGETS.fixture
new file mode 100644
index 0000000000000..7eecd15e80662
--- /dev/null
+++ b/tests/core/profile/test_profile_data/bxl/TARGETS.fixture
@@ -0,0 +1 @@
+trivial_build(name = "test")
diff --git a/tests/core/profile/test_profile_data/bxl/profile.bxl b/tests/core/profile/test_profile_data/bxl/profile.bxl
new file mode 100644
index 0000000000000..ca7c8ff240892
--- /dev/null
+++ b/tests/core/profile/test_profile_data/bxl/profile.bxl
@@ -0,0 +1,27 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+def _profile_without_actions(ctx):
+    ctx.build("//bxl:test")
+
+profile_without_actions = bxl_main(
+    impl = _profile_without_actions,
+    cli_args = {},
+)
+
+def _profile_with_actions(ctx):
+    actions = ctx.bxl_actions().actions
+
+    output = actions.write("my_output", "out")
+
+    ensured = ctx.output.ensure(output)
+    ctx.output.print(ensured)
+
+profile_with_actions = bxl_main(
+    impl = _profile_with_actions,
+    cli_args = {},
+)
diff --git a/tests/core/profile/test_profile_data/query/a/TARGETS.fixture b/tests/core/profile/test_profile_data/query/a/TARGETS.fixture
new file mode 100644
index 0000000000000..d6798300c3cc1
--- /dev/null
+++ b/tests/core/profile/test_profile_data/query/a/TARGETS.fixture
@@ -0,0 +1,7 @@
+stub(
+    name = "a",
+    deps = [
+        # Using a target in another package, to check that both targets appear in profile output.
+ "//query/b:b", + ], +) diff --git a/tests/core/profile/test_profile_data/query/b/TARGETS.fixture b/tests/core/profile/test_profile_data/query/b/TARGETS.fixture new file mode 100644 index 0000000000000..a960b5498d3cd --- /dev/null +++ b/tests/core/profile/test_profile_data/query/b/TARGETS.fixture @@ -0,0 +1,4 @@ +stub( + name = "b", + visibility = ["PUBLIC"], +) diff --git a/tests/core/profile/test_profile_data/recursive_transition/TARGETS.fixture b/tests/core/profile/test_profile_data/recursive_transition/TARGETS.fixture new file mode 100644 index 0000000000000..5a1ae2e88fb18 --- /dev/null +++ b/tests/core/profile/test_profile_data/recursive_transition/TARGETS.fixture @@ -0,0 +1,34 @@ +load(":defs.bzl", "ooo") + +constraint_setting( + name = "constr", +) + +constraint_value(name = "a", constraint_setting = ":constr") +constraint_value(name = "b", constraint_setting = ":constr") +constraint_value(name = "c", constraint_setting = ":constr") + +platform(name = "p") + +ooo( + name = "aaa", + deps = [], + use_constraint = "a", +) + +ooo( + name = "bbb", + deps = [ + ":aaa", + ], + use_constraint = "b", +) + +ooo( + name = "ccc", + deps = [ + ":bbb", + ], + use_constraint = "c", + default_target_platform = ":p", +) diff --git a/tests/core/profile/test_profile_data/recursive_transition/defs.bzl b/tests/core/profile/test_profile_data/recursive_transition/defs.bzl new file mode 100644 index 0000000000000..d5b60f3d98268 --- /dev/null +++ b/tests/core/profile/test_profile_data/recursive_transition/defs.bzl @@ -0,0 +1,38 @@ +# @nolint + +def _tr(platform, refs, attrs): + _ignore = platform + constraint_value = getattr(refs, attrs.use_constraint)[ConstraintValueInfo] + return PlatformInfo( + label = "pppppppppppppppppppppppppppppppppppppppppppppppppppppppppppp", + configuration = ConfigurationInfo( + constraints = { + constraint_value.setting.label: constraint_value, + }, + values = {}, + ), + ) + +tr = transition( + impl = _tr, + refs = { + "a": "//recursive_transition:a", + "b": "//recursive_transition:b", + "c": "//recursive_transition:c", + }, + attrs = [ + "use_constraint", + ], +) + +def _ooo(_ctx): + return [DefaultInfo()] + +ooo = rule( + impl = _ooo, + attrs = { + "use_constraint": attrs.string(), + "deps": attrs.list(attrs.dep()), + }, + cfg = tr, +) diff --git a/tests/core/profile/test_profile_data/self_transition/TARGETS.fixture b/tests/core/profile/test_profile_data/self_transition/TARGETS.fixture new file mode 100644 index 0000000000000..e609a04cd785c --- /dev/null +++ b/tests/core/profile/test_profile_data/self_transition/TARGETS.fixture @@ -0,0 +1,5 @@ +load(":rules.bzl", "zzz") + +zzz(name = "zzz") + +platform(name = "p") diff --git a/tests/core/profile/test_profile_data/self_transition/rules.bzl b/tests/core/profile/test_profile_data/self_transition/rules.bzl new file mode 100644 index 0000000000000..2d3d60ee0fc2c --- /dev/null +++ b/tests/core/profile/test_profile_data/self_transition/rules.bzl @@ -0,0 +1,26 @@ +# @nolint + +def _zzz(ctx): + _ignore = ctx + return [DefaultInfo()] + +def _transition_impl(platform, refs): + _ignore = (platform, refs) + return PlatformInfo( + label = "", + configuration = ConfigurationInfo( + constraints = {}, + values = {}, + ), + ) + +_tr = transition( + impl = _transition_impl, + refs = {}, +) + +zzz = rule( + impl = _zzz, + attrs = {}, + cfg = _tr, +) diff --git a/tests/core/profile/test_profile_data/simple/TARGETS.fixture b/tests/core/profile/test_profile_data/simple/TARGETS.fixture new file mode 100644 index 0000000000000..546ad1f78e42c --- 
/dev/null +++ b/tests/core/profile/test_profile_data/simple/TARGETS.fixture @@ -0,0 +1,7 @@ +stub(name = "ddd") + +stub(name = "test", deps = [":ddd"]) + +stub(name = "xxx") +stub(name = "yyy") +stub(name = "zzz") diff --git a/tests/core/query/BUCK b/tests/core/query/BUCK new file mode 100644 index 0000000000000..2405179fdb80d --- /dev/null +++ b/tests/core/query/BUCK @@ -0,0 +1,28 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_buildfiles", + srcs = ["test_buildfiles.py"], + data_dir = "test_buildfiles_data", + deps = ["//buck2/tests/e2e_util:golden"], +) + +buck2_e2e_test( + name = "test_target_call_stacks", + srcs = ["test_target_call_stacks.py"], + data_dir = "test_target_call_stacks_data", + deps = [ + "//buck2/tests/e2e_util:golden", + ], +) + +buck2_e2e_test( + name = "test_target_configuration_toolchain_deps_traversal", + srcs = ["test_target_configuration_toolchain_deps_traversal.py"], + data_dir = "test_target_configuration_toolchain_deps_traversal_data", + deps = [ + "//buck2/tests/e2e_util:golden", + ], +) diff --git a/tests/core/query/aquery/BUCK b/tests/core/query/aquery/BUCK new file mode 100644 index 0000000000000..585fd38c86241 --- /dev/null +++ b/tests/core/query/aquery/BUCK @@ -0,0 +1,13 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_aquery", + srcs = ["test_aquery.py"], + data_dir = "test_aquery_data", + serialize_test_cases = False, + deps = [ + "//buck2/tests/e2e_util:golden", + ], +) diff --git a/tests/core/query/aquery/test_aquery.py b/tests/core/query/aquery/test_aquery.py new file mode 100644 index 0000000000000..4b5985ec08d6a --- /dev/null +++ b/tests/core/query/aquery/test_aquery.py @@ -0,0 +1,155 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
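+
+# Golden tests for aquery: cases run a query (via the CLI or BXL) and compare
+# the output against checked-in *.golden.json files.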
+ +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.golden import golden + + +@buck_test() +async def test_target(buck: Buck) -> None: + stdout = (await buck.aquery("//:test", "-a", "identifier")).stdout + + golden( + output=stdout, + rel_path="target.golden.json", + ) + + +@buck_test() +async def test_all_outputs(buck: Buck) -> None: + stdout = (await buck.aquery("all_outputs(//:test)", "-a", "identifier")).stdout + + golden( + output=stdout, + rel_path="all_outputs.golden.json", + ) + + +@buck_test() +async def test_all_actions(buck: Buck) -> None: + stdout = (await buck.aquery("all_actions(//:test)", "-a", "identifier")).stdout + + golden( + output=stdout, + rel_path="all_actions.golden.json", + ) + + +@buck_test() +async def test_all_outputs_subtarget(buck: Buck) -> None: + stdout = ( + await buck.aquery("all_outputs('//:test[sub]')", "-a", "identifier") + ).stdout + + golden( + output=stdout, + rel_path="all_outputs_subtarget.golden.json", + ) + + +@buck_test() +async def test_filter(buck: Buck) -> None: + stdout = ( + await buck.aquery( + "attrfilter('identifier', 'other', all_actions('//:test[sub]'))", + "-a", + "identifier", + ) + ).stdout + + golden( + output=stdout, + rel_path="filter.golden.json", + ) + + +@buck_test() +async def test_deps(buck: Buck) -> None: + stdout = (await buck.aquery("deps(//:test)", "-a", "identifier")).stdout + + golden( + output=stdout, + rel_path="deps.golden.json", + ) + + +@buck_test() +async def test_bxl_aquery_target(buck: Buck) -> None: + stdout = (await buck.bxl("//:aquery.bxl:target")).stdout + golden( + output=stdout, + rel_path="bxl_target.golden.json", + ) + + +@buck_test() +async def test_bxl_aquery_all_outputs(buck: Buck) -> None: + stdout = (await buck.bxl("//:aquery.bxl:all_outputs")).stdout + + golden( + output=stdout, + rel_path="bxl_all_outputs.golden.json", + ) + + +@buck_test() +async def test_bxl_aquery_all_actions(buck: Buck) -> None: + stdout = (await buck.bxl("//:aquery.bxl:all_actions")).stdout + + golden( + output=stdout, + rel_path="bxl_all_actions.golden.json", + ) + + +@buck_test() +async def test_bxl_aquery_all_outputs_subtarget(buck: Buck) -> None: + stdout = (await buck.bxl("//:aquery.bxl:all_outputs_subtarget")).stdout + + golden( + output=stdout, + rel_path="bxl_all_outputs_subtarget.golden.json", + ) + + +@buck_test() +async def test_bxl_aquery_attrfilter(buck: Buck) -> None: + stdout = (await buck.bxl("//:aquery.bxl:attrfilter")).stdout + + golden( + output=stdout, + rel_path="bxl_filter.golden.json", + ) + + +@buck_test() +async def test_bxl_aquery_deps(buck: Buck) -> None: + stdout = (await buck.bxl("//:aquery.bxl:deps")).stdout + + golden( + output=stdout, + rel_path="bxl_deps.golden.json", + ) + + +@buck_test() +async def test_bxl_aquery_eval(buck: Buck) -> None: + stdout = (await buck.bxl("//:aquery.bxl:eval")).stdout + + golden( + output=stdout, + rel_path="bxl_eval.golden.json", + ) + + +@buck_test() +async def test_bxl_aquery_action_query_node(buck: Buck) -> None: + await buck.bxl("//:aquery.bxl:action_query_node") diff --git a/tests/core/query/aquery/test_aquery_data/.buckconfig b/tests/core/query/aquery/test_aquery_data/.buckconfig new file mode 100644 index 0000000000000..425a56f43b9c4 --- /dev/null +++ b/tests/core/query/aquery/test_aquery_data/.buckconfig @@ -0,0 +1,6 @@ +[repositories] + root = . 
+[repository_aliases] + prelude = root +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/query/aquery/test_aquery_data/.buckroot b/tests/core/query/aquery/test_aquery_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/query/aquery/test_aquery_data/TARGETS.fixture b/tests/core/query/aquery/test_aquery_data/TARGETS.fixture new file mode 100644 index 0000000000000..2671e2ab41071 --- /dev/null +++ b/tests/core/query/aquery/test_aquery_data/TARGETS.fixture @@ -0,0 +1 @@ +test(name = "test") diff --git a/tests/core/query/aquery/test_aquery_data/all_actions.golden.json b/tests/core/query/aquery/test_aquery_data/all_actions.golden.json new file mode 100644 index 0000000000000..b984e28b667fb --- /dev/null +++ b/tests/core/query/aquery/test_aquery_data/all_actions.golden.json @@ -0,0 +1,22 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "(target: `root//:test ()`, id: `1`)": { + "identifier": "default" + }, + "(target: `root//:test ()`, id: `0`)": { + "identifier": "dep" + }, + "(target: `root//:test ()`, id: `2`)": { + "identifier": "other" + }, + "(target: `root//:test ()`, id: `3`)": { + "identifier": "sub_default" + }, + "(target: `root//:test ()`, id: `4`)": { + "identifier": "sub_other" + }, + "(target: `root//:test ()`, id: `5`)": { + "identifier": "unused" + } +} diff --git a/tests/core/query/aquery/test_aquery_data/all_outputs.golden.json b/tests/core/query/aquery/test_aquery_data/all_outputs.golden.json new file mode 100644 index 0000000000000..fc9559bbe8793 --- /dev/null +++ b/tests/core/query/aquery/test_aquery_data/all_outputs.golden.json @@ -0,0 +1,10 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "(target: `root//:test ()`, id: `1`)": { + "identifier": "default" + }, + "(target: `root//:test ()`, id: `2`)": { + "identifier": "other" + } +} diff --git a/tests/core/query/aquery/test_aquery_data/all_outputs_subtarget.golden.json b/tests/core/query/aquery/test_aquery_data/all_outputs_subtarget.golden.json new file mode 100644 index 0000000000000..21c3ae22c278f --- /dev/null +++ b/tests/core/query/aquery/test_aquery_data/all_outputs_subtarget.golden.json @@ -0,0 +1,10 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "(target: `root//:test ()`, id: `3`)": { + "identifier": "sub_default" + }, + "(target: `root//:test ()`, id: `4`)": { + "identifier": "sub_other" + } +} diff --git a/tests/core/query/aquery/test_aquery_data/aquery.bxl b/tests/core/query/aquery/test_aquery_data/aquery.bxl new file mode 100644 index 0000000000000..3ed9f4429c497 --- /dev/null +++ b/tests/core/query/aquery/test_aquery_data/aquery.bxl @@ -0,0 +1,142 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
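+
+# BXL counterparts of the CLI aquery tests: each bxl_main evaluates an aquery;
+# most print JSON that the Python tests compare against bxl_*.golden.json goldens.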
+ +def _assert_eq(a, b): + if a != b: + fail("Expected {} == {}".format(a, b)) + +def _impl_target(ctx): + result = ctx.aquery().eval("//:test") + + output = {} + for node in result: + data = {} + if hasattr(node.attrs, "identifier"): + data.update({"identifier": node.attrs.identifier}) + output.update({str(node): data}) + ctx.output.print_json(output) + +target = bxl_main( + impl = _impl_target, + cli_args = {}, +) + +def _impl_all_outputs(ctx): + target_literal = "//:test" + target_set = ctx.unconfigured_targets("//:test") + + # assert that target literal and target set can both be passed into aquery + target_literal_result = ctx.aquery().all_outputs(target_literal) + target_set_result = ctx.aquery().all_outputs(target_set) + + _assert_eq(target_set_result, target_literal_result) + + output = {} + for node in target_set_result: + attr = {"identifier": node.attrs.identifier} + output.update({str(node): attr}) + ctx.output.print_json(output) + +all_outputs = bxl_main( + impl = _impl_all_outputs, + cli_args = {}, +) + +def _impl_all_actions(ctx): + result = ctx.aquery().all_actions("//:test") + + output = {} + for node in result: + attr = {"identifier": node.attrs.identifier} + output.update({str(node): attr}) + ctx.output.print_json(output) + +all_actions = bxl_main( + impl = _impl_all_actions, + cli_args = {}, +) + +def _impl_all_outputs_subtarget(ctx): + providers_set = ctx.unconfigured_sub_targets("//:test[sub]") + providers_literal = "//:test[sub]" + + # assert that target literal and target set can both be passed into aquery + providers_set_result = ctx.aquery().all_outputs(providers_set) + providers_literal_result = ctx.aquery().all_outputs(providers_literal) + + _assert_eq(providers_set_result, providers_literal_result) + + output = {} + for node in providers_set_result: + attr = {"identifier": node.attrs.identifier} + output.update({str(node): attr}) + ctx.output.print_json(output) + +all_outputs_subtarget = bxl_main( + impl = _impl_all_outputs_subtarget, + cli_args = {}, +) + +def _impl_attrfilter(ctx): + all_actions = ctx.aquery().all_actions("//:test[sub]") + result = ctx.aquery().attrfilter("identifier", "other", all_actions) + + output = {} + for node in result: + attr = {"identifier": node.attrs.identifier} + output.update({str(node): attr}) + ctx.output.print_json(output) + +attrfilter = bxl_main( + impl = _impl_attrfilter, + cli_args = {}, +) + +def _impl_deps(ctx): + result = ctx.aquery().deps("//:test") + + output = {} + for node in result: + output.update({str(node): node.attrs}) + ctx.output.print_json(output) + +deps = bxl_main( + impl = _impl_deps, + cli_args = {}, +) + +def _impl_eval(ctx): + result = ctx.aquery().eval("attrfilter('identifier', 'other', all_actions('//:test[sub]'))") + + output = {} + for node in result: + attr = {"identifier": node.attrs.identifier} + output.update({str(node): attr}) + ctx.output.print_json(output) + +eval = bxl_main( + impl = _impl_eval, + cli_args = {}, +) + +def _impl_action_query_node(ctx): + result = ctx.aquery().eval("//:test") + + action = result[0] + analysis = result[1] + + _assert_eq(type(action.action()), "action") + _assert_eq(action.rule_type, "copy") + _assert_eq(str(action.action().owner().raw_target()), "root//:test") + + _assert_eq(type(analysis.analysis()), "bxl.AnalysisResult") + _assert_eq(analysis.rule_type, "analysis") + +action_query_node = bxl_main( + impl = _impl_action_query_node, + cli_args = {}, +) diff --git a/tests/core/query/aquery/test_aquery_data/bxl_all_actions.golden.json 
b/tests/core/query/aquery/test_aquery_data/bxl_all_actions.golden.json new file mode 100644 index 0000000000000..b984e28b667fb --- /dev/null +++ b/tests/core/query/aquery/test_aquery_data/bxl_all_actions.golden.json @@ -0,0 +1,22 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "(target: `root//:test ()`, id: `1`)": { + "identifier": "default" + }, + "(target: `root//:test ()`, id: `0`)": { + "identifier": "dep" + }, + "(target: `root//:test ()`, id: `2`)": { + "identifier": "other" + }, + "(target: `root//:test ()`, id: `3`)": { + "identifier": "sub_default" + }, + "(target: `root//:test ()`, id: `4`)": { + "identifier": "sub_other" + }, + "(target: `root//:test ()`, id: `5`)": { + "identifier": "unused" + } +} diff --git a/tests/core/query/aquery/test_aquery_data/bxl_all_outputs.golden.json b/tests/core/query/aquery/test_aquery_data/bxl_all_outputs.golden.json new file mode 100644 index 0000000000000..fc9559bbe8793 --- /dev/null +++ b/tests/core/query/aquery/test_aquery_data/bxl_all_outputs.golden.json @@ -0,0 +1,10 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "(target: `root//:test ()`, id: `1`)": { + "identifier": "default" + }, + "(target: `root//:test ()`, id: `2`)": { + "identifier": "other" + } +} diff --git a/tests/core/query/aquery/test_aquery_data/bxl_all_outputs_subtarget.golden.json b/tests/core/query/aquery/test_aquery_data/bxl_all_outputs_subtarget.golden.json new file mode 100644 index 0000000000000..21c3ae22c278f --- /dev/null +++ b/tests/core/query/aquery/test_aquery_data/bxl_all_outputs_subtarget.golden.json @@ -0,0 +1,10 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "(target: `root//:test ()`, id: `3`)": { + "identifier": "sub_default" + }, + "(target: `root//:test ()`, id: `4`)": { + "identifier": "sub_other" + } +} diff --git a/tests/core/query/aquery/test_aquery_data/bxl_deps.golden.json b/tests/core/query/aquery/test_aquery_data/bxl_deps.golden.json new file mode 100644 index 0000000000000..ea64cdb3283a1 --- /dev/null +++ b/tests/core/query/aquery/test_aquery_data/bxl_deps.golden.json @@ -0,0 +1,21 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "root//:test ()": { + "kind": "analysis" + }, + "(target: `root//:test ()`, id: `0`)": { + "kind": "write", + "category": "write", + "identifier": "dep", + "contents": "", + "absolute": "false", + "executor_configuration": "RemoteEnabled + executor hybrid + remote cache enabled + cache upload disabled + remote dep file cache disabled" + }, + "(target: `root//:test ()`, id: `1`)": { + "kind": "copy", + "category": "copy", + "identifier": "default", + "executor_configuration": "RemoteEnabled + executor hybrid + remote cache enabled + cache upload disabled + remote dep file cache disabled" + } +} diff --git a/tests/core/query/aquery/test_aquery_data/bxl_eval.golden.json b/tests/core/query/aquery/test_aquery_data/bxl_eval.golden.json new file mode 100644 index 0000000000000..52bf9d6dbe585 --- /dev/null +++ b/tests/core/query/aquery/test_aquery_data/bxl_eval.golden.json @@ -0,0 +1,7 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "(target: `root//:test ()`, id: `2`)": { + "identifier": "other" + } 
+} diff --git a/tests/core/query/aquery/test_aquery_data/bxl_filter.golden.json b/tests/core/query/aquery/test_aquery_data/bxl_filter.golden.json new file mode 100644 index 0000000000000..52bf9d6dbe585 --- /dev/null +++ b/tests/core/query/aquery/test_aquery_data/bxl_filter.golden.json @@ -0,0 +1,7 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "(target: `root//:test ()`, id: `2`)": { + "identifier": "other" + } +} diff --git a/tests/core/query/aquery/test_aquery_data/bxl_target.golden.json b/tests/core/query/aquery/test_aquery_data/bxl_target.golden.json new file mode 100644 index 0000000000000..f4fd01e21ec5a --- /dev/null +++ b/tests/core/query/aquery/test_aquery_data/bxl_target.golden.json @@ -0,0 +1,8 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "(target: `root//:test ()`, id: `1`)": { + "identifier": "default" + }, + "root//:test ()": {} +} diff --git a/tests/core/query/aquery/test_aquery_data/deps.golden.json b/tests/core/query/aquery/test_aquery_data/deps.golden.json new file mode 100644 index 0000000000000..dad7929afd43e --- /dev/null +++ b/tests/core/query/aquery/test_aquery_data/deps.golden.json @@ -0,0 +1,11 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "root//:test ()": {}, + "(target: `root//:test ()`, id: `0`)": { + "identifier": "dep" + }, + "(target: `root//:test ()`, id: `1`)": { + "identifier": "default" + } +} diff --git a/tests/core/query/aquery/test_aquery_data/filter.golden.json b/tests/core/query/aquery/test_aquery_data/filter.golden.json new file mode 100644 index 0000000000000..52bf9d6dbe585 --- /dev/null +++ b/tests/core/query/aquery/test_aquery_data/filter.golden.json @@ -0,0 +1,7 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "(target: `root//:test ()`, id: `2`)": { + "identifier": "other" + } +} diff --git a/tests/core/query/aquery/test_aquery_data/prelude.bzl b/tests/core/query/aquery/test_aquery_data/prelude.bzl new file mode 100644 index 0000000000000..a2de5a4f4c4ec --- /dev/null +++ b/tests/core/query/aquery/test_aquery_data/prelude.bzl @@ -0,0 +1,32 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
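+
+# Fixture rule: declares six actions (dep, default, other, sub_default,
+# sub_other, unused) so the aquery tests can tell output kinds apart.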
+ +def _test(ctx: AnalysisContext): + dep = ctx.actions.write("dep", "") + default = ctx.actions.copy_file("default", dep) + other = ctx.actions.write("other", "") + + sub_default = ctx.actions.write("sub_default", "") + sub_other = ctx.actions.write("sub_other", "") + + # Unused + ctx.actions.write("unused", "") + + return [DefaultInfo( + default_outputs = [default], + other_outputs = [other], + sub_targets = { + "sub": [ + DefaultInfo( + default_outputs = [sub_default], + other_outputs = [sub_other], + ), + ], + }, + )] + +test = rule(impl = _test, attrs = {}) diff --git a/tests/core/query/aquery/test_aquery_data/target.golden.json b/tests/core/query/aquery/test_aquery_data/target.golden.json new file mode 100644 index 0000000000000..f4fd01e21ec5a --- /dev/null +++ b/tests/core/query/aquery/test_aquery_data/target.golden.json @@ -0,0 +1,8 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "(target: `root//:test ()`, id: `1`)": { + "identifier": "default" + }, + "root//:test ()": {} +} diff --git a/tests/core/query/cquery/BUCK b/tests/core/query/cquery/BUCK new file mode 100644 index 0000000000000..ffec201765350 --- /dev/null +++ b/tests/core/query/cquery/BUCK @@ -0,0 +1,43 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_compatible_with", + srcs = ["test_compatible_with.py"], + data_dir = "test_compatible_with_data", + deps = ["//buck2/tests/e2e_util:golden"], +) + +buck2_e2e_test( + name = "test_cquery", + srcs = ["test_cquery.py"], + data_dir = "test_cquery_data", + deps = [ + "//buck2/tests/e2e_util:golden", + ], +) + +buck2_e2e_test( + name = "test_filter", + srcs = ["test_filter.py"], + data_dir = "test_filter_data", +) + +buck2_e2e_test( + name = "test_owner", + srcs = ["test_owner.py"], + data_dir = "test_owner_data", +) + +buck2_e2e_test( + name = "test_owner_isolated", + srcs = ["test_owner_isolated.py"], + data_dir = "test_owner_isolated_data", +) + +buck2_e2e_test( + name = "test_cquery_with_transition", + srcs = ["test_cquery_with_transition.py"], + data_dir = "test_cquery_with_transition_data", +) diff --git a/tests/core/query/cquery/test_compatible_with.py b/tests/core/query/cquery/test_compatible_with.py new file mode 100644 index 0000000000000..bd35bf8cc6dbb --- /dev/null +++ b/tests/core/query/cquery/test_compatible_with.py @@ -0,0 +1,28 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import re + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_compatible_with(buck: Buck) -> None: + for good in ["root//:pass", "root//:pass2"]: + out = await buck.cquery(good) + assert re.match( + "{} \\(.*\\)\n".format(good), + out.stdout, + ) + + for bad in ["root//:fail", "root//:fail2"]: + out = await buck.cquery(bad) + assert out.stdout == "" diff --git a/tests/core/query/cquery/test_compatible_with_data/.buckconfig b/tests/core/query/cquery/test_compatible_with_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/query/cquery/test_compatible_with_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . 
+  nano_prelude = nano_prelude
+
+[cell_aliases]
+  prelude = nano_prelude
+
+[external_cells]
+  nano_prelude = bundled
+
+[buildfile]
+  name = TARGETS.fixture
diff --git a/tests/core/query/cquery/test_compatible_with_data/.buckroot b/tests/core/query/cquery/test_compatible_with_data/.buckroot
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/query/cquery/test_compatible_with_data/TARGETS.fixture b/tests/core/query/cquery/test_compatible_with_data/TARGETS.fixture
new file mode 100644
index 0000000000000..299e99869367a
--- /dev/null
+++ b/tests/core/query/cquery/test_compatible_with_data/TARGETS.fixture
@@ -0,0 +1,75 @@
+constraint_setting(
+    name = "constraint",
+)
+
+constraint_value(
+    name = "value1",
+    constraint_setting = ":constraint",
+)
+
+constraint_value(
+    name = "value2",
+    constraint_setting = ":constraint",
+)
+
+constraint_setting(
+    name = "other_constraint",
+)
+
+constraint_value(
+    name = "other_value1",
+    constraint_setting = ":other_constraint",
+)
+
+constraint_value(
+    name = "other_value2",
+    constraint_setting = ":other_constraint",
+)
+
+platform(
+    name = "platform1",
+    constraint_values = [
+        ":other_value1",
+        ":value1",
+    ],
+)
+
+platform(
+    name = "platform2",
+    constraint_values = [
+        ":other_value2",
+        ":value2",
+    ],
+)
+
+stub(
+    name = "pass",
+    default_target_platform = ":platform1",
+    target_compatible_with = [":value1"],
+)
+
+stub(
+    name = "fail",
+    default_target_platform = ":platform1",
+    target_compatible_with = [":value2"],
+)
+
+stub(
+    name = "fail2",
+    default_target_platform = ":platform1",
+    # value1 should match, but not other_value2; target_compatible_with requires that all match.
+    target_compatible_with = [
+        ":other_value2",
+        ":value1",
+    ],
+)
+
+stub(
+    name = "pass2",
+    # value1 should match, and compatible_with (unlike target_compatible_with) only requires one match.
+    compatible_with = [
+        ":other_value2",
+        ":value1",
+    ],
+    default_target_platform = ":platform1",
+)
diff --git a/tests/core/query/cquery/test_cquery.py b/tests/core/query/cquery/test_cquery.py
new file mode 100644
index 0000000000000..01c7dd73db033
--- /dev/null
+++ b/tests/core/query/cquery/test_cquery.py
@@ -0,0 +1,259 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+
+import json
+import re
+from pathlib import Path
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.asserts import expect_failure
+from buck2.tests.e2e_util.buck_workspace import buck_test
+from buck2.tests.e2e_util.helper.golden import golden_replace_cfg_hash
+
+"""
+Generally we test basic functionality here and do
+more extensive testing in the uquery tests.
+""" + + +def _replace_hash(s: str) -> str: + return re.sub(r"\b[0-9a-f]{16}\b", "", s) + + +@buck_test(data_dir="unsorted") +async def test_query_inputs(buck: Buck) -> None: + result = await buck.cquery("""inputs(set(root//bin:the_binary //lib:file1))""") + assert result.stdout == "bin/TARGETS.fixture\n" + + +@buck_test(data_dir="unsorted") +async def test_query_cell(buck: Buck) -> None: + result = await buck.cquery("""//stuff:magic""", rel_cwd=Path("special")) + assert ( + _replace_hash(result.stdout) + == "special//stuff:magic (root//platforms:platform1#)\n" + ) + + +@buck_test(data_dir="unsorted") +async def test_query_relative(buck: Buck) -> None: + result = await buck.cquery("""...""", rel_cwd=Path("special")) + assert ( + _replace_hash(result.stdout) + == "special//stuff:magic (root//platforms:platform1#)\n" + ) + + +@buck_test(data_dir="unsorted") +async def test_query_provider_names(buck: Buck) -> None: + await expect_failure( + buck.cquery("'root//bin:the_binary[provider_name]'"), + stderr_regex="Expected a target pattern without providers", + ) + + await expect_failure( + buck.cquery("'root//bin:the_binary#some_flavor'"), + stderr_regex="Expected a target pattern without providers", + ) + + +@buck_test(data_dir="unsorted") +async def test_query_print_provider_text(buck: Buck) -> None: + out = await buck.cquery("%s", "root//bin:the_binary", "--show-providers") + golden_replace_cfg_hash( + output=_replace_hash(out.stdout), + rel_path="unsorted/query_print_provider_text.golden.txt", + ) + + +@buck_test(data_dir="unsorted") +async def test_query_print_provider_json(buck: Buck) -> None: + out = await buck.cquery("%s", "root//bin:the_binary", "--show-providers", "--json") + golden_replace_cfg_hash( + output=_replace_hash(out.stdout), + rel_path="unsorted/query_print_provider_json.golden.json", + ) + + +@buck_test(data_dir="unsorted") +async def test_query_chunked_stream(buck: Buck) -> None: + q = "deps(root//bin:the_binary)" + result1 = await buck.cquery(q) + await buck.kill() + result2 = await buck.cquery(q, env={"BUCK2_DEBUG_RAWOUTPUT_CHUNK_SIZE": "5"}) + assert result1.stdout == result2.stdout + + +@buck_test(data_dir="unsorted") +async def test_attributes(buck: Buck) -> None: + attrs_out = await buck.cquery( + "--output-attribute", + "buck\\..*", + "--output-attribute", + "srcs", + "set(root//bin:the_binary //lib:file1)", + ) + attrs_json_out = await buck.cquery( + "--output-attribute", + "buck\\..*", + "--output-attribute", + "srcs", + "--json", + "set(root//bin:the_binary //lib:file1)", + ) + # specifying any attrs enables json output + assert attrs_json_out.stdout == attrs_out.stdout + attrs_json_out = json.loads(_replace_hash(attrs_json_out.stdout)) + assert { + "root//bin:the_binary (root//platforms:platform1#)": { + "buck.deps": [ + "root//:data (root//platforms:platform1#)", + "root//lib:lib1 (root//platforms:platform1#)", + "root//lib:lib2 (root//platforms:platform1#)", + "root//lib:lib3 (root//platforms:platform1#)", + "root//:foo_toolchain (root//platforms:platform1#)", + "root//:bin (root//platforms:platform1#)", + ], + "buck.execution_platform": "", + "buck.package": "root//bin:TARGETS.fixture", + "buck.plugins": {}, + "buck.target_configuration": "root//platforms:platform1#", + "buck.type": "_foo_binary", + "buck.oncall": None, + "srcs": ["root//bin/TARGETS.fixture"], + }, + "root//lib:file1 (root//platforms:platform1#)": { + "buck.deps": [], + "buck.execution_platform": "", + "buck.package": "root//lib:TARGETS.fixture", + "buck.plugins": {}, + 
"buck.target_configuration": "root//platforms:platform1#", + "buck.type": "_foo_genrule", + "buck.oncall": None, + }, + } == attrs_json_out + + +# Tests for "%Ss" uses +@buck_test(data_dir="unsorted") +async def test_args_as_set(buck: Buck) -> None: + out = await buck.cquery("%Ss", "root//bin:the_binary", "//lib:file1") + assert ( + _replace_hash(out.stdout) + == "root//bin:the_binary (root//platforms:platform1#)\nroot//lib:file1 (root//platforms:platform1#)\n" + ) + + +@buck_test(data_dir="unsorted") +async def test_multi_query(buck: Buck) -> None: + out = await buck.cquery("%s", "root//bin:the_binary", "//lib:file1") + assert ( + _replace_hash(out.stdout) + == "root//bin:the_binary (root//platforms:platform1#)\nroot//lib:file1 (root//platforms:platform1#)\n" + ) + + +@buck_test(data_dir="multi_query_universe") +async def test_multi_query_universe(buck: Buck) -> None: + out = await buck.cquery( + "deps(%s)", "root//:macos-bin", "//:common-dep", "--output-format=json" + ) + # `common-dep` is configured for linux, so it must not include `only-on-macos` target. + # Which would be the case if we constructed universe from all the queries together + # instead of separate universes for each query. + golden_replace_cfg_hash( + output=_replace_hash(out.stdout), + rel_path="multi_query_universe/multi_query_universe.golden.json", + ) + + +@buck_test(data_dir="unsorted") +async def test_multi_query_print_provider_text(buck: Buck) -> None: + out = await buck.cquery( + "%s", "root//bin:the_binary", "//lib:lib1", "--show-providers" + ) + golden_replace_cfg_hash( + output=_replace_hash(out.stdout), + rel_path="unsorted/multi_query_print_provider_text.golden.txt", + ) + + +@buck_test(data_dir="unsorted") +async def test_multi_query_print_provider_json(buck: Buck) -> None: + out = await buck.cquery( + "%s", "root//bin:the_binary", "//lib:lib1", "--show-providers", "--json" + ) + + golden_replace_cfg_hash( + output=_replace_hash(out.stdout), + rel_path="unsorted/multi_query_print_provider_json.golden.json", + ) + + +@buck_test(data_dir="visibility") +async def test_visibility(buck: Buck) -> None: + for good in [ + "self//:pass1", + "self//:pass2", + "self//:pass3", + "self//:pass4", + ]: + out = await buck.cquery(good) + assert good in out.stdout + + for bad in [ + "self//:fail1", + "self//:fail2", + "self//:fail3", + "self//:fail4", + ]: + print(bad) + failure = await expect_failure(buck.cquery(bad)) + assert "not visible to `%s`" % bad in failure.stderr + + +@buck_test(data_dir="testsof") +async def test_testsof(buck: Buck) -> None: + out = await buck.cquery( + "testsof(//:foo_lib)", + "--target-platforms", + "//:platform_default_tests", + ) + + assert "root//:foo_test" in out.stdout + assert "root//:foo_extra_test" not in out.stdout + assert "root//:foo_lib" not in out.stdout + + out = await buck.cquery( + "testsof(//:foo_lib)", + "--target-platforms", + "//:platform_more_tests", + ) + + assert "root//:foo_test" in out.stdout + assert "root//:foo_extra_test" in out.stdout + assert "root//:foo_lib" not in out.stdout + + +# DICE currently may re-evaluate dead nodes ignoring errors, but it cannot ignore panics. +# The disabling of execution platforms through a buckconfig ended up causing a panic +# that was the root cause of non-deterministic buck2 failures on 10% of fbcode TD in S303188. +# +# TODO(scottcao): Disabling execution platforms is a hack that we need to get rid of +# because it's not how buck2 should be used. 
Get rid of this test case once fbcode TD +# stops disabling execution platforms +@buck_test(data_dir="toolchain_deps") +async def test_disabling_of_execution_platforms(buck: Buck) -> None: + # Run these commands 10x such that a stress run of 10 on continuous CI would run these commands 100x. + # If there is a regression then the stress run would for sure detect it. + for _ in range(10): + query = "deps(set(tests/...))" + await buck.cquery(query) + await buck.cquery(query, "-c", "build.execution_platforms=") diff --git a/tests/core/query/cquery/test_cquery_data/.buckroot b/tests/core/query/cquery/test_cquery_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/query/cquery/test_cquery_data/multi_query_universe/.buckconfig b/tests/core/query/cquery/test_cquery_data/multi_query_universe/.buckconfig new file mode 100644 index 0000000000000..ecd6ab068670a --- /dev/null +++ b/tests/core/query/cquery/test_cquery_data/multi_query_universe/.buckconfig @@ -0,0 +1,15 @@ +[buildfile] +name=TARGETS.fixture + +[cells] +root = . +nano_prelude = nano_prelude + +[cell_aliases] +prelude = nano_prelude + +[external_cells] +nano_prelude = bundled + +[buck2] +file_watcher = fs_hash_crawler diff --git a/tests/core/query/cquery/test_cquery_data/multi_query_universe/.buckroot b/tests/core/query/cquery/test_cquery_data/multi_query_universe/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/query/cquery/test_cquery_data/multi_query_universe/TARGETS.fixture b/tests/core/query/cquery/test_cquery_data/multi_query_universe/TARGETS.fixture new file mode 100644 index 0000000000000..d777a2613f442 --- /dev/null +++ b/tests/core/query/cquery/test_cquery_data/multi_query_universe/TARGETS.fixture @@ -0,0 +1,29 @@ +constraint_setting(name = "os") +constraint_value(name = "linux", constraint_setting = ":os") +constraint_value(name = "macos", constraint_setting = ":os") + +platform(name = "p-linux", constraint_values = [":linux"]) +platform(name = "p-macos", constraint_values = [":macos"]) + +stub( + name = "only-on-linux", +) + +stub( + name = "only-on-macos", +) + +stub( + name = "common-dep", + deps = select({ + ":linux": [":only-on-linux"], + ":macos": [":only-on-macos"], + }), + default_target_platform = ":p-linux", +) + +stub( + name = "macos-bin", + deps = [":common-dep"], + default_target_platform = ":p-macos", +) diff --git a/tests/core/query/cquery/test_cquery_data/multi_query_universe/multi_query_universe.golden.json b/tests/core/query/cquery/test_cquery_data/multi_query_universe/multi_query_universe.golden.json new file mode 100644 index 0000000000000..6eb72a246f4b5 --- /dev/null +++ b/tests/core/query/cquery/test_cquery_data/multi_query_universe/multi_query_universe.golden.json @@ -0,0 +1,13 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "root//:macos-bin": [ + "root//:only-on-macos (root//:p-macos#)", + "root//:common-dep (root//:p-macos#)", + "root//:macos-bin (root//:p-macos#)" + ], + "//:common-dep": [ + "root//:only-on-linux (root//:p-linux#)", + "root//:common-dep (root//:p-linux#)" + ] +} diff --git a/tests/core/query/cquery/test_cquery_data/testsof/.buckconfig b/tests/core/query/cquery/test_cquery_data/testsof/.buckconfig new file mode 100644 index 0000000000000..567334f8836e5 --- /dev/null +++ b/tests/core/query/cquery/test_cquery_data/testsof/.buckconfig @@ -0,0 +1,18 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored + 
+[cells] +root = . +nano_prelude = nano_prelude + +[cell_aliases] +prelude = nano_prelude + +[external_cells] +nano_prelude = bundled + +[buck2] +file_watcher = fs_hash_crawler diff --git a/tests/core/query/cquery/test_cquery_data/testsof/TARGETS.fixture b/tests/core/query/cquery/test_cquery_data/testsof/TARGETS.fixture new file mode 100644 index 0000000000000..1f6d93bdbae6e --- /dev/null +++ b/tests/core/query/cquery/test_cquery_data/testsof/TARGETS.fixture @@ -0,0 +1,53 @@ +constraint_setting( + name = "testfullness", +) + +constraint_value( + name = "regular_tests", + constraint_setting = ":testfullness", +) + +constraint_value( + name = "more_tests", + constraint_setting = ":testfullness", +) + +platform( + name = "platform_default_tests", + constraint_values = [":regular_tests"], +) + +platform( + name = "platform_more_tests", + constraint_values = [":more_tests"], +) + +stub( + name = "foo_lib", + tests = [":foo_test"] + select({":more_tests": [":foo_extra_test"], "DEFAULT": []}), +) + +stub( + name = "foo_lib_with_test_with_default_platform", + tests = [":foo_test_with_default_platform"], +) + +stub(name = "foo_test") + +stub(name = "foo_test_with_default_platform", default_target_platform = ":foo_test_default_platform") + +stub(name = "foo_extra_test", compatible_with = [":more_tests"]) + +constraint_setting( + name = "foo_test_constraint_setting", +) + +constraint_value( + name = "foo_test_constraint_value", + constraint_setting = ":foo_test_constraint_setting", +) + +platform( + name = "foo_test_default_platform", + constraint_values = [":foo_test_constraint_value"], +) diff --git a/tests/core/query/cquery/test_cquery_data/toolchain_deps/.buckconfig b/tests/core/query/cquery/test_cquery_data/toolchain_deps/.buckconfig new file mode 100644 index 0000000000000..8154738e7f33b --- /dev/null +++ b/tests/core/query/cquery/test_cquery_data/toolchain_deps/.buckconfig @@ -0,0 +1,15 @@ +[buildfile] +name = TARGETS.fixture + +[cells] +root = . 
+nano_prelude = nano_prelude + +[cell_aliases] +prelude = nano_prelude + +[external_cells] +nano_prelude = bundled + +[build] +execution_platforms = root//config:platforms diff --git a/tests/core/query/cquery/test_cquery_data/toolchain_deps/config/TARGETS.fixture b/tests/core/query/cquery/test_cquery_data/toolchain_deps/config/TARGETS.fixture new file mode 100644 index 0000000000000..a731da09619af --- /dev/null +++ b/tests/core/query/cquery/test_cquery_data/toolchain_deps/config/TARGETS.fixture @@ -0,0 +1,34 @@ +load(":defs.bzl", "execution_platform", "execution_platforms") + +constraint_setting(name = "os") +constraint_value(name = "windows", constraint_setting = ":os") +constraint_value(name = "linux", constraint_setting = ":os") + +constraint_setting(name = "mode") +constraint_value(name = "release", constraint_setting = ":mode") +constraint_value(name = "debug", constraint_setting = ":mode") + +execution_platform( + name = "platform_windows_exec", + configuration = [":windows", ":release"], +) + +platform( + name = "platform_windows", + constraint_values = [":windows", ":release"], +) + +execution_platform( + name = "platform_linux_exec", + configuration = [":linux", ":release"], +) + +platform( + name = "platform_linux", + constraint_values = [":linux", ":release"], +) + +execution_platforms( + name = "platforms", + platforms = [":platform_linux_exec", ":platform_windows_exec"], +) diff --git a/tests/core/query/cquery/test_cquery_data/toolchain_deps/config/defs.bzl b/tests/core/query/cquery/test_cquery_data/toolchain_deps/config/defs.bzl new file mode 100644 index 0000000000000..23283220e6051 --- /dev/null +++ b/tests/core/query/cquery/test_cquery_data/toolchain_deps/config/defs.bzl @@ -0,0 +1,45 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
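+
+# Minimal configuration rules for the toolchain_deps fixture: execution_platform
+# merges ConfigurationInfo from its deps, and execution_platforms registers the
+# resulting platforms via ExecutionPlatformRegistrationInfo.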
+ +def _execution_platform(ctx): + constraints = {} + values = {} + for x in ctx.attrs.configuration: + constraints |= x[ConfigurationInfo].constraints + values |= x[ConfigurationInfo].values + cfg = ConfigurationInfo(constraints = constraints, values = values) + + platform = ExecutionPlatformInfo( + label = ctx.label.raw_target(), + configuration = cfg, + executor_config = CommandExecutorConfig( + local_enabled = True, + remote_enabled = False, + ), + ) + + return [DefaultInfo(), platform] + +execution_platform = rule( + impl = _execution_platform, + is_configuration_rule = True, + attrs = {"configuration": attrs.list(attrs.dep(providers = [ConfigurationInfo]))}, +) + +def _execution_platforms(ctx): + return [ + DefaultInfo(), + ExecutionPlatformRegistrationInfo( + platforms = [x[ExecutionPlatformInfo] for x in ctx.attrs.platforms], + ), + ] + +execution_platforms = rule( + impl = _execution_platforms, + is_configuration_rule = True, + attrs = {"platforms": attrs.list(attrs.dep(providers = [ExecutionPlatformInfo]))}, +) diff --git a/tests/core/query/cquery/test_cquery_data/toolchain_deps/tests/TARGETS.fixture b/tests/core/query/cquery/test_cquery_data/toolchain_deps/tests/TARGETS.fixture new file mode 100644 index 0000000000000..1db4b117aded4 --- /dev/null +++ b/tests/core/query/cquery/test_cquery_data/toolchain_deps/tests/TARGETS.fixture @@ -0,0 +1,16 @@ +stub( + name = "python_and_asic", + default_target_platform = "//config:platform_windows", + toolchain_deps = [ + "//toolchains:asic", + "//toolchains:python", + ], +) + +stub( + name = "python_only", + default_target_platform = "//config:platform_windows", + toolchain_deps = [ + "//toolchains:python", + ], +) diff --git a/tests/core/query/cquery/test_cquery_data/toolchain_deps/toolchains/TARGETS.fixture b/tests/core/query/cquery/test_cquery_data/toolchain_deps/toolchains/TARGETS.fixture new file mode 100644 index 0000000000000..d082972def00f --- /dev/null +++ b/tests/core/query/cquery/test_cquery_data/toolchain_deps/toolchains/TARGETS.fixture @@ -0,0 +1,51 @@ +load(":defs.bzl", "toolchain") + +stub( + name = "compile_python_release_windows", +) + +stub( + name = "compile_python_debug_windows", +) + +stub( + name = "compile_python_release_linux", +) + +stub( + name = "compile_python_debug_linux", +) + +stub( + name = "compile_asic", + compatible_with = ["//config:windows"], +) + +stub( + name = "python_release", + deps = [select({ + "//config:linux": ":compile_python_release_linux", + "//config:windows": ":compile_python_release_windows", + })], +) + +stub( + name = "python_debug", + deps = [select({ + "//config:linux": ":compile_python_debug_linux", + "//config:windows": ":compile_python_debug_windows", + })], +) + +toolchain( + name = "python", + dep = select({ + "//config:debug": ":python_debug", + "//config:release": ":python_release", + }), +) + +toolchain( + name = "asic", + dep = ":compile_asic", +) diff --git a/tests/core/query/cquery/test_cquery_data/toolchain_deps/toolchains/defs.bzl b/tests/core/query/cquery/test_cquery_data/toolchain_deps/toolchains/defs.bzl new file mode 100644 index 0000000000000..856167ee2e938 --- /dev/null +++ b/tests/core/query/cquery/test_cquery_data/toolchain_deps/toolchains/defs.bzl @@ -0,0 +1,17 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _toolchain( + # starlark-lint-disable unused-argument + ctx): # @unused + fail("we do not run analysis in query tests") + +toolchain = rule( + impl = _toolchain, + attrs = {"dep": attrs.exec_dep()}, + is_toolchain_rule = True, +) diff --git a/tests/core/query/cquery/test_cquery_data/unsorted/.buckconfig b/tests/core/query/cquery/test_cquery_data/unsorted/.buckconfig new file mode 100644 index 0000000000000..3a6cb6a3fe4fa --- /dev/null +++ b/tests/core/query/cquery/test_cquery_data/unsorted/.buckconfig @@ -0,0 +1,20 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored,bin/ignored,bin/ignored.txt +package_boundary_exceptions=. + +[cells] +root = . +nano_prelude = nano_prelude +special = special + +[cell_aliases] +prelude = nano_prelude + +[external_cells] +nano_prelude = bundled + +[buck2] +file_watcher = fs_hash_crawler diff --git a/tests/core/query/cquery/test_cquery_data/unsorted/.buckroot b/tests/core/query/cquery/test_cquery_data/unsorted/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/query/cquery/test_cquery_data/unsorted/TARGETS.fixture b/tests/core/query/cquery/test_cquery_data/unsorted/TARGETS.fixture new file mode 100644 index 0000000000000..2194ea9270d4a --- /dev/null +++ b/tests/core/query/cquery/test_cquery_data/unsorted/TARGETS.fixture @@ -0,0 +1,41 @@ +load("//rules:rules.bzl", "foo_basic_print", "foo_binary", "foo_buildable", "foo_genrule") + +foo_binary( + name = "bin", + visibility = ["PUBLIC"], +) + +foo_genrule( + name = "data", + cmd = "$(exe :genrule_binary)", + visibility = ["PUBLIC"], +) + +foo_genrule( + name = "foo_toolchain", + cmd = "", + visibility = ["PUBLIC"], +) + +foo_genrule( + name = "genrule_binary", + cmd = "", + visibility = ["PUBLIC"], +) + +foo_binary( + name = "package_boundary_violation", + srcs = ["package_boundary_violation/bin"], + visibility = ["PUBLIC"], +) + +foo_buildable( + name = "buildable", + content = "FOO", + out = "out.txt", +) + +foo_basic_print( + name = "print", + visibility = ["PUBLIC"], +) diff --git a/tests/core/query/cquery/test_cquery_data/unsorted/bin/TARGETS.fixture b/tests/core/query/cquery/test_cquery_data/unsorted/bin/TARGETS.fixture new file mode 100644 index 0000000000000..814b8eb033aa0 --- /dev/null +++ b/tests/core/query/cquery/test_cquery_data/unsorted/bin/TARGETS.fixture @@ -0,0 +1,18 @@ +load("//rules:rules.bzl", "foo_binary") + +foo_binary( + name = "the_binary", + deps = [ + "//lib:lib1", + "//lib:lib2", + "//lib:lib3", + ], + srcs = [ + "TARGETS.fixture", + ], + cmd = ["$(exe //:bin)", "$(location //:data)"], +) + +platform( + name = "platform", +) diff --git a/tests/core/query/cquery/test_cquery_data/unsorted/bin/ignored.txt b/tests/core/query/cquery/test_cquery_data/unsorted/bin/ignored.txt new file mode 100644 index 0000000000000..592fd2594b569 --- /dev/null +++ b/tests/core/query/cquery/test_cquery_data/unsorted/bin/ignored.txt @@ -0,0 +1 @@ +ignore me diff --git a/tests/core/query/cquery/test_cquery_data/unsorted/bin/ignored/foo.txt b/tests/core/query/cquery/test_cquery_data/unsorted/bin/ignored/foo.txt new file mode 100644 index 0000000000000..257cc5642cb1a --- /dev/null +++ b/tests/core/query/cquery/test_cquery_data/unsorted/bin/ignored/foo.txt @@ -0,0 +1 @@ +foo diff --git 
a/tests/core/query/cquery/test_cquery_data/unsorted/bin/kind/TARGETS.fixture b/tests/core/query/cquery/test_cquery_data/unsorted/bin/kind/TARGETS.fixture
new file mode 100644
index 0000000000000..5d53a05d6cfc9
--- /dev/null
+++ b/tests/core/query/cquery/test_cquery_data/unsorted/bin/kind/TARGETS.fixture
@@ -0,0 +1,24 @@
+load(":rules.bzl", "rule1", "rule2", "rule3", "rule4", "rule_tset")
+
+rule1(name = "foo", foo = "hello")
+rule2(name = "bar", foo = "world")
+rule3(name = "bzzt", foo = "henlo")
+rule4(name = "target_with_outputs", foo = "blah")
+
+rule_tset(name = "tset1")
+rule_tset(name = "tset2")
+rule_tset(
+    name = "tset3",
+    deps = [
+        ":tset1",
+        ":tset2",
+    ],
+)
+
+rule_tset(
+    name = "target_with_tset",
+    deps = [
+        ":tset3",
+        ":tset1",
+    ],
+)
diff --git a/tests/core/query/cquery/test_cquery_data/unsorted/bin/kind/rules.bzl b/tests/core/query/cquery/test_cquery_data/unsorted/bin/kind/rules.bzl
new file mode 100644
index 0000000000000..ba7b4b7c932cb
--- /dev/null
+++ b/tests/core/query/cquery/test_cquery_data/unsorted/bin/kind/rules.bzl
@@ -0,0 +1,71 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+def _rule_impl(_ctx):
+    return [DefaultInfo()]
+
+rule1 = rule(impl = _rule_impl, attrs = {"foo": attrs.string()})
+rule2 = rule(impl = _rule_impl, attrs = {"foo": attrs.string()})
+rule3 = rule(impl = _rule_impl, attrs = {"foo": attrs.string()})
+
+def _rule_impl_with_run_info_and_default_info_outputs(ctx):
+    out = ctx.actions.write("default_out", "default_out")
+    run_info_out = ctx.actions.write("run_info_out", "run_info_out")
+    return [
+        DefaultInfo(default_outputs = [out]),
+        RunInfo(args = cmd_args(run_info_out)),
+    ]
+
+rule4 = rule(
+    impl = _rule_impl_with_run_info_and_default_info_outputs,
+    attrs = {"foo": attrs.string()},
+)
+
+def project(f: Artifact):
+    return f
+
+NameSet = transitive_set(args_projections = {
+    "project": project,
+})
+
+NameInfo = provider(fields = ["tset"])
+
+def _rule_impl_with_tset(ctx):
+    # Produce a file that contains our name.
+    out = ctx.actions.write("out.txt", str(ctx.label.name) + "\n")
+
+    # Produce a tset that is our file concatenated with all the files
+    # emitted by our children.
+    children = [d[NameInfo].tset for d in ctx.attrs.deps]
+    tset = ctx.actions.tset(NameSet, value = out, children = children)
+
+    # Concatenate all the files declared by the tset, into a single file
+    # (tset_out), which we'll return as our output.
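+    # The `sh -c` script below binds the output path to "$1", shifts it off,
+    # and cats the remaining (projected) tset files into it.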
+ agg = ctx.actions.declare_output("tset_out") + projected = tset.project_as_args("project") + + ctx.actions.run([ + "sh", + "-c", + 'out="$1" && shift && cat "$@" > "$out"', + "--", + agg.as_output(), + projected, + ], category = "test") + + return [ + NameInfo(tset = tset), + DefaultInfo(default_output = agg), + RunInfo(args = [projected]), + ] + +rule_tset = rule( + impl = _rule_impl_with_tset, + attrs = { + "deps": attrs.list(attrs.dep(providers = [NameInfo]), default = []), + }, +) diff --git a/tests/core/query/cquery/test_cquery_data/unsorted/lib/TARGETS.fixture b/tests/core/query/cquery/test_cquery_data/unsorted/lib/TARGETS.fixture new file mode 100644 index 0000000000000..426dc47c048f9 --- /dev/null +++ b/tests/core/query/cquery/test_cquery_data/unsorted/lib/TARGETS.fixture @@ -0,0 +1,49 @@ +load("//rules:rules.bzl", "foo_config_setting", "foo_genrule", "foo_library") + +foo_config_setting( + name = "constraint", +) + +foo_library( + name = "lib1", + srcs = [":file1", "TARGETS.fixture"], + description = "this is lib1", + visibility = ["PUBLIC"], +) + +foo_library( + name = "lib2", + srcs = [":file2"], + cmd = ["this is lib2", "cmd", "$(location :file2)"], + description = "this is lib2", + visibility = ["PUBLIC"], +) + +foo_library( + name = "lib3", + srcs = [":file3"], + cmd = ["this is lib3"] + select({ + ":constraint": ["this is lib3 too, case 1"], + "DEFAULT": ["this is lib3 too, case 2"], + }), + description = "this is lib3", + visibility = ["PUBLIC"], +) + +foo_genrule( + name = "file1", + cmd = "", + visibility = ["PUBLIC"], +) + +foo_genrule( + name = "file2", + cmd = "", + visibility = ["PUBLIC"], +) + +foo_genrule( + name = "file3", + cmd = "", + visibility = ["PUBLIC"], +) diff --git a/tests/core/query/cquery/test_cquery_data/unsorted/multi_query_print_provider_json.golden.json b/tests/core/query/cquery/test_cquery_data/unsorted/multi_query_print_provider_json.golden.json new file mode 100644 index 0000000000000..067f4da33eb4f --- /dev/null +++ b/tests/core/query/cquery/test_cquery_data/unsorted/multi_query_print_provider_json.golden.json @@ -0,0 +1,39 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "root//bin:the_binary": { + "root//bin:the_binary (root//platforms:platform1#)": { + "buck.providers": { + "DefaultInfo": { + "sub_targets": {}, + "default_outputs": [], + "other_outputs": [] + }, + "RunInfo": { + "args": { + "items": [], + "hidden": [], + "options": null + } + }, + "FooInfo": { + "foo": "the_binary_foo" + } + } + } + }, + "//lib:lib1": { + "root//lib:lib1 (root//platforms:platform1#)": { + "buck.providers": { + "DefaultInfo": { + "sub_targets": {}, + "default_outputs": [], + "other_outputs": [] + }, + "FooInfo": { + "foo": "lib1_foo" + } + } + } + } +} diff --git a/tests/core/query/cquery/test_cquery_data/unsorted/multi_query_print_provider_text.golden.txt b/tests/core/query/cquery/test_cquery_data/unsorted/multi_query_print_provider_text.golden.txt new file mode 100644 index 0000000000000..46decbd93d1c0 --- /dev/null +++ b/tests/core/query/cquery/test_cquery_data/unsorted/multi_query_print_provider_text.golden.txt @@ -0,0 +1,21 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +root//bin:the_binary (root//platforms:platform1#) + Providers([ + DefaultInfo( + sub_targets={}, + default_outputs=[], + other_outputs=[] + ), + RunInfo( args=cmd_args() ), + FooInfo( foo="the_binary_foo" ) + ]) 
+root//lib:lib1 (root//platforms:platform1#) + Providers([ + DefaultInfo( + sub_targets={}, + default_outputs=[], + other_outputs=[] + ), + FooInfo( foo="lib1_foo" ) + ]) diff --git a/tests/core/query/cquery/test_cquery_data/unsorted/platforms/TARGETS.fixture b/tests/core/query/cquery/test_cquery_data/unsorted/platforms/TARGETS.fixture new file mode 100644 index 0000000000000..2781f9031c7ae --- /dev/null +++ b/tests/core/query/cquery/test_cquery_data/unsorted/platforms/TARGETS.fixture @@ -0,0 +1 @@ +platform(name = "platform1", visibility = ["PUBLIC"]) diff --git a/tests/core/query/cquery/test_cquery_data/unsorted/query_print_provider_json.golden.json b/tests/core/query/cquery/test_cquery_data/unsorted/query_print_provider_json.golden.json new file mode 100644 index 0000000000000..fa64abb7063bf --- /dev/null +++ b/tests/core/query/cquery/test_cquery_data/unsorted/query_print_provider_json.golden.json @@ -0,0 +1,25 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "root//bin:the_binary": { + "root//bin:the_binary (root//platforms:platform1#)": { + "buck.providers": { + "DefaultInfo": { + "sub_targets": {}, + "default_outputs": [], + "other_outputs": [] + }, + "RunInfo": { + "args": { + "items": [], + "hidden": [], + "options": null + } + }, + "FooInfo": { + "foo": "the_binary_foo" + } + } + } + } +} diff --git a/tests/core/query/cquery/test_cquery_data/unsorted/query_print_provider_text.golden.txt b/tests/core/query/cquery/test_cquery_data/unsorted/query_print_provider_text.golden.txt new file mode 100644 index 0000000000000..ff2215afcc758 --- /dev/null +++ b/tests/core/query/cquery/test_cquery_data/unsorted/query_print_provider_text.golden.txt @@ -0,0 +1,12 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +root//bin:the_binary (root//platforms:platform1#) + Providers([ + DefaultInfo( + sub_targets={}, + default_outputs=[], + other_outputs=[] + ), + RunInfo( args=cmd_args() ), + FooInfo( foo="the_binary_foo" ) + ]) diff --git a/tests/core/query/cquery/test_cquery_data/unsorted/rules/rules.bzl b/tests/core/query/cquery/test_cquery_data/unsorted/rules/rules.bzl new file mode 100644 index 0000000000000..7e1771bc5e27a --- /dev/null +++ b/tests/core/query/cquery/test_cquery_data/unsorted/rules/rules.bzl @@ -0,0 +1,117 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
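+
+# Fixture rules shared by the cquery tests. The library, binary, and genrule
+# rules each return a FooInfo provider whose `foo` field is "<name>_foo".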
+ +FooInfo = provider(fields = [ + "foo", +]) + +def _platform_impl(ctx): + return [ + DefaultInfo(), + PlatformInfo( + label = str(ctx.label.raw_target()), + configuration = ConfigurationInfo( + constraints = {}, + values = {}, + ), + ), + ] + +foo_platform = rule( + impl = _platform_impl, + attrs = {}, +) + +def _config_setting_impl(_ctx): + return [DefaultInfo(), ConfigurationInfo(constraints = {}, values = {})] + +foo_config_setting = rule( + impl = _config_setting_impl, + attrs = {}, +) + +def _impl(ctx): + return [DefaultInfo(), FooInfo(foo = ctx.attrs.name + "_foo")] + +def _binary_impl(ctx): + return [DefaultInfo(), RunInfo(args = []), FooInfo(foo = ctx.attrs.name + "_foo")] + +def _buildable_impl(ctx): + out = ctx.actions.write(ctx.attrs.out, ctx.attrs.content) + return [DefaultInfo(default_output = out)] + +_foo_library = rule( + impl = _impl, + attrs = { + "cmd": attrs.list(attrs.arg(), default = []), + "deps": attrs.list(attrs.dep(), default = []), + "description": attrs.string(default = ""), + "mapped_srcs": attrs.dict(attrs.string(), attrs.source(), default = {}), + "srcs": attrs.list(attrs.source(), default = []), + "tuple_srcs": attrs.option(attrs.tuple(attrs.source(), attrs.source(), attrs.source()), default = None), + }, +) + +_foo_binary = rule( + impl = _binary_impl, + attrs = { + "cmd": attrs.list(attrs.arg(), default = []), + "deps": attrs.list(attrs.dep(), default = []), + "description": attrs.string(default = ""), + "srcs": attrs.list(attrs.source(), default = []), + "_foo_toolchain": attrs.exec_dep(default = "root//:foo_toolchain"), + }, +) + +_foo_genrule = rule( + impl = _binary_impl, + attrs = { + "cmd": attrs.arg(), + "description": attrs.string(default = ""), + "out": attrs.string(default = ""), + }, +) + +_foo_buildable = rule( + impl = _buildable_impl, + attrs = { + "content": attrs.string(default = ""), + "out": attrs.string(), + }, +) + +_default_platform = "root//platforms:platform1" + +def _basic_print_impl(ctx): + _ignore = ctx # buildifier: disable=unused-variable + + print("print me") # buildifier: disable=print + return [DefaultInfo(), RunInfo(args = [])] + +foo_basic_print = rule( + impl = _basic_print_impl, + attrs = {}, +) + +def foo_library(**kwargs): + _foo_library(default_target_platform = _default_platform, **kwargs) + +def foo_binary(**kwargs): + _foo_binary(default_target_platform = _default_platform, **kwargs) + +def foo_genrule(**kwargs): + _foo_genrule(default_target_platform = _default_platform, **kwargs) + +def foo_buildable(**kwargs): + _foo_buildable(default_target_platform = _default_platform, **kwargs) + +def genrule_select() -> Select: + return select({ + "DEFAULT": "foo", + "ovr_config//os:macos": "bar", + "ovr_config//os:windows": "foobar", + }) diff --git a/tests/core/query/cquery/test_cquery_data/unsorted/special/.buckconfig b/tests/core/query/cquery/test_cquery_data/unsorted/special/.buckconfig new file mode 100644 index 0000000000000..5c40fffb821ac --- /dev/null +++ b/tests/core/query/cquery/test_cquery_data/unsorted/special/.buckconfig @@ -0,0 +1,2 @@ +[buildfile] +name=TARGETS.fixture diff --git a/tests/core/query/cquery/test_cquery_data/unsorted/special/stuff/TARGETS.fixture b/tests/core/query/cquery/test_cquery_data/unsorted/special/stuff/TARGETS.fixture new file mode 100644 index 0000000000000..ce1651eae526d --- /dev/null +++ b/tests/core/query/cquery/test_cquery_data/unsorted/special/stuff/TARGETS.fixture @@ -0,0 +1,6 @@ +load("@root//rules:rules.bzl", "foo_genrule") + +foo_genrule( + name = "magic", + cmd = "", +) diff 
--git a/tests/core/query/cquery/test_cquery_data/visibility/.buckconfig b/tests/core/query/cquery/test_cquery_data/visibility/.buckconfig new file mode 100644 index 0000000000000..512ed4d1ae28a --- /dev/null +++ b/tests/core/query/cquery/test_cquery_data/visibility/.buckconfig @@ -0,0 +1,15 @@ +[buildfile] +name=TARGETS.fixture + +[cells] +self = . +nano_prelude = nano_prelude + +[cell_aliases] +prelude = nano_prelude + +[external_cells] +nano_prelude = bundled + +[buck2] +file_watcher = fs_hash_crawler diff --git a/tests/core/query/cquery/test_cquery_data/visibility/TARGETS.fixture b/tests/core/query/cquery/test_cquery_data/visibility/TARGETS.fixture new file mode 100644 index 0000000000000..bd1bb2f2aa917 --- /dev/null +++ b/tests/core/query/cquery/test_cquery_data/visibility/TARGETS.fixture @@ -0,0 +1,49 @@ +stub( + name = "pass1", + deps = ["//subdir:public"], +) + +stub( + name = "pass2", + deps = ["//subdir:target"], +) + +stub( + name = "pass3", + deps = ["//subdir:package"], +) + +stub( + name = "pass4", + deps = ["//subdir:recursive"], +) + +stub( + name = "fail1", + deps = ["//subdir:badtarget"], +) + +stub( + name = "fail2", + deps = ["//subdir:badpackage"], +) + +stub( + name = "fail3", + deps = ["//subdir:badrecursive"], +) + +stub( + name = "fail4", + deps = ["//subdir:default"], +) + +stub( + name = "fail5", + deps = ["//subdir:badvisibility"], +) + +stub( + name = "fail6", + deps = ["//subdir:badtransitivevisibility"], +) diff --git a/tests/core/query/cquery/test_cquery_data/visibility/subdir/TARGETS.fixture b/tests/core/query/cquery/test_cquery_data/visibility/subdir/TARGETS.fixture new file mode 100644 index 0000000000000..6072bfb282e87 --- /dev/null +++ b/tests/core/query/cquery/test_cquery_data/visibility/subdir/TARGETS.fixture @@ -0,0 +1,49 @@ +stub( + name = "public", + visibility = ["PUBLIC"], +) + +stub( + name = "default", +) + +stub( + name = "target", + visibility = ["//:pass2"], +) + +stub( + name = "package", + visibility = ["//:"], +) + +stub( + name = "recursive", + visibility = ["//..."], +) + +stub( + name = "badtarget", + visibility = ["//:nothing"], +) + +stub( + name = "badpackage", + # TODO(cjhopman): This should check a package that's a prefix of the dependent, but buck2 doesn't handle that correctly currently. + visibility = ["//buck2:"], +) + +stub( + name = "badrecursive", + visibility = ["//buck2/..."], +) + +stub( + name = "badvisibility", + visibility = ["//:pass2"], +) + +stub( + name = "badtransitivevisibility", + visibility = ["//subdir:badvisibility"], +) diff --git a/tests/core/query/cquery/test_cquery_with_transition.py b/tests/core/query/cquery/test_cquery_with_transition.py new file mode 100644 index 0000000000000..4b8dae010337d --- /dev/null +++ b/tests/core/query/cquery/test_cquery_with_transition.py @@ -0,0 +1,111 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
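+
+# Tests that cquery returns one node per configuration when a rule applies an
+# incoming configuration transition: both the original and the transitioned
+# configuration of each target literal appear in the output.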
+ +# pyre-strict + + +import re + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +def _replace_hash(s: str) -> str: + return re.sub(r"\b[0-9a-f]{16}\b", "", s) + + +@buck_test() +async def test_cquery_transition_without_target_universe(buck: Buck) -> None: + result = await buck.cquery( + "root//:buck", + "--target-platforms=root//:p", + ) + + # Both configurations for the target are returned: the default, and the transition + lines = result.stdout.splitlines() + assert 2 == len(lines) + assert _replace_hash(lines[0]) == "root//:buck (root//:p#)" + assert _replace_hash(lines[1]) == "root//:buck (transitioned-to-reindeer#)" + + # Test cquery with "%s". + result = await buck.cquery( + "%s", + "root//:buck", + "root//:moose", + "--target-platforms=root//:p", + ) + + lines = result.stdout.splitlines() + assert 4 == len(lines) + assert _replace_hash(lines[0]) == "root//:buck (root//:p#)" + assert _replace_hash(lines[1]) == "root//:buck (transitioned-to-reindeer#)" + assert _replace_hash(lines[2]) == "root//:moose (root//:p#)" + assert _replace_hash(lines[3]) == "root//:moose (transitioned-to-reindeer#)" + + # Test cquery with "%Ss" + result = await buck.cquery( + "%Ss", + "root//:buck", + "root//:moose", + "--target-platforms=root//:p", + ) + + lines = result.stdout.splitlines() + assert 4 == len(lines) + assert _replace_hash(lines[0]) == "root//:buck (root//:p#)" + assert _replace_hash(lines[1]) == "root//:buck (transitioned-to-reindeer#)" + assert _replace_hash(lines[2]) == "root//:moose (root//:p#)" + assert _replace_hash(lines[3]) == "root//:moose (transitioned-to-reindeer#)" + + +@buck_test() +async def test_cquery_transition_with_target_universe(buck: Buck) -> None: + result = await buck.cquery( + "root//:buck", + "--target-platforms=root//:p", + "--target-universe", + "root//:buck", + ) + + lines = result.stdout.splitlines() + assert 2 == len(lines) + assert _replace_hash(lines[0]) == "root//:buck (root//:p#)" + assert _replace_hash(lines[1]) == "root//:buck (transitioned-to-reindeer#)" + + # Test cquery with "%s". + result = await buck.cquery( + "%s", + "root//:buck", + "root//:moose", + "--target-platforms=root//:p", + "--target-universe", + "root//:buck,root//:moose", + ) + + lines = result.stdout.splitlines() + assert 4 == len(lines) + assert _replace_hash(lines[0]) == "root//:buck (root//:p#)" + assert _replace_hash(lines[1]) == "root//:buck (transitioned-to-reindeer#)" + assert _replace_hash(lines[2]) == "root//:moose (root//:p#)" + assert _replace_hash(lines[3]) == "root//:moose (transitioned-to-reindeer#)" + + # Test cquery with "%Ss". + result = await buck.cquery( + "%Ss", + "root//:buck", + "root//:moose", + "--target-platforms=root//:p", + "--target-universe", + "root//:buck,root//:moose", + ) + + lines = result.stdout.splitlines() + assert 4 == len(lines) + assert _replace_hash(lines[0]) == "root//:buck (root//:p#)" + assert _replace_hash(lines[1]) == "root//:buck (transitioned-to-reindeer#)" + assert _replace_hash(lines[2]) == "root//:moose (root//:p#)" + assert _replace_hash(lines[3]) == "root//:moose (transitioned-to-reindeer#)" diff --git a/tests/core/query/cquery/test_cquery_with_transition_data/.buckconfig b/tests/core/query/cquery/test_cquery_with_transition_data/.buckconfig new file mode 100644 index 0000000000000..ee3cf6fa96900 --- /dev/null +++ b/tests/core/query/cquery/test_cquery_with_transition_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] +root = . 
+nano_prelude = nano_prelude + +[cell_aliases] +prelude = nano_prelude + +[external_cells] +nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/query/cquery/test_cquery_with_transition_data/.buckroot b/tests/core/query/cquery/test_cquery_with_transition_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/query/cquery/test_cquery_with_transition_data/TARGETS.fixture b/tests/core/query/cquery/test_cquery_with_transition_data/TARGETS.fixture new file mode 100644 index 0000000000000..a4d18e8cdd710 --- /dev/null +++ b/tests/core/query/cquery/test_cquery_with_transition_data/TARGETS.fixture @@ -0,0 +1,14 @@ +load(":defs.bzl", "simple") + +platform( + name = "p", + constraint_values = [], +) + +simple( + name = "buck", +) + +simple( + name = "moose", +) diff --git a/tests/core/query/cquery/test_cquery_with_transition_data/defs.bzl b/tests/core/query/cquery/test_cquery_with_transition_data/defs.bzl new file mode 100644 index 0000000000000..09b867c801c02 --- /dev/null +++ b/tests/core/query/cquery/test_cquery_with_transition_data/defs.bzl @@ -0,0 +1,25 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _transition_to_reindeer_impl(platform, refs): + _ignore = (platform, refs) # buildifier: disable=unused-variable + return PlatformInfo(label = "transitioned-to-reindeer", configuration = ConfigurationInfo(constraints = {}, values = {})) + +transition_to_reindeer = transition( + impl = _transition_to_reindeer_impl, + refs = {}, +) + +def _simple_impl(_ctx): + return [DefaultInfo()] + +simple = rule( + impl = _simple_impl, + attrs = {}, + # The configuration transition. + cfg = transition_to_reindeer, +) diff --git a/tests/core/query/cquery/test_filter.py b/tests/core/query/cquery/test_filter.py new file mode 100644 index 0000000000000..5500c0e1f34ea --- /dev/null +++ b/tests/core/query/cquery/test_filter.py @@ -0,0 +1,36 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import re + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +def _replace_hash(s: str) -> str: + return re.sub(r"\b[0-9a-f]{16}\b", "", s) + + +@buck_test() +async def test_cquery_filter_should_not_include_configuration(buck: Buck) -> None: + + # First, self-check. + result = await buck.cquery("//...") + assert [ + "root//:aaaaa ()", + "root//:bbbbb (root//:aaaaa#)", + ] == _replace_hash(result.stdout).splitlines() + + # Now check the behavior of `filter()`. + # `filter()` function checks unconfigured target label, as Buck1 does. 
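+    # The configuration suffix that cquery prints is therefore not part of
+    # the string the regex is matched against.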
+ result = await buck.cquery(r"filter('^root//:bbbbb$', //...)") + assert [ + "root//:bbbbb (root//:aaaaa#)", + ] == _replace_hash(result.stdout).splitlines() diff --git a/tests/core/query/cquery/test_filter_data/.buckconfig b/tests/core/query/cquery/test_filter_data/.buckconfig new file mode 100644 index 0000000000000..d0cca261b8bee --- /dev/null +++ b/tests/core/query/cquery/test_filter_data/.buckconfig @@ -0,0 +1,12 @@ +[buildfile] +name=TARGETS.fixture + +[cells] +root = . +nano_prelude = nano_prelude + +[cell_aliases] +prelude = nano_prelude + +[external_cells] +nano_prelude = bundled diff --git a/tests/core/query/cquery/test_filter_data/.buckroot b/tests/core/query/cquery/test_filter_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/query/cquery/test_filter_data/TARGETS.fixture b/tests/core/query/cquery/test_filter_data/TARGETS.fixture new file mode 100644 index 0000000000000..7d9262dab9b02 --- /dev/null +++ b/tests/core/query/cquery/test_filter_data/TARGETS.fixture @@ -0,0 +1,3 @@ +platform(name = "aaaaa") + +stub(name = "bbbbb", default_target_platform = ":aaaaa") diff --git a/tests/core/query/cquery/test_owner.py b/tests/core/query/cquery/test_owner.py new file mode 100644 index 0000000000000..e180de46ca0d7 --- /dev/null +++ b/tests/core/query/cquery/test_owner.py @@ -0,0 +1,42 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import re + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +def _replace_hash(s: str) -> str: + return re.sub(r"\b[0-9a-f]{16}\b", "", s) + + +@buck_test(data_dir="deprecated_correct") +async def test_owner_without_universe_correct(buck: Buck) -> None: + # TODO(nga): there should be a warning. + result = await buck.cquery( + "owner(bin.sh)", + ) + assert "" == result.stdout + assert ( + "Query has no target literals and `--target-universe` is not specified" + in result.stderr + ) + + +@buck_test(data_dir="deprecated_correct") +async def test_owner_with_auto_universe_correct(buck: Buck) -> None: + result = await buck.cquery( + "deps(//:test) intersect owner(bin.sh)", + ) + lines = result.stdout.splitlines() + # Drop configuration. + targets = [t.split()[0] for t in lines] + assert ["root//:bin"] == targets diff --git a/tests/core/query/cquery/test_owner_data/.buckroot b/tests/core/query/cquery/test_owner_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/query/cquery/test_owner_data/deprecated_correct/.buckconfig b/tests/core/query/cquery/test_owner_data/deprecated_correct/.buckconfig new file mode 100644 index 0000000000000..f582f4fd1a333 --- /dev/null +++ b/tests/core/query/cquery/test_owner_data/deprecated_correct/.buckconfig @@ -0,0 +1,17 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . 
+buck = buck +nano_prelude = nano_prelude +config = config + +[buck2] +file_watcher = fs_hash_crawler + +[cell_aliases] +prelude = nano_prelude + +[external_cells] +nano_prelude = bundled diff --git a/tests/core/query/cquery/test_owner_data/deprecated_correct/.buckroot b/tests/core/query/cquery/test_owner_data/deprecated_correct/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/query/cquery/test_owner_data/deprecated_correct/TARGETS.fixture b/tests/core/query/cquery/test_owner_data/deprecated_correct/TARGETS.fixture new file mode 100644 index 0000000000000..34c1bd1147c05 --- /dev/null +++ b/tests/core/query/cquery/test_owner_data/deprecated_correct/TARGETS.fixture @@ -0,0 +1,30 @@ +load(":rules.bzl", "genrule", "sh_binary") + +sh_binary( + name = "bin", + main = "bin.sh", +) + +constraint_setting( + name = "cs", +) + +constraint_value( + name = "cv", + constraint_setting = ":cs", +) + +platform( + name = "p", + constraint_values = [ + ":cv", + ], +) + +genrule( + name = "test", + out = "test.txt", + # We use exe to switch configuration. + cmd = "echo $(exe :bin) > $OUT", + default_target_platform = ":p", +) diff --git a/tests/core/query/cquery/test_owner_data/deprecated_correct/bin.sh b/tests/core/query/cquery/test_owner_data/deprecated_correct/bin.sh new file mode 100644 index 0000000000000..4a422ca02622d --- /dev/null +++ b/tests/core/query/cquery/test_owner_data/deprecated_correct/bin.sh @@ -0,0 +1,7 @@ +#!/bin/sh -e +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. diff --git a/tests/core/query/cquery/test_owner_data/deprecated_correct/rules.bzl b/tests/core/query/cquery/test_owner_data/deprecated_correct/rules.bzl new file mode 100644 index 0000000000000..4d1c647334204 --- /dev/null +++ b/tests/core/query/cquery/test_owner_data/deprecated_correct/rules.bzl @@ -0,0 +1,24 @@ +# @nolint + +def _genrule(ctx): + _ignore = ctx + fail("not needed in this test") + +genrule = rule( + impl = _genrule, + attrs = { + "cmd": attrs.arg(), + "out": attrs.string(), + }, +) + +def _sh_binary(ctx): + _ignore = ctx + fail("not needed in this test") + +sh_binary = rule( + impl = _sh_binary, + attrs = { + "main": attrs.source(), + }, +) diff --git a/tests/core/query/cquery/test_owner_data/incompatible/.buckconfig b/tests/core/query/cquery/test_owner_data/incompatible/.buckconfig new file mode 100644 index 0000000000000..f582f4fd1a333 --- /dev/null +++ b/tests/core/query/cquery/test_owner_data/incompatible/.buckconfig @@ -0,0 +1,17 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . 
+buck = buck +nano_prelude = nano_prelude +config = config + +[buck2] +file_watcher = fs_hash_crawler + +[cell_aliases] +prelude = nano_prelude + +[external_cells] +nano_prelude = bundled diff --git a/tests/core/query/cquery/test_owner_data/incompatible/.buckroot b/tests/core/query/cquery/test_owner_data/incompatible/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/query/cquery/test_owner_data/incompatible/TARGETS.fixture b/tests/core/query/cquery/test_owner_data/incompatible/TARGETS.fixture new file mode 100644 index 0000000000000..0443569c5abd1 --- /dev/null +++ b/tests/core/query/cquery/test_owner_data/incompatible/TARGETS.fixture @@ -0,0 +1,37 @@ +load(":rules.bzl", "genrule") + +constraint_setting( + name = "constraint", +) + +constraint_value( + name = "value1", + constraint_setting = ":constraint", +) + +constraint_value( + name = "value2", + constraint_setting = ":constraint", +) + +platform( + name = "platform1", + constraint_values = [ + ":value1", + ], +) + +platform( + name = "platform2", + constraint_values = [ + ":value2", + ], +) + +genrule( + name = "compatible_with_genrule", + srcs = ["src.txt"], + out = "out.txt", + bash = "cat $SRCDIR/src.txt > $OUT", + target_compatible_with = [":value1"], +) diff --git a/tests/core/query/cquery/test_owner_data/incompatible/rules.bzl b/tests/core/query/cquery/test_owner_data/incompatible/rules.bzl new file mode 100644 index 0000000000000..9b63e4c99c7bc --- /dev/null +++ b/tests/core/query/cquery/test_owner_data/incompatible/rules.bzl @@ -0,0 +1,14 @@ +# @nolint + +def _genrule(ctx): + _ignore = ctx + fail("Not needed in test") + +genrule = rule( + impl = _genrule, + attrs = { + "bash": attrs.arg(), + "out": attrs.string(), + "srcs": attrs.list(attrs.source()), + }, +) diff --git a/tests/core/query/cquery/test_owner_data/incompatible/src.txt b/tests/core/query/cquery/test_owner_data/incompatible/src.txt new file mode 100644 index 0000000000000..e427984d4a2c1 --- /dev/null +++ b/tests/core/query/cquery/test_owner_data/incompatible/src.txt @@ -0,0 +1 @@ +HELLO diff --git a/tests/core/query/cquery/test_owner_isolated.py b/tests/core/query/cquery/test_owner_isolated.py new file mode 100644 index 0000000000000..cbefc0b19655f --- /dev/null +++ b/tests/core/query/cquery/test_owner_isolated.py @@ -0,0 +1,28 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
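+
+# Checks `owner()` under an explicit --target-universe: the owning target is
+# reported in the universe's configuration.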
+ +# pyre-strict + +import re + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +def _replace_hash(s: str) -> str: + return re.sub(r"\b[0-9a-f]{16}\b", "", s) + + +@buck_test(data_dir="simple") +async def test_query_owner(buck: Buck) -> None: + result = await buck.cquery( + "--target-universe=root//bin:the_binary", """owner(bin/TARGETS.fixture)""" + ) + assert ( + _replace_hash(result.stdout) + == "root//bin:the_binary (root//platforms:platform1#)\n" + ) diff --git a/tests/core/query/cquery/test_owner_isolated_data/.buckroot b/tests/core/query/cquery/test_owner_isolated_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/query/cquery/test_owner_isolated_data/simple/.buckconfig b/tests/core/query/cquery/test_owner_isolated_data/simple/.buckconfig new file mode 100644 index 0000000000000..55aa79deef5df --- /dev/null +++ b/tests/core/query/cquery/test_owner_isolated_data/simple/.buckconfig @@ -0,0 +1,25 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored,bin/ignored,bin/ignored.txt +package_boundary_exceptions=. + +[repositories] +root = . +fbcode = fbcode +fbsource = fbsource +buck = buck +toolchains = toolchains +special = special +nano_prelude = nano_prelude +config = config + +[buck2] +file_watcher = fs_hash_crawler + +[cell_aliases] +prelude = nano_prelude + +[external_cells] +nano_prelude = bundled diff --git a/tests/core/query/cquery/test_owner_isolated_data/simple/.buckroot b/tests/core/query/cquery/test_owner_isolated_data/simple/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/query/cquery/test_owner_isolated_data/simple/TARGETS.fixture b/tests/core/query/cquery/test_owner_isolated_data/simple/TARGETS.fixture new file mode 100644 index 0000000000000..5e229dd1cfd05 --- /dev/null +++ b/tests/core/query/cquery/test_owner_isolated_data/simple/TARGETS.fixture @@ -0,0 +1,30 @@ +load("//rules:rules.bzl", "foo_binary", "foo_genrule") + +stub( + name = "bin", + visibility = ["PUBLIC"], +) + +foo_genrule( + name = "data", + cmd = "$(exe :genrule_binary)", + visibility = ["PUBLIC"], +) + +foo_genrule( + name = "foo_toolchain", + cmd = "", + visibility = ["PUBLIC"], +) + +foo_genrule( + name = "genrule_binary", + cmd = "", + visibility = ["PUBLIC"], +) + +foo_binary( + name = "package_boundary_violation", + srcs = ["package_boundary_violation/bin"], + visibility = ["PUBLIC"], +) diff --git a/tests/core/query/cquery/test_owner_isolated_data/simple/bin/TARGETS.fixture b/tests/core/query/cquery/test_owner_isolated_data/simple/bin/TARGETS.fixture new file mode 100644 index 0000000000000..79d1433f4a5c6 --- /dev/null +++ b/tests/core/query/cquery/test_owner_isolated_data/simple/bin/TARGETS.fixture @@ -0,0 +1,31 @@ +load("//rules:rules.bzl", "foo_binary") + +foo_binary( + name = "the_binary", + deps = [ + "//lib:lib1", + "//lib:lib2", + "//lib:lib3", + ], + srcs = [ + "TARGETS.fixture", + ], + cmd = ["$(exe //:bin)", "$(location //:data)"], +) + +foo_binary( + name = "the_binary_with_dir_srcs", + deps = [ + "//lib:lib1", + "//lib:lib2", + "//lib:lib3", + ], + srcs = [ + "kind", + ], + cmd = ["$(exe //:bin)", "$(location //:data)"], +) + +platform( + name = "platform", +) diff --git a/tests/core/query/cquery/test_owner_isolated_data/simple/bin/ignored.txt b/tests/core/query/cquery/test_owner_isolated_data/simple/bin/ignored.txt new file mode 100644 index 0000000000000..592fd2594b569 --- /dev/null +++ 
b/tests/core/query/cquery/test_owner_isolated_data/simple/bin/ignored.txt
@@ -0,0 +1 @@
+ignore me
diff --git a/tests/core/query/cquery/test_owner_isolated_data/simple/bin/ignored/foo.txt b/tests/core/query/cquery/test_owner_isolated_data/simple/bin/ignored/foo.txt
new file mode 100644
index 0000000000000..257cc5642cb1a
--- /dev/null
+++ b/tests/core/query/cquery/test_owner_isolated_data/simple/bin/ignored/foo.txt
@@ -0,0 +1 @@
+foo
diff --git a/tests/core/query/cquery/test_owner_isolated_data/simple/bin/kind/TARGETS.fixture b/tests/core/query/cquery/test_owner_isolated_data/simple/bin/kind/TARGETS.fixture
new file mode 100644
index 0000000000000..5d53a05d6cfc9
--- /dev/null
+++ b/tests/core/query/cquery/test_owner_isolated_data/simple/bin/kind/TARGETS.fixture
@@ -0,0 +1,24 @@
+load(":rules.bzl", "rule1", "rule2", "rule3", "rule4", "rule_tset")
+
+rule1(name = "foo", foo = "hello")
+rule2(name = "bar", foo = "world")
+rule3(name = "bzzt", foo = "henlo")
+rule4(name = "target_with_outputs", foo = "blah")
+
+rule_tset(name = "tset1")
+rule_tset(name = "tset2")
+rule_tset(
+    name = "tset3",
+    deps = [
+        ":tset1",
+        ":tset2",
+    ],
+)
+
+rule_tset(
+    name = "target_with_tset",
+    deps = [
+        ":tset3",
+        ":tset1",
+    ],
+)
diff --git a/tests/core/query/cquery/test_owner_isolated_data/simple/bin/kind/rules.bzl b/tests/core/query/cquery/test_owner_isolated_data/simple/bin/kind/rules.bzl
new file mode 100644
index 0000000000000..ba7b4b7c932cb
--- /dev/null
+++ b/tests/core/query/cquery/test_owner_isolated_data/simple/bin/kind/rules.bzl
@@ -0,0 +1,71 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+def _rule_impl(_ctx):
+    return [DefaultInfo()]
+
+rule1 = rule(impl = _rule_impl, attrs = {"foo": attrs.string()})
+rule2 = rule(impl = _rule_impl, attrs = {"foo": attrs.string()})
+rule3 = rule(impl = _rule_impl, attrs = {"foo": attrs.string()})
+
+def _rule_impl_with_run_info_and_default_info_outputs(ctx):
+    out = ctx.actions.write("default_out", "default_out")
+    run_info_out = ctx.actions.write("run_info_out", "run_info_out")
+    return [
+        DefaultInfo(default_outputs = [out]),
+        RunInfo(args = cmd_args(run_info_out)),
+    ]
+
+rule4 = rule(
+    impl = _rule_impl_with_run_info_and_default_info_outputs,
+    attrs = {"foo": attrs.string()},
+)
+
+def project(f: Artifact):
+    return f
+
+NameSet = transitive_set(args_projections = {
+    "project": project,
+})
+
+NameInfo = provider(fields = ["tset"])
+
+def _rule_impl_with_tset(ctx):
+    # Produce a file that contains our name.
+    out = ctx.actions.write("out.txt", str(ctx.label.name) + "\n")
+
+    # Produce a tset that is our file concatenated with all the files
+    # emitted by our children.
+    children = [d[NameInfo].tset for d in ctx.attrs.deps]
+    tset = ctx.actions.tset(NameSet, value = out, children = children)
+
+    # Concatenate all the files declared by the tset, into a single file
+    # (tset_out), which we'll return as our output.
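+    # The `sh -c` script below binds the output path to "$1", shifts it off,
+    # and cats the remaining (projected) tset files into it.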
+ agg = ctx.actions.declare_output("tset_out") + projected = tset.project_as_args("project") + + ctx.actions.run([ + "sh", + "-c", + 'out="$1" && shift && cat "$@" > "$out"', + "--", + agg.as_output(), + projected, + ], category = "test") + + return [ + NameInfo(tset = tset), + DefaultInfo(default_output = agg), + RunInfo(args = [projected]), + ] + +rule_tset = rule( + impl = _rule_impl_with_tset, + attrs = { + "deps": attrs.list(attrs.dep(providers = [NameInfo]), default = []), + }, +) diff --git a/tests/core/query/cquery/test_owner_isolated_data/simple/lib/TARGETS.fixture b/tests/core/query/cquery/test_owner_isolated_data/simple/lib/TARGETS.fixture new file mode 100644 index 0000000000000..988573c357a54 --- /dev/null +++ b/tests/core/query/cquery/test_owner_isolated_data/simple/lib/TARGETS.fixture @@ -0,0 +1,48 @@ +load("//rules:rules.bzl", "foo_genrule", "foo_library") + +config_setting( + name = "constraint", +) + +stub( + name = "lib1", + srcs = [":file1", "TARGETS.fixture"], + visibility = ["PUBLIC"], +) + +foo_library( + name = "lib2", + srcs = [":file2"], + cmd = ["this is lib2", "cmd", "$(location :file2)"], + description = "this is lib2", + visibility = ["PUBLIC"], +) + +foo_library( + name = "lib3", + srcs = [":file3"], + cmd = ["this is lib3"] + select({ + ":constraint": ["this is lib3 too, case 1"], + "DEFAULT": ["this is lib3 too, case 2"], + }), + description = "this is lib3", + visibility = ["PUBLIC"], +) + +foo_genrule( + name = "file1", + cmd = "", + visibility = ["PUBLIC"], +) + +foo_genrule( + name = "file2", + cmd = "", + visibility = ["PUBLIC"], +) + +foo_genrule( + name = "file3", + cmd = "", + visibility = ["PUBLIC"], +) diff --git a/tests/core/query/cquery/test_owner_isolated_data/simple/package_boundary_violation/TARGETS.fixture b/tests/core/query/cquery/test_owner_isolated_data/simple/package_boundary_violation/TARGETS.fixture new file mode 100644 index 0000000000000..8b1106eae036e --- /dev/null +++ b/tests/core/query/cquery/test_owner_isolated_data/simple/package_boundary_violation/TARGETS.fixture @@ -0,0 +1,7 @@ +load("//rules:rules.bzl", "foo_binary") + +foo_binary( + name = "bin", + srcs = ["bin"], + visibility = ["PUBLIC"], +) diff --git a/tests/core/query/cquery/test_owner_isolated_data/simple/package_boundary_violation/bin b/tests/core/query/cquery/test_owner_isolated_data/simple/package_boundary_violation/bin new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/query/cquery/test_owner_isolated_data/simple/platforms/TARGETS.fixture b/tests/core/query/cquery/test_owner_isolated_data/simple/platforms/TARGETS.fixture new file mode 100644 index 0000000000000..3835fd9d3536b --- /dev/null +++ b/tests/core/query/cquery/test_owner_isolated_data/simple/platforms/TARGETS.fixture @@ -0,0 +1,9 @@ +platform( + name = "platform1", + visibility = ["PUBLIC"], +) + +platform( + name = "platform2", + visibility = ["PUBLIC"], +) diff --git a/tests/core/query/cquery/test_owner_isolated_data/simple/rules/rules.bzl b/tests/core/query/cquery/test_owner_isolated_data/simple/rules/rules.bzl new file mode 100644 index 0000000000000..e40c507542419 --- /dev/null +++ b/tests/core/query/cquery/test_owner_isolated_data/simple/rules/rules.bzl @@ -0,0 +1,92 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +FooInfo = provider(fields = [ + "foo", +]) + +def _impl(ctx): + return [DefaultInfo(), FooInfo(foo = ctx.attrs.name + "_foo")] + +def _binary_impl(ctx): + return [DefaultInfo(), RunInfo(args = []), FooInfo(foo = ctx.attrs.name + "_foo")] + +def _buildable_impl(ctx): + out = ctx.actions.write(ctx.attrs.out, ctx.attrs.content) + return [DefaultInfo(default_output = out)] + +_foo_library = rule( + impl = _impl, + attrs = { + "cmd": attrs.list(attrs.arg(), default = []), + "deps": attrs.list(attrs.dep(), default = []), + "description": attrs.string(default = ""), + "mapped_srcs": attrs.dict(attrs.string(), attrs.source(), default = {}), + "srcs": attrs.list(attrs.source(), default = []), + "tuple_srcs": attrs.option(attrs.tuple(attrs.source(), attrs.source(), attrs.source()), default = None), + }, +) + +_foo_binary = rule( + impl = _binary_impl, + attrs = { + "cmd": attrs.list(attrs.arg(), default = []), + "deps": attrs.list(attrs.dep(), default = []), + "description": attrs.string(default = ""), + "srcs": attrs.list(attrs.source(), default = []), + "_foo_toolchain": attrs.exec_dep(default = "root//:foo_toolchain"), + }, +) + +_foo_genrule = rule( + impl = _binary_impl, + attrs = { + "cmd": attrs.arg(), + "description": attrs.string(default = ""), + "out": attrs.string(default = ""), + }, +) + +_foo_buildable = rule( + impl = _buildable_impl, + attrs = { + "content": attrs.string(default = ""), + "out": attrs.string(), + }, +) + +_default_platform = "root//platforms:platform1" + +def _basic_print_impl(ctx): + _ignore = ctx # buildifier: disable=unused-variable + + print("print me") # buildifier: disable=print + return [DefaultInfo(), RunInfo(args = [])] + +foo_basic_print = rule( + impl = _basic_print_impl, + attrs = {}, +) + +def foo_library(**kwargs): + _foo_library(default_target_platform = _default_platform, **kwargs) + +def foo_binary(**kwargs): + _foo_binary(default_target_platform = _default_platform, **kwargs) + +def foo_genrule(**kwargs): + _foo_genrule(default_target_platform = _default_platform, **kwargs) + +def foo_buildable(**kwargs): + _foo_buildable(default_target_platform = _default_platform, **kwargs) + +def genrule_select() -> Select: + return select({ + "DEFAULT": "foo", + "ovr_config//os:macos": "bar", + "ovr_config//os:windows": "foobar", + }) diff --git a/tests/core/query/cquery/test_owner_isolated_data/simple/rules/unittest.bzl b/tests/core/query/cquery/test_owner_isolated_data/simple/rules/unittest.bzl new file mode 100644 index 0000000000000..61dfd96a9d11c --- /dev/null +++ b/tests/core/query/cquery/test_owner_isolated_data/simple/rules/unittest.bzl @@ -0,0 +1,71 @@ +# Copyright 2017 The Bazel Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+# @lint-ignore-every LICENSELINT
+
+"""Unit testing support.
+
+This is a modified version of https://github.com/bazelbuild/bazel-skylib/blob/main/lib/unittest.bzl.
+Currently, if there are any failures, these are raised immediately by calling fail(),
+which triggers an analysis-time build error.
+"""
+
+def _assert_equals(expected, actual, msg = None):
+    """Asserts that the given `expected` and `actual` are equal.
+
+    Args:
+      expected: the expected value of some computation.
+      actual: the actual value returned by some computation.
+      msg: An optional message that will be printed that describes the failure.
+          If omitted, a default will be used.
+    """
+    if expected != actual:
+        expectation_msg = 'Expected "%s", but got "%s"' % (expected, actual)
+        if msg:
+            full_msg = "%s (%s)" % (msg, expectation_msg)
+        else:
+            full_msg = expectation_msg
+        fail(full_msg)
+
+def _assert_true(
+        condition,
+        msg = "Expected condition to be true, but was false."):
+    """Asserts that the given `condition` is true.
+
+    Args:
+      condition: A value that will be evaluated in a Boolean context.
+      msg: An optional message that will be printed that describes the failure.
+          If omitted, a default will be used.
+    """
+    if not condition:
+        fail(msg)
+
+def _assert_false(
+        condition,
+        msg = "Expected condition to be false, but was true."):
+    """Asserts that the given `condition` is false.
+
+    Args:
+      condition: A value that will be evaluated in a Boolean context.
+      msg: An optional message that will be printed that describes the failure.
+          If omitted, a default will be used.
+    """
+    if condition:
+        fail(msg)
+
+asserts = struct(
+    equals = _assert_equals,
+    true = _assert_true,
+    false = _assert_false,
+)
diff --git a/tests/core/query/test_buildfiles.py b/tests/core/query/test_buildfiles.py
new file mode 100644
index 0000000000000..1781955279cee
--- /dev/null
+++ b/tests/core/query/test_buildfiles.py
@@ -0,0 +1,84 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
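+
+# Exercises the `allbuildfiles()` and `rbuildfiles()` query functions and
+# checks that uquery and cquery agree on their output.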
+ +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_allbuildfiles(buck: Buck) -> None: + target1 = "root//load:abc" + target2 = "root//transitive_load:def" + target3 = "root//transitive_load:ghi" + out1 = (await buck.uquery(f"allbuildfiles({target1})")).stdout + out2 = (await buck.uquery(f"allbuildfiles({target2})")).stdout + out3 = (await buck.uquery(f"allbuildfiles({target3})")).stdout + out4 = (await buck.uquery(f"allbuildfiles(set({target1} {target2}))")).stdout + + # First, check that these are the same for cquery + assert out1 == (await buck.cquery(f"allbuildfiles({target1})")).stdout + assert out2 == (await buck.cquery(f"allbuildfiles({target2})")).stdout + assert out3 == (await buck.cquery(f"allbuildfiles({target3})")).stdout + assert ( + out4 == (await buck.cquery(f"allbuildfiles(set({target1} {target2}))")).stdout + ) + + out1 = [x for x in out1.splitlines() if not x.startswith("nano_prelude/")] + out1.sort() + out2 = [x for x in out2.splitlines() if not x.startswith("nano_prelude/")] + out2.sort() + out3 = [x for x in out3.splitlines() if not x.startswith("nano_prelude/")] + out3.sort() + out4 = [x for x in out4.splitlines() if not x.startswith("nano_prelude/")] + out4.sort() + + # verify loads + expected1 = ["load/TARGETS.fixture", "load/a.bzl"] + assert out1 == expected1 + + # verify transitive loads + expected2 = [ + "transitive_load/TARGETS.fixture", + "transitive_load/b.bzl", + "transitive_load/c.bzl", + ] + assert out2 == expected2 + assert out3 == expected2 + + # correctly handle multiple inputs + expected4 = expected1 + expected2 + expected4.sort() + assert out4 == expected4 + + +@buck_test() +async def test_rbuildfiles(buck: Buck) -> None: + target_file = "transitive_load/TARGETS.fixture" + out1 = ( + await buck.uquery(f"rbuildfiles({target_file}, transitive_load/c.bzl)") + ).stdout + out2 = (await buck.uquery(f"rbuildfiles({target_file}, {target_file})")).stdout + + # Check that these are the same for cquery + assert ( + out1 + == ( + await buck.cquery(f"rbuildfiles({target_file}, transitive_load/c.bzl)") + ).stdout + ) + assert ( + out2 == (await buck.cquery(f"rbuildfiles({target_file}, {target_file})")).stdout + ) + + assert "transitive_load/b.bzl" in out1 + assert "transitive_load/c.bzl" in out1 + assert "transitive_load/TARGETS" in out1 + + assert out2 == target_file + "\n" diff --git a/tests/core/query/test_buildfiles_data/.buckconfig b/tests/core/query/test_buildfiles_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/query/test_buildfiles_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . 
+ nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/query/test_buildfiles_data/.buckroot b/tests/core/query/test_buildfiles_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/query/test_buildfiles_data/load/TARGETS.fixture b/tests/core/query/test_buildfiles_data/load/TARGETS.fixture new file mode 100644 index 0000000000000..55369e131e5ab --- /dev/null +++ b/tests/core/query/test_buildfiles_data/load/TARGETS.fixture @@ -0,0 +1,7 @@ +load(":a.bzl", "nothing_a") + +nothing_a() + +stub( + name = "abc", +) diff --git a/tests/core/query/test_buildfiles_data/load/a.bzl b/tests/core/query/test_buildfiles_data/load/a.bzl new file mode 100644 index 0000000000000..9ec394c7b7a6d --- /dev/null +++ b/tests/core/query/test_buildfiles_data/load/a.bzl @@ -0,0 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def nothing_a(): + pass diff --git a/tests/core/query/test_buildfiles_data/transitive_load/TARGETS.fixture b/tests/core/query/test_buildfiles_data/transitive_load/TARGETS.fixture new file mode 100644 index 0000000000000..4b0d2c4296ad7 --- /dev/null +++ b/tests/core/query/test_buildfiles_data/transitive_load/TARGETS.fixture @@ -0,0 +1,11 @@ +load(":b.bzl", "nothing_b") + +nothing_b() + +stub( + name = "def", +) + +stub( + name = "ghi", +) diff --git a/tests/core/query/test_buildfiles_data/transitive_load/b.bzl b/tests/core/query/test_buildfiles_data/transitive_load/b.bzl new file mode 100644 index 0000000000000..191ad81605b55 --- /dev/null +++ b/tests/core/query/test_buildfiles_data/transitive_load/b.bzl @@ -0,0 +1,11 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load(":c.bzl", "nothing_c") + +def nothing_b(): + nothing_c() diff --git a/tests/core/query/test_buildfiles_data/transitive_load/c.bzl b/tests/core/query/test_buildfiles_data/transitive_load/c.bzl new file mode 100644 index 0000000000000..f6b56088f51cc --- /dev/null +++ b/tests/core/query/test_buildfiles_data/transitive_load/c.bzl @@ -0,0 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def nothing_c(): + pass diff --git a/tests/core/query/test_target_call_stacks.py b/tests/core/query/test_target_call_stacks.py new file mode 100644 index 0000000000000..af36d725559cb --- /dev/null +++ b/tests/core/query/test_target_call_stacks.py @@ -0,0 +1,32 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
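+
+# Checks that `--stack` attaches the Starlark call stack that defined each
+# target to uquery and cquery output.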
+ +# pyre-strict + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.golden import golden + + +@buck_test() +async def test_target_call_stacks_default(buck: Buck) -> None: + result = await buck.uquery( + "--stack", + "root//:test", + ) + golden( + output=result.stdout, + rel_path="golden/uquery.stdout", + ) + result = await buck.cquery( + "--stack", + "root//:test", + ) + golden( + output=result.stdout, + rel_path="golden/cquery.stdout", + ) diff --git a/tests/core/query/test_target_call_stacks_data/.buckconfig b/tests/core/query/test_target_call_stacks_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/query/test_target_call_stacks_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . + nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/query/test_target_call_stacks_data/.buckroot b/tests/core/query/test_target_call_stacks_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/query/test_target_call_stacks_data/TARGETS.fixture b/tests/core/query/test_target_call_stacks_data/TARGETS.fixture new file mode 100644 index 0000000000000..346459654e668 --- /dev/null +++ b/tests/core/query/test_target_call_stacks_data/TARGETS.fixture @@ -0,0 +1,3 @@ +trivial_build( + name = "test", +) diff --git a/tests/core/query/test_target_call_stacks_data/golden/cquery.stdout b/tests/core/query/test_target_call_stacks_data/golden/cquery.stdout new file mode 100644 index 0000000000000..a75217510d93d --- /dev/null +++ b/tests/core/query/test_target_call_stacks_data/golden/cquery.stdout @@ -0,0 +1,7 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +root//:test () + Traceback (most recent call last): + * TARGETS.fixture:1, in + trivial_build( + diff --git a/tests/core/query/test_target_call_stacks_data/golden/uquery.stdout b/tests/core/query/test_target_call_stacks_data/golden/uquery.stdout new file mode 100644 index 0000000000000..87f0cf8eb9ce7 --- /dev/null +++ b/tests/core/query/test_target_call_stacks_data/golden/uquery.stdout @@ -0,0 +1,7 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +root//:test + Traceback (most recent call last): + * TARGETS.fixture:1, in + trivial_build( + diff --git a/tests/core/query/test_target_configuration_toolchain_deps_traversal.py b/tests/core/query/test_target_configuration_toolchain_deps_traversal.py new file mode 100644 index 0000000000000..7ddbf3a582ad5 --- /dev/null +++ b/tests/core/query/test_target_configuration_toolchain_deps_traversal.py @@ -0,0 +1,80 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.golden import golden, golden_replace_cfg_hash + + +@buck_test() +# Test `target_deps()` function does not include toolchain deps. 
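+# (`target_deps()` restricts the traversal to regular target deps, as opposed
+# to the `toolchain_deps()` and `configuration_deps()` traversals exercised
+# below.)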
+async def test_cquery_target_deps(buck: Buck) -> None:
+    result = await buck.cquery("deps(tests/..., 1, target_deps())")
+    # TODO(nga): this test does not test that any target deps are actually returned.
+    golden_replace_cfg_hash(
+        output=result.stdout,
+        rel_path="cquery_target_deps.golden",
+    )
+
+
+@buck_test()
+# Test `target_deps()` function does not include toolchain deps.
+async def test_uquery_target_deps(buck: Buck) -> None:
+    # TODO(nga): output includes `platform_windows` target, which is probably not meant to be there.
+    result = await buck.uquery("deps(tests/..., 1, target_deps())")
+    golden(
+        output=result.stdout,
+        rel_path="uquery_target_deps.golden",
+    )
+
+
+# Test `configuration_deps()` function does include configuration deps.
+@buck_test()
+async def test_cquery_configuration_deps(buck: Buck) -> None:
+    q = "deps(tests/..., 1, configuration_deps())"
+    result = await buck.cquery(q)
+    # Note: the test output includes `root//tests:python_only`, which is not a configuration dep.
+    # This is how `deps()` with a traversal function works: it includes the roots.
+    golden_replace_cfg_hash(
+        output=result.stdout,
+        rel_path="cquery_configuration_deps.golden",
+    )
+
+
+# Test `configuration_deps()` function does include configuration deps.
+@buck_test()
+async def test_uquery_configuration_deps(buck: Buck) -> None:
+    q = "deps(tests/..., 1, configuration_deps())"
+    result = await buck.uquery(q)
+    # TODO(nga): this does not return any configuration deps.
+    golden(
+        output=result.stdout,
+        rel_path="uquery_configuration_deps.golden",
+    )
+
+
+@buck_test()
+async def test_cquery_toolchain_deps(buck: Buck) -> None:
+    q = "deps(tests:python_and_asic, 1, toolchain_deps())"
+    out = await buck.cquery(q)
+    golden_replace_cfg_hash(
+        output=out.stdout,
+        rel_path="cquery_toolchain_deps.golden",
+    )
+
+
+@buck_test()
+async def test_uquery_toolchain_deps(buck: Buck) -> None:
+    q = "deps(tests:python_and_asic, 1, toolchain_deps())"
+    out = await buck.uquery(q)
+    golden(
+        output=out.stdout,
+        rel_path="uquery_toolchain_deps.golden",
+    )
diff --git a/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/.buckconfig b/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/.buckconfig
new file mode 100644
index 0000000000000..8154738e7f33b
--- /dev/null
+++ b/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/.buckconfig
@@ -0,0 +1,15 @@
+[buildfile]
+name = TARGETS.fixture
+
+[cells]
+root = .
+nano_prelude = nano_prelude + +[cell_aliases] +prelude = nano_prelude + +[external_cells] +nano_prelude = bundled + +[build] +execution_platforms = root//config:platforms diff --git a/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/.buckroot b/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/config/TARGETS.fixture b/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/config/TARGETS.fixture new file mode 100644 index 0000000000000..a731da09619af --- /dev/null +++ b/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/config/TARGETS.fixture @@ -0,0 +1,34 @@ +load(":defs.bzl", "execution_platform", "execution_platforms") + +constraint_setting(name = "os") +constraint_value(name = "windows", constraint_setting = ":os") +constraint_value(name = "linux", constraint_setting = ":os") + +constraint_setting(name = "mode") +constraint_value(name = "release", constraint_setting = ":mode") +constraint_value(name = "debug", constraint_setting = ":mode") + +execution_platform( + name = "platform_windows_exec", + configuration = [":windows", ":release"], +) + +platform( + name = "platform_windows", + constraint_values = [":windows", ":release"], +) + +execution_platform( + name = "platform_linux_exec", + configuration = [":linux", ":release"], +) + +platform( + name = "platform_linux", + constraint_values = [":linux", ":release"], +) + +execution_platforms( + name = "platforms", + platforms = [":platform_linux_exec", ":platform_windows_exec"], +) diff --git a/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/config/defs.bzl b/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/config/defs.bzl new file mode 100644 index 0000000000000..23283220e6051 --- /dev/null +++ b/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/config/defs.bzl @@ -0,0 +1,45 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
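+
+# This file wires up the execution platforms that `.buckconfig` points at via
+# `build.execution_platforms = root//config:platforms`: `execution_platform`
+# folds the `ConfigurationInfo` of each `configuration` dep into a single
+# local-only `ExecutionPlatformInfo`, and `execution_platforms` collects
+# those into an `ExecutionPlatformRegistrationInfo`.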
+ +def _execution_platform(ctx): + constraints = {} + values = {} + for x in ctx.attrs.configuration: + constraints |= x[ConfigurationInfo].constraints + values |= x[ConfigurationInfo].values + cfg = ConfigurationInfo(constraints = constraints, values = values) + + platform = ExecutionPlatformInfo( + label = ctx.label.raw_target(), + configuration = cfg, + executor_config = CommandExecutorConfig( + local_enabled = True, + remote_enabled = False, + ), + ) + + return [DefaultInfo(), platform] + +execution_platform = rule( + impl = _execution_platform, + is_configuration_rule = True, + attrs = {"configuration": attrs.list(attrs.dep(providers = [ConfigurationInfo]))}, +) + +def _execution_platforms(ctx): + return [ + DefaultInfo(), + ExecutionPlatformRegistrationInfo( + platforms = [x[ExecutionPlatformInfo] for x in ctx.attrs.platforms], + ), + ] + +execution_platforms = rule( + impl = _execution_platforms, + is_configuration_rule = True, + attrs = {"platforms": attrs.list(attrs.dep(providers = [ExecutionPlatformInfo]))}, +) diff --git a/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/cquery_configuration_deps.golden b/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/cquery_configuration_deps.golden new file mode 100644 index 0000000000000..e8d6cb74f17b1 --- /dev/null +++ b/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/cquery_configuration_deps.golden @@ -0,0 +1,4 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +root//tests:python_and_asic (root//config:platform_windows#) +root//tests:python_only (root//config:platform_windows#) diff --git a/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/cquery_target_deps.golden b/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/cquery_target_deps.golden new file mode 100644 index 0000000000000..e8d6cb74f17b1 --- /dev/null +++ b/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/cquery_target_deps.golden @@ -0,0 +1,4 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +root//tests:python_and_asic (root//config:platform_windows#) +root//tests:python_only (root//config:platform_windows#) diff --git a/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/cquery_toolchain_deps.golden b/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/cquery_toolchain_deps.golden new file mode 100644 index 0000000000000..7d91530f25436 --- /dev/null +++ b/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/cquery_toolchain_deps.golden @@ -0,0 +1,5 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +root//tests:python_and_asic (root//config:platform_windows#) +root//toolchains:asic (root//config:platform_windows#) (root//config:platform_windows_exec#) +root//toolchains:python (root//config:platform_windows#) (root//config:platform_windows_exec#) diff --git a/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/tests/TARGETS.fixture b/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/tests/TARGETS.fixture new file mode 100644 index 0000000000000..1db4b117aded4 --- /dev/null +++ b/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/tests/TARGETS.fixture @@ -0,0 +1,16 @@ +stub( + 
name = "python_and_asic", + default_target_platform = "//config:platform_windows", + toolchain_deps = [ + "//toolchains:asic", + "//toolchains:python", + ], +) + +stub( + name = "python_only", + default_target_platform = "//config:platform_windows", + toolchain_deps = [ + "//toolchains:python", + ], +) diff --git a/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/toolchains/TARGETS.fixture b/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/toolchains/TARGETS.fixture new file mode 100644 index 0000000000000..d082972def00f --- /dev/null +++ b/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/toolchains/TARGETS.fixture @@ -0,0 +1,51 @@ +load(":defs.bzl", "toolchain") + +stub( + name = "compile_python_release_windows", +) + +stub( + name = "compile_python_debug_windows", +) + +stub( + name = "compile_python_release_linux", +) + +stub( + name = "compile_python_debug_linux", +) + +stub( + name = "compile_asic", + compatible_with = ["//config:windows"], +) + +stub( + name = "python_release", + deps = [select({ + "//config:linux": ":compile_python_release_linux", + "//config:windows": ":compile_python_release_windows", + })], +) + +stub( + name = "python_debug", + deps = [select({ + "//config:linux": ":compile_python_debug_linux", + "//config:windows": ":compile_python_debug_windows", + })], +) + +toolchain( + name = "python", + dep = select({ + "//config:debug": ":python_debug", + "//config:release": ":python_release", + }), +) + +toolchain( + name = "asic", + dep = ":compile_asic", +) diff --git a/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/toolchains/defs.bzl b/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/toolchains/defs.bzl new file mode 100644 index 0000000000000..856167ee2e938 --- /dev/null +++ b/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/toolchains/defs.bzl @@ -0,0 +1,17 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +def _toolchain( + # starlark-lint-disable unused-argument + ctx): # @unused + fail("we do not run analysis in query tests") + +toolchain = rule( + impl = _toolchain, + attrs = {"dep": attrs.exec_dep()}, + is_toolchain_rule = True, +) diff --git a/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/uquery_configuration_deps.golden b/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/uquery_configuration_deps.golden new file mode 100644 index 0000000000000..fecb2e2f1dfe4 --- /dev/null +++ b/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/uquery_configuration_deps.golden @@ -0,0 +1,4 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +root//tests:python_and_asic +root//tests:python_only diff --git a/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/uquery_target_deps.golden b/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/uquery_target_deps.golden new file mode 100644 index 0000000000000..fecb2e2f1dfe4 --- /dev/null +++ b/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/uquery_target_deps.golden @@ -0,0 +1,4 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +root//tests:python_and_asic +root//tests:python_only diff --git a/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/uquery_toolchain_deps.golden b/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/uquery_toolchain_deps.golden new file mode 100644 index 0000000000000..46ef40df7702e --- /dev/null +++ b/tests/core/query/test_target_configuration_toolchain_deps_traversal_data/uquery_toolchain_deps.golden @@ -0,0 +1,5 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +root//tests:python_and_asic +root//toolchains:asic +root//toolchains:python diff --git a/tests/core/query/uquery/BUCK b/tests/core/query/uquery/BUCK new file mode 100644 index 0000000000000..f7301e47aa9f1 --- /dev/null +++ b/tests/core/query/uquery/BUCK @@ -0,0 +1,12 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_uquery", + srcs = ["test_uquery.py"], + data_dir = "test_uquery_data", + deps = [ + "//buck2/tests/e2e_util:golden", + ], +) diff --git a/tests/core/query/uquery/test_uquery.py b/tests/core/query/uquery/test_uquery.py new file mode 100644 index 0000000000000..69ab009cd3a9d --- /dev/null +++ b/tests/core/query/uquery/test_uquery.py @@ -0,0 +1,534 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json +import re +from pathlib import Path +from typing import List + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.api.buck_result import BuckResult +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.golden import golden + + +""" +If you need to add a directory that's isolated in buck2/test/targets +(ex. 
some test of the form @buck_test( data_dir=some_new_directory)),
+then you will need to update isolated_targets in buck2/test/targets/TARGETS.
+Otherwise the test will fail because it cannot recognize the new directory.
+"""
+
+
+def _replace_hash(s: str) -> str:
+    return re.sub(r"\b[0-9a-f]{16}\b", "", s)
+
+
+@buck_test(data_dir="bxl_simple")
+async def test_uquery_inputs(buck: Buck) -> None:
+    result = await buck.uquery("""inputs(set(root//bin:the_binary //lib:file1))""")
+    assert result.stdout == "bin/TARGETS.fixture\n"
+
+    result = await buck.uquery("""inputs(set())""")
+    assert result.stdout == ""
+
+
+@buck_test(data_dir="bxl_simple")
+async def test_uquery_union(buck: Buck) -> None:
+    result = await buck.uquery("""deps(root//lib:lib1) + set(root//data:data)""")
+    assert result.stdout == "root//lib:file1\nroot//lib:lib1\nroot//data:data\n"
+
+    result = await buck.uquery(
+        """buildfile(root//bin:the_binary) + inputs(deps(root//lib:lib1))"""
+    )
+    assert result.stdout == "bin/TARGETS.fixture\nlib/TARGETS.fixture\n"
+
+    result = await buck.uquery("""'root//bin:the_binary' + set(root//data:data)""")
+    assert result.stdout == "root//bin:the_binary\nroot//data:data\n"
+
+
+@buck_test(data_dir="bxl_simple")
+async def test_uquery_owner(buck: Buck) -> None:
+    result = await buck.uquery("""owner(bin/TARGETS.fixture)""")
+    assert result.stdout == "root//bin:the_binary\n"
+
+    result = await buck.uquery("""owner(data/buck/build/data.file)""")
+    assert result.stdout == "root//data:data\n"
+
+    # there's no buildfile in the root of the `special` cell; make sure that works
+    result = await buck.uquery("""owner(special/file)""")
+    assert "No owner" in result.stderr
+    assert result.stdout == ""
+
+    # there's a buildfile here, but no target owns the file
+    result = await buck.uquery("""owner(.buckconfig)""")
+    assert "No owner" in result.stderr
+    assert result.stdout == ""
+
+    result = await buck.uquery(
+        """owner(../data/buck/build/data.file)""", rel_cwd=Path("special")
+    )
+    assert result.stdout == "root//data:data\n"
+
+    result = await buck.uquery("""owner(root//bin/TARGETS.fixture)""")
+    assert result.stdout == "root//bin:the_binary\n"
+
+
+@buck_test(data_dir="bxl_simple")
+async def test_query_owner_with_explicit_package_boundary_violation(buck: Buck) -> None:
+    # This needs to be changed to `expect_failure` once Buck2 checks path validity
+    # outside of `package_boundary_exceptions`
+    result = await buck.uquery(
+        """owner(package_boundary_violation/bin)""",
+        "-c",
+        "project.package_boundary_exceptions=",
+    )
+    assert "root//package_boundary_violation:bin" in result.stdout
+    assert "root//:package_boundary_violation" not in result.stdout
+
+    result = await buck.uquery("""owner(package_boundary_violation/bin)""")
+    assert "root//package_boundary_violation:bin" in result.stdout
+    assert "root//:package_boundary_violation" in result.stdout
+
+
+@buck_test(data_dir="bxl_simple")
+async def test_uquery_buildfile(buck: Buck) -> None:
+    result = await buck.uquery("""buildfile(root//bin:the_binary)""")
+    assert result.stdout == "bin/TARGETS.fixture\n"
+
+    result = await buck.uquery("""buildfile(root//bin: + root//data:)""")
+    assert result.stdout == "bin/TARGETS.fixture\ndata/TARGETS.fixture\n"
+
+    result = await buck.uquery(
+        """buildfile(owner(../data/buck/build/data.file))""", rel_cwd=Path("special")
+    )
+    assert result.stdout == "data/TARGETS.fixture\n"
+
+
+@buck_test(data_dir="bxl_simple")
+async def test_uquery_targets_in_buildfile(buck: Buck) -> None:
+    result = await
buck.uquery("""targets_in_buildfile(bin/TARGETS.fixture)""") + assert ( + result.stdout + == "\n".join( + [ + "root//bin:setting", + "root//bin:my_config", + "root//bin:my_platform", + "root//bin:the_binary", + "root//bin:the_binary_with_dir_srcs", + "root//bin:platform", + ] + ) + + "\n" + ) + + +@buck_test(data_dir="bxl_simple") +async def test_query_configuration_deps(buck: Buck) -> None: + result = await buck.uquery( + """deps(root//bin:the_binary, 1, configuration_deps())""" + ) + assert "root//bin:my_config" in result.stdout + + +@buck_test(data_dir="bxl_simple") +async def test_deps(buck: Buck) -> None: + result = await buck.uquery("""deps(root//bin:the_binary)""") + assert ( + result.stdout + == "\n".join( + [ + "root//:foo_toolchain", + "root//:bin", + "root//lib:file3", + "root//lib:lib3", + "root//lib:file2", + "root//lib:lib2", + "root//lib:file1", + "root//lib:lib1", + "root//:genrule_binary", + "root//:data", + "root//bin:the_binary", + ] + ) + + "\n" + ) + + target_deps_expr = """deps(root//bin:the_binary, 100, target_deps())""" + + result = await buck.uquery(target_deps_expr) + assert ( + result.stdout + == "\n".join( + [ + "root//bin:the_binary", + "root//:data", + "root//lib:lib1", + "root//lib:lib2", + "root//lib:lib3", + "root//lib:file1", + "root//lib:file2", + "root//lib:file3", + ] + ) + + "\n" + ) + + # this is a little subtle, query's deps() function always forms a graph + # with the nodes themselves so we subtract them out. It's not quite right + # if a node in the graph of target deps were to have an exec dep on another. + result = await buck.uquery( + "deps(%s, 1, exec_deps()) - %s" % (target_deps_expr, target_deps_expr) + ) + assert ( + result.stdout + == "\n".join( + [ + "root//:foo_toolchain", + "root//:bin", + "root//:genrule_binary", + ] + ) + + "\n" + ) + + +@buck_test(data_dir="bxl_simple") +async def test_uquery_cell(buck: Buck) -> None: + result = await buck.uquery("""//stuff:magic""", rel_cwd=Path("special")) + assert result.stdout == "special//stuff:magic\n" + + +@buck_test(data_dir="bxl_simple") +async def test_uquery_relative(buck: Buck) -> None: + result = await buck.uquery("""...""", rel_cwd=Path("special")) + assert result.stdout == "special//stuff:magic\n" + result = await buck.uquery("""...""", rel_cwd=Path("bin")) + assert "root//bin:the_binary\n" in result.stdout + + +@buck_test(data_dir="bxl_simple") +async def test_uquery_provider_names(buck: Buck) -> None: + await expect_failure( + buck.uquery("'root//bin:the_binary[provider_name]'"), + stderr_regex="Expected a target pattern without providers", + ) + + await expect_failure( + buck.uquery("'root//bin:the_binary#some_flavor'"), + stderr_regex="Expected a target pattern without providers", + ) + + +@buck_test(data_dir="bxl_simple") +async def test_query_filter(buck: Buck) -> None: + # Test uquery/cquery on target and file sets + out = await buck.uquery("filter('the_binary$', root//...)") + assert out.stdout == "root//bin:the_binary\n" + out = await buck.cquery("filter('the_binary\\w', root//...)") + assert ( + _replace_hash(out.stdout) + == "root//bin:the_binary_with_dir_srcs (root//platforms:platform1#)\n" + ) + out = await buck.uquery("filter('fixture$', inputs(root//bin:the_binary))") + assert out.stdout == "bin/TARGETS.fixture\n" + out = await buck.cquery("filter('fixture$', inputs(root//bin:the_binary))") + assert out.stdout == "bin/TARGETS.fixture\n" + + +@buck_test(setup_eden=True, data_dir="bxl_simple") +async def test_attributes(buck: Buck) -> None: + out = await 
buck.uquery("set(root//bin:the_binary //lib:file1)") + assert out.stdout == "root//bin:the_binary\nroot//lib:file1\n" + + json_out = await buck.uquery("--json", "set(root//bin:the_binary //lib:file1)") + json_out = json.loads(json_out.stdout) + assert ["root//bin:the_binary", "root//lib:file1"] == json_out + + attrs_out = await buck.uquery( + "--output-attribute", + "buck\\..*", + "--output-attribute", + "srcs", + "--output-attribute", + "deps", + "set(root//bin:the_binary //lib:file1)", + ) + attrs_json_out = await buck.uquery( + "--output-attribute", + "buck\\..*", + "--output-attribute", + "srcs", + "--output-attribute", + "deps", + "--json", + "set(root//bin:the_binary //lib:file1)", + ) + # specifying any attrs enables json output + assert attrs_json_out.stdout == attrs_out.stdout + attrs_json_out = json.loads(attrs_json_out.stdout) + assert { + "root//bin:the_binary": { + "buck.deps": [ + "root//:data", + "root//lib:lib1", + "root//lib:lib2", + "root//lib:lib3", + "root//:foo_toolchain", + "root//:bin", + ], + "buck.package": "root//bin:TARGETS.fixture", + "buck.type": "_foo_binary", + "buck.configuration_deps": ["root//bin:my_config"], + "buck.oncall": None, + "deps": ["root//lib:lib1", "root//lib:lib2", "root//lib:lib3"], + "srcs": ["root//bin/TARGETS.fixture"], + }, + "root//lib:file1": { + "buck.deps": [], + "buck.package": "root//lib:TARGETS.fixture", + "buck.type": "_foo_genrule", + "buck.configuration_deps": [], + "buck.oncall": None, + }, + } == attrs_json_out + + +@buck_test(data_dir="bxl_simple") +async def test_dot(buck: Buck) -> None: + out = await buck.uquery("--dot", "deps(root//bin:the_binary, 100, target_deps())") + golden(output=out.stdout, rel_path="bxl_simple/expected/dot/deps.golden") + + out = await buck.uquery( + "--dot", + "--output-attribute=name", + "--output-attribute=^deps", + "--output-attribute=cmd", + "deps(root//bin:the_binary, 100, target_deps()) - //platforms:", + ) + golden(output=out.stdout, rel_path="bxl_simple/expected/dot/attrs.golden") + + out = await buck.uquery( + "--dot", + "deps(root//bin:the_binary, 100, target_deps()) - set(//lib: //platforms:)", + ) + golden(output=out.stdout, rel_path="bxl_simple/expected/dot/subgraph.golden") + + +@buck_test(data_dir="bxl_simple") +async def test_dot_compact(buck: Buck) -> None: + out = await buck.uquery( + "--dot-compact", "deps(root//bin:the_binary, 100, target_deps())" + ) + golden( + output=out.stdout, + rel_path="bxl_simple/expected/dot_compact/deps.golden", + ) + + out = await buck.uquery( + "--dot-compact", + "--output-attribute=name", + "--output-attribute=^deps", + "--output-attribute=cmd", + "deps(root//bin:the_binary, 100, target_deps()) - //platforms:", + ) + golden( + output=out.stdout, + rel_path="bxl_simple/expected/dot_compact/attrs.golden", + ) + + out = await buck.uquery( + "--dot-compact", + "deps(root//bin:the_binary, 100, target_deps()) - set(//lib: //platforms:)", + ) + golden( + output=out.stdout, + rel_path="bxl_simple/expected/dot_compact/subgraph.golden", + ) + + +# Tests for "%Ss" uses +@buck_test(data_dir="bxl_simple") +async def test_args_as_set(buck: Buck) -> None: + out = await buck.uquery("%Ss", "root//bin:the_binary", "//lib:file1") + assert out.stdout == "root//bin:the_binary\nroot//lib:file1\n" + + result = await buck.uquery("--json", "%Ss", "root//bin:the_binary", "//lib:file1") + json_out = json.loads(result.stdout) + assert json_out == ["root//bin:the_binary", "root//lib:file1"] + + +@buck_test(data_dir="bxl_simple") +async def test_multi_uquery(buck: Buck) -> None: 
+ out = await buck.uquery("%s", "root//bin:the_binary", "//lib:file1") + assert out.stdout == "root//bin:the_binary\nroot//lib:file1\n" + + result = await buck.uquery( + "owner(%s)", "bin/TARGETS.fixture", "data/buck/build/data.file" + ) + assert result.stdout == "root//bin:the_binary\nroot//data:data\n" + + result = await buck.uquery( + "--json", "owner(%s)", "bin/TARGETS.fixture", "data/buck/build/data.file" + ) + json_out = json.loads(result.stdout) + + assert json_out == { + "bin/TARGETS.fixture": ["root//bin:the_binary"], + "data/buck/build/data.file": ["root//data:data"], + } + + # match buck1's strange handling of multi-query with --output-attribute + result = await buck.uquery( + "--json", + "--output-attribute=name", + "owner(%s)", + "bin/TARGETS.fixture", + "data/buck/build/data.file", + ) + json_out = json.loads(result.stdout) + + assert json_out == { + "root//bin:the_binary": {"name": "the_binary"}, + "root//data:data": {"name": "data"}, + } + + # test a case where the query for one arg fails. The process should exit with a non-zero code, but + # the produced output should be valid json with an appropriate error indicator. + failure = await expect_failure( + buck.uquery("--json", "inputs(%s)", "//data:data", "xyz") + ) + json_out = json.loads(failure.stdout) + assert "$error" in json_out["xyz"] + assert json_out["//data:data"] == ["data/buck/build/data.file"] + + # Test where the parameter is not a literal, but a query fragment + out = await buck.uquery("%s", "deps(root//lib:lib1)") + assert out.stdout == "root//lib:file1\nroot//lib:lib1\n" + + out = await buck.uquery("owner(%s)", "inputs(root//bin:the_binary)") + assert out.stdout == "root//bin:the_binary\n" + + out = await buck.uquery("owner(%s)", "data/buck/build/data.file") + assert out.stdout == "root//data:data\n" + + # We'd really prefer this to be an error, but Buck1 allows it + out = await buck.uquery("owner(%s", "data/buck/build/data.file)") + assert out.stdout == "root//data:data\n" + + +@buck_test(data_dir="testsof") +async def test_testsof(buck: Buck) -> None: + out = await buck.uquery("testsof(//:foo_lib)") + + assert "root//:foo_test" in out.stdout + assert "root//:foo_extra_test" in out.stdout + assert "root//:foo_lib" not in out.stdout + + +@buck_test(data_dir="directory_sources") +async def test_directory_source(buck: Buck) -> None: + await buck.build(":a_file") + await buck.build(":a_dir") + + result = await buck.query("owner(dir/file1.txt)") + assert result.stdout == "root//:a_dir\n" + result = await buck.query("inputs(:a_dir)") + assert ( + result.stdout == "dir/file1.txt\ndir/subdir/file2.txt\ndir/subdir/file3.txt\n" + ) + + # Can't reference files that don't exist + await expect_failure( + buck.build("does_not_exist:"), + stderr_regex="Source file `does_not_exist` does not exist as a member of package", + ) + + # Want to make sure we can't do a package boundary violation + # Currently these are soft errors + await expect_failure( + buck.build("subpackage:"), + stderr_regex="Source file `subpackage` does not exist as a member of package", + ) + + await expect_failure( + buck.build("dir_with_subpackage"), + stderr_regex="may not cover any subpackages, but includes subpackage `dir_with_subpackage/subpackage`.", + ) + + +@buck_test(data_dir="oncall") +async def test_oncall(buck: Buck) -> None: + out = await buck.uquery("//:foo", "--output-attribute=oncall") + assert '"magic"' in out.stdout + out = await buck.cquery("//:bar", "--output-attribute=oncall") + assert '"magic"' in out.stdout + + 
+@buck_test(data_dir="oncall") +async def test_output_all_attributes(buck: Buck) -> None: + def contains(out: BuckResult, want: List[str], notwant: List[str]) -> None: + x = json.loads(out.stdout)["root//:foo"] + for w in want: + assert w in x + for w in notwant: + assert w not in x + + out = await buck.uquery("//:foo", "--output-all-attributes", "--json") + contains( + out, + [ + "buck.type", + "name", + "buck.oncall", + "buck.package", + "buck.configuration_deps", + "buck.deps", + "visibility", + ], + ["madeup"], + ) + out = await buck.uquery("//:foo", "--output-basic-attributes", "--json") + contains( + out, + ["buck.type", "name", "buck.package", "visibility"], + ["buck.oncall", "buck.configuration_deps"], + ) + + +@buck_test(data_dir="bxl_simple") +async def test_output_format_starlark_golden(buck: Buck) -> None: + result = await buck.uquery( + "--output-format=starlark", + "--stack", + "//lib:", + ) + + golden( + output=result.stdout, + rel_path="output_starlark.golden.out", + ) + + +@buck_test(data_dir="bxl_simple") +async def test_uquery_rdeps(buck: Buck) -> None: + result = await buck.query("""rdeps(root//bin:the_binary, //lib:file1)""") + assert result.stdout == "root//bin:the_binary\nroot//lib:lib1\nroot//lib:file1\n" + + result = await buck.query("""rdeps(root//bin:the_binary, //lib:file1, 0)""") + assert result.stdout == "root//lib:file1\n" + + result = await buck.query("""rdeps(root//bin:the_binary, //lib:file1, 1)""") + assert result.stdout == "root//lib:lib1\nroot//lib:file1\n" + + result = await buck.query("""rdeps(root//bin:the_binary, //lib:file1, 100)""") + assert result.stdout == "root//bin:the_binary\nroot//lib:lib1\nroot//lib:file1\n" diff --git a/tests/core/query/uquery/test_uquery_data/.buckroot b/tests/core/query/uquery/test_uquery_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/query/uquery/test_uquery_data/bxl_simple/.buckconfig b/tests/core/query/uquery/test_uquery_data/bxl_simple/.buckconfig new file mode 100644 index 0000000000000..8b7b8a6233547 --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/bxl_simple/.buckconfig @@ -0,0 +1,22 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored,bin/ignored,bin/ignored.txt +package_boundary_exceptions=. + +[cells] +root = . 
+nano_prelude = nano_prelude +fbcode = fbcode +fbsource = fbsource +buck = buck +toolchains = toolchains +special = special +config = config + +[cell_aliases] +prelude = nano_prelude + +[external_cells] +nano_prelude = bundled diff --git a/tests/core/query/uquery/test_uquery_data/bxl_simple/TARGETS.fixture b/tests/core/query/uquery/test_uquery_data/bxl_simple/TARGETS.fixture new file mode 100644 index 0000000000000..2194ea9270d4a --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/bxl_simple/TARGETS.fixture @@ -0,0 +1,41 @@ +load("//rules:rules.bzl", "foo_basic_print", "foo_binary", "foo_buildable", "foo_genrule") + +foo_binary( + name = "bin", + visibility = ["PUBLIC"], +) + +foo_genrule( + name = "data", + cmd = "$(exe :genrule_binary)", + visibility = ["PUBLIC"], +) + +foo_genrule( + name = "foo_toolchain", + cmd = "", + visibility = ["PUBLIC"], +) + +foo_genrule( + name = "genrule_binary", + cmd = "", + visibility = ["PUBLIC"], +) + +foo_binary( + name = "package_boundary_violation", + srcs = ["package_boundary_violation/bin"], + visibility = ["PUBLIC"], +) + +foo_buildable( + name = "buildable", + content = "FOO", + out = "out.txt", +) + +foo_basic_print( + name = "print", + visibility = ["PUBLIC"], +) diff --git a/tests/core/query/uquery/test_uquery_data/bxl_simple/bin/TARGETS.fixture b/tests/core/query/uquery/test_uquery_data/bxl_simple/bin/TARGETS.fixture new file mode 100644 index 0000000000000..8c943327a4ea6 --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/bxl_simple/bin/TARGETS.fixture @@ -0,0 +1,43 @@ +load("//rules:rules.bzl", "foo_binary") + +constraint_setting(name = "setting") + +constraint_value(name = "my_config", constraint_setting = ":setting") + +platform( + name = "my_platform", + deps = ["root//platforms:platform1"], + constraint_values = [":my_config"], +) + +foo_binary( + name = "the_binary", + deps = [ + "//lib:lib1", + "//lib:lib2", + "//lib:lib3", + ], + target_compatible_with = [":my_config"], + default_target_platform = ":my_platform", + srcs = [ + "TARGETS.fixture", + ], + cmd = ["$(exe //:bin)", "$(location //:data)"], +) + +foo_binary( + name = "the_binary_with_dir_srcs", + deps = [ + "//lib:lib1", + "//lib:lib2", + "//lib:lib3", + ], + srcs = [ + "kind", + ], + cmd = ["$(exe //:bin)", "$(location //:data)"], +) + +platform( + name = "platform", +) diff --git a/tests/core/query/uquery/test_uquery_data/bxl_simple/bin/ignored.txt b/tests/core/query/uquery/test_uquery_data/bxl_simple/bin/ignored.txt new file mode 100644 index 0000000000000..592fd2594b569 --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/bxl_simple/bin/ignored.txt @@ -0,0 +1 @@ +ignore me diff --git a/tests/core/query/uquery/test_uquery_data/bxl_simple/bin/ignored/foo.txt b/tests/core/query/uquery/test_uquery_data/bxl_simple/bin/ignored/foo.txt new file mode 100644 index 0000000000000..257cc5642cb1a --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/bxl_simple/bin/ignored/foo.txt @@ -0,0 +1 @@ +foo diff --git a/tests/core/query/uquery/test_uquery_data/bxl_simple/bin/kind/TARGETS.fixture b/tests/core/query/uquery/test_uquery_data/bxl_simple/bin/kind/TARGETS.fixture new file mode 100644 index 0000000000000..5d53a05d6cfc9 --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/bxl_simple/bin/kind/TARGETS.fixture @@ -0,0 +1,24 @@ +load(":rules.bzl", "rule1", "rule2", "rule3", "rule4", "rule_tset") + +rule1(name = "foo", foo = "hello") +rule2(name = "bar", foo = "world") +rule3(name = "bzzt", foo = "henlo") +rule4(name = "target_with_outputs", foo = 
"blah") + +rule_tset(name = "tset1") +rule_tset(name = "tset2") +rule_tset( + name = "tset3", + deps = [ + ":tset1", + ":tset2", + ], +) + +rule_tset( + name = "target_with_tset", + deps = [ + ":tset3", + ":tset1", + ], +) diff --git a/tests/core/query/uquery/test_uquery_data/bxl_simple/bin/kind/rules.bzl b/tests/core/query/uquery/test_uquery_data/bxl_simple/bin/kind/rules.bzl new file mode 100644 index 0000000000000..ba7b4b7c932cb --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/bxl_simple/bin/kind/rules.bzl @@ -0,0 +1,71 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _rule_impl(_ctx): + return [DefaultInfo()] + +rule1 = rule(impl = _rule_impl, attrs = {"foo": attrs.string()}) +rule2 = rule(impl = _rule_impl, attrs = {"foo": attrs.string()}) +rule3 = rule(impl = _rule_impl, attrs = {"foo": attrs.string()}) + +def _rule_impl_with_run_info_and_default_info_outputs(ctx): + out = ctx.actions.write("default_out", "default_out") + run_info_out = ctx.actions.write("run_info_out", "run_info_out") + return [ + DefaultInfo(default_outputs = [out]), + RunInfo(args = cmd_args(run_info_out)), + ] + +rule4 = rule( + impl = _rule_impl_with_run_info_and_default_info_outputs, + attrs = {"foo": attrs.string()}, +) + +def project(f: Artifact): + return f + +NameSet = transitive_set(args_projections = { + "project": project, +}) + +NameInfo = provider(fields = ["tset"]) + +def _rule_impl_with_tset(ctx): + # Produce a file that contains our name. + out = ctx.actions.write("out.txt", str(ctx.label.name) + "\n") + + # Produce a tset that is our file concated wiht all the files emitted by + # our children. + children = [d[NameInfo].tset for d in ctx.attrs.deps] + tset = ctx.actions.tset(NameSet, value = out, children = children) + + # Concatenate all the files declared by the tset, into a single file + # (agg.txt), which we'll return as our output. 
+ agg = ctx.actions.declare_output("tset_out") + projected = tset.project_as_args("project") + + ctx.actions.run([ + "sh", + "-c", + 'out="$1" && shift && cat "$@" > "$out"', + "--", + agg.as_output(), + projected, + ], category = "test") + + return [ + NameInfo(tset = tset), + DefaultInfo(default_output = agg), + RunInfo(args = [projected]), + ] + +rule_tset = rule( + impl = _rule_impl_with_tset, + attrs = { + "deps": attrs.list(attrs.dep(providers = [NameInfo]), default = []), + }, +) diff --git a/tests/core/query/uquery/test_uquery_data/bxl_simple/data/TARGETS.fixture b/tests/core/query/uquery/test_uquery_data/bxl_simple/data/TARGETS.fixture new file mode 100644 index 0000000000000..31576b4c247f1 --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/bxl_simple/data/TARGETS.fixture @@ -0,0 +1,7 @@ +load("//rules:rules.bzl", "foo_library") + +foo_library( + name = "data", + srcs = glob(["**/*.file"]), + visibility = ["PUBLIC"], +) diff --git a/tests/core/query/uquery/test_uquery_data/bxl_simple/data/buck/build/data.file b/tests/core/query/uquery/test_uquery_data/bxl_simple/data/buck/build/data.file new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/query/uquery/test_uquery_data/bxl_simple/expected/dot/attrs.golden b/tests/core/query/uquery/test_uquery_data/bxl_simple/expected/dot/attrs.golden new file mode 100644 index 0000000000000..d3c32e76814c7 --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/bxl_simple/expected/dot/attrs.golden @@ -0,0 +1,19 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +digraph result_graph { + "root//bin:the_binary" [style=filled,color="#DFECDF",buck_name=the_binary,buck_cmd="[\"$(exe root//:bin)\", \"$(location root//:data)\"]",buck_deps="[\"root//lib:lib1\", \"root//lib:lib2\", \"root//lib:lib3\"]"]; + "root//bin:the_binary" -> "root//:data"; + "root//bin:the_binary" -> "root//lib:lib1"; + "root//bin:the_binary" -> "root//lib:lib2"; + "root//bin:the_binary" -> "root//lib:lib3"; + "root//:data" [style=filled,color="#DFECDF",buck_name=data,buck_cmd="\"$(exe root//:genrule_binary)\""]; + "root//lib:lib1" [style=filled,color="#DFECDF",buck_name=lib1,buck_cmd="[]",buck_deps="[]"]; + "root//lib:lib1" -> "root//lib:file1"; + "root//lib:lib2" [style=filled,color="#DFECDF",buck_name=lib2,buck_cmd="[\"this is lib2\", \"cmd\", \"$(location root//lib:file2)\"]",buck_deps="[]"]; + "root//lib:lib2" -> "root//lib:file2"; + "root//lib:lib3" [style=filled,color="#DFECDF",buck_name=lib3,buck_cmd="[\"this is lib3\"]+select({\"root//lib:constraint\": [\"this is lib3 too, case 1\"], \"DEFAULT\": [\"this is lib3 too, case 2\"]})",buck_deps="[]"]; + "root//lib:lib3" -> "root//lib:file3"; + "root//lib:file1" [style=filled,color="#DFECDF",buck_name=file1,buck_cmd="\"\""]; + "root//lib:file2" [style=filled,color="#DFECDF",buck_name=file2,buck_cmd="\"\""]; + "root//lib:file3" [style=filled,color="#DFECDF",buck_name=file3,buck_cmd="\"\""]; +} diff --git a/tests/core/query/uquery/test_uquery_data/bxl_simple/expected/dot/deps.golden b/tests/core/query/uquery/test_uquery_data/bxl_simple/expected/dot/deps.golden new file mode 100644 index 0000000000000..13680863719e5 --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/bxl_simple/expected/dot/deps.golden @@ -0,0 +1,19 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +digraph result_graph { + "root//bin:the_binary" 
[style=filled,color="#DFECDF"]; + "root//bin:the_binary" -> "root//:data"; + "root//bin:the_binary" -> "root//lib:lib1"; + "root//bin:the_binary" -> "root//lib:lib2"; + "root//bin:the_binary" -> "root//lib:lib3"; + "root//:data" [style=filled,color="#DFECDF"]; + "root//lib:lib1" [style=filled,color="#DFECDF"]; + "root//lib:lib1" -> "root//lib:file1"; + "root//lib:lib2" [style=filled,color="#DFECDF"]; + "root//lib:lib2" -> "root//lib:file2"; + "root//lib:lib3" [style=filled,color="#DFECDF"]; + "root//lib:lib3" -> "root//lib:file3"; + "root//lib:file1" [style=filled,color="#DFECDF"]; + "root//lib:file2" [style=filled,color="#DFECDF"]; + "root//lib:file3" [style=filled,color="#DFECDF"]; +} diff --git a/tests/core/query/uquery/test_uquery_data/bxl_simple/expected/dot/subgraph.golden b/tests/core/query/uquery/test_uquery_data/bxl_simple/expected/dot/subgraph.golden new file mode 100644 index 0000000000000..c470d272a6b32 --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/bxl_simple/expected/dot/subgraph.golden @@ -0,0 +1,7 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +digraph result_graph { + "root//bin:the_binary" [style=filled,color="#DFECDF"]; + "root//bin:the_binary" -> "root//:data"; + "root//:data" [style=filled,color="#DFECDF"]; +} diff --git a/tests/core/query/uquery/test_uquery_data/bxl_simple/expected/dot_compact/attrs.golden b/tests/core/query/uquery/test_uquery_data/bxl_simple/expected/dot_compact/attrs.golden new file mode 100644 index 0000000000000..e0a3b08268b5f --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/bxl_simple/expected/dot_compact/attrs.golden @@ -0,0 +1,19 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +digraph result_graph { + 1 [style=filled,color="#DFECDF",buck_name=the_binary,buck_cmd="[\"$(exe root//:bin)\", \"$(location root//:data)\"]",buck_deps="[\"root//lib:lib1\", \"root//lib:lib2\", \"root//lib:lib3\"]",label="root//bin:the_binary"]; + 1 -> 2; + 1 -> 3; + 1 -> 4; + 1 -> 5; + 2 [style=filled,color="#DFECDF",buck_name=data,buck_cmd="\"$(exe root//:genrule_binary)\"",label="root//:data"]; + 3 [style=filled,color="#DFECDF",buck_name=lib1,buck_cmd="[]",buck_deps="[]",label="root//lib:lib1"]; + 3 -> 6; + 4 [style=filled,color="#DFECDF",buck_name=lib2,buck_cmd="[\"this is lib2\", \"cmd\", \"$(location root//lib:file2)\"]",buck_deps="[]",label="root//lib:lib2"]; + 4 -> 7; + 5 [style=filled,color="#DFECDF",buck_name=lib3,buck_cmd="[\"this is lib3\"]+select({\"root//lib:constraint\": [\"this is lib3 too, case 1\"], \"DEFAULT\": [\"this is lib3 too, case 2\"]})",buck_deps="[]",label="root//lib:lib3"]; + 5 -> 8; + 6 [style=filled,color="#DFECDF",buck_name=file1,buck_cmd="\"\"",label="root//lib:file1"]; + 7 [style=filled,color="#DFECDF",buck_name=file2,buck_cmd="\"\"",label="root//lib:file2"]; + 8 [style=filled,color="#DFECDF",buck_name=file3,buck_cmd="\"\"",label="root//lib:file3"]; +} diff --git a/tests/core/query/uquery/test_uquery_data/bxl_simple/expected/dot_compact/deps.golden b/tests/core/query/uquery/test_uquery_data/bxl_simple/expected/dot_compact/deps.golden new file mode 100644 index 0000000000000..8b91d358382b6 --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/bxl_simple/expected/dot_compact/deps.golden @@ -0,0 +1,19 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +digraph 
result_graph { + 1 [style=filled,color="#DFECDF",label="root//bin:the_binary"]; + 1 -> 2; + 1 -> 3; + 1 -> 4; + 1 -> 5; + 2 [style=filled,color="#DFECDF",label="root//:data"]; + 3 [style=filled,color="#DFECDF",label="root//lib:lib1"]; + 3 -> 6; + 4 [style=filled,color="#DFECDF",label="root//lib:lib2"]; + 4 -> 7; + 5 [style=filled,color="#DFECDF",label="root//lib:lib3"]; + 5 -> 8; + 6 [style=filled,color="#DFECDF",label="root//lib:file1"]; + 7 [style=filled,color="#DFECDF",label="root//lib:file2"]; + 8 [style=filled,color="#DFECDF",label="root//lib:file3"]; +} diff --git a/tests/core/query/uquery/test_uquery_data/bxl_simple/expected/dot_compact/subgraph.golden b/tests/core/query/uquery/test_uquery_data/bxl_simple/expected/dot_compact/subgraph.golden new file mode 100644 index 0000000000000..89c02684030eb --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/bxl_simple/expected/dot_compact/subgraph.golden @@ -0,0 +1,7 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +digraph result_graph { + 1 [style=filled,color="#DFECDF",label="root//bin:the_binary"]; + 1 -> 2; + 2 [style=filled,color="#DFECDF",label="root//:data"]; +} diff --git a/tests/core/query/uquery/test_uquery_data/bxl_simple/lib/TARGETS.fixture b/tests/core/query/uquery/test_uquery_data/bxl_simple/lib/TARGETS.fixture new file mode 100644 index 0000000000000..426dc47c048f9 --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/bxl_simple/lib/TARGETS.fixture @@ -0,0 +1,49 @@ +load("//rules:rules.bzl", "foo_config_setting", "foo_genrule", "foo_library") + +foo_config_setting( + name = "constraint", +) + +foo_library( + name = "lib1", + srcs = [":file1", "TARGETS.fixture"], + description = "this is lib1", + visibility = ["PUBLIC"], +) + +foo_library( + name = "lib2", + srcs = [":file2"], + cmd = ["this is lib2", "cmd", "$(location :file2)"], + description = "this is lib2", + visibility = ["PUBLIC"], +) + +foo_library( + name = "lib3", + srcs = [":file3"], + cmd = ["this is lib3"] + select({ + ":constraint": ["this is lib3 too, case 1"], + "DEFAULT": ["this is lib3 too, case 2"], + }), + description = "this is lib3", + visibility = ["PUBLIC"], +) + +foo_genrule( + name = "file1", + cmd = "", + visibility = ["PUBLIC"], +) + +foo_genrule( + name = "file2", + cmd = "", + visibility = ["PUBLIC"], +) + +foo_genrule( + name = "file3", + cmd = "", + visibility = ["PUBLIC"], +) diff --git a/tests/core/query/uquery/test_uquery_data/bxl_simple/package_boundary_violation/TARGETS.fixture b/tests/core/query/uquery/test_uquery_data/bxl_simple/package_boundary_violation/TARGETS.fixture new file mode 100644 index 0000000000000..8b1106eae036e --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/bxl_simple/package_boundary_violation/TARGETS.fixture @@ -0,0 +1,7 @@ +load("//rules:rules.bzl", "foo_binary") + +foo_binary( + name = "bin", + srcs = ["bin"], + visibility = ["PUBLIC"], +) diff --git a/tests/core/query/uquery/test_uquery_data/bxl_simple/package_boundary_violation/bin b/tests/core/query/uquery/test_uquery_data/bxl_simple/package_boundary_violation/bin new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/query/uquery/test_uquery_data/bxl_simple/platforms/TARGETS.fixture b/tests/core/query/uquery/test_uquery_data/bxl_simple/platforms/TARGETS.fixture new file mode 100644 index 0000000000000..1ac5792488c99 --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/bxl_simple/platforms/TARGETS.fixture @@ -0,0 +1,11 @@ 
+load("//rules:rules.bzl", "foo_platform") + +foo_platform( + name = "platform1", + visibility = ["PUBLIC"], +) + +foo_platform( + name = "platform2", + visibility = ["PUBLIC"], +) diff --git a/tests/core/query/uquery/test_uquery_data/bxl_simple/rules/rules.bzl b/tests/core/query/uquery/test_uquery_data/bxl_simple/rules/rules.bzl new file mode 100644 index 0000000000000..30637c0f74d48 --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/bxl_simple/rules/rules.bzl @@ -0,0 +1,111 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +FooInfo = provider(fields = [ + "foo", +]) + +def _platform_impl(ctx): + return [ + DefaultInfo(), + PlatformInfo( + label = str(ctx.label.raw_target()), + configuration = ConfigurationInfo( + constraints = {}, + values = {}, + ), + ), + ] + +foo_platform = rule( + impl = _platform_impl, + attrs = {}, +) + +def _config_setting_impl(_ctx): + return [DefaultInfo(), ConfigurationInfo(constraints = {}, values = {})] + +foo_config_setting = rule( + impl = _config_setting_impl, + attrs = {}, +) + +def _impl(ctx): + return [DefaultInfo(), FooInfo(foo = ctx.attrs.name + "_foo")] + +def _binary_impl(ctx): + return [DefaultInfo(), RunInfo(args = []), FooInfo(foo = ctx.attrs.name + "_foo")] + +def _buildable_impl(ctx): + out = ctx.actions.write(ctx.attrs.out, ctx.attrs.content) + return [DefaultInfo(default_output = out)] + +_foo_library = rule( + impl = _impl, + attrs = { + "cmd": attrs.list(attrs.arg(), default = []), + "deps": attrs.list(attrs.dep(), default = []), + "description": attrs.string(default = ""), + "mapped_srcs": attrs.dict(attrs.string(), attrs.source(), default = {}), + "srcs": attrs.list(attrs.source(), default = []), + "tuple_srcs": attrs.option(attrs.tuple(attrs.source(), attrs.source(), attrs.source()), default = None), + }, +) + +_foo_binary = rule( + impl = _binary_impl, + attrs = { + "cmd": attrs.list(attrs.arg(), default = []), + "deps": attrs.list(attrs.dep(), default = []), + "description": attrs.string(default = ""), + "srcs": attrs.list(attrs.source(), default = []), + "_foo_toolchain": attrs.exec_dep(default = "root//:foo_toolchain"), + }, +) + +_foo_genrule = rule( + impl = _binary_impl, + attrs = { + "cmd": attrs.arg(), + "description": attrs.string(default = ""), + "out": attrs.string(default = ""), + }, +) + +_foo_buildable = rule( + impl = _buildable_impl, + attrs = { + "content": attrs.string(default = ""), + "out": attrs.string(), + }, +) + +_default_platform = "root//platforms:platform1" + +def _basic_print_impl(ctx): + _ignore = ctx # buildifier: disable=unused-variable + + print("print me") # buildifier: disable=print + return [DefaultInfo(), RunInfo(args = [])] + +foo_basic_print = rule( + impl = _basic_print_impl, + attrs = {}, +) + +def foo_library(**kwargs): + _foo_library(default_target_platform = _default_platform, **kwargs) + +def foo_binary(**kwargs): + platform = kwargs.pop("default_target_platform", _default_platform) + _foo_binary(default_target_platform = platform, **kwargs) + +def foo_genrule(**kwargs): + _foo_genrule(default_target_platform = _default_platform, **kwargs) + +def foo_buildable(**kwargs): + _foo_buildable(default_target_platform = _default_platform, **kwargs) diff --git a/tests/core/query/uquery/test_uquery_data/bxl_simple/rules/unittest.bzl 
b/tests/core/query/uquery/test_uquery_data/bxl_simple/rules/unittest.bzl
new file mode 100644
index 0000000000000..61dfd96a9d11c
--- /dev/null
+++ b/tests/core/query/uquery/test_uquery_data/bxl_simple/rules/unittest.bzl
@@ -0,0 +1,71 @@
+# Copyright 2017 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# @lint-ignore-every LICENSELINT
+
+"""Unit testing support.
+
+This is a modified version of https://github.com/bazelbuild/bazel-skylib/blob/main/lib/unittest.bzl.
+Currently, if there are any failures, these are raised immediately by calling fail(),
+which triggers an analysis-time build error.
+"""
+
+def _assert_equals(expected, actual, msg = None):
+    """Asserts that the given `expected` and `actual` are equal.
+
+    Args:
+      expected: the expected value of some computation.
+      actual: the actual value returned by some computation.
+      msg: An optional message that will be printed that describes the failure.
+          If omitted, a default will be used.
+    """
+    if expected != actual:
+        expectation_msg = 'Expected "%s", but got "%s"' % (expected, actual)
+        if msg:
+            full_msg = "%s (%s)" % (msg, expectation_msg)
+        else:
+            full_msg = expectation_msg
+        fail(full_msg)
+
+def _assert_true(
+        condition,
+        msg = "Expected condition to be true, but was false."):
+    """Asserts that the given `condition` is true.
+
+    Args:
+      condition: A value that will be evaluated in a Boolean context.
+      msg: An optional message that will be printed that describes the failure.
+          If omitted, a default will be used.
+    """
+    if not condition:
+        fail(msg)
+
+def _assert_false(
+        condition,
+        msg = "Expected condition to be false, but was true."):
+    """Asserts that the given `condition` is false.
+
+    Args:
+      condition: A value that will be evaluated in a Boolean context.
+      msg: An optional message that will be printed that describes the failure.
+          If omitted, a default will be used.
+ """ + if condition: + fail(msg) + +asserts = struct( + equals = _assert_equals, + true = _assert_true, + false = _assert_false, +) diff --git a/tests/core/query/uquery/test_uquery_data/bxl_simple/special/.buckconfig b/tests/core/query/uquery/test_uquery_data/bxl_simple/special/.buckconfig new file mode 100644 index 0000000000000..9f36f679bda9c --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/bxl_simple/special/.buckconfig @@ -0,0 +1,2 @@ +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/query/uquery/test_uquery_data/bxl_simple/special/stuff/TARGETS.fixture b/tests/core/query/uquery/test_uquery_data/bxl_simple/special/stuff/TARGETS.fixture new file mode 100644 index 0000000000000..ce1651eae526d --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/bxl_simple/special/stuff/TARGETS.fixture @@ -0,0 +1,6 @@ +load("@root//rules:rules.bzl", "foo_genrule") + +foo_genrule( + name = "magic", + cmd = "", +) diff --git a/tests/core/query/uquery/test_uquery_data/directory_sources/.buckconfig b/tests/core/query/uquery/test_uquery_data/directory_sources/.buckconfig new file mode 100644 index 0000000000000..7d499c6d869d3 --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/directory_sources/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] +name=TARGETS.fixture + +[cells] +root = . +prelude = prelude diff --git a/tests/core/query/uquery/test_uquery_data/directory_sources/TARGETS.fixture b/tests/core/query/uquery/test_uquery_data/directory_sources/TARGETS.fixture new file mode 100644 index 0000000000000..a9df979c11675 --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/directory_sources/TARGETS.fixture @@ -0,0 +1,9 @@ +test( + name = "a_file", + src = "file.txt", +) + +test( + name = "a_dir", + src = "dir", +) diff --git a/tests/core/query/uquery/test_uquery_data/directory_sources/dir/file1.txt b/tests/core/query/uquery/test_uquery_data/directory_sources/dir/file1.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/query/uquery/test_uquery_data/directory_sources/dir/subdir/file2.txt b/tests/core/query/uquery/test_uquery_data/directory_sources/dir/subdir/file2.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/query/uquery/test_uquery_data/directory_sources/dir/subdir/file3.txt b/tests/core/query/uquery/test_uquery_data/directory_sources/dir/subdir/file3.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/query/uquery/test_uquery_data/directory_sources/dir_with_subpackage/TARGETS.fixture b/tests/core/query/uquery/test_uquery_data/directory_sources/dir_with_subpackage/TARGETS.fixture new file mode 100644 index 0000000000000..c8af61054a9ae --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/directory_sources/dir_with_subpackage/TARGETS.fixture @@ -0,0 +1,4 @@ +test( + name = "a_dir_with_subpackage", + src = "dir_with_subpackage", +) diff --git a/tests/core/query/uquery/test_uquery_data/directory_sources/dir_with_subpackage/dir_with_subpackage/file.txt b/tests/core/query/uquery/test_uquery_data/directory_sources/dir_with_subpackage/dir_with_subpackage/file.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/query/uquery/test_uquery_data/directory_sources/dir_with_subpackage/dir_with_subpackage/subpackage/TARGETS.fixture b/tests/core/query/uquery/test_uquery_data/directory_sources/dir_with_subpackage/dir_with_subpackage/subpackage/TARGETS.fixture new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git 
a/tests/core/query/uquery/test_uquery_data/directory_sources/does_not_exist/TARGETS.fixture b/tests/core/query/uquery/test_uquery_data/directory_sources/does_not_exist/TARGETS.fixture new file mode 100644 index 0000000000000..da39314717a18 --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/directory_sources/does_not_exist/TARGETS.fixture @@ -0,0 +1,4 @@ +test( + name = "does_not_exist", + src = "does_not_exist", +) diff --git a/tests/core/query/uquery/test_uquery_data/directory_sources/file.txt b/tests/core/query/uquery/test_uquery_data/directory_sources/file.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/query/uquery/test_uquery_data/directory_sources/prelude/prelude.bzl b/tests/core/query/uquery/test_uquery_data/directory_sources/prelude/prelude.bzl new file mode 100644 index 0000000000000..6bc80b8be445d --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/directory_sources/prelude/prelude.bzl @@ -0,0 +1,18 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _test_impl(ctx): + return [ + DefaultInfo(default_output = ctx.attrs.src), + ] + +test = rule( + impl = _test_impl, + attrs = { + "src": attrs.source(allow_directory = True), + }, +) diff --git a/tests/core/query/uquery/test_uquery_data/directory_sources/subpackage/TARGETS.fixture b/tests/core/query/uquery/test_uquery_data/directory_sources/subpackage/TARGETS.fixture new file mode 100644 index 0000000000000..aa8474665b891 --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/directory_sources/subpackage/TARGETS.fixture @@ -0,0 +1,4 @@ +test( + name = "a_subpackage", + src = "subpackage", +) diff --git a/tests/core/query/uquery/test_uquery_data/directory_sources/subpackage/subpackage/TARGETS.fixture b/tests/core/query/uquery/test_uquery_data/directory_sources/subpackage/subpackage/TARGETS.fixture new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/query/uquery/test_uquery_data/oncall/.buckconfig b/tests/core/query/uquery/test_uquery_data/oncall/.buckconfig new file mode 100644 index 0000000000000..d0cca261b8bee --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/oncall/.buckconfig @@ -0,0 +1,12 @@ +[buildfile] +name=TARGETS.fixture + +[cells] +root = . 
+nano_prelude = nano_prelude + +[cell_aliases] +prelude = nano_prelude + +[external_cells] +nano_prelude = bundled diff --git a/tests/core/query/uquery/test_uquery_data/oncall/TARGETS.fixture b/tests/core/query/uquery/test_uquery_data/oncall/TARGETS.fixture new file mode 100644 index 0000000000000..58e476c8ee797 --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/oncall/TARGETS.fixture @@ -0,0 +1,5 @@ +oncall("magic") + +stub(name = "foo") + +stub(name = "bar") diff --git a/tests/core/query/uquery/test_uquery_data/output_starlark.golden.out b/tests/core/query/uquery/test_uquery_data/output_starlark.golden.out new file mode 100644 index 0000000000000..122cb5fc41859 --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/output_starlark.golden.out @@ -0,0 +1,109 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +# Traceback (most recent call last): +# * lib/TARGETS.fixture:3, in +# foo_config_setting( +foo_config_setting( + name = "constraint", + visibility = [], + within_view = ["PUBLIC"], +) + + +# Traceback (most recent call last): +# * lib/TARGETS.fixture:33, in +# foo_genrule( +# * rules/rules.bzl:108, in foo_genrule +# _foo_genrule(default_target_platform = _default_platform, **kwargs) +_foo_genrule( + name = "file1", + cmd = "", + default_target_platform = "root//platforms:platform1", + visibility = ["PUBLIC"], + within_view = ["PUBLIC"], +) + + +# Traceback (most recent call last): +# * lib/TARGETS.fixture:39, in +# foo_genrule( +# * rules/rules.bzl:108, in foo_genrule +# _foo_genrule(default_target_platform = _default_platform, **kwargs) +_foo_genrule( + name = "file2", + cmd = "", + default_target_platform = "root//platforms:platform1", + visibility = ["PUBLIC"], + within_view = ["PUBLIC"], +) + + +# Traceback (most recent call last): +# * lib/TARGETS.fixture:45, in +# foo_genrule( +# * rules/rules.bzl:108, in foo_genrule +# _foo_genrule(default_target_platform = _default_platform, **kwargs) +_foo_genrule( + name = "file3", + cmd = "", + default_target_platform = "root//platforms:platform1", + visibility = ["PUBLIC"], + within_view = ["PUBLIC"], +) + + +# Traceback (most recent call last): +# * lib/TARGETS.fixture:7, in +# foo_library( +# * rules/rules.bzl:101, in foo_library +# _foo_library(default_target_platform = _default_platform, **kwargs) +_foo_library( + name = "lib1", + default_target_platform = "root//platforms:platform1", + description = "this is lib1", + srcs = [ + "root//lib:file1", + "root//lib/TARGETS.fixture" + ], + visibility = ["PUBLIC"], + within_view = ["PUBLIC"], +) + + +# Traceback (most recent call last): +# * lib/TARGETS.fixture:14, in +# foo_library( +# * rules/rules.bzl:101, in foo_library +# _foo_library(default_target_platform = _default_platform, **kwargs) +_foo_library( + name = "lib2", + cmd = [ + "this is lib2", + "cmd", + "$(location root//lib:file2)" + ], + default_target_platform = "root//platforms:platform1", + description = "this is lib2", + srcs = [ "root//lib:file2" ], + visibility = ["PUBLIC"], + within_view = ["PUBLIC"], +) + + +# Traceback (most recent call last): +# * lib/TARGETS.fixture:22, in +# foo_library( +# * rules/rules.bzl:101, in foo_library +# _foo_library(default_target_platform = _default_platform, **kwargs) +_foo_library( + name = "lib3", + cmd = [ "this is lib3" ]+select({ + "root//lib:constraint": [ "this is lib3 too, case 1" ], + "DEFAULT": [ "this is lib3 too, case 2" ] + }), + default_target_platform = 
"root//platforms:platform1", + description = "this is lib3", + srcs = [ "root//lib:file3" ], + visibility = ["PUBLIC"], + within_view = ["PUBLIC"], +) diff --git a/tests/core/query/uquery/test_uquery_data/testsof/.buckconfig b/tests/core/query/uquery/test_uquery_data/testsof/.buckconfig new file mode 100644 index 0000000000000..ff08e90270120 --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/testsof/.buckconfig @@ -0,0 +1,15 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored + +[cells] +root = . +nano_prelude = nano_prelude + +[cell_aliases] +prelude = nano_prelude + +[external_cells] +nano_prelude = bundled diff --git a/tests/core/query/uquery/test_uquery_data/testsof/TARGETS.fixture b/tests/core/query/uquery/test_uquery_data/testsof/TARGETS.fixture new file mode 100644 index 0000000000000..5aaeb5435a3bd --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/testsof/TARGETS.fixture @@ -0,0 +1,55 @@ +load(":rules.bzl", "config_setting", "configuration", "platform", "test_rule") + +config_setting( + name = "testfullness", +) + +configuration( + name = "regular_tests", + config_setting = ":testfullness", +) + +configuration( + name = "more_tests", + config_setting = ":testfullness", +) + +platform( + name = "platform_default_tests", + configuration = ":regular_tests", +) + +platform( + name = "platform_more_tests", + configuration = ":more_tests", +) + +test_rule( + name = "foo_lib", + tests = [":foo_test"] + select({":more_tests": [":foo_extra_test"], "DEFAULT": []}), +) + +test_rule( + name = "foo_lib_with_test_with_default_platform", + tests = [":foo_test_with_default_platform"], +) + +test_rule(name = "foo_test") + +test_rule(name = "foo_test_with_default_platform", default_target_platform = ":foo_test_default_platform") + +test_rule(name = "foo_extra_test", compatible_with = [":more_tests"]) + +config_setting( + name = "foo_test_config_setting", +) + +configuration( + name = "foo_test_configuration", + config_setting = ":foo_test_config_setting", +) + +platform( + name = "foo_test_default_platform", + configuration = ":foo_test_configuration", +) diff --git a/tests/core/query/uquery/test_uquery_data/testsof/prelude/prelude.bzl b/tests/core/query/uquery/test_uquery_data/testsof/prelude/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/query/uquery/test_uquery_data/testsof/rules.bzl b/tests/core/query/uquery/test_uquery_data/testsof/rules.bzl new file mode 100644 index 0000000000000..b623891de071e --- /dev/null +++ b/tests/core/query/uquery/test_uquery_data/testsof/rules.bzl @@ -0,0 +1,59 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +def _impl(_ctx): + return [DefaultInfo()] + +test_rule = rule( + impl = _impl, + attrs = { + }, +) + +def _config_setting_impl(ctx): + return [DefaultInfo(), ConstraintSettingInfo(label = ctx.label.raw_target())] + +config_setting = rule( + impl = _config_setting_impl, + attrs = { + }, +) + +def _configuration_impl(ctx): + config_setting = ctx.attrs.config_setting + value = ConstraintValueInfo( + setting = config_setting[ConstraintSettingInfo], + label = ctx.label.raw_target(), + ) + + return [ + DefaultInfo(), + ConfigurationInfo(constraints = {config_setting.label.raw_target(): value}, values = {}), + ] + +configuration = rule( + impl = _configuration_impl, + attrs = { + "config_setting": attrs.configuration_label(), + }, +) + +def _platform_impl(ctx): + return [ + DefaultInfo(), + PlatformInfo( + label = str(ctx.label.raw_target()), + configuration = ctx.attrs.configuration[ConfigurationInfo], + ), + ] + +platform = rule( + impl = _platform_impl, + attrs = { + "configuration": attrs.configuration_label(), + }, +) diff --git a/tests/core/rage/BUCK b/tests/core/rage/BUCK new file mode 100644 index 0000000000000..2c3a5ccd87df4 --- /dev/null +++ b/tests/core/rage/BUCK @@ -0,0 +1,9 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_rage", + srcs = ["test_rage.py"], + data_dir = "test_rage_data", +) diff --git a/tests/core/rage/test_rage.py b/tests/core/rage/test_rage.py new file mode 100644 index 0000000000000..ea86bfec36446 --- /dev/null +++ b/tests/core/rage/test_rage.py @@ -0,0 +1,62 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import os +import tempfile + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +def opener(path: str, flags: int) -> int: + # Make it executable by the user + return os.open(path, flags, 0o777) + + +def mock_cmd_unix(path: str) -> None: + with open(path, "w", opener=opener) as fl: + fl.write( + """\
#! /bin/sh +echo "$@" + """ + ) + + +# No Windows, since mocking the pastry command didn't work there (D41623200) +@buck_test(skip_for_os=["windows"]) +async def test_rage(buck: Buck) -> None: + # Build a trivial action + await buck.build("//:simple") + + with tempfile.TemporaryDirectory() as tmpdirname: + pastry_path = f"{tmpdirname}/pastry" + hg_path = f"{tmpdirname}/hg" + mock_cmd_unix(pastry_path) + mock_cmd_unix(hg_path) + + # We want our mock executables to be found first + cmd_path = tmpdirname + os.pathsep + os.environ["PATH"] + # Run rage against the most recent invocation. + await buck.rage(input=b"0", env={"PATH": cmd_path}) + + +@buck_test() +async def test_rage_no_paste(buck: Buck) -> None: + # Build a trivial action + await buck.build("//:simple") + # Run rage against the most recent invocation.
+ await buck.rage("--no-paste", "--invocation-offset", "0") + + +@buck_test() +async def test_rage_no_logs(buck: Buck) -> None: + # Rage doesn't crash even with no invocation logs + await buck.rage("--no-paste") diff --git a/tests/core/rage/test_rage_data/.buckconfig b/tests/core/rage/test_rage_data/.buckconfig new file mode 100644 index 0000000000000..fc9c93d30eddd --- /dev/null +++ b/tests/core/rage/test_rage_data/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . +prelude = . diff --git a/tests/core/rage/test_rage_data/.buckroot b/tests/core/rage/test_rage_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/rage/test_rage_data/TARGETS.fixture b/tests/core/rage/test_rage_data/TARGETS.fixture new file mode 100644 index 0000000000000..3d6b84deb738b --- /dev/null +++ b/tests/core/rage/test_rage_data/TARGETS.fixture @@ -0,0 +1,41 @@ +simple_write(name = "simple") + +write_file(name = "uses_declared_output") + +write_file(name = "uses_declared_output_as_output") + +write_file(name = "declares_output") + +write_file( + name = "is_executable", + exe = True, +) + +write_file( + name = "writes_array_of_commands", + dep = ":simple", +) + +write_file( + name = "writes_command_lines", + dep = ":simple", +) + +write_file( + name = "writes_frozen_command_lines", + dep = ":simple", +) + +write_file(name = "fails_on_invalid_contents") + +write_file(name = "fails_on_invalid_output") + +write_file( + name = "with_inputs_and_copy", + dep = ":simple", +) + +write_file( + name = "writes_absolute", + dep = ":simple", +) diff --git a/tests/core/rage/test_rage_data/prelude.bzl b/tests/core/rage/test_rage_data/prelude.bzl new file mode 100644 index 0000000000000..a1e6fa23b8cef --- /dev/null +++ b/tests/core/rage/test_rage_data/prelude.bzl @@ -0,0 +1,81 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +FooInfo = provider(fields = ["args", "out"]) + +def _simple_write_impl(ctx): + out = ctx.actions.write("out.txt", "contents") + args = cmd_args([out]) + return [ + FooInfo(args = args, out = out), + DefaultInfo(default_output = out), + ] + +def _write_file_impl(ctx): + if ctx.attrs.name == "uses_declared_output": + declared = ctx.actions.declare_output(ctx.attrs.out) + output = ctx.actions.write(declared, ctx.attrs.content) + elif ctx.attrs.name == "uses_declared_output_as_output": + declared = ctx.actions.declare_output(ctx.attrs.out) + output = ctx.actions.write(declared.as_output(), ctx.attrs.content) + elif ctx.attrs.name == "declares_output": + output = ctx.actions.write(ctx.attrs.out, ctx.attrs.content) + elif ctx.attrs.name == "is_executable": + output = ctx.actions.write(ctx.attrs.out, ctx.attrs.content, is_executable = True) + elif ctx.attrs.name == "writes_array_of_commands": + cmd = [ctx.attrs.dep[FooInfo].out, ctx.attrs.content] + output = ctx.actions.write(ctx.attrs.out, cmd) + elif ctx.attrs.name == "writes_command_lines": + cmd = [ctx.attrs.dep[FooInfo].out, ctx.attrs.content] + output = ctx.actions.write(ctx.attrs.out, cmd_args(cmd)) + elif ctx.attrs.name == "writes_frozen_command_lines": + output = ctx.actions.write(ctx.attrs.out, ctx.attrs.dep[FooInfo].args) + elif ctx.attrs.name == "with_inputs_and_copy": + output1 = ctx.actions.write("intermediate.txt", ctx.attrs.content) + output2 = ctx.actions.declare_output(ctx.attrs.out) + + # Create script with output1 as its associated artifact + cmd = cmd_args(output1, format = "import sys; fp1=open('{}','r'); all=fp1.read(); fp2=open(sys.argv[1], 'w'); fp2.write(all);") + + # Replace \ with \\ for Windows compatibility + cmd = cmd_args(cmd, replace_regex = ("\\\\\\b", "\\\\")) + script = ctx.actions.write( + "script.py", + [cmd], + with_inputs = True, + ) + + # Read output1 and write back into output2. 
Output1 should be included as an associated artifact here so we do not need to add it as hidden + cmd = cmd_args(["python3", script, output2.as_output()]) + ctx.actions.run(cmd, category = "test") + return [DefaultInfo(default_output = output2)] + elif ctx.attrs.name == "fails_on_invalid_contents": + output = ctx.actions.write(ctx.attrs.out, {}) + elif ctx.attrs.name == "fails_on_invalid_output": + output = ctx.actions.write([], ctx.attrs.content) + elif ctx.attrs.name == "writes_absolute": + content = [ctx.attrs.dep[FooInfo].out] + output = ctx.actions.write(ctx.attrs.out, content, absolute = True) + else: + fail("invalid test") + return [DefaultInfo(default_output = output)] + +write_file = rule( + impl = _write_file_impl, + attrs = { + "content": attrs.string(default = "some content"), + "dep": attrs.option(attrs.dep(providers = [FooInfo]), default = None), + "exe": attrs.bool(default = False), + "out": attrs.string(default = "out.txt"), + }, +) + +simple_write = rule( + impl = _simple_write_impl, + attrs = { + }, +) diff --git a/tests/core/restart/BUCK b/tests/core/restart/BUCK new file mode 100644 index 0000000000000..4ddb1ac6ea0c9 --- /dev/null +++ b/tests/core/restart/BUCK @@ -0,0 +1,11 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_restart", + srcs = ["test_restart.py"], + data_dir = "test_restart_data", + serialize_test_cases = False, + deps = [], +) diff --git a/tests/core/restart/test_restart.py b/tests/core/restart/test_restart.py new file mode 100644 index 0000000000000..cf4447abb325e --- /dev/null +++ b/tests/core/restart/test_restart.py @@ -0,0 +1,142 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import asyncio +import json +import os +import signal +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + +TEST_DIGEST = "76f7aea8c1fc400287312b9608ceb24848ba02ac:14" + + +@buck_test() +async def test_restart_requires_no_stdout(buck: Buck) -> None: + res = await buck.targets("//:stage0", env={"FORCE_WANT_RESTART": "true"}) + assert res.stdout.count("//:stage0") == 1 + + +@buck_test() +async def test_restart(buck: Buck) -> None: + # Normally shows once. + res = await expect_failure(buck.targets("//:invalid")) + assert res.stderr.count("Unknown target `invalid`") == 1 + + # But if we force a restart... + res = await expect_failure( + buck.targets("//:invalid", env={"FORCE_WANT_RESTART": "true"}) + ) + assert res.stderr.count("Unknown target `invalid`") == 2 + + +@buck_test(allow_soft_errors=True) +async def test_restart_materializer_corruption(buck: Buck) -> None: + stage1 = "//:stage1" + res = await buck.build(stage1) + out = res.get_build_report().output_for_target(stage1) + + # Now we remove this file (which comes to us via RE) + # Only way to get it back is by killing the materializer state. + os.unlink(out) + + res = await buck.build("//:stage2") + assert "Your command will now restart" in res.stderr + + +@buck_test(allow_soft_errors=True) +async def test_restart_cas_missing(buck: Buck) -> None: + # Make sure Buck is not running. 
+ await buck.kill() + + # Start a daemon with the `src` file tombstoned. This means we cannot download it from RE. + # This is just the hash of `src`. + await buck.build(env={"BUCK2_TEST_TOMBSTONED_DIGESTS": TEST_DIGEST}) + + # Now build //:stage2. Buck2 must try to download the file, fail, then + # restart the daemon. + res = await buck.build("//:stage2") + assert "Your command will now restart" in res.stderr + + # TODO: We should also handle the case where the top-level artifact is what + # fails to download (i.e. build stage1 here instead). + + +@buck_test( + allow_soft_errors=True, + skip_for_os=["windows"], +) +async def test_restart_forkserver_crash(buck: Buck) -> None: + # Start the daemon + await buck.build() + + # Kill its forkserver. + forkserver_pid = json.loads((await buck.status()).stdout)["forkserver_pid"] + assert forkserver_pid is not None + os.kill(forkserver_pid, signal.SIGKILL) + + # Wait for its forkserver to exit. + for _ in range(10): + try: + os.kill(forkserver_pid, 0) + except OSError: + break + else: + await asyncio.sleep(1) + + # Now build a thing and check we restart + res = await buck.build("//:stage2") + assert "Your command will now restart" in res.stderr + + +@buck_test() +async def test_restart_disabled(buck: Buck) -> None: + # Ensure no daemon + await buck.kill() + + with open(buck.cwd / ".buckconfig", "a") as f: + f.write("[buck2]\nrestarter = false") + + result = await expect_failure( + buck.build( + "//:stage2", + env={"BUCK2_TEST_TOMBSTONED_DIGESTS": TEST_DIGEST}, + ), + ) + assert "Your command will now restart" not in result.stderr + + +@buck_test() +async def test_trace_id(buck: Buck, tmp_path: Path) -> None: + trace_id = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" + record_file = tmp_path / "record.json" + + # But if we force a restart... + await expect_failure( + buck.targets( + "//:invalid", + "--unstable-write-invocation-record", + str(record_file), + env={"FORCE_WANT_RESTART": "true", "BUCK_WRAPPER_UUID": trace_id}, + ) + ) + + with open(record_file) as f: + record = json.load(f) + + assert record["trace_id"] != trace_id + + assert ( + record["data"]["Record"]["data"]["InvocationRecord"]["restarted_trace_id"] + == trace_id + ) diff --git a/tests/core/restart/test_restart_data/.buckconfig b/tests/core/restart/test_restart_data/.buckconfig new file mode 100644 index 0000000000000..7bf6d5ff96276 --- /dev/null +++ b/tests/core/restart/test_restart_data/.buckconfig @@ -0,0 +1,11 @@ +[repositories] + root = . 
+[repository_aliases] + prelude = root +[buildfile] + name = TARGETS.fixture +[buck2] + materializations = deferred + sqlite_materializer_state = true + sqlite_materializer_state_version = 0 + restarter = true diff --git a/tests/core/restart/test_restart_data/.buckroot b/tests/core/restart/test_restart_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/restart/test_restart_data/TARGETS.fixture b/tests/core/restart/test_restart_data/TARGETS.fixture new file mode 100644 index 0000000000000..0436945cbe8b5 --- /dev/null +++ b/tests/core/restart/test_restart_data/TARGETS.fixture @@ -0,0 +1,5 @@ +load(":defs.bzl", "cp") + +cp(name = "stage0", src = "src") +cp(name = "stage1", src = ":stage0") +cp(name = "stage2", src = ":stage1", local_only = True) diff --git a/tests/core/restart/test_restart_data/defs.bzl b/tests/core/restart/test_restart_data/defs.bzl new file mode 100644 index 0000000000000..9b6441184e636 --- /dev/null +++ b/tests/core/restart/test_restart_data/defs.bzl @@ -0,0 +1,21 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl_cp(ctx): + out = ctx.actions.declare_output("out") + ctx.actions.run( + ["cp", ctx.attrs.src, out.as_output()], + category = "cp", + local_only = ctx.attrs.local_only, + env = {"CACHE_BUSTER": str(ctx.attrs.local_only)}, + ) + return [DefaultInfo(out)] + +cp = rule(attrs = { + "local_only": attrs.bool(default = False), + "src": attrs.source(), +}, impl = _impl_cp) diff --git a/tests/core/restart/test_restart_data/prelude.bzl b/tests/core/restart/test_restart_data/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/restart/test_restart_data/src b/tests/core/restart/test_restart_data/src new file mode 100644 index 0000000000000..85504b36b9ed7 --- /dev/null +++ b/tests/core/restart/test_restart_data/src @@ -0,0 +1 @@ +some src file diff --git a/tests/core/run/BUCK b/tests/core/run/BUCK new file mode 100644 index 0000000000000..5736607a8594b --- /dev/null +++ b/tests/core/run/BUCK @@ -0,0 +1,15 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_run", + srcs = ["test_run.py"], + data_dir = "test_run_data", +) + +buck2_e2e_test( + name = "test_build_id_env", + srcs = ["test_build_id_env.py"], + data_dir = "test_build_id_env_data", +) diff --git a/tests/core/run/test_build_id_env.py b/tests/core/run/test_build_id_env.py new file mode 100644 index 0000000000000..bd26e17b70b3b --- /dev/null +++ b/tests/core/run/test_build_id_env.py @@ -0,0 +1,17 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_build_id(buck: Buck) -> None: + await buck.run("root//:check_run_uuid") diff --git a/tests/core/run/test_build_id_env_data/.buckconfig b/tests/core/run/test_build_id_env_data/.buckconfig new file mode 100644 index 0000000000000..df06a02c03ca2 --- /dev/null +++ b/tests/core/run/test_build_id_env_data/.buckconfig @@ -0,0 +1,5 @@ +[cells] + root = . + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/run/test_build_id_env_data/.buckroot b/tests/core/run/test_build_id_env_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/run/test_build_id_env_data/TARGETS.fixture b/tests/core/run/test_build_id_env_data/TARGETS.fixture new file mode 100644 index 0000000000000..34f6d5cc72bfc --- /dev/null +++ b/tests/core/run/test_build_id_env_data/TARGETS.fixture @@ -0,0 +1,3 @@ +load("defs.bzl", "all_defs") + +all_defs() diff --git a/tests/core/run/test_build_id_env_data/defs.bzl b/tests/core/run/test_build_id_env_data/defs.bzl new file mode 100644 index 0000000000000..b63e34cb1b7fe --- /dev/null +++ b/tests/core/run/test_build_id_env_data/defs.bzl @@ -0,0 +1,21 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _check_run_uuid(_ctx): + return [ + DefaultInfo(), + RunInfo([ + "python3", + "-c", + 'import os; assert "BUCK_RUN_BUILD_ID" in os.environ', + ]), + ] + +check_run_uuid = rule(impl = _check_run_uuid, attrs = {}) + +def all_defs(): + check_run_uuid(name = "check_run_uuid") diff --git a/tests/core/run/test_run.py b/tests/core/run/test_run.py new file mode 100644 index 0000000000000..03907420336b1 --- /dev/null +++ b/tests/core/run/test_run.py @@ -0,0 +1,86 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + + +import subprocess +from pathlib import Path +from typing import List + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_run_executable(buck: Buck) -> None: + result = await buck.run("root//:print_hello") + assert result.stdout.strip() == "hello" + + +@buck_test(skip_for_os=["windows"]) +async def test_emit_shell(buck: Buck) -> None: + result = await buck.run( + "root//:print_hello", + "--emit-shell", + ) + + out = subprocess.check_output(result.stdout, shell=True, encoding="utf-8") + assert out.strip() == "hello" + + +@buck_test() +async def test_run_non_executable_fails(buck: Buck) -> None: + await expect_failure( + buck.run("root//:no_run_info"), + stderr_regex=r"Target `[^`]+` is not a binary rule \(only binary rules can be `run`\)", + ) + + +@buck_test() +async def test_passing_arguments(buck: Buck) -> None: + async def f(args1: List[str], args2: List[str]) -> None: + result = await buck.run("root//:echo_args", *args1, *args2) + assert result.stdout.strip() == " ".join(args2) + + await f(["--"], ["val", "--long", "-s", "spa ces"]) + await f(["--"], ["val", "--", "test"]) + await f([], ["val", "--long"]) # Would fail in Buck1 (--long not found) + await f([], ["val", "--", "x"]) # Would work differently in Buck1 (no -- to user) + await expect_failure( + buck.run("root//:echo_args", "--not-a-flag"), + stderr_regex=r"unexpected argument '--not-a-flag'", + ) + + +@buck_test() +async def test_executable_fail_to_build(buck: Buck) -> None: + await expect_failure( + buck.run("root//:build_fail"), + stderr_regex=r"Failed to build", + ) + + +@buck_test() +async def test_input(buck: Buck) -> None: + await buck.run("root//:check_input_test", input=b"test") + + +@buck_test() +async def test_change_cwd(buck: Buck, tmp_path: Path) -> None: + result = await buck.run( + "root//:print_cwd", + f"--chdir={tmp_path}", + ) + assert tmp_path.resolve() == Path(result.stdout.strip()).resolve() + + +@buck_test() +async def test_dont_change_cwd(buck: Buck) -> None: + result = await buck.run("root//:print_cwd") + assert buck.cwd == Path(result.stdout.strip()) diff --git a/tests/core/run/test_run_data/.buckconfig b/tests/core/run/test_run_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/run/test_run_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . 
+ nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/run/test_run_data/.buckroot b/tests/core/run/test_run_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/run/test_run_data/TARGETS.fixture b/tests/core/run/test_run_data/TARGETS.fixture new file mode 100644 index 0000000000000..b07df6775cb5a --- /dev/null +++ b/tests/core/run/test_run_data/TARGETS.fixture @@ -0,0 +1,25 @@ +load(":defs.bzl", "build_fail", "run_python") + +build_fail(name = "build_fail") + +trivial_build(name = "no_run_info") + +run_python( + name = "print_hello", + script = 'print("hello")', +) + +run_python( + name = "echo_args", + script = 'import sys; print(" ".join(sys.argv[1:]))', +) + +run_python( + name = "check_input_test", + script = 'import sys; assert sys.stdin.read() == "test"', +) + +run_python( + name = "print_cwd", + script = "import os; print(os.getcwd())", +) diff --git a/tests/core/run/test_run_data/defs.bzl b/tests/core/run/test_run_data/defs.bzl new file mode 100644 index 0000000000000..a40ba0006398a --- /dev/null +++ b/tests/core/run/test_run_data/defs.bzl @@ -0,0 +1,32 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _run_python(ctx): + return [ + DefaultInfo(), + RunInfo(args = cmd_args("python3", "-c", ctx.attrs.script)), + ] + +run_python = rule( + impl = _run_python, + attrs = { + "script": attrs.string(), + }, +) + +def _build_fail(ctx): + out = ctx.actions.declare_output("out.txt") + ctx.actions.run( + cmd_args("exit", "1", hidden = out.as_output()), + category = "run", + ) + return [DefaultInfo(default_output = out), RunInfo(args = cmd_args(out))] + +build_fail = rule( + impl = _build_fail, + attrs = {}, +) diff --git a/tests/core/starlark_command/BUCK b/tests/core/starlark_command/BUCK new file mode 100644 index 0000000000000..9cf8719c435a1 --- /dev/null +++ b/tests/core/starlark_command/BUCK @@ -0,0 +1,9 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_lint_and_typecheck", + srcs = ["test_lint_and_typecheck.py"], + data_dir = "test_lint_and_typecheck_data", +) diff --git a/tests/core/starlark_command/test_lint_and_typecheck.py b/tests/core/starlark_command/test_lint_and_typecheck.py new file mode 100644 index 0000000000000..e3765b5a1b7cf --- /dev/null +++ b/tests/core/starlark_command/test_lint_and_typecheck.py @@ -0,0 +1,30 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_lint_fails(buck: Buck) -> None: + await expect_failure( + buck.starlark("lint", "bad_warning.bzl"), + stderr_regex="Found 3 lints", + ) + + +@buck_test() +async def test_typecheck_fails(buck: Buck) -> None: + await buck.starlark("typecheck", "good.bzl") + await expect_failure( + buck.starlark("typecheck", "bad.bzl"), + stderr_regex="Detected 2 errors", + ) diff --git a/tests/core/starlark_command/test_lint_and_typecheck_data/.buckconfig b/tests/core/starlark_command/test_lint_and_typecheck_data/.buckconfig new file mode 100644 index 0000000000000..82ff4e5316342 --- /dev/null +++ b/tests/core/starlark_command/test_lint_and_typecheck_data/.buckconfig @@ -0,0 +1,2 @@ +[cells] + root = . diff --git a/tests/core/starlark_command/test_lint_and_typecheck_data/.buckroot b/tests/core/starlark_command/test_lint_and_typecheck_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/starlark_command/test_lint_and_typecheck_data/bad.bzl b/tests/core/starlark_command/test_lint_and_typecheck_data/bad.bzl new file mode 100644 index 0000000000000..090117d601c08 --- /dev/null +++ b/tests/core/starlark_command/test_lint_and_typecheck_data/bad.bzl @@ -0,0 +1,12 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def my_func(x: int, y: str) -> range: + return x == y + +def my_other_func(): + my_func(1, 2) diff --git a/tests/core/starlark_command/test_lint_and_typecheck_data/bad_warning.bzl b/tests/core/starlark_command/test_lint_and_typecheck_data/bad_warning.bzl new file mode 100644 index 0000000000000..5d0934b00a0bf --- /dev/null +++ b/tests/core/starlark_command/test_lint_and_typecheck_data/bad_warning.bzl @@ -0,0 +1,17 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# @nolint + +def foo(): + pass + +def foo(): + pass + +def bar(x): + return type(x) == list diff --git a/tests/core/starlark_command/test_lint_and_typecheck_data/good.bzl b/tests/core/starlark_command/test_lint_and_typecheck_data/good.bzl new file mode 100644 index 0000000000000..4d21355cc48bc --- /dev/null +++ b/tests/core/starlark_command/test_lint_and_typecheck_data/good.bzl @@ -0,0 +1,12 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
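+ +# Deliberately well-typed: the typecheck test expects this file to pass.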
+ +def my_func(x: int, y: str) -> bool: + return x == int(y) + +if not my_func(1, "2"): + fail("broken") diff --git a/tests/core/subscribe/BUCK b/tests/core/subscribe/BUCK new file mode 100644 index 0000000000000..2d01f14539cbf --- /dev/null +++ b/tests/core/subscribe/BUCK @@ -0,0 +1,15 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_subscribe", + srcs = ["test_subscribe.py"], + data_dir = "test_subscribe_data", + env = { + "BUCK2_EXPECT": "$(exe_target fbcode//buck2/tests/core/subscribe/buck2_expect:buck2_expect)", + }, + serialize_test_cases = False, + deps = [ + ], +) diff --git a/tests/core/subscribe/buck2_expect/BUCK b/tests/core/subscribe/buck2_expect/BUCK new file mode 100644 index 0000000000000..b68a211b2f75a --- /dev/null +++ b/tests/core/subscribe/buck2_expect/BUCK @@ -0,0 +1,20 @@ +load("@fbcode_macros//build_defs:rust_binary.bzl", "rust_binary") + +oncall("build_infra") + +rust_binary( + name = "buck2_expect", + srcs = glob( + ["src/**/*.rs"], + ), + deps = [ + "fbsource//third-party/rust:anyhow", + "fbsource//third-party/rust:clap", + "fbsource//third-party/rust:futures", + "fbsource//third-party/rust:prost", + "fbsource//third-party/rust:tokio", + "fbsource//third-party/rust:tokio-util", + "//buck2/app/buck2_cli_proto:buck2_cli_proto", + "//buck2/app/buck2_subscription_proto:buck2_subscription_proto", + ], +) diff --git a/tests/core/subscribe/buck2_expect/src/main.rs b/tests/core/subscribe/buck2_expect/src/main.rs new file mode 100644 index 0000000000000..795a503e4f3c5 --- /dev/null +++ b/tests/core/subscribe/buck2_expect/src/main.rs @@ -0,0 +1,89 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use std::path::PathBuf; +use std::process::Stdio; + +use anyhow::Context as _; +use buck2_cli_proto::protobuf_util::ProtobufSplitter; +use buck2_subscription_proto::subscription_response::Response; +use buck2_subscription_proto::Materialized; +use buck2_subscription_proto::SubscribeToPaths; +use buck2_subscription_proto::SubscriptionRequest; +use buck2_subscription_proto::SubscriptionResponse; +use clap::Parser; +use futures::stream::TryStreamExt; +use prost::Message; +use tokio::io::AsyncWriteExt; +use tokio::process::Command; +use tokio_util::codec::FramedRead; + +#[derive(Parser)] +struct Opt { + /// Path to the Buck2 binary + #[clap(long, default_value = "buck2")] + buck2: PathBuf, + + /// Optional isolation dir + #[clap(long)] + isolation_dir: Option<PathBuf>, + + /// Path to expect + expect: String, +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let Opt { + buck2, + isolation_dir, + expect, + } = Parser::parse(); + + let mut command = Command::new(buck2); + command.stdin(Stdio::piped()); + command.stdout(Stdio::piped()); + command.stderr(Stdio::inherit()); + + if let Some(isolation_dir) = isolation_dir { + command.arg("--isolation-dir"); + command.arg(isolation_dir); + } + + command.arg("subscribe"); + let mut command = command.spawn().context("Error spawning")?; + + let mut stdin = command.stdin.take().unwrap(); + let stdout = command.stdout.take().unwrap(); + + // The subscribe protocol is length-delimited protobuf: requests are written + // to stdin, responses are read back from stdout (framed by ProtobufSplitter). + let req = SubscriptionRequest { + request: Some( + SubscribeToPaths { + paths: vec![expect.clone()], + } + .into(), + ), + } + .encode_length_delimited_to_vec(); + + stdin.write_all(&req).await?; + stdin.flush().await?; + + let mut stream = FramedRead::new(stdout, ProtobufSplitter); + let mut msg = stream.try_next().await?.context("was disconnected")?; + let res = SubscriptionResponse::decode_length_delimited(&mut msg).context("Error decoding")?; + + match res.response.as_ref().context("Empty response")? { + Response::Materialized(Materialized { path }) if *path == expect => { + println!("{}", path); + Ok(()) + } + _ => Err(anyhow::anyhow!("Unexpected response: {:?}", res)), + } +} diff --git a/tests/core/subscribe/test_subscribe.py b/tests/core/subscribe/test_subscribe.py new file mode 100644 index 0000000000000..786e05230b529 --- /dev/null +++ b/tests/core/subscribe/test_subscribe.py @@ -0,0 +1,92 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import asyncio +import os + +import pytest + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.api.buck_result import BuckException +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_subscribe(buck: Buck) -> None: + path = (await buck.targets("//:stage1", "--show-output")).stdout.strip().split()[1] + + # Buck2 wants normalized paths here.
+ path = path.replace("\\", "/") + + expect = os.environ["BUCK2_EXPECT"] + args = [ + "--buck2", + buck.path_to_executable, + path, + ] + + if buck.isolation_prefix is not None: + args.extend( + [ + "--isolation-dir", + buck.isolation_prefix, + ] + ) + + proc = await asyncio.create_subprocess_exec( + expect, + *args, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + cwd=buck.cwd, + env=buck._env + ) + + await buck.build("//:stage2") + + # We don't expect this to actually take anywhere near 20 seconds, but on CI + # on a busy host this could take a while. + (stdout, stderr) = await asyncio.wait_for(proc.communicate(), timeout=20) + assert proc.returncode == 0 + assert stdout.strip().decode("utf-8") == path + + +@buck_test() +async def test_active_commands(buck: Buck) -> None: + async with await buck.subscribe("--active-commands") as subscribe: + msg = await subscribe.read_message() + commands = msg["response"]["ActiveCommandsSnapshot"]["active_commands"] + assert len(commands) == 1 + assert "subscribe" in commands[0]["argv"] + + +@buck_test() +async def test_disconnect_eof(buck: Buck) -> None: + async with await buck.subscribe() as subscribe: + subscribe.stdin.close() + msg = await subscribe.read_message() + assert "EOF" in msg["response"]["Goodbye"]["reason"] + + +@buck_test() +async def test_disconnect_error(buck: Buck) -> None: + with pytest.raises(BuckException): + async with await buck.subscribe() as subscribe: + subscribe.stdin.write(b"x") + subscribe.stdin.close() + msg = await subscribe.read_message() + assert "Error parsing request" in msg["response"]["Goodbye"]["reason"] + + +@buck_test() +async def test_unknown_request_error(buck: Buck) -> None: + with pytest.raises(BuckException): + async with await buck.subscribe() as subscribe: + subscribe.stdin.write(b"\x00") # Would decode to a None request diff --git a/tests/core/subscribe/test_subscribe_data/.buckconfig b/tests/core/subscribe/test_subscribe_data/.buckconfig new file mode 100644 index 0000000000000..5cbd2899e0231 --- /dev/null +++ b/tests/core/subscribe/test_subscribe_data/.buckconfig @@ -0,0 +1,8 @@ +[repositories] + root = . +[repository_aliases] + prelude = root +[buildfile] + name = TARGETS.fixture +[buck2] + materializations = deferred diff --git a/tests/core/subscribe/test_subscribe_data/.buckroot b/tests/core/subscribe/test_subscribe_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/subscribe/test_subscribe_data/TARGETS.fixture b/tests/core/subscribe/test_subscribe_data/TARGETS.fixture new file mode 100644 index 0000000000000..542c6197a20ef --- /dev/null +++ b/tests/core/subscribe/test_subscribe_data/TARGETS.fixture @@ -0,0 +1,5 @@ +load(":defs.bzl", "cp") + +cp(name = "stage0", src = "src") +cp(name = "stage1", src = ":stage0") +cp(name = "stage2", src = ":stage1") diff --git a/tests/core/subscribe/test_subscribe_data/defs.bzl b/tests/core/subscribe/test_subscribe_data/defs.bzl new file mode 100644 index 0000000000000..ad059ea0b0c1d --- /dev/null +++ b/tests/core/subscribe/test_subscribe_data/defs.bzl @@ -0,0 +1,13 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
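+ +# A minimal cp rule; the chained cp targets in TARGETS.fixture give the +# subscribe test concrete output paths whose materialization it can await.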
+ +def _impl_cp(ctx): + out = ctx.actions.declare_output("out") + ctx.actions.run(["cp", ctx.attrs.src, out.as_output()], category = "cp") + return [DefaultInfo(out)] + +cp = rule(attrs = {"src": attrs.source()}, impl = _impl_cp) diff --git a/tests/core/subscribe/test_subscribe_data/prelude.bzl b/tests/core/subscribe/test_subscribe_data/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/subscribe/test_subscribe_data/src b/tests/core/subscribe/test_subscribe_data/src new file mode 100644 index 0000000000000..85504b36b9ed7 --- /dev/null +++ b/tests/core/subscribe/test_subscribe_data/src @@ -0,0 +1 @@ +some src file diff --git a/tests/core/target_graph/BUCK b/tests/core/target_graph/BUCK new file mode 100644 index 0000000000000..829591902c9f3 --- /dev/null +++ b/tests/core/target_graph/BUCK @@ -0,0 +1,16 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_visibility_from_package", + srcs = ["test_visibility_from_package.py"], + data_dir = "test_visibility_from_package_data", + deps = ["fbcode//buck2/tests/e2e_util:golden"], +) + +buck2_e2e_test( + name = "test_within_view", + srcs = ["test_within_view.py"], + data_dir = "test_within_view_data", +) diff --git a/tests/core/target_graph/test_visibility_from_package.py b/tests/core/target_graph/test_visibility_from_package.py new file mode 100644 index 0000000000000..76ab03331a93b --- /dev/null +++ b/tests/core/target_graph/test_visibility_from_package.py @@ -0,0 +1,57 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.golden import golden + + +@buck_test() +async def test_visibility_from_package_simple(buck: Buck) -> None: + result = await buck.uquery( + "root//simple:", "--output-attribute=visibility|within_view" + ) + golden( + output=result.stdout, + rel_path="simple/golden.uquery.json", + ) + + +@buck_test() +async def test_visibility_from_package_inherit(buck: Buck) -> None: + result = await buck.uquery( + "root//inherit/...", "--output-attribute=visibility|within_view" + ) + golden( + output=result.stdout, + rel_path="inherit/golden.uquery.json", + ) + + +@buck_test() +async def test_visibility_from_package_override(buck: Buck) -> None: + result = await buck.uquery( + "root//override/...", "--output-attribute=visibility|within_view" + ) + golden( + output=result.stdout, + rel_path="override/golden.uquery.json", + ) + + +@buck_test() +async def test_visibility_from_package_public(buck: Buck) -> None: + result = await buck.uquery( + "root//public/...", "--output-attribute=visibility|within_view" + ) + golden( + output=result.stdout, + rel_path="public/golden.uquery.json", + ) diff --git a/tests/core/target_graph/test_visibility_from_package_data/.buckconfig b/tests/core/target_graph/test_visibility_from_package_data/.buckconfig new file mode 100644 index 0000000000000..7078304680646 --- /dev/null +++ b/tests/core/target_graph/test_visibility_from_package_data/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] + name = TARGETS.fixture + +[repositories] + root = . + prelude = . 
diff --git a/tests/core/target_graph/test_visibility_from_package_data/.buckroot b/tests/core/target_graph/test_visibility_from_package_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/target_graph/test_visibility_from_package_data/inherit/a/TARGETS.fixture b/tests/core/target_graph/test_visibility_from_package_data/inherit/a/TARGETS.fixture new file mode 100644 index 0000000000000..0fd7593a6536f --- /dev/null +++ b/tests/core/target_graph/test_visibility_from_package_data/inherit/a/TARGETS.fixture @@ -0,0 +1,35 @@ +myr( + name = "inherit-visibility", +) + +myr( + name = "override-to-default", + visibility = [], + within_view = [], +) + +myr( + name = "override-to-none", + visibility = None, + within_view = None, +) + +myr( + name = "override-non-default", + visibility = [ + "//:", + ], + within_view = [ + "//:", + ], +) + +myr( + name = "override-to-public", + visibility = [ + "PUBLIC", + ], + within_view = [ + "PUBLIC", + ], +) diff --git a/tests/core/target_graph/test_visibility_from_package_data/inherit/golden.uquery.json b/tests/core/target_graph/test_visibility_from_package_data/inherit/golden.uquery.json new file mode 100644 index 0000000000000..8554ddc36ce0c --- /dev/null +++ b/tests/core/target_graph/test_visibility_from_package_data/inherit/golden.uquery.json @@ -0,0 +1,46 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "prelude//inherit/a:inherit-visibility": { + "visibility": [ + "prelude//in-parent/...", + "prelude//in-child/..." + ], + "within_view": [ + "prelude//in-parent/...", + "prelude//in-child/..." + ] + }, + "prelude//inherit/a:override-non-default": { + "visibility": [ + "prelude//:" + ], + "within_view": [ + "prelude//:" + ] + }, + "prelude//inherit/a:override-to-default": { + "visibility": [], + "within_view": [ + "PUBLIC" + ] + }, + "prelude//inherit/a:override-to-none": { + "visibility": [ + "prelude//in-parent/...", + "prelude//in-child/..." + ], + "within_view": [ + "prelude//in-parent/...", + "prelude//in-child/..." 
+ ] + }, + "prelude//inherit/a:override-to-public": { + "visibility": [ + "PUBLIC" + ], + "within_view": [ + "PUBLIC" + ] + } +} diff --git a/tests/core/target_graph/test_visibility_from_package_data/override/a/TARGETS.fixture b/tests/core/target_graph/test_visibility_from_package_data/override/a/TARGETS.fixture new file mode 100644 index 0000000000000..0fd7593a6536f --- /dev/null +++ b/tests/core/target_graph/test_visibility_from_package_data/override/a/TARGETS.fixture @@ -0,0 +1,35 @@ +myr( + name = "inherit-visibility", +) + +myr( + name = "override-to-default", + visibility = [], + within_view = [], +) + +myr( + name = "override-to-none", + visibility = None, + within_view = None, +) + +myr( + name = "override-non-default", + visibility = [ + "//:", + ], + within_view = [ + "//:", + ], +) + +myr( + name = "override-to-public", + visibility = [ + "PUBLIC", + ], + within_view = [ + "PUBLIC", + ], +) diff --git a/tests/core/target_graph/test_visibility_from_package_data/override/golden.uquery.json b/tests/core/target_graph/test_visibility_from_package_data/override/golden.uquery.json new file mode 100644 index 0000000000000..2c40bb707db1b --- /dev/null +++ b/tests/core/target_graph/test_visibility_from_package_data/override/golden.uquery.json @@ -0,0 +1,42 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "prelude//override/a:inherit-visibility": { + "visibility": [ + "prelude//in-child/..." + ], + "within_view": [ + "prelude//in-child/..." + ] + }, + "prelude//override/a:override-non-default": { + "visibility": [ + "prelude//:" + ], + "within_view": [ + "prelude//:" + ] + }, + "prelude//override/a:override-to-default": { + "visibility": [], + "within_view": [ + "PUBLIC" + ] + }, + "prelude//override/a:override-to-none": { + "visibility": [ + "prelude//in-child/..." + ], + "within_view": [ + "prelude//in-child/..." + ] + }, + "prelude//override/a:override-to-public": { + "visibility": [ + "PUBLIC" + ], + "within_view": [ + "PUBLIC" + ] + } +} diff --git a/tests/core/target_graph/test_visibility_from_package_data/prelude.bzl b/tests/core/target_graph/test_visibility_from_package_data/prelude.bzl new file mode 100644 index 0000000000000..0d1802aa3fef0 --- /dev/null +++ b/tests/core/target_graph/test_visibility_from_package_data/prelude.bzl @@ -0,0 +1,17 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +def _myr(ctx): + _ = ctx # @unused + return [DefaultInfo()] + +myr = rule( + impl = _myr, + attrs = { + "deps": attrs.list(attrs.dep(), default = []), + }, +) diff --git a/tests/core/target_graph/test_visibility_from_package_data/public/TARGETS.fixture b/tests/core/target_graph/test_visibility_from_package_data/public/TARGETS.fixture new file mode 100644 index 0000000000000..0fd7593a6536f --- /dev/null +++ b/tests/core/target_graph/test_visibility_from_package_data/public/TARGETS.fixture @@ -0,0 +1,35 @@ +myr( + name = "inherit-visibility", +) + +myr( + name = "override-to-default", + visibility = [], + within_view = [], +) + +myr( + name = "override-to-none", + visibility = None, + within_view = None, +) + +myr( + name = "override-non-default", + visibility = [ + "//:", + ], + within_view = [ + "//:", + ], +) + +myr( + name = "override-to-public", + visibility = [ + "PUBLIC", + ], + within_view = [ + "PUBLIC", + ], +) diff --git a/tests/core/target_graph/test_visibility_from_package_data/public/golden.uquery.json b/tests/core/target_graph/test_visibility_from_package_data/public/golden.uquery.json new file mode 100644 index 0000000000000..07eade02f185a --- /dev/null +++ b/tests/core/target_graph/test_visibility_from_package_data/public/golden.uquery.json @@ -0,0 +1,42 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "prelude//public:inherit-visibility": { + "visibility": [ + "PUBLIC" + ], + "within_view": [ + "PUBLIC" + ] + }, + "prelude//public:override-non-default": { + "visibility": [ + "prelude//:" + ], + "within_view": [ + "prelude//:" + ] + }, + "prelude//public:override-to-default": { + "visibility": [], + "within_view": [ + "PUBLIC" + ] + }, + "prelude//public:override-to-none": { + "visibility": [ + "PUBLIC" + ], + "within_view": [ + "PUBLIC" + ] + }, + "prelude//public:override-to-public": { + "visibility": [ + "PUBLIC" + ], + "within_view": [ + "PUBLIC" + ] + } +} diff --git a/tests/core/target_graph/test_visibility_from_package_data/simple/TARGETS.fixture b/tests/core/target_graph/test_visibility_from_package_data/simple/TARGETS.fixture new file mode 100644 index 0000000000000..0fd7593a6536f --- /dev/null +++ b/tests/core/target_graph/test_visibility_from_package_data/simple/TARGETS.fixture @@ -0,0 +1,35 @@ +myr( + name = "inherit-visibility", +) + +myr( + name = "override-to-default", + visibility = [], + within_view = [], +) + +myr( + name = "override-to-none", + visibility = None, + within_view = None, +) + +myr( + name = "override-non-default", + visibility = [ + "//:", + ], + within_view = [ + "//:", + ], +) + +myr( + name = "override-to-public", + visibility = [ + "PUBLIC", + ], + within_view = [ + "PUBLIC", + ], +) diff --git a/tests/core/target_graph/test_visibility_from_package_data/simple/golden.uquery.json b/tests/core/target_graph/test_visibility_from_package_data/simple/golden.uquery.json new file mode 100644 index 0000000000000..dab100af10cbe --- /dev/null +++ b/tests/core/target_graph/test_visibility_from_package_data/simple/golden.uquery.json @@ -0,0 +1,42 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "prelude//simple:inherit-visibility": { + "visibility": [ + "prelude//from-package/..." + ], + "within_view": [ + "prelude//from-package/..." 
+ ] + }, + "prelude//simple:override-non-default": { + "visibility": [ + "prelude//:" + ], + "within_view": [ + "prelude//:" + ] + }, + "prelude//simple:override-to-default": { + "visibility": [], + "within_view": [ + "PUBLIC" + ] + }, + "prelude//simple:override-to-none": { + "visibility": [ + "prelude//from-package/..." + ], + "within_view": [ + "prelude//from-package/..." + ] + }, + "prelude//simple:override-to-public": { + "visibility": [ + "PUBLIC" + ], + "within_view": [ + "PUBLIC" + ] + } +} diff --git a/tests/core/target_graph/test_within_view.py b/tests/core/target_graph/test_within_view.py new file mode 100644 index 0000000000000..56e6688c6ed14 --- /dev/null +++ b/tests/core/target_graph/test_within_view.py @@ -0,0 +1,21 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_within_view(buck: Buck) -> None: + await expect_failure( + buck.targets("//..."), + stderr_regex="Target's `within_view` attribute does not allow dependency `prelude//a:a`", + ) diff --git a/tests/core/target_graph/test_within_view_data/.buckconfig b/tests/core/target_graph/test_within_view_data/.buckconfig new file mode 100644 index 0000000000000..7078304680646 --- /dev/null +++ b/tests/core/target_graph/test_within_view_data/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] + name = TARGETS.fixture + +[repositories] + root = . + prelude = . diff --git a/tests/core/target_graph/test_within_view_data/.buckroot b/tests/core/target_graph/test_within_view_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/target_graph/test_within_view_data/a/TARGETS.fixture b/tests/core/target_graph/test_within_view_data/a/TARGETS.fixture new file mode 100644 index 0000000000000..0cbc363c60d46 --- /dev/null +++ b/tests/core/target_graph/test_within_view_data/a/TARGETS.fixture @@ -0,0 +1,4 @@ +test_within_view_rule( + name = "a", + deps = [], +) diff --git a/tests/core/target_graph/test_within_view_data/b/TARGETS.fixture b/tests/core/target_graph/test_within_view_data/b/TARGETS.fixture new file mode 100644 index 0000000000000..2fbe2e0af3167 --- /dev/null +++ b/tests/core/target_graph/test_within_view_data/b/TARGETS.fixture @@ -0,0 +1,10 @@ +test_within_view_rule( + name = "b", + deps = [ + # This is not within view, so it should fail. + "//a:a", + ], + within_view = [ + "//c/...", + ], +) diff --git a/tests/core/target_graph/test_within_view_data/prelude.bzl b/tests/core/target_graph/test_within_view_data/prelude.bzl new file mode 100644 index 0000000000000..7c982abf5c3ee --- /dev/null +++ b/tests/core/target_graph/test_within_view_data/prelude.bzl @@ -0,0 +1,13 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +test_within_view_rule = rule( + impl = lambda ctx: fail("we don't run analysis in this test, {}".format(ctx)), + attrs = { + "deps": attrs.list(attrs.dep()), + }, +) diff --git a/tests/core/targets_command/BUCK b/tests/core/targets_command/BUCK new file mode 100644 index 0000000000000..7653045f9e46a --- /dev/null +++ b/tests/core/targets_command/BUCK @@ -0,0 +1,48 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_call_stacks", + srcs = ["test_call_stacks.py"], + data_dir = "test_call_stacks_data", +) + +buck2_e2e_test( + name = "test_skip_targets_with_duplicate_names", + srcs = ["test_skip_targets_with_duplicate_names.py"], + data_dir = "test_skip_targets_with_duplicate_names_data", +) + +buck2_e2e_test( + name = "test_target_metadata", + srcs = ["test_target_metadata.py"], + data_dir = "test_target_metadata_data", + deps = [ + "//buck2/tests/e2e_util:golden", + ], +) + +buck2_e2e_test( + name = "test_targets_imports", + srcs = ["test_targets_imports.py"], + data_dir = "test_targets_imports_data", +) + +buck2_e2e_test( + name = "test_targets_keep_going", + srcs = ["test_targets_keep_going.py"], + data_dir = "test_targets_keep_going_data", +) + +buck2_e2e_test( + name = "test_recursive", + srcs = ["test_recursive.py"], + data_dir = "test_recursive_data", +) + +buck2_e2e_test( + name = "test_target_hashing", + srcs = ["test_target_hashing.py"], + data_dir = "test_target_hashing_data", +) diff --git a/tests/core/targets_command/test_call_stacks.py b/tests/core/targets_command/test_call_stacks.py new file mode 100644 index 0000000000000..eb8142a582ee1 --- /dev/null +++ b/tests/core/targets_command/test_call_stacks.py @@ -0,0 +1,27 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_target_call_stacks_json(buck: Buck) -> None: + out = await buck.targets( + "--stack", + "--output-attribute=.*", + "root//:test", + ) + + out = json.loads(out.stdout) + call_stack = out[0]["buck.target_call_stack"] + assert "stub" in call_stack diff --git a/tests/core/targets_command/test_call_stacks_data/.buckconfig b/tests/core/targets_command/test_call_stacks_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/targets_command/test_call_stacks_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . 
+ nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/targets_command/test_call_stacks_data/.buckroot b/tests/core/targets_command/test_call_stacks_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/targets_command/test_call_stacks_data/TARGETS.fixture b/tests/core/targets_command/test_call_stacks_data/TARGETS.fixture new file mode 100644 index 0000000000000..cf2d114457c4c --- /dev/null +++ b/tests/core/targets_command/test_call_stacks_data/TARGETS.fixture @@ -0,0 +1 @@ +stub(name = "test") diff --git a/tests/core/targets_command/test_recursive.py b/tests/core/targets_command/test_recursive.py new file mode 100644 index 0000000000000..5270144ea43cd --- /dev/null +++ b/tests/core/targets_command/test_recursive.py @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +import json + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_targets_recursive(buck: Buck) -> None: + result = await buck.targets("--json", "ignored/...") + assert json.loads(result.stdout) == [] + + await expect_failure(buck.targets("--json", "nonexistent/...")) diff --git a/tests/core/targets_command/test_recursive_data/.buckconfig b/tests/core/targets_command/test_recursive_data/.buckconfig new file mode 100644 index 0000000000000..b5744687e5abf --- /dev/null +++ b/tests/core/targets_command/test_recursive_data/.buckconfig @@ -0,0 +1,15 @@ +[cells] + root = . + nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture + +[project] + ignore = ignored diff --git a/tests/core/targets_command/test_recursive_data/.buckroot b/tests/core/targets_command/test_recursive_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/targets_command/test_recursive_data/ignored/TARGETS.fixture b/tests/core/targets_command/test_recursive_data/ignored/TARGETS.fixture new file mode 100644 index 0000000000000..98cab7c38fd2a --- /dev/null +++ b/tests/core/targets_command/test_recursive_data/ignored/TARGETS.fixture @@ -0,0 +1 @@ +stub(name = "some_target") diff --git a/tests/core/targets_command/test_skip_targets_with_duplicate_names.py b/tests/core/targets_command/test_skip_targets_with_duplicate_names.py new file mode 100644 index 0000000000000..7dc2101de72f7 --- /dev/null +++ b/tests/core/targets_command/test_skip_targets_with_duplicate_names.py @@ -0,0 +1,34 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
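# A note on the `test_recursive` fixture above: `[project] ignore = ignored`
# means a recursive pattern over the ignored directory expands to nothing
# rather than erroring, while a pattern over a directory that does not exist
# at all still fails. A rough sketch of that expansion (hypothetical helper,
# plain prefix matching instead of real globs):
def expand_recursive(known_packages: list, ignored_dirs: list) -> list:
    def is_ignored(pkg: str) -> bool:
        return any(pkg == d or pkg.startswith(d + "/") for d in ignored_dirs)
    return [p for p in known_packages if not is_ignored(p)]

# "ignored/..." yields an empty target set, matching the `== []` assertion.
assert expand_recursive(["ignored"], ["ignored"]) == []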
+ +# pyre-strict + + +import re + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_skip_targets_with_duplicate_names_without_flag(buck: Buck) -> None: + await expect_failure( + buck.targets("//..."), + stderr_regex="Attempted to register target prelude//:aa twice", + ) + + +@buck_test() +async def test_skip_targets_with_duplicate_names_with_flag(buck: Buck) -> None: + result = await buck.targets("//...", "--skip-targets-with-duplicate-names") + assert [ + "prelude//:aa", + "prelude//:bb", + ] == result.stdout.splitlines() + assert re.search("Attempted to register target prelude//:aa twice", result.stderr) + assert re.search("Attempted to register target prelude//:bb twice", result.stderr) diff --git a/tests/core/targets_command/test_skip_targets_with_duplicate_names_data/.buckconfig b/tests/core/targets_command/test_skip_targets_with_duplicate_names_data/.buckconfig new file mode 100644 index 0000000000000..7078304680646 --- /dev/null +++ b/tests/core/targets_command/test_skip_targets_with_duplicate_names_data/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] + name = TARGETS.fixture + +[repositories] + root = . + prelude = . diff --git a/tests/core/targets_command/test_skip_targets_with_duplicate_names_data/.buckroot b/tests/core/targets_command/test_skip_targets_with_duplicate_names_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/targets_command/test_skip_targets_with_duplicate_names_data/TARGETS.fixture b/tests/core/targets_command/test_skip_targets_with_duplicate_names_data/TARGETS.fixture new file mode 100644 index 0000000000000..f1abbf41a5115 --- /dev/null +++ b/tests/core/targets_command/test_skip_targets_with_duplicate_names_data/TARGETS.fixture @@ -0,0 +1,23 @@ +clay( + name = "aa", +) + +clay( + name = "aa", +) + +clay( + name = "bb", +) + +clay( + name = "aa", +) + +clay( + name = "bb", +) + +clay( + name = "aa", +) diff --git a/tests/core/targets_command/test_skip_targets_with_duplicate_names_data/prelude.bzl b/tests/core/targets_command/test_skip_targets_with_duplicate_names_data/prelude.bzl new file mode 100644 index 0000000000000..bbc9f909291fa --- /dev/null +++ b/tests/core/targets_command/test_skip_targets_with_duplicate_names_data/prelude.bzl @@ -0,0 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +clay = rule(impl = lambda _: fail("not needed in test"), attrs = {}) diff --git a/tests/core/targets_command/test_target_hashing.py b/tests/core/targets_command/test_target_hashing.py new file mode 100644 index 0000000000000..04bcdbb7d85f7 --- /dev/null +++ b/tests/core/targets_command/test_target_hashing.py @@ -0,0 +1,50 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
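# Before the hashing tests, a quick model of the duplicate-name behavior the
# previous fixture exercises: with --skip-targets-with-duplicate-names the
# first declaration of a name wins and each later duplicate is reported. A
# minimal sketch under that assumption (hypothetical helper, not the real
# registration code):
def dedupe_targets(declared: list) -> tuple:
    kept, warnings = [], []
    for name in declared:
        if name in kept:
            warnings.append("Attempted to register target %s twice" % name)
        else:
            kept.append(name)
    return kept, warnings

kept, warnings = dedupe_targets(["aa", "aa", "bb", "aa", "bb", "aa"])
assert kept == ["aa", "bb"] and len(warnings) == 4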
+
+import os
+from pathlib import Path
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.asserts import expect_failure
+from buck2.tests.e2e_util.buck_workspace import buck_test
+
+
+@buck_test()
+async def test_target_hashing_accepts_backreferencing_relative_paths(
+    buck: Buck,
+    tmp_path: Path,
+) -> None:
+    await buck.targets(
+        ":bin",
+        "--show-target-hash",
+        "--target-hash-file-mode=paths_only",
+        "--target-hash-modified-paths=../.buckconfig",
+        rel_cwd=Path("bin"),
+    )
+
+    # Paths outside of the project still fail
+    await expect_failure(
+        buck.targets(
+            "bin:bin",
+            "--show-target-hash",
+            "--target-hash-file-mode=paths_only",
+            "--target-hash-modified-paths=../.buckconfig",
+        ),
+        stderr_regex="relativize path.*against project root",
+    )
+
+    if os.name != "nt":
+        # Absolute, non-normalized paths should also work
+        (tmp_path / "symlink").symlink_to(buck.cwd)
+
+        await buck.targets(
+            ":bin",
+            "--show-target-hash",
+            "--target-hash-file-mode=paths_only",
+            f"--target-hash-modified-paths={tmp_path}/symlink/.buckconfig",
+            rel_cwd=Path("bin"),
+        )
diff --git a/tests/core/targets_command/test_target_hashing_data/.buckconfig b/tests/core/targets_command/test_target_hashing_data/.buckconfig
new file mode 100644
index 0000000000000..09556211287db
--- /dev/null
+++ b/tests/core/targets_command/test_target_hashing_data/.buckconfig
@@ -0,0 +1,12 @@
+[cells]
+ root = .
+ nano_prelude = nano_prelude
+
+[cell_aliases]
+ prelude = nano_prelude
+
+[external_cells]
+ nano_prelude = bundled
+
+[buildfile]
+ name = TARGETS.fixture
diff --git a/tests/core/targets_command/test_target_hashing_data/.buckroot b/tests/core/targets_command/test_target_hashing_data/.buckroot
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/targets_command/test_target_hashing_data/bin/TARGETS.fixture b/tests/core/targets_command/test_target_hashing_data/bin/TARGETS.fixture
new file mode 100644
index 0000000000000..e908604cc60d2
--- /dev/null
+++ b/tests/core/targets_command/test_target_hashing_data/bin/TARGETS.fixture
@@ -0,0 +1 @@
+stub(name = "bin")
diff --git a/tests/core/targets_command/test_target_metadata.py b/tests/core/targets_command/test_target_metadata.py
new file mode 100644
index 0000000000000..44774ae68d1b6
--- /dev/null
+++ b/tests/core/targets_command/test_target_metadata.py
@@ -0,0 +1,26 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.buck_workspace import buck_test
+from buck2.tests.e2e_util.helper.golden import golden
+
+
+@buck_test()
+async def test_metadata(buck: Buck) -> None:
+    stdout = (
+        await buck.targets(
+            "//...", "--keep-going", "-a", "^metadata|buck.package|name$"
+        )
+    ).stdout
+    golden(
+        output=stdout,
+        rel_path="test_metadata.golden.json",
+    )
diff --git a/tests/core/targets_command/test_target_metadata_data/.buckconfig b/tests/core/targets_command/test_target_metadata_data/.buckconfig
new file mode 100644
index 0000000000000..7078304680646
--- /dev/null
+++ b/tests/core/targets_command/test_target_metadata_data/.buckconfig
@@ -0,0 +1,6 @@
+[buildfile]
+ name = TARGETS.fixture
+
+[repositories]
+ root = .
+ prelude = .
diff --git a/tests/core/targets_command/test_target_metadata_data/.buckroot b/tests/core/targets_command/test_target_metadata_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/targets_command/test_target_metadata_data/invalid_key/TARGETS.fixture b/tests/core/targets_command/test_target_metadata_data/invalid_key/TARGETS.fixture new file mode 100644 index 0000000000000..3807f622634d1 --- /dev/null +++ b/tests/core/targets_command/test_target_metadata_data/invalid_key/TARGETS.fixture @@ -0,0 +1 @@ +stub(name = "invalid_key", metadata = {"bad": 123}) diff --git a/tests/core/targets_command/test_target_metadata_data/invalid_type/TARGETS.fixture b/tests/core/targets_command/test_target_metadata_data/invalid_type/TARGETS.fixture new file mode 100644 index 0000000000000..229d9124a48b5 --- /dev/null +++ b/tests/core/targets_command/test_target_metadata_data/invalid_type/TARGETS.fixture @@ -0,0 +1 @@ +stub(name = "invalid_key", metadata = 123) diff --git a/tests/core/targets_command/test_target_metadata_data/ok/TARGETS.fixture b/tests/core/targets_command/test_target_metadata_data/ok/TARGETS.fixture new file mode 100644 index 0000000000000..59a09acf82e1a --- /dev/null +++ b/tests/core/targets_command/test_target_metadata_data/ok/TARGETS.fixture @@ -0,0 +1 @@ +stub(name = "ok", metadata = {"bar.baz": {"stuff": 456}, "foo.bar": 123}) diff --git a/tests/core/targets_command/test_target_metadata_data/prelude.bzl b/tests/core/targets_command/test_target_metadata_data/prelude.bzl new file mode 100644 index 0000000000000..79305806ea57b --- /dev/null +++ b/tests/core/targets_command/test_target_metadata_data/prelude.bzl @@ -0,0 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
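# The invalid_key and invalid_type fixtures above probe `metadata` coercion;
# per the golden output below, the value must be a dict and every key must
# contain exactly one dot. A sketch of that validation in plain Python
# (hypothetical function, not Buck2's coercer):
def check_metadata(metadata) -> None:
    if not isinstance(metadata, dict):
        raise TypeError("Expected value of type `dict`, got `%s`" % type(metadata).__name__)
    for key in metadata:
        if key.count(".") != 1:
            raise ValueError("key must contain exactly one dot: `%s`" % key)

# The `ok` fixture's keys each have one dot, so this passes silently.
check_metadata({"bar.baz": {"stuff": 456}, "foo.bar": 123})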
+ +stub = rule(impl = lambda _: [DefaultInfo()], attrs = {}) diff --git a/tests/core/targets_command/test_target_metadata_data/select/TARGETS.fixture b/tests/core/targets_command/test_target_metadata_data/select/TARGETS.fixture new file mode 100644 index 0000000000000..c45efed85e7e8 --- /dev/null +++ b/tests/core/targets_command/test_target_metadata_data/select/TARGETS.fixture @@ -0,0 +1 @@ +stub(name = "select", metadata = select({"DEFAULT": {}})) diff --git a/tests/core/targets_command/test_target_metadata_data/test_metadata.golden.json b/tests/core/targets_command/test_target_metadata_data/test_metadata.golden.json new file mode 100644 index 0000000000000..64a7c6b8e413d --- /dev/null +++ b/tests/core/targets_command/test_target_metadata_data/test_metadata.golden.json @@ -0,0 +1,21 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +[ + { + "buck.package":"prelude//invalid_key", + "buck.error":"Error evaluating build file: `prelude//invalid_key:TARGETS.fixture`\n\nCaused by:\n 0: Traceback (most recent call last):\n * invalid_key/TARGETS.fixture:1, in \n stub(name = \"invalid_key\", metadata = {\"bad\": 123})\n error: Error coercing attribute `metadata` of `prelude//invalid_key:invalid_key`\n --> invalid_key/TARGETS.fixture:1:1\n |\n 1 | stub(name = \"invalid_key\", metadata = {\"bad\": 123})\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n |\n \n 1: Error coercing attribute `metadata` of type `attrs.metadata(default={})`\n 2: Error coercing {\"bad\": 123}\n 3: key must contain exactly one dot: `bad`" + }, + { + "buck.package":"prelude//invalid_type", + "buck.error":"Error evaluating build file: `prelude//invalid_type:TARGETS.fixture`\n\nCaused by:\n 0: Traceback (most recent call last):\n * invalid_type/TARGETS.fixture:1, in \n stub(name = \"invalid_key\", metadata = 123)\n error: Error coercing attribute `metadata` of `prelude//invalid_type:invalid_key`\n --> invalid_type/TARGETS.fixture:1:1\n |\n 1 | stub(name = \"invalid_key\", metadata = 123)\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n |\n \n 1: Error coercing attribute `metadata` of type `attrs.metadata(default={})`\n 2: Error coercing 123\n 3: Expected value of type `dict`, got value with type `int` (value was `123`)" + }, + { + "buck.package":"prelude//ok", + "name":"ok", + "metadata":{"bar.baz":{"stuff":456},"foo.bar":123} + }, + { + "buck.package":"prelude//select", + "buck.error":"Error evaluating build file: `prelude//select:TARGETS.fixture`\n\nCaused by:\n 0: Traceback (most recent call last):\n * select/TARGETS.fixture:1, in \n stub(name = \"select\", metadata = select({\"DEFAULT\": {}}))\n error: Error coercing attribute `metadata` of `prelude//select:select`\n --> select/TARGETS.fixture:1:1\n |\n 1 | stub(name = \"select\", metadata = select({\"DEFAULT\": {}}))\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n |\n \n 1: Error coercing attribute `metadata` of type `attrs.metadata(default={})`\n 2: select() cannot be used in non-configurable attribute" + } +] diff --git a/tests/core/targets_command/test_targets_imports.py b/tests/core/targets_command/test_targets_imports.py new file mode 100644 index 0000000000000..e85a32e326ba0 --- /dev/null +++ b/tests/core/targets_command/test_targets_imports.py @@ -0,0 +1,42 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test() +async def test_imports(buck: Buck) -> None: + result = await buck.targets("//...", "--json", "--streaming", "--imports") + xs = json.loads(result.stdout) + found = 0 + for x in xs: + if "buck.imports" in x: + if x["buck.file"] == "root//TARGETS.fixture": + assert x["buck.package"] == "root//" + assert x["buck.imports"] == ["prelude//prelude.bzl", "root//a.bzl"] + found += 1 + elif x["buck.file"] == "root//a.bzl": + assert x["buck.imports"] == [ + "prelude//prelude.bzl", + "root//b.bzl", + ] + assert "buck.package" not in x + found += 1 + elif x["buck.file"] == "root//PACKAGE": + assert x["buck.imports"] == [ + "prelude//prelude.bzl", + "root//b.bzl", + ] + assert "buck.package" not in x + found += 1 + assert found == 3 diff --git a/tests/core/targets_command/test_targets_imports_data/.buckconfig b/tests/core/targets_command/test_targets_imports_data/.buckconfig new file mode 100644 index 0000000000000..cb37c990c90cb --- /dev/null +++ b/tests/core/targets_command/test_targets_imports_data/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . +prelude = prelude diff --git a/tests/core/targets_command/test_targets_imports_data/.buckroot b/tests/core/targets_command/test_targets_imports_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/targets_command/test_targets_imports_data/TARGETS.fixture b/tests/core/targets_command/test_targets_imports_data/TARGETS.fixture new file mode 100644 index 0000000000000..2165e33f23031 --- /dev/null +++ b/tests/core/targets_command/test_targets_imports_data/TARGETS.fixture @@ -0,0 +1,3 @@ +load(":a.bzl", "test") + +test() diff --git a/tests/core/targets_command/test_targets_imports_data/a.bzl b/tests/core/targets_command/test_targets_imports_data/a.bzl new file mode 100644 index 0000000000000..e55c9cf227aea --- /dev/null +++ b/tests/core/targets_command/test_targets_imports_data/a.bzl @@ -0,0 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load(":b.bzl", _test = "test") + +test = _test diff --git a/tests/core/targets_command/test_targets_imports_data/b.bzl b/tests/core/targets_command/test_targets_imports_data/b.bzl new file mode 100644 index 0000000000000..f991d67e38366 --- /dev/null +++ b/tests/core/targets_command/test_targets_imports_data/b.bzl @@ -0,0 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
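# The `--streaming --imports` records checked in test_imports above carry one
# `buck.file` plus its direct `buck.imports`, so an import graph can be folded
# together record by record. An illustrative sketch, assuming only the JSON
# shape asserted in that test:
import json

def import_graph(stdout: str) -> dict:
    graph = {}
    for record in json.loads(stdout):
        if "buck.imports" in record:
            graph[record["buck.file"]] = record["buck.imports"]
    return graph

sample = '[{"buck.file": "root//a.bzl", "buck.imports": ["root//b.bzl"]}]'
assert import_graph(sample) == {"root//a.bzl": ["root//b.bzl"]}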
+
+def test():
+    pass
diff --git a/tests/core/targets_command/test_targets_imports_data/prelude/prelude.bzl b/tests/core/targets_command/test_targets_imports_data/prelude/prelude.bzl
new file mode 100644
index 0000000000000..a869e838b4c7c
--- /dev/null
+++ b/tests/core/targets_command/test_targets_imports_data/prelude/prelude.bzl
@@ -0,0 +1,6 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
diff --git a/tests/core/targets_command/test_targets_keep_going.py b/tests/core/targets_command/test_targets_keep_going.py
new file mode 100644
index 0000000000000..18d2720ec9f38
--- /dev/null
+++ b/tests/core/targets_command/test_targets_keep_going.py
@@ -0,0 +1,69 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+
+import json
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.buck_workspace import buck_test
+
+
+@buck_test()
+async def test_keep_going_json(buck: Buck) -> None:
+    result = await buck.targets("//...", "--json", "--keep-going")
+    xs = json.loads(result.stdout)
+    # I expect six records, one of which is an error
+    assert len(xs) == 6
+    for x in xs:
+        if x["buck.package"] == "root//a":
+            assert x["name"].startswith("target")
+        else:
+            assert x["buck.package"] == "root//b"
+            assert "test_error" in x["buck.error"]
+
+
+@buck_test()
+async def test_keep_going(buck: Buck) -> None:
+    result = await buck.targets("//...", "--keep-going")
+    assert "test_error" in result.stderr
+
+
+@buck_test()
+async def test_keep_going_streaming(buck: Buck) -> None:
+    result = await buck.targets("//...", "--streaming", "--keep-going")
+    assert "test_error" in result.stderr
+
+
+@buck_test()
+async def test_streaming_keep_going_missing_targets(buck: Buck) -> None:
+    targets = [
+        "//a:target1",
+        "//a:target2",
+        "//a:bogus_target",
+        "//a:worse_target",
+        "//a:target5",
+        "//c:bogus_package",
+    ]
+    result = await buck.targets(*targets, "--json", "--streaming", "--keep-going")
+    xs = json.loads(result.stdout)
+    assert len(xs) == 5  # 3 success, 2 errors
+    bad_packages = []
+    good_targets = []
+    for x in xs:
+        if "buck.error" in x:
+            bad_packages.append(x["buck.package"])
+            if x["buck.package"] == "root//a":
+                assert "`bogus_target`" in x["buck.error"]
+                assert "`worse_target`" in x["buck.error"]
+        else:
+            good_targets.append(x["name"])
+    bad_packages.sort()
+    good_targets.sort()
+    assert bad_packages == ["root//a", "root//c"]
+    assert good_targets == ["target1", "target2", "target5"]
diff --git a/tests/core/targets_command/test_targets_keep_going_data/.buckconfig b/tests/core/targets_command/test_targets_keep_going_data/.buckconfig
new file mode 100644
index 0000000000000..cb37c990c90cb
--- /dev/null
+++ b/tests/core/targets_command/test_targets_keep_going_data/.buckconfig
@@ -0,0 +1,6 @@
+[buildfile]
+name=TARGETS.fixture
+
+[repositories]
+root = .
+prelude = prelude diff --git a/tests/core/targets_command/test_targets_keep_going_data/.buckroot b/tests/core/targets_command/test_targets_keep_going_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/targets_command/test_targets_keep_going_data/a/TARGETS.fixture b/tests/core/targets_command/test_targets_keep_going_data/a/TARGETS.fixture new file mode 100644 index 0000000000000..c263919c120bd --- /dev/null +++ b/tests/core/targets_command/test_targets_keep_going_data/a/TARGETS.fixture @@ -0,0 +1,5 @@ +a_target(name = "target1") +a_target(name = "target2") +a_target(name = "target3") +a_target(name = "target4") +a_target(name = "target5") diff --git a/tests/core/targets_command/test_targets_keep_going_data/b/TARGETS.fixture b/tests/core/targets_command/test_targets_keep_going_data/b/TARGETS.fixture new file mode 100644 index 0000000000000..228f138bac069 --- /dev/null +++ b/tests/core/targets_command/test_targets_keep_going_data/b/TARGETS.fixture @@ -0,0 +1 @@ +fail("test_error") diff --git a/tests/core/targets_command/test_targets_keep_going_data/prelude/prelude.bzl b/tests/core/targets_command/test_targets_keep_going_data/prelude/prelude.bzl new file mode 100644 index 0000000000000..33f3aea43d475 --- /dev/null +++ b/tests/core/targets_command/test_targets_keep_going_data/prelude/prelude.bzl @@ -0,0 +1,11 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(_ctx): + return [DefaultInfo()] + +a_target = rule(attrs = {}, impl = _impl) diff --git a/tests/core/test/BUCK b/tests/core/test/BUCK new file mode 100644 index 0000000000000..788c6a12f4924 --- /dev/null +++ b/tests/core/test/BUCK @@ -0,0 +1,66 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_internal_runner", + srcs = ["test_internal_runner.py"], + data_dir = "test_internal_runner_data", + serialize_test_cases = False, + use_compiled_buck2_client_and_tpx = True, +) + +buck2_e2e_test( + name = "test_platform_resolution", + srcs = ["test_platform_resolution.py"], + data_dir = "test_platform_resolution_data", + serialize_test_cases = False, +) + +buck2_e2e_test( + name = "test_selection", + srcs = ["test_selection.py"], + data_dir = "test_selection_data", + serialize_test_cases = False, + use_compiled_buck2_client_and_tpx = True, +) + +buck2_e2e_test( + name = "test_listing", + srcs = ["test_listing.py"], + data_dir = "test_listing_data", + serialize_test_cases = False, + use_compiled_buck2_client_and_tpx = True, + deps = ["//buck2/tests/e2e_util:utils"], +) + +buck2_e2e_test( + name = "test_startup", + srcs = ["test_startup.py"], + data_dir = "test_startup_data", + serialize_test_cases = False, +) + +buck2_e2e_test( + name = "test_skip_incompatible_targets", + srcs = ["test_skip_incompatible_targets.py"], + data_dir = "test_skip_incompatible_targets_data", + serialize_test_cases = False, +) + +buck2_e2e_test( + name = "test_build_report", + srcs = ["test_build_report.py"], + data_dir = "test_build_report_data", + serialize_test_cases = False, + use_compiled_buck2_client_and_tpx = True, + deps = [ + "fbcode//buck2/tests/e2e_util:golden", + ], +) + +buck2_e2e_test( + name = "test_local_resources", + srcs = ["test_local_resources.py"], + data_dir = 
"test_local_resources_data", +) diff --git a/tests/core/test/test_build_report.py b/tests/core/test/test_build_report.py new file mode 100644 index 0000000000000..c45c4fa86c8ce --- /dev/null +++ b/tests/core/test/test_build_report.py @@ -0,0 +1,100 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json +import re +from pathlib import Path +from typing import List + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.golden import golden + + +def _sanitize(s: str) -> str: + # Simplify analysis error message (Can change due to line number changes) + s = re.sub( + r"Error running analysis for.*\"", 'Error running analysis for "', s + ) + # Simplify the Unknown target error (Can change due to number of targets in TARGETS.fixture) + s = re.sub( + r"Unknown target `.*` from package .*\"", + 'Unknown target `` from package "', + s, + ) + return s + + +def build_report_test(name: str, command: List[str], should_fail: bool) -> None: + async def impl(buck: Buck, tmp_path: Path) -> None: + report = tmp_path / "build-report.json" + if should_fail: + await expect_failure( + buck.test( + "--build-report", + str(report), + "--build-report-options", + "fill-out-failures", + *command + ) + ) + else: + await buck.test("--build-report", str(report), *command) + + with open(report) as file: + report = json.loads(file.read()) + del report["trace_id"] + del report["project_root"] + + # Build report errors can change based on minor test changes such as + # 1. Adding a target in TARGETS.fixture + # 2. Line number changing due to code moving around + # Sanitize so that we only check the important bits of the error message + golden( + output=_sanitize(json.dumps(report, indent=2, sort_keys=True)), + rel_path="fixtures/" + name + ".golden.json", + ) + pass + + globals()[name] = impl + + return buck_test()(impl) + + +build_report_test( + "test_build_report_format", + ["//:ok", "//:fail_test"], + True, +) + +build_report_test( + "test_build_report_skip_unconfigured", + ["//:ok", "-c", "build_report.print_unconfigured_section=false"], + False, +) + +build_report_test( + "test_failed_build_has_build_report", + ["//:fail_build1"], + True, +) + +build_report_test( + "test_target_doesnt_exist", + ["//:doesnt_exist"], + True, +) + +build_report_test( + "test_multiple_failures_included", + ["//:fail_build1", "//:fail_build2"], + True, +) diff --git a/tests/core/test/test_build_report_data/.buckconfig b/tests/core/test/test_build_report_data/.buckconfig new file mode 100644 index 0000000000000..08e0b0a3d2118 --- /dev/null +++ b/tests/core/test/test_build_report_data/.buckconfig @@ -0,0 +1,5 @@ +[cells] + root = . 
+ +[buildfile] + name=TARGETS.fixture diff --git a/tests/core/test/test_build_report_data/.buckroot b/tests/core/test/test_build_report_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/test/test_build_report_data/TARGETS.fixture b/tests/core/test/test_build_report_data/TARGETS.fixture new file mode 100644 index 0000000000000..d5f7c3d31d336 --- /dev/null +++ b/tests/core/test/test_build_report_data/TARGETS.fixture @@ -0,0 +1,6 @@ +load(":defs.bzl", "fail_build1", "fail_build2", "fail_test", "ok_test") + +ok_test(name = "ok") +fail_test(name = "fail_test") +fail_build1(name = "fail_build1") +fail_build2(name = "fail_build2") diff --git a/tests/core/test/test_build_report_data/defs.bzl b/tests/core/test/test_build_report_data/defs.bzl new file mode 100644 index 0000000000000..6b49dbfef66dc --- /dev/null +++ b/tests/core/test/test_build_report_data/defs.bzl @@ -0,0 +1,41 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl_ok(_ctx): + return [ + DefaultInfo(), + ExternalRunnerTestInfo( + command = ["python3", "-c", "import sys; sys.exit(0)"], + type = "custom", + ), + ] + +ok_test = rule(attrs = {}, impl = _impl_ok) + +def _impl_fail(_ctx): + return [ + DefaultInfo(), + ExternalRunnerTestInfo( + command = ["python3", "-c", "import sys; sys.exit(1)"], + type = "custom", + ), + ] + +fail_test = rule(attrs = {}, impl = _impl_fail) + +def _impl_build_fail1(ctx): + ctx.actions.run(cmd_args("false"), category = "fail1") + return [DefaultInfo(default_outputs = [])] + +fail_build1 = rule(attrs = {}, impl = _impl_build_fail1) + +def _impl_build_fail2(ctx): + output = ctx.actions.declare_file("fail2") + ctx.actions.run(cmd_args("false"), category = "fail2") + return [DefaultInfo(default_outputs = [output])] + +fail_build2 = rule(attrs = {}, impl = _impl_build_fail2) diff --git a/tests/core/test/test_build_report_data/fixtures/test_build_report_format.golden.json b/tests/core/test/test_build_report_data/fixtures/test_build_report_format.golden.json new file mode 100644 index 0000000000000..66adf0e0199cc --- /dev/null +++ b/tests/core/test/test_build_report_data/fixtures/test_build_report_format.golden.json @@ -0,0 +1,42 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "failures": {}, + "results": { + "root//:fail_test": { + "configured": { + "": { + "configured_graph_size": null, + "errors": [], + "other_outputs": {}, + "outputs": {}, + "success": "SUCCESS" + } + }, + "configured_graph_size": null, + "errors": [], + "other_outputs": {}, + "outputs": {}, + "success": "SUCCESS" + }, + "root//:ok": { + "configured": { + "": { + "configured_graph_size": null, + "errors": [], + "other_outputs": {}, + "outputs": {}, + "success": "SUCCESS" + } + }, + "configured_graph_size": null, + "errors": [], + "other_outputs": {}, + "outputs": {}, + "success": "SUCCESS" + } + }, + "strings": {}, + "success": true, + "truncated": false +} \ No newline at end of file diff --git a/tests/core/test/test_build_report_data/fixtures/test_build_report_skip_unconfigured.golden.json b/tests/core/test/test_build_report_data/fixtures/test_build_report_skip_unconfigured.golden.json new file mode 100644 index 0000000000000..43d0ae31d0213 --- 
/dev/null +++ b/tests/core/test/test_build_report_data/fixtures/test_build_report_skip_unconfigured.golden.json @@ -0,0 +1,22 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "failures": {}, + "results": { + "root//:ok": { + "configured": { + "": { + "configured_graph_size": null, + "errors": [], + "other_outputs": {}, + "outputs": {}, + "success": "SUCCESS" + } + }, + "errors": [] + } + }, + "strings": {}, + "success": true, + "truncated": false +} \ No newline at end of file diff --git a/tests/core/test/test_build_report_data/fixtures/test_failed_build_has_build_report.golden.json b/tests/core/test/test_build_report_data/fixtures/test_failed_build_has_build_report.golden.json new file mode 100644 index 0000000000000..6fc1f84824097 --- /dev/null +++ b/tests/core/test/test_build_report_data/fixtures/test_failed_build_has_build_report.golden.json @@ -0,0 +1,36 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "failures": { + "root//:fail_build1": "Error running analysis for " + }, + "results": { + "root//:fail_build1": { + "configured": { + "": { + "configured_graph_size": null, + "errors": [ + { + "action_error": null, + "cause_index": 0, + "message_content": "6400475771335786033" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "configured_graph_size": null, + "errors": [], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "strings": { + "6400475771335786033": "Error running analysis for " + }, + "success": false, + "truncated": false +} \ No newline at end of file diff --git a/tests/core/test/test_build_report_data/fixtures/test_multiple_failures_included.golden.json b/tests/core/test/test_build_report_data/fixtures/test_multiple_failures_included.golden.json new file mode 100644 index 0000000000000..cb32a89487955 --- /dev/null +++ b/tests/core/test/test_build_report_data/fixtures/test_multiple_failures_included.golden.json @@ -0,0 +1,60 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "failures": { + "root//:fail_build1": "Error running analysis for ", + "root//:fail_build2": "Error running analysis for " + }, + "results": { + "root//:fail_build1": { + "configured": { + "": { + "configured_graph_size": null, + "errors": [ + { + "action_error": null, + "cause_index": 0, + "message_content": "6400475771335786033" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "configured_graph_size": null, + "errors": [], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + }, + "root//:fail_build2": { + "configured": { + "": { + "configured_graph_size": null, + "errors": [ + { + "action_error": null, + "cause_index": 1, + "message_content": "10147276810488095803" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "configured_graph_size": null, + "errors": [], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "strings": { + "10147276810488095803": "Error running analysis for ", + "6400475771335786033": "Error running analysis for " + }, + "success": false, + "truncated": false +} \ No newline at end of file diff --git a/tests/core/test/test_build_report_data/fixtures/test_target_doesnt_exist.golden.json b/tests/core/test/test_build_report_data/fixtures/test_target_doesnt_exist.golden.json new file mode 100644 
index 0000000000000..7bc64bb8c201b --- /dev/null +++ b/tests/core/test/test_build_report_data/fixtures/test_target_doesnt_exist.golden.json @@ -0,0 +1,28 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +{ + "failures": { + "root//:doesnt_exist": "Unknown target `` from package " + }, + "results": { + "root//:doesnt_exist": { + "configured": {}, + "configured_graph_size": null, + "errors": [ + { + "action_error": null, + "cause_index": 0, + "message_content": "17742238780201989333" + } + ], + "other_outputs": {}, + "outputs": {}, + "success": "FAIL" + } + }, + "strings": { + "17742238780201989333": "Unknown target `` from package " + }, + "success": false, + "truncated": false +} \ No newline at end of file diff --git a/tests/core/test/test_internal_runner.py b/tests/core/test/test_internal_runner.py new file mode 100644 index 0000000000000..6fc76b8bc7ef5 --- /dev/null +++ b/tests/core/test/test_internal_runner.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test, env + +# Empty test executor forces internal test executor to be used. +INTERNAL_TEST_EXECUTOR = "" + + +@buck_test() +@env("BUCK2_ALLOW_INTERNAL_TEST_RUNNER_DO_NOT_USE", "1") +async def test_internal_test_executor(buck: Buck) -> None: + await buck.test( + ":trivial_pass", + test_executor=INTERNAL_TEST_EXECUTOR, + ) + + +@buck_test() +@env("TEST_VAR", "BAD_VALUE") +@env("BUCK2_ALLOW_INTERNAL_TEST_RUNNER_DO_NOT_USE", "1") +async def test_internal_test_executor_env(buck: Buck) -> None: + await buck.test( + ":check_env", + "--", + "--env", + "TEST_VAR=TEST_VALUE", + test_executor=INTERNAL_TEST_EXECUTOR, + ) + + +@buck_test() +@env("BUCK2_ALLOW_INTERNAL_TEST_RUNNER_DO_NOT_USE", "1") +async def test_internal_test_executor_timeout(buck: Buck) -> None: + await expect_failure( + buck.test( + ":timeout", + "--", + "--timeout", + "1", + test_executor=INTERNAL_TEST_EXECUTOR, + ), + stderr_regex="Timeout: ", + ) diff --git a/tests/core/test/test_internal_runner_data/.buckconfig b/tests/core/test/test_internal_runner_data/.buckconfig new file mode 100644 index 0000000000000..df06a02c03ca2 --- /dev/null +++ b/tests/core/test/test_internal_runner_data/.buckconfig @@ -0,0 +1,5 @@ +[cells] + root = . 
+ +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/test/test_internal_runner_data/.buckroot b/tests/core/test/test_internal_runner_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/test/test_internal_runner_data/TARGETS.fixture b/tests/core/test/test_internal_runner_data/TARGETS.fixture new file mode 100644 index 0000000000000..eb8aff30e0f68 --- /dev/null +++ b/tests/core/test/test_internal_runner_data/TARGETS.fixture @@ -0,0 +1,16 @@ +load(":python_test.bzl", "python_test") + +python_test( + name = "trivial_pass", + script = "import sys; sys.exit(0)", +) + +python_test( + name = "check_env", + script = "import os; assert os.environ['TEST_VAR'] == 'TEST_VALUE'", +) + +python_test( + name = "timeout", + script = "import time; time.sleep(60)", +) diff --git a/tests/core/test/test_internal_runner_data/python_test.bzl b/tests/core/test/test_internal_runner_data/python_test.bzl new file mode 100644 index 0000000000000..719fe692b637e --- /dev/null +++ b/tests/core/test/test_internal_runner_data/python_test.bzl @@ -0,0 +1,22 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(ctx): + return [ + DefaultInfo(), + ExternalRunnerTestInfo( + command = ["python3", "-c", ctx.attrs.script], + type = "custom", + ), + ] + +python_test = rule( + impl = _impl, + attrs = { + "script": attrs.string(), + }, +) diff --git a/tests/core/test/test_listing.py b/tests/core/test/test_listing.py new file mode 100644 index 0000000000000..b930fb15359f1 --- /dev/null +++ b/tests/core/test/test_listing.py @@ -0,0 +1,125 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
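# The listing tests below toggle `buck2.cache_test_listings`; discovery is
# cached only when the tested target matches that pattern, which is why the
# `//:not_ok` variant re-runs discovery every time. A rough model of the gate
# (hypothetical helper; exact labels and `...` prefixes only, no real pattern
# engine):
def listing_cacheable(target: str, patterns: list) -> bool:
    for pat in patterns:
        if pat == target:
            return True
        if pat.endswith("...") and target.startswith(pat[:-3]):
            return True
    return False

assert listing_cacheable("//:ok", ["//:ok"])
assert not listing_cacheable("//:ok", ["//:not_ok"])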
+ +# pyre-strict + +from typing import Any, Dict, List + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test, env +from buck2.tests.e2e_util.helper.utils import filter_events, random_string + + +@buck_test() +async def test_discovery_cached_on_dice(buck: Buck) -> None: + args = [ + "-c", + "buck2.cache_test_listings=//:ok", + "//:ok", + ] + await run_test_and_check_discovery_presence(buck, False, args) + await run_test_and_check_discovery_presence(buck, True, args) + + +@buck_test() +async def test_discovery_not_cached_for_not_matching_pattern(buck: Buck) -> None: + args = [ + "-c", + "buck2.cache_test_listings=//:not_ok", + "//:ok", + ] + await run_test_and_check_discovery_presence(buck, False, args) + await run_test_and_check_discovery_presence(buck, False, args) + + +@buck_test() +async def test_discovery_cache_turned_off(buck: Buck) -> None: + args = [ + "//:ok", + ] + await run_test_and_check_discovery_presence(buck, False, args) + await run_test_and_check_discovery_presence(buck, False, args) + + +@buck_test() +async def test_discovery_cached_on_re(buck: Buck) -> None: + seed = random_string() + args = [ + "-c", + "buck2.cache_test_listings=//:ok", + "-c", + f"test.seed={seed}", + "-c", + "test.local_enabled=false", + "-c", + "test.remote_enabled=true", + "-c", + "test.remote_cache_enabled=true", + "//:test", + ] + await run_test_and_check_discovery_presence(buck, False, args) + await buck.kill() + await run_test_and_check_discovery_presence(buck, True, args) + await buck.kill() + args = [ + "-c", + "buck2.cache_test_listings=//:ok", + "-c", + f"test.seed={seed}", + "-c", + "test.remote_enabled=false", + "-c", + "test.local_enabled=true", + "-c", + "test.remote_cache_enabled=true", + "//:test", + ] + await run_test_and_check_discovery_presence(buck, True, args) + + +@buck_test() +@env("BUCK2_TEST_SKIP_ACTION_CACHE_WRITE", "true") +async def test_local_discovery_uploaded_to_cache(buck: Buck) -> None: + seed = random_string() + args = [ + "-c", + "buck2.cache_test_listings=//:ok", + "-c", + f"test.seed={seed}", + "-c", + "test.allow_cache_uploads=true", + "-c", + "test.remote_cache_enabled=true", + "//:ok", + ] + await run_test_and_check_discovery_presence(buck, False, args) + await _check_cache_uploaded(buck) + + +async def _check_cache_uploaded(buck: Buck) -> None: + result = await _cache_uploads(buck) + assert len(result) == 1 + assert result[0]["success"] + + +async def _cache_uploads(buck: Buck) -> List[Dict[str, Any]]: + return await filter_events(buck, "Event", "data", "SpanEnd", "data", "CacheUpload") + + +async def run_test_and_check_discovery_presence( + buck: Buck, + is_absent: bool, + args: List[str], +) -> None: + await buck.test(*args) + stdout = (await buck.log("what-ran")).stdout + + assert "test.run" in stdout + if is_absent: + assert "test.discovery" not in stdout + else: + assert "test.discovery" in stdout diff --git a/tests/core/test/test_listing_data/.buckconfig b/tests/core/test/test_listing_data/.buckconfig new file mode 100644 index 0000000000000..9853433847fd5 --- /dev/null +++ b/tests/core/test/test_listing_data/.buckconfig @@ -0,0 +1,18 @@ +[buildfile] +name=TARGETS.fixture + +[repositories] +root = . 
+nano_prelude = nano_prelude + +[cell_aliases] +prelude = nano_prelude + +[external_cells] +nano_prelude = bundled + +[buck2] +file_watcher = fs_hash_crawler + +[build] + execution_platforms = root//platforms:platforms diff --git a/tests/core/test/test_listing_data/.buckroot b/tests/core/test/test_listing_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/test/test_listing_data/TARGETS.fixture b/tests/core/test/test_listing_data/TARGETS.fixture new file mode 100644 index 0000000000000..a745c5fc3470e --- /dev/null +++ b/tests/core/test/test_listing_data/TARGETS.fixture @@ -0,0 +1,7 @@ +load(":rules.bzl", "ok_test", "seed") + +seed_val = read_config("test", "seed", "42") + +ok_test(name = "ok", seed = seed_val) + +seed(name = "test", seed = seed_val) diff --git a/tests/core/test/test_listing_data/platforms/TARGETS.fixture b/tests/core/test/test_listing_data/platforms/TARGETS.fixture new file mode 100644 index 0000000000000..87c98212ee498 --- /dev/null +++ b/tests/core/test/test_listing_data/platforms/TARGETS.fixture @@ -0,0 +1,14 @@ +load(":defs.bzl", "execution_platforms") + +local_enabled = read_config("test", "local_enabled", "true") +remote_enabled = read_config("test", "remote_enabled", "false") +remote_cache_enabled = read_config("test", "remote_cache_enabled", "false") +allow_cache_uploads = read_config("test", "allow_cache_uploads", "false") + +execution_platforms( + name = "platforms", + local_enabled = local_enabled == "true", + remote_enabled = remote_enabled == "true", + remote_cache_enabled = remote_cache_enabled == "true", + allow_cache_uploads = allow_cache_uploads == "true", +) diff --git a/tests/core/test/test_listing_data/platforms/defs.bzl b/tests/core/test/test_listing_data/platforms/defs.bzl new file mode 100644 index 0000000000000..9566239be8584 --- /dev/null +++ b/tests/core/test/test_listing_data/platforms/defs.bzl @@ -0,0 +1,41 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _execution_platforms(ctx): + configuration = ConfigurationInfo( + constraints = { + }, + values = {}, + ) + + platform = ExecutionPlatformInfo( + label = ctx.label.raw_target(), + configuration = configuration, + executor_config = CommandExecutorConfig( + local_enabled = ctx.attrs.local_enabled, + remote_enabled = ctx.attrs.remote_enabled, + remote_cache_enabled = ctx.attrs.remote_cache_enabled, + remote_execution_properties = { + "platform": "linux-remote-execution", + }, + remote_execution_use_case = "buck2-testing", + allow_cache_uploads = ctx.attrs.allow_cache_uploads, + max_cache_upload_mebibytes = 1, + ), + ) + + return [ + DefaultInfo(), + ExecutionPlatformRegistrationInfo(platforms = [platform]), + ] + +execution_platforms = rule(attrs = { + "allow_cache_uploads": attrs.bool(), + "local_enabled": attrs.bool(), + "remote_cache_enabled": attrs.option(attrs.bool(), default = None), + "remote_enabled": attrs.bool(), +}, impl = _execution_platforms) diff --git a/tests/core/test/test_listing_data/rules.bzl b/tests/core/test/test_listing_data/rules.bzl new file mode 100644 index 0000000000000..80dfafc9162e5 --- /dev/null +++ b/tests/core/test/test_listing_data/rules.bzl @@ -0,0 +1,43 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+script = """
+import sys;
+if '--list' in sys.argv:
+    print('test1\\n')
+sys.exit(0)
+"""
+
+def _impl_ok(ctx):
+    return [
+        DefaultInfo(),
+        ExternalRunnerTestInfo(
+            command = ["python3", "-c", script],
+            type = "lionhead",
+            env = {"seed": ctx.attrs.seed},
+        ),
+    ]
+
+def _seed_impl(ctx):
+    out = ctx.actions.declare_output("file")
+    ctx.actions.run(
+        ["touch", out.as_output()],
+        category = "touch",
+        env = {"seed": ctx.attrs.seed},
+    )
+    return [
+        DefaultInfo(out),
+        ExternalRunnerTestInfo(
+            command = ["python3", "-c", script],
+            use_project_relative_paths = True,
+            type = "lionhead",
+            env = {"seed": ctx.attrs.seed},
+        ),
+    ]
+
+seed = rule(attrs = {"seed": attrs.string()}, impl = _seed_impl)
+ok_test = rule(attrs = {"seed": attrs.string()}, impl = _impl_ok)
diff --git a/tests/core/test/test_local_resources.py b/tests/core/test/test_local_resources.py
new file mode 100644
index 0000000000000..70d423057d9e4
--- /dev/null
+++ b/tests/core/test/test_local_resources.py
@@ -0,0 +1,22 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.buck_workspace import buck_test, env
+
+
+@buck_test()
+@env("BUCK2_ALLOW_INTERNAL_TEST_RUNNER_DO_NOT_USE", "1")
+async def test_local_resources(buck: Buck) -> None:
+    await buck.test(
+        ":my_test",
+        test_executor="",
+    )
+    res = await buck.log("what-ran")
+    assert "MY_RESOURCE_ID=42" in res.stdout
diff --git a/tests/core/test/test_local_resources_data/.buckconfig b/tests/core/test/test_local_resources_data/.buckconfig
new file mode 100644
index 0000000000000..ea4c43838d3d0
--- /dev/null
+++ b/tests/core/test/test_local_resources_data/.buckconfig
@@ -0,0 +1,8 @@
+[buildfile]
+name=TARGETS.fixture
+
+[repositories]
+root = .
+
+[buck2]
+file_watcher = fs_hash_crawler
diff --git a/tests/core/test/test_local_resources_data/.buckroot b/tests/core/test/test_local_resources_data/.buckroot
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/test/test_local_resources_data/TARGETS.fixture b/tests/core/test/test_local_resources_data/TARGETS.fixture
new file mode 100644
index 0000000000000..fc29901678966
--- /dev/null
+++ b/tests/core/test/test_local_resources_data/TARGETS.fixture
@@ -0,0 +1,10 @@
+load(":rules.bzl", "broker", "test")
+
+broker(
+    name = "my_broker",
+)
+
+test(
+    name = "my_test",
+    broker = ":my_broker",
+)
diff --git a/tests/core/test/test_local_resources_data/rules.bzl b/tests/core/test/test_local_resources_data/rules.bzl
new file mode 100644
index 0000000000000..7d4aefe598ab4
--- /dev/null
+++ b/tests/core/test/test_local_resources_data/rules.bzl
@@ -0,0 +1,43 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
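# The broker contract used below: the `setup` command prints a JSON document
# listing resource instances, and `resource_env_vars` maps environment
# variable names to keys of those instances (here MY_RESOURCE_ID -> my_alias,
# which yields the "MY_RESOURCE_ID=42" seen in what-ran). A standalone sketch
# of an equivalent broker process in plain Python (the rule itself just `cat`s
# a pre-written file):
import json
import sys

def emit_resources() -> None:
    # One resource instance whose "my_alias" value becomes MY_RESOURCE_ID.
    json.dump({"resources": [{"my_alias": "42"}]}, sys.stdout)

if __name__ == "__main__":
    emit_resources()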
+
+def _broker_impl(ctx):
+    json = ctx.actions.write_json("resources.json", {
+        "resources": [{"my_alias": "42"}],
+    })
+    return [
+        DefaultInfo(),
+        LocalResourceInfo(
+            setup = cmd_args(["cat", json]),
+            resource_env_vars = {
+                "MY_RESOURCE_ID": "my_alias",
+            },
+            setup_timeout_seconds = 5,
+        ),
+    ]
+
+_broker_attrs = {}
+
+broker = rule(impl = _broker_impl, attrs = _broker_attrs)
+
+def _test_impl(ctx):
+    return [DefaultInfo(), ExternalRunnerTestInfo(
+        type = "custom",
+        command = ["true"],
+        local_resources = {
+            "my_resource_type": ctx.attrs.broker.label,
+        },
+        required_local_resources = [
+            RequiredTestLocalResource("my_resource_type"),
+        ],
+    )]
+
+_test_attrs = {
+    "broker": attrs.dep(providers = [LocalResourceInfo]),
+}
+
+test = rule(impl = _test_impl, attrs = _test_attrs)
diff --git a/tests/core/test/test_platform_resolution.py b/tests/core/test/test_platform_resolution.py
new file mode 100644
index 0000000000000..454b57a6f8306
--- /dev/null
+++ b/tests/core/test/test_platform_resolution.py
@@ -0,0 +1,23 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.buck_workspace import buck_test, env
+
+
+@buck_test()
+@env("BUCK2_ALLOW_INTERNAL_TEST_RUNNER_DO_NOT_USE", "1")
+async def test_platform_resolution(buck: Buck) -> None:
+    # The setup makes the test target incompatible with the testee's default
+    # target platform.
+    await buck.test(
+        ":my_rule",
+        test_executor="",
+    )
diff --git a/tests/core/test/test_platform_resolution_data/.buckconfig b/tests/core/test/test_platform_resolution_data/.buckconfig
new file mode 100644
index 0000000000000..6a2e48bc41d8a
--- /dev/null
+++ b/tests/core/test/test_platform_resolution_data/.buckconfig
@@ -0,0 +1,9 @@
+[buildfile]
+name=TARGETS.fixture
+
+[project]
+ignore=ignored
+
+[repositories]
+root = .
+prelude = prelude diff --git a/tests/core/test/test_platform_resolution_data/.buckroot b/tests/core/test/test_platform_resolution_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/test/test_platform_resolution_data/TARGETS.fixture b/tests/core/test/test_platform_resolution_data/TARGETS.fixture new file mode 100644 index 0000000000000..e08ef9f7e00a0 --- /dev/null +++ b/tests/core/test/test_platform_resolution_data/TARGETS.fixture @@ -0,0 +1,54 @@ +load(":rules.bzl", "config_setting", "configuration", "lib_rule", "platform", "test_rule") + +config_setting( + name = "_", +) + +configuration( + name = "none", + config_setting = ":_", +) + +config_setting( + name = "setting", +) + +configuration( + name = "config_lib", + config_setting = ":setting", +) + +configuration( + name = "config_test", + config_setting = ":setting", +) + +platform( + name = "platform_lib", + configuration = ":config_lib", +) + +platform( + name = "platform_test", + configuration = ":config_test", +) + +lib_rule( + name = "my_rule", + tests = select({ + ":config_lib": [":my_test"], + "DEFAULT": [":incompatible_test"], + }), + default_target_platform = ":platform_lib", +) + +test_rule( + name = "my_test", + compatible_with = [":config_test"], + default_target_platform = ":platform_test", +) + +test_rule( + name = "incompatible_test", + compatible_with = [":none"], +) diff --git a/tests/core/test/test_platform_resolution_data/prelude/prelude.bzl b/tests/core/test/test_platform_resolution_data/prelude/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/test/test_platform_resolution_data/rules.bzl b/tests/core/test/test_platform_resolution_data/rules.bzl new file mode 100644 index 0000000000000..691864fe2d6d9 --- /dev/null +++ b/tests/core/test/test_platform_resolution_data/rules.bzl @@ -0,0 +1,65 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(_ctx): + return [DefaultInfo()] + +lib_rule = rule( + impl = _impl, + attrs = { + }, +) + +test_rule = rule( + impl = _impl, + attrs = { + }, +) + +def _config_setting_impl(ctx): + return [DefaultInfo(), ConstraintSettingInfo(label = ctx.label.raw_target())] + +config_setting = rule( + impl = _config_setting_impl, + attrs = { + }, +) + +def _configuration_impl(ctx): + config_setting = ctx.attrs.config_setting + value = ConstraintValueInfo( + setting = config_setting[ConstraintSettingInfo], + label = ctx.label.raw_target(), + ) + + return [ + DefaultInfo(), + ConfigurationInfo(constraints = {config_setting.label.raw_target(): value}, values = {}), + ] + +configuration = rule( + impl = _configuration_impl, + attrs = { + "config_setting": attrs.configuration_label(), + }, +) + +def _platform_impl(ctx): + return [ + DefaultInfo(), + PlatformInfo( + label = str(ctx.label.raw_target()), + configuration = ctx.attrs.configuration[ConfigurationInfo], + ), + ] + +platform = rule( + impl = _platform_impl, + attrs = { + "configuration": attrs.configuration_label(), + }, +) diff --git a/tests/core/test/test_selection.py b/tests/core/test/test_selection.py new file mode 100644 index 0000000000000..55428b3031736 --- /dev/null +++ b/tests/core/test/test_selection.py @@ -0,0 +1,55 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.asserts import expect_failure
+from buck2.tests.e2e_util.buck_workspace import buck_test
+
+
+@buck_test()
+async def test_ok(buck: Buck) -> None:
+    await buck.test("//:ok")
+
+
+@buck_test()
+async def test_fail(buck: Buck) -> None:
+    await expect_failure(buck.test("//:fail"), stderr_regex="Fail: root//:fail - main")
+
+
+@buck_test()
+async def test_tests_attribute(buck: Buck) -> None:
+    await expect_failure(
+        buck.test("//:noop_references_fail"), stderr_regex="Fail: root//:fail - main"
+    )
+
+
+@buck_test()
+async def test_tests_attribute_transitive(buck: Buck) -> None:
+    await expect_failure(
+        buck.test(
+            "//:noop_transitively_references_fail",
+        ),
+        stderr_regex="Fail: root//:fail - main",
+    )
+
+
+@buck_test()
+async def test_tests_attribute_cycle(buck: Buck) -> None:
+    # Awaited for effect only: success here just means buck2 terminates
+    # instead of looping on the tests-attribute cycle.
+    await buck.test(
+        "//:noop_cycle1",
+    )
+
+
+@buck_test()
+async def test_tests_attribute_self_transition(buck: Buck) -> None:
+    await expect_failure(
+        buck.test("//:noop_self_transition_references_fail"),
+        stderr_regex="Fail: root//:fail - main",
+    )
diff --git a/tests/core/test/test_selection_data/.buckconfig b/tests/core/test/test_selection_data/.buckconfig
new file mode 100644
index 0000000000000..425a56f43b9c4
--- /dev/null
+++ b/tests/core/test/test_selection_data/.buckconfig
@@ -0,0 +1,6 @@
+[repositories]
+ root = .
+[repository_aliases]
+ prelude = root
+[buildfile]
+ name = TARGETS.fixture
diff --git a/tests/core/test/test_selection_data/.buckroot b/tests/core/test/test_selection_data/.buckroot
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/test/test_selection_data/TARGETS.fixture b/tests/core/test/test_selection_data/TARGETS.fixture
new file mode 100644
index 0000000000000..b0e74f132aef1
--- /dev/null
+++ b/tests/core/test/test_selection_data/TARGETS.fixture
@@ -0,0 +1,16 @@
+load(":defs.bzl", "fail_test", "noop", "noop_self_transition", "ok_test", "platform")
+
+platform(name = "platform")
+
+ok_test(name = "ok")
+fail_test(name = "fail")
+
+noop(name = "noop_references_fail", tests = [":fail"])
+noop(name = "noop_transitively_references_fail", tests = [":noop_references_fail"])
+noop_self_transition(
+    name = "noop_self_transition_references_fail",
+    tests = [":fail"],
+    default_target_platform = ":platform",
+)
+noop(name = "noop_cycle1", tests = [":noop_cycle2"])
+noop(name = "noop_cycle2", tests = [":noop_cycle1"])
diff --git a/tests/core/test/test_selection_data/defs.bzl b/tests/core/test/test_selection_data/defs.bzl
new file mode 100644
index 0000000000000..74042e00f60a7
--- /dev/null
+++ b/tests/core/test/test_selection_data/defs.bzl
@@ -0,0 +1,64 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
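The cycle case above (`noop_cycle1` referencing `noop_cycle2` and vice versa) can only pass because test discovery deduplicates targets it has already visited. A minimal model of that traversal, for illustration only — this is not Buck2's implementation, and `collect_tests`/`tests_of` are invented names:

def collect_tests(root, tests_of):
    # Walk `tests` attributes transitively; the seen-set is what makes a
    # cyclic graph terminate instead of looping forever.
    seen, stack, found = set(), [root], set()
    while stack:
        node = stack.pop()
        if node in seen:
            continue
        seen.add(node)
        for test in tests_of(node):
            found.add(test)
            stack.append(test)
    return found

graph = {":noop_cycle1": [":noop_cycle2"], ":noop_cycle2": [":noop_cycle1"]}
assert collect_tests(":noop_cycle1", lambda n: graph.get(n, [])) == {
    ":noop_cycle1",
    ":noop_cycle2",
}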
+ +def _impl_ok(_ctx): + return [ + DefaultInfo(), + ExternalRunnerTestInfo( + command = ["python3", "-c", "import sys; sys.exit(0)"], + type = "custom", + ), + ] + +ok_test = rule(attrs = {}, impl = _impl_ok) + +def _impl_fail(_ctx): + return [ + DefaultInfo(), + ExternalRunnerTestInfo( + command = ["python3", "-c", "import sys; sys.exit(1)"], + type = "custom", + ), + ] + +fail_test = rule(attrs = {}, impl = _impl_fail) + +def _impl_noop(_ctx): + return [ + DefaultInfo(), + ] + +noop = rule(attrs = {}, impl = _impl_noop) + +def _impl_dummy_transition(platform, refs): + _ignore = (platform, refs) # buildifier: disable=unused-variable + return PlatformInfo( + label = "", + configuration = ConfigurationInfo(constraints = {}, values = {}), + ) + +dummy_transition = transition( + impl = _impl_dummy_transition, + refs = {}, +) + +noop_self_transition = rule( + impl = _impl_noop, + attrs = {}, + cfg = dummy_transition, +) + +def _impl_platform(ctx): + return [ + DefaultInfo(), + PlatformInfo( + label = str(ctx.label.raw_target()), + configuration = ConfigurationInfo(constraints = {}, values = {}), + ), + ] + +platform = rule(impl = _impl_platform, attrs = {}) diff --git a/tests/core/test/test_selection_data/prelude.bzl b/tests/core/test/test_selection_data/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/test/test_skip_incompatible_targets.py b/tests/core/test/test_skip_incompatible_targets.py new file mode 100644 index 0000000000000..63c790c35a831 --- /dev/null +++ b/tests/core/test/test_skip_incompatible_targets.py @@ -0,0 +1,45 @@ +#!/usr/bin/env fbpython +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test, env + + +@buck_test() +@env( + "BUCK2_ALLOW_INTERNAL_TEST_RUNNER_DO_NOT_USE", "1" +) # needed to avoid failure on missing buck2-tpx in buck-out +async def test_test_skip_incompatible_targets(buck: Buck) -> None: + targetA = "root//:compatible-with-A" + targetB = "root//:compatible-with-B" + platformA = "root//:platA" + + await expect_failure( + buck.test( + targetA, + targetB, + f"--target-platforms={platformA}", + test_executor="", + ), + stderr_regex=f"{targetB} is incompatible with {platformA}#.*$", + ) + + result = await buck.test( + targetA, + targetB, + f"--target-platforms={platformA}", + "--skip-incompatible-targets", + test_executor="", + ) + assert f"Skipping target incompatible node `{targetB}" in result.stderr + + result.check_returncode() diff --git a/tests/core/test/test_skip_incompatible_targets_data/.buckconfig b/tests/core/test/test_skip_incompatible_targets_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/core/test/test_skip_incompatible_targets_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . 
+ nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/core/test/test_skip_incompatible_targets_data/.buckroot b/tests/core/test/test_skip_incompatible_targets_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/test/test_skip_incompatible_targets_data/TARGETS.fixture b/tests/core/test/test_skip_incompatible_targets_data/TARGETS.fixture new file mode 100644 index 0000000000000..148264fc67816 --- /dev/null +++ b/tests/core/test/test_skip_incompatible_targets_data/TARGETS.fixture @@ -0,0 +1,26 @@ +load(":rules.bzl", "test_rule") + +constraint_setting(name = "setting") + +constraint_value(name = "A", constraint_setting = ":setting") +constraint_value(name = "B", constraint_setting = ":setting") + +platform( + name = "platA", + constraint_values = [":A"], +) + +platform( + name = "platB", + constraint_values = [":B"], +) + +test_rule( + name = "compatible-with-A", + target_compatible_with = [":A"], +) + +test_rule( + name = "compatible-with-B", + target_compatible_with = [":B"], +) diff --git a/tests/core/test/test_skip_incompatible_targets_data/rules.bzl b/tests/core/test/test_skip_incompatible_targets_data/rules.bzl new file mode 100644 index 0000000000000..e59105f77777d --- /dev/null +++ b/tests/core/test/test_skip_incompatible_targets_data/rules.bzl @@ -0,0 +1,15 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(_ctx): + return [DefaultInfo()] + +test_rule = rule( + impl = _impl, + attrs = { + }, +) diff --git a/tests/core/test/test_startup.py b/tests/core/test/test_startup.py new file mode 100644 index 0000000000000..ab85742a63045 --- /dev/null +++ b/tests/core/test/test_startup.py @@ -0,0 +1,24 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test, env + + +@buck_test() +@env("BUCK2_TEST_TPX_USE_TCP", "true") +async def test_tcp_startup_fail(buck: Buck) -> None: + # Python is a binary that will just fail when we give it our executor args + # but works on any platform. It's a bit dumb but it'll do + await expect_failure( + buck.test("...", test_executor="python3"), + stderr_regex="Executor exited before connecting", + ) diff --git a/tests/core/test/test_startup_data/.buckconfig b/tests/core/test/test_startup_data/.buckconfig new file mode 100644 index 0000000000000..425a56f43b9c4 --- /dev/null +++ b/tests/core/test/test_startup_data/.buckconfig @@ -0,0 +1,6 @@ +[repositories] + root = . 
+[repository_aliases]
+ prelude = root
+[buildfile]
+ name = TARGETS.fixture
diff --git a/tests/core/test/test_startup_data/.buckroot b/tests/core/test/test_startup_data/.buckroot
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/test/test_startup_data/TARGETS.fixture b/tests/core/test/test_startup_data/TARGETS.fixture
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/test/test_startup_data/prelude.bzl b/tests/core/test/test_startup_data/prelude.bzl
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/core/trace_io/BUCK b/tests/core/trace_io/BUCK
new file mode 100644
index 0000000000000..54ff5b7766050
--- /dev/null
+++ b/tests/core/trace_io/BUCK
@@ -0,0 +1,9 @@
+load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test")
+
+oncall("build_infra")
+
+buck2_e2e_test(
+    name = "test_trace_io",
+    srcs = ["test_trace_io.py"],
+    data_dir = "test_trace_io_data",
+)
diff --git a/tests/core/trace_io/test_trace_io.py b/tests/core/trace_io/test_trace_io.py
new file mode 100644
index 0000000000000..38e376e37b1a0
--- /dev/null
+++ b/tests/core/trace_io/test_trace_io.py
@@ -0,0 +1,365 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+
+from __future__ import annotations
+
+import json
+import os
+import re
+import subprocess
+from pathlib import Path
+from tempfile import NamedTemporaryFile, TemporaryDirectory
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.buck_workspace import buck_test, env
+
+
+def assert_path_in_manifest(path: str, manifest_paths: list[str]) -> None:
+    assert path in manifest_paths, f"expected manifest to contain {path}"
+
+
+def assert_link_in(
+    needle: dict[str, str | None], haystack: list[dict[str, str | None]]
+) -> None:
+    assert (
+        needle in haystack
+    ), f'expected haystack to contain link: {needle["link"]} --> {needle["target"]}'
+
+
+def assert_path_exists(path: str) -> None:
+    assert os.path.exists(path), f"expected {path} to exist"
+
+
+def assert_buck_out_paths_materialized(buck_cwd: str, paths: list[str]) -> None:
+    for path in paths:
+        # Match both gen/ and offline-cache/ outputs under buck-out. This needs
+        # regex alternation: `{gen,offline-cache}` is shell-glob syntax, which
+        # Python's `re` treats as a literal and therefore never matches.
+        if re.match(r"buck-out\/.+\/(gen|offline-cache)/.+\/.+\/.+", path) is not None:
+            assert_path_exists(os.path.join(buck_cwd, path))
+
+
+def hg_init(cwd: Path) -> None:
+    subprocess.run(["hg", "init"], check=True, cwd=cwd)
+    subprocess.run(
+        ["hg", "config", "remotefilelog.reponame", "--local", "no-repo"],
+        check=True,
+        cwd=cwd,
+    )
+
+
+def _setup_buckconfig_digest_algorithms(buck: Buck) -> None:
+    # The digests in `//cas_artifact:` require the buckconfig.
+    with open(buck.cwd / ".buckconfig", "a") as buckconfig:
+        buckconfig.write("[buck2]\n")
+        buckconfig.write("digest_algorithms = BLAKE3-KEYED,SHA1\n")
+
+
+# Tracing I/O not implemented for Windows.
+@buck_test(skip_for_os=["windows"])
+async def test_simple_binary_build(buck: Buck) -> None:
+    # Since this is an inplace test, we need to fake an hg repo so that export-manifest
+    # can extract the repo revision.
+ hg_init(cwd=buck.cwd) + + await buck.debug("trace-io", "enable") + await buck.build("root//hello_world:welcome") + out = await buck.debug("trace-io", "export-manifest") + manifest = json.loads(out.stdout) + + assert ( + manifest["repository"]["revision"] == "0000000000000000000000000000000000000000" + ), "expected manifest to be at null revision" + assert ( + manifest["repository"]["name"] == "no-repo" + ), "expected repo name to be no-repo" + + assert_path_in_manifest("hello_world/main.cpp", manifest["paths"]) + + +@buck_test(skip_for_os=["windows"]) +async def test_external_buckconfig_path_included_in_manifest(buck: Buck) -> None: + hg_init(cwd=buck.cwd) + + with NamedTemporaryFile("w") as tmp: + tmpname = tmp.name + tmp.writelines( + [ + "[buck2]", + " foo = bar", + ] + ) + + await buck.debug("trace-io", "enable") + await buck.build("root//hello_world:welcome", "--config-file", tmpname) + out = await buck.debug("trace-io", "export-manifest") + + manifest = json.loads(out.stdout) + + assert_path_in_manifest(str(Path(tmpname).resolve()), manifest["external_paths"]) + + +# More complicated example with binary depending on multiple libraries. +@buck_test(skip_for_os=["windows"]) +async def test_binary_with_deps(buck: Buck) -> None: + hg_init(cwd=buck.cwd) + + await buck.debug("trace-io", "enable") + await buck.build("root//linking:root") + out = await buck.debug("trace-io", "export-manifest") + manifest = json.loads(out.stdout) + + assert ( + manifest["repository"]["revision"] == "0000000000000000000000000000000000000000" + ), "expected manifest to be at null revision" + assert ( + manifest["repository"]["name"] == "no-repo" + ), "expected repo name to be no-repo" + + assert_path_in_manifest("linking/main.cpp", manifest["paths"]) + assert_path_in_manifest("linking/static.cpp", manifest["paths"]) + assert_path_in_manifest("linking/static.h", manifest["paths"]) + assert_path_in_manifest("linking/shared.h", manifest["paths"]) + + +# Multiple builds should be logical union of all input files of all builds. +@buck_test(skip_for_os=["windows"]) +async def test_multiple_builds(buck: Buck) -> None: + hg_init(cwd=buck.cwd) + + await buck.debug("trace-io", "enable") + await buck.build("root//linking:root") + await buck.build("root//hello_world:welcome") + out = await buck.debug("trace-io", "export-manifest") + manifest = json.loads(out.stdout) + + # From first build + assert_path_in_manifest("linking/shared.h", manifest["paths"]) + # From second build + assert_path_in_manifest("hello_world/main.cpp", manifest["paths"]) + + +# Symlinks should show up in the *_symlinks attributes of the manifest. +@buck_test(setup_eden=True, skip_for_os=["windows"]) +async def test_symlinks(buck: Buck) -> None: + def symlink(link: str, target: str) -> None: + """ + Symlinks link --> target. Assumes we're based in the buck cwd, so link must be relative. + """ + os.symlink(target, os.path.join(buck.cwd, link)) + + # Set up symlinks during the test; buck will read everything behind symlinks while + # setting up for the test otherwise. 
+ # Symlinks for root//symlinks:relative_link + symlink("symlinks/main.cpp", "../hello_world/main.cpp") + + # Symlinks for root//symlinks:external_link + with TemporaryDirectory() as tempdir: + t = Path(tempdir) + absolute_target = t / "include" / "clang" / "Basic" / "Visibility.h" + absolute_target.parent.mkdir(parents=True) + absolute_target.touch() + + traverses_symlink = t / "include" / "llvm" / "PassRegistry.h" + traverses_symlink.parent.mkdir(parents=True) + traverses_symlink.touch() + + symlink("symlinks/PassRegistry.h", str(absolute_target)) + symlink("symlinks/include", str(t / "include")) + + await buck.debug("trace-io", "enable") + await buck.build("root//symlinks:relative_link") + await buck.build("root//symlinks:external_link") + + out = await buck.debug("trace-io", "export-manifest") + manifest = json.loads(out.stdout) + + assert_link_in( + {"link": "symlinks/main.cpp", "target": "hello_world/main.cpp"}, + manifest["relative_symlinks"], + ) + assert_link_in( + { + "link": "symlinks/PassRegistry.h", + "target": str(absolute_target), + "remaining_path": None, + }, + manifest["external_symlinks"], + ) + assert_link_in( + { + "link": "symlinks/include", + "target": str(t / "include"), + "remaining_path": "clang/Basic/Visibility.h", + }, + manifest["external_symlinks"], + ) + assert_path_in_manifest("symlinks/other.cpp", manifest["paths"]) + + +# Validate that manifest includes downloaded http_archive path in buck-out. +@buck_test(skip_for_os=["windows"]) +async def test_includes_http_archive_in_manifest(buck: Buck) -> None: + hg_init(cwd=buck.cwd) + + await buck.debug("trace-io", "enable") + await buck.build("root//http_archive:test_zip") + out = await buck.debug("trace-io", "export-manifest") + manifest = json.loads(out.stdout) + + assert any( + re.match( + r"buck-out/.+/offline-cache/.+/http_archive/__test_zip__/download", path + ) + for path in manifest["paths"] + ), "manifest should contain http_archive cached output" + assert_buck_out_paths_materialized(buck.cwd, manifest["paths"]) + + +# Ensure offline-cache buck-out dir is _not_ created when not doing I/O tracing. +@buck_test(skip_for_os=["windows"]) +async def test_no_tracing_does_not_write_offline_cache_for_http_archive( + buck: Buck, +) -> None: + await buck.build("root//http_archive:test_zip") + assert not os.path.exists( + os.path.join(buck.cwd, "buck-out/offline-cache") + ), "offline cache should not exist when not doing I/O tracing" + + +# Validate that when buckconfig use_network_action_output_cache=true is set we use the +# offline-cache action output instead of fetching from the network. +@buck_test(skip_for_os=["windows"]) +@env("BUCK_LOG", "buck2_execute_impl::materializers=trace") +async def test_fake_offline_http_archive_uses_offline_cache(buck: Buck) -> None: + hg_init(cwd=buck.cwd) + + # This should materialize the offline-cache dir. + target = "root//http_archive:test_zip" + await buck.debug("trace-io", "enable") + result = await buck.build(target) + print("stderr:", result.stderr) + assert ( + "/offline-cache/" in result.stderr + ), "materializer should declare offline-cache materialization" + + # Validate that offline-cache path doesn't exist prior to manifest export. + http_download_path = result.get_build_report().output_for_target(target) + # This is hacky, but there's no other good way to discover the offline-cache path. 
+ offline_cache_path = ( + Path(str(http_download_path).replace("/gen/", "/offline-cache/")).parent + / "download" + ) + assert ( + not offline_cache_path.exists() + ), "offline cache path should not exist before manifest export" + + # Ensure buck-out/offline-cache paths are materialized. + await buck.debug("trace-io", "export-manifest") + assert ( + offline_cache_path.exists() + ), "offline cache path should exist after manifest export" + + await buck.kill() + + result = await buck.build( + "root//http_archive:test_zip", + "--config", + "buck2.use_network_action_output_cache=true", + "--no-remote-cache", + "--local-only", + ) + assert "LocalCopy" in result.stderr, "offline-cache path should be copied to output" + assert http_download_path.exists(), "http download output path should exist" + + +@buck_test(skip_for_os=["windows"]) +async def test_includes_cas_artifact_in_manifest(buck: Buck) -> None: + hg_init(cwd=buck.cwd) + + _setup_buckconfig_digest_algorithms(buck) + + await buck.debug("trace-io", "enable") + await buck.build("//cas_artifact:tree") + out = await buck.debug("trace-io", "export-manifest") + manifest = json.loads(out.stdout) + + assert any( + re.match( + r"buck-out\/.+\/offline-cache/root\/.+\/cas_artifact/__tree__/tree", path + ) + is not None + for path in manifest["paths"] + ), "offline cache should contain cas artifact tree" + + assert_buck_out_paths_materialized(buck.cwd, manifest["paths"]) + + +# Ensure offline-cache buck-out dir is _not_ created when not doing I/O tracing. +@buck_test(skip_for_os=["windows"]) +async def test_no_tracing_does_not_write_offline_cache_for_cas_artifact( + buck: Buck, +) -> None: + _setup_buckconfig_digest_algorithms(buck) + + await buck.build("//cas_artifact:tree") + assert not os.path.exists( + os.path.join(buck.cwd, "buck-out/offline-cache") + ), "offline cache should not exist when not doing I/O tracing" + + +# Validate that when buckconfig use_network_action_output_cache=true is set we use the +# offline-cache action output instead of fetching from the network. +@buck_test(skip_for_os=["windows"]) +@env("BUCK_LOG", "buck2_execute_impl::materializers=trace") +async def test_fake_offline_cas_artifact_uses_offline_cache(buck: Buck) -> None: + hg_init(cwd=buck.cwd) + + _setup_buckconfig_digest_algorithms(buck) + + # This should materialize the offline-cache dir. + target = "root//cas_artifact:tree" + await buck.debug("trace-io", "enable") + result = await buck.build(target) + print("stderr:", result.stderr) + assert ( + "/offline-cache/" in result.stderr + ), "materializer should declare offline-cache materialization" + + # Validate that offline-cache path doesn't exist prior to manifest export. + cas_download_path = result.get_build_report().output_for_target(target) + # This is hacky, but there's no other good way to discover the offline-cache path. + offline_cache_path = ( + Path(str(cas_download_path).replace("/gen/", "/offline-cache/")).parent / "tree" + ) + assert ( + not offline_cache_path.exists() + ), "offline cache path should not exist before manifest export" + + # Ensure buck-out/offline-cache paths are materialized. 
+ await buck.debug("trace-io", "export-manifest") + assert ( + offline_cache_path.exists() + ), "offline cache path should exist after manifest export" + + await buck.kill() + + result = await buck.build( + target, + "--config", + "buck2.use_network_action_output_cache=true", + "--no-remote-cache", + "--local-only", + ) + assert "LocalCopy" in result.stderr, "offline-cache path should be copied to output" + assert cas_download_path.exists(), "cas action output path should exist" + + +# No-op test for windows. +@buck_test() +async def test_noop(buck: Buck) -> None: + return diff --git a/tests/core/trace_io/test_trace_io_data/.buckconfig b/tests/core/trace_io/test_trace_io_data/.buckconfig new file mode 100644 index 0000000000000..20210c3e28f5f --- /dev/null +++ b/tests/core/trace_io/test_trace_io_data/.buckconfig @@ -0,0 +1,12 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored + +[repositories] +root = . +prelude = prelude + +[buck2] +materializations = deferred diff --git a/tests/core/trace_io/test_trace_io_data/.buckroot b/tests/core/trace_io/test_trace_io_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/trace_io/test_trace_io_data/cas_artifact/TARGETS.fixture b/tests/core/trace_io/test_trace_io_data/cas_artifact/TARGETS.fixture new file mode 100644 index 0000000000000..b6a468ecc14b0 --- /dev/null +++ b/tests/core/trace_io/test_trace_io_data/cas_artifact/TARGETS.fixture @@ -0,0 +1,10 @@ +load("@prelude//:prelude.bzl", "cas_artifact") + +cas_artifact( + name = "tree", + # A small tree uploaded in RE with a very large expiration + digest = "0424991a08d1a857d9a4ea858b2f0c9d17f41abbc927d18a8147f0d9e708a77b:77", + use_case = "apple_build_infra_tools", + expires_after_timestamp = 0, + is_tree = True, +) diff --git a/tests/core/trace_io/test_trace_io_data/defs.bzl b/tests/core/trace_io/test_trace_io_data/defs.bzl new file mode 100644 index 0000000000000..ecd049c477094 --- /dev/null +++ b/tests/core/trace_io/test_trace_io_data/defs.bzl @@ -0,0 +1,31 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
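Taken together, the assertions in test_trace_io.py above imply a manifest of roughly the following shape. This is a sketch reconstructed from the tests, not an authoritative schema, and the values are illustrative:

example_manifest = {
    # Populated from the surrounding hg repo; the fixtures sit at the null revision.
    "repository": {"name": "no-repo", "revision": "0" * 40},
    # Repo-relative inputs, plus traced buck-out outputs such as offline-cache paths.
    "paths": ["hello_world/main.cpp"],
    # Absolute paths read from outside the repo, e.g. a --config-file argument.
    "external_paths": ["/tmp/extra.buckconfig"],
    # Symlinks whose targets stay inside the repo.
    "relative_symlinks": [
        {"link": "symlinks/main.cpp", "target": "hello_world/main.cpp"},
    ],
    # Symlinks that leave the repo; remaining_path records how far a read
    # traversed past the link target (None if the link itself was the file).
    "external_symlinks": [
        {
            "link": "symlinks/include",
            "target": "/tmp/include",
            "remaining_path": "clang/Basic/Visibility.h",
        },
    ],
}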
+
+def _binary_impl(ctx):
+    out = ctx.actions.declare_output("out")
+    ctx.actions.run(
+        cmd_args(
+            "touch",
+            out.as_output(),
+            hidden = ctx.attrs.srcs,
+        ),
+        category = "test",
+    )
+    return [DefaultInfo(default_output = out)]
+
+def _library_impl(_ctx):
+    return [DefaultInfo()]
+
+my_binary = rule(impl = _binary_impl, attrs = {
+    "deps": attrs.list(attrs.dep(), default = []),
+    "srcs": attrs.list(attrs.source(), default = []),
+})
+
+my_library = rule(impl = _library_impl, attrs = {
+    "deps": attrs.list(attrs.dep(), default = []),
+    "srcs": attrs.list(attrs.source(), default = []),
+})
diff --git a/tests/core/trace_io/test_trace_io_data/hello_world/TARGETS.fixture b/tests/core/trace_io/test_trace_io_data/hello_world/TARGETS.fixture
new file mode 100644
index 0000000000000..c144d28d823d2
--- /dev/null
+++ b/tests/core/trace_io/test_trace_io_data/hello_world/TARGETS.fixture
@@ -0,0 +1,8 @@
+load("//:defs.bzl", "my_binary")
+
+oncall("buck2")
+
+my_binary(
+    name = "welcome",
+    srcs = ["main.cpp"],
+)
diff --git a/tests/core/trace_io/test_trace_io_data/hello_world/main.cpp b/tests/core/trace_io/test_trace_io_data/hello_world/main.cpp
new file mode 100644
index 0000000000000..22b15a7fa58d2
--- /dev/null
+++ b/tests/core/trace_io/test_trace_io_data/hello_world/main.cpp
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+int main() {
+  return 0;
+}
diff --git a/tests/core/trace_io/test_trace_io_data/http_archive/TARGETS.fixture b/tests/core/trace_io/test_trace_io_data/http_archive/TARGETS.fixture
new file mode 100644
index 0000000000000..b5012daf4ce65
--- /dev/null
+++ b/tests/core/trace_io/test_trace_io_data/http_archive/TARGETS.fixture
@@ -0,0 +1,9 @@
+load("@prelude//:prelude.bzl", "http_archive")
+
+# This URL is just the `./expected` directory that's next to this file,
+# packaged up as a zip.
+http_archive( + name = "test_zip", + urls = ["https://interncache-all.fbcdn.net/manifold/buck_build_test/tree/buck2_test/http_archive/test_zip.zip"], + sha1 = "095f3ebdcbecf6a2395f49fd367dfae1a91ec54b", +) diff --git a/tests/core/trace_io/test_trace_io_data/http_archive/expected/foo/bar/qux b/tests/core/trace_io/test_trace_io_data/http_archive/expected/foo/bar/qux new file mode 100644 index 0000000000000..d63774c1b2087 --- /dev/null +++ b/tests/core/trace_io/test_trace_io_data/http_archive/expected/foo/bar/qux @@ -0,0 +1 @@ +foo/bar/qux diff --git a/tests/core/trace_io/test_trace_io_data/http_archive/expected/foo/bar2 b/tests/core/trace_io/test_trace_io_data/http_archive/expected/foo/bar2 new file mode 100644 index 0000000000000..231194b84714d --- /dev/null +++ b/tests/core/trace_io/test_trace_io_data/http_archive/expected/foo/bar2 @@ -0,0 +1 @@ +foo/bar2 diff --git a/tests/core/trace_io/test_trace_io_data/http_archive/expected/foo2 b/tests/core/trace_io/test_trace_io_data/http_archive/expected/foo2 new file mode 100644 index 0000000000000..54b060eee9654 --- /dev/null +++ b/tests/core/trace_io/test_trace_io_data/http_archive/expected/foo2 @@ -0,0 +1 @@ +foo2 diff --git a/tests/core/trace_io/test_trace_io_data/linking/TARGETS.fixture b/tests/core/trace_io/test_trace_io_data/linking/TARGETS.fixture new file mode 100644 index 0000000000000..a42498377c26d --- /dev/null +++ b/tests/core/trace_io/test_trace_io_data/linking/TARGETS.fixture @@ -0,0 +1,44 @@ +load("//:defs.bzl", "my_binary", "my_library") + +oncall("buck2") + +my_binary( + name = "root", + srcs = [ + "main.cpp", + ], + deps = [ + ":any", + ":shared", + ":shared2", + ":static", + ], +) + +my_library( + name = "static", + srcs = [ + "static.cpp", + ], +) + +my_library( + name = "shared", + srcs = [ + "shared.cpp", + ], +) + +my_library( + name = "shared2", + srcs = [ + "shared2.cpp", + ], +) + +my_library( + name = "any", + srcs = [ + "any.cpp", + ], +) diff --git a/tests/core/trace_io/test_trace_io_data/linking/any.cpp b/tests/core/trace_io/test_trace_io_data/linking/any.cpp new file mode 100644 index 0000000000000..232725b267dad --- /dev/null +++ b/tests/core/trace_io/test_trace_io_data/linking/any.cpp @@ -0,0 +1,12 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +#include "buck2/tests/targets/rules/cxx/linking/any.h" + +void any() {} diff --git a/tests/core/trace_io/test_trace_io_data/linking/any.h b/tests/core/trace_io/test_trace_io_data/linking/any.h new file mode 100644 index 0000000000000..6dfbb94726e43 --- /dev/null +++ b/tests/core/trace_io/test_trace_io_data/linking/any.h @@ -0,0 +1,12 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +#pragma once + +void any(); diff --git a/tests/core/trace_io/test_trace_io_data/linking/main.cpp b/tests/core/trace_io/test_trace_io_data/linking/main.cpp new file mode 100644 index 0000000000000..1f14f32df8e4d --- /dev/null +++ b/tests/core/trace_io/test_trace_io_data/linking/main.cpp @@ -0,0 +1,20 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +#include "linking/any.h" +#include "linking/shared.h" +#include "linking/shared2.h" +#include "linking/static.h" + +int main() { + any(); + shared(); + shared2(); + Static(); +} diff --git a/tests/core/trace_io/test_trace_io_data/linking/shared.cpp b/tests/core/trace_io/test_trace_io_data/linking/shared.cpp new file mode 100644 index 0000000000000..157f53b00bd08 --- /dev/null +++ b/tests/core/trace_io/test_trace_io_data/linking/shared.cpp @@ -0,0 +1,15 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +#include "linking/shared.h" +#include "linking/static.h" + +void shared() { + Static(); +} diff --git a/tests/core/trace_io/test_trace_io_data/linking/shared.h b/tests/core/trace_io/test_trace_io_data/linking/shared.h new file mode 100644 index 0000000000000..f5bf46cd8c971 --- /dev/null +++ b/tests/core/trace_io/test_trace_io_data/linking/shared.h @@ -0,0 +1,12 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +#pragma once + +void shared(); diff --git a/tests/core/trace_io/test_trace_io_data/linking/shared2.cpp b/tests/core/trace_io/test_trace_io_data/linking/shared2.cpp new file mode 100644 index 0000000000000..9a4ea5022f507 --- /dev/null +++ b/tests/core/trace_io/test_trace_io_data/linking/shared2.cpp @@ -0,0 +1,15 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +#include "linking/shared2.h" +#include "linking/static.h" + +void shared2() { + Static(); +} diff --git a/tests/core/trace_io/test_trace_io_data/linking/shared2.h b/tests/core/trace_io/test_trace_io_data/linking/shared2.h new file mode 100644 index 0000000000000..ded31b56f6ee8 --- /dev/null +++ b/tests/core/trace_io/test_trace_io_data/linking/shared2.h @@ -0,0 +1,12 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +#pragma once + +void shared2(); diff --git a/tests/core/trace_io/test_trace_io_data/linking/static.cpp b/tests/core/trace_io/test_trace_io_data/linking/static.cpp new file mode 100644 index 0000000000000..694a889cef1f2 --- /dev/null +++ b/tests/core/trace_io/test_trace_io_data/linking/static.cpp @@ -0,0 +1,12 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +#include "linking/static.h" + +void Static() {} diff --git a/tests/core/trace_io/test_trace_io_data/linking/static.h b/tests/core/trace_io/test_trace_io_data/linking/static.h new file mode 100644 index 0000000000000..aa6c697b4a5ef --- /dev/null +++ b/tests/core/trace_io/test_trace_io_data/linking/static.h @@ -0,0 +1,12 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +#pragma once + +void Static(); diff --git a/tests/core/trace_io/test_trace_io_data/prelude/prelude.bzl b/tests/core/trace_io/test_trace_io_data/prelude/prelude.bzl new file mode 100644 index 0000000000000..38d093ff56898 --- /dev/null +++ b/tests/core/trace_io/test_trace_io_data/prelude/prelude.bzl @@ -0,0 +1,41 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
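The offline-cache tests above locate the cached copy of a network action's output by substituting `/gen/` with `/offline-cache/` in a build output path. A sketch of the correspondence they rely on (the `v2` and configuration-hash components are invented examples; real paths vary):

gen_output = "buck-out/v2/gen/root/1a2b3c4d/http_archive/__test_zip__/download"
offline_cache = gen_output.replace("/gen/", "/offline-cache/")
assert offline_cache == (
    "buck-out/v2/offline-cache/root/1a2b3c4d/http_archive/__test_zip__/download"
)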
+ +def _http_archive_impl(ctx: AnalysisContext): + download = ctx.actions.declare_output("download") + ctx.actions.download_file(download, ctx.attrs.urls[0], sha1 = ctx.attrs.sha1, is_deferrable = True) + + output = ctx.actions.declare_output("output") + ctx.actions.run(["cp", download, output.as_output()], category = "cp") + + return [ + DefaultInfo(default_output = output), + ] + +http_archive = rule(impl = _http_archive_impl, attrs = { + "sha1": attrs.string(), + "urls": attrs.list(attrs.string()), +}) + +def _cas_artifact_impl(ctx: AnalysisContext): + out = ctx.actions.cas_artifact( + ctx.label.name, + ctx.attrs.digest, + ctx.attrs.use_case, + expires_after_timestamp = ctx.attrs.expires_after_timestamp, + is_tree = ctx.attrs.is_tree, + is_directory = ctx.attrs.is_directory, + ) + return [DefaultInfo(default_output = out)] + +cas_artifact = rule(impl = _cas_artifact_impl, attrs = { + "digest": attrs.string(), + "expires_after_timestamp": attrs.int(), + "is_directory": attrs.bool(default = False), + "is_tree": attrs.bool(default = False), + "use_case": attrs.string(), +}) diff --git a/tests/core/trace_io/test_trace_io_data/symlinks/TARGETS.fixture b/tests/core/trace_io/test_trace_io_data/symlinks/TARGETS.fixture new file mode 100644 index 0000000000000..73cc28289d9c2 --- /dev/null +++ b/tests/core/trace_io/test_trace_io_data/symlinks/TARGETS.fixture @@ -0,0 +1,13 @@ +load("//:defs.bzl", "my_binary") + +oncall("buck2") + +my_binary( + name = "relative_link", + srcs = ["main.cpp", "other.cpp"], +) + +my_binary( + name = "external_link", + srcs = ["other.cpp", "PassRegistry.h", "include/clang/Basic/Visibility.h"], +) diff --git a/tests/core/trace_io/test_trace_io_data/symlinks/other.cpp b/tests/core/trace_io/test_trace_io_data/symlinks/other.cpp new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/uncategorized/test_error_formatting_data/fixtures/test_bxl_no_stacktrace_verbose.golden.stderr b/tests/core/uncategorized/test_error_formatting_data/fixtures/test_bxl_no_stacktrace_verbose.golden.stderr new file mode 100644 index 0000000000000..82010a764acc2 --- /dev/null +++ b/tests/core/uncategorized/test_error_formatting_data/fixtures/test_bxl_no_stacktrace_verbose.golden.stderr @@ -0,0 +1,15 @@ +# This file is @generated, regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command + +Command failed: +Traceback (most recent call last): + File , in + * fail_no_stacktrace.bxl:9, in _fail_no_stacktrace_impl + fail_no_stacktrace("failing with no stacktrace") +error: fail: failing with no stacktrace + --> fail_no_stacktrace.bxl:9:5 + | +9 | fail_no_stacktrace("failing with no stacktrace") + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + +BXL FAILED diff --git a/tests/core/validation/BUCK b/tests/core/validation/BUCK new file mode 100644 index 0000000000000..4f82651ebfc77 --- /dev/null +++ b/tests/core/validation/BUCK @@ -0,0 +1,9 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_target_validation", + srcs = ["test_target_validation.py"], + data_dir = "test_target_validation_data", +) diff --git a/tests/core/validation/test_target_validation.py b/tests/core/validation/test_target_validation.py new file mode 100644 index 0000000000000..d2601f585d06e --- /dev/null +++ b/tests/core/validation/test_target_validation.py @@ -0,0 +1,84 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test, env + + +@buck_test() +async def test_validation_affects_build_command(buck: Buck) -> None: + await expect_failure( + buck.build(":plate"), + stderr_regex=""" +Validation for `prelude//:mate \\(\\)` failed: + +"Here I am describing the failure reason"\\. + +Full validation result is located at""", + ) + await buck.build(":date") + + +@buck_test() +async def test_validation_affects_run_command(buck: Buck) -> None: + await expect_failure( + buck.run(":plate"), + stderr_regex=""" +Validation for `prelude//:mate \\(\\)` failed: + +"Here I am describing the failure reason"\\. + +Full validation result is located at""", + ) + await buck.run(":date") + + +@buck_test() +@env("BUCK2_ALLOW_INTERNAL_TEST_RUNNER_DO_NOT_USE", "1") +async def test_validation_affects_test_command(buck: Buck) -> None: + await expect_failure( + buck.test(":plate", test_executor=""), + stderr_regex=""" +Validation for `prelude//:mate \\(\\)` failed: + +"Here I am describing the failure reason"\\. + +Full validation result is located at""", + ) + await buck.test(":date", test_executor="") + + +@buck_test() +async def test_validation_affects_install_command(buck: Buck) -> None: + await expect_failure( + buck.install(":plate"), + stderr_regex="Validation for `prelude//:mate \\(\\)` failed", + ) + # It's too complicated to set up installer properly. + # We intentionally fail on the installer side, but interpret + # an attempt to run it as a successful verification. + await expect_failure( + buck.install(":date"), + stderr_regex="Installer: Incoming connection accepted, now closing it", + ) + + +@buck_test() +async def test_optional_validation(buck: Buck) -> None: + await buck.build(":optional_passing") + + # Optional validations are not run by default. + await buck.build(":optional_failing") + + # Expect a failure when run with --enable-optional-validations. + await expect_failure( + buck.build(":optional_failing", "--enable-optional-validations", "whistle"), + stderr_regex="Validation for `.+` failed", + ) diff --git a/tests/core/validation/test_target_validation_data/.buckconfig b/tests/core/validation/test_target_validation_data/.buckconfig new file mode 100644 index 0000000000000..7078304680646 --- /dev/null +++ b/tests/core/validation/test_target_validation_data/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] + name = TARGETS.fixture + +[repositories] + root = . + prelude = . 
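The failure text matched above is rendered from a JSON file that the rule under test writes via `write_json` (see prelude.bzl below). As a sketch of that contract — only `version`, `data.status`, and `data.message` appear in the fixture:

failing_validation_result = {
    "version": 1,
    "data": {
        "status": "failure",  # "success" makes the validation pass
        "message": "Here I am describing the failure reason",
    },
}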
diff --git a/tests/core/validation/test_target_validation_data/.buckroot b/tests/core/validation/test_target_validation_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/validation/test_target_validation_data/TARGETS.fixture b/tests/core/validation/test_target_validation_data/TARGETS.fixture new file mode 100644 index 0000000000000..8f3e83f1423c2 --- /dev/null +++ b/tests/core/validation/test_target_validation_data/TARGETS.fixture @@ -0,0 +1,40 @@ +china( + name = "plate", + dep = ":mate", + fail = False, +) + +china( + name = "mate", + fail = True, +) + +china( + name = "date", + dep = ":wait", + fail = False, +) + +china( + name = "wait", + fail = False, +) + +china( + name = "optional_failing", + dep = ":wait", + fail = True, + optional = True, +) + +china( + name = "optional_passing", + dep = ":wait", + fail = False, + optional = True, +) + +installer( + name = "my_installer", + main = "installer.py", +) diff --git a/tests/core/validation/test_target_validation_data/installer.py b/tests/core/validation/test_target_validation_data/installer.py new file mode 100755 index 0000000000000..5432e891eb5d5 --- /dev/null +++ b/tests/core/validation/test_target_validation_data/installer.py @@ -0,0 +1,38 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +import argparse +import socket +import sys + + +def parse_args(args): + parser = argparse.ArgumentParser() + parser.add_argument( + "--tcp-port", + type=int, + help="tcp port for installer to connect to", + required=True, + ) + args, _ = parser.parse_known_args(args) + return args + + +def main() -> None: + args = parse_args(sys.argv[1:]) + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + address = ("localhost", args.tcp_port) + print(f"Installer: Binding to port {address[1]}") + sock.bind(address) + sock.listen(1) + connection, _ = sock.accept() + print("Installer: Incoming connection accepted, now closing it") + connection.close() + + +if __name__ == "__main__": + main() diff --git a/tests/core/validation/test_target_validation_data/prelude.bzl b/tests/core/validation/test_target_validation_data/prelude.bzl new file mode 100644 index 0000000000000..35c546697c5a6 --- /dev/null +++ b/tests/core/validation/test_target_validation_data/prelude.bzl @@ -0,0 +1,47 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
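For context on the install assertions above: the installer fixture (installer.py) takes a `--tcp-port` argument from its caller and waits for a single TCP connection, so the test treats a completed connect-then-close handshake as proof the installer was launched. A hypothetical client-side sketch of that handshake (`ping_installer` is an invented name; the real installer protocol exchanges more than a bare connection):

import socket

def ping_installer(port: int) -> None:
    # The fixture accepts exactly one connection and closes it immediately.
    with socket.create_connection(("localhost", port)):
        pass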
+
+def _impl(ctx) -> list[Provider]:
+    flute = ctx.actions.write_json("flute.json", {
+        "data": {
+            "message": "Here I am describing the failure reason" if ctx.attrs.fail else None,
+            "status": "failure" if ctx.attrs.fail else "success",
+        },
+        "version": 1,
+    }, pretty = True)
+    return [
+        DefaultInfo(),
+        RunInfo(args = ["echo", "hello"]),
+        ExternalRunnerTestInfo(type = "dummy", command = ["true"]),
+        InstallInfo(installer = ctx.attrs.installer, files = {"random_file": flute}),
+        ValidationInfo(
+            validations = [
+                ValidationSpec(
+                    name = "whistle",
+                    validation_result = flute,
+                    optional = ctx.attrs.optional,
+                ),
+            ],
+        ),
+    ]
+
+china = rule(impl = _impl, attrs = {
+    "dep": attrs.option(attrs.dep(), default = None),
+    "fail": attrs.bool(default = False),
+    "installer": attrs.default_only(attrs.label(default = "//:my_installer")),
+    "optional": attrs.bool(default = False),
+})
+
+def _installer_impl(ctx) -> list[Provider]:
+    return [
+        DefaultInfo(),
+        RunInfo(args = ["python3", ctx.attrs.main]),
+    ]
+
+installer = rule(impl = _installer_impl, attrs = {
+    "main": attrs.source(),
+})
diff --git a/tests/core/vpnless/BUCK b/tests/core/vpnless/BUCK
new file mode 100644
index 0000000000000..58ff72bf84d66
--- /dev/null
+++ b/tests/core/vpnless/BUCK
@@ -0,0 +1,12 @@
+load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test")
+
+oncall("build_infra")
+
+buck2_e2e_test(
+    name = "test_vpnless",
+    srcs = ["test_vpnless.py"],
+    data_dir = "test_vpnless_data",
+    deps = [
+        "fbsource//third-party/pypi/requests:requests",
+    ],
+)
diff --git a/tests/core/vpnless/test_vpnless.py b/tests/core/vpnless/test_vpnless.py
new file mode 100644
index 0000000000000..33ef5d7d96624
--- /dev/null
+++ b/tests/core/vpnless/test_vpnless.py
@@ -0,0 +1,45 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+
+from __future__ import annotations
+
+import json
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.buck_workspace import buck_test, env
+
+# Note: for test scenarios where we want to ensure the `cpe` crate reports no
+# vpnless support, we have to define the env var but set it to 0. Otherwise
+# these tests will erroneously fail on macOS.
+
+
+@buck_test()
+@env("CPE_RUST_X2P_SUPPORTS_VPNLESS", "0")
+@env("CPE_RUST_X2P_HTTP1_PROXY_PORT", "5555")
+async def test_vpnless_disabled_by_host(buck: Buck) -> None:
+    # Get a daemon to start
+    await buck.build()
+    result = await buck.status()
+    status = json.loads(result.stdout)
+    assert not status[
+        "supports_vpnless"
+    ], "vpnless should be disabled by non-supporting host"
+
+
+@buck_test()
+@env("CPE_RUST_X2P_SUPPORTS_VPNLESS", "1")
+# Need to set this so Windows doesn't go down the unix socket codepath.
+@env("CPE_RUST_X2P_HTTP1_PROXY_PORT", "5555") +async def test_vpnless_enabled(buck: Buck) -> None: + # Get a daemon to start + await buck.build() + result = await buck.status() + status = json.loads(result.stdout) + assert status["supports_vpnless"], "vpnless should be enabled by host" diff --git a/tests/core/vpnless/test_vpnless_data/.buckconfig b/tests/core/vpnless/test_vpnless_data/.buckconfig new file mode 100644 index 0000000000000..6a2e48bc41d8a --- /dev/null +++ b/tests/core/vpnless/test_vpnless_data/.buckconfig @@ -0,0 +1,9 @@ +[buildfile] +name=TARGETS.fixture + +[project] +ignore=ignored + +[repositories] +root = . +prelude = prelude diff --git a/tests/core/vpnless/test_vpnless_data/.buckroot b/tests/core/vpnless/test_vpnless_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/core/vpnless/test_vpnless_data/prelude/prelude.bzl b/tests/core/vpnless/test_vpnless_data/prelude/prelude.bzl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/dependencies_test_util.bzl b/tests/dependencies_test_util.bzl new file mode 100644 index 0000000000000..2787103606336 --- /dev/null +++ b/tests/dependencies_test_util.bzl @@ -0,0 +1,14 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def rule_list_regex(pattern_list): + configuration_pattern = "( [(].*[)])?" + + if not pattern_list: + return "///" + + return "({}){}($)".format("|".join(pattern_list), configuration_pattern) diff --git a/tests/e2e/BUCK b/tests/e2e/BUCK new file mode 100644 index 0000000000000..e9ebac2fc7d48 --- /dev/null +++ b/tests/e2e/BUCK @@ -0,0 +1,51 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_starlark", + srcs = ["test_starlark.py"], +) + +buck2_e2e_test( + name = "test_rust_project", + srcs = ["test_rust_project.py"], + env = { + "RUST_PROJECT_BIN": "$(location fbcode//buck2/integrations/rust-project:rust-project)", + }, +) + +buck2_e2e_test( + name = "test_install", + srcs = ["test_install.py"], + deps = [ + "fbcode//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_intellij_project", + srcs = ["intellij_project/test_intellij_project.py"], +) + +buck2_e2e_test( + name = "test_linker_argsfile", + srcs = ["test_linker_argsfile.py"], +) + +buck2_e2e_test( + name = "test_lsp_fbsource", + srcs = ["test_lsp_fbsource.py"], +) + +buck2_e2e_test( + name = "test_cpp_gen_cdb", + srcs = ["test_cpp_gen_cdb.py"], +) + +buck2_e2e_test( + name = "test_tools", + srcs = ["test_tools.py"], + test_with_deployed_buck2 = True, + use_compiled_buck2_client_and_tpx = True, +) diff --git a/tests/e2e/audit/BUCK b/tests/e2e/audit/BUCK new file mode 100644 index 0000000000000..2488b8ae57ddf --- /dev/null +++ b/tests/e2e/audit/BUCK @@ -0,0 +1,9 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_audit_classpath", + srcs = ["test_audit_classpath.py"], + skip_for_os = ["windows"], +) diff --git a/tests/e2e/audit/test_audit_classpath.py b/tests/e2e/audit/test_audit_classpath.py new file mode 100644 index 0000000000000..29b769f548966 --- /dev/null +++ b/tests/e2e/audit/test_audit_classpath.py @@ -0,0 +1,70 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +# FIXME(JakobDegen): These tests should be isolated and moved into `tests/isolated/audit` + +import json +from pathlib import Path +from typing import Iterable, Set + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +def _classpath_jars(classpaths: Iterable[str]) -> Set[str]: + return {Path(p).name for p in classpaths} + + +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_audit_classpath(buck: Buck) -> None: + result = await buck.audit( + "classpath", "fbsource//fbandroid/buck2/tests/good/classpath:top" + ) + classpath_jars = _classpath_jars(result.stdout.splitlines()) + assert classpath_jars == { + "top.jar", + "direct_dep.jar", + "mid_test.jar", + "transitive_lib.jar", + } + + +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_audit_classpath_binary(buck: Buck) -> None: + result = await buck.audit( + "classpath", "fbsource//fbandroid/buck2/tests/good/classpath:apk" + ) + classpath_jars = _classpath_jars(result.stdout.splitlines()) + assert classpath_jars == { + "dep_of_android_resource.jar", + "ids_r_dot_java.jar", + "top.jar", + "direct_dep.jar", + "transitive_lib.jar", + "mid_test.jar", + "lib_with_resource_only.jar", + } + + +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_audit_classpath_json(buck: Buck) -> None: + top = "fbsource//fbandroid/buck2/tests/good/classpath:top" + direct_dep = "fbsource//fbandroid/buck2/tests/good/classpath:direct_dep" + + result = await buck.audit("classpath", top, direct_dep, "--json") + out = json.loads(result.stdout.strip()) + + assert len(out.keys()) == 2, f"Found more than 2 targets in {out}" + assert _classpath_jars(out.get(direct_dep)) == {"direct_dep.jar"} + assert _classpath_jars(out.get(top)) == { + "top.jar", + "direct_dep.jar", + "mid_test.jar", + "transitive_lib.jar", + } diff --git a/tests/e2e/build/BUCK b/tests/e2e/build/BUCK new file mode 100644 index 0000000000000..5b95e52cda17b --- /dev/null +++ b/tests/e2e/build/BUCK @@ -0,0 +1,54 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_build_isolated", + srcs = ["test_build_isolated.py"], + data = "//buck2/tests/targets:isolated_targets", + env = { + "OVR_CONFIG": "1", + "PRELUDE": "$(location prelude//:prelude)", + }, + serialize_test_cases = False, + deps = [ + "fbcode//buck2/tests/e2e_util:assert_occurrences", + "fbcode//buck2/tests/e2e_util:utils", + "fbsource//third-party/pypi/aiohttp:aiohttp", + ], +) + +buck2_e2e_test( + name = "test_build_inplace", + srcs = ["test_build_inplace.py"], + require_nano_prelude = True, + deps = [ + "//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_worker", + srcs = ["test_worker.py"], + tags = ["long_running"], + deps = [ + "//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_action_digest", + srcs = ["test_action_digest.py"], + # This test is currently broken on Windows due to rustc_link non-determinism + # https://fb.workplace.com/groups/346627374465346/permalink/511477684646980/ + skip_for_os = [ + "windows", + ], + # DO NOT Modify or add more test flags, + # this is used to gate changes that modify action_digest. 
+ # Changing it will prevent the test from working properly + test_with_compiled_buck2 = True, + deps = [ + "//buck2/tests/e2e_util:utils", + ], +) diff --git a/tests/e2e/build/test_action_digest.py b/tests/e2e/build/test_action_digest.py new file mode 100644 index 0000000000000..a73a35a547dc0 --- /dev/null +++ b/tests/e2e/build/test_action_digest.py @@ -0,0 +1,57 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test, get_mode_from_platform +from buck2.tests.e2e_util.helper.utils import read_what_ran + + +# If this test fails, it means that a change that modifies action digest was made. +# Background in this post: +# https://fb.workplace.com/groups/buck2eng/permalink/3452581371706005/ +# Changes should instead be deployed by: +# 1: Create a new buck2 flag and hide the changes behind it (Ex. D59503359) +# 2: Wait for bvb that contains #1 to land +# 3: Activate the flag via .buckconfig (Ex. D59648609) +# 3.1: Fix/followup on any CI failures caused by cache invalidation +# 4: Observe for a couple of days to ensure that there are no issues +# 5. Remove the code associated with the config flag but NOT the config itself, +# this way this test wouldn't need to be changed at all (Ex. D59864942) +# 6: Wait for bvb that contains #5 to land +# 7: Remove the config flag (Ex. D59988979) +@buck_test(inplace=True) +async def test_action_digest(buck: Buck) -> None: + await buck.build( + get_mode_from_platform(), + "fbcode//buck2/tests/targets/rules/rust/hello_world:welcome", + "--remote-only", + ) + compiled_out = await read_what_ran(buck) + compiled_digests = [ + entry["reproducer"]["details"]["digest"] for entry in compiled_out + ] + compiled_digests.sort() + + buck.path_to_executable = Path("buck2") + await buck.build( + get_mode_from_platform(), + "fbcode//buck2/tests/targets/rules/rust/hello_world:welcome", + "--remote-only", + ) + deployed_out = await read_what_ran(buck) + deployed_digests = [ + entry["reproducer"]["details"]["digest"] for entry in deployed_out + ] + deployed_digests.sort() + + assert ( + compiled_digests == deployed_digests + ), "Action Digest was modified, refer to comment on this test for next steps" diff --git a/tests/e2e/build/test_build_inplace.py b/tests/e2e/build/test_build_inplace.py new file mode 100644 index 0000000000000..3aa1f52016cd8 --- /dev/null +++ b/tests/e2e/build/test_build_inplace.py @@ -0,0 +1,959 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
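The digest comparison in test_action_digest.py above touches a single field of each `read_what_ran` entry. The accessed shape is roughly the following — inferred from the test, with an invented digest value; real entries carry more fields:

what_ran_entry = {
    "reproducer": {
        "details": {
            "digest": "a1b2c3d4e5f6:142",  # "<hash>:<size>" of the action
        },
    },
}
digest = what_ran_entry["reproducer"]["details"]["digest"]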
+ +# pyre-strict + + +import asyncio +import json +import os +import shutil +import subprocess +import sys +from pathlib import Path +from typing import Any, Dict, Optional, Tuple + +import pytest + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.api.buck_result import BuckException, BuildResult +from buck2.tests.e2e_util.api.process import Process +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test, env, get_mode_from_platform +from buck2.tests.e2e_util.helper.utils import json_get, read_what_ran + + +# rust rule implementations hardcode invocation of `/bin/jq` which is not available on Mac RE workers (or mac laptops) +def rust_linux_only() -> bool: + return sys.platform == "linux" + + +# builds targets in an fbcode target configuration, unsupported on mac RE workers +def fbcode_linux_only() -> bool: + return sys.platform == "linux" + + +@buck_test(inplace=True) +async def test_buildfiles(buck: Buck) -> None: + await buck.build("fbcode//buck2/tests/targets/interpreter/buildfiles:buildfile") + + +@buck_test(inplace=True) +async def test_build_output(buck: Buck) -> None: + output_path = os.path.join( + "fbcode", + "buck2", + "tests", + "targets", + "interpreter", + "buildfiles", + "TARGETS.v2", + ) + + result = await buck.build_without_report( + "fbcode//buck2/tests/targets/interpreter/buildfiles:buildfile", + "--show-output", + ) + assert ( + f"fbcode//buck2/tests/targets/interpreter/buildfiles:buildfile {output_path}\n" + == result.stdout + ) + + result = await buck.build_without_report( + "fbcode//buck2/tests/targets/interpreter/buildfiles:buildfile", + "--show-simple-output", + ) + assert f"{output_path}\n" == result.stdout + + result = await buck.build_without_report( + "fbcode//buck2/tests/targets/interpreter/buildfiles:buildfile", + "--show-json-output", + ) + + # Escaping backslashes needed for windows paths + json_escaped_output_path = output_path.replace("\\", "\\\\") + assert ( + f'{{"fbcode//buck2/tests/targets/interpreter/buildfiles:buildfile":"{json_escaped_output_path}"}}\n' + == result.stdout + ) + + +def extract_gen_folder(output: str) -> str: + return output[: output.find("{0}gen{0}".format(os.path.sep)) + 4] + + +if rust_linux_only(): + + @buck_test(inplace=True) + async def test_build_symlink_rust_rule(buck: Buck) -> None: + args = [ + "fbcode//buck2/tests/targets/rules/rust/hello_world:welcome", + "--show-full-output", + ] + if sys.platform == "darwin": + args.append("@//mode/mac") + result = await buck.build(*args) + + output_dict = result.get_target_to_build_output() + for _target, output in output_dict.items(): + gen_folder = extract_gen_folder(output) + # v1: buck2/tests/targets/rules/rust/hello_world/welcome#binary/welcome + symlink = ( + Path(gen_folder) + / "fbcode" + / "buck2" + / "tests" + / "targets" + / "rules" + / "rust" + / "hello_world" + / "welcome" + ) + assert symlink.is_symlink() + + +if fbcode_linux_only(): + + @buck_test(inplace=True) + async def test_build_symlink_python_rule(buck: Buck) -> None: + args = [ + "fbcode//buck2/tests/targets/rules/python/hello_world:welcome", + "--show-full-output", + ] + if sys.platform == "darwin": + args.append("@//mode/mac") + result = await buck.build(*args) + output_dict = result.get_target_to_build_output() + for _target, output in output_dict.items(): + gen_folder = extract_gen_folder(output) + # v1: buck2/tests/targets/rules/python/hello_world/welcome.par + symlink = ( + Path(gen_folder) + / "fbcode" + / "buck2" + / 
"tests" + / "targets" + / "rules" + / "python" + / "hello_world" + / "welcome.par" + ) + assert symlink.is_symlink() + + +if fbcode_linux_only(): + + @buck_test(inplace=True) + async def test_build_symlink_cpp_rule(buck: Buck) -> None: + args = [ + "fbcode//buck2/tests/targets/rules/cxx/hello_world:welcome", + "--show-full-output", + ] + if sys.platform == "darwin": + args.append("@//mode/mac") + result = await buck.build(*args) + output_dict = result.get_target_to_build_output() + for _target, output in output_dict.items(): + gen_folder = extract_gen_folder(output) + # v1: buck2/tests/targets/rules/cxx/hello_world/welcome + symlink = ( + Path(gen_folder) + / "fbcode" + / "buck2" + / "tests" + / "targets" + / "rules" + / "cxx" + / "hello_world" + / "welcome" + ) + assert symlink.is_symlink() + + +@buck_test(inplace=True) +async def test_build_symlink_genrule_rule(buck: Buck) -> None: + args = [ + "fbcode//buck2/tests/targets/rules/genrule/hello_world:welcome", + "--show-full-output", + get_mode_from_platform(), + ] + result = await buck.build(*args) + output_dict = result.get_target_to_build_output() + for _target, output in output_dict.items(): + gen_folder = extract_gen_folder(output) + # v1: buck2/tests/targets/rules/genrule/hello_world/welcome/out.txt + symlink = ( + Path(gen_folder) + / "fbcode" + / "buck2" + / "tests" + / "targets" + / "rules" + / "genrule" + / "hello_world" + / "out" + / "out.txt" + ) + assert symlink.is_symlink() + + +@buck_test(inplace=True) +async def test_build_symlink_genrule_rule_outs(buck: Buck) -> None: + # Test this using projected artifacts. + args = [ + "fbcode//buck2/tests/targets/rules/genrule/hello_world:outs", + "--show-full-output", + get_mode_from_platform(), + ] + result = await buck.build(*args) + output_dict = result.get_target_to_build_output() + for _target, output in output_dict.items(): + gen_folder = extract_gen_folder(output) + symlink = ( + Path(gen_folder) + / "fbcode" + / "buck2" + / "tests" + / "targets" + / "rules" + / "genrule" + / "hello_world" + / "out" + ) + assert symlink.is_symlink() + assert (symlink / "foo" / "out.txt").is_file() + + +# TODO(marwhal): Fix and enable on Windows +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_build_symlink_sh_binary(buck: Buck) -> None: + target = "fbcode//buck2/tests/targets/rules/shell:diff" + args = [target, "--show-full-output", get_mode_from_platform()] + result = await buck.build(*args) + output_dict = result.get_target_to_build_output() + + output = output_dict[target] + gen_folder = extract_gen_folder(output) + symlink = ( + Path(gen_folder) / "fbcode" / "buck2" / "tests" / "targets" / "rules" / "shell" + ) + if sys.platform == "win32": + symlink /= "diff.bat" + else: + symlink /= "diff" + + # Verify we can both versions: + subprocess.check_call([output]) + subprocess.check_call([symlink]) + + +@buck_test(inplace=True) +async def test_build_symlink_does_not_traverse_existing_symlinks(buck: Buck) -> None: + target = "fbcode//buck2/tests/targets/rules/shell:diff" + + args = [target, "--show-full-output", get_mode_from_platform()] + result = await buck.build(*args) + output_dict = result.get_target_to_build_output() + + output = output_dict[target] + gen_folder = extract_gen_folder(output) + symlink_folder = ( + Path(gen_folder) / "fbcode" / "buck2" / "tests" / "targets" / "rules" / "shell" + ) + + # Now, overwrite part of the symlink path with something we cannot traverse. + path = symlink_folder.parent + shutil.rmtree(path) + # On Windows this is just non existing path. 
+ os.symlink("/dev/null", path) + + # Can we still build? If we delete the symlink when walking up the path, we + # can. If we traverse it, we can't. + await buck.build(*args) + + +@buck_test(inplace=True) +async def test_sh_binary_no_append_extension(buck: Buck) -> None: + target = "fbcode//buck2/tests/targets/rules/shell:no_extension" + args = [target, "--show-full-output", get_mode_from_platform()] + result = await buck.build(*args) + output_dict = result.get_target_to_build_output() + output = Path(output_dict[target]) + + # Verify that we created the script symlink without an extension + assert (output.parent / "resources" / "no_extension").is_symlink() + + # And that we're calling it without an extension as well + last_script_line = output.read_text().splitlines()[-1] + if sys.platform == "win32": + assert "%BUCK_PROJECT_ROOT%\\no_extension %*" in last_script_line + else: + assert '"$BUCK_PROJECT_ROOT/no_extension" "$@"' in last_script_line + + +@buck_test(inplace=True) +async def test_cquery(buck: Buck) -> None: + result = await buck.cquery( + """deps(fbcode//buck2/tests/targets/commands:exported)""" + ) + assert "fbcode//buck2/tests/targets/commands:exported" in result.stdout + + +@buck_test(inplace=True) +async def test_cquery_with_config_value(buck: Buck) -> None: + deps_enabled_result = await buck.cquery( + "--config", + "user.deps_enabled=true", + "deps(fbcode//buck2/tests/targets/commands:lib)", + ) + assert "fbcode//buck2/tests/targets/commands:dynamic" in deps_enabled_result.stdout + + deps_disabled_result = await buck.cquery( + "--config", + "user.deps_enabled=false", + "deps(fbcode//buck2/tests/targets/commands:lib)", + ) + assert ( + "fbcode//buck2/tests/targets/commands:dynamic" + not in deps_disabled_result.stdout + ) + + +if rust_linux_only(): + + @buck_test(inplace=True) + async def test_show_output(buck: Buck) -> None: + TARGET = "fbcode//buck2/tests/targets/rules/genrule:executable_helper" + result = await buck.build(TARGET, "--show-output") + + build_report = result.get_build_report() + build_report_outputs = [ + (TARGET, str(output)) for output in build_report.outputs_for_target(TARGET) + ] + show_output_outputs = [ + (target, os.path.join(build_report.root, output)) + for target, output in result.get_target_to_build_output().items() + ] + + assert show_output_outputs == build_report_outputs + + TARGET = "fbcode//buck2/tests/targets/rules/rust:hello_explicit" + result = await buck.build(TARGET, "--show-output") + + build_report = result.get_build_report() + build_report_outputs = [ + (TARGET, str(output)) for output in build_report.outputs_for_target(TARGET) + ] + show_output_outputs = [ + (target, os.path.join(build_report.root, output)) + for target, output in result.get_target_to_build_output().items() + ] + + assert show_output_outputs == build_report_outputs + + TARGET = "fbcode//buck2/tests/targets/rules/cxx:my_cpp1" + SUBTARGET = "compilation-database" + TARGET_WITH_SUBTARGET = ( + "fbcode//buck2/tests/targets/rules/cxx:my_cpp1[compilation-database]" + ) + result = await buck.build(TARGET_WITH_SUBTARGET, "--show-output") + + build_report = result.get_build_report() + build_report_outputs = [ + (TARGET_WITH_SUBTARGET, str(output)) + for output in build_report.outputs_for_target(TARGET, SUBTARGET) + ] + show_output_outputs = [ + (target, os.path.join(build_report.root, output)) + for target, output in result.get_target_to_build_output().items() + ] + + assert show_output_outputs == build_report_outputs + + +@buck_test(inplace=True) +async def 
test_show_full_output(buck: Buck) -> None:
+    TARGET = "fbcode//buck2/tests/targets/rules/genrule:executable_helper"
+    result = await buck.build(TARGET, "--show-full-output")
+
+    build_report = result.get_build_report()
+    build_report_outputs = [
+        (TARGET, str(output)) for output in build_report.outputs_for_target(TARGET)
+    ]
+    show_output_outputs = list(result.get_target_to_build_output().items())
+
+    assert show_output_outputs == build_report_outputs
+
+    for _, output in show_output_outputs:
+        assert os.path.isabs(output), f"Output path must be absolute, got `{output}`."
+        assert os.path.exists(output), f"Output path `{output}` does not exist!"
+
+
+@buck_test(inplace=True)
+@env("BUCK_LOG", "info")
+async def test_consistent_build(buck: Buck) -> None:
+    args = ["fbcode//buck2/tests/targets/rules/genrule:"]
+    if sys.platform == "win32":
+        args.append("@//mode/win")
+    result0 = await buck.build(*args)
+    await buck.kill()
+    result1 = await buck.build(*args)
+    # Don't know if action key should stay consistent between clean builds,
+    # but number of cache misses should.
+    assert sum(result0.get_action_to_cache_miss_count().values()) == sum(
+        result1.get_action_to_cache_miss_count().values()
+    )
+
+    build_report0 = result0.get_build_report()
+    build_report1 = result1.get_build_report()
+
+    # Output path should stay the same between builds, in particular the configuration hash.
+    TARGET = "fbcode//buck2/tests/targets/rules/genrule:my_genrule1"
+    build_report0_outputs = [
+        (TARGET, str(output)) for output in build_report0.outputs_for_target(TARGET)
+    ]
+    build_report1_outputs = [
+        (TARGET, str(output)) for output in build_report1.outputs_for_target(TARGET)
+    ]
+    assert build_report0_outputs == build_report1_outputs
+
+
+@buck_test(inplace=True)
+@env("BUCK_LOG", "info")
+async def test_cached_build(buck: Buck) -> None:
+    args = ["fbcode//buck2/tests/targets/rules/genrule:"]
+    if sys.platform == "win32":
+        args.append("@//mode/win")
+    await buck.build(*args)
+    result = await buck.build(*args)
+    # Should be empty since nothing needs to be rebuilt
+    assert sum(result.get_action_to_cache_miss_count().values()) == 0
+
+
+@buck_test(inplace=True)
+async def test_build_test_dependencies(buck: Buck) -> None:
+    target = "fbcode//buck2/tests/targets/rules/sh_test:test_with_env"
+    build = await buck.build(
+        target,
+        "-c",
+        "build_report.unstable_include_other_outputs=true",
+        "--build-test-info",
+        "--build-report",
+        "-",
+    )
+    report = build.get_build_report().build_report
+
+    path = ["results", target, "other_outputs", "DEFAULT"]
+    for p in path:
+        report = report[p]
+
+    has_file = False
+    for artifact in report:
+        if "__file__" in artifact:
+            has_file = True
+
+    assert has_file
+
+
+# TODO(marwhal): Fix and enable on Windows
+@buck_test(inplace=True, skip_for_os=["windows"])
+async def test_fat_platforms(buck: Buck) -> None:
+    target = "fbcode//buck2/tests/targets/fat_platforms:example_use"
+    result = await buck.build(
+        target,
+        "-c",
+        "build.execution_platforms=fbcode//buck2/tests/targets/fat_platforms:platforms",
+        "--show-full-output",
+    )
+    output = result.get_target_to_build_output()[target]
+    with open(output) as f:
+        s = f.read()
+        assert "darwin" in s, "expected 'darwin' in output: `{}`".format(s)
+        assert "linux" in s, "expected 'linux' in output: `{}`".format(s)
+
+
+@buck_test(inplace=True)
+async def test_classpath_query(buck: Buck) -> None:
+    await buck.build("fbcode//buck2/tests/targets/template_placeholder/...")
+
+
+@buck_test(inplace=True)
+async def 
test_missing_outputs_error(buck: Buck) -> None: + # Check that we a) say what went wrong, b) show the command + await expect_failure( + buck.build( + "fbcode//buck2/tests/targets/rules/genrule/bad:my_genrule_bad", + # We really should make this an isolated test to avoid having to set this. + "-c", + "build.use_limited_hybrid=True", + ), + stderr_regex="(Action failed to produce output.*frecli|frecli.*OUTMISS)", + ) + + # Same, but locally. + await expect_failure( + buck.build( + "fbcode//buck2/tests/targets/rules/genrule/bad:my_genrule_bad_local" + ), + stderr_regex="Action failed to produce outputs.*Stdout:\nHELLO_STDOUT.*Stderr:\nHELLO_STDERR", + ) + + +@buck_test(inplace=True) +async def test_local_execution(buck: Buck) -> None: + target = "fbcode//buck2/tests/targets/rules/genrule:echo_pythonpath" + + await buck.kill() + res = await buck.build(target, env={"PYTHONPATH": "foobar"}) + + build_report = res.get_build_report() + output = build_report.output_for_target(target) + assert output.read_text().rstrip() == "" + + +if fbcode_linux_only(): # noqa: C901 + + @buck_test(inplace=True) + async def test_instruction_count_disabled(buck: Buck) -> None: + package = "fbcode//buck2/tests/targets/rules/instruction_counts" + name = "three_billion_instructions" + + await buck.build( + f"{package}:{name}", + "-c", + "buck2.miniperf2=false", + "--no-remote-cache", + "--local-only", + ) + + log = (await buck.log("show")).stdout.strip().splitlines() + for line in log: + commands = json_get( + line, "Event", "data", "SpanEnd", "data", "ActionExecution", "commands" + ) + + for c in commands or []: + assert c["details"]["metadata"].get("execution_stats") is None + + async def get_matching_details( + buck: Buck, package: str, name: str + ) -> Dict[str, Any]: + details = None + log = (await buck.log("show")).stdout.strip().splitlines() + for line in log: + action = json_get( + line, + "Event", + "data", + "SpanEnd", + "data", + "ActionExecution", + ) + + if action is None: + continue + + if action["name"]["category"] != "genrule": + continue + + label = action["key"]["owner"]["TargetLabel"]["label"] + if label["package"] != package: + continue + if label["name"] != name: + continue + + details = action["commands"][-1]["details"] + return details + + raise AssertionError("did not find the expected target") + + @buck_test(inplace=True) + async def test_instruction_count_enabled(buck: Buck) -> None: + package = "fbcode//buck2/tests/targets/rules/instruction_counts" + name = "three_billion_instructions" + await buck.build( + f"{package}:{name}", + "-c", + "buck2.miniperf2=true", + "--no-remote-cache", + "--local-only", + ) + + details = await get_matching_details(buck, package, name) + assert "OmittedLocalCommand" in details["command_kind"]["command"] + + # Check that we are within 10% + instruction_count = details["metadata"]["execution_stats"][ + "cpu_instructions_user" + ] + assert instruction_count > 2850000000 + assert instruction_count < 3150000000 + + @buck_test(inplace=True) + async def test_instruction_count_remote(buck: Buck) -> None: + package = "fbcode//buck2/tests/targets/rules/instruction_counts" + name = "three_billion_instructions" + await buck.build( + f"{package}:{name}", + "--no-remote-cache", + "--write-to-cache-anyway", + "--remote-only", + ) + + details = await get_matching_details(buck, package, name) + assert not details["command_kind"]["command"]["RemoteCommand"]["cache_hit"] + + # Check that we are within 10% + instruction_count = details["metadata"]["execution_stats"][ + 
"cpu_instructions_user" + ] + assert instruction_count > 2850000000 + assert instruction_count < 3150000000 + + # Check we also get it on a cache hit. + + await buck.kill() + await buck.build( + f"{package}:{name}", + "--remote-only", + ) + + details = await get_matching_details(buck, package, name) + assert details["command_kind"]["command"]["RemoteCommand"]["cache_hit"] + + # Check that we are within 10% + instruction_count = details["metadata"]["execution_stats"][ + "cpu_instructions_user" + ] + assert instruction_count > 2850000000 + assert instruction_count < 3150000000 + + +# This test relies on `buck2-asic-devinfra` use-case and `asic-grid` platform. +# In case of timeouts and failures, best would be to just disable this test. +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_asic_platforms(buck: Buck) -> None: + target = "fbcode//buck2/tests/targets/asic_platforms:uses_asic_grid_tool" + result = await buck.build( + target, + "--show-full-output", + ) + output = result.get_target_to_build_output()[target] + with open(output) as output: + s = output.read() + assert ( + "thefacebook.com" in s + ), "expected 'thefacebook.com' in output: `{}`".format(output) + + +@buck_test(inplace=True) +async def test_exit_when_different_state(buck: Buck) -> None: + a = buck.build( + "@fbcode//mode/dev", + "--exit-when-different-state", + "fbcode//buck2/tests/targets/exit_when_different_state:long_running_target", + "--local-only", + "--no-remote-cache", + ) + + b = buck.build( + "@fbcode//mode/opt", + "--exit-when-different-state", + "fbcode//buck2/tests/targets/exit_when_different_state:long_running_target", + "--local-only", + "--no-remote-cache", + ) + + # create a coroutine that can return a result + async def process( + p: Process[BuildResult, BuckException] + ) -> Tuple[Optional[int], str]: + result = await expect_failure(p) + return (result.process.returncode, result.stderr) + + done, pending = await asyncio.wait( + [process(a), process(b)], + timeout=10, + return_when=asyncio.FIRST_COMPLETED, + ) + + assert len(done) == 1 + assert len(pending) == 1 + + # these are sets, so can't index them. + for task in done: + exit_code, stderr = task.result() + assert "daemon is busy" in stderr + assert exit_code == 4 + + +@buck_test(inplace=True) +@pytest.mark.parametrize("same_state", [True, False]) +async def test_exit_when_preemptible_always(buck: Buck, same_state: bool) -> None: + a = buck.build( + "@fbcode//mode/dev", + "--preemptible=always", + "fbcode//buck2/tests/targets/exit_when_different_state:long_running_target", + "--local-only", + "--no-remote-cache", + ) + + b = buck.build( + # We expect to ALWAYS preempt commands, to prevent blocking new callees + "@fbcode//mode/dev" if same_state else "@fbcode//mode/opt", + "--preemptible=always", + "fbcode//buck2/tests/targets/exit_when_different_state:long_running_target", + "--local-only", + "--no-remote-cache", + ) + + # create a coroutine that can return a result + async def process( + p: Process[BuildResult, BuckException] + ) -> Tuple[Optional[int], str]: + result = await expect_failure(p) + return (result.process.returncode, result.stderr) + + done, pending = await asyncio.wait( + [process(a), process(b)], + timeout=10, + return_when=asyncio.FIRST_COMPLETED, + ) + + assert len(done) == 1 + assert len(pending) == 1 + + # these are sets, so can't index them. 
+    for task in done:
+        exit_code, stderr = task.result()
+        assert "daemon preempted" in stderr
+        assert exit_code == 5
+
+
+@buck_test(inplace=True)
+@pytest.mark.parametrize("same_state", [True, False])
+async def test_exit_when_preemptible_on_different_state(
+    buck: Buck, same_state: bool
+) -> None:
+    a = buck.build(
+        "@fbcode//mode/dev",
+        "--preemptible=ondifferentstate",
+        "fbcode//buck2/tests/targets/exit_when_different_state:long_running_target",
+        "--local-only",
+        "--no-remote-cache",
+    )
+
+    b = buck.build(
+        # We expect to preempt commands only when the incoming command carries
+        # different state, to prevent blocking new callees
+        "@fbcode//mode/dev" if same_state else "@fbcode//mode/opt",
+        "--preemptible=ondifferentstate",
+        "fbcode//buck2/tests/targets/exit_when_different_state:long_running_target",
+        "--local-only",
+        "--no-remote-cache",
+    )
+
+    # create a coroutine that can return a result
+    async def process(
+        p: Process[BuildResult, BuckException]
+    ) -> Tuple[Optional[int], str]:
+        result = await expect_failure(p)
+        return (result.process.returncode, result.stderr)
+
+    done, pending = await asyncio.wait(
+        [process(a), process(b)],
+        timeout=10,
+        return_when=asyncio.FIRST_COMPLETED,
+    )
+
+    if same_state:
+        # No preempt when state is the same
+        assert len(done) == 0
+        assert len(pending) == 2
+    else:
+        assert len(done) == 1
+        assert len(pending) == 1
+
+    # These are sets, so we can't index them. Expect all done tasks to be "done" because they're preempted
+    for task in done:
+        exit_code, stderr = task.result()
+        assert "daemon preempted" in stderr
+        assert exit_code == 5
+
+
+@buck_test(inplace=True)
+async def test_genrule_with_remote_execution_dependencies(buck: Buck) -> None:
+    result = await buck.build(
+        get_mode_from_platform(),
+        "fbcode//buck2/tests/targets/rules/genrule/re_dependencies:remote_execution_dependencies",
+        "--config",
+        "build.default_remote_execution_use_case=buck2-testing",
+        "--no-remote-cache",
+        "--remote-only",
+        "--show-full-output",
+    )
+    output_dict = result.get_target_to_build_output()
+    for _target, output in output_dict.items():
+        with Path(output).open() as f:
+            deps = json.load(f)
+            assert len(deps) == 1
+            assert deps[0]["smc_tier"] == "noop"
+            assert deps[0]["id"] == "foo"
+            assert deps[0]["reservation_id"] == "noop"
+
+
+async def read_io_provider_for_last_build(buck: Buck) -> str:
+    log = (await buck.log("show")).stdout
+    for line in log.splitlines():
+        io_provider = json_get(
+            line,
+            "Event",
+            "data",
+            "SpanStart",
+            "data",
+            "Command",
+            "metadata",
+            "io_provider",
+        )
+        if io_provider:
+            return io_provider
+
+    raise Exception("Could not find io_provider")
+
+
+# FIXME(JakobDegen): This test is flaky due to something in the apple toolchain that I don't
+# understand. 
The flakiness needs to be fixed, or better yet, this needs to be made isolated so
+# that people don't have to learn things about apple toolchains to debug it
+if False:
+
+    @buck_test(
+        inplace=True,
+        skip_for_os=["windows"],
+        extra_buck_config={
+            "buck2": {
+                "allow_eden_io": "false",
+                "digest_algorithms": "BLAKE3-KEYED",
+                "source_digest_algorithm": "BLAKE3-KEYED",
+            }
+        },
+    )
+    async def test_source_hashing_blake3_only(buck: Buck) -> None:
+        target = "fbcode//buck2/tests/targets/rules/rust/hello_world:welcome"
+
+        await buck.build(target, "--no-remote-cache", "--remote-only")
+        run1 = await read_what_ran(buck)
+
+        io_provider = await read_io_provider_for_last_build(buck)
+        assert io_provider == "fs"
+
+        await buck.kill()
+        await buck.build(
+            target,
+            "--no-remote-cache",
+            "--remote-only",
+            env={"BUCK2_DISABLE_FILE_ATTR": "true"},
+        )
+        run2 = await read_what_ran(buck)
+
+        def key(entry: Dict[str, Any]) -> str:
+            return entry["identity"]
+
+        assert sorted(run1, key=key) == sorted(run2, key=key)
+
+    @buck_test(
+        inplace=True,
+        skip_for_os=["windows"],
+        extra_buck_config={
+            "buck2": {
+                "allow_eden_io": "true",
+                "digest_algorithms": "BLAKE3-KEYED",
+                "source_digest_algorithm": "BLAKE3-KEYED",
+            }
+        },
+    )
+    async def test_source_hashing_eden_blake3_only(buck: Buck) -> None:
+        if not os.path.exists(buck.cwd / ".eden"):
+            pytest.skip("This test is meaningless if not using Eden")  # pyre-ignore
+
+        # Check we have Eden I/O
+        await buck.build()
+        io_provider = await read_io_provider_for_last_build(buck)
+
+        # If our test didn't use Eden then that means the current host's Eden is too old.
+        # Skip in this case, unless we're running on Sandcastle.
+
+        if io_provider != "eden" and os.environ.get("SANDCASTLE") is None:
+            pytest.skip("Unsupported Eden version")  # pyre-ignore
+
+        # On Sandcastle we'll assert we *are* using Eden to make sure this test
+        # isn't just always skipping.
+ assert io_provider == "eden" + + target = "fbcode//buck2/tests/targets/rules/rust/hello_world:welcome" + + await buck.build(target, "--no-remote-cache", "--remote-only") + run1 = await read_what_ran(buck) + + with open(buck._env["BUCK2_TEST_EXTRA_EXTERNAL_CONFIG"], "a") as f: + f.write("[buck2]\n") + f.write("allow_eden_io = false") + + await buck.kill() + await buck.build( + target, + "--no-remote-cache", + "--remote-only", + env={"BUCK2_DISABLE_FILE_ATTR": "true"}, + ) + run2 = await read_what_ran(buck) + + io_provider = await read_io_provider_for_last_build(buck) + assert io_provider == "fs" + + def key(entry: Dict[str, Any]) -> str: + return entry["identity"] + + assert sorted(run1, key=key) == sorted(run2, key=key) + + @buck_test( + inplace=True, + skip_for_os=["windows"], + extra_buck_config={ + "buck2": { + "allow_eden_io": "false", + "digest_algorithms": "BLAKE3-KEYED,SHA1", + "source_digest_algorithm": "SHA1", + } + }, + ) + async def test_source_hashing(buck: Buck) -> None: + if not os.path.exists(buck.cwd / ".eden"): + pytest.skip("This test is meaningless if not using Eden") # pyre-ignore + + target = "fbcode//buck2/tests/targets/rules/rust/hello_world:welcome" + + await buck.build(target, "--no-remote-cache", "--remote-only") + run1 = await read_what_ran(buck) + + await buck.kill() + await buck.build( + target, + "--no-remote-cache", + "--remote-only", + env={"BUCK2_DISABLE_FILE_ATTR": "true"}, + ) + run2 = await read_what_ran(buck) + + def key(entry: Dict[str, Any]) -> str: + return entry["identity"] + + assert sorted(run1, key=key) == sorted(run2, key=key) + + +@buck_test(inplace=True, allow_soft_errors=True) +async def test_eden_io_with_mismatched_root(buck: Buck) -> None: + cwd = Path("buck2") / "tests" / "targets" / "eden_io" + await buck.build("//...", rel_cwd=cwd) diff --git a/tests/e2e/build/test_build_isolated.py b/tests/e2e/build/test_build_isolated.py new file mode 100644 index 0000000000000..ee1cc0b56ba28 --- /dev/null +++ b/tests/e2e/build/test_build_isolated.py @@ -0,0 +1,1009 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json +import os +import re +import sys +from pathlib import Path +from typing import List + +import pytest + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.api.buck_result import BuckException +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test, env + +from buck2.tests.e2e_util.helper.assert_occurrences import ( + assert_occurrences, + assert_occurrences_regex, +) +from buck2.tests.e2e_util.helper.utils import json_get, random_string, read_what_ran + +# Taken from data.proto +ACTION_EXECUTION_KIND_LOCAL = 1 +ACTION_EXECUTION_KIND_LOCAL_DEP_FILE = 7 +ACTION_EXECUTION_KIND_REMOTE_DEP_FILE_CACHE = 9 + + +# Eden materializer only available on Linux +def eden_linux_only() -> bool: + return sys.platform == "linux" + + +############################################################################################ +### NOTE(JakobDegen): Do not add new tests to this file. Instead: +### 1. Use or make a file in `tests/core` with a name that explains what you're testing. +### 2. Give that file its own data directory, not one shared with other tests. 
+############################################################################################ + + +@buck_test(inplace=False, data_dir="pass") +async def test_pass(buck: Buck) -> None: + results = await buck.build("//:abc") + assert "does not have any outputs" not in results.stderr + + +@buck_test(inplace=False, data_dir="pass") +async def test_missing_target(buck: Buck) -> None: + await expect_failure(buck.build("//:not_a_target_name")) + + +@buck_test(inplace=False, data_dir="pass") +async def test_success_message_printed(buck: Buck) -> None: + results = await buck.build("//:abc", "--console=simplenotty") + + assert "BUILD SUCCEEDED" in results.stderr + + results = await buck.build("//:abc", "--console=simpletty") + + assert_occurrences("\x1b[38;5;10mBUILD SUCCEEDED\x1b[39m", results.stderr, 1) + + results = await buck.build("//:abc", "--console=super") + + assert_occurrences("\x1b[38;5;10mBUILD SUCCEEDED\x1b[39m", results.stderr, 1) + + +@buck_test(inplace=False, data_dir="failing") +async def test_multiple_errors_print_with_simple_console(buck: Buck) -> None: + e = await expect_failure( + buck.build( + "--console=simple", + "//:foo", + "//:bar", + "//:using_dir", + ) + ) + + # Make sure that streamed events come back + assert_occurrences_regex("(Build ID|Buck UI):", e.stderr, 1) + assert_occurrences("RE Session: ", e.stderr, 1) + assert_occurrences_regex("^BUILD FAILED", e.stderr, 1) + + execution_error = "Action failed: {} () (bin_false)" + assert_occurrences(execution_error.format("root//:foo"), e.stderr, 2) + assert_occurrences(execution_error.format("root//:bar"), e.stderr, 2) + + exit_code = "(Local|Remote) command returned non-zero exit code 1" + assert_occurrences_regex(exit_code, e.stderr, 6) + + build_error = "Failed to build '{} ()'" + assert_occurrences(build_error.format("root//:foo"), e.stderr, 1) + assert_occurrences(build_error.format("root//:bar"), e.stderr, 1) + # TODO(nmj): Remove this comment + # assert_occurrences_regex("getting metadata for.*a_dir`", e.stderr, 1) + + e = await expect_failure(buck.build("--console=simple", "//:non_existent")) + + assert_occurrences_regex("^BUILD FAILED", e.stderr, 1) + assert_occurrences( + "Unknown target `non_existent` from package `root//`", e.stderr, 1 + ) + + +@buck_test(inplace=False, data_dir="failing") +async def test_multiple_errors_print_with_super_console(buck: Buck) -> None: + e = await expect_failure( + buck.build( + "--console=super", + "//:foo", + "//:bar", + "//:using_dir", + ) + ) + + # See https://en.wikipedia.org/wiki/ANSI_escape_code#8-bit for how to decode this + DARK_RED = re.escape("\033[38;5;1m") + + execution_error = "Action failed: " + assert_occurrences_regex(execution_error, e.stderr, 3) + + exit_code = "(Local|Remote) command returned non-zero exit code 1" + assert_occurrences_regex(exit_code, e.stderr, 3) + + # These will eventually be red. 
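+    # For reference (standard ANSI escape semantics, not specific to this
+    # change): "\x1b[38;5;1m" is the 8-bit SGR sequence selecting palette
+    # color 1 (dark red), and "\x1b[39m" resets the foreground to the default.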
+ build_error = "Failed to build '{} ()'" + assert_occurrences(build_error.format("root//:foo"), e.stderr, 1) + assert_occurrences(build_error.format("root//:bar"), e.stderr, 1) + # TODO(nmj): Remove this comment + # assert_occurrences_regex("getting metadata for.*a_dir`", e.stderr, 1) + + assert_occurrences("\x1b[38;5;1mBUILD FAILED\x1b[39m", e.stderr, 1) + + e = await expect_failure(buck.build("--console=super", "//:non_existent")) + + target_error = f"{DARK_RED}Unknown target `non_existent` from package `root//`" + assert_occurrences("\x1b[38;5;1mBUILD FAILED\x1b[39m", e.stderr, 1) + assert_occurrences_regex(target_error, e.stderr, 1) + + +@buck_test(inplace=False, data_dir="transitive_sets") +async def test_transitive_sets(buck: Buck) -> None: + rule = "//:bar" + report = await buck.build(rule) + out = report.get_build_report().output_for_target(rule) + out = out.read_text() + out = [line.strip() for line in out.strip().split("\n")] + assert out == ["bar", "foo", "foo2", "foo1"] + + +@buck_test(inplace=False, data_dir="pass") +async def test_stderr_is_printed_for_successful_actions(buck: Buck) -> None: + no_color_text = "warning on stderr no color" + # Support '\r\n' which is printed on Windows. + simple_color_stripped = "\\] warning on stderr with color\\r?$" + # Make sure we reset the terminal after printing. + simple_color = "\x1b[33mwarning on stderr with color\x1b[0m" + # Make sure we reset the terminal after printing. Color is represented + # slightly differently (33m vs 38;5;3m) because of how parsed / sanitized + # colors are stored. Those are the same color, though. + super_color = "\x1b[38;5;3mwarning on stderr with color\x1b[39m" + + superconsole_stderr_line = "stderr for {} (printer writing_stderr)" + simpleconsole_stderr_line = "stderr:" + + # By default stderr should not be relayed on success w/o the -v2 or higher flag. 
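+    # Expected relay matrix exercised below (summarizing the assertions):
+    #   print_stderr_*        -> hidden by default, shown with -v5
+    #   always_print_stderr_* -> shown both by default and with -v5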
+ + res = await buck.build("--console=simplenotty", "//:print_stderr_simple_notty") + + assert_occurrences(no_color_text, res.stderr, 0) + assert_occurrences_regex(simple_color_stripped, res.stderr, 0) + assert_occurrences(simpleconsole_stderr_line, res.stderr, 0) + + res = await buck.build("--console=simpletty", "//:print_stderr_simple_tty") + + assert_occurrences(no_color_text, res.stderr, 0) + assert_occurrences(simple_color, res.stderr, 0) + assert_occurrences(simpleconsole_stderr_line, res.stderr, 0) + + res = await buck.build("--console=super", "//:print_stderr_super") + + assert_occurrences(no_color_text, res.stderr, 0) + assert_occurrences(super_color, res.stderr, 0) + assert_occurrences( + superconsole_stderr_line.format("root//:print_stderr_super"), res.stderr, 0 + ) + + res = await buck.build( + "-v5", "--console=simplenotty", "//:v_print_stderr_simple_notty" + ) + + assert_occurrences(no_color_text, res.stderr, 1) + assert_occurrences_regex(simple_color_stripped, res.stderr, 1) + + res = await buck.build("-v5", "--console=simpletty", "//:v_print_stderr_simple_tty") + + assert_occurrences(no_color_text, res.stderr, 1) + assert_occurrences(simple_color, res.stderr, 1) + assert_occurrences(simpleconsole_stderr_line, res.stderr, 1) + + res = await buck.build("-v5", "--console=super", "//:v_print_stderr_super") + + assert_occurrences(no_color_text, res.stderr, 1) + assert_occurrences(super_color, res.stderr, 1) + assert_occurrences( + superconsole_stderr_line.format("root//:v_print_stderr_super"), res.stderr, 1 + ) + + # Things that print all the time should print w/o the verbosity flag, or with it + + res = await buck.build( + "--console=simplenotty", "//:always_print_stderr_simple_notty" + ) + + assert_occurrences(no_color_text, res.stderr, 1) + assert_occurrences_regex(simple_color_stripped, res.stderr, 1) + assert_occurrences(simpleconsole_stderr_line, res.stderr, 1) + + res = await buck.build("--console=simpletty", "//:always_print_stderr_simple_tty") + + assert_occurrences(no_color_text, res.stderr, 1) + assert_occurrences(simple_color, res.stderr, 1) + assert_occurrences(simpleconsole_stderr_line, res.stderr, 1) + + res = await buck.build("--console=super", "//:always_print_stderr_super") + + assert_occurrences(no_color_text, res.stderr, 1) + assert_occurrences(super_color, res.stderr, 1) + assert_occurrences( + superconsole_stderr_line.format("root//:always_print_stderr_super"), + res.stderr, + 1, + ) + + res = await buck.build( + "-v5", "--console=simplenotty", "//:v_always_print_stderr_simple_notty" + ) + + assert_occurrences(no_color_text, res.stderr, 1) + assert_occurrences_regex(simple_color_stripped, res.stderr, 1) + assert_occurrences(simpleconsole_stderr_line, res.stderr, 1) + + res = await buck.build( + "-v5", "--console=simpletty", "//:v_always_print_stderr_simple_tty" + ) + + assert_occurrences(no_color_text, res.stderr, 1) + assert_occurrences(simple_color, res.stderr, 1) + assert_occurrences(simpleconsole_stderr_line, res.stderr, 1) + + res = await buck.build("-v5", "--console=super", "//:v_always_print_stderr_super") + + assert_occurrences(no_color_text, res.stderr, 1) + assert_occurrences(super_color, res.stderr, 1) + assert_occurrences( + superconsole_stderr_line.format("root//:v_always_print_stderr_super"), + res.stderr, + 1, + ) + + +@buck_test(inplace=False, data_dir="flagfiles") +async def test_flagfiles_are_located_correctly(buck: Buck) -> None: + out = await buck.build("@//mode/dev", "cell//subdir:simple", rel_cwd=Path("cell")) + + build_report = 
out.get_build_report()
+    output = build_report.output_for_target("cell//subdir:simple")
+    assert output.read_text().rstrip() == "overridden"
+
+    out = await buck.build("@//mode/dev", "cell//subdir:simple", rel_cwd=Path("cell"))
+
+    build_report = out.get_build_report()
+    output = build_report.output_for_target("cell//subdir:simple")
+    assert output.read_text().rstrip() == "overridden"
+
+    # Make sure that relative paths are resolved against the cell root
+    # (determined from project root + cwd) if they're not found relative
+    # to cwd
+    out = await buck.build(
+        "@mode/dev", "cell//subdir:simple", rel_cwd=Path("cell/subdir")
+    )
+
+    build_report = out.get_build_report()
+    output = build_report.output_for_target("cell//subdir:simple")
+    assert output.read_text().rstrip() == "overridden"
+    assert (
+        "`@mode/dev` was specified, but not found. Using file at `//mode/dev`.\n"
+        'This behavior is being deprecated. Please use `"@//mode/dev"` instead'
+    ) in out.stderr
+
+    out = await buck.build("@cell//mode/dev", "cell//subdir:simple")
+
+    build_report = out.get_build_report()
+    output = build_report.output_for_target("cell//subdir:simple")
+    assert output.read_text().rstrip() == "overridden"
+
+    await expect_failure(
+        buck.build("@cell/mode/missing", "cell//subdir:simple"),
+        stderr_regex="Unable to read flag file at `cell/mode/missing`",
+    )
+
+
+@buck_test(inplace=False, data_dir="early_action_cutoff")
+async def test_early_action_cutoff(buck: Buck, tmp_path: Path) -> None:
+    sentinel = tmp_path / "sentinel"
+    sentinel.touch()
+
+    # This action is going to check that the file pointed at by "sentinel"
+    # exists. Point it at a valid file.
+
+    with open(buck.cwd / "sentinel", "w", encoding="utf-8") as f:
+        f.write(str(sentinel))
+
+    await buck.build("//:check")
+
+    # Now, invalidate the action, and remove the underlying sentinel file. If
+    # :check executes now, it'll fail.
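+    # How this exercises early cutoff (our reading of the setup): rewriting
+    # `src` re-runs the upstream action, but that action's output is unchanged,
+    # so :check is cut off early and never re-executes against the now-deleted
+    # sentinel.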
+ + with open(buck.cwd / "src", "w", encoding="utf-8") as f: + f.write("TEXT2") + + sentinel.unlink() + + # Run it to find out + + await buck.build("//:check") + + +@buck_test(inplace=False, data_dir="execution_platforms") +async def test_hybrid_executor_threshold(buck: Buck) -> None: + await buck.build( + "root//executor_threshold_tests/...", + "-c", + f"test.cache_buster={random_string()}", + ) + out = await read_what_ran(buck) + + executors = {line["identity"]: line["reproducer"]["executor"] for line in out} + expected = { + "root//executor_threshold_tests:big () (head)": "Local", + "root//executor_threshold_tests:cp_big () (cp)": "Local", + "root//executor_threshold_tests:small () (head)": "Local", + "root//executor_threshold_tests:cp_small () (cp)": "Re", + } + assert executors == expected + + +@buck_test(inplace=False, data_dir="execution_platforms") +@pytest.mark.parametrize( + "low_pass_filter", + [ + "true", + "false", + ], +) +async def test_hybrid_executor_fallbacks(buck: Buck, low_pass_filter: str) -> None: + opts = [ + "-c", + f"test.cache_buster={random_string()}", + "-c", + f"test.experimental_low_pass_filter={low_pass_filter}", + ] + + # Those work as they are allowed to fallback: + await buck.build( + "root//executor_fallback_tests:local_only", + "root//executor_fallback_tests:local_only_full_hybrid", + "root//executor_fallback_tests:remote_only_prefer_local", + *opts, + ) + + # This one doesn't: + await expect_failure( + buck.build( + "root//executor_fallback_tests:local_only_no_fallback", + *opts, + ) + ) + + +@buck_test(inplace=False, data_dir="execution_platforms") +async def test_hybrid_executor_fallback_preferred_error(buck: Buck) -> None: + opts = [ + "-c", + f"test.cache_buster={random_string()}", + ] + + await expect_failure( + buck.build( + "root//executor_fallback_tests:fails_both", + *opts, + ), + stderr_regex="Failed on local", + ) + + await expect_failure( + buck.build( + "root//executor_fallback_tests:fails_both_prefer_local", + *opts, + ), + stderr_regex="Failed on local", + ) + + +@buck_test(inplace=False, data_dir="execution_platforms") +@pytest.mark.parametrize( + "target", + [ + "slower_locally", + "slower_locally_force_full_hybrid", + ], +) +async def test_hybrid_executor_cancels_local_execution(buck: Buck, target: str) -> None: + await buck.build( + f"root//executor_race_tests:{target}", + "-c", + f"test.cache_buster={random_string()}", + ) + + log = (await buck.log("show")).stdout.strip().splitlines() + commands = None + + for line in log: + commands = commands or json_get( + line, + "Event", + "data", + "SpanEnd", + "data", + "ActionExecution", + "commands", + ) + + assert commands is not None + assert len(commands) == 2 + assert commands[0]["status"] == {"Cancelled": {}} + assert commands[1]["status"] == {"Success": {}} + + +@buck_test(inplace=False, data_dir="execution_platforms") +async def test_hybrid_executor_logging(buck: Buck) -> None: + await buck.build( + "root//executor_fallback_tests:local_only", + "-c", + f"test.cache_buster={random_string()}", + ) + + log = (await buck.log("show")).stdout.strip().splitlines() + commands = None + + for line in log: + commands = commands or json_get( + line, + "Event", + "data", + "SpanEnd", + "data", + "ActionExecution", + "commands", + ) + + assert commands is not None + assert len(commands) == 2 + assert commands[0]["details"]["signed_exit_code"] != 0 + assert commands[0]["status"] == {"Failure": {}} + assert commands[1]["details"]["signed_exit_code"] == 0 + assert commands[1]["status"] == {"Success": 
{}}
+
+
+@buck_test(inplace=False, data_dir="execution_platforms")
+@pytest.mark.parametrize(
+    "low_pass_filter",
+    [
+        "true",
+        "false",
+    ],
+)
+async def test_hybrid_executor_prefer_local(buck: Buck, low_pass_filter: str) -> None:
+    opts = [
+        "-c",
+        f"test.cache_buster={random_string()}",
+        "-c",
+        f"test.experimental_low_pass_filter={low_pass_filter}",
+    ]
+
+    # heavyweight_works_only_locally will only succeed if it runs locally, but
+    # its weight would normally prevent that from happening. It has
+    # prefer_local, so it only works if that results in local execution being
+    # attempted.
+    #
+    # slower_and_works_only_locally will only work locally but it'll fail
+    # faster on RE. This means it must not be attempted at all on RE.
+    await buck.build(
+        "root//executor_race_tests:heavyweight_works_only_locally",
+        "root//executor_race_tests:slower_and_works_only_locally",
+        *opts,
+    )
+
+    # Same as above, but with prefer-local on the build command line instead of the command.
+    await buck.build(
+        "root//executor_race_tests:heavyweight_works_only_locally_local_not_preferred",
+        "root//executor_race_tests:slower_and_works_only_locally_local_not_preferred",
+        "--prefer-local",
+        *opts,
+    )
+
+
+@buck_test(inplace=False, data_dir="execution_platforms")
+async def test_hybrid_executor_prefer_remote_local_fallback(buck: Buck) -> None:
+    opts = [
+        "-c",
+        f"test.cache_buster={random_string()}",
+    ]
+    # Local only command that fails with --remote-only, passes with --prefer-remote
+    await expect_failure(
+        buck.build(
+            "root//executor_fallback_tests:local_only_full_hybrid",
+            "--remote-only",
+            *opts,
+        ),
+        stderr_regex="Failed to build .*local_only_full_hybrid",
+    )
+
+    await buck.build(
+        "root//executor_fallback_tests:local_only_full_hybrid",
+        "--prefer-remote",
+        *opts,
+    )
+
+
+@buck_test(inplace=False, data_dir="execution_platforms")
+async def test_hybrid_executor_prefer_remote(buck: Buck) -> None:
+    opts = [
+        "-c",
+        f"test.cache_buster={random_string()}",
+    ]
+    # With --prefer-remote, build execution is sequential and remote-first.
+    # This uses an action that succeeds slowly on RE and fails fast locally,
+    # which would fail if the two ran concurrently.
+    await buck.build(
+        "root//executor_race_tests:slower_remotely",
+        "--prefer-remote",
+        *opts,
+    )
+
+
+@buck_test(inplace=False, data_dir="execution_platforms")
+async def test_executor_preference_priority(buck: Buck) -> None:
+    opts = [
+        "-c",
+        f"test.cache_buster={random_string()}",
+    ]
+
+    await buck.build(
+        "root//executor_preference_tests:",
+        "--prefer-remote",
+        *opts,
+    )
+
+
+@buck_test(inplace=False, data_dir="execution_platforms")
+async def test_executor_preference_with_remote_args(buck: Buck) -> None:
+    opts = [
+        "-c",
+        f"test.cache_buster={random_string()}",
+    ]
+
+    await buck.build(
+        "root//executor_preference_prefer_remote_arg_tests:",
+        *opts,
+    )
+
+
+@buck_test(inplace=False, data_dir="execution_platforms")
+async def test_executor_preference_with_remote_args_and_cli_override(
+    buck: Buck,
+) -> None:
+    opts = [
+        "-c",
+        f"test.cache_buster={random_string()}",
+    ]
+
+    await expect_failure(
+        buck.build(
+            "root//executor_preference_prefer_remote_arg_tests:",
+            # `--prefer-local` takes priority over any `ctx.actions.run()`
+            "--prefer-local",
+            *opts,
+        )
+    )
+
+
+@buck_test(inplace=False, data_dir="execution_platforms")
+async def test_prefer_local(buck: Buck) -> None:
+    await expect_failure(
+        buck.build(
+            "root//executor_fallback_tests:local_only_no_fallback",
+            "-c",
+            f"test.cache_buster={random_string()}",
+        )
+ ) + + await buck.build( + "root//executor_fallback_tests:local_only_no_fallback", "--prefer-local" + ) + + +@buck_test(inplace=False, data_dir="execution_platforms") +async def test_local_only(buck: Buck) -> None: + args = [ + "root//executor_fallback_tests:local_only_no_fallback", + "-c", + f"test.cache_buster={random_string()}", + ] + + await expect_failure(buck.build(*args)) + + await buck.build( + *args, + "--local-only", + ) + + +@buck_test(inplace=False, data_dir="execution_platforms") +async def test_remote_only(buck: Buck) -> None: + args = [ + "root//executor_fallback_tests:remote_only_no_fallback", + "root//executor_fallback_tests:remote_only_full_hybrid", + "-c", + f"test.cache_buster={random_string()}", + ] + + await expect_failure(buck.build(*args)) + + await buck.build( + *args, + "--remote-only", + ) + + +async def _assert_locally_executed_upload_attempted(buck: Buck, count: int = 1) -> None: + await _assert_upload_attempted(buck, count) + + +async def _assert_upload_attempted(buck: Buck, count: int) -> None: + log = (await buck.log("show")).stdout.strip().splitlines() + uploads = [] + excluded_uploads = [] + + for line in log: + e = json_get( + line, + "Event", + "data", + "SpanEnd", + "data", + "CacheUpload", + ) + if e is None: + continue + if e["success"] or e["re_error_code"] == "PERMISSION_DENIED": + # Tolerate permission denied errors because we don't have a choice on CI :( + uploads.append(e) + else: + excluded_uploads.append(e) + + if len(uploads) == count: + return + else: + print(f"Expected {count} uploads", file=sys.stderr) + print(f"Actual uploads: {uploads}", file=sys.stderr) + print(f"Excluded uploads: {excluded_uploads}", file=sys.stderr) + raise AssertionError("Wrong number of uploads, see above") + + +@buck_test(inplace=False, data_dir="execution_platforms") +@env("BUCK_LOG", "buck2_execute_impl::executors::caching=debug") +async def test_re_uploads(buck: Buck) -> None: + args = ["-c", f"write.text={random_string()}"] + await buck.build("root//upload_tests:write", *args) + await _assert_locally_executed_upload_attempted(buck, 1) + + +@buck_test(inplace=False, data_dir="execution_platforms") +@env("BUCK_LOG", "buck2_execute_impl::executors::caching=debug") +async def test_re_uploads_dir(buck: Buck) -> None: + args = ["-c", f"write.text={random_string()}"] + await buck.build("root//upload_tests:write_in_dir", *args) + await _assert_locally_executed_upload_attempted(buck, 1) + + +@buck_test(inplace=False, data_dir="execution_platforms") +@env("BUCK_LOG", "buck2_execute_impl::executors::caching=debug") +async def test_re_uploads_limit(buck: Buck) -> None: + args = ["-c", f"write.text={random_string()}"] + await buck.build("root//upload_tests:write_xxl", *args) + await _assert_locally_executed_upload_attempted(buck, 0) + + +@buck_test(inplace=False, data_dir="toolchain_deps") +async def test_toolchain_deps(buck: Buck) -> None: + # This test builds two targets, both with the same `default_target_platform` platform + # but which should resolve to different execution platforms because of toolchain deps. + # Both targets still get configured with the `default_target_platform` of release. + # + # The Python toolchain works on Windows/Linux, but we prefer Linux as an exec platform. + # The ASIC toolchain only works on Windows, so `python_and_asic` (which does both) must + # pick Windows for Python as well. 
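+    # (Note, not from the change itself: buck2 resolves an execution platform
+    # per configured target by picking the first registered platform that is
+    # compatible with all of the target's exec deps, toolchains included, which
+    # is why the ASIC toolchain drags the Python toolchain onto Windows here.)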
+ result = await buck.build("root//tests:python_and_asic", "root//tests:python_only") + python_and_asic = ( + result.get_build_report() + .output_for_target("root//tests:python_and_asic") + .read_text() + ) + python_only = ( + result.get_build_report() + .output_for_target("root//tests:python_only") + .read_text() + ) + + # If any of the selects get resolved incorrectly, the toolchain binaries below will change. + assert python_and_asic == "python_release_windows\nasic\n" + assert python_only == "python_release_linux\n" + + await buck.build("root//...", "--target-platforms=root//config:platform_windows") + + # Check we get the toolchain dependencies in uquery and cquery + result = await buck.uquery("deps(//toolchains:python)") + assert "//toolchains:compile_python_release_linux\n" in result.stdout + assert "//toolchains:python_debug\n" in result.stdout + result = await buck.cquery( + "deps(//toolchains:python)", "--target-platforms=root//config:platform_linux" + ) + assert "//toolchains:compile_python_release_linux " in result.stdout + assert "//toolchains:python_debug " not in result.stdout + + +@buck_test(inplace=False, data_dir="http_deferral") +@pytest.mark.parametrize( + "digest_algorithm", + [ + "SHA1", + "SHA256", + ], +) +async def test_http_deferral(buck: Buck, digest_algorithm: str) -> None: + with open(buck.cwd / ".buckconfig", "a") as f: + f.write("[buck2]\n") + f.write(f"digest_algorithms = {digest_algorithm}\n") + + target = "//:download" + + # Check it was deferred + res = await buck.build(target, "--materializations=none") + output = res.get_build_report().output_for_target(target) + assert not os.path.exists(output) + + # Check it can be materialized + res = await buck.build(target) + assert os.path.exists(output) + + +@buck_test(inplace=False, data_dir="http_deferral") +@env( + "BUCK2_TEST_INJECTED_MISSING_DIGESTS", + "1a45666759704bf08fc670aa96118a0415c470fc:221", +) +async def test_http_deferral_uploads(buck: Buck) -> None: + await buck.build("//:target", "--no-remote-cache") + + +@buck_test(inplace=False, data_dir="no_output") +async def test_no_output(buck: Buck) -> None: + results = await buck.build("//:none") + + assert "BUILD SUCCEEDED" in results.stderr + assert "does not have any outputs" in results.stderr + + +@buck_test(inplace=False, data_dir="no_output") +async def test_no_output_wildcard(buck: Buck) -> None: + results = await buck.build("//...") + + assert "BUILD SUCCEEDED" in results.stderr + assert "does not have any outputs" not in results.stderr + + +@buck_test(inplace=False, data_dir="executor_caching") +async def test_executor_caching_disabled(buck: Buck) -> None: + async def read_executors() -> List[str]: + out = await read_what_ran(buck) + return [line["reproducer"]["executor"] for line in out] + + seed = random_string() + # Run on RE with cache lookup and writes disabled + await buck.build( + ":test", + "-c", + f"test.seed={seed}", + "-c", + "test.remote_enabled=true", + "--no-remote-cache", + ) + assert await read_executors() == ["Re"] + + await buck.kill() + + # Run on RE, should not get any cache hit as the cache writes were disabled + await buck.build( + ":test", + "-c", + f"test.seed={seed}", + "-c", + "test.remote_enabled=true", + ) + + executors = await read_executors() + assert executors == ["Re"] or executors == [] + + +@buck_test(inplace=False, data_dir="executor_caching") +async def test_executor_cache_writes_enabled(buck: Buck) -> None: + async def read_executors() -> List[str]: + out = await read_what_ran(buck) + return 
[line["reproducer"]["executor"] for line in out] + + seed = random_string() + # Run on RE with cache lookup disabled and cache writes enabled + await buck.build( + ":test", + "-c", + f"test.seed={seed}", + "-c", + "test.remote_enabled=true", + "--no-remote-cache", + "--write-to-cache-anyway", + ) + assert await read_executors() == ["Re"] + + await buck.kill() + + # Run on RE, should not get cache hits as writes were enabled + await buck.build( + ":test", + "-c", + f"test.seed={seed}", + "-c", + "test.remote_enabled=true", + ) + assert await read_executors() == ["Cache"] + + +@buck_test(inplace=False, data_dir="executor_caching") +async def test_executor_caching(buck: Buck) -> None: + async def read_executors() -> List[str]: + out = await read_what_ran(buck) + return [line["reproducer"]["executor"] for line in out] + + seed = random_string() + + # Run on RE + await buck.build( + ":test", "-c", f"test.seed={seed}", "-c", "test.remote_enabled=true" + ) + assert (await read_executors()) == ["Re"] + + # Run on RE, with caching (the default) + await buck.kill() + await buck.build( + ":test", "-c", f"test.seed={seed}", "-c", "test.remote_enabled=true" + ) + assert (await read_executors()) == ["Cache"] + + # Kill, run locally, no caching. + await buck.kill() + await buck.build( + ":test", + "-c", + f"test.seed={seed}", + "-c", + "test.local_enabled=true", + "-c", + "test.remote_enabled=false", + "-c", + "test.remote_cache_enabled=false", + ) + assert (await read_executors()) == ["Local"] + + # Kill, run locally, with caching (explicitly). + await buck.kill() + await buck.build( + ":test", + "-c", + f"test.seed={seed}", + "-c", + "test.remote_enabled=false", + "-c", + "test.local_enabled=true", + "-c", + "test.remote_cache_enabled=true", + ) + assert (await read_executors()) == ["Cache"] + + # Kill, run locally, with caching (the default). + await buck.kill() + await buck.build( + ":test", + "-c", + f"test.seed={seed}", + "-c", + "test.remote_enabled=false", + "-c", + "test.local_enabled=true", + ) + assert (await read_executors()) == ["Cache"] + + +@buck_test(inplace=False, data_dir="pass") +async def test_sandcastle_id_check(buck: Buck) -> None: + async def pid() -> int: + res = await buck.status() + return json.loads(res.stdout)["process_info"]["pid"] + + await buck.build() + pid1 = await pid() + await buck.build(env={"SANDCASTLE_ID": "foo"}) + pid2 = await pid() + await buck.build(env={"SANDCASTLE_ID": "foo"}) + pid3 = await pid() + + assert pid1 != pid2 + assert pid2 == pid3 + + +@buck_test(inplace=False, data_dir="execution_platforms") +async def test_build_fails_with_mutually_exclusive_executors(buck: Buck) -> None: + with pytest.raises(BuckException): + await buck.build( + "--local-only", "--remote-only", "root//executor_threshold_tests/..." 
+ ) + + +@buck_test(inplace=False, data_dir="execution_platforms") +async def test_enforce_unique_inodes(buck: Buck) -> None: + await buck.build( + "root//executor_unique_inode_tests/...", + "-c", + f"test.cache_buster={random_string()}", + ) + + +@buck_test(inplace=False, data_dir="execution_platforms", skip_for_os=["windows"]) +async def test_executable_bit(buck: Buck) -> None: + await buck.build( + "root//executor_exec_bit_tests/...", + "-c", + f"test.cache_buster={random_string()}", + ) + + +@buck_test(inplace=False, data_dir="execution_platforms") +@env("BUCK_OFFLINE_BUILD", "1") +async def test_build_offline(buck: Buck) -> None: + await buck.build("root//executor_threshold_tests/...") + out = await read_what_ran(buck) + + executors = {line["identity"]: line["reproducer"]["executor"] for line in out} + expected = { + "root//executor_threshold_tests:big () (head)": "Local", + "root//executor_threshold_tests:cp_big () (cp)": "Local", + "root//executor_threshold_tests:small () (head)": "Local", + "root//executor_threshold_tests:cp_small () (cp)": "Local", + } + assert executors == expected + + +@buck_test(inplace=False, data_dir="execution_platforms") +async def test_symlink_output(buck: Buck) -> None: + with open(buck.cwd / ".buckconfig.local", "w") as f: + f.write("[buck2_re_client]\n") + f.write("respect_file_symlinks = false\n") + await buck.build( + "root//executor_symlink_tests:check_not_symlink", + "-c", + f"test.cache_buster={random_string()}", + ) + await buck.kill() + with open(buck.cwd / ".buckconfig.local", "w") as f: + f.write("[buck2_re_client]\n") + f.write("respect_file_symlinks = true\n") + await buck.build( + "root//executor_symlink_tests:check_symlink", + "-c", + f"test.cache_buster={random_string()}", + ) + + +############################################################################################ +### NOTE(JakobDegen): Do not add new tests to this file. Instead: +### 1. Use or make a different test file with a name that explains what you're testing. +### 2. Give that file its own data directory, not one shared with other tests. +############################################################################################ diff --git a/tests/e2e/build/test_worker.py b/tests/e2e/build/test_worker.py new file mode 100644 index 0000000000000..9a9a08d0aab30 --- /dev/null +++ b/tests/e2e/build/test_worker.py @@ -0,0 +1,173 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + + +import asyncio +import glob +import json +from typing import Any, Dict, List, Tuple + +from buck2.tests.e2e_util.api.buck import Buck + +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.utils import read_what_ran + + +package = "fbcode//buck2/tests/targets/rules/worker_grpc" + +worker_args = [ + "-c", + "build.require_persistent_workers=True", + "--no-remote-cache", +] + + +# disabled on mac due to Python GRPC issues +@buck_test(inplace=True, skip_for_os=["darwin", "windows"]) +async def test_worker(buck: Buck) -> None: + async def read_what_ran_for_executor( + buck: Buck, executor: str + ) -> List[Dict[str, Any]]: + return [ + entry + for entry in (await read_what_ran(buck)) + if entry["reproducer"]["executor"] == executor + ] + + res = await buck.build(*worker_args, package + ":gen_worker_run_out") + output = res.get_build_report().output_for_target(package + ":gen_worker_run_out") + assert output.read_text() == "hello worker" + assert len(await read_what_ran_for_executor(buck, "Worker")) == 1 + + # Check non-worker exe runs if workers are disabled. + fallback_args = ["-c", "build.use_persistent_workers=false", "--local-only"] + res = await buck.build(*fallback_args, package + ":gen_worker_run_fallback") + output = res.get_build_report().output_for_target( + package + ":gen_worker_run_fallback" + ) + assert "hello fallback" in output.read_text() + assert len(await read_what_ran_for_executor(buck, "Worker")) == 0 + + # Check worker is shared between multiple actions. + res = await buck.build(*worker_args, package + ":gen_worker_deps") + trace_id = json.loads(res.stdout)["trace_id"] + worker_dirs = glob.glob(f"/tmp/buck2_worker/{trace_id}*/stderr") + num_workers = len(worker_dirs) + assert num_workers == 1, f"expected 1 worker, found {worker_dirs} for {trace_id}" + assert len(await read_what_ran_for_executor(buck, "WorkerInit")) == 1 + + # Check error thrown during worker command execution. + await expect_failure( + buck.build(*worker_args, package + ":gen_worker_run_error"), + stderr_regex="compile error", + ) + # Check error thrown on worker start up. + await expect_failure( + buck.build(*worker_args, package + ":gen_worker_init_fail"), + stderr_regex="init error", + ) + # Check error thrown by missing worker executable. + await expect_failure( + buck.build(*worker_args, package + ":gen_worker_missing"), + stderr_regex="Worker failed to spawn", + ) + # Check connection error if worker server can't be connected to. + await expect_failure( + buck.build(*worker_args, package + ":gen_worker_init_deadlock"), + stderr_regex="Worker failed to connect", + ) + + # With hybrid execution: + # 1. Check that building `:gen_slow_worker_fast_fallback` first (as dependency) causes remote to succeed and worker to be cancelled. + # 2. Check that `:gen_fast_worker_slow_fallback` worker execution succeeds, using same worker initialized by 1. 
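+    # Editorial sketch (assumed semantics, not asserted by this test): with
+    # use_limited_hybrid=False the hybrid executor races the worker path
+    # against the RE fallback, so the executor recorded in what-ran reflects
+    # whichever side finished first, e.g.:
+    #
+    #   what_ran = await read_what_ran(buck)
+    #   executors = [e["reproducer"]["executor"] for e in what_ran]
+    #   # may contain "Worker", "Re", or "Cache" depending on which side won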
+ hybrid_args = [ + "-c", + "build.use_persistent_workers=true", + "-c", + "build.use_limited_hybrid=False", + ] + await buck.build(*hybrid_args, package + ":gen_fast_worker_slow_fallback") + assert len(await read_what_ran_for_executor(buck, "WorkerInit")) == 1 + + def assert_executed( + what_ran: List[Dict[str, Any]], expected: List[Tuple[str, str]] + ) -> None: + def target_name(identity: str) -> str: + return identity.split()[0].split(":")[1] + + expected_target_names = [entry[1] for entry in expected] + what_ran_matching = [ + (entry["reproducer"]["executor"], target_name(entry["identity"])) + for entry in what_ran + if target_name(entry["identity"]) in expected_target_names + ] + assert what_ran_matching == expected, what_ran + + # TODO(ctolliday) re-enable once cancellation is in place + # assert_executed( + # await read_what_ran(buck), + # [ + # # TODO(ctolliday) ideally this should use --no-remote-cache and check for "Re" not "Cache". + # # Will fail if not cached because RE execution will block on local execution. + # # Making workers cancellable fixes this. + # ("Cache", "gen_slow_worker_fast_fallback"), + # ("WorkerInit", "gen_fast_worker_slow_fallback"), + # ("Worker", "gen_fast_worker_slow_fallback"), + # ], + # ) + + +@buck_test(inplace=True, skip_for_os=["darwin", "windows"]) +async def test_worker_exit_handled(buck: Buck) -> None: + # Check connection error if worker server dies mid-request + await expect_failure( + buck.build( + *worker_args, + "-c", + "build.persistent_worker_check_child_liveness=true", + package + ":gen_worker_init_self_destruct", + ), + stderr_regex="Worker exited while running command", + ) + + +@buck_test(inplace=True, skip_for_os=["darwin", "windows"]) +async def test_worker_exit_not_handled(buck: Buck) -> None: + await expect_failure( + asyncio.wait_for( + buck.build( + *worker_args, + package + ":gen_worker_init_self_destruct", + ), + 20, + ), + exception=asyncio.TimeoutError, + ) + + +@buck_test(inplace=True, skip_for_os=["darwin", "windows"]) +async def test_worker_thread_limit(buck: Buck) -> None: + worker_args = [ + "-c", + "build.use_persistent_workers=True", + "--local-only", + "--no-remote-cache", + ] + await expect_failure( + buck.build(*worker_args, package + ":gen_worker_concurrent_fail"), + stderr_regex="Concurrency check failed", + ) + await buck.build(*worker_args, package + ":gen_worker_concurrent_pass") + + +@buck_test(inplace=True) +async def test_dummy() -> None: + # None of the tests in this file pass on Windows or mac and that upsets CI. 
+    pass
diff --git a/tests/e2e/bxl/BUCK b/tests/e2e/bxl/BUCK
new file mode 100644
index 0000000000000..1f9f373be76c6
--- /dev/null
+++ b/tests/e2e/bxl/BUCK
@@ -0,0 +1,177 @@
+load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test")
+
+oncall("build_infra")
+
+buck2_e2e_test(
+    name = "test_bxl",
+    srcs = ["test_bxl.py"],
+    data = "//buck2/tests/targets:isolated_targets",
+    env = {
+        "OVR_CONFIG": "1",
+        "PRELUDE": "$(location prelude//:prelude)",
+    },
+    skip_for_os = [
+        "darwin",
+    ],
+    deps = [
+        "//buck2/tests/e2e_util:utils",
+    ],
+)
+
+buck2_e2e_test(
+    name = "test_bxl_query",
+    srcs = ["test_bxl_query.py"],
+    data = "//buck2/tests/targets:isolated_targets",
+    env = {
+        "OVR_CONFIG": "1",
+        "PRELUDE": "$(location prelude//:prelude)",
+    },
+    deps = [
+        "//buck2/tests/e2e_util:utils",
+    ],
+)
+
+buck2_e2e_test(
+    name = "test_bxl_fs",
+    srcs = ["test_bxl_fs.py"],
+    data = "//buck2/tests/targets:isolated_targets",
+    env = {
+        "OVR_CONFIG": "1",
+        "PRELUDE": "$(location prelude//:prelude)",
+    },
+    deps = [
+        "//buck2/tests/e2e_util:utils",
+    ],
+)
+
+buck2_e2e_test(
+    name = "test_bxl_node_operations",
+    srcs = ["test_bxl_node_operations.py"],
+    data = "//buck2/tests/targets:isolated_targets",
+    env = {
+        "OVR_CONFIG": "1",
+        "PRELUDE": "$(location prelude//:prelude)",
+    },
+    deps = [
+        "//buck2/tests/e2e_util:utils",
+    ],
+)
+
+buck2_e2e_test(
+    name = "test_bxl_cli",
+    srcs = ["test_bxl_cli.py"],
+    data_dir = "test_bxl_cli_data",
+    deps = [
+        "//buck2/tests/e2e_util:utils",
+    ],
+)
+
+buck2_e2e_test(
+    name = "test_bxl_audit",
+    srcs = ["test_bxl_audit.py"],
+    data_dir = "test_bxl_audit_data",
+    deps = [
+        "//buck2/tests/e2e_util:utils",
+    ],
+)
+
+buck2_e2e_test(
+    name = "test_bxl_build",
+    srcs = ["test_bxl_build.py"],
+    data_dir = "test_bxl_build_data",
+    deps = [
+        "//buck2/tests/e2e_util:utils",
+    ],
+)
+
+buck2_e2e_test(
+    name = "test_bxl_analysis",
+    srcs = ["test_bxl_analysis.py"],
+    data_dir = "test_bxl_analysis_data",
+    deps = [
+        "//buck2/tests/e2e_util:utils",
+    ],
+)
+
+buck2_e2e_test(
+    name = "test_bxl_lazy_analysis",
+    srcs = ["test_bxl_lazy_analysis.py"],
+    data = "//buck2/tests/targets:isolated_targets",
+    env = {
+        "OVR_CONFIG": "1",
+        "PRELUDE": "$(location prelude//:prelude)",
+    },
+    deps = [
+        "//buck2/tests/e2e_util:utils",
+    ],
+)
+
+buck2_e2e_test(
+    name = "test_bxl_lazy_configured_target_node",
+    srcs = ["test_bxl_lazy_configured_target_node.py"],
+    data = "//buck2/tests/targets:isolated_targets",
+    env = {
+        "OVR_CONFIG": "1",
+        "PRELUDE": "$(location prelude//:prelude)",
+    },
+    deps = [
+        "//buck2/tests/e2e_util:utils",
+    ],
+)
+
+buck2_e2e_test(
+    name = "test_bxl_lazy_unconfigured_target_node",
+    srcs = ["test_bxl_lazy_unconfigured_target_node.py"],
+    data = "//buck2/tests/targets:isolated_targets",
+    env = {
+        "OVR_CONFIG": "1",
+        "PRELUDE": "$(location prelude//:prelude)",
+    },
+    deps = [
+        "//buck2/tests/e2e_util:utils",
+    ],
+)
+
+buck2_e2e_test(
+    name = "test_bxl_ensure",
+    srcs = ["test_bxl_ensure.py"],
+    data = "//buck2/tests/targets:isolated_targets",
+    env = {
+        "OVR_CONFIG": "1",
+        "PRELUDE": "$(location prelude//:prelude)",
+    },
+    deps = [
+        "//buck2/tests/e2e_util:utils",
+    ],
+)
+
+buck2_e2e_test(
+    name = "test_bxl_actions",
+    srcs = ["test_bxl_actions.py"],
+    data_dir = "test_bxl_actions_data",
+    deps = [
+        "//buck2/tests/e2e_util:utils",
+    ],
+)
+
+buck2_e2e_test(
+    name = "test_execution_platforms",
+    srcs = ["test_execution_platforms.py"],
+    data_dir = "test_execution_platforms_data",
+    deps = [
+        
"//buck2/tests/e2e_util:utils", + ], +) + +buck2_e2e_test( + name = "test_bxl_targets", + srcs = ["test_bxl_targets.py"], + data = "//buck2/tests/targets:isolated_targets", + env = { + "OVR_CONFIG": "1", + "PRELUDE": "$(location prelude//:prelude)", + }, + deps = [ + "//buck2/tests/e2e_util:utils", + ], +) diff --git a/tests/e2e/bxl/fs/BUCK b/tests/e2e/bxl/fs/BUCK new file mode 100644 index 0000000000000..e75173a73fc6d --- /dev/null +++ b/tests/e2e/bxl/fs/BUCK @@ -0,0 +1,8 @@ +load("@fbcode//buck2/tests:bxl_test.bzl", "bxl_test") + +oncall("build_infra") + +bxl_test( + name = "fs_inplace.bxl", + src = "fs_inplace.bxl", +) diff --git a/tests/e2e/bxl/fs/fs_inplace.bxl b/tests/e2e/bxl/fs/fs_inplace.bxl new file mode 100644 index 0000000000000..8e55de4eb65b1 --- /dev/null +++ b/tests/e2e/bxl/fs/fs_inplace.bxl @@ -0,0 +1,31 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//:asserts.bzl", "asserts") + +def _impl(ctx): + # special case where fbsource is a project and a cell + asserts.true(ctx.fs.exists("fbsource//fbcode")) + asserts.false(ctx.fs.exists("fbsource//fake/path")) + asserts.true(ctx.fs.is_dir("fbsource//fbcode")) + asserts.false(ctx.fs.is_file("fbsource//fbcode")) + + # test fbcode as well + asserts.true(ctx.fs.exists("fbcode//buck2")) + asserts.false(ctx.fs.exists("fbcode//buck100")) + asserts.true(ctx.fs.is_dir("fbcode//buck2")) + asserts.false(ctx.fs.is_file("fbcode//buck2")) + + outputs = [] + for result in ctx.fs.list("fbcode//buck2/tests/e2e/bxl/fs/some_folder"): + outputs.append(result) + asserts.equals(len(outputs), 2) + +test = bxl_main( + impl = _impl, + cli_args = {}, +) diff --git a/tests/e2e/bxl/fs/some_folder/another_file b/tests/e2e/bxl/fs/some_folder/another_file new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/e2e/bxl/fs/some_folder/some_file b/tests/e2e/bxl/fs/some_folder/some_file new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/e2e/bxl/test_bxl.py b/tests/e2e/bxl/test_bxl.py new file mode 100644 index 0000000000000..5fd2decb6674f --- /dev/null +++ b/tests/e2e/bxl/test_bxl.py @@ -0,0 +1,127 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + + +import json +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_bxl_root(buck: Buck) -> None: + + result = await buck.bxl( + "//bxl:root.bxl:root_test", + ) + + assert str(buck.cwd) in result.stdout + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_bxl_cell_root(buck: Buck) -> None: + + result = await buck.bxl( + "fbcode//cell_root.bxl:cell_root_test", + ) + + assert str(buck.cwd / "fbcode") in result.stdout + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_bxl_instant_event(buck: Buck) -> None: + await buck.bxl( + "//bxl/event.bxl:good", + ) + + # Get event log + log = (await buck.log("show")).stdout.strip() + lines = log.splitlines() + # try to find starlark instant event + + found_event = False + for line in lines: + if "StarlarkUser" in line: + assert "foo" in line + assert "bool_value" in line + assert "string_value" in line + assert "int_value" in line + found_event = True + break + + if not found_event: + raise AssertionError("Failed to find starlark instant event.") + + # Shouldn't fail + await buck.bxl("//bxl/event.bxl:metadata_with_duration") + + await expect_failure( + buck.bxl( + "//bxl/event.bxl:bad_metadata", + ), + stderr_regex="Metadata should be a dict where keys are strings, and values are strings, ints, bools, or dicts/lists of the mentioned types. Got type: `list`", + ) + + await expect_failure( + buck.bxl( + "//bxl/event.bxl:bad_metadata_key", + ), + stderr_regex="Metadata keys should be strings. Got type: `int`", + ) + + await expect_failure( + buck.bxl( + "//bxl/event.bxl:bad_metadata_value", + ), + stderr_regex="Metadata values should be strings, ints, bools, or dicts/lists of the mentioned types. Key `key` had value type `tuple`", + ) + + result = await buck.bxl( + "//bxl/event.bxl:ensured_artifact", + ) + + artifact_path = result.stdout.strip() + + # Get event log + lines = (await buck.log("show-user")).stdout.strip().splitlines() + found_event = False + for line in lines: + if "StarlarkUserEvent" in line: + metadata = json.loads(line)["StarlarkUserEvent"]["metadata"] + assert metadata["rel_path"] == artifact_path + assert metadata["abs_path"] == str(Path(buck.cwd / artifact_path)) + assert metadata["nested"]["nested_artifact"] == artifact_path + found_event = True + break + + if not found_event: + raise AssertionError("Failed to find starlark instant event.") + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_bxl_read_config(buck: Buck) -> None: + result = await buck.bxl( + "-c", + "key.section=foo", + "//bxl/read_config.bxl:read_config_test", + ) + + assert "foo" in result.stdout + assert "True" in result.stdout + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_load_file(buck: Buck) -> None: + result = await buck.bxl( + "//bxl:load_file.bxl:load_test", + ) + + assert str(buck.cwd) in result.stdout diff --git a/tests/e2e/bxl/test_bxl_actions.py b/tests/e2e/bxl/test_bxl_actions.py new file mode 100644 index 0000000000000..0427f6ca5348e --- /dev/null +++ b/tests/e2e/bxl/test_bxl_actions.py @@ -0,0 +1,69 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(inplace=False) +async def test_bxl_actions(buck: Buck) -> None: + result = await buck.bxl( + "//artifact_test/artifacts.bxl:artifact_test", + ) + + # FIXME(JakobDegen): The first assert doesn't test anything the second doesn't cover + assert "" in result.stdout + assert "[]" in result.stdout + + +@buck_test(inplace=False) +async def test_bxl_create_build_actions(buck: Buck) -> None: + result = await buck.bxl( + "//actions_test:actions.bxl:build_actions_test", + "--", + "--content", + "my_content", + ) + assert (buck.cwd / Path(result.stdout.strip())).read_text() == "my_content" + + +@buck_test(inplace=False) +async def test_resolve(buck: Buck) -> None: + result = await buck.bxl( + "//resolve_test:resolve.bxl:resolve_test", + ) + + assert "a-string\n" == result.stdout + + +@buck_test(inplace=False, skip_for_os=["windows"]) +async def test_bxl_declared_artifact_path(buck: Buck) -> None: + result = await buck.bxl( + "//actions_test/declared_artifact_path.bxl:declared_artifact_path_test", + ) + + output = result.stdout.splitlines() + # first line is result of get_path_without_materialization, second line is output of ctx.output.ensure + assert output[0] == output[1] + + +@buck_test(inplace=False) +async def test_bxl_build_and_write(buck: Buck) -> None: + # Performs a failed build and a successful action. + res = await buck.bxl( + "//actions_test:actions.bxl:build_and_write", + "--", + "--target", + "actions_test:fail", + ) + + assert res.process.returncode == 0 + assert "BXL SUCCEEDED" in res.stderr diff --git a/tests/e2e/bxl/test_bxl_actions_data/.buckconfig b/tests/e2e/bxl/test_bxl_actions_data/.buckconfig new file mode 100644 index 0000000000000..5249924ade1cc --- /dev/null +++ b/tests/e2e/bxl/test_bxl_actions_data/.buckconfig @@ -0,0 +1,22 @@ +[cells] + root = . + nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name=TARGETS.fixture + +[buck2] + materializations = deferred + enable_local_caching_of_re_artifacts = true + sqlite_materializer_state = true + sqlite_materializer_state_version = 0 + defer_write_actions = true + +[build] + execution_platforms = root//platforms:platforms diff --git a/tests/e2e/bxl/test_bxl_actions_data/.buckroot b/tests/e2e/bxl/test_bxl_actions_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/e2e/bxl/test_bxl_actions_data/actions_test/TARGETS.fixture b/tests/e2e/bxl/test_bxl_actions_data/actions_test/TARGETS.fixture new file mode 100644 index 0000000000000..013bff58065d7 --- /dev/null +++ b/tests/e2e/bxl/test_bxl_actions_data/actions_test/TARGETS.fixture @@ -0,0 +1,5 @@ +load(":defs.bzl", "fail") + +fail( + name = "fail", +) diff --git a/tests/e2e/bxl/test_bxl_actions_data/actions_test/actions.bxl b/tests/e2e/bxl/test_bxl_actions_data/actions_test/actions.bxl new file mode 100644 index 0000000000000..0e4a8857df28c --- /dev/null +++ b/tests/e2e/bxl/test_bxl_actions_data/actions_test/actions.bxl @@ -0,0 +1,36 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _build_actions_impl(ctx): + actions = ctx.bxl_actions().actions + + output = actions.write("my_output", ctx.cli_args.content) + + ensured = ctx.output.ensure(output) + ctx.output.print(ensured) + +build_actions_test = bxl_main( + impl = _build_actions_impl, + cli_args = { + "content": cli_args.string(), + }, +) + +def _impl_build_and_write(ctx): + actions = ctx.bxl_actions().actions + ctx.build(ctx.cli_args.target) + + output1 = actions.declare_output("output1") + actions.write(output1, "sample_content") + ctx.output.ensure(output1) + +build_and_write = bxl_main( + impl = _impl_build_and_write, + cli_args = { + "target": cli_args.target_label(), + }, +) diff --git a/tests/e2e/bxl/test_bxl_actions_data/actions_test/declared_artifact_path.bxl b/tests/e2e/bxl/test_bxl_actions_data/actions_test/declared_artifact_path.bxl new file mode 100644 index 0000000000000..d9dfd573f35c6 --- /dev/null +++ b/tests/e2e/bxl/test_bxl_actions_data/actions_test/declared_artifact_path.bxl @@ -0,0 +1,19 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _declared_artifact_path_test_impl(ctx): + actions = ctx.bxl_actions().actions + artifact = actions.write("foo", "bar") + + ctx.output.print(get_path_without_materialization(artifact, ctx)) + ctx.output.print(ctx.output.ensure(artifact)) + +declared_artifact_path_test = bxl_main( + impl = _declared_artifact_path_test_impl, + cli_args = { + }, +) diff --git a/tests/e2e/bxl/test_bxl_actions_data/actions_test/defs.bzl b/tests/e2e/bxl/test_bxl_actions_data/actions_test/defs.bzl new file mode 100644 index 0000000000000..262c51e3f5341 --- /dev/null +++ b/tests/e2e/bxl/test_bxl_actions_data/actions_test/defs.bzl @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _fail_impl(ctx): + out = ctx.actions.declare_output("output1") + ctx.actions.run(cmd_args("false", hidden = out.as_output()), category = "fail") + return [DefaultInfo(default_outputs = [out])] + +fail = rule( + impl = _fail_impl, + attrs = {}, +) diff --git a/tests/e2e/bxl/test_bxl_actions_data/artifact_test/TARGETS.fixture b/tests/e2e/bxl/test_bxl_actions_data/artifact_test/TARGETS.fixture new file mode 100644 index 0000000000000..3e9a74ab6e6e9 --- /dev/null +++ b/tests/e2e/bxl/test_bxl_actions_data/artifact_test/TARGETS.fixture @@ -0,0 +1,4 @@ +stub( + name = "t", + srcs = ["TARGETS.fixture"], +) diff --git a/tests/e2e/bxl/test_bxl_actions_data/artifact_test/artifacts.bxl b/tests/e2e/bxl/test_bxl_actions_data/artifact_test/artifacts.bxl new file mode 100644 index 0000000000000..18a20db4fffef --- /dev/null +++ b/tests/e2e/bxl/test_bxl_actions_data/artifact_test/artifacts.bxl @@ -0,0 +1,22 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _artifact_test_impl(ctx): + source = "artifact_test/TARGETS.fixture" + uquery_owners = ctx.uquery().owner(source) + target_universe = ctx.target_universe(uquery_owners).target_set() + owner = ctx.cquery().owner(source, target_universe)[0] + + ctx.output.print(owner.sources()) + + ctx.output.print(owner.get_source(source, ctx)) + +artifact_test = bxl_main( + impl = _artifact_test_impl, + cli_args = { + }, +) diff --git a/tests/e2e/bxl/test_bxl_actions_data/platforms/TARGETS.fixture b/tests/e2e/bxl/test_bxl_actions_data/platforms/TARGETS.fixture new file mode 100644 index 0000000000000..80533d33c2a4b --- /dev/null +++ b/tests/e2e/bxl/test_bxl_actions_data/platforms/TARGETS.fixture @@ -0,0 +1,5 @@ +load(":defs.bzl", "execution_platforms") + +execution_platforms( + name = "platforms", +) diff --git a/tests/e2e/bxl/test_bxl_actions_data/platforms/defs.bzl b/tests/e2e/bxl/test_bxl_actions_data/platforms/defs.bzl new file mode 100644 index 0000000000000..39c597a7cb3f7 --- /dev/null +++ b/tests/e2e/bxl/test_bxl_actions_data/platforms/defs.bzl @@ -0,0 +1,32 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _execution_platform(ctx): + platform = ExecutionPlatformInfo( + label = ctx.label.raw_target(), + configuration = ConfigurationInfo( + constraints = { + }, + values = {}, + ), + executor_config = CommandExecutorConfig( + local_enabled = True, + remote_enabled = True, + remote_cache_enabled = True, + remote_execution_properties = { + "platform": "linux-remote-execution", + }, + remote_execution_use_case = "buck2-testing", + ), + ) + + return [ + DefaultInfo(), + ExecutionPlatformRegistrationInfo(platforms = [platform]), + ] + +execution_platforms = rule(attrs = {}, impl = _execution_platform) diff --git a/tests/e2e/bxl/test_bxl_actions_data/resolve_test/TARGETS.fixture b/tests/e2e/bxl/test_bxl_actions_data/resolve_test/TARGETS.fixture new file mode 100644 index 0000000000000..327a8badbe79f --- /dev/null +++ b/tests/e2e/bxl/test_bxl_actions_data/resolve_test/TARGETS.fixture @@ -0,0 +1,7 @@ +load(":defs.bzl", "foo_buildable") + +foo_buildable( + name = "buildable", + content = "FOO", + out = "out.txt", +) diff --git a/tests/e2e/bxl/test_bxl_actions_data/resolve_test/anon_bxl_rules.bzl b/tests/e2e/bxl/test_bxl_actions_data/resolve_test/anon_bxl_rules.bzl new file mode 100644 index 0000000000000..a8a35a707d55a --- /dev/null +++ b/tests/e2e/bxl/test_bxl_actions_data/resolve_test/anon_bxl_rules.bzl @@ -0,0 +1,26 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
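+
+# Editorial sketch of how these helpers compose (mirroring the real usage in
+# resolve.bxl, not an additional test): a bxl script feeds attrs into the anon
+# `mirror` rule, which simply reflects them back via MirrorInfo so the
+# resolved values can be inspected:
+#
+#   promise = actions.anon_target(mirror, attrs).promise.map(validate)
+#   resolved = ctx.resolve(actions, promise)  # -> provider collection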
+ +MirrorInfo = provider(fields = ["info"]) + +def assert_eq(a, b): + if a != b: + fail("Expected {} == {}".format(a, b)) + +def _mirror_impl(ctx: AnalysisContext) -> list[Provider]: + return [DefaultInfo(), MirrorInfo(info = ctx.attrs)] + +mirror = rule(impl = _mirror_impl, attrs = { + "dep": attrs.dep(), + "false": attrs.bool(), + "int": attrs.int(), + "list_string": attrs.list(attrs.string()), + "string": attrs.string(), + "true": attrs.bool(), +}) + +ValidateInfo = provider(fields = ["string"]) diff --git a/tests/e2e/bxl/test_bxl_actions_data/resolve_test/defs.bzl b/tests/e2e/bxl/test_bxl_actions_data/resolve_test/defs.bzl new file mode 100644 index 0000000000000..7b010231ad7f0 --- /dev/null +++ b/tests/e2e/bxl/test_bxl_actions_data/resolve_test/defs.bzl @@ -0,0 +1,18 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _buildable_impl(ctx): + out = ctx.actions.write(ctx.attrs.out, ctx.attrs.content) + return [DefaultInfo(default_output = out)] + +foo_buildable = rule( + impl = _buildable_impl, + attrs = { + "content": attrs.string(default = ""), + "out": attrs.string(), + }, +) diff --git a/tests/e2e/bxl/test_bxl_actions_data/resolve_test/resolve.bxl b/tests/e2e/bxl/test_bxl_actions_data/resolve_test/resolve.bxl new file mode 100644 index 0000000000000..631c477d47150 --- /dev/null +++ b/tests/e2e/bxl/test_bxl_actions_data/resolve_test/resolve.bxl @@ -0,0 +1,46 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("//resolve_test:anon_bxl_rules.bzl", "MirrorInfo", "ValidateInfo", "assert_eq", "mirror") + +def _resolve_test_impl(ctx): + actions = ctx.bxl_actions().actions + + node = ctx.configured_targets("root//resolve_test:buildable") + dep = ctx.analysis(node).as_dependency() + + attrs = { + "dep": dep, + "false": False, + "int": 42, + "list_string": ["a", "b", "c"], + "string": "a-string", + "true": True, + } + + def validate(providers): + res = providers[MirrorInfo].info + assert_eq(res.true, True) + assert_eq(res.false, False) + assert_eq(res.int, 42) + assert_eq(res.string, "a-string") + assert_eq(res.list_string, ["a", "b", "c"]) + assert_eq(type(res.dep), "dependency") + assert_eq(res.dep.label.name, "buildable") + return [DefaultInfo(), ValidateInfo(string = res.string)] + + promise = actions.anon_target(mirror, attrs).promise.map(validate) + promise_result = ctx.resolve(actions, promise) + + # promise.get() returns a "provider_collection". ValidateInfo (a "provider") is at index 1 + ctx.output.print(promise_result[1].string) + +resolve_test = bxl_main( + impl = _resolve_test_impl, + cli_args = { + }, +) diff --git a/tests/e2e/bxl/test_bxl_analysis.py b/tests/e2e/bxl/test_bxl_analysis.py new file mode 100644 index 0000000000000..79ad8ca054030 --- /dev/null +++ b/tests/e2e/bxl/test_bxl_analysis.py @@ -0,0 +1,58 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(inplace=False) +async def test_bxl_analysis(buck: Buck) -> None: + result = await buck.bxl( + "//analysis.bxl:providers_test", + ) + + lines = result.stdout.splitlines() + assert "provides_foo_foo" in lines[0] + assert "provides_foo_foo" in lines[1] + + result = await buck.bxl( + "//analysis.bxl:dependency_test", + ) + + assert result.stdout.splitlines() == [ + "dependency", + "root//:stub ()", + ] + + +@buck_test(inplace=False, allow_soft_errors=True) +async def test_bxl_analysis_incompatible_targets_list(buck: Buck) -> None: + # multiple incompatible targets should be skipped and the analysis should return empty dict + result = await buck.bxl("//analysis.bxl:incompatible_targets") + assert "Skipping target incompatible node" in result.stderr + assert "root//:incompatible_target" in result.stderr + assert "{}" == result.stdout.strip() + + +@buck_test(inplace=False, allow_soft_errors=True) +async def test_bxl_analysis_incompatible_targets_single(buck: Buck) -> None: + # single incompatible target should be skipped and the analysis should return None + result = await buck.bxl("//analysis.bxl:incompatible_targets_single") + assert "Skipping target incompatible node" in result.stderr + assert "root//:incompatible_target" in result.stderr + assert "None" == result.stdout.strip() + + +@buck_test(inplace=False) +async def test_bxl_analysis_missing_subtarget(buck: Buck) -> None: + await expect_failure( + buck.bxl("//analysis.bxl:missing_subtarget_test"), + stderr_regex="requested sub target named `missing_subtarget` .* is not available", + ) diff --git a/tests/e2e/bxl/test_bxl_analysis_data/.buckconfig b/tests/e2e/bxl/test_bxl_analysis_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/e2e/bxl/test_bxl_analysis_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . 
+ nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/e2e/bxl/test_bxl_analysis_data/.buckroot b/tests/e2e/bxl/test_bxl_analysis_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/e2e/bxl/test_bxl_analysis_data/TARGETS.fixture b/tests/e2e/bxl/test_bxl_analysis_data/TARGETS.fixture new file mode 100644 index 0000000000000..3f0132a8ddf7e --- /dev/null +++ b/tests/e2e/bxl/test_bxl_analysis_data/TARGETS.fixture @@ -0,0 +1,25 @@ +load(":defs.bzl", "provides_foo") + +stub(name = "stub") + +provides_foo(name = "provides_foo") + +constraint_setting( + name = "os", +) + +constraint_value( + name = "linux", + constraint_setting = ":os", +) + +platform( + name = "p_windows", + constraint_values = [], +) + +stub( + name = "incompatible_target", + target_compatible_with = [":linux"], + default_target_platform = ":p_windows", +) diff --git a/tests/e2e/bxl/test_bxl_analysis_data/analysis.bxl b/tests/e2e/bxl/test_bxl_analysis_data/analysis.bxl new file mode 100644 index 0000000000000..611d6fd82ea91 --- /dev/null +++ b/tests/e2e/bxl/test_bxl_analysis_data/analysis.bxl @@ -0,0 +1,60 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load(":defs.bzl", "FooInfo") + +def _providers_test_impl(ctx): + node = ctx.configured_targets("root//:provides_foo") + providers = ctx.analysis(node).providers() + ctx.output.print(providers[FooInfo]) + + providers = ctx.analysis(node.label).providers() + ctx.output.print(providers[FooInfo]) + +providers_test = bxl_main( + impl = _providers_test_impl, + cli_args = {}, +) + +def _incompatible_targets_single(ctx): + result = ctx.analysis("root//:incompatible_target") + ctx.output.print(result) + +incompatible_targets_single = bxl_main( + impl = _incompatible_targets_single, + cli_args = {}, +) + +def _incompatible_targets(ctx): + result = ctx.analysis(["root//:incompatible_target"]) + ctx.output.print(result) + +incompatible_targets = bxl_main( + impl = _incompatible_targets, + cli_args = {}, +) + +def _dependency_test_impl(ctx): + node = ctx.configured_targets("root//:stub") + + dep = ctx.analysis(node).as_dependency() + ctx.output.print(type(dep)) + ctx.output.print(dep.label) + +dependency_test = bxl_main( + impl = _dependency_test_impl, + cli_args = {}, +) + +def _missing_subtarget_test(ctx): + node = ctx.configured_targets("root//:stub").label.with_sub_target("missing_subtarget") + ctx.analysis(node) + +missing_subtarget_test = bxl_main( + impl = _missing_subtarget_test, + cli_args = {}, +) diff --git a/tests/e2e/bxl/test_bxl_analysis_data/defs.bzl b/tests/e2e/bxl/test_bxl_analysis_data/defs.bzl new file mode 100644 index 0000000000000..760c8c1fd43d7 --- /dev/null +++ b/tests/e2e/bxl/test_bxl_analysis_data/defs.bzl @@ -0,0 +1,21 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
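+
+# Editorial note: analysis.bxl in this directory consumes the provider
+# declared below roughly as
+#
+#   providers = ctx.analysis(node).providers()
+#   providers[FooInfo].foo  # "provides_foo_foo" for the `provides_foo` target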
+ +FooInfo = provider(fields = { + "foo": str, +}) + +def _provides_foo(ctx): + return [ + DefaultInfo(), + FooInfo(foo = ctx.attrs.name + "_foo"), + ] + +provides_foo = rule( + impl = _provides_foo, + attrs = {}, +) diff --git a/tests/e2e/bxl/test_bxl_audit.py b/tests/e2e/bxl/test_bxl_audit.py new file mode 100644 index 0000000000000..b2f71d7c11c18 --- /dev/null +++ b/tests/e2e/bxl/test_bxl_audit.py @@ -0,0 +1,30 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(inplace=False) +async def test_bxl_audit_output(buck: Buck) -> None: + await buck.bxl( + "//audit.bxl:audit_output_action_exists", + ) + + await buck.bxl( + "//audit.bxl:audit_output_config_not_match", + ) + + await expect_failure( + buck.bxl( + "//audit.bxl:audit_output_invalid_path", + ), + stderr_regex="Malformed buck-out path", + ) diff --git a/tests/e2e/bxl/test_bxl_audit_data/.buckconfig b/tests/e2e/bxl/test_bxl_audit_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/e2e/bxl/test_bxl_audit_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . + nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/e2e/bxl/test_bxl_audit_data/.buckroot b/tests/e2e/bxl/test_bxl_audit_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/e2e/bxl/test_bxl_audit_data/TARGETS.fixture b/tests/e2e/bxl/test_bxl_audit_data/TARGETS.fixture new file mode 100644 index 0000000000000..cb02e606c631c --- /dev/null +++ b/tests/e2e/bxl/test_bxl_audit_data/TARGETS.fixture @@ -0,0 +1,7 @@ +trivial_build( + name = "with_output", +) + +platform( + name = "someplat", +) diff --git a/tests/e2e/bxl/test_bxl_audit_data/audit.bxl b/tests/e2e/bxl/test_bxl_audit_data/audit.bxl new file mode 100644 index 0000000000000..33a61e66782b0 --- /dev/null +++ b/tests/e2e/bxl/test_bxl_audit_data/audit.bxl @@ -0,0 +1,42 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
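+
+# Editorial note: the buck-out paths handed to audit().output() below follow
+# the (assumed) layout
+#
+#   buck-out/v2/gen/<cell>/<config hash>/<package>/__<target name>__/<artifact>
+#
+# so "some_hash" stands in for a configuration hash that deliberately does not
+# match the requested platform, and the malformed-path case drops these
+# components entirely.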
+ +def _audit_output_action_exists_impl(ctx): + label = "root//:with_output" + target = ctx.configured_targets(label) + build_result = ctx.build(target) + artifact = build_result.values()[0].artifacts()[0] + buck_out = get_path_without_materialization(artifact, ctx) + action = ctx.audit().output(buck_out) + + asserts.equals(action.owner(), target.label) + +audit_output_action_exists = bxl_main( + impl = _audit_output_action_exists_impl, + cli_args = { + }, +) + +def _audit_output_config_not_match_impl(ctx): + buck_out = "buck-out/v2/gen/root/some_hash/__with_output__/foo.txt" + output = ctx.audit().output(buck_out, "root//:someplat") + asserts.equals(str(output), "root//:with_output") + +audit_output_config_not_match = bxl_main( + impl = _audit_output_config_not_match_impl, + cli_args = { + }, +) + +def _audit_output_invalid_path_impl(ctx): + ctx.audit().output("buck-out/blah/blah") + +audit_output_invalid_path = bxl_main( + impl = _audit_output_invalid_path_impl, + cli_args = { + }, +) diff --git a/tests/e2e/bxl/test_bxl_build.py b/tests/e2e/bxl/test_bxl_build.py new file mode 100644 index 0000000000000..93fe491713717 --- /dev/null +++ b/tests/e2e/bxl/test_bxl_build.py @@ -0,0 +1,121 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json +import os +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.utils import replace_hash + + +@buck_test(inplace=False) +async def test_bxl_build(buck: Buck) -> None: + result = await buck.bxl( + "//build.bxl:build_test", + "--", + "--target", + ":trivial_build", + ) + outputs = json.loads(result.stdout) + assert (buck.cwd / Path(outputs["root//:trivial_build"][0])).read_text() == "abcd" + + result = await buck.bxl( + "//build.bxl:cquery_build_test", + ) + outputs = result.stdout.splitlines()[0] + assert (buck.cwd / Path(outputs)).read_text() == "abcd" + + +@buck_test(inplace=False) +async def test_bxl_build_stats(buck: Buck) -> None: + result = await buck.bxl( + "//build.bxl:build_stats", + "--", + "--targets", + "root//build/...", + ) + stats = json.loads(result.stdout) + assert stats["root//build:pass"]["artifacts"] == 1 + assert stats["root//build:pass"]["failures"] == 0 + assert stats["root//build:fail"]["artifacts"] == 0 + assert stats["root//build:fail"]["failures"] == 1 + + +@buck_test(inplace=False) +async def test_bxl_target_platform_from_unpacking_providers_expr(buck: Buck) -> None: + # Pass in explicit target platform from client. Result should be configured with this target platform. + result = await buck.bxl( + "--target-platforms", + "root//:platform2", + "//build.bxl:build_with_target_platform_test", + "--", + "--target", + ":trivial_build", + ) + assert ( + replace_hash(result.stdout) + == "[root//:trivial_build (root//:platform2#)]\n" + ) + + # No target platform specified from client context. 
Result should be configured with root//:platform3
+    result = await buck.bxl(
+        "//build.bxl:build_with_target_platform_test",
+        "--",
+        "--target",
+        ":trivial_build",
+    )
+    assert (
+        replace_hash(result.stdout)
+        == "[root//:trivial_build (root//:platform3#)]\n"
+    )
+
+    # Target platform from client context should be overridden by what's declared in build().
+    result = await buck.bxl(
+        "//build.bxl:build_with_target_platform_test",
+        "--target-platforms",
+        "root//:platform2",
+        "--",
+        "--target",
+        ":trivial_build",
+        "--target_platform",
+        "root//:platform1",
+    )
+    assert (
+        replace_hash(result.stdout)
+        == "[root//:trivial_build (root//:platform1#)]\n"
+    )
+
+
+@buck_test(inplace=False)
+async def test_bxl_build_order(buck: Buck) -> None:
+    await buck.bxl("//build_artifacts_order/check.bxl:check")
+
+
+@buck_test(inplace=False)
+async def test_bxl_build_no_materialization(buck: Buck) -> None:
+    result = await buck.bxl(
+        "//materializations.bxl:build",
+        "--",
+        "--materializations=skip",
+    )
+
+    [output] = result.stdout.splitlines()
+    assert os.path.exists(buck.cwd / Path(output)) is False
+
+    result = await buck.bxl(
+        "//materializations.bxl:build",
+        "--",
+        "--materializations=materialize",
+    )
+
+    [output] = result.stdout.splitlines()
+    assert os.path.exists(buck.cwd / Path(output)) is True
diff --git a/tests/e2e/bxl/test_bxl_build_data/.buckconfig b/tests/e2e/bxl/test_bxl_build_data/.buckconfig
new file mode 100644
index 0000000000000..9cab9b8b930da
--- /dev/null
+++ b/tests/e2e/bxl/test_bxl_build_data/.buckconfig
@@ -0,0 +1,15 @@
+[cells]
+  root = .
+  nano_prelude = nano_prelude
+
+[cell_aliases]
+  prelude = nano_prelude
+
+[external_cells]
+  nano_prelude = bundled
+
+[buildfile]
+  name = TARGETS.fixture
+
+[buck2]
+  materializations = deferred
diff --git a/tests/e2e/bxl/test_bxl_build_data/.buckroot b/tests/e2e/bxl/test_bxl_build_data/.buckroot
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/e2e/bxl/test_bxl_build_data/TARGETS.fixture b/tests/e2e/bxl/test_bxl_build_data/TARGETS.fixture
new file mode 100644
index 0000000000000..41e352bb201fd
--- /dev/null
+++ b/tests/e2e/bxl/test_bxl_build_data/TARGETS.fixture
@@ -0,0 +1,22 @@
+load(":defs.bzl", "run_remote")
+
+run_remote(
+    name = "run_remote",
+)
+
+trivial_build(
+    name = "trivial_build",
+    default_target_platform = ":platform3",
+)
+
+platform(
+    name = "platform1",
+)
+
+platform(
+    name = "platform2",
+)
+
+platform(
+    name = "platform3",
+)
diff --git a/tests/e2e/bxl/test_bxl_build_data/build.bxl b/tests/e2e/bxl/test_bxl_build_data/build.bxl
new file mode 100644
index 0000000000000..dfccbb5a5f367
--- /dev/null
+++ b/tests/e2e/bxl/test_bxl_build_data/build.bxl
@@ -0,0 +1,92 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
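+
+# Editorial sketch of the shape ctx.build() returns, as relied on by the
+# functions below: a dict keyed by configured target label whose values expose
+# artifacts() and failures(), e.g. (`//:some_target` is a placeholder):
+#
+#   for label, result in ctx.build("//:some_target").items():
+#       ensured = ctx.output.ensure_multiple(result.artifacts())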
+ +def _impl(ctx): + outputs = {} + for target, value in ctx.build(ctx.cli_args.target).items(): + outputs.update({target.raw_target(): ctx.output.ensure_multiple(value.artifacts())}) + + ctx.output.print_json(outputs) + +build_test = bxl_main( + impl = _impl, + cli_args = { + "target": cli_args.target_label(), + }, +) + +def _impl_build_stats(ctx): + stats = {} + for target, value in ctx.build(ctx.cli_args.targets).items(): + artifacts = value.artifacts() + failures = value.failures() + + stats[target.raw_target()] = { + "artifacts": len(artifacts), + "failures": len(failures), + } + + for i, artifact in enumerate(artifacts): + if artifacts[i] != artifact: + fail("{} != {}", artifacts[i], artifact) + + for i, failure in enumerate(failures): + if failures[i] != failure: + fail("{} != {}", failures[i], failure) + + ctx.output.print_json(stats) + +build_stats = bxl_main( + impl = _impl_build_stats, + cli_args = { + "targets": cli_args.target_expr(), + }, +) + +def _impl_build_with_target_platform(ctx): + outputs = [] + for target in ctx.build(ctx.cli_args.target, ctx.cli_args.target_platform).keys(): + outputs.append(target.configured_target()) + + ctx.output.print(outputs) + +build_with_target_platform_test = bxl_main( + impl = _impl_build_with_target_platform, + cli_args = { + "target": cli_args.target_label(), + "target_platform": cli_args.option(cli_args.target_label()), + }, +) + +def _cquery_build(ctx): + universe = ctx.target_universe("...").target_set() + targets = ctx.cquery().kind("trivial_build", universe) + + outputs = [] + for value in ctx.build(targets).values(): + outputs.extend(ctx.output.ensure_multiple(value.artifacts())) + + ctx.output.print(sep = "\n", *outputs) + +cquery_build_test = bxl_main( + impl = _cquery_build, + cli_args = {}, +) + +def _impl_build_duplicate(ctx): + outputs = {} + for target, value in ctx.build([ctx.cli_args.target, ctx.cli_args.target]).items(): + outputs.update({target.raw_target(): ctx.output.ensure_multiple(value.artifacts())}) + + ctx.output.print_json(outputs) + +build_duplicate = bxl_main( + impl = _impl_build_duplicate, + cli_args = { + "target": cli_args.target_label(), + }, +) diff --git a/tests/e2e/bxl/test_bxl_build_data/build/TARGETS.fixture b/tests/e2e/bxl/test_bxl_build_data/build/TARGETS.fixture new file mode 100644 index 0000000000000..c249c86d59d9c --- /dev/null +++ b/tests/e2e/bxl/test_bxl_build_data/build/TARGETS.fixture @@ -0,0 +1,9 @@ +load(":defs.bzl", "fail_build") + +trivial_build( + name = "pass", +) + +fail_build( + name = "fail", +) diff --git a/tests/e2e/bxl/test_bxl_build_data/build/defs.bzl b/tests/e2e/bxl/test_bxl_build_data/build/defs.bzl new file mode 100644 index 0000000000000..06340f6232666 --- /dev/null +++ b/tests/e2e/bxl/test_bxl_build_data/build/defs.bzl @@ -0,0 +1,19 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
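+
+# Editorial note: the rule below fails its action by trying to spawn `exit`,
+# which on most systems is a shell builtin rather than an executable, so the
+# command reliably errors. A hypothetical, more portable alternative would be:
+#
+#   ctx.actions.run(
+#       cmd_args("sh", "-c", "exit 1", hidden = out.as_output()),
+#       category = "run",
+#   )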
+ +def _impl(ctx): + out = ctx.actions.declare_output("out.txt") + ctx.actions.run( + cmd_args("exit", "1", hidden = out.as_output()), + category = "run", + ) + return [DefaultInfo(default_output = out)] + +fail_build = rule( + impl = _impl, + attrs = {}, +) diff --git a/tests/e2e/bxl/test_bxl_build_data/build_artifacts_order/TARGETS.fixture b/tests/e2e/bxl/test_bxl_build_data/build_artifacts_order/TARGETS.fixture new file mode 100644 index 0000000000000..9eead2f1836ac --- /dev/null +++ b/tests/e2e/bxl/test_bxl_build_data/build_artifacts_order/TARGETS.fixture @@ -0,0 +1,3 @@ +load(":defs.bzl", "artifacts") + +artifacts(name = "artifacts") diff --git a/tests/e2e/bxl/test_bxl_build_data/build_artifacts_order/check.bxl b/tests/e2e/bxl/test_bxl_build_data/build_artifacts_order/check.bxl new file mode 100644 index 0000000000000..3b9856cacc941 --- /dev/null +++ b/tests/e2e/bxl/test_bxl_build_data/build_artifacts_order/check.bxl @@ -0,0 +1,28 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _check(ctx): + results = list(ctx.build("//build_artifacts_order:artifacts").values()) + if len(results) != 1: + fail("bad results") + + result = results[0] + artifacts = list(result.artifacts()) + + if len(artifacts) != 2: + fail("bad artifacts") + + if artifacts[0].short_path != "slow": + fail("slow is not first") + + if artifacts[1].short_path != "fast": + fail("fast is not second") + +check = bxl_main( + impl = _check, + cli_args = {}, +) diff --git a/tests/e2e/bxl/test_bxl_build_data/build_artifacts_order/defs.bzl b/tests/e2e/bxl/test_bxl_build_data/build_artifacts_order/defs.bzl new file mode 100644 index 0000000000000..b760c3c63729c --- /dev/null +++ b/tests/e2e/bxl/test_bxl_build_data/build_artifacts_order/defs.bzl @@ -0,0 +1,24 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _artifacts(ctx): + fast = ctx.actions.write("fast", "") + slow = ctx.actions.declare_output("slow") + + ctx.actions.run( + [ + "python3", + "-c", + "import time, sys; time.sleep(5); open(sys.argv[1], 'w')", + slow.as_output(), + ], + category = "slow", + ) + + return [DefaultInfo(slow, other_outputs = [fast])] + +artifacts = rule(impl = _artifacts, attrs = {}) diff --git a/tests/e2e/bxl/test_bxl_build_data/defs.bzl b/tests/e2e/bxl/test_bxl_build_data/defs.bzl new file mode 100644 index 0000000000000..402f12656782e --- /dev/null +++ b/tests/e2e/bxl/test_bxl_build_data/defs.bzl @@ -0,0 +1,21 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
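+
+# Editorial note: with prefer_remote = True below and the deferred
+# materialization enabled in this fixture's .buckconfig, the `cp` runs on RE
+# and its output only lands on disk once ensured; materializations.bxl
+# exercises this, roughly:
+#
+#   ctx.build("//:run_remote", materializations = "skip")  # output not on disk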
+ +def _run_remote(ctx): + out = ctx.actions.declare_output("out.txt") + data = ctx.actions.write("src.txt", "abcd") + ctx.actions.run( + cmd_args(["cp", data, out.as_output()]), + category = "touch", + prefer_remote = True, + ) + return [DefaultInfo(default_output = out)] + +run_remote = rule( + impl = _run_remote, + attrs = {}, +) diff --git a/tests/e2e/bxl/test_bxl_build_data/materializations.bxl b/tests/e2e/bxl/test_bxl_build_data/materializations.bxl new file mode 100644 index 0000000000000..ae06c9667d953 --- /dev/null +++ b/tests/e2e/bxl/test_bxl_build_data/materializations.bxl @@ -0,0 +1,18 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _build_impl(ctx): + for value in ctx.build("//:run_remote", materializations = ctx.cli_args.materializations).values(): + for artifact in value.artifacts(): + ctx.output.print(get_path_without_materialization(artifact, ctx)) + +build = bxl_main( + impl = _build_impl, + cli_args = { + "materializations": cli_args.string(), + }, +) diff --git a/tests/e2e/bxl/test_bxl_cli.py b/tests/e2e/bxl/test_bxl_cli.py new file mode 100644 index 0000000000000..8a80ebb0e0cbb --- /dev/null +++ b/tests/e2e/bxl/test_bxl_cli.py @@ -0,0 +1,429 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json +import random +import string + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(inplace=False) +async def test_bxl_cli(buck: Buck) -> None: + result = await buck.bxl( + "//cli_args.bxl:cli_test", + "--", + "--int_arg", + "1", + "--float_arg", + "4.3", + "--enum_type", + "a", + "--list_type", + "1", + "2", + "3", + "--target", + ":foo", + "--sub_target", + "cell/pkg:bar[sub]", + ) + assert ( + result.stdout + == 'bool_arg: False\nbool_arg_with_default: True\nstring_arg: "default"\nint_arg: 1\nfloat_arg: 4.3\noptional: None\nenum_type: "a"\ntarget: root//:foo\nsub_target: root//cell/pkg:bar[sub]\nlist: [1, 2, 3]\n' + ) + + result = await buck.bxl( + "//cli_args.bxl:cli_test", + "--", + # Override default bool arg with false + "--bool_arg_with_default", + "false", + "--int_arg", + "2", + "--float_arg", + "3.4", + "--optional", + "value", + "--enum_type", + "b", + "--list_type", + "1", + "--target", + "bar:foo", + "--sub_target", + "cell/pkg:bar", + ) + assert ( + result.stdout + == 'bool_arg: False\nbool_arg_with_default: False\nstring_arg: "default"\nint_arg: 2\nfloat_arg: 3.4\noptional: "value"\nenum_type: "b"\ntarget: root//bar:foo\nsub_target: root//cell/pkg:bar\nlist: [1]\n' + ) + + # multiple occurrences of a list-type argument + # i.e., --arg 1 --arg 2 --arg 3 + result = await buck.bxl( + "//cli_args.bxl:cli_test", + "--", + "--int_arg", + "1", + "--float_arg", + "4.3", + "--enum_type", + "a", + "--list_type", + "1", + "--list_type", + "2", + "--list_type", + "3", + "--target", + ":foo", + "--sub_target", + "cell/pkg:bar[sub]", + ) + assert ( + result.stdout + == 'bool_arg: 
False\nbool_arg_with_default: True\nstring_arg: "default"\nint_arg: 1\nfloat_arg: 4.3\noptional: None\nenum_type: "a"\ntarget: root//:foo\nsub_target: root//cell/pkg:bar[sub]\nlist: [1, 2, 3]\n'
+    )
+
+    # illegal target
+    await expect_failure(
+        buck.bxl(
+            "//cli_args.bxl:cli_test",
+            "--",
+            "--int_arg",
+            "2",
+            "--float_arg",
+            "3.4",
+            "--optional",
+            "value",
+            "--enum_type",
+            "b",
+            "--list_type",
+            "1",
+            "--target",
+            "illegal?target",
+            "--sub_target",
+            "cell/pkg:bar",
+        )
+    )
+
+    # not int
+    await expect_failure(
+        buck.bxl(
+            "//cli_args.bxl:cli_test",
+            "--",
+            "--int_arg",
+            "2.0",
+            "--float_arg",
+            "3.4",
+            "--optional",
+            "value",
+            "--enum_type",
+            "b",
+            "--list_type",
+            "1",
+            "--target",
+            ":foo",
+            "--sub_target",
+            "cell/pkg:bar",
+        )
+    )
+
+    # list inner type mismatch
+    await expect_failure(
+        buck.bxl(
+            "//cli_args.bxl:cli_test",
+            "--",
+            "--int_arg",
+            "2",
+            "--float_arg",
+            "3.4",
+            "--optional",
+            "value",
+            "--enum_type",
+            "b",
+            "--list_type",
+            "wrong_inner_list_type",
+            "--target",
+            "bar:foo",
+            "--sub_target",
+            "cell/pkg:bar",
+        )
+    )
+
+    # not valid enum variant
+    await expect_failure(
+        buck.bxl(
+            "//cli_args.bxl:cli_test",
+            "--",
+            "--int_arg",
+            "2",
+            "--float_arg",
+            "3.4",
+            "--optional",
+            "value",
+            "--enum_type",
+            "not_enum",
+            "--list_type",
+            "1",
+            "--target",
+            ":foo",
+            "--sub_target",
+            "cell/pkg:bar",
+        )
+    )
+
+    # missing non-optional field
+    await expect_failure(
+        buck.bxl(
+            "//cli_args.bxl:cli_test",
+            "--",
+            "--int_arg",
+            "2",
+            "--optional",
+            "value",
+            "--enum_type",
+            "a",
+            "--list_type",
+            "1",
+            "--target",
+            ":foo",
+            "--sub_target",
+            "cell/pkg:bar",
+        )
+    )
+
+    # check short args work
+    result = await buck.bxl(
+        "//cli_args.bxl:cli_test_short",
+        "--",
+        "-i",
+        "1",
+        "-f",
+        "4.3",
+        "-e",
+        "a",
+        "-l",
+        "1",
+        "2",
+        "3",
+        "-t",
+        ":foo",
+        "-s",
+        "default",
+    )
+    assert (
+        result.stdout
+        == 'bool_arg: False\nstring_arg: "default"\nint_arg: 1\nfloat_arg: 4.3\noptional: None\nenum_type: "a"\ntarget: root//:foo\nlist: [1, 2, 3]\n'
+    )
+
+    # check long args still work with short args
+    result = await buck.bxl(
+        "//cli_args.bxl:cli_test_short",
+        "--",
+        "--int_arg",
+        "1",
+        "--float_arg",
+        "4.3",
+        "--enum_type",
+        "a",
+        "--list_type",
+        "1",
+        "2",
+        "3",
+        "--target",
+        ":foo",
+        "--string_arg",
+        "default",
+    )
+    assert (
+        result.stdout
+        == 'bool_arg: False\nstring_arg: "default"\nint_arg: 1\nfloat_arg: 4.3\noptional: None\nenum_type: "a"\ntarget: root//:foo\nlist: [1, 2, 3]\n'
+    )
+
+    # check snakecase cli_arg access from bxl context, make sure it still works with default args and shorthand args
+    result = await buck.bxl(
+        "//cli_args.bxl:cli_test_snakecase_access",
+        "--",
+        "--my-arg",
+        "this is my arg",
+    )
+    assert result.stdout == 'my-arg: "this is my arg"\n'
+
+    result = await buck.bxl(
+        "//cli_args.bxl:cli_test_snakecase_access", "--", "-a", "this is my arg"
+    )
+    assert result.stdout == 'my-arg: "this is my arg"\n'
+
+    result = await buck.bxl("//cli_args.bxl:cli_test_snakecase_access")
+    assert result.stdout == 'my-arg: "default"\n'
+
+    await expect_failure(
+        buck.bxl(
+            "//cli_args_bad_case.bxl:cli_test_bad_case",
+            "--",
+            "--my-arg",
+            "this is my arg",
+        )
+    )
+
+
+@buck_test(inplace=False)
+async def test_bxl_cli_json_args(buck: Buck) -> None:
+    json_args = {}
+    json_args.update({"int": 1})
+    json_args.update({"string": "foo"})
+    json_args.update({"float": 1.0})
+    json_args.update({"bool": True})
+    json_args.update({"none": None})
+    json_args.update({"list": [1, 2, 3]})
+
+    
nested = {} + nested.update({"nested_string": "bar"}) + nested.update({"nested_int": -1}) + json_args.update({"nested": nested}) + + my_json = json.dumps(json_args) + + await buck.bxl( + "//cli_args.bxl:cli_json_arg", + "--", + "--my-json", + my_json, + ) + + await expect_failure( + buck.bxl( + "//cli_args.bxl:cli_json_arg", + "--", + "--my-json", + "[1,2,3]", + ), + stderr_regex="Expecting json object. Got: `\\[1,2,3]\\`", + ) + + +@buck_test(inplace=False) +async def test_bxl_cli_short_bad(buck: Buck) -> None: + # duplicate "short" + await expect_failure( + buck.bxl( + "//cli_args_bad.bxl:cli_test_short_bad", + "--", + "-a", + "2", + "-a", + "value", + "-a", + "a", + "-a", + "1", + "-a", + ":foo", + "-a", + "default", + ), + stderr_regex="Duplicate short args are not allowed", + ) + + +@buck_test(inplace=False) +async def test_cli_target_pattern(buck: Buck) -> None: + result = await buck.bxl( + "//cli_args.bxl:target_expr_test", + "--", + "--targets", + ":t1", + ) + assert "[root//:t1]" in result.stdout + + result = await buck.bxl( + "//cli_args.bxl:target_expr_test", + "--", + "--targets", + "root//:", + ) + assert "root//:t1" in result.stdout + assert "root//:t2" in result.stdout + + await expect_failure( + buck.bxl( + "//cli_args.bxl:target_expr_test", + "--", + "--targets", + ":non-existent", + ) + ) + + await expect_failure( + buck.bxl( + "//cli_args.bxl:target_expr_test", + "--", + "--targets", + "invalid/...", + ) + ) + + +@buck_test(inplace=False) +async def test_cli_sub_target_pattern(buck: Buck) -> None: + # Tests where no sub-target is specified; should ensure functionality + # of regular target patterns work with these subtarget patterns. + result = await buck.bxl( + "//cli_args.bxl:sub_target_expr_test", + "--", + "--sub_targets", + ":t1", + ) + print(result.stdout) + assert "[root//:t1]" in result.stdout + + result = await buck.bxl( + "//cli_args.bxl:sub_target_expr_test", + "--", + "--sub_targets", + "root//:", + ) + assert "root//:t1" in result.stdout + assert "root//:t2" in result.stdout + + # Test single sub-targets. + result = await buck.bxl( + "//cli_args.bxl:sub_target_expr_test", + "--", + "--sub_targets", + "root//:t1[sub]", + ) + assert "[root//:t1[sub]" in result.stdout + + # Several subtargets / nested subtargets. + result = await buck.bxl( + "//cli_args.bxl:sub_target_expr_test", + "--", + "--sub_targets", + "root//:t2[sub1][sub2]", + ) + assert "[root//:t2[sub1][sub2]" in result.stdout + + await expect_failure( + buck.bxl( + "//cli_args.bxl:sub_target_expr_test", + "--", + "--sub_targets", + ":fake_bin[sub]", + ) + ) + + +def random_string() -> str: + return "".join(random.choice(string.ascii_lowercase) for i in range(256)) diff --git a/tests/e2e/bxl/test_bxl_cli_data/.buckconfig b/tests/e2e/bxl/test_bxl_cli_data/.buckconfig new file mode 100644 index 0000000000000..09556211287db --- /dev/null +++ b/tests/e2e/bxl/test_bxl_cli_data/.buckconfig @@ -0,0 +1,12 @@ +[cells] + root = . 
+ nano_prelude = nano_prelude + +[cell_aliases] + prelude = nano_prelude + +[external_cells] + nano_prelude = bundled + +[buildfile] + name = TARGETS.fixture diff --git a/tests/e2e/bxl/test_bxl_cli_data/.buckroot b/tests/e2e/bxl/test_bxl_cli_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/e2e/bxl/test_bxl_cli_data/TARGETS.fixture b/tests/e2e/bxl/test_bxl_cli_data/TARGETS.fixture new file mode 100644 index 0000000000000..1cf5dd222b454 --- /dev/null +++ b/tests/e2e/bxl/test_bxl_cli_data/TARGETS.fixture @@ -0,0 +1,3 @@ +stub(name = "t1") + +stub(name = "t2") diff --git a/tests/e2e/bxl/test_bxl_cli_data/cli_args.bxl b/tests/e2e/bxl/test_bxl_cli_data/cli_args.bxl new file mode 100644 index 0000000000000..6e5e0d645ec12 --- /dev/null +++ b/tests/e2e/bxl/test_bxl_cli_data/cli_args.bxl @@ -0,0 +1,129 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl(ctx): + ctx.output.print("bool_arg: " + repr(ctx.cli_args.bool_arg)) + ctx.output.print("bool_arg_with_default: " + repr(ctx.cli_args.bool_arg_with_default)) + ctx.output.print("string_arg: " + repr(ctx.cli_args.string_arg)) + ctx.output.print("int_arg: " + repr(ctx.cli_args.int_arg)) + ctx.output.print("float_arg: " + repr(ctx.cli_args.float_arg)) + ctx.output.print("optional: " + repr(ctx.cli_args.optional)) + ctx.output.print("enum_type: " + repr(ctx.cli_args.enum_type)) + ctx.output.print("target: " + repr(ctx.cli_args.target)) + ctx.output.print("sub_target: " + repr(ctx.cli_args.sub_target)) + ctx.output.print("list: " + repr(ctx.cli_args.list_type)) + +cli_test = bxl_main( + impl = _impl, + cli_args = { + "bool_arg": cli_args.bool(), + "bool_arg_with_default": cli_args.bool(True), + "enum_type": cli_args.enum(["a", "b"]), + "float_arg": cli_args.float(), + "int_arg": cli_args.int(), + "list_type": cli_args.list(cli_args.int()), + "optional": cli_args.option(cli_args.string()), + "string_arg": cli_args.string("default"), + "sub_target": cli_args.sub_target(), + "target": cli_args.target_label(), + }, +) + +def _target_expr_impl(ctx): + ctx.output.print(repr(ctx.cli_args.targets)) + +target_expr_test = bxl_main( + impl = _target_expr_impl, + cli_args = { + "targets": cli_args.target_expr(), + }, +) + +def _sub_target_expr_impl(ctx): + ctx.output.print(repr(ctx.cli_args.sub_targets)) + +sub_target_expr_test = bxl_main( + impl = _sub_target_expr_impl, + cli_args = { + "sub_targets": cli_args.sub_target_expr(), + }, +) + +def _impl_cli_test_short(ctx): + ctx.output.print("bool_arg: " + repr(ctx.cli_args.bool_arg)) + ctx.output.print("string_arg: " + repr(ctx.cli_args.string_arg)) + ctx.output.print("int_arg: " + repr(ctx.cli_args.int_arg)) + ctx.output.print("float_arg: " + repr(ctx.cli_args.float_arg)) + ctx.output.print("optional: " + repr(ctx.cli_args.optional)) + ctx.output.print("enum_type: " + repr(ctx.cli_args.enum_type)) + ctx.output.print("target: " + repr(ctx.cli_args.target)) + ctx.output.print("list: " + repr(ctx.cli_args.list_type)) + +cli_test_short = bxl_main( + impl = _impl_cli_test_short, + cli_args = { + "bool_arg": cli_args.bool(short = "b"), + "enum_type": cli_args.enum(["a", "b"], short = "e"), + "float_arg": cli_args.float(short = "f"), + "int_arg": cli_args.int(short = "i"), + "list_type": 
cli_args.list(cli_args.int(), short = "l"), + "optional": cli_args.option(cli_args.string(), short = "o"), + "string_arg": cli_args.string(short = "s"), + "target": cli_args.target_label(short = "t"), + }, +) + +def _impl_cli_test_snakecase_access(ctx): + ctx.output.print("my-arg: " + repr(ctx.cli_args.my_arg)) + +cli_test_snakecase_access = bxl_main( + impl = _impl_cli_test_snakecase_access, + cli_args = { + "my-arg": cli_args.string("default", short = "a"), + }, +) + +def _assert_eq(a, b): + if a != b: + fail("Expected {} == {}".format(a, b)) + +def _impl_cli_json_arg(ctx): + ctx.output.print("my-json: " + repr(ctx.cli_args.my_json)) + my_int = ctx.cli_args.my_json["int"] + my_string = ctx.cli_args.my_json["string"] + my_float = ctx.cli_args.my_json["float"] + my_bool = ctx.cli_args.my_json["bool"] + my_none = ctx.cli_args.my_json["none"] + my_list = ctx.cli_args.my_json["list"] + my_nested = ctx.cli_args.my_json["nested"] + + _assert_eq(type(my_int), "int") + _assert_eq(type(my_string), "string") + _assert_eq(type(my_float), "float") + _assert_eq(type(my_bool), "bool") + _assert_eq(type(my_none), "NoneType") + _assert_eq(type(my_list), "list") + _assert_eq(type(my_nested), "dict") + + _assert_eq(my_int, 1) + _assert_eq(my_string, "foo") + _assert_eq(my_float, 1.0) + _assert_eq(my_bool, True) + _assert_eq(my_none, None) + _assert_eq(my_list, [1, 2, 3]) + + expected_nested = {} + expected_nested["nested_string"] = "bar" + expected_nested["nested_int"] = -1 + _assert_eq(my_nested, expected_nested) + +cli_json_arg = bxl_main( + impl = _impl_cli_json_arg, + cli_args = { + "my-json": cli_args.json(short = "j"), + }, +) diff --git a/tests/e2e/bxl/test_bxl_cli_data/cli_args_bad.bxl b/tests/e2e/bxl/test_bxl_cli_data/cli_args_bad.bxl new file mode 100644 index 0000000000000..25cc9d1905a87 --- /dev/null +++ b/tests/e2e/bxl/test_bxl_cli_data/cli_args_bad.bxl @@ -0,0 +1,30 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl_cli_test_short(ctx): + ctx.output.print("bool_arg: " + repr(ctx.cli_args.bool_arg)) + ctx.output.print("string_arg: " + repr(ctx.cli_args.string_arg)) + ctx.output.print("int_arg: " + repr(ctx.cli_args.int_arg)) + ctx.output.print("float_arg: " + repr(ctx.cli_args.float_arg)) + ctx.output.print("optional: " + repr(ctx.cli_args.optional)) + ctx.output.print("enum_type: " + repr(ctx.cli_args.enum_type)) + ctx.output.print("target: " + repr(ctx.cli_args.target)) + ctx.output.print("list: " + repr(ctx.cli_args.list_type)) + +cli_test_short_bad = bxl_main( + impl = _impl_cli_test_short, + cli_args = { + "bool_arg": cli_args.bool(short = "a"), + "enum_type": cli_args.enum(["a", "b"], short = "a"), + "float_arg": cli_args.float(short = "a"), + "int_arg": cli_args.int(short = "a"), + "list_type": cli_args.list(cli_args.int(), short = "a"), + "optional": cli_args.option(cli_args.string(), short = "a"), + "string_arg": cli_args.string(short = "a"), + "target": cli_args.target_label(short = "a"), + }, +) diff --git a/tests/e2e/bxl/test_bxl_cli_data/cli_args_bad_case.bxl b/tests/e2e/bxl/test_bxl_cli_data/cli_args_bad_case.bxl new file mode 100644 index 0000000000000..e9fb3eb11fdd4 --- /dev/null +++ b/tests/e2e/bxl/test_bxl_cli_data/cli_args_bad_case.bxl @@ -0,0 +1,17 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _impl_cli_test_bad_case(ctx): + ctx.output.print("my_arg: " + repr(ctx.cli_args.my_arg)) + +cli_test_bad_case = bxl_main( + impl = _impl_cli_test_bad_case, + cli_args = { + "my-arg": cli_args.string(), + "my_arg": cli_args.string(), + }, +) diff --git a/tests/e2e/bxl/test_bxl_ensure.py b/tests/e2e/bxl/test_bxl_ensure.py new file mode 100644 index 0000000000000..962469b5fbe58 --- /dev/null +++ b/tests/e2e/bxl/test_bxl_ensure.py @@ -0,0 +1,233 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json +import os +import re +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +def _replace_hash(s: str) -> str: + return re.sub(r"\b[0-9a-f]{16}\b", "", s) + + +@buck_test( + inplace=False, + data_dir="no_materialization_bxl_build", +) +async def test_bxl_ensure_no_materialization(buck: Buck) -> None: + result = await buck.bxl( + "//remote_text.bxl:ensure", + "--materializations=none", + ) + + [output] = result.stdout.splitlines() + assert os.path.exists(buck.cwd / Path(output)) is False + + result = await buck.bxl( + "//remote_text.bxl:ensure", + ) + + [output] = result.stdout.splitlines() + assert os.path.exists(buck.cwd / Path(output)) is True + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_bxl_ensure(buck: Buck) -> None: + result = await buck.bxl( + "//bxl/ensure.bxl:ensure_bxl_build_result_test", + ) + first_output = result.stdout.splitlines()[0] + assert (buck.cwd / Path(first_output)).read_text() == "FOO" + + result = await buck.bxl( + "//bxl/ensure.bxl:ensure_build_result_test", + "--", + "--target", + ":buildable", + ) + + outputs = json.loads(result.stdout) + [buck_out] = [ + v + for (k, v) in outputs.items() + if k.startswith("root//:buildable (root//platforms:platform1#") + ][0] + assert (buck.cwd / Path(buck_out)).read_text() == "FOO" + + result = await buck.bxl( + "//bxl/ensure.bxl:ensure_cmd_line_test", + ) + + lines = sorted(result.stdout.splitlines()) + assert (buck.cwd / Path(lines[0])).read_text() == "run_info_out" + assert (buck.cwd / Path(lines[1])).read_text() == "target_with_tset\n" + assert (buck.cwd / Path(lines[2])).read_text() == "tset1\n" + assert (buck.cwd / Path(lines[3])).read_text() == "tset2\n" + assert (buck.cwd / Path(lines[4])).read_text() == "tset3\n" + + result = await buck.bxl( + "//bxl/ensure.bxl:ensure_cmd_line_json_output", + ) + + json_array = sorted(json.loads(result.stdout)) + assert "target_with_tset" in json_array[0] + assert "tset1" in json_array[1] + assert "tset2" in json_array[2] + assert "tset3" in json_array[3] + + +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_bxl_artifact_path(buck: Buck) -> None: + + result = await buck.bxl( + "fbcode//buck2/tests/targets/bxl/simple/bxl/artifacts.bxl:artifact_path_test", + ) + + outputs = json.loads(result.stdout) + + assert outputs["sources"] == [""] + + assert outputs["source_artifact"] == "" + # The project relative path of the source 
artifact + assert ( + outputs["source_artifact_project_rel_path"] + == "fbcode/buck2/tests/targets/rules/shell/DATA" + ) + + # Abs path for the source artifact. The path should exist on the filesystem. + assert outputs["source_artifact_abs_path"] == str( + buck.cwd / Path("buck2/tests/targets/rules/shell/DATA") + ) + assert ( + os.path.exists((buck.cwd / Path(outputs["source_artifact_abs_path"]))) is True + ) + + assert ( + "build artifact out/out.txt bound to fbcode//buck2/tests/targets/rules/shell:gen" + in outputs["build_artifact"] + ) + + prefix = "" + + if buck.isolation_prefix is None: + prefix = "buck-out/v2/gen/fbcode/" + else: + prefix = "buck-out/" + buck.isolation_prefix + "/gen/fbcode/" + + # The project relative path to the buck-out directory with the output + assert prefix in outputs["build_artifact_project_rel_path"] + assert ( + "/buck2/tests/targets/rules/shell/__gen__/out/out.txt" + in outputs["build_artifact_project_rel_path"] + ) + assert str(buck.cwd) not in outputs["build_artifact_project_rel_path"] + + # Abs path for the build artifact. Path should not exist on the filesystem since it's not materialized. + # Note the cwd is "fbcode", so the parent is "fbsource" + assert outputs["build_artifact_abs_path"] == str( + buck.cwd.parent / Path(outputs["build_artifact_project_rel_path"]) + ) + + assert ( + os.path.exists((buck.cwd / Path(outputs["build_artifact_abs_path"]))) is False + ) + + +@buck_test(inplace=False, data_dir="bxl/simple", skip_for_os=["windows"]) +async def test_bxl_artifact_path_cmd_args(buck: Buck) -> None: + result = await buck.bxl( + "//bxl/artifacts.bxl:cmd_args_artifact_path_test", + ) + + outputs = json.loads(result.stdout) + _test_bxl_artifact_path_cmd_args_helper( + buck, + "bin/kind/__target_with_outputs__/run_info_out", + outputs["target_with_outputs_rel_paths"][0], + False, + ) + + _test_bxl_artifact_path_cmd_args_helper( + buck, + "bin/kind/__target_with_outputs__/run_info_out", + outputs["target_with_outputs_abs_paths"][0], + True, + ) + + assert len(outputs["target_with_tset_rel_paths"]) == 4 + + _test_bxl_artifact_path_cmd_args_helper( + buck, + "bin/kind/__target_with_tset__/out.txt", + outputs["target_with_tset_rel_paths"][0], + False, + ) + _test_bxl_artifact_path_cmd_args_helper( + buck, + "bin/kind/__tset1__/out.txt", + outputs["target_with_tset_rel_paths"][1], + False, + ) + _test_bxl_artifact_path_cmd_args_helper( + buck, + "bin/kind/__tset2__/out.txt", + outputs["target_with_tset_rel_paths"][2], + False, + ) + _test_bxl_artifact_path_cmd_args_helper( + buck, + "bin/kind/__tset3__/out.txt", + outputs["target_with_tset_rel_paths"][3], + False, + ) + + assert len(outputs["target_with_tset_abs_paths"]) == 4 + + _test_bxl_artifact_path_cmd_args_helper( + buck, + "bin/kind/__target_with_tset__/out.txt", + outputs["target_with_tset_abs_paths"][0], + True, + ) + _test_bxl_artifact_path_cmd_args_helper( + buck, + "bin/kind/__tset1__/out.txt", + outputs["target_with_tset_abs_paths"][1], + True, + ) + _test_bxl_artifact_path_cmd_args_helper( + buck, + "bin/kind/__tset2__/out.txt", + outputs["target_with_tset_abs_paths"][2], + True, + ) + _test_bxl_artifact_path_cmd_args_helper( + buck, + "bin/kind/__tset3__/out.txt", + outputs["target_with_tset_abs_paths"][3], + True, + ) + + +def _test_bxl_artifact_path_cmd_args_helper( + buck: Buck, part_to_validate: str, full_path: str, is_abs: bool +) -> None: + assert "buck-out/v2/gen/root" in full_path + assert part_to_validate in full_path + if is_abs: + assert str((buck.cwd / 
Path("buck-out/v2/gen/root"))) in full_path + assert os.path.exists(full_path) is False + else: + assert str(buck.cwd) not in full_path + assert os.path.exists((buck.cwd / Path(full_path))) is False diff --git a/tests/e2e/bxl/test_bxl_fs.py b/tests/e2e/bxl/test_bxl_fs.py new file mode 100644 index 0000000000000..765ce0facc499 --- /dev/null +++ b/tests/e2e/bxl/test_bxl_fs.py @@ -0,0 +1,142 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json +import os + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_bxl_fs_exists(buck: Buck) -> None: + await buck.bxl("//bxl:fs.bxl:exists", "--", "--root_path", str(buck.cwd)) + + +@buck_test(inplace=False, data_dir="bxl/simple", skip_for_os=["windows"]) +async def test_bxl_fs_exists_symlink(buck: Buck) -> None: + link_path = buck.cwd / "symlink/foo/bar" + if not os.path.islink(link_path): + os.unlink(link_path) + os.symlink("../bar", link_path) + await buck.bxl("//bxl:fs.bxl:exists_symlink") + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_bxl_fs_list(buck: Buck) -> None: + result = await buck.bxl( + "//bxl:fs.bxl:list_relative_path", + ) + + assert result.stdout.splitlines() == [ + "root//bin/TARGETS.fixture", + "root//bin/kind", + ] + + result = await buck.bxl( + "//bxl:fs.bxl:list_absolute_path", "--", "--root_path", str(buck.cwd) + ) + + assert result.stdout.splitlines() == [ + "root//bin/TARGETS.fixture", + "root//bin/kind", + ] + + result = await buck.bxl( + "//bxl:fs.bxl:list_source_artifact", + ) + + assert result.stdout.splitlines() == [ + "root//bin/kind/TARGETS.fixture", + "root//bin/kind/rules.bzl", + ] + + result = await buck.bxl( + "//bxl:fs.bxl:list_file_node", + ) + + assert result.stdout.splitlines() == [ + "root//bin/kind/TARGETS.fixture", + "root//bin/kind/rules.bzl", + ] + + result = await buck.bxl( + "//bxl:fs.bxl:list_dirs_only", + ) + + assert result.stdout.splitlines() == [ + "root//bin/kind", + ] + + result = await buck.bxl("//bxl:fs.bxl:list_cell_path") + + expected_output = [ + "root//bin/TARGETS.fixture", + "root//bin/kind", + ] + + output = json.loads(result.stdout) + assert output["@root//bin"] == expected_output + assert output["root//bin"] == expected_output + assert output["//bin"] == expected_output + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_bxl_fs_is_file(buck: Buck) -> None: + await buck.bxl("//bxl:fs.bxl:is_file", "--", "--root_path", str(buck.cwd)) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_bxl_fs_is_dir(buck: Buck) -> None: + await buck.bxl("//bxl:fs.bxl:is_dir", "--", "--root_path", str(buck.cwd)) + + +@buck_test(inplace=False, data_dir="bxl/simple", skip_for_os=["windows"]) +async def test_bxl_fs_project_rel_path(buck: Buck) -> None: + result = await buck.bxl("//bxl:fs.bxl:project_rel_path") + + assert result.stdout.splitlines() == [ + "bin/kind/TARGETS.fixture", + "bin/kind/rules.bzl", + ] + + +@buck_test(inplace=False, data_dir="bxl/simple", skip_for_os=["windows"]) +async def test_bxl_fs_abs_path_unsafe(buck: Buck) -> None: + result = await 
buck.bxl("//bxl:fs.bxl:abs_path_unsafe") + + assert result.stdout.splitlines() == [ + str(buck.cwd / "bin/kind/TARGETS.fixture"), + str(buck.cwd / "bin/kind/rules.bzl"), + ] + + +@buck_test(inplace=False, data_dir="bxl/simple", skip_for_os=["windows"]) +async def test_bxl_fs_source(buck: Buck) -> None: + await buck.bxl("//bxl:fs.bxl:source") + + await expect_failure( + buck.bxl("//bxl:fs.bxl:source_invalid_path"), + stderr_regex="Inferred package path `root//fs` is not a valid package within the given file path `root//this/path/does/not/exist", + ) + await expect_failure( + buck.bxl("//bxl:fs.bxl:source_invalid_hint"), + stderr_regex="Inferred package path `root//bin/kind` is not a valid package within the given file path `root//fs/src/source", + ) + await expect_failure( + buck.bxl("//bxl:fs.bxl:source_too_many_hints"), + stderr_regex="Expected a single target hint, not an iterable", + ) + + +@buck_test(inplace=False, data_dir="bxl/simple", skip_for_os=["windows"]) +async def test_bxl_file_set_ops(buck: Buck) -> None: + await buck.bxl("//bxl/fs.bxl:file_set_operations") diff --git a/tests/e2e/bxl/test_bxl_lazy_analysis.py b/tests/e2e/bxl/test_bxl_lazy_analysis.py new file mode 100644 index 0000000000000..be7a631647d9f --- /dev/null +++ b/tests/e2e/bxl/test_bxl_lazy_analysis.py @@ -0,0 +1,85 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_lazy_analysis_resolve(buck: Buck) -> None: + await buck.bxl( + "//bxl/lazy_analysis.bxl:lazy_analysis_resolve", + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_lazy_analysis_resolve_error(buck: Buck) -> None: + await expect_failure( + buck.bxl("//bxl/lazy_analysis.bxl:lazy_analysis_resolve_error"), + stderr_regex="requested sub target named `missing_subtarget` .* is not available", + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_lazy_analysis_try_resolve(buck: Buck) -> None: + await buck.bxl( + "//bxl/lazy_analysis.bxl:lazy_analysis_try_resolve", + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_lazy_analysis_try_resolve_error(buck: Buck) -> None: + await buck.bxl( + "//bxl/lazy_analysis.bxl:lazy_analysis_try_resolve_error", + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_batch_lazy_analysis_resolve(buck: Buck) -> None: + await buck.bxl( + "//bxl/lazy_analysis.bxl:batch_lazy_analysis_resolve", + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_batch_lazy_analysis_resolve_error(buck: Buck) -> None: + await expect_failure( + buck.bxl("//bxl/lazy_analysis.bxl:batch_lazy_analysis_resolve_error"), + stderr_regex="requested sub target named `missing_subtarget` .* is not available", + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_batch_lazy_analysis_try_resolve(buck: Buck) -> None: + await buck.bxl( + "//bxl/lazy_analysis.bxl:batch_lazy_analysis_try_resolve", + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_join_lazy_analysis_resolve(buck: Buck) -> None: + await buck.bxl( + 
"//bxl/lazy_analysis.bxl:join_lazy_analysis_resolve", + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_join_lazy_analysis_resolve_error(buck: Buck) -> None: + await expect_failure( + buck.bxl("//bxl/lazy_analysis.bxl:join_lazy_analysis_resolve_error"), + stderr_regex="requested sub target named `missing_subtarget` .* is not available", + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_join_lazy_analysis_try_resolve(buck: Buck) -> None: + await buck.bxl( + "//bxl/lazy_analysis.bxl:join_lazy_analysis_try_resolve", + ) diff --git a/tests/e2e/bxl/test_bxl_lazy_configured_target_node.py b/tests/e2e/bxl/test_bxl_lazy_configured_target_node.py new file mode 100644 index 0000000000000..c38e738f3db1d --- /dev/null +++ b/tests/e2e/bxl/test_bxl_lazy_configured_target_node.py @@ -0,0 +1,43 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_lazy_configured_target(buck: Buck) -> None: + await buck.bxl( + "//bxl/lazy_configured_target_node.bxl:lazy_configured_target_node_resolve", + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_lazy_configured_target_error(buck: Buck) -> None: + await expect_failure( + buck.bxl( + "//bxl/lazy_configured_target_node.bxl:lazy_configured_target_node_resolve_error" + ), + stderr_regex="root//incompatible_targets:incompatible", + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_lazy_configured_target_catch_error(buck: Buck) -> None: + await buck.bxl( + "//bxl/lazy_configured_target_node.bxl:lazy_configured_target_node_resolve_catch_error", + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_lazy_configured_target_node_pattern(buck: Buck) -> None: + await buck.bxl( + "//bxl/lazy_configured_target_node.bxl:lazy_configured_target_node_pattern", + ) diff --git a/tests/e2e/bxl/test_bxl_lazy_unconfigured_target_node.py b/tests/e2e/bxl/test_bxl_lazy_unconfigured_target_node.py new file mode 100644 index 0000000000000..7dde393b068ec --- /dev/null +++ b/tests/e2e/bxl/test_bxl_lazy_unconfigured_target_node.py @@ -0,0 +1,43 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_lazy_unconfigured_target(buck: Buck) -> None: + await buck.bxl( + "//bxl/lazy_unconfigured_target_node.bxl:lazy_unconfigured_target_node_resolve", + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_lazy_unconfigured_target_error(buck: Buck) -> None: + await expect_failure( + buck.bxl( + "//bxl/lazy_unconfigured_target_node.bxl:lazy_unconfigured_target_node_resolve_error" + ), + stderr_regex="error: Unknown target `not_exist` from package `root//bin`.", + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_lazy_unconfigured_target_catch_error(buck: Buck) -> None: + await buck.bxl( + "//bxl/lazy_unconfigured_target_node.bxl:lazy_unconfigured_target_node_resolve_catch_error", + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_lazy_unconfigured_target_node_pattern(buck: Buck) -> None: + await buck.bxl( + "//bxl/lazy_unconfigured_target_node.bxl:lazy_unconfigured_target_node_pattern", + ) diff --git a/tests/e2e/bxl/test_bxl_node_operations.py b/tests/e2e/bxl/test_bxl_node_operations.py new file mode 100644 index 0000000000000..1c402ced6d879 --- /dev/null +++ b/tests/e2e/bxl/test_bxl_node_operations.py @@ -0,0 +1,187 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json +import re + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +def _replace_hash(s: str) -> str: + return re.sub(r"\b[0-9a-f]{16}\b", "", s) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_bxl_label_functions(buck: Buck) -> None: + result = await buck.bxl( + "//bxl/label_functions.bxl:label_func_test", + ) + + assert _replace_hash(result.stdout).splitlines() == [ + "root//bin:the_binary (root//platforms:platform1#)", + "root//bin:the_binary[sub] (root//platforms:platform1#)", + # configured_target() called for below, should only return configured target + "root//bin:the_binary (root//platforms:platform1#)", + "root//bin:the_binary[sub1][sub2] (root//platforms:platform1#)", + # configured_target() called for below, should only return configured target + "root//bin:the_binary (root//platforms:platform1#)", + "root//bin:the_binary", + "root//bin:the_binary[sub]", + "root//bin:the_binary[sub1][sub2]", + ] + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_bxl_coerced_attrs(buck: Buck) -> None: + result = await buck.bxl( + "//bxl/coerced_attributes.bxl:coerced_attrs_test", + ) + + result = await buck.bxl( + "//bxl/coerced_attributes.bxl:coerced_attributes_display_json_test", + ) + cmd_select = json.loads(result.stdout) + assert cmd_select["__type"] == "selector" + assert cmd_select["entries"] == { + "DEFAULT": "foo", + "ovr_config//os:macos": "bar", + "ovr_config//os:windows": "foobar", + } + + result = await buck.bxl( + "//bxl/coerced_attributes.bxl:coerced_attributes_display_test", + ) + + output = result.stdout + + assert "root//platforms:platform1" in output + assert "genrule_with_selects" in output + assert ( + 
'select({"ovr_config//os:macos": "bar", "ovr_config//os:windows": "foobar", "DEFAULT": "foo"})' + in output + ) + assert "PUBLIC" in output + assert "magic" in output + + await buck.bxl( + "//bxl/coerced_attributes.bxl:selector_attrs_test", + ) + await buck.bxl( + "//bxl/coerced_attributes.bxl:concat_attrs_test", + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_bxl_configured_node(buck: Buck) -> None: + result = await buck.bxl( + "//bxl/node.bxl:configured_node_test", + ) + + assert _replace_hash(result.stdout).splitlines() == [ + "root//bin:the_binary (root//platforms:platform1#)", + "root//rules/rules.bzl:_foo_binary", + "normal", + ] + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_bxl_unconfigured_node(buck: Buck) -> None: + result = await buck.bxl( + "//bxl/node.bxl:unconfigured_node_test", + ) + + assert result.stdout.splitlines() == [ + "root//bin:the_binary", + "root//rules/rules.bzl:_foo_binary", + "normal", + ] + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_node_attrs(buck: Buck) -> None: + await buck.bxl( + "//bxl:node_attributes.bxl:attrs_test", + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_lazy_node_attrs(buck: Buck) -> None: + await buck.bxl( + "//bxl:node_attributes.bxl:lazy_attrs_test", + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_lazy_node_attrs_with_special_attrs(buck: Buck) -> None: + await buck.bxl( + "//bxl:node_attributes.bxl:lazy_attrs_with_special_attrs_test", + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_resolved_node_attrs(buck: Buck) -> None: + await buck.bxl( + "//bxl:resolved_node_attributes.bxl:resolved_attrs_test", + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_lazy_resolved_node_attrs(buck: Buck) -> None: + await buck.bxl( + "//bxl:resolved_node_attributes.bxl:lazy_resolved_attrs_test", + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_lazy_resolved_node_with_special_attrs(buck: Buck) -> None: + await buck.bxl( + "//bxl:resolved_node_attributes.bxl:lazy_resolved_attrs_with_special_attrs_test", + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_unconfigured_target_node_attrs(buck: Buck) -> None: + await buck.bxl( + "//bxl:unconfigure_target_node_attrs.bxl:node_attrs", + ) + + result = await buck.bxl( + "//bxl/unconfigure_target_node_attrs.bxl:node_attrs_display", + ) + + output = result.stdout + + assert "root//platforms:platform1" in output + assert "genrule_with_selects" in output + assert ( + 'select({"ovr_config//os:macos": "bar", "ovr_config//os:windows": "foobar", "DEFAULT": "foo"})' + in output + ) + assert "PUBLIC" in output + assert "magic" in output + + await buck.bxl( + "//bxl/unconfigure_target_node_attrs.bxl:selector_attrs_test", + ) + await buck.bxl( + "//bxl/unconfigure_target_node_attrs.bxl:concat_attrs_test", + ) + + await buck.bxl( + "//bxl/unconfigure_target_node_attrs.bxl:attr_metadata", + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_configured_target_node_attrs(buck: Buck) -> None: + await buck.bxl( + "//bxl:configured_target_node_attrs.bxl:attrs_test", + ) diff --git a/tests/e2e/bxl/test_bxl_query.py b/tests/e2e/bxl/test_bxl_query.py new file mode 100644 index 0000000000000..2f8f1b897cb1c --- /dev/null +++ b/tests/e2e/bxl/test_bxl_query.py @@ -0,0 +1,434 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import random +import re +import string + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +def _replace_hash(s: str) -> str: + return re.sub(r"\b[0-9a-f]{16}\b", "", s) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_cquery_owner(buck: Buck) -> None: + result = await buck.bxl( + "//bxl/cquery.bxl:owner_test", + ) + assert ( + _replace_hash(result.stdout) + == "[root//bin:the_binary (root//platforms:platform1#)]\n" + ) + + result = await buck.bxl( + "//bxl/cquery.bxl:owner_with_cell_path_test", + ) + assert ( + _replace_hash(result.stdout) + == "[root//bin:the_binary (root//platforms:platform1#)]\n" + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_cquery_owner_list(buck: Buck) -> None: + result = await buck.bxl( + "//bxl/cquery.bxl:owner_list_test", + ) + assert ( + _replace_hash(result.stdout) + == "[root//bin:the_binary (root//platforms:platform1#), root//bin:the_binary_with_dir_srcs (root//platforms:platform1#)]\n" + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_cquery_kind(buck: Buck) -> None: + result = await buck.bxl( + "//bxl:cquery.bxl:kind_test", + ) + + assert "foo" in result.stdout + assert "bar" not in result.stdout + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_cquery_inputs(buck: Buck) -> None: + result = await buck.bxl( + "//bxl:cquery.bxl:inputs_test", + ) + + assert "TARGETS.fixture" in result.stdout + assert "file_set" in result.stdout + assert "1" in result.stdout + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_cquery_filter(buck: Buck) -> None: + result = await buck.bxl( + "//bxl:cquery.bxl:filter_test", + ) + + assert "root//bin:the_binary" in result.stdout + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_cquery_attrregex_filter(buck: Buck) -> None: + result = await buck.bxl( + "//bxl/cquery.bxl:attrregexfilter_test", + ) + + assert "foo" in result.stdout + assert "bzzt" in result.stdout + assert "bar" not in result.stdout + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_cquery_attrfilter(buck: Buck) -> None: + result = await buck.bxl( + "//bxl/cquery.bxl:attrfilter_test", + ) + + assert "foo" in result.stdout + assert "bzzt" not in result.stdout + assert "bar" not in result.stdout + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_cquery_nattrfilter(buck: Buck) -> None: + result = await buck.bxl( + "//bxl/cquery.bxl:nattrfilter_test", + ) + + assert "foo" not in result.stdout + assert "bzzt" in result.stdout + assert "bar" in result.stdout + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_cquery_rdeps(buck: Buck) -> None: + result = await buck.bxl( + "//bxl/cquery.bxl:rdeps_test", + ) + assert ( + _replace_hash(result.stdout) + == "[root//bin:the_binary (root//platforms:platform1#), root//lib:lib1 (root//platforms:platform1#), root//lib:file1 (root//platforms:platform1#)]\n" + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_cquery_deps(buck: Buck) -> None: + result = await buck.bxl( + "//bxl/cquery.bxl:deps_test", + ) + assert ( + _replace_hash(result.stdout) + == "[root//bin:the_binary (root//platforms:platform1#), 
root//:data (root//platforms:platform1#), root//lib:lib1 (root//platforms:platform1#), root//lib:lib2 (root//platforms:platform1#), root//lib:lib3 (root//platforms:platform1#), root//:foo_toolchain (root//platforms:platform1#), root//:bin (root//platforms:platform1#)]\n" + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_cquery_buildfile(buck: Buck) -> None: + await buck.bxl("//bxl/cquery.bxl:buildfile_test") + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_cquery_incompatible_configured_targets(buck: Buck) -> None: + # incompatible target should be skipped and the cquery should return compatible targets + result = await buck.bxl("//bxl/cquery.bxl:incompatible_configured_targets_test") + assert "Skipped 1 incompatible targets" in result.stderr + assert "root//incompatible_targets:incompatible" in result.stderr + assert "root//incompatible_targets:foo" in result.stdout + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_cquery_incompatible_configured_targets_single_label(buck: Buck) -> None: + # incompatible target should be skipped and the cquery should not fail + result = await buck.bxl( + "//bxl/cquery.bxl:incompatible_configured_targets_single_label_test" + ) + assert "Skipped 1 incompatible targets" in result.stderr + assert "root//incompatible_targets:incompatible" in result.stderr + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_cquery_incompatible_targets(buck: Buck) -> None: + # incompatible target should be skipped and the cquery should not fail + result = await buck.bxl("//bxl/cquery.bxl:incompatible_targets_test") + assert "Skipped 1 incompatible targets" in result.stderr + assert "root//incompatible_targets:incompatible" in result.stderr + assert "root//incompatible_targets:foo" not in result.stdout + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_cquery_incompatible_targets_recursive(buck: Buck) -> None: + # incompatible target should be skipped and the cquery should return compatible targets + result = await buck.bxl("//bxl/cquery.bxl:incompatible_targets_test_recursive") + assert "Skipped 2 incompatible targets" in result.stderr + assert "root//incompatible_targets:incompatible" in result.stderr + assert "root//incompatible_targets/inner_folder:incompatible_inner" in result.stderr + assert "root//incompatible_targets/inner_folder:foo_inner" in result.stdout + assert "root//incompatible_targets:foo" in result.stdout + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_cquery_configured_label(buck: Buck) -> None: + await buck.bxl("//bxl/cquery.bxl:cquery_configured_label") + + +@buck_test(inplace=False, data_dir="testsof") +async def test_cquery_testsof(buck: Buck) -> None: + result = await buck.bxl( + "//cquery.bxl:testsof_test", + ) + assert "root//:foo_test (root//:platform_default_tests" in result.stdout + + result = await buck.bxl( + "//cquery.bxl:testsof_with_default_target_platform_test", + ) + assert ( + "root//:foo_test_with_default_platform (root//:foo_test_default_platform" + in result.stdout + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_uquery_allpaths(buck: Buck) -> None: + result = await buck.bxl( + "//bxl:uquery.bxl:allpaths_test", + ) + + assert ( + "[root//graph:one, root//graph:ten, root//graph:eleven, root//graph:two, root//graph:three]\n" + == result.stdout + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_uquery_allpaths_filtered(buck: Buck) -> None: + result = await buck.bxl( + 
"//bxl:uquery.bxl:allpaths_filtered_test", + ) + + assert "[root//graph:one, root//graph:two, root//graph:three]\n" == result.stdout + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_uquery_somepath(buck: Buck) -> None: + result = await buck.bxl( + "//bxl:uquery.bxl:somepath_test", + ) + + assert "[root//graph:one, root//graph:two, root//graph:three]\n" == result.stdout + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_uquery_somepath_filtered(buck: Buck) -> None: + result = await buck.bxl( + "//bxl:uquery.bxl:somepath_filtered_test", + ) + + assert ( + "[root//graph:one, root//graph:ten, root//graph:twenty]\n" + + "[root//graph:one, root//graph:five, root//graph:six, root//graph:twenty]\n" + == result.stdout + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_uquery_kind(buck: Buck) -> None: + result = await buck.bxl( + "//bxl:uquery.bxl:kind_test", + ) + + assert "foo" in result.stdout + assert "bar" not in result.stdout + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_uquery_inputs(buck: Buck) -> None: + result = await buck.bxl( + "//bxl:uquery.bxl:inputs_test", + ) + + assert "TARGETS.fixture" in result.stdout + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_uquery_filter(buck: Buck) -> None: + result = await buck.bxl( + "//bxl:uquery.bxl:filter_test", + ) + + assert "root//bin:the_binary" in result.stdout + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_uquery_attrregex_filter(buck: Buck) -> None: + result = await buck.bxl( + "//bxl/uquery.bxl:attrregexfilter_test", + ) + + assert "foo" in result.stdout + assert "bzzt" in result.stdout + assert "bar" not in result.stdout + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_uquery_attrfilter(buck: Buck) -> None: + result = await buck.bxl( + "//bxl/uquery.bxl:attrfilter_test", + ) + + assert "foo" in result.stdout + assert "bzzt" not in result.stdout + assert "bar" not in result.stdout + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_uquery_owner(buck: Buck) -> None: + result = await buck.bxl( + "//bxl/uquery.bxl:owner_test", + ) + assert result.stdout == "[root//bin:the_binary]\n" + + result = await buck.bxl( + "//bxl/uquery.bxl:owner_with_cell_path_test", + ) + assert _replace_hash(result.stdout) == "[root//bin:the_binary]\n" + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_uquery_owner_list(buck: Buck) -> None: + result = await buck.bxl( + "//bxl/uquery.bxl:owner_list_test", + ) + assert ( + _replace_hash(result.stdout) + == "[root//bin:the_binary, root//bin:the_binary_with_dir_srcs]\n" + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_uquery_targets_in_buildfile(buck: Buck) -> None: + result = await buck.bxl( + "//bxl/uquery.bxl:targets_in_buildfile_test", + ) + assert ( + result.stdout + == "[root//bin:the_binary, root//bin:the_binary_with_dir_srcs, root//bin:platform]\n" + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_uquery_buildfile(buck: Buck) -> None: + await buck.bxl("//bxl/uquery.bxl:buildfile_test") + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_uquery_rdeps(buck: Buck) -> None: + result = await buck.bxl( + "//bxl/uquery.bxl:rdeps_test", + ) + assert result.stdout == "[root//bin:the_binary, root//lib:lib1, root//lib:file1]\n" + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_query_deps(buck: Buck) -> None: + result = await buck.bxl( + 
"//bxl/uquery.bxl:deps_test", + ) + assert ( + result.stdout + == "[root//bin:the_binary, root//:data, root//lib:lib1, root//lib:lib2, root//lib:lib3, root//:foo_toolchain, root//:bin]\n" + ) + + +@buck_test(inplace=False, data_dir="testsof") +async def test_uquery_testsof(buck: Buck) -> None: + result = await buck.bxl( + "//uquery.bxl:testsof_test", + ) + assert "root//:foo_test" in result.stdout + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_uquery_eval(buck: Buck) -> None: + result = await buck.bxl( + "//bxl/uquery.bxl:eval_query_test", + ) + assert result.stdout == "[root//bin/TARGETS.fixture]\n" + + result = await buck.bxl( + "//bxl/uquery.bxl:eval_query_with_query_args", + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_bxl_aquery_incompatible_targets(buck: Buck) -> None: + # incompatible target should be skipped and the aquery should not fail + result = await buck.bxl("//bxl/aquery.bxl:incompatible_targets") + assert "Skipped 1 incompatible targets" in result.stderr + assert "root//incompatible_targets:incompatible" in result.stderr + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_cquery_eval(buck: Buck) -> None: + result = await buck.bxl( + "//bxl:cquery.bxl:eval_query_test", + ) + + assert "TARGETS.fixture" in result.stdout + + result = await buck.bxl( + "//bxl/cquery.bxl:eval_query_with_query_args", + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_cquery_allpaths(buck: Buck) -> None: + result = await buck.bxl( + "//bxl:cquery.bxl:allpaths_test", + ) + + assert ( + "[root//graph:one (), root//graph:ten (), root//graph:eleven (), root//graph:two (), root//graph:three ()]\n" + == result.stdout + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_cquery_somepath(buck: Buck) -> None: + result = await buck.bxl( + "//bxl:cquery.bxl:somepath_test", + ) + + assert ( + "[root//graph:one (), root//graph:two (), root//graph:three ()]\n" + == result.stdout + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_cquery_somepath_filtered(buck: Buck) -> None: + result = await buck.bxl( + "//bxl:cquery.bxl:somepath_filtered_test", + ) + + assert ( + "[root//graph:one (), root//graph:ten (), root//graph:twenty ()]\n" + + "[root//graph:one (), root//graph:five (), root//graph:six (), root//graph:twenty ()]\n" + == result.stdout + ) + + +def random_string() -> str: + return "".join(random.choice(string.ascii_lowercase) for i in range(256)) diff --git a/tests/e2e/bxl/test_bxl_targets.py b/tests/e2e/bxl/test_bxl_targets.py new file mode 100644 index 0000000000000..e964b2e36957f --- /dev/null +++ b/tests/e2e/bxl/test_bxl_targets.py @@ -0,0 +1,102 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-strict + + +import json +import re + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +def _replace_hash(s: str) -> str: + return re.sub(r"\b[0-9a-f]{16}\b", "", s) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_bxl_new_target_set(buck: Buck) -> None: + await buck.bxl( + "//bxl/new_target_set.bxl:new_ctarget_set", + ) + + await buck.bxl( + "//bxl/new_target_set.bxl:new_utarget_set", + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_bxl_target_set_ops(buck: Buck) -> None: + await buck.bxl("//bxl/target_set_ops.bxl:test_operations") + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_bxl_target_platform_from_value_as_starlark_target_label( + buck: Buck, +) -> None: + # Pass in explicit target platform from client. Result should be configured with this target platform. + result = await buck.bxl( + "--target-platforms", + "root//platforms:platform2", + "//bxl/cquery.bxl:owner_test", + ) + assert ( + _replace_hash(result.stdout) + == "[root//bin:the_binary (root//platforms:platform2#)]\n" + ) + + # No target platform specified from client context. Result should be configured with root//platforms:platform1 + result = await buck.bxl( + "//bxl/cquery.bxl:owner_test", + ) + assert ( + _replace_hash(result.stdout) + == "[root//bin:the_binary (root//platforms:platform1#)]\n" + ) + + # Target platform from client context should be overridden by what's declared in cquery. + result = await buck.bxl( + "--target-platforms", + "root//platforms:platform2", + "//bxl/cquery.bxl:owner_test_with_target_platform", + ) + assert ( + _replace_hash(result.stdout) + == "[root//bin:the_binary (root//platforms:platform1#)]\n" + ) + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_bxl_unconfigured_sub_targets(buck: Buck) -> None: + result = await buck.bxl( + "//bxl/providers.bxl:unconfigured_sub_targets", + ) + + output = json.loads(result.stdout.strip()) + assert output["lib1"] == "root//lib:lib1" + assert output["lib1_FooInfo"] == "root//lib:lib1[FooInfo]" + assert output["lib2"] == "root//lib:lib2" + assert output["lib3_FooInfo"] == "root//lib:lib3[FooInfo]" + + +@buck_test(inplace=False, data_dir="bxl/simple") +async def test_bxl_target_exists(buck: Buck) -> None: + await buck.bxl( + "//bxl/target_exists.bxl:target_exists", + ) + + await expect_failure( + buck.bxl("//bxl/target_exists.bxl:target_exists_no_target_patterns"), + stderr_regex="Expected a single target as a string literal, not a target pattern", + ) + + await expect_failure( + buck.bxl("//bxl/target_exists.bxl:target_exists_no_subtargets"), + stderr_regex="Expecting target pattern, without providers", + ) diff --git a/tests/e2e/bxl/test_execution_platforms.py b/tests/e2e/bxl/test_execution_platforms.py new file mode 100644 index 0000000000000..1fd5de7d7b697 --- /dev/null +++ b/tests/e2e/bxl/test_execution_platforms.py @@ -0,0 +1,112 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
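+#
+# Exercises execution-platform selection from BXL: a dynamic output pinned to
+# local execution, exec_deps and toolchains that are only compatible with one
+# executor, and an exec_compatible_with override. Every invocation passes a
+# fresh test.cache_buster config value so no result is served from the cache.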
+ +# pyre-strict + + +import os +import random +import string +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(inplace=False) +async def test_bxl_exec_platform_dynamic_output(buck: Buck) -> None: + result = await buck.bxl( + "//executor_fallback_tests/dynamic.bxl:test_dynamic_output", + "-c", + f"test.cache_buster={random_string()}", + "--local-only", + ) + + output = result.stdout.splitlines()[0] + assert os.path.exists(buck.cwd / Path(output)) + + await expect_failure( + buck.bxl( + "//executor_fallback_tests/dynamic.bxl:test_dynamic_output", + "-c", + f"test.cache_buster={random_string()}", + "--remote-only", + ), + stderr_regex="Incompatible executor preferences", + ) + + +@buck_test(inplace=False) +async def test_bxl_execution_platforms(buck: Buck) -> None: + result = await buck.bxl( + "//executor_fallback_tests/test.bxl:test_exec_platforms", + "-c", + f"test.cache_buster={random_string()}", + "--", + "--exec_deps", + "//executor_fallback_tests:remote_only", + ) + + output = result.stdout.splitlines()[0] + assert os.path.exists(buck.cwd / Path(output)) + + await expect_failure( + buck.bxl( + "//executor_fallback_tests/test.bxl:test_exec_platforms", + "-c", + f"test.cache_buster={random_string()}", + "--", + "--exec_deps", + "//executor_fallback_tests:local_only", + ) + ) + + result = await buck.bxl( + "//executor_fallback_tests/test.bxl:test_exec_platforms", + "-c", + f"test.cache_buster={random_string()}", + "--", + "--toolchains", + "//executor_fallback_tests:remote_only_toolchain", + ) + + output = result.stdout.splitlines()[0] + assert os.path.exists(buck.cwd / Path(output)) + + await expect_failure( + buck.bxl( + "//executor_fallback_tests/test.bxl:test_exec_platforms", + "-c", + f"test.cache_buster={random_string()}", + "--", + "--toolchains", + "//executor_fallback_tests:local_only_toolchain", + ) + ) + + result = await buck.bxl( + "//executor_fallback_tests/test.bxl:test_exec_compatible_with", + "-c", + f"test.cache_buster={random_string()}", + ) + + output = result.stdout.splitlines()[0] + assert os.path.exists(buck.cwd / Path(output)) + + await expect_failure( + buck.bxl( + "//executor_fallback_tests/test.bxl:test_exec_compatible_with", + "-c", + f"test.cache_buster={random_string()}", + "--remote-only", + ) + ) + + +def random_string() -> str: + return "".join(random.choice(string.ascii_lowercase) for i in range(256)) diff --git a/tests/e2e/bxl/test_execution_platforms_data/.buckconfig b/tests/e2e/bxl/test_execution_platforms_data/.buckconfig new file mode 100644 index 0000000000000..d0736fc6c6350 --- /dev/null +++ b/tests/e2e/bxl/test_execution_platforms_data/.buckconfig @@ -0,0 +1,8 @@ +[cells] + root = . 
+ +[buildfile] + name=TARGETS.fixture + +[build] + execution_platforms = root//platforms:platforms diff --git a/tests/e2e/bxl/test_execution_platforms_data/.buckroot b/tests/e2e/bxl/test_execution_platforms_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/e2e/bxl/test_execution_platforms_data/executor_fallback_tests/TARGETS.fixture b/tests/e2e/bxl/test_execution_platforms_data/executor_fallback_tests/TARGETS.fixture new file mode 100644 index 0000000000000..ca8e0a9809db1 --- /dev/null +++ b/tests/e2e/bxl/test_execution_platforms_data/executor_fallback_tests/TARGETS.fixture @@ -0,0 +1,29 @@ +load("//:rules.bzl", "command", "toolchain") + +command( + name = "local_only", + command = "local_only.py", + default_target_platform = "//platforms:target", + exec_compatible_with = ["//platforms:local_only"], +) + +command( + name = "remote_only", + command = "remote_only.py", + default_target_platform = "//platforms:target", + exec_compatible_with = ["//platforms:remote_only"], +) + +toolchain( + name = "local_only_toolchain", + command = "local_only.py", + default_target_platform = "//platforms:target", + exec_compatible_with = ["//platforms:local_only"], +) + +toolchain( + name = "remote_only_toolchain", + command = "remote_only.py", + default_target_platform = "//platforms:target", + exec_compatible_with = ["//platforms:remote_only"], +) diff --git a/tests/e2e/bxl/test_execution_platforms_data/executor_fallback_tests/dynamic.bxl b/tests/e2e/bxl/test_execution_platforms_data/executor_fallback_tests/dynamic.bxl new file mode 100644 index 0000000000000..ee69752206f5d --- /dev/null +++ b/tests/e2e/bxl/test_execution_platforms_data/executor_fallback_tests/dynamic.bxl @@ -0,0 +1,34 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _test_dynamic_output(ctx): + bxl_actions = ctx.bxl_actions(target_platform = "//platforms:target") + actions = bxl_actions.actions + + dynamic = actions.declare_output("dynamic") + foo = actions.write("foo", "content") + + def my_deferred(ctx, _artifacts, outputs): + ctx.bxl_actions().actions.run( + [ + "python3", + "executor_fallback_tests/local_only.py", + outputs[dynamic].as_output(), + ], + category = "command", + env = {"cache_buster": read_config("test", "cache_buster", "")}, + local_only = True, + ) + + actions.dynamic_output(dynamic = [foo], inputs = [], outputs = [dynamic.as_output()], f = my_deferred) + + ctx.output.print(ctx.output.ensure(dynamic)) + +test_dynamic_output = bxl_main( + impl = _test_dynamic_output, + cli_args = {}, +) diff --git a/tests/e2e/bxl/test_execution_platforms_data/executor_fallback_tests/local_only.py b/tests/e2e/bxl/test_execution_platforms_data/executor_fallback_tests/local_only.py new file mode 100644 index 0000000000000..eca503194cb99 --- /dev/null +++ b/tests/e2e/bxl/test_execution_platforms_data/executor_fallback_tests/local_only.py @@ -0,0 +1,17 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
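+#
+# Touches the output path given in argv[1], but only on a local host: the
+# beacon file checked below exists on remote-execution workers, so if it is
+# present this script fails on purpose.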
+
+import pathlib
+import sys
+
+re_worker_path = "/run/re_worker/beacon"
+if pathlib.Path(re_worker_path).exists():
+    print("This only runs locally", file=sys.stderr)
+    sys.exit(1)
+
+out = sys.argv[1]
+pathlib.Path(out).touch()
diff --git a/tests/e2e/bxl/test_execution_platforms_data/executor_fallback_tests/remote_only.py b/tests/e2e/bxl/test_execution_platforms_data/executor_fallback_tests/remote_only.py
new file mode 100644
index 0000000000000..082fcd998eea1
--- /dev/null
+++ b/tests/e2e/bxl/test_execution_platforms_data/executor_fallback_tests/remote_only.py
@@ -0,0 +1,17 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+import pathlib
+import sys
+
+re_worker_path = "/run/re_worker/beacon"
+if not pathlib.Path(re_worker_path).exists():
+    print("This only runs on RE", file=sys.stderr)
+    sys.exit(1)
+
+out = sys.argv[1]
+pathlib.Path(out).touch()
diff --git a/tests/e2e/bxl/test_execution_platforms_data/executor_fallback_tests/test.bxl b/tests/e2e/bxl/test_execution_platforms_data/executor_fallback_tests/test.bxl
new file mode 100644
index 0000000000000..738d8cbd8f104
--- /dev/null
+++ b/tests/e2e/bxl/test_execution_platforms_data/executor_fallback_tests/test.bxl
@@ -0,0 +1,61 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+def _test_exec_platforms(ctx):
+    bxl_actions = ctx.bxl_actions(exec_deps = ctx.cli_args.exec_deps, toolchains = ctx.cli_args.toolchains, target_platform = "//platforms:target")
+    actions = bxl_actions.actions
+    out = actions.declare_output("out")
+
+    dependency = None
+    if len(bxl_actions.exec_deps) != 0:
+        dependency = bxl_actions.exec_deps[ctx.cli_args.exec_deps[0]]
+    elif len(bxl_actions.toolchains) != 0:
+        dependency = bxl_actions.toolchains[ctx.cli_args.toolchains[0]]
+    else:
+        fail("test setup is wrong - one of exec_deps or toolchains must be provided")
+
+    actions.run(
+        [
+            "python3",
+            dependency[RunInfo],
+            out.as_output(),
+        ],
+        category = "command",
+        env = {"cache_buster": read_config("test", "cache_buster", "")},
+        local_only = False,
+    )
+    ctx.output.print(ctx.output.ensure(out))
+
+test_exec_platforms = bxl_main(
+    impl = _test_exec_platforms,
+    cli_args = {
+        "exec_deps": cli_args.option(cli_args.list(cli_args.sub_target())),
+        "toolchains": cli_args.option(cli_args.list(cli_args.sub_target())),
+    },
+)
+
+def _test_exec_compatible_with(ctx):
+    bxl_actions = ctx.bxl_actions(target_platform = "//platforms:target", exec_compatible_with = "//platforms:local_only")
+    actions = bxl_actions.actions
+    out = actions.declare_output("out")
+
+    actions.run(
+        [
+            "python3",
+            "executor_fallback_tests/local_only.py",
+            out.as_output(),
+        ],
+        category = "command",
+        env = {"cache_buster": read_config("test", "cache_buster", "")},
+        prefer_local = True,
+    )
+    ctx.output.print(ctx.output.ensure(out))
+
+test_exec_compatible_with = bxl_main(
+    impl = _test_exec_compatible_with,
+    cli_args = {},
+)
diff --git a/tests/e2e/bxl/test_execution_platforms_data/platforms/TARGETS.fixture
b/tests/e2e/bxl/test_execution_platforms_data/platforms/TARGETS.fixture
new file mode 100644
index 0000000000000..5b5853ffe90f2
--- /dev/null
+++ b/tests/e2e/bxl/test_execution_platforms_data/platforms/TARGETS.fixture
@@ -0,0 +1,22 @@
+load("//:rules.bzl", "config_setting", "platform", "platforms", "target_platform")
+
+config_setting(name = "remote_setting")
+
+config_setting(name = "local_setting")
+
+target_platform(name = "target")
+
+platform(
+    name = "local_only",
+    setting = ":local_setting",
+)
+
+platform(
+    name = "remote_only",
+    setting = ":remote_setting",
+)
+
+platforms(
+    name = "platforms",
+    platforms = [":local_only", ":remote_only"],
+)
diff --git a/tests/e2e/bxl/test_execution_platforms_data/rules.bzl b/tests/e2e/bxl/test_execution_platforms_data/rules.bzl
new file mode 100644
index 0000000000000..7c5f22a74ce6a
--- /dev/null
+++ b/tests/e2e/bxl/test_execution_platforms_data/rules.bzl
@@ -0,0 +1,132 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+ExecutorConfigInfo = provider(fields = ["config"])
+
+def _platform(ctx):
+    # We need to introduce a constraint to ensure our different execution
+    # platforms are distinct. This is because exec_compatible_with selects a
+    # ConfigurationInfo (which provides a config), not an ExecutionPlatformInfo
+    # (which is matched against instead).
+    configuration = ConfigurationInfo(
+        constraints = {
+            ctx.attrs.setting.label.raw_target(): ConstraintValueInfo(
+                setting = ctx.attrs.setting[ConstraintSettingInfo],
+                label = ctx.label.raw_target(),
+            ),
+        },
+        values = {},
+    )
+
+    platform = ExecutionPlatformInfo(
+        label = ctx.label.raw_target(),
+        configuration = configuration,
+        executor_config = CommandExecutorConfig(
+            local_enabled = True,
+            remote_enabled = True,
+            remote_execution_properties = {
+                "platform": "linux-remote-execution",
+            },
+            remote_execution_max_input_files_mebibytes = 1,
+            use_limited_hybrid = ctx.attrs.use_limited_hybrid,
+            allow_limited_hybrid_fallbacks = ctx.attrs.allow_hybrid_fallbacks_on_failure,
+            allow_hybrid_fallbacks_on_failure = ctx.attrs.allow_hybrid_fallbacks_on_failure,
+            remote_execution_use_case = "buck2-testing",
+            allow_cache_uploads = ctx.attrs.allow_cache_uploads,
+            experimental_low_pass_filter = ctx.attrs.experimental_low_pass_filter,
+            max_cache_upload_mebibytes = 1,
+        ),
+    )
+
+    return [
+        DefaultInfo(),
+        platform,
+        configuration,
+    ]
+
+platform = rule(
+    impl = _platform,
+    attrs = {
+        "allow_cache_uploads": attrs.bool(default = False),
+        "allow_hybrid_fallbacks_on_failure": attrs.bool(default = False),
+        "experimental_low_pass_filter": attrs.bool(
+            default = read_config("test", "experimental_low_pass_filter", "") in ["true", "True"],
+        ),
+        "setting": attrs.configuration_label(),
+        "use_limited_hybrid": attrs.bool(default = True),
+    },
+)
+
+def _platforms(ctx):
+    return [
+        DefaultInfo(),
+        ExecutionPlatformRegistrationInfo(
+            platforms = [x[ExecutionPlatformInfo] for x in ctx.attrs.platforms],
+        ),
+    ]
+
+platforms = rule(
+    impl = _platforms,
+    attrs = {
+        "platforms": attrs.list(attrs.dep(providers = [ExecutionPlatformInfo])),
+    },
+)
+
+def _target_platform(ctx):
+    return [
+        DefaultInfo(),
+        PlatformInfo(
+            label = str(ctx.label.raw_target()),
+            configuration = ConfigurationInfo(constraints = {}, values = {}),
+        ),
+    ]
+
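+# The target platform used by these tests is deliberately empty; all
+# constraints come from the execution platforms defined above.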
+target_platform = rule( + impl = _target_platform, + attrs = {}, +) + +def _config_setting(ctx): + return [DefaultInfo(), ConstraintSettingInfo(label = ctx.label.raw_target())] + +config_setting = rule( + impl = _config_setting, + attrs = {}, +) + +def _command(ctx): + return [DefaultInfo(default_output = ctx.attrs.command), RunInfo(args = cmd_args(ctx.attrs.command))] + +command = rule( + impl = _command, + attrs = { + "cache_buster": attrs.string(default = read_config("test", "cache_buster", "")), + "command": attrs.source(), + "force_full_hybrid_if_capable": attrs.bool(default = False), + "local_only": attrs.bool(default = False), + "prefer_local": attrs.bool(default = False), + "prefer_remote": attrs.bool(default = False), + "weight": attrs.int(default = 1), + }, +) + +def _toolchain(ctx): + return [DefaultInfo(default_output = ctx.attrs.command), RunInfo(args = cmd_args(ctx.attrs.command))] + +toolchain = rule( + impl = _toolchain, + attrs = { + "cache_buster": attrs.string(default = read_config("test", "cache_buster", "")), + "command": attrs.source(), + "force_full_hybrid_if_capable": attrs.bool(default = False), + "local_only": attrs.bool(default = False), + "prefer_local": attrs.bool(default = False), + "prefer_remote": attrs.bool(default = False), + "weight": attrs.int(default = 1), + }, + is_toolchain_rule = True, +) diff --git a/tests/e2e/check_dependencies_test/BUCK b/tests/e2e/check_dependencies_test/BUCK new file mode 100644 index 0000000000000..d7d64e8e70266 --- /dev/null +++ b/tests/e2e/check_dependencies_test/BUCK @@ -0,0 +1,114 @@ +load("@fbsource//tools/build_defs:check_dependencies_test.bzl", "assert_dependencies_test", "audit_dependents_test", "check_dependencies_test") + +### This file contains some "e2e tests" for the BXL check_dependencies_test infra. 
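+### Roughly: a check_dependencies_test fails if any transitive dep of `target`
+### matches blocklist_patterns, or if allowlist_patterns are given and some dep
+### matches none of them; the expect_failure_msg variants assert those failures.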
+ +ALLOW_LIST = [ + ".*:useless_target.*", + "fbsource//xplat/configurations/buck/apple/common_files:dummy.c", + "fbsource//third-party/libgcc.*", + "fbsource//third-party/tp2/libgcc.*", +] + +BLOCK_LIST = [ + "//this_target_will_never_exist_at_least_we_hope.*", +] + +TARGET1 = "fbcode//buck2/tests/targets/check_dependencies_test:useless_target_1" + +TARGET2 = "fbcode//buck2/tests/targets/check_dependencies_test:useless_target_2" + +TARGET3 = "fbcode//buck2/tests/targets/check_dependencies_test:useless_target_3" + +CONTACTS = ["build_infra"] + +oncall("build_infra") + +check_dependencies_test( + name = "allow_list_and_block_none", + allowlist_patterns = ALLOW_LIST, + contacts = CONTACTS, + target = TARGET2, +) + +check_dependencies_test( + name = "allow_none_and_block_list", + blocklist_patterns = BLOCK_LIST, + contacts = CONTACTS, + target = TARGET2, +) + +check_dependencies_test( + name = "allow_list_and_block_list", + allowlist_patterns = ALLOW_LIST, + blocklist_patterns = BLOCK_LIST, + contacts = CONTACTS, + target = TARGET2, +) + +check_dependencies_test( + name = "allow_list_and_block_emptylist", + allowlist_patterns = ALLOW_LIST, + blocklist_patterns = [], + contacts = CONTACTS, + target = TARGET2, +) + +check_dependencies_test( + name = "allow_emptylist_and_block_list", + allowlist_patterns = [], + blocklist_patterns = BLOCK_LIST, + contacts = CONTACTS, + target = TARGET2, +) + +check_dependencies_test( + name = "fail_blocklist", + blocklist_patterns = [".*:useless_target_1"], + contacts = CONTACTS, + expect_failure_msg = "Found blocklisted targets", + target = TARGET2, +) + +check_dependencies_test( + name = "fail_allowlist", + allowlist_patterns = [".*:nonexistent_target"], + contacts = CONTACTS, + expect_failure_msg = "Found banned targets", + target = TARGET2, +) + +assert_dependencies_test( + name = "expected_deps", + contacts = CONTACTS, + expected_deps = [ + TARGET1, + ], + target = TARGET2, +) + +assert_dependencies_test( + name = "fail_expected_deps", + contacts = CONTACTS, + expect_failure_msg = "Expected dependencies not found", + expected_deps = [ + TARGET2, + ], + target = TARGET1, +) + +audit_dependents_test( + name = "audit_dependents_test", + allowlist_patterns = [".*check_dependencies_test:useless_target_2"], + contacts = CONTACTS, + source_target = TARGET3, + target = TARGET1, +) + +audit_dependents_test( + name = "fail_audit_dependents_test", + allowlist_patterns = [".*check_dependencies_test:useless_target_[^2]"], + contacts = CONTACTS, + expect_failure_msg = "Disallowed rules were found", + source_target = TARGET3, + target = TARGET1, +) diff --git a/tests/e2e/configurations/cfg_constructor/BUCK b/tests/e2e/configurations/cfg_constructor/BUCK new file mode 100644 index 0000000000000..3054f551ecf85 --- /dev/null +++ b/tests/e2e/configurations/cfg_constructor/BUCK @@ -0,0 +1,63 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") +load("@fbcode_macros//build_defs:python_library.bzl", "python_library") +load("@fbsource//tools/build_defs/buck2/cfg:validation.bzl", "CLI_MODIFIER_VALIDATION_SOFT_ERROR_CATEGORY") +load("@prelude//cfg/modifier:name.bzl", "NAMED_CONSTRAINT_SETTINGS") +load(":constraint_setting_group.bzl", "constraint_setting_group") + +oncall("buck2") + +buck2_e2e_test( + name = "test_cli_modifiers.py", + srcs = ["test_cli_modifiers.py"], + deps = [":modifiers_util"], +) + +buck2_e2e_test( + name = "test_invoke_cfg_constructors.py", + srcs = ["test_invoke_cfg_constructors.py"], + data_dir = "test_invoke_cfg_constructors_data", +) + 
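+# The bxl/ subdirectory holds bxl_test targets that unit-test the cfg
+# constructor implementation; the buck2_e2e_test targets here drive it end to end.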
+buck2_e2e_test( + name = "test_set_cfg_modifiers.py", + srcs = ["test_set_cfg_modifiers.py"], + test_with_deployed_buck2 = True, +) + +buck2_e2e_test( + name = "test_invoke_cfg_constructors_bad_constraints.py", + srcs = ["test_invoke_cfg_constructors_bad_constraints.py"], + data_dir = "test_invoke_cfg_constructors_bad_constraints_data", +) + +buck2_e2e_test( + name = "test_cfg_modifiers.py", + srcs = ["test_cfg_modifiers.py"], + test_with_deployed_buck2 = True, +) + +buck2_e2e_test( + name = "test_cfg_constructors_inplace.py", + srcs = ["test_cfg_constructors_inplace.py"], + env = {"CLI_MODIFIER_VALIDATION_SOFT_ERROR_CATEGORY": CLI_MODIFIER_VALIDATION_SOFT_ERROR_CATEGORY}, + test_with_deployed_buck2 = True, + test_with_reverted_buck2 = True, +) + +constraint_setting_group( + name = "test_named_constraint_settings", + deps = NAMED_CONSTRAINT_SETTINGS.keys(), +) + +buck2_e2e_test( + name = "test_cfg_modifiers_attr", + srcs = ["test_cfg_modifiers_attr.py"], + data_dir = "test_cfg_modifiers_attr_data", + deps = [":modifiers_util"], +) + +python_library( + name = "modifiers_util", + srcs = ["modifiers_util.py"], + deps = ["//buck2/tests/e2e_util/api:api"], +) diff --git a/tests/e2e/configurations/cfg_constructor/bxl/BUCK b/tests/e2e/configurations/cfg_constructor/bxl/BUCK new file mode 100644 index 0000000000000..b82cdaaa96a0b --- /dev/null +++ b/tests/e2e/configurations/cfg_constructor/bxl/BUCK @@ -0,0 +1,7 @@ +load("@fbcode//buck2/tests:bxl_test.bzl", "bxl_test") + +oncall("build_infra") + +[bxl_test( + src = src, +) for src in glob(["test_*.bxl"])] diff --git a/tests/e2e/configurations/cfg_constructor/bxl/test_cfg_constructor.bxl b/tests/e2e/configurations/cfg_constructor/bxl/test_cfg_constructor.bxl new file mode 100644 index 0000000000000..ce7181c30f1c4 --- /dev/null +++ b/tests/e2e/configurations/cfg_constructor/bxl/test_cfg_constructor.bxl @@ -0,0 +1,395 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@fbcode//buck2/cfg/experimental:modifiers.bzl", "modifiers") +load("@prelude//:asserts.bzl", "asserts") +load("@prelude//cfg/modifier:cfg_constructor.bzl", "cfg_constructor_post_constraint_analysis", "cfg_constructor_pre_constraint_analysis") +load("@prelude//cfg/modifier:common.bzl", "tagged_modifiers_to_json") +load( + "@prelude//cfg/modifier:types.bzl", + "Modifier", + "ModifierPackageLocation", + "TaggedModifiers", +) +load(":util.bxl", "ALIASES", "TestRefs", "get_test_refs") + +# These tests run `cfg_constructor_pre_constraint_analysis` and `cfg_constructor_post_constraint_analysis` with test arguments +# and check if they return the expected PlatformInfo. They act like unit tests except for the fact that they use certain +# constraint targets from the repo (listed below), which requires running from within fbsource. +# TODO(scottcao): Make this test runnable as isolated test. 
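+# Modifier precedence, as exercised below: package modifiers apply first, then
+# target modifiers, then CLI modifiers, with later modifiers winning per
+# constraint setting (e.g. package linux + target macos + CLI arm64 resolves
+# to cfg:macos-arm64).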
+ +def _run_cfg_constructor( + legacy_platform: PlatformInfo | None, + package_modifiers: list[TaggedModifiers], + target_modifiers: list[Modifier], + cli_modifiers: list[str], + rule_name: str, + test_refs: TestRefs) -> PlatformInfo: + refs, params = cfg_constructor_pre_constraint_analysis( + package_modifiers = [tagged_modifiers_to_json(tagged_modifiers) for tagged_modifiers in package_modifiers], + legacy_platform = legacy_platform, + target_modifiers = target_modifiers, + cli_modifiers = cli_modifiers, + rule_name = rule_name, + aliases = ALIASES, + extra_data = struct(), + ) + refs = {ref: test_refs.get(ref) for ref in refs} + return cfg_constructor_post_constraint_analysis(refs = refs, params = params) + +def test_cfg_constructor_returns_legacy_platform_with_no_modifiers( + test_refs: TestRefs): + linux_cfg = test_refs.get("ovr_config//os/constraints:linux")[ConfigurationInfo] + legacy_platform = PlatformInfo(label = "platform", configuration = linux_cfg) + platform = _run_cfg_constructor( + legacy_platform = legacy_platform, + package_modifiers = [], + target_modifiers = [], + cli_modifiers = [], + test_refs = test_refs, + rule_name = "test_rule", + ) + asserts.equals(legacy_platform.label, platform.label) + asserts.equals(legacy_platform.configuration, platform.configuration) + +def test_cfg_constructor_with_cli_modifiers( + test_refs: TestRefs): + constraints = ["ovr_config//os/constraints:linux", "ovr_config//cpu/constraints:x86_64"] + platform = _run_cfg_constructor( + legacy_platform = None, + package_modifiers = [], + target_modifiers = [], + cli_modifiers = constraints, + test_refs = test_refs, + rule_name = "test_rule", + ) + expected_cfg = test_refs.make_cfg(constraints) + asserts.equals("cfg:linux-x86_64", platform.label) + asserts.equals(expected_cfg, platform.configuration) + + # Test with aliases + cli_modifiers = ["linux", "x86_64"] + platform = _run_cfg_constructor( + legacy_platform = None, + package_modifiers = [], + target_modifiers = [], + cli_modifiers = cli_modifiers, + test_refs = test_refs, + rule_name = "test_rule", + ) + asserts.equals("cfg:linux-x86_64", platform.label) + asserts.equals(expected_cfg, platform.configuration) + +def test_cfg_constructor_with_legacy_platform_and_cli_modifiers( + test_refs: TestRefs): + legacy_platform = PlatformInfo( + label = "platform", + configuration = test_refs.make_cfg(["ovr_config//os/constraints:linux", "ovr_config//cpu/constraints:x86_64"]), + ) + platform = _run_cfg_constructor( + legacy_platform = legacy_platform, + package_modifiers = [], + target_modifiers = [], + cli_modifiers = ["ovr_config//os/constraints:macos"], + test_refs = test_refs, + rule_name = "test_rule", + ) + expected_cfg = test_refs.make_cfg( + [ + "ovr_config//os/constraints:macos", + "ovr_config//cpu/constraints:x86_64", + ], + ) + asserts.equals("cfg:macos-x86_64", platform.label) + asserts.equals(expected_cfg, platform.configuration) + +def test_cfg_constructor_with_package_modifiers( + test_refs: TestRefs): + platform = _run_cfg_constructor( + legacy_platform = None, + package_modifiers = [ + TaggedModifiers( + modifiers = ["ovr_config//cpu/constraints:x86_64", "ovr_config//os/constraints:linux"], + location = ModifierPackageLocation(package_path = "fbcode//PACKAGE"), + rule_name = None, + ), + ], + target_modifiers = [], + cli_modifiers = ["ovr_config//cpu/constraints:arm64"], + test_refs = test_refs, + rule_name = "test_rule", + ) + expected_cfg = test_refs.make_cfg( + ["ovr_config//os/constraints:linux", 
"ovr_config//cpu/constraints:arm64"], + ) + asserts.equals(expected_cfg, platform.configuration) + asserts.equals("cfg:linux-arm64", platform.label) + +def test_cfg_constructor_with_all_modifiers( + test_refs: TestRefs): + platform = _run_cfg_constructor( + legacy_platform = None, + package_modifiers = [ + TaggedModifiers( + modifiers = ["ovr_config//os/constraints:linux"], + location = ModifierPackageLocation(package_path = "fbcode//PACKAGE"), + rule_name = None, + ), + ], + target_modifiers = ["ovr_config//cpu/constraints:x86_64", "ovr_config//os/constraints:macos"], + cli_modifiers = ["ovr_config//cpu/constraints:arm64"], + test_refs = test_refs, + rule_name = "test_rule", + ) + expected_cfg = test_refs.make_cfg( + [ + "ovr_config//os/constraints:macos", + "ovr_config//cpu/constraints:arm64", + ], + ) + asserts.equals(expected_cfg, platform.configuration) + asserts.equals("cfg:macos-arm64", platform.label) + +def test_cfg_constructor_with_modifiers_match( + test_refs: TestRefs): + package_modifiers = [ + TaggedModifiers( + modifiers = [modifiers.match({ + "ovr_config//os/constraints:macos": "ovr_config//cpu/constraints:arm64", + }), "ovr_config//os/constraints:linux"], + location = ModifierPackageLocation(package_path = "fbcode//PACKAGE"), + rule_name = None, + ), + ] + target_modifiers = [ + modifiers.match({ + "ovr_config//os/constraints:linux": "ovr_config//cpu/constraints:x86_64", + }), + ] + platform = _run_cfg_constructor( + legacy_platform = None, + package_modifiers = package_modifiers, + target_modifiers = target_modifiers, + cli_modifiers = ["ovr_config//os/constraints:macos"], + test_refs = test_refs, + rule_name = "test_rule", + ) + expected_cfg = test_refs.make_cfg(["ovr_config//os/constraints:macos", "ovr_config//cpu/constraints:arm64"]) + asserts.equals(expected_cfg, platform.configuration) + asserts.equals("cfg:macos-arm64", platform.label) + + platform = _run_cfg_constructor( + legacy_platform = None, + package_modifiers = package_modifiers, + target_modifiers = target_modifiers, + cli_modifiers = ["ovr_config//os/constraints:linux"], + test_refs = test_refs, + rule_name = "test_rule", + ) + expected_cfg = test_refs.make_cfg(["ovr_config//os/constraints:linux", "ovr_config//cpu/constraints:x86_64"]) + asserts.equals(expected_cfg, platform.configuration) + asserts.equals("cfg:linux-x86_64", platform.label) + +def test_cfg_constructor_with_modifiers_match_on_legacy_platform( + test_refs: TestRefs): + legacy_platform = PlatformInfo( + label = "platform", + configuration = test_refs.make_cfg(["ovr_config//os/constraints:linux"]), + ) + target_modifiers = [ + modifiers.match({ + "ovr_config//os/constraints:linux": "ovr_config//cpu/constraints:x86_64", + }), + ] + + platform = _run_cfg_constructor( + legacy_platform = legacy_platform, + package_modifiers = [], + target_modifiers = target_modifiers, + cli_modifiers = [], + test_refs = test_refs, + rule_name = "test_rule", + ) + expected_cfg = test_refs.make_cfg(["ovr_config//os/constraints:linux", "ovr_config//cpu/constraints:x86_64"]) + asserts.equals(expected_cfg, platform.configuration) + asserts.equals("cfg:linux-x86_64", platform.label) + + platform = _run_cfg_constructor( + legacy_platform = legacy_platform, + package_modifiers = [], + target_modifiers = target_modifiers, + cli_modifiers = ["ovr_config//os/constraints:macos"], + test_refs = test_refs, + rule_name = "test_rule", + ) + expected_cfg = test_refs.make_cfg(["ovr_config//os/constraints:macos"]) + asserts.equals(expected_cfg, platform.configuration) + 
asserts.equals("cfg:macos", platform.label) + +def test_cfg_constructor_with_modifiers_match_ordering( + test_refs: TestRefs): + target_modifiers = [ + modifiers.match({ + "ovr_config//cpu/constraints:arm64": "ovr_config//build_mode/constraints:no-san", + "ovr_config//cpu/constraints:x86_64": "ovr_config//build_mode/constraints:asan", + }), + modifiers.match({ + "ovr_config//os/constraints:linux": "ovr_config//cpu/constraints:x86_64", + "ovr_config//os/constraints:macos": "ovr_config//cpu/constraints:arm64", + }), + modifiers.match({ + "DEFAULT": None, + # Match on a constraint that does not appear in the platform + "ovr_config//toolchain/clang/constraints:15": "ovr_config//cpu/constraints:x86_64", + }), + ] + + platform = _run_cfg_constructor( + legacy_platform = None, + package_modifiers = [], + target_modifiers = target_modifiers, + cli_modifiers = ["ovr_config//os/constraints:linux"], + test_refs = test_refs, + rule_name = "test_rule", + ) + expected_cfg = test_refs.make_cfg([ + "ovr_config//os/constraints:linux", + "ovr_config//cpu/constraints:x86_64", + "ovr_config//build_mode/constraints:asan", + ]) + asserts.equals(expected_cfg, platform.configuration) + asserts.equals("cfg:linux-x86_64-asan", platform.label) + + platform = _run_cfg_constructor( + legacy_platform = None, + package_modifiers = [], + target_modifiers = target_modifiers, + cli_modifiers = ["ovr_config//os/constraints:macos"], + test_refs = test_refs, + rule_name = "test_rule", + ) + expected_cfg = test_refs.make_cfg([ + "ovr_config//os/constraints:macos", + "ovr_config//cpu/constraints:arm64", + "ovr_config//build_mode/constraints:no-san", + ]) + asserts.equals(expected_cfg, platform.configuration) + asserts.equals("cfg:macos-arm64-no-san", platform.label) + +def test_cfg_constructor_with_single_constraint_config_setting_modifier( + test_refs: TestRefs): + platform = _run_cfg_constructor( + legacy_platform = None, + package_modifiers = [ + TaggedModifiers( + modifiers = ["ovr_config//os:linux"], + location = ModifierPackageLocation(package_path = "fbcode//PACKAGE"), + rule_name = None, + ), + ], + target_modifiers = [], + cli_modifiers = ["ovr_config//cpu:x86_64"], + test_refs = test_refs, + rule_name = "test_rule", + ) + expected_cfg = test_refs.make_cfg(["ovr_config//os/constraints:linux", "ovr_config//cpu/constraints:x86_64"]) + asserts.equals(expected_cfg, platform.configuration) + asserts.equals("cfg:linux-x86_64", platform.label) + +def test_cfg_constructor_with_tagged_modifiers_per_rule1( + test_refs: TestRefs): + platform = _run_cfg_constructor( + legacy_platform = None, + package_modifiers = [ + TaggedModifiers( + modifiers = ["ovr_config//cpu/constraints:arm64"], + location = ModifierPackageLocation(package_path = "fbcode//PACKAGE"), + rule_name = None, + ), + TaggedModifiers( + modifiers = ["ovr_config//cpu/constraints:x86_64"], + location = ModifierPackageLocation(package_path = "fbcode//PACKAGE"), + rule_name = "python_binary", + ), + ], + target_modifiers = [], + cli_modifiers = [], + test_refs = test_refs, + rule_name = "test_rule", + ) + expected_cfg = test_refs.make_cfg( + ["ovr_config//cpu/constraints:arm64"], + ) + asserts.equals(expected_cfg, platform.configuration) + +def test_cfg_constructor_with_tagged_modifiers_per_rule2( + test_refs: TestRefs): + platform = _run_cfg_constructor( + legacy_platform = None, + package_modifiers = [ + TaggedModifiers( + modifiers = ["ovr_config//cpu/constraints:arm64"], + location = ModifierPackageLocation(package_path = "fbcode//PACKAGE"), + rule_name = None, + ), 
+            TaggedModifiers(
+                modifiers = ["ovr_config//cpu/constraints:x86_64"],
+                location = ModifierPackageLocation(package_path = "fbcode//PACKAGE"),
+                rule_name = "python_binary",
+            ),
+        ],
+        target_modifiers = [],
+        cli_modifiers = [],
+        test_refs = test_refs,
+        rule_name = "python_binary",
+    )
+    expected_cfg = test_refs.make_cfg(
+        ["ovr_config//cpu/constraints:x86_64"],
+    )
+    asserts.equals(expected_cfg, platform.configuration)
+
+def test_cfg_constructor_with_multiple_modifier_alias(test_refs: TestRefs):
+    platform = _run_cfg_constructor(
+        legacy_platform = None,
+        package_modifiers = [
+            TaggedModifiers(
+                modifiers = ["ovr_config//cpu/constraints:arm64"],
+                location = ModifierPackageLocation(package_path = "fbcode//PACKAGE"),
+                rule_name = None,
+            ),
+        ],
+        target_modifiers = [],
+        cli_modifiers = ["linux_x86_64"],
+        test_refs = test_refs,
+        rule_name = "python_binary",
+    )
+    expected_cfg = test_refs.make_cfg(
+        ["ovr_config//os/constraints:linux", "ovr_config//cpu/constraints:x86_64"],
+    )
+    asserts.equals(expected_cfg, platform.configuration)
+
+def _impl(ctx: bxl.Context):
+    test_refs = get_test_refs(ctx)
+
+    test_cfg_constructor_returns_legacy_platform_with_no_modifiers(
+        test_refs,
+    )
+    test_cfg_constructor_with_cli_modifiers(test_refs)
+    test_cfg_constructor_with_legacy_platform_and_cli_modifiers(test_refs)
+    test_cfg_constructor_with_package_modifiers(test_refs)
+    test_cfg_constructor_with_all_modifiers(test_refs)
+    test_cfg_constructor_with_modifiers_match(test_refs)
+    test_cfg_constructor_with_modifiers_match_on_legacy_platform(test_refs)
+    test_cfg_constructor_with_modifiers_match_ordering(test_refs)
+    test_cfg_constructor_with_single_constraint_config_setting_modifier(test_refs)
+    test_cfg_constructor_with_tagged_modifiers_per_rule1(test_refs)
+    test_cfg_constructor_with_tagged_modifiers_per_rule2(test_refs)
+    test_cfg_constructor_with_multiple_modifier_alias(test_refs)
+
+test = bxl_main(
+    cli_args = {},
+    impl = _impl,
+)
diff --git a/tests/e2e/configurations/cfg_constructor/bxl/test_cfg_name.bxl b/tests/e2e/configurations/cfg_constructor/bxl/test_cfg_name.bxl
new file mode 100644
index 0000000000000..b83382351cbb0
--- /dev/null
+++ b/tests/e2e/configurations/cfg_constructor/bxl/test_cfg_name.bxl
@@ -0,0 +1,29 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+load("@prelude//:asserts.bzl", "asserts")
+load("@prelude//cfg/modifier:name.bzl", "cfg_name")
+load(":util.bxl", "TestRefs", "get_test_refs")
+
+def _test_cfg_name_with_clang(test_refs: TestRefs):
+    cfg = test_refs.make_cfg([
+        "ovr_config//os/constraints:linux",
+        "ovr_config//cpu/constraints:x86_64",
+        "ovr_config//toolchain/clang/constraints:15",
+        "ovr_config//build_mode/constraints:no-san",
+    ])
+    name = cfg_name(cfg)
+    asserts.equals("cfg:linux-x86_64-clang15-no-san", name)
+
+def _impl(ctx: bxl.Context):
+    test_refs = get_test_refs(ctx)
+    _test_cfg_name_with_clang(test_refs)
+
+test = bxl_main(
+    cli_args = {},
+    impl = _impl,
+)
diff --git a/tests/e2e/configurations/cfg_constructor/bxl/test_modifier_to_json.bxl b/tests/e2e/configurations/cfg_constructor/bxl/test_modifier_to_json.bxl
new file mode 100644
index 0000000000000..b3293d90a9bb2
--- /dev/null
+++ b/tests/e2e/configurations/cfg_constructor/bxl/test_modifier_to_json.bxl
@@ -0,0 +1,38 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+load("@prelude//:asserts.bzl", "asserts")
+load("@prelude//cfg/modifier:common.bzl", "json_to_tagged_modifiers", "tagged_modifiers_to_json")
+load("@prelude//cfg/modifier:types.bzl", "ModifierPackageLocation", "ModifierTargetLocation", "TaggedModifiers")
+load("@fbcode//buck2/cfg/experimental/modifiers.bzl", "modifiers")
+
+def _impl(_ctx: bxl.Context):
+    for location in (
+        ModifierPackageLocation(package_path = "fbcode//buck2/PACKAGE"),
+        ModifierTargetLocation(),
+    ):
+        tagged_modifiers = TaggedModifiers(
+            modifiers = [modifiers.match({
+                "ovr_config//os/constraints:linux": modifiers.match({
+                    "DEFAULT": "ovr_config//build_mode/constraints:no-san",
+                    "ovr_config//cpu/constraints:x86_64": "ovr_config//build_mode/constraints:asan",
+                }),
+                "ovr_config//os/constraints:windows": modifiers.match({
+                    "DEFAULT": modifiers.match({
+                        "ovr_config//cpu/constraints:x86_64": "ovr_config//build_mode/constraints:tsan",
+                    }),
+                }),
+            })],
+            location = location,
+            rule_name = None,
+        )
+        asserts.equals(tagged_modifiers, json_to_tagged_modifiers(tagged_modifiers_to_json(tagged_modifiers)))
+
+test = bxl_main(
+    cli_args = {},
+    impl = _impl,
+)
diff --git a/tests/e2e/configurations/cfg_constructor/bxl/test_modifiers_match.bxl b/tests/e2e/configurations/cfg_constructor/bxl/test_modifiers_match.bxl
new file mode 100644
index 0000000000000..3e2b1d678e2d9
--- /dev/null
+++ b/tests/e2e/configurations/cfg_constructor/bxl/test_modifiers_match.bxl
@@ -0,0 +1,121 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+load("@fbcode//buck2/cfg/experimental:modifiers.bzl", "modifiers")
+load("@prelude//:asserts.bzl", "asserts")
+load("@prelude//cfg/modifier:common.bzl", "resolve_modifier")
+load("@prelude//cfg/modifier:types.bzl", "ModifierTargetLocation", "ModifiersMatchInfo")
+load(":util.bxl", "TestRefs", "get_test_refs")
+
+# These tests run `resolve_modifier` with test arguments and check that it
+# returns the expected resolved modifier. They act like unit tests except that
+# they use certain constraint targets from the repo, which requires running
+# from within fbsource.
+# TODO(scottcao): Make this test runnable as an isolated test.
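+# Semantics under test, roughly: match keys are checked against the current
+# cfg in dict order and the first matching key wins, DEFAULT acts as the
+# fallback arm, and a None value resolves to no modifier at all.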
+
+def test_resolve_constraint_value_modifier(
+        test_refs: TestRefs):
+    cfg = test_refs.make_cfg(["ovr_config//os/constraints:macos", "ovr_config//cpu/constraints:arm64"])
+    asan_constraint = test_refs.get("ovr_config//build_mode/constraints:asan")[ConstraintValueInfo]
+    resolved_modifier = resolve_modifier(
+        cfg = cfg,
+        modifier = asan_constraint,
+    )
+    asserts.equals(asan_constraint, resolved_modifier)
+
+def test_resolve_modifiers_match_with_constraint_value_keys(
+        test_refs: TestRefs):
+    modifier = modifiers.match({
+        "ovr_config//os/constraints:linux": modifiers.match({
+            "DEFAULT": "ovr_config//build_mode/constraints:no-san",
+            "ovr_config//cpu/constraints:x86_64": "ovr_config//build_mode/constraints:asan",
+        }),
+        "ovr_config//os/constraints:windows": modifiers.match({
+            # This is a fairly contrived match since it only contains DEFAULT keys
+            "DEFAULT": modifiers.match({
+                "DEFAULT": modifiers.match({
+                    "ovr_config//cpu/constraints:x86_64": "ovr_config//build_mode/constraints:tsan",
+                }),
+            }),
+        }),
+    })
+    modifier_info = test_refs.get_modifier_info(modifier, ModifierTargetLocation())
+    linux_x86_64 = test_refs.make_cfg(["ovr_config//os/constraints:linux", "ovr_config//cpu/constraints:x86_64"])
+    linux_arm64 = test_refs.make_cfg(["ovr_config//os/constraints:linux", "ovr_config//cpu/constraints:arm64"])
+    macos_x86_64 = test_refs.make_cfg(["ovr_config//os/constraints:macos", "ovr_config//cpu/constraints:x86_64"])
+    macos_arm64 = test_refs.make_cfg(["ovr_config//os/constraints:macos", "ovr_config//cpu/constraints:arm64"])
+    windows_x86_64 = test_refs.make_cfg(["ovr_config//os/constraints:windows", "ovr_config//cpu/constraints:x86_64"])
+    windows_arm64 = test_refs.make_cfg(["ovr_config//os/constraints:windows", "ovr_config//cpu/constraints:arm64"])
+    asserts.equals(test_refs.get("ovr_config//build_mode/constraints:asan")[ConstraintValueInfo], resolve_modifier(linux_x86_64, modifier_info))
+    asserts.equals(test_refs.get("ovr_config//build_mode/constraints:no-san")[ConstraintValueInfo], resolve_modifier(linux_arm64, modifier_info))
+    asserts.equals(None, resolve_modifier(macos_x86_64, modifier_info))
+    asserts.equals(None, resolve_modifier(macos_arm64, modifier_info))
+    asserts.equals(test_refs.get("ovr_config//build_mode/constraints:tsan")[ConstraintValueInfo], resolve_modifier(windows_x86_64, modifier_info))
+    asserts.equals(None, resolve_modifier(windows_arm64, modifier_info))
+
+def test_resolve_modifiers_match_with_config_setting_keys(
+        test_refs: TestRefs):
+    modifier_info = ModifiersMatchInfo(
+        selector = [
+            (test_refs.make_cfg(["ovr_config//os/constraints:linux", "ovr_config//cpu/constraints:x86_64"]), test_refs.get("ovr_config//build_mode/constraints:asan")[ConstraintValueInfo]),
+            (test_refs.make_cfg(["ovr_config//os/constraints:linux", "ovr_config//cpu/constraints:arm64"]), test_refs.get("ovr_config//build_mode/constraints:tsan")[ConstraintValueInfo]),
+        ],
+        default = test_refs.get("ovr_config//build_mode/constraints:no-san")[ConstraintValueInfo],
+    )
+    linux_x86_64 = test_refs.make_cfg(["ovr_config//os/constraints:linux", "ovr_config//cpu/constraints:x86_64"])
+    linux_arm64 = test_refs.make_cfg(["ovr_config//os/constraints:linux", "ovr_config//cpu/constraints:arm64"])
+    macos_x86_64 = test_refs.make_cfg(["ovr_config//os/constraints:macos", "ovr_config//cpu/constraints:x86_64"])
+    macos_arm64 = test_refs.make_cfg(["ovr_config//os/constraints:macos", "ovr_config//cpu/constraints:arm64"])
+    windows_x86_64 = test_refs.make_cfg(["ovr_config//os/constraints:windows", "ovr_config//cpu/constraints:x86_64"])
+    windows_arm64 = test_refs.make_cfg(["ovr_config//os/constraints:windows", "ovr_config//cpu/constraints:arm64"])
+    asserts.equals(test_refs.get("ovr_config//build_mode/constraints:asan")[ConstraintValueInfo], resolve_modifier(linux_x86_64, modifier_info))
+    asserts.equals(test_refs.get("ovr_config//build_mode/constraints:tsan")[ConstraintValueInfo], resolve_modifier(linux_arm64, modifier_info))
+    nosan_constraint = test_refs.get("ovr_config//build_mode/constraints:no-san")[ConstraintValueInfo]
+    asserts.equals(nosan_constraint, resolve_modifier(macos_x86_64, modifier_info))
+    asserts.equals(nosan_constraint, resolve_modifier(macos_arm64, modifier_info))
+    asserts.equals(nosan_constraint, resolve_modifier(windows_x86_64, modifier_info))
+    asserts.equals(nosan_constraint, resolve_modifier(windows_arm64, modifier_info))
+
+def test_modifiers_match_resolve_first_matching_key(
+        test_refs: TestRefs):
+    cfg = test_refs.make_cfg(["ovr_config//os/constraints:linux", "ovr_config//cpu/constraints:x86_64"])
+    linux_x86_64_match = test_refs.get_modifier_info(modifiers.match({
+        "ovr_config//cpu/constraints:x86_64": "ovr_config//build_mode/constraints:tsan",
+        "ovr_config//os/constraints:linux": "ovr_config//build_mode/constraints:asan",
+    }), ModifierTargetLocation())
+
+    # buildifier: disable=unsorted-dict-items
+    x86_64_linux_match = test_refs.get_modifier_info(modifiers.match({
+        "ovr_config//os/constraints:linux": "ovr_config//build_mode/constraints:asan",
+        "ovr_config//cpu/constraints:x86_64": "ovr_config//build_mode/constraints:tsan",
+    }), ModifierTargetLocation())
+    asan_constraint = test_refs.get("ovr_config//build_mode/constraints:asan")[ConstraintValueInfo]
+    tsan_constraint = test_refs.get("ovr_config//build_mode/constraints:tsan")[ConstraintValueInfo]
+    asserts.equals(tsan_constraint, resolve_modifier(cfg, linux_x86_64_match))
+    asserts.equals(asan_constraint, resolve_modifier(cfg, x86_64_linux_match))
+
+def test_modifiers_match_with_none(test_refs: TestRefs):
+    match = test_refs.get_modifier_info(modifiers.match({
+        "DEFAULT": "ovr_config//build_mode/constraints:tsan",
+        "ovr_config//os/constraints:linux": None,
+    }), ModifierTargetLocation())
+    cfg = test_refs.make_cfg(["ovr_config//os/constraints:linux"])
+    asserts.equals(None, resolve_modifier(cfg, match))
+    cfg = test_refs.make_cfg(["ovr_config//os/constraints:windows"])
+    asserts.equals(test_refs.get("ovr_config//build_mode/constraints:tsan")[ConstraintValueInfo], resolve_modifier(cfg, match))
+
+def _impl(ctx: bxl.Context):
+    test_refs = get_test_refs(ctx)
+
+    test_resolve_constraint_value_modifier(test_refs)
+    test_resolve_modifiers_match_with_constraint_value_keys(test_refs)
+    test_resolve_modifiers_match_with_config_setting_keys(test_refs)
+    test_modifiers_match_resolve_first_matching_key(test_refs)
+    test_modifiers_match_with_none(test_refs)
+
+test = bxl_main(
+    cli_args = {},
+    impl = _impl,
+)
diff --git a/tests/e2e/configurations/cfg_constructor/bxl/util.bxl b/tests/e2e/configurations/cfg_constructor/bxl/util.bxl
new file mode 100644
index 0000000000000..162fbd339cc11
--- /dev/null
+++ b/tests/e2e/configurations/cfg_constructor/bxl/util.bxl
@@ -0,0 +1,81 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+ +load("@prelude//cfg/modifier:common.bzl", _get_modifier_info = "get_modifier_info") +load("@prelude//cfg/modifier:types.bzl", "Modifier", "ModifierInfo", "ModifierLocation") +load("@prelude//configurations:util.bzl", "util") + +_TARGETS = [ + "ovr_config//cpu/constraints:cpu", + "ovr_config//cpu:x86_64", + "ovr_config//cpu/constraints:x86_64", + "ovr_config//cpu/constraints:arm64", + "ovr_config//os/constraints:os", + "ovr_config//os:linux", + "ovr_config//os/constraints:linux", + "ovr_config//os/constraints:macos", + "ovr_config//os/constraints:windows", + "ovr_config//build_mode/constraints:san", + "ovr_config//build_mode/constraints:no-san", + "ovr_config//build_mode/constraints:asan", + "ovr_config//build_mode/constraints:tsan", + # TODO: This will probably get broken by updates to clang constraints. + # Figure out how to handle this. + "ovr_config//toolchain/clang/constraints:15", + "ovr_config//toolchain/clang/constraints:17", +] + +ALIASES = struct( + # OS + linux = "ovr_config//os:linux", + macos = "ovr_config//os:macos", + windows = "ovr_config//os:windows", + # CPU + x86_64 = "ovr_config//cpu:x86_64", + arm64 = "ovr_config//cpu:arm64", + # Multiple modifiers + linux_x86_64 = [ + "ovr_config//os:linux", + "ovr_config//cpu:x86_64", + ], +) + +TestRefs = record( + _refs = dict[str, ProviderCollection], + get = field(typing.Callable), + make_cfg = field(typing.Callable), + get_modifier_info = field(typing.Callable), +) + +def _get_providers(ctx: bxl.Context, targets: list[str]) -> dict[str, ProviderCollection]: + analysis_result = ctx.analysis(targets) + return {str(target.raw_target()): v.providers() for target, v in analysis_result.items()} + +def get_test_refs(ctx: bxl.Context) -> TestRefs: + def get(target: str) -> ProviderCollection: + return self._refs[target] + + def make_cfg(targets: list[str]) -> ConfigurationInfo: + return util.configuration_info_union([self._refs[target][ConfigurationInfo] for target in targets]) + + def get_modifier_info( + modifier: Modifier, + location: ModifierLocation) -> ModifierInfo: + _constraint_setting, modifier_info = _get_modifier_info( + self._refs, + modifier, + location, + ) + return modifier_info + + self = TestRefs( + _refs = _get_providers(ctx, _TARGETS), + get = get, + make_cfg = make_cfg, + get_modifier_info = get_modifier_info, + ) + return self diff --git a/tests/e2e/configurations/cfg_constructor/cfg_modifiers/TARGETS.test b/tests/e2e/configurations/cfg_constructor/cfg_modifiers/TARGETS.test new file mode 100644 index 0000000000000..08dbea966f03a --- /dev/null +++ b/tests/e2e/configurations/cfg_constructor/cfg_modifiers/TARGETS.test @@ -0,0 +1,9 @@ +load(":rules.bzl", "test_rule") + +test_rule( + name = "test", + metadata = {"buck.cfg_modifiers": [ + "ovr_config//os/constraints:linux", + "ovr_config//cpu/constraints:arm64", + ]}, +) diff --git a/tests/e2e/configurations/cfg_constructor/cfg_modifiers/rules.bzl b/tests/e2e/configurations/cfg_constructor/cfg_modifiers/rules.bzl new file mode 100644 index 0000000000000..23c1921857439 --- /dev/null +++ b/tests/e2e/configurations/cfg_constructor/cfg_modifiers/rules.bzl @@ -0,0 +1,18 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
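+
+# Minimal no-op rule: the test only inspects the target's metadata, so the
+# rule needs no attrs of its own.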
+ +def _test_rule(ctx): + _unused = ctx # buildifier: disable=unused-variable + return [ + DefaultInfo(), + ] + +test_rule = rule( + impl = _test_rule, + attrs = { + }, +) diff --git a/tests/e2e/configurations/cfg_constructor/constraint_setting_group.bzl b/tests/e2e/configurations/cfg_constructor/constraint_setting_group.bzl new file mode 100644 index 0000000000000..c503880cebb23 --- /dev/null +++ b/tests/e2e/configurations/cfg_constructor/constraint_setting_group.bzl @@ -0,0 +1,14 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +constraint_setting_group = rule( + impl = lambda _ctx: [DefaultInfo()], + attrs = { + "deps": attrs.list(attrs.dep(providers = [ConstraintSettingInfo])), + }, + is_configuration_rule = True, +) diff --git a/tests/e2e/configurations/cfg_constructor/modifiers_util.py b/tests/e2e/configurations/cfg_constructor/modifiers_util.py new file mode 100644 index 0000000000000..3b8de9afb451e --- /dev/null +++ b/tests/e2e/configurations/cfg_constructor/modifiers_util.py @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +from buck2.tests.e2e_util.api.buck import Buck + + +async def get_cfg(buck: Buck, *args: str) -> str: + result = await buck.ctargets(*args) + + # Assuming ctargets output is `target (cfg)` + cfg = result.stdout.split()[1].strip("()") + + result = await buck.audit_configurations(cfg) + return result.stdout diff --git a/tests/e2e/configurations/cfg_constructor/test_cfg_constructors_inplace.py b/tests/e2e/configurations/cfg_constructor/test_cfg_constructors_inplace.py new file mode 100644 index 0000000000000..3f0c8b8c8e9ca --- /dev/null +++ b/tests/e2e/configurations/cfg_constructor/test_cfg_constructors_inplace.py @@ -0,0 +1,75 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json +import os + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + +# Test `cfg_constructors` end to end. This is useful for testing the core + starlark +# implementation so that we know we don't break anything in the repo. 
For testing +# specific cfg constructor logic, use bxl_test to unit test the cfg constructor instead + + +@buck_test(inplace=True) +async def test_cfg_constructor_without_modifiers_returns_same_configuration( + buck: Buck, +) -> None: + result = await buck.cquery( + "fbcode//buck2/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/test_cfg_constructor_data:no_modifiers", + "-A", + ) + result = json.loads(result.stdout) + assert len(result) == 1 + _test_target, test_target_attrs = list(result.items())[0] + assert test_target_attrs["buck.target_configuration"].startswith( + "ovr_config//platform:base" + ) + + +@buck_test(inplace=True) +async def test_cfg_constructor_with_target_modifiers(buck: Buck) -> None: + result = await buck.cquery( + "fbcode//buck2/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/test_cfg_constructor_data:has_target_modifier", + "-A", + ) + result = json.loads(result.stdout) + assert len(result) == 1 + _test_target, test_target_attrs = list(result.items())[0] + assert test_target_attrs["buck.target_configuration"].startswith("cfg:linux") + + +@buck_test( + inplace=True, + extra_buck_config={ + # CLI modifier validation is disabled for users and enabled for CI. To make sure this test case always has CLI modifier validation enabled, + # explicitly enable it here. + "buck2": {"skip_cli_modifier_validation_DO_NOT_SET_TO_TRUE_ON_CI": ""} + }, +) +async def test_invoke_cfg_constructors_with_cli_modifier_validation(buck: Buck) -> None: + env = { + "BUCK2_HARD_ERROR": f"only={os.environ['CLI_MODIFIER_VALIDATION_SOFT_ERROR_CATEGORY']}", + } + await buck.cquery( + "fbcode//buck2/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/test_cfg_constructor_data:has_target_modifier", + "--modifier=ovr_config//os:linux", + env=env, + ) + await expect_failure( + buck.cquery( + "fbcode//buck2/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/test_cfg_constructor_data:has_target_modifier", + "--modifier=fbcode//buck2/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/test_cfg_constructor_data:some_constraint_value", + env=env, + ), + stderr_regex="Only a select number of modifiers are allowed to be set from CLI on CI", + ) diff --git a/tests/e2e/configurations/cfg_constructor/test_cfg_modifiers.py b/tests/e2e/configurations/cfg_constructor/test_cfg_modifiers.py new file mode 100644 index 0000000000000..56dedc1153648 --- /dev/null +++ b/tests/e2e/configurations/cfg_constructor/test_cfg_modifiers.py @@ -0,0 +1,30 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
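+
+# Checks that `buck2 targets --output-attribute=metadata` surfaces the
+# buck.cfg_modifiers metadata exactly as written in the TARGETS file.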
+ +# pyre-strict + + +import json + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(inplace=True) +async def test_cfg_modifiers(buck: Buck) -> None: + result = await buck.targets( + "fbcode//buck2/tests/e2e/configurations/cfg_constructor/cfg_modifiers:test", + "--output-attribute=metadata", + ) + targets = json.loads(result.stdout) + assert len(targets) == 1 + target = targets[0] + cfg_modifiers = target["metadata"]["buck.cfg_modifiers"] + assert cfg_modifiers == [ + "ovr_config//os/constraints:linux", + "ovr_config//cpu/constraints:arm64", + ] diff --git a/tests/e2e/configurations/cfg_constructor/test_cfg_modifiers_attr.py b/tests/e2e/configurations/cfg_constructor/test_cfg_modifiers_attr.py new file mode 100644 index 0000000000000..9c8f74fb5d001 --- /dev/null +++ b/tests/e2e/configurations/cfg_constructor/test_cfg_modifiers_attr.py @@ -0,0 +1,50 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +import json + +from buck2.tests.e2e.configurations.cfg_constructor.modifiers_util import get_cfg + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure + +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(inplace=False) +async def test_cfg_modifiers_attr(buck: Buck) -> None: + result = await buck.targets( + "root//:test", + "--output-attribute=modifiers", + ) + + targets = json.loads(result.stdout) + assert len(targets) == 1 + target = targets[0] + target_modifiers = target["modifiers"] + assert target_modifiers == ["root//:A_1"] + + +@buck_test(inplace=False) +async def test_cfg_modifiers_attr_ctargets(buck: Buck) -> None: + result = await get_cfg( + buck, + "root//:test2", + ) + assert ":A_1" in result + + +@buck_test(inplace=False) +async def test_cfg_modifiers_attr_and_metadata_together_fails(buck: Buck) -> None: + await expect_failure( + buck.ctargets( + "root//:test3", + ), + stderr_regex="Usage of both `modifiers` attribute and modifiers in metadata is not allowed for target `root//:test3`", + ) diff --git a/tests/e2e/configurations/cfg_constructor/test_cfg_modifiers_attr_data/.buckconfig b/tests/e2e/configurations/cfg_constructor/test_cfg_modifiers_attr_data/.buckconfig new file mode 100644 index 0000000000000..b9d1f50b498f4 --- /dev/null +++ b/tests/e2e/configurations/cfg_constructor/test_cfg_modifiers_attr_data/.buckconfig @@ -0,0 +1,17 @@ +[buildfile] +name=TARGETS.test + +[repositories] +root = . 
+prelude = prelude + +[cell_aliases] +fbsource = root +fbcode = root +buck = root +config = prelude +ovr_config = prelude +toolchains = prelude + +[external_cells] +prelude = bundled diff --git a/tests/e2e/configurations/cfg_constructor/test_cfg_modifiers_attr_data/.buckroot b/tests/e2e/configurations/cfg_constructor/test_cfg_modifiers_attr_data/.buckroot new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/e2e/configurations/cfg_constructor/test_cfg_modifiers_attr_data/TARGETS.test b/tests/e2e/configurations/cfg_constructor/test_cfg_modifiers_attr_data/TARGETS.test new file mode 100644 index 0000000000000..0b61baf01fee1 --- /dev/null +++ b/tests/e2e/configurations/cfg_constructor/test_cfg_modifiers_attr_data/TARGETS.test @@ -0,0 +1,37 @@ +load(":rules.bzl", "test_rule") + +constraint_setting( + name = "constraint_A", +) + +constraint_value( + name = "A_1", + constraint_setting = ":constraint_A", +) + +platform(name = "my_platform") + +test_rule( + name = "test", + modifiers = [ + "root//:A_1" + ], +) + +test_rule( + name = "test2", + modifiers = [ + "root//:A_1" + ], + default_target_platform = ":my_platform", +) + +test_rule( + name = "test3", + modifiers = [ + "root//:A_1" + ], + metadata = { + "buck.cfg_modifiers": ["root//:A_1"] + }, +) diff --git a/tests/e2e/configurations/cfg_constructor/test_cfg_modifiers_attr_data/rules.bzl b/tests/e2e/configurations/cfg_constructor/test_cfg_modifiers_attr_data/rules.bzl new file mode 100644 index 0000000000000..933a4d46df430 --- /dev/null +++ b/tests/e2e/configurations/cfg_constructor/test_cfg_modifiers_attr_data/rules.bzl @@ -0,0 +1,17 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _test_rule(_ctx): + return [ + DefaultInfo(), + ] + +test_rule = rule( + impl = _test_rule, + attrs = { + }, +) diff --git a/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/set_cfg_modifiers/TARGETS.test b/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/set_cfg_modifiers/TARGETS.test new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/set_cfg_modifiers/dir/TARGETS.test b/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/set_cfg_modifiers/dir/TARGETS.test new file mode 100644 index 0000000000000..e9daf0e74e1ff --- /dev/null +++ b/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/set_cfg_modifiers/dir/TARGETS.test @@ -0,0 +1,3 @@ +load(":rules.bzl", "test_rule") + +test_rule(name = "test") diff --git a/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/set_cfg_modifiers/dir/rules.bzl b/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/set_cfg_modifiers/dir/rules.bzl new file mode 100644 index 0000000000000..23c1921857439 --- /dev/null +++ b/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/set_cfg_modifiers/dir/rules.bzl @@ -0,0 +1,18 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _test_rule(ctx): + _unused = ctx # buildifier: disable=unused-variable + return [ + DefaultInfo(), + ] + +test_rule = rule( + impl = _test_rule, + attrs = { + }, +) diff --git a/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/set_cfg_modifiers/package_file_check/TARGETS.test b/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/set_cfg_modifiers/package_file_check/TARGETS.test new file mode 100644 index 0000000000000..e9daf0e74e1ff --- /dev/null +++ b/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/set_cfg_modifiers/package_file_check/TARGETS.test @@ -0,0 +1,3 @@ +load(":rules.bzl", "test_rule") + +test_rule(name = "test") diff --git a/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/set_cfg_modifiers/package_file_check/fail.bzl b/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/set_cfg_modifiers/package_file_check/fail.bzl new file mode 100644 index 0000000000000..5f5bdf46c87ab --- /dev/null +++ b/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/set_cfg_modifiers/package_file_check/fail.bzl @@ -0,0 +1,12 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@fbcode//buck2/cfg/experimental:set_cfg_modifiers.bzl", "set_cfg_modifiers") + +def set_cfg_modifiers_not_from_package_file(): + if native.read_config("buck_e2e", "testing_failure", False): + set_cfg_modifiers(cfg_modifiers = []) diff --git a/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/set_cfg_modifiers/package_file_check/rules.bzl b/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/set_cfg_modifiers/package_file_check/rules.bzl new file mode 100644 index 0000000000000..23c1921857439 --- /dev/null +++ b/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/set_cfg_modifiers/package_file_check/rules.bzl @@ -0,0 +1,18 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +def _test_rule(ctx): + _unused = ctx # buildifier: disable=unused-variable + return [ + DefaultInfo(), + ] + +test_rule = rule( + impl = _test_rule, + attrs = { + }, +) diff --git a/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/test_cfg_constructor_data/TARGETS.test b/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/test_cfg_constructor_data/TARGETS.test new file mode 100644 index 0000000000000..081587c00a5e6 --- /dev/null +++ b/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/test_cfg_constructor_data/TARGETS.test @@ -0,0 +1,28 @@ +load(":rules.bzl", "test_rule") + +test_rule( + name = "no_modifiers", + default_target_platform = "ovr_config//platform:base", +) + +test_rule( + name = "has_target_modifier", + default_target_platform = "ovr_config//platform:base", + metadata = { + "buck.cfg_modifiers": [ + "ovr_config//os:linux", + "fbcode//buck2/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/test_cfg_constructor_data:some_constraint_value", + ], + } +) + +native.constraint_setting( + name = "some_constraint_setting", + visibility = ["PUBLIC"], +) + +native.constraint_value( + name = "some_constraint_value", + constraint_setting = ":some_constraint_setting", + visibility = ["PUBLIC"], +) diff --git a/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/test_cfg_constructor_data/rules.bzl b/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/test_cfg_constructor_data/rules.bzl new file mode 100644 index 0000000000000..23c1921857439 --- /dev/null +++ b/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/test_cfg_constructor_data/rules.bzl @@ -0,0 +1,18 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _test_rule(ctx): + _unused = ctx # buildifier: disable=unused-variable + return [ + DefaultInfo(), + ] + +test_rule = rule( + impl = _test_rule, + attrs = { + }, +) diff --git a/tests/e2e/configurations/cfg_constructor/test_cli_modifiers.py b/tests/e2e/configurations/cfg_constructor/test_cli_modifiers.py new file mode 100644 index 0000000000000..d54624960bab2 --- /dev/null +++ b/tests/e2e/configurations/cfg_constructor/test_cli_modifiers.py @@ -0,0 +1,53 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
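+# These tests exercise command-line modifiers: each `--modifier`/`-m` value
+# names a constraint value (or an alias such as `linux`) that the cfg
+# constructor folds into the target configuration, which `get_cfg` inspects.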
+ +# pyre-strict + + +from buck2.tests.e2e.configurations.cfg_constructor.modifiers_util import get_cfg +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + +DATA_DIR = ( + "fbcode//buck2/tests/e2e/configurations/cfg_constructor/test_cli_modifiers_data" +) +TARGET = f"{DATA_DIR}:test_target" +CONSTRAINT_A = f"{DATA_DIR}:A_1" +CONSTRAINT_B = f"{DATA_DIR}:B_1" + + +@buck_test(inplace=True) +async def test_one_cli_modifier(buck: Buck) -> None: + # -m A + assert CONSTRAINT_A in await get_cfg(buck, TARGET, "--modifier", CONSTRAINT_A) + + +@buck_test(inplace=True) +async def test_two_cli_modifier(buck: Buck) -> None: + # -m A,B + result = await get_cfg( + buck, TARGET, "--modifier", CONSTRAINT_A, "--modifier", CONSTRAINT_B + ) + assert CONSTRAINT_A in result + assert CONSTRAINT_B in result + + +@buck_test(inplace=True) +async def test_cli_modifiers_bad_input(buck: Buck) -> None: + # -m A B (error) + await expect_failure( + buck.cquery(f"deps({TARGET})", "--modifier", CONSTRAINT_A, CONSTRAINT_B), + stderr_regex=f"got args `{CONSTRAINT_B}`", + ) + + +@buck_test(inplace=True) +async def test_cli_modifier_alias(buck: Buck) -> None: + assert "ovr_config//os/constraints:linux" in await get_cfg( + buck, TARGET, "--modifier", "linux" + ) diff --git a/tests/e2e/configurations/cfg_constructor/test_cli_modifiers_data/TARGETS.test b/tests/e2e/configurations/cfg_constructor/test_cli_modifiers_data/TARGETS.test new file mode 100644 index 0000000000000..029b475f18fb6 --- /dev/null +++ b/tests/e2e/configurations/cfg_constructor/test_cli_modifiers_data/TARGETS.test @@ -0,0 +1,32 @@ +load(":rules.bzl", "test_rule") + +test_rule(name = "test_target") + +prelude = native + +prelude.constraint_setting( + name = "constraint_A", +) + +prelude.constraint_setting( + name = "constraint_B", +) + +prelude.constraint_setting( + name = "constraint_C", +) + +prelude.constraint_value( + name = "A_1", + constraint_setting = ":constraint_A", +) + +prelude.constraint_value( + name = "B_1", + constraint_setting = ":constraint_B", +) + +prelude.constraint_value( + name = "C_1", + constraint_setting = ":constraint_C", +) diff --git a/tests/e2e/configurations/cfg_constructor/test_cli_modifiers_data/rules.bzl b/tests/e2e/configurations/cfg_constructor/test_cli_modifiers_data/rules.bzl new file mode 100644 index 0000000000000..23c1921857439 --- /dev/null +++ b/tests/e2e/configurations/cfg_constructor/test_cli_modifiers_data/rules.bzl @@ -0,0 +1,18 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _test_rule(ctx): + _unused = ctx # buildifier: disable=unused-variable + return [ + DefaultInfo(), + ] + +test_rule = rule( + impl = _test_rule, + attrs = { + }, +) diff --git a/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors.py b/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors.py new file mode 100644 index 0000000000000..9b7ff16af1c12 --- /dev/null +++ b/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors.py @@ -0,0 +1,32 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(inplace=False) +async def test_invoke_cfg_constructors(buck: Buck) -> None: + result = await buck.cquery("root//:test") + assert "root//:test (post_constraint_analysis_test_label" in result.stdout + + +@buck_test(inplace=False) +async def test_invoke_cfg_constructors_without_aliases(buck: Buck) -> None: + # This test ensures that for backwards compatibility, we can call + # `set_cfg_constructor` without explicitly passing in aliases parameter. + result = await buck.cquery("root//:test", "-c", "testing.no_aliases=true") + assert "root//:test (post_constraint_analysis_test_label" in result.stdout + + +@buck_test(inplace=False) +async def test_invoke_cfg_constructors_unbound_platform(buck: Buck) -> None: + result = await buck.cquery("root//:test_unbound") + assert "root//:test_unbound (post_constraint_analysis_test_label" in result.stdout diff --git a/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_bad_constraints.py b/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_bad_constraints.py new file mode 100644 index 0000000000000..8dc2531bee727 --- /dev/null +++ b/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_bad_constraints.py @@ -0,0 +1,19 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(inplace=False) +async def test_invoke_cfg_constructors_bad_constraints(buck: Buck) -> None: + result = await expect_failure(buck.cquery("root//:test")) + assert "root//:not_a_constraint is not a configuration rule." in result.stderr diff --git a/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_bad_constraints_data/.buckconfig b/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_bad_constraints_data/.buckconfig new file mode 100644 index 0000000000000..c04f0f2853757 --- /dev/null +++ b/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_bad_constraints_data/.buckconfig @@ -0,0 +1,6 @@ +[buildfile] +name = TARGETS.fixture + +[repositories] +root = . 
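+# `prelude` resolves to the empty stub prelude.bzl bundled with this fixture.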
+prelude = prelude
diff --git a/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_bad_constraints_data/TARGETS.fixture b/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_bad_constraints_data/TARGETS.fixture
new file mode 100644
index 0000000000000..dd10b507e2661
--- /dev/null
+++ b/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_bad_constraints_data/TARGETS.fixture
@@ -0,0 +1,15 @@
+load(":rules.bzl", "test_platform", "test_rule")
+
+test_platform(
+    name = "platform",
+)
+
+test_rule(
+    name = "test",
+    default_target_platform = ":platform",
+    metadata = {"buck.cfg_modifiers": {}},
+)
+
+test_rule(
+    name = "not_a_constraint",
+)
diff --git a/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_bad_constraints_data/defs.bzl b/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_bad_constraints_data/defs.bzl
new file mode 100644
index 0000000000000..371fec2a0fb05
--- /dev/null
+++ b/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_bad_constraints_data/defs.bzl
@@ -0,0 +1,40 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# Returns refs that aren't configuration rules
+def _bad_pre_constraint_analysis(
+        legacy_platform,
+        package_modifiers: dict[str, typing.Any] | None,
+        target_modifiers: dict[str, typing.Any] | None,
+        cli_modifiers: list[str],
+        **_kwargs):
+    _unused = package_modifiers  # buildifier: disable=unused-variable
+    _unused = target_modifiers  # buildifier: disable=unused-variable
+    _unused = cli_modifiers  # buildifier: disable=unused-variable
+    platform = legacy_platform or PlatformInfo(label = "post_constraint_analysis_test_label_unbound", configuration = ConfigurationInfo(
+        constraints = {},
+        values = {},
+    ))
+    return (["root//:not_a_constraint"], platform)
+
+def _cfg_constructor_post_constraint_analysis(refs: dict[str, ProviderCollection], params):
+    _unused = refs  # buildifier: disable=unused-variable
+    return PlatformInfo(label = "post_constraint_analysis_test_label", configuration = ConfigurationInfo(
+        constraints = params.configuration.constraints,
+        values = {},
+    ))
+
+_ALIASES = struct()
+
+def init_cfg_constructor():
+    set_cfg_constructor(
+        stage0 = _bad_pre_constraint_analysis,
+        stage1 = _cfg_constructor_post_constraint_analysis,
+        key = "buck.cfg_modifiers",
+        aliases = _ALIASES,
+    )
diff --git a/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_bad_constraints_data/prelude/prelude.bzl b/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_bad_constraints_data/prelude/prelude.bzl
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_bad_constraints_data/rules.bzl b/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_bad_constraints_data/rules.bzl
new file mode 100644
index 0000000000000..e80e0d1ed42f9
--- /dev/null
+++ b/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_bad_constraints_data/rules.bzl
@@ -0,0 +1,36 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+def _test_platform_impl(ctx):
+    return [
+        DefaultInfo(),
+        PlatformInfo(
+            label = str(ctx.label.raw_target()),
+            configuration = ConfigurationInfo(
+                constraints = {},
+                values = {},
+            ),
+        ),
+    ]
+
+test_platform = rule(
+    impl = _test_platform_impl,
+    attrs = {},
+)
+
+def _test_rule(ctx):
+    _unused = ctx  # buildifier: disable=unused-variable
+    return [
+        DefaultInfo(),
+    ]
+
+test_rule = rule(
+    impl = _test_rule,
+    attrs = {
+    },
+)
diff --git a/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_data/.buckconfig b/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_data/.buckconfig
new file mode 100644
index 0000000000000..c04f0f2853757
--- /dev/null
+++ b/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_data/.buckconfig
@@ -0,0 +1,6 @@
+[buildfile]
+name = TARGETS.fixture
+
+[repositories]
+root = .
+prelude = prelude
diff --git a/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_data/TARGETS.fixture b/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_data/TARGETS.fixture
new file mode 100644
index 0000000000000..c0759c2d7bec3
--- /dev/null
+++ b/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_data/TARGETS.fixture
@@ -0,0 +1,33 @@
+load(":rules.bzl", "constraint_setting", "constraint_value", "test_platform", "test_rule")
+
+constraint_setting(
+    name = "test_constraint_setting",
+)
+
+constraint_value(
+    name = "test_constraint_value",
+    setting = ":test_constraint_setting",
+)
+
+constraint_value(
+    name = "other_constraint_value",
+    setting = ":test_constraint_setting",
+)
+
+test_platform(
+    name = "platform",
+)
+
+test_rule(
+    name = "test",
+    default_target_platform = ":platform",
+)
+
+test_rule(
+    name = "test_unbound",
+    metadata = {
+        "cfg_modifiers.target": {
+            "root//:test_constraint_setting": "root//:other_constraint_value",
+        },
+    },
+)
diff --git a/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_data/defs.bzl b/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_data/defs.bzl
new file mode 100644
index 0000000000000..fcdaf970ed517
--- /dev/null
+++ b/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_data/defs.bzl
@@ -0,0 +1,66 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
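+
+# Two-stage cfg constructor wiring, as registered by `init_cfg_constructor`
+# below: stage0 (pre-constraint-analysis) gathers modifier refs and a base
+# PlatformInfo, and stage1 (post-constraint-analysis) produces the final
+# PlatformInfo from the analyzed constraints.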
+
+KEY = "buck.cfg_modifiers"
+
+def set_cfg_modifiers():
+    write_package_value(
+        KEY,
+        {"root//:test_constraint_setting": "root//:test_constraint_value"},
+    )
+
+def _cfg_constructor_pre_constraint_analysis(
+        legacy_platform,
+        package_modifiers: dict[str, typing.Any] | None,
+        target_modifiers: dict[str, typing.Any] | None,
+        cli_modifiers: list[str],
+        rule_name: str,
+        aliases: struct | None,
+        extra_data: dict[str, typing.Any] | None):
+    _unused = cli_modifiers  # buildifier: disable=unused-variable
+    _unused = extra_data  # buildifier: disable=unused-variable
+
+    # Include valid constraints from PACKAGE and target modifiers
+    refs = list(package_modifiers.keys()) + list(package_modifiers.values()) if package_modifiers else []
+    refs += list(target_modifiers.keys()) + list(target_modifiers.values()) if target_modifiers else []
+    platform = legacy_platform or PlatformInfo(label = "post_constraint_analysis_test_label_unbound", configuration = ConfigurationInfo(
+        constraints = {},
+        values = {},
+    ))
+
+    if aliases:
+        getattr(aliases, "test")  # If `aliases` is not None, we should always be able to get `test` attr from it.
+
+    if rule_name != "test_rule":
+        fail("Expected rule name to be `test_rule`. Found `{}` instead.".format(rule_name))
+
+    return (refs, platform)
+
+def _cfg_constructor_post_constraint_analysis(refs: dict[str, ProviderCollection], params):
+    _unused = refs  # buildifier: disable=unused-variable
+    return PlatformInfo(label = "post_constraint_analysis_test_label", configuration = ConfigurationInfo(
+        constraints = params.configuration.constraints,
+        values = {},
+    ))
+
+_ALIASES = struct(
+    test = "root//:test_constraint_value",
+)
+
+def init_cfg_constructor():
+    kwargs = {
+        "key": KEY,
+        "stage0": _cfg_constructor_pre_constraint_analysis,
+        "stage1": _cfg_constructor_post_constraint_analysis,
+    }
+    if not read_root_config("testing", "no_aliases", None):
+        kwargs["aliases"] = _ALIASES
+    set_cfg_constructor(
+        **kwargs
+    )
diff --git a/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_data/prelude/prelude.bzl b/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_data/prelude/prelude.bzl
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_data/rules.bzl b/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_data/rules.bzl
new file mode 100644
index 0000000000000..bdde4061c4baf
--- /dev/null
+++ b/tests/e2e/configurations/cfg_constructor/test_invoke_cfg_constructors_data/rules.bzl
@@ -0,0 +1,65 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+def _constraint_setting(ctx):
+    return [DefaultInfo(), ConstraintSettingInfo(label = ctx.label.raw_target())]
+
+constraint_setting = rule(
+    impl = _constraint_setting,
+    is_configuration_rule = True,
+    attrs = {},
+)
+
+def _constraint_value(ctx):
+    constraint_value = ConstraintValueInfo(
+        setting = ctx.attrs.setting[ConstraintSettingInfo],
+        label = ctx.label.raw_target(),
+    )
+    return [
+        DefaultInfo(),
+        constraint_value,
+        # Provide `ConfigurationInfo` from `constraint_value` so it could be used as select key.
+        ConfigurationInfo(constraints = {
+            constraint_value.setting.label: constraint_value,
+        }, values = {}),
+    ]
+
+constraint_value = rule(
+    impl = _constraint_value,
+    is_configuration_rule = True,
+    attrs = {"setting": attrs.dep(providers = [ConstraintSettingInfo])},
+)
+
+def _test_platform_impl(ctx):
+    return [
+        DefaultInfo(),
+        PlatformInfo(
+            label = str(ctx.label.raw_target()),
+            configuration = ConfigurationInfo(
+                constraints = {},
+                values = {},
+            ),
+        ),
+    ]
+
+test_platform = rule(
+    impl = _test_platform_impl,
+    attrs = {},
+)
+
+def _test_rule(ctx):
+    _unused = ctx  # buildifier: disable=unused-variable
+    return [
+        DefaultInfo(),
+    ]
+
+test_rule = rule(
+    impl = _test_rule,
+    attrs = {
+    },
+)
diff --git a/tests/e2e/configurations/cfg_constructor/test_set_cfg_modifiers.py b/tests/e2e/configurations/cfg_constructor/test_set_cfg_modifiers.py
new file mode 100644
index 0000000000000..1edf8fc71f5e1
--- /dev/null
+++ b/tests/e2e/configurations/cfg_constructor/test_set_cfg_modifiers.py
@@ -0,0 +1,88 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+
+import json
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.asserts import expect_failure
+from buck2.tests.e2e_util.buck_workspace import buck_test
+
+
+@buck_test(inplace=True)
+async def test_set_cfg_modifiers(buck: Buck) -> None:
+    result = await buck.targets(
+        "fbcode//buck2/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/set_cfg_modifiers/dir:test",
+        "--package-values",
+    )
+    targets = json.loads(result.stdout)
+    assert len(targets) == 1
+    target = targets[0]
+    cfg_modifiers = target["buck.package_values"]["buck.cfg_modifiers"]
+    assert cfg_modifiers == [
+        {
+            "_type": "TaggedModifiers",
+            "location": {
+                "_type": "ModifierPackageLocation",
+                "package_path": "fbcode//buck2/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/set_cfg_modifiers/PACKAGE",
+            },
+            "modifiers": [
+                {
+                    "_type": "ModifiersMatch",
+                    "ovr_config//os/constraints:linux": "ovr_config//cpu/constraints:arm64",
+                    "ovr_config//os/constraints:macos": "ovr_config//cpu/constraints:x86_64",
+                },
+                {
+                    "DEFAULT": "ovr_config//os/constraints:linux",
+                    "_type": "ModifiersMatch",
+                },
+            ],
+            "rule_name": None,
+        },
+        {
+            "_type": "TaggedModifiers",
+            "location": {
+                "_type": "ModifierPackageLocation",
+                "package_path": "fbcode//buck2/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/set_cfg_modifiers/PACKAGE",
+            },
+            "modifiers": [
+                "ovr_config//cpu/constraints:x86_64",
+            ],
+            "rule_name": "python_binary",
+        },
+        {
+            "_type": "TaggedModifiers",
+            "location": {
+                "_type": "ModifierPackageLocation",
+                "package_path": "fbcode//buck2/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/set_cfg_modifiers/dir/PACKAGE",
+            },
+            "modifiers": [
+                {
+                    "_type": "ModifiersMatch",
+                    "ovr_config//os/constraints:windows": "ovr_config//cpu/constraints:x86_64",
+                },
+                "ovr_config//os/constraints:macos",
+            ],
+            "rule_name": None,
+        },
+    ]
+
+
+@buck_test(inplace=True)
+async def test_set_cfg_modifiers_from_package_file_only(
+    buck: Buck,
+) -> None:
+    await expect_failure(
+        buck.targets(
+
"fbcode//buck2/tests/e2e/configurations/cfg_constructor/test_clear_package_modifiers_data/set_cfg_modifiers/package_file_check:test", + "-c", + "buck_e2e.testing_failure=true", + ), + stderr_regex="set_cfg_modifiers is only allowed to be used from a PACKAGE or BUCK_TREE file, not a bzl file", + ) diff --git a/tests/e2e/configurations/configuration_alias/BUCK b/tests/e2e/configurations/configuration_alias/BUCK new file mode 100644 index 0000000000000..db6e4c0a31387 --- /dev/null +++ b/tests/e2e/configurations/configuration_alias/BUCK @@ -0,0 +1,5 @@ +load("@fbcode//buck2/tests:bxl_test.bzl", "bxl_test") + +bxl_test( + src = "test_configuration_alias.bxl", +) diff --git a/tests/e2e/configurations/configuration_alias/test/TARGETS.test b/tests/e2e/configurations/configuration_alias/test/TARGETS.test new file mode 100644 index 0000000000000..b9cbc76832dfa --- /dev/null +++ b/tests/e2e/configurations/configuration_alias/test/TARGETS.test @@ -0,0 +1,29 @@ +load("@fbsource//tools/build_defs:fb_native_wrapper.bzl", "fb_native") + +oncall("build_infra") + +fb_native.constraint_setting( + name = "config", +) + +fb_native.constraint_value( + name = "on", + constraint_setting = ":config", +) + +fb_native.configuration_alias( + name = "config_alias", + actual = ":config", +) + +fb_native.configuration_alias( + name = "on_alias", + actual = ":on", +) + +fb_native.platform( + name = "platform", + constraint_values = [ + ":on_alias", + ], +) diff --git a/tests/e2e/configurations/configuration_alias/test_configuration_alias.bxl b/tests/e2e/configurations/configuration_alias/test_configuration_alias.bxl new file mode 100644 index 0000000000000..4bfd6894114d5 --- /dev/null +++ b/tests/e2e/configurations/configuration_alias/test_configuration_alias.bxl @@ -0,0 +1,28 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +load("@prelude//:asserts.bzl", "asserts") + +TEST_TARGET_PATH = "fbcode//buck2/tests/e2e/configurations/configuration_alias/test" + +def _impl(ctx: bxl.Context): + config = ctx.analysis(TEST_TARGET_PATH + ":config").providers() + on = ctx.analysis(TEST_TARGET_PATH + ":on").providers() + config_alias = ctx.analysis(TEST_TARGET_PATH + ":config_alias").providers() + on_alias = ctx.analysis(TEST_TARGET_PATH + ":on_alias").providers() + + asserts.equals(config[ConstraintSettingInfo], config_alias[ConstraintSettingInfo]) + asserts.equals(on[ConstraintValueInfo], on_alias[ConstraintValueInfo]) + + platform = ctx.analysis(TEST_TARGET_PATH + ":platform").providers() + + asserts.equals(platform[PlatformInfo].configuration.constraints, {config[ConstraintSettingInfo].label: on[ConstraintValueInfo]}) + +test = bxl_main( + cli_args = {}, + impl = _impl, +) diff --git a/tests/e2e/fdb/BUCK b/tests/e2e/fdb/BUCK new file mode 100644 index 0000000000000..fca34652f87a5 --- /dev/null +++ b/tests/e2e/fdb/BUCK @@ -0,0 +1,25 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") +load("@fbcode_macros//build_defs:python_library.bzl", "python_library") + +oncall("tae") + +python_library( + name = "fdb_e2e_types", + srcs = ["types.py"], + deps = [ + "//buck2/tests/e2e_util/api:api", + ], +) + +buck2_e2e_test( + name = "test_fdb_project", + srcs = [ + "test_fdb_bxl_java.py", + ], + test_with_compiled_buck2 = False, + test_with_deployed_buck2 = True, + deps = [ + ":fdb_e2e_types", + "//buck2/tests/e2e_util:utils", + ], +) diff --git a/tests/e2e/fdb/test_fdb_bxl_java.py b/tests/e2e/fdb/test_fdb_bxl_java.py new file mode 100644 index 0000000000000..2de4de390e033 --- /dev/null +++ b/tests/e2e/fdb/test_fdb_bxl_java.py @@ -0,0 +1,115 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
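+
+# Each case drives `prelude//debugging/fdb.bxl:inspect_target` against a JVM
+# rule and checks the class map it reports: the bxl prints the path to an
+# ExecInfo JSON file, and the class map inside it is resolved relative to the
+# project root (see types.py).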
+ +# pyre-strict + + +from buck2.tests.e2e.fdb.types import ExecInfo + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_java_test(buck: Buck) -> None: + root = (await buck.root("--kind", "project")).stdout.strip("\n") + result = await buck.bxl( + "prelude//debugging/fdb.bxl:inspect_target", + "--", + "--target", + "//buck2/tests/targets/rules/java/java_test:simple_junit_test", + ) + + exec_info = ExecInfo.from_buck_result(result) + classmap = exec_info.read_class_map(root) + names = [class_ref.name for entry in classmap for class_ref in entry.classes] + assert names == ["com.example.SimpleJUnitTest"] + + +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_java_binary(buck: Buck) -> None: + root = (await buck.root("--kind", "project")).stdout.strip("\n") + result = await buck.bxl( + "prelude//debugging/fdb.bxl:inspect_target", + "--", + "--target", + "//buck2/tests/targets/rules/java/good/java_binary_with_native_libs:binary_with_native_lib", + ) + exec_info: ExecInfo = ExecInfo.from_buck_result(result) + classmap = exec_info.read_class_map(root) + names = [class_ref.name for entry in classmap for class_ref in entry.classes] + assert names == ["JavaBinaryWithNativeLibs"] + + +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_java_library(buck: Buck) -> None: + root = (await buck.root("--kind", "project")).stdout.strip("\n") + result = await buck.bxl( + "prelude//debugging/fdb.bxl:inspect_target", + "--", + "--target", + "//buck2/tests/targets/rules/java/good/java_binary_with_native_libs:lib", + ) + exec_info: ExecInfo = ExecInfo.from_buck_result(result) + classmap = exec_info.read_class_map(root) + names = [class_ref.name for entry in classmap for class_ref in entry.classes] + assert names == ["JavaBinaryWithNativeLibs"] + + +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_kotlin_test(buck: Buck) -> None: + root = (await buck.root("--kind", "project")).stdout.strip("\n") + result = await buck.bxl( + "prelude//debugging/fdb.bxl:inspect_target", + "--", + "--target", + "//buck2/tests/targets/rules/kotlin/kotlin_test:simple_kotlin_test", + ) + exec_info: ExecInfo = ExecInfo.from_buck_result(result) + classmap = exec_info.read_class_map(root) + names = [class_ref.name for entry in classmap for class_ref in entry.classes] + assert names == ["com.example.SimpleKotlinTest"] + + +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_kotlin_library(buck: Buck) -> None: + root = (await buck.root("--kind", "project")).stdout.strip("\n") + result = await buck.bxl( + "prelude//debugging/fdb.bxl:inspect_target", + "--", + "--target", + "//buck2/tests/targets/rules/kotlin/kotlin_library:lib_with_source_only_abi_generation", + ) + exec_info: ExecInfo = ExecInfo.from_buck_result(result) + classmap = exec_info.read_class_map(root) + names = [class_ref.name for entry in classmap for class_ref in entry.classes] + assert names == ["A", "B"] + + +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_apk_gen_rule(buck: Buck) -> None: + root = (await buck.root("--kind", "project")).stdout.strip("\n") + result = await buck.bxl( + "prelude//debugging/fdb.bxl:inspect_target", + "--", + "--target", + "fbsource//fbandroid/buck2/tests/good/apk:zip_align_basic_apk", + ) + exec_info: ExecInfo = ExecInfo.from_buck_result(result) + classmap = exec_info.read_class_map(root) + names = [class_ref.name for entry in classmap for 
class_ref in entry.classes] + assert names == [ + "com.example.sampleapp.MainActivity", + "com.example.sampleapp.Helper", + "com.example.sampleapp.Helper$SomeInterface", + ] + + +# This is to ensure at least one of the tests is passing on Windows otherwise CI fails +@buck_test(inplace=True) +async def test_noop(buck: Buck) -> None: + return diff --git a/tests/e2e/fdb/types.py b/tests/e2e/fdb/types.py new file mode 100644 index 0000000000000..8491e984dd324 --- /dev/null +++ b/tests/e2e/fdb/types.py @@ -0,0 +1,95 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json +import os +from dataclasses import dataclass +from typing import Any, Dict, Type, TypeVar, Union + +from buck2.tests.e2e_util.api.buck_result import BuckResult + + +T = TypeVar("T") + + +def _inner_dataclass_from_dict( + klass: Type[T], dikt: Dict[str, Any] +) -> Union[T, Dict[str, Any]]: + if hasattr(klass, "__annotations__"): + return _dataclass_from_dict(klass, dikt) + else: + return dikt + + +def _dataclass_from_dict(klass: Type[T], dikt: Dict[str, Any]) -> T: + fieldtypes = klass.__annotations__ + result = {} + for f, ftype in fieldtypes.items(): + if hasattr(ftype, "__origin__"): + if ftype.__origin__ is list: + # Handle list type + inner_type = ftype.__args__[0] + result[f] = [ + _inner_dataclass_from_dict(inner_type, item) + for item in dikt.get(f, []) + ] + elif ftype.__origin__ is dict: + # Handle dict type + key_type, value_type = ftype.__args__ + result[f] = { + key: _inner_dataclass_from_dict(value_type, value) + for key, value in dikt.get(f, {}).items() + } + else: + result[f] = dikt.get(f) + else: + result[f] = _inner_dataclass_from_dict(ftype, dikt.get(f)) + return klass(**result) + + +def _resolve_relative(root: str, path: str) -> str: + return os.path.join(root, path) + + +@dataclass +class ClassRef: + name: str + methods: list[str] + + +@dataclass +class ClassMapEntry: + classes: list[ClassRef] + file_path: str + language: str + + +@dataclass +class ExecInfo: + data: dict[str, Any] + target_name: str + target_info: dict[str, Any] + + @staticmethod + def from_buck_result(result: BuckResult) -> "ExecInfo": + with open(result.stdout.strip("\n")) as json_file: + return _dataclass_from_dict(ExecInfo, json.loads(json_file.read())) + + def read_class_map(self, root: str) -> list[ClassMapEntry]: + classmap_file = self.data["java"]["classmap_file"] + assert not os.path.isabs( + classmap_file + ), f"{classmap_file} should be relative to {root}" + + with open(_resolve_relative(root, classmap_file)) as classmap_file: + return [ + _dataclass_from_dict(ClassMapEntry, value) + for value in json.loads(classmap_file.read()) + ] diff --git a/tests/e2e/intellij_project/test_intellij_project.py b/tests/e2e/intellij_project/test_intellij_project.py new file mode 100644 index 0000000000000..fd7b74631c72c --- /dev/null +++ b/tests/e2e/intellij_project/test_intellij_project.py @@ -0,0 +1,140 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
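+
+# Flow under test: the bxl generates project data, project_writer renders it
+# to IntelliJ XML, and the output is compared line-by-line against checked-in
+# `.expected` files, with volatile buck-out paths normalized first.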
+ +# pyre-strict + + +import difflib +import re +import shutil +import tempfile +from pathlib import Path +from typing import Pattern + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + +BXL_LABEL = "fbsource//xplat/buck2/intellij_project/main.bxl:generate_intellij_project" +EXPECTED_DIR_RELATIVE_PATH = "../xplat/buck2/tests/intellij_project/testdata" + +# To replace the entire buck-out path for generated files +BUCK_OUT_HASH_PATTERN: Pattern[str] = re.compile(r"buck-out/[^\"!]*") +BUCK_OUT_HASH_REPLACE_TXT = "buck-out-path" + + +# TODO(marwhal): Fix and enable on Windows +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_android_apk(buck: Buck) -> None: + expected_dir_name = "sample_intellij_project" + test_targets = "fbsource//xplat/buck2/tests/intellij_project/testdata/sample_intellij_project/apk:apk" + await run_and_verify_project(buck, expected_dir_name, test_targets) + + +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_android_library(buck: Buck) -> None: + expected_dir_name = "android_library" + test_targets = ( + "fbsource//xplat/buck2/tests/intellij_project/testdata/android_library/..." + ) + await run_and_verify_project(buck, expected_dir_name, test_targets) + + +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_android_binary(buck: Buck) -> None: + expected_dir_name = "android_binary" + test_targets = ( + "fbsource//xplat/buck2/tests/intellij_project/testdata/android_binary/..." + ) + await run_and_verify_project(buck, expected_dir_name, test_targets) + + +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_project1(buck: Buck) -> None: + expected_dir_name = "project1" + test_targets = "fbsource//xplat/buck2/tests/intellij_project/testdata/project1/..." 
+ await run_and_verify_project(buck, expected_dir_name, test_targets) + + +async def run_and_verify_project( + buck: Buck, expected_dir_name: str, test_targets: str +) -> None: + expected_dir_path = Path(EXPECTED_DIR_RELATIVE_PATH).resolve() / expected_dir_name + buck_config_path = expected_dir_path / "configfile" + idea_dir_path = expected_dir_path / ".idea" + + result = await buck.bxl( + BXL_LABEL, + "--config-file", + str(buck_config_path), + "--", + "--targets", + test_targets, + ) + output_dir = Path(result.stdout.strip()) + + xml_output_temp_dir = tempfile.mkdtemp() + config_args_map = { + "intellij.default_min_android_sdk_version": 19, + "intellij.default_android_manifest_package_name": "com.facebook", + "intellij.android_generated_files_directory": ".idea/android_gen", + } + config_args = [] + for name, value in config_args_map.items(): + config_args.append("-c") + config_args.append(f"{name}={value}") + root = await buck.root() + await buck.run( + "fbsource//xplat/buck2/intellij_project/tools/project_writer:project_writer", + "--", + "--input_path", + output_dir.as_posix(), + "--output_path", + xml_output_temp_dir, + "--repo_path", + root.stdout.strip(), + "--manifests-to-copy-json", + (output_dir.parent / "manifests_to_copy").as_posix(), + *config_args, + rel_cwd=Path(".."), + ) + verify_expected_files(idea_dir_path, expected_dir_name, Path(xml_output_temp_dir)) + shutil.rmtree(xml_output_temp_dir) + + +def verify_expected_files( + expected_dir_path: Path, expected_dir_name: str, output_dir: Path +) -> None: + for cur_file in expected_dir_path.rglob("*.expected"): + relative_path = cur_file.relative_to(expected_dir_path).parent + file_name = cur_file.stem + + generated_file = output_dir / relative_path / file_name + assert generated_file.is_file(), "File does not exist: {}".format( + generated_file + ) + + cur_lines = [line.strip() for line in open(cur_file).readlines()] + generated_lines = [line.strip() for line in open(generated_file).readlines()] + + generated_lines = [ + BUCK_OUT_HASH_PATTERN.sub(BUCK_OUT_HASH_REPLACE_TXT, line) + for line in generated_lines + ] + + is_match = cur_lines == generated_lines + diff = difflib.context_diff(cur_lines, generated_lines, n=2) + assert ( + is_match + ), "Generated file: {} does not match expected file at: {}\n{}".format( + generated_file, cur_file, "".join(diff) + ) + + +# TODO(marwhal): Add this back one at least one test in this file passes on Windows +@buck_test(inplace=True) +async def test_noop(buck: Buck) -> None: + return diff --git a/tests/e2e/log/BUCK b/tests/e2e/log/BUCK new file mode 100644 index 0000000000000..fe001bba836b1 --- /dev/null +++ b/tests/e2e/log/BUCK @@ -0,0 +1,37 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_what_ran", + srcs = ["test_what_ran.py"], + use_compiled_buck2_client_and_tpx = True, +) + +buck2_e2e_test( + name = "test_what_failed", + srcs = ["test_what_failed.py"], +) + +buck2_e2e_test( + name = "test_log_format", + srcs = ["test_log_format.py"], + use_compiled_buck2_client_and_tpx = True, +) + +buck2_e2e_test( + name = "test_persist_event_logs", + srcs = ["test_persist_event_logs.py"], + deps = [ + "//manifold/clients/python:manifold_client_deprecated", + ], +) + +buck2_e2e_test( + name = "test_upload_re_logs", + srcs = ["test_upload_re_logs.py"], + deps = [ + "fbcode//buck2/tests/e2e_util:utils", + "//manifold/clients/python:manifold_client_deprecated", + ], +) diff --git a/tests/e2e/log/test_log_format.py 
b/tests/e2e/log/test_log_format.py new file mode 100644 index 0000000000000..7eb684598bb23 --- /dev/null +++ b/tests/e2e/log/test_log_format.py @@ -0,0 +1,65 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +# Tests to ensure that the log format for a few important fields hasn't +# changed. This ensures compatibility with downstream processing tools. +# +# If this test needs to be updated, please sync with @athmasagar or the +# fbcode_build_infra oncall to ensure that log parsers are also migrated. +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_log_format(buck: Buck) -> None: + await buck.build("fbcode//buck2/tests/targets/rules/cxx/hello_world:welcome") + out = await buck.log("show") + + lines = [line.strip() for line in out.stdout.splitlines()] + test_line = None + for line in lines: + if "output_size" in line and "cpu_instructions_user" in line: + test_line = line + break + + assert test_line is not None + decoded_line = json.loads(test_line) + + span_end = decoded_line["Event"]["data"]["SpanEnd"] + assert span_end["duration_us"] >= 0 + + action_execution = span_end["data"]["ActionExecution"] + assert action_execution["execution_kind"] >= 0 + + target_label = action_execution["key"]["owner"]["TargetLabel"] + label = target_label["label"] + assert label["package"] is not None + assert label["name"] is not None + assert target_label["configuration"]["full_name"] is not None + + action_name = action_execution["name"] + assert action_name["category"] is not None + assert action_name["identifier"] is not None + assert action_execution["output_size"] >= 0 + + commands = action_execution["commands"] + execution_stats = commands[-1]["details"]["metadata"]["execution_stats"] + assert execution_stats["cpu_instructions_user"] >= 0 + assert execution_stats["userspace_events"]["count"] >= 0 + assert execution_stats["userspace_events"]["time_enabled"] >= 0 + assert execution_stats["userspace_events"]["time_running"] >= 0 + + +# Placeholder for tests to be listed successfully on Windows. +@buck_test(inplace=True) +async def test_noop(buck: Buck) -> None: + return diff --git a/tests/e2e/log/test_persist_event_logs.py b/tests/e2e/log/test_persist_event_logs.py new file mode 100644 index 0000000000000..389b88ed15b6c --- /dev/null +++ b/tests/e2e/log/test_persist_event_logs.py @@ -0,0 +1,101 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
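+
+# Exercises `buck2 debug persist-event-logs`, which tees the event log to a
+# local file and (unless `--no-upload` is passed) uploads it to Manifold in
+# chunks; chunk size and TTL are overridden via the BUCK2_TEST_* env vars
+# used below.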
+
+# pyre-strict
+
+
+import os
+import random
+import string
+import sys
+from pathlib import Path
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.buck_workspace import buck_test, env
+
+from manifold.clients.python.manifold_client_deprecated import Client as ManifoldClient
+
+TEST_TRACE_ID = "f115b5da-7d81-47cc-9c4a-57e283bfa384"
+BUCKET_CONFIG = {"bucket": "buck2_logs", "apikey": "buck2_logs-key"}
+
+# This test was failing in macos sandcastle, so attempt fix suggested
+# here: https://fb.workplace.com/groups/fbpython/permalink/5214295275278464/
+if sys.platform != "win32" and os.path.exists("/etc/ssl/cert.pem"):
+    os.environ["SSL_CERT_FILE"] = "/etc/ssl/cert.pem"
+
+
+async def manifold_exists(path: str) -> bool:
+    with ManifoldClient(BUCKET_CONFIG) as client:
+        return client.exists(bucket="buck2_logs", path=path)
+
+
+async def manifold_file_size(path: str) -> int:
+    with ManifoldClient(BUCKET_CONFIG) as client:
+        return client.getFileSize(bucket="buck2_logs", path=path)
+
+
+EVENT_LOG_PLACEHOLDER = """
+Plants are living organisms that belong to the kingdom Plantae. They are characterized by their ability to produce their own food through photosynthesis, which is the process of converting sunlight, water, and carbon dioxide into glucose and oxygen. Plants come in many different shapes and sizes, from small mosses to towering trees, and can be found in almost every ecosystem on Earth. They play a vital role in the planet's ecology, serving as primary producers at the base of the food chain and providing habitats for a wide range of animal species. In addition to their ecological importance, plants have many practical uses for humans, such as food, medicine, clothing, and shelter.
+"""
+
+
+def random_name() -> str:
+    alphabet = string.ascii_letters + string.digits
+    return "".join(random.choice(alphabet) for _ in range(8))
+
+
+@buck_test(inplace=True)
+@env("BUCK2_TEST_MANIFOLD_CHUNK_BYTES", str(32))
+@env("BUCK2_TEST_MANIFOLD_TTL_S", str(84_000))  # ~1 day
+async def test_persist_event_logs(buck: Buck, tmp_path: Path) -> None:
+    local_log = tmp_path / "test.txt"
+
+    manifold_name = f"test_{random_name()}.txt"
+    await buck.debug(
+        "persist-event-logs",
+        "--manifold-name",
+        manifold_name,
+        "--local-path",
+        str(local_log),
+        "--trace-id",
+        TEST_TRACE_ID,
+        input=EVENT_LOG_PLACEHOLDER.encode(),
+    )
+
+    assert Path(local_log).exists()
+
+    with open(local_log, "r") as f:
+        assert f.read() == EVENT_LOG_PLACEHOLDER
+
+    manifold_size = await manifold_file_size(path=f"flat/{manifold_name}")
+    # Some OSs return str, others int. Just enforce str to compare
+    assert str(manifold_size) == str(len(EVENT_LOG_PLACEHOLDER))
+
+
+@buck_test(inplace=True)
+async def test_persist_event_logs_not_uploaded(buck: Buck, tmp_path: Path) -> None:
+    local_log = tmp_path / "test.txt"
+
+    manifold_name = f"test_{random_name()}.txt"
+    await buck.debug(
+        "persist-event-logs",
+        "--manifold-name",
+        manifold_name,
+        "--local-path",
+        str(local_log),
+        "--no-upload",
+        "--trace-id",
+        TEST_TRACE_ID,
+        input=EVENT_LOG_PLACEHOLDER.encode(),
+    )
+
+    assert Path(local_log).exists()
+
+    with open(local_log, "r") as f:
+        assert f.read() == EVENT_LOG_PLACEHOLDER
+
+    assert await manifold_exists(path=f"flat/{manifold_name}") is False
diff --git a/tests/e2e/log/test_upload_re_logs.py b/tests/e2e/log/test_upload_re_logs.py
new file mode 100644
index 0000000000000..578164f3c414c
--- /dev/null
+++ b/tests/e2e/log/test_upload_re_logs.py
@@ -0,0 +1,59 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+
+import os
+import sys
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.buck_workspace import buck_test, env, get_mode_from_platform
+from buck2.tests.e2e_util.helper.utils import json_get
+
+from manifold.clients.python.manifold_client_deprecated import Client as ManifoldClient
+
+
+BUCKET_CONFIG = {"bucket": "buck2_re_logs", "apikey": "buck2_re_logs-key"}
+
+# This test was failing in macos sandcastle, so attempt fix suggested
+# here: https://fb.workplace.com/groups/fbpython/permalink/5214295275278464/
+if sys.platform != "win32" and os.path.exists("/etc/ssl/cert.pem"):
+    os.environ["SSL_CERT_FILE"] = "/etc/ssl/cert.pem"
+
+
+async def manifold_exists(path: str) -> bool:
+    with ManifoldClient(BUCKET_CONFIG) as client:
+        return client.exists(bucket=BUCKET_CONFIG["bucket"], path=path)
+
+
+@buck_test(inplace=True)
+@env("SANDCASTLE", "1")  # wait for logs to finish uploading
+async def test_upload_re_logs(buck: Buck) -> None:
+
+    # Build a trivial action
+    await buck.build(
+        "fbcode//buck2/tests/targets/rules/command_alias:print_cwd",
+        get_mode_from_platform(),
+    )
+
+    session_id = await extract_re_session_id(buck)
+    await buck.debug("upload-re-logs", "--session-id", session_id)
+    assert await manifold_exists(path=f"flat/{session_id}.log.zst") is True
+
+
+async def extract_re_session_id(buck: Buck) -> str:
+    result = await buck.log("show")
+    session_id = None
+    for line in result.stdout.splitlines():
+        session_id = json_get(
+            line, "Event", "data", "Instant", "data", "ReSession", "session_id"
+        )
+        if session_id:
+            break
+    assert session_id is not None
+    return session_id
diff --git a/tests/e2e/log/test_what_failed.py b/tests/e2e/log/test_what_failed.py
new file mode 100644
index 0000000000000..fff02a0f2be4e
--- /dev/null
+++ b/tests/e2e/log/test_what_failed.py
@@ -0,0 +1,32 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.asserts import expect_failure
+from buck2.tests.e2e_util.buck_workspace import buck_test
+
+
+@buck_test(inplace=True)
+async def test_what_failed(buck: Buck) -> None:
+    pkg = "fbcode//buck2/tests/targets/rules/genrule/bad"
+    bad = "my_genrule_bad_with_dep"
+    good = "stub"
+
+    await expect_failure(buck.build(f"{pkg}:{bad}"))
+    out = await buck.log("what-failed")
+
+    # Only the failed command should be in what-failed.
+    assert f"{pkg}:{bad}" in out.stdout
+    assert f"{pkg}:{good}" not in out.stdout
+
+    # Even though both commands are here.
+    out = await buck.log("what-ran")
+    assert f"{pkg}:{bad}" in out.stdout
+    assert f"{pkg}:{good}" in out.stdout
diff --git a/tests/e2e/log/test_what_ran.py b/tests/e2e/log/test_what_ran.py
new file mode 100644
index 0000000000000..1e159ae91df44
--- /dev/null
+++ b/tests/e2e/log/test_what_ran.py
@@ -0,0 +1,160 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-strict
+
+
+import csv
+import json
+import sys
+
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.asserts import expect_failure
+from buck2.tests.e2e_util.buck_workspace import buck_test
+
+
+# builds targets in an fbcode target configuration, unsupported on mac RE workers
+def fbcode_linux_only() -> bool:
+    return sys.platform == "linux"
+
+
+# TODO(marwhal): Fix and enable on Windows
+@buck_test(inplace=True, skip_for_os=["windows"])
+async def test_what_ran_json_target_without_explicit_test_cases(buck: Buck) -> None:
+    await buck.test("fbcode//buck2/tests/targets/rules/sh_test:test")
+    out = await buck.log("what-ran", "--format", "json")
+    out = [line.strip() for line in out.stdout.splitlines()]
+    out = [json.loads(line) for line in out if line]
+    assert len(out) == 1, "out should have 1 line: `{}`".format(out)
+
+    repro = out[0]
+    assert repro["reason"] == "test.run"
+    assert repro["identity"] == "buck2/tests/targets/rules/sh_test:test"
+    assert repro["reproducer"]["executor"] == "Local"
+    assert repro["reproducer"]["details"]["command"][1] == "arg1"
+    assert repro["extra"]["testcases"] == []
+
+
+if fbcode_linux_only():

+    @buck_test(inplace=True)
+    async def test_what_ran(buck: Buck) -> None:
+        await buck.build("fbcode//buck2/tests/targets/rules/cxx/hello_world:welcome")
+        out = await buck.log("what-ran")
+        assert "welcome" in out.stdout
+
+        await buck.test("fbcode//buck2/tests/targets/rules/sh_test:test")
+        out = await buck.log("what-ran")
+        assert "sh_test/test.py arg1" in out.stdout
+
+    @buck_test(inplace=True)
+    async def test_what_ran_filter_category(buck: Buck) -> None:
+        await buck.build("fbsource//fbobjc/buck2/samples/hello_world:HelloWorldBundle")
+        out = await buck.log(
+            "what-ran",
+            "--filter-category",
+            ".*cxx.*",
+            "--format",
+            "json",
+        )
+        out = [line.strip() for line in out.stdout.splitlines()]
+        out = [json.loads(line) for line in out if line]
+        assert any(
+            "cxx_link_executable" in rec["identity"] or "cxx_compile" in rec["identity"] for rec in out
+        ), "action identity must contain the filtered category: `{}`".format(out)
+
+    @buck_test(inplace=True)
+    async def test_what_ran_show_std_err(buck: Buck) -> None:
+        await expect_failure(
+            buck.build("fbcode//buck2/tests/targets/rules/genrule/bad/...")
+        )
+        out = await buck.log("what-ran", "--show-std-err", "--format", "json")
+        out = [line.strip() for line in out.stdout.splitlines()]
+        out = [json.loads(line) for line in out if line]
+        assert any(
+            rec["std_err"] == "" or rec["std_err"] == "HELLO_STDERR\n" for rec in out
+        ), "we should have some empty std_errs and also HELLO_STDERR since we echo it in TARGETS: `{}`".format(
+            out
+        )
+
+        out = await buck.log(
+            "what-ran", "--show-std-err", "--omit-empty-std-err", "--format", "json"
+        )
+        out = [line.strip() for line in out.stdout.splitlines()]
+        out = [json.loads(line) for line in out if line]
+        assert all(
+            rec["std_err"] != "" for rec in out
+        ), "we should have no empty std_errs: `{}`".format(out)
+
+    @buck_test(inplace=True)
+    async def test_what_ran_json_target_with_test_cases(buck: Buck) -> None:
+        await buck.test("fbcode//buck2/tests/targets/rules/go/test:test")
+        out = await buck.log("what-ran", "--format", "json")
+        out = [line.strip() for line in out.stdout.splitlines()]
+        out = [json.loads(line) for line in out if line]
+        out = [repro for repro in out if repro.get("reason", "").startswith("test.")]
+        assert len(out) <= 2, "out should have at most 2 test lines: `{}`".format(out)
+
+        repros = {repro["reason"]: repro for repro in out}
+
+        # test discovery
+        discovery = repros.get("test.discovery")
+        # TODO(T205964663)
+        if discovery is not None:
+            assert discovery["identity"] == "buck2/tests/targets/rules/go/test:test"
+            assert discovery["reproducer"]["executor"] == "Local"
+
+        # test running
+        repro = repros["test.run"]
+        assert repro["reason"] == "test.run"
+        assert repro["identity"] == "buck2/tests/targets/rules/go/test:test"
+        assert repro["reproducer"]["executor"] == "Local"
+        assert repro["extra"]["testcases"] == ["TestFoo"]
+
+    @buck_test(inplace=True)
+    async def test_what_ran_csv_target_with_test_cases(buck: Buck) -> None:
+        await buck.test("fbcode//buck2/tests/targets/rules/go/test:test")
+        out = await buck.log("what-ran", "--format", "csv")
+        out = [line.strip() for line in out.stdout.splitlines()]
+        header = ["reason", "identity", "executor", "reproducer"]
+        out = [dict(zip(header, record)) for record in csv.reader(out) if record]
+        assert out[0] == dict(
+            zip(header, header)
+        ), "ensure that first entry in csv is the header"
+        out = [repro for repro in out if repro.get("reason", "").startswith("test.")]
+        assert len(out) <= 2, "out should have at most 2 test lines: `{}`".format(out)
+
+        repros = {repro["reason"]: repro for repro in out}
+
+        # test discovery
+        discovery = repros.get("test.discovery")
+        # TODO(T205964663)
+        assert (
+            discovery is None
+            or discovery["identity"] == "buck2/tests/targets/rules/go/test:test"
+        )
+
+        # test running
+        repro = repros["test.run"]
+        assert repro["reason"] == "test.run"
+        assert repro["identity"] == "buck2/tests/targets/rules/go/test:test"
+
+
+# TODO: This would be more reliable if it were an isolated test.
+@buck_test(inplace=True)
+async def test_what_ran_local(buck: Buck) -> None:
+    target = "fbcode//buck2/tests/targets/rules/genrule:mktemp"
+    await buck.build(
+        target,
+        "--no-remote-cache",
+    )
+    out = await buck.log("what-ran")
+    out = [line.strip() for line in out.stdout.splitlines() if target in line]
+    assert len(out) == 1
+
+    assert "\tlocal\t" in out[0]
+    assert "\tre\t" not in out[0]
diff --git a/tests/e2e/targets_command/BUCK b/tests/e2e/targets_command/BUCK
new file mode 100644
index 0000000000000..ee4769e993bf9
--- /dev/null
+++ b/tests/e2e/targets_command/BUCK
@@ -0,0 +1,13 @@
+load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test")
+
+oncall("build_infra")
+
+buck2_e2e_test(
+    name = "test_targets",
+    srcs = ["test_targets.py"],
+)
+
+buck2_e2e_test(
+    name = "test_targets_inplace",
+    srcs = ["test_targets_inplace.py"],
+)
diff --git a/tests/e2e/targets_command/test_targets.py b/tests/e2e/targets_command/test_targets.py
new file mode 100644
index 0000000000000..e0453d63926e5
--- /dev/null
+++ b/tests/e2e/targets_command/test_targets.py
@@ -0,0 +1,251 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
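+
+# Target-hash semantics under test: with `--target-hash-file-mode PATHS_ONLY`
+# the hash is computed from input paths rather than file contents, and
+# `--target-hash-modified-paths` simulates edits, so a target's hash should
+# change iff it is, or depends on, a target whose sources were "modified"
+# (hashing is recursive by default; `--target-hash-recursive=false` hashes
+# only the target itself).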
+ +# pyre-strict + + +import gzip +import json +import re +import subprocess +import tempfile +from typing import List + +import pytest + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.api.buck_result import BuckResult +from buck2.tests.e2e_util.buck_workspace import buck_test + +FOO_UNMODIFIED = ["4", "5", "6", "7"] +BAR_UNMODIFIED = ["1", "2", "3", "4", "5"] + + +# TODO: Make this test isolated, i.e. inplace=False. +@buck_test(inplace=True) +@pytest.mark.parametrize( + "rule, unmodified, src_changed", + [ + ("1", FOO_UNMODIFIED, "foo.txt"), + ("2", FOO_UNMODIFIED, "foo.txt"), + ("3", FOO_UNMODIFIED, "foo.txt"), + ("4", FOO_UNMODIFIED, "foo.txt"), + ("5", FOO_UNMODIFIED, "foo.txt"), + ("6", FOO_UNMODIFIED, "foo.txt"), + ("7", FOO_UNMODIFIED, "foo.txt"), + ("1", BAR_UNMODIFIED, "bar.txt"), + ("2", BAR_UNMODIFIED, "bar.txt"), + ("3", BAR_UNMODIFIED, "bar.txt"), + ("4", BAR_UNMODIFIED, "bar.txt"), + ("5", BAR_UNMODIFIED, "bar.txt"), + ("6", BAR_UNMODIFIED, "bar.txt"), + ("7", BAR_UNMODIFIED, "bar.txt"), + ], +) +async def test_configured_target_hashing( + buck: Buck, + rule: str, + unmodified: List[str], + src_changed: str, +) -> None: + target = "fbcode//buck2/tests/targets/target_hashing:rule{}".format(rule) + result = await buck.targets( + target, + "--show-target-hash", + "--json", + "--target-hash-file-mode", + "PATHS_ONLY", + ) + + # Modify a target + modified_result = await buck.targets( + target, + "--show-target-hash", + "--json", + "--target-hash-file-mode", + "PATHS_ONLY", + "--target-hash-modified-paths", + "buck2/tests/targets/target_hashing/{}".format(src_changed), + ) + output = json.loads(result.stdout) + modified_output = json.loads(modified_result.stdout) + + # Hash should change iff the target is the modified target or depends on the modified target + if rule in unmodified: + assert output[0]["buck.target_hash"] == modified_output[0]["buck.target_hash"] + else: + assert output[0]["buck.target_hash"] != modified_output[0]["buck.target_hash"] + + +# TODO: Make this test isolated, i.e. inplace=False. 
+@buck_test(inplace=True) +@pytest.mark.parametrize( + "rule, unmodified, src_changed", + [ + ("1", FOO_UNMODIFIED, "foo.txt"), + ("2", FOO_UNMODIFIED, "foo.txt"), + ("3", FOO_UNMODIFIED, "foo.txt"), + ("4", FOO_UNMODIFIED, "foo.txt"), + ("5", FOO_UNMODIFIED, "foo.txt"), + ("6", FOO_UNMODIFIED, "foo.txt"), + ("7", FOO_UNMODIFIED, "foo.txt"), + ("1", BAR_UNMODIFIED, "bar.txt"), + ("2", BAR_UNMODIFIED, "bar.txt"), + ("3", BAR_UNMODIFIED, "bar.txt"), + ("4", BAR_UNMODIFIED, "bar.txt"), + ("5", BAR_UNMODIFIED, "bar.txt"), + ("6", BAR_UNMODIFIED, "bar.txt"), + ("7", BAR_UNMODIFIED, "bar.txt"), + ], +) +async def test_unconfigured_target_hashing( + buck: Buck, + rule: str, + unmodified: List[str], + src_changed: str, +) -> None: + + target = "fbcode//buck2/tests/targets/target_hashing:rule{}".format(rule) + result = await buck.targets( + target, + "--show-unconfigured-target-hash", + "--json", + "--target-hash-file-mode", + "PATHS_ONLY", + ) + + # Modify a target + modified_result = await buck.targets( + target, + "--show-unconfigured-target-hash", + "--json", + "--target-hash-file-mode", + "PATHS_ONLY", + "--target-hash-modified-paths", + "buck2/tests/targets/target_hashing/{}".format(src_changed), + ) + output = json.loads(result.stdout) + modified_output = json.loads(modified_result.stdout) + + # Hash should change iff the target is the modified target or depends on the modified target + if rule in unmodified: + assert output[0]["buck.target_hash"] == modified_output[0]["buck.target_hash"] + else: + assert output[0]["buck.target_hash"] != modified_output[0]["buck.target_hash"] + + +@buck_test(inplace=True) +async def test_configured_ignores_unconfigured(buck: Buck) -> None: + target = "fbcode//buck2/tests/targets/target_hashing:rule8" + pre_unconfigured = await buck.targets( + target, "--show-unconfigured-target-hash", "--json" + ) + pre_configured = await buck.targets(target, "--show-target-hash", "--json") + + config = "-ctesting.hashing=1" + post_unconfigured = await buck.targets( + target, config, "--show-unconfigured-target-hash", "--json" + ) + post_configured = await buck.targets(target, config, "--show-target-hash", "--json") + + def grab(x: BuckResult) -> str: + return json.loads(x.stdout)[0]["buck.target_hash"] + + # Hashes differ configured vs unconfigured + assert grab(pre_unconfigured) != grab(pre_configured) + # We spot the unconfigured change + assert grab(pre_unconfigured) != grab(post_unconfigured) + # But the configured remains consistent + assert grab(pre_configured) == grab(post_configured) + + +@buck_test(inplace=True) +async def test_non_recursive_target_hash(buck: Buck) -> None: + target = "fbcode//buck2/tests/targets/target_hashing:rule9" + pre_recursive = await buck.targets(target, "--show-target-hash", "--json") + pre_direct = await buck.targets( + target, "--show-target-hash", "--json", "--target-hash-recursive=false" + ) + + config = "-ctesting.hashing=1" + post_recursive = await buck.targets(target, config, "--show-target-hash", "--json") + post_direct = await buck.targets( + target, config, "--show-target-hash", "--json", "--target-hash-recursive=false" + ) + + def grab(x: BuckResult) -> str: + return json.loads(x.stdout)[0]["buck.target_hash"] + + # Hashes changed for recursive + assert grab(pre_recursive) != grab(post_recursive) + # But not for non-recursive + assert grab(pre_direct) == grab(post_direct) + + +@buck_test(inplace=True) +async def test_show_inputs(buck: Buck) -> None: + target = "fbcode//buck2/tests/targets/target_hashing:rule1" + result = await 
buck.targets(target, "--json") + assert ( + "fbcode//buck2/tests/targets/target_hashing:rule5" + in json.loads(result.stdout)[0]["buck.deps"] + ) + assert json.loads(result.stdout)[0]["buck.inputs"] == [ + "fbcode//buck2/tests/targets/target_hashing/foo.txt" + ] + + +@buck_test(inplace=True) +async def test_streaming_uncached(buck: Buck) -> None: + # This test aims to check the kind of things TD might do - the streaming plus other related features + with tempfile.NamedTemporaryFile() as file: + await buck.targets( + "fbcode//buck2:buck2", + "--json-lines", + "--streaming", + "--imports", + "--output-attribute", + "^buck\\.|name", + "--no-cache", + "--show-unconfigured-target-hash", + "--output=" + file.name, + ) + found = 0 + for x in file.readlines(): + x = json.loads(x) + if x.get("buck.package") == "fbcode//buck2": + if x.get("name") == "buck2": + assert re.match("^[0-9a-f]+$", x["buck.target_hash"]) + found += 1 + elif "buck.imports" in x: + assert "prelude//prelude.bzl" in x["buck.imports"] + found += 1 + assert found == 2 + + +@buck_test(inplace=True) +async def test_compression(buck: Buck) -> None: + with tempfile.TemporaryDirectory() as name: + await buck.targets("fbcode//buck2:", "--output=" + name + "/out.txt") + await buck.targets( + "fbcode//buck2:", "--output=" + name + "/out.txt.gz", "--compression=gzip" + ) + await buck.targets( + "fbcode//buck2:", "--output=" + name + "/out.txt.zst", "--compression=zstd" + ) + with open(name + "/out.txt", "rb") as file: + out_uncompressed = file.read() + with gzip.open(name + "/out.txt.gz") as file: + out_gzip = file.read() + subprocess.run( + ["zstd", "-d", name + "/out.txt.zst", "-o", name + "/out.txt.unzst"], + check=True, + ) + with open(name + "/out.txt.unzst", "rb") as file: + out_zstd = file.read() + assert out_uncompressed == out_gzip + assert out_uncompressed == out_zstd diff --git a/tests/e2e/targets_command/test_targets_inplace.py b/tests/e2e/targets_command/test_targets_inplace.py new file mode 100644 index 0000000000000..ffc29c6426772 --- /dev/null +++ b/tests/e2e/targets_command/test_targets_inplace.py @@ -0,0 +1,166 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
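+# Inplace `buck2 targets` tests: package listing, error reporting for missing
+# packages and targets, --config-gated targets, and parity of --show-output
+# and --show-full-output with the build report.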
+ +# pyre-strict + + +import os + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.api.buck_result import ExitCodeV2 +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test, get_mode_from_platform + + +@buck_test(inplace=True) +async def test_targets(buck: Buck) -> None: + result = await buck.targets("fbcode//buck2/tests/targets/commands:") + + targets = [ + "fbcode//buck2/tests/targets/commands:dynamic", + "fbcode//buck2/tests/targets/commands:exported", + "fbcode//buck2/tests/targets/commands:lib", + ] + + for target in targets: + assert target in result.stdout + + +@buck_test(inplace=True) +async def test_targets_errors(buck: Buck) -> None: + await expect_failure( + buck.targets( + "fbcode//buck2/tests/targets/commands:", + "fbcode//buck2/tests/targets/non_existent_path:", + ), + exit_code=ExitCodeV2.USER_ERROR, + ) + + +@buck_test(inplace=True) +async def test_explicit_targets_errors(buck: Buck) -> None: + await expect_failure( + buck.targets( + "fbcode//buck2/tests/targets/commands:notarealtarget", + ), + exit_code=ExitCodeV2.USER_ERROR, + stderr_regex="Unknown target `notarealtarget` from package `fbcode//buck2/tests/targets/commands`", + ) + + +@buck_test(inplace=True) +async def test_targets_with_config_value(buck: Buck) -> None: + targets_enabled_result = await buck.targets( + "--config", + "user.targets_enabled=true", + "fbcode//buck2/tests/targets/commands:", + ) + assert ( + "fbcode//buck2/tests/targets/commands:config_defined_target" + in targets_enabled_result.stdout + ) + + targets_disabled_result = await buck.targets( + "--config", + "user.targets_enabled=false", + "fbcode//buck2/tests/targets/commands:", + ) + assert ( + "fbcode//buck2/tests/targets/commands:config_defined_target" + not in targets_disabled_result.stdout + ) + + targets_cell_rel_result = await buck.targets( + "--config", + "fbsource//user.targets_enabled=true", + "fbcode//buck2/tests/targets/commands:", + ) + assert targets_cell_rel_result.stdout == targets_disabled_result.stdout + + +@buck_test(inplace=True) +async def test_targets_root_relative_from_fbcode(buck: Buck) -> None: + result = await buck.targets("//buck2/tests/targets/commands:") + + targets = [ + "fbcode//buck2/tests/targets/commands:dynamic", + "fbcode//buck2/tests/targets/commands:exported", + "fbcode//buck2/tests/targets/commands:lib", + ] + + for target in targets: + assert target in result.stdout + + +@buck_test(inplace=True) +async def test_targets_show_output(buck: Buck) -> None: + for target in [ + "fbcode//buck2/tests/targets/rules/genrule:executable_helper", + "fbcode//buck2/tests/targets/rules/export_file:exported.txt", + ]: + build_result = await buck.build(target, "--show-output") + targets_result = await buck.targets(target, "--show-output") + + build_report = build_result.get_build_report() + build_report_outputs = [ + (target, str(output)) for output in build_report.outputs_for_target(target) + ] + show_output_outputs = [ + (target, os.path.join(build_report.root, output)) + for target, output in targets_result.get_target_to_build_output().items() + ] + + assert show_output_outputs == build_report_outputs + + +@buck_test(inplace=True) +async def test_targets_show_output_subtargets(buck: Buck) -> None: + TARGET = "fbcode//buck2/tests/targets/rules/cxx:my_cpp1" + SUBTARGET = "compilation-database" + TARGET_WITH_SUBTARGET = ( + "fbcode//buck2/tests/targets/rules/cxx:my_cpp1[compilation-database]" + ) + + build_result = await buck.build( + 
TARGET_WITH_SUBTARGET, "--show-output", get_mode_from_platform() + ) + targets_result = await buck.targets( + TARGET_WITH_SUBTARGET, "--show-output", get_mode_from_platform() + ) + + build_report = build_result.get_build_report() + build_report_outputs = [ + (TARGET_WITH_SUBTARGET, str(output)) + for output in build_report.outputs_for_target(TARGET, SUBTARGET) + ] + show_output_outputs = [ + (target, os.path.join(build_report.root, output)) + for target, output in targets_result.get_target_to_build_output().items() + ] + + assert show_output_outputs == build_report_outputs + + +@buck_test(inplace=True) +async def test_targets_show_full_output(buck: Buck) -> None: + for target in [ + "fbcode//buck2/tests/targets/rules/genrule:executable_helper", + "fbcode//buck2/tests/targets/rules/export_file:exported.txt", + ]: + build_result = await buck.build(target, "--show-full-output") + targets_result = await buck.targets(target, "--show-full-output") + + build_report = build_result.get_build_report() + build_report_outputs = [ + (target, str(output)) for output in build_report.outputs_for_target(target) + ] + show_output_outputs = [ + (target, os.path.join(build_report.root, output)) + for target, output in targets_result.get_target_to_build_output().items() + ] + + assert show_output_outputs == build_report_outputs diff --git a/tests/e2e/test/BUCK b/tests/e2e/test/BUCK new file mode 100644 index 0000000000000..31a8236b267da --- /dev/null +++ b/tests/e2e/test/BUCK @@ -0,0 +1,55 @@ +load("@fbcode//buck2/tests:buck_e2e.bzl", "buck2_e2e_test") + +oncall("build_infra") + +buck2_e2e_test( + name = "test_test_inplace", + srcs = ["test_test_inplace.py"], + data = "//buck2/tests/targets:isolated_targets", + test_with_deployed_buck2 = True, + use_compiled_buck2_client_and_tpx = True, +) + +buck2_e2e_test( + name = "test_test_coverage", + srcs = glob([ + "test_test_coverage/**/*.py", + ]), + skip_for_os = [ + "darwin", + "windows", + ], + test_with_deployed_buck2 = True, + use_compiled_buck2_client_and_tpx = True, +) + +buck2_e2e_test( + name = "test_test_artifacts", + srcs = ["test_test_artifacts.py"], + env = { + "TESTX_BIN": "$(exe_target fbsource//xplat/tools:testx)", + }, + skip_for_os = [ + "darwin", + "windows", + ], + test_with_deployed_buck2 = True, + use_compiled_buck2_client_and_tpx = True, + deps = [ + "//buck2/tests/e2e_util:utils", + "//remote_execution/client_lib/wrappers/python:py_re_client_lib", + "//remote_execution/lib/if:common-py3-types", + "//remote_execution/lib/if/client_lib:re-client-lib-if-py3-types", + "//tae/testx:py", + ], +) + +buck2_e2e_test( + name = "test_testname_formatting", + srcs = ["test_testname_formatting.py"], + test_with_deployed_buck2 = True, + use_compiled_buck2_client_and_tpx = True, + deps = [ + "//remote_execution/client_lib/wrappers/python:py_re_client_lib", + ], +) diff --git a/tests/e2e/test/test_test_artifacts.py b/tests/e2e/test/test_test_artifacts.py new file mode 100644 index 0000000000000..a776ad5b16ecf --- /dev/null +++ b/tests/e2e/test/test_test_artifacts.py @@ -0,0 +1,165 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
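+# Tests for test-run artifacts: artifact directories are materialized by
+# default, skipped when artifacts go straight to CAS, and CAS handles carry
+# a sufficiently long TTL (verified through the RE client).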
+ +# pyre-strict + + +import os +import re +import typing +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test +from buck2.tests.e2e_util.helper.utils import filter_events +from facebook.remote_execution.common import thrift_types, types +from facebook.remote_execution.re_client_lib_if.client.types import ( + EmbeddedCASDaemonClientCfg, +) +from facebook.remote_execution.re_client_lib_if.remote_execution_metadata.types import ( + RemoteExecutionMetadata, +) +from remote_execution.client_lib.wrappers.python.re_client import ( + PyREClientParams, + REClient, +) + +from tae.testx.py.testx_helpers import TestXClient + + +ARTIFACTS_DIR_NAME = "artifacts_directory" +ANNOTATIONS_DIR_NAME = "artifact_annotations_directory" +TPX_EXEC_DIR = "tpx_execution_dir" +USE_CASE_ID = "tpx-default" +EMBEDDED_CAS_NAME = "tpx" + + +@buck_test(inplace=True) +async def test_produce_artifacts(buck: Buck) -> None: + await buck.test( + "fbcode//buck2/tests/targets/rules/python_test:test_produce_artifacts", + ) + + +@buck_test(inplace=True) +async def test_remote_artifact_directory_is_materialized_by_default(buck: Buck) -> None: + await buck.test( + "fbcode//buck2/tests/targets/rules/python_test:test_produce_artifacts", + "--remote-only", + ) + + materialized_paths = await filter_events( + buck, "Event", "data", "SpanEnd", "data", "Materialization", "path" + ) + + assert_has_dir(ARTIFACTS_DIR_NAME, materialized_paths) + assert_has_dir(ANNOTATIONS_DIR_NAME, materialized_paths) + assert_has_dir(TPX_EXEC_DIR, materialized_paths) + + +@buck_test(inplace=True) +async def test_remote_artifact_directory_is_not_materialized_when_cas_support_enabled( + buck: Buck, +) -> None: + await buck.test( + "fbcode//buck2/tests/targets/rules/python_test:test_produce_artifacts_in_cas", + "--remote-only", + ) + + materialized_paths = await filter_events( + buck, "Event", "data", "SpanEnd", "data", "Materialization", "path" + ) + assert_has_no_dir(ARTIFACTS_DIR_NAME, materialized_paths) + assert_has_dir(ANNOTATIONS_DIR_NAME, materialized_paths) + assert_has_dir(TPX_EXEC_DIR, materialized_paths) + + +@buck_test(inplace=True) +async def test_remote_artifact_has_cas_handle_with_right_ttl(buck: Buck) -> None: + output = await buck.test( + "fbcode//buck2/tests/targets/rules/python_test:test_produce_artifacts_in_cas", + "--remote-only", + ) + + run_id = extract_test_run_id(output.stderr) + tests_artifact_data = get_testx_client().artifacts_list(run_id) + re_metadata = RemoteExecutionMetadata( + use_case_id=USE_CASE_ID, + ) + re_client = get_re_client(USE_CASE_ID) + + expected_ttl = 13 * 86400 # TTL should be more than 13 days (in seconds) + + for data in tests_artifact_data: + for artifact in data.artifacts: + assert artifact.handle.cas_digest + tdigest = py3_to_python_tdigest(artifact.handle.cas_digest) + digest_with_ttl = await get_digests_ttl(re_client, re_metadata, [tdigest]) + assert len(digest_with_ttl) == 1 + assert digest_with_ttl[0].ttl >= expected_ttl + + +######### +# Helpers +######### + + +def has_dir(dir_name: str, paths: typing.List[str]) -> bool: + return any(dir_name in path for path in paths) + + +def assert_has_dir(dir_name: str, paths: typing.List[str]) -> None: + assert has_dir(dir_name, paths), f"Directory {dir_name} not found in {paths}" + + +def assert_has_no_dir(dir_name: str, paths: typing.List[str]) -> None: + assert not has_dir(dir_name, paths), f"Directory {dir_name} found in {paths}" + + +def get_path_from_env(envvar: str) -> Path: + 
path = os.getenv(envvar) + assert path + return Path(path) + + +def py3_to_python_tdigest(testx_cas_digest: thrift_types.TDigest) -> types.TDigest: + return types.TDigest( + hash=testx_cas_digest.hash, + size_in_bytes=testx_cas_digest.size_in_bytes, + ) + + +def get_testx_binary() -> Path: + return get_path_from_env("TESTX_BIN") + + +def get_testx_client() -> TestXClient: + return TestXClient(binary=get_testx_binary(), caller="buck-e2e") + + +def get_re_client(use_case: str) -> REClient: + client_params = PyREClientParams().with_embedded_cas( + EmbeddedCASDaemonClientCfg(name=EMBEDDED_CAS_NAME) + ) + return REClient(use_case, re_client_params=client_params) + + +async def get_digests_ttl( + re_client: REClient, + metadata: RemoteExecutionMetadata, + digests: typing.Sequence[types.TDigest], +) -> typing.Sequence[types.TDigestWithTtl]: + response = await re_client.get_digests_ttl(metadata, digests) + assert response + return response.digests_with_ttl + + +def extract_test_run_id(haystack: str) -> int: + matches = re.findall("^.*Test session:.*/(\\d+).*$", haystack, re.MULTILINE) + match = next(iter(matches), None) + assert match, f"Test run ID not found in {haystack}" + return int(match) diff --git a/tests/e2e/test/test_test_coverage/cxx_coverage_only/test_test_coverage_cpp_cxx_only.py b/tests/e2e/test/test_test_coverage/cxx_coverage_only/test_test_coverage_cpp_cxx_only.py new file mode 100644 index 0000000000000..d57af225c2603 --- /dev/null +++ b/tests/e2e/test/test_test_coverage/cxx_coverage_only/test_test_coverage_cpp_cxx_only.py @@ -0,0 +1,480 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
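+# C++ selective-coverage tests driven by fbcode.cxx_coverage_only: filtering
+# by path, source file, and header file, including link-group (dev-lg) builds
+# where headers cross link-group boundaries.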
+ +# pyre-strict + + +import json +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + +from .test_test_coverage_utils import collect_coverage_for + + +@buck_test(inplace=True) +async def test_cpp_test_coverage(buck: Buck, tmp_path: Path) -> None: + coverage_file = tmp_path / "coverage.txt" + await buck.test( + "@fbcode//mode/dbgo-cov", + "fbcode//buck2/tests/targets/rules/cxx:cpp_test_pass", + "--", + "--collect-coverage", + f"--coverage-output={coverage_file}", + ) + paths = [] + with open(coverage_file) as results: + for line in results: + paths.append(json.loads(line)["filepath"]) + + assert "fbcode/buck2/tests/targets/rules/cxx/cpp_test_pass.cpp" in paths, str(paths) + assert "fbcode/common/gtest/LightMain.cpp" in paths, str(paths) + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_filter_by_path_outside_target( + buck: Buck, + tmp_path: Path, +) -> None: + paths = await collect_coverage_for( + buck, tmp_path, "fbcode//buck2/tests/targets/rules/cxx:cpp_test_pass", ["folly"] + ) + + expected_paths = [p for p in paths if p.startswith("fbcode/folly")] + assert len(expected_paths) > 0, str(paths) + unexpected_paths = [p for p in paths if not p.startswith("fbcode/folly")] + assert len(unexpected_paths) == 0, str(paths) + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_filter_by_path_of_target( + buck: Buck, tmp_path: Path +) -> None: + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//buck2/tests/targets/rules/cxx:cpp_test_pass", + ["buck2/tests"], + ) + + expected_paths = [p for p in paths if p.startswith("fbcode/buck2/tests")] + assert len(expected_paths) > 0, str(paths) + unexpected_paths = [p for p in paths if not p.startswith("fbcode/buck2/tests")] + assert len(unexpected_paths) == 0, str(paths) + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_filter_by_path_of_target_with_dev_lg( + buck: Buck, + tmp_path: Path, +) -> None: + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//buck2/tests/targets/rules/cxx:cpp_test_pass", + mode="@fbcode//mode/dev-lg", + filter=["buck2/tests"], + ) + + expected_paths = [p for p in paths if p.startswith("fbcode/buck2/tests")] + assert len(expected_paths) > 0, str(paths) + unexpected_paths = [p for p in paths if not p.startswith("fbcode/buck2/tests")] + assert len(unexpected_paths) == 0, str(paths) + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_filter_by_path_in_link_group_with_dev_lg( + buck: Buck, + tmp_path: Path, +) -> None: + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//buck2/tests/targets/rules/cxx:cpp_test_pass", + mode="@fbcode//mode/dev-lg", + filter=["folly"], + ) + + expected_paths = [p for p in paths if p.startswith("fbcode/folly")] + assert len(expected_paths) > 0, str(paths) + unexpected_paths = [p for p in paths if not p.startswith("fbcode/folly")] + assert len(unexpected_paths) == 0, str(paths) + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_filter_by_file_of_target_with_dev_lg( + buck: Buck, + tmp_path: Path, +) -> None: + source_name = "buck2/tests/targets/rules/cxx/cpp_test_pass.cpp" + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//buck2/tests/targets/rules/cxx:cpp_test_pass", + mode="@fbcode//mode/dev-lg", + filter=[source_name], + ) + + fbcode_source_name = f"fbcode/{source_name}" + expected_paths = [p for p in paths if p == fbcode_source_name] + assert len(expected_paths) > 0, str(paths) + 
unexpected_paths = [p for p in paths if p != fbcode_source_name] + assert len(unexpected_paths) == 0, str(paths) + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_filter_by_source_file_in_link_group_with_dev_lg( + buck: Buck, + tmp_path: Path, +) -> None: + source_name = "folly/String.cpp" + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//buck2/tests/targets/rules/cxx:cpp_test_pass", + mode="@fbcode//mode/dev-lg", + filter=[source_name], + ) + + fbcode_source_name = f"fbcode/{source_name}" + expected_paths = [p for p in paths if p == fbcode_source_name] + assert len(expected_paths) > 0, str(paths) + unexpected_paths = [ + p for p in paths if not p == fbcode_source_name and not p.endswith(".h") + ] + assert len(unexpected_paths) == 0, str(paths) + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_filter_by_header_file_in_link_group_with_dev_lg( + buck: Buck, + tmp_path: Path, +) -> None: + header_name = "testing_frameworks/code_coverage/playground/link_groups/LibraryRightRightOnlyUsedHere.h" + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//testing_frameworks/code_coverage/playground/link_groups:test_with_link_groups", + mode="@fbcode//mode/dev-lg", + filter=[header_name], + ) + + assert len(paths) == 5, str(paths) + # because it belongs to a target that has a header file selected for coverage + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/LibraryRightRight.cpp" + in paths + ) + # because it belongs to a target that has a dependency that contains a header + # file selected for coverage + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/LibraryRight.cpp" + in paths + ) + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/TestWithLinkGroups.cpp" + in paths + ) + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/TestWithLinkGroups.h" + in paths + ) + # because it has executable code used by LibraryRight.cpp + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/LibraryRightLeftUsedInOtherLinkGroup.h" + in paths + ) + # because they are not in the transitive rdeps of the cxx_library that has the header + # that was selected for coverage + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/LibraryRightLeft.cpp" + not in paths + ) + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/LibraryLeft.cpp" + not in paths + ) + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_filter_by_header_file_defined_in_one_link_group_and_used_in_another( + buck: Buck, + tmp_path: Path, +) -> None: + header_name = "testing_frameworks/code_coverage/playground/link_groups/LibraryRightLeftUsedInOtherLinkGroup.h" + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//testing_frameworks/code_coverage/playground/link_groups:test_with_header_used_in_different_link_group", + mode="@fbcode//mode/dev-lg", + filter=[header_name], + ) + + assert len(paths) == 5, str(paths) + # because it belongs to a target that has a header file selected for coverage + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/LibraryRightLeft.cpp" + in paths + ) + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/LibraryRightLeftUsedInOtherLinkGroup.h" + in paths + ) + # because it belongs to a target that has a dependency that contains a header + # file selected for coverage + assert ( + 
"fbcode/testing_frameworks/code_coverage/playground/link_groups/LibraryRight.cpp" + in paths + ) + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/TestWithLinkGroups.cpp" + in paths + ) + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/TestWithLinkGroups.h" + in paths + ) + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_filter_by_header_file_defined_in_one_link_group_and_used_in_test_binary_link_group( + buck: Buck, + tmp_path: Path, +) -> None: + header_name = "testing_frameworks/code_coverage/playground/link_groups/LibraryRightLeftUsedInOtherLinkGroup.h" + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//testing_frameworks/code_coverage/playground/link_groups:test_with_header_used_in_different_link_group", + mode="@fbcode//mode/dev-lg", + filter=[header_name], + ) + + assert len(paths) == 5, str(paths) + # because it belongs to a target that has a header file selected for coverage + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/LibraryRightLeft.cpp" + in paths + ) + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/LibraryRightLeftUsedInOtherLinkGroup.h" + in paths + ) + # because it belongs to a target that has a dependency that contains a header + # file selected for coverage + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/LibraryRight.cpp" + in paths + ) + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/TestWithLinkGroups.cpp" + in paths + ) + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/TestWithLinkGroups.h" + in paths + ) + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_filter_by_file(buck: Buck, tmp_path: Path) -> None: + source_name = "buck2/tests/targets/rules/cxx/cpp_test_pass.cpp" + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//buck2/tests/targets/rules/cxx:cpp_test_pass", + [source_name], + ) + + assert len(paths) == 1, str(paths) + assert paths[0] == f"fbcode/{source_name}" + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_when_filter_by_test_binary_header_file( + buck: Buck, tmp_path: Path +) -> None: + header_name = "testing_frameworks/code_coverage/playground/Test.h" + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//testing_frameworks/code_coverage/playground:test", + [header_name], + ) + + assert len(paths) == 2, str(paths) + header_name = f"fbcode/{header_name}" + assert header_name in paths + source_name = "fbcode/testing_frameworks/code_coverage/playground/Test.cpp" + assert source_name in paths + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_when_filter_by_library_header_file( + buck: Buck, tmp_path: Path +) -> None: + header_name = "testing_frameworks/code_coverage/playground/ThirdLevelDep.h" + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//testing_frameworks/code_coverage/playground:test", + [header_name], + ) + + assert len(paths) == 9, str(paths) + # because it belongs to a target that has a header file selected for coverage + assert ( + "fbcode/testing_frameworks/code_coverage/playground/ThirdLevelDep.cpp" in paths + ) + # because it's header file selected for coverage and it has executable code + assert "fbcode/testing_frameworks/code_coverage/playground/ThirdLevelDep.h" in paths + assert ( + "fbcode/testing_frameworks/code_coverage/playground/ThirdLevelDepPrivate.h" + in paths + ) + # because it belongs to a target that has a 
dependency that contains a header + # file selected for coverage + assert ( + "fbcode/testing_frameworks/code_coverage/playground/SecondLevelDepIncludesThirdLevelDep.cpp" + in paths + ) + assert ( + "fbcode/testing_frameworks/code_coverage/playground/SecondLevelDepDoesNotIncludeThirdLevelDep.cpp" + in paths + ) + assert ( + "fbcode/testing_frameworks/code_coverage/playground/FirstLevelDep.cpp" in paths + ) + assert "fbcode/testing_frameworks/code_coverage/playground/Test.cpp" in paths + # because a cpp file (FirstLevelDep.cpp) that included it called a template function + # defined inside it + assert "fbcode/testing_frameworks/code_coverage/playground/FirstLevelDep.h" in paths + assert "fbcode/testing_frameworks/code_coverage/playground/Test.h" in paths + + # because they are not in the transitive rdeps of the cxx_library that has the header + # that was selected for coverage + assert ( + "fbcode/testing_frameworks/code_coverage/playground/SecondLevelDepWithoutDeps.cpp" + not in paths + ) + assert ( + "fbcode/testing_frameworks/code_coverage/playground/SecondLevelDepWithoutDeps.h" + not in paths + ) + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_when_filter_by_library_private_header_file( + buck: Buck, tmp_path: Path +) -> None: + private_header_name = ( + "testing_frameworks/code_coverage/playground/ThirdLevelDepPrivate.h" + ) + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//testing_frameworks/code_coverage/playground:test", + [private_header_name], + ) + + assert len(paths) == 3, str(paths) + assert f"fbcode/{private_header_name}" in paths + assert ( + "fbcode/testing_frameworks/code_coverage/playground/ThirdLevelDep.cpp" in paths + ) + assert ( + "fbcode/testing_frameworks/code_coverage/playground/ThirdLevelDepPrivate.h" + in paths + ) + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_when_filter_by_header_file_in_headers_only_library( + buck: Buck, tmp_path: Path +) -> None: + header_name = "testing_frameworks/code_coverage/playground/LibraryWithOnlyHeaders.h" + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//testing_frameworks/code_coverage/playground:test_with_dep_with_only_headers", + [header_name], + ) + + assert len(paths) == 3, str(paths) + assert f"fbcode/{header_name}" in paths + assert ( + "fbcode/testing_frameworks/code_coverage/playground/TestWithDepWithOnlyHeaders.cpp" + in paths + ) + assert ( + "fbcode/testing_frameworks/code_coverage/playground/TestWithDepWithOnlyHeaders.h" + in paths + ) + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_filter_by_file_with_opt_mode( + buck: Buck, + tmp_path: Path, +) -> None: + source_name = "buck2/tests/targets/rules/cxx/cpp_test_pass.cpp" + paths = await collect_coverage_for( + buck, + tmp_path, + target="fbcode//buck2/tests/targets/rules/cxx:cpp_test_pass", + filter=[source_name], + mode="@fbcode//mode/opt", + ) + + assert len(paths) == 1, str(paths) + assert paths[0] == f"fbcode/{source_name}" + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_filter_by_file_and_path( + buck: Buck, tmp_path: Path +) -> None: + source_name = "buck2/tests/targets/rules/cxx/cpp_test_pass.cpp" + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//buck2/tests/targets/rules/cxx:cpp_test_pass", + [source_name, "folly"], + ) + + folly_paths = [p for p in paths if p.startswith("fbcode/folly")] + assert len(folly_paths) > 0, f"expected to find some folly sources in {paths}" + + source_path = f"fbcode/{source_name}" + source_paths = [p for p in paths
if p == source_path] + assert len(source_paths) == 1, f"expected to find {source_path} in {paths}" + + unexpected_paths = [ + p for p in paths if p != source_path and not p.startswith("fbcode/folly") + ] + assert ( + len(unexpected_paths) == 0 + ), f"Only coverage for the test source file and files under fbcode/folly should have been collected, but got {unexpected_paths}" + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_xplat_filter_by_file_path( + buck: Buck, + tmp_path: Path, +) -> None: + file_to_collect_coverage = "xplat/testinfra/playground/cpp/ExampleTest.cpp" + paths = await collect_coverage_for( + buck, + tmp_path, + "fbsource//xplat/testinfra/playground/cpp:example_testFbcode", + filter=[file_to_collect_coverage], + ) + + assert set(paths) == { + file_to_collect_coverage, + }, f"Should collect coverage only for {file_to_collect_coverage} but got coverage for {paths}" diff --git a/tests/e2e/test/test_test_coverage/cxx_coverage_only/test_test_coverage_python_cxx_only.py b/tests/e2e/test/test_test_coverage/cxx_coverage_only/test_test_coverage_python_cxx_only.py new file mode 100644 index 0000000000000..7b28ad786268c --- /dev/null +++ b/tests/e2e/test/test_test_coverage/cxx_coverage_only/test_test_coverage_python_cxx_only.py @@ -0,0 +1,204 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json +import tempfile +from dataclasses import dataclass, field +from typing import List, Optional + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(inplace=True) +async def test_python_coverage(buck: Buck) -> None: + with tempfile.NamedTemporaryFile("w") as covfile: + await buck.test( + "@fbcode//mode/dbgo-cov", + "fbcode//buck2/tests/targets/rules/python/coverage:test", + "--", + "--collect-coverage", + f"--coverage-output={covfile.name}", + ) + paths = [] + with open(covfile.name) as results: + for line in results: + paths.append(json.loads(line)["filepath"]) + assert "fbcode/buck2/tests/targets/rules/python/coverage/lib.py" in paths, str( + paths + ) + + +@buck_test(inplace=True) +async def test_python_coverage_filtering_by_folder(buck: Buck) -> None: + folder_to_collect = "buck2/tests/targets/rules/python/coverage" + with tempfile.NamedTemporaryFile("w") as covfile: + await buck.test( + "@fbcode//mode/dbgo-cov", + "fbcode//buck2/tests/targets/rules/python/coverage:test", + "-c", + f"fbcode.cxx_coverage_only={folder_to_collect}", + "--", + "--collect-coverage", + f"--coverage-output={covfile.name}", + ) + paths = [] + with open(covfile.name) as results: + for line in results: + paths.append(json.loads(line)["filepath"]) + assert set(paths) == { + f"fbcode/{folder_to_collect}/lib.py", + f"fbcode/{folder_to_collect}/test.py", + }, f"Only folder fbcode/{folder_to_collect} should have coverage, instead got coverage for {str(paths)}" + + +@dataclass +class PythonCoverageResult: + using_new_testpilot_interface_paths: List[str] = field(default_factory=list) + + +async def python_collect_coverage_for( + buck: Buck, + filter: List[str], + target: str = "fbcode//buck2/tests/targets/rules/python/coverage:test", + mode: Optional[str] = None, +) -> PythonCoverageResult: + filter_str = " ".join(filter) + result = PythonCoverageResult() 
+ with tempfile.NamedTemporaryFile("w") as covfile: + buck_args = [mode] if mode else [] + buck_args.extend( + [ + "--config", + "fbcode.coverage_selective=true", + "--config", + f"fbcode.cxx_coverage_only={filter_str}", + target, + "--", + "--collect-coverage", + f"--coverage-output={covfile.name}", + ] + ) + await buck.test(*buck_args) + paths = [] + with open(covfile.name) as results: + for line in results: + paths.append(json.loads(line)["filepath"]) + + result.using_new_testpilot_interface_paths = paths + + return result + + +@buck_test(inplace=True) +async def test_python_coverage_filtering_by_file(buck: Buck) -> None: + file_to_collect_coverage = "buck2/tests/targets/rules/python/coverage/lib.py" + result = await python_collect_coverage_for(buck, [file_to_collect_coverage]) + + fbcode_filename = f"fbcode/{file_to_collect_coverage}" + assert set(result.using_new_testpilot_interface_paths) == { + fbcode_filename + }, f"Only {file_to_collect_coverage} should have coverage, instead got coverage for {str(result.using_new_testpilot_interface_paths)}" + + +@buck_test(inplace=True) +async def test_python_coverage_filtering_by_file_with_base_module_remap( + buck: Buck, +) -> None: + file_to_collect_coverage = "buck2/tests/targets/rules/python/coverage/lib.py" + result = await python_collect_coverage_for( + buck, + target="fbcode//buck2/tests/targets/rules/python/coverage:test_using_base_module_remapped_lib", + filter=[file_to_collect_coverage], + ) + + fbcode_filename = f"fbcode/{file_to_collect_coverage}" + assert set(result.using_new_testpilot_interface_paths) == { + fbcode_filename + }, f"Only {file_to_collect_coverage} should have coverage, instead got coverage for {str(result.using_new_testpilot_interface_paths)}" + + +@buck_test(inplace=True) +async def test_python_coverage_filtering_by_file_with_opt_mode(buck: Buck) -> None: + file_to_collect_coverage = "buck2/tests/targets/rules/python/coverage/lib.py" + result = await python_collect_coverage_for( + buck, + filter=[file_to_collect_coverage], + mode="@fbcode//mode/opt", + ) + + fbcode_filename = f"fbcode/{file_to_collect_coverage}" + assert set(result.using_new_testpilot_interface_paths) == { + fbcode_filename + }, f"Only {file_to_collect_coverage} should have coverage, instead got coverage for {str(result.using_new_testpilot_interface_paths)}" + + +@buck_test(inplace=True) +async def test_python_coverage_filtering_by_file_on_cinder_target(buck: Buck) -> None: + file_to_collect_coverage = "buck2/tests/targets/rules/python/coverage/test.py" + result = await python_collect_coverage_for( + buck, + target="fbcode//buck2/tests/targets/rules/python/coverage:simple_cinder_unittest", + filter=[file_to_collect_coverage], + ) + + fbcode_filename = f"fbcode/{file_to_collect_coverage}" + assert set(result.using_new_testpilot_interface_paths) == { + fbcode_filename + }, f"Only {file_to_collect_coverage} should have coverage, instead got coverage for {str(result.using_new_testpilot_interface_paths)}" + + +@buck_test(inplace=True) +async def test_python_coverage_filtering_by_source_file_on_cpp_dep(buck: Buck) -> None: + file_to_collect_coverage = "buck2/tests/targets/rules/python/coverage/cpp_lib.cpp" + result = await python_collect_coverage_for( + buck, + target="fbcode//buck2/tests/targets/rules/python/coverage:test_with_cpp_dep", + filter=[file_to_collect_coverage], + ) + + fbcode_filename = f"fbcode/{file_to_collect_coverage}" + assert set(result.using_new_testpilot_interface_paths) ==
{ + fbcode_filename + }, f"Only {file_to_collect_coverage} should have coverage, instead got coverage for {str(result.using_new_testpilot_interface_paths)}" + + +@buck_test(inplace=True) +async def test_python_coverage_filtering_by_header_file_on_cpp_dep(buck: Buck) -> None: + header_file = "buck2/tests/targets/rules/python/coverage/cpp_lib.h" + source_file = header_file.replace(".h", ".cpp") + result = await python_collect_coverage_for( + buck, + target="fbcode//buck2/tests/targets/rules/python/coverage:test_with_cpp_dep", + filter=[header_file], + ) + + fbcode_filename = f"fbcode/{source_file}" + assert set(result.using_new_testpilot_interface_paths) == { + fbcode_filename + }, f"Only {source_file} should have coverage, instead got coverage for {str(result.using_new_testpilot_interface_paths)}" + + +@buck_test(inplace=True) +async def test_python_coverage_filtering_by_file_on_ligen_cpp_dep(buck: Buck) -> None: + file_to_collect_coverage = ( + "buck2/tests/targets/rules/python/coverage/adder_ligen.cpp" + ) + result = await python_collect_coverage_for( + buck, + target="fbcode//buck2/tests/targets/rules/python/coverage:test_with_ligen_cpp_dep", + filter=[file_to_collect_coverage], + ) + + fbcode_filename = f"fbcode/{file_to_collect_coverage}" + assert set(result.using_new_testpilot_interface_paths) == { + fbcode_filename + }, f"Only {file_to_collect_coverage} should have coverage, instead got coverage for {str(result.using_new_testpilot_interface_paths)}" diff --git a/tests/e2e/test/test_test_coverage/cxx_coverage_only/test_test_coverage_rust_cxx_only.py b/tests/e2e/test/test_test_coverage/cxx_coverage_only/test_test_coverage_rust_cxx_only.py new file mode 100644 index 0000000000000..d42373691b749 --- /dev/null +++ b/tests/e2e/test/test_test_coverage/cxx_coverage_only/test_test_coverage_rust_cxx_only.py @@ -0,0 +1,182 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
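+# Rust selective-coverage tests: filtering by path, and by C++ files reached
+# through cxx/bindgen/ligen deps of Rust tests, plus headers with inline code.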
+ +# pyre-strict + + +import json +import re +from pathlib import Path +from typing import List + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + +from .test_test_coverage_utils import collect_coverage_for + + +@buck_test(inplace=True) +async def test_rust_test_coverage(buck: Buck, tmp_path: Path) -> None: + coverage_file = tmp_path / "coverage.txt" + await buck.test( + "@fbcode//mode/dbgo-cov", + "fbcode//buck2/tests/targets/rules/rust:tests_pass", + "--", + "--collect-coverage", + f"--coverage-output={coverage_file}", + ) + paths = [] + with open(coverage_file) as results: + for line in results: + paths.append(json.loads(line)["filepath"]) + assert "fbcode/buck2/tests/targets/rules/rust/tests_pass.rs" in paths, str(paths) + + +@buck_test(inplace=True) +async def test_rust_test_coverage_filtering_by_path_of_target( + buck: Buck, + tmp_path: Path, +) -> None: + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//buck2/tests/targets/rules/rust:tests_pass", + ["buck2/tests"], + ) + + unexpected_paths = [p for p in paths if not p.startswith("fbcode/buck2/tests")] + assert len(unexpected_paths) == 0, str(paths) + + +@buck_test(inplace=True) +async def test_rust_test_coverage_of_rust_library_filtering_by_path_outside_of_target( + buck: Buck, + tmp_path: Path, +) -> None: + paths = await collect_coverage_for( + buck, + tmp_path, + "buck2/tests/targets/rules/rust/coverage/test_with_rust_library_outside_targets_path:test", + ["testing_frameworks"], + ) + + fbcode_filename = "fbcode/testing_frameworks/code_coverage/adder.rs" + expected_paths = [p for p in paths if p == fbcode_filename] + assert len(expected_paths) > 0, str(paths) + unexpected_paths = [p for p in paths if p != fbcode_filename] + assert len(unexpected_paths) == 0, str(paths) + + +@buck_test(inplace=True) +async def test_rust_test_coverage_of_cpp_file_filtering_by_file_with_cxx( + buck: Buck, tmp_path: Path +) -> None: + file_to_collect_coverage = "testing_frameworks/code_coverage/rust/Adder.cpp" + paths = await collect_coverage_for( + buck, + tmp_path, + "testing_frameworks/code_coverage/rust:tests_with_cxx_cpp_dep", + [file_to_collect_coverage], + ) + + fbcode_filename = f"fbcode/{file_to_collect_coverage}" + assert paths == [fbcode_filename], str(paths) + + +@buck_test(inplace=True) +async def test_rust_test_coverage_of_cpp_file_filtering_by_file_with_cxx_through_rust_library( + buck: Buck, tmp_path: Path +) -> None: + file_to_collect_coverage = "testing_frameworks/code_coverage/rust/Adder.cpp" + paths = await collect_coverage_for( + buck, + tmp_path, + "testing_frameworks/code_coverage/rust:tests_with_cxx_rust_library_dep", + [file_to_collect_coverage], + ) + + fbcode_filename = f"fbcode/{file_to_collect_coverage}" + assert paths == [fbcode_filename], str(paths) + + +@buck_test(inplace=True) +async def test_rust_test_coverage_of_cpp_file_filtering_by_file_with_cxx_on_autogenerated_library_unittests( + buck: Buck, tmp_path: Path +) -> None: + file_to_collect_coverage = "testing_frameworks/code_coverage/rust/Adder.cpp" + paths = await collect_coverage_for( + buck, + tmp_path, + "testing_frameworks/code_coverage/rust:tests_in_library-unittest", + [file_to_collect_coverage], + ) + + fbcode_filename = f"fbcode/{file_to_collect_coverage}" + assert paths == [fbcode_filename], str(paths) + + +@buck_test(inplace=True) +async def test_rust_test_coverage_of_cpp_file_filtering_by_file_with_bindgen_rust_library( + buck: Buck, tmp_path: Path +) -> None: + 
file_to_collect_coverage = "testing_frameworks/code_coverage/rust/AdderC.cpp" + paths = await collect_coverage_for( + buck, + tmp_path, + "testing_frameworks/code_coverage/rust:tests_with_bindgen_rust_library_dep", + [file_to_collect_coverage], + ) + + fbcode_filename = f"fbcode/{file_to_collect_coverage}" + assert paths == [fbcode_filename], str(paths) + + +@buck_test(inplace=True) +async def test_rust_test_coverage_of_cpp_file_filtering_by_file_with_ligen_cpp_dep( + buck: Buck, tmp_path: Path +) -> None: + file_to_collect_coverage = "testing_frameworks/code_coverage/rust/AdderLigen.cpp" + paths = await collect_coverage_for( + buck, + tmp_path, + "testing_frameworks/code_coverage/rust:test_with_ligen_cpp_dep", + [file_to_collect_coverage], + ) + + fbcode_filename = f"fbcode/{file_to_collect_coverage}" + assert paths == [fbcode_filename], str(paths) + + +def any_item_matches(items: List[str], regex: str) -> bool: + for i in items: + if re.fullmatch(regex, i): + return True + return False + + +@buck_test(inplace=True) +async def test_rust_test_coverage_of_cpp_file_filtering_by_header_with_cxx( + buck: Buck, tmp_path: Path +) -> None: + paths = await collect_coverage_for( + buck, + tmp_path, + "testing_frameworks/code_coverage/rust:tests_with_code_in_cpp_header", + ["testing_frameworks/code_coverage/rust/AdderWithHeaderCode.h"], + ) + + assert len(paths) == 3, str(paths) + assert ( + "fbcode/testing_frameworks/code_coverage/rust/AdderWithHeaderCode.cpp" in paths + ), str(paths) + assert ( + "fbcode/testing_frameworks/code_coverage/rust/AdderWithHeaderCode.h" in paths + ), str(paths) + assert any_item_matches( + paths, + r"fbcode/[a-z0-9]+/testing_frameworks/code_coverage/rust/__tests_with_code_in_cpp_header-bridge_generated.cc__/out/generated.cc", + ), str(paths) diff --git a/tests/e2e/test/test_test_coverage/cxx_coverage_only/test_test_coverage_unsupported_cxx_only.py b/tests/e2e/test/test_test_coverage/cxx_coverage_only/test_test_coverage_unsupported_cxx_only.py new file mode 100644 index 0000000000000..6f0d9462dbe09 --- /dev/null +++ b/tests/e2e/test/test_test_coverage/cxx_coverage_only/test_test_coverage_unsupported_cxx_only.py @@ -0,0 +1,53 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
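+# Languages where selective coverage is unsupported: Go tests fail loudly,
+# and JUnit tests produce no coverage entries.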
+ +# pyre-strict + + +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + +from .test_test_coverage_utils import collect_coverage_for + + +@buck_test(inplace=True) +async def test_go_test_selective_coverage_doesnt_produce_coverage(buck: Buck) -> None: + await expect_failure( + buck.test( + *[ + "--config", + "fbcode.coverage_selective=true", + "--config", + "fbcode.cxx_coverage_only=testing_frameworks/code_coverage/go/add.go", + "fbcode//testing_frameworks/code_coverage/go:test", + "--", + "--collect-coverage", + ] + ), + stderr_regex=r"""2 TESTS FATALS + ⚠ testing_frameworks/code_coverage/go:test - Test(Add|Sub) + ⚠ testing_frameworks/code_coverage/go:test - Test(Add|Sub)""", + ) + + +@buck_test(inplace=True) +async def test_junit_test_selective_coverage_doesnt_produce_coverage( + buck: Buck, tmp_path: Path +) -> None: + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//testing_frameworks/code_coverage/junit/com/facebook/testing_frameworks:test", + [ + "testing_frameworks/code_coverage/junit/com/facebook/testing_frameworks/AddTest.java" + ], + ) + + assert len(paths) == 0, str(paths) diff --git a/tests/e2e/test/test_test_coverage/cxx_coverage_only/test_test_coverage_utils.py b/tests/e2e/test/test_test_coverage/cxx_coverage_only/test_test_coverage_utils.py new file mode 100644 index 0000000000000..e1094c131726c --- /dev/null +++ b/tests/e2e/test/test_test_coverage/cxx_coverage_only/test_test_coverage_utils.py @@ -0,0 +1,48 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json +from pathlib import Path +from typing import List, Optional + +from buck2.tests.e2e_util.api.buck import Buck + + +async def collect_coverage_for( + buck: Buck, + tmp_path: Path, + target: str, + filter: List[str], + mode: Optional[str] = None, +) -> List[str]: + coverage_file = tmp_path / "coverage.txt" + filter_str = " ".join(filter) + buck_args = [] + if mode is not None: + buck_args.append(mode) + buck_args.extend( + [ + "--config", + "fbcode.coverage_selective=true", + "--config", + f"fbcode.cxx_coverage_only={filter_str}", + target, + "--", + "--collect-coverage", + f"--coverage-output={coverage_file}", + ] + ) + await buck.test(*buck_args) + paths = [] + with open(coverage_file) as results: + for line in results: + paths.append(json.loads(line)["filepath"]) + + return list(set(paths)) diff --git a/tests/e2e/test/test_test_coverage/test_test_coverage_apple.py b/tests/e2e/test/test_test_coverage/test_test_coverage_apple.py new file mode 100644 index 0000000000000..d3d27bc3b3a05 --- /dev/null +++ b/tests/e2e/test/test_test_coverage/test_test_coverage_apple.py @@ -0,0 +1,57 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
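+# Apple coverage tests: with code_coverage.enable=all, coverage is collected
+# for both fbobjc and xplat test targets.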
+ +# pyre-strict + + +import json +import tempfile + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(inplace=True) +async def test_apple_coverage(buck: Buck) -> None: + with tempfile.NamedTemporaryFile("w") as covfile: + await buck.test( + "-c", + "xplat.available_platforms=APPLE,CXX", + "-c", + "code_coverage.enable=all", + "fbsource//fbobjc/Samples/TestInfra/TpxUnitTests:TpxUnitTests", + "--", + "--collect-coverage", + f"--coverage-output={covfile.name}", + ) + paths = [] + with open(covfile.name) as results: + for line in results: + paths.append(json.loads(line)["filepath"]) + assert ( + "fbobjc/Samples/TestInfra/TpxUnitTests/TpxUnitTests/TpxUnitTests.m" in paths + ), str(paths) + + +@buck_test(inplace=True) +async def test_apple_coverage_xplat(buck: Buck) -> None: + with tempfile.NamedTemporaryFile("w") as covfile: + await buck.test( + "-c", + "xplat.available_platforms=APPLE,CXX", + "-c", + "code_coverage.enable=all", + "fbsource//xplat/testinfra/playground/cpp:example_testApple", + "--", + "--collect-coverage", + f"--coverage-output={covfile.name}", + ) + paths = [] + with open(covfile.name) as results: + for line in results: + paths.append(json.loads(line)["filepath"]) + assert "xplat/testinfra/playground/cpp/ExampleTest.cpp" in paths, str(paths) diff --git a/tests/e2e/test/test_test_coverage/test_test_coverage_cpp.py b/tests/e2e/test/test_test_coverage/test_test_coverage_cpp.py new file mode 100644 index 0000000000000..69a8e048eddd0 --- /dev/null +++ b/tests/e2e/test/test_test_coverage/test_test_coverage_cpp.py @@ -0,0 +1,498 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
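+# Variant of the cxx_coverage_only C++ tests that passes explicit
+# folder_filter/file_filter arguments; paths here carry the fbcode/ prefix.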
+ +# pyre-strict + + +import json +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + +from .test_test_coverage_utils import collect_coverage_for + + +@buck_test(inplace=True) +async def test_cpp_test_coverage(buck: Buck, tmp_path: Path) -> None: + coverage_file = tmp_path / "coverage.txt" + await buck.test( + "@fbcode//mode/dbgo-cov", + "fbcode//buck2/tests/targets/rules/cxx:cpp_test_pass", + "--", + "--collect-coverage", + f"--coverage-output={coverage_file}", + ) + paths = [] + with open(coverage_file) as results: + for line in results: + paths.append(json.loads(line)["filepath"]) + + assert "fbcode/buck2/tests/targets/rules/cxx/cpp_test_pass.cpp" in paths, str(paths) + assert "fbcode/common/gtest/LightMain.cpp" in paths, str(paths) + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_filter_by_path_outside_target( + buck: Buck, + tmp_path: Path, +) -> None: + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//buck2/tests/targets/rules/cxx:cpp_test_pass", + folder_filter=["fbcode/folly"], + file_filter=[], + ) + + expected_paths = [p for p in paths if p.startswith("fbcode/folly")] + assert len(expected_paths) > 0, str(paths) + unexpected_paths = [p for p in paths if not p.startswith("fbcode/folly")] + assert len(unexpected_paths) == 0, str(paths) + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_filter_by_path_of_target( + buck: Buck, tmp_path: Path +) -> None: + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//buck2/tests/targets/rules/cxx:cpp_test_pass", + folder_filter=["fbcode/buck2/tests"], + file_filter=[], + ) + + expected_paths = [p for p in paths if p.startswith("fbcode/buck2/tests")] + assert len(expected_paths) > 0, str(paths) + unexpected_paths = [p for p in paths if not p.startswith("fbcode/buck2/tests")] + assert len(unexpected_paths) == 0, str(paths) + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_filter_by_path_of_target_with_dev_lg( + buck: Buck, + tmp_path: Path, +) -> None: + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//buck2/tests/targets/rules/cxx:cpp_test_pass", + mode="@fbcode//mode/dev-lg", + folder_filter=["fbcode/buck2/tests"], + file_filter=[], + ) + + expected_paths = [p for p in paths if p.startswith("fbcode/buck2/tests")] + assert len(expected_paths) > 0, str(paths) + unexpected_paths = [p for p in paths if not p.startswith("fbcode/buck2/tests")] + assert len(unexpected_paths) == 0, str(paths) + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_filter_by_path_in_link_group_with_dev_lg( + buck: Buck, + tmp_path: Path, +) -> None: + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//buck2/tests/targets/rules/cxx:cpp_test_pass", + mode="@fbcode//mode/dev-lg", + folder_filter=["fbcode/folly"], + file_filter=[], + ) + + expected_paths = [p for p in paths if p.startswith("fbcode/folly")] + assert len(expected_paths) > 0, str(paths) + unexpected_paths = [p for p in paths if not p.startswith("fbcode/folly")] + assert len(unexpected_paths) == 0, str(paths) + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_filter_by_file_of_target_with_dev_lg( + buck: Buck, + tmp_path: Path, +) -> None: + source_name = "fbcode/buck2/tests/targets/rules/cxx/cpp_test_pass.cpp" + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//buck2/tests/targets/rules/cxx:cpp_test_pass", + mode="@fbcode//mode/dev-lg", + folder_filter=[], + 
file_filter=[source_name], + ) + + expected_paths = [p for p in paths if p == source_name] + assert len(expected_paths) > 0, str(paths) + unexpected_paths = [p for p in paths if p != source_name] + assert len(unexpected_paths) == 0, str(paths) + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_filter_by_source_file_in_link_group_with_dev_lg( + buck: Buck, + tmp_path: Path, +) -> None: + source_name = "fbcode/folly/String.cpp" + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//buck2/tests/targets/rules/cxx:cpp_test_pass", + mode="@fbcode//mode/dev-lg", + folder_filter=[], + file_filter=[source_name], + ) + + expected_paths = [p for p in paths if p == source_name] + assert len(expected_paths) > 0, str(paths) + unexpected_paths = [ + p for p in paths if not p == source_name and not p.endswith(".h") + ] + assert len(unexpected_paths) == 0, str(paths) + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_filter_by_header_file_in_link_group_with_dev_lg( + buck: Buck, + tmp_path: Path, +) -> None: + header_name = "fbcode/testing_frameworks/code_coverage/playground/link_groups/LibraryRightRightOnlyUsedHere.h" + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//testing_frameworks/code_coverage/playground/link_groups:test_with_link_groups", + mode="@fbcode//mode/dev-lg", + folder_filter=[], + file_filter=[header_name], + ) + + assert len(paths) == 5, str(paths) + # because it belongs to a target that has a header file selected for coverage + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/LibraryRightRight.cpp" + in paths + ) + # because it belongs to a target that has a dependency that contains a header + # file selected for coverage + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/LibraryRight.cpp" + in paths + ) + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/TestWithLinkGroups.cpp" + in paths + ) + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/TestWithLinkGroups.h" + in paths + ) + # because it has executable code used by LibraryRight.cpp + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/LibraryRightLeftUsedInOtherLinkGroup.h" + in paths + ) + # because they are not in the transitive rdeps of the cxx_library that has the header + # that was selected for coverage + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/LibraryRightLeft.cpp" + not in paths + ) + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/LibraryLeft.cpp" + not in paths + ) + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_filter_by_header_file_defined_in_one_link_group_and_used_in_another( + buck: Buck, + tmp_path: Path, +) -> None: + header_name = "fbcode/testing_frameworks/code_coverage/playground/link_groups/LibraryRightLeftUsedInOtherLinkGroup.h" + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//testing_frameworks/code_coverage/playground/link_groups:test_with_header_used_in_different_link_group", + mode="@fbcode//mode/dev-lg", + folder_filter=[], + file_filter=[header_name], + ) + + assert len(paths) == 5, str(paths) + # because it belongs to a target that has a header file selected for coverage + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/LibraryRightLeft.cpp" + in paths + ) + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/LibraryRightLeftUsedInOtherLinkGroup.h" + in paths + ) + # because 
it belongs to a target that has a dependency that contains a header + # file selected for coverage + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/LibraryRight.cpp" + in paths + ) + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/TestWithLinkGroups.cpp" + in paths + ) + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/TestWithLinkGroups.h" + in paths + ) + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_filter_by_header_file_defined_in_one_link_group_and_used_in_test_binary_link_group( + buck: Buck, + tmp_path: Path, +) -> None: + header_name = "fbcode/testing_frameworks/code_coverage/playground/link_groups/LibraryRightLeftUsedInOtherLinkGroup.h" + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//testing_frameworks/code_coverage/playground/link_groups:test_with_header_used_in_different_link_group", + mode="@fbcode//mode/dev-lg", + folder_filter=[], + file_filter=[header_name], + ) + + assert len(paths) == 5, str(paths) + # because it belongs to a target that has a header file selected for coverage + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/LibraryRightLeft.cpp" + in paths + ) + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/LibraryRightLeftUsedInOtherLinkGroup.h" + in paths + ) + # because it belongs to a target that has a dependency that contains a header + # file selected for coverage + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/LibraryRight.cpp" + in paths + ) + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/TestWithLinkGroups.cpp" + in paths + ) + assert ( + "fbcode/testing_frameworks/code_coverage/playground/link_groups/TestWithLinkGroups.h" + in paths + ) + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_filter_by_file(buck: Buck, tmp_path: Path) -> None: + source_name = "fbcode/buck2/tests/targets/rules/cxx/cpp_test_pass.cpp" + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//buck2/tests/targets/rules/cxx:cpp_test_pass", + folder_filter=[], + file_filter=[source_name], + ) + + assert len(paths) == 1, str(paths) + assert paths[0] == source_name + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_when_filter_by_test_binary_header_file( + buck: Buck, tmp_path: Path +) -> None: + header_name = "fbcode/testing_frameworks/code_coverage/playground/Test.h" + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//testing_frameworks/code_coverage/playground:test", + folder_filter=[], + file_filter=[header_name], + ) + + assert len(paths) == 2, str(paths) + assert header_name in paths + source_name = "fbcode/testing_frameworks/code_coverage/playground/Test.cpp" + assert source_name in paths + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_when_filter_by_library_header_file( + buck: Buck, tmp_path: Path +) -> None: + header_name = "fbcode/testing_frameworks/code_coverage/playground/ThirdLevelDep.h" + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//testing_frameworks/code_coverage/playground:test", + folder_filter=[], + file_filter=[header_name], + ) + + assert len(paths) == 9, str(paths) + # because it belongs to a target that has a header file selected for coverage + assert ( + "fbcode/testing_frameworks/code_coverage/playground/ThirdLevelDep.cpp" in paths + ) + # because it is the header file selected for coverage and it has executable code + assert 
"fbcode/testing_frameworks/code_coverage/playground/ThirdLevelDep.h" in paths + assert ( + "fbcode/testing_frameworks/code_coverage/playground/ThirdLevelDepPrivate.h" + in paths + ) + # because it belongs to a target that has a dependency that contains a header + # file selected for coverage + assert ( + "fbcode/testing_frameworks/code_coverage/playground/SecondLevelDepIncludesThirdLevelDep.cpp" + in paths + ) + assert ( + "fbcode/testing_frameworks/code_coverage/playground/SecondLevelDepDoesNotIncludeThirdLevelDep.cpp" + in paths + ) + assert ( + "fbcode/testing_frameworks/code_coverage/playground/FirstLevelDep.cpp" in paths + ) + assert "fbcode/testing_frameworks/code_coverage/playground/Test.cpp" in paths + # because a cpp file (FirstLvelDep.cpp) that included it called a template function + # defined inside it + assert "fbcode/testing_frameworks/code_coverage/playground/FirstLevelDep.h" in paths + assert "fbcode/testing_frameworks/code_coverage/playground/Test.h" in paths + + # because they are not in the transitive rdeps of the cxx_library that has the header + # that was selected for coverage + assert ( + "fbcode/testing_frameworks/code_coverage/playground/SecondLevelDepWithoutDeps.cpp" + not in paths + ) + assert ( + "fbcode/testing_frameworks/code_coverage/playground/SecondLevelDepWithoutDeps.h" + not in paths + ) + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_when_filter_by_library_private_header_file( + buck: Buck, tmp_path: Path +) -> None: + private_header_name = ( + "fbcode/testing_frameworks/code_coverage/playground/ThirdLevelDepPrivate.h" + ) + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//testing_frameworks/code_coverage/playground:test", + folder_filter=[], + file_filter=[private_header_name], + ) + + assert len(paths) == 3, str(paths) + assert private_header_name in paths + assert ( + "fbcode/testing_frameworks/code_coverage/playground/ThirdLevelDep.cpp" in paths + ) + assert ( + "fbcode/testing_frameworks/code_coverage/playground/ThirdLevelDepPrivate.h" + in paths + ) + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_when_filter_by_header_file_in_headers_only_library( + buck: Buck, tmp_path: Path +) -> None: + header_name = ( + "fbcode/testing_frameworks/code_coverage/playground/LibraryWithOnlyHeaders.h" + ) + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//testing_frameworks/code_coverage/playground:test_with_dep_with_only_headers", + folder_filter=[], + file_filter=[header_name], + ) + + assert len(paths) == 3, str(paths) + assert header_name in paths + assert ( + "fbcode/testing_frameworks/code_coverage/playground/TestWithDepWithOnlyHeaders.cpp" + in paths + ) + assert ( + "fbcode/testing_frameworks/code_coverage/playground/TestWithDepWithOnlyHeaders.h" + in paths + ) + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_filter_by_file_with_opt_mode( + buck: Buck, + tmp_path: Path, +) -> None: + source_name = "fbcode/buck2/tests/targets/rules/cxx/cpp_test_pass.cpp" + paths = await collect_coverage_for( + buck, + tmp_path, + target="fbcode//buck2/tests/targets/rules/cxx:cpp_test_pass", + folder_filter=[], + file_filter=[source_name], + mode="@fbcode//mode/opt", + ) + + assert len(paths) == 1, str(paths) + assert paths[0] == source_name + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_filter_by_file_and_path( + buck: Buck, tmp_path: Path +) -> None: + source_name = "fbcode/buck2/tests/targets/rules/cxx/cpp_test_pass.cpp" + paths = await collect_coverage_for( + buck, + 
tmp_path, + "fbcode//buck2/tests/targets/rules/cxx:cpp_test_pass", + folder_filter=["folly"], + file_filter=[source_name], + ) + + folly_paths = [p for p in paths if p.startswith("fbcode/folly")] + assert len(folly_paths) > 0, f"expected to find some folly sources in {paths}" + + source_paths = [p for p in paths if p == source_name] + assert len(source_paths) == 1, f"expected to find {source_name} in {paths}" + + unexpected_paths = [ + p for p in paths if p != source_name and not p.startswith("fbcode/folly") + ] + assert ( + len(unexpected_paths) == 0 + ), f"Only coverage for the test source file and files under fbcode/folly should have been collected, but got {unexpected_paths}" + + +@buck_test(inplace=True) +async def test_cpp_test_coverage_xplat_filter_by_file_path( + buck: Buck, + tmp_path: Path, +) -> None: + file_to_collect_coverage = "xplat/testinfra/playground/cpp/ExampleTest.cpp" + paths = await collect_coverage_for( + buck, + tmp_path, + "fbsource//xplat/testinfra/playground/cpp:example_testFbcode", + folder_filter=[], + file_filter=[file_to_collect_coverage], + ) + + assert set(paths) == { + file_to_collect_coverage, + }, f"Should collect coverage only for {file_to_collect_coverage} but got coverage for {paths}" diff --git a/tests/e2e/test/test_test_coverage/test_test_coverage_flags.py b/tests/e2e/test/test_test_coverage/test_test_coverage_flags.py new file mode 100644 index 0000000000000..4e489ca797a01 --- /dev/null +++ b/tests/e2e/test/test_test_coverage/test_test_coverage_flags.py @@ -0,0 +1,43 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(inplace=True) +async def test_conflicting_fbcode_coverage_flags_fail(buck: Buck) -> None: + await expect_failure( + buck.test( + *[ + "--config", + "fbcode.coverage=true", + "--config", + "fbcode.coverage_selective=true", + "fbcode//testing_frameworks/code_coverage/playground:test", + ] + ), + stderr_regex=r"""fbcode.coverage and fbcode.coverage_selective are both true. Pick one.""", + ) + + +@buck_test(inplace=True) +async def test_fbcode_coverage_selective_require_filters(buck: Buck) -> None: + await expect_failure( + buck.test( + *[ + "--config", + "fbcode.coverage_selective=true", + "fbcode//testing_frameworks/code_coverage/playground:test", + ] + ), + stderr_regex=r"""fbcode.coverage_selective=true with no filters""", + ) diff --git a/tests/e2e/test/test_test_coverage/test_test_coverage_python.py b/tests/e2e/test/test_test_coverage/test_test_coverage_python.py new file mode 100644 index 0000000000000..394bebdc71488 --- /dev/null +++ b/tests/e2e/test/test_test_coverage/test_test_coverage_python.py @@ -0,0 +1,213 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
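The two tests in test_test_coverage_flags.py exercise one mechanical step: a set of fbcode.* settings expanded into the interleaved "--config", "key=value" arguments that buck.test receives. A minimal sketch of that expansion; coverage_config_args is a hypothetical helper for illustration, not part of the test utilities:

from typing import Dict, List


def coverage_config_args(configs: Dict[str, str]) -> List[str]:
    # Expands {"fbcode.coverage": "true"} into ["--config", "fbcode.coverage=true"].
    args: List[str] = []
    for key, value in configs.items():
        args.extend(["--config", f"{key}={value}"])
    return args


assert coverage_config_args({"fbcode.coverage_selective": "true"}) == [
    "--config",
    "fbcode.coverage_selective=true",
]
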
+ +# pyre-strict + + +import json +import tempfile +from dataclasses import dataclass, field +from typing import List, Optional + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(inplace=True) +async def test_python_coverage(buck: Buck) -> None: + with tempfile.NamedTemporaryFile("w") as covfile: + await buck.test( + "@fbcode//mode/dbgo-cov", + "fbcode//buck2/tests/targets/rules/python/coverage:test", + "--", + "--collect-coverage", + f"--coverage-output={covfile.name}", + ) + paths = [] + with open(covfile.name) as results: + for line in results: + paths.append(json.loads(line)["filepath"]) + assert "fbcode/buck2/tests/targets/rules/python/coverage/lib.py" in paths, str( + paths + ) + + +@buck_test(inplace=True) +async def test_python_coverage_filtering_by_folder(buck: Buck) -> None: + folder_to_collect = "fbcode/buck2/tests/targets/rules/python/coverage" + with tempfile.NamedTemporaryFile("w") as covfile: + await buck.test( + "@fbcode//mode/dbgo-cov", + "fbcode//buck2/tests/targets/rules/python/coverage:test", + "-c", + f"code_coverage.folder_path_filter={folder_to_collect}", + "--", + "--collect-coverage", + f"--coverage-output={covfile.name}", + ) + paths = [] + with open(covfile.name) as results: + for line in results: + paths.append(json.loads(line)["filepath"]) + assert set(paths) == { + f"{folder_to_collect}/lib.py", + f"{folder_to_collect}/test.py", + }, f"Only folder {folder_to_collect} should have coverage, instead got coverage for {str(paths)}" + + +@dataclass +class PythonCoverageResult: + using_new_testpilot_interface_paths: List[str] = field(default_factory=list) + + +async def python_collect_coverage_for( + buck: Buck, + folder_filter: List[str], + file_filter: List[str], + target: str = "fbcode//buck2/tests/targets/rules/python/coverage:test", + mode: Optional[str] = None, +) -> PythonCoverageResult: + folder_filter_str = ":".join(folder_filter) + file_filter_str = ":".join(file_filter) + result = PythonCoverageResult() + with tempfile.NamedTemporaryFile("w") as covfile: + buck_args = [mode] if mode else [] + buck_args.extend( + [ + "--config", + "code_coverage.enable=filtered", + "--config", + f"code_coverage.folder_path_filter={folder_filter_str}", + "--config", + f"code_coverage.file_path_filter={file_filter_str}", + target, + "--", + "--collect-coverage", + f"--coverage-output={covfile.name}", + ] + ) + await buck.test(*buck_args) + paths = [] + with open(covfile.name) as results: + for line in results: + paths.append(json.loads(line)["filepath"]) + + result.using_new_testpilot_interface_paths = paths + + return result + + +@buck_test(inplace=True) +async def test_python_coverage_filtering_by_file(buck: Buck) -> None: + file_to_collect_coverage = "fbcode/buck2/tests/targets/rules/python/coverage/lib.py" + result = await python_collect_coverage_for( + buck, folder_filter=[], file_filter=[file_to_collect_coverage] + ) + + assert set(result.using_new_testpilot_interface_paths) == { + file_to_collect_coverage + }, f"Only {file_to_collect_coverage} should have coverage, instead got coverage for {str(result.using_new_testpilot_interface_paths)}" + + +@buck_test(inplace=True) +async def test_python_coverage_filtering_by_file_with_base_module_remap( + buck: Buck, +) -> None: + file_to_collect_coverage = "fbcode/buck2/tests/targets/rules/python/coverage/lib.py" + result = await python_collect_coverage_for( + buck, + 
target="fbcode//buck2/tests/targets/rules/python/coverage:test_using_base_module_remapped_lib", + folder_filter=[], + file_filter=[file_to_collect_coverage], + ) + + assert set(result.using_new_testpilot_interface_paths) == { + file_to_collect_coverage + }, f"Only {file_to_collect_coverage} should have coverage, instead got coverage for {str(result.using_new_testpilot_interface_paths)}" + + +@buck_test(inplace=True) +async def test_python_coverage_filtering_by_file_with_opt_mode(buck: Buck) -> None: + file_to_collect_coverage = "fbcode/buck2/tests/targets/rules/python/coverage/lib.py" + result = await python_collect_coverage_for( + buck, + folder_filter=[], + file_filter=[file_to_collect_coverage], + mode="@fbcode//mode/opt", + ) + + assert set(result.using_new_testpilot_interface_paths) == { + file_to_collect_coverage + }, f"Only {file_to_collect_coverage} should have coverage, instead got coverage for {str(result.using_new_testpilot_interface_paths)}" + + +@buck_test(inplace=True) +async def test_python_coverage_filtering_by_file_on_cinder_target(buck: Buck) -> None: + file_to_collect_coverage = ( + "fbcode/buck2/tests/targets/rules/python/coverage/test.py" + ) + result = await python_collect_coverage_for( + buck, + target="fbcode//buck2/tests/targets/rules/python/coverage:simple_cinder_unittest", + folder_filter=[], + file_filter=[file_to_collect_coverage], + ) + + assert set(result.using_new_testpilot_interface_paths) == { + file_to_collect_coverage + }, f"Only {file_to_collect_coverage} should have coverage, instead got coverage for {str(result.using_new_testpilot_interface_paths)}" + + +@buck_test(inplace=True) +async def test_python_coverage_filtering_by_source_file_on_cpp_dep(buck: Buck) -> None: + file_to_collect_coverage = ( + "fbcode/buck2/tests/targets/rules/python/coverage/cpp_lib.cpp" + ) + result = await python_collect_coverage_for( + buck, + target="fbcode//buck2/tests/targets/rules/python/coverage:test_with_cpp_dep", + folder_filter=[], + file_filter=[file_to_collect_coverage], + ) + + assert set(result.using_new_testpilot_interface_paths) == { + file_to_collect_coverage + }, f"Only {file_to_collect_coverage} should have coverage, instead got coverage for {str(result.using_new_testpilot_interface_paths)}" + + +@buck_test(inplace=True) +async def test_python_coverage_filtering_by_header_file_on_cpp_dep(buck: Buck) -> None: + header_file = "fbcode/buck2/tests/targets/rules/python/coverage/cpp_lib.h" + source_file = header_file.replace(".h", ".cpp") + result = await python_collect_coverage_for( + buck, + target="fbcode//buck2/tests/targets/rules/python/coverage:test_with_cpp_dep", + folder_filter=[], + file_filter=[header_file], + ) + + assert set(result.using_new_testpilot_interface_paths) == { + source_file + }, f"Only {source_file} should have coverage, instead got coverage for {str(result.using_new_testpilot_interface_paths)}" + + +@buck_test(inplace=True) +async def test_python_coverage_filtering_by_file_on_ligen_cpp_dep(buck: Buck) -> None: + file_to_collect_coverage = ( + "fbcode/buck2/tests/targets/rules/python/coverage/adder_ligen.cpp" + ) + result = await python_collect_coverage_for( + buck, + target="fbcode//buck2/tests/targets/rules/python/coverage:test_with_ligen_cpp_dep", + folder_filter=[], + file_filter=[file_to_collect_coverage], + ) + + assert set(result.using_new_testpilot_interface_paths) == { + file_to_collect_coverage + }, f"Only {file_to_collect_coverage} should have coverage, instead got coverage for {str(result.using_new_testpilot_interface_paths)}" 
diff --git a/tests/e2e/test/test_test_coverage/test_test_coverage_rust.py b/tests/e2e/test/test_test_coverage/test_test_coverage_rust.py new file mode 100644 index 0000000000000..7fb42306f57e4 --- /dev/null +++ b/tests/e2e/test/test_test_coverage/test_test_coverage_rust.py @@ -0,0 +1,189 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json +import re +from pathlib import Path +from typing import List + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + +from .test_test_coverage_utils import collect_coverage_for + + +@buck_test(inplace=True) +async def test_rust_test_coverage(buck: Buck, tmp_path: Path) -> None: + coverage_file = tmp_path / "coverage.txt" + await buck.test( + "@fbcode//mode/dbgo-cov", + "fbcode//buck2/tests/targets/rules/rust:tests_pass", + "--", + "--collect-coverage", + f"--coverage-output={coverage_file}", + ) + paths = [] + with open(coverage_file) as results: + for line in results: + paths.append(json.loads(line)["filepath"]) + assert "fbcode/buck2/tests/targets/rules/rust/tests_pass.rs" in paths, str(paths) + + +@buck_test(inplace=True) +async def test_rust_test_coverage_filtering_by_path_of_target( + buck: Buck, + tmp_path: Path, +) -> None: + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//buck2/tests/targets/rules/rust:tests_pass", + folder_filter=["fbcode/buck2/tests"], + file_filter=[], + ) + + unexpected_paths = [p for p in paths if not p.startswith("fbcode/buck2/tests")] + assert len(unexpected_paths) == 0, str(paths) + + +@buck_test(inplace=True) +async def test_rust_test_coverage_of_rust_library_filtering_by_path_outside_of_target( + buck: Buck, + tmp_path: Path, +) -> None: + paths = await collect_coverage_for( + buck, + tmp_path, + "buck2/tests/targets/rules/rust/coverage/test_with_rust_library_outside_targets_path:test", + folder_filter=["fbcode/testing_frameworks"], + file_filter=[], + ) + + fbcode_filename = "fbcode/testing_frameworks/code_coverage/adder.rs" + expected_paths = [p for p in paths if p == fbcode_filename] + assert len(expected_paths) > 0, str(paths) + unexpected_paths = [p for p in paths if p != fbcode_filename] + assert len(unexpected_paths) == 0, str(paths) + + +@buck_test(inplace=True) +async def test_rust_test_coverage_of_cpp_file_filtering_by_file_with_cxx( + buck: Buck, tmp_path: Path +) -> None: + file_to_collect_coverage = "fbcode/testing_frameworks/code_coverage/rust/Adder.cpp" + paths = await collect_coverage_for( + buck, + tmp_path, + "testing_frameworks/code_coverage/rust:tests_with_cxx_cpp_dep", + folder_filter=[], + file_filter=[file_to_collect_coverage], + ) + + assert paths == [file_to_collect_coverage], str(paths) + + +@buck_test(inplace=True) +async def test_rust_test_coverage_of_cpp_file_filtering_by_file_with_cxx_through_rust_library( + buck: Buck, tmp_path: Path +) -> None: + file_to_collect_coverage = "fbcode/testing_frameworks/code_coverage/rust/Adder.cpp" + paths = await collect_coverage_for( + buck, + tmp_path, + "testing_frameworks/code_coverage/rust:tests_with_cxx_rust_library_dep", + folder_filter=[], + file_filter=[file_to_collect_coverage], + ) + + assert paths == [file_to_collect_coverage], str(paths) + + +@buck_test(inplace=True) +async def 
test_rust_test_coverage_of_cpp_file_filtering_by_file_with_cxx_on_autogenerated_library_unittests( + buck: Buck, tmp_path: Path +) -> None: + file_to_collect_coverage = "fbcode/testing_frameworks/code_coverage/rust/Adder.cpp" + paths = await collect_coverage_for( + buck, + tmp_path, + "testing_frameworks/code_coverage/rust:tests_in_library-unittest", + folder_filter=[], + file_filter=[file_to_collect_coverage], + ) + + assert paths == [file_to_collect_coverage], str(paths) + + +@buck_test(inplace=True) +async def test_rust_test_coverage_of_cpp_file_filtering_by_file_with_bindgen_rust_library( + buck: Buck, tmp_path: Path +) -> None: + file_to_collect_coverage = "fbcode/testing_frameworks/code_coverage/rust/AdderC.cpp" + paths = await collect_coverage_for( + buck, + tmp_path, + "testing_frameworks/code_coverage/rust:tests_with_bindgen_rust_library_dep", + folder_filter=[], + file_filter=[file_to_collect_coverage], + ) + + assert paths == [file_to_collect_coverage], str(paths) + + +@buck_test(inplace=True) +async def test_rust_test_coverage_of_cpp_file_filtering_by_file_with_ligen_cpp_dep( + buck: Buck, tmp_path: Path +) -> None: + file_to_collect_coverage = ( + "fbcode/testing_frameworks/code_coverage/rust/AdderLigen.cpp" + ) + paths = await collect_coverage_for( + buck, + tmp_path, + "testing_frameworks/code_coverage/rust:test_with_ligen_cpp_dep", + folder_filter=[], + file_filter=[file_to_collect_coverage], + ) + + assert paths == [file_to_collect_coverage], str(paths) + + +def any_item_matches(items: List[str], regex: str) -> bool: + for i in items: + if re.fullmatch(regex, i): + return True + return False + + +@buck_test(inplace=True) +async def test_rust_test_coverage_of_cpp_file_filtering_by_header_with_cxx( + buck: Buck, tmp_path: Path +) -> None: + paths = await collect_coverage_for( + buck, + tmp_path, + "testing_frameworks/code_coverage/rust:tests_with_code_in_cpp_header", + folder_filter=[], + file_filter=[ + "fbcode/testing_frameworks/code_coverage/rust/AdderWithHeaderCode.h" + ], + ) + + assert len(paths) == 3, str(paths) + assert ( + "fbcode/testing_frameworks/code_coverage/rust/AdderWithHeaderCode.cpp" in paths + ), str(paths) + assert ( + "fbcode/testing_frameworks/code_coverage/rust/AdderWithHeaderCode.h" in paths + ), str(paths) + assert any_item_matches( + paths, + r"fbcode/[a-z0-9]+/testing_frameworks/code_coverage/rust/__tests_with_code_in_cpp_header-bridge_generated.cc__/out/generated.cc", + ), str(paths) diff --git a/tests/e2e/test/test_test_coverage/test_test_coverage_unsupported.py b/tests/e2e/test/test_test_coverage/test_test_coverage_unsupported.py new file mode 100644 index 0000000000000..603a258b460a2 --- /dev/null +++ b/tests/e2e/test/test_test_coverage/test_test_coverage_unsupported.py @@ -0,0 +1,54 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
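One detail worth noting from the Rust coverage tests just above: any_item_matches uses re.fullmatch, so a pattern must account for the entire path, including the hashed directory component in the generated-bridge file. A quick illustration with a made-up path:

import re

generated = "fbcode/abc123/testing_frameworks/code_coverage/rust/out/generated.cc"

# fullmatch anchors at both ends, so a bare prefix pattern does not match.
assert re.fullmatch(r"fbcode/[a-z0-9]+/.*/out/generated\.cc", generated)
assert not re.fullmatch(r"fbcode/[a-z0-9]+", generated)
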
+ +# pyre-strict + + +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + +from .test_test_coverage_utils import collect_coverage_for + + +@buck_test(inplace=True) +async def test_go_test_selective_coverage_doesnt_produce_coverage(buck: Buck) -> None: + await expect_failure( + buck.test( + *[ + "--config", + "code_coverage.enable=filtered", + "--config", + "code_coverage.file_path_filter=fbcode/testing_frameworks/code_coverage/go/add.go", + "fbcode//testing_frameworks/code_coverage/go:test", + "--", + "--collect-coverage", + ] + ), + stderr_regex=r"""2 TESTS FATALS + ⚠ testing_frameworks/code_coverage/go:test - Test(Add|Sub) + ⚠ testing_frameworks/code_coverage/go:test - Test(Add|Sub)""", + ) + + +@buck_test(inplace=True) +async def test_junit_test_selective_coverage_doesnt_produce_coverage( + buck: Buck, tmp_path: Path +) -> None: + paths = await collect_coverage_for( + buck, + tmp_path, + "fbcode//testing_frameworks/code_coverage/junit/com/facebook/testing_frameworks:test", + folder_filter=[], + file_filter=[ + "testing_frameworks/code_coverage/junit/com/facebook/testing_frameworks/AddTest.java" + ], + ) + + assert len(paths) == 0, str(paths) diff --git a/tests/e2e/test/test_test_coverage/test_test_coverage_utils.py b/tests/e2e/test/test_test_coverage/test_test_coverage_utils.py new file mode 100644 index 0000000000000..461c5974cfaf0 --- /dev/null +++ b/tests/e2e/test/test_test_coverage/test_test_coverage_utils.py @@ -0,0 +1,54 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json +from pathlib import Path +from typing import List, Optional + +from buck2.tests.e2e_util.api.buck import Buck + + +async def collect_coverage_for( + buck: Buck, + tmp_path: Path, + target: str, + folder_filter: List[str], + file_filter: List[str], + mode: Optional[str] = None, + extra_args: Optional[List[str]] = None, +) -> List[str]: + coverage_file = tmp_path / "coverage.txt" + folder_filter_str = ":".join(folder_filter) + file_filter_str = ":".join(file_filter) + buck_args = [] + if mode is not None: + buck_args.append(mode) + buck_args.extend( + [ + "--config", + "code_coverage.enable=filtered", + "--config", + f"code_coverage.folder_path_filter={folder_filter_str}", + "--config", + f"code_coverage.file_path_filter={file_filter_str}", + target, + "--", + "--collect-coverage", + f"--coverage-output={coverage_file}", + ] + + (extra_args or []) + ) + await buck.test(*buck_args) + paths = [] + with open(coverage_file) as results: + for line in results: + paths.append(json.loads(line)["filepath"]) + + return list(set(paths)) diff --git a/tests/e2e/test/test_test_inplace.py b/tests/e2e/test/test_test_inplace.py new file mode 100644 index 0000000000000..d914ac0c2f297 --- /dev/null +++ b/tests/e2e/test/test_test_inplace.py @@ -0,0 +1,792 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
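As a usage note for test_test_coverage_utils.py: collect_coverage_for is the single entry point that the C++, Rust, and unsupported-language coverage tests all build on. A hypothetical new test written against it would look like the sketch below; the target and source path are placeholders, not real targets:

from pathlib import Path

from buck2.tests.e2e_util.api.buck import Buck
from buck2.tests.e2e_util.buck_workspace import buck_test

from .test_test_coverage_utils import collect_coverage_for


@buck_test(inplace=True)
async def test_example_filtered_coverage(buck: Buck, tmp_path: Path) -> None:
    # Placeholder target and source path, for illustration only.
    wanted = "fbcode/example/Lib.cpp"
    paths = await collect_coverage_for(
        buck,
        tmp_path,
        "fbcode//example:test",
        folder_filter=[],
        file_filter=[wanted],
    )
    assert paths == [wanted], str(paths)
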
+ +# pyre-strict + + +import asyncio +import json +import os +import re +import signal +from pathlib import Path + +import pytest +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.api.buck_result import BuckException, ExitCodeV2 +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import ( + buck_test, + env, + get_mode_from_platform, + is_deployed_buck2, +) + +MAC_AND_WINDOWS = ["darwin", "windows"] + + +def remove_ansi_escape_sequences(ansi_str: str) -> str: + """convert ansi_str to str""" + ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])") + return ansi_escape.sub("", ansi_str) + + +# TODO(marwhal): Fix and enable on Windows +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_sh_test(buck: Buck) -> None: + await buck.test( + "fbcode//buck2/tests/targets/rules/sh_test:test", + ) + + await expect_failure( + buck.test( + "fbcode//buck2/tests/targets/rules/sh_test:test_fail", + ), + stderr_regex=r"1 TESTS FAILED\n(\s)+✗ buck2\/tests\/targets\/rules\/sh_test:test_fail - main", + ) + + +# TODO(marwhal): Fix and enable on Windows +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_sh_test_remote_checks(buck: Buck) -> None: + await expect_failure( + buck.test( + "fbcode//buck2/tests/targets/rules/sh_test:test", + "--remote-only", + ), + stderr_regex="Incompatible executor preferences: `RemoteRequired` & `LocalRequired`", + ) + await buck.test( + "fbcode//buck2/tests/targets/rules/sh_test:test_remote_implicit", + "--local-only", + ) + await buck.test( + "fbcode//buck2/tests/targets/rules/sh_test:test_remote_implicit", + "--remote-only", + ) + await expect_failure( + buck.test( + "fbcode//buck2/tests/targets/rules/sh_test:test_remote_explicit", + "--local-only", + ), + stderr_regex="LocalOnly.*is incompatible", + ) + await buck.test( + "fbcode//buck2/tests/targets/rules/sh_test:test_remote_explicit", + "--remote-only", + ) + + +# TODO(marwhal): Fix and enable on Windows +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_test_build_fail(buck: Buck) -> None: + await expect_failure( + buck.test( + "hewwo_buck", + ), + stderr_regex="does not exist", + ) + + await buck.test("fbcode//buck2/tests/targets/rules/sh_test:test") + + +@buck_test(inplace=True, skip_for_os=["darwin"]) +async def test_cpp_test(buck: Buck) -> None: + mode = get_mode_from_platform() + await buck.test("fbcode//buck2/tests/targets/rules/cxx:cpp_test_pass", mode) + + await expect_failure( + buck.test("fbcode//buck2/tests/targets/rules/cxx:cpp_test_fail", mode), + stderr_regex=r"1 TESTS FAILED\n(\s)+✗ buck2\/tests\/targets\/rules\/cxx:cpp_test_fail - Simple\.Fail", + ) + + +@buck_test(inplace=True, skip_for_os=["darwin"]) +async def test_cpp_test_fdb_message(buck: Buck) -> None: + + await expect_failure( + buck.test( + "fbcode//buck2/tests/targets/rules/cxx:cpp_test_fail", + get_mode_from_platform(), + ), + stderr_regex=r"Run \$ fdb buck test \ to debug", + ) + + +@buck_test(inplace=True, skip_for_os=MAC_AND_WINDOWS) +async def test_python_test(buck: Buck) -> None: + await buck.test("fbcode//buck2/tests/targets/rules/python/test:test") + + await buck.test("fbcode//buck2/tests/targets/rules/python/test:test_env") + + await expect_failure( + buck.test("fbcode//buck2/tests/targets/rules/python/test:test_fail"), + stderr_regex=r"1 TESTS FAILED\n(\s)+✗ buck2\/tests\/targets\/rules\/python\/test:test_fail - test", + ) + + await expect_failure( + 
buck.test("fbcode//buck2/tests/targets/rules/python/test:test_fatal"), + stderr_regex=r"1 TESTS FATALS\n(\s)+⚠ buck2\/tests\/targets\/rules\/python\/test:test_fatal - test", + ) + + +@buck_test(inplace=True, skip_for_os=MAC_AND_WINDOWS) +async def test_python_test_with_remote_execution(buck: Buck) -> None: + await buck.test( + "fbcode//buck2/tests/targets/rules/python/test:test_remote_execution", + ) + await expect_failure( + buck.test( + "fbcode//buck2/tests/targets/rules/python/test:test_remote_execution_fail", + ), + stderr_regex=r"1 TESTS FAILED\n(\s)+✗ buck2\/tests\/targets\/rules\/python\/test:test_remote_execution_fail - test", + ) + await expect_failure( + buck.test( + "fbcode//buck2/tests/targets/rules/python/test:test_remote_execution_fatal", + ), + stderr_regex=r"1 TESTS FATALS\n(\s)+⚠ buck2\/tests\/targets\/rules\/python\/test:test_remote_execution_fatal - test", + ) + + +@buck_test(inplace=True, skip_for_os=MAC_AND_WINDOWS) +async def test_python_needed_coverage(buck: Buck) -> None: + await buck.test( + "fbcode//buck2/tests/targets/rules/python/needed_coverage:test_pass", + "fbcode//buck2/tests/targets/rules/python/needed_coverage:test_pass_specific_file", + ) + await expect_failure( + buck.test("fbcode//buck2/tests/targets/rules/python/needed_coverage:test_fail"), + stderr_regex="ERROR: Actual coverage [0-9.]*% is smaller than expected 100.% for file", + ) + await expect_failure( + buck.test( + "fbcode//buck2/tests/targets/rules/python/needed_coverage:test_fail_fractional" + ), + stderr_regex="ERROR: Actual coverage [0-9.]*% is smaller than expected [0-9.]*% for file", + ) + + +@buck_test(inplace=True, skip_for_os=MAC_AND_WINDOWS) +async def test_tests_attribute(buck: Buck) -> None: + lib_tests = await buck.test("fbcode//buck2/tests/targets/rules/python/test:lib") + assert "Pass 1" in remove_ansi_escape_sequences(lib_tests.stderr) + + +@buck_test(inplace=True, skip_for_os=MAC_AND_WINDOWS) +async def test_tests_attribute_ignore(buck: Buck) -> None: + lib_tests = await buck.test( + "fbcode//buck2/tests/targets/rules/python/test:lib", + "--ignore-tests-attribute", + ) + assert "NO TESTS RAN" in remove_ansi_escape_sequences(lib_tests.stderr) + + +@buck_test(inplace=True) +async def test_listing_failure(buck: Buck) -> None: + output = await expect_failure( + buck.test( + "fbcode//buck2/tests/targets/rules/python/broken:broken", + get_mode_from_platform(), + ), + ) + assert re.search(r"Listing Fail 1", output.stderr) + assert re.search( + r"1 LISTINGS FAILED\n(\s)+⚠ buck2\/tests\/targets\/rules\/python\/broken:broken\n", + output.stderr, + re.DOTALL, + ) + + +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_python_import_error_with_static_listing_builtin_runner( + buck: Buck, +) -> None: + output = await expect_failure( + buck.test( + "fbcode//buck2/tests/targets/rules/python/broken:broken_with_static_listing_builtin_runner", + get_mode_from_platform(), + ), + ) + + assert re.search("2 TESTS FATALS", output.stderr, re.DOTALL) + assert re.search( + r"test_\d \(buck2.tests.targets.rules.python.broken.broken_import.TestCase\)", + output.stderr, + re.DOTALL, + ) + assert not re.search("unittest.loader._FailedTest", output.stderr, re.DOTALL) + + +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_python_import_error_with_static_listing_new_provider(buck: Buck) -> None: + output = await expect_failure( + buck.test( + "fbcode//buck2/tests/targets/rules/python/broken:broken_with_static_listing_new_adapter", + get_mode_from_platform(), + ), + ) + assert 
re.search("2 TESTS FATALS", output.stderr, re.DOTALL) + assert not re.search("unittest.loader._FailedTest", output.stderr, re.DOTALL) + assert re.search( + r"test_\d \(buck2.tests.targets.rules.python.broken.broken_import.TestCase\)", + output.stderr, + re.DOTALL, + ) + + +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_python_import_error_with_static_listing_new_provider_bundle( + buck: Buck, +) -> None: + output = await expect_failure( + buck.test( + "fbcode//buck2/tests/targets/rules/python/broken:broken_with_static_listing_new_adapter_bundle", + get_mode_from_platform(), + ), + ) + assert re.search("1 TESTS FATALS", output.stderr, re.DOTALL) + assert re.search( + r"buck2\/tests\/targets\/rules\/python\/broken:broken_with_static_listing_new_adapter_bundle - main", + output.stderr, + re.DOTALL, + ) + + +@buck_test(inplace=True) +async def test_tests_dedupe(buck: Buck) -> None: + lib_tests = await buck.test( + "fbcode//buck2/tests/targets/rules/python/test:lib", + "fbcode//buck2/tests/targets/rules/python/test:tests_for_lib", + get_mode_from_platform(), + ) + assert "Pass 1" in remove_ansi_escape_sequences(lib_tests.stderr) + + +@pytest.mark.parametrize("build_filtered", [(True), (False)]) +@buck_test( + inplace=True, + skip_for_os=["windows"], # TODO(marwhal): Fix and enable on Windows +) +async def test_label_filtering(buck: Buck, build_filtered: bool) -> None: + cmd = ["fbcode//buck2/tests/targets/rules/label_test_filtering:"] + if build_filtered: + cmd.append("--build-filtered") + + await expect_failure(buck.test(*cmd), stderr_regex="1 TESTS FAILED") + + await expect_failure( + buck.test(*cmd, "--exclude", "label-pass"), stderr_regex="1 TESTS FAILED" + ) + + await expect_failure( + buck.test(*cmd, "--include", "label-fail"), stderr_regex="1 TESTS FAILED" + ) + + await expect_failure( + buck.test(*cmd, "--include", "label-fail", "--exclude", "label-pass"), + stderr_regex="1 TESTS FAILED", + ) + + await expect_failure( + buck.test( + *cmd, + ), + stderr_regex="1 TESTS FAILED", + ) + + await buck.test(*cmd, "--include", "label-pass") + + await buck.test(*cmd, "--exclude", "label-fail") + + await buck.test(*cmd, "--include", "!label-fail") + + await buck.test( + *cmd, "--include", "label-fail", "--exclude", "label-fail", "--always-exclude" + ) + + await buck.test(*cmd, "--include", "!label-fail", "label-fail") + + +@buck_test(inplace=True, skip_for_os=MAC_AND_WINDOWS) +async def test_name_filtering(buck: Buck) -> None: + await buck.test( + "fbcode//buck2/tests/targets/rules/python/test/...", "--", "test_env" + ) + + await expect_failure( + buck.test( + "fbcode//buck2/tests/targets/rules/python/test/...", "--", "test_fail" + ), + stderr_regex="1 TESTS FAILED", + ) + + +@buck_test(inplace=True) +async def test_compile_error(buck: Buck) -> None: + await expect_failure( + buck.test( + "fbcode//buck2/tests/targets/compile_error:cpp_test_compile_error", + get_mode_from_platform(), + ), + stderr_regex="#error Compile error.*1 BUILDS FAILED.*NO TESTS RAN", + ) + + +@buck_test( + inplace=True, + skip_for_os=["windows"], # TODO(marwhal): Fix and enable on Windows +) +async def test_cwd(buck: Buck) -> None: + await buck.test( + "fbcode//buck2/tests/targets/rules/sh_test:test_cwd", + ) + + +@buck_test( + inplace=True, + skip_for_os=["windows"], # TODO(marwhal): Fix and enable on Windows +) +async def test_default_label_filtering(buck: Buck) -> None: + await expect_failure( + buck.test( + "fbcode//buck2/tests/targets/rules/sh_test:test_fail_extended", + "--", + "--extended-tests", + 
), + stderr_regex="1 TESTS FAILED", + ) + + # Ignores it by default + await buck.test( + "fbcode//buck2/tests/targets/rules/sh_test:test_fail_extended", + ) + + +@buck_test( + inplace=True, + skip_for_os=["windows"], # TODO(marwhal): Fix and enable on Windows +) +async def test_stress_runs(buck: Buck) -> None: + await expect_failure( + buck.test( + "fbcode//buck2/tests/targets/rules/sh_test:test_fail", + "--", + "--stress-runs", + "10", + ), + stderr_regex="10 TESTS FAILED", + ) + + +# Not-in-place tests cannot run with deployed buck2 +if not is_deployed_buck2(): + + @buck_test(inplace=False, data_dir="testsof") + async def test_target_compatibility(buck: Buck) -> None: + # This excludes some tests + out = await buck.test( + "//...", + "--target-platforms", + "//:platform_default_tests", + ) + + assert "target incompatible node" in out.stderr + + await expect_failure( + buck.test( + "//:foo_extra_test", + "--target-platforms", + "//:platform_default_tests", + ), + stderr_regex="incompatible", + ) + + +# TODO(marwhal): Fix and enable on Windows +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_external_runner_test_info_options(buck: Buck) -> None: + await buck.test( + "fbcode//buck2/tests/targets/rules/external_runner_test_info/...", + ) + + +# TODO(marwhal): Fix and enable on Windows +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_allow_tests_on_re(buck: Buck) -> None: + await buck.test( + "fbcode//buck2/tests/targets/rules/external_runner_test_info/...", + "--unstable-allow-tests-on-re", + ) + + +@buck_test(inplace=True) +async def test_incompatible_tests_do_not_run_on_re(buck: Buck) -> None: + await expect_failure( + buck.test( + "fbcode//buck2/tests/targets/rules/external_runner_test_info:invalid_test", + "-c", + "external_runner_test_info.declare_invalid_test=1", + ), + stderr_regex="Trying to execute a `local_only = True` action on remote executor", + ) + + +@buck_test(inplace=True) +@env("TEST_MAKE_IT_FAIL", "1") +async def test_env_var_filtering(buck: Buck) -> None: + await buck.test( + "fbcode//buck2/tests/targets/rules/python/test:test", + get_mode_from_platform(), + ) + + await expect_failure( + buck.test( + "fbcode//buck2/tests/targets/rules/python/test:test", + get_mode_from_platform(), + "--", + "--env", + "TEST_MAKE_IT_FAIL=1", + ), + stderr_regex="1 TESTS FAILED", + ) + + +# TODO(marwhal): Fix and enable on Windows +@buck_test(inplace=True, skip_for_os=["windows"]) +@env("EXTRA_VAR", "foo") +async def test_prepare_for_local_execution_env(buck: Buck, tmp_path: Path) -> None: + out = tmp_path / "out" + await buck.test( + "fbcode//buck2/tests/targets/rules/python/test:test", + "--", + "--no-run-output-test-commands-for-fdb", + str(out), + ) + + with open(out) as f: + config = json.load(f) + + # Expect python/test:test target to support debugging. Executable field is populated only when debugging is supported. + assert "debuggers" in config + assert len(config["debuggers"]) > 0 + assert "executable" in config + env = config["executable"]["env"] + assert "PWD" in env + assert "EXTRA_VAR" not in env + + +@buck_test(inplace=True) +@env("BUCK2_TEST_TPX_USE_TCP", "true") +async def test_tcp(buck: Buck) -> None: + await buck.test( + "fbcode//buck2/tests/targets/rules/python/test:test", + get_mode_from_platform(), + ) + + +@buck_test(inplace=True) +async def test_passing_test_names_are_not_shown(buck: Buck) -> None: + # Passing test headers are not shown unless we pass --print-passing-details explicitly. 
+ tests = await buck.test( + "fbcode//buck2/tests/targets/rules/python/test:test", + get_mode_from_platform(), + ) + assert "Pass: buck2/tests/targets/rules/python/test:test - test" not in tests.stderr + + +@buck_test(inplace=True) +async def test_failing_test_names_are_shown(buck: Buck) -> None: + await expect_failure( + buck.test( + "fbcode//buck2/tests/targets/rules/python/test:test", + get_mode_from_platform(), + "--", + "--env", + "TEST_ENV=fail", + ), + stderr_regex="Fail: buck2/tests/targets/rules/python/test:test - test", + ) + + +@buck_test(inplace=True) +async def test_no_print_passing_details(buck: Buck) -> None: + # Without --print-passing-details, test headers and stdout are NOT displayed. + tests = await buck.test( + "fbcode//buck2/tests/targets/rules/python/test:test", + get_mode_from_platform(), + ) + assert "Pass: buck2/tests/targets/rules/python/test:test - test" not in tests.stderr + assert "TESTED!" not in tests.stderr + + +@buck_test(inplace=True) +async def test_print_passing_details(buck: Buck) -> None: + # With --print-passing-details, test headers and stdout are displayed. + tests = await buck.test( + "fbcode//buck2/tests/targets/rules/python/test:test", + get_mode_from_platform(), + "--", + "--print-passing-details", + ) + assert "Pass: buck2/tests/targets/rules/python/test:test - test" in tests.stderr + assert "TESTED!" in tests.stderr + + +@buck_test(inplace=True) +async def test_no_no_print_details(buck: Buck) -> None: + # Without --no-print-details the stack trace is displayed. + await expect_failure( + buck.test( + "fbcode//buck2/tests/targets/rules/python/test:test", + get_mode_from_platform(), + "--", + "--env", + "TEST_ENV=fail", + ), + stderr_regex="AssertionError: 41 != 42", + ) + + +@buck_test(inplace=True) +async def test_no_print_details(buck: Buck) -> None: + # With --no-print-details the stack trace is not displayed. + tests = await expect_failure( + buck.test( + "fbcode//buck2/tests/targets/rules/python/test:test", + "--", + "--env", + "TEST_ENV=fail", + "--no-print-details", + ), + ) + assert "AssertionError: 41 != 42" not in tests.stderr + + +@buck_test(inplace=True) +async def test_bundle_sharding(buck: Buck) -> None: + tests = await buck.test( + "fbcode//buck2/tests/targets/rules/python/test:multi_tests", + get_mode_from_platform(), + ) + assert "Pass 4" in tests.stderr + + +# TODO(marwhal): Fix and enable on Windows +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_cancellation(buck: Buck, tmp_path: Path) -> None: + """ + This test starts a test that writes its PID to a file then runs for 60 + seconds. We test cancellation by sending a CTRL+C as soon as a test + starts. We then check that the process exited, and that nothing else + started (or if anything did, that they stopped). + """ + + # Make sure we are ready to go + await buck.build( + "fbcode//buck2/tests/targets/rules/python/test:cancellation", + "--build-test-info", + ) + + tests = buck.test( + "fbcode//buck2/tests/targets/rules/python/test:cancellation", + "--", + "--stress-runs", + "10", + "--env", + "SLOW_DURATION=60", + "--env", + f"PIDS={tmp_path}", + ) + + tests = await tests.start() + + for _i in range(30): + await asyncio.sleep(1) + pids = os.listdir(tmp_path) + if pids: + break + else: + raise Exception("Tests never started") + + tests.send_signal(signal.SIGINT) + await tests.communicate() # Wait for the command to exit + + # Give stuff time to settle, PIDs don't necessarily disappear + # instantly. Also, verify that we are not starting more tests. 
+ await asyncio.sleep(5) + + # At this point, nothing should be alive. + pids = os.listdir(tmp_path) + for pid in pids: + try: + os.kill(int(pid), 0) + except OSError: + pass + else: + raise Exception(f"PID existed: {pid}") + + +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_cancellation_on_re(buck: Buck) -> None: + """ + This test starts a test on RE, waits for it to start, cancels, then starts + again and verifies we don't wait for the test to finish. + """ + + # Make sure we are ready to go + await buck.build( + "fbcode//buck2/tests/targets/rules/python/test:cancellation", + "--build-test-info", + ) + + tests = buck.test( + "fbcode//buck2/tests/targets/rules/python/test:cancellation", + "--unstable-force-tests-on-re", + "--remote-only", + "--no-remote-cache", + "--", + "--env", + "SLOW_DURATION=60", + "--env", + "PIDS=/tmp", + ) + + tests = await tests.start() + + async def has_started() -> bool: + try: + stdout = (await buck.log("what-ran")).stdout + except BuckException as e: + # The log is truncated here so this can exit non-zero. + stdout = e.stdout + + # what-ran returns things that started + return "test.run" in stdout + + for _i in range(30): + await asyncio.sleep(1) + if await has_started(): + break + else: + raise Exception("Tests never started") + + tests.send_signal(signal.SIGINT) + await tests.communicate() # Wait for the command to exit + + # Run a command that cannot execute concurrently and check it does not + # take 60 seconds to run, which means we went idle. + await asyncio.wait_for(buck.audit_config("-c", "foo.bar=True"), timeout=10) + + +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_timeout_local(buck: Buck) -> None: + await expect_failure( + buck.test( + "fbcode//buck2/tests/targets/rules/python/test:timeout", + "--local-only", + "--no-remote-cache", + "--", + "--env", + "SLOW_DURATION=60", + "--timeout=5", + ), + stderr_regex="Timeout: buck2/tests/targets/rules/python/test:timeout", + ) + + +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_timeout_re(buck: Buck) -> None: + await expect_failure( + buck.test( + "fbcode//buck2/tests/targets/rules/python/test:timeout", + "--unstable-allow-all-tests-on-re", + "--remote-only", + "--no-remote-cache", + "--", + "--env", + "SLOW_DURATION=60", + "--timeout=5", + ), + stderr_regex="Timeout: buck2/tests/targets/rules/python/test:timeout", + ) + + +if not is_deployed_buck2(): + + @buck_test(inplace=True, skip_for_os=["windows"]) + async def test_overall_timeout(buck: Buck) -> None: + """ + If an overall timeout is set, we expect that to result in OMITs + reported in Tpx, and Tpx does not set an error status for that. + + We're OK with that, we will report how many OMITs there were. + The caller is expected to be aware of how this feature works. + """ + await buck.test( + "fbcode//buck2/tests/targets/rules/python/test:timeout", + "--local-only", + "--no-remote-cache", + "--overall-timeout", + "5s", + "--", + "--env", + "SLOW_DURATION=60", + ) + + +@buck_test(inplace=True, skip_for_os=["windows"]) +@pytest.mark.parametrize( + "test", + ["requires_env", "requires_env_location"], +) +async def test_test_env(buck: Buck, test: str) -> None: + test = f"fbcode//buck2/tests/targets/rules/sh_test:{test}" + + await buck.test(test) + + # Check run also works. Note that those tests run from `fbcode` by default + # so no chdir needed here. 
+ await buck.run(test) + + +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_exit_code(buck: Buck) -> None: + result = await expect_failure( + buck.test("fbcode//buck2/tests/targets/rules/sh_test:test_fail") + ) + assert result.process.returncode == 32 + result = await expect_failure(buck.test("not//a/real:target")) + assert result.process.returncode == ExitCodeV2.USER_ERROR.value + + +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_skip_missing_targets(buck: Buck) -> None: + await expect_failure( + buck.test("fbcode//buck2/tests/targets/rules/python/test:not_a_thing"), + stderr_regex="Unknown target `not_a_thing`", + ) + + res = await buck.test( + "fbcode//buck2/tests/targets/rules/python/test:not_a_thing", + "--skip-missing-targets", + ) + + assert "Skipped 1 missing targets:" in res.stderr + + +@buck_test(inplace=True, skip_for_os=["darwin", "windows"]) +async def test_test_worker(buck: Buck) -> None: + worker_args = [ + "-c", + "build.use_persistent_workers=True", + "--local-only", + "--no-remote-cache", + ] + await buck.test( + *worker_args, "fbcode//buck2/tests/targets/rules/worker_grpc:worker_test" + ) diff --git a/tests/e2e/test/test_testname_formatting.py b/tests/e2e/test/test_testname_formatting.py new file mode 100644 index 0000000000000..b91cc17ad7736 --- /dev/null +++ b/tests/e2e/test/test_testname_formatting.py @@ -0,0 +1,67 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json +from typing import Any, List + +import pytest + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(inplace=True) +@pytest.mark.parametrize("adapter", ["testpilot", "builtin"]) +@pytest.mark.parametrize("listing", ["static", "dynamic"]) +@pytest.mark.parametrize("python_version", ["3.10", "3.12"]) +async def testname_formatting( + buck: Buck, + adapter: str, + listing: str, + python_version: str, +) -> None: + target = f"{adapter}_{listing}_{python_version}" + + if python_version == "3.12" and adapter == "new": + pytest.xfail("Test name formatting is different in 3.12") # pyre-ignore[29] + + await expect_failure( + buck.test( + f"fbcode//buck2/tests/targets/rules/python/test_name_formatting:{target}", + ) + ) + log = (await buck.log("show")).stdout.strip().splitlines() + actual_tests = get_events_test_names(log) + expected_tests = [ + "test_failure (buck2.tests.targets.rules.python.test_name_formatting.test_name_formatting.TestCase)", + "test_nested_test_class (buck2.tests.targets.rules.python.test_name_formatting.test_name_formatting.TestCase)", + "test_success (buck2.tests.targets.rules.python.test_name_formatting.test_name_formatting.TestCase)", + ] + assert expected_tests == actual_tests + + +######### +# Helpers +######### + + +def get_test_name_from_end_event(event: Any) -> List[str]: # pyre-ignore[2] + return event["Event"]["data"]["SpanEnd"]["data"]["TestEnd"]["suite"]["test_names"] + + +def get_events_test_names(log: List[str]) -> List[str]: + test_end_events = [json.loads(line) for line in log if "TestEnd" in line] + test_end_events = [ + test_name + for test_event in test_end_events + for test_name in get_test_name_from_end_event(test_event) + 
] + return sorted(test_end_events) diff --git a/tests/e2e/test_cpp_gen_cdb.py b/tests/e2e/test_cpp_gen_cdb.py new file mode 100644 index 0000000000000..d38792eee173f --- /dev/null +++ b/tests/e2e/test_cpp_gen_cdb.py @@ -0,0 +1,47 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json +import platform +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +# TODO(marwhal): Fix and enable on Windows +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_no_quotes(buck: Buck) -> None: + result = await buck.bxl( + "fbcode//tools/build/buck/bxl/cpp_lsp/cpp_gen_cdb.bxl:cpp_gen_cdb", + "--", + "--filename", + str( + buck.cwd.parent + / "fbcode/buck2/tests/targets/cpp_gen_cdb/basic/src/main.cpp" + ), + "--os", + platform.system().lower(), + ) + outputs = json.loads(result.stdout) + compdb_path = Path(outputs["compilationDatabasePath"]) / ".." / "compdb.json" + + with open(compdb_path) as f: + commands = json.load(f) + + # check that the define is present without any shell quotes + arguments = commands[0]["arguments"] + assert arguments.index("-DM_FOO_BAR=1") + + +# TODO(marwhal): Add this back one at least one test in this file passes on Windows +@buck_test(inplace=True) +async def test_noop(buck: Buck) -> None: + return diff --git a/tests/e2e/test_install.py b/tests/e2e/test_install.py new file mode 100644 index 0000000000000..fe421c2c5eb91 --- /dev/null +++ b/tests/e2e/test_install.py @@ -0,0 +1,163 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
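For context on the helpers in test_testname_formatting.py: buck.log("show") prints one JSON event per line, and TestEnd span-end events carry the suite's test names at the nested path that get_test_name_from_end_event walks. A reduced, fabricated event showing just that shape:

import json

# A TestEnd span-end event, cut down to the fields the helpers read.
line = json.dumps(
    {
        "Event": {
            "data": {
                "SpanEnd": {
                    "data": {"TestEnd": {"suite": {"test_names": ["test_success"]}}}
                }
            }
        }
    }
)

event = json.loads(line)
names = event["Event"]["data"]["SpanEnd"]["data"]["TestEnd"]["suite"]["test_names"]
assert names == ["test_success"]
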
+ +# pyre-strict + + +import json +import sys +from os.path import exists, islink +from pathlib import Path + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test, env +from buck2.tests.e2e_util.helper.utils import read_timestamps + + +# Currently installer grpc doesn't compile on Mac +def linux_only() -> bool: + return sys.platform == "linux" + + +if linux_only(): + + @buck_test(inplace=True) + async def test_success_install(buck: Buck, tmp_path: Path) -> None: + tmp_dir = tmp_path / "install_test" + tmp_dir.mkdir() + args = ["--dst", f"{tmp_dir}/"] + await buck.install( + "fbcode//buck2/tests/targets/rules/install:installer_test", "--", *args + ) + assert exists(f"{tmp_dir}/artifact_a") + assert exists(f"{tmp_dir}/artifact_b") + assert exists(f"{tmp_dir}/etc_hosts") + assert not islink(f"{tmp_dir}/etc_hosts") + + @buck_test(inplace=True) + @env("BUCK_LOG", "buck2_server_commands::commands::install=debug") + async def test_install_logging(buck: Buck, tmp_path: Path) -> None: + record = tmp_path / "record.json" + tmp_dir = tmp_path / "install_test" + tmp_dir.mkdir() + args = ["--dst", f"{tmp_dir}/"] + args += ["--delay", "1"] + await buck.install( + "fbcode//buck2/tests/targets/rules/install:installer_test", + "--unstable-write-invocation-record", + str(record), + "--", + *args, + ) + with open(record) as f: + invocation_record = json.load(f)["data"]["Record"]["data"][ + "InvocationRecord" + ] + + cmd_start_ts = ( + await read_timestamps(buck, "Event", "data", "SpanStart", "data", "Command") + )[0] + last_action_end_ts = ( + await read_timestamps( + buck, "Event", "data", "SpanEnd", "data", "ActionExecution" + ) + )[-1] + + time_to_last_action_ms = last_action_end_ts - cmd_start_ts + install_duration_ms = invocation_record["install_duration_us"] / 1000 + cmd_duration_ms = invocation_record["command_duration_us"] / 1000 + + # Check that installing takes at least as long as the added delay. + assert install_duration_ms > 1 * 1000 + # Check that we aren't double counting any time between install and + # building the last action. 
+ assert time_to_last_action_ms + install_duration_ms < cmd_duration_ms + + assert invocation_record["install_device_metadata"] == [ + {"entry": [{"key": "version", "value": "1"}]} + ] + + @buck_test(inplace=True) + async def test_artifact_fails_to_install(buck: Buck) -> None: + await expect_failure( + buck.install( + "fbcode//buck2/tests/targets/rules/install:installer_server_sends_error" + ), + stderr_regex=r"Failed to send artifacts to installer", + ) + + @buck_test(inplace=True) + async def test_fail_to_build_artifact(buck: Buck) -> None: + await expect_failure( + buck.install("fbcode//buck2/tests/targets/rules/install:bad_artifacts"), + stderr_regex=r"Failed to build", + ) + + @buck_test(inplace=True) + async def test_install_id_mismatch(buck: Buck) -> None: + await expect_failure( + buck.install( + "fbcode//buck2/tests/targets/rules/install:installer_server_sends_wrong_install_info_response" + ), + stderr_regex=r"doesn't match with the sent one", + ) + + @buck_test(inplace=True) + async def test_installer_needs_forwarded_params(buck: Buck) -> None: + await expect_failure( + buck.install( + "fbcode//buck2/tests/targets/rules/install:installer_server_requires_forwarded_params" + ), + stderr_regex=r"-r_-e_-d_-s_-x_-a_-i_-w_-u_-k_must_be_passed_to_installer", + ) + + @buck_test(inplace=True) + async def test_install_forwards_params(buck: Buck) -> None: + await buck.install( + "-r", + "-e", + "-d", + "-s", + "serial", + "-x", + "-a", + "activity", + "-i", + "intent", + "-w", + "-u", + "-k", + "fbcode//buck2/tests/targets/rules/install:installer_server_requires_forwarded_params", + ) + + @buck_test(inplace=True) + async def test_install_forwards_params_long_form(buck: Buck) -> None: + await buck.install( + "--run", + "--emulator", + "--device", + "--serial", + "serial", + "--all-devices", + "--activity", + "activity", + "--intent-uri", + "intent", + "--wait-for-debugger", + "--uninstall", + "--keep", + "fbcode//buck2/tests/targets/rules/install:installer_server_requires_forwarded_params", + ) + + +@buck_test(inplace=True) +async def test_fail_to_build_installer(buck: Buck) -> None: + await expect_failure( + buck.install("fbcode//buck2/tests/targets/rules/install:bad_installer_target"), + stderr_regex=r"Failed to build installer", + ) diff --git a/tests/e2e/test_linker_argsfile.py b/tests/e2e/test_linker_argsfile.py new file mode 100644 index 0000000000000..b4623d22af4df --- /dev/null +++ b/tests/e2e/test_linker_argsfile.py @@ -0,0 +1,30 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import os + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test, get_mode_from_platform + + +@buck_test(inplace=True) +async def test_linker_argsfile_valid(buck: Buck) -> None: + args = [ + "fbcode//buck2/tests/targets/rules/cxx/hello_world:welcome[linker.argsfile]", + "--show-full-output", + get_mode_from_platform(), + ] + result = await buck.build(*args) + output_dict = result.get_target_to_build_output() + assert len(output_dict) == 1 + output_path = next(iter(output_dict.values())) + # Ensure that the argsfile exists and is not empty. 
+ assert os.path.exists(output_path) + assert os.path.getsize(output_path) > 0 diff --git a/tests/e2e/test_lsp_fbsource.py b/tests/e2e/test_lsp_fbsource.py new file mode 100644 index 0000000000000..f47368c840e0b --- /dev/null +++ b/tests/e2e/test_lsp_fbsource.py @@ -0,0 +1,19 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(inplace=True) +async def test_lsp_starts_fbsource(buck: Buck) -> None: + async with await buck.lsp() as lsp: + # Will fail if the initialize response is not received + await lsp.init_connection() diff --git a/tests/e2e/test_prelude/BUCK b/tests/e2e/test_prelude/BUCK new file mode 100644 index 0000000000000..451133bf18bba --- /dev/null +++ b/tests/e2e/test_prelude/BUCK @@ -0,0 +1,8 @@ +load(":graph_utils_tests.bzl", "test_find_cycle") +load(":link_info_tests.bzl", "test_get_lib_output_style") + +oncall("build_infra") + +test_get_lib_output_style() + +test_find_cycle() diff --git a/tests/e2e/test_prelude/graph_utils_tests.bzl b/tests/e2e/test_prelude/graph_utils_tests.bzl new file mode 100644 index 0000000000000..0ed228b70cd8a --- /dev/null +++ b/tests/e2e/test_prelude/graph_utils_tests.bzl @@ -0,0 +1,19 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("@prelude//:asserts.bzl", "asserts") +load("@prelude//utils:graph_utils.bzl", "find_cycle") + +def assert_cycle(expected_nodes, cycle): + asserts.true(cycle != None) + for node in expected_nodes: + asserts.true(node in cycle) + +def test_find_cycle(): + asserts.equals(None, find_cycle({})) + asserts.equals(None, find_cycle({0: [1], 1: [2], 2: [3], 3: []})) + assert_cycle([0, 0, 1, 2], find_cycle({0: [1], 1: [2], 2: [0]})) diff --git a/tests/e2e/test_prelude/link_info_tests.bzl b/tests/e2e/test_prelude/link_info_tests.bzl new file mode 100644 index 0000000000000..1920ef397f61e --- /dev/null +++ b/tests/e2e/test_prelude/link_info_tests.bzl @@ -0,0 +1,56 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
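+
+# Exhaustively checks get_lib_output_style across the full
+# LinkStrategy x Linkage x PicBehavior matrix below.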
+ +load("@prelude//:asserts.bzl", "asserts") +load("@prelude//cxx:cxx_toolchain_types.bzl", "PicBehavior") +load( + "@prelude//linking:link_info.bzl", + "LibOutputStyle", + "LinkStrategy", + "get_lib_output_style", +) +load("@prelude//linking:types.bzl", "Linkage") + +def test_get_lib_output_style(): + # requested_link_style static + asserts.equals(LibOutputStyle("archive"), get_lib_output_style(LinkStrategy("static"), Linkage("static"), PicBehavior("supported"))) + asserts.equals(LibOutputStyle("archive"), get_lib_output_style(LinkStrategy("static"), Linkage("static"), PicBehavior("not_supported"))) + asserts.equals(LibOutputStyle("pic_archive"), get_lib_output_style(LinkStrategy("static"), Linkage("static"), PicBehavior("always_enabled"))) + + asserts.equals(LibOutputStyle("shared_lib"), get_lib_output_style(LinkStrategy("static"), Linkage("shared"), PicBehavior("supported"))) + asserts.equals(LibOutputStyle("shared_lib"), get_lib_output_style(LinkStrategy("static"), Linkage("shared"), PicBehavior("not_supported"))) + asserts.equals(LibOutputStyle("shared_lib"), get_lib_output_style(LinkStrategy("static"), Linkage("shared"), PicBehavior("always_enabled"))) + + asserts.equals(LibOutputStyle("archive"), get_lib_output_style(LinkStrategy("static"), Linkage("any"), PicBehavior("supported"))) + asserts.equals(LibOutputStyle("archive"), get_lib_output_style(LinkStrategy("static"), Linkage("any"), PicBehavior("not_supported"))) + asserts.equals(LibOutputStyle("pic_archive"), get_lib_output_style(LinkStrategy("static"), Linkage("any"), PicBehavior("always_enabled"))) + + # requested_link_style static_pic + asserts.equals(LibOutputStyle("pic_archive"), get_lib_output_style(LinkStrategy("static_pic"), Linkage("static"), PicBehavior("supported"))) + asserts.equals(LibOutputStyle("archive"), get_lib_output_style(LinkStrategy("static_pic"), Linkage("static"), PicBehavior("not_supported"))) + asserts.equals(LibOutputStyle("pic_archive"), get_lib_output_style(LinkStrategy("static_pic"), Linkage("static"), PicBehavior("always_enabled"))) + + asserts.equals(LibOutputStyle("shared_lib"), get_lib_output_style(LinkStrategy("static_pic"), Linkage("shared"), PicBehavior("supported"))) + asserts.equals(LibOutputStyle("shared_lib"), get_lib_output_style(LinkStrategy("static_pic"), Linkage("shared"), PicBehavior("not_supported"))) + asserts.equals(LibOutputStyle("shared_lib"), get_lib_output_style(LinkStrategy("static_pic"), Linkage("shared"), PicBehavior("always_enabled"))) + + asserts.equals(LibOutputStyle("pic_archive"), get_lib_output_style(LinkStrategy("static_pic"), Linkage("any"), PicBehavior("supported"))) + asserts.equals(LibOutputStyle("archive"), get_lib_output_style(LinkStrategy("static_pic"), Linkage("any"), PicBehavior("not_supported"))) + asserts.equals(LibOutputStyle("pic_archive"), get_lib_output_style(LinkStrategy("static_pic"), Linkage("any"), PicBehavior("always_enabled"))) + + # requested_link_style shared + asserts.equals(LibOutputStyle("pic_archive"), get_lib_output_style(LinkStrategy("shared"), Linkage("static"), PicBehavior("supported"))) + asserts.equals(LibOutputStyle("archive"), get_lib_output_style(LinkStrategy("shared"), Linkage("static"), PicBehavior("not_supported"))) + asserts.equals(LibOutputStyle("pic_archive"), get_lib_output_style(LinkStrategy("shared"), Linkage("static"), PicBehavior("always_enabled"))) + + asserts.equals(LibOutputStyle("shared_lib"), get_lib_output_style(LinkStrategy("shared"), Linkage("shared"), PicBehavior("supported"))) + 
asserts.equals(LibOutputStyle("shared_lib"), get_lib_output_style(LinkStrategy("shared"), Linkage("shared"), PicBehavior("not_supported"))) + asserts.equals(LibOutputStyle("shared_lib"), get_lib_output_style(LinkStrategy("shared"), Linkage("shared"), PicBehavior("always_enabled"))) + + asserts.equals(LibOutputStyle("shared_lib"), get_lib_output_style(LinkStrategy("shared"), Linkage("any"), PicBehavior("supported"))) + asserts.equals(LibOutputStyle("shared_lib"), get_lib_output_style(LinkStrategy("shared"), Linkage("any"), PicBehavior("not_supported"))) + asserts.equals(LibOutputStyle("shared_lib"), get_lib_output_style(LinkStrategy("shared"), Linkage("any"), PicBehavior("always_enabled"))) diff --git a/tests/e2e/test_rust_project.py b/tests/e2e/test_rust_project.py new file mode 100644 index 0000000000000..839ca9c608879 --- /dev/null +++ b/tests/e2e/test_rust_project.py @@ -0,0 +1,41 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import json +import os +import subprocess + +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(inplace=True) +async def test_rust_binary() -> None: + rust_project_bin = os.environ["RUST_PROJECT_BIN"] + + env = os.environ.copy() + env["BUCK2_HARD_ERROR"] = "false" + + result = subprocess.run( + [ + rust_project_bin, + "develop", + "--stdout", + "--pretty", + "fbcode//buck2/tests/targets/rules/rust/hello_world:welcome", + ], + stdout=subprocess.PIPE, + env=env, + ) + + json_generated = json.loads(result.stdout) + + assert "sysroot" in json_generated.keys() + assert "sysroot_src" in json_generated.keys() + assert "crates" in json_generated.keys() diff --git a/tests/e2e/test_starlark.py b/tests/e2e/test_starlark.py new file mode 100644 index 0000000000000..55d4c11b03af1 --- /dev/null +++ b/tests/e2e/test_starlark.py @@ -0,0 +1,34 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +@buck_test(inplace=True) +async def test_lint_buck2(buck: Buck) -> None: + # FIXME(JakobDegen): Reusing `project.ignore` for this is bad, `starlark + # lint` should have `-I` and `-X` flags like sapling + await buck.starlark( + "lint", + "buck2", + "-c", + "project.ignore=buck2/tests/e2e,buck2/tests/core", + ) + + +@buck_test(inplace=True) +async def test_typecheck_prelude_lightweight(buck: Buck) -> None: + await buck.starlark("typecheck", "buck2/prelude/prelude.bzl") + + +@buck_test(inplace=True) +async def test_typecheck_prelude_compiler(buck: Buck) -> None: + await buck.uquery("fbcode//buck2:buck2", "--unstable-typecheck") diff --git a/tests/e2e/test_tools.py b/tests/e2e/test_tools.py new file mode 100644 index 0000000000000..de93c35b8e709 --- /dev/null +++ b/tests/e2e/test_tools.py @@ -0,0 +1,69 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + + +import re +import sys + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test, env + + +# builds targets in an fbcode target configuration, unsupported on mac RE workers +def fbcode_linux_only() -> bool: + return sys.platform == "linux" + + +if fbcode_linux_only(): + + @buck_test(inplace=True, skip_for_os=["windows"]) + async def test_swig_pp(buck: Buck) -> None: + await buck.build( + "fbcode//security/ca/lib:CAUtils-py-gen", + ) + + @buck_test(inplace=True) + @env("BUCK2_KEEP_DEP_FILE_DIRECTORIES", "true") + async def test_arvr_cuda_dep_files(buck: Buck) -> None: + target = "fbsource//arvr/tools/buck/tests/cuda:test_cuda_arvr" + mode_file = "@fbsource//arvr/mode/platform010/cuda12_5/opt" + await buck.build(mode_file, target) + res = await buck.audit_dep_files(target, "cuda_compile", "main.cu", mode_file) + out = res.stdout + + # Check that we are tracking our dependency on stdlib headers, even + # though they are neither explicitly included nor tagged. + assert re.search( + "untagged.*arvr/third-party/toolchains/platform010/build/glibc", out + ) + + # Check that we are tracking directly-included headers + assert re.search("headers.*arvr/tools/buck/tests/cuda/direct_dep.h", out) + + # Check that we are tracking transitively-included headers + assert re.search("headers.*arvr/tools/buck/tests/cuda/transitive_dep.h", out) + + # Check that we are not tracking irrelevant headers + assert ( + re.search("headers.*arvr/tools/buck/tests/cuda/unrelated_dep.h", out) + is None + ) + + +@buck_test(inplace=True, skip_for_os=["windows"]) +async def test_swig_pp_unit(buck: Buck) -> None: + await buck.test( + "fbcode//tools/build/buck:swig_filter_test", + ) + + +@buck_test(inplace=True) +async def test_windows_dummy() -> None: + # None of the tests in this file pass on Windows and that upsets CI. 
+    pass
diff --git a/tests/e2e_util/BUCK b/tests/e2e_util/BUCK
new file mode 100644
index 0000000000000..e8204d516d15d
--- /dev/null
+++ b/tests/e2e_util/BUCK
@@ -0,0 +1,52 @@
+load("@fbcode_macros//build_defs:export_files.bzl", "export_file")
+load("@fbcode_macros//build_defs:python_library.bzl", "python_library")
+
+oncall("build_infra")
+
+python_library(
+    name = "assert_occurrences",
+    srcs = ["helper/assert_occurrences.py"],
+)
+
+python_library(
+    name = "utils",
+    srcs = ["helper/utils.py"],
+    deps = [
+        "//buck2/tests/e2e_util/api:api",
+    ],
+)
+
+python_library(
+    name = "golden",
+    srcs = ["helper/golden.py"],
+)
+
+python_library(
+    name = "utilities",
+    srcs = [
+        "asserts.py",
+        "buck_workspace.py",
+    ],
+    visibility = [
+        "PUBLIC",
+    ],
+    deps = [
+        "fbsource//third-party/pypi/decorator:decorator",
+        "fbsource//third-party/pypi/pytest:pytest",
+        "//buck2/tests/e2e_util/api:api",
+    ],
+)
+
+export_file(
+    name = "conftest.py",
+    src = "conftest.py",
+    visibility = ["PUBLIC"],
+)
+
+export_file(name = "test_bxl_template.py")
+
+export_file(name = "test_bxl_check_dependencies_template.py")
+
+export_file(name = "test_bxl_assert_dependencies_template.py")
+
+export_file(name = "test_bxl_audit_dependents_template.py")
diff --git a/tests/e2e_util/api/BUCK b/tests/e2e_util/api/BUCK
new file mode 100644
index 0000000000000..9326d6b3096e8
--- /dev/null
+++ b/tests/e2e_util/api/BUCK
@@ -0,0 +1,17 @@
+load("@fbcode_macros//build_defs:python_library.bzl", "python_library")
+# @oss-disable: load(":visibility.fb.bzl", "visibility")
+
+oncall("build_infra")
+
+visibility = ["PUBLIC"] # @oss-enable
+
+python_library(
+    name = "api",
+    srcs = glob(["*.py"]),
+    # This is not public API for buck invocation, but part of the buck2_e2e testing framework.
+    # We may want to make it public API, and when we do, let's do it explicitly:
+    # * clean
+    # * with tests
+    # * with public announcement
+    visibility = visibility,
+)
diff --git a/tests/e2e_util/api/buck.py b/tests/e2e_util/api/buck.py
new file mode 100755
index 0000000000000..cbe9e91de5b2d
--- /dev/null
+++ b/tests/e2e_util/api/buck.py
@@ -0,0 +1,980 @@
+#!/usr/bin/env fbpython
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-unsafe
+
+import os
+import uuid
+from asyncio import subprocess
+from pathlib import Path
+from typing import Dict, Iterable, Optional, Tuple
+
+from buck2.tests.e2e_util.api.buck_result import (
+    AuditConfigResult,
+    BuckException,
+    BuckExceptionType,
+    BuckResult,
+    BuckResultType,
+    BuildResult,
+    BxlResult,
+    TargetsResult,
+    TestResult,
+)
+from buck2.tests.e2e_util.api.executable import Executable
+from buck2.tests.e2e_util.api.lsp import LspClient
+from buck2.tests.e2e_util.api.process import Process
+from buck2.tests.e2e_util.api.result import E, R, Result
+from buck2.tests.e2e_util.api.subscribe import SubscribeClient
+
+
+class Buck(Executable):
+    """Instantiates a Buck object with an executable path"""
+
+    def __init__(
+        self,
+        path_to_executable: Path,
+        encoding: str,
+        env: Dict[str, str],
+        cwd: Optional[Path] = None,
+        isolation_prefix: Optional[str] = None,
+    ) -> None:
+        super().__init__(path_to_executable, encoding, env, cwd)
+        self.set_buckd(False)
+        self.isolation_prefix = isolation_prefix
+
+    def set_buckd(self, toggle: bool) -> None:
+        """
+        Sets the NO_BUCKD env var to the value of toggle:
+        0 (False) leaves the buck daemon enabled, 1 (True) disables it.
+        """
+        self._env["NO_BUCKD"] = str(int(toggle))
+
+    def set_isolation_prefix(self, isolation_prefix: str) -> None:
+        self.isolation_prefix = isolation_prefix
+
+    def _get_cwd(self, rel_cwd: Optional[Path]) -> Path:
+        if rel_cwd is None:
+            return self.cwd
+        abs_cwd = self.cwd / rel_cwd
+        assert abs_cwd.exists(), f"{abs_cwd} doesn't exist"
+        return abs_cwd
+
+    def build(
+        self,
+        *argv: str,
+        input: Optional[bytes] = None,
+        rel_cwd: Optional[Path] = None,
+        env: Optional[Dict[str, str]] = None,
+    ) -> Process[BuildResult, BuckException]:
+        """
+        Returns a Process with BuildResult type using a process
+        created with the build command and any
+        additional arguments.
+
+        rel_cwd: Optional Path specifying the working directory in which to run
+            the command, relative to the repository root.
+        env: Optional dictionary for environment variables to run command with.
+        """
+        args = list(argv)
+        if not any(arg.startswith("--build-report") for arg in args):
+            args.append("--build-report=-")
+
+        return self._run_buck_command(
+            "build",
+            *args,
+            input=input,
+            rel_cwd=rel_cwd,
+            env=env,
+            result_type=lambda proc, stdout, stderr, buck_build_id: BuildResult(
+                proc, stdout, stderr, buck_build_id, *args
+            ),
+            exception_type=BuckException,
+        )
+
+    def build_without_report(
+        self,
+        *argv: str,
+        input: Optional[bytes] = None,
+        rel_cwd: Optional[Path] = None,
+        env: Optional[Dict[str, str]] = None,
+    ) -> Process[BuckResult, BuckException]:
+        """
+        Returns a Process with BuckResult type using a process
+        created with the build command and any
+        additional arguments.
+
+        rel_cwd: Optional Path specifying the working directory in which to run
+            the command, relative to the repository root.
+        env: Optional dictionary for environment variables to run command with.
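+
+        Example (hypothetical target label, for illustration):
+            result = await buck.build_without_report("root//some:target")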
+ """ + + return self._run_buck_command( + "build", + *argv, + input=input, + rel_cwd=rel_cwd, + env=env, + result_type=BuckResult, + exception_type=BuckException, + ) + + def help( + self, + *argv: str, + input: Optional[bytes] = None, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> Process[BuckResult, BuckException]: + return self._run_buck_command( + "help", + *argv, + input=input, + rel_cwd=rel_cwd, + env=env, + result_type=BuckResult, + exception_type=BuckException, + ) + + def help_env( + self, + *argv: str, + input: Optional[bytes] = None, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> Process[BuckResult, BuckException]: + return self._run_buck_command( + "help-env", + *argv, + input=input, + rel_cwd=rel_cwd, + env=env, + result_type=BuckResult, + exception_type=BuckException, + ) + + def run( + self, + *argv: str, + input: Optional[bytes] = None, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> Process[BuckResult, BuckException]: + """ + Returns a Process with BuckResult type using a process + created with the build command and any + additional arguments + + rel_cwd: Optional Path specifying the workding directive to run + the command relative to the root. + env: Optional dictionary for environment variables to run command with. + """ + return self._run_buck_command( + "run", + *argv, + input=input, + rel_cwd=rel_cwd, + env=env, + result_type=BuckResult, + exception_type=BuckException, + ) + + def clean( + self, + *argv: str, + input: Optional[bytes] = None, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> Process[BuckResult, BuckException]: + """ + Returns a Process with BuckResult type using a process + created with the clean command and any + additional arguments + + rel_cwd: Optional Path specifying the workding directive to run + the command relative to the root. + env: Optional dictionary for environment variables to run command with. + """ + return self._run_buck_command( + "clean", + *argv, + input=input, + rel_cwd=rel_cwd, + env=env, + result_type=BuckResult, + exception_type=BuckException, + ) + + def root( + self, + *argv: str, + input: Optional[bytes] = None, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> Process[BuckResult, BuckException]: + """ + Returns a Process with BuckResult type using a process + created with the root command + + kind: --kind argument to the root command + rel_cwd: Optional Path specifying the workding directory to run + the command relative to the root. + env: Optional dictionary for environment variables to run command with. + """ + return self._run_buck_command( + "root", + *argv, + input=input, + rel_cwd=rel_cwd, + env=env, + result_type=BuckResult, + exception_type=BuckException, + ) + + def kill( + self, + input: Optional[bytes] = None, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> Process[BuckResult, BuckException]: + """ + Returns a Process with BuckResult type using a process + created with the kill command + + rel_cwd: Optional Path specifying the workding directive to run + the command relative to the root. + env: Optional dictionary for environment variables to run command with. 
+ """ + return self._run_buck_command( + "kill", + input=input, + rel_cwd=rel_cwd, + env=env, + result_type=BuckResult, + exception_type=BuckException, + ) + + def test( + self, + *argv: str, + test_executor: Optional[str] = None, + input: Optional[bytes] = None, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> Process[TestResult, BuckException]: + """ + Returns a Process with TestResult type using a process + created with the test command and any + additional arguments + + rel_cwd: Optional Path specifying the workding directive to run + the command relative to the root. + env: Optional dictionary for environment variables to run command with. + """ + xml_flag, test_output_file = self._create_xml_file() + + argv_list = list(argv) + argv_separator_idx = ( + argv_list.index("--") if "--" in argv_list else len(argv_list) + ) + buck_argv = argv_list[0:argv_separator_idx] + test_argv = argv_list[argv_separator_idx + 1 :] + + if test_executor is None: + test_executor = os.environ.get("BUCK2_TPX") + + if test_executor is not None: + buck_argv = [ + "--config", + "test.v2_test_executor={}".format(test_executor), + *buck_argv, + ] + + # Ignore disabled test status if using tpx. + if test_executor is None or "tpx" in test_executor: + test_argv += ["--run-disabled"] + + patched_argv = buck_argv + ["--"] + test_argv + + return self._run_buck_command( + "test", + *xml_flag, + *patched_argv, + input=input, + rel_cwd=rel_cwd, + env=env, + result_type=lambda proc, stdout, stderr, buck_build_id: TestResult( + proc, stdout, stderr, buck_build_id, self.cwd / test_output_file + ), + exception_type=BuckException, + ) + + def targets( + self, + *argv: str, + input: Optional[bytes] = None, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> Process[TargetsResult, BuckException]: + """ + Returns a Process with BuckResult type using a process + created with the targets command and any + additional arguments + + rel_cwd: Optional Path specifying the workding directive to run + the command relative to the root. + env: Optional dictionary for environment variables to run command with. + + TODO: Add a TargetsResult with structured output. + """ + + args = list(argv) + + return self._run_buck_command( + "targets", + *argv, + input=input, + rel_cwd=rel_cwd, + env=env, + result_type=lambda proc, stdout, stderr, buck_build_id: TargetsResult( + proc, stdout, stderr, buck_build_id, *args + ), + exception_type=BuckException, + ) + + def ctargets( + self, + *argv, + input: Optional[bytes] = None, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> Process[BuckResult, BuckException]: + return self._run_buck_command( + "ctargets", + *argv, + input=input, + rel_cwd=rel_cwd, + env=env, + result_type=BuckResult, + exception_type=BuckException, + ) + + def complete( + self, + *argv: str, + input: Optional[bytes] = None, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> Process[BuckResult, BuckException]: + """ + Returns a Process with BuckResult type using a process + created with the complete command and any + additional arguments. + + rel_cwd: Optional Path specifying the workding directive to run + the command relative to the root. + env: Optional dictionary for environment variables to run command with. 
+ """ + + my_env = {} if env is None else env.copy() + my_env["BUCK2_COMPLETION_TIMEOUT"] = "30000" + + return self._run_buck_command( + "complete", + *argv, + input=input, + rel_cwd=rel_cwd, + env=my_env, + result_type=BuckResult, + exception_type=BuckException, + ) + + def completion( + self, + *argv: str, + input: Optional[bytes] = None, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> Process[BuckResult, BuckException]: + """ + Returns a Process with BuckResult type using a process + created with the completion command and any + additional arguments. + + rel_cwd: Optional Path specifying the workding directive to run + the command relative to the root. + env: Optional dictionary for environment variables to run command with. + """ + return self._run_buck_command( + "completion", + *argv, + input=input, + rel_cwd=rel_cwd, + env=env, + result_type=BuckResult, + exception_type=BuckException, + ) + + def audit_config( + self, + *argv: str, + input: Optional[bytes] = None, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> Process[AuditConfigResult, BuckException]: + """ + Returns a Process with AuditConfigResult type using a process + created with the audit_config command + + rel_cwd: Optional Path specifying the workding directive to run + the command relative to the root. + env: Optional dictionary for environment variables to run command with. + """ + args = list(argv) + return self._run_buck_command( + "audit", + "config", + *args, + input=input, + rel_cwd=rel_cwd, + env=env, + result_type=lambda proc, stdout, stderr, buck_build_id: AuditConfigResult( + proc, stdout, stderr, buck_build_id, *args + ), + exception_type=BuckException, + ) + + def audit_configurations( + self, + *argv: str, + input: Optional[bytes] = None, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> Process[BuckResult, BuckException]: + args = list(argv) + return self._run_buck_command( + "audit", + "configurations", + *args, + input=input, + rel_cwd=rel_cwd, + env=env, + result_type=BuckResult, + exception_type=BuckException, + ) + + def audit_dep_files( + self, + *argv: str, + input: Optional[bytes] = None, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> Process[BuckResult, BuckException]: + args = list(argv) + return self._run_buck_command( + "audit", + "dep-files", + *args, + input=input, + rel_cwd=rel_cwd, + env=env, + result_type=BuckResult, + exception_type=BuckException, + ) + + def audit_visibility( + self, + *argv: str, + input: Optional[bytes] = None, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> Process[BuckResult, BuckException]: + args = list(argv) + return self._run_buck_command( + "audit", + "visibility", + *args, + input=input, + rel_cwd=rel_cwd, + env=env, + result_type=BuckResult, + exception_type=BuckException, + ) + + def audit( + self, + *argv: str, + input: Optional[bytes] = None, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> Process[BuckResult, BuckException]: + args = list(argv) + return self._run_buck_command( + "audit", + *args, + input=input, + rel_cwd=rel_cwd, + env=env, + result_type=BuckResult, + exception_type=BuckException, + ) + + def audit_output( + self, + *argv: str, + input: Optional[bytes] = None, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> Process[BuckResult, BuckException]: + args = list(argv) + return self._run_buck_command( + "audit", + "output", 
+            *args,
+            input=input,
+            rel_cwd=rel_cwd,
+            env=env,
+            result_type=BuckResult,
+            exception_type=BuckException,
+        )
+
+    def query(
+        self,
+        *argv: str,
+        input: Optional[bytes] = None,
+        rel_cwd: Optional[Path] = None,
+        env: Optional[Dict[str, str]] = None,
+    ) -> Process[BuckResult, BuckException]:
+        return self._query("query", *argv, input=input, rel_cwd=rel_cwd, env=env)
+
+    def cquery(
+        self,
+        *argv: str,
+        input: Optional[bytes] = None,
+        rel_cwd: Optional[Path] = None,
+        env: Optional[Dict[str, str]] = None,
+    ) -> Process[BuckResult, BuckException]:
+        return self._query("cquery", *argv, input=input, rel_cwd=rel_cwd, env=env)
+
+    def uquery(
+        self,
+        *argv: str,
+        input: Optional[bytes] = None,
+        rel_cwd: Optional[Path] = None,
+        env: Optional[Dict[str, str]] = None,
+    ) -> Process[BuckResult, BuckException]:
+        return self._query("uquery", *argv, input=input, rel_cwd=rel_cwd, env=env)
+
+    def aquery(
+        self,
+        *argv: str,
+        input: Optional[bytes] = None,
+        rel_cwd: Optional[Path] = None,
+        env: Optional[Dict[str, str]] = None,
+    ) -> Process[BuckResult, BuckException]:
+        return self._query("aquery", *argv, input=input, rel_cwd=rel_cwd, env=env)
+
+    def _query(
+        self,
+        query_command: str,
+        *argv: str,
+        input: Optional[bytes] = None,
+        rel_cwd: Optional[Path] = None,
+        env: Optional[Dict[str, str]] = None,
+    ) -> Process[BuckResult, BuckException]:
+        """
+        Returns a Process with BuckResult type using a process
+        created with the query command and any
+        additional arguments
+
+        rel_cwd: Optional Path specifying the working directory in which to run
+            the command, relative to the repository root.
+        env: Optional dictionary for environment variables to run command with.
+
+        TODO: Add a QueryResult with structured output.
+        """
+        return self._run_buck_command(
+            query_command,
+            *argv,
+            input=input,
+            rel_cwd=rel_cwd,
+            env=env,
+            result_type=BuckResult,
+            exception_type=BuckException,
+        )
+
+    def bxl(
+        self,
+        *argv: str,
+        input: Optional[bytes] = None,
+        rel_cwd: Optional[Path] = None,
+        env: Optional[Dict[str, str]] = None,
+    ) -> Process[BxlResult, BuckException]:
+        args = list(argv)
+        return self._run_buck_command(
+            "bxl",
+            *args,
+            input=input,
+            rel_cwd=rel_cwd,
+            env=env,
+            result_type=lambda proc, stdout, stderr, buck_build_id: BxlResult(
+                proc, stdout, stderr, buck_build_id, *args
+            ),
+            exception_type=BuckException,
+        )
+
+    def docs(
+        self,
+        *args: str,
+        input: Optional[bytes] = None,
+        rel_cwd: Optional[Path] = None,
+        env: Optional[Dict[str, str]] = None,
+    ) -> Process[BuckResult, BuckException]:
+        return self._run_buck_command(
+            "docs",
+            *args,
+            input=input,
+            rel_cwd=rel_cwd,
+            env=env,
+            result_type=BuckResult,
+            exception_type=BuckException,
+        )
+
+    def profile(
+        self,
+        *args: str,
+        input: Optional[bytes] = None,
+        rel_cwd: Optional[Path] = None,
+        env: Optional[Dict[str, str]] = None,
+    ) -> Process[BuckResult, BuckException]:
+        """
+        Returns a Process with BuckResult type using a process created with the
+        profile command and any additional arguments
+
+        args: Arguments to pass to buck2 profile.
+        rel_cwd: Optional Path specifying the working directory in which to run
+            the command, relative to the repository root.
+        env: Optional dictionary for environment variables to run command with.
+ """ + return self._run_buck_command( + "profile", + *args, + input=input, + rel_cwd=rel_cwd, + env=env, + result_type=BuckResult, + exception_type=BuckException, + ) + + def debug( + self, + *args: str, + input: Optional[bytes] = None, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> Process[BuckResult, BuckException]: + """ + Returns a Process with BuckResult type using a process created with the + debug command and any additional arguments + """ + return self._run_buck_command( + "debug", + *args, + input=input, + rel_cwd=rel_cwd, + env=env, + result_type=BuckResult, + exception_type=BuckException, + ) + + def starlark( + self, + *args: str, + input: Optional[bytes] = None, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> Process[BuckResult, BuckException]: + """ + Returns a Process with BuckResult type using a process created with the + debug command and any additional arguments + """ + return self._run_buck_command( + "starlark", + *args, + input=input, + rel_cwd=rel_cwd, + env=env, + result_type=BuckResult, + exception_type=BuckException, + ) + + def install( + self, + *argv: str, + input: Optional[bytes] = None, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> Process[BuckResult, BuckException]: + return self._run_buck_command( + "install", + *argv, + input=input, + rel_cwd=rel_cwd, + env=env, + result_type=BuckResult, + exception_type=BuckException, + ) + + def log( + self, + *args: str, + input: Optional[bytes] = None, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> Process[BuckResult, BuckException]: + return self._run_buck_command( + "log", + *args, + input=input, + rel_cwd=rel_cwd, + env=env, + result_type=BuckResult, + exception_type=BuckException, + ) + + def status( + self, + *args: str, + input: Optional[bytes] = None, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> Process[BuckResult, BuckException]: + return self._run_buck_command( + "status", + *args, + input=input, + rel_cwd=rel_cwd, + env=env, + result_type=BuckResult, + exception_type=BuckException, + ) + + def server( + self, + *args: str, + input: Optional[bytes] = None, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> Process[BuckResult, BuckException]: + return self._run_buck_command( + "server", + *args, + input=input, + rel_cwd=rel_cwd, + env=env, + result_type=BuckResult, + exception_type=BuckException, + ) + + def expand_external_cell( + self, + *args: str, + input: Optional[bytes] = None, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> Process[BuckResult, BuckException]: + return self._run_buck_command( + "expand-external-cell", + *args, + input=input, + rel_cwd=rel_cwd, + env=env, + result_type=BuckResult, + exception_type=BuckException, + ) + + async def lsp( + self, + *args: str, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> LspClient: + process = await self._run_buck_command( + "lsp", + *args, + input=None, + stdin=subprocess.PIPE, + rel_cwd=rel_cwd, + env=env, + result_type=BuckResult, + exception_type=BuckException, + intercept_stderr=False, + ).start() + cwd = self._get_cwd(rel_cwd) + return LspClient(process, cwd) + + async def subscribe( + self, + *args: str, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> SubscribeClient: + process = self._run_buck_command( + "subscribe", + "--unstable-json", + *args, + 
+            input=None,
+            stdin=subprocess.PIPE,
+            rel_cwd=rel_cwd,
+            env=env,
+            result_type=BuckResult,
+            exception_type=BuckException,
+            intercept_stderr=False,
+        )
+        client = await SubscribeClient.create(process)
+        return client
+
+    def _run_buck_command(
+        self,
+        cmd: str,
+        *argv: str,
+        input: Optional[bytes],
+        rel_cwd: Optional[Path],
+        env: Optional[Dict[str, str]],
+        result_type: BuckResultType[R],
+        exception_type: BuckExceptionType[E],
+        stdin: Optional[int] = None,
+        intercept_stderr: bool = True,
+    ) -> Process[R, E]:
+        """
+        Returns a process created from the executable path,
+        the command, and any additional arguments
+        """
+        buck_build_id = str(uuid.uuid1())
+        command_env = self._get_command_env(env)
+        if "BUCK_WRAPPER_UUID" not in command_env:
+            command_env["BUCK_WRAPPER_UUID"] = buck_build_id
+
+        cmd_to_run = [str(self.path_to_executable), cmd]
+        if self.isolation_prefix:
+            cmd_to_run = [
+                cmd_to_run[0],
+                "--isolation-dir",
+                str(self.isolation_prefix),
+                *cmd_to_run[1:],
+            ]
+        cmd_to_run.extend(argv)
+        cmd_to_run = self._get_windows_cmd_options() + cmd_to_run
+        stderr = subprocess.PIPE if intercept_stderr else None
+        return Process(
+            cmd_to_run=cmd_to_run,
+            working_dir=self._get_cwd(rel_cwd),
+            env=command_env,
+            input=input,
+            stdin=stdin,
+            stdout=subprocess.PIPE,
+            stderr=stderr,
+            result_type=lambda proc, stdout, stderr: result_type(
+                proc, stdout, stderr, buck_build_id
+            ),
+            exception_type=lambda cmd_to_run, working_dir, env, proc, stdout, stderr: exception_type(
+                cmd_to_run, working_dir, env, proc, stdout, stderr, buck_build_id
+            ),
+            encoding=self.encoding,
+        )
+
+    def run_buck_command(
+        self,
+        *argv: str,
+        input: Optional[bytes] = None,
+        rel_cwd: Optional[Path] = None,
+        env: Optional[Dict[str, str]] = None,
+    ) -> Process[BuckResult, BuckException]:
+        return self._run_buck_command(
+            *argv,
+            input=input,
+            rel_cwd=rel_cwd,
+            env=env,
+            result_type=BuckResult,
+            exception_type=BuckException,
+        )
+
+    def _create_xml_file(self, *argv: str) -> Tuple[Iterable[str], str]:
+        """
+        Creates an XML file used for the test output. Ensures an XML file
+        is created even if one is not specified via --xml.
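+
+        For example, calling this with no argv yields
+        (["--xml", "testOutput.xml"], "testOutput.xml"), while argv of
+        ("--xml", "out.xml") yields ([], "out.xml").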
+ """ + xml_flag = [""] + test_output_file = "testOutput.xml" + # ensures xml file is always generated + if "--xml" not in argv: + xml_flag = ["--xml", "testOutput.xml"] + else: + test_output_file = argv[argv.index("--xml") + 1] + return xml_flag, test_output_file + + def execute( + self, + *argv: str, + env: Optional[Dict[str, str]] = None, + input: Optional[bytes] = None, + stdin: Optional[int] = None, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) -> Process[Result, Exception]: + raise NotImplementedError("Buck does not use execute.") + + def rage( + self, + *argv: str, + input: Optional[bytes] = None, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> Process[BuckResult, BuckException]: + return self._run_buck_command( + "rage", + *argv, + input=input, + rel_cwd=rel_cwd, + env=env, + result_type=BuckResult, + exception_type=BuckException, + ) + + def explain( + self, + *argv: str, + input: Optional[bytes] = None, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> Process[BuckResult, BuckException]: + return self._run_buck_command( + "explain", + *argv, + input=input, + rel_cwd=rel_cwd, + env=env, + result_type=BuckResult, + exception_type=BuckException, + ) + + def init( + self, + *argv: str, + input: Optional[bytes] = None, + rel_cwd: Optional[Path] = None, + env: Optional[Dict[str, str]] = None, + ) -> Process[BuckResult, BuckException]: + return self._run_buck_command( + "init", + *argv, + input=input, + rel_cwd=rel_cwd, + env=env, + result_type=BuckResult, + exception_type=BuckException, + ) diff --git a/tests/e2e_util/api/buck_result.py b/tests/e2e_util/api/buck_result.py new file mode 100755 index 0000000000000..33398adfc8229 --- /dev/null +++ b/tests/e2e_util/api/buck_result.py @@ -0,0 +1,429 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+
+# pyre-unsafe
+
+import json
+import textwrap
+import xml.etree.ElementTree as ET
+from asyncio import subprocess
+from collections import defaultdict
+from enum import auto, Enum
+from pathlib import Path
+from typing import Callable, Dict, Iterable, List, Tuple
+
+from buck2.tests.e2e_util.api.result import E, R, Result
+
+
+BuckResultType = Callable[[subprocess.Process, str, str, str], R]
+BuckExceptionType = Callable[
+    [Iterable[str], Path, Dict[str, str], subprocess.Process, str, str, str], E
+]
+
+
+class ExitCode(Enum):
+    """Enum for exit codes of Buck1"""
+
+    SUCCESS = 0
+    BUILD_ERROR = 1
+    BUSY = 2
+    COMMANDLINE_ERROR = 3
+    NOTHING_TO_DO = 4
+    PARSE_ERROR = 5
+    RUN_ERROR = 6
+    FATAL_GENERIC = 10
+    FATAL_BOOTSTRAP = 11
+    FATAL_OOM = 12
+    FATAL_IO = 13
+    FATAL_DISK_FULL = 14
+    FIX_FAILED = 16
+    TEST_ERROR = 32
+    TEST_NOTHING = 64
+    SIGNAL_INTERRUPT = 130
+
+
+class ExitCodeV2(Enum):
+    """Enum for exit codes of Buck2"""
+
+    SUCCESS = 0
+    UNKNOWN_ERROR = 1
+    INFRA_ERROR = 2
+    USER_ERROR = 3
+    DAEMON_CONNECTION_FAILURE = 11
+
+
+class AutoName(Enum):
+    """Makes the value of the Enum its name"""
+
+    @staticmethod
+    def _generate_next_value_(name, start, count, last_values):
+        return name
+
+
+class ResultType(AutoName):
+    """Enum for result types of buck test"""
+
+    DRY_RUN = auto()
+    EXCLUDED = auto()
+    DISABLED = auto()
+    ASSUMPTION_VIOLATION = auto()
+    FAILURE = auto()
+    SUCCESS = auto()
+
+
+class BuckResult(Result):
+    """
+    Represents a buck process that has finished running and succeeded.
+    If the buck process fails, the framework raises a BuckException instead.
+    """
+
+    def __init__(
+        self, process: subprocess.Process, stdout: str, stderr: str, buck_build_id: str
+    ) -> None:
+        super().__init__(process, stdout, stderr)
+        self.buck_build_id = buck_build_id
+
+
+class BuckException(Exception, BuckResult):
+    """Represents a Buck process that has finished running and failed."""
+
+    def __init__(
+        self,
+        cmd_to_run: Iterable[str],
+        working_dir: Path,
+        env: Dict[str, str],
+        process: subprocess.Process,
+        stdout: str,
+        stderr: str,
+        buck_build_id: str,
+    ) -> None:
+        cmd = " ".join(str(e) for e in cmd_to_run)
+        if stdout != "":
+            indented_stdout = textwrap.indent(stdout, " " * 8)
+            rendered_stdout = "\n\n" + indented_stdout + "\n"
+        else:
+            rendered_stdout = ""
+        rendered_stderr = "\n\n" + textwrap.indent(stderr, " " * 8) + "\n"
+        error_msg = (
+            textwrap.dedent(
+                f"""
+                {cmd}
+                {working_dir}
+                """
+            )
+            + rendered_stdout
+            + rendered_stderr
+        )
+        Exception.__init__(
+            self,
+            error_msg,
+        )
+        BuckResult.__init__(self, process, stdout, stderr, buck_build_id)
+
+    def check_returncode(self) -> None:
+        assert self.process.returncode != 0
+
+    def get_exit_code(self) -> ExitCode:
+        """Returns the exit code of a Buck Result when it exits"""
+        # See https://docs.python.org/3/library/subprocess.html#subprocess.Popen.returncode
+        # for negative return code.
+        assert self.process.returncode is not None
+        if self.process.returncode < 0:  # type: ignore
+            return ExitCode(128 - self.process.returncode)  # type: ignore
+        return ExitCode(self.process.returncode)
+
+    def get_exit_code_v2(self) -> ExitCodeV2:
+        """Returns the exit code of a Buck Result when it exits"""
+        # See https://docs.python.org/3/library/subprocess.html#subprocess.Popen.returncode
+        # for negative return code.
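+        # A returncode of -N means the process was terminated by signal N; per
+        # the usual shell convention that maps to exit code 128 + N, which is
+        # what 128 - returncode computes for a negative returncode.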
+        assert self.process.returncode is not None
+        if self.process.returncode < 0:  # type: ignore
+            return ExitCodeV2(128 - self.process.returncode)  # type: ignore
+        return ExitCodeV2(self.process.returncode)
+
+
+class BuildReport:
+    """
+    A parsed JSON representation of buck v2 build output on stdout.
+    The build report is produced on v2 builds by passing the --build-report flag.
+    Does not support buck v1.
+
+    Attributes:
+        build_report: A JSON dictionary parsed representation of stdout build report.
+        root: A Path to the project root. Parsed from build report.
+        results: A dictionary mapping targets to a tuple of output paths.
+            Parsed from build report.
+    """
+
+    def __init__(self, parsed) -> None:
+        assert isinstance(parsed, Dict)
+        self.build_report: Dict[str, ...] = parsed  # type: ignore
+        self.root = Path(self.build_report["project_root"])  # type: ignore
+        self.results: Dict[str, Dict[str, ...]] = self.build_report["results"]
+
+    def _to_abs_paths(self, paths: Tuple[Path, ...]) -> Tuple[Path, ...]:
+        return tuple(self.root / path for path in paths)
+
+    def outputs_for_target(
+        self, target: str, sub_target: str = "DEFAULT", rel_path: bool = False
+    ) -> Tuple[Path, ...]:
+        assert "//" in target
+        paths: Tuple[Path, ...]
+        if target.startswith("//"):
+            # Get the full target "cell//target" that matches this target.
+            matched_outputs = [
+                entry["outputs"][sub_target]
+                for t, entry in self.results.items()
+                if t.endswith(target)
+            ]
+            assert (
+                len(matched_outputs) > 0
+            ), f"Found no match for target {target} in {self.build_report}"
+            assert (
+                len(matched_outputs) == 1
+            ), f"Found different cells for target {target} in {self.results}"
+            paths = matched_outputs[0]
+        else:
+            paths = self.results[target]["outputs"][sub_target]
+        if rel_path:
+            return tuple(Path(p) for p in paths)
+        return self._to_abs_paths(paths)
+
+    def output_for_target(
+        self, target: str, sub_target: str = "DEFAULT", rel_path: bool = False
+    ) -> Path:
+        paths = self.outputs_for_target(target, sub_target, rel_path)
+        assert len(paths) == 1, f"Found more than 1 output for target {target}: {paths}"
+        return paths[0]
+
+
+LOG_COMPUTE_KEY = "build_api::actions::calculation: compute"
+
+
+class TargetsResult(BuckResult):
+    """Represents a Buck process of a targets command that has finished running"""
+
+    def __init__(
+        self,
+        process: subprocess.Process,
+        stdout: str,
+        stderr: str,
+        buck_build_id: str,
+        *argv: str,
+    ) -> None:
+        self.args = " ".join(argv)
+        super().__init__(process, stdout, stderr, buck_build_id)
+
+    def get_target_to_build_output(self) -> Dict[str, str]:
+        """
+        Returns a dict of the target and its output file in buck-out
+        """
+        target_to_output = {}
+        assert (
+            "--show-output" in self.args or "--show-full-output" in self.args
+        ), "Must add --show-output or --show-full-output arg to get targets output"
+        show_output = self.stdout.strip().splitlines()
+        for line in show_output:
+            output_mapping = line.split()
+            assert len(output_mapping) <= 2, "Output mapping should have at most 2 entries"
+            target = output_mapping[0]
+            if len(output_mapping) == 1:
+                target_to_output[target] = ""
+            else:
+                target_to_output[target] = output_mapping[1]
+        return target_to_output
+
+
+class BuildResult(BuckResult):
+    """Represents a Buck process of a build command that has finished running"""
+
+    def __init__(
+        self,
+        process: subprocess.Process,
+        stdout: str,
+        stderr: str,
+        buck_build_id: str,
+        *argv: str,
+    ) -> None:
+        self.args = " ".join(argv)
+        super().__init__(process, stdout, stderr, buck_build_id)
+
+    def get_target_to_build_output(self) -> Dict[str, str]:
+        """
+        Returns a dict of the build target and the file created in buck-out.
+        The command prints each build target followed by the path to its
+        buck-out file on stdout.
+        """
+        target_to_output = {}
+        assert (
+            "--show-output" in self.args or "--show-full-output" in self.args
+        ), "Must add --show-output or --show-full-output arg to get build output"
+        show_output = self.stdout.strip().splitlines()
+        if "--build-report=-" in self.args:
+            # When mixing --show-output with --build-report=-, the first line is
+            # the build report and the remaining ones are the results; we only
+            # want the results for the purpose of this function, so we skip the report.
+            show_output = show_output[1:]
+        for line in show_output:
+            output_mapping = line.split()
+            assert len(output_mapping) <= 2, "Output mapping should have at most 2 entries"
+            target = output_mapping[0]
+            if len(output_mapping) == 1:
+                target_to_output[target] = ""
+            else:
+                target_to_output[target] = output_mapping[1]
+        return target_to_output
+
+    def get_build_report(self) -> BuildReport:
+        """
+        Returns a BuildReport object for a buck v2 build invoked with --build-report.
+        Looks for a '{' and parses the build stdout starting from '{' as JSON.
+        """
+        try:
+            start = self.stdout.index("{")
+            end = self.stdout.index("\n", start)
+            parsed = json.loads(self.stdout[start:end])
+            return BuildReport(parsed)
+        except Exception as e:
+            print(f"stdout: {self.stdout}\nstderr: {self.stderr}")
+            raise e
+
+    def get_action_to_cache_miss_count(self) -> Dict[str, int]:
+        """
+        Returns a dictionary of action key to number of cache misses.
+        Populates this dictionary by going through stdout looking for logs of compute calls.
+
+        Currently, there is no unique identifier for the action in buck2, so the action key
+        is just a tuple of configured target name and analysis id.
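+
+        For example (illustrative log line), a stdout line ending in
+        "build_api::actions::calculation: compute some//target:name (analysis_id)"
+        increments the count for "some//target:name (analysis_id)".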
+ """ + action_to_cache_miss_count: Dict[str, int] = defaultdict(int) + for line in self.stdout.splitlines(): + if LOG_COMPUTE_KEY in line: + target = line.split(LOG_COMPUTE_KEY)[-1].strip() + action_to_cache_miss_count[target] += 1 + return dict(action_to_cache_miss_count) + + +class TestResultSummary: + """Represents a summary of a test result""" + + def __init__(self, name: str, status: str, result_type: ResultType) -> None: + self.name: str = name + self.status: str = status + self.result_type: ResultType = ResultType(result_type) + + def get_name(self) -> str: + """Returns the name of the test""" + return self.name + + def get_status(self) -> str: + """Returns the status of the test""" + return self.status + + def get_result_type(self) -> ResultType: + """Returns the result type of the test""" + return self.result_type + + +class TestResult(BuckResult): + """Represents a Buck process of a test command that has finished running""" + + def __init__( + self, + process: subprocess.Process, + stdout: str, + stderr: str, + buck_build_id: str, + test_output_file: Path, + ) -> None: + super().__init__(process, stdout, stderr, buck_build_id) + self.test_root = ( + ET.parse(str(test_output_file)).getroot() + if test_output_file.exists() + else None + ) + + def get_tests(self) -> List[TestResultSummary]: + """Returns a list of test result summaries""" + if not self.test_root: + return [] + test_list = [] + for tests in self.test_root: + for testresult in tests.iter("testresult"): + name = testresult.get("name") + status = testresult.get("status") + testresult_type = testresult.get("type") + assert testresult_type in ( + e.value for e in ResultType + ), f"Type {testresult_type} is not a ResultType Enum" + result_type = ResultType(testresult_type) + test_result_summary = TestResultSummary(name, status, result_type) + test_list.append(test_result_summary) + return test_list + + def get_success_count(self) -> int: + """Returns the number of successful tests""" + return self._get_count(ResultType.SUCCESS) + + def get_failure_count(self) -> int: + """Returns the number of failed tests""" + return self._get_count(ResultType.FAILURE) + + def get_skipped_count(self) -> int: + """Returns the number of tests skipped""" + return self._get_count(ResultType.EXCLUDED) + + def _get_count(self, result_type: ResultType) -> int: + """Returns the number of tests with the given status""" + return sum( + 1 for test in self.get_tests() if test.get_result_type() == result_type + ) + + +class AuditConfigResult(BuckResult): + """Represents a Buck process of an audit config command that has finished running""" + + def __init__( + self, + process: subprocess.Process, + stdout: str, + stderr: str, + buck_build_id: str, + *argv: str, + ) -> None: + self.args = " ".join(argv) + super().__init__(process, stdout, stderr, buck_build_id) + + def get_json(self) -> Dict[str, str]: + """Returns a dict of the json sent back by buck""" + assert ( + "--style=json" in self.args or "--style json" in self.args + ), "Must add --style=json or `--style json` arg to get json output" + try: + start = self.stdout.index("{") + audit_json = self.stdout[start:].strip() + parsed = json.loads(audit_json) + return parsed + except Exception as e: + print(f"stdout: {self.stdout}\nstderr: {self.stderr}") + raise e + + +class BxlResult(BuckResult): + """Represents a Buck process of a bxl command that has finished running""" + + def __init__( + self, + process: subprocess.Process, + stdout: str, + stderr: str, + buck_build_id: str, + *argv: str, + ) -> 
None:
+        self.args = " ".join(argv)
+        super().__init__(process, stdout, stderr, buck_build_id)
diff --git a/tests/e2e_util/api/executable.py b/tests/e2e_util/api/executable.py
new file mode 100644
index 0000000000000..035a48056a147
--- /dev/null
+++ b/tests/e2e_util/api/executable.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python3
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-unsafe
+
+import platform
+from asyncio import subprocess
+from enum import Enum
+from pathlib import Path
+from typing import Dict, List, Optional
+
+from buck2.tests.e2e_util.api.process import Process
+from buck2.tests.e2e_util.api.result import Result
+
+
+class WindowsCmdOption(Enum):
+    DelayedExpansion = "v"
+    Extensions = "e"
+
+
+class Executable:
+    """An object with an executable path"""
+
+    def __init__(
+        self,
+        path_to_executable: Path,
+        encoding: str,
+        env: Dict[str, str],
+        cwd: Optional[Path] = None,
+    ) -> None:
+        self.path_to_executable = path_to_executable
+        self.cwd = Path() if cwd is None else cwd
+        assert self.cwd.exists(), str(self.cwd)
+        self.encoding = encoding
+        self._env = env
+        self._windows_cmd_options: Dict[WindowsCmdOption, bool] = {}
+
+    def _get_command_env(self, env: Optional[Dict[str, str]]) -> Dict[str, str]:
+        # Combine self._env and env into one dictionary; env overrides self._env.
+        return {**self._env, **(env or {})}
+
+    def set_env(self, key: str, value: str) -> None:
+        self._env[key] = value
+
+    def set_windows_cmd_option(self, key: WindowsCmdOption, value: bool) -> None:
+        self._windows_cmd_options[key] = value
+
+    def _get_windows_cmd_options(self) -> List[str]:
+        """CMD.EXE on windows has two options, "DELAYEDEXPANSION" and "EXTENSIONS", that modify the way its scripts are parsed.
+        These can be turned on/off with the `/[ve]:` arguments to cmd.exe. See the help for `setlocal` for more details.
+        """
+        cmd: List[str] = []
+        is_windows = platform.system() == "Windows"
+        if is_windows and self._windows_cmd_options:
+            cmd.append("cmd.exe")
+            for option, value in self._windows_cmd_options.items():
+                cmd.append(f"/{option.value}:{'on' if value else 'off'}")
+            cmd.append("/c")
+        return cmd
+
+    def execute(
+        self,
+        *argv: str,
+        env: Optional[Dict[str, str]] = None,
+        input: Optional[bytes] = None,
+        stdin: Optional[int] = None,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+    ) -> Process[Result, Exception]:
+        """
+        Runs the executable with a list of arguments.
+
+        argv: a list of arguments to pass to the executable
+        env: An optional dictionary of environment variables to run with, in addition
+            to the env passed in the constructor. This env overrides the constructor env.
+        """
+        command_env = self._get_command_env(env)
+        cmd_to_run = self._get_windows_cmd_options() + [
+            str(self.path_to_executable),
+            *argv,
+        ]
+
+        return Process(
+            cmd_to_run=cmd_to_run,
+            working_dir=self.cwd,
+            env=command_env,
+            input=input,
+            stdin=stdin,
+            stdout=stdout,
+            stderr=stderr,
+            result_type=Result,
+            exception_type=Exception,
+            encoding=self.encoding,
+        )
diff --git a/tests/e2e_util/api/fixtures.py b/tests/e2e_util/api/fixtures.py
new file mode 100644
index 0000000000000..84d1141107bb5
--- /dev/null
+++ b/tests/e2e_util/api/fixtures.py
@@ -0,0 +1,167 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# pyre-unsafe
+
+import re
+from dataclasses import dataclass
+from typing import Dict, List, Optional, Tuple
+
+"""
+A few simple helpers that allow us to use simple markup to
+point to ranges in a file. This is especially useful in the
+LSP tests where we want to make sure that clicking on a link
+will go to a specific location. It also helps make building
+those structures easier.
+
+See `Fixture` for the markup format.
+"""
+
+
+@dataclass
+class Span:
+    """A span within a template file"""
+
+    start_line: int
+    start_col: int
+    end_line: int
+    end_col: int
+
+
+class Lines:
+    """
+    Helper class to take a multiline string, and find the line and
+    column based on the string index
+    """
+
+    lines: List[Tuple[int, int]]
+
+    def __init__(self, content: str):
+        lines = []
+        idx = 0
+        for line in content.splitlines(keepends=True):
+            new_idx = idx + len(line)
+            lines.append((idx, new_idx))
+            idx = new_idx
+        lines.append((idx, len(content)))
+
+        self.lines = lines
+
+    def find_range(self, start_pos: int, end_pos: int) -> Span:
+        def pos_in_line(line_start: int, line_end: int, pos: int) -> Optional[int]:
+            if pos < line_start or pos >= line_end:
+                return None
+            else:
+                return pos - line_start
+
+        start_line = None
+        start_col = None
+        end_line = None
+        end_col = None
+
+        for lineno, (line_start, line_end) in enumerate(self.lines):
+            s_col = pos_in_line(line_start, line_end, start_pos)
+            e_col = pos_in_line(line_start, line_end, end_pos)
+            if s_col is not None:
+                start_line = lineno
+                start_col = s_col
+            if e_col is not None:
+                end_line = lineno
+                end_col = e_col
+
+        if (
+            start_line is not None
+            and start_col is not None
+            and end_line is not None
+            and end_col is not None
+        ):
+            return Span(
+                start_line=start_line,
+                start_col=start_col,
+                end_line=end_line,
+                end_col=end_col,
+            )
+        else:
+            raise ValueError(
+                f"Could not find range for position `{start_pos}` and `{end_pos}`"
+            )
+
+
+"""
+Representation of a template file with named spans to make things
+like clicking and getting a range back in e2e LSP tests easier
+
+Example template file (the <foo>/<f> tags are the span markup; their
+placement here is reconstructed to match the spans given below):
+```
+def <foo>f<f>o</f>o</foo>(x: "string"):
+    pass
+```
+
+Would yield "new_content" of:
+```
+def foo(x: "string"):
+    pass
+```
+
+and spans of {"foo": Span(1, 4, 1, 7), "f": Span(1, 5, 1, 6)}
+"""
+
+
+@dataclass
+class Fixture:
+    original_content: str
+    content: str
+    spans: Dict[str, Span]
+
+    def __init__(self, content: str):
+        raw_positions = {}
+        new_content = ""
+        idx = 0
+        regex = re.compile(r"<(/)?(\w[-\w_]*)>")
+        while idx < len(content):
+            m = regex.search(content, idx)
+            if m:
+                new_content += content[idx : m.start()]
+                idx = m.end()
+
+                identifier = m.group(2)
+                if m.group(1):
+                    # Closing tag. Must have seen an open tag
+                    if identifier not in raw_positions:
+                        raise ValueError(
+                            f"Found closing, but not starting tag, for `{identifier}`"
+                        )
+                    existing = raw_positions[identifier]
+                    if existing[1] is not None:
+                        raise ValueError(
+                            f"Found duplicate closing tags for `{identifier}`"
+                        )
+                    raw_positions[identifier] = (existing[0], len(new_content))
+                else:
+                    # Open tag. Duplicates are not allowed.
+ if identifier in raw_positions: + raise ValueError(f"Duplicate span `{identifier}`") + raw_positions[identifier] = (len(new_content), None) + else: + break + new_content += content[idx:] + + lines = Lines(new_content) + spans = { + identifier: lines.find_range(start, end) + for (identifier, (start, end)) in raw_positions.items() + } + + self.original_content = content + self.content = new_content + self.spans = spans + + def start_line(self, id: "str") -> int: + return self.spans[id].start_line + + def start_col(self, id: "str") -> int: + return self.spans[id].start_col diff --git a/tests/e2e_util/api/lsp.py b/tests/e2e_util/api/lsp.py new file mode 100644 index 0000000000000..48076c5e001b1 --- /dev/null +++ b/tests/e2e_util/api/lsp.py @@ -0,0 +1,326 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-unsafe + +import contextlib +import copy +import json +import re +from asyncio import subprocess, wait_for +from asyncio.streams import StreamReader, StreamWriter + +from collections import deque +from pathlib import Path +from typing import Any, Optional + +CONTENT_HEADER_REGEX = re.compile(r"Content-Length: (\d+)") + + +class LSPResponseError(Exception): + def __init__(self, json_error: Any): + super().__init__(f"Error returned from LSP: `{json_error}`") + self.json_error = json_error + + +class LspClient(contextlib.AbstractAsyncContextManager): + def __init__(self, process: subprocess.Process, cwd: Path): + self.process = process + self.cwd = cwd + self.id = 0 + self.notifications = deque() + self.responses = {} + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + try: + self.process.kill() + except ProcessLookupError: + pass + # Yes, we really wait that long. Process startup time is slow + # sometimes in dev builds, the box can be overloaded, etc. + # Average case is much lower. + await wait_for(self.process.wait(), timeout=120) + + def stdin(self) -> StreamWriter: + if self.process.stdin is None: + raise ValueError("Tried to fetch stdin, but it was not set") + else: + return self.process.stdin + + def stdout(self) -> StreamReader: + if self.process.stdout is None: + raise ValueError("Tried to fetch stdout, but it was not set") + else: + return self.process.stdout + + async def write_message(self, payload: Any): + payload_bytes = json.dumps(payload).encode("utf-8") + self.stdin().write( + f"Content-Length: {len(payload_bytes)}\r\n\r\n".encode("utf-8") + ) + self.stdin().write(payload_bytes) + await self.stdin().drain() + + async def _read_message(self) -> Any: + content_length = (await self.stdout().readline()).decode("utf-8") + await self.stdout().readline() + matches = CONTENT_HEADER_REGEX.match(content_length) + if not matches: + raise ValueError( + f"Did not get content length header. 
Got `{content_length}`" + ) + content_length_bytes = int(matches.group(1)) + content = (await self.stdout().read(content_length_bytes)).decode("utf-8") + + js = json.loads(content) + if "id" in js: + self.responses[js["id"]] = js + else: + self.notifications.append(js) + return js + + async def read_message(self) -> Any: + return await wait_for(self._read_message(), timeout=30) + + async def open_file( + self, relative_path: Path, contents: Optional[str] = None + ) -> Optional[Any]: + absolute_path = self.cwd / relative_path + if contents is None: + with open(absolute_path, encoding="utf-8") as fin: + contents = fin.read() + + payload = { + "textDocument": { + "uri": absolute_path.as_uri(), + "languageId": "starlark", + "version": 1, + "text": contents, + } + } + await self.send_notification("textDocument/didOpen", payload) + notif = await self.receive_notification("textDocument/publishDiagnostics") + + assert notif["params"]["uri"] == absolute_path.as_uri() + return notif["params"] + + async def goto_definition( + self, relative_path: Path, line: int, col: int + ) -> Optional[Any]: + + absolute_path = self.cwd / relative_path + payload = { + "textDocument": { + "uri": absolute_path.as_uri(), + }, + "position": { + "line": line, + "character": col, + }, + } + + req_id = await self.send_request("textDocument/definition", payload) + return await self.receive_response(req_id) + + async def file_contents(self, uri: str) -> Optional[Any]: + payload = { + "uri": uri, + } + + req_id = await self.send_request("starlark/fileContents", payload) + return await self.receive_response(req_id) + + async def send_notification(self, method: str, notification: Any): + payload = {"jsonrpc": "2.0", "method": method, "params": notification} + await self.write_message(payload) + + async def receive_notification( + self, method: Optional[str] = None, retries: int = 10 + ) -> Optional[Any]: + for _ in range(0, retries): + await self.read_message() + try: + (idx, value) = next( + ( + (i, v) + for (i, v) in enumerate(self.notifications) + if (method is None or v["method"] == method) + ), + (-1, None), + ) + if value is not None: + del self.notifications[idx] + return value + + except (IndexError, ValueError): + pass + + return None + + async def send_request(self, method: str, request: Any) -> int: + self.id += 1 + id = self.id + payload = {"jsonrpc": "2.0", "id": id, "method": method, "params": request} + await self.write_message(payload) + return id + + async def receive_response(self, request_id: int, retries: int = 10): + for _ in range(0, retries): + await self.read_message() + if request_id not in self.responses: + continue + response = self.responses.pop(request_id) + if response.get("result") is not None: + return response["result"] + else: + raise LSPResponseError(response["error"]) + return None + + async def init_connection(self): + repo_uri = self.cwd.as_uri() + request = copy.deepcopy(_INIT_REQUEST) + request["workspaceFolders"][0]["uri"] = repo_uri + request["rootUri"] = repo_uri + + req_id = await self.send_request("initialize", request) + response = await self.receive_response(req_id) + + assert "definitionProvider" in response["capabilities"] + assert "textDocumentSync" in response["capabilities"] + await self.send_notification("initialized", {}) + + +# Sample initialization request sent from vscode. 
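+# `init_connection` above deep-copies this dict and patches `rootUri` and the
+# single `workspaceFolders` entry with the real repo URI, so the
+# `file:///INVALID` placeholders below are never sent to the server as-is.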
+_INIT_REQUEST = { + "capabilities": { + "textDocument": { + "completion": { + "completionItem": { + "commitCharactersSupport": True, + "deprecatedSupport": True, + "documentationFormat": ["markdown", "plaintext"], + "preselectSupport": True, + "snippetSupport": True, + "tagSupport": {"valueSet": [1]}, + }, + "completionItemKind": { + "valueSet": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + ] + }, + "contextSupport": True, + "dynamicRegistration": True, + }, + "declaration": {"dynamicRegistration": True, "linkSupport": True}, + "definition": {"dynamicRegistration": True, "linkSupport": True}, + "documentHighlight": {"dynamicRegistration": True}, + "documentLink": {"dynamicRegistration": True, "tooltipSupport": True}, + "documentSymbol": { + "dynamicRegistration": True, + "hierarchicalDocumentSymbolSupport": True, + "symbolKind": { + "valueSet": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + ] + }, + }, + "foldingRange": { + "dynamicRegistration": True, + "lineFoldingOnly": True, + "rangeLimit": 5000, + }, + "formatting": {"dynamicRegistration": True}, + "hover": { + "contentFormat": ["markdown", "plaintext"], + "dynamicRegistration": True, + }, + "implementation": {"dynamicRegistration": True, "linkSupport": True}, + "onTypeFormatting": {"dynamicRegistration": True}, + "publishDiagnostics": { + "relatedInformation": True, + "tagSupport": {"valueSet": [1, 2]}, + "versionSupport": False, + }, + "rangeFormatting": {"dynamicRegistration": True}, + "references": {"dynamicRegistration": True}, + "rename": {"dynamicRegistration": True, "prepareSupport": True}, + "selectionRange": {"dynamicRegistration": True}, + "signatureHelp": { + "contextSupport": True, + "dynamicRegistration": True, + "signatureInformation": { + "documentationFormat": ["markdown", "plaintext"], + "parameterInformation": {"labelOffsetSupport": True}, + }, + }, + "synchronization": { + "didSave": True, + "dynamicRegistration": True, + "willSave": True, + "willSaveWaitUntil": True, + }, + "typeDefinition": {"dynamicRegistration": True, "linkSupport": True}, + }, + "window": {"workDoneProgress": True}, + }, + "processId": None, + "rootUri": "file:///INVALID", + "trace": "off", + "workspaceFolders": [{"name": "buck2", "uri": "file:///INVALID"}], +} diff --git a/tests/e2e_util/api/process.py b/tests/e2e_util/api/process.py new file mode 100644 index 0000000000000..9f354f31af719 --- /dev/null +++ b/tests/e2e_util/api/process.py @@ -0,0 +1,112 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +# pyre-unsafe + +import signal +from asyncio import subprocess +from pathlib import Path +from typing import ( + Any, + Awaitable, + Dict, + Generator, + Generic, + IO, + Iterable, + Optional, + Union, +) + +from buck2.tests.e2e_util.api.result import E, ExceptionType, R, ResultType + + +class Process(Generic[R, E], Awaitable[R]): + """Instantiates a BuckProcess object with a running process""" + + def __init__( + self, + *, + cmd_to_run: Iterable[str], + working_dir: Path, + env: Dict[str, str], + input: Union[bytes, None], + stdin: Optional[int], + stdout: Union[int, IO[Any], None], + stderr: Union[int, IO[Any], None], + result_type: ResultType, + exception_type: ExceptionType, + encoding: str, + ) -> None: + self.cmd_to_run = cmd_to_run + self.working_dir = working_dir + self.env = env + self.input = input + self._result_type = result_type + self._exception_type = exception_type + self._encoding = encoding + if stdin is None: + stdin = None if input is None else subprocess.PIPE + self._awaitable_process = subprocess.create_subprocess_exec( + *cmd_to_run, + cwd=working_dir, + env=env, + stdin=stdin, + stdout=stdout, + stderr=stderr, + ) + + async def _get_result_or_raise_exception(self, process: subprocess.Process) -> R: + stdout, stderr = await process.communicate(input=self.input) + args = ( + process, + ( + str(stdout, self._encoding) + if stdout is not None + else "" + ), + ( + str(stderr, self._encoding) + if stderr is not None + else "" + ), + ) + if process.returncode != 0: + raise self._exception_type( + self.cmd_to_run, self.working_dir, self.env, *args + ) + return self._result_type(*args) + + async def _wait(self) -> R: + """Returns a BuckResult with a finished process""" + process = await self._awaitable_process + return await self._get_result_or_raise_exception(process) + + async def start(self) -> subprocess.Process: + """ + Starts a running process and then returns that process. + Unlike wait, which waits for the process to finish, this does + not wait for the process to finish. + """ + return await self._awaitable_process + + def __await__(self) -> Generator[Any, None, R]: + """ + Overrides __await__ of Awaitable class to implement Awaitable[R]. + Usage: + ``` + buck_result = await process + ``` + """ + return self._wait().__await__() + + async def interrupt(self) -> R: + """Sends SIGINT, and returns a BuckResult with an interrupted process""" + process = await self._awaitable_process + process.send_signal(signal.SIGINT) + return await self._get_result_or_raise_exception(process) diff --git a/tests/e2e_util/api/result.py b/tests/e2e_util/api/result.py new file mode 100644 index 0000000000000..cd69a5991f579 --- /dev/null +++ b/tests/e2e_util/api/result.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-unsafe + +from asyncio import subprocess +from pathlib import Path +from typing import Callable, Dict, Iterable, TypeVar + + +class Result: + """ + Represents a process that has finished running and succeeded. 
+ If the buck process failed, it should raise Exception + """ + + def __init__( + self, + process: subprocess.Process, + stdout: str, + stderr: str, + ) -> None: + self.process = process + self.stdout = stdout + self.stderr = stderr + self.check_returncode() + + def check_returncode(self) -> None: + assert self.process.returncode == 0 + + +R = TypeVar("R", bound=Result) +E = TypeVar("E", bound=Exception) +ResultType = Callable[[subprocess.Process, str, str], R] +ExceptionType = Callable[ + [Iterable[str], Path, Dict[str, str], subprocess.Process, str, str], E +] diff --git a/tests/e2e_util/api/subscribe.py b/tests/e2e_util/api/subscribe.py new file mode 100644 index 0000000000000..4f29f494040a2 --- /dev/null +++ b/tests/e2e_util/api/subscribe.py @@ -0,0 +1,45 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-unsafe + +import contextlib +import json +from asyncio import subprocess, wait_for +from typing import Any + +from buck2.tests.e2e_util.api.process import Process + + +class SubscribeClient(contextlib.AbstractAsyncContextManager): + def __init__(self, start: Process, process: subprocess.Process): + self._start = start + self._process = process + + @classmethod + async def create(cls, start: Process): + process = await start.start() + return cls(start, process) + + @property + def stdin(self): + return self._process.stdin + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + self._process.stdin.close() + await wait_for( + self._start._get_result_or_raise_exception(self._process), timeout=120 + ) + + async def read_message(self) -> Any: + stdout = self._process.stdout + assert stdout is not None + message = await wait_for(stdout.readline(), timeout=30) + return json.loads(message) diff --git a/tests/e2e_util/asserts.py b/tests/e2e_util/asserts.py new file mode 100644 index 0000000000000..829cb56e6da7d --- /dev/null +++ b/tests/e2e_util/asserts.py @@ -0,0 +1,92 @@ +#!/usr/bin/env fbpython +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-unsafe + +import platform +import re +import stat +from pathlib import Path +from typing import Awaitable, Optional, Type, TypeVar, Union + +import pytest +from buck2.tests.e2e_util.api.buck_result import ( + BuckException, + BuckResult, + ExitCode, + ExitCodeV2, +) + + +E = TypeVar("E", bound=BaseException) + + +def assert_executable(output: Path) -> None: + # stat.S_IXUSR is executable by owner + # Checks that the file is executable by owner + # Windows doesn't have executable permission. 
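+    # The owner-execute bit (stat.S_IXUSR == 0o100) is only meaningful on POSIX,
+    # so the check below is skipped on Windows.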
+ if platform.system() != "Windows": + assert output.stat().st_mode & stat.S_IXUSR != 0 + + +def assert_not_executable(output: Path) -> None: + # Checks that the file is not executable by owner + if platform.system() != "Windows": + assert output.stat().st_mode & stat.S_IXUSR == 0 + + +async def expect_failure( + process: Awaitable[BuckResult], + *, + exception: Type[E] = BuckException, + exit_code: Union[ExitCode, ExitCodeV2, None] = None, + stdout_regex: Optional[str] = None, + stderr_regex: Optional[str] = None, +) -> E: + """ + Asserts that the process raises a BuckException. + + Parameters: + process: An Awaitable of BuckResult, usually a Process + exception: + The type of exception to check for. + The exception can be a BuckException or any subclass. + Default is BuckException. + exit_code: + An optional exit code to check for if provided. + Raises an AssertionError if the actual exit code is different. + stdout_regex: + An optional regex pattern to search for in stdout if provided. + Raises an AssertionError if the regex pattern is not found. + stderr_regex: + An optional regex pattern to search for in stderr if provided. + Raises an AssertionError if the regex pattern is not found. + """ + with pytest.raises(exception) as execinfo: # type: ignore + await process + failure = execinfo.value + if not isinstance(failure, BuckException): + return failure + if exit_code is not None: + actual_exit_code = ( + failure.get_exit_code() + if isinstance(exit_code, ExitCode) + else failure.get_exit_code_v2() + ) + assert ( + actual_exit_code == exit_code + ), f"Expected exit code {exit_code} but found {actual_exit_code}" + if stdout_regex is not None: + assert re.search( + stdout_regex, failure.stdout, re.DOTALL + ), f'Did not find pattern: "{stdout_regex}" in stdout: "{failure.stdout}"' + if stderr_regex is not None: + assert re.search( + stderr_regex, failure.stderr, re.DOTALL | re.IGNORECASE + ), f'Did not find pattern: "{stderr_regex}" in stderr: "{failure.stderr}"' + return failure diff --git a/tests/e2e_util/buck_workspace.py b/tests/e2e_util/buck_workspace.py new file mode 100644 index 0000000000000..2a2faf7aaa20f --- /dev/null +++ b/tests/e2e_util/buck_workspace.py @@ -0,0 +1,596 @@ +#!/usr/bin/env fbpython +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+
+# pyre-unsafe
+
+import contextlib
+import hashlib
+import json
+import os
+import platform
+import shutil
+import subprocess
+import sys
+import tempfile
+from collections import namedtuple
+from pathlib import Path
+from typing import (
+    AsyncGenerator,
+    AsyncIterator,
+    Awaitable,
+    Callable,
+    Dict,
+    Iterable,
+    List,
+    Optional,
+)
+
+import __manifest__
+
+import pytest
+from buck2.tests.e2e_util.api.buck import Buck
+from buck2.tests.e2e_util.api.executable import WindowsCmdOption
+from decorator import decorator
+
+BuckTestMarker = namedtuple(
+    "BuckTestMarker",
+    [
+        "inplace",
+        "data_dir",
+        "allow_soft_errors",
+        "extra_buck_config",
+        "skip_final_kill",
+        "setup_eden",
+    ],
+)
+
+
+@contextlib.asynccontextmanager
+async def buck_fixture(  # noqa C901 : "too complex"
+    marker,
+) -> AsyncGenerator[Buck, None]:
+    """Returns a Buck for testing"""
+
+    is_windows = platform.system() == "Windows"
+    test_executable = os.environ["TEST_EXECUTABLE"]
+
+    env: Dict[str, str] = {**os.environ}
+    # This is necessary for static linking on Linux.
+    if platform.system() != "Windows":
+        env["BUCKD_STARTUP_TIMEOUT"] = "120"
+
+    # allow_soft_errors will override any existing environment variable behavior
+    if marker.allow_soft_errors or marker.inplace:
+        env["BUCK2_HARD_ERROR"] = "false"
+
+    # Use a very small stdin buffer to catch any scenarios in which we
+    # don't properly handle partial input.
+    env["BUCK2_TEST_STDIN_BUFFER_SIZE"] = "8"
+    # Explicitly disable log uploading, we don't care about stats for tests.
+    env["BUCK2_TEST_DISABLE_LOG_UPLOAD"] = "true"
+    # But still block on it, because the upload process also writes
+    # locally, and we want that to be synchronous instead of backgrounded.
+    env["BUCK2_TEST_BLOCK_ON_UPLOAD"] = "true"
+    # Require the events dispatcher to be set for e2e tests.
+    env["ENFORCE_DISPATCHER_SET"] = "true"
+    # Auto-destroy after a while. This should be longer than the test timeout.
+    env["BUCK2_TERMINATE_AFTER"] = "650"
+    # Timeout Watchman requests because we often see it hang and crash.
+    env["BUCK2_WATCHMAN_TIMEOUT"] = "30"
+    # Use few threads. We don't do much work in tests but we do run lots of Bucks.
+    env["BUCK2_RUNTIME_THREADS"] = "2"
+
+    # Windows uses blocking threads for subprocess I/O so we can't do this there.
+    if not is_windows:
+        env["BUCK2_MAX_BLOCKING_THREADS"] = "2"
+
+    # Filter out some environment variables that may interfere with the
+    # running of tests. Notably, since this framework is used to write
+    # Python tests that run Buck, we clear out Python test environment
+    # variables so that if we run a Python test via Buck 2, they won't
+    # interfere.
+    for var in ["PYTEST_CURRENT_TEST", "TEST_PILOT"]:
+        env.pop(var, None)
+
+    common_dir = await _get_common_dir()
+    base_dir = Path(tempfile.mkdtemp(dir=common_dir))
+
+    isolation_prefix = None
+    keep_temp = os.environ.get("BUCK_E2E_KEEP_TEMP") == "1"
+
+    env["BUCK2_TEST_SKIP_DEFAULT_EXTERNAL_CONFIG"] = "true"
+
+    # Because we may change the working directory, create an absolute path to the test data srcs if
+    # they exist and make it available in a different envvar. This is used by golden tests.
+    test_repo_data = os.environ.get("TEST_REPO_DATA")
+    if test_repo_data is not None:
+        os.environ["TEST_REPO_DATA_SRC"] = str(Path(test_repo_data).absolute())
+
+    # Create a temporary file to store all lines of extra buck config values.
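+    # The collected lines are written to `extra.bcfg` below and handed to the
+    # spawned buck2 via the `BUCK2_TEST_EXTRA_EXTERNAL_CONFIG` env var.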
+    extra_config_lines = []
+
+    project_dir = base_dir / "project"
+
+    # Temp dir needed for EdenFS, will only be created if necessary
+    eden_dir = base_dir / "eden"
+
+    try:
+        if marker.setup_eden:
+            assert (
+                not marker.inplace
+            ), "EdenFS for e2e tests is not supported for inplace tests"
+
+            _setup_eden(
+                eden_dir,
+                project_dir,
+                env,
+                is_windows,
+            )
+
+        if marker.inplace:
+            # We need a unique isolated prefix per test case.
+            current_test = (
+                __manifest__.fbmake["build_rule"] + os.environ["PYTEST_CURRENT_TEST"]
+            )
+            isolation_prefix = hashlib.sha1(current_test.encode("utf-8")).hexdigest()
+            # FIXME(T136079642): Buck2 on Windows has a problem with relative symlinks over 260 chars, so shorten the hash
+            if is_windows:
+                isolation_prefix = isolation_prefix[:5]
+            else:
+                isolation_prefix = ".buck_e2e" + isolation_prefix
+
+            buck_cwd = Path.cwd()
+
+            # NOTE: In theory, this isn't true of all Linux hosts, but all
+            # our tests actually rely on it and will break if you ran them
+            # on a host without this, so just make it the default.
+            if sys.platform == "linux":
+                extra_config_lines.append("[host_features]\ngvfs = true\n")
+            extra_config_lines.append("[buildfile]\nextra_for_test = TARGETS.test\n")
+
+        else:
+            if marker.data_dir is not None:
+                src = Path(os.environ["TEST_REPO_DATA"], marker.data_dir)
+                _copytree(src, project_dir)
+                _maybe_setup_prelude_and_ovr_config(project_dir)
+                with open(Path(project_dir, ".watchmanconfig"), "w") as f:
+                    # Use the FS Events watcher, which is more reliable than the default.
+                    json.dump(
+                        {
+                            "ignore_dirs": ["buck-out", ".hg"],
+                            "fsevents_watch_files": True,
+                            "prefer_split_fsevents_watcher": False,
+                        },
+                        f,
+                    )
+
+            # Our Mac tests keep failing due to Watchman errors, so disable Watchman on Macs here
+            if sys.platform == "darwin":
+                extra_config_lines.append("[buck2]\nfile_watcher = fs_hash_crawler\n")
+
+            buck_cwd = project_dir
+
+        for section, config in marker.extra_buck_config.items():
+            extra_config_lines.append(f"[{section}]\n")
+            for key, value in config.items():
+                extra_config_lines.append(f"{key} = {value}\n")
+
+        extra_config = os.path.join(base_dir, "extra.bcfg")
+        with open(extra_config, "w") as f:
+            for line in extra_config_lines:
+                f.write(line)
+        env["BUCK2_TEST_EXTRA_EXTERNAL_CONFIG"] = extra_config
+
+        buck = Buck(
+            Path(test_executable),
+            cwd=buck_cwd,
+            encoding="utf-8",
+            env=env,
+        )
+
+        if isolation_prefix is not None:
+            buck.set_isolation_prefix(isolation_prefix)
+
+        yield buck
+
+        if not marker.skip_final_kill:
+            if keep_temp:
+                await buck.kill()
+            else:
+                await buck.clean()
+    finally:
+        if keep_temp:
+            print(f"Not deleting temporary directory at {base_dir}", file=sys.stderr)
+        else:
+            if marker.setup_eden:
+                _cleanup_eden(eden_dir, project_dir, env)
+            shutil.rmtree(base_dir, ignore_errors=True)
+
+
+@pytest.fixture(scope="function")
+async def buck(request) -> AsyncIterator[Buck]:
+    marker = request.node.get_closest_marker("buck_test")
+    if marker is None:
+        raise Exception(
+            "Test method must be decorated with @buck_test() to use the buck fixture."
+        )
+    marker = marker.args[0]
+    async with buck_fixture(marker) as buck:
+        yield buck
+
+
+async def _get_common_dir() -> Path:
+    from asyncio import subprocess
+
+    """
+    Returns a temporary directory using mkscratch.
+    The advantage of using mkscratch is that it can return the same directory on multiple calls.
+ """ + # Need to use `--hash` over `--subdir` here because the tmp path would be too long and + # Eden would fail with `Socket path too large to fit into sockaddr_un` otherwise + mkscratch_proc = await subprocess.create_subprocess_exec( + "mkscratch", + "path", + "--hash", + stdout=subprocess.PIPE, + ) + stdout, _ = await mkscratch_proc.communicate() + assert stdout is not None, "stdout should not be None" + common_dir = Path(stdout.decode().strip()) + return common_dir + + +def nobuckd(fn: Callable) -> Callable: + """Disables buck daemon""" + + def wrapped(fn: Callable, buck: Buck, *args, **kwargs): + buck.set_buckd(True) + return fn(buck, *args, **kwargs) + + return decorator(wrapped, fn) + + +def _eden_base_cmd(eden_dir: Path) -> List[str]: + config_dir = eden_dir / "config" + etc_dir = eden_dir / "etc" + home_dir = eden_dir / "home" + + config_dir.mkdir(exist_ok=True) + etc_dir.mkdir(exist_ok=True) + home_dir.mkdir(exist_ok=True) + + return [ + "eden", + "--config-dir", + str(config_dir), + "--home-dir", + str(home_dir), + "--etc-eden-dir", + str(etc_dir), + ] + + +# Adapted from Eden integration test, didn't use their code because Eden uses the compiled binary in their buck-out +# which we don't have, extracting that part out would be more work than what was done below. +# https://www.internalfb.com/code/fbsource/[45334ead4a72]/fbcode/eden/integration/lib/testcase.py?lines=123 +def _setup_eden( + eden_dir: Path, + project_dir: Path, + env: Dict[str, str], + is_windows: bool, +): + eden_dir.mkdir(exist_ok=True) + # Start up an EdenFS Client and point it to the temp dirs + subprocess.check_call( + _eden_base_cmd(eden_dir) + + [ + "start", + ], + stdout=sys.stdout, + stderr=sys.stderr, + env=env, + ) + + temp_repo = eden_dir / "temp_repo" + # Initialize a hg repo, so Eden can mount it + subprocess.check_call( + ["hg", "init", str(temp_repo)], + stdout=sys.stdout, + stderr=sys.stderr, + env=env, + ) + + # Mount the hg repo we created + project_dir.mkdir(exist_ok=True) + cmd = _eden_base_cmd(eden_dir) + [ + "clone", + temp_repo, + project_dir, + "--allow-empty-repo", + "--case-insensitive", + ] + + if is_windows: + cmd.append("--enable-windows-symlinks") + + subprocess.check_call( + cmd, + stdout=sys.stdout, + stderr=sys.stderr, + env=env, + ) + + subprocess.check_call( + _eden_base_cmd(eden_dir) + + [ + "redirect", + "add", + "buck-out", + "bind", + ], + stdout=sys.stdout, + stderr=sys.stderr, + env=env, + cwd=project_dir, + ) + + +def _cleanup_eden( + eden_dir: Path, + project_dir: Path, + env: Dict[str, str], +): + # Remove the Eden mount created for the test + subprocess.run( + _eden_base_cmd(eden_dir) + + [ + "remove", + str(project_dir), + "-y", + ], + stdout=sys.stdout, + stderr=sys.stderr, + env=env, + ) + + subprocess.run( + _eden_base_cmd(eden_dir) + + [ + "shutdown", + ], + stdout=sys.stdout, + stderr=sys.stderr, + env=env, + ) + + +def _copytree( + src: Path, + dst: Path, + symlinks: bool = False, + ignore: Optional[Callable[..., Iterable[str]]] = None, +) -> None: + """Copies all files and directories from src into dst""" + dst.mkdir(parents=True, exist_ok=True) + for item in os.listdir(src): + if item == "buck-out": + continue + s = src / item + d = dst / item + if os.path.isdir(s): + shutil.copytree(s, d, symlinks, ignore, dirs_exist_ok=True) + else: + shutil.copy2(s, d) + + +def _maybe_setup_prelude_and_ovr_config(path: Path) -> None: + if "PRELUDE" in os.environ or "OVR_CONFIG" in os.environ: + if os.environ.get("BUCK2_E2E_TEST_FLAVOR") == "isolated": + raise Exception( + 
"Don't set `PRELUDE` or `OVR_CONFIG` in `tests/core` - these tests are always isolated" + ) + + if "PRELUDE" in os.environ: + prelude = Path(path, "prelude") + if not prelude.exists(): + _copytree( + Path(os.environ["PRELUDE"]), + Path(path, "prelude"), + ) + + # TODO: The toolchain platform definitions we hard-code in the prelude are + # in the ovr_config cell, so copy them in for now. Longer-term, D31566140 + # has a discusion on bettter approaches. + if "OVR_CONFIG" not in os.environ: + return + _copytree( + Path(os.pardir, "arvr", "tools", "build_defs", "config"), + Path(path, "arvr", "tools", "build_defs", "config"), + ) + + _copytree( + Path(os.pardir, "tools", "build_defs", "fbcode_macros"), + Path(path, "tools", "build_defs", "fbcode_macros"), + ) + + with Path(path, ".buckconfig").open("a") as f: + print( + "", file=f + ) # append newline because test `.buckconfig` may not end with newline + print("# Following lines are added by buck_workspace.py", file=f) + print("[repositories]", file=f) + print("ovr_config = arvr/tools/build_defs/config", file=f) + print("fbcode_macros = tools/build_defs/fbcode_macros", file=f) + with Path(path, "arvr", "tools", "build_defs", "config", ".buckconfig").open( + "w" + ) as f: + pass + with Path(path, "tools", "build_defs", "fbcode_macros", ".buckconfig").open( + "w" + ) as f: + pass + + +BuckTestFn = Callable[..., Awaitable[None]] + +SKIPPABLE_PLATFORMS = ["darwin", "linux", "windows"] + + +def buck_test( + inplace: bool | None = None, + data_dir: Optional[str] = "", + # Accepted values are specified in SKIPPABLE_PLATFORMS + skip_for_os: List[str] = [], # noqa: B006 value is read-only + allow_soft_errors=False, + extra_buck_config: Optional[Dict[str, Dict[str, str]]] = None, + skip_final_kill=False, + setup_eden=False, +) -> Callable: + """ + Defines a buck test. This is a must have decorator on all test case functions. + + Parameters: + inplace: + A bool for whether to run tests in-repo. + If false, runs test under a sandbox repo. If `data_dir` or `data` are set on the target, + the sandbox repo will be initialized with the contents of that directory. This can be + disabled by setting `data_dir = None` on the test, or the test can set + `data_dir = "subdir"` to just use the contents of a subdirectory. + If true, runs test in fbsource. + data_dir: + data_dir is an optional string. + If data_dir is set, then data_dir is the directory that contains test project data to + copy, or the working directory relative to the cwd. + skip_for_os: + List of OS to skip the test on. + allow_soft_errors: + Like it says in the arg name. The default is to hard error. + extra_buck_config: + A optional dict of extra buck config to add to the test. + The key is the section name, the value is a dict of key value pairs. + skip_final_kill: + Don't run a `buck2 kill` or `buck2 clean` at the end of the test + setup_eden: + Whether or not to set up an EdenFS repo for this test. Only matters for inplace=False. + Note that this will slow the test down, so it should not be widely enabled. + """ + + if inplace and data_dir == "": + data_dir = None + + if os.environ.get("BUCK2_E2E_TEST_FLAVOR") == "isolated": + if inplace is not None: + raise Exception( + "Don't set `inplace` in `tests/core` - these tests are always isolated" + ) + + inplace = False + else: + if inplace is None: + raise Exception("`inplace` must be set for `buck_test()`") + + # Set up arguments to use for the buck fixture. 
+
+    # Just ignore the test; calling pytest.skip() is unfortunately treated as a failure by tpx
+    for p in skip_for_os:
+        if p not in SKIPPABLE_PLATFORMS:
+            raise Exception(f"skip_for_os must specify one of {SKIPPABLE_PLATFORMS}")
+    if platform.system().lower() in skip_for_os:
+        return lambda *args: None
+
+    if data_dir is not None and inplace:
+        raise Exception(
+            "`data_dir` is not an allowed parameter for an `inplace=True` test"
+        )
+
+    return pytest.mark.buck_test(
+        BuckTestMarker(
+            inplace=inplace,
+            data_dir=data_dir,
+            allow_soft_errors=allow_soft_errors,
+            extra_buck_config=extra_buck_config or {},
+            skip_final_kill=skip_final_kill,
+            setup_eden=setup_eden,
+        )
+    )
+
+
+def env(key: str, value: str) -> Callable:
+    """
+    Decorator for adding an environment variable to a test case.
+    For example, @env("BUCK_LOG", "info")
+    """
+
+    def inner_decorator(fn: BuckTestFn) -> Callable:
+        async def wrapped(fn: BuckTestFn, buck: Buck, *args, **kwargs) -> None:
+            buck.set_env(key, value)
+            return await fn(buck, *args, **kwargs)
+
+        return decorator(wrapped, fn)
+
+    return inner_decorator
+
+
+def windows_cmd_option(key: WindowsCmdOption, value: bool) -> Callable:
+    """
+    Decorator for specifying the state of cmd.exe's given key feature.
+    For example, @windows_cmd_option(WindowsCmdOption.DelayedExpansion, True)
+    """
+
+    def inner_decorator(fn: BuckTestFn) -> Callable:
+        async def wrapped(fn: BuckTestFn, buck: Buck, *args, **kwargs) -> None:
+            buck.set_windows_cmd_option(key, value)
+            return await fn(buck, *args, **kwargs)
+
+        return decorator(wrapped, fn)
+
+    return inner_decorator
+
+
+def is_deployed_buck2() -> bool:
+    """
+    This function detects whether or not you are using a deployed version of buck2,
+    so you can skip certain rule tests when only deployed buck2 is in use.
+    This may break deployed buck2, so please make sure this only affects tests for rules
+    that buck2 users are not using.
+
+    Example of skipping a test case with deployed buck2:
+        @pytest.mark.skipif(
+            is_deployed_buck2(),
+            reason="Skip if testing with deployed buck2",
+        )
+    """
+    return os.environ.get("TEST_EXECUTABLE") == "buck2"
+
+
+def get_mode_from_platform(
+    mode="dev", prefix=True, skip_validation_i_know_what_im_doing=False
+) -> str:
+    if not skip_validation_i_know_what_im_doing and (mode not in ("dev", "opt")):
+        raise Exception(f"Invalid mode: {mode}")
+
+    def modefile_basename():
+        if sys.platform == "darwin":
+            if mode.startswith("dev"):
+                return "mac"
+            else:
+                return "opt-mac"
+        elif sys.platform == "win32":
+            if mode.startswith("dev"):
+                return "win"
+            else:
+                return "opt-win"
+        if mode.startswith("dev"):
+            return "dev"
+        else:
+            return "opt"
+
+    if prefix:
+        return f"@fbcode//mode/{modefile_basename()}"
+
+    return modefile_basename()
diff --git a/tests/e2e_util/conftest.py b/tests/e2e_util/conftest.py
new file mode 100644
index 0000000000000..09acd1b5a76a4
--- /dev/null
+++ b/tests/e2e_util/conftest.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python3
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+import inspect
+
+import pytest
+from buck2.tests.e2e_util.buck_workspace import buck  # noqa F401
+
+
+def pytest_collection_modifyitems(items):
+    """
+    Used to automatically mark async test functions with the pytest.mark.asyncio decorator.
+ """ + for item in items: + if isinstance(item, pytest.Function) and inspect.iscoroutinefunction( + item.function + ): + item.add_marker(pytest.mark.asyncio) + + +def pytest_configure(config): + config.addinivalue_line( + "markers", "buck_test: used by buck_test to pass data to Buck fixtures" + ) diff --git a/tests/e2e_util/helper/assert_occurrences.py b/tests/e2e_util/helper/assert_occurrences.py new file mode 100644 index 0000000000000..9f55a2a5b3e16 --- /dev/null +++ b/tests/e2e_util/helper/assert_occurrences.py @@ -0,0 +1,43 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-unsafe + +import re + + +def truncate(x: str, limit: int) -> str: + if len(x) <= limit: + return x + else: + return x[: limit // 2] + " <> " + x[-(limit // 2) :] + + +def print_occurences_msg( + needle: str, haystack: str, occurrences: int, success: bool +) -> None: + OUTPUT_LIMIT = 10000 + # Hacky way to actually make sure we print the full output when a string + # does not appear the correct number of times. + assert success, "Expected to find {} occurrences of `{}` in `{}`".format( + occurrences, needle, truncate(repr(haystack), OUTPUT_LIMIT) + ) + + +def assert_occurrences(needle: str, haystack: str, occurrences: int) -> None: + print_occurences_msg( + needle, haystack, occurrences, haystack.count(needle) == occurrences + ) + + +def assert_occurrences_regex(needle: str, haystack: str, occurrences: int) -> None: + print_occurences_msg( + needle, + haystack, + occurrences, + len(re.findall(needle, haystack, re.MULTILINE)) == occurrences, + ) diff --git a/tests/e2e_util/helper/golden.py b/tests/e2e_util/helper/golden.py new file mode 100644 index 0000000000000..43a3dff1b2421 --- /dev/null +++ b/tests/e2e_util/helper/golden.py @@ -0,0 +1,147 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-unsafe + +import os +import re +import typing + +from pathlib import Path + + +def _prepend_header(content: str) -> str: + return ( + f"# This file is {'@'}generated, " + f"regenerate by re-running test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command\n\n{content}" + ) + + +def _remove_ci_labels(content: str) -> str: + # this label is only added for CI jobs, causing inconsistenty between local test and ci test. + # Examples: + # "ci:overwrite", + # "ci:diff:linux:@fbcode//mode/dev-lg", + # "ci:continuous:linux:@fbcode//mode/dev-lg", + new_content = [] + for line in content.splitlines(): + if "ci:" in line: + continue + new_content.append(line) + return "\n".join(new_content) + + +def _normalize_newlines(content: str) -> str: + """ + We use golden() with text data so in the interest of being a bit more + platform independent we just normalize the newlines. 
+ """ + return "".join([line + "\n" for line in content.splitlines()]) + + +def _test_repo_data_src() -> str: + # `TEST_REPO_DATA_SRC` is set in the test runner + dir = os.getenv("TEST_REPO_DATA_SRC") + assert dir, "TEST_REPO_DATA_SRC must be set" + return dir + + +def _unified_diff( + *, + left: str, + right: str, + file: str, + context: int, +) -> str: + import difflib + + return "".join( + difflib.unified_diff( + left.splitlines(keepends=True), + right.splitlines(keepends=True), + fromfile=file, + tofile=file, + n=context, + ), + ) + + +def _is_update_invocation() -> bool: + return os.getenv("BUCK2_UPDATE_GOLDEN") is not None + + +# Output is a map of `rel_path`-relative files to their expected values +def golden_dir(*, output: typing.Dict[str, str], rel_path: str) -> None: + assert "golden" in rel_path, f"Golden path `{rel_path}` must contain `golden`" + + rel_path_path = Path(rel_path) + + for file, contents in output.items(): + golden( + output=contents, + rel_path=str(rel_path_path.joinpath(Path(file))), + ) + + # Check that there are no extra files + path_in_src = Path(_test_repo_data_src()).joinpath(rel_path_path) + + for file in path_in_src.glob("**/*"): + if file.is_dir(): + continue + rel_file_path = str(file.relative_to(path_in_src)) + if rel_file_path not in output: + if _is_update_invocation(): + file.unlink() + else: + raise AssertionError( + f"Extra golden file `{rel_file_path}` found, please remove it" + ) + + +def golden(*, output: str, rel_path: str) -> None: + assert "golden" in rel_path, f"Golden path `{rel_path}` must contain `golden`" + + output = _prepend_header(output) + output = _normalize_newlines(output) + + path_in_src = os.path.join(_test_repo_data_src(), rel_path) + + if _is_update_invocation(): + Path(path_in_src).parent.mkdir(parents=True, exist_ok=True) + with open(path_in_src, "w") as f: + f.write(output) + return + + assert os.path.exists(path_in_src), f"Golden path `{path_in_src}` must exist" + + with open(path_in_src, "r") as f: + expected = f.read() + + if _remove_ci_labels(expected) != _remove_ci_labels(output): + unified_diff = _unified_diff( + left=expected, + right=output, + file=path_in_src, + context=3, + ) + raise AssertionError( + f"Expected golden file to match actual\n" + f"\n\n{unified_diff}\n\n" + "Re-run test with `-- --env BUCK2_UPDATE_GOLDEN=1` appended to the test command to regenerate the files" + ) + + +# Replace 128-bit configuration with placeholder. +def _replace_cfg_hash(s: str) -> str: + return re.sub(r"\b[0-9a-f]{16}\b", "", s) + + +def golden_replace_cfg_hash(*, output: str, rel_path: str) -> None: + golden( + output=_replace_cfg_hash(output), + rel_path=rel_path, + ) diff --git a/tests/e2e_util/helper/utils.py b/tests/e2e_util/helper/utils.py new file mode 100644 index 0000000000000..20b6194af527d --- /dev/null +++ b/tests/e2e_util/helper/utils.py @@ -0,0 +1,108 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+
+# pyre-unsafe
+
+import json
+import random
+import re
+import string
+import sys
+import typing
+from pathlib import Path
+
+from buck2.tests.e2e_util.api.buck import Buck
+
+
+async def read_what_ran(buck: Buck, *args) -> typing.List[typing.Dict[str, typing.Any]]:
+    out = await buck.log("what-ran", "--format", "json", *args)
+    out = [line.strip() for line in out.stdout.splitlines()]
+    out = [json.loads(line) for line in out if line]
+    return out
+
+
+def timestamp_ms(s: int, ns: int) -> int:
+    f = int(ns / 1000000)
+    assert f < 1000
+    return s * 1000 + f
+
+
+async def read_timestamps(buck: Buck, *args) -> typing.List[int]:
+    log = (await buck.log("show")).stdout.strip().splitlines()
+    return [
+        timestamp_ms(*json.loads(line)["Event"]["timestamp"])
+        for line in log
+        if json_get(line, *args) is not None
+    ]
+
+
+def is_running_on_linux() -> bool:
+    return sys.platform == "linux"
+
+
+def is_running_on_mac() -> bool:
+    return sys.platform == "darwin"
+
+
+def is_running_on_windows() -> bool:
+    return sys.platform == "win32"
+
+
+def get_targets_from_what_ran(what_ran):
+    targets = set()
+
+    for entry in what_ran:
+        m = re.match(r"^(.*?)( \((.*?)\))?( \((.*?)\))?$", entry["identity"])
+        rule, category = m.group(1), m.group(5)
+        targets.add((rule, category))
+
+    return targets
+
+
+async def expect_exec_count(buck: Buck, n: int) -> None:
+    out = await read_what_ran(buck)
+    assert len(out) == n, "unexpected actions: %s" % (out,)
+
+
+async def filter_events(buck: Buck, *args):
+    log = (await buck.log("show")).stdout.strip().splitlines()
+    found = []
+    for line in log:
+        e = json_get(line, *args)
+        if e is None:
+            continue
+        found.append(e)
+    return found
+
+
+def json_get(data, *key):
+    data = json.loads(data)
+
+    for k in key:
+        data = data.get(k)
+        if data is None:
+            break
+
+    return data
+
+
+def random_string():
+    return "".join(random.choice(string.ascii_lowercase) for i in range(256))
+
+
+def replace_hashes(strings: typing.List[str]) -> typing.List[str]:
+    return [replace_hash(s) for s in strings]
+
+
+def replace_hash(s: str) -> str:
+    return re.sub(r"\b[0-9a-f]{16}\b", "<HASH>", s)
+
+
+def read_invocation_record(record: Path) -> typing.Dict[str, typing.Any]:
+    return json.loads(record.read_text(encoding="utf-8"))["data"]["Record"]["data"][
+        "InvocationRecord"
+    ]
diff --git a/tests/e2e_util/nano_prelude/BUCK b/tests/e2e_util/nano_prelude/BUCK
new file mode 100644
index 0000000000000..c6dbe626d9a06
--- /dev/null
+++ b/tests/e2e_util/nano_prelude/BUCK
@@ -0,0 +1,12 @@
+load("@fbcode_macros//build_defs:native_rules.bzl", "buck_filegroup")
+# @oss-disable: load("//buck2/tests/e2e_util/api:visibility.fb.bzl", "visibility")
+
+oncall("build_infra")
+
+visibility = ["PUBLIC"]  # @oss-enable
+
+buck_filegroup(
+    name = "nano_prelude",
+    srcs = glob(["*.bzl"]),
+    visibility = visibility,
+)
diff --git a/tests/e2e_util/nano_prelude/README.md b/tests/e2e_util/nano_prelude/README.md
new file mode 100644
index 0000000000000..cebf9a2e3cca6
--- /dev/null
+++ b/tests/e2e_util/nano_prelude/README.md
@@ -0,0 +1,8 @@
+# Nano-prelude
+
+Very small prelude for e2e tests.
+
+Please keep this prelude as small as possible to keep tests maintainable.
+
+- Only trivial rules
+- Only rules needed in multiple tests
diff --git a/tests/e2e_util/nano_prelude/asserts.bzl b/tests/e2e_util/nano_prelude/asserts.bzl
new file mode 100644
index 0000000000000..3fd6a28718b37
--- /dev/null
+++ b/tests/e2e_util/nano_prelude/asserts.bzl
@@ -0,0 +1,33 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+def _equals(expected, actual, msg = None):
+    if expected != actual:
+        if msg == None:
+            fail("expected: {}, got: {}".format(expected, actual))
+        else:
+            fail("{}: expected: {}, got: {}".format(msg, expected, actual))
+
+def _true(condition, msg = None):
+    if not condition:
+        if msg != None:
+            fail(msg)
+        else:
+            fail("Condition is not met")
+
+def _false(condition, msg = None):
+    if condition:
+        if msg != None:
+            fail(msg)
+        else:
+            fail("Condition is expected to be false")
+
+asserts = struct(
+    equals = _equals,
+    true = _true,
+    false = _false,
+)
diff --git a/tests/e2e_util/nano_prelude/cfg_rules.bzl b/tests/e2e_util/nano_prelude/cfg_rules.bzl
new file mode 100644
index 0000000000000..01bdb6b26beb6
--- /dev/null
+++ b/tests/e2e_util/nano_prelude/cfg_rules.bzl
@@ -0,0 +1,118 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+# This is copy-paste from `prelude/configurations/util.bzl`
+
+def _configuration_info_union(infos):
+    if len(infos) == 0:
+        return ConfigurationInfo(
+            constraints = {},
+            values = {},
+        )
+    if len(infos) == 1:
+        return infos[0]
+    constraints = {k: v for info in infos for (k, v) in info.constraints.items()}
+    values = {k: v for info in infos for (k, v) in info.values.items()}
+    return ConfigurationInfo(
+        constraints = constraints,
+        values = values,
+    )
+
+def _constraint_values_to_configuration(values):
+    return ConfigurationInfo(constraints = {
+        info[ConstraintValueInfo].setting.label: info[ConstraintValueInfo]
+        for info in values
+    }, values = {})
+
+# This is copy-paste from `prelude/configurations/rules.bzl`
+
+def _constraint_setting_impl(ctx):
+    return [DefaultInfo(), ConstraintSettingInfo(label = ctx.label.raw_target())]
+
+constraint_setting = rule(
+    impl = _constraint_setting_impl,
+    is_configuration_rule = True,
+    attrs = {},
+)
+
+def _constraint_value_impl(ctx):
+    constraint_value = ConstraintValueInfo(
+        setting = ctx.attrs.constraint_setting[ConstraintSettingInfo],
+        label = ctx.label.raw_target(),
+    )
+    return [
+        DefaultInfo(),
+        constraint_value,
+        # Provide `ConfigurationInfo` from `constraint_value` so it could be used as a select key.
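+        # (select() keys resolve to targets providing `ConfigurationInfo`, so this
+        # lets a `constraint_value` target appear directly as a key in a select map.)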
+ ConfigurationInfo(constraints = { + constraint_value.setting.label: constraint_value, + }, values = {}), + ] + +constraint_value = rule( + impl = _constraint_value_impl, + is_configuration_rule = True, + attrs = { + "constraint_setting": attrs.dep(providers = [ConstraintSettingInfo]), + }, +) + +def _platform_impl(ctx): + subinfos = ( + [dep[PlatformInfo].configuration for dep in ctx.attrs.deps] + + [_constraint_values_to_configuration(ctx.attrs.constraint_values)] + ) + return [ + DefaultInfo(), + PlatformInfo( + label = str(ctx.label.raw_target()), + configuration = _configuration_info_union(subinfos), + ), + ] + +platform = rule( + impl = _platform_impl, + is_configuration_rule = True, + attrs = { + "constraint_values": attrs.list(attrs.dep(providers = [ConfigurationInfo]), default = []), + "deps": attrs.list(attrs.dep(providers = [PlatformInfo]), default = []), + }, +) + +def _config_setting_impl(ctx): + subinfos = [_constraint_values_to_configuration(ctx.attrs.constraint_values)] + subinfos.append(ConfigurationInfo(constraints = {}, values = ctx.attrs.values)) + return [DefaultInfo(), _configuration_info_union(subinfos)] + +config_setting = rule( + impl = _config_setting_impl, + is_configuration_rule = True, + attrs = { + "constraint_values": attrs.list(attrs.dep(providers = [ConstraintValueInfo]), default = []), + "values": attrs.dict(attrs.string(), attrs.string(), default = {}), + }, +) + +def _execution_platform(ctx): + return [ + DefaultInfo(), + ExecutionPlatformInfo( + label = ctx.label.raw_target(), + configuration = ctx.attrs.platform[PlatformInfo].configuration, + executor_config = CommandExecutorConfig( + local_enabled = True, + remote_enabled = False, + ), + ), + ] + +execution_platform = rule( + impl = _execution_platform, + attrs = { + "platform": attrs.dep(providers = [PlatformInfo]), + }, +) diff --git a/tests/e2e_util/nano_prelude/execution_platforms.bzl b/tests/e2e_util/nano_prelude/execution_platforms.bzl new file mode 100644 index 0000000000000..ebb1efc1ede6c --- /dev/null +++ b/tests/e2e_util/nano_prelude/execution_platforms.bzl @@ -0,0 +1,28 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +def _execution_platforms(ctx): + platforms = [ + p[ExecutionPlatformInfo] + for p in ctx.attrs.platforms + ] if ctx.attrs.platforms else [ExecutionPlatformInfo( + label = ctx.label.raw_target(), + configuration = ConfigurationInfo(constraints = {}, values = {}), + executor_config = CommandExecutorConfig(local_enabled = True, remote_enabled = False), + )] + + return [ + DefaultInfo(), + ExecutionPlatformRegistrationInfo(platforms = platforms), + ] + +execution_platforms = rule( + impl = _execution_platforms, + attrs = { + "platforms": attrs.option(attrs.list(attrs.dep(providers = [ExecutionPlatformInfo])), default = None), + }, +) diff --git a/tests/e2e_util/nano_prelude/prelude.bzl b/tests/e2e_util/nano_prelude/prelude.bzl new file mode 100644 index 0000000000000..3718c220afb6b --- /dev/null +++ b/tests/e2e_util/nano_prelude/prelude.bzl @@ -0,0 +1,21 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +load("//:asserts.bzl", _asserts = "asserts") +load("//:cfg_rules.bzl", _config_setting = "config_setting", _constraint_setting = "constraint_setting", _constraint_value = "constraint_value", _execution_platform = "execution_platform", _platform = "platform") +load("//:execution_platforms.bzl", _execution_platforms = "execution_platforms") +load("//:stub_rules.bzl", _stub = "stub", _trivial_build = "trivial_build") + +asserts = _asserts +platform = _platform +config_setting = _config_setting +constraint_setting = _constraint_setting +constraint_value = _constraint_value +execution_platform = _execution_platform +execution_platforms = _execution_platforms +stub = _stub +trivial_build = _trivial_build diff --git a/tests/e2e_util/nano_prelude/stub_rules.bzl b/tests/e2e_util/nano_prelude/stub_rules.bzl new file mode 100644 index 0000000000000..5bb8d9f07b935 --- /dev/null +++ b/tests/e2e_util/nano_prelude/stub_rules.bzl @@ -0,0 +1,36 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# @nolint + +def _stub(ctx): + _ignore = ctx + return [ + DefaultInfo(), + ] + +# Rule that has dependencies and does nothing. Useful for query-like tests. +stub = rule( + impl = _stub, + attrs = { + "deps": attrs.list(attrs.dep(), default = []), + "exec_deps": attrs.list(attrs.exec_dep(), default = []), + "srcs": attrs.list(attrs.source(), default = []), + "toolchain_deps": attrs.list(attrs.toolchain_dep(), default = []), + "labels": attrs.list(attrs.string(), default = []), + }, +) + +# Rule with no attrs that produces an output. Useful if you want to be able to +# build literally anything +def _trivial_build(ctx): + return [DefaultInfo(default_output = ctx.actions.write("foo.txt", "abcd"))] + +trivial_build = rule( + impl = _trivial_build, + attrs = {}, +) diff --git a/tests/e2e_util/test_bxl_assert_dependencies_template.py b/tests/e2e_util/test_bxl_assert_dependencies_template.py new file mode 100644 index 0000000000000..0e0c9ade7ad69 --- /dev/null +++ b/tests/e2e_util/test_bxl_assert_dependencies_template.py @@ -0,0 +1,38 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +import os + +from buck2.tests.e2e_util.asserts import expect_failure +from buck2.tests.e2e_util.buck_workspace import buck_test + + +# This is just a template test case for `check_dependencies_test` to use buck2's e2e test framework. +# It does not need to be edited for new `check_dependencies_test`. 
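+#
+# The wrapping test macro is expected to pass its parameters through environment
+# variables: `BXL_MAIN` (the bxl script to run), `TARGET`, a comma-separated
+# `DEPS` list, and `EXPECT_FAILURE_MSG` (empty when the invocation should succeed).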
+
+
+def process_list_arg() -> list[str]:
+    list_env = os.environ["DEPS"]
+    split_list = [] if list_env == "" else list_env.split(",")
+    return [elem for item in split_list for elem in ("--deps", item)]
+
+
+@buck_test(inplace=True)
+async def test_check_dependencies_bxl(buck) -> None:
+    dep_list = process_list_arg()
+    expect_failure_msg = os.environ["EXPECT_FAILURE_MSG"]
+    bxl_call = buck.bxl(
+        os.environ["BXL_MAIN"],
+        "--",
+        "--target",
+        os.environ["TARGET"],
+        *dep_list,
+    )
+    if expect_failure_msg == "":
+        await bxl_call
+    else:
+        await expect_failure(bxl_call, stderr_regex=expect_failure_msg)
diff --git a/tests/e2e_util/test_bxl_audit_dependents_template.py b/tests/e2e_util/test_bxl_audit_dependents_template.py
new file mode 100644
index 0000000000000..95d9750a02300
--- /dev/null
+++ b/tests/e2e_util/test_bxl_audit_dependents_template.py
@@ -0,0 +1,42 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+import os
+
+from buck2.tests.e2e_util.asserts import expect_failure
+from buck2.tests.e2e_util.buck_workspace import buck_test
+
+
+# This is just a template test case for `audit_dependents_test` to use buck2's e2e test framework.
+# It does not need to be edited for new `audit_dependents_test`.
+
+
+def process_list_arg() -> list[str]:
+    list_env = os.environ["ALLOWLIST"]
+    split_list = [] if list_env == "" else list_env.split(",")
+    return [elem for item in split_list for elem in ("--allowlist_patterns", item)]
+
+
+@buck_test(inplace=True)
+async def test_audit_dependents_bxl(buck) -> None:
+    allow_list = process_list_arg()
+    expect_failure_msg = os.environ["EXPECT_FAILURE_MSG"]
+
+    bxl_call = buck.bxl(
+        os.environ["BXL_MAIN"],
+        "--",
+        "--target",
+        os.environ["TARGET"],
+        "--source_target",
+        os.environ["SOURCE_TARGET"],
+        # `process_list_arg` already interleaves the `--allowlist_patterns` flag
+        # with each pattern, so the pairs can be splatted in directly.
+        *allow_list,
+    )
+    if expect_failure_msg == "":
+        await bxl_call
+    else:
+        await expect_failure(bxl_call, stderr_regex=expect_failure_msg)
diff --git a/tests/e2e_util/test_bxl_check_dependencies_template.py b/tests/e2e_util/test_bxl_check_dependencies_template.py
new file mode 100644
index 0000000000000..b07ac37a86390
--- /dev/null
+++ b/tests/e2e_util/test_bxl_check_dependencies_template.py
@@ -0,0 +1,52 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under both the MIT license found in the
+# LICENSE-MIT file in the root directory of this source tree and the Apache
+# License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+# of this source tree.
+
+import os
+
+from buck2.tests.e2e_util.asserts import expect_failure
+from buck2.tests.e2e_util.buck_workspace import buck_test, get_mode_from_platform
+
+
+# This is just a template test case for `check_dependencies_test` to use buck2's e2e test framework.
+# It does not need to be edited for new `check_dependencies_test`.
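+#
+# Parameters arrive via environment variables: `BXL_MAIN`, `TARGET`,
+# comma-separated `ALLOWLIST`/`BLOCKLIST` patterns, `EXPECT_FAILURE_MSG`, and
+# optionally `CHECK_DEPENDENCIES_TEST_FBCODE_BUILD_MODE` to pick the mode file.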
+ + +def process_list_arg(is_allowlist: bool) -> list[str]: + env_var = "ALLOWLIST" if is_allowlist else "BLOCKLIST" + list_env = os.environ[env_var] + split_list = [] if list_env == "" else list_env.split(",") + list_str_arg = "--allowlist_patterns" if is_allowlist else "--blocklist_patterns" + return [elem for item in split_list for elem in (list_str_arg, item)] + + +@buck_test(inplace=True) +async def test_check_dependencies_bxl(buck) -> None: + allowlist = process_list_arg(is_allowlist=True) + blocklist = process_list_arg(is_allowlist=False) + expect_failure_msg = os.environ["EXPECT_FAILURE_MSG"] + + fbcode_build_mode = os.environ.get("CHECK_DEPENDENCIES_TEST_FBCODE_BUILD_MODE") + if fbcode_build_mode: + mode_argfile = get_mode_from_platform( + fbcode_build_mode, skip_validation_i_know_what_im_doing=True + ) + else: + mode_argfile = get_mode_from_platform() + + bxl_call = buck.bxl( + os.environ["BXL_MAIN"], + mode_argfile, + "--", + "--target", + os.environ["TARGET"], + *allowlist, + *blocklist, + ) + if expect_failure_msg == "": + await bxl_call + else: + await expect_failure(bxl_call, stderr_regex=expect_failure_msg) diff --git a/tests/e2e_util/test_bxl_template.py b/tests/e2e_util/test_bxl_template.py new file mode 100644 index 0000000000000..3ff2ab0dfe3a8 --- /dev/null +++ b/tests/e2e_util/test_bxl_template.py @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +import os + +from buck2.tests.e2e_util.api.buck import Buck +from buck2.tests.e2e_util.buck_workspace import buck_test + + +# This is just a template test case for `bxl_test` to use buck2's e2e test framework. +# It does not need to be edited for new `bxl_test`. + + +@buck_test(inplace=True) +async def test_bxl(buck: Buck) -> None: + await buck.bxl(os.environ["BXL_MAIN"]) diff --git a/tests/tools/makefile_to_depfile/BUCK.v2 b/tests/tools/makefile_to_depfile/BUCK.v2 new file mode 100644 index 0000000000000..89bb6611a6574 --- /dev/null +++ b/tests/tools/makefile_to_depfile/BUCK.v2 @@ -0,0 +1,12 @@ +load("@fbsource//tools/build_defs:cram_test.bzl", "cram_test") + +cram_test( + name = "integration", + srcs = glob([ + "*.t", + "fixtures/**/*", + ]), + env = { + "DEP_FILE_PROCESSOR": "$(exe prelude//cxx/tools:dep_file_processor)", + }, +) diff --git a/tests/tools/makefile_to_depfile/fixtures/absolute.mk b/tests/tools/makefile_to_depfile/fixtures/absolute.mk new file mode 100644 index 0000000000000..67627ce598ff1 --- /dev/null +++ b/tests/tools/makefile_to_depfile/fixtures/absolute.mk @@ -0,0 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +foobar.o: __ROOT__/foo/bar /absolute/path \ + foo bar baz diff --git a/tests/tools/makefile_to_depfile/fixtures/basic.mk b/tests/tools/makefile_to_depfile/fixtures/basic.mk new file mode 100644 index 0000000000000..b3c4c81c9ccf7 --- /dev/null +++ b/tests/tools/makefile_to_depfile/fixtures/basic.mk @@ -0,0 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +app: app.c header.h diff --git a/tests/tools/makefile_to_depfile/fixtures/edge-cases.mk b/tests/tools/makefile_to_depfile/fixtures/edge-cases.mk new file mode 100644 index 0000000000000..328f445ab3966 --- /dev/null +++ b/tests/tools/makefile_to_depfile/fixtures/edge-cases.mk @@ -0,0 +1,12 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +app.o: leading-separator \ + \ leading-separator-in-file \ + trailing-separator-in-file\ \ + multiple\ \ separator \ + trailing-separator \ diff --git a/tests/tools/makefile_to_depfile/fixtures/empty.mk b/tests/tools/makefile_to_depfile/fixtures/empty.mk new file mode 100644 index 0000000000000..c515d7ea9add8 --- /dev/null +++ b/tests/tools/makefile_to_depfile/fixtures/empty.mk @@ -0,0 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +app.o: diff --git a/tests/tools/makefile_to_depfile/fixtures/escape.mk b/tests/tools/makefile_to_depfile/fixtures/escape.mk new file mode 100644 index 0000000000000..c7ccb19aad0a2 --- /dev/null +++ b/tests/tools/makefile_to_depfile/fixtures/escape.mk @@ -0,0 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +app: app.c header.h header\ 2.h diff --git a/tests/tools/makefile_to_depfile/fixtures/newlines.mk b/tests/tools/makefile_to_depfile/fixtures/newlines.mk new file mode 100644 index 0000000000000..96f3667f93be4 --- /dev/null +++ b/tests/tools/makefile_to_depfile/fixtures/newlines.mk @@ -0,0 +1,18 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +buck-out/v2/gen/fbsource/999b02f9444004c1/some/path/gtest.pic.o: \ + third-party/googletest/1.14.0/googletest/googletest/src/gtest-all.cc \ + third-party/googletest/1.14.0/googletest/googletest/src/gtest.cc \ + third-party/googletest/1.14.0/googletest/googletest/src/gtest-internal-inl.h \ + third-party/googletest/1.14.0/googletest/googletest/src/gtest-death-test.cc \ + third-party/googletest/1.14.0/googletest/googletest/src/gtest-filepath.cc \ + third-party/googletest/1.14.0/googletest/googletest/src/gtest-matchers.cc \ + third-party/googletest/1.14.0/googletest/googletest/src/gtest-port.cc \ + third-party/googletest/1.14.0/googletest/googletest/src/gtest-printers.cc \ + third-party/googletest/1.14.0/googletest/googletest/src/gtest-test-part.cc \ + third-party/googletest/1.14.0/googletest/googletest/src/gtest-typed-test.cc diff --git a/tests/tools/makefile_to_depfile/fixtures/not-normalized.mk b/tests/tools/makefile_to_depfile/fixtures/not-normalized.mk new file mode 100644 index 0000000000000..33b0410de2ff3 --- /dev/null +++ b/tests/tools/makefile_to_depfile/fixtures/not-normalized.mk @@ -0,0 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +foobar.o: foo/bar/../baz diff --git a/tests/tools/makefile_to_depfile/fixtures/windows.mk b/tests/tools/makefile_to_depfile/fixtures/windows.mk new file mode 100644 index 0000000000000..d2500c5a03ee4 --- /dev/null +++ b/tests/tools/makefile_to_depfile/fixtures/windows.mk @@ -0,0 +1,11 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. 
+ +foo\\bar.obj: \ + foo\a \ + bar\\b \ + baz diff --git a/tests/tools/makefile_to_depfile/test-absolute.t b/tests/tools/makefile_to_depfile/test-absolute.t new file mode 100644 index 0000000000000..d1388c2555106 --- /dev/null +++ b/tests/tools/makefile_to_depfile/test-absolute.t @@ -0,0 +1,7 @@ + $ sed "s:__ROOT__:$(pwd):g" < "$TESTDIR/fixtures/absolute.mk" > "$CRAMTMP/absolute.mk" + $ $DEP_FILE_PROCESSOR "makefile" "$CRAMTMP/absolute.mk" "$CRAMTMP/absolute" true + $ cat "$CRAMTMP/absolute" + foo/bar + foo + bar + baz diff --git a/tests/tools/makefile_to_depfile/test-basic.t b/tests/tools/makefile_to_depfile/test-basic.t new file mode 100644 index 0000000000000..5596887d4ec45 --- /dev/null +++ b/tests/tools/makefile_to_depfile/test-basic.t @@ -0,0 +1,4 @@ + $ $DEP_FILE_PROCESSOR "makefile" "$TESTDIR/fixtures/basic.mk" "$CRAMTMP/basic" true + $ cat "$CRAMTMP/basic" + app.c + header.h diff --git a/tests/tools/makefile_to_depfile/test-edge-cases.t b/tests/tools/makefile_to_depfile/test-edge-cases.t new file mode 100644 index 0000000000000..bd3f510338857 --- /dev/null +++ b/tests/tools/makefile_to_depfile/test-edge-cases.t @@ -0,0 +1,8 @@ + $ $DEP_FILE_PROCESSOR "makefile" "$TESTDIR/fixtures/edge-cases.mk" "$CRAMTMP/edge-cases" true + $ cat "$CRAMTMP/edge-cases" + leading-separator + leading-separator-in-file + trailing-separator-in-file + multiple separator + trailing-separator + diff --git a/tests/tools/makefile_to_depfile/test-empty.t b/tests/tools/makefile_to_depfile/test-empty.t new file mode 100644 index 0000000000000..e63f6ce2154a0 --- /dev/null +++ b/tests/tools/makefile_to_depfile/test-empty.t @@ -0,0 +1,2 @@ + $ $DEP_FILE_PROCESSOR "makefile" "$TESTDIR/fixtures/empty.mk" "$CRAMTMP/empty" true + $ cat "$CRAMTMP/empty" diff --git a/tests/tools/makefile_to_depfile/test-escape.t b/tests/tools/makefile_to_depfile/test-escape.t new file mode 100644 index 0000000000000..7ad1a7cd90044 --- /dev/null +++ b/tests/tools/makefile_to_depfile/test-escape.t @@ -0,0 +1,5 @@ + $ $DEP_FILE_PROCESSOR "makefile" "$TESTDIR/fixtures/escape.mk" "$CRAMTMP/escape" true + $ cat "$CRAMTMP/escape" + app.c + header.h + header 2.h diff --git a/tests/tools/makefile_to_depfile/test-newlines.t b/tests/tools/makefile_to_depfile/test-newlines.t new file mode 100644 index 0000000000000..af84f40a6f2dd --- /dev/null +++ b/tests/tools/makefile_to_depfile/test-newlines.t @@ -0,0 +1,12 @@ + $ $DEP_FILE_PROCESSOR "makefile" "$TESTDIR/fixtures/newlines.mk" "$CRAMTMP/newlines" true + $ cat "$CRAMTMP/newlines" + third-party/googletest/1.14.0/googletest/googletest/src/gtest-all.cc + third-party/googletest/1.14.0/googletest/googletest/src/gtest.cc + third-party/googletest/1.14.0/googletest/googletest/src/gtest-internal-inl.h + third-party/googletest/1.14.0/googletest/googletest/src/gtest-death-test.cc + third-party/googletest/1.14.0/googletest/googletest/src/gtest-filepath.cc + third-party/googletest/1.14.0/googletest/googletest/src/gtest-matchers.cc + third-party/googletest/1.14.0/googletest/googletest/src/gtest-port.cc + third-party/googletest/1.14.0/googletest/googletest/src/gtest-printers.cc + third-party/googletest/1.14.0/googletest/googletest/src/gtest-test-part.cc + third-party/googletest/1.14.0/googletest/googletest/src/gtest-typed-test.cc diff --git a/tests/tools/makefile_to_depfile/test-not-normalized.t b/tests/tools/makefile_to_depfile/test-not-normalized.t new file mode 100644 index 0000000000000..ac7ecb283deab --- /dev/null +++ b/tests/tools/makefile_to_depfile/test-not-normalized.t @@ -0,0 +1,3 @@ + $ 
$DEP_FILE_PROCESSOR "makefile" "$TESTDIR/fixtures/not-normalized.mk" "$CRAMTMP/not-normalized" true + $ cat "$CRAMTMP/not-normalized" + foo/baz diff --git a/tests/tools/makefile_to_depfile/test-windows.t b/tests/tools/makefile_to_depfile/test-windows.t new file mode 100644 index 0000000000000..70f70ea9789b0 --- /dev/null +++ b/tests/tools/makefile_to_depfile/test-windows.t @@ -0,0 +1,5 @@ + $ $DEP_FILE_PROCESSOR "makefile" "$TESTDIR/fixtures/windows.mk" "$CRAMTMP/windows" true + $ cat "$CRAMTMP/windows" + foo/a + bar//b + baz diff --git a/website/README.md b/website/README.md index 2aee58ee8c97f..e53dd182fcc99 100644 --- a/website/README.md +++ b/website/README.md @@ -1,62 +1,93 @@ # Website -This website is built using [Docusaurus 2](https://docusaurus.io/), a modern static website generator. +This website is built using [Docusaurus 2](https://docusaurus.io/), a modern +static website generator. + +## Mac Setup + +`yarn` and `node` are installed by default on devservers, but not on Macs. To +work on this site on Macs, add `fbsource/xplat/third-party/node/bin` and +`fbsource/third-party/yarn` to your `PATH`. ## Installation +The very first time: + ```shell $ yarn global add node-gyp -$ yarn ``` -If on Eden you might get faster builds by doing `eden redirect add $PWD/node_modules bind` first. - -## Build - -To build a copy of the static content against the version of `buck2` on your path: +And if on Eden: ```shell -$ yarn build +$ eden redirect add $PWD/node_modules bind +$ eden redirect add $PWD/build bind ``` -To build a copy of the static content using `../.buck2.sh` (which builds buck2 from the repo before invoking it): +Also on your first time, and potentially after each large rebase: ```shell -$ yarn build_local +$ yarn install ``` -To build a copy of the static content using Cargo to build buck2: +## Building Generated Content + +Again, on your first time: + ```shell -$ yarn build_cargo +$ yarn generate ``` -All of these commands generate static content into the `build` directory and can be served using any static contents hosting service. +You will need to re-run this each time you want to see changes to generated +content, primarily the API docs. You can alternatively `yarn generate_local` to +update generated content using a built-from-source buck2. ## Local Development ```shell -$ yarn start +$ yarn start-fb ``` -This command starts a local development server and opens up a browser window. Any changes to generated Starlark API documentation require running the build command above, but changes to the .md files that are checked into the repository should be reflected live without having to restart the server. +This command starts a local development server, and if on Mac, opens up a local +browser window. + +On a devserver, the window does not open for you, but you can head to +https://devvmXX.foo.com:9094 in your browser. This requires lighthouse or VPN. + +To get your changes reflected on the local server: -### Run on devserver +1. For non-generated markdown content in the `docs/` directory, reload the + page. +2. For generated markdown content, re-run `yarn generate` as above and reload + the page. +3. For other changes to the site configuration, Ctrl+C and restart + `yarn start`. Then, hard-reload the page (Ctrl+Shift+R). -If developing on a devserver, you'll need to create a tunnel from your Mac to the server, so you can access it in the browser. 
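+A typical iteration loop on generated content, assuming the dev server above is
+already running, might look like:
+
+```shell
+$ yarn generate   # re-runs ./gen_docs.py --prod; then reload the page
+```
+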
+## OSS Variants -To do that, run the following **from your mac**: +To see the external versions of the page, do: + +```shell +$ yarn start ``` + +If on a devserver, this will require manually setting up an SSH tunnel by +running the following **from your mac**: + +```shell ssh -L 3000:localhost:3000 $DEVSERVER ``` -## Internal variants +In all cases, you'll need to be either on lighthouse or VPN for this to work. -To see the internal versions of the page, do: +## Prod Build -```shell -$ yarn build-fb -$ yarn start-fb -``` +You can perform a production build via `yarn build` or `yarn build-fb`. This +generates a static site into `build/`. This site can be served via any static +site viewer - docusaurus has one built in that you can run via `yarn serve`. As +of Oct 2024 this only works on Macs, not on devservers. + +Iterating on the prod build is slower than on the local `yarn start` server. ## Deployment @@ -64,4 +95,5 @@ $ yarn start-fb $ GIT_USER= USE_SSH=true yarn deploy ``` -If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch. +If you are using GitHub pages for hosting, this command is a convenient way to +build the website and push to the `gh-pages` branch. diff --git a/website/config_impl.ts b/website/config_impl.ts new file mode 100644 index 0000000000000..61c6ded761b33 --- /dev/null +++ b/website/config_impl.ts @@ -0,0 +1,184 @@ +/** + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +import lightCodeTheme from 'prism-react-renderer/themes/github'; +import darkCodeTheme from 'prism-react-renderer/themes/dracula'; +import { fbContent, isInternal } from 'docusaurus-plugin-internaldocs-fb/internal'; +import type { ThemeConfig as ClassicPresetConfig, Options as ClassicPresetOptions } from '@docusaurus/preset-classic'; +import type { DocusaurusConfig } from '@docusaurus/types'; + +import { postProcessItems } from './sidebars.js'; +import { redirects } from './redirects'; + +const presetOptions: ClassicPresetOptions = ({ + docs: { + path: '../docs', + sidebarPath: require.resolve('./sidebars_generated.ts'), + async sidebarItemsGenerator({ defaultSidebarItemsGenerator, ...args }) { + const items = await defaultSidebarItemsGenerator({ + ...args + }); + return postProcessItems(items); + }, + }, + theme: { + customCss: require.resolve('./src/css/custom.css'), + }, +}); + +const themeConfig: ClassicPresetConfig = ({ + docs: { + sidebar: { + hideable: true, + }, + }, + navbar: { + title: 'Buck2', + logo: { + alt: 'Buck2 Logo', + src: 'img/logo.svg', + }, + items: [ + { + type: 'doc', + docId: 'index', + position: 'left', + label: 'Docs', + }, + { + to: '/docs/api', + position: 'left', + label: 'API', + activeBaseRegex: '/docs/api', + }, + { + to: '/docs/prelude/globals', + position: 'left', + label: 'Rules', + activeBasePath: '/docs/prelude', + }, + { + href: fbContent({ + internal: 'https://www.internalfb.com/code/buck2', + external: 'https://github.com/facebook/buck2', + }), + // @ts-ignore : The type signature for `fbContent` incorrectly claims it might return a `[]` + label: fbContent({ + internal: 'CodeHub', + external: 'GitHub', + }), + position: 'right', + }, + ], + }, + footer: { + style: 'dark', + links: [ + { + title: 'Docs', + items: [ + { 
+ label: 'User guide', + to: '/docs', + }, + ], + }, + { + title: 'Community', + items: isInternal() ? [ + { + label: 'User group', + href: 'https://fb.workplace.com/groups/buck2users', + }, + { + label: 'Announcement group', + href: 'https://fb.workplace.com/groups/buck2prototyping', + }, + ] : [ + { + label: 'GitHub issues', + href: 'https://github.com/facebook/buck2/issues', + }, + ], + }, + { + title: 'More', + items: [ + { + label: 'Code', + href: fbContent({ + internal: 'https://www.internalfb.com/code/fbsource/fbcode/buck2/', + external: 'https://github.com/facebook/buck2', + }), + }, + { + label: 'Terms of Use', + href: 'https://opensource.fb.com/legal/terms', + }, + { + label: 'Privacy Policy', + href: 'https://opensource.fb.com/legal/privacy', + }, + ], + }, + ], + copyright: `Copyright © ${new Date().getFullYear()} Meta Platforms, Inc. Built with Docusaurus.`, + }, + prism: { + theme: lightCodeTheme, + darkTheme: darkCodeTheme, + }, +}); + +const config: DocusaurusConfig = ({ + title: 'Buck2', + url: 'https://buck2.build', + baseUrl: '/', + onBrokenLinks: 'throw', + trailingSlash: true, + onBrokenMarkdownLinks: 'warn', + favicon: 'img/logo.png', + organizationName: 'facebook', + projectName: 'buck2', + + presets: [ + [ + require.resolve('docusaurus-plugin-internaldocs-fb/docusaurus-preset'), + presetOptions, + ], + ], + + plugins: [ + [ + '@docusaurus/plugin-google-gtag', + { + trackingID: 'G-GEGGHE39PE', + anonymizeIP: true, + }, + ], + [ + '@docusaurus/plugin-client-redirects', + { + redirects: redirects, + }, + ], + ], + + themeConfig, + + // @ts-ignore : Fields of this are not declared as optional, but they are + markdown: ({ + // Use mdx for `.mdx` files and commonmark for `.md` files + format: 'mdx', + }), +}); + +module.exports = { + config: config, +}; diff --git a/website/docusaurus.config.js b/website/docusaurus.config.js index c5338f2ac603e..68eedac07e215 100644 --- a/website/docusaurus.config.js +++ b/website/docusaurus.config.js @@ -7,166 +7,9 @@ * of this source tree. */ -const lightCodeTheme = require('prism-react-renderer/themes/github'); -const darkCodeTheme = require('prism-react-renderer/themes/dracula'); -const { itemFilter, itemSort } = require('./sidebars.js'); -const { fbContent, isInternal } = require('docusaurus-plugin-internaldocs-fb/internal'); +// Our internal doc builder requires a `.js` file to exist, so have this and keep the actual +// implementation in `.ts` -// With JSDoc @type annotations, IDEs can provide config autocompletion -/** @type {import('@docusaurus/types').DocusaurusConfig} */ -(module.exports = { - title: 'Buck2', - url: 'https://buck2.build', - baseUrl: '/', - onBrokenLinks: 'throw', - trailingSlash: true, - onBrokenMarkdownLinks: 'warn', - favicon: 'img/logo.png', - organizationName: 'facebook', // Usually your GitHub org/user name. - projectName: 'buck2', // Usually your repo name. +const { config } = require('./config_impl.ts'); - presets: [ - [ - require.resolve('docusaurus-plugin-internaldocs-fb/docusaurus-preset'), - /** @type {import('@docusaurus/preset-classic').Options} */ - ({ - docs: { - path: '../docs', - sidebarPath: require.resolve('./sidebars_generated.js'), - // Please change this to your repo. 
- // editUrl: 'https://github.com/facebook/docusaurus/edit/main/website/', - async sidebarItemsGenerator({ defaultSidebarItemsGenerator, docs, item, ...args }) { - const items = await defaultSidebarItemsGenerator({ - docs: itemFilter(item, docs), - item: item, - ...args - }); - return itemSort(items); - }, - }, - theme: { - customCss: require.resolve('./src/css/custom.css'), - }, - }), - ], - ], - - plugins: [ - [require.resolve('docusaurus-lunr-search'), { - excludeRoutes: [ - ] - }], - [ - '@docusaurus/plugin-google-gtag', - { - trackingID: 'G-GEGGHE39PE', - anonymizeIP: true, - }, - ], - ], - - themeConfig: - /** @type {import('@docusaurus/preset-classic').ThemeConfig} */ - ({ - docs: { - sidebar: { - hideable: true, - }, - }, - navbar: { - title: 'Buck2', - logo: { - alt: 'My Site Logo', - src: 'img/logo.svg', - }, - items: [ - { - type: 'doc', - docId: 'index', - position: 'left', - label: 'Docs', - }, - { - type: 'doc', - docId: 'api/rules', - position: 'left', - label: 'Rules', - }, - { - type: 'doc', - docId: 'api', - position: 'left', - label: 'API', - }, - { - href: fbContent({ - internal: 'https://www.internalfb.com/code/buck2', - external: 'https://github.com/facebook/buck2', - }), - label: fbContent({ - internal: 'CodeHub', - external: 'GitHub', - }), - position: 'right', - }, - ], - }, - footer: { - style: 'dark', - links: [ - { - title: 'Docs', - items: [ - { - label: 'User guide', - to: '/docs', - }, - ], - }, - { - title: 'Community', - items: isInternal() ? [ - { - label: 'User group', - href: 'https://fb.workplace.com/groups/buck2users', - }, - { - label: 'Announcement group', - href: 'https://fb.workplace.com/groups/buck2prototyping', - }, - ] : [ - { - label: 'GitHub issues', - href: 'https://github.com/facebook/buck2/issues', - }, - ], - }, - { - title: 'More', - items: [ - { - label: 'Code', - href: fbContent({ - internal: 'https://www.internalfb.com/code/fbsource/fbcode/buck2/', - external: 'https://github.com/facebook/buck2', - }), - }, - { - label: 'Terms of Use', - href: 'https://opensource.fb.com/legal/terms', - }, - { - label: 'Privacy Policy', - href: 'https://opensource.fb.com/legal/privacy', - }, - ], - }, - ], - copyright: `Copyright © ${new Date().getFullYear()} Meta Platforms, Inc. Built with Docusaurus.`, - }, - prism: { - theme: lightCodeTheme, - darkTheme: darkCodeTheme, - }, - }), -}); +module.exports = config diff --git a/website/gen_docs.py b/website/gen_docs.py new file mode 100755 index 0000000000000..d66fd94cddf08 --- /dev/null +++ b/website/gen_docs.py @@ -0,0 +1,218 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under both the MIT license found in the +# LICENSE-MIT file in the root directory of this source tree and the Apache +# License, Version 2.0 found in the LICENSE-APACHE file in the root directory +# of this source tree. + +# pyre-strict + +""" +Generate API documentation for the website. +""" + +import argparse +import os +import shutil +import subprocess +import tempfile +from pathlib import Path +from typing import List + + +def read_file(path: Path) -> str: + with open(path, "r") as f: + return f.read() + + +def write_file(path: Path, contents: str) -> None: + with open(path, "w") as f: + f.write(contents) + + +def setup_gen_dir(path: Path) -> None: + shutil.rmtree(path, ignore_errors=True) + path.mkdir(parents=True, exist_ok=True) + (path / "README.txt").write_text( + """ +This directory contains generated files. 
+ +Re-generate by running `fbcode/buck2/website/gen_docs.py`. +""" + ) + + +def buck_command(args: argparse.Namespace) -> str: + if args.buck2: + return args.buck2 + elif args.prod: + return "buck2" + elif args.cargo: + return "cargo run --bin=buck2 --" + else: + return "./buck2.sh" + + +def copy_starlark_docs() -> None: + base_path = Path("docs") / "developers" / "starlark" + setup_gen_dir(base_path) + # Copy the starlark docs over. docusaurus does not handle upward path traversal very well. + for x in Path("starlark-rust/docs").glob("*.md"): + name = Path(x).stem + prefix = "---\nid: " + name + "\n---\n" + write_file(base_path / (name + ".generated.md"), prefix + read_file(x)) + + +def generate_api_docs(buck: str) -> None: + with tempfile.TemporaryDirectory() as tmp: + base_dir = Path("docs") / "prelude" + setup_gen_dir(base_dir) + # Actually generate the docs + print("Running Buck...") + subprocess.run( + buck + + " docs starlark --format=markdown_files --output-dir=" + + tmp + + " prelude//docs:rules.bzl", + shell=True, + check=True, + ) + + src = read_file(Path(tmp) / "prelude" / "docs" / "rules.bzl.md") + dest = base_dir / "globals.generated.md" + + prefix = "---\nid: globals\n---\n" + prefix += "# Rules\n\nThese rules are available as standard in Buck2.\n" + src = "\n".join(src.splitlines()[1:]) + + os.makedirs(dest.parent, exist_ok=True) + write_file(dest, prefix + src) + + with tempfile.TemporaryDirectory() as tmp: + base_dir = Path("docs") / "api" + setup_gen_dir(base_dir) + subprocess.run( + buck + " docs starlark-builtins --output-dir " + tmp, + shell=True, + check=True, + ) + + for orig in Path(tmp).rglob("*.md"): + path = orig.relative_to(tmp) + dest = base_dir.joinpath(path) + dest.parent.mkdir(parents=True, exist_ok=True) + shutil.copyfile(orig, dest) + + +def parse_subcommands(output: str) -> List[str]: + res = [] + seen_subcommands = False + for x in output.splitlines(): + if x == "Commands:": + seen_subcommands = True + if seen_subcommands and x.startswith(" ") and len(x) > 2 and x[2].isalpha(): + sub = x.strip().split()[0] + if sub != "help": + res.append(sub) + return res + + +def generate_help_docs_subcommand(buck: str, args: List[str]) -> str: + cmd = buck + " " + " ".join(args) + " --help" + print("Running " + cmd + " ...") + res = subprocess.run(cmd, shell=True, check=True, capture_output=True) + root = res.stdout.decode() + return ( + "\n\n```text\n" + + root + + "\n```" + + "\n\n".join( + [ + generate_help_docs_subcommand(buck, args + [sub]) + for sub in parse_subcommands(root) + ] + ) + ) + + +def generate_help_docs(buck: str) -> None: + base_dir = Path("docs") / "users" / "commands" + setup_gen_dir(base_dir) + + cmd = buck + " --help" + print("Running " + cmd + " ...") + res = subprocess.run(cmd, shell=True, check=True, capture_output=True) + for sub in parse_subcommands(res.stdout.decode()): + output = generate_help_docs_subcommand(buck, [sub]) + write_file( + base_dir / (sub + ".generated.md"), + "---\nid: " + + sub + + "\ntitle: " + + sub + + "\n---\nThese are the flags/commands under `buck2 " + + sub + + "` and their `--help` output:" + + output, + ) + + +def generate_query_docs(buck: str) -> None: + base_dir = Path("docs") / "users" / "query" + setup_gen_dir(base_dir) + + for x in ["uquery", "cquery", "aquery"]: + cmd = buck + " docs " + x + " --format=markdown" + print("Running " + cmd + " ...") + res = subprocess.run(cmd, shell=True, check=True, capture_output=True) + write_file( + base_dir / (x + ".generated.md"), + "---\nid: " + + x + + "\ntitle: " + + 
x.title()
+            + " Environment\n---\n"
+            + res.stdout.decode(),
+        )
+
+
+def main() -> None:
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--prod",
+        action="store_true",
+        default=False,
+        help="Whether to use the production `buck2` binary",
+    )
+    parser.add_argument(
+        "--cargo",
+        action="store_true",
+        default=False,
+        help="Whether to use a `cargo`-built binary.",
+    )
+    parser.add_argument(
+        "--buck2",
+        nargs="?",
+        help="Path to a specific `buck2` binary to use.",
+    )
+    args = parser.parse_args()
+
+    # Change to buck2 directory
+    buck2_dir = Path(__file__).absolute().parent.parent
+    os.chdir(str(buck2_dir))
+
+    # Clear the docs folder first so that if we change the names of any
+    # objects, we'll remove old docs
+    for x in Path("docs").rglob("*.generated.md"):
+        os.remove(x)
+
+    buck = buck_command(args)
+    copy_starlark_docs()
+    generate_api_docs(buck)
+    generate_help_docs(buck)
+    generate_query_docs(buck)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/website/package.json b/website/package.json
index 69d6b09d2b3e0..f6a9275175c49 100644
--- a/website/package.json
+++ b/website/package.json
@@ -5,10 +5,11 @@
   "scripts": {
     "docusaurus": "docusaurus",
     "start": "docusaurus start",
-    "build": "../docs.py --prod && docusaurus build",
-    "build_local": "../docs.py && docusaurus build",
-    "build_cargo": "../docs.py --cargo && docusaurus build",
-    "build_prebuilt": "../docs.py --buck2 $BUCK2_BIN && docusaurus build",
+    "generate": "./gen_docs.py --prod",
+    "generate_local": "./gen_docs.py",
+    "build": "./gen_docs.py && docusaurus build",
+    "build_cargo": "./gen_docs.py --cargo && docusaurus build",
+    "build_prebuilt": "./gen_docs.py --buck2 $BUCK2_BIN && docusaurus build",
     "swizzle": "docusaurus swizzle",
     "deploy": "docusaurus deploy",
     "clear": "docusaurus clear",
@@ -17,18 +18,19 @@
     "write-heading-ids": "docusaurus write-heading-ids"
   },
   "dependencies": {
-    "@docusaurus/core": "2.3.1",
-    "@docusaurus/plugin-google-gtag": "^2.3.1",
-    "@docusaurus/preset-classic": "2.3.1",
+    "@babel/traverse": "^7.23.2",
+    "@docusaurus/core": "3.5.1",
+    "@docusaurus/plugin-client-redirects": "3.5.1",
+    "@docusaurus/plugin-google-gtag": "3.5.1",
+    "@docusaurus/preset-classic": "3.5.1",
     "@mdx-js/react": "^1.6.21",
     "@svgr/webpack": "^5.5.0",
     "clsx": "^1.1.1",
-    "docusaurus-lunr-search": "^2.2.0",
-    "docusaurus-plugin-internaldocs-fb": "1.8.0",
+    "docusaurus-plugin-internaldocs-fb": "^1.18.4",
     "file-loader": "^6.2.0",
     "prism-react-renderer": "^1.2.1",
-    "react": "^17.0.1",
-    "react-dom": "^17.0.1",
+    "react": "^18.0.0",
+    "react-dom": "^18.0.0",
     "url-loader": "^4.1.1"
   },
   "browserslist": {
@@ -45,6 +47,7 @@
   },
   "resolutions": {
     "shelljs": "^0.8.5",
-    "ansi-html": "0.0.8"
+    "ansi-html": "0.0.8",
+    "@babel/traverse": "^7.23.2"
   }
 }
diff --git a/website/redirects.ts b/website/redirects.ts
new file mode 100644
index 0000000000000..921d6df3e37c8
--- /dev/null
+++ b/website/redirects.ts
@@ -0,0 +1,223 @@
+/**
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */ + +import { isInternal } from 'docusaurus-plugin-internaldocs-fb/internal'; + +const baseRedirects = [ + { + to: '/docs/about/why', + from: '/docs/why', + }, + { + to: '/docs/about/getting_started', + from: '/docs/getting_started', + }, + { + to: '/docs/about/benefits/compared_to_buck1', + from: '/docs/benefits', + }, + { + to: '/docs/about/bootstrapping', + from: '/docs/bootstrapping', + }, + { + to: '/docs/prelude/globals', + from: '/docs/api/rules', + }, + ]; + +// Redirects that need to be introduced following changes to the generated API docs in D61778036 +const globalsBasedApiDocs = [ + { + from: '/docs/api/build/actions', + to: '/docs/api/build/AnalysisActions', + }, + { + from: ['/docs/api/build/anon_target', '/docs/api/bxl/anon_target'], + to: '/docs/api/build/AnonTarget', + }, + { + from: ['/docs/api/build/anon_targets', '/docs/api/bxl/anon_targets'], + to: '/docs/api/build/AnonTargets', + }, + { + from: ['/docs/api/build/artifact', '/docs/api/bxl/artifact'], + to: '/docs/api/build/Artifact', + }, + { + from: ['/docs/api/build/attrs', '/docs/api/bxl/attrs'], + to: '/docs/api/build/Attr', + }, + { + from: ['/docs/api/build/buck_regex', '/docs/api/bxl/buck_regex'], + to: '/docs/api/build/regex', + }, + { + from: ['/docs/api/build/configured_target_label', '/docs/api/bxl/configured_target_label'], + to: '/docs/api/build/ConfiguredTargetLabel', + }, + { + from: ['/docs/api/build/context', '/docs/api/bxl/context'], + to: '/docs/api/build/AnalysisContext', + }, + { + from: ['/docs/api/build/dependency', '/docs/api/bxl/dependency'], + to: '/docs/api/build/Dependency', + }, + { + from: '/docs/api/build/globals', + to: '/docs/api/build', + }, + { + from: ['/docs/api/build/label', '/docs/api/bxl/label'], + to: '/docs/api/build/Label', + }, + { + from: ['/docs/api/build/promise', '/docs/api/bxl/promise'], + to: '/docs/api/build/Promise', + }, + { + from: ['/docs/api/build/provider_collection', '/docs/api/bxl/provider_collection'], + to: '/docs/api/build/ProviderCollection', + }, + { + from: ['/docs/api/build/providers_label', '/docs/api/bxl/providers_label'], + to: '/docs/api/build/ProvidersLabel', + }, + { + from: ['/docs/api/build/target_label', '/docs/api/bxl/target_label'], + to: '/docs/api/build/TargetLabel', + }, + { + to: '/docs/api/bxl/Actions', + from: '/docs/api/bxl/actions', + }, + { + from: '/docs/api/bxl/bxl.ActionQueryNode', + to: '/docs/api/bxl/ActionQueryNode', + }, + { + from: '/docs/api/bxl/bxl.Actions', + to: '/docs/api/bxl/Actions', + }, + { + from: '/docs/api/bxl/bxl.AnalysisResult', + to: '/docs/api/bxl/AnalysisResult', + }, + { + from: '/docs/api/bxl/bxl.AqueryContext', + to: '/docs/api/bxl/AqueryContext', + }, + { + from: '/docs/api/bxl/bxl.AuditContext', + to: '/docs/api/bxl/AuditContext', + }, + { + from: '/docs/api/bxl/bxl.BuildResult', + to: '/docs/api/bxl/BuildResult', + }, + { + from: '/docs/api/bxl/bxl.ConfiguredTargetNode', + to: '/docs/api/bxl/ConfiguredTargetNode', + }, + { + from: '/docs/api/bxl/bxl.Context', + to: '/docs/api/bxl/Context', + }, + { + from: '/docs/api/bxl/bxl.CqueryContext', + to: '/docs/api/bxl/CqueryContext', + }, + { + from: '/docs/api/bxl/bxl.EnsuredArtifact', + to: '/docs/api/bxl/EnsuredArtifact', + }, + { + from: '/docs/api/bxl/bxl.Error', + to: '/docs/api/bxl/Error', + }, + { + from: '/docs/api/bxl/bxl.FileNode', + to: '/docs/api/bxl/FileNode', + }, + { + from: '/docs/api/bxl/bxl.Filesystem', + to: '/docs/api/bxl/Filesystem', + }, + { + from: '/docs/api/bxl/bxl.LazyContext', + to: '/docs/api/bxl/LazyContext', + }, + { + from: 
'/docs/api/bxl/bxl.Lazy', + to: '/docs/api/bxl/Lazy', + }, + { + from: '/docs/api/bxl/bxl.LazyResolvedAttrs', + to: '/docs/api/bxl/LazyResolvedAttrs', + }, + { + from: '/docs/api/bxl/bxl.OutputStream', + to: '/docs/api/bxl/OutputStream', + }, + { + from: '/docs/api/bxl/bxl.Result', + to: '/docs/api/bxl/Result', + }, + { + from: '/docs/api/bxl/bxl.TargetUniverse', + to: '/docs/api/bxl/TargetUniverse', + }, + { + from: '/docs/api/bxl/bxl.UnconfiguredTargetNode', + to: '/docs/api/bxl/UnconfiguredTargetNode', + }, + { + from: '/docs/api/bxl/bxl.UqueryContext', + to: '/docs/api/bxl/UqueryContext', + }, + { + from: '/docs/api/bxl/CellPath', + to: '/docs/api/build/CellPath', + }, + { + from: '/docs/api/bxl/cmd_args', + to: '/docs/api/build/cmd_args', + }, + { + to: '/docs/api/bxl', + from: '/docs/api/bxl/globals', + }, + { + to: '/docs/api/bxl/LazyResolvedAttrs', + from: '/docs/api/bxl/lazy_attrs', + }, + { + from: '/docs/api/starlark/globals', + to: '/docs/api/starlark', + }, + { + from: '/docs/api/starlark/string', + to: '/docs/api/starlark/str', + }, +]; + +// Internal-only redirects +const internalRedirects = !isInternal() ? [] : [ + { + to: '/docs/about/benefits/testimonials', + from: '/docs/testimonials', + }, + { + to: '/docs/about/knowledge_sharing', + from: '/docs/knowledge_sharing', + }, +]; + +export const redirects = [...baseRedirects, ...globalsBasedApiDocs, ...internalRedirects]; diff --git a/website/sidebars.js b/website/sidebars.js deleted file mode 100644 index 4affd4ccbe2b9..0000000000000 --- a/website/sidebars.js +++ /dev/null @@ -1,305 +0,0 @@ -/** - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -/** - * The sidebars for buck2 documentation work slightly differently than normal. - * Normally when globbing you don't have control over any ordering (in an easy to manage way) - * Instead, we do some processing on the manualSidebar array to remove any manually specified - * files from the autogenerated glob, and keep the manuallly specified ones in order. - * - * - To specify manual ordering, just put the filename into the list of items. - * - New sections should be in a subdirectory, and should generally end have an autogenerated - * item as their last item. - * - Directories that should be excluded from sidebars should be added to the - * 'universallyExcludedDirs' set below - * - * If you're curious how this works, look at `generateSidebarExclusions` and - * `filterItems` in this module, and `sidebarItemsGenerator` in docusaurus.config.js. Note - * that `sidebarItemsGenerator` runs for each "autogenerated" item, so that's why we - * keep track of all globs that have been specified. We need to make sure that only things - * in "developers/" are included in the developers glob, e.g. - */ - -const { isInternal } = require("docusaurus-plugin-internaldocs-fb/internal"); - -const universallyExcludedDirs = new Set([ - "rfcs/", - "legacy/", -]); - -const manualSidebar = [ - 'index', - { - type: 'category', - label: 'About Buck2', - items: [ - 'why', - 'getting_started', - { - type: 'category', - label: 'Benefits', - items: [ - 'benefits', - isInternal() ? 'testimonials' : [], - ], - }, - isInternal() ? 
'knowledge_sharing' : [], - 'bootstrapping', - ], - }, - { - type: 'category', - label: 'Concepts', - items: [ - 'concepts/concept_map', - 'concepts/target_pattern', - 'concepts/visibility', - 'concepts/daemon', - 'concepts/buckconfig', - 'concepts/glossary', - ], - }, - { - type: 'category', - label: 'Buck2 Users', - items: [ - isInternal() ? 'users/migration_guide' : [], - { - type: 'category', - label: 'Commands', - items: [ - 'users/commands', - /* Should add autogenerated commands docs - 1 command per page */ - ], - }, - { - type: 'category', - label: 'Troubleshooting', - items: [ - isInternal() ? 'users/faq/getting_help' : [], - 'users/faq/common_issues', - isInternal() ? 'users/faq/meta_issues' : [], - isInternal() ? 'users/faq/meta_installation' : [], - isInternal() ? 'users/faq/remote_execution' : [], - ], - }, - { - type: 'category', - label: 'Build Observability', - items: [ - 'users/build_observability/interactive_console', - 'users/build_observability/logging', - isInternal() ? 'users/build_observability/observability' : [], - isInternal() ? 'users/build_observability/scuba' : [], - isInternal() ? 'users/build_observability/ods' : [], - ], - }, - 'users/remote_execution', - { - type: 'category', - label: 'Advanced Features', - items: [ - 'users/advanced/deferred_materialization', - 'users/advanced/restarter', - 'users/advanced/in_memory_cache', - isInternal() ? 'users/advanced/offline_build_archives' : [], - isInternal() ? 'users/advanced/vpnless' : [], - ], - }, - ], - }, - { - type: 'category', - label: 'Rule Authors', - items: [ - 'rule_authors/writing_rules', - 'rule_authors/rule_api', - 'rule_authors/transitive_sets', - 'rule_authors/configurations', - 'rule_authors/configuration_transitions', - 'rule_authors/dynamic_dependencies', - 'rule_authors/anon_targets', - 'rule_authors/test_execution', - 'rule_authors/optimization', - isInternal() ? 'rule_authors/rule_writing_tips' : [], - 'rule_authors/incremental_actions', - 'rule_authors/alias', - 'rule_authors/local_resources', - { type: 'autogenerated', dirName: 'rule_authors' }, - ], - }, - { - type: 'category', - label: 'BXL Developers', - items: [ - { - type: 'category', - label: 'About BXL', - items: [ - 'developers/bxl', - isInternal() ? 'developers/bxl_testimonials' : [], - ], - }, - { - type: 'category', - label: 'User Guide', - items: [ - 'developers/bxl_getting_started', - 'developers/bxl_how_tos', - 'developers/target_universe', - 'developers/bxl_telemetry', - 'developers/anon_targets', - 'developers/dynamic_output', - ], - }, - 'developers/bxl_faqs', - { - type: 'category', - label: 'BXL APIs', - items: [ - { type: 'autogenerated', dirName: 'api/bxl' }, - ], - }, - ], - }, - { - type: 'category', - label: 'Buck2 Developers', - items: [ - { - type: 'category', - label: 'Architecture', - items: [ - 'developers/architecture/buck2', - 'developers/architecture/buck1_vs_buck2', - ], - }, - isInternal() ? 'developers/developers' : [], - isInternal() ? 
'developers/heap_profiling' : [], - 'developers/parity_script', - 'developers/what-ran', - { - type: 'category', - label: 'Starlark Language', - items: [ - { type: 'autogenerated', dirName: 'developers/starlark' }, - ], - }, - 'developers/request_for_comments', - 'developers/windows_cheat_sheet', - ], - }, - { - type: 'category', - label: 'API', - link: { - type: 'doc', - id: 'api', - }, - items: [ - { - type: 'doc', - id: 'api/rules', - label: 'Rules', - }, - { - type: 'category', - label: 'Starlark APIs', - items: [{ type: 'autogenerated', dirName: 'api/starlark' }], - }, - { - type: 'category', - label: 'Build APIs', - items: [{ type: 'autogenerated', dirName: 'api/build' }], - }, - { - type: 'category', - label: 'BXL APIs', - items: [ - { type: 'autogenerated', dirName: 'api/bxl' }, - ], - }, - ] - } -] - -function generateSidebarExclusions(items) { - let excludedDirs = new Set(); - let excludedFiles = new Set(); - - for (const item of items) { - if (item["type"] === "category") { - const [newExcludedDirs, newExcludedFiles] = generateSidebarExclusions(item.items); - excludedDirs = new Set([...excludedDirs, ...newExcludedDirs]); - excludedFiles = new Set([...excludedFiles, ...newExcludedFiles]); - } else if (item["type"] === "autogenerated") { - excludedDirs.add(item.dirName + "/"); - } else if (Array.isArray(item)) { - const [newExcludedDirs, newExcludedFiles] = generateSidebarExclusions(item); - excludedDirs = new Set([...excludedDirs, ...newExcludedDirs]); - excludedFiles = new Set([...excludedFiles, ...newExcludedFiles]); - } else { - excludedFiles.add(item) - } - } - - return [excludedDirs, excludedFiles]; -} - -const [mainExcludedDirs, mainExcludedFiles] = generateSidebarExclusions(manualSidebar); - -function itemFilter(item, docs) { - const dirName = item.dirName + '/'; - return docs.filter((doc) => { - if (!isInternal() && doc.source.endsWith(".fb.md")) { - return false; - } - if (item.dirName != '.' && !doc.id.startsWith(dirName)) { - return false; - } - if (mainExcludedFiles.has(doc.id)) { - return false; - } - for (dir of universallyExcludedDirs) { - if (doc.id.startsWith(dir)) { - return false; - } - } - for (dir of mainExcludedDirs) { - if (dirName != dir && doc.id.startsWith(dir)) { - return false; - } - } - return true; - }); -} - -function itemSort(items) { - function is_globals(x) { - // We want API "globals" docs to come first - return x.id && x.id.endsWith("/globals") ? 0 : 1; - } - - // Reverse items in categories - const result = items.map((item) => { - if (item.type === 'category') { - return {...item, items: itemSort(item.items)}; - } - return item; - }); - // Make `globals` come first - result.sort((a, b) => is_globals(a) - is_globals(b)); - return result; -} - -module.exports = { - itemFilter: itemFilter, - itemSort: itemSort, - manualSidebar: manualSidebar, -}; diff --git a/website/sidebars.ts b/website/sidebars.ts new file mode 100644 index 0000000000000..f41df363ce72e --- /dev/null +++ b/website/sidebars.ts @@ -0,0 +1,267 @@ +/** + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +/** + * The sidebars for buck2 documentation work slightly differently than normal. 
+ * Normally when globbing you don't have control over any ordering (in an easy-to-manage way).
+ * Instead, most items are listed manually and in order here, and 'autogenerated' entries are
+ * used only for the directories whose contents should be globbed.
+ *
+ * - To specify manual ordering, just put the filename into the list of items.
+ * - New sections should be in a subdirectory, and should generally end with an autogenerated
+ *   item as their last item.
+ *
+ * If you're curious how this works, look at `postProcessItems` in this module and
+ * `sidebarItemsGenerator` in config_impl.ts, which runs `postProcessItems` on the result of
+ * each "autogenerated" entry.
+ */
+
+import { isInternal } from "docusaurus-plugin-internaldocs-fb/internal";
+import type {SidebarsConfig} from '@docusaurus/plugin-content-docs';
+
+
+export const sidebars: SidebarsConfig = {
+  main: [
+    'index',
+    {
+      type: 'category' as const,
+      label: 'About Buck2',
+      items: [
+        'about/why',
+        // The getting_started page is for OSS only.
+        isInternal() ? null : 'about/getting_started',
+        {
+          type: 'category' as const,
+          label: 'Benefits',
+          items: [
+            'about/benefits/compared_to_buck1',
+            isInternal() ? 'about/benefits/testimonials' : null,
+          ].flatMap(x => x !== null ? [x] : []),
+        },
+        isInternal() ? 'about/knowledge_sharing' : null,
+        'about/bootstrapping',
+      ].flatMap(x => x !== null ? [x] : []),
+    },
+    {
+      type: 'category',
+      label: 'Concepts',
+      items: [
+        'concepts/key_concepts',
+        'concepts/concept_map',
+        'concepts/build_rule',
+        'concepts/build_file',
+        'concepts/build_target',
+        'concepts/target_pattern',
+        'concepts/buck_out',
+        'concepts/visibility',
+        'concepts/daemon',
+        'concepts/buckconfig',
+        'concepts/configurations',
+        'concepts/glossary',
+      ],
+    },
+    {
+      type: 'category' as const,
+      label: 'Buck2 Users',
+      items: [
+        isInternal() ? 'users/migration_guide' : null,
+        {
+          type: 'category' as const,
+          label: 'Commands',
+          items: [
+            { type: 'autogenerated' as const, dirName: 'users/commands'},
+          ],
+        },
+        'users/cheat_sheet',
+        {
+          type: 'category' as const,
+          label: 'Troubleshooting',
+          items: [
+            isInternal() ? 'users/faq/getting_help' : null,
+            'users/faq/common_issues',
+            isInternal() ? 'users/faq/meta_issues' : null,
+            isInternal() ? 'users/faq/meta_installation' : null,
+            isInternal() ? 'users/faq/remote_execution' : null,
+            'users/faq/starlark_peak_mem',
+            'users/faq/buck_hanging',
+            isInternal() ? 'users/faq/how_to_bisect' : null,
+            isInternal() ? 'users/faq/how_to_expedite_fix' : null,
+          ].flatMap(x => x !== null ? [x] : []),
+        },
+        {
+          type: 'category' as const,
+          label: 'Build Observability',
+          items: [
+            'users/build_observability/interactive_console',
+            'users/build_observability/logging',
+            'users/build_observability/build_report',
+            isInternal() ? 'users/build_observability/observability' : null,
+            isInternal() ? 'users/build_observability/scuba' : null,
+            isInternal() ? 'users/build_observability/ods' : null,
+          ].flatMap(x => x !== null ?
[x] : []), + }, + 'users/remote_execution', + { + type: 'category' as const, + label: 'Queries', + items: [ + { type: 'autogenerated' as const, dirName: 'users/query' }, + ], + }, + { + type: 'category' as const, + label: 'Advanced Features', + items: [ + 'users/advanced/deferred_materialization', + 'users/advanced/restarter', + 'users/advanced/in_memory_cache', + 'users/advanced/external_cells', + isInternal() ? 'users/advanced/offline_build_archives' : null, + isInternal() ? 'users/advanced/vpnless' : null, + ].flatMap(x => x !== null ? [x] : []), + }, + ].flatMap(x => x !== null ? [x] : []), + }, + { + type: 'category', + label: 'Rule Authors', + items: [ + 'rule_authors/writing_rules', + 'rule_authors/transitive_sets', + 'rule_authors/configurations', + 'rule_authors/configuration_transitions', + 'rule_authors/dynamic_dependencies', + 'rule_authors/anon_targets', + 'rule_authors/test_execution', + 'rule_authors/optimization', + isInternal() ? 'rule_authors/rule_writing_tips' : null, + 'rule_authors/incremental_actions', + 'rule_authors/alias', + 'rule_authors/local_resources', + 'rule_authors/package_files', + isInternal() ? 'rule_authors/client_metadata' : null, + isInternal() ? 'rule_authors/action_error_handler' : null, + 'rule_authors/dep_files', + ].flatMap(x => x !== null ? [x] : []), + }, + { + type: 'category', + label: 'BXL Developers', + items: [ + { + type: 'category', + label: 'About BXL', + items: [ + 'developers/bxl', + isInternal() ? 'developers/bxl_testimonials' : null, + ].flatMap(x => x !== null ? [x] : []), + }, + { + type: 'category', + label: 'User Guide', + items: [ + 'developers/bxl_getting_started', + 'developers/bxl_basics', + 'developers/bxl_how_tos', + 'developers/target_universe', + 'developers/bxl_telemetry', + 'developers/anon_targets', + 'developers/dynamic_output', + ], + }, + 'developers/bxl_faqs', + { + type: 'ref', + label: 'BXL APIs', + id: 'api/bxl/index', + }, + ], + }, + { + type: 'category', + label: 'Buck2 Developers', + items: [ + { + type: 'category' as const, + label: 'Architecture', + items: [ + 'developers/architecture/buck2', + 'developers/architecture/buck1_vs_buck2', + ], + }, + isInternal() ? 'developers/developers' : null, + isInternal() ? 'developers/heap_profiling' : null, + 'developers/what-ran', + { + type: 'category' as const, + label: 'Starlark Language', + items: [ + { type: 'autogenerated' as const, dirName: 'developers/starlark' }, + ], + }, + 'developers/request_for_comments', + 'developers/windows_cheat_sheet', + ].flatMap(x => x !== null ? [x] : []), + }, + ], + apiSidebar: [ + { + type: 'doc', + id: 'api', + }, + { + type: 'doc', + id: 'prelude/globals', + label: 'Rules', + }, + { + type: 'category', + label: 'Starlark APIs', + items: [{ type: 'autogenerated', dirName: 'api/starlark' }], + link: {type: 'doc', id: 'api/starlark/index'}, + }, + { + type: 'category', + label: 'Build APIs', + items: [{ type: 'autogenerated', dirName: 'api/build' }], + link: {type: 'doc', id: 'api/build/index'}, + }, + { + type: 'category', + label: 'BXL APIs', + items: [ + { type: 'autogenerated', dirName: 'api/bxl' }, + ], + link: {type: 'doc', id: 'api/bxl/index'}, + }, + ], +}; + +export function postProcessItems(items) { + // First, handle recursive categories + const result = items.map((item) => { + if (item.type === 'category') { + return {...item, items: postProcessItems(item.items)}; + } + return item; + }); + + // Filter out index pages. 
Docusaurus only does this correctly on subcategories.
+  return result.filter((item) => {
+    if (item.type === 'doc' && item.id) {
+      return item.id.split("/").at(-1) !== "index";
+    } else {
+      return true;
+    }
+  });
+}
diff --git a/website/sidebars_generated.js b/website/sidebars_generated.js
deleted file mode 100644
index 1aba9b22a1073..0000000000000
--- a/website/sidebars_generated.js
+++ /dev/null
@@ -1,19 +0,0 @@
-/**
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under both the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree and the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree.
- */
-
-// Small module that re-exports a single entry from sidebars.js. This is done
-// because docusaurus really does not like having anything else exported from the
-// sidebars module, and we need to make some functionality available (itemFilter)
-// in docusaurus.config.js
-
-const manualSidebar = require('./sidebars.js').manualSidebar;
-
-module.exports = {
-  manualSidebar: manualSidebar,
-}
diff --git a/website/sidebars_generated.ts b/website/sidebars_generated.ts
new file mode 100644
index 0000000000000..a0fd1190aa5fb
--- /dev/null
+++ b/website/sidebars_generated.ts
@@ -0,0 +1,17 @@
+/**
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * This source code is licensed under both the MIT license found in the
+ * LICENSE-MIT file in the root directory of this source tree and the Apache
+ * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
+ * of this source tree.
+ */
+
+// Small module that re-exports the sidebar definition from sidebars.ts. This is
+// done because docusaurus really does not like having anything else exported
+// from the sidebars module, and we need to keep some functionality
+// (postProcessItems) available to config_impl.ts.
+
+import { sidebars } from "./sidebars";
+
+module.exports = sidebars
diff --git a/website/src/css/custom.css b/website/src/css/custom.css
index c0c9ca150c183..7f69cbd3bd7ac 100644
--- a/website/src/css/custom.css
+++ b/website/src/css/custom.css
@@ -57,3 +57,7 @@ table.starlark_members_table td:nth-child(3) {
   width: 30%;
   max-width: 0;
 }
+
+.navbar__link--active {
+  text-decoration-line: underline;
+}
diff --git a/website/src/pages/index.js b/website/src/pages/index.js
index 7e0785e20d694..c5bfaf2406461 100644
--- a/website/src/pages/index.js
+++ b/website/src/pages/index.js
@@ -30,7 +30,7 @@ function HomepageHeader() {

    diff --git a/website/static/img/buck2_conceptmap.png b/website/static/img/buck2_conceptmap.png index a237c6c2b36a6..00c949999a068 100644 Binary files a/website/static/img/buck2_conceptmap.png and b/website/static/img/buck2_conceptmap.png differ diff --git a/website/static/img/packages-1.png b/website/static/img/packages-1.png new file mode 100644 index 0000000000000..4f48eb1d97a08 Binary files /dev/null and b/website/static/img/packages-1.png differ diff --git a/website/yarn.lock b/website/yarn.lock index 80e9dee912a28..2c6bef7f2949d 100644 --- a/website/yarn.lock +++ b/website/yarn.lock @@ -2,602 +2,434 @@ # yarn lockfile v1 -"@algolia/autocomplete-core@1.7.2": - version "1.7.2" - resolved "https://registry.yarnpkg.com/@algolia/autocomplete-core/-/autocomplete-core-1.7.2.tgz#8abbed88082f611997538760dffcb43b33b1fd1d" - integrity sha512-eclwUDC6qfApNnEfu1uWcL/rudQsn59tjEoUYZYE2JSXZrHLRjBUGMxiCoknobU2Pva8ejb0eRxpIYDtVVqdsw== +"@algolia/autocomplete-core@1.9.3": + version "1.9.3" + resolved "https://registry.yarnpkg.com/@algolia/autocomplete-core/-/autocomplete-core-1.9.3.tgz#1d56482a768c33aae0868c8533049e02e8961be7" + integrity sha512-009HdfugtGCdC4JdXUbVJClA0q0zh24yyePn+KUGk3rP7j8FEe/m5Yo/z65gn6nP/cM39PxpzqKrL7A6fP6PPw== dependencies: - "@algolia/autocomplete-shared" "1.7.2" + "@algolia/autocomplete-plugin-algolia-insights" "1.9.3" + "@algolia/autocomplete-shared" "1.9.3" -"@algolia/autocomplete-preset-algolia@1.7.2": - version "1.7.2" - resolved "https://registry.yarnpkg.com/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.7.2.tgz#9cd4f64b3d64399657ee2dc2b7e0a939e0713a26" - integrity sha512-+RYEG6B0QiGGfRb2G3MtPfyrl0dALF3cQNTWBzBX6p5o01vCCGTTinAm2UKG3tfc2CnOMAtnPLkzNZyJUpnVJw== +"@algolia/autocomplete-plugin-algolia-insights@1.9.3": + version "1.9.3" + resolved "https://registry.yarnpkg.com/@algolia/autocomplete-plugin-algolia-insights/-/autocomplete-plugin-algolia-insights-1.9.3.tgz#9b7f8641052c8ead6d66c1623d444cbe19dde587" + integrity sha512-a/yTUkcO/Vyy+JffmAnTWbr4/90cLzw+CC3bRbhnULr/EM0fGNvM13oQQ14f2moLMcVDyAx/leczLlAOovhSZg== + dependencies: + "@algolia/autocomplete-shared" "1.9.3" + +"@algolia/autocomplete-preset-algolia@1.9.3": + version "1.9.3" + resolved "https://registry.yarnpkg.com/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.9.3.tgz#64cca4a4304cfcad2cf730e83067e0c1b2f485da" + integrity sha512-d4qlt6YmrLMYy95n5TB52wtNDr6EgAIPH81dvvvW8UmuWRgxEtY0NJiPwl/h95JtG2vmRM804M0DSwMCNZlzRA== dependencies: - "@algolia/autocomplete-shared" "1.7.2" + "@algolia/autocomplete-shared" "1.9.3" -"@algolia/autocomplete-shared@1.7.2": - version "1.7.2" - resolved "https://registry.yarnpkg.com/@algolia/autocomplete-shared/-/autocomplete-shared-1.7.2.tgz#daa23280e78d3b42ae9564d12470ae034db51a89" - integrity sha512-QCckjiC7xXHIUaIL3ektBtjJ0w7tTA3iqKcAE/Hjn1lZ5omp7i3Y4e09rAr9ZybqirL7AbxCLLq0Ra5DDPKeug== +"@algolia/autocomplete-shared@1.9.3": + version "1.9.3" + resolved "https://registry.yarnpkg.com/@algolia/autocomplete-shared/-/autocomplete-shared-1.9.3.tgz#2e22e830d36f0a9cf2c0ccd3c7f6d59435b77dfa" + integrity sha512-Wnm9E4Ye6Rl6sTTqjoymD+l8DjSTHsHboVRYrKgEt8Q7UHm9nYbqhN/i0fhUYA3OAEH7WA8x3jfpnmJm3rKvaQ== -"@algolia/cache-browser-local-storage@4.14.3": - version "4.14.3" - resolved "https://registry.yarnpkg.com/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.14.3.tgz#b9e0da012b2f124f785134a4d468ee0841b2399d" - integrity sha512-hWH1yCxgG3+R/xZIscmUrWAIBnmBFHH5j30fY/+aPkEZWt90wYILfAHIOZ1/Wxhho5SkPfwFmT7ooX2d9JeQBw== 
+"@algolia/cache-browser-local-storage@4.24.0": + version "4.24.0" + resolved "https://registry.yarnpkg.com/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.24.0.tgz#97bc6d067a9fd932b9c922faa6b7fd6e546e1348" + integrity sha512-t63W9BnoXVrGy9iYHBgObNXqYXM3tYXCjDSHeNwnsc324r4o5UiVKUiAB4THQ5z9U5hTj6qUvwg/Ez43ZD85ww== dependencies: - "@algolia/cache-common" "4.14.3" + "@algolia/cache-common" "4.24.0" -"@algolia/cache-common@4.14.3": - version "4.14.3" - resolved "https://registry.yarnpkg.com/@algolia/cache-common/-/cache-common-4.14.3.tgz#a78e9faee3dfec018eab7b0996e918e06b476ac7" - integrity sha512-oZJofOoD9FQOwiGTzyRnmzvh3ZP8WVTNPBLH5xU5JNF7drDbRT0ocVT0h/xB2rPHYzOeXRrLaQQBwRT/CKom0Q== +"@algolia/cache-common@4.24.0": + version "4.24.0" + resolved "https://registry.yarnpkg.com/@algolia/cache-common/-/cache-common-4.24.0.tgz#81a8d3a82ceb75302abb9b150a52eba9960c9744" + integrity sha512-emi+v+DmVLpMGhp0V9q9h5CdkURsNmFC+cOS6uK9ndeJm9J4TiqSvPYVu+THUP8P/S08rxf5x2P+p3CfID0Y4g== -"@algolia/cache-in-memory@4.14.3": - version "4.14.3" - resolved "https://registry.yarnpkg.com/@algolia/cache-in-memory/-/cache-in-memory-4.14.3.tgz#96cefb942aeb80e51e6a7e29f25f4f7f3439b736" - integrity sha512-ES0hHQnzWjeioLQf5Nq+x1AWdZJ50znNPSH3puB/Y4Xsg4Av1bvLmTJe7SY2uqONaeMTvL0OaVcoVtQgJVw0vg== +"@algolia/cache-in-memory@4.24.0": + version "4.24.0" + resolved "https://registry.yarnpkg.com/@algolia/cache-in-memory/-/cache-in-memory-4.24.0.tgz#ffcf8872f3a10cb85c4f4641bdffd307933a6e44" + integrity sha512-gDrt2so19jW26jY3/MkFg5mEypFIPbPoXsQGQWAi6TrCPsNOSEYepBMPlucqWigsmEy/prp5ug2jy/N3PVG/8w== dependencies: - "@algolia/cache-common" "4.14.3" + "@algolia/cache-common" "4.24.0" -"@algolia/client-account@4.14.3": - version "4.14.3" - resolved "https://registry.yarnpkg.com/@algolia/client-account/-/client-account-4.14.3.tgz#6d7d032a65c600339ce066505c77013d9a9e4966" - integrity sha512-PBcPb0+f5Xbh5UfLZNx2Ow589OdP8WYjB4CnvupfYBrl9JyC1sdH4jcq/ri8osO/mCZYjZrQsKAPIqW/gQmizQ== +"@algolia/client-account@4.24.0": + version "4.24.0" + resolved "https://registry.yarnpkg.com/@algolia/client-account/-/client-account-4.24.0.tgz#eba7a921d828e7c8c40a32d4add21206c7fe12f1" + integrity sha512-adcvyJ3KjPZFDybxlqnf+5KgxJtBjwTPTeyG2aOyoJvx0Y8dUQAEOEVOJ/GBxX0WWNbmaSrhDURMhc+QeevDsA== dependencies: - "@algolia/client-common" "4.14.3" - "@algolia/client-search" "4.14.3" - "@algolia/transporter" "4.14.3" + "@algolia/client-common" "4.24.0" + "@algolia/client-search" "4.24.0" + "@algolia/transporter" "4.24.0" -"@algolia/client-analytics@4.14.3": - version "4.14.3" - resolved "https://registry.yarnpkg.com/@algolia/client-analytics/-/client-analytics-4.14.3.tgz#ca409d00a8fff98fdcc215dc96731039900055dc" - integrity sha512-eAwQq0Hb/aauv9NhCH5Dp3Nm29oFx28sayFN2fdOWemwSeJHIl7TmcsxVlRsO50fsD8CtPcDhtGeD3AIFLNvqw== +"@algolia/client-analytics@4.24.0": + version "4.24.0" + resolved "https://registry.yarnpkg.com/@algolia/client-analytics/-/client-analytics-4.24.0.tgz#9d2576c46a9093a14e668833c505ea697a1a3e30" + integrity sha512-y8jOZt1OjwWU4N2qr8G4AxXAzaa8DBvyHTWlHzX/7Me1LX8OayfgHexqrsL4vSBcoMmVw2XnVW9MhL+Y2ZDJXg== dependencies: - "@algolia/client-common" "4.14.3" - "@algolia/client-search" "4.14.3" - "@algolia/requester-common" "4.14.3" - "@algolia/transporter" "4.14.3" + "@algolia/client-common" "4.24.0" + "@algolia/client-search" "4.24.0" + "@algolia/requester-common" "4.24.0" + "@algolia/transporter" "4.24.0" -"@algolia/client-common@4.14.3": - version "4.14.3" - resolved 
"https://registry.yarnpkg.com/@algolia/client-common/-/client-common-4.14.3.tgz#c44e48652b2121a20d7a40cfd68d095ebb4191a8" - integrity sha512-jkPPDZdi63IK64Yg4WccdCsAP4pHxSkr4usplkUZM5C1l1oEpZXsy2c579LQ0rvwCs5JFmwfNG4ahOszidfWPw== +"@algolia/client-common@4.24.0": + version "4.24.0" + resolved "https://registry.yarnpkg.com/@algolia/client-common/-/client-common-4.24.0.tgz#77c46eee42b9444a1d1c1583a83f7df4398a649d" + integrity sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA== dependencies: - "@algolia/requester-common" "4.14.3" - "@algolia/transporter" "4.14.3" + "@algolia/requester-common" "4.24.0" + "@algolia/transporter" "4.24.0" -"@algolia/client-personalization@4.14.3": - version "4.14.3" - resolved "https://registry.yarnpkg.com/@algolia/client-personalization/-/client-personalization-4.14.3.tgz#8f71325035aa2a5fa7d1d567575235cf1d6c654f" - integrity sha512-UCX1MtkVNgaOL9f0e22x6tC9e2H3unZQlSUdnVaSKpZ+hdSChXGaRjp2UIT7pxmPqNCyv51F597KEX5WT60jNg== +"@algolia/client-personalization@4.24.0": + version "4.24.0" + resolved "https://registry.yarnpkg.com/@algolia/client-personalization/-/client-personalization-4.24.0.tgz#8b47789fb1cb0f8efbea0f79295b7c5a3850f6ae" + integrity sha512-l5FRFm/yngztweU0HdUzz1rC4yoWCFo3IF+dVIVTfEPg906eZg5BOd1k0K6rZx5JzyyoP4LdmOikfkfGsKVE9w== dependencies: - "@algolia/client-common" "4.14.3" - "@algolia/requester-common" "4.14.3" - "@algolia/transporter" "4.14.3" + "@algolia/client-common" "4.24.0" + "@algolia/requester-common" "4.24.0" + "@algolia/transporter" "4.24.0" -"@algolia/client-search@4.14.3": - version "4.14.3" - resolved "https://registry.yarnpkg.com/@algolia/client-search/-/client-search-4.14.3.tgz#cf1e77549f5c3e73408ffe6441ede985fde69da0" - integrity sha512-I2U7xBx5OPFdPLA8AXKUPPxGY3HDxZ4r7+mlZ8ZpLbI8/ri6fnu6B4z3wcL7sgHhDYMwnAE8Xr0AB0h3Hnkp4A== +"@algolia/client-search@4.24.0": + version "4.24.0" + resolved "https://registry.yarnpkg.com/@algolia/client-search/-/client-search-4.24.0.tgz#75e6c02d33ef3e0f34afd9962c085b856fc4a55f" + integrity sha512-uRW6EpNapmLAD0mW47OXqTP8eiIx5F6qN9/x/7HHO6owL3N1IXqydGwW5nhDFBrV+ldouro2W1VX3XlcUXEFCA== dependencies: - "@algolia/client-common" "4.14.3" - "@algolia/requester-common" "4.14.3" - "@algolia/transporter" "4.14.3" + "@algolia/client-common" "4.24.0" + "@algolia/requester-common" "4.24.0" + "@algolia/transporter" "4.24.0" "@algolia/events@^4.0.1": version "4.0.1" resolved "https://registry.yarnpkg.com/@algolia/events/-/events-4.0.1.tgz#fd39e7477e7bc703d7f893b556f676c032af3950" integrity sha512-FQzvOCgoFXAbf5Y6mYozw2aj5KCJoA3m4heImceldzPSMbdyS4atVjJzXKMsfX3wnZTFYwkkt8/z8UesLHlSBQ== -"@algolia/logger-common@4.14.3": - version "4.14.3" - resolved "https://registry.yarnpkg.com/@algolia/logger-common/-/logger-common-4.14.3.tgz#87d4725e7f56ea5a39b605771b7149fff62032a7" - integrity sha512-kUEAZaBt/J3RjYi8MEBT2QEexJR2kAE2mtLmezsmqMQZTV502TkHCxYzTwY2dE7OKcUTxi4OFlMuS4GId9CWPw== - -"@algolia/logger-console@4.14.3": - version "4.14.3" - resolved "https://registry.yarnpkg.com/@algolia/logger-console/-/logger-console-4.14.3.tgz#1f19f8f0a5ef11f01d1f9545290eb6a89b71fb8a" - integrity sha512-ZWqAlUITktiMN2EiFpQIFCJS10N96A++yrexqC2Z+3hgF/JcKrOxOdT4nSCQoEPvU4Ki9QKbpzbebRDemZt/hw== - dependencies: - "@algolia/logger-common" "4.14.3" - -"@algolia/requester-browser-xhr@4.14.3": - version "4.14.3" - resolved "https://registry.yarnpkg.com/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.14.3.tgz#bcf55cba20f58fd9bc95ee55793b5219f3ce8888" - integrity 
sha512-AZeg2T08WLUPvDncl2XLX2O67W5wIO8MNaT7z5ii5LgBTuk/rU4CikTjCe2xsUleIZeFl++QrPAi4Bdxws6r/Q== - dependencies: - "@algolia/requester-common" "4.14.3" - -"@algolia/requester-common@4.14.3": - version "4.14.3" - resolved "https://registry.yarnpkg.com/@algolia/requester-common/-/requester-common-4.14.3.tgz#2d02fbe01afb7ae5651ae8dfe62d6c089f103714" - integrity sha512-RrRzqNyKFDP7IkTuV3XvYGF9cDPn9h6qEDl595lXva3YUk9YSS8+MGZnnkOMHvjkrSCKfoLeLbm/T4tmoIeclw== - -"@algolia/requester-node-http@4.14.3": - version "4.14.3" - resolved "https://registry.yarnpkg.com/@algolia/requester-node-http/-/requester-node-http-4.14.3.tgz#72389e1c2e5d964702451e75e368eefe85a09d8f" - integrity sha512-O5wnPxtDRPuW2U0EaOz9rMMWdlhwP0J0eSL1Z7TtXF8xnUeeUyNJrdhV5uy2CAp6RbhM1VuC3sOJcIR6Av+vbA== - dependencies: - "@algolia/requester-common" "4.14.3" - -"@algolia/transporter@4.14.3": - version "4.14.3" - resolved "https://registry.yarnpkg.com/@algolia/transporter/-/transporter-4.14.3.tgz#5593036bd9cf2adfd077fdc3e81d2e6118660a7a" - integrity sha512-2qlKlKsnGJ008exFRb5RTeTOqhLZj0bkMCMVskxoqWejs2Q2QtWmsiH98hDfpw0fmnyhzHEt0Z7lqxBYp8bW2w== - dependencies: - "@algolia/cache-common" "4.14.3" - "@algolia/logger-common" "4.14.3" - "@algolia/requester-common" "4.14.3" - -"@ampproject/remapping@^2.1.0": - version "2.2.0" - resolved "https://registry.yarnpkg.com/@ampproject/remapping/-/remapping-2.2.0.tgz#56c133824780de3174aed5ab6834f3026790154d" - integrity sha512-qRmjj8nj9qmLTQXXmaR1cck3UXSRMPrbsLJAasZpF+t3riI71BXed5ebIOYwQntykeZuhjsdweEc9BxH5Jc26w== +"@algolia/logger-common@4.24.0": + version "4.24.0" + resolved "https://registry.yarnpkg.com/@algolia/logger-common/-/logger-common-4.24.0.tgz#28d439976019ec0a46ba7a1a739ef493d4ef8123" + integrity sha512-LLUNjkahj9KtKYrQhFKCzMx0BY3RnNP4FEtO+sBybCjJ73E8jNdaKJ/Dd8A/VA4imVHP5tADZ8pn5B8Ga/wTMA== + +"@algolia/logger-console@4.24.0": + version "4.24.0" + resolved "https://registry.yarnpkg.com/@algolia/logger-console/-/logger-console-4.24.0.tgz#c6ff486036cd90b81d07a95aaba04461da7e1c65" + integrity sha512-X4C8IoHgHfiUROfoRCV+lzSy+LHMgkoEEU1BbKcsfnV0i0S20zyy0NLww9dwVHUWNfPPxdMU+/wKmLGYf96yTg== + dependencies: + "@algolia/logger-common" "4.24.0" + +"@algolia/recommend@4.24.0": + version "4.24.0" + resolved "https://registry.yarnpkg.com/@algolia/recommend/-/recommend-4.24.0.tgz#8a3f78aea471ee0a4836b78fd2aad4e9abcaaf34" + integrity sha512-P9kcgerfVBpfYHDfVZDvvdJv0lEoCvzNlOy2nykyt5bK8TyieYyiD0lguIJdRZZYGre03WIAFf14pgE+V+IBlw== + dependencies: + "@algolia/cache-browser-local-storage" "4.24.0" + "@algolia/cache-common" "4.24.0" + "@algolia/cache-in-memory" "4.24.0" + "@algolia/client-common" "4.24.0" + "@algolia/client-search" "4.24.0" + "@algolia/logger-common" "4.24.0" + "@algolia/logger-console" "4.24.0" + "@algolia/requester-browser-xhr" "4.24.0" + "@algolia/requester-common" "4.24.0" + "@algolia/requester-node-http" "4.24.0" + "@algolia/transporter" "4.24.0" + +"@algolia/requester-browser-xhr@4.24.0": + version "4.24.0" + resolved "https://registry.yarnpkg.com/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.24.0.tgz#313c5edab4ed73a052e75803855833b62dd19c16" + integrity sha512-Z2NxZMb6+nVXSjF13YpjYTdvV3032YTBSGm2vnYvYPA6mMxzM3v5rsCiSspndn9rzIW4Qp1lPHBvuoKJV6jnAA== + dependencies: + "@algolia/requester-common" "4.24.0" + +"@algolia/requester-common@4.24.0": + version "4.24.0" + resolved "https://registry.yarnpkg.com/@algolia/requester-common/-/requester-common-4.24.0.tgz#1c60c198031f48fcdb9e34c4057a3ea987b9a436" + integrity 
sha512-k3CXJ2OVnvgE3HMwcojpvY6d9kgKMPRxs/kVohrwF5WMr2fnqojnycZkxPoEg+bXm8fi5BBfFmOqgYztRtHsQA== + +"@algolia/requester-node-http@4.24.0": + version "4.24.0" + resolved "https://registry.yarnpkg.com/@algolia/requester-node-http/-/requester-node-http-4.24.0.tgz#4461593714031d02aa7da221c49df675212f482f" + integrity sha512-JF18yTjNOVYvU/L3UosRcvbPMGT9B+/GQWNWnenIImglzNVGpyzChkXLnrSf6uxwVNO6ESGu6oN8MqcGQcjQJw== + dependencies: + "@algolia/requester-common" "4.24.0" + +"@algolia/transporter@4.24.0": + version "4.24.0" + resolved "https://registry.yarnpkg.com/@algolia/transporter/-/transporter-4.24.0.tgz#226bb1f8af62430374c1972b2e5c8580ab275102" + integrity sha512-86nI7w6NzWxd1Zp9q3413dRshDqAzSbsQjhcDhPIatEFiZrL1/TjnHL8S7jVKFePlIMzDsZWXAXwXzcok9c5oA== + dependencies: + "@algolia/cache-common" "4.24.0" + "@algolia/logger-common" "4.24.0" + "@algolia/requester-common" "4.24.0" + +"@ampproject/remapping@^2.2.0": + version "2.3.0" + resolved "https://registry.yarnpkg.com/@ampproject/remapping/-/remapping-2.3.0.tgz#ed441b6fa600072520ce18b43d2c8cc8caecc7f4" + integrity sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw== dependencies: - "@jridgewell/gen-mapping" "^0.1.0" - "@jridgewell/trace-mapping" "^0.3.9" + "@jridgewell/gen-mapping" "^0.3.5" + "@jridgewell/trace-mapping" "^0.3.24" -"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.10.4", "@babel/code-frame@^7.16.0", "@babel/code-frame@^7.18.6", "@babel/code-frame@^7.8.3": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.18.6.tgz#3b25d38c89600baa2dcc219edfa88a74eb2c427a" - integrity sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q== +"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.16.0", "@babel/code-frame@^7.25.7", "@babel/code-frame@^7.8.3": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.25.7.tgz#438f2c524071531d643c6f0188e1e28f130cebc7" + integrity sha512-0xZJFNE5XMpENsgfHYTw8FbX4kv53mFLn2i3XPoq69LyhYSCBJtitaHx9QnsVTrsogI4Z3+HtEfZ2/GFPOtf5g== dependencies: - "@babel/highlight" "^7.18.6" - -"@babel/compat-data@^7.17.7", "@babel/compat-data@^7.20.1", "@babel/compat-data@^7.20.5": - version "7.20.10" - resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.20.10.tgz#9d92fa81b87542fff50e848ed585b4212c1d34ec" - integrity sha512-sEnuDPpOJR/fcafHMjpcpGN5M2jbUGUHwmuWKM/YdPzeEDJg8bgmbcWQFUfE32MQjti1koACvoPVsDe8Uq+idg== + "@babel/highlight" "^7.25.7" + picocolors "^1.0.0" -"@babel/core@7.12.9": - version "7.12.9" - resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.12.9.tgz#fd450c4ec10cdbb980e2928b7aa7a28484593fc8" - integrity sha512-gTXYh3M5wb7FRXQy+FErKFAv90BnlOuNn1QkCK2lREoPAjrQCO49+HVSrFoe5uakFAF5eenS75KbO2vQiLrTMQ== - dependencies: - "@babel/code-frame" "^7.10.4" - "@babel/generator" "^7.12.5" - "@babel/helper-module-transforms" "^7.12.1" - "@babel/helpers" "^7.12.5" - "@babel/parser" "^7.12.7" - "@babel/template" "^7.12.7" - "@babel/traverse" "^7.12.9" - "@babel/types" "^7.12.7" - convert-source-map "^1.7.0" - debug "^4.1.0" - gensync "^1.0.0-beta.1" - json5 "^2.1.2" - lodash "^4.17.19" - resolve "^1.3.2" - semver "^5.4.1" - source-map "^0.5.0" - -"@babel/core@^7.12.3", "@babel/core@^7.18.6", "@babel/core@^7.19.6": - version "7.20.7" - resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.20.7.tgz#37072f951bd4d28315445f66e0ec9f6ae0c8c35f" - integrity 
sha512-t1ZjCluspe5DW24bn2Rr1CDb2v9rn/hROtg9a2tmd0+QYf4bsloYfLQzjG4qHPNMhWtKdGC33R5AxGR2Af2cBw== - dependencies: - "@ampproject/remapping" "^2.1.0" - "@babel/code-frame" "^7.18.6" - "@babel/generator" "^7.20.7" - "@babel/helper-compilation-targets" "^7.20.7" - "@babel/helper-module-transforms" "^7.20.7" - "@babel/helpers" "^7.20.7" - "@babel/parser" "^7.20.7" - "@babel/template" "^7.20.7" - "@babel/traverse" "^7.20.7" - "@babel/types" "^7.20.7" - convert-source-map "^1.7.0" +"@babel/compat-data@^7.22.6", "@babel/compat-data@^7.25.7", "@babel/compat-data@^7.25.8": + version "7.25.8" + resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.25.8.tgz#0376e83df5ab0eb0da18885c0140041f0747a402" + integrity sha512-ZsysZyXY4Tlx+Q53XdnOFmqwfB9QDTHYxaZYajWRoBLuLEAwI2UIbtxOjWh/cFaa9IKUlcB+DDuoskLuKu56JA== + +"@babel/core@^7.12.3", "@babel/core@^7.21.3", "@babel/core@^7.23.3": + version "7.25.8" + resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.25.8.tgz#a57137d2a51bbcffcfaeba43cb4dd33ae3e0e1c6" + integrity sha512-Oixnb+DzmRT30qu9d3tJSQkxuygWm32DFykT4bRoORPa9hZ/L4KhVB/XiRm6KG+roIEM7DBQlmg27kw2HZkdZg== + dependencies: + "@ampproject/remapping" "^2.2.0" + "@babel/code-frame" "^7.25.7" + "@babel/generator" "^7.25.7" + "@babel/helper-compilation-targets" "^7.25.7" + "@babel/helper-module-transforms" "^7.25.7" + "@babel/helpers" "^7.25.7" + "@babel/parser" "^7.25.8" + "@babel/template" "^7.25.7" + "@babel/traverse" "^7.25.7" + "@babel/types" "^7.25.8" + convert-source-map "^2.0.0" debug "^4.1.0" gensync "^1.0.0-beta.2" - json5 "^2.2.1" - semver "^6.3.0" - -"@babel/generator@^7.12.5", "@babel/generator@^7.18.7", "@babel/generator@^7.20.7": - version "7.20.7" - resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.20.7.tgz#f8ef57c8242665c5929fe2e8d82ba75460187b4a" - integrity sha512-7wqMOJq8doJMZmP4ApXTzLxSr7+oO2jroJURrVEp6XShrQUObV8Tq/D0NCcoYg2uHqUrjzO0zwBjoYzelxK+sw== - dependencies: - "@babel/types" "^7.20.7" - "@jridgewell/gen-mapping" "^0.3.2" - jsesc "^2.5.1" - -"@babel/helper-annotate-as-pure@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.18.6.tgz#eaa49f6f80d5a33f9a5dd2276e6d6e451be0a6bb" - integrity sha512-duORpUiYrEpzKIop6iNbjnwKLAKnJ47csTyRACyEmWj0QdUrm5aqNJGHSSEQSUAvNW0ojX0dOmK9dZduvkfeXA== - dependencies: - "@babel/types" "^7.18.6" - -"@babel/helper-builder-binary-assignment-operator-visitor@^7.18.6": - version "7.18.9" - resolved "https://registry.yarnpkg.com/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.18.9.tgz#acd4edfd7a566d1d51ea975dff38fd52906981bb" - integrity sha512-yFQ0YCHoIqarl8BCRwBL8ulYUaZpz3bNsA7oFepAzee+8/+ImtADXNOmO5vJvsPff3qi+hvpkY/NYBTrBQgdNw== - dependencies: - "@babel/helper-explode-assignable-expression" "^7.18.6" - "@babel/types" "^7.18.9" - -"@babel/helper-compilation-targets@^7.17.7", "@babel/helper-compilation-targets@^7.18.9", "@babel/helper-compilation-targets@^7.20.0", "@babel/helper-compilation-targets@^7.20.7": - version "7.20.7" - resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.20.7.tgz#a6cd33e93629f5eb473b021aac05df62c4cd09bb" - integrity sha512-4tGORmfQcrc+bvrjb5y3dG9Mx1IOZjsHqQVUz7XCNHO+iTmqxWnVg3KRygjGmpRLJGdQSKuvFinbIb0CnZwHAQ== - dependencies: - "@babel/compat-data" "^7.20.5" - "@babel/helper-validator-option" "^7.18.6" - browserslist "^4.21.3" + json5 "^2.2.3" + semver "^6.3.1" + +"@babel/generator@^7.12.5", 
"@babel/generator@^7.23.3", "@babel/generator@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.25.7.tgz#de86acbeb975a3e11ee92dd52223e6b03b479c56" + integrity sha512-5Dqpl5fyV9pIAD62yK9P7fcA768uVPUyrQmqpqstHWgMma4feF1x/oFysBCVZLY5wJ2GkMUCdsNDnGZrPoR6rA== + dependencies: + "@babel/types" "^7.25.7" + "@jridgewell/gen-mapping" "^0.3.5" + "@jridgewell/trace-mapping" "^0.3.25" + jsesc "^3.0.2" + +"@babel/helper-annotate-as-pure@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.25.7.tgz#63f02dbfa1f7cb75a9bdb832f300582f30bb8972" + integrity sha512-4xwU8StnqnlIhhioZf1tqnVWeQ9pvH/ujS8hRfw/WOza+/a+1qv69BWNy+oY231maTCWgKWhfBU7kDpsds6zAA== + dependencies: + "@babel/types" "^7.25.7" + +"@babel/helper-builder-binary-assignment-operator-visitor@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.25.7.tgz#d721650c1f595371e0a23ee816f1c3c488c0d622" + integrity sha512-12xfNeKNH7jubQNm7PAkzlLwEmCs1tfuX3UjIw6vP6QXi+leKh6+LyC/+Ed4EIQermwd58wsyh070yjDHFlNGg== + dependencies: + "@babel/traverse" "^7.25.7" + "@babel/types" "^7.25.7" + +"@babel/helper-compilation-targets@^7.22.6", "@babel/helper-compilation-targets@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.25.7.tgz#11260ac3322dda0ef53edfae6e97b961449f5fa4" + integrity sha512-DniTEax0sv6isaw6qSQSfV4gVRNtw2rte8HHM45t9ZR0xILaufBRNkpMifCRiAPyvL4ACD6v0gfCwCmtOQaV4A== + dependencies: + "@babel/compat-data" "^7.25.7" + "@babel/helper-validator-option" "^7.25.7" + browserslist "^4.24.0" lru-cache "^5.1.1" - semver "^6.3.0" - -"@babel/helper-create-class-features-plugin@^7.18.6", "@babel/helper-create-class-features-plugin@^7.20.5", "@babel/helper-create-class-features-plugin@^7.20.7": - version "7.20.7" - resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.20.7.tgz#d0e1f8d7e4ed5dac0389364d9c0c191d948ade6f" - integrity sha512-LtoWbDXOaidEf50hmdDqn9g8VEzsorMexoWMQdQODbvmqYmaF23pBP5VNPAGIFHsFQCIeKokDiz3CH5Y2jlY6w== - dependencies: - "@babel/helper-annotate-as-pure" "^7.18.6" - "@babel/helper-environment-visitor" "^7.18.9" - "@babel/helper-function-name" "^7.19.0" - "@babel/helper-member-expression-to-functions" "^7.20.7" - "@babel/helper-optimise-call-expression" "^7.18.6" - "@babel/helper-replace-supers" "^7.20.7" - "@babel/helper-split-export-declaration" "^7.18.6" - -"@babel/helper-create-regexp-features-plugin@^7.18.6", "@babel/helper-create-regexp-features-plugin@^7.20.5": - version "7.20.5" - resolved "https://registry.yarnpkg.com/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.20.5.tgz#5ea79b59962a09ec2acf20a963a01ab4d076ccca" - integrity sha512-m68B1lkg3XDGX5yCvGO0kPx3v9WIYLnzjKfPcQiwntEQa5ZeRkPmo2X/ISJc8qxWGfwUr+kvZAeEzAwLec2r2w== - dependencies: - "@babel/helper-annotate-as-pure" "^7.18.6" - regexpu-core "^5.2.1" - -"@babel/helper-define-polyfill-provider@^0.3.3": - version "0.3.3" - resolved "https://registry.yarnpkg.com/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.3.3.tgz#8612e55be5d51f0cd1f36b4a5a83924e89884b7a" - integrity sha512-z5aQKU4IzbqCC1XH0nAqfsFLMVSo22SBKUc0BxGrLkolTdPTructy0ToNnlO2zA4j9Q/7pjMZf0DSY+DSTYzww== + semver "^6.3.1" + 
+"@babel/helper-create-class-features-plugin@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.25.7.tgz#5d65074c76cae75607421c00d6bd517fe1892d6b" + integrity sha512-bD4WQhbkx80mAyj/WCm4ZHcF4rDxkoLFO6ph8/5/mQ3z4vAzltQXAmbc7GvVJx5H+lk5Mi5EmbTeox5nMGCsbw== + dependencies: + "@babel/helper-annotate-as-pure" "^7.25.7" + "@babel/helper-member-expression-to-functions" "^7.25.7" + "@babel/helper-optimise-call-expression" "^7.25.7" + "@babel/helper-replace-supers" "^7.25.7" + "@babel/helper-skip-transparent-expression-wrappers" "^7.25.7" + "@babel/traverse" "^7.25.7" + semver "^6.3.1" + +"@babel/helper-create-regexp-features-plugin@^7.18.6", "@babel/helper-create-regexp-features-plugin@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.25.7.tgz#dcb464f0e2cdfe0c25cc2a0a59c37ab940ce894e" + integrity sha512-byHhumTj/X47wJ6C6eLpK7wW/WBEcnUeb7D0FNc/jFQnQVw7DOso3Zz5u9x/zLrFVkHa89ZGDbkAa1D54NdrCQ== + dependencies: + "@babel/helper-annotate-as-pure" "^7.25.7" + regexpu-core "^6.1.1" + semver "^6.3.1" + +"@babel/helper-define-polyfill-provider@^0.6.2": + version "0.6.2" + resolved "https://registry.yarnpkg.com/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.6.2.tgz#18594f789c3594acb24cfdb4a7f7b7d2e8bd912d" + integrity sha512-LV76g+C502biUK6AyZ3LK10vDpDyCzZnhZFXkH1L75zHPj68+qc8Zfpx2th+gzwA2MzyK+1g/3EPl62yFnVttQ== dependencies: - "@babel/helper-compilation-targets" "^7.17.7" - "@babel/helper-plugin-utils" "^7.16.7" + "@babel/helper-compilation-targets" "^7.22.6" + "@babel/helper-plugin-utils" "^7.22.5" debug "^4.1.1" lodash.debounce "^4.0.8" resolve "^1.14.2" - semver "^6.1.2" - -"@babel/helper-environment-visitor@^7.18.9": - version "7.18.9" - resolved "https://registry.yarnpkg.com/@babel/helper-environment-visitor/-/helper-environment-visitor-7.18.9.tgz#0c0cee9b35d2ca190478756865bb3528422f51be" - integrity sha512-3r/aACDJ3fhQ/EVgFy0hpj8oHyHpQc+LPtJoY9SzTThAsStm4Ptegq92vqKoE3vD706ZVFWITnMnxucw+S9Ipg== - -"@babel/helper-explode-assignable-expression@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.18.6.tgz#41f8228ef0a6f1a036b8dfdfec7ce94f9a6bc096" - integrity sha512-eyAYAsQmB80jNfg4baAtLeWAQHfHFiR483rzFK+BhETlGZaQC9bsfrugfXDCbRHLQbIA7U5NxhhOxN7p/dWIcg== - dependencies: - "@babel/types" "^7.18.6" - -"@babel/helper-function-name@^7.18.9", "@babel/helper-function-name@^7.19.0": - version "7.19.0" - resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.19.0.tgz#941574ed5390682e872e52d3f38ce9d1bef4648c" - integrity sha512-WAwHBINyrpqywkUH0nTnNgI5ina5TFn85HKS0pbPDfxFfhyR/aNQEn4hGi1P1JyT//I0t4OgXUlofzWILRvS5w== - dependencies: - "@babel/template" "^7.18.10" - "@babel/types" "^7.19.0" - -"@babel/helper-hoist-variables@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz#d4d2c8fb4baeaa5c68b99cc8245c56554f926678" - integrity sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q== - dependencies: - "@babel/types" "^7.18.6" - -"@babel/helper-member-expression-to-functions@^7.20.7": - version "7.20.7" - resolved 
"https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.20.7.tgz#a6f26e919582275a93c3aa6594756d71b0bb7f05" - integrity sha512-9J0CxJLq315fEdi4s7xK5TQaNYjZw+nDVpVqr1axNGKzdrdwYBD5b4uKv3n75aABG0rCCTK8Im8Ww7eYfMrZgw== - dependencies: - "@babel/types" "^7.20.7" - -"@babel/helper-module-imports@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.18.6.tgz#1e3ebdbbd08aad1437b428c50204db13c5a3ca6e" - integrity sha512-0NFvs3VkuSYbFi1x2Vd6tKrywq+z/cLeYC/RJNFrIX/30Bf5aiGYbtvGXolEktzJH8o5E5KJ3tT+nkxuuZFVlA== - dependencies: - "@babel/types" "^7.18.6" - -"@babel/helper-module-transforms@^7.12.1", "@babel/helper-module-transforms@^7.18.6", "@babel/helper-module-transforms@^7.20.11", "@babel/helper-module-transforms@^7.20.7": - version "7.20.11" - resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.20.11.tgz#df4c7af713c557938c50ea3ad0117a7944b2f1b0" - integrity sha512-uRy78kN4psmji1s2QtbtcCSaj/LILFDp0f/ymhpQH5QY3nljUZCaNWz9X1dEj/8MBdBEFECs7yRhKn8i7NjZgg== - dependencies: - "@babel/helper-environment-visitor" "^7.18.9" - "@babel/helper-module-imports" "^7.18.6" - "@babel/helper-simple-access" "^7.20.2" - "@babel/helper-split-export-declaration" "^7.18.6" - "@babel/helper-validator-identifier" "^7.19.1" - "@babel/template" "^7.20.7" - "@babel/traverse" "^7.20.10" - "@babel/types" "^7.20.7" - -"@babel/helper-optimise-call-expression@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.18.6.tgz#9369aa943ee7da47edab2cb4e838acf09d290ffe" - integrity sha512-HP59oD9/fEHQkdcbgFCnbmgH5vIQTJbxh2yf+CdM89/glUNnuzr87Q8GIjGEnOktTROemO0Pe0iPAYbqZuOUiA== - dependencies: - "@babel/types" "^7.18.6" - -"@babel/helper-plugin-utils@7.10.4": - version "7.10.4" - resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz#2f75a831269d4f677de49986dff59927533cf375" - integrity sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg== - -"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.16.7", "@babel/helper-plugin-utils@^7.18.6", "@babel/helper-plugin-utils@^7.18.9", "@babel/helper-plugin-utils@^7.19.0", "@babel/helper-plugin-utils@^7.20.2", "@babel/helper-plugin-utils@^7.8.0", "@babel/helper-plugin-utils@^7.8.3": - version "7.20.2" - resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.20.2.tgz#d1b9000752b18d0877cff85a5c376ce5c3121629" - integrity sha512-8RvlJG2mj4huQ4pZ+rU9lqKi9ZKiRmuvGuM2HlWmkmgOhbs6zEAw6IEiJ5cQqGbDzGZOhwuOQNtZMi/ENLjZoQ== - -"@babel/helper-remap-async-to-generator@^7.18.9": - version "7.18.9" - resolved "https://registry.yarnpkg.com/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.18.9.tgz#997458a0e3357080e54e1d79ec347f8a8cd28519" - integrity sha512-dI7q50YKd8BAv3VEfgg7PS7yD3Rtbi2J1XMXaalXO0W0164hYLnh8zpjRS0mte9MfVp/tltvr/cfdXPvJr1opA== - dependencies: - "@babel/helper-annotate-as-pure" "^7.18.6" - "@babel/helper-environment-visitor" "^7.18.9" - "@babel/helper-wrap-function" "^7.18.9" - "@babel/types" "^7.18.9" - -"@babel/helper-replace-supers@^7.18.6", "@babel/helper-replace-supers@^7.20.7": - version "7.20.7" - resolved 
"https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.20.7.tgz#243ecd2724d2071532b2c8ad2f0f9f083bcae331" - integrity sha512-vujDMtB6LVfNW13jhlCrp48QNslK6JXi7lQG736HVbHz/mbf4Dc7tIRh1Xf5C0rF7BP8iiSxGMCmY6Ci1ven3A== - dependencies: - "@babel/helper-environment-visitor" "^7.18.9" - "@babel/helper-member-expression-to-functions" "^7.20.7" - "@babel/helper-optimise-call-expression" "^7.18.6" - "@babel/template" "^7.20.7" - "@babel/traverse" "^7.20.7" - "@babel/types" "^7.20.7" - -"@babel/helper-simple-access@^7.20.2": - version "7.20.2" - resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.20.2.tgz#0ab452687fe0c2cfb1e2b9e0015de07fc2d62dd9" - integrity sha512-+0woI/WPq59IrqDYbVGfshjT5Dmk/nnbdpcF8SnMhhXObpTq2KNBdLFRFrkVdbDOyUmHBCxzm5FHV1rACIkIbA== - dependencies: - "@babel/types" "^7.20.2" - -"@babel/helper-skip-transparent-expression-wrappers@^7.20.0": - version "7.20.0" - resolved "https://registry.yarnpkg.com/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.20.0.tgz#fbe4c52f60518cab8140d77101f0e63a8a230684" - integrity sha512-5y1JYeNKfvnT8sZcK9DVRtpTbGiomYIHviSP3OQWmDPU3DeH4a1ZlT/N2lyQ5P8egjcRaT/Y9aNqUxK0WsnIIg== - dependencies: - "@babel/types" "^7.20.0" - -"@babel/helper-split-export-declaration@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz#7367949bc75b20c6d5a5d4a97bba2824ae8ef075" - integrity sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA== - dependencies: - "@babel/types" "^7.18.6" - -"@babel/helper-string-parser@^7.19.4": - version "7.19.4" - resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.19.4.tgz#38d3acb654b4701a9b77fb0615a96f775c3a9e63" - integrity sha512-nHtDoQcuqFmwYNYPz3Rah5ph2p8PFeFCsZk9A/48dPc/rGocJ5J3hAAZ7pb76VWX3fZKu+uEr/FhH5jLx7umrw== - -"@babel/helper-validator-identifier@^7.18.6", "@babel/helper-validator-identifier@^7.19.1": - version "7.19.1" - resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz#7eea834cf32901ffdc1a7ee555e2f9c27e249ca2" - integrity sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w== - -"@babel/helper-validator-option@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.18.6.tgz#bf0d2b5a509b1f336099e4ff36e1a63aa5db4db8" - integrity sha512-XO7gESt5ouv/LRJdrVjkShckw6STTaB7l9BrpBaAHDeF5YZT+01PCwmR0SJHnkW6i8OwW/EVWRShfi4j2x+KQw== - -"@babel/helper-wrap-function@^7.18.9": - version "7.20.5" - resolved "https://registry.yarnpkg.com/@babel/helper-wrap-function/-/helper-wrap-function-7.20.5.tgz#75e2d84d499a0ab3b31c33bcfe59d6b8a45f62e3" - integrity sha512-bYMxIWK5mh+TgXGVqAtnu5Yn1un+v8DDZtqyzKRLUzrh70Eal2O3aZ7aPYiMADO4uKlkzOiRiZ6GX5q3qxvW9Q== - dependencies: - "@babel/helper-function-name" "^7.19.0" - "@babel/template" "^7.18.10" - "@babel/traverse" "^7.20.5" - "@babel/types" "^7.20.5" -"@babel/helpers@^7.12.5", "@babel/helpers@^7.20.7": - version "7.20.7" - resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.20.7.tgz#04502ff0feecc9f20ecfaad120a18f011a8e6dce" - integrity sha512-PBPjs5BppzsGaxHQCDKnZ6Gd9s6xl8bBCluz3vEInLGRJmnZan4F6BYCeqtyXqkk4W5IlPmjK4JlOuZkpJ3xZA== - dependencies: - "@babel/template" "^7.20.7" - "@babel/traverse" "^7.20.7" - "@babel/types" 
"^7.20.7" - -"@babel/highlight@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.18.6.tgz#81158601e93e2563795adcbfbdf5d64be3f2ecdf" - integrity sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g== - dependencies: - "@babel/helper-validator-identifier" "^7.18.6" - chalk "^2.0.0" +"@babel/helper-member-expression-to-functions@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.25.7.tgz#541a33b071f0355a63a0fa4bdf9ac360116b8574" + integrity sha512-O31Ssjd5K6lPbTX9AAYpSKrZmLeagt9uwschJd+Ixo6QiRyfpvgtVQp8qrDR9UNFjZ8+DO34ZkdrN+BnPXemeA== + dependencies: + "@babel/traverse" "^7.25.7" + "@babel/types" "^7.25.7" + +"@babel/helper-module-imports@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.25.7.tgz#dba00d9523539152906ba49263e36d7261040472" + integrity sha512-o0xCgpNmRohmnoWKQ0Ij8IdddjyBFE4T2kagL/x6M3+4zUgc+4qTOUBoNe4XxDskt1HPKO007ZPiMgLDq2s7Kw== + dependencies: + "@babel/traverse" "^7.25.7" + "@babel/types" "^7.25.7" + +"@babel/helper-module-transforms@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.25.7.tgz#2ac9372c5e001b19bc62f1fe7d96a18cb0901d1a" + integrity sha512-k/6f8dKG3yDz/qCwSM+RKovjMix563SLxQFo0UhRNo239SP6n9u5/eLtKD6EAjwta2JHJ49CsD8pms2HdNiMMQ== + dependencies: + "@babel/helper-module-imports" "^7.25.7" + "@babel/helper-simple-access" "^7.25.7" + "@babel/helper-validator-identifier" "^7.25.7" + "@babel/traverse" "^7.25.7" + +"@babel/helper-optimise-call-expression@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.25.7.tgz#1de1b99688e987af723eed44fa7fc0ee7b97d77a" + integrity sha512-VAwcwuYhv/AT+Vfr28c9y6SHzTan1ryqrydSTFGjU0uDJHw3uZ+PduI8plCLkRsDnqK2DMEDmwrOQRsK/Ykjng== + dependencies: + "@babel/types" "^7.25.7" + +"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.18.6", "@babel/helper-plugin-utils@^7.22.5", "@babel/helper-plugin-utils@^7.25.7", "@babel/helper-plugin-utils@^7.8.0": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.25.7.tgz#8ec5b21812d992e1ef88a9b068260537b6f0e36c" + integrity sha512-eaPZai0PiqCi09pPs3pAFfl/zYgGaE6IdXtYvmf0qlcDTd3WCtO7JWCcRd64e0EQrcYgiHibEZnOGsSY4QSgaw== + +"@babel/helper-remap-async-to-generator@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.25.7.tgz#9efdc39df5f489bcd15533c912b6c723a0a65021" + integrity sha512-kRGE89hLnPfcz6fTrlNU+uhgcwv0mBE4Gv3P9Ke9kLVJYpi4AMVVEElXvB5CabrPZW4nCM8P8UyyjrzCM0O2sw== + dependencies: + "@babel/helper-annotate-as-pure" "^7.25.7" + "@babel/helper-wrap-function" "^7.25.7" + "@babel/traverse" "^7.25.7" + +"@babel/helper-replace-supers@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.25.7.tgz#38cfda3b6e990879c71d08d0fef9236b62bd75f5" + integrity sha512-iy8JhqlUW9PtZkd4pHM96v6BdJ66Ba9yWSE4z0W4TvSZwLBPkyDsiIU3ENe4SmrzRBs76F7rQXTy1lYC49n6Lw== + dependencies: + "@babel/helper-member-expression-to-functions" "^7.25.7" + "@babel/helper-optimise-call-expression" "^7.25.7" + "@babel/traverse" "^7.25.7" + +"@babel/helper-simple-access@^7.25.7": + 
version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.25.7.tgz#5eb9f6a60c5d6b2e0f76057004f8dacbddfae1c0" + integrity sha512-FPGAkJmyoChQeM+ruBGIDyrT2tKfZJO8NcxdC+CWNJi7N8/rZpSxK7yvBJ5O/nF1gfu5KzN7VKG3YVSLFfRSxQ== + dependencies: + "@babel/traverse" "^7.25.7" + "@babel/types" "^7.25.7" + +"@babel/helper-skip-transparent-expression-wrappers@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.25.7.tgz#382831c91038b1a6d32643f5f49505b8442cb87c" + integrity sha512-pPbNbchZBkPMD50K0p3JGcFMNLVUCuU/ABybm/PGNj4JiHrpmNyqqCphBk4i19xXtNV0JhldQJJtbSW5aUvbyA== + dependencies: + "@babel/traverse" "^7.25.7" + "@babel/types" "^7.25.7" + +"@babel/helper-string-parser@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.25.7.tgz#d50e8d37b1176207b4fe9acedec386c565a44a54" + integrity sha512-CbkjYdsJNHFk8uqpEkpCvRs3YRp9tY6FmFY7wLMSYuGYkrdUi7r2lc4/wqsvlHoMznX3WJ9IP8giGPq68T/Y6g== + +"@babel/helper-validator-identifier@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.7.tgz#77b7f60c40b15c97df735b38a66ba1d7c3e93da5" + integrity sha512-AM6TzwYqGChO45oiuPqwL2t20/HdMC1rTPAesnBCgPCSF1x3oN9MVUwQV2iyz4xqWrctwK5RNC8LV22kaQCNYg== + +"@babel/helper-validator-option@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.25.7.tgz#97d1d684448228b30b506d90cace495d6f492729" + integrity sha512-ytbPLsm+GjArDYXJ8Ydr1c/KJuutjF2besPNbIZnZ6MKUxi/uTA22t2ymmA4WFjZFpjiAMO0xuuJPqK2nvDVfQ== + +"@babel/helper-wrap-function@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/helper-wrap-function/-/helper-wrap-function-7.25.7.tgz#9f6021dd1c4fdf4ad515c809967fc4bac9a70fe7" + integrity sha512-MA0roW3JF2bD1ptAaJnvcabsVlNQShUaThyJbCDD4bCp8NEgiFvpoqRI2YS22hHlc2thjO/fTg2ShLMC3jygAg== + dependencies: + "@babel/template" "^7.25.7" + "@babel/traverse" "^7.25.7" + "@babel/types" "^7.25.7" + +"@babel/helpers@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.25.7.tgz#091b52cb697a171fe0136ab62e54e407211f09c2" + integrity sha512-Sv6pASx7Esm38KQpF/U/OXLwPPrdGHNKoeblRxgZRLXnAtnkEe4ptJPDtAZM7fBLadbc1Q07kQpSiGQ0Jg6tRA== + dependencies: + "@babel/template" "^7.25.7" + "@babel/types" "^7.25.7" + +"@babel/highlight@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.25.7.tgz#20383b5f442aa606e7b5e3043b0b1aafe9f37de5" + integrity sha512-iYyACpW3iW8Fw+ZybQK+drQre+ns/tKpXbNESfrhNnPLIklLbXr7MYJ6gPEd0iETGLOK+SxMjVvKb/ffmk+FEw== + dependencies: + "@babel/helper-validator-identifier" "^7.25.7" + chalk "^2.4.2" js-tokens "^4.0.0" + picocolors "^1.0.0" -"@babel/parser@^7.12.7", "@babel/parser@^7.18.8", "@babel/parser@^7.20.7": - version "7.20.7" - resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.20.7.tgz#66fe23b3c8569220817d5feb8b9dcdc95bb4f71b" - integrity sha512-T3Z9oHybU+0vZlY9CiDSJQTD5ZapcW18ZctFMi0MOAl/4BjFF4ul7NVSARLdbGO5vDqy9eQiGTV0LtKfvCYvcg== - -"@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@^7.18.6": - version "7.18.6" - resolved 
"https://registry.yarnpkg.com/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.18.6.tgz#da5b8f9a580acdfbe53494dba45ea389fb09a4d2" - integrity sha512-Dgxsyg54Fx1d4Nge8UnvTrED63vrwOdPmyvPzlNN/boaliRP54pm3pGzZD1SJUwrBA+Cs/xdG8kXX6Mn/RfISQ== - dependencies: - "@babel/helper-plugin-utils" "^7.18.6" - -"@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining@^7.18.9": - version "7.20.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.20.7.tgz#d9c85589258539a22a901033853101a6198d4ef1" - integrity sha512-sbr9+wNE5aXMBBFBICk01tt7sBf2Oc9ikRFEcem/ZORup9IMUdNhW7/wVLEbbtlWOsEubJet46mHAL2C8+2jKQ== - dependencies: - "@babel/helper-plugin-utils" "^7.20.2" - "@babel/helper-skip-transparent-expression-wrappers" "^7.20.0" - "@babel/plugin-proposal-optional-chaining" "^7.20.7" - -"@babel/plugin-proposal-async-generator-functions@^7.20.1": - version "7.20.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.20.7.tgz#bfb7276d2d573cb67ba379984a2334e262ba5326" - integrity sha512-xMbiLsn/8RK7Wq7VeVytytS2L6qE69bXPB10YCmMdDZbKF4okCqY74pI/jJQ/8U0b/F6NrT2+14b8/P9/3AMGA== - dependencies: - "@babel/helper-environment-visitor" "^7.18.9" - "@babel/helper-plugin-utils" "^7.20.2" - "@babel/helper-remap-async-to-generator" "^7.18.9" - "@babel/plugin-syntax-async-generators" "^7.8.4" - -"@babel/plugin-proposal-class-properties@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.18.6.tgz#b110f59741895f7ec21a6fff696ec46265c446a3" - integrity sha512-cumfXOF0+nzZrrN8Rf0t7M+tF6sZc7vhQwYQck9q1/5w2OExlD+b4v4RpMJFaV1Z7WcDRgO6FqvxqxGlwo+RHQ== - dependencies: - "@babel/helper-create-class-features-plugin" "^7.18.6" - "@babel/helper-plugin-utils" "^7.18.6" - -"@babel/plugin-proposal-class-static-block@^7.18.6": - version "7.20.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-class-static-block/-/plugin-proposal-class-static-block-7.20.7.tgz#92592e9029b13b15be0f7ce6a7aedc2879ca45a7" - integrity sha512-AveGOoi9DAjUYYuUAG//Ig69GlazLnoyzMw68VCDux+c1tsnnH/OkYcpz/5xzMkEFC6UxjR5Gw1c+iY2wOGVeQ== - dependencies: - "@babel/helper-create-class-features-plugin" "^7.20.7" - "@babel/helper-plugin-utils" "^7.20.2" - "@babel/plugin-syntax-class-static-block" "^7.14.5" - -"@babel/plugin-proposal-dynamic-import@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.18.6.tgz#72bcf8d408799f547d759298c3c27c7e7faa4d94" - integrity sha512-1auuwmK+Rz13SJj36R+jqFPMJWyKEDd7lLSdOj4oJK0UTgGueSAtkrCvz9ewmgyU/P941Rv2fQwZJN8s6QruXw== - dependencies: - "@babel/helper-plugin-utils" "^7.18.6" - "@babel/plugin-syntax-dynamic-import" "^7.8.3" - -"@babel/plugin-proposal-export-namespace-from@^7.18.9": - version "7.18.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.18.9.tgz#5f7313ab348cdb19d590145f9247540e94761203" - integrity sha512-k1NtHyOMvlDDFeb9G5PhUXuGj8m/wiwojgQVEhJ/fsVsMCpLyOP4h0uGEjYJKrRI+EVPlb5Jk+Gt9P97lOGwtA== - dependencies: - "@babel/helper-plugin-utils" "^7.18.9" - "@babel/plugin-syntax-export-namespace-from" "^7.8.3" - -"@babel/plugin-proposal-json-strings@^7.18.6": - 
version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.18.6.tgz#7e8788c1811c393aff762817e7dbf1ebd0c05f0b" - integrity sha512-lr1peyn9kOdbYc0xr0OdHTZ5FMqS6Di+H0Fz2I/JwMzGmzJETNeOFq2pBySw6X/KFL5EWDjlJuMsUGRFb8fQgQ== - dependencies: - "@babel/helper-plugin-utils" "^7.18.6" - "@babel/plugin-syntax-json-strings" "^7.8.3" - -"@babel/plugin-proposal-logical-assignment-operators@^7.18.9": - version "7.20.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-logical-assignment-operators/-/plugin-proposal-logical-assignment-operators-7.20.7.tgz#dfbcaa8f7b4d37b51e8bfb46d94a5aea2bb89d83" - integrity sha512-y7C7cZgpMIjWlKE5T7eJwp+tnRYM89HmRvWM5EQuB5BoHEONjmQ8lSNmBUwOyy/GFRsohJED51YBF79hE1djug== - dependencies: - "@babel/helper-plugin-utils" "^7.20.2" - "@babel/plugin-syntax-logical-assignment-operators" "^7.10.4" - -"@babel/plugin-proposal-nullish-coalescing-operator@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.18.6.tgz#fdd940a99a740e577d6c753ab6fbb43fdb9467e1" - integrity sha512-wQxQzxYeJqHcfppzBDnm1yAY0jSRkUXR2z8RePZYrKwMKgMlE8+Z6LUno+bd6LvbGh8Gltvy74+9pIYkr+XkKA== - dependencies: - "@babel/helper-plugin-utils" "^7.18.6" - "@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.3" - -"@babel/plugin-proposal-numeric-separator@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.18.6.tgz#899b14fbafe87f053d2c5ff05b36029c62e13c75" - integrity sha512-ozlZFogPqoLm8WBr5Z8UckIoE4YQ5KESVcNudyXOR8uqIkliTEgJ3RoketfG6pmzLdeZF0H/wjE9/cCEitBl7Q== - dependencies: - "@babel/helper-plugin-utils" "^7.18.6" - "@babel/plugin-syntax-numeric-separator" "^7.10.4" - -"@babel/plugin-proposal-object-rest-spread@7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.12.1.tgz#def9bd03cea0f9b72283dac0ec22d289c7691069" - integrity sha512-s6SowJIjzlhx8o7lsFx5zmY4At6CTtDvgNQDdPzkBQucle58A6b/TTeEBYtyDgmcXjUTM+vE8YOGHZzzbc/ioA== +"@babel/parser@^7.12.7", "@babel/parser@^7.25.7", "@babel/parser@^7.25.8": + version "7.25.8" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.25.8.tgz#f6aaf38e80c36129460c1657c0762db584c9d5e2" + integrity sha512-HcttkxzdPucv3nNFmfOOMfFf64KgdJVqm1KaCm25dPGMLElo9nsLvXeJECQg8UzPuBGLyTSA0ZzqCtDSzKTEoQ== dependencies: - "@babel/helper-plugin-utils" "^7.10.4" - "@babel/plugin-syntax-object-rest-spread" "^7.8.0" - "@babel/plugin-transform-parameters" "^7.12.1" + "@babel/types" "^7.25.8" -"@babel/plugin-proposal-object-rest-spread@^7.20.2": - version "7.20.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.20.7.tgz#aa662940ef425779c75534a5c41e9d936edc390a" - integrity sha512-d2S98yCiLxDVmBmE8UjGcfPvNEUbA1U5q5WxaWFUGRzJSVAZqm5W6MbPct0jxnegUZ0niLeNX+IOzEs7wYg9Dg== +"@babel/plugin-bugfix-firefox-class-in-computed-class-key@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-firefox-class-in-computed-class-key/-/plugin-bugfix-firefox-class-in-computed-class-key-7.25.7.tgz#93969ac50ef4d68b2504b01b758af714e4cbdd64" + integrity sha512-UV9Lg53zyebzD1DwQoT9mzkEKa922LNUp5YkTJ6Uta0RbyXaQNUgcvSt7qIu1PpPzVb6rd10OVNTzkyBGeVmxQ== dependencies: - "@babel/compat-data" "^7.20.5" - 
"@babel/helper-compilation-targets" "^7.20.7" - "@babel/helper-plugin-utils" "^7.20.2" - "@babel/plugin-syntax-object-rest-spread" "^7.8.3" - "@babel/plugin-transform-parameters" "^7.20.7" + "@babel/helper-plugin-utils" "^7.25.7" + "@babel/traverse" "^7.25.7" -"@babel/plugin-proposal-optional-catch-binding@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.18.6.tgz#f9400d0e6a3ea93ba9ef70b09e72dd6da638a2cb" - integrity sha512-Q40HEhs9DJQyaZfUjjn6vE8Cv4GmMHCYuMGIWUnlxH6400VGxOuwWsPt4FxXxJkC/5eOzgn0z21M9gMT4MOhbw== +"@babel/plugin-bugfix-safari-class-field-initializer-scope@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-safari-class-field-initializer-scope/-/plugin-bugfix-safari-class-field-initializer-scope-7.25.7.tgz#a338d611adb9dcd599b8b1efa200c88ebeffe046" + integrity sha512-GDDWeVLNxRIkQTnJn2pDOM1pkCgYdSqPeT1a9vh9yIqu2uzzgw1zcqEb+IJOhy+dTBMlNdThrDIksr2o09qrrQ== dependencies: - "@babel/helper-plugin-utils" "^7.18.6" - "@babel/plugin-syntax-optional-catch-binding" "^7.8.3" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-proposal-optional-chaining@^7.18.9", "@babel/plugin-proposal-optional-chaining@^7.20.7": - version "7.20.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.20.7.tgz#49f2b372519ab31728cc14115bb0998b15bfda55" - integrity sha512-T+A7b1kfjtRM51ssoOfS1+wbyCVqorfyZhT99TvxxLMirPShD8CzKMRepMlCBGM5RpHMbn8s+5MMHnPstJH6mQ== +"@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.25.7.tgz#c5f755e911dfac7ef6957300c0f9c4a8c18c06f4" + integrity sha512-wxyWg2RYaSUYgmd9MR0FyRGyeOMQE/Uzr1wzd/g5cf5bwi9A4v6HFdDm7y1MgDtod/fLOSTZY6jDgV0xU9d5bA== dependencies: - "@babel/helper-plugin-utils" "^7.20.2" - "@babel/helper-skip-transparent-expression-wrappers" "^7.20.0" - "@babel/plugin-syntax-optional-chaining" "^7.8.3" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-proposal-private-methods@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.18.6.tgz#5209de7d213457548a98436fa2882f52f4be6bea" - integrity sha512-nutsvktDItsNn4rpGItSNV2sz1XwS+nfU0Rg8aCx3W3NOKVzdMjJRu0O5OkgDp3ZGICSTbgRpxZoWsxoKRvbeA== +"@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.25.7.tgz#3b7ea04492ded990978b6deaa1dfca120ad4455a" + integrity sha512-Xwg6tZpLxc4iQjorYsyGMyfJE7nP5MV8t/Ka58BgiA7Jw0fRqQNcANlLfdJ/yvBt9z9LD2We+BEkT7vLqZRWng== dependencies: - "@babel/helper-create-class-features-plugin" "^7.18.6" - "@babel/helper-plugin-utils" "^7.18.6" + "@babel/helper-plugin-utils" "^7.25.7" + "@babel/helper-skip-transparent-expression-wrappers" "^7.25.7" + "@babel/plugin-transform-optional-chaining" "^7.25.7" -"@babel/plugin-proposal-private-property-in-object@^7.18.6": - version "7.20.5" - resolved 
"https://registry.yarnpkg.com/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.20.5.tgz#309c7668f2263f1c711aa399b5a9a6291eef6135" - integrity sha512-Vq7b9dUA12ByzB4EjQTPo25sFhY+08pQDBSZRtUAkj7lb7jahaHR5igera16QZ+3my1nYR4dKsNdYj5IjPHilQ== +"@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly/-/plugin-bugfix-v8-static-class-fields-redefine-readonly-7.25.7.tgz#9622b1d597a703aa3a921e6f58c9c2d9a028d2c5" + integrity sha512-UVATLMidXrnH+GMUIuxq55nejlj02HP7F5ETyBONzP6G87fPBogG4CH6kxrSrdIuAjdwNO9VzyaYsrZPscWUrw== dependencies: - "@babel/helper-annotate-as-pure" "^7.18.6" - "@babel/helper-create-class-features-plugin" "^7.20.5" - "@babel/helper-plugin-utils" "^7.20.2" - "@babel/plugin-syntax-private-property-in-object" "^7.14.5" + "@babel/helper-plugin-utils" "^7.25.7" + "@babel/traverse" "^7.25.7" -"@babel/plugin-proposal-unicode-property-regex@^7.18.6", "@babel/plugin-proposal-unicode-property-regex@^7.4.4": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.18.6.tgz#af613d2cd5e643643b65cded64207b15c85cb78e" - integrity sha512-2BShG/d5yoZyXZfVePH91urL5wTG6ASZU9M4o03lKK8u8UW1y08OMttBSOADTcJrnPMpvDXRG3G8fyLh4ovs8w== - dependencies: - "@babel/helper-create-regexp-features-plugin" "^7.18.6" - "@babel/helper-plugin-utils" "^7.18.6" - -"@babel/plugin-syntax-async-generators@^7.8.4": - version "7.8.4" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz#a983fb1aeb2ec3f6ed042a210f640e90e786fe0d" - integrity sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw== - dependencies: - "@babel/helper-plugin-utils" "^7.8.0" - -"@babel/plugin-syntax-class-properties@^7.12.13": - version "7.12.13" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz#b5c987274c4a3a82b89714796931a6b53544ae10" - integrity sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA== - dependencies: - "@babel/helper-plugin-utils" "^7.12.13" - -"@babel/plugin-syntax-class-static-block@^7.14.5": - version "7.14.5" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz#195df89b146b4b78b3bf897fd7a257c84659d406" - integrity sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw== - dependencies: - "@babel/helper-plugin-utils" "^7.14.5" +"@babel/plugin-proposal-private-property-in-object@7.21.0-placeholder-for-preset-env.2": + version "7.21.0-placeholder-for-preset-env.2" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz#7844f9289546efa9febac2de4cfe358a050bd703" + integrity sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w== "@babel/plugin-syntax-dynamic-import@^7.8.3": version "7.8.3" @@ -606,1126 +438,1177 @@ dependencies: "@babel/helper-plugin-utils" "^7.8.0" -"@babel/plugin-syntax-export-namespace-from@^7.8.3": - version "7.8.3" - resolved 
"https://registry.yarnpkg.com/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz#028964a9ba80dbc094c915c487ad7c4e7a66465a" - integrity sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q== +"@babel/plugin-syntax-import-assertions@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.25.7.tgz#8ce248f9f4ed4b7ed4cb2e0eb4ed9efd9f52921f" + integrity sha512-ZvZQRmME0zfJnDQnVBKYzHxXT7lYBB3Revz1GuS7oLXWMgqUPX4G+DDbT30ICClht9WKV34QVrZhSw6WdklwZQ== dependencies: - "@babel/helper-plugin-utils" "^7.8.3" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-syntax-import-assertions@^7.20.0": - version "7.20.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.20.0.tgz#bb50e0d4bea0957235390641209394e87bdb9cc4" - integrity sha512-IUh1vakzNoWalR8ch/areW7qFopR2AEw03JlG7BbrDqmQ4X3q9uuipQwSGrUn7oGiemKjtSLDhNtQHzMHr1JdQ== +"@babel/plugin-syntax-import-attributes@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.25.7.tgz#d78dd0499d30df19a598e63ab895e21b909bc43f" + integrity sha512-AqVo+dguCgmpi/3mYBdu9lkngOBlQ2w2vnNpa6gfiCxQZLzV4ZbhsXitJ2Yblkoe1VQwtHSaNmIaGll/26YWRw== dependencies: - "@babel/helper-plugin-utils" "^7.19.0" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-syntax-json-strings@^7.8.3": - version "7.8.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz#01ca21b668cd8218c9e640cb6dd88c5412b2c96a" - integrity sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA== +"@babel/plugin-syntax-jsx@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.25.7.tgz#5352d398d11ea5e7ef330c854dea1dae0bf18165" + integrity sha512-ruZOnKO+ajVL/MVx+PwNBPOkrnXTXoWMtte1MBpegfCArhqOe3Bj52avVj1huLLxNKYKXYaSxZ2F+woK1ekXfw== dependencies: - "@babel/helper-plugin-utils" "^7.8.0" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-syntax-jsx@7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.1.tgz#9d9d357cc818aa7ae7935917c1257f67677a0926" - integrity sha512-1yRi7yAtB0ETgxdY9ti/p2TivUxJkTdhu/ZbF9MshVGqOx1TdB3b7xCXs49Fupgg50N45KcAsRP/ZqWjs9SRjg== +"@babel/plugin-syntax-typescript@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.25.7.tgz#bfc05b0cc31ebd8af09964650cee723bb228108b" + integrity sha512-rR+5FDjpCHqqZN2bzZm18bVYGaejGq5ZkpVCJLXor/+zlSrSoc4KWcHI0URVWjl/68Dyr1uwZUz/1njycEAv9g== dependencies: - "@babel/helper-plugin-utils" "^7.10.4" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-syntax-jsx@^7.18.6": +"@babel/plugin-syntax-unicode-sets-regex@^7.18.6": version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.18.6.tgz#a8feef63b010150abd97f1649ec296e849943ca0" - integrity sha512-6mmljtAedFGTWu2p/8WIORGwy+61PLgOMPOdazc7YoJ9ZCWUyFy3A6CpPkRKLKD1ToAesxX8KGEViAiLo9N+7Q== + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz#d49a3b3e6b52e5be6740022317580234a6a47357" + integrity 
sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg== dependencies: + "@babel/helper-create-regexp-features-plugin" "^7.18.6" "@babel/helper-plugin-utils" "^7.18.6" -"@babel/plugin-syntax-logical-assignment-operators@^7.10.4": - version "7.10.4" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz#ca91ef46303530448b906652bac2e9fe9941f699" - integrity sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig== +"@babel/plugin-transform-arrow-functions@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.25.7.tgz#1b9ed22e6890a0e9ff470371c73b8c749bcec386" + integrity sha512-EJN2mKxDwfOUCPxMO6MUI58RN3ganiRAG/MS/S3HfB6QFNjroAMelQo/gybyYq97WerCBAZoyrAoW8Tzdq2jWg== dependencies: - "@babel/helper-plugin-utils" "^7.10.4" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-syntax-nullish-coalescing-operator@^7.8.3": - version "7.8.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz#167ed70368886081f74b5c36c65a88c03b66d1a9" - integrity sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ== +"@babel/plugin-transform-async-generator-functions@^7.25.8": + version "7.25.8" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.25.8.tgz#3331de02f52cc1f2c75b396bec52188c85b0b1ec" + integrity sha512-9ypqkozyzpG+HxlH4o4gdctalFGIjjdufzo7I2XPda0iBnZ6a+FO0rIEQcdSPXp02CkvGsII1exJhmROPQd5oA== dependencies: - "@babel/helper-plugin-utils" "^7.8.0" + "@babel/helper-plugin-utils" "^7.25.7" + "@babel/helper-remap-async-to-generator" "^7.25.7" + "@babel/traverse" "^7.25.7" -"@babel/plugin-syntax-numeric-separator@^7.10.4": - version "7.10.4" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz#b9b070b3e33570cd9fd07ba7fa91c0dd37b9af97" - integrity sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug== +"@babel/plugin-transform-async-to-generator@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.25.7.tgz#a44c7323f8d4285a6c568dd43c5c361d6367ec52" + integrity sha512-ZUCjAavsh5CESCmi/xCpX1qcCaAglzs/7tmuvoFnJgA1dM7gQplsguljoTg+Ru8WENpX89cQyAtWoaE0I3X3Pg== dependencies: - "@babel/helper-plugin-utils" "^7.10.4" + "@babel/helper-module-imports" "^7.25.7" + "@babel/helper-plugin-utils" "^7.25.7" + "@babel/helper-remap-async-to-generator" "^7.25.7" -"@babel/plugin-syntax-object-rest-spread@7.8.3", "@babel/plugin-syntax-object-rest-spread@^7.8.0", "@babel/plugin-syntax-object-rest-spread@^7.8.3": - version "7.8.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz#60e225edcbd98a640332a2e72dd3e66f1af55871" - integrity sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA== +"@babel/plugin-transform-block-scoped-functions@^7.25.7": + version "7.25.7" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.25.7.tgz#e0b8843d5571719a2f1bf7e284117a3379fcc17c" + integrity sha512-xHttvIM9fvqW+0a3tZlYcZYSBpSWzGBFIt/sYG3tcdSzBB8ZeVgz2gBP7Df+sM0N1850jrviYSSeUuc+135dmQ== dependencies: - "@babel/helper-plugin-utils" "^7.8.0" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-syntax-optional-catch-binding@^7.8.3": - version "7.8.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz#6111a265bcfb020eb9efd0fdfd7d26402b9ed6c1" - integrity sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q== +"@babel/plugin-transform-block-scoping@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.25.7.tgz#6dab95e98adf780ceef1b1c3ab0e55cd20dd410a" + integrity sha512-ZEPJSkVZaeTFG/m2PARwLZQ+OG0vFIhPlKHK/JdIMy8DbRJ/htz6LRrTFtdzxi9EHmcwbNPAKDnadpNSIW+Aow== dependencies: - "@babel/helper-plugin-utils" "^7.8.0" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-syntax-optional-chaining@^7.8.3": - version "7.8.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz#4f69c2ab95167e0180cd5336613f8c5788f7d48a" - integrity sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg== +"@babel/plugin-transform-class-properties@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.25.7.tgz#a389cfca7a10ac80e3ff4c75fca08bd097ad1523" + integrity sha512-mhyfEW4gufjIqYFo9krXHJ3ElbFLIze5IDp+wQTxoPd+mwFb1NxatNAwmv8Q8Iuxv7Zc+q8EkiMQwc9IhyGf4g== dependencies: - "@babel/helper-plugin-utils" "^7.8.0" + "@babel/helper-create-class-features-plugin" "^7.25.7" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-syntax-private-property-in-object@^7.14.5": - version "7.14.5" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz#0dc6671ec0ea22b6e94a1114f857970cd39de1ad" - integrity sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg== +"@babel/plugin-transform-class-static-block@^7.25.8": + version "7.25.8" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.25.8.tgz#a8af22028920fe404668031eceb4c3aadccb5262" + integrity sha512-e82gl3TCorath6YLf9xUwFehVvjvfqFhdOo4+0iVIVju+6XOi5XHkqB3P2AXnSwoeTX0HBoXq5gJFtvotJzFnQ== dependencies: - "@babel/helper-plugin-utils" "^7.14.5" + "@babel/helper-create-class-features-plugin" "^7.25.7" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-syntax-top-level-await@^7.14.5": - version "7.14.5" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz#c1cfdadc35a646240001f06138247b741c34d94c" - integrity sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw== +"@babel/plugin-transform-classes@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-classes/-/plugin-transform-classes-7.25.7.tgz#5103206cf80d02283bbbd044509ea3b65d0906bb" + integrity 
sha512-9j9rnl+YCQY0IGoeipXvnk3niWicIB6kCsWRGLwX241qSXpbA4MKxtp/EdvFxsc4zI5vqfLxzOd0twIJ7I99zg== dependencies: - "@babel/helper-plugin-utils" "^7.14.5" + "@babel/helper-annotate-as-pure" "^7.25.7" + "@babel/helper-compilation-targets" "^7.25.7" + "@babel/helper-plugin-utils" "^7.25.7" + "@babel/helper-replace-supers" "^7.25.7" + "@babel/traverse" "^7.25.7" + globals "^11.1.0" -"@babel/plugin-syntax-typescript@^7.20.0": - version "7.20.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.20.0.tgz#4e9a0cfc769c85689b77a2e642d24e9f697fc8c7" - integrity sha512-rd9TkG+u1CExzS4SM1BlMEhMXwFLKVjOAFFCDx9PbX5ycJWDoWMcwdJH9RhkPu1dOgn5TrxLot/Gx6lWFuAUNQ== +"@babel/plugin-transform-computed-properties@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.25.7.tgz#7f621f0aa1354b5348a935ab12e3903842466f65" + integrity sha512-QIv+imtM+EtNxg/XBKL3hiWjgdLjMOmZ+XzQwSgmBfKbfxUjBzGgVPklUuE55eq5/uVoh8gg3dqlrwR/jw3ZeA== dependencies: - "@babel/helper-plugin-utils" "^7.19.0" + "@babel/helper-plugin-utils" "^7.25.7" + "@babel/template" "^7.25.7" -"@babel/plugin-transform-arrow-functions@^7.18.6": - version "7.20.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.20.7.tgz#bea332b0e8b2dab3dafe55a163d8227531ab0551" - integrity sha512-3poA5E7dzDomxj9WXWwuD6A5F3kc7VXwIJO+E+J8qtDtS+pXPAhrgEyh+9GBwBgPq1Z+bB+/JD60lp5jsN7JPQ== +"@babel/plugin-transform-destructuring@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.25.7.tgz#f6f26a9feefb5aa41fd45b6f5838901b5333d560" + integrity sha512-xKcfLTlJYUczdaM1+epcdh1UGewJqr9zATgrNHcLBcV2QmfvPPEixo/sK/syql9cEmbr7ulu5HMFG5vbbt/sEA== dependencies: - "@babel/helper-plugin-utils" "^7.20.2" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-transform-async-to-generator@^7.18.6": - version "7.20.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.20.7.tgz#dfee18623c8cb31deb796aa3ca84dda9cea94354" - integrity sha512-Uo5gwHPT9vgnSXQxqGtpdufUiWp96gk7yiP4Mp5bm1QMkEmLXBO7PAGYbKoJ6DhAwiNkcHFBol/x5zZZkL/t0Q== +"@babel/plugin-transform-dotall-regex@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.25.7.tgz#9d775c4a3ff1aea64045300fcd4309b4a610ef02" + integrity sha512-kXzXMMRzAtJdDEgQBLF4oaiT6ZCU3oWHgpARnTKDAqPkDJ+bs3NrZb310YYevR5QlRo3Kn7dzzIdHbZm1VzJdQ== dependencies: - "@babel/helper-module-imports" "^7.18.6" - "@babel/helper-plugin-utils" "^7.20.2" - "@babel/helper-remap-async-to-generator" "^7.18.9" + "@babel/helper-create-regexp-features-plugin" "^7.25.7" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-transform-block-scoped-functions@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.18.6.tgz#9187bf4ba302635b9d70d986ad70f038726216a8" - integrity sha512-ExUcOqpPWnliRcPqves5HJcJOvHvIIWfuS4sroBUenPuMdmW+SMHDakmtS7qOo13sVppmUijqeTv7qqGsvURpQ== +"@babel/plugin-transform-duplicate-keys@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.25.7.tgz#fbba7d1155eab76bd4f2a038cbd5d65883bd7a93" + integrity 
sha512-by+v2CjoL3aMnWDOyCIg+yxU9KXSRa9tN6MbqggH5xvymmr9p4AMjYkNlQy4brMceBnUyHZ9G8RnpvT8wP7Cfg== dependencies: - "@babel/helper-plugin-utils" "^7.18.6" - -"@babel/plugin-transform-block-scoping@^7.20.2": - version "7.20.11" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.20.11.tgz#9f5a3424bd112a3f32fe0cf9364fbb155cff262a" - integrity sha512-tA4N427a7fjf1P0/2I4ScsHGc5jcHPbb30xMbaTke2gxDuWpUfXDuX1FEymJwKk4tuGUvGcejAR6HdZVqmmPyw== - dependencies: - "@babel/helper-plugin-utils" "^7.20.2" - -"@babel/plugin-transform-classes@^7.20.2": - version "7.20.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-classes/-/plugin-transform-classes-7.20.7.tgz#f438216f094f6bb31dc266ebfab8ff05aecad073" - integrity sha512-LWYbsiXTPKl+oBlXUGlwNlJZetXD5Am+CyBdqhPsDVjM9Jc8jwBJFrKhHf900Kfk2eZG1y9MAG3UNajol7A4VQ== - dependencies: - "@babel/helper-annotate-as-pure" "^7.18.6" - "@babel/helper-compilation-targets" "^7.20.7" - "@babel/helper-environment-visitor" "^7.18.9" - "@babel/helper-function-name" "^7.19.0" - "@babel/helper-optimise-call-expression" "^7.18.6" - "@babel/helper-plugin-utils" "^7.20.2" - "@babel/helper-replace-supers" "^7.20.7" - "@babel/helper-split-export-declaration" "^7.18.6" - globals "^11.1.0" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-transform-computed-properties@^7.18.9": - version "7.20.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.20.7.tgz#704cc2fd155d1c996551db8276d55b9d46e4d0aa" - integrity sha512-Lz7MvBK6DTjElHAmfu6bfANzKcxpyNPeYBGEafyA6E5HtRpjpZwU+u7Qrgz/2OR0z+5TvKYbPdphfSaAcZBrYQ== +"@babel/plugin-transform-duplicate-named-capturing-groups-regex@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-duplicate-named-capturing-groups-regex/-/plugin-transform-duplicate-named-capturing-groups-regex-7.25.7.tgz#102b31608dcc22c08fbca1894e104686029dc141" + integrity sha512-HvS6JF66xSS5rNKXLqkk7L9c/jZ/cdIVIcoPVrnl8IsVpLggTjXs8OWekbLHs/VtYDDh5WXnQyeE3PPUGm22MA== dependencies: - "@babel/helper-plugin-utils" "^7.20.2" - "@babel/template" "^7.20.7" + "@babel/helper-create-regexp-features-plugin" "^7.25.7" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-transform-destructuring@^7.20.2": - version "7.20.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.20.7.tgz#8bda578f71620c7de7c93af590154ba331415454" - integrity sha512-Xwg403sRrZb81IVB79ZPqNQME23yhugYVqgTxAhT99h485F4f+GMELFhhOsscDUB7HCswepKeCKLn/GZvUKoBA== +"@babel/plugin-transform-dynamic-import@^7.25.8": + version "7.25.8" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.25.8.tgz#f1edbe75b248cf44c70c8ca8ed3818a668753aaa" + integrity sha512-gznWY+mr4ZQL/EWPcbBQUP3BXS5FwZp8RUOw06BaRn8tQLzN4XLIxXejpHN9Qo8x8jjBmAAKp6FoS51AgkSA/A== dependencies: - "@babel/helper-plugin-utils" "^7.20.2" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-transform-dotall-regex@^7.18.6", "@babel/plugin-transform-dotall-regex@^7.4.4": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.18.6.tgz#b286b3e7aae6c7b861e45bed0a2fafd6b1a4fef8" - integrity sha512-6S3jpun1eEbAxq7TdjLotAsl4WpQI9DxfkycRcKrjhQYzU87qpXdknpBg/e+TdcMehqGnLFi7tnFUBR02Vq6wg== +"@babel/plugin-transform-exponentiation-operator@^7.25.7": + version "7.25.7" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.25.7.tgz#5961a3a23a398faccd6cddb34a2182807d75fb5f" + integrity sha512-yjqtpstPfZ0h/y40fAXRv2snciYr0OAoMXY/0ClC7tm4C/nG5NJKmIItlaYlLbIVAWNfrYuy9dq1bE0SbX0PEg== dependencies: - "@babel/helper-create-regexp-features-plugin" "^7.18.6" - "@babel/helper-plugin-utils" "^7.18.6" + "@babel/helper-builder-binary-assignment-operator-visitor" "^7.25.7" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-transform-duplicate-keys@^7.18.9": - version "7.18.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.18.9.tgz#687f15ee3cdad6d85191eb2a372c4528eaa0ae0e" - integrity sha512-d2bmXCtZXYc59/0SanQKbiWINadaJXqtvIQIzd4+hNwkWBgyCd5F/2t1kXoUdvPMrxzPvhK6EMQRROxsue+mfw== +"@babel/plugin-transform-export-namespace-from@^7.25.8": + version "7.25.8" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.25.8.tgz#d1988c3019a380b417e0516418b02804d3858145" + integrity sha512-sPtYrduWINTQTW7FtOy99VCTWp4H23UX7vYcut7S4CIMEXU+54zKX9uCoGkLsWXteyaMXzVHgzWbLfQ1w4GZgw== dependencies: - "@babel/helper-plugin-utils" "^7.18.9" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-transform-exponentiation-operator@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.18.6.tgz#421c705f4521888c65e91fdd1af951bfefd4dacd" - integrity sha512-wzEtc0+2c88FVR34aQmiz56dxEkxr2g8DQb/KfaFa1JYXOFVsbhvAonFN6PwVWj++fKmku8NP80plJ5Et4wqHw== +"@babel/plugin-transform-for-of@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.25.7.tgz#0acfea0f27aa290818b5b48a5a44b3f03fc13669" + integrity sha512-n/TaiBGJxYFWvpJDfsxSj9lEEE44BFM1EPGz4KEiTipTgkoFVVcCmzAL3qA7fdQU96dpo4gGf5HBx/KnDvqiHw== dependencies: - "@babel/helper-builder-binary-assignment-operator-visitor" "^7.18.6" - "@babel/helper-plugin-utils" "^7.18.6" + "@babel/helper-plugin-utils" "^7.25.7" + "@babel/helper-skip-transparent-expression-wrappers" "^7.25.7" -"@babel/plugin-transform-for-of@^7.18.8": - version "7.18.8" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.18.8.tgz#6ef8a50b244eb6a0bdbad0c7c61877e4e30097c1" - integrity sha512-yEfTRnjuskWYo0k1mHUqrVWaZwrdq8AYbfrpqULOJOaucGSp4mNMVps+YtA8byoevxS/urwU75vyhQIxcCgiBQ== +"@babel/plugin-transform-function-name@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.25.7.tgz#7e394ccea3693902a8b50ded8b6ae1fa7b8519fd" + integrity sha512-5MCTNcjCMxQ63Tdu9rxyN6cAWurqfrDZ76qvVPrGYdBxIj+EawuuxTu/+dgJlhK5eRz3v1gLwp6XwS8XaX2NiQ== dependencies: - "@babel/helper-plugin-utils" "^7.18.6" + "@babel/helper-compilation-targets" "^7.25.7" + "@babel/helper-plugin-utils" "^7.25.7" + "@babel/traverse" "^7.25.7" -"@babel/plugin-transform-function-name@^7.18.9": - version "7.18.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.18.9.tgz#cc354f8234e62968946c61a46d6365440fc764e0" - integrity sha512-WvIBoRPaJQ5yVHzcnJFor7oS5Ls0PYixlTYE63lCj2RtdQEl15M68FXQlxnG6wdraJIXRdR7KI+hQ7q/9QjrCQ== +"@babel/plugin-transform-json-strings@^7.25.8": + version "7.25.8" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.25.8.tgz#6fb3ec383a2ea92652289fdba653e3f9de722694" + integrity sha512-4OMNv7eHTmJ2YXs3tvxAfa/I43di+VcF+M4Wt66c88EAED1RoGaf1D64cL5FkRpNL+Vx9Hds84lksWvd/wMIdA== dependencies: - "@babel/helper-compilation-targets" "^7.18.9" - "@babel/helper-function-name" "^7.18.9" - "@babel/helper-plugin-utils" "^7.18.9" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-transform-literals@^7.18.9": - version "7.18.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-literals/-/plugin-transform-literals-7.18.9.tgz#72796fdbef80e56fba3c6a699d54f0de557444bc" - integrity sha512-IFQDSRoTPnrAIrI5zoZv73IFeZu2dhu6irxQjY9rNjTT53VmKg9fenjvoiOWOkJ6mm4jKVPtdMzBY98Fp4Z4cg== +"@babel/plugin-transform-literals@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-literals/-/plugin-transform-literals-7.25.7.tgz#70cbdc742f2cfdb1a63ea2cbd018d12a60b213c3" + integrity sha512-fwzkLrSu2fESR/cm4t6vqd7ebNIopz2QHGtjoU+dswQo/P6lwAG04Q98lliE3jkz/XqnbGFLnUcE0q0CVUf92w== dependencies: - "@babel/helper-plugin-utils" "^7.18.9" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-transform-member-expression-literals@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.18.6.tgz#ac9fdc1a118620ac49b7e7a5d2dc177a1bfee88e" - integrity sha512-qSF1ihLGO3q+/g48k85tUjD033C29TNTVB2paCwZPVmOsjn9pClvYYrM2VeJpBY2bcNkuny0YUyTNRyRxJ54KA== +"@babel/plugin-transform-logical-assignment-operators@^7.25.8": + version "7.25.8" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.25.8.tgz#01868ff92daa9e525b4c7902aa51979082a05710" + integrity sha512-f5W0AhSbbI+yY6VakT04jmxdxz+WsID0neG7+kQZbCOjuyJNdL5Nn4WIBm4hRpKnUcO9lP0eipUhFN12JpoH8g== dependencies: - "@babel/helper-plugin-utils" "^7.18.6" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-transform-modules-amd@^7.19.6": - version "7.20.11" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.20.11.tgz#3daccca8e4cc309f03c3a0c4b41dc4b26f55214a" - integrity sha512-NuzCt5IIYOW0O30UvqktzHYR2ud5bOWbY0yaxWZ6G+aFzOMJvrs5YHNikrbdaT15+KNO31nPOy5Fim3ku6Zb5g== +"@babel/plugin-transform-member-expression-literals@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.25.7.tgz#0a36c3fbd450cc9e6485c507f005fa3d1bc8fca5" + integrity sha512-Std3kXwpXfRV0QtQy5JJcRpkqP8/wG4XL7hSKZmGlxPlDqmpXtEPRmhF7ztnlTCtUN3eXRUJp+sBEZjaIBVYaw== dependencies: - "@babel/helper-module-transforms" "^7.20.11" - "@babel/helper-plugin-utils" "^7.20.2" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-transform-modules-commonjs@^7.19.6": - version "7.20.11" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.20.11.tgz#8cb23010869bf7669fd4b3098598b6b2be6dc607" - integrity sha512-S8e1f7WQ7cimJQ51JkAaDrEtohVEitXjgCGAS2N8S31Y42E+kWwfSz83LYz57QdBm7q9diARVqanIaH2oVgQnw== +"@babel/plugin-transform-modules-amd@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.25.7.tgz#bb4e543b5611f6c8c685a2fd485408713a3adf3d" + integrity 
sha512-CgselSGCGzjQvKzghCvDTxKHP3iooenLpJDO842ehn5D2G5fJB222ptnDwQho0WjEvg7zyoxb9P+wiYxiJX5yA== dependencies: - "@babel/helper-module-transforms" "^7.20.11" - "@babel/helper-plugin-utils" "^7.20.2" - "@babel/helper-simple-access" "^7.20.2" + "@babel/helper-module-transforms" "^7.25.7" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-transform-modules-systemjs@^7.19.6": - version "7.20.11" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.20.11.tgz#467ec6bba6b6a50634eea61c9c232654d8a4696e" - integrity sha512-vVu5g9BPQKSFEmvt2TA4Da5N+QVS66EX21d8uoOihC+OCpUoGvzVsXeqFdtAEfVa5BILAeFt+U7yVmLbQnAJmw== +"@babel/plugin-transform-modules-commonjs@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.25.7.tgz#173f0c791bb7407c092ce6d77ee90eb3f2d1d2fd" + integrity sha512-L9Gcahi0kKFYXvweO6n0wc3ZG1ChpSFdgG+eV1WYZ3/dGbJK7vvk91FgGgak8YwRgrCuihF8tE/Xg07EkL5COg== dependencies: - "@babel/helper-hoist-variables" "^7.18.6" - "@babel/helper-module-transforms" "^7.20.11" - "@babel/helper-plugin-utils" "^7.20.2" - "@babel/helper-validator-identifier" "^7.19.1" + "@babel/helper-module-transforms" "^7.25.7" + "@babel/helper-plugin-utils" "^7.25.7" + "@babel/helper-simple-access" "^7.25.7" -"@babel/plugin-transform-modules-umd@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.18.6.tgz#81d3832d6034b75b54e62821ba58f28ed0aab4b9" - integrity sha512-dcegErExVeXcRqNtkRU/z8WlBLnvD4MRnHgNs3MytRO1Mn1sHRyhbcpYbVMGclAqOjdW+9cfkdZno9dFdfKLfQ== +"@babel/plugin-transform-modules-systemjs@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.25.7.tgz#8b14d319a177cc9c85ef8b0512afd429d9e2e60b" + integrity sha512-t9jZIvBmOXJsiuyOwhrIGs8dVcD6jDyg2icw1VL4A/g+FnWyJKwUfSSU2nwJuMV2Zqui856El9u+ElB+j9fV1g== dependencies: - "@babel/helper-module-transforms" "^7.18.6" - "@babel/helper-plugin-utils" "^7.18.6" + "@babel/helper-module-transforms" "^7.25.7" + "@babel/helper-plugin-utils" "^7.25.7" + "@babel/helper-validator-identifier" "^7.25.7" + "@babel/traverse" "^7.25.7" -"@babel/plugin-transform-named-capturing-groups-regex@^7.19.1": - version "7.20.5" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.20.5.tgz#626298dd62ea51d452c3be58b285d23195ba69a8" - integrity sha512-mOW4tTzi5iTLnw+78iEq3gr8Aoq4WNRGpmSlrogqaiCBoR1HFhpU4JkpQFOHfeYx3ReVIFWOQJS4aZBRvuZ6mA== +"@babel/plugin-transform-modules-umd@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.25.7.tgz#00ee7a7e124289549381bfb0e24d87fd7f848367" + integrity sha512-p88Jg6QqsaPh+EB7I9GJrIqi1Zt4ZBHUQtjw3z1bzEXcLh6GfPqzZJ6G+G1HBGKUNukT58MnKG7EN7zXQBCODw== dependencies: - "@babel/helper-create-regexp-features-plugin" "^7.20.5" - "@babel/helper-plugin-utils" "^7.20.2" + "@babel/helper-module-transforms" "^7.25.7" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-transform-new-target@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.18.6.tgz#d128f376ae200477f37c4ddfcc722a8a1b3246a8" - integrity 
sha512-DjwFA/9Iu3Z+vrAn+8pBUGcjhxKguSMlsFqeCKbhb9BAV756v0krzVK04CRDi/4aqmk8BsHb4a/gFcaA5joXRw== +"@babel/plugin-transform-named-capturing-groups-regex@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.25.7.tgz#a2f3f6d7f38693b462542951748f0a72a34d196d" + integrity sha512-BtAT9LzCISKG3Dsdw5uso4oV1+v2NlVXIIomKJgQybotJY3OwCwJmkongjHgwGKoZXd0qG5UZ12JUlDQ07W6Ow== dependencies: - "@babel/helper-plugin-utils" "^7.18.6" + "@babel/helper-create-regexp-features-plugin" "^7.25.7" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-transform-object-super@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.18.6.tgz#fb3c6ccdd15939b6ff7939944b51971ddc35912c" - integrity sha512-uvGz6zk+pZoS1aTZrOvrbj6Pp/kK2mp45t2B+bTDre2UgsZZ8EZLSJtUg7m/no0zOJUWgFONpB7Zv9W2tSaFlA== +"@babel/plugin-transform-new-target@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.25.7.tgz#52b2bde523b76c548749f38dc3054f1f45e82bc9" + integrity sha512-CfCS2jDsbcZaVYxRFo2qtavW8SpdzmBXC2LOI4oO0rP+JSRDxxF3inF4GcPsLgfb5FjkhXG5/yR/lxuRs2pySA== dependencies: - "@babel/helper-plugin-utils" "^7.18.6" - "@babel/helper-replace-supers" "^7.18.6" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-transform-parameters@^7.12.1", "@babel/plugin-transform-parameters@^7.20.1", "@babel/plugin-transform-parameters@^7.20.7": - version "7.20.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.20.7.tgz#0ee349e9d1bc96e78e3b37a7af423a4078a7083f" - integrity sha512-WiWBIkeHKVOSYPO0pWkxGPfKeWrCJyD3NJ53+Lrp/QMSZbsVPovrVl2aWZ19D/LTVnaDv5Ap7GJ/B2CTOZdrfA== +"@babel/plugin-transform-nullish-coalescing-operator@^7.25.8": + version "7.25.8" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.25.8.tgz#befb4900c130bd52fccf2b926314557987f1b552" + integrity sha512-Z7WJJWdQc8yCWgAmjI3hyC+5PXIubH9yRKzkl9ZEG647O9szl9zvmKLzpbItlijBnVhTUf1cpyWBsZ3+2wjWPQ== dependencies: - "@babel/helper-plugin-utils" "^7.20.2" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-transform-property-literals@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.18.6.tgz#e22498903a483448e94e032e9bbb9c5ccbfc93a3" - integrity sha512-cYcs6qlgafTud3PAzrrRNbQtfpQ8+y/+M5tKmksS9+M1ckbH6kzY8MrexEM9mcA6JDsukE19iIRvAyYl463sMg== +"@babel/plugin-transform-numeric-separator@^7.25.8": + version "7.25.8" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.25.8.tgz#91e370486371637bd42161052f2602c701386891" + integrity sha512-rm9a5iEFPS4iMIy+/A/PiS0QN0UyjPIeVvbU5EMZFKJZHt8vQnasbpo3T3EFcxzCeYO0BHfc4RqooCZc51J86Q== dependencies: - "@babel/helper-plugin-utils" "^7.18.6" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-transform-react-constant-elements@^7.12.1", "@babel/plugin-transform-react-constant-elements@^7.18.12": - version "7.20.2" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.20.2.tgz#3f02c784e0b711970d7d8ccc96c4359d64e27ac7" - integrity 
sha512-KS/G8YI8uwMGKErLFOHS/ekhqdHhpEloxs43NecQHVgo2QuQSyJhGIY1fL8UGl9wy5ItVwwoUL4YxVqsplGq2g== +"@babel/plugin-transform-object-rest-spread@^7.25.8": + version "7.25.8" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.25.8.tgz#0904ac16bcce41df4db12d915d6780f85c7fb04b" + integrity sha512-LkUu0O2hnUKHKE7/zYOIjByMa4VRaV2CD/cdGz0AxU9we+VA3kDDggKEzI0Oz1IroG+6gUP6UmWEHBMWZU316g== dependencies: - "@babel/helper-plugin-utils" "^7.20.2" + "@babel/helper-compilation-targets" "^7.25.7" + "@babel/helper-plugin-utils" "^7.25.7" + "@babel/plugin-transform-parameters" "^7.25.7" -"@babel/plugin-transform-react-display-name@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.18.6.tgz#8b1125f919ef36ebdfff061d664e266c666b9415" - integrity sha512-TV4sQ+T013n61uMoygyMRm+xf04Bd5oqFpv2jAEQwSZ8NwQA7zeRPg1LMVg2PWi3zWBz+CLKD+v5bcpZ/BS0aA== +"@babel/plugin-transform-object-super@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.25.7.tgz#582a9cea8cf0a1e02732be5b5a703a38dedf5661" + integrity sha512-pWT6UXCEW3u1t2tcAGtE15ornCBvopHj9Bps9D2DsH15APgNVOTwwczGckX+WkAvBmuoYKRCFa4DK+jM8vh5AA== dependencies: - "@babel/helper-plugin-utils" "^7.18.6" + "@babel/helper-plugin-utils" "^7.25.7" + "@babel/helper-replace-supers" "^7.25.7" -"@babel/plugin-transform-react-jsx-development@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.18.6.tgz#dbe5c972811e49c7405b630e4d0d2e1380c0ddc5" - integrity sha512-SA6HEjwYFKF7WDjWcMcMGUimmw/nhNRDWxr+KaLSCrkD/LMDBvWRmHAYgE1HDeF8KUuI8OAu+RT6EOtKxSW2qA== +"@babel/plugin-transform-optional-catch-binding@^7.25.8": + version "7.25.8" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.25.8.tgz#2649b86a3bb202c6894ec81a6ddf41b94d8f3103" + integrity sha512-EbQYweoMAHOn7iJ9GgZo14ghhb9tTjgOc88xFgYngifx7Z9u580cENCV159M4xDh3q/irbhSjZVpuhpC2gKBbg== dependencies: - "@babel/plugin-transform-react-jsx" "^7.18.6" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-transform-react-jsx@^7.18.6": - version "7.20.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.20.7.tgz#025d85a1935fd7e19dfdcb1b1d4df34d4da484f7" - integrity sha512-Tfq7qqD+tRj3EoDhY00nn2uP2hsRxgYGi5mLQ5TimKav0a9Lrpd4deE+fcLXU8zFYRjlKPHZhpCvfEA6qnBxqQ== +"@babel/plugin-transform-optional-chaining@^7.25.7", "@babel/plugin-transform-optional-chaining@^7.25.8": + version "7.25.8" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.25.8.tgz#f46283b78adcc5b6ab988a952f989e7dce70653f" + integrity sha512-q05Bk7gXOxpTHoQ8RSzGSh/LHVB9JEIkKnk3myAWwZHnYiTGYtbdrYkIsS8Xyh4ltKf7GNUSgzs/6P2bJtBAQg== dependencies: - "@babel/helper-annotate-as-pure" "^7.18.6" - "@babel/helper-module-imports" "^7.18.6" - "@babel/helper-plugin-utils" "^7.20.2" - "@babel/plugin-syntax-jsx" "^7.18.6" - "@babel/types" "^7.20.7" + "@babel/helper-plugin-utils" "^7.25.7" + "@babel/helper-skip-transparent-expression-wrappers" "^7.25.7" -"@babel/plugin-transform-react-pure-annotations@^7.18.6": - version "7.18.6" - resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.18.6.tgz#561af267f19f3e5d59291f9950fd7b9663d0d844" - integrity sha512-I8VfEPg9r2TRDdvnHgPepTKvuRomzA8+u+nhY7qSI1fR2hRNebasZEETLyM5mAUr0Ku56OkXJ0I7NHJnO6cJiQ== +"@babel/plugin-transform-parameters@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.25.7.tgz#80c38b03ef580f6d6bffe1c5254bb35986859ac7" + integrity sha512-FYiTvku63me9+1Nz7TOx4YMtW3tWXzfANZtrzHhUZrz4d47EEtMQhzFoZWESfXuAMMT5mwzD4+y1N8ONAX6lMQ== dependencies: - "@babel/helper-annotate-as-pure" "^7.18.6" - "@babel/helper-plugin-utils" "^7.18.6" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-transform-regenerator@^7.18.6": - version "7.20.5" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.20.5.tgz#57cda588c7ffb7f4f8483cc83bdcea02a907f04d" - integrity sha512-kW/oO7HPBtntbsahzQ0qSE3tFvkFwnbozz3NWFhLGqH75vLEg+sCGngLlhVkePlCs3Jv0dBBHDzCHxNiFAQKCQ== +"@babel/plugin-transform-private-methods@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.25.7.tgz#c790a04f837b4bd61d6b0317b43aa11ff67dce80" + integrity sha512-KY0hh2FluNxMLwOCHbxVOKfdB5sjWG4M183885FmaqWWiGMhRZq4DQRKH6mHdEucbJnyDyYiZNwNG424RymJjA== dependencies: - "@babel/helper-plugin-utils" "^7.20.2" - regenerator-transform "^0.15.1" + "@babel/helper-create-class-features-plugin" "^7.25.7" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-transform-reserved-words@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.18.6.tgz#b1abd8ebf8edaa5f7fe6bbb8d2133d23b6a6f76a" - integrity sha512-oX/4MyMoypzHjFrT1CdivfKZ+XvIPMFXwwxHp/r0Ddy2Vuomt4HDFGmft1TAY2yiTKiNSsh3kjBAzcM8kSdsjA== +"@babel/plugin-transform-private-property-in-object@^7.25.8": + version "7.25.8" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.25.8.tgz#1234f856ce85e061f9688764194e51ea7577c434" + integrity sha512-8Uh966svuB4V8RHHg0QJOB32QK287NBksJOByoKmHMp1TAobNniNalIkI2i5IPj5+S9NYCG4VIjbEuiSN8r+ow== dependencies: - "@babel/helper-plugin-utils" "^7.18.6" + "@babel/helper-annotate-as-pure" "^7.25.7" + "@babel/helper-create-class-features-plugin" "^7.25.7" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-transform-runtime@^7.18.6": - version "7.19.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.19.6.tgz#9d2a9dbf4e12644d6f46e5e75bfbf02b5d6e9194" - integrity sha512-PRH37lz4JU156lYFW1p8OxE5i7d6Sl/zV58ooyr+q1J1lnQPyg5tIiXlIwNVhJaY4W3TmOtdc8jqdXQcB1v5Yw== +"@babel/plugin-transform-property-literals@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.25.7.tgz#a8612b4ea4e10430f00012ecf0155662c7d6550d" + integrity sha512-lQEeetGKfFi0wHbt8ClQrUSUMfEeI3MMm74Z73T9/kuz990yYVtfofjf3NuA42Jy3auFOpbjDyCSiIkTs1VIYw== dependencies: - "@babel/helper-module-imports" "^7.18.6" - "@babel/helper-plugin-utils" "^7.19.0" - babel-plugin-polyfill-corejs2 "^0.3.3" - babel-plugin-polyfill-corejs3 "^0.6.0" - babel-plugin-polyfill-regenerator "^0.4.1" - semver "^6.3.0" + "@babel/helper-plugin-utils" "^7.25.7" 
-"@babel/plugin-transform-shorthand-properties@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.18.6.tgz#6d6df7983d67b195289be24909e3f12a8f664dc9" - integrity sha512-eCLXXJqv8okzg86ywZJbRn19YJHU4XUa55oz2wbHhaQVn/MM+XhukiT7SYqp/7o00dg52Rj51Ny+Ecw4oyoygw== +"@babel/plugin-transform-react-constant-elements@^7.12.1", "@babel/plugin-transform-react-constant-elements@^7.21.3": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.25.7.tgz#b7f18dcdfac137a635a3f1242ea7c931df82a666" + integrity sha512-/qXt69Em8HgsjCLu7G3zdIQn7A2QwmYND7Wa0LTp09Na+Zn8L5d0A7wSXrKi18TJRc/Q5S1i1De/SU1LzVkSvA== dependencies: - "@babel/helper-plugin-utils" "^7.18.6" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-transform-spread@^7.19.0": - version "7.20.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-spread/-/plugin-transform-spread-7.20.7.tgz#c2d83e0b99d3bf83e07b11995ee24bf7ca09401e" - integrity sha512-ewBbHQ+1U/VnH1fxltbJqDeWBU1oNLG8Dj11uIv3xVf7nrQu0bPGe5Rf716r7K5Qz+SqtAOVswoVunoiBtGhxw== +"@babel/plugin-transform-react-display-name@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.25.7.tgz#2753e875a1b702fb1d806c4f5d4c194d64cadd88" + integrity sha512-r0QY7NVU8OnrwE+w2IWiRom0wwsTbjx4+xH2RTd7AVdof3uurXOF+/mXHQDRk+2jIvWgSaCHKMgggfvM4dyUGA== dependencies: - "@babel/helper-plugin-utils" "^7.20.2" - "@babel/helper-skip-transparent-expression-wrappers" "^7.20.0" - -"@babel/plugin-transform-sticky-regex@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.18.6.tgz#c6706eb2b1524028e317720339583ad0f444adcc" - integrity sha512-kfiDrDQ+PBsQDO85yj1icueWMfGfJFKN1KCkndygtu/C9+XUfydLC8Iv5UYJqRwy4zk8EcplRxEOeLyjq1gm6Q== - dependencies: - "@babel/helper-plugin-utils" "^7.18.6" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-transform-template-literals@^7.18.9": - version "7.18.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.18.9.tgz#04ec6f10acdaa81846689d63fae117dd9c243a5e" - integrity sha512-S8cOWfT82gTezpYOiVaGHrCbhlHgKhQt8XH5ES46P2XWmX92yisoZywf5km75wv5sYcXDUCLMmMxOLCtthDgMA== +"@babel/plugin-transform-react-jsx-development@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.25.7.tgz#2fbd77887b8fa2942d7cb61edf1029ea1b048554" + integrity sha512-5yd3lH1PWxzW6IZj+p+Y4OLQzz0/LzlOG8vGqonHfVR3euf1vyzyMUJk9Ac+m97BH46mFc/98t9PmYLyvgL3qg== dependencies: - "@babel/helper-plugin-utils" "^7.18.9" + "@babel/plugin-transform-react-jsx" "^7.25.7" -"@babel/plugin-transform-typeof-symbol@^7.18.9": - version "7.18.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.18.9.tgz#c8cea68263e45addcd6afc9091429f80925762c0" - integrity sha512-SRfwTtF11G2aemAZWivL7PD+C9z52v9EvMqH9BuYbabyPuKUvSWks3oCg6041pT925L4zVFqaVBeECwsmlguEw== +"@babel/plugin-transform-react-jsx@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.25.7.tgz#f5e2af6020a562fe048dd343e571c4428e6c5632" + integrity 
sha512-vILAg5nwGlR9EXE8JIOX4NHXd49lrYbN8hnjffDtoULwpL9hUx/N55nqh2qd0q6FyNDfjl9V79ecKGvFbcSA0Q== dependencies: - "@babel/helper-plugin-utils" "^7.18.9" + "@babel/helper-annotate-as-pure" "^7.25.7" + "@babel/helper-module-imports" "^7.25.7" + "@babel/helper-plugin-utils" "^7.25.7" + "@babel/plugin-syntax-jsx" "^7.25.7" + "@babel/types" "^7.25.7" -"@babel/plugin-transform-typescript@^7.18.6": - version "7.20.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.20.7.tgz#673f49499cd810ae32a1ea5f3f8fab370987e055" - integrity sha512-m3wVKEvf6SoszD8pu4NZz3PvfKRCMgk6D6d0Qi9hNnlM5M6CFS92EgF4EiHVLKbU0r/r7ty1hg7NPZwE7WRbYw== +"@babel/plugin-transform-react-pure-annotations@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.25.7.tgz#6d0b8dadb2d3c5cbb8ade68c5efd49470b0d65f7" + integrity sha512-6YTHJ7yjjgYqGc8S+CbEXhLICODk0Tn92j+vNJo07HFk9t3bjFgAKxPLFhHwF2NjmQVSI1zBRfBWUeVBa2osfA== dependencies: - "@babel/helper-create-class-features-plugin" "^7.20.7" - "@babel/helper-plugin-utils" "^7.20.2" - "@babel/plugin-syntax-typescript" "^7.20.0" + "@babel/helper-annotate-as-pure" "^7.25.7" + "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-transform-unicode-escapes@^7.18.10": - version "7.18.10" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.18.10.tgz#1ecfb0eda83d09bbcb77c09970c2dd55832aa246" - integrity sha512-kKAdAI+YzPgGY/ftStBFXTI1LZFju38rYThnfMykS+IXy8BVx+res7s2fxf1l8I35DV2T97ezo6+SGrXz6B3iQ== +"@babel/plugin-transform-regenerator@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.25.7.tgz#6eb006e6d26f627bc2f7844a9f19770721ad6f3e" + integrity sha512-mgDoQCRjrY3XK95UuV60tZlFCQGXEtMg8H+IsW72ldw1ih1jZhzYXbJvghmAEpg5UVhhnCeia1CkGttUvCkiMQ== dependencies: - "@babel/helper-plugin-utils" "^7.18.9" + "@babel/helper-plugin-utils" "^7.25.7" + regenerator-transform "^0.15.2" -"@babel/plugin-transform-unicode-regex@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.18.6.tgz#194317225d8c201bbae103364ffe9e2cea36cdca" - integrity sha512-gE7A6Lt7YLnNOL3Pb9BNeZvi+d8l7tcRrG4+pwJjK9hD2xX4mEvjlQW60G9EEmfXVYRPv9VRQcyegIVHCql/AA== +"@babel/plugin-transform-reserved-words@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.25.7.tgz#dc56b25e02afaabef3ce0c5b06b0916e8523e995" + integrity sha512-3OfyfRRqiGeOvIWSagcwUTVk2hXBsr/ww7bLn6TRTuXnexA+Udov2icFOxFX9abaj4l96ooYkcNN1qi2Zvqwng== dependencies: - "@babel/helper-create-regexp-features-plugin" "^7.18.6" - "@babel/helper-plugin-utils" "^7.18.6" - -"@babel/preset-env@^7.12.1", "@babel/preset-env@^7.18.6", "@babel/preset-env@^7.19.4": - version "7.20.2" - resolved "https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.20.2.tgz#9b1642aa47bb9f43a86f9630011780dab7f86506" - integrity sha512-1G0efQEWR1EHkKvKHqbG+IN/QdgwfByUpM5V5QroDzGV2t3S/WXNQd693cHiHTlCFMpr9B6FkPFXDA2lQcKoDg== - dependencies: - "@babel/compat-data" "^7.20.1" - "@babel/helper-compilation-targets" "^7.20.0" - "@babel/helper-plugin-utils" "^7.20.2" - "@babel/helper-validator-option" "^7.18.6" - "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression" "^7.18.6" - 
"@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining" "^7.18.9" - "@babel/plugin-proposal-async-generator-functions" "^7.20.1" - "@babel/plugin-proposal-class-properties" "^7.18.6" - "@babel/plugin-proposal-class-static-block" "^7.18.6" - "@babel/plugin-proposal-dynamic-import" "^7.18.6" - "@babel/plugin-proposal-export-namespace-from" "^7.18.9" - "@babel/plugin-proposal-json-strings" "^7.18.6" - "@babel/plugin-proposal-logical-assignment-operators" "^7.18.9" - "@babel/plugin-proposal-nullish-coalescing-operator" "^7.18.6" - "@babel/plugin-proposal-numeric-separator" "^7.18.6" - "@babel/plugin-proposal-object-rest-spread" "^7.20.2" - "@babel/plugin-proposal-optional-catch-binding" "^7.18.6" - "@babel/plugin-proposal-optional-chaining" "^7.18.9" - "@babel/plugin-proposal-private-methods" "^7.18.6" - "@babel/plugin-proposal-private-property-in-object" "^7.18.6" - "@babel/plugin-proposal-unicode-property-regex" "^7.18.6" - "@babel/plugin-syntax-async-generators" "^7.8.4" - "@babel/plugin-syntax-class-properties" "^7.12.13" - "@babel/plugin-syntax-class-static-block" "^7.14.5" - "@babel/plugin-syntax-dynamic-import" "^7.8.3" - "@babel/plugin-syntax-export-namespace-from" "^7.8.3" - "@babel/plugin-syntax-import-assertions" "^7.20.0" - "@babel/plugin-syntax-json-strings" "^7.8.3" - "@babel/plugin-syntax-logical-assignment-operators" "^7.10.4" - "@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.3" - "@babel/plugin-syntax-numeric-separator" "^7.10.4" - "@babel/plugin-syntax-object-rest-spread" "^7.8.3" - "@babel/plugin-syntax-optional-catch-binding" "^7.8.3" - "@babel/plugin-syntax-optional-chaining" "^7.8.3" - "@babel/plugin-syntax-private-property-in-object" "^7.14.5" - "@babel/plugin-syntax-top-level-await" "^7.14.5" - "@babel/plugin-transform-arrow-functions" "^7.18.6" - "@babel/plugin-transform-async-to-generator" "^7.18.6" - "@babel/plugin-transform-block-scoped-functions" "^7.18.6" - "@babel/plugin-transform-block-scoping" "^7.20.2" - "@babel/plugin-transform-classes" "^7.20.2" - "@babel/plugin-transform-computed-properties" "^7.18.9" - "@babel/plugin-transform-destructuring" "^7.20.2" - "@babel/plugin-transform-dotall-regex" "^7.18.6" - "@babel/plugin-transform-duplicate-keys" "^7.18.9" - "@babel/plugin-transform-exponentiation-operator" "^7.18.6" - "@babel/plugin-transform-for-of" "^7.18.8" - "@babel/plugin-transform-function-name" "^7.18.9" - "@babel/plugin-transform-literals" "^7.18.9" - "@babel/plugin-transform-member-expression-literals" "^7.18.6" - "@babel/plugin-transform-modules-amd" "^7.19.6" - "@babel/plugin-transform-modules-commonjs" "^7.19.6" - "@babel/plugin-transform-modules-systemjs" "^7.19.6" - "@babel/plugin-transform-modules-umd" "^7.18.6" - "@babel/plugin-transform-named-capturing-groups-regex" "^7.19.1" - "@babel/plugin-transform-new-target" "^7.18.6" - "@babel/plugin-transform-object-super" "^7.18.6" - "@babel/plugin-transform-parameters" "^7.20.1" - "@babel/plugin-transform-property-literals" "^7.18.6" - "@babel/plugin-transform-regenerator" "^7.18.6" - "@babel/plugin-transform-reserved-words" "^7.18.6" - "@babel/plugin-transform-shorthand-properties" "^7.18.6" - "@babel/plugin-transform-spread" "^7.19.0" - "@babel/plugin-transform-sticky-regex" "^7.18.6" - "@babel/plugin-transform-template-literals" "^7.18.9" - "@babel/plugin-transform-typeof-symbol" "^7.18.9" - "@babel/plugin-transform-unicode-escapes" "^7.18.10" - "@babel/plugin-transform-unicode-regex" "^7.18.6" - "@babel/preset-modules" "^0.1.5" - "@babel/types" "^7.20.2" - 
babel-plugin-polyfill-corejs2 "^0.3.3" - babel-plugin-polyfill-corejs3 "^0.6.0" - babel-plugin-polyfill-regenerator "^0.4.1" - core-js-compat "^3.25.1" - semver "^6.3.0" - -"@babel/preset-modules@^0.1.5": - version "0.1.5" - resolved "https://registry.yarnpkg.com/@babel/preset-modules/-/preset-modules-0.1.5.tgz#ef939d6e7f268827e1841638dc6ff95515e115d9" - integrity sha512-A57th6YRG7oR3cq/yt/Y84MvGgE0eJG2F1JLhKuyG+jFxEgrd/HAMJatiFtmOiZurz+0DkrvbheCLaV5f2JfjA== + "@babel/helper-plugin-utils" "^7.25.7" + +"@babel/plugin-transform-runtime@^7.22.9": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.25.7.tgz#435a4fab67273f00047dc806e05069c9c6344e12" + integrity sha512-Y9p487tyTzB0yDYQOtWnC+9HGOuogtP3/wNpun1xJXEEvI6vip59BSBTsHnekZLqxmPcgsrAKt46HAAb//xGhg== + dependencies: + "@babel/helper-module-imports" "^7.25.7" + "@babel/helper-plugin-utils" "^7.25.7" + babel-plugin-polyfill-corejs2 "^0.4.10" + babel-plugin-polyfill-corejs3 "^0.10.6" + babel-plugin-polyfill-regenerator "^0.6.1" + semver "^6.3.1" + +"@babel/plugin-transform-shorthand-properties@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.25.7.tgz#92690a9c671915602d91533c278cc8f6bf12275f" + integrity sha512-uBbxNwimHi5Bv3hUccmOFlUy3ATO6WagTApenHz9KzoIdn0XeACdB12ZJ4cjhuB2WSi80Ez2FWzJnarccriJeA== + dependencies: + "@babel/helper-plugin-utils" "^7.25.7" + +"@babel/plugin-transform-spread@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-spread/-/plugin-transform-spread-7.25.7.tgz#df83e899a9fc66284ee601a7b738568435b92998" + integrity sha512-Mm6aeymI0PBh44xNIv/qvo8nmbkpZze1KvR8MkEqbIREDxoiWTi18Zr2jryfRMwDfVZF9foKh060fWgni44luw== + dependencies: + "@babel/helper-plugin-utils" "^7.25.7" + "@babel/helper-skip-transparent-expression-wrappers" "^7.25.7" + +"@babel/plugin-transform-sticky-regex@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.25.7.tgz#341c7002bef7f29037be7fb9684e374442dd0d17" + integrity sha512-ZFAeNkpGuLnAQ/NCsXJ6xik7Id+tHuS+NT+ue/2+rn/31zcdnupCdmunOizEaP0JsUmTFSTOPoQY7PkK2pttXw== + dependencies: + "@babel/helper-plugin-utils" "^7.25.7" + +"@babel/plugin-transform-template-literals@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.25.7.tgz#e566c581bb16d8541dd8701093bb3457adfce16b" + integrity sha512-SI274k0nUsFFmyQupiO7+wKATAmMFf8iFgq2O+vVFXZ0SV9lNfT1NGzBEhjquFmD8I9sqHLguH+gZVN3vww2AA== + dependencies: + "@babel/helper-plugin-utils" "^7.25.7" + +"@babel/plugin-transform-typeof-symbol@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.25.7.tgz#debb1287182efd20488f126be343328c679b66eb" + integrity sha512-OmWmQtTHnO8RSUbL0NTdtpbZHeNTnm68Gj5pA4Y2blFNh+V4iZR68V1qL9cI37J21ZN7AaCnkfdHtLExQPf2uA== + dependencies: + "@babel/helper-plugin-utils" "^7.25.7" + +"@babel/plugin-transform-typescript@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.25.7.tgz#8fc7c3d28ddd36bce45b9b48594129d0e560cfbe" + integrity sha512-VKlgy2vBzj8AmEzunocMun2fF06bsSWV+FvVXohtL6FGve/+L217qhHxRTVGHEDO/YR8IANcjzgJsd04J8ge5Q== + dependencies: + "@babel/helper-annotate-as-pure" "^7.25.7" + 
"@babel/helper-create-class-features-plugin" "^7.25.7" + "@babel/helper-plugin-utils" "^7.25.7" + "@babel/helper-skip-transparent-expression-wrappers" "^7.25.7" + "@babel/plugin-syntax-typescript" "^7.25.7" + +"@babel/plugin-transform-unicode-escapes@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.25.7.tgz#973592b6d13a914794e1de8cf1383e50e0f87f81" + integrity sha512-BN87D7KpbdiABA+t3HbVqHzKWUDN3dymLaTnPFAMyc8lV+KN3+YzNhVRNdinaCPA4AUqx7ubXbQ9shRjYBl3SQ== + dependencies: + "@babel/helper-plugin-utils" "^7.25.7" + +"@babel/plugin-transform-unicode-property-regex@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.25.7.tgz#25349197cce964b1343f74fa7cfdf791a1b1919e" + integrity sha512-IWfR89zcEPQGB/iB408uGtSPlQd3Jpq11Im86vUgcmSTcoWAiQMCTOa2K2yNNqFJEBVICKhayctee65Ka8OB0w== + dependencies: + "@babel/helper-create-regexp-features-plugin" "^7.25.7" + "@babel/helper-plugin-utils" "^7.25.7" + +"@babel/plugin-transform-unicode-regex@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.25.7.tgz#f93a93441baf61f713b6d5552aaa856bfab34809" + integrity sha512-8JKfg/hiuA3qXnlLx8qtv5HWRbgyFx2hMMtpDDuU2rTckpKkGu4ycK5yYHwuEa16/quXfoxHBIApEsNyMWnt0g== + dependencies: + "@babel/helper-create-regexp-features-plugin" "^7.25.7" + "@babel/helper-plugin-utils" "^7.25.7" + +"@babel/plugin-transform-unicode-sets-regex@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.25.7.tgz#d1b3295d29e0f8f4df76abc909ad1ebee919560c" + integrity sha512-YRW8o9vzImwmh4Q3Rffd09bH5/hvY0pxg+1H1i0f7APoUeg12G7+HhLj9ZFNIrYkgBXhIijPJ+IXypN0hLTIbw== + dependencies: + "@babel/helper-create-regexp-features-plugin" "^7.25.7" + "@babel/helper-plugin-utils" "^7.25.7" + +"@babel/preset-env@^7.12.1", "@babel/preset-env@^7.20.2", "@babel/preset-env@^7.22.9": + version "7.25.8" + resolved "https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.25.8.tgz#dc6b719627fb29cd9cccbbbe041802fd575b524c" + integrity sha512-58T2yulDHMN8YMUxiLq5YmWUnlDCyY1FsHM+v12VMx+1/FlrUj5tY50iDCpofFQEM8fMYOaY9YRvym2jcjn1Dg== + dependencies: + "@babel/compat-data" "^7.25.8" + "@babel/helper-compilation-targets" "^7.25.7" + "@babel/helper-plugin-utils" "^7.25.7" + "@babel/helper-validator-option" "^7.25.7" + "@babel/plugin-bugfix-firefox-class-in-computed-class-key" "^7.25.7" + "@babel/plugin-bugfix-safari-class-field-initializer-scope" "^7.25.7" + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression" "^7.25.7" + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining" "^7.25.7" + "@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly" "^7.25.7" + "@babel/plugin-proposal-private-property-in-object" "7.21.0-placeholder-for-preset-env.2" + "@babel/plugin-syntax-import-assertions" "^7.25.7" + "@babel/plugin-syntax-import-attributes" "^7.25.7" + "@babel/plugin-syntax-unicode-sets-regex" "^7.18.6" + "@babel/plugin-transform-arrow-functions" "^7.25.7" + "@babel/plugin-transform-async-generator-functions" "^7.25.8" + "@babel/plugin-transform-async-to-generator" "^7.25.7" + "@babel/plugin-transform-block-scoped-functions" "^7.25.7" + "@babel/plugin-transform-block-scoping" "^7.25.7" + "@babel/plugin-transform-class-properties" "^7.25.7" + 
"@babel/plugin-transform-class-static-block" "^7.25.8" + "@babel/plugin-transform-classes" "^7.25.7" + "@babel/plugin-transform-computed-properties" "^7.25.7" + "@babel/plugin-transform-destructuring" "^7.25.7" + "@babel/plugin-transform-dotall-regex" "^7.25.7" + "@babel/plugin-transform-duplicate-keys" "^7.25.7" + "@babel/plugin-transform-duplicate-named-capturing-groups-regex" "^7.25.7" + "@babel/plugin-transform-dynamic-import" "^7.25.8" + "@babel/plugin-transform-exponentiation-operator" "^7.25.7" + "@babel/plugin-transform-export-namespace-from" "^7.25.8" + "@babel/plugin-transform-for-of" "^7.25.7" + "@babel/plugin-transform-function-name" "^7.25.7" + "@babel/plugin-transform-json-strings" "^7.25.8" + "@babel/plugin-transform-literals" "^7.25.7" + "@babel/plugin-transform-logical-assignment-operators" "^7.25.8" + "@babel/plugin-transform-member-expression-literals" "^7.25.7" + "@babel/plugin-transform-modules-amd" "^7.25.7" + "@babel/plugin-transform-modules-commonjs" "^7.25.7" + "@babel/plugin-transform-modules-systemjs" "^7.25.7" + "@babel/plugin-transform-modules-umd" "^7.25.7" + "@babel/plugin-transform-named-capturing-groups-regex" "^7.25.7" + "@babel/plugin-transform-new-target" "^7.25.7" + "@babel/plugin-transform-nullish-coalescing-operator" "^7.25.8" + "@babel/plugin-transform-numeric-separator" "^7.25.8" + "@babel/plugin-transform-object-rest-spread" "^7.25.8" + "@babel/plugin-transform-object-super" "^7.25.7" + "@babel/plugin-transform-optional-catch-binding" "^7.25.8" + "@babel/plugin-transform-optional-chaining" "^7.25.8" + "@babel/plugin-transform-parameters" "^7.25.7" + "@babel/plugin-transform-private-methods" "^7.25.7" + "@babel/plugin-transform-private-property-in-object" "^7.25.8" + "@babel/plugin-transform-property-literals" "^7.25.7" + "@babel/plugin-transform-regenerator" "^7.25.7" + "@babel/plugin-transform-reserved-words" "^7.25.7" + "@babel/plugin-transform-shorthand-properties" "^7.25.7" + "@babel/plugin-transform-spread" "^7.25.7" + "@babel/plugin-transform-sticky-regex" "^7.25.7" + "@babel/plugin-transform-template-literals" "^7.25.7" + "@babel/plugin-transform-typeof-symbol" "^7.25.7" + "@babel/plugin-transform-unicode-escapes" "^7.25.7" + "@babel/plugin-transform-unicode-property-regex" "^7.25.7" + "@babel/plugin-transform-unicode-regex" "^7.25.7" + "@babel/plugin-transform-unicode-sets-regex" "^7.25.7" + "@babel/preset-modules" "0.1.6-no-external-plugins" + babel-plugin-polyfill-corejs2 "^0.4.10" + babel-plugin-polyfill-corejs3 "^0.10.6" + babel-plugin-polyfill-regenerator "^0.6.1" + core-js-compat "^3.38.1" + semver "^6.3.1" + +"@babel/preset-modules@0.1.6-no-external-plugins": + version "0.1.6-no-external-plugins" + resolved "https://registry.yarnpkg.com/@babel/preset-modules/-/preset-modules-0.1.6-no-external-plugins.tgz#ccb88a2c49c817236861fee7826080573b8a923a" + integrity sha512-HrcgcIESLm9aIR842yhJ5RWan/gebQUJ6E/E5+rf0y9o6oj7w0Br+sWuL6kEQ/o/AdfvR1Je9jG18/gnpwjEyA== dependencies: "@babel/helper-plugin-utils" "^7.0.0" - "@babel/plugin-proposal-unicode-property-regex" "^7.4.4" - "@babel/plugin-transform-dotall-regex" "^7.4.4" "@babel/types" "^7.4.4" esutils "^2.0.2" -"@babel/preset-react@^7.12.5", "@babel/preset-react@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/preset-react/-/preset-react-7.18.6.tgz#979f76d6277048dc19094c217b507f3ad517dd2d" - integrity sha512-zXr6atUmyYdiWRVLOZahakYmOBHtWc2WGCkP8PYTgZi0iJXDY2CN180TdrIW4OGOAdLc7TifzDIvtx6izaRIzg== - dependencies: - "@babel/helper-plugin-utils" "^7.18.6" - 
"@babel/helper-validator-option" "^7.18.6" - "@babel/plugin-transform-react-display-name" "^7.18.6" - "@babel/plugin-transform-react-jsx" "^7.18.6" - "@babel/plugin-transform-react-jsx-development" "^7.18.6" - "@babel/plugin-transform-react-pure-annotations" "^7.18.6" - -"@babel/preset-typescript@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/preset-typescript/-/preset-typescript-7.18.6.tgz#ce64be3e63eddc44240c6358daefac17b3186399" - integrity sha512-s9ik86kXBAnD760aybBucdpnLsAt0jK1xqJn2juOn9lkOvSHV60os5hxoVJsPzMQxvnUJFAlkont2DvvaYEBtQ== - dependencies: - "@babel/helper-plugin-utils" "^7.18.6" - "@babel/helper-validator-option" "^7.18.6" - "@babel/plugin-transform-typescript" "^7.18.6" - -"@babel/runtime-corejs3@^7.18.6": - version "7.20.7" - resolved "https://registry.yarnpkg.com/@babel/runtime-corejs3/-/runtime-corejs3-7.20.7.tgz#a1e5ea3d758ba6beb715210142912e3f29981d84" - integrity sha512-jr9lCZ4RbRQmCR28Q8U8Fu49zvFqLxTY9AMOUz+iyMohMoAgpEcVxY+wJNay99oXOpOcCTODkk70NDN2aaJEeg== - dependencies: - core-js-pure "^3.25.1" - regenerator-runtime "^0.13.11" - -"@babel/runtime@^7.1.2", "@babel/runtime@^7.10.2", "@babel/runtime@^7.10.3", "@babel/runtime@^7.12.13", "@babel/runtime@^7.12.5", "@babel/runtime@^7.18.6", "@babel/runtime@^7.8.4": - version "7.20.7" - resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.20.7.tgz#fcb41a5a70550e04a7b708037c7c32f7f356d8fd" - integrity sha512-UF0tvkUtxwAgZ5W/KrkHf0Rn0fdnLDU9ScxBrEVNUprE/MzirjK4MJUX1/BVDv00Sv8cljtukVK1aky++X1SjQ== - dependencies: - regenerator-runtime "^0.13.11" - -"@babel/template@^7.12.7", "@babel/template@^7.18.10", "@babel/template@^7.20.7": - version "7.20.7" - resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.20.7.tgz#a15090c2839a83b02aa996c0b4994005841fd5a8" - integrity sha512-8SegXApWe6VoNw0r9JHpSteLKTpTiLZ4rMlGIm9JQ18KiCtyQiAMEazujAHrUS5flrcqYZa75ukev3P6QmUwUw== - dependencies: - "@babel/code-frame" "^7.18.6" - "@babel/parser" "^7.20.7" - "@babel/types" "^7.20.7" - -"@babel/traverse@^7.12.9", "@babel/traverse@^7.18.8", "@babel/traverse@^7.20.10", "@babel/traverse@^7.20.5", "@babel/traverse@^7.20.7": - version "7.20.10" - resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.20.10.tgz#2bf98239597fcec12f842756f186a9dde6d09230" - integrity sha512-oSf1juCgymrSez8NI4A2sr4+uB/mFd9MXplYGPEBnfAuWmmyeVcHa6xLPiaRBcXkcb/28bgxmQLTVwFKE1yfsg== - dependencies: - "@babel/code-frame" "^7.18.6" - "@babel/generator" "^7.20.7" - "@babel/helper-environment-visitor" "^7.18.9" - "@babel/helper-function-name" "^7.19.0" - "@babel/helper-hoist-variables" "^7.18.6" - "@babel/helper-split-export-declaration" "^7.18.6" - "@babel/parser" "^7.20.7" - "@babel/types" "^7.20.7" - debug "^4.1.0" +"@babel/preset-react@^7.12.5", "@babel/preset-react@^7.18.6", "@babel/preset-react@^7.22.5": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/preset-react/-/preset-react-7.25.7.tgz#081cbe1dea363b732764d06a0fdda67ffa17735d" + integrity sha512-GjV0/mUEEXpi1U5ZgDprMRRgajGMRW3G5FjMr5KLKD8nT2fTG8+h/klV3+6Dm5739QE+K5+2e91qFKAYI3pmRg== + dependencies: + "@babel/helper-plugin-utils" "^7.25.7" + "@babel/helper-validator-option" "^7.25.7" + "@babel/plugin-transform-react-display-name" "^7.25.7" + "@babel/plugin-transform-react-jsx" "^7.25.7" + "@babel/plugin-transform-react-jsx-development" "^7.25.7" + "@babel/plugin-transform-react-pure-annotations" "^7.25.7" + +"@babel/preset-typescript@^7.21.0", "@babel/preset-typescript@^7.22.5": + version "7.25.7" + resolved 
"https://registry.yarnpkg.com/@babel/preset-typescript/-/preset-typescript-7.25.7.tgz#43c5b68eccb856ae5b52274b77b1c3c413cde1b7" + integrity sha512-rkkpaXJZOFN45Fb+Gki0c+KMIglk4+zZXOoMJuyEK8y8Kkc8Jd3BDmP7qPsz0zQMJj+UD7EprF+AqAXcILnexw== + dependencies: + "@babel/helper-plugin-utils" "^7.25.7" + "@babel/helper-validator-option" "^7.25.7" + "@babel/plugin-syntax-jsx" "^7.25.7" + "@babel/plugin-transform-modules-commonjs" "^7.25.7" + "@babel/plugin-transform-typescript" "^7.25.7" + +"@babel/runtime-corejs3@^7.22.6": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/runtime-corejs3/-/runtime-corejs3-7.25.7.tgz#29ca319b1272e9d78faa3f7ee891d0af63c53aa2" + integrity sha512-gMmIEhg35sXk9Te5qbGp3W9YKrvLt3HV658/d3odWrHSqT0JeG5OzsJWFHRLiOohRyjRsJc/x03DhJm3i8VJxg== + dependencies: + core-js-pure "^3.30.2" + regenerator-runtime "^0.14.0" + +"@babel/runtime@^7.1.2", "@babel/runtime@^7.10.3", "@babel/runtime@^7.12.13", "@babel/runtime@^7.12.5", "@babel/runtime@^7.22.6", "@babel/runtime@^7.8.4": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.25.7.tgz#7ffb53c37a8f247c8c4d335e89cdf16a2e0d0fb6" + integrity sha512-FjoyLe754PMiYsFaN5C94ttGiOmBNYTf6pLr4xXHAT5uctHb092PBszndLDR5XA/jghQvn4n7JMHl7dmTgbm9w== + dependencies: + regenerator-runtime "^0.14.0" + +"@babel/template@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.25.7.tgz#27f69ce382855d915b14ab0fe5fb4cbf88fa0769" + integrity sha512-wRwtAgI3bAS+JGU2upWNL9lSlDcRCqD05BZ1n3X2ONLH1WilFP6O1otQjeMK/1g0pvYcXC7b/qVUB1keofjtZA== + dependencies: + "@babel/code-frame" "^7.25.7" + "@babel/parser" "^7.25.7" + "@babel/types" "^7.25.7" + +"@babel/traverse@^7.22.8", "@babel/traverse@^7.23.2", "@babel/traverse@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.25.7.tgz#83e367619be1cab8e4f2892ef30ba04c26a40fa8" + integrity sha512-jatJPT1Zjqvh/1FyJs6qAHL+Dzb7sTb+xr7Q+gM1b+1oBsMsQQ4FkVKb6dFlJvLlVssqkRzV05Jzervt9yhnzg== + dependencies: + "@babel/code-frame" "^7.25.7" + "@babel/generator" "^7.25.7" + "@babel/parser" "^7.25.7" + "@babel/template" "^7.25.7" + "@babel/types" "^7.25.7" + debug "^4.3.1" globals "^11.1.0" -"@babel/types@^7.12.6", "@babel/types@^7.12.7", "@babel/types@^7.18.6", "@babel/types@^7.18.9", "@babel/types@^7.19.0", "@babel/types@^7.20.0", "@babel/types@^7.20.2", "@babel/types@^7.20.5", "@babel/types@^7.20.7", "@babel/types@^7.4.4": - version "7.20.7" - resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.20.7.tgz#54ec75e252318423fc07fb644dc6a58a64c09b7f" - integrity sha512-69OnhBxSSgK0OzTJai4kyPDiKTIe3j+ctaHdIGVbRahTLAT7L3R9oeXHC2aVSuGYt3cVnoAMDmOCgJ2yaiLMvg== +"@babel/types@^7.12.6", "@babel/types@^7.21.3", "@babel/types@^7.25.7", "@babel/types@^7.25.8", "@babel/types@^7.4.4": + version "7.25.8" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.25.8.tgz#5cf6037258e8a9bcad533f4979025140cb9993e1" + integrity sha512-JWtuCu8VQsMladxVz/P4HzHUGCAwpuqacmowgXFs5XjxIgKuNjnLokQzuVjlTvIzODaDmpjT3oxcC48vyk9EWg== dependencies: - "@babel/helper-string-parser" "^7.19.4" - "@babel/helper-validator-identifier" "^7.19.1" + "@babel/helper-string-parser" "^7.25.7" + "@babel/helper-validator-identifier" "^7.25.7" to-fast-properties "^2.0.0" -"@braintree/sanitize-url@^6.0.0": - version "6.0.2" - resolved "https://registry.yarnpkg.com/@braintree/sanitize-url/-/sanitize-url-6.0.2.tgz#6110f918d273fe2af8ea1c4398a88774bb9fc12f" - integrity 
sha512-Tbsj02wXCbqGmzdnXNk0SOF19ChhRU70BsroIi4Pm6Ehp56in6vch94mfbdQ17DozxkL3BAVjbZ4Qc1a0HFRAg== +"@braintree/sanitize-url@^6.0.1": + version "6.0.4" + resolved "https://registry.yarnpkg.com/@braintree/sanitize-url/-/sanitize-url-6.0.4.tgz#923ca57e173c6b232bbbb07347b1be982f03e783" + integrity sha512-s3jaWicZd0pkP0jf5ysyHUI/RE7MHos6qlToFcGWXVp+ykHOy77OUMrfbgJ9it2C5bow7OIQwYYaHjk9XlBQ2A== "@colors/colors@1.5.0": version "1.5.0" resolved "https://registry.yarnpkg.com/@colors/colors/-/colors-1.5.0.tgz#bb504579c1cae923e6576a4f5da43d25f97bdbd9" integrity sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ== -"@docsearch/css@3.3.1": - version "3.3.1" - resolved "https://registry.yarnpkg.com/@docsearch/css/-/css-3.3.1.tgz#32041581bffb1a834072fd21ca66d1dd9f016098" - integrity sha512-nznHXeFHpAYjyaSNFNFpU+IJPjQA7AINM8ONjDx/Zx4O/pGAvqwgmcLNc7zR8qXRutqnzLo06yN63xFn36KFBw== - -"@docsearch/react@^3.1.1": - version "3.3.1" - resolved "https://registry.yarnpkg.com/@docsearch/react/-/react-3.3.1.tgz#47ce4a267a9daf1b5d913b979284b4f624088003" - integrity sha512-wdeQBODPkue6yVEEg4ntt+TiGJ6iXMBUNjBQJ0s1WVoc1OdcCnks/lkQ5LEfXETYR/q9QSbCCBnMjvnSoILaag== - dependencies: - "@algolia/autocomplete-core" "1.7.2" - "@algolia/autocomplete-preset-algolia" "1.7.2" - "@docsearch/css" "3.3.1" - algoliasearch "^4.0.0" - -"@docusaurus/core@2.3.1": - version "2.3.1" - resolved "https://registry.yarnpkg.com/@docusaurus/core/-/core-2.3.1.tgz#32849f2ffd2f086a4e55739af8c4195c5eb386f2" - integrity sha512-0Jd4jtizqnRAr7svWaBbbrCCN8mzBNd2xFLoT/IM7bGfFie5y58oz97KzXliwiLY3zWjqMXjQcuP1a5VgCv2JA== - dependencies: - "@babel/core" "^7.18.6" - "@babel/generator" "^7.18.7" +"@discoveryjs/json-ext@0.5.7": + version "0.5.7" + resolved "https://registry.yarnpkg.com/@discoveryjs/json-ext/-/json-ext-0.5.7.tgz#1d572bfbbe14b7704e0ba0f39b74815b84870d70" + integrity sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw== + +"@docsearch/css@3.6.2": + version "3.6.2" + resolved "https://registry.yarnpkg.com/@docsearch/css/-/css-3.6.2.tgz#ccd9c83dbfeaf34efe4e3547ee596714ae7e5891" + integrity sha512-vKNZepO2j7MrYBTZIGXvlUOIR+v9KRf70FApRgovWrj3GTs1EITz/Xb0AOlm1xsQBp16clVZj1SY/qaOJbQtZw== + +"@docsearch/react@^3.5.2": + version "3.6.2" + resolved "https://registry.yarnpkg.com/@docsearch/react/-/react-3.6.2.tgz#32b16dd7d5614f0d39e6bc018549816b68d171b8" + integrity sha512-rtZce46OOkVflCQH71IdbXSFK+S8iJZlUF56XBW5rIgx/eG5qoomC7Ag3anZson1bBac/JFQn7XOBfved/IMRA== + dependencies: + "@algolia/autocomplete-core" "1.9.3" + "@algolia/autocomplete-preset-algolia" "1.9.3" + "@docsearch/css" "3.6.2" + algoliasearch "^4.19.1" + +"@docusaurus/core@3.5.1": + version "3.5.1" + resolved "https://registry.yarnpkg.com/@docusaurus/core/-/core-3.5.1.tgz#404069d12167bae097e73fdb0ad1c5e109407827" + integrity sha512-N3+9IbGI2jbkiRc6ZbEnU9dC02nHQXi8ivM1VJldkPQyP7WlyHXS+NDhmL3rwaYOMbGH96X2LcKigCKg7pEEqg== + dependencies: + "@babel/core" "^7.23.3" + "@babel/generator" "^7.23.3" "@babel/plugin-syntax-dynamic-import" "^7.8.3" - "@babel/plugin-transform-runtime" "^7.18.6" - "@babel/preset-env" "^7.18.6" - "@babel/preset-react" "^7.18.6" - "@babel/preset-typescript" "^7.18.6" - "@babel/runtime" "^7.18.6" - "@babel/runtime-corejs3" "^7.18.6" - "@babel/traverse" "^7.18.8" - "@docusaurus/cssnano-preset" "2.3.1" - "@docusaurus/logger" "2.3.1" - "@docusaurus/mdx-loader" "2.3.1" - "@docusaurus/react-loadable" "5.5.2" - "@docusaurus/utils" "2.3.1" - "@docusaurus/utils-common" "2.3.1" - "@docusaurus/utils-validation" 
"2.3.1" - "@slorber/static-site-generator-webpack-plugin" "^4.0.7" - "@svgr/webpack" "^6.2.1" - autoprefixer "^10.4.7" - babel-loader "^8.2.5" + "@babel/plugin-transform-runtime" "^7.22.9" + "@babel/preset-env" "^7.22.9" + "@babel/preset-react" "^7.22.5" + "@babel/preset-typescript" "^7.22.5" + "@babel/runtime" "^7.22.6" + "@babel/runtime-corejs3" "^7.22.6" + "@babel/traverse" "^7.22.8" + "@docusaurus/cssnano-preset" "3.5.1" + "@docusaurus/logger" "3.5.1" + "@docusaurus/mdx-loader" "3.5.1" + "@docusaurus/utils" "3.5.1" + "@docusaurus/utils-common" "3.5.1" + "@docusaurus/utils-validation" "3.5.1" + autoprefixer "^10.4.14" + babel-loader "^9.1.3" babel-plugin-dynamic-import-node "^2.3.3" boxen "^6.2.1" chalk "^4.1.2" chokidar "^3.5.3" - clean-css "^5.3.0" - cli-table3 "^0.6.2" + clean-css "^5.3.2" + cli-table3 "^0.6.3" combine-promises "^1.1.0" commander "^5.1.0" copy-webpack-plugin "^11.0.0" - core-js "^3.23.3" - css-loader "^6.7.1" - css-minimizer-webpack-plugin "^4.0.0" - cssnano "^5.1.12" + core-js "^3.31.1" + css-loader "^6.8.1" + css-minimizer-webpack-plugin "^5.0.1" + cssnano "^6.1.2" del "^6.1.1" - detect-port "^1.3.0" + detect-port "^1.5.1" escape-html "^1.0.3" - eta "^2.0.0" + eta "^2.2.0" + eval "^0.1.8" file-loader "^6.2.0" - fs-extra "^10.1.0" - html-minifier-terser "^6.1.0" - html-tags "^3.2.0" - html-webpack-plugin "^5.5.0" - import-fresh "^3.3.0" + fs-extra "^11.1.1" + html-minifier-terser "^7.2.0" + html-tags "^3.3.1" + html-webpack-plugin "^5.5.3" leven "^3.1.0" lodash "^4.17.21" - mini-css-extract-plugin "^2.6.1" - postcss "^8.4.14" - postcss-loader "^7.0.0" + mini-css-extract-plugin "^2.7.6" + p-map "^4.0.0" + postcss "^8.4.26" + postcss-loader "^7.3.3" prompts "^2.4.2" react-dev-utils "^12.0.1" react-helmet-async "^1.3.0" - react-loadable "npm:@docusaurus/react-loadable@5.5.2" + react-loadable "npm:@docusaurus/react-loadable@6.0.0" react-loadable-ssr-addon-v5-slorber "^1.0.1" - react-router "^5.3.3" + react-router "^5.3.4" react-router-config "^5.1.1" - react-router-dom "^5.3.3" + react-router-dom "^5.3.4" rtl-detect "^1.0.4" - semver "^7.3.7" - serve-handler "^6.1.3" + semver "^7.5.4" + serve-handler "^6.1.5" shelljs "^0.8.5" - terser-webpack-plugin "^5.3.3" - tslib "^2.4.0" - update-notifier "^5.1.0" + terser-webpack-plugin "^5.3.9" + tslib "^2.6.0" + update-notifier "^6.0.2" url-loader "^4.1.1" - wait-on "^6.0.1" - webpack "^5.73.0" - webpack-bundle-analyzer "^4.5.0" - webpack-dev-server "^4.9.3" - webpack-merge "^5.8.0" + webpack "^5.88.1" + webpack-bundle-analyzer "^4.9.0" + webpack-dev-server "^4.15.1" + webpack-merge "^5.9.0" webpackbar "^5.0.2" -"@docusaurus/cssnano-preset@2.3.1": - version "2.3.1" - resolved "https://registry.yarnpkg.com/@docusaurus/cssnano-preset/-/cssnano-preset-2.3.1.tgz#e042487655e3e062417855e12edb3f6eee8f5ecb" - integrity sha512-7mIhAROES6CY1GmCjR4CZkUfjTL6B3u6rKHK0ChQl2d1IevYXq/k/vFgvOrJfcKxiObpMnE9+X6R2Wt1KqxC6w== +"@docusaurus/cssnano-preset@3.5.1": + version "3.5.1" + resolved "https://registry.yarnpkg.com/@docusaurus/cssnano-preset/-/cssnano-preset-3.5.1.tgz#7b6911d3d991e02563ff88ffd04fbe1c5ec9246a" + integrity sha512-mvtWPLWePlm+4doepxMUT5ynsJQ3CgPtDdbaQh9wm3iAE/7OATBpSgLlfz5N+YtxI5bjIErjbkH8yzISP+S65g== dependencies: - cssnano-preset-advanced "^5.3.8" - postcss "^8.4.14" - postcss-sort-media-queries "^4.2.1" - tslib "^2.4.0" + cssnano-preset-advanced "^6.1.2" + postcss "^8.4.38" + postcss-sort-media-queries "^5.2.0" + tslib "^2.6.0" -"@docusaurus/logger@2.3.1": - version "2.3.1" - resolved 
"https://registry.yarnpkg.com/@docusaurus/logger/-/logger-2.3.1.tgz#d76aefb452e3734b4e0e645efc6cbfc0aae52869" - integrity sha512-2lAV/olKKVr9qJhfHFCaqBIl8FgYjbUFwgUnX76+cULwQYss+42ZQ3grHGFvI0ocN2X55WcYe64ellQXz7suqg== +"@docusaurus/logger@3.5.1": + version "3.5.1" + resolved "https://registry.yarnpkg.com/@docusaurus/logger/-/logger-3.5.1.tgz#85e56c58835c922fed25e32adf4e2a93c94f13b8" + integrity sha512-B36a88CEHCtxIylAV1HNuiiISpoKBqm0UxA6a/JwtHX++Dxb7LNDSGs8ELBlQsZN0OG2tX3tBsCWyaLPwYorkQ== dependencies: chalk "^4.1.2" - tslib "^2.4.0" - -"@docusaurus/mdx-loader@2.3.1": - version "2.3.1" - resolved "https://registry.yarnpkg.com/@docusaurus/mdx-loader/-/mdx-loader-2.3.1.tgz#7ec6acee5eff0a280e1b399ea4dd690b15a793f7" - integrity sha512-Gzga7OsxQRpt3392K9lv/bW4jGppdLFJh3luKRknCKSAaZrmVkOQv2gvCn8LAOSZ3uRg5No7AgYs/vpL8K94lA== - dependencies: - "@babel/parser" "^7.18.8" - "@babel/traverse" "^7.18.8" - "@docusaurus/logger" "2.3.1" - "@docusaurus/utils" "2.3.1" - "@mdx-js/mdx" "^1.6.22" + tslib "^2.6.0" + +"@docusaurus/mdx-loader@3.5.1": + version "3.5.1" + resolved "https://registry.yarnpkg.com/@docusaurus/mdx-loader/-/mdx-loader-3.5.1.tgz#6c04ed40387fe37a74f2db7743e189d52276a61c" + integrity sha512-D6Ea2dt32xhoqH+1EuHLGDVSX2HLFiR4QpI0GTU46qOu2hb2ChpQENIUZ2inOsdGFunNa0fCnDG3qn7Kdbzq1A== + dependencies: + "@docusaurus/logger" "3.5.1" + "@docusaurus/utils" "3.5.1" + "@docusaurus/utils-validation" "3.5.1" + "@mdx-js/mdx" "^3.0.0" + "@slorber/remark-comment" "^1.0.0" escape-html "^1.0.3" + estree-util-value-to-estree "^3.0.1" file-loader "^6.2.0" - fs-extra "^10.1.0" - image-size "^1.0.1" - mdast-util-to-string "^2.0.0" - remark-emoji "^2.2.0" + fs-extra "^11.1.1" + image-size "^1.0.2" + mdast-util-mdx "^3.0.0" + mdast-util-to-string "^4.0.0" + rehype-raw "^7.0.0" + remark-directive "^3.0.0" + remark-emoji "^4.0.0" + remark-frontmatter "^5.0.0" + remark-gfm "^4.0.0" stringify-object "^3.3.0" - tslib "^2.4.0" - unified "^9.2.2" - unist-util-visit "^2.0.3" + tslib "^2.6.0" + unified "^11.0.3" + unist-util-visit "^5.0.0" url-loader "^4.1.1" - webpack "^5.73.0" + vfile "^6.0.1" + webpack "^5.88.1" -"@docusaurus/module-type-aliases@2.3.1": - version "2.3.1" - resolved "https://registry.yarnpkg.com/@docusaurus/module-type-aliases/-/module-type-aliases-2.3.1.tgz#986186200818fed999be2e18d6c698eaf4683a33" - integrity sha512-6KkxfAVOJqIUynTRb/tphYCl+co3cP0PlHiMDbi+SzmYxMdgIrwYqH9yAnGSDoN6Jk2ZE/JY/Azs/8LPgKP48A== +"@docusaurus/module-type-aliases@3.5.1": + version "3.5.1" + resolved "https://registry.yarnpkg.com/@docusaurus/module-type-aliases/-/module-type-aliases-3.5.1.tgz#3deeab0112cb2327614ce6653ae9ebbe6459e88b" + integrity sha512-SKKdA5RnvZr3pvFXkxtfsBVNgflRGa/bN1HbNi+1s0HNVYPuhB9DFC/CrKe2OoOfUXx7F7k2gg0Jg9gJYDy4rA== dependencies: - "@docusaurus/react-loadable" "5.5.2" - "@docusaurus/types" "2.3.1" + "@docusaurus/types" "3.5.1" "@types/history" "^4.7.11" "@types/react" "*" "@types/react-router-config" "*" "@types/react-router-dom" "*" react-helmet-async "*" - react-loadable "npm:@docusaurus/react-loadable@5.5.2" - -"@docusaurus/plugin-content-blog@2.3.1": - version "2.3.1" - resolved "https://registry.yarnpkg.com/@docusaurus/plugin-content-blog/-/plugin-content-blog-2.3.1.tgz#236b8ee4f20f7047aa9c285ae77ae36683ad48a3" - integrity sha512-f5LjqX+9WkiLyGiQ41x/KGSJ/9bOjSD8lsVhPvYeUYHCtYpuiDKfhZE07O4EqpHkBx4NQdtQDbp+aptgHSTuiw== - dependencies: - "@docusaurus/core" "2.3.1" - "@docusaurus/logger" "2.3.1" - "@docusaurus/mdx-loader" "2.3.1" - "@docusaurus/types" "2.3.1" - "@docusaurus/utils" "2.3.1" - 
"@docusaurus/utils-common" "2.3.1" - "@docusaurus/utils-validation" "2.3.1" - cheerio "^1.0.0-rc.12" + react-loadable "npm:@docusaurus/react-loadable@6.0.0" + +"@docusaurus/plugin-client-redirects@3.5.1": + version "3.5.1" + resolved "https://registry.yarnpkg.com/@docusaurus/plugin-client-redirects/-/plugin-client-redirects-3.5.1.tgz#ca7896c7dfd5746ed60eb12dae65fa73fe2aca21" + integrity sha512-0At2RdS+7gDA25IMQROp4CcKx526jfER7bsna0EdWtPkC+rimSwxcxEHy0A+7xkBuUPh4SZNNuPVJAnYnvggrA== + dependencies: + "@docusaurus/core" "3.5.1" + "@docusaurus/logger" "3.5.1" + "@docusaurus/utils" "3.5.1" + "@docusaurus/utils-common" "3.5.1" + "@docusaurus/utils-validation" "3.5.1" + eta "^2.2.0" + fs-extra "^11.1.1" + lodash "^4.17.21" + tslib "^2.6.0" + +"@docusaurus/plugin-content-blog@3.5.1": + version "3.5.1" + resolved "https://registry.yarnpkg.com/@docusaurus/plugin-content-blog/-/plugin-content-blog-3.5.1.tgz#9d8e70c81a4ef6471e65bd4a17647889e275f555" + integrity sha512-aPmrMV5cDa2QUZ+kPVJID5O6r+ZuLFtHEyneVl9AgryL/9ECudhtpTUdmdnmapnWfUzSSgqYRZ1JtydGLheSzw== + dependencies: + "@docusaurus/core" "3.5.1" + "@docusaurus/logger" "3.5.1" + "@docusaurus/mdx-loader" "3.5.1" + "@docusaurus/theme-common" "3.5.1" + "@docusaurus/types" "3.5.1" + "@docusaurus/utils" "3.5.1" + "@docusaurus/utils-common" "3.5.1" + "@docusaurus/utils-validation" "3.5.1" + cheerio "1.0.0-rc.12" feed "^4.2.2" - fs-extra "^10.1.0" + fs-extra "^11.1.1" lodash "^4.17.21" reading-time "^1.5.0" - tslib "^2.4.0" - unist-util-visit "^2.0.3" + srcset "^4.0.0" + tslib "^2.6.0" + unist-util-visit "^5.0.0" utility-types "^3.10.0" - webpack "^5.73.0" - -"@docusaurus/plugin-content-docs@2.3.1": - version "2.3.1" - resolved "https://registry.yarnpkg.com/@docusaurus/plugin-content-docs/-/plugin-content-docs-2.3.1.tgz#feae1555479558a55182f22f8a07acc5e0d7444d" - integrity sha512-DxztTOBEruv7qFxqUtbsqXeNcHqcVEIEe+NQoI1oi2DBmKBhW/o0MIal8lt+9gvmpx3oYtlwmLOOGepxZgJGkw== - dependencies: - "@docusaurus/core" "2.3.1" - "@docusaurus/logger" "2.3.1" - "@docusaurus/mdx-loader" "2.3.1" - "@docusaurus/module-type-aliases" "2.3.1" - "@docusaurus/types" "2.3.1" - "@docusaurus/utils" "2.3.1" - "@docusaurus/utils-validation" "2.3.1" - "@types/react-router-config" "^5.0.6" + webpack "^5.88.1" + +"@docusaurus/plugin-content-docs@3.5.1": + version "3.5.1" + resolved "https://registry.yarnpkg.com/@docusaurus/plugin-content-docs/-/plugin-content-docs-3.5.1.tgz#91d79dc90b1f4b5dc792da8297b5b2b5dea60cc6" + integrity sha512-DX+I3eVyXak9KqYXg8dgptomqz/O4twjydpLJT8ZSe9lsZ0Pa1ZNPwmftWYn160O3o6GGeUYzr13Y1Got3iXRQ== + dependencies: + "@docusaurus/core" "3.5.1" + "@docusaurus/logger" "3.5.1" + "@docusaurus/mdx-loader" "3.5.1" + "@docusaurus/module-type-aliases" "3.5.1" + "@docusaurus/theme-common" "3.5.1" + "@docusaurus/types" "3.5.1" + "@docusaurus/utils" "3.5.1" + "@docusaurus/utils-common" "3.5.1" + "@docusaurus/utils-validation" "3.5.1" + "@types/react-router-config" "^5.0.7" combine-promises "^1.1.0" - fs-extra "^10.1.0" - import-fresh "^3.3.0" + fs-extra "^11.1.1" js-yaml "^4.1.0" lodash "^4.17.21" - tslib "^2.4.0" + tslib "^2.6.0" utility-types "^3.10.0" - webpack "^5.73.0" - -"@docusaurus/plugin-content-pages@2.3.1": - version "2.3.1" - resolved "https://registry.yarnpkg.com/@docusaurus/plugin-content-pages/-/plugin-content-pages-2.3.1.tgz#f534a37862be5b3f2ba5b150458d7527646b6f39" - integrity sha512-E80UL6hvKm5VVw8Ka8YaVDtO6kWWDVUK4fffGvkpQ/AJQDOg99LwOXKujPoICC22nUFTsZ2Hp70XvpezCsFQaA== - dependencies: - "@docusaurus/core" "2.3.1" - "@docusaurus/mdx-loader" "2.3.1" - 
"@docusaurus/types" "2.3.1" - "@docusaurus/utils" "2.3.1" - "@docusaurus/utils-validation" "2.3.1" - fs-extra "^10.1.0" - tslib "^2.4.0" - webpack "^5.73.0" - -"@docusaurus/plugin-debug@2.3.1": - version "2.3.1" - resolved "https://registry.yarnpkg.com/@docusaurus/plugin-debug/-/plugin-debug-2.3.1.tgz#26fef904713e148f6dee44957506280f8b7853bb" - integrity sha512-Ujpml1Ppg4geB/2hyu2diWnO49az9U2bxM9Shen7b6qVcyFisNJTkVG2ocvLC7wM1efTJcUhBO6zAku2vKJGMw== - dependencies: - "@docusaurus/core" "2.3.1" - "@docusaurus/types" "2.3.1" - "@docusaurus/utils" "2.3.1" - fs-extra "^10.1.0" - react-json-view "^1.21.3" - tslib "^2.4.0" - -"@docusaurus/plugin-google-analytics@2.3.1": - version "2.3.1" - resolved "https://registry.yarnpkg.com/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-2.3.1.tgz#e2e7db4cf6a7063e8ba5e128d4e413f4d6a0c862" - integrity sha512-OHip0GQxKOFU8n7gkt3TM4HOYTXPCFDjqKbMClDD3KaDnyTuMp/Zvd9HSr770lLEscgPWIvzhJByRAClqsUWiQ== - dependencies: - "@docusaurus/core" "2.3.1" - "@docusaurus/types" "2.3.1" - "@docusaurus/utils-validation" "2.3.1" - tslib "^2.4.0" - -"@docusaurus/plugin-google-gtag@2.3.1", "@docusaurus/plugin-google-gtag@^2.3.1": - version "2.3.1" - resolved "https://registry.yarnpkg.com/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-2.3.1.tgz#b8da54a60c0a50aca609c3643faef78cb4f247a0" - integrity sha512-uXtDhfu4+Hm+oqWUySr3DNI5cWC/rmP6XJyAk83Heor3dFjZqDwCbkX8yWPywkRiWev3Dk/rVF8lEn0vIGVocA== - dependencies: - "@docusaurus/core" "2.3.1" - "@docusaurus/types" "2.3.1" - "@docusaurus/utils-validation" "2.3.1" - tslib "^2.4.0" - -"@docusaurus/plugin-google-tag-manager@2.3.1": - version "2.3.1" - resolved "https://registry.yarnpkg.com/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-2.3.1.tgz#f19bc01cc784fa4734187c5bc637f0574857e15d" - integrity sha512-Ww2BPEYSqg8q8tJdLYPFFM3FMDBCVhEM4UUqKzJaiRMx3NEoly3qqDRAoRDGdIhlC//Rf0iJV9cWAoq2m6k3sw== - dependencies: - "@docusaurus/core" "2.3.1" - "@docusaurus/types" "2.3.1" - "@docusaurus/utils-validation" "2.3.1" - tslib "^2.4.0" - -"@docusaurus/plugin-sitemap@2.3.1": - version "2.3.1" - resolved "https://registry.yarnpkg.com/@docusaurus/plugin-sitemap/-/plugin-sitemap-2.3.1.tgz#f526ab517ca63b7a3460d585876f5952cb908aa0" - integrity sha512-8Yxile/v6QGYV9vgFiYL+8d2N4z4Er3pSHsrD08c5XI8bUXxTppMwjarDUTH/TRTfgAWotRbhJ6WZLyajLpozA== - dependencies: - "@docusaurus/core" "2.3.1" - "@docusaurus/logger" "2.3.1" - "@docusaurus/types" "2.3.1" - "@docusaurus/utils" "2.3.1" - "@docusaurus/utils-common" "2.3.1" - "@docusaurus/utils-validation" "2.3.1" - fs-extra "^10.1.0" + webpack "^5.88.1" + +"@docusaurus/plugin-content-pages@3.5.1": + version "3.5.1" + resolved "https://registry.yarnpkg.com/@docusaurus/plugin-content-pages/-/plugin-content-pages-3.5.1.tgz#906ca4cba3ece2422aedf562a12ad44150dabdd7" + integrity sha512-V2PDVrO2vHYJ7uhrEHpfzg3TTuwfrgNC0pGhM5gXaMfCbdhKm7iwV0huGLcyIX5Peyh7EMP2e8GFccUzWFMYOg== + dependencies: + "@docusaurus/core" "3.5.1" + "@docusaurus/mdx-loader" "3.5.1" + "@docusaurus/types" "3.5.1" + "@docusaurus/utils" "3.5.1" + "@docusaurus/utils-validation" "3.5.1" + fs-extra "^11.1.1" + tslib "^2.6.0" + webpack "^5.88.1" + +"@docusaurus/plugin-debug@3.5.1": + version "3.5.1" + resolved "https://registry.yarnpkg.com/@docusaurus/plugin-debug/-/plugin-debug-3.5.1.tgz#6bc6679130b6a3379f398cb793cea9078d32fb80" + integrity sha512-teFZamoECDiELwM1cx5OXd6dBpRtHarc7kWGL1iQozAkYcobZmqOWykBl4joMjSWUbJlx5v9/CVciykWbFNXjA== + dependencies: + "@docusaurus/core" "3.5.1" + "@docusaurus/types" "3.5.1" + "@docusaurus/utils" 
"3.5.1" + fs-extra "^11.1.1" + react-json-view-lite "^1.2.0" + tslib "^2.6.0" + +"@docusaurus/plugin-google-analytics@3.5.1": + version "3.5.1" + resolved "https://registry.yarnpkg.com/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-3.5.1.tgz#e21a61b9c2deab61d77f2e15aaef79d996cf85b2" + integrity sha512-5FUiYZQWPXTPucMzaOOM25R7IwIPvMKbiB0SNVGtxVsGyFyo5i5fzrkBQl4mkZd7uqmslEPzwYbC28ZeFnrxjg== + dependencies: + "@docusaurus/core" "3.5.1" + "@docusaurus/types" "3.5.1" + "@docusaurus/utils-validation" "3.5.1" + tslib "^2.6.0" + +"@docusaurus/plugin-google-gtag@3.5.1": + version "3.5.1" + resolved "https://registry.yarnpkg.com/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-3.5.1.tgz#81bc5075c99e3898d17684396379027b22116a7e" + integrity sha512-jxBtLBPMv9BJXPXrwJSs69qYcHP/evT1NkVza2yOai7wi5r3E1tVm0bAxdciWitpM0dgS/HDa30qXE7vA1NRDg== + dependencies: + "@docusaurus/core" "3.5.1" + "@docusaurus/types" "3.5.1" + "@docusaurus/utils-validation" "3.5.1" + "@types/gtag.js" "^0.0.12" + tslib "^2.6.0" + +"@docusaurus/plugin-google-tag-manager@3.5.1": + version "3.5.1" + resolved "https://registry.yarnpkg.com/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-3.5.1.tgz#1f9d0bff161399777e45edafc1c59d9243508c30" + integrity sha512-W5WsKoRmb3lDmg2IBfmKsZDlQAkEx/dXuwr4bj7sSQdM8qd829Rsc4Gp5RddUrQdUz/W3Iocn7LayRM5aacJlA== + dependencies: + "@docusaurus/core" "3.5.1" + "@docusaurus/types" "3.5.1" + "@docusaurus/utils-validation" "3.5.1" + tslib "^2.6.0" + +"@docusaurus/plugin-sitemap@3.5.1": + version "3.5.1" + resolved "https://registry.yarnpkg.com/@docusaurus/plugin-sitemap/-/plugin-sitemap-3.5.1.tgz#1b0c579c3ffe2a433768afb9df4aed367813ebeb" + integrity sha512-VXMGJM6uy4jx6HUsFs+kn8MujWGjN7S7p7PYUYSf1bmcFNlf+Qg5vDZtwBElHa2hapeH2AIj2b3QmTgmWeyOHw== + dependencies: + "@docusaurus/core" "3.5.1" + "@docusaurus/logger" "3.5.1" + "@docusaurus/types" "3.5.1" + "@docusaurus/utils" "3.5.1" + "@docusaurus/utils-common" "3.5.1" + "@docusaurus/utils-validation" "3.5.1" + fs-extra "^11.1.1" sitemap "^7.1.1" - tslib "^2.4.0" - -"@docusaurus/preset-classic@2.3.1": - version "2.3.1" - resolved "https://registry.yarnpkg.com/@docusaurus/preset-classic/-/preset-classic-2.3.1.tgz#f0193f06093eb55cafef66bd1ad9e0d33198bf95" - integrity sha512-OQ5W0AHyfdUk0IldwJ3BlnZ1EqoJuu2L2BMhqLbqwNWdkmzmSUvlFLH1Pe7CZSQgB2YUUC/DnmjbPKk/qQD0lQ== - dependencies: - "@docusaurus/core" "2.3.1" - "@docusaurus/plugin-content-blog" "2.3.1" - "@docusaurus/plugin-content-docs" "2.3.1" - "@docusaurus/plugin-content-pages" "2.3.1" - "@docusaurus/plugin-debug" "2.3.1" - "@docusaurus/plugin-google-analytics" "2.3.1" - "@docusaurus/plugin-google-gtag" "2.3.1" - "@docusaurus/plugin-google-tag-manager" "2.3.1" - "@docusaurus/plugin-sitemap" "2.3.1" - "@docusaurus/theme-classic" "2.3.1" - "@docusaurus/theme-common" "2.3.1" - "@docusaurus/theme-search-algolia" "2.3.1" - "@docusaurus/types" "2.3.1" - -"@docusaurus/react-loadable@5.5.2", "react-loadable@npm:@docusaurus/react-loadable@5.5.2": - version "5.5.2" - resolved "https://registry.yarnpkg.com/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz#81aae0db81ecafbdaee3651f12804580868fa6ce" - integrity sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ== - dependencies: - "@types/react" "*" - prop-types "^15.6.2" - -"@docusaurus/theme-classic@2.3.1": - version "2.3.1" - resolved "https://registry.yarnpkg.com/@docusaurus/theme-classic/-/theme-classic-2.3.1.tgz#8e6e194236e702c0d4e8d7b7cbb6886ae456e598" - integrity 
sha512-SelSIDvyttb7ZYHj8vEUhqykhAqfOPKk+uP0z85jH72IMC58e7O8DIlcAeBv+CWsLbNIl9/Hcg71X0jazuxJug== - dependencies: - "@docusaurus/core" "2.3.1" - "@docusaurus/mdx-loader" "2.3.1" - "@docusaurus/module-type-aliases" "2.3.1" - "@docusaurus/plugin-content-blog" "2.3.1" - "@docusaurus/plugin-content-docs" "2.3.1" - "@docusaurus/plugin-content-pages" "2.3.1" - "@docusaurus/theme-common" "2.3.1" - "@docusaurus/theme-translations" "2.3.1" - "@docusaurus/types" "2.3.1" - "@docusaurus/utils" "2.3.1" - "@docusaurus/utils-common" "2.3.1" - "@docusaurus/utils-validation" "2.3.1" - "@mdx-js/react" "^1.6.22" - clsx "^1.2.1" - copy-text-to-clipboard "^3.0.1" - infima "0.2.0-alpha.42" + tslib "^2.6.0" + +"@docusaurus/preset-classic@3.5.1": + version "3.5.1" + resolved "https://registry.yarnpkg.com/@docusaurus/preset-classic/-/preset-classic-3.5.1.tgz#d7bcf5545a56fdeff5363cf81b30825ea39f4282" + integrity sha512-afDMZoNYxdloJ7qJJbd3Lmv9uYXKKsEAOtvnvu2945kqe1LUGIIwOo1nMAKgB9y21E5FEvWKnla0MvkMraumZA== + dependencies: + "@docusaurus/core" "3.5.1" + "@docusaurus/plugin-content-blog" "3.5.1" + "@docusaurus/plugin-content-docs" "3.5.1" + "@docusaurus/plugin-content-pages" "3.5.1" + "@docusaurus/plugin-debug" "3.5.1" + "@docusaurus/plugin-google-analytics" "3.5.1" + "@docusaurus/plugin-google-gtag" "3.5.1" + "@docusaurus/plugin-google-tag-manager" "3.5.1" + "@docusaurus/plugin-sitemap" "3.5.1" + "@docusaurus/theme-classic" "3.5.1" + "@docusaurus/theme-common" "3.5.1" + "@docusaurus/theme-search-algolia" "3.5.1" + "@docusaurus/types" "3.5.1" + +"@docusaurus/theme-classic@3.5.1": + version "3.5.1" + resolved "https://registry.yarnpkg.com/@docusaurus/theme-classic/-/theme-classic-3.5.1.tgz#a957270a88e64078fefc39e85e07128521700ef3" + integrity sha512-k8rLMwHuTc3SqYekc20s1uZHjabt9yi6mt1RUjbkwmjsJlAB6zrtYvsB+ZxrhY5yeUD8DZm3h0qVvKbClHVCCA== + dependencies: + "@docusaurus/core" "3.5.1" + "@docusaurus/mdx-loader" "3.5.1" + "@docusaurus/module-type-aliases" "3.5.1" + "@docusaurus/plugin-content-blog" "3.5.1" + "@docusaurus/plugin-content-docs" "3.5.1" + "@docusaurus/plugin-content-pages" "3.5.1" + "@docusaurus/theme-common" "3.5.1" + "@docusaurus/theme-translations" "3.5.1" + "@docusaurus/types" "3.5.1" + "@docusaurus/utils" "3.5.1" + "@docusaurus/utils-common" "3.5.1" + "@docusaurus/utils-validation" "3.5.1" + "@mdx-js/react" "^3.0.0" + clsx "^2.0.0" + copy-text-to-clipboard "^3.2.0" + infima "0.2.0-alpha.44" lodash "^4.17.21" nprogress "^0.2.0" - postcss "^8.4.14" - prism-react-renderer "^1.3.5" - prismjs "^1.28.0" - react-router-dom "^5.3.3" - rtlcss "^3.5.0" - tslib "^2.4.0" + postcss "^8.4.26" + prism-react-renderer "^2.3.0" + prismjs "^1.29.0" + react-router-dom "^5.3.4" + rtlcss "^4.1.0" + tslib "^2.6.0" utility-types "^3.10.0" -"@docusaurus/theme-common@2.3.1": - version "2.3.1" - resolved "https://registry.yarnpkg.com/@docusaurus/theme-common/-/theme-common-2.3.1.tgz#82f52d80226efef8c4418c4eacfc5051aa215f7f" - integrity sha512-RYmYl2OR2biO+yhmW1aS5FyEvnrItPINa+0U2dMxcHpah8reSCjQ9eJGRmAgkZFchV1+aIQzXOI1K7LCW38O0g== - dependencies: - "@docusaurus/mdx-loader" "2.3.1" - "@docusaurus/module-type-aliases" "2.3.1" - "@docusaurus/plugin-content-blog" "2.3.1" - "@docusaurus/plugin-content-docs" "2.3.1" - "@docusaurus/plugin-content-pages" "2.3.1" - "@docusaurus/utils" "2.3.1" +"@docusaurus/theme-common@3.5.1": + version "3.5.1" + resolved "https://registry.yarnpkg.com/@docusaurus/theme-common/-/theme-common-3.5.1.tgz#579fa956fde8f57bf8d88a5405878683cb27447e" + integrity 
sha512-r34YDzSjggX+B+8W+mG2dVh1ps4JJRCiyq8E1LnZIKLU6F89I2KpAZpPQ2/njKsKhBRLtQ1x92HVkD0FZ3xjrg== + dependencies: + "@docusaurus/mdx-loader" "3.5.1" + "@docusaurus/module-type-aliases" "3.5.1" + "@docusaurus/utils" "3.5.1" + "@docusaurus/utils-common" "3.5.1" "@types/history" "^4.7.11" "@types/react" "*" "@types/react-router-config" "*" - clsx "^1.2.1" + clsx "^2.0.0" parse-numeric-range "^1.3.0" - prism-react-renderer "^1.3.5" - tslib "^2.4.0" - use-sync-external-store "^1.2.0" + prism-react-renderer "^2.3.0" + tslib "^2.6.0" utility-types "^3.10.0" -"@docusaurus/theme-search-algolia@2.3.1": - version "2.3.1" - resolved "https://registry.yarnpkg.com/@docusaurus/theme-search-algolia/-/theme-search-algolia-2.3.1.tgz#d587b40913119e9287d14670e277b933d8f453f0" - integrity sha512-JdHaRqRuH1X++g5fEMLnq7OtULSGQdrs9AbhcWRQ428ZB8/HOiaN6mj3hzHvcD3DFgu7koIVtWPQnvnN7iwzHA== - dependencies: - "@docsearch/react" "^3.1.1" - "@docusaurus/core" "2.3.1" - "@docusaurus/logger" "2.3.1" - "@docusaurus/plugin-content-docs" "2.3.1" - "@docusaurus/theme-common" "2.3.1" - "@docusaurus/theme-translations" "2.3.1" - "@docusaurus/utils" "2.3.1" - "@docusaurus/utils-validation" "2.3.1" - algoliasearch "^4.13.1" - algoliasearch-helper "^3.10.0" - clsx "^1.2.1" - eta "^2.0.0" - fs-extra "^10.1.0" +"@docusaurus/theme-search-algolia@3.5.1": + version "3.5.1" + resolved "https://registry.yarnpkg.com/@docusaurus/theme-search-algolia/-/theme-search-algolia-3.5.1.tgz#70917ef7ee2ddf65814df1b232666e5e0806b0c4" + integrity sha512-IcUbgh9YcedANhpa0Q3+67WUKY8G7YkN/pZxVBEFjq3d2bniRKktPv41Nh/+AtGLSNJIcspZwEAs/r/mKSZGug== + dependencies: + "@docsearch/react" "^3.5.2" + "@docusaurus/core" "3.5.1" + "@docusaurus/logger" "3.5.1" + "@docusaurus/plugin-content-docs" "3.5.1" + "@docusaurus/theme-common" "3.5.1" + "@docusaurus/theme-translations" "3.5.1" + "@docusaurus/utils" "3.5.1" + "@docusaurus/utils-validation" "3.5.1" + algoliasearch "^4.18.0" + algoliasearch-helper "^3.13.3" + clsx "^2.0.0" + eta "^2.2.0" + fs-extra "^11.1.1" lodash "^4.17.21" - tslib "^2.4.0" + tslib "^2.6.0" utility-types "^3.10.0" -"@docusaurus/theme-translations@2.3.1": - version "2.3.1" - resolved "https://registry.yarnpkg.com/@docusaurus/theme-translations/-/theme-translations-2.3.1.tgz#b2b1ecc00a737881b5bfabc19f90b20f0fe02bb3" - integrity sha512-BsBZzAewJabVhoGG1Ij2u4pMS3MPW6gZ6sS4pc+Y7czevRpzxoFNJXRtQDVGe7mOpv/MmRmqg4owDK+lcOTCVQ== +"@docusaurus/theme-translations@3.5.1": + version "3.5.1" + resolved "https://registry.yarnpkg.com/@docusaurus/theme-translations/-/theme-translations-3.5.1.tgz#c400204a82b05aa922257143c7fbcb7fb0f38b2a" + integrity sha512-fyzQOWrTm0+ZpTlS0/xHsIK4f+LA4qVFrq8rCzIHjxZRip/noYUOwF64lA95vcuw6qnOVBoNE/LyfbBvExnpcw== dependencies: - fs-extra "^10.1.0" - tslib "^2.4.0" + fs-extra "^11.1.1" + tslib "^2.6.0" -"@docusaurus/types@2.3.1": - version "2.3.1" - resolved "https://registry.yarnpkg.com/@docusaurus/types/-/types-2.3.1.tgz#785ade2e0f4e35e1eb7fb0d04c27d11c3991a2e8" - integrity sha512-PREbIRhTaNNY042qmfSE372Jb7djZt+oVTZkoqHJ8eff8vOIc2zqqDqBVc5BhOfpZGPTrE078yy/torUEZy08A== +"@docusaurus/types@3.5.1": + version "3.5.1" + resolved "https://registry.yarnpkg.com/@docusaurus/types/-/types-3.5.1.tgz#f9e86fbf8f6e090d635a3016cfa25bf9d5fb6fcf" + integrity sha512-IXTGQBoXAGFliGF5Cn3F+gSGskgzAL8+4y6dDY1gcePA0r8WngHj8oovS1YPv+b9JOff32nv8YGGZITHOMXJsA== dependencies: + "@mdx-js/mdx" "^3.0.0" "@types/history" "^4.7.11" "@types/react" "*" commander "^5.1.0" - joi "^17.6.0" + joi "^17.9.2" react-helmet-async "^1.3.0" utility-types "^3.10.0" - webpack 
"^5.73.0" - webpack-merge "^5.8.0" - -"@docusaurus/utils-common@2.3.1": - version "2.3.1" - resolved "https://registry.yarnpkg.com/@docusaurus/utils-common/-/utils-common-2.3.1.tgz#1abe66846eb641547e4964d44f3011938e58e50b" - integrity sha512-pVlRpXkdNcxmKNxAaB1ya2hfCEvVsLDp2joeM6K6uv55Oc5nVIqgyYSgSNKZyMdw66NnvMfsu0RBylcwZQKo9A== - dependencies: - tslib "^2.4.0" - -"@docusaurus/utils-validation@2.3.1": - version "2.3.1" - resolved "https://registry.yarnpkg.com/@docusaurus/utils-validation/-/utils-validation-2.3.1.tgz#b65c718ba9b84b7a891bccf5ac6d19b57ee7d887" - integrity sha512-7n0208IG3k1HVTByMHlZoIDjjOFC8sbViHVXJx0r3Q+3Ezrx+VQ1RZ/zjNn6lT+QBCRCXlnlaoJ8ug4HIVgQ3w== - dependencies: - "@docusaurus/logger" "2.3.1" - "@docusaurus/utils" "2.3.1" - joi "^17.6.0" + webpack "^5.88.1" + webpack-merge "^5.9.0" + +"@docusaurus/utils-common@3.5.1": + version "3.5.1" + resolved "https://registry.yarnpkg.com/@docusaurus/utils-common/-/utils-common-3.5.1.tgz#970989453033478c366b48af45e0b1be5f91d336" + integrity sha512-374n6/IW34gHR65JMMN33XLFogTCsrGVPQDVbv2vG96EYHvYzE/plfcGV7xSbXB8yS1YHsxVfvNgVUGi973bfQ== + dependencies: + tslib "^2.6.0" + +"@docusaurus/utils-validation@3.5.1": + version "3.5.1" + resolved "https://registry.yarnpkg.com/@docusaurus/utils-validation/-/utils-validation-3.5.1.tgz#369f38e599d105e2ccb5017db1e233b847405788" + integrity sha512-LZdQnqVVLStgTCn0rfvf4wuOQkjPbGtLXJIQ449em1wJeSFO7lfmn5VGUNLt+xKHvIPfN272EHG8BuvijCI0+A== + dependencies: + "@docusaurus/logger" "3.5.1" + "@docusaurus/utils" "3.5.1" + "@docusaurus/utils-common" "3.5.1" + fs-extra "^11.2.0" + joi "^17.9.2" js-yaml "^4.1.0" - tslib "^2.4.0" + lodash "^4.17.21" + tslib "^2.6.0" -"@docusaurus/utils@2.3.1": - version "2.3.1" - resolved "https://registry.yarnpkg.com/@docusaurus/utils/-/utils-2.3.1.tgz#24b9cae3a23b1e6dc88f95c45722c7e82727b032" - integrity sha512-9WcQROCV0MmrpOQDXDGhtGMd52DHpSFbKLfkyaYumzbTstrbA5pPOtiGtxK1nqUHkiIv8UwexS54p0Vod2I1lg== +"@docusaurus/utils@3.5.1": + version "3.5.1" + resolved "https://registry.yarnpkg.com/@docusaurus/utils/-/utils-3.5.1.tgz#680067a3a38d46b6fed80b95555a58bcf19cb5ef" + integrity sha512-/4QAvXyiQviz2FQ4ct5l1ckvDihIdjS8FsOExC0T+Y1UD38jgPbjTwRJXsDaRsDRCCrDAtXvlonxXw2kixcnXw== dependencies: - "@docusaurus/logger" "2.3.1" - "@svgr/webpack" "^6.2.1" + "@docusaurus/logger" "3.5.1" + "@docusaurus/utils-common" "3.5.1" + "@svgr/webpack" "^8.1.0" escape-string-regexp "^4.0.0" file-loader "^6.2.0" - fs-extra "^10.1.0" - github-slugger "^1.4.0" + fs-extra "^11.1.1" + github-slugger "^1.5.0" globby "^11.1.0" gray-matter "^4.0.3" + jiti "^1.20.0" js-yaml "^4.1.0" lodash "^4.17.21" micromatch "^4.0.5" + prompts "^2.4.2" resolve-pathname "^3.0.0" shelljs "^0.8.5" - tslib "^2.4.0" + tslib "^2.6.0" url-loader "^4.1.1" - webpack "^5.73.0" + utility-types "^3.10.0" + webpack "^5.88.1" -"@hapi/hoek@^9.0.0": +"@hapi/hoek@^9.0.0", "@hapi/hoek@^9.3.0": version "9.3.0" resolved "https://registry.yarnpkg.com/@hapi/hoek/-/hoek-9.3.0.tgz#8368869dcb735be2e7f5cb7647de78e167a251fb" integrity sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ== -"@hapi/topo@^5.0.0": +"@hapi/topo@^5.1.0": version "5.1.0" resolved "https://registry.yarnpkg.com/@hapi/topo/-/topo-5.1.0.tgz#dc448e332c6c6e37a4dc02fd84ba8d44b9afb012" integrity sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg== dependencies: "@hapi/hoek" "^9.0.0" -"@jest/schemas@^29.0.0": - version "29.0.0" - resolved 
"https://registry.yarnpkg.com/@jest/schemas/-/schemas-29.0.0.tgz#5f47f5994dd4ef067fb7b4188ceac45f77fe952a" - integrity sha512-3Ab5HgYIIAnS0HjqJHQYZS+zXc4tUmTmBH3z83ajI6afXp8X3ZtdLX+nXx+I7LNkJD7uN9LAVhgnjDgZa2z0kA== +"@jest/schemas@^29.6.3": + version "29.6.3" + resolved "https://registry.yarnpkg.com/@jest/schemas/-/schemas-29.6.3.tgz#430b5ce8a4e0044a7e3819663305a7b3091c8e03" + integrity sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA== dependencies: - "@sinclair/typebox" "^0.24.1" + "@sinclair/typebox" "^0.27.8" -"@jest/types@^29.3.1": - version "29.3.1" - resolved "https://registry.yarnpkg.com/@jest/types/-/types-29.3.1.tgz#7c5a80777cb13e703aeec6788d044150341147e3" - integrity sha512-d0S0jmmTpjnhCmNpApgX3jrUZgZ22ivKJRvL2lli5hpCRoNnp1f85r2/wpKfXuYu8E7Jjh1hGfhPyup1NM5AmA== +"@jest/types@^29.6.3": + version "29.6.3" + resolved "https://registry.yarnpkg.com/@jest/types/-/types-29.6.3.tgz#1131f8cf634e7e84c5e77bab12f052af585fba59" + integrity sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw== dependencies: - "@jest/schemas" "^29.0.0" + "@jest/schemas" "^29.6.3" "@types/istanbul-lib-coverage" "^2.0.0" "@types/istanbul-reports" "^3.0.0" "@types/node" "*" "@types/yargs" "^17.0.8" chalk "^4.0.0" -"@jridgewell/gen-mapping@^0.1.0": - version "0.1.1" - resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.1.1.tgz#e5d2e450306a9491e3bd77e323e38d7aff315996" - integrity sha512-sQXCasFk+U8lWYEe66WxRDOE9PjVz4vSM51fTu3Hw+ClTpUSQb718772vH3pyS5pShp6lvQM7SxgIDXXXmOX7w== +"@jridgewell/gen-mapping@^0.3.5": + version "0.3.5" + resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz#dcce6aff74bdf6dad1a95802b69b04a2fcb1fb36" + integrity sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg== dependencies: - "@jridgewell/set-array" "^1.0.0" + "@jridgewell/set-array" "^1.2.1" "@jridgewell/sourcemap-codec" "^1.4.10" + "@jridgewell/trace-mapping" "^0.3.24" -"@jridgewell/gen-mapping@^0.3.0", "@jridgewell/gen-mapping@^0.3.2": - version "0.3.2" - resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.2.tgz#c1aedc61e853f2bb9f5dfe6d4442d3b565b253b9" - integrity sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A== - dependencies: - "@jridgewell/set-array" "^1.0.1" - "@jridgewell/sourcemap-codec" "^1.4.10" - "@jridgewell/trace-mapping" "^0.3.9" - -"@jridgewell/resolve-uri@3.1.0": - version "3.1.0" - resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz#2203b118c157721addfe69d47b70465463066d78" - integrity sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w== +"@jridgewell/resolve-uri@^3.1.0": + version "3.1.2" + resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz#7a0ee601f60f99a20c7c7c5ff0c80388c1189bd6" + integrity sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw== -"@jridgewell/set-array@^1.0.0", "@jridgewell/set-array@^1.0.1": - version "1.1.2" - resolved "https://registry.yarnpkg.com/@jridgewell/set-array/-/set-array-1.1.2.tgz#7c6cf998d6d20b914c0a55a91ae928ff25965e72" - integrity sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw== +"@jridgewell/set-array@^1.2.1": + version "1.2.1" + resolved 
"https://registry.yarnpkg.com/@jridgewell/set-array/-/set-array-1.2.1.tgz#558fb6472ed16a4c850b889530e6b36438c49280" + integrity sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A== -"@jridgewell/source-map@^0.3.2": - version "0.3.2" - resolved "https://registry.yarnpkg.com/@jridgewell/source-map/-/source-map-0.3.2.tgz#f45351aaed4527a298512ec72f81040c998580fb" - integrity sha512-m7O9o2uR8k2ObDysZYzdfhb08VuEml5oWGiosa1VdaPZ/A6QyPkAJuwN0Q1lhULOf6B7MtQmHENS743hWtCrgw== +"@jridgewell/source-map@^0.3.3": + version "0.3.6" + resolved "https://registry.yarnpkg.com/@jridgewell/source-map/-/source-map-0.3.6.tgz#9d71ca886e32502eb9362c9a74a46787c36df81a" + integrity sha512-1ZJTZebgqllO79ue2bm3rIGud/bOe0pP5BjSRCRxxYkEZS8STV7zN84UBbiYu7jy+eCKSnVIUgoWWE/tt+shMQ== dependencies: - "@jridgewell/gen-mapping" "^0.3.0" - "@jridgewell/trace-mapping" "^0.3.9" + "@jridgewell/gen-mapping" "^0.3.5" + "@jridgewell/trace-mapping" "^0.3.25" -"@jridgewell/sourcemap-codec@1.4.14", "@jridgewell/sourcemap-codec@^1.4.10": - version "1.4.14" - resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz#add4c98d341472a289190b424efbdb096991bb24" - integrity sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw== +"@jridgewell/sourcemap-codec@^1.4.10", "@jridgewell/sourcemap-codec@^1.4.14": + version "1.5.0" + resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz#3188bcb273a414b0d215fd22a58540b989b9409a" + integrity sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ== -"@jridgewell/trace-mapping@^0.3.14", "@jridgewell/trace-mapping@^0.3.9": - version "0.3.17" - resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.17.tgz#793041277af9073b0951a7fe0f0d8c4c98c36985" - integrity sha512-MCNzAp77qzKca9+W/+I0+sEpaUnZoeasnghNeVc41VZCEKaCH73Vq3BZZ/SzWIgrqE4H4ceI+p+b6C0mHf9T4g== +"@jridgewell/trace-mapping@^0.3.18", "@jridgewell/trace-mapping@^0.3.20", "@jridgewell/trace-mapping@^0.3.24", "@jridgewell/trace-mapping@^0.3.25": + version "0.3.25" + resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz#15f190e98895f3fc23276ee14bc76b675c2e50f0" + integrity sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ== dependencies: - "@jridgewell/resolve-uri" "3.1.0" - "@jridgewell/sourcemap-codec" "1.4.14" + "@jridgewell/resolve-uri" "^3.1.0" + "@jridgewell/sourcemap-codec" "^1.4.14" "@leichtgewicht/ip-codec@^2.0.1": - version "2.0.4" - resolved "https://registry.yarnpkg.com/@leichtgewicht/ip-codec/-/ip-codec-2.0.4.tgz#b2ac626d6cb9c8718ab459166d4bb405b8ffa78b" - integrity sha512-Hcv+nVC0kZnQ3tD9GVu5xSMR4VVYOteQIr/hwFPVEvPdlXqgGEuRjiheChHgdM+JyqdgNcmzZOX/tnl0JOiI7A== - -"@mdx-js/mdx@^1.6.22": - version "1.6.22" - resolved "https://registry.yarnpkg.com/@mdx-js/mdx/-/mdx-1.6.22.tgz#8a723157bf90e78f17dc0f27995398e6c731f1ba" - integrity sha512-AMxuLxPz2j5/6TpF/XSdKpQP1NlG0z11dFOlq+2IP/lSgl11GY8ji6S/rgsViN/L0BDvHvUMruRb7ub+24LUYA== - dependencies: - "@babel/core" "7.12.9" - "@babel/plugin-syntax-jsx" "7.12.1" - "@babel/plugin-syntax-object-rest-spread" "7.8.3" - "@mdx-js/util" "1.6.22" - babel-plugin-apply-mdx-type-prop "1.6.22" - babel-plugin-extract-import-names "1.6.22" - camelcase-css "2.0.1" - detab "2.0.4" - hast-util-raw "6.0.1" - lodash.uniq "4.5.0" - mdast-util-to-hast "10.0.1" - remark-footnotes "2.0.0" - remark-mdx "1.6.22" - 
remark-parse "8.0.3" - remark-squeeze-paragraphs "4.0.0" - style-to-object "0.3.0" - unified "9.2.0" - unist-builder "2.0.3" - unist-util-visit "2.0.3" + version "2.0.5" + resolved "https://registry.yarnpkg.com/@leichtgewicht/ip-codec/-/ip-codec-2.0.5.tgz#4fc56c15c580b9adb7dc3c333a134e540b44bfb1" + integrity sha512-Vo+PSpZG2/fmgmiNzYK9qWRh8h/CHrwD0mo1h1DzL4yzHNSfWYujGTYsWGreD000gcgmZ7K4Ys6Tx9TxtsKdDw== "@mdx-js/mdx@^2.1.1": - version "2.2.1" - resolved "https://registry.yarnpkg.com/@mdx-js/mdx/-/mdx-2.2.1.tgz#611af1b68135c94eb467eb07e006fa1d8eebe029" - integrity sha512-hZ3ex7exYLJn6FfReq8yTvA6TE53uW9UHJQM9IlSauOuS55J9y8RtA7W+dzp6Yrzr00/U1sd7q+Wf61q6SfiTQ== + version "2.3.0" + resolved "https://registry.yarnpkg.com/@mdx-js/mdx/-/mdx-2.3.0.tgz#d65d8c3c28f3f46bb0e7cb3bf7613b39980671a9" + integrity sha512-jLuwRlz8DQfQNiUCJR50Y09CGPq3fLtmtUQfVrj79E0JWu3dvsVcxVIcfhR5h0iXu+/z++zDrYeiJqifRynJkA== dependencies: "@types/estree-jsx" "^1.0.0" "@types/mdx" "^2.0.0" @@ -1745,15 +1628,47 @@ unist-util-visit "^4.0.0" vfile "^5.0.0" +"@mdx-js/mdx@^3.0.0": + version "3.1.0" + resolved "https://registry.yarnpkg.com/@mdx-js/mdx/-/mdx-3.1.0.tgz#10235cab8ad7d356c262e8c21c68df5850a97dc3" + integrity sha512-/QxEhPAvGwbQmy1Px8F899L5Uc2KZ6JtXwlCgJmjSTBedwOZkByYcBG4GceIGPXRDsmfxhHazuS+hlOShRLeDw== + dependencies: + "@types/estree" "^1.0.0" + "@types/estree-jsx" "^1.0.0" + "@types/hast" "^3.0.0" + "@types/mdx" "^2.0.0" + collapse-white-space "^2.0.0" + devlop "^1.0.0" + estree-util-is-identifier-name "^3.0.0" + estree-util-scope "^1.0.0" + estree-walker "^3.0.0" + hast-util-to-jsx-runtime "^2.0.0" + markdown-extensions "^2.0.0" + recma-build-jsx "^1.0.0" + recma-jsx "^1.0.0" + recma-stringify "^1.0.0" + rehype-recma "^1.0.0" + remark-mdx "^3.0.0" + remark-parse "^11.0.0" + remark-rehype "^11.0.0" + source-map "^0.7.0" + unified "^11.0.0" + unist-util-position-from-estree "^2.0.0" + unist-util-stringify-position "^4.0.0" + unist-util-visit "^5.0.0" + vfile "^6.0.0" + "@mdx-js/react@^1.6.21", "@mdx-js/react@^1.6.22": version "1.6.22" resolved "https://registry.yarnpkg.com/@mdx-js/react/-/react-1.6.22.tgz#ae09b4744fddc74714ee9f9d6f17a66e77c43573" integrity sha512-TDoPum4SHdfPiGSAaRBw7ECyI8VaHpK8GJugbJIJuqyh6kzw9ZLJZW3HGL3NNrJGxcAixUvqROm+YuQOo5eXtg== -"@mdx-js/util@1.6.22": - version "1.6.22" - resolved "https://registry.yarnpkg.com/@mdx-js/util/-/util-1.6.22.tgz#219dfd89ae5b97a8801f015323ffa4b62f45718b" - integrity sha512-H1rQc1ZOHANWBvPcW+JpGwr+juXSxM8Q8YCkm3GhZd8REu1fHR3z99CErO1p9pkcfcxZnMdIZdIsXkOHY0NilA== +"@mdx-js/react@^3.0.0": + version "3.1.0" + resolved "https://registry.yarnpkg.com/@mdx-js/react/-/react-3.1.0.tgz#c4522e335b3897b9a845db1dbdd2f966ae8fb0ed" + integrity sha512-QjHtSaoameoalGnKDT3FoIl4+9RwyTmo9ZJGBdLOks/YOiWHoRDI3PUwEzOE7kEmGcV3AFcp9K6dYu9rEuKLAQ== + dependencies: + "@types/mdx" "^2.0.0" "@nodelib/fs.scandir@2.1.5": version "2.1.5" @@ -1776,19 +1691,40 @@ "@nodelib/fs.scandir" "2.1.5" fastq "^1.6.0" -"@polka/url@^1.0.0-next.20": - version "1.0.0-next.21" - resolved "https://registry.yarnpkg.com/@polka/url/-/url-1.0.0-next.21.tgz#5de5a2385a35309427f6011992b544514d559aa1" - integrity sha512-a5Sab1C4/icpTZVzZc5Ghpz88yQtGOyNqYXcZgOssB2uuAr+wF/MvN6bgtW32q7HHrvBki+BsZ0OuNv6EV3K9g== +"@pnpm/config.env-replace@^1.1.0": + version "1.1.0" + resolved "https://registry.yarnpkg.com/@pnpm/config.env-replace/-/config.env-replace-1.1.0.tgz#ab29da53df41e8948a00f2433f085f54de8b3a4c" + integrity sha512-htyl8TWnKL7K/ESFa1oW2UB5lVDxuF5DpM7tBi6Hu2LNL3mWkIzNLG6N4zoCUP1lCKNxWy/3iu8mS8MvToGd6w== 
-"@sideway/address@^4.1.3": - version "4.1.4" - resolved "https://registry.yarnpkg.com/@sideway/address/-/address-4.1.4.tgz#03dccebc6ea47fdc226f7d3d1ad512955d4783f0" - integrity sha512-7vwq+rOHVWjyXxVlR76Agnvhy8I9rpzjosTESvmhNeXOXdZZB15Fl+TI9x1SiHZH5Jv2wTGduSxFDIaq0m3DUw== +"@pnpm/network.ca-file@^1.0.1": + version "1.0.2" + resolved "https://registry.yarnpkg.com/@pnpm/network.ca-file/-/network.ca-file-1.0.2.tgz#2ab05e09c1af0cdf2fcf5035bea1484e222f7983" + integrity sha512-YcPQ8a0jwYU9bTdJDpXjMi7Brhkr1mXsXrUJvjqM2mQDgkRiz8jFaQGOdaLxgjtUfQgZhKy/O3cG/YwmgKaxLA== + dependencies: + graceful-fs "4.2.10" + +"@pnpm/npm-conf@^2.1.0": + version "2.3.1" + resolved "https://registry.yarnpkg.com/@pnpm/npm-conf/-/npm-conf-2.3.1.tgz#bb375a571a0bd63ab0a23bece33033c683e9b6b0" + integrity sha512-c83qWb22rNRuB0UaVCI0uRPNRr8Z0FWnEIvT47jiHAmOIUHbBOg5XvV7pM5x+rKn9HRpjxquDbXYSXr3fAKFcw== + dependencies: + "@pnpm/config.env-replace" "^1.1.0" + "@pnpm/network.ca-file" "^1.0.1" + config-chain "^1.1.11" + +"@polka/url@^1.0.0-next.24": + version "1.0.0-next.28" + resolved "https://registry.yarnpkg.com/@polka/url/-/url-1.0.0-next.28.tgz#d45e01c4a56f143ee69c54dd6b12eade9e270a73" + integrity sha512-8LduaNlMZGwdZ6qWrKlfa+2M4gahzFkprZiAt2TF8uS0qQgBizKXpXURqvTJ4WtmupWxaLqjRb2UCTe72mu+Aw== + +"@sideway/address@^4.1.5": + version "4.1.5" + resolved "https://registry.yarnpkg.com/@sideway/address/-/address-4.1.5.tgz#4bc149a0076623ced99ca8208ba780d65a99b9d5" + integrity sha512-IqO/DUQHUkPeixNQ8n0JA6102hT9CmaljNTPmQ1u8MEhBo/R4Q8eKLN/vGZxuebwOroDB4cbpjheD4+/sKFK4Q== dependencies: "@hapi/hoek" "^9.0.0" -"@sideway/formula@^3.0.0": +"@sideway/formula@^3.0.1": version "3.0.1" resolved "https://registry.yarnpkg.com/@sideway/formula/-/formula-3.0.1.tgz#80fcbcbaf7ce031e0ef2dd29b1bfc7c3f583611f" integrity sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg== @@ -1798,104 +1734,123 @@ resolved "https://registry.yarnpkg.com/@sideway/pinpoint/-/pinpoint-2.0.0.tgz#cff8ffadc372ad29fd3f78277aeb29e632cc70df" integrity sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ== -"@sinclair/typebox@^0.24.1": - version "0.24.51" - resolved "https://registry.yarnpkg.com/@sinclair/typebox/-/typebox-0.24.51.tgz#645f33fe4e02defe26f2f5c0410e1c094eac7f5f" - integrity sha512-1P1OROm/rdubP5aFDSZQILU0vrLCJ4fvHt6EoqHEM+2D/G5MK3bIaymUKLit8Js9gbns5UyJnkP/TZROLw4tUA== +"@sinclair/typebox@^0.27.8": + version "0.27.8" + resolved "https://registry.yarnpkg.com/@sinclair/typebox/-/typebox-0.27.8.tgz#6667fac16c436b5434a387a34dedb013198f6e6e" + integrity sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA== -"@sindresorhus/is@^0.14.0": - version "0.14.0" - resolved "https://registry.yarnpkg.com/@sindresorhus/is/-/is-0.14.0.tgz#9fb3a3cf3132328151f353de4632e01e52102bea" - integrity sha512-9NET910DNaIPngYnLLPeg+Ogzqsi9uM4mSboU5y6p8S5DzMTVEsJZrawi+BoDNUVBa2DhJqQYUFvMDfgU062LQ== +"@sindresorhus/is@^4.6.0": + version "4.6.0" + resolved "https://registry.yarnpkg.com/@sindresorhus/is/-/is-4.6.0.tgz#3c7c9c46e678feefe7a2e5bb609d3dbd665ffb3f" + integrity sha512-t09vSN3MdfsyCHoFcTRCH/iUtG7OJ0CsjzB8cjAmKc/va/kIgeDI/TxsigdncE/4be734m0cvIYwNaV4i2XqAw== -"@slorber/static-site-generator-webpack-plugin@^4.0.7": - version "4.0.7" - resolved "https://registry.yarnpkg.com/@slorber/static-site-generator-webpack-plugin/-/static-site-generator-webpack-plugin-4.0.7.tgz#fc1678bddefab014e2145cbe25b3ce4e1cfc36f3" - integrity 
sha512-Ug7x6z5lwrz0WqdnNFOMYrDQNTPAprvHLSh6+/fmml3qUiz6l5eq+2MzLKWtn/q5K5NpSiFsZTP/fck/3vjSxA== +"@sindresorhus/is@^5.2.0": + version "5.6.0" + resolved "https://registry.yarnpkg.com/@sindresorhus/is/-/is-5.6.0.tgz#41dd6093d34652cddb5d5bdeee04eafc33826668" + integrity sha512-TV7t8GKYaJWsn00tFDqBw8+Uqmr8A0fRU1tvTQhyZzGv0sJCGRQL3JGMI3ucuKo3XIZdUP+Lx7/gh2t3lewy7g== + +"@slorber/remark-comment@^1.0.0": + version "1.0.0" + resolved "https://registry.yarnpkg.com/@slorber/remark-comment/-/remark-comment-1.0.0.tgz#2a020b3f4579c89dec0361673206c28d67e08f5a" + integrity sha512-RCE24n7jsOj1M0UPvIQCHTe7fI0sFL4S2nwKVWwHyVr/wI/H8GosgsJGyhnsZoGFnD/P2hLf1mSbrrgSLN93NA== dependencies: - eval "^0.1.8" - p-map "^4.0.0" - webpack-sources "^3.2.2" + micromark-factory-space "^1.0.0" + micromark-util-character "^1.1.0" + micromark-util-symbol "^1.0.1" + +"@svgr/babel-plugin-add-jsx-attribute@8.0.0": + version "8.0.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-8.0.0.tgz#4001f5d5dd87fa13303e36ee106e3ff3a7eb8b22" + integrity sha512-b9MIk7yhdS1pMCZM8VeNfUlSKVRhsHZNMl5O9SfaX0l0t5wjdgu4IDzGB8bpnGBBOjGST3rRFVsaaEtI4W6f7g== "@svgr/babel-plugin-add-jsx-attribute@^5.4.0": version "5.4.0" resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-5.4.0.tgz#81ef61947bb268eb9d50523446f9c638fb355906" integrity sha512-ZFf2gs/8/6B8PnSofI0inYXr2SDNTDScPXhN7k5EqD4aZ3gi6u+rbmZHVB8IM3wDyx8ntKACZbtXSm7oZGRqVg== -"@svgr/babel-plugin-add-jsx-attribute@^6.5.1": - version "6.5.1" - resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-6.5.1.tgz#74a5d648bd0347bda99d82409d87b8ca80b9a1ba" - integrity sha512-9PYGcXrAxitycIjRmZB+Q0JaN07GZIWaTBIGQzfaZv+qr1n8X1XUEJ5rZ/vx6OVD9RRYlrNnXWExQXcmZeD/BQ== - -"@svgr/babel-plugin-remove-jsx-attribute@*": - version "6.5.0" - resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-6.5.0.tgz#652bfd4ed0a0699843585cda96faeb09d6e1306e" - integrity sha512-8zYdkym7qNyfXpWvu4yq46k41pyNM9SOstoWhKlm+IfdCE1DdnRKeMUPsWIEO/DEkaWxJ8T9esNdG3QwQ93jBA== +"@svgr/babel-plugin-remove-jsx-attribute@8.0.0": + version "8.0.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-8.0.0.tgz#69177f7937233caca3a1afb051906698f2f59186" + integrity sha512-BcCkm/STipKvbCl6b7QFrMh/vx00vIP63k2eM66MfHJzPr6O2U0jYEViXkHJWqXqQYjdeA9cuCl5KWmlwjDvbA== "@svgr/babel-plugin-remove-jsx-attribute@^5.4.0": version "5.4.0" resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-5.4.0.tgz#6b2c770c95c874654fd5e1d5ef475b78a0a962ef" integrity sha512-yaS4o2PgUtwLFGTKbsiAy6D0o3ugcUhWK0Z45umJ66EPWunAz9fuFw2gJuje6wqQvQWOTJvIahUwndOXb7QCPg== -"@svgr/babel-plugin-remove-jsx-empty-expression@*": - version "6.5.0" - resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-6.5.0.tgz#4b78994ab7d39032c729903fc2dd5c0fa4565cb8" - integrity sha512-NFdxMq3xA42Kb1UbzCVxplUc0iqSyM9X8kopImvFnB+uSDdzIHOdbs1op8ofAvVRtbg4oZiyRl3fTYeKcOe9Iw== +"@svgr/babel-plugin-remove-jsx-empty-expression@8.0.0": + version "8.0.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-8.0.0.tgz#c2c48104cfd7dcd557f373b70a56e9e3bdae1d44" + integrity 
sha512-5BcGCBfBxB5+XSDSWnhTThfI9jcO5f0Ai2V24gZpG+wXF14BzwxxdDb4g6trdOux0rhibGs385BeFMSmxtS3uA== "@svgr/babel-plugin-remove-jsx-empty-expression@^5.0.1": version "5.0.1" resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-5.0.1.tgz#25621a8915ed7ad70da6cea3d0a6dbc2ea933efd" integrity sha512-LA72+88A11ND/yFIMzyuLRSMJ+tRKeYKeQ+mR3DcAZ5I4h5CPWN9AHyUzJbWSYp/u2u0xhmgOe0+E41+GjEueA== +"@svgr/babel-plugin-replace-jsx-attribute-value@8.0.0": + version "8.0.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-8.0.0.tgz#8fbb6b2e91fa26ac5d4aa25c6b6e4f20f9c0ae27" + integrity sha512-KVQ+PtIjb1BuYT3ht8M5KbzWBhdAjjUPdlMtpuw/VjT8coTrItWX6Qafl9+ji831JaJcu6PJNKCV0bp01lBNzQ== + "@svgr/babel-plugin-replace-jsx-attribute-value@^5.0.1": version "5.0.1" resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-5.0.1.tgz#0b221fc57f9fcd10e91fe219e2cd0dd03145a897" integrity sha512-PoiE6ZD2Eiy5mK+fjHqwGOS+IXX0wq/YDtNyIgOrc6ejFnxN4b13pRpiIPbtPwHEc+NT2KCjteAcq33/F1Y9KQ== -"@svgr/babel-plugin-replace-jsx-attribute-value@^6.5.1": - version "6.5.1" - resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-6.5.1.tgz#fb9d22ea26d2bc5e0a44b763d4c46d5d3f596c60" - integrity sha512-8DPaVVE3fd5JKuIC29dqyMB54sA6mfgki2H2+swh+zNJoynC8pMPzOkidqHOSc6Wj032fhl8Z0TVn1GiPpAiJg== +"@svgr/babel-plugin-svg-dynamic-title@8.0.0": + version "8.0.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-8.0.0.tgz#1d5ba1d281363fc0f2f29a60d6d936f9bbc657b0" + integrity sha512-omNiKqwjNmOQJ2v6ge4SErBbkooV2aAWwaPFs2vUY7p7GhVkzRkJ00kILXQvRhA6miHnNpXv7MRnnSjdRjK8og== "@svgr/babel-plugin-svg-dynamic-title@^5.4.0": version "5.4.0" resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-5.4.0.tgz#139b546dd0c3186b6e5db4fefc26cb0baea729d7" integrity sha512-zSOZH8PdZOpuG1ZVx/cLVePB2ibo3WPpqo7gFIjLV9a0QsuQAzJiwwqmuEdTaW2pegyBE17Uu15mOgOcgabQZg== -"@svgr/babel-plugin-svg-dynamic-title@^6.5.1": - version "6.5.1" - resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-6.5.1.tgz#01b2024a2b53ffaa5efceaa0bf3e1d5a4c520ce4" - integrity sha512-FwOEi0Il72iAzlkaHrlemVurgSQRDFbk0OC8dSvD5fSBPHltNh7JtLsxmZUhjYBZo2PpcU/RJvvi6Q0l7O7ogw== +"@svgr/babel-plugin-svg-em-dimensions@8.0.0": + version "8.0.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-8.0.0.tgz#35e08df300ea8b1d41cb8f62309c241b0369e501" + integrity sha512-mURHYnu6Iw3UBTbhGwE/vsngtCIbHE43xCRK7kCw4t01xyGqb2Pd+WXekRRoFOBIY29ZoOhUCTEweDMdrjfi9g== "@svgr/babel-plugin-svg-em-dimensions@^5.4.0": version "5.4.0" resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-5.4.0.tgz#6543f69526632a133ce5cabab965deeaea2234a0" integrity sha512-cPzDbDA5oT/sPXDCUYoVXEmm3VIoAWAPT6mSPTJNbQaBNUuEKVKyGH93oDY4e42PYHRW67N5alJx/eEol20abw== -"@svgr/babel-plugin-svg-em-dimensions@^6.5.1": - version "6.5.1" - resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-6.5.1.tgz#dd3fa9f5b24eb4f93bcf121c3d40ff5facecb217" - integrity 
sha512-gWGsiwjb4tw+ITOJ86ndY/DZZ6cuXMNE/SjcDRg+HLuCmwpcjOktwRF9WgAiycTqJD/QXqL2f8IzE2Rzh7aVXA== +"@svgr/babel-plugin-transform-react-native-svg@8.1.0": + version "8.1.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-8.1.0.tgz#90a8b63998b688b284f255c6a5248abd5b28d754" + integrity sha512-Tx8T58CHo+7nwJ+EhUwx3LfdNSG9R2OKfaIXXs5soiy5HtgoAEkDay9LIimLOcG8dJQH1wPZp/cnAv6S9CrR1Q== "@svgr/babel-plugin-transform-react-native-svg@^5.4.0": version "5.4.0" resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-5.4.0.tgz#00bf9a7a73f1cad3948cdab1f8dfb774750f8c80" integrity sha512-3eYP/SaopZ41GHwXma7Rmxcv9uRslRDTY1estspeB1w1ueZWd/tPlMfEOoccYpEMZU3jD4OU7YitnXcF5hLW2Q== -"@svgr/babel-plugin-transform-react-native-svg@^6.5.1": - version "6.5.1" - resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-6.5.1.tgz#1d8e945a03df65b601551097d8f5e34351d3d305" - integrity sha512-2jT3nTayyYP7kI6aGutkyfJ7UMGtuguD72OjeGLwVNyfPRBD8zQthlvL+fAbAKk5n9ZNcvFkp/b1lZ7VsYqVJg== +"@svgr/babel-plugin-transform-svg-component@8.0.0": + version "8.0.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-8.0.0.tgz#013b4bfca88779711f0ed2739f3f7efcefcf4f7e" + integrity sha512-DFx8xa3cZXTdb/k3kfPeaixecQLgKh5NVBMwD0AQxOzcZawK4oo1Jh9LbrcACUivsCA7TLG8eeWgrDXjTMhRmw== "@svgr/babel-plugin-transform-svg-component@^5.5.0": version "5.5.0" resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-5.5.0.tgz#583a5e2a193e214da2f3afeb0b9e8d3250126b4a" integrity sha512-q4jSH1UUvbrsOtlo/tKcgSeiCHRSBdXoIoqX1pgcKK/aU3JD27wmMKwGtpB8qRYUYoyXvfGxUVKchLuR5pB3rQ== -"@svgr/babel-plugin-transform-svg-component@^6.5.1": - version "6.5.1" - resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-6.5.1.tgz#48620b9e590e25ff95a80f811544218d27f8a250" - integrity sha512-a1p6LF5Jt33O3rZoVRBqdxL350oge54iZWHNI6LJB5tQ7EelvD/Mb1mfBiZNAan0dt4i3VArkFRjA4iObuNykQ== +"@svgr/babel-preset@8.1.0": + version "8.1.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-preset/-/babel-preset-8.1.0.tgz#0e87119aecdf1c424840b9d4565b7137cabf9ece" + integrity sha512-7EYDbHE7MxHpv4sxvnVPngw5fuR6pw79SkcrILHJ/iMpuKySNCl5W1qcwPEpU+LgyRXOaAFgH0KhwD18wwg6ug== + dependencies: + "@svgr/babel-plugin-add-jsx-attribute" "8.0.0" + "@svgr/babel-plugin-remove-jsx-attribute" "8.0.0" + "@svgr/babel-plugin-remove-jsx-empty-expression" "8.0.0" + "@svgr/babel-plugin-replace-jsx-attribute-value" "8.0.0" + "@svgr/babel-plugin-svg-dynamic-title" "8.0.0" + "@svgr/babel-plugin-svg-em-dimensions" "8.0.0" + "@svgr/babel-plugin-transform-react-native-svg" "8.1.0" + "@svgr/babel-plugin-transform-svg-component" "8.0.0" "@svgr/babel-preset@^5.5.0": version "5.5.0" @@ -1911,19 +1866,16 @@ "@svgr/babel-plugin-transform-react-native-svg" "^5.4.0" "@svgr/babel-plugin-transform-svg-component" "^5.5.0" -"@svgr/babel-preset@^6.5.1": - version "6.5.1" - resolved "https://registry.yarnpkg.com/@svgr/babel-preset/-/babel-preset-6.5.1.tgz#b90de7979c8843c5c580c7e2ec71f024b49eb828" - integrity sha512-6127fvO/FF2oi5EzSQOAjo1LE3OtNVh11R+/8FXa+mHx1ptAaS4cknIjnUA7e6j6fwGGJ17NzaTJFUwOV2zwCw== +"@svgr/core@8.1.0": + version "8.1.0" + resolved 
"https://registry.yarnpkg.com/@svgr/core/-/core-8.1.0.tgz#41146f9b40b1a10beaf5cc4f361a16a3c1885e88" + integrity sha512-8QqtOQT5ACVlmsvKOJNEaWmRPmcojMOzCz4Hs2BGG/toAp/K38LcsMRyLp349glq5AzJbCEeimEoxaX6v/fLrA== dependencies: - "@svgr/babel-plugin-add-jsx-attribute" "^6.5.1" - "@svgr/babel-plugin-remove-jsx-attribute" "*" - "@svgr/babel-plugin-remove-jsx-empty-expression" "*" - "@svgr/babel-plugin-replace-jsx-attribute-value" "^6.5.1" - "@svgr/babel-plugin-svg-dynamic-title" "^6.5.1" - "@svgr/babel-plugin-svg-em-dimensions" "^6.5.1" - "@svgr/babel-plugin-transform-react-native-svg" "^6.5.1" - "@svgr/babel-plugin-transform-svg-component" "^6.5.1" + "@babel/core" "^7.21.3" + "@svgr/babel-preset" "8.1.0" + camelcase "^6.2.0" + cosmiconfig "^8.1.3" + snake-case "^3.0.4" "@svgr/core@^5.5.0": version "5.5.0" @@ -1934,16 +1886,13 @@ camelcase "^6.2.0" cosmiconfig "^7.0.0" -"@svgr/core@^6.5.1": - version "6.5.1" - resolved "https://registry.yarnpkg.com/@svgr/core/-/core-6.5.1.tgz#d3e8aa9dbe3fbd747f9ee4282c1c77a27410488a" - integrity sha512-/xdLSWxK5QkqG524ONSjvg3V/FkNyCv538OIBdQqPNaAta3AsXj/Bd2FbvR87yMbXO2hFSWiAe/Q6IkVPDw+mw== +"@svgr/hast-util-to-babel-ast@8.0.0": + version "8.0.0" + resolved "https://registry.yarnpkg.com/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-8.0.0.tgz#6952fd9ce0f470e1aded293b792a2705faf4ffd4" + integrity sha512-EbDKwO9GpfWP4jN9sGdYwPBU0kdomaPIL2Eu4YwmgP+sJeXT+L7bMwJUBnhzfH8Q2qMBqZ4fJwpCyYsAN3mt2Q== dependencies: - "@babel/core" "^7.19.6" - "@svgr/babel-preset" "^6.5.1" - "@svgr/plugin-jsx" "^6.5.1" - camelcase "^6.2.0" - cosmiconfig "^7.0.1" + "@babel/types" "^7.21.3" + entities "^4.4.0" "@svgr/hast-util-to-babel-ast@^5.5.0": version "5.5.0" @@ -1952,13 +1901,15 @@ dependencies: "@babel/types" "^7.12.6" -"@svgr/hast-util-to-babel-ast@^6.5.1": - version "6.5.1" - resolved "https://registry.yarnpkg.com/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-6.5.1.tgz#81800bd09b5bcdb968bf6ee7c863d2288fdb80d2" - integrity sha512-1hnUxxjd83EAxbL4a0JDJoD3Dao3hmjvyvyEV8PzWmLK3B9m9NPlW7GKjFyoWE8nM7HnXzPcmmSyOW8yOddSXw== +"@svgr/plugin-jsx@8.1.0": + version "8.1.0" + resolved "https://registry.yarnpkg.com/@svgr/plugin-jsx/-/plugin-jsx-8.1.0.tgz#96969f04a24b58b174ee4cd974c60475acbd6928" + integrity sha512-0xiIyBsLlr8quN+WyuxooNW9RJ0Dpr8uOnH/xrCVO8GLUcwHISwj1AG0k+LFzteTkAA0GbX0kj9q6Dk70PTiPA== dependencies: - "@babel/types" "^7.20.0" - entities "^4.4.0" + "@babel/core" "^7.21.3" + "@svgr/babel-preset" "8.1.0" + "@svgr/hast-util-to-babel-ast" "8.0.0" + svg-parser "^2.0.4" "@svgr/plugin-jsx@^5.5.0": version "5.5.0" @@ -1970,15 +1921,14 @@ "@svgr/hast-util-to-babel-ast" "^5.5.0" svg-parser "^2.0.2" -"@svgr/plugin-jsx@^6.5.1": - version "6.5.1" - resolved "https://registry.yarnpkg.com/@svgr/plugin-jsx/-/plugin-jsx-6.5.1.tgz#0e30d1878e771ca753c94e69581c7971542a7072" - integrity sha512-+UdQxI3jgtSjCykNSlEMuy1jSRQlGC7pqBCPvkG/2dATdWo082zHTTK3uhnAju2/6XpE6B5mZ3z4Z8Ns01S8Gw== +"@svgr/plugin-svgo@8.1.0": + version "8.1.0" + resolved "https://registry.yarnpkg.com/@svgr/plugin-svgo/-/plugin-svgo-8.1.0.tgz#b115b7b967b564f89ac58feae89b88c3decd0f00" + integrity sha512-Ywtl837OGO9pTLIN/onoWLmDQ4zFUycI1g76vuKGEz6evR/ZTJlJuz3G/fIkb6OVBJ2g0o6CGJzaEjfmEo3AHA== dependencies: - "@babel/core" "^7.19.6" - "@svgr/babel-preset" "^6.5.1" - "@svgr/hast-util-to-babel-ast" "^6.5.1" - svg-parser "^2.0.4" + cosmiconfig "^8.1.3" + deepmerge "^4.3.1" + svgo "^3.0.2" "@svgr/plugin-svgo@^5.5.0": version "5.5.0" @@ -1989,15 +1939,6 @@ deepmerge "^4.2.2" svgo "^1.2.2" -"@svgr/plugin-svgo@^6.5.1": - version "6.5.1" - 
resolved "https://registry.yarnpkg.com/@svgr/plugin-svgo/-/plugin-svgo-6.5.1.tgz#0f91910e988fc0b842f88e0960c2862e022abe84" - integrity sha512-omvZKf8ixP9z6GWgwbtmP9qQMPX4ODXi+wzbVZgomNFsUIlHA1sf4fThdwTWSsZGgvGAG6yE+b/F5gWUkcZ/iQ== - dependencies: - cosmiconfig "^7.0.1" - deepmerge "^4.2.2" - svgo "^2.8.0" - "@svgr/webpack@^5.5.0": version "5.5.0" resolved "https://registry.yarnpkg.com/@svgr/webpack/-/webpack-5.5.0.tgz#aae858ee579f5fa8ce6c3166ef56c6a1b381b640" @@ -2012,26 +1953,26 @@ "@svgr/plugin-svgo" "^5.5.0" loader-utils "^2.0.0" -"@svgr/webpack@^6.2.1": - version "6.5.1" - resolved "https://registry.yarnpkg.com/@svgr/webpack/-/webpack-6.5.1.tgz#ecf027814fc1cb2decc29dc92f39c3cf691e40e8" - integrity sha512-cQ/AsnBkXPkEK8cLbv4Dm7JGXq2XrumKnL1dRpJD9rIO2fTIlJI9a1uCciYG1F2aUsox/hJQyNGbt3soDxSRkA== +"@svgr/webpack@^8.1.0": + version "8.1.0" + resolved "https://registry.yarnpkg.com/@svgr/webpack/-/webpack-8.1.0.tgz#16f1b5346f102f89fda6ec7338b96a701d8be0c2" + integrity sha512-LnhVjMWyMQV9ZmeEy26maJk+8HTIbd59cH4F2MJ439k9DqejRisfFNGAPvRYlKETuh9LrImlS8aKsBgKjMA8WA== dependencies: - "@babel/core" "^7.19.6" - "@babel/plugin-transform-react-constant-elements" "^7.18.12" - "@babel/preset-env" "^7.19.4" + "@babel/core" "^7.21.3" + "@babel/plugin-transform-react-constant-elements" "^7.21.3" + "@babel/preset-env" "^7.20.2" "@babel/preset-react" "^7.18.6" - "@babel/preset-typescript" "^7.18.6" - "@svgr/core" "^6.5.1" - "@svgr/plugin-jsx" "^6.5.1" - "@svgr/plugin-svgo" "^6.5.1" + "@babel/preset-typescript" "^7.21.0" + "@svgr/core" "8.1.0" + "@svgr/plugin-jsx" "8.1.0" + "@svgr/plugin-svgo" "8.1.0" -"@szmarczak/http-timer@^1.1.2": - version "1.1.2" - resolved "https://registry.yarnpkg.com/@szmarczak/http-timer/-/http-timer-1.1.2.tgz#b1665e2c461a2cd92f4c1bbf50d5454de0d4b421" - integrity sha512-XIB2XbzHTN6ieIjfIMV9hlVcfPU26s2vafYWQcZHWXHOxiaRZYEDKEwdl129Zyg50+foYV2jCgtrqSA6qNuNSA== +"@szmarczak/http-timer@^5.0.1": + version "5.0.1" + resolved "https://registry.yarnpkg.com/@szmarczak/http-timer/-/http-timer-5.0.1.tgz#c7c1bf1141cdd4751b0399c8fc7b8b664cd5be3a" + integrity sha512-+PmQX0PiAYPMeVYe237LJAYvOMYW1j2rH5YROyS3b4CTVJum34HfRvKvAzozHAQG0TnHNdUfY9nCeUyRAs//cw== dependencies: - defer-to-connect "^1.0.1" + defer-to-connect "^2.0.1" "@trysound/sax@0.2.0": version "0.2.0" @@ -2046,105 +1987,134 @@ "@types/estree" "*" "@types/body-parser@*": - version "1.19.2" - resolved "https://registry.yarnpkg.com/@types/body-parser/-/body-parser-1.19.2.tgz#aea2059e28b7658639081347ac4fab3de166e6f0" - integrity sha512-ALYone6pm6QmwZoAgeyNksccT9Q4AWZQ6PvfwR37GT6r6FWUPguq6sUmNGSMV2Wr761oQoBxwGGa6DR5o1DC9g== + version "1.19.5" + resolved "https://registry.yarnpkg.com/@types/body-parser/-/body-parser-1.19.5.tgz#04ce9a3b677dc8bd681a17da1ab9835dc9d3ede4" + integrity sha512-fB3Zu92ucau0iQ0JMCFQE7b/dv8Ot07NI3KaZIkIUNXq82k4eBAqUaneXfleGY9JWskeS9y+u0nXMyspcuQrCg== dependencies: "@types/connect" "*" "@types/node" "*" "@types/bonjour@^3.5.9": - version "3.5.10" - resolved "https://registry.yarnpkg.com/@types/bonjour/-/bonjour-3.5.10.tgz#0f6aadfe00ea414edc86f5d106357cda9701e275" - integrity sha512-p7ienRMiS41Nu2/igbJxxLDWrSZ0WxM8UQgCeO9KhoVF7cOVFkrKsiDr1EsJIla8vV3oEEjGcz11jc5yimhzZw== + version "3.5.13" + resolved "https://registry.yarnpkg.com/@types/bonjour/-/bonjour-3.5.13.tgz#adf90ce1a105e81dd1f9c61fdc5afda1bfb92956" + integrity sha512-z9fJ5Im06zvUL548KvYNecEVlA7cVDkGUi6kZusb04mpyEFKCIZJvloCcmpmLaIahDpOQGHaHmG6imtPMmPXGQ== dependencies: "@types/node" "*" "@types/buble@^0.20.0": - version "0.20.1" - resolved 
"https://registry.yarnpkg.com/@types/buble/-/buble-0.20.1.tgz#cba009801fd417b0d2eb8fa6824b537842e05803" - integrity sha512-itmN3lGSTvXg9IImY5j290H+n0B3PpZST6AgEfJJDXfaMx2cdJJZro3/Ay+bZZdIAa25Z5rnoo9rHiPCbANZoQ== + version "0.20.5" + resolved "https://registry.yarnpkg.com/@types/buble/-/buble-0.20.5.tgz#2c4bcda910c6c46e027273fa8cdb0d34588a22a3" + integrity sha512-CNpql2WPrZloamMweLkyM42nPsUVa10NDurkhTB5+tGu8SstDd568dothJi7tFSAsbqJK0rSb83W9ZwGt8My/A== dependencies: magic-string "^0.25.0" "@types/connect-history-api-fallback@^1.3.5": - version "1.3.5" - resolved "https://registry.yarnpkg.com/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.3.5.tgz#d1f7a8a09d0ed5a57aee5ae9c18ab9b803205dae" - integrity sha512-h8QJa8xSb1WD4fpKBDcATDNGXghFj6/3GRWG6dhmRcu0RX1Ubasur2Uvx5aeEwlf0MwblEC2bMzzMQntxnw/Cw== + version "1.5.4" + resolved "https://registry.yarnpkg.com/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.5.4.tgz#7de71645a103056b48ac3ce07b3520b819c1d5b3" + integrity sha512-n6Cr2xS1h4uAulPRdlw6Jl6s1oG8KrVilPN2yUITEs+K48EzMJJ3W1xy8K5eWuFvjp3R74AOIGSmp2UfBJ8HFw== dependencies: "@types/express-serve-static-core" "*" "@types/node" "*" "@types/connect@*": - version "3.4.35" - resolved "https://registry.yarnpkg.com/@types/connect/-/connect-3.4.35.tgz#5fcf6ae445e4021d1fc2219a4873cc73a3bb2ad1" - integrity sha512-cdeYyv4KWoEgpBISTxWvqYsVy444DOqehiF3fM3ne10AmJ62RSyNkUnxMJXHQWRQQX2eR94m5y1IZyDwBjV9FQ== + version "3.4.38" + resolved "https://registry.yarnpkg.com/@types/connect/-/connect-3.4.38.tgz#5ba7f3bc4fbbdeaff8dded952e5ff2cc53f8d858" + integrity sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug== dependencies: "@types/node" "*" -"@types/debug@^4.0.0": - version "4.1.7" - resolved "https://registry.yarnpkg.com/@types/debug/-/debug-4.1.7.tgz#7cc0ea761509124709b8b2d1090d8f6c17aadb82" - integrity sha512-9AonUzyTjXXhEOa0DnqpzZi6VHlqKMswga9EXjpXnnqxwLtdvPPtlO8evrI5D9S6asFRCQ6v+wpiUKbw+vKqyg== - dependencies: - "@types/ms" "*" +"@types/d3-scale-chromatic@^3.0.0": + version "3.0.3" + resolved "https://registry.yarnpkg.com/@types/d3-scale-chromatic/-/d3-scale-chromatic-3.0.3.tgz#fc0db9c10e789c351f4c42d96f31f2e4df8f5644" + integrity sha512-laXM4+1o5ImZv3RpFAsTRn3TEkzqkytiOY0Dz0sq5cnd1dtNlk6sHLon4OvqaiJb28T0S/TdsBI3Sjsy+keJrw== -"@types/eslint-scope@^3.7.3": - version "3.7.4" - resolved "https://registry.yarnpkg.com/@types/eslint-scope/-/eslint-scope-3.7.4.tgz#37fc1223f0786c39627068a12e94d6e6fc61de16" - integrity sha512-9K4zoImiZc3HlIp6AVUDE4CWYx22a+lhSZMYNpbjW04+YF0KWj4pJXnEMjdnFTiQibFFmElcsasJXDbdI/EPhA== +"@types/d3-scale@^4.0.3": + version "4.0.8" + resolved "https://registry.yarnpkg.com/@types/d3-scale/-/d3-scale-4.0.8.tgz#d409b5f9dcf63074464bf8ddfb8ee5a1f95945bb" + integrity sha512-gkK1VVTr5iNiYJ7vWDI+yUFFlszhNMtVeneJ6lUTKPjprsvLLI9/tgEGiXJOnlINJA8FyA88gfnQsHbybVZrYQ== dependencies: - "@types/eslint" "*" - "@types/estree" "*" + "@types/d3-time" "*" + +"@types/d3-time@*": + version "3.0.3" + resolved "https://registry.yarnpkg.com/@types/d3-time/-/d3-time-3.0.3.tgz#3c186bbd9d12b9d84253b6be6487ca56b54f88be" + integrity sha512-2p6olUZ4w3s+07q3Tm2dbiMZy5pCDfYwtLXXHUnVzXgQlZ/OyPtUz6OL382BkOuGlLXqfT+wqv8Fw2v8/0geBw== -"@types/eslint@*": - version "8.4.10" - resolved "https://registry.yarnpkg.com/@types/eslint/-/eslint-8.4.10.tgz#19731b9685c19ed1552da7052b6f668ed7eb64bb" - integrity sha512-Sl/HOqN8NKPmhWo2VBEPm0nvHnu2LL3v9vKo8MEq0EtbJ4eVzGPl41VNPvn5E1i5poMk4/XD8UriLHpJvEP/Nw== +"@types/debug@^4.0.0": + version "4.1.12" + resolved 
"https://registry.yarnpkg.com/@types/debug/-/debug-4.1.12.tgz#a155f21690871953410df4b6b6f53187f0500917" + integrity sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ== dependencies: - "@types/estree" "*" - "@types/json-schema" "*" + "@types/ms" "*" "@types/estree-jsx@^1.0.0": - version "1.0.0" - resolved "https://registry.yarnpkg.com/@types/estree-jsx/-/estree-jsx-1.0.0.tgz#7bfc979ab9f692b492017df42520f7f765e98df1" - integrity sha512-3qvGd0z8F2ENTGr/GG1yViqfiKmRfrXVx5sJyHGFu3z7m5g5utCQtGp/g29JnjflhtQJBv1WDQukHiT58xPcYQ== + version "1.0.5" + resolved "https://registry.yarnpkg.com/@types/estree-jsx/-/estree-jsx-1.0.5.tgz#858a88ea20f34fe65111f005a689fa1ebf70dc18" + integrity sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg== dependencies: "@types/estree" "*" -"@types/estree@*", "@types/estree@^1.0.0": - version "1.0.0" - resolved "https://registry.yarnpkg.com/@types/estree/-/estree-1.0.0.tgz#5fb2e536c1ae9bf35366eed879e827fa59ca41c2" - integrity sha512-WulqXMDUTYAXCjZnk6JtIHPigp55cVtDgDrO2gHRwhyJto21+1zbVCtOYB2L1F9w4qCQ0rOGWBnBe0FNTiEJIQ== +"@types/estree@*", "@types/estree@^1.0.0", "@types/estree@^1.0.5": + version "1.0.6" + resolved "https://registry.yarnpkg.com/@types/estree/-/estree-1.0.6.tgz#628effeeae2064a1b4e79f78e81d87b7e5fc7b50" + integrity sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw== -"@types/estree@^0.0.51": - version "0.0.51" - resolved "https://registry.yarnpkg.com/@types/estree/-/estree-0.0.51.tgz#cfd70924a25a3fd32b218e5e420e6897e1ac4f40" - integrity sha512-CuPgU6f3eT/XgKKPqKd/gLZV1Xmvf1a2R5POBOGQa6uv82xpls89HU5zKeVoyR8XzHd1RGNOlQlvUe3CFkjWNQ== +"@types/express-serve-static-core@*", "@types/express-serve-static-core@^5.0.0": + version "5.0.0" + resolved "https://registry.yarnpkg.com/@types/express-serve-static-core/-/express-serve-static-core-5.0.0.tgz#91f06cda1049e8f17eeab364798ed79c97488a1c" + integrity sha512-AbXMTZGt40T+KON9/Fdxx0B2WK5hsgxcfXJLr5bFpZ7b4JCex2WyQPTEKdXqfHiY5nKKBScZ7yCoO6Pvgxfvnw== + dependencies: + "@types/node" "*" + "@types/qs" "*" + "@types/range-parser" "*" + "@types/send" "*" -"@types/express-serve-static-core@*", "@types/express-serve-static-core@^4.17.31": - version "4.17.32" - resolved "https://registry.yarnpkg.com/@types/express-serve-static-core/-/express-serve-static-core-4.17.32.tgz#93dda387f5516af616d8d3f05f2c4c79d81e1b82" - integrity sha512-aI5h/VOkxOF2Z1saPy0Zsxs5avets/iaiAJYznQFm5By/pamU31xWKL//epiF4OfUA2qTOc9PV6tCUjhO8wlZA== +"@types/express-serve-static-core@^4.17.33": + version "4.19.6" + resolved "https://registry.yarnpkg.com/@types/express-serve-static-core/-/express-serve-static-core-4.19.6.tgz#e01324c2a024ff367d92c66f48553ced0ab50267" + integrity sha512-N4LZ2xG7DatVqhCZzOGb1Yi5lMbXSZcmdLDe9EzSndPV2HpWYWzRbaerl2n27irrm94EPpprqa8KpskPT085+A== dependencies: "@types/node" "*" "@types/qs" "*" "@types/range-parser" "*" + "@types/send" "*" + +"@types/express@*": + version "5.0.0" + resolved "https://registry.yarnpkg.com/@types/express/-/express-5.0.0.tgz#13a7d1f75295e90d19ed6e74cab3678488eaa96c" + integrity sha512-DvZriSMehGHL1ZNLzi6MidnsDhUZM/x2pRdDIKdwbUNqqwHxMlRdkxtn6/EPKyqKpHqTl/4nRZsRNLpZxZRpPQ== + dependencies: + "@types/body-parser" "*" + "@types/express-serve-static-core" "^5.0.0" + "@types/qs" "*" + "@types/serve-static" "*" -"@types/express@*", "@types/express@^4.17.13": - version "4.17.15" - resolved 
"https://registry.yarnpkg.com/@types/express/-/express-4.17.15.tgz#9290e983ec8b054b65a5abccb610411953d417ff" - integrity sha512-Yv0k4bXGOH+8a+7bELd2PqHQsuiANB+A8a4gnQrkRWzrkKlb6KHaVvyXhqs04sVW/OWlbPyYxRgYlIXLfrufMQ== +"@types/express@^4.17.13": + version "4.17.21" + resolved "https://registry.yarnpkg.com/@types/express/-/express-4.17.21.tgz#c26d4a151e60efe0084b23dc3369ebc631ed192d" + integrity sha512-ejlPM315qwLpaQlQDTjPdsUFSc6ZsP4AN6AlWnogPjQ7CVi7PYF3YVz+CY3jE2pwYf7E/7HlDAN0rV2GxTG0HQ== dependencies: "@types/body-parser" "*" - "@types/express-serve-static-core" "^4.17.31" + "@types/express-serve-static-core" "^4.17.33" "@types/qs" "*" "@types/serve-static" "*" +"@types/gtag.js@^0.0.12": + version "0.0.12" + resolved "https://registry.yarnpkg.com/@types/gtag.js/-/gtag.js-0.0.12.tgz#095122edca896689bdfcdd73b057e23064d23572" + integrity sha512-YQV9bUsemkzG81Ea295/nF/5GijnD2Af7QhEofh7xu+kvCN6RdodgNwwGWXB5GMI3NoyvQo0odNctoH/qLMIpg== + "@types/hast@^2.0.0": - version "2.3.4" - resolved "https://registry.yarnpkg.com/@types/hast/-/hast-2.3.4.tgz#8aa5ef92c117d20d974a82bdfb6a648b08c0bafc" - integrity sha512-wLEm0QvaoawEDoTRwzTXp4b4jpwiJDvR5KMnFnVodm3scufTlBOWRD6N1OBf9TZMhjlNsSfcO5V+7AF4+Vy+9g== + version "2.3.10" + resolved "https://registry.yarnpkg.com/@types/hast/-/hast-2.3.10.tgz#5c9d9e0b304bbb8879b857225c5ebab2d81d7643" + integrity sha512-McWspRw8xx8J9HurkVBfYj0xKoE25tOFlHGdx4MJ5xORQrMGZNqJhVQWaIbm6Oyla5kYOXtDiopzKRJzEOkwJw== + dependencies: + "@types/unist" "^2" + +"@types/hast@^3.0.0": + version "3.0.4" + resolved "https://registry.yarnpkg.com/@types/hast/-/hast-3.0.4.tgz#1d6b39993b82cea6ad783945b0508c25903e15aa" + integrity sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ== dependencies: "@types/unist" "*" @@ -2158,36 +2128,46 @@ resolved "https://registry.yarnpkg.com/@types/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz#4fc33a00c1d0c16987b1a20cf92d20614c55ac35" integrity sha512-oh/6byDPnL1zeNXFrDXFLyZjkr1MsBG667IM792caf1L2UPOOMf65NFzjUH/ltyfwjAGfs1rsX1eftK0jC/KIg== +"@types/http-cache-semantics@^4.0.2": + version "4.0.4" + resolved "https://registry.yarnpkg.com/@types/http-cache-semantics/-/http-cache-semantics-4.0.4.tgz#b979ebad3919799c979b17c72621c0bc0a31c6c4" + integrity sha512-1m0bIFVc7eJWyve9S0RnuRgcQqF/Xd5QsUZAZeQFr1Q3/p9JWoQQEqmVy+DPTNpGXwhgIetAoYF8JSc33q29QA== + +"@types/http-errors@*": + version "2.0.4" + resolved "https://registry.yarnpkg.com/@types/http-errors/-/http-errors-2.0.4.tgz#7eb47726c391b7345a6ec35ad7f4de469cf5ba4f" + integrity sha512-D0CFMMtydbJAegzOyHjtiKPLlvnm3iTZyZRSZoLq2mRhDdmLfIWOCYPfQJ4cu2erKghU++QvjcUjp/5h7hESpA== + "@types/http-proxy@^1.17.8": - version "1.17.9" - resolved "https://registry.yarnpkg.com/@types/http-proxy/-/http-proxy-1.17.9.tgz#7f0e7931343761efde1e2bf48c40f02f3f75705a" - integrity sha512-QsbSjA/fSk7xB+UXlCT3wHBy5ai9wOcNDWwZAtud+jXhwOM3l+EYZh8Lng4+/6n8uar0J7xILzqftJdJ/Wdfkw== + version "1.17.15" + resolved "https://registry.yarnpkg.com/@types/http-proxy/-/http-proxy-1.17.15.tgz#12118141ce9775a6499ecb4c01d02f90fc839d36" + integrity sha512-25g5atgiVNTIv0LBDTg1H74Hvayx0ajtJPLLcYE3whFv75J0pWNtOBzaXJQgDTmrX1bx5U9YC2w/n65BN1HwRQ== dependencies: "@types/node" "*" "@types/istanbul-lib-coverage@*", "@types/istanbul-lib-coverage@^2.0.0": - version "2.0.4" - resolved "https://registry.yarnpkg.com/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.4.tgz#8467d4b3c087805d63580480890791277ce35c44" - integrity sha512-z/QT1XN4K4KYuslS23k62yDIDLwLFkzxOuMplDtObz0+y7VqJCaO2o+SPwHCvLFZh7xazvvoor2tA/hPz9ee7g== + 
version "2.0.6" + resolved "https://registry.yarnpkg.com/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz#7739c232a1fee9b4d3ce8985f314c0c6d33549d7" + integrity sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w== "@types/istanbul-lib-report@*": - version "3.0.0" - resolved "https://registry.yarnpkg.com/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz#c14c24f18ea8190c118ee7562b7ff99a36552686" - integrity sha512-plGgXAPfVKFoYfa9NpYDAkseG+g6Jr294RqeqcqDixSbU34MZVJRi/P+7Y8GDpzkEwLaGZZOpKIEmeVZNtKsrg== + version "3.0.3" + resolved "https://registry.yarnpkg.com/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz#53047614ae72e19fc0401d872de3ae2b4ce350bf" + integrity sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA== dependencies: "@types/istanbul-lib-coverage" "*" "@types/istanbul-reports@^3.0.0": - version "3.0.1" - resolved "https://registry.yarnpkg.com/@types/istanbul-reports/-/istanbul-reports-3.0.1.tgz#9153fe98bba2bd565a63add9436d6f0d7f8468ff" - integrity sha512-c3mAZEuK0lvBp8tmuL74XRKn1+y2dcwOUpH7x4WrF6gk1GIgiluDRgMYQtw2OFcBvAJWlt6ASU3tSqxp0Uu0Aw== + version "3.0.4" + resolved "https://registry.yarnpkg.com/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz#0f03e3d2f670fbdac586e34b433783070cc16f54" + integrity sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ== dependencies: "@types/istanbul-lib-report" "*" -"@types/json-schema@*", "@types/json-schema@^7.0.4", "@types/json-schema@^7.0.5", "@types/json-schema@^7.0.8", "@types/json-schema@^7.0.9": - version "7.0.11" - resolved "https://registry.yarnpkg.com/@types/json-schema/-/json-schema-7.0.11.tgz#d421b6c527a3037f7c84433fd2c4229e016863d3" - integrity sha512-wOuvG1SN4Us4rez+tylwwwCV1psiNVOkJeM3AUWUNWg/jDQY2+HE/444y5gc+jBmRqASOm2Oeh5c1axHobwRKQ== +"@types/json-schema@^7.0.4", "@types/json-schema@^7.0.5", "@types/json-schema@^7.0.8", "@types/json-schema@^7.0.9": + version "7.0.15" + resolved "https://registry.yarnpkg.com/@types/json-schema/-/json-schema-7.0.15.tgz#596a1747233694d50f6ad8a7869fcb6f56cf5841" + integrity sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA== "@types/lodash.debounce@4.0.7": version "4.0.7" @@ -2196,42 +2176,60 @@ dependencies: "@types/lodash" "*" +"@types/lodash.escape@4.0.0": + version "4.0.0" + resolved "https://registry.yarnpkg.com/@types/lodash.escape/-/lodash.escape-4.0.0.tgz#0f60dfe769a6296481e93f27306acea482546efb" + integrity sha512-z4SlV7GOKC7nnPlNoLUVugKwUwa9+XJtLimwq/7OBBzKePMcGsV4QJR5jyoBN4c4Boo6mbmyD6My9IAz3xWy0w== + dependencies: + "@types/lodash" "*" + "@types/lodash@*": - version "4.14.191" - resolved "https://registry.yarnpkg.com/@types/lodash/-/lodash-4.14.191.tgz#09511e7f7cba275acd8b419ddac8da9a6a79e2fa" - integrity sha512-BdZ5BCCvho3EIXw6wUCXHe7rS53AIDPLE+JzwgT+OsJk53oBfbSmZZ7CX4VaRoN78N+TJpFi9QPlfIVNmJYWxQ== + version "4.17.12" + resolved "https://registry.yarnpkg.com/@types/lodash/-/lodash-4.17.12.tgz#25d71312bf66512105d71e55d42e22c36bcfc689" + integrity sha512-sviUmCE8AYdaF/KIHLDJBQgeYzPBI0vf/17NaYehBJfYD1j6/L95Slh07NlyK2iNyBNaEkb3En2jRt+a8y3xZQ== "@types/mdast@^3.0.0": - version "3.0.10" - resolved "https://registry.yarnpkg.com/@types/mdast/-/mdast-3.0.10.tgz#4724244a82a4598884cbbe9bcfd73dff927ee8af" - integrity sha512-W864tg/Osz1+9f4lrGTZpCSO5/z4608eUp19tbozkq2HJK6i3z1kT0H9tlADXuYIb1YYOBByU4Jsqkk75q48qA== + version "3.0.15" + resolved 
"https://registry.yarnpkg.com/@types/mdast/-/mdast-3.0.15.tgz#49c524a263f30ffa28b71ae282f813ed000ab9f5" + integrity sha512-LnwD+mUEfxWMa1QpDraczIn6k0Ee3SMicuYSSzS6ZYl2gKS09EClnJYGd8Du6rfc5r/GZEk5o1mRb8TaTj03sQ== + dependencies: + "@types/unist" "^2" + +"@types/mdast@^4.0.0", "@types/mdast@^4.0.2": + version "4.0.4" + resolved "https://registry.yarnpkg.com/@types/mdast/-/mdast-4.0.4.tgz#7ccf72edd2f1aa7dd3437e180c64373585804dd6" + integrity sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA== dependencies: "@types/unist" "*" "@types/mdx@^2.0.0": - version "2.0.3" - resolved "https://registry.yarnpkg.com/@types/mdx/-/mdx-2.0.3.tgz#43fd32414f17fcbeced3578109a6edd877a2d96e" - integrity sha512-IgHxcT3RC8LzFLhKwP3gbMPeaK7BM9eBH46OdapPA7yvuIUJ8H6zHZV53J8hGZcTSnt95jANt+rTBNUUc22ACQ== + version "2.0.13" + resolved "https://registry.yarnpkg.com/@types/mdx/-/mdx-2.0.13.tgz#68f6877043d377092890ff5b298152b0a21671bd" + integrity sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw== -"@types/mermaid@^8.2.9": - version "8.2.9" - resolved "https://registry.yarnpkg.com/@types/mermaid/-/mermaid-8.2.9.tgz#1844505dcffcd47703e94628a6200583d35c2c76" - integrity sha512-f1i8fNoVFVJXedk+R7GcEk4KoOWzWAU3CzFqlVw1qWKktfsataBERezCz1pOdKy8Ec02ZdPQXGM7NU2lPHABYQ== - -"@types/mime@*": - version "3.0.1" - resolved "https://registry.yarnpkg.com/@types/mime/-/mime-3.0.1.tgz#5f8f2bca0a5863cb69bc0b0acd88c96cb1d4ae10" - integrity sha512-Y4XFY5VJAuw0FgAqPNd6NNoV44jbq9Bz2L7Rh/J6jLTiHBSBJa9fxqQIvkIld4GsoDOcCbvzOUAbLPsSKKg+uA== +"@types/mime@^1": + version "1.3.5" + resolved "https://registry.yarnpkg.com/@types/mime/-/mime-1.3.5.tgz#1ef302e01cf7d2b5a0fa526790c9123bf1d06690" + integrity sha512-/pyBZWSLD2n0dcHE3hq8s8ZvcETHtEuF+3E7XVt0Ig2nvsVQXdghHVcEkIWjy9A0wKfTn97a/PSDYohKIlnP/w== "@types/ms@*": - version "0.7.31" - resolved "https://registry.yarnpkg.com/@types/ms/-/ms-0.7.31.tgz#31b7ca6407128a3d2bbc27fe2d21b345397f6197" - integrity sha512-iiUgKzV9AuaEkZqkOLDIvlQiL6ltuZd9tGcW3gwpnX8JbuiuhFlEGmmFXEXkN50Cvq7Os88IY2v0dkDqXYWVgA== + version "0.7.34" + resolved "https://registry.yarnpkg.com/@types/ms/-/ms-0.7.34.tgz#10964ba0dee6ac4cd462e2795b6bebd407303433" + integrity sha512-nG96G3Wp6acyAgJqGasjODb+acrI7KltPiRxzHPXnP3NgI28bpQDRv53olbqGXbfcgF5aiiHmO3xpwEpS5Ld9g== + +"@types/node-forge@^1.3.0": + version "1.3.11" + resolved "https://registry.yarnpkg.com/@types/node-forge/-/node-forge-1.3.11.tgz#0972ea538ddb0f4d9c2fa0ec5db5724773a604da" + integrity sha512-FQx220y22OKNTqaByeBGqHWYz4cl94tpcxeFdvBo3wjG6XPBuZ0BNgNZRV5J5TFmmcsJ4IzsLkmGRiQbnYsBEQ== + dependencies: + "@types/node" "*" "@types/node@*": - version "18.11.18" - resolved "https://registry.yarnpkg.com/@types/node/-/node-18.11.18.tgz#8dfb97f0da23c2293e554c5a50d61ef134d7697f" - integrity sha512-DHQpWGjyQKSHj3ebjFI/wRKcqQcdR+MoFBygntYOZytCqNfkd2ZC4ARDJ2DQqhjH5p85Nnd3jhUJIXrszFX/JA== + version "22.7.8" + resolved "https://registry.yarnpkg.com/@types/node/-/node-22.7.8.tgz#a1dbf0dc5f71bdd2642fc89caef65d58747ce825" + integrity sha512-a922jJy31vqR5sk+kAdIENJjHblqcZ4RmERviFsER4WJcEONqxKcjNOlk0q7OUfrF5sddT+vng070cdfMlrPLg== + dependencies: + undici-types "~6.19.2" "@types/node@^17.0.5": version "17.0.45" @@ -2239,34 +2237,39 @@ integrity sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw== "@types/parse-json@^4.0.0": - version "4.0.0" - resolved "https://registry.yarnpkg.com/@types/parse-json/-/parse-json-4.0.0.tgz#2f8bb441434d163b35fb8ffdccd7138927ffb8c0" - integrity 
sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA== + version "4.0.2" + resolved "https://registry.yarnpkg.com/@types/parse-json/-/parse-json-4.0.2.tgz#5950e50960793055845e956c427fc2b0d70c5239" + integrity sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw== "@types/parse5@^5.0.0": version "5.0.3" resolved "https://registry.yarnpkg.com/@types/parse5/-/parse5-5.0.3.tgz#e7b5aebbac150f8b5fdd4a46e7f0bd8e65e19109" integrity sha512-kUNnecmtkunAoQ3CnjmMkzNU/gtxG8guhi+Fk2U/kOpIKjIMKnXGp4IJCgQJrXSgMsWYimYG4TGjz/UzbGEBTw== +"@types/prismjs@^1.26.0": + version "1.26.4" + resolved "https://registry.yarnpkg.com/@types/prismjs/-/prismjs-1.26.4.tgz#1a9e1074619ce1d7322669e5b46fbe823925103a" + integrity sha512-rlAnzkW2sZOjbqZ743IHUhFcvzaGbqijwOu8QZnZCjfQzBqFE3s4lOTJEsxikImav9uzz/42I+O7YUs1mWgMlg== + "@types/prop-types@*": - version "15.7.5" - resolved "https://registry.yarnpkg.com/@types/prop-types/-/prop-types-15.7.5.tgz#5f19d2b85a98e9558036f6a3cacc8819420f05cf" - integrity sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w== + version "15.7.13" + resolved "https://registry.yarnpkg.com/@types/prop-types/-/prop-types-15.7.13.tgz#2af91918ee12d9d32914feb13f5326658461b451" + integrity sha512-hCZTSvwbzWGvhqxp/RqVqwU999pBf2vp7hzIjiYOsl8wqOmUxkQ6ddw1cV3l8811+kdUFus/q4d1Y3E3SyEifA== "@types/q@^1.5.1": - version "1.5.5" - resolved "https://registry.yarnpkg.com/@types/q/-/q-1.5.5.tgz#75a2a8e7d8ab4b230414505d92335d1dcb53a6df" - integrity sha512-L28j2FcJfSZOnL1WBjDYp2vUHCeIFlyYI/53EwD/rKUBQ7MtUUfbQWiyKJGpcnv4/WgrhWsFKrcPstcAt/J0tQ== + version "1.5.8" + resolved "https://registry.yarnpkg.com/@types/q/-/q-1.5.8.tgz#95f6c6a08f2ad868ba230ead1d2d7f7be3db3837" + integrity sha512-hroOstUScF6zhIi+5+x0dzqrHA1EJi+Irri6b1fxolMTqqHIV/Cg77EtnQcZqZCu8hR3mX2BzIxN4/GzI68Kfw== "@types/qs@*": - version "6.9.7" - resolved "https://registry.yarnpkg.com/@types/qs/-/qs-6.9.7.tgz#63bb7d067db107cc1e457c303bc25d511febf6cb" - integrity sha512-FGa1F62FT09qcrueBA6qYTrJPVDzah9a+493+o2PCXsesWHIn27G98TsSMs3WPNbZIEj4+VJf6saSFpvD+3Zsw== + version "6.9.16" + resolved "https://registry.yarnpkg.com/@types/qs/-/qs-6.9.16.tgz#52bba125a07c0482d26747d5d4947a64daf8f794" + integrity sha512-7i+zxXdPD0T4cKDuxCUXJ4wHcsJLwENa6Z3dCu8cfCK743OGy5Nu1RmAGqDPsoTDINVEcdXKRvR/zre+P2Ku1A== "@types/range-parser@*": - version "1.2.4" - resolved "https://registry.yarnpkg.com/@types/range-parser/-/range-parser-1.2.4.tgz#cd667bcfdd025213aafb7ca5915a932590acdcdc" - integrity sha512-EEhsLsD6UsDM1yFhAvy0Cjr6VwmpMWqFBCb9w07wVugF7w9nfajxLuVmngTIpgS6svCnm6Vaw+MZhoDCKnOfsw== + version "1.2.7" + resolved "https://registry.yarnpkg.com/@types/range-parser/-/range-parser-1.2.7.tgz#50ae4353eaaddc04044279812f52c8c65857dbcb" + integrity sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ== "@types/react-modal@3.13.1": version "3.13.1" @@ -2275,14 +2278,14 @@ dependencies: "@types/react" "*" -"@types/react-router-config@*", "@types/react-router-config@^5.0.6": - version "5.0.6" - resolved "https://registry.yarnpkg.com/@types/react-router-config/-/react-router-config-5.0.6.tgz#87c5c57e72d241db900d9734512c50ccec062451" - integrity sha512-db1mx37a1EJDf1XeX8jJN7R3PZABmJQXR8r28yUjVMFSjkmnQo6X6pOEEmNl+Tp2gYQOGPdYbFIipBtdElZ3Yg== +"@types/react-router-config@*", "@types/react-router-config@^5.0.7": + version "5.0.11" + resolved 
"https://registry.yarnpkg.com/@types/react-router-config/-/react-router-config-5.0.11.tgz#2761a23acc7905a66a94419ee40294a65aaa483a" + integrity sha512-WmSAg7WgqW7m4x8Mt4N6ZyKz0BubSj/2tVUMsAHp+Yd2AMwcSbeFq9WympT19p5heCFmF97R9eD5uUR/t4HEqw== dependencies: "@types/history" "^4.7.11" "@types/react" "*" - "@types/react-router" "*" + "@types/react-router" "^5.1.0" "@types/react-router-dom@*": version "5.3.3" @@ -2293,7 +2296,7 @@ "@types/react" "*" "@types/react-router" "*" -"@types/react-router@*": +"@types/react-router@*", "@types/react-router@^5.1.0": version "5.1.20" resolved "https://registry.yarnpkg.com/@types/react-router/-/react-router-5.1.20.tgz#88eccaa122a82405ef3efbcaaa5dcdd9f021387c" integrity sha512-jGjmu/ZqS7FjSH6owMcD5qpq19+1RS9DeVRqfl1FeBMxTDQAGwlMWOcs52NDoXaNKyG3d1cYQFMs9rCrb88o9Q== @@ -2302,12 +2305,11 @@ "@types/react" "*" "@types/react@*": - version "18.0.26" - resolved "https://registry.yarnpkg.com/@types/react/-/react-18.0.26.tgz#8ad59fc01fef8eaf5c74f4ea392621749f0b7917" - integrity sha512-hCR3PJQsAIXyxhTNSiDFY//LhnMZWpNNr5etoCqx/iUfGc5gXWtQR2Phl908jVR6uPXacojQWTg4qRpkxTuGug== + version "18.3.11" + resolved "https://registry.yarnpkg.com/@types/react/-/react-18.3.11.tgz#9d530601ff843ee0d7030d4227ea4360236bd537" + integrity sha512-r6QZ069rFTjrEYgFdOck1gK7FLVsgJE7tTz0pQBczlBNUhBNk0MQH4UbnFSwjpQLMkLzgqvBBa+qGpLje16eTQ== dependencies: "@types/prop-types" "*" - "@types/scheduler" "*" csstype "^3.0.2" "@types/retry@0.12.0": @@ -2316,182 +2318,196 @@ integrity sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA== "@types/sax@^1.2.1": - version "1.2.4" - resolved "https://registry.yarnpkg.com/@types/sax/-/sax-1.2.4.tgz#8221affa7f4f3cb21abd22f244cfabfa63e6a69e" - integrity sha512-pSAff4IAxJjfAXUG6tFkO7dsSbTmf8CtUpfhhZ5VhkRpC4628tJhh3+V6H1E+/Gs9piSzYKT5yzHO5M4GG9jkw== + version "1.2.7" + resolved "https://registry.yarnpkg.com/@types/sax/-/sax-1.2.7.tgz#ba5fe7df9aa9c89b6dff7688a19023dd2963091d" + integrity sha512-rO73L89PJxeYM3s3pPPjiPgVVcymqU490g0YO5n5By0k2Erzj6tay/4lr1CHAAU4JyOWd1rpQ8bCf6cZfHU96A== dependencies: "@types/node" "*" -"@types/scheduler@*": - version "0.16.2" - resolved "https://registry.yarnpkg.com/@types/scheduler/-/scheduler-0.16.2.tgz#1a62f89525723dde24ba1b01b092bf5df8ad4d39" - integrity sha512-hppQEBDmlwhFAXKJX2KnWLYu5yMfi91yazPb2l+lbJiwW+wdo1gNeRA+3RgNSO39WYX2euey41KEwnqesU2Jew== +"@types/send@*": + version "0.17.4" + resolved "https://registry.yarnpkg.com/@types/send/-/send-0.17.4.tgz#6619cd24e7270793702e4e6a4b958a9010cfc57a" + integrity sha512-x2EM6TJOybec7c52BX0ZspPodMsQUd5L6PRwOunVyVUhXiBSKf3AezDL8Dgvgt5o0UfKNfuA0eMLr2wLT4AiBA== + dependencies: + "@types/mime" "^1" + "@types/node" "*" "@types/serve-index@^1.9.1": - version "1.9.1" - resolved "https://registry.yarnpkg.com/@types/serve-index/-/serve-index-1.9.1.tgz#1b5e85370a192c01ec6cec4735cf2917337a6278" - integrity sha512-d/Hs3nWDxNL2xAczmOVZNj92YZCS6RGxfBPjKzuu/XirCgXdpKEb88dYNbrYGint6IVWLNP+yonwVAuRC0T2Dg== + version "1.9.4" + resolved "https://registry.yarnpkg.com/@types/serve-index/-/serve-index-1.9.4.tgz#e6ae13d5053cb06ed36392110b4f9a49ac4ec898" + integrity sha512-qLpGZ/c2fhSs5gnYsQxtDEq3Oy8SXPClIXkW5ghvAvsNuVSA8k+gCONcUCS/UjLEYvYps+e8uBtfgXgvhwfNug== dependencies: "@types/express" "*" "@types/serve-static@*", "@types/serve-static@^1.13.10": - version "1.15.0" - resolved "https://registry.yarnpkg.com/@types/serve-static/-/serve-static-1.15.0.tgz#c7930ff61afb334e121a9da780aac0d9b8f34155" - integrity 
sha512-z5xyF6uh8CbjAu9760KDKsH2FcDxZ2tFCsA4HIMWE6IkiYMXfVoa+4f9KX+FN0ZLsaMw1WNG2ETLA6N+/YA+cg== + version "1.15.7" + resolved "https://registry.yarnpkg.com/@types/serve-static/-/serve-static-1.15.7.tgz#22174bbd74fb97fe303109738e9b5c2f3064f714" + integrity sha512-W8Ym+h8nhuRwaKPaDw34QUkwsGi6Rc4yYqvKFo5rm2FUEhCFbzVWrxXUxuKK8TASjWsysJY0nsmNCGhCOIsrOw== dependencies: - "@types/mime" "*" + "@types/http-errors" "*" "@types/node" "*" + "@types/send" "*" "@types/sockjs@^0.3.33": - version "0.3.33" - resolved "https://registry.yarnpkg.com/@types/sockjs/-/sockjs-0.3.33.tgz#570d3a0b99ac995360e3136fd6045113b1bd236f" - integrity sha512-f0KEEe05NvUnat+boPTZ0dgaLZ4SfSouXUgv5noUiefG2ajgKjmETo9ZJyuqsl7dfl2aHlLJUiki6B4ZYldiiw== + version "0.3.36" + resolved "https://registry.yarnpkg.com/@types/sockjs/-/sockjs-0.3.36.tgz#ce322cf07bcc119d4cbf7f88954f3a3bd0f67535" + integrity sha512-MK9V6NzAS1+Ud7JV9lJLFqW85VbC9dq3LmwZCuBe4wBDgKC0Kj/jd8Xl+nSviU+Qc3+m7umHHyHg//2KSa0a0Q== dependencies: "@types/node" "*" -"@types/unist@*", "@types/unist@^2.0.0", "@types/unist@^2.0.2", "@types/unist@^2.0.3": - version "2.0.6" - resolved "https://registry.yarnpkg.com/@types/unist/-/unist-2.0.6.tgz#250a7b16c3b91f672a24552ec64678eeb1d3a08d" - integrity sha512-PBjIUxZHOuj0R15/xuwJYjFi+KZdNFrehocChv4g5hu6aFroHue8m0lBP0POdK2nKzbw0cgV1mws8+V/JAcEkQ== +"@types/unist@*", "@types/unist@^3.0.0": + version "3.0.3" + resolved "https://registry.yarnpkg.com/@types/unist/-/unist-3.0.3.tgz#acaab0f919ce69cce629c2d4ed2eb4adc1b6c20c" + integrity sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q== -"@types/ws@^8.5.1": - version "8.5.4" - resolved "https://registry.yarnpkg.com/@types/ws/-/ws-8.5.4.tgz#bb10e36116d6e570dd943735f86c933c1587b8a5" - integrity sha512-zdQDHKUgcX/zBc4GrwsE/7dVdAD8JR4EuiAXiiUhhfyIJXXb2+PrGshFyeXWQPMmmZ2XxgaqclgpIC7eTXc1mg== +"@types/unist@^2", "@types/unist@^2.0.0", "@types/unist@^2.0.2": + version "2.0.11" + resolved "https://registry.yarnpkg.com/@types/unist/-/unist-2.0.11.tgz#11af57b127e32487774841f7a4e54eab166d03c4" + integrity sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA== + +"@types/ws@^8.5.5": + version "8.5.12" + resolved "https://registry.yarnpkg.com/@types/ws/-/ws-8.5.12.tgz#619475fe98f35ccca2a2f6c137702d85ec247b7e" + integrity sha512-3tPRkv1EtkDpzlgyKyI8pGsGZAGPEaXeu0DOj5DI25Ja91bdAYddYHbADRYVrZMRbfW+1l5YwXVDKohDJNQxkQ== dependencies: "@types/node" "*" "@types/yargs-parser@*": - version "21.0.0" - resolved "https://registry.yarnpkg.com/@types/yargs-parser/-/yargs-parser-21.0.0.tgz#0c60e537fa790f5f9472ed2776c2b71ec117351b" - integrity sha512-iO9ZQHkZxHn4mSakYV0vFHAVDyEOIJQrV2uZ06HxEPcx+mt8swXoZHIbaaJ2crJYFfErySgktuTZ3BeLz+XmFA== + version "21.0.3" + resolved "https://registry.yarnpkg.com/@types/yargs-parser/-/yargs-parser-21.0.3.tgz#815e30b786d2e8f0dcd85fd5bcf5e1a04d008f15" + integrity sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ== "@types/yargs@^17.0.8": - version "17.0.18" - resolved "https://registry.yarnpkg.com/@types/yargs/-/yargs-17.0.18.tgz#466225ab4fbabb9aa711f5b406796daf1374a5b7" - integrity sha512-eIJR1UER6ur3EpKM3d+2Pgd+ET+k6Kn9B4ZItX0oPjjVI5PrfaRjKyLT5UYendDpLuoiJMNJvovLQbEXqhsPaw== + version "17.0.33" + resolved "https://registry.yarnpkg.com/@types/yargs/-/yargs-17.0.33.tgz#8c32303da83eec050a84b3c7ae7b9f922d13e32d" + integrity sha512-WpxBCKWPLr4xSsHgz511rFJAM+wS28w2zEO1QDNY5zM/S8ok70NNfztH0xwhqKyaK0OHCbN98LDAZuy1ctxDkA== dependencies: "@types/yargs-parser" "*" 
-"@webassemblyjs/ast@1.11.1": - version "1.11.1" - resolved "https://registry.yarnpkg.com/@webassemblyjs/ast/-/ast-1.11.1.tgz#2bfd767eae1a6996f432ff7e8d7fc75679c0b6a7" - integrity sha512-ukBh14qFLjxTQNTXocdyksN5QdM28S1CxHt2rdskFyL+xFV7VremuBLVbmCePj+URalXBENx/9Lm7lnhihtCSw== - dependencies: - "@webassemblyjs/helper-numbers" "1.11.1" - "@webassemblyjs/helper-wasm-bytecode" "1.11.1" - -"@webassemblyjs/floating-point-hex-parser@1.11.1": - version "1.11.1" - resolved "https://registry.yarnpkg.com/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.1.tgz#f6c61a705f0fd7a6aecaa4e8198f23d9dc179e4f" - integrity sha512-iGRfyc5Bq+NnNuX8b5hwBrRjzf0ocrJPI6GWFodBFzmFnyvrQ83SHKhmilCU/8Jv67i4GJZBMhEzltxzcNagtQ== - -"@webassemblyjs/helper-api-error@1.11.1": - version "1.11.1" - resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.1.tgz#1a63192d8788e5c012800ba6a7a46c705288fd16" - integrity sha512-RlhS8CBCXfRUR/cwo2ho9bkheSXG0+NwooXcc3PAILALf2QLdFyj7KGsKRbVc95hZnhnERon4kW/D3SZpp6Tcg== - -"@webassemblyjs/helper-buffer@1.11.1": - version "1.11.1" - resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-buffer/-/helper-buffer-1.11.1.tgz#832a900eb444884cde9a7cad467f81500f5e5ab5" - integrity sha512-gwikF65aDNeeXa8JxXa2BAk+REjSyhrNC9ZwdT0f8jc4dQQeDQ7G4m0f2QCLPJiMTTO6wfDmRmj/pW0PsUvIcA== - -"@webassemblyjs/helper-numbers@1.11.1": - version "1.11.1" - resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.1.tgz#64d81da219fbbba1e3bd1bfc74f6e8c4e10a62ae" - integrity sha512-vDkbxiB8zfnPdNK9Rajcey5C0w+QJugEglN0of+kmO8l7lDb77AnlKYQF7aarZuCrv+l0UvqL+68gSDr3k9LPQ== - dependencies: - "@webassemblyjs/floating-point-hex-parser" "1.11.1" - "@webassemblyjs/helper-api-error" "1.11.1" +"@ungap/structured-clone@^1.0.0": + version "1.2.0" + resolved "https://registry.yarnpkg.com/@ungap/structured-clone/-/structured-clone-1.2.0.tgz#756641adb587851b5ccb3e095daf27ae581c8406" + integrity sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ== + +"@webassemblyjs/ast@1.12.1", "@webassemblyjs/ast@^1.12.1": + version "1.12.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/ast/-/ast-1.12.1.tgz#bb16a0e8b1914f979f45864c23819cc3e3f0d4bb" + integrity sha512-EKfMUOPRRUTy5UII4qJDGPpqfwjOmZ5jeGFwid9mnoqIFK+e0vqoi1qH56JpmZSzEL53jKnNzScdmftJyG5xWg== + dependencies: + "@webassemblyjs/helper-numbers" "1.11.6" + "@webassemblyjs/helper-wasm-bytecode" "1.11.6" + +"@webassemblyjs/floating-point-hex-parser@1.11.6": + version "1.11.6" + resolved "https://registry.yarnpkg.com/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.6.tgz#dacbcb95aff135c8260f77fa3b4c5fea600a6431" + integrity sha512-ejAj9hfRJ2XMsNHk/v6Fu2dGS+i4UaXBXGemOfQ/JfQ6mdQg/WXtwleQRLLS4OvfDhv8rYnVwH27YJLMyYsxhw== + +"@webassemblyjs/helper-api-error@1.11.6": + version "1.11.6" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.6.tgz#6132f68c4acd59dcd141c44b18cbebbd9f2fa768" + integrity sha512-o0YkoP4pVu4rN8aTJgAyj9hC2Sv5UlkzCHhxqWj8butaLvnpdc2jOwh4ewE6CX0txSfLn/UYaV/pheS2Txg//Q== + +"@webassemblyjs/helper-buffer@1.12.1": + version "1.12.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-buffer/-/helper-buffer-1.12.1.tgz#6df20d272ea5439bf20ab3492b7fb70e9bfcb3f6" + integrity sha512-nzJwQw99DNDKr9BVCOZcLuJJUlqkJh+kVzVl6Fmq/tI5ZtEyWT1KZMyOXltXLZJmDtvLCDgwsyrkohEtopTXCw== + +"@webassemblyjs/helper-numbers@1.11.6": + version "1.11.6" + resolved 
"https://registry.yarnpkg.com/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.6.tgz#cbce5e7e0c1bd32cf4905ae444ef64cea919f1b5" + integrity sha512-vUIhZ8LZoIWHBohiEObxVm6hwP034jwmc9kuq5GdHZH0wiLVLIPcMCdpJzG4C11cHoQ25TFIQj9kaVADVX7N3g== + dependencies: + "@webassemblyjs/floating-point-hex-parser" "1.11.6" + "@webassemblyjs/helper-api-error" "1.11.6" "@xtuc/long" "4.2.2" -"@webassemblyjs/helper-wasm-bytecode@1.11.1": - version "1.11.1" - resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.1.tgz#f328241e41e7b199d0b20c18e88429c4433295e1" - integrity sha512-PvpoOGiJwXeTrSf/qfudJhwlvDQxFgelbMqtq52WWiXC6Xgg1IREdngmPN3bs4RoO83PnL/nFrxucXj1+BX62Q== +"@webassemblyjs/helper-wasm-bytecode@1.11.6": + version "1.11.6" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.6.tgz#bb2ebdb3b83aa26d9baad4c46d4315283acd51e9" + integrity sha512-sFFHKwcmBprO9e7Icf0+gddyWYDViL8bpPjJJl0WHxCdETktXdmtWLGVzoHbqUcY4Be1LkNfwTmXOJUFZYSJdA== -"@webassemblyjs/helper-wasm-section@1.11.1": - version "1.11.1" - resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.11.1.tgz#21ee065a7b635f319e738f0dd73bfbda281c097a" - integrity sha512-10P9No29rYX1j7F3EVPX3JvGPQPae+AomuSTPiF9eBQeChHI6iqjMIwR9JmOJXwpnn/oVGDk7I5IlskuMwU/pg== +"@webassemblyjs/helper-wasm-section@1.12.1": + version "1.12.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.12.1.tgz#3da623233ae1a60409b509a52ade9bc22a37f7bf" + integrity sha512-Jif4vfB6FJlUlSbgEMHUyk1j234GTNG9dBJ4XJdOySoj518Xj0oGsNi59cUQF4RRMS9ouBUxDDdyBVfPTypa5g== dependencies: - "@webassemblyjs/ast" "1.11.1" - "@webassemblyjs/helper-buffer" "1.11.1" - "@webassemblyjs/helper-wasm-bytecode" "1.11.1" - "@webassemblyjs/wasm-gen" "1.11.1" + "@webassemblyjs/ast" "1.12.1" + "@webassemblyjs/helper-buffer" "1.12.1" + "@webassemblyjs/helper-wasm-bytecode" "1.11.6" + "@webassemblyjs/wasm-gen" "1.12.1" -"@webassemblyjs/ieee754@1.11.1": - version "1.11.1" - resolved "https://registry.yarnpkg.com/@webassemblyjs/ieee754/-/ieee754-1.11.1.tgz#963929e9bbd05709e7e12243a099180812992614" - integrity sha512-hJ87QIPtAMKbFq6CGTkZYJivEwZDbQUgYd3qKSadTNOhVY7p+gfP6Sr0lLRVTaG1JjFj+r3YchoqRYxNH3M0GQ== +"@webassemblyjs/ieee754@1.11.6": + version "1.11.6" + resolved "https://registry.yarnpkg.com/@webassemblyjs/ieee754/-/ieee754-1.11.6.tgz#bb665c91d0b14fffceb0e38298c329af043c6e3a" + integrity sha512-LM4p2csPNvbij6U1f19v6WR56QZ8JcHg3QIJTlSwzFcmx6WSORicYj6I63f9yU1kEUtrpG+kjkiIAkevHpDXrg== dependencies: "@xtuc/ieee754" "^1.2.0" -"@webassemblyjs/leb128@1.11.1": - version "1.11.1" - resolved "https://registry.yarnpkg.com/@webassemblyjs/leb128/-/leb128-1.11.1.tgz#ce814b45574e93d76bae1fb2644ab9cdd9527aa5" - integrity sha512-BJ2P0hNZ0u+Th1YZXJpzW6miwqQUGcIHT1G/sf72gLVD9DZ5AdYTqPNbHZh6K1M5VmKvFXwGSWZADz+qBWxeRw== +"@webassemblyjs/leb128@1.11.6": + version "1.11.6" + resolved "https://registry.yarnpkg.com/@webassemblyjs/leb128/-/leb128-1.11.6.tgz#70e60e5e82f9ac81118bc25381a0b283893240d7" + integrity sha512-m7a0FhE67DQXgouf1tbN5XQcdWoNgaAuoULHIfGFIEVKA6tu/edls6XnIlkmS6FrXAquJRPni3ZZKjw6FSPjPQ== dependencies: "@xtuc/long" "4.2.2" -"@webassemblyjs/utf8@1.11.1": - version "1.11.1" - resolved "https://registry.yarnpkg.com/@webassemblyjs/utf8/-/utf8-1.11.1.tgz#d1f8b764369e7c6e6bae350e854dec9a59f0a3ff" - integrity sha512-9kqcxAEdMhiwQkHpkNiorZzqpGrodQQ2IGrHHxCy+Ozng0ofyMA0lTqiLkVs1uzTRejX+/O0EOT7KxqVPuXosQ== - -"@webassemblyjs/wasm-edit@1.11.1": - 
version "1.11.1" - resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-edit/-/wasm-edit-1.11.1.tgz#ad206ebf4bf95a058ce9880a8c092c5dec8193d6" - integrity sha512-g+RsupUC1aTHfR8CDgnsVRVZFJqdkFHpsHMfJuWQzWU3tvnLC07UqHICfP+4XyL2tnr1amvl1Sdp06TnYCmVkA== - dependencies: - "@webassemblyjs/ast" "1.11.1" - "@webassemblyjs/helper-buffer" "1.11.1" - "@webassemblyjs/helper-wasm-bytecode" "1.11.1" - "@webassemblyjs/helper-wasm-section" "1.11.1" - "@webassemblyjs/wasm-gen" "1.11.1" - "@webassemblyjs/wasm-opt" "1.11.1" - "@webassemblyjs/wasm-parser" "1.11.1" - "@webassemblyjs/wast-printer" "1.11.1" - -"@webassemblyjs/wasm-gen@1.11.1": - version "1.11.1" - resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-gen/-/wasm-gen-1.11.1.tgz#86c5ea304849759b7d88c47a32f4f039ae3c8f76" - integrity sha512-F7QqKXwwNlMmsulj6+O7r4mmtAlCWfO/0HdgOxSklZfQcDu0TpLiD1mRt/zF25Bk59FIjEuGAIyn5ei4yMfLhA== - dependencies: - "@webassemblyjs/ast" "1.11.1" - "@webassemblyjs/helper-wasm-bytecode" "1.11.1" - "@webassemblyjs/ieee754" "1.11.1" - "@webassemblyjs/leb128" "1.11.1" - "@webassemblyjs/utf8" "1.11.1" - -"@webassemblyjs/wasm-opt@1.11.1": - version "1.11.1" - resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-opt/-/wasm-opt-1.11.1.tgz#657b4c2202f4cf3b345f8a4c6461c8c2418985f2" - integrity sha512-VqnkNqnZlU5EB64pp1l7hdm3hmQw7Vgqa0KF/KCNO9sIpI6Fk6brDEiX+iCOYrvMuBWDws0NkTOxYEb85XQHHw== - dependencies: - "@webassemblyjs/ast" "1.11.1" - "@webassemblyjs/helper-buffer" "1.11.1" - "@webassemblyjs/wasm-gen" "1.11.1" - "@webassemblyjs/wasm-parser" "1.11.1" - -"@webassemblyjs/wasm-parser@1.11.1": - version "1.11.1" - resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-parser/-/wasm-parser-1.11.1.tgz#86ca734534f417e9bd3c67c7a1c75d8be41fb199" - integrity sha512-rrBujw+dJu32gYB7/Lup6UhdkPx9S9SnobZzRVL7VcBH9Bt9bCBLEuX/YXOOtBsOZ4NQrRykKhffRWHvigQvOA== - dependencies: - "@webassemblyjs/ast" "1.11.1" - "@webassemblyjs/helper-api-error" "1.11.1" - "@webassemblyjs/helper-wasm-bytecode" "1.11.1" - "@webassemblyjs/ieee754" "1.11.1" - "@webassemblyjs/leb128" "1.11.1" - "@webassemblyjs/utf8" "1.11.1" - -"@webassemblyjs/wast-printer@1.11.1": - version "1.11.1" - resolved "https://registry.yarnpkg.com/@webassemblyjs/wast-printer/-/wast-printer-1.11.1.tgz#d0c73beda8eec5426f10ae8ef55cee5e7084c2f0" - integrity sha512-IQboUWM4eKzWW+N/jij2sRatKMh99QEelo3Eb2q0qXkvPRISAj8Qxtmw5itwqK+TTkBuUIE45AxYPToqPtL5gg== - dependencies: - "@webassemblyjs/ast" "1.11.1" +"@webassemblyjs/utf8@1.11.6": + version "1.11.6" + resolved "https://registry.yarnpkg.com/@webassemblyjs/utf8/-/utf8-1.11.6.tgz#90f8bc34c561595fe156603be7253cdbcd0fab5a" + integrity sha512-vtXf2wTQ3+up9Zsg8sa2yWiQpzSsMyXj0qViVP6xKGCUT8p8YJ6HqI7l5eCnWx1T/FYdsv07HQs2wTFbbof/RA== + +"@webassemblyjs/wasm-edit@^1.12.1": + version "1.12.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-edit/-/wasm-edit-1.12.1.tgz#9f9f3ff52a14c980939be0ef9d5df9ebc678ae3b" + integrity sha512-1DuwbVvADvS5mGnXbE+c9NfA8QRcZ6iKquqjjmR10k6o+zzsRVesil54DKexiowcFCPdr/Q0qaMgB01+SQ1u6g== + dependencies: + "@webassemblyjs/ast" "1.12.1" + "@webassemblyjs/helper-buffer" "1.12.1" + "@webassemblyjs/helper-wasm-bytecode" "1.11.6" + "@webassemblyjs/helper-wasm-section" "1.12.1" + "@webassemblyjs/wasm-gen" "1.12.1" + "@webassemblyjs/wasm-opt" "1.12.1" + "@webassemblyjs/wasm-parser" "1.12.1" + "@webassemblyjs/wast-printer" "1.12.1" + +"@webassemblyjs/wasm-gen@1.12.1": + version "1.12.1" + resolved 
"https://registry.yarnpkg.com/@webassemblyjs/wasm-gen/-/wasm-gen-1.12.1.tgz#a6520601da1b5700448273666a71ad0a45d78547" + integrity sha512-TDq4Ojh9fcohAw6OIMXqiIcTq5KUXTGRkVxbSo1hQnSy6lAM5GSdfwWeSxpAo0YzgsgF182E/U0mDNhuA0tW7w== + dependencies: + "@webassemblyjs/ast" "1.12.1" + "@webassemblyjs/helper-wasm-bytecode" "1.11.6" + "@webassemblyjs/ieee754" "1.11.6" + "@webassemblyjs/leb128" "1.11.6" + "@webassemblyjs/utf8" "1.11.6" + +"@webassemblyjs/wasm-opt@1.12.1": + version "1.12.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-opt/-/wasm-opt-1.12.1.tgz#9e6e81475dfcfb62dab574ac2dda38226c232bc5" + integrity sha512-Jg99j/2gG2iaz3hijw857AVYekZe2SAskcqlWIZXjji5WStnOpVoat3gQfT/Q5tb2djnCjBtMocY/Su1GfxPBg== + dependencies: + "@webassemblyjs/ast" "1.12.1" + "@webassemblyjs/helper-buffer" "1.12.1" + "@webassemblyjs/wasm-gen" "1.12.1" + "@webassemblyjs/wasm-parser" "1.12.1" + +"@webassemblyjs/wasm-parser@1.12.1", "@webassemblyjs/wasm-parser@^1.12.1": + version "1.12.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-parser/-/wasm-parser-1.12.1.tgz#c47acb90e6f083391e3fa61d113650eea1e95937" + integrity sha512-xikIi7c2FHXysxXe3COrVUPSheuBtpcfhbpFj4gmu7KRLYOzANztwUU0IbsqvMqzuNK2+glRGWCEqZo1WCLyAQ== + dependencies: + "@webassemblyjs/ast" "1.12.1" + "@webassemblyjs/helper-api-error" "1.11.6" + "@webassemblyjs/helper-wasm-bytecode" "1.11.6" + "@webassemblyjs/ieee754" "1.11.6" + "@webassemblyjs/leb128" "1.11.6" + "@webassemblyjs/utf8" "1.11.6" + +"@webassemblyjs/wast-printer@1.12.1": + version "1.12.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wast-printer/-/wast-printer-1.12.1.tgz#bcecf661d7d1abdaf989d8341a4833e33e2b31ac" + integrity sha512-+X4WAlOisVWQMikjbcvY2e0rwPsKQ9F688lksZhBcPycBBuii3O7m8FACbDMWDojpAqvjIncrG8J0XHKyQfVeA== + dependencies: + "@webassemblyjs/ast" "1.12.1" "@xtuc/long" "4.2.2" "@xtuc/ieee754@^1.2.0": @@ -2517,10 +2533,10 @@ accepts@~1.3.4, accepts@~1.3.5, accepts@~1.3.8: mime-types "~2.1.34" negotiator "0.6.3" -acorn-import-assertions@^1.7.6: - version "1.8.0" - resolved "https://registry.yarnpkg.com/acorn-import-assertions/-/acorn-import-assertions-1.8.0.tgz#ba2b5939ce62c238db6d93d81c9b111b29b855e9" - integrity sha512-m7VZ3jwz4eK6A4Vtt8Ew1/mNbP24u0FhdyfA7fSvnJR6LMdfOYnmuIrrJAgrYfYJ10F/otaHTtrtrtmHdMNzEw== +acorn-import-attributes@^1.9.5: + version "1.9.5" + resolved "https://registry.yarnpkg.com/acorn-import-attributes/-/acorn-import-attributes-1.9.5.tgz#7eb1557b1ba05ef18b5ed0ec67591bfab04688ef" + integrity sha512-n02Vykv5uA3eHGM/Z2dQrcD56kL8TyDb2p1+0P83PClMnC/nc+anbQRhIOWnSq4Ke/KvDPrY3C9hDtC/A3eHnQ== acorn-jsx@^5.0.0: version "5.3.2" @@ -2528,14 +2544,16 @@ acorn-jsx@^5.0.0: integrity sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ== acorn-walk@^8.0.0: - version "8.2.0" - resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-8.2.0.tgz#741210f2e2426454508853a2f44d0ab83b7f69c1" - integrity sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA== + version "8.3.4" + resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-8.3.4.tgz#794dd169c3977edf4ba4ea47583587c5866236b7" + integrity sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g== + dependencies: + acorn "^8.11.0" -acorn@^8.0.0, acorn@^8.0.4, acorn@^8.5.0, acorn@^8.7.1: - version "8.8.1" - resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.8.1.tgz#0a3f9cbecc4ec3bea6f0a80b66ae8dd2da250b73" - integrity 
sha512-7zFpHzhnqYKrkYdUjF1HI1bzd0VygEGX8lFk4k5zVMqHEoES+P+7TKI+EvLO9WVMJ8eekdO0aDEK044xTXwPPA== +acorn@^8.0.0, acorn@^8.0.4, acorn@^8.11.0, acorn@^8.7.1, acorn@^8.8.2: + version "8.13.0" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.13.0.tgz#2a30d670818ad16ddd6a35d3842dacec9e5d7ca3" + integrity sha512-8zSiw54Oxrdym50NlZ9sUusyO1Z1ZchgRLWRaK6c86XJFClyCgFKetdowBg5bKxyp/u+CDBJG4Mpp0m3HLZl9w== address@^1.0.1, address@^1.1.2: version "1.2.2" @@ -2562,14 +2580,14 @@ ajv-keywords@^3.4.1, ajv-keywords@^3.5.2: resolved "https://registry.yarnpkg.com/ajv-keywords/-/ajv-keywords-3.5.2.tgz#31f29da5ab6e00d1c2d329acf7b5929614d5014d" integrity sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ== -ajv-keywords@^5.0.0: +ajv-keywords@^5.1.0: version "5.1.0" resolved "https://registry.yarnpkg.com/ajv-keywords/-/ajv-keywords-5.1.0.tgz#69d4d385a4733cdbeab44964a1170a88f87f0e16" integrity sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw== dependencies: fast-deep-equal "^3.1.3" -ajv@^6.12.2, ajv@^6.12.4, ajv@^6.12.5: +ajv@^6.12.2, ajv@^6.12.5: version "6.12.6" resolved "https://registry.yarnpkg.com/ajv/-/ajv-6.12.6.tgz#baf5a62e802b07d977034586f8c3baf5adf26df4" integrity sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g== @@ -2579,44 +2597,45 @@ ajv@^6.12.2, ajv@^6.12.4, ajv@^6.12.5: json-schema-traverse "^0.4.1" uri-js "^4.2.2" -ajv@^8.0.0, ajv@^8.8.0: - version "8.11.2" - resolved "https://registry.yarnpkg.com/ajv/-/ajv-8.11.2.tgz#aecb20b50607acf2569b6382167b65a96008bb78" - integrity sha512-E4bfmKAhGiSTvMfL1Myyycaub+cUEU2/IvpylXkUu7CHBkBj1f/ikdzbD7YQ6FKUbixDxeYvB/xY4fvyroDlQg== +ajv@^8.0.0, ajv@^8.9.0: + version "8.17.1" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-8.17.1.tgz#37d9a5c776af6bc92d7f4f9510eba4c0a60d11a6" + integrity sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g== dependencies: - fast-deep-equal "^3.1.1" + fast-deep-equal "^3.1.3" + fast-uri "^3.0.1" json-schema-traverse "^1.0.0" require-from-string "^2.0.2" - uri-js "^4.2.2" -algoliasearch-helper@^3.10.0: - version "3.11.1" - resolved "https://registry.yarnpkg.com/algoliasearch-helper/-/algoliasearch-helper-3.11.1.tgz#d83ab7f1a2a374440686ef7a144b3c288b01188a" - integrity sha512-mvsPN3eK4E0bZG0/WlWJjeqe/bUD2KOEVOl0GyL/TGXn6wcpZU8NOuztGHCUKXkyg5gq6YzUakVTmnmSSO5Yiw== +algoliasearch-helper@^3.13.3: + version "3.22.5" + resolved "https://registry.yarnpkg.com/algoliasearch-helper/-/algoliasearch-helper-3.22.5.tgz#2fcc26814e10a121a2c2526a1b05c754061c56c0" + integrity sha512-lWvhdnc+aKOKx8jyA3bsdEgHzm/sglC4cYdMG4xSQyRiPLJVJtH/IVYZG3Hp6PkTEhQqhyVYkeP9z2IlcHJsWw== dependencies: "@algolia/events" "^4.0.1" -algoliasearch@^4.0.0, algoliasearch@^4.13.1: - version "4.14.3" - resolved "https://registry.yarnpkg.com/algoliasearch/-/algoliasearch-4.14.3.tgz#f02a77a4db17de2f676018938847494b692035e7" - integrity sha512-GZTEuxzfWbP/vr7ZJfGzIl8fOsoxN916Z6FY2Egc9q2TmZ6hvq5KfAxY89pPW01oW/2HDEKA8d30f9iAH9eXYg== - dependencies: - "@algolia/cache-browser-local-storage" "4.14.3" - "@algolia/cache-common" "4.14.3" - "@algolia/cache-in-memory" "4.14.3" - "@algolia/client-account" "4.14.3" - "@algolia/client-analytics" "4.14.3" - "@algolia/client-common" "4.14.3" - "@algolia/client-personalization" "4.14.3" - "@algolia/client-search" "4.14.3" - "@algolia/logger-common" "4.14.3" - "@algolia/logger-console" "4.14.3" - "@algolia/requester-browser-xhr" "4.14.3" - "@algolia/requester-common" 
"4.14.3" - "@algolia/requester-node-http" "4.14.3" - "@algolia/transporter" "4.14.3" - -ansi-align@^3.0.0, ansi-align@^3.0.1: +algoliasearch@^4.18.0, algoliasearch@^4.19.1: + version "4.24.0" + resolved "https://registry.yarnpkg.com/algoliasearch/-/algoliasearch-4.24.0.tgz#b953b3e2309ef8f25da9de311b95b994ac918275" + integrity sha512-bf0QV/9jVejssFBmz2HQLxUadxk574t4iwjCKp5E7NBzwKkrDEhKPISIIjAU/p6K5qDx3qoeh4+26zWN1jmw3g== + dependencies: + "@algolia/cache-browser-local-storage" "4.24.0" + "@algolia/cache-common" "4.24.0" + "@algolia/cache-in-memory" "4.24.0" + "@algolia/client-account" "4.24.0" + "@algolia/client-analytics" "4.24.0" + "@algolia/client-common" "4.24.0" + "@algolia/client-personalization" "4.24.0" + "@algolia/client-search" "4.24.0" + "@algolia/logger-common" "4.24.0" + "@algolia/logger-console" "4.24.0" + "@algolia/recommend" "4.24.0" + "@algolia/requester-browser-xhr" "4.24.0" + "@algolia/requester-common" "4.24.0" + "@algolia/requester-node-http" "4.24.0" + "@algolia/transporter" "4.24.0" + +ansi-align@^3.0.1: version "3.0.1" resolved "https://registry.yarnpkg.com/ansi-align/-/ansi-align-3.0.1.tgz#0cdf12e111ace773a86e9a1fad1225c43cb19a59" integrity sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w== @@ -2639,9 +2658,9 @@ ansi-regex@^5.0.1: integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ== ansi-regex@^6.0.1: - version "6.0.1" - resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-6.0.1.tgz#3183e38fae9a65d7cb5e53945cd5897d0260a06a" - integrity sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA== + version "6.1.0" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-6.1.0.tgz#95ec409c69619d6cb1b8b34f14b660ef28ebd654" + integrity sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA== ansi-styles@^3.2.1: version "3.2.1" @@ -2650,7 +2669,7 @@ ansi-styles@^3.2.1: dependencies: color-convert "^1.9.0" -ansi-styles@^4.0.0, ansi-styles@^4.1.0: +ansi-styles@^4.1.0: version "4.3.0" resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-4.3.0.tgz#edd803628ae71c04c85ae7a0906edad34b648937" integrity sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg== @@ -2692,51 +2711,66 @@ argparse@^2.0.1: resolved "https://registry.yarnpkg.com/argparse/-/argparse-2.0.1.tgz#246f50f3ca78a3240f6c997e8a9bd1eac49e4b38" integrity sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q== +array-buffer-byte-length@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/array-buffer-byte-length/-/array-buffer-byte-length-1.0.1.tgz#1e5583ec16763540a27ae52eed99ff899223568f" + integrity sha512-ahC5W1xgou+KTXix4sAO8Ki12Q+jf4i0+tmk3sC+zgcynshkHxzpXdImBehiUYKKKDwvfFiJl1tZt6ewscS1Mg== + dependencies: + call-bind "^1.0.5" + is-array-buffer "^3.0.4" + array-flatten@1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-1.1.1.tgz#9a5f699051b1e7073328f2a008968b64ea2955d2" integrity sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg== -array-flatten@^2.1.2: - version "2.1.2" - resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-2.1.2.tgz#24ef80a28c1a893617e2149b0c6d0d788293b099" - integrity sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ== - array-union@^2.1.0: version "2.1.0" 
resolved "https://registry.yarnpkg.com/array-union/-/array-union-2.1.0.tgz#b798420adbeb1de828d84acd8a2e23d3efe85e8d" integrity sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw== -array.prototype.reduce@^1.0.5: - version "1.0.5" - resolved "https://registry.yarnpkg.com/array.prototype.reduce/-/array.prototype.reduce-1.0.5.tgz#6b20b0daa9d9734dd6bc7ea66b5bbce395471eac" - integrity sha512-kDdugMl7id9COE8R7MHF5jWk7Dqt/fs4Pv+JXoICnYwqpjjjbUurz6w5fT5IG6brLdJhv6/VoHB0H7oyIBXd+Q== +array.prototype.reduce@^1.0.6: + version "1.0.7" + resolved "https://registry.yarnpkg.com/array.prototype.reduce/-/array.prototype.reduce-1.0.7.tgz#6aadc2f995af29cb887eb866d981dc85ab6f7dc7" + integrity sha512-mzmiUCVwtiD4lgxYP8g7IYy8El8p2CSMePvIbTS7gchKir/L1fgJrk0yDKmAX6mnRQFKNADYIk8nNlTris5H1Q== dependencies: - call-bind "^1.0.2" - define-properties "^1.1.4" - es-abstract "^1.20.4" + call-bind "^1.0.7" + define-properties "^1.2.1" + es-abstract "^1.23.2" es-array-method-boxes-properly "^1.0.0" + es-errors "^1.3.0" + es-object-atoms "^1.0.0" is-string "^1.0.7" -asap@~2.0.3: - version "2.0.6" - resolved "https://registry.yarnpkg.com/asap/-/asap-2.0.6.tgz#e50347611d7e690943208bbdafebcbc2fb866d46" - integrity sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA== +arraybuffer.prototype.slice@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.3.tgz#097972f4255e41bc3425e37dc3f6421cf9aefde6" + integrity sha512-bMxMKAjg13EBSVscxTaYA4mRc5t1UAXa2kXiGTNfZ079HIWXEkKmkgFrh/nJqamaLSrXO5H4WFFkPEaLJWbs3A== + dependencies: + array-buffer-byte-length "^1.0.1" + call-bind "^1.0.5" + define-properties "^1.2.1" + es-abstract "^1.22.3" + es-errors "^1.2.1" + get-intrinsic "^1.2.3" + is-array-buffer "^3.0.4" + is-shared-array-buffer "^1.0.2" assert@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/assert/-/assert-2.0.0.tgz#95fc1c616d48713510680f2eaf2d10dd22e02d32" - integrity sha512-se5Cd+js9dXJnu6Ag2JFc00t+HmHOen+8Q+L7O9zI0PqQXr20uk2J0XQqMxZEeo5U50o8Nvmmx7dZrl+Ufr35A== + version "2.1.0" + resolved "https://registry.yarnpkg.com/assert/-/assert-2.1.0.tgz#6d92a238d05dc02e7427c881fb8be81c8448b2dd" + integrity sha512-eLHpSK/Y4nhMJ07gDaAzoX/XAKS8PSaojml3M0DM4JpV1LAi5JOJ/p6H/XWrl8L+DzVEvVCW1z3vWAaB9oTsQw== dependencies: - es6-object-assign "^1.1.0" - is-nan "^1.2.1" - object-is "^1.0.1" - util "^0.12.0" + call-bind "^1.0.2" + is-nan "^1.3.2" + object-is "^1.1.5" + object.assign "^4.1.4" + util "^0.12.5" astring@^1.8.0: - version "1.8.4" - resolved "https://registry.yarnpkg.com/astring/-/astring-1.8.4.tgz#6d4c5d8de7be2ead9e4a3cc0e2efb8d759378904" - integrity sha512-97a+l2LBU3Op3bBQEff79i/E4jMD2ZLFD8rHx9B6mXyB2uQwhJQYfiDqUwtfjF4QA1F2qs//N6Cw8LetMbQjcw== + version "1.9.0" + resolved "https://registry.yarnpkg.com/astring/-/astring-1.9.0.tgz#cc73e6062a7eb03e7d19c22d8b0b3451fd9bfeef" + integrity sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg== at-least-node@^1.0.0: version "1.0.0" @@ -2750,47 +2784,32 @@ autocomplete.js@^0.37.0: dependencies: immediate "^3.2.3" -autoprefixer@^10.4.12, autoprefixer@^10.4.7: - version "10.4.13" - resolved "https://registry.yarnpkg.com/autoprefixer/-/autoprefixer-10.4.13.tgz#b5136b59930209a321e9fa3dca2e7c4d223e83a8" - integrity sha512-49vKpMqcZYsJjwotvt4+h/BCjJVnhGwcLpDt5xkcaOG3eLrG/HUYLagrihYsQ+qrIBgIzX1Rw7a6L8I/ZA1Atg== +autoprefixer@^10.4.14, autoprefixer@^10.4.19: + version "10.4.20" + resolved 
"https://registry.yarnpkg.com/autoprefixer/-/autoprefixer-10.4.20.tgz#5caec14d43976ef42e32dcb4bd62878e96be5b3b" + integrity sha512-XY25y5xSv/wEoqzDyXXME4AFfkZI0P23z6Fs3YgymDnKJkCGOnkL0iTxCa85UTqaSgfcqyf3UA6+c7wUvx/16g== dependencies: - browserslist "^4.21.4" - caniuse-lite "^1.0.30001426" - fraction.js "^4.2.0" + browserslist "^4.23.3" + caniuse-lite "^1.0.30001646" + fraction.js "^4.3.7" normalize-range "^0.1.2" - picocolors "^1.0.0" + picocolors "^1.0.1" postcss-value-parser "^4.2.0" -available-typed-arrays@^1.0.5: - version "1.0.5" - resolved "https://registry.yarnpkg.com/available-typed-arrays/-/available-typed-arrays-1.0.5.tgz#92f95616501069d07d10edb2fc37d3e1c65123b7" - integrity sha512-DMD0KiN46eipeziST1LPP/STfDU0sufISXmjSgvVsoU2tqxctQeASejWcfNtxYKqETM1UxQ8sp2OrSBWpHY6sw== - -axios@^0.25.0: - version "0.25.0" - resolved "https://registry.yarnpkg.com/axios/-/axios-0.25.0.tgz#349cfbb31331a9b4453190791760a8d35b093e0a" - integrity sha512-cD8FOb0tRH3uuEe6+evtAbgJtfxr7ly3fQjYcMcuPlgkwVS9xboaVIpcDV+cYQe+yGykgwZCs1pzjntcGa6l5g== - dependencies: - follow-redirects "^1.14.7" - -babel-loader@^8.2.5: - version "8.3.0" - resolved "https://registry.yarnpkg.com/babel-loader/-/babel-loader-8.3.0.tgz#124936e841ba4fe8176786d6ff28add1f134d6a8" - integrity sha512-H8SvsMF+m9t15HNLMipppzkC+Y2Yq+v3SonZyU70RBL/h1gxPkH08Ot8pEE9Z4Kd+czyWJClmFS8qzIP9OZ04Q== +available-typed-arrays@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz#a5cc375d6a03c2efc87a553f3e0b1522def14846" + integrity sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ== dependencies: - find-cache-dir "^3.3.1" - loader-utils "^2.0.0" - make-dir "^3.1.0" - schema-utils "^2.6.5" + possible-typed-array-names "^1.0.0" -babel-plugin-apply-mdx-type-prop@1.6.22: - version "1.6.22" - resolved "https://registry.yarnpkg.com/babel-plugin-apply-mdx-type-prop/-/babel-plugin-apply-mdx-type-prop-1.6.22.tgz#d216e8fd0de91de3f1478ef3231e05446bc8705b" - integrity sha512-VefL+8o+F/DfK24lPZMtJctrCVOfgbqLAGZSkxwhazQv4VxPg3Za/i40fu22KR2m8eEda+IfSOlPLUSIiLcnCQ== +babel-loader@^9.1.3: + version "9.2.1" + resolved "https://registry.yarnpkg.com/babel-loader/-/babel-loader-9.2.1.tgz#04c7835db16c246dd19ba0914418f3937797587b" + integrity sha512-fqe8naHt46e0yIdkjUZYqddSXfej3AHajX+CSO5X7oy0EmPc6o5Xh+RClNoHjnieWz9AW4kZxW9yyFMhVB1QLA== dependencies: - "@babel/helper-plugin-utils" "7.10.4" - "@mdx-js/util" "1.6.22" + find-cache-dir "^4.0.0" + schema-utils "^4.0.0" babel-plugin-dynamic-import-node@^2.3.3: version "2.3.3" @@ -2799,36 +2818,29 @@ babel-plugin-dynamic-import-node@^2.3.3: dependencies: object.assign "^4.1.0" -babel-plugin-extract-import-names@1.6.22: - version "1.6.22" - resolved "https://registry.yarnpkg.com/babel-plugin-extract-import-names/-/babel-plugin-extract-import-names-1.6.22.tgz#de5f9a28eb12f3eb2578bf74472204e66d1a13dc" - integrity sha512-yJ9BsJaISua7d8zNT7oRG1ZLBJCIdZ4PZqmH8qa9N5AK01ifk3fnkc98AXhtzE7UkfCsEumvoQWgoYLhOnJ7jQ== - dependencies: - "@babel/helper-plugin-utils" "7.10.4" - -babel-plugin-polyfill-corejs2@^0.3.3: - version "0.3.3" - resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.3.3.tgz#5d1bd3836d0a19e1b84bbf2d9640ccb6f951c122" - integrity sha512-8hOdmFYFSZhqg2C/JgLUQ+t52o5nirNwaWM2B9LWteozwIvM14VSwdsCAUET10qT+kmySAlseadmfeeSWFCy+Q== +babel-plugin-polyfill-corejs2@^0.4.10: + version "0.4.11" + resolved 
"https://registry.yarnpkg.com/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.11.tgz#30320dfe3ffe1a336c15afdcdafd6fd615b25e33" + integrity sha512-sMEJ27L0gRHShOh5G54uAAPaiCOygY/5ratXuiyb2G46FmlSpc9eFCzYVyDiPxfNbwzA7mYahmjQc5q+CZQ09Q== dependencies: - "@babel/compat-data" "^7.17.7" - "@babel/helper-define-polyfill-provider" "^0.3.3" - semver "^6.1.1" + "@babel/compat-data" "^7.22.6" + "@babel/helper-define-polyfill-provider" "^0.6.2" + semver "^6.3.1" -babel-plugin-polyfill-corejs3@^0.6.0: - version "0.6.0" - resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.6.0.tgz#56ad88237137eade485a71b52f72dbed57c6230a" - integrity sha512-+eHqR6OPcBhJOGgsIar7xoAB1GcSwVUA3XjAd7HJNzOXT4wv6/H7KIdA/Nc60cvUlDbKApmqNvD1B1bzOt4nyA== +babel-plugin-polyfill-corejs3@^0.10.6: + version "0.10.6" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.10.6.tgz#2deda57caef50f59c525aeb4964d3b2f867710c7" + integrity sha512-b37+KR2i/khY5sKmWNVQAnitvquQbNdWy6lJdsr0kmquCKEEUgMKK4SboVM3HtfnZilfjr4MMQ7vY58FVWDtIA== dependencies: - "@babel/helper-define-polyfill-provider" "^0.3.3" - core-js-compat "^3.25.1" + "@babel/helper-define-polyfill-provider" "^0.6.2" + core-js-compat "^3.38.0" -babel-plugin-polyfill-regenerator@^0.4.1: - version "0.4.1" - resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.4.1.tgz#390f91c38d90473592ed43351e801a9d3e0fd747" - integrity sha512-NtQGmyQDXjQqQ+IzRkBVwEOz9lQ4zxAQZgoAYEtU9dJjnl1Oc98qnN7jcp+bE7O7aYzVpavXE3/VKXNzUbh7aw== +babel-plugin-polyfill-regenerator@^0.6.1: + version "0.6.2" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.6.2.tgz#addc47e240edd1da1058ebda03021f382bba785e" + integrity sha512-2R25rQZWP63nGwaAswvDazbPXfrM3HwVoBXK6HcqeKrSrL/JqcC/rDcf95l4r7LXLyxDXc8uQDa064GubtCABg== dependencies: - "@babel/helper-define-polyfill-provider" "^0.3.3" + "@babel/helper-define-polyfill-provider" "^0.6.2" bail@^1.0.0: version "1.0.5" @@ -2845,11 +2857,6 @@ balanced-match@^1.0.0: resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee" integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== -base16@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/base16/-/base16-1.0.0.tgz#e297f60d7ec1014a7a971a39ebc8a98c0b681e70" - integrity sha512-pNdYkNPiJUnEhnfXV56+sQy8+AaPcG3POZAUnwr4EeqCUZFz4u2PePbo3e5Gj4ziYPCWGUZT9RHisvJKnwFuBQ== - base64-js@^1.3.1: version "1.5.1" resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.5.1.tgz#1b1b440160a5bf7ad40b650f095963481903930a" @@ -2871,35 +2878,33 @@ big.js@^5.2.2: integrity sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ== binary-extensions@^2.0.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-2.2.0.tgz#75f502eeaf9ffde42fc98829645be4ea76bd9e2d" - integrity sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA== + version "2.3.0" + resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-2.3.0.tgz#f6e14a97858d327252200242d4ccfe522c445522" + integrity sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw== -body-parser@1.20.1: - version "1.20.1" - resolved 
"https://registry.yarnpkg.com/body-parser/-/body-parser-1.20.1.tgz#b1812a8912c195cd371a3ee5e66faa2338a5c668" - integrity sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw== +body-parser@1.20.3: + version "1.20.3" + resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.20.3.tgz#1953431221c6fb5cd63c4b36d53fab0928e548c6" + integrity sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g== dependencies: bytes "3.1.2" - content-type "~1.0.4" + content-type "~1.0.5" debug "2.6.9" depd "2.0.0" destroy "1.2.0" http-errors "2.0.0" iconv-lite "0.4.24" on-finished "2.4.1" - qs "6.11.0" - raw-body "2.5.1" + qs "6.13.0" + raw-body "2.5.2" type-is "~1.6.18" unpipe "1.0.0" bonjour-service@^1.0.11: - version "1.0.14" - resolved "https://registry.yarnpkg.com/bonjour-service/-/bonjour-service-1.0.14.tgz#c346f5bc84e87802d08f8d5a60b93f758e514ee7" - integrity sha512-HIMbgLnk1Vqvs6B4Wq5ep7mxvj9sGz5d1JJyDNSGNIdA/w2MCz6GTjWTdjqOJV1bEPj+6IkxDvWNFKEBxNt4kQ== + version "1.2.1" + resolved "https://registry.yarnpkg.com/bonjour-service/-/bonjour-service-1.2.1.tgz#eb41b3085183df3321da1264719fbada12478d02" + integrity sha512-oSzCS2zV14bh2kji6vNe7vrpJYCHGvcZnlffFQ1MEoX/WOeQ/teD8SYWKR942OI3INjq8OMNJlbPK5LLLUxFDw== dependencies: - array-flatten "^2.1.2" - dns-equal "^1.0.0" fast-deep-equal "^3.1.3" multicast-dns "^7.2.5" @@ -2908,20 +2913,6 @@ boolbase@^1.0.0, boolbase@~1.0.0: resolved "https://registry.yarnpkg.com/boolbase/-/boolbase-1.0.0.tgz#68dff5fbe60c51eb37725ea9e3ed310dcc1e776e" integrity sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww== -boxen@^5.0.0: - version "5.1.2" - resolved "https://registry.yarnpkg.com/boxen/-/boxen-5.1.2.tgz#788cb686fc83c1f486dfa8a40c68fc2b831d2b50" - integrity sha512-9gYgQKXx+1nP8mP7CzFyaUARhg7D3n1dF/FnErWmu9l6JvGpNUN278h0aSb+QjoiKSWG+iZ3uHrcqk0qrY9RQQ== - dependencies: - ansi-align "^3.0.0" - camelcase "^6.2.0" - chalk "^4.1.0" - cli-boxes "^2.2.1" - string-width "^4.2.2" - type-fest "^0.20.2" - widest-line "^3.1.0" - wrap-ansi "^7.0.0" - boxen@^6.2.1: version "6.2.1" resolved "https://registry.yarnpkg.com/boxen/-/boxen-6.2.1.tgz#b098a2278b2cd2845deef2dff2efc38d329b434d" @@ -2936,6 +2927,20 @@ boxen@^6.2.1: widest-line "^4.0.1" wrap-ansi "^8.0.1" +boxen@^7.0.0: + version "7.1.1" + resolved "https://registry.yarnpkg.com/boxen/-/boxen-7.1.1.tgz#f9ba525413c2fec9cdb88987d835c4f7cad9c8f4" + integrity sha512-2hCgjEmP8YLWQ130n2FerGv7rYpfBmnmp9Uy2Le1vge6X3gZIfSmEzP5QTDElFxcvVcXlEn8Aq6MU/PZygIOog== + dependencies: + ansi-align "^3.0.1" + camelcase "^7.0.1" + chalk "^5.2.0" + cli-boxes "^3.0.0" + string-width "^5.1.2" + type-fest "^2.13.0" + widest-line "^4.0.1" + wrap-ansi "^8.1.0" + brace-expansion@^1.1.7: version "1.1.11" resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" @@ -2944,22 +2949,22 @@ brace-expansion@^1.1.7: balanced-match "^1.0.0" concat-map "0.0.1" -braces@^3.0.2, braces@~3.0.2: - version "3.0.2" - resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.2.tgz#3454e1a462ee8d599e236df336cd9ea4f8afe107" - integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A== +braces@^3.0.3, braces@~3.0.2: + version "3.0.3" + resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789" + integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA== 
dependencies: - fill-range "^7.0.1" + fill-range "^7.1.1" -browserslist@^4.0.0, browserslist@^4.14.5, browserslist@^4.16.6, browserslist@^4.18.1, browserslist@^4.21.3, browserslist@^4.21.4: - version "4.21.4" - resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.21.4.tgz#e7496bbc67b9e39dd0f98565feccdcb0d4ff6987" - integrity sha512-CBHJJdDmgjl3daYjN5Cp5kbTf1mUhZoS+beLklHIvkOWscs83YAhLlF3Wsh/lciQYAcbBJgTOD44VtG31ZM4Hw== +browserslist@^4.0.0, browserslist@^4.18.1, browserslist@^4.21.10, browserslist@^4.23.0, browserslist@^4.23.3, browserslist@^4.24.0: + version "4.24.2" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.24.2.tgz#f5845bc91069dbd55ee89faf9822e1d885d16580" + integrity sha512-ZIc+Q62revdMcqC6aChtW4jz3My3klmCO1fEmINZY/8J3EpBg5/A/D0AKmBveUh6pgoeycoMkVMko84tuYS+Gg== dependencies: - caniuse-lite "^1.0.30001400" - electron-to-chromium "^1.4.251" - node-releases "^2.0.6" - update-browserslist-db "^1.0.9" + caniuse-lite "^1.0.30001669" + electron-to-chromium "^1.5.41" + node-releases "^2.0.18" + update-browserslist-db "^1.1.1" buble@0.19.6: version "0.19.6" @@ -2996,26 +3001,34 @@ bytes@3.1.2: resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.1.2.tgz#8b0beeb98605adf1b128fa4386403c009e0221a5" integrity sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg== -cacheable-request@^6.0.0: - version "6.1.0" - resolved "https://registry.yarnpkg.com/cacheable-request/-/cacheable-request-6.1.0.tgz#20ffb8bd162ba4be11e9567d823db651052ca912" - integrity sha512-Oj3cAGPCqOZX7Rz64Uny2GYAZNliQSqfbePrgAQ1wKAihYmCUnraBtJtKcGR4xz7wF+LoJC+ssFZvv5BgF9Igg== - dependencies: - clone-response "^1.0.2" - get-stream "^5.1.0" - http-cache-semantics "^4.0.0" - keyv "^3.0.0" - lowercase-keys "^2.0.0" - normalize-url "^4.1.0" - responselike "^1.0.2" - -call-bind@^1.0.0, call-bind@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.2.tgz#b1d4e89e688119c3c9a903ad30abb2f6a919be3c" - integrity sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA== +cacheable-lookup@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/cacheable-lookup/-/cacheable-lookup-7.0.0.tgz#3476a8215d046e5a3202a9209dd13fec1f933a27" + integrity sha512-+qJyx4xiKra8mZrcwhjMRMUhD5NR1R8esPkzIYxX96JiecFoxAXFuz/GpR3+ev4PE1WamHip78wV0vcmPQtp8w== + +cacheable-request@^10.2.8: + version "10.2.14" + resolved "https://registry.yarnpkg.com/cacheable-request/-/cacheable-request-10.2.14.tgz#eb915b665fda41b79652782df3f553449c406b9d" + integrity sha512-zkDT5WAF4hSSoUgyfg5tFIxz8XQK+25W/TLVojJTMKBaxevLBBtLxgqguAuVQB8PVW79FVjHcU+GJ9tVbDZ9mQ== + dependencies: + "@types/http-cache-semantics" "^4.0.2" + get-stream "^6.0.1" + http-cache-semantics "^4.1.1" + keyv "^4.5.3" + mimic-response "^4.0.0" + normalize-url "^8.0.0" + responselike "^3.0.0" + +call-bind@^1.0.0, call-bind@^1.0.2, call-bind@^1.0.5, call-bind@^1.0.6, call-bind@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.7.tgz#06016599c40c56498c18769d2730be242b6fa3b9" + integrity sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w== dependencies: - function-bind "^1.1.1" - get-intrinsic "^1.0.2" + es-define-property "^1.0.0" + es-errors "^1.3.0" + function-bind "^1.1.2" + get-intrinsic "^1.2.4" + set-function-length "^1.2.1" callsites@^3.0.0: version "3.1.0" @@ -3030,16 +3043,16 @@ camel-case@^4.1.2: pascal-case "^3.1.2" tslib "^2.0.3" -camelcase-css@2.0.1: - 
version "2.0.1" - resolved "https://registry.yarnpkg.com/camelcase-css/-/camelcase-css-2.0.1.tgz#ee978f6947914cc30c6b44741b6ed1df7f043fd5" - integrity sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA== - camelcase@^6.2.0: version "6.3.0" resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-6.3.0.tgz#5685b95eb209ac9c0c177467778c9c84df58ba9a" integrity sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA== +camelcase@^7.0.1: + version "7.0.1" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-7.0.1.tgz#f02e50af9fd7782bc8b88a3558c32fd3a388f048" + integrity sha512-xlx1yCK2Oc1APsPXDL2LdlNP6+uu8OCDdhOBSVT279M/S+y75O30C2VuD8T2ogdePBBl7PfPF4504tnLgX3zfw== + caniuse-api@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/caniuse-api/-/caniuse-api-3.0.0.tgz#5e4d90e2274961d46291997df599e3ed008ee4c0" @@ -3050,22 +3063,17 @@ caniuse-api@^3.0.0: lodash.memoize "^4.1.2" lodash.uniq "^4.5.0" -caniuse-lite@^1.0.0, caniuse-lite@^1.0.30001400, caniuse-lite@^1.0.30001426: - version "1.0.30001441" - resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001441.tgz#987437b266260b640a23cd18fbddb509d7f69f3e" - integrity sha512-OyxRR4Vof59I3yGWXws6i908EtGbMzVUi3ganaZQHmydk1iwDhRnvaPG2WaR0KcqrDFKrxVZHULT396LEPhXfg== - -ccount@^1.0.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/ccount/-/ccount-1.1.0.tgz#246687debb6014735131be8abab2d93898f8d043" - integrity sha512-vlNK021QdI7PNeiUh/lKkC/mNHHfV0m/Ad5JoI0TYtlBnJAslM/JIkm/tGC88bkLIwO6OQ5uV6ztS6kVAtCDlg== +caniuse-lite@^1.0.0, caniuse-lite@^1.0.30001646, caniuse-lite@^1.0.30001669: + version "1.0.30001669" + resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001669.tgz#fda8f1d29a8bfdc42de0c170d7f34a9cf19ed7a3" + integrity sha512-DlWzFDJqstqtIVx1zeSpIMLjunf5SmwOw0N2Ck/QSQdS8PLS4+9HrLaYei4w8BIAL7IB/UEDu889d8vhCTPA0w== ccount@^2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/ccount/-/ccount-2.0.1.tgz#17a3bf82302e0870d6da43a01311a8bc02a3ecf5" integrity sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg== -chalk@^2.0.0, chalk@^2.4.1: +chalk@^2.4.1, chalk@^2.4.2: version "2.4.2" resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424" integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== @@ -3082,36 +3090,31 @@ chalk@^4.0.0, chalk@^4.1.0, chalk@^4.1.2: ansi-styles "^4.1.0" supports-color "^7.1.0" +chalk@^5.0.1, chalk@^5.2.0: + version "5.3.0" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-5.3.0.tgz#67c20a7ebef70e7f3970a01f90fa210cb6860385" + integrity sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w== + +char-regex@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/char-regex/-/char-regex-1.0.2.tgz#d744358226217f981ed58f479b1d6bcc29545dcf" + integrity sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw== + character-entities-html4@^2.0.0: version "2.1.0" resolved "https://registry.yarnpkg.com/character-entities-html4/-/character-entities-html4-2.1.0.tgz#1f1adb940c971a4b22ba39ddca6b618dc6e56b2b" integrity sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA== -character-entities-legacy@^1.0.0: - version "1.1.4" - resolved 
"https://registry.yarnpkg.com/character-entities-legacy/-/character-entities-legacy-1.1.4.tgz#94bc1845dce70a5bb9d2ecc748725661293d8fc1" - integrity sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA== - character-entities-legacy@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz#76bc83a90738901d7bc223a9e93759fdd560125b" integrity sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ== -character-entities@^1.0.0: - version "1.2.4" - resolved "https://registry.yarnpkg.com/character-entities/-/character-entities-1.2.4.tgz#e12c3939b7eaf4e5b15e7ad4c5e28e1d48c5b16b" - integrity sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw== - character-entities@^2.0.0: version "2.0.2" resolved "https://registry.yarnpkg.com/character-entities/-/character-entities-2.0.2.tgz#2d09c2e72cd9523076ccb21157dff66ad43fcc22" integrity sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ== -character-reference-invalid@^1.0.0: - version "1.1.4" - resolved "https://registry.yarnpkg.com/character-reference-invalid/-/character-reference-invalid-1.1.4.tgz#083329cda0eae272ab3dbbf37e9a382c13af1560" - integrity sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg== - character-reference-invalid@^2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz#85c66b041e43b47210faf401278abf808ac45cb9" @@ -3129,7 +3132,7 @@ cheerio-select@^2.1.0: domhandler "^5.0.3" domutils "^3.0.1" -cheerio@^1.0.0-rc.12: +cheerio@1.0.0-rc.12: version "1.0.0-rc.12" resolved "https://registry.yarnpkg.com/cheerio/-/cheerio-1.0.0-rc.12.tgz#788bf7466506b1c6bf5fae51d24a2c4d62e47683" integrity sha512-VqR8m68vM46BNnuZ5NtnGBKIE/DfN0cRIzg9n40EIq9NOv90ayxLBXA8fXC5gquFRGJSTRqBq25Jt2ECLR431Q== @@ -3143,9 +3146,9 @@ cheerio@^1.0.0-rc.12: parse5-htmlparser2-tree-adapter "^7.0.0" chokidar@^3.4.2, chokidar@^3.5.3: - version "3.5.3" - resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-3.5.3.tgz#1cf37c8707b932bd1af1ae22c0432e2acd1903bd" - integrity sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw== + version "3.6.0" + resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-3.6.0.tgz#197c6cc669ef2a8dc5e7b4d97ee4e092c3eb0d5b" + integrity sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw== dependencies: anymatch "~3.1.2" braces "~3.0.2" @@ -3158,29 +3161,19 @@ chokidar@^3.4.2, chokidar@^3.5.3: fsevents "~2.3.2" chrome-trace-event@^1.0.2: - version "1.0.3" - resolved "https://registry.yarnpkg.com/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz#1015eced4741e15d06664a957dbbf50d041e26ac" - integrity sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg== - -ci-info@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-2.0.0.tgz#67a9e964be31a51e15e5010d58e6f12834002f46" - integrity sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ== + version "1.0.4" + resolved "https://registry.yarnpkg.com/chrome-trace-event/-/chrome-trace-event-1.0.4.tgz#05bffd7ff928465093314708c93bdfa9bd1f0f5b" + integrity sha512-rNjApaLzuwaOTjCiT8lSDdGN1APCiqkChLMJxJPWLunPAt5fy8xgU9/jNOchV84wfIxrA0lRQB7oCT8jrn/wrQ== ci-info@^3.2.0: - version 
"3.7.1" - resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-3.7.1.tgz#708a6cdae38915d597afdf3b145f2f8e1ff55f3f" - integrity sha512-4jYS4MOAaCIStSRwiuxc4B8MYhIe676yO1sYGzARnjXkWpmzZMMYxY6zu8WYWDhSuth5zhrQ1rhNSibyyvv4/w== - -classnames@^2.2.6: - version "2.3.2" - resolved "https://registry.yarnpkg.com/classnames/-/classnames-2.3.2.tgz#351d813bf0137fcc6a76a16b88208d2560a0d924" - integrity sha512-CSbhY4cFEJRe6/GQzIk5qXZ4Jeg5pcsP7b5peFSDpffpe1cqjASH/n9UTjBwOp6XpMSTwQ8Za2K5V02ueA7Tmw== + version "3.9.0" + resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-3.9.0.tgz#4279a62028a7b1f262f3473fc9605f5e218c59b4" + integrity sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ== -clean-css@^5.2.2, clean-css@^5.3.0: - version "5.3.1" - resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-5.3.1.tgz#d0610b0b90d125196a2894d35366f734e5d7aa32" - integrity sha512-lCr8OHhiWCTw4v8POJovCoh4T7I9U11yVsPjMWWnnMmp9ZowCxyad1Pathle/9HjaDp+fdQKjO9fQydE6RHTZg== +clean-css@^5.2.2, clean-css@^5.3.2, clean-css@~5.3.2: + version "5.3.3" + resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-5.3.3.tgz#b330653cd3bd6b75009cc25c714cae7b93351ccd" + integrity sha512-D5J+kHaVb/wKSFcyyV75uCn8fiY4sV38XJoe4CUyGQ+mOU/fMVYUdH1hJC+CJQ5uY3EnW27SbJYS4X8BiLrAFg== dependencies: source-map "~0.6.0" @@ -3189,20 +3182,15 @@ clean-stack@^2.0.0: resolved "https://registry.yarnpkg.com/clean-stack/-/clean-stack-2.2.0.tgz#ee8472dbb129e727b31e8a10a427dee9dfe4008b" integrity sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A== -cli-boxes@^2.2.1: - version "2.2.1" - resolved "https://registry.yarnpkg.com/cli-boxes/-/cli-boxes-2.2.1.tgz#ddd5035d25094fce220e9cab40a45840a440318f" - integrity sha512-y4coMcylgSCdVinjiDBuR8PCC2bLjyGTwEmPb9NHR/QaNU6EUOXcTY/s6VjGMD6ENSEaeQYHCY0GNGS5jfMwPw== - cli-boxes@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/cli-boxes/-/cli-boxes-3.0.0.tgz#71a10c716feeba005e4504f36329ef0b17cf3145" integrity sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g== -cli-table3@^0.6.2: - version "0.6.3" - resolved "https://registry.yarnpkg.com/cli-table3/-/cli-table3-0.6.3.tgz#61ab765aac156b52f222954ffc607a6f01dbeeb2" - integrity sha512-w5Jac5SykAeZJKntOxJCrm63Eg5/4dhMWIcuTbo9rpE+brgaSZo0RuNJZeOyMgsUdhDeojvgyQLmjI+K50ZGyg== +cli-table3@^0.6.3: + version "0.6.5" + resolved "https://registry.yarnpkg.com/cli-table3/-/cli-table3-0.6.5.tgz#013b91351762739c16a9567c21a04632e449bf2f" + integrity sha512-+W/5efTR7y5HRD7gACw9yQjqMVvEMLBHmboM/kPWam+H+Hmyrgjh6YncVKK122YZkXrLudzTuAukUw9FnMf7IQ== dependencies: string-width "^4.2.0" optionalDependencies: @@ -3217,18 +3205,16 @@ clone-deep@^4.0.1: kind-of "^6.0.2" shallow-clone "^3.0.0" -clone-response@^1.0.2: - version "1.0.3" - resolved "https://registry.yarnpkg.com/clone-response/-/clone-response-1.0.3.tgz#af2032aa47816399cf5f0a1d0db902f517abb8c3" - integrity sha512-ROoL94jJH2dUVML2Y/5PEDNaSHgeOdSDicUyS7izcF63G6sTc/FTjLub4b8Il9S8S0beOfYt0TaA5qvFK+w0wA== - dependencies: - mimic-response "^1.0.0" - clsx@^1.1.1, clsx@^1.2.1: version "1.2.1" resolved "https://registry.yarnpkg.com/clsx/-/clsx-1.2.1.tgz#0ddc4a20a549b59c93a4116bb26f5294ca17dc12" integrity sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg== +clsx@^2.0.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/clsx/-/clsx-2.1.1.tgz#eed397c9fd8bd882bfb18deab7102049a2f32999" + integrity 
sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA== + coa@^2.0.2: version "2.0.2" resolved "https://registry.yarnpkg.com/coa/-/coa-2.0.2.tgz#43f6c21151b4ef2bf57187db0d73de229e3e7ec3" @@ -3238,10 +3224,10 @@ coa@^2.0.2: chalk "^2.4.1" q "^1.1.2" -collapse-white-space@^1.0.2: - version "1.0.6" - resolved "https://registry.yarnpkg.com/collapse-white-space/-/collapse-white-space-1.0.6.tgz#e63629c0016665792060dbbeb79c42239d2c5287" - integrity sha512-jEovNnrhMuqyCcjfEJA56v0Xq8SkIoPKDyaHahwo3POf4qcSXqMYuwNcOTzp74vTsR9Tn08z4MxWqAhcekogkQ== +collapse-white-space@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/collapse-white-space/-/collapse-white-space-2.1.0.tgz#640257174f9f42c740b40f3b55ee752924feefca" + integrity sha512-loKTxY1zCOuG4j9f6EPnuyyYkf58RnhhWTvRoZEokgB+WbdXehfjFviyOVYkqzEWz1Q5kRiZdBYS5SwxbQYwzw== color-convert@^1.9.0: version "1.9.3" @@ -3272,20 +3258,20 @@ color-support@^1.1.2: resolved "https://registry.yarnpkg.com/color-support/-/color-support-1.1.3.tgz#93834379a1cc9a0c61f82f52f0d04322251bd5a2" integrity sha512-qiBjkpbMLO/HL68y+lh4q0/O1MZFj2RX6X/KmMa3+gJD3z+WwI1ZzDHysvqHGS3mP6mznPckpXmw1nI9cJjyRg== -colord@^2.9.1: +colord@^2.9.3: version "2.9.3" resolved "https://registry.yarnpkg.com/colord/-/colord-2.9.3.tgz#4f8ce919de456f1d5c1c368c307fe20f3e59fb43" integrity sha512-jeC1axXpnb0/2nn/Y1LPuLdgXBLH7aDcHu4KEKfqw3CUhX7ZpfBSlPKyqXE6btIgEzfWtrX3/tyBCaCvXvMkOw== colorette@^2.0.10: - version "2.0.19" - resolved "https://registry.yarnpkg.com/colorette/-/colorette-2.0.19.tgz#cdf044f47ad41a0f4b56b3a0d5b4e6e1a2d5a798" - integrity sha512-3tlv/dIP7FWvj3BsbHrGLJ6l/oKh1O3TcgBqMn+yyCagOxc23fyzDS6HypQbgxWbkpDnf52p1LuR4eWDQ/K9WQ== + version "2.0.20" + resolved "https://registry.yarnpkg.com/colorette/-/colorette-2.0.20.tgz#9eb793e6833067f7235902fcd3b09917a000a95a" + integrity sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w== combine-promises@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/combine-promises/-/combine-promises-1.1.0.tgz#72db90743c0ca7aab7d0d8d2052fd7b0f674de71" - integrity sha512-ZI9jvcLDxqwaXEixOhArm3r7ReIivsXkpbyEWyeOhzz1QS0iSgBPnWvEqvIQtYyamGCYA88gFhmUrs9hrrQ0pg== + version "1.2.0" + resolved "https://registry.yarnpkg.com/combine-promises/-/combine-promises-1.2.0.tgz#5f2e68451862acf85761ded4d9e2af7769c2ca6a" + integrity sha512-VcQB1ziGD0NXrhKxiwyNbCDmRzs/OShMs2GqW2DlU2A/Sd0nQxE1oWDAE5O0ygSx5mgQOn9eIFh7yKPgFRVkPQ== comma-separated-tokens@^1.0.0: version "1.0.8" @@ -3302,6 +3288,11 @@ commander@7, commander@^7.2.0: resolved "https://registry.yarnpkg.com/commander/-/commander-7.2.0.tgz#a36cb57d0b501ce108e4d20559a150a391d97ab7" integrity sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw== +commander@^10.0.0: + version "10.0.1" + resolved "https://registry.yarnpkg.com/commander/-/commander-10.0.1.tgz#881ee46b4f77d1c1dccc5823433aa39b022cbe06" + integrity sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug== + commander@^2.20.0: version "2.20.3" resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.3.tgz#fd485e84c03eb4881c20722ba48035e8531aeb33" @@ -3317,10 +3308,10 @@ commander@^8.3.0: resolved "https://registry.yarnpkg.com/commander/-/commander-8.3.0.tgz#4837ea1b2da67b9c616a67afbb0fafee567bca66" integrity sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww== -commondir@^1.0.1: - version "1.0.1" - resolved 
"https://registry.yarnpkg.com/commondir/-/commondir-1.0.1.tgz#ddd800da0c66127393cca5950ea968a3aaf1253b" - integrity sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg== +common-path-prefix@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/common-path-prefix/-/common-path-prefix-3.0.0.tgz#7d007a7e07c58c4b4d5f433131a19141b29f11e0" + integrity sha512-QE33hToZseCH3jS0qN96O/bSh3kaw/h+Tq7ngyY9eWDUnTlTNUyqfqvCXioLe5Na5jFsL78ra/wuBU4iuEgd4w== component-props@1.1.1: version "1.1.1" @@ -3357,17 +3348,24 @@ concat-map@0.0.1: resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" integrity sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg== -configstore@^5.0.1: - version "5.0.1" - resolved "https://registry.yarnpkg.com/configstore/-/configstore-5.0.1.tgz#d365021b5df4b98cdd187d6a3b0e3f6a7cc5ed96" - integrity sha512-aMKprgk5YhBNyH25hj8wGt2+D52Sw1DRRIzqBwLp2Ya9mFmY8KPvvtvmna8SxVR9JMZ4kzMD68N22vlaRpkeFA== +config-chain@^1.1.11: + version "1.1.13" + resolved "https://registry.yarnpkg.com/config-chain/-/config-chain-1.1.13.tgz#fad0795aa6a6cdaff9ed1b68e9dff94372c232f4" + integrity sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ== dependencies: - dot-prop "^5.2.0" - graceful-fs "^4.1.2" - make-dir "^3.0.0" - unique-string "^2.0.0" - write-file-atomic "^3.0.0" - xdg-basedir "^4.0.0" + ini "^1.3.4" + proto-list "~1.2.1" + +configstore@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/configstore/-/configstore-6.0.0.tgz#49eca2ebc80983f77e09394a1a56e0aca8235566" + integrity sha512-cD31W1v3GqUlQvbBCGcXmd2Nj9SvLDOP1oQ0YFuLETufzSPaKp11rYBsSOm7rCsW3OnIRAFM3OxRhceaXNYHkA== + dependencies: + dot-prop "^6.0.1" + graceful-fs "^4.2.6" + unique-string "^3.0.0" + write-file-atomic "^3.0.3" + xdg-basedir "^5.0.1" connect-history-api-fallback@^2.0.0: version "2.0.0" @@ -3396,30 +3394,30 @@ content-disposition@0.5.4: dependencies: safe-buffer "5.2.1" -content-type@~1.0.4: - version "1.0.4" - resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.4.tgz#e138cc75e040c727b1966fe5e5f8c9aee256fe3b" - integrity sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA== +content-type@~1.0.4, content-type@~1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.5.tgz#8b773162656d1d1086784c8f23a54ce6d73d7918" + integrity sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA== -convert-source-map@^1.7.0: - version "1.9.0" - resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.9.0.tgz#7faae62353fb4213366d0ca98358d22e8368b05f" - integrity sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A== +convert-source-map@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-2.0.0.tgz#4b560f649fc4e918dd0ab75cf4961e8bc882d82a" + integrity sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg== cookie-signature@1.0.6: version "1.0.6" resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c" integrity sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ== -cookie@0.5.0: - version "0.5.0" - resolved 
"https://registry.yarnpkg.com/cookie/-/cookie-0.5.0.tgz#d1f5d71adec6558c58f389987c366aa47e994f8b" - integrity sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw== +cookie@0.7.1: + version "0.7.1" + resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.7.1.tgz#2f73c42142d5d5cf71310a74fc4ae61670e5dbc9" + integrity sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w== -copy-text-to-clipboard@^3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/copy-text-to-clipboard/-/copy-text-to-clipboard-3.0.1.tgz#8cbf8f90e0a47f12e4a24743736265d157bce69c" - integrity sha512-rvVsHrpFcL4F2P8ihsoLdFHmd404+CMg71S756oRSeQgqk51U3kicGdnvfkrxva0xXH92SjGS62B0XIJsbh+9Q== +copy-text-to-clipboard@^3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/copy-text-to-clipboard/-/copy-text-to-clipboard-3.2.0.tgz#0202b2d9bdae30a49a53f898626dcc3b49ad960b" + integrity sha512-RnJFp1XR/LOBDckxTib5Qjr/PMfkatD0MUCQgdpqS8MdKiNUzBjAQBEN6oUy+jW7LI93BBG3DtMB2KOOKpGs2Q== copy-webpack-plugin@^11.0.0: version "11.0.0" @@ -3433,28 +3431,35 @@ copy-webpack-plugin@^11.0.0: schema-utils "^4.0.0" serialize-javascript "^6.0.0" -core-js-compat@^3.25.1: - version "3.27.1" - resolved "https://registry.yarnpkg.com/core-js-compat/-/core-js-compat-3.27.1.tgz#b5695eb25c602d72b1d30cbfba3cb7e5e4cf0a67" - integrity sha512-Dg91JFeCDA17FKnneN7oCMz4BkQ4TcffkgHP4OWwp9yx3pi7ubqMDXXSacfNak1PQqjc95skyt+YBLHQJnkJwA== +core-js-compat@^3.38.0, core-js-compat@^3.38.1: + version "3.38.1" + resolved "https://registry.yarnpkg.com/core-js-compat/-/core-js-compat-3.38.1.tgz#2bc7a298746ca5a7bcb9c164bcb120f2ebc09a09" + integrity sha512-JRH6gfXxGmrzF3tZ57lFx97YARxCXPaMzPo6jELZhv88pBH5VXpQ+y0znKGlFnzuaihqhLbefxSJxWJMPtfDzw== dependencies: - browserslist "^4.21.4" + browserslist "^4.23.3" -core-js-pure@^3.25.1: - version "3.27.1" - resolved "https://registry.yarnpkg.com/core-js-pure/-/core-js-pure-3.27.1.tgz#ede4a6b8440585c7190062757069c01d37a19dca" - integrity sha512-BS2NHgwwUppfeoqOXqi08mUqS5FiZpuRuJJpKsaME7kJz0xxuk0xkhDdfMIlP/zLa80krBqss1LtD7f889heAw== +core-js-pure@^3.30.2: + version "3.38.1" + resolved "https://registry.yarnpkg.com/core-js-pure/-/core-js-pure-3.38.1.tgz#e8534062a54b7221344884ba9b52474be495ada3" + integrity sha512-BY8Etc1FZqdw1glX0XNOq2FDwfrg/VGqoZOZCdaL+UmdaqDwQwYXkMJT4t6In+zfEfOJDcM9T0KdbBeJg8KKCQ== -core-js@^3.14.0, core-js@^3.23.3: - version "3.27.1" - resolved "https://registry.yarnpkg.com/core-js/-/core-js-3.27.1.tgz#23cc909b315a6bb4e418bf40a52758af2103ba46" - integrity sha512-GutwJLBChfGCpwwhbYoqfv03LAfmiz7e7D/BNxzeMxwQf10GRSzqiOjx7AmtEk+heiD/JWmBuyBPgFtx0Sg1ww== +core-js@^3.14.0, core-js@^3.31.1: + version "3.38.1" + resolved "https://registry.yarnpkg.com/core-js/-/core-js-3.38.1.tgz#aa375b79a286a670388a1a363363d53677c0383e" + integrity sha512-OP35aUorbU3Zvlx7pjsFdu1rGNnD4pgw/CWoYzRY3t2EzoVT7shKHY1dlAy3f41cGIO7ZDPQimhGFTlEYkG/Hw== core-util-is@~1.0.0: version "1.0.3" resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.3.tgz#a6042d3634c2b27e9328f837b965fac83808db85" integrity sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ== +cose-base@^1.0.0: + version "1.0.3" + resolved "https://registry.yarnpkg.com/cose-base/-/cose-base-1.0.3.tgz#650334b41b869578a543358b80cda7e0abe0a60a" + integrity sha512-s9whTXInMSgAp/NVXVNuVxVKzGH2qck3aQlVHxDCdAEPgtMKwc4Wq6/QKhgdEdgbLSi9rBTAcPoRa6JpiG4ksg== + dependencies: + layout-base "^1.0.0" + cosmiconfig@^6.0.0: version "6.0.0" resolved 
"https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-6.0.0.tgz#da4fee853c52f6b1e6935f41c1a2fc50bd4a9982" @@ -3466,7 +3471,7 @@ cosmiconfig@^6.0.0: path-type "^4.0.0" yaml "^1.7.2" -cosmiconfig@^7.0.0, cosmiconfig@^7.0.1: +cosmiconfig@^7.0.0: version "7.1.0" resolved "https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-7.1.0.tgz#1443b9afa596b670082ea46cbd8f6a62b84635f6" integrity sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA== @@ -3477,12 +3482,15 @@ cosmiconfig@^7.0.0, cosmiconfig@^7.0.1: path-type "^4.0.0" yaml "^1.10.0" -cross-fetch@^3.1.5: - version "3.1.5" - resolved "https://registry.yarnpkg.com/cross-fetch/-/cross-fetch-3.1.5.tgz#e1389f44d9e7ba767907f7af8454787952ab534f" - integrity sha512-lvb1SBsI0Z7GDwmuid+mU3kWVBwTVUbe7S0H52yaaAdQOXq2YktTCZdlAcNKFzE6QtRz0snpw9bNiPeOIkkQvw== +cosmiconfig@^8.1.3, cosmiconfig@^8.3.5: + version "8.3.6" + resolved "https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-8.3.6.tgz#060a2b871d66dba6c8538ea1118ba1ac16f5fae3" + integrity sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA== dependencies: - node-fetch "2.6.7" + import-fresh "^3.3.0" + js-yaml "^4.1.0" + parse-json "^5.2.0" + path-type "^4.0.0" cross-spawn@^7.0.3: version "7.0.3" @@ -3493,41 +3501,43 @@ cross-spawn@^7.0.3: shebang-command "^2.0.0" which "^2.0.1" -crypto-random-string@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/crypto-random-string/-/crypto-random-string-2.0.0.tgz#ef2a7a966ec11083388369baa02ebead229b30d5" - integrity sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA== +crypto-random-string@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/crypto-random-string/-/crypto-random-string-4.0.0.tgz#5a3cc53d7dd86183df5da0312816ceeeb5bb1fc2" + integrity sha512-x8dy3RnvYdlUcPOjkEHqozhiwzKNSq7GcPuXFbnyMOCHxX8V3OgIg/pYuabl2sbUPfIJaeAQB7PMOK8DFIdoRA== + dependencies: + type-fest "^1.0.1" -css-declaration-sorter@^6.3.1: - version "6.3.1" - resolved "https://registry.yarnpkg.com/css-declaration-sorter/-/css-declaration-sorter-6.3.1.tgz#be5e1d71b7a992433fb1c542c7a1b835e45682ec" - integrity sha512-fBffmak0bPAnyqc/HO8C3n2sHrp9wcqQz6ES9koRF2/mLOVAx9zIQ3Y7R29sYCteTPqMCwns4WYQoCX91Xl3+w== +css-declaration-sorter@^7.2.0: + version "7.2.0" + resolved "https://registry.yarnpkg.com/css-declaration-sorter/-/css-declaration-sorter-7.2.0.tgz#6dec1c9523bc4a643e088aab8f09e67a54961024" + integrity sha512-h70rUM+3PNFuaBDTLe8wF/cdWu+dOZmb7pJt8Z2sedYbAcQVQV/tEchueg3GWxwqS0cxtbxmaHEdkNACqcvsow== -css-loader@^6.7.1: - version "6.7.3" - resolved "https://registry.yarnpkg.com/css-loader/-/css-loader-6.7.3.tgz#1e8799f3ccc5874fdd55461af51137fcc5befbcd" - integrity sha512-qhOH1KlBMnZP8FzRO6YCH9UHXQhVMcEGLyNdb7Hv2cpcmJbW0YrddO+tG1ab5nT41KpHIYGsbeHqxB9xPu1pKQ== +css-loader@^6.8.1: + version "6.11.0" + resolved "https://registry.yarnpkg.com/css-loader/-/css-loader-6.11.0.tgz#33bae3bf6363d0a7c2cf9031c96c744ff54d85ba" + integrity sha512-CTJ+AEQJjq5NzLga5pE39qdiSV56F8ywCIsqNIRF0r7BDgWsN25aazToqAFg7ZrtA/U016xudB3ffgweORxX7g== dependencies: icss-utils "^5.1.0" - postcss "^8.4.19" - postcss-modules-extract-imports "^3.0.0" - postcss-modules-local-by-default "^4.0.0" - postcss-modules-scope "^3.0.0" + postcss "^8.4.33" + postcss-modules-extract-imports "^3.1.0" + postcss-modules-local-by-default "^4.0.5" + postcss-modules-scope "^3.2.0" postcss-modules-values "^4.0.0" postcss-value-parser "^4.2.0" - semver "^7.3.8" + semver "^7.5.4" 
-css-minimizer-webpack-plugin@^4.0.0: - version "4.2.2" - resolved "https://registry.yarnpkg.com/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-4.2.2.tgz#79f6199eb5adf1ff7ba57f105e3752d15211eb35" - integrity sha512-s3Of/4jKfw1Hj9CxEO1E5oXhQAxlayuHO2y/ML+C6I9sQ7FdzfEV6QgMLN3vI+qFsjJGIAFLKtQK7t8BOXAIyA== +css-minimizer-webpack-plugin@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-5.0.1.tgz#33effe662edb1a0bf08ad633c32fa75d0f7ec565" + integrity sha512-3caImjKFQkS+ws1TGcFn0V1HyDJFq1Euy589JlD6/3rV2kj+w7r5G9WDMgSHvpvXHNZ2calVypZWuEDQd9wfLg== dependencies: - cssnano "^5.1.8" - jest-worker "^29.1.2" - postcss "^8.4.17" - schema-utils "^4.0.0" - serialize-javascript "^6.0.0" - source-map "^0.6.1" + "@jridgewell/trace-mapping" "^0.3.18" + cssnano "^6.0.1" + jest-worker "^29.4.3" + postcss "^8.4.24" + schema-utils "^4.0.1" + serialize-javascript "^6.0.1" css-select-base-adapter@^0.1.1: version "0.1.1" @@ -3579,7 +3589,7 @@ css-tree@1.0.0-alpha.37: mdn-data "2.0.4" source-map "^0.6.1" -css-tree@^1.1.2, css-tree@^1.1.3: +css-tree@^1.1.2: version "1.1.3" resolved "https://registry.yarnpkg.com/css-tree/-/css-tree-1.1.3.tgz#eb4870fb6fd7707327ec95c2ff2ab09b5e8db91d" integrity sha512-tRpdppF7TRazZrjJ6v3stzv93qxRcSsFmW6cX0Zm2NVKpxE1WV1HblnghVv9TreireHkqI/VDEsfolRF1p6y7Q== @@ -3587,6 +3597,22 @@ css-tree@^1.1.2, css-tree@^1.1.3: mdn-data "2.0.14" source-map "^0.6.1" +css-tree@^2.3.1: + version "2.3.1" + resolved "https://registry.yarnpkg.com/css-tree/-/css-tree-2.3.1.tgz#10264ce1e5442e8572fc82fbe490644ff54b5c20" + integrity sha512-6Fv1DV/TYw//QF5IzQdqsNDjx/wc8TrMBZsqjL9eW01tWb7R7k/mq+/VXfJCl7SoD5emsJop9cOByJZfs8hYIw== + dependencies: + mdn-data "2.0.30" + source-map-js "^1.0.1" + +css-tree@~2.2.0: + version "2.2.1" + resolved "https://registry.yarnpkg.com/css-tree/-/css-tree-2.2.1.tgz#36115d382d60afd271e377f9c5f67d02bd48c032" + integrity sha512-OA0mILzGc1kCOCSJerOeqDxDQ4HOh+G8NbOJFOTgOCzpw7fCBubk0fEyxp8AgOL/jvLgYA/uV0cMbe43ElF1JA== + dependencies: + mdn-data "2.0.28" + source-map-js "^1.0.1" + css-what@^3.2.1: version "3.4.2" resolved "https://registry.yarnpkg.com/css-what/-/css-what-3.4.2.tgz#ea7026fcb01777edbde52124e21f327e7ae950e4" @@ -3602,83 +3628,110 @@ cssesc@^3.0.0: resolved "https://registry.yarnpkg.com/cssesc/-/cssesc-3.0.0.tgz#37741919903b868565e1c09ea747445cd18983ee" integrity sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg== -cssnano-preset-advanced@^5.3.8: - version "5.3.9" - resolved "https://registry.yarnpkg.com/cssnano-preset-advanced/-/cssnano-preset-advanced-5.3.9.tgz#99e1cdf81a467a5e6c366cfc6d874a166c4d9a67" - integrity sha512-njnh4pp1xCsibJcEHnWZb4EEzni0ePMqPuPNyuWT4Z+YeXmsgqNuTPIljXFEXhxGsWs9183JkXgHxc1TcsahIg== - dependencies: - autoprefixer "^10.4.12" - cssnano-preset-default "^5.2.13" - postcss-discard-unused "^5.1.0" - postcss-merge-idents "^5.1.1" - postcss-reduce-idents "^5.2.0" - postcss-zindex "^5.1.0" - -cssnano-preset-default@^5.2.13: - version "5.2.13" - resolved "https://registry.yarnpkg.com/cssnano-preset-default/-/cssnano-preset-default-5.2.13.tgz#e7353b0c57975d1bdd97ac96e68e5c1b8c68e990" - integrity sha512-PX7sQ4Pb+UtOWuz8A1d+Rbi+WimBIxJTRyBdgGp1J75VU0r/HFQeLnMYgHiCAp6AR4rqrc7Y4R+1Rjk3KJz6DQ== - dependencies: - css-declaration-sorter "^6.3.1" - cssnano-utils "^3.1.0" - postcss-calc "^8.2.3" - postcss-colormin "^5.3.0" - postcss-convert-values "^5.1.3" - postcss-discard-comments "^5.1.2" - postcss-discard-duplicates "^5.1.0" - 
postcss-discard-empty "^5.1.1" - postcss-discard-overridden "^5.1.0" - postcss-merge-longhand "^5.1.7" - postcss-merge-rules "^5.1.3" - postcss-minify-font-values "^5.1.0" - postcss-minify-gradients "^5.1.1" - postcss-minify-params "^5.1.4" - postcss-minify-selectors "^5.2.1" - postcss-normalize-charset "^5.1.0" - postcss-normalize-display-values "^5.1.0" - postcss-normalize-positions "^5.1.1" - postcss-normalize-repeat-style "^5.1.1" - postcss-normalize-string "^5.1.0" - postcss-normalize-timing-functions "^5.1.0" - postcss-normalize-unicode "^5.1.1" - postcss-normalize-url "^5.1.0" - postcss-normalize-whitespace "^5.1.1" - postcss-ordered-values "^5.1.3" - postcss-reduce-initial "^5.1.1" - postcss-reduce-transforms "^5.1.0" - postcss-svgo "^5.1.0" - postcss-unique-selectors "^5.1.1" - -cssnano-utils@^3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/cssnano-utils/-/cssnano-utils-3.1.0.tgz#95684d08c91511edfc70d2636338ca37ef3a6861" - integrity sha512-JQNR19/YZhz4psLX/rQ9M83e3z2Wf/HdJbryzte4a3NSuafyp9w/I4U+hx5C2S9g41qlstH7DEWnZaaj83OuEA== +cssnano-preset-advanced@^6.1.2: + version "6.1.2" + resolved "https://registry.yarnpkg.com/cssnano-preset-advanced/-/cssnano-preset-advanced-6.1.2.tgz#82b090872b8f98c471f681d541c735acf8b94d3f" + integrity sha512-Nhao7eD8ph2DoHolEzQs5CfRpiEP0xa1HBdnFZ82kvqdmbwVBUr2r1QuQ4t1pi+D1ZpqpcO4T+wy/7RxzJ/WPQ== + dependencies: + autoprefixer "^10.4.19" + browserslist "^4.23.0" + cssnano-preset-default "^6.1.2" + postcss-discard-unused "^6.0.5" + postcss-merge-idents "^6.0.3" + postcss-reduce-idents "^6.0.3" + postcss-zindex "^6.0.2" + +cssnano-preset-default@^6.1.2: + version "6.1.2" + resolved "https://registry.yarnpkg.com/cssnano-preset-default/-/cssnano-preset-default-6.1.2.tgz#adf4b89b975aa775f2750c89dbaf199bbd9da35e" + integrity sha512-1C0C+eNaeN8OcHQa193aRgYexyJtU8XwbdieEjClw+J9d94E41LwT6ivKH0WT+fYwYWB0Zp3I3IZ7tI/BbUbrg== + dependencies: + browserslist "^4.23.0" + css-declaration-sorter "^7.2.0" + cssnano-utils "^4.0.2" + postcss-calc "^9.0.1" + postcss-colormin "^6.1.0" + postcss-convert-values "^6.1.0" + postcss-discard-comments "^6.0.2" + postcss-discard-duplicates "^6.0.3" + postcss-discard-empty "^6.0.3" + postcss-discard-overridden "^6.0.2" + postcss-merge-longhand "^6.0.5" + postcss-merge-rules "^6.1.1" + postcss-minify-font-values "^6.1.0" + postcss-minify-gradients "^6.0.3" + postcss-minify-params "^6.1.0" + postcss-minify-selectors "^6.0.4" + postcss-normalize-charset "^6.0.2" + postcss-normalize-display-values "^6.0.2" + postcss-normalize-positions "^6.0.2" + postcss-normalize-repeat-style "^6.0.2" + postcss-normalize-string "^6.0.2" + postcss-normalize-timing-functions "^6.0.2" + postcss-normalize-unicode "^6.1.0" + postcss-normalize-url "^6.0.2" + postcss-normalize-whitespace "^6.0.2" + postcss-ordered-values "^6.0.2" + postcss-reduce-initial "^6.1.0" + postcss-reduce-transforms "^6.0.2" + postcss-svgo "^6.0.3" + postcss-unique-selectors "^6.0.4" + +cssnano-utils@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/cssnano-utils/-/cssnano-utils-4.0.2.tgz#56f61c126cd0f11f2eef1596239d730d9fceff3c" + integrity sha512-ZR1jHg+wZ8o4c3zqf1SIUSTIvm/9mU343FMR6Obe/unskbvpGhZOo1J6d/r8D1pzkRQYuwbcH3hToOuoA2G7oQ== -cssnano@^5.1.12, cssnano@^5.1.8: - version "5.1.14" - resolved "https://registry.yarnpkg.com/cssnano/-/cssnano-5.1.14.tgz#07b0af6da73641276fe5a6d45757702ebae2eb05" - integrity sha512-Oou7ihiTocbKqi0J1bB+TRJIQX5RMR3JghA8hcWSw9mjBLQ5Y3RWqEDoYG3sRNlAbCIXpqMoZGbq5KDR3vdzgw== +cssnano@^6.0.1, cssnano@^6.1.2: + version "6.1.2" 
+ resolved "https://registry.yarnpkg.com/cssnano/-/cssnano-6.1.2.tgz#4bd19e505bd37ee7cf0dc902d3d869f6d79c66b8" + integrity sha512-rYk5UeX7VAM/u0lNqewCdasdtPK81CgX8wJFLEIXHbV2oldWRgJAsZrdhRXkV1NJzA2g850KiFm9mMU2HxNxMA== dependencies: - cssnano-preset-default "^5.2.13" - lilconfig "^2.0.3" - yaml "^1.10.2" + cssnano-preset-default "^6.1.2" + lilconfig "^3.1.1" -csso@^4.0.2, csso@^4.2.0: +csso@^4.0.2: version "4.2.0" resolved "https://registry.yarnpkg.com/csso/-/csso-4.2.0.tgz#ea3a561346e8dc9f546d6febedd50187cf389529" integrity sha512-wvlcdIbf6pwKEk7vHj8/Bkc0B4ylXZruLvOgs9doS5eOsOpuodOV2zJChSpkp+pRpYQLQMeF04nr3Z68Sta9jA== dependencies: css-tree "^1.1.2" +csso@^5.0.5: + version "5.0.5" + resolved "https://registry.yarnpkg.com/csso/-/csso-5.0.5.tgz#f9b7fe6cc6ac0b7d90781bb16d5e9874303e2ca6" + integrity sha512-0LrrStPOdJj+SPCCrGhzryycLjwcgUSHBtxNA8aIDxf0GLsRh1cKYhB00Gd1lDOS4yGH69+SNn13+TWbVHETFQ== + dependencies: + css-tree "~2.2.0" + csstype@^3.0.2: - version "3.1.1" - resolved "https://registry.yarnpkg.com/csstype/-/csstype-3.1.1.tgz#841b532c45c758ee546a11d5bd7b7b473c8c30b9" - integrity sha512-DJR/VvkAvSZW9bTouZue2sSxDwdTN92uHjqeKVm+0dAqdfNykRzQ95tay8aXMBAAPpUiq4Qcug2L7neoRh2Egw== + version "3.1.3" + resolved "https://registry.yarnpkg.com/csstype/-/csstype-3.1.3.tgz#d80ff294d114fb0e6ac500fbf85b60137d7eff81" + integrity sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw== + +cytoscape-cose-bilkent@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/cytoscape-cose-bilkent/-/cytoscape-cose-bilkent-4.1.0.tgz#762fa121df9930ffeb51a495d87917c570ac209b" + integrity sha512-wgQlVIUJF13Quxiv5e1gstZ08rnZj2XaLHGoFMYXz7SkNfCDOOteKBE6SYRfA9WxxI/iBc3ajfDoc6hb/MRAHQ== + dependencies: + cose-base "^1.0.0" + +cytoscape@^3.28.1: + version "3.30.2" + resolved "https://registry.yarnpkg.com/cytoscape/-/cytoscape-3.30.2.tgz#94149707fb6547a55e3b44f03ffe232706212161" + integrity sha512-oICxQsjW8uSaRmn4UK/jkczKOqTrVqt5/1WL0POiJUT2EKNc9STM4hYFHv917yu55aTBMFNRzymlJhVAiWPCxw== + +"d3-array@1 - 2": + version "2.12.1" + resolved "https://registry.yarnpkg.com/d3-array/-/d3-array-2.12.1.tgz#e20b41aafcdffdf5d50928004ececf815a465e81" + integrity sha512-B0ErZK/66mHtEsR1TkPEEkwdy+WDesimkM5gpZr5Dsg54BiTA5RXtYW5qTLIAcekaS9xfZrzBLF/OAkB3Qn1YQ== + dependencies: + internmap "^1.0.0" "d3-array@2 - 3", "d3-array@2.10.0 - 3", "d3-array@2.5.0 - 3", d3-array@3, d3-array@^3.2.0: - version "3.2.1" - resolved "https://registry.yarnpkg.com/d3-array/-/d3-array-3.2.1.tgz#39331ea706f5709417d31bbb6ec152e0328b39b3" - integrity sha512-gUY/qeHq/yNqqoCKNq4vtpFLdoCdvyNpWoC/KNjhGbhDuQpAM9sIQQKkXSNpXa9h5KySs/gzm7R88WkUutgwWQ== + version "3.2.4" + resolved "https://registry.yarnpkg.com/d3-array/-/d3-array-3.2.4.tgz#15fec33b237f97ac5d7c986dc77da273a8ed0bb5" + integrity sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg== dependencies: internmap "1 - 2" @@ -3711,16 +3764,16 @@ d3-chord@3: integrity sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA== d3-contour@4: - version "4.0.0" - resolved "https://registry.yarnpkg.com/d3-contour/-/d3-contour-4.0.0.tgz#5a1337c6da0d528479acdb5db54bc81a0ff2ec6b" - integrity sha512-7aQo0QHUTu/Ko3cP9YK9yUTxtoDEiDGwnBHyLxG5M4vqlBkO/uixMRele3nfsfj6UXOcuReVpVXzAboGraYIJw== + version "4.0.2" + resolved "https://registry.yarnpkg.com/d3-contour/-/d3-contour-4.0.2.tgz#bb92063bc8c5663acb2422f99c73cbb6c6ae3bcc" + integrity 
sha512-4EzFTRIikzs47RGmdxbeUvLWtGedDUNkTcmzoeyg4sP/dvCexO47AaQL7VKy/gul85TOxw+IBgA8US2xwbToNA== dependencies: d3-array "^3.2.0" d3-delaunay@6: - version "6.0.2" - resolved "https://registry.yarnpkg.com/d3-delaunay/-/d3-delaunay-6.0.2.tgz#7fd3717ad0eade2fc9939f4260acfb503f984e92" - integrity sha512-IMLNldruDQScrcfT+MWnazhHbDJhcRJyOEBAJfwQnHle1RPh6WDuLvxNArUju2VSMSUuKlY5BGHRJ2cYyoFLQQ== + version "6.0.4" + resolved "https://registry.yarnpkg.com/d3-delaunay/-/d3-delaunay-6.0.4.tgz#98169038733a0a5babbeda55054f795bb9e4a58b" + integrity sha512-mdjtIZ1XLAM8bm/hx3WwjfHt6Sggek7qH043O8KEjDXN40xi3vx/6pYSVTwLjEgiXQTbvaouWKynLBiUZ6SK6A== dependencies: delaunator "5" @@ -3773,9 +3826,9 @@ d3-force@3: integrity sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA== d3-geo@3: - version "3.1.0" - resolved "https://registry.yarnpkg.com/d3-geo/-/d3-geo-3.1.0.tgz#74fd54e1f4cebd5185ac2039217a98d39b0a4c0e" - integrity sha512-JEo5HxXDdDYXCaWdwLRt79y7giK8SbhZJbFWXqbRTolCHFI5jRqteLzCsq51NKbUoX0PjBVSohxrx+NoOUujYA== + version "3.1.1" + resolved "https://registry.yarnpkg.com/d3-geo/-/d3-geo-3.1.1.tgz#6027cf51246f9b2ebd64f99e01dc7c3364033a4d" + integrity sha512-637ln3gXKXOwhalDzinUgY83KzNWZRKbYubaG+fGVuc/dxO64RRljtCTnf5ecMyE1RIdtqpkVcq0IbtU2S8j2Q== dependencies: d3-array "2.5.0 - 3" @@ -3791,6 +3844,11 @@ d3-hierarchy@3: dependencies: d3-color "1 - 3" +d3-path@1: + version "1.0.9" + resolved "https://registry.yarnpkg.com/d3-path/-/d3-path-1.0.9.tgz#48c050bb1fe8c262493a8caf5524e3e9591701cf" + integrity sha512-VLaYcn81dtHVTjEHd8B+pbe9yHWpXKZUC87PzoFmsFrJqgFwDe/qxfp5MlfsfM1V5E/iVt0MmEbWQ7FVIXh/bg== + "d3-path@1 - 3", d3-path@3, d3-path@^3.1.0: version "3.1.0" resolved "https://registry.yarnpkg.com/d3-path/-/d3-path-3.1.0.tgz#22df939032fb5a71ae8b1800d61ddb7851c42526" @@ -3811,10 +3869,18 @@ d3-random@3: resolved "https://registry.yarnpkg.com/d3-random/-/d3-random-3.0.1.tgz#d4926378d333d9c0bfd1e6fa0194d30aebaa20f4" integrity sha512-FXMe9GfxTxqd5D6jFsQ+DJ8BJS4E/fT5mqqdjovykEB2oFbTMDVdg1MGFxfQW+FBOGoB++k8swBrgwSHT1cUXQ== +d3-sankey@^0.12.3: + version "0.12.3" + resolved "https://registry.yarnpkg.com/d3-sankey/-/d3-sankey-0.12.3.tgz#b3c268627bd72e5d80336e8de6acbfec9d15d01d" + integrity sha512-nQhsBRmM19Ax5xEIPLMY9ZmJ/cDvd1BG3UVvt5h3WRxKg5zGRbvnteTyWAbzeSvlh3tW7ZEmq4VwR5mB3tutmQ== + dependencies: + d3-array "1 - 2" + d3-shape "^1.2.0" + d3-scale-chromatic@3: - version "3.0.0" - resolved "https://registry.yarnpkg.com/d3-scale-chromatic/-/d3-scale-chromatic-3.0.0.tgz#15b4ceb8ca2bb0dcb6d1a641ee03d59c3b62376a" - integrity sha512-Lx9thtxAKrO2Pq6OO2Ua474opeziKr279P/TKZsMAhYyNDD3EnCffdbgeSYN5O7m2ByQsxtuP2CSDczNUIZ22g== + version "3.1.0" + resolved "https://registry.yarnpkg.com/d3-scale-chromatic/-/d3-scale-chromatic-3.1.0.tgz#34c39da298b23c20e02f1a4b239bd0f22e7f1314" + integrity sha512-A3s5PWiZ9YCXFye1o246KoscMWqf8BsD9eRiJ3He7C9OBaxKhAd5TFCdEx/7VbKtxxTsu//1mMJFrEt572cEyQ== dependencies: d3-color "1 - 3" d3-interpolate "1 - 3" @@ -3842,6 +3908,13 @@ d3-shape@3: dependencies: d3-path "^3.1.0" +d3-shape@^1.2.0: + version "1.3.7" + resolved "https://registry.yarnpkg.com/d3-shape/-/d3-shape-1.3.7.tgz#df63801be07bc986bc54f63789b4fe502992b5d7" + integrity sha512-EUkvKjqPFUAZyOlhY5gzCxCeI0Aep04LwIRpsZ/mLFelJiUfnK56jo5JMDSE7yyP2kLSb6LtF+S5chMk7uqPqw== + dependencies: + d3-path "1" + "d3-time-format@2 - 4", d3-time-format@4: version "4.1.0" resolved "https://registry.yarnpkg.com/d3-time-format/-/d3-time-format-4.1.0.tgz#7ab5257a5041d11ecb4fe70a5c7d16a195bb408a" @@ -3883,10 +3956,10 @@ d3-zoom@3: 
d3-selection "2 - 3" d3-transition "2 - 3" -d3@^7.0.0, d3@^7.7.0: - version "7.8.0" - resolved "https://registry.yarnpkg.com/d3/-/d3-7.8.0.tgz#c9441f0ea9266b1003a97c2ffd53e79e9e14b1fc" - integrity sha512-a5rNemRadWkEfqnY5NsD4RdCP9vn8EIJ4I5Rl14U0uKH1SXqcNmk/h9aGaAF1O98lz6L9M0IeUcuPa9GUYbI5A== +d3@^7.4.0, d3@^7.8.2: + version "7.9.0" + resolved "https://registry.yarnpkg.com/d3/-/d3-7.9.0.tgz#579e7acb3d749caf8860bd1741ae8d371070cd5d" + integrity sha512-e1U46jVP+w7Iut8Jt8ri1YsPOvFpg46k+K8TpCb0P+zjCkjkPnV7WzfDJzMHy1LnA+wj5pLT1wjO901gLXeEhA== dependencies: d3-array "3" d3-axis "3" @@ -3919,13 +3992,50 @@ d3@^7.0.0, d3@^7.7.0: d3-transition "3" d3-zoom "3" -dagre-d3-es@7.0.6: - version "7.0.6" - resolved "https://registry.yarnpkg.com/dagre-d3-es/-/dagre-d3-es-7.0.6.tgz#8cab465ff95aca8a1ca2292d07e1fb31b5db83f2" - integrity sha512-CaaE/nZh205ix+Up4xsnlGmpog5GGm81Upi2+/SBHxwNwrccBb3K51LzjZ1U6hgvOlAEUsVWf1xSTzCyKpJ6+Q== +dagre-d3-es@7.0.10: + version "7.0.10" + resolved "https://registry.yarnpkg.com/dagre-d3-es/-/dagre-d3-es-7.0.10.tgz#19800d4be674379a3cd8c86a8216a2ac6827cadc" + integrity sha512-qTCQmEhcynucuaZgY5/+ti3X/rnszKZhEQH/ZdWdtP1tA/y3VoHJzcVrO9pjjJCNpigfscAtoUB5ONcd2wNn0A== + dependencies: + d3 "^7.8.2" + lodash-es "^4.17.21" + +data-view-buffer@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/data-view-buffer/-/data-view-buffer-1.0.1.tgz#8ea6326efec17a2e42620696e671d7d5a8bc66b2" + integrity sha512-0lht7OugA5x3iJLOWFhWK/5ehONdprk0ISXqVFn/NFrDu+cuc8iADFrGQz5BnRK7LLU3JmkbXSxaqX+/mXYtUA== dependencies: - d3 "^7.7.0" - lodash-es "^4.17.21" + call-bind "^1.0.6" + es-errors "^1.3.0" + is-data-view "^1.0.1" + +data-view-byte-length@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/data-view-byte-length/-/data-view-byte-length-1.0.1.tgz#90721ca95ff280677eb793749fce1011347669e2" + integrity sha512-4J7wRJD3ABAzr8wP+OcIcqq2dlUKp4DVflx++hs5h5ZKydWMI6/D/fAot+yh6g2tHh8fLFTvNOaVN357NvSrOQ== + dependencies: + call-bind "^1.0.7" + es-errors "^1.3.0" + is-data-view "^1.0.1" + +data-view-byte-offset@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/data-view-byte-offset/-/data-view-byte-offset-1.0.0.tgz#5e0bbfb4828ed2d1b9b400cd8a7d119bca0ff18a" + integrity sha512-t/Ygsytq+R995EJ5PZlD4Cu56sWa8InXySaViRzw9apusqsOO2bQP+SbYzAhR0pFKoB+43lYy8rWban9JSuXnA== + dependencies: + call-bind "^1.0.6" + es-errors "^1.3.0" + is-data-view "^1.0.1" + +dayjs@^1.11.7: + version "1.11.13" + resolved "https://registry.yarnpkg.com/dayjs/-/dayjs-1.11.13.tgz#92430b0139055c3ebb60150aa13e860a4b5a366c" + integrity sha512-oaMBel6gjolK862uaPQOVTA7q3TZhuSvuMQAAglQDOWYO9A91IrAOUJEyKVlqJlHE0vq5p5UXxzdPfMH/x6xNg== + +debounce@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/debounce/-/debounce-1.2.1.tgz#38881d8f4166a5c5848020c11827b834bcb3e0a5" + integrity sha512-XRRe6Glud4rd/ZGQfiV1ruXSfbvfJedlV9Y6zOlP+2K04vBYiJEte6stfFkCP03aMnY5tsipamumUjL14fofug== debug@2.6.9, debug@^2.6.0: version "2.6.9" @@ -3934,12 +4044,12 @@ debug@2.6.9, debug@^2.6.0: dependencies: ms "2.0.0" -debug@4, debug@^4.0.0, debug@^4.1.0, debug@^4.1.1: - version "4.3.4" - resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865" - integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ== +debug@4, debug@^4.0.0, debug@^4.1.0, debug@^4.1.1, debug@^4.3.1: + version "4.3.7" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.7.tgz#87945b4151a011d76d95a198d7111c865c360a52" + integrity 
sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ== dependencies: - ms "2.1.2" + ms "^2.1.3" decode-named-character-reference@^1.0.0: version "1.0.2" @@ -3948,22 +4058,22 @@ decode-named-character-reference@^1.0.0: dependencies: character-entities "^2.0.0" -decompress-response@^3.3.0: - version "3.3.0" - resolved "https://registry.yarnpkg.com/decompress-response/-/decompress-response-3.3.0.tgz#80a4dd323748384bfa248083622aedec982adff3" - integrity sha512-BzRPQuY1ip+qDonAOz42gRm/pg9F768C+npV/4JOsxRC2sq+Rlk+Q4ZCAsOhnIaMrgarILY+RMUIvMmmX1qAEA== +decompress-response@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/decompress-response/-/decompress-response-6.0.0.tgz#ca387612ddb7e104bd16d85aab00d5ecf09c66fc" + integrity sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ== dependencies: - mimic-response "^1.0.0" + mimic-response "^3.1.0" deep-extend@^0.6.0: version "0.6.0" resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.6.0.tgz#c4fa7c95404a17a9c3e8ca7e1537312b736330ac" integrity sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA== -deepmerge@^4.2.2: - version "4.2.2" - resolved "https://registry.yarnpkg.com/deepmerge/-/deepmerge-4.2.2.tgz#44d2ea3679b8f4d4ffba33f03d865fc1e7bf4955" - integrity sha512-FJ3UgI4gIl+PHZm53knsuSFpE+nESMr7M4v9QcgB7S63Kj/6WqMiFQJpBBYz1Pt+66bZpP3Q7Lye0Oo9MPKEdg== +deepmerge@^4.2.2, deepmerge@^4.3.1: + version "4.3.1" + resolved "https://registry.yarnpkg.com/deepmerge/-/deepmerge-4.3.1.tgz#44b5f2147cd3b00d4b56137685966f26fd25dd4a" + integrity sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A== default-gateway@^6.0.3: version "6.0.3" @@ -3972,21 +4082,31 @@ default-gateway@^6.0.3: dependencies: execa "^5.0.0" -defer-to-connect@^1.0.1: - version "1.1.3" - resolved "https://registry.yarnpkg.com/defer-to-connect/-/defer-to-connect-1.1.3.tgz#331ae050c08dcf789f8c83a7b81f0ed94f4ac591" - integrity sha512-0ISdNousHvZT2EiFlZeZAHBUvSxmKswVCEf8hW7KWgG4a8MVEu/3Vb6uWYozkjylyCxe0JBIiRB1jV45S70WVQ== +defer-to-connect@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/defer-to-connect/-/defer-to-connect-2.0.1.tgz#8016bdb4143e4632b77a3449c6236277de520587" + integrity sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg== + +define-data-property@^1.0.1, define-data-property@^1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/define-data-property/-/define-data-property-1.1.4.tgz#894dc141bb7d3060ae4366f6a0107e68fbe48c5e" + integrity sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A== + dependencies: + es-define-property "^1.0.0" + es-errors "^1.3.0" + gopd "^1.0.1" define-lazy-prop@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz#3f7ae421129bcaaac9bc74905c98a0009ec9ee7f" integrity sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og== -define-properties@^1.1.3, define-properties@^1.1.4: - version "1.1.4" - resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.1.4.tgz#0b14d7bd7fbeb2f3572c3a7eda80ea5d57fb05b1" - integrity sha512-uckOqKcfaVvtBdsVkdPv3XjveQJsNQqmhXgRi8uhvWWuPYZCNlzT8qAyblUgNoXdHdjMTzAqeGjAoli8f+bzPA== +define-properties@^1.1.3, define-properties@^1.2.0, define-properties@^1.2.1: + version "1.2.1" + resolved 
"https://registry.yarnpkg.com/define-properties/-/define-properties-1.2.1.tgz#10781cc616eb951a80a034bafcaa7377f6af2b6c" + integrity sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg== dependencies: + define-data-property "^1.0.1" has-property-descriptors "^1.0.0" object-keys "^1.1.1" @@ -4005,11 +4125,11 @@ del@^6.1.1: slash "^3.0.0" delaunator@5: - version "5.0.0" - resolved "https://registry.yarnpkg.com/delaunator/-/delaunator-5.0.0.tgz#60f052b28bd91c9b4566850ebf7756efe821d81b" - integrity sha512-AyLvtyJdbv/U1GkiS6gUUzclRoAY4Gs75qkMygJJhU75LW4DNuSF2RMzpxs9jw9Oz1BobHjTdkG3zdP55VxAqw== + version "5.0.1" + resolved "https://registry.yarnpkg.com/delaunator/-/delaunator-5.0.1.tgz#39032b08053923e924d6094fe2cde1a99cc51278" + integrity sha512-8nvh+XBe96aCESrGOqMp/84b13H9cdKbG5P2ejQCh4d4sK9RL4371qou9drQjMhvnPmhWl5hnmqbEE0fXr9Xnw== dependencies: - robust-predicates "^3.0.0" + robust-predicates "^3.0.2" depd@2.0.0: version "2.0.0" @@ -4031,13 +4151,6 @@ destroy@1.2.0: resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.2.0.tgz#4803735509ad8be552934c67df614f94e66fa015" integrity sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg== -detab@2.0.4: - version "2.0.4" - resolved "https://registry.yarnpkg.com/detab/-/detab-2.0.4.tgz#b927892069aff405fbb9a186fe97a44a92a94b43" - integrity sha512-8zdsQA5bIkoRECvCrNKPla84lyoR7DSAyf7p0YgXzBO9PDJx8KntPUay7NS6yp+KdxdVtiE5SpHKtbp2ZQyA9g== - dependencies: - repeat-string "^1.5.4" - detect-node@^2.0.4: version "2.1.0" resolved "https://registry.yarnpkg.com/detect-node/-/detect-node-2.1.0.tgz#c9c70775a49c3d03bc2c06d9a73be550f978f8b1" @@ -4051,18 +4164,25 @@ detect-port-alt@^1.1.6: address "^1.0.1" debug "^2.6.0" -detect-port@^1.3.0: - version "1.5.1" - resolved "https://registry.yarnpkg.com/detect-port/-/detect-port-1.5.1.tgz#451ca9b6eaf20451acb0799b8ab40dff7718727b" - integrity sha512-aBzdj76lueB6uUst5iAs7+0H/oOjqI5D16XUWxlWMIMROhcM0rfsNVk93zTngq1dDNpoXRr++Sus7ETAExppAQ== +detect-port@^1.5.1: + version "1.6.1" + resolved "https://registry.yarnpkg.com/detect-port/-/detect-port-1.6.1.tgz#45e4073997c5f292b957cb678fb0bb8ed4250a67" + integrity sha512-CmnVc+Hek2egPx1PeTFVta2W78xy2K/9Rkf6cC4T59S50tVnzKj+tnx5mmx5lwvCkujZ4uRrpRSuV+IVs3f90Q== dependencies: address "^1.0.1" debug "4" +devlop@^1.0.0, devlop@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/devlop/-/devlop-1.1.0.tgz#4db7c2ca4dc6e0e834c30be70c94bbc976dc7018" + integrity sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA== + dependencies: + dequal "^2.0.0" + diff@^5.0.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/diff/-/diff-5.1.0.tgz#bc52d298c5ea8df9194800224445ed43ffc87e40" - integrity sha512-D+mk+qE8VC/PAUrlAU34N+VfXev0ghe5ywmpqrawphmVZc1bEfn56uo9qpyGp1p4xpzOHkSW4ztBd6L7Xx4ACw== + version "5.2.0" + resolved "https://registry.yarnpkg.com/diff/-/diff-5.2.0.tgz#26ded047cd1179b78b9537d5ef725503ce1ae531" + integrity sha512-uIFDxqpRZGZ6ThOk84hEfqWoHx2devRFvpTZcTHur85vImfaxUbTW9Ryh4CpCuDnToOP1CEtXKIgytHBPVff5A== dir-glob@^3.0.1: version "3.0.1" @@ -4076,25 +4196,20 @@ direction@^1.0.0: resolved "https://registry.yarnpkg.com/direction/-/direction-1.0.4.tgz#2b86fb686967e987088caf8b89059370d4837442" integrity sha512-GYqKi1aH7PJXxdhTeZBFrg8vUBeKXi+cNprXsC1kpJcbcVnV9wBsrOu1cQEdG0WeQwlfHiy3XvnKfIrJ2R0NzQ== -dns-equal@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/dns-equal/-/dns-equal-1.0.0.tgz#b39e7f1da6eb0a75ba9c17324b34753c47e0654d" - 
integrity sha512-z+paD6YUQsk+AbGCEM4PrOXSss5gd66QfcVBFTKR/HpFL9jCqikS94HYwKww6fQyO7IxrIIyUu+g0Ka9tUS2Cg== - dns-packet@^5.2.2: - version "5.4.0" - resolved "https://registry.yarnpkg.com/dns-packet/-/dns-packet-5.4.0.tgz#1f88477cf9f27e78a213fb6d118ae38e759a879b" - integrity sha512-EgqGeaBB8hLiHLZtp/IbaDQTL8pZ0+IvwzSHA6d7VyMDM+B9hgddEMa9xjK5oYnw0ci0JQ6g2XCD7/f6cafU6g== + version "5.6.1" + resolved "https://registry.yarnpkg.com/dns-packet/-/dns-packet-5.6.1.tgz#ae888ad425a9d1478a0674256ab866de1012cf2f" + integrity sha512-l4gcSouhcgIKRvyy99RNVOgxXiicE+2jZoNmaNmZ6JXiGajBOJAesk1OBlJuM5k2c+eudGdLxDqXuPCKIj6kpw== dependencies: "@leichtgewicht/ip-codec" "^2.0.1" -docusaurus-lunr-search@^2.2.0: - version "2.3.2" - resolved "https://registry.yarnpkg.com/docusaurus-lunr-search/-/docusaurus-lunr-search-2.3.2.tgz#9991ef51addb0bf09ac80a06cb5729f28939332d" - integrity sha512-Ngvm2kXwliWThqAThXI1912rOKHlFL7BjIc+OVNUfzkjpk5ar4TFEh+EUaaMOLw4V0BBko3CW0Ym7prqqm3jLQ== +docusaurus-lunr-search@^2.3.2: + version "2.4.2" + resolved "https://registry.yarnpkg.com/docusaurus-lunr-search/-/docusaurus-lunr-search-2.4.2.tgz#6a9ccaa50eb7140ce6f71ff5cb395fd53c33a5e6" + integrity sha512-t6Uk45ED5gZ4ma5s5fEzHrf52QmoTpKSC7LnskaSBqyFL3uj5ciW14WOm3nE/dlhkzx+ZphLjOEoRXgkwaSy7Q== dependencies: autocomplete.js "^0.37.0" - classnames "^2.2.6" + clsx "^1.2.1" gauge "^3.0.0" hast-util-select "^4.0.0" hast-util-to-text "^2.0.0" @@ -4108,28 +4223,30 @@ docusaurus-lunr-search@^2.2.0: unified "^9.0.0" unist-util-is "^4.0.2" -docusaurus-plugin-internaldocs-fb@1.8.0: - version "1.8.0" - resolved "https://registry.yarnpkg.com/docusaurus-plugin-internaldocs-fb/-/docusaurus-plugin-internaldocs-fb-1.8.0.tgz#0fb8ed835506fb3c26f870b3328e15569924f8d8" - integrity sha512-6xoYfXM1sVW4Lv0x4zJHvKyJxOQTRgpi1P/uEWdmIgjkWPPcoU69dmgGdz258u5qHbPLJ7Wbe9IzAntoU9DBSg== +docusaurus-plugin-internaldocs-fb@^1.18.4: + version "1.18.5" + resolved "https://registry.yarnpkg.com/docusaurus-plugin-internaldocs-fb/-/docusaurus-plugin-internaldocs-fb-1.18.5.tgz#d7d8b95dfd47d568ed6f046ed6ed160c50d5c255" + integrity sha512-CX6kKWzKTybM0EEDww6/3th36/LAYmA7mjSYegedLV3LXMh6m180zrzcV/9EBQe4rIazr/SFfe8IfwaZ0qmaPQ== dependencies: "@mdx-js/mdx" "^2.1.1" "@mdx-js/react" "^1.6.22" "@types/lodash.debounce" "4.0.7" - "@types/mermaid" "^8.2.9" + "@types/lodash.escape" "4.0.0" "@types/react-modal" "3.13.1" assert "^2.0.0" buffer "^6.0.3" clsx "^1.2.1" + docusaurus-lunr-search "^2.3.2" fs-extra "^10.1.0" lodash.debounce "^4.0.8" - mermaid "^9.1.3" + lodash.escape "^4.0.0" + mermaid "^10.9.0" node-fetch "2.6.7" path-browserify "^1.0.1" react-live "^2.2.3" react-modal "3.15.1" remark-gfm "^3.0.1" - remark-mdx-filter-imports "^0.1.2" + remark-mdx-filter-imports "^0.1.3" unified "^9.2.1" unist-util-remove "^3.1.0" unist-util-visit "^2.0.1" @@ -4193,17 +4310,17 @@ domhandler@^4.0.0, domhandler@^4.2.0, domhandler@^4.3.1: dependencies: domelementtype "^2.2.0" -domhandler@^5.0.1, domhandler@^5.0.2, domhandler@^5.0.3: +domhandler@^5.0.2, domhandler@^5.0.3: version "5.0.3" resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-5.0.3.tgz#cc385f7f751f1d1fc650c21374804254538c7d31" integrity sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w== dependencies: domelementtype "^2.3.0" -dompurify@2.4.1: - version "2.4.1" - resolved "https://registry.yarnpkg.com/dompurify/-/dompurify-2.4.1.tgz#f9cb1a275fde9af6f2d0a2644ef648dd6847b631" - integrity sha512-ewwFzHzrrneRjxzmK6oVz/rZn9VWspGFRDb4/rRtIsM1n36t9AKma/ye8syCpcw+XJ25kOK/hOG7t1j2I2yBqA== 
+"dompurify@^3.0.5 <3.1.7": + version "3.1.6" + resolved "https://registry.yarnpkg.com/dompurify/-/dompurify-3.1.6.tgz#43c714a94c6a7b8801850f82e756685300a027e2" + integrity sha512-cTOAhc36AalkjtBpfG6O8JimdTMWNXjiePT2xQH/ppBGi/4uIpmj8eKyIkMJErXWARyINV/sB38yf8JCLF5pbQ== domutils@^1.7.0: version "1.7.0" @@ -4223,13 +4340,13 @@ domutils@^2.5.2, domutils@^2.8.0: domhandler "^4.2.0" domutils@^3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/domutils/-/domutils-3.0.1.tgz#696b3875238338cb186b6c0612bd4901c89a4f1c" - integrity sha512-z08c1l761iKhDFtfXO04C7kTdPBLi41zwOZl00WS8b5eiaebNpY00HKbztwBq+e3vyqWNwWF3mP9YLUeqIrF+Q== + version "3.1.0" + resolved "https://registry.yarnpkg.com/domutils/-/domutils-3.1.0.tgz#c47f551278d3dc4b0b1ab8cbb42d751a6f0d824e" + integrity sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA== dependencies: dom-serializer "^2.0.0" domelementtype "^2.3.0" - domhandler "^5.0.1" + domhandler "^5.0.3" dot-case@^3.0.4: version "3.0.4" @@ -4239,18 +4356,13 @@ dot-case@^3.0.4: no-case "^3.0.4" tslib "^2.0.3" -dot-prop@^5.2.0: - version "5.3.0" - resolved "https://registry.yarnpkg.com/dot-prop/-/dot-prop-5.3.0.tgz#90ccce708cd9cd82cc4dc8c3ddd9abdd55b20e88" - integrity sha512-QM8q3zDe58hqUqjraQOmzZ1LIH9SWQJTlEKCH4kJ2oQvLZk7RbQXvtDM2XEq3fwkV9CCvvH4LA0AV+ogFsBM2Q== +dot-prop@^6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/dot-prop/-/dot-prop-6.0.1.tgz#fc26b3cf142b9e59b74dbd39ed66ce620c681083" + integrity sha512-tE7ztYzXHIeyvc7N+hR3oi7FIbf/NIjVP9hmAt3yMXzrQ072/fpjGLx2GxNxGxUl5V73MEqYzioOMoVhGMJ5cA== dependencies: is-obj "^2.0.0" -duplexer3@^0.1.4: - version "0.1.5" - resolved "https://registry.yarnpkg.com/duplexer3/-/duplexer3-0.1.5.tgz#0b5e4d7bad5de8901ea4440624c8e1d20099217e" - integrity sha512-1A8za6ws41LQgv9HrE/66jyC5yuSjQ3L/KOpFtoBilsAK2iA2wuS5rTt1OCzIvtS2V7nVmedsUU+DGRcjBmOYA== - duplexer@^0.1.2: version "0.1.2" resolved "https://registry.yarnpkg.com/duplexer/-/duplexer-0.1.2.tgz#3abe43aef3835f8ae077d136ddce0f276b0400e6" @@ -4266,10 +4378,15 @@ ee-first@1.1.1: resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d" integrity sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow== -electron-to-chromium@^1.4.251: - version "1.4.284" - resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.284.tgz#61046d1e4cab3a25238f6bf7413795270f125592" - integrity sha512-M8WEXFuKXMYMVr45fo8mq0wUrrJHheiKZf6BArTKk9ZBYCKJEOU5H8cdWgDT+qCVZf7Na4lVUaZsA+h6uA9+PA== +electron-to-chromium@^1.5.41: + version "1.5.42" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.5.42.tgz#4b3ca7648fb0865daec92298f9ba79e278a476a3" + integrity sha512-gIfKavKDw1mhvic9nbzA5lZw8QSHpdMwLwXc0cWidQz9B15pDoDdDH4boIatuFfeoCatb3a/NGL6CYRVFxGZ9g== + +elkjs@^0.9.0: + version "0.9.3" + resolved "https://registry.yarnpkg.com/elkjs/-/elkjs-0.9.3.tgz#16711f8ceb09f1b12b99e971b138a8384a529161" + integrity sha512-f/ZeWvW/BCXbhGEf1Ujp29EASo/lk1FDnETgNKwJrsVvGZhUWCZyg3xLJjAsxfOmt8KjswHmI5EwCQcPMpOYhQ== emoji-regex@^8.0.0: version "8.0.0" @@ -4281,32 +4398,35 @@ emoji-regex@^9.2.2: resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-9.2.2.tgz#840c8803b0d8047f4ff0cf963176b32d4ef3ed72" integrity sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg== +emojilib@^2.4.0: + version "2.4.0" + resolved 
"https://registry.yarnpkg.com/emojilib/-/emojilib-2.4.0.tgz#ac518a8bb0d5f76dda57289ccb2fdf9d39ae721e" + integrity sha512-5U0rVMU5Y2n2+ykNLQqMoqklN9ICBT/KsvC1Gz6vqHbz2AXXGkG+Pm5rMWk/8Vjrr/mY9985Hi8DYzn1F09Nyw== + emojis-list@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/emojis-list/-/emojis-list-3.0.0.tgz#5570662046ad29e2e916e71aae260abdff4f6a78" integrity sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q== -emoticon@^3.2.0: - version "3.2.0" - resolved "https://registry.yarnpkg.com/emoticon/-/emoticon-3.2.0.tgz#c008ca7d7620fac742fe1bf4af8ff8fed154ae7f" - integrity sha512-SNujglcLTTg+lDAcApPNgEdudaqQFiAbJCqzjNxJkvN9vAwCGi0uu8IUVvx+f16h+V44KCY6Y2yboroc9pilHg== +emoticon@^4.0.1: + version "4.1.0" + resolved "https://registry.yarnpkg.com/emoticon/-/emoticon-4.1.0.tgz#d5a156868ee173095627a33de3f1e914c3dde79e" + integrity sha512-VWZfnxqwNcc51hIy/sbOdEem6D+cVtpPzEEtVAFdaas30+1dgkyaOQ4sQ6Bp0tOMqWO1v+HQfYaoodOkdhK6SQ== encodeurl@~1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.2.tgz#ad3ff4c86ec2d029322f5a02c3a9a606c95b3f59" integrity sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w== -end-of-stream@^1.1.0: - version "1.4.4" - resolved "https://registry.yarnpkg.com/end-of-stream/-/end-of-stream-1.4.4.tgz#5ae64a5f45057baf3626ec14da0ca5e4b2431eb0" - integrity sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q== - dependencies: - once "^1.4.0" +encodeurl@~2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-2.0.0.tgz#7b8ea898077d7e409d3ac45474ea38eaf0857a58" + integrity sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg== -enhanced-resolve@^5.10.0: - version "5.12.0" - resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-5.12.0.tgz#300e1c90228f5b570c4d35babf263f6da7155634" - integrity sha512-QHTXI/sZQmko1cbDoNAa3mJ5qhWUUNAq3vR0/YiD379fWQrcfuoX1+HW2S0MTt7XmoPLapdaDKUtelUSPic7hQ== +enhanced-resolve@^5.17.1: + version "5.17.1" + resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-5.17.1.tgz#67bfbbcc2f81d511be77d686a90267ef7f898a15" + integrity sha512-LMHl3dXhTcfv8gM4kEzIUeTQ+7fpdA0l2tUf34BddXPkz2A5xJ5L/Pchd5BL6rdccM9QGvu0sWZzK1Z1t4wwyg== dependencies: graceful-fs "^4.2.4" tapable "^2.2.0" @@ -4316,10 +4436,10 @@ entities@^2.0.0: resolved "https://registry.yarnpkg.com/entities/-/entities-2.2.0.tgz#098dc90ebb83d8dffa089d55256b351d34c4da55" integrity sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A== -entities@^4.2.0, entities@^4.3.0, entities@^4.4.0: - version "4.4.0" - resolved "https://registry.yarnpkg.com/entities/-/entities-4.4.0.tgz#97bdaba170339446495e653cfd2db78962900174" - integrity sha512-oYp7156SP8LkeGD0GF85ad1X9Ai79WtRsZ2gxJqtBuzH+98YUV6jkHEKlZkMbcrjJjIVJNIDP/3WL9wQkoPbWA== +entities@^4.2.0, entities@^4.4.0, entities@^4.5.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/entities/-/entities-4.5.0.tgz#5d268ea5e7113ec74c4d033b79ea5a35a488fb48" + integrity sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw== error-ex@^1.3.1: version "1.3.2" @@ -4328,46 +4448,95 @@ error-ex@^1.3.1: dependencies: is-arrayish "^0.2.1" -es-abstract@^1.17.2, es-abstract@^1.19.0, es-abstract@^1.20.4: - version "1.20.5" - resolved 
"https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.20.5.tgz#e6dc99177be37cacda5988e692c3fa8b218e95d2" - integrity sha512-7h8MM2EQhsCA7pU/Nv78qOXFpD8Rhqd12gYiSJVkrH9+e8VuA8JlPJK/hQjjlLv6pJvx/z1iRFKzYb0XT/RuAQ== - dependencies: - call-bind "^1.0.2" +es-abstract@^1.17.2, es-abstract@^1.22.1, es-abstract@^1.22.3, es-abstract@^1.23.0, es-abstract@^1.23.2: + version "1.23.3" + resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.23.3.tgz#8f0c5a35cd215312573c5a27c87dfd6c881a0aa0" + integrity sha512-e+HfNH61Bj1X9/jLc5v1owaLYuHdeHHSQlkhCBiTK8rBvKaULl/beGMxwrMXjpYrv4pz22BlY570vVePA2ho4A== + dependencies: + array-buffer-byte-length "^1.0.1" + arraybuffer.prototype.slice "^1.0.3" + available-typed-arrays "^1.0.7" + call-bind "^1.0.7" + data-view-buffer "^1.0.1" + data-view-byte-length "^1.0.1" + data-view-byte-offset "^1.0.0" + es-define-property "^1.0.0" + es-errors "^1.3.0" + es-object-atoms "^1.0.0" + es-set-tostringtag "^2.0.3" es-to-primitive "^1.2.1" - function-bind "^1.1.1" - function.prototype.name "^1.1.5" - get-intrinsic "^1.1.3" - get-symbol-description "^1.0.0" + function.prototype.name "^1.1.6" + get-intrinsic "^1.2.4" + get-symbol-description "^1.0.2" + globalthis "^1.0.3" gopd "^1.0.1" - has "^1.0.3" - has-property-descriptors "^1.0.0" + has-property-descriptors "^1.0.2" + has-proto "^1.0.3" has-symbols "^1.0.3" - internal-slot "^1.0.3" + hasown "^2.0.2" + internal-slot "^1.0.7" + is-array-buffer "^3.0.4" is-callable "^1.2.7" - is-negative-zero "^2.0.2" + is-data-view "^1.0.1" + is-negative-zero "^2.0.3" is-regex "^1.1.4" - is-shared-array-buffer "^1.0.2" + is-shared-array-buffer "^1.0.3" is-string "^1.0.7" + is-typed-array "^1.1.13" is-weakref "^1.0.2" - object-inspect "^1.12.2" + object-inspect "^1.13.1" object-keys "^1.1.1" - object.assign "^4.1.4" - regexp.prototype.flags "^1.4.3" - safe-regex-test "^1.0.0" - string.prototype.trimend "^1.0.6" - string.prototype.trimstart "^1.0.6" + object.assign "^4.1.5" + regexp.prototype.flags "^1.5.2" + safe-array-concat "^1.1.2" + safe-regex-test "^1.0.3" + string.prototype.trim "^1.2.9" + string.prototype.trimend "^1.0.8" + string.prototype.trimstart "^1.0.8" + typed-array-buffer "^1.0.2" + typed-array-byte-length "^1.0.1" + typed-array-byte-offset "^1.0.2" + typed-array-length "^1.0.6" unbox-primitive "^1.0.2" + which-typed-array "^1.1.15" es-array-method-boxes-properly@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/es-array-method-boxes-properly/-/es-array-method-boxes-properly-1.0.0.tgz#873f3e84418de4ee19c5be752990b2e44718d09e" integrity sha512-wd6JXUmyHmt8T5a2xreUwKcGPq6f1f+WwIJkijUqiGcJz1qqnZgP6XIK+QyIWU5lT7imeNxUll48bziG+TSYcA== -es-module-lexer@^0.9.0: - version "0.9.3" - resolved "https://registry.yarnpkg.com/es-module-lexer/-/es-module-lexer-0.9.3.tgz#6f13db00cc38417137daf74366f535c8eb438f19" - integrity sha512-1HQ2M2sPtxwnvOvT1ZClHyQDiggdNjURWpY2we6aMKCQiUVxTmVs2UYPLIrD84sS+kMdUwfBSylbJPwNnBrnHQ== +es-define-property@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/es-define-property/-/es-define-property-1.0.0.tgz#c7faefbdff8b2696cf5f46921edfb77cc4ba3845" + integrity sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ== + dependencies: + get-intrinsic "^1.2.4" + +es-errors@^1.2.1, es-errors@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/es-errors/-/es-errors-1.3.0.tgz#05f75a25dab98e4fb1dcd5e1472c0546d5057c8f" + integrity sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw== + 
+es-module-lexer@^1.2.1: + version "1.5.4" + resolved "https://registry.yarnpkg.com/es-module-lexer/-/es-module-lexer-1.5.4.tgz#a8efec3a3da991e60efa6b633a7cad6ab8d26b78" + integrity sha512-MVNK56NiMrOwitFB7cqDwq0CQutbw+0BvLshJSse0MUNU+y1FC3bUS/AQg7oUng+/wKrrki7JfmwtVHkVfPLlw== + +es-object-atoms@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/es-object-atoms/-/es-object-atoms-1.0.0.tgz#ddb55cd47ac2e240701260bc2a8e31ecb643d941" + integrity sha512-MZ4iQ6JwHOBQjahnjwaC1ZtIBH+2ohjamzAO3oaHcXYup7qxjF2fixyH+Q71voWHeOkI2q/TnJao/KfXYIZWbw== + dependencies: + es-errors "^1.3.0" + +es-set-tostringtag@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/es-set-tostringtag/-/es-set-tostringtag-2.0.3.tgz#8bb60f0a440c2e4281962428438d58545af39777" + integrity sha512-3T8uNMC3OQTHkFUsFq8r/BwAXLHvU/9O9mE0fBc/MY5iq/8H7ncvO947LmYA6ldWw9Uh8Yhf25zu6n7nML5QWQ== + dependencies: + get-intrinsic "^1.2.4" + has-tostringtag "^1.0.2" + hasown "^2.0.1" es-to-primitive@^1.2.1: version "1.2.1" @@ -4378,20 +4547,35 @@ es-to-primitive@^1.2.1: is-date-object "^1.0.1" is-symbol "^1.0.2" -es6-object-assign@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/es6-object-assign/-/es6-object-assign-1.1.0.tgz#c2c3582656247c39ea107cb1e6652b6f9f24523c" - integrity sha512-MEl9uirslVwqQU369iHNWZXsI8yaZYGg/D65aOgZkeyFJwHYSxilf7rQzXKI7DdDuBPrBXbfk3sl9hJhmd5AUw== +esast-util-from-estree@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/esast-util-from-estree/-/esast-util-from-estree-2.0.0.tgz#8d1cfb51ad534d2f159dc250e604f3478a79f1ad" + integrity sha512-4CyanoAudUSBAn5K13H4JhsMH6L9ZP7XbLVe/dKybkxMO7eDyLsT8UHl9TRNrU2Gr9nz+FovfSIjuXWJ81uVwQ== + dependencies: + "@types/estree-jsx" "^1.0.0" + devlop "^1.0.0" + estree-util-visit "^2.0.0" + unist-util-position-from-estree "^2.0.0" -escalade@^3.1.1: - version "3.1.1" - resolved "https://registry.yarnpkg.com/escalade/-/escalade-3.1.1.tgz#d8cfdc7000965c5a0174b4a82eaa5c0552742e40" - integrity sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw== +esast-util-from-js@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/esast-util-from-js/-/esast-util-from-js-2.0.1.tgz#5147bec34cc9da44accf52f87f239a40ac3e8225" + integrity sha512-8Ja+rNJ0Lt56Pcf3TAmpBZjmx8ZcK5Ts4cAzIOjsjevg9oSXJnl6SUQ2EevU8tv3h6ZLWmoKL5H4fgWvdvfETw== + dependencies: + "@types/estree-jsx" "^1.0.0" + acorn "^8.0.0" + esast-util-from-estree "^2.0.0" + vfile-message "^4.0.0" -escape-goat@^2.0.0: - version "2.1.1" - resolved "https://registry.yarnpkg.com/escape-goat/-/escape-goat-2.1.1.tgz#1b2dc77003676c457ec760b2dc68edb648188675" - integrity sha512-8/uIhbG12Csjy2JEW7D9pHbreaVaS/OpN3ycnyvElTdwM5n6GY6W6e2IPemfvGZeUMqZ9A/3GqIZMgKnBhAw/Q== +escalade@^3.1.1, escalade@^3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/escalade/-/escalade-3.2.0.tgz#011a3f69856ba189dffa7dc8fcce99d2a87903e5" + integrity sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA== + +escape-goat@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/escape-goat/-/escape-goat-4.0.0.tgz#9424820331b510b0666b98f7873fe11ac4aa8081" + integrity sha512-2Sd4ShcWxbx6OY1IHyla/CVNwvg7XwZVoXZHcSu9w9SReNP1EzzD5T8NWKIR38fIqEns9kDWKUQTXXAmlDrdPg== escape-html@^1.0.3, escape-html@~1.0.3: version "1.0.3" @@ -4444,57 +4628,113 @@ estraverse@^5.2.0: integrity sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA== estree-util-attach-comments@^2.0.0: - version 
"2.1.0" - resolved "https://registry.yarnpkg.com/estree-util-attach-comments/-/estree-util-attach-comments-2.1.0.tgz#47d69900588bcbc6bf58c3798803ec5f1f3008de" - integrity sha512-rJz6I4L0GaXYtHpoMScgDIwM0/Vwbu5shbMeER596rB2D1EWF6+Gj0e0UKzJPZrpoOc87+Q2kgVFHfjAymIqmw== + version "2.1.1" + resolved "https://registry.yarnpkg.com/estree-util-attach-comments/-/estree-util-attach-comments-2.1.1.tgz#ee44f4ff6890ee7dfb3237ac7810154c94c63f84" + integrity sha512-+5Ba/xGGS6mnwFbXIuQiDPTbuTxuMCooq3arVv7gPZtYpjp+VXH/NkHAP35OOefPhNG/UGqU3vt/LTABwcHX0w== + dependencies: + "@types/estree" "^1.0.0" + +estree-util-attach-comments@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/estree-util-attach-comments/-/estree-util-attach-comments-3.0.0.tgz#344bde6a64c8a31d15231e5ee9e297566a691c2d" + integrity sha512-cKUwm/HUcTDsYh/9FgnuFqpfquUbwIqwKM26BVCGDPVgvaCl/nDCCjUfiLlx6lsEZ3Z4RFxNbOQ60pkaEwFxGw== dependencies: "@types/estree" "^1.0.0" estree-util-build-jsx@^2.0.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/estree-util-build-jsx/-/estree-util-build-jsx-2.2.0.tgz#d4307bbeee28c14eb4d63b75c9aad28fa61d84f5" - integrity sha512-apsfRxF9uLrqosApvHVtYZjISPvTJ+lBiIydpC+9wE6cF6ssbhnjyQLqaIjgzGxvC2Hbmec1M7g91PoBayYoQQ== + version "2.2.2" + resolved "https://registry.yarnpkg.com/estree-util-build-jsx/-/estree-util-build-jsx-2.2.2.tgz#32f8a239fb40dc3f3dca75bb5dcf77a831e4e47b" + integrity sha512-m56vOXcOBuaF+Igpb9OPAy7f9w9OIkb5yhjsZuaPm7HoGi4oTOQi0h2+yZ+AtKklYFZ+rPC4n0wYCJCEU1ONqg== dependencies: "@types/estree-jsx" "^1.0.0" estree-util-is-identifier-name "^2.0.0" estree-walker "^3.0.0" +estree-util-build-jsx@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/estree-util-build-jsx/-/estree-util-build-jsx-3.0.1.tgz#b6d0bced1dcc4f06f25cf0ceda2b2dcaf98168f1" + integrity sha512-8U5eiL6BTrPxp/CHbs2yMgP8ftMhR5ww1eIKoWRMlqvltHF8fZn5LRDvTKuxD3DUn+shRbLGqXemcP51oFCsGQ== + dependencies: + "@types/estree-jsx" "^1.0.0" + devlop "^1.0.0" + estree-util-is-identifier-name "^3.0.0" + estree-walker "^3.0.0" + estree-util-is-identifier-name@^2.0.0: - version "2.0.1" - resolved "https://registry.yarnpkg.com/estree-util-is-identifier-name/-/estree-util-is-identifier-name-2.0.1.tgz#cf07867f42705892718d9d89eb2d85eaa8f0fcb5" - integrity sha512-rxZj1GkQhY4x1j/CSnybK9cGuMFQYFPLq0iNyopqf14aOVLFtMv7Esika+ObJWPWiOHuMOAHz3YkWoLYYRnzWQ== + version "2.1.0" + resolved "https://registry.yarnpkg.com/estree-util-is-identifier-name/-/estree-util-is-identifier-name-2.1.0.tgz#fb70a432dcb19045e77b05c8e732f1364b4b49b2" + integrity sha512-bEN9VHRyXAUOjkKVQVvArFym08BTWB0aJPppZZr0UNyAqWsLaVfAqP7hbaTJjzHifmB5ebnR8Wm7r7yGN/HonQ== + +estree-util-is-identifier-name@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz#0b5ef4c4ff13508b34dcd01ecfa945f61fce5dbd" + integrity sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg== + +estree-util-scope@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/estree-util-scope/-/estree-util-scope-1.0.0.tgz#9cbdfc77f5cb51e3d9ed4ad9c4adbff22d43e585" + integrity sha512-2CAASclonf+JFWBNJPndcOpA8EMJwa0Q8LUFJEKqXLW6+qBvbFZuF5gItbQOs/umBUkjviCSDCbBwU2cXbmrhQ== + dependencies: + "@types/estree" "^1.0.0" + devlop "^1.0.0" estree-util-to-js@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/estree-util-to-js/-/estree-util-to-js-1.1.0.tgz#3bd9bb86354063537cc3d81259be2f0d4c3af39f" - integrity 
sha512-490lbfCcpLk+ofK6HCgqDfYs4KAfq6QVvDw3+Bm1YoKRgiOjKiKYGAVQE1uwh7zVxBgWhqp4FDtp5SqunpUk1A== + version "1.2.0" + resolved "https://registry.yarnpkg.com/estree-util-to-js/-/estree-util-to-js-1.2.0.tgz#0f80d42443e3b13bd32f7012fffa6f93603f4a36" + integrity sha512-IzU74r1PK5IMMGZXUVZbmiu4A1uhiPgW5hm1GjcOfr4ZzHaMPpLNJjR7HjXiIOzi25nZDrgFTobHTkV5Q6ITjA== dependencies: "@types/estree-jsx" "^1.0.0" astring "^1.8.0" source-map "^0.7.0" +estree-util-to-js@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/estree-util-to-js/-/estree-util-to-js-2.0.0.tgz#10a6fb924814e6abb62becf0d2bc4dea51d04f17" + integrity sha512-WDF+xj5rRWmD5tj6bIqRi6CkLIXbbNQUcxQHzGysQzvHmdYG2G7p/Tf0J0gpxGgkeMZNTIjT/AoSvC9Xehcgdg== + dependencies: + "@types/estree-jsx" "^1.0.0" + astring "^1.8.0" + source-map "^0.7.0" + +estree-util-value-to-estree@^3.0.1: + version "3.1.2" + resolved "https://registry.yarnpkg.com/estree-util-value-to-estree/-/estree-util-value-to-estree-3.1.2.tgz#d2f0e5d350a6c181673eb7299743325b86a9bf5c" + integrity sha512-S0gW2+XZkmsx00tU2uJ4L9hUT7IFabbml9pHh2WQqFmAbxit++YGZne0sKJbNwkj9Wvg9E4uqWl4nCIFQMmfag== + dependencies: + "@types/estree" "^1.0.0" + estree-util-visit@^1.0.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/estree-util-visit/-/estree-util-visit-1.2.0.tgz#aa0311a9c2f2aa56e9ae5e8b9d87eac14e4ec8f8" - integrity sha512-wdsoqhWueuJKsh5hqLw3j8lwFqNStm92VcwtAOAny8g/KS/l5Y8RISjR4k5W6skCj3Nirag/WUCMS0Nfy3sgsg== + version "1.2.1" + resolved "https://registry.yarnpkg.com/estree-util-visit/-/estree-util-visit-1.2.1.tgz#8bc2bc09f25b00827294703835aabee1cc9ec69d" + integrity sha512-xbgqcrkIVbIG+lI/gzbvd9SGTJL4zqJKBFttUl5pP27KhAjtMKbX/mQXJ7qgyXpMgVy/zvpm0xoQQaGL8OloOw== dependencies: "@types/estree-jsx" "^1.0.0" "@types/unist" "^2.0.0" +estree-util-visit@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/estree-util-visit/-/estree-util-visit-2.0.0.tgz#13a9a9f40ff50ed0c022f831ddf4b58d05446feb" + integrity sha512-m5KgiH85xAhhW8Wta0vShLcUvOsh3LLPI2YVwcbio1l7E09NTLL1EyMZFM1OyWowoH0skScNbhOPl4kcBgzTww== + dependencies: + "@types/estree-jsx" "^1.0.0" + "@types/unist" "^3.0.0" + estree-walker@^3.0.0: - version "3.0.1" - resolved "https://registry.yarnpkg.com/estree-walker/-/estree-walker-3.0.1.tgz#c2a9fb4a30232f5039b7c030b37ead691932debd" - integrity sha512-woY0RUD87WzMBUiZLx8NsYr23N5BKsOMZHhu2hoNRVh6NXGfoiT1KOL8G3UHlJAnEDGmfa5ubNA/AacfG+Kb0g== + version "3.0.3" + resolved "https://registry.yarnpkg.com/estree-walker/-/estree-walker-3.0.3.tgz#67c3e549ec402a487b4fc193d1953a524752340d" + integrity sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g== + dependencies: + "@types/estree" "^1.0.0" esutils@^2.0.2: version "2.0.3" resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.3.tgz#74d2eb4de0b8da1293711910d50775b9b710ef64" integrity sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g== -eta@^2.0.0: - version "2.0.1" - resolved "https://registry.yarnpkg.com/eta/-/eta-2.0.1.tgz#199e675359cb6e19d38f29e1f405e1ba0e79a6df" - integrity sha512-46E2qDPDm7QA+usjffUWz9KfXsxVZclPOuKsXs4ZWZdI/X1wpDF7AO424pt7fdYohCzWsIkXAhNGXSlwo5naAg== +eta@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/eta/-/eta-2.2.0.tgz#eb8b5f8c4e8b6306561a455e62cd7492fe3a9b8a" + integrity sha512-UVQ72Rqjy/ZKQalzV5dCCJP80GrmPrMxh6NlNf+erV6ObL0ZFkhCstWRawS85z3smdr3d2wXPsZEY7rDPfGd2g== etag@~1.8.1: version "1.8.1" @@ -4540,36 +4780,36 @@ exenv@^1.2.0: integrity 
sha512-Z+ktTxTwv9ILfgKCk32OX3n/doe+OcLTRtqK9pcL+JsP3J1/VW8Uvl4ZjLlKqeW4rzK4oesDOGMEMRIZqtP4Iw== express@^4.17.3: - version "4.18.2" - resolved "https://registry.yarnpkg.com/express/-/express-4.18.2.tgz#3fabe08296e930c796c19e3c516979386ba9fd59" - integrity sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ== + version "4.21.1" + resolved "https://registry.yarnpkg.com/express/-/express-4.21.1.tgz#9dae5dda832f16b4eec941a4e44aa89ec481b281" + integrity sha512-YSFlK1Ee0/GC8QaO91tHcDxJiE/X4FbpAyQWkxAvG6AXCuR65YzK8ua6D9hvi/TzUfZMpc+BwuM1IPw8fmQBiQ== dependencies: accepts "~1.3.8" array-flatten "1.1.1" - body-parser "1.20.1" + body-parser "1.20.3" content-disposition "0.5.4" content-type "~1.0.4" - cookie "0.5.0" + cookie "0.7.1" cookie-signature "1.0.6" debug "2.6.9" depd "2.0.0" - encodeurl "~1.0.2" + encodeurl "~2.0.0" escape-html "~1.0.3" etag "~1.8.1" - finalhandler "1.2.0" + finalhandler "1.3.1" fresh "0.5.2" http-errors "2.0.0" - merge-descriptors "1.0.1" + merge-descriptors "1.0.3" methods "~1.1.2" on-finished "2.4.1" parseurl "~1.3.3" - path-to-regexp "0.1.7" + path-to-regexp "0.1.10" proxy-addr "~2.0.7" - qs "6.11.0" + qs "6.13.0" range-parser "~1.2.1" safe-buffer "5.2.1" - send "0.18.0" - serve-static "1.15.0" + send "0.19.0" + serve-static "1.16.2" setprototypeof "1.2.0" statuses "2.0.1" type-is "~1.6.18" @@ -4593,10 +4833,10 @@ fast-deep-equal@^3.1.1, fast-deep-equal@^3.1.3: resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525" integrity sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q== -fast-glob@^3.2.11, fast-glob@^3.2.9: - version "3.2.12" - resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.2.12.tgz#7f39ec99c2e6ab030337142da9e0c18f37afae80" - integrity sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w== +fast-glob@^3.2.11, fast-glob@^3.2.9, fast-glob@^3.3.0: + version "3.3.2" + resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.3.2.tgz#a904501e57cfdd2ffcded45e99a54fef55e46129" + integrity sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow== dependencies: "@nodelib/fs.stat" "^2.0.2" "@nodelib/fs.walk" "^1.2.3" @@ -4609,20 +4849,25 @@ fast-json-stable-stringify@^2.0.0: resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz#874bf69c6f404c2b5d99c481341399fd55892633" integrity sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw== -fast-url-parser@1.1.3: - version "1.1.3" - resolved "https://registry.yarnpkg.com/fast-url-parser/-/fast-url-parser-1.1.3.tgz#f4af3ea9f34d8a271cf58ad2b3759f431f0b318d" - integrity sha512-5jOCVXADYNuRkKFzNJ0dCCewsZiYo0dz8QNYljkOpFC6r2U4OBmKtvm/Tsuh4w1YYdDqDb31a8TVhBJ2OJKdqQ== - dependencies: - punycode "^1.3.2" +fast-uri@^3.0.1: + version "3.0.3" + resolved "https://registry.yarnpkg.com/fast-uri/-/fast-uri-3.0.3.tgz#892a1c91802d5d7860de728f18608a0573142241" + integrity sha512-aLrHthzCjH5He4Z2H9YZ+v6Ujb9ocRuW6ZzkJQOrTxleEijANq4v1TsaPaVG1PZcuurEzrLcWRyYBYXD5cEiaw== fastq@^1.6.0: - version "1.15.0" - resolved "https://registry.yarnpkg.com/fastq/-/fastq-1.15.0.tgz#d04d07c6a2a68fe4599fea8d2e103a937fae6b3a" - integrity sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw== + version "1.17.1" + resolved 
"https://registry.yarnpkg.com/fastq/-/fastq-1.17.1.tgz#2a523f07a4e7b1e81a42b91b8bf2254107753b47" + integrity sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w== dependencies: reusify "^1.0.4" +fault@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/fault/-/fault-2.0.1.tgz#d47ca9f37ca26e4bd38374a7c500b5a384755b6c" + integrity sha512-WtySTkS4OKev5JtpHXnib4Gxiurzh5NCGvWrFaZ34m6JehfTUhKZvn9njTfw48t6JumVQOmrKqpmGcdwxnhqBQ== + dependencies: + format "^0.2.0" + faye-websocket@^0.11.3: version "0.11.4" resolved "https://registry.yarnpkg.com/faye-websocket/-/faye-websocket-0.11.4.tgz#7f0d9275cfdd86a1c963dc8b65fcc451edcbb1da" @@ -4630,31 +4875,6 @@ faye-websocket@^0.11.3: dependencies: websocket-driver ">=0.5.1" -fbemitter@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/fbemitter/-/fbemitter-3.0.0.tgz#00b2a1af5411254aab416cd75f9e6289bee4bff3" - integrity sha512-KWKaceCwKQU0+HPoop6gn4eOHk50bBv/VxjJtGMfwmJt3D29JpN4H4eisCtIPA+a8GVBam+ldMMpMjJUvpDyHw== - dependencies: - fbjs "^3.0.0" - -fbjs-css-vars@^1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/fbjs-css-vars/-/fbjs-css-vars-1.0.2.tgz#216551136ae02fe255932c3ec8775f18e2c078b8" - integrity sha512-b2XGFAFdWZWg0phtAWLHCk836A1Xann+I+Dgd3Gk64MHKZO44FfoD1KxyvbSh0qZsIoXQGGlVztIY+oitJPpRQ== - -fbjs@^3.0.0, fbjs@^3.0.1: - version "3.0.4" - resolved "https://registry.yarnpkg.com/fbjs/-/fbjs-3.0.4.tgz#e1871c6bd3083bac71ff2da868ad5067d37716c6" - integrity sha512-ucV0tDODnGV3JCnnkmoszb5lf4bNpzjv80K41wd4k798Etq+UYD0y0TIfalLjZoKgjive6/adkRnszwapiDgBQ== - dependencies: - cross-fetch "^3.1.5" - fbjs-css-vars "^1.0.0" - loose-envify "^1.0.0" - object-assign "^4.1.0" - promise "^7.1.1" - setimmediate "^1.0.5" - ua-parser-js "^0.7.30" - feed@^4.2.2: version "4.2.2" resolved "https://registry.yarnpkg.com/feed/-/feed-4.2.2.tgz#865783ef6ed12579e2c44bbef3c9113bc4956a7e" @@ -4675,34 +4895,33 @@ filesize@^8.0.6: resolved "https://registry.yarnpkg.com/filesize/-/filesize-8.0.7.tgz#695e70d80f4e47012c132d57a059e80c6b580bd8" integrity sha512-pjmC+bkIF8XI7fWaH8KxHcZL3DPybs1roSKP4rKDvy20tAWwIObE4+JIseG2byfGKhud5ZnM4YSGKBz7Sh0ndQ== -fill-range@^7.0.1: - version "7.0.1" - resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.0.1.tgz#1919a6a7c75fe38b2c7c77e5198535da9acdda40" - integrity sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ== +fill-range@^7.1.1: + version "7.1.1" + resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.1.1.tgz#44265d3cac07e3ea7dc247516380643754a05292" + integrity sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg== dependencies: to-regex-range "^5.0.1" -finalhandler@1.2.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.2.0.tgz#7d23fe5731b207b4640e4fcd00aec1f9207a7b32" - integrity sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg== +finalhandler@1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.3.1.tgz#0c575f1d1d324ddd1da35ad7ece3df7d19088019" + integrity sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ== dependencies: debug "2.6.9" - encodeurl "~1.0.2" + encodeurl "~2.0.0" escape-html "~1.0.3" on-finished "2.4.1" parseurl "~1.3.3" statuses "2.0.1" unpipe "~1.0.0" -find-cache-dir@^3.3.1: - version "3.3.2" - resolved 
"https://registry.yarnpkg.com/find-cache-dir/-/find-cache-dir-3.3.2.tgz#b30c5b6eff0730731aea9bbd9dbecbd80256d64b" - integrity sha512-wXZV5emFEjrridIgED11OoUKLxiYjAcqot/NJdAkOhlJ+vGzwhOAfcG5OX1jP+S0PcjEn8bdMJv+g2jwQ3Onig== +find-cache-dir@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/find-cache-dir/-/find-cache-dir-4.0.0.tgz#a30ee0448f81a3990708f6453633c733e2f6eec2" + integrity sha512-9ZonPT4ZAK4a+1pUPVPZJapbi7O5qbbJPdYw/NOQWZZbVLdDTYM3A4R9z/DpAM08IDaFGsvPgiGZ82WEwUDWjg== dependencies: - commondir "^1.0.1" - make-dir "^3.0.2" - pkg-dir "^4.1.0" + common-path-prefix "^3.0.0" + pkg-dir "^7.0.0" find-up@^3.0.0: version "3.0.0" @@ -4711,14 +4930,6 @@ find-up@^3.0.0: dependencies: locate-path "^3.0.0" -find-up@^4.0.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/find-up/-/find-up-4.1.0.tgz#97afe7d6cdc0bc5928584b7c8d7b16e8a9aa5d19" - integrity sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw== - dependencies: - locate-path "^5.0.0" - path-exists "^4.0.0" - find-up@^5.0.0: version "5.0.0" resolved "https://registry.yarnpkg.com/find-up/-/find-up-5.0.0.tgz#4c92819ecb7083561e4f4a240a86be5198f536fc" @@ -4727,18 +4938,23 @@ find-up@^5.0.0: locate-path "^6.0.0" path-exists "^4.0.0" -flux@^4.0.1: - version "4.0.3" - resolved "https://registry.yarnpkg.com/flux/-/flux-4.0.3.tgz#573b504a24982c4768fdfb59d8d2ea5637d72ee7" - integrity sha512-yKAbrp7JhZhj6uiT1FTuVMlIAT1J4jqEyBpFApi1kxpGZCvacMVc/t1pMQyotqHhAgvoE3bNvAykhCo2CLjnYw== +find-up@^6.3.0: + version "6.3.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-6.3.0.tgz#2abab3d3280b2dc7ac10199ef324c4e002c8c790" + integrity sha512-v2ZsoEuVHYy8ZIlYqwPe/39Cy+cFDzp4dXPaxNvkEuouymu+2Jbz0PxpKarJHYJTmv2HWT3O382qY8l4jMWthw== dependencies: - fbemitter "^3.0.0" - fbjs "^3.0.1" + locate-path "^7.1.0" + path-exists "^5.0.0" -follow-redirects@^1.0.0, follow-redirects@^1.14.7: - version "1.15.2" - resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.2.tgz#b460864144ba63f2681096f274c4e57026da2c13" - integrity sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA== +flat@^5.0.2: + version "5.0.2" + resolved "https://registry.yarnpkg.com/flat/-/flat-5.0.2.tgz#8ca6fe332069ffa9d324c327198c598259ceb241" + integrity sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ== + +follow-redirects@^1.0.0: + version "1.15.9" + resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.9.tgz#a604fa10e443bf98ca94228d9eebcc2e8a2c8ee1" + integrity sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ== for-each@^0.3.3: version "0.3.3" @@ -4748,9 +4964,9 @@ for-each@^0.3.3: is-callable "^1.1.3" fork-ts-checker-webpack-plugin@^6.5.0: - version "6.5.2" - resolved "https://registry.yarnpkg.com/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-6.5.2.tgz#4f67183f2f9eb8ba7df7177ce3cf3e75cdafb340" - integrity sha512-m5cUmF30xkZ7h4tWUgTAcEaKmUW7tfyUyTqNNOz7OxWJ0v1VWKTcOvH8FWHUwSjlW/356Ijc9vi3XfcPstpQKA== + version "6.5.3" + resolved "https://registry.yarnpkg.com/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-6.5.3.tgz#eda2eff6e22476a2688d10661688c47f611b37f3" + integrity sha512-SbH/l9ikmMWycd5puHJKTkZJKddF4iRLyW3DeZ08HTI7NGyLS38MXd/KGgeWumQO7YNQbW2u/NtPT2YowbPaGQ== dependencies: "@babel/code-frame" "^7.8.3" "@types/json-schema" "^7.0.5" @@ -4766,15 +4982,25 @@ fork-ts-checker-webpack-plugin@^6.5.0: semver "^7.3.2" 
tapable "^1.0.0" +form-data-encoder@^2.1.2: + version "2.1.4" + resolved "https://registry.yarnpkg.com/form-data-encoder/-/form-data-encoder-2.1.4.tgz#261ea35d2a70d48d30ec7a9603130fa5515e9cd5" + integrity sha512-yDYSgNMraqvnxiEXO4hi88+YZxaHC6QKzb5N84iRCTDeRO7ZALpir/lVmf/uXUhnwUr2O4HU8s/n6x+yNjQkHw== + +format@^0.2.0: + version "0.2.2" + resolved "https://registry.yarnpkg.com/format/-/format-0.2.2.tgz#d6170107e9efdc4ed30c9dc39016df942b5cb58b" + integrity sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww== + forwarded@0.2.0: version "0.2.0" resolved "https://registry.yarnpkg.com/forwarded/-/forwarded-0.2.0.tgz#2269936428aad4c15c7ebe9779a84bf0b2a81811" integrity sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow== -fraction.js@^4.2.0: - version "4.2.0" - resolved "https://registry.yarnpkg.com/fraction.js/-/fraction.js-4.2.0.tgz#448e5109a313a3527f5a3ab2119ec4cf0e0e2950" - integrity sha512-MhLuK+2gUcnZe8ZHlaaINnQLl0xRIGRfcGk2yl8xoQAfHrSsL3rYu6FCmBdkdbhc9EPlwyGHewaRsvwRMJtAlA== +fraction.js@^4.3.7: + version "4.3.7" + resolved "https://registry.yarnpkg.com/fraction.js/-/fraction.js-4.3.7.tgz#06ca0085157e42fda7f9e726e79fefc4068840f7" + integrity sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew== fresh@0.5.2: version "0.5.2" @@ -4790,6 +5016,15 @@ fs-extra@^10.1.0: jsonfile "^6.0.1" universalify "^2.0.0" +fs-extra@^11.1.1, fs-extra@^11.2.0: + version "11.2.0" + resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-11.2.0.tgz#e70e17dfad64232287d01929399e0ea7c86b0e5b" + integrity sha512-PmDi3uwK5nFuXh7XDTlVnS17xJS7vW36is2+w3xcv8SVxiB4NyATf4ctkVY5bkSjX0Y4nbvZCq1/EjtEyr9ktw== + dependencies: + graceful-fs "^4.2.0" + jsonfile "^6.0.1" + universalify "^2.0.0" + fs-extra@^9.0.0: version "9.1.0" resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-9.1.0.tgz#5954460c764a8da2094ba3554bf839e6b9a7c86d" @@ -4800,10 +5035,10 @@ fs-extra@^9.0.0: jsonfile "^6.0.1" universalify "^2.0.0" -fs-monkey@^1.0.3: - version "1.0.3" - resolved "https://registry.yarnpkg.com/fs-monkey/-/fs-monkey-1.0.3.tgz#ae3ac92d53bb328efe0e9a1d9541f6ad8d48e2d3" - integrity sha512-cybjIfiiE+pTWicSCLFHSrXZ6EilF30oh91FDP9S2B051prEa7QWfrVTQm10/dDpswBDXZugPa1Ogu8Yh+HV0Q== +fs-monkey@^1.0.4: + version "1.0.6" + resolved "https://registry.yarnpkg.com/fs-monkey/-/fs-monkey-1.0.6.tgz#8ead082953e88d992cf3ff844faa907b26756da2" + integrity sha512-b1FMfwetIKymC0eioW7mTywihSQE4oLzQn1dB6rZB5fx/3NpNEdAWeCSMB+60/AeT0TCXsxzAlcYVEFCTAksWg== fs.realpath@^1.0.0: version "1.0.0" @@ -4811,26 +5046,26 @@ fs.realpath@^1.0.0: integrity sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw== fsevents@~2.3.2: - version "2.3.2" - resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a" - integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA== + version "2.3.3" + resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.3.tgz#cac6407785d03675a2a5e1a5305c697b347d90d6" + integrity sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw== -function-bind@^1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d" - integrity sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A== +function-bind@^1.1.2: + version "1.1.2" + 
resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.2.tgz#2c02d864d97f3ea6c8830c464cbd11ab6eab7a1c" + integrity sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA== -function.prototype.name@^1.1.5: - version "1.1.5" - resolved "https://registry.yarnpkg.com/function.prototype.name/-/function.prototype.name-1.1.5.tgz#cce0505fe1ffb80503e6f9e46cc64e46a12a9621" - integrity sha512-uN7m/BzVKQnCUF/iW8jYea67v++2u7m5UgENbHRtdDVclOUP+FMPlCNdmk0h/ysGyo2tavMJEDqJAkJdRa1vMA== +function.prototype.name@^1.1.6: + version "1.1.6" + resolved "https://registry.yarnpkg.com/function.prototype.name/-/function.prototype.name-1.1.6.tgz#cdf315b7d90ee77a4c6ee216c3c3362da07533fd" + integrity sha512-Z5kx79swU5P27WEayXM1tBi5Ze/lbIyiNgU3qyXUOf9b2rgXYyF9Dy9Cx+IQv/Lc8WCG6L82zwUPpSS9hGehIg== dependencies: call-bind "^1.0.2" - define-properties "^1.1.3" - es-abstract "^1.19.0" - functions-have-names "^1.2.2" + define-properties "^1.2.0" + es-abstract "^1.22.1" + functions-have-names "^1.2.3" -functions-have-names@^1.2.2: +functions-have-names@^1.2.3: version "1.2.3" resolved "https://registry.yarnpkg.com/functions-have-names/-/functions-have-names-1.2.3.tgz#0404fe4ee2ba2f607f0e0ec3c80bae994133b834" integrity sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ== @@ -4850,53 +5085,42 @@ gauge@^3.0.0: strip-ansi "^6.0.1" wide-align "^1.1.2" -gensync@^1.0.0-beta.1, gensync@^1.0.0-beta.2: +gensync@^1.0.0-beta.2: version "1.0.0-beta.2" resolved "https://registry.yarnpkg.com/gensync/-/gensync-1.0.0-beta.2.tgz#32a6ee76c3d7f52d46b2b1ae5d93fea8580a25e0" integrity sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg== -get-intrinsic@^1.0.2, get-intrinsic@^1.1.1, get-intrinsic@^1.1.3: - version "1.1.3" - resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.1.3.tgz#063c84329ad93e83893c7f4f243ef63ffa351385" - integrity sha512-QJVz1Tj7MS099PevUG5jvnt9tSkXN8K14dxQlikJuPt4uD9hHAHjLyLBiLR5zELelBdD9QNRAXZzsJx0WaDL9A== +get-intrinsic@^1.1.3, get-intrinsic@^1.2.1, get-intrinsic@^1.2.3, get-intrinsic@^1.2.4: + version "1.2.4" + resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.2.4.tgz#e385f5a4b5227d449c3eabbad05494ef0abbeadd" + integrity sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ== dependencies: - function-bind "^1.1.1" - has "^1.0.3" + es-errors "^1.3.0" + function-bind "^1.1.2" + has-proto "^1.0.1" has-symbols "^1.0.3" + hasown "^2.0.0" get-own-enumerable-property-symbols@^3.0.0: version "3.0.2" resolved "https://registry.yarnpkg.com/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz#b5fde77f22cbe35f390b4e089922c50bce6ef664" integrity sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g== -get-stream@^4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-4.1.0.tgz#c1b255575f3dc21d59bfc79cd3d2b46b1c3a54b5" - integrity sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w== - dependencies: - pump "^3.0.0" - -get-stream@^5.1.0: - version "5.2.0" - resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-5.2.0.tgz#4966a1795ee5ace65e706c4b7beb71257d6e22d3" - integrity sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA== - dependencies: - pump "^3.0.0" - -get-stream@^6.0.0: +get-stream@^6.0.0, get-stream@^6.0.1: version 
"6.0.1" resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-6.0.1.tgz#a262d8eef67aced57c2852ad6167526a43cbf7b7" integrity sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg== -get-symbol-description@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/get-symbol-description/-/get-symbol-description-1.0.0.tgz#7fdb81c900101fbd564dd5f1a30af5aadc1e58d6" - integrity sha512-2EmdH1YvIQiZpltCNgkuiUnyukzxM/R6NDJX31Ke3BG1Nq5b0S2PhX59UKi9vZpPDQVdqn+1IcaAwnzTT5vCjw== +get-symbol-description@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/get-symbol-description/-/get-symbol-description-1.0.2.tgz#533744d5aa20aca4e079c8e5daf7fd44202821f5" + integrity sha512-g0QYk1dZBxGwk+Ngc+ltRH2IBp2f7zBkBMBJZCDerh6EhlhSR6+9irMCuT/09zD6qkarHUSn529sK/yL4S27mg== dependencies: - call-bind "^1.0.2" - get-intrinsic "^1.1.1" + call-bind "^1.0.5" + es-errors "^1.3.0" + get-intrinsic "^1.2.4" -github-slugger@^1.4.0: +github-slugger@^1.5.0: version "1.5.0" resolved "https://registry.yarnpkg.com/github-slugger/-/github-slugger-1.5.0.tgz#17891bbc73232051474d68bd867a34625c955f7d" integrity sha512-wIh+gKBI9Nshz2o46B0B3f5k/W+WI9ZAv6y5Dn5WJ5SK1t0TnDimB4WE5rmTD05ZAIn8HALCZVmCsvj0w0v0lw== @@ -4960,6 +5184,14 @@ globals@^11.1.0: resolved "https://registry.yarnpkg.com/globals/-/globals-11.12.0.tgz#ab8795338868a0babd8525758018c2a7eb95c42e" integrity sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA== +globalthis@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/globalthis/-/globalthis-1.0.4.tgz#7430ed3a975d97bfb59bcce41f5cabbafa651236" + integrity sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ== + dependencies: + define-properties "^1.2.1" + gopd "^1.0.1" + globby@^11.0.1, globby@^11.0.4, globby@^11.1.0: version "11.1.0" resolved "https://registry.yarnpkg.com/globby/-/globby-11.1.0.tgz#bd4be98bb042f83d796f7e3811991fbe82a0d34b" @@ -4973,13 +5205,13 @@ globby@^11.0.1, globby@^11.0.4, globby@^11.1.0: slash "^3.0.0" globby@^13.1.1: - version "13.1.3" - resolved "https://registry.yarnpkg.com/globby/-/globby-13.1.3.tgz#f62baf5720bcb2c1330c8d4ef222ee12318563ff" - integrity sha512-8krCNHXvlCgHDpegPzleMq07yMYTO2sXKASmZmquEYWEmCx6J5UTRbp5RwMJkTJGtcQ44YpiUYUiN0b9mzy8Bw== + version "13.2.2" + resolved "https://registry.yarnpkg.com/globby/-/globby-13.2.2.tgz#63b90b1bf68619c2135475cbd4e71e66aa090592" + integrity sha512-Y1zNGV+pzQdh7H39l9zgB4PJqjRNqydvdYCDG4HFXM4XuvSaQQlEc91IU1yALL8gUTDomgBAfz3XJdmUS+oo0w== dependencies: dir-glob "^3.0.1" - fast-glob "^3.2.11" - ignore "^5.2.0" + fast-glob "^3.3.0" + ignore "^5.2.4" merge2 "^1.4.1" slash "^4.0.0" @@ -4990,28 +5222,33 @@ gopd@^1.0.1: dependencies: get-intrinsic "^1.1.3" -got@^9.6.0: - version "9.6.0" - resolved "https://registry.yarnpkg.com/got/-/got-9.6.0.tgz#edf45e7d67f99545705de1f7bbeeeb121765ed85" - integrity sha512-R7eWptXuGYxwijs0eV+v3o6+XH1IqVK8dJOEecQfTmkncw9AV4dcw/Dhxi8MdlqPthxxpZyizMzyg8RTmEsG+Q== - dependencies: - "@sindresorhus/is" "^0.14.0" - "@szmarczak/http-timer" "^1.1.2" - cacheable-request "^6.0.0" - decompress-response "^3.3.0" - duplexer3 "^0.1.4" - get-stream "^4.1.0" - lowercase-keys "^1.0.1" - mimic-response "^1.0.1" - p-cancelable "^1.0.0" - to-readable-stream "^1.0.0" - url-parse-lax "^3.0.0" - -graceful-fs@^4.1.2, graceful-fs@^4.1.6, graceful-fs@^4.2.0, graceful-fs@^4.2.4, graceful-fs@^4.2.6, graceful-fs@^4.2.9: +got@^12.1.0: + version "12.6.1" + resolved 
"https://registry.yarnpkg.com/got/-/got-12.6.1.tgz#8869560d1383353204b5a9435f782df9c091f549" + integrity sha512-mThBblvlAF1d4O5oqyvN+ZxLAYwIJK7bpMxgYqPD9okW0C3qm5FFn7k811QrcuEBwaogR3ngOFoCfs6mRv7teQ== + dependencies: + "@sindresorhus/is" "^5.2.0" + "@szmarczak/http-timer" "^5.0.1" + cacheable-lookup "^7.0.0" + cacheable-request "^10.2.8" + decompress-response "^6.0.0" + form-data-encoder "^2.1.2" + get-stream "^6.0.1" + http2-wrapper "^2.1.10" + lowercase-keys "^3.0.0" + p-cancelable "^3.0.0" + responselike "^3.0.0" + +graceful-fs@4.2.10: version "4.2.10" resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.10.tgz#147d3a006da4ca3ce14728c7aefc287c367d7a6c" integrity sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA== +graceful-fs@^4.1.2, graceful-fs@^4.1.6, graceful-fs@^4.2.0, graceful-fs@^4.2.11, graceful-fs@^4.2.4, graceful-fs@^4.2.6, graceful-fs@^4.2.9: + version "4.2.11" + resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.11.tgz#4183e4e8bf08bb6e05bbb2f7d2e0c8f712ca40e3" + integrity sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ== + gray-matter@^4.0.3: version "4.0.3" resolved "https://registry.yarnpkg.com/gray-matter/-/gray-matter-4.0.3.tgz#e893c064825de73ea1f5f7d88c7a9f7274288798" @@ -5049,54 +5286,46 @@ has-flag@^4.0.0: resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b" integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ== -has-property-descriptors@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/has-property-descriptors/-/has-property-descriptors-1.0.0.tgz#610708600606d36961ed04c196193b6a607fa861" - integrity sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ== +has-property-descriptors@^1.0.0, has-property-descriptors@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz#963ed7d071dc7bf5f084c5bfbe0d1b6222586854" + integrity sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg== dependencies: - get-intrinsic "^1.1.1" + es-define-property "^1.0.0" + +has-proto@^1.0.1, has-proto@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/has-proto/-/has-proto-1.0.3.tgz#b31ddfe9b0e6e9914536a6ab286426d0214f77fd" + integrity sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q== has-symbols@^1.0.1, has-symbols@^1.0.2, has-symbols@^1.0.3: version "1.0.3" resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.3.tgz#bb7b2c4349251dce87b125f7bdf874aa7c8b39f8" integrity sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A== -has-tostringtag@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/has-tostringtag/-/has-tostringtag-1.0.0.tgz#7e133818a7d394734f941e73c3d3f9291e658b25" - integrity sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ== +has-tostringtag@^1.0.0, has-tostringtag@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/has-tostringtag/-/has-tostringtag-1.0.2.tgz#2cdc42d40bef2e5b4eeab7c01a73c54ce7ab5abc" + integrity sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw== dependencies: - has-symbols "^1.0.2" + has-symbols "^1.0.3" has-unicode@^2.0.1: version "2.0.1" 
resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9" integrity sha512-8Rf9Y83NBReMnx0gFzA8JImQACstCYWUplepDa9xprwwtmgEZUF0h/i5xSA625zB/I37EtrswSST6OXxwaaIJQ== -has-yarn@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/has-yarn/-/has-yarn-2.1.0.tgz#137e11354a7b5bf11aa5cb649cf0c6f3ff2b2e77" - integrity sha512-UqBRqi4ju7T+TqGNdqAO0PaSVGsDGJUBQvk9eUWNGRY1CFGDzYhLWoM7JQEemnlvVcv/YEmc2wNW8BC24EnUsw== - -has@^1.0.3: - version "1.0.3" - resolved "https://registry.yarnpkg.com/has/-/has-1.0.3.tgz#722d7cbfc1f6aa8241f16dd814e011e1f41e8796" - integrity sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw== - dependencies: - function-bind "^1.1.1" +has-yarn@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/has-yarn/-/has-yarn-3.0.0.tgz#c3c21e559730d1d3b57e28af1f30d06fac38147d" + integrity sha512-IrsVwUHhEULx3R8f/aA8AHuEzAorplsab/v8HBzEiIukwq5i/EC+xmOW+HfP1OaDP+2JkgT1yILHN2O3UFIbcA== -hast-to-hyperscript@^9.0.0: - version "9.0.1" - resolved "https://registry.yarnpkg.com/hast-to-hyperscript/-/hast-to-hyperscript-9.0.1.tgz#9b67fd188e4c81e8ad66f803855334173920218d" - integrity sha512-zQgLKqF+O2F72S1aa4y2ivxzSlko3MAvxkwG8ehGmNiqd98BIN3JM1rAJPmplEyLmGLO2QZYJtIneOSZ2YbJuA== +hasown@^2.0.0, hasown@^2.0.1, hasown@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.2.tgz#003eaf91be7adc372e84ec59dc37252cedb80003" + integrity sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ== dependencies: - "@types/unist" "^2.0.3" - comma-separated-tokens "^1.0.0" - property-information "^5.3.0" - space-separated-tokens "^1.0.0" - style-to-object "^0.3.0" - unist-util-is "^4.0.0" - web-namespaces "^1.0.0" + function-bind "^1.1.2" hast-util-from-parse5@^6.0.0: version "6.0.1" @@ -5110,6 +5339,20 @@ hast-util-from-parse5@^6.0.0: vfile-location "^3.2.0" web-namespaces "^1.0.0" +hast-util-from-parse5@^8.0.0: + version "8.0.1" + resolved "https://registry.yarnpkg.com/hast-util-from-parse5/-/hast-util-from-parse5-8.0.1.tgz#654a5676a41211e14ee80d1b1758c399a0327651" + integrity sha512-Er/Iixbc7IEa7r/XLtuG52zoqn/b3Xng/w6aZQ0xGVxzhw5xUFxcRqdPzP6yFi/4HBYRaifaI5fQ1RH8n0ZeOQ== + dependencies: + "@types/hast" "^3.0.0" + "@types/unist" "^3.0.0" + devlop "^1.0.0" + hastscript "^8.0.0" + property-information "^6.0.0" + vfile "^6.0.0" + vfile-location "^5.0.0" + web-namespaces "^2.0.0" + hast-util-has-property@^1.0.0: version "1.0.4" resolved "https://registry.yarnpkg.com/hast-util-has-property/-/hast-util-has-property-1.0.4.tgz#9f137565fad6082524b382c1e7d7d33ca5059f36" @@ -5125,21 +5368,31 @@ hast-util-parse-selector@^2.0.0: resolved "https://registry.yarnpkg.com/hast-util-parse-selector/-/hast-util-parse-selector-2.2.5.tgz#d57c23f4da16ae3c63b3b6ca4616683313499c3a" integrity sha512-7j6mrk/qqkSehsM92wQjdIgWM2/BW61u/53G6xmC8i1OmEdKLHbk419QKQUjz6LglWsfqoiHmyMRkP1BGjecNQ== -hast-util-raw@6.0.1: - version "6.0.1" - resolved "https://registry.yarnpkg.com/hast-util-raw/-/hast-util-raw-6.0.1.tgz#973b15930b7529a7b66984c98148b46526885977" - integrity sha512-ZMuiYA+UF7BXBtsTBNcLBF5HzXzkyE6MLzJnL605LKE8GJylNjGc4jjxazAHUtcwT5/CEt6afRKViYB4X66dig== - dependencies: - "@types/hast" "^2.0.0" - hast-util-from-parse5 "^6.0.0" - hast-util-to-parse5 "^6.0.0" - html-void-elements "^1.0.0" - parse5 "^6.0.0" - unist-util-position "^3.0.0" - vfile "^4.0.0" - web-namespaces "^1.0.0" - xtend "^4.0.0" - zwitch "^1.0.0" +hast-util-parse-selector@^4.0.0: + 
version "4.0.0" + resolved "https://registry.yarnpkg.com/hast-util-parse-selector/-/hast-util-parse-selector-4.0.0.tgz#352879fa86e25616036037dd8931fb5f34cb4a27" + integrity sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A== + dependencies: + "@types/hast" "^3.0.0" + +hast-util-raw@^9.0.0: + version "9.0.4" + resolved "https://registry.yarnpkg.com/hast-util-raw/-/hast-util-raw-9.0.4.tgz#2da03e37c46eb1a6f1391f02f9b84ae65818f7ed" + integrity sha512-LHE65TD2YiNsHD3YuXcKPHXPLuYh/gjp12mOfU8jxSrm1f/yJpsb0F/KKljS6U9LJoP0Ux+tCe8iJ2AsPzTdgA== + dependencies: + "@types/hast" "^3.0.0" + "@types/unist" "^3.0.0" + "@ungap/structured-clone" "^1.0.0" + hast-util-from-parse5 "^8.0.0" + hast-util-to-parse5 "^8.0.0" + html-void-elements "^3.0.0" + mdast-util-to-hast "^13.0.0" + parse5 "^7.0.0" + unist-util-position "^5.0.0" + unist-util-visit "^5.0.0" + vfile "^6.0.0" + web-namespaces "^2.0.0" + zwitch "^2.0.0" hast-util-select@^4.0.0: version "4.0.2" @@ -5162,36 +5415,81 @@ hast-util-select@^4.0.0: zwitch "^1.0.0" hast-util-to-estree@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/hast-util-to-estree/-/hast-util-to-estree-2.1.0.tgz#aeac70aad0102ae309570907b3f56a08231d5323" - integrity sha512-Vwch1etMRmm89xGgz+voWXvVHba2iiMdGMKmaMfYt35rbVtFDq8JNwwAIvi8zHMkO6Gvqo9oTMwJTmzVRfXh4g== + version "2.3.3" + resolved "https://registry.yarnpkg.com/hast-util-to-estree/-/hast-util-to-estree-2.3.3.tgz#da60142ffe19a6296923ec222aba73339c8bf470" + integrity sha512-ihhPIUPxN0v0w6M5+IiAZZrn0LH2uZomeWwhn7uP7avZC6TE7lIiEh2yBMPr5+zi1aUCXq6VoYRgs2Bw9xmycQ== + dependencies: + "@types/estree" "^1.0.0" + "@types/estree-jsx" "^1.0.0" + "@types/hast" "^2.0.0" + "@types/unist" "^2.0.0" + comma-separated-tokens "^2.0.0" + estree-util-attach-comments "^2.0.0" + estree-util-is-identifier-name "^2.0.0" + hast-util-whitespace "^2.0.0" + mdast-util-mdx-expression "^1.0.0" + mdast-util-mdxjs-esm "^1.0.0" + property-information "^6.0.0" + space-separated-tokens "^2.0.0" + style-to-object "^0.4.1" + unist-util-position "^4.0.0" + zwitch "^2.0.0" + +hast-util-to-estree@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/hast-util-to-estree/-/hast-util-to-estree-3.1.0.tgz#f2afe5e869ddf0cf690c75f9fc699f3180b51b19" + integrity sha512-lfX5g6hqVh9kjS/B9E2gSkvHH4SZNiQFiqWS0x9fENzEl+8W12RqdRxX6d/Cwxi30tPQs3bIO+aolQJNp1bIyw== dependencies: "@types/estree" "^1.0.0" "@types/estree-jsx" "^1.0.0" - "@types/hast" "^2.0.0" - "@types/unist" "^2.0.0" + "@types/hast" "^3.0.0" comma-separated-tokens "^2.0.0" - estree-util-attach-comments "^2.0.0" - estree-util-is-identifier-name "^2.0.0" - hast-util-whitespace "^2.0.0" - mdast-util-mdx-expression "^1.0.0" - mdast-util-mdxjs-esm "^1.0.0" + devlop "^1.0.0" + estree-util-attach-comments "^3.0.0" + estree-util-is-identifier-name "^3.0.0" + hast-util-whitespace "^3.0.0" + mdast-util-mdx-expression "^2.0.0" + mdast-util-mdx-jsx "^3.0.0" + mdast-util-mdxjs-esm "^2.0.0" property-information "^6.0.0" space-separated-tokens "^2.0.0" - style-to-object "^0.3.0" - unist-util-position "^4.0.0" + style-to-object "^0.4.0" + unist-util-position "^5.0.0" zwitch "^2.0.0" -hast-util-to-parse5@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/hast-util-to-parse5/-/hast-util-to-parse5-6.0.0.tgz#1ec44650b631d72952066cea9b1445df699f8479" - integrity sha512-Lu5m6Lgm/fWuz8eWnrKezHtVY83JeRGaNQ2kn9aJgqaxvVkFCZQBEhgodZUDUvoodgyROHDb3r5IxAEdl6suJQ== +hast-util-to-jsx-runtime@^2.0.0: + version "2.3.2" + resolved 
"https://registry.yarnpkg.com/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.2.tgz#6d11b027473e69adeaa00ca4cfb5bb68e3d282fa" + integrity sha512-1ngXYb+V9UT5h+PxNRa1O1FYguZK/XL+gkeqvp7EdHlB9oHUG0eYRo/vY5inBdcqo3RkPMC58/H94HvkbfGdyg== dependencies: - hast-to-hyperscript "^9.0.0" - property-information "^5.0.0" - web-namespaces "^1.0.0" - xtend "^4.0.0" - zwitch "^1.0.0" + "@types/estree" "^1.0.0" + "@types/hast" "^3.0.0" + "@types/unist" "^3.0.0" + comma-separated-tokens "^2.0.0" + devlop "^1.0.0" + estree-util-is-identifier-name "^3.0.0" + hast-util-whitespace "^3.0.0" + mdast-util-mdx-expression "^2.0.0" + mdast-util-mdx-jsx "^3.0.0" + mdast-util-mdxjs-esm "^2.0.0" + property-information "^6.0.0" + space-separated-tokens "^2.0.0" + style-to-object "^1.0.0" + unist-util-position "^5.0.0" + vfile-message "^4.0.0" + +hast-util-to-parse5@^8.0.0: + version "8.0.0" + resolved "https://registry.yarnpkg.com/hast-util-to-parse5/-/hast-util-to-parse5-8.0.0.tgz#477cd42d278d4f036bc2ea58586130f6f39ee6ed" + integrity sha512-3KKrV5ZVI8if87DVSi1vDeByYrkGzg4mEfeu4alwgmmIeARiBLKCZS2uw5Gb6nU9x9Yufyj3iudm6i7nl52PFw== + dependencies: + "@types/hast" "^3.0.0" + comma-separated-tokens "^2.0.0" + devlop "^1.0.0" + property-information "^6.0.0" + space-separated-tokens "^2.0.0" + web-namespaces "^2.0.0" + zwitch "^2.0.0" hast-util-to-string@^1.0.0: version "1.0.4" @@ -5213,9 +5511,16 @@ hast-util-whitespace@^1.0.0: integrity sha512-I5GTdSfhYfAPNztx2xJRQpG8cuDSNt599/7YUn7Gx/WxNMsG+a835k97TDkFgk123cwjfwINaZknkKkphx/f2A== hast-util-whitespace@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/hast-util-whitespace/-/hast-util-whitespace-2.0.0.tgz#4fc1086467cc1ef5ba20673cb6b03cec3a970f1c" - integrity sha512-Pkw+xBHuV6xFeJprJe2BBEoDV+AvQySaz3pPDRUs5PNZEMQjpXJJueqrpcHIXxnWTcAGi/UOCgVShlkY6kLoqg== + version "2.0.1" + resolved "https://registry.yarnpkg.com/hast-util-whitespace/-/hast-util-whitespace-2.0.1.tgz#0ec64e257e6fc216c7d14c8a1b74d27d650b4557" + integrity sha512-nAxA0v8+vXSBDt3AnRUNjyRIQ0rD+ntpbAp4LnPkumc5M9yUbSMa4XDU9Q6etY4f1Wp4bNgvc1yjiZtsTTrSng== + +hast-util-whitespace@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz#7778ed9d3c92dd9e8c5c8f648a49c21fc51cb621" + integrity sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw== + dependencies: + "@types/hast" "^3.0.0" hastscript@^6.0.0: version "6.0.0" @@ -5228,6 +5533,17 @@ hastscript@^6.0.0: property-information "^5.0.0" space-separated-tokens "^1.0.0" +hastscript@^8.0.0: + version "8.0.0" + resolved "https://registry.yarnpkg.com/hastscript/-/hastscript-8.0.0.tgz#4ef795ec8dee867101b9f23cc830d4baf4fd781a" + integrity sha512-dMOtzCEd3ABUeSIISmrETiKuyydk1w0pa+gE/uormcTpSYuaNJPbX1NU3JLyscSLjwAQM8bWMhhIlnCqnRvDTw== + dependencies: + "@types/hast" "^3.0.0" + comma-separated-tokens "^2.0.0" + hast-util-parse-selector "^4.0.0" + property-information "^6.0.0" + space-separated-tokens "^2.0.0" + he@^1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/he/-/he-1.2.0.tgz#84ae65fa7eafb165fddb61566ae14baf05664f0f" @@ -5271,11 +5587,16 @@ hpack.js@^2.1.6: wbuf "^1.1.0" html-entities@^2.3.2: - version "2.3.3" - resolved "https://registry.yarnpkg.com/html-entities/-/html-entities-2.3.3.tgz#117d7626bece327fc8baace8868fa6f5ef856e46" - integrity sha512-DV5Ln36z34NNTDgnz0EWGBLZENelNAtkiFA4kyNOG2tDI6Mz1uSWiq1wAKdyjnJwyDiDO7Fa2SO1CTxPXL8VxA== + version "2.5.2" + resolved 
"https://registry.yarnpkg.com/html-entities/-/html-entities-2.5.2.tgz#201a3cf95d3a15be7099521620d19dfb4f65359f" + integrity sha512-K//PSRMQk4FZ78Kyau+mZurHn3FH0Vwr+H36eE0rPbeYkRRi9YxceYPhuN60UwWorxyKHhqoAJl2OFKa4BVtaA== + +html-escaper@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/html-escaper/-/html-escaper-2.0.2.tgz#dfd60027da36a36dfcbe236262c00a5822681453" + integrity sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg== -html-minifier-terser@^6.0.2, html-minifier-terser@^6.1.0: +html-minifier-terser@^6.0.2: version "6.1.0" resolved "https://registry.yarnpkg.com/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz#bfc818934cc07918f6b3669f5774ecdfd48f32ab" integrity sha512-YXxSlJBZTP7RS3tWnQw74ooKa6L9b9i9QYXY21eUEvhZ3u9XLfv6OnFsQq6RxkhHygsaUMvYsZRV5rU/OVNZxw== @@ -5288,20 +5609,33 @@ html-minifier-terser@^6.0.2, html-minifier-terser@^6.1.0: relateurl "^0.2.7" terser "^5.10.0" -html-tags@^3.2.0: - version "3.2.0" - resolved "https://registry.yarnpkg.com/html-tags/-/html-tags-3.2.0.tgz#dbb3518d20b726524e4dd43de397eb0a95726961" - integrity sha512-vy7ClnArOZwCnqZgvv+ddgHgJiAFXe3Ge9ML5/mBctVJoUoYPCdxVucOywjDARn6CVoh3dRSFdPHy2sX80L0Wg== +html-minifier-terser@^7.2.0: + version "7.2.0" + resolved "https://registry.yarnpkg.com/html-minifier-terser/-/html-minifier-terser-7.2.0.tgz#18752e23a2f0ed4b0f550f217bb41693e975b942" + integrity sha512-tXgn3QfqPIpGl9o+K5tpcj3/MN4SfLtsx2GWwBC3SSd0tXQGyF3gsSqad8loJgKZGM3ZxbYDd5yhiBIdWpmvLA== + dependencies: + camel-case "^4.1.2" + clean-css "~5.3.2" + commander "^10.0.0" + entities "^4.4.0" + param-case "^3.0.4" + relateurl "^0.2.7" + terser "^5.15.1" -html-void-elements@^1.0.0: - version "1.0.5" - resolved "https://registry.yarnpkg.com/html-void-elements/-/html-void-elements-1.0.5.tgz#ce9159494e86d95e45795b166c2021c2cfca4483" - integrity sha512-uE/TxKuyNIcx44cIWnjr/rfIATDH7ZaOMmstu0CwhFG1Dunhlp4OC6/NMbhiwoq5BpW0ubi303qnEk/PZj614w== +html-tags@^3.3.1: + version "3.3.1" + resolved "https://registry.yarnpkg.com/html-tags/-/html-tags-3.3.1.tgz#a04026a18c882e4bba8a01a3d39cfe465d40b5ce" + integrity sha512-ztqyC3kLto0e9WbNp0aeP+M3kTt+nbaIveGmUxAtZa+8iFgKLUOD4YKM5j+f3QD89bra7UeumolZHKuOXnTmeQ== -html-webpack-plugin@^5.5.0: - version "5.5.0" - resolved "https://registry.yarnpkg.com/html-webpack-plugin/-/html-webpack-plugin-5.5.0.tgz#c3911936f57681c1f9f4d8b68c158cd9dfe52f50" - integrity sha512-sy88PC2cRTVxvETRgUHFrL4No3UxvcH8G1NepGhqaTT+GXN2kTamqasot0inS5hXeg1cMbFDt27zzo9p35lZVw== +html-void-elements@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/html-void-elements/-/html-void-elements-3.0.0.tgz#fc9dbd84af9e747249034d4d62602def6517f1d7" + integrity sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg== + +html-webpack-plugin@^5.5.3: + version "5.6.2" + resolved "https://registry.yarnpkg.com/html-webpack-plugin/-/html-webpack-plugin-5.6.2.tgz#174a67c8e55aa3fa2ba94c8e8e42894bfe4978ea" + integrity sha512-q7xp/FO9RGBVoTKNItkdX1jKLscLFkgn/dLVFNYbHVbfHLBk6DYW5nsQ8kCzIWcgKP/kUBocetjvav6lD8YfCQ== dependencies: "@types/html-minifier-terser" "^6.0.0" html-minifier-terser "^6.0.2" @@ -5320,19 +5654,19 @@ htmlparser2@^6.1.0: entities "^2.0.0" htmlparser2@^8.0.1: - version "8.0.1" - resolved "https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-8.0.1.tgz#abaa985474fcefe269bc761a779b544d7196d010" - integrity sha512-4lVbmc1diZC7GUJQtRQ5yBAeUCL1exyMwmForWkRLnwyzWBFxN633SALPMGYaWZvKe9j1pRZJpauvmxENSp/EA== + version "8.0.2" + resolved 
"https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-8.0.2.tgz#f002151705b383e62433b5cf466f5b716edaec21" + integrity sha512-GYdjWKDkbRLkZ5geuHs5NY1puJ+PXwP7+fHPRz06Eirsb9ugf6d8kkXav6ADhcODhFFPMIXyxkxSuMf3D6NCFA== dependencies: domelementtype "^2.3.0" - domhandler "^5.0.2" + domhandler "^5.0.3" domutils "^3.0.1" - entities "^4.3.0" + entities "^4.4.0" -http-cache-semantics@^4.0.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz#49e91c5cbf36c9b94bcfcd71c23d5249ec74e390" - integrity sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ== +http-cache-semantics@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz#abe02fcb2985460bf0323be664436ec3476a6d5a" + integrity sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ== http-deceiver@^1.2.7: version "1.2.7" @@ -5366,9 +5700,9 @@ http-parser-js@>=0.5.1: integrity sha512-SGeBX54F94Wgu5RH3X5jsDtf4eHyRogWX1XGT3b4HuW3tQPM4AaBzoUji/4AAJNXCEOWZ5O0DgZmJw1947gD5Q== http-proxy-middleware@^2.0.3: - version "2.0.6" - resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz#e1a4dd6979572c7ab5a4e4b55095d1f32a74963f" - integrity sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw== + version "2.0.7" + resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-2.0.7.tgz#915f236d92ae98ef48278a95dedf17e991936ec6" + integrity sha512-fgVY8AV7qU7z/MmXJ/rxwbrtQH4jBQ9m7kp3llF0liB7glmFeVZFBepQb32T3y8n8k2+AEYuMPCpinYW+/CuRA== dependencies: "@types/http-proxy" "^1.17.8" http-proxy "^1.18.1" @@ -5385,6 +5719,14 @@ http-proxy@^1.18.1: follow-redirects "^1.0.0" requires-port "^1.0.0" +http2-wrapper@^2.1.10: + version "2.2.1" + resolved "https://registry.yarnpkg.com/http2-wrapper/-/http2-wrapper-2.2.1.tgz#310968153dcdedb160d8b72114363ef5fce1f64a" + integrity sha512-V5nVw1PAOgfI3Lmeaj2Exmeg7fenjhRUgz1lPSezy1CuhPYbgQtbQj4jZfEAEMlaL+vupsvhjqCyjzob0yxsmQ== + dependencies: + quick-lru "^5.1.1" + resolve-alpn "^1.2.0" + human-signals@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/human-signals/-/human-signals-2.1.0.tgz#dc91fcba42e4d06e4abaed33b3e7a3c02f514ea0" @@ -5414,15 +5756,15 @@ ieee754@^1.2.1: resolved "https://registry.yarnpkg.com/ieee754/-/ieee754-1.2.1.tgz#8eb7a10a63fff25d15a57b001586d177d1b0d352" integrity sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA== -ignore@^5.2.0: - version "5.2.4" - resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.2.4.tgz#a291c0c6178ff1b960befe47fcdec301674a6324" - integrity sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ== +ignore@^5.2.0, ignore@^5.2.4: + version "5.3.2" + resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.3.2.tgz#3cd40e729f3643fd87cb04e50bf0eb722bc596f5" + integrity sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g== -image-size@^1.0.1: - version "1.0.2" - resolved "https://registry.yarnpkg.com/image-size/-/image-size-1.0.2.tgz#d778b6d0ab75b2737c1556dd631652eb963bc486" - integrity sha512-xfOoWjceHntRb3qFCrh5ZFORYH8XCdYpASltMhZ/Q0KZiOwjdE/Yl2QCiWdwD+lygV5bMCvauzgu5PxBX/Yerg== +image-size@^1.0.2: + version "1.1.1" + resolved "https://registry.yarnpkg.com/image-size/-/image-size-1.1.1.tgz#ddd67d4dc340e52ac29ce5f546a09f4e29e840ac" + integrity 
sha512-541xKlUw6jr/6gGuk92F+mYM5zaFAc5ahphvkqvNe2bQ6gVBkd6bfrmVJ2t4KDAfikAYZyIqTnktX3i6/aQDrQ== dependencies: queue "6.0.2" @@ -5432,9 +5774,9 @@ immediate@^3.2.3: integrity sha512-HR7EVodfFUdQCTIeySw+WDRFJlPcLOJbXfwwZ7Oom6tjsvZ3bOkCDJHehQC3nxJrv7+f9XecwazynjU8e4Vw3Q== immer@^9.0.7: - version "9.0.17" - resolved "https://registry.yarnpkg.com/immer/-/immer-9.0.17.tgz#7cfe8fbb8b461096444e9da7a5ec4a67c6c4adf4" - integrity sha512-+hBruaLSQvkPfxRiTLK/mi4vLH+/VQS6z2KJahdoxlleFOI8ARqzOF17uy12eFDlqWmPoygwc5evgwcp+dlHhg== + version "9.0.21" + resolved "https://registry.yarnpkg.com/immer/-/immer-9.0.21.tgz#1e025ea31a40f24fb064f1fef23e931496330176" + integrity sha512-bc4NBHqOqSfRW7POMkHd51LvClaeMXpm8dx0e8oE2GORbq5aRK7Bxl4FyzVLdGtLmvLKL7BTDBG5ACQm4HWjTA== import-fresh@^3.1.0, import-fresh@^3.2.1, import-fresh@^3.3.0: version "3.3.0" @@ -5444,10 +5786,10 @@ import-fresh@^3.1.0, import-fresh@^3.2.1, import-fresh@^3.3.0: parent-module "^1.0.0" resolve-from "^4.0.0" -import-lazy@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/import-lazy/-/import-lazy-2.1.0.tgz#05698e3d45c88e8d7e9d92cb0584e77f096f3e43" - integrity sha512-m7ZEHgtw69qOGw+jwxXkHlrlIPdTGkyh66zXZ1ajZbxkDBNjSY/LGbmjc7h0s2ELsUDTAhFr55TrPSSqJGPG0A== +import-lazy@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/import-lazy/-/import-lazy-4.0.0.tgz#e8eb627483a0a43da3c03f3e35548be5cb0cc153" + integrity sha512-rKtvo6a868b5Hu3heneU+L4yEQ4jYKLtjpnPeUdK7h0yzXGmyBTypknlkCvHFBqfX9YlorEiMM6Dnq/5atfHkw== imurmurhash@^0.1.4: version "0.1.4" @@ -5459,10 +5801,10 @@ indent-string@^4.0.0: resolved "https://registry.yarnpkg.com/indent-string/-/indent-string-4.0.0.tgz#624f8f4497d619b2d9768531d58f4122854d7251" integrity sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg== -infima@0.2.0-alpha.42: - version "0.2.0-alpha.42" - resolved "https://registry.yarnpkg.com/infima/-/infima-0.2.0-alpha.42.tgz#f6e86a655ad40877c6b4d11b2ede681eb5470aa5" - integrity sha512-ift8OXNbQQwtbIt6z16KnSWP7uJ/SysSMFI4F87MNRTicypfl4Pv3E2OGVv6N3nSZFJvA8imYulCBS64iyHYww== +infima@0.2.0-alpha.44: + version "0.2.0-alpha.44" + resolved "https://registry.yarnpkg.com/infima/-/infima-0.2.0-alpha.44.tgz#9cd9446e473b44d49763f48efabe31f32440861d" + integrity sha512-tuRkUSO/lB3rEhLJk25atwAjgLuzq070+pOW8XcvpHky/YbENnRRdPd85IBkyeTgttmOy5ah+yHYsK1HhUd4lQ== inflight@^1.0.4: version "1.0.6" @@ -5472,7 +5814,7 @@ inflight@^1.0.4: once "^1.3.0" wrappy "1" -inherits@2, inherits@2.0.4, inherits@^2.0.0, inherits@^2.0.1, inherits@^2.0.3, inherits@~2.0.3: +inherits@2, inherits@2.0.4, inherits@^2.0.1, inherits@^2.0.3, inherits@~2.0.3: version "2.0.4" resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c" integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ== @@ -5487,7 +5829,7 @@ ini@2.0.0: resolved "https://registry.yarnpkg.com/ini/-/ini-2.0.0.tgz#e5fd556ecdd5726be978fa1001862eacb0a94bc5" integrity sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA== -ini@^1.3.5, ini@~1.3.0: +ini@^1.3.4, ini@^1.3.5, ini@~1.3.0: version "1.3.8" resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.8.tgz#a29da425b48806f34767a4efce397269af28432c" integrity sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew== @@ -5497,13 +5839,18 @@ inline-style-parser@0.1.1: resolved 
"https://registry.yarnpkg.com/inline-style-parser/-/inline-style-parser-0.1.1.tgz#ec8a3b429274e9c0a1f1c4ffa9453a7fef72cea1" integrity sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q== -internal-slot@^1.0.3: - version "1.0.4" - resolved "https://registry.yarnpkg.com/internal-slot/-/internal-slot-1.0.4.tgz#8551e7baf74a7a6ba5f749cfb16aa60722f0d6f3" - integrity sha512-tA8URYccNzMo94s5MQZgH8NB/XTa6HsOo0MLfXTKKEnHVVdegzaQoFZ7Jp44bdvLvY2waT5dc+j5ICEswhi7UQ== +inline-style-parser@0.2.4: + version "0.2.4" + resolved "https://registry.yarnpkg.com/inline-style-parser/-/inline-style-parser-0.2.4.tgz#f4af5fe72e612839fcd453d989a586566d695f22" + integrity sha512-0aO8FkhNZlj/ZIbNi7Lxxr12obT7cL1moPfE4tg1LkX7LlLfC6DeX4l2ZEud1ukP9jNQyNnfzQVqwbwmAATY4Q== + +internal-slot@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/internal-slot/-/internal-slot-1.0.7.tgz#c06dcca3ed874249881007b0a5523b172a190802" + integrity sha512-NGnrKwXzSms2qUUih/ILZ5JBqNTSa1+ZmP6flaIp6KmSElgE9qdndzS3cqjrDovwFdmwsGsLdeFgB6suw+1e9g== dependencies: - get-intrinsic "^1.1.3" - has "^1.0.3" + es-errors "^1.3.0" + hasown "^2.0.0" side-channel "^1.0.4" "internmap@1 - 2": @@ -5511,6 +5858,11 @@ internal-slot@^1.0.3: resolved "https://registry.yarnpkg.com/internmap/-/internmap-2.0.3.tgz#6685f23755e43c524e251d29cbc97248e3061009" integrity sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg== +internmap@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/internmap/-/internmap-1.0.1.tgz#0017cc8a3b99605f0302f2b198d272e015e5df95" + integrity sha512-lDB5YccMydFBtasVtxnZ3MRBHuaoE8GKsppq+EchKL2U4nK/DmEpPHNH8MZe5HkMtpSiTSOZwfN0tzYjO/lJEw== + interpret@^1.0.0: version "1.4.0" resolved "https://registry.yarnpkg.com/interpret/-/interpret-1.4.0.tgz#665ab8bc4da27a774a40584e812e3e0fa45b1a1e" @@ -5529,28 +5881,15 @@ ipaddr.js@1.9.1: integrity sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g== ipaddr.js@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-2.0.1.tgz#eca256a7a877e917aeb368b0a7497ddf42ef81c0" - integrity sha512-1qTgH9NG+IIJ4yfKs2e6Pp1bZg8wbDbKHT21HrLIeYBTRLgMYKnMTPAuI3Lcs61nfx5h1xlXnbJtH1kX5/d/ng== - -is-alphabetical@1.0.4, is-alphabetical@^1.0.0: - version "1.0.4" - resolved "https://registry.yarnpkg.com/is-alphabetical/-/is-alphabetical-1.0.4.tgz#9e7d6b94916be22153745d184c298cbf986a686d" - integrity sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg== + version "2.2.0" + resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-2.2.0.tgz#d33fa7bac284f4de7af949638c9d68157c6b92e8" + integrity sha512-Ag3wB2o37wslZS19hZqorUnrnzSkpOVy+IiiDEiTqNubEYpYuHWIf6K4psgN2ZWKExS4xhVCrRVfb/wfW8fWJA== is-alphabetical@^2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/is-alphabetical/-/is-alphabetical-2.0.1.tgz#01072053ea7c1036df3c7d19a6daaec7f19e789b" integrity sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ== -is-alphanumerical@^1.0.0: - version "1.0.4" - resolved "https://registry.yarnpkg.com/is-alphanumerical/-/is-alphanumerical-1.0.4.tgz#7eb9a2431f855f6b1ef1a78e326df515696c4dbf" - integrity sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A== - dependencies: - is-alphabetical "^1.0.0" - is-decimal "^1.0.0" - is-alphanumerical@^2.0.0: version "2.0.1" resolved 
"https://registry.yarnpkg.com/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz#7c03fbe96e3e931113e57f964b0a368cc2dfd875" @@ -5567,6 +5906,14 @@ is-arguments@^1.0.4: call-bind "^1.0.2" has-tostringtag "^1.0.0" +is-array-buffer@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/is-array-buffer/-/is-array-buffer-3.0.4.tgz#7a1f92b3d61edd2bc65d24f130530ea93d7fae98" + integrity sha512-wcjaerHw0ydZwfhiKbXJWLDY8A7yV7KhjQOpb83hGgGfId/aQa4TOvwyzn2PuswW2gPCYEL/nEAiSVpdOj1lXw== + dependencies: + call-bind "^1.0.2" + get-intrinsic "^1.2.1" + is-arrayish@^0.2.1: version "0.2.1" resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d" @@ -5604,19 +5951,26 @@ is-callable@^1.1.3, is-callable@^1.1.4, is-callable@^1.2.7: resolved "https://registry.yarnpkg.com/is-callable/-/is-callable-1.2.7.tgz#3bc2a85ea742d9e36205dcacdd72ca1fdc51b055" integrity sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA== -is-ci@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/is-ci/-/is-ci-2.0.0.tgz#6bc6334181810e04b5c22b3d589fdca55026404c" - integrity sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w== +is-ci@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/is-ci/-/is-ci-3.0.1.tgz#db6ecbed1bd659c43dac0f45661e7674103d1867" + integrity sha512-ZYvCgrefwqoQ6yTyYUbQu64HsITZ3NfKX1lzaEYdkTDcfKzzCI/wthRRYKkdjHKFVgNiXKAKm65Zo1pk2as/QQ== + dependencies: + ci-info "^3.2.0" + +is-core-module@^2.13.0: + version "2.15.1" + resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.15.1.tgz#a7363a25bee942fefab0de13bf6aa372c82dcc37" + integrity sha512-z0vtXSwucUJtANQWldhbtbt7BnL0vxiFjIdDLAatwhDYty2bad6s+rijD6Ri4YuYJubLzIJLUidCh09e1djEVQ== dependencies: - ci-info "^2.0.0" + hasown "^2.0.2" -is-core-module@^2.9.0: - version "2.11.0" - resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.11.0.tgz#ad4cb3e3863e814523c96f3f58d26cc570ff0144" - integrity sha512-RRjxlvLDkD1YJwDbroBHMb+cukurkDWNyHx7D3oNB5x9rb5ogcksMC5wHCadcXoo67gVr/+3GFySh3134zi6rw== +is-data-view@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-data-view/-/is-data-view-1.0.1.tgz#4b4d3a511b70f3dc26d42c03ca9ca515d847759f" + integrity sha512-AHkaJrsUVW6wq6JS8y3JnM/GJF/9cf+k20+iDzlSaJrinEo5+7vRiteOSwBhHRiAyQATN1AmY4hwzxJKPmYf+w== dependencies: - has "^1.0.3" + is-typed-array "^1.1.13" is-date-object@^1.0.1: version "1.0.5" @@ -5625,11 +5979,6 @@ is-date-object@^1.0.1: dependencies: has-tostringtag "^1.0.0" -is-decimal@^1.0.0: - version "1.0.4" - resolved "https://registry.yarnpkg.com/is-decimal/-/is-decimal-1.0.4.tgz#65a3a5958a1c5b63a706e1b333d7cd9f630d3fa5" - integrity sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw== - is-decimal@^2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/is-decimal/-/is-decimal-2.0.1.tgz#9469d2dc190d0214fd87d78b78caecc0cc14eef7" @@ -5669,11 +6018,6 @@ is-glob@^4.0.1, is-glob@^4.0.3, is-glob@~4.0.1: dependencies: is-extglob "^2.1.1" -is-hexadecimal@^1.0.0: - version "1.0.4" - resolved "https://registry.yarnpkg.com/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz#cc35c97588da4bd49a8eedd6bc4082d44dcb23a7" - integrity sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw== - is-hexadecimal@^2.0.0: version "2.0.1" resolved 
"https://registry.yarnpkg.com/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz#86b5bf668fca307498d319dfc03289d781a90027" @@ -5687,7 +6031,7 @@ is-installed-globally@^0.4.0: global-dirs "^3.0.0" is-path-inside "^3.0.2" -is-nan@^1.2.1: +is-nan@^1.3.2: version "1.3.2" resolved "https://registry.yarnpkg.com/is-nan/-/is-nan-1.3.2.tgz#043a54adea31748b55b6cd4e09aadafa69bd9e1d" integrity sha512-E+zBKpQ2t6MEo1VsonYmluk9NxGrbzpeeLC2xIViuO2EjU2xsXsBPwTr3Ykv9l08UYEVEdWeRZNouaZqF6RN0w== @@ -5695,15 +6039,15 @@ is-nan@^1.2.1: call-bind "^1.0.0" define-properties "^1.1.3" -is-negative-zero@^2.0.2: - version "2.0.2" - resolved "https://registry.yarnpkg.com/is-negative-zero/-/is-negative-zero-2.0.2.tgz#7bf6f03a28003b8b3965de3ac26f664d765f3150" - integrity sha512-dqJvarLawXsFbNDeJW7zAz8ItJ9cd28YufuuFzh0G8pNHjJMnY08Dv7sYX2uF5UpQOwieAeOExEYAWWfu7ZZUA== +is-negative-zero@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/is-negative-zero/-/is-negative-zero-2.0.3.tgz#ced903a027aca6381b777a5743069d7376a49747" + integrity sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw== -is-npm@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/is-npm/-/is-npm-5.0.0.tgz#43e8d65cc56e1b67f8d47262cf667099193f45a8" - integrity sha512-WW/rQLOazUq+ST/bCAVBp/2oMERWLsR7OrKyt052dNDk4DHcDE0/7QSXITlmi+VBcV13DfIbysG3tZJm5RfdBA== +is-npm@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/is-npm/-/is-npm-6.0.0.tgz#b59e75e8915543ca5d881ecff864077cba095261" + integrity sha512-JEjxbSmtPSt1c8XTkVrlujcXdKV1/tvuQ7GwKcAlyiVLeYFQ2VHat8xfrDJsIkhCdF/tZ7CiIR3sy141c6+gPQ== is-number-object@^1.0.4: version "1.0.7" @@ -5760,9 +6104,9 @@ is-plain-object@^2.0.4: isobject "^3.0.1" is-reference@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/is-reference/-/is-reference-3.0.0.tgz#b1380c03d96ddf7089709781e3208fceb0c92cd6" - integrity sha512-Eo1W3wUoHWoCoVM4GVl/a+K0IgiqE5aIo4kJABFyMum1ZORlPkC+UC357sSQUL5w5QCE5kCC9upl75b7+7CY/Q== + version "3.0.2" + resolved "https://registry.yarnpkg.com/is-reference/-/is-reference-3.0.2.tgz#154747a01f45cd962404ee89d43837af2cba247c" + integrity sha512-v3rht/LgVcsdZa3O2Nqs+NMowLOxeOm7Ay9+/ARQ2F+qEoANRcqrjAZKGN0v8ymUetZGgkp26LTnGT7H0Qo9Pg== dependencies: "@types/estree" "*" @@ -5784,12 +6128,12 @@ is-root@^2.1.0: resolved "https://registry.yarnpkg.com/is-root/-/is-root-2.1.0.tgz#809e18129cf1129644302a4f8544035d51984a9c" integrity sha512-AGOriNp96vNBd3HtU+RzFEc75FfR5ymiYv8E553I71SCeXBiMsVDUtdio1OEFvrPyLIQ9tVR5RxXIFe5PUFjMg== -is-shared-array-buffer@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/is-shared-array-buffer/-/is-shared-array-buffer-1.0.2.tgz#8f259c573b60b6a32d4058a1a07430c0a7344c79" - integrity sha512-sqN2UDu1/0y6uvXyStCOzyhAjCSlHceFoMKJW8W9EU9cvic/QdsZ0kEU93HEy3IUEFZIiH/3w+AH/UQbPHNdhA== +is-shared-array-buffer@^1.0.2, is-shared-array-buffer@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/is-shared-array-buffer/-/is-shared-array-buffer-1.0.3.tgz#1237f1cba059cdb62431d378dcc37d9680181688" + integrity sha512-nA2hv5XIhLR3uVzDDfCIknerhx8XUKnstuOERPNNIinXG7v9u+ohXF67vxm4TPTEPU6lm61ZkwP3c9PCB97rhg== dependencies: - call-bind "^1.0.2" + call-bind "^1.0.7" is-stream@^2.0.0: version "2.0.1" @@ -5810,16 +6154,12 @@ is-symbol@^1.0.2, is-symbol@^1.0.3: dependencies: has-symbols "^1.0.2" -is-typed-array@^1.1.10, is-typed-array@^1.1.3: - version "1.1.10" - resolved "https://registry.yarnpkg.com/is-typed-array/-/is-typed-array-1.1.10.tgz#36a5b5cb4189b575d1a3e4b08536bfb485801e3f" - integrity 
sha512-PJqgEHiWZvMpaFZ3uTc8kHPM4+4ADTlDniuQL7cU/UDA0Ql7F70yGfHph3cLNe+c9toaigv+DFzTJKhc2CtO6A== +is-typed-array@^1.1.13, is-typed-array@^1.1.3: + version "1.1.13" + resolved "https://registry.yarnpkg.com/is-typed-array/-/is-typed-array-1.1.13.tgz#d6c5ca56df62334959322d7d7dd1cca50debe229" + integrity sha512-uZ25/bUAlUY5fR4OKT4rZQEBrzQWYV9ZJYGGsUmEJ6thodVJ1HX64ePQ6Z0qPWP+m+Uq6e9UugrE38jeYsDSMw== dependencies: - available-typed-arrays "^1.0.5" - call-bind "^1.0.2" - for-each "^0.3.3" - gopd "^1.0.1" - has-tostringtag "^1.0.0" + which-typed-array "^1.1.14" is-typedarray@^1.0.0: version "1.0.0" @@ -5833,16 +6173,6 @@ is-weakref@^1.0.2: dependencies: call-bind "^1.0.2" -is-whitespace-character@^1.0.0: - version "1.0.4" - resolved "https://registry.yarnpkg.com/is-whitespace-character/-/is-whitespace-character-1.0.4.tgz#0858edd94a95594c7c9dd0b5c174ec6e45ee4aa7" - integrity sha512-SDweEzfIZM0SJV0EUga669UTKlmL0Pq8Lno0QDQsPnvECB3IM2aP0gdx5TrU0A01MAPfViaZiI2V1QMZLaKK5w== - -is-word-character@^1.0.0: - version "1.0.4" - resolved "https://registry.yarnpkg.com/is-word-character/-/is-word-character-1.0.4.tgz#ce0e73216f98599060592f62ff31354ddbeb0230" - integrity sha512-5SMO8RVennx3nZrqtKwCGyyetPE9VDba5ugvKLaD4KopPG5kR4mQ7tNt/r7feL5yt5h3lpuBbIUmCOG2eSzXHA== - is-wsl@^2.2.0: version "2.2.0" resolved "https://registry.yarnpkg.com/is-wsl/-/is-wsl-2.2.0.tgz#74a4c76e77ca9fd3f932f290c17ea326cd157271" @@ -5850,16 +6180,21 @@ is-wsl@^2.2.0: dependencies: is-docker "^2.0.0" -is-yarn-global@^0.3.0: - version "0.3.0" - resolved "https://registry.yarnpkg.com/is-yarn-global/-/is-yarn-global-0.3.0.tgz#d502d3382590ea3004893746754c89139973e232" - integrity sha512-VjSeb/lHmkoyd8ryPVIKvOCn4D1koMqY+vqyjjUfc3xyKtP4dYOxM44sZrnqQSzSds3xyOrUTLTC9LVCVgLngw== +is-yarn-global@^0.4.0: + version "0.4.1" + resolved "https://registry.yarnpkg.com/is-yarn-global/-/is-yarn-global-0.4.1.tgz#b312d902b313f81e4eaf98b6361ba2b45cd694bb" + integrity sha512-/kppl+R+LO5VmhYSEWARUFjodS25D68gvj8W7z0I7OWhUla5xWu8KL6CtB2V0R6yqhnRgbcaREMr4EEM6htLPQ== isarray@0.0.1: version "0.0.1" resolved "https://registry.yarnpkg.com/isarray/-/isarray-0.0.1.tgz#8a18acfca9a8f4177e09abfc6038939b05d1eedf" integrity sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ== +isarray@^2.0.5: + version "2.0.5" + resolved "https://registry.yarnpkg.com/isarray/-/isarray-2.0.5.tgz#8af1e4c1221244cc62459faf38940d4e644a5723" + integrity sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw== + isarray@~1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11" @@ -5875,12 +6210,12 @@ isobject@^3.0.1: resolved "https://registry.yarnpkg.com/isobject/-/isobject-3.0.1.tgz#4e431e92b11a9731636aa1f9c8d1ccbcfdab78df" integrity sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg== -jest-util@^29.3.1: - version "29.3.1" - resolved "https://registry.yarnpkg.com/jest-util/-/jest-util-29.3.1.tgz#1dda51e378bbcb7e3bc9d8ab651445591ed373e1" - integrity sha512-7YOVZaiX7RJLv76ZfHt4nbNEzzTRiMW/IiOG7ZOKmTXmoGBxUDefgMAxQubu6WPVqP5zSzAdZG0FfLcC7HOIFQ== +jest-util@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-util/-/jest-util-29.7.0.tgz#23c2b62bfb22be82b44de98055802ff3710fc0bc" + integrity sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA== dependencies: - "@jest/types" "^29.3.1" + "@jest/types" "^29.6.3" "@types/node" "*" chalk "^4.0.0" 
ci-info "^3.2.0" @@ -5896,25 +6231,30 @@ jest-worker@^27.4.5: merge-stream "^2.0.0" supports-color "^8.0.0" -jest-worker@^29.1.2: - version "29.3.1" - resolved "https://registry.yarnpkg.com/jest-worker/-/jest-worker-29.3.1.tgz#e9462161017a9bb176380d721cab022661da3d6b" - integrity sha512-lY4AnnmsEWeiXirAIA0c9SDPbuCBq8IYuDVL8PMm0MZ2PEs2yPvRA/J64QBXuZp7CYKrDM/rmNrc9/i3KJQncw== +jest-worker@^29.4.3: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-worker/-/jest-worker-29.7.0.tgz#acad073acbbaeb7262bd5389e1bcf43e10058d4a" + integrity sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw== dependencies: "@types/node" "*" - jest-util "^29.3.1" + jest-util "^29.7.0" merge-stream "^2.0.0" supports-color "^8.0.0" -joi@^17.6.0: - version "17.7.0" - resolved "https://registry.yarnpkg.com/joi/-/joi-17.7.0.tgz#591a33b1fe1aca2bc27f290bcad9b9c1c570a6b3" - integrity sha512-1/ugc8djfn93rTE3WRKdCzGGt/EtiYKxITMO4Wiv6q5JL1gl9ePt4kBsl1S499nbosspfctIQTpYIhSmHA3WAg== +jiti@^1.20.0: + version "1.21.6" + resolved "https://registry.yarnpkg.com/jiti/-/jiti-1.21.6.tgz#6c7f7398dd4b3142767f9a168af2f317a428d268" + integrity sha512-2yTgeWTWzMWkHu6Jp9NKgePDaYHbntiwvYuuJLbbN9vl7DC9DvXKOB2BC3ZZ92D3cvV/aflH0osDfwpHepQ53w== + +joi@^17.9.2: + version "17.13.3" + resolved "https://registry.yarnpkg.com/joi/-/joi-17.13.3.tgz#0f5cc1169c999b30d344366d384b12d92558bcec" + integrity sha512-otDA4ldcIx+ZXsKHWmp0YizCweVRZG96J10b0FevjfuncLO1oX59THoAmHkNubYJ+9gWsYsp5k8v4ib6oDv1fA== dependencies: - "@hapi/hoek" "^9.0.0" - "@hapi/topo" "^5.0.0" - "@sideway/address" "^4.1.3" - "@sideway/formula" "^3.0.0" + "@hapi/hoek" "^9.3.0" + "@hapi/topo" "^5.1.0" + "@sideway/address" "^4.1.5" + "@sideway/formula" "^3.0.1" "@sideway/pinpoint" "^2.0.0" "js-tokens@^3.0.0 || ^4.0.0", js-tokens@^4.0.0: @@ -5937,20 +6277,20 @@ js-yaml@^4.1.0: dependencies: argparse "^2.0.1" -jsesc@^2.5.1: - version "2.5.2" - resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-2.5.2.tgz#80564d2e483dacf6e8ef209650a67df3f0c283a4" - integrity sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA== +jsesc@^3.0.2, jsesc@~3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-3.0.2.tgz#bb8b09a6597ba426425f2e4a07245c3d00b9343e" + integrity sha512-xKqzzWXDttJuOcawBt4KnKHHIf5oQ/Cxax+0PWFG+DFDgHNAdi+TXECADI+RYiFUMmx8792xsMbbgXj4CwnP4g== jsesc@~0.5.0: version "0.5.0" resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-0.5.0.tgz#e7dee66e35d6fc16f710fe91d5cf69f70f08911d" integrity sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA== -json-buffer@3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/json-buffer/-/json-buffer-3.0.0.tgz#5b1f397afc75d677bde8bcfc0e47e1f9a3d9a898" - integrity sha512-CuUqjv0FUZIdXkHPI8MezCnFCdaTAacej1TZYulLoAg1h/PhwkdXFN4V/gzY4g+fMBCOV2xF+rp7t2XD2ns/NQ== +json-buffer@3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/json-buffer/-/json-buffer-3.0.1.tgz#9338802a30d3b6605fbe0613e094008ca8c05a13" + integrity sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ== json-parse-even-better-errors@^2.3.0, json-parse-even-better-errors@^2.3.1: version "2.3.1" @@ -5967,7 +6307,7 @@ json-schema-traverse@^1.0.0: resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz#ae7bcb3656ab77a73ba5c49bf654f38e6b6860e2" integrity sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug== 
-json5@^2.1.2, json5@^2.2.1: +json5@^2.1.2, json5@^2.2.3: version "2.2.3" resolved "https://registry.yarnpkg.com/json5/-/json5-2.2.3.tgz#78cd6f1a19bdc12b73db5ad0c61efd66c1e29283" integrity sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg== @@ -5981,17 +6321,24 @@ jsonfile@^6.0.1: optionalDependencies: graceful-fs "^4.1.6" -keyv@^3.0.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/keyv/-/keyv-3.1.0.tgz#ecc228486f69991e49e9476485a5be1e8fc5c4d9" - integrity sha512-9ykJ/46SN/9KPM/sichzQ7OvXyGDYKGTaDlKMGCAlg2UK8KRy4jb0d8sFc+0Tt0YYnThq8X2RZgCg74RPxgcVA== +katex@^0.16.9: + version "0.16.11" + resolved "https://registry.yarnpkg.com/katex/-/katex-0.16.11.tgz#4bc84d5584f996abece5f01c6ad11304276a33f5" + integrity sha512-RQrI8rlHY92OLf3rho/Ts8i/XvjgguEjOkO1BEXcU3N8BqPpSzBNwV/G0Ukr+P/l3ivvJUE/Fa/CwbS6HesGNQ== + dependencies: + commander "^8.3.0" + +keyv@^4.5.3: + version "4.5.4" + resolved "https://registry.yarnpkg.com/keyv/-/keyv-4.5.4.tgz#a879a99e29452f942439f2a405e3af8b31d4de93" + integrity sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw== dependencies: - json-buffer "3.0.0" + json-buffer "3.0.1" khroma@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/khroma/-/khroma-2.0.0.tgz#7577de98aed9f36c7a474c4d453d94c0d6c6588b" - integrity sha512-2J8rDNlQWbtiNYThZRvmMv5yt44ZakX+Tz5ZIp/mN1pt4snn+m030Va5Z4v8xA0cQFDXBwO/8i42xL4QPsVk3g== + version "2.1.0" + resolved "https://registry.yarnpkg.com/khroma/-/khroma-2.1.0.tgz#45f2ce94ce231a437cf5b63c2e886e6eb42bbbb1" + integrity sha512-Ls993zuzfayK269Svk9hzpeGUKob/sIgZzyHYdjQoAdQetRKpOLj+k/QQQ/6Qi0Yz65mlROrfd+Ev+1+7dz9Kw== kind-of@^6.0.0, kind-of@^6.0.2: version "6.0.3" @@ -6008,27 +6355,35 @@ kleur@^4.0.3: resolved "https://registry.yarnpkg.com/kleur/-/kleur-4.1.5.tgz#95106101795f7050c6c650f350c683febddb1780" integrity sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ== -klona@^2.0.5: - version "2.0.5" - resolved "https://registry.yarnpkg.com/klona/-/klona-2.0.5.tgz#d166574d90076395d9963aa7a928fabb8d76afbc" - integrity sha512-pJiBpiXMbt7dkzXe8Ghj/u4FfXOOa98fPW+bihOJ4SjnoijweJrNThJfd3ifXpXhREjpoF2mZVH1GfS9LV3kHQ== +latest-version@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/latest-version/-/latest-version-7.0.0.tgz#843201591ea81a4d404932eeb61240fe04e9e5da" + integrity sha512-KvNT4XqAMzdcL6ka6Tl3i2lYeFDgXNCuIX+xNx6ZMVR1dFq+idXd9FLKNMOIx0t9mJ9/HudyX4oZWXZQ0UJHeg== + dependencies: + package-json "^8.1.0" -latest-version@^5.1.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/latest-version/-/latest-version-5.1.0.tgz#119dfe908fe38d15dfa43ecd13fa12ec8832face" - integrity sha512-weT+r0kTkRQdCdYCNtkMwWXQTMEswKrFBkm4ckQOMVhhqhIMI1UT2hMj+1iigIhgSZm5gTmrRXBNoGUgaTY1xA== +launch-editor@^2.6.0: + version "2.9.1" + resolved "https://registry.yarnpkg.com/launch-editor/-/launch-editor-2.9.1.tgz#253f173bd441e342d4344b4dae58291abb425047" + integrity sha512-Gcnl4Bd+hRO9P9icCP/RVVT2o8SFlPXofuCxvA2SaZuH45whSvf5p8x5oih5ftLiVhEI4sp5xDY+R+b3zJBh5w== dependencies: - package-json "^6.3.0" + picocolors "^1.0.0" + shell-quote "^1.8.1" + +layout-base@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/layout-base/-/layout-base-1.0.2.tgz#1291e296883c322a9dd4c5dd82063721b53e26e2" + integrity sha512-8h2oVEZNktL4BH2JCOI90iD1yXwL6iNW7KcCKT2QZgQJR2vbqDsldCTPRU9NifTCqHZci57XvQQ15YTu+sTYPg== leven@^3.1.0: version "3.1.0" resolved 
"https://registry.yarnpkg.com/leven/-/leven-3.1.0.tgz#77891de834064cccba82ae7842bb6b14a13ed7f2" integrity sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A== -lilconfig@^2.0.3: - version "2.0.6" - resolved "https://registry.yarnpkg.com/lilconfig/-/lilconfig-2.0.6.tgz#32a384558bd58af3d4c6e077dd1ad1d397bc69d4" - integrity sha512-9JROoBW7pobfsx+Sq2JsASvCo6Pfo6WWoUW79HuB1BCoBXD4PLWJPqDF6fNj67pqBYTbAHkE57M1kS/+L1neOg== +lilconfig@^3.1.1: + version "3.1.2" + resolved "https://registry.yarnpkg.com/lilconfig/-/lilconfig-3.1.2.tgz#e4a7c3cb549e3a606c8dcc32e5ae1005e62c05cb" + integrity sha512-eop+wDAvpItUys0FWkHIKeC9ybYrTGbU41U5K7+bttZZeohvnY7M9dZ5kB21GNWiFT2q1OoPTvncPCgSOVO5ow== lines-and-columns@^1.1.6: version "1.2.4" @@ -6050,9 +6405,9 @@ loader-utils@^2.0.0: json5 "^2.1.2" loader-utils@^3.2.0: - version "3.2.1" - resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-3.2.1.tgz#4fb104b599daafd82ef3e1a41fb9265f87e1f576" - integrity sha512-ZvFw1KWS3GVyYBYb7qkmRM/WwL2TQQBxgCK62rlvm4WpVQ23Nb4tYjApUlfjrEGvOs7KHEsmyUn75OHZrJMWPw== + version "3.3.1" + resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-3.3.1.tgz#735b9a19fd63648ca7adbd31c2327dfe281304e5" + integrity sha512-FMJTLMXfCLMLfJxcX9PFqX5qD88Z5MRGaZCVzfuqeZSPsyiBzs+pahDQjbIWz2QIzPZz0NX9Zy4FX3lmK6YHIg== locate-path@^3.0.0: version "3.0.0" @@ -6062,13 +6417,6 @@ locate-path@^3.0.0: p-locate "^3.0.0" path-exists "^3.0.0" -locate-path@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-5.0.0.tgz#1afba396afd676a6d42504d0a67a3a7eb9f62aa0" - integrity sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g== - dependencies: - p-locate "^4.1.0" - locate-path@^6.0.0: version "6.0.0" resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-6.0.0.tgz#55321eb309febbc59c4801d931a72452a681d286" @@ -6076,37 +6424,39 @@ locate-path@^6.0.0: dependencies: p-locate "^5.0.0" +locate-path@^7.1.0: + version "7.2.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-7.2.0.tgz#69cb1779bd90b35ab1e771e1f2f89a202c2a8a8a" + integrity sha512-gvVijfZvn7R+2qyPX8mAuKcFGDf6Nc61GdvGafQsHL0sBIxfKzA+usWn4GFC/bk+QdwPUD4kWFJLhElipq+0VA== + dependencies: + p-locate "^6.0.0" + lodash-es@^4.17.21: version "4.17.21" resolved "https://registry.yarnpkg.com/lodash-es/-/lodash-es-4.17.21.tgz#43e626c46e6591b7750beb2b50117390c609e3ee" integrity sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw== -lodash.curry@^4.0.1: - version "4.1.1" - resolved "https://registry.yarnpkg.com/lodash.curry/-/lodash.curry-4.1.1.tgz#248e36072ede906501d75966200a86dab8b23170" - integrity sha512-/u14pXGviLaweY5JI0IUzgzF2J6Ne8INyzAZjImcryjgkZ+ebruBxy2/JaOOkTqScddcYtakjhSaeemV8lR0tA== - lodash.debounce@^4.0.8: version "4.0.8" resolved "https://registry.yarnpkg.com/lodash.debounce/-/lodash.debounce-4.0.8.tgz#82d79bff30a67c4005ffd5e2515300ad9ca4d7af" integrity sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow== -lodash.flow@^3.3.0: - version "3.5.0" - resolved "https://registry.yarnpkg.com/lodash.flow/-/lodash.flow-3.5.0.tgz#87bf40292b8cf83e4e8ce1a3ae4209e20071675a" - integrity sha512-ff3BX/tSioo+XojX4MOsOMhJw0nZoUEF011LX8g8d3gvjVbxd89cCio4BCXronjxcTUIJUoqKEUA+n4CqvvRPw== +lodash.escape@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/lodash.escape/-/lodash.escape-4.0.1.tgz#c9044690c21e04294beaa517712fded1fa88de98" + integrity 
sha512-nXEOnb/jK9g0DYMr1/Xvq6l5xMD7GDG55+GSYIYmS0G4tBk/hURD4JR9WCavs04t33WmJx9kCyp9vJ+mr4BOUw== lodash.memoize@^4.1.2: version "4.1.2" resolved "https://registry.yarnpkg.com/lodash.memoize/-/lodash.memoize-4.1.2.tgz#bcc6c49a42a2840ed997f323eada5ecd182e0bfe" integrity sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag== -lodash.uniq@4.5.0, lodash.uniq@^4.5.0: +lodash.uniq@^4.5.0: version "4.5.0" resolved "https://registry.yarnpkg.com/lodash.uniq/-/lodash.uniq-4.5.0.tgz#d0225373aeb652adc1bc82e4945339a842754773" integrity sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ== -lodash@^4.17.19, lodash@^4.17.20, lodash@^4.17.21: +lodash@^4.17.20, lodash@^4.17.21: version "4.17.21" resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== @@ -6130,15 +6480,10 @@ lower-case@^2.0.2: dependencies: tslib "^2.0.3" -lowercase-keys@^1.0.0, lowercase-keys@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-1.0.1.tgz#6f9e30b47084d971a7c820ff15a6c5167b74c26f" - integrity sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA== - -lowercase-keys@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-2.0.0.tgz#2603e78b7b4b0006cbca2fbcc8a3202558ac9479" - integrity sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA== +lowercase-keys@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-3.0.0.tgz#c5e7d442e37ead247ae9db117a9d0a467c89d4f2" + integrity sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ== lru-cache@^5.1.1: version "5.1.1" @@ -6147,17 +6492,10 @@ lru-cache@^5.1.1: dependencies: yallist "^3.0.2" -lru-cache@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-6.0.0.tgz#6d6fe6570ebd96aaf90fcad1dafa3b2566db3a94" - integrity sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA== - dependencies: - yallist "^4.0.0" - lunr-languages@^1.4.0: - version "1.10.0" - resolved "https://registry.yarnpkg.com/lunr-languages/-/lunr-languages-1.10.0.tgz#2afe9fff47b435d9bc74bd372fb923dbf8ee1990" - integrity sha512-BBjKKcwrieJlzwwc9M5H/MRXGJ2qyOSDx/NXYiwkuKjiLOOoouh0WsDzeqcLoUWcX31y7i8sb8IgsZKObdUCkw== + version "1.14.0" + resolved "https://registry.yarnpkg.com/lunr-languages/-/lunr-languages-1.14.0.tgz#6e97635f434631729dd0e5654daedd291cd6f2d0" + integrity sha512-hWUAb2KqM3L7J5bcrngszzISY4BxrXn/Xhbb9TTCJYEGqlR1nG67/M14sp09+PTIRklobrn57IAxcdcO/ZFyNA== lunr@^2.3.8: version "2.3.9" @@ -6171,64 +6509,68 @@ magic-string@^0.25.0, magic-string@^0.25.1: dependencies: sourcemap-codec "^1.4.8" -make-dir@^3.0.0, make-dir@^3.0.2, make-dir@^3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-3.1.0.tgz#415e967046b3a7f1d185277d84aa58203726a13f" - integrity sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw== - dependencies: - semver "^6.0.0" - -markdown-escapes@^1.0.0: - version "1.0.4" - resolved "https://registry.yarnpkg.com/markdown-escapes/-/markdown-escapes-1.0.4.tgz#c95415ef451499d7602b91095f3c8e8975f78535" - integrity 
sha512-8z4efJYk43E0upd0NbVXwgSTQs6cT3T06etieCMEg7dRbzCbxUCK/GHlX8mhHRDcp+OLlHkPKsvqQTCvsRl2cg== - markdown-extensions@^1.0.0: version "1.1.1" resolved "https://registry.yarnpkg.com/markdown-extensions/-/markdown-extensions-1.1.1.tgz#fea03b539faeaee9b4ef02a3769b455b189f7fc3" integrity sha512-WWC0ZuMzCyDHYCasEGs4IPvLyTGftYwh6wIEOULOF0HXcqZlhwRzrK0w2VUlxWA98xnvb/jszw4ZSkJ6ADpM6Q== +markdown-extensions@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/markdown-extensions/-/markdown-extensions-2.0.0.tgz#34bebc83e9938cae16e0e017e4a9814a8330d3c4" + integrity sha512-o5vL7aDWatOTX8LzaS1WMoaoxIiLRQJuIKKe2wAw6IeULDHaqbiqiggmx+pKvZDb1Sj+pE46Sn1T7lCqfFtg1Q== + markdown-table@^3.0.0: version "3.0.3" resolved "https://registry.yarnpkg.com/markdown-table/-/markdown-table-3.0.3.tgz#e6331d30e493127e031dd385488b5bd326e4a6bd" integrity sha512-Z1NL3Tb1M9wH4XESsCDEksWoKTdlUafKc4pt0GRwjUyXaCFZ+dc3g2erqB6zm3szA2IUSi7VnPI+o/9jnxh9hw== -mdast-squeeze-paragraphs@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/mdast-squeeze-paragraphs/-/mdast-squeeze-paragraphs-4.0.0.tgz#7c4c114679c3bee27ef10b58e2e015be79f1ef97" - integrity sha512-zxdPn69hkQ1rm4J+2Cs2j6wDEv7O17TfXTJ33tl/+JPIoEmtV9t2ZzBM5LPHE8QlHsmVD8t3vPKCyY3oH+H8MQ== - dependencies: - unist-util-remove "^2.0.0" - -mdast-util-definitions@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/mdast-util-definitions/-/mdast-util-definitions-4.0.0.tgz#c5c1a84db799173b4dcf7643cda999e440c24db2" - integrity sha512-k8AJ6aNnUkB7IE+5azR9h81O5EQ/cTDXtWdMq9Kk5KcEW/8ritU5CeLg/9HhOC++nALHBlaogJ5jz0Ybk3kPMQ== - dependencies: - unist-util-visit "^2.0.0" - mdast-util-definitions@^5.0.0: - version "5.1.1" - resolved "https://registry.yarnpkg.com/mdast-util-definitions/-/mdast-util-definitions-5.1.1.tgz#2c1d684b28e53f84938bb06317944bee8efa79db" - integrity sha512-rQ+Gv7mHttxHOBx2dkF4HWTg+EE+UR78ptQWDylzPKaQuVGdG4HIoY3SrS/pCp80nZ04greFvXbVFHT+uf0JVQ== + version "5.1.2" + resolved "https://registry.yarnpkg.com/mdast-util-definitions/-/mdast-util-definitions-5.1.2.tgz#9910abb60ac5d7115d6819b57ae0bcef07a3f7a7" + integrity sha512-8SVPMuHqlPME/z3gqVwWY4zVXn8lqKv/pAhC57FuJ40ImXyBpmO5ukh98zB2v7Blql2FiHjHv9LVztSIqjY+MA== dependencies: "@types/mdast" "^3.0.0" "@types/unist" "^2.0.0" unist-util-visit "^4.0.0" +mdast-util-directive@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/mdast-util-directive/-/mdast-util-directive-3.0.0.tgz#3fb1764e705bbdf0afb0d3f889e4404c3e82561f" + integrity sha512-JUpYOqKI4mM3sZcNxmF/ox04XYFFkNwr0CFlrQIkCwbvH0xzMCqkMqAde9wRd80VAhaUrwFwKm2nxretdT1h7Q== + dependencies: + "@types/mdast" "^4.0.0" + "@types/unist" "^3.0.0" + devlop "^1.0.0" + mdast-util-from-markdown "^2.0.0" + mdast-util-to-markdown "^2.0.0" + parse-entities "^4.0.0" + stringify-entities "^4.0.0" + unist-util-visit-parents "^6.0.0" + mdast-util-find-and-replace@^2.0.0: - version "2.2.1" - resolved "https://registry.yarnpkg.com/mdast-util-find-and-replace/-/mdast-util-find-and-replace-2.2.1.tgz#249901ef43c5f41d6e8a8d446b3b63b17e592d7c" - integrity sha512-SobxkQXFAdd4b5WmEakmkVoh18icjQRxGy5OWTCzgsLRm1Fu/KCtwD1HIQSsmq5ZRjVH0Ehwg6/Fn3xIUk+nKw== + version "2.2.2" + resolved "https://registry.yarnpkg.com/mdast-util-find-and-replace/-/mdast-util-find-and-replace-2.2.2.tgz#cc2b774f7f3630da4bd592f61966fecade8b99b1" + integrity sha512-MTtdFRz/eMDHXzeK6W3dO7mXUlF82Gom4y0oOgvHhh/HXZAGvIQDUvQ0SuUx+j2tv44b8xTHOm8K/9OoRFnXKw== dependencies: + "@types/mdast" "^3.0.0" escape-string-regexp "^5.0.0" unist-util-is "^5.0.0" unist-util-visit-parents "^5.0.0" 
-mdast-util-from-markdown@^1.0.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/mdast-util-from-markdown/-/mdast-util-from-markdown-1.2.0.tgz#84df2924ccc6c995dec1e2368b2b208ad0a76268" - integrity sha512-iZJyyvKD1+K7QX1b5jXdE7Sc5dtoTry1vzV28UZZe8Z1xVnB/czKntJ7ZAkG0tANqRnBF6p3p7GpU1y19DTf2Q== +mdast-util-find-and-replace@^3.0.0, mdast-util-find-and-replace@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.1.tgz#a6fc7b62f0994e973490e45262e4bc07607b04e0" + integrity sha512-SG21kZHGC3XRTSUhtofZkBzZTJNM5ecCi0SK2IMKmSXR8vO3peL+kb1O0z7Zl83jKtutG4k5Wv/W7V3/YHvzPA== + dependencies: + "@types/mdast" "^4.0.0" + escape-string-regexp "^5.0.0" + unist-util-is "^6.0.0" + unist-util-visit-parents "^6.0.0" + +mdast-util-from-markdown@^1.0.0, mdast-util-from-markdown@^1.1.0, mdast-util-from-markdown@^1.3.0: + version "1.3.1" + resolved "https://registry.yarnpkg.com/mdast-util-from-markdown/-/mdast-util-from-markdown-1.3.1.tgz#9421a5a247f10d31d2faed2a30df5ec89ceafcf0" + integrity sha512-4xTO/M8c82qBcnQc1tgpNtubGUW/Y1tBQ1B0i5CtSoelOLKFYlElIr3bvgREYYO5iRqbMY1YuqZng0GVOI8Qww== dependencies: "@types/mdast" "^3.0.0" "@types/unist" "^2.0.0" @@ -6243,55 +6585,137 @@ mdast-util-from-markdown@^1.0.0: unist-util-stringify-position "^3.0.0" uvu "^0.5.0" +mdast-util-from-markdown@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.1.tgz#32a6e8f512b416e1f51eb817fc64bd867ebcd9cc" + integrity sha512-aJEUyzZ6TzlsX2s5B4Of7lN7EQtAxvtradMMglCQDyaTFgse6CmtmdJ15ElnVRlCg1vpNyVtbem0PWzlNieZsA== + dependencies: + "@types/mdast" "^4.0.0" + "@types/unist" "^3.0.0" + decode-named-character-reference "^1.0.0" + devlop "^1.0.0" + mdast-util-to-string "^4.0.0" + micromark "^4.0.0" + micromark-util-decode-numeric-character-reference "^2.0.0" + micromark-util-decode-string "^2.0.0" + micromark-util-normalize-identifier "^2.0.0" + micromark-util-symbol "^2.0.0" + micromark-util-types "^2.0.0" + unist-util-stringify-position "^4.0.0" + +mdast-util-frontmatter@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/mdast-util-frontmatter/-/mdast-util-frontmatter-2.0.1.tgz#f5f929eb1eb36c8a7737475c7eb438261f964ee8" + integrity sha512-LRqI9+wdgC25P0URIJY9vwocIzCcksduHQ9OF2joxQoyTNVduwLAFUzjoopuRJbJAReaKrNQKAZKL3uCMugWJA== + dependencies: + "@types/mdast" "^4.0.0" + devlop "^1.0.0" + escape-string-regexp "^5.0.0" + mdast-util-from-markdown "^2.0.0" + mdast-util-to-markdown "^2.0.0" + micromark-extension-frontmatter "^2.0.0" + mdast-util-gfm-autolink-literal@^1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-1.0.2.tgz#4032dcbaddaef7d4f2f3768ed830475bb22d3970" - integrity sha512-FzopkOd4xTTBeGXhXSBU0OCDDh5lUj2rd+HQqG92Ld+jL4lpUfgX2AT2OHAVP9aEeDKp7G92fuooSZcYJA3cRg== + version "1.0.3" + resolved "https://registry.yarnpkg.com/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-1.0.3.tgz#67a13abe813d7eba350453a5333ae1bc0ec05c06" + integrity sha512-My8KJ57FYEy2W2LyNom4n3E7hKTuQk/0SES0u16tjA9Z3oFkF4RrC/hPAPgjlSpezsOvI8ObcXcElo92wn5IGA== dependencies: "@types/mdast" "^3.0.0" ccount "^2.0.0" mdast-util-find-and-replace "^2.0.0" micromark-util-character "^1.0.0" +mdast-util-gfm-autolink-literal@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.1.tgz#abd557630337bd30a6d5a4bd8252e1c2dc0875d5" + integrity 
sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ== + dependencies: + "@types/mdast" "^4.0.0" + ccount "^2.0.0" + devlop "^1.0.0" + mdast-util-find-and-replace "^3.0.0" + micromark-util-character "^2.0.0" + mdast-util-gfm-footnote@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-1.0.1.tgz#11d2d40a1a673a399c459e467fa85e00223191fe" - integrity sha512-p+PrYlkw9DeCRkTVw1duWqPRHX6Ywh2BNKJQcZbCwAuP/59B0Lk9kakuAd7KbQprVO4GzdW8eS5++A9PUSqIyw== + version "1.0.2" + resolved "https://registry.yarnpkg.com/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-1.0.2.tgz#ce5e49b639c44de68d5bf5399877a14d5020424e" + integrity sha512-56D19KOGbE00uKVj3sgIykpwKL179QsVFwx/DCW0u/0+URsryacI4MAdNJl0dh+u2PSsD9FtxPFbHCzJ78qJFQ== dependencies: "@types/mdast" "^3.0.0" mdast-util-to-markdown "^1.3.0" micromark-util-normalize-identifier "^1.0.0" +mdast-util-gfm-footnote@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.0.0.tgz#25a1753c7d16db8bfd53cd84fe50562bd1e6d6a9" + integrity sha512-5jOT2boTSVkMnQ7LTrd6n/18kqwjmuYqo7JUPe+tRCY6O7dAuTFMtTPauYYrMPpox9hlN0uOx/FL8XvEfG9/mQ== + dependencies: + "@types/mdast" "^4.0.0" + devlop "^1.1.0" + mdast-util-from-markdown "^2.0.0" + mdast-util-to-markdown "^2.0.0" + micromark-util-normalize-identifier "^2.0.0" + mdast-util-gfm-strikethrough@^1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-1.0.2.tgz#6b4fa4ae37d449ccb988192ac0afbb2710ffcefd" - integrity sha512-T/4DVHXcujH6jx1yqpcAYYwd+z5lAYMw4Ls6yhTfbMMtCt0PHY4gEfhW9+lKsLBtyhUGKRIzcUA2FATVqnvPDA== + version "1.0.3" + resolved "https://registry.yarnpkg.com/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-1.0.3.tgz#5470eb105b483f7746b8805b9b989342085795b7" + integrity sha512-DAPhYzTYrRcXdMjUtUjKvW9z/FNAMTdU0ORyMcbmkwYNbKocDpdk+PX1L1dQgOID/+vVs1uBQ7ElrBQfZ0cuiQ== dependencies: "@types/mdast" "^3.0.0" mdast-util-to-markdown "^1.3.0" +mdast-util-gfm-strikethrough@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz#d44ef9e8ed283ac8c1165ab0d0dfd058c2764c16" + integrity sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg== + dependencies: + "@types/mdast" "^4.0.0" + mdast-util-from-markdown "^2.0.0" + mdast-util-to-markdown "^2.0.0" + mdast-util-gfm-table@^1.0.0: - version "1.0.6" - resolved "https://registry.yarnpkg.com/mdast-util-gfm-table/-/mdast-util-gfm-table-1.0.6.tgz#184e900979fe790745fc3dabf77a4114595fcd7f" - integrity sha512-uHR+fqFq3IvB3Rd4+kzXW8dmpxUhvgCQZep6KdjsLK4O6meK5dYZEayLtIxNus1XO3gfjfcIFe8a7L0HZRGgag== + version "1.0.7" + resolved "https://registry.yarnpkg.com/mdast-util-gfm-table/-/mdast-util-gfm-table-1.0.7.tgz#3552153a146379f0f9c4c1101b071d70bbed1a46" + integrity sha512-jjcpmNnQvrmN5Vx7y7lEc2iIOEytYv7rTvu+MeyAsSHTASGCCRA79Igg2uKssgOs1i1po8s3plW0sTu1wkkLGg== dependencies: "@types/mdast" "^3.0.0" markdown-table "^3.0.0" mdast-util-from-markdown "^1.0.0" mdast-util-to-markdown "^1.3.0" +mdast-util-gfm-table@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz#7a435fb6223a72b0862b33afbd712b6dae878d38" + integrity sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg== + dependencies: + "@types/mdast" "^4.0.0" + devlop "^1.0.0" + 
markdown-table "^3.0.0" + mdast-util-from-markdown "^2.0.0" + mdast-util-to-markdown "^2.0.0" + mdast-util-gfm-task-list-item@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-1.0.1.tgz#6f35f09c6e2bcbe88af62fdea02ac199cc802c5c" - integrity sha512-KZ4KLmPdABXOsfnM6JHUIjxEvcx2ulk656Z/4Balw071/5qgnhz+H1uGtf2zIGnrnvDC8xR4Fj9uKbjAFGNIeA== + version "1.0.2" + resolved "https://registry.yarnpkg.com/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-1.0.2.tgz#b280fcf3b7be6fd0cc012bbe67a59831eb34097b" + integrity sha512-PFTA1gzfp1B1UaiJVyhJZA1rm0+Tzn690frc/L8vNX1Jop4STZgOE6bxUhnzdVSB+vm2GU1tIsuQcA9bxTQpMQ== dependencies: "@types/mdast" "^3.0.0" mdast-util-to-markdown "^1.3.0" +mdast-util-gfm-task-list-item@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz#e68095d2f8a4303ef24094ab642e1047b991a936" + integrity sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ== + dependencies: + "@types/mdast" "^4.0.0" + devlop "^1.0.0" + mdast-util-from-markdown "^2.0.0" + mdast-util-to-markdown "^2.0.0" + mdast-util-gfm@^2.0.0: - version "2.0.1" - resolved "https://registry.yarnpkg.com/mdast-util-gfm/-/mdast-util-gfm-2.0.1.tgz#16fcf70110ae689a06d77e8f4e346223b64a0ea6" - integrity sha512-42yHBbfWIFisaAfV1eixlabbsa6q7vHeSPY+cg+BBjX51M8xhgMacqH9g6TftB/9+YkcI0ooV4ncfrJslzm/RQ== + version "2.0.2" + resolved "https://registry.yarnpkg.com/mdast-util-gfm/-/mdast-util-gfm-2.0.2.tgz#e92f4d8717d74bdba6de57ed21cc8b9552e2d0b6" + integrity sha512-qvZ608nBppZ4icQlhQQIAdc6S3Ffj9RGmzwUKUWuEICFnd1LVkN3EktF7ZHAgfcEdvZB5owU9tQgt99e2TlLjg== dependencies: mdast-util-from-markdown "^1.0.0" mdast-util-gfm-autolink-literal "^1.0.0" @@ -6301,10 +6725,23 @@ mdast-util-gfm@^2.0.0: mdast-util-gfm-task-list-item "^1.0.0" mdast-util-to-markdown "^1.0.0" +mdast-util-gfm@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/mdast-util-gfm/-/mdast-util-gfm-3.0.0.tgz#3f2aecc879785c3cb6a81ff3a243dc11eca61095" + integrity sha512-dgQEX5Amaq+DuUqf26jJqSK9qgixgd6rYDHAv4aTBuA92cTknZlKpPfa86Z/s8Dj8xsAQpFfBmPUHWJBWqS4Bw== + dependencies: + mdast-util-from-markdown "^2.0.0" + mdast-util-gfm-autolink-literal "^2.0.0" + mdast-util-gfm-footnote "^2.0.0" + mdast-util-gfm-strikethrough "^2.0.0" + mdast-util-gfm-table "^2.0.0" + mdast-util-gfm-task-list-item "^2.0.0" + mdast-util-to-markdown "^2.0.0" + mdast-util-mdx-expression@^1.0.0: - version "1.3.1" - resolved "https://registry.yarnpkg.com/mdast-util-mdx-expression/-/mdast-util-mdx-expression-1.3.1.tgz#2224cf0b5b150093704a3c225bd529d2de21f50f" - integrity sha512-TTb6cKyTA1RD+1su1iStZ5PAv3rFfOUKcoU5EstUpv/IZo63uDX03R8+jXjMEhcobXnNOiG6/ccekvVl4eV1zQ== + version "1.3.2" + resolved "https://registry.yarnpkg.com/mdast-util-mdx-expression/-/mdast-util-mdx-expression-1.3.2.tgz#d027789e67524d541d6de543f36d51ae2586f220" + integrity sha512-xIPmR5ReJDu/DHH1OoIT1HkuybIfRGYRywC+gJtI7qHjCJp/M9jrmBEJW22O8lskDWm562BX2W8TiAwRTb0rKA== dependencies: "@types/estree-jsx" "^1.0.0" "@types/hast" "^2.0.0" @@ -6312,15 +6749,29 @@ mdast-util-mdx-expression@^1.0.0: mdast-util-from-markdown "^1.0.0" mdast-util-to-markdown "^1.0.0" +mdast-util-mdx-expression@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.1.tgz#43f0abac9adc756e2086f63822a38c8d3c3a5096" + integrity 
sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==
+  dependencies:
+    "@types/estree-jsx" "^1.0.0"
+    "@types/hast" "^3.0.0"
+    "@types/mdast" "^4.0.0"
+    devlop "^1.0.0"
+    mdast-util-from-markdown "^2.0.0"
+    mdast-util-to-markdown "^2.0.0"
+
 mdast-util-mdx-jsx@^2.0.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-2.1.0.tgz#029f5a9c38485dbb5cf482059557ee7d788f1947"
-  integrity sha512-KzgzfWMhdteDkrY4mQtyvTU5bc/W4ppxhe9SzelO6QUUiwLAM+Et2Dnjjprik74a336kHdo0zKm7Tp+n6FFeRg==
+  version "2.1.4"
+  resolved "https://registry.yarnpkg.com/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-2.1.4.tgz#7c1f07f10751a78963cfabee38017cbc8b7786d1"
+  integrity sha512-DtMn9CmVhVzZx3f+optVDF8yFgQVt7FghCRNdlIaS3X5Bnym3hZwPbg/XW86vdpKjlc1PVj26SpnLGeJBXD3JA==
   dependencies:
     "@types/estree-jsx" "^1.0.0"
     "@types/hast" "^2.0.0"
     "@types/mdast" "^3.0.0"
+    "@types/unist" "^2.0.0"
     ccount "^2.0.0"
+    mdast-util-from-markdown "^1.1.0"
     mdast-util-to-markdown "^1.3.0"
     parse-entities "^4.0.0"
     stringify-entities "^4.0.0"
@@ -6328,19 +6779,50 @@ mdast-util-mdx-jsx@^2.0.0:
     unist-util-stringify-position "^3.0.0"
     vfile-message "^3.0.0"

+mdast-util-mdx-jsx@^3.0.0:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.1.3.tgz#76b957b3da18ebcfd0de3a9b4451dcd6fdec2320"
+  integrity sha512-bfOjvNt+1AcbPLTFMFWY149nJz0OjmewJs3LQQ5pIyVGxP4CdOqNVJL6kTaM5c68p8q82Xv3nCyFfUnuEcH3UQ==
+  dependencies:
+    "@types/estree-jsx" "^1.0.0"
+    "@types/hast" "^3.0.0"
+    "@types/mdast" "^4.0.0"
+    "@types/unist" "^3.0.0"
+    ccount "^2.0.0"
+    devlop "^1.1.0"
+    mdast-util-from-markdown "^2.0.0"
+    mdast-util-to-markdown "^2.0.0"
+    parse-entities "^4.0.0"
+    stringify-entities "^4.0.0"
+    unist-util-stringify-position "^4.0.0"
+    vfile-message "^4.0.0"
+
 mdast-util-mdx@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/mdast-util-mdx/-/mdast-util-mdx-2.0.0.tgz#dd4f6c993cf27da32725e50a04874f595b7b63fb"
-  integrity sha512-M09lW0CcBT1VrJUaF/PYxemxxHa7SLDHdSn94Q9FhxjCQfuW7nMAWKWimTmA3OyDMSTH981NN1csW1X+HPSluw==
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/mdast-util-mdx/-/mdast-util-mdx-2.0.1.tgz#49b6e70819b99bb615d7223c088d295e53bb810f"
+  integrity sha512-38w5y+r8nyKlGvNjSEqWrhG0w5PmnRA+wnBvm+ulYCct7nsGYhFVb0lljS9bQav4psDAS1eGkP2LMVcZBi/aqw==
   dependencies:
+    mdast-util-from-markdown "^1.0.0"
     mdast-util-mdx-expression "^1.0.0"
     mdast-util-mdx-jsx "^2.0.0"
     mdast-util-mdxjs-esm "^1.0.0"
+    mdast-util-to-markdown "^1.0.0"
+
+mdast-util-mdx@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/mdast-util-mdx/-/mdast-util-mdx-3.0.0.tgz#792f9cf0361b46bee1fdf1ef36beac424a099c41"
+  integrity sha512-JfbYLAW7XnYTTbUsmpu0kdBUVe+yKVJZBItEjwyYJiDJuZ9w4eeaqks4HQO+R7objWgS2ymV60GYpI14Ug554w==
+  dependencies:
+    mdast-util-from-markdown "^2.0.0"
+    mdast-util-mdx-expression "^2.0.0"
+    mdast-util-mdx-jsx "^3.0.0"
+    mdast-util-mdxjs-esm "^2.0.0"
+    mdast-util-to-markdown "^2.0.0"

 mdast-util-mdxjs-esm@^1.0.0:
-  version "1.3.0"
-  resolved "https://registry.yarnpkg.com/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-1.3.0.tgz#137345ef827169aeeeb6069277cd3e090830ce9a"
-  integrity sha512-7N5ihsOkAEGjFotIX9p/YPdl4TqUoMxL4ajNz7PbT89BqsdWJuBC9rvgt6wpbwTZqWWR0jKWqQbwsOWDBUZv4g==
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-1.3.1.tgz#645d02cd607a227b49721d146fd81796b2e2d15b"
+  integrity sha512-SXqglS0HrEvSdUEfoXFtcg7DRl7S2cwOXc7jkuusG472Mmjag34DUDeOJUZtl+BVnyeO1frIgVpHlNRWc2gk/w==
   dependencies:
     "@types/estree-jsx" "^1.0.0"
     "@types/hast" "^2.0.0"
@@ -6348,89 +6830,141 @@ mdast-util-mdxjs-esm@^1.0.0:
     mdast-util-from-markdown "^1.0.0"
     mdast-util-to-markdown "^1.0.0"

-mdast-util-to-hast@10.0.1:
-  version "10.0.1"
-  resolved "https://registry.yarnpkg.com/mdast-util-to-hast/-/mdast-util-to-hast-10.0.1.tgz#0cfc82089494c52d46eb0e3edb7a4eb2aea021eb"
-  integrity sha512-BW3LM9SEMnjf4HXXVApZMt8gLQWVNXc3jryK0nJu/rOXPOnlkUjmdkDlmxMirpbU9ILncGFIwLH/ubnWBbcdgA==
+mdast-util-mdxjs-esm@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz#019cfbe757ad62dd557db35a695e7314bcc9fa97"
+  integrity sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==
+  dependencies:
+    "@types/estree-jsx" "^1.0.0"
+    "@types/hast" "^3.0.0"
+    "@types/mdast" "^4.0.0"
+    devlop "^1.0.0"
+    mdast-util-from-markdown "^2.0.0"
+    mdast-util-to-markdown "^2.0.0"
+
+mdast-util-phrasing@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/mdast-util-phrasing/-/mdast-util-phrasing-3.0.1.tgz#c7c21d0d435d7fb90956038f02e8702781f95463"
+  integrity sha512-WmI1gTXUBJo4/ZmSk79Wcb2HcjPJBzM1nlI/OUWA8yk2X9ik3ffNbBGsU+09BFmXaL1IBb9fiuvq6/KMiNycSg==
   dependencies:
     "@types/mdast" "^3.0.0"
-    "@types/unist" "^2.0.0"
-    mdast-util-definitions "^4.0.0"
-    mdurl "^1.0.0"
-    unist-builder "^2.0.0"
-    unist-util-generated "^1.0.0"
-    unist-util-position "^3.0.0"
-    unist-util-visit "^2.0.0"
+    unist-util-is "^5.0.0"
+
+mdast-util-phrasing@^4.0.0:
+  version "4.1.0"
+  resolved "https://registry.yarnpkg.com/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz#7cc0a8dec30eaf04b7b1a9661a92adb3382aa6e3"
+  integrity sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==
+  dependencies:
+    "@types/mdast" "^4.0.0"
+    unist-util-is "^6.0.0"

 mdast-util-to-hast@^12.1.0:
-  version "12.2.5"
-  resolved "https://registry.yarnpkg.com/mdast-util-to-hast/-/mdast-util-to-hast-12.2.5.tgz#91532ebd929a7def21585034f7901eb367d2d272"
-  integrity sha512-EFNhT35ZR/VZ85/EedDdCNTq0oFM+NM/+qBomVGQ0+Lcg0nhI8xIwmdCzNMlVlCJNXRprpobtKP/IUh8cfz6zQ==
+  version "12.3.0"
+  resolved "https://registry.yarnpkg.com/mdast-util-to-hast/-/mdast-util-to-hast-12.3.0.tgz#045d2825fb04374e59970f5b3f279b5700f6fb49"
+  integrity sha512-pits93r8PhnIoU4Vy9bjW39M2jJ6/tdHyja9rrot9uujkN7UTU9SDnE6WNJz/IGyQk3XHX6yNNtrBH6cQzm8Hw==
   dependencies:
     "@types/hast" "^2.0.0"
     "@types/mdast" "^3.0.0"
     mdast-util-definitions "^5.0.0"
     micromark-util-sanitize-uri "^1.1.0"
     trim-lines "^3.0.0"
-    unist-builder "^3.0.0"
     unist-util-generated "^2.0.0"
     unist-util-position "^4.0.0"
     unist-util-visit "^4.0.0"

+mdast-util-to-hast@^13.0.0:
+  version "13.2.0"
+  resolved "https://registry.yarnpkg.com/mdast-util-to-hast/-/mdast-util-to-hast-13.2.0.tgz#5ca58e5b921cc0a3ded1bc02eed79a4fe4fe41f4"
+  integrity sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA==
+  dependencies:
+    "@types/hast" "^3.0.0"
+    "@types/mdast" "^4.0.0"
+    "@ungap/structured-clone" "^1.0.0"
+    devlop "^1.0.0"
+    micromark-util-sanitize-uri "^2.0.0"
+    trim-lines "^3.0.0"
+    unist-util-position "^5.0.0"
+    unist-util-visit "^5.0.0"
+    vfile "^6.0.0"
+
 mdast-util-to-markdown@^1.0.0, mdast-util-to-markdown@^1.3.0:
-  version "1.4.0"
-  resolved "https://registry.yarnpkg.com/mdast-util-to-markdown/-/mdast-util-to-markdown-1.4.0.tgz#bb0153a865dbc022975f403a156fb6399c494ddf"
-  integrity sha512-IjXARf/O8VGx/pc5SZ7syfydq1DYL9vd92orsG5U0b4GNCmAvXzu+n7sbzfIKrXwB0AVrYk3NV2kXl0AIi9LCA==
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/mdast-util-to-markdown/-/mdast-util-to-markdown-1.5.0.tgz#c13343cb3fc98621911d33b5cd42e7d0731171c6"
+  integrity sha512-bbv7TPv/WC49thZPg3jXuqzuvI45IL2EVAr/KxF0BSdHsU0ceFHOmwQn6evxAh1GaoK/6GQ1wp4R4oW2+LFL/A==
   dependencies:
     "@types/mdast" "^3.0.0"
     "@types/unist" "^2.0.0"
     longest-streak "^3.0.0"
+    mdast-util-phrasing "^3.0.0"
     mdast-util-to-string "^3.0.0"
     micromark-util-decode-string "^1.0.0"
     unist-util-visit "^4.0.0"
     zwitch "^2.0.0"

-mdast-util-to-string@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/mdast-util-to-string/-/mdast-util-to-string-2.0.0.tgz#b8cfe6a713e1091cb5b728fc48885a4767f8b97b"
-  integrity sha512-AW4DRS3QbBayY/jJmD8437V1Gombjf8RSOUCMFBuo5iHi58AGEgVCKQ+ezHkZZDpAQS75hcBMpLqjpJTjtUL7w==
+mdast-util-to-markdown@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.0.tgz#9813f1d6e0cdaac7c244ec8c6dabfdb2102ea2b4"
+  integrity sha512-SR2VnIEdVNCJbP6y7kVTJgPLifdr8WEU440fQec7qHoHOUz/oJ2jmNRqdDQ3rbiStOXb2mCDGTuwsK5OPUgYlQ==
+  dependencies:
+    "@types/mdast" "^4.0.0"
+    "@types/unist" "^3.0.0"
+    longest-streak "^3.0.0"
+    mdast-util-phrasing "^4.0.0"
+    mdast-util-to-string "^4.0.0"
+    micromark-util-decode-string "^2.0.0"
+    unist-util-visit "^5.0.0"
+    zwitch "^2.0.0"

 mdast-util-to-string@^3.0.0, mdast-util-to-string@^3.1.0:
-  version "3.1.0"
-  resolved "https://registry.yarnpkg.com/mdast-util-to-string/-/mdast-util-to-string-3.1.0.tgz#56c506d065fbf769515235e577b5a261552d56e9"
-  integrity sha512-n4Vypz/DZgwo0iMHLQL49dJzlp7YtAJP+N07MZHpjPf/5XJuHUWstviF4Mn2jEiR/GNmtnRRqnwsXExk3igfFA==
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/mdast-util-to-string/-/mdast-util-to-string-3.2.0.tgz#66f7bb6324756741c5f47a53557f0cbf16b6f789"
+  integrity sha512-V4Zn/ncyN1QNSqSBxTrMOLpjr+IKdHl2v3KVLoWmDPscP4r9GcCi71gjgvUV1SFSKh92AjAG4peFuBl2/YgCJg==
+  dependencies:
+    "@types/mdast" "^3.0.0"
+
+mdast-util-to-string@^4.0.0:
+  version "4.0.0"
+  resolved "https://registry.yarnpkg.com/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz#7a5121475556a04e7eddeb67b264aae79d312814"
+  integrity sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==
+  dependencies:
+    "@types/mdast" "^4.0.0"

 mdn-data@2.0.14:
   version "2.0.14"
   resolved "https://registry.yarnpkg.com/mdn-data/-/mdn-data-2.0.14.tgz#7113fc4281917d63ce29b43446f701e68c25ba50"
   integrity sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow==

+mdn-data@2.0.28:
+  version "2.0.28"
+  resolved "https://registry.yarnpkg.com/mdn-data/-/mdn-data-2.0.28.tgz#5ec48e7bef120654539069e1ae4ddc81ca490eba"
+  integrity sha512-aylIc7Z9y4yzHYAJNuESG3hfhC+0Ibp/MAMiaOZgNv4pmEdFyfZhhhny4MNiAfWdBQ1RQ2mfDWmM1x8SvGyp8g==
+
+mdn-data@2.0.30:
+  version "2.0.30"
+  resolved "https://registry.yarnpkg.com/mdn-data/-/mdn-data-2.0.30.tgz#ce4df6f80af6cfbe218ecd5c552ba13c4dfa08cc"
+  integrity sha512-GaqWWShW4kv/G9IEucWScBx9G1/vsFZZJUO+tD26M8J8z3Kw5RDQjaoZe03YAClgeS/SWPOcb4nkFBTEi5DUEA==
+
 mdn-data@2.0.4:
   version "2.0.4"
   resolved "https://registry.yarnpkg.com/mdn-data/-/mdn-data-2.0.4.tgz#699b3c38ac6f1d728091a64650b65d388502fd5b"
   integrity sha512-iV3XNKw06j5Q7mi6h+9vbx23Tv7JkjEVgKHW4pimwyDGWm0OIQntJJ+u1C6mg6mK1EaTv42XQ7w76yuzH7M2cA==

-mdurl@^1.0.0:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/mdurl/-/mdurl-1.0.1.tgz#fe85b2ec75a59037f2adfec100fd6c601761152e"
-  integrity sha512-/sKlQJCBYVY9Ers9hqzKou4H6V5UWc/M59TH2dvkt+84itfnq7uFOMLpOiOS4ujvHP4etln18fmIxA5R5fll0g==
-
 media-typer@0.3.0:
   version "0.3.0"
   resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748"
   integrity sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==

 memfs@^3.1.2, memfs@^3.4.3:
-  version "3.4.12"
-  resolved "https://registry.yarnpkg.com/memfs/-/memfs-3.4.12.tgz#d00f8ad8dab132dc277c659dc85bfd14b07d03bd"
-  integrity sha512-BcjuQn6vfqP+k100e0E9m61Hyqa//Brp+I3f0OBmN0ATHlFA8vx3Lt8z57R3u2bPqe3WGDBC+nF72fTH7isyEw==
+  version "3.6.0"
+  resolved "https://registry.yarnpkg.com/memfs/-/memfs-3.6.0.tgz#d7a2110f86f79dd950a8b6df6d57bc984aa185f6"
+  integrity sha512-EGowvkkgbMcIChjMTMkESFDbZeSh8xZ7kNSF0hAiAN4Jh6jgHCRS0Ga/+C8y6Au+oqpezRHCfPsmJ2+DwAgiwQ==
   dependencies:
-    fs-monkey "^1.0.3"
+    fs-monkey "^1.0.4"

-merge-descriptors@1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz#b00aaa556dd8b44568150ec9d1b953f3f90cbb61"
-  integrity sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==
+merge-descriptors@1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.3.tgz#d80319a65f3c7935351e5cfdac8f9318504dbed5"
+  integrity sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==

 merge-stream@^2.0.0:
   version "2.0.0"
@@ -6442,21 +6976,31 @@ merge2@^1.3.0, merge2@^1.4.1:
   resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae"
   integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==

-mermaid@^9.1.3:
-  version "9.3.0"
-  resolved "https://registry.yarnpkg.com/mermaid/-/mermaid-9.3.0.tgz#8bd7c4a44b53e4e85c53a0a474442e9c273494ae"
-  integrity sha512-mGl0BM19TD/HbU/LmlaZbjBi//tojelg8P/mxD6pPZTAYaI+VawcyBdqRsoUHSc7j71PrMdJ3HBadoQNdvP5cg==
-  dependencies:
-    "@braintree/sanitize-url" "^6.0.0"
-    d3 "^7.0.0"
-    dagre-d3-es "7.0.6"
-    dompurify "2.4.1"
+mermaid@^10.9.0:
+  version "10.9.2"
+  resolved "https://registry.yarnpkg.com/mermaid/-/mermaid-10.9.2.tgz#108fe98060e6fba6bc826e5b454674aa2d32b817"
+  integrity sha512-UkZyMSuIYcI1Q0H+2pv/5CiY84sOwQ2XlKoDZMl9Y/MtrLEtxQtyA6LWGkMxnZxj0dJqI+7nw51bYjNnrbdFsQ==
+  dependencies:
+    "@braintree/sanitize-url" "^6.0.1"
+    "@types/d3-scale" "^4.0.3"
+    "@types/d3-scale-chromatic" "^3.0.0"
+    cytoscape "^3.28.1"
+    cytoscape-cose-bilkent "^4.1.0"
+    d3 "^7.4.0"
+    d3-sankey "^0.12.3"
+    dagre-d3-es "7.0.10"
+    dayjs "^1.11.7"
+    dompurify "^3.0.5 <3.1.7"
+    elkjs "^0.9.0"
+    katex "^0.16.9"
     khroma "^2.0.0"
     lodash-es "^4.17.21"
-    moment-mini "^2.24.0"
+    mdast-util-from-markdown "^1.3.0"
     non-layered-tidy-tree-layout "^2.0.2"
-    stylis "^4.1.2"
+    stylis "^4.1.3"
+    ts-dedent "^2.2.0"
     uuid "^9.0.0"
+    web-worker "^1.2.0"

 methods@~1.1.2:
   version "1.1.2"
@@ -6464,9 +7008,9 @@ methods@~1.1.2:
   integrity sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==

 micromark-core-commonmark@^1.0.0, micromark-core-commonmark@^1.0.1:
-  version "1.0.6"
-  resolved "https://registry.yarnpkg.com/micromark-core-commonmark/-/micromark-core-commonmark-1.0.6.tgz#edff4c72e5993d93724a3c206970f5a15b0585ad"
-  integrity sha512-K+PkJTxqjFfSNkfAhp4GB+cZPfQd6dxtTXnf+RjZOV7T4EEXnvgzOcnp+eSTmpGk9d1S9sL6/lqrgSNn/s0HZA==
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/micromark-core-commonmark/-/micromark-core-commonmark-1.1.0.tgz#1386628df59946b2d39fb2edfd10f3e8e0a75bb8"
+  integrity sha512-BgHO1aRbolh2hcrzL2d1La37V0Aoz73ymF8rAcKnohLy93titmv62E0gP8Hrx9PKcKrqCZ1BbLGbP3bEhoXYlw==
   dependencies:
     decode-named-character-reference "^1.0.0"
     micromark-factory-destination "^1.0.0"
@@ -6485,21 +7029,75 @@ micromark-core-commonmark@^1.0.0, micromark-core-commonmark@^1.0.1:
     micromark-util-types "^1.0.1"
     uvu "^0.5.0"

+micromark-core-commonmark@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/micromark-core-commonmark/-/micromark-core-commonmark-2.0.1.tgz#9a45510557d068605c6e9a80f282b2bb8581e43d"
+  integrity sha512-CUQyKr1e///ZODyD1U3xit6zXwy1a8q2a1S1HKtIlmgvurrEpaw/Y9y6KSIbF8P59cn/NjzHyO+Q2fAyYLQrAA==
+  dependencies:
+    decode-named-character-reference "^1.0.0"
+    devlop "^1.0.0"
+    micromark-factory-destination "^2.0.0"
+    micromark-factory-label "^2.0.0"
+    micromark-factory-space "^2.0.0"
+    micromark-factory-title "^2.0.0"
+    micromark-factory-whitespace "^2.0.0"
+    micromark-util-character "^2.0.0"
+    micromark-util-chunked "^2.0.0"
+    micromark-util-classify-character "^2.0.0"
+    micromark-util-html-tag-name "^2.0.0"
+    micromark-util-normalize-identifier "^2.0.0"
+    micromark-util-resolve-all "^2.0.0"
+    micromark-util-subtokenize "^2.0.0"
+    micromark-util-symbol "^2.0.0"
+    micromark-util-types "^2.0.0"
+
+micromark-extension-directive@^3.0.0:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/micromark-extension-directive/-/micromark-extension-directive-3.0.2.tgz#2eb61985d1995a7c1ff7621676a4f32af29409e8"
+  integrity sha512-wjcXHgk+PPdmvR58Le9d7zQYWy+vKEU9Se44p2CrCDPiLr2FMyiT4Fyb5UFKFC66wGB3kPlgD7q3TnoqPS7SZA==
+  dependencies:
+    devlop "^1.0.0"
+    micromark-factory-space "^2.0.0"
+    micromark-factory-whitespace "^2.0.0"
+    micromark-util-character "^2.0.0"
+    micromark-util-symbol "^2.0.0"
+    micromark-util-types "^2.0.0"
+    parse-entities "^4.0.0"
+
+micromark-extension-frontmatter@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/micromark-extension-frontmatter/-/micromark-extension-frontmatter-2.0.0.tgz#651c52ffa5d7a8eeed687c513cd869885882d67a"
+  integrity sha512-C4AkuM3dA58cgZha7zVnuVxBhDsbttIMiytjgsM2XbHAB2faRVaHRle40558FBN+DJcrLNCoqG5mlrpdU4cRtg==
+  dependencies:
+    fault "^2.0.0"
+    micromark-util-character "^2.0.0"
+    micromark-util-symbol "^2.0.0"
+    micromark-util-types "^2.0.0"
+
 micromark-extension-gfm-autolink-literal@^1.0.0:
-  version "1.0.3"
-  resolved "https://registry.yarnpkg.com/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-1.0.3.tgz#dc589f9c37eaff31a175bab49f12290edcf96058"
-  integrity sha512-i3dmvU0htawfWED8aHMMAzAVp/F0Z+0bPh3YrbTPPL1v4YAlCZpy5rBO5p0LPYiZo0zFVkoYh7vDU7yQSiCMjg==
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-1.0.5.tgz#5853f0e579bbd8ef9e39a7c0f0f27c5a063a66e7"
+  integrity sha512-z3wJSLrDf8kRDOh2qBtoTRD53vJ+CWIyo7uyZuxf/JAbNJjiHsOpG1y5wxk8drtv3ETAHutCu6N3thkOOgueWg==
   dependencies:
     micromark-util-character "^1.0.0"
     micromark-util-sanitize-uri "^1.0.0"
     micromark-util-symbol "^1.0.0"
     micromark-util-types "^1.0.0"
-    uvu "^0.5.0"
+
+micromark-extension-gfm-autolink-literal@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.1.0.tgz#6286aee9686c4462c1e3552a9d505feddceeb935"
+  integrity sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==
+  dependencies:
+    micromark-util-character "^2.0.0"
+    micromark-util-sanitize-uri "^2.0.0"
+    micromark-util-symbol "^2.0.0"
+    micromark-util-types "^2.0.0"

 micromark-extension-gfm-footnote@^1.0.0:
-  version "1.0.4"
-  resolved "https://registry.yarnpkg.com/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-1.0.4.tgz#cbfd8873b983e820c494498c6dac0105920818d5"
-  integrity sha512-E/fmPmDqLiMUP8mLJ8NbJWJ4bTw6tS+FEQS8CcuDtZpILuOb2kjLqPEeAePF1djXROHXChM/wPJw0iS4kHCcIg==
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-1.1.2.tgz#05e13034d68f95ca53c99679040bc88a6f92fe2e"
+  integrity sha512-Yxn7z7SxgyGWRNa4wzf8AhYYWNrwl5q1Z8ii+CSTTIqVkmGZF1CElX2JI8g5yGoM3GAman9/PVCUFUSJ0kB/8Q==
   dependencies:
     micromark-core-commonmark "^1.0.0"
     micromark-factory-space "^1.0.0"
@@ -6510,10 +7108,24 @@ micromark-extension-gfm-footnote@^1.0.0:
     micromark-util-types "^1.0.0"
     uvu "^0.5.0"

+micromark-extension-gfm-footnote@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.1.0.tgz#4dab56d4e398b9853f6fe4efac4fc9361f3e0750"
+  integrity sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==
+  dependencies:
+    devlop "^1.0.0"
+    micromark-core-commonmark "^2.0.0"
+    micromark-factory-space "^2.0.0"
+    micromark-util-character "^2.0.0"
+    micromark-util-normalize-identifier "^2.0.0"
+    micromark-util-sanitize-uri "^2.0.0"
+    micromark-util-symbol "^2.0.0"
+    micromark-util-types "^2.0.0"
+
 micromark-extension-gfm-strikethrough@^1.0.0:
-  version "1.0.4"
-  resolved "https://registry.yarnpkg.com/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-1.0.4.tgz#162232c284ffbedd8c74e59c1525bda217295e18"
-  integrity sha512-/vjHU/lalmjZCT5xt7CcHVJGq8sYRm80z24qAKXzaHzem/xsDYb2yLL+NNVbYvmpLx3O7SYPuGL5pzusL9CLIQ==
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-1.0.7.tgz#c8212c9a616fa3bf47cb5c711da77f4fdc2f80af"
+  integrity sha512-sX0FawVE1o3abGk3vRjOH50L5TTLr3b5XMqnP9YDRb34M0v5OoZhG+OHFz1OffZ9dlwgpTBKaT4XW/AsUVnSDw==
   dependencies:
     micromark-util-chunked "^1.0.0"
     micromark-util-classify-character "^1.0.0"
@@ -6522,10 +7134,22 @@ micromark-extension-gfm-strikethrough@^1.0.0:
     micromark-util-types "^1.0.0"
     uvu "^0.5.0"

+micromark-extension-gfm-strikethrough@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.1.0.tgz#86106df8b3a692b5f6a92280d3879be6be46d923"
+  integrity sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==
+  dependencies:
+    devlop "^1.0.0"
+    micromark-util-chunked "^2.0.0"
+    micromark-util-classify-character "^2.0.0"
+    micromark-util-resolve-all "^2.0.0"
+    micromark-util-symbol "^2.0.0"
+    micromark-util-types "^2.0.0"
+
 micromark-extension-gfm-table@^1.0.0:
-  version "1.0.5"
-  resolved "https://registry.yarnpkg.com/micromark-extension-gfm-table/-/micromark-extension-gfm-table-1.0.5.tgz#7b708b728f8dc4d95d486b9e7a2262f9cddbcbb4"
-  integrity sha512-xAZ8J1X9W9K3JTJTUL7G6wSKhp2ZYHrFk5qJgY/4B33scJzE2kpfRL6oiw/veJTbt7jiM/1rngLlOKPWr1G+vg==
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/micromark-extension-gfm-table/-/micromark-extension-gfm-table-1.0.7.tgz#dcb46074b0c6254c3fc9cc1f6f5002c162968008"
+  integrity sha512-3ZORTHtcSnMQEKtAOsBQ9/oHp9096pI/UvdPtN7ehKvrmZZ2+bbWhi0ln+I9drmwXMt5boocn6OlwQzNXeVeqw==
   dependencies:
     micromark-factory-space "^1.0.0"
     micromark-util-character "^1.0.0"
@@ -6533,17 +7157,35 @@ micromark-extension-gfm-table@^1.0.0:
     micromark-util-types "^1.0.0"
     uvu "^0.5.0"

+micromark-extension-gfm-table@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.0.tgz#5cadedfbb29fca7abf752447967003dc3b6583c9"
+  integrity sha512-Ub2ncQv+fwD70/l4ou27b4YzfNaCJOvyX4HxXU15m7mpYY+rjuWzsLIPZHJL253Z643RpbcP1oeIJlQ/SKW67g==
+  dependencies:
+    devlop "^1.0.0"
+    micromark-factory-space "^2.0.0"
+    micromark-util-character "^2.0.0"
+    micromark-util-symbol "^2.0.0"
+    micromark-util-types "^2.0.0"
+
 micromark-extension-gfm-tagfilter@^1.0.0:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-1.0.1.tgz#fb2e303f7daf616db428bb6a26e18fda14a90a4d"
-  integrity sha512-Ty6psLAcAjboRa/UKUbbUcwjVAv5plxmpUTy2XC/3nJFL37eHej8jrHrRzkqcpipJliuBH30DTs7+3wqNcQUVA==
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-1.0.2.tgz#aa7c4dd92dabbcb80f313ebaaa8eb3dac05f13a7"
+  integrity sha512-5XWB9GbAUSHTn8VPU8/1DBXMuKYT5uOgEjJb8gN3mW0PNW5OPHpSdojoqf+iq1xo7vWzw/P8bAHY0n6ijpXF7g==
   dependencies:
     micromark-util-types "^1.0.0"

+micromark-extension-gfm-tagfilter@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-2.0.0.tgz#f26d8a7807b5985fba13cf61465b58ca5ff7dc57"
+  integrity sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==
+  dependencies:
+    micromark-util-types "^2.0.0"
+
 micromark-extension-gfm-task-list-item@^1.0.0:
-  version "1.0.3"
-  resolved "https://registry.yarnpkg.com/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-1.0.3.tgz#7683641df5d4a09795f353574d7f7f66e47b7fc4"
-  integrity sha512-PpysK2S1Q/5VXi72IIapbi/jliaiOFzv7THH4amwXeYXLq3l1uo8/2Be0Ac1rEwK20MQEsGH2ltAZLNY2KI/0Q==
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-1.0.5.tgz#b52ce498dc4c69b6a9975abafc18f275b9dde9f4"
+  integrity sha512-RMFXl2uQ0pNQy6Lun2YBYT9g9INXtWJULgbt01D/x8/6yJ2qpKyzdZD3pi6UIkzF++Da49xAelVKUeUMqd5eIQ==
   dependencies:
     micromark-factory-space "^1.0.0"
     micromark-util-character "^1.0.0"
@@ -6551,10 +7193,21 @@ micromark-extension-gfm-task-list-item@^1.0.0:
     micromark-util-types "^1.0.0"
     uvu "^0.5.0"

+micromark-extension-gfm-task-list-item@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.1.0.tgz#bcc34d805639829990ec175c3eea12bb5b781f2c"
+  integrity sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==
+  dependencies:
+    devlop "^1.0.0"
+    micromark-factory-space "^2.0.0"
+    micromark-util-character "^2.0.0"
+    micromark-util-symbol "^2.0.0"
+    micromark-util-types "^2.0.0"
+
 micromark-extension-gfm@^2.0.0:
-  version "2.0.1"
-  resolved "https://registry.yarnpkg.com/micromark-extension-gfm/-/micromark-extension-gfm-2.0.1.tgz#40f3209216127a96297c54c67f5edc7ef2d1a2a2"
-  integrity sha512-p2sGjajLa0iYiGQdT0oelahRYtMWvLjy8J9LOCxzIQsllMCGLbsLW+Nc+N4vi02jcRJvedVJ68cjelKIO6bpDA==
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/micromark-extension-gfm/-/micromark-extension-gfm-2.0.3.tgz#e517e8579949a5024a493e49204e884aa74f5acf"
+  integrity sha512-vb9OoHqrhCmbRidQv/2+Bc6pkP0FrtlhurxZofvOEy5o8RtuuvTq+RQ1Vw5ZDNrVraQZu3HixESqbG+0iKk/MQ==
   dependencies:
     micromark-extension-gfm-autolink-literal "^1.0.0"
     micromark-extension-gfm-footnote "^1.0.0"
@@ -6565,11 +7218,26 @@ micromark-extension-gfm@^2.0.0:
     micromark-util-combine-extensions "^1.0.0"
     micromark-util-types "^1.0.0"

+micromark-extension-gfm@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/micromark-extension-gfm/-/micromark-extension-gfm-3.0.0.tgz#3e13376ab95dd7a5cfd0e29560dfe999657b3c5b"
+  integrity sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==
+  dependencies:
+    micromark-extension-gfm-autolink-literal "^2.0.0"
+    micromark-extension-gfm-footnote "^2.0.0"
+    micromark-extension-gfm-strikethrough "^2.0.0"
+    micromark-extension-gfm-table "^2.0.0"
+    micromark-extension-gfm-tagfilter "^2.0.0"
+    micromark-extension-gfm-task-list-item "^2.0.0"
+    micromark-util-combine-extensions "^2.0.0"
+    micromark-util-types "^2.0.0"
+
 micromark-extension-mdx-expression@^1.0.0:
-  version "1.0.3"
-  resolved "https://registry.yarnpkg.com/micromark-extension-mdx-expression/-/micromark-extension-mdx-expression-1.0.3.tgz#cd3843573921bf55afcfff4ae0cd2e857a16dcfa"
-  integrity sha512-TjYtjEMszWze51NJCZmhv7MEBcgYRgb3tJeMAJ+HQCAaZHHRBaDCccqQzGizR/H4ODefP44wRTgOn2vE5I6nZA==
+  version "1.0.8"
+  resolved "https://registry.yarnpkg.com/micromark-extension-mdx-expression/-/micromark-extension-mdx-expression-1.0.8.tgz#5bc1f5fd90388e8293b3ef4f7c6f06c24aff6314"
+  integrity sha512-zZpeQtc5wfWKdzDsHRBY003H2Smg+PUi2REhqgIhdzAa5xonhP03FcXxqFSerFiNUr5AWmHpaNPQTBVOS4lrXw==
   dependencies:
+    "@types/estree" "^1.0.0"
     micromark-factory-mdx-expression "^1.0.0"
     micromark-factory-space "^1.0.0"
     micromark-util-character "^1.0.0"
@@ -6578,12 +7246,27 @@ micromark-extension-mdx-expression@^1.0.0:
     micromark-util-types "^1.0.0"
     uvu "^0.5.0"

+micromark-extension-mdx-expression@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/micromark-extension-mdx-expression/-/micromark-extension-mdx-expression-3.0.0.tgz#1407b9ce69916cf5e03a196ad9586889df25302a"
+  integrity sha512-sI0nwhUDz97xyzqJAbHQhp5TfaxEvZZZ2JDqUo+7NvyIYG6BZ5CPPqj2ogUoPJlmXHBnyZUzISg9+oUmU6tUjQ==
+  dependencies:
+    "@types/estree" "^1.0.0"
+    devlop "^1.0.0"
+    micromark-factory-mdx-expression "^2.0.0"
+    micromark-factory-space "^2.0.0"
+    micromark-util-character "^2.0.0"
+    micromark-util-events-to-acorn "^2.0.0"
+    micromark-util-symbol "^2.0.0"
+    micromark-util-types "^2.0.0"
+
 micromark-extension-mdx-jsx@^1.0.0:
-  version "1.0.3"
-  resolved "https://registry.yarnpkg.com/micromark-extension-mdx-jsx/-/micromark-extension-mdx-jsx-1.0.3.tgz#9f196be5f65eb09d2a49b237a7b3398bba2999be"
-  integrity sha512-VfA369RdqUISF0qGgv2FfV7gGjHDfn9+Qfiv5hEwpyr1xscRj/CiVRkU7rywGFCO7JwJ5L0e7CJz60lY52+qOA==
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/micromark-extension-mdx-jsx/-/micromark-extension-mdx-jsx-1.0.5.tgz#e72d24b7754a30d20fb797ece11e2c4e2cae9e82"
+  integrity sha512-gPH+9ZdmDflbu19Xkb8+gheqEDqkSpdCEubQyxuz/Hn8DOXiXvrXeikOoBA71+e8Pfi0/UYmU3wW3H58kr7akA==
   dependencies:
     "@types/acorn" "^4.0.0"
+    "@types/estree" "^1.0.0"
     estree-util-is-identifier-name "^2.0.0"
     micromark-factory-mdx-expression "^1.0.0"
     micromark-factory-space "^1.0.0"
@@ -6593,18 +7276,43 @@ micromark-extension-mdx-jsx@^1.0.0:
     uvu "^0.5.0"
     vfile-message "^3.0.0"

+micromark-extension-mdx-jsx@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/micromark-extension-mdx-jsx/-/micromark-extension-mdx-jsx-3.0.1.tgz#5abb83da5ddc8e473a374453e6ea56fbd66b59ad"
+  integrity sha512-vNuFb9czP8QCtAQcEJn0UJQJZA8Dk6DXKBqx+bg/w0WGuSxDxNr7hErW89tHUY31dUW4NqEOWwmEUNhjTFmHkg==
+  dependencies:
+    "@types/acorn" "^4.0.0"
+    "@types/estree" "^1.0.0"
+    devlop "^1.0.0"
+    estree-util-is-identifier-name "^3.0.0"
+    micromark-factory-mdx-expression "^2.0.0"
+    micromark-factory-space "^2.0.0"
+    micromark-util-character "^2.0.0"
+    micromark-util-events-to-acorn "^2.0.0"
+    micromark-util-symbol "^2.0.0"
+    micromark-util-types "^2.0.0"
+    vfile-message "^4.0.0"
+
 micromark-extension-mdx-md@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/micromark-extension-mdx-md/-/micromark-extension-mdx-md-1.0.0.tgz#382f5df9ee3706dd120b51782a211f31f4760d22"
-  integrity sha512-xaRAMoSkKdqZXDAoSgp20Azm0aRQKGOl0RrS81yGu8Hr/JhMsBmfs4wR7m9kgVUIO36cMUQjNyiyDKPrsv8gOw==
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/micromark-extension-mdx-md/-/micromark-extension-mdx-md-1.0.1.tgz#595d4b2f692b134080dca92c12272ab5b74c6d1a"
+  integrity sha512-7MSuj2S7xjOQXAjjkbjBsHkMtb+mDGVW6uI2dBL9snOBCbZmoNgDAeZ0nSn9j3T42UE/g2xVNMn18PJxZvkBEA==
   dependencies:
     micromark-util-types "^1.0.0"

+micromark-extension-mdx-md@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/micromark-extension-mdx-md/-/micromark-extension-mdx-md-2.0.0.tgz#1d252881ea35d74698423ab44917e1f5b197b92d"
+  integrity sha512-EpAiszsB3blw4Rpba7xTOUptcFeBFi+6PY8VnJ2hhimH+vCQDirWgsMpz7w1XcZE7LVrSAUGb9VJpG9ghlYvYQ==
+  dependencies:
+    micromark-util-types "^2.0.0"
+
 micromark-extension-mdxjs-esm@^1.0.0:
-  version "1.0.3"
-  resolved "https://registry.yarnpkg.com/micromark-extension-mdxjs-esm/-/micromark-extension-mdxjs-esm-1.0.3.tgz#630d9dc9db2c2fd470cac8c1e7a824851267404d"
-  integrity sha512-2N13ol4KMoxb85rdDwTAC6uzs8lMX0zeqpcyx7FhS7PxXomOnLactu8WI8iBNXW8AVyea3KIJd/1CKnUmwrK9A==
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/micromark-extension-mdxjs-esm/-/micromark-extension-mdxjs-esm-1.0.5.tgz#e4f8be9c14c324a80833d8d3a227419e2b25dec1"
+  integrity sha512-xNRBw4aoURcyz/S69B19WnZAkWJMxHMT5hE36GtDAyhoyn/8TuAeqjFJQlwk+MKQsUD7b3l7kFX+vlfVWgcX1w==
   dependencies:
+    "@types/estree" "^1.0.0"
     micromark-core-commonmark "^1.0.0"
     micromark-util-character "^1.0.0"
     micromark-util-events-to-acorn "^1.0.0"
@@ -6614,10 +7322,25 @@ micromark-extension-mdxjs-esm@^1.0.0:
     uvu "^0.5.0"
     vfile-message "^3.0.0"

+micromark-extension-mdxjs-esm@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/micromark-extension-mdxjs-esm/-/micromark-extension-mdxjs-esm-3.0.0.tgz#de21b2b045fd2059bd00d36746081de38390d54a"
+  integrity sha512-DJFl4ZqkErRpq/dAPyeWp15tGrcrrJho1hKK5uBS70BCtfrIFg81sqcTVu3Ta+KD1Tk5vAtBNElWxtAa+m8K9A==
+  dependencies:
+    "@types/estree" "^1.0.0"
+    devlop "^1.0.0"
+    micromark-core-commonmark "^2.0.0"
+    micromark-util-character "^2.0.0"
+    micromark-util-events-to-acorn "^2.0.0"
+    micromark-util-symbol "^2.0.0"
+    micromark-util-types "^2.0.0"
+    unist-util-position-from-estree "^2.0.0"
+    vfile-message "^4.0.0"
+
 micromark-extension-mdxjs@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/micromark-extension-mdxjs/-/micromark-extension-mdxjs-1.0.0.tgz#772644e12fc8299a33e50f59c5aa15727f6689dd"
-  integrity sha512-TZZRZgeHvtgm+IhtgC2+uDMR7h8eTKF0QUX9YsgoL9+bADBpBY6SiLvWqnBlLbCEevITmTqmEuY3FoxMKVs1rQ==
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/micromark-extension-mdxjs/-/micromark-extension-mdxjs-1.0.1.tgz#f78d4671678d16395efeda85170c520ee795ded8"
+  integrity sha512-7YA7hF6i5eKOfFUzZ+0z6avRG52GpWR8DL+kN47y3f2KhxbBZMhmxe7auOeaTBrW2DenbbZTf1ea9tA2hDpC2Q==
   dependencies:
     acorn "^8.0.0"
     acorn-jsx "^5.0.0"
@@ -6628,31 +7351,64 @@ micromark-extension-mdxjs@^1.0.0:
     micromark-util-combine-extensions "^1.0.0"
     micromark-util-types "^1.0.0"

+micromark-extension-mdxjs@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/micromark-extension-mdxjs/-/micromark-extension-mdxjs-3.0.0.tgz#b5a2e0ed449288f3f6f6c544358159557549de18"
+  integrity sha512-A873fJfhnJ2siZyUrJ31l34Uqwy4xIFmvPY1oj+Ean5PHcPBYzEsvqvWGaWcfEIr11O5Dlw3p2y0tZWpKHDejQ==
+  dependencies:
+    acorn "^8.0.0"
+    acorn-jsx "^5.0.0"
+    micromark-extension-mdx-expression "^3.0.0"
+    micromark-extension-mdx-jsx "^3.0.0"
+    micromark-extension-mdx-md "^2.0.0"
+    micromark-extension-mdxjs-esm "^3.0.0"
+    micromark-util-combine-extensions "^2.0.0"
+    micromark-util-types "^2.0.0"
+
 micromark-factory-destination@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/micromark-factory-destination/-/micromark-factory-destination-1.0.0.tgz#fef1cb59ad4997c496f887b6977aa3034a5a277e"
-  integrity sha512-eUBA7Rs1/xtTVun9TmV3gjfPz2wEwgK5R5xcbIM5ZYAtvGF6JkyaDsj0agx8urXnO31tEO6Ug83iVH3tdedLnw==
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/micromark-factory-destination/-/micromark-factory-destination-1.1.0.tgz#eb815957d83e6d44479b3df640f010edad667b9f"
+  integrity sha512-XaNDROBgx9SgSChd69pjiGKbV+nfHGDPVYFs5dOoDd7ZnMAE+Cuu91BCpsY8RT2NP9vo/B8pds2VQNCLiu0zhg==
   dependencies:
     micromark-util-character "^1.0.0"
     micromark-util-symbol "^1.0.0"
     micromark-util-types "^1.0.0"

+micromark-factory-destination@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/micromark-factory-destination/-/micromark-factory-destination-2.0.0.tgz#857c94debd2c873cba34e0445ab26b74f6a6ec07"
+  integrity sha512-j9DGrQLm/Uhl2tCzcbLhy5kXsgkHUrjJHg4fFAeoMRwJmJerT9aw4FEhIbZStWN8A3qMwOp1uzHr4UL8AInxtA==
+  dependencies:
+    micromark-util-character "^2.0.0"
+    micromark-util-symbol "^2.0.0"
+    micromark-util-types "^2.0.0"
+
 micromark-factory-label@^1.0.0:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/micromark-factory-label/-/micromark-factory-label-1.0.2.tgz#6be2551fa8d13542fcbbac478258fb7a20047137"
-  integrity sha512-CTIwxlOnU7dEshXDQ+dsr2n+yxpP0+fn271pu0bwDIS8uqfFcumXpj5mLn3hSC8iw2MUr6Gx8EcKng1dD7i6hg==
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/micromark-factory-label/-/micromark-factory-label-1.1.0.tgz#cc95d5478269085cfa2a7282b3de26eb2e2dec68"
+  integrity sha512-OLtyez4vZo/1NjxGhcpDSbHQ+m0IIGnT8BoPamh+7jVlzLJBH98zzuCoUeMxvM6WsNeh8wx8cKvqLiPHEACn0w==
   dependencies:
     micromark-util-character "^1.0.0"
     micromark-util-symbol "^1.0.0"
     micromark-util-types "^1.0.0"
     uvu "^0.5.0"

+micromark-factory-label@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/micromark-factory-label/-/micromark-factory-label-2.0.0.tgz#17c5c2e66ce39ad6f4fc4cbf40d972f9096f726a"
+  integrity sha512-RR3i96ohZGde//4WSe/dJsxOX6vxIg9TimLAS3i4EhBAFx8Sm5SmqVfR8E87DPSR31nEAjZfbt91OMZWcNgdZw==
+  dependencies:
+    devlop "^1.0.0"
+    micromark-util-character "^2.0.0"
+    micromark-util-symbol "^2.0.0"
+    micromark-util-types "^2.0.0"
+
 micromark-factory-mdx-expression@^1.0.0:
-  version "1.0.6"
-  resolved "https://registry.yarnpkg.com/micromark-factory-mdx-expression/-/micromark-factory-mdx-expression-1.0.6.tgz#917e17d16e6e9c2551f3a862e6a9ebdd22056476"
-  integrity sha512-WRQIc78FV7KrCfjsEf/sETopbYjElh3xAmNpLkd1ODPqxEngP42eVRGbiPEQWpRV27LzqW+XVTvQAMIIRLPnNA==
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/micromark-factory-mdx-expression/-/micromark-factory-mdx-expression-1.0.9.tgz#57ba4571b69a867a1530f34741011c71c73a4976"
+  integrity sha512-jGIWzSmNfdnkJq05c7b0+Wv0Kfz3NJ3N4cBjnbO4zjXIlxJr+f8lk+5ZmwFvqdAbUy2q6B5rCY//g0QAAaXDWA==
   dependencies:
-    micromark-factory-space "^1.0.0"
+    "@types/estree" "^1.0.0"
     micromark-util-character "^1.0.0"
     micromark-util-events-to-acorn "^1.0.0"
     micromark-util-symbol "^1.0.0"
@@ -6661,154 +7417,313 @@ micromark-factory-mdx-expression@^1.0.0:
     uvu "^0.5.0"
     vfile-message "^3.0.0"

+micromark-factory-mdx-expression@^2.0.0:
+  version "2.0.2"
+  resolved "https://registry.yarnpkg.com/micromark-factory-mdx-expression/-/micromark-factory-mdx-expression-2.0.2.tgz#2afaa8ba6d5f63e0cead3e4dee643cad184ca260"
+  integrity sha512-5E5I2pFzJyg2CtemqAbcyCktpHXuJbABnsb32wX2U8IQKhhVFBqkcZR5LRm1WVoFqa4kTueZK4abep7wdo9nrw==
+  dependencies:
+    "@types/estree" "^1.0.0"
+    devlop "^1.0.0"
+    micromark-factory-space "^2.0.0"
+    micromark-util-character "^2.0.0"
+    micromark-util-events-to-acorn "^2.0.0"
+    micromark-util-symbol "^2.0.0"
+    micromark-util-types "^2.0.0"
+    unist-util-position-from-estree "^2.0.0"
+    vfile-message "^4.0.0"
+
 micromark-factory-space@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/micromark-factory-space/-/micromark-factory-space-1.0.0.tgz#cebff49968f2b9616c0fcb239e96685cb9497633"
-  integrity sha512-qUmqs4kj9a5yBnk3JMLyjtWYN6Mzfcx8uJfi5XAveBniDevmZasdGBba5b4QsvRcAkmvGo5ACmSUmyGiKTLZew==
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/micromark-factory-space/-/micromark-factory-space-1.1.0.tgz#c8f40b0640a0150751d3345ed885a080b0d15faf"
+  integrity sha512-cRzEj7c0OL4Mw2v6nwzttyOZe8XY/Z8G0rzmWQZTBi/jjwyw/U4uqKtUORXQrR5bAZZnbTI/feRV/R7hc4jQYQ==
   dependencies:
     micromark-util-character "^1.0.0"
     micromark-util-types "^1.0.0"

+micromark-factory-space@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz#5e7afd5929c23b96566d0e1ae018ae4fcf81d030"
+  integrity sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==
+  dependencies:
+    micromark-util-character "^2.0.0"
+    micromark-util-types "^2.0.0"
+
 micromark-factory-title@^1.0.0:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/micromark-factory-title/-/micromark-factory-title-1.0.2.tgz#7e09287c3748ff1693930f176e1c4a328382494f"
-  integrity sha512-zily+Nr4yFqgMGRKLpTVsNl5L4PMu485fGFDOQJQBl2NFpjGte1e86zC0da93wf97jrc4+2G2GQudFMHn3IX+A==
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/micromark-factory-title/-/micromark-factory-title-1.1.0.tgz#dd0fe951d7a0ac71bdc5ee13e5d1465ad7f50ea1"
+  integrity sha512-J7n9R3vMmgjDOCY8NPw55jiyaQnH5kBdV2/UXCtZIpnHH3P6nHUKaH7XXEYuWwx/xUJcawa8plLBEjMPU24HzQ==
   dependencies:
     micromark-factory-space "^1.0.0"
     micromark-util-character "^1.0.0"
     micromark-util-symbol "^1.0.0"
     micromark-util-types "^1.0.0"
-    uvu "^0.5.0"
+
+micromark-factory-title@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/micromark-factory-title/-/micromark-factory-title-2.0.0.tgz#726140fc77892af524705d689e1cf06c8a83ea95"
+  integrity sha512-jY8CSxmpWLOxS+t8W+FG3Xigc0RDQA9bKMY/EwILvsesiRniiVMejYTE4wumNc2f4UbAa4WsHqe3J1QS1sli+A==
+  dependencies:
+    micromark-factory-space "^2.0.0"
+    micromark-util-character "^2.0.0"
+    micromark-util-symbol "^2.0.0"
+    micromark-util-types "^2.0.0"

 micromark-factory-whitespace@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/micromark-factory-whitespace/-/micromark-factory-whitespace-1.0.0.tgz#e991e043ad376c1ba52f4e49858ce0794678621c"
-  integrity sha512-Qx7uEyahU1lt1RnsECBiuEbfr9INjQTGa6Err+gF3g0Tx4YEviPbqqGKNv/NrBaE7dVHdn1bVZKM/n5I/Bak7A==
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/micromark-factory-whitespace/-/micromark-factory-whitespace-1.1.0.tgz#798fb7489f4c8abafa7ca77eed6b5745853c9705"
+  integrity sha512-v2WlmiymVSp5oMg+1Q0N1Lxmt6pMhIHD457whWM7/GUlEks1hI9xj5w3zbc4uuMKXGisksZk8DzP2UyGbGqNsQ==
   dependencies:
     micromark-factory-space "^1.0.0"
     micromark-util-character "^1.0.0"
     micromark-util-symbol "^1.0.0"
     micromark-util-types "^1.0.0"

-micromark-util-character@^1.0.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/micromark-util-character/-/micromark-util-character-1.1.0.tgz#d97c54d5742a0d9611a68ca0cd4124331f264d86"
-  integrity sha512-agJ5B3unGNJ9rJvADMJ5ZiYjBRyDpzKAOk01Kpi1TKhlT1APx3XZk6eN7RtSz1erbWHC2L8T3xLZ81wdtGRZzg==
+micromark-factory-whitespace@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.0.tgz#9e92eb0f5468083381f923d9653632b3cfb5f763"
+  integrity sha512-28kbwaBjc5yAI1XadbdPYHX/eDnqaUFVikLwrO7FDnKG7lpgxnvk/XGRhX/PN0mOZ+dBSZ+LgunHS+6tYQAzhA==
+  dependencies:
+    micromark-factory-space "^2.0.0"
+    micromark-util-character "^2.0.0"
+    micromark-util-symbol "^2.0.0"
+    micromark-util-types "^2.0.0"
+
+micromark-util-character@^1.0.0, micromark-util-character@^1.1.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/micromark-util-character/-/micromark-util-character-1.2.0.tgz#4fedaa3646db249bc58caeb000eb3549a8ca5dcc"
+  integrity sha512-lXraTwcX3yH/vMDaFWCQJP1uIszLVebzUa3ZHdrgxr7KEU/9mL4mVgCpGbyhvNLNlauROiNUq7WN5u7ndbY6xg==
   dependencies:
     micromark-util-symbol "^1.0.0"
     micromark-util-types "^1.0.0"

+micromark-util-character@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/micromark-util-character/-/micromark-util-character-2.1.0.tgz#31320ace16b4644316f6bf057531689c71e2aee1"
+  integrity sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==
+  dependencies:
+    micromark-util-symbol "^2.0.0"
+    micromark-util-types "^2.0.0"
+
 micromark-util-chunked@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/micromark-util-chunked/-/micromark-util-chunked-1.0.0.tgz#5b40d83f3d53b84c4c6bce30ed4257e9a4c79d06"
-  integrity sha512-5e8xTis5tEZKgesfbQMKRCyzvffRRUX+lK/y+DvsMFdabAicPkkZV6gO+FEWi9RfuKKoxxPwNL+dFF0SMImc1g==
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/micromark-util-chunked/-/micromark-util-chunked-1.1.0.tgz#37a24d33333c8c69a74ba12a14651fd9ea8a368b"
+  integrity sha512-Ye01HXpkZPNcV6FiyoW2fGZDUw4Yc7vT0E9Sad83+bEDiCJ1uXu0S3mr8WLpsz3HaG3x2q0HM6CTuPdcZcluFQ==
   dependencies:
     micromark-util-symbol "^1.0.0"

+micromark-util-chunked@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/micromark-util-chunked/-/micromark-util-chunked-2.0.0.tgz#e51f4db85fb203a79dbfef23fd41b2f03dc2ef89"
+  integrity sha512-anK8SWmNphkXdaKgz5hJvGa7l00qmcaUQoMYsBwDlSKFKjc6gjGXPDw3FNL3Nbwq5L8gE+RCbGqTw49FK5Qyvg==
+  dependencies:
+    micromark-util-symbol "^2.0.0"
+
 micromark-util-classify-character@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/micromark-util-classify-character/-/micromark-util-classify-character-1.0.0.tgz#cbd7b447cb79ee6997dd274a46fc4eb806460a20"
-  integrity sha512-F8oW2KKrQRb3vS5ud5HIqBVkCqQi224Nm55o5wYLzY/9PwHGXC01tr3d7+TqHHz6zrKQ72Okwtvm/xQm6OVNZA==
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/micromark-util-classify-character/-/micromark-util-classify-character-1.1.0.tgz#6a7f8c8838e8a120c8e3c4f2ae97a2bff9190e9d"
+  integrity sha512-SL0wLxtKSnklKSUplok1WQFoGhUdWYKggKUiqhX+Swala+BtptGCu5iPRc+xvzJ4PXE/hwM3FNXsfEVgoZsWbw==
   dependencies:
     micromark-util-character "^1.0.0"
     micromark-util-symbol "^1.0.0"
     micromark-util-types "^1.0.0"

+micromark-util-classify-character@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/micromark-util-classify-character/-/micromark-util-classify-character-2.0.0.tgz#8c7537c20d0750b12df31f86e976d1d951165f34"
+  integrity sha512-S0ze2R9GH+fu41FA7pbSqNWObo/kzwf8rN/+IGlW/4tC6oACOs8B++bh+i9bVyNnwCcuksbFwsBme5OCKXCwIw==
+  dependencies:
+    micromark-util-character "^2.0.0"
+    micromark-util-symbol "^2.0.0"
+    micromark-util-types "^2.0.0"
+
 micromark-util-combine-extensions@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/micromark-util-combine-extensions/-/micromark-util-combine-extensions-1.0.0.tgz#91418e1e74fb893e3628b8d496085639124ff3d5"
-  integrity sha512-J8H058vFBdo/6+AsjHp2NF7AJ02SZtWaVUjsayNFeAiydTxUwViQPxN0Hf8dp4FmCQi0UUFovFsEyRSUmFH3MA==
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/micromark-util-combine-extensions/-/micromark-util-combine-extensions-1.1.0.tgz#192e2b3d6567660a85f735e54d8ea6e3952dbe84"
+  integrity sha512-Q20sp4mfNf9yEqDL50WwuWZHUrCO4fEyeDCnMGmG5Pr0Cz15Uo7KBs6jq+dq0EgX4DPwwrh9m0X+zPV1ypFvUA==
   dependencies:
     micromark-util-chunked "^1.0.0"
     micromark-util-types "^1.0.0"

+micromark-util-combine-extensions@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.0.tgz#75d6ab65c58b7403616db8d6b31315013bfb7ee5"
+  integrity sha512-vZZio48k7ON0fVS3CUgFatWHoKbbLTK/rT7pzpJ4Bjp5JjkZeasRfrS9wsBdDJK2cJLHMckXZdzPSSr1B8a4oQ==
+  dependencies:
+    micromark-util-chunked "^2.0.0"
+    micromark-util-types "^2.0.0"
+
 micromark-util-decode-numeric-character-reference@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-1.0.0.tgz#dcc85f13b5bd93ff8d2868c3dba28039d490b946"
-  integrity sha512-OzO9AI5VUtrTD7KSdagf4MWgHMtET17Ua1fIpXTpuhclCqD8egFWo85GxSGvxgkGS74bEahvtM0WP0HjvV0e4w==
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-1.1.0.tgz#b1e6e17009b1f20bc652a521309c5f22c85eb1c6"
+  integrity sha512-m9V0ExGv0jB1OT21mrWcuf4QhP46pH1KkfWy9ZEezqHKAxkj4mPCy3nIH1rkbdMlChLHX531eOrymlwyZIf2iw==
   dependencies:
     micromark-util-symbol "^1.0.0"

+micromark-util-decode-numeric-character-reference@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.1.tgz#2698bbb38f2a9ba6310e359f99fcb2b35a0d2bd5"
+  integrity sha512-bmkNc7z8Wn6kgjZmVHOX3SowGmVdhYS7yBpMnuMnPzDq/6xwVA604DuOXMZTO1lvq01g+Adfa0pE2UKGlxL1XQ==
+  dependencies:
+    micromark-util-symbol "^2.0.0"
+
 micromark-util-decode-string@^1.0.0:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/micromark-util-decode-string/-/micromark-util-decode-string-1.0.2.tgz#942252ab7a76dec2dbf089cc32505ee2bc3acf02"
-  integrity sha512-DLT5Ho02qr6QWVNYbRZ3RYOSSWWFuH3tJexd3dgN1odEuPNxCngTCXJum7+ViRAd9BbdxCvMToPOD/IvVhzG6Q==
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/micromark-util-decode-string/-/micromark-util-decode-string-1.1.0.tgz#dc12b078cba7a3ff690d0203f95b5d5537f2809c"
+  integrity sha512-YphLGCK8gM1tG1bd54azwyrQRjCFcmgj2S2GoJDNnh4vYtnL38JS8M4gpxzOPNyHdNEpheyWXCTnnTDY3N+NVQ==
   dependencies:
     decode-named-character-reference "^1.0.0"
     micromark-util-character "^1.0.0"
     micromark-util-decode-numeric-character-reference "^1.0.0"
     micromark-util-symbol "^1.0.0"

+micromark-util-decode-string@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/micromark-util-decode-string/-/micromark-util-decode-string-2.0.0.tgz#7dfa3a63c45aecaa17824e656bcdb01f9737154a"
+  integrity sha512-r4Sc6leeUTn3P6gk20aFMj2ntPwn6qpDZqWvYmAG6NgvFTIlj4WtrAudLi65qYoaGdXYViXYw2pkmn7QnIFasA==
+  dependencies:
+    decode-named-character-reference "^1.0.0"
+    micromark-util-character "^2.0.0"
+    micromark-util-decode-numeric-character-reference "^2.0.0"
+    micromark-util-symbol "^2.0.0"
+
 micromark-util-encode@^1.0.0:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/micromark-util-encode/-/micromark-util-encode-1.0.1.tgz#2c1c22d3800870ad770ece5686ebca5920353383"
-  integrity sha512-U2s5YdnAYexjKDel31SVMPbfi+eF8y1U4pfiRW/Y8EFVCy/vgxk/2wWTxzcqE71LHtCuCzlBDRU2a5CQ5j+mQA==
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/micromark-util-encode/-/micromark-util-encode-1.1.0.tgz#92e4f565fd4ccb19e0dcae1afab9a173bbeb19a5"
+  integrity sha512-EuEzTWSTAj9PA5GOAs992GzNh2dGQO52UvAbtSOMvXTxv3Criqb6IOzJUBCmEqrrXSblJIJBbFFv6zPxpreiJw==
+
+micromark-util-encode@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/micromark-util-encode/-/micromark-util-encode-2.0.0.tgz#0921ac7953dc3f1fd281e3d1932decfdb9382ab1"
+  integrity sha512-pS+ROfCXAGLWCOc8egcBvT0kf27GoWMqtdarNfDcjb6YLuV5cM3ioG45Ys2qOVqeqSbjaKg72vU+Wby3eddPsA==

 micromark-util-events-to-acorn@^1.0.0:
-  version "1.2.0"
-  resolved "https://registry.yarnpkg.com/micromark-util-events-to-acorn/-/micromark-util-events-to-acorn-1.2.0.tgz#65785cb77299d791bfefdc6a5213ab57ceead115"
-  integrity sha512-WWp3bf7xT9MppNuw3yPjpnOxa8cj5ACivEzXJKu0WwnjBYfzaBvIAT9KfeyI0Qkll+bfQtfftSwdgTH6QhTOKw==
+  version "1.2.3"
+  resolved "https://registry.yarnpkg.com/micromark-util-events-to-acorn/-/micromark-util-events-to-acorn-1.2.3.tgz#a4ab157f57a380e646670e49ddee97a72b58b557"
+  integrity sha512-ij4X7Wuc4fED6UoLWkmo0xJQhsktfNh1J0m8g4PbIMPlx+ek/4YdW5mvbye8z/aZvAPUoxgXHrwVlXAPKMRp1w==
   dependencies:
     "@types/acorn" "^4.0.0"
     "@types/estree" "^1.0.0"
+    "@types/unist" "^2.0.0"
     estree-util-visit "^1.0.0"
+    micromark-util-symbol "^1.0.0"
     micromark-util-types "^1.0.0"
     uvu "^0.5.0"
-    vfile-location "^4.0.0"
     vfile-message "^3.0.0"

+micromark-util-events-to-acorn@^2.0.0:
+  version "2.0.2"
+  resolved "https://registry.yarnpkg.com/micromark-util-events-to-acorn/-/micromark-util-events-to-acorn-2.0.2.tgz#4275834f5453c088bd29cd72dfbf80e3327cec07"
+  integrity sha512-Fk+xmBrOv9QZnEDguL9OI9/NQQp6Hz4FuQ4YmCb/5V7+9eAh1s6AYSvL20kHkD67YIg7EpE54TiSlcsf3vyZgA==
+  dependencies:
+    "@types/acorn" "^4.0.0"
+    "@types/estree" "^1.0.0"
+    "@types/unist" "^3.0.0"
+    devlop "^1.0.0"
+    estree-util-visit "^2.0.0"
+    micromark-util-symbol "^2.0.0"
+    micromark-util-types "^2.0.0"
+    vfile-message "^4.0.0"
+
 micromark-util-html-tag-name@^1.0.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/micromark-util-html-tag-name/-/micromark-util-html-tag-name-1.1.0.tgz#eb227118befd51f48858e879b7a419fc0df20497"
-  integrity sha512-BKlClMmYROy9UiV03SwNmckkjn8QHVaWkqoAqzivabvdGcwNGMMMH/5szAnywmsTBUzDsU57/mFi0sp4BQO6dA==
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/micromark-util-html-tag-name/-/micromark-util-html-tag-name-1.2.0.tgz#48fd7a25826f29d2f71479d3b4e83e94829b3588"
+  integrity sha512-VTQzcuQgFUD7yYztuQFKXT49KghjtETQ+Wv/zUjGSGBioZnkA4P1XXZPT1FHeJA6RwRXSF47yvJ1tsJdoxwO+Q==
+
+micromark-util-html-tag-name@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.0.tgz#ae34b01cbe063363847670284c6255bb12138ec4"
+  integrity sha512-xNn4Pqkj2puRhKdKTm8t1YHC/BAjx6CEwRFXntTaRf/x16aqka6ouVoutm+QdkISTlT7e2zU7U4ZdlDLJd2Mcw==

 micromark-util-normalize-identifier@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-1.0.0.tgz#4a3539cb8db954bbec5203952bfe8cedadae7828"
-  integrity sha512-yg+zrL14bBTFrQ7n35CmByWUTFsgst5JhA4gJYoty4Dqzj4Z4Fr/DHekSS5aLfH9bdlfnSvKAWsAgJhIbogyBg==
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-1.1.0.tgz#7a73f824eb9f10d442b4d7f120fecb9b38ebf8b7"
+  integrity sha512-N+w5vhqrBihhjdpM8+5Xsxy71QWqGn7HYNUvch71iV2PM7+E3uWGox1Qp90loa1ephtCxG2ftRV/Conitc6P2Q==
   dependencies:
     micromark-util-symbol "^1.0.0"

+micromark-util-normalize-identifier@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.0.tgz#91f9a4e65fe66cc80c53b35b0254ad67aa431d8b"
+  integrity sha512-2xhYT0sfo85FMrUPtHcPo2rrp1lwbDEEzpx7jiH2xXJLqBuy4H0GgXk5ToU8IEwoROtXuL8ND0ttVa4rNqYK3w==
+  dependencies:
+    micromark-util-symbol "^2.0.0"
+
 micromark-util-resolve-all@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/micromark-util-resolve-all/-/micromark-util-resolve-all-1.0.0.tgz#a7c363f49a0162e931960c44f3127ab58f031d88"
-  integrity sha512-CB/AGk98u50k42kvgaMM94wzBqozSzDDaonKU7P7jwQIuH2RU0TeBqGYJz2WY1UdihhjweivStrJ2JdkdEmcfw==
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/micromark-util-resolve-all/-/micromark-util-resolve-all-1.1.0.tgz#4652a591ee8c8fa06714c9b54cd6c8e693671188"
+  integrity sha512-b/G6BTMSg+bX+xVCshPTPyAu2tmA0E4X98NSR7eIbeC6ycCqCeE7wjfDIgzEbkzdEVJXRtOG4FbEm/uGbCRouA==
   dependencies:
     micromark-util-types "^1.0.0"

+micromark-util-resolve-all@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.0.tgz#189656e7e1a53d0c86a38a652b284a252389f364"
+  integrity sha512-6KU6qO7DZ7GJkaCgwBNtplXCvGkJToU86ybBAUdavvgsCiG8lSSvYxr9MhwmQ+udpzywHsl4RpGJsYWG1pDOcA==
+  dependencies:
+    micromark-util-types "^2.0.0"
+
 micromark-util-sanitize-uri@^1.0.0, micromark-util-sanitize-uri@^1.1.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-1.1.0.tgz#f12e07a85106b902645e0364feb07cf253a85aee"
-  integrity sha512-RoxtuSCX6sUNtxhbmsEFQfWzs8VN7cTctmBPvYivo98xb/kDEoTCtJQX5wyzIYEmk/lvNFTat4hL8oW0KndFpg==
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-1.2.0.tgz#613f738e4400c6eedbc53590c67b197e30d7f90d"
+  integrity sha512-QO4GXv0XZfWey4pYFndLUKEAktKkG5kZTdUNaTAkzbuJxn2tNBOr+QtxR2XpWaMhbImT2dPzyLrPXLlPhph34A==
   dependencies:
     micromark-util-character "^1.0.0"
     micromark-util-encode "^1.0.0"
     micromark-util-symbol "^1.0.0"

+micromark-util-sanitize-uri@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.0.tgz#ec8fbf0258e9e6d8f13d9e4770f9be64342673de"
+  integrity sha512-WhYv5UEcZrbAtlsnPuChHUAsu/iBPOVaEVsntLBIdpibO0ddy8OzavZz3iL2xVvBZOpolujSliP65Kq0/7KIYw==
+  dependencies:
+    micromark-util-character "^2.0.0"
+    micromark-util-encode "^2.0.0"
+    micromark-util-symbol "^2.0.0"
+
 micromark-util-subtokenize@^1.0.0:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/micromark-util-subtokenize/-/micromark-util-subtokenize-1.0.2.tgz#ff6f1af6ac836f8bfdbf9b02f40431760ad89105"
-  integrity sha512-d90uqCnXp/cy4G881Ub4psE57Sf8YD0pim9QdjCRNjfas2M1u6Lbt+XZK9gnHL2XFhnozZiEdCa9CNfXSfQ6xA==
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/micromark-util-subtokenize/-/micromark-util-subtokenize-1.1.0.tgz#941c74f93a93eaf687b9054aeb94642b0e92edb1"
+  integrity sha512-kUQHyzRoxvZO2PuLzMt2P/dwVsTiivCK8icYTeR+3WgbuPqfHgPPy7nFKbeqRivBvn/3N3GBiNC+JRTMSxEC7A==
   dependencies:
     micromark-util-chunked "^1.0.0"
     micromark-util-symbol "^1.0.0"
     micromark-util-types "^1.0.0"
     uvu "^0.5.0"

-micromark-util-symbol@^1.0.0:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/micromark-util-symbol/-/micromark-util-symbol-1.0.1.tgz#b90344db62042ce454f351cf0bebcc0a6da4920e"
-  integrity sha512-oKDEMK2u5qqAptasDAwWDXq0tG9AssVwAx3E9bBF3t/shRIGsWIRG+cGafs2p/SnDSOecnt6hZPCE2o6lHfFmQ==
+micromark-util-subtokenize@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/micromark-util-subtokenize/-/micromark-util-subtokenize-2.0.1.tgz#76129c49ac65da6e479c09d0ec4b5f29ec6eace5"
+  integrity sha512-jZNtiFl/1aY73yS3UGQkutD0UbhTt68qnRpw2Pifmz5wV9h8gOVsN70v+Lq/f1rKaU/W8pxRe8y8Q9FX1AOe1Q==
+  dependencies:
+    devlop "^1.0.0"
+    micromark-util-chunked "^2.0.0"
+    micromark-util-symbol "^2.0.0"
+    micromark-util-types "^2.0.0"
+
+micromark-util-symbol@^1.0.0, micromark-util-symbol@^1.0.1:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/micromark-util-symbol/-/micromark-util-symbol-1.1.0.tgz#813cd17837bdb912d069a12ebe3a44b6f7063142"
+  integrity sha512-uEjpEYY6KMs1g7QfJ2eX1SQEV+ZT4rUD3UcF6l57acZvLNK7PBZL+ty82Z1qhK1/yXIY4bdx04FKMgR0g4IAag==
+
+micromark-util-symbol@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz#12225c8f95edf8b17254e47080ce0862d5db8044"
+  integrity sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==

 micromark-util-types@^1.0.0, micromark-util-types@^1.0.1:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/micromark-util-types/-/micromark-util-types-1.0.2.tgz#f4220fdb319205812f99c40f8c87a9be83eded20"
-  integrity sha512-DCfg/T8fcrhrRKTPjRrw/5LLvdGV7BHySf/1LOZx7TzWZdYRjogNtyNq885z3nNallwr3QUKARjqvHqX1/7t+w==
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/micromark-util-types/-/micromark-util-types-1.1.0.tgz#e6676a8cae0bb86a2171c498167971886cb7e283"
+  integrity sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg==
+
+micromark-util-types@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/micromark-util-types/-/micromark-util-types-2.0.0.tgz#63b4b7ffeb35d3ecf50d1ca20e68fc7caa36d95e"
+  integrity sha512-oNh6S2WMHWRZrmutsRmDDfkzKtxF+bc2VxLC9dvtrDIRFln627VsFP6fLMgTryGDljgLPjkrzQSDcPrjPyDJ5w==

 micromark@^3.0.0:
-  version "3.1.0"
-  resolved "https://registry.yarnpkg.com/micromark/-/micromark-3.1.0.tgz#eeba0fe0ac1c9aaef675157b52c166f125e89f62"
-  integrity sha512-6Mj0yHLdUZjHnOPgr5xfWIMqMWS12zDN6iws9SLuSz76W8jTtAv24MN4/CL7gJrl5vtxGInkkqDv/JIoRsQOvA==
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/micromark/-/micromark-3.2.0.tgz#1af9fef3f995ea1ea4ac9c7e2f19c48fd5c006e9"
+  integrity sha512-uD66tJj54JLYq0De10AhWycZWGQNUvDI55xPgk2sQM5kn1JYlhbCMTtEeT27+vAhW2FBQxLlOmS3pmA7/2z4aA==
   dependencies:
     "@types/debug" "^4.0.0"
     debug "^4.0.0"
@@ -6828,19 +7743,47 @@ micromark@^3.0.0:
     micromark-util-types "^1.0.1"
     uvu "^0.5.0"

+micromark@^4.0.0:
+  version "4.0.0"
+  resolved "https://registry.yarnpkg.com/micromark/-/micromark-4.0.0.tgz#84746a249ebd904d9658cfabc1e8e5f32cbc6249"
+  integrity sha512-o/sd0nMof8kYff+TqcDx3VSrgBTcZpSvYcAHIfHhv5VAuNmisCxjhx6YmxS8PFEpb9z5WKWKPdzf0jM23ro3RQ==
+  dependencies:
+    "@types/debug" "^4.0.0"
+    debug "^4.0.0"
+    decode-named-character-reference "^1.0.0"
+    devlop "^1.0.0"
+    micromark-core-commonmark "^2.0.0"
+    micromark-factory-space "^2.0.0"
+    micromark-util-character "^2.0.0"
+    micromark-util-chunked "^2.0.0"
+    micromark-util-combine-extensions "^2.0.0"
+    micromark-util-decode-numeric-character-reference "^2.0.0"
+    micromark-util-encode "^2.0.0"
+    micromark-util-normalize-identifier "^2.0.0"
+    micromark-util-resolve-all "^2.0.0"
+    micromark-util-sanitize-uri "^2.0.0"
+    micromark-util-subtokenize "^2.0.0"
+    micromark-util-symbol "^2.0.0"
+    micromark-util-types "^2.0.0"
+
 micromatch@^4.0.2, micromatch@^4.0.4, micromatch@^4.0.5:
-  version "4.0.5"
-  resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.5.tgz#bc8999a7cbbf77cdc89f132f6e467051b49090c6"
-  integrity sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==
+  version "4.0.8"
+  resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.8.tgz#d66fa18f3a47076789320b9b1af32bd86d9fa202"
+  integrity sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==
   dependencies:
-    braces "^3.0.2"
+    braces "^3.0.3"
     picomatch "^2.3.1"

-mime-db@1.52.0, "mime-db@>= 1.43.0 < 2":
+mime-db@1.52.0:
   version "1.52.0"
   resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.52.0.tgz#bbabcdc02859f4987301c856e3387ce5ec43bf70"
   integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==

+"mime-db@>= 1.43.0 < 2":
+  version "1.53.0"
+  resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.53.0.tgz#3cb63cd820fc29896d9d4e8c32ab4fcd74ccb447"
+  integrity sha512-oHlN/w+3MQ3rba9rqFr6V/ypF10LSkdwUysQL7GkXoTgIWeV+tcXGA852TBxH+gsh8UWoyhR1hKcoMJTuWflpg==
+
 mime-db@~1.33.0:
   version "1.33.0"
   resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.33.0.tgz#a3492050a5cb9b63450541e39d9788d2272783db"
@@ -6870,17 +7813,23 @@ mimic-fn@^2.1.0:
   resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-2.1.0.tgz#7ed2c2ccccaf84d3ffcb7a69b57711fc2083401b"
   integrity sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==

-mimic-response@^1.0.0, mimic-response@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/mimic-response/-/mimic-response-1.0.1.tgz#4923538878eef42063cb8a3e3b0798781487ab1b"
-  integrity sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==
+mimic-response@^3.1.0:
+  version "3.1.0"
+  resolved "https://registry.yarnpkg.com/mimic-response/-/mimic-response-3.1.0.tgz#2d1d59af9c1b129815accc2c46a022a5ce1fa3c9"
+  integrity sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==
+
+mimic-response@^4.0.0:
+  version "4.0.0"
+  resolved "https://registry.yarnpkg.com/mimic-response/-/mimic-response-4.0.0.tgz#35468b19e7c75d10f5165ea25e75a5ceea7cf70f"
+  integrity sha512-e5ISH9xMYU0DzrT+jl8q2ze9D6eWBto+I8CNpe+VI+K2J/F/k3PdkdTdz4wvGVH4NTpo+NRYTVIuMQEMMcsLqg==

-mini-css-extract-plugin@^2.6.1:
-  version "2.7.2"
-  resolved "https://registry.yarnpkg.com/mini-css-extract-plugin/-/mini-css-extract-plugin-2.7.2.tgz#e049d3ea7d3e4e773aad585c6cb329ce0c7b72d7"
-  integrity sha512-EdlUizq13o0Pd+uCp+WO/JpkLvHRVGt97RqfeGhXqAcorYo1ypJSpkV+WDT0vY/kmh/p7wRdJNJtuyK540PXDw==
+mini-css-extract-plugin@^2.7.6:
+  version "2.9.1"
+  resolved "https://registry.yarnpkg.com/mini-css-extract-plugin/-/mini-css-extract-plugin-2.9.1.tgz#4d184f12ce90582e983ccef0f6f9db637b4be758"
+  integrity sha512-+Vyi+GCCOHnrJ2VPS+6aPoXN2k2jgUzDRhTFLjjTBn23qyXJXkjUWQgTL+mXpF5/A8ixLdCc6kWsoeOjKGejKQ==
   dependencies:
     schema-utils "^4.0.0"
+    tapable "^2.2.1"

 minimalistic-assert@^1.0.0:
   version "1.0.1"
@@ -6894,10 +7843,10 @@ minimatch@3.1.2, minimatch@^3.0.4, minimatch@^3.0.5, minimatch@^3.1.1:
   dependencies:
     brace-expansion "^1.1.7"

-minimist@^1.2.0, minimist@^1.2.5, minimist@^1.2.6:
-  version "1.2.7"
-  resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.7.tgz#daa1c4d91f507390437c6a8bc01078e7000c4d18"
-  integrity sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g==
+minimist@^1.2.0, minimist@^1.2.6:
+  version "1.2.8"
+  resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c"
+  integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==

 mkdirp@0.3.0:
   version "0.3.0"
@@ -6911,32 +7860,22 @@ mkdirp@~0.5.1:
   dependencies:
     minimist "^1.2.6"

-moment-mini@^2.24.0:
-  version "2.29.4"
-  resolved "https://registry.yarnpkg.com/moment-mini/-/moment-mini-2.29.4.tgz#cbbcdc58ce1b267506f28ea6668dbe060a32758f"
-  integrity sha512-uhXpYwHFeiTbY9KSgPPRoo1nt8OxNVdMVoTBYHfSEKeRkIkwGpO+gERmhuhBtzfaeOyTkykSrm2+noJBgqt3Hg==
-
 mri@^1.1.0:
   version "1.2.0"
   resolved "https://registry.yarnpkg.com/mri/-/mri-1.2.0.tgz#6721480fec2a11a4889861115a48b6cbe7cc8f0b"
   integrity sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA==

-mrmime@^1.0.0:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/mrmime/-/mrmime-1.0.1.tgz#5f90c825fad4bdd41dc914eff5d1a8cfdaf24f27"
-  integrity sha512-hzzEagAgDyoU1Q6yg5uI+AorQgdvMCur3FcKf7NhMKWsaYg+RnbTyHRa/9IlLF9rf455MOCtcqqrQQ83pPP7Uw==
+mrmime@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/mrmime/-/mrmime-2.0.0.tgz#151082a6e06e59a9a39b46b3e14d5cfe92b3abb4"
+  integrity sha512-eu38+hdgojoyq63s+yTpN4XMBdt5l8HhMhc4VKLO9KM5caLIBvUm4thi7fFaxyTmCKeNnXZ5pAlBwCUnhA09uw==

 ms@2.0.0:
   version "2.0.0"
   resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8"
   integrity sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==

-ms@2.1.2:
-  version "2.1.2"
-  resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009"
-  integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==
-
-ms@2.1.3:
+ms@2.1.3, ms@^2.1.3:
   version "2.1.3"
   resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2"
   integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==
@@ -6949,10 +7888,10 @@ multicast-dns@^7.2.5:
     dns-packet "^5.2.2"
     thunky "^1.0.2"

-nanoid@^3.3.4:
-  version "3.3.4"
-  resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.4.tgz#730b67e3cd09e2deacf03c027c81c9d9dbc5e8ab"
-  integrity sha512-MqBkQh/OHTS2egovRtLk45wEyNXwF+cokD+1YPf9u5VfJiRdAiRwB2froX5Co9Rh20xs4siNPm8naNotSD6RBw==
+nanoid@^3.3.7:
+  version "3.3.7"
+  resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.7.tgz#d0c301a691bc8d54efa0a2226ccf3fe2fd656bd8"
+  integrity sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==

 negotiator@0.6.3:
   version "0.6.3"
@@ -6972,12 +7911,15 @@ no-case@^3.0.4:
     lower-case "^2.0.2"
     tslib "^2.0.3"

-node-emoji@^1.10.0:
-  version "1.11.0"
-  resolved "https://registry.yarnpkg.com/node-emoji/-/node-emoji-1.11.0.tgz#69a0150e6946e2f115e9d7ea4df7971e2628301c"
-  integrity sha512-wo2DpQkQp7Sjm2A0cq+sN7EHKO6Sl0ctXeBdFZrL9T9+UywORbufTcTZxom8YqpLQt/FqNMUkOpkZrJVYSKD3A==
+node-emoji@^2.1.0:
+  version "2.1.3"
+  resolved "https://registry.yarnpkg.com/node-emoji/-/node-emoji-2.1.3.tgz#93cfabb5cc7c3653aa52f29d6ffb7927d8047c06"
+  integrity sha512-E2WEOVsgs7O16zsURJ/eH8BqhF029wGpEOnv7Urwdo2wmQanOACwJQh0devF9D9RhoZru0+9JXIS0dBXIAz+lA==
   dependencies:
-    lodash "^4.17.21"
+    "@sindresorhus/is" "^4.6.0"
+    char-regex "^1.0.2"
+    emojilib "^2.4.0"
+    skin-tone "^2.0.0"

 node-fetch@2.6.7:
   version "2.6.7"
@@ -6991,10 +7933,10 @@ node-forge@^1:
   resolved "https://registry.yarnpkg.com/node-forge/-/node-forge-1.3.1.tgz#be8da2af243b2417d5f646a770663a92b7e9ded3"
   integrity sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==

-node-releases@^2.0.6:
-  version "2.0.8"
-  resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.8.tgz#0f349cdc8fcfa39a92ac0be9bc48b7706292b9ae"
-  integrity sha512-dFSmB8fFHEH/s81Xi+Y/15DQY6VHW81nXRj86EMSL3lmuTmK1e+aT4wrFCkTbm+gSwkw4KpX+rT/pMM2c1mF+A==
+node-releases@^2.0.18:
+  version "2.0.18"
+  resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.18.tgz#f010e8d35e2fe8d6b2944f03f70213ecedc4ca3f"
+  integrity sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g==

 non-layered-tidy-tree-layout@^2.0.2:
   version "2.0.2"
@@ -7018,15 +7960,10 @@ normalize-range@^0.1.2:
   resolved "https://registry.yarnpkg.com/normalize-range/-/normalize-range-0.1.2.tgz#2d10c06bdfd312ea9777695a4d28439456b75942"
   integrity sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==

-normalize-url@^4.1.0:
-  version "4.5.1"
-  resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-4.5.1.tgz#0dd90cf1288ee1d1313b87081c9a5932ee48518a"
-  integrity sha512-9UZCFRHQdNrfTpGg8+1INIg93B6zE0aXMVFkw1WFwvO4SlZywU6aLg5Of0Ap/PgcbSw4LNxvMWXMeugwMCX0AA==
-
-normalize-url@^6.0.1:
-  version "6.1.0"
-  resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-6.1.0.tgz#40d0885b535deffe3f3147bec877d05fe4c5668a"
-  integrity sha512-DlL+XwOy3NxAQ8xuC0okPgK46iuVNAK01YN7RueYBqqFeGsBjV9XmCAzAdgt+667bCl5kPh9EqKKDwnaPG1I7A==
+normalize-url@^8.0.0:
+  version "8.0.1"
+  resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-8.0.1.tgz#9b7d96af9836577c58f5883e939365fa15623a4a"
+  integrity sha512-IO9QvjUMWxPQQhs60oOu10CRkWCiZzSUkzbXGGV9pviYl1fXYcvkzQ5jV9z8Y6un8ARoVRl4EtC6v6jNqbaJ/w==

 not@^0.1.0:
   version "0.1.0"
@@ -7059,57 +7996,60 @@ nth-check@^2.0.0, nth-check@^2.0.1:
   dependencies:
     boolbase "^1.0.0"

-object-assign@^4.1.0, object-assign@^4.1.1:
+object-assign@^4.1.1:
   version "4.1.1"
   resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863"
   integrity sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==

-object-inspect@^1.12.2, object-inspect@^1.9.0:
-  version "1.12.2"
-  resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.12.2.tgz#c0641f26394532f28ab8d796ab954e43c009a8ea"
-  integrity sha512-z+cPxW0QGUp0mcqcsgQyLVRDoXFQbXOwBaqyF7VIgI4TWNQsDHrBpUQslRmIfAoYWdYzs6UlKJtB2XJpTaNSpQ==
+object-inspect@^1.13.1:
+  version "1.13.2"
+  resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.13.2.tgz#dea0088467fb991e67af4058147a24824a3043ff"
+  integrity sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g==

-object-is@^1.0.1:
-  version "1.1.5"
-  resolved "https://registry.yarnpkg.com/object-is/-/object-is-1.1.5.tgz#b9deeaa5fc7f1846a0faecdceec138e5778f53ac"
-  integrity sha512-3cyDsyHgtmi7I7DfSSI2LDp6SK2lwvtbg0p0R1e0RvTqF5ceGx+K2dfSjm1bKDMVCFEDAQvy+o8c6a7VujOddw==
+object-is@^1.1.5:
+  version "1.1.6"
+  resolved "https://registry.yarnpkg.com/object-is/-/object-is-1.1.6.tgz#1a6a53aed2dd8f7e6775ff870bea58545956ab07"
+  integrity sha512-F8cZ+KfGlSGi09lJT7/Nd6KJZ9ygtvYC0/UYYLI9nmQKLMnydpB9yvbv9K1uSkEu7FU9vYPmVwLg328tX+ot3Q==
   dependencies:
-    call-bind "^1.0.2"
-    define-properties "^1.1.3"
+    call-bind "^1.0.7"
+    define-properties "^1.2.1"

 object-keys@^1.1.1:
   version "1.1.1"
   resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.1.1.tgz#1c47f272df277f3b1daf061677d9c82e2322c60e"
   integrity sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==

-object.assign@^4.1.0, object.assign@^4.1.4:
-  version "4.1.4"
-  resolved "https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.4.tgz#9673c7c7c351ab8c4d0b516f4343ebf4dfb7799f"
-  integrity sha512-1mxKf0e58bvyjSCtKYY4sRe9itRk3PJpquJOjeIkz885CczcI4IvJJDLPS72oowuSh+pBxUFROpX+TU++hxhZQ==
+object.assign@^4.1.0, object.assign@^4.1.4, object.assign@^4.1.5:
+  version "4.1.5"
+  resolved "https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.5.tgz#3a833f9ab7fdb80fc9e8d2300c803d216d8fdbb0"
+  integrity sha512-byy+U7gp+FVwmyzKPYhW2h5l3crpmGsxl7X2s8y43IgxvG4g3QZ6CffDtsNQy1WsmZpQbO+ybo0AlW7TY6DcBQ==
   dependencies:
-    call-bind "^1.0.2"
-    define-properties "^1.1.4"
+    call-bind "^1.0.5"
+    define-properties "^1.2.1"
     has-symbols "^1.0.3"
     object-keys "^1.1.1"

 object.getownpropertydescriptors@^2.1.0:
-  version "2.1.5"
-  resolved "https://registry.yarnpkg.com/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.1.5.tgz#db5a9002489b64eef903df81d6623c07e5b4b4d3"
-  integrity sha512-yDNzckpM6ntyQiGTik1fKV1DcVDRS+w8bvpWNCBanvH5LfRX9O8WTHqQzG4RZwRAM4I0oU7TV11Lj5v0g20ibw==
-  dependencies:
-    array.prototype.reduce "^1.0.5"
-    call-bind "^1.0.2"
-    define-properties "^1.1.4"
-    es-abstract "^1.20.4"
+  version "2.1.8"
+  resolved "https://registry.yarnpkg.com/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.1.8.tgz#2f1fe0606ec1a7658154ccd4f728504f69667923"
+  integrity sha512-qkHIGe4q0lSYMv0XI4SsBTJz3WaURhLvd0lKSgtVuOsJ2krg4SgMw3PIRQFMp07yi++UR3se2mkcLqsBNpBb/A==
+  dependencies:
+    array.prototype.reduce "^1.0.6"
+    call-bind "^1.0.7"
+    define-properties "^1.2.1"
+    es-abstract "^1.23.2"
+    es-object-atoms "^1.0.0"
+    gopd "^1.0.1"
+    safe-array-concat "^1.1.2"

 object.values@^1.1.0:
-  version "1.1.6"
-  resolved "https://registry.yarnpkg.com/object.values/-/object.values-1.1.6.tgz#4abbaa71eba47d63589d402856f908243eea9b1d"
-  integrity sha512-FVVTkD1vENCsAcwNs9k6jea2uHC/X0+JcjG8YA60FN5CMaJmG95wT9jek/xX9nornqGRrBkKtzuAu2wuHpKqvw==
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/object.values/-/object.values-1.2.0.tgz#65405a9d92cee68ac2d303002e0b8470a4d9ab1b"
+  integrity sha512-yBYjY9QX2hnRmZHAjG/f13MzmBzxzYgQhFrke06TTyKY5zSTEqkOeukBzIdVA3j3ulu8Qa3MbVFShV7T2RmGtQ==
   dependencies:
-    call-bind "^1.0.2"
-    define-properties "^1.1.4"
-    es-abstract "^1.20.4"
+    call-bind "^1.0.7"
+    define-properties "^1.2.1"
+    es-object-atoms "^1.0.0"

 obuf@^1.0.0, obuf@^1.1.2:
   version "1.1.2"
@@ -7128,7 +8068,7 @@ on-headers@~1.0.2:
   resolved "https://registry.yarnpkg.com/on-headers/-/on-headers-1.0.2.tgz#772b0ae6aaa525c399e489adfad90c403eb3c28f"
   integrity sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==

-once@^1.3.0, once@^1.3.1, once@^1.4.0:
+once@^1.3.0:
   version "1.4.0"
   resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1"
   integrity sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==
@@ -7143,9 +8083,9 @@ onetime@^5.1.2:
     mimic-fn "^2.1.0"

 open@^8.0.9, open@^8.4.0:
-  version "8.4.0"
-  resolved "https://registry.yarnpkg.com/open/-/open-8.4.0.tgz#345321ae18f8138f82565a910fdc6b39e8c244f8"
-  integrity sha512-XgFPPM+B28FtCCgSb9I+s9szOC1vZRSwgWsRUA5ylIxRTgKozqjOCrVOqGsYABPYK5qnfqClxZTFBa8PKt2v6Q==
+  version "8.4.2"
+  resolved "https://registry.yarnpkg.com/open/-/open-8.4.2.tgz#5b5ffe2a8f793dcd2aad73e550cb87b59cb084f9"
+  integrity sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==
   dependencies:
     define-lazy-prop "^2.0.0"
     is-docker "^2.1.1"
@@ -7161,12 +8101,12 @@ os-homedir@^1.0.1:
   resolved "https://registry.yarnpkg.com/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3"
   integrity sha512-B5JU3cabzk8c67mRRd3ECmROafjYMXbuzlwtqdM8IbS8ktlTix8aFGb2bAGKrSRIlnfKwovGUUr72JUPyOb6kQ==

-p-cancelable@^1.0.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/p-cancelable/-/p-cancelable-1.1.0.tgz#d078d15a3af409220c886f1d9a0ca2e441ab26cc"
-  integrity sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw==
+p-cancelable@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/p-cancelable/-/p-cancelable-3.0.0.tgz#63826694b54d61ca1c20ebcb6d3ecf5e14cd8050"
+  integrity sha512-mlVgR3PGuzlo0MmTdk4cXqXWlwQDLnONTAg6sm62XkMJEiRxN3GL3SffkYvqwonbkJBcrI7Uvv5Zh9yjvn2iUw==

-p-limit@^2.0.0, p-limit@^2.2.0:
+p-limit@^2.0.0:
   version "2.3.0"
   resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-2.3.0.tgz#3dd33c647a214fdfffd835933eb086da0dc21db1"
   integrity sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==
@@ -7180,6 +8120,13 @@ p-limit@^3.0.2:
   dependencies:
     yocto-queue "^0.1.0"

+p-limit@^4.0.0:
+  version "4.0.0"
+  resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-4.0.0.tgz#914af6544ed32bfa54670b061cafcbd04984b644"
+  integrity sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ==
+  dependencies:
+    yocto-queue "^1.0.0"
+
 p-locate@^3.0.0:
   version "3.0.0"
   resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-3.0.0.tgz#322d69a05c0264b25997d9f40cd8a891ab0064a4"
@@ -7187,13 +8134,6 @@ p-locate@^3.0.0:
   dependencies:
     p-limit "^2.0.0"

-p-locate@^4.1.0:
-  version "4.1.0"
-  resolved
"https://registry.yarnpkg.com/p-locate/-/p-locate-4.1.0.tgz#a3428bb7088b3a60292f66919278b7c297ad4f07" - integrity sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A== - dependencies: - p-limit "^2.2.0" - p-locate@^5.0.0: version "5.0.0" resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-5.0.0.tgz#83c8315c6785005e3bd021839411c9e110e6d834" @@ -7201,6 +8141,13 @@ p-locate@^5.0.0: dependencies: p-limit "^3.0.2" +p-locate@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-6.0.0.tgz#3da9a49d4934b901089dca3302fa65dc5a05c04f" + integrity sha512-wPrq66Llhl7/4AGC6I+cqxT07LhXvWL08LNXz1fENOw0Ap4sRZZ/gZpTTJ5jpurzzzfS2W/Ge9BY3LgLjCShcw== + dependencies: + p-limit "^4.0.0" + p-map@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/p-map/-/p-map-4.0.0.tgz#bb2f95a5eda2ec168ec9274e06a747c3e2904d2b" @@ -7221,15 +8168,15 @@ p-try@^2.0.0: resolved "https://registry.yarnpkg.com/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6" integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ== -package-json@^6.3.0: - version "6.5.0" - resolved "https://registry.yarnpkg.com/package-json/-/package-json-6.5.0.tgz#6feedaca35e75725876d0b0e64974697fed145b0" - integrity sha512-k3bdm2n25tkyxcjSKzB5x8kfVxlMdgsbPr0GkZcwHsLpba6cBjqCt1KlcChKEvxHIcTB1FVMuwoijZ26xex5MQ== +package-json@^8.1.0: + version "8.1.1" + resolved "https://registry.yarnpkg.com/package-json/-/package-json-8.1.1.tgz#3e9948e43df40d1e8e78a85485f1070bf8f03dc8" + integrity sha512-cbH9IAIJHNj9uXi196JVsRlt7cHKak6u/e6AkL/bkRelZ7rlL3X1YKxsZwa36xipOEKAsdtmaG6aAJoM1fx2zA== dependencies: - got "^9.6.0" - registry-auth-token "^4.0.0" - registry-url "^5.0.0" - semver "^6.2.0" + got "^12.1.0" + registry-auth-token "^5.0.1" + registry-url "^6.0.0" + semver "^7.3.7" param-case@^3.0.4: version "3.0.4" @@ -7246,22 +8193,10 @@ parent-module@^1.0.0: dependencies: callsites "^3.0.0" -parse-entities@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/parse-entities/-/parse-entities-2.0.0.tgz#53c6eb5b9314a1f4ec99fa0fdf7ce01ecda0cbe8" - integrity sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ== - dependencies: - character-entities "^1.0.0" - character-entities-legacy "^1.0.0" - character-reference-invalid "^1.0.0" - is-alphanumerical "^1.0.0" - is-decimal "^1.0.0" - is-hexadecimal "^1.0.0" - parse-entities@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/parse-entities/-/parse-entities-4.0.0.tgz#f67c856d4e3fe19b1a445c3fabe78dcdc1053eeb" - integrity sha512-5nk9Fn03x3rEhGaX1FU6IDwG/k+GxLXlFAkgrbM1asuAFl3BhdQWvASaIsmwWypRNcZKHPYnIuOSfIWEyEQnPQ== + version "4.0.1" + resolved "https://registry.yarnpkg.com/parse-entities/-/parse-entities-4.0.1.tgz#4e2a01111fb1c986549b944af39eeda258fc9e4e" + integrity sha512-SWzvYcSJh4d/SGLIOQfZ/CoNv6BTlI6YEQ7Nj82oDVnRpwe/Z/F1EMx42x3JAOwGBlCjeCH0BRJQbQ/opHL17w== dependencies: "@types/unist" "^2.0.0" character-entities "^2.0.0" @@ -7272,7 +8207,7 @@ parse-entities@^4.0.0: is-decimal "^2.0.0" is-hexadecimal "^2.0.0" -parse-json@^5.0.0: +parse-json@^5.0.0, parse-json@^5.2.0: version "5.2.0" resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-5.2.0.tgz#c76fc66dee54231c962b22bcc8a72cf2f99753cd" integrity sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg== @@ -7288,11 +8223,11 @@ parse-numeric-range@^1.3.0: integrity 
sha512-twN+njEipszzlMJd4ONUYgSfZPDxgHhT9Ahed5uTigpQn90FggW4SA/AIPq/6a149fTbE9qBEcSwE3FAEp6wQQ== parse5-htmlparser2-tree-adapter@^7.0.0: - version "7.0.0" - resolved "https://registry.yarnpkg.com/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-7.0.0.tgz#23c2cc233bcf09bb7beba8b8a69d46b08c62c2f1" - integrity sha512-B77tOZrqqfUfnVcOrUvfdLbz4pu4RopLD/4vmu3HUPswwTA8OH0EMW9BlWR2B0RCoiZRAHEUu7IxeP1Pd1UU+g== + version "7.1.0" + resolved "https://registry.yarnpkg.com/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-7.1.0.tgz#b5a806548ed893a43e24ccb42fbb78069311e81b" + integrity sha512-ruw5xyKs6lrpo9x9rCZqZZnIUntICjQAd0Wsmp396Ul9lN/h+ifgVV1x1gZHi8euej6wTfpqX8j+BFQxF0NS/g== dependencies: - domhandler "^5.0.2" + domhandler "^5.0.3" parse5 "^7.0.0" parse5@^6.0.0: @@ -7301,11 +8236,11 @@ parse5@^6.0.0: integrity sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw== parse5@^7.0.0: - version "7.1.2" - resolved "https://registry.yarnpkg.com/parse5/-/parse5-7.1.2.tgz#0736bebbfd77793823240a23b7fc5e010b7f8e32" - integrity sha512-Czj1WaSVpaoj0wbhMzLmWD69anp2WH7FXMB9n1Sy8/ZFF9jolSQVMu1Ij5WIyGmcBmhk7EOndpO4mIpihVqAXw== + version "7.2.0" + resolved "https://registry.yarnpkg.com/parse5/-/parse5-7.2.0.tgz#8a0591ce9b7c5e2027173ab737d4d3fc3d826fab" + integrity sha512-ZkDsAOcxsUMZ4Lz5fVciOehNcJ+Gb8gTzcA4yl3wnc273BAybYWrQ+Ks/OjCjSEpjvQkDSeZbybK9qj2VHHdGA== dependencies: - entities "^4.4.0" + entities "^4.5.0" parseurl@~1.3.2, parseurl@~1.3.3: version "1.3.3" @@ -7335,6 +8270,11 @@ path-exists@^4.0.0: resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3" integrity sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w== +path-exists@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-5.0.0.tgz#a6aad9489200b21fab31e49cf09277e5116fb9e7" + integrity sha512-RjhtfwJOxzcFmNOi6ltcbcu4Iu+FL3zEj83dk4kAS+fVpTxXLO1b38RvJgT/0QwvV/L3aY9TAnyv0EOqW4GoMQ== + path-is-absolute@^1.0.0: version "1.0.1" resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" @@ -7367,20 +8307,20 @@ path-root@^0.1.1: dependencies: path-root-regex "^0.1.0" -path-to-regexp@0.1.7: - version "0.1.7" - resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c" - integrity sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ== +path-to-regexp@0.1.10: + version "0.1.10" + resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.10.tgz#67e9108c5c0551b9e5326064387de4763c4d5f8b" + integrity sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w== -path-to-regexp@2.2.1: - version "2.2.1" - resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-2.2.1.tgz#90b617025a16381a879bc82a38d4e8bdeb2bcf45" - integrity sha512-gu9bD6Ta5bwGrrU8muHzVOBFFREpp2iRkVfhBJahwJ6p6Xw20SjT0MxLnwkjOibQmGSYhiUnf2FLe7k+jcFmGQ== +path-to-regexp@3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-3.3.0.tgz#f7f31d32e8518c2660862b644414b6d5c63a611b" + integrity sha512-qyCH421YQPS2WFDxDjftfc1ZR5WKQzVzqsp4n9M2kQhVOo/ByahFoUNJfl58kOcEGfQ//7weFTDhm+ss8Ecxgw== path-to-regexp@^1.7.0: - version "1.8.0" - resolved 
"https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-1.8.0.tgz#887b3ba9d84393e87a0a0b9f4cb756198b53548a" - integrity sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA== + version "1.9.0" + resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-1.9.0.tgz#5dc0753acbf8521ca2e0f137b4578b917b10cf24" + integrity sha512-xIp7/apCFJuUHdDLWe8O1HIkb0kQrOMb/0u6FXQjemHn/ii5LrIzU6bdECnsiTF/GjZkMEKg1xdiZwNqDYlZ6g== dependencies: isarray "0.0.1" @@ -7390,29 +8330,30 @@ path-type@^4.0.0: integrity sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw== periscopic@^3.0.0: - version "3.0.4" - resolved "https://registry.yarnpkg.com/periscopic/-/periscopic-3.0.4.tgz#b3fbed0d1bc844976b977173ca2cd4a0ef4fa8d1" - integrity sha512-SFx68DxCv0Iyo6APZuw/AKewkkThGwssmU0QWtTlvov3VAtPX+QJ4CadwSaz8nrT5jPIuxdvJWB4PnD2KNDxQg== + version "3.1.0" + resolved "https://registry.yarnpkg.com/periscopic/-/periscopic-3.1.0.tgz#7e9037bf51c5855bd33b48928828db4afa79d97a" + integrity sha512-vKiQ8RRtkl9P+r/+oefh25C3fhybptkHKCZSPlcXiJux2tJF55GnEj3BVn4A5gKfq9NWWXXrxkHBwVPUfH0opw== dependencies: + "@types/estree" "^1.0.0" estree-walker "^3.0.0" is-reference "^3.0.0" -picocolors@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.0.0.tgz#cb5bdc74ff3f51892236eaf79d68bc44564ab81c" - integrity sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ== +picocolors@^1.0.0, picocolors@^1.0.1, picocolors@^1.1.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.1.1.tgz#3d321af3eab939b083c8f929a1d12cda81c26b6b" + integrity sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA== picomatch@^2.0.4, picomatch@^2.2.1, picomatch@^2.2.3, picomatch@^2.3.1: version "2.3.1" resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42" integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA== -pkg-dir@^4.1.0: - version "4.2.0" - resolved "https://registry.yarnpkg.com/pkg-dir/-/pkg-dir-4.2.0.tgz#f099133df7ede422e81d1d8448270eeb3e4261f3" - integrity sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ== +pkg-dir@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/pkg-dir/-/pkg-dir-7.0.0.tgz#8f0c08d6df4476756c5ff29b3282d0bab7517d11" + integrity sha512-Ie9z/WINcxxLp27BKOCHGde4ITq9UklYKDzVo1nhk5sqGEXU3FpkwP5GM2voTGJkGd9B3Otl+Q4uwSOeSUtOBA== dependencies: - find-up "^4.0.0" + find-up "^6.3.0" pkg-up@^3.1.0: version "3.1.0" @@ -7421,144 +8362,149 @@ pkg-up@^3.1.0: dependencies: find-up "^3.0.0" -postcss-calc@^8.2.3: - version "8.2.4" - resolved "https://registry.yarnpkg.com/postcss-calc/-/postcss-calc-8.2.4.tgz#77b9c29bfcbe8a07ff6693dc87050828889739a5" - integrity sha512-SmWMSJmB8MRnnULldx0lQIyhSNvuDl9HfrZkaqqE/WHAhToYsAvDq+yAsA/kIyINDszOp3Rh0GFoNuH5Ypsm3Q== +possible-typed-array-names@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/possible-typed-array-names/-/possible-typed-array-names-1.0.0.tgz#89bb63c6fada2c3e90adc4a647beeeb39cc7bf8f" + integrity sha512-d7Uw+eZoloe0EHDIYoe+bQ5WXnGMOpmiZFTuMWCwpjzzkL2nTjcKiAk4hh8TjnGye2TwWOk3UXucZ+3rbmBa8Q== + +postcss-calc@^9.0.1: + version "9.0.1" + resolved "https://registry.yarnpkg.com/postcss-calc/-/postcss-calc-9.0.1.tgz#a744fd592438a93d6de0f1434c572670361eb6c6" + integrity 
sha512-TipgjGyzP5QzEhsOZUaIkeO5mKeMFpebWzRogWG/ysonUlnHcq5aJe0jOjpfzUU8PeSaBQnrE8ehR0QA5vs8PQ== dependencies: - postcss-selector-parser "^6.0.9" + postcss-selector-parser "^6.0.11" postcss-value-parser "^4.2.0" -postcss-colormin@^5.3.0: - version "5.3.0" - resolved "https://registry.yarnpkg.com/postcss-colormin/-/postcss-colormin-5.3.0.tgz#3cee9e5ca62b2c27e84fce63affc0cfb5901956a" - integrity sha512-WdDO4gOFG2Z8n4P8TWBpshnL3JpmNmJwdnfP2gbk2qBA8PWwOYcmjmI/t3CmMeL72a7Hkd+x/Mg9O2/0rD54Pg== +postcss-colormin@^6.1.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/postcss-colormin/-/postcss-colormin-6.1.0.tgz#076e8d3fb291fbff7b10e6b063be9da42ff6488d" + integrity sha512-x9yX7DOxeMAR+BgGVnNSAxmAj98NX/YxEMNFP+SDCEeNLb2r3i6Hh1ksMsnW8Ub5SLCpbescQqn9YEbE9554Sw== dependencies: - browserslist "^4.16.6" + browserslist "^4.23.0" caniuse-api "^3.0.0" - colord "^2.9.1" + colord "^2.9.3" postcss-value-parser "^4.2.0" -postcss-convert-values@^5.1.3: - version "5.1.3" - resolved "https://registry.yarnpkg.com/postcss-convert-values/-/postcss-convert-values-5.1.3.tgz#04998bb9ba6b65aa31035d669a6af342c5f9d393" - integrity sha512-82pC1xkJZtcJEfiLw6UXnXVXScgtBrjlO5CBmuDQc+dlb88ZYheFsjTn40+zBVi3DkfF7iezO0nJUPLcJK3pvA== +postcss-convert-values@^6.1.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/postcss-convert-values/-/postcss-convert-values-6.1.0.tgz#3498387f8efedb817cbc63901d45bd1ceaa40f48" + integrity sha512-zx8IwP/ts9WvUM6NkVSkiU902QZL1bwPhaVaLynPtCsOTqp+ZKbNi+s6XJg3rfqpKGA/oc7Oxk5t8pOQJcwl/w== dependencies: - browserslist "^4.21.4" + browserslist "^4.23.0" postcss-value-parser "^4.2.0" -postcss-discard-comments@^5.1.2: - version "5.1.2" - resolved "https://registry.yarnpkg.com/postcss-discard-comments/-/postcss-discard-comments-5.1.2.tgz#8df5e81d2925af2780075840c1526f0660e53696" - integrity sha512-+L8208OVbHVF2UQf1iDmRcbdjJkuBF6IS29yBDSiWUIzpYaAhtNl6JYnYm12FnkeCwQqF5LeklOu6rAqgfBZqQ== +postcss-discard-comments@^6.0.2: + version "6.0.2" + resolved "https://registry.yarnpkg.com/postcss-discard-comments/-/postcss-discard-comments-6.0.2.tgz#e768dcfdc33e0216380623652b0a4f69f4678b6c" + integrity sha512-65w/uIqhSBBfQmYnG92FO1mWZjJ4GL5b8atm5Yw2UgrwD7HiNiSSNwJor1eCFGzUgYnN/iIknhNRVqjrrpuglw== -postcss-discard-duplicates@^5.1.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/postcss-discard-duplicates/-/postcss-discard-duplicates-5.1.0.tgz#9eb4fe8456706a4eebd6d3b7b777d07bad03e848" - integrity sha512-zmX3IoSI2aoenxHV6C7plngHWWhUOV3sP1T8y2ifzxzbtnuhk1EdPwm0S1bIUNaJ2eNbWeGLEwzw8huPD67aQw== +postcss-discard-duplicates@^6.0.3: + version "6.0.3" + resolved "https://registry.yarnpkg.com/postcss-discard-duplicates/-/postcss-discard-duplicates-6.0.3.tgz#d121e893c38dc58a67277f75bb58ba43fce4c3eb" + integrity sha512-+JA0DCvc5XvFAxwx6f/e68gQu/7Z9ud584VLmcgto28eB8FqSFZwtrLwB5Kcp70eIoWP/HXqz4wpo8rD8gpsTw== -postcss-discard-empty@^5.1.1: - version "5.1.1" - resolved "https://registry.yarnpkg.com/postcss-discard-empty/-/postcss-discard-empty-5.1.1.tgz#e57762343ff7f503fe53fca553d18d7f0c369c6c" - integrity sha512-zPz4WljiSuLWsI0ir4Mcnr4qQQ5e1Ukc3i7UfE2XcrwKK2LIPIqE5jxMRxO6GbI3cv//ztXDsXwEWT3BHOGh3A== +postcss-discard-empty@^6.0.3: + version "6.0.3" + resolved "https://registry.yarnpkg.com/postcss-discard-empty/-/postcss-discard-empty-6.0.3.tgz#ee39c327219bb70473a066f772621f81435a79d9" + integrity sha512-znyno9cHKQsK6PtxL5D19Fj9uwSzC2mB74cpT66fhgOadEUPyXFkbgwm5tvc3bt3NAy8ltE5MrghxovZRVnOjQ== -postcss-discard-overridden@^5.1.0: - version "5.1.0" - resolved 
"https://registry.yarnpkg.com/postcss-discard-overridden/-/postcss-discard-overridden-5.1.0.tgz#7e8c5b53325747e9d90131bb88635282fb4a276e" - integrity sha512-21nOL7RqWR1kasIVdKs8HNqQJhFxLsyRfAnUDm4Fe4t4mCWL9OJiHvlHPjcd8zc5Myu89b/7wZDnOSjFgeWRtw== +postcss-discard-overridden@^6.0.2: + version "6.0.2" + resolved "https://registry.yarnpkg.com/postcss-discard-overridden/-/postcss-discard-overridden-6.0.2.tgz#4e9f9c62ecd2df46e8fdb44dc17e189776572e2d" + integrity sha512-j87xzI4LUggC5zND7KdjsI25APtyMuynXZSujByMaav2roV6OZX+8AaCUcZSWqckZpjAjRyFDdpqybgjFO0HJQ== -postcss-discard-unused@^5.1.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/postcss-discard-unused/-/postcss-discard-unused-5.1.0.tgz#8974e9b143d887677304e558c1166d3762501142" - integrity sha512-KwLWymI9hbwXmJa0dkrzpRbSJEh0vVUd7r8t0yOGPcfKzyJJxFM8kLyC5Ev9avji6nY95pOp1W6HqIrfT+0VGw== +postcss-discard-unused@^6.0.5: + version "6.0.5" + resolved "https://registry.yarnpkg.com/postcss-discard-unused/-/postcss-discard-unused-6.0.5.tgz#c1b0e8c032c6054c3fbd22aaddba5b248136f338" + integrity sha512-wHalBlRHkaNnNwfC8z+ppX57VhvS+HWgjW508esjdaEYr3Mx7Gnn2xA4R/CKf5+Z9S5qsqC+Uzh4ueENWwCVUA== dependencies: - postcss-selector-parser "^6.0.5" + postcss-selector-parser "^6.0.16" -postcss-loader@^7.0.0: - version "7.0.2" - resolved "https://registry.yarnpkg.com/postcss-loader/-/postcss-loader-7.0.2.tgz#b53ff44a26fba3688eee92a048c7f2d4802e23bb" - integrity sha512-fUJzV/QH7NXUAqV8dWJ9Lg4aTkDCezpTS5HgJ2DvqznexTbSTxgi/dTECvTZ15BwKTtk8G/bqI/QTu2HPd3ZCg== +postcss-loader@^7.3.3: + version "7.3.4" + resolved "https://registry.yarnpkg.com/postcss-loader/-/postcss-loader-7.3.4.tgz#aed9b79ce4ed7e9e89e56199d25ad1ec8f606209" + integrity sha512-iW5WTTBSC5BfsBJ9daFMPVrLT36MrNiC6fqOZTTaHjBNX6Pfd5p+hSBqe/fEeNd7pc13QiAyGt7VdGMw4eRC4A== dependencies: - cosmiconfig "^7.0.0" - klona "^2.0.5" - semver "^7.3.8" + cosmiconfig "^8.3.5" + jiti "^1.20.0" + semver "^7.5.4" -postcss-merge-idents@^5.1.1: - version "5.1.1" - resolved "https://registry.yarnpkg.com/postcss-merge-idents/-/postcss-merge-idents-5.1.1.tgz#7753817c2e0b75d0853b56f78a89771e15ca04a1" - integrity sha512-pCijL1TREiCoog5nQp7wUe+TUonA2tC2sQ54UGeMmryK3UFGIYKqDyjnqd6RcuI4znFn9hWSLNN8xKE/vWcUQw== +postcss-merge-idents@^6.0.3: + version "6.0.3" + resolved "https://registry.yarnpkg.com/postcss-merge-idents/-/postcss-merge-idents-6.0.3.tgz#7b9c31c7bc823c94bec50f297f04e3c2b838ea65" + integrity sha512-1oIoAsODUs6IHQZkLQGO15uGEbK3EAl5wi9SS8hs45VgsxQfMnxvt+L+zIr7ifZFIH14cfAeVe2uCTa+SPRa3g== dependencies: - cssnano-utils "^3.1.0" + cssnano-utils "^4.0.2" postcss-value-parser "^4.2.0" -postcss-merge-longhand@^5.1.7: - version "5.1.7" - resolved "https://registry.yarnpkg.com/postcss-merge-longhand/-/postcss-merge-longhand-5.1.7.tgz#24a1bdf402d9ef0e70f568f39bdc0344d568fb16" - integrity sha512-YCI9gZB+PLNskrK0BB3/2OzPnGhPkBEwmwhfYk1ilBHYVAZB7/tkTHFBAnCrvBBOmeYyMYw3DMjT55SyxMBzjQ== +postcss-merge-longhand@^6.0.5: + version "6.0.5" + resolved "https://registry.yarnpkg.com/postcss-merge-longhand/-/postcss-merge-longhand-6.0.5.tgz#ba8a8d473617c34a36abbea8dda2b215750a065a" + integrity sha512-5LOiordeTfi64QhICp07nzzuTDjNSO8g5Ksdibt44d+uvIIAE1oZdRn8y/W5ZtYgRH/lnLDlvi9F8btZcVzu3w== dependencies: postcss-value-parser "^4.2.0" - stylehacks "^5.1.1" + stylehacks "^6.1.1" -postcss-merge-rules@^5.1.3: - version "5.1.3" - resolved "https://registry.yarnpkg.com/postcss-merge-rules/-/postcss-merge-rules-5.1.3.tgz#8f97679e67cc8d08677a6519afca41edf2220894" - integrity 
sha512-LbLd7uFC00vpOuMvyZop8+vvhnfRGpp2S+IMQKeuOZZapPRY4SMq5ErjQeHbHsjCUgJkRNrlU+LmxsKIqPKQlA== +postcss-merge-rules@^6.1.1: + version "6.1.1" + resolved "https://registry.yarnpkg.com/postcss-merge-rules/-/postcss-merge-rules-6.1.1.tgz#7aa539dceddab56019469c0edd7d22b64c3dea9d" + integrity sha512-KOdWF0gju31AQPZiD+2Ar9Qjowz1LTChSjFFbS+e2sFgc4uHOp3ZvVX4sNeTlk0w2O31ecFGgrFzhO0RSWbWwQ== dependencies: - browserslist "^4.21.4" + browserslist "^4.23.0" caniuse-api "^3.0.0" - cssnano-utils "^3.1.0" - postcss-selector-parser "^6.0.5" + cssnano-utils "^4.0.2" + postcss-selector-parser "^6.0.16" -postcss-minify-font-values@^5.1.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/postcss-minify-font-values/-/postcss-minify-font-values-5.1.0.tgz#f1df0014a726083d260d3bd85d7385fb89d1f01b" - integrity sha512-el3mYTgx13ZAPPirSVsHqFzl+BBBDrXvbySvPGFnQcTI4iNslrPaFq4muTkLZmKlGk4gyFAYUBMH30+HurREyA== +postcss-minify-font-values@^6.1.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/postcss-minify-font-values/-/postcss-minify-font-values-6.1.0.tgz#a0e574c02ee3f299be2846369211f3b957ea4c59" + integrity sha512-gklfI/n+9rTh8nYaSJXlCo3nOKqMNkxuGpTn/Qm0gstL3ywTr9/WRKznE+oy6fvfolH6dF+QM4nCo8yPLdvGJg== dependencies: postcss-value-parser "^4.2.0" -postcss-minify-gradients@^5.1.1: - version "5.1.1" - resolved "https://registry.yarnpkg.com/postcss-minify-gradients/-/postcss-minify-gradients-5.1.1.tgz#f1fe1b4f498134a5068240c2f25d46fcd236ba2c" - integrity sha512-VGvXMTpCEo4qHTNSa9A0a3D+dxGFZCYwR6Jokk+/3oB6flu2/PnPXAh2x7x52EkY5xlIHLm+Le8tJxe/7TNhzw== +postcss-minify-gradients@^6.0.3: + version "6.0.3" + resolved "https://registry.yarnpkg.com/postcss-minify-gradients/-/postcss-minify-gradients-6.0.3.tgz#ca3eb55a7bdb48a1e187a55c6377be918743dbd6" + integrity sha512-4KXAHrYlzF0Rr7uc4VrfwDJ2ajrtNEpNEuLxFgwkhFZ56/7gaE4Nr49nLsQDZyUe+ds+kEhf+YAUolJiYXF8+Q== dependencies: - colord "^2.9.1" - cssnano-utils "^3.1.0" + colord "^2.9.3" + cssnano-utils "^4.0.2" postcss-value-parser "^4.2.0" -postcss-minify-params@^5.1.4: - version "5.1.4" - resolved "https://registry.yarnpkg.com/postcss-minify-params/-/postcss-minify-params-5.1.4.tgz#c06a6c787128b3208b38c9364cfc40c8aa5d7352" - integrity sha512-+mePA3MgdmVmv6g+30rn57USjOGSAyuxUmkfiWpzalZ8aiBkdPYjXWtHuwJGm1v5Ojy0Z0LaSYhHaLJQB0P8Jw== +postcss-minify-params@^6.1.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/postcss-minify-params/-/postcss-minify-params-6.1.0.tgz#54551dec77b9a45a29c3cb5953bf7325a399ba08" + integrity sha512-bmSKnDtyyE8ujHQK0RQJDIKhQ20Jq1LYiez54WiaOoBtcSuflfK3Nm596LvbtlFcpipMjgClQGyGr7GAs+H1uA== dependencies: - browserslist "^4.21.4" - cssnano-utils "^3.1.0" + browserslist "^4.23.0" + cssnano-utils "^4.0.2" postcss-value-parser "^4.2.0" -postcss-minify-selectors@^5.2.1: - version "5.2.1" - resolved "https://registry.yarnpkg.com/postcss-minify-selectors/-/postcss-minify-selectors-5.2.1.tgz#d4e7e6b46147b8117ea9325a915a801d5fe656c6" - integrity sha512-nPJu7OjZJTsVUmPdm2TcaiohIwxP+v8ha9NehQ2ye9szv4orirRU3SDdtUmKH+10nzn0bAyOXZ0UEr7OpvLehg== +postcss-minify-selectors@^6.0.4: + version "6.0.4" + resolved "https://registry.yarnpkg.com/postcss-minify-selectors/-/postcss-minify-selectors-6.0.4.tgz#197f7d72e6dd19eed47916d575d69dc38b396aff" + integrity sha512-L8dZSwNLgK7pjTto9PzWRoMbnLq5vsZSTu8+j1P/2GB8qdtGQfn+K1uSvFgYvgh83cbyxT5m43ZZhUMTJDSClQ== dependencies: - postcss-selector-parser "^6.0.5" + postcss-selector-parser "^6.0.16" -postcss-modules-extract-imports@^3.0.0: - version "3.0.0" - resolved 
"https://registry.yarnpkg.com/postcss-modules-extract-imports/-/postcss-modules-extract-imports-3.0.0.tgz#cda1f047c0ae80c97dbe28c3e76a43b88025741d" - integrity sha512-bdHleFnP3kZ4NYDhuGlVK+CMrQ/pqUm8bx/oGL93K6gVwiclvX5x0n76fYMKuIGKzlABOy13zsvqjb0f92TEXw== +postcss-modules-extract-imports@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/postcss-modules-extract-imports/-/postcss-modules-extract-imports-3.1.0.tgz#b4497cb85a9c0c4b5aabeb759bb25e8d89f15002" + integrity sha512-k3kNe0aNFQDAZGbin48pL2VNidTF0w4/eASDsxlyspobzU3wZQLOGj7L9gfRe0Jo9/4uud09DsjFNH7winGv8Q== -postcss-modules-local-by-default@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.0.0.tgz#ebbb54fae1598eecfdf691a02b3ff3b390a5a51c" - integrity sha512-sT7ihtmGSF9yhm6ggikHdV0hlziDTX7oFoXtuVWeDd3hHObNkcHRo9V3yg7vCAY7cONyxJC/XXCmmiHHcvX7bQ== +postcss-modules-local-by-default@^4.0.5: + version "4.0.5" + resolved "https://registry.yarnpkg.com/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.0.5.tgz#f1b9bd757a8edf4d8556e8d0f4f894260e3df78f" + integrity sha512-6MieY7sIfTK0hYfafw1OMEG+2bg8Q1ocHCpoWLqOKj3JXlKu4G7btkmM/B7lFubYkYWmRSPLZi5chid63ZaZYw== dependencies: icss-utils "^5.0.0" postcss-selector-parser "^6.0.2" postcss-value-parser "^4.1.0" -postcss-modules-scope@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/postcss-modules-scope/-/postcss-modules-scope-3.0.0.tgz#9ef3151456d3bbfa120ca44898dfca6f2fa01f06" - integrity sha512-hncihwFA2yPath8oZ15PZqvWGkWf+XUfQgUGamS4LqoP1anQLOsOJw0vr7J7IwLpoY9fatA2qiGUGmuZL0Iqlg== +postcss-modules-scope@^3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/postcss-modules-scope/-/postcss-modules-scope-3.2.0.tgz#a43d28289a169ce2c15c00c4e64c0858e43457d5" + integrity sha512-oq+g1ssrsZOsx9M96c5w8laRmvEu9C3adDSjI8oTcbfkrTE8hx/zfyobUoWIxaKPO8bt6S62kxpw5GqypEw1QQ== dependencies: postcss-selector-parser "^6.0.4" @@ -7569,152 +8515,146 @@ postcss-modules-values@^4.0.0: dependencies: icss-utils "^5.0.0" -postcss-normalize-charset@^5.1.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/postcss-normalize-charset/-/postcss-normalize-charset-5.1.0.tgz#9302de0b29094b52c259e9b2cf8dc0879879f0ed" - integrity sha512-mSgUJ+pd/ldRGVx26p2wz9dNZ7ji6Pn8VWBajMXFf8jk7vUoSrZ2lt/wZR7DtlZYKesmZI680qjr2CeFF2fbUg== +postcss-normalize-charset@^6.0.2: + version "6.0.2" + resolved "https://registry.yarnpkg.com/postcss-normalize-charset/-/postcss-normalize-charset-6.0.2.tgz#1ec25c435057a8001dac942942a95ffe66f721e1" + integrity sha512-a8N9czmdnrjPHa3DeFlwqst5eaL5W8jYu3EBbTTkI5FHkfMhFZh1EGbku6jhHhIzTA6tquI2P42NtZ59M/H/kQ== -postcss-normalize-display-values@^5.1.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/postcss-normalize-display-values/-/postcss-normalize-display-values-5.1.0.tgz#72abbae58081960e9edd7200fcf21ab8325c3da8" - integrity sha512-WP4KIM4o2dazQXWmFaqMmcvsKmhdINFblgSeRgn8BJ6vxaMyaJkwAzpPpuvSIoG/rmX3M+IrRZEz2H0glrQNEA== +postcss-normalize-display-values@^6.0.2: + version "6.0.2" + resolved "https://registry.yarnpkg.com/postcss-normalize-display-values/-/postcss-normalize-display-values-6.0.2.tgz#54f02764fed0b288d5363cbb140d6950dbbdd535" + integrity sha512-8H04Mxsb82ON/aAkPeq8kcBbAtI5Q2a64X/mnRRfPXBq7XeogoQvReqxEfc0B4WPq1KimjezNC8flUtC3Qz6jg== dependencies: postcss-value-parser "^4.2.0" -postcss-normalize-positions@^5.1.1: - version "5.1.1" - resolved 
"https://registry.yarnpkg.com/postcss-normalize-positions/-/postcss-normalize-positions-5.1.1.tgz#ef97279d894087b59325b45c47f1e863daefbb92" - integrity sha512-6UpCb0G4eofTCQLFVuI3EVNZzBNPiIKcA1AKVka+31fTVySphr3VUgAIULBhxZkKgwLImhzMR2Bw1ORK+37INg== +postcss-normalize-positions@^6.0.2: + version "6.0.2" + resolved "https://registry.yarnpkg.com/postcss-normalize-positions/-/postcss-normalize-positions-6.0.2.tgz#e982d284ec878b9b819796266f640852dbbb723a" + integrity sha512-/JFzI441OAB9O7VnLA+RtSNZvQ0NCFZDOtp6QPFo1iIyawyXg0YI3CYM9HBy1WvwCRHnPep/BvI1+dGPKoXx/Q== dependencies: postcss-value-parser "^4.2.0" -postcss-normalize-repeat-style@^5.1.1: - version "5.1.1" - resolved "https://registry.yarnpkg.com/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-5.1.1.tgz#e9eb96805204f4766df66fd09ed2e13545420fb2" - integrity sha512-mFpLspGWkQtBcWIRFLmewo8aC3ImN2i/J3v8YCFUwDnPu3Xz4rLohDO26lGjwNsQxB3YF0KKRwspGzE2JEuS0g== +postcss-normalize-repeat-style@^6.0.2: + version "6.0.2" + resolved "https://registry.yarnpkg.com/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-6.0.2.tgz#f8006942fd0617c73f049dd8b6201c3a3040ecf3" + integrity sha512-YdCgsfHkJ2jEXwR4RR3Tm/iOxSfdRt7jplS6XRh9Js9PyCR/aka/FCb6TuHT2U8gQubbm/mPmF6L7FY9d79VwQ== dependencies: postcss-value-parser "^4.2.0" -postcss-normalize-string@^5.1.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/postcss-normalize-string/-/postcss-normalize-string-5.1.0.tgz#411961169e07308c82c1f8c55f3e8a337757e228" - integrity sha512-oYiIJOf4T9T1N4i+abeIc7Vgm/xPCGih4bZz5Nm0/ARVJ7K6xrDlLwvwqOydvyL3RHNf8qZk6vo3aatiw/go3w== +postcss-normalize-string@^6.0.2: + version "6.0.2" + resolved "https://registry.yarnpkg.com/postcss-normalize-string/-/postcss-normalize-string-6.0.2.tgz#e3cc6ad5c95581acd1fc8774b309dd7c06e5e363" + integrity sha512-vQZIivlxlfqqMp4L9PZsFE4YUkWniziKjQWUtsxUiVsSSPelQydwS8Wwcuw0+83ZjPWNTl02oxlIvXsmmG+CiQ== dependencies: postcss-value-parser "^4.2.0" -postcss-normalize-timing-functions@^5.1.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-5.1.0.tgz#d5614410f8f0b2388e9f240aa6011ba6f52dafbb" - integrity sha512-DOEkzJ4SAXv5xkHl0Wa9cZLF3WCBhF3o1SKVxKQAa+0pYKlueTpCgvkFAHfk+Y64ezX9+nITGrDZeVGgITJXjg== +postcss-normalize-timing-functions@^6.0.2: + version "6.0.2" + resolved "https://registry.yarnpkg.com/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-6.0.2.tgz#40cb8726cef999de984527cbd9d1db1f3e9062c0" + integrity sha512-a+YrtMox4TBtId/AEwbA03VcJgtyW4dGBizPl7e88cTFULYsprgHWTbfyjSLyHeBcK/Q9JhXkt2ZXiwaVHoMzA== dependencies: postcss-value-parser "^4.2.0" -postcss-normalize-unicode@^5.1.1: - version "5.1.1" - resolved "https://registry.yarnpkg.com/postcss-normalize-unicode/-/postcss-normalize-unicode-5.1.1.tgz#f67297fca3fea7f17e0d2caa40769afc487aa030" - integrity sha512-qnCL5jzkNUmKVhZoENp1mJiGNPcsJCs1aaRmURmeJGES23Z/ajaln+EPTD+rBeNkSryI+2WTdW+lwcVdOikrpA== +postcss-normalize-unicode@^6.1.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/postcss-normalize-unicode/-/postcss-normalize-unicode-6.1.0.tgz#aaf8bbd34c306e230777e80f7f12a4b7d27ce06e" + integrity sha512-QVC5TQHsVj33otj8/JD869Ndr5Xcc/+fwRh4HAsFsAeygQQXm+0PySrKbr/8tkDKzW+EVT3QkqZMfFrGiossDg== dependencies: - browserslist "^4.21.4" + browserslist "^4.23.0" postcss-value-parser "^4.2.0" -postcss-normalize-url@^5.1.0: - version "5.1.0" - resolved 
"https://registry.yarnpkg.com/postcss-normalize-url/-/postcss-normalize-url-5.1.0.tgz#ed9d88ca82e21abef99f743457d3729a042adcdc" - integrity sha512-5upGeDO+PVthOxSmds43ZeMeZfKH+/DKgGRD7TElkkyS46JXAUhMzIKiCa7BabPeIy3AQcTkXwVVN7DbqsiCew== +postcss-normalize-url@^6.0.2: + version "6.0.2" + resolved "https://registry.yarnpkg.com/postcss-normalize-url/-/postcss-normalize-url-6.0.2.tgz#292792386be51a8de9a454cb7b5c58ae22db0f79" + integrity sha512-kVNcWhCeKAzZ8B4pv/DnrU1wNh458zBNp8dh4y5hhxih5RZQ12QWMuQrDgPRw3LRl8mN9vOVfHl7uhvHYMoXsQ== dependencies: - normalize-url "^6.0.1" postcss-value-parser "^4.2.0" -postcss-normalize-whitespace@^5.1.1: - version "5.1.1" - resolved "https://registry.yarnpkg.com/postcss-normalize-whitespace/-/postcss-normalize-whitespace-5.1.1.tgz#08a1a0d1ffa17a7cc6efe1e6c9da969cc4493cfa" - integrity sha512-83ZJ4t3NUDETIHTa3uEg6asWjSBYL5EdkVB0sDncx9ERzOKBVJIUeDO9RyA9Zwtig8El1d79HBp0JEi8wvGQnA== +postcss-normalize-whitespace@^6.0.2: + version "6.0.2" + resolved "https://registry.yarnpkg.com/postcss-normalize-whitespace/-/postcss-normalize-whitespace-6.0.2.tgz#fbb009e6ebd312f8b2efb225c2fcc7cf32b400cd" + integrity sha512-sXZ2Nj1icbJOKmdjXVT9pnyHQKiSAyuNQHSgRCUgThn2388Y9cGVDR+E9J9iAYbSbLHI+UUwLVl1Wzco/zgv0Q== dependencies: postcss-value-parser "^4.2.0" -postcss-ordered-values@^5.1.3: - version "5.1.3" - resolved "https://registry.yarnpkg.com/postcss-ordered-values/-/postcss-ordered-values-5.1.3.tgz#b6fd2bd10f937b23d86bc829c69e7732ce76ea38" - integrity sha512-9UO79VUhPwEkzbb3RNpqqghc6lcYej1aveQteWY+4POIwlqkYE21HKWaLDF6lWNuqCobEAyTovVhtI32Rbv2RQ== +postcss-ordered-values@^6.0.2: + version "6.0.2" + resolved "https://registry.yarnpkg.com/postcss-ordered-values/-/postcss-ordered-values-6.0.2.tgz#366bb663919707093451ab70c3f99c05672aaae5" + integrity sha512-VRZSOB+JU32RsEAQrO94QPkClGPKJEL/Z9PCBImXMhIeK5KAYo6slP/hBYlLgrCjFxyqvn5VC81tycFEDBLG1Q== dependencies: - cssnano-utils "^3.1.0" + cssnano-utils "^4.0.2" postcss-value-parser "^4.2.0" -postcss-reduce-idents@^5.2.0: - version "5.2.0" - resolved "https://registry.yarnpkg.com/postcss-reduce-idents/-/postcss-reduce-idents-5.2.0.tgz#c89c11336c432ac4b28792f24778859a67dfba95" - integrity sha512-BTrLjICoSB6gxbc58D5mdBK8OhXRDqud/zodYfdSi52qvDHdMwk+9kB9xsM8yJThH/sZU5A6QVSmMmaN001gIg== +postcss-reduce-idents@^6.0.3: + version "6.0.3" + resolved "https://registry.yarnpkg.com/postcss-reduce-idents/-/postcss-reduce-idents-6.0.3.tgz#b0d9c84316d2a547714ebab523ec7d13704cd486" + integrity sha512-G3yCqZDpsNPoQgbDUy3T0E6hqOQ5xigUtBQyrmq3tn2GxlyiL0yyl7H+T8ulQR6kOcHJ9t7/9H4/R2tv8tJbMA== dependencies: postcss-value-parser "^4.2.0" -postcss-reduce-initial@^5.1.1: - version "5.1.1" - resolved "https://registry.yarnpkg.com/postcss-reduce-initial/-/postcss-reduce-initial-5.1.1.tgz#c18b7dfb88aee24b1f8e4936541c29adbd35224e" - integrity sha512-//jeDqWcHPuXGZLoolFrUXBDyuEGbr9S2rMo19bkTIjBQ4PqkaO+oI8wua5BOUxpfi97i3PCoInsiFIEBfkm9w== +postcss-reduce-initial@^6.1.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/postcss-reduce-initial/-/postcss-reduce-initial-6.1.0.tgz#4401297d8e35cb6e92c8e9586963e267105586ba" + integrity sha512-RarLgBK/CrL1qZags04oKbVbrrVK2wcxhvta3GCxrZO4zveibqbRPmm2VI8sSgCXwoUHEliRSbOfpR0b/VIoiw== dependencies: - browserslist "^4.21.4" + browserslist "^4.23.0" caniuse-api "^3.0.0" -postcss-reduce-transforms@^5.1.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/postcss-reduce-transforms/-/postcss-reduce-transforms-5.1.0.tgz#333b70e7758b802f3dd0ddfe98bb1ccfef96b6e9" - integrity 
sha512-2fbdbmgir5AvpW9RLtdONx1QoYG2/EtqpNQbFASDlixBbAYuTcJ0dECwlqNqH7VbaUnEnh8SrxOe2sRIn24XyQ== +postcss-reduce-transforms@^6.0.2: + version "6.0.2" + resolved "https://registry.yarnpkg.com/postcss-reduce-transforms/-/postcss-reduce-transforms-6.0.2.tgz#6fa2c586bdc091a7373caeee4be75a0f3e12965d" + integrity sha512-sB+Ya++3Xj1WaT9+5LOOdirAxP7dJZms3GRcYheSPi1PiTMigsxHAdkrbItHxwYHr4kt1zL7mmcHstgMYT+aiA== dependencies: postcss-value-parser "^4.2.0" -postcss-selector-parser@^6.0.2, postcss-selector-parser@^6.0.4, postcss-selector-parser@^6.0.5, postcss-selector-parser@^6.0.9: - version "6.0.11" - resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-6.0.11.tgz#2e41dc39b7ad74046e1615185185cd0b17d0c8dc" - integrity sha512-zbARubNdogI9j7WY4nQJBiNqQf3sLS3wCP4WfOidu+p28LofJqDH1tcXypGrcmMHhDk2t9wGhCsYe/+szLTy1g== +postcss-selector-parser@^6.0.11, postcss-selector-parser@^6.0.16, postcss-selector-parser@^6.0.2, postcss-selector-parser@^6.0.4: + version "6.1.2" + resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz#27ecb41fb0e3b6ba7a1ec84fff347f734c7929de" + integrity sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg== dependencies: cssesc "^3.0.0" util-deprecate "^1.0.2" -postcss-sort-media-queries@^4.2.1: - version "4.3.0" - resolved "https://registry.yarnpkg.com/postcss-sort-media-queries/-/postcss-sort-media-queries-4.3.0.tgz#f48a77d6ce379e86676fc3f140cf1b10a06f6051" - integrity sha512-jAl8gJM2DvuIJiI9sL1CuiHtKM4s5aEIomkU8G3LFvbP+p8i7Sz8VV63uieTgoewGqKbi+hxBTiOKJlB35upCg== +postcss-sort-media-queries@^5.2.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/postcss-sort-media-queries/-/postcss-sort-media-queries-5.2.0.tgz#4556b3f982ef27d3bac526b99b6c0d3359a6cf97" + integrity sha512-AZ5fDMLD8SldlAYlvi8NIqo0+Z8xnXU2ia0jxmuhxAU+Lqt9K+AlmLNJ/zWEnE9x+Zx3qL3+1K20ATgNOr3fAA== dependencies: - sort-css-media-queries "2.1.0" + sort-css-media-queries "2.2.0" -postcss-svgo@^5.1.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/postcss-svgo/-/postcss-svgo-5.1.0.tgz#0a317400ced789f233a28826e77523f15857d80d" - integrity sha512-D75KsH1zm5ZrHyxPakAxJWtkyXew5qwS70v56exwvw542d9CRtTo78K0WeFxZB4G7JXKKMbEZtZayTGdIky/eA== +postcss-svgo@^6.0.3: + version "6.0.3" + resolved "https://registry.yarnpkg.com/postcss-svgo/-/postcss-svgo-6.0.3.tgz#1d6e180d6df1fa8a3b30b729aaa9161e94f04eaa" + integrity sha512-dlrahRmxP22bX6iKEjOM+c8/1p+81asjKT+V5lrgOH944ryx/OHpclnIbGsKVd3uWOXFLYJwCVf0eEkJGvO96g== dependencies: postcss-value-parser "^4.2.0" - svgo "^2.7.0" + svgo "^3.2.0" -postcss-unique-selectors@^5.1.1: - version "5.1.1" - resolved "https://registry.yarnpkg.com/postcss-unique-selectors/-/postcss-unique-selectors-5.1.1.tgz#a9f273d1eacd09e9aa6088f4b0507b18b1b541b6" - integrity sha512-5JiODlELrz8L2HwxfPnhOWZYWDxVHWL83ufOv84NrcgipI7TaeRsatAhK4Tr2/ZiYldpK/wBvw5BD3qfaK96GA== +postcss-unique-selectors@^6.0.4: + version "6.0.4" + resolved "https://registry.yarnpkg.com/postcss-unique-selectors/-/postcss-unique-selectors-6.0.4.tgz#983ab308896b4bf3f2baaf2336e14e52c11a2088" + integrity sha512-K38OCaIrO8+PzpArzkLKB42dSARtC2tmG6PvD4b1o1Q2E9Os8jzfWFfSy/rixsHwohtsDdFtAWGjFVFUdwYaMg== dependencies: - postcss-selector-parser "^6.0.5" + postcss-selector-parser "^6.0.16" postcss-value-parser@^4.1.0, postcss-value-parser@^4.2.0: version "4.2.0" resolved "https://registry.yarnpkg.com/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz#723c09920836ba6d3e5af019f92bc0971c02e514" integrity 
sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ== -postcss-zindex@^5.1.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/postcss-zindex/-/postcss-zindex-5.1.0.tgz#4a5c7e5ff1050bd4c01d95b1847dfdcc58a496ff" - integrity sha512-fgFMf0OtVSBR1va1JNHYgMxYk73yhn/qb4uQDq1DLGYolz8gHCyr/sesEuGUaYs58E3ZJRcpoGuPVoB7Meiq9A== +postcss-zindex@^6.0.2: + version "6.0.2" + resolved "https://registry.yarnpkg.com/postcss-zindex/-/postcss-zindex-6.0.2.tgz#e498304b83a8b165755f53db40e2ea65a99b56e1" + integrity sha512-5BxW9l1evPB/4ZIc+2GobEBoKC+h8gPGCMi+jxsYvd2x0mjq7wazk6DrP71pStqxE9Foxh5TVnonbWpFZzXaYg== -postcss@^8.3.11, postcss@^8.4.14, postcss@^8.4.17, postcss@^8.4.19: - version "8.4.20" - resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.4.20.tgz#64c52f509644cecad8567e949f4081d98349dc56" - integrity sha512-6Q04AXR1212bXr5fh03u8aAwbLxAQNGQ/Q1LNa0VfOI06ZAlhPHtQvE4OIdpj4kLThXilalPnmDSOD65DcHt+g== +postcss@^8.4.21, postcss@^8.4.24, postcss@^8.4.26, postcss@^8.4.33, postcss@^8.4.38: + version "8.4.47" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.4.47.tgz#5bf6c9a010f3e724c503bf03ef7947dcb0fea365" + integrity sha512-56rxCq7G/XfB4EkXq9Egn5GCqugWvDFjafDOThIdMBsI15iqPqR5r15TfSr1YPYeEI19YeaXMCbY6u88Y76GLQ== dependencies: - nanoid "^3.3.4" - picocolors "^1.0.0" - source-map-js "^1.0.2" - -prepend-http@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/prepend-http/-/prepend-http-2.0.0.tgz#e92434bfa5ea8c19f41cdfd401d741a3c819d897" - integrity sha512-ravE6m9Atw9Z/jjttRUZ+clIXogdghyZAuWJ3qEzjT+jI/dL1ifAqhZeC5VHzQp1MSt1+jxKkFNemj/iO7tVUA== + nanoid "^3.3.7" + picocolors "^1.1.0" + source-map-js "^1.2.1" pretty-error@^4.0.0: version "4.0.0" @@ -7729,12 +8669,20 @@ pretty-time@^1.1.0: resolved "https://registry.yarnpkg.com/pretty-time/-/pretty-time-1.1.0.tgz#ffb7429afabb8535c346a34e41873adf3d74dd0e" integrity sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA== -prism-react-renderer@^1.2.1, prism-react-renderer@^1.3.5: +prism-react-renderer@^1.2.1: version "1.3.5" resolved "https://registry.yarnpkg.com/prism-react-renderer/-/prism-react-renderer-1.3.5.tgz#786bb69aa6f73c32ba1ee813fbe17a0115435085" integrity sha512-IJ+MSwBWKG+SM3b2SUfdrhC+gu01QkV2KmRQgREThBfSQRoufqRfxfHUxpG1WcaFjP+kojcFyO9Qqtpgt3qLCg== -prismjs@^1.28.0: +prism-react-renderer@^2.3.0: + version "2.4.0" + resolved "https://registry.yarnpkg.com/prism-react-renderer/-/prism-react-renderer-2.4.0.tgz#c5ea692029c2f8b3fd04f63662d04ffd4eaf10a0" + integrity sha512-327BsVCD/unU4CNLZTWVHyUHKnsqcvj2qbPlQ8MiBE2eq2rgctjigPA1Gp9HLF83kZ20zNN6jgizHJeEsyFYOw== + dependencies: + "@types/prismjs" "^1.26.0" + clsx "^2.0.0" + +prismjs@^1.29.0: version "1.29.0" resolved "https://registry.yarnpkg.com/prismjs/-/prismjs-1.29.0.tgz#f113555a8fa9b57c35e637bba27509dcf802dd12" integrity sha512-Kx/1w86q/epKcmte75LNrEoT+lX8pBpavuAbvJWRXar7Hz8jrtF+e3vY751p0R8H9HdArwaCTNDDzHg/ScJK1Q== @@ -7744,13 +8692,6 @@ process-nextick-args@~2.0.0: resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-2.0.1.tgz#7820d9b16120cc55ca9ae7792680ae7dba6d7fe2" integrity sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag== -promise@^7.1.1: - version "7.3.1" - resolved "https://registry.yarnpkg.com/promise/-/promise-7.3.1.tgz#064b72602b18f90f29192b8b1bc418ffd1ebd3bf" - integrity sha512-nolQXZ/4L+bP/UGlkfaIujX9BKxGwmQ9OT4mOt5yvy8iK1h3wqTEJCijzGANTCCl9nWjY41juyAn2K3Q1hLLTg== - dependencies: - asap "~2.0.3" - 
prompts@^2.4.2: version "2.4.2" resolved "https://registry.yarnpkg.com/prompts/-/prompts-2.4.2.tgz#7b57e73b3a48029ad10ebd44f74b01722a4cb069" @@ -7768,7 +8709,7 @@ prop-types@^15.6.2, prop-types@^15.7.2: object-assign "^4.1.1" react-is "^16.13.1" -property-information@^5.0.0, property-information@^5.3.0: +property-information@^5.0.0: version "5.6.0" resolved "https://registry.yarnpkg.com/property-information/-/property-information-5.6.0.tgz#61675545fb23002f245c6540ec46077d4da3ed69" integrity sha512-YUHSPk+A30YPv+0Qf8i9Mbfe/C0hdPXk1s1jPVToV8pk8BQtpw10ct89Eo7OWkutrwqvT0eicAxlOg3dOAu8JA== @@ -7776,9 +8717,14 @@ property-information@^5.0.0, property-information@^5.3.0: xtend "^4.0.0" property-information@^6.0.0: - version "6.2.0" - resolved "https://registry.yarnpkg.com/property-information/-/property-information-6.2.0.tgz#b74f522c31c097b5149e3c3cb8d7f3defd986a1d" - integrity sha512-kma4U7AFCTwpqq5twzC1YVIDXSqg6qQK6JN0smOw8fgRy1OkMi0CYSzFmsy6dnqSenamAtj0CyXMUJ1Mf6oROg== + version "6.5.0" + resolved "https://registry.yarnpkg.com/property-information/-/property-information-6.5.0.tgz#6212fbb52ba757e92ef4fb9d657563b933b7ffec" + integrity sha512-PgTgs/BlvHxOu8QuEN7wi5A0OmXaBcHpmCSTehcs6Uuu9IkDIEo13Hy7n898RHfrQ49vKCoGeWZSaAK01nwVig== + +proto-list@~1.2.1: + version "1.2.4" + resolved "https://registry.yarnpkg.com/proto-list/-/proto-list-1.2.4.tgz#212d5bfe1318306a420f6402b8e26ff39647a849" + integrity sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA== proxy-addr@~2.0.7: version "2.0.7" @@ -7788,47 +8734,29 @@ proxy-addr@~2.0.7: forwarded "0.2.0" ipaddr.js "1.9.1" -pump@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/pump/-/pump-3.0.0.tgz#b4a2116815bde2f4e1ea602354e8c75565107a64" - integrity sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww== - dependencies: - end-of-stream "^1.1.0" - once "^1.3.1" - -punycode@^1.3.2: - version "1.4.1" - resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e" - integrity sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ== - punycode@^2.1.0: - version "2.1.1" - resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.1.1.tgz#b58b010ac40c22c5657616c8d2c2c02c7bf479ec" - integrity sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A== + version "2.3.1" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.3.1.tgz#027422e2faec0b25e1549c3e1bd8309b9133b6e5" + integrity sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg== -pupa@^2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/pupa/-/pupa-2.1.1.tgz#f5e8fd4afc2c5d97828faa523549ed8744a20d62" - integrity sha512-l1jNAspIBSFqbT+y+5FosojNpVpF94nlI+wDUpqP9enwOTfHx9f0gh5nB96vl+6yTpsJsypeNrwfzPrKuHB41A== +pupa@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/pupa/-/pupa-3.1.0.tgz#f15610274376bbcc70c9a3aa8b505ea23f41c579" + integrity sha512-FLpr4flz5xZTSJxSeaheeMKN/EDzMdK7b8PTOC6a5PYFKTucWbdqjgqaEyH0shFiSJrVB1+Qqi4Tk19ccU6Aug== dependencies: - escape-goat "^2.0.0" - -pure-color@^1.2.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/pure-color/-/pure-color-1.3.0.tgz#1fe064fb0ac851f0de61320a8bf796836422f33e" - integrity sha512-QFADYnsVoBMw1srW7OVKEYjG+MbIa49s54w1MA1EDY6r2r/sTcKKYqRX1f4GYvnXP7eN/Pe9HFcX+hwzmrXRHA== + escape-goat "^4.0.0" q@^1.1.2: version "1.5.1" resolved 
"https://registry.yarnpkg.com/q/-/q-1.5.1.tgz#7e32f75b41381291d04611f1bf14109ac00651d7" integrity sha512-kV/CThkXo6xyFEZUugw/+pIOywXcDbFYgSct5cT3gqlbkBE1SJdwy6UQoZvodiWF/ckQLZyDE/Bu1M6gVu5lVw== -qs@6.11.0: - version "6.11.0" - resolved "https://registry.yarnpkg.com/qs/-/qs-6.11.0.tgz#fd0d963446f7a65e1367e01abd85429453f0c37a" - integrity sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q== +qs@6.13.0: + version "6.13.0" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.13.0.tgz#6ca3bd58439f7e245655798997787b0d88a51906" + integrity sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg== dependencies: - side-channel "^1.0.4" + side-channel "^1.0.6" queue-microtask@^1.2.2: version "1.2.3" @@ -7842,6 +8770,11 @@ queue@6.0.2: dependencies: inherits "~2.0.3" +quick-lru@^5.1.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/quick-lru/-/quick-lru-5.1.1.tgz#366493e6b3e42a3a6885e2e99d18f80fb7a8c932" + integrity sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA== + randombytes@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/randombytes/-/randombytes-2.1.0.tgz#df6f84372f0270dc65cdf6291349ab7a473d4f2a" @@ -7859,17 +8792,17 @@ range-parser@^1.2.1, range-parser@~1.2.1: resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.1.tgz#3cf37023d199e1c24d1a55b84800c2f3e6468031" integrity sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg== -raw-body@2.5.1: - version "2.5.1" - resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.5.1.tgz#fe1b1628b181b700215e5fd42389f98b71392857" - integrity sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig== +raw-body@2.5.2: + version "2.5.2" + resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.5.2.tgz#99febd83b90e08975087e8f1f9419a149366b68a" + integrity sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA== dependencies: bytes "3.1.2" http-errors "2.0.0" iconv-lite "0.4.24" unpipe "1.0.0" -rc@1.2.8, rc@^1.2.8: +rc@1.2.8: version "1.2.8" resolved "https://registry.yarnpkg.com/rc/-/rc-1.2.8.tgz#cd924bf5200a075b83c188cd6b9e211b7fc0d3ed" integrity sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw== @@ -7879,16 +8812,6 @@ rc@1.2.8, rc@^1.2.8: minimist "^1.2.0" strip-json-comments "~2.0.1" -react-base16-styling@^0.6.0: - version "0.6.0" - resolved "https://registry.yarnpkg.com/react-base16-styling/-/react-base16-styling-0.6.0.tgz#ef2156d66cf4139695c8a167886cb69ea660792c" - integrity sha512-yvh/7CArceR/jNATXOKDlvTnPKPmGZz7zsenQ3jUwLzHkNUR0CvY3yGYJbWJ/nnxsL8Sgmt5cO3/SILVuPO6TQ== - dependencies: - base16 "^1.0.0" - lodash.curry "^4.0.1" - lodash.flow "^3.3.0" - pure-color "^1.2.0" - react-dev-utils@^12.0.1: version "12.0.1" resolved "https://registry.yarnpkg.com/react-dev-utils/-/react-dev-utils-12.0.1.tgz#ba92edb4a1f379bd46ccd6bcd4e7bc398df33e73" @@ -7919,26 +8842,34 @@ react-dev-utils@^12.0.1: strip-ansi "^6.0.1" text-table "^0.2.0" -react-dom@^17.0.1: - version "17.0.2" - resolved "https://registry.yarnpkg.com/react-dom/-/react-dom-17.0.2.tgz#ecffb6845e3ad8dbfcdc498f0d0a939736502c23" - integrity sha512-s4h96KtLDUQlsENhMn1ar8t2bEa+q/YAtj8pPPdIjPDGBDIVNsrD9aXNWqspUe6AzKCIG0C1HZZLqLV7qpOBGA== +react-dom@^18.0.0: + version "18.3.1" + resolved 
"https://registry.yarnpkg.com/react-dom/-/react-dom-18.3.1.tgz#c2265d79511b57d479b3dd3fdfa51536494c5cb4" + integrity sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw== dependencies: loose-envify "^1.1.0" - object-assign "^4.1.1" - scheduler "^0.20.2" + scheduler "^0.23.2" react-error-overlay@^6.0.11: version "6.0.11" resolved "https://registry.yarnpkg.com/react-error-overlay/-/react-error-overlay-6.0.11.tgz#92835de5841c5cf08ba00ddd2d677b6d17ff9adb" integrity sha512-/6UZ2qgEyH2aqzYZgQPxEnz33NJ2gNsnHA2o5+o4wW9bLM/JYQitNP9xPhsXwC08hMMovfGe/8retsdDsczPRg== -react-fast-compare@^3.2.0: - version "3.2.0" - resolved "https://registry.yarnpkg.com/react-fast-compare/-/react-fast-compare-3.2.0.tgz#641a9da81b6a6320f270e89724fb45a0b39e43bb" - integrity sha512-rtGImPZ0YyLrscKI9xTpV8psd6I8VAtjKCzQDlzyDvqJA8XOW78TXYQwNRNd8g8JZnDu8q9Fu/1v4HPAVwVdHA== +react-fast-compare@^3.2.0, react-fast-compare@^3.2.2: + version "3.2.2" + resolved "https://registry.yarnpkg.com/react-fast-compare/-/react-fast-compare-3.2.2.tgz#929a97a532304ce9fee4bcae44234f1ce2c21d49" + integrity sha512-nsO+KSNgo1SbJqJEYRE9ERzo7YtYbou/OqjSQKxV7jcKox7+usiUVZOAC+XnDOABXggQTno0Y1CpVnuWEc1boQ== + +react-helmet-async@*: + version "2.0.5" + resolved "https://registry.yarnpkg.com/react-helmet-async/-/react-helmet-async-2.0.5.tgz#cfc70cd7bb32df7883a8ed55502a1513747223ec" + integrity sha512-rYUYHeus+i27MvFE+Jaa4WsyBKGkL6qVgbJvSBoX8mbsWoABJXdEO0bZyi0F6i+4f0NuIb8AvqPMj3iXFHkMwg== + dependencies: + invariant "^2.2.4" + react-fast-compare "^3.2.2" + shallowequal "^1.1.0" -react-helmet-async@*, react-helmet-async@^1.3.0: +react-helmet-async@^1.3.0: version "1.3.0" resolved "https://registry.yarnpkg.com/react-helmet-async/-/react-helmet-async-1.3.0.tgz#7bd5bf8c5c69ea9f02f6083f14ce33ef545c222e" integrity sha512-9jZ57/dAn9t3q6hneQS0wukqC2ENOBgMNVEhb/ZG9ZSxUetzVIw4iAmEU38IaVg3QGYauQPhSeUTuIUtFglWpg== @@ -7954,17 +8885,12 @@ react-is@^16.13.1, react-is@^16.6.0, react-is@^16.7.0: resolved "https://registry.yarnpkg.com/react-is/-/react-is-16.13.1.tgz#789729a4dc36de2999dc156dd6c1d9c18cea56a4" integrity sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ== -react-json-view@^1.21.3: - version "1.21.3" - resolved "https://registry.yarnpkg.com/react-json-view/-/react-json-view-1.21.3.tgz#f184209ee8f1bf374fb0c41b0813cff54549c475" - integrity sha512-13p8IREj9/x/Ye4WI/JpjhoIwuzEgUAtgJZNBJckfzJt1qyh24BdTm6UQNGnyTq9dapQdrqvquZTo3dz1X6Cjw== - dependencies: - flux "^4.0.1" - react-base16-styling "^0.6.0" - react-lifecycles-compat "^3.0.4" - react-textarea-autosize "^8.3.2" +react-json-view-lite@^1.2.0: + version "1.5.0" + resolved "https://registry.yarnpkg.com/react-json-view-lite/-/react-json-view-lite-1.5.0.tgz#377cc302821717ac79a1b6d099e1891df54c8662" + integrity sha512-nWqA1E4jKPklL2jvHWs6s+7Na0qNgw9HCP6xehdQJeg6nPBTFZgGwyko9Q0oj+jQWKTTVRS30u0toM5wiuL3iw== -react-lifecycles-compat@^3.0.0, react-lifecycles-compat@^3.0.4: +react-lifecycles-compat@^3.0.0: version "3.0.4" resolved "https://registry.yarnpkg.com/react-lifecycles-compat/-/react-lifecycles-compat-3.0.4.tgz#4f1a273afdfc8f3488a8c516bfda78f872352362" integrity sha512-fBASbA6LnOU9dOU2eW7aQ8xmYBSXUIWr+UmF9b1efZBazGNO+rcXT/icdKnYm2pTwcRylVUYwW7H1PHfLekVzA== @@ -7990,6 +8916,13 @@ react-loadable-ssr-addon-v5-slorber@^1.0.1: dependencies: "@babel/runtime" "^7.10.3" +"react-loadable@npm:@docusaurus/react-loadable@6.0.0": + version "6.0.0" + resolved 
"https://registry.yarnpkg.com/@docusaurus/react-loadable/-/react-loadable-6.0.0.tgz#de6c7f73c96542bd70786b8e522d535d69069dc4" + integrity sha512-YMMxTUQV/QFSnbgrP3tjDzLHRg7vsbMn8e9HAa8o/1iXoiomo48b7sk/kkmWEuWNDPJVlKSJRB6Y2fHqdJk+SQ== + dependencies: + "@types/react" "*" + react-modal@3.15.1: version "3.15.1" resolved "https://registry.yarnpkg.com/react-modal/-/react-modal-3.15.1.tgz#950ce67bfef80971182dd0ed38f2d9b1a681288b" @@ -8007,7 +8940,7 @@ react-router-config@^5.1.1: dependencies: "@babel/runtime" "^7.1.2" -react-router-dom@^5.3.3: +react-router-dom@^5.3.4: version "5.3.4" resolved "https://registry.yarnpkg.com/react-router-dom/-/react-router-dom-5.3.4.tgz#2ed62ffd88cae6db134445f4a0c0ae8b91d2e5e6" integrity sha512-m4EqFMHv/Ih4kpcBCONHbkT68KoAeHN4p3lAGoNryfHi0dMy0kCzEZakiKRsvg5wHZ/JLrLW8o8KomWiz/qbYQ== @@ -8020,7 +8953,7 @@ react-router-dom@^5.3.3: tiny-invariant "^1.0.2" tiny-warning "^1.0.0" -react-router@5.3.4, react-router@^5.3.3: +react-router@5.3.4, react-router@^5.3.4: version "5.3.4" resolved "https://registry.yarnpkg.com/react-router/-/react-router-5.3.4.tgz#8ca252d70fcc37841e31473c7a151cf777887bb5" integrity sha512-Ys9K+ppnJah3QuaRiLxk+jDWOR1MekYQrlytiXxC1RyfbdsZkS5pvKAzCCr031xHixZwpnsYNT5xysdFHQaYsA== @@ -8040,27 +8973,17 @@ react-simple-code-editor@^0.11.0: resolved "https://registry.yarnpkg.com/react-simple-code-editor/-/react-simple-code-editor-0.11.3.tgz#6e5af1c2e51588aded2c89b86e98fac144212f61" integrity sha512-7bVI4Yd1aNCeuldErXUt8ksaAG5Fi+GZ6vp3mtFBnckKdzsQtrgkDvdwMFXIhwTGG+mUYmk5ZpMo0axSW9JBzA== -react-textarea-autosize@^8.3.2: - version "8.4.0" - resolved "https://registry.yarnpkg.com/react-textarea-autosize/-/react-textarea-autosize-8.4.0.tgz#4d0244d6a50caa897806b8c44abc0540a69bfc8c" - integrity sha512-YrTFaEHLgJsi8sJVYHBzYn+mkP3prGkmP2DKb/tm0t7CLJY5t1Rxix8070LAKb0wby7bl/lf2EeHkuMihMZMwQ== - dependencies: - "@babel/runtime" "^7.10.2" - use-composed-ref "^1.3.0" - use-latest "^1.2.1" - -react@^17.0.1: - version "17.0.2" - resolved "https://registry.yarnpkg.com/react/-/react-17.0.2.tgz#d0b5cc516d29eb3eee383f75b62864cfb6800037" - integrity sha512-gnhPt75i/dq/z3/6q/0asP78D0u592D5L1pd7M8P+dck6Fu/jJeL6iVVK23fptSUZj8Vjf++7wXA8UNclGQcbA== +react@^18.0.0: + version "18.3.1" + resolved "https://registry.yarnpkg.com/react/-/react-18.3.1.tgz#49ab892009c53933625bd16b2533fc754cab2891" + integrity sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ== dependencies: loose-envify "^1.1.0" - object-assign "^4.1.1" readable-stream@^2.0.1: - version "2.3.7" - resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.7.tgz#1eca1cf711aef814c04f62252a36a62f6cb23b57" - integrity sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw== + version "2.3.8" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.8.tgz#91125e8042bba1b9887f49345f6277027ce8be9b" + integrity sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA== dependencies: core-util-is "~1.0.0" inherits "~2.0.3" @@ -8071,9 +8994,9 @@ readable-stream@^2.0.1: util-deprecate "~1.0.1" readable-stream@^3.0.6: - version "3.6.0" - resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-3.6.0.tgz#337bbda3adc0706bd3e024426a286d4b4b2c9198" - integrity sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA== + version "3.6.2" + resolved 
"https://registry.yarnpkg.com/readable-stream/-/readable-stream-3.6.2.tgz#56a9b36ea965c00c5a93ef31eb111a0f11056967" + integrity sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA== dependencies: inherits "^2.0.3" string_decoder "^1.1.1" @@ -8098,6 +9021,46 @@ rechoir@^0.6.2: dependencies: resolve "^1.1.6" +recma-build-jsx@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/recma-build-jsx/-/recma-build-jsx-1.0.0.tgz#c02f29e047e103d2fab2054954e1761b8ea253c4" + integrity sha512-8GtdyqaBcDfva+GUKDr3nev3VpKAhup1+RvkMvUxURHpW7QyIvk9F5wz7Vzo06CEMSilw6uArgRqhpiUcWp8ew== + dependencies: + "@types/estree" "^1.0.0" + estree-util-build-jsx "^3.0.0" + vfile "^6.0.0" + +recma-jsx@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/recma-jsx/-/recma-jsx-1.0.0.tgz#f7bef02e571a49d6ba3efdfda8e2efab48dbe3aa" + integrity sha512-5vwkv65qWwYxg+Atz95acp8DMu1JDSqdGkA2Of1j6rCreyFUE/gp15fC8MnGEuG1W68UKjM6x6+YTWIh7hZM/Q== + dependencies: + acorn-jsx "^5.0.0" + estree-util-to-js "^2.0.0" + recma-parse "^1.0.0" + recma-stringify "^1.0.0" + unified "^11.0.0" + +recma-parse@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/recma-parse/-/recma-parse-1.0.0.tgz#c351e161bb0ab47d86b92a98a9d891f9b6814b52" + integrity sha512-OYLsIGBB5Y5wjnSnQW6t3Xg7q3fQ7FWbw/vcXtORTnyaSFscOtABg+7Pnz6YZ6c27fG1/aN8CjfwoUEUIdwqWQ== + dependencies: + "@types/estree" "^1.0.0" + esast-util-from-js "^2.0.0" + unified "^11.0.0" + vfile "^6.0.0" + +recma-stringify@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/recma-stringify/-/recma-stringify-1.0.0.tgz#54632030631e0c7546136ff9ef8fde8e7b44f130" + integrity sha512-cjwII1MdIIVloKvC9ErQ+OgAtwHBmcZ0Bg4ciz78FtbT8In39aAYbaA7zvxQ61xVMSPE8WxhLwLbhif4Js2C+g== + dependencies: + "@types/estree" "^1.0.0" + estree-util-to-js "^2.0.0" + unified "^11.0.0" + vfile "^6.0.0" + recursive-readdir@^2.2.2: version "2.2.3" resolved "https://registry.yarnpkg.com/recursive-readdir/-/recursive-readdir-2.2.3.tgz#e726f328c0d69153bcabd5c322d3195252379372" @@ -8105,10 +9068,10 @@ recursive-readdir@^2.2.2: dependencies: minimatch "^3.0.5" -regenerate-unicode-properties@^10.1.0: - version "10.1.0" - resolved "https://registry.yarnpkg.com/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.0.tgz#7c3192cab6dd24e21cb4461e5ddd7dd24fa8374c" - integrity sha512-d1VudCLoIGitcU/hEg2QqvyGZQmdC0Lf8BqdOMXGFSvJP4bNV1+XqbPQeHHLD51Jh4QJJ225dlIFvY4Ly6MXmQ== +regenerate-unicode-properties@^10.2.0: + version "10.2.0" + resolved "https://registry.yarnpkg.com/regenerate-unicode-properties/-/regenerate-unicode-properties-10.2.0.tgz#626e39df8c372338ea9b8028d1f99dc3fd9c3db0" + integrity sha512-DqHn3DwbmmPVzeKj9woBadqmXxLvQoQIwu7nopMc72ztvxVmVk2SBhSnx67zuye5TP+lJsb/TBQsjLKhnDf3MA== dependencies: regenerate "^1.4.2" @@ -8124,26 +9087,27 @@ regenerate@^1.4.2: resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.4.2.tgz#b9346d8827e8f5a32f7ba29637d398b69014848a" integrity sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A== -regenerator-runtime@^0.13.11: - version "0.13.11" - resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz#f6dca3e7ceec20590d07ada785636a90cdca17f9" - integrity sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg== +regenerator-runtime@^0.14.0: + version "0.14.1" + resolved 
"https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz#356ade10263f685dda125100cd862c1db895327f" + integrity sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw== -regenerator-transform@^0.15.1: - version "0.15.1" - resolved "https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.15.1.tgz#f6c4e99fc1b4591f780db2586328e4d9a9d8dc56" - integrity sha512-knzmNAcuyxV+gQCufkYcvOqX/qIIfHLv0u5x79kRxuGojfYVky1f15TzZEu2Avte8QGepvUNTnLskf8E6X6Vyg== +regenerator-transform@^0.15.2: + version "0.15.2" + resolved "https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.15.2.tgz#5bbae58b522098ebdf09bca2f83838929001c7a4" + integrity sha512-hfMp2BoF0qOk3uc5V20ALGDS2ddjQaLrdl7xrGXvAIow7qeWRM2VA2HuCHkUKk9slq3VwEwLNK3DFBqDfPGYtg== dependencies: "@babel/runtime" "^7.8.4" -regexp.prototype.flags@^1.4.3: - version "1.4.3" - resolved "https://registry.yarnpkg.com/regexp.prototype.flags/-/regexp.prototype.flags-1.4.3.tgz#87cab30f80f66660181a3bb7bf5981a872b367ac" - integrity sha512-fjggEOO3slI6Wvgjwflkc4NFRCTZAu5CnNfBd5qOMYhWdn67nJBBu34/TkD++eeFmd8C9r9jfXJ27+nSiRkSUA== +regexp.prototype.flags@^1.5.2: + version "1.5.3" + resolved "https://registry.yarnpkg.com/regexp.prototype.flags/-/regexp.prototype.flags-1.5.3.tgz#b3ae40b1d2499b8350ab2c3fe6ef3845d3a96f42" + integrity sha512-vqlC04+RQoFalODCbCumG2xIOvapzVMHwsyIGM/SIE8fRhFFsXeH8/QQ+s0T0kDAhKc4k30s73/0ydkHQz6HlQ== dependencies: - call-bind "^1.0.2" - define-properties "^1.1.3" - functions-have-names "^1.2.2" + call-bind "^1.0.7" + define-properties "^1.2.1" + es-errors "^1.3.0" + set-function-name "^2.0.2" regexpu-core@^4.2.0: version "4.8.0" @@ -8157,41 +9121,48 @@ regexpu-core@^4.2.0: unicode-match-property-ecmascript "^2.0.0" unicode-match-property-value-ecmascript "^2.0.0" -regexpu-core@^5.2.1: - version "5.2.2" - resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-5.2.2.tgz#3e4e5d12103b64748711c3aad69934d7718e75fc" - integrity sha512-T0+1Zp2wjF/juXMrMxHxidqGYn8U4R+zleSJhX9tQ1PUsS8a9UtYfbsF9LdiVgNX3kiX8RNaKM42nfSgvFJjmw== +regexpu-core@^6.1.1: + version "6.1.1" + resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-6.1.1.tgz#b469b245594cb2d088ceebc6369dceb8c00becac" + integrity sha512-k67Nb9jvwJcJmVpw0jPttR1/zVfnKf8Km0IPatrU/zJ5XeG3+Slx0xLXs9HByJSzXzrlz5EDvN6yLNMDc2qdnw== dependencies: regenerate "^1.4.2" - regenerate-unicode-properties "^10.1.0" - regjsgen "^0.7.1" - regjsparser "^0.9.1" + regenerate-unicode-properties "^10.2.0" + regjsgen "^0.8.0" + regjsparser "^0.11.0" unicode-match-property-ecmascript "^2.0.0" unicode-match-property-value-ecmascript "^2.1.0" -registry-auth-token@^4.0.0: - version "4.2.2" - resolved "https://registry.yarnpkg.com/registry-auth-token/-/registry-auth-token-4.2.2.tgz#f02d49c3668884612ca031419491a13539e21fac" - integrity sha512-PC5ZysNb42zpFME6D/XlIgtNGdTl8bBOCw90xQLVMpzuuubJKYDWFAEuUNc+Cn8Z8724tg2SDhDRrkVEsqfDMg== +registry-auth-token@^5.0.1: + version "5.0.2" + resolved "https://registry.yarnpkg.com/registry-auth-token/-/registry-auth-token-5.0.2.tgz#8b026cc507c8552ebbe06724136267e63302f756" + integrity sha512-o/3ikDxtXaA59BmZuZrJZDJv8NMDGSj+6j6XaeBmHw8eY1i1qd9+6H+LjVvQXx3HN6aRCGa1cUdJ9RaJZUugnQ== dependencies: - rc "1.2.8" + "@pnpm/npm-conf" "^2.1.0" -registry-url@^5.0.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/registry-url/-/registry-url-5.1.0.tgz#e98334b50d5434b81136b44ec638d9c2009c5009" - integrity 
sha512-8acYXXTI0AkQv6RAOjE3vOaIXZkT9wo4LOFbBKYQEEnnMNBpKqdUrI6S4NT0KPIo/WVvJ5tE/X5LF/TQUf0ekw== +registry-url@^6.0.0: + version "6.0.1" + resolved "https://registry.yarnpkg.com/registry-url/-/registry-url-6.0.1.tgz#056d9343680f2f64400032b1e199faa692286c58" + integrity sha512-+crtS5QjFRqFCoQmvGduwYWEBng99ZvmFvF+cUJkGYF1L1BfU8C6Zp9T7f5vPAwyLkUExpvK+ANVZmGU49qi4Q== dependencies: - rc "^1.2.8" + rc "1.2.8" regjsgen@^0.5.2: version "0.5.2" resolved "https://registry.yarnpkg.com/regjsgen/-/regjsgen-0.5.2.tgz#92ff295fb1deecbf6ecdab2543d207e91aa33733" integrity sha512-OFFT3MfrH90xIW8OOSyUrk6QHD5E9JOTeGodiJeBS3J6IwlgzJMNE/1bZklWz5oTg+9dCMyEetclvCVXOPoN3A== -regjsgen@^0.7.1: - version "0.7.1" - resolved "https://registry.yarnpkg.com/regjsgen/-/regjsgen-0.7.1.tgz#ee5ef30e18d3f09b7c369b76e7c2373ed25546f6" - integrity sha512-RAt+8H2ZEzHeYWxZ3H2z6tF18zyyOnlcdaafLrm21Bguj7uZy6ULibiAFdXEtKQY4Sy7wDTwDiOazasMLc4KPA== +regjsgen@^0.8.0: + version "0.8.0" + resolved "https://registry.yarnpkg.com/regjsgen/-/regjsgen-0.8.0.tgz#df23ff26e0c5b300a6470cad160a9d090c3a37ab" + integrity sha512-RvwtGe3d7LvWiDQXeQw8p5asZUmfU1G/l6WbUXeHta7Y2PEIvBTwH6E2EfmYUK8pxcxEdEmaomqyp0vZZ7C+3Q== + +regjsparser@^0.11.0: + version "0.11.1" + resolved "https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.11.1.tgz#ae55c74f646db0c8fcb922d4da635e33da405149" + integrity sha512-1DHODs4B8p/mQHU9kr+jv8+wIC9mtG4eBHxWxIq5mhjE3D5oORhCc6deRKzTjs9DcfRFmj9BHSDguZklqCGFWQ== + dependencies: + jsesc "~3.0.2" regjsparser@^0.7.0: version "0.7.0" @@ -8200,13 +9171,6 @@ regjsparser@^0.7.0: dependencies: jsesc "~0.5.0" -regjsparser@^0.9.1: - version "0.9.1" - resolved "https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.9.1.tgz#272d05aa10c7c1f67095b1ff0addae8442fc5709" - integrity sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ== - dependencies: - jsesc "~0.5.0" - rehype-parse@^7.0.1: version "7.0.1" resolved "https://registry.yarnpkg.com/rehype-parse/-/rehype-parse-7.0.1.tgz#58900f6702b56767814afc2a9efa2d42b1c90c57" @@ -8215,24 +9179,59 @@ rehype-parse@^7.0.1: hast-util-from-parse5 "^6.0.0" parse5 "^6.0.0" +rehype-raw@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/rehype-raw/-/rehype-raw-7.0.0.tgz#59d7348fd5dbef3807bbaa1d443efd2dd85ecee4" + integrity sha512-/aE8hCfKlQeA8LmyeyQvQF3eBiLRGNlfBJEvWH7ivp9sBqs7TNqBL5X3v157rM4IFETqDnIOO+z5M/biZbo9Ww== + dependencies: + "@types/hast" "^3.0.0" + hast-util-raw "^9.0.0" + vfile "^6.0.0" + +rehype-recma@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/rehype-recma/-/rehype-recma-1.0.0.tgz#d68ef6344d05916bd96e25400c6261775411aa76" + integrity sha512-lqA4rGUf1JmacCNWWZx0Wv1dHqMwxzsDWYMTowuplHF3xH0N/MmrZ/G3BDZnzAkRmxDadujCjaKM2hqYdCBOGw== + dependencies: + "@types/estree" "^1.0.0" + "@types/hast" "^3.0.0" + hast-util-to-estree "^3.0.0" + relateurl@^0.2.7: version "0.2.7" resolved "https://registry.yarnpkg.com/relateurl/-/relateurl-0.2.7.tgz#54dbf377e51440aca90a4cd274600d3ff2d888a9" integrity sha512-G08Dxvm4iDN3MLM0EsP62EDV9IuhXPR6blNz6Utcp7zyV3tr4HVNINt6MpaRWbxoOHT3Q7YN2P+jaHX8vUbgog== -remark-emoji@^2.2.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/remark-emoji/-/remark-emoji-2.2.0.tgz#1c702090a1525da5b80e15a8f963ef2c8236cac7" - integrity sha512-P3cj9s5ggsUvWw5fS2uzCHJMGuXYRb0NnZqYlNecewXt8QBU9n5vW3DUUKOhepS8F9CwdMx9B8a3i7pqFWAI5w== +remark-directive@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/remark-directive/-/remark-directive-3.0.0.tgz#34452d951b37e6207d2e2a4f830dc33442923268" + 
integrity sha512-l1UyWJ6Eg1VPU7Hm/9tt0zKtReJQNOA4+iDMAxTyZNWnJnFlbS/7zhiel/rogTLQ2vMYwDzSJa4BiVNqGlqIMA== dependencies: - emoticon "^3.2.0" - node-emoji "^1.10.0" - unist-util-visit "^2.0.3" + "@types/mdast" "^4.0.0" + mdast-util-directive "^3.0.0" + micromark-extension-directive "^3.0.0" + unified "^11.0.0" -remark-footnotes@2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/remark-footnotes/-/remark-footnotes-2.0.0.tgz#9001c4c2ffebba55695d2dd80ffb8b82f7e6303f" - integrity sha512-3Clt8ZMH75Ayjp9q4CorNeyjwIxHFcTkaektplKGl2A1jNGEUey8cKL0ZC5vJwfcD5GFGsNLImLG/NGzWIzoMQ== +remark-emoji@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/remark-emoji/-/remark-emoji-4.0.1.tgz#671bfda668047689e26b2078c7356540da299f04" + integrity sha512-fHdvsTR1dHkWKev9eNyhTo4EFwbUvJ8ka9SgeWkMPYFX4WoI7ViVBms3PjlQYgw5TLvNQso3GUB/b/8t3yo+dg== + dependencies: + "@types/mdast" "^4.0.2" + emoticon "^4.0.1" + mdast-util-find-and-replace "^3.0.1" + node-emoji "^2.1.0" + unified "^11.0.4" + +remark-frontmatter@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/remark-frontmatter/-/remark-frontmatter-5.0.0.tgz#b68d61552a421ec412c76f4f66c344627dc187a2" + integrity sha512-XTFYvNASMe5iPN0719nPrdItC9aU0ssC4v14mH1BCi1u0n1gAocqcujWUrByftZTbLhRtiKRyjYTSIOcr69UVQ== + dependencies: + "@types/mdast" "^4.0.0" + mdast-util-frontmatter "^2.0.0" + micromark-extension-frontmatter "^2.0.0" + unified "^11.0.0" remark-gfm@^3.0.1: version "3.0.1" @@ -8244,64 +9243,62 @@ remark-gfm@^3.0.1: micromark-extension-gfm "^2.0.0" unified "^10.0.0" -remark-mdx-filter-imports@^0.1.2: - version "0.1.2" - resolved "https://registry.yarnpkg.com/remark-mdx-filter-imports/-/remark-mdx-filter-imports-0.1.2.tgz#fa521585a14822a8e177f1b1d353b695e278cc96" - integrity sha512-8MAgusHtNjbNXKkBc/ckjh2U25N/D0aCfN2KDdnBQaWknLupU60pQzTEBpsZLZN67MsC6rVt6ZQgEzthOmQAAw== +remark-gfm@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/remark-gfm/-/remark-gfm-4.0.0.tgz#aea777f0744701aa288b67d28c43565c7e8c35de" + integrity sha512-U92vJgBPkbw4Zfu/IiW2oTZLSL3Zpv+uI7My2eq8JxKgqraFdU8YUGicEJCEgSbeaG+QDFqIcwwfMTOEelPxuA== + dependencies: + "@types/mdast" "^4.0.0" + mdast-util-gfm "^3.0.0" + micromark-extension-gfm "^3.0.0" + remark-parse "^11.0.0" + remark-stringify "^11.0.0" + unified "^11.0.0" -remark-mdx@1.6.22: - version "1.6.22" - resolved "https://registry.yarnpkg.com/remark-mdx/-/remark-mdx-1.6.22.tgz#06a8dab07dcfdd57f3373af7f86bd0e992108bbd" - integrity sha512-phMHBJgeV76uyFkH4rvzCftLfKCr2RZuF+/gmVcaKrpsihyzmhXjA0BEMDaPTXG5y8qZOKPVo83NAOX01LPnOQ== - dependencies: - "@babel/core" "7.12.9" - "@babel/helper-plugin-utils" "7.10.4" - "@babel/plugin-proposal-object-rest-spread" "7.12.1" - "@babel/plugin-syntax-jsx" "7.12.1" - "@mdx-js/util" "1.6.22" - is-alphabetical "1.0.4" - remark-parse "8.0.3" - unified "9.2.0" +remark-mdx-filter-imports@^0.1.3: + version "0.1.3" + resolved "https://registry.yarnpkg.com/remark-mdx-filter-imports/-/remark-mdx-filter-imports-0.1.3.tgz#52aeeb235a1ac43d22d40359ae7df2632596b00c" + integrity sha512-tReGHtQ9sFMqrp95pwsIaEB6qVtGCN5urnVnkVauYHk4OLsYL5EcCVw0CnqvvjPCS4IX1I6ikK8LKWraTj+JjQ== + dependencies: + "@babel/generator" "^7.12.5" + "@babel/parser" "^7.12.7" + unist-util-visit "^2.0.3" remark-mdx@^2.0.0: - version "2.2.1" - resolved "https://registry.yarnpkg.com/remark-mdx/-/remark-mdx-2.2.1.tgz#0d006436acf039b526c3b6b47ea4a44b3d544db7" - integrity sha512-R9wcN+/THRXTKyRBp6Npo/mcbGA2iT3N4G8qUqLA5pOEg7kBidHv8K2hHidCMYZ6DXmwK18umu0K4cicgA2PPQ== + version "2.3.0" + resolved 
"https://registry.yarnpkg.com/remark-mdx/-/remark-mdx-2.3.0.tgz#efe678025a8c2726681bde8bf111af4a93943db4" + integrity sha512-g53hMkpM0I98MU266IzDFMrTD980gNF3BJnkyFcmN+dD873mQeD5rdMO3Y2X+x8umQfbSE0PcoEDl7ledSA+2g== dependencies: mdast-util-mdx "^2.0.0" micromark-extension-mdxjs "^1.0.0" -remark-parse@8.0.3: - version "8.0.3" - resolved "https://registry.yarnpkg.com/remark-parse/-/remark-parse-8.0.3.tgz#9c62aa3b35b79a486454c690472906075f40c7e1" - integrity sha512-E1K9+QLGgggHxCQtLt++uXltxEprmWzNfg+MxpfHsZlrddKzZ/hZyWHDbK3/Ap8HJQqYJRXP+jHczdL6q6i85Q== - dependencies: - ccount "^1.0.0" - collapse-white-space "^1.0.2" - is-alphabetical "^1.0.0" - is-decimal "^1.0.0" - is-whitespace-character "^1.0.0" - is-word-character "^1.0.0" - markdown-escapes "^1.0.0" - parse-entities "^2.0.0" - repeat-string "^1.5.4" - state-toggle "^1.0.0" - trim "0.0.1" - trim-trailing-lines "^1.0.0" - unherit "^1.0.4" - unist-util-remove-position "^2.0.0" - vfile-location "^3.0.0" - xtend "^4.0.1" +remark-mdx@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/remark-mdx/-/remark-mdx-3.1.0.tgz#f979be729ecb35318fa48e2135c1169607a78343" + integrity sha512-Ngl/H3YXyBV9RcRNdlYsZujAmhsxwzxpDzpDEhFBVAGthS4GDgnctpDjgFl/ULx5UEDzqtW1cyBSNKqYYrqLBA== + dependencies: + mdast-util-mdx "^3.0.0" + micromark-extension-mdxjs "^3.0.0" remark-parse@^10.0.0: - version "10.0.1" - resolved "https://registry.yarnpkg.com/remark-parse/-/remark-parse-10.0.1.tgz#6f60ae53edbf0cf38ea223fe643db64d112e0775" - integrity sha512-1fUyHr2jLsVOkhbvPRBJ5zTKZZyD6yZzYaWCS6BPBdQ8vEMBCH+9zNCDA6tET/zHCi/jLqjCWtlJZUPk+DbnFw== + version "10.0.2" + resolved "https://registry.yarnpkg.com/remark-parse/-/remark-parse-10.0.2.tgz#ca241fde8751c2158933f031a4e3efbaeb8bc262" + integrity sha512-3ydxgHa/ZQzG8LvC7jTXccARYDcRld3VfcgIIFs7bI6vbRSxJJmzgLEIIoYKyrfhaY+ujuWaf/PJiMZXoiCXgw== dependencies: "@types/mdast" "^3.0.0" mdast-util-from-markdown "^1.0.0" unified "^10.0.0" +remark-parse@^11.0.0: + version "11.0.0" + resolved "https://registry.yarnpkg.com/remark-parse/-/remark-parse-11.0.0.tgz#aa60743fcb37ebf6b069204eb4da304e40db45a1" + integrity sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA== + dependencies: + "@types/mdast" "^4.0.0" + mdast-util-from-markdown "^2.0.0" + micromark-util-types "^2.0.0" + unified "^11.0.0" + remark-rehype@^10.0.0: version "10.1.0" resolved "https://registry.yarnpkg.com/remark-rehype/-/remark-rehype-10.1.0.tgz#32dc99d2034c27ecaf2e0150d22a6dcccd9a6279" @@ -8312,12 +9309,25 @@ remark-rehype@^10.0.0: mdast-util-to-hast "^12.1.0" unified "^10.0.0" -remark-squeeze-paragraphs@4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/remark-squeeze-paragraphs/-/remark-squeeze-paragraphs-4.0.0.tgz#76eb0e085295131c84748c8e43810159c5653ead" - integrity sha512-8qRqmL9F4nuLPIgl92XUuxI3pFxize+F1H0e/W3llTk0UsjJaj01+RrirkMw7P21RKe4X6goQhYRSvNWX+70Rw== +remark-rehype@^11.0.0: + version "11.1.1" + resolved "https://registry.yarnpkg.com/remark-rehype/-/remark-rehype-11.1.1.tgz#f864dd2947889a11997c0a2667cd6b38f685bca7" + integrity sha512-g/osARvjkBXb6Wo0XvAeXQohVta8i84ACbenPpoSsxTOQH/Ae0/RGP4WZgnMH5pMLpsj4FG7OHmcIcXxpza8eQ== + dependencies: + "@types/hast" "^3.0.0" + "@types/mdast" "^4.0.0" + mdast-util-to-hast "^13.0.0" + unified "^11.0.0" + vfile "^6.0.0" + +remark-stringify@^11.0.0: + version "11.0.0" + resolved "https://registry.yarnpkg.com/remark-stringify/-/remark-stringify-11.0.0.tgz#4c5b01dd711c269df1aaae11743eb7e2e7636fd3" + integrity 
sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw== dependencies: - mdast-squeeze-paragraphs "^4.0.0" + "@types/mdast" "^4.0.0" + mdast-util-to-markdown "^2.0.0" + unified "^11.0.0" renderkid@^3.0.0: version "3.0.0" @@ -8330,7 +9340,7 @@ renderkid@^3.0.0: lodash "^4.17.21" strip-ansi "^6.0.1" -repeat-string@^1.0.0, repeat-string@^1.5.4: +repeat-string@^1.0.0: version "1.6.1" resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637" integrity sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w== @@ -8350,12 +9360,17 @@ requires-port@^1.0.0: resolved "https://registry.yarnpkg.com/requires-port/-/requires-port-1.0.0.tgz#925d2601d39ac485e091cf0da5c6e694dc3dcaff" integrity sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ== +resolve-alpn@^1.2.0: + version "1.2.1" + resolved "https://registry.yarnpkg.com/resolve-alpn/-/resolve-alpn-1.2.1.tgz#b7adbdac3546aaaec20b45e7d8265927072726f9" + integrity sha512-0a1F4l73/ZFZOakJnQ3FvkJ2+gSTQWz/r2KE5OdDY0TxPm5h4GkqkWWfM47T7HsbnOtcJVEF4epCVy6u7Q3K+g== + resolve-from@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-4.0.0.tgz#4abcd852ad32dd7baabfe9b40e00a36db5f392e6" integrity sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g== -resolve-package-path@^4.0.0: +resolve-package-path@^4.0.3: version "4.0.3" resolved "https://registry.yarnpkg.com/resolve-package-path/-/resolve-package-path-4.0.3.tgz#31dab6897236ea6613c72b83658d88898a9040aa" integrity sha512-SRpNAPW4kewOaNUt8VPqhJ0UMxawMwzJD8V7m1cJfdSTK9ieZwS6K7Dabsm4bmLFM96Z5Y/UznrpG5kt1im8yA== @@ -8367,21 +9382,21 @@ resolve-pathname@^3.0.0: resolved "https://registry.yarnpkg.com/resolve-pathname/-/resolve-pathname-3.0.0.tgz#99d02224d3cf263689becbb393bc560313025dcd" integrity sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng== -resolve@^1.1.6, resolve@^1.14.2, resolve@^1.3.2: - version "1.22.1" - resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.1.tgz#27cb2ebb53f91abb49470a928bba7558066ac177" - integrity sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw== +resolve@^1.1.6, resolve@^1.14.2: + version "1.22.8" + resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.8.tgz#b6c87a9f2aa06dfab52e3d70ac8cde321fa5a48d" + integrity sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw== dependencies: - is-core-module "^2.9.0" + is-core-module "^2.13.0" path-parse "^1.0.7" supports-preserve-symlinks-flag "^1.0.0" -responselike@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/responselike/-/responselike-1.0.2.tgz#918720ef3b631c5642be068f15ade5a46f4ba1e7" - integrity sha512-/Fpe5guzJk1gPqdJLJR5u7eG/gNY4nImjbRDaVWVMRhne55TCmj2i9Q+54PBRfatRC8v/rIiv9BN0pMd9OV5EQ== +responselike@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/responselike/-/responselike-3.0.0.tgz#20decb6c298aff0dbee1c355ca95461d42823626" + integrity sha512-40yHxbNcl2+rzXvZuVkrYohathsSJlMTXKryG5y8uciHv1+xDLHQpgjG64JUO9nrEq2jGLH6IZ8BcZyw3wrweg== dependencies: - lowercase-keys "^1.0.0" + lowercase-keys "^3.0.0" retry@^0.13.1: version "0.13.1" @@ -8400,24 +9415,24 @@ rimraf@^3.0.2: dependencies: glob "^7.1.3" -robust-predicates@^3.0.0: - version "3.0.1" - resolved 
"https://registry.yarnpkg.com/robust-predicates/-/robust-predicates-3.0.1.tgz#ecde075044f7f30118682bd9fb3f123109577f9a" - integrity sha512-ndEIpszUHiG4HtDsQLeIuMvRsDnn8c8rYStabochtUeCvfuvNptb5TUbVD68LRAILPX7p9nqQGh4xJgn3EHS/g== +robust-predicates@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/robust-predicates/-/robust-predicates-3.0.2.tgz#d5b28528c4824d20fc48df1928d41d9efa1ad771" + integrity sha512-IXgzBWvWQwE6PrDI05OvmXUIruQTcoMDzRsOd5CDvHCVLcLHMTSYvOK5Cm46kWqlV3yAbuSpBZdJ5oP5OUoStg== rtl-detect@^1.0.4: - version "1.0.4" - resolved "https://registry.yarnpkg.com/rtl-detect/-/rtl-detect-1.0.4.tgz#40ae0ea7302a150b96bc75af7d749607392ecac6" - integrity sha512-EBR4I2VDSSYr7PkBmFy04uhycIpDKp+21p/jARYXlCSjQksTBQcJ0HFUPOO79EPPH5JS6VAhiIQbycf0O3JAxQ== + version "1.1.2" + resolved "https://registry.yarnpkg.com/rtl-detect/-/rtl-detect-1.1.2.tgz#ca7f0330af5c6bb626c15675c642ba85ad6273c6" + integrity sha512-PGMBq03+TTG/p/cRB7HCLKJ1MgDIi07+QU1faSjiYRfmY5UsAttV9Hs08jDAHVwcOwmVLcSJkpwyfXszVjWfIQ== -rtlcss@^3.5.0: - version "3.5.0" - resolved "https://registry.yarnpkg.com/rtlcss/-/rtlcss-3.5.0.tgz#c9eb91269827a102bac7ae3115dd5d049de636c3" - integrity sha512-wzgMaMFHQTnyi9YOwsx9LjOxYXJPzS8sYnFaKm6R5ysvTkwzHiB0vxnbHwchHQT65PTdBjDG21/kQBWI7q9O7A== +rtlcss@^4.1.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/rtlcss/-/rtlcss-4.3.0.tgz#f8efd4d5b64f640ec4af8fa25b65bacd9e07cc97" + integrity sha512-FI+pHEn7Wc4NqKXMXFM+VAYKEj/mRIcW4h24YVwVtyjI+EqGrLc2Hx/Ny0lrZ21cBWU2goLy36eqMcNj3AQJig== dependencies: - find-up "^5.0.0" + escalade "^3.1.1" picocolors "^1.0.0" - postcss "^8.3.11" + postcss "^8.4.21" strip-json-comments "^3.1.1" run-parallel@^1.1.9: @@ -8432,13 +9447,6 @@ rw@1: resolved "https://registry.yarnpkg.com/rw/-/rw-1.3.3.tgz#3f862dfa91ab766b14885ef4d01124bfda074fb4" integrity sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ== -rxjs@^7.5.4: - version "7.8.0" - resolved "https://registry.yarnpkg.com/rxjs/-/rxjs-7.8.0.tgz#90a938862a82888ff4c7359811a595e14e1e09a4" - integrity sha512-F2+gxDshqmIub1KdvZkaEfGDwLNpPvk9Fs6LD/MyQxNgMds/WH9OdDDXOmxUZpME+iSK3rQCctkL0DYyytUqMg== - dependencies: - tslib "^2.1.0" - sade@^1.7.3: version "1.8.1" resolved "https://registry.yarnpkg.com/sade/-/sade-1.8.1.tgz#0a78e81d658d394887be57d2a409bf703a3b2701" @@ -8446,6 +9454,16 @@ sade@^1.7.3: dependencies: mri "^1.1.0" +safe-array-concat@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/safe-array-concat/-/safe-array-concat-1.1.2.tgz#81d77ee0c4e8b863635227c721278dd524c20edb" + integrity sha512-vj6RsCsWBCf19jIeHEfkRMw8DPiBb+DMXklQ/1SGDHOMlHdPUkZXFQ2YdplS23zESTijAcurb1aSgJA3AgMu1Q== + dependencies: + call-bind "^1.0.7" + get-intrinsic "^1.2.4" + has-symbols "^1.0.3" + isarray "^2.0.5" + safe-buffer@5.1.2, safe-buffer@~5.1.0, safe-buffer@~5.1.1: version "5.1.2" resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" @@ -8456,13 +9474,13 @@ safe-buffer@5.2.1, safe-buffer@>=5.1.0, safe-buffer@^5.1.0, safe-buffer@~5.2.0: resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6" integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ== -safe-regex-test@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/safe-regex-test/-/safe-regex-test-1.0.0.tgz#793b874d524eb3640d1873aad03596db2d4f2295" - integrity 
sha512-JBUUzyOgEwXQY1NuPtvcj/qcBDbDmEvWufhlnXZIm75DEHp+afM1r1ujJpJsV/gSM4t59tpDyPi1sd6ZaPFfsA== +safe-regex-test@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/safe-regex-test/-/safe-regex-test-1.0.3.tgz#a5b4c0f06e0ab50ea2c395c14d8371232924c377" + integrity sha512-CdASjNJPvRa7roO6Ra/gLYBTzYzzPyyBXxIMdGW3USQLyjWEls2RgW5UBTXaQVp+OrpeCK3bLem8smtmheoRuw== dependencies: - call-bind "^1.0.2" - get-intrinsic "^1.1.3" + call-bind "^1.0.6" + es-errors "^1.3.0" is-regex "^1.1.4" "safer-buffer@>= 2.1.2 < 3", "safer-buffer@>= 2.1.2 < 3.0.0": @@ -8470,18 +9488,22 @@ safe-regex-test@^1.0.0: resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== -sax@^1.2.4, sax@~1.2.4: +sax@^1.2.4: + version "1.4.1" + resolved "https://registry.yarnpkg.com/sax/-/sax-1.4.1.tgz#44cc8988377f126304d3b3fc1010c733b929ef0f" + integrity sha512-+aWOz7yVScEGoKNd4PA10LZ8sk0A/z5+nXQG5giUO5rprX9jgYsTdov9qCchZiPIZezbZH+jRut8nPodFAX4Jg== + +sax@~1.2.4: version "1.2.4" resolved "https://registry.yarnpkg.com/sax/-/sax-1.2.4.tgz#2816234e2378bddc4e5354fab5caa895df7100d9" integrity sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw== -scheduler@^0.20.2: - version "0.20.2" - resolved "https://registry.yarnpkg.com/scheduler/-/scheduler-0.20.2.tgz#4baee39436e34aa93b4874bddcbf0fe8b8b50e91" - integrity sha512-2eWfGgAqqWFGqtdMmcL5zCMK1U8KlXv8SQFGglL3CEtd0aDVDWgeF/YoCmvln55m5zSk3J/20hTaSBeSObsQDQ== +scheduler@^0.23.2: + version "0.23.2" + resolved "https://registry.yarnpkg.com/scheduler/-/scheduler-0.23.2.tgz#414ba64a3b282892e944cf2108ecc078d115cdc3" + integrity sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ== dependencies: loose-envify "^1.1.0" - object-assign "^4.1.1" schema-utils@2.7.0: version "2.7.0" @@ -8492,33 +9514,24 @@ schema-utils@2.7.0: ajv "^6.12.2" ajv-keywords "^3.4.1" -schema-utils@^2.6.5: - version "2.7.1" - resolved "https://registry.yarnpkg.com/schema-utils/-/schema-utils-2.7.1.tgz#1ca4f32d1b24c590c203b8e7a50bf0ea4cd394d7" - integrity sha512-SHiNtMOUGWBQJwzISiVYKu82GiV4QYGePp3odlY1tuKO7gPtphAT5R/py0fA6xtbgLL/RvtJZnU9b8s0F1q0Xg== - dependencies: - "@types/json-schema" "^7.0.5" - ajv "^6.12.4" - ajv-keywords "^3.5.2" - -schema-utils@^3.0.0, schema-utils@^3.1.0, schema-utils@^3.1.1: - version "3.1.1" - resolved "https://registry.yarnpkg.com/schema-utils/-/schema-utils-3.1.1.tgz#bc74c4b6b6995c1d88f76a8b77bea7219e0c8281" - integrity sha512-Y5PQxS4ITlC+EahLuXaY86TXfR7Dc5lw294alXOq86JAHCihAIZfqv8nNCWvaEJvaC51uN9hbLGeV0cFBdH+Fw== +schema-utils@^3.0.0, schema-utils@^3.1.1, schema-utils@^3.2.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/schema-utils/-/schema-utils-3.3.0.tgz#f50a88877c3c01652a15b622ae9e9795df7a60fe" + integrity sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg== dependencies: "@types/json-schema" "^7.0.8" ajv "^6.12.5" ajv-keywords "^3.5.2" -schema-utils@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/schema-utils/-/schema-utils-4.0.0.tgz#60331e9e3ae78ec5d16353c467c34b3a0a1d3df7" - integrity sha512-1edyXKgh6XnJsJSQ8mKWXnN/BVaIbFMLpouRUrXgVq7WYne5kw3MW7UPhO44uRXQSIpTSXoJbmrR2X0w9kUTyg== +schema-utils@^4.0.0, schema-utils@^4.0.1: + version "4.2.0" + resolved "https://registry.yarnpkg.com/schema-utils/-/schema-utils-4.2.0.tgz#70d7c93e153a273a805801882ebd3bff20d89c8b" + 
integrity sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw== dependencies: "@types/json-schema" "^7.0.9" - ajv "^8.8.0" + ajv "^8.9.0" ajv-formats "^2.1.1" - ajv-keywords "^5.0.0" + ajv-keywords "^5.1.0" section-matter@^1.0.0: version "1.0.0" @@ -8534,40 +9547,34 @@ select-hose@^2.0.0: integrity sha512-mEugaLK+YfkijB4fx0e6kImuJdCIt2LxCRcbEYPqRGCs4F2ogyfZU5IAZRdjCP8JPq2AtdNoC/Dux63d9Kiryg== selfsigned@^2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/selfsigned/-/selfsigned-2.1.1.tgz#18a7613d714c0cd3385c48af0075abf3f266af61" - integrity sha512-GSL3aowiF7wa/WtSFwnUrludWFoNhftq8bUkH9pkzjpN2XSPOAYEgg6e0sS9s0rZwgJzJiQRPU18A6clnoW5wQ== + version "2.4.1" + resolved "https://registry.yarnpkg.com/selfsigned/-/selfsigned-2.4.1.tgz#560d90565442a3ed35b674034cec4e95dceb4ae0" + integrity sha512-th5B4L2U+eGLq1TVh7zNRGBapioSORUeymIydxgFpwww9d2qyKvtuPU2jJuHvYAwwqi2Y596QBL3eEqcPEYL8Q== dependencies: + "@types/node-forge" "^1.3.0" node-forge "^1" -semver-diff@^3.1.1: - version "3.1.1" - resolved "https://registry.yarnpkg.com/semver-diff/-/semver-diff-3.1.1.tgz#05f77ce59f325e00e2706afd67bb506ddb1ca32b" - integrity sha512-GX0Ix/CJcHyB8c4ykpHGIAvLyOwOobtM/8d+TQkAd81/bEjgPHrfba41Vpesr7jX/t8Uh+R3EX9eAS5be+jQYg== +semver-diff@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/semver-diff/-/semver-diff-4.0.0.tgz#3afcf5ed6d62259f5c72d0d5d50dffbdc9680df5" + integrity sha512-0Ju4+6A8iOnpL/Thra7dZsSlOHYAHIeMxfhWQRI1/VLcT3WDBZKKtQt/QkBOsiIN9ZpuvHE6cGZ0x4glCMmfiA== dependencies: - semver "^6.3.0" + semver "^7.3.5" -semver@^5.4.1: - version "5.7.1" - resolved "https://registry.yarnpkg.com/semver/-/semver-5.7.1.tgz#a954f931aeba508d307bbf069eff0c01c96116f7" - integrity sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ== - -semver@^6.0.0, semver@^6.1.1, semver@^6.1.2, semver@^6.2.0, semver@^6.3.0: - version "6.3.0" - resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.0.tgz#ee0a64c8af5e8ceea67687b133761e1becbd1d3d" - integrity sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw== +semver@^6.3.1: + version "6.3.1" + resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4" + integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA== -semver@^7.3.2, semver@^7.3.4, semver@^7.3.7, semver@^7.3.8: - version "7.3.8" - resolved "https://registry.yarnpkg.com/semver/-/semver-7.3.8.tgz#07a78feafb3f7b32347d725e33de7e2a2df67798" - integrity sha512-NB1ctGL5rlHrPJtFDVIVzTyQylMLu9N9VICA6HSFJo8MCGVTMW6gfpicwKmmK/dAjTOrqu5l63JJOpDSrAis3A== - dependencies: - lru-cache "^6.0.0" +semver@^7.3.2, semver@^7.3.5, semver@^7.3.7, semver@^7.3.8, semver@^7.5.4: + version "7.6.3" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.6.3.tgz#980f7b5550bc175fb4dc09403085627f9eb33143" + integrity sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A== -send@0.18.0: - version "0.18.0" - resolved "https://registry.yarnpkg.com/send/-/send-0.18.0.tgz#670167cc654b05f5aa4a767f9113bb371bc706be" - integrity sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg== +send@0.19.0: + version "0.19.0" + resolved "https://registry.yarnpkg.com/send/-/send-0.19.0.tgz#bbc5a388c8ea6c048967049dbeac0e4a3f09d7f8" + integrity sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw== dependencies: 
debug "2.6.9" depd "2.0.0" @@ -8583,25 +9590,24 @@ send@0.18.0: range-parser "~1.2.1" statuses "2.0.1" -serialize-javascript@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/serialize-javascript/-/serialize-javascript-6.0.0.tgz#efae5d88f45d7924141da8b5c3a7a7e663fefeb8" - integrity sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag== +serialize-javascript@^6.0.0, serialize-javascript@^6.0.1: + version "6.0.2" + resolved "https://registry.yarnpkg.com/serialize-javascript/-/serialize-javascript-6.0.2.tgz#defa1e055c83bf6d59ea805d8da862254eb6a6c2" + integrity sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g== dependencies: randombytes "^2.1.0" -serve-handler@^6.1.3: - version "6.1.5" - resolved "https://registry.yarnpkg.com/serve-handler/-/serve-handler-6.1.5.tgz#a4a0964f5c55c7e37a02a633232b6f0d6f068375" - integrity sha512-ijPFle6Hwe8zfmBxJdE+5fta53fdIY0lHISJvuikXB3VYFafRjMRpOffSPvCYsbKyBA7pvy9oYr/BT1O3EArlg== +serve-handler@^6.1.5: + version "6.1.6" + resolved "https://registry.yarnpkg.com/serve-handler/-/serve-handler-6.1.6.tgz#50803c1d3e947cd4a341d617f8209b22bd76cfa1" + integrity sha512-x5RL9Y2p5+Sh3D38Fh9i/iQ5ZK+e4xuXRd/pGbM4D13tgo/MGwbttUk8emytcr1YYzBYs+apnUngBDFYfpjPuQ== dependencies: bytes "3.0.0" content-disposition "0.5.2" - fast-url-parser "1.1.3" mime-types "2.1.18" minimatch "3.1.2" path-is-inside "1.0.2" - path-to-regexp "2.2.1" + path-to-regexp "3.3.0" range-parser "1.2.0" serve-index@^1.9.1: @@ -8617,20 +9623,37 @@ serve-index@^1.9.1: mime-types "~2.1.17" parseurl "~1.3.2" -serve-static@1.15.0: - version "1.15.0" - resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.15.0.tgz#faaef08cffe0a1a62f60cad0c4e513cff0ac9540" - integrity sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g== +serve-static@1.16.2: + version "1.16.2" + resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.16.2.tgz#b6a5343da47f6bdd2673848bf45754941e803296" + integrity sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw== dependencies: - encodeurl "~1.0.2" + encodeurl "~2.0.0" escape-html "~1.0.3" parseurl "~1.3.3" - send "0.18.0" + send "0.19.0" -setimmediate@^1.0.5: - version "1.0.5" - resolved "https://registry.yarnpkg.com/setimmediate/-/setimmediate-1.0.5.tgz#290cbb232e306942d7d7ea9b83732ab7856f8285" - integrity sha512-MATJdZp8sLqDl/68LfQmbP8zKPLQNV6BIZoIgrscFDQ+RsvK/BxeDQOgyxKKoh0y/8h3BqVFnCqQ/gd+reiIXA== +set-function-length@^1.2.1: + version "1.2.2" + resolved "https://registry.yarnpkg.com/set-function-length/-/set-function-length-1.2.2.tgz#aac72314198eaed975cf77b2c3b6b880695e5449" + integrity sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg== + dependencies: + define-data-property "^1.1.4" + es-errors "^1.3.0" + function-bind "^1.1.2" + get-intrinsic "^1.2.4" + gopd "^1.0.1" + has-property-descriptors "^1.0.2" + +set-function-name@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/set-function-name/-/set-function-name-2.0.2.tgz#16a705c5a0dc2f5e638ca96d8a8cd4e1c2b90985" + integrity sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ== + dependencies: + define-data-property "^1.1.4" + es-errors "^1.3.0" + functions-have-names "^1.2.3" + has-property-descriptors "^1.0.2" setprototypeof@1.1.0: version "1.1.0" @@ -8666,10 +9689,10 @@ shebang-regex@^3.0.0: resolved 
"https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-3.0.0.tgz#ae16f1644d873ecad843b0307b143362d4c42172" integrity sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A== -shell-quote@^1.7.3: - version "1.7.4" - resolved "https://registry.yarnpkg.com/shell-quote/-/shell-quote-1.7.4.tgz#33fe15dee71ab2a81fcbd3a52106c5cfb9fb75d8" - integrity sha512-8o/QEhSSRb1a5i7TFR0iM4G16Z0vYB2OQVs4G3aAFXjn3T6yEx8AZxy1PgDF7I00LZHYA3WxaSYIf5e5sAX8Rw== +shell-quote@^1.7.3, shell-quote@^1.8.1: + version "1.8.1" + resolved "https://registry.yarnpkg.com/shell-quote/-/shell-quote-1.8.1.tgz#6dbf4db75515ad5bac63b4f1894c3a154c766680" + integrity sha512-6j1W9l1iAs/4xYBI1SYOVZyFcCis9b4KCLQ8fgAGG07QvzaRLVVRQvAy85yNmmZSjYjg4MWh4gNvlPujU/5LpA== shelljs@^0.8.5: version "0.8.5" @@ -8680,28 +9703,29 @@ shelljs@^0.8.5: interpret "^1.0.0" rechoir "^0.6.2" -side-channel@^1.0.4: - version "1.0.4" - resolved "https://registry.yarnpkg.com/side-channel/-/side-channel-1.0.4.tgz#efce5c8fdc104ee751b25c58d4290011fa5ea2cf" - integrity sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw== +side-channel@^1.0.4, side-channel@^1.0.6: + version "1.0.6" + resolved "https://registry.yarnpkg.com/side-channel/-/side-channel-1.0.6.tgz#abd25fb7cd24baf45466406b1096b7831c9215f2" + integrity sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA== dependencies: - call-bind "^1.0.0" - get-intrinsic "^1.0.2" - object-inspect "^1.9.0" + call-bind "^1.0.7" + es-errors "^1.3.0" + get-intrinsic "^1.2.4" + object-inspect "^1.13.1" signal-exit@^3.0.0, signal-exit@^3.0.2, signal-exit@^3.0.3: version "3.0.7" resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.7.tgz#a9a1767f8af84155114eaabd73f99273c8f59ad9" integrity sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ== -sirv@^1.0.7: - version "1.0.19" - resolved "https://registry.yarnpkg.com/sirv/-/sirv-1.0.19.tgz#1d73979b38c7fe91fcba49c85280daa9c2363b49" - integrity sha512-JuLThK3TnZG1TAKDwNIqNq6QA2afLOCcm+iE8D1Kj3GA40pSPsxQjjJl0J8X3tsR7T+CP1GavpzLwYkgVLWrZQ== +sirv@^2.0.3: + version "2.0.4" + resolved "https://registry.yarnpkg.com/sirv/-/sirv-2.0.4.tgz#5dd9a725c578e34e449f332703eb2a74e46a29b0" + integrity sha512-94Bdh3cC2PKrbgSOUqTiGPWVZeSiXfKOVZNJniWoqrWrRkB1CJzBU3NEbiTsPcYy1lDsANA/THzS+9WBiy5nfQ== dependencies: - "@polka/url" "^1.0.0-next.20" - mrmime "^1.0.0" - totalist "^1.0.0" + "@polka/url" "^1.0.0-next.24" + mrmime "^2.0.0" + totalist "^3.0.0" sisteransi@^1.0.5: version "1.0.5" @@ -8709,15 +9733,22 @@ sisteransi@^1.0.5: integrity sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg== sitemap@^7.1.1: - version "7.1.1" - resolved "https://registry.yarnpkg.com/sitemap/-/sitemap-7.1.1.tgz#eeed9ad6d95499161a3eadc60f8c6dce4bea2bef" - integrity sha512-mK3aFtjz4VdJN0igpIJrinf3EO8U8mxOPsTBzSsy06UtjZQJ3YY3o3Xa7zSc5nMqcMrRwlChHZ18Kxg0caiPBg== + version "7.1.2" + resolved "https://registry.yarnpkg.com/sitemap/-/sitemap-7.1.2.tgz#6ce1deb43f6f177c68bc59cf93632f54e3ae6b72" + integrity sha512-ARCqzHJ0p4gWt+j7NlU5eDlIO9+Rkr/JhPFZKKQ1l5GCus7rJH4UdrlVAh0xC/gDS/Qir2UMxqYNHtsKr2rpCw== dependencies: "@types/node" "^17.0.5" "@types/sax" "^1.2.1" arg "^5.0.0" sax "^1.2.4" +skin-tone@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/skin-tone/-/skin-tone-2.0.0.tgz#4e3933ab45c0d4f4f781745d64b9f4c208e41237" + integrity 
sha512-kUMbT1oBJCpgrnKoSr0o6wPtvRWT9W9UKvGLwfJYO2WuahZRHOpEyL1ckyMGgMWh0UdpmaoFqKKD29WTomNEGA== + dependencies: + unicode-emoji-modifier-base "^1.0.0" + slash@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/slash/-/slash-3.0.0.tgz#6539be870c165adbd5240220dbe361f1bc4d4634" @@ -8728,6 +9759,14 @@ slash@^4.0.0: resolved "https://registry.yarnpkg.com/slash/-/slash-4.0.0.tgz#2422372176c4c6c5addb5e2ada885af984b396a7" integrity sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew== +snake-case@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/snake-case/-/snake-case-3.0.4.tgz#4f2bbd568e9935abdfd593f34c691dadb49c452c" + integrity sha512-LAOh4z89bGQvl9pFfNF8V146i7o7/CqFPbqzYgP+yYzDIDeS9HaNFtXABamRW+AQzEVODcvE79ljJ+8a9YSdMg== + dependencies: + dot-case "^3.0.4" + tslib "^2.0.3" + sockjs@^0.3.24: version "0.3.24" resolved "https://registry.yarnpkg.com/sockjs/-/sockjs-0.3.24.tgz#c9bc8995f33a111bea0395ec30aa3206bdb5ccce" @@ -8737,15 +9776,15 @@ sockjs@^0.3.24: uuid "^8.3.2" websocket-driver "^0.7.4" -sort-css-media-queries@2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/sort-css-media-queries/-/sort-css-media-queries-2.1.0.tgz#7c85e06f79826baabb232f5560e9745d7a78c4ce" - integrity sha512-IeWvo8NkNiY2vVYdPa27MCQiR0MN0M80johAYFVxWWXQ44KU84WNxjslwBHmc/7ZL2ccwkM7/e6S5aiKZXm7jA== +sort-css-media-queries@2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/sort-css-media-queries/-/sort-css-media-queries-2.2.0.tgz#aa33cf4a08e0225059448b6c40eddbf9f1c8334c" + integrity sha512-0xtkGhWCC9MGt/EzgnvbbbKhqWjl1+/rncmhTh5qCpbYguXh6S/qwePfv/JQ8jePXXmqingylxoC49pCkSPIbA== -source-map-js@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.0.2.tgz#adbc361d9c62df380125e7f161f71c826f1e490c" - integrity sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw== +source-map-js@^1.0.1, source-map-js@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.2.1.tgz#1ce5650fddd87abc099eda37dcff024c2667ae46" + integrity sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA== source-map-support@~0.5.20: version "0.5.21" @@ -8755,11 +9794,6 @@ source-map-support@~0.5.20: buffer-from "^1.0.0" source-map "^0.6.0" -source-map@^0.5.0: - version "0.5.7" - resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.7.tgz#8a039d2d1021d22d1ea14c80d8ea468ba2ef3fcc" - integrity sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ== - source-map@^0.6.0, source-map@^0.6.1, source-map@~0.6.0: version "0.6.1" resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263" @@ -8813,16 +9847,16 @@ sprintf-js@~1.0.2: resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" integrity sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g== +srcset@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/srcset/-/srcset-4.0.0.tgz#336816b665b14cd013ba545b6fe62357f86e65f4" + integrity sha512-wvLeHgcVHKO8Sc/H/5lkGreJQVeYMm9rlmt8PuR1xE31rIuXhuzznUUqAt8MqLhB3MqJdFzlNAfpcWnxiFUcPw== + stable@^0.1.8: version "0.1.8" resolved "https://registry.yarnpkg.com/stable/-/stable-0.1.8.tgz#836eb3c8382fe2936feaf544631017ce7d47a3cf" integrity 
sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w== -state-toggle@^1.0.0: - version "1.0.3" - resolved "https://registry.yarnpkg.com/state-toggle/-/state-toggle-1.0.3.tgz#e123b16a88e143139b09c6852221bc9815917dfe" - integrity sha512-d/5Z4/2iiCnHw6Xzghyhb+GcmF89bxwgXG60wjIiZaxnymbyOmI8Hk4VqHXiVVp6u2ysaskFfXg3ekCj4WNftQ== - statuses@2.0.1: version "2.0.1" resolved "https://registry.yarnpkg.com/statuses/-/statuses-2.0.1.tgz#55cb000ccf1d48728bd23c685a063998cf1a1b63" @@ -8834,11 +9868,11 @@ statuses@2.0.1: integrity sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA== std-env@^3.0.1: - version "3.3.1" - resolved "https://registry.yarnpkg.com/std-env/-/std-env-3.3.1.tgz#93a81835815e618c8aa75e7c8a4dc04f7c314e29" - integrity sha512-3H20QlwQsSm2OvAxWIYhs+j01MzzqwMwGiiO1NQaJYZgJZFPuAbf95/DiKRBSTYIJ2FeGUc+B/6mPGcWP9dO3Q== + version "3.7.0" + resolved "https://registry.yarnpkg.com/std-env/-/std-env-3.7.0.tgz#c9f7386ced6ecf13360b6c6c55b8aaa4ef7481d2" + integrity sha512-JPbdCEQLj1w5GilpiHAx3qJvFndqybBysA3qUOnznweH4QbNYUsW/ea8QzSrnh0vNsezMMw5bcVool8lM0gwzg== -"string-width@^1.0.2 || 2 || 3 || 4", string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.2, string-width@^4.2.3: +"string-width@^1.0.2 || 2 || 3 || 4", string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.3: version "4.2.3" resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== @@ -8847,7 +9881,7 @@ std-env@^3.0.1: is-fullwidth-code-point "^3.0.0" strip-ansi "^6.0.1" -string-width@^5.0.1: +string-width@^5.0.1, string-width@^5.1.2: version "5.1.2" resolved "https://registry.yarnpkg.com/string-width/-/string-width-5.1.2.tgz#14f8daec6d81e7221d2a357e668cab73bdbca794" integrity sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA== @@ -8856,23 +9890,33 @@ string-width@^5.0.1: emoji-regex "^9.2.2" strip-ansi "^7.0.1" -string.prototype.trimend@^1.0.6: - version "1.0.6" - resolved "https://registry.yarnpkg.com/string.prototype.trimend/-/string.prototype.trimend-1.0.6.tgz#c4a27fa026d979d79c04f17397f250a462944533" - integrity sha512-JySq+4mrPf9EsDBEDYMOb/lM7XQLulwg5R/m1r0PXEFqrV0qHvl58sdTilSXtKOflCsK2E8jxf+GKC0T07RWwQ== +string.prototype.trim@^1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/string.prototype.trim/-/string.prototype.trim-1.2.9.tgz#b6fa326d72d2c78b6df02f7759c73f8f6274faa4" + integrity sha512-klHuCNxiMZ8MlsOihJhJEBJAiMVqU3Z2nEXWfWnIqjN0gEFS9J9+IxKozWWtQGcgoa1WUZzLjKPTr4ZHNFTFxw== dependencies: - call-bind "^1.0.2" - define-properties "^1.1.4" - es-abstract "^1.20.4" + call-bind "^1.0.7" + define-properties "^1.2.1" + es-abstract "^1.23.0" + es-object-atoms "^1.0.0" -string.prototype.trimstart@^1.0.6: - version "1.0.6" - resolved "https://registry.yarnpkg.com/string.prototype.trimstart/-/string.prototype.trimstart-1.0.6.tgz#e90ab66aa8e4007d92ef591bbf3cd422c56bdcf4" - integrity sha512-omqjMDaY92pbn5HOX7f9IccLA+U1tA9GvtU4JrodiXFfYB7jPzzHpRzpglLAjtUV6bB557zwClJezTqnAiYnQA== +string.prototype.trimend@^1.0.8: + version "1.0.8" + resolved "https://registry.yarnpkg.com/string.prototype.trimend/-/string.prototype.trimend-1.0.8.tgz#3651b8513719e8a9f48de7f2f77640b26652b229" + integrity sha512-p73uL5VCHCO2BZZ6krwwQE3kCzM7NKmis8S//xEC6fQonchbum4eP6kR4DLEjQFO3Wnj3Fuo8NM0kOSjVdHjZQ== dependencies: - call-bind 
"^1.0.2" - define-properties "^1.1.4" - es-abstract "^1.20.4" + call-bind "^1.0.7" + define-properties "^1.2.1" + es-object-atoms "^1.0.0" + +string.prototype.trimstart@^1.0.8: + version "1.0.8" + resolved "https://registry.yarnpkg.com/string.prototype.trimstart/-/string.prototype.trimstart-1.0.8.tgz#7ee834dda8c7c17eff3118472bb35bfedaa34dde" + integrity sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg== + dependencies: + call-bind "^1.0.7" + define-properties "^1.2.1" + es-object-atoms "^1.0.0" string_decoder@^1.1.1: version "1.3.0" @@ -8889,9 +9933,9 @@ string_decoder@~1.1.1: safe-buffer "~5.1.0" stringify-entities@^4.0.0: - version "4.0.3" - resolved "https://registry.yarnpkg.com/stringify-entities/-/stringify-entities-4.0.3.tgz#cfabd7039d22ad30f3cc435b0ca2c1574fc88ef8" - integrity sha512-BP9nNHMhhfcMbiuQKCqMjhDP5yBCAxsPu4pHFFzJ6Alo9dZgY4VLDPutXqIjpRiMoKdp7Av85Gr73Q5uH9k7+g== + version "4.0.4" + resolved "https://registry.yarnpkg.com/stringify-entities/-/stringify-entities-4.0.4.tgz#b3b79ef5f277cc4ac73caeb0236c5ba939b3a4f3" + integrity sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg== dependencies: character-entities-html4 "^2.0.0" character-entities-legacy "^3.0.0" @@ -8905,7 +9949,7 @@ stringify-object@^3.3.0: is-obj "^1.0.1" is-regexp "^1.0.0" -strip-ansi@^6.0.0, strip-ansi@^6.0.1: +strip-ansi@^6.0.1: version "6.0.1" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== @@ -8913,9 +9957,9 @@ strip-ansi@^6.0.0, strip-ansi@^6.0.1: ansi-regex "^5.0.1" strip-ansi@^7.0.1: - version "7.0.1" - resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-7.0.1.tgz#61740a08ce36b61e50e65653f07060d000975fb2" - integrity sha512-cXNxvT8dFNRVfhVME3JAe98mkXDYN2O1l7jmcwMnOslDeESg1rF/OZMtK0nRAhiari1unG5cD4jG3rapUAkLbw== + version "7.1.0" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-7.1.0.tgz#d5b6568ca689d8561370b0707685d22434faff45" + integrity sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ== dependencies: ansi-regex "^6.0.1" @@ -8939,25 +9983,32 @@ strip-json-comments@~2.0.1: resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a" integrity sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ== -style-to-object@0.3.0, style-to-object@^0.3.0: - version "0.3.0" - resolved "https://registry.yarnpkg.com/style-to-object/-/style-to-object-0.3.0.tgz#b1b790d205991cc783801967214979ee19a76e46" - integrity sha512-CzFnRRXhzWIdItT3OmF8SQfWyahHhjq3HwcMNCNLn+N7klOOqPjMeG/4JSu77D7ypZdGvSzvkrbyeTMizz2VrA== +style-to-object@^0.4.0, style-to-object@^0.4.1: + version "0.4.4" + resolved "https://registry.yarnpkg.com/style-to-object/-/style-to-object-0.4.4.tgz#266e3dfd56391a7eefb7770423612d043c3f33ec" + integrity sha512-HYNoHZa2GorYNyqiCaBgsxvcJIn7OHq6inEga+E6Ke3m5JkoqpQbnFssk4jwe+K7AhGa2fcha4wSOf1Kn01dMg== dependencies: inline-style-parser "0.1.1" -stylehacks@^5.1.1: - version "5.1.1" - resolved "https://registry.yarnpkg.com/stylehacks/-/stylehacks-5.1.1.tgz#7934a34eb59d7152149fa69d6e9e56f2fc34bcc9" - integrity sha512-sBpcd5Hx7G6seo7b1LkpttvTz7ikD0LlH5RmdcBNb6fFR0Fl7LQwHDFr300q4cwUqi+IYrFGmsIHieMBfnN/Bw== +style-to-object@^1.0.0: + version "1.0.8" + resolved 
"https://registry.yarnpkg.com/style-to-object/-/style-to-object-1.0.8.tgz#67a29bca47eaa587db18118d68f9d95955e81292" + integrity sha512-xT47I/Eo0rwJmaXC4oilDGDWLohVhR6o/xAQcPQN8q6QBuZVL8qMYL85kLmST5cPjAorwvqIA4qXTRQoYHaL6g== dependencies: - browserslist "^4.21.4" - postcss-selector-parser "^6.0.4" + inline-style-parser "0.2.4" -stylis@^4.1.2: - version "4.1.3" - resolved "https://registry.yarnpkg.com/stylis/-/stylis-4.1.3.tgz#fd2fbe79f5fed17c55269e16ed8da14c84d069f7" - integrity sha512-GP6WDNWf+o403jrEp9c5jibKavrtLW+/qYGhFxFrG8maXhwTBI7gLLhiBb0o7uFccWN+EOS9aMO6cGHWAO07OA== +stylehacks@^6.1.1: + version "6.1.1" + resolved "https://registry.yarnpkg.com/stylehacks/-/stylehacks-6.1.1.tgz#543f91c10d17d00a440430362d419f79c25545a6" + integrity sha512-gSTTEQ670cJNoaeIp9KX6lZmm8LJ3jPB5yJmX8Zq/wQxOsAFXV3qjWzHas3YYk1qesuVIyYWWUpZ0vSE/dTSGg== + dependencies: + browserslist "^4.23.0" + postcss-selector-parser "^6.0.16" + +stylis@^4.1.3: + version "4.3.4" + resolved "https://registry.yarnpkg.com/stylis/-/stylis-4.3.4.tgz#ca5c6c4a35c4784e4e93a2a24dc4e9fa075250a4" + integrity sha512-osIBl6BGUmSfDkyH2mB7EFvCJntXDrLhKjHTRj/rK6xLH0yuPrHULDRQzKokSOD4VoorhtKpfcfW1GAntu8now== supports-color@^5.3.0: version "5.5.0" @@ -9009,47 +10060,47 @@ svgo@^1.2.2: unquote "~1.1.1" util.promisify "~1.0.0" -svgo@^2.7.0, svgo@^2.8.0: - version "2.8.0" - resolved "https://registry.yarnpkg.com/svgo/-/svgo-2.8.0.tgz#4ff80cce6710dc2795f0c7c74101e6764cfccd24" - integrity sha512-+N/Q9kV1+F+UeWYoSiULYo4xYSDQlTgb+ayMobAXPwMnLvop7oxKMo9OzIrX5x3eS4L4f2UHhc9axXwY8DpChg== +svgo@^3.0.2, svgo@^3.2.0: + version "3.3.2" + resolved "https://registry.yarnpkg.com/svgo/-/svgo-3.3.2.tgz#ad58002652dffbb5986fc9716afe52d869ecbda8" + integrity sha512-OoohrmuUlBs8B8o6MB2Aevn+pRIH9zDALSR+6hhqVfa6fRwG/Qw9VUMSMW9VNg2CFc/MTIfabtdOVl9ODIJjpw== dependencies: "@trysound/sax" "0.2.0" commander "^7.2.0" - css-select "^4.1.3" - css-tree "^1.1.3" - csso "^4.2.0" + css-select "^5.1.0" + css-tree "^2.3.1" + css-what "^6.1.0" + csso "^5.0.5" picocolors "^1.0.0" - stable "^0.1.8" tapable@^1.0.0: version "1.1.3" resolved "https://registry.yarnpkg.com/tapable/-/tapable-1.1.3.tgz#a1fccc06b58db61fd7a45da2da44f5f3a3e67ba2" integrity sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA== -tapable@^2.0.0, tapable@^2.1.1, tapable@^2.2.0: +tapable@^2.0.0, tapable@^2.1.1, tapable@^2.2.0, tapable@^2.2.1: version "2.2.1" resolved "https://registry.yarnpkg.com/tapable/-/tapable-2.2.1.tgz#1967a73ef4060a82f12ab96af86d52fdb76eeca0" integrity sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ== -terser-webpack-plugin@^5.1.3, terser-webpack-plugin@^5.3.3: - version "5.3.6" - resolved "https://registry.yarnpkg.com/terser-webpack-plugin/-/terser-webpack-plugin-5.3.6.tgz#5590aec31aa3c6f771ce1b1acca60639eab3195c" - integrity sha512-kfLFk+PoLUQIbLmB1+PZDMRSZS99Mp+/MHqDNmMA6tOItzRt+Npe3E+fsMs5mfcM0wCtrrdU387UnV+vnSffXQ== +terser-webpack-plugin@^5.3.10, terser-webpack-plugin@^5.3.9: + version "5.3.10" + resolved "https://registry.yarnpkg.com/terser-webpack-plugin/-/terser-webpack-plugin-5.3.10.tgz#904f4c9193c6fd2a03f693a2150c62a92f40d199" + integrity sha512-BKFPWlPDndPs+NGGCr1U59t0XScL5317Y0UReNrHaw9/FwhPENlq6bfgs+4yPfyP51vqC1bQ4rp1EfXW5ZSH9w== dependencies: - "@jridgewell/trace-mapping" "^0.3.14" + "@jridgewell/trace-mapping" "^0.3.20" jest-worker "^27.4.5" schema-utils "^3.1.1" - serialize-javascript "^6.0.0" - terser "^5.14.1" + serialize-javascript "^6.0.1" + terser "^5.26.0" -terser@^5.10.0, 
-terser@^5.14.1:
-  version "5.16.1"
-  resolved "https://registry.yarnpkg.com/terser/-/terser-5.16.1.tgz#5af3bc3d0f24241c7fb2024199d5c461a1075880"
-  integrity sha512-xvQfyfA1ayT0qdK47zskQgRZeWLoOQ8JQ6mIgRGVNwZKdQMU+5FkCBjmv4QjcrTzyZquRw2FVtlJSRUmMKQslw==
+terser@^5.10.0, terser@^5.15.1, terser@^5.26.0:
+  version "5.36.0"
+  resolved "https://registry.yarnpkg.com/terser/-/terser-5.36.0.tgz#8b0dbed459ac40ff7b4c9fd5a3a2029de105180e"
+  integrity sha512-IYV9eNMuFAV4THUspIRXkLakHnV6XO7FEdtKjf/mDyrnqUg9LnlOn6/RwRvM9SZjR4GUq8Nk8zj67FzVARr74w==
   dependencies:
-    "@jridgewell/source-map" "^0.3.2"
-    acorn "^8.5.0"
+    "@jridgewell/source-map" "^0.3.3"
+    acorn "^8.8.2"
     commander "^2.20.0"
     source-map-support "~0.5.20"

@@ -9064,9 +10115,9 @@ thunky@^1.0.2:
   integrity sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==

 tiny-invariant@^1.0.2:
-  version "1.3.1"
-  resolved "https://registry.yarnpkg.com/tiny-invariant/-/tiny-invariant-1.3.1.tgz#8560808c916ef02ecfd55e66090df23a4b7aa642"
-  integrity sha512-AD5ih2NlSssTCwsMznbvwMZpJ1cbhkGd2uueNxzv2jDlEeZdU04JQfRnggJQ8DrcVBGjAsCKwFBbDlVNtEMlzw==
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/tiny-invariant/-/tiny-invariant-1.3.3.tgz#46680b7a873a0d5d10005995eb90a70d74d60127"
+  integrity sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==

 tiny-warning@^1.0.0:
   version "1.0.3"

@@ -9078,11 +10129,6 @@ to-fast-properties@^2.0.0:
   resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-2.0.0.tgz#dc5e698cbd079265bc73e0377681a4e4e83f616e"
   integrity sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==

-to-readable-stream@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/to-readable-stream/-/to-readable-stream-1.0.0.tgz#ce0aa0c2f3df6adf852efb404a783e77c0475771"
-  integrity sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q==
-
 to-regex-range@^5.0.1:
   version "5.0.1"
   resolved "https://registry.yarnpkg.com/to-regex-range/-/to-regex-range-5.0.1.tgz#1648c44aae7c8d988a326018ed72f5b4dd0392e4"

@@ -9103,10 +10149,10 @@ toidentifier@1.0.1:
   resolved "https://registry.yarnpkg.com/toidentifier/-/toidentifier-1.0.1.tgz#3be34321a88a820ed1bd80dfaa33e479fbb8dd35"
   integrity sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==

-totalist@^1.0.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/totalist/-/totalist-1.1.0.tgz#a4d65a3e546517701e3e5c37a47a70ac97fe56df"
-  integrity sha512-gduQwd1rOdDMGxFG1gEvhV88Oirdo2p+KjoYFU7k2g+i7n6AFFbDQ5kMPUsW0pNbfQsB/cwXvT1i4Bue0s9g5g==
+totalist@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/totalist/-/totalist-3.0.1.tgz#ba3a3d600c915b1a97872348f79c127475f6acf8"
+  integrity sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==

 tr46@~0.0.3:
   version "0.0.3"

@@ -9118,37 +10164,32 @@ trim-lines@^3.0.0:
   resolved "https://registry.yarnpkg.com/trim-lines/-/trim-lines-3.0.1.tgz#d802e332a07df861c48802c04321017b1bd87338"
   integrity sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==

-trim-trailing-lines@^1.0.0:
-  version "1.1.4"
-  resolved "https://registry.yarnpkg.com/trim-trailing-lines/-/trim-trailing-lines-1.1.4.tgz#bd4abbec7cc880462f10b2c8b5ce1d8d1ec7c2c0"
-  integrity sha512-rjUWSqnfTNrjbB9NQWfPMH/xRK1deHeGsHoVfpxJ++XeYXE0d6B1En37AHfw3jtfTU7dzMzZL2jjpe8Qb5gLIQ==
-
-trim@0.0.1:
-  version "0.0.1"
-  resolved "https://registry.yarnpkg.com/trim/-/trim-0.0.1.tgz#5858547f6b290757ee95cccc666fb50084c460dd"
-  integrity sha512-YzQV+TZg4AxpKxaTHK3c3D+kRDCGVEE7LemdlQZoQXn0iennk10RsIoY6ikzAqJTc9Xjl9C1/waHom/J86ziAQ==
-
 trough@^1.0.0:
   version "1.0.5"
   resolved "https://registry.yarnpkg.com/trough/-/trough-1.0.5.tgz#b8b639cefad7d0bb2abd37d433ff8293efa5f406"
   integrity sha512-rvuRbTarPXmMb79SmzEp8aqXNKcK+y0XaB298IXueQ8I2PsrATcPBCSPyK/dDNa2iWOhKlfNnOjdAOTBU/nkFA==

 trough@^2.0.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/trough/-/trough-2.1.0.tgz#0f7b511a4fde65a46f18477ab38849b22c554876"
-  integrity sha512-AqTiAOLcj85xS7vQ8QkAV41hPDIJ71XJB4RCUrzo/1GM2CQwhkJGaf9Hgr7BOugMRpgGUrqRg/DrBDl4H40+8g==
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/trough/-/trough-2.2.0.tgz#94a60bd6bd375c152c1df911a4b11d5b0256f50f"
+  integrity sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==

-tslib@^2.0.3, tslib@^2.1.0, tslib@^2.4.0:
-  version "2.4.1"
-  resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.4.1.tgz#0d0bfbaac2880b91e22df0768e55be9753a5b17e"
-  integrity sha512-tGyy4dAjRIEwI7BzsB0lynWgOpfqjUdq91XXAlIWD2OwKBH7oCl/GZG/HT4BOHrTlPMOASlMQ7veyTqpmRcrNA==
+ts-dedent@^2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/ts-dedent/-/ts-dedent-2.2.0.tgz#39e4bd297cd036292ae2394eb3412be63f563bb5"
+  integrity sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ==
+
+tslib@^2.0.3, tslib@^2.6.0:
+  version "2.8.0"
+  resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.8.0.tgz#d124c86c3c05a40a91e6fdea4021bd31d377971b"
+  integrity sha512-jWVzBLplnCmoaTr13V9dYbiQ99wvZRd0vNWaDRg+aVYRcjDF3nDksxFDE/+fkXnKhpnUUkmx5pK/v8mCtLVqZA==

-type-fest@^0.20.2:
-  version "0.20.2"
-  resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.20.2.tgz#1bf207f4b28f91583666cb5fbd327887301cd5f4"
-  integrity sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==
+type-fest@^1.0.1:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-1.4.0.tgz#e9fb813fe3bf1744ec359d55d1affefa76f14be1"
+  integrity sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA==

-type-fest@^2.5.0:
+type-fest@^2.13.0, type-fest@^2.5.0:
   version "2.19.0"
   resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-2.19.0.tgz#88068015bb33036a598b952e55e9311a60fd3a9b"
   integrity sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==

@@ -9161,6 +10202,50 @@ type-is@~1.6.18:
     media-typer "0.3.0"
     mime-types "~2.1.24"

+typed-array-buffer@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/typed-array-buffer/-/typed-array-buffer-1.0.2.tgz#1867c5d83b20fcb5ccf32649e5e2fc7424474ff3"
+  integrity sha512-gEymJYKZtKXzzBzM4jqa9w6Q1Jjm7x2d+sh19AdsD4wqnMPDYyvwpsIc2Q/835kHuo3BEQ7CjelGhfTsoBb2MQ==
+  dependencies:
+    call-bind "^1.0.7"
+    es-errors "^1.3.0"
+    is-typed-array "^1.1.13"
+
+typed-array-byte-length@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/typed-array-byte-length/-/typed-array-byte-length-1.0.1.tgz#d92972d3cff99a3fa2e765a28fcdc0f1d89dec67"
+  integrity sha512-3iMJ9q0ao7WE9tWcaYKIptkNBuOIcZCCT0d4MRvuuH88fEoEH62IuQe0OtraD3ebQEoTRk8XCBoknUNc1Y67pw==
+  dependencies:
+    call-bind "^1.0.7"
+    for-each "^0.3.3"
+    gopd "^1.0.1"
+    has-proto "^1.0.3"
+    is-typed-array "^1.1.13"
+
+typed-array-byte-offset@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/typed-array-byte-offset/-/typed-array-byte-offset-1.0.2.tgz#f9ec1acb9259f395093e4567eb3c28a580d02063"
+  integrity sha512-Ous0vodHa56FviZucS2E63zkgtgrACj7omjwd/8lTEMEPFFyjfixMZ1ZXenpgCFBBt4EC1J2XsyVS2gkG0eTFA==
+  dependencies:
+    available-typed-arrays "^1.0.7"
+    call-bind "^1.0.7"
+    for-each "^0.3.3"
+    gopd "^1.0.1"
+    has-proto "^1.0.3"
+    is-typed-array "^1.1.13"
+
+typed-array-length@^1.0.6:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/typed-array-length/-/typed-array-length-1.0.6.tgz#57155207c76e64a3457482dfdc1c9d1d3c4c73a3"
+  integrity sha512-/OxDN6OtAk5KBpGb28T+HZc2M+ADtvRxXrKKbUwtsLgdoxgX13hyy7ek6bFRl5+aBs2yZzB0c4CnQfAtVypW/g==
+  dependencies:
+    call-bind "^1.0.7"
+    for-each "^0.3.3"
+    gopd "^1.0.1"
+    has-proto "^1.0.3"
+    is-typed-array "^1.1.13"
+    possible-typed-array-names "^1.0.0"
+
 typedarray-to-buffer@^3.1.5:
   version "3.1.5"
   resolved "https://registry.yarnpkg.com/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz#a97ee7a9ff42691b9f783ff1bc5112fe3fca9080"

@@ -9168,11 +10253,6 @@ typedarray-to-buffer@^3.1.5:
   dependencies:
     is-typedarray "^1.0.0"

-ua-parser-js@^0.7.30:
-  version "0.7.32"
-  resolved "https://registry.yarnpkg.com/ua-parser-js/-/ua-parser-js-0.7.32.tgz#cd8c639cdca949e30fa68c44b7813ef13e36d211"
-  integrity sha512-f9BESNVhzlhEFf2CHMSj40NWOjYPl1YKYbrvIr/hFTDEmLq7SRbWvm7FcdcpCYT95zrOhC7gZSxjdnnTpBcwVw==
-
 unbox-primitive@^1.0.2:
   version "1.0.2"
   resolved "https://registry.yarnpkg.com/unbox-primitive/-/unbox-primitive-1.0.2.tgz#29032021057d5e6cdbd08c5129c226dff8ed6f9e"

@@ -9183,6 +10263,11 @@ unbox-primitive@^1.0.2:
     has-symbols "^1.0.3"
     which-boxed-primitive "^1.0.2"

+undici-types@~6.19.2:
+  version "6.19.8"
+  resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-6.19.8.tgz#35111c9d1437ab83a7cdc0abae2f26d88eda0a02"
+  integrity sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==
+
 unescape@^1.0.1:
   version "1.0.1"
   resolved "https://registry.yarnpkg.com/unescape/-/unescape-1.0.1.tgz#956e430f61cad8a4d57d82c518f5e6cc5d0dda96"

@@ -9190,18 +10275,15 @@ unescape@^1.0.1:
   dependencies:
     extend-shallow "^2.0.1"

-unherit@^1.0.4:
-  version "1.1.3"
-  resolved "https://registry.yarnpkg.com/unherit/-/unherit-1.1.3.tgz#6c9b503f2b41b262330c80e91c8614abdaa69c22"
-  integrity sha512-Ft16BJcnapDKp0+J/rqFC3Rrk6Y/Ng4nzsC028k2jdDII/rdZ7Wd3pPT/6+vIIxRagwRc9K0IUX0Ra4fKvw+WQ==
-  dependencies:
-    inherits "^2.0.0"
-    xtend "^4.0.0"
-
 unicode-canonical-property-names-ecmascript@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz#301acdc525631670d39f6146e0e77ff6bbdebddc"
-  integrity sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.1.tgz#cb3173fe47ca743e228216e4a3ddc4c84d628cc2"
+  integrity sha512-dA8WbNeb2a6oQzAQ55YlT5vQAWGV9WXOsi3SskE3bcCdM0P4SDd+24zS/OCacdRq5BkdsRj9q3Pg6YyQoxIGqg==
+
+unicode-emoji-modifier-base@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/unicode-emoji-modifier-base/-/unicode-emoji-modifier-base-1.0.0.tgz#dbbd5b54ba30f287e2a8d5a249da6c0cef369459"
+  integrity sha512-yLSH4py7oFH3oG/9K+XWrz1pSi3dfUrWEnInbxMfArOfc1+33BlGPQtLsOYwvdMy11AwUBetYuaRxSPqgkq+8g==

 unicode-match-property-ecmascript@^2.0.0:
   version "2.0.0"

@@ -9212,27 +10294,15 @@ unicode-match-property-ecmascript@^2.0.0:
     unicode-property-aliases-ecmascript "^2.0.0"

 unicode-match-property-value-ecmascript@^2.0.0, unicode-match-property-value-ecmascript@^2.1.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.1.0.tgz#cb5fffdcd16a05124f5a4b0bf7c3770208acbbe0"
-  integrity sha512-qxkjQt6qjg/mYscYMC0XKRn3Rh0wFPlfxB0xkt9CfyTvpX1Ra0+rAmdX2QyAobptSEvuy4RtpPRui6XkV+8wjA==
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.2.0.tgz#a0401aee72714598f739b68b104e4fe3a0cb3c71"
+  integrity sha512-4IehN3V/+kkr5YeSSDDQG8QLqO26XpL2XP3GQtqwlT/QYSECAwFztxVHjlbh0+gjJ3XmNLS0zDsbgs9jWKExLg==

 unicode-property-aliases-ecmascript@^2.0.0:
   version "2.1.0"
   resolved "https://registry.yarnpkg.com/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz#43d41e3be698bd493ef911077c9b131f827e8ccd"
   integrity sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==

-unified@9.2.0:
-  version "9.2.0"
-  resolved "https://registry.yarnpkg.com/unified/-/unified-9.2.0.tgz#67a62c627c40589edebbf60f53edfd4d822027f8"
-  integrity sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg==
-  dependencies:
-    bail "^1.0.0"
-    extend "^3.0.0"
-    is-buffer "^2.0.0"
-    is-plain-obj "^2.0.0"
-    trough "^1.0.0"
-    vfile "^4.0.0"
-
 unified@^10.0.0:
   version "10.1.2"
   resolved "https://registry.yarnpkg.com/unified/-/unified-10.1.2.tgz#b1d64e55dafe1f0b98bb6c719881103ecf6c86df"

@@ -9246,7 +10316,20 @@ unified@^10.0.0:
     trough "^2.0.0"
     vfile "^5.0.0"

-unified@^9.0.0, unified@^9.2.1, unified@^9.2.2:
+unified@^11.0.0, unified@^11.0.3, unified@^11.0.4:
+  version "11.0.5"
+  resolved "https://registry.yarnpkg.com/unified/-/unified-11.0.5.tgz#f66677610a5c0a9ee90cab2b8d4d66037026d9e1"
+  integrity sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==
+  dependencies:
+    "@types/unist" "^3.0.0"
+    bail "^2.0.0"
+    devlop "^1.0.0"
+    extend "^3.0.0"
+    is-plain-obj "^4.0.0"
+    trough "^2.0.0"
+    vfile "^6.0.0"
+
+unified@^9.0.0, unified@^9.2.1:
   version "9.2.2"
   resolved "https://registry.yarnpkg.com/unified/-/unified-9.2.2.tgz#67649a1abfc3ab85d2969502902775eb03146975"
   integrity sha512-Sg7j110mtefBD+qunSLO1lqOEKdrwBFBrR6Qd8f4uwkhWNlbkaqwHse6e7QvD3AP/MNoJdEDLaf8OxYyoWgorQ==

@@ -9258,24 +10341,12 @@ unified@^9.0.0, unified@^9.2.1, unified@^9.2.2:
     trough "^1.0.0"
     vfile "^4.0.0"

-unique-string@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/unique-string/-/unique-string-2.0.0.tgz#39c6451f81afb2749de2b233e3f7c5e8843bd89d"
-  integrity sha512-uNaeirEPvpZWSgzwsPGtU2zVSTrn/8L5q/IexZmH0eH6SA73CmAA5U4GwORTxQAZs95TAXLNqeLoPPNO5gZfWg==
-  dependencies:
-    crypto-random-string "^2.0.0"
-
-unist-builder@2.0.3, unist-builder@^2.0.0:
-  version "2.0.3"
-  resolved "https://registry.yarnpkg.com/unist-builder/-/unist-builder-2.0.3.tgz#77648711b5d86af0942f334397a33c5e91516436"
-  integrity sha512-f98yt5pnlMWlzP539tPc4grGMsFaQQlP/vM396b00jngsiINumNmsY8rkXjfoi1c6QaM8nQ3vaGDuoKWbe/1Uw==
-
-unist-builder@^3.0.0:
+unique-string@^3.0.0:
   version "3.0.0"
-  resolved "https://registry.yarnpkg.com/unist-builder/-/unist-builder-3.0.0.tgz#728baca4767c0e784e1e64bb44b5a5a753021a04"
-  integrity sha512-GFxmfEAa0vi9i5sd0R2kcrI9ks0r82NasRq5QHh2ysGngrc6GiqD5CDf1FjPenY4vApmFASBIIlk/jj5J5YbmQ==
+  resolved "https://registry.yarnpkg.com/unique-string/-/unique-string-3.0.0.tgz#84a1c377aff5fd7a8bc6b55d8244b2bd90d75b9a"
+  integrity sha512-VGXBUVwxKMBUznyffQweQABPRRW1vHZAbadFZud4pLFAqRGvv/96vafgjWFqzourzr8YonlQiPgH0YCJfawoGQ==
   dependencies:
-    "@types/unist" "^2.0.0"
+    crypto-random-string "^4.0.0"

 unist-util-find-after@^3.0.0:
   version "3.0.0"

@@ -9284,15 +10355,10 @@ unist-util-find-after@^3.0.0:
   dependencies:
     unist-util-is "^4.0.0"

-unist-util-generated@^1.0.0:
-  version "1.1.6"
-  resolved "https://registry.yarnpkg.com/unist-util-generated/-/unist-util-generated-1.1.6.tgz#5ab51f689e2992a472beb1b35f2ce7ff2f324d4b"
-  integrity sha512-cln2Mm1/CZzN5ttGK7vkoGw+RZ8VcUH6BtGbq98DDtRGquAAOXig1mrBQYelOwMXYS8rK+vZDyyojSjp7JX+Lg==
-
 unist-util-generated@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/unist-util-generated/-/unist-util-generated-2.0.0.tgz#86fafb77eb6ce9bfa6b663c3f5ad4f8e56a60113"
-  integrity sha512-TiWE6DVtVe7Ye2QxOVW9kqybs6cZexNwTwSMVgkfjEReqy/xwGpAXb99OxktoWwmL+Z+Epb0Dn8/GNDYP1wnUw==
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/unist-util-generated/-/unist-util-generated-2.0.1.tgz#e37c50af35d3ed185ac6ceacb6ca0afb28a85cae"
+  integrity sha512-qF72kLmPxAw0oN2fwpWIqbXAVyEqUzDHMsbtPvOudIlUzXYFIeQIuxXQCRCFh22B7cixvU0MG7m3MW8FTq/S+A==

 unist-util-is@^4.0.0, unist-util-is@^4.0.2:
   version "4.1.0"

@@ -9300,55 +10366,59 @@ unist-util-is@^4.0.0, unist-util-is@^4.0.2:
   integrity sha512-ZOQSsnce92GrxSqlnEEseX0gi7GH9zTJZ0p9dtu87WRb/37mMPO2Ilx1s/t9vBHrFhbgweUwb+t7cIn5dxPhZg==

 unist-util-is@^5.0.0:
-  version "5.1.1"
-  resolved "https://registry.yarnpkg.com/unist-util-is/-/unist-util-is-5.1.1.tgz#e8aece0b102fa9bc097b0fef8f870c496d4a6236"
-  integrity sha512-F5CZ68eYzuSvJjGhCLPL3cYx45IxkqXSetCcRgUXtbcm50X2L9oOWQlfUfDdAf+6Pd27YDblBfdtmsThXmwpbQ==
+  version "5.2.1"
+  resolved "https://registry.yarnpkg.com/unist-util-is/-/unist-util-is-5.2.1.tgz#b74960e145c18dcb6226bc57933597f5486deae9"
+  integrity sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==
+  dependencies:
+    "@types/unist" "^2.0.0"
+
+unist-util-is@^6.0.0:
+  version "6.0.0"
+  resolved "https://registry.yarnpkg.com/unist-util-is/-/unist-util-is-6.0.0.tgz#b775956486aff107a9ded971d996c173374be424"
+  integrity sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw==
+  dependencies:
+    "@types/unist" "^3.0.0"

 unist-util-position-from-estree@^1.0.0, unist-util-position-from-estree@^1.1.0:
-  version "1.1.1"
-  resolved "https://registry.yarnpkg.com/unist-util-position-from-estree/-/unist-util-position-from-estree-1.1.1.tgz#96f4d543dfb0428edc01ebb928570b602d280c4c"
-  integrity sha512-xtoY50b5+7IH8tFbkw64gisG9tMSpxDjhX9TmaJJae/XuxQ9R/Kc8Nv1eOsf43Gt4KV/LkriMy9mptDr7XLcaw==
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/unist-util-position-from-estree/-/unist-util-position-from-estree-1.1.2.tgz#8ac2480027229de76512079e377afbcabcfcce22"
+  integrity sha512-poZa0eXpS+/XpoQwGwl79UUdea4ol2ZuCYguVaJS4qzIOMDzbqz8a3erUCOmubSZkaOuGamb3tX790iwOIROww==
   dependencies:
     "@types/unist" "^2.0.0"

-unist-util-position@^3.0.0:
-  version "3.1.0"
-  resolved "https://registry.yarnpkg.com/unist-util-position/-/unist-util-position-3.1.0.tgz#1c42ee6301f8d52f47d14f62bbdb796571fa2d47"
-  integrity sha512-w+PkwCbYSFw8vpgWD0v7zRCl1FpY3fjDSQ3/N/wNd9Ffa4gPi8+4keqt99N3XW6F99t/mUzp2xAhNmfKWp95QA==
+unist-util-position-from-estree@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/unist-util-position-from-estree/-/unist-util-position-from-estree-2.0.0.tgz#d94da4df596529d1faa3de506202f0c9a23f2200"
+  integrity sha512-KaFVRjoqLyF6YXCbVLNad/eS4+OfPQQn2yOd7zF/h5T/CSL2v8NpN6a5TPvtbXthAGw5nG+PuTtq+DdIZr+cRQ==
+  dependencies:
+    "@types/unist" "^3.0.0"

 unist-util-position@^4.0.0:
-  version "4.0.3"
-  resolved "https://registry.yarnpkg.com/unist-util-position/-/unist-util-position-4.0.3.tgz#5290547b014f6222dff95c48d5c3c13a88fadd07"
-  integrity sha512-p/5EMGIa1qwbXjA+QgcBXaPWjSnZfQ2Sc3yBEEfgPwsEmJd8Qh+DSk3LGnmOM4S1bY2C0AjmMnB8RuEYxpPwXQ==
+  version "4.0.4"
+  resolved "https://registry.yarnpkg.com/unist-util-position/-/unist-util-position-4.0.4.tgz#93f6d8c7d6b373d9b825844645877c127455f037"
+  integrity sha512-kUBE91efOWfIVBo8xzh/uZQ7p9ffYRtUbMRZBNFYwf0RK8koUMx6dGUfwylLOKmaT2cs4wSW96QoYUSXAyEtpg==
   dependencies:
     "@types/unist" "^2.0.0"

-unist-util-remove-position@^2.0.0:
-  version "2.0.1"
-  resolved "https://registry.yarnpkg.com/unist-util-remove-position/-/unist-util-remove-position-2.0.1.tgz#5d19ca79fdba712301999b2b73553ca8f3b352cc"
-  integrity sha512-fDZsLYIe2uT+oGFnuZmy73K6ZxOPG/Qcm+w7jbEjaFcJgbQ6cqjs/eSPzXhsmGpAsWPkqZM9pYjww5QTn3LHMA==
+unist-util-position@^5.0.0:
+  version "5.0.0"
+  resolved "https://registry.yarnpkg.com/unist-util-position/-/unist-util-position-5.0.0.tgz#678f20ab5ca1207a97d7ea8a388373c9cf896be4"
+  integrity sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==
   dependencies:
-    unist-util-visit "^2.0.0"
+    "@types/unist" "^3.0.0"

 unist-util-remove-position@^4.0.0:
-  version "4.0.1"
-  resolved "https://registry.yarnpkg.com/unist-util-remove-position/-/unist-util-remove-position-4.0.1.tgz#d5b46a7304ac114c8d91990ece085ca7c2c135c8"
-  integrity sha512-0yDkppiIhDlPrfHELgB+NLQD5mfjup3a8UYclHruTJWmY74je8g+CIFr79x5f6AkmzSwlvKLbs63hC0meOMowQ==
+  version "4.0.2"
+  resolved "https://registry.yarnpkg.com/unist-util-remove-position/-/unist-util-remove-position-4.0.2.tgz#a89be6ea72e23b1a402350832b02a91f6a9afe51"
+  integrity sha512-TkBb0HABNmxzAcfLf4qsIbFbaPDvMO6wa3b3j4VcEzFVaw1LBKwnW4/sRJ/atSLSzoIg41JWEdnE7N6DIhGDGQ==
   dependencies:
     "@types/unist" "^2.0.0"
     unist-util-visit "^4.0.0"

-unist-util-remove@^2.0.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/unist-util-remove/-/unist-util-remove-2.1.0.tgz#b0b4738aa7ee445c402fda9328d604a02d010588"
-  integrity sha512-J8NYPyBm4baYLdCbjmf1bhPu45Cr1MWTm77qd9istEkzWpnN6O9tMsEbB2JhNnBCqGENRqEWomQ+He6au0B27Q==
-  dependencies:
-    unist-util-is "^4.0.0"
-
 unist-util-remove@^3.1.0:
-  version "3.1.0"
-  resolved "https://registry.yarnpkg.com/unist-util-remove/-/unist-util-remove-3.1.0.tgz#8042577e151dac989b7517976bfe4bac58f76ccd"
-  integrity sha512-rO/sIghl13eN8irs5OBN2a4RC10MsJdiePCfwrvnzGtgIbHcDXr2REr0qi9F2r/CIb1r9FyyFmcMRIGs+EyUFw==
+  version "3.1.1"
+  resolved "https://registry.yarnpkg.com/unist-util-remove/-/unist-util-remove-3.1.1.tgz#8bfa181aff916bd32a4ed30b3ed76d0c21c077df"
+  integrity sha512-kfCqZK5YVY5yEa89tvpl7KnBBHu2c6CzMkqHUrlOqaRgGOMp0sMvwWOVrbAtj03KhovQB7i96Gda72v/EFE0vw==
   dependencies:
     "@types/unist" "^2.0.0"
     unist-util-is "^5.0.0"

@@ -9362,12 +10432,19 @@ unist-util-stringify-position@^2.0.0:
     "@types/unist" "^2.0.2"

 unist-util-stringify-position@^3.0.0:
-  version "3.0.2"
-  resolved "https://registry.yarnpkg.com/unist-util-stringify-position/-/unist-util-stringify-position-3.0.2.tgz#5c6aa07c90b1deffd9153be170dce628a869a447"
-  integrity sha512-7A6eiDCs9UtjcwZOcCpM4aPII3bAAGv13E96IkawkOAW0OhH+yRxtY0lzo8KiHpzEMfH7Q+FizUmwp8Iqy5EWg==
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/unist-util-stringify-position/-/unist-util-stringify-position-3.0.3.tgz#03ad3348210c2d930772d64b489580c13a7db39d"
+  integrity sha512-k5GzIBZ/QatR8N5X2y+drfpWG8IDBzdnVj6OInRNWm1oXrzydiaAT2OQiA8DPRRZyAKb9b6I2a6PxYklZD0gKg==
   dependencies:
     "@types/unist" "^2.0.0"

+unist-util-stringify-position@^4.0.0:
+  version "4.0.0"
+  resolved "https://registry.yarnpkg.com/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz#449c6e21a880e0855bf5aabadeb3a740314abac2"
+  integrity sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==
+  dependencies:
+    "@types/unist" "^3.0.0"
+
 unist-util-visit-parents@^3.0.0:
   version "3.1.1"
   resolved "https://registry.yarnpkg.com/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz#65a6ce698f78a6b0f56aa0e88f13801886cdaef6"

@@ -9377,14 +10454,22 @@ unist-util-visit-parents@^3.0.0:
     unist-util-is "^4.0.0"

 unist-util-visit-parents@^5.0.0, unist-util-visit-parents@^5.1.1:
-  version "5.1.1"
-  resolved "https://registry.yarnpkg.com/unist-util-visit-parents/-/unist-util-visit-parents-5.1.1.tgz#868f353e6fce6bf8fa875b251b0f4fec3be709bb"
-  integrity sha512-gks4baapT/kNRaWxuGkl5BIhoanZo7sC/cUT/JToSRNL1dYoXRFl75d++NkjYk4TAu2uv2Px+l8guMajogeuiw==
+  version "5.1.3"
+  resolved "https://registry.yarnpkg.com/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz#b4520811b0ca34285633785045df7a8d6776cfeb"
+  integrity sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==
   dependencies:
     "@types/unist" "^2.0.0"
     unist-util-is "^5.0.0"

-unist-util-visit@2.0.3, unist-util-visit@^2.0.0, unist-util-visit@^2.0.1, unist-util-visit@^2.0.3:
+unist-util-visit-parents@^6.0.0:
+  version "6.0.1"
+  resolved "https://registry.yarnpkg.com/unist-util-visit-parents/-/unist-util-visit-parents-6.0.1.tgz#4d5f85755c3b8f0dc69e21eca5d6d82d22162815"
+  integrity sha512-L/PqWzfTP9lzzEa6CKs0k2nARxTdZduw3zyh8d2NVBnsyvHjSX4TWse388YrrQKbvI8w20fGjGlhgT96WwKykw==
+  dependencies:
+    "@types/unist" "^3.0.0"
+    unist-util-is "^6.0.0"
+
+unist-util-visit@^2.0.0, unist-util-visit@^2.0.1, unist-util-visit@^2.0.3:
   version "2.0.3"
   resolved "https://registry.yarnpkg.com/unist-util-visit/-/unist-util-visit-2.0.3.tgz#c3703893146df47203bb8a9795af47d7b971208c"
   integrity sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==

@@ -9394,18 +10479,27 @@ unist-util-visit@2.0.3, unist-util-visit@^2.0.0, unist-util-visit@^2.0.1, unist-
     unist-util-visit-parents "^3.0.0"

 unist-util-visit@^4.0.0:
-  version "4.1.1"
-  resolved "https://registry.yarnpkg.com/unist-util-visit/-/unist-util-visit-4.1.1.tgz#1c4842d70bd3df6cc545276f5164f933390a9aad"
-  integrity sha512-n9KN3WV9k4h1DxYR1LoajgN93wpEi/7ZplVe02IoB4gH5ctI1AaF2670BLHQYbwj+pY83gFtyeySFiyMHJklrg==
+  version "4.1.2"
+  resolved "https://registry.yarnpkg.com/unist-util-visit/-/unist-util-visit-4.1.2.tgz#125a42d1eb876283715a3cb5cceaa531828c72e2"
+  integrity sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==
   dependencies:
     "@types/unist" "^2.0.0"
     unist-util-is "^5.0.0"
     unist-util-visit-parents "^5.1.1"

+unist-util-visit@^5.0.0:
+  version "5.0.0"
+  resolved "https://registry.yarnpkg.com/unist-util-visit/-/unist-util-visit-5.0.0.tgz#a7de1f31f72ffd3519ea71814cccf5fd6a9217d6"
+  integrity sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==
+  dependencies:
+    "@types/unist" "^3.0.0"
+    unist-util-is "^6.0.0"
+    unist-util-visit-parents "^6.0.0"
+
 universalify@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/universalify/-/universalify-2.0.0.tgz#75a4984efedc4b08975c5aeb73f530d02df25717"
-  integrity sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/universalify/-/universalify-2.0.1.tgz#168efc2180964e6386d061e094df61afe239b18d"
+  integrity sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==

 unpipe@1.0.0, unpipe@~1.0.0:
   version "1.0.0"

@@ -9417,33 +10511,33 @@ unquote@~1.1.1:
   resolved "https://registry.yarnpkg.com/unquote/-/unquote-1.1.1.tgz#8fded7324ec6e88a0ff8b905e7c098cdc086d544"
   integrity sha512-vRCqFv6UhXpWxZPyGDh/F3ZpNv8/qo7w6iufLpQg9aKnQ71qM4B5KiI7Mia9COcjEhrO9LueHpMYjYzsWH3OIg==

-update-browserslist-db@^1.0.9:
-  version "1.0.10"
-  resolved "https://registry.yarnpkg.com/update-browserslist-db/-/update-browserslist-db-1.0.10.tgz#0f54b876545726f17d00cd9a2561e6dade943ff3"
-  integrity sha512-OztqDenkfFkbSG+tRxBeAnCVPckDBcvibKd35yDONx6OU8N7sqgwc7rCbkJ/WcYtVRZ4ba68d6byhC21GFh7sQ==
+update-browserslist-db@^1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/update-browserslist-db/-/update-browserslist-db-1.1.1.tgz#80846fba1d79e82547fb661f8d141e0945755fe5"
+  integrity sha512-R8UzCaa9Az+38REPiJ1tXlImTJXlVfgHZsglwBD/k6nj76ctsH1E3q4doGrukiLQd3sGQYu56r5+lo5r94l29A==
   dependencies:
-    escalade "^3.1.1"
-    picocolors "^1.0.0"
+    escalade "^3.2.0"
+    picocolors "^1.1.0"

-update-notifier@^5.1.0:
-  version "5.1.0"
-  resolved "https://registry.yarnpkg.com/update-notifier/-/update-notifier-5.1.0.tgz#4ab0d7c7f36a231dd7316cf7729313f0214d9ad9"
-  integrity sha512-ItnICHbeMh9GqUy31hFPrD1kcuZ3rpxDZbf4KUDavXwS0bW5m7SLbDQpGX3UYr072cbrF5hFUs3r5tUsPwjfHw==
-  dependencies:
-    boxen "^5.0.0"
-    chalk "^4.1.0"
-    configstore "^5.0.1"
-    has-yarn "^2.1.0"
-    import-lazy "^2.1.0"
-    is-ci "^2.0.0"
+update-notifier@^6.0.2:
+  version "6.0.2"
+  resolved "https://registry.yarnpkg.com/update-notifier/-/update-notifier-6.0.2.tgz#a6990253dfe6d5a02bd04fbb6a61543f55026b60"
+  integrity sha512-EDxhTEVPZZRLWYcJ4ZXjGFN0oP7qYvbXWzEgRm/Yql4dHX5wDbvh89YHP6PK1lzZJYrMtXUuZZz8XGK+U6U1og==
+  dependencies:
+    boxen "^7.0.0"
+    chalk "^5.0.1"
+    configstore "^6.0.0"
+    has-yarn "^3.0.0"
+    import-lazy "^4.0.0"
+    is-ci "^3.0.1"
     is-installed-globally "^0.4.0"
-    is-npm "^5.0.0"
-    is-yarn-global "^0.3.0"
-    latest-version "^5.1.0"
-    pupa "^2.1.1"
-    semver "^7.3.4"
-    semver-diff "^3.1.1"
-    xdg-basedir "^4.0.0"
+    is-npm "^6.0.0"
+    is-yarn-global "^0.4.0"
+    latest-version "^7.0.0"
+    pupa "^3.1.0"
+    semver "^7.3.7"
+    semver-diff "^4.0.0"
+    xdg-basedir "^5.1.0"

 uri-js@^4.2.2:
   version "4.4.1"

@@ -9461,35 +10555,6 @@ url-loader@^4.1.1:
     mime-types "^2.1.27"
     schema-utils "^3.0.0"

-url-parse-lax@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/url-parse-lax/-/url-parse-lax-3.0.0.tgz#16b5cafc07dbe3676c1b1999177823d6503acb0c"
-  integrity sha512-NjFKA0DidqPa5ciFcSrXnAltTtzz84ogy+NebPvfEgAck0+TNg4UJ4IN+fB7zRZfbgUf0syOo9MDxFkDSMuFaQ==
-  dependencies:
-    prepend-http "^2.0.0"
-
-use-composed-ref@^1.3.0:
-  version "1.3.0"
-  resolved "https://registry.yarnpkg.com/use-composed-ref/-/use-composed-ref-1.3.0.tgz#3d8104db34b7b264030a9d916c5e94fbe280dbda"
-  integrity sha512-GLMG0Jc/jiKov/3Ulid1wbv3r54K9HlMW29IWcDFPEqFkSO2nS0MuefWgMJpeHQ9YJeXDL3ZUF+P3jdXlZX/cQ==
-
-use-isomorphic-layout-effect@^1.1.1:
-  version "1.1.2"
-  resolved "https://registry.yarnpkg.com/use-isomorphic-layout-effect/-/use-isomorphic-layout-effect-1.1.2.tgz#497cefb13d863d687b08477d9e5a164ad8c1a6fb"
-  integrity sha512-49L8yCO3iGT/ZF9QttjwLF/ZD9Iwto5LnH5LmEdk/6cFmXddqi2ulF0edxTwjj+7mqvpVVGQWvbXZdn32wRSHA==
-
-use-latest@^1.2.1:
-  version "1.2.1"
-  resolved "https://registry.yarnpkg.com/use-latest/-/use-latest-1.2.1.tgz#d13dfb4b08c28e3e33991546a2cee53e14038cf2"
-  integrity sha512-xA+AVm/Wlg3e2P/JiItTziwS7FK92LWrDB0p+hgXloIMuVCeJJ8v6f0eeHyPZaJrM+usM1FkFfbNCrJGs8A/zw==
-  dependencies:
-    use-isomorphic-layout-effect "^1.1.1"
-
-use-sync-external-store@^1.2.0:
-  version "1.2.0"
-  resolved "https://registry.yarnpkg.com/use-sync-external-store/-/use-sync-external-store-1.2.0.tgz#7dbefd6ef3fe4e767a0cf5d7287aacfb5846928a"
-  integrity sha512-eEgnFxGQ1Ife9bzYs6VLi8/4X6CObHMw9Qr9tPY43iKwsPw8xE8+EFsf/2cFZ5S3esXgpWgtSCtLNS41F+sKPA==
-
 util-deprecate@^1.0.1, util-deprecate@^1.0.2, util-deprecate@~1.0.1:
   version "1.0.2"
   resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf"

@@ -9505,7 +10570,7 @@ util.promisify@~1.0.0:
     has-symbols "^1.0.1"
     object.getownpropertydescriptors "^2.1.0"

-util@^0.12.0:
+util@^0.12.5:
   version "0.12.5"
   resolved "https://registry.yarnpkg.com/util/-/util-0.12.5.tgz#5f17a6059b73db61a875668781a1c2b136bd6fbc"
   integrity sha512-kZf/K6hEIrWHI6XqOFUiiMa+79wE/D8Q+NCNAWclkyg3b4d2k7s0QGepNjiABc+aR3N1PAyHL7p6UcLY6LmrnA==

@@ -9522,9 +10587,9 @@ utila@~0.4:
   integrity sha512-Z0DbgELS9/L/75wZbro8xAnT50pBVFQZ+hUEueGDU5FN51YSCYM+jdxsfCiHjwNP/4LCDD0i/graKpeBnOXKRA==

 utility-types@^3.10.0:
-  version "3.10.0"
-  resolved "https://registry.yarnpkg.com/utility-types/-/utility-types-3.10.0.tgz#ea4148f9a741015f05ed74fd615e1d20e6bed82b"
-  integrity sha512-O11mqxmi7wMKCo6HKFt5AhO4BwY3VV68YU07tgxfz8zJTIxr4BpsezN49Ffwy9j3ZpwwJp4fkRwjRzq3uWE6Rg==
+  version "3.11.0"
+  resolved "https://registry.yarnpkg.com/utility-types/-/utility-types-3.11.0.tgz#607c40edb4f258915e901ea7995607fdf319424c"
+  integrity sha512-6Z7Ma2aVEWisaL6TvBCy7P8rm2LQoPv6dJ7ecIaIixHcwfbJ0x7mWdbcwlIM5IGQxPZSFYeqRCqlOOeKoJYMkw==

 utils-merge@1.0.1:
   version "1.0.1"

@@ -9537,9 +10602,9 @@ uuid@^8.3.2:
   integrity sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==

 uuid@^9.0.0:
-  version "9.0.0"
-  resolved "https://registry.yarnpkg.com/uuid/-/uuid-9.0.0.tgz#592f550650024a38ceb0c562f2f6aa435761efb5"
-  integrity sha512-MXcSTerfPa4uqyzStbRoTgt5XIe3x5+42+q1sDuy3R5MDk66URdLMOZe5aPX/SQd+kuYAh0FdP/pO28IkQyTeg==
+  version "9.0.1"
+  resolved "https://registry.yarnpkg.com/uuid/-/uuid-9.0.1.tgz#e188d4c8853cc722220392c424cd637f32293f30"
+  integrity sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==

 uvu@^0.5.0:
   version "0.5.6"

@@ -9552,12 +10617,12 @@ uvu@^0.5.0:
     sade "^1.7.3"

 validate-peer-dependencies@^2.1.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/validate-peer-dependencies/-/validate-peer-dependencies-2.1.0.tgz#1ad8218b1b168aeb500165f9de2a3f53269ece56"
-  integrity sha512-x+M+mp16g4N+jDQJO6a+AKnMHAViov9mRzYfgMYR6Bq+UTwewf8aTQsP+e1QH0oZrADqP7fuI/bEbl3CzRFhOQ==
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/validate-peer-dependencies/-/validate-peer-dependencies-2.2.0.tgz#47b8ff008f66a66fc5d8699123844522c1d874f4"
+  integrity sha512-8X1OWlERjiUY6P6tdeU9E0EwO8RA3bahoOVG7ulOZT5MqgNDUO/BQoVjYiHPcNe+v8glsboZRIw9iToMAA2zAA==
   dependencies:
-    resolve-package-path "^4.0.0"
-    semver "^7.3.2"
+    resolve-package-path "^4.0.3"
+    semver "^7.3.8"

 value-equal@^1.0.1:
   version "1.0.1"

@@ -9569,18 +10634,18 @@ vary@~1.1.2:
   resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.2.tgz#2299f02c6ded30d4a5961b0b9f74524a18f634fc"
   integrity sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==

-vfile-location@^3.0.0, vfile-location@^3.2.0:
+vfile-location@^3.2.0:
   version "3.2.0"
   resolved "https://registry.yarnpkg.com/vfile-location/-/vfile-location-3.2.0.tgz#d8e41fbcbd406063669ebf6c33d56ae8721d0f3c"
   integrity sha512-aLEIZKv/oxuCDZ8lkJGhuhztf/BW4M+iHdCwglA/eWc+vtuRFJj8EtgceYFX4LRjOhCAAiNHsKGssC6onJ+jbA==

-vfile-location@^4.0.0:
-  version "4.0.1"
-  resolved "https://registry.yarnpkg.com/vfile-location/-/vfile-location-4.0.1.tgz#06f2b9244a3565bef91f099359486a08b10d3a95"
-  integrity sha512-JDxPlTbZrZCQXogGheBHjbRWjESSPEak770XwWPfw5mTc1v1nWGLB/apzZxsx8a0SJVfF8HK8ql8RD308vXRUw==
+vfile-location@^5.0.0:
+  version "5.0.3"
+  resolved "https://registry.yarnpkg.com/vfile-location/-/vfile-location-5.0.3.tgz#cb9eacd20f2b6426d19451e0eafa3d0a846225c3"
+  integrity sha512-5yXvWDEgqeiYiBe1lbxYF7UMAIm/IcopxMHrMQDq3nvKcjPKIhZklUKL+AE7J7uApI4kwe2snsK+eI6UTj9EHg==
   dependencies:
-    "@types/unist" "^2.0.0"
-    vfile "^5.0.0"
+    "@types/unist" "^3.0.0"
+    vfile "^6.0.0"

 vfile-message@^2.0.0:
   version "2.0.4"

@@ -9591,13 +10656,21 @@ vfile-message@^2.0.0:
     unist-util-stringify-position "^2.0.0"

 vfile-message@^3.0.0:
-  version "3.1.3"
-  resolved "https://registry.yarnpkg.com/vfile-message/-/vfile-message-3.1.3.tgz#1360c27a99234bebf7bddbbbca67807115e6b0dd"
-  integrity sha512-0yaU+rj2gKAyEk12ffdSbBfjnnj+b1zqTBv3OQCTn8yEB02bsPizwdBPrLJjHnK+cU9EMMcUnNv938XcZIkmdA==
+  version "3.1.4"
+  resolved "https://registry.yarnpkg.com/vfile-message/-/vfile-message-3.1.4.tgz#15a50816ae7d7c2d1fa87090a7f9f96612b59dea"
+  integrity sha512-fa0Z6P8HUrQN4BZaX05SIVXic+7kE3b05PWAtPuYP9QLHsLKYR7/AlLW3NtOrpXRLeawpDLMsVkmk5DG0NXgWw==
   dependencies:
     "@types/unist" "^2.0.0"
     unist-util-stringify-position "^3.0.0"

+vfile-message@^4.0.0:
+  version "4.0.2"
+  resolved "https://registry.yarnpkg.com/vfile-message/-/vfile-message-4.0.2.tgz#c883c9f677c72c166362fd635f21fc165a7d1181"
+  integrity sha512-jRDZ1IMLttGj41KcZvlrYAaI3CfqpLpfpf+Mfig13viT6NKvRzWZ+lXz0Y5D60w6uJIBAOGq9mSHf0gktF0duw==
+  dependencies:
+    "@types/unist" "^3.0.0"
+    unist-util-stringify-position "^4.0.0"
+
 vfile@^4.0.0:
   version "4.2.1"
   resolved "https://registry.yarnpkg.com/vfile/-/vfile-4.2.1.tgz#03f1dce28fc625c625bc6514350fbdb00fa9e624"

@@ -9609,31 +10682,28 @@ vfile@^4.0.0:
     vfile-message "^2.0.0"

 vfile@^5.0.0:
-  version "5.3.6"
-  resolved "https://registry.yarnpkg.com/vfile/-/vfile-5.3.6.tgz#61b2e70690cc835a5d0d0fd135beae74e5a39546"
-  integrity sha512-ADBsmerdGBs2WYckrLBEmuETSPyTD4TuLxTrw0DvjirxW1ra4ZwkbzG8ndsv3Q57smvHxo677MHaQrY9yxH8cA==
+  version "5.3.7"
+  resolved "https://registry.yarnpkg.com/vfile/-/vfile-5.3.7.tgz#de0677e6683e3380fafc46544cfe603118826ab7"
+  integrity sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==
   dependencies:
     "@types/unist" "^2.0.0"
     is-buffer "^2.0.0"
     unist-util-stringify-position "^3.0.0"
     vfile-message "^3.0.0"

+vfile@^6.0.0, vfile@^6.0.1:
+  version "6.0.3"
+  resolved "https://registry.yarnpkg.com/vfile/-/vfile-6.0.3.tgz#3652ab1c496531852bf55a6bac57af981ebc38ab"
+  integrity sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==
+  dependencies:
+    "@types/unist" "^3.0.0"
+    vfile-message "^4.0.0"
+
 vlq@^1.0.0:
   version "1.0.1"
   resolved "https://registry.yarnpkg.com/vlq/-/vlq-1.0.1.tgz#c003f6e7c0b4c1edd623fd6ee50bbc0d6a1de468"
   integrity sha512-gQpnTgkubC6hQgdIcRdYGDSDc+SaujOdyesZQMv6JlfQee/9Mp0Qhnys6WxDWvQnL5WZdT7o2Ul187aSt0Rq+w==

-wait-on@^6.0.1:
-  version "6.0.1"
-  resolved "https://registry.yarnpkg.com/wait-on/-/wait-on-6.0.1.tgz#16bbc4d1e4ebdd41c5b4e63a2e16dbd1f4e5601e"
-  integrity sha512-zht+KASY3usTY5u2LgaNqn/Cd8MukxLGjdcZxT2ns5QzDmTFc4XoWBgC+C/na+sMRZTuVygQoMYwdcVjHnYIVw==
-  dependencies:
-    axios "^0.25.0"
-    joi "^17.6.0"
-    lodash "^4.17.21"
-    minimist "^1.2.5"
-    rxjs "^7.5.4"
-
 warning@^4.0.3:
   version "4.0.3"
   resolved "https://registry.yarnpkg.com/warning/-/warning-4.0.3.tgz#16e9e077eb8a86d6af7d64aa1e05fd85b4678ca3"

@@ -9641,10 +10711,10 @@ warning@^4.0.3:
   dependencies:
     loose-envify "^1.0.0"

-watchpack@^2.4.0:
-  version "2.4.0"
-  resolved "https://registry.yarnpkg.com/watchpack/-/watchpack-2.4.0.tgz#fa33032374962c78113f93c7f2fb4c54c9862a5d"
-  integrity sha512-Lcvm7MGST/4fup+ifyKi2hjyIAwcdI4HRgtvTpIUxBRhB+RFtUh8XtDOxUfctVCnhVi+QQj49i91OyvzkJl6cg==
+watchpack@^2.4.1:
+  version "2.4.2"
+  resolved "https://registry.yarnpkg.com/watchpack/-/watchpack-2.4.2.tgz#2feeaed67412e7c33184e5a79ca738fbd38564da"
+  integrity sha512-TnbFSbcOCcDgjZ4piURLCbJ3nJhznVh9kw6F6iokjiFPl8ONxe9A6nMDVXDiNbrSfLILs6vB07F7wLBrwPYzJw==
   dependencies:
     glob-to-regexp "^0.4.1"
     graceful-fs "^4.1.2"

@@ -9661,30 +10731,43 @@ web-namespaces@^1.0.0:
   resolved "https://registry.yarnpkg.com/web-namespaces/-/web-namespaces-1.1.4.tgz#bc98a3de60dadd7faefc403d1076d529f5e030ec"
   integrity sha512-wYxSGajtmoP4WxfejAPIr4l0fVh+jeMXZb08wNc0tMg6xsfZXj3cECqIK0G7ZAqUq0PP8WlMDtaOGVBTAWztNw==

+web-namespaces@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/web-namespaces/-/web-namespaces-2.0.1.tgz#1010ff7c650eccb2592cebeeaf9a1b253fd40692"
+  integrity sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ==
+
+web-worker@^1.2.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/web-worker/-/web-worker-1.3.0.tgz#e5f2df5c7fe356755a5fb8f8410d4312627e6776"
+  integrity sha512-BSR9wyRsy/KOValMgd5kMyr3JzpdeoR9KVId8u5GVlTTAtNChlsE4yTxeY7zMdNSyOmoKBv8NH2qeRY9Tg+IaA==
+
 webidl-conversions@^3.0.0:
   version "3.0.1"
   resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871"
   integrity sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==

-webpack-bundle-analyzer@^4.5.0:
-  version "4.7.0"
-  resolved "https://registry.yarnpkg.com/webpack-bundle-analyzer/-/webpack-bundle-analyzer-4.7.0.tgz#33c1c485a7fcae8627c547b5c3328b46de733c66"
-  integrity sha512-j9b8ynpJS4K+zfO5GGwsAcQX4ZHpWV+yRiHDiL+bE0XHJ8NiPYLTNVQdlFYWxtpg9lfAQNlwJg16J9AJtFSXRg==
+webpack-bundle-analyzer@^4.9.0:
+  version "4.10.2"
+  resolved "https://registry.yarnpkg.com/webpack-bundle-analyzer/-/webpack-bundle-analyzer-4.10.2.tgz#633af2862c213730be3dbdf40456db171b60d5bd"
+  integrity sha512-vJptkMm9pk5si4Bv922ZbKLV8UTT4zib4FPgXMhgzUny0bfDDkLXAVQs3ly3fS4/TN9ROFtb0NFrm04UXFE/Vw==
   dependencies:
+    "@discoveryjs/json-ext" "0.5.7"
     acorn "^8.0.4"
     acorn-walk "^8.0.0"
-    chalk "^4.1.0"
     commander "^7.2.0"
+    debounce "^1.2.1"
+    escape-string-regexp "^4.0.0"
     gzip-size "^6.0.0"
-    lodash "^4.17.20"
+    html-escaper "^2.0.2"
     opener "^1.5.2"
-    sirv "^1.0.7"
+    picocolors "^1.0.0"
+    sirv "^2.0.3"
     ws "^7.3.1"

-webpack-dev-middleware@^5.3.1:
-  version "5.3.3"
-  resolved "https://registry.yarnpkg.com/webpack-dev-middleware/-/webpack-dev-middleware-5.3.3.tgz#efae67c2793908e7311f1d9b06f2a08dcc97e51f"
-  integrity sha512-hj5CYrY0bZLB+eTO+x/j67Pkrquiy7kWepMHmUMoPsmcUaeEnQJqFzHJOyxgWlq746/wUuA64p9ta34Kyb01pA==
+webpack-dev-middleware@^5.3.4:
+  version "5.3.4"
+  resolved "https://registry.yarnpkg.com/webpack-dev-middleware/-/webpack-dev-middleware-5.3.4.tgz#eb7b39281cbce10e104eb2b8bf2b63fce49a3517"
+  integrity sha512-BVdTqhhs+0IfoeAf7EoH5WE+exCmqGerHfDM0IL096Px60Tq2Mn9MAbnaGUe6HiMa41KMCYF19gyzZmBcq/o4Q==
   dependencies:
     colorette "^2.0.10"
     memfs "^3.4.3"

@@ -9692,10 +10775,10 @@ webpack-dev-middleware@^5.3.4:
     range-parser "^1.2.1"
     schema-utils "^4.0.0"

-webpack-dev-server@^4.9.3:
-  version "4.11.1"
-  resolved "https://registry.yarnpkg.com/webpack-dev-server/-/webpack-dev-server-4.11.1.tgz#ae07f0d71ca0438cf88446f09029b92ce81380b5"
-  integrity sha512-lILVz9tAUy1zGFwieuaQtYiadImb5M3d+H+L1zDYalYoDl0cksAB1UNyuE5MMWJrG6zR1tXkCP2fitl7yoUJiw==
+webpack-dev-server@^4.15.1:
+  version "4.15.2"
+  resolved "https://registry.yarnpkg.com/webpack-dev-server/-/webpack-dev-server-4.15.2.tgz#9e0c70a42a012560860adb186986da1248333173"
+  integrity sha512-0XavAZbNJ5sDrCbkpWL8mia0o5WPOd2YGtxrEiZkBK9FjLppIUK2TgxK6qGD2P3hUXTJNNPVibrerKcx5WkR1g==
   dependencies:
     "@types/bonjour" "^3.5.9"
     "@types/connect-history-api-fallback" "^1.3.5"

@@ -9703,7 +10786,7 @@ webpack-dev-server@^4.15.1:
     "@types/serve-index" "^1.9.1"
     "@types/serve-static" "^1.13.10"
     "@types/sockjs" "^0.3.33"
-    "@types/ws" "^8.5.1"
+    "@types/ws" "^8.5.5"
     ansi-html-community "^0.0.8"
     bonjour-service "^1.0.11"
     chokidar "^3.5.3"

@@ -9716,6 +10799,7 @@ webpack-dev-server@^4.15.1:
     html-entities "^2.3.2"
     http-proxy-middleware "^2.0.3"
     ipaddr.js "^2.0.1"
+    launch-editor "^2.6.0"
     open "^8.0.9"
     p-retry "^4.5.0"
     rimraf "^3.0.2"

@@ -9724,50 +10808,50 @@ webpack-dev-server@^4.15.1:
     serve-index "^1.9.1"
     sockjs "^0.3.24"
     spdy "^4.0.2"
-    webpack-dev-middleware "^5.3.1"
-    ws "^8.4.2"
+    webpack-dev-middleware "^5.3.4"
+    ws "^8.13.0"

-webpack-merge@^5.8.0:
-  version "5.8.0"
-  resolved "https://registry.yarnpkg.com/webpack-merge/-/webpack-merge-5.8.0.tgz#2b39dbf22af87776ad744c390223731d30a68f61"
-  integrity sha512-/SaI7xY0831XwP6kzuwhKWVKDP9t1QY1h65lAFLbZqMPIuYcD9QAW4u9STIbU9kaJbPBB/geU/gLr1wDjOhQ+Q==
+webpack-merge@^5.9.0:
+  version "5.10.0"
+  resolved "https://registry.yarnpkg.com/webpack-merge/-/webpack-merge-5.10.0.tgz#a3ad5d773241e9c682803abf628d4cd62b8a4177"
+  integrity sha512-+4zXKdx7UnO+1jaN4l2lHVD+mFvnlZQP/6ljaJVb4SZiwIKeUnrT5l0gkT8z+n4hKpC+jpOv6O9R+gLtag7pSA==
   dependencies:
     clone-deep "^4.0.1"
+    flat "^5.0.2"
     wildcard "^2.0.0"

-webpack-sources@^3.2.2, webpack-sources@^3.2.3:
+webpack-sources@^3.2.3:
   version "3.2.3"
   resolved "https://registry.yarnpkg.com/webpack-sources/-/webpack-sources-3.2.3.tgz#2d4daab8451fd4b240cc27055ff6a0c2ccea0cde"
   integrity sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==

-webpack@^5.73.0:
-  version "5.76.1"
-  resolved "https://registry.yarnpkg.com/webpack/-/webpack-5.76.1.tgz#7773de017e988bccb0f13c7d75ec245f377d295c"
-  integrity sha512-4+YIK4Abzv8172/SGqObnUjaIHjLEuUasz9EwQj/9xmPPkYJy2Mh03Q/lJfSD3YLzbxy5FeTq5Uw0323Oh6SJQ==
+webpack@^5.88.1:
+  version "5.95.0"
+  resolved "https://registry.yarnpkg.com/webpack/-/webpack-5.95.0.tgz#8fd8c454fa60dad186fbe36c400a55848307b4c0"
+  integrity sha512-2t3XstrKULz41MNMBF+cJ97TyHdyQ8HCt//pqErqDvNjU9YQBnZxIHa11VXsi7F3mb5/aO2tuDxdeTPdU7xu9Q==
   dependencies:
-    "@types/eslint-scope" "^3.7.3"
-    "@types/estree" "^0.0.51"
-    "@webassemblyjs/ast" "1.11.1"
-    "@webassemblyjs/wasm-edit" "1.11.1"
-    "@webassemblyjs/wasm-parser" "1.11.1"
+    "@types/estree" "^1.0.5"
+    "@webassemblyjs/ast" "^1.12.1"
+    "@webassemblyjs/wasm-edit" "^1.12.1"
+    "@webassemblyjs/wasm-parser" "^1.12.1"
     acorn "^8.7.1"
-    acorn-import-assertions "^1.7.6"
-    browserslist "^4.14.5"
+    acorn-import-attributes "^1.9.5"
+    browserslist "^4.21.10"
     chrome-trace-event "^1.0.2"
-    enhanced-resolve "^5.10.0"
-    es-module-lexer "^0.9.0"
+    enhanced-resolve "^5.17.1"
+    es-module-lexer "^1.2.1"
     eslint-scope "5.1.1"
     events "^3.2.0"
     glob-to-regexp "^0.4.1"
-    graceful-fs "^4.2.9"
+    graceful-fs "^4.2.11"
     json-parse-even-better-errors "^2.3.1"
     loader-runner "^4.2.0"
     mime-types "^2.1.27"
     neo-async "^2.6.2"
-    schema-utils "^3.1.0"
-    tapable "^2.1.1"
-    terser-webpack-plugin "^5.1.3"
-    watchpack "^2.4.0"
+    schema-utils "^3.2.0"
+    tapable "^2.1.1"
+    terser-webpack-plugin "^5.3.10"
+    watchpack "^2.4.1"
     webpack-sources "^3.2.3"

 webpackbar@^5.0.2:

@@ -9813,17 +10897,16 @@ which-boxed-primitive@^1.0.2:
     is-string "^1.0.5"
     is-symbol "^1.0.3"

-which-typed-array@^1.1.2:
-  version "1.1.9"
-  resolved "https://registry.yarnpkg.com/which-typed-array/-/which-typed-array-1.1.9.tgz#307cf898025848cf995e795e8423c7f337efbde6"
-  integrity sha512-w9c4xkx6mPidwp7180ckYWfMmvxpjlZuIudNtDf4N/tTAUB8VJbX25qZoAsrtGuYNnGw3pa0AXgbGKRB8/EceA==
+which-typed-array@^1.1.14, which-typed-array@^1.1.15, which-typed-array@^1.1.2:
+  version "1.1.15"
+  resolved "https://registry.yarnpkg.com/which-typed-array/-/which-typed-array-1.1.15.tgz#264859e9b11a649b388bfaaf4f767df1f779b38d"
+  integrity sha512-oV0jmFtUky6CXfkqehVvBP/LSWJ2sy4vWMioiENyJLePrBO/yKyV9OyJySfAKosh+RYkIl5zJCNZ8/4JncrpdA==
   dependencies:
-    available-typed-arrays "^1.0.5"
-    call-bind "^1.0.2"
+    available-typed-arrays "^1.0.7"
+    call-bind "^1.0.7"
     for-each "^0.3.3"
     gopd "^1.0.1"
-    has-tostringtag "^1.0.0"
-    is-typed-array "^1.1.10"
+    has-tostringtag "^1.0.2"

 which@^1.3.1:
   version "1.3.1"

@@ -9846,13 +10929,6 @@ wide-align@^1.1.2:
   dependencies:
     string-width "^1.0.2 || 2 || 3 || 4"

-widest-line@^3.1.0:
-  version "3.1.0"
-  resolved "https://registry.yarnpkg.com/widest-line/-/widest-line-3.1.0.tgz#8292333bbf66cb45ff0de1603b136b7ae1496eca"
-  integrity sha512-NsmoXalsWVDMGupxZ5R08ka9flZjjiLvHVAWYOKtiKM8ujtZWr9cRffak+uSE48+Ob8ObalXpwyeUiyDD6QFgg==
-  dependencies:
-    string-width "^4.0.0"
-
 widest-line@^4.0.1:
   version "4.0.1"
   resolved "https://registry.yarnpkg.com/widest-line/-/widest-line-4.0.1.tgz#a0fc673aaba1ea6f0a0d35b3c2795c9a9cc2ebf2"

@@ -9861,23 +10937,14 @@ widest-line@^4.0.1:
     string-width "^5.0.1"

 wildcard@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/wildcard/-/wildcard-2.0.0.tgz#a77d20e5200c6faaac979e4b3aadc7b3dd7f8fec"
-  integrity sha512-JcKqAHLPxcdb9KM49dufGXn2x3ssnfjbcaQdLlfZsL9rH9wgDQjUtDxbo8NE0F6SFvydeu1VhZe7hZuHsB2/pw==
-
-wrap-ansi@^7.0.0:
-  version "7.0.0"
-  resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43"
-  integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==
-  dependencies:
-    ansi-styles "^4.0.0"
-    string-width "^4.1.0"
-    strip-ansi "^6.0.0"
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/wildcard/-/wildcard-2.0.1.tgz#5ab10d02487198954836b6349f74fff961e10f67"
+  integrity sha512-CC1bOL87PIWSBhDcTrdeLo6eGT7mCFtrg0uIJtqJUFyK+eJnzl8A1niH56uu7KMa5XFrtiV+AQuHO3n7DsHnLQ==

-wrap-ansi@^8.0.1:
-  version "8.0.1"
-  resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-8.0.1.tgz#2101e861777fec527d0ea90c57c6b03aac56a5b3"
-  integrity sha512-QFF+ufAqhoYHvoHdajT/Po7KoXVBPXS2bgjIam5isfWJPfIOnQZ50JtUiVvCv/sjgacf3yRrt2ZKUZ/V4itN4g==
+wrap-ansi@^8.0.1, wrap-ansi@^8.1.0:
+  version "8.1.0"
+  resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-8.1.0.tgz#56dc22368ee570face1b49819975d9b9a5ead214"
+  integrity sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==
   dependencies:
     ansi-styles "^6.1.0"
     string-width "^5.0.1"

@@ -9888,7 +10955,7 @@ wrappy@1:
   resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f"
   integrity sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==

-write-file-atomic@^3.0.0:
+write-file-atomic@^3.0.3:
   version "3.0.3"
   resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-3.0.3.tgz#56bd5c5a5c70481cd19c571bd39ab965a5de56e8"
   integrity sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==

@@ -9899,19 +10966,19 @@
     typedarray-to-buffer "^3.1.5"

 ws@^7.3.1:
-  version "7.5.9"
-  resolved "https://registry.yarnpkg.com/ws/-/ws-7.5.9.tgz#54fa7db29f4c7cec68b1ddd3a89de099942bb591"
-  integrity sha512-F+P9Jil7UiSKSkppIiD94dN07AwvFixvLIj1Og1Rl9GGMuNipJnV9JzjD6XuqmAeiswGvUmNLjr5cFuXwNS77Q==
+  version "7.5.10"
+  resolved "https://registry.yarnpkg.com/ws/-/ws-7.5.10.tgz#58b5c20dc281633f6c19113f39b349bd8bd558d9"
+  integrity sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ==

-ws@^8.4.2:
-  version "8.11.0"
-  resolved "https://registry.yarnpkg.com/ws/-/ws-8.11.0.tgz#6a0d36b8edfd9f96d8b25683db2f8d7de6e8e143"
-  integrity sha512-HPG3wQd9sNQoT9xHyNCXoDUa+Xw/VevmY9FoHyQ+g+rrMn4j6FB4np7Z0OhdTgjx6MgQLK7jwSy1YecU1+4Asg==
+ws@^8.13.0:
+  version "8.18.0"
+  resolved "https://registry.yarnpkg.com/ws/-/ws-8.18.0.tgz#0d7505a6eafe2b0e712d232b42279f53bc289bbc"
+  integrity sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==

-xdg-basedir@^4.0.0:
-  version "4.0.0"
-  resolved "https://registry.yarnpkg.com/xdg-basedir/-/xdg-basedir-4.0.0.tgz#4bc8d9984403696225ef83a1573cbbcb4e79db13"
-  integrity sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q==
+xdg-basedir@^5.0.1, xdg-basedir@^5.1.0:
+  version "5.1.0"
+  resolved "https://registry.yarnpkg.com/xdg-basedir/-/xdg-basedir-5.1.0.tgz#1efba19425e73be1bc6f2a6ceb52a3d2c884c0c9"
+  integrity sha512-GCPAHLvrIH13+c0SuacwvRYj2SxJXQ4kaVTT5xgL3kPrz56XxkF21IGhjSE1+W0aw7gpBWRGXLCPnPby6lSpmQ==

 xml-js@^1.6.11:
   version "1.6.11"

@@ -9920,7 +10987,7 @@ xml-js@^1.6.11:
   dependencies:
     sax "^1.2.4"

-xtend@^4.0.0, xtend@^4.0.1:
+xtend@^4.0.0:
   version "4.0.2"
   resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.2.tgz#bb72779f5fa465186b1f438f674fa347fdb5db54"
   integrity sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==

@@ -9930,12 +10997,7 @@ yallist@^3.0.2:
   resolved "https://registry.yarnpkg.com/yallist/-/yallist-3.1.1.tgz#dbb7daf9bfd8bac9ab45ebf602b8cbad0d5d08fd"
   integrity sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==

-yallist@^4.0.0:
-  version "4.0.0"
-  resolved "https://registry.yarnpkg.com/yallist/-/yallist-4.0.0.tgz#9bb92790d9c0effec63be73519e11a35019a3a72"
-  integrity sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==
-
-yaml@^1.10.0, yaml@^1.10.2, yaml@^1.7.2:
+yaml@^1.10.0, yaml@^1.7.2:
   version "1.10.2"
   resolved "https://registry.yarnpkg.com/yaml/-/yaml-1.10.2.tgz#2301c5ffbf12b467de8da2333a459e29e7920e4b"
   integrity sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==

@@ -9945,6 +11007,11 @@ yocto-queue@^0.1.0:
   resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-0.1.0.tgz#0294eb3dee05028d31ee1a5fa2c556a6aaf10a1b"
   integrity sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==

+yocto-queue@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-1.1.1.tgz#fef65ce3ac9f8a32ceac5a634f74e17e5b232110"
+  integrity sha512-b4JR1PFR10y1mKjhHY9LaGo6tmrgjit7hxVIeAmyMw3jegXR4dhYqLaQF5zMXZxY7tLpMyJeLjr1C4rLmkVe8g==
+
 zwitch@^1.0.0:
   version "1.0.5"
   resolved "https://registry.yarnpkg.com/zwitch/-/zwitch-1.0.5.tgz#d11d7381ffed16b742f6af7b3f223d5cd9fe9920"

 {
 #[cold]
 #[inline(never)]
-fn get_attr_no_attr_error<'v>(x: Value<'v>, attribute: &Symbol) -> anyhow::Error {
+fn get_attr_no_attr_error<'v>(x: Value<'v>, attribute: &Symbol) -> crate::Error {
     match did_you_mean(attribute.as_str(), x.dir_attr().iter().map(|s| s.as_str())) {
         None => ValueError::NoAttr(x.get_type().to_owned(), attribute.as_str().to_owned()).into(),
         Some(better) => ValueError::NoAttrDidYouMean(
@@ -1101,16 +1120,23 @@ fn get_attr_no_attr_error<'v>(x: Value<'v>, attribute: &Symbol) -> anyhow::Error
     }
 }

-pub(crate) enum MemberOrValue<'v> {
-    Member(FrozenValueNotSpecial),
+pub(crate) enum MemberOrValue<'v, 'a> {
+    Member(&'a UnboundValue),
     Value(Value<'v>),
 }

-impl<'v> MemberOrValue<'v> {
-    pub(crate) fn to_value(&self) -> Value<'v> {
+impl<'v, 'a> MemberOrValue<'v, 'a> {
+    #[inline]
+    pub(crate) fn invoke(
+        &self,
+        this: Value<'v>,
+        span: FrozenRef<'static, FrameSpan>,
+        args: &Arguments<'v, '_>,
+        eval: &mut Evaluator<'v, '_, '_>,
+    ) -> crate::Result<Value<'v>> {
         match self {
-            MemberOrValue::Member(x) => x.to_value(),
-            MemberOrValue::Value(x) => *x,
+            MemberOrValue::Member(member) => member.invoke_method(this, span, args, eval),
+            MemberOrValue::Value(value) => value.invoke_with_loc(Some(span), args, eval),
         }
     }
 }
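The `MemberOrValue` change above replaces an eager `to_value()` conversion with direct dispatch at call time: an unbound method is invoked with an explicit `this` receiver, while an ordinary value is simply called. A minimal sketch of that two-way dispatch, using simplified stand-in types rather than the starlark-rust ones:

```rust
// Sketch only: `MemberOrValue`, `invoke`, and the closure types below are
// simplified stand-ins for the starlark-rust UnboundValue/Value machinery.
enum MemberOrValue<'a> {
    // An unbound method: the receiver must be supplied at call time,
    // like `invoke_method(this, ...)` in the diff.
    Member(&'a dyn Fn(i64, i64) -> i64),
    // An already-callable value, invoked directly.
    Value(Box<dyn Fn(i64) -> i64>),
}

impl<'a> MemberOrValue<'a> {
    fn invoke(&self, this: i64, arg: i64) -> i64 {
        match self {
            MemberOrValue::Member(f) => f(this, arg),
            MemberOrValue::Value(f) => f(arg),
        }
    }
}

fn main() {
    let add = |this: i64, x: i64| this + x;
    let member = MemberOrValue::Member(&add);
    let value = MemberOrValue::Value(Box::new(|x| x * 2));
    assert_eq!(member.invoke(10, 5), 15);
    assert_eq!(value.invoke(10, 5), 10);
}
```

The payoff of this shape is that no intermediate "bound method" object has to be materialized just to make a call.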
@@ -1120,7 +1146,7 @@ pub(crate) fn get_attr_hashed_raw<'v>(
     x: Value<'v>,
     attribute: &Symbol,
     heap: &'v Heap,
-) -> anyhow::Result<MemberOrValue<'v>> {
+) -> crate::Result<MemberOrValue<'v, 'v>> {
     let aref = x.get_ref();
     if let Some(methods) = aref.vtable().methods() {
         if let Some(v) = methods.get_frozen_symbol(attribute) {
@@ -1137,11 +1163,11 @@ pub(crate) fn get_attr_hashed_bind<'v>(
     x: Value<'v>,
     attribute: &Symbol,
     heap: &'v Heap,
-) -> anyhow::Result<Value<'v>> {
+) -> crate::Result<Value<'v>> {
     let aref = x.get_ref();
     if let Some(methods) = aref.vtable().methods() {
         if let Some(v) = methods.get_frozen_symbol(attribute) {
-            return MaybeUnboundValue::new(v).bind(x, heap);
+            return v.bind(x, heap);
         }
     }
     match aref.get_attr_hashed(attribute.as_str_hashed(), heap) {
@@ -1154,7 +1180,7 @@ pub(crate) fn get_attr_hashed_bind<'v>(
     }
 }

-impl<'v, 'a, 'e> Compiler<'v, 'a, 'e> {
+impl<'v, 'a, 'e> Compiler<'v, 'a, 'e, '_> {
     fn expr_ident(&mut self, ident: &CstIdent) -> ExprCompiled {
         let resolved_ident = ident
             .node
@@ -1193,12 +1219,15 @@ impl<'v, 'a, 'e> Compiler<'v, 'a, 'e> {
         }
     }

-    fn opt_ctx<'s>(&'s mut self) -> OptCtx<'v, 'a, 's> {
+    fn opt_ctx<'s>(&'s mut self) -> OptCtx<'v, 'a, 'e, 's> {
         let param_count = self.current_scope().param_count();
         OptCtx::new(self.eval, param_count)
     }

-    pub(crate) fn expr(&mut self, expr: &CstExpr) -> IrSpanned<ExprCompiled> {
+    pub(crate) fn expr(
+        &mut self,
+        expr: &CstExpr,
+    ) -> Result<IrSpanned<ExprCompiled>, CompilerInternalError> {
         // println!("compile {}", expr.node);
         let span = FrameSpan::new(FrozenFileSpan::new(self.codemap, expr.span));
         let expr = match &expr.node {
@@ -1216,72 +1245,75 @@ impl<'v, 'a, 'e> Compiler<'v, 'a, 'e> {
                     // TODO(nga): unnecessary clone.
                     node: StmtP::Return(Some(*body.clone())),
                 };
-                self.function("lambda", signature_span, *scope_id, params, None, &suite)
+                self.function("lambda", signature_span, *scope_id, params, None, &suite)?
             }
             ExprP::Tuple(exprs) => {
-                let xs = exprs.map(|x| self.expr(x));
+                let xs = self.exprs(exprs)?;
                 ExprCompiled::tuple(xs, self.eval.module_env.frozen_heap())
             }
             ExprP::List(exprs) => {
-                let xs = exprs.map(|x| self.expr(x));
+                let xs = self.exprs(exprs)?;
                 ExprCompiled::List(xs)
             }
             ExprP::Dict(exprs) => {
-                let xs = exprs.map(|(k, v)| (self.expr(k), self.expr(v)));
+                let xs = exprs
+                    .iter()
+                    .map(|(k, v)| Ok((self.expr(k)?, self.expr(v)?)))
+                    .collect::<Result<_, _>>()?;
                 ExprCompiled::Dict(xs)
             }
             ExprP::If(cond_then_expr_else_expr) => {
                 let (cond, then_expr, else_expr) = &**cond_then_expr_else_expr;
-                let cond = self.expr(cond);
-                let then_expr = self.expr(then_expr);
-                let else_expr = self.expr(else_expr);
-                return ExprCompiled::if_expr(cond, then_expr, else_expr);
+                let cond = self.expr(cond)?;
+                let then_expr = self.expr(then_expr)?;
+                let else_expr = self.expr(else_expr)?;
+                return Ok(ExprCompiled::if_expr(cond, then_expr, else_expr));
             }
             ExprP::Dot(left, right) => {
-                let left = self.expr(left);
+                let left = self.expr(left)?;
                 let s = Symbol::new(&right.node);
                 ExprCompiled::dot(left, &s, &mut self.opt_ctx())
             }
             ExprP::Call(left, args) => {
-                let left = self.expr(left);
-                let args = self.args(args);
+                let left = self.expr(left)?;
+                let args = self.args(args)?;
                 CallCompiled::call(span, left, args, &mut self.opt_ctx())
             }
             ExprP::Index(array_index) => {
                 let (array, index) = &**array_index;
-                let array = self.expr(array);
-                let index = self.expr(index);
+                let array = self.expr(array)?;
+                let index = self.expr(index)?;
                 ExprCompiled::index(array, index, &mut self.opt_ctx())
             }
             ExprP::Index2(array_index0_index1) => {
                 let (array, index0, index1) = &**array_index0_index1;
-                let array = self.expr(array);
-                let index0 = self.expr(index0);
-                let index1 = self.expr(index1);
+                let array = self.expr(array)?;
+                let index0 = self.expr(index0)?;
+                let index1 = self.expr(index1)?;
                 ExprCompiled::index2(array, index0, index1)
             }
             ExprP::Slice(collection, start, stop, stride) => {
-                let collection = self.expr(collection);
-                let start = start.as_ref().map(|x| self.expr(x));
-                let stop = stop.as_ref().map(|x| self.expr(x));
-                let stride = stride.as_ref().map(|x| self.expr(x));
+                let collection = self.expr(collection)?;
+                let start = start.as_ref().map(|x| self.expr(x)).transpose()?;
+                let stop = stop.as_ref().map(|x| self.expr(x)).transpose()?;
+                let stride = stride.as_ref().map(|x| self.expr(x)).transpose()?;
                 ExprCompiled::slice(span, collection, start, stop, stride, &mut self.opt_ctx())
             }
             ExprP::Not(expr) => {
-                let expr = self.expr(expr);
-                return ExprCompiled::not(span, expr);
+                let expr = self.expr(expr)?;
+                return Ok(ExprCompiled::not(span, expr));
             }
             ExprP::Minus(expr) => {
-                let expr = self.expr(expr);
+                let expr = self.expr(expr)?;
                 ExprCompiled::un_op(span, &Builtin1::Minus, expr, &mut self.opt_ctx())
             }
            ExprP::Plus(expr) => {
-                let expr = self.expr(expr);
+                let expr = self.expr(expr)?;
                 ExprCompiled::un_op(span, &Builtin1::Plus, expr, &mut self.opt_ctx())
             }
             ExprP::BitNot(expr) => {
-                let expr = self.expr(expr);
+                let expr = self.expr(expr)?;
                 ExprCompiled::un_op(span, &Builtin1::BitNot, expr, &mut self.opt_ctx())
             }
             ExprP::Op(left, op, right) => {
@@ -1298,14 +1330,14 @@ impl<'v, 'a, 'e> Compiler<'v, 'a, 'e> {
                     Cow::Borrowed(&**right)
                 };

-                let l = self.expr(left);
-                let r = self.expr(&right);
+                let l = self.expr(left)?;
+                let r = self.expr(&right)?;
                 match op {
-                    BinOp::Or => return ExprCompiled::or(l, r),
-                    BinOp::And => return ExprCompiled::and(l, r),
-                    BinOp::Equal => return ExprCompiled::equals(l, r),
+                    BinOp::Or => return Ok(ExprCompiled::or(l, r)),
+                    BinOp::And => return Ok(ExprCompiled::and(l, r)),
+                    BinOp::Equal => return Ok(ExprCompiled::equals(l, r)),
                     BinOp::NotEqual => {
-                        return ExprCompiled::not(span, ExprCompiled::equals(l, r));
+                        return Ok(ExprCompiled::not(span, ExprCompiled::equals(l, r)));
                     }
                     BinOp::Less => ExprCompiled::bin_op(
                         Builtin2::Compare(CompareOp::Less),
@@ -1383,10 +1415,12 @@ impl<'v, 'a, 'e> Compiler<'v, 'a, 'e> {
                 }
             }
         }
-            ExprP::ListComprehension(x, for_, clauses) => self.list_comprehension(x, for_, clauses),
+            ExprP::ListComprehension(x, for_, clauses) => {
+                self.list_comprehension(x, for_, clauses)?
+            }
             ExprP::DictComprehension(k_v, for_, clauses) => {
                 let (k, v) = &**k_v;
-                self.dict_comprehension(k, v, for_, clauses)
+                self.dict_comprehension(k, v, for_, clauses)?
             }
             ExprP::Literal(x) => {
                 let val = x.compile(self.eval.module_env.frozen_heap());
@@ -1418,19 +1452,32 @@ impl<'v, 'a, 'e> Compiler<'v, 'a, 'e> {
                 let mut args = ArgsCompiledValue::default();
                 for expr in expressions {
-                    args.push_pos(self.expr(expr));
+                    args.push_pos(self.expr(expr)?);
                 }
                 CallCompiled::call(span, method, args, &mut self.opt_ctx())
             }
         };
-        IrSpanned { node: expr, span }
+        Ok(IrSpanned { node: expr, span })
     }

     /// Like `expr` but returns an expression optimized assuming
     /// only the truth of the result is needed.
-    pub(crate) fn expr_truth(&mut self, expr: &CstExpr) -> IrSpanned<ExprCompiledBool> {
-        let expr = self.expr(expr);
-        ExprCompiledBool::new(expr)
+    pub(crate) fn expr_truth(
+        &mut self,
+        expr: &CstExpr,
+    ) -> Result<IrSpanned<ExprCompiledBool>, CompilerInternalError> {
+        let expr = self.expr(expr)?;
+        Ok(ExprCompiledBool::new(expr))
+    }
+
+    pub(crate) fn exprs(
+        &mut self,
+        exprs: &[CstExpr],
+    ) -> Result<Vec<IrSpanned<ExprCompiled>>, CompilerInternalError> {
+        exprs
+            .iter()
+            .map(|e| self.expr(e))
+            .collect::<Result<_, _>>()
     }
 }
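The hunks above make expression compilation fallible: `expr` now returns `Result<_, CompilerInternalError>`, every recursive call propagates with `?`, and optional children such as a slice's `start`/`stop`/`stride` use `Option::transpose` to turn `Option<Result<T, E>>` into `Result<Option<T>, E>`. A self-contained sketch of that idiom, with hypothetical types in place of the real compiler's:

```rust
// Sketch only: `Compiled`, `CompileError`, and `compile` are hypothetical
// stand-ins for IrSpanned<ExprCompiled>, CompilerInternalError, and
// Compiler::expr.
#[derive(Debug)]
struct CompileError(String);

#[derive(Debug)]
struct Compiled(u32);

fn compile(node: &u32) -> Result<Compiled, CompileError> {
    if *node == 0 {
        Err(CompileError("zero is not compilable".to_owned()))
    } else {
        Ok(Compiled(*node))
    }
}

// An optional child is compiled with `map` + `transpose`: the
// `Option<Result<Compiled, CompileError>>` produced by `map` becomes
// `Result<Option<Compiled>, CompileError>`, so `?` can short-circuit.
fn compile_slice_bound(bound: Option<&u32>) -> Result<Option<Compiled>, CompileError> {
    let bound = bound.map(compile).transpose()?;
    Ok(bound)
}

fn main() {
    assert!(compile_slice_bound(None).unwrap().is_none());
    assert!(compile_slice_bound(Some(&1)).unwrap().is_some());
    assert!(compile_slice_bound(Some(&0)).is_err());
}
```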
diff --git a/starlark-rust/starlark/src/eval/compiler/mod.rs b/starlark-rust/starlark/src/eval/compiler/mod.rs
deleted file mode 100644
index cf971bb145369..0000000000000
--- a/starlark-rust/starlark/src/eval/compiler/mod.rs
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright 2019 The Starlark in Rust Authors.
- * Copyright (c) Facebook, Inc. and its affiliates.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-pub(crate) mod args;
-pub(crate) mod call;
-pub(crate) mod compr;
-pub(crate) mod constants;
-pub(crate) mod def;
-pub(crate) mod def_inline;
-pub(crate) mod expr;
-pub(crate) mod expr_bool;
-pub(crate) mod known;
-pub(crate) mod module;
-pub(crate) mod opt_ctx;
-pub(crate) mod scope;
-pub(crate) mod small_vec_1;
-pub(crate) mod span;
-pub(crate) mod stmt;
-pub(crate) mod types;
-
-use starlark_syntax::diagnostic::Diagnostic;
-use starlark_syntax::eval_exception::EvalException;
-
-use crate::codemap::CodeMap;
-use crate::environment::Globals;
-use crate::eval::compiler::scope::ModuleScopeData;
-use crate::eval::compiler::scope::ScopeId;
-use crate::eval::compiler::scope::ScopeNames;
-use crate::eval::runtime::frame_span::FrameSpan;
-use crate::eval::Evaluator;
-use crate::values::FrozenRef;
-
-#[cold]
-#[inline(never)]
-fn add_span_to_error(e: anyhow::Error, span: FrameSpan, eval: &Evaluator) -> anyhow::Error {
-    Diagnostic::modify(e, |d: &mut Diagnostic| {
-        d.set_span(span.span.span(), &span.span.file());
-        d.set_call_stack(|| eval.call_stack.to_diagnostic_frames(span.inlined_frames));
-    })
-}
-
-#[cold]
-#[inline(never)]
-pub(crate) fn add_span_to_expr_error(
-    e: anyhow::Error,
-    span: FrameSpan,
-    eval: &Evaluator,
-) -> EvalException {
-    EvalException::unchecked_new(add_span_to_error(e, span, eval))
-}
-
-/// Convert syntax error to spanned evaluation exception
-#[inline(always)]
-pub(crate) fn expr_throw<'v, T>(
-    r: anyhow::Result<T>,
-    span: FrameSpan,
-    eval: &Evaluator<'v, '_>,
-) -> Result<T, EvalException> {
-    match r {
-        Ok(v) => Ok(v),
-        Err(e) => Err(add_span_to_expr_error(e, span, eval)),
-    }
-}
-
-pub(crate) struct Compiler<'v, 'a, 'e> {
-    pub(crate) eval: &'e mut Evaluator<'v, 'a>,
-    pub(crate) scope_data: ModuleScopeData<'v>,
-    pub(crate) locals: Vec<ScopeId>,
-    pub(crate) globals: FrozenRef<'static, Globals>,
-    pub(crate) codemap: FrozenRef<'static, CodeMap>,
-    pub(crate) check_types: bool,
-    pub(crate) top_level_stmt_count: usize,
-    pub(crate) allow_string_literals_in_type_expr: bool,
-    /// Set with `@starlark-rust: typecheck`.
-    pub(crate) typecheck: bool,
-}
-
-impl Compiler<'_, '_, '_> {
-    pub(crate) fn enter_scope(&mut self, scope_id: ScopeId) {
-        self.locals.push(scope_id);
-    }
-
-    pub(crate) fn exit_scope(&mut self) -> ScopeId {
-        self.locals.pop().unwrap()
-    }
-
-    pub(crate) fn current_scope(&self) -> &ScopeNames {
-        self.scope_data.get_scope(*self.locals.last().unwrap())
-    }
-}
diff --git a/starlark-rust/starlark/src/eval/compiler/module.rs b/starlark-rust/starlark/src/eval/compiler/module.rs
index 914b862a386eb..2660953c7d816 100644
--- a/starlark-rust/starlark/src/eval/compiler/module.rs
+++ b/starlark-rust/starlark/src/eval/compiler/module.rs
@@ -55,7 +55,7 @@ enum ModuleError {
     TopLevelStmtCountMismatch,
 }

-impl<'v> Compiler<'v, '_, '_> {
+impl<'v> Compiler<'v, '_, '_, '_> {
     fn eval_load(&mut self, load: Spanned<&LoadP>) -> Result<(), EvalException> {
         let name = &load.node.module.node;

@@ -64,7 +64,7 @@ impl<'v> Compiler<'v, '_, '_> {
         let loadenv = match self.eval.loader.as_ref() {
             None => {
                 return Err(add_span_to_expr_error(
-                    ModuleError::NoImportsAvailable(name.to_owned()).into(),
+                    crate::Error::new_other(ModuleError::NoImportsAvailable(name.to_owned())),
                     span,
                     self.eval,
                 ));

@@ -101,14 +101,16 @@ impl<'v> Compiler<'v, '_, '_> {
         local_names: FrozenRef<'static, [FrozenStringValue]>,
     ) -> Result<Bc, EvalException> {
         if matches!(stmt.node, StmtP::Statements(_) | StmtP::Load(_)) {
-            return Err(EvalException::new(
+            return Err(EvalException::new_anyhow(
                 ModuleError::UnexpectedStatement.into(),
                 stmt.span,
                 &self.codemap,
             ));
         }

-        let stmt = self.module_top_level_stmt(stmt);
+        let stmt = self
+            .module_top_level_stmt(stmt)
+            .map_err(|e| e.into_eval_exception())?;
         let bc = stmt.as_bc(
             &self.compile_context(false),
             local_names,

@@ -137,7 +139,7 @@ impl<'v> Compiler<'v, '_, '_> {
         let mut stmts = top_level_stmts_mut(stmt);

         if stmts.len() != self.top_level_stmt_count {
-            return Err(EvalException::new(
+            return Err(EvalException::new_anyhow(
                 ModuleError::TopLevelStmtCountMismatch.into(),
                 stmt.span,
                 &self.codemap,
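These `module.rs` hunks belong to a broader migration from bare `anyhow::Error` to a typed crate-level error: constructors like `crate::Error::new_other` and `EvalException::new_anyhow` mark the remaining untyped paths explicitly, and `into_eval_exception` converts between the two layers. A rough sketch of that layering; the names mirror the diff, but the implementation is illustrative rather than the real starlark-rust one, and it assumes the `anyhow` crate as a dependency:

```rust
use std::fmt;

// Crate-level error kinds; the real enum has more variants.
#[derive(Debug)]
enum ErrorKind {
    Scope(anyhow::Error),
    Other(anyhow::Error),
}

#[derive(Debug)]
struct Error {
    kind: ErrorKind,
}

impl Error {
    fn new_kind(kind: ErrorKind) -> Error {
        Error { kind }
    }

    // Catch-all constructor for errors that have no dedicated kind yet,
    // as used for ModuleError::NoImportsAvailable above.
    fn new_other(e: impl std::error::Error + Send + Sync + 'static) -> Error {
        Error::new_kind(ErrorKind::Other(anyhow::Error::new(e)))
    }
}

// A plain std error standing in for ModuleError.
#[derive(Debug)]
struct NoImportsAvailable(String);

impl fmt::Display for NoImportsAvailable {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "`{}` is not available", self.0)
    }
}

impl std::error::Error for NoImportsAvailable {}

fn main() {
    let err = Error::new_other(NoImportsAvailable("foo.bzl".to_owned()));
    let scope_err = Error::new_kind(ErrorKind::Scope(anyhow::anyhow!("unbound variable")));
    println!("{:?}\n{:?}", err.kind, scope_err.kind);
}
```

The design point is that each call site must now say which kind of failure it produces, instead of collapsing everything into one opaque `anyhow::Error`.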
+    fn eval(&mut self) -> Option<&mut Evaluator<'v, 'a, 'e>> {
         Some(self)
     }
 
@@ -69,17 +69,17 @@ impl<'v, 'a> OptCtxEval<'v, 'a> for Evaluator<'v, 'a> {
 /// We perform optimization
 /// * during compilation of AST to IR, and
 /// * when freezing the heap.
-pub(crate) struct OptCtx<'v, 'a, 'e> {
-    pub(crate) eval: &'e mut dyn OptCtxEval<'v, 'a>,
+pub(crate) struct OptCtx<'v, 'a, 'e: 'a, 'x> {
+    pub(crate) eval: &'x mut dyn OptCtxEval<'v, 'a, 'e>,
     /// Current function parameter slot count. Zero when compiling module.
     pub(crate) param_count: u32,
 }
 
-impl<'v, 'a, 'e> OptCtx<'v, 'a, 'e> {
+impl<'v, 'a, 'e: 'a, 'x> OptCtx<'v, 'a, 'e, 'x> {
     pub(crate) fn new(
-        eval: &'e mut dyn OptCtxEval<'v, 'a>,
+        eval: &'x mut dyn OptCtxEval<'v, 'a, 'e>,
         param_count: u32,
-    ) -> OptCtx<'v, 'a, 'e> {
+    ) -> OptCtx<'v, 'a, 'e, 'x> {
         OptCtx { eval, param_count }
     }
 
@@ -91,7 +91,7 @@ impl<'v, 'a, 'e> OptCtx<'v, 'a, 'e> {
         self.eval.frozen_heap()
     }
 
-    pub(crate) fn eval(&mut self) -> Option<&mut Evaluator<'v, 'a>> {
+    pub(crate) fn eval(&mut self) -> Option<&mut Evaluator<'v, 'a, 'e>> {
         self.eval.eval()
     }
 
diff --git a/starlark-rust/starlark/src/eval/compiler/scope.rs b/starlark-rust/starlark/src/eval/compiler/scope.rs
new file mode 100644
index 0000000000000..f96581d58a84f
--- /dev/null
+++ b/starlark-rust/starlark/src/eval/compiler/scope.rs
@@ -0,0 +1,1210 @@
+/*
+ * Copyright 2019 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+pub(crate) mod payload;
+pub(crate) mod scope_resolver_globals;
+mod tests;
+
+use std::collections::HashMap;
+use std::iter;
+use std::marker::PhantomData;
+use std::mem;
+
+use dupe::Dupe;
+use starlark_derive::VisitSpanMut;
+use starlark_map::small_map;
+use starlark_map::small_map::SmallMap;
+use starlark_syntax::eval_exception::EvalException;
+use starlark_syntax::syntax::ast::AssignIdent;
+use starlark_syntax::syntax::ast::AssignP;
+use starlark_syntax::syntax::ast::AssignTarget;
+use starlark_syntax::syntax::ast::AstAssignIdentP;
+use starlark_syntax::syntax::ast::AstStmt;
+use starlark_syntax::syntax::ast::ClauseP;
+use starlark_syntax::syntax::ast::DefP;
+use starlark_syntax::syntax::ast::ExprP;
+use starlark_syntax::syntax::ast::ForClauseP;
+use starlark_syntax::syntax::ast::ForP;
+use starlark_syntax::syntax::ast::LambdaP;
+use starlark_syntax::syntax::ast::LoadArgP;
+use starlark_syntax::syntax::ast::Stmt;
+use starlark_syntax::syntax::ast::StmtP;
+use starlark_syntax::syntax::ast::Visibility;
+use starlark_syntax::syntax::top_level_stmts::top_level_stmts_mut;
+use starlark_syntax::syntax::uniplate::VisitMut;
+
+use crate::codemap::CodeMap;
+use crate::codemap::Span;
+use crate::environment::names::MutableNames;
+use crate::environment::slots::ModuleSlotId;
+use crate::environment::Module;
+use crate::errors::did_you_mean::did_you_mean;
+use crate::eval::compiler::def::CopySlotFromParent;
+use crate::eval::compiler::scope::payload::CstAssignIdent;
+use crate::eval::compiler::scope::payload::CstAssignTarget;
+use crate::eval::compiler::scope::payload::CstExpr;
+use crate::eval::compiler::scope::payload::CstIdent;
+use crate::eval::compiler::scope::payload::CstParameter;
+use crate::eval::compiler::scope::payload::CstPayload;
+use crate::eval::compiler::scope::payload::CstStmt;
+use crate::eval::compiler::scope::payload::CstStmtFromAst;
+use crate::eval::compiler::scope::payload::CstTypeExpr;
+use crate::eval::compiler::scope::scope_resolver_globals::ScopeResolverGlobals;
+use crate::eval::runtime::slots::LocalSlotIdCapturedOrNot;
+use crate::syntax::Dialect;
+use crate::typing::error::InternalError;
+use crate::typing::Interface;
+use crate::values::FrozenHeap;
+use crate::values::FrozenRef;
+use crate::values::FrozenStringValue;
+use crate::values::FrozenValue;
+
+#[derive(Debug, thiserror::Error)]
+enum ScopeError {
+    #[error("Variable `{0}` not found")]
+    VariableNotFound(String),
+    #[error("Variable `{0}` not found, did you mean `{1}`?")]
+    VariableNotFoundDidYouMean(String, String),
+    #[error("Identifiers in type expressions can only refer to globals or builtins: `{0}`")]
+    TypeExpressionGlobalOrBuiltin(String),
+}
+
+impl From<ScopeError> for crate::Error {
+    fn from(e: ScopeError) -> Self {
+        crate::Error::new_kind(crate::ErrorKind::Scope(anyhow::Error::new(e)))
+    }
+}
+
+/// All scopes and bindings in a module.
+struct ModuleScopeBuilder<'a> {
+    scope_data: ModuleScopeData<'a>,
+    module: &'a MutableNames,
+    frozen_heap: &'a FrozenHeap,
+    module_bindings: SmallMap<FrozenStringValue, BindingId>,
+    // The first scope is a module-level scope (including comprehensions in module scope).
+    // The rest are scopes for functions (which include their comprehensions).
+    locals: Vec<ScopeId>,
+    unscopes: Vec<Unscope>,
+    codemap: FrozenRef<'static, CodeMap>,
+    globals: ScopeResolverGlobals,
+    errors: Vec<EvalException>,
+    top_level_stmt_count: usize,
+}
+
+pub(crate) struct ModuleScopes<'f> {
+    pub(crate) scope_data: ModuleScopeData<'f>,
+    pub(crate) module_slot_count: u32,
+    pub(crate) cst: CstStmt,
+    /// Number of top-level statements in the module.
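+    /// Recorded here so later compilation stages can verify the CST was not
+    /// rearranged after analysis (see `ModuleError::TopLevelStmtCountMismatch`
+    /// in the `module.rs` hunk above).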
+    pub(crate) top_level_stmt_count: usize,
+}
+
+struct UnscopeBinding {
+    /// Variable mappings in local scope are overwritten by comprehension variables.
+    ///
+    /// When we enter comprehension, we replace local scope variable slots with comprehension
+    /// scope slots. This field stores the original slot in the local scope,
+    /// or `None` if there was no mapping for the variable.
+    ///
+    /// When we pop the comprehension scope, we restore the mapping from this value.
+    undo: Option<(LocalSlotIdCapturedOrNot, BindingId)>,
+}
+
+#[derive(Default)]
+struct Unscope(SmallMap<FrozenStringValue, UnscopeBinding>);
+
+#[derive(Default, Debug)]
+pub(crate) struct ScopeNames<'f> {
+    /// `Some` when scope is initialized.
+    /// For module scope, the value is zero.
+    pub param_count: Option<u32>,
+    /// Slots this scope uses, including for parameters and `parent`.
+    /// Indexed by [`LocalSlotId`], values are variable names.
+    pub used: Vec<FrozenStringValue>,
+    /// The names that are in this scope
+    pub mp: SmallMap<FrozenStringValue, (LocalSlotIdCapturedOrNot, BindingId)>,
+    /// Slots to copy from the parent.
+    /// Module-level identifiers are not copied over, to avoid excess copying.
+    pub parent: Vec<CopySlotFromParent>,
+    /// We store frozen strings.
+    _heap: PhantomData<&'f ()>,
+}
+
+impl<'f> ScopeNames<'f> {
+    fn set_param_count(&mut self, param_count: u32) {
+        assert!(self.param_count.is_none());
+        self.param_count = Some(param_count);
+    }
+
+    pub(crate) fn param_count(&self) -> u32 {
+        self.param_count
+            .expect("param_count must be set during analysis")
+    }
+
+    fn copy_parent(
+        &mut self,
+        parent_slot: LocalSlotIdCapturedOrNot,
+        binding_id: BindingId,
+        name: FrozenStringValue,
+    ) -> LocalSlotIdCapturedOrNot {
+        assert!(self.get_name(name).is_none()); // Or we'll be overwriting our variable
+        let res = self.add_name(name, binding_id);
+        self.parent.push(CopySlotFromParent {
+            parent: parent_slot,
+            child: res,
+        });
+        res
+    }
+
+    fn next_slot(&mut self, name: FrozenStringValue) -> LocalSlotIdCapturedOrNot {
+        let res = LocalSlotIdCapturedOrNot(self.used.len().try_into().unwrap());
+        self.used.push(name);
+        res
+    }
+
+    fn add_name(
+        &mut self,
+        name: FrozenStringValue,
+        binding_id: BindingId,
+    ) -> LocalSlotIdCapturedOrNot {
+        let slot = self.next_slot(name);
+        let old = self.mp.insert_hashed(name.get_hashed(), (slot, binding_id));
+        assert!(old.is_none());
+        slot
+    }
+
+    fn add_scoped(
+        &mut self,
+        name: FrozenStringValue,
+        binding_id: BindingId,
+        unscope: &mut Unscope,
+    ) -> LocalSlotIdCapturedOrNot {
+        let slot = self.next_slot(name);
+        let undo = match self.mp.get_mut_hashed(name.get_hashed().as_ref()) {
+            Some(v) => {
+                let old = *v;
+                *v = (slot, binding_id);
+                Some(old)
+            }
+            None => {
+                self.mp.insert_hashed(name.get_hashed(), (slot, binding_id));
+                None
+            }
+        };
+        assert!(
+            unscope
+                .0
+                .insert_hashed(name.get_hashed(), UnscopeBinding { undo })
+                .is_none()
+        );
+        slot
+    }
+
+    fn unscope(&mut self, unscope: Unscope) {
+        for (name, UnscopeBinding { undo }) in unscope.0 {
+            match undo {
+                None => {
+                    self.mp.shift_remove(&name);
+                }
+                Some(v) => *self.mp.get_mut(&name).unwrap() = v,
+            }
+        }
+    }
+
+    fn get_name(&self, name: FrozenStringValue) -> Option<(LocalSlotIdCapturedOrNot, BindingId)> {
+        self.mp.get_hashed(name.get_hashed().as_ref()).copied()
+    }
+}
+
+#[derive(Copy, Clone, Dupe, Debug)]
+pub(crate) enum Slot {
+    /// Top-level module scope.
+    Module(ModuleSlotId),
+    /// Local scope, always mutable.
+    Local(LocalSlotIdCapturedOrNot),
+}
+
+#[derive(Clone, Copy, Dupe)]
+enum ResolveIdentScope {
+    /// Resolving normal identifier.
+    Any,
+    /// Resolving identifier in type expression.
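+    /// E.g. in `def f() -> MyType: ...` the identifier `MyType` must resolve
+    /// to a module-level or builtin value; if it resolves to a local,
+    /// `resolve_ident` below reports `ScopeError::TypeExpressionGlobalOrBuiltin`.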
+    GlobalForTypeExpression,
+}
+
+impl<'f> ModuleScopeBuilder<'f> {
+    fn top_scope_id(&self) -> ScopeId {
+        *self.locals.last().unwrap()
+    }
+
+    fn scope_at_level(&self, level: usize) -> &ScopeNames<'f> {
+        let scope_id = self.locals[level];
+        self.scope_data.get_scope(scope_id)
+    }
+
+    fn scope_at_level_mut(&mut self, level: usize) -> &mut ScopeNames<'f> {
+        let scope_id = self.locals[level];
+        self.scope_data.mut_scope(scope_id)
+    }
+
+    /// Resolve symbols in a module.
+    ///
+    /// Checks all the symbols are resolved to locals/globals/captured/etc.
+    /// Does not check types yet, but validates type expressions.
+    ///
+    /// This function does not fail; errors are stored in the `errors` field.
+    fn enter_module(
+        module: &'f MutableNames,
+        frozen_heap: &'f FrozenHeap,
+        loads: &HashMap<String, Interface>,
+        stmt: AstStmt,
+        globals: ScopeResolverGlobals,
+        codemap: FrozenRef<'static, CodeMap>,
+        dialect: &Dialect,
+    ) -> (CstStmt, ModuleScopeBuilder<'f>) {
+        let mut scope_data = ModuleScopeData::new();
+        let scope_id = scope_data.new_scope().0;
+        let mut cst = CstStmt::from_ast(stmt, &mut scope_data, loads);
+
+        let mut top_level_stmts = top_level_stmts_mut(&mut cst);
+
+        // Not really important, sanity check
+        assert_eq!(scope_id, ScopeId::module());
+
+        scope_data.mut_scope(scope_id).set_param_count(0);
+
+        let mut locals: SmallMap<FrozenStringValue, BindingId> = SmallMap::new();
+
+        let existing_module_names_and_visibilities = module.all_names_and_visibilities();
+        for (name, vis) in existing_module_names_and_visibilities.iter() {
+            let (binding_id, _binding) = scope_data.new_binding(
+                *name,
+                BindingSource::FromModule,
+                *vis,
+                AssignCount::AtMostOnce,
+            );
+            locals.insert_hashed(name.get_hashed(), binding_id);
+        }
+
+        for stmt in top_level_stmts.iter_mut() {
+            Stmt::collect_defines(
+                stmt,
+                InLoop::No,
+                &mut scope_data,
+                frozen_heap,
+                &mut locals,
+                dialect,
+            );
+        }
+
+        let mut module_bindings = SmallMap::new();
+        for (x, binding_id) in locals {
+            let binding = scope_data.mut_binding(binding_id);
+            let slot = module.add_name_visibility(x, binding.vis);
+            binding.init_slot(Slot::Module(slot), &codemap).unwrap();
+            let old_binding = module_bindings.insert_hashed(x.get_hashed(), binding_id);
+            assert!(old_binding.is_none());
+        }
+
+        // Here we traverse the AST a second time to collect scopes of defs
+        for stmt in top_level_stmts.iter_mut() {
+            ModuleScopeBuilder::collect_defines_recursively(
+                &mut scope_data,
+                stmt,
+                frozen_heap,
+                dialect,
+                &codemap,
+            );
+        }
+        let mut scope = ModuleScopeBuilder {
+            scope_data,
+            frozen_heap,
+            module,
+            module_bindings,
+            locals: vec![scope_id],
+            unscopes: Vec::new(),
+            codemap,
+            globals,
+            errors: Vec::new(),
+            top_level_stmt_count: top_level_stmts.len(),
+        };
+        for stmt in top_level_stmts.iter_mut() {
+            scope.resolve_idents(stmt);
+        }
+        (cst, scope)
+    }
+}
+
+impl<'f> ModuleScopeBuilder<'f> {
+    // Number of module slots I need, a struct holding all scopes, and module bindings.
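+    // E.g. for a freshly created module containing just
+    //
+    //     x = 1
+    //     def f():
+    //         return x
+    //
+    // analysis creates module bindings for `x` and `f` (module slots 0 and 1,
+    // assuming no pre-existing names in the module), so `exit_module` reports
+    // a slot count of 2.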
+    fn exit_module(
+        mut self,
+    ) -> (
+        u32,
+        ModuleScopeData<'f>,
+        SmallMap<FrozenStringValue, BindingId>,
+    ) {
+        assert!(self.locals.len() == 1);
+        assert!(self.unscopes.is_empty());
+        let scope_id = self.locals.pop().unwrap();
+        assert!(scope_id == ScopeId::module());
+        let scope = self.scope_data.get_scope(scope_id);
+        assert!(scope.parent.is_empty());
+        (
+            self.module.slot_count(),
+            self.scope_data,
+            self.module_bindings,
+        )
+    }
+}
+
+impl<'f> ModuleScopes<'f> {
+    pub(crate) fn check_module_err(
+        module: &'f MutableNames,
+        frozen_heap: &'f FrozenHeap,
+        loads: &HashMap<String, Interface>,
+        stmt: AstStmt,
+        globals: ScopeResolverGlobals,
+        codemap: FrozenRef<'static, CodeMap>,
+        dialect: &Dialect,
+    ) -> crate::Result<ModuleScopes<'f>> {
+        let (errors, scopes) =
+            ModuleScopes::check_module(module, frozen_heap, loads, stmt, globals, codemap, dialect);
+        if let Some(error) = errors.into_iter().next() {
+            return Err(error.into_error());
+        }
+        Ok(scopes)
+    }
+
+    pub(crate) fn check_module(
+        module: &'f MutableNames,
+        frozen_heap: &'f FrozenHeap,
+        loads: &HashMap<String, Interface>,
+        stmt: AstStmt,
+        globals: ScopeResolverGlobals,
+        codemap: FrozenRef<'static, CodeMap>,
+        dialect: &Dialect,
+    ) -> (Vec<EvalException>, ModuleScopes<'f>) {
+        let (stmt, mut scope) = ModuleScopeBuilder::enter_module(
+            module,
+            frozen_heap,
+            loads,
+            stmt,
+            globals,
+            codemap,
+            dialect,
+        );
+        let top_level_stmt_count = scope.top_level_stmt_count;
+        let errors = mem::take(&mut scope.errors);
+        let (module_slot_count, scope_data, _module_bindings) = scope.exit_module();
+        (
+            errors,
+            ModuleScopes {
+                cst: stmt,
+                scope_data,
+                module_slot_count,
+                top_level_stmt_count,
+            },
+        )
+    }
+}
+
+impl<'f> ModuleScopeBuilder<'f> {
+    fn collect_defines_in_def(
+        scope_data: &mut ModuleScopeData,
+        scope_id: ScopeId,
+        params: &mut [CstParameter],
+        body: Option<&mut CstStmt>,
+
+        frozen_heap: &FrozenHeap,
+        dialect: &Dialect,
+        codemap: &CodeMap,
+    ) {
+        let params: Vec<&mut AstAssignIdentP<_>> = params
+            .iter_mut()
+            .filter_map(|p| p.node.split_mut().0)
+            .collect::<Vec<_>>();
+        scope_data
+            .mut_scope(scope_id)
+            .set_param_count(params.len().try_into().unwrap());
+        let mut locals: SmallMap<FrozenStringValue, BindingId> = SmallMap::new();
+        for p in params {
+            let name = frozen_heap.alloc_str_intern(&p.ident);
+            // Subtle invariant: the slots for the params must be ordered and at the
+            // beginning
+            let binding_id = scope_data
+                .new_binding(
+                    name,
+                    BindingSource::Source(p.span),
+                    Visibility::Public,
+                    AssignCount::AtMostOnce,
+                )
+                .0;
+            p.payload = Some(binding_id);
+            let old_local = locals.insert_hashed(name.get_hashed(), binding_id);
+            assert!(old_local.is_none());
+        }
+        if let Some(code) = body {
+            Stmt::collect_defines(
+                code,
+                InLoop::No,
+                scope_data,
+                frozen_heap,
+                &mut locals,
+                dialect,
+            );
+        }
+        for (name, binding_id) in locals.into_iter() {
+            let slot = scope_data.mut_scope(scope_id).add_name(name, binding_id);
+            let binding = scope_data.mut_binding(binding_id);
+            binding.init_slot(Slot::Local(slot), codemap).unwrap();
+        }
+    }
+
+    fn collect_defines_recursively(
+        scope_data: &mut ModuleScopeData,
+        code: &mut CstStmt,
+
+        frozen_heap: &FrozenHeap,
+        dialect: &Dialect,
+        codemap: &CodeMap,
+    ) {
+        if let StmtP::Def(DefP {
+            name: _,
+            params,
+            return_type: _,
+            body,
+            payload: scope_id,
+        }) = &mut code.node
+        {
+            // Here we traverse the AST twice: once for this def scope,
+            // a second time below for nested defs.
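+            // E.g. for `def f(a, b): c = 1` this records `param_count == 2`
+            // for `f`'s scope, with `a` and `b` in the first two local slots
+            // (parameters come first, per the invariant in
+            // `collect_defines_in_def`) and `c` in the next one.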
+            Self::collect_defines_in_def(
+                scope_data,
+                *scope_id,
+                params,
+                Some(body),
+                frozen_heap,
+                dialect,
+                codemap,
+            );
+        }
+
+        code.visit_children_mut(&mut |visit| match visit {
+            VisitMut::Expr(e) => Self::collect_defines_recursively_in_expr(
+                scope_data,
+                e,
+                frozen_heap,
+                dialect,
+                codemap,
+            ),
+            VisitMut::Stmt(s) => {
+                Self::collect_defines_recursively(scope_data, s, frozen_heap, dialect, codemap)
+            }
+        });
+    }
+
+    fn collect_defines_recursively_in_expr(
+        scope_data: &mut ModuleScopeData,
+        code: &mut CstExpr,
+
+        frozen_heap: &FrozenHeap,
+        dialect: &Dialect,
+        codemap: &CodeMap,
+    ) {
+        if let ExprP::Lambda(LambdaP {
+            params,
+            body: _,
+            payload: scope_id,
+        }) = &mut code.node
+        {
+            Self::collect_defines_in_def(
+                scope_data,
+                *scope_id,
+                params,
+                None,
+                frozen_heap,
+                dialect,
+                codemap,
+            );
+        }
+
+        code.visit_expr_mut(|e| {
+            Self::collect_defines_recursively_in_expr(scope_data, e, frozen_heap, dialect, codemap)
+        });
+    }
+
+    fn resolve_idents(&mut self, code: &mut CstStmt) {
+        match &mut code.node {
+            StmtP::Def(DefP {
+                name: _,
+                params,
+                return_type,
+                body,
+                payload: scope_id,
+            }) => self.resolve_idents_in_def(
+                *scope_id,
+                params,
+                return_type.as_mut().map(|r| &mut **r),
+                Some(body),
+                None,
+            ),
+            StmtP::Assign(AssignP { lhs, ty, rhs }) => {
+                self.resolve_idents_in_assign(lhs);
+                if let Some(ty) = ty {
+                    self.resolve_idents_in_type_expr(ty);
+                }
+                self.resolve_idents_in_expr(rhs);
+            }
+            _ => code.visit_children_mut(|visit| match visit {
+                VisitMut::Stmt(stmt) => self.resolve_idents(stmt),
+                VisitMut::Expr(expr) => self.resolve_idents_in_expr(expr),
+            }),
+        }
+    }
+
+    fn resolve_idents_in_assign(&mut self, assign: &mut CstAssignTarget) {
+        assign.visit_expr_mut(|expr| self.resolve_idents_in_expr(expr));
+    }
+
+    fn resolve_idents_in_def(
+        &mut self,
+        scope_id: ScopeId,
+        params: &mut [CstParameter],
+        ret: Option<&mut CstTypeExpr>,
+        body_stmt: Option<&mut CstStmt>,
+        body_expr: Option<&mut CstExpr>,
+    ) {
+        for param in params {
+            let (_, ty, def) = param.split_mut();
+            if let Some(ty) = ty {
+                self.resolve_idents_in_type_expr(ty);
+            }
+            if let Some(def) = def {
+                self.resolve_idents_in_expr(def);
+            }
+        }
+        if let Some(ret) = ret {
+            self.resolve_idents_in_type_expr(ret);
+        }
+
+        self.enter_def(scope_id);
+        if let Some(body_stmt) = body_stmt {
+            self.resolve_idents(body_stmt);
+        }
+        if let Some(body_expr) = body_expr {
+            self.resolve_idents_in_expr(body_expr);
+        }
+        self.exit_def();
+    }
+
+    fn resolve_idents_in_expr_impl(&mut self, scope: ResolveIdentScope, expr: &mut CstExpr) {
+        match &mut expr.node {
+            ExprP::Identifier(ident) => self.resolve_ident(scope, ident),
+            ExprP::Lambda(LambdaP {
+                params,
+                body,
+                payload: scope_id,
+            }) => self.resolve_idents_in_def(*scope_id, params, None, None, Some(body)),
+            ExprP::ListComprehension(expr, first_for, clauses) => {
+                self.resolve_idents_in_compr(&mut [expr], first_for, clauses)
+            }
+            ExprP::DictComprehension(k_v, first_for, clauses) => {
+                let (k, v) = &mut **k_v;
+                self.resolve_idents_in_compr(&mut [k, v], first_for, clauses)
+            }
+            _ => expr.visit_expr_mut(|expr| self.resolve_idents_in_expr_impl(scope, expr)),
+        }
+    }
+
+    fn resolve_idents_in_expr(&mut self, expr: &mut CstExpr) {
+        self.resolve_idents_in_expr_impl(ResolveIdentScope::Any, expr);
+    }
+
+    fn resolve_idents_in_type_expr(&mut self, expr: &mut CstTypeExpr) {
+        self.resolve_idents_in_expr_impl(
+            ResolveIdentScope::GlobalForTypeExpression,
+            &mut expr.node.expr,
+        );
+    }
+
+    fn current_scope_all_visible_names_for_did_you_mean(&self) -> Option<Vec<String>> {
+        // It is OK to return non-unique identifiers
+        let mut r: Vec<String> = Vec::new();
+        for &scope_id in self.locals.iter().rev() {
+            let scope = self.scope_data.get_scope(scope_id);
+            r.extend(scope.mp.keys().map(|s| s.as_str().to_owned()));
+        }
+        r.extend(self.module_bindings.keys().map(|s| s.as_str().to_owned()));
+        r.extend(self.globals.names()?);
+        Some(r)
+    }
+
+    #[cold]
+    fn variable_not_found_err(&self, ident: &CstIdent) -> EvalException {
+        let variants = self
+            .current_scope_all_visible_names_for_did_you_mean()
+            .unwrap_or_default();
+        let better = did_you_mean(
+            ident.node.ident.as_str(),
+            variants.iter().map(|s| s.as_str()),
+        );
+        EvalException::new(
+            match better {
+                Some(better) => ScopeError::VariableNotFoundDidYouMean(
+                    ident.node.ident.clone(),
+                    better.to_owned(),
+                ),
+                None => ScopeError::VariableNotFound(ident.node.ident.clone()),
+            }
+            .into(),
+            ident.span,
+            &self.codemap,
+        )
+    }
+
+    fn resolve_ident(&mut self, scope: ResolveIdentScope, ident: &mut CstIdent) {
+        assert!(ident.node.payload.is_none());
+        let resolved = match self.get_name(self.frozen_heap.alloc_str_intern(&ident.node.ident)) {
+            None => {
+                // Must be a global, since we know all variables
+                match self.globals.get_global(&ident.node.ident) {
+                    None => {
+                        self.errors.push(self.variable_not_found_err(ident));
+                        return;
+                    }
+                    Some(v) => ResolvedIdent::Global(v),
+                }
+            }
+            Some((slot, binding_id)) => ResolvedIdent::Slot(slot, binding_id),
+        };
+        match scope {
+            ResolveIdentScope::Any => {}
+            ResolveIdentScope::GlobalForTypeExpression => match resolved {
+                ResolvedIdent::Slot(Slot::Local(_), _) => {
+                    self.errors.push(EvalException::new(
+                        ScopeError::TypeExpressionGlobalOrBuiltin(ident.node.ident.clone()).into(),
+                        ident.span,
+                        &self.codemap,
+                    ));
+                    return;
+                }
+                ResolvedIdent::Slot(Slot::Module(_), _) => {}
+                ResolvedIdent::Global(_) => {}
+            },
+        }
+        ident.node.payload = Some(resolved);
+    }
+
+    fn resolve_idents_in_compr(
+        &mut self,
+        exprs: &mut [&mut CstExpr],
+        first_for: &mut ForClauseP<CstPayload>,
+        clauses: &mut [ClauseP<CstPayload>],
+    ) {
+        // First for is resolved in outer scope
+        self.resolve_idents_in_for_clause(first_for);
+
+        self.enter_compr();
+
+        // Add identifiers to compr scope
+
+        self.add_compr(
+            iter::once(&mut first_for.var).chain(clauses.iter_mut().filter_map(
+                |clause| match clause {
+                    ClauseP::For(for_clause) => Some(&mut for_clause.var),
+                    ClauseP::If(..) => None,
+                },
+            )),
+        );
+
+        // Now resolve idents in compr scope
+
+        for clause in clauses.iter_mut() {
+            match clause {
+                ClauseP::For(for_clause) => self.resolve_idents_in_for_clause(for_clause),
+                ClauseP::If(cond) => self.resolve_idents_in_expr(cond),
+            }
+        }
+
+        // Finally, resolve the item expression
+
+        for expr in exprs {
+            self.resolve_idents_in_expr(expr);
+        }
+
+        self.exit_compr();
+    }
+
+    fn resolve_idents_in_for_clause(&mut self, for_clause: &mut ForClauseP<CstPayload>) {
+        self.resolve_idents_in_expr(&mut for_clause.over);
+        self.resolve_idents_in_assign(&mut for_clause.var);
+    }
+
+    pub fn enter_def(&mut self, scope_id: ScopeId) {
+        assert!(scope_id != ScopeId::module());
+        self.locals.push(scope_id);
+    }
+
+    // Which slots to grab from the current scope to the parent scope, size of
+    // your self scope. Future state: should return the slots to use from the
+    // parent scope.
+    pub fn exit_def(&mut self) -> &mut ScopeNames<'f> {
+        let scope_id = self.locals.pop().unwrap();
+        self.scope_data.mut_scope(scope_id)
+    }
+
+    fn enter_compr(&mut self) {
+        self.unscopes.push(Unscope::default());
+    }
+
+    fn add_compr<'x>(&mut self, var: impl IntoIterator<Item = &'x mut CstAssignTarget>) {
+        let scope_id = self.top_scope_id();
+        let mut locals = SmallMap::new();
+        for var in var {
+            AssignTarget::collect_defines_lvalue(
+                var,
+                InLoop::Yes,
+                &mut self.scope_data,
+                self.frozen_heap,
+                &mut locals,
+            );
+        }
+        for (name, binding_id) in locals.into_iter() {
+            let slot = self.scope_data.mut_scope(scope_id).add_scoped(
+                name,
+                binding_id,
+                self.unscopes.last_mut().unwrap(),
+            );
+            let binding = self.scope_data.mut_binding(binding_id);
+            binding.init_slot(Slot::Local(slot), &self.codemap).unwrap();
+        }
+    }
+
+    fn exit_compr(&mut self) {
+        self.scope_data
+            .mut_scope(self.top_scope_id())
+            .unscope(self.unscopes.pop().unwrap());
+    }
+
+    fn get_name(&mut self, name: FrozenStringValue) -> Option<(Slot, BindingId)> {
+        // look upwards to find the first place the variable occurs
+        // then copy that variable downwards
+        for i in (0..self.locals.len()).rev() {
+            if let Some((mut v, binding_id)) = self.scope_at_level(i).get_name(name) {
+                if i + 1 != self.locals.len() {
+                    self.scope_data.mut_binding(binding_id).captured = Captured::Yes;
+                }
+                for j in (i + 1)..self.locals.len() {
+                    v = self.scope_at_level_mut(j).copy_parent(v, binding_id, name);
+                }
+                return Some((Slot::Local(v), binding_id));
+            }
+        }
+        let binding_id = self
+            .module_bindings
+            .get_hashed(name.get_hashed().as_ref())
+            .copied();
+        match binding_id {
+            Some(binding_id) => {
+                let binding = self.scope_data.mut_binding(binding_id);
+                if self.locals.len() > 1 {
+                    binding.captured = Captured::Yes;
+                }
+                let slot = binding.resolved_slot(&self.codemap).unwrap();
+                assert!(matches!(slot, Slot::Module(_)));
+                Some((slot, binding_id))
+            }
+            None => None,
+        }
+    }
+}
+
+/// While performing analysis.
+#[derive(Copy, Clone, Dupe)]
+enum InLoop {
+    /// Current statement has an enclosing loop in the current scope.
+    Yes,
+    /// Current statement has no enclosing loop in the current scope.
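+    /// E.g. in `for i in xs: y = 1` the assignment to `y` is analysed with
+    /// `InLoop::Yes` and its binding gets `AssignCount::Any`, while a plain
+    /// top-level `y = 1` is `InLoop::No` and may keep `AssignCount::AtMostOnce`.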
+    No,
+}
+
+trait StmtCollectDefines {
+    fn collect_defines<'a>(
+        stmt: &'a mut CstStmt,
+        in_loop: InLoop,
+        scope_data: &mut ModuleScopeData,
+        frozen_heap: &FrozenHeap,
+        result: &mut SmallMap<FrozenStringValue, BindingId>,
+        dialect: &Dialect,
+    );
+}
+
+impl StmtCollectDefines for Stmt {
+    // Collect all the variables that are defined in this scope
+    fn collect_defines<'a>(
+        stmt: &'a mut CstStmt,
+        in_loop: InLoop,
+        scope_data: &mut ModuleScopeData,
+        frozen_heap: &FrozenHeap,
+        result: &mut SmallMap<FrozenStringValue, BindingId>,
+        dialect: &Dialect,
+    ) {
+        match &mut stmt.node {
+            StmtP::Assign(AssignP { lhs: dest, .. }) | StmtP::AssignModify(dest, _, _) => {
+                AssignTarget::collect_defines_lvalue(
+                    dest,
+                    in_loop,
+                    scope_data,
+                    frozen_heap,
+                    result,
+                );
+            }
+            StmtP::For(ForP { var, over: _, body }) => {
+                AssignTarget::collect_defines_lvalue(
+                    var,
+                    InLoop::Yes,
+                    scope_data,
+                    frozen_heap,
+                    result,
+                );
+                StmtP::collect_defines(body, InLoop::Yes, scope_data, frozen_heap, result, dialect);
+            }
+            StmtP::Def(DefP { name, .. }) => AssignIdent::collect_assign_ident(
+                name,
+                in_loop,
+                Visibility::Public,
+                scope_data,
+                frozen_heap,
+                result,
+            ),
+            StmtP::Load(load) => {
+                // TODO(nga): visibility does not belong to AST.
+                let vis = match dialect.enable_load_reexport {
+                    true => Visibility::Public,
+                    false => Visibility::Private,
+                };
+                for LoadArgP { local, .. } in &mut load.args {
+                    let mut vis = vis;
+                    if Module::default_visibility(&local.ident) == Visibility::Private {
+                        vis = Visibility::Private;
+                    }
+                    AssignIdent::collect_assign_ident(
+                        local,
+                        in_loop,
+                        vis,
+                        scope_data,
+                        frozen_heap,
+                        result,
+                    );
+                }
+            }
+            stmt => stmt.visit_stmt_mut(|x| {
+                Stmt::collect_defines(x, in_loop, scope_data, frozen_heap, result, dialect)
+            }),
+        }
+    }
+}
+
+trait AssignIdentCollect {
+    fn collect_assign_ident<'a>(
+        assign: &'a mut CstAssignIdent,
+        in_loop: InLoop,
+        vis: Visibility,
+        scope_data: &mut ModuleScopeData,
+        frozen_heap: &FrozenHeap,
+        result: &mut SmallMap<FrozenStringValue, BindingId>,
+    );
+}
+
+impl AssignIdentCollect for AssignIdent {
+    fn collect_assign_ident<'a>(
+        assign: &'a mut CstAssignIdent,
+        in_loop: InLoop,
+        vis: Visibility,
+        scope_data: &mut ModuleScopeData,
+        frozen_heap: &FrozenHeap,
+        result: &mut SmallMap<FrozenStringValue, BindingId>,
+    ) {
+        // Helper function to untangle lifetimes: we read and modify `assign` fields.
+        fn assign_ident_impl<'b>(
+            name: FrozenStringValue,
+            span: Span,
+
+            binding: &'b mut Option<BindingId>,
+            in_loop: InLoop,
+            mut vis: Visibility,
+            scope_data: &mut ModuleScopeData,
+            result: &mut SmallMap<FrozenStringValue, BindingId>,
+        ) {
+            assert!(
+                binding.is_none(),
+                "binding can be assigned only once: `{}`",
+                name.as_str()
+            );
+            if vis == Visibility::Public {
+                vis = Module::default_visibility(&name);
+            }
+            match result.entry_hashed(name.get_hashed()) {
+                small_map::Entry::Occupied(e) => {
+                    let prev_binding_id = *e.get();
+                    let prev_binding = scope_data.mut_binding(prev_binding_id);
+                    // If we are in the map as Public and Private, then Public wins.
+                    // Everything but Load is definitely Public.
+                    // So only insert if it wasn't already there.
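+                    // E.g. `x = 1` followed by `x = 2` in one scope takes this
+                    // branch: the second assignment reuses the existing
+                    // binding and downgrades it to `AssignCount::Any`.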
+                    if vis == Visibility::Public {
+                        prev_binding.vis = Visibility::Public;
+                    }
+                    prev_binding.assign_count = AssignCount::Any;
+                    *binding = Some(prev_binding_id);
+                }
+                small_map::Entry::Vacant(e) => {
+                    let assign_count = match in_loop {
+                        InLoop::Yes => AssignCount::Any,
+                        InLoop::No => AssignCount::AtMostOnce,
+                    };
+                    let (new_binding_id, _) = scope_data.new_binding(
+                        name,
+                        BindingSource::Source(span),
+                        vis,
+                        assign_count,
+                    );
+                    e.insert(new_binding_id);
+                    *binding = Some(new_binding_id);
+                }
+            };
+        }
+        assign_ident_impl(
+            frozen_heap.alloc_str_intern(&assign.node.ident),
+            assign.span,
+            &mut assign.node.payload,
+            in_loop,
+            vis,
+            scope_data,
+            result,
+        );
+    }
+}
+
+trait AssignTargetCollectDefinesLvalue {
+    fn collect_defines_lvalue<'a>(
+        expr: &'a mut CstAssignTarget,
+        in_loop: InLoop,
+        scope_data: &mut ModuleScopeData,
+        frozen_heap: &FrozenHeap,
+        result: &mut SmallMap<FrozenStringValue, BindingId>,
+    );
+}
+
+impl AssignTargetCollectDefinesLvalue for AssignTarget {
+    // Collect variables defined in an expression on the LHS of an assignment (or
+    // for variable etc)
+    fn collect_defines_lvalue<'a>(
+        expr: &'a mut CstAssignTarget,
+        in_loop: InLoop,
+        scope_data: &mut ModuleScopeData,
+        frozen_heap: &FrozenHeap,
+        result: &mut SmallMap<FrozenStringValue, BindingId>,
+    ) {
+        expr.node.visit_lvalue_mut(|x| {
+            AssignIdent::collect_assign_ident(
+                x,
+                in_loop,
+                Visibility::Public,
+                scope_data,
+                frozen_heap,
+                result,
+            )
+        });
+    }
+}
+
+/// Storage of objects referenced by AST.
+#[derive(Default)]
+pub(crate) struct ModuleScopeData<'f> {
+    /// Bindings by id.
+    bindings: Vec<Binding<'f>>,
+    /// Scopes by id.
+    scopes: Vec<ScopeNames<'f>>,
+}
+
+#[derive(Debug, Eq, PartialEq)]
+pub(crate) enum AssignCount {
+    /// Variable is assigned at most once during the execution of the scope.
+    AtMostOnce,
+    /// Variable may be assigned more than once during execution of the scope.
+    Any,
+}
+
+/// Was a binding captured by nested def or lambda scopes?
+#[derive(Debug, Copy, Clone, Dupe, Eq, PartialEq, VisitSpanMut)]
+pub(crate) enum Captured {
+    Yes,
+    No,
+}
+
+#[derive(Debug)]
+pub(crate) enum BindingSource {
+    /// Variable is defined in the source of the module.
+    Source(Span),
+    /// Variable came from `Module`, not defined in the source file.
+    FromModule,
+}
+
+/// Binding defines a place for a variable.
+///
+/// For example, in code `x = 1; x = 2`, there's one binding for name `x`.
+///
+/// In code `x = 1; def f(): x = 2`, there are two bindings for name `x`.
+#[derive(Debug)]
+pub(crate) struct Binding<'f> {
+    pub(crate) name: FrozenStringValue,
+    pub(crate) source: BindingSource,
+    pub(crate) vis: Visibility,
+    /// `slot` is `None` when it is not initialized yet.
+    /// When analysis is completed, `slot` is always `Some`.
+    slot: Option<Slot>,
+    pub(crate) assign_count: AssignCount,
+    // Whether a variable defined in a scope gets captured in nested def or lambda scope.
+    // (Comprehension scopes do not count, because they are considered
+    // local by the runtime and do not allocate a frame).
+    pub(crate) captured: Captured,
+    _marker: PhantomData<&'f ()>,
+}
+
+impl<'f> Binding<'f> {
+    fn new(
+        name: FrozenStringValue,
+        source: BindingSource,
+        vis: Visibility,
+        assign_count: AssignCount,
+    ) -> Binding<'f> {
+        Binding {
+            name,
+            source,
+            vis,
+            slot: None,
+            assign_count,
+            captured: Captured::No,
+            _marker: PhantomData,
+        }
+    }
+
+    fn span(&self) -> Span {
+        match self.source {
+            BindingSource::Source(span) => span,
+            BindingSource::FromModule => Span::default(),
+        }
+    }
+
+    /// Get resolved slot after analysis is completed.
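+    /// E.g. once `enter_module` has run, the binding for a top-level `x = 1`
+    /// resolves here to a `Slot::Module(..)`.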
+    pub(crate) fn resolved_slot(&self, codemap: &CodeMap) -> Result<Slot, InternalError> {
+        match self.slot {
+            Some(slot) => Ok(slot),
+            None => Err(InternalError::msg(
+                "slot is not resolved",
+                self.span(),
+                codemap,
+            )),
+        }
+    }
+
+    /// Initialize the slot during analysis.
+    pub(crate) fn init_slot(&mut self, slot: Slot, codemap: &CodeMap) -> Result<(), InternalError> {
+        match mem::replace(&mut self.slot, Some(slot)) {
+            Some(_) => Err(InternalError::msg(
+                "slot is already assigned",
+                self.span(),
+                codemap,
+            )),
+            None => Ok(()),
+        }
+    }
+}
+
+/// Id of a binding within current module.
+#[derive(Copy, Clone, Dupe, Debug, Hash, PartialEq, Eq, Ord, PartialOrd)]
+pub(crate) struct BindingId(usize);
+
+/// Id of a scope within current module.
+#[derive(Copy, Clone, Dupe, Debug, Eq, PartialEq)]
+pub(crate) struct ScopeId(usize);
+
+impl ScopeId {
+    pub(crate) fn module() -> ScopeId {
+        ScopeId(0)
+    }
+}
+
+impl<'f> ModuleScopeData<'f> {
+    pub(crate) fn new() -> ModuleScopeData<'f> {
+        ModuleScopeData::default()
+    }
+
+    pub(crate) fn get_binding(&self, BindingId(id): BindingId) -> &Binding<'f> {
+        &self.bindings[id]
+    }
+
+    fn mut_binding(&mut self, BindingId(id): BindingId) -> &mut Binding<'f> {
+        &mut self.bindings[id]
+    }
+
+    fn new_binding(
+        &mut self,
+        name: FrozenStringValue,
+        source: BindingSource,
+        vis: Visibility,
+        assigned_count: AssignCount,
+    ) -> (BindingId, &mut Binding<'f>) {
+        let binding_id = BindingId(self.bindings.len());
+        self.bindings
+            .push(Binding::new(name, source, vis, assigned_count));
+        (binding_id, self.bindings.last_mut().unwrap())
+    }
+
+    pub(crate) fn get_scope(&self, ScopeId(id): ScopeId) -> &ScopeNames<'f> {
+        &self.scopes[id]
+    }
+
+    pub(crate) fn mut_scope(&mut self, ScopeId(id): ScopeId) -> &mut ScopeNames<'f> {
+        &mut self.scopes[id]
+    }
+
+    pub(crate) fn new_scope(&mut self) -> (ScopeId, &mut ScopeNames<'f>) {
+        let scope_id = ScopeId(self.scopes.len());
+        self.scopes.push(ScopeNames::default());
+        (scope_id, self.scopes.last_mut().unwrap())
+    }
+
+    /// Get resolved slot for assigning identifier.
+    pub(crate) fn get_assign_ident_slot(
+        &self,
+        ident: &CstAssignIdent,
+        codemap: &CodeMap,
+    ) -> (Slot, Captured) {
+        let binding_id = ident.payload.expect("binding not assigned for ident");
+        let binding = self.get_binding(binding_id);
+        let slot = binding.resolved_slot(codemap).unwrap();
+        (slot, binding.captured)
+    }
+}
+
+#[derive(Debug, Clone, Dupe, Copy)]
+pub(crate) enum ResolvedIdent {
+    Slot(Slot, BindingId),
+    Global(FrozenValue),
+}
diff --git a/starlark-rust/starlark/src/eval/compiler/scope/mod.rs b/starlark-rust/starlark/src/eval/compiler/scope/mod.rs
deleted file mode 100644
index a82bb2fca7bc2..0000000000000
--- a/starlark-rust/starlark/src/eval/compiler/scope/mod.rs
+++ /dev/null
@@ -1,1204 +0,0 @@
-/*
- * Copyright 2019 The Starlark in Rust Authors.
- * Copyright (c) Facebook, Inc. and its affiliates.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -pub(crate) mod payload; -pub(crate) mod scope_resolver_globals; -mod tests; - -use std::collections::HashMap; -use std::iter; -use std::marker::PhantomData; -use std::mem; - -use dupe::Dupe; -use starlark_derive::VisitSpanMut; -use starlark_map::small_map; -use starlark_map::small_map::SmallMap; -use starlark_syntax::eval_exception::EvalException; -use starlark_syntax::syntax::ast::AssignIdent; -use starlark_syntax::syntax::ast::AssignP; -use starlark_syntax::syntax::ast::AssignTarget; -use starlark_syntax::syntax::ast::AstAssignIdentP; -use starlark_syntax::syntax::ast::AstStmt; -use starlark_syntax::syntax::ast::ClauseP; -use starlark_syntax::syntax::ast::DefP; -use starlark_syntax::syntax::ast::ExprP; -use starlark_syntax::syntax::ast::ForClauseP; -use starlark_syntax::syntax::ast::ForP; -use starlark_syntax::syntax::ast::LambdaP; -use starlark_syntax::syntax::ast::LoadArgP; -use starlark_syntax::syntax::ast::Stmt; -use starlark_syntax::syntax::ast::StmtP; -use starlark_syntax::syntax::ast::Visibility; -use starlark_syntax::syntax::top_level_stmts::top_level_stmts_mut; -use starlark_syntax::syntax::uniplate::VisitMut; - -use crate::codemap::CodeMap; -use crate::codemap::Span; -use crate::environment::names::MutableNames; -use crate::environment::slots::ModuleSlotId; -use crate::environment::Module; -use crate::errors::did_you_mean::did_you_mean; -use crate::eval::compiler::def::CopySlotFromParent; -use crate::eval::compiler::scope::payload::CstAssignIdent; -use crate::eval::compiler::scope::payload::CstAssignTarget; -use crate::eval::compiler::scope::payload::CstExpr; -use crate::eval::compiler::scope::payload::CstIdent; -use crate::eval::compiler::scope::payload::CstParameter; -use crate::eval::compiler::scope::payload::CstPayload; -use crate::eval::compiler::scope::payload::CstStmt; -use crate::eval::compiler::scope::payload::CstStmtFromAst; -use crate::eval::compiler::scope::payload::CstTypeExpr; -use crate::eval::compiler::scope::scope_resolver_globals::ScopeResolverGlobals; -use crate::eval::runtime::slots::LocalSlotIdCapturedOrNot; -use crate::syntax::Dialect; -use crate::typing::error::InternalError; -use crate::typing::Interface; -use crate::values::FrozenHeap; -use crate::values::FrozenRef; -use crate::values::FrozenStringValue; -use crate::values::FrozenValue; - -#[derive(Debug, thiserror::Error)] -enum ScopeError { - #[error("Variable `{0}` not found")] - VariableNotFound(String), - #[error("Variable `{0}` not found, did you mean `{1}`?")] - VariableNotFoundDidYouMean(String, String), - #[error("Identifiers in type expressions can only refer globals or builtins: `{0}`")] - TypeExpressionGlobalOrBuiltin(String), -} - -/// All scopes and bindings in a module. -struct ModuleScopeBuilder<'a> { - scope_data: ModuleScopeData<'a>, - module: &'a MutableNames, - frozen_heap: &'a FrozenHeap, - module_bindings: SmallMap, - // The first scope is a module-level scope (including comprehensions in module scope). - // The rest are scopes for functions (which include their comprehensions). - locals: Vec, - unscopes: Vec, - codemap: FrozenRef<'static, CodeMap>, - globals: ScopeResolverGlobals, - errors: Vec, - top_level_stmt_count: usize, -} - -pub(crate) struct ModuleScopes<'f> { - pub(crate) scope_data: ModuleScopeData<'f>, - pub(crate) module_slot_count: u32, - pub(crate) cst: CstStmt, - /// Number of top-level statements in the module. 
- pub(crate) top_level_stmt_count: usize, -} - -struct UnscopeBinding { - /// Variable mappings in local scope are overwritten by comprehension variables. - /// - /// When we enter comprehension, we replace local scope variable slots with comprehension - /// scope slots. This field stores the original slot in the local scope, - /// or `None` if there was no mapping for the variable. - /// - /// When we pop the comprehension scope, we restore the mapping from this value. - undo: Option<(LocalSlotIdCapturedOrNot, BindingId)>, -} - -#[derive(Default)] -struct Unscope(SmallMap); - -#[derive(Default, Debug)] -pub(crate) struct ScopeNames<'f> { - /// `Some` when scope is initialized. - /// For module scope, the value is zero. - pub param_count: Option, - /// Slots this scope uses, including for parameters and `parent`. - /// Indexed by [`LocalSlotId`], values are variable names. - pub used: Vec, - /// The names that are in this scope - pub mp: SmallMap, - /// Slots to copy from the parent. - /// Module-level identifiers are not copied over, to avoid excess copying. - pub parent: Vec, - /// We store frozen strings. - _heap: PhantomData<&'f ()>, -} - -impl<'f> ScopeNames<'f> { - fn set_param_count(&mut self, param_count: u32) { - assert!(self.param_count.is_none()); - self.param_count = Some(param_count); - } - - pub(crate) fn param_count(&self) -> u32 { - self.param_count - .expect("param_count must be set during analysis") - } - - fn copy_parent( - &mut self, - parent_slot: LocalSlotIdCapturedOrNot, - binding_id: BindingId, - name: FrozenStringValue, - ) -> LocalSlotIdCapturedOrNot { - assert!(self.get_name(name).is_none()); // Or we'll be overwriting our variable - let res = self.add_name(name, binding_id); - self.parent.push(CopySlotFromParent { - parent: parent_slot, - child: res, - }); - res - } - - fn next_slot(&mut self, name: FrozenStringValue) -> LocalSlotIdCapturedOrNot { - let res = LocalSlotIdCapturedOrNot(self.used.len().try_into().unwrap()); - self.used.push(name); - res - } - - fn add_name( - &mut self, - name: FrozenStringValue, - binding_id: BindingId, - ) -> LocalSlotIdCapturedOrNot { - let slot = self.next_slot(name); - let old = self.mp.insert_hashed(name.get_hashed(), (slot, binding_id)); - assert!(old.is_none()); - slot - } - - fn add_scoped( - &mut self, - name: FrozenStringValue, - binding_id: BindingId, - unscope: &mut Unscope, - ) -> LocalSlotIdCapturedOrNot { - let slot = self.next_slot(name); - let undo = match self.mp.get_mut_hashed(name.get_hashed().as_ref()) { - Some(v) => { - let old = *v; - *v = (slot, binding_id); - Some(old) - } - None => { - self.mp.insert_hashed(name.get_hashed(), (slot, binding_id)); - None - } - }; - assert!( - unscope - .0 - .insert_hashed(name.get_hashed(), UnscopeBinding { undo }) - .is_none() - ); - slot - } - - fn unscope(&mut self, unscope: Unscope) { - for (name, UnscopeBinding { undo }) in unscope.0 { - match undo { - None => { - self.mp.remove(&name); - } - Some(v) => *self.mp.get_mut(&name).unwrap() = v, - } - } - } - - fn get_name(&self, name: FrozenStringValue) -> Option<(LocalSlotIdCapturedOrNot, BindingId)> { - self.mp.get_hashed(name.get_hashed().as_ref()).copied() - } -} - -#[derive(Copy, Clone, Dupe, Debug)] -pub(crate) enum Slot { - /// Top-level module scope. - Module(ModuleSlotId), - /// Local scope, always mutable. - Local(LocalSlotIdCapturedOrNot), -} - -#[derive(Clone, Copy, Dupe)] -enum ResolveIdentScope { - /// Resolving normal identifier. - Any, - /// Resolving identifier in type expression. 
- GlobalForTypeExpression, -} - -impl<'f> ModuleScopeBuilder<'f> { - fn top_scope_id(&self) -> ScopeId { - *self.locals.last().unwrap() - } - - fn scope_at_level(&self, level: usize) -> &ScopeNames<'f> { - let scope_id = self.locals[level]; - self.scope_data.get_scope(scope_id) - } - - fn scope_at_level_mut(&mut self, level: usize) -> &mut ScopeNames<'f> { - let scope_id = self.locals[level]; - self.scope_data.mut_scope(scope_id) - } - - /// Resolve symbols in a module. - /// - /// Checks all the symbols are resolved to locals/globals/captured/etc. - /// Do not check types yet. But validate type expressions. - /// - /// This function does not fail, errors are stored in the `errors` field. - fn enter_module( - module: &'f MutableNames, - frozen_heap: &'f FrozenHeap, - loads: &HashMap, - stmt: AstStmt, - globals: ScopeResolverGlobals, - codemap: FrozenRef<'static, CodeMap>, - dialect: &Dialect, - ) -> (CstStmt, ModuleScopeBuilder<'f>) { - let mut scope_data = ModuleScopeData::new(); - let scope_id = scope_data.new_scope().0; - let mut cst = CstStmt::from_ast(stmt, &mut scope_data, loads); - - let mut top_level_stmts = top_level_stmts_mut(&mut cst); - - // Not really important, sanity check - assert_eq!(scope_id, ScopeId::module()); - - scope_data.mut_scope(scope_id).set_param_count(0); - - let mut locals: SmallMap = SmallMap::new(); - - let existing_module_names_and_visibilites = module.all_names_and_visibilities(); - for (name, vis) in existing_module_names_and_visibilites.iter() { - let (binding_id, _binding) = scope_data.new_binding( - *name, - BindingSource::FromModule, - *vis, - AssignCount::AtMostOnce, - ); - locals.insert_hashed(name.get_hashed(), binding_id); - } - - for stmt in top_level_stmts.iter_mut() { - Stmt::collect_defines( - stmt, - InLoop::No, - &mut scope_data, - frozen_heap, - &mut locals, - dialect, - ); - } - - let mut module_bindings = SmallMap::new(); - for (x, binding_id) in locals { - let binding = scope_data.mut_binding(binding_id); - let slot = module.add_name_visibility(x, binding.vis); - binding.init_slot(Slot::Module(slot), &codemap).unwrap(); - let old_binding = module_bindings.insert_hashed(x.get_hashed(), binding_id); - assert!(old_binding.is_none()); - } - - // Here we traverse the AST second time to collect scopes of defs - for stmt in top_level_stmts.iter_mut() { - ModuleScopeBuilder::collect_defines_recursively( - &mut scope_data, - stmt, - frozen_heap, - dialect, - &codemap, - ); - } - let mut scope = ModuleScopeBuilder { - scope_data, - frozen_heap, - module, - module_bindings, - locals: vec![scope_id], - unscopes: Vec::new(), - codemap, - globals, - errors: Vec::new(), - top_level_stmt_count: top_level_stmts.len(), - }; - for stmt in top_level_stmts.iter_mut() { - scope.resolve_idents(stmt); - } - (cst, scope) - } -} - -impl<'f> ModuleScopeBuilder<'f> { - // Number of module slots I need, a struct holding all scopes, and module bindings. 
- fn exit_module( - mut self, - ) -> ( - u32, - ModuleScopeData<'f>, - SmallMap, - ) { - assert!(self.locals.len() == 1); - assert!(self.unscopes.is_empty()); - let scope_id = self.locals.pop().unwrap(); - assert!(scope_id == ScopeId::module()); - let scope = self.scope_data.get_scope(scope_id); - assert!(scope.parent.is_empty()); - ( - self.module.slot_count(), - self.scope_data, - self.module_bindings, - ) - } -} - -impl<'f> ModuleScopes<'f> { - pub(crate) fn check_module_err( - module: &'f MutableNames, - frozen_heap: &'f FrozenHeap, - loads: &HashMap, - stmt: AstStmt, - globals: ScopeResolverGlobals, - codemap: FrozenRef<'static, CodeMap>, - dialect: &Dialect, - ) -> anyhow::Result> { - let (errors, scopes) = - ModuleScopes::check_module(module, frozen_heap, loads, stmt, globals, codemap, dialect); - if let Some(error) = errors.into_iter().next() { - return Err(error.into_anyhow()); - } - Ok(scopes) - } - - pub(crate) fn check_module( - module: &'f MutableNames, - frozen_heap: &'f FrozenHeap, - loads: &HashMap, - stmt: AstStmt, - globals: ScopeResolverGlobals, - codemap: FrozenRef<'static, CodeMap>, - dialect: &Dialect, - ) -> (Vec, ModuleScopes<'f>) { - let (stmt, mut scope) = ModuleScopeBuilder::enter_module( - module, - frozen_heap, - loads, - stmt, - globals, - codemap, - dialect, - ); - let top_level_stmt_count = scope.top_level_stmt_count; - let errors = mem::take(&mut scope.errors); - let (module_slot_count, scope_data, _module_bindings) = scope.exit_module(); - ( - errors, - ModuleScopes { - cst: stmt, - scope_data, - module_slot_count, - top_level_stmt_count, - }, - ) - } -} - -impl<'f> ModuleScopeBuilder<'f> { - fn collect_defines_in_def( - scope_data: &mut ModuleScopeData, - scope_id: ScopeId, - params: &mut [CstParameter], - body: Option<&mut CstStmt>, - - frozen_heap: &FrozenHeap, - dialect: &Dialect, - codemap: &CodeMap, - ) { - let params: Vec<&mut AstAssignIdentP<_>> = params - .iter_mut() - .filter_map(|p| p.node.split_mut().0) - .collect::>(); - scope_data - .mut_scope(scope_id) - .set_param_count(params.len().try_into().unwrap()); - let mut locals: SmallMap = SmallMap::new(); - for p in params { - let name = frozen_heap.alloc_str_intern(&p.ident); - // Subtle invariant: the slots for the params must be ordered and at the - // beginning - let binding_id = scope_data - .new_binding( - name, - BindingSource::Source(p.span), - Visibility::Public, - AssignCount::AtMostOnce, - ) - .0; - p.payload = Some(binding_id); - let old_local = locals.insert_hashed(name.get_hashed(), binding_id); - assert!(old_local.is_none()); - } - if let Some(code) = body { - Stmt::collect_defines( - code, - InLoop::No, - scope_data, - frozen_heap, - &mut locals, - dialect, - ); - } - for (name, binding_id) in locals.into_iter() { - let slot = scope_data.mut_scope(scope_id).add_name(name, binding_id); - let binding = scope_data.mut_binding(binding_id); - binding.init_slot(Slot::Local(slot), codemap).unwrap(); - } - } - - fn collect_defines_recursively( - scope_data: &mut ModuleScopeData, - code: &mut CstStmt, - - frozen_heap: &FrozenHeap, - dialect: &Dialect, - codemap: &CodeMap, - ) { - if let StmtP::Def(DefP { - name: _, - params, - return_type: _, - body, - payload: scope_id, - }) = &mut code.node - { - // Here we traverse the AST twice: once for this def scope, - // second time below for nested defs. 
- Self::collect_defines_in_def( - scope_data, - *scope_id, - params, - Some(body), - frozen_heap, - dialect, - codemap, - ); - } - - code.visit_children_mut(&mut |visit| match visit { - VisitMut::Expr(e) => Self::collect_defines_recursively_in_expr( - scope_data, - e, - frozen_heap, - dialect, - codemap, - ), - VisitMut::Stmt(s) => { - Self::collect_defines_recursively(scope_data, s, frozen_heap, dialect, codemap) - } - }); - } - - fn collect_defines_recursively_in_expr( - scope_data: &mut ModuleScopeData, - code: &mut CstExpr, - - frozen_heap: &FrozenHeap, - dialect: &Dialect, - codemap: &CodeMap, - ) { - if let ExprP::Lambda(LambdaP { - params, - body: _, - payload: scope_id, - }) = &mut code.node - { - Self::collect_defines_in_def( - scope_data, - *scope_id, - params, - None, - frozen_heap, - dialect, - codemap, - ); - } - - code.visit_expr_mut(|e| { - Self::collect_defines_recursively_in_expr(scope_data, e, frozen_heap, dialect, codemap) - }); - } - - fn resolve_idents(&mut self, code: &mut CstStmt) { - match &mut code.node { - StmtP::Def(DefP { - name: _, - params, - return_type, - body, - payload: scope_id, - }) => self.resolve_idents_in_def( - *scope_id, - params, - return_type.as_mut().map(|r| &mut **r), - Some(body), - None, - ), - StmtP::Assign(AssignP { lhs, ty, rhs }) => { - self.resolve_idents_in_assign(lhs); - if let Some(ty) = ty { - self.resolve_idents_in_type_expr(ty); - } - self.resolve_idents_in_expr(rhs); - } - _ => code.visit_children_mut(|visit| match visit { - VisitMut::Stmt(stmt) => self.resolve_idents(stmt), - VisitMut::Expr(expr) => self.resolve_idents_in_expr(expr), - }), - } - } - - fn resolve_idents_in_assign(&mut self, assign: &mut CstAssignTarget) { - assign.visit_expr_mut(|expr| self.resolve_idents_in_expr(expr)); - } - - fn resolve_idents_in_def( - &mut self, - scope_id: ScopeId, - params: &mut [CstParameter], - ret: Option<&mut CstTypeExpr>, - body_stmt: Option<&mut CstStmt>, - body_expr: Option<&mut CstExpr>, - ) { - for param in params { - let (_, ty, def) = param.split_mut(); - if let Some(ty) = ty { - self.resolve_idents_in_type_expr(ty); - } - if let Some(def) = def { - self.resolve_idents_in_expr(def); - } - } - if let Some(ret) = ret { - self.resolve_idents_in_type_expr(ret); - } - - self.enter_def(scope_id); - if let Some(body_stmt) = body_stmt { - self.resolve_idents(body_stmt); - } - if let Some(body_expr) = body_expr { - self.resolve_idents_in_expr(body_expr); - } - self.exit_def(); - } - - fn resolve_idents_in_expr_impl(&mut self, scope: ResolveIdentScope, expr: &mut CstExpr) { - match &mut expr.node { - ExprP::Identifier(ident) => self.resolve_ident(scope, ident), - ExprP::Lambda(LambdaP { - params, - body, - payload: scope_id, - }) => self.resolve_idents_in_def(*scope_id, params, None, None, Some(body)), - ExprP::ListComprehension(expr, first_for, clauses) => { - self.resolve_idents_in_compr(&mut [expr], first_for, clauses) - } - ExprP::DictComprehension(k_v, first_for, clauses) => { - let (k, v) = &mut **k_v; - self.resolve_idents_in_compr(&mut [k, v], first_for, clauses) - } - _ => expr.visit_expr_mut(|expr| self.resolve_idents_in_expr_impl(scope, expr)), - } - } - - fn resolve_idents_in_expr(&mut self, expr: &mut CstExpr) { - self.resolve_idents_in_expr_impl(ResolveIdentScope::Any, expr); - } - - fn resolve_idents_in_type_expr(&mut self, expr: &mut CstTypeExpr) { - self.resolve_idents_in_expr_impl( - ResolveIdentScope::GlobalForTypeExpression, - &mut expr.node.expr, - ); - } - - fn current_scope_all_visible_names_for_did_you_mean(&self) -> 
Option> { - // It is OK to return non-unique identifiers - let mut r: Vec = Vec::new(); - for &scope_id in self.locals.iter().rev() { - let scope = self.scope_data.get_scope(scope_id); - r.extend(scope.mp.keys().map(|s| s.as_str().to_owned())); - } - r.extend(self.module_bindings.keys().map(|s| s.as_str().to_owned())); - r.extend(self.globals.names()?); - Some(r) - } - - #[cold] - fn variable_not_found_err(&self, ident: &CstIdent) -> EvalException { - let variants = self - .current_scope_all_visible_names_for_did_you_mean() - .unwrap_or(Vec::new()); - let better = did_you_mean( - ident.node.ident.as_str(), - variants.iter().map(|s| s.as_str()), - ); - EvalException::new( - match better { - Some(better) => ScopeError::VariableNotFoundDidYouMean( - ident.node.ident.clone(), - better.to_owned(), - ), - None => ScopeError::VariableNotFound(ident.node.ident.clone()), - } - .into(), - ident.span, - &self.codemap, - ) - } - - fn resolve_ident(&mut self, scope: ResolveIdentScope, ident: &mut CstIdent) { - assert!(ident.node.payload.is_none()); - let resolved = match self.get_name(self.frozen_heap.alloc_str_intern(&ident.node.ident)) { - None => { - // Must be a global, since we know all variables - match self.globals.get_global(&ident.node.ident) { - None => { - self.errors.push(self.variable_not_found_err(ident)); - return; - } - Some(v) => ResolvedIdent::Global(v), - } - } - Some((slot, binding_id)) => ResolvedIdent::Slot(slot, binding_id), - }; - match scope { - ResolveIdentScope::Any => {} - ResolveIdentScope::GlobalForTypeExpression => match resolved { - ResolvedIdent::Slot(Slot::Local(_), _) => { - self.errors.push(EvalException::new( - ScopeError::TypeExpressionGlobalOrBuiltin(ident.node.ident.clone()).into(), - ident.span, - &self.codemap, - )); - return; - } - ResolvedIdent::Slot(Slot::Module(_), _) => {} - ResolvedIdent::Global(_) => {} - }, - } - ident.node.payload = Some(resolved); - } - - fn resolve_idents_in_compr( - &mut self, - exprs: &mut [&mut CstExpr], - first_for: &mut ForClauseP, - clauses: &mut [ClauseP], - ) { - // First for is resolved in outer scope - self.resolve_idents_in_for_clause(first_for); - - self.enter_compr(); - - // Add identifiers to compr scope - - self.add_compr( - iter::once(&mut first_for.var).chain(clauses.iter_mut().filter_map( - |clause| match clause { - ClauseP::For(for_clause) => Some(&mut for_clause.var), - ClauseP::If(..) 
=> None, - }, - )), - ); - - // Now resolve idents in compr scope - - for clause in clauses.iter_mut() { - match clause { - ClauseP::For(for_clause) => self.resolve_idents_in_for_clause(for_clause), - ClauseP::If(cond) => self.resolve_idents_in_expr(cond), - } - } - - // Finally, resolve the item expression - - for expr in exprs { - self.resolve_idents_in_expr(expr); - } - - self.exit_compr(); - } - - fn resolve_idents_in_for_clause(&mut self, for_clause: &mut ForClauseP) { - self.resolve_idents_in_expr(&mut for_clause.over); - self.resolve_idents_in_assign(&mut for_clause.var); - } - - pub fn enter_def(&mut self, scope_id: ScopeId) { - assert!(scope_id != ScopeId::module()); - self.locals.push(scope_id); - } - - // Which slots to grab from the current scope to the parent scope, size of your - // self scope Future state: Should return the slots to use from the parent - // scope - pub fn exit_def(&mut self) -> &mut ScopeNames<'f> { - let scope_id = self.locals.pop().unwrap(); - self.scope_data.mut_scope(scope_id) - } - - fn enter_compr(&mut self) { - self.unscopes.push(Unscope::default()); - } - - fn add_compr<'x>(&mut self, var: impl IntoIterator) { - let scope_id = self.top_scope_id(); - let mut locals = SmallMap::new(); - for var in var { - AssignTarget::collect_defines_lvalue( - var, - InLoop::Yes, - &mut self.scope_data, - self.frozen_heap, - &mut locals, - ); - } - for (name, binding_id) in locals.into_iter() { - let slot = self.scope_data.mut_scope(scope_id).add_scoped( - name, - binding_id, - self.unscopes.last_mut().unwrap(), - ); - let binding = self.scope_data.mut_binding(binding_id); - binding.init_slot(Slot::Local(slot), &self.codemap).unwrap(); - } - } - - fn exit_compr(&mut self) { - self.scope_data - .mut_scope(self.top_scope_id()) - .unscope(self.unscopes.pop().unwrap()); - } - - fn get_name(&mut self, name: FrozenStringValue) -> Option<(Slot, BindingId)> { - // look upwards to find the first place the variable occurs - // then copy that variable downwards - for i in (0..self.locals.len()).rev() { - if let Some((mut v, binding_id)) = self.scope_at_level(i).get_name(name) { - if i + 1 != self.locals.len() { - self.scope_data.mut_binding(binding_id).captured = Captured::Yes; - } - for j in (i + 1)..self.locals.len() { - v = self.scope_at_level_mut(j).copy_parent(v, binding_id, name); - } - return Some((Slot::Local(v), binding_id)); - } - } - let binding_id = self - .module_bindings - .get_hashed(name.get_hashed().as_ref()) - .copied(); - match binding_id { - Some(binding_id) => { - let binding = self.scope_data.mut_binding(binding_id); - if self.locals.len() > 1 { - binding.captured = Captured::Yes; - } - let slot = binding.resolved_slot(&self.codemap).unwrap(); - assert!(matches!(slot, Slot::Module(_))); - Some((slot, binding_id)) - } - None => None, - } - } -} - -/// While performing analysis. -#[derive(Copy, Clone, Dupe)] -enum InLoop { - /// Current statement has an enclosing loop in the current scope. - Yes, - /// Current statement has no enclosing loop in the current scope. 
- No, -} - -trait StmtCollectDefines { - fn collect_defines<'a>( - stmt: &'a mut CstStmt, - in_loop: InLoop, - scope_data: &mut ModuleScopeData, - frozen_heap: &FrozenHeap, - result: &mut SmallMap, - dialect: &Dialect, - ); -} - -impl StmtCollectDefines for Stmt { - // Collect all the variables that are defined in this scope - fn collect_defines<'a>( - stmt: &'a mut CstStmt, - in_loop: InLoop, - scope_data: &mut ModuleScopeData, - frozen_heap: &FrozenHeap, - result: &mut SmallMap, - dialect: &Dialect, - ) { - match &mut stmt.node { - StmtP::Assign(AssignP { lhs: dest, .. }) | StmtP::AssignModify(dest, _, _) => { - AssignTarget::collect_defines_lvalue( - dest, - in_loop, - scope_data, - frozen_heap, - result, - ); - } - StmtP::For(ForP { var, over: _, body }) => { - AssignTarget::collect_defines_lvalue( - var, - InLoop::Yes, - scope_data, - frozen_heap, - result, - ); - StmtP::collect_defines(body, InLoop::Yes, scope_data, frozen_heap, result, dialect); - } - StmtP::Def(DefP { name, .. }) => AssignIdent::collect_assign_ident( - name, - in_loop, - Visibility::Public, - scope_data, - frozen_heap, - result, - ), - StmtP::Load(load) => { - // TODO(nga): visibility does not belong to AST. - let vis = match dialect.enable_load_reexport { - true => Visibility::Public, - false => Visibility::Private, - }; - for LoadArgP { local, .. } in &mut load.args { - let mut vis = vis; - if Module::default_visibility(&local.ident) == Visibility::Private { - vis = Visibility::Private; - } - AssignIdent::collect_assign_ident( - local, - in_loop, - vis, - scope_data, - frozen_heap, - result, - ); - } - } - stmt => stmt.visit_stmt_mut(|x| { - Stmt::collect_defines(x, in_loop, scope_data, frozen_heap, result, dialect) - }), - } - } -} - -trait AssignIdentCollect { - fn collect_assign_ident<'a>( - assign: &'a mut CstAssignIdent, - in_loop: InLoop, - vis: Visibility, - scope_data: &mut ModuleScopeData, - frozen_heap: &FrozenHeap, - result: &mut SmallMap, - ); -} - -impl AssignIdentCollect for AssignIdent { - fn collect_assign_ident<'a>( - assign: &'a mut CstAssignIdent, - in_loop: InLoop, - vis: Visibility, - scope_data: &mut ModuleScopeData, - frozen_heap: &FrozenHeap, - result: &mut SmallMap, - ) { - // Helper function to untangle lifetimes: we read and modify `assign` fields. - fn assign_ident_impl<'b>( - name: FrozenStringValue, - span: Span, - - binding: &'b mut Option, - in_loop: InLoop, - mut vis: Visibility, - scope_data: &mut ModuleScopeData, - result: &mut SmallMap, - ) { - assert!( - binding.is_none(), - "binding can be assigned only once: `{}`", - name.as_str() - ); - if vis == Visibility::Public { - vis = Module::default_visibility(&name); - } - match result.entry_hashed(name.get_hashed()) { - small_map::Entry::Occupied(e) => { - let prev_binding_id = *e.get(); - let prev_binding = scope_data.mut_binding(prev_binding_id); - // If we are in the map as Public and Private, then Public wins. - // Everything but Load is definitely Public. - // So only insert if it wasn't already there. 
- if vis == Visibility::Public { - prev_binding.vis = Visibility::Public; - } - prev_binding.assign_count = AssignCount::Any; - *binding = Some(prev_binding_id); - } - small_map::Entry::Vacant(e) => { - let assign_count = match in_loop { - InLoop::Yes => AssignCount::Any, - InLoop::No => AssignCount::AtMostOnce, - }; - let (new_binding_id, _) = scope_data.new_binding( - name, - BindingSource::Source(span), - vis, - assign_count, - ); - e.insert(new_binding_id); - *binding = Some(new_binding_id); - } - }; - } - assign_ident_impl( - frozen_heap.alloc_str_intern(&assign.node.ident), - assign.span, - &mut assign.node.payload, - in_loop, - vis, - scope_data, - result, - ); - } -} - -trait AssignTargetCollectDefinesLvalue { - fn collect_defines_lvalue<'a>( - expr: &'a mut CstAssignTarget, - in_loop: InLoop, - scope_data: &mut ModuleScopeData, - frozen_heap: &FrozenHeap, - result: &mut SmallMap, - ); -} - -impl AssignTargetCollectDefinesLvalue for AssignTarget { - // Collect variables defined in an expression on the LHS of an assignment (or - // for variable etc) - fn collect_defines_lvalue<'a>( - expr: &'a mut CstAssignTarget, - in_loop: InLoop, - scope_data: &mut ModuleScopeData, - frozen_heap: &FrozenHeap, - result: &mut SmallMap, - ) { - expr.node.visit_lvalue_mut(|x| { - AssignIdent::collect_assign_ident( - x, - in_loop, - Visibility::Public, - scope_data, - frozen_heap, - result, - ) - }); - } -} - -/// Storage of objects referenced by AST. -#[derive(Default)] -pub(crate) struct ModuleScopeData<'f> { - /// Bindings by id. - bindings: Vec>, - /// Scopes by id. - scopes: Vec>, -} - -#[derive(Debug, Eq, PartialEq)] -pub(crate) enum AssignCount { - /// Variable is assigned at most once during the execution of the scope. - AtMostOnce, - /// Variable may be assigned more than once during execution of the scope. - Any, -} - -/// Was a binding captured by nested def or lambda scopes? -#[derive(Debug, Copy, Clone, Dupe, Eq, PartialEq, VisitSpanMut)] -pub(crate) enum Captured { - Yes, - No, -} - -#[derive(Debug)] -pub(crate) enum BindingSource { - /// Variable is defined in the source of the module. - Source(Span), - /// Variable came from `Module`, not defined in the source file. - FromModule, -} - -/// Binding defines a place for a variable. -/// -/// For example, in code `x = 1; x = 2`, there's one binding for name `x`. -/// -/// In code `x = 1; def f(): x = 2`, there are two bindings for name `x`. -#[derive(Debug)] -pub(crate) struct Binding<'f> { - pub(crate) name: FrozenStringValue, - pub(crate) source: BindingSource, - pub(crate) vis: Visibility, - /// `slot` is `None` when it is not initialized yet. - /// When analysis is completed, `slot` is always `Some`. - slot: Option, - pub(crate) assign_count: AssignCount, - // Whether a variable defined in a scope gets captured in nested def or lambda scope. - // (Comprehension scopes do not count, because they are considered - // local by the runtime and do not allocate a frame). - pub(crate) captured: Captured, - _marker: PhantomData<&'f ()>, -} - -impl<'f> Binding<'f> { - fn new( - name: FrozenStringValue, - source: BindingSource, - vis: Visibility, - assign_count: AssignCount, - ) -> Binding<'f> { - Binding { - name, - source, - vis, - slot: None, - assign_count, - captured: Captured::No, - _marker: PhantomData, - } - } - - fn span(&self) -> Span { - match self.source { - BindingSource::Source(span) => span, - BindingSource::FromModule => Span::default(), - } - } - - /// Get resolved slot after analysis is completed. 
- pub(crate) fn resolved_slot(&self, codemap: &CodeMap) -> Result { - match self.slot { - Some(slot) => Ok(slot), - None => Err(InternalError::msg( - "slot is not resolved", - self.span(), - codemap, - )), - } - } - - /// Initialize the slot during analysis. - pub(crate) fn init_slot(&mut self, slot: Slot, codemap: &CodeMap) -> Result<(), InternalError> { - match mem::replace(&mut self.slot, Some(slot)) { - Some(_) => Err(InternalError::msg( - "slot is already assigned", - self.span(), - codemap, - )), - None => Ok(()), - } - } -} - -/// If of a binding within current module. -#[derive(Copy, Clone, Dupe, Debug, Hash, PartialEq, Eq, Ord, PartialOrd)] -pub(crate) struct BindingId(usize); - -/// Id of a scope within current module. -#[derive(Copy, Clone, Dupe, Debug, Eq, PartialEq)] -pub(crate) struct ScopeId(usize); - -impl ScopeId { - pub(crate) fn module() -> ScopeId { - ScopeId(0) - } -} - -impl<'f> ModuleScopeData<'f> { - pub(crate) fn new() -> ModuleScopeData<'f> { - ModuleScopeData::default() - } - - pub(crate) fn get_binding(&self, BindingId(id): BindingId) -> &Binding<'f> { - &self.bindings[id] - } - - fn mut_binding(&mut self, BindingId(id): BindingId) -> &mut Binding<'f> { - &mut self.bindings[id] - } - - fn new_binding( - &mut self, - name: FrozenStringValue, - source: BindingSource, - vis: Visibility, - assigned_count: AssignCount, - ) -> (BindingId, &mut Binding<'f>) { - let binding_id = BindingId(self.bindings.len()); - self.bindings - .push(Binding::new(name, source, vis, assigned_count)); - (binding_id, self.bindings.last_mut().unwrap()) - } - - pub(crate) fn get_scope(&self, ScopeId(id): ScopeId) -> &ScopeNames<'f> { - &self.scopes[id] - } - - pub(crate) fn mut_scope(&mut self, ScopeId(id): ScopeId) -> &mut ScopeNames<'f> { - &mut self.scopes[id] - } - - pub(crate) fn new_scope(&mut self) -> (ScopeId, &mut ScopeNames<'f>) { - let scope_id = ScopeId(self.scopes.len()); - self.scopes.push(ScopeNames::default()); - (scope_id, self.scopes.last_mut().unwrap()) - } - - /// Get resolved slot for assigning identifier. 
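`init_slot` and `resolved_slot` above enforce a write-once protocol: each binding's slot is assigned exactly once during analysis and is only read after analysis completes. The same contract, reduced to a hypothetical standalone cell:

    // Write-once cell: a second `init_slot` and a read-before-init are both errors.
    struct SlotCell(Option<u32>);

    impl SlotCell {
        fn init_slot(&mut self, slot: u32) -> Result<(), &'static str> {
            match self.0.replace(slot) {
                Some(_) => Err("slot is already assigned"),
                None => Ok(()),
            }
        }

        fn resolved_slot(&self) -> Result<u32, &'static str> {
            self.0.ok_or("slot is not resolved")
        }
    }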
- pub(crate) fn get_assign_ident_slot( - &self, - ident: &CstAssignIdent, - codemap: &CodeMap, - ) -> (Slot, Captured) { - let binding_id = ident.payload.expect("binding not assigned for ident"); - let binding = self.get_binding(binding_id); - let slot = binding.resolved_slot(codemap).unwrap(); - (slot, binding.captured) - } -} - -#[derive(Debug, Clone, Dupe, Copy)] -pub(crate) enum ResolvedIdent { - Slot(Slot, BindingId), - Global(FrozenValue), -} diff --git a/starlark-rust/starlark/src/eval/compiler/scope/payload.rs b/starlark-rust/starlark/src/eval/compiler/scope/payload.rs index cdf47a6868e50..78a2d5957bdac 100644 --- a/starlark-rust/starlark/src/eval/compiler/scope/payload.rs +++ b/starlark-rust/starlark/src/eval/compiler/scope/payload.rs @@ -22,7 +22,6 @@ use std::collections::HashMap; use dupe::OptionDupedExt; -use starlark_syntax::syntax::ast::AstArgumentP; use starlark_syntax::syntax::ast::AstAssignIdentP; use starlark_syntax::syntax::ast::AstAssignTargetP; use starlark_syntax::syntax::ast::AstExprP; @@ -148,6 +147,5 @@ pub(crate) type CstTypeExpr = AstTypeExprP; pub(crate) type CstAssignTarget = AstAssignTargetP; pub(crate) type CstAssignIdent = AstAssignIdentP; pub(crate) type CstIdent = AstIdentP; -pub(crate) type CstArgument = AstArgumentP; pub(crate) type CstParameter = AstParameterP; pub(crate) type CstStmt = AstStmtP; diff --git a/starlark-rust/starlark/src/eval/compiler/scope/tests.rs b/starlark-rust/starlark/src/eval/compiler/scope/tests.rs index a7b7ea31aa65e..95efa74b99f30 100644 --- a/starlark-rust/starlark/src/eval/compiler/scope/tests.rs +++ b/starlark-rust/starlark/src/eval/compiler/scope/tests.rs @@ -46,9 +46,9 @@ use crate::values::FrozenHeap; use crate::values::FrozenRef; fn test_with_module(program: &str, expected: &str, module: &MutableNames) { - let ast = AstModule::parse("t.star", program.to_owned(), &Dialect::Extended).unwrap(); + let ast = AstModule::parse("t.star", program.to_owned(), &Dialect::AllOptionsInternal).unwrap(); let frozen_heap = FrozenHeap::new(); - let codemap = frozen_heap.alloc_any_display_from_debug(ast.codemap().dupe()); + let codemap = frozen_heap.alloc_any(ast.codemap().dupe()); let ModuleScopes { cst, scope_data, .. 
} = ModuleScopes::check_module_err( @@ -60,7 +60,7 @@ fn test_with_module(program: &str, expected: &str, module: &MutableNames) { globals: Some(FrozenRef::new(Globals::empty())), }, codemap, - &Dialect::Extended, + &Dialect::AllOptionsInternal, ) .unwrap(); let mut r = String::new(); diff --git a/starlark-rust/starlark/src/eval/compiler/stmt.rs b/starlark-rust/starlark/src/eval/compiler/stmt.rs index 7899006ad540e..fcee0e542ce21 100644 --- a/starlark-rust/starlark/src/eval/compiler/stmt.rs +++ b/starlark-rust/starlark/src/eval/compiler/stmt.rs @@ -39,6 +39,7 @@ use crate::codemap::Span; use crate::codemap::Spanned; use crate::environment::slots::ModuleSlotId; use crate::environment::FrozenModuleData; +use crate::eval::compiler::error::CompilerInternalError; use crate::eval::compiler::expr::Builtin1; use crate::eval::compiler::expr::ExprCompiled; use crate::eval::compiler::expr::ExprLogicalBinOp; @@ -392,23 +393,29 @@ impl IrSpanned { } } -impl Compiler<'_, '_, '_> { - pub fn assign_target(&mut self, expr: &CstAssignTarget) -> IrSpanned { +impl Compiler<'_, '_, '_, '_> { + pub fn assign_target( + &mut self, + expr: &CstAssignTarget, + ) -> Result, CompilerInternalError> { let span = FrameSpan::new(FrozenFileSpan::new(self.codemap, expr.span)); let assign = match &expr.node { AssignTargetP::Dot(e, s) => { - let e = self.expr(e); + let e = self.expr(e)?; let s = &s.node; AssignCompiledValue::Dot(e, s.to_owned()) } AssignTargetP::Index(e_idx) => { let (e, idx) = &**e_idx; - let e = self.expr(e); - let idx = self.expr(idx); + let e = self.expr(e)?; + let idx = self.expr(idx)?; AssignCompiledValue::Index(e, idx) } AssignTargetP::Tuple(v) => { - let v = v.map(|x| self.assign_target(x)); + let v = v + .iter() + .map(|x| self.assign_target(x)) + .collect::>()?; AssignCompiledValue::Tuple(v) } AssignTargetP::Identifier(ident) => { @@ -430,7 +437,7 @@ impl Compiler<'_, '_, '_> { } } }; - IrSpanned { node: assign, span } + Ok(IrSpanned { node: assign, span }) } fn assign_modify( @@ -439,29 +446,29 @@ impl Compiler<'_, '_, '_> { lhs: &CstAssignTarget, rhs: IrSpanned, op: AssignOp, - ) -> StmtsCompiled { + ) -> Result { let span_stmt = FrameSpan::new(FrozenFileSpan::new(self.codemap, span_stmt)); let span_lhs = FrameSpan::new(FrozenFileSpan::new(self.codemap, lhs.span)); match &lhs.node { AssignTargetP::Dot(e, s) => { - let e = self.expr(e); - StmtsCompiled::one(IrSpanned { + let e = self.expr(e)?; + Ok(StmtsCompiled::one(IrSpanned { span: span_stmt, node: StmtCompiled::AssignModify( AssignModifyLhs::Dot(e, s.node.clone()), op, rhs, ), - }) + })) } AssignTargetP::Index(e_idx) => { let (e, idx) = &**e_idx; - let e = self.expr(e); - let idx = self.expr(idx); - StmtsCompiled::one(IrSpanned { + let e = self.expr(e)?; + let idx = self.expr(idx)?; + Ok(StmtsCompiled::one(IrSpanned { span: span_stmt, node: StmtCompiled::AssignModify(AssignModifyLhs::Array(e, idx), op, rhs), - }) + })) } AssignTargetP::Identifier(ident) => { let (slot, captured) = self.scope_data.get_assign_ident_slot(ident, &self.codemap); @@ -471,34 +478,34 @@ impl Compiler<'_, '_, '_> { node: LocalSlotId(slot.0), span: span_lhs, }; - StmtsCompiled::one(IrSpanned { + Ok(StmtsCompiled::one(IrSpanned { span: span_stmt, node: StmtCompiled::AssignModify(AssignModifyLhs::Local(lhs), op, rhs), - }) + })) } (Slot::Local(slot), Captured::Yes) => { let lhs = IrSpanned { node: LocalCapturedSlotId(slot.0), span: span_lhs, }; - StmtsCompiled::one(IrSpanned { + Ok(StmtsCompiled::one(IrSpanned { span: span_stmt, node: StmtCompiled::AssignModify( 
AssignModifyLhs::LocalCaptured(lhs), op, rhs, ), - }) + })) } (Slot::Module(slot), _) => { let lhs = IrSpanned { node: slot, span: span_lhs, }; - StmtsCompiled::one(IrSpanned { + Ok(StmtsCompiled::one(IrSpanned { span: span_stmt, node: StmtCompiled::AssignModify(AssignModifyLhs::Module(lhs), op, rhs), - }) + })) } } } @@ -554,7 +561,7 @@ pub(crate) fn bit_or_assign<'v>( lhs: Value<'v>, rhs: Value<'v>, heap: &'v Heap, -) -> anyhow::Result> { +) -> crate::Result> { // The Starlark spec says dict |= mutates, while nothing else does. // When mutating, be careful if they alias, so we don't have `lhs` // mutably borrowed when we iterate over `rhs`, as they might alias. @@ -578,7 +585,7 @@ pub(crate) fn bit_or_assign<'v>( Ok, )?; for (k, v) in rhs.iter_hashed() { - dict.insert_hashed(k, v); + dict.aref.insert_hashed(k, v); } } Ok(lhs) @@ -593,7 +600,7 @@ pub(crate) fn add_assign<'v>( lhs: Value<'v>, rhs: Value<'v>, heap: &'v Heap, -) -> anyhow::Result> { +) -> crate::Result> { // Checking whether a value is an integer or a string is cheap (no virtual call), // and `Value::add` has optimizations for these types, so check them first // and delegate to `Value::add`. @@ -629,15 +636,19 @@ pub(crate) fn add_assign<'v>( } } -impl Compiler<'_, '_, '_> { +impl Compiler<'_, '_, '_, '_> { pub(crate) fn compile_context(&self, has_return_type: bool) -> StmtCompileContext { StmtCompileContext { has_return_type } } - pub(crate) fn stmt(&mut self, stmt: &CstStmt, allow_gc: bool) -> StmtsCompiled { + pub(crate) fn stmt( + &mut self, + stmt: &CstStmt, + allow_gc: bool, + ) -> Result { let span = FrameSpan::new(FrozenFileSpan::new(self.codemap, stmt.span)); let is_statements = matches!(&stmt.node, StmtP::Statements(_)); - let res = self.stmt_direct(stmt, allow_gc); + let res = self.stmt_direct(stmt, allow_gc)?; // No point inserting a GC point around statements, since they will contain inner statements we can do if allow_gc && !is_statements { // We could do this more efficiently by fusing the possible_gc @@ -647,13 +658,16 @@ impl Compiler<'_, '_, '_> { node: StmtCompiled::PossibleGc, }); with_gc.extend(res); - with_gc + Ok(with_gc) } else { - res + Ok(res) } } - pub(crate) fn module_top_level_stmt(&mut self, stmt: &CstStmt) -> StmtsCompiled { + pub(crate) fn module_top_level_stmt( + &mut self, + stmt: &CstStmt, + ) -> Result { match &stmt.node { StmtP::Statements(..) 
=> { unreachable!("top level statement lists are handled by outer loop") @@ -679,10 +693,15 @@ impl Compiler<'_, '_, '_> { cond: &CstExpr, then_block: &CstStmt, allow_gc: bool, - ) -> StmtsCompiled { - let cond = self.expr(cond); - let then_block = self.stmt(then_block, allow_gc); - StmtsCompiled::if_stmt(span, cond, then_block, StmtsCompiled::empty()) + ) -> Result { + let cond = self.expr(cond)?; + let then_block = self.stmt(then_block, allow_gc)?; + Ok(StmtsCompiled::if_stmt( + span, + cond, + then_block, + StmtsCompiled::empty(), + )) } fn stmt_if_else( @@ -692,19 +711,23 @@ impl Compiler<'_, '_, '_> { then_block: &CstStmt, else_block: &CstStmt, allow_gc: bool, - ) -> StmtsCompiled { - let cond = self.expr(cond); - let then_block = self.stmt(then_block, allow_gc); - let else_block = self.stmt(else_block, allow_gc); - StmtsCompiled::if_stmt(span, cond, then_block, else_block) + ) -> Result { + let cond = self.expr(cond)?; + let then_block = self.stmt(then_block, allow_gc)?; + let else_block = self.stmt(else_block, allow_gc)?; + Ok(StmtsCompiled::if_stmt(span, cond, then_block, else_block)) } - fn stmt_expr(&mut self, expr: &CstExpr) -> StmtsCompiled { - let expr = self.expr(expr); - StmtsCompiled::expr(expr) + fn stmt_expr(&mut self, expr: &CstExpr) -> Result { + let expr = self.expr(expr)?; + Ok(StmtsCompiled::expr(expr)) } - fn stmt_direct(&mut self, stmt: &CstStmt, allow_gc: bool) -> StmtsCompiled { + fn stmt_direct( + &mut self, + stmt: &CstStmt, + allow_gc: bool, + ) -> Result { let span = FrameSpan::new(FrozenFileSpan::new(self.codemap, stmt.span)); match &stmt.node { StmtP::Def(def) => { @@ -725,36 +748,36 @@ impl Compiler<'_, '_, '_> { params, return_type.as_deref(), body, - ), + )?, span, }; let lhs = self.assign_target(&Spanned { span: name.span, node: AssignTargetP::Identifier(name.clone()), - }); - StmtsCompiled::one(IrSpanned { + })?; + Ok(StmtsCompiled::one(IrSpanned { span, node: StmtCompiled::Assign(lhs, None, rhs), - }) + })) } StmtP::For(ForP { var, over, body }) => { let over = list_to_tuple(over); - let var = self.assign_target(var); - let over = self.expr(&over); - let st = self.stmt(body, false); - StmtsCompiled::for_stmt(span, var, over, st) + let var = self.assign_target(var)?; + let over = self.expr(&over)?; + let st = self.stmt(body, false)?; + Ok(StmtsCompiled::for_stmt(span, var, over, st)) } - StmtP::Return(None) => StmtsCompiled::one(IrSpanned { + StmtP::Return(None) => Ok(StmtsCompiled::one(IrSpanned { node: StmtCompiled::Return(IrSpanned { span, node: ExprCompiled::Value(FrozenValue::new_none()), }), span, - }), - StmtP::Return(Some(e)) => StmtsCompiled::one(IrSpanned { - node: StmtCompiled::Return(self.expr(e)), + })), + StmtP::Return(Some(e)) => Ok(StmtsCompiled::one(IrSpanned { + node: StmtCompiled::Return(self.expr(e)?), span, - }), + })), StmtP::If(cond, then_block) => self.stmt_if(span, cond, then_block, allow_gc), StmtP::IfElse(cond, then_block_else_block) => { let (then_block, else_block) = &**then_block_else_block; @@ -766,34 +789,34 @@ impl Compiler<'_, '_, '_> { if r.is_terminal() { break; } - r.extend(self.stmt(stmt, allow_gc)); + r.extend(self.stmt(stmt, allow_gc)?); } - r + Ok(r) } StmtP::Expression(e) => self.stmt_expr(e), StmtP::Assign(AssignP { lhs, ty, rhs }) => { - let rhs = self.expr(rhs); + let rhs = self.expr(rhs)?; let ty = self.expr_for_type(ty.as_ref()); - let lhs = self.assign_target(lhs); - StmtsCompiled::one(IrSpanned { + let lhs = self.assign_target(lhs)?; + Ok(StmtsCompiled::one(IrSpanned { span, node: 
StmtCompiled::Assign(lhs, ty, rhs), - }) + })) } StmtP::AssignModify(lhs, op, rhs) => { - let rhs = self.expr(rhs); + let rhs = self.expr(rhs)?; self.assign_modify(span.span.span(), lhs, rhs, *op) } StmtP::Load(..) => unreachable!(), - StmtP::Pass => StmtsCompiled::empty(), - StmtP::Break => StmtsCompiled::one(IrSpanned { + StmtP::Pass => Ok(StmtsCompiled::empty()), + StmtP::Break => Ok(StmtsCompiled::one(IrSpanned { span, node: StmtCompiled::Break, - }), - StmtP::Continue => StmtsCompiled::one(IrSpanned { + })), + StmtP::Continue => Ok(StmtsCompiled::one(IrSpanned { span, node: StmtCompiled::Continue, - }), + })), } } } diff --git a/starlark-rust/starlark/src/eval/compiler/types.rs b/starlark-rust/starlark/src/eval/compiler/types.rs index fda4205fdbcc2..0d0ad620566bb 100644 --- a/starlark-rust/starlark/src/eval/compiler/types.rs +++ b/starlark-rust/starlark/src/eval/compiler/types.rs @@ -16,8 +16,10 @@ */ use starlark_syntax::eval_exception::EvalException; +use starlark_syntax::internal_error; use starlark_syntax::slice_vec_ext::VecExt; use starlark_syntax::syntax::type_expr::TypeExprUnpackP; +use starlark_syntax::syntax::type_expr::TypePathP; use crate::codemap::Span; use crate::codemap::Spanned; @@ -40,8 +42,6 @@ use crate::values::Value; #[derive(Debug, thiserror::Error)] enum TypesError { - #[error("Type already initialized (internal error)")] - TypeAlreadySet, #[error("Identifier is not resolved (internal error)")] UnresolvedIdentifier, #[error("Identifier is resolve as local variable (internal error)")] @@ -52,13 +52,11 @@ enum TypesError { TypePayloadNotSet, #[error("[] can only be applied to list function in type expression")] TypeIndexOnNonList, - #[error("[,] can only be applied to dict function in type expression")] - TypeIndexOnNonDict, - #[error("[,...] can only be applied to tuple function in type expression")] - TypeIndexEllipsisOnNonTuple, + #[error("[,] can only be applied to dict or tuple functions in type expression")] + TypeIndexOnNonDictOrTuple, } -impl<'v> Compiler<'v, '_, '_> { +impl<'v> Compiler<'v, '_, '_, '_> { /// Compile expression when it is expected to be interpreted as type. pub(crate) fn expr_for_type( &mut self, @@ -74,7 +72,7 @@ impl<'v> Compiler<'v, '_, '_> { // Still make an error in panic to produce nice panic message. panic!( "{:?}", - EvalException::new( + EvalException::new_anyhow( TypesError::TypePayloadNotSet.into(), expr.span, &self.codemap @@ -93,26 +91,25 @@ impl<'v> Compiler<'v, '_, '_> { } /// We evaluated type expression to `Value`, now convert it to `FrozenValue`. - // TODO(nga): this step is not really necessary, we should just create `TypeCompiled` directly. 
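The evaluation below is what gives annotations such as `list[str]`, `dict[str, int]` and `tuple[int, ...]` their meaning: indexing a type function is forwarded to `at`/`at2` on its value. A usage sketch against the crate's public API, assuming a dialect with types enabled; the `eval_module` signature matches the `eval/mod.rs` shown further down in this diff:

    use starlark::environment::{Globals, Module};
    use starlark::eval::Evaluator;
    use starlark::syntax::{AstModule, Dialect, DialectTypes};

    fn main() -> anyhow::Result<()> {
        // Exercises Index (`list[str]`) and Index2 (`dict[str, int]`,
        // `tuple[int, ...]`) type expressions.
        let program = "def f(x: dict[str, int], y: tuple[int, ...]) -> list[str]:\n    return [k for k in x]\n\nf({\"a\": 1}, (1, 2, 3))\n";
        let dialect = Dialect {
            enable_types: DialectTypes::Enable,
            ..Dialect::Standard
        };
        let ast = AstModule::parse("types.star", program.to_owned(), &dialect)?;
        let module = Module::new();
        let mut eval = Evaluator::new(&module);
        eval.eval_module(ast, &Globals::standard())?;
        Ok(())
    }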
fn alloc_value_for_type( &mut self, value: Value<'v>, span: Span, ) -> Result>, EvalException> { let ty = TypeCompiled::new(value, self.eval.heap()); - ty.map_err(|e| EvalException::new(e, span, &self.codemap)) + ty.map_err(|e| EvalException::new_anyhow(e, span, &self.codemap)) } fn eval_ident_in_type_expr(&mut self, ident: &CstIdent) -> Result, EvalException> { let Some(ident_payload) = &ident.node.payload else { - return Err(EvalException::new( + return Err(EvalException::new_anyhow( TypesError::UnresolvedIdentifier.into(), ident.span, &self.codemap, )); }; match ident_payload { - ResolvedIdent::Slot(Slot::Local(..), _) => Err(EvalException::new( + ResolvedIdent::Slot(Slot::Local(..), _) => Err(EvalException::new_anyhow( TypesError::LocalIdentifier.into(), ident.span, &self.codemap, @@ -120,7 +117,7 @@ impl<'v> Compiler<'v, '_, '_> { ResolvedIdent::Slot(Slot::Module(module_slot_id), _) => { match self.eval.module_env.slots().get_slot(*module_slot_id) { Some(v) => Ok(v), - None => Err(EvalException::new( + None => Err(EvalException::new_anyhow( TypesError::ModuleVariableNotSet(ident.node.ident.clone()).into(), ident.span, &self.codemap, @@ -133,95 +130,88 @@ impl<'v> Compiler<'v, '_, '_> { /// We may use non-frozen values as types, so we don't reuse `expr_ident` function /// which is used in normal compilation. - fn eval_path_as_type( - &mut self, - first: &CstIdent, - rem: &[Spanned<&str>], - ) -> Result>, EvalException> { + fn eval_path(&mut self, path: TypePathP) -> Result, EvalException> { + let TypePathP { first, rem } = path; let mut value = self.eval_ident_in_type_expr(first)?; for step in rem { value = value .get_attr_error(step.node, self.eval.heap()) .map_err(|e| EvalException::new(e, step.span, &self.codemap))?; } - let mut span = first.span; - if let Some(last) = rem.last() { - span = span.merge(last.span); - } - self.alloc_value_for_type(value, span) + Ok(value) } fn eval_expr_as_type( &mut self, expr: Spanned>, ) -> Result>, EvalException> { + let span = expr.span; + let value = self.eval_expr(expr)?; + self.alloc_value_for_type(value, span) + } + + /// Evaluate expression in context of typechecker. + /// It is very restricted in what it can do. + fn eval_expr( + &mut self, + expr: Spanned>, + ) -> Result, EvalException> { match expr.node { - TypeExprUnpackP::Path(ident, rem) => self.eval_path_as_type(ident, &rem), + TypeExprUnpackP::Ellipsis => Ok(Ellipsis::new_value().to_value()), + TypeExprUnpackP::List(items) => { + let values: Vec<_> = items + .into_iter() + .map(|item| self.eval_expr(item)) + .collect::>()?; + Ok(self.eval.heap().alloc_list(&values)) + } + TypeExprUnpackP::Path(path) => self.eval_path(path), TypeExprUnpackP::Index(a, i) => { let a = self.eval_ident_in_type_expr(a)?; - if !a.ptr_eq(Constants::get().fn_list.0.to_value()) { - return Err(EvalException::new( + if !a.ptr_eq(Constants::get().fn_list.0.to_value()) + && !a.ptr_eq(Constants::get().fn_set.0.to_value()) + { + return Err(EvalException::new_anyhow( TypesError::TypeIndexOnNonList.into(), expr.span, &self.codemap, )); } let i = self.eval_expr_as_type(*i)?; - let t = a - .get_ref() + a.get_ref() .at(i.to_inner(), self.eval.heap()) - .map_err(|e| EvalException::new(e, expr.span, &self.codemap))?; - Ok(TypeCompiled::new(t, self.eval.heap()) - .map_err(|e| EvalException::new(e, expr.span, &self.codemap))?) 
+ .map_err(|e| EvalException::new(e, expr.span, &self.codemap)) } TypeExprUnpackP::Index2(a, i0, i1) => { - let a = self.eval_ident_in_type_expr(a)?; - if !a.ptr_eq(Constants::get().fn_dict.0.to_value()) { - return Err(EvalException::new( - TypesError::TypeIndexOnNonDict.into(), + let a = self.eval_path(a.node)?; + if a.ptr_eq(Constants::get().fn_dict.0.to_value()) + || a.ptr_eq(Constants::get().fn_tuple.0.to_value()) + || a.ptr_eq(Constants::get().typing_callable.0.to_value()) + { + let i0 = self.eval_expr(*i0)?; + let i1 = self.eval_expr(*i1)?; + a.get_ref() + .at2(i0, i1, self.eval.heap()) + .map_err(|e| EvalException::new(e, expr.span, &self.codemap)) + } else { + return Err(EvalException::new_anyhow( + TypesError::TypeIndexOnNonDictOrTuple.into(), expr.span, &self.codemap, )); } - let i0 = self.eval_expr_as_type(*i0)?; - let i1 = self.eval_expr_as_type(*i1)?; - let t = a - .get_ref() - .at2(i0.to_inner(), i1.to_inner(), self.eval.heap()) - .map_err(|e| EvalException::new(e, expr.span, &self.codemap))?; - Ok(TypeCompiled::new(t, self.eval.heap()) - .map_err(|e| EvalException::new(e, expr.span, &self.codemap))?) - } - TypeExprUnpackP::Index2Ellipsis(a0, i) => { - let a = self.eval_ident_in_type_expr(a0)?; - if !a.ptr_eq(Constants::get().fn_tuple.0.to_value()) { - return Err(EvalException::new( - TypesError::TypeIndexEllipsisOnNonTuple.into(), - expr.span, - &self.codemap, - )); - } - let i = self.eval_expr_as_type(*i)?; - let t = a - .get_ref() - .at2( - i.to_inner(), - Ellipsis::new_value().to_value(), - self.eval.heap(), - ) - .map_err(|e| EvalException::new(e, expr.span, &self.codemap))?; - Ok(TypeCompiled::new(t, self.eval.heap()) - .map_err(|e| EvalException::new(e, expr.span, &self.codemap))?) } TypeExprUnpackP::Union(xs) => { let xs = xs.into_try_map(|x| self.eval_expr_as_type(x))?; - Ok(TypeCompiled::type_any_of(xs, self.eval.heap())) + Ok(TypeCompiled::type_any_of(xs, self.eval.heap()).to_inner()) } + // TODO(nga): tuple type should be `tuple[str, int, bool]`, not `(str, int, bool)`. TypeExprUnpackP::Tuple(xs) => { - let xs = xs.into_try_map(|x| Ok(self.eval_expr_as_type(x)?.as_ty().clone()))?; - Ok(TypeCompiled::from_ty(&Ty::tuple(xs), self.eval.heap())) + let xs = xs.into_try_map(|x| { + Ok::<_, EvalException>(self.eval_expr_as_type(x)?.as_ty().clone()) + })?; + Ok(TypeCompiled::from_ty(&Ty::tuple(xs), self.eval.heap()).to_inner()) } - TypeExprUnpackP::Literal(s) => Ok(TypeCompiled::from_str(s.node, self.eval.heap())), } } @@ -231,17 +221,13 @@ impl<'v> Compiler<'v, '_, '_> { ) -> Result<(), EvalException> { if type_expr.payload.compiler_ty.is_some() { return Err(EvalException::new( - TypesError::TypeAlreadySet.into(), + internal_error!("Type already initialized"), type_expr.span, &self.codemap, )); } // This should not fail because we validated it at parse time. - let unpack = TypeExprUnpackP::unpack( - &type_expr.expr, - &self.codemap, - self.allow_string_literals_in_type_expr, - )?; + let unpack = TypeExprUnpackP::unpack(&type_expr.expr, &self.codemap)?; let type_value = self.eval_expr_as_type(unpack)?; type_expr.payload.compiler_ty = Some(type_value.as_ty().clone()); Ok(()) diff --git a/starlark-rust/starlark/src/eval/mod.rs b/starlark-rust/starlark/src/eval/mod.rs deleted file mode 100644 index 9f3f9a8ce00b7..0000000000000 --- a/starlark-rust/starlark/src/eval/mod.rs +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Copyright 2018 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. 
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//! Evaluate some code, typically done by creating an [`Evaluator`], then calling
-//! [`eval_module`](Evaluator::eval_module).
-
-pub(crate) mod bc;
-pub(crate) mod compiler;
-pub(crate) mod runtime;
-
-use std::collections::HashMap;
-use std::mem;
-use std::time::Instant;
-
-use dupe::Dupe;
-pub use runtime::arguments::Arguments;
-pub use runtime::before_stmt::BeforeStmtFuncDyn;
-pub use runtime::evaluator::Evaluator;
-pub use runtime::file_loader::FileLoader;
-pub use runtime::file_loader::ReturnFileLoader;
-pub use runtime::params::ParametersParser;
-pub use runtime::params::ParametersSpec;
-pub use runtime::params::ParametersSpecBuilder;
-pub use runtime::profile::data::ProfileData;
-pub use runtime::profile::ProfileMode;
-pub use starlark_syntax::call_stack::CallStack;
-use starlark_syntax::eval_exception::EvalException;
-use starlark_syntax::slice_vec_ext::SliceExt;
-use starlark_syntax::syntax::module::AstModule;
-use starlark_syntax::syntax::module::AstModuleFields;
-
-use crate::collections::symbol_map::Symbol;
-use crate::docs::DocString;
-use crate::environment::Globals;
-use crate::eval::compiler::def::DefInfo;
-use crate::eval::compiler::scope::scope_resolver_globals::ScopeResolverGlobals;
-use crate::eval::compiler::scope::ModuleScopes;
-use crate::eval::compiler::scope::ScopeId;
-use crate::eval::compiler::Compiler;
-use crate::eval::runtime::arguments::ArgNames;
-use crate::eval::runtime::arguments::ArgumentsFull;
-use crate::syntax::DialectTypes;
-use crate::values::Value;
-
-impl<'v, 'a> Evaluator<'v, 'a> {
-    /// Evaluate an [`AstModule`] with this [`Evaluator`], modifying the in-scope
-    /// [`Module`](crate::environment::Module) as appropriate.
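For orientation, the two entry points of this now-deleted module are typically used together: `eval_module` evaluates a file and returns the value of its final expression, and `eval_function` then calls a function value with positional and named arguments. A sketch against the signatures below (error type is `anyhow::Result` at this revision):

    use starlark::environment::{Globals, Module};
    use starlark::eval::Evaluator;
    use starlark::syntax::{AstModule, Dialect};

    fn main() -> anyhow::Result<()> {
        let ast = AstModule::parse(
            "add.star",
            "def add(x, y = 10):\n    return x + y\nadd".to_owned(),
            &Dialect::Standard,
        )?;
        let module = Module::new();
        let mut eval = Evaluator::new(&module);
        // The module ends with the bare expression `add`, so `eval_module`
        // returns that function value.
        let add = eval.eval_module(ast, &Globals::standard())?;
        let res = eval.eval_function(
            add,
            &[module.heap().alloc(1)],
            &[("y", module.heap().alloc(2))],
        )?;
        assert_eq!(res.unpack_i32(), Some(3));
        Ok(())
    }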
- pub fn eval_module(&mut self, ast: AstModule, globals: &Globals) -> anyhow::Result> { - let start = Instant::now(); - - let (codemap, statement, dialect, allow_string_literals_in_type_expr, typecheck) = - ast.into_parts(); - - let codemap = self - .module_env - .frozen_heap() - .alloc_any_display_from_debug(codemap.dupe()); - - let globals = self - .module_env - .frozen_heap() - .alloc_any_display_from_type_name(globals.dupe()); - - if let Some(docstring) = DocString::extract_raw_starlark_docstring(&statement) { - self.module_env.set_docstring(docstring) - } - - let ModuleScopes { - cst, - module_slot_count, - scope_data, - top_level_stmt_count, - } = ModuleScopes::check_module_err( - self.module_env.mutable_names(), - self.module_env.frozen_heap(), - &HashMap::new(), - statement, - ScopeResolverGlobals { - globals: Some(globals), - }, - codemap, - &dialect, - )?; - - let scope_names = scope_data.get_scope(ScopeId::module()); - let local_names = self - .frozen_heap() - .alloc_any_slice_display_from_debug(&scope_names.used); - - self.module_env.slots().ensure_slots(module_slot_count); - let old_def_info = mem::replace( - &mut self.module_def_info, - self.module_env.frozen_heap().alloc_any(DefInfo::for_module( - codemap, - local_names, - self.module_env - .frozen_heap() - .alloc_any_slice_display_from_debug(&scope_names.parent), - globals, - )), - ); - - // Set up the world to allow evaluation (do NOT use ? from now on) - - self.call_stack.push(Value::new_none(), None).unwrap(); - - // Evaluation - let mut compiler = Compiler { - scope_data, - locals: Vec::new(), - globals, - codemap, - eval: self, - check_types: dialect.enable_types == DialectTypes::Enable, - top_level_stmt_count, - allow_string_literals_in_type_expr, - typecheck, - }; - - let res = compiler.eval_module(cst, local_names); - - // Clean up the world, putting everything back - self.call_stack.pop(); - - self.module_def_info = old_def_info; - - self.module_env.add_eval_duration(start.elapsed()); - - // Return the result of evaluation - res.map_err(EvalException::into_anyhow) - } - - /// Evaluate a function stored in a [`Value`], passing in `positional` and `named` arguments. - pub fn eval_function( - &mut self, - function: Value<'v>, - positional: &[Value<'v>], - named: &[(&str, Value<'v>)], - ) -> anyhow::Result> { - let names = named.map(|(s, _)| (Symbol::new(s), self.heap().alloc_str(s))); - let named = named.map(|x| x.1); - let params = Arguments(ArgumentsFull { - pos: positional, - named: &named, - names: ArgNames::new(&names), - args: None, - kwargs: None, - }); - // eval_module pushes an "empty" call stack frame. other places expect that first frame to be ignorable, and - // so we push an empty frame too (otherwise things would ignore this function's own frame). - self.with_call_stack(Value::new_none(), None, |this| { - function.invoke(¶ms, this) - }) - } -} diff --git a/starlark-rust/starlark/src/eval/params.rs b/starlark-rust/starlark/src/eval/params.rs new file mode 100644 index 0000000000000..68c94535aaa28 --- /dev/null +++ b/starlark-rust/starlark/src/eval/params.rs @@ -0,0 +1,64 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use crate::eval::ParametersSpec;
+use crate::eval::ParametersSpecParam;
+use crate::typing::ParamSpec;
+use crate::typing::Ty;
+use crate::util::ArcStr;
+
+/// Build both [`ParametersSpec`] (for parsing) and [`ParamSpec`] (for typechecking)
+/// from a list of parameters.
+pub fn param_specs<'a, V: Copy>(
+    function_name: &str,
+    pos_only: impl IntoIterator<Item = (&'a str, ParametersSpecParam<V>, Ty)>,
+    pos_or_named: impl IntoIterator<Item = (&'a str, ParametersSpecParam<V>, Ty)>,
+    args: Option<Ty>,
+    named_only: impl IntoIterator<Item = (&'a str, ParametersSpecParam<V>, Ty)>,
+    kwargs: Option<Ty>,
+) -> crate::Result<(ParametersSpec<V>, ParamSpec)> {
+    let pos_only = Vec::from_iter(pos_only);
+    let pos_or_named = Vec::from_iter(pos_or_named);
+    let named_only = Vec::from_iter(named_only);
+
+    let parameters_spec = ParametersSpec::new_parts(
+        function_name,
+        pos_only.iter().map(|(name, param, _ty)| (*name, *param)),
+        pos_or_named
+            .iter()
+            .map(|(name, param, _ty)| (*name, *param)),
+        args.is_some(),
+        named_only.iter().map(|(name, param, _ty)| (*name, *param)),
+        kwargs.is_some(),
+    );
+
+    let param_spec = ParamSpec::new_parts(
+        pos_only
+            .into_iter()
+            .map(|(_name, param, ty)| (param.is_required(), ty)),
+        pos_or_named
+            .into_iter()
+            .map(|(name, param, ty)| (ArcStr::from(name), param.is_required(), ty)),
+        args,
+        named_only
+            .into_iter()
+            .map(|(name, param, ty)| (ArcStr::from(name), param.is_required(), ty)),
+        kwargs,
+    )?;
+
+    Ok((parameters_spec, param_spec))
+}
diff --git a/starlark-rust/starlark/src/eval/runtime/mod.rs b/starlark-rust/starlark/src/eval/runtime.rs
similarity index 100%
rename from starlark-rust/starlark/src/eval/runtime/mod.rs
rename to starlark-rust/starlark/src/eval/runtime.rs
diff --git a/starlark-rust/starlark/src/eval/runtime/arguments.rs b/starlark-rust/starlark/src/eval/runtime/arguments.rs
index 245b92c363206..51ff5efad97db 100644
--- a/starlark-rust/starlark/src/eval/runtime/arguments.rs
+++ b/starlark-rust/starlark/src/eval/runtime/arguments.rs
@@ -22,31 +22,29 @@
 use std::marker::PhantomData;
 
 use dupe::Clone_;
 use dupe::Dupe_;
 use either::Either;
+use starlark_map::small_set::SmallSet;
+use starlark_syntax::value_error;
 use thiserror::Error;
 
 use crate::cast::transmute;
 use crate::coerce::coerce;
 use crate::coerce::Coerce;
-use crate::collections::symbol_map::Symbol;
+use crate::collections::symbol::symbol::Symbol;
 use crate::collections::Hashed;
 use crate::collections::SmallMap;
 use crate::collections::StarlarkHashValue;
-use crate::eval::runtime::params::ParametersSpec;
+use crate::eval::ParametersSpec;
 use crate::hint::unlikely;
 use crate::values::dict::Dict;
 use crate::values::dict::DictRef;
 use crate::values::iter::StarlarkIterator;
 use crate::values::Heap;
 use crate::values::StringValue;
-use crate::values::UnpackValue;
 use crate::values::Value;
-use crate::values::ValueError;
 use crate::values::ValueLike;
 
 #[derive(Debug, Clone, Error)]
 pub(crate) enum FunctionError {
-    #[error("Missing parameter `{name}` for call to {function}")]
-    MissingParameter { name: String, function: String },
     #[error("Found {count} extra positional argument(s) for call to {function}")]
     ExtraPositionalArg { count: usize, function: String },
     #[error("Found `{}`
extra named parameter(s) for call to {function}", .names.join("` `"))] @@ -67,6 +65,12 @@ pub(crate) enum FunctionError { WrongNumberOfArgs { min: usize, max: usize, got: usize }, } +impl From for crate::Error { + fn from(e: FunctionError) -> Self { + crate::Error::new_kind(crate::ErrorKind::Function(anyhow::Error::new(e))) + } +} + /// An object accompanying argument name for faster argument resolution. pub(crate) trait ArgSymbol: Debug + Coerce + 'static { fn get_index_from_param_spec<'v, V: ValueLike<'v>>( @@ -116,40 +120,47 @@ unsafe impl Coerce for ResolvedArgName {} #[derive(Debug, Clone_, Dupe_)] pub(crate) struct ArgNames<'a, 'v, S: ArgSymbol> { - /// Names are not guaranteed to be unique here. + /// Names are guaranteed to be unique here. names: &'a [(S, StringValue<'v>)], } impl<'a, 'v, S: ArgSymbol> Default for ArgNames<'a, 'v, S> { fn default() -> Self { - ArgNames { names: &[] } + Self::new_unique(&[]) } } impl<'a, 'v, S: ArgSymbol> Copy for ArgNames<'a, 'v, S> {} impl<'a, 'v, S: ArgSymbol> ArgNames<'a, 'v, S> { - /// Names are allowed to be not-unique. + /// Names must be unique. /// String in `Symbol` must be equal to the `StringValue`, /// it is caller responsibility to ensure that. - pub(crate) fn new(names: &'a [(S, StringValue<'v>)]) -> ArgNames<'a, 'v, S> { + /// + /// When this invariant is violated, it is memory safe, + /// but behavior will be incorrect (errors in wrong places, missing errors, panics, etc.) + pub(crate) fn new_unique(names: &'a [(S, StringValue<'v>)]) -> ArgNames<'a, 'v, S> { ArgNames { names } } - pub(crate) fn names(&self) -> &'a [(S, StringValue<'v>)] { - self.names - } - - pub(crate) fn iter(&self) -> impl ExactSizeIterator)> { - self.names.iter() - } - - pub(crate) fn is_empty(&self) -> bool { - self.names.is_empty() + pub(crate) fn new_check_unique( + names: &'a [(S, StringValue<'v>)], + ) -> crate::Result> { + let mut set = SmallSet::with_capacity(names.len()); + for (s, name) in names { + if !set.insert_hashed(Hashed::new_unchecked(s.small_hash(), name.as_str())) { + return Err(value_error!( + "Argument `{}` occurs more than once", + name.as_str() + )); + } + } + Ok(Self::new_unique(names)) } - pub(crate) fn len(&self) -> usize { - self.names.len() + /// Unique names. + pub(crate) fn names(&self) -> &'a [(S, StringValue<'v>)] { + self.names } } @@ -267,40 +278,33 @@ impl<'v, 'a> Arguments<'v, 'a> { /// Unwrap all named arguments (both explicit and in `**kwargs`) into a map. /// /// This operation fails if named argument names are not unique. - pub fn names_map(&self) -> anyhow::Result, Value<'v>>> { + pub fn names_map(&self) -> crate::Result, Value<'v>>> { match self.unpack_kwargs()? 
{ None => { - let mut result = SmallMap::with_capacity(self.0.names.len()); - for (k, v) in self.0.names.iter().zip(self.0.named) { - let old = - result.insert_hashed(Hashed::new_unchecked(k.0.small_hash(), k.1), *v); - if unlikely(old.is_some()) { - return Err(FunctionError::RepeatedArg { - name: k.1.as_str().to_owned(), - } - .into()); - } + let mut result = SmallMap::with_capacity(self.0.names.names().len()); + for (k, v) in self.0.names.names().iter().zip(self.0.named) { + result.insert_hashed_unique_unchecked( + Hashed::new_unchecked(k.0.small_hash(), k.1), + *v, + ); } Ok(result) } Some(kwargs) => { - if self.0.names.is_empty() { + if self.0.names().names().is_empty() { match kwargs.downcast_ref_key_string() { Some(kwargs) => Ok(kwargs.clone()), None => Err(FunctionError::ArgsValueIsNotString.into()), } } else { // We have to insert the names before the kwargs since the iteration order is observable - let mut result = SmallMap::with_capacity(self.0.names.len() + kwargs.len()); - for (k, v) in self.0.names.iter().zip(self.0.named) { - let old = - result.insert_hashed(Hashed::new_unchecked(k.0.small_hash(), k.1), *v); - if unlikely(old.is_some()) { - return Err(FunctionError::RepeatedArg { - name: k.1.as_str().to_owned(), - } - .into()); - } + let mut result = + SmallMap::with_capacity(self.0.names.names().len() + kwargs.len()); + for (k, v) in self.0.names.names().iter().zip(self.0.named) { + result.insert_hashed_unique_unchecked( + Hashed::new_unchecked(k.0.small_hash(), k.1), + *v, + ); } for (k, v) in kwargs.iter_hashed() { let s = Arguments::unpack_kwargs_key_as_value(*k.key())?; @@ -322,7 +326,7 @@ impl<'v, 'a> Arguments<'v, 'a> { /// The number of arguments, where those inside a args/kwargs are counted as multiple arguments. /// /// This operation fails if the `kwargs` is not a dictionary, or `args` does not support `len`. - pub fn len(&self) -> anyhow::Result { + pub fn len(&self) -> crate::Result { let args = match self.0.args { None => 0, Some(v) => v.length()? as usize, @@ -337,15 +341,15 @@ impl<'v, 'a> Arguments<'v, 'a> { /// Unwrap all named arguments (both explicit and in `**kwargs`) into a dictionary. /// /// This operation fails if named argument names are not unique. - pub fn names(&self) -> anyhow::Result> { + pub(crate) fn names(&self) -> crate::Result> { Ok(Dict::new(coerce(self.names_map()?))) } /// Unpack all positional parameters into an iterator. - pub fn positions<'b>( + pub(crate) fn positions<'b>( &'b self, heap: &'v Heap, - ) -> anyhow::Result> + 'b> { + ) -> crate::Result> + 'b> { let tail = match self.0.args { None => Either::Left(iter::empty()), Some(args) => Either::Right(args.iterate(heap)?), @@ -358,7 +362,7 @@ impl<'v, 'a> Arguments<'v, 'a> { /// will _not_ have been validated to be strings (as they must be). /// The arguments may also overlap with named, which would be an error. #[inline(always)] - pub fn unpack_kwargs(&self) -> anyhow::Result>> { + pub(crate) fn unpack_kwargs(&self) -> crate::Result>> { match self.0.kwargs { None => Ok(None), Some(kwargs) => match DictRef::from_value(kwargs) { @@ -370,7 +374,7 @@ impl<'v, 'a> Arguments<'v, 'a> { /// Confirm that a key in the `kwargs` field is indeed a string, or [`Err`]. 
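`ArgNames::new_check_unique` above moves duplicate-name detection to construction time, which is why `names_map` can use `insert_hashed_unique_unchecked` on the hot path here. The check itself is plain set insertion; a standalone sketch:

    use std::collections::HashSet;

    // Reject a named-argument list that mentions the same name twice.
    fn check_unique(names: &[&str]) -> Result<(), String> {
        let mut seen = HashSet::with_capacity(names.len());
        for name in names {
            if !seen.insert(*name) {
                return Err(format!("Argument `{name}` occurs more than once"));
            }
        }
        Ok(())
    }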
#[inline(always)] - pub(crate) fn unpack_kwargs_key_as_value(k: Value<'v>) -> anyhow::Result> { + pub(crate) fn unpack_kwargs_key_as_value(k: Value<'v>) -> crate::Result> { match StringValue::new(k) { None => Err(FunctionError::ArgsValueIsNotString.into()), Some(k) => Ok(k), @@ -379,26 +383,26 @@ impl<'v, 'a> Arguments<'v, 'a> { /// Confirm that a key in the `kwargs` field is indeed a string, or [`Err`]. #[inline(always)] - pub fn unpack_kwargs_key(k: Value<'v>) -> anyhow::Result<&'v str> { + pub(crate) fn unpack_kwargs_key(k: Value<'v>) -> crate::Result<&'v str> { Arguments::unpack_kwargs_key_as_value(k).map(|k| k.as_str()) } /// Produce [`Err`] if there are any positional arguments. #[inline(always)] - pub fn no_positional_args(&self, heap: &'v Heap) -> anyhow::Result<()> { + pub fn no_positional_args(&self, heap: &'v Heap) -> crate::Result<()> { let [] = self.positional(heap)?; Ok(()) } /// Produce [`Err`] if there are any named (i.e. non-positional) arguments. #[inline(always)] - pub fn no_named_args(&self) -> anyhow::Result<()> { + pub fn no_named_args(&self) -> crate::Result<()> { #[cold] #[inline(never)] - fn bad(x: &Arguments) -> anyhow::Result<()> { + fn bad(x: &Arguments) -> crate::Result<()> { // We might have a empty kwargs dictionary, but probably have an error let mut extra = Vec::new(); - extra.extend(x.0.names.iter().map(|x| x.0.as_str().to_owned())); + extra.extend(x.0.names.names().iter().map(|x| x.0.as_str().to_owned())); if let Some(kwargs) = x.unpack_kwargs()? { for k in kwargs.keys() { extra.push(Arguments::unpack_kwargs_key(k)?.to_owned()); @@ -408,11 +412,10 @@ impl<'v, 'a> Arguments<'v, 'a> { Ok(()) } else { // Would be nice to give a better name here, but it's in the call stack, so no big deal - Err(FunctionError::ExtraNamedArg { + Err(crate::Error::from(FunctionError::ExtraNamedArg { names: extra, function: "function".to_owned(), - } - .into()) + })) } } @@ -426,59 +429,28 @@ impl<'v, 'a> Arguments<'v, 'a> { /// Collect exactly `N` positional arguments from the [`Arguments`], failing if there are too many/few /// arguments. Ignores named arguments. #[inline(always)] - pub fn positional(&self, heap: &'v Heap) -> anyhow::Result<[Value<'v>; N]> { - #[cold] - #[inline(never)] - fn rare<'v, const N: usize>( - x: &Arguments<'v, '_>, - heap: &'v Heap, - ) -> anyhow::Result<[Value<'v>; N]> { - // Very sad that we allocate into a vector, but I expect calling into a small positional argument - // with a *args is very rare. - let xs = - x.0.pos - .iter() - .copied() - .chain(x.0.args.unwrap().iterate(heap)?) - .collect::>(); - xs.as_slice().try_into().map_err(|_| { - FunctionError::WrongNumberOfArgs { - min: N, - max: N, - got: x.0.pos.len(), - } - .into() - }) - } - - if self.0.args.is_none() { - self.0.pos.try_into().map_err(|_| { - FunctionError::WrongNumberOfArgs { - min: N, - max: N, - got: self.0.pos.len(), - } - .into() - }) - } else { - rare(self, heap) - } + pub(crate) fn positional( + &self, + heap: &'v Heap, + ) -> crate::Result<[Value<'v>; N]> { + let (positional, []) = self.optional::(heap)?; + Ok(positional) } /// Collect exactly `REQUIRED` positional arguments, plus at most `OPTIONAL` positional arguments /// from the [`Arguments`], failing if there are too many/few arguments. Ignores named arguments. /// The `OPTIONAL` array will never have a [`Some`] after a [`None`]. 
#[inline(always)] - pub fn optional( + pub(crate) fn optional( &self, heap: &'v Heap, - ) -> anyhow::Result<([Value<'v>; REQUIRED], [Option>; OPTIONAL])> { + ) -> crate::Result<([Value<'v>; REQUIRED], [Option>; OPTIONAL])> { #[cold] #[inline(never)] fn rare<'v, const REQUIRED: usize, const OPTIONAL: usize>( x: &Arguments<'v, '_>, heap: &'v Heap, - ) -> anyhow::Result<([Value<'v>; REQUIRED], [Option>; OPTIONAL])> { + ) -> crate::Result<([Value<'v>; REQUIRED], [Option>; OPTIONAL])> { // Very sad that we allocate into a vector, but I expect calling into a small positional argument // with a *args is very rare. let args = match x.0.args { @@ -494,12 +466,11 @@ impl<'v, 'a> Arguments<'v, 'a> { } Ok((required, optional)) } else { - Err(FunctionError::WrongNumberOfArgs { + Err(crate::Error::from(FunctionError::WrongNumberOfArgs { min: REQUIRED, max: REQUIRED + OPTIONAL, got: xs.len(), - } - .into()) + })) } } @@ -521,7 +492,7 @@ impl<'v, 'a> Arguments<'v, 'a> { /// Collect 1 positional arguments from the [`Arguments`], failing if there are too many/few /// arguments. Ignores named arguments. #[inline(always)] - pub fn positional1(&self, heap: &'v Heap) -> anyhow::Result> { + pub fn positional1(&self, heap: &'v Heap) -> crate::Result> { // Could be implemented more directly, let's see if profiling shows it up let [x] = self.positional(heap)?; Ok(x) @@ -530,49 +501,13 @@ impl<'v, 'a> Arguments<'v, 'a> { /// Collect up to 1 optional arguments from the [`Arguments`], failing if there are too many /// arguments. Ignores named arguments. #[inline(always)] - pub fn optional1(&self, heap: &'v Heap) -> anyhow::Result>> { + pub(crate) fn optional1(&self, heap: &'v Heap) -> crate::Result>> { // Could be implemented more directly, let's see if profiling shows it up let ([], [x]) = self.optional(heap)?; Ok(x) } } -impl Arguments<'_, '_> { - /// Utility for checking a `this` parameter matches what you expect. - pub fn check_this<'v, T: UnpackValue<'v>>(this: Value<'v>) -> anyhow::Result { - T::unpack_named_param(this, "this") - } - - /// Utility for checking a required parameter matches what you expect. - pub fn check_required<'v, T: UnpackValue<'v>>( - name: &str, - x: Option>, - ) -> anyhow::Result { - let x = x.ok_or_else(|| ValueError::MissingRequired(name.to_owned()))?; - T::unpack_named_param(x, name) - } - - /// Utility for checking an optional parameter matches what you expect. - pub fn check_optional<'v, T: UnpackValue<'v>>( - name: &str, - x: Option>, - ) -> anyhow::Result> { - match x { - None => Ok(None), - Some(x) => Ok(Some(T::unpack_value(x).ok_or_else::( - || { - ValueError::IncorrectParameterTypeNamedWithExpected( - name.to_owned(), - T::expected(), - x.get_type().to_owned(), - ) - .into() - }, - )?)), - } - } -} - impl<'a> Arguments<'static, 'a> { /// Convert `Arguments` with `FrozenValue` (because no other values can have `'v` lifetime) /// to arbitrary `'v` lifetime. 
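Of the collectors above, `positional1` and the `no_positional_args`/`no_named_args` checks stay public while the const-generic variants become crate-private. A hypothetical native-function body using the public pieces (`starlark::Result` is this diff's new `crate::Result` as seen from outside the crate):

    use starlark::eval::Arguments;
    use starlark::values::{Heap, Value};

    // Accept exactly one positional argument and nothing else.
    fn first_arg<'v>(args: &Arguments<'v, '_>, heap: &'v Heap) -> starlark::Result<Value<'v>> {
        args.no_named_args()?;
        args.positional1(heap)
    }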
@@ -699,14 +634,13 @@ mod tests { let named = [Value::new_none()]; p.0.named = &named; let names = [(Symbol::new("test"), heap.alloc_str("test"))]; - p.0.names = ArgNames::new(&names); + p.0.names = ArgNames::new_check_unique(&names).unwrap(); assert!(p.no_named_args().is_err()); assert_eq!(p.len().unwrap(), 1); } #[test] fn test_names_map_repeated_name_in_arg_names() { - let named = vec![Value::testing_new_int(10), Value::new_bool(true)]; let names = vec![ ( Symbol::new("a"), @@ -717,15 +651,6 @@ mod tests { const_frozen_string!("a").to_string_value(), ), ]; - let error = Arguments(ArgumentsFull { - pos: &[], - named: &named, - names: ArgNames::new(&names), - args: None, - kwargs: None, - }) - .names_map() - .unwrap_err(); - assert!(error.to_string().contains("occurs more than once")); + assert!(ArgNames::new_check_unique(&names).is_err()); } } diff --git a/starlark-rust/starlark/src/eval/runtime/before_stmt.rs b/starlark-rust/starlark/src/eval/runtime/before_stmt.rs index 9e51fba1faec7..4143199d1c919 100644 --- a/starlark-rust/starlark/src/eval/runtime/before_stmt.rs +++ b/starlark-rust/starlark/src/eval/runtime/before_stmt.rs @@ -22,9 +22,9 @@ use crate::eval::Evaluator; /// Configuration of `BeforeStmt` instrumentation of bytecode. #[derive(Default)] -pub(crate) struct BeforeStmt<'a> { +pub(crate) struct BeforeStmt<'a, 'e: 'a> { /// Functions to run before each statement. - pub(crate) before_stmt: Vec>, + pub(crate) before_stmt: Vec>, /// Explicitly request generation of `BeforeStmt` instructions /// even if no `before_stmt` functions are registered. /// This is needed when compiling dependencies of a file to be profiled. @@ -34,15 +34,22 @@ pub(crate) struct BeforeStmt<'a> { /// This is used by DAP, and it is not public API. // TODO(cjhopman): pull DAP into the crate, and hide this function. #[doc(hidden)] -pub enum BeforeStmtFunc<'a> { - Fn(&'a dyn for<'v1> Fn(FileSpanRef, &mut Evaluator<'v1, 'a>)), - Dyn(Box>), +pub enum BeforeStmtFunc<'a, 'e: 'a> { + Fn(&'a dyn for<'v1> Fn(FileSpanRef, &mut Evaluator<'v1, 'a, 'e>)), + Dyn(Box>), } -impl<'a> BeforeStmtFunc<'a> { - pub(crate) fn call<'v>(&mut self, span: FileSpanRef, eval: &mut Evaluator<'v, 'a>) { +impl<'a, 'e: 'a> BeforeStmtFunc<'a, 'e> { + pub(crate) fn call<'v>( + &mut self, + span: FileSpanRef, + eval: &mut Evaluator<'v, 'a, 'e>, + ) -> crate::Result<()> { match self { - BeforeStmtFunc::Fn(f) => f(span, eval), + BeforeStmtFunc::Fn(f) => { + f(span, eval); + Ok(()) + } BeforeStmtFunc::Dyn(d) => d.call(span, eval), } } @@ -51,27 +58,33 @@ impl<'a> BeforeStmtFunc<'a> { /// This is used by DAP, and it is not public API. // TODO(cjhopman): pull DAP into the crate, and hide this function. #[doc(hidden)] -pub trait BeforeStmtFuncDyn<'a> { +pub trait BeforeStmtFuncDyn<'a, 'e: 'a> { /// This is used by DAP, and it is not public API. // TODO(cjhopman): pull DAP into the crate, and hide this function. 
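An implementor of `BeforeStmtFuncDyn` now has to name the extra `'e` lifetime and return a fallible result, matching the updated `call` signature below. A sketch, assuming the trait remains re-exported from `starlark::eval` and `FileSpanRef` from `starlark::codemap`:

    use starlark::codemap::FileSpanRef;
    use starlark::eval::{BeforeStmtFuncDyn, Evaluator};

    // Count every statement executed; errors from the hook now propagate.
    struct CountStmts {
        n: usize,
    }

    impl<'a, 'e: 'a> BeforeStmtFuncDyn<'a, 'e> for CountStmts {
        fn call<'v>(
            &mut self,
            _span: FileSpanRef,
            _eval: &mut Evaluator<'v, 'a, 'e>,
        ) -> starlark::Result<()> {
            self.n += 1;
            Ok(())
        }
    }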
#[doc(hidden)] - fn call<'v>(&mut self, span: FileSpanRef, eval: &mut Evaluator<'v, 'a>); + fn call<'v>( + &mut self, + span: FileSpanRef, + eval: &mut Evaluator<'v, 'a, 'e>, + ) -> crate::Result<()>; } -impl<'a> BeforeStmt<'a> { +impl<'a, 'e: 'a> BeforeStmt<'a, 'e> { pub(crate) fn enabled(&self) -> bool { self.instrument || !self.before_stmt.is_empty() } } -impl<'a> From<&'a dyn for<'v1> Fn(FileSpanRef, &mut Evaluator<'v1, 'a>)> for BeforeStmtFunc<'a> { - fn from(value: &'a dyn for<'v1> Fn(FileSpanRef, &mut Evaluator<'v1, 'a>)) -> Self { +impl<'a, 'e: 'a> From<&'a dyn for<'v1> Fn(FileSpanRef, &mut Evaluator<'v1, 'a, 'e>)> + for BeforeStmtFunc<'a, 'e> +{ + fn from(value: &'a dyn for<'v1> Fn(FileSpanRef, &mut Evaluator<'v1, 'a, 'e>)) -> Self { Self::Fn(value) } } -impl<'a> From>> for BeforeStmtFunc<'a> { - fn from(value: Box>) -> Self { +impl<'a, 'e: 'a> From>> for BeforeStmtFunc<'a, 'e> { + fn from(value: Box>) -> Self { Self::Dyn(value) } } diff --git a/starlark-rust/starlark/src/eval/runtime/cheap_call_stack.rs b/starlark-rust/starlark/src/eval/runtime/cheap_call_stack.rs index 292edf0c00569..08e657b6984a2 100644 --- a/starlark-rust/starlark/src/eval/runtime/cheap_call_stack.rs +++ b/starlark-rust/starlark/src/eval/runtime/cheap_call_stack.rs @@ -17,10 +17,12 @@ use std::fmt; use std::fmt::Debug; +use std::vec; use dupe::Dupe; use starlark_syntax::codemap::FileSpan; use starlark_syntax::slice_vec_ext::SliceExt; +use starlark_syntax::ErrorKind; use crate::errors::Frame; use crate::eval::runtime::frame_span::FrameSpan; @@ -76,13 +78,15 @@ enum CallStackError { StackIsTooShallowForNthTopFrame(usize, usize), #[error("Starlark call stack overflow")] Overflow, + #[error("Starlark call stack is already allocated")] + AlreadyAllocated, } /// Starlark call stack. #[derive(Debug)] pub(crate) struct CheapCallStack<'v> { count: usize, - stack: Box<[CheapFrame<'v>; MAX_CALLSTACK_RECURSION]>, + stack: Box<[CheapFrame<'v>]>, } impl<'v> Default for CheapCallStack<'v> { @@ -93,25 +97,12 @@ impl<'v> Default for CheapCallStack<'v> { [CheapFrame { function: Value::new_none(), span: None, - }; MAX_CALLSTACK_RECURSION], + }; 0], ), } } } -// Currently, each frame typically allocates about 1K of native stack size (see `test_frame_size`), -// but it is a bit more complicated: -// * each for loop in a frame allocates more native stack -// * inlined functions do not allocate native stack -// Practically max call stack depends on native stack size, -// and depending on environment, it may be configured differently, for example: -// * macOS default stack size is 512KB -// * Linux default stack size is 8MB -// * [tokio default stack size is 2MB][1] -// [1] https://docs.rs/tokio/0.2.1/tokio/runtime/struct.Builder.html#method.thread_stack_size -// TODO(nga): make it configurable. 
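The sizing comment above now lives next to `alloc_if_needed`, which replaces the fixed `MAX_CALLSTACK_RECURSION` array below with a lazily allocated one: the boxed slice starts empty, is sized once on first use, and re-allocation at a different size is an error. The essential pattern:

    // Lazily sized stack: empty until first use, then fixed at `max_size`.
    struct Frames {
        frames: Box<[u64]>,
    }

    impl Frames {
        fn alloc_if_needed(&mut self, max_size: usize) -> Result<(), &'static str> {
            if !self.frames.is_empty() {
                return if self.frames.len() == max_size {
                    Ok(())
                } else {
                    Err("stack is already allocated")
                };
            }
            self.frames = vec![0u64; max_size].into_boxed_slice();
            Ok(())
        }
    }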
-const MAX_CALLSTACK_RECURSION: usize = 50; - unsafe impl<'v> Trace<'v> for CheapCallStack<'v> { fn trace(&mut self, tracer: &Tracer<'v>) { let (used, unused) = self.stack.split_at_mut(self.count); @@ -128,15 +119,47 @@ unsafe impl<'v> Trace<'v> for CheapCallStack<'v> { } impl<'v> CheapCallStack<'v> { + // Currently, each frame typically allocates about 1K of native stack size (see `test_frame_size`), + // but it is a bit more complicated: + // * each for loop in a frame allocates more native stack + // * inlined functions do not allocate native stack + // Practically max call stack depends on native stack size, + // and depending on environment, it may be configured differently, for example: + // * macOS default stack size is 512KB + // * Linux default stack size is 8MB + // * [tokio default stack size is 2MB][1] + // [1] https://docs.rs/tokio/0.2.1/tokio/runtime/struct.Builder.html#method.thread_stack_size + pub(crate) fn alloc_if_needed(&mut self, max_size: usize) -> anyhow::Result<()> { + if self.stack.len() != 0 { + return if self.stack.len() == max_size { + Ok(()) + } else { + Err(CallStackError::AlreadyAllocated.into()) + }; + } + + self.stack = vec![ + CheapFrame { + function: Value::new_none(), + span: None, + }; + max_size + ] + .into_boxed_slice(); + Ok(()) + } + /// Push an element to the stack. It is important the each `push` is paired /// with a `pop`. pub(crate) fn push( &mut self, function: Value<'v>, span: Option>, - ) -> anyhow::Result<()> { - if unlikely(self.count >= MAX_CALLSTACK_RECURSION) { - return Err(CallStackError::Overflow.into()); + ) -> crate::Result<()> { + if unlikely(self.count >= self.stack.len()) { + return Err(crate::Error::new_kind(ErrorKind::StackOverflow( + CallStackError::Overflow.into(), + ))); } self.stack[self.count] = CheapFrame { function, span }; self.count += 1; @@ -175,14 +198,13 @@ impl<'v> CheapCallStack<'v> { /// `n`-th element from the top of the stack. 
pub(crate) fn top_nth_function(&self, n: usize) -> anyhow::Result> { - let index = self - .count - .checked_sub(1) - .and_then(|x| x.checked_sub(n)) - .ok_or(CallStackError::StackIsTooShallowForNthTopFrame( - n, self.count, - ))?; - Ok(self.stack[index].function) + self.top_nth_function_opt(n) + .ok_or_else(|| CallStackError::StackIsTooShallowForNthTopFrame(n, self.count).into()) + } + + pub(crate) fn top_nth_function_opt(&self, n: usize) -> Option> { + let index = self.count.checked_sub(1).and_then(|x| x.checked_sub(n))?; + Some(self.stack[index].function) } pub(crate) fn to_diagnostic_frames(&self, inlined_frames: InlinedFrames) -> CallStack { diff --git a/starlark-rust/starlark/src/eval/runtime/evaluator.rs b/starlark-rust/starlark/src/eval/runtime/evaluator.rs index ef06ab0c62918..d93cc35647c07 100644 --- a/starlark-rust/starlark/src/eval/runtime/evaluator.rs +++ b/starlark-rust/starlark/src/eval/runtime/evaluator.rs @@ -21,9 +21,9 @@ use std::mem::MaybeUninit; use std::path::Path; use dupe::Dupe; -use starlark_syntax::diagnostic::Diagnostic; use starlark_syntax::eval_exception::EvalException; use starlark_syntax::frame::Frame; +use starlark_syntax::internal_error; use thiserror::Error; use crate::any::AnyLifetime; @@ -56,16 +56,18 @@ use crate::eval::runtime::profile::data::ProfileData; use crate::eval::runtime::profile::heap::HeapProfile; use crate::eval::runtime::profile::heap::HeapProfileFormat; use crate::eval::runtime::profile::heap::RetainedHeapProfileMode; +use crate::eval::runtime::profile::mode::ProfileMode; use crate::eval::runtime::profile::or_instrumentation::ProfileOrInstrumentationMode; use crate::eval::runtime::profile::stmt::StmtProfile; use crate::eval::runtime::profile::time_flame::TimeFlameProfile; use crate::eval::runtime::profile::typecheck::TypecheckProfile; -use crate::eval::runtime::profile::ProfileMode; use crate::eval::runtime::rust_loc::rust_loc; use crate::eval::runtime::slots::LocalCapturedSlotId; use crate::eval::runtime::slots::LocalSlotId; +use crate::eval::soft_error::HardErrorSoftErrorHandler; use crate::eval::CallStack; use crate::eval::FileLoader; +use crate::eval::SoftErrorHandler; use crate::stdlib::breakpoint::BreakpointConsole; use crate::stdlib::breakpoint::RealBreakpointConsole; use crate::stdlib::extra::PrintHandler; @@ -94,27 +96,26 @@ enum EvaluatorError { ProfileOrInstrumentationAlreadyEnabled, #[error("Top frame is not def (internal error)")] TopFrameNotDef, - #[error( - "Coverage profile generation not implemented (but can be obtained with `.coverage()` function)" - )] - CoverageNotImplemented, #[error("Coverage not enabled")] CoverageNotEnabled, #[error("Local variable `{0}` referenced before assignment")] LocalVariableReferencedBeforeAssignment(String), + #[error("Max callstack size is already set")] + CallstackSizeAlreadySet, + #[error("Max callstack size cannot be zero")] + ZeroCallstackSize, } /// Number of bytes to allocate between GC's. pub(crate) const GC_THRESHOLD: usize = 100000; +/// Default value for max starlark stack size +pub(crate) const DEFAULT_STACK_SIZE: usize = 50; + /// Holds everything about an ongoing evaluation (local variables, globals, module resolution etc). -pub struct Evaluator<'v, 'a> { +pub struct Evaluator<'v, 'a, 'e> { // The module that is being used for this evaluation pub(crate) module_env: &'v Module, - // The module-level variables in scope at the moment. - // If `None` then we're in the initial module, use variables from `module_env`. - // If `Some` we've called a `def` in a loaded frozen module. 
- pub(crate) module_variables: Option>, /// Current function (`def` or `lambda`) frame: locals and bytecode stack. pub(crate) current_frame: BcFramePtr<'v>, // How we deal with a `load` function. @@ -140,7 +141,7 @@ pub struct Evaluator<'v, 'a> { // Used for line profiling stmt_profile: StmtProfile, // Holds things that require hooking into evaluation. - eval_instrumentation: EvaluationInstrumentation<'a>, + eval_instrumentation: EvaluationInstrumentation<'a, 'e>, // Total time spent in runtime typechecking. // Filled only if runtime typechecking profiling is enabled. pub(crate) typecheck_profile: TypecheckProfile, @@ -150,30 +151,34 @@ pub struct Evaluator<'v, 'a> { pub(crate) string_pool: StringPool, /// Field that can be used for any purpose you want (can store types you define). /// Typically accessed via native functions you also define. - pub extra: Option<&'a dyn AnyLifetime<'a>>, + pub extra: Option<&'a dyn AnyLifetime<'e>>, /// Called to perform console IO each time `breakpoint` function is called. pub(crate) breakpoint_handler: Option<Box<dyn Fn() -> anyhow::Result<Box<dyn BreakpointConsole>>>>, /// Use in implementation of `print` function. pub(crate) print_handler: &'a (dyn PrintHandler + 'a), + /// Deprecation handler. + pub(crate) soft_error_handler: &'a (dyn SoftErrorHandler + 'a), + /// Max size of starlark stack + pub(crate) max_callstack_size: Option<usize>, // The Starlark-level call-stack of functions. // Must go last because it's quite a big structure pub(crate) call_stack: CheapCallStack<'v>, } /// Just holds things that require using EvaluationCallbacksEnabled so that we can cache whether that needs to be enabled or not. -struct EvaluationInstrumentation<'a> { +struct EvaluationInstrumentation<'a, 'e: 'a> { // Bytecode profile. bc_profile: BcProfile, // Extra functions to run on each statement, usually empty - before_stmt: BeforeStmt<'a>, + before_stmt: BeforeStmt<'a, 'e>, heap_or_flame_profile: bool, // Whether we need to instrument evaluation or not, should be set if before_stmt or bc_profile are enabled. enabled: bool, } -impl<'a> EvaluationInstrumentation<'a> { - fn new() -> EvaluationInstrumentation<'a> { +impl<'a, 'e: 'a> EvaluationInstrumentation<'a, 'e> { + fn new() -> EvaluationInstrumentation<'a, 'e> { Self { bc_profile: BcProfile::new(), before_stmt: BeforeStmt::default(), @@ -186,20 +191,21 @@ impl<'a> EvaluationInstrumentation<'a> { self.heap_or_flame_profile = true; } - fn change<F: FnOnce(&mut Self)>(&mut self, f: F) { - f(self); + fn change<F: FnOnce(&mut Self) -> R, R>(&mut self, f: F) -> R { + let r = f(self); self.enabled = self.bc_profile.enabled() || self.before_stmt.enabled() || self.heap_or_flame_profile; + r } } // Implementing this forces users to be more careful about lifetimes that the Evaluator captures such that we could // add captures of types that implement Drop without needing changes to client code. -impl Drop for Evaluator<'_, '_> { +impl Drop for Evaluator<'_, '_, '_> { fn drop(&mut self) {} } -impl<'v, 'a> Evaluator<'v, 'a> { +impl<'v, 'a, 'e: 'a> Evaluator<'v, 'a, 'e> { /// Create a new [`Evaluator`] specifying the [`Module`] used for module variables.
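`change` above enforces an invariant rather than just mutating: every modification of the instrumentation re-derives the cached `enabled` flag, so the interpreter's hot path only ever tests a single bool. A reduced sketch of that pattern with stand-in fields:

#[derive(Default)]
struct Instrumentation {
    before_stmt: Vec<fn()>,
    profiling: bool,
    enabled: bool, // cached: `!before_stmt.is_empty() || profiling`
}

impl Instrumentation {
    // Route every mutation through here so `enabled` can never go stale.
    fn change<R>(&mut self, f: impl FnOnce(&mut Self) -> R) -> R {
        let r = f(self);
        self.enabled = !self.before_stmt.is_empty() || self.profiling;
        r
    }
}

fn main() {
    let mut instr = Instrumentation::default();
    let len = instr.change(|i| {
        i.before_stmt.push(|| ());
        i.before_stmt.len()
    });
    assert!(instr.enabled);
    assert_eq!(len, 1);
}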
/// /// If your program contains `load()` statements, you also need to call @@ -208,7 +214,6 @@ impl<'v, 'a> Evaluator<'v, 'a> { Evaluator { call_stack: CheapCallStack::default(), module_env: module, - module_variables: None, current_frame: BcFramePtr::null(), loader: None, extra: None, @@ -225,8 +230,10 @@ impl<'v, 'a> Evaluator<'v, 'a> { string_pool: StringPool::default(), breakpoint_handler: None, print_handler: &StderrPrintHandler, + soft_error_handler: &HardErrorSoftErrorHandler, verbose_gc: false, static_typechecking: false, + max_callstack_size: None, } } @@ -280,10 +287,10 @@ impl<'v, 'a> Evaluator<'v, 'a> { match mode { ProfileMode::HeapFlameRetained => self .module_env - .enable_heap_profile(RetainedHeapProfileMode::Flame), + .enable_retained_heap_profile(RetainedHeapProfileMode::Flame), ProfileMode::HeapSummaryRetained => self .module_env - .enable_heap_profile(RetainedHeapProfileMode::Summary), + .enable_retained_heap_profile(RetainedHeapProfileMode::Summary), _ => {} } @@ -320,19 +327,21 @@ impl<'v, 'a> Evaluator<'v, 'a> { /// Write a profile to a file. /// Only valid if the corresponding profiler was enabled. - pub fn write_profile<P: AsRef<Path>>(&mut self, filename: P) -> anyhow::Result<()> { + pub fn write_profile<P: AsRef<Path>>(&mut self, filename: P) -> crate::Result<()> { self.gen_profile()?.write(filename.as_ref()) } /// Generate profile for a given mode. /// Only valid if the corresponding profiler was enabled. - pub fn gen_profile(&mut self) -> anyhow::Result<ProfileData> { + pub fn gen_profile(&mut self) -> crate::Result<ProfileData> { let mode = match &self.profile_or_instrumentation_mode { ProfileOrInstrumentationMode::None => { - return Err(EvaluatorError::ProfilingNotEnabled.into()); + return Err(crate::Error::new_other(EvaluatorError::ProfilingNotEnabled)); } ProfileOrInstrumentationMode::Collected => { - return Err(EvaluatorError::ProfileDataAlreadyCollected.into()); + return Err(crate::Error::new_other( + EvaluatorError::ProfileDataAlreadyCollected, + )); } ProfileOrInstrumentationMode::Profile(mode) => mode.dupe(), }; @@ -345,10 +354,12 @@ impl<'v, 'a> Evaluator<'v, 'a> { .heap_profile .gen(self.heap(), HeapProfileFormat::FlameGraph), ProfileMode::HeapSummaryRetained | ProfileMode::HeapFlameRetained => { - Err(EvaluatorError::RetainedMemoryProfilingCannotBeObtainedFromEvaluator.into()) + Err(crate::Error::new_other( + EvaluatorError::RetainedMemoryProfilingCannotBeObtainedFromEvaluator, + )) } ProfileMode::Statement => self.stmt_profile.gen(), - ProfileMode::Coverage => Err(EvaluatorError::CoverageNotImplemented.into()), + ProfileMode::Coverage => self.stmt_profile.gen_coverage(), ProfileMode::Bytecode => self.gen_bc_profile(), ProfileMode::BytecodePairs => self.gen_bc_pairs_profile(), ProfileMode::TimeFlame => self.time_flame_profile.gen(), @@ -363,12 +374,12 @@ impl<'v, 'a> Evaluator<'v, 'a> { /// Note coverage is not precise, because /// * some optimizer transformations may create incorrect spans /// * some optimizer transformations may remove statements - pub fn coverage(&self) -> anyhow::Result> { + pub fn coverage(&self) -> crate::Result> { match self.profile_or_instrumentation_mode { ProfileOrInstrumentationMode::Profile(ProfileMode::Coverage) => { self.stmt_profile.coverage() } - _ => Err(EvaluatorError::CoverageNotEnabled.into()), + _ => Err(crate::Error::new_other(EvaluatorError::CoverageNotEnabled)), } } @@ -379,7 +390,7 @@ impl<'v, 'a> Evaluator<'v, 'a> { self.breakpoint_handler = Some(RealBreakpointConsole::factory()); } - /// Obtain the current call-stack, suitable for use with [`Diagnostic`].
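With `gen_coverage` wired up, `ProfileMode::Coverage` becomes usable end to end. A hedged usage sketch; `enable_profile`, `AstModule::parse`, and `eval_module` are the usual starlark entry points, but treat the exact names and signatures as assumptions against your crate version:

use starlark::environment::{Globals, Module};
use starlark::eval::{Evaluator, ProfileMode};
use starlark::syntax::{AstModule, Dialect};

fn main() -> anyhow::Result<()> {
    let ast = AstModule::parse("cov.star", "x = 1\ny = 2\n".to_owned(), &Dialect::Standard)?;
    let module = Module::new();
    let mut eval = Evaluator::new(&module);
    // Must be enabled before evaluation; `coverage()` errors otherwise.
    eval.enable_profile(&ProfileMode::Coverage)?;
    eval.eval_module(ast, &Globals::standard())?;
    for span in eval.coverage()? {
        println!("executed: {}", span);
    }
    Ok(())
}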
+ /// Obtain the current call-stack, suitable for use in diagnostics. pub fn call_stack(&self) -> CallStack { self.call_stack .to_diagnostic_frames(InlinedFrames::default()) @@ -404,12 +415,12 @@ impl<'v, 'a> Evaluator<'v, 'a> { pub(crate) fn before_stmt_fn( &mut self, - f: &'a dyn for<'v1> Fn(FileSpanRef, &mut Evaluator<'v1, 'a>), + f: &'a dyn for<'v1> Fn(FileSpanRef, &mut Evaluator<'v1, 'a, 'e>), ) { self.before_stmt(f.into()) } - pub(crate) fn before_stmt(&mut self, f: BeforeStmtFunc<'a>) { + pub(crate) fn before_stmt(&mut self, f: BeforeStmtFunc<'a, 'e>) { self.eval_instrumentation .change(|v| v.before_stmt.before_stmt.push(f)) } @@ -417,7 +428,7 @@ impl<'v, 'a> Evaluator<'v, 'a> { /// This function is used by DAP, and it is not public API. // TODO(nga): pull DAP into the crate, and hide this function. #[doc(hidden)] - pub fn before_stmt_for_dap(&mut self, f: BeforeStmtFunc<'a>) { + pub fn before_stmt_for_dap(&mut self, f: BeforeStmtFunc<'a, 'e>) { self.before_stmt(f) } @@ -426,6 +437,11 @@ impl<'v, 'a> Evaluator<'v, 'a> { self.print_handler = handler; } + /// Set deprecation handler. If not set, deprecations are treated as hard errors. + pub fn set_soft_error_handler(&mut self, handler: &'a (dyn SoftErrorHandler + 'a)) { + self.soft_error_handler = handler; + } + /// Called to add an entry to the call stack, by the function being invoked. /// Called for all types of function, including those written in Rust. #[inline(always)] @@ -433,15 +449,14 @@ impl<'v, 'a> Evaluator<'v, 'a> { &mut self, function: Value<'v>, span: Option>, - within: impl FnOnce(&mut Self) -> anyhow::Result, - ) -> anyhow::Result { + within: impl FnOnce(&mut Self) -> crate::Result, + ) -> crate::Result { #[cold] #[inline(never)] - fn add_diagnostics(e: anyhow::Error, me: &Evaluator) -> anyhow::Error { - Diagnostic::modify(e, |d: &mut Diagnostic| { - // Make sure we capture the call_stack before popping things off it - d.set_call_stack(|| me.call_stack.to_diagnostic_frames(InlinedFrames::default())); - }) + fn add_diagnostics(mut e: crate::Error, me: &Evaluator) -> crate::Error { + // Make sure we capture the call_stack before popping things off it + e.set_call_stack(|| me.call_stack.to_diagnostic_frames(InlinedFrames::default())); + e } self.call_stack.push(function, span)?; @@ -451,26 +466,6 @@ impl<'v, 'a> Evaluator<'v, 'a> { res } - /// Called to change the local variables, from the callee. - /// Only called for user written functions. - #[inline(always)] // There is only one caller - pub(crate) fn with_function_context( - &mut self, - def: Value<'v>, - module: Option>, // None == use module_env - bc: &Bc, - ) -> Result, EvalException> { - // Set up for the new function call - let old_module_variables = mem::replace(&mut self.module_variables, module); - - // Run the computation - let res = self.eval_bc(def, bc); - - // Restore them all back - self.module_variables = old_module_variables; - res - } - /// The active heap where [`Value`]s are allocated. 
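`add_diagnostics` above relies on `#[cold]`/`#[inline(never)]` to keep error construction out of the inlined hot path; the same trick appears on `error` in `get_slot_module` below. The general shape, in a standalone sketch:

#[cold]
#[inline(never)]
fn overflow_error(depth: usize) -> String {
    // Slow-path work (formatting, stack capture) stays out of callers' code.
    format!("call stack overflow at depth {}", depth)
}

fn push_frame(depth: &mut usize, max: usize) -> Result<(), String> {
    // The hot path is a compare-and-increment; the error branch never inlines.
    if *depth >= max {
        return Err(overflow_error(*depth));
    }
    *depth += 1;
    Ok(())
}

fn main() {
    let mut depth = 0;
    assert!(push_frame(&mut depth, 1).is_ok());
    assert!(push_frame(&mut depth, 1).is_err());
}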
pub fn heap(&self) -> &'v Heap { self.module_env.heap() @@ -490,24 +485,27 @@ impl<'v, 'a> Evaluator<'v, 'a> { self.module_env.frozen_heap() } - pub(crate) fn get_slot_module(&self, slot: ModuleSlotId) -> anyhow::Result> { + pub(crate) fn get_slot_module(&self, slot: ModuleSlotId) -> crate::Result> { // Make sure the error-path doesn't get inlined into the normal-path execution #[cold] #[inline(never)] - fn error<'v>(eval: &Evaluator<'v, '_>, slot: ModuleSlotId) -> anyhow::Error { - let name = match &eval.module_variables { - None => eval + fn error<'v>(eval: &Evaluator<'v, '_, '_>, slot: ModuleSlotId) -> crate::Error { + let name = match eval.top_frame_def_frozen_module(false) { + Err(e) => Some(format!("")), + Ok(None) => eval .module_env .mutable_names() .get_slot(slot) .map(|s| s.as_str().to_owned()), - Some(e) => e.get_slot_name(slot).map(|s| s.as_str().to_owned()), + Ok(Some(e)) => e.get_slot_name(slot).map(|s| s.as_str().to_owned()), } .unwrap_or_else(|| "".to_owned()); - EvaluatorError::LocalVariableReferencedBeforeAssignment(name).into() + crate::Error::new_other(EvaluatorError::LocalVariableReferencedBeforeAssignment( + name, + )) } - match &self.module_variables { + match self.top_frame_def_frozen_module(false)? { None => self.module_env.slots().get_slot(slot), Some(e) => e.get_slot(slot).map(Value::new_frozen), } @@ -517,17 +515,16 @@ impl<'v, 'a> Evaluator<'v, 'a> { // Make sure the error-path doesn't get inlined into the normal-path execution #[cold] #[inline(never)] - pub(crate) fn local_var_referenced_before_assignment( - &self, - slot: LocalSlotId, - ) -> anyhow::Error { + pub(crate) fn local_var_referenced_before_assignment(&self, slot: LocalSlotId) -> crate::Error { let def_info = match self.top_frame_def_info() { Ok(def_info) => def_info, Err(e) => return e, }; let names = &def_info.used; let name = names[slot.0 as usize].as_str().to_owned(); - EvaluatorError::LocalVariableReferencedBeforeAssignment(name).into() + crate::Error::new_other(EvaluatorError::LocalVariableReferencedBeforeAssignment( + name, + )) } #[inline(always)] @@ -535,7 +532,7 @@ impl<'v, 'a> Evaluator<'v, 'a> { &self, frame: BcFramePtr<'v>, slot: LocalSlotId, - ) -> anyhow::Result> { + ) -> crate::Result> { // We access locals from explicitly passed frame because it is faster. 
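The error path above is what a user sees when a local is read before it is written. A hedged end-to-end sketch that triggers it (entry-point names are the usual starlark API, assumed here):

use starlark::environment::{Globals, Module};
use starlark::eval::Evaluator;
use starlark::syntax::{AstModule, Dialect};

fn main() {
    // `x` is assigned later in `f`, so it is a local, and reading it
    // first should hit `LocalVariableReferencedBeforeAssignment`.
    let src = "def f():\n    y = x\n    x = 1\nf()\n";
    let ast = AstModule::parse("bad.star", src.to_owned(), &Dialect::Standard).unwrap();
    let module = Module::new();
    let mut eval = Evaluator::new(&module);
    let err = eval.eval_module(ast, &Globals::standard()).unwrap_err();
    assert!(err.to_string().contains("referenced before assignment"));
}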
debug_assert!(self.current_frame == frame); @@ -547,7 +544,7 @@ impl<'v, 'a> Evaluator<'v, 'a> { pub(crate) fn get_slot_local_captured( &self, slot: LocalCapturedSlotId, - ) -> anyhow::Result> { + ) -> crate::Result> { let value_captured = self.get_slot_local(self.current_frame, LocalSlotId(slot.0))?; let value_captured = value_captured_get(value_captured); value_captured @@ -601,7 +598,7 @@ impl<'v, 'a> Evaluator<'v, 'a> { &mut self, name: &str, value: Value<'v>, - ) -> anyhow::Result<()> { + ) -> crate::Result<()> { value.export_as(name, self)?; self.module_env.set(name, value); Ok(()) @@ -640,18 +637,18 @@ impl<'v, 'a> Evaluator<'v, 'a> { .set_slot(slot.to_captured_or_not(), value_captured); } - pub(crate) fn check_return_type(&mut self, ret: Value<'v>) -> anyhow::Result<()> { + pub(crate) fn check_return_type(&mut self, ret: Value<'v>) -> crate::Result<()> { let func = self.call_stack.top_nth_function(0)?; if let Some(func) = func.downcast_ref::() { func.check_return_type(ret, self) } else if let Some(func) = func.downcast_ref::() { func.check_return_type(ret, self) } else { - Err(EvaluatorError::TopFrameNotDef.into()) + Err(crate::Error::new_other(EvaluatorError::TopFrameNotDef)) } } - fn func_to_def_info(&self, func: Value<'_>) -> anyhow::Result> { + fn func_to_def_info(&self, func: Value<'_>) -> crate::Result> { if let Some(func) = func.downcast_ref::() { Ok(func.def_info) } else if let Some(func) = func.downcast_ref::() { @@ -660,28 +657,43 @@ impl<'v, 'a> Evaluator<'v, 'a> { // For module, it is `None`. Ok(self.module_def_info) } else { - Err(EvaluatorError::TopFrameNotDef.into()) + Err(crate::Error::new_other(EvaluatorError::TopFrameNotDef)) } } - pub(crate) fn top_frame_def_info(&self) -> anyhow::Result> { + pub(crate) fn top_frame_def_info(&self) -> crate::Result> { let func = self.call_stack.top_nth_function(0)?; self.func_to_def_info(func) } + pub(crate) fn top_frame_def_frozen_module( + &self, + for_debugger: bool, + ) -> anyhow::Result>> { + let func = self.top_frame_maybe_for_debugger(for_debugger)?; + if let Some(func) = func.downcast_ref::() { + Ok(func.module.load_relaxed()) + } else if let Some(func) = func.downcast_ref::() { + Ok(func.module.load_relaxed()) + } else { + Ok(None) + } + } + + fn top_frame_maybe_for_debugger(&self, for_debugger: bool) -> anyhow::Result> { + let func = self.call_stack.top_nth_function(0)?; + if for_debugger && func.downcast_ref::().is_some() { + // If top frame is `breakpoint` or `debug_evaluate`, it will be skipped. + self.call_stack.top_nth_function(1) + } else { + Ok(func) + } + } + /// Gets the "top frame" for debugging. If the real top frame is `breakpoint` or `debug_evaluate` /// it will be skipped. This should only be used for the starlark debugger. - pub(crate) fn top_frame_def_info_for_debugger(&self) -> anyhow::Result> { - let func = { - let top = self.call_stack.top_nth_function(0)?; - if top.downcast_ref::().is_some() { - // we are in `breakpoint` or `debug_evaluate` function, get the next frame. - self.call_stack.top_nth_function(1)? 
- } else { - top - } - }; - + pub(crate) fn top_frame_def_info_for_debugger(&self) -> crate::Result> { + let func = self.top_frame_maybe_for_debugger(true)?; self.func_to_def_info(func) } @@ -761,11 +773,11 @@ impl<'v, 'a> Evaluator<'v, 'a> { alloca.alloca_concat(x, y, |xs| k(xs, self)) } - pub(crate) fn gen_bc_profile(&mut self) -> anyhow::Result<ProfileData> { + pub(crate) fn gen_bc_profile(&mut self) -> crate::Result<ProfileData> { self.eval_instrumentation.bc_profile.gen_bc_profile() } - pub(crate) fn gen_bc_pairs_profile(&mut self) -> anyhow::Result<ProfileData> { + pub(crate) fn gen_bc_pairs_profile(&mut self) -> crate::Result<ProfileData> { self.eval_instrumentation.bc_profile.gen_bc_pairs_profile() } @@ -788,8 +800,23 @@ impl<'v, 'a> Evaluator<'v, 'a> { bc.run( self, &mut EvalCallbacksEnabled { - bc_profile: self.eval_instrumentation.bc_profile.enabled(), - before_stmt: self.eval_instrumentation.before_stmt.enabled(), + mode: match ( + self.eval_instrumentation.before_stmt.enabled(), + self.eval_instrumentation.bc_profile.enabled(), + ) { + (true, false) => EvalCallbacksMode::BeforeStmt, + (false, true) => EvalCallbacksMode::BcProfile, + (true, true) => { + return Err(EvalException::new_unknown_span(internal_error!( + "both before_stmt and bc_profile are enabled" + ))); + } + (false, false) => { + return Err(EvalException::new_unknown_span(internal_error!( + "neither before_stmt nor bc_profile are enabled" + ))); + } + }, stmt_locs: &bc.instrs.stmt_locs, bc_start_ptr: bc.instrs.start_ptr(), }, @@ -805,43 +832,79 @@ impl<'v, 'a> Evaluator<'v, 'a> { bc.run(self, &mut EvalCallbacksDisabled) } } + + /// Sets the max call-stack size. + /// The stack is allocated at the evaluation entry point if it has not been allocated yet. + pub fn set_max_callstack_size(&mut self, stack_size: usize) -> anyhow::Result<()> { + if stack_size == 0 { + return Err(EvaluatorError::ZeroCallstackSize.into()); + } + if self.max_callstack_size.is_some() { + return Err(EvaluatorError::CallstackSizeAlreadySet.into()); + } + self.max_callstack_size = Some(stack_size); + Ok(()) + } } pub(crate) trait EvaluationCallbacks { - fn before_instr(&mut self, _eval: &mut Evaluator, _ip: BcPtrAddr, _opcode: BcOpcode); + fn before_instr( + &mut self, + _eval: &mut Evaluator, + _ip: BcPtrAddr, + _opcode: BcOpcode, + ) -> crate::Result<()>; } pub(crate) struct EvalCallbacksDisabled; impl EvaluationCallbacks for EvalCallbacksDisabled { #[inline(always)] - fn before_instr(&mut self, _eval: &mut Evaluator, _ip: BcPtrAddr, _opcode: BcOpcode) {} + fn before_instr( + &mut self, + _eval: &mut Evaluator, + _ip: BcPtrAddr, + _opcode: BcOpcode, + ) -> crate::Result<()> { + Ok(()) + } +} + +pub(crate) enum EvalCallbacksMode { + BcProfile, + BeforeStmt, } pub(crate) struct EvalCallbacksEnabled<'a> { - pub(crate) bc_profile: bool, - pub(crate) before_stmt: bool, + pub(crate) mode: EvalCallbacksMode, pub(crate) stmt_locs: &'a BcStatementLocations, pub(crate) bc_start_ptr: BcPtrAddr<'a>, } impl<'a> EvalCallbacksEnabled<'a> { - fn before_stmt(&mut self, eval: &mut Evaluator, ip: BcPtrAddr) { + fn before_stmt(&mut self, eval: &mut Evaluator, ip: BcPtrAddr) -> crate::Result<()> { let offset = ip.offset_from(self.bc_start_ptr); if let Some(loc) = self.stmt_locs.stmt_at(offset) { - before_stmt(loc.span, eval); + before_stmt(loc.span, eval)?; } + Ok(()) } } impl<'a> EvaluationCallbacks for EvalCallbacksEnabled<'a> { #[inline(always)] - fn before_instr(&mut self, eval: &mut Evaluator, ip: BcPtrAddr, opcode: BcOpcode) { - if self.bc_profile { - eval.eval_instrumentation.bc_profile.before_instr(opcode) - } - if
self.before_stmt { - self.before_stmt(eval, ip); + fn before_instr( + &mut self, + eval: &mut Evaluator, + ip: BcPtrAddr, + opcode: BcOpcode, + ) -> crate::Result<()> { + match self.mode { + EvalCallbacksMode::BcProfile => { + eval.eval_instrumentation.bc_profile.before_instr(opcode); + Ok(()) + } + EvalCallbacksMode::BeforeStmt => self.before_stmt(eval, ip), } } } @@ -850,18 +913,26 @@ impl<'a> EvaluationCallbacks for EvalCallbacksEnabled<'a> { // The purposes are GC, profiling and debugging. // // This function is called only if `before_stmt` is set before compilation start. -pub(crate) fn before_stmt(span: FrameSpan, eval: &mut Evaluator) { +pub(crate) fn before_stmt(span: FrameSpan, eval: &mut Evaluator) -> crate::Result<()> { assert!( eval.eval_instrumentation.before_stmt.enabled(), "this code should only be called if `before_stmt` is set" ); - let mut fs = mem::take(&mut eval.eval_instrumentation.before_stmt.before_stmt); + let mut fs = eval.eval_instrumentation.change(|eval_instrumentation| { + mem::take(&mut eval_instrumentation.before_stmt.before_stmt) + }); + let mut result = Ok(()); for f in &mut fs { - f.call(span.span.file_span_ref(), eval) + if result.is_ok() { + result = f.call(span.span.file_span_ref(), eval); + } } - let added = mem::replace(&mut eval.eval_instrumentation.before_stmt.before_stmt, fs); + let added = eval.eval_instrumentation.change(|eval_instrumentation| { + mem::replace(&mut eval_instrumentation.before_stmt.before_stmt, fs) + }); assert!( added.is_empty(), "`before_stmt` cannot be modified during evaluation" ); + result } diff --git a/starlark-rust/starlark/src/eval/runtime/file_loader.rs b/starlark-rust/starlark/src/eval/runtime/file_loader.rs index 40b9694a2899a..a19fd226dc78b 100644 --- a/starlark-rust/starlark/src/eval/runtime/file_loader.rs +++ b/starlark-rust/starlark/src/eval/runtime/file_loader.rs @@ -53,10 +53,12 @@ impl<'a> FileLoader for ReturnFileLoader<'a> { } /// Same as [`ReturnFileLoader`], but does not require fighting the borrow checker. +#[cfg(test)] pub(crate) struct ReturnOwnedFileLoader { pub(crate) modules: HashMap, } +#[cfg(test)] impl FileLoader for ReturnOwnedFileLoader { fn load(&self, path: &str) -> anyhow::Result { match self.modules.get(path) { diff --git a/starlark-rust/starlark/src/eval/runtime/inlined_frame.rs b/starlark-rust/starlark/src/eval/runtime/inlined_frame.rs index edf181a1a2845..e1b801668ac61 100644 --- a/starlark-rust/starlark/src/eval/runtime/inlined_frame.rs +++ b/starlark-rust/starlark/src/eval/runtime/inlined_frame.rs @@ -132,7 +132,7 @@ impl<'f> InlinedFrameAlloc<'f> { return last_alloc; } } - let frame = self.frozen_heap.alloc_any_display_from_debug(frame); + let frame = self.frozen_heap.alloc_any(frame); self.last_alloc = Some(frame); frame } @@ -172,7 +172,7 @@ mod tests { fn make_span(heap: &FrozenHeap, text: &str) -> FrameSpan { let codemap = CodeMap::new(format!("{}.bzl", text), text.to_owned()); - let codemap = heap.alloc_any_display_from_debug(codemap); + let codemap = heap.alloc_any(codemap); FrameSpan { span: FrozenFileSpan::new(codemap, codemap.full_span()), inlined_frames: InlinedFrames::default(), diff --git a/starlark-rust/starlark/src/eval/runtime/params.rs b/starlark-rust/starlark/src/eval/runtime/params.rs index 1628ab45c9a1b..e40819533fee2 100644 --- a/starlark-rust/starlark/src/eval/runtime/params.rs +++ b/starlark-rust/starlark/src/eval/runtime/params.rs @@ -17,966 +17,6 @@ //! Function parameters. 
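Returning to the evaluator API above, the two new error variants pin down `set_max_callstack_size`'s contract: zero is rejected, and the size can only be set once, before evaluation allocates the stack. A usage sketch:

use starlark::environment::Module;
use starlark::eval::Evaluator;

fn main() {
    let module = Module::new();
    let mut eval = Evaluator::new(&module);
    assert!(eval.set_max_callstack_size(0).is_err()); // ZeroCallstackSize
    assert!(eval.set_max_callstack_size(100).is_ok());
    assert!(eval.set_max_callstack_size(200).is_err()); // CallstackSizeAlreadySet
}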
-use std::cell::Cell; -use std::cmp; -use std::collections::HashMap; -use std::iter; - -use allocative::Allocative; -use dupe::Dupe; -use starlark_derive::Freeze; -use starlark_derive::Trace; -use starlark_map::small_map::SmallMap; -use starlark_map::Hashed; - -use crate as starlark; -use crate::coerce::coerce; -use crate::coerce::Coerce; -use crate::collections::symbol_map::SymbolMap; -use crate::docs::DocParam; -use crate::docs::DocString; -use crate::eval::runtime::arguments::ArgSymbol; -use crate::eval::runtime::arguments::ArgumentsImpl; -use crate::eval::runtime::arguments::FunctionError; -use crate::eval::runtime::arguments::ResolvedArgName; -use crate::eval::Arguments; -use crate::eval::Evaluator; -use crate::hint::unlikely; -use crate::typing::Ty; -use crate::values::dict::Dict; -use crate::values::dict::DictRef; -use crate::values::Heap; -use crate::values::StringValue; -use crate::values::UnpackValue; -use crate::values::Value; -use crate::values::ValueLike; - -#[derive(Debug, Copy, Clone, Dupe, Coerce, PartialEq, Trace, Freeze, Allocative)] -#[repr(C)] -pub(crate) enum ParameterKind { - Required, - /// When optional parameter is not supplied, there's no error, - /// but the slot remains `None`. - /// - /// This is used only in native code, parameters of type `Option` become `Optional`. - Optional, - Defaulted(V), - Args, - KWargs, -} - -#[derive(Debug, Copy, Clone, Dupe, PartialEq, Eq, PartialOrd, Ord)] -enum CurrentParameterStyle { - /// Parameter can be only filled positionally. - PosOnly, - /// Parameter can be filled positionally or by name. - PosOrNamed, - /// Parameter can be filled by name only. - NamedOnly, - /// No more args accepted. - NoMore, -} - -/// Builder for [`ParametersSpec`] -pub struct ParametersSpecBuilder { - function_name: String, - params: Vec<(String, ParameterKind)>, - names: SymbolMap, - /// Number of parameters that can be filled only positionally. - positional_only: usize, - /// Number of parameters that can be filled positionally. - positional: usize, - - /// Has the no_args been passed - current_style: CurrentParameterStyle, - - args: Option, - kwargs: Option, -} - -/// Define a list of parameters. This code assumes that all names are distinct and that -/// `*args`/`**kwargs` occur in well-formed locations. -// V = Value, or FrozenValue -#[derive(Debug, Clone, Trace, Freeze, Allocative)] -#[repr(C)] -pub struct ParametersSpec { - /// Only used in error messages - function_name: String, - - /// Parameters in the order they occur. - param_kinds: Box<[ParameterKind]>, - /// Parameter names in the order they occur. - param_names: Box<[String]>, - /// Mapping from name to index where the argument lives. - #[freeze(identity)] - pub(crate) names: SymbolMap, - - /// Number of arguments that can be filled only positionally. - positional_only: u32, - /// Number of arguments that can be filled positionally. - /// Excludes *args/**kwargs, keyword arguments after *args - positional: u32, - - /// The index at which *args should go - args: Option, - /// The index at which **kwargs should go - kwargs: Option, -} - -// Can't derive this since we don't want ParameterKind to be public -unsafe impl, To> Coerce> for ParametersSpec {} - -impl ParametersSpecBuilder { - fn add(&mut self, name: &str, val: ParameterKind) { - assert!(!matches!(val, ParameterKind::Args | ParameterKind::KWargs)); - - // Regular arguments cannot follow `**kwargs`, but can follow `*args`. 
- assert!(self.current_style < CurrentParameterStyle::NoMore); - assert!(self.kwargs.is_none()); - - let i = self.params.len(); - self.params.push((name.to_owned(), val)); - if self.current_style != CurrentParameterStyle::PosOnly { - let old = self.names.insert(name, i.try_into().unwrap()); - assert!(old.is_none(), "Repeated parameter `{}`", name); - } - if self.args.is_none() && self.current_style != CurrentParameterStyle::NamedOnly { - // If you've already seen `args` or `no_args`, you can't enter these - // positionally - self.positional = i + 1; - if self.current_style == CurrentParameterStyle::PosOnly { - self.positional_only = i + 1; - } - } - } - - /// Add a required parameter. Will be an error if the caller doesn't supply - /// it. If you want to supply a position-only argument, prepend a `$` to - /// the name. - pub fn required(&mut self, name: &str) { - self.add(name, ParameterKind::Required); - } - - /// Add an optional parameter. Will be None if the caller doesn't supply it. - /// If you want to supply a position-only argument, prepend a `$` to the - /// name. - pub fn optional(&mut self, name: &str) { - self.add(name, ParameterKind::Optional); - } - - /// Add an optional parameter. Will be the default value if the caller - /// doesn't supply it. If you want to supply a position-only argument, - /// prepend a `$` to the name. - pub fn defaulted(&mut self, name: &str, val: V) { - self.add(name, ParameterKind::Defaulted(val)); - } - - /// Add an `*args` parameter which will be an iterable sequence of parameters, - /// recorded into a [`Vec`]. A function can only have one `args` - /// parameter. After this call, any subsequent - /// [`required`](ParametersSpecBuilder::required), - /// [`optional`](ParametersSpecBuilder::optional) or - /// [`defaulted`](ParametersSpecBuilder::defaulted) - /// parameters can _only_ be supplied by name. - pub fn args(&mut self) { - assert!(self.args.is_none()); - assert!(self.current_style < CurrentParameterStyle::NamedOnly); - assert!(self.kwargs.is_none()); - self.params.push(("*args".to_owned(), ParameterKind::Args)); - self.args = Some(self.params.len() - 1); - self.current_style = CurrentParameterStyle::NamedOnly; - } - - /// Following parameters can be filled positionally or by name. - pub fn no_more_positional_only_args(&mut self) { - assert_eq!(self.current_style, CurrentParameterStyle::PosOnly); - self.current_style = CurrentParameterStyle::PosOrNamed; - } - - /// This function has no `*args` parameter, corresponds to the Python parameter `*`. - /// After this call, any subsequent - /// [`required`](ParametersSpecBuilder::required), - /// [`optional`](ParametersSpecBuilder::optional) or - /// [`defaulted`](ParametersSpecBuilder::defaulted) - /// parameters can _only_ be supplied by name. - pub fn no_more_positional_args(&mut self) { - assert!(self.args.is_none()); - assert!(self.current_style < CurrentParameterStyle::NamedOnly); - assert!(self.kwargs.is_none()); - self.current_style = CurrentParameterStyle::NamedOnly; - } - - /// Add a `**kwargs` parameter which will be a dictionary, recorded into a [`SmallMap`]. - /// A function can only have one `kwargs` parameter. - /// parameter. After this call, any subsequent - /// [`required`](ParametersSpecBuilder::required), - /// [`optional`](ParametersSpecBuilder::optional) or - /// [`defaulted`](ParametersSpecBuilder::defaulted) - /// parameters can _only_ be supplied by position. 
- pub fn kwargs(&mut self) { - assert!(self.kwargs.is_none()); - self.params - .push(("**kwargs".to_owned(), ParameterKind::KWargs)); - self.current_style = CurrentParameterStyle::NoMore; - self.kwargs = Some(self.params.len() - 1); - } - - /// Construct the parameters specification. - pub fn finish(self) -> ParametersSpec { - let ParametersSpecBuilder { - function_name, - positional_only, - positional, - args, - current_style, - kwargs, - params, - names, - } = self; - let _ = current_style; - let positional_only: u32 = positional_only.try_into().unwrap(); - let positional: u32 = positional.try_into().unwrap(); - assert!(positional_only <= positional); - ParametersSpec { - function_name, - param_kinds: params.iter().map(|p| p.1).collect(), - param_names: params.into_iter().map(|p| p.0).collect(), - names, - positional_only, - positional, - args: args.map(|args| args.try_into().unwrap()), - kwargs: kwargs.map(|kwargs| kwargs.try_into().unwrap()), - } - } -} - -impl ParametersSpec { - /// Create a new [`ParametersSpec`] with the given function name. - pub fn new(function_name: String) -> ParametersSpecBuilder { - Self::with_capacity(function_name, 0) - } - - /// Create a new [`ParametersSpec`] with the given function name and an advance capacity hint. - pub fn with_capacity(function_name: String, capacity: usize) -> ParametersSpecBuilder { - ParametersSpecBuilder { - function_name, - params: Vec::with_capacity(capacity), - names: SymbolMap::with_capacity(capacity), - positional_only: 0, - positional: 0, - current_style: CurrentParameterStyle::PosOnly, - args: None, - kwargs: None, - } - } - - /// Produce an approximate signature for the function, combining the name and arguments. - pub fn signature(&self) -> String { - let mut collector = String::new(); - self.collect_signature(&mut collector); - collector - } - - // Generate a good error message for it - pub(crate) fn collect_signature(&self, collector: &mut String) { - collector.push_str(&self.function_name); - - // We used to make the "name" of a function include all its parameters, but that is a lot of - // details and visually crowds out everything else. Try disabling, although we might want it - // in some contexts, so don't delete it. - } - - /// Function parameter as they would appear in `def` - /// (excluding types, default values and formatting). - pub fn parameters_str(&self) -> String { - let mut emitted_star = false; - let mut collector = String::new(); - for (i, typ) in self.iter_params().enumerate() { - if !collector.is_empty() { - collector.push_str(", "); - } - - // TODO: also print `/` for positional-only parameters. - - if i == (self.positional as usize) - && !emitted_star - && !matches!(typ.1, ParameterKind::Args | ParameterKind::KWargs) - { - collector.push_str("*, "); - emitted_star = true; - } - - match typ.1 { - ParameterKind::Args | ParameterKind::KWargs => { - // For `*args` or `**kwargs` param name includes the `*` or `**`. 
- collector.push_str(typ.0); - emitted_star = true; - } - ParameterKind::Required => { - collector.push_str(typ.0); - } - ParameterKind::Optional | ParameterKind::Defaulted(_) => { - collector.push_str(typ.0); - collector.push_str(" = ..."); - } - } - } - collector - } - - /// Iterate over the parameters - /// - /// Returns an iterator over (parameter index, name, kind) - pub(crate) fn iter_params(&self) -> impl Iterator)> { - assert_eq!(self.param_names.len(), self.param_kinds.len()); - self.param_names - .iter() - .map(|name| name.as_str()) - .zip(&*self.param_kinds) - } - - pub(crate) fn resolve_name(&self, name: Hashed<&str>) -> ResolvedArgName { - let hash = name.hash(); - let param_index = self.names.get_hashed_str(name).copied(); - ResolvedArgName { hash, param_index } - } - - pub(crate) fn has_args_or_kwargs(&self) -> bool { - self.args.is_some() || self.kwargs.is_some() - } -} - -impl<'v, V: ValueLike<'v>> ParametersSpec { - /// Number of function parameters. - pub fn len(&self) -> usize { - self.param_kinds.len() - } - - /// Move parameters from [`Arguments`] to a list of [`Value`], - /// using the supplied [`ParametersSpec`]. - pub fn collect( - &self, - args: &Arguments<'v, '_>, - slots: &[Cell>>], - heap: &'v Heap, - ) -> anyhow::Result<()> { - self.collect_inline(&args.0, slots, heap) - } - - /// Collect `N` arguments. - /// - /// This function is called by generated code. - pub fn collect_into( - &self, - args: &Arguments<'v, '_>, - heap: &'v Heap, - ) -> anyhow::Result<[Cell>>; N]> { - let slots = [(); N].map(|_| Cell::new(None)); - self.collect(args, &slots, heap)?; - Ok(slots) - } - - /// A variant of collect that is always inlined - /// for Def and NativeFunction that are hot-spots - #[inline(always)] - pub(crate) fn collect_inline<'a, A: ArgumentsImpl<'v, 'a>>( - &self, - args: &A, - slots: &[Cell>>], - heap: &'v Heap, - ) -> anyhow::Result<()> - where - 'v: 'a, - { - // If the arguments equal the length and the kinds, and we don't have any other args, - // then no_args, *args and **kwargs must all be unset, - // and we don't have to crate args/kwargs objects, we can skip everything else - if args.pos().len() == (self.positional as usize) - && args.pos().len() == self.param_kinds.len() - && args.named().is_empty() - && args.args().is_none() - && args.kwargs().is_none() - { - for (v, s) in args.pos().iter().zip(slots.iter()) { - s.set(Some(*v)); - } - - return Ok(()); - } - - self.collect_slow(args, slots, heap) - } - - fn collect_slow<'a, A: ArgumentsImpl<'v, 'a>>( - &self, - args: &A, - slots: &[Cell>>], - heap: &'v Heap, - ) -> anyhow::Result<()> - where - 'v: 'a, - { - /// Lazily initialized `kwargs` object. 
- #[derive(Default)] - struct LazyKwargs<'v> { - kwargs: Option, Value<'v>>>, - } - - impl<'v> LazyKwargs<'v> { - // Return true if the value is a duplicate - #[inline(always)] - fn insert(&mut self, key: Hashed>, val: Value<'v>) -> bool { - match &mut self.kwargs { - None => { - let mut mp = SmallMap::with_capacity(12); - mp.insert_hashed_unique_unchecked(key, val); - self.kwargs = Some(mp); - false - } - Some(mp) => mp.insert_hashed(key, val).is_some(), - } - } - - fn alloc(self, heap: &'v Heap) -> Value<'v> { - let kwargs = match self.kwargs { - Some(kwargs) => Dict::new(coerce(kwargs)), - None => Dict::default(), - }; - heap.alloc(kwargs) - } - } - - let len = self.param_kinds.len(); - // We might do unchecked stuff later on, so make sure we have as many slots as we expect - assert!(slots.len() >= len); - - let mut star_args = Vec::new(); - let mut kwargs = LazyKwargs::default(); - let mut next_position = 0; - - // First deal with positional parameters - if args.pos().len() <= (self.positional as usize) { - // fast path for when we don't need to bounce down to filling in args - for (v, s) in args.pos().iter().zip(slots.iter()) { - s.set(Some(*v)); - } - next_position = args.pos().len(); - } else { - for v in args.pos() { - if next_position < (self.positional as usize) { - slots[next_position].set(Some(*v)); - next_position += 1; - } else { - star_args.push(*v); - } - } - } - - // Next deal with named parameters - // The lowest position at which we've written a name. - // If at the end lowest_name is less than next_position, we got the same variable twice. - // So no duplicate checking until after all positional arguments - let mut lowest_name = usize::MAX; - // Avoid a lot of loop setup etc in the common case - if !args.names().is_empty() { - for ((name, name_value), v) in args.names().iter().zip(args.named()) { - // Safe to use new_unchecked because hash for the Value and str are the same - match name.get_index_from_param_spec(self) { - None => { - kwargs.insert(Hashed::new_unchecked(name.small_hash(), *name_value), *v); - } - Some(i) => { - slots[i].set(Some(*v)); - lowest_name = cmp::min(lowest_name, i); - } - } - } - } - - // Next up are the *args parameters - if let Some(param_args) = args.args() { - for v in param_args - .iterate(heap) - .map_err(|_| FunctionError::ArgsArrayIsNotIterable)? 
- { - if next_position < (self.positional as usize) { - slots[next_position].set(Some(v)); - next_position += 1; - } else { - star_args.push(v); - } - } - } - - // Check if the named arguments clashed with the positional arguments - if unlikely(next_position > lowest_name) { - return Err(FunctionError::RepeatedArg { - name: self.param_names[lowest_name].clone(), - } - .into()); - } - - // Now insert the kwargs, if there are any - if let Some(param_kwargs) = args.kwargs() { - match DictRef::from_value(param_kwargs) { - Some(y) => { - for (k, v) in y.iter_hashed() { - match StringValue::new(*k.key()) { - None => return Err(FunctionError::ArgsValueIsNotString.into()), - Some(s) => { - let repeat = match self - .names - .get_hashed_string_value(Hashed::new_unchecked(k.hash(), s)) - { - None => kwargs.insert(Hashed::new_unchecked(k.hash(), s), v), - Some(i) => { - let this_slot = &slots[*i as usize]; - let repeat = this_slot.get().is_some(); - this_slot.set(Some(v)); - repeat - } - }; - if unlikely(repeat) { - return Err(FunctionError::RepeatedArg { - name: s.as_str().to_owned(), - } - .into()); - } - } - } - } - } - None => return Err(FunctionError::KwArgsIsNotDict.into()), - } - } - - // We have moved parameters into all the relevant slots, so need to finalise things. - // We need to set default values and error if any required values are missing - let kinds = &*self.param_kinds; - // This code is very hot, and setting up iterators was a noticeable bottleneck. - for index in next_position..kinds.len() { - // The number of locals must be at least the number of parameters, see `collect` - // which reserves `max(_, kinds.len())`. - let slot = unsafe { slots.get_unchecked(index) }; - let def = unsafe { kinds.get_unchecked(index) }; - - // We know that up to next_position got filled positionally, so we don't need to check those - if slot.get().is_some() { - continue; - } - match def { - ParameterKind::Required => { - return Err(FunctionError::MissingParameter { - name: self.param_names[index].clone(), - function: self.signature(), - } - .into()); - } - ParameterKind::Defaulted(x) => { - slot.set(Some(x.to_value())); - } - _ => {} - } - } - - // Now set the kwargs/args slots, if they are requested, and fail it they are absent but used - // Note that we deliberately give warnings about missing parameters _before_ giving warnings - // about unexpected extra parameters, so if a user misspells an argument they get a better error. - if let Some(args_pos) = self.args { - slots[args_pos as usize].set(Some(heap.alloc_tuple(&star_args))); - } else if unlikely(!star_args.is_empty()) { - return Err(FunctionError::ExtraPositionalArg { - count: star_args.len(), - function: self.signature(), - } - .into()); - } - - if let Some(kwargs_pos) = self.kwargs { - slots[kwargs_pos as usize].set(Some(kwargs.alloc(heap))); - } else if let Some(kwargs) = kwargs.kwargs { - return Err(FunctionError::ExtraNamedArg { - names: kwargs.keys().map(|x| x.as_str().to_owned()).collect(), - function: self.signature(), - } - .into()); - } - Ok(()) - } - - /// Check if current parameters can be filled with given arguments signature. - #[allow(clippy::needless_range_loop)] - pub fn can_fill_with_args(&self, pos: usize, names: &[&str]) -> bool { - let mut filled = vec![false; self.param_kinds.len()]; - for p in 0..pos { - if p < (self.positional as usize) { - filled[p] = true; - } else if self.args.is_some() { - // Filled into `*args`. 
- } else { - return false; - } - } - if pos > (self.positional as usize) && self.args.is_none() { - return false; - } - for name in names { - match self.names.get_str(name) { - Some(i) => { - if filled[*i as usize] { - // Duplicate argument. - return false; - } - filled[*i as usize] = true; - } - None => { - if self.kwargs.is_none() { - return false; - } - } - } - } - for (filled, p) in filled.iter().zip(self.param_kinds.iter()) { - if *filled { - continue; - } - match p { - ParameterKind::Args => {} - ParameterKind::KWargs => {} - ParameterKind::Defaulted(_) => {} - ParameterKind::Optional => {} - ParameterKind::Required => return false, - } - } - true - } - - /// Generate documentation for each of the parameters. - /// - /// # Arguments - /// * `parameter_types` should be a mapping of parameter index to type - /// * `parameter_docs` should be a mapping of parameter name to possible documentation for - /// that parameter - pub fn documentation( - &self, - parameter_types: Vec, - mut parameter_docs: HashMap>, - ) -> Vec { - assert_eq!( - self.param_kinds.len(), - parameter_types.len(), - "function: `{}`", - self.function_name, - ); - self.iter_params() - .enumerate() - .zip(parameter_types) - .flat_map(|((i, (name, kind)), typ)| { - let docs = parameter_docs.remove(name).flatten(); - let name = name.to_owned(); - - // Add `/` before the first named parameter. - let only_pos_before = if i != 0 && i == self.positional_only as usize { - Some(DocParam::OnlyPosBefore) - } else { - None - }; - - // Add `*` before first named-only parameter. - let no_args = match kind { - ParameterKind::Args | ParameterKind::KWargs => None, - ParameterKind::Required - | ParameterKind::Optional - | ParameterKind::Defaulted(_) => { - if i == self.positional as usize { - Some(DocParam::NoArgs) - } else { - None - } - } - }; - - let doc_param = match kind { - ParameterKind::Required => DocParam::Arg { - name, - docs, - typ, - default_value: None, - }, - ParameterKind::Optional => DocParam::Arg { - name, - docs, - typ, - default_value: Some("_".to_owned()), - }, - ParameterKind::Defaulted(v) => DocParam::Arg { - name, - docs, - typ, - default_value: Some(v.to_value().to_repr()), - }, - ParameterKind::Args => DocParam::Args { name, docs, typ }, - ParameterKind::KWargs => DocParam::Kwargs { name, docs, typ }, - }; - only_pos_before - .into_iter() - .chain(no_args) - .chain(iter::once(doc_param)) - }) - .chain( - // Add last `/`. - if self.positional_only == self.param_kinds.len() as u32 - && self.param_kinds.len() != 0 - { - Some(DocParam::OnlyPosBefore) - } else { - None - }, - ) - .collect() - } - - /// Create a [`ParametersParser`] for given arguments. - pub fn parser( - &self, - args: &Arguments<'v, '_>, - eval: &mut Evaluator<'v, '_>, - k: F, - ) -> anyhow::Result - where - F: FnOnce(ParametersParser<'v, '_>, &mut Evaluator<'v, '_>) -> anyhow::Result, - { - eval.alloca_init( - self.len(), - || Cell::new(None), - |slots, eval| { - self.collect_inline(&args.0, slots, eval.heap())?; - let parser = ParametersParser::new(slots); - k(parser, eval) - }, - ) - } -} - -/// Parse a series of parameters which were specified by [`ParametersSpec`]. -/// -/// This is usually created with [`ParametersSpec::parser`]. -pub struct ParametersParser<'v, 'a>(std::slice::Iter<'a, Cell>>>); - -impl<'v, 'a> ParametersParser<'v, 'a> { - /// Create a parameter parser, which stored parameters into provided slots reference. 
- pub fn new(slots: &'a [Cell>>]) -> Self { - Self(slots.iter()) - } - - fn get_next(&mut self) -> Option> { - let v = self - .0 - .next() - .expect("ParametersParser: wrong number of requested arguments"); - v.get() - } - - /// Obtain the next parameter, corresponding to [`ParametersSpecBuilder::optional`]. - /// It is an error to request more parameters than were specified. - /// The `name` is only used for error messages. - pub fn next_opt>(&mut self, name: &str) -> anyhow::Result> { - match self.get_next() { - None => Ok(None), - Some(v) => Ok(Some(T::unpack_named_param(v, name)?)), - } - } - - /// Obtain the next parameter, which can't be defined by [`ParametersSpecBuilder::optional`]. - /// It is an error to request more parameters than were specified. - /// The `name` is only used for error messages. - pub fn next>(&mut self, name: &str) -> anyhow::Result { - // After ParametersCollect.done() all variables will be Some, - // apart from those where we called ParametersSpec.optional(), - // and for those we should call next_opt() - - // This is definitely not unassigned because ParametersCollect.done checked - // that. - let v = self.get_next().unwrap(); - T::unpack_named_param(v, name) - } -} - -#[cfg(test)] -mod tests { - use std::collections::HashMap; - - use crate::assert::Assert; - use crate::docs::DocParam; - use crate::docs::DocString; - use crate::docs::DocStringKind; - use crate::eval::compiler::def::FrozenDef; - use crate::eval::runtime::params::ParameterKind; - use crate::eval::ParametersSpec; - use crate::typing::Ty; - use crate::values::FrozenValue; - - #[test] - fn test_parameter_iteration() { - let mut p = ParametersSpec::::new("f".to_owned()); - p.required("a"); - p.optional("b"); - p.no_more_positional_args(); - p.optional("c"); - p.kwargs(); - let p = p.finish(); - - let params: Vec<(&str, &ParameterKind)> = p.iter_params().collect(); - - let expected: Vec<(&str, &ParameterKind)> = vec![ - ("a", &ParameterKind::Required), - ("b", &ParameterKind::Optional), - ("c", &ParameterKind::Optional), - ("**kwargs", &ParameterKind::KWargs), - ]; - - assert_eq!(expected, params); - - let mut p = ParametersSpec::::new("f".to_owned()); - p.required("a"); - p.args(); - p.kwargs(); - let p = p.finish(); - - let params: Vec<(&str, &ParameterKind)> = p.iter_params().collect(); - - let expected: Vec<(&str, &ParameterKind)> = vec![ - ("a", &ParameterKind::Required), - ("*args", &ParameterKind::Args), - ("**kwargs", &ParameterKind::KWargs), - ]; - - assert_eq!(expected, params); - - let mut p = ParametersSpec::::new("f".to_owned()); - p.args(); - p.optional("a"); - p.optional("b"); - let p = p.finish(); - - let params: Vec<(&str, &ParameterKind)> = p.iter_params().collect(); - - let expected: Vec<(&str, &ParameterKind)> = vec![ - ("*args", &ParameterKind::Args), - ("a", &ParameterKind::Optional), - ("b", &ParameterKind::Optional), - ]; - - assert_eq!(expected, params); - } - - #[test] - fn test_documentation() -> anyhow::Result<()> { - // Make sure that documentation for some odder parameter specs works properly. 
- let mut p = ParametersSpec::::new("f".to_owned()); - p.args(); - p.optional("a"); - p.optional("b"); - let p = p.finish(); - - let expected = vec![ - DocParam::Args { - name: "*args".to_owned(), - docs: None, - typ: Ty::any(), - }, - DocParam::Arg { - name: "a".to_owned(), - docs: None, - typ: Ty::int(), - default_value: Some("_".to_owned()), - }, - DocParam::Arg { - name: "b".to_owned(), - docs: DocString::from_docstring(DocStringKind::Rust, "param b docs"), - typ: Ty::any(), - default_value: Some("_".to_owned()), - }, - ]; - let types = vec![Ty::any(), Ty::int(), Ty::any()]; - let mut docs = HashMap::new(); - docs.insert("a".to_owned(), None); - docs.insert( - "b".to_owned(), - DocString::from_docstring(DocStringKind::Rust, "param b docs"), - ); - - let params = p.documentation(types, docs); - assert_eq!(expected, params); - Ok(()) - } - - #[test] - fn test_parameters_str() { - fn test(sig: &str) { - let a = Assert::new(); - let f = a - .pass_module(&format!("def f({sig}): pass")) - .get("f") - .unwrap(); - assert_eq!(sig, &f.value().parameters_spec().unwrap().parameters_str()); - } - - test(""); - - test("a, b, c, d, e, f, g, h, *args, **kwargs"); - - test("*, a"); - test("x, *, a"); - - test("*args, a"); - test("x, *args, a"); - - test("**kwargs"); - test("a, **kwargs"); - } - - #[test] - fn test_can_fill_with_args() { - fn test(sig: &str, pos: usize, names: &[&str], expected: bool) { - let a = Assert::new(); - let module = a.pass_module(&format!("def f({}): pass", sig)); - let f = module.get("f").unwrap().downcast::().unwrap(); - let parameters_spec = &f.parameters; - assert_eq!(expected, parameters_spec.can_fill_with_args(pos, names)); - } - - test("", 0, &[], true); - test("", 1, &[], false); - test("", 0, &["a"], false); - - test("a", 1, &[], true); - test("a", 0, &["a"], true); - test("a", 1, &["a"], false); - test("a", 0, &["x"], false); - - test("a, b = 1", 1, &[], true); - test("a, b = 1", 2, &[], true); - test("a, b = 1", 0, &["a"], true); - test("a, b = 1", 0, &["b"], false); - test("a, b = 1", 0, &["a", "b"], true); - - test("*, a", 0, &[], false); - test("*, a", 1, &[], false); - test("*, a", 0, &["a"], true); - - test("a, *args", 0, &[], false); - test("a, *args", 1, &[], true); - test("a, *args", 10, &[], true); - - test("*args, b", 0, &[], false); - test("*args, b", 1, &[], false); - test("*args, b", 0, &["b"], true); - - test("**kwargs", 0, &[], true); - test("**kwargs", 0, &["a"], true); - test("**kwargs", 1, &[], false); - - // No test for positional-only args because we can't create them in starlark. - } -} +pub(crate) mod display; +pub(crate) mod parser; +pub(crate) mod spec; diff --git a/starlark-rust/starlark/src/eval/runtime/params/display.rs b/starlark-rust/starlark/src/eval/runtime/params/display.rs new file mode 100644 index 0000000000000..78c455b2fb13a --- /dev/null +++ b/starlark-rust/starlark/src/eval/runtime/params/display.rs @@ -0,0 +1,165 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::fmt; +use std::fmt::Display; +use std::iter; + +/// A parameter, or a `*` or `/` separator when one is needed for formatting. +pub enum FmtParam<T> { + /// Positional-only, positional-or-named, or named-only parameter. + Regular(T), + /// `*args` parameter. + Args(T), + /// `**kwargs` parameter. + Kwargs(T), + /// `/` separator. + Slash, + /// `*` separator. + Star, +} + +/// Flatten parameters and insert `/` and `*` separators if needed. +pub(crate) fn iter_fmt_param_spec<T>( + pos_only: impl IntoIterator<Item = T>, + pos_named: impl IntoIterator<Item = T>, + args: Option<T>, + named_only: impl IntoIterator<Item = T>, + kwargs: Option<T>, +) -> impl Iterator<Item = FmtParam<T>> { + let mut pos_only = pos_only.into_iter().peekable(); + let slash = match pos_only.peek().is_some() { + true => Some(FmtParam::Slash), + false => None, + }; + + let mut named_only = named_only.into_iter().peekable(); + // `*args`, otherwise `*` if needed. + let args_or_star = match (named_only.peek().is_some(), args) { + (_, Some(args)) => Some(FmtParam::Args(args)), + (true, None) => Some(FmtParam::Star), + (false, None) => None, + }; + + iter::empty() + .chain(pos_only.map(FmtParam::Regular)) + .chain(slash) + .chain(pos_named.into_iter().map(FmtParam::Regular)) + .chain(args_or_star) + .chain(named_only.map(FmtParam::Regular)) + .chain(kwargs.map(FmtParam::Kwargs)) +} + +/// What to print for unknown default/optional. +pub(crate) const PARAM_FMT_OPTIONAL: &str = "..."; + +pub(crate) struct ParamFmt<'a, T: Display, D: Display> { + /// Parameter name. + pub(crate) name: &'a str, + /// Parameter type. If `None`, it will be omitted. + pub(crate) ty: Option<T>, + pub(crate) default: Option<D>, +} + +/// Utility to format function signature. +pub(crate) fn fmt_param_spec<'n, T: Display, D: Display>( + f: &mut dyn fmt::Write, + pos_only: impl IntoIterator<Item = ParamFmt<'n, T, D>>, + pos_named: impl IntoIterator<Item = ParamFmt<'n, T, D>>, + args: Option<ParamFmt<'n, T, D>>, + named_only: impl IntoIterator<Item = ParamFmt<'n, T, D>>, + kwargs: Option<ParamFmt<'n, T, D>>, +) -> fmt::Result { + fmt_param_spec_maybe_multiline(f, None, pos_only, pos_named, args, named_only, kwargs) +} + +#[allow(clippy::write_with_newline)] +pub(crate) fn fmt_param_spec_maybe_multiline<'n, T: Display, D: Display>( + f: &mut dyn fmt::Write, + // Single-line if `None`.
+ indent: Option<&str>, + pos_only: impl IntoIterator<Item = ParamFmt<'n, T, D>>, + pos_named: impl IntoIterator<Item = ParamFmt<'n, T, D>>, + args: Option<ParamFmt<'n, T, D>>, + named_only: impl IntoIterator<Item = ParamFmt<'n, T, D>>, + kwargs: Option<ParamFmt<'n, T, D>>, +) -> fmt::Result { + struct Printer<'w> { + f: &'w mut dyn fmt::Write, + } + + impl<'w> Printer<'w> { + fn write_param( + &mut self, + name: impl Display, + ty: Option<impl Display>, + default: Option<impl Display>, + ) -> fmt::Result { + write!(self.f, "{name}")?; + if let Some(ty) = ty { + write!(self.f, ": {ty}")?; + } + if let Some(default) = default { + write!(self.f, " = {default}")?; + } + Ok(()) + } + } + + let mut printer = Printer { f }; + + let mut iter = iter_fmt_param_spec(pos_only, pos_named, args, named_only, kwargs).peekable(); + + let not_empty = iter.peek().is_some(); + + for (i, param) in iter.enumerate() { + if i == 0 { + if let Some(indent) = indent { + write!(printer.f, "{indent}")?; + } + } else { + if let Some(indent) = indent { + write!(printer.f, ",\n{indent}")?; + } else { + write!(printer.f, ", ")?; + } + } + match param { + FmtParam::Regular(p) => { + printer.write_param(p.name, p.ty, p.default)?; + } + FmtParam::Args(p) => { + printer.write_param(format_args!("*{}", p.name), p.ty, p.default)?; + } + FmtParam::Kwargs(p) => { + printer.write_param(format_args!("**{}", p.name), p.ty, p.default)?; + } + FmtParam::Slash => { + write!(printer.f, "/")?; + } + FmtParam::Star => { + write!(printer.f, "*")?; + } + } + } + + if not_empty && indent.is_some() { + write!(printer.f, ",\n")?; + } + + Ok(()) +} diff --git a/starlark-rust/starlark/src/eval/runtime/params/parser.rs b/starlark-rust/starlark/src/eval/runtime/params/parser.rs new file mode 100644 index 0000000000000..26361c82fc7eb --- /dev/null +++ b/starlark-rust/starlark/src/eval/runtime/params/parser.rs @@ -0,0 +1,222 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::slice; + +use starlark_syntax::internal_error; +use starlark_syntax::other_error; + +use crate::values::UnpackValue; +use crate::values::Value; + +/// Parse a series of parameters which were specified by +/// [`ParametersSpec`](crate::eval::ParametersSpec). +/// +/// This is created with [`ParametersSpec::parser`](crate::eval::ParametersSpec::parser). +pub struct ParametersParser<'v, 'a> { + // Invariant: `slots` and `names` are the same length. + slots: slice::Iter<'a, Option<Value<'v>>>, + names: slice::Iter<'a, String>, +} + +impl<'v, 'a> ParametersParser<'v, 'a> { + /// Create a parameter parser, which reads parameters from the provided slots. + pub(crate) fn new(slots: &'a [Option<Value<'v>>], names: &'a [String]) -> Self { + // This assertion is important because we use `unwrap_unchecked` in `get_next`.
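For the display helpers a few lines up, the separator rules are: a `/` closes the positional-only block, and a bare `*` appears only when named-only parameters exist without `*args`. A standalone mirror of `iter_fmt_param_spec`'s logic (the real function is crate-private, so this sketch reimplements the rules for illustration):

fn render(pos_only: &[&str], pos_named: &[&str], args: Option<&str>,
          named_only: &[&str], kwargs: Option<&str>) -> String {
    let mut out: Vec<String> = Vec::new();
    out.extend(pos_only.iter().map(|p| p.to_string()));
    if !pos_only.is_empty() { out.push("/".to_owned()); } // `/` closes pos-only
    out.extend(pos_named.iter().map(|p| p.to_string()));
    match (args, named_only.is_empty()) {
        (Some(a), _) => out.push(format!("*{a}")),   // real `*args`
        (None, false) => out.push("*".to_owned()),   // bare `*` separator
        (None, true) => {}
    }
    out.extend(named_only.iter().map(|p| p.to_string()));
    if let Some(k) = kwargs { out.push(format!("**{k}")); }
    out.join(", ")
}

fn main() {
    assert_eq!(render(&["a"], &["b"], None, &["c"], None), "a, /, b, *, c");
    assert_eq!(render(&[], &["x"], Some("args"), &["y"], Some("kw")), "x, *args, y, **kw");
}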
+ assert_eq!(slots.len(), names.len()); + ParametersParser { + slots: slots.iter(), + names: names.iter(), + } + } + + #[inline] + fn get_next(&mut self) -> anyhow::Result<(Option<Value<'v>>, &'a str)> { + let Some(v) = self.slots.next() else { + return Err( + internal_error!("Requesting more parameters than were specified").into_anyhow(), + ); + }; + // SAFETY: `slots` and `names` have the same length (struct invariant). + let name = unsafe { self.names.next().unwrap_unchecked() }; + Ok((*v, name)) + } + + /// Obtain the next optional parameter (without a default value). + pub fn next_opt<T: UnpackValue<'v>>(&mut self) -> anyhow::Result<Option<T>> { + match self.get_next()? { + (None, _) => Ok(None), + (Some(v), name) => Ok(Some(T::unpack_named_param(v, name)?)), + } + } + + /// Obtain the next parameter. Fail if the parameter is optional and was not provided. + pub fn next<T: UnpackValue<'v>>(&mut self) -> anyhow::Result<T> { + let (v, name) = self.get_next()?; + let Some(v) = v else { + return Err(other_error!( + "Requested non-optional param {name} which was declared optional in signature" + ) + .into_anyhow()); + }; + T::unpack_named_param(v, name) + } + + #[inline] + pub(crate) fn is_eof(&self) -> bool { + self.slots.len() == 0 + } +} + +#[cfg(test)] +mod tests { + use std::collections::HashMap; + + use crate::assert::Assert; + use crate::docs::DocParam; + use crate::docs::DocParams; + use crate::docs::DocString; + use crate::docs::DocStringKind; + use crate::eval::compiler::def::FrozenDef; + use crate::eval::runtime::params::display::PARAM_FMT_OPTIONAL; + use crate::eval::runtime::params::spec::ParametersSpec; + use crate::eval::ParametersSpecParam; + use crate::typing::Ty; + use crate::values::FrozenValue; + + #[test] + fn test_documentation() -> anyhow::Result<()> { + // Make sure that documentation for some odder parameter specs works properly.
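A hedged sketch of how a native function body drains the parser defined above: slots are read strictly in declaration order, `next` for required or defaulted parameters and `next_opt` for optional ones. How the parser is obtained from `ParametersSpec::parser` is elided, and the two-parameter spec assumed here (`a` required int, `b` optional string) is illustrative:

use starlark::eval::ParametersParser;

fn read_args<'v>(mut parser: ParametersParser<'v, '_>) -> anyhow::Result<()> {
    // Declaration order matters: `a` first, then `b`.
    let a: i32 = parser.next()?;
    let b: Option<&str> = parser.next_opt()?;
    println!("a={} b={:?}", a, b);
    Ok(())
}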
+ let p = ParametersSpec::::new_parts( + "f", + [], + [], + true, + [ + ("a", ParametersSpecParam::Optional), + ("b", ParametersSpecParam::Optional), + ], + false, + ); + + let expected = DocParams { + args: Some(DocParam { + name: "args".to_owned(), + docs: None, + typ: Ty::any(), + default_value: None, + }), + named_only: vec![ + DocParam { + name: "a".to_owned(), + docs: None, + typ: Ty::int(), + default_value: Some(PARAM_FMT_OPTIONAL.to_owned()), + }, + DocParam { + name: "b".to_owned(), + docs: DocString::from_docstring(DocStringKind::Rust, "param b docs"), + typ: Ty::any(), + default_value: Some(PARAM_FMT_OPTIONAL.to_owned()), + }, + ], + pos_only: Vec::new(), + pos_or_named: Vec::new(), + kwargs: None, + }; + let types = vec![Ty::any(), Ty::int(), Ty::any()]; + let mut docs = HashMap::new(); + docs.insert("a".to_owned(), None); + docs.insert( + "b".to_owned(), + DocString::from_docstring(DocStringKind::Rust, "param b docs"), + ); + + let params = p.documentation(types, docs); + assert_eq!(expected, params); + Ok(()) + } + + #[test] + fn test_parameters_str() { + fn test(sig: &str) { + let a = Assert::new(); + let f = a + .pass_module(&format!("def f({sig}): pass")) + .get("f") + .unwrap(); + assert_eq!(sig, &f.value().parameters_spec().unwrap().parameters_str()); + } + + test(""); + + test("a, b, c, d, e, f, g, h, *args, **kwargs"); + + test("*, a"); + test("x, *, a"); + + test("*args, a"); + test("x, *args, a"); + + test("**kwargs"); + test("a, **kwargs"); + } + + #[test] + fn test_can_fill_with_args() { + fn test(sig: &str, pos: usize, names: &[&str], expected: bool) { + let a = Assert::new(); + let module = a.pass_module(&format!("def f({}): pass", sig)); + let f = module.get("f").unwrap().downcast::().unwrap(); + let parameters_spec = &f.parameters; + assert_eq!(expected, parameters_spec.can_fill_with_args(pos, names)); + } + + test("", 0, &[], true); + test("", 1, &[], false); + test("", 0, &["a"], false); + + test("a", 1, &[], true); + test("a", 0, &["a"], true); + test("a", 1, &["a"], false); + test("a", 0, &["x"], false); + + test("a, b = 1", 1, &[], true); + test("a, b = 1", 2, &[], true); + test("a, b = 1", 0, &["a"], true); + test("a, b = 1", 0, &["b"], false); + test("a, b = 1", 0, &["a", "b"], true); + + test("*, a", 0, &[], false); + test("*, a", 1, &[], false); + test("*, a", 0, &["a"], true); + + test("a, *args", 0, &[], false); + test("a, *args", 1, &[], true); + test("a, *args", 10, &[], true); + + test("*args, b", 0, &[], false); + test("*args, b", 1, &[], false); + test("*args, b", 0, &["b"], true); + + test("**kwargs", 0, &[], true); + test("**kwargs", 0, &["a"], true); + test("**kwargs", 1, &[], false); + + // No test for positional-only args because we can't create them in starlark. + } +} diff --git a/starlark-rust/starlark/src/eval/runtime/params/spec.rs b/starlark-rust/starlark/src/eval/runtime/params/spec.rs new file mode 100644 index 0000000000000..5263ed6148e77 --- /dev/null +++ b/starlark-rust/starlark/src/eval/runtime/params/spec.rs @@ -0,0 +1,991 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::cmp; +use std::collections::HashMap; +use std::fmt; + +use allocative::Allocative; +use dupe::Dupe; +use starlark_derive::Coerce; +use starlark_derive::Freeze; +use starlark_derive::Trace; +use starlark_map::small_map::SmallMap; +use starlark_map::Hashed; +use starlark_syntax::function_error; +use starlark_syntax::other_error; +use starlark_syntax::syntax::def::DefParamIndices; + +use crate as starlark; +use crate::__macro_refs::coerce; +use crate::cast::transmute; +use crate::collections::symbol::map::SymbolMap; +use crate::docs::DocParam; +use crate::docs::DocParams; +use crate::docs::DocString; +use crate::eval::runtime::arguments::ArgSymbol; +use crate::eval::runtime::arguments::ArgumentsImpl; +use crate::eval::runtime::arguments::FunctionError; +use crate::eval::runtime::arguments::ResolvedArgName; +use crate::eval::runtime::params::display::fmt_param_spec; +use crate::eval::runtime::params::display::ParamFmt; +use crate::eval::runtime::params::display::PARAM_FMT_OPTIONAL; +use crate::eval::Arguments; +use crate::eval::Evaluator; +use crate::eval::ParametersParser; +use crate::hint::unlikely; +use crate::typing::ParamIsRequired; +use crate::typing::Ty; +use crate::values::dict::Dict; +use crate::values::dict::DictRef; +use crate::values::Heap; +use crate::values::StringValue; +use crate::values::Value; +use crate::values::ValueLike; + +/// Describe parameter for [`ParametersSpec`]. +#[derive( + Debug, Clone, Copy, Dupe, PartialEq, Eq, PartialOrd, Ord, Trace, Freeze, Allocative +)] +pub enum ParametersSpecParam { + /// Parameter is required. + Required, + /// Parameter is optional (returned as `None`). + Optional, + /// Parameter has default value. + Defaulted(V), +} + +impl ParametersSpecParam { + pub(crate) fn is_required(&self) -> ParamIsRequired { + match self { + ParametersSpecParam::Required => ParamIsRequired::Yes, + ParametersSpecParam::Optional | ParametersSpecParam::Defaulted(_) => { + ParamIsRequired::No + } + } + } +} + +#[derive(Debug, Copy, Clone, Dupe, Coerce, PartialEq, Trace, Freeze, Allocative)] +#[repr(C)] +pub(crate) enum ParameterKind { + Required, + /// When optional parameter is not supplied, there's no error, + /// but the slot remains `None`. + /// + /// This is used only in native code, parameters of type `Option` become `Optional`. + Optional, + Defaulted(V), + Args, + KWargs, +} + +#[derive(Debug, Copy, Clone, Dupe, PartialEq, Eq, PartialOrd, Ord)] +enum CurrentParameterStyle { + /// Parameter can be only filled positionally. + PosOnly, + /// Parameter can be filled positionally or by name. + PosOrNamed, + /// Parameter can be filled by name only. + NamedOnly, + /// No more args accepted. + NoMore, +} + +/// Builder for [`ParametersSpec`] +pub(crate) struct ParametersSpecBuilder { + function_name: String, + params: Vec<(String, ParameterKind)>, + names: SymbolMap, + /// Number of parameters that can be filled only positionally. + positional_only: usize, + /// Number of parameters that can be filled positionally. 
+ positional: usize, + + /// Has the no_args been passed + current_style: CurrentParameterStyle, + + args: Option, + kwargs: Option, +} + +/// Define a list of parameters. This code assumes that all names are distinct and that +/// `*args`/`**kwargs` occur in well-formed locations. +// V = Value, or FrozenValue +#[derive(Debug, Clone, Trace, Freeze, Allocative)] +#[repr(C)] +pub struct ParametersSpec { + /// Only used in error messages + function_name: String, + + /// Parameters in the order they occur. + param_kinds: Box<[ParameterKind]>, + /// Parameter names in the order they occur. + param_names: Box<[String]>, + /// Mapping from name to index where the argument lives. + #[freeze(identity)] + pub(crate) names: SymbolMap, + #[freeze(identity)] + indices: DefParamIndices, +} + +impl ParametersSpecBuilder { + fn add(&mut self, name: &str, val: ParameterKind) { + assert!( + !matches!(val, ParameterKind::Args | ParameterKind::KWargs), + "adding parameter `{}` to `{}", + name, + self.function_name + ); + + // Regular arguments cannot follow `**kwargs`, but can follow `*args`. + assert!( + self.current_style < CurrentParameterStyle::NoMore, + "adding parameter `{}` to `{}", + name, + self.function_name + ); + assert!( + self.kwargs.is_none(), + "adding parameter `{}` to `{}", + name, + self.function_name + ); + + let i = self.params.len(); + self.params.push((name.to_owned(), val)); + if self.current_style != CurrentParameterStyle::PosOnly { + let old = self.names.insert(name, i.try_into().unwrap()); + assert!(old.is_none(), "Repeated parameter `{}`", name); + } + if self.args.is_none() && self.current_style != CurrentParameterStyle::NamedOnly { + // If you've already seen `args` or `no_args`, you can't enter these + // positionally + self.positional = i + 1; + if self.current_style == CurrentParameterStyle::PosOnly { + self.positional_only = i + 1; + } + } + } + + /// Add a required parameter. Will be an error if the caller doesn't supply + /// it. If you want to supply a position-only argument, prepend a `$` to + /// the name. + pub(crate) fn required(&mut self, name: &str) { + self.add(name, ParameterKind::Required); + } + + /// Add an optional parameter. Will be None if the caller doesn't supply it. + /// If you want to supply a position-only argument, prepend a `$` to the + /// name. + pub(crate) fn optional(&mut self, name: &str) { + self.add(name, ParameterKind::Optional); + } + + /// Add an optional parameter. Will be the default value if the caller + /// doesn't supply it. If you want to supply a position-only argument, + /// prepend a `$` to the name. + pub(crate) fn defaulted(&mut self, name: &str, val: V) { + self.add(name, ParameterKind::Defaulted(val)); + } + + fn param(&mut self, name: &str, param: ParametersSpecParam) { + match param { + ParametersSpecParam::Required => self.required(name), + ParametersSpecParam::Optional => self.optional(name), + ParametersSpecParam::Defaulted(x) => self.defaulted(name, x), + } + } + + /// Add an `*args` parameter which will be an iterable sequence of parameters, + /// recorded into a [`Vec`]. A function can only have one `args` + /// parameter. After this call, any subsequent + /// [`required`](ParametersSpecBuilder::required), + /// [`optional`](ParametersSpecBuilder::optional) or + /// [`defaulted`](ParametersSpecBuilder::defaulted) + /// parameters can _only_ be supplied by name. 
+    pub(crate) fn args(&mut self) {
+        assert!(
+            self.args.is_none(),
+            "adding *args to `{}`",
+            self.function_name
+        );
+        assert!(
+            self.current_style < CurrentParameterStyle::NamedOnly,
+            "adding *args to `{}`",
+            self.function_name
+        );
+        assert!(
+            self.kwargs.is_none(),
+            "adding *args to `{}`",
+            self.function_name
+        );
+        self.params.push(("*args".to_owned(), ParameterKind::Args));
+        self.args = Some(self.params.len() - 1);
+        self.current_style = CurrentParameterStyle::NamedOnly;
+    }
+
+    /// Following parameters can be filled positionally or by name.
+    pub(crate) fn no_more_positional_only_args(&mut self) {
+        assert_eq!(
+            self.current_style,
+            CurrentParameterStyle::PosOnly,
+            "adding / to `{}`",
+            self.function_name
+        );
+        self.current_style = CurrentParameterStyle::PosOrNamed;
+    }
+
+    /// Declare that this function has no `*args` parameter; this corresponds to the
+    /// Python `*` marker. After this call, any subsequent
+    /// [`required`](ParametersSpecBuilder::required),
+    /// [`optional`](ParametersSpecBuilder::optional) or
+    /// [`defaulted`](ParametersSpecBuilder::defaulted)
+    /// parameters can _only_ be supplied by name.
+    pub(crate) fn no_more_positional_args(&mut self) {
+        assert!(self.args.is_none(), "adding * to `{}`", self.function_name);
+        assert!(
+            self.current_style < CurrentParameterStyle::NamedOnly,
+            "adding * to `{}`",
+            self.function_name
+        );
+        assert!(
+            self.kwargs.is_none(),
+            "adding * to `{}`",
+            self.function_name
+        );
+        self.current_style = CurrentParameterStyle::NamedOnly;
+    }
+
+    /// Add a `**kwargs` parameter which will be a dictionary, recorded into a [`SmallMap`].
+    /// A function can only have one `kwargs` parameter. After this call, no further
+    /// parameters can be added.
+    pub(crate) fn kwargs(&mut self) {
+        assert!(
+            self.kwargs.is_none(),
+            "adding **kwargs to `{}`",
+            self.function_name
+        );
+        self.params
+            .push(("**kwargs".to_owned(), ParameterKind::KWargs));
+        self.current_style = CurrentParameterStyle::NoMore;
+        self.kwargs = Some(self.params.len() - 1);
+    }
+
+    /// Construct the parameters specification.
+    pub(crate) fn finish(self) -> ParametersSpec<V> {
+        let ParametersSpecBuilder {
+            function_name,
+            positional_only,
+            positional,
+            args,
+            current_style,
+            kwargs,
+            params,
+            names,
+        } = self;
+        let _ = current_style;
+        let positional_only: u32 = positional_only.try_into().unwrap();
+        let positional: u32 = positional.try_into().unwrap();
+        assert!(
+            positional_only <= positional,
+            "building `{}`",
+            function_name
+        );
+        ParametersSpec {
+            function_name,
+            param_kinds: params.iter().map(|p| p.1).collect(),
+            param_names: params.into_iter().map(|p| p.0).collect(),
+            names,
+            indices: DefParamIndices {
+                num_positional_only: positional_only,
+                num_positional: positional,
+                args: args.map(|args| args.try_into().unwrap()),
+                kwargs: kwargs.map(|kwargs| kwargs.try_into().unwrap()),
+            },
+        }
+    }
+}
+
+impl<V> ParametersSpec<V> {
+    /// Create a new [`ParametersSpec`] with the given function name and a capacity hint.
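(Editorial aside, not part of the diff: a sketch of the builder's state machine in use. The builder methods are `pub(crate)`, so this would only appear inside the crate; `default_c` is a placeholder `FrozenValue` assumed to be created elsewhere. This builds the equivalent of `def f(a, /, b, *, c = <default>, **kwargs)`.)

let mut builder = ParametersSpec::<FrozenValue>::with_capacity("f".to_owned(), 5);
builder.required("a");                  // positional-only, until the `/` marker
builder.no_more_positional_only_args(); // the `/` marker
builder.required("b");                  // positional-or-named
builder.no_more_positional_args();      // the `*` marker (no `*args` here)
builder.defaulted("c", default_c);      // named-only, with a default value
builder.kwargs();                       // trailing `**kwargs`; nothing may follow
let spec: ParametersSpec<FrozenValue> = builder.finish();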
+    pub(crate) fn with_capacity(
+        function_name: String,
+        capacity: usize,
+    ) -> ParametersSpecBuilder<V> {
+        ParametersSpecBuilder {
+            function_name,
+            params: Vec::with_capacity(capacity),
+            names: SymbolMap::with_capacity(capacity),
+            positional_only: 0,
+            positional: 0,
+            current_style: CurrentParameterStyle::PosOnly,
+            args: None,
+            kwargs: None,
+        }
+    }
+
+    /// Create a new [`ParametersSpec`].
+    pub fn new_parts<'a>(
+        function_name: &str,
+        pos_only: impl IntoIterator<Item = (&'a str, ParametersSpecParam<V>)>,
+        pos_or_named: impl IntoIterator<Item = (&'a str, ParametersSpecParam<V>)>,
+        args: bool,
+        named_only: impl IntoIterator<Item = (&'a str, ParametersSpecParam<V>)>,
+        kwargs: bool,
+    ) -> ParametersSpec<V>
+    where
+        V: Copy,
+    {
+        let pos_only = pos_only.into_iter();
+        let pos_or_named = pos_or_named.into_iter();
+        let named_only = named_only.into_iter();
+
+        let mut builder = ParametersSpec::with_capacity(
+            function_name.to_owned(),
+            pos_only.size_hint().0
+                + pos_or_named.size_hint().0
+                + args as usize
+                + named_only.size_hint().0
+                + kwargs as usize,
+        );
+
+        for (name, val) in pos_only {
+            builder.param(name, val);
+        }
+        builder.no_more_positional_only_args();
+        for (name, val) in pos_or_named {
+            builder.param(name, val);
+        }
+        if args {
+            builder.args();
+        } else {
+            builder.no_more_positional_args();
+        }
+        for (name, val) in named_only {
+            builder.param(name, val);
+        }
+        if kwargs {
+            builder.kwargs();
+        }
+        builder.finish()
+    }
+
+    /// Create a parameter spec with only named-only parameters.
+    pub fn new_named_only<'a>(
+        function_name: &str,
+        named_only: impl IntoIterator<Item = (&'a str, ParametersSpecParam<V>)>,
+    ) -> ParametersSpec<V>
+    where
+        V: Copy,
+    {
+        Self::new_parts(
+            function_name,
+            std::iter::empty(),
+            std::iter::empty(),
+            false,
+            named_only,
+            false,
+        )
+    }
+
+    /// Produce an approximate signature for the function, combining the name and arguments.
+    pub fn signature(&self) -> String {
+        let mut collector = String::new();
+        self.collect_signature(&mut collector);
+        collector
+    }
+
+    // Generate a good error message for it
+    pub(crate) fn collect_signature(&self, collector: &mut String) {
+        collector.push_str(&self.function_name);
+
+        // We used to make the "name" of a function include all its parameters, but that is
+        // a lot of details and visually crowds out everything else. Try disabling, although
+        // we might want it in some contexts, so don't delete it.
+    }
+
+    /// Function parameters as they would appear in `def`
+    /// (excluding types, default values and formatting).
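(Editorial aside, not part of the diff: the public construction path drives the same builder. A minimal sketch, using `FrozenValue` for `V`; the expected rendering mirrors the `test_parameters_str` cases added later in this diff.)

use starlark::eval::ParametersSpecParam;
use starlark::values::FrozenValue;

// Equivalent of `def f(x, *args, z): ...`; no positional-only params, no `**kwargs`.
let spec = ParametersSpec::<FrozenValue>::new_parts(
    "f",
    [],                                     // pos_only
    [("x", ParametersSpecParam::Required)], // pos_or_named
    true,                                   // accept *args
    [("z", ParametersSpecParam::Required)], // named_only
    false,                                  // no **kwargs
);
assert_eq!("x, *args, z", spec.parameters_str());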
+ pub fn parameters_str(&self) -> String { + #[cold] + fn err(args: fmt::Arguments) -> String { + if cfg!(test) { + panic!("{}", args); + } + format!("<{}>", args) + } + + if let Some(args) = self.indices.args { + if args != self.indices.num_positional { + return err(format_args!( + "Inconsistent *args: {:?}, args={}, positional={}", + self.function_name, args, self.indices.num_positional + )); + } + } + if let Some(kwargs) = self.indices.kwargs { + if kwargs as usize + 1 != self.param_kinds.len() { + return err(format_args!( + "Inconsistent **kwargs: {:?}, kwargs={}, param_kinds.len()={}", + self.function_name, + kwargs, + self.param_kinds.len() + )); + } + } + + let pf = |i: usize| { + let name = self.param_names[i].as_str(); + let name = name.strip_prefix("**").unwrap_or(name); + let name = name.strip_prefix("*").unwrap_or(name); + ParamFmt { + name, + ty: None::<&str>, + default: match self.param_kinds[i] { + ParameterKind::Defaulted(_) | ParameterKind::Optional => { + Some(PARAM_FMT_OPTIONAL) + } + ParameterKind::Required | ParameterKind::Args | ParameterKind::KWargs => None, + }, + } + }; + + let mut s = String::new(); + fmt_param_spec( + &mut s, + self.indices.pos_only().map(pf), + self.indices.pos_or_named().map(pf), + self.indices.args.map(|a| a as usize).map(pf), + self.indices.named_only(self.param_kinds.len()).map(pf), + self.indices.kwargs.map(|a| a as usize).map(pf), + ) + .unwrap(); + s + } + + pub(crate) fn resolve_name(&self, name: Hashed<&str>) -> ResolvedArgName { + let hash = name.hash(); + let param_index = self.names.get_hashed_str(name).copied(); + ResolvedArgName { hash, param_index } + } + + pub(crate) fn has_args_or_kwargs(&self) -> bool { + self.indices.args.is_some() || self.indices.kwargs.is_some() + } +} + +impl<'v, V: ValueLike<'v>> ParametersSpec { + pub(crate) fn as_value(&self) -> &ParametersSpec> { + // Everything is `repr(C)` and `Value` and `FrozenValue` have the same layout. + unsafe { transmute!(&ParametersSpec, &ParametersSpec, self) } + } + + /// Number of function parameters. + pub fn len(&self) -> usize { + self.param_kinds.len() + } +} + +impl<'v> ParametersSpec> { + /// Move parameters from [`Arguments`] to a list of [`Value`], + /// using the supplied [`ParametersSpec`]. + #[inline] + fn collect_impl( + &self, + args: &Arguments<'v, '_>, + slots: &mut [Option>], + heap: &'v Heap, + ) -> crate::Result<()> { + self.collect_inline(&args.0, slots, heap) + } + + /// Collect `N` arguments. + /// + /// This function is called by generated code. 
+    #[inline]
+    fn collect_into_impl<const N: usize>(
+        &self,
+        args: &Arguments<'v, '_>,
+        heap: &'v Heap,
+    ) -> crate::Result<[Option<Value<'v>>; N]> {
+        let mut slots = [(); N].map(|_| None);
+        self.collect(args, &mut slots, heap)?;
+        Ok(slots)
+    }
+
+    /// A variant of `collect` that is always inlined
+    /// for Def and NativeFunction that are hot-spots
+    #[inline(always)]
+    fn collect_inline_impl<'a, A: ArgumentsImpl<'v, 'a>>(
+        &self,
+        args: &A,
+        slots: &mut [Option<Value<'v>>],
+        heap: &'v Heap,
+    ) -> crate::Result<()>
+    where
+        'v: 'a,
+    {
+        // If the number of positional arguments matches both the number of positional slots
+        // and the total number of parameters, and there are no named arguments,
+        // then `*args` and `**kwargs` must be unset, we don't have to create
+        // args/kwargs objects, and we can skip everything else.
+        if args.pos().len() == (self.indices.num_positional as usize)
+            && args.pos().len() == self.param_kinds.len()
+            && args.named().is_empty()
+            && args.args().is_none()
+            && args.kwargs().is_none()
+        {
+            for (v, s) in args.pos().iter().zip(slots.iter_mut()) {
+                *s = Some(*v);
+            }
+
+            return Ok(());
+        }
+
+        self.collect_slow(args, slots, heap)
+    }
+
+    fn collect_slow<'a, A: ArgumentsImpl<'v, 'a>>(
+        &self,
+        args: &A,
+        slots: &mut [Option<Value<'v>>],
+        heap: &'v Heap,
+    ) -> crate::Result<()>
+    where
+        'v: 'a,
+    {
+        /// Lazily initialized `kwargs` object.
+        #[derive(Default)]
+        struct LazyKwargs<'v> {
+            kwargs: Option<SmallMap<StringValue<'v>, Value<'v>>>,
+        }
+
+        impl<'v> LazyKwargs<'v> {
+            // Return true if the value is a duplicate
+            #[inline(always)]
+            fn insert(&mut self, key: Hashed<StringValue<'v>>, val: Value<'v>) -> bool {
+                match &mut self.kwargs {
+                    None => {
+                        let mut mp = SmallMap::with_capacity(12);
+                        mp.insert_hashed_unique_unchecked(key, val);
+                        self.kwargs = Some(mp);
+                        false
+                    }
+                    Some(mp) => mp.insert_hashed(key, val).is_some(),
+                }
+            }
+
+            #[inline(always)]
+            fn insert_unique_unchecked(&mut self, key: Hashed<StringValue<'v>>, val: Value<'v>) {
+                match &mut self.kwargs {
+                    None => {
+                        let mut mp = SmallMap::with_capacity(12);
+                        mp.insert_hashed_unique_unchecked(key, val);
+                        self.kwargs = Some(mp);
+                    }
+                    Some(mp) => {
+                        mp.insert_hashed_unique_unchecked(key, val);
+                    }
+                }
+            }
+
+            fn alloc(self, heap: &'v Heap) -> Value<'v> {
+                let kwargs = match self.kwargs {
+                    Some(kwargs) => Dict::new(coerce(kwargs)),
+                    None => Dict::default(),
+                };
+                heap.alloc(kwargs)
+            }
+        }
+
+        let len = self.param_kinds.len();
+        // We might do unchecked stuff later on, so make sure we have as many slots as we expect
+        assert!(slots.len() >= len);
+
+        let mut star_args = Vec::new();
+        let mut kwargs = LazyKwargs::default();
+        let mut next_position = 0;
+
+        // First deal with positional parameters
+        if args.pos().len() <= (self.indices.num_positional as usize) {
+            // fast path for when we don't need to bounce down to filling in args
+            for (v, s) in args.pos().iter().zip(slots.iter_mut()) {
+                *s = Some(*v);
+            }
+            next_position = args.pos().len();
+        } else {
+            for v in args.pos() {
+                if next_position < (self.indices.num_positional as usize) {
+                    slots[next_position] = Some(*v);
+                    next_position += 1;
+                } else {
+                    star_args.push(*v);
+                }
+            }
+        }
+
+        // Next deal with named parameters
+        // The lowest position at which we've written a name.
+        // If at the end lowest_name is less than next_position, we got the same variable twice.
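// (Editorial aside, not in the diff.) Worked example of the `lowest_name` check:
// for `def f(a, b)` called as `f(1, a=2)`:
//   - the positional `1` fills slot 0, so `next_position` becomes 1;
//   - the named `a=2` writes slot 0 again, so `lowest_name` becomes 0;
//   - `lowest_name` (0) < `next_position` (1), so the `RepeatedArg` error below
//     correctly reports that `a` was supplied twice.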
+ // So no duplicate checking until after all positional arguments + let mut lowest_name = usize::MAX; + // Avoid a lot of loop setup etc in the common case + if !args.names().names().is_empty() { + for ((name, name_value), v) in args.names().names().iter().zip(args.named()) { + // Safe to use new_unchecked because hash for the Value and str are the same + match name.get_index_from_param_spec(self) { + None => { + kwargs.insert_unique_unchecked( + Hashed::new_unchecked(name.small_hash(), *name_value), + *v, + ); + } + Some(i) => { + slots[i] = Some(*v); + lowest_name = cmp::min(lowest_name, i); + } + } + } + } + + // Next up are the *args parameters + if let Some(param_args) = args.args() { + for v in param_args + .iterate(heap) + .map_err(|_| FunctionError::ArgsArrayIsNotIterable)? + { + if next_position < (self.indices.num_positional as usize) { + slots[next_position] = Some(v); + next_position += 1; + } else { + star_args.push(v); + } + } + } + + // Check if the named arguments clashed with the positional arguments + if unlikely(next_position > lowest_name) { + return Err(FunctionError::RepeatedArg { + name: self.param_names[lowest_name].clone(), + } + .into()); + } + + // Now insert the kwargs, if there are any + if let Some(param_kwargs) = args.kwargs() { + match DictRef::from_value(param_kwargs) { + Some(y) => { + for (k, v) in y.iter_hashed() { + match StringValue::new(*k.key()) { + None => return Err(FunctionError::ArgsValueIsNotString.into()), + Some(s) => { + let repeat = match self + .names + .get_hashed_string_value(Hashed::new_unchecked(k.hash(), s)) + { + None => kwargs.insert(Hashed::new_unchecked(k.hash(), s), v), + Some(i) => { + let this_slot = &mut slots[*i as usize]; + let repeat = this_slot.is_some(); + *this_slot = Some(v); + repeat + } + }; + if unlikely(repeat) { + return Err(FunctionError::RepeatedArg { + name: s.as_str().to_owned(), + } + .into()); + } + } + } + } + } + None => return Err(FunctionError::KwArgsIsNotDict.into()), + } + } + + // We have moved parameters into all the relevant slots, so need to finalise things. + // We need to set default values and error if any required values are missing + let kinds = &*self.param_kinds; + // This code is very hot, and setting up iterators was a noticeable bottleneck. + for index in next_position..kinds.len() { + // The number of locals must be at least the number of parameters, see `collect` + // which reserves `max(_, kinds.len())`. 
+            let slot = unsafe { slots.get_unchecked_mut(index) };
+            let def = unsafe { kinds.get_unchecked(index) };
+
+            // We know that up to next_position got filled positionally, so we don't need to check those
+            if slot.is_some() {
+                continue;
+            }
+            match def {
+                ParameterKind::Required => {
+                    let function_name = &self.function_name;
+                    let param_name = &self.param_names[index];
+                    if index < self.indices.num_positional_only as usize {
+                        return Err(function_error!(
+                            "Missing positional-only parameter `{param_name}` for call to `{function_name}`",
+                        ));
+                    } else if index >= self.indices.num_positional as usize {
+                        return Err(function_error!(
+                            "Missing named-only parameter `{param_name}` for call to `{function_name}`",
+                        ));
+                    } else {
+                        return Err(function_error!(
+                            "Missing parameter `{param_name}` for call to `{function_name}`"
+                        ));
+                    }
+                }
+                ParameterKind::Defaulted(x) => {
+                    *slot = Some(x.to_value());
+                }
+                _ => {}
+            }
+        }
+
+        // Now set the kwargs/args slots, if they are requested, and fail if they are absent but used.
+        // Note that we deliberately report missing parameters _before_ reporting unexpected
+        // extra parameters, so if a user misspells an argument they get a better error.
+        if let Some(args_pos) = self.indices.args {
+            slots[args_pos as usize] = Some(heap.alloc_tuple(&star_args));
+        } else if unlikely(!star_args.is_empty()) {
+            return Err(FunctionError::ExtraPositionalArg {
+                count: star_args.len(),
+                function: self.signature(),
+            }
+            .into());
+        }
+
+        if let Some(kwargs_pos) = self.indices.kwargs {
+            slots[kwargs_pos as usize] = Some(kwargs.alloc(heap));
+        } else if let Some(kwargs) = kwargs.kwargs {
+            return Err(FunctionError::ExtraNamedArg {
+                names: kwargs.keys().map(|x| x.as_str().to_owned()).collect(),
+                function: self.signature(),
+            }
+            .into());
+        }
+        Ok(())
+    }
+
+    /// Check if current parameters can be filled with given arguments signature.
+    #[allow(clippy::needless_range_loop)]
+    fn can_fill_with_args_impl(&self, pos: usize, names: &[&str]) -> bool {
+        let mut filled = vec![false; self.param_kinds.len()];
+        for p in 0..pos {
+            if p < (self.indices.num_positional as usize) {
+                filled[p] = true;
+            } else if self.indices.args.is_some() {
+                // Filled into `*args`.
+            } else {
+                return false;
+            }
+        }
+        if pos > (self.indices.num_positional as usize) && self.indices.args.is_none() {
+            return false;
+        }
+        for name in names {
+            match self.names.get_str(name) {
+                Some(i) => {
+                    if filled[*i as usize] {
+                        // Duplicate argument.
+                        return false;
+                    }
+                    filled[*i as usize] = true;
+                }
+                None => {
+                    if self.indices.kwargs.is_none() {
+                        return false;
+                    }
+                }
+            }
+        }
+        for (filled, p) in filled.iter().zip(self.param_kinds.iter()) {
+            if *filled {
+                continue;
+            }
+            match p {
+                ParameterKind::Args => {}
+                ParameterKind::KWargs => {}
+                ParameterKind::Defaulted(_) => {}
+                ParameterKind::Optional => {}
+                ParameterKind::Required => return false,
+            }
+        }
+        true
+    }
+
+    /// Generate documentation for each of the parameters.
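(Editorial aside, not part of the diff: a concrete reading of `can_fill_with_args`, mirroring the `test_can_fill_with_args` cases added later in this diff. Here `spec` is assumed to describe the equivalent of `def f(a, b = 1)`.)

assert!(spec.can_fill_with_args(1, &[]));     // f(1): `b` takes its default
assert!(spec.can_fill_with_args(0, &["a"]));  // f(a=1)
assert!(!spec.can_fill_with_args(0, &["b"])); // f(b=1): required `a` left unfilled
assert!(!spec.can_fill_with_args(3, &[]));    // three positional args, no `*args`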
+ fn documentation_impl( + &self, + parameter_types: Vec, + mut parameter_docs: HashMap>, + ) -> DocParams { + assert_eq!( + self.param_kinds.len(), + parameter_types.len(), + "function: `{}`", + self.function_name, + ); + + let mut dp = |i: usize| -> DocParam { + let name = self.param_names[i].as_str(); + let name = name.strip_prefix("**").unwrap_or(name); + let name = name.strip_prefix("*").unwrap_or(name); + + let docs = parameter_docs.remove(name).flatten(); + + let name = name.to_owned(); + + DocParam { + name, + docs, + typ: parameter_types[i].dupe(), + default_value: match self.param_kinds[i] { + ParameterKind::Required => None, + ParameterKind::Optional => Some(PARAM_FMT_OPTIONAL.to_owned()), + ParameterKind::Defaulted(v) => Some(v.to_value().to_repr()), + ParameterKind::Args => None, + ParameterKind::KWargs => None, + }, + } + }; + + DocParams { + pos_only: self.indices.pos_only().map(&mut dp).collect(), + pos_or_named: self.indices.pos_or_named().map(&mut dp).collect(), + args: self.indices.args.map(|a| a as usize).map(&mut dp), + named_only: self + .indices + .named_only(self.param_kinds.len()) + .map(&mut dp) + .collect(), + kwargs: self.indices.kwargs.map(|a| a as usize).map(&mut dp), + } + } + + /// Create a [`ParametersParser`] for given arguments. + #[inline] + fn parser_impl( + &self, + args: &Arguments<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, + k: F, + ) -> crate::Result + where + F: FnOnce(&mut ParametersParser<'v, '_>, &mut Evaluator<'v, '_, '_>) -> crate::Result, + { + eval.alloca_init( + self.len(), + || None, + |slots, eval| { + self.collect_inline(&args.0, slots, eval.heap())?; + let mut parser = ParametersParser::new(slots, &self.param_names); + let r = k(&mut parser, eval)?; + if !parser.is_eof() { + return Err(other_error!( + "Parser for `{}` did not consume all arguments", + self.function_name + )); + } + Ok(r) + }, + ) + } +} + +impl<'v, V: ValueLike<'v>> ParametersSpec { + /// Collect `N` arguments. + /// + /// This function is called by generated code. + #[inline] + pub fn collect_into( + &self, + args: &Arguments<'v, '_>, + heap: &'v Heap, + ) -> crate::Result<[Option>; N]> { + self.as_value().collect_into_impl(args, heap) + } + + /// Move parameters from [`Arguments`] to a list of [`Value`], + /// using the supplied [`ParametersSpec`]. + #[inline] + pub fn collect( + &self, + args: &Arguments<'v, '_>, + slots: &mut [Option>], + heap: &'v Heap, + ) -> crate::Result<()> { + self.as_value().collect_impl(args, slots, heap) + } + + /// Generate documentation for each of the parameters. + /// + /// # Arguments + /// * `parameter_types` should be a mapping of parameter index to type + /// * `parameter_docs` should be a mapping of parameter name to possible documentation for + /// that parameter + #[inline] + pub fn documentation( + &self, + parameter_types: Vec, + parameter_docs: HashMap>, + ) -> DocParams { + self.as_value() + .documentation_impl(parameter_types, parameter_docs) + } + + /// Create a [`ParametersParser`] for given arguments. 
+ #[inline] + pub fn parser( + &self, + args: &Arguments<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, + k: F, + ) -> crate::Result + where + F: FnOnce(&mut ParametersParser<'v, '_>, &mut Evaluator<'v, '_, '_>) -> crate::Result, + { + self.as_value().parser_impl(args, eval, k) + } + + /// A variant of `collect` that is always inlined + /// for Def and NativeFunction that are hot-spots + #[inline(always)] + pub(crate) fn collect_inline<'a, A: ArgumentsImpl<'v, 'a>>( + &self, + args: &A, + slots: &mut [Option>], + heap: &'v Heap, + ) -> crate::Result<()> + where + 'v: 'a, + { + self.as_value().collect_inline_impl(args, slots, heap) + } + + /// Check if current parameters can be filled with given arguments signature. + pub fn can_fill_with_args(&self, pos: usize, names: &[&str]) -> bool { + self.as_value().can_fill_with_args_impl(pos, names) + } +} diff --git a/starlark-rust/starlark/src/eval/runtime/profile.rs b/starlark-rust/starlark/src/eval/runtime/profile.rs new file mode 100644 index 0000000000000..2b64261b2f5d4 --- /dev/null +++ b/starlark-rust/starlark/src/eval/runtime/profile.rs @@ -0,0 +1,30 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +pub(crate) mod bc; +pub(crate) mod csv; +pub(crate) mod data; +pub(crate) mod flamegraph; +pub(crate) mod heap; +pub(crate) mod instant; +pub(crate) mod mode; +pub(crate) mod or_instrumentation; +pub(crate) mod profiler_type; +pub(crate) mod stmt; +pub(crate) mod tests; +pub(crate) mod time_flame; +pub(crate) mod typecheck; diff --git a/starlark-rust/starlark/src/eval/runtime/profile/bc.rs b/starlark-rust/starlark/src/eval/runtime/profile/bc.rs index e3a262f61b491..f8f490a7ad5e3 100644 --- a/starlark-rust/starlark/src/eval/runtime/profile/bc.rs +++ b/starlark-rust/starlark/src/eval/runtime/profile/bc.rs @@ -27,9 +27,55 @@ use dupe::Dupe; use crate::eval::bc::opcode::BcOpcode; use crate::eval::runtime::profile::csv::CsvWriter; use crate::eval::runtime::profile::data::ProfileDataImpl; +use crate::eval::runtime::profile::profiler_type::ProfilerType; use crate::eval::ProfileData; use crate::eval::ProfileMode; +pub(crate) struct BcProfilerType; +pub(crate) struct BcPairsProfilerType; + +impl ProfilerType for BcProfilerType { + type Data = Box; + const PROFILE_MODE: ProfileMode = ProfileMode::Bytecode; + + fn data_from_generic(profile_data: &ProfileDataImpl) -> Option<&Self::Data> { + match profile_data { + ProfileDataImpl::Bc(bc) => Some(bc), + _ => None, + } + } + + fn data_to_generic(data: Self::Data) -> ProfileDataImpl { + ProfileDataImpl::Bc(data) + } + + fn merge_profiles_impl(profiles: &[&Self::Data]) -> starlark_syntax::Result { + Ok(Box::new(BcProfileData::merge( + profiles.iter().map(|x| &***x), + ))) + } +} + +impl ProfilerType for BcPairsProfilerType { + type Data = BcPairsProfileData; + const PROFILE_MODE: ProfileMode = ProfileMode::BytecodePairs; + + fn data_from_generic(profile_data: &ProfileDataImpl) -> Option<&Self::Data> { + match profile_data { + ProfileDataImpl::BcPairs(bc) => Some(bc), + _ => None, + } + } + + fn data_to_generic(data: Self::Data) -> ProfileDataImpl { + ProfileDataImpl::BcPairs(data) + } + + fn merge_profiles_impl(profiles: &[&Self::Data]) -> starlark_syntax::Result { + Ok(BcPairsProfileData::merge(profiles.iter().map(|x| &**x))) + } +} + #[derive(Debug, thiserror::Error)] enum BcProfileError { #[error("Can't call `write_bc_profile` unless you first call `enable_bc_profile`.")] @@ -142,7 +188,7 @@ impl BcProfileData { csv.finish() } - pub(crate) fn merge<'a>(iter: impl IntoIterator) -> BcProfileData { + fn merge<'a>(iter: impl IntoIterator) -> BcProfileData { let mut sum = BcProfileData::default(); for profile in iter { sum += profile; @@ -184,9 +230,7 @@ impl BcPairsProfileData { csv.finish() } - pub(crate) fn merge<'a>( - iter: impl IntoIterator, - ) -> BcPairsProfileData { + fn merge<'a>(iter: impl IntoIterator) -> BcPairsProfileData { let mut sum = BcPairsProfileData::default(); for profile in iter { sum += profile; @@ -228,23 +272,25 @@ impl BcProfile { } } - pub(crate) fn gen_bc_profile(&mut self) -> anyhow::Result { + pub(crate) fn gen_bc_profile(&mut self) -> crate::Result { match mem::replace(&mut self.data, BcProfileDataMode::Disabled) { BcProfileDataMode::Bc(bc) => Ok(ProfileData { - profile_mode: ProfileMode::Bytecode, profile: ProfileDataImpl::Bc(bc), }), - _ => Err(BcProfileError::BcProfilingNotEnabled.into()), + _ => Err(crate::Error::new_other( + BcProfileError::BcProfilingNotEnabled, + )), } } - pub(crate) fn gen_bc_pairs_profile(&mut self) -> anyhow::Result { + pub(crate) fn gen_bc_pairs_profile(&mut self) -> crate::Result { match mem::replace(&mut self.data, BcProfileDataMode::Disabled) { BcProfileDataMode::BcPairs(bc_pairs) => 
Ok(ProfileData { - profile_mode: ProfileMode::BytecodePairs, profile: ProfileDataImpl::BcPairs(*bc_pairs), }), - _ => Err(BcProfileError::BcProfilingNotEnabled.into()), + _ => Err(crate::Error::new_other( + BcProfileError::BcProfilingNotEnabled, + )), } } @@ -267,8 +313,8 @@ mod tests { use crate::eval::bc::opcode::BcOpcode; use crate::eval::runtime::profile::bc::BcPairsProfileData; use crate::eval::runtime::profile::bc::BcProfileData; + use crate::eval::runtime::profile::mode::ProfileMode; use crate::eval::Evaluator; - use crate::eval::ProfileMode; use crate::syntax::AstModule; use crate::syntax::Dialect; diff --git a/starlark-rust/starlark/src/eval/runtime/profile/data.rs b/starlark-rust/starlark/src/eval/runtime/profile/data.rs index b4bf0f6575eec..0d8989414b902 100644 --- a/starlark-rust/starlark/src/eval/runtime/profile/data.rs +++ b/starlark-rust/starlark/src/eval/runtime/profile/data.rs @@ -18,85 +18,106 @@ use std::fs; use std::path::Path; -use anyhow::Context; -use dupe::Dupe; -use starlark_syntax::slice_vec_ext::SliceExt; - use crate::eval::runtime::profile::bc::BcPairsProfileData; +use crate::eval::runtime::profile::bc::BcPairsProfilerType; use crate::eval::runtime::profile::bc::BcProfileData; +use crate::eval::runtime::profile::bc::BcProfilerType; use crate::eval::runtime::profile::flamegraph::FlameGraphData; -use crate::eval::ProfileMode; -use crate::values::AggregateHeapProfileInfo; +use crate::eval::runtime::profile::heap::HeapFlameAllocatedProfilerType; +use crate::eval::runtime::profile::heap::HeapFlameRetainedProfilerType; +use crate::eval::runtime::profile::heap::HeapSummaryAllocatedProfilerType; +use crate::eval::runtime::profile::heap::HeapSummaryRetainedProfilerType; +use crate::eval::runtime::profile::mode::ProfileMode; +use crate::eval::runtime::profile::profiler_type::ProfilerType; +use crate::eval::runtime::profile::stmt::CoverageProfileType; +use crate::eval::runtime::profile::stmt::StmtProfileData; +use crate::eval::runtime::profile::stmt::StmtProfilerType; +use crate::eval::runtime::profile::time_flame::TimeFlameProfilerType; +use crate::eval::runtime::profile::typecheck::TypecheckProfileData; +use crate::eval::runtime::profile::typecheck::TypecheckProfilerType; +use crate::values::layout::heap::profile::aggregated::AggregateHeapProfileInfo; #[derive(Debug, thiserror::Error)] enum ProfileDataError { - #[error("Profile data is not consistent with profile mode (internal error)")] - ProfileDataNotConsistent, #[error("Empty profile list cannot be merged")] EmptyProfileList, #[error("Different profile modes in profile")] DifferentProfileModes, - #[error("Merge of profile data for profile mode `{0}` is not implemented")] - MergeNotImplemented(ProfileMode), } #[derive(Clone, Debug)] pub(crate) enum ProfileDataImpl { Bc(Box), BcPairs(BcPairsProfileData), - AggregateHeapProfileInfo(Box), + HeapFlameRetained(Box), + HeapFlameAllocated(Box), + HeapSummaryRetained(Box), + HeapSummaryAllocated(Box), /// Flame graph data is in milliseconds. 
TimeFlameProfile(FlameGraphData), - Other(String), + Statement(StmtProfileData), + Coverage(StmtProfileData), + Typecheck(TypecheckProfileData), +} + +impl ProfileDataImpl { + pub(crate) fn profile_mode(&self) -> ProfileMode { + match self { + ProfileDataImpl::Bc(_) => ProfileMode::Bytecode, + ProfileDataImpl::BcPairs(_) => ProfileMode::BytecodePairs, + ProfileDataImpl::HeapFlameRetained(_) => ProfileMode::HeapFlameRetained, + ProfileDataImpl::HeapFlameAllocated(_) => ProfileMode::HeapFlameAllocated, + ProfileDataImpl::HeapSummaryRetained(_) => ProfileMode::HeapSummaryRetained, + ProfileDataImpl::HeapSummaryAllocated(_) => ProfileMode::HeapSummaryAllocated, + ProfileDataImpl::TimeFlameProfile(_) => ProfileMode::TimeFlame, + ProfileDataImpl::Statement(_) => ProfileMode::Statement, + ProfileDataImpl::Coverage(_) => ProfileMode::Coverage, + ProfileDataImpl::Typecheck(_) => ProfileMode::Typecheck, + } + } } /// Collected profiling data. #[derive(Clone, Debug)] pub struct ProfileData { - pub(crate) profile_mode: ProfileMode, - /// Serialized to text (e.g. CSV or flamegraph). pub(crate) profile: ProfileDataImpl, } +fn _assert_profile_data_send_sync() { + fn _assert_send_sync() {} + _assert_send_sync::(); +} + impl ProfileData { - pub(crate) fn new(profile_mode: ProfileMode, profile: String) -> ProfileData { - ProfileData { - profile_mode, - profile: ProfileDataImpl::Other(profile), - } + /// Profile mode used to collect this data. + pub fn profile_mode(&self) -> ProfileMode { + self.profile.profile_mode() } /// Generate a string with profile data (e.g. CSV or flamegraph, depending on profile type). - pub fn gen(&self) -> anyhow::Result { - match (&self.profile, &self.profile_mode) { - (ProfileDataImpl::Other(profile), _) => Ok(profile.clone()), - (ProfileDataImpl::Bc(bc), _) => Ok(bc.gen_csv()), - (ProfileDataImpl::BcPairs(bc_pairs), _) => Ok(bc_pairs.gen_csv()), - ( - ProfileDataImpl::AggregateHeapProfileInfo(profile), - ProfileMode::HeapFlameRetained | ProfileMode::HeapFlameAllocated, - ) => Ok(profile.gen_flame_graph()), - ( - ProfileDataImpl::AggregateHeapProfileInfo(profile), - ProfileMode::HeapSummaryRetained | ProfileMode::HeapSummaryAllocated, - ) => Ok(profile.gen_summary_csv()), - (ProfileDataImpl::AggregateHeapProfileInfo(_), _) => { - Err(ProfileDataError::ProfileDataNotConsistent.into()) - } - (ProfileDataImpl::TimeFlameProfile(data), ProfileMode::TimeFlame) => Ok(data.write()), - (ProfileDataImpl::TimeFlameProfile(_), _) => { - Err(ProfileDataError::ProfileDataNotConsistent.into()) - } + pub fn gen(&self) -> crate::Result { + match &self.profile { + ProfileDataImpl::Bc(bc) => Ok(bc.gen_csv()), + ProfileDataImpl::BcPairs(bc_pairs) => Ok(bc_pairs.gen_csv()), + ProfileDataImpl::HeapFlameRetained(profile) + | ProfileDataImpl::HeapFlameAllocated(profile) => Ok(profile.gen_flame_graph()), + ProfileDataImpl::HeapSummaryRetained(profile) + | ProfileDataImpl::HeapSummaryAllocated(profile) => Ok(profile.gen_summary_csv()), + ProfileDataImpl::TimeFlameProfile(data) => Ok(data.write()), + ProfileDataImpl::Statement(data) => Ok(data.write_to_string()), + ProfileDataImpl::Coverage(data) => Ok(data.write_coverage()), + ProfileDataImpl::Typecheck(data) => Ok(data.gen_csv()), } } /// Write to a file. 
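(Editorial aside, not part of the diff: the `ProfilerType` trait itself lives in profiler_type.rs, which is not shown here. Piecing together the impls above and the `merge` call sites below, it presumably looks roughly like this sketch; the provided-method body, and the use of `internal_error!` for the mode-mismatch case, are guesses.)

pub(crate) trait ProfilerType {
    type Data;
    const PROFILE_MODE: ProfileMode;

    /// Downcast generic profile data, if its mode matches this profiler.
    fn data_from_generic(profile_data: &ProfileDataImpl) -> Option<&Self::Data>;
    /// Wrap this profiler's data back into the generic enum.
    fn data_to_generic(data: Self::Data) -> ProfileDataImpl;
    /// Merge several profiles of this one type.
    fn merge_profiles_impl(profiles: &[&Self::Data]) -> starlark_syntax::Result<Self::Data>;

    /// Presumably a provided method: downcast every input, merge, and re-wrap.
    fn merge_profiles(profiles: &[&ProfileData]) -> starlark_syntax::Result<ProfileData> {
        let mut datas = Vec::with_capacity(profiles.len());
        for p in profiles {
            match Self::data_from_generic(&p.profile) {
                Some(data) => datas.push(data),
                // Error type assumed; callers have already checked the modes match.
                None => return Err(internal_error!("inconsistent profile mode in merge")),
            }
        }
        Ok(ProfileData {
            profile: Self::data_to_generic(Self::merge_profiles_impl(&datas)?),
        })
    }
}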
- pub fn write(&self, path: &Path) -> anyhow::Result<()> { - fs::write(path, self.gen()?).with_context(|| { - format!( - "write profile `{}` data to `{}`", - self.profile_mode, - path.display() + pub fn write(&self, path: &Path) -> crate::Result<()> { + fs::write(path, self.gen()?).map_err(|e| { + anyhow::anyhow!( + "Could not write profile `{}` data to `{}`: {}", + self.profile.profile_mode(), + path.display(), + e, ) })?; Ok(()) @@ -105,118 +126,47 @@ impl ProfileData { /// Merge profiles (aggregate). pub fn merge<'a>( profiles: impl IntoIterator, - ) -> anyhow::Result { + ) -> crate::Result { let profiles = Vec::from_iter(profiles); + + if let [one] = profiles.as_slice() { + // If there's only one profile, just return it instead of invoking merge. + // - Merge may fail + // - Or may not be implemented for the profile type + return Ok(ProfileData::clone(one)); + } + let profile_mode = match profiles.first() { - None => return Err(ProfileDataError::EmptyProfileList.into()), - Some(p) => p.profile_mode.dupe(), + None => return Err(crate::Error::new_other(ProfileDataError::EmptyProfileList)), + Some(p) => p.profile.profile_mode(), }; for p in &profiles { - if p.profile_mode != profile_mode { - return Err(ProfileDataError::DifferentProfileModes.into()); + if p.profile.profile_mode() != profile_mode { + return Err(crate::Error::new_other( + ProfileDataError::DifferentProfileModes, + )); } } let profile = match &profile_mode { - ProfileMode::Bytecode => { - let profiles = profiles.try_map(|p| match &p.profile { - ProfileDataImpl::Bc(bc) => Ok(&**bc), - _ => Err(ProfileDataError::ProfileDataNotConsistent), - })?; - let profile = BcProfileData::merge(profiles); - ProfileDataImpl::Bc(Box::new(profile)) - } - ProfileMode::BytecodePairs => { - let profiles = profiles.try_map(|p| match &p.profile { - ProfileDataImpl::BcPairs(bc_pairs) => Ok(bc_pairs), - _ => Err(ProfileDataError::ProfileDataNotConsistent), - })?; - let profile = BcPairsProfileData::merge(profiles); - ProfileDataImpl::BcPairs(profile) + ProfileMode::Bytecode => BcProfilerType::merge_profiles(&profiles)?.profile, + ProfileMode::BytecodePairs => BcPairsProfilerType::merge_profiles(&profiles)?.profile, + ProfileMode::HeapSummaryAllocated => { + HeapSummaryAllocatedProfilerType::merge_profiles(&profiles)?.profile } - ProfileMode::HeapSummaryAllocated - | ProfileMode::HeapSummaryRetained - | ProfileMode::HeapFlameAllocated - | ProfileMode::HeapFlameRetained => { - let profiles = profiles.try_map(|p| match &p.profile { - ProfileDataImpl::AggregateHeapProfileInfo(profile) => Ok(&**profile), - _ => Err(ProfileDataError::ProfileDataNotConsistent), - })?; - let profile = AggregateHeapProfileInfo::merge(profiles); - ProfileDataImpl::AggregateHeapProfileInfo(Box::new(profile)) + ProfileMode::HeapSummaryRetained => { + HeapSummaryRetainedProfilerType::merge_profiles(&profiles)?.profile } - ProfileMode::TimeFlame => { - let profiles = profiles.try_map(|p| match &p.profile { - ProfileDataImpl::TimeFlameProfile(data) => Ok(data), - _ => Err(ProfileDataError::ProfileDataNotConsistent), - })?; - let profile = FlameGraphData::merge(profiles); - ProfileDataImpl::TimeFlameProfile(profile) + ProfileMode::HeapFlameAllocated => { + HeapFlameAllocatedProfilerType::merge_profiles(&profiles)?.profile } - profile_mode => { - return Err(ProfileDataError::MergeNotImplemented(profile_mode.dupe()).into()); + ProfileMode::HeapFlameRetained => { + HeapFlameRetainedProfilerType::merge_profiles(&profiles)?.profile } + ProfileMode::TimeFlame => 
TimeFlameProfilerType::merge_profiles(&profiles)?.profile, + ProfileMode::Typecheck => TypecheckProfilerType::merge_profiles(&profiles)?.profile, + ProfileMode::Statement => StmtProfilerType::merge_profiles(&profiles)?.profile, + ProfileMode::Coverage => CoverageProfileType::merge_profiles(&profiles)?.profile, }; - Ok(ProfileData { - profile_mode, - profile, - }) - } -} - -#[cfg(test)] -mod tests { - use dupe::Dupe; - - use crate::eval::runtime::profile::bc::BcPairsProfileData; - use crate::eval::runtime::profile::data::ProfileDataImpl; - use crate::eval::runtime::profile::flamegraph::FlameGraphData; - use crate::eval::ProfileData; - use crate::eval::ProfileMode; - - #[test] - fn merge_bc() { - let profile = ProfileData { - profile_mode: ProfileMode::Bytecode, - profile: ProfileDataImpl::Bc(Box::default()), - }; - // Smoke. - ProfileData::merge([&profile, &profile]).unwrap(); - } - - #[test] - fn merge_bc_pairs() { - let profile = ProfileData { - profile_mode: ProfileMode::BytecodePairs, - profile: ProfileDataImpl::BcPairs(BcPairsProfileData::default()), - }; - // Smoke. - ProfileData::merge([&profile, &profile]).unwrap(); - } - - #[test] - fn merge_aggregated_heap_profile() { - for profile_mode in [ - ProfileMode::HeapFlameRetained, - ProfileMode::HeapFlameAllocated, - ProfileMode::HeapSummaryRetained, - ProfileMode::HeapSummaryAllocated, - ] { - let profile = ProfileData { - profile_mode: profile_mode.dupe(), - profile: ProfileDataImpl::AggregateHeapProfileInfo(Box::default()), - }; - // Smoke. - ProfileData::merge([&profile, &profile]).unwrap(); - } - } - - #[test] - fn merge_time_flame() { - let profile = ProfileData { - profile_mode: ProfileMode::TimeFlame, - profile: ProfileDataImpl::TimeFlameProfile(FlameGraphData::default()), - }; - // Smoke. - ProfileData::merge([&profile, &profile]).unwrap(); + Ok(ProfileData { profile }) } } diff --git a/starlark-rust/starlark/src/eval/runtime/profile/flamegraph.rs b/starlark-rust/starlark/src/eval/runtime/profile/flamegraph.rs index 46d9683a0062b..2049beb850e78 100644 --- a/starlark-rust/starlark/src/eval/runtime/profile/flamegraph.rs +++ b/starlark-rust/starlark/src/eval/runtime/profile/flamegraph.rs @@ -22,7 +22,7 @@ use std::fmt::Write; use dupe::Dupe; use starlark_map::small_map::SmallMap; -use crate::values::layout::heap::profile::arc_str::ArcStr; +use crate::util::arc_str::ArcStr; /// Node in flamegraph tree. 
#[derive(Debug, Clone, Default, PartialEq, Eq)] diff --git a/starlark-rust/starlark/src/eval/runtime/profile/golden/bytecode.golden b/starlark-rust/starlark/src/eval/runtime/profile/golden/bytecode.golden new file mode 100644 index 0000000000000..cca75a93311c7 --- /dev/null +++ b/starlark-rust/starlark/src/eval/runtime/profile/golden/bytecode.golden @@ -0,0 +1,93 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Opcode,Count,Count / Total +"TOTAL",745,"1.000" +"CallFrozenNativePos",244,"0.328" +"Continue",220,"0.295" +"Const",40,"0.054" +"Mov",40,"0.054" +"LoadModule",24,"0.032" +"ListOfConsts",24,"0.032" +"Iter",24,"0.032" +"CallPos",24,"0.032" +"ReturnConst",23,"0.031" +"AddAssign",20,"0.027" +"Multiply",20,"0.027" +"IfNotBr",20,"0.027" +"Return",7,"0.009" +"PossibleGc",6,"0.008" +"ListNew",4,"0.005" +"StoreModuleAndExport",3,"0.004" +"Def",2,"0.003" +"LoadLocal",0,"0.000" +"LoadLocalCaptured",0,"0.000" +"StoreLocalCaptured",0,"0.000" +"StoreModule",0,"0.000" +"Unpack",0,"0.000" +"ArrayIndex",0,"0.000" +"SetArrayIndex",0,"0.000" +"ArrayIndexSet",0,"0.000" +"Slice",0,"0.000" +"ObjectField",0,"0.000" +"SetObjectField",0,"0.000" +"Eq",0,"0.000" +"EqConst",0,"0.000" +"EqPtr",0,"0.000" +"EqStr",0,"0.000" +"EqInt",0,"0.000" +"Not",0,"0.000" +"Minus",0,"0.000" +"Plus",0,"0.000" +"BitNot",0,"0.000" +"Less",0,"0.000" +"Greater",0,"0.000" +"LessOrEqual",0,"0.000" +"GreaterOrEqual",0,"0.000" +"In",0,"0.000" +"Add",0,"0.000" +"Sub",0,"0.000" +"Percent",0,"0.000" +"PercentSOne",0,"0.000" +"FormatOne",0,"0.000" +"Divide",0,"0.000" +"FloorDivide",0,"0.000" +"BitAnd",0,"0.000" +"BitOr",0,"0.000" +"BitOrAssign",0,"0.000" +"BitXor",0,"0.000" +"LeftShift",0,"0.000" +"RightShift",0,"0.000" +"Len",0,"0.000" +"Type",0,"0.000" +"TypeIs",0,"0.000" +"IsInstance",0,"0.000" +"TupleNPop",0,"0.000" +"ListNPop",0,"0.000" +"DictNew",0,"0.000" +"DictNPop",0,"0.000" +"DictOfConsts",0,"0.000" +"DictConstKeys",0,"0.000" +"ComprListAppend",0,"0.000" +"ComprDictInsert",0,"0.000" +"CheckType",0,"0.000" +"Br",0,"0.000" +"IfBr",0,"0.000" +"Break",0,"0.000" +"IterStop",0,"0.000" +"ReturnCheckType",0,"0.000" +"Call",0,"0.000" +"CallFrozenDef",0,"0.000" +"CallFrozenDefPos",0,"0.000" +"CallFrozenNative",0,"0.000" +"CallFrozen",0,"0.000" +"CallFrozenPos",0,"0.000" +"CallMethod",0,"0.000" +"CallMethodPos",0,"0.000" +"CallMaybeKnownMethod",0,"0.000" +"CallMaybeKnownMethodPos",0,"0.000" +"ArrayIndex2",0,"0.000" +"End",0,"0.000" diff --git a/starlark-rust/starlark/src/eval/runtime/profile/golden/bytecode_pairs.golden b/starlark-rust/starlark/src/eval/runtime/profile/golden/bytecode_pairs.golden new file mode 100644 index 0000000000000..27a6e66b76fc7 --- /dev/null +++ b/starlark-rust/starlark/src/eval/runtime/profile/golden/bytecode_pairs.golden @@ -0,0 +1,39 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Opcode[0],Opcode[1],Count,Count / Total +"CallFrozenNativePos","Continue",200,"0.269" +"Continue","CallFrozenNativePos",180,"0.242" +"LoadModule","CallPos",24,"0.032" +"Const","Multiply",20,"0.027" +"Const","Iter",20,"0.027" +"Mov","ListOfConsts",20,"0.027" +"Mov","Continue",20,"0.027" +"AddAssign","Mov",20,"0.027" +"Multiply","CallFrozenNativePos",20,"0.027" +"ListOfConsts","Const",20,"0.027" +"IfNotBr","Const",20,"0.027" +"Iter","CallFrozenNativePos",20,"0.027" +"Continue","ReturnConst",20,"0.027" +"ReturnConst","Mov",20,"0.027" +"CallPos","CallFrozenNativePos",20,"0.027" 
+"CallFrozenNativePos","AddAssign",20,"0.027" +"CallFrozenNativePos","IfNotBr",20,"0.027" +"Continue","LoadModule",16,"0.022" +"ListNew","ListOfConsts",4,"0.005" +"ListOfConsts","CallFrozenNativePos",4,"0.005" +"Iter","LoadModule",4,"0.005" +"Continue","Return",4,"0.005" +"CallPos","ListNew",4,"0.005" +"CallFrozenNativePos","Iter",4,"0.005" +"PossibleGc","LoadModule",4,"0.005" +"StoreModuleAndExport","ReturnConst",3,"0.004" +"Return","Return",3,"0.004" +"Return","PossibleGc",3,"0.004" +"ReturnConst","PossibleGc",2,"0.003" +"Def","StoreModuleAndExport",2,"0.003" +"PossibleGc","Def",2,"0.003" +"Return","StoreModuleAndExport",1,"0.001" diff --git a/starlark-rust/starlark/src/eval/runtime/profile/golden/coverage.golden b/starlark-rust/starlark/src/eval/runtime/profile/golden/coverage.golden new file mode 100644 index 0000000000000..a5e1a26b1ca77 --- /dev/null +++ b/starlark-rust/starlark/src/eval/runtime/profile/golden/coverage.golden @@ -0,0 +1,20 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +test.star:2:1-9:1 +test.star:3:5-9:1 +test.star:6:9-9:1 +test.star:7:13-19 +test.star:9:1-16:1 +test.star:10:5-11 +test.star:11:5-14:1 +test.star:12:9-17 +test.star:13:9-27 +test.star:14:5-13 +test.star:16:1-7 +test.star:17:1-7 +test.star:18:1-7 +test.star:20:1-11 diff --git a/starlark-rust/starlark/src/eval/runtime/profile/golden/heap_flame_allocated.golden b/starlark-rust/starlark/src/eval/runtime/profile/golden/heap_flame_allocated.golden new file mode 100644 index 0000000000000..fee068019b8c0 --- /dev/null +++ b/starlark-rust/starlark/src/eval/runtime/profile/golden/heap_flame_allocated.golden @@ -0,0 +1,13 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +range 8 +module;function 16 +module;test.star.test;dict 192 +module;test.star.test;list 384 +module;test.star.test;array 480 +module;test.star.test;tuple 192 +module;test.star.test;test.star.inner;dict 1760 diff --git a/starlark-rust/starlark/src/eval/runtime/profile/golden/heap_flame_retained.golden b/starlark-rust/starlark/src/eval/runtime/profile/golden/heap_flame_retained.golden new file mode 100644 index 0000000000000..19ad028ef5c72 --- /dev/null +++ b/starlark-rust/starlark/src/eval/runtime/profile/golden/heap_flame_retained.golden @@ -0,0 +1,8 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +module;function 16 +module;test.star.test;list 8 diff --git a/starlark-rust/starlark/src/eval/runtime/profile/golden/heap_summary_allocated.golden b/starlark-rust/starlark/src/eval/runtime/profile/golden/heap_summary_allocated.golden new file mode 100644 index 0000000000000..248b86c90c242 --- /dev/null +++ b/starlark-rust/starlark/src/eval/runtime/profile/golden/heap_summary_allocated.golden @@ -0,0 +1,11 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Function,Time(s),TimeRec(s),Calls,Callers,TopCaller,TopCallerCount,Allocs,AllocBytes,dict,array,list,tuple,function +"TOTALS",0.378,0.378,24,0,"",0,378,3024,244,60,48,24,2 +"test.star.test",0.168,0.308,20,1,"module",1,156,1248,24,60,48,24,0 +"test.star.inner",0.140,0.140,0,1,"test.star.test",1,220,1760,220,0,0,0,0 +"module",0.070,0.378,4,1,"(root)",1,2,16,0,0,0,0,2 diff --git a/starlark-rust/starlark/src/eval/runtime/profile/golden/heap_summary_retained.golden 
b/starlark-rust/starlark/src/eval/runtime/profile/golden/heap_summary_retained.golden new file mode 100644 index 0000000000000..5e47100e83084 --- /dev/null +++ b/starlark-rust/starlark/src/eval/runtime/profile/golden/heap_summary_retained.golden @@ -0,0 +1,11 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Function,Time(s),TimeRec(s),Calls,Callers,TopCaller,TopCallerCount,Allocs,AllocBytes,function,list +"TOTALS",0.378,0.378,24,0,"",0,3,24,2,1 +"test.star.test",0.168,0.308,20,1,"module",1,1,8,0,1 +"test.star.inner",0.140,0.140,0,1,"test.star.test",1,0,0,0,0 +"module",0.070,0.378,4,1,"(root)",1,2,16,2,0 diff --git a/starlark-rust/starlark/src/eval/runtime/profile/golden/statement.golden b/starlark-rust/starlark/src/eval/runtime/profile/golden/statement.golden new file mode 100644 index 0000000000000..66ed7d8d7c878 --- /dev/null +++ b/starlark-rust/starlark/src/eval/runtime/profile/golden/statement.golden @@ -0,0 +1,22 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +File,Span,Duration(s),Count +"TOTAL","",2.128,304 +"test.star","7:13-19",1.400,200 +"test.star","3:5-9:1",0.140,20 +"test.star","6:9-9:1",0.140,20 +"test.star","12:9-17",0.140,20 +"test.star","13:9-27",0.140,20 +"test.star","10:5-11",0.028,4 +"test.star","11:5-14:1",0.028,4 +"test.star","14:5-13",0.028,4 +"test.star","2:1-9:1",0.014,2 +"test.star","9:1-16:1",0.014,2 +"test.star","16:1-7",0.014,2 +"test.star","17:1-7",0.014,2 +"test.star","18:1-7",0.014,2 +"test.star","20:1-11",0.014,2 diff --git a/starlark-rust/starlark/src/eval/runtime/profile/golden/time_flame.golden b/starlark-rust/starlark/src/eval/runtime/profile/golden/time_flame.golden new file mode 100644 index 0000000000000..4d867bcaedf41 --- /dev/null +++ b/starlark-rust/starlark/src/eval/runtime/profile/golden/time_flame.golden @@ -0,0 +1,10 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +root 35 +root;"module" 70 +root;"module";test.star.test 168 +root;"module";test.star.test;test.star.inner 140 diff --git a/starlark-rust/starlark/src/eval/runtime/profile/golden/typecheck.golden b/starlark-rust/starlark/src/eval/runtime/profile/golden/typecheck.golden new file mode 100644 index 0000000000000..480f3c3e42591 --- /dev/null +++ b/starlark-rust/starlark/src/eval/runtime/profile/golden/typecheck.golden @@ -0,0 +1,9 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Function,Time (s) +"TOTAL",0.140 +"inner",0.140 diff --git a/starlark-rust/starlark/src/eval/runtime/profile/heap.rs b/starlark-rust/starlark/src/eval/runtime/profile/heap.rs index 69ceab50236a8..71509a32b18ba 100644 --- a/starlark-rust/starlark/src/eval/runtime/profile/heap.rs +++ b/starlark-rust/starlark/src/eval/runtime/profile/heap.rs @@ -22,24 +22,109 @@ use dupe::Dupe; use crate::eval::runtime::profile::data::ProfileData; use crate::eval::runtime::profile::data::ProfileDataImpl; +use crate::eval::runtime::profile::profiler_type::ProfilerType; use crate::eval::ProfileMode; use crate::values::layout::heap::profile::aggregated::AggregateHeapProfileInfo; use crate::values::Heap; use crate::values::Value; -#[derive(Copy, Clone, Dupe, Debug, Allocative)] -pub(crate) enum RetainedHeapProfileMode { - Flame, - Summary, +pub(crate) struct HeapSummaryAllocatedProfilerType; +pub(crate) struct 
HeapFlameAllocatedProfilerType;
+pub(crate) struct HeapSummaryRetainedProfilerType;
+pub(crate) struct HeapFlameRetainedProfilerType;
+
+impl ProfilerType for HeapSummaryAllocatedProfilerType {
+    type Data = Box<AggregateHeapProfileInfo>;
+    const PROFILE_MODE: ProfileMode = ProfileMode::HeapSummaryAllocated;
+
+    fn data_from_generic(profile_data: &ProfileDataImpl) -> Option<&Self::Data> {
+        match profile_data {
+            ProfileDataImpl::HeapSummaryAllocated(data) => Some(data),
+            _ => None,
+        }
+    }
+
+    fn data_to_generic(data: Self::Data) -> ProfileDataImpl {
+        ProfileDataImpl::HeapSummaryAllocated(data)
+    }
+
+    fn merge_profiles_impl(profiles: &[&Self::Data]) -> starlark_syntax::Result<Self::Data> {
+        Ok(Box::new(AggregateHeapProfileInfo::merge(
+            profiles.iter().map(|x| &***x),
+        )))
+    }
 }

-impl RetainedHeapProfileMode {
-    pub(crate) fn to_profile_mode(self) -> ProfileMode {
-        match self {
-            RetainedHeapProfileMode::Flame => ProfileMode::HeapFlameRetained,
-            RetainedHeapProfileMode::Summary => ProfileMode::HeapSummaryRetained,
+impl ProfilerType for HeapFlameAllocatedProfilerType {
+    type Data = Box<AggregateHeapProfileInfo>;
+    const PROFILE_MODE: ProfileMode = ProfileMode::HeapFlameAllocated;
+
+    fn data_from_generic(profile_data: &ProfileDataImpl) -> Option<&Self::Data> {
+        match profile_data {
+            ProfileDataImpl::HeapFlameAllocated(data) => Some(data),
+            _ => None,
         }
     }
+
+    fn data_to_generic(data: Self::Data) -> ProfileDataImpl {
+        ProfileDataImpl::HeapFlameAllocated(data)
+    }
+
+    fn merge_profiles_impl(profiles: &[&Self::Data]) -> starlark_syntax::Result<Self::Data> {
+        Ok(Box::new(AggregateHeapProfileInfo::merge(
+            profiles.iter().map(|x| &***x),
+        )))
+    }
+}
+
+impl ProfilerType for HeapSummaryRetainedProfilerType {
+    type Data = Box<AggregateHeapProfileInfo>;
+    const PROFILE_MODE: ProfileMode = ProfileMode::HeapSummaryRetained;
+
+    fn data_from_generic(profile_data: &ProfileDataImpl) -> Option<&Self::Data> {
+        match profile_data {
+            ProfileDataImpl::HeapSummaryRetained(data) => Some(data),
+            _ => None,
+        }
+    }
+
+    fn data_to_generic(data: Self::Data) -> ProfileDataImpl {
+        ProfileDataImpl::HeapSummaryRetained(data)
+    }
+
+    fn merge_profiles_impl(profiles: &[&Self::Data]) -> starlark_syntax::Result<Self::Data> {
+        Ok(Box::new(AggregateHeapProfileInfo::merge(
+            profiles.iter().map(|x| &***x),
+        )))
+    }
+}
+
+impl ProfilerType for HeapFlameRetainedProfilerType {
+    type Data = Box<AggregateHeapProfileInfo>;
+    const PROFILE_MODE: ProfileMode = ProfileMode::HeapFlameRetained;
+
+    fn data_from_generic(profile_data: &ProfileDataImpl) -> Option<&Self::Data> {
+        match profile_data {
+            ProfileDataImpl::HeapFlameRetained(data) => Some(data),
+            _ => None,
+        }
+    }
+
+    fn data_to_generic(data: Self::Data) -> ProfileDataImpl {
+        ProfileDataImpl::HeapFlameRetained(data)
+    }
+
+    fn merge_profiles_impl(profiles: &[&Self::Data]) -> starlark_syntax::Result<Self::Data> {
+        Ok(Box::new(AggregateHeapProfileInfo::merge(
+            profiles.iter().map(|x| &***x),
+        )))
+    }
+}
+
+#[derive(Copy, Clone, Dupe, Debug, Allocative)]
+pub(crate) enum RetainedHeapProfileMode {
+    Flame,
+    Summary,
 }

 #[derive(Debug, thiserror::Error)]
@@ -84,13 +169,9 @@ impl HeapProfile {
     }

     // We could expose profile on the Heap, but it's an implementation detail that it works here.
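    // After this diff `gen` returns `crate::Result` rather than `anyhow::Result`;
    // callers that still want an `anyhow::Error` can convert with
    // `StarlarkResultExt::into_anyhow_result`, the same helper the `rust_loc`
    // test later in this diff uses.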
- pub(crate) fn gen( - &self, - heap: &Heap, - format: HeapProfileFormat, - ) -> anyhow::Result { + pub(crate) fn gen(&self, heap: &Heap, format: HeapProfileFormat) -> crate::Result { if !self.enabled { - return Err(HeapProfileError::NotEnabled.into()); + return Err(crate::Error::new_other(HeapProfileError::NotEnabled)); } Ok(Self::gen_enabled(heap, format)) } @@ -105,33 +186,31 @@ impl HeapProfile { fn write_flame_heap_profile(heap: &Heap) -> ProfileData { let stacks = AggregateHeapProfileInfo::collect(heap, None); ProfileData { - profile_mode: ProfileMode::HeapFlameAllocated, - profile: ProfileDataImpl::AggregateHeapProfileInfo(Box::new(stacks)), + profile: ProfileDataImpl::HeapFlameAllocated(Box::new(stacks)), } } fn write_summarized_heap_profile(heap: &Heap) -> ProfileData { let stacks = AggregateHeapProfileInfo::collect(heap, None); ProfileData { - profile_mode: ProfileMode::HeapSummaryAllocated, - profile: ProfileDataImpl::AggregateHeapProfileInfo(Box::new(stacks)), + profile: ProfileDataImpl::HeapSummaryAllocated(Box::new(stacks)), } } } #[cfg(test)] mod tests { - use super::*; use crate::environment::Globals; use crate::environment::Module; + use crate::eval::runtime::profile::heap::HeapProfile; + use crate::eval::runtime::profile::mode::ProfileMode; use crate::eval::Evaluator; - use crate::eval::ProfileMode; use crate::syntax::AstModule; use crate::syntax::Dialect; use crate::values::Value; #[test] - fn test_profiling() -> anyhow::Result<()> { + fn test_profiling() -> crate::Result<()> { // We don't test that the profile looks any particular way, but we do test it doesn't crash let ast = AstModule::parse( "foo.bzl", @@ -142,7 +221,7 @@ y = 8 * 9 + 2 f "# .to_owned(), - &Dialect::Extended, + &Dialect::AllOptionsInternal, )?; let globals = Globals::standard(); let module = Module::new(); diff --git a/starlark-rust/starlark/src/eval/runtime/profile/instant.rs b/starlark-rust/starlark/src/eval/runtime/profile/instant.rs new file mode 100644 index 0000000000000..af62e0ee37649 --- /dev/null +++ b/starlark-rust/starlark/src/eval/runtime/profile/instant.rs @@ -0,0 +1,85 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::ops::Sub; +use std::time::Duration; + +use allocative::Allocative; + +/// Real `Instant` for production code, thread-local counter for tests. +#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Allocative)] +pub(crate) struct ProfilerInstant( + #[cfg(not(test))] std::time::Instant, + #[cfg(test)] u64, // Millis. +); + +impl ProfilerInstant { + #[cfg(test)] + pub(crate) const TEST_TICK_MILLIS: u64 = 7; + + #[inline] + pub(crate) fn now() -> Self { + #[cfg(not(test))] + { + ProfilerInstant(std::time::Instant::now()) + } + #[cfg(test)] + { + thread_local! 
{ + static NOW_MILLIS: std::cell::Cell = const { std::cell::Cell::new(100003) }; + } + ProfilerInstant(NOW_MILLIS.with(|v| { + let r = v.get(); + v.set(r + ProfilerInstant::TEST_TICK_MILLIS); + r + })) + } + } + + #[inline] + pub(crate) fn duration_since(&self, earlier: ProfilerInstant) -> Duration { + #[cfg(not(test))] + { + self.0.duration_since(earlier.0) + } + #[cfg(test)] + { + Duration::from_millis(self.0.checked_sub(earlier.0).unwrap()) + } + } + + #[inline] + pub(crate) fn elapsed(&self) -> Duration { + #[cfg(not(test))] + { + self.0.elapsed() + } + #[cfg(test)] + { + ProfilerInstant::now().duration_since(*self) + } + } +} + +impl Sub for ProfilerInstant { + type Output = Duration; + + #[inline] + fn sub(self, rhs: Self) -> Self::Output { + self.duration_since(rhs) + } +} diff --git a/starlark-rust/starlark/src/eval/runtime/profile/mod.rs b/starlark-rust/starlark/src/eval/runtime/profile/mod.rs deleted file mode 100644 index 27009b783e1ec..0000000000000 --- a/starlark-rust/starlark/src/eval/runtime/profile/mod.rs +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -use std::fmt::Display; -use std::str::FromStr; - -use allocative::Allocative; -use dupe::Dupe; - -pub(crate) mod bc; -pub(crate) mod csv; -pub(crate) mod data; -pub(crate) mod flamegraph; -pub(crate) mod heap; -pub(crate) mod or_instrumentation; -pub(crate) mod stmt; -pub(crate) mod time_flame; -pub(crate) mod typecheck; - -/// How to profile starlark code. -#[derive(Debug, PartialEq, Eq, Hash, Clone, Dupe, Allocative)] -#[non_exhaustive] -pub enum ProfileMode { - /// The heap profile mode provides information about the time spent in each function and allocations - /// performed by each function. Enabling this mode the side effect of disabling garbage-collection. - /// This profiling mode is the recommended one. - HeapSummaryAllocated, - /// Like heap summary, but information about retained memory after module is frozen. - HeapSummaryRetained, - /// Like heap profile, but writes output comparible with - /// [flamegraph.pl](https://github.com/brendangregg/FlameGraph/blob/master/flamegraph.pl). - HeapFlameAllocated, - /// Like heap flame, but information about retained memory after module is frozen. - HeapFlameRetained, - /// The statement profile mode provides information about time spent in each statement. - Statement, - /// Code coverage. - Coverage, - /// The bytecode profile mode provides information about bytecode instructions. - Bytecode, - /// The bytecode profile mode provides information about bytecode instruction pairs. - BytecodePairs, - /// Provide output compatible with - /// [flamegraph.pl](https://github.com/brendangregg/FlameGraph/blob/master/flamegraph.pl). - TimeFlame, - /// Profile runtime typechecking. 
-    Typecheck,
-}
-
-impl Display for ProfileMode {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.write_str(self.name())
-    }
-}
-
-impl ProfileMode {
-    fn name(&self) -> &str {
-        match self {
-            ProfileMode::HeapSummaryAllocated => "heap-summary-allocated",
-            ProfileMode::HeapSummaryRetained => "heap-summary-retained",
-            ProfileMode::HeapFlameAllocated => "heap-flame-allocated",
-            ProfileMode::HeapFlameRetained => "heap-flame-retained",
-            ProfileMode::Statement => "statement",
-            ProfileMode::Coverage => "coverage",
-            ProfileMode::Bytecode => "bytecode",
-            ProfileMode::BytecodePairs => "bytecode-pairs",
-            ProfileMode::TimeFlame => "time-flame",
-            ProfileMode::Typecheck => "typecheck",
-        }
-    }
-}
-
-impl FromStr for ProfileMode {
-    type Err = anyhow::Error;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        for mode in [
-            ProfileMode::HeapSummaryAllocated,
-            ProfileMode::HeapSummaryRetained,
-            ProfileMode::HeapFlameAllocated,
-            ProfileMode::HeapFlameRetained,
-            ProfileMode::Statement,
-            ProfileMode::Coverage,
-            ProfileMode::Bytecode,
-            ProfileMode::BytecodePairs,
-            ProfileMode::TimeFlame,
-            ProfileMode::Typecheck,
-        ] {
-            if s == mode.name() {
-                return Ok(mode);
-            }
-        }
-        Err(anyhow::anyhow!("Invalid ProfileMode: `{}`", s))
-    }
-}
diff --git a/starlark-rust/starlark/src/eval/runtime/profile/mode.rs b/starlark-rust/starlark/src/eval/runtime/profile/mode.rs new file mode 100644 index 0000000000000..eadb65a0404c9 --- /dev/null +++ b/starlark-rust/starlark/src/eval/runtime/profile/mode.rs @@ -0,0 +1,110 @@
+/*
+ * Copyright 2019 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use std::fmt::Display;
+use std::str::FromStr;
+
+use allocative::Allocative;
+use dupe::Dupe;
+
+/// How to profile starlark code.
+#[derive(Debug, PartialEq, Eq, Hash, Clone, Dupe, Allocative)]
+#[non_exhaustive]
+pub enum ProfileMode {
+    /// The heap profile mode provides information about the time spent in each function and allocations
+    /// performed by each function. Enabling this mode has the side effect of disabling garbage-collection.
+    /// This profiling mode is the recommended one.
+    HeapSummaryAllocated,
+    /// Like heap summary, but information about retained memory after module is frozen.
+    HeapSummaryRetained,
+    /// Like heap profile, but writes output compatible with
+    /// [flamegraph.pl](https://github.com/brendangregg/FlameGraph/blob/master/flamegraph.pl).
+    HeapFlameAllocated,
+    /// Like heap flame, but information about retained memory after module is frozen.
+    HeapFlameRetained,
+    /// The statement profile mode provides information about time spent in each statement.
+    Statement,
+    /// Code coverage.
+    Coverage,
+    /// The bytecode profile mode provides information about bytecode instructions.
+    Bytecode,
+    /// The bytecode profile mode provides information about bytecode instruction pairs.
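+    /// Output is a CSV of consecutive instruction pairs with a count and the
+    /// share of all pairs, e.g. `"CallFrozenNativePos","AddAssign",20,"0.027"`
+    /// in the `bytecode_pairs` golden file above.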
+    BytecodePairs,
+    /// Provide output compatible with
+    /// [flamegraph.pl](https://github.com/brendangregg/FlameGraph/blob/master/flamegraph.pl).
+    TimeFlame,
+    /// Profile runtime typechecking.
+    Typecheck,
+}
+
+impl Display for ProfileMode {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.write_str(self.name())
+    }
+}
+
+impl ProfileMode {
+    pub(crate) const ALL: [ProfileMode; 10] = [
+        ProfileMode::HeapSummaryAllocated,
+        ProfileMode::HeapSummaryRetained,
+        ProfileMode::HeapFlameAllocated,
+        ProfileMode::HeapFlameRetained,
+        ProfileMode::Statement,
+        ProfileMode::Coverage,
+        ProfileMode::Bytecode,
+        ProfileMode::BytecodePairs,
+        ProfileMode::TimeFlame,
+        ProfileMode::Typecheck,
+    ];
+
+    pub(crate) fn name(&self) -> &str {
+        match self {
+            ProfileMode::HeapSummaryAllocated => "heap-summary-allocated",
+            ProfileMode::HeapSummaryRetained => "heap-summary-retained",
+            ProfileMode::HeapFlameAllocated => "heap-flame-allocated",
+            ProfileMode::HeapFlameRetained => "heap-flame-retained",
+            ProfileMode::Statement => "statement",
+            ProfileMode::Coverage => "coverage",
+            ProfileMode::Bytecode => "bytecode",
+            ProfileMode::BytecodePairs => "bytecode-pairs",
+            ProfileMode::TimeFlame => "time-flame",
+            ProfileMode::Typecheck => "typecheck",
+        }
+    }
+
+    /// Profile data for this mode can be obtained from
+    /// [`FrozenModule::heap_profile`](crate::environment::FrozenModule::heap_profile).
+    pub fn requires_frozen_module(&self) -> bool {
+        match self {
+            ProfileMode::HeapSummaryRetained | ProfileMode::HeapFlameRetained => true,
+            _ => false,
+        }
+    }
+}
+
+impl FromStr for ProfileMode {
+    type Err = anyhow::Error;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        for mode in Self::ALL {
+            if s == mode.name() {
+                return Ok(mode);
+            }
+        }
+        Err(anyhow::anyhow!("Invalid ProfileMode: `{}`", s))
+    }
+}
diff --git a/starlark-rust/starlark/src/eval/runtime/profile/or_instrumentation.rs b/starlark-rust/starlark/src/eval/runtime/profile/or_instrumentation.rs index eaf2da0f7ab81..0bbad4bd943ce 100644 --- a/starlark-rust/starlark/src/eval/runtime/profile/or_instrumentation.rs +++ b/starlark-rust/starlark/src/eval/runtime/profile/or_instrumentation.rs @@ -17,7 +17,7 @@
 use dupe::Dupe;

-use crate::eval::ProfileMode;
+use crate::eval::runtime::profile::mode::ProfileMode;

 #[derive(Debug, Default, Clone, Dupe, Eq, PartialEq)]
 pub(crate) enum ProfileOrInstrumentationMode {
diff --git a/starlark-rust/starlark/src/eval/runtime/profile/profiler_type.rs b/starlark-rust/starlark/src/eval/runtime/profile/profiler_type.rs new file mode 100644 index 0000000000000..f06ea906b5457 --- /dev/null +++ b/starlark-rust/starlark/src/eval/runtime/profile/profiler_type.rs @@ -0,0 +1,59 @@
+/*
+ * Copyright 2019 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use crate::eval::runtime::profile::data::ProfileDataImpl;
+use crate::eval::ProfileData;
+use crate::eval::ProfileMode;
+
+#[derive(Debug, thiserror::Error)]
+enum ProfileError {
+    #[error("Inconsistent profile type, expected `{0}`, got `{1}`")]
+    InconsistentProfileType(ProfileMode, ProfileMode),
+}
+
+pub(crate) trait ProfilerType {
+    /// Result of profiling.
+    type Data;
+
+    const PROFILE_MODE: ProfileMode;
+
+    fn data_from_generic(profile_data: &ProfileDataImpl) -> Option<&Self::Data>;
+    fn data_to_generic(data: Self::Data) -> ProfileDataImpl;
+
+    fn merge_profiles_impl(profiles: &[&Self::Data]) -> crate::Result<Self::Data>;
+
+    // Provided methods.
+
+    fn merge_profiles(profiles: &[&ProfileData]) -> crate::Result<ProfileData> {
+        let profiles: Vec<&Self::Data> = profiles
+            .iter()
+            .map(|p| match Self::data_from_generic(&p.profile) {
+                None => Err(crate::Error::new_other(
+                    ProfileError::InconsistentProfileType(
+                        Self::PROFILE_MODE,
+                        p.profile.profile_mode(),
+                    ),
+                )),
+                Some(p) => Ok(p),
+            })
+            .collect::<crate::Result<Vec<_>>>()?;
+        let merged = Self::merge_profiles_impl(&profiles)?;
+        Ok(ProfileData {
+            profile: Self::data_to_generic(merged),
+        })
+    }
+}
diff --git a/starlark-rust/starlark/src/eval/runtime/profile/stmt.rs b/starlark-rust/starlark/src/eval/runtime/profile/stmt.rs index cb2071a882d37..4a227c1b1c07c 100644 --- a/starlark-rust/starlark/src/eval/runtime/profile/stmt.rs +++ b/starlark-rust/starlark/src/eval/runtime/profile/stmt.rs @@ -15,13 +15,16 @@
  * limitations under the License.
  */

+use std::cmp::Reverse;
 use std::collections::hash_map::Entry;
 use std::collections::HashMap;
 use std::collections::HashSet;
-use std::iter;
-use std::time::Instant;
+use std::fmt::Write;

 use dupe::Dupe;
+use starlark_map::StarlarkHasherBuilder;
+use starlark_syntax::codemap::CodeMaps;
+use starlark_syntax::internal_error;

 use crate::codemap::CodeMap;
 use crate::codemap::CodeMapId;
@@ -31,107 +34,190 @@
 use crate::codemap::ResolvedFileSpan;
 use crate::codemap::Span;
 use crate::eval::runtime::profile::csv::CsvWriter;
 use crate::eval::runtime::profile::data::ProfileData;
+use crate::eval::runtime::profile::data::ProfileDataImpl;
+use crate::eval::runtime::profile::instant::ProfilerInstant;
+use crate::eval::runtime::profile::profiler_type::ProfilerType;
 use crate::eval::runtime::small_duration::SmallDuration;
 use crate::eval::ProfileMode;

+pub(crate) struct StmtProfilerType;
+pub(crate) struct CoverageProfileType;
+
+impl ProfilerType for StmtProfilerType {
+    type Data = StmtProfileData;
+    const PROFILE_MODE: ProfileMode = ProfileMode::Statement;
+
+    fn data_from_generic(profile_data: &ProfileDataImpl) -> Option<&Self::Data> {
+        match profile_data {
+            ProfileDataImpl::Statement(data) => Some(data),
+            _ => None,
+        }
+    }
+
+    fn data_to_generic(data: Self::Data) -> ProfileDataImpl {
+        ProfileDataImpl::Statement(data)
+    }
+
+    fn merge_profiles_impl(profiles: &[&Self::Data]) -> starlark_syntax::Result<Self::Data> {
+        Ok(StmtProfileData::merge(profiles))
+    }
+}
+
+impl ProfilerType for CoverageProfileType {
+    type Data = StmtProfileData;
+    const PROFILE_MODE: ProfileMode = ProfileMode::Coverage;
+
+    fn data_from_generic(profile_data: &ProfileDataImpl) -> Option<&Self::Data> {
+        match profile_data {
+            ProfileDataImpl::Coverage(data) => Some(data),
+            _ => None,
+        }
+    }
+
+    fn data_to_generic(data: Self::Data) -> ProfileDataImpl {
+        ProfileDataImpl::Coverage(data)
+    }
+
+    fn merge_profiles_impl(profiles: &[&Self::Data]) -> starlark_syntax::Result<Self::Data> {
+        Ok(StmtProfileData::merge(profiles))
+    }
+}
+
 #[derive(Debug, thiserror::Error)]
 enum
StmtProfileError {
-    #[error("Statement profiling is not enabled")]
+    #[error("Statement or coverage profiling is not enabled")]
     NotEnabled,
 }

-// When line profiling is not enabled, we want this to be small and cheap
-pub(crate) struct StmtProfile(Option<Box<StmtProfileData>>);
+pub(crate) struct StmtProfile(
+    // Box because when profiling is not enabled, we want this to be small and cheap
+    Option<Box<StmtProfileState>>,
+);
+
+#[derive(Clone)]
+struct Last {
+    file: CodeMapId,
+    span: Span,
+    start: ProfilerInstant,
+}

 // So we don't need a special case for the first time around,
 // we have a special FileId of empty that we ignore when printing
 #[derive(Clone)]
-struct StmtProfileData {
-    files: HashMap<CodeMapId, CodeMap>,
-    stmts: HashMap<(CodeMapId, Span), (usize, SmallDuration)>,
-    next_file: CodeMapId,
-    last_span: (CodeMapId, Span),
-    last_start: Instant,
+struct StmtProfileState {
+    files: CodeMaps,
+    stmts: HashMap<(CodeMapId, Span), (usize, SmallDuration), StarlarkHasherBuilder>,
+    last: Option<Last>,
 }

-impl StmtProfileData {
+/// Result of running statement or coverage profiler.
+#[derive(Clone, Debug, Default, PartialEq)]
+pub(crate) struct StmtProfileData {
+    stmts: HashMap<FileSpan, (usize, SmallDuration), StarlarkHasherBuilder>,
+}
+
+impl StmtProfileState {
     fn new() -> Self {
-        StmtProfileData {
-            files: HashMap::new(),
-            stmts: HashMap::new(),
-            next_file: CodeMapId::EMPTY,
-            last_span: (CodeMapId::EMPTY, Span::default()),
-            last_start: Instant::now(),
+        StmtProfileState {
+            files: CodeMaps::default(),
+            stmts: HashMap::default(),
+            last: None,
         }
     }

     // Add the data from last_span into the entries
-    fn add_last(&mut self, now: Instant) {
-        let time = now - self.last_start;
-        match self.stmts.entry(self.last_span) {
-            Entry::Occupied(mut x) => {
-                let v = x.get_mut();
-                v.0 += 1;
-                v.1 += SmallDuration::from_duration(time);
-            }
-            Entry::Vacant(x) => {
-                x.insert((1, SmallDuration::from_duration(time)));
+    fn add_last(&mut self, now: ProfilerInstant) {
+        if let Some(last) = &self.last {
+            let time = now - last.start;
+            match self.stmts.entry((last.file, last.span)) {
+                Entry::Occupied(mut x) => {
+                    let v = x.get_mut();
+                    v.0 += 1;
+                    v.1 += SmallDuration::from_duration(time);
+                }
+                Entry::Vacant(x) => {
+                    x.insert((1, SmallDuration::from_duration(time)));
+                }
             }
         }
     }

     fn before_stmt(&mut self, span: Span, codemap: &CodeMap) {
-        let now = Instant::now();
+        let now = ProfilerInstant::now();
         self.add_last(now);
-        if self.last_span.0 != codemap.id() {
-            self.add_codemap(codemap);
-        }
-        self.last_span = (self.next_file, span);
-        self.last_start = now;
-    }
-
-    fn add_codemap(&mut self, codemap: &CodeMap) {
-        let id = codemap.id();
-        self.next_file = id;
-        match self.files.entry(id) {
-            Entry::Occupied(_) => {
-                // Nothing to do, we have already got an owned version of this CodeMap
-            }
-            Entry::Vacant(x) => {
-                x.insert(codemap.dupe());
+        match &self.last {
+            None => self.files.add(codemap),
+            Some(last) => {
+                if last.file != codemap.id() {
+                    self.files.add(codemap);
+                }
             }
         }
+        self.last = Some(Last {
+            file: codemap.id(),
+            span,
+            start: now,
+        });
     }

-    fn write_to_string(&self, now: Instant) -> String {
+    fn finish(&self) -> crate::Result<StmtProfileData> {
         // The statement that was running last won't have been properly updated.
         // However, at this point, we have probably run some post-execution code,
         // so it probably wouldn't have a "fair" timing anyway.
         // We do our best though, and give it a time of now.
         // Clone first, since we don't want to impact the real timing with our odd
         // final execution finish.
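+        // (In test builds `ProfilerInstant::now()` is deterministic: it advances a
+        // thread-local counter by `TEST_TICK_MILLIS` (7ms) on every call, see
+        // `instant.rs` above.)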
+        let now = ProfilerInstant::now();
         let mut data = self.clone();
         data.add_last(now);
+        Ok(StmtProfileData {
+            stmts: data
+                .stmts
+                .iter()
+                .map(|((file, span), v)| {
+                    Ok::<_, crate::Error>((
+                        FileSpan {
+                            file: data
+                                .files
+                                .get(*file)
+                                .ok_or_else(|| internal_error!("no file corresponding to file id"))?
+                                .dupe(),
+                            span: *span,
+                        },
+                        *v,
+                    ))
+                })
+                .collect::<crate::Result<_>>()?,
+        })
+    }
+}
+
+impl StmtProfileData {
+    pub(crate) fn write_to_string(&self) -> String {
         struct Item {
             span: FileSpan,
             time: SmallDuration,
             count: usize,
         }
         // There should be one EMPTY span entry
-        let mut items = Vec::with_capacity(data.stmts.len() - 1);
+        let mut items = Vec::with_capacity(self.stmts.len() - 1);
         let mut total_time = SmallDuration::default();
         let mut total_count = 0;
-        for ((file, span), (count, time)) in data.stmts {
+        for (file_span, &(count, time)) in &self.stmts {
             // EMPTY represents the first time special-case
-            if file != CodeMapId::EMPTY {
-                let span = data.files[&file].file_span(span);
+            if file_span.file.id() != CodeMapId::EMPTY {
                 total_time += time;
                 total_count += count;
-                items.push(Item { span, time, count })
+                items.push(Item {
+                    span: file_span.dupe(),
+                    time,
+                    count,
+                })
             }
         }
-        items.sort_by_key(|x| -(x.time.nanos as i128));
+
+        items.sort_by_key(|x| (Reverse(x.time), Reverse(x.count), x.span.dupe()));

         let mut csv = CsvWriter::new(["File", "Span", "Duration(s)", "Count"]);
         csv.write_value("TOTAL");
@@ -151,20 +237,48 @@
         csv.finish()
     }

+    pub(crate) fn write_coverage(&self) -> String {
+        let mut s = String::new();
+        let mut keys: Vec<_> = self
+            .stmts
+            .keys()
+            .filter(|file_span| file_span.file.id() != CodeMapId::EMPTY)
+            .map(|file_span| file_span.resolve())
+            .collect();
+        keys.sort();
+        for key in keys {
+            writeln!(s, "{}", key).unwrap();
+        }
+        s
+    }
+
     fn coverage(&self) -> HashSet<ResolvedFileSpan> {
         self.stmts
             .keys()
-            .filter(|(file, _)| *file != CodeMapId::EMPTY)
-            .chain(iter::once(&self.last_span))
-            .map(|(code_map_id, span)| {
-                self.files
-                    .get(code_map_id)
-                    .unwrap()
-                    .file_span(*span)
-                    .resolve()
-            })
+            .filter(|file_span| file_span.file.id() != CodeMapId::EMPTY)
+            .map(|file_span| file_span.resolve())
             .collect()
     }
+
+    fn merge(profiles: &[&StmtProfileData]) -> StmtProfileData {
+        let mut result = StmtProfileData::default();
+        let StmtProfileData { stmts } = &mut result;
+        for profile in profiles {
+            for (file_span, (count, time)) in &profile.stmts {
+                match stmts.entry(file_span.dupe()) {
+                    Entry::Occupied(mut x) => {
+                        let v = x.get_mut();
+                        v.0 += count;
+                        v.1 += *time;
+                    }
+                    Entry::Vacant(x) => {
+                        x.insert((*count, *time));
+                    }
+                }
+            }
+        }
+        result
+    }
 }

 impl StmtProfile {
@@ -173,7 +287,7 @@
     }

     pub(crate) fn enable(&mut self) {
-        self.0 = Some(Box::new(StmtProfileData::new()))
+        self.0 = Some(Box::new(StmtProfileState::new()))
     }

     pub(crate) fn before_stmt(&mut self, span: FileSpanRef) {
@@ -183,34 +297,56 @@
     }

     // None = not applicable because not enabled
-    pub(crate) fn gen(&self) -> anyhow::Result<ProfileData> {
-        let now = Instant::now();
+    pub(crate) fn gen(&self) -> crate::Result<ProfileData> {
         match &self.0 {
-            Some(data) => Ok(ProfileData::new(
-                ProfileMode::Statement,
-                data.write_to_string(now),
-            )),
-            None => Err(StmtProfileError::NotEnabled.into()),
+            Some(data) => Ok(ProfileData {
+                profile: ProfileDataImpl::Statement(data.finish()?),
+            }),
+            None => Err(crate::Error::new_other(StmtProfileError::NotEnabled)),
         }
     }

-    pub(crate) fn coverage(&self) -> anyhow::Result<HashSet<ResolvedFileSpan>> {
+    pub(crate) fn coverage(&self) -> crate::Result<HashSet<ResolvedFileSpan>> {
         Ok(self
             .0
.as_ref() - .ok_or(StmtProfileError::NotEnabled)? + .ok_or_else(|| crate::Error::new_other(StmtProfileError::NotEnabled))? + .finish()? .coverage()) } + + pub(crate) fn gen_coverage(&self) -> crate::Result { + match &self.0 { + Some(data) => Ok(ProfileData { + profile: ProfileDataImpl::Coverage(data.finish()?), + }), + None => Err(crate::Error::new_other(StmtProfileError::NotEnabled)), + } + } } #[cfg(test)] mod tests { + use std::collections::HashMap; + + use starlark_syntax::codemap::CodeMap; + use starlark_syntax::codemap::CodeMaps; + use starlark_syntax::codemap::FileSpan; + use starlark_syntax::codemap::FileSpanRef; + use starlark_syntax::codemap::Pos; + use starlark_syntax::codemap::Span; use crate::assert::test_functions; use crate::environment::GlobalsBuilder; use crate::environment::Module; + use crate::eval::runtime::profile::data::ProfileDataImpl; + use crate::eval::runtime::profile::instant::ProfilerInstant; + use crate::eval::runtime::profile::mode::ProfileMode; + use crate::eval::runtime::profile::stmt::StmtProfile; + use crate::eval::runtime::profile::stmt::StmtProfileData; + use crate::eval::runtime::small_duration::SmallDuration; use crate::eval::Evaluator; - use crate::eval::ProfileMode; + use crate::eval::ProfileData; use crate::syntax::AstModule; use crate::syntax::Dialect; @@ -229,7 +365,7 @@ xx(*[1]) xx(*[2]) "# .to_owned(), - &Dialect::Extended, + &Dialect::AllOptionsInternal, ) .unwrap(); eval.enable_profile(&ProfileMode::Coverage).unwrap(); @@ -255,4 +391,83 @@ xx(*[2]) coverage ); } + + #[test] + fn test_merge() { + let x = CodeMap::new("x.star".to_owned(), "def a(): pass".to_owned()); + let y = CodeMap::new("y.star".to_owned(), "def b(): pass".to_owned()); + let z = CodeMap::new("z.star".to_owned(), "def c(): pass".to_owned()); + + let mut all_files = CodeMaps::default(); + all_files.add(&x); + all_files.add(&y); + all_files.add(&z); + + let mut a = StmtProfile::new(); + a.enable(); + a.before_stmt(FileSpanRef { + file: &x, + span: Span::new(Pos::new(1), Pos::new(2)), + }); + a.before_stmt(FileSpanRef { + file: &y, + span: Span::new(Pos::new(2), Pos::new(4)), + }); + let a = a.gen().unwrap(); + + let mut b = StmtProfile::new(); + b.enable(); + b.before_stmt(FileSpanRef { + file: &y, + span: Span::new(Pos::new(2), Pos::new(4)), + }); + b.before_stmt(FileSpanRef { + file: &z, + span: Span::new(Pos::new(3), Pos::new(5)), + }); + let b = b.gen().unwrap(); + + let ProfileDataImpl::Statement(merged) = ProfileData::merge([&a, &b]).unwrap().profile + else { + panic!("Expected statement profile data"); + }; + + assert_eq!( + StmtProfileData { + stmts: HashMap::from_iter([ + ( + FileSpan { + file: x, + span: Span::new(Pos::new(1), Pos::new(2)) + }, + ( + 1, + SmallDuration::from_millis(ProfilerInstant::TEST_TICK_MILLIS) + ) + ), + ( + FileSpan { + file: y, + span: Span::new(Pos::new(2), Pos::new(4)) + }, + ( + 2, + SmallDuration::from_millis(ProfilerInstant::TEST_TICK_MILLIS * 2) + ) + ), + ( + FileSpan { + file: z, + span: Span::new(Pos::new(3), Pos::new(5)) + }, + ( + 1, + SmallDuration::from_millis(ProfilerInstant::TEST_TICK_MILLIS) + ) + ), + ]), + }, + merged + ); + } } diff --git a/starlark-rust/starlark/src/eval/runtime/profile/tests.rs b/starlark-rust/starlark/src/eval/runtime/profile/tests.rs new file mode 100644 index 0000000000000..ad23f14955b04 --- /dev/null +++ b/starlark-rust/starlark/src/eval/runtime/profile/tests.rs @@ -0,0 +1,145 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#![cfg(test)] + +use starlark_syntax::golden_test_template::golden_test_template; +use starlark_syntax::syntax::AstModule; +use starlark_syntax::syntax::Dialect; + +use crate::assert::test_functions; +use crate::environment::GlobalsBuilder; +use crate::environment::Module; +use crate::eval::runtime::profile::data::ProfileDataImpl; +use crate::eval::Evaluator; +use crate::eval::ProfileData; +use crate::eval::ProfileMode; + +fn test_profile_golden_for_mode(mode: ProfileMode) { + let module = Module::new(); + let mut eval = Evaluator::new(&module); + eval.enable_profile(&mode).unwrap(); + eval.eval_module( + AstModule::parse( + "test.star", + r#" +def inner(x: int): + if noop(): + return 10 + else: + for x in range(10): + noop() + +def test(): + r = [] + for x in noop([1, 2, 3, 4, 5]): + inner(x) + r += noop([1] * 3) + return r + +test() +test() +test() + +R = test() +"# + .to_owned(), + &Dialect::AllOptionsInternal, + ) + .unwrap(), + &GlobalsBuilder::extended().with(test_functions).build(), + ) + .unwrap(); + + let mut profile_data = match mode { + ProfileMode::HeapSummaryRetained | ProfileMode::HeapFlameRetained => { + drop(eval); + let module = module.freeze().unwrap(); + module.heap_profile().unwrap() + } + _ => eval.gen_profile().unwrap(), + }; + + if let ProfileDataImpl::HeapFlameRetained(profile) + | ProfileDataImpl::HeapFlameAllocated(profile) + | ProfileDataImpl::HeapSummaryRetained(profile) + | ProfileDataImpl::HeapSummaryAllocated(profile) = &mut profile_data.profile + { + profile.normalize_for_golden_tests(); + } + + golden_test_template( + &format!( + "src/eval/runtime/profile/golden/{}.golden", + mode.name().replace('-', "_") + ), + &profile_data.gen().unwrap(), + ); + + // Smoke test for profile merging. 
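+    // Merging a profile with itself should simply sum counts and durations per
+    // key; `test_typecheck_profile_merge` below pins down those semantics.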
+ ProfileData::merge([&profile_data, &profile_data]).unwrap(); +} + +#[test] +fn test_profile_golden_heap_summary_allocated() { + test_profile_golden_for_mode(ProfileMode::HeapSummaryAllocated); +} + +#[test] +fn test_profile_golden_heap_summary_retained() { + test_profile_golden_for_mode(ProfileMode::HeapSummaryRetained); +} + +#[test] +fn test_profile_golden_heap_flame_allocated() { + test_profile_golden_for_mode(ProfileMode::HeapFlameAllocated); +} + +#[test] +fn test_profile_golden_heap_flame_retained() { + test_profile_golden_for_mode(ProfileMode::HeapFlameRetained); +} + +#[test] +fn test_profile_golden_statement() { + test_profile_golden_for_mode(ProfileMode::Statement); +} + +#[test] +fn test_profile_golden_coverage() { + test_profile_golden_for_mode(ProfileMode::Coverage); +} + +#[test] +fn test_profile_golden_bytecode() { + test_profile_golden_for_mode(ProfileMode::Bytecode); +} + +#[test] +fn test_profile_golden_bytecode_pairs() { + test_profile_golden_for_mode(ProfileMode::BytecodePairs); +} + +#[test] +fn test_profile_golden_time_flame() { + test_profile_golden_for_mode(ProfileMode::TimeFlame); +} + +#[test] +fn test_profile_golden_typecheck() { + test_profile_golden_for_mode(ProfileMode::Typecheck); +} diff --git a/starlark-rust/starlark/src/eval/runtime/profile/time_flame.rs b/starlark-rust/starlark/src/eval/runtime/profile/time_flame.rs index 57f96b17963e8..7ccf7db3f7e00 100644 --- a/starlark-rust/starlark/src/eval/runtime/profile/time_flame.rs +++ b/starlark-rust/starlark/src/eval/runtime/profile/time_flame.rs @@ -18,7 +18,6 @@ use std::collections::hash_map::Entry; use std::collections::HashMap; use std::slice; -use std::time::Instant; use dupe::Dupe; use starlark_map::StarlarkHasherBuilder; @@ -29,15 +28,39 @@ use crate::eval::runtime::profile::data::ProfileData; use crate::eval::runtime::profile::data::ProfileDataImpl; use crate::eval::runtime::profile::flamegraph::FlameGraphData; use crate::eval::runtime::profile::flamegraph::FlameGraphNode; +use crate::eval::runtime::profile::instant::ProfilerInstant; +use crate::eval::runtime::profile::profiler_type::ProfilerType; use crate::eval::runtime::small_duration::SmallDuration; use crate::eval::ProfileMode; -use crate::values::layout::heap::profile::arc_str::ArcStr; +use crate::util::arc_str::ArcStr; use crate::values::layout::pointer::RawPointer; use crate::values::FrozenValue; use crate::values::Trace; use crate::values::Tracer; use crate::values::Value; +pub(crate) struct TimeFlameProfilerType; + +impl ProfilerType for TimeFlameProfilerType { + type Data = FlameGraphData; + const PROFILE_MODE: ProfileMode = ProfileMode::TimeFlame; + + fn data_from_generic(profile_data: &ProfileDataImpl) -> Option<&Self::Data> { + match profile_data { + ProfileDataImpl::TimeFlameProfile(data) => Some(data), + _ => None, + } + } + + fn data_to_generic(data: Self::Data) -> ProfileDataImpl { + ProfileDataImpl::TimeFlameProfile(data) + } + + fn merge_profiles_impl(profiles: &[&Self::Data]) -> starlark_syntax::Result { + Ok(FlameGraphData::merge(profiles.iter().copied())) + } +} + #[derive(Debug, thiserror::Error)] enum FlameProfileError { #[error("Flame profile not enabled")] @@ -136,7 +159,7 @@ pub(crate) struct TimeFlameProfile<'v>( #[derive(Default, Trace)] struct FlameData<'v> { /// All events in the profile, i.e. function entry or exit with timestamp. 
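    /// Rendering folds this event stream into `;`-separated stacks weighted by
    /// milliseconds, e.g. `root;"module";test.star.test 168` in the `time_flame`
    /// golden file above.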
- frames: Vec<(Frame, Instant)>, + frames: Vec<(Frame, ProfilerInstant)>, index: ValueIndex<'v>, } @@ -158,7 +181,7 @@ impl<'a> Stacks<'a> { fn new( mutable_names: &'a [String], frozen_names: &'a [String], - frames: &[(Frame, Instant)], + frames: &[(Frame, ProfilerInstant)], ) -> Self { let mut res = Stacks::blank("root"); let Some(mut last_time) = frames.first().map(|x| x.1) else { @@ -177,8 +200,8 @@ impl<'a> Stacks<'a> { &mut self, mutable_names: &'a [String], frozen_names: &'a [String], - frames: &mut slice::Iter<(Frame, Instant)>, - last_time: &mut Instant, + frames: &mut slice::Iter<(Frame, ProfilerInstant)>, + last_time: &mut ProfilerInstant, ) { while let Some((frame, time)) = frames.next() { self.time += time.duration_since(*last_time); @@ -230,20 +253,20 @@ impl<'v> TimeFlameProfile<'v> { pub(crate) fn record_call_enter(&mut self, function: Value<'v>) { if let Some(x) = &mut self.0 { let ind = x.index.index(function); - x.frames.push((Frame::Push(ind), Instant::now())) + x.frames.push((Frame::Push(ind), ProfilerInstant::now())) } } pub(crate) fn record_call_exit(&mut self) { if let Some(x) = &mut self.0 { - x.frames.push((Frame::Pop, Instant::now())) + x.frames.push((Frame::Pop, ProfilerInstant::now())) } } // We could expose profile on the Heap, but it's an implementation detail that it works here. - pub(crate) fn gen(&self) -> anyhow::Result { + pub(crate) fn gen(&self) -> crate::Result { match &self.0 { - None => Err(FlameProfileError::NotEnabled.into()), + None => Err(crate::Error::new_other(FlameProfileError::NotEnabled)), Some(x) => Ok(Self::gen_profile(x)), } } @@ -255,7 +278,6 @@ impl<'v> TimeFlameProfile<'v> { let mutable_names = x.index.mutable_values.map(|x| x.to_repr()); let frozen_names = x.index.frozen_values.map(|x| x.to_value().to_repr()); ProfileData { - profile_mode: ProfileMode::TimeFlame, profile: ProfileDataImpl::TimeFlameProfile( Stacks::new(&mutable_names, &frozen_names, &x.frames).render(), ), @@ -278,8 +300,8 @@ mod tests { use crate::environment::GlobalsBuilder; use crate::environment::Module; use crate::eval::runtime::file_loader::ReturnOwnedFileLoader; + use crate::eval::runtime::profile::mode::ProfileMode; use crate::eval::Evaluator; - use crate::eval::ProfileMode; use crate::syntax::AstModule; use crate::syntax::Dialect; use crate::values::none::NoneType; diff --git a/starlark-rust/starlark/src/eval/runtime/profile/typecheck.rs b/starlark-rust/starlark/src/eval/runtime/profile/typecheck.rs index bb519aee55386..1bb4163d2e8c1 100644 --- a/starlark-rust/starlark/src/eval/runtime/profile/typecheck.rs +++ b/starlark-rust/starlark/src/eval/runtime/profile/typecheck.rs @@ -17,38 +17,70 @@ //! Runtime typecheck profile. 
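//!
//! Output is a two-column CSV (`Function,Time (s)`) with a leading `"TOTAL"`
//! row, as in the `typecheck` golden file above.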
+use std::collections::HashMap;
 use std::time::Duration;

+use dupe::Dupe;
+use starlark_map::Hashed;
+use starlark_map::StarlarkHasherBuilder;
+
 use crate::collections::SmallMap;
 use crate::eval::runtime::profile::csv::CsvWriter;
 use crate::eval::runtime::profile::data::ProfileData;
+use crate::eval::runtime::profile::data::ProfileDataImpl;
+use crate::eval::runtime::profile::profiler_type::ProfilerType;
 use crate::eval::runtime::small_duration::SmallDuration;
 use crate::eval::ProfileMode;
+use crate::util::arc_str::ArcStr;
 use crate::values::FrozenStringValue;

+pub(crate) struct TypecheckProfilerType;
+
+impl ProfilerType for TypecheckProfilerType {
+    type Data = TypecheckProfileData;
+    const PROFILE_MODE: ProfileMode = ProfileMode::Typecheck;
+
+    fn data_from_generic(profile_data: &ProfileDataImpl) -> Option<&Self::Data> {
+        match profile_data {
+            ProfileDataImpl::Typecheck(data) => Some(data),
+            _ => None,
+        }
+    }
+
+    fn data_to_generic(data: Self::Data) -> ProfileDataImpl {
+        ProfileDataImpl::Typecheck(data)
+    }
+
+    fn merge_profiles_impl(profiles: &[&Self::Data]) -> starlark_syntax::Result<Self::Data> {
+        let mut by_function = SmallMap::new();
+        for profile in profiles {
+            for (name, time) in &profile.by_function {
+                *by_function.entry(name.dupe()).or_default() += *time;
+            }
+        }
+        Ok(TypecheckProfileData { by_function })
+    }
+}
+
 #[derive(Debug, thiserror::Error)]
 enum TypecheckProfileError {
     #[error("Typecheck profile not enabled")]
     NotEnabled,
 }

+#[derive(Default, Debug, Clone, Eq, PartialEq)]
+pub(crate) struct TypecheckProfileData {
+    by_function: SmallMap<ArcStr, SmallDuration>,
+}
+
 #[derive(Default, Debug)]
 pub(crate) struct TypecheckProfile {
     pub(crate) enabled: bool,
-    // TODO(nga): we don't need ordered map here.
-    by_function: SmallMap<FrozenStringValue, SmallDuration>,
+    by_function: HashMap<Hashed<FrozenStringValue>, SmallDuration, StarlarkHasherBuilder>,
 }

-impl TypecheckProfile {
-    pub(crate) fn add(&mut self, function: FrozenStringValue, time: Duration) {
-        assert!(self.enabled);
-        *self
-            .by_function
-            .entry_hashed(function.get_hashed())
-            .or_default() += time;
-    }
-
-    fn gen_csv(&self) -> String {
+impl TypecheckProfileData {
+    pub(crate) fn gen_csv(&self) -> String {
         let total_time = self.by_function.values().sum::<SmallDuration>();

         let mut w = CsvWriter::new(["Function", "Time (s)"]);
@@ -67,26 +99,48 @@
         w.finish()
     }
+}
+
+impl TypecheckProfile {
+    pub(crate) fn add(&mut self, function: FrozenStringValue, time: Duration) {
+        assert!(self.enabled);
+        *self.by_function.entry(function.get_hashed()).or_default() += time;
+    }

-    pub(crate) fn gen(&self) -> anyhow::Result<ProfileData> {
+    pub(crate) fn gen(&self) -> crate::Result<ProfileData> {
         if !self.enabled {
-            return Err(TypecheckProfileError::NotEnabled.into());
+            return Err(crate::Error::new_other(TypecheckProfileError::NotEnabled));
         }
-        Ok(ProfileData::new(ProfileMode::Typecheck, self.gen_csv()))
+        Ok(ProfileData {
+            profile: ProfileDataImpl::Typecheck(TypecheckProfileData {
+                by_function: self
+                    .by_function
+                    .iter()
+                    .map(|(k, v)| (ArcStr::from(k.as_str()), *v))
+                    .collect(),
+            }),
+        })
     }
 }

 #[cfg(test)]
 mod tests {
+    use starlark_map::small_map::SmallMap;
+
     use crate::environment::Globals;
     use crate::environment::Module;
+    use crate::eval::runtime::profile::mode::ProfileMode;
+    use crate::eval::runtime::profile::profiler_type::ProfilerType;
+    use crate::eval::runtime::profile::typecheck::TypecheckProfileData;
+    use crate::eval::runtime::profile::typecheck::TypecheckProfilerType;
+    use crate::eval::runtime::small_duration::SmallDuration;
     use crate::eval::Evaluator;
-    use crate::eval::ProfileMode;
     use
crate::syntax::AstModule; use crate::syntax::Dialect; + use crate::util::arc_str::ArcStr; #[test] - fn test_typecheck_profile() -> anyhow::Result<()> { + fn test_typecheck_profile() -> crate::Result<()> { let module = Module::new(); let mut eval = Evaluator::new(&module); let program = r#" @@ -99,11 +153,15 @@ def g(): g() "#; - let program = AstModule::parse("test.star", program.to_owned(), &Dialect::Extended)?; + let program = AstModule::parse( + "test.star", + program.to_owned(), + &Dialect::AllOptionsInternal, + )?; eval.enable_profile(&ProfileMode::Typecheck)?; eval.eval_module(program, &Globals::extended_internal())?; - let csv = eval.typecheck_profile.gen_csv(); + let csv = eval.typecheck_profile.gen()?.gen()?; let lines: Vec<&str> = csv.lines().collect(); assert_eq!("Function,Time (s)", lines[0]); assert!(lines[1].starts_with("\"TOTAL\","), "{:?}", lines[1]); @@ -112,4 +170,30 @@ g() Ok(()) } + + #[test] + fn test_typecheck_profile_merge() { + let a = TypecheckProfileData { + by_function: SmallMap::from_iter([ + (ArcStr::from("a"), SmallDuration::from_millis(10)), + (ArcStr::from("b"), SmallDuration::from_millis(20)), + ]), + }; + let b = TypecheckProfileData { + by_function: SmallMap::from_iter([ + (ArcStr::from("b"), SmallDuration::from_millis(300)), + (ArcStr::from("c"), SmallDuration::from_millis(400)), + ]), + }; + let merged = TypecheckProfilerType::merge_profiles_impl(&[&a, &b]).unwrap(); + + let expected = TypecheckProfileData { + by_function: SmallMap::from_iter([ + (ArcStr::from("a"), SmallDuration::from_millis(10)), + (ArcStr::from("b"), SmallDuration::from_millis(320)), + (ArcStr::from("c"), SmallDuration::from_millis(400)), + ]), + }; + assert_eq!(expected, merged); + } } diff --git a/starlark-rust/starlark/src/eval/runtime/rust_loc.rs b/starlark-rust/starlark/src/eval/runtime/rust_loc.rs index 7fe7829c769ed..e85a137dfb123 100644 --- a/starlark-rust/starlark/src/eval/runtime/rust_loc.rs +++ b/starlark-rust/starlark/src/eval/runtime/rust_loc.rs @@ -40,6 +40,7 @@ pub(crate) use rust_loc; #[cfg(test)] mod tests { use starlark_derive::starlark_module; + use starlark_syntax::error::StarlarkResultExt; use crate as starlark; use crate::assert::Assert; @@ -50,8 +51,9 @@ mod tests { #[starlark_module] fn rust_loc_globals(globals: &mut GlobalsBuilder) { - fn invoke<'v>(f: Value<'v>, eval: &mut Evaluator<'v, '_>) -> anyhow::Result> { + fn invoke<'v>(f: Value<'v>, eval: &mut Evaluator<'v, '_, '_>) -> anyhow::Result> { f.invoke_with_loc(Some(rust_loc!()), &Arguments::default(), eval) + .into_anyhow_result() } } diff --git a/starlark-rust/starlark/src/eval/runtime/small_duration.rs b/starlark-rust/starlark/src/eval/runtime/small_duration.rs index c3c986ca44380..0452bdd627436 100644 --- a/starlark-rust/starlark/src/eval/runtime/small_duration.rs +++ b/starlark-rust/starlark/src/eval/runtime/small_duration.rs @@ -25,7 +25,9 @@ use allocative::Allocative; use dupe::Dupe; /// Slightly faster than `Duration`. -#[derive(Copy, Clone, Dupe, Default, Debug, Allocative)] +#[derive( + Copy, Clone, Dupe, Default, Eq, PartialEq, Ord, PartialOrd, Debug, Allocative +)] pub(crate) struct SmallDuration { /// `u64::MAX` nanos is 500 years. 
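    /// (2^64 ns is about 1.8e10 seconds, i.e. roughly 585 years.)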
pub(crate) nanos: u64, @@ -38,6 +40,11 @@ impl SmallDuration { } } + #[cfg(test)] + pub(crate) fn from_millis(millis: u64) -> SmallDuration { + Self::from_duration(Duration::from_millis(millis)) + } + pub(crate) fn to_duration(self) -> Duration { Duration::from_nanos(self.nanos) } diff --git a/starlark-rust/starlark/src/eval/runtime/visit_span.rs b/starlark-rust/starlark/src/eval/runtime/visit_span.rs index 66ee30e5535a7..590b55fa49137 100644 --- a/starlark-rust/starlark/src/eval/runtime/visit_span.rs +++ b/starlark-rust/starlark/src/eval/runtime/visit_span.rs @@ -15,7 +15,10 @@ * limitations under the License. */ -use crate::collections::symbol_map::Symbol; +use starlark_syntax::syntax::def::DefParamIndices; +use starlark_syntax::syntax::def::DefRegularParamMode; + +use crate::collections::symbol::symbol::Symbol; use crate::environment::slots::ModuleSlotId; use crate::eval::compiler::expr::CompareOp; use crate::eval::compiler::span::IrSpanned; @@ -125,3 +128,11 @@ impl VisitSpanMut for Option { } } } + +impl VisitSpanMut for DefRegularParamMode { + fn visit_spans(&mut self, _visitor: &mut impl FnMut(&mut FrameSpan)) {} +} + +impl VisitSpanMut for DefParamIndices { + fn visit_spans(&mut self, _visitor: &mut impl FnMut(&mut FrameSpan)) {} +} diff --git a/starlark-rust/starlark/src/eval/soft_error.rs b/starlark-rust/starlark/src/eval/soft_error.rs new file mode 100644 index 0000000000000..7943430baf8c3 --- /dev/null +++ b/starlark-rust/starlark/src/eval/soft_error.rs @@ -0,0 +1,32 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/// Deprecation handler provided by a user. +pub trait SoftErrorHandler { + /// Handle deprecation error. If this function returns `Ok`, error will be ignored, + /// otherwise error will be propagated. + fn soft_error(&self, category: &str, error: crate::Error) -> Result<(), crate::Error>; +} + +/// Default handler: warnings are treated as errors. +pub(crate) struct HardErrorSoftErrorHandler; + +impl SoftErrorHandler for HardErrorSoftErrorHandler { + fn soft_error(&self, _category: &str, error: crate::Error) -> Result<(), crate::Error> { + Err(error) + } +} diff --git a/starlark-rust/starlark/src/lib.rs b/starlark-rust/starlark/src/lib.rs index 46cce35753dfe..56906a8c6130d 100644 --- a/starlark-rust/starlark/src/lib.rs +++ b/starlark-rust/starlark/src/lib.rs @@ -15,18 +15,20 @@ * limitations under the License. */ -//! A [Starlark interpreter in Rust](https://github.com/facebookexperimental/starlark-rust). +//! A [Starlark interpreter in Rust](https://github.com/facebook/starlark-rust). //! Starlark is a deterministic version of Python, with [a specification](https://github.com/bazelbuild/starlark/blob/master/spec.md), //! used by (amongst others) the [Buck](https://buck.build) and [Bazel](https://bazel.build) build systems. //! //! To evaluate a simple file: //! //! ``` -//! # fn run() -> anyhow::Result<()> { +//! 
# fn run() -> starlark::Result<()> { +//! use starlark::environment::Globals; +//! use starlark::environment::Module; //! use starlark::eval::Evaluator; -//! use starlark::environment::{Module, Globals}; +//! use starlark::syntax::AstModule; +//! use starlark::syntax::Dialect; //! use starlark::values::Value; -//! use starlark::syntax::{AstModule, Dialect}; //! //! let content = r#" //! def hello(): @@ -36,7 +38,8 @@ //! //! // We first parse the content, giving a filename and the Starlark //! // `Dialect` we'd like to use (we pick standard). -//! let ast: AstModule = AstModule::parse("hello_world.star", content.to_owned(), &Dialect::Standard)?; +//! let ast: AstModule = +//! AstModule::parse("hello_world.star", content.to_owned(), &Dialect::Standard)?; //! //! // We create a `Globals`, defining the standard library functions available. //! // The `standard` function uses those defined in the Starlark specification. @@ -67,7 +70,7 @@ //! ``` //! #[macro_use] //! extern crate starlark; -//! # fn run() -> anyhow::Result<()> { +//! # fn run() -> starlark::Result<()> { //! use starlark::environment::{GlobalsBuilder, Module}; //! use starlark::eval::Evaluator; //! use starlark::syntax::{AstModule, Dialect}; @@ -107,14 +110,19 @@ //! ``` //! #[macro_use] //! extern crate starlark; -//! # fn run() -> anyhow::Result<()> { -//! use starlark::environment::{GlobalsBuilder, Module}; -//! use starlark::eval::Evaluator; -//! use starlark::syntax::{AstModule, Dialect}; -//! use starlark::values::{none::NoneType, Value, ValueLike}; -//! use starlark::any::ProvidesStaticType; +//! # fn run() -> starlark::Result<()> { //! use std::cell::RefCell; //! +//! use starlark::any::ProvidesStaticType; +//! use starlark::environment::GlobalsBuilder; +//! use starlark::environment::Module; +//! use starlark::eval::Evaluator; +//! use starlark::syntax::AstModule; +//! use starlark::syntax::Dialect; +//! use starlark::values::none::NoneType; +//! use starlark::values::Value; +//! use starlark::values::ValueLike; +//! //! let content = r#" //! emit(1) //! emit(["test"]) @@ -127,7 +135,7 @@ //! //! impl Store { //! fn add(&self, x: String) { -//! self.0.borrow_mut().push(x) +//! self.0.borrow_mut().push(x) //! } //! } //! @@ -168,10 +176,13 @@ //! controlled by the [`Dialect`](syntax::Dialect) type. //! //! ``` -//! # fn run() -> anyhow::Result<()> { -//! use starlark::environment::{Globals, Module}; +//! # fn run() -> starlark::Result<()> { +//! use starlark::environment::Globals; +//! use starlark::environment::Module; //! use starlark::eval::Evaluator; -//! use starlark::syntax::{AstModule, Dialect, DialectTypes}; +//! use starlark::syntax::AstModule; +//! use starlark::syntax::Dialect; +//! use starlark::syntax::DialectTypes; //! //! let content = r#" //! def takes_int(x: int): @@ -180,7 +191,10 @@ //! "#; //! //! // Make the dialect enable types -//! let dialect = Dialect {enable_types: DialectTypes::Enable, ..Dialect::Standard}; +//! let dialect = Dialect { +//! enable_types: DialectTypes::Enable, +//! ..Dialect::Standard +//! }; //! // We could equally have done `dialect = Dialect::Extended`. //! let ast = AstModule::parse("json.star", content.to_owned(), &dialect)?; //! let globals = Globals::standard(); @@ -188,7 +202,11 @@ //! let mut eval = Evaluator::new(&module); //! let res = eval.eval_module(ast, &globals); //! // We expect this to fail, since it is a type violation -//! assert!(res.unwrap_err().to_string().contains("Value `test` of type `string` does not match the type annotation `int`")); +//! 
assert!( +//! res.unwrap_err() +//! .to_string() +//! .contains("Value `test` of type `string` does not match the type annotation `int`") +//! ); //! # Ok(()) //! # } //! # fn main(){ run().unwrap(); } @@ -201,17 +219,22 @@ //! There is no requirement that the files are on disk, but that would be a common pattern. //! //! ``` -//! # fn run() -> anyhow::Result<()> { -//! use starlark::environment::{FrozenModule, Globals, Module}; -//! use starlark::eval::{Evaluator, ReturnFileLoader}; -//! use starlark::syntax::{AstModule, Dialect}; +//! # fn run() -> starlark::Result<()> { +//! use starlark::environment::FrozenModule; +//! use starlark::environment::Globals; +//! use starlark::environment::Module; +//! use starlark::eval::Evaluator; +//! use starlark::eval::ReturnFileLoader; +//! use starlark::syntax::AstModule; +//! use starlark::syntax::Dialect; //! //! // Get the file contents (for the demo), in reality use `AstModule::parse_file`. //! fn get_source(file: &str) -> &str { //! match file { //! "a.star" => "a = 7", //! "b.star" => "b = 6", -//! _ => { r#" +//! _ => { +//! r#" //! load('a.star', 'a') //! load('b.star', 'b') //! ab = a * b @@ -220,28 +243,28 @@ //! } //! } //! -//! fn get_module(file: &str) -> anyhow::Result { -//! let ast = AstModule::parse(file, get_source(file).to_owned(), &Dialect::Standard)?; -//! -//! // We can get the loaded modules from `ast.loads`. -//! // And ultimately produce a `loader` capable of giving those modules to Starlark. -//! let mut loads = Vec::new(); -//! for load in ast.loads() { -//! loads.push((load.module_id.to_owned(), get_module(load.module_id)?)); -//! } -//! let modules = loads.iter().map(|(a, b)| (a.as_str(), b)).collect(); -//! let mut loader = ReturnFileLoader { modules: &modules }; -//! -//! let globals = Globals::standard(); -//! let module = Module::new(); -//! { -//! let mut eval = Evaluator::new(&module); -//! eval.set_loader(&mut loader); -//! eval.eval_module(ast, &globals)?; -//! } -//! // After creating a module we freeze it, preventing further mutation. -//! // It can now be used as the input for other Starlark modules. -//! Ok(module.freeze()?) +//! fn get_module(file: &str) -> starlark::Result { +//! let ast = AstModule::parse(file, get_source(file).to_owned(), &Dialect::Standard)?; +//! +//! // We can get the loaded modules from `ast.loads`. +//! // And ultimately produce a `loader` capable of giving those modules to Starlark. +//! let mut loads = Vec::new(); +//! for load in ast.loads() { +//! loads.push((load.module_id.to_owned(), get_module(load.module_id)?)); +//! } +//! let modules = loads.iter().map(|(a, b)| (a.as_str(), b)).collect(); +//! let mut loader = ReturnFileLoader { modules: &modules }; +//! +//! let globals = Globals::standard(); +//! let module = Module::new(); +//! { +//! let mut eval = Evaluator::new(&module); +//! eval.set_loader(&mut loader); +//! eval.eval_module(ast, &globals)?; +//! } +//! // After creating a module we freeze it, preventing further mutation. +//! // It can now be used as the input for other Starlark modules. +//! Ok(module.freeze()?) //! } //! //! let ab = get_module("ab.star")?; @@ -256,10 +279,12 @@ //! You can extract functions from Starlark, and call them from Rust, using [`eval_function`](eval::Evaluator::eval_function). //! //! ``` -//! # fn run() -> anyhow::Result<()> { -//! use starlark::environment::{Globals, Module}; +//! # fn run() -> starlark::Result<()> { +//! use starlark::environment::Globals; +//! use starlark::environment::Module; //! 
use starlark::eval::Evaluator; -//! use starlark::syntax::{AstModule, Dialect}; +//! use starlark::syntax::AstModule; +//! use starlark::syntax::Dialect; //! use starlark::values::Value; //! //! let content = r#" @@ -268,7 +293,7 @@ //! quadratic //! "#; //! -//! let ast = AstModule::parse("quadratic.star", content.to_owned(), &Dialect::Extended)?; +//! let ast = AstModule::parse("quadratic.star", content.to_owned(), &Dialect::Standard)?; //! let globals = Globals::standard(); //! let module = Module::new(); //! let mut eval = Evaluator::new(&module); @@ -291,14 +316,25 @@ //! Such types are relatively complex, see the details at [`StarlarkValue`](values::StarlarkValue). //! //! ``` -//! # fn run() -> anyhow::Result<()> { -//! use starlark::environment::{Globals, Module}; +//! # fn run() -> starlark::Result<()> { +//! use std::fmt::Display; +//! use std::fmt::Write; +//! use std::fmt::{self}; +//! +//! use allocative::Allocative; +//! use starlark::environment::Globals; +//! use starlark::environment::Module; //! use starlark::eval::Evaluator; -//! use starlark::syntax::{AstModule, Dialect}; -//! use starlark::values::{Heap, StarlarkValue, Value, ValueError, ValueLike, ProvidesStaticType, NoSerialize}; //! use starlark::starlark_simple_value; -//! use std::fmt::{self, Display, Write}; -//! use allocative::Allocative; +//! use starlark::syntax::AstModule; +//! use starlark::syntax::Dialect; +//! use starlark::values::Heap; +//! use starlark::values::NoSerialize; +//! use starlark::values::ProvidesStaticType; +//! use starlark::values::StarlarkValue; +//! use starlark::values::Value; +//! use starlark::values::ValueError; +//! use starlark::values::ValueLike; //! use starlark_derive::starlark_value; //! //! // Define complex numbers @@ -318,8 +354,7 @@ //! #[starlark_value(type = "complex")] //! impl<'v> StarlarkValue<'v> for Complex { //! // How we add them -//! fn add(&self, rhs: Value<'v>, heap: &'v Heap) -//! -> Option>> { +//! fn add(&self, rhs: Value<'v>, heap: &'v Heap) -> Option>> { //! if let Some(rhs) = rhs.downcast_ref::() { //! Some(Ok(heap.alloc(Complex { //! real: self.real + rhs.real, @@ -337,9 +372,15 @@ //! let globals = Globals::standard(); //! let module = Module::new(); //! // We inject some complex numbers into the module before we start. -//! let a = module.heap().alloc(Complex {real: 1, imaginary: 8}); +//! let a = module.heap().alloc(Complex { +//! real: 1, +//! imaginary: 8, +//! }); //! module.set("a", a); -//! let b = module.heap().alloc(Complex {real: 4, imaginary: 2}); +//! let b = module.heap().alloc(Complex { +//! real: 4, +//! imaginary: 2, +//! }); //! module.set("b", b); //! let mut eval = Evaluator::new(&module); //! 
let res = eval.eval_module(ast, &globals)?; @@ -352,15 +393,11 @@ // Features we use #![allow(stable_features)] #![allow(unknown_lints)] // for clippy::tuple_array_conversions +#![cfg_attr(rust_nightly, allow(internal_features))] #![cfg_attr(rust_nightly, feature(const_type_id))] #![cfg_attr(rust_nightly, feature(core_intrinsics))] #![cfg_attr(rust_nightly, feature(cfg_sanitize))] #![cfg_attr(rust_nightly, feature(const_type_name))] -// Plugins -#![cfg_attr(feature = "gazebo_lint", feature(plugin))] -#![cfg_attr(feature = "gazebo_lint", allow(deprecated))] // :( -#![cfg_attr(feature = "gazebo_lint", plugin(gazebo_lint))] -// // Good reasons #![allow(clippy::needless_return)] // Mixing explicit returns with implicit ones sometimes looks odd #![allow(clippy::new_ret_no_self)] // We often return Value, even though its morally a Self @@ -371,6 +408,7 @@ #![allow(clippy::float_cmp)] #![allow(clippy::if_same_then_else)] #![allow(clippy::len_without_is_empty)] +#![allow(clippy::manual_map)] #![allow(clippy::manual_range_contains)] #![allow(clippy::match_like_matches_macro)] #![allow(clippy::missing_safety_doc)] @@ -385,13 +423,17 @@ #![allow(clippy::wrong_self_convention)] // FIXME: Temporary #![allow(clippy::useless_transmute)] // Seems to be a clippy bug, but we should be using less transmute anyway +#![allow(clippy::zero_repeat_side_effects)] #![deny(missing_docs)] mod macros; pub use starlark_derive::starlark_module; -pub use starlark_derive::StarlarkDocs; pub use starlark_syntax::codemap; +pub use starlark_syntax::Error; +pub use starlark_syntax::ErrorKind; +pub use starlark_syntax::Result; +pub use starlark_syntax::StarlarkResultExt; pub use stdlib::PrintHandler; pub mod analysis; @@ -412,6 +454,7 @@ pub mod typing; pub(crate) mod cast; mod hint; mod stdlib; +pub mod util; pub mod values; pub mod wasm; @@ -430,15 +473,4 @@ pub mod __macro_refs { pub use crate::coerce::coerce; } -/// __derive_refs allows us to reference other crates in starlark_derive without users needing to be -/// aware of those dependencies. We make them public here and then can reference them like -/// `starlark::__derive_refs::foo`. -#[doc(hidden)] -pub mod __derive_refs { - pub mod serde { - pub use serde::ser::Error; - pub use serde::Serialize; - pub use serde::Serializer; - } - pub use inventory; -} +pub mod __derive_refs; diff --git a/starlark-rust/starlark/src/macros.rs b/starlark-rust/starlark/src/macros.rs new file mode 100644 index 0000000000000..825a3b243027f --- /dev/null +++ b/starlark-rust/starlark/src/macros.rs @@ -0,0 +1,221 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/// Reduce boilerplate when making types instances of [`ComplexValue`](crate::values::ComplexValue) +/// - see the [`ComplexValue`](crate::values::ComplexValue) docs for an example. +#[macro_export] +macro_rules! starlark_complex_value { + // Common part of macro variants. 
+ (impl $x:ident) => { + $crate::__macro_refs::item! { + impl<'v> $crate::values::AllocValue<'v> for $x<'v> { + #[inline] + fn alloc_value(self, heap: &'v $crate::values::Heap) -> $crate::values::Value<'v> { + heap.alloc_complex(self) + } + } + + impl $crate::values::AllocFrozenValue for [< Frozen $x >] { + #[inline] + fn alloc_frozen_value(self, heap: &$crate::values::FrozenHeap) -> $crate::values::FrozenValue { + heap.alloc_simple(self) + } + } + + impl<'v> $x<'v> { + /// Downcast the value. + #[inline] + pub fn from_value(x: $crate::values::Value<'v>) -> Option<&'v Self> { + if let Some(x) = x.unpack_frozen() { + $crate::values::ValueLike::downcast_ref::< [< Frozen $x >] >(x).map($crate::__macro_refs::coerce) + } else { + $crate::values::ValueLike::downcast_ref::< $x<'v> >(x) + } + } + } + + impl<'v> $crate::values::type_repr::StarlarkTypeRepr for &'v $x<'v> { + type Canonical = $x<'v>; + + #[inline] + fn starlark_type_repr() -> $crate::typing::Ty { + <$x as $crate::values::StarlarkValue>::get_type_starlark_repr() + } + } + + impl<'v> $crate::values::UnpackValue<'v> for &'v $x<'v> { + type Error = std::convert::Infallible; + + #[inline] + fn unpack_value_impl(x: $crate::values::Value<'v>) -> Result<Option<&'v $x<'v>>, Self::Error> { + Ok($x::from_value(x)) + } + } + } + }; + ($v:vis $x:ident) => { + $crate::__macro_refs::item! { + /// Type of value. + $v type $x<'v> = [< $x Gen >]<$crate::values::Value<'v>>; + /// Type of frozen value. + $v type [< Frozen $x >] = [< $x Gen >]<$crate::values::FrozenValue>; + + starlark_complex_value!(impl $x); + } + }; + ($v:vis $x:ident <'v>) => { + $crate::__macro_refs::item! { + /// Type of unfrozen value. + $v type $x<'v> = [< $x Gen >]<'v, $crate::values::Value<'v>>; + /// Type of frozen value. + $v type [< Frozen $x >] = [< $x Gen >]<'static, $crate::values::FrozenValue>; + + starlark_complex_value!(impl $x); + } + }; +} + +/// Reduce boilerplate when making types instances of [`ComplexValue`](crate::values::ComplexValue) +/// - see the [`ComplexValue`](crate::values::ComplexValue) docs for an example. +#[macro_export] +macro_rules! starlark_complex_values { + ($x:ident) => { + $crate::__macro_refs::item! { + impl<'v> $crate::values::AllocValue<'v> for $x<'v> { + #[inline] + fn alloc_value(self, heap: &'v $crate::values::Heap) -> $crate::values::Value<'v> { + heap.alloc_complex(self) + } + } + + impl $crate::values::AllocFrozenValue for [< Frozen $x >] { + #[inline] + fn alloc_frozen_value(self, heap: &$crate::values::FrozenHeap) -> $crate::values::FrozenValue { + heap.alloc_simple(self) + } + } + + impl<'v> $x<'v> { + #[allow(dead_code)] + #[inline] + pub(crate) fn from_value( + x: $crate::values::Value<'v>, + ) -> Option<$crate::__macro_refs::Either<&'v Self, &'v [< Frozen $x >]>> { + if let Some(x) = x.unpack_frozen() { + $crate::values::ValueLike::downcast_ref(x).map($crate::__macro_refs::Either::Right) + } else { + $crate::values::ValueLike::downcast_ref(x).map($crate::__macro_refs::Either::Left) + } + } + } + } + }; +} + +/// A macro reducing boilerplate when defining Starlark values which are simple - they +/// aren't mutable and can't contain references to other Starlark values.
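One detail worth calling out in the listing above, before its doc comment continues below: the generated `UnpackValue` impls now provide `unpack_value_impl` returning `Result<Option<..>, Self::Error>` with `type Error = std::convert::Infallible`, where the deleted version further down exposed an `expected()` plus an `Option`-returning `unpack_value`. A minimal sketch of the new protocol for a hand-written unpacker, assuming the `StarlarkTypeRepr` supertrait shape shown in the macro and an `unpack_i32` helper on `Value`; `EvenInt` is a made-up illustration type, not part of this change:

```rust
use std::convert::Infallible;

use starlark::typing::Ty;
use starlark::values::type_repr::StarlarkTypeRepr;
use starlark::values::UnpackValue;
use starlark::values::Value;

struct EvenInt(i32);

// `UnpackValue` requires `StarlarkTypeRepr`; delegate to `i32`.
impl StarlarkTypeRepr for EvenInt {
    type Canonical = <i32 as StarlarkTypeRepr>::Canonical;

    fn starlark_type_repr() -> Ty {
        i32::starlark_type_repr()
    }
}

impl<'v> UnpackValue<'v> for EvenInt {
    // "Did not match" is expressed as `Ok(None)`; this unpacker itself cannot fail.
    type Error = Infallible;

    fn unpack_value_impl(value: Value<'v>) -> Result<Option<Self>, Self::Error> {
        Ok(value.unpack_i32().filter(|i| i % 2 == 0).map(EvenInt))
    }
}
```

With this in place, `EvenInt` can be used directly as a `#[starlark_module]` parameter type, and a non-even or non-int argument surfaces as an ordinary type error rather than a panic.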
+/// +/// Let's define a simple object, where `+x` makes the string uppercase: +/// +/// ``` +/// use allocative::Allocative; +/// use derive_more::Display; +/// use starlark::starlark_simple_value; +/// use starlark::values::Heap; +/// use starlark::values::NoSerialize; +/// use starlark::values::ProvidesStaticType; +/// use starlark::values::StarlarkValue; +/// use starlark::values::Value; +/// use starlark_derive::starlark_value; +/// +/// #[derive(Debug, Display, ProvidesStaticType, NoSerialize, Allocative)] +/// struct MyObject(String); +/// starlark_simple_value!(MyObject); +/// +/// #[starlark_value(type = "my_object")] +/// impl<'v> StarlarkValue<'v> for MyObject { +/// // We can choose to implement whichever methods we want. +/// // All other operations will result in runtime errors. +/// fn plus(&self, heap: &'v Heap) -> starlark::Result<Value<'v>> { +/// Ok(heap.alloc(MyObject(self.0.to_uppercase()))) +/// } +/// } +/// ``` +/// +/// The `starlark_simple_value!` macro defines instances of +/// [`ProvidesStaticType`](crate::values::ProvidesStaticType), +/// [`AllocValue`](crate::values::AllocValue), +/// [`AllocFrozenValue`](crate::values::AllocFrozenValue) and +/// [`UnpackValue`](crate::values::UnpackValue). It also defines a method: +/// +/// ``` +/// # use crate::starlark::values::*; +/// # struct MyObject; +/// impl MyObject { +/// pub fn from_value<'v>(x: Value<'v>) -> Option<&'v MyObject> { +/// # unimplemented!( +/// # r#" +/// ... +/// # "#); +/// } +/// } +/// ``` +#[macro_export] +macro_rules! starlark_simple_value { + ($x:ident) => { + $crate::__macro_refs::item! { + impl<'v> $crate::values::AllocValue<'v> for $x { + #[inline] + fn alloc_value(self, heap: &'v $crate::values::Heap) -> $crate::values::Value<'v> { + heap.alloc_simple(self) + } + } + + impl $crate::values::AllocFrozenValue for $x { + #[inline] + fn alloc_frozen_value(self, heap: &$crate::values::FrozenHeap) -> $crate::values::FrozenValue { + heap.alloc_simple(self) + } + } + + impl $x { + /// Downcast a value to self type. + #[inline] + pub fn from_value<'v>(x: $crate::values::Value<'v>) -> Option<&'v Self> { + $crate::values::ValueLike::downcast_ref::< $x >(x) + } + } + + impl<'v> $crate::values::type_repr::StarlarkTypeRepr for &'v $x { + type Canonical = $x; + + fn starlark_type_repr() -> $crate::typing::Ty { + <$x as $crate::values::StarlarkValue>::get_type_starlark_repr() + } + } + + impl<'v> $crate::values::UnpackValue<'v> for &'v $x { + type Error = std::convert::Infallible; + + #[inline] + fn unpack_value_impl(x: $crate::values::Value<'v>) -> std::result::Result<Option<&'v $x>, Self::Error> { + std::result::Result::Ok($x::from_value(x)) + } + } + } + }; +} diff --git a/starlark-rust/starlark/src/macros/mod.rs b/starlark-rust/starlark/src/macros/mod.rs deleted file mode 100644 index 30dc9364ce4f0..0000000000000 --- a/starlark-rust/starlark/src/macros/mod.rs +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
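To round out the `starlark_simple_value!` example above, here is a minimal end-to-end driver. It is a sketch rather than part of the change: it assumes only APIs already used in this diff (`Module::set`, `AstModule::parse`, `Evaluator::eval_module`, the re-exported `starlark::Result`) and the generated `MyObject::from_value` for the final downcast:

```rust
use allocative::Allocative;
use derive_more::Display;
use starlark::environment::Globals;
use starlark::environment::Module;
use starlark::eval::Evaluator;
use starlark::starlark_simple_value;
use starlark::syntax::AstModule;
use starlark::syntax::Dialect;
use starlark::values::Heap;
use starlark::values::NoSerialize;
use starlark::values::ProvidesStaticType;
use starlark::values::StarlarkValue;
use starlark::values::Value;
use starlark_derive::starlark_value;

#[derive(Debug, Display, ProvidesStaticType, NoSerialize, Allocative)]
struct MyObject(String);
starlark_simple_value!(MyObject);

#[starlark_value(type = "my_object")]
impl<'v> StarlarkValue<'v> for MyObject {
    fn plus(&self, heap: &'v Heap) -> starlark::Result<Value<'v>> {
        Ok(heap.alloc(MyObject(self.0.to_uppercase())))
    }
}

fn main() -> starlark::Result<()> {
    let module = Module::new();
    module.set("x", module.heap().alloc(MyObject("hello".to_owned())));

    // The module ends in an expression, so `eval_module` returns its value.
    let ast = AstModule::parse("demo.star", "+x".to_owned(), &Dialect::Standard)?;
    let mut eval = Evaluator::new(&module);
    let res = eval.eval_module(ast, &Globals::standard())?;

    // `from_value` is one of the helpers `starlark_simple_value!` generates.
    assert_eq!(MyObject::from_value(res).unwrap().0, "HELLO");
    Ok(())
}
```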
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/// Reduce boilerplate when making types instances of [`ComplexValue`](crate::values::ComplexValue) -/// - see the [`ComplexValue`](crate::values::ComplexValue) docs for an example. -#[macro_export] -macro_rules! starlark_complex_value { - // Common part of macro variants. - (impl $x:ident) => { - $crate::__macro_refs::item! { - impl<'v> $crate::values::AllocValue<'v> for $x<'v> { - #[inline] - fn alloc_value(self, heap: &'v $crate::values::Heap) -> $crate::values::Value<'v> { - heap.alloc_complex(self) - } - } - - impl $crate::values::AllocFrozenValue for [< Frozen $x >] { - #[inline] - fn alloc_frozen_value(self, heap: &$crate::values::FrozenHeap) -> $crate::values::FrozenValue { - heap.alloc_simple(self) - } - } - - impl<'v> $x<'v> { - /// Downcast the value. - #[inline] - pub fn from_value(x: $crate::values::Value<'v>) -> Option<&'v Self> { - if let Some(x) = x.unpack_frozen() { - $crate::values::ValueLike::downcast_ref::< [< Frozen $x >] >(x).map($crate::__macro_refs::coerce) - } else { - $crate::values::ValueLike::downcast_ref::< $x<'v> >(x) - } - } - } - - impl<'v> $crate::values::type_repr::StarlarkTypeRepr for &'v $x<'v> { - #[inline] - fn starlark_type_repr() -> $crate::typing::Ty { - <$x as $crate::values::StarlarkValue>::get_type_starlark_repr() - } - } - - impl<'v> $crate::values::UnpackValue<'v> for &'v $x<'v> { - #[inline] - fn unpack_value(x: $crate::values::Value<'v>) -> Option<&'v $x<'v>> { - $x::from_value(x) - } - } - } - }; - ($v:vis $x:ident) => { - $crate::__macro_refs::item! { - /// Type of value. - $v type $x<'v> = [< $x Gen >]<$crate::values::Value<'v>>; - /// Type of frozen value. - $v type [< Frozen $x >] = [< $x Gen >]<$crate::values::FrozenValue>; - - starlark_complex_value!(impl $x); - } - }; - ($v:vis $x:ident <'v>) => { - $crate::__macro_refs::item! { - /// Type of unfrozen value. - $v type $x<'v> = [< $x Gen >]<'v, $crate::values::Value<'v>>; - /// Type of frozen value. - $v type [< Frozen $x >] = [< $x Gen >]<'static, $crate::values::FrozenValue>; - - starlark_complex_value!(impl $x); - } - }; -} - -/// Reduce boilerplate when making types instances of [`ComplexValue`](crate::values::ComplexValue) -/// - see the [`ComplexValue`](crate::values::ComplexValue) docs for an example. -#[macro_export] -macro_rules! starlark_complex_values { - ($x:ident) => { - $crate::__macro_refs::item! { - impl<'v> $crate::values::AllocValue<'v> for $x<'v> { - #[inline] - fn alloc_value(self, heap: &'v $crate::values::Heap) -> $crate::values::Value<'v> { - heap.alloc_complex(self) - } - } - - impl $crate::values::AllocFrozenValue for [< Frozen $x >] { - #[inline] - fn alloc_frozen_value(self, heap: &$crate::values::FrozenHeap) -> $crate::values::FrozenValue { - heap.alloc_simple(self) - } - } - - impl<'v> $x<'v> { - #[allow(dead_code)] - #[inline] - pub(crate) fn from_value( - x: $crate::values::Value<'v>, - ) -> Option<$crate::__macro_refs::Either<&'v Self, &'v [< Frozen $x >]>> { - if let Some(x) = x.unpack_frozen() { - $crate::values::ValueLike::downcast_ref(x).map($crate::__macro_refs::Either::Right) - } else { - $crate::values::ValueLike::downcast_ref(x).map($crate::__macro_refs::Either::Left) - } - } - } - } - }; -} - -/// A macro reducing boilerplace defining Starlark values which are simple - they -/// aren't mutable and can't contain references to other Starlark values. 
-/// -/// Let's define a simple object, where `+x` makes the string uppercase: -/// -/// ``` -/// use starlark::values::{Heap, StarlarkValue, Value, ProvidesStaticType, NoSerialize}; -/// use starlark::{starlark_simple_value}; -/// use derive_more::Display; -/// use allocative::Allocative; -/// use starlark_derive::starlark_value; -/// -/// #[derive(Debug, Display, ProvidesStaticType, NoSerialize, Allocative)] -/// struct MyObject(String); -/// starlark_simple_value!(MyObject); -/// -/// #[starlark_value(type = "my_object")] -/// impl<'v> StarlarkValue<'v> for MyObject { -/// // We can choose to implement whichever methods we want. -/// // All other operations will result in runtime errors. -/// fn plus(&self, heap: &'v Heap) -> anyhow::Result> { -/// Ok(heap.alloc(MyObject(self.0.to_uppercase()))) -/// } -/// } -/// ``` -/// -/// The `starlark_simple_value!` macro defines instances of -/// [`ProvidesStaticType`](crate::values::ProvidesStaticType), -/// [`AllocValue`](crate::values::AllocValue), -/// [`AllocFrozenValue`](crate::values::AllocFrozenValue) and -/// [`UnpackValue`](crate::values::UnpackValue). It also defines a method: -/// -/// ``` -/// # use crate::starlark::values::*; -/// # struct MyObject; -/// impl MyObject { -/// pub fn from_value<'v>(x: Value<'v>) -> Option<&'v MyObject> { -/// # unimplemented!( -/// # r#" -/// ... -/// # "#); -/// } -/// } -/// ``` -#[macro_export] -macro_rules! starlark_simple_value { - ($x:ident) => { - $crate::__macro_refs::item! { - impl<'v> $crate::values::AllocValue<'v> for $x { - #[inline] - fn alloc_value(self, heap: &'v $crate::values::Heap) -> $crate::values::Value<'v> { - heap.alloc_simple(self) - } - } - - impl $crate::values::AllocFrozenValue for $x { - #[inline] - fn alloc_frozen_value(self, heap: &$crate::values::FrozenHeap) -> $crate::values::FrozenValue { - heap.alloc_simple(self) - } - } - - impl $x { - /// Downcast a value to self type. - #[inline] - pub fn from_value<'v>(x: $crate::values::Value<'v>) -> Option<&'v Self> { - $crate::values::ValueLike::downcast_ref::< $x >(x) - } - } - - impl<'v> $crate::values::type_repr::StarlarkTypeRepr for &'v $x { - fn starlark_type_repr() -> $crate::typing::Ty { - <$x as $crate::values::StarlarkValue>::get_type_starlark_repr() - } - } - - impl<'v> $crate::values::UnpackValue<'v> for &'v $x { - fn expected() -> String { - <$x as $crate::values::StarlarkValue>::get_type_value_static().as_str().to_owned() - } - - #[inline] - fn unpack_value(x: $crate::values::Value<'v>) -> Option<&'v $x> { - $x::from_value(x) - } - } - } - }; -} diff --git a/starlark-rust/starlark/src/stdlib.rs b/starlark-rust/starlark/src/stdlib.rs new file mode 100644 index 0000000000000..4984e27379e48 --- /dev/null +++ b/starlark-rust/starlark/src/stdlib.rs @@ -0,0 +1,270 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! A module with the standard function and constants that are by default in all +//! 
dialects of Starlark + +use dupe::Dupe; + +use crate::environment::GlobalsBuilder; +use crate::values::namespace::globals::register_namespace; + +pub(crate) mod breakpoint; +pub(crate) mod call_stack; +pub(crate) mod extra; +mod funcs; +pub(crate) mod internal; +pub(crate) mod json; +pub(crate) mod partial; + +pub use extra::PrintHandler; + +use crate::stdlib::funcs::globals::register_globals; +use crate::stdlib::internal::register_internal; +use crate::values::enumeration::globals::register_enum; +use crate::values::record::globals::register_record; +use crate::values::structs::structs::register_struct; +use crate::values::types::set::set::register_set; +use crate::values::typing; + +/// Return the default global environment; it is not yet frozen, so a caller +/// can refine it. +/// +/// For example, `stdlib::standard_environment().freeze().child("test")` creates a +/// child environment of this global environment once it has been frozen. +pub(crate) fn standard_environment() -> GlobalsBuilder { + GlobalsBuilder::new().with(register_globals) +} + +/// The extra library definitions available in this Starlark implementation, but not in the standard. +#[derive(PartialEq, Eq, Copy, Clone, Dupe)] +pub enum LibraryExtension { + /// Definitions to support the `struct` type, the `struct()` constructor. + StructType, + /// Definitions to support the `record` type, the `record()` constructor and `field()` function. + RecordType, + /// Definitions to support the `enum` type, the `enum()` constructor. + EnumType, + /// Add a function `namespace()` which acts much like `struct()` but is clear about its + /// intended use and is stricter. + NamespaceType, + /// A function `map(f, xs)` which applies `f` to each element of `xs` and returns the result. + // TODO(nga): add set: https://www.internalfb.com/tasks/?t=184017710 + Map, + /// A function `filter(f, xs)` which applies `f` to each element of `xs` and returns those for which `f` returns `True`. + /// As a special case, `filter(None, xs)` removes all `None` values. + Filter, + /// Partially apply a function: `partial(f, *args, **kwargs)` will create a function where those `args` and `kwargs` + /// are already applied to `f`. + Partial, + /// Add a function `debug(x)` which shows the Rust [`Debug`](std::fmt::Debug) representation of a value. + /// Useful when debugging, but the output should not be considered stable. + Debug, + /// Add a function `print(x)` which prints to stderr. + Print, + /// Add a function `pprint(x)` which pretty-prints to stderr. + Pprint, + /// Add a function `pstr` which is a pretty-printed version of `str`. + Pstr, + /// Add a function `prepr` which is a pretty-printed version of `repr`. + Prepr, + /// Add a function `breakpoint()` which will drop into a console-module evaluation prompt. + Breakpoint, + /// Add a function `json()` which will generate JSON for a module. + Json, + /// Provides `typing.All`, `typing.Callable` etc. + /// Usually used in conjunction with + /// [`Dialect::enable_types`](crate::syntax::Dialect::enable_types). + Typing, + /// Utilities exposing starlark-rust internals. + /// These are not for production use. + Internal, + /// Add a function `call_stack()` which returns a string representation of + /// the current call stack. + CallStack, + /// Definitions to support the `set` type, the `set()` constructor. + SetType, + // If you add anything new, make sure you add it to `all` below. +} + +impl LibraryExtension { + /// A list of all extensions that will be updated as new methods are added.
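An embedder's aside before the methods below: these extensions are opt-in, and the conventional way to enable a subset is `GlobalsBuilder::extended_by` (assumed here from the current starlark-rust API; the `Globals::get` probe is the same one the tests further down use):

```rust
use starlark::environment::Globals;
use starlark::environment::GlobalsBuilder;
use starlark::environment::LibraryExtension;

fn main() {
    // Standard globals plus a few of the opt-in extensions listed above.
    let globals: Globals = GlobalsBuilder::extended_by(&[
        LibraryExtension::Print,
        LibraryExtension::Pprint,
        LibraryExtension::Json,
    ])
    .build();

    // The extension registered `pprint`; plain standard globals would not have it.
    assert!(globals.get("pprint").is_some());
}
```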
+ pub(crate) fn all() -> &'static [Self] { + use LibraryExtension::*; + &[ + StructType, + RecordType, + EnumType, + NamespaceType, + Map, + Filter, + Partial, + Debug, + Print, + Pprint, + Pstr, + Prepr, + Breakpoint, + Json, + Typing, + Internal, + CallStack, + SetType, + ] + } + + /// Add a specific extension to a [`GlobalsBuilder`]. + pub fn add(self, builder: &mut GlobalsBuilder) { + use LibraryExtension::*; + match self { + StructType => register_struct(builder), + NamespaceType => register_namespace(builder), + RecordType => register_record(builder), + EnumType => register_enum(builder), + SetType => register_set(builder), + Map => extra::map(builder), + Filter => extra::filter(builder), + Partial => partial::partial(builder), + Debug => extra::debug(builder), + Print => extra::print(builder), + Pprint => extra::pprint(builder), + Pstr => extra::pstr(builder), + Prepr => extra::prepr(builder), + Breakpoint => breakpoint::global(builder), + Json => json::json(builder), + Typing => typing::globals::register_typing(builder), + Internal => register_internal(builder), + CallStack => call_stack::global(builder), + } + } +} + +#[cfg(test)] +mod tests { + use std::convert::Infallible; + + use allocative::Allocative; + use derive_more::Display; + use dupe::Dupe; + use starlark_derive::starlark_module; + use starlark_derive::starlark_value; + use starlark_derive::NoSerialize; + + use crate as starlark; + use crate::any::ProvidesStaticType; + use crate::assert::Assert; + use crate::environment::GlobalsBuilder; + use crate::environment::Methods; + use crate::environment::MethodsBuilder; + use crate::environment::MethodsStatic; + use crate::starlark_simple_value; + use crate::values::none::NoneType; + use crate::values::StarlarkValue; + use crate::values::UnpackValue; + use crate::values::Value; + use crate::values::ValueLike; + + #[test] + fn test_no_arg() { + #[starlark_module] + fn global(builder: &mut GlobalsBuilder) { + fn nop() -> anyhow::Result<NoneType> { + Ok(NoneType) + } + } + + let env = GlobalsBuilder::new().with(global).build(); + env.get("nop").unwrap(); + } + + #[test] + fn test_value_attributes() { + #[derive( + Copy, + Clone, + Debug, + Dupe, + PartialEq, + Display, + ProvidesStaticType, + NoSerialize, + Allocative + )] + struct Bool2(bool); + starlark_simple_value!(Bool2); + + #[starlark_value(type = "bool2")] + impl<'v> StarlarkValue<'v> for Bool2 { + fn get_methods() -> Option<&'static Methods> { + static RES: MethodsStatic = MethodsStatic::new(); + RES.methods(methods) + } + + fn equals(&self, other: Value<'v>) -> crate::Result<bool> { + match other.downcast_ref::<Bool2>() { + None => Ok(false), + Some(v) => Ok(*v == *self), + } + } + } + + impl<'v> UnpackValue<'v> for Bool2 { + type Error = Infallible; + + fn unpack_value_impl(value: Value<'v>) -> Result<Option<Self>, Self::Error> { + Ok(Some(*value.downcast_ref::<Bool2>().unwrap())) + } + } + + #[starlark_module] + fn globals(builder: &mut GlobalsBuilder) { + const True2: Bool2 = Bool2(true); + const False2: Bool2 = Bool2(false); + } + + #[starlark_module] + fn methods(builder: &mut MethodsBuilder) { + #[starlark(attribute)] + fn invert1(this: Bool2) -> anyhow::Result<Bool2> { + Ok(Bool2(!this.0)) + } + + fn invert2(this: Bool2) -> anyhow::Result<Bool2> { + Ok(Bool2(!this.0)) + } + } + + let mut a = Assert::new(); + a.globals_add(globals); + a.all_true( + r#" +True2 == True2 +True2 != False2 +True2.invert1 == False2 +False2.invert1 == True2 +False2.invert2() == True2 +hasattr(True2, "invert1") == True +hasattr(True2, "invert2") == True +hasattr(True2, "invert3") == False
+dir(False2) == ["invert1","invert2"] +getattr(False2, "invert1") == True2 +getattr(True2, "invert1") == False2 +getattr(True2, "invert2")() == False2 +"#, + ); + } +} diff --git a/starlark-rust/starlark/src/stdlib/breakpoint.rs b/starlark-rust/starlark/src/stdlib/breakpoint.rs index 4465be2e51c0b..ded2bcef87cb3 100644 --- a/starlark-rust/starlark/src/stdlib/breakpoint.rs +++ b/starlark-rust/starlark/src/stdlib/breakpoint.rs @@ -178,7 +178,7 @@ fn breakpoint_loop( } } } else { - let ast = AstModule::parse("interactive", line, &Dialect::Extended); + let ast = AstModule::parse("interactive", line, &Dialect::AllOptionsInternal); let res = ast.and_then(|ast| eval.eval_statements(ast)); match res { Err(e) => { diff --git a/starlark-rust/starlark/src/stdlib/call_stack.rs b/starlark-rust/starlark/src/stdlib/call_stack.rs new file mode 100644 index 0000000000000..1d6e2cf26b181 --- /dev/null +++ b/starlark-rust/starlark/src/stdlib/call_stack.rs @@ -0,0 +1,228 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Implementation of `call_stack` function. + +use std::fmt; +use std::fmt::Display; +use std::fmt::Formatter; + +use allocative::Allocative; +use starlark_derive::starlark_module; +use starlark_syntax::codemap::FileSpan; + +use crate as starlark; +use crate::environment::GlobalsBuilder; +use crate::environment::Methods; +use crate::environment::MethodsBuilder; +use crate::environment::MethodsStatic; +use crate::eval::Evaluator; +use crate::values::none::NoneOr; +use crate::values::starlark_value; +use crate::values::AllocValue; +use crate::values::Heap; +use crate::values::NoSerialize; +use crate::values::ProvidesStaticType; +use crate::values::StarlarkValue; +use crate::values::Trace; +use crate::values::Value; + +#[derive(ProvidesStaticType, Trace, Allocative, Debug, NoSerialize, Clone)] +/// A frame of the call-stack. +struct StackFrame { + /// The name of the entry on the call-stack. + name: String, + /// The location of the definition, or [`None`] for native Rust functions. + location: Option<FileSpan>, +} + +#[starlark_value(type = "StackFrame", StarlarkTypeRepr, UnpackValue)] +impl<'v> StarlarkValue<'v> for StackFrame { + fn get_methods() -> Option<&'static Methods> { + static RES: MethodsStatic = MethodsStatic::new(); + RES.methods(stack_frame_methods) + } +} + +impl<'v> AllocValue<'v> for StackFrame { + fn alloc_value(self, heap: &'v Heap) -> Value<'v> { + heap.alloc_complex_no_freeze(self) + } +} + +impl Display for StackFrame { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "") + } +} + +#[starlark_module] +fn stack_frame_methods(builder: &mut MethodsBuilder) { + /// Returns the name of the entry on the call-stack. + #[starlark(attribute)] + fn func_name(this: &StackFrame) -> anyhow::Result<String> { + Ok(this.name.clone()) + } + + /// Returns a path of the module from which the entry was called, or [`None`] for native Rust functions.
+ #[starlark(attribute)] + fn module_path(this: &StackFrame) -> anyhow::Result<NoneOr<String>> { + match this.location { + Some(ref location) => Ok(NoneOr::Other(location.file.filename().to_owned())), + None => Ok(NoneOr::None), + } + } +} + +#[starlark_module] +pub(crate) fn global(builder: &mut GlobalsBuilder) { + /// Get a textual representation of the call stack. + /// + /// This is intended only for debugging purposes to display to a human and + /// should not be considered stable or parseable. + /// + /// `strip_frames` will pop N frames from the top of the call stack, which can + /// be useful to hide non-interesting lines - for example, `strip_frames=1` + /// will hide the call to and location of `call_stack()` itself. + fn call_stack( + #[starlark(require=named, default = 0)] strip_frames: u32, + eval: &mut Evaluator, + ) -> anyhow::Result<String> { + let mut stack = eval.call_stack(); + stack + .frames + .truncate(stack.frames.len().saturating_sub(strip_frames as usize)); + Ok(stack.to_string()) + } + + /// Get a structural representation of the n-th call stack frame. + /// + /// With `n=0` returns `call_stack_frame` itself. + /// Returns `None` if `n` is greater than or equal to the stack size. + fn call_stack_frame( + #[starlark(require = pos)] n: u32, + eval: &mut Evaluator, + ) -> anyhow::Result<NoneOr<StackFrame>> { + let stack = eval.call_stack(); + let n = n as usize; + if n >= stack.frames.len() { + return Ok(NoneOr::None); + } + match stack.frames.get(stack.frames.len() - n - 1) { + Some(frame) => Ok(NoneOr::Other(StackFrame { + name: frame.name.clone(), + location: frame.location.clone(), + })), + + None => Ok(NoneOr::None), + } + } +} + +#[cfg(test)] +mod tests { + use super::global; + use crate::assert::Assert; + + #[test] + fn test_simple() { + let mut a = Assert::new(); + a.globals_add(global); + a.is_true( + r#" +def foo(): + return bar() + +def bar(): + s = call_stack() + return all([ + "foo()" in s, + "bar()" in s, + "call_stack()" in s, + ]) + +foo() + "#, + ); + } + + #[test] + fn test_strip_one() { + let mut a = Assert::new(); + a.globals_add(global); + a.is_true( + r#" +def foo(): + return bar() + +def bar(): + s = call_stack(strip_frames=1) + return all([ + "foo()" in s, + "bar()" in s, + "call_stack()" not in s, + ]) + +foo() + "#, + ); + } + + #[test] + fn test_strip_all() { + let mut a = Assert::new(); + a.globals_add(global); + a.is_true( + r#" +def foo(): + return bar() + +def bar(): + s = call_stack(strip_frames=10) + return not bool(s) + +foo() + "#, + ); + } + + #[test] + fn test_call_stack_frame() { + let mut a = Assert::new(); + a.globals_add(global); + a.is_true( + r#" +def foo(): + return bar() + +def bar(): + return all([ + "call_stack_frame" == call_stack_frame(0).func_name, + "assert.bzl" == call_stack_frame(0).module_path, + "bar" == call_stack_frame(1).func_name, + "assert.bzl" == call_stack_frame(1).module_path, + "foo" == call_stack_frame(2).func_name, + "assert.bzl" == call_stack_frame(2).module_path, + None == call_stack_frame(3), + None == call_stack_frame(4), + ]) + +foo() + "#, + ); + } +} diff --git a/starlark-rust/starlark/src/stdlib/dict.rs b/starlark-rust/starlark/src/stdlib/dict.rs deleted file mode 100644 index 5ea4fb8f4f671..0000000000000 --- a/starlark-rust/starlark/src/stdlib/dict.rs +++ /dev/null @@ -1,392 +0,0 @@ -/* - * Copyright 2018 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
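Since `call_stack` and `call_stack_frame` are registered through `LibraryExtension::CallStack` (see the `add` match earlier), an embedder can exercise them in a few lines. A sketch, assuming `GlobalsBuilder::extended_by` as above:

```rust
use starlark::environment::GlobalsBuilder;
use starlark::environment::LibraryExtension;
use starlark::environment::Module;
use starlark::eval::Evaluator;
use starlark::syntax::AstModule;
use starlark::syntax::Dialect;

fn main() -> starlark::Result<()> {
    let globals = GlobalsBuilder::extended_by(&[LibraryExtension::CallStack]).build();
    let src = "def f():\n    return call_stack()\nf()";
    let ast = AstModule::parse("demo.star", src.to_owned(), &Dialect::Standard)?;
    let module = Module::new();
    let mut eval = Evaluator::new(&module);
    let stack = eval.eval_module(ast, &globals)?;
    // Per the doc comment above, this string is for humans; don't parse it.
    println!("{}", stack);
    Ok(())
}
```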
- * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! Methods for the `dict` type. - -use std::mem; - -use starlark_derive::starlark_module; - -use crate as starlark; -use crate::environment::MethodsBuilder; -use crate::hint::unlikely; -use crate::values::dict::DictMut; -use crate::values::dict::DictRef; -use crate::values::list::AllocList; -use crate::values::list::ListOf; -use crate::values::list::ListRef; -use crate::values::none::NoneType; -use crate::values::Heap; -use crate::values::Value; -use crate::values::ValueOfUnchecked; - -#[starlark_module] -pub(crate) fn dict_methods(registry: &mut MethodsBuilder) { - /// [dict.clear]( - /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#dict·clear - /// ): clear a dictionary - /// - /// `D.clear()` removes all the entries of dictionary D and returns `None`. - /// It fails if the dictionary is frozen or if there are active iterators. - /// - /// ``` - /// # starlark::assert::is_true(r#" - /// x = {"one": 1, "two": 2} - /// x.clear() - /// x == {} - /// # "#); - /// ``` - fn clear(this: Value) -> anyhow::Result { - let mut this = DictMut::from_value(this)?; - this.clear(); - Ok(NoneType) - } - - /// [dict.get]( - /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#dict·get - /// ): return an element from the dictionary. - /// - /// `D.get(key[, default])` returns the dictionary value corresponding to - /// the given key. If the dictionary contains no such value, `get` - /// returns `None`, or the value of the optional `default` parameter if - /// present. - /// - /// `get` fails if `key` is unhashable. - /// - /// ``` - /// # starlark::assert::is_true(r#" - /// x = {"one": 1, "two": 2} - /// # ( - /// x.get("one") == 1 - /// # and - /// x.get("three") == None - /// # and - /// x.get("three", 0) == 0 - /// # )"#); - /// ``` - #[starlark(speculative_exec_safe)] - fn get<'v>( - this: DictRef<'v>, - #[starlark(require = pos)] key: Value<'v>, - #[starlark(require = pos)] default: Option>, - ) -> anyhow::Result> { - match this.get(key)? { - None => Ok(default.unwrap_or_else(Value::new_none)), - Some(x) => Ok(x), - } - } - - /// [dict.items]( - /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#dict·items - /// ): get list of (key, value) pairs. - /// - /// `D.items()` returns a new list of key/value pairs, one per element in - /// dictionary D, in the same order as they would be returned by a `for` - /// loop. - /// - /// ``` - /// # starlark::assert::is_true(r#" - /// x = {"one": 1, "two": 2} - /// x.items() == [("one", 1), ("two", 2)] - /// # "#); - /// ``` - fn items<'v>( - this: DictRef<'v>, - heap: &'v Heap, - ) -> anyhow::Result, Value<'v>)>>> { - Ok(ValueOfUnchecked::new(heap.alloc_list_iter( - this.iter().map(|(k, v)| heap.alloc((k, v))), - ))) - } - - /// [dict.keys]( - /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#dict·keys - /// ): get the list of keys of the dictionary. 
- /// - /// `D.keys()` returns a new list containing the keys of dictionary D, in - /// the same order as they would be returned by a `for` loop. - /// - /// ``` - /// # starlark::assert::is_true(r#" - /// x = {"one": 1, "two": 2} - /// x.keys() == ["one", "two"] - /// # "#); - /// ``` - #[starlark(speculative_exec_safe)] - fn keys<'v>( - this: DictRef<'v>, - heap: &'v Heap, - ) -> anyhow::Result>> { - Ok(ValueOfUnchecked::new(heap.alloc(AllocList(this.keys())))) - } - - /// [dict.pop]( - /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#dict·pop - /// ): return an element and remove it from a dictionary. - /// - /// `D.pop(key[, default])` returns the value corresponding to the specified - /// key, and removes it from the dictionary. If the dictionary contains no - /// such value, and the optional `default` parameter is present, `pop` - /// returns that value; otherwise, it fails. - /// - /// `pop` fails if `key` is unhashable, or the dictionary is frozen or has - /// active iterators. - /// - /// ``` - /// # starlark::assert::is_true(r#" - /// x = {"one": 1, "two": 2} - /// # ( - /// x.pop("one") == 1 - /// # and - /// x == {"two": 2} - /// # and - /// x.pop("three", 0) == 0 - /// # and - /// x.pop("three", None) == None - /// # )"#); - /// ``` - /// - /// Failure: - /// - /// ``` - /// # starlark::assert::fail(r#" - /// {'one': 1}.pop('four') # error: not found - /// # "#, "not found"); - /// ``` - fn pop<'v>( - this: Value<'v>, - #[starlark(require = pos)] key: Value<'v>, - #[starlark(require = pos)] default: Option>, - ) -> anyhow::Result> { - let mut me = DictMut::from_value(this)?; - match me.remove_hashed(key.get_hashed()?) { - Some(x) => Ok(x), - None => match default { - Some(v) => Ok(v), - None => { - mem::drop(me); - Err(anyhow::anyhow!( - "Key `{}` not found in dictionary `{}`", - key.to_repr(), - this.to_repr() - )) - } - }, - } - } - - /// [dict.popitem]( - /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#dict·popitem - /// ): returns and removes the first key/value pair of a dictionary. - /// - /// `D.popitem()` returns the first key/value pair, removing it from the - /// dictionary. - /// - /// `popitem` fails if the dictionary is empty, frozen, or has active - /// iterators. - /// - /// ``` - /// # starlark::assert::is_true(r#" - /// x = {"one": 1, "two": 2} - /// # ( - /// x.popitem() == ("one", 1) - /// # and - /// x.popitem() == ("two", 2) - /// # and - /// x == {} - /// # )"#); - /// ``` - /// - /// Failure: - /// - /// ``` - /// # starlark::assert::fail(r#" - /// {}.popitem() # error: empty dict - /// # "#, "empty dict"); - /// ``` - fn popitem<'v>(this: Value<'v>) -> anyhow::Result<(Value<'v>, Value<'v>)> { - let mut this = DictMut::from_value(this)?; - - let key = this.iter_hashed().next().map(|(k, _)| k); - match key { - Some(k) => Ok((*k.key(), this.remove_hashed(k).unwrap())), - None => Err(anyhow::anyhow!("Cannot .popitem() on an empty dictionary")), - } - } - - /// [dict.setdefault]( - /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#dict·setdefault - /// ): get a value from a dictionary, setting it to a new value if not - /// present. - /// - /// `D.setdefault(key[, default])` returns the dictionary value - /// corresponding to the given key. 
If the dictionary contains no such - /// value, `setdefault`, like `get`, returns `None` or the value of the - /// optional `default` parameter if present; `setdefault` additionally - /// inserts the new key/value entry into the dictionary. - /// - /// `setdefault` fails if the key is unhashable or if the dictionary is - /// frozen. - /// - /// ``` - /// # starlark::assert::is_true(r#" - /// x = {"one": 1, "two": 2} - /// # ( - /// x.setdefault("one") == 1 - /// # and - /// x.setdefault("three", 0) == 0 - /// # and - /// x == {"one": 1, "two": 2, "three": 0} - /// # and - /// x.setdefault("four") == None - /// # and - /// x == {"one": 1, "two": 2, "three": 0, "four": None} - /// # )"#) - /// ``` - fn setdefault<'v>( - this: Value<'v>, - #[starlark(require = pos)] key: Value<'v>, - #[starlark(require = pos)] default: Option>, - ) -> anyhow::Result> { - let mut this = DictMut::from_value(this)?; - let key = key.get_hashed()?; - if let Some(r) = this.get_hashed(key) { - return Ok(r); - } - let def = default.unwrap_or_else(Value::new_none); - this.insert_hashed(key, def); - Ok(def) - } - - /// [dict.update]( - /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#dict·update - /// ): update values in the dictionary. - /// - /// `D.update([pairs][, name=value[, ...])` makes a sequence of key/value - /// insertions into dictionary D, then returns `None.` - /// - /// If the positional argument `pairs` is present, it must be `None`, - /// another `dict`, or some other iterable. - /// If it is another `dict`, then its key/value pairs are inserted into D. - /// If it is an iterable, it must provide a sequence of pairs (or other - /// iterables of length 2), each of which is treated as a key/value pair - /// to be inserted into D. - /// - /// For each `name=value` argument present, the name is converted to a - /// string and used as the key for an insertion into D, with its - /// corresponding value being `value`. - /// - /// `update` fails if the dictionary is frozen. - /// - /// ``` - /// # starlark::assert::is_true(r#" - /// x = {} - /// x.update([("a", 1), ("b", 2)], c=3) - /// x.update({"d": 4}) - /// x.update(e=5) - /// x == {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5} - /// # "#); - /// ``` - fn update<'v>( - this: Value<'v>, - #[starlark(require = pos)] pairs: Option>, - #[starlark(kwargs)] kwargs: DictRef<'v>, - heap: &'v Heap, - ) -> anyhow::Result { - let pairs = if pairs.map(|x| x.ptr_eq(this)) == Some(true) { - // someone has done `x.update(x)` - that isn't illegal, but we will have issues - // with trying to iterate over x while holding x for mutation, and it doesn't do - // anything useful, so just change pairs back to None - None - } else { - pairs - }; - - let mut this = DictMut::from_value(this)?; - if let Some(pairs) = pairs { - if let Some(dict) = DictRef::from_value(pairs) { - for (k, v) in dict.iter_hashed() { - this.insert_hashed(k, v); - } - } else { - for v in pairs.iterate(heap)? 
{ - let mut it = v.iterate(heap)?; - let k = it.next(); - let v = if k.is_some() { it.next() } else { None }; - if unlikely(v.is_none() || it.next().is_some()) { - return Err(anyhow::anyhow!( - "dict.update expect a list of pairs or a dictionary as first argument, got a list of non-pairs.", - )); - }; - this.insert_hashed(k.unwrap().get_hashed()?, v.unwrap()); - } - } - } - - for (k, v) in kwargs.iter_hashed() { - this.insert_hashed(k, v); - } - Ok(NoneType) - } - - /// [dict.values]( - /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#dict·values - /// ): get the list of values of the dictionary. - /// - /// `D.values()` returns a new list containing the dictionary's values, in - /// the same order as they would be returned by a `for` loop over the - /// dictionary. - /// - /// ``` - /// # starlark::assert::is_true(r#" - /// x = {"one": 1, "two": 2} - /// x.values() == [1, 2] - /// # "#); - /// ``` - #[starlark(speculative_exec_safe)] - fn values<'v>( - this: DictRef<'v>, - heap: &'v Heap, - ) -> anyhow::Result>> { - Ok(ValueOfUnchecked::new(heap.alloc_list_iter(this.values()))) - } -} - -#[cfg(test)] -mod tests { - use crate::assert; - - #[test] - fn test_error_codes() { - assert::fail(r#"x = {"one": 1}; x.pop("four")"#, "not found"); - assert::fail("x = {}; x.popitem()", "empty"); - } - - #[test] - fn test_dict_add() { - assert::fail("{1: 2} + {3: 4}", "not supported"); - } - - #[test] - fn test_dict_with_duplicates() { - // In Starlark spec this is a runtime error. In Python it's fine. - // We make it a runtime error, plus have a lint that checks for it statically. - assert::fails("{40+2: 2, 6*7: 3}", &["key repeated", "42"]); - // Also check we fail if the entire dictionary is static (a different code path). - assert::fails("{42: 2, 42: 3}", &["key repeated", "42"]); - } -} diff --git a/starlark-rust/starlark/src/stdlib/extra.rs b/starlark-rust/starlark/src/stdlib/extra.rs index fb11087076223..d427ff3c7a01b 100644 --- a/starlark-rust/starlark/src/stdlib/extra.rs +++ b/starlark-rust/starlark/src/stdlib/extra.rs @@ -26,8 +26,9 @@ use crate::eval::Evaluator; use crate::values::function::StarlarkFunction; use crate::values::none::NoneOr; use crate::values::none::NoneType; -use crate::values::regex::StarlarkRegex; +use crate::values::tuple::UnpackTuple; use crate::values::typing::iter::StarlarkIter; +use crate::values::StringValue; use crate::values::Value; use crate::values::ValueOfUnchecked; @@ -46,8 +47,8 @@ pub fn filter(builder: &mut GlobalsBuilder) { fn filter<'v>( #[starlark(require = pos)] func: NoneOr>, #[starlark(require = pos)] seq: ValueOfUnchecked<'v, StarlarkIter>>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result>> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> starlark::Result>> { let mut res = Vec::new(); for v in seq.get().iterate(eval.heap())? { @@ -81,8 +82,8 @@ pub fn map(builder: &mut GlobalsBuilder) { fn map<'v>( #[starlark(require = pos)] func: ValueOfUnchecked<'v, StarlarkFunction>, #[starlark(require = pos)] seq: ValueOfUnchecked<'v, StarlarkIter>>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result>> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> starlark::Result>> { let it = seq.get().iterate(eval.heap())?; let mut res = Vec::with_capacity(it.size_hint().0); for v in it { @@ -101,23 +102,6 @@ pub fn debug(builder: &mut GlobalsBuilder) { } } -#[starlark_module] -pub fn regex(builder: &mut GlobalsBuilder) { - /// Creates a regex which can be used for matching. 
- /// - /// ``` - /// # starlark::assert::all_true(r#" - /// experimental_regex("^[a-z]*$").match("test") == True - /// experimental_regex("^[a-z]*$").match("1234") == False - /// # "#); - /// ``` - fn experimental_regex<'v>( - #[starlark(require = pos)] regex: &str, - ) -> anyhow::Result<StarlarkRegex> { - StarlarkRegex::new(regex) - } -} - struct PrintWrapper<'a, 'b>(&'a Vec<Value<'b>>); impl fmt::Display for PrintWrapper<'_, '_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -149,11 +133,14 @@ impl PrintHandler for StderrPrintHandler { #[starlark_module] pub fn print(builder: &mut GlobalsBuilder) { /// Print some values to the output. - fn print(#[starlark(args)] args: Vec<Value>, eval: &mut Evaluator) -> anyhow::Result<NoneType> { + fn print( + #[starlark(args)] args: UnpackTuple<Value>, + eval: &mut Evaluator, + ) -> anyhow::Result<NoneType> { // In practice most users should want to put the print somewhere else, but this does for now // Unfortunately, we can't use PrintWrapper because strings to_str() and Display are different. eval.print_handler - .println(&args.iter().map(|x| x.to_str()).join(" "))?; + .println(&args.items.iter().map(|x| x.to_str()).join(" "))?; Ok(NoneType) } } @@ -161,16 +148,55 @@ pub fn print(builder: &mut GlobalsBuilder) { #[starlark_module] pub fn pprint(builder: &mut GlobalsBuilder) { fn pprint( - #[starlark(args)] args: Vec<Value>, + #[starlark(args)] args: UnpackTuple<Value>, eval: &mut Evaluator, ) -> anyhow::Result<NoneType> { // In practice most users may want to put the print somewhere else, but this does for now eval.print_handler - .println(&format!("{:#}", PrintWrapper(&args)))?; + .println(&format!("{:#}", PrintWrapper(&args.items)))?; Ok(NoneType) } } +fn pretty_repr<'v>( + a: Value<'v>, + eval: &mut Evaluator<'v, '_, '_>, +) -> anyhow::Result<StringValue<'v>> { + use std::fmt::Write; + + let mut s = eval.string_pool.alloc(); + write!(s, "{:#}", a).unwrap(); + let r = eval.heap().alloc_str(&s); + eval.string_pool.release(s); + Ok(r) +} + +#[starlark_module] +pub fn pstr(builder: &mut GlobalsBuilder) { + /// Like `str`, but produces more verbose pretty-printed output + fn pstr<'v>( + #[starlark(require = pos)] a: Value<'v>, + eval: &mut Evaluator<'v, '_, '_>, + ) -> anyhow::Result<StringValue<'v>> { + if let Some(a) = StringValue::new(a) { + return Ok(a); + } + + pretty_repr(a, eval) + } +} + +#[starlark_module] +pub fn prepr(builder: &mut GlobalsBuilder) { + /// Like `repr`, but produces more verbose pretty-printed output + fn prepr<'v>( + #[starlark(require = pos)] a: Value<'v>, + eval: &mut Evaluator<'v, '_, '_>, + ) -> anyhow::Result<StringValue<'v>> { + pretty_repr(a, eval) + } +} + #[cfg(test)] mod tests { use std::cell::RefCell; @@ -253,4 +279,34 @@ assert_eq(["11",8], map(double, ["1",4])) a.pass("print('hw')"); assert_eq!("hw", s_copy.borrow().as_str()); } + + #[test] + fn test_pstr() { + assert::pass( + r#" +assert_eq(pstr([]), "[]") +assert_eq(pstr([1,2,[]]), """[ + 1, + 2, + [] +]""") +assert_eq(pstr("abcd"), "abcd") +"#, + ); + } + + #[test] + fn test_prepr() { + assert::pass( + r#" +assert_eq(prepr([]), "[]") +assert_eq(prepr([1,2,[]]), """[ + 1, + 2, + [] +]""") +assert_eq(prepr("abcd"), "\"abcd\"") +"#, + ); + } } diff --git a/starlark-rust/starlark/src/stdlib/funcs.rs b/starlark-rust/starlark/src/stdlib/funcs.rs new file mode 100644 index 0000000000000..d5964f23c9097 --- /dev/null +++ b/starlark-rust/starlark/src/stdlib/funcs.rs @@ -0,0 +1,21 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates.
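The `Vec<Value>`-to-`UnpackTuple` migration visible in `print`/`pprint` above (and in `min`/`max` below) applies to user-defined globals too: a `#[starlark(args)]` parameter now collects into an `UnpackTuple`, with the values behind `.items`. A minimal sketch of a custom `*args` function under the new signature:

```rust
use starlark::environment::GlobalsBuilder;
use starlark::values::tuple::UnpackTuple;
use starlark::values::Value;
use starlark_derive::starlark_module;

#[starlark_module]
fn my_globals(builder: &mut GlobalsBuilder) {
    /// `nargs(*args)` returns how many positional arguments it received.
    fn nargs(#[starlark(args)] args: UnpackTuple<Value>) -> anyhow::Result<i32> {
        // The collected arguments live behind `.items` after the migration.
        Ok(args.items.len() as i32)
    }
}

fn main() {
    // Same probe the `test_no_arg` test earlier uses.
    let env = GlobalsBuilder::new().with(my_globals).build();
    env.get("nargs").unwrap();
}
```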
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +pub(crate) mod globals; +pub(crate) mod min_max; +pub(crate) mod other; +pub(crate) mod zip; diff --git a/starlark-rust/starlark/src/stdlib/funcs/dict.rs b/starlark-rust/starlark/src/stdlib/funcs/dict.rs deleted file mode 100644 index 8511870dad185..0000000000000 --- a/starlark-rust/starlark/src/stdlib/funcs/dict.rs +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright 2018 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -use starlark_derive::starlark_module; -use starlark_map::small_map::SmallMap; - -use crate as starlark; -use crate::environment::GlobalsBuilder; -use crate::eval::Arguments; -use crate::values::dict::value::FrozenDict; -use crate::values::dict::Dict; -use crate::values::dict::DictRef; -use crate::values::function::SpecialBuiltinFunction; -use crate::values::Heap; -use crate::values::Value; - -fn unpack_pair<'v>(pair: Value<'v>, heap: &'v Heap) -> anyhow::Result<(Value<'v>, Value<'v>)> { - let mut it = pair.iterate(heap)?; - if let Some(first) = it.next() { - if let Some(second) = it.next() { - if it.next().is_none() { - return Ok((first, second)); - } - } - } - Err(anyhow::anyhow!( - "Found a non-pair element in the positional argument of dict(): {}", - pair.to_repr(), - )) -} - -#[starlark_module] -pub(crate) fn register_dict(globals: &mut GlobalsBuilder) { - /// [dict]( - /// https://github.com/bazelbuild/starlark/blob/master/spec.md#dict - /// ): creates a dictionary. - /// - /// `dict` creates a dictionary. It accepts up to one positional argument, - /// which is interpreted as an iterable of two-element sequences - /// (pairs), each specifying a key/value pair in the - /// resulting dictionary. - /// - /// `dict` also accepts any number of keyword arguments, each of which - /// specifies a key/value pair in the resulting dictionary; each keyword - /// is treated as a string. 
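The deleted `funcs/dict.rs` below only relocates the `dict()` constructor (its new home is `values::types::dict::globals`, per the `register_globals` hunk earlier); behaviour is unchanged. The identities from its doc comment still hold and make a compact self-check, using the public `starlark::assert` helpers these docs already lean on:

```rust
fn main() {
    // Lifted verbatim from the (relocated) `dict()` docs below.
    starlark::assert::all_true(
        r#"
dict() == {}
dict({'a': 1}) == {'a': 1}
dict([(1, 2), (3, 4)]) == {1: 2, 3: 4}
dict(one=1, two=2) == {'one': 1, 'two': 2}
dict([('x', 2)], x=3) == {'x': 3}
"#,
    );
}
```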
- /// - /// ``` - /// # starlark::assert::all_true(r#" - /// dict() == {} - /// dict(**{'a': 1}) == {'a': 1} - /// dict({'a': 1}) == {'a': 1} - /// dict([(1, 2), (3, 4)]) == {1: 2, 3: 4} - /// dict([(1, 2), ['a', 'b']]) == {1: 2, 'a': 'b'} - /// dict(one=1, two=2) == {'one': 1, 'two': 2} - /// dict([(1, 2)], x=3) == {1: 2, 'x': 3} - /// dict([('x', 2)], x=3) == {'x': 3} - /// # "#); - /// # starlark::assert::is_true(r#" - /// x = {'a': 1} - /// y = dict([('x', 2)], **x) - /// x == {'a': 1} and y == {'x': 2, 'a': 1} - /// # "#); - /// ``` - #[starlark( - as_type = FrozenDict, - speculative_exec_safe, - special_builtin_function = SpecialBuiltinFunction::Dict, - )] - fn dict<'v>(args: &Arguments<'v, '_>, heap: &'v Heap) -> anyhow::Result> { - // Dict is super hot, and has a slightly odd signature, so we can do a bunch of special cases on it. - // In particular, we don't generate the kwargs if there are no positional arguments. - // Therefore we make it take the raw Arguments. - // It might have one positional argument, which could be a dict or an array of pairs. - // It might have named/kwargs arguments, which we copy over (afterwards). - - let pos = args.optional1(heap)?; - let kwargs = args.names()?; - - match pos { - None => Ok(kwargs), - Some(pos) => { - let mut result = match DictRef::from_value(pos) { - Some(pos) => { - let mut result = pos.clone(); - result.reserve(kwargs.len()); - result - } - None => { - let it = pos.iterate(heap)?; - let mut result = SmallMap::with_capacity(it.size_hint().0 + kwargs.len()); - for el in it { - let (k, v) = unpack_pair(el, heap)?; - let k = k.get_hashed()?; - result.insert_hashed(k, v); - } - Dict::new(result) - } - }; - for (k, v) in kwargs.iter_hashed() { - result.insert_hashed(k, v); - } - Ok(result) - } - } - } -} diff --git a/starlark-rust/starlark/src/stdlib/funcs/globals.rs b/starlark-rust/starlark/src/stdlib/funcs/globals.rs index 99fc45f3c7eee..557d6cc13226c 100644 --- a/starlark-rust/starlark/src/stdlib/funcs/globals.rs +++ b/starlark-rust/starlark/src/stdlib/funcs/globals.rs @@ -16,15 +16,31 @@ */ use crate::environment::GlobalsBuilder; -use crate::stdlib::funcs::dict::register_dict; -use crate::stdlib::funcs::list::register_list; use crate::stdlib::funcs::min_max::register_min_max; use crate::stdlib::funcs::other::register_other; use crate::stdlib::funcs::zip::register_zip; +use crate::values::bool::globals::register_bool; +use crate::values::float::globals::register_float; +use crate::values::int::globals::register_int; +use crate::values::none::globals::register_none; +use crate::values::range::globals::register_range; +use crate::values::string::globals::register_str; +use crate::values::tuple::globals::register_tuple; +use crate::values::types::dict::globals::register_dict; +use crate::values::types::list::globals::register_list; +use crate::values::types::num::globals::register_num; pub(crate) fn register_globals(globals: &mut GlobalsBuilder) { register_list(globals); + register_tuple(globals); register_dict(globals); + register_bool(globals); + register_none(globals); + register_str(globals); + register_range(globals); + register_int(globals); + register_num(globals); + register_float(globals); register_min_max(globals); register_zip(globals); register_other(globals); diff --git a/starlark-rust/starlark/src/stdlib/funcs/list.rs b/starlark-rust/starlark/src/stdlib/funcs/list.rs deleted file mode 100644 index b625db479aba4..0000000000000 --- a/starlark-rust/starlark/src/stdlib/funcs/list.rs +++ /dev/null @@ -1,120 +0,0 @@ -/* - * 
Copyright 2018 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -use allocative::Allocative; -use once_cell::sync::Lazy; -use starlark_derive::starlark_module; - -use crate as starlark; -use crate::codemap::Span; -use crate::codemap::Spanned; -use crate::environment::GlobalsBuilder; -use crate::typing::error::TypingOrInternalError; -use crate::typing::function::TyCustomFunctionImpl; -use crate::typing::Arg; -use crate::typing::Param; -use crate::typing::Ty; -use crate::typing::TyFunction; -use crate::typing::TypingOracleCtx; -use crate::values::function::SpecialBuiltinFunction; -use crate::values::list::value::FrozenList; -use crate::values::list::AllocList; -use crate::values::list::ListRef; -use crate::values::typing::StarlarkIter; -use crate::values::Heap; -use crate::values::Value; -use crate::values::ValueOfUnchecked; - -#[derive(Allocative, Hash, Eq, PartialEq, Ord, PartialOrd, Clone, Debug)] -struct ListType; - -impl TyCustomFunctionImpl for ListType { - fn has_type_attr(&self) -> bool { - true - } - - fn validate_call( - &self, - span: Span, - args: &[Spanned], - oracle: TypingOracleCtx, - ) -> Result { - static LIST: Lazy = Lazy::new(|| { - TyFunction::new_with_type_attr( - vec![Param::pos_only(Ty::iter(Ty::any())).optional()], - Ty::any_list(), - Ty::any_list(), - ) - }); - - oracle.validate_fn_call(span, &LIST, args)?; - - if let Some(arg) = args.get(0) { - // This is infallible after the check above. - if let Arg::Pos(arg_ty) = &arg.node { - // This is also infallible. - let item = oracle.iter_item(Spanned { span, node: arg_ty })?; - return Ok(Ty::list(item)); - } - } - - Ok(Ty::any_list()) - } -} - -#[starlark_module] -pub(crate) fn register_list(globals: &mut GlobalsBuilder) { - /// [list]( - /// https://github.com/bazelbuild/starlark/blob/master/spec.md#list - /// ): construct a list. - /// - /// `list(x)` returns a new list containing the elements of the - /// iterable sequence x. - /// - /// With no argument, `list()` returns a new empty list. 
- /// - /// ``` - /// # starlark::assert::all_true(r#" - /// list() == [] - /// list((1,2,3)) == [1, 2, 3] - /// # "#); - /// # starlark::assert::fail(r#" - /// list("strings are not iterable") # error: not supported - /// # "#, r#"not supported on type"#); - /// ``` - #[starlark( - as_type = FrozenList, - speculative_exec_safe, - special_builtin_function = SpecialBuiltinFunction::List, - ty_custom_function = ListType, - )] - fn list<'v>( - #[starlark(require = pos)] a: Option>>>, - heap: &'v Heap, - ) -> anyhow::Result>> { - Ok(ValueOfUnchecked::new(if let Some(a) = a { - if let Some(xs) = ListRef::from_value(a.get()) { - heap.alloc_list(xs.content()) - } else { - let it = a.get().iterate(heap)?; - heap.alloc(AllocList(it)) - } - } else { - heap.alloc(AllocList::EMPTY) - })) - } -} diff --git a/starlark-rust/starlark/src/stdlib/funcs/min_max.rs b/starlark-rust/starlark/src/stdlib/funcs/min_max.rs index e2c8567a11098..b0d8a311f7070 100644 --- a/starlark-rust/starlark/src/stdlib/funcs/min_max.rs +++ b/starlark-rust/starlark/src/stdlib/funcs/min_max.rs @@ -22,21 +22,23 @@ use starlark_derive::starlark_module; use crate as starlark; use crate::environment::GlobalsBuilder; use crate::eval::Evaluator; +use crate::values::tuple::UnpackTuple; use crate::values::Value; fn min_max_iter<'v>( mut it: impl Iterator>, key: Option>, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, // Select min on true, max on false. min: bool, -) -> anyhow::Result> { +) -> crate::Result> { let mut max = match it.next() { Some(x) => x, None => { return Err(anyhow::anyhow!( "Argument is an empty iterable, max() expect a non empty iterable" - )); + ) + .into()); } }; let update_max_ordering = if min { @@ -68,17 +70,17 @@ fn min_max_iter<'v>( /// Common implementation of `min` and `max`. fn min_max<'v>( - mut args: Vec>, + mut args: UnpackTuple>, key: Option>, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, // Select min on true, max on false. min: bool, -) -> anyhow::Result> { - if args.len() == 1 { - let it = args.swap_remove(0).iterate(eval.heap())?; +) -> crate::Result> { + if args.items.len() == 1 { + let it = args.items.swap_remove(0).iterate(eval.heap())?; min_max_iter(it, key, eval, min) } else { - min_max_iter(args.into_iter(), key, eval, min) + min_max_iter(args.items.into_iter(), key, eval, min) } } @@ -105,10 +107,10 @@ pub(crate) fn register_min_max(globals: &mut GlobalsBuilder) { /// ``` #[starlark(speculative_exec_safe)] fn max<'v>( - #[starlark(args)] args: Vec>, + #[starlark(args)] args: UnpackTuple>, key: Option>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> starlark::Result> { min_max(args, key, eval, false) } @@ -130,10 +132,10 @@ pub(crate) fn register_min_max(globals: &mut GlobalsBuilder) { /// ``` #[starlark(speculative_exec_safe)] fn min<'v>( - #[starlark(args)] args: Vec>, + #[starlark(args)] args: UnpackTuple>, key: Option>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> starlark::Result> { min_max(args, key, eval, true) } } diff --git a/starlark-rust/starlark/src/stdlib/funcs/mod.rs b/starlark-rust/starlark/src/stdlib/funcs/mod.rs deleted file mode 100644 index fc3a0d4370ff8..0000000000000 --- a/starlark-rust/starlark/src/stdlib/funcs/mod.rs +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2018 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. 
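The `min_max_iter` helper in this hunk drives both `min()` and `max()`: seed with the first element, fail on an empty iterable, and replace the candidate whenever the key comparison lands on the winning side. A self-contained sketch of the same logic, simplified to integers and a Rust closure in place of a Starlark callable:

```rust
use std::cmp::Ordering;

/// Sketch of min/max selection over a non-empty iterator with a key function.
fn min_max_iter<I, K>(mut it: I, key: K, min: bool) -> Result<i64, &'static str>
where
    I: Iterator<Item = i64>,
    K: Fn(i64) -> i64,
{
    // Like the real code: seed with the first element, fail on empty input.
    let mut best = it.next().ok_or("Argument is an empty iterable")?;
    // min keeps a candidate that compares Less, max one that compares Greater.
    let winning = if min { Ordering::Less } else { Ordering::Greater };
    for x in it {
        if key(x).cmp(&key(best)) == winning {
            best = x;
        }
    }
    Ok(best)
}

fn main() {
    // max by absolute value: -7 wins over 3 and 2.
    assert_eq!(min_max_iter([3, -7, 2].into_iter(), |x| x.abs(), false), Ok(-7));
    assert!(min_max_iter(std::iter::empty(), |x| x, true).is_err());
}
```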
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -pub(crate) mod dict; -pub(crate) mod globals; -pub(crate) mod list; -pub(crate) mod min_max; -pub(crate) mod other; -pub(crate) mod zip; diff --git a/starlark-rust/starlark/src/stdlib/funcs/other.rs b/starlark-rust/starlark/src/stdlib/funcs/other.rs index 6ac2376d586c5..7ff59f792f7e0 100644 --- a/starlark-rust/starlark/src/stdlib/funcs/other.rs +++ b/starlark-rust/starlark/src/stdlib/funcs/other.rs @@ -18,31 +18,15 @@ //! A module with the standard function and constants that are by default in all //! dialect of Starlark -use std::char; use std::cmp::Ordering; -use std::num::NonZeroI32; -use either::Either; use starlark_derive::starlark_module; use crate as starlark; use crate::environment::GlobalsBuilder; use crate::eval::Evaluator; -use crate::values::bool::StarlarkBool; -use crate::values::float::StarlarkFloat; -use crate::values::function::SpecialBuiltinFunction; -use crate::values::int::PointerI32; use crate::values::list::AllocList; -use crate::values::none::NoneType; -use crate::values::num::value::NumRef; -use crate::values::range::Range; -use crate::values::string::repr::string_repr; -use crate::values::string::StarlarkStr; -use crate::values::tuple::value::FrozenTuple; -use crate::values::tuple::AllocTuple; -use crate::values::tuple::TupleRef; -use crate::values::types::int_or_big::StarlarkInt; -use crate::values::types::int_or_big::StarlarkIntRef; +use crate::values::tuple::UnpackTuple; use crate::values::typing::never::StarlarkNever; use crate::values::typing::ty::AbstractType; use crate::values::typing::StarlarkIter; @@ -50,24 +34,12 @@ use crate::values::value_of_unchecked::ValueOfUnchecked; use crate::values::AllocValue; use crate::values::FrozenStringValue; use crate::values::Heap; -use crate::values::StringValue; use crate::values::Value; use crate::values::ValueError; use crate::values::ValueLike; -use crate::values::ValueOf; #[starlark_module] pub(crate) fn register_other(builder: &mut GlobalsBuilder) { - /// The `None` value, used to represent nothing. - /// Implicitly returned from functions that don't have an explicit return. - const None: NoneType = NoneType; - - /// A boolean representing true. - const True: bool = true; - - /// A boolean representing false. - const False: bool = false; - /// fail: fail the execution /// /// ``` @@ -78,30 +50,18 @@ pub(crate) fn register_other(builder: &mut GlobalsBuilder) { /// fail("oops", 1, False) # fail: oops 1 False /// # "#, "oops 1 False"); /// ``` - fn fail(#[starlark(args)] args: Vec) -> anyhow::Result { + fn fail(#[starlark(args)] args: UnpackTuple) -> starlark::Result { let mut s = String::new(); - for x in args { + for x in args.items { s.push(' '); match x.unpack_str() { Some(x) => s.push_str(x), None => x.collect_repr(&mut s), } } - Err(anyhow::anyhow!("fail:{}", s)) - } - - /// Take the absolute value of an int. 
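The behavioral change to `fail()` in this hunk is that it now raises a dedicated `ErrorKind::Fail` rather than a bare `anyhow` error, so embedders can tell user-initiated failures apart from internal ones. The message assembly itself is unchanged: string arguments are appended verbatim, everything else through its repr. A sketch of that assembly (the `Arg` enum is a stand-in for Starlark values, with reprs pre-rendered):

```rust
enum Arg {
    Str(&'static str),
    // The real code renders non-strings via Value::collect_repr;
    // the repr is pre-rendered here for the sketch.
    Repr(&'static str),
}

/// Sketch of fail()'s message building: a leading space per argument,
/// strings verbatim, everything else by repr.
fn fail_message(args: &[Arg]) -> String {
    let mut s = String::new();
    for a in args {
        s.push(' ');
        match a {
            Arg::Str(x) => s.push_str(x),
            Arg::Repr(x) => s.push_str(x),
        }
    }
    format!("fail:{s}")
}

fn main() {
    let msg = fail_message(&[Arg::Str("oops"), Arg::Repr("1"), Arg::Repr("False")]);
    assert_eq!(msg, "fail: oops 1 False");
}
```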
- /// - /// ``` - /// # starlark::assert::all_true(r#" - /// abs(0) == 0 - /// abs(-10) == 10 - /// abs(10) == 10 - /// # "#); - /// ``` - fn abs(#[starlark(require = pos)] x: StarlarkIntRef) -> anyhow::Result { - // TODO(nga): does not handle float. - Ok(x.abs()) + Err(starlark::Error::new_kind(starlark::ErrorKind::Fail( + anyhow::Error::msg(s), + ))) } /// [any]( @@ -122,7 +82,7 @@ pub(crate) fn register_other(builder: &mut GlobalsBuilder) { fn any<'v>( #[starlark(require = pos)] x: ValueOfUnchecked<'v, StarlarkIter>>, heap: &'v Heap, - ) -> anyhow::Result { + ) -> starlark::Result { for i in x.get().iterate(heap)? { if i.to_bool() { return Ok(true); @@ -152,7 +112,7 @@ pub(crate) fn register_other(builder: &mut GlobalsBuilder) { fn all<'v>( #[starlark(require = pos)] x: ValueOfUnchecked<'v, StarlarkIter>>, heap: &'v Heap, - ) -> anyhow::Result { + ) -> starlark::Result { for i in x.get().iterate(heap)? { if !i.to_bool() { return Ok(false); @@ -161,65 +121,6 @@ pub(crate) fn register_other(builder: &mut GlobalsBuilder) { Ok(true) } - /// [bool]( - /// https://github.com/bazelbuild/starlark/blob/master/spec.md#bool - /// ): returns the truth value of any starlark value. - /// - /// ``` - /// # starlark::assert::all_true(r#" - /// bool() == False - /// bool([]) == False - /// bool([1]) == True - /// bool(True) == True - /// bool(False) == False - /// bool(None) == False - /// bool(bool) == True - /// bool(1) == True - /// bool(0) == False - /// bool({}) == False - /// bool({1:2}) == True - /// bool(()) == False - /// bool((1,)) == True - /// bool("") == False - /// bool("1") == True - /// # "#); - /// ``` - #[starlark(as_type = StarlarkBool, speculative_exec_safe)] - fn bool(#[starlark(require = pos)] x: Option) -> anyhow::Result { - match x { - None => Ok(false), - Some(x) => Ok(x.to_bool()), - } - } - - /// [chr]( - /// https://github.com/bazelbuild/starlark/blob/master/spec.md#bool - /// ): returns a string encoding a codepoint. - /// - /// `chr(i)` returns a returns a string that encodes the single Unicode code - /// point whose value is specified by the integer `i`. `chr` fails - /// unless `0 ≤ i ≤ 0x10FFFF`. - /// - /// ``` - /// # starlark::assert::all_true(r#" - /// chr(65) == 'A' - /// chr(1049) == 'Й' - /// chr(0x1F63F) == '😿' - /// # "#); - /// ``` - #[starlark(speculative_exec_safe)] - fn chr(#[starlark(require = pos)] i: i32) -> anyhow::Result { - let cp = u32::try_from(i) - .map_err(|_| anyhow::anyhow!("chr() parameter value negative integer {i}"))?; - match char::from_u32(cp) { - Some(x) => Ok(x), - None => Err(anyhow::anyhow!( - "chr() parameter value is 0x{:x} which is not a valid UTF-8 codepoint", - cp - )), - } - } - /// [dir]( /// https://github.com/bazelbuild/starlark/blob/master/spec.md#dir /// ): list attributes of a value. @@ -260,7 +161,7 @@ pub(crate) fn register_other(builder: &mut GlobalsBuilder) { #[starlark(require = pos)] it: ValueOfUnchecked<'v, StarlarkIter>>, #[starlark(default = 0)] start: i32, heap: &'v Heap, - ) -> anyhow::Result> { + ) -> starlark::Result> { let v = it .get() .iterate(heap)? @@ -269,67 +170,6 @@ pub(crate) fn register_other(builder: &mut GlobalsBuilder) { Ok(AllocList(v)) } - /// [float]( - /// https://github.com/google/skylark/blob/a5f7082aabed29c0e429c722292c66ec8ecf9591/doc/spec.md#float - /// ): interprets its argument as a floating-point number. - /// - /// If x is a `float`, the result is x. - /// if x is an `int`, the result is the nearest floating point value to x. 
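The removed `chr()` is pure std behind the scenes: reject negative input, then let `char::from_u32` reject surrogates and values above 0x10FFFF. A sketch using only the standard library:

```rust
/// Sketch of chr() validation: 0 <= i <= 0x10FFFF and not a surrogate,
/// which is exactly what char::from_u32 enforces.
fn chr(i: i32) -> Result<char, String> {
    let cp = u32::try_from(i)
        .map_err(|_| format!("chr() parameter value negative integer {i}"))?;
    char::from_u32(cp)
        .ok_or_else(|| format!("chr() parameter value is 0x{cp:x} which is not a valid code point"))
}

fn main() {
    assert_eq!(chr(65), Ok('A'));
    assert_eq!(chr(0x1F63F), Ok('😿'));
    assert!(chr(0xD800).is_err()); // surrogate, rejected by char::from_u32
    assert!(chr(-1).is_err());
}
```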
- /// If x is a string, the string is interpreted as a floating-point literal. - /// With no arguments, `float()` returns `0.0`. - /// - /// ``` - /// # starlark::assert::all_true(r#" - /// float() == 0.0 - /// float(1) == 1.0 - /// float('1') == 1.0 - /// float('1.0') == 1.0 - /// float('.25') == 0.25 - /// float('1e2') == 100.0 - /// float(False) == 0.0 - /// float(True) == 1.0 - /// # "#); - /// # starlark::assert::fail(r#" - /// float("hello") # error: not a valid number - /// # "#, "not a valid number"); - /// # starlark::assert::fail(r#" - /// float([]) # error - /// # "#, "doesn't match, expected"); - /// ``` - #[starlark(as_type = StarlarkFloat, speculative_exec_safe)] - fn float( - #[starlark(require = pos)] a: Option, &str>>, - ) -> anyhow::Result { - if a.is_none() { - return Ok(0.0); - } - let a = a.unwrap(); - match a { - Either::Left(Either::Left(f)) => Ok(f.as_float()), - Either::Left(Either::Right(b)) => Ok(if b { 1.0 } else { 0.0 }), - Either::Right(s) => { - match s.parse::() { - Ok(f) => { - if f.is_infinite() && !s.to_lowercase().contains("inf") { - // if a resulting float is infinite but the parsed string is not explicitly infinity then we should fail with an error - Err(anyhow::anyhow!( - "float() floating-point number too large: {}", - s - )) - } else { - Ok(f) - } - } - Err(x) => { - let mut repr = String::new(); - string_repr(s, &mut repr); - Err(anyhow::anyhow!("{} is not a valid number: {}", repr, x,)) - } - } - } - } - } - /// [getattr]( /// https://github.com/bazelbuild/starlark/blob/master/spec.md#getattr /// ): returns the value of an attribute @@ -339,6 +179,9 @@ pub(crate) fn register_other(builder: &mut GlobalsBuilder) { /// /// `getattr(x, "f")` is equivalent to `x.f`. /// + /// `getattr(x, "f", d)` is equivalent to `x.f if hasattr(x, "f") else d` + /// and will never raise an error. + /// /// ``` /// # starlark::assert::all_true(r#" /// getattr("banana", "split")("a") == ["b", "n", "n", ""] # equivalent to "banana".split("a") @@ -350,7 +193,7 @@ pub(crate) fn register_other(builder: &mut GlobalsBuilder) { #[starlark(require = pos)] attr: &str, #[starlark(require = pos)] default: Option>, heap: &'v Heap, - ) -> anyhow::Result> { + ) -> starlark::Result> { // TODO(nga): this doesn't cache string hash, so it is suboptimal. match a.get_attr(attr, heap)? { Some(v) => Ok(v), @@ -380,8 +223,8 @@ pub(crate) fn register_other(builder: &mut GlobalsBuilder) { /// https://github.com/bazelbuild/starlark/blob/master/spec.md#hash /// ): returns the hash number of a value. /// - /// `hash(x)`` returns an integer hash value for x such that `x == y` - /// implies `hash(x) == hash(y)``. + /// `hash(x)` returns an integer hash value for x such that `x == y` + /// implies `hash(x) == hash(y)`. /// /// `hash` fails if x, or any value upon which its hash depends, is /// unhashable. @@ -418,143 +261,6 @@ pub(crate) fn register_other(builder: &mut GlobalsBuilder) { })) } - /// [int]( - /// https://github.com/bazelbuild/starlark/blob/master/spec.md#int - /// ): convert a value to integer. - /// - /// `int(x[, base])` interprets its argument as an integer. - /// - /// If x is an `int`, the result is x. - /// If x is a `float`, the result is the integer value nearest to x, - /// truncating towards zero; it is an error if x is not finite (`NaN`, - /// `+Inf`, `-Inf`). - /// If x is a `bool`, the result is 0 for `False` or 1 for `True`. 
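A detail worth preserving from the removed `float()`: `f64::from_str` silently returns infinity for finite-looking literals that overflow (such as `1e310`), so the code rejected an infinite parse unless the input literally spelled some form of `inf`. The guard in isolation:

```rust
/// Sketch of float()'s overflow guard: "1e310" parses to +inf in Rust,
/// which Starlark treats as an error unless the user explicitly wrote inf.
fn parse_float(s: &str) -> Result<f64, String> {
    match s.parse::<f64>() {
        Ok(f) if f.is_infinite() && !s.to_lowercase().contains("inf") => {
            Err(format!("float() floating-point number too large: {s}"))
        }
        Ok(f) => Ok(f),
        Err(e) => Err(format!("{s:?} is not a valid number: {e}")),
    }
}

fn main() {
    assert_eq!(parse_float("1e2"), Ok(100.0));
    assert!(parse_float("1e310").is_err()); // overflows to inf: rejected
    assert_eq!(parse_float("+inf"), Ok(f64::INFINITY)); // explicit inf is allowed
}
```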
-    ///
-    /// If x is a string, it is interpreted like a string literal;
-    /// an optional base prefix (`0`, `0b`, `0B`, `0x`, `0X`) determines which
-    /// base to use. The string may specify an arbitrarily large integer,
-    /// whereas true integer literals are restricted to 64 bits.
-    /// If a non-zero `base` argument is provided, the string is interpreted
-    /// in that base and no base prefix is permitted; the base argument may
-    /// be specified by name.
-    ///
-    /// `int()` with no arguments returns 0.
-    ///
-    /// ```
-    /// # starlark::assert::all_true(r#"
-    /// int() == 0
-    /// int(1) == 1
-    /// int(False) == 0
-    /// int(True) == 1
-    /// int('1') == 1
-    /// int('16') == 16
-    /// int('16', 10) == 16
-    /// int('16', 8) == 14
-    /// int('16', 16) == 22
-    /// int(0.0) == 0
-    /// int(3.14) == 3
-    /// int(-12345.6789) == -12345
-    /// int(2e9) == 2000000000
-    /// # "#);
-    /// # starlark::assert::fail(r#"
-    /// int("hello") # error: Cannot parse
-    /// # "#, "Cannot parse");
-    /// # starlark::assert::fail(r#"
-    /// int(float("nan")) # error: cannot be represented as exact integer
-    /// # "#, "cannot be represented as exact integer");
-    /// # starlark::assert::fail(r#"
-    /// int(float("inf")) # error: cannot be represented as exact integer
-    /// # "#, "cannot be represented as exact integer");
-    /// ```
-    #[starlark(as_type = PointerI32, speculative_exec_safe)]
-    fn int<'v>(
-        #[starlark(require = pos)] a: Option<
-            ValueOf<'v, Either<Either<NumRef<'v>, bool>, &'v str>>,
-        >,
-        base: Option<i32>,
-        heap: &'v Heap,
-    ) -> anyhow::Result<ValueOfUnchecked<'v, StarlarkInt>> {
-        let Some(a) = a else {
-            return Ok(ValueOfUnchecked::new(heap.alloc(0)));
-        };
-        let num_or_bool = match a.typed {
-            Either::Left(num_or_bool) => num_or_bool,
-            Either::Right(s) => {
-                let base = base.unwrap_or(0);
-                if base == 1 || base < 0 || base > 36 {
-                    return Err(anyhow::anyhow!(
-                        "{} is not a valid base, int() base must be >= 2 and <= 36",
-                        base
-                    ));
-                }
-                let (negate, s) = {
-                    match s.chars().next() {
-                        Some('+') => (false, s.get(1..).unwrap()),
-                        Some('-') => (true, s.get(1..).unwrap()),
-                        _ => (false, s),
-                    }
-                };
-                let base = if base == 0 {
-                    match s.get(0..2) {
-                        Some("0b") | Some("0B") => 2,
-                        Some("0o") | Some("0O") => 8,
-                        Some("0x") | Some("0X") => 16,
-                        _ => 10,
-                    }
-                } else {
-                    base as u32
-                };
-                let s = match base {
-                    16 => {
-                        if s.starts_with("0x") || s.starts_with("0X") {
-                            s.get(2..).unwrap()
-                        } else {
-                            s
-                        }
-                    }
-                    8 => {
-                        if s.starts_with("0o") || s.starts_with("0O") {
-                            s.get(2..).unwrap()
-                        } else {
-                            s
-                        }
-                    }
-                    2 => {
-                        if s.starts_with("0b") || s.starts_with("0B") {
-                            s.get(2..).unwrap()
-                        } else {
-                            s
-                        }
-                    }
-                    _ => s,
-                };
-                // We already handled the sign above, so we are not trying to parse another sign.
- if s.starts_with('-') || s.starts_with('+') { - return Err(anyhow::anyhow!("Cannot parse `{}` as an integer", s,)); - } - - let x = StarlarkInt::from_str_radix(s, base)?; - let x = if negate { -x } else { x }; - return Ok(ValueOfUnchecked::new(heap.alloc(x))); - } - }; - - if let Some(base) = base { - return Err(anyhow::anyhow!( - "int() cannot convert non-string with explicit base '{}'", - base - )); - } - - match num_or_bool { - Either::Left(NumRef::Int(_)) => Ok(ValueOfUnchecked::new(a.value)), - Either::Left(NumRef::Float(f)) => Ok(ValueOfUnchecked::new( - heap.alloc(StarlarkInt::from_f64_exact(f.trunc())?), - )), - Either::Right(b) => Ok(ValueOfUnchecked::new(heap.alloc(b as i32))), - } - } - /// [len]( /// https://github.com/bazelbuild/starlark/blob/master/spec.md#len /// ): get the length of a sequence @@ -577,125 +283,10 @@ pub(crate) fn register_other(builder: &mut GlobalsBuilder) { /// # "#, "not supported"); /// ``` #[starlark(speculative_exec_safe)] - fn len(#[starlark(require = pos)] a: Value) -> anyhow::Result { + fn len(#[starlark(require = pos)] a: Value) -> starlark::Result { a.length() } - /// [ord]( - /// https://github.com/google/skylark/blob/a0e5de7e63b47e716cca7226662a4c95d47bf873/doc/spec.mdord - /// ): returns the codepoint of a character - /// - /// `ord(s)` returns the integer value of the sole Unicode code point - /// encoded by the string `s`. - /// - /// If `s` does not encode exactly one Unicode code point, `ord` fails. - /// Each invalid code within the string is treated as if it encodes the - /// Unicode replacement character, U+FFFD. - /// - /// Example: - /// - /// ``` - /// # starlark::assert::all_true(r#" - /// ord("A") == 65 - /// ord("Й") == 1049 - /// ord("😿") == 0x1F63F - /// # "#); - /// ``` - #[starlark(speculative_exec_safe)] - fn ord<'v>(#[starlark(require = pos)] a: StringValue<'v>) -> anyhow::Result { - let mut chars = a.as_str().chars(); - if let Some(c) = chars.next() { - if chars.next().is_none() { - return Ok(u32::from(c) as i32); - } - } - Err(anyhow::anyhow!( - "ord(): {} is not a single character string", - a.to_value().to_repr() - )) - } - - /// [range]( - /// https://github.com/bazelbuild/starlark/blob/master/spec.md#range - /// ): return a range of integers - /// - /// `range` returns a tuple of integers defined by the specified interval - /// and stride. - /// - /// ```python - /// range(stop) # equivalent to range(0, stop) - /// range(start, stop) # equivalent to range(start, stop, 1) - /// range(start, stop, step) - /// ``` - /// - /// `range` requires between one and three integer arguments. - /// With one argument, `range(stop)` returns the ascending sequence of - /// non-negative integers less than `stop`. - /// With two arguments, `range(start, stop)` returns only integers not less - /// than `start`. - /// - /// With three arguments, `range(start, stop, step)` returns integers - /// formed by successively adding `step` to `start` until the value meets or - /// passes `stop`. A call to `range` fails if the value of `step` is - /// zero. 
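The string branch of `int()` above resolves a base prefix before delegating to `from_str_radix`: base 0 means sniff `0b`/`0o`/`0x`, while an explicit base only strips a prefix that agrees with it. A compact sketch of just that prefix logic with std integers (`parse_int` is a hypothetical name; sign handling, the 2..=36 base validation, and the bignum promotion of the real code are omitted):

```rust
/// Sketch of int()'s base-prefix resolution, then from_str_radix.
fn parse_int(s: &str, base: u32) -> Result<i64, String> {
    let (base, digits) = match (base, s.get(0..2)) {
        // base 0: infer the base from the prefix, default to decimal.
        (0, Some("0b" | "0B")) => (2, &s[2..]),
        (0, Some("0o" | "0O")) => (8, &s[2..]),
        (0, Some("0x" | "0X")) => (16, &s[2..]),
        (0, _) => (10, s),
        // explicit base: only strip a prefix that matches it.
        (16, Some("0x" | "0X")) => (16, &s[2..]),
        (8, Some("0o" | "0O")) => (8, &s[2..]),
        (2, Some("0b" | "0B")) => (2, &s[2..]),
        (b, _) => (b, s),
    };
    i64::from_str_radix(digits, base).map_err(|e| format!("Cannot parse `{s}` as an integer: {e}"))
}

fn main() {
    assert_eq!(parse_int("16", 8), Ok(14));   // int('16', 8) == 14
    assert_eq!(parse_int("0x16", 0), Ok(22)); // prefix selects base 16
    assert_eq!(parse_int("0x16", 16), Ok(22));
}
```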
- /// - /// ``` - /// # starlark::assert::all_true(r#" - /// list(range(10)) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] - /// list(range(3, 10)) == [3, 4, 5, 6, 7, 8, 9] - /// list(range(3, 10, 2)) == [3, 5, 7, 9] - /// list(range(10, 3, -2)) == [10, 8, 6, 4] - /// # "#); - /// ``` - #[starlark(as_type = Range, speculative_exec_safe)] - fn range( - #[starlark(require = pos)] a1: i32, - #[starlark(require = pos)] a2: Option, - #[starlark(require = pos, default = 1)] step: i32, - ) -> anyhow::Result { - let start = match a2 { - None => 0, - Some(_) => a1, - }; - let stop = a2.unwrap_or(a1); - let step = match NonZeroI32::new(step) { - Some(step) => step, - None => { - return Err(anyhow::anyhow!( - "Third argument of range (step) cannot be zero" - )); - } - }; - Ok(Range::new(start, stop, step)) - } - - /// [repr]( - /// https://github.com/bazelbuild/starlark/blob/master/spec.md#repr - /// ): formats its argument as a string. - /// - /// All strings in the result are double-quoted. - /// - /// ``` - /// # starlark::assert::all_true(r#" - /// repr(1) == '1' - /// repr("x") == "\"x\"" - /// repr([1, "x"]) == "[1, \"x\"]" - /// repr("test \"'") == "\"test \\\"'\"" - /// repr("x\"y😿 \\'") == "\"x\\\"y\\U0001f63f \\\\'\"" - /// "#); - /// ``` - #[starlark(speculative_exec_safe)] - fn repr<'v>( - #[starlark(require = pos)] a: Value<'v>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { - let mut s = eval.string_pool.alloc(); - a.collect_repr(&mut s); - let r = eval.heap().alloc_str(&s); - eval.string_pool.release(s); - Ok(r) - } - /// [reversed]( /// https://github.com/bazelbuild/starlark/blob/master/spec.md#reversed /// ): reverse a sequence @@ -715,7 +306,7 @@ pub(crate) fn register_other(builder: &mut GlobalsBuilder) { fn reversed<'v>( #[starlark(require = pos)] a: ValueOfUnchecked<'v, StarlarkIter>>, heap: &'v Heap, - ) -> anyhow::Result>> { + ) -> starlark::Result>> { let mut v: Vec = a.get().iterate(heap)?.collect(); v.reverse(); Ok(v) @@ -746,11 +337,11 @@ pub(crate) fn register_other(builder: &mut GlobalsBuilder) { // This function is not spec-safe, because it may call `key` function // which might be not spec-safe. fn sorted<'v>( - #[starlark(require = pos)] x: ValueOfUnchecked<'v, ValueOfUnchecked>>, + #[starlark(require = pos)] x: ValueOfUnchecked<'v, StarlarkIter>>, #[starlark(require = named)] key: Option>, #[starlark(require = named, default = false)] reverse: bool, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result>>> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> starlark::Result>>> { let it = x.get().iterate(eval.heap())?; let mut it = match key { None => it.map(|x| (x, x)).collect(), @@ -785,71 +376,6 @@ pub(crate) fn register_other(builder: &mut GlobalsBuilder) { Ok(AllocList(it.into_iter().map(|x| x.0))) } - /// [str]( - /// https://github.com/bazelbuild/starlark/blob/master/spec.md#str - /// ): formats its argument as a string. - /// - /// If x is a string, the result is x (without quotation). - /// All other strings, such as elements of a list of strings, are - /// double-quoted. - /// - /// ``` - /// # starlark::assert::all_true(r#" - /// str(1) == '1' - /// str("x") == 'x' - /// str([1, "x"]) == "[1, \"x\"]" - /// # "#); - /// ``` - #[starlark(as_type = StarlarkStr, speculative_exec_safe)] - fn str<'v>( - #[starlark(require = pos)] a: Value<'v>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { - if let Some(a) = StringValue::new(a) { - // Special case that can avoid reallocating, but is equivalent. 
- Ok(a) - } else { - let mut s = eval.string_pool.alloc(); - a.collect_repr(&mut s); - let r = eval.heap().alloc_str(&s); - eval.string_pool.release(s); - Ok(r) - } - } - - /// [tuple]( - /// https://github.com/bazelbuild/starlark/blob/master/spec.md#tuple - /// ): returns a tuple containing the elements of the iterable x. - /// - /// With no arguments, `tuple()` returns the empty tuple. - /// - /// ``` - /// # starlark::assert::all_true(r#" - /// tuple() == () - /// tuple([1,2,3]) == (1, 2, 3) - /// # "#); - /// ``` - #[starlark( - as_type = FrozenTuple, - speculative_exec_safe, - special_builtin_function = SpecialBuiltinFunction::Tuple, - )] - fn tuple<'v>( - #[starlark(require = pos)] a: Option>>>, - heap: &'v Heap, - ) -> anyhow::Result>> { - if let Some(a) = a { - if TupleRef::from_value(a.get()).is_some() { - return Ok(ValueOfUnchecked::new(a.get())); - } - - let it = a.get().iterate(heap)?; - Ok(ValueOfUnchecked::new(heap.alloc_tuple_iter(it))) - } else { - Ok(ValueOfUnchecked::new(heap.alloc(AllocTuple::EMPTY))) - } - } - /// [type]( /// https://github.com/bazelbuild/starlark/blob/master/spec.md#type /// ): returns a string describing the type of its operand. @@ -882,6 +408,9 @@ mod tests { assert::eq("2147483648", "abs(-2147483648)"); assert::eq("2147483648000", "abs(2147483648000)"); assert::eq("2147483648000", "abs(-2147483648000)"); + assert::eq("1.23", "abs(-1.23)"); + assert::eq("2.3", "abs(2.3)"); + assert::is_true("isinstance(abs(1), int)"); } #[test] diff --git a/starlark-rust/starlark/src/stdlib/funcs/zip.rs b/starlark-rust/starlark/src/stdlib/funcs/zip.rs index 5afedd70a4c7b..b4aa1ac3ddc3c 100644 --- a/starlark-rust/starlark/src/stdlib/funcs/zip.rs +++ b/starlark-rust/starlark/src/stdlib/funcs/zip.rs @@ -20,52 +20,42 @@ use starlark_derive::starlark_module; use crate as starlark; use crate::codemap::Span; -use crate::codemap::Spanned; use crate::environment::GlobalsBuilder; +use crate::typing::call_args::TyCallArgs; +use crate::typing::callable::TyCallable; use crate::typing::error::TypingOrInternalError; use crate::typing::function::TyCustomFunctionImpl; -use crate::typing::Arg; +use crate::typing::ParamSpec; use crate::typing::Ty; use crate::typing::TypingOracleCtx; +use crate::values::tuple::UnpackTuple; +use crate::values::typing::StarlarkIter; +use crate::values::FrozenValue; use crate::values::Heap; use crate::values::Value; +use crate::values::ValueOfUnchecked; #[derive(Clone, Debug, Eq, PartialEq, Hash, Ord, PartialOrd, Allocative)] struct ZipType; impl TyCustomFunctionImpl for ZipType { + fn as_callable(&self) -> TyCallable { + // TODO(nga): this should be obtained from function signature from function definition. + TyCallable::new(ParamSpec::args(Ty::iter(Ty::any())), Ty::list(Ty::any())) + } + fn validate_call( &self, _span: Span, - args: &[Spanned], + args: &TyCallArgs, oracle: TypingOracleCtx, ) -> Result { let mut iter_item_types: Vec = Vec::new(); - let mut seen_star_args = false; - for arg in args { - match &arg.node { - Arg::Pos(pos) => { - let item_ty = oracle.iter_item(Spanned { - span: arg.span, - node: pos, - })?; - iter_item_types.push(item_ty); - } - Arg::Name(_, _) => { - return Err( - oracle.msg_error(arg.span, "zip() does not accept keyword arguments") - ); - } - Arg::Args(_) => { - seen_star_args = true; - } - Arg::Kwargs(_) => { - // `zip()` does not accept keyword args, - // but if `**kwargs` is empty, the call is valid. 
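Independent of the typing refinement being rewritten in this hunk, `zip()`'s runtime contract is truncation to the shortest argument: row `i` pairs up element `i` of every argument, and iteration stops when any argument runs out. The two-argument case in std Rust:

```rust
/// Sketch of zip() semantics: the result length is the minimum of the
/// argument lengths.
fn zip2<A: Clone, B: Clone>(xs: &[A], ys: &[B]) -> Vec<(A, B)> {
    xs.iter().cloned().zip(ys.iter().cloned()).collect()
}

fn main() {
    // The third element of the first argument is dropped.
    assert_eq!(zip2(&[1, 2, 3], &["a", "b"]), vec![(1, "a"), (2, "b")]);
}
```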
- } - } + for pos in &args.pos { + let item_ty = oracle.iter_item(pos.as_ref())?; + iter_item_types.push(item_ty); } - if seen_star_args { + if args.args.is_some() { Ok(Ty::list(Ty::any())) } else { Ok(Ty::list(Ty::tuple(iter_item_types))) @@ -95,14 +85,14 @@ pub(crate) fn register_zip(globals: &mut GlobalsBuilder) { /// ``` #[starlark(speculative_exec_safe, ty_custom_function = ZipType)] fn zip<'v>( - #[starlark(args)] args: Vec>, + #[starlark(args)] args: UnpackTuple>>, heap: &'v Heap, - ) -> anyhow::Result>> { + ) -> starlark::Result>> { let mut v = Vec::new(); let mut first = true; - for arg in args { + for arg in args.items { let mut idx = 0; - for e in arg.iterate(heap)? { + for e in arg.get().iterate(heap)? { if first { v.push(heap.alloc((e,))); idx += 1; diff --git a/starlark-rust/starlark/src/stdlib/internal.rs b/starlark-rust/starlark/src/stdlib/internal.rs index 1faf479e055f1..940a8c47a32d2 100644 --- a/starlark-rust/starlark/src/stdlib/internal.rs +++ b/starlark-rust/starlark/src/stdlib/internal.rs @@ -34,7 +34,7 @@ fn starlark_rust_internal_members(globals: &mut GlobalsBuilder) { } pub(crate) fn register_internal(globals: &mut GlobalsBuilder) { - globals.struct_("starlark_rust_internal", |s| { + globals.namespace("starlark_rust_internal", |s| { starlark_rust_internal_members(s) }); } diff --git a/starlark-rust/starlark/src/stdlib/json.rs b/starlark-rust/starlark/src/stdlib/json.rs index c844f25608561..a54a981e59fa5 100644 --- a/starlark-rust/starlark/src/stdlib/json.rs +++ b/starlark-rust/starlark/src/stdlib/json.rs @@ -30,7 +30,7 @@ use crate::environment::GlobalsBuilder; use crate::typing::Ty; use crate::values::dict::AllocDict; use crate::values::type_repr::StarlarkTypeRepr; -use crate::values::types::int_or_big::StarlarkInt; +use crate::values::types::int::int_or_big::StarlarkInt; use crate::values::AllocFrozenValue; use crate::values::AllocValue; use crate::values::FrozenHeap; @@ -39,12 +39,16 @@ use crate::values::Heap; use crate::values::Value; impl StarlarkTypeRepr for serde_json::Number { + type Canonical = Either; + fn starlark_type_repr() -> Ty { Either::::starlark_type_repr() } } impl<'a> StarlarkTypeRepr for &'a serde_json::Number { + type Canonical = serde_json::Number; + fn starlark_type_repr() -> Ty { serde_json::Number::starlark_type_repr() } @@ -68,6 +72,8 @@ impl<'v, 'a> AllocValue<'v> for &'a serde_json::Number { impl<'v> AllocValue<'v> for serde_json::Number { fn alloc_value(self, heap: &'v Heap) -> Value<'v> { + // If you follow this hint, it becomes infinite recursion + #[allow(clippy::needless_borrows_for_generic_args)] heap.alloc(&self) } } @@ -90,17 +96,23 @@ impl<'a> AllocFrozenValue for &'a serde_json::Number { impl AllocFrozenValue for serde_json::Number { fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue { + // If you follow this hint, it becomes infinite recursion + #[allow(clippy::needless_borrows_for_generic_args)] heap.alloc(&self) } } impl<'a, K: StarlarkTypeRepr, V: StarlarkTypeRepr> StarlarkTypeRepr for &'a serde_json::Map { + type Canonical = as StarlarkTypeRepr>::Canonical; + fn starlark_type_repr() -> Ty { AllocDict::>::starlark_type_repr() } } impl StarlarkTypeRepr for serde_json::Map { + type Canonical = as StarlarkTypeRepr>::Canonical; + fn starlark_type_repr() -> Ty { AllocDict::>::starlark_type_repr() } @@ -116,6 +128,8 @@ impl<'a, 'v> AllocValue<'v> for &'a serde_json::Map { impl<'v> AllocValue<'v> for serde_json::Map { fn alloc_value(self, heap: &'v Heap) -> Value<'v> { + // If you follow this hint, it becomes infinite 
recursion + #[allow(clippy::needless_borrows_for_generic_args)] heap.alloc(&self) } } @@ -128,11 +142,15 @@ impl<'a> AllocFrozenValue for &'a serde_json::Map { impl AllocFrozenValue for serde_json::Map { fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue { + // If you follow this hint, it becomes infinite recursion + #[allow(clippy::needless_borrows_for_generic_args)] heap.alloc(&self) } } impl<'a> StarlarkTypeRepr for &'a serde_json::Value { + type Canonical = ::Canonical; + fn starlark_type_repr() -> Ty { // Any. Value::starlark_type_repr() @@ -140,6 +158,8 @@ impl<'a> StarlarkTypeRepr for &'a serde_json::Value { } impl StarlarkTypeRepr for serde_json::Value { + type Canonical = ::Canonical; + fn starlark_type_repr() -> Ty { // Any. Value::starlark_type_repr() @@ -161,6 +181,8 @@ impl<'v, 'a> AllocValue<'v> for &'a serde_json::Value { impl<'v> AllocValue<'v> for serde_json::Value { fn alloc_value(self, heap: &'v Heap) -> Value<'v> { + // If you follow this hint, it becomes infinite recursion + #[allow(clippy::needless_borrows_for_generic_args)] heap.alloc(&self) } } @@ -180,6 +202,8 @@ impl<'a> AllocFrozenValue for &'a serde_json::Value { impl AllocFrozenValue for serde_json::Value { fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue { + // If you follow this hint, it becomes infinite recursion + #[allow(clippy::needless_borrows_for_generic_args)] heap.alloc(&self) } } @@ -202,7 +226,7 @@ pub(crate) fn json(globals: &mut GlobalsBuilder) { // Copying Bazel's json module: https://bazel.build/rules/lib/json // or starlark-go json module: // https://github.com/google/starlark-go/blob/d1966c6b9fcd6631f48f5155f47afcd7adcc78c2/lib/json/json.go#L28 - globals.struct_("json", json_members); + globals.namespace("json", json_members); } #[cfg(test)] diff --git a/starlark-rust/starlark/src/stdlib/list.rs b/starlark-rust/starlark/src/stdlib/list.rs deleted file mode 100644 index 6a0aa367c03a6..0000000000000 --- a/starlark-rust/starlark/src/stdlib/list.rs +++ /dev/null @@ -1,327 +0,0 @@ -/* - * Copyright 2018 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! Methods for the `list` type. - -use starlark_derive::starlark_module; -use starlark_syntax::convert_indices::convert_index; -use starlark_syntax::convert_indices::convert_indices; - -use crate as starlark; -use crate::environment::MethodsBuilder; -use crate::values::list::ListRef; -use crate::values::none::NoneOr; -use crate::values::none::NoneType; -use crate::values::types::list::value::ListData; -use crate::values::typing::StarlarkIter; -use crate::values::Heap; -use crate::values::Value; -use crate::values::ValueError; -use crate::values::ValueOfUnchecked; - -#[starlark_module] -pub(crate) fn list_methods(builder: &mut MethodsBuilder) { - /// [list.append]( - /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#list·append - /// ): append an element to a list. 
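The repeated `#[allow(clippy::needless_borrows_for_generic_args)]` comments above guard against a real trap: inside `alloc_value` for the owned type, `heap.alloc(&self)` dispatches to the `&T` impl, and dropping the borrow as clippy suggests would resolve back to the `T` impl itself. A toy model of the recursion hazard (direct trait impls here stand in for the generic `heap.alloc` dispatch):

```rust
/// Toy model: Alloc is implemented both for Num and for &Num,
/// and the owned impl delegates to the borrowed one.
trait Alloc {
    fn alloc(self) -> String;
}

struct Num(i64);

impl Alloc for &Num {
    fn alloc(self) -> String {
        self.0.to_string()
    }
}

impl Alloc for Num {
    fn alloc(self) -> String {
        // `(&self).alloc()` picks the &Num impl. Removing the borrow, as the
        // clippy hint would suggest at a generic call site, turns this into
        // `self.alloc()`, which is this very impl, and recurses forever.
        (&self).alloc()
    }
}

fn main() {
    assert_eq!(Num(42).alloc(), "42");
}
```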
- /// - /// `L.append(x)` appends `x` to the list L, and returns `None`. - /// - /// `append` fails if the list is frozen or has active iterators. - /// - /// ``` - /// # starlark::assert::is_true(r#" - /// x = [] - /// x.append(1) - /// x.append(2) - /// x.append(3) - /// x == [1, 2, 3] - /// # "#); - /// ``` - fn append<'v>( - this: Value<'v>, - #[starlark(require = pos)] el: Value<'v>, - heap: &'v Heap, - ) -> anyhow::Result { - let this = ListData::from_value_mut(this)?; - this.push(el, heap); - Ok(NoneType) - } - - /// [list.clear]( - /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#list·clear - /// ): clear a list - /// - /// `L.clear()` removes all the elements of the list L and returns `None`. - /// It fails if the list is frozen or if there are active iterators. - /// - /// ``` - /// # starlark::assert::is_true(r#" - /// x = [1, 2, 3] - /// x.clear() - /// x == [] - /// # "#); - /// ``` - fn clear(this: Value) -> anyhow::Result { - let this = ListData::from_value_mut(this)?; - this.clear(); - Ok(NoneType) - } - - /// [list.extend]( - /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#list·extend - /// ): extend a list with another iterable's content. - /// - /// `L.extend(x)` appends the elements of `x`, which must be iterable, to - /// the list L, and returns `None`. - /// - /// `extend` fails if `x` is not iterable, or if the list L is frozen or has - /// active iterators. - /// - /// ``` - /// # starlark::assert::is_true(r#" - /// x = [] - /// x.extend([1, 2, 3]) - /// x.extend(["foo"]) - /// x == [1, 2, 3, "foo"] - /// # "#); - /// ``` - fn extend<'v>( - this: Value<'v>, - #[starlark(require = pos)] other: ValueOfUnchecked<'v, StarlarkIter>>, - heap: &'v Heap, - ) -> anyhow::Result { - let res = ListData::from_value_mut(this)?; - if this.ptr_eq(other.get()) { - // If the types alias, we can't borrow the `other` for iteration. - // But we can do something smarter to double the elements - res.double(heap); - } else { - let it = other.get().iterate(heap)?; - res.extend(it, heap); - } - Ok(NoneType) - } - - /// [list.index]( - /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#list·index - /// ): get the index of an element in the list. - /// - /// `L.index(x[, start[, end]])` finds `x` within the list L and returns its - /// index. - /// - /// The optional `start` and `end` parameters restrict the portion of - /// list L that is inspected. If provided and not `None`, they must be list - /// indices of type `int`. If an index is negative, `len(L)` is effectively - /// added to it, then if the index is outside the range `[0:len(L)]`, the - /// nearest value within that range is used; see [Indexing](#indexing). - /// - /// `index` fails if `x` is not found in L, or if `start` or `end` - /// is not a valid index (`int` or `None`). 
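The aliasing special case in `extend` above is the interesting part: a list cannot be iterated while it is being mutated, so `x.extend(x)` is implemented as doubling the elements in place. Plain Rust hits the same wall, and `Vec::extend_from_within` is the std analogue of the trick:

```rust
fn main() {
    let mut x = vec![1, 2, 3];
    // `x.extend(x.iter())` would not compile: iterating borrows x immutably
    // while extend needs it mutably. Copying the existing range instead is
    // the same "double the elements" move the list implementation makes.
    x.extend_from_within(..);
    assert_eq!(x, [1, 2, 3, 1, 2, 3]);
}
```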
-    ///
-    /// ```
-    /// # starlark::assert::is_true(r#"
-    /// x = ["b", "a", "n", "a", "n", "a"]
-    /// # (
-    /// x.index("a") == 1 # bAnana
-    /// # and
-    /// x.index("a", 2) == 3 # banAna
-    /// # and
-    /// x.index("a", -2) == 5 # bananA
-    /// # )"#);
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn index<'v>(
-        this: &ListRef<'v>,
-        #[starlark(require = pos)] needle: Value<'v>,
-        #[starlark(require = pos, default = NoneOr::None)] start: NoneOr<i32>,
-        #[starlark(require = pos, default = NoneOr::None)] end: NoneOr<i32>,
-    ) -> anyhow::Result<i32> {
-        let (start, end) =
-            convert_indices(this.len() as i32, start.into_option(), end.into_option());
-        if let Some(haystack) = this.get(start..end) {
-            for (i, x) in haystack.iter().enumerate() {
-                if x.equals(needle)? {
-                    return Ok((i + start) as i32);
-                }
-            }
-        }
-        Err(anyhow::anyhow!(
-            "Element '{}' not found in '{}'",
-            needle,
-            this
-        ))
-    }
-
-    /// [list.insert](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#list·insert
-    /// ): insert an element in a list.
-    ///
-    /// `L.insert(i, x)` inserts the value `x` in the list L at index `i`,
-    /// moving higher-numbered elements along by one. It returns `None`.
-    ///
-    /// As usual, the index `i` must be an `int`. If its value is negative,
-    /// the length of the list is added, then its value is clamped to the
-    /// nearest value in the range `[0:len(L)]` to yield the effective index.
-    ///
-    /// `insert` fails if the list is frozen or has active iterators.
-    ///
-    /// ```
-    /// # starlark::assert::is_true(r#"
-    /// x = ["b", "c", "e"]
-    /// x.insert(0, "a")
-    /// x.insert(-1, "d")
-    /// x == ["a", "b", "c", "d", "e"]
-    /// # "#);
-    /// ```
-    fn insert<'v>(
-        this: Value<'v>,
-        #[starlark(require = pos)] index: i32,
-        #[starlark(require = pos)] el: Value<'v>,
-        heap: &'v Heap,
-    ) -> anyhow::Result<NoneType> {
-        let this = ListData::from_value_mut(this)?;
-        let index = convert_index(this.len() as i32, index);
-        this.insert(index, el, heap);
-        Ok(NoneType)
-    }
-
-    /// [list.pop](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#list·pop
-    /// ): removes and returns the last element of a list.
-    ///
-    /// `L.pop([index])` removes and returns the last element of the list L, or,
-    /// if the optional index is provided, at that index.
-    ///
-    /// `pop` fails if the index is negative or not less than the length of
-    /// the list, or if the list is frozen or has active iterators.
-    ///
-    /// ```
-    /// # starlark::assert::is_true(r#"
-    /// x = [1, 2, 3]
-    /// # (
-    /// x.pop() == 3
-    /// # and
-    /// x.pop() == 2
-    /// # and
-    /// x == [1]
-    /// # )"#);
-    /// ```
-    fn pop<'v>(
-        this: Value<'v>,
-        #[starlark(require = pos)] index: Option<i32>,
-    ) -> anyhow::Result<Value<'v>> {
-        let this = ListData::from_value_mut(this)?;
-        let index = index.unwrap_or_else(|| (this.len() as i32) - 1);
-        if index < 0 || index >= this.len() as i32 {
-            return Err(ValueError::IndexOutOfBound(index).into());
-        }
-        Ok(this.remove(index as usize))
-    }
-
-    /// [list.remove](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#list·remove
-    /// ): remove a value from a list
-    ///
-    /// `L.remove(x)` removes the first occurrence of the value `x` from the
-    /// list L, and returns `None`.
-    ///
-    /// `remove` fails if the list does not contain `x`, is frozen, or has
-    /// active iterators.
- /// - /// ``` - /// # starlark::assert::is_true(r#" - /// x = [1, 2, 3, 2] - /// x.remove(2) - /// # t = ( - /// x == [1, 3, 2] - /// # ) - /// x.remove(2) - /// # (t and ( - /// x == [1, 3] - /// # ))"#); - /// ``` - /// - /// A subsequent call to `x.remove(2)` would yield an error because the - /// element won't be found. - /// - /// ``` - /// # starlark::assert::fail(r#" - /// x = [1, 2, 3, 2] - /// x.remove(2) - /// x.remove(2) - /// x.remove(2) # error: not found - /// # "#, "not found"); - /// ``` - fn remove<'v>( - this: Value<'v>, - #[starlark(require = pos)] needle: Value<'v>, - ) -> anyhow::Result { - // Written in two separate blocks so we ensure we give up the - // immutable borrow before making the mutable borrow. - let position = { - let this = ListRef::from_value(this).unwrap(); - let position = this.iter().position(|v| v == needle); - match position { - Some(i) => i, - None => { - return Err(anyhow::anyhow!( - "Element '{}' not found in list '{}'", - needle, - this - )); - } - } - }; - { - // now mutate it with no further value calls - let this = ListData::from_value_mut(this)?; - this.remove(position); - Ok(NoneType) - } - } -} - -#[cfg(test)] -mod tests { - use crate::assert; - - #[test] - fn test_error_codes() { - assert::fail( - "x = [1, 2, 3, 2]; x.remove(2); x.remove(2); x.remove(2)", - "not found in list", - ); - } - - #[test] - fn test_index() { - // Should fail, but should not panic. - assert::fail("[True].index(True, 1, 0)", "not found"); - } - - #[test] - fn recursive_list() { - assert::is_true( - r#" -cyclic = [1, 2, 3] -cyclic[1] = cyclic -len(cyclic) == 3 and len(cyclic[1]) == 3 - "#, - ) - } -} diff --git a/starlark-rust/starlark/src/stdlib/mod.rs b/starlark-rust/starlark/src/stdlib/mod.rs deleted file mode 100644 index c52699bb08fd9..0000000000000 --- a/starlark-rust/starlark/src/stdlib/mod.rs +++ /dev/null @@ -1,247 +0,0 @@ -/* - * Copyright 2018 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! A module with the standard function and constants that are by default in all -//! dialect of Starlark - -use dupe::Dupe; - -use crate::environment::GlobalsBuilder; - -pub(crate) mod breakpoint; -pub(crate) mod dict; -pub(crate) mod extra; -mod funcs; -pub(crate) mod internal; -pub(crate) mod json; -pub(crate) mod list; -pub(crate) mod partial; -pub(crate) mod string; -pub(crate) mod structs; - -pub use extra::PrintHandler; - -use crate::stdlib::funcs::globals::register_globals; -use crate::stdlib::internal::register_internal; -use crate::values::enumeration::globals::register_enum; -use crate::values::record::globals::register_record; -use crate::values::typing; - -/// Return the default global environment, it is not yet frozen so that a caller -/// can refine it. -/// -/// For example `stdlib::standard_environment().freeze().child("test")` create a -/// child environment of this global environment that have been frozen. 
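`remove` above is deliberately written in two blocks: the immutable borrow used to find the element (which may run user-defined `==`) must end before the mutable borrow that removes it. The same discipline demonstrated with a `RefCell`, where getting it wrong panics at runtime instead of failing to compile:

```rust
use std::cell::RefCell;

fn main() {
    let list = RefCell::new(vec![1, 2, 3, 2]);
    // Phase 1: find the position under an immutable borrow, then drop it
    // by letting the block end.
    let position = {
        let this = list.borrow();
        this.iter().position(|&v| v == 2)
    };
    // Phase 2: mutate with no immutable borrow alive; borrow_mut() would
    // panic here if the first borrow were still in scope.
    if let Some(i) = position {
        list.borrow_mut().remove(i);
    }
    assert_eq!(*list.borrow(), [1, 3, 2]);
}
```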
-pub(crate) fn standard_environment() -> GlobalsBuilder { - GlobalsBuilder::new().with(register_globals) -} - -/// The extra library definitions available in this Starlark implementation, but not in the standard. -#[derive(PartialEq, Eq, Copy, Clone, Dupe)] -pub enum LibraryExtension { - /// Definitions to support the `struct` type, the `struct()` constructor. - StructType, - /// Definitions to support the `record` type, the `record()` constructor and `field()` function. - RecordType, - /// Definitions to support the `enum` type, the `enum()` constructor. - EnumType, - /// A function `map(f, xs)` which applies `f` to each element of `xs` and returns the result. - Map, - /// A function `filter(f, xs)` which applies `f` to each element of `xs` and returns those for which `f` returns `True`. - /// As a special case, `filter(None, xs)` removes all `None` values. - Filter, - /// Partially apply a function, `partial(f, *args, **kwargs)` will create a function where those `args` `kwargs` - /// are already applied to `f`. - Partial, - /// Create a regex from a string. - ExperimentalRegex, - /// Add a function `debug(x)` which shows the Rust [`Debug`](std::fmt::Debug) representation of a value. - /// Useful when debugging, but the output should not be considered stable. - Debug, - /// Add a function `print(x)` which prints to stderr. - Print, - /// Add a function `pprint(x)` which pretty-prints to stderr. - Pprint, - /// Add a function `breakpoint()` which will drop into a console-module evaluation prompt. - Breakpoint, - /// Add a function `json()` which will generate JSON for a module. - Json, - /// Provides `typing.All`, `typing.Callable` etc. - /// Usually used in conjunction with - /// [`Dialect::enable_types`](crate::syntax::Dialect::enable_types). - Typing, - /// Utilities exposing starlark-rust internals. - /// These are not for production use. - Internal, - // Make sure if you add anything new, you add it to `all` below. -} - -impl LibraryExtension { - /// A list of all extensions that will be updated as new methods are added. - pub(crate) fn all() -> &'static [Self] { - use LibraryExtension::*; - &[ - StructType, - RecordType, - EnumType, - Map, - Filter, - Partial, - ExperimentalRegex, - Debug, - Print, - Pprint, - Breakpoint, - Json, - Typing, - Internal, - ] - } - - /// Add a specific extension to a [`GlobalsBuilder`]. 
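For consumers, the usual entry point to these extensions is building globals from a chosen set rather than calling `add` directly. A usage sketch; `GlobalsBuilder::extended_by` is, as I recall, the constructor the crate exposes for this, so treat the exact name as an assumption:

```rust
use starlark::environment::{GlobalsBuilder, LibraryExtension};

fn main() {
    // Standard globals plus the print() and json() extensions only.
    let globals = GlobalsBuilder::extended_by(&[LibraryExtension::Print, LibraryExtension::Json])
        .build();
    assert!(globals.get("json").is_some());
    assert!(globals.get("partial").is_none()); // not enabled above
}
```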
- pub fn add(self, builder: &mut GlobalsBuilder) { - use LibraryExtension::*; - match self { - StructType => structs::global(builder), - RecordType => register_record(builder), - EnumType => register_enum(builder), - Map => extra::map(builder), - Filter => extra::filter(builder), - Partial => partial::partial(builder), - ExperimentalRegex => extra::regex(builder), - Debug => extra::debug(builder), - Print => extra::print(builder), - Pprint => extra::pprint(builder), - Breakpoint => breakpoint::global(builder), - Json => json::json(builder), - Typing => typing::globals::register_typing(builder), - Internal => register_internal(builder), - } - } -} - -#[cfg(test)] -mod tests { - use allocative::Allocative; - use derive_more::Display; - use dupe::Dupe; - use starlark_derive::starlark_module; - use starlark_derive::starlark_value; - use starlark_derive::NoSerialize; - - use crate as starlark; - use crate::any::ProvidesStaticType; - use crate::assert::Assert; - use crate::environment::GlobalsBuilder; - use crate::environment::Methods; - use crate::environment::MethodsBuilder; - use crate::environment::MethodsStatic; - use crate::starlark_simple_value; - use crate::values::none::NoneType; - use crate::values::StarlarkValue; - use crate::values::UnpackValue; - use crate::values::Value; - use crate::values::ValueLike; - - #[test] - fn test_no_arg() { - #[starlark_module] - fn global(builder: &mut GlobalsBuilder) { - fn nop() -> anyhow::Result { - Ok(NoneType) - } - } - - let env = GlobalsBuilder::new().with(global).build(); - env.get("nop").unwrap(); - } - - #[test] - fn test_value_attributes() { - #[derive( - Copy, - Clone, - Debug, - Dupe, - PartialEq, - Display, - ProvidesStaticType, - NoSerialize, - Allocative - )] - struct Bool2(bool); - starlark_simple_value!(Bool2); - - #[starlark_value(type = "bool2")] - impl<'v> StarlarkValue<'v> for Bool2 { - fn get_methods() -> Option<&'static Methods> { - static RES: MethodsStatic = MethodsStatic::new(); - RES.methods(methods) - } - - fn equals(&self, other: Value<'v>) -> anyhow::Result { - match other.downcast_ref::() { - None => Ok(false), - Some(v) => Ok(*v == *self), - } - } - } - - impl<'v> UnpackValue<'v> for Bool2 { - fn unpack_value(value: Value<'v>) -> Option { - Some(*value.downcast_ref::().unwrap()) - } - } - - #[starlark_module] - fn globals(builder: &mut GlobalsBuilder) { - const True2: Bool2 = Bool2(true); - const False2: Bool2 = Bool2(false); - } - - #[starlark_module] - fn methods(builder: &mut MethodsBuilder) { - #[starlark(attribute)] - fn invert1(this: Bool2) -> anyhow::Result { - Ok(Bool2(!this.0)) - } - - fn invert2(this: Bool2) -> anyhow::Result { - Ok(Bool2(!this.0)) - } - } - - let mut a = Assert::new(); - a.globals_add(globals); - a.all_true( - r#" -True2 == True2 -True2 != False2 -True2.invert1 == False2 -False2.invert1 == True2 -False2.invert2() == True2 -hasattr(True2, "invert1") == True -hasattr(True2, "invert2") == True -hasattr(True2, "invert3") == False -dir(False2) == ["invert1","invert2"] -getattr(False2, "invert1") == True2 -getattr(True2, "invert1") == False2 -getattr(True2, "invert2")() == False2 -"#, - ); - } -} diff --git a/starlark-rust/starlark/src/stdlib/partial.rs b/starlark-rust/starlark/src/stdlib/partial.rs index 6aa84d1439637..e0b6fbe6281e3 100644 --- a/starlark-rust/starlark/src/stdlib/partial.rs +++ b/starlark-rust/starlark/src/stdlib/partial.rs @@ -19,17 +19,19 @@ use std::fmt; use std::fmt::Display; use allocative::Allocative; +use hashbrown::HashTable; use starlark_derive::starlark_module; use 
starlark_derive::starlark_value; use starlark_derive::NoSerialize; use starlark_syntax::slice_vec_ext::SliceExt; use starlark_syntax::slice_vec_ext::VecExt; +use starlark_syntax::value_error; use crate as starlark; use crate::any::ProvidesStaticType; use crate::coerce::coerce; use crate::coerce::Coerce; -use crate::collections::symbol_map::Symbol; +use crate::collections::symbol::symbol::Symbol; use crate::environment::GlobalsBuilder; use crate::eval::runtime::arguments::ArgNames; use crate::eval::runtime::arguments::ArgumentsFull; @@ -60,7 +62,7 @@ pub fn partial(builder: &mut GlobalsBuilder) { #[starlark(kwargs)] kwargs: DictRef<'v>, ) -> anyhow::Result> { debug_assert!(Tuple::from_value(args).is_some()); - let names = kwargs + let names: Vec<_> = kwargs .keys() .map(|x| { let x = StringValue::new(x).unwrap(); @@ -72,11 +74,16 @@ pub fn partial(builder: &mut GlobalsBuilder) { ) }) .collect(); + let mut names_index = HashTable::with_capacity(names.len()); + for (i, (k, _)) in names.iter().enumerate() { + names_index.insert_unique(k.hash(), i, |i| names[*i].0.hash()); + } Ok(Partial { func, pos: args, named: kwargs.values().collect(), names, + names_index, }) } } @@ -89,6 +96,7 @@ struct PartialGen { pos: V, named: Vec, names: Vec<(Symbol, S)>, + names_index: HashTable, } impl<'v, V: ValueLike<'v>, S> PartialGen { @@ -132,12 +140,13 @@ impl<'v> Freeze for Partial<'v> { names: self .names .into_try_map(|(s, x)| anyhow::Ok((s, x.freeze(freezer)?)))?, + names_index: self.names_index, }) } } #[starlark_value(type = FUNCTION_TYPE)] -impl<'v, V: ValueLike<'v> + 'v, S: StringValueLike<'v> + 'v> StarlarkValue<'v> for PartialGen +impl<'v, V: ValueLike<'v>, S: StringValueLike<'v>> StarlarkValue<'v> for PartialGen where Self: ProvidesStaticType<'v>, { @@ -151,21 +160,34 @@ where &self, _me: Value<'v>, args: &Arguments<'v, '_>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result> { // apply the partial arguments first, then the remaining arguments I was given let self_pos = self.pos_content(); let self_named = coerce(&self.named); let self_names = coerce(&self.names); + for (symbol, _) in args.0.names.names() { + if self + .names_index + .find(symbol.hash(), |i| &self.names[*i].0 == symbol) + .is_some() + { + return Err(value_error!( + "partial() got multiple values for argument `{}`", + symbol.as_str(), + )); + } + } + eval.alloca_concat(self_pos, args.0.pos, |pos, eval| { eval.alloca_concat(self_named, args.0.named, |named, eval| { eval.alloca_concat(self_names, args.0.names.names(), |names, eval| { let params = Arguments(ArgumentsFull { pos, named, - names: ArgNames::new(names), + names: ArgNames::new_unique(names), args: args.0.args, kwargs: args.0.kwargs, }); diff --git a/starlark-rust/starlark/src/stdlib/string.rs b/starlark-rust/starlark/src/stdlib/string.rs deleted file mode 100644 index 9737a7299e176..0000000000000 --- a/starlark-rust/starlark/src/stdlib/string.rs +++ /dev/null @@ -1,1333 +0,0 @@ -/* - * Copyright 2018 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
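The new `names_index` in this hunk lets `invoke` reject a keyword that was already bound when the partial was created, instead of silently forwarding duplicates. The real code keys a `hashbrown::HashTable` by pre-computed symbol hashes; the check in spirit, with std types:

```rust
use std::collections::HashSet;

/// Sketch of the duplicate-keyword check: names bound at partial() time
/// conflict with names supplied at call time.
fn check_no_duplicates(bound: &[&str], call: &[&str]) -> Result<(), String> {
    let bound: HashSet<&str> = bound.iter().copied().collect();
    for name in call {
        if bound.contains(name) {
            return Err(format!("partial() got multiple values for argument `{name}`"));
        }
    }
    Ok(())
}

fn main() {
    assert!(check_no_duplicates(&["x"], &["y"]).is_ok());
    assert!(check_no_duplicates(&["x"], &["x"]).is_err());
}
```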
- * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! Methods for the `string` type. - -use std::cmp; - -use starlark_derive::starlark_module; -use starlark_syntax::fast_string; -use starlark_syntax::fast_string::convert_str_indices; -use starlark_syntax::fast_string::StrIndices; -use starlark_syntax::slice_vec_ext::SliceExt; - -use crate as starlark; -use crate::environment::MethodsBuilder; -use crate::eval::Arguments; -use crate::eval::Evaluator; -use crate::typing::Ty; -use crate::values::list::ListOf; -use crate::values::none::NoneOr; -use crate::values::string::dot_format; -use crate::values::type_repr::StarlarkTypeRepr; -use crate::values::types::string::iter::iterate_chars; -use crate::values::types::string::iter::iterate_codepoints; -use crate::values::types::tuple::value::Tuple; -use crate::values::typing::iter::StarlarkIter; -use crate::values::Heap; -use crate::values::StringValue; -use crate::values::UnpackValue; -use crate::values::Value; -use crate::values::ValueOfUnchecked; - -// This does not exists in rust, split would cut the string incorrectly and -// split_whitespace cannot take a n parameter. -fn splitn_whitespace(s: &str, maxsplit: usize) -> Vec { - let mut v = Vec::new(); - let mut cur = String::new(); - let mut split = 1; - let mut eat_ws = true; - for c in s.chars() { - if split >= maxsplit && !eat_ws { - cur.push(c) - } else if c.is_whitespace() { - if !cur.is_empty() { - v.push(cur); - cur = String::new(); - split += 1; - eat_ws = true; - } - } else { - eat_ws = false; - cur.push(c) - } - } - if !cur.is_empty() { - v.push(cur) - } - v -} - -fn rsplitn_whitespace(s: &str, maxsplit: usize) -> Vec { - let mut v = Vec::new(); - let mut cur = String::new(); - let mut split = 1; - let mut eat_ws = true; - for c in s.chars().rev() { - if split >= maxsplit && !eat_ws { - cur.push(c) - } else if c.is_whitespace() { - if !cur.is_empty() { - v.push(cur.chars().rev().collect()); - cur = String::new(); - split += 1; - eat_ws = true; - } - } else { - eat_ws = false; - cur.push(c) - } - } - if !cur.is_empty() { - v.push(cur.chars().rev().collect()); - } - v.reverse(); - v -} - -enum StringOrTuple<'v> { - String(&'v str), - Tuple(Vec<&'v str>), -} - -impl<'v> StarlarkTypeRepr for StringOrTuple<'v> { - fn starlark_type_repr() -> Ty { - Ty::union2(String::starlark_type_repr(), Tuple::starlark_type_repr()) - } -} - -impl<'v> UnpackValue<'v> for StringOrTuple<'v> { - fn expected() -> String { - "str or tuple".to_owned() - } - - fn unpack_value(value: Value<'v>) -> Option { - if let Some(s) = value.unpack_str() { - Some(Self::String(s)) - } else { - Some(Self::Tuple( - Tuple::from_value(value)? - .iter() - .map(|x| x.unpack_str()) - .collect::>()?, - )) - } - } -} - -#[starlark_module] -pub(crate) fn string_methods(builder: &mut MethodsBuilder) { - /// [string.elems]( - /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·elems - /// ): returns an iterable of the bytes values of a string. - /// - /// `S.elems()` returns an iterable value containing the - /// sequence of numeric bytes values in the string S. 
- /// - /// To materialize the entire sequence of bytes, apply `list(...)` to the - /// result. - /// - /// ``` - /// # starlark::assert::is_true(r#" - /// list("Hello, 世界".elems()) == [ - /// "H", "e", "l", "l", "o", ",", " ", "世", "界"] - /// # "#); - /// ``` - fn elems<'v>( - this: StringValue<'v>, - heap: &'v Heap, - ) -> anyhow::Result>> { - Ok(iterate_chars(this, heap)) - } - - /// string.capitalize: returns a copy of string S, where the first character (if any) is converted to uppercase; - /// all other characters are converted to lowercase. - /// - /// ``` - /// # starlark::assert::all_true(r#" - /// "hello, world!".capitalize() == "Hello, world!" - /// "Hello, World!".capitalize() == "Hello, world!" - /// "".capitalize() == "" - /// # "#); - /// ``` - #[starlark(speculative_exec_safe)] - fn capitalize(this: &str) -> anyhow::Result { - let mut result = String::with_capacity(this.len()); - for (i, c) in this.chars().enumerate() { - if i == 0 { - result.extend(c.to_uppercase()) - } else { - result.extend(c.to_lowercase()) - } - } - Ok(result) - } - - /// [string.codepoints]( - /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·codepoints - /// ): returns an iterable of the unicode codepoint of a string. - /// - /// `S.codepoints()` returns an iterable value containing the - /// sequence of integer Unicode code points encoded by the string S. - /// Each invalid code within the string is treated as if it encodes the - /// Unicode replacement character, U+FFFD. - /// - /// By returning an iterable, not a list, the cost of decoding the string - /// is deferred until actually needed; apply `list(...)` to the result to - /// materialize the entire sequence. - /// - /// ``` - /// # starlark::assert::all_true(r#" - /// list("Hello, 世界".codepoints()) == [72, 101, 108, 108, 111, 44, 32, 19990, 30028] - /// # "#); - /// ``` - fn codepoints<'v>( - this: StringValue<'v>, - heap: &'v Heap, - ) -> anyhow::Result>> { - Ok(iterate_codepoints(this, heap)) - } - - /// [string.count]( - /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·count - /// ): count the number of occurrences of a string in another string. - /// - /// `S.count(sub[, start[, end]])` returns the number of occcurences of - /// `sub` within the string S, or, if the optional substring indices - /// `start` and `end` are provided, within the designated substring of S. - /// They are interpreted according to Skylark's [indexing conventions]( - /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#indexing). - /// - /// This implementation does not count occurrence of `sub` in the string `S` - /// that overlap other occurrence of S (which can happen if some suffix of S - /// is a prefix of S). For instance, `"abababa".count("aba")` returns 2 - /// for `[aba]a[aba]`, not counting the middle occurrence: `ab[aba]ba` - /// (this is following Python behavior). - /// - /// ``` - /// # starlark::assert::all_true(r#" - /// "hello, world!".count("o") == 2 - /// "abababa".count("aba") == 2 - /// "hello, world!".count("o", 7, 12) == 1 # in "world" - /// # "#); - /// ``` - #[starlark(speculative_exec_safe)] - fn count( - this: &str, - #[starlark(require = pos)] needle: &str, - #[starlark(require = pos, default = NoneOr::None)] start: NoneOr, - #[starlark(require = pos, default = NoneOr::None)] end: NoneOr, - ) -> anyhow::Result { - if let Some(StrIndices { haystack, .. 
-    /// [string.endswith](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·endswith
-    /// ): determine if a string ends with a given suffix.
-    ///
-    /// `S.endswith(suffix)` reports whether the string S has the specified
-    /// suffix.
-    ///
-    /// ```
-    /// # starlark::assert::all_true(r#"
-    /// "filename.sky".endswith(".sky") == True
-    /// # "#);
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn endswith(
-        this: &str,
-        #[starlark(require = pos)] suffix: StringOrTuple,
-    ) -> anyhow::Result<bool> {
-        match suffix {
-            StringOrTuple::String(x) => Ok(this.ends_with(x)),
-            StringOrTuple::Tuple(xs) => Ok(xs.iter().any(|x| this.ends_with(x))),
-        }
-    }
-
-    /// [string.find](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·find
-    /// ): find a substring in a string.
-    ///
-    /// `S.find(sub[, start[, end]])` returns the index of the first
-    /// occurrence of the substring `sub` within S.
-    ///
-    /// If either or both of `start` or `end` are specified,
-    /// they specify a subrange of S to which the search should be restricted.
-    /// They are interpreted according to Skylark's [indexing
-    /// conventions](#indexing).
-    ///
-    /// If no occurrence is found, `find` returns -1.
-    ///
-    /// ```
-    /// # starlark::assert::all_true(r#"
-    /// "bonbon".find("on") == 1
-    /// "bonbon".find("on", 2) == 4
-    /// "bonbon".find("on", 2, 5) == -1
-    /// # "#);
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn find(
-        this: &str,
-        #[starlark(require = pos)] needle: &str,
-        #[starlark(require = pos, default = NoneOr::None)] start: NoneOr<i32>,
-        #[starlark(require = pos, default = NoneOr::None)] end: NoneOr<i32>,
-    ) -> anyhow::Result<i32> {
-        if let Some(StrIndices { start, haystack }) =
-            convert_str_indices(this, start.into_option(), end.into_option())
-        {
-            if let Some(index) = haystack.find(needle) {
-                let index = fast_string::len(&haystack[..index]);
-                return Ok((start + index).0 as i32);
-            }
-        }
-        Ok(-1)
-    }
-
-    /// [string.format](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·format
-    /// ): format a string.
-    ///
-    /// `S.format(*args, **kwargs)` returns a version of the format string S
-    /// in which bracketed portions `{...}` are replaced
-    /// by arguments from `args` and `kwargs`.
-    ///
-    /// Within the format string, a pair of braces `{{` or `}}` is treated as
-    /// a literal open or close brace.
-    /// Each unpaired open brace must be matched by a close brace `}`.
-    /// The optional text between corresponding open and close braces
-    /// specifies which argument to use and how to format it, and consists of
-    /// three components, all optional:
-    /// a field name, a conversion preceded by '`!`', and a format specifier
-    /// preceded by '`:`'.
-    ///
-    /// ```text
-    /// {field}
-    /// {field:spec}
-    /// {field!conv}
-    /// {field!conv:spec}
-    /// ```
-    ///
-    /// The *field name* may be either a decimal number or a keyword.
-    /// A number is interpreted as the index of a positional argument;
-    /// a keyword specifies the value of a keyword argument.
-    /// If all the numeric field names form the sequence 0, 1, 2, and so on,
-    /// they may be omitted and those values will be implied; however,
-    /// the explicit and implicit forms may not be mixed.
-    ///
-    /// The *conversion* specifies how to convert an argument value `x` to a
-    /// string. It may be either `!r`, which converts the value using
-    /// `repr(x)`, or `!s`, which converts the value using `str(x)` and is
-    /// the default.
-    ///
-    /// The *format specifier*, after a colon, specifies field width,
-    /// alignment, padding, and numeric precision.
-    /// Currently it must be empty, but it is reserved for future use.
-    ///
-    /// ```rust
-    /// # starlark::assert::all_true(r#"
-    /// "a {} c".format(3) == "a 3 c"
-    /// "a{x}b{y}c{}".format(1, x=2, y=3) == "a2b3c1"
-    /// "a{}b{}c".format(1, 2) == "a1b2c"
-    /// "({1}, {0})".format("zero", "one") == "(one, zero)"
-    /// "Is {0!r} {0!s}?".format("heterological") == "Is \"heterological\" heterological?"
-    /// # "#);
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn format<'v>(
-        this: &str,
-        args: &Arguments<'v, '_>,
-        eval: &mut Evaluator<'v, '_>,
-    ) -> anyhow::Result<StringValue<'v>> {
-        let iter = args.positions(eval.heap())?;
-        dot_format::format(
-            this,
-            iter,
-            args.names()?,
-            &mut eval.string_pool,
-            eval.module_env.heap(),
-        )
-    }
-
-    /// [string.index](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·index
-    /// ): search a substring inside a string, failing on not found.
-    ///
-    /// `S.index(sub[, start[, end]])` returns the index of the first
-    /// occurrence of the substring `sub` within S, like `S.find`, except
-    /// that if the substring is not found, the operation fails.
-    ///
-    /// ```
-    /// # starlark::assert::all_true(r#"
-    /// "bonbon".index("on") == 1
-    /// "bonbon".index("on", 2) == 4
-    /// # "#);
-    /// # starlark::assert::fail(r#"
-    /// "bonbon".index("on", 2, 5)    # error: not found
-    /// # "#, "not found");
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn index(
-        this: &str,
-        #[starlark(require = pos)] needle: &str,
-        #[starlark(require = pos, default = NoneOr::None)] start: NoneOr<i32>,
-        #[starlark(require = pos, default = NoneOr::None)] end: NoneOr<i32>,
-    ) -> anyhow::Result<i32> {
-        if let Some(StrIndices { start, haystack }) =
-            convert_str_indices(this, start.into_option(), end.into_option())
-        {
-            if let Some(index) = haystack.find(needle) {
-                let index = fast_string::len(&haystack[..index]);
-                return Ok((start + index).0 as i32);
-            }
-        }
-        Err(anyhow::anyhow!(
-            "Substring '{}' not found in '{}'",
-            needle,
-            this
-        ))
-    }
-
-    /// [string.isalnum](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·isalnum
-    /// ): test if a string is composed only of letters and digits.
-    ///
-    /// `S.isalnum()` reports whether the string S is non-empty and consists
-    /// only of Unicode letters and digits.
-    ///
-    /// ```
-    /// # starlark::assert::all_true(r#"
-    /// "base64".isalnum() == True
-    /// "Catch-22".isalnum() == False
-    /// # "#);
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn isalnum(this: &str) -> anyhow::Result<bool> {
-        if this.is_empty() {
-            return Ok(false);
-        }
-        for c in this.chars() {
-            if !c.is_alphanumeric() {
-                return Ok(false);
-            }
-        }
-        Ok(true)
-    }
-
-    /// [string.isalpha](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·isalpha
-    /// ): test if a string is composed only of letters.
-    ///
-    /// `S.isalpha()` reports whether the string S is non-empty and consists
-    /// only of Unicode letters.
-    ///
-    /// ```
-    /// # starlark::assert::all_true(r#"
-    /// "ABC".isalpha() == True
-    /// "Catch-22".isalpha() == False
-    /// "".isalpha() == False
-    /// # "#);
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn isalpha(this: &str) -> anyhow::Result<bool> {
-        if this.is_empty() {
-            return Ok(false);
-        }
-        for c in this.chars() {
-            if !c.is_alphabetic() {
-                return Ok(false);
-            }
-        }
-        Ok(true)
-    }
-
-    /// [string.isdigit](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·isdigit
-    /// ): test if a string is composed only of digits.
-    ///
-    /// `S.isdigit()` reports whether the string S is non-empty and consists
-    /// only of Unicode digits.
-    ///
-    /// ```
-    /// # starlark::assert::all_true(r#"
-    /// "123".isdigit() == True
-    /// "Catch-22".isdigit() == False
-    /// "".isdigit() == False
-    /// # "#);
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn isdigit(this: &str) -> anyhow::Result<bool> {
-        if this.is_empty() {
-            return Ok(false);
-        }
-        for c in this.chars() {
-            if !c.is_numeric() {
-                return Ok(false);
-            }
-        }
-        Ok(true)
-    }
-
-    /// [string.islower](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·islower
-    /// ): test if all letters of a string are lowercase.
-    ///
-    /// `S.islower()` reports whether the string S contains at least one cased
-    /// Unicode letter, and all such letters are lowercase.
-    ///
-    /// ```
-    /// # starlark::assert::all_true(r#"
-    /// "hello, world".islower() == True
-    /// "Catch-22".islower() == False
-    /// "123".islower() == False
-    /// # "#);
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn islower(this: &str) -> anyhow::Result<bool> {
-        let mut result = false;
-        for c in this.chars() {
-            if c.is_uppercase() {
-                return Ok(false);
-            } else if c.is_lowercase() {
-                result = true;
-            }
-        }
-        Ok(result)
-    }
-
-    /// [string.isspace](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·isspace
-    /// ): test if all characters of a string are whitespace.
-    ///
-    /// `S.isspace()` reports whether the string S is non-empty and consists
-    /// only of Unicode spaces.
-    ///
-    /// ```
-    /// # starlark::assert::all_true(r#"
-    /// "    ".isspace() == True
-    /// "\r\t\n".isspace() == True
-    /// "".isspace() == False
-    /// # "#);
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn isspace(this: &str) -> anyhow::Result<bool> {
-        if this.is_empty() {
-            return Ok(false);
-        }
-        for c in this.chars() {
-            if !c.is_whitespace() {
-                return Ok(false);
-            }
-        }
-        Ok(true)
-    }
-
-    /// [string.istitle](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·istitle
-    /// ): test if the string is title cased.
-    ///
-    /// `S.istitle()` reports whether the string S contains at least one cased
-    /// Unicode letter, and all such letters that begin a word are in title
-    /// case.
-    ///
-    /// ```
-    /// # starlark::assert::all_true(r#"
-    /// "Hello, World!".istitle() == True
-    /// "Catch-22".istitle() == True
-    /// "HAL-9000".istitle() == False
-    /// "123".istitle() == False
-    /// # "#);
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn istitle(this: &str) -> anyhow::Result<bool> {
-        let mut last_space = true;
-        let mut result = false;
-
-        for c in this.chars() {
-            if !c.is_alphabetic() {
-                last_space = true;
-            } else {
-                if last_space {
-                    if c.is_lowercase() {
-                        return Ok(false);
-                    }
-                } else if c.is_uppercase() {
-                    return Ok(false);
-                }
-                if c.is_alphabetic() {
-                    result = true;
-                }
-                last_space = false;
-            }
-        }
-        Ok(result)
-    }
-
-    /// [string.isupper](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·isupper
-    /// ): test if all letters of a string are uppercase.
-    ///
-    /// `S.isupper()` reports whether the string S contains at least one cased
-    /// Unicode letter, and all such letters are uppercase.
-    ///
-    /// ```
-    /// # starlark::assert::all_true(r#"
-    /// "HAL-9000".isupper() == True
-    /// "Catch-22".isupper() == False
-    /// "123".isupper() == False
-    /// # "#);
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn isupper(this: &str) -> anyhow::Result<bool> {
-        let mut result = false;
-        for c in this.chars() {
-            if c.is_lowercase() {
-                return Ok(false);
-            } else if c.is_uppercase() {
-                result = true;
-            }
-        }
-        Ok(result)
-    }
-
-    /// [string.lower](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·lower
-    /// ): convert a string to all lowercase.
-    ///
-    /// `S.lower()` returns a copy of the string S with letters converted to
-    /// lowercase.
-    ///
-    /// ```
-    /// # starlark::assert::all_true(r#"
-    /// "Hello, World!".lower() == "hello, world!"
-    /// # "#);
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn lower(this: &str) -> anyhow::Result<String> {
-        Ok(this.to_lowercase())
-    }
-
-    /// [string.join](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·join
-    /// ): join elements with a separator.
-    ///
-    /// `S.join(iterable)` returns the string formed by concatenating each
-    /// element of its argument, with a copy of the string S between
-    /// successive elements. The argument must be an iterable whose elements
-    /// are strings.
-    ///
-    /// ```
-    /// # starlark::assert::all_true(r#"
-    /// ", ".join([]) == ""
-    /// ", ".join(("x", )) == "x"
-    /// ", ".join(["one", "two", "three"]) == "one, two, three"
-    /// "a".join("ctmrn".elems()) == "catamaran"
-    /// # "#);
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn join<'v>(
-        this: &str,
-        #[starlark(require = pos)] to_join: ValueOfUnchecked<'v, StarlarkIter<String>>,
-        heap: &'v Heap,
-    ) -> anyhow::Result<ValueOfUnchecked<'v, String>> {
-        #[inline(always)]
-        fn as_str<'v>(x: Value<'v>) -> anyhow::Result<&'v str> {
-            <&str>::unpack_named_param(x, "to_join")
-        }
-
-        let mut it = to_join.get().iterate(heap)?;
-        match it.next() {
-            None => Ok(ValueOfUnchecked::new(Value::new_empty_string())),
-            Some(x1) => {
-                match it.next() {
-                    None => {
-                        as_str(x1)?;
-                        // If there is a singleton we can avoid reallocation
-                        Ok(ValueOfUnchecked::new(x1))
-                    }
-                    Some(x2) => {
-                        let s1 = as_str(x1)?;
-                        let s2 = as_str(x2)?;
-                        // guess towards the upper bound, since we throw away over-allocations quickly
-                        // include a buffer (20 bytes)
-                        let n = it.size_hint().0 + 2;
-                        let guess =
-                            (cmp::max(s1.len(), s2.len()) * n) + (this.len() * (n - 1)) + 20;
-                        let mut r = String::with_capacity(guess);
-                        r.push_str(s1);
-                        r.push_str(this);
-                        r.push_str(s2);
-                        for x in it {
-                            r.push_str(this);
-                            r.push_str(as_str(x)?);
-                        }
-                        Ok(ValueOfUnchecked::new(heap.alloc(r)))
-                    }
-                }
-            }
-        }
-    }
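The capacity `guess` above deliberately over-estimates: it assumes every remaining element is as long as the longer of the first two, then adds a 20-byte buffer. A worked check of that arithmetic for `", ".join(["one", "two", "three"])`, with the values hard-coded for illustration:

```rust
#[test]
fn join_capacity_guess_is_an_upper_bound() {
    // s1 = "one", s2 = "two", one element left in the iterator, so n = 1 + 2.
    let (s1, s2, sep, remaining) = ("one", "two", ", ", 1usize);
    let n = remaining + 2;
    let guess = s1.len().max(s2.len()) * n + sep.len() * (n - 1) + 20;
    assert_eq!(guess, 33); // 3 * 3 + 2 * 2 + 20
    assert!(guess >= "one, two, three".len()); // 15 bytes actually needed
}
```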
-    /// [string.lstrip](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·lstrip
-    /// ): trim leading whitespace.
-    ///
-    /// `S.lstrip()` returns a copy of the string S with leading whitespace removed.
-    /// In most cases instead of passing an argument you should use `removeprefix`.
-    ///
-    /// ```
-    /// # starlark::assert::all_true(r#"
-    /// "  hello  ".lstrip() == "hello  "
-    /// "x!hello  ".lstrip("!x ") == "hello  "
-    /// # "#);
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn lstrip<'v>(
-        this: StringValue<'v>,
-        #[starlark(require = pos)] chars: Option<&str>,
-        heap: &'v Heap,
-    ) -> anyhow::Result<StringValue<'v>> {
-        let res = match chars {
-            None => this.trim_start(),
-            Some(s) => this.trim_start_matches(|c| s.contains(c)),
-        };
-        if res.len() == this.len() {
-            Ok(this)
-        } else {
-            Ok(heap.alloc_str(res))
-        }
-    }
-
-    /// [string.partition](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·partition
-    /// ): partition a string into 3 components
-    ///
-    /// `S.partition(x = " ")` splits string S into three parts and returns them
-    /// as a tuple: the portion before the first occurrence of string `x`,
-    /// `x` itself, and the portion following it.
-    /// If S does not contain `x`, `partition` returns `(S, "", "")`.
-    ///
-    /// `partition` fails if `x` is not a string, or is the empty string.
-    ///
-    /// ```
-    /// # starlark::assert::all_true(r#"
-    /// "one/two/three".partition("/") == ("one", "/", "two/three")
-    /// "one".partition("/") == ("one", "", "")
-    /// # "#);
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn partition<'v>(
-        this: StringValue<'v>,
-        #[starlark(require = pos)] needle: StringValue<'v>,
-        heap: &'v Heap,
-    ) -> anyhow::Result<(StringValue<'v>, StringValue<'v>, StringValue<'v>)> {
-        if needle.is_empty() {
-            return Err(anyhow::anyhow!(
-                "Empty separator cannot be used for partitioning"
-            ));
-        }
-        if let Some(offset) = this.find(needle.as_str()) {
-            let offset2 = offset + needle.len();
-            Ok((
-                heap.alloc_str(this.get(..offset).unwrap()),
-                needle,
-                heap.alloc_str(this.get(offset2..).unwrap()),
-            ))
-        } else {
-            let empty = StringValue::default();
-            Ok((this, empty, empty))
-        }
-    }
-
-    /// [string.replace](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·replace
-    /// ): replace all occurrences of a substring.
-    ///
-    /// `S.replace(old, new[, count])` returns a copy of string S with all
-    /// occurrences of substring `old` replaced by `new`. If the optional
-    /// argument `count`, which must be an `int`, is non-negative, it
-    /// specifies a maximum number of occurrences to replace.
-    ///
-    /// ```
-    /// # starlark::assert::all_true(r#"
-    /// "banana".replace("a", "o") == "bonono"
-    /// "banana".replace("a", "o", 2) == "bonona"
-    /// "banana".replace("z", "x") == "banana"
-    /// "banana".replace("", "x") == "xbxaxnxaxnxax"
-    /// "banana".replace("", "x", 2) == "xbxanana"
-    /// "".replace("", "x") == "x"
-    /// # "#);
-    /// # starlark::assert::fail(r#"
-    /// "banana".replace("a", "o", -2)  # error: argument was negative
-    /// # "#, "argument was negative");
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn replace<'v>(
-        this: StringValue<'v>,
-        #[starlark(require = pos)] old: &str,
-        #[starlark(require = pos)] new: &str,
-        #[starlark(require = pos)] count: Option<i32>,
-        heap: &'v Heap,
-    ) -> anyhow::Result<StringValue<'v>> {
-        match count {
-            Some(count) if count >= 0 => {
-                Ok(heap.alloc_str(&this.replacen(old, new, count as usize)))
-            }
-            Some(count) => Err(anyhow::anyhow!(
-                "Replace final argument was negative '{}'",
-                count
-            )),
-            None => {
-                // Optimise `replace` using the Rust standard library definition,
-                // but avoiding redundant allocation in the last step
-                let x = this.as_str();
-                let mut result = String::new();
-                let mut last_end = 0;
-                for (start, part) in x.match_indices(old) {
-                    result.push_str(unsafe { x.get_unchecked(last_end..start) });
-                    result.push_str(new);
-                    last_end = start + part.len();
-                }
-                if result.is_empty() && last_end == 0 {
-                    Ok(this)
-                } else {
-                    Ok(heap
-                        .alloc_str_concat(&result, unsafe { x.get_unchecked(last_end..x.len()) }))
-                }
-            }
-        }
-    }
-
-    /// [string.rfind](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·rfind
-    /// ): find the last index of a substring.
-    ///
-    /// `S.rfind(sub[, start[, end]])` returns the index of the substring `sub`
-    /// within S, like `S.find`, except that `rfind` returns the index of
-    /// the substring's _last_ occurrence.
-    ///
-    /// ```
-    /// # starlark::assert::all_true(r#"
-    /// "bonbon".rfind("on") == 4
-    /// "bonbon".rfind("on", None, 5) == 1
-    /// "bonbon".rfind("on", 2, 5) == -1
-    /// # "#);
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn rfind(
-        this: &str,
-        #[starlark(require = pos)] needle: &str,
-        #[starlark(require = pos, default = NoneOr::None)] start: NoneOr<i32>,
-        #[starlark(require = pos, default = NoneOr::None)] end: NoneOr<i32>,
-    ) -> anyhow::Result<i32> {
-        if let Some(StrIndices { start, haystack }) =
-            convert_str_indices(this, start.into_option(), end.into_option())
-        {
-            if let Some(index) = haystack.rfind(needle) {
-                let index = fast_string::len(&haystack[..index]);
-                return Ok((start + index).0 as i32);
-            }
-        }
-        Ok(-1)
-    }
-
-    /// [string.rindex](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·rindex
-    /// ): find the last index of a substring, failing on not found.
-    ///
-    /// `S.rindex(sub[, start[, end]])` returns the index of the substring `sub`
-    /// within S, like `S.index`, except that `rindex` returns the index of
-    /// the substring's _last_ occurrence.
-    ///
-    /// ```
-    /// # starlark::assert::all_true(r#"
-    /// "bonbon".rindex("on") == 4
-    /// "bonbon".rindex("on", None, 5) == 1  # in "bonbo"
-    /// # "#);
-    /// # starlark::assert::fail(r#"
-    /// "bonbon".rindex("on", 2, 5)  # error: not found
-    /// # "#, "not found");
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn rindex(
-        this: &str,
-        #[starlark(require = pos)] needle: &str,
-        #[starlark(require = pos, default = NoneOr::None)] start: NoneOr<i32>,
-        #[starlark(require = pos, default = NoneOr::None)] end: NoneOr<i32>,
-    ) -> anyhow::Result<i32> {
-        if let Some(StrIndices { start, haystack }) =
-            convert_str_indices(this, start.into_option(), end.into_option())
-        {
-            if let Some(index) = haystack.rfind(needle) {
-                let index = fast_string::len(&haystack[..index]);
-                return Ok((start + index).0 as i32);
-            }
-        }
-        Err(anyhow::anyhow!(
-            "Substring '{}' not found in '{}'",
-            needle,
-            this
-        ))
-    }
-
-    /// [string.rpartition](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·rpartition
-    /// ): partition a string into 3 elements.
-    ///
-    /// `S.rpartition([x = ' '])` is like `partition`, but splits `S` at the
-    /// last occurrence of `x`.
-    ///
-    /// ```
-    /// # starlark::assert::all_true(r#"
-    /// "one/two/three".rpartition("/") == ("one/two", "/", "three")
-    /// "one".rpartition("/") == ("", "", "one")
-    /// # "#);
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn rpartition<'v>(
-        this: StringValue<'v>,
-        #[starlark(require = pos)] needle: StringValue<'v>,
-        heap: &'v Heap,
-    ) -> anyhow::Result<(StringValue<'v>, StringValue<'v>, StringValue<'v>)> {
-        if needle.is_empty() {
-            return Err(anyhow::anyhow!(
-                "Empty separator cannot be used for partitioning"
-            ));
-        }
-        if let Some(offset) = this.rfind(needle.as_str()) {
-            let offset2 = offset + needle.len();
-            Ok((
-                heap.alloc_str(this.get(..offset).unwrap()),
-                needle,
-                heap.alloc_str(this.get(offset2..).unwrap()),
-            ))
-        } else {
-            let empty = StringValue::default();
-            Ok((empty, empty, this))
-        }
-    }
-
-    /// [string.rsplit](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·rsplit
-    /// ): splits a string into substrings.
-    ///
-    /// `S.rsplit([sep[, maxsplit]])` splits a string into substrings like
-    /// `S.split`, except that when a maximum number of splits is specified,
-    /// `rsplit` chooses the rightmost splits.
-    ///
-    /// ```
-    /// # starlark::assert::all_true(r#"
-    /// "banana".rsplit("n") == ["ba", "a", "a"]
-    /// "banana".rsplit("n", 1) == ["bana", "a"]
-    /// "one two three".rsplit(None, 1) == ["one two", "three"]
-    /// # "#);
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn rsplit<'v>(
-        this: &str,
-        #[starlark(require = pos, default = NoneOr::None)] sep: NoneOr<&str>,
-        #[starlark(require = pos, default = NoneOr::None)] maxsplit: NoneOr<i32>,
-        heap: &'v Heap,
-    ) -> anyhow::Result<ValueOfUnchecked<'v, ListOf<'v, String>>> {
-        let maxsplit = match maxsplit.into_option() {
-            None => None,
-            Some(v) => {
-                if v < 0 {
-                    None
-                } else {
-                    Some((v + 1) as usize)
-                }
-            }
-        };
-        Ok(ValueOfUnchecked::new(heap.alloc_list(
-            &match sep.into_option() {
-                None => match maxsplit {
-                    None => this.split_whitespace().map(|x| heap.alloc(x)).collect(),
-                    Some(maxsplit) => rsplitn_whitespace(this, maxsplit).map(|x| heap.alloc(x)),
-                },
-                Some(sep) => {
-                    let mut v: Vec<_> = match maxsplit {
-                        None => this.rsplit(sep).map(|x| heap.alloc(x)).collect(),
-                        Some(maxsplit) => {
-                            this.rsplitn(maxsplit, sep).map(|x| heap.alloc(x)).collect()
-                        }
-                    };
-                    v.reverse();
-                    v
-                }
-            },
-        )))
-    }
-
-    /// [string.rstrip](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·rstrip
-    /// ): trim trailing whitespace.
-    ///
-    /// `S.rstrip()` returns a copy of the string S with trailing whitespace removed.
-    /// In most cases instead of passing an argument you should use `removesuffix`.
-    ///
-    /// ```
-    /// # starlark::assert::all_true(r#"
-    /// "  hello  ".rstrip() == "  hello"
-    /// "  hello!x".rstrip(" x!") == "  hello"
-    /// # "#);
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn rstrip<'v>(
-        this: StringValue<'v>,
-        #[starlark(require = pos)] chars: Option<&str>,
-        heap: &'v Heap,
-    ) -> anyhow::Result<StringValue<'v>> {
-        let res = match chars {
-            None => this.trim_end(),
-            Some(s) => this.trim_end_matches(|c| s.contains(c)),
-        };
-        if res.len() == this.len() {
-            Ok(this)
-        } else {
-            Ok(heap.alloc_str(res))
-        }
-    }
-
-    /// [string.split](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·split
-    /// ): split a string into substrings.
-    ///
-    /// `S.split([sep [, maxsplit]])` returns the list of substrings of S,
-    /// splitting at occurrences of the delimiter string `sep`.
-    ///
-    /// Consecutive occurrences of `sep` are considered to delimit empty
-    /// strings, so `'food'.split('o')` returns `['f', '', 'd']`.
-    /// Splitting an empty string with a specified separator returns `['']`.
-    /// If `sep` is the empty string, `split` fails.
-    ///
-    /// If `sep` is not specified or is `None`, `split` uses a different
-    /// algorithm: it removes all leading spaces from S
-    /// (or trailing spaces in the case of `rsplit`),
-    /// then splits the string around each consecutive non-empty sequence of
-    /// Unicode white space characters.
-    ///
-    /// If S consists only of white space, `split` returns the empty list.
-    ///
-    /// If `maxsplit` is given and non-negative, it specifies a maximum number
-    /// of splits.
-    ///
-    /// ```
-    /// # starlark::assert::all_true(r#"
-    /// "one two three".split() == ["one", "two", "three"]
-    /// "one two  three".split(" ") == ["one", "two", "", "three"]
-    /// "one two three".split(None, 1) == ["one", "two three"]
-    /// "banana".split("n") == ["ba", "a", "a"]
-    /// "banana".split("n", 1) == ["ba", "ana"]
-    /// # "#);
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn split<'v>(
-        this: &str,
-        #[starlark(require = pos, default = NoneOr::None)] sep: NoneOr<&str>,
-        #[starlark(require = pos, default = NoneOr::None)] maxsplit: NoneOr<i32>,
-        heap: &'v Heap,
-    ) -> anyhow::Result<ValueOfUnchecked<'v, ListOf<'v, String>>> {
-        let maxsplit = match maxsplit.into_option() {
-            None => None,
-            Some(v) => {
-                if v < 0 {
-                    None
-                } else {
-                    Some((v + 1) as usize)
-                }
-            }
-        };
-        Ok(ValueOfUnchecked::new(heap.alloc_list(
-            &match (sep.into_option(), maxsplit) {
-                (None, None) => this.split_whitespace().map(|x| heap.alloc(x)).collect(),
-                (None, Some(maxsplit)) => splitn_whitespace(this, maxsplit).map(|x| heap.alloc(x)),
-                (Some(sep), None) => {
-                    if sep.len() == 1 {
-                        // If we are searching for a 1-byte string, we can provide a much faster path.
-                        // Since it is one byte, given how UTF8 works, all the resultant slices must be UTF8 too.
-                        let b = sep.as_bytes()[0];
-                        let count = fast_string::count_matches_byte(this, b);
-                        let mut res = Vec::with_capacity(count + 1);
-                        res.extend(
-                            this.as_bytes()
-                                .split(|x| *x == b)
-                                .map(|x| heap.alloc(unsafe { std::str::from_utf8_unchecked(x) })),
-                        );
-                        debug_assert_eq!(res.len(), count + 1);
-                        res
-                    } else {
-                        this.split(sep).map(|x| heap.alloc(x)).collect()
-                    }
-                }
-                (Some(sep), Some(maxsplit)) => {
-                    this.splitn(maxsplit, sep).map(|x| heap.alloc(x)).collect()
-                }
-            },
-        )))
-    }
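The one-byte fast path in `split` rests on a UTF-8 invariant: an ASCII separator byte can never occur inside a multi-byte code point, so slicing the raw bytes at its positions always yields valid UTF-8. The same idea, sketched with the checked conversion in place of `from_utf8_unchecked`:

```rust
#[test]
fn single_byte_split_preserves_utf8() {
    // Cutting at an ASCII byte can only land on code-point boundaries,
    // so every piece converts back to &str losslessly.
    let pieces: Vec<&str> = "héllo,wörld"
        .as_bytes()
        .split(|b| *b == b',')
        .map(|bytes| std::str::from_utf8(bytes).expect("still valid UTF-8"))
        .collect();
    assert_eq!(pieces, ["héllo", "wörld"]);
}
```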
-    /// [string.splitlines](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·splitlines
-    /// ): return the list of lines of a string.
-    ///
-    /// `S.splitlines([keepends])` returns a list whose elements are the
-    /// successive lines of S, that is, the strings formed by splitting S at
-    /// line terminators ('\n', '\r' or '\r\n').
-    ///
-    /// The optional argument, `keepends`, is interpreted as a Boolean.
-    /// If true, line terminators are preserved in the result, though
-    /// the final element does not necessarily end with a line terminator.
-    ///
-    /// ```
-    /// # starlark::assert::all_true(r#"
-    /// "one\n\ntwo".splitlines() == ["one", "", "two"]
-    /// "one\n\ntwo".splitlines(True) == ["one\n", "\n", "two"]
-    /// "a\nb".splitlines() == ["a", "b"]
-    /// # "#);
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn splitlines<'v>(
-        this: &str,
-        #[starlark(require = pos, default = false)] keepends: bool,
-        heap: &'v Heap,
-    ) -> anyhow::Result<Vec<StringValue<'v>>> {
-        let mut s = this;
-        let mut lines: Vec<StringValue> = Vec::new();
-        loop {
-            if let Some(x) = s.find(|x| x == '\n' || x == '\r') {
-                let y = x;
-                let x = match s.get(y..y + 2) {
-                    Some("\r\n") => y + 2,
-                    _ => y + 1,
-                };
-                if keepends {
-                    lines.push(heap.alloc_str(s.get(..x).unwrap()))
-                } else {
-                    lines.push(heap.alloc_str(s.get(..y).unwrap()))
-                }
-                if x == s.len() {
-                    return Ok(lines);
-                }
-                s = s.get(x..).unwrap();
-            } else {
-                if !s.is_empty() {
-                    lines.push(heap.alloc_str(s));
-                }
-                return Ok(lines);
-            }
-        }
-    }
-
-    /// [string.startswith](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·startswith
-    /// ): test whether a string starts with a given prefix.
-    ///
-    /// `S.startswith(prefix)` reports whether the string S has the specified
-    /// prefix.
-    ///
-    /// ```
-    /// # starlark::assert::all_true(r#"
-    /// "filename.sky".startswith("filename") == True
-    /// "filename.sky".startswith("sky") == False
-    /// 'abc'.startswith(('a', 'A')) == True
-    /// 'ABC'.startswith(('a', 'A')) == True
-    /// 'def'.startswith(('a', 'A')) == False
-    /// # "#);
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn startswith(
-        this: &str,
-        #[starlark(require = pos)] prefix: StringOrTuple,
-    ) -> anyhow::Result<bool> {
-        match prefix {
-            StringOrTuple::String(x) => Ok(this.starts_with(x)),
-            StringOrTuple::Tuple(xs) => Ok(xs.iter().any(|x| this.starts_with(x))),
-        }
-    }
-
-    /// [string.strip](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·strip
-    /// ): trim leading and trailing whitespace.
-    ///
-    /// `S.strip()` returns a copy of the string S with leading and trailing
-    /// whitespace removed.
-    ///
-    /// ```
-    /// # starlark::assert::all_true(r#"
-    /// "  hello  ".strip() == "hello"
-    /// "xxhello!!".strip("x!") == "hello"
-    /// # "#);
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn strip<'v>(
-        this: StringValue<'v>,
-        #[starlark(require = pos)] chars: Option<&str>,
-        heap: &'v Heap,
-    ) -> anyhow::Result<StringValue<'v>> {
-        let res = match chars {
-            None => this.trim(),
-            Some(s) => this.trim_matches(|c| s.contains(c)),
-        };
-        if res.len() == this.len() {
-            Ok(this)
-        } else {
-            Ok(heap.alloc_str(res))
-        }
-    }
-
-    /// [string.title](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·title
-    /// ): convert a string to title case.
-    ///
-    /// `S.title()` returns a copy of the string S with letters converted to
-    /// titlecase.
-    ///
-    /// Letters are converted to uppercase at the start of words, lowercase
-    /// elsewhere.
-    ///
-    /// ```
-    /// # starlark::assert::all_true(r#"
-    /// "hElLo, WoRlD!".title() == "Hello, World!"
-    /// # "#);
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn title(this: &str) -> anyhow::Result<String> {
-        let mut last_space = true;
-        let mut result = String::with_capacity(this.len());
-        for c in this.chars() {
-            if !c.is_alphabetic() {
-                last_space = true;
-                result.extend(c.to_lowercase());
-            } else {
-                if last_space {
-                    result.extend(c.to_uppercase())
-                } else {
-                    result.extend(c.to_lowercase())
-                }
-                last_space = false;
-            }
-        }
-        Ok(result)
-    }
-
-    /// [string.upper](
-    /// https://github.com/google/skylark/blob/3705afa472e466b8b061cce44b47c9ddc6db696d/doc/spec.md#string·upper
-    /// ): convert a string to all uppercase.
-    ///
-    /// `S.upper()` returns a copy of the string S with letters converted to
-    /// uppercase.
-    ///
-    /// ```
-    /// # starlark::assert::all_true(r#"
-    /// "Hello, World!".upper() == "HELLO, WORLD!"
-    /// # "#);
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn upper(this: &str) -> anyhow::Result<String> {
-        Ok(this.to_uppercase())
-    }
-
-    /// [string.removeprefix](
-    /// https://docs.python.org/3.9/library/stdtypes.html#str.removeprefix
-    /// ): remove a prefix from a string. _Not part of standard Starlark._
-    ///
-    /// If the string starts with the prefix string, return `string[len(prefix):]`.
-    /// Otherwise, return a copy of the original string:
-    ///
-    /// ```
-    /// # starlark::assert::all_true(r#"
-    /// "Hello, World!".removeprefix("Hello") == ", World!"
-    /// "Hello, World!".removeprefix("Goodbye") == "Hello, World!"
-    /// "Hello".removeprefix("Hello") == ""
-    /// # "#);
-    /// ```
-    #[starlark(speculative_exec_safe)]
-    fn removeprefix<'v>(
-        this: StringValue<'v>,
-        #[starlark(require = pos)] prefix: &str,
-        heap: &'v Heap,
-    ) -> anyhow::Result<StringValue<'v>> {
-        let x = this.as_str();
-        if x.starts_with(prefix) && !prefix.is_empty() {
-            Ok(heap.alloc_str(&x[prefix.len()..]))
-        } else {
-            Ok(this)
-        }
-    }
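The `lstrip`/`rstrip` docs above steer callers towards `removeprefix`/`removesuffix` because the two families differ: the strip functions remove any run of characters drawn from a set, while the remove functions drop one exact affix. A doctest-style sketch of the distinction, assuming the crate's public `assert` helpers:

```rust
#[test]
fn strip_takes_a_char_set_not_a_prefix() {
    // lstrip("!x") removes leading characters from the set {'!', 'x'}...
    starlark::assert::is_true(r#""x!x!hello".lstrip("!x") == "hello""#);
    // ...while removeprefix removes exactly one occurrence of the prefix.
    starlark::assert::is_true(r#""x!x!hello".removeprefix("x!") == "x!hello""#);
}
```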
- /// "Hello".removeprefix("Hello") == "" - /// # "#); - /// ``` - #[starlark(speculative_exec_safe)] - fn removeprefix<'v>( - this: StringValue<'v>, - #[starlark(require = pos)] prefix: &str, - heap: &'v Heap, - ) -> anyhow::Result> { - let x = this.as_str(); - if x.starts_with(prefix) && !prefix.is_empty() { - Ok(heap.alloc_str(&x[prefix.len()..])) - } else { - Ok(this) - } - } - - /// [string.removesuffix]( - /// https://docs.python.org/3.9/library/stdtypes.html#str.removesuffix - /// ): remove a prefix from a string. _Not part of standard Starlark._ - /// - /// If the string starts with the prefix string, return `string[len(prefix):]`. - /// Otherwise, return a copy of the original string: - /// - /// ``` - /// # starlark::assert::all_true(r#" - /// "Hello, World!".removesuffix("World!") == "Hello, " - /// "Hello, World!".removesuffix("World") == "Hello, World!" - /// "Hello".removesuffix("Hello") == "" - /// # "#); - /// ``` - #[starlark(speculative_exec_safe)] - fn removesuffix<'v>( - this: StringValue<'v>, - #[starlark(require = pos)] suffix: &str, - heap: &'v Heap, - ) -> anyhow::Result> { - let x = this.as_str(); - if x.ends_with(suffix) && !suffix.is_empty() { - Ok(heap.alloc_str(&x[..x.len() - suffix.len()])) - } else { - Ok(this) - } - } -} - -#[cfg(test)] -mod tests { - use crate::assert; - - #[test] - fn test_error_codes() { - assert::fail(r#""bonbon".index("on", 2, 5)"#, "not found in"); - assert::fail(r#"("banana".replace("a", "o", -2))"#, "negative"); - assert::fail(r#""bonbon".rindex("on", 2, 5)"#, "not found in"); - } - - #[test] - fn test_count() { - assert::eq("'abc'.count('a', 10, -10)", "0"); - } - - #[test] - fn test_find() { - assert::eq("'Троянская война окончена'.find('война')", "10"); - } - - #[test] - fn test_opaque_iterator() { - assert::is_true("type('foo'.elems()) != type([])"); - assert::is_true("type('foo'.codepoints()) != type([])"); - } -} diff --git a/starlark-rust/starlark/src/stdlib/structs.rs b/starlark-rust/starlark/src/stdlib/structs.rs deleted file mode 100644 index 07e798f2513a8..0000000000000 --- a/starlark-rust/starlark/src/stdlib/structs.rs +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright 2018 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! Implementation of `struct` function. 
-
-use allocative::Allocative;
-use dupe::Dupe;
-use starlark_derive::starlark_module;
-use starlark_map::sorted_map::SortedMap;
-
-use crate as starlark;
-use crate::codemap::Span;
-use crate::codemap::Spanned;
-use crate::environment::GlobalsBuilder;
-use crate::eval::Arguments;
-use crate::typing::error::TypingOrInternalError;
-use crate::typing::function::Arg;
-use crate::typing::function::TyCustomFunctionImpl;
-use crate::typing::oracle::ctx::TypingOracleCtx;
-use crate::typing::structs::TyStruct;
-use crate::typing::Ty;
-use crate::values::layout::heap::profile::arc_str::ArcStr;
-use crate::values::structs::value::FrozenStruct;
-use crate::values::structs::value::Struct;
-use crate::values::Heap;
-
-#[derive(
-    Allocative, Clone, Copy, Dupe, Debug, Eq, PartialEq, Hash, Ord, PartialOrd
-)]
-struct StructType;
-
-impl TyCustomFunctionImpl for StructType {
-    fn validate_call(
-        &self,
-        _span: Span,
-        args: &[Spanned<Arg>],
-        oracle: TypingOracleCtx,
-    ) -> Result<Ty, TypingOrInternalError> {
-        let mut fields = Vec::new();
-        let mut extra = false;
-        for x in args {
-            match &x.node {
-                Arg::Pos(_) => {
-                    return Err(oracle.msg_error(x.span, "Positional arguments not allowed"));
-                }
-                Arg::Args(_) => {
-                    // Args can be empty, and this is a valid call:
-                    // ```
-                    // struct(*[], **{})
-                    // ```
-                }
-                Arg::Name(name, val) => {
-                    fields.push((ArcStr::from(*name), val.clone()));
-                }
-                Arg::Kwargs(_) => extra = true,
-            }
-        }
-        Ok(Ty::custom(TyStruct {
-            fields: SortedMap::from_iter(fields),
-            extra,
-        }))
-    }
-}
-
-#[starlark_module]
-pub fn global(builder: &mut GlobalsBuilder) {
-    #[starlark(
-        ty_custom_function = StructType,
-        as_type = FrozenStruct,
-    )]
-    fn r#struct<'v>(args: &Arguments<'v, '_>, heap: &'v Heap) -> anyhow::Result<Struct<'v>> {
-        args.no_positional_args(heap)?;
-        // TODO(nga): missing optimization: practically most `struct` invocations are
-        //   performed with fixed named arguments, e.g. `struct(a = 1, b = 2)`.
-        //   In this case we can avoid allocating the map, but instead
-        //   allocate field index once at compilation time and store field values in a vector.
-        Ok(Struct::new(args.names_map()?))
-    }
-}
diff --git a/starlark-rust/starlark/src/tests.rs b/starlark-rust/starlark/src/tests.rs
new file mode 100644
index 0000000000000..fa735f77f5f56
--- /dev/null
+++ b/starlark-rust/starlark/src/tests.rs
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2018 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+mod basic;
+mod bc;
+mod before_stmt;
+mod call;
+mod comprehension;
+mod def;
+mod derive;
+mod for_loop;
+mod freeze_access_value;
+mod fstring;
+mod go;
+mod interop;
+mod opt;
+mod replace_binary;
+mod runtime;
+mod type_annot;
+mod uncategorized;
+pub(crate) mod util;
diff --git a/starlark-rust/starlark/src/tests/bc.rs b/starlark-rust/starlark/src/tests/bc.rs
new file mode 100644
index 0000000000000..7e06a71b4dbd2
--- /dev/null
+++ b/starlark-rust/starlark/src/tests/bc.rs
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2018 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Bytecode generation tests.
+
+mod and_or;
+mod call;
+mod compr;
+mod definitely_assigned;
+mod expr;
+mod for_stmt;
+pub(crate) mod golden;
+mod if_stmt;
+mod isinstance;
diff --git a/starlark-rust/starlark/src/tests/bc/call.rs b/starlark-rust/starlark/src/tests/bc/call.rs
new file mode 100644
index 0000000000000..5f18b42451a38
--- /dev/null
+++ b/starlark-rust/starlark/src/tests/bc/call.rs
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2018 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use crate::tests::bc::golden::bc_golden_test;
+
+#[test]
+fn test_call() {
+    bc_golden_test(
+        "call",
+        r#"
+def test(a, k):
+    noop(
+        10,
+        20,
+        p=30,
+        q=40,
+        r=50,
+        *a,
+        **k,
+    )
+"#,
+    );
+}
diff --git a/starlark-rust/starlark/src/tests/bc/golden.rs b/starlark-rust/starlark/src/tests/bc/golden.rs
index cc0bc03383504..984a735e8f883 100644
--- a/starlark-rust/starlark/src/tests/bc/golden.rs
+++ b/starlark-rust/starlark/src/tests/bc/golden.rs
@@ -28,10 +28,7 @@
 fn test_function_bytecode(program: &str) -> String {
     let program = program.trim();
     let mut a = Assert::new();
-    a.dialect(&Dialect {
-        enable_f_strings: true,
-        ..Dialect::Extended
-    });
+    a.dialect(&Dialect::AllOptionsInternal);
     let def = a
         .module("instrs.star", program)
         .get("test")
diff --git a/starlark-rust/starlark/src/tests/bc/golden/and_or_false_and_x.golden b/starlark-rust/starlark/src/tests/bc/golden/and_or_false_and_x.golden
index 77b79c5628d85..3c3b7ae656cd9 100644
--- a/starlark-rust/starlark/src/tests/bc/golden/and_or_false_and_x.golden
+++ b/starlark-rust/starlark/src/tests/bc/golden/and_or_false_and_x.golden
@@ -1,7 +1,7 @@
 # @generated
 # To regenerate, run:
 # ```
-# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests
+# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib
 # ```
 
 def test(x): return False and x
diff --git a/starlark-rust/starlark/src/tests/bc/golden/and_or_false_or_x.golden b/starlark-rust/starlark/src/tests/bc/golden/and_or_false_or_x.golden
index 1a5588f341d9b..d7194f5c856fd 100644
--- a/starlark-rust/starlark/src/tests/bc/golden/and_or_false_or_x.golden
+++ b/starlark-rust/starlark/src/tests/bc/golden/and_or_false_or_x.golden
@@ -1,7 +1,7 @@
 # @generated
 # To regenerate, run:
 # ```
-# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests
+# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib
 # ```
 
 def test(x): return False or x
diff --git a/starlark-rust/starlark/src/tests/bc/golden/and_or_true_and_x.golden b/starlark-rust/starlark/src/tests/bc/golden/and_or_true_and_x.golden
index 3d2e8b089a4bf..bfba65851e2ea 100644
--- a/starlark-rust/starlark/src/tests/bc/golden/and_or_true_and_x.golden
+++ b/starlark-rust/starlark/src/tests/bc/golden/and_or_true_and_x.golden
@@ -1,7 +1,7 @@
 # @generated
 # To regenerate, run:
 # ```
-# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests
+# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib
 # ```
 
 def test(x): return True and x
diff --git a/starlark-rust/starlark/src/tests/bc/golden/and_or_true_or_x.golden b/starlark-rust/starlark/src/tests/bc/golden/and_or_true_or_x.golden
index 3a63dafce3c96..c410a96f239ba 100644
--- a/starlark-rust/starlark/src/tests/bc/golden/and_or_true_or_x.golden
+++ b/starlark-rust/starlark/src/tests/bc/golden/and_or_true_or_x.golden
@@ -1,7 +1,7 @@
 # @generated
 # To regenerate, run:
 # ```
-# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests
+# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib
 # ```
 
 def test(x): return True or x
diff --git a/starlark-rust/starlark/src/tests/bc/golden/and_or_x_and_false.golden b/starlark-rust/starlark/src/tests/bc/golden/and_or_x_and_false.golden
index 147744810bfb6..625c1e2ccb5dd 100644
--- a/starlark-rust/starlark/src/tests/bc/golden/and_or_x_and_false.golden
+++ b/starlark-rust/starlark/src/tests/bc/golden/and_or_x_and_false.golden
@@ -1,7 +1,7 @@
 # @generated
 # To regenerate, run:
 # ```
-# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests
+# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib
 # ```
 
 def test(x): return x and False
@@ -11,8 +11,8 @@ def test(x): return x and False
 Max stack size: 1
 Instructions:
    0: IfNotBr &x 48
-  16: Const False &1
+  16: Const False ->&1
   40: Br 64
- >48: Mov &x &1
+ >48: Mov &x ->&1
  >64: Return &1
   72: End
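These `.golden` files are rewritten in place when the test suite runs with `STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1`, and compared against otherwise. A minimal sketch of how such a helper typically works; the function name and path here are illustrative, not the crate's actual `bc_golden_test` API:

```rust
use std::env;
use std::fs;

// Compare `actual` against a stored golden file, or regenerate it on demand.
fn check_golden(name: &str, actual: &str) {
    let path = format!("src/tests/bc/golden/{name}.golden");
    if env::var("STARLARK_RUST_REGENERATE_GOLDEN_TESTS").is_ok() {
        // Regenerate mode: overwrite the checked-in expectation.
        fs::write(&path, actual).unwrap();
    } else {
        // Normal mode: fail on any divergence from the stored output.
        let expected = fs::read_to_string(&path).unwrap();
        assert_eq!(expected, actual, "golden mismatch for `{name}`");
    }
}
```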
diff --git a/starlark-rust/starlark/src/tests/bc/golden/and_or_x_and_true.golden b/starlark-rust/starlark/src/tests/bc/golden/and_or_x_and_true.golden
index bb133c45e4c1d..a1b5abea1392c 100644
--- a/starlark-rust/starlark/src/tests/bc/golden/and_or_x_and_true.golden
+++ b/starlark-rust/starlark/src/tests/bc/golden/and_or_x_and_true.golden
@@ -1,7 +1,7 @@
 # @generated
 # To regenerate, run:
 # ```
-# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests
+# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib
 # ```
 
 def test(x): return x and True
@@ -11,8 +11,8 @@ def test(x): return x and True
 Max stack size: 1
 Instructions:
    0: IfNotBr &x 48
-  16: Const True &1
+  16: Const True ->&1
   40: Br 64
- >48: Mov &x &1
+ >48: Mov &x ->&1
  >64: Return &1
   72: End
diff --git a/starlark-rust/starlark/src/tests/bc/golden/and_or_x_or_false.golden b/starlark-rust/starlark/src/tests/bc/golden/and_or_x_or_false.golden
index 234ea802aeee7..2ba61a46fec47 100644
--- a/starlark-rust/starlark/src/tests/bc/golden/and_or_x_or_false.golden
+++ b/starlark-rust/starlark/src/tests/bc/golden/and_or_x_or_false.golden
@@ -1,7 +1,7 @@
 # @generated
 # To regenerate, run:
 # ```
-# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests
+# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib
 # ```
 
 def test(x): return x or False
@@ -11,8 +11,8 @@ def test(x): return x or False
 Max stack size: 1
 Instructions:
    0: IfNotBr &x 40
-  16: Mov &x &1
+  16: Mov &x ->&1
   32: Br 64
- >40: Const False &1
+ >40: Const False ->&1
  >64: Return &1
   72: End
diff --git a/starlark-rust/starlark/src/tests/bc/golden/and_or_x_or_true.golden b/starlark-rust/starlark/src/tests/bc/golden/and_or_x_or_true.golden
index 75972accecc1c..0f962245ff72c 100644
--- a/starlark-rust/starlark/src/tests/bc/golden/and_or_x_or_true.golden
+++ b/starlark-rust/starlark/src/tests/bc/golden/and_or_x_or_true.golden
@@ -1,7 +1,7 @@
 # @generated
 # To regenerate, run:
 # ```
-# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests
+# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib
 # ```
 
 def test(x): return x or True
@@ -11,8 +11,8 @@ def test(x): return x or True
 Max stack size: 1
 Instructions:
    0: IfNotBr &x 40
-  16: Mov &x &1
+  16: Mov &x ->&1
   32: Br 64
- >40: Const True &1
+ >40: Const True ->&1
  >64: Return &1
   72: End
diff --git a/starlark-rust/starlark/src/tests/bc/golden/call.golden b/starlark-rust/starlark/src/tests/bc/golden/call.golden
new file mode 100644
index 0000000000000..aed0ad0b48b61
--- /dev/null
+++ b/starlark-rust/starlark/src/tests/bc/golden/call.golden
@@ -0,0 +1,29 @@
+# @generated
+# To regenerate, run:
+# ```
+# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib
+# ```
+
+def test(a, k):
+    noop(
+        10,
+        20,
+        p=30,
+        q=40,
+        r=50,
+        *a,
+        **k,
+    )
+
+# Bytecode:
+
+Max stack size: 6
+Instructions:
+    0: Const 10 ->&3
+   24: Const 20 ->&4
+   48: Const 30 ->&5
+   72: Const 40 ->&6
+   96: Const 50 ->&7
+  120: CallFrozenNative noop {&3..&8 2 p q r *&0 **&1} instrs.star.bzl:2:5-10:6 ->&2
+  208: ReturnConst None
+  224: End
diff --git a/starlark-rust/starlark/src/tests/bc/golden/compr_if_true_clause.golden b/starlark-rust/starlark/src/tests/bc/golden/compr_if_true_clause.golden
index badbe070cbdd4..fd228c9245426 100644
--- a/starlark-rust/starlark/src/tests/bc/golden/compr_if_true_clause.golden
+++ b/starlark-rust/starlark/src/tests/bc/golden/compr_if_true_clause.golden
@@ -1,7 +1,7 @@
 # @generated
 # To regenerate, run:
 # ```
-# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests
+# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib
 # ```
 
 def test(y): return [x for x in y if True]
@@ -10,10 +10,10 @@ def test(y): return [x for x in y if True]
 
 Max stack size: 3
 Instructions:
-   0: ListNew &3
-   8: Iter &y 0 &4 &x 72
+   0: ListNew ->&3
+   8: Iter &y 0 ->&4 ->&x 72
 > 32: ComprListAppend &3 &x
-  48: Continue &4 0 &x 32 72
- >72: Mov &3 &2
+  48: Continue &4 0 ->&x 32 72
+ >72: Mov &3 ->&2
   88: Return &2
   96: End
diff --git a/starlark-rust/starlark/src/tests/bc/golden/compr_if_true_clause_on_freeze.golden b/starlark-rust/starlark/src/tests/bc/golden/compr_if_true_clause_on_freeze.golden
index 77ba59b41ee4d..a516c61fc5b60 100644
--- a/starlark-rust/starlark/src/tests/bc/golden/compr_if_true_clause_on_freeze.golden
+++ b/starlark-rust/starlark/src/tests/bc/golden/compr_if_true_clause_on_freeze.golden
@@ -1,7 +1,7 @@
 # @generated
 # To regenerate, run:
 # ```
-# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests
+# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib
 # ```
 
 def test(y): return [x for x in y if C]
@@ -12,10 +12,10 @@ C = True
 
 Max stack size: 3
 Instructions:
-   0: ListNew &3
-   8: Iter &y 0 &4 &x 72
+   0: ListNew ->&3
+   8: Iter &y 0 ->&4 ->&x 72
 > 32: ComprListAppend &3 &x
-  48: Continue &4 0 &x 32 72
- >72: Mov &3 &2
+  48: Continue &4 0 ->&x 32 72
+ >72: Mov &3 ->&2
   88: Return &2
   96: End
diff --git a/starlark-rust/starlark/src/tests/bc/golden/compr_no_loop_if_top_collection_is_empty.golden b/starlark-rust/starlark/src/tests/bc/golden/compr_no_loop_if_top_collection_is_empty.golden
index d8e9172d7d0e6..09e2ed519f446 100644
--- a/starlark-rust/starlark/src/tests/bc/golden/compr_no_loop_if_top_collection_is_empty.golden
+++ b/starlark-rust/starlark/src/tests/bc/golden/compr_no_loop_if_top_collection_is_empty.golden
@@ -1,7 +1,7 @@
 # @generated
 # To regenerate, run:
 # ```
-# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests
+# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib
 # ```
 
 def test(): return [x for x in []]
@@ -10,6 +10,6 @@ def test(): return [x for x in []]
 
 Max stack size: 1
 Instructions:
-   0: ListNew &1
+   0: ListNew ->&1
    8: Return &1
   16: End
diff --git a/starlark-rust/starlark/src/tests/bc/golden/compr_no_loop_if_top_collection_is_empty_on_freeze.golden b/starlark-rust/starlark/src/tests/bc/golden/compr_no_loop_if_top_collection_is_empty_on_freeze.golden
index ad55cee59833e..93e91cd6c15c8 100644
--- a/starlark-rust/starlark/src/tests/bc/golden/compr_no_loop_if_top_collection_is_empty_on_freeze.golden
+++ b/starlark-rust/starlark/src/tests/bc/golden/compr_no_loop_if_top_collection_is_empty_on_freeze.golden
@@ -1,7 +1,7 @@
 # @generated
 # To regenerate, run:
 # ```
-# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests
+# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib
 # ```
 
 def test(): return [x for x in D]
@@ -11,6 +11,6 @@ D = {}
 
 Max stack size: 1
 Instructions:
-   0: ListNew &1
+   0: ListNew ->&1
    8: Return &1
   16: End
diff --git a/starlark-rust/starlark/src/tests/bc/golden/constant_folding_list_add.golden b/starlark-rust/starlark/src/tests/bc/golden/constant_folding_list_add.golden
index 9575edfd7dfa2..6abf0ce3e51f3 100644
--- a/starlark-rust/starlark/src/tests/bc/golden/constant_folding_list_add.golden
+++ b/starlark-rust/starlark/src/tests/bc/golden/constant_folding_list_add.golden
@@ -1,7 +1,7 @@
 # @generated
 # To regenerate, run:
 # ```
-# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests
+# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib
 # ```
 
 def test(): return [1] + [2]
@@ -10,6 +10,6 @@ def test(): return [1] + [2]
 
 Max stack size: 1
 Instructions:
-   0: ListOfConsts [1, 2] &0
+   0: ListOfConsts [1, 2] ->&0
   32: Return &0
   40: End
diff --git a/starlark-rust/starlark/src/tests/bc/golden/def_inline_const_args_inlined.golden b/starlark-rust/starlark/src/tests/bc/golden/def_inline_const_args_inlined.golden
index d61ddb5b5bc5a..2e4aa86c6524f 100644
--- a/starlark-rust/starlark/src/tests/bc/golden/def_inline_const_args_inlined.golden
+++ b/starlark-rust/starlark/src/tests/bc/golden/def_inline_const_args_inlined.golden
@@ -1,7 +1,7 @@
 # @generated
 # To regenerate, run:
 # ```
-# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests
+# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib
 # ```
 
 def foo(x, y):
@@ -15,8 +15,8 @@
 
 Max stack size: 3
 Instructions:
-   0: Const True &1
-  24: Const 10 &2
-  48: CallFrozenNativePos noop &1..&3 instrs.star.bzl:2:12-22 &0
+   0: Const True ->&1
+  24: Const 10 ->&2
+  48: CallFrozenNativePos noop &1..&3 instrs.star.bzl:2:12-22 ->&0
  104: Return &0
  112: End
diff --git a/starlark-rust/starlark/src/tests/bc/golden/def_inline_const_inlined.golden b/starlark-rust/starlark/src/tests/bc/golden/def_inline_const_inlined.golden
index ff02e14fb170b..79823065c529b 100644
--- a/starlark-rust/starlark/src/tests/bc/golden/def_inline_const_inlined.golden
+++ b/starlark-rust/starlark/src/tests/bc/golden/def_inline_const_inlined.golden
@@ -1,7 +1,7 @@
 # @generated
 # To regenerate, run:
 # ```
-# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests
+# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib
 # ```
 
 def trivial():
diff --git a/starlark-rust/starlark/src/tests/bc/golden/def_inline_dict_inlined.golden b/starlark-rust/starlark/src/tests/bc/golden/def_inline_dict_inlined.golden
index 824a5195d129f..3d72646cdcb29 100644
--- a/starlark-rust/starlark/src/tests/bc/golden/def_inline_dict_inlined.golden
+++ b/starlark-rust/starlark/src/tests/bc/golden/def_inline_dict_inlined.golden
@@ -1,7 +1,7 @@
 # @generated
 # To regenerate, run:
 # ```
-# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests
+# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib
 # ```
 
 def returns_dict():
@@ -15,8 +15,8 @@
 
 Max stack size: 3
 Instructions:
-   0: ListNew &1
-   8: Const 10 &2
-  32: DictNPop [&1, &2] &0
+   0: ListNew ->&1
+   8: Const 10 ->&2
+  32: DictNPop [&1, &2] ->&0
   48: Return &0
   56: End
diff --git a/starlark-rust/starlark/src/tests/bc/golden/def_inline_list_inlined.golden b/starlark-rust/starlark/src/tests/bc/golden/def_inline_list_inlined.golden
index 1f171d886cd9d..dec16ef7d2420 100644
--- a/starlark-rust/starlark/src/tests/bc/golden/def_inline_list_inlined.golden
+++ b/starlark-rust/starlark/src/tests/bc/golden/def_inline_list_inlined.golden
@@ -1,7 +1,7 @@
 # @generated
 # To regenerate, run:
 # ```
-# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests
+# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib
 # ```
 
 def test():
@@ -15,6 +15,6 @@ def returns_list():
 
 Max stack size: 1
 Instructions:
-   0: ListOfConsts [10, True] &0
+   0: ListOfConsts [10, True] ->&0
   32: Return &0
   40: End
diff --git a/starlark-rust/starlark/src/tests/bc/golden/def_inline_locals_inlined.golden b/starlark-rust/starlark/src/tests/bc/golden/def_inline_locals_inlined.golden
index 4d94a2555a915..4c78d73f1dcb7 100644
--- a/starlark-rust/starlark/src/tests/bc/golden/def_inline_locals_inlined.golden
+++ b/starlark-rust/starlark/src/tests/bc/golden/def_inline_locals_inlined.golden
@@ -1,7 +1,7 @@
 # @generated
 # To regenerate, run:
 # ```
-# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests
+# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib
 # ```
 
 def foo(x, y):
@@ -15,6 +15,6 @@ def test(x, y):
 
 Max stack size: 1
 Instructions:
-   0: CallFrozenNativePos noop &0..&2 instrs.star.bzl:2:12-22 &2
+   0: CallFrozenNativePos noop &0..&2 instrs.star.bzl:2:12-22 ->&2
   56: Return &2
   64: End
diff --git a/starlark-rust/starlark/src/tests/bc/golden/def_inline_return_type_inlined.golden b/starlark-rust/starlark/src/tests/bc/golden/def_inline_return_type_inlined.golden
index a4b5a06d5aa21..6552d6ed23472 100644
--- a/starlark-rust/starlark/src/tests/bc/golden/def_inline_return_type_inlined.golden
+++ b/starlark-rust/starlark/src/tests/bc/golden/def_inline_return_type_inlined.golden
@@ -1,7 +1,7 @@
 # @generated
 # To regenerate, run:
 # ```
-# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests
+# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib
 # ```
 
 def smth() -> str:
@@ -15,6 +15,6 @@ def test():
 
 Max stack size: 1
 Instructions:
-   0: CallFrozenDefPos instrs.star.bzl.smth &0..&0 instrs.star.bzl:6:12-18 &0
+   0: CallFrozenDefPos instrs.star.bzl.smth &0..&0 instrs.star.bzl:6:12-18 ->&0
   40: Return &0
   48: End
diff --git a/starlark-rust/starlark/src/tests/bc/golden/definitely_assigned_mov_is_used.golden b/starlark-rust/starlark/src/tests/bc/golden/definitely_assigned_mov_is_used.golden
index e9b954cb9aaa9..1d4ad281b4faf 100644
--- a/starlark-rust/starlark/src/tests/bc/golden/definitely_assigned_mov_is_used.golden
+++ b/starlark-rust/starlark/src/tests/bc/golden/definitely_assigned_mov_is_used.golden
@@ -1,7 +1,7 @@
 # @generated
 # To regenerate, run:
 # ```
-# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests
+# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib
 # ```
 
 def test(x, y): noop(y, x)
@@ -10,8 +10,8 @@
 
 Max stack size: 3
 Instructions:
-   0: Mov &y &3
-  16: Mov &x &4
-  32: CallFrozenNativePos noop &3..&5 instrs.star.bzl:1:17-27 &2
+   0: Mov &y ->&3
+  16: Mov &x ->&4
+  32: CallFrozenNativePos noop &3..&5 instrs.star.bzl:1:17-27 ->&2
   88: ReturnConst None
  104: End
diff --git a/starlark-rust/starlark/src/tests/bc/golden/definitely_assigned_no_op_movs.golden b/starlark-rust/starlark/src/tests/bc/golden/definitely_assigned_no_op_movs.golden
index c6ba8e1fe70f5..39af8be28b430 100644
--- a/starlark-rust/starlark/src/tests/bc/golden/definitely_assigned_no_op_movs.golden
+++ b/starlark-rust/starlark/src/tests/bc/golden/definitely_assigned_no_op_movs.golden
@@ -1,7 +1,7 @@
 # @generated
 # To regenerate, run:
 # ```
-# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests
+# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib
 # ```
 
 def test(x):
     x = x
a/starlark-rust/starlark/src/tests/bc/golden/definitely_assigned_slot_range_in_call.golden b/starlark-rust/starlark/src/tests/bc/golden/definitely_assigned_slot_range_in_call.golden index ee77e8f8a603e..249983949a2d5 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/definitely_assigned_slot_range_in_call.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/definitely_assigned_slot_range_in_call.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(x, y): noop(x, y) @@ -10,6 +10,6 @@ def test(x, y): noop(x, y) Max stack size: 1 Instructions: - 0: CallFrozenNativePos noop &0..&2 instrs.star.bzl:1:17-27 &2 + 0: CallFrozenNativePos noop &0..&2 instrs.star.bzl:1:17-27 ->&2 56: ReturnConst None 72: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/definitely_assigned_slot_range_in_list.golden b/starlark-rust/starlark/src/tests/bc/golden/definitely_assigned_slot_range_in_list.golden index ab00c3b828b15..b8f389efc9b99 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/definitely_assigned_slot_range_in_list.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/definitely_assigned_slot_range_in_list.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(x, y): return [x, y] @@ -10,6 +10,6 @@ def test(x, y): return [x, y] Max stack size: 1 Instructions: - 0: ListNPop [&x, &y] &2 + 0: ListNPop [&x, &y] ->&2 16: Return &2 24: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/eq_bool.golden b/starlark-rust/starlark/src/tests/bc/golden/eq_bool.golden new file mode 100644 index 0000000000000..0f3a823491602 --- /dev/null +++ b/starlark-rust/starlark/src/tests/bc/golden/eq_bool.golden @@ -0,0 +1,16 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +def test(x): + return x == True + +# Bytecode: + +Max stack size: 1 +Instructions: + 0: EqPtr &x True ->&1 + 24: Return &1 + 32: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/eq_const.golden b/starlark-rust/starlark/src/tests/bc/golden/eq_const.golden new file mode 100644 index 0000000000000..1cdff4c3e6f8c --- /dev/null +++ b/starlark-rust/starlark/src/tests/bc/golden/eq_const.golden @@ -0,0 +1,18 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +S = struct(a = 2) + +def test(x): + return x == S + +# Bytecode: + +Max stack size: 1 +Instructions: + 0: EqConst &x struct(a=2) ->&1 + 24: Return &1 + 32: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/eq_enum.golden b/starlark-rust/starlark/src/tests/bc/golden/eq_enum.golden new file mode 100644 index 0000000000000..b7fa4397c295b --- /dev/null +++ b/starlark-rust/starlark/src/tests/bc/golden/eq_enum.golden @@ -0,0 +1,18 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Color = enum("RED", "GREEN", "BLUE") + +def test(x): + return x == Color("RED") + +# Bytecode: + +Max stack size: 1 +Instructions: + 0: EqPtr &x Color("RED") ->&1 + 24: Return &1 + 32: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/eq_int.golden b/starlark-rust/starlark/src/tests/bc/golden/eq_int.golden 
new file mode 100644 index 0000000000000..f4f5ce4992904 --- /dev/null +++ b/starlark-rust/starlark/src/tests/bc/golden/eq_int.golden @@ -0,0 +1,16 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +def test(x): + return x == 10 + +# Bytecode: + +Max stack size: 1 +Instructions: + 0: EqInt &x 10 ->&1 + 24: Return &1 + 32: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/eq_short_str.golden b/starlark-rust/starlark/src/tests/bc/golden/eq_short_str.golden new file mode 100644 index 0000000000000..c402ccafa816a --- /dev/null +++ b/starlark-rust/starlark/src/tests/bc/golden/eq_short_str.golden @@ -0,0 +1,16 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +def test(x): + return x == "a" + +# Bytecode: + +Max stack size: 1 +Instructions: + 0: EqPtr &x "a" ->&1 + 24: Return &1 + 32: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/eq_str.golden b/starlark-rust/starlark/src/tests/bc/golden/eq_str.golden new file mode 100644 index 0000000000000..4572932b9c8d4 --- /dev/null +++ b/starlark-rust/starlark/src/tests/bc/golden/eq_str.golden @@ -0,0 +1,16 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +def test(x): + return x == "hello" + +# Bytecode: + +Max stack size: 1 +Instructions: + 0: EqStr &x "hello" ->&1 + 24: Return &1 + 32: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/expr_call_maybe_known_method.golden b/starlark-rust/starlark/src/tests/bc/golden/expr_call_maybe_known_method.golden index 5c3b090cdb592..683721334ac47 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/expr_call_maybe_known_method.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/expr_call_maybe_known_method.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(x): x.append(1) @@ -10,7 +10,7 @@ def test(x): x.append(1) Max stack size: 2 Instructions: - 0: Const 1 &2 - 24: CallMaybeKnownMethodPos &x append &2..&3 instrs.star.bzl:1:14-25 &1 + 0: Const 1 ->&2 + 24: CallMaybeKnownMethodPos &x append &2..&3 instrs.star.bzl:1:14-25 ->&1 120: ReturnConst None 136: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/expr_format_one.golden b/starlark-rust/starlark/src/tests/bc/golden/expr_format_one.golden index 921a273ecd863..b9ebfcfffab6d 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/expr_format_one.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/expr_format_one.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(x): return '(({}))'.format(x) @@ -10,6 +10,6 @@ def test(x): return '(({}))'.format(x) Max stack size: 1 Instructions: - 0: FormatOne "((" &x "))" &1 + 0: FormatOne "((" &x "))" ->&1 32: Return &1 40: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/expr_fstring.golden b/starlark-rust/starlark/src/tests/bc/golden/expr_fstring.golden index c512df5428650..1f3555e1bef2b 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/expr_fstring.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/expr_fstring.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` 
-# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(x): return f'test: {x}' @@ -10,6 +10,6 @@ def test(x): return f'test: {x}' Max stack size: 1 Instructions: - 0: FormatOne "test: " &x "" &1 + 0: FormatOne "test: " &x "" ->&1 32: Return &1 40: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/expr_percent_s_one.golden b/starlark-rust/starlark/src/tests/bc/golden/expr_percent_s_one.golden index 06f907f10ee2f..cb3ad3a2b78fc 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/expr_percent_s_one.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/expr_percent_s_one.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(x): return '((%s))' % x @@ -10,6 +10,6 @@ def test(x): return '((%s))' % x Max stack size: 1 Instructions: - 0: PercentSOne "((" &x "))" &1 + 0: PercentSOne "((" &x "))" ->&1 32: Return &1 40: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/expr_spec_exec_list.golden b/starlark-rust/starlark/src/tests/bc/golden/expr_spec_exec_list.golden index 8658c1e970c20..b57e24149ba05 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/expr_spec_exec_list.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/expr_spec_exec_list.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(): return list((10, 20)) @@ -10,6 +10,6 @@ def test(): return list((10, 20)) Max stack size: 1 Instructions: - 0: ListOfConsts [10, 20] &0 + 0: ListOfConsts [10, 20] ->&0 32: Return &0 40: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/expr_type.golden b/starlark-rust/starlark/src/tests/bc/golden/expr_type.golden index 2f77f5218d9c7..62014f0eb4d42 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/expr_type.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/expr_type.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(x): return type(x) @@ -10,6 +10,6 @@ def test(x): return type(x) Max stack size: 1 Instructions: - 0: Type &x &1 + 0: Type &x ->&1 16: Return &1 24: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/for.golden b/starlark-rust/starlark/src/tests/bc/golden/for.golden index 513555001ecb6..3b9c2b4f67403 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/for.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/for.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(x): @@ -12,8 +12,8 @@ def test(x): Max stack size: 2 Instructions: - 0: Iter &x 0 &2 &i 104 - > 24: CallFrozenNativePos noop &1..&2 instrs.star.bzl:3:5-12 &3 - 80: Continue &2 0 &i 24 104 + 0: Iter &x 0 ->&2 ->&i 104 + > 24: CallFrozenNativePos noop &1..&2 instrs.star.bzl:3:5-12 ->&3 + 80: Continue &2 0 ->&i 24 104 >104: ReturnConst None 120: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/for_break.golden 
b/starlark-rust/starlark/src/tests/bc/golden/for_break.golden index 300333801cac0..b8cc34354b5b4 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/for_break.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/for_break.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(x): @@ -13,10 +13,10 @@ def test(x): Max stack size: 2 Instructions: - 0: Iter &x 0 &2 &i 136 + 0: Iter &x 0 ->&2 ->&i 136 > 24: IfNotBr &i 56 40: Break &2 136 - > 56: CallFrozenNativePos noop &1..&2 instrs.star.bzl:4:5-12 &3 - 112: Continue &2 0 &i 24 136 + > 56: CallFrozenNativePos noop &1..&2 instrs.star.bzl:4:5-12 ->&3 + 112: Continue &2 0 ->&i 24 136 >136: ReturnConst None 152: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/for_continue.golden b/starlark-rust/starlark/src/tests/bc/golden/for_continue.golden index b793957d16edc..7f1d39b8737a4 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/for_continue.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/for_continue.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(x): @@ -13,10 +13,10 @@ def test(x): Max stack size: 2 Instructions: - 0: Iter &x 0 &2 &i 144 + 0: Iter &x 0 ->&2 ->&i 144 > 24: IfNotBr &i 64 - 40: Continue &2 0 &i 24 144 - > 64: CallFrozenNativePos noop &1..&2 instrs.star.bzl:4:5-12 &3 - 120: Continue &2 0 &i 24 144 + 40: Continue &2 0 ->&i 24 144 + > 64: CallFrozenNativePos noop &1..&2 instrs.star.bzl:4:5-12 ->&3 + 120: Continue &2 0 ->&i 24 144 >144: ReturnConst None 160: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/if_stmt_and_stmt.golden b/starlark-rust/starlark/src/tests/bc/golden/if_stmt_and_stmt.golden index 0e8ab177b7dd5..245363115ae7e 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/if_stmt_and_stmt.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/if_stmt_and_stmt.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(x): @@ -12,6 +12,6 @@ def test(x): Max stack size: 1 Instructions: 0: IfNotBr &x 72 - 16: CallFrozenNativePos noop &0..&0 instrs.star.bzl:2:9-15 &1 + 16: CallFrozenNativePos noop &0..&0 instrs.star.bzl:2:9-15 ->&1 >72: ReturnConst None 88: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_else_x_and_y.golden b/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_else_x_and_y.golden index dc81fbf68a2e9..27f8b4c6853d1 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_else_x_and_y.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_else_x_and_y.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(x, y): diff --git a/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_else_x_or_y.golden b/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_else_x_or_y.golden index 4bbd6a91c945f..dbbbddafe96fa 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_else_x_or_y.golden +++ 
b/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_else_x_or_y.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(x, y): diff --git a/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_false_and_x.golden b/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_false_and_x.golden index 46b0ebfec1f84..cd437f22298c3 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_false_and_x.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_false_and_x.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(x): diff --git a/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_false_or_x.golden b/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_false_or_x.golden index fb0c699a77b32..bbf1657e4b25f 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_false_or_x.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_false_or_x.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(x): @@ -12,6 +12,6 @@ def test(x): Max stack size: 1 Instructions: 0: IfNotBr &x 72 - 16: CallFrozenNativePos noop &0..&0 instrs.star.bzl:2:18-24 &1 + 16: CallFrozenNativePos noop &0..&0 instrs.star.bzl:2:18-24 ->&1 >72: ReturnConst None 88: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_true_and_x.golden b/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_true_and_x.golden index e46cc3a6a3207..5bdd250726f1e 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_true_and_x.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_true_and_x.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(x): @@ -12,6 +12,6 @@ def test(x): Max stack size: 1 Instructions: 0: IfNotBr &x 72 - 16: CallFrozenNativePos noop &0..&0 instrs.star.bzl:2:18-24 &1 + 16: CallFrozenNativePos noop &0..&0 instrs.star.bzl:2:18-24 ->&1 >72: ReturnConst None 88: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_true_or_x.golden b/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_true_or_x.golden index c3fbad16c1ef7..3b32b523d0984 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_true_or_x.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_true_or_x.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(x): @@ -11,6 +11,6 @@ def test(x): Max stack size: 1 Instructions: - 0: CallFrozenNativePos noop &0..&0 instrs.star.bzl:2:17-23 &1 + 0: CallFrozenNativePos noop &0..&0 instrs.star.bzl:2:17-23 ->&1 56: ReturnConst None 72: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_x_and_false.golden b/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_x_and_false.golden index f8def2ef26f1e..aed380a40c849 100644 --- 
a/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_x_and_false.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_x_and_false.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(x): diff --git a/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_x_and_true.golden b/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_x_and_true.golden index f1a42a046d829..da011227d2166 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_x_and_true.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_x_and_true.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(x): @@ -12,6 +12,6 @@ def test(x): Max stack size: 1 Instructions: 0: IfNotBr &x 72 - 16: CallFrozenNativePos noop &0..&0 instrs.star.bzl:2:18-24 &1 + 16: CallFrozenNativePos noop &0..&0 instrs.star.bzl:2:18-24 ->&1 >72: ReturnConst None 88: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_x_or_false.golden b/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_x_or_false.golden index ce2d03da07366..794d207278466 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_x_or_false.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_x_or_false.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(x): @@ -12,6 +12,6 @@ def test(x): Max stack size: 1 Instructions: 0: IfNotBr &x 72 - 16: CallFrozenNativePos noop &0..&0 instrs.star.bzl:2:18-24 &1 + 16: CallFrozenNativePos noop &0..&0 instrs.star.bzl:2:18-24 ->&1 >72: ReturnConst None 88: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_x_or_true.golden b/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_x_or_true.golden index 7893149a50599..33a41a1e97add 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_x_or_true.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/if_stmt_if_x_or_true.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(x): @@ -11,6 +11,6 @@ def test(x): Max stack size: 1 Instructions: - 0: CallFrozenNativePos noop &0..&0 instrs.star.bzl:2:17-23 &1 + 0: CallFrozenNativePos noop &0..&0 instrs.star.bzl:2:17-23 ->&1 56: ReturnConst None 72: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/if_stmt_or_stmt.golden b/starlark-rust/starlark/src/tests/bc/golden/if_stmt_or_stmt.golden index 3373bba3190e6..9a210af6088c6 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/if_stmt_or_stmt.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/if_stmt_or_stmt.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(x): @@ -12,6 +12,6 @@ def test(x): Max stack size: 1 Instructions: 0: IfBr &x 72 - 16: CallFrozenNativePos noop &0..&0 instrs.star.bzl:2:8-14 &1 + 16: 
CallFrozenNativePos noop &0..&0 instrs.star.bzl:2:8-14 ->&1 >72: ReturnConst None 88: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/isinstance.golden b/starlark-rust/starlark/src/tests/bc/golden/isinstance.golden index b3f9aba8501e7..ba86378cc4ce9 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/isinstance.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/isinstance.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(x): return isinstance(x, str) @@ -10,6 +10,6 @@ def test(x): return isinstance(x, str) Max stack size: 1 Instructions: - 0: IsInstance &x str &1 + 0: IsInstance &x str ->&1 24: Return &1 32: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/opt_empty_iterable_optimized_away.golden b/starlark-rust/starlark/src/tests/bc/golden/opt_empty_iterable_optimized_away.golden index 44d58032888ec..d54145267f452 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/opt_empty_iterable_optimized_away.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/opt_empty_iterable_optimized_away.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` L = [] diff --git a/starlark-rust/starlark/src/tests/bc/golden/opt_list_of_const_add.golden b/starlark-rust/starlark/src/tests/bc/golden/opt_list_of_const_add.golden new file mode 100644 index 0000000000000..e0adf87717060 --- /dev/null +++ b/starlark-rust/starlark/src/tests/bc/golden/opt_list_of_const_add.golden @@ -0,0 +1,16 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +def test(): + return [1, 2] + [3, 4, 5] + +# Bytecode: + +Max stack size: 1 +Instructions: + 0: ListOfConsts [1, 2, 3, 4, 5] ->&0 + 32: Return &0 + 40: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/opt_list_of_expr_add.golden b/starlark-rust/starlark/src/tests/bc/golden/opt_list_of_expr_add.golden new file mode 100644 index 0000000000000..47b5e5447bb43 --- /dev/null +++ b/starlark-rust/starlark/src/tests/bc/golden/opt_list_of_expr_add.golden @@ -0,0 +1,21 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +def test(): + return [noop(), noop()] + [noop(), noop(), noop()] + +# Bytecode: + +Max stack size: 6 +Instructions: + 0: CallFrozenNativePos noop &0..&0 instrs.star.bzl:2:13-19 ->&1 + 56: CallFrozenNativePos noop &0..&0 instrs.star.bzl:2:21-27 ->&2 + 112: CallFrozenNativePos noop &0..&0 instrs.star.bzl:2:32-38 ->&3 + 168: CallFrozenNativePos noop &0..&0 instrs.star.bzl:2:40-46 ->&4 + 224: CallFrozenNativePos noop &0..&0 instrs.star.bzl:2:48-54 ->&5 + 280: ListNPop [&1, &2, &3, &4, &5] ->&0 + 296: Return &0 + 304: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/opt_list_plus_list.golden b/starlark-rust/starlark/src/tests/bc/golden/opt_list_plus_list.golden index c187a49bca591..026e0c821bc6c 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/opt_list_plus_list.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/opt_list_plus_list.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test 
-p starlark --lib # ``` L = [1, 2] @@ -13,6 +13,6 @@ def test(): Max stack size: 1 Instructions: - 0: ListOfConsts [1, 2, 1] &0 + 0: ListOfConsts [1, 2, 1] ->&0 32: Return &0 40: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/opt_mutual_recursion.golden b/starlark-rust/starlark/src/tests/bc/golden/opt_mutual_recursion.golden index 1e7011c6829ba..99b148ec54ed9 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/opt_mutual_recursion.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/opt_mutual_recursion.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(): @@ -14,6 +14,6 @@ def g(): Max stack size: 1 Instructions: - 0: CallFrozenDefPos instrs.star.bzl.g &0..&0 instrs.star.bzl:2:12-15 &0 + 0: CallFrozenDefPos instrs.star.bzl.g &0..&0 instrs.star.bzl:2:12-15 ->&0 40: Return &0 48: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/opt_private_forward_mutable_module_vars_inlined.golden b/starlark-rust/starlark/src/tests/bc/golden/opt_private_forward_mutable_module_vars_inlined.golden index 81a7c209221c0..2aac66280389d 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/opt_private_forward_mutable_module_vars_inlined.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/opt_private_forward_mutable_module_vars_inlined.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(): diff --git a/starlark-rust/starlark/src/tests/bc/golden/opt_recursion.golden b/starlark-rust/starlark/src/tests/bc/golden/opt_recursion.golden index 2e5d924f84ff9..3381ad3ba3ec0 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/opt_recursion.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/opt_recursion.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(): return test() @@ -10,6 +10,6 @@ def test(): return test() Max stack size: 1 Instructions: - 0: CallFrozenDefPos instrs.star.bzl.test &0..&0 instrs.star.bzl:1:20-26 &0 + 0: CallFrozenDefPos instrs.star.bzl.test &0..&0 instrs.star.bzl:1:20-26 ->&0 40: Return &0 48: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/opt_return_type_erased.golden b/starlark-rust/starlark/src/tests/bc/golden/opt_return_type_erased.golden index 28ad447b5e7fb..e9eb3d6b03b96 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/opt_return_type_erased.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/opt_return_type_erased.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test() -> typing.Any: return 1 diff --git a/starlark-rust/starlark/src/tests/bc/golden/opt_return_type_present.golden b/starlark-rust/starlark/src/tests/bc/golden/opt_return_type_present.golden index 8fed58e789a45..c6a223ab35d1a 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/opt_return_type_present.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/opt_return_type_present.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# 
STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test() -> str: return 'x' @@ -10,6 +10,6 @@ def test() -> str: return 'x' Max stack size: 1 Instructions: - 0: Const "x" &0 + 0: Const "x" ->&0 24: ReturnCheckType &0 32: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/opt_same_module_struct_getattr_inlined.golden b/starlark-rust/starlark/src/tests/bc/golden/opt_same_module_struct_getattr_inlined.golden index e9574a8970ead..b25e875f7c00d 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/opt_same_module_struct_getattr_inlined.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/opt_same_module_struct_getattr_inlined.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(): diff --git a/starlark-rust/starlark/src/tests/bc/golden/opt_type_is_inlined.golden b/starlark-rust/starlark/src/tests/bc/golden/opt_type_is_inlined.golden index 290c357a5f6e6..dd55cc5567f11 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/opt_type_is_inlined.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/opt_type_is_inlined.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def is_list(x): @@ -14,6 +14,6 @@ def test(x): Max stack size: 1 Instructions: - 0: TypeIs &x "list" &1 + 0: TypeIs &x "list" ->&1 24: Return &1 32: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/opt_unreachable_code_optimized_away.golden b/starlark-rust/starlark/src/tests/bc/golden/opt_unreachable_code_optimized_away.golden index 87646b3c3d229..81063e863e5b5 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/opt_unreachable_code_optimized_away.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/opt_unreachable_code_optimized_away.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(): diff --git a/starlark-rust/starlark/src/tests/bc/golden/speculative_exec_enum_inline.golden b/starlark-rust/starlark/src/tests/bc/golden/speculative_exec_enum_inline.golden index 6762c00413d41..fa134e48fbc88 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/speculative_exec_enum_inline.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/speculative_exec_enum_inline.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` MyEnum = enum("red", "green", "blue") @@ -14,6 +14,6 @@ def test(x): Max stack size: 1 Instructions: - 0: EqConst &x "red" &1 + 0: EqPtr &x MyEnum("red") ->&1 24: Return &1 32: End diff --git a/starlark-rust/starlark/src/tests/bc/golden/speculative_exec_format_speculatively_before_format_instr.golden b/starlark-rust/starlark/src/tests/bc/golden/speculative_exec_format_speculatively_before_format_instr.golden index eb5f27b0cb7e7..2a33e9af060bd 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/speculative_exec_format_speculatively_before_format_instr.golden +++ 
b/starlark-rust/starlark/src/tests/bc/golden/speculative_exec_format_speculatively_before_format_instr.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(): diff --git a/starlark-rust/starlark/src/tests/bc/golden/speculative_exec_methods_invoked_speculatively.golden b/starlark-rust/starlark/src/tests/bc/golden/speculative_exec_methods_invoked_speculatively.golden index ef631f905534c..c312388147838 100644 --- a/starlark-rust/starlark/src/tests/bc/golden/speculative_exec_methods_invoked_speculatively.golden +++ b/starlark-rust/starlark/src/tests/bc/golden/speculative_exec_methods_invoked_speculatively.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` def test(): diff --git a/starlark-rust/starlark/src/tests/bc/mod.rs b/starlark-rust/starlark/src/tests/bc/mod.rs deleted file mode 100644 index 2cb0e5f673dda..0000000000000 --- a/starlark-rust/starlark/src/tests/bc/mod.rs +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2018 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! Bytecode generation tests. 
- -mod and_or; -mod compr; -mod definitely_assigned; -mod expr; -mod for_stmt; -pub(crate) mod golden; -mod if_stmt; -mod isinstance; diff --git a/starlark-rust/starlark/src/tests/before_stmt.rs b/starlark-rust/starlark/src/tests/before_stmt.rs index cfe0ce79388b1..36064c34115cf 100644 --- a/starlark-rust/starlark/src/tests/before_stmt.rs +++ b/starlark-rust/starlark/src/tests/before_stmt.rs @@ -29,7 +29,7 @@ fn before_stmt() { let module = Module::new(); let globals = Globals::new(); let counter = Cell::new(0); - let before_stmt = |_span: FileSpanRef, _eval: &mut Evaluator<'_, '_>| { + let before_stmt = |_span: FileSpanRef, _eval: &mut Evaluator<'_, '_, '_>| { counter.set(counter.get() + 1); }; @@ -43,7 +43,7 @@ def f(): # 1 + 1 return x + 1 # 3 f() # 2 + 1 "; - let ast = AstModule::parse("a.star", program.to_owned(), &Dialect::Extended).unwrap(); + let ast = AstModule::parse("a.star", program.to_owned(), &Dialect::AllOptionsInternal).unwrap(); evaluator.eval_module(ast, &globals).unwrap(); assert_eq!(7, counter.get()); } diff --git a/starlark-rust/starlark/src/tests/call.rs b/starlark-rust/starlark/src/tests/call.rs index 90f36921d4cab..bde9862c46a69 100644 --- a/starlark-rust/starlark/src/tests/call.rs +++ b/starlark-rust/starlark/src/tests/call.rs @@ -72,10 +72,10 @@ def rec6(): rec2() assert::is_true("NAME=True\ndef f(*args, pkg=NAME, **kwargs): return pkg\nf()"); assert::is_true("def f(*args, pkg=False, **kwargs): return pkg\nf(pkg=True)"); assert::is_true("def f(a, b=1, *args, c=False): return c\nf(a=1,c=True)"); - assert::fail("def f(a, **kwargs, b=1): pass", "Default parameter after"); + assert::fail("def f(a, **kwargs, b=1): pass", "Parameter after kwargs"); assert::fail( "def f(a, b=1, **kwargs, c=1): pass", - "Default parameter after", + "Parameter after kwargs", ); assert::fail("def f(a, **kwargs, *args): pass", "parameter after another"); } @@ -240,7 +240,44 @@ def f(x, *, y): pass noop(f)(1) "#, - "Missing parameter `y`", + "Missing named-only parameter `y`", + ); +} + +#[test] +fn test_non_optional_after_optional() { + assert::pass( + r#" +def f(*args, x, y = 42, z): + return (args, x, y, z) +assert_eq(f(x = 1, z = 3), ((), 1, 42, 3)) +assert_eq(f(2, 4, y = 7, x = 1, z = 3), ((2, 4), 1, 7, 3)) +"#, + ); +} + +#[test] +fn test_pos_only_pass() { + assert::pass( + r#" +def f(x, /, y): + return x, y +assert_eq((1, 2), f(1, y=2)) +"#, + ); +} + +#[test] +fn test_pos_only_fail() { + assert::fail( + r#" +def f(x, /, y): + return x, y +g = noop(f) # Hide from static type checker. +g(x=1, y=2) +"#, + // TODO(nga): bad message. 
+ "Missing positional-only parameter `x` for call", ); } @@ -249,6 +286,7 @@ noop(f)(1) #[cfg_attr(rust_nightly, cfg(not(sanitize = "address")))] #[test] fn test_frame_size() { + use starlark::values::list_or_tuple::UnpackListOrTuple; use starlark_derive::starlark_module; use crate as starlark; @@ -258,7 +296,7 @@ fn test_frame_size() { #[starlark_module] fn natives(builder: &mut GlobalsBuilder) { - fn stack_ptr(args: Vec) -> anyhow::Result { + fn stack_ptr(args: UnpackListOrTuple) -> anyhow::Result { drop(args); let x = std::hint::black_box(1); @@ -282,8 +320,12 @@ G_F_PTR = g([]) let mut a = Assert::new(); a.globals_add(natives); let module = a.pass_module(program); - let one = usize::unpack_value(module.get("F_PTR").unwrap().value()).unwrap(); - let two = usize::unpack_value(module.get("G_F_PTR").unwrap().value()).unwrap(); + let one = usize::unpack_value(module.get("F_PTR").unwrap().value()) + .unwrap() + .unwrap(); + let two = usize::unpack_value(module.get("G_F_PTR").unwrap().value()) + .unwrap() + .unwrap(); assert!( two < one, "stack grows down everywhere we support starlark-rust" diff --git a/starlark-rust/starlark/src/tests/def.rs b/starlark-rust/starlark/src/tests/def.rs index d3484e776f4f8..dc2700c82175b 100644 --- a/starlark-rust/starlark/src/tests/def.rs +++ b/starlark-rust/starlark/src/tests/def.rs @@ -187,10 +187,17 @@ fn test_context_captured() { #[test] fn test_lambda_errors() { - // Test from https://github.com/facebookexperimental/starlark-rust/issues/36 + // Test from https://github.com/facebook/starlark-rust/issues/36 assert::fail("lambda a,a:a", "duplicated parameter name"); } +#[test] +fn test_lambda_errors_nested() { + // Test from https://issues.oss-fuzz.com/issues/369003809 + assert::fail("lambda: lambda a,a:a", "duplicated parameter name"); + assert::fail("[lambda a,a:a]", "duplicated parameter name"); +} + #[test] fn test_double_capture_and_freeze() { let mut a = Assert::new(); diff --git a/starlark-rust/starlark/src/tests/derive.rs b/starlark-rust/starlark/src/tests/derive.rs new file mode 100644 index 0000000000000..853d37631ba3d --- /dev/null +++ b/starlark-rust/starlark/src/tests/derive.rs @@ -0,0 +1,25 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +mod alloc_value; +mod attrs; +mod docs; +mod freeze; +mod module; +mod trace; +mod unpack_value; +mod unpack_value_attr; diff --git a/starlark-rust/starlark/src/tests/derive/alloc_value.rs b/starlark-rust/starlark/src/tests/derive/alloc_value.rs new file mode 100644 index 0000000000000..7dc44516b94d8 --- /dev/null +++ b/starlark-rust/starlark/src/tests/derive/alloc_value.rs @@ -0,0 +1,45 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Tests for `#[derive(AllocValue)]`. + +#![allow(dead_code)] // Only check it compiles. + +use starlark_derive::AllocFrozenValue; + +use crate as starlark; +use crate::values::type_repr::StarlarkTypeRepr; +use crate::values::AllocValue; + +#[derive(StarlarkTypeRepr, AllocValue, AllocFrozenValue)] +enum AllocNoVariant {} + +#[derive(StarlarkTypeRepr, AllocValue, AllocFrozenValue)] +enum AllocOneVariant { + Int(u32), +} + +#[derive(StarlarkTypeRepr, AllocValue, AllocFrozenValue)] +enum AllocTwoVariants { + Int(u32), + String(String), +} + +#[derive(StarlarkTypeRepr, AllocValue, AllocFrozenValue)] +enum AllocWithLifetime<'v> { + String(&'v str), +} diff --git a/starlark-rust/starlark/src/tests/derive/attrs.rs b/starlark-rust/starlark/src/tests/derive/attrs.rs index 13a1b3d11e0ba..3c3ff78405e90 100644 --- a/starlark-rust/starlark/src/tests/derive/attrs.rs +++ b/starlark-rust/starlark/src/tests/derive/attrs.rs @@ -38,7 +38,7 @@ fn test_derive_attrs() { NoSerialize, Allocative )] - #[display(fmt = "{:?}", self)] + #[display("{:?}", self)] struct Example { hello: String, #[starlark(skip)] @@ -64,7 +64,7 @@ fn test_derive_attrs() { NoSerialize, Allocative )] - #[display(fmt = "{}", foo)] + #[display("{}", foo)] struct Nested { foo: String, } diff --git a/starlark-rust/starlark/src/tests/derive/docs.rs b/starlark-rust/starlark/src/tests/derive/docs.rs index d7313c688c639..50a46404f8d90 100644 --- a/starlark-rust/starlark/src/tests/derive/docs.rs +++ b/starlark-rust/starlark/src/tests/derive/docs.rs @@ -17,24 +17,21 @@ use allocative::Allocative; use derive_more::Display; -use maplit::hashmap; use serde::Serialize; use serde::Serializer; use starlark_derive::starlark_module; use starlark_derive::starlark_value; use starlark_derive::Freeze; use starlark_derive::NoSerialize; -use starlark_derive::StarlarkDocs; use starlark_derive::Trace; use crate as starlark; use crate::any::ProvidesStaticType; use crate::coerce::Coerce; -use crate::docs::get_registered_starlark_docs; -use crate::docs::DocItem; use crate::docs::DocMember; use crate::docs::DocString; use crate::docs::DocStringKind; +use crate::docs::DocType; use crate::environment::Methods; use crate::environment::MethodsBuilder; use crate::environment::MethodsStatic; @@ -42,7 +39,6 @@ use crate::starlark_complex_value; use crate::starlark_simple_value; use crate::values::StarlarkValue; use crate::values::ValueLike; -use crate::wasm::is_wasm; /// Main module docs #[starlark_module] @@ -54,14 +50,7 @@ fn object_docs_1(_: &mut MethodsBuilder) { } } -#[derive( - Debug, - Display, - ProvidesStaticType, - NoSerialize, - StarlarkDocs, - Allocative -)] +#[derive(Debug, Display, ProvidesStaticType, NoSerialize, Allocative)] struct TestExample {} starlark_simple_value!(TestExample); @@ -85,7 +74,6 @@ impl<'v> StarlarkValue<'v> for TestExample { Trace, Freeze, ProvidesStaticType, - StarlarkDocs, Allocative )] #[repr(C)] @@ -106,8 +94,7 @@ where starlark_complex_value!(ComplexTestExample); #[starlark_value(type = "ComplexTestExample")] -impl<'v, T: ValueLike<'v> + 'v + ProvidesStaticType<'v>> StarlarkValue<'v> - for 
ComplexTestExampleGen<T> +impl<'v, T: ValueLike<'v> + ProvidesStaticType<'v>> StarlarkValue<'v> for ComplexTestExampleGen<T> where Self: ProvidesStaticType<'v>, { @@ -122,19 +109,7 @@ #[test] fn test_derive_docs() { - if is_wasm() { - // `inventory` doesn't work on wasm. - return; - } - - let docs = get_registered_starlark_docs() - .into_iter() - .find(|d| d.id.name == "TestExample") - .unwrap(); - let obj = match docs.item { - DocItem::Object(o) => o, - _ => panic!("Expected object as docitem"), - }; + let obj = DocType::from_starlark_value::<TestExample>(); assert_eq!( DocString::from_docstring(DocStringKind::Rust, "Main module docs"), @@ -150,24 +125,11 @@ }) .unwrap() ); - assert!(docs.custom_attrs.is_empty()); } #[test] fn test_derive_docs_on_complex_values() { - if is_wasm() { - // `inventory` doesn't work on wasm. - return; - } - - let complex_docs = get_registered_starlark_docs() - .into_iter() - .find(|d| d.id.name == "ComplexTestExample") - .unwrap(); - let complex_obj = match complex_docs.item { - DocItem::Object(o) => o, - _ => panic!("Expected object as docitem"), - }; + let complex_obj = DocType::from_starlark_value::<ComplexTestExample>(); assert_eq!( DocString::from_docstring(DocStringKind::Rust, "Main module docs"), @@ -184,51 +146,4 @@ }) .unwrap() ); - assert!(complex_docs.custom_attrs.is_empty()); -} - -/// Main module docs -#[starlark_module] -fn object_docs_2(_: &mut MethodsBuilder) {} - -#[derive( - Debug, - Display, - ProvidesStaticType, - NoSerialize, - StarlarkDocs, - Allocative -)] -#[starlark_docs(key = "value", key2 = "value2")] -struct TestAttrExample {} - -starlark_simple_value!(TestAttrExample); - -#[starlark_value(type = "TestAttrExample")] -impl<'v> StarlarkValue<'v> for TestAttrExample { - fn get_methods() -> Option<&'static Methods> - where - Self: Sized, - { - static RES: MethodsStatic = MethodsStatic::new(); - RES.methods(object_docs_2) - } -} - -#[test] -fn test_derive_docs_custom_attrs() { - if is_wasm() { - // `inventory` doesn't work on wasm. - return; - } - - let docs = get_registered_starlark_docs() - .into_iter() - .find(|d| d.id.name == "TestAttrExample") - .unwrap(); - let expected_attrs = hashmap! { - "key".to_owned()=> "value".to_owned(), - "key2".to_owned()=> "value2".to_owned(), - }; - assert_eq!(expected_attrs, docs.custom_attrs); } diff --git a/starlark-rust/starlark/src/tests/derive/freeze.rs b/starlark-rust/starlark/src/tests/derive/freeze.rs new file mode 100644 index 0000000000000..f0a271fb7b71a --- /dev/null +++ b/starlark-rust/starlark/src/tests/derive/freeze.rs @@ -0,0 +1,23 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +mod basic; +mod bounds; +mod enums; +mod identity; +mod validator; +mod validator_order; diff --git a/starlark-rust/starlark/src/tests/derive/freeze/basic.rs b/starlark-rust/starlark/src/tests/derive/freeze/basic.rs index b6390341d99aa..b2b9a908a3f43 100644 --- a/starlark-rust/starlark/src/tests/derive/freeze/basic.rs +++ b/starlark-rust/starlark/src/tests/derive/freeze/basic.rs @@ -17,37 +17,7 @@ use crate as starlark; use crate::values::Freeze; -use crate::values::Freezer; -use crate::values::FrozenHeap; - -#[derive(Freeze)] -struct TestStruct { - s: String, - #[freeze(identity)] - s2: String, -} - -#[derive(Freeze)] -struct TestAnonStruct(String, #[freeze(identity)] String); #[derive(Freeze)] +#[allow(dead_code)] struct TestUnitStruct; - -#[test] -fn test_struct() -> anyhow::Result<()> { - let t = TestStruct { - s: "test".to_owned(), - s2: "test2".to_owned(), - }; - let freezer = Freezer::new(FrozenHeap::new()); - t.freeze(&freezer)?; - Ok(()) -} - -#[test] -fn test_anon_struct() -> anyhow::Result<()> { - let t = TestAnonStruct("test".to_owned(), "test2".to_owned()); - let freezer = Freezer::new(FrozenHeap::new()); - t.freeze(&freezer)?; - Ok(()) -} diff --git a/starlark-rust/starlark/src/tests/derive/freeze/identity.rs b/starlark-rust/starlark/src/tests/derive/freeze/identity.rs new file mode 100644 index 0000000000000..5f0e09ab28d49 --- /dev/null +++ b/starlark-rust/starlark/src/tests/derive/freeze/identity.rs @@ -0,0 +1,61 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#![allow(dead_code)] + +use starlark::values::Freeze; + +use crate as starlark; +use crate::values::Freezer; +use crate::values::FrozenHeap; + +struct NonFreeze(u32); + +#[derive(Freeze)] +struct TestStruct { + s: String, + #[freeze(identity)] + s2: NonFreeze, +} + +#[derive(Freeze)] +struct TestUnitStruct(String, #[freeze(identity)] NonFreeze); + +#[derive(Freeze)] +enum TestEnum { + A(String), + B(#[freeze(identity)] NonFreeze), +} + +#[test] +fn test_struct() -> anyhow::Result<()> { + let t = TestStruct { + s: "test".to_owned(), + s2: NonFreeze(55), + }; + let freezer = Freezer::new(FrozenHeap::new()); + t.freeze(&freezer)?; + Ok(()) +} + +#[test] +fn test_anon_struct() -> anyhow::Result<()> { + let t = TestUnitStruct("test".to_owned(), NonFreeze(56)); + let freezer = Freezer::new(FrozenHeap::new()); + t.freeze(&freezer)?; + Ok(()) +} diff --git a/starlark-rust/starlark/src/tests/derive/freeze/mod.rs b/starlark-rust/starlark/src/tests/derive/freeze/mod.rs deleted file mode 100644 index 577057aa5cffc..0000000000000 --- a/starlark-rust/starlark/src/tests/derive/freeze/mod.rs +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2018 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -mod basic; -mod bounds; -mod enums; -mod validator; -mod validator_order; diff --git a/starlark-rust/starlark/src/tests/derive/mod.rs b/starlark-rust/starlark/src/tests/derive/mod.rs deleted file mode 100644 index d1451ce55d3e8..0000000000000 --- a/starlark-rust/starlark/src/tests/derive/mod.rs +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2018 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -mod attrs; -mod docs; -mod freeze; -mod module; -mod trace; -mod unpack_value; -mod unpack_value_attr; diff --git a/starlark-rust/starlark/src/tests/derive/module.rs b/starlark-rust/starlark/src/tests/derive/module.rs new file mode 100644 index 0000000000000..b8b77f5babce0 --- /dev/null +++ b/starlark-rust/starlark/src/tests/derive/module.rs @@ -0,0 +1,27 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +mod basic; +mod default_value; +mod kwargs; +mod methods; +mod named_positional; +mod other_attributes; +mod return_impl; +mod special_params; +mod type_annotation; +mod unpack_value; diff --git a/starlark-rust/starlark/src/tests/derive/module/basic.rs b/starlark-rust/starlark/src/tests/derive/module/basic.rs index e01e353e7d4a6..0c7132b8f532b 100644 --- a/starlark-rust/starlark/src/tests/derive/module/basic.rs +++ b/starlark-rust/starlark/src/tests/derive/module/basic.rs @@ -21,18 +21,20 @@ use crate as starlark; use crate::assert::Assert; use crate::environment::GlobalsBuilder; use crate::environment::MethodsBuilder; +use crate::values::list_or_tuple::UnpackListOrTuple; use crate::values::Heap; use crate::values::StringValue; use crate::values::Value; +use crate::values::ValueOfUnchecked; // The examples from the starlark_module documentation. 
#[test] fn test_starlark_module() { #[starlark_module] fn global(builder: &mut GlobalsBuilder) { - fn cc_binary(name: &str, srcs: Vec<&str>) -> anyhow::Result<String> { + fn cc_binary(name: &str, srcs: UnpackListOrTuple<&str>) -> anyhow::Result<String> { // real implementation may write it to a global variable - Ok(format!("{:?} {:?}", name, srcs)) + Ok(format!("{:?} {:?}", name, srcs.items)) } } @@ -60,3 +62,15 @@ fn test_starlark_methods() { MethodsBuilder::new().with(methods).build(); } + +#[test] +fn test_static_allowed() { + #[starlark_module] + fn globals(globals: &mut GlobalsBuilder) { + fn test<'v>() -> anyhow::Result<ValueOfUnchecked<'v, String>> { + panic!() + } + } + + GlobalsBuilder::standard().with(globals).build(); +} diff --git a/starlark-rust/starlark/src/tests/derive/module/kwargs.rs b/starlark-rust/starlark/src/tests/derive/module/kwargs.rs new file mode 100644 index 0000000000000..9d5bca74715f1 --- /dev/null +++ b/starlark-rust/starlark/src/tests/derive/module/kwargs.rs @@ -0,0 +1,56 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use starlark_derive::starlark_module; +use starlark_map::small_map::SmallMap; + +use crate as starlark; +use crate::assert::Assert; +use crate::environment::GlobalsBuilder; + +#[starlark_module] +fn test_kwargs_module(globals: &mut GlobalsBuilder) { + fn pos_kwargs( + #[starlark(require = pos)] a: u32, + #[starlark(require = pos)] b: bool, + #[starlark(kwargs)] kwargs: SmallMap<String, u32>, + ) -> anyhow::Result<String> { + Ok(format!("a={} b={} kwargs={:?}", a, b, kwargs)) + } + + fn pos_named_kwargs( + #[starlark(require = pos)] a: u32, + #[starlark(require = named)] b: bool, + #[starlark(kwargs)] kwargs: SmallMap<String, u32>, + ) -> anyhow::Result<String> { + Ok(format!("a={} b={} kwargs={:?}", a, b, kwargs)) + } +} + +#[test] +fn test_kwargs() { + let mut a = Assert::new(); + a.globals_add(test_kwargs_module); + a.eq( + r#"'a=1 b=true kwargs={"x": 3}'"#, + "pos_kwargs(1, True, x=3)", + ); + a.eq( + r#"'a=1 b=true kwargs={"x": 3}'"#, + "pos_named_kwargs(1, b=True, x=3)", + ); +} diff --git a/starlark-rust/starlark/src/tests/derive/module/methods.rs b/starlark-rust/starlark/src/tests/derive/module/methods.rs index c9c3c17a97614..4f7ee5c20b1df 100644 --- a/starlark-rust/starlark/src/tests/derive/module/methods.rs +++ b/starlark-rust/starlark/src/tests/derive/module/methods.rs @@ -40,7 +40,7 @@ use crate::values::ValueLike; NoSerialize, Allocative )] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] struct Applaud { value: i32, } diff --git a/starlark-rust/starlark/src/tests/derive/module/mod.rs b/starlark-rust/starlark/src/tests/derive/module/mod.rs deleted file mode 100644 index f852164490803..0000000000000 --- a/starlark-rust/starlark/src/tests/derive/module/mod.rs +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright 2018 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-mod basic;
-mod default_value;
-mod methods;
-mod named_positional;
-mod return_impl;
-mod special_params;
-mod type_annotation;
-mod unpack_value;
diff --git a/starlark-rust/starlark/src/tests/derive/module/named_positional.rs b/starlark-rust/starlark/src/tests/derive/module/named_positional.rs
index 014ef37ba784c..810779acaf277 100644
--- a/starlark-rust/starlark/src/tests/derive/module/named_positional.rs
+++ b/starlark-rust/starlark/src/tests/derive/module/named_positional.rs
@@ -20,6 +20,7 @@ use starlark_derive::starlark_module;
 use crate as starlark;
 use crate::assert::Assert;
 use crate::environment::GlobalsBuilder;
+use crate::values::tuple::UnpackTuple;
 
 #[starlark_module]
 fn named_positional_functions(globals: &mut GlobalsBuilder) {
@@ -35,16 +36,19 @@ fn named_positional_functions(globals: &mut GlobalsBuilder) {
         Ok(x)
     }
 
-    fn named_after_args(#[starlark(args)] star_args: Vec<i32>, x: i32) -> anyhow::Result<i32> {
-        Ok(x + star_args.iter().sum::<i32>())
+    fn named_after_args(
+        #[starlark(args)] star_args: UnpackTuple<i32>,
+        x: i32,
+    ) -> anyhow::Result<i32> {
+        Ok(x + star_args.items.iter().sum::<i32>())
     }
 
     // Same as above, but with explicit redundant annotation.
     fn named_after_args_explicitly_marked(
-        #[starlark(args)] args: Vec<i32>,
+        #[starlark(args)] args: UnpackTuple<i32>,
         #[starlark(require = named)] x: i32,
     ) -> anyhow::Result<i32> {
-        Ok(x + args.iter().sum::<i32>())
+        Ok(x + args.items.iter().sum::<i32>())
     }
 }
 
@@ -69,7 +73,7 @@ fn test_named_only() {
     let mut a = Assert::new();
     a.globals_add(named_positional_functions);
     a.eq("31", "named_only(x=31)");
-    a.fail("noop(named_only)(37)", "Missing parameter");
+    a.fail("noop(named_only)(37)", "Missing named-only parameter");
 }
 
 #[test]
@@ -77,7 +81,10 @@ fn test_named_after_args() {
     let mut a = Assert::new();
     a.globals_add(named_positional_functions);
     a.eq("13", "named_after_args(1, 2, x=10)");
-    a.fail("noop(named_after_args)(1, 2, 3)", "Missing parameter");
+    a.fail(
+        "noop(named_after_args)(1, 2, 3)",
+        "Missing named-only parameter",
+    );
 }
 
 #[test]
@@ -87,6 +94,6 @@ fn test_named_after_args_explicitly_marked() {
     a.eq("13", "named_after_args_explicitly_marked(1, 2, x=10)");
     a.fail(
         "noop(named_after_args_explicitly_marked)(1, 2, 3)",
-        "Missing parameter",
+        "Missing named-only parameter",
     );
 }
diff --git a/starlark-rust/starlark/src/tests/derive/module/other_attributes.rs b/starlark-rust/starlark/src/tests/derive/module/other_attributes.rs
new file mode 100644
index 0000000000000..0744aa46b7193
--- /dev/null
+++ b/starlark-rust/starlark/src/tests/derive/module/other_attributes.rs
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2018 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! In these tests, parameters are declared as unused;
+//! the attributes should be preserved and no warnings should be emitted.
+
+use starlark_derive::starlark_module;
+
+use crate as starlark;
+use crate::environment::GlobalsBuilder;
+use crate::environment::MethodsBuilder;
+use crate::values::none::NoneType;
+
+#[starlark_module]
+fn test_other_attributes_in_globals(globals: &mut GlobalsBuilder) {
+    fn test_global(#[allow(unused_variables)] foo: u32) -> anyhow::Result<NoneType> {
+        Ok(NoneType)
+    }
+}
+
+#[starlark_module]
+fn test_other_attributes_in_methods(methods: &mut MethodsBuilder) {
+    fn test_method(#[allow(unused_variables)] this: u32) -> anyhow::Result<NoneType> {
+        Ok(NoneType)
+    }
+}
+
+#[starlark_module]
+fn test_other_attributes_in_attributes(methods: &mut MethodsBuilder) {
+    #[starlark(attribute)]
+    fn test_attribute(
+        // TODO(nga): this marker is a no-op.
+        #[allow(unused_variables)] this: u32,
+    ) -> anyhow::Result<NoneType> {
+        Ok(NoneType)
+    }
+}
diff --git a/starlark-rust/starlark/src/tests/derive/module/type_annotation.rs b/starlark-rust/starlark/src/tests/derive/module/type_annotation.rs
index 6848b8e586d42..50114d36069f3 100644
--- a/starlark-rust/starlark/src/tests/derive/module/type_annotation.rs
+++ b/starlark-rust/starlark/src/tests/derive/module/type_annotation.rs
@@ -33,7 +33,7 @@ use crate::values::StarlarkValue;
     NoSerialize,
     Allocative
 )]
-#[display(fmt = "foo")]
+#[display("foo")]
 struct Foo;
 
 #[starlark_value(type = "Foo")]
diff --git a/starlark-rust/starlark/src/tests/derive/module/unpack_value.rs b/starlark-rust/starlark/src/tests/derive/module/unpack_value.rs
index 37cf1e6330051..b25db88e0157e 100644
--- a/starlark-rust/starlark/src/tests/derive/module/unpack_value.rs
+++ b/starlark-rust/starlark/src/tests/derive/module/unpack_value.rs
@@ -22,9 +22,8 @@ use starlark_derive::starlark_module;
 use crate as starlark;
 use crate::assert::Assert;
 use crate::environment::GlobalsBuilder;
-use crate::values::dict::DictOf;
-use crate::values::list::ListOf;
-use crate::values::structs::StructOf;
+use crate::values::dict::UnpackDictEntries;
+use crate::values::list::UnpackList;
 use crate::values::Value;
 use crate::values::ValueOf;
 
@@ -34,82 +33,85 @@ fn validate_module(builder: &mut GlobalsBuilder) {
     fn with_int<'v>(v: ValueOf<'v, i32>) -> anyhow::Result<(Value<'v>, String)> {
         Ok((*v, format!("{}", v.typed)))
     }
-    fn with_int_list<'v>(v: ListOf<'v, i32>) -> anyhow::Result<(Value<'v>, String)> {
-        let repr = v.to_vec().iter().join(", ");
-        Ok((*v, repr))
+    fn with_int_list<'v>(v: ValueOf<'v, UnpackList<i32>>) -> anyhow::Result<(Value<'v>, String)> {
+        let repr = v.typed.items.iter().join(", ");
+        Ok((v.value, repr))
     }
-    fn with_list_list<'v>(v: ListOf<'v, ListOf<'v, i32>>) -> anyhow::Result<(Value<'v>, String)> {
+    fn with_list_list<'v>(
+        v: ValueOf<'v, UnpackList<ValueOf<'v, UnpackList<i32>>>>,
+    ) -> anyhow::Result<(Value<'v>, String)> {
         let repr = v
-            .to_vec()
+            .typed
+            .items
             .iter()
-            .map(|l| l.to_vec().iter().join(", "))
+            .map(|l| l.typed.items.iter().join(", "))
             .join(" + ");
-        Ok((*v, repr))
+        Ok((v.value, repr))
     }
     fn with_dict_list<'v>(
-        v: ListOf<'v, DictOf<'v, i32, i32>>,
+        v: ValueOf<'v, UnpackList<UnpackDictEntries<i32, i32>>>,
     ) -> anyhow::Result<(Value<'v>, String)> {
         let repr = v
-            .to_vec()
+            .typed
+            .items
             .iter()
             .map(|l| {
-                l.to_dict()
+                l.entries
                     .iter()
                     .map(|(k, v)| format!("{}: {}", k, v))
                     .join(", ")
             })
             .join(" + ");
-        Ok((*v, repr))
+        Ok((v.value, repr))
     }
-    fn with_int_dict<'v>(v: DictOf<'v, i32, i32>) -> anyhow::Result<(Value<'v>, String)> {
+    fn with_int_dict<'v>(
+        v: ValueOf<'v, UnpackDictEntries<i32, i32>>,
+    ) -> anyhow::Result<(Value<'v>, String)> {
         let repr = v
-            .to_dict()
+            .typed
+            .entries
             .iter()
             .map(|(k, v)| format!("{}: {}", k, v))
             .join(" + ");
-        Ok((*v, repr))
+        Ok((v.value, repr))
     }
     fn with_list_dict<'v>(
-        v: DictOf<'v, i32, ListOf<'v, i32>>,
+        v: ValueOf<'v, UnpackDictEntries<i32, ValueOf<'v, UnpackList<i32>>>>,
     ) -> anyhow::Result<(Value<'v>, String)> {
         let repr = v
-            .to_dict()
+            .typed
+            .entries
             .iter()
-            .map(|(k, v)| format!("{}: {}", k, v.to_vec().iter().join(", ")))
+            .map(|(k, v)| format!("{}: {}", k, v.typed.items.iter().join(", ")))
             .join(" + ");
-        Ok((*v, repr))
+        Ok((v.value, repr))
     }
     fn with_dict_dict<'v>(
-        v: DictOf<'v, i32, DictOf<'v, i32, i32>>,
+        v: ValueOf<'v, UnpackDictEntries<i32, UnpackDictEntries<i32, i32>>>,
    ) -> anyhow::Result<(Value<'v>, String)> {
         let repr = v
-            .to_dict()
+            .typed
+            .entries
             .iter()
             .map(|(k, v)| {
                 let inner_repr = v
-                    .to_dict()
+                    .entries
                     .iter()
                     .map(|(k2, v2)| format!("{}:{}", k2, v2))
                     .join(", ");
                 format!("{}: {}", k, inner_repr)
             })
             .join(" + ");
-        Ok((*v, repr))
-    }
-    fn with_struct_int<'v>(v: StructOf<'v, i32>) -> anyhow::Result<(Value<'v>, String)> {
-        let repr = v
-            .to_map()
-            .iter()
-            .map(|(k, v)| format!("{}={}", k, v))
-            .join(" + ");
-        Ok((v.to_value(), repr))
+        Ok((v.value, repr))
     }
-    fn with_either(v: Either<i32, Either<String, ListOf<'v, i32>>>) -> anyhow::Result<String> {
+    fn with_either<'v>(
+        v: Either<i32, Either<String, ValueOf<'v, UnpackList<i32>>>>,
+    ) -> anyhow::Result<String> {
         match v {
             Either::Left(i) => Ok(i.to_string()),
             Either::Right(nested) => match nested {
                 Either::Left(s) => Ok(s),
-                Either::Right(l) => Ok(l.to_repr()),
+                Either::Right(l) => Ok(l.value.to_repr()),
             },
         }
     }
@@ -163,14 +165,6 @@ fn test_dict_of() {
     a.eq(expected, test);
 }
 
-#[test]
-fn test_struct_of() {
-    let mut a = Assert::new();
-    a.globals_add(validate_module);
-    a.eq("(struct(a=1), '\"a\"=1')", "with_struct_int(struct(a=1))");
-    a.fail("with_struct_int(struct(a=True))", BAD);
-}
-
 #[test]
 fn test_either_of() {
     let mut a = Assert::new();
     a.globals_add(validate_module);
diff --git a/starlark-rust/starlark/src/tests/derive/trace.rs b/starlark-rust/starlark/src/tests/derive/trace.rs
new file mode 100644
index 0000000000000..b0f5e73620c66
--- /dev/null
+++ b/starlark-rust/starlark/src/tests/derive/trace.rs
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2018 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+mod bounds;
+mod enums;
+mod statics;
diff --git a/starlark-rust/starlark/src/tests/derive/trace/bounds.rs b/starlark-rust/starlark/src/tests/derive/trace/bounds.rs
new file mode 100644
index 0000000000000..e97f2df71f8ad
--- /dev/null
+++ b/starlark-rust/starlark/src/tests/derive/trace/bounds.rs
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2018 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#![allow(dead_code)]
+
+use crate as starlark;
+use crate::values::Trace;
+
+#[derive(Trace)]
+#[trace(bound = "A: Trace<'v>, B: 'static")]
+struct TestTraceWithBounds<A, B> {
+    a: A,
+    #[trace(static)]
+    b: B,
+}
+
+struct NotTrace;
+
+fn assert_trace<'v, T: Trace<'v>>() {}
+
+fn test() {
+    assert_trace::<TestTraceWithBounds<String, NotTrace>>();
+}
diff --git a/starlark-rust/starlark/src/tests/derive/trace/mod.rs b/starlark-rust/starlark/src/tests/derive/trace/mod.rs
deleted file mode 100644
index 0153604f4c0b3..0000000000000
--- a/starlark-rust/starlark/src/tests/derive/trace/mod.rs
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright 2018 The Starlark in Rust Authors.
- * Copyright (c) Facebook, Inc. and its affiliates.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-mod enums;
-mod statics;
diff --git a/starlark-rust/starlark/src/tests/derive/trace/statics.rs b/starlark-rust/starlark/src/tests/derive/trace/statics.rs
index a112f98c06d29..9fb17b2d78a46 100644
--- a/starlark-rust/starlark/src/tests/derive/trace/statics.rs
+++ b/starlark-rust/starlark/src/tests/derive/trace/statics.rs
@@ -15,6 +15,8 @@
  * limitations under the License.
  */
 
+use std::fmt::Display;
+
 use starlark_derive::Trace;
 
 use crate as starlark;
@@ -26,7 +28,11 @@ struct TraceWithStatic<'v> {
     actual_value: Value<'v>,
     // This field doesn't have a Trace trait, but should be ignored
     // because it looks like it is static
-    ignored_because_static: StaticType<'static, std::string::String>,
+    ignored_because_static: StaticType<'static, String>,
+    ignored_because_static_in_dyn: Box<dyn Display>,
+    ignored_because_static_in_dyn_with_static_bound: Box<dyn Display + 'static>,
+    #[trace(static)] // This is a no-op, because it is inferred automatically.
+ explicit_static: String, } #[allow(dead_code)] diff --git a/starlark-rust/starlark/src/tests/derive/unpack_value.rs b/starlark-rust/starlark/src/tests/derive/unpack_value.rs index d600ac6a0202e..e92f470e55548 100644 --- a/starlark-rust/starlark/src/tests/derive/unpack_value.rs +++ b/starlark-rust/starlark/src/tests/derive/unpack_value.rs @@ -68,25 +68,25 @@ fn test_starlark_type_repr() { fn test_unpack_value() { assert_eq!( Some(JustInt::Int(17)), - JustInt::unpack_value(Value::testing_new_int(17)), + JustInt::unpack_value(Value::testing_new_int(17)).unwrap(), ); assert_eq!( Some(IntOrStr::Int(19)), - IntOrStr::unpack_value(Value::testing_new_int(19)), + IntOrStr::unpack_value(Value::testing_new_int(19)).unwrap(), ); assert_eq!( Some(IntOrStr::Str("abc".to_owned())), - IntOrStr::unpack_value(const_frozen_string!("abc").to_value()), + IntOrStr::unpack_value(const_frozen_string!("abc").to_value()).unwrap(), ); assert_eq!( Some(WithLifetime::Int(23)), - WithLifetime::unpack_value(Value::testing_new_int(23)), + WithLifetime::unpack_value(Value::testing_new_int(23)).unwrap(), ); assert_eq!( Some(WithLifetime::Str("def")), - WithLifetime::unpack_value(const_frozen_string!("def").to_value()), + WithLifetime::unpack_value(const_frozen_string!("def").to_value()).unwrap(), ); } diff --git a/starlark-rust/starlark/src/tests/derive/unpack_value_attr.rs b/starlark-rust/starlark/src/tests/derive/unpack_value_attr.rs index b8f4875bf6fab..e3832f88d8e2b 100644 --- a/starlark-rust/starlark/src/tests/derive/unpack_value_attr.rs +++ b/starlark-rust/starlark/src/tests/derive/unpack_value_attr.rs @@ -33,7 +33,7 @@ use crate::values::Value; ProvidesStaticType, Allocative )] -#[display(fmt = "ValueWithLifetimeParam")] +#[display("ValueWithLifetimeParam")] struct ValueWithLifetimeParam<'v>(Value<'v>); #[derive( Debug, @@ -42,7 +42,7 @@ struct ValueWithLifetimeParam<'v>(Value<'v>); ProvidesStaticType, Allocative )] -#[display(fmt = "ValueWithoutParam")] +#[display("ValueWithoutParam")] struct ValueWithoutParam(String); #[starlark_value(type = "ValueWithLifetimeParam", StarlarkTypeRepr, UnpackValue)] diff --git a/starlark-rust/starlark/src/tests/docs/golden.rs b/starlark-rust/starlark/src/tests/docs/golden.rs deleted file mode 100644 index 95b724e78685a..0000000000000 --- a/starlark-rust/starlark/src/tests/docs/golden.rs +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright 2018 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -use starlark_syntax::golden_test_template::golden_test_template; - -use crate::docs::Doc; -use crate::docs::DocItem; -use crate::docs::MarkdownFlavor; -use crate::docs::RenderMarkdown; - -pub(crate) fn docs_golden_test(test_name: &str, doc: DocItem) -> String { - let output = Doc::named_item("name".to_owned(), doc).render_markdown(MarkdownFlavor::DocFile); - - golden_test_template( - &format!("src/tests/docs/golden/{test_name}.golden.md"), - &output, - ); - - output -} diff --git a/starlark-rust/starlark/src/tests/docs/golden/module.golden.md b/starlark-rust/starlark/src/tests/docs/golden/module.golden.md deleted file mode 100644 index d5e3d3816f79d..0000000000000 --- a/starlark-rust/starlark/src/tests/docs/golden/module.golden.md +++ /dev/null @@ -1,109 +0,0 @@ -# @generated -# To regenerate, run: -# ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests -# ``` - -# name - -These are where the module docs go - -## MAGIC - -```python -MAGIC: int -``` - ---- - -## func1 - -```python -def func1(foo: str) -> str -``` - -Docs for func1 - -#### Parameters - -* `foo`: Docs for foo - - -#### Returns - -The string 'func1' - ---- - -## func2 - -```python -def func2() -> str -``` - ---- - -## func3 - -```python -def func3( - a1: int, - a2: int = _, - step: int = 1, - / -) -> str -``` - -A function with only positional arguments. - -#### `.type` attribute - -Produces `"magic"` - -#### Details - -And a slightly longer description. With some example code: - -```python -func3(1) -``` - -And some assertions: - -```rust -1 == 1 -``` - ---- - -## notypes - -```python -def notypes(a) -``` - ---- - -## pos\_either\_named - -```python -def pos_either_named( - a: int, - /, - b: int, - *, - c: int -) -> None -``` - ---- - -## with\_defaults - -```python -def with_defaults( - explicit_default: list[str] = [], - hidden_default: list[str] = _, - string_default: str = "my_default" -) -> None -``` diff --git a/starlark-rust/starlark/src/tests/docs/mod.rs b/starlark-rust/starlark/src/tests/docs/mod.rs deleted file mode 100644 index 51a6e4352321f..0000000000000 --- a/starlark-rust/starlark/src/tests/docs/mod.rs +++ /dev/null @@ -1,272 +0,0 @@ -/* - * Copyright 2018 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -use allocative::Allocative; -use derive_more::Display; -use serde::Serialize; -use starlark_derive::starlark_module; -use starlark_derive::starlark_value; -use starlark_derive::NoSerialize; - -use crate as starlark; -use crate::any::ProvidesStaticType; -use crate::assert; -use crate::docs::Doc; -use crate::docs::DocItem; -use crate::docs::MarkdownFlavor; -use crate::docs::RenderMarkdown; -use crate::environment::GlobalsBuilder; -use crate::environment::Methods; -use crate::environment::MethodsBuilder; -use crate::environment::MethodsStatic; -use crate::tests::docs::golden::docs_golden_test; -use crate::values::none::NoneType; -use crate::values::Heap; -use crate::values::StarlarkValue; -use crate::values::Value; - -mod golden; -mod rustdocs; - -const STARLARK_CODE: &str = r#" -""" -This is the summary of the module's docs - -Some extra details can go here, - and indentation is kept as expected -""" - -def f1(a, b: str, c: int = 5, *, d: str = "some string", **kwargs) -> list[str]: - """ - Summary line goes here - - Args: - a: The docs for a - b: The docs for b - c: The docs for c, but these - go onto two lines - **kwargs: Docs for the keyword args - - Returns: - A string repr of the args - """ - return [str((a, b, c, d, repr(kwargs)))] - -def f2(a, *args: list[str]): - """ - This is a function with *args, and no return type - - Args: - *args: Only doc this arg - """ - return None - -def f3(a: str) -> str: - return a - -def f4(a: str) -> str: - """ This is a docstring with no 'Args:' section """ - return a - -# Not public, so shouldn't show up -def _do_not_export(): - pass -"#; - -#[derive( - Debug, - derive_more::Display, - ProvidesStaticType, - Allocative, - NoSerialize -)] -#[display(fmt = "magic")] -struct Magic; - -#[starlark_value(type = "magic")] -impl<'v> StarlarkValue<'v> for Magic {} - -/// These are where the module docs go -#[starlark_module] -fn module(builder: &mut GlobalsBuilder) { - const MAGIC: i32 = 42; - - /// Docs for func1 - /// - /// # Arguments - /// * `foo`: Docs for foo - /// - /// # Returns - /// The string 'func1' - fn func1(foo: String) -> anyhow::Result { - let _ignore = foo; - Ok("func1".to_owned()) - } - - fn func2() -> anyhow::Result { - Ok("func2".to_owned()) - } - - /// A function with only positional arguments. - /// - /// And a slightly longer description. 
With some example code: - /// - /// ```python - /// func3(1) - /// ``` - /// - /// And some assertions: - /// - /// ```rust - /// # starlark::assert::all_true(r#" - /// 1 == 1 - /// # "#); - /// ``` - #[starlark(as_type = Magic)] - fn func3( - #[starlark(require = pos)] a1: i32, - #[starlark(require = pos)] a2: Option, - #[starlark(require = pos, default = 1)] step: i32, - ) -> anyhow::Result { - let _ = (a1, a2, step); - Ok("func3".to_owned()) - } - - fn with_defaults<'v>( - #[starlark(default=Vec::new())] explicit_default: Vec, - hidden_default: Option>, - #[starlark(default = "my_default")] string_default: &str, - ) -> anyhow::Result { - let _unused = (explicit_default, hidden_default, string_default); - Ok(NoneType) - } - - fn pos_either_named( - #[starlark(require = pos)] a: i32, - b: i32, - #[starlark(require = named)] c: i32, - ) -> anyhow::Result { - let _unused = (a, b, c); - Ok(NoneType) - } - - fn notypes<'v>(a: Value<'v>) -> anyhow::Result> { - Ok(a) - } -} - -#[derive(ProvidesStaticType, Debug, Display, Allocative, Serialize)] -#[display(format = "obj")] -struct Obj; - -#[starlark_value(type = "obj")] -impl<'v> StarlarkValue<'v> for Obj { - fn get_methods() -> Option<&'static Methods> { - static RES: MethodsStatic = MethodsStatic::new(); - RES.methods(object) - } -} - -/// These are where the module docs go -#[starlark_module] -fn object(builder: &mut MethodsBuilder) { - /// Docs for attr1 - #[starlark(attribute)] - fn attr1<'v>(this: Value<'v>) -> anyhow::Result { - Ok("attr1".to_owned()) - } - - #[starlark(attribute)] - fn attr2<'v>(this: Value<'v>) -> anyhow::Result { - Ok("attr2".to_owned()) - } - - /// Docs for func1 - /// - /// # Arguments - /// * `foo`: Docs for foo - /// - /// # Returns - /// The string 'func1' - fn func1<'v>(this: Value<'v>, foo: String) -> anyhow::Result { - let _ignore = (this, foo); - Ok("func1".to_owned()) - } - - fn func2<'v>(this: Value<'v>) -> anyhow::Result { - let _ = this; - Ok("func2".to_owned()) - } - - /// Needs to be escaped when rendered in markdown. 
- fn __exported__<'v>(this: Value<'v>) -> anyhow::Result { - let _ = this; - Ok(NoneType) - } -} - -#[test] -fn golden_docs_starlark() { - let res = docs_golden_test( - "starlark", - DocItem::Module(assert::pass_module(STARLARK_CODE).documentation()), - ); - assert!(!res.contains("_do_not_export")); -} - -#[test] -fn golden_docs_module() { - let res = docs_golden_test( - "module", - DocItem::Module(GlobalsBuilder::new().with(module).build().documentation()), - ); - assert!(!res.contains("starlark::assert::all_true")); - assert!(res.contains(r#"string_default: str = "my_default"#)); -} - -#[test] -fn golden_docs_object() { - let res = docs_golden_test("object", Obj.documentation().unwrap()); - assert!(res.contains(r#"name.\_\_exported\_\_"#)); -} - -#[test] -fn inner_object_functions_have_docs() { - let heap = Heap::new(); - let obj = heap.alloc_simple(Obj); - let item = obj - .get_attr("func1", &heap) - .unwrap() - .unwrap() - .documentation() - .unwrap(); - let res = Doc::named_item("func1".to_owned(), item).render_markdown(MarkdownFlavor::DocFile); - assert!(res.contains("Docs for func1")); -} - -#[test] -fn inner_module_functions_have_docs() { - let item = GlobalsBuilder::new() - .with(module) - .build() - .get("func1") - .unwrap() - .documentation() - .unwrap(); - let res = Doc::named_item("func1".to_owned(), item).render_markdown(MarkdownFlavor::DocFile); - assert!(res.contains("Docs for func1")) -} diff --git a/starlark-rust/starlark/src/tests/docs/rustdocs.rs b/starlark-rust/starlark/src/tests/docs/rustdocs.rs deleted file mode 100644 index e8971298bc880..0000000000000 --- a/starlark-rust/starlark/src/tests/docs/rustdocs.rs +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Copyright 2018 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -use std::collections::HashMap; - -use allocative::Allocative; -use starlark_derive::starlark_module; -use starlark_derive::starlark_value; -use starlark_derive::NoSerialize; -use starlark_derive::ProvidesStaticType; -use starlark_map::small_map::SmallMap; - -use crate as starlark; -use crate::assert::Assert; -use crate::docs::DocItem; -use crate::docs::DocMember; -use crate::environment::GlobalsBuilder; -use crate::eval::Arguments; -use crate::eval::Evaluator; -use crate::values::none::NoneType; -use crate::values::starlark_value_as_type::StarlarkValueAsType; -use crate::values::Heap; -use crate::values::StarlarkValue; -use crate::values::StringValue; -use crate::values::Value; -use crate::values::ValueOfUnchecked; - -#[derive( - Debug, - derive_more::Display, - Allocative, - NoSerialize, - ProvidesStaticType -)] -#[display(fmt = "input")] -struct InputTypeRepr; -#[derive( - Debug, - derive_more::Display, - Allocative, - NoSerialize, - ProvidesStaticType -)] -#[display(fmt = "output")] -struct OutputTypeRepr; - -#[starlark_value(type = "input")] -impl<'v> StarlarkValue<'v> for InputTypeRepr {} - -#[starlark_value(type = "output")] -impl<'v> StarlarkValue<'v> for OutputTypeRepr {} - -#[starlark_module] -#[allow(unused_variables)] // Since this is for a test -fn globals(builder: &mut GlobalsBuilder) { - const Input: StarlarkValueAsType = StarlarkValueAsType::new(); - const Output: StarlarkValueAsType = StarlarkValueAsType::new(); - - fn simple( - arg_int: i32, - arg_bool: bool, - arg_vec: Vec<&str>, - arg_dict: SmallMap, - ) -> anyhow::Result { - unimplemented!() - } - - fn default_arg<'v>( - arg1: Option>, - #[starlark(default = NoneType)] arg2: Value<'v>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { - unimplemented!() - } - - fn args_kwargs<'v>( - #[starlark(args)] args: Vec>, - #[starlark(kwargs)] kwargs: Value<'v>, - ) -> anyhow::Result { - unimplemented!() - } - - fn custom_types<'v>( - arg1: StringValue<'v>, - arg2: ValueOfUnchecked<'v, InputTypeRepr>, - heap: &'v Heap, - ) -> anyhow::Result> { - unimplemented!() - } - - fn pos_named(arg1: i32, #[starlark(require = named)] arg2: i32) -> anyhow::Result { - unimplemented!() - } - - fn with_arguments(args: &Arguments) -> anyhow::Result { - unimplemented!() - } -} - -/// Test that a Rust starlark_module produces the right documentation. -#[test] -fn test_rustdoc() { - let got = GlobalsBuilder::new().with(globals).build(); - let mut a = Assert::new(); - a.globals_add(globals); - let expected = a.pass_module(r#" -# @starlark-rust: allow_string_literals_in_type_expr - -def args_kwargs(*args, **kwargs: typing.Any) -> None: pass -def custom_types(arg1: str, arg2: Input) -> Output: pass -def default_arg(arg1 = "_", arg2: typing.Any = None) -> list[str]: pass -def pos_named(arg1: int, *, arg2: int) -> int: pass -def simple(arg_int: int, arg_bool: bool, arg_vec: list[str], arg_dict: dict[str, (bool, int)]) -> None: pass -def with_arguments(*args, **kwargs) -> int: pass -"#); - - fn unpack(x: DocItem) -> HashMap { - match x { - DocItem::Module(obj) => obj - .members - .into_iter() - .filter_map(|(name, member)| match member { - DocMember::Property(_) => None, - DocMember::Function(f) => Some((name, DocItem::Function(f))), - }) - .collect(), - _ => HashMap::new(), - } - } - - fn cleanup_types(x: &str) -> String { - x.replace("Some(Any)", "None") - .replace("\\\"_\\\"", "_") - // `ArcStr` debug differ. I don't know why this test exists. 
- .replace("(Static(", "(Arc(") - } - - let expected = expected.documentation().members; - let got = unpack(DocItem::Module(got.documentation())); - assert_eq!(expected.len(), got.len()); - for (name, expected1) in expected.iter() { - let got1 = got.get(name).unwrap(); - assert_eq!( - cleanup_types(&format!("{:?}", expected1)), - cleanup_types(&format!("{:?}", got1)), - "Function {}", - name - ); - } -} diff --git a/starlark-rust/starlark/src/tests/fstring.rs b/starlark-rust/starlark/src/tests/fstring.rs index 754a655d22980..98c2850b0e292 100644 --- a/starlark-rust/starlark/src/tests/fstring.rs +++ b/starlark-rust/starlark/src/tests/fstring.rs @@ -21,10 +21,7 @@ mod pass { fn assert() -> Assert<'static> { let mut a = Assert::new(); - a.dialect(&Dialect { - enable_f_strings: true, - ..Dialect::Extended - }); + a.dialect(&Dialect::AllOptionsInternal); a } @@ -102,21 +99,24 @@ f"{x}" == '("x",)' "#, ); } + + #[test] + fn conv() { + assert().is_true(r#"x = 'a'; f"{x}" == 'a'"#); + assert().is_true(r#"x = 'a'; f"{x!s}" == 'a'"#); + assert().is_true(r#"x = 'a'; f"{x!r}" == '"a"'"#); + } } mod fail { use starlark_syntax::golden_test_template::golden_test_template; - use crate::assert; use crate::assert::Assert; use crate::syntax::Dialect; - fn fstring_golden_test(test_name: &str, text: &str) { + fn fstring_golden_test_with_dialect(test_name: &str, text: &str, dialect: &Dialect) { let mut a = Assert::new(); - a.dialect(&Dialect { - enable_f_strings: true, - ..Dialect::Extended - }); + a.dialect(dialect); let err = a.fails(text, &[]); @@ -126,6 +126,10 @@ mod fail { ); } + fn fstring_golden_test(test_name: &str, text: &str) { + fstring_golden_test_with_dialect(test_name, text, &Dialect::AllOptionsInternal); + } + #[test] fn undeclared_variable() { fstring_golden_test("undeclared_variable", "f'foo {bar}'"); @@ -169,12 +173,6 @@ mod fail { #[test] fn not_enabled() { - // Default dialect does not enable fstrings. 
- let err = assert::fails("f'{foo}'", &[]); - - golden_test_template( - "src/tests/fstring/golden/not_enabled.err.golden.md", - &format!("{}", err), - ); + fstring_golden_test_with_dialect("not_enabled", "f'{foo}'", &Dialect::Standard); } } diff --git a/starlark-rust/starlark/src/tests/fstring/golden/escape.err.golden.md b/starlark-rust/starlark/src/tests/fstring/golden/escape.err.golden.md index b33a31d14b1c9..722ec72fdba0e 100644 --- a/starlark-rust/starlark/src/tests/fstring/golden/escape.err.golden.md +++ b/starlark-rust/starlark/src/tests/fstring/golden/escape.err.golden.md @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` error: Not a valid identifier: `bar baz` diff --git a/starlark-rust/starlark/src/tests/fstring/golden/invalid_format.err.golden.md b/starlark-rust/starlark/src/tests/fstring/golden/invalid_format.err.golden.md index 9f50c59f4b374..f3ad67c11b6f1 100644 --- a/starlark-rust/starlark/src/tests/fstring/golden/invalid_format.err.golden.md +++ b/starlark-rust/starlark/src/tests/fstring/golden/invalid_format.err.golden.md @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` error: Invalid format: Unmatched '{' in format string `foo {bar` diff --git a/starlark-rust/starlark/src/tests/fstring/golden/invalid_identifier.err.golden.md b/starlark-rust/starlark/src/tests/fstring/golden/invalid_identifier.err.golden.md index 7322d10c901b0..c264998ca44cd 100644 --- a/starlark-rust/starlark/src/tests/fstring/golden/invalid_identifier.err.golden.md +++ b/starlark-rust/starlark/src/tests/fstring/golden/invalid_identifier.err.golden.md @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` error: Not a valid identifier: `bar baz` diff --git a/starlark-rust/starlark/src/tests/fstring/golden/invalid_identifier_expression.err.golden.md b/starlark-rust/starlark/src/tests/fstring/golden/invalid_identifier_expression.err.golden.md index e81bd30627303..e94bdb9d2b070 100644 --- a/starlark-rust/starlark/src/tests/fstring/golden/invalid_identifier_expression.err.golden.md +++ b/starlark-rust/starlark/src/tests/fstring/golden/invalid_identifier_expression.err.golden.md @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` error: Not a valid identifier: `bar[123]` diff --git a/starlark-rust/starlark/src/tests/fstring/golden/invalid_identifier_multiline.err.golden.md b/starlark-rust/starlark/src/tests/fstring/golden/invalid_identifier_multiline.err.golden.md index 46b9e8f6a56cb..bbcff21146052 100644 --- a/starlark-rust/starlark/src/tests/fstring/golden/invalid_identifier_multiline.err.golden.md +++ b/starlark-rust/starlark/src/tests/fstring/golden/invalid_identifier_multiline.err.golden.md @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` error: Not a valid identifier: `bar baz` diff --git 
a/starlark-rust/starlark/src/tests/fstring/golden/invalid_identifier_raw.err.golden.md b/starlark-rust/starlark/src/tests/fstring/golden/invalid_identifier_raw.err.golden.md index 838cd83947c05..63635bb9e72c8 100644 --- a/starlark-rust/starlark/src/tests/fstring/golden/invalid_identifier_raw.err.golden.md +++ b/starlark-rust/starlark/src/tests/fstring/golden/invalid_identifier_raw.err.golden.md @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` error: Not a valid identifier: `bar baz` diff --git a/starlark-rust/starlark/src/tests/fstring/golden/invalid_identifier_triple_quotes.err.golden.md b/starlark-rust/starlark/src/tests/fstring/golden/invalid_identifier_triple_quotes.err.golden.md index a983256859c46..922f77d6359de 100644 --- a/starlark-rust/starlark/src/tests/fstring/golden/invalid_identifier_triple_quotes.err.golden.md +++ b/starlark-rust/starlark/src/tests/fstring/golden/invalid_identifier_triple_quotes.err.golden.md @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` error: Not a valid identifier: `bar baz` diff --git a/starlark-rust/starlark/src/tests/fstring/golden/not_enabled.err.golden.md b/starlark-rust/starlark/src/tests/fstring/golden/not_enabled.err.golden.md index 3acf75b887b58..87fb1d245fffb 100644 --- a/starlark-rust/starlark/src/tests/fstring/golden/not_enabled.err.golden.md +++ b/starlark-rust/starlark/src/tests/fstring/golden/not_enabled.err.golden.md @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` error: Your Starlark dialect must enable f-strings to use them diff --git a/starlark-rust/starlark/src/tests/fstring/golden/undeclared_variable.err.golden.md b/starlark-rust/starlark/src/tests/fstring/golden/undeclared_variable.err.golden.md index 75f568c108919..b23ae8aeaa3bc 100644 --- a/starlark-rust/starlark/src/tests/fstring/golden/undeclared_variable.err.golden.md +++ b/starlark-rust/starlark/src/tests/fstring/golden/undeclared_variable.err.golden.md @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` error: Variable `bar` not found, did you mean `chr`? 
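[Editor's note] All of the golden-file headers above now record `cargo test -p starlark --lib` as the regeneration command (the trailing `tests` filter was dropped). For readers unfamiliar with the pattern these files serve: a golden test renders some output, compares it against the checked-in file, and rewrites that file instead when the regeneration variable is set. Below is a minimal sketch of that pattern, under the assumption that this is roughly what `golden_test_template` does; the real helper lives in `starlark_syntax` and also emits the `# @generated` header shown in these files.

```rust
use std::fs;

// Sketch only: compare `actual` against the checked-in expectation at
// `path`, or overwrite the expectation when the regeneration env var is set.
fn check_golden(path: &str, actual: &str) {
    if std::env::var("STARLARK_RUST_REGENERATE_GOLDEN_TESTS").is_ok() {
        fs::write(path, actual).expect("failed to write golden file");
        return;
    }
    let expected = fs::read_to_string(path).expect("failed to read golden file");
    assert_eq!(expected, actual, "golden file `{path}` is stale; regenerate it");
}
```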
diff --git a/starlark-rust/starlark/src/tests/go.rs b/starlark-rust/starlark/src/tests/go.rs
index 5637432d3ef2a..fa81606f013f7 100644
--- a/starlark-rust/starlark/src/tests/go.rs
+++ b/starlark-rust/starlark/src/tests/go.rs
@@ -98,6 +98,18 @@ fn test_go() {
             "Verify position of an \"unhashable key\"", // FIXME: we should do better
         ],
     );
+    assert.conformance_except(
+        &ignore_bad_lines(
+            test_case!("set.star"),
+            &[
+                "cannot insert into frozen hash table", // We don't actually have freeze
+                "cannot clear frozen hash table",
+                "discard: cannot delete from frozen hash table",
+            ],
+        ),
+        &[],
+    );
+
     assert.conformance(&ignore_bad_lines(
         test_case!("float.star"),
         &[
diff --git a/starlark-rust/starlark/src/tests/interop.rs b/starlark-rust/starlark/src/tests/interop.rs
index 96b7dd73a4ebd..54048fc3e4ce0 100644
--- a/starlark-rust/starlark/src/tests/interop.rs
+++ b/starlark-rust/starlark/src/tests/interop.rs
@@ -84,8 +84,8 @@ fn test_export_as() {
         fn export_as(
             &self,
             variable_name: &str,
-            _eval: &mut Evaluator<'v, '_>,
-        ) -> anyhow::Result<()> {
+            _eval: &mut Evaluator<'v, '_, '_>,
+        ) -> crate::Result<()> {
             self.named.try_export_as(variable_name);
             Ok(())
         }
@@ -135,8 +135,8 @@ fn test_load_symbols() {
         fn load_symbol<'v>(
             name: &str,
             value: Value<'v>,
-            eval: &mut Evaluator<'v, '_>,
-        ) -> anyhow::Result<NoneType> {
+            eval: &mut Evaluator<'v, '_, '_>,
+        ) -> starlark::Result<NoneType> {
             eval.set_module_variable_at_some_point(name, value)?;
             Ok(NoneType)
         }
@@ -164,13 +164,13 @@ fn test_load_public_symbols_does_not_reexport() -> anyhow::Result<()> {
 #[test]
 // Test that we can express something that loads symbols into the exported module,
 // but not using the very dubious `set_module_variable_at_some_point`.
-fn test_load_symbols_extra() -> anyhow::Result<()> {
+fn test_load_symbols_extra() -> crate::Result<()> {
     #[starlark_module]
     fn module(builder: &mut GlobalsBuilder) {
         fn load_symbol<'v>(
             name: &str,
             value: Value<'v>,
-            eval: &mut Evaluator<'v, '_>,
+            eval: &mut Evaluator<'v, '_, '_>,
         ) -> anyhow::Result<NoneType> {
             let extra = eval
                 .module()
@@ -192,7 +192,7 @@ fn test_load_symbols_extra() -> anyhow::Result<()> {
         NoSerialize,
         Allocative
     )]
-    #[display(fmt = "{:?}", self)]
+    #[display("{:?}", self)]
    struct Extra<'v>(Arc<Mutex<HashMap<String, Value<'v>>>>);
 
     #[starlark_value(type = "Extra")]
@@ -204,7 +204,11 @@ fn test_load_symbols_extra() -> anyhow::Result<()> {
         let mut eval = Evaluator::new(&modu);
         modu.set_extra_value(eval.heap().alloc_complex_no_freeze(Extra::default()));
         eval.eval_module(
-            AstModule::parse("a", "load_symbol('x', 6*7)".to_owned(), &Dialect::Extended)?,
+            AstModule::parse(
+                "a",
+                "load_symbol('x', 6*7)".to_owned(),
+                &Dialect::AllOptionsInternal,
+            )?,
             &globals,
         )?;
     }
@@ -224,7 +228,7 @@
 #[test]
 fn test_repr_str() {
     #[derive(ProvidesStaticType, Debug, Display)]
-    #[display(fmt = "{:?}", self)]
+    #[display("{:?}", self)]
     struct Foo(Option<i32>);
 
     #[starlark_module]
diff --git a/starlark-rust/starlark/src/tests/mod.rs b/starlark-rust/starlark/src/tests/mod.rs
deleted file mode 100644
index 153a8cf54c43e..0000000000000
--- a/starlark-rust/starlark/src/tests/mod.rs
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright 2018 The Starlark in Rust Authors.
- * Copyright (c) Facebook, Inc. and its affiliates.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -mod basic; -mod bc; -mod before_stmt; -mod call; -mod comprehension; -mod def; -mod derive; -mod docs; -mod for_loop; -mod freeze_access_value; -mod fstring; -mod go; -mod interop; -mod opt; -mod replace_binary; -mod runtime; -mod type_annot; -mod uncategorized; diff --git a/starlark-rust/starlark/src/tests/opt.rs b/starlark-rust/starlark/src/tests/opt.rs new file mode 100644 index 0000000000000..9e6252761e101 --- /dev/null +++ b/starlark-rust/starlark/src/tests/opt.rs @@ -0,0 +1,133 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Optimizer tests. + +mod constant_folding; +mod def_inline; +mod eq; +mod if_rand; +mod list_add; +mod speculative_exec; +mod type_is; +mod types; + +use crate::tests::bc::golden::bc_golden_test; + +#[test] +fn test_type_is_inlined() { + bc_golden_test( + "opt_type_is_inlined", + r#" +def is_list(x): + return type(x) == type([]) + +def test(x): + return is_list(x) + "#, + ) +} + +#[test] +fn test_private_forward_mutable_module_vars_inlined() { + bc_golden_test( + "opt_private_forward_mutable_module_vars_inlined", + r#" +def test(): + # Reference to module variable should be replaced with constant + return _private_forward_mutable + +_private_forward_mutable = {1: 2} +"#, + ); +} + +#[test] +fn test_same_module_struct_getattr_inlined() { + bc_golden_test( + "opt_same_module_struct_getattr_inlined", + r#" +def test(): + return _s.f + +_s = struct(f = 1) +"#, + ); +} + +#[test] +fn test_list_plus_list() { + bc_golden_test( + "opt_list_plus_list", + r#" +L = [1, 2] + +def test(): + return L + [1] +"#, + ); +} + +#[test] +fn test_empty_iterable_optimized_away() { + bc_golden_test( + "opt_empty_iterable_optimized_away", + r#" +L = [] +def test(): + for x in L: + print(x) +"#, + ); +} + +#[test] +fn test_unreachable_code_optimized_away() { + bc_golden_test( + "opt_unreachable_code_optimized_away", + r#" +def test(): + if True: + return + fail("unreachable") +"#, + ); +} + +#[test] +fn test_recursion() { + bc_golden_test( + "opt_recursion", + // Test inlining does not fail here. + "def test(): return test()", + ); +} + +#[test] +fn test_mutual_recursion() { + // Just check we do not enter an infinite recursion in the optimizer here. 
+ bc_golden_test( + "opt_mutual_recursion", + r#" +def test(): + return g() + +def g(): + return test() +"#, + ); +} diff --git a/starlark-rust/starlark/src/tests/opt/eq.rs b/starlark-rust/starlark/src/tests/opt/eq.rs new file mode 100644 index 0000000000000..7340fa1898a32 --- /dev/null +++ b/starlark-rust/starlark/src/tests/opt/eq.rs @@ -0,0 +1,91 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Test for `a == b` optimizations. + +use crate::tests::bc::golden::bc_golden_test; + +#[test] +fn test_eq_int() { + bc_golden_test( + "eq_int", + r#" +def test(x): + return x == 10 +"#, + ); +} + +#[test] +fn test_eq_str() { + bc_golden_test( + "eq_str", + r#" +def test(x): + return x == "hello" +"#, + ); +} + +#[test] +fn test_eq_short_str_is_ptr_eq() { + bc_golden_test( + "eq_short_str", + r#" +def test(x): + return x == "a" +"#, + ); +} + +#[test] +fn test_eq_bool_is_ptr_eq() { + bc_golden_test( + "eq_bool", + r#" +def test(x): + return x == True +"#, + ); +} + +/// Enum values do not override `equals` method, so we can use pointer equality. +#[test] +fn test_eq_enum_is_ptr_eq() { + bc_golden_test( + "eq_enum", + r#" +Color = enum("RED", "GREEN", "BLUE") + +def test(x): + return x == Color("RED") +"#, + ); +} + +#[test] +fn test_eq_const() { + bc_golden_test( + "eq_const", + r#" +S = struct(a = 2) + +def test(x): + return x == S +"#, + ); +} diff --git a/starlark-rust/starlark/src/tests/opt/if_rand.rs b/starlark-rust/starlark/src/tests/opt/if_rand.rs index d05fe87ac2ce2..88c62d9a67424 100644 --- a/starlark-rust/starlark/src/tests/opt/if_rand.rs +++ b/starlark-rust/starlark/src/tests/opt/if_rand.rs @@ -19,7 +19,6 @@ use std::cell::Cell; use std::fmt; -use std::fmt::Display; use derive_more::Display; use dupe::Dupe; @@ -76,9 +75,9 @@ fn bool_fns(globals: &mut GlobalsBuilder) { #[derive(Display, Debug, Copy, Clone, Dupe)] enum TestBinOp { - #[display(fmt = "and")] + #[display("and")] And, - #[display(fmt = "or")] + #[display("or")] Or, } @@ -148,7 +147,7 @@ impl Display for TestExpr { /// * Count side effects. fn eval_program(program: &str) -> (bool, CountCalls) { let module = Module::new(); - let ast = AstModule::parse("t.star", program.to_owned(), &Dialect::Extended).unwrap(); + let ast = AstModule::parse("t.star", program.to_owned(), &Dialect::AllOptionsInternal).unwrap(); let mut globals = GlobalsBuilder::standard(); bool_fns(&mut globals); diff --git a/starlark-rust/starlark/src/tests/opt/list_add.rs b/starlark-rust/starlark/src/tests/opt/list_add.rs new file mode 100644 index 0000000000000..d24b1141aadeb --- /dev/null +++ b/starlark-rust/starlark/src/tests/opt/list_add.rs @@ -0,0 +1,40 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use crate::tests::bc::golden::bc_golden_test; + +#[test] +fn test_list_of_const_add_opt() { + bc_golden_test( + "opt_list_of_const_add", + r#" +def test(): + return [1, 2] + [3, 4, 5] +"#, + ) +} + +#[test] +fn test_list_of_expr_add() { + bc_golden_test( + "opt_list_of_expr_add", + r#" +def test(): + return [noop(), noop()] + [noop(), noop(), noop()] +"#, + ); +} diff --git a/starlark-rust/starlark/src/tests/opt/mod.rs b/starlark-rust/starlark/src/tests/opt/mod.rs deleted file mode 100644 index 1a0192e126607..0000000000000 --- a/starlark-rust/starlark/src/tests/opt/mod.rs +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Copyright 2018 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! Optimizer tests. - -mod constant_folding; -mod def_inline; -mod if_rand; -mod speculative_exec; -mod type_is; -mod types; - -use crate::tests::bc::golden::bc_golden_test; - -#[test] -fn test_type_is_inlined() { - bc_golden_test( - "opt_type_is_inlined", - r#" -def is_list(x): - return type(x) == type([]) - -def test(x): - return is_list(x) - "#, - ) -} - -#[test] -fn test_private_forward_mutable_module_vars_inlined() { - bc_golden_test( - "opt_private_forward_mutable_module_vars_inlined", - r#" -def test(): - # Reference to module variable should be replaced with constant - return _private_forward_mutable - -_private_forward_mutable = {1: 2} -"#, - ); -} - -#[test] -fn test_same_module_struct_getattr_inlined() { - bc_golden_test( - "opt_same_module_struct_getattr_inlined", - r#" -def test(): - return _s.f - -_s = struct(f = 1) -"#, - ); -} - -#[test] -fn test_list_plus_list() { - bc_golden_test( - "opt_list_plus_list", - r#" -L = [1, 2] - -def test(): - return L + [1] -"#, - ); -} - -#[test] -fn test_empty_iterable_optimized_away() { - bc_golden_test( - "opt_empty_iterable_optimized_away", - r#" -L = [] -def test(): - for x in L: - print(x) -"#, - ); -} - -#[test] -fn test_unreachable_code_optimized_away() { - bc_golden_test( - "opt_unreachable_code_optimized_away", - r#" -def test(): - if True: - return - fail("unreachable") -"#, - ); -} - -#[test] -fn test_recursion() { - bc_golden_test( - "opt_recursion", - // Test inlining does not fail here. - "def test(): return test()", - ); -} - -#[test] -fn test_mutual_recursion() { - // Just check we do not enter an infinite recursion in the optimizer here. 
- bc_golden_test( - "opt_mutual_recursion", - r#" -def test(): - return g() - -def g(): - return test() -"#, - ); -} diff --git a/starlark-rust/starlark/src/tests/type_annot.rs b/starlark-rust/starlark/src/tests/type_annot.rs index c56652dfe9dbc..a9d2bc0a52b54 100644 --- a/starlark-rust/starlark/src/tests/type_annot.rs +++ b/starlark-rust/starlark/src/tests/type_annot.rs @@ -111,3 +111,24 @@ def g(): f("") "Expected type `int` but got `str`", ); } + +#[test] +fn test_string_lit_as_type() { + assert::fail( + r#" +def foo(x: ""): pass +"#, + "string literal expression is not allowed in type expression", + ); +} + +#[test] +fn test_string_const_as_type() { + assert::fail( + r#" +T = "" +def foo(x: T): pass +"#, + "String literals are not allowed in type expressions", + ); +} diff --git a/starlark-rust/starlark/src/tests/uncategorized.rs b/starlark-rust/starlark/src/tests/uncategorized.rs index 7bf5189cb6b91..83336029d352f 100644 --- a/starlark-rust/starlark/src/tests/uncategorized.rs +++ b/starlark-rust/starlark/src/tests/uncategorized.rs @@ -23,7 +23,6 @@ use anyhow::Context; use derive_more::Display; use starlark_derive::starlark_module; use starlark_derive::starlark_value; -use starlark_syntax::diagnostic::Diagnostic; use starlark_syntax::golden_test_template::golden_test_template; use crate as starlark; @@ -38,6 +37,8 @@ use crate::eval::Evaluator; use crate::starlark_simple_value; use crate::syntax::AstModule; use crate::syntax::Dialect; +use crate::tests::util::trim_rust_backtrace; +use crate::values::list_or_tuple::UnpackListOrTuple; use crate::values::none::NoneType; use crate::values::Freeze; use crate::values::Freezer; @@ -286,15 +287,22 @@ fn test_radd() { // We want select append to always produce a select, much like the // Bazel/Buck `select` function. #[derive(Debug, Display, Clone, ProvidesStaticType, NoSerialize, Allocative)] - #[display(fmt = "${:?}", _0)] + #[display("${:?}", _0)] struct Select(Vec); starlark_simple_value!(Select); impl<'v> UnpackValue<'v> for Select { - fn unpack_value(value: Value<'v>) -> Option { + type Error = crate::Error; + + fn unpack_value_impl(value: Value<'v>) -> crate::Result> { match Select::from_value(value) { - Some(x) => Some(x.clone()), - None => Some(Select(UnpackValue::unpack_value(value)?)), + Some(x) => Ok(Some(x.clone())), + None => { + let Some(list_or_tuple) = UnpackListOrTuple::unpack_value(value)? 
else { + return Ok(None); + }; + Ok(Some(Select(list_or_tuple.items))) + } } } } @@ -308,12 +316,12 @@ fn test_radd() { #[starlark_value(type = "select")] impl<'v> StarlarkValue<'v> for Select { - fn radd(&self, lhs: Value<'v>, heap: &'v Heap) -> Option>> { - let lhs: Select = UnpackValue::unpack_value(lhs).unwrap(); + fn radd(&self, lhs: Value<'v>, heap: &'v Heap) -> Option>> { + let lhs: Select = Select::unpack_value(lhs).unwrap().unwrap(); Some(Ok(heap.alloc(lhs.add(self)))) } - fn add(&self, rhs: Value<'v>, heap: &'v Heap) -> Option>> { - let rhs: Select = UnpackValue::unpack_value(rhs).unwrap(); + fn add(&self, rhs: Value<'v>, heap: &'v Heap) -> Option>> { + let rhs: Select = UnpackValue::unpack_value(rhs).unwrap().unwrap(); Some(Ok(heap.alloc(self.clone().add(&rhs)))) } fn collect_repr(&self, collector: &mut String) { @@ -323,8 +331,8 @@ fn test_radd() { #[starlark_module] fn module(build: &mut GlobalsBuilder) { - fn select(xs: Vec) -> anyhow::Result { + Ok(Select(xs.items)) } } @@ -402,19 +410,8 @@ assert_eq(names[str], "str") ); } -/// There's no anyhow API to print error without rust backtrace -/// ([issue](https://github.com/dtolnay/anyhow/issues/300)). -fn trim_rust_backtrace(error: &str) -> &str { - match error.find("\nStack backtrace:") { - Some(pos) => error[..pos].trim_end(), - None => error.trim_end(), - } -} - #[test] // Tests diagnostics error display. -// -// > EYEBALL=1 cargo test -p starlark diagnostics_display -- --nocapture fn test_diagnostics_display() { fn fail1() -> anyhow::Result<()> { Err(anyhow::anyhow!("fail 1")) @@ -459,27 +456,25 @@ should_fail()"#, "rust failure", ); - let diag = err.downcast::().unwrap(); - golden_test_template( "src/tests/uncategorized_diagnostics_display_default.golden", - trim_rust_backtrace(&format!("{}", diag)), + trim_rust_backtrace(&format!("{}", err)), ); golden_test_template( "src/tests/uncategorized_diagnostics_display_hash.golden", - trim_rust_backtrace(&format!("{:#}", diag)), + trim_rust_backtrace(&format!("{:#}", err)), + ); + + golden_test_template( + "src/tests/uncategorized_diagnostics_display_debug.golden", + trim_rust_backtrace(&format!("{:?}", err)), ); } #[test] -// Check that errors print out "nicely" - can be used to view it. 
-// First set `display` to `true` then run: -// -// > EYEBALL=1 cargo test -p starlark eyeball -- --nocapture -fn test_eyeball() { - let display = std::env::var("EYEBALL") == Ok("1".to_owned()); - +// Check that errors print out "nicely" +fn test_error_display() { let mut a = Assert::new(); a.module( "imported", @@ -495,7 +490,8 @@ def add2(z): def add(z): x.append(z)"#, ); - let diag = a.fail( + + let err = a.fail( r#" load('imported', 'add2') def add3(z): @@ -503,48 +499,15 @@ def add3(z): add3(8)"#, "Immutable", ); - if display { - Diagnostic::eprint(&diag) - } - assert_eq!( - &format!("\n{}", diag), - r#" -Traceback (most recent call last): - * assert.bzl:5, in - add3(8) - * assert.bzl:4, in add3 - add2(z) - * imported.bzl:9, in add2 - add(z) - * imported.bzl:11, in add - x.append(z) -error: Immutable - --> imported.bzl:11:3 - | -11 | x.append(z) - | ^^^^^^^^^^^ - | -"# - ); - assert_eq!( - &format!("\n{:#}", diag), - r#" -Traceback (most recent call last): - * assert.bzl:5, in - add3(8) - * assert.bzl:4, in add3 - add2(z) - * imported.bzl:9, in add2 - add(z) - * imported.bzl:11, in add - x.append(z) -error: Immutable - --> imported.bzl:11:3 - | -11 | x.append(z) - | ^^^^^^^^^^^ - | -"# + + golden_test_template( + "src/tests/uncategorized_error_display.golden", + trim_rust_backtrace(&format!("{}", err)), + ); + + golden_test_template( + "src/tests/uncategorized_error_display_hash.golden", + trim_rust_backtrace(&format!("{:#}", err)), ); } @@ -567,7 +530,7 @@ fn test_load_reexport() { } #[test] -fn test_module_visibility_preserved_by_evaluator() -> anyhow::Result<()> { +fn test_module_visibility_preserved_by_evaluator() -> crate::Result<()> { // Make sure that when we use a module in the evaluator, the entering / exiting the // module with ScopeData preserves the visibility of symbols. @@ -643,7 +606,7 @@ fn test_getattr_did_you_mean_custom() { "Object of type `struct` has no attribute `gray`, did you mean `grey`?", ); assert::fail( - "Rec = record(grey=int.type); Rec(grey=1).gray", + "Rec = record(grey=int); Rec(grey=1).gray", "Object of type `record` has no attribute `gray`, did you mean `grey`?", ); } @@ -747,7 +710,7 @@ fn test_label_assign() { // No builtin Starlark types support it, so we have to define a custom type (wapping a dictionary) #[derive(Debug, Trace, ProvidesStaticType, Display, NoSerialize, Allocative)] - #[display(fmt = "{:?}", self)] + #[display("{:?}", self)] struct Wrapper<'v>(RefCell>>); #[starlark_value(type = "wrapper")] @@ -756,14 +719,14 @@ fn test_label_assign() { Some(*self.0.borrow().get(attribute).unwrap()) } - fn set_attr(&self, attribute: &str, new_value: Value<'v>) -> anyhow::Result<()> { + fn set_attr(&self, attribute: &str, new_value: Value<'v>) -> starlark::Result<()> { self.0.borrow_mut().insert(attribute.to_owned(), new_value); Ok(()) } } #[derive(Debug, ProvidesStaticType, Display, NoSerialize, Allocative)] - #[display(fmt = "FrozenWrapper")] + #[display("FrozenWrapper")] struct FrozenWrapper; #[starlark_value(type = "wrapper")] @@ -866,17 +829,23 @@ xs == [1, 2, 3, 1, 2, 3] xs = [] xs[xs] "#, - "Type of parameter", + "Expected `int`, but got", ); a.fail( r#" xs = [] xs[xs] = xs "#, - "Type of parameter", + "Expected `int`, but got", ); } +#[test] +fn test_list_slice_does_not_accept_bool() { + // TODO(nga): this should fail. 
+ assert::fail("[1][False]", "Expected `int`, but got `bool"); +} + #[test] fn test_self_mutate_dict() { // Check functions that mutate and access self on dicts @@ -952,7 +921,7 @@ f() #[test] fn test_joe() { - // Based on discussions at https://github.com/facebookexperimental/starlark-rust/issues/22 + // Based on discussions at https://github.com/facebook/starlark-rust/issues/22 let code = r#" def animal(id): return { @@ -984,20 +953,20 @@ animal("Joe") fn test_fuzzer_59102() { // From https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=59102 let src = "\"\u{e0070}"; - let res: Result = + let res: Result = AstModule::parse("hello_world.star", src.to_owned(), &Dialect::Standard); // The panic actually only happens when we format the result - format!("{:?}", res); + let _unused = format!("{:?}", res); } #[test] fn test_fuzzer_59371() { // From https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=59371 let src = "\"\u{2009}\\x"; - let res: Result = + let res: Result = AstModule::parse("hello_world.star", src.to_owned(), &Dialect::Standard); // The panic actually only happens when we format the result - format!("{:?}", res); + let _unused = format!("{:?}", res); } #[test] diff --git a/starlark-rust/starlark/src/tests/uncategorized_diagnostics_display_debug.golden b/starlark-rust/starlark/src/tests/uncategorized_diagnostics_display_debug.golden new file mode 100644 index 0000000000000..ae892bd5eff27 --- /dev/null +++ b/starlark-rust/starlark/src/tests/uncategorized_diagnostics_display_debug.golden @@ -0,0 +1,25 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Traceback (most recent call last): + * assert.bzl:3, in + should_fail() + * imported.bzl:9, in should_fail + rust_failure() +error: rust failure + --> imported.bzl:9:5 + | +9 | rust_failure() + | ^^^^^^^^^^^^^^ + | + + +rust failure + +Caused by: + 0: fail 3 + 1: fail 2 + 2: fail 1 diff --git a/starlark-rust/starlark/src/tests/uncategorized_diagnostics_display_default.golden b/starlark-rust/starlark/src/tests/uncategorized_diagnostics_display_default.golden index a89d00c9d5141..9f66fead61519 100644 --- a/starlark-rust/starlark/src/tests/uncategorized_diagnostics_display_default.golden +++ b/starlark-rust/starlark/src/tests/uncategorized_diagnostics_display_default.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Traceback (most recent call last): @@ -15,11 +15,3 @@ error: rust failure 9 | rust_failure() | ^^^^^^^^^^^^^^ | - - -rust failure - -Caused by: - 0: fail 3 - 1: fail 2 - 2: fail 1 diff --git a/starlark-rust/starlark/src/tests/uncategorized_diagnostics_display_hash.golden b/starlark-rust/starlark/src/tests/uncategorized_diagnostics_display_hash.golden index a89d00c9d5141..ae892bd5eff27 100644 --- a/starlark-rust/starlark/src/tests/uncategorized_diagnostics_display_hash.golden +++ b/starlark-rust/starlark/src/tests/uncategorized_diagnostics_display_hash.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Traceback (most recent call last): diff --git a/starlark-rust/starlark/src/tests/uncategorized_error_display.golden b/starlark-rust/starlark/src/tests/uncategorized_error_display.golden new file mode 100644 index 
0000000000000..b3d0924d7c960 --- /dev/null +++ b/starlark-rust/starlark/src/tests/uncategorized_error_display.golden @@ -0,0 +1,21 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Traceback (most recent call last): + * assert.bzl:5, in + add3(8) + * assert.bzl:4, in add3 + add2(z) + * imported.bzl:9, in add2 + add(z) + * imported.bzl:11, in add + x.append(z) +error: Immutable + --> imported.bzl:11:3 + | +11 | x.append(z) + | ^^^^^^^^^^^ + | diff --git a/starlark-rust/starlark/src/tests/uncategorized_error_display_hash.golden b/starlark-rust/starlark/src/tests/uncategorized_error_display_hash.golden new file mode 100644 index 0000000000000..b3d0924d7c960 --- /dev/null +++ b/starlark-rust/starlark/src/tests/uncategorized_error_display_hash.golden @@ -0,0 +1,21 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Traceback (most recent call last): + * assert.bzl:5, in + add3(8) + * assert.bzl:4, in add3 + add2(z) + * imported.bzl:9, in add2 + add(z) + * imported.bzl:11, in add + x.append(z) +error: Immutable + --> imported.bzl:11:3 + | +11 | x.append(z) + | ^^^^^^^^^^^ + | diff --git a/starlark-rust/starlark/src/tests/util.rs b/starlark-rust/starlark/src/tests/util.rs new file mode 100644 index 0000000000000..57133b357a147 --- /dev/null +++ b/starlark-rust/starlark/src/tests/util.rs @@ -0,0 +1,75 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#![cfg(test)] + +use allocative::Allocative; +use starlark_derive::starlark_value; +use starlark_derive::Freeze; +use starlark_derive::NoSerialize; +use starlark_derive::Trace; + +use crate as starlark; +use crate::any::ProvidesStaticType; +use crate::values::AllocFrozenValue; +use crate::values::AllocValue; +use crate::values::FrozenHeap; +use crate::values::FrozenValue; +use crate::values::Heap; +use crate::values::StarlarkValue; +use crate::values::Value; +use crate::values::ValueLifetimeless; +use crate::values::ValueLike; + +#[derive( + Trace, + Freeze, + Debug, + derive_more::Display, + Allocative, + ProvidesStaticType, + NoSerialize +)] +#[display("TestComplexValue<{}>", _0)] +pub(crate) struct TestComplexValue<V>(pub(crate) V); + +#[starlark_value(type = "TestComplexValue")] +impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for TestComplexValue<V> where + Self: ProvidesStaticType<'v> +{ +} + +impl<'v> AllocValue<'v> for TestComplexValue<Value<'v>> { + fn alloc_value(self, heap: &'v Heap) -> Value<'v> { + heap.alloc_complex(self) + } +} + +impl AllocFrozenValue for TestComplexValue<FrozenValue> { + fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue { + heap.alloc_simple(self) + } +} + +/// There's no anyhow API to print error without rust backtrace +/// ([issue](https://github.com/dtolnay/anyhow/issues/300)).
+pub(crate) fn trim_rust_backtrace(error: &str) -> &str { + match error.find("\nStack backtrace:") { + Some(pos) => error[..pos].trim_end(), + None => error.trim_end(), + } +} diff --git a/starlark-rust/starlark/src/typing.rs b/starlark-rust/starlark/src/typing.rs new file mode 100644 index 0000000000000..b33f68be3b7dd --- /dev/null +++ b/starlark-rust/starlark/src/typing.rs @@ -0,0 +1,73 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Notes: +// We deal with list.append/list.extend/list.insert, which mutate their list argument +// We ignore dict.setdefault/dict.update, as these are pretty complex functions +// We consider "non-sensical" operations like list.remove and == to have implied types that make them meaningful +// even if they don't fail when doing something silly + +//! Types required to support the [`typecheck`](crate::syntax::AstModule::typecheck) function. + +pub(crate) mod arc_ty; +pub(crate) mod basic; +pub(crate) mod bindings; +pub(crate) mod call_args; +pub(crate) mod callable; +pub(crate) mod callable_param; +pub(crate) mod ctx; +pub(crate) mod custom; +pub(crate) mod error; +pub(crate) mod fill_types_for_lint; +pub(crate) mod function; +pub(crate) mod interface; +pub(crate) mod mode; +pub(crate) mod oracle; +pub(crate) mod small_arc_vec; +pub(crate) mod small_arc_vec_or_static; +pub(crate) mod starlark_value; +pub(crate) mod structs; +pub(crate) mod tuple; +pub(crate) mod ty; +pub(crate) mod typecheck; +pub(crate) mod user; + +pub mod macro_support; + +#[cfg(test)] +mod tests; + +pub use basic::TyBasic; +pub use callable::TyCallable; +pub use callable_param::ParamIsRequired; +pub use callable_param::ParamSpec; +pub use function::TyFunction; +pub use interface::Interface; +pub use oracle::ctx::TypingOracleCtx; +pub use oracle::traits::TypingBinOp; +pub use oracle::traits::TypingUnOp; +pub use starlark_value::TyStarlarkValue; +pub use structs::TyStruct; +pub use ty::Approximation; +pub use ty::Ty; +pub use ty::TypeRenderConfig; +pub use typecheck::AstModuleTypecheck; +pub use typecheck::TypeMap; +pub use user::TyUser; +pub use user::TyUserFields; +pub use user::TyUserIndex; +pub use user::TyUserParams; diff --git a/starlark-rust/starlark/src/typing/arc_ty.rs b/starlark-rust/starlark/src/typing/arc_ty.rs index a4cfc7441476e..5993eff6492c1 100644 --- a/starlark-rust/starlark/src/typing/arc_ty.rs +++ b/starlark-rust/starlark/src/typing/arc_ty.rs @@ -24,6 +24,7 @@ use std::sync::Arc; use allocative::Allocative; use dupe::Dupe; +use crate::typing::ty::TypeRenderConfig; use crate::typing::Ty; #[derive(Dupe, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Allocative)] @@ -102,6 +103,10 @@ impl ArcTy { ArcTy::new(Ty::union2(a.to_ty(), b.to_ty())) } } + + pub(crate) fn display_with<'a>(&'a self, config: &'a TypeRenderConfig) -> ArcTyDisplay<'a> { + ArcTyDisplay { ty: self, config } + } }
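The `trim_rust_backtrace` helper moved into `util.rs` above exists because anyhow's `{:?}`/`{:#}` renderings may append a `Stack backtrace:` section whose contents vary by toolchain. A small self-contained sketch of the behavior the golden tests rely on:

```rust
fn trim_rust_backtrace(error: &str) -> &str {
    match error.find("\nStack backtrace:") {
        Some(pos) => error[..pos].trim_end(),
        None => error.trim_end(),
    }
}

fn main() {
    let rendered = "rust failure\n\nCaused by:\n    0: fail 3\nStack backtrace:\n   0: ...";
    // Everything from the backtrace marker onward is dropped, keeping the
    // checked-in golden files stable across toolchains.
    assert_eq!(
        trim_rust_backtrace(rendered),
        "rust failure\n\nCaused by:\n    0: fail 3"
    );
}
```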
impl Deref for ArcTy @@ -137,3 +142,14 @@ impl Deref for ArcTy { } } } + +pub(crate) struct ArcTyDisplay<'a> { + ty: &'a ArcTy, + config: &'a TypeRenderConfig, +} + +impl Display for ArcTyDisplay<'_> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + self.ty.deref().fmt_with_config(f, self.config) + } +} diff --git a/starlark-rust/starlark/src/typing/basic.rs b/starlark-rust/starlark/src/typing/basic.rs index e20935cc5d78b..2da0fbee4814c 100644 --- a/starlark-rust/starlark/src/typing/basic.rs +++ b/starlark-rust/starlark/src/typing/basic.rs @@ -22,15 +22,16 @@ use allocative::Allocative; use dupe::Dupe; use crate::typing::arc_ty::ArcTy; +use crate::typing::callable::TyCallable; use crate::typing::custom::TyCustom; use crate::typing::custom::TyCustomImpl; use crate::typing::starlark_value::TyStarlarkValue; use crate::typing::tuple::TyTuple; +use crate::typing::ty::TypeRenderConfig; use crate::typing::Ty; use crate::typing::TyFunction; -use crate::typing::TyName; use crate::values::none::NoneType; -use crate::values::string::StarlarkStr; +use crate::values::string::str_type::StarlarkStr; use crate::values::typing::any::TypingAny; use crate::values::StarlarkValue; @@ -39,17 +40,13 @@ use crate::values::StarlarkValue; pub enum TyBasic { /// Type that contains anything Any, - /// A name, represented by `"name"` in the Starlark type. - /// Will never be a type that can be represented by another operation, - /// e.g. never `"list"` because `Ty::List` could be used instead. - Name(TyName), /// Type is handled by `StarlarkValue` trait implementation. StarlarkValue(TyStarlarkValue), /// Iter is a type that supports iteration, only used as arguments to primitive functions. /// The inner type is applicable for each iteration element. Iter(ArcTy), /// `typing.Callable`. - Callable, + Callable(TyCallable), /// `type`. Type, /// A list. @@ -60,6 +57,8 @@ pub enum TyBasic { Dict(ArcTy, ArcTy), /// Custom type. Custom(TyCustom), + /// A set. + Set(ArcTy), } impl TyBasic { @@ -98,6 +97,10 @@ impl TyBasic { Self::dict(Ty::any(), Ty::any()) } + pub(crate) fn any_set() -> Self { + TyBasic::Set(ArcTy::any()) + } + /// Create an iterable type. pub(crate) fn iter(item: Ty) -> Self { TyBasic::Iter(ArcTy::new(item)) @@ -108,6 +111,11 @@ impl TyBasic { TyBasic::Dict(ArcTy::new(key), ArcTy::new(value)) } + /// Create a set type. + pub(crate) fn set(item: Ty) -> Self { + TyBasic::Set(ArcTy::new(item)) + } + pub(crate) fn custom(custom: impl TyCustomImpl) -> Self { TyBasic::Custom(TyCustom::new(custom)) } @@ -117,21 +125,17 @@ impl TyBasic { /// Types like [`Ty::any`] will return `None`. pub fn as_name(&self) -> Option<&str> { match self { - TyBasic::Name(x) => Some(x.as_str()), TyBasic::StarlarkValue(t) => Some(t.as_name()), TyBasic::List(_) => Some("list"), TyBasic::Tuple(_) => Some("tuple"), TyBasic::Dict(..) => Some("dict"), TyBasic::Type => Some("type"), TyBasic::Custom(c) => c.as_name(), - TyBasic::Any | TyBasic::Iter(_) | TyBasic::Callable => None, + TyBasic::Any | TyBasic::Iter(_) | TyBasic::Callable(_) => None, + TyBasic::Set(_) => Some("set"), } } - pub(crate) fn is_function(&self) -> bool { - self.as_name() == Some("function") - } - - /// If this type is a function, return the function type.
pub(crate) fn as_function(&self) -> Option<&TyFunction> { match self { @@ -150,30 +154,51 @@ impl TyBasic { self.as_name() == Some("list") } - pub(crate) fn is_str(&self) -> bool { - self == &TyBasic::string() - } -} - -impl Display for TyBasic { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + pub(crate) fn fmt_with_config( + &self, + f: &mut fmt::Formatter<'_>, + config: &TypeRenderConfig, + ) -> fmt::Result { match self { TyBasic::Any => write!(f, "{}", TypingAny::TYPE), - TyBasic::Name(x) => write!(f, "{}", x), - TyBasic::StarlarkValue(x) => write!(f, "{}", x), + TyBasic::StarlarkValue(x) => x.fmt_with_config(f, config), TyBasic::Iter(x) => { if x.is_any() { write!(f, "typing.Iterable") } else { - write!(f, "typing.Iterable[{}]", x) + write!(f, "typing.Iterable[{}]", x.display_with(config)) + } + } + TyBasic::Callable(c) => c.fmt_with_config(f, config), + TyBasic::List(x) => { + if x.is_any() { + write!(f, "list") + } else { + write!(f, "list[{}]", x.display_with(config)) + } + } + TyBasic::Tuple(tuple) => tuple.fmt_with_config(f, config), + TyBasic::Dict(k, v) => { + if k.is_any() && v.is_any() { + write!(f, "dict") + } else { + write!( + f, + "dict[{}, {}]", + k.display_with(config), + v.display_with(config) + ) } } - TyBasic::Callable => write!(f, "typing.Callable"), - TyBasic::List(x) => write!(f, "list[{}]", x), - TyBasic::Tuple(tuple) => Display::fmt(tuple, f), - TyBasic::Dict(k, v) => write!(f, "dict[{}, {}]", k, v), TyBasic::Type => write!(f, "type"), TyBasic::Custom(c) => Display::fmt(c, f), + TyBasic::Set(x) => write!(f, "set[{}]", x.display_with(config)), } } } + +impl Display for TyBasic { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.fmt_with_config(f, &TypeRenderConfig::Default) + } +} diff --git a/starlark-rust/starlark/src/typing/bindings.rs b/starlark-rust/starlark/src/typing/bindings.rs index 1cc13efcfbb31..646f7cfab56c4 100644 --- a/starlark-rust/starlark/src/typing/bindings.rs +++ b/starlark-rust/starlark/src/typing/bindings.rs @@ -17,6 +17,7 @@ use std::collections::HashMap; +use dupe::Dupe; use starlark_map::small_map::SmallMap; use starlark_syntax::syntax::ast::AssignOp; use starlark_syntax::syntax::ast::AssignP; @@ -30,6 +31,7 @@ use starlark_syntax::syntax::ast::IdentP; use starlark_syntax::syntax::ast::StmtP; use starlark_syntax::syntax::def::DefParamKind; use starlark_syntax::syntax::def::DefParams; +use starlark_syntax::syntax::def::DefRegularParamMode; use starlark_syntax::syntax::uniplate::Visit; use crate::codemap::CodeMap; @@ -44,13 +46,15 @@ use crate::eval::compiler::scope::payload::CstTypeExpr; use crate::eval::compiler::scope::BindingId; use crate::eval::compiler::scope::ResolvedIdent; use crate::typing::arc_ty::ArcTy; +use crate::typing::callable_param::ParamIsRequired; use crate::typing::error::InternalError; -use crate::typing::function::Param; use crate::typing::mode::TypecheckMode; use crate::typing::tuple::TyTuple; use crate::typing::ty::Approximation; use crate::typing::ty::Ty; +use crate::typing::ParamSpec; use crate::typing::TyBasic; +use crate::util::arc_str::ArcStr; #[derive(Clone)] pub(crate) enum BindExpr<'a> { @@ -214,35 +218,55 @@ impl<'a, 'b> BindingsCollect<'a, 'b> { return_type, .. 
} = def; - let mut params2 = Vec::with_capacity(params.len()); - let def_params = + let DefParams { params, indices: _ } = DefParams::unpack(params, codemap).map_err(InternalError::from_eval_exception)?; - for (i, p) in def_params.params.iter().enumerate() { + + let mut pos_only = Vec::new(); + let mut pos_or_named = Vec::new(); + let mut args = None; + let mut named_only = Vec::new(); + let mut kwargs = None; + + for p in params { let name = &p.node.ident; let ty = p.node.ty; let ty = Self::resolve_ty_opt(ty, typecheck_mode, codemap)?; let name_ty = match &p.node.kind { - DefParamKind::Regular(default_value) => { - let mut param = if i >= def_params.num_positional as usize { - Param::name_only(&name.ident, ty.clone()) - } else { - Param::pos_or_name(&name.ident, ty.clone()) + DefParamKind::Regular(mode, default_value) => { + let required = match default_value.is_some() { + true => ParamIsRequired::No, + false => ParamIsRequired::Yes, }; - if default_value.is_some() { - param = param.optional(); + match mode { + DefRegularParamMode::PosOnly => { + pos_only.push((required, ty.dupe())); + } + DefRegularParamMode::PosOrName => { + pos_or_named.push(( + ArcStr::from(name.ident.as_str()), + required, + ty.dupe(), + )); + } + DefRegularParamMode::NameOnly => { + named_only.push(( + ArcStr::from(name.ident.as_str()), + required, + ty.dupe(), + )); + } } - params2.push(param); Some((name, ty)) } DefParamKind::Args => { // There is the type we require people calling us use (usually any) // and then separately the type we are when we are running (always tuple) - params2.push(Param::args(ty.clone())); + args = Some(ty.dupe()); Some((name, Ty::basic(TyBasic::Tuple(TyTuple::Of(ArcTy::new(ty)))))) } DefParamKind::Kwargs => { let var_ty = Ty::dict(Ty::string(), ty.clone()); - params2.push(Param::kwargs(ty)); + kwargs = Some(ty.dupe()); Some((name, var_ty)) } }; @@ -252,6 +276,8 @@ impl<'a, 'b> BindingsCollect<'a, 'b> { .insert(name.resolved_binding_id(codemap)?, ty); } } + let params2 = ParamSpec::new_parts(pos_only, pos_or_named, args, named_only, kwargs) + .map_err(|e| InternalError::from_error(e, def.signature_span(), codemap))?; let ret_ty = Self::resolve_ty_opt(return_type.as_deref(), typecheck_mode, codemap)?; self.bindings.types.insert( name.resolved_binding_id(codemap)?, @@ -311,9 +337,9 @@ impl<'a, 'b> BindingsCollect<'a, 'b> { if let ExprP::Dot(id, attr) = &***fun { if let ExprP::Identifier(id) = &id.node { let res = match attr.as_str() { - "append" if args.len() == 1 => Some((false, 0)), - "insert" if args.len() == 2 => Some((false, 1)), - "extend" if args.len() == 1 => Some((true, 0)), + "append" if args.args.len() == 1 => Some((false, 0)), + "insert" if args.args.len() == 2 => Some((false, 1)), + "extend" if args.args.len() == 1 => Some((true, 0)), _ => None, }; if let Some((extend, arg)) = res { @@ -321,9 +347,9 @@ impl<'a, 'b> BindingsCollect<'a, 'b> { id.node.payload.as_ref().unwrap() { let bind = if extend { - BindExpr::ListExtend(*id, args[arg].expr()) + BindExpr::ListExtend(*id, args.args[arg].expr()) } else { - BindExpr::ListAppend(*id, args[arg].expr()) + BindExpr::ListAppend(*id, args.args[arg].expr()) }; self.bindings.expressions.entry(*id).or_default().push(bind) } diff --git a/starlark-rust/starlark/src/typing/call_args.rs b/starlark-rust/starlark/src/typing/call_args.rs new file mode 100644 index 0000000000000..6675a45024a5c --- /dev/null +++ b/starlark-rust/starlark/src/typing/call_args.rs @@ -0,0 +1,29 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. 
+ * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use starlark_syntax::codemap::Spanned; + +use crate::typing::Ty; + +/// Function call arguments. +pub struct TyCallArgs<'a> { + pub(crate) pos: Vec<Spanned<Ty>>, + pub(crate) named: Vec<Spanned<(&'a str, Ty)>>, + /// In Starlark, `*args` always come after all positional and named arguments. + pub(crate) args: Option<Spanned<Ty>>, + pub(crate) kwargs: Option<Spanned<Ty>>, +} diff --git a/starlark-rust/starlark/src/typing/callable.rs b/starlark-rust/starlark/src/typing/callable.rs new file mode 100644 index 0000000000000..bbcd02fd1ad59 --- /dev/null +++ b/starlark-rust/starlark/src/typing/callable.rs @@ -0,0 +1,113 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::fmt::Display; +use std::sync::OnceLock; + +use allocative::Allocative; +use dupe::Dupe; +use starlark_syntax::codemap::Span; + +use crate::typing::call_args::TyCallArgs; +use crate::typing::error::TypingOrInternalError; +use crate::typing::ty::TypeRenderConfig; +use crate::typing::ParamSpec; +use crate::typing::Ty; +use crate::typing::TypingOracleCtx; +use crate::util::arc_or_static::ArcOrStatic; + +#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Allocative)] +struct TyCallableInner { + params: ParamSpec, + result: Ty, +} + +/// `typing.Callable`. +#[derive(Debug, Dupe, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Allocative)] +pub struct TyCallable { + inner: ArcOrStatic<TyCallableInner>, +} + +impl TyCallable {
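To make the new `TyCallArgs` shape concrete: for a call such as `f(1, x=2, *rest, **kw)`, the typechecker buckets the argument *types* as sketched below. This is a crate-internal illustration only (the fields are `pub(crate)`), with `span` standing in for the real source spans and the types chosen arbitrarily:

```rust
fn example_buckets<'a>(span: Span) -> TyCallArgs<'a> {
    TyCallArgs {
        // `1` is a positional argument.
        pos: vec![Spanned { span, node: Ty::int() }],
        // `x=2` is a named argument, keyed by its name.
        named: vec![Spanned { span, node: ("x", Ty::int()) }],
        // `*rest`: the recorded type is the splatted iterable's element type.
        args: Some(Spanned { span, node: Ty::any() }),
        // `**kw`: the recorded type is the splatted dict's value type.
        kwargs: Some(Spanned { span, node: Ty::any() }),
    }
}
```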
+ /// Create a new callable type. + pub fn new(params: ParamSpec, result: Ty) -> TyCallable { + TyCallable { + inner: ArcOrStatic::new(TyCallableInner { params, result }), + } + } + + pub(crate) fn validate_call( + &self, + span: Span, + args: &TyCallArgs, + oracle: TypingOracleCtx, + ) -> Result<Ty, TypingOrInternalError> { + oracle.validate_fn_call(span, self, args) + } + + pub(crate) fn params(&self) -> &ParamSpec { + &self.inner.params + } + + pub(crate) fn result(&self) -> &Ty { + &self.inner.result + } + + pub(crate) fn any() -> TyCallable { + static INNER: OnceLock<TyCallableInner> = OnceLock::new(); + TyCallable { + inner: ArcOrStatic::new_static(INNER.get_or_init(|| TyCallableInner { + params: ParamSpec::any(), + result: Ty::any(), + })), + } + } + + pub(crate) fn fmt_with_config( + &self, + f: &mut std::fmt::Formatter<'_>, + config: &TypeRenderConfig, + ) -> std::fmt::Result { + if self.params() == &ParamSpec::any() && self.result() == &Ty::any() { + write!(f, "typing.Callable")?; + } else { + write!(f, "typing.Callable[")?; + if self.params().is_any() { + write!(f, "...")?; + } else if let Some(pos) = self.params().all_required_pos_only() { + write!(f, "[")?; + for (i, p) in pos.iter().enumerate() { + if i != 0 { + write!(f, ", ")?; + } + p.fmt_with_config(f, config)?; + } + write!(f, "]")?; + } else { + write!(f, "\"{}\"", self.params().display_with(config))?; + } + write!(f, ", {}]", self.result().display_with(config))?; + } + Ok(()) + } +} + +impl Display for TyCallable { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.fmt_with_config(f, &TypeRenderConfig::Default) + } +} diff --git a/starlark-rust/starlark/src/typing/callable_param.rs b/starlark-rust/starlark/src/typing/callable_param.rs new file mode 100644 index 0000000000000..9a8f32a069825 --- /dev/null +++ b/starlark-rust/starlark/src/typing/callable_param.rs @@ -0,0 +1,469 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::fmt; +use std::fmt::Display; +use std::fmt::Formatter; +use std::iter; + +use allocative::Allocative; +use dupe::Dupe; +use starlark_map::small_set::SmallSet; +use starlark_syntax::other_error; +use starlark_syntax::syntax::def::DefParamIndices; + +use crate::eval::runtime::params::display::fmt_param_spec; +use crate::eval::runtime::params::display::ParamFmt; +use crate::eval::runtime::params::display::PARAM_FMT_OPTIONAL; +use crate::typing::small_arc_vec_or_static::SmallArcVec1OrStatic; +use crate::typing::ty::TyDisplay; +use crate::typing::ty::TypeRenderConfig; +use crate::typing::Ty; +use crate::util::arc_str::ArcStr; + +/// Indication whether parameter is required. +#[derive( + Debug, Clone, Dupe, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Allocative +)] +pub enum ParamIsRequired { + /// Parameter is required. + Yes, + /// Parameter is optional. + No, +}
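The `fmt_with_config` logic above produces three rendering shapes. A hedged sketch of the expected output, assuming `Ty::bool()` and the crate-internal `ParamSpec::pos_only` constructor introduced in the next file:

```rust
// A fully-unknown callable collapses to the bare form.
assert_eq!(TyCallable::any().to_string(), "typing.Callable");

// All-required, positional-only parameters render as a bracketed list.
let ty = TyCallable::new(
    ParamSpec::pos_only([Ty::int(), Ty::string()], []),
    Ty::bool(),
);
assert_eq!(ty.to_string(), "typing.Callable[[int, str], bool]");
```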
+/// The type of a parameter - can be positional, by name, `*args` or `**kwargs`. +#[derive(Debug, Clone, Dupe, PartialEq, Eq, Hash, PartialOrd, Ord, Allocative)] +pub(crate) enum ParamMode { + /// Parameter can only be passed by position. + PosOnly(ParamIsRequired), + /// Parameter can be passed by position or name. + PosOrName(ArcStr, ParamIsRequired), + /// Parameter can only be passed by name. + NameOnly(ArcStr, ParamIsRequired), + /// Parameter is `*args`. + Args, + /// Parameter is `**kwargs`. + Kwargs, +} + +/// A parameter argument to a function. +#[derive(Debug, Clone, Dupe, PartialEq, Eq, Hash, PartialOrd, Ord, Allocative)] +pub(crate) struct Param { + /// The mode of the parameter. + pub(crate) mode: ParamMode, + /// The type of the parameter. + /// For `*args` it is the type of the tuple elements. + /// For `**kwargs` it is the type of the dict values. + pub(crate) ty: Ty, +} + +impl Param { + /// Create a `*args` parameter. + /// + /// `ty` is a tuple item type. + pub const fn args(ty: Ty) -> Self { + Self { + mode: ParamMode::Args, + ty, + } + } + + /// Create a `**kwargs` parameter. + /// + /// `ty` is a dict value type. + pub const fn kwargs(ty: Ty) -> Self { + Self { + mode: ParamMode::Kwargs, + ty, + } + } + + pub(crate) fn allows_pos(&self) -> bool { + match self.mode { + ParamMode::PosOnly(_) | ParamMode::PosOrName(_, _) | ParamMode::Args => true, + ParamMode::NameOnly(_, _) | ParamMode::Kwargs => false, + } + } + + pub(crate) fn name(&self) -> Option<&str> { + match &self.mode { + ParamMode::PosOnly(_) => None, + ParamMode::PosOrName(x, _) => Some(x.as_str()), + ParamMode::NameOnly(x, _) => Some(x.as_str()), + ParamMode::Args => None, + ParamMode::Kwargs => None, + } + } + + /// Get a display name for this parameter. + pub(crate) fn name_display(&self) -> &str { + match &self.mode { + ParamMode::PosOnly(_) => "_", + ParamMode::PosOrName(x, _) => x, + ParamMode::NameOnly(x, _) => x, + ParamMode::Args => "*args", + ParamMode::Kwargs => "**kwargs", + } + } +} + +struct ParamSpecSplit<'a> { + pos_only: &'a [Param], + pos_or_named: &'a [Param], + args: Option<&'a Param>, + named_only: &'a [Param], + kwargs: Option<&'a Param>, +} + +/// Callable parameter specification (e.g. positional only followed by `**kwargs`).
+#[derive(Debug, Eq, PartialEq, Clone, Dupe, Hash, PartialOrd, Ord, Allocative)] +pub struct ParamSpec { + params: SmallArcVec1OrStatic<Param>, + indices: DefParamIndices, +} + +impl Display for ParamSpec { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + self.fmt_with_config(f, &TypeRenderConfig::Default) + } +} + +impl ParamSpec { + pub(crate) fn fmt_with_config( + &self, + f: &mut Formatter<'_>, + config: &TypeRenderConfig, + ) -> fmt::Result { + fn pf<'a>( + p: &'a Param, + config: &'a TypeRenderConfig, + ) -> ParamFmt<'a, TyDisplay<'a>, &'static str> { + ParamFmt { + name: match &p.mode { + ParamMode::PosOrName(name, _) | ParamMode::NameOnly(name, _) => name.as_str(), + ParamMode::PosOnly(_) => "_", + ParamMode::Args => "args", + ParamMode::Kwargs => "kwargs", + }, + ty: Some(p.ty.display_with(config)), + default: match p.mode { + ParamMode::PosOnly(ParamIsRequired::Yes) + | ParamMode::PosOrName(_, ParamIsRequired::Yes) + | ParamMode::NameOnly(_, ParamIsRequired::Yes) => None, + ParamMode::PosOnly(ParamIsRequired::No) + | ParamMode::PosOrName(_, ParamIsRequired::No) + | ParamMode::NameOnly(_, ParamIsRequired::No) => Some(PARAM_FMT_OPTIONAL), + ParamMode::Args | ParamMode::Kwargs => None, + }, + } + } + + let ParamSpecSplit { + pos_only, + pos_or_named, + args, + named_only, + kwargs, + } = self.split(); + + fmt_param_spec( + f, + pos_only.iter().map(|p| pf(p, config)), + pos_or_named.iter().map(|p| pf(p, config)), + args.map(|p| pf(p, config)), + named_only.iter().map(|p| pf(p, config)), + kwargs.map(|p| pf(p, config)), + ) + } + + pub(crate) fn display_with<'a>(&'a self, config: &'a TypeRenderConfig) -> ParamSpecDisplay<'a> { + ParamSpecDisplay { + param_spec: self, + config, + } + } + + pub(crate) fn params(&self) -> &[Param] { + &self.params + } + + /// Create a new parameter specification from different parameter kinds in order.
+ pub fn new_parts( + pos_only: impl IntoIterator<Item = (ParamIsRequired, Ty)>, + pos_or_name: impl IntoIterator<Item = (ArcStr, ParamIsRequired, Ty)>, + args: Option<Ty>, + named_only: impl IntoIterator<Item = (ArcStr, ParamIsRequired, Ty)>, + kwargs: Option<Ty>, + ) -> crate::Result<ParamSpec> { + let pos_only = pos_only.into_iter(); + let pos_or_name = pos_or_name.into_iter(); + let named_only = named_only.into_iter(); + + let mut seen_names: SmallSet<ArcStr> = SmallSet::new(); + + let mut params = Vec::with_capacity( + pos_only.size_hint().0 + + pos_or_name.size_hint().0 + + args.is_some() as usize + + named_only.size_hint().0 + + kwargs.is_some() as usize, + ); + + for (req, ty) in pos_only { + params.push(Param { + mode: ParamMode::PosOnly(req), + ty, + }); + } + + let num_positional_only = params.len() as u32; + + for (name, req, ty) in pos_or_name { + if !seen_names.insert(name.dupe()) { + return Err(other_error!("duplicate parameter name: `{}`", name)); + } + params.push(Param { + mode: ParamMode::PosOrName(name, req), + ty, + }); + } + + let num_positional = params.len() as u32; + + let mut index_of_args = None; + if let Some(ty) = args { + index_of_args = Some(params.len() as u32); + params.push(Param { + mode: ParamMode::Args, + ty, + }); + } + + for (name, req, ty) in named_only { + if !seen_names.insert(name.dupe()) { + return Err(other_error!("duplicate parameter name: `{}`", name)); + } + params.push(Param { + mode: ParamMode::NameOnly(name, req), + ty, + }); + } + + let mut index_of_kwargs = None; + if let Some(ty) = kwargs { + index_of_kwargs = Some(params.len() as u32); + params.push(Param { + mode: ParamMode::Kwargs, + ty, + }); + } + + Ok(ParamSpec { + params: SmallArcVec1OrStatic::clone_from_slice(&params), + indices: DefParamIndices { + num_positional, + num_positional_only, + args: index_of_args, + kwargs: index_of_kwargs, + }, + }) + } + + /// `*, x, y`. + pub fn new_named_only( + named_only: impl IntoIterator<Item = (ArcStr, ParamIsRequired, Ty)>, + ) -> crate::Result<ParamSpec> { + Self::new_parts([], [], None, named_only, None) + } + + /// `*args`. + pub(crate) fn args(ty: Ty) -> ParamSpec { + ParamSpec::new_parts([], [], Some(ty), [], None).expect("Cannot fail") + } + + /// `**kwargs`. + pub fn kwargs(ty: Ty) -> ParamSpec { + ParamSpec::new_parts([], [], None, [], Some(ty)).expect("Cannot fail") + } + + /// `arg=, arg=, ..., arg, arg, ..., /`. + pub(crate) fn pos_only( + required: impl IntoIterator<Item = Ty>, + optional: impl IntoIterator<Item = Ty>, + ) -> ParamSpec { + ParamSpec::new_parts( + iter::empty() + .chain(required.into_iter().map(|ty| (ParamIsRequired::Yes, ty))) + .chain(optional.into_iter().map(|ty| (ParamIsRequired::No, ty))), + [], + None, + [], + None, + ) + .expect("Cannot fail") + } + + /// No parameters. + pub fn empty() -> ParamSpec { + ParamSpec::pos_only([], []) + } + + pub(crate) fn any() -> ParamSpec { + static ANY_PARAMS: [Param; 2] = [Param::args(Ty::any()), Param::kwargs(Ty::any())]; + ParamSpec { + params: SmallArcVec1OrStatic::new_static(&ANY_PARAMS), + indices: DefParamIndices { + num_positional: 0, + num_positional_only: 0, + args: Some(0), + kwargs: Some(1), + }, + } + } + + /// Is `*args, **kwargs`. + pub(crate) fn is_any(&self) -> bool { + self == &Self::any() + } + + fn split(&self) -> ParamSpecSplit<'_> { + ParamSpecSplit { + pos_only: &self.params[self.indices.pos_only()], + pos_or_named: &self.params[self.indices.pos_or_named()], + args: self.indices.args.map(|a| &self.params[a as usize]), + named_only: &self.params[self.indices.named_only(self.params.len())], + kwargs: self.indices.kwargs.map(|a| &self.params[a as usize]), + } + }
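Putting `new_parts` together: a hedged sketch of building the spec for a signature shaped like `def f(a, /, b, *, c=None, **kwargs)`, with the parameter types picked arbitrarily for illustration:

```rust
let spec = ParamSpec::new_parts(
    // `a`: positional-only and required.
    [(ParamIsRequired::Yes, Ty::int())],
    // `b`: can be passed by position or by name.
    [(ArcStr::from("b"), ParamIsRequired::Yes, Ty::string())],
    // No `*args`.
    None,
    // `c`: name-only and optional, since it has a default.
    [(ArcStr::from("c"), ParamIsRequired::No, Ty::any())],
    // `**kwargs`, with `str` values, say.
    Some(Ty::string()),
)
.expect("no duplicate parameter names");
```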
+ /// All parameters are required and positional only. + pub(crate) fn all_required_pos_only(&self) -> Option<Vec<&Ty>> { + let (pos_only, named_only) = self.all_required_pos_only_named_only()?; + if named_only.is_empty() { + Some(pos_only) + } else { + None + } + } + + /// All parameters are required and positional only or named only. + pub(crate) fn all_required_pos_only_named_only(&self) -> Option<(Vec<&Ty>, Vec<(&str, &Ty)>)> { + match self.split() { + ParamSpecSplit { + pos_only, + pos_or_named: [], + args: None, + named_only, + kwargs: None, + } => { + let pos_only: Vec<&Ty> = pos_only + .iter() + .map(|p| match p.mode { + ParamMode::PosOnly(ParamIsRequired::Yes) => Some(&p.ty), + _ => None, + }) + .collect::<Option<_>>()?; + let named_only: Vec<(&str, &Ty)> = named_only + .iter() + .map(|p| match &p.mode { + ParamMode::NameOnly(name, ParamIsRequired::Yes) => { + Some((name.as_str(), &p.ty)) + } + _ => None, + }) + .collect::<Option<_>>()?; + Some((pos_only, named_only)) + } + _ => None, + } + } +} + +pub(crate) struct ParamSpecDisplay<'a> { + param_spec: &'a ParamSpec, + config: &'a TypeRenderConfig, +} + +impl Display for ParamSpecDisplay<'_> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + self.param_spec.fmt_with_config(f, self.config) + } +} + +#[cfg(test)] +mod tests { + use std::collections::HashMap; + use std::fmt::Write; + + use starlark_syntax::dialect::Dialect; + use starlark_syntax::golden_test_template::golden_test_template; + use starlark_syntax::syntax::AstModule; + + use crate::environment::Globals; + use crate::typing::AstModuleTypecheck; + + #[test] + fn test_param_spec_display() { + let functions = r#" +def simple(x, y, z): pass +def default_value(x, y=1, z=2): pass +def param_type(x: int, y: str, z: int, w: list): pass +def named_only_a(x, *, y): pass +def named_only_b(*, y): pass +def pos_only_a(x, /, y): pass +def pos_only_b(x, /, *, y): pass +def pos_only_c(x, /, *args): pass +def pos_only_d(x, /, *args, **kwargs): pass +"#; + let mut out = String::new(); + let mut first = true; + + for test in functions.lines() { + let test = test.trim(); + if test.is_empty() { + continue; + } + + let ast = AstModule::parse( + "test_param_spec_display.star", + test.to_owned(), + &Dialect::AllOptionsInternal, + ) + .unwrap(); + let (errors, typemap, _interface, approximations) = + ast.typecheck(&Globals::standard(), &HashMap::new()); + if let Some(error) = errors.into_iter().next() { + panic!("Error: {:?}", error); + } + assert!(approximations.is_empty()); + let def = typemap.find_first_binding().unwrap(); + + if first { + first = false; + } else { + writeln!(out).unwrap(); + } + write!(out, "{test}\n{def}\n").unwrap(); + } + + golden_test_template( + "src/typing/callable_param_test_param_spec_display.golden", + &out, + ); + } +} diff --git a/starlark-rust/starlark/src/typing/callable_param_test_param_spec_display.golden b/starlark-rust/starlark/src/typing/callable_param_test_param_spec_display.golden new file mode 100644 index 0000000000000..eb401e291288f --- /dev/null +++ b/starlark-rust/starlark/src/typing/callable_param_test_param_spec_display.golden @@ -0,0 +1,32 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +def simple(x, y, z): pass +def(x: typing.Any, y: typing.Any, z: typing.Any) -> typing.Any + +def default_value(x, y=1, z=2): pass +def(x: typing.Any, y: typing.Any = ..., z: typing.Any = ...)
-> typing.Any + +def param_type(x: int, y: str, z: int, w: list): pass +def(x: int, y: str, z: int, w: list) -> typing.Any + +def named_only_a(x, *, y): pass +def(x: typing.Any, *, y: typing.Any) -> typing.Any + +def named_only_b(*, y): pass +def(*, y: typing.Any) -> typing.Any + +def pos_only_a(x, /, y): pass +def(_: typing.Any, /, y: typing.Any) -> typing.Any + +def pos_only_b(x, /, *, y): pass +def(_: typing.Any, /, *, y: typing.Any) -> typing.Any + +def pos_only_c(x, /, *args): pass +def(_: typing.Any, /, *args: typing.Any) -> typing.Any + +def pos_only_d(x, /, *args, **kwargs): pass +def(_: typing.Any, /, *args: typing.Any, **kwargs: typing.Any) -> typing.Any diff --git a/starlark-rust/starlark/src/typing/ctx.rs b/starlark-rust/starlark/src/typing/ctx.rs index 565e8ccd4ac09..0eca3439fa6e7 100644 --- a/starlark-rust/starlark/src/typing/ctx.rs +++ b/starlark-rust/starlark/src/typing/ctx.rs @@ -20,18 +20,18 @@ use std::fmt::Debug; use starlark_map::unordered_map::UnorderedMap; use starlark_syntax::slice_vec_ext::SliceExt; -use starlark_syntax::syntax::ast::ArgumentP; use starlark_syntax::syntax::ast::AssignOp; use starlark_syntax::syntax::ast::AssignTargetP; use starlark_syntax::syntax::ast::AstLiteral; use starlark_syntax::syntax::ast::BinOp; +use starlark_syntax::syntax::ast::CallArgsP; use starlark_syntax::syntax::ast::ClauseP; use starlark_syntax::syntax::ast::ExprP; use starlark_syntax::syntax::ast::ForClauseP; +use starlark_syntax::syntax::call::CallArgsUnpack; use crate::codemap::Span; use crate::codemap::Spanned; -use crate::eval::compiler::scope::payload::CstArgument; use crate::eval::compiler::scope::payload::CstAssignTarget; use crate::eval::compiler::scope::payload::CstExpr; use crate::eval::compiler::scope::payload::CstIdent; @@ -41,11 +41,11 @@ use crate::eval::compiler::scope::ResolvedIdent; use crate::eval::compiler::scope::Slot; use crate::typing::basic::TyBasic; use crate::typing::bindings::BindExpr; +use crate::typing::call_args::TyCallArgs; use crate::typing::error::InternalError; use crate::typing::error::TypingError; use crate::typing::error::TypingOrInternalError; use crate::typing::fill_types_for_lint::ModuleVarTypes; -use crate::typing::function::Arg; use crate::typing::oracle::ctx::TypingOracleCtx; use crate::typing::oracle::traits::TypingBinOp; use crate::typing::oracle::traits::TypingUnOp; @@ -94,12 +94,7 @@ impl TypingContext<'_> { } } - fn validate_call( - &self, - fun: &Ty, - args: &[Spanned<Arg>], - span: Span, - ) -> Result<Ty, InternalError> { + fn validate_call(&self, fun: &Ty, args: &TyCallArgs, span: Span) -> Result<Ty, InternalError> { self.result_to_ty_with_internal_error(self.oracle.validate_call(span, fun, args)) } @@ -107,10 +102,22 @@ impl TypingContext<'_> { self.result_to_ty(self.oracle.iter_item(Spanned { node: ty, span })) } - pub(crate) fn validate_type(&self, got: Spanned<&Ty>, require: &Ty) { + pub(crate) fn validate_type( + &self, + got: Spanned<&Ty>, + require: &Ty, + ) -> Result<(), InternalError> { if let Err(e) = self.oracle.validate_type(got, require) { - self.errors.borrow_mut().push(e); + match e { + TypingOrInternalError::Typing(e) => { + self.errors.borrow_mut().push(e); + } + TypingOrInternalError::Internal(e) => { + return Err(e); + } + } } + Ok(()) } fn expr_dot(&self, ty: &Ty, attr: &str, span: Span) -> Ty { @@ -185,7 +192,7 @@ impl TypingContext<'_> { // We know about list and dict, everything else we just ignore if self.types[id].is_list() { // If we know it MUST be a list, then the index must be an int - self.validate_type(index.as_ref(), &Ty::int()); +
self.validate_type(index.as_ref(), &Ty::int())?; } for ty in self.types[id].iter_union() { match ty { @@ -204,7 +211,7 @@ impl TypingContext<'_> { Ok(Ty::unions(res)) } BindExpr::ListAppend(id, e) => { - if self.oracle.probably_a_list(&self.types[id]) { + if self.oracle.probably_a_list(&self.types[id])? { Ok(Ty::list(self.expression_type(e)?)) } else { // It doesn't seem to be a list, so let's assume the append is non-mutating @@ -212,7 +219,7 @@ } } BindExpr::ListExtend(id, e) => { - if self.oracle.probably_a_list(&self.types[id]) { + if self.oracle.probably_a_list(&self.types[id])? { Ok(Ty::list( self.from_iterated(&self.expression_type(e)?, e.span), )) @@ -295,27 +302,64 @@ impl TypingContext<'_> { &self, span: Span, f: &CstExpr, - args: &[CstArgument], + args: &CallArgsP<CstPayload>, ) -> Result<Ty, TypingOrInternalError> { - let args_ty: Vec<Spanned<Arg>> = args.try_map(|x| { - Ok(Spanned { - span: x.span, - node: match &**x { - ArgumentP::Positional(x) => Arg::Pos(self.expression_type(x)?), - ArgumentP::Named(name, x) => Arg::Name(name.as_str(), self.expression_type(x)?), - ArgumentP::Args(x) => { - let ty = self.expression_type(x)?; - self.from_iterated(&ty, x.span); - Arg::Args(ty) - } - ArgumentP::KwArgs(x) => { - let ty = self.expression_type_spanned(x)?; - self.validate_type(ty.as_ref(), &Ty::dict(Ty::string(), Ty::any())); - Arg::Kwargs(ty.node) - } - }, - }) - })?; + let args = CallArgsUnpack::unpack(args, self.oracle.codemap) + .map_err(InternalError::from_eval_exception)?; + + let CallArgsUnpack { + pos, + named, + star, + star_star, + } = args; + + let mut pos_ty: Vec<Spanned<Ty>> = Vec::new(); + for pos in pos { + pos_ty.push(Spanned { + span: pos.span, + node: self.expression_type(&pos.node.expr())?, + }); + } + + let mut named_ty: Vec<Spanned<(&str, Ty)>> = Vec::new(); + for named in named { + let Some(name) = named.name() else { + return Err(InternalError::msg( + "Named argument without name", + named.span, + self.oracle.codemap, + )); + }; + named_ty.push(Spanned { + span: named.span, + node: (name, self.expression_type(&named.node.expr())?), + }); + } + + let args_ty = if let Some(star) = star { + let ty = self.expression_type_spanned(&star.node.expr())?; + self.from_iterated(&ty, star.span); + Some(ty) + } else { + None + }; + + let kwargs_ty = if let Some(star_star) = star_star { + let ty = self.expression_type_spanned(&star_star.node.expr())?; + self.validate_type(ty.as_ref(), &Ty::dict(Ty::string(), Ty::any()))?; + Some(ty) + } else { + None + }; + + let args_ty = TyCallArgs { + pos: pos_ty, + named: named_ty, + args: args_ty, + kwargs: kwargs_ty, + }; + let f_ty = self.expression_type(f)?; // If we can't resolve the types of the arguments, we can't validate the call, // but we still know the type of the result since the args don't impact that @@ -331,7 +375,7 @@ stride: Option<&CstExpr>, ) -> Result<Ty, TypingOrInternalError> { for e in [start, stop, stride].iter().copied().flatten() { - self.validate_type(self.expression_type_spanned(e)?.as_ref(), &Ty::int()); + self.validate_type(self.expression_type_spanned(e)?.as_ref(), &Ty::int())?; } Ok(self.result_to_ty(self.oracle.expr_slice(span, self.expression_type(x)?))) } @@ -387,7 +431,7 @@ ExprP::Identifier(x) => Ok(self.expr_ident(x)), ExprP::Lambda(_) => { self.approximation("We don't type check lambdas", ()); - Ok(Ty::any_function()) + Ok(Ty::any_callable()) } ExprP::Literal(x) => match x { AstLiteral::Int(_) => Ok(Ty::int()), diff --git a/starlark-rust/starlark/src/typing/custom.rs b/starlark-rust/starlark/src/typing/custom.rs index
0a5771bb53c85..0f061ec7397fa 100644 --- a/starlark-rust/starlark/src/typing/custom.rs +++ b/starlark-rust/starlark/src/typing/custom.rs @@ -32,9 +32,12 @@ use dupe::Dupe; use starlark_map::StarlarkHasher; use crate::codemap::Span; -use crate::codemap::Spanned; +use crate::typing::call_args::TyCallArgs; +use crate::typing::callable::TyCallable; +use crate::typing::error::InternalError; +use crate::typing::error::TypingNoContextError; +use crate::typing::error::TypingNoContextOrInternalError; use crate::typing::error::TypingOrInternalError; -use crate::typing::Arg; use crate::typing::Ty; use crate::typing::TyBasic; use crate::typing::TyFunction; @@ -53,30 +56,39 @@ pub trait TyCustomImpl: Debug + Display + Hash + Ord + Allocative + Send + Sync fn validate_call( &self, span: Span, - _args: &[Spanned<Arg>], + _args: &TyCallArgs, oracle: TypingOracleCtx, ) -> Result<Ty, TypingOrInternalError> { Err(oracle.msg_error(span, format!("Value of type `{}` is not callable", self))) } /// Must override if implementing `validate_call`. - fn is_callable(&self) -> bool { - false + fn as_callable(&self) -> Option<TyCallable> { + None } fn as_function(&self) -> Option<&TyFunction> { None } - fn bin_op(&self, bin_op: TypingBinOp, rhs: &TyBasic, ctx: &TypingOracleCtx) -> Result<Ty, ()> { + fn bin_op( + &self, + bin_op: TypingBinOp, + rhs: &TyBasic, + ctx: &TypingOracleCtx, + ) -> Result<Ty, TypingNoContextOrInternalError> { let _unused = (bin_op, rhs, ctx); - Err(()) + Err(TypingNoContextOrInternalError::Typing) } - fn iter_item(&self) -> Result<Ty, ()> { - Err(()) + fn iter_item(&self) -> Result<Ty, TypingNoContextError> { + Err(TypingNoContextError) } - fn index(&self, item: &TyBasic, ctx: &TypingOracleCtx) -> Result<Ty, ()> { + fn index( + &self, + item: &TyBasic, + ctx: &TypingOracleCtx, + ) -> Result<Ty, TypingNoContextOrInternalError> { let _unused = (item, ctx); - Err(()) + Err(TypingNoContextOrInternalError::Typing) } - fn attribute(&self, attr: &str) -> Result<Ty, ()>; + fn attribute(&self, attr: &str) -> Result<Ty, TypingNoContextError>; fn union2(x: Arc<Self>, other: Arc<Self>) -> Result<Arc<Self>, (Arc<Self>, Arc<Self>)> { if x == other { Ok(x) } else { Err((x, other)) } } @@ -104,21 +116,25 @@ pub(crate) trait TyCustomDyn: Debug + Display + Allocative + Send + Sync + 'stat fn validate_call_dyn( &self, span: Span, - args: &[Spanned<Arg>], + args: &TyCallArgs, oracle: TypingOracleCtx, ) -> Result<Ty, TypingOrInternalError>; - fn is_callable_dyn(&self) -> bool; fn is_intersects_with_dyn(&self, other: &TyBasic) -> bool; + fn as_callable_dyn(&self) -> Option<TyCallable>; fn as_function_dyn(&self) -> Option<&TyFunction>; - fn iter_item_dyn(&self) -> Result<Ty, ()>; - fn index_dyn(&self, index: &TyBasic, ctx: &TypingOracleCtx) -> Result<Ty, ()>; - fn attribute_dyn(&self, attr: &str) -> Result<Ty, ()>; + fn iter_item_dyn(&self) -> Result<Ty, TypingNoContextError>; + fn index_dyn( + &self, + index: &TyBasic, + ctx: &TypingOracleCtx, + ) -> Result<Ty, TypingNoContextOrInternalError>; + fn attribute_dyn(&self, attr: &str) -> Result<Ty, TypingNoContextError>; fn bin_op_dyn( &self, bin_op: TypingBinOp, rhs: &TyBasic, ctx: &TypingOracleCtx, - ) -> Result<Ty, ()>; + ) -> Result<Ty, TypingNoContextOrInternalError>; fn union2_dyn( self: Arc<Self>, other: Arc<dyn TyCustomDyn>, @@ -163,14 +179,14 @@ impl<T: TyCustomImpl> TyCustomDyn for T { fn validate_call_dyn( &self, span: Span, - args: &[Spanned<Arg>], + args: &TyCallArgs, oracle: TypingOracleCtx, ) -> Result<Ty, TypingOrInternalError> { self.validate_call(span, args, oracle) } - fn is_callable_dyn(&self) -> bool { - self.is_callable() + fn as_callable_dyn(&self) -> Option<TyCallable> { + self.as_callable() } fn is_intersects_with_dyn(&self, other: &TyBasic) -> bool { @@ -181,15 +197,19 @@ impl<T: TyCustomImpl> TyCustomDyn for T { self.as_function() } - fn attribute_dyn(&self, attr: &str) -> Result<Ty, ()> { + fn attribute_dyn(&self, attr: &str) -> Result<Ty, TypingNoContextError> { self.attribute(attr) } - fn iter_item_dyn(&self) -> Result<Ty, ()> { + fn iter_item_dyn(&self) -> Result<Ty, TypingNoContextError> { self.iter_item() } - fn index_dyn(&self,
index: &TyBasic, ctx: &TypingOracleCtx) -> Result<Ty, ()> { + fn index_dyn( + &self, + index: &TyBasic, + ctx: &TypingOracleCtx, + ) -> Result<Ty, TypingNoContextOrInternalError> { self.index(index, ctx) } @@ -198,7 +218,7 @@ impl<T: TyCustomImpl> TyCustomDyn for T { bin_op: TypingBinOp, rhs: &TyBasic, ctx: &TypingOracleCtx, - ) -> Result<Ty, ()> { + ) -> Result<Ty, TypingNoContextOrInternalError> { self.bin_op(bin_op, rhs, ctx) } @@ -258,15 +278,24 @@ impl TyCustom { x.0.intersects_dyn(&*y.0) } - pub(crate) fn intersects_with(&self, other: &TyBasic) -> bool { + pub(crate) fn intersects_with( + &self, + other: &TyBasic, + ctx: TypingOracleCtx, + ) -> Result<bool, InternalError> { if self.0.is_intersects_with_dyn(other) { - return true; + return Ok(true); } match other { - TyBasic::Custom(other) => Self::intersects(self, other), - TyBasic::Name(name) => self.as_name() == Some(name.as_str()), - TyBasic::Callable => self.0.is_callable_dyn(), - _ => false, + TyBasic::Custom(other) => Ok(Self::intersects(self, other)), + TyBasic::Callable(c) => { + if let Some(this) = self.0.as_callable_dyn() { + ctx.callables_intersect(&this, c) + } else { + Ok(false) + } + } + _ => Ok(false), } } diff --git a/starlark-rust/starlark/src/typing/error.rs b/starlark-rust/starlark/src/typing/error.rs index 9558890e9157e..05926fba430b0 100644 --- a/starlark-rust/starlark/src/typing/error.rs +++ b/starlark-rust/starlark/src/typing/error.rs @@ -17,6 +17,7 @@ use std::fmt::Display; +use starlark_syntax::diagnostic::WithDiagnostic; use starlark_syntax::eval_exception::EvalException; use crate::codemap::CodeMap; @@ -30,20 +31,38 @@ impl InternalError { #[cold] pub(crate) fn msg(message: impl Display, span: Span, codemap: &CodeMap) -> InternalError { InternalError(EvalException::new( - anyhow::anyhow!("{} (internal error)", message), + crate::Error::new_kind(crate::ErrorKind::Internal(anyhow::Error::msg( + message.to_string(), + ))), span, codemap, )) } + #[cold] + pub(crate) fn from_diagnostic(d: WithDiagnostic<impl Display>) -> InternalError { + let internal = d.map(|m| { + crate::Error::new_kind(crate::ErrorKind::Internal(anyhow::Error::msg( + m.to_string(), + ))) + }); + InternalError(internal.into()) + } + #[cold] pub(crate) fn from_eval_exception(e: EvalException) -> InternalError { - InternalError(e) + InternalError(e.into_internal_error()) + } + + #[cold] + pub(crate) fn from_error(e: crate::Error, span: Span, codemap: &CodeMap) -> InternalError { + let e = e.into_internal_error(); + InternalError(EvalException::new(e, span, codemap)) } #[cold] - pub(crate) fn into_anyhow(self) -> anyhow::Error { - self.0.into_anyhow() + pub(crate) fn into_error(self) -> crate::Error { + self.0.into_error() } #[cold] @@ -61,7 +80,7 @@ impl TypingError { // So we pay for expensive error creation we ignore. Make this function cheap.
#[cold] pub(crate) fn msg(message: impl Display, span: Span, codemap: &CodeMap) -> TypingError { - TypingError(EvalException::new( + TypingError(EvalException::new_anyhow( anyhow::Error::msg(message.to_string()), span, codemap, )) } #[cold] - pub(crate) fn new(error: anyhow::Error, span: Span, codemap: &CodeMap) -> TypingError { + pub(crate) fn new(error: crate::Error, span: Span, codemap: &CodeMap) -> TypingError { TypingError(EvalException::new(error, span, codemap)) } + #[cold] + pub(crate) fn new_anyhow(error: anyhow::Error, span: Span, codemap: &CodeMap) -> TypingError { + TypingError(EvalException::new_anyhow(error, span, codemap)) + } + #[cold] pub(crate) fn from_eval_exception(e: EvalException) -> TypingError { TypingError(e) } #[cold] - pub(crate) fn into_anyhow(self) -> anyhow::Error { - self.0.into_anyhow() + pub(crate) fn into_error(self) -> crate::Error { + self.0.into_error() } #[cold] @@ -89,6 +113,9 @@ } } +/// Like [`TypingError`], but without a message or span. +pub struct TypingNoContextError; + /// Either a typing error or an internal error. /// * Typing error means, types are not compatible. /// * Internal error means, bug in the typechecker. @@ -108,3 +135,20 @@ impl From<InternalError> for TypingOrInternalError { TypingOrInternalError::Internal(e) } } + +pub enum TypingNoContextOrInternalError { + Typing, + Internal(InternalError), +} + +impl From<TypingNoContextError> for TypingNoContextOrInternalError { + fn from(_: TypingNoContextError) -> Self { + TypingNoContextOrInternalError::Typing + } +} + +impl From<InternalError> for TypingNoContextOrInternalError { + fn from(e: InternalError) -> Self { + TypingNoContextOrInternalError::Internal(e) + } +} diff --git a/starlark-rust/starlark/src/typing/fill_types_for_lint.rs b/starlark-rust/starlark/src/typing/fill_types_for_lint.rs index edf3b35693942..aba11524f42d3 100644 --- a/starlark-rust/starlark/src/typing/fill_types_for_lint.rs +++ b/starlark-rust/starlark/src/typing/fill_types_for_lint.rs @@ -26,6 +26,7 @@ use starlark_syntax::syntax::ast::AssignTargetP; use starlark_syntax::syntax::ast::AstLiteral; use starlark_syntax::syntax::ast::AstString; use starlark_syntax::syntax::ast::BinOp; +use starlark_syntax::syntax::ast::CallArgsP; use starlark_syntax::syntax::ast::DefP; use starlark_syntax::syntax::ast::ExprP; use starlark_syntax::syntax::ast::ForP; @@ -34,13 +35,14 @@ use starlark_syntax::syntax::ast::LoadP; use starlark_syntax::syntax::ast::StmtP; use starlark_syntax::syntax::def::DefParamKind; use starlark_syntax::syntax::def::DefParams; +use starlark_syntax::syntax::def::DefRegularParamMode; use starlark_syntax::syntax::type_expr::TypeExprUnpackP; +use starlark_syntax::syntax::type_expr::TypePathP; use crate::codemap::Span; use crate::codemap::Spanned; use crate::environment::slots::ModuleSlotId; use crate::eval::compiler::constants::Constants; -use crate::eval::compiler::scope::payload::CstArgument; use crate::eval::compiler::scope::payload::CstAssignIdent; use crate::eval::compiler::scope::payload::CstAssignIdentExt; use crate::eval::compiler::scope::payload::CstExpr; @@ -51,12 +53,14 @@ use crate::eval::compiler::scope::payload::CstTypeExpr; use crate::eval::compiler::scope::ModuleScopeData; use crate::eval::compiler::scope::ResolvedIdent; use crate::eval::compiler::scope::Slot; +use crate::typing::callable_param::ParamIsRequired; use crate::typing::error::InternalError; use crate::typing::error::TypingError; use crate::typing::Approximation; -use crate::typing::Param; +use crate::typing::ParamSpec;
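How the two new enums in `error.rs` above are meant to compose: leaf checks return the span-free `TypingNoContextError`, and the `From` impls let `?` widen errors step by step until a caller that owns a span attaches context. A hedged sketch with hypothetical helpers `require_int`/`check_index` (not part of the diff):

```rust
// A leaf check: no span is available here, so no message is built yet.
fn require_int(ty: &Ty) -> Result<(), TypingNoContextError> {
    if ty == &Ty::int() { Ok(()) } else { Err(TypingNoContextError) }
}

// An intermediate layer: `?` widens the error via `From<TypingNoContextError>`,
// while an `InternalError` would travel through the other variant untouched.
fn check_index(ty: &Ty) -> Result<(), TypingNoContextOrInternalError> {
    require_int(ty)?;
    Ok(())
}
```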
use crate::typing::Ty; use crate::typing::TypingOracleCtx; +use crate::util::arc_str::ArcStr; use crate::values::tuple::AllocTuple; use crate::values::types::ellipsis::Ellipsis; use crate::values::typing::type_compiled::compiled::TypeCompiled; @@ -117,7 +121,6 @@ struct GlobalTypesBuilder<'a, 'v> { errors: Vec<TypingError>, module_scope_data: &'a ModuleScopeData<'a>, ctx: TypingOracleCtx<'a>, - allow_string_literals_in_type_expr: bool, } impl<'a, 'v> GlobalTypesBuilder<'a, 'v> { @@ -125,7 +128,7 @@ InternalError::msg(message, span, self.ctx.codemap) } - fn err(&mut self, span: Span, e: anyhow::Error) -> GlobalValue<'v> { + fn err(&mut self, span: Span, e: crate::Error) -> GlobalValue<'v> { self.errors .push(TypingError::new(e, span, self.ctx.codemap)); GlobalValue::any() @@ -134,7 +137,7 @@ fn call( &mut self, _f: &CstExpr, - _args: &[CstArgument], + _args: &CallArgsP<CstPayload>, ) -> Result<GlobalValue<'v>, InternalError> { // TODO(nga): could be a call like `record(...)`, and we need to evaluate it. Ok(GlobalValue::any()) } @@ -438,35 +441,47 @@ } fn top_level_def(&mut self, def: &DefP<CstPayload>) -> Result<(), InternalError> { - let def_params = DefParams::unpack(&def.params, self.ctx.codemap) + let DefParams { + params: def_params, + indices: _, + } = DefParams::unpack(&def.params, self.ctx.codemap) .map_err(InternalError::from_eval_exception)?; - let mut params = Vec::with_capacity(def_params.params.len()); - for (i, param) in def_params.params.iter().enumerate() { + let mut pos_only = Vec::new(); + let mut pos_or_name = Vec::new(); + let mut args = None; + let mut name_only = Vec::new(); + let mut kwargs = None; + + for param in def_params { let ty = self.get_ty_expr_opt(param.ty)?; match param.kind { - DefParamKind::Regular(default_value) => { - let pos_only = i < def_params.num_positional as usize; + DefParamKind::Regular(mode, default_value) => { let name = param.ident.ident.as_str(); - let param = if pos_only { - Param::pos_or_name(name, ty) - } else { - Param::name_only(name, ty) + let required = match default_value.is_some() { + true => ParamIsRequired::No, + false => ParamIsRequired::Yes, }; - let param = if default_value.is_some() { - param.optional() - } else { - param + match mode { + DefRegularParamMode::PosOnly => pos_only.push((required, ty)), + DefRegularParamMode::PosOrName => { + pos_or_name.push((ArcStr::from(name), required, ty)) + } + DefRegularParamMode::NameOnly => { + name_only.push((ArcStr::from(name), required, ty)) + } }; - params.push(param); } - DefParamKind::Args => params.push(Param::args(ty)), - DefParamKind::Kwargs => params.push(Param::kwargs(ty)), + DefParamKind::Args => args = Some(ty), + DefParamKind::Kwargs => kwargs = Some(ty), } } let result = self.get_ty_expr_opt(def.return_type.as_deref())?; + let params = ParamSpec::new_parts(pos_only, pos_or_name, args, name_only, kwargs) + .map_err(|e| InternalError::from_error(e, def.signature_span(), self.ctx.codemap))?; + self.assign_ident_value(&def.name, GlobalValue::ty(Ty::function(params, result))) } @@ -502,11 +517,11 @@ Ty::any() } - fn try_proper_ty( - &mut self, - first: &CstIdent, - rem: &[Spanned<&str>], - ) -> Result<Option<Ty>, InternalError> { + fn eval_path( + &mut self, + path: &TypePathP<CstPayload>, + ) -> Result<Option<Value<'v>>, InternalError> { + let TypePathP { first, rem } = path; let Some(mut value) = self.expr_ident(first)?.value else { return Ok(None); }; for x in rem { match value.get_attr_error(x, self.heap) { Ok(v) => value = v, Err(e) => { let
span = first.span.merge(x.span); self.errors .push(TypingError::new(e, span, self.ctx.codemap)); - return Ok(Some(Ty::any())); + return Ok(None); } } } + Ok(Some(value)) + } + + fn try_proper_ty(&mut self, path: &TypePathP) -> Result, InternalError> { + let TypePathP { first, rem } = path; + let Some(value) = self.eval_path(path)? else { + return Ok(None); + }; match TypeCompiled::new(value, self.heap) { Ok(ty) => Ok(Some(ty.as_ty().clone())), Err(e) => { let span = Span::merge_all(iter::once(first.span).chain(rem.iter().map(|x| x.span))); self.errors - .push(TypingError::new(e, span, self.ctx.codemap)); - Ok(Some(Ty::any())) + .push(TypingError::new_anyhow(e, span, self.ctx.codemap)); + Ok(None) } } } - fn path_ty(&mut self, first: &CstIdent, rem: &[Spanned<&str>]) -> Result { - if let Some(ty) = self.try_proper_ty(first, rem)? { + fn path_ty(&mut self, path: &TypePathP) -> Result { + let TypePathP { first, rem } = path; + if let Some(ty) = self.try_proper_ty(path)? { return Ok(ty); } @@ -547,20 +571,25 @@ impl<'a, 'v> GlobalTypesBuilder<'a, 'v> { x: &Spanned>, ) -> Result { match &x.node { + TypeExprUnpackP::Ellipsis => { + self.approximations + .push(Approximation::new("Ellipsis cannot be used as type", x)); + Ok(Ty::any()) + } + TypeExprUnpackP::List(..) => { + self.approximations.push(Approximation::new( + "List literal [...] cannot be used as type", + x, + )); + Ok(Ty::any()) + } TypeExprUnpackP::Tuple(xs) => { Ok(Ty::tuple(xs.try_map(|x| self.from_type_expr_impl(x))?)) } TypeExprUnpackP::Union(xs) => { Ok(Ty::unions(xs.try_map(|x| self.from_type_expr_impl(x))?)) } - TypeExprUnpackP::Literal(x) => { - if x.is_empty() || x.starts_with('_') { - Ok(Ty::any()) - } else { - Ok(Ty::name(x)) - } - } - TypeExprUnpackP::Path(first, rem) => self.path_ty(first, rem), + TypeExprUnpackP::Path(path) => self.path_ty(path), TypeExprUnpackP::Index(a, i) => { if let Some(a) = self.expr_ident(a)?.value { if !a.ptr_eq(Constants::get().fn_list.0.to_value()) { @@ -592,62 +621,90 @@ impl<'a, 'v> GlobalTypesBuilder<'a, 'v> { } } TypeExprUnpackP::Index2(a, i0, i1) => { - if let Some(a) = self.expr_ident(a)?.value { - if !a.ptr_eq(Constants::get().fn_dict.0.to_value()) { - self.approximations.push(Approximation::new("Not dict", x)); - return Ok(Ty::any()); - } - let i0 = self.from_type_expr_impl(i0)?; - let i1 = self.from_type_expr_impl(i1)?; - let i0 = TypeCompiled::from_ty(&i0, self.heap); - let i1 = TypeCompiled::from_ty(&i1, self.heap); - match a.get_ref().at2(i0.to_inner(), i1.to_inner(), self.heap) { - Ok(t) => match TypeCompiled::new(t, self.heap) { - Ok(ty) => Ok(ty.as_ty().clone()), - Err(_) => { + if let Some(a) = self.eval_path(a)? 
{ + if a.ptr_eq(Constants::get().fn_dict.0.to_value()) { + let i0 = self.from_type_expr_impl(i0)?; + let i1 = self.from_type_expr_impl(i1)?; + let i0 = TypeCompiled::from_ty(&i0, self.heap); + let i1 = TypeCompiled::from_ty(&i1, self.heap); + match a.get_ref().at2(i0.to_inner(), i1.to_inner(), self.heap) { + Ok(t) => match TypeCompiled::new(t, self.heap) { + Ok(ty) => Ok(ty.as_ty().clone()), + Err(_) => { + self.approximations + .push(Approximation::new("TypeCompiled::new failed", x)); + Ok(Ty::any()) + } + }, + Err(e) => { self.approximations - .push(Approximation::new("TypeCompiled::new failed", x)); + .push(Approximation::new("Getitem2 failed", e)); Ok(Ty::any()) } - }, - Err(e) => { + } + } else if a.ptr_eq(Constants::get().fn_tuple.0.to_value()) { + let i0 = self.from_type_expr_impl(i0)?; + let TypeExprUnpackP::Ellipsis = i1.node else { self.approximations - .push(Approximation::new("Getitem2 failed", e)); - Ok(Ty::any()) + .push(Approximation::new("Expecting ellipsis in tuple[x, ...]", x)); + return Ok(Ty::any()); + }; + let r0 = TypeCompiled::from_ty(&i0, self.heap); + match a.get_ref().at2( + r0.to_inner(), + Ellipsis::new_value().to_value(), + self.heap, + ) { + Ok(t) => match TypeCompiled::new(t, self.heap) { + Ok(ty) => Ok(ty.as_ty().clone()), + Err(_) => { + self.approximations + .push(Approximation::new("TypeCompiled::new failed", x)); + Ok(Ty::any()) + } + }, + Err(e) => { + self.approximations + .push(Approximation::new("Getitem2 failed", e)); + Ok(Ty::any()) + } } - } - } else { - self.approximations - .push(Approximation::new("Not global", x)); - Ok(Ty::any()) - } - } - TypeExprUnpackP::Index2Ellipsis(a, i) => { - if let Some(a) = self.expr_ident(a)?.value { - if !a.ptr_eq(Constants::get().fn_tuple.0.to_value()) { - // TODO(nga): this should be an error. 
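// For orientation, a hedged sketch (illustrative Starlark, not a golden test
// from this change) of the three subscripted type expressions the rewritten
// `Index2` handling around this hunk accepts:
//
//     def f(
//         d: dict[str, int],                      # `fn_dict` branch
//         t: tuple[int, ...],                     # `fn_tuple` branch; the second index must be `...`
//         c: typing.Callable[[str, int], bool],   # `typing_callable` branch; the first index must be a list
//     ):
//         pass
//
// Any other subscripted head falls through to the "Not dict or tuple"
// approximation and `Ty::any()` rather than a hard error.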
- self.approximations.push(Approximation::new("Not tuple", x)); - return Ok(Ty::any()); - } - let i = self.from_type_expr_impl(i)?; - let i = TypeCompiled::from_ty(&i, self.heap); - match a - .get_ref() - .at2(i.to_inner(), Ellipsis::new_value().to_value(), self.heap) - { - Ok(t) => match TypeCompiled::new(t, self.heap) { - Ok(ty) => Ok(ty.as_ty().clone()), - Err(_) => { + } else if a.ptr_eq(Constants::get().typing_callable.0.to_value()) { + let TypeExprUnpackP::List(items) = &i0.node else { + self.approximations.push(Approximation::new( + "Expecting list in Callable[[...], ...]", + x, + )); + return Ok(Ty::any()); + }; + let args = items.try_map(|x| { + Ok( + TypeCompiled::from_ty(&self.from_type_expr_impl(x)?, self.heap) + .to_inner(), + ) + })?; + let args = self.heap.alloc_list(&args); + let ret = self.from_type_expr_impl(i1)?; + let ret = TypeCompiled::from_ty(&ret, self.heap).to_inner(); + match a.get_ref().at2(args, ret, self.heap) { + Ok(t) => match TypeCompiled::new(t, self.heap) { + Ok(ty) => Ok(ty.as_ty().clone()), + Err(_) => { + self.approximations + .push(Approximation::new("TypeCompiled::new failed", x)); + Ok(Ty::any()) + } + }, + Err(e) => { self.approximations - .push(Approximation::new("TypeCompiled::new failed", x)); + .push(Approximation::new("Getitem2 failed", e)); Ok(Ty::any()) } - }, - Err(e) => { - self.approximations - .push(Approximation::new("Getitem2 failed", e)); - Ok(Ty::any()) } + } else { + self.approximations + .push(Approximation::new("Not dict or tuple", x)); + return Ok(Ty::any()); } } else { self.approximations @@ -659,12 +716,8 @@ impl<'a, 'v> GlobalTypesBuilder<'a, 'v> { } fn ty_expr(&mut self, expr: &CstTypeExpr) -> Result { - let x = TypeExprUnpackP::unpack( - &expr.expr, - self.ctx.codemap, - self.allow_string_literals_in_type_expr, - ) - .map_err(InternalError::from_eval_exception)?; + let x = TypeExprUnpackP::unpack(&expr.expr, self.ctx.codemap) + .map_err(InternalError::from_diagnostic)?; self.from_type_expr_impl(&x) } @@ -713,7 +766,6 @@ pub(crate) fn fill_types_for_lint_typechecker( ctx: TypingOracleCtx, module_scope_data: &ModuleScopeData, approximations: &mut Vec, - allow_string_literals_in_type_expr: bool, ) -> Result<(Vec, ModuleVarTypes), InternalError> { let heap = Heap::new(); let mut builder = GlobalTypesBuilder { @@ -723,7 +775,6 @@ pub(crate) fn fill_types_for_lint_typechecker( errors: Vec::new(), module_scope_data, approximations, - allow_string_literals_in_type_expr, }; for stmt in module.iter_mut() { builder.top_level_stmt(stmt)?; diff --git a/starlark-rust/starlark/src/typing/function.rs b/starlark-rust/starlark/src/typing/function.rs index b5aec80c7dd57..621159be5352f 100644 --- a/starlark-rust/starlark/src/typing/function.rs +++ b/starlark-rust/starlark/src/typing/function.rs @@ -15,165 +15,43 @@ * limitations under the License. 
*/ -use std::fmt; use std::fmt::Debug; -use std::fmt::Display; -use std::fmt::Formatter; use std::hash::Hash; use allocative::Allocative; use dupe::Dupe; use crate::codemap::Span; -use crate::codemap::Spanned; +use crate::typing::call_args::TyCallArgs; +use crate::typing::callable::TyCallable; use crate::typing::custom::TyCustomImpl; +use crate::typing::error::TypingNoContextError; +use crate::typing::error::TypingNoContextOrInternalError; use crate::typing::error::TypingOrInternalError; -use crate::typing::small_arc_vec_or_static::SmallArcVec1OrStatic; +use crate::typing::ParamSpec; use crate::typing::Ty; use crate::typing::TyBasic; use crate::typing::TypingBinOp; use crate::typing::TypingOracleCtx; -use crate::values::layout::heap::profile::arc_str::ArcStr; use crate::values::typing::type_compiled::alloc::TypeMatcherAlloc; -/// An argument being passed to a function -#[derive(Debug)] -pub enum Arg<'a> { - /// A positional argument. - Pos(Ty), - /// A named argument. - Name(&'a str, Ty), - /// A `*args`. - Args(Ty), - /// A `**kwargs`. - Kwargs(Ty), -} - -/// The type of a parameter - can be positional, by name, `*args` or `**kwargs`. -#[derive(Debug, Clone, Dupe, PartialEq, Eq, Hash, PartialOrd, Ord, Allocative)] -pub(crate) enum ParamMode { - /// Parameter can only be passed by position. - PosOnly, - /// Parameter can be passed by position or name. - PosOrName(ArcStr), - /// Parameter can only be passed by name. - NameOnly(ArcStr), - /// Parameter is `*args`. - Args, - /// Parameter is `**kwargs`. - Kwargs, -} - -/// A parameter argument to a function -#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Allocative)] -pub struct Param { - /// The type of parameter - pub(crate) mode: ParamMode, - /// Whether the parameter have a default value or is otherwise optional - pub(crate) optional: bool, - /// The type of the parameter - pub(crate) ty: Ty, -} - -impl Param { - /// Create a positional only parameter. - pub fn pos_only(ty: Ty) -> Self { - Self { - mode: ParamMode::PosOnly, - optional: false, - ty, - } - } - - /// Create a named only parameter. - pub fn name_only(name: &str, ty: Ty) -> Self { - Self { - mode: ParamMode::NameOnly(ArcStr::from(name)), - optional: false, - ty, - } - } - - /// Create a positional or named parameter. - pub fn pos_or_name(name: &str, ty: Ty) -> Self { - Self { - mode: ParamMode::PosOrName(ArcStr::from(name)), - optional: false, - ty, - } - } - - /// Make a parameter optional. - pub fn optional(self) -> Self { - Self { - optional: true, - ..self - } - } - - /// Create a `*args` parameter. - /// - /// `ty` is a tuple item type. - pub const fn args(ty: Ty) -> Self { - Self { - mode: ParamMode::Args, - optional: true, - ty, - } - } - - /// Create a `**kwargs` parameter. - /// - /// `ty` is a dict value type. - pub const fn kwargs(ty: Ty) -> Self { - Self { - mode: ParamMode::Kwargs, - optional: true, - ty, - } - } - - pub(crate) fn allows_pos(&self) -> bool { - match self.mode { - ParamMode::PosOnly | ParamMode::PosOrName(_) | ParamMode::Args => true, - ParamMode::NameOnly(_) | ParamMode::Kwargs => false, - } - } - - pub(crate) fn allows_many(&self) -> bool { - match self.mode { - ParamMode::Args | ParamMode::Kwargs => true, - _ => false, - } - } - - /// Get a display name for this parameter. - pub fn name(&self) -> &str { - match &self.mode { - ParamMode::PosOnly => "_", - ParamMode::PosOrName(x) => x, - ParamMode::NameOnly(x) => x, - ParamMode::Args => "*args", - ParamMode::Kwargs => "**kwargs", - } - } -} - /// Custom function typechecker. 
pub trait TyCustomFunctionImpl: Debug + Eq + Ord + Hash + Allocative + Send + Sync + 'static { - fn has_type_attr(&self) -> bool { + fn is_type(&self) -> bool { false } fn validate_call( &self, span: Span, - args: &[Spanned], + args: &TyCallArgs, oracle: TypingOracleCtx, ) -> Result; + fn as_callable(&self) -> TyCallable; + fn as_function(&self) -> Option<&TyFunction> { None } @@ -189,7 +67,11 @@ pub trait TyCustomFunctionImpl: Debug, derive_more::Display )] -#[display(fmt = "\"function\"")] +#[display( + "def({}) -> {}", + self.0.as_callable().params(), + self.0.as_callable().result(), +)] pub struct TyCustomFunction(pub F); impl TyCustomImpl for TyCustomFunction { @@ -200,14 +82,14 @@ impl TyCustomImpl for TyCustomFunction { fn validate_call( &self, span: Span, - args: &[Spanned], + args: &TyCallArgs, oracle: TypingOracleCtx, ) -> Result { self.0.validate_call(span, args, oracle) } - fn is_callable(&self) -> bool { - true + fn as_callable(&self) -> Option { + Some(self.0.as_callable()) } fn as_function(&self) -> Option<&TyFunction> { @@ -219,29 +101,26 @@ impl TyCustomImpl for TyCustomFunction { bin_op: TypingBinOp, _rhs: &TyBasic, _ctx: &TypingOracleCtx, - ) -> Result { + ) -> Result { match bin_op { // `str | list`. - TypingBinOp::BitOr if self.0.has_type_attr() => { - // TODO(nga): result is type, but we don't have a type for type yet. - Ok(Ty::any()) - } - _ => Err(()), + TypingBinOp::BitOr if self.0.is_type() => Ok(Ty::basic(TyBasic::Type)), + _ => Err(TypingNoContextOrInternalError::Typing), } } - fn index(&self, _item: &TyBasic, _ctx: &TypingOracleCtx) -> Result { + fn index( + &self, + _item: &TyBasic, + _ctx: &TypingOracleCtx, + ) -> Result { // TODO(nga): this is hack for `enum` (type) which pretends to be a function. // Should be a custom type. Ok(Ty::any()) } - fn attribute(&self, attr: &str) -> Result { - if attr == "type" && self.0.has_type_attr() { - Ok(Ty::string()) - } else { - Err(()) - } + fn attribute(&self, _attr: &str) -> Result { + Err(TypingNoContextError) } fn matcher(&self, factory: T) -> T::Result { @@ -254,91 +133,49 @@ impl TyCustomImpl for TyCustomFunction { pub struct TyFunction { /// The `.type` property of the function, often `""`. pub(crate) type_attr: Option, - /// The parameters to the function. - pub(crate) params: SmallArcVec1OrStatic, - /// The result type of the function. - pub(crate) result: Ty, + pub(crate) callable: TyCallable, } impl TyFunction { /// Constructor. - pub fn new_with_type_attr(params: Vec, result: Ty, type_attr: Ty) -> Self { + pub fn new_with_type_attr(params: ParamSpec, result: Ty, type_attr: Ty) -> Self { // TODO(nga): validate params are in correct order. TyFunction { type_attr: Some(type_attr), - params: Self::maybe_intern_params(params), - result, + callable: TyCallable::new(params, result), } } /// Constructor. - pub fn new(params: Vec, result: Ty) -> Self { + pub fn new(params: ParamSpec, result: Ty) -> Self { TyFunction { type_attr: None, - params: Self::maybe_intern_params(params), - result, + callable: TyCallable::new(params, result), } } - fn maybe_intern_params(params: Vec) -> SmallArcVec1OrStatic { - if params.as_slice() == Self::any_params() { - SmallArcVec1OrStatic::new_static(Self::any_params()) - } else { - SmallArcVec1OrStatic::clone_from_slice(¶ms) - } - } - - /// `*args`, `**kwargs` parameters. - fn any_params() -> &'static [Param] { - static ANY_PARAMS: [Param; 2] = [Param::args(Ty::any()), Param::kwargs(Ty::any())]; - &ANY_PARAMS - } - - /// Function type that accepts any arguments and returns any result. 
- pub(crate) fn _any() -> TyFunction { - TyFunction { - type_attr: None, - params: SmallArcVec1OrStatic::new_static(Self::any_params()), - result: Ty::any(), - } - } -} - -impl Display for TyFunction { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - let TyFunction { params, result, .. } = self; - write!(f, "def(")?; - let mut first = true; - for param in params.iter() { - if !first { - write!(f, ", ")?; - first = false; - } - let opt = if param.optional { "=.." } else { "" }; - match ¶m.mode { - ParamMode::PosOnly => write!(f, "#: {}{}", param.ty, opt)?, - ParamMode::PosOrName(name) => write!(f, "#{}: {}{}", name, param.ty, opt)?, - ParamMode::NameOnly(name) => write!(f, "{}: {}{}", name, param.ty, opt)?, - ParamMode::Args => write!(f, "*args: {}", param.ty)?, - ParamMode::Kwargs => write!(f, "**kwargs: {}", param.ty)?, - } - } - write!(f, ") -> {}", result) + /// Callable signature of the function. + pub fn callable(&self) -> &TyCallable { + &self.callable } } impl TyCustomFunctionImpl for TyFunction { - fn has_type_attr(&self) -> bool { + fn is_type(&self) -> bool { self.type_attr.is_some() } fn validate_call( &self, span: Span, - args: &[Spanned], + args: &TyCallArgs, oracle: TypingOracleCtx, ) -> Result { - oracle.validate_fn_call(span, self, args) + oracle.validate_fn_call(span, &self.callable, args) + } + + fn as_callable(&self) -> TyCallable { + self.callable.dupe() } fn as_function(&self) -> Option<&TyFunction> { diff --git a/starlark-rust/starlark/src/typing/golden/call_not_callable.golden b/starlark-rust/starlark/src/typing/golden/call_not_callable.golden deleted file mode 100644 index d61393cc4f3d8..0000000000000 --- a/starlark-rust/starlark/src/typing/golden/call_not_callable.golden +++ /dev/null @@ -1,25 +0,0 @@ -# @generated -# To regenerate, run: -# ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests -# ``` - -Code: -def foo(x: list): - x() - -Error: -error: Call to a non-callable type `list[typing.Any]` - --> filename:3:5 - | -3 | x() - | ^^^ - | - -Compiler typechecker (eval): -error: Call to a non-callable type `list[typing.Any]` - --> filename:3:5 - | -3 | x() - | ^^^ - | diff --git a/starlark-rust/starlark/src/typing/golden/new_syntax_without_dot_type.golden b/starlark-rust/starlark/src/typing/golden/new_syntax_without_dot_type.golden deleted file mode 100644 index cb8ff5131b199..0000000000000 --- a/starlark-rust/starlark/src/typing/golden/new_syntax_without_dot_type.golden +++ /dev/null @@ -1,31 +0,0 @@ -# @generated -# To regenerate, run: -# ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests -# ``` - -Code: -def foo(x: str): pass - -def bar(): - # good - foo("test") - - # bad - foo(1) - -Error: -error: Expected type `str` but got `int` - --> filename:9:9 - | -9 | foo(1) - | ^ - | - -Compiler typechecker (eval): -error: Expected type `str` but got `int` - --> filename:9:9 - | -9 | foo(1) - | ^ - | diff --git a/starlark-rust/starlark/src/typing/golden/types_of_args_kwargs.golden b/starlark-rust/starlark/src/typing/golden/types_of_args_kwargs.golden deleted file mode 100644 index 135d76d75b3f6..0000000000000 --- a/starlark-rust/starlark/src/typing/golden/types_of_args_kwargs.golden +++ /dev/null @@ -1,18 +0,0 @@ -# @generated -# To regenerate, run: -# ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests -# ``` - -Code: -def foo(*args: str, **kwargs: int): - pass - -No errors. - -Types: -args: tuple[str, ...] 
-kwargs: dict[str, int]
-
-Compiler typechecker (eval):
-No errors.
diff --git a/starlark-rust/starlark/src/typing/macro_support.rs b/starlark-rust/starlark/src/typing/macro_support.rs
new file mode 100644
index 0000000000000..b15273321b710
--- /dev/null
+++ b/starlark-rust/starlark/src/typing/macro_support.rs
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2019 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#![doc(hidden)]
+
+use crate::typing::Ty;
+use crate::typing::TyBasic;
+
+pub(crate) fn unpack_args_item_ty(ty: Ty) -> Ty {
+    Ty::unions(
+        ty.iter_union()
+            .iter()
+            .map(|ty| match ty {
+                TyBasic::Tuple(item) => item.item_ty(),
+                _ => Ty::any(),
+            })
+            .collect(),
+    )
+}
+
+pub(crate) fn unpack_kwargs_value_ty(ty: Ty) -> Ty {
+    Ty::unions(
+        ty.iter_union()
+            .iter()
+            .map(|ty| match ty {
+                TyBasic::Dict(_, value) => value.to_ty(),
+                _ => Ty::any(),
+            })
+            .collect(),
+    )
+}
diff --git a/starlark-rust/starlark/src/typing/mod.rs b/starlark-rust/starlark/src/typing/mod.rs
deleted file mode 100644
index 4b1ed90c503ec..0000000000000
--- a/starlark-rust/starlark/src/typing/mod.rs
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright 2019 The Starlark in Rust Authors.
- * Copyright (c) Facebook, Inc. and its affiliates.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Notes:
-// We deal with list.append/list.extend/list.insert, which mutate their list argument
-// We ignore dict.setdefault/dict.update, as these are pretty complex functions
-// We consider "nonsensical" operations like list.remove and == to have implied types that make them meaningful
-// even if they don't fail when doing something silly
-
-//! Types required to support the [`typecheck`](crate::syntax::AstModule::typecheck) function.
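To make the new helpers concrete: for a signature like `def foo(*args: str, **kwargs: int)`, the parameters themselves are typed `tuple[str, ...]` and `dict[str, int]`, and `unpack_args_item_ty` / `unpack_kwargs_value_ty` recover the annotated item and value types. A minimal sketch using crate-internal constructors (illustrative values, not a test from this change):

    // Inside the starlark crate:
    use crate::typing::macro_support::{unpack_args_item_ty, unpack_kwargs_value_ty};
    use crate::typing::Ty;

    fn demo() {
        // `dict[str, int]` -> value type `int`.
        let kwargs_ty = Ty::dict(Ty::string(), Ty::int());
        assert_eq!("int", unpack_kwargs_value_ty(kwargs_ty).to_string());
        // Non-tuple members of a union widen to `Any`.
        let mixed = Ty::union2(Ty::any_tuple(), Ty::none());
        let _item = unpack_args_item_ty(mixed); // here: `Any`, since `NoneType` is not a tuple
    }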
- -pub(crate) mod arc_ty; -pub(crate) mod basic; -pub(crate) mod bindings; -pub(crate) mod ctx; -pub(crate) mod custom; -pub(crate) mod error; -pub(crate) mod fill_types_for_lint; -pub(crate) mod function; -pub(crate) mod interface; -pub(crate) mod mode; -pub(crate) mod oracle; -pub(crate) mod small_arc_vec; -pub(crate) mod small_arc_vec_or_static; -pub(crate) mod starlark_value; -pub(crate) mod structs; -pub(crate) mod tuple; -pub(crate) mod ty; -pub(crate) mod typecheck; -pub(crate) mod user; - -#[cfg(test)] -mod tests; - -pub use basic::TyBasic; -pub use function::Arg; -pub use function::Param; -pub use function::TyFunction; -pub use interface::Interface; -pub use oracle::ctx::TypingOracleCtx; -pub use oracle::traits::TypingBinOp; -pub use oracle::traits::TypingUnOp; -pub use starlark_value::TyStarlarkValue; -pub use structs::TyStruct; -pub use ty::Approximation; -pub use ty::Ty; -pub use ty::TyName; -pub use typecheck::AstModuleTypecheck; -pub use typecheck::TypeMap; -pub use user::TyUser; -pub use user::TyUserFields; -pub use user::TyUserIndex; -pub use user::TyUserParams; diff --git a/starlark-rust/starlark/src/typing/oracle/mod.rs b/starlark-rust/starlark/src/typing/oracle.rs similarity index 100% rename from starlark-rust/starlark/src/typing/oracle/mod.rs rename to starlark-rust/starlark/src/typing/oracle.rs diff --git a/starlark-rust/starlark/src/typing/oracle/ctx.rs b/starlark-rust/starlark/src/typing/oracle/ctx.rs index 70dfa88eeab2d..4ed996ab93d2c 100644 --- a/starlark-rust/starlark/src/typing/oracle/ctx.rs +++ b/starlark-rust/starlark/src/typing/oracle/ctx.rs @@ -16,29 +16,34 @@ */ use std::fmt::Display; +use std::iter; use dupe::Dupe; +use starlark_map::small_map::SmallMap; use starlark_syntax::syntax::ast::BinOp; use crate::codemap::CodeMap; use crate::codemap::Span; use crate::codemap::Spanned; use crate::typing::basic::TyBasic; +use crate::typing::call_args::TyCallArgs; +use crate::typing::callable::TyCallable; +use crate::typing::callable_param::ParamIsRequired; +use crate::typing::callable_param::ParamMode; use crate::typing::error::InternalError; use crate::typing::error::TypingError; +use crate::typing::error::TypingNoContextError; +use crate::typing::error::TypingNoContextOrInternalError; use crate::typing::error::TypingOrInternalError; -use crate::typing::function::Arg; -use crate::typing::function::Param; -use crate::typing::function::ParamMode; -use crate::typing::function::TyFunction; use crate::typing::starlark_value::TyStarlarkValue; use crate::typing::tuple::TyTuple; +use crate::typing::ParamSpec; use crate::typing::Ty; -use crate::typing::TyName; use crate::typing::TypingBinOp; use crate::typing::TypingUnOp; use crate::values::dict::value::MutableDict; use crate::values::list::value::List; +use crate::values::set::value::MutableSet; use crate::values::tuple::value::Tuple; #[derive(Debug, thiserror::Error)] @@ -83,7 +88,7 @@ pub struct TypingOracleCtx<'a> { impl<'a> TypingOracleCtx<'a> { pub(crate) fn mk_error(&self, span: Span, err: impl Into) -> TypingError { - TypingError::new(err.into(), span, self.codemap) + TypingError::new_anyhow(err.into(), span, self.codemap) } pub(crate) fn mk_error_as_maybe_internal( @@ -91,7 +96,7 @@ impl<'a> TypingOracleCtx<'a> { span: Span, err: impl Into, ) -> TypingOrInternalError { - TypingOrInternalError::Typing(TypingError::new(err.into(), span, self.codemap)) + TypingOrInternalError::Typing(TypingError::new_anyhow(err.into(), span, self.codemap)) } pub(crate) fn msg_error(&self, span: Span, msg: impl Display) -> 
TypingOrInternalError { @@ -119,9 +124,13 @@ impl<'a> TypingOracleCtx<'a> { ) } - pub(crate) fn validate_type(&self, got: Spanned<&Ty>, require: &Ty) -> Result<(), TypingError> { - if !self.intersects(got.node, require) { - Err(self.mk_error( + pub(crate) fn validate_type( + &self, + got: Spanned<&Ty>, + require: &Ty, + ) -> Result<(), TypingOrInternalError> { + if !self.intersects(got.node, require)? { + Err(self.mk_error_as_maybe_internal( got.span, TypingOracleCtxError::IncompatibleType { got: got.to_string(), @@ -133,98 +142,100 @@ impl<'a> TypingOracleCtx<'a> { } } + #[allow(clippy::redundant_pattern_matching)] fn validate_args( &self, - params: &[Param], - args: &[Spanned], + params: &ParamSpec, + args: &TyCallArgs, span: Span, ) -> Result<(), TypingOrInternalError> { // Want to figure out which arguments go in which positions - let mut param_args: Vec>> = vec![vec![]; params.len()]; + let mut param_args: Vec>> = vec![vec![]; params.params().len()]; // The next index a positional parameter might fill let mut param_pos = 0; let mut seen_vargs = false; - for arg in args { - match &arg.node { - Arg::Pos(ty) => loop { - match params.get(param_pos) { - None => { - return Err(self.mk_error_as_maybe_internal( - arg.span, - TypingOracleCtxError::TooManyPositionalArguments, - )); - } - Some(param) => { - let found_index = param_pos; - if param.mode != ParamMode::Args { - param_pos += 1; - } - if param.allows_pos() { - param_args[found_index].push(Spanned { - span: arg.span, - node: ty, - }); - break; - } - } + let TyCallArgs { + pos: args_pos, + named: args_named, + args: args_args, + kwargs: args_kwargs, + } = args; + for ty in args_pos { + loop { + match params.params().get(param_pos) { + None => { + return Err(self.mk_error_as_maybe_internal( + ty.span, + TypingOracleCtxError::TooManyPositionalArguments, + )); } - }, - Arg::Name(name, ty) => { - let mut success = false; - for (i, param) in params.iter().enumerate() { - if param.name() == *name || param.mode == ParamMode::Kwargs { - param_args[i].push(Spanned { - span: arg.span, - node: ty, - }); - success = true; + Some(param) => { + let found_index = param_pos; + if param.mode != ParamMode::Args { + param_pos += 1; + } + if param.allows_pos() { + param_args[found_index].push(ty.as_ref()); break; } } - if !success { - return Err(self.mk_error_as_maybe_internal( - arg.span, - TypingOracleCtxError::UnexpectedNamedArgument { - name: (*name).to_owned(), - }, - )); - } - } - Arg::Args(_) => { - param_pos = params.len(); - seen_vargs = true; - } - Arg::Kwargs(_) => { - seen_vargs = true; } } } - - for (param, args) in std::iter::zip(params, param_args) { - if !param.allows_many() && args.len() > 1 { - return Err(TypingOrInternalError::Internal(InternalError::msg( - "bad", - span, - self.codemap, - ))); - } - if args.is_empty() { - // We assume that *args/**kwargs might have splatted things everywhere. 
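// A minimal sketch of the `TyCallArgs` shape consumed by `validate_args`
// above (crate-internal types; spans defaulted for brevity, the same
// shortcut `params_all_pos_only_named_only_intersect` takes further below):
//
//     let call = TyCallArgs {
//         pos: vec![Spanned { node: Ty::int(), span: Span::default() }],
//         named: vec![Spanned { node: ("x", Ty::string()), span: Span::default() }],
//         args: None,   // a `*args` splat, if present
//         kwargs: None, // a `**kwargs` splat, if present
//     };
//
// Positional entries bind to `ParamSpec` slots in order, named entries bind
// by name or fall into `**kwargs`, and a present splat suppresses the
// "missing required parameter" error, since it may fill any slot.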
- if !param.optional && !seen_vargs { - return Err(self.mk_error_as_maybe_internal( - span, - TypingOracleCtxError::MissingRequiredParameter { - name: param.name().to_owned(), - }, - )); + for arg in args_named { + let (name, ty) = &arg.node; + let mut success = false; + for (i, param) in params.params().iter().enumerate() { + if param.name() == Some(*name) || param.mode == ParamMode::Kwargs { + param_args[i].push(Spanned { + span: arg.span, + node: ty, + }); + success = true; + break; } - continue; } + if !success { + return Err(self.mk_error_as_maybe_internal( + arg.span, + TypingOracleCtxError::UnexpectedNamedArgument { + name: (*name).to_owned(), + }, + )); + } + } + if let Some(_) = args_args { + seen_vargs = true; + } + if let Some(_) = args_kwargs { + seen_vargs = true; + } + + for (param, args) in iter::zip(params.params(), param_args) { match param.mode { - ParamMode::PosOnly | ParamMode::PosOrName(_) | ParamMode::NameOnly(_) => { - self.validate_type(args[0], ¶m.ty)?; - } + ParamMode::PosOnly(req) + | ParamMode::PosOrName(_, req) + | ParamMode::NameOnly(_, req) => match args.as_slice() { + [] => { + if req == ParamIsRequired::Yes && !seen_vargs { + return Err(self.mk_error_as_maybe_internal( + span, + TypingOracleCtxError::MissingRequiredParameter { + name: param.name_display().to_owned(), + }, + )); + } + } + [arg] => self.validate_type(*arg, ¶m.ty)?, + [_, _, ..] => { + return Err(TypingOrInternalError::Internal(InternalError::msg( + "Multiple arguments bound to parameter", + span, + self.codemap, + ))); + } + }, ParamMode::Args => { for ty in args { // For an arg, we require the type annotation to be inner value, @@ -233,20 +244,8 @@ impl<'a> TypingOracleCtx<'a> { } } ParamMode::Kwargs => { - let val_types: Vec<_> = param - .ty - .iter_union() - .iter() - .filter_map(|x| match x { - TyBasic::Dict(_k, v) => Some(v.to_ty()), - _ => None, - }) - .collect(); - if !val_types.is_empty() { - let require = Ty::unions(val_types); - for ty in args { - self.validate_type(ty, &require)?; - } + for ty in args { + self.validate_type(ty, ¶m.ty)?; } } } @@ -257,23 +256,11 @@ impl<'a> TypingOracleCtx<'a> { pub(crate) fn validate_fn_call( &self, span: Span, - fun: &TyFunction, - args: &[Spanned], + fun: &TyCallable, + args: &TyCallArgs, ) -> Result { - self.validate_args(&fun.params, args, span)?; - Ok(fun.result.clone()) - } - - fn validate_call_for_type_name( - &self, - span: Span, - ty: &TyName, - _args: &[Spanned], - ) -> Result { - Err(self.mk_error_as_maybe_internal( - span, - TypingOracleCtxError::CallToNonCallable { ty: ty.to_string() }, - )) + self.validate_args(fun.params(), args, span)?; + Ok(fun.result().dupe()) } #[allow(clippy::collapsible_else_if)] @@ -281,13 +268,12 @@ impl<'a> TypingOracleCtx<'a> { &self, span: Span, fun: &TyBasic, - args: &[Spanned], + args: &TyCallArgs, ) -> Result { match fun { TyBasic::Any => Ok(Ty::any()), - TyBasic::Name(n) => self.validate_call_for_type_name(span, n, args), TyBasic::StarlarkValue(t) => Ok(t.validate_call(span, *self)?), - TyBasic::List(_) | TyBasic::Dict(..) | TyBasic::Tuple(_) => Err(self + TyBasic::List(_) | TyBasic::Dict(..) | TyBasic::Tuple(_) | TyBasic::Set(_) => Err(self .mk_error_as_maybe_internal( span, TypingOracleCtxError::CallToNonCallable { @@ -298,7 +284,7 @@ impl<'a> TypingOracleCtx<'a> { // Unknown type, may be callable. 
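// (Note the dispatch change just below: a bare `typing.Callable` used to
// typecheck as "returns `Any`", while `TyBasic::Callable(c)` now carries a
// full `TyCallable` signature, so `c.validate_call(...)` can actually check
// the arguments against the declared parameters.)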
Ok(Ty::any()) } - TyBasic::Callable => Ok(Ty::any()), + TyBasic::Callable(c) => c.validate_call(span, args, *self), TyBasic::Custom(t) => t.0.validate_call_dyn(span, args, *self), } } @@ -308,7 +294,7 @@ impl<'a> TypingOracleCtx<'a> { &self, span: Span, fun: &Ty, - args: &[Spanned], + args: &TyCallArgs, ) -> Result { if fun.is_any() || fun.is_never() { return Ok(fun.dupe()); @@ -339,18 +325,18 @@ impl<'a> TypingOracleCtx<'a> { } } - fn iter_item_basic(&self, ty: &TyBasic) -> Result { + fn iter_item_basic(&self, ty: &TyBasic) -> Result { match ty { TyBasic::Any => Ok(Ty::any()), TyBasic::StarlarkValue(ty) => ty.iter_item(), TyBasic::List(item) => Ok((**item).dupe()), TyBasic::Dict(k, _v) => Ok((**k).dupe()), TyBasic::Tuple(tuple) => Ok(tuple.item_ty()), - TyBasic::Callable => Ok(Ty::any()), + TyBasic::Callable(_) => Ok(Ty::any()), TyBasic::Type => Ok(Ty::any()), TyBasic::Iter(ty) => Ok(ty.to_ty()), TyBasic::Custom(ty) => ty.0.iter_item_dyn(), - TyBasic::Name(_) => Ok(Ty::any()), + TyBasic::Set(item) => Ok((**item).dupe()), } } @@ -358,7 +344,7 @@ impl<'a> TypingOracleCtx<'a> { pub(crate) fn iter_item(&self, iter: Spanned<&Ty>) -> Result { match iter.typecheck_union_simple(|basic| self.iter_item_basic(basic)) { Ok(ty) => Ok(ty), - Err(()) => Err(self.mk_error( + Err(TypingNoContextError) => Err(self.mk_error( iter.span, TypingOracleCtxError::NotIterable { ty: iter.node.clone(), @@ -371,32 +357,35 @@ impl<'a> TypingOracleCtx<'a> { &self, array: &TyBasic, index: Spanned<&TyBasic>, - ) -> Result, InternalError> { + ) -> Result { match array { - TyBasic::Any | TyBasic::Callable | TyBasic::Iter(_) | TyBasic::Type => { - Ok(Ok(Ty::any())) - } + TyBasic::Any | TyBasic::Callable(_) | TyBasic::Iter(_) | TyBasic::Type => Ok(Ty::any()), TyBasic::Tuple(tuple) => { - if !self.intersects_basic(index.node, &TyBasic::int()) { - return Ok(Err(())); + if !self.intersects_basic(index.node, &TyBasic::int())? { + return Err(TypingNoContextOrInternalError::Typing); } - Ok(Ok(tuple.item_ty())) + Ok(tuple.item_ty()) } TyBasic::List(item) => { - if !self.intersects_basic(index.node, &TyBasic::int()) { - return Ok(Err(())); + if !self.intersects_basic(index.node, &TyBasic::int())? { + return Err(TypingNoContextOrInternalError::Typing); } - Ok(Ok((**item).dupe())) + Ok((**item).dupe()) } TyBasic::Dict(k, v) => { - if !self.intersects(&Ty::basic(index.node.dupe()), k) { - return Ok(Err(())); + if !self.intersects(&Ty::basic(index.node.dupe()), k)? { + return Err(TypingNoContextOrInternalError::Typing); } - Ok(Ok((**v).dupe())) + Ok((**v).dupe()) } - TyBasic::StarlarkValue(array) => Ok(array.index(index.node)), - TyBasic::Custom(c) => Ok(c.0.index_dyn(index.node, self)), - TyBasic::Name(_) => Ok(Ok(Ty::any())), + TyBasic::Set(item) => { + if !self.intersects(&Ty::basic(index.node.dupe()), item)? { + return Err(TypingNoContextOrInternalError::Typing); + } + Ok((**item).dupe()) + } + TyBasic::StarlarkValue(array) => Ok(array.index(index.node)?), + TyBasic::Custom(c) => Ok(c.0.index_dyn(index.node, self)?), } } @@ -422,11 +411,14 @@ impl<'a> TypingOracleCtx<'a> { span: index.span, node: index_basic, }, - )? 
{ + ) { Ok(ty) => { good.push(ty); } - Err(()) => {} + Err(TypingNoContextOrInternalError::Internal(e)) => { + return Err(TypingOrInternalError::Internal(e)); + } + Err(TypingNoContextOrInternalError::Typing) => {} } } } @@ -447,44 +439,42 @@ impl<'a> TypingOracleCtx<'a> { } } - fn expr_slice_basic(&self, array: &TyBasic) -> Result { - if array.is_str() || array.is_tuple() || array.is_list() || array.as_name() == Some("range") - { + fn expr_slice_basic(&self, array: &TyBasic) -> Result { + if let TyBasic::StarlarkValue(v) = array { + v.slice() + } else if array.is_tuple() || array.is_list() { Ok(Ty::basic(array.dupe())) } else { - Err(()) + Err(TypingNoContextError) } } pub(crate) fn expr_slice(&self, span: Span, array: Ty) -> Result { match array.typecheck_union_simple(|basic| self.expr_slice_basic(basic)) { Ok(ty) => Ok(ty), - Err(()) => Err(self.mk_error( + Err(TypingNoContextError) => Err(self.mk_error( span, TypingOracleCtxError::MissingSliceOperator { ty: array }, )), } } - fn expr_dot_basic(&self, array: &TyBasic, attr: &str) -> Result { + fn expr_dot_basic(&self, array: &TyBasic, attr: &str) -> Result { match array { - TyBasic::Any | TyBasic::Callable | TyBasic::Iter(_) | TyBasic::Type => Ok(Ty::any()), + TyBasic::Any | TyBasic::Callable(_) | TyBasic::Iter(_) | TyBasic::Type => Ok(Ty::any()), TyBasic::StarlarkValue(s) => s.attr(attr), - TyBasic::Tuple(_) => Err(()), + TyBasic::Tuple(_) => Err(TypingNoContextError), TyBasic::List(elem) => match attr { "pop" => Ok(Ty::function( - vec![Param::pos_only(Ty::int()).optional()], + ParamSpec::pos_only([], [Ty::int()]), (**elem).dupe(), )), "index" => Ok(Ty::function( - vec![ - Param::pos_only((**elem).dupe()), - Param::pos_only(Ty::int()).optional(), - ], + ParamSpec::pos_only([(**elem).dupe()], [Ty::int()]), Ty::int(), )), "remove" => Ok(Ty::function( - vec![Param::pos_only((**elem).dupe())], + ParamSpec::pos_only([(**elem).dupe()], []), Ty::none(), )), attr => TyStarlarkValue::new::().attr(attr), @@ -493,37 +483,41 @@ impl<'a> TypingOracleCtx<'a> { match attr { "get" => Ok(Ty::union2( Ty::function( - vec![Param::pos_only(tk.to_ty())], + ParamSpec::pos_only([tk.to_ty()], []), Ty::union2(tv.to_ty(), Ty::none()), ), // This second signature is a bit too lax, but get with a default is much rarer - Ty::function( - vec![Param::pos_only(tk.to_ty()), Param::pos_only(Ty::any())], - Ty::any(), - ), + Ty::function(ParamSpec::pos_only([tk.to_ty(), Ty::any()], []), Ty::any()), + )), + "keys" => Ok(Ty::function( + ParamSpec::empty(), + Ty::basic(TyBasic::List(tk.dupe())), + )), + "values" => Ok(Ty::function( + ParamSpec::empty(), + Ty::basic(TyBasic::List(tv.dupe())), )), - "keys" => Ok(Ty::function(vec![], Ty::basic(TyBasic::List(tk.dupe())))), - "values" => Ok(Ty::function(vec![], Ty::basic(TyBasic::List(tv.dupe())))), "items" => Ok(Ty::function( - vec![], + ParamSpec::empty(), Ty::list(Ty::tuple(vec![tk.to_ty(), tv.to_ty()])), )), "popitem" => Ok(Ty::function( - vec![], + ParamSpec::empty(), Ty::tuple(vec![tk.to_ty(), tv.to_ty()]), )), attr => TyStarlarkValue::new::().attr(attr), } } TyBasic::Custom(custom) => custom.0.attribute_dyn(attr), - TyBasic::Name(_) => Ok(Ty::any()), + //TODO(romanp) add match on attr similar to Dict + TyBasic::Set(_) => TyStarlarkValue::new::().attr(attr), } } pub(crate) fn expr_dot(&self, span: Span, array: &Ty, attr: &str) -> Result { match array.typecheck_union_simple(|basic| self.expr_dot_basic(basic, attr)) { Ok(x) => Ok(x), - Err(()) => Err(self.mk_error( + Err(TypingNoContextError) => Err(self.mk_error( span, 
TypingOracleCtxError::AttributeNotAvailable { ty: array.clone(), @@ -533,13 +527,17 @@ impl<'a> TypingOracleCtx<'a> { } } - fn expr_un_op_basic(&self, ty: &TyBasic, un_op: TypingUnOp) -> Result { + fn expr_un_op_basic( + &self, + ty: &TyBasic, + un_op: TypingUnOp, + ) -> Result { match ty { TyBasic::StarlarkValue(ty) => match ty.un_op(un_op) { Ok(x) => Ok(Ty::basic(TyBasic::StarlarkValue(x))), - Err(()) => Err(()), + Err(TypingNoContextError) => Err(TypingNoContextError), }, - _ => Err(()), + _ => Err(TypingNoContextError), } } @@ -551,7 +549,7 @@ impl<'a> TypingOracleCtx<'a> { ) -> Result { match ty.typecheck_union_simple(|basic| self.expr_un_op_basic(basic, un_op)) { Ok(ty) => Ok(ty), - Err(()) => Err(self.mk_error( + Err(TypingNoContextError) => Err(self.mk_error( span, TypingOracleCtxError::UnaryOperatorNotAvailable { ty, un_op }, )), @@ -563,70 +561,92 @@ impl<'a> TypingOracleCtx<'a> { lhs: &TyBasic, bin_op: TypingBinOp, rhs: Spanned<&TyBasic>, - ) -> Result { + ) -> Result { match lhs { - TyBasic::Any | TyBasic::Iter(_) | TyBasic::Callable | TyBasic::Type => Ok(Ty::any()), - TyBasic::StarlarkValue(lhs) => lhs.bin_op(bin_op, rhs.node), + TyBasic::Any | TyBasic::Iter(_) | TyBasic::Callable(_) | TyBasic::Type => Ok(Ty::any()), + TyBasic::StarlarkValue(lhs) => Ok(lhs.bin_op(bin_op, rhs.node)?), lhs @ TyBasic::List(elem) => match bin_op { TypingBinOp::Less => { - if self.intersects_basic(lhs, rhs.node) { + if self.intersects_basic(lhs, rhs.node)? { Ok(Ty::bool()) } else { - Err(()) + Err(TypingNoContextOrInternalError::Typing) } } TypingBinOp::In => { - if self.intersects(elem, &Ty::basic(rhs.node.dupe())) { + if self.intersects(elem, &Ty::basic(rhs.node.dupe()))? { Ok(Ty::bool()) } else { - Err(()) + Err(TypingNoContextOrInternalError::Typing) } } TypingBinOp::Add => { - if self.intersects_basic(rhs.node, &TyBasic::any_list()) { + if self.intersects_basic(rhs.node, &TyBasic::any_list())? { Ok(Ty::list(Ty::union2( elem.to_ty(), self.iter_item_basic(rhs.node)?, ))) } else { - Err(()) + Err(TypingNoContextOrInternalError::Typing) } } TypingBinOp::Mul => { - if self.intersects_basic(rhs.node, &TyBasic::int()) { + if self.intersects_basic(rhs.node, &TyBasic::int())? { Ok(Ty::basic(lhs.dupe())) } else { - Err(()) + Err(TypingNoContextOrInternalError::Typing) } } - _ => TyStarlarkValue::new::().bin_op(bin_op, rhs.node), + _ => Ok(TyStarlarkValue::new::().bin_op(bin_op, rhs.node)?), }, TyBasic::Tuple(_) => { // TODO(nga): can do better types. - TyStarlarkValue::new::().bin_op(bin_op, rhs.node) + Ok(TyStarlarkValue::new::().bin_op(bin_op, rhs.node)?) } TyBasic::Dict(k, v) => match bin_op { TypingBinOp::BitOr => { - if self.intersects_basic(rhs.node, &TyBasic::any_dict()) { + if self.intersects_basic(rhs.node, &TyBasic::any_dict())? { Ok(Ty::union2( Ty::dict(k.to_ty(), v.to_ty()), Ty::basic(rhs.node.dupe()), )) } else { - Err(()) + Err(TypingNoContextOrInternalError::Typing) + } + } + TypingBinOp::In => { + if self.intersects(&Ty::basic(rhs.node.dupe()), k)? { + Ok(Ty::bool()) + } else { + Err(TypingNoContextOrInternalError::Typing) } } + bin_op => Ok(TyStarlarkValue::new::().bin_op(bin_op, rhs.node)?), + }, + TyBasic::Custom(lhs) => Ok(lhs.0.bin_op_dyn(bin_op, rhs.node, self)?), + TyBasic::Set(elem) => match bin_op { TypingBinOp::In => { - if self.intersects(&Ty::basic(rhs.node.dupe()), k) { + if self.intersects(&Ty::basic(rhs.node.dupe()), elem)? 
{ Ok(Ty::bool()) } else { - Err(()) + Err(TypingNoContextOrInternalError::Typing) } } - bin_op => TyStarlarkValue::new::().bin_op(bin_op, rhs.node), + TypingBinOp::BitXor + | TypingBinOp::BitAnd + | TypingBinOp::Sub + | TypingBinOp::BitOr => { + if self.intersects_basic(rhs.node, &TyBasic::any_set())? { + Ok(Ty::union2( + Ty::set(elem.to_ty()), + Ty::basic(rhs.node.dupe()), + )) + } else { + Err(TypingNoContextOrInternalError::Typing) + } + } + bin_op => Ok(TyStarlarkValue::new::().bin_op(bin_op, rhs.node)?), }, - TyBasic::Custom(lhs) => lhs.0.bin_op_dyn(bin_op, rhs.node, self), - TyBasic::Name(_) => Ok(Ty::any()), } } @@ -635,31 +655,30 @@ impl<'a> TypingOracleCtx<'a> { lhs: &TyBasic, bin_op: TypingBinOp, rhs: &TyBasic, - ) -> Result { + ) -> Result { match rhs { - TyBasic::StarlarkValue(rhs) => rhs.rbin_op(bin_op, lhs), + TyBasic::StarlarkValue(rhs) => Ok(rhs.rbin_op(bin_op, lhs)?), rhs @ TyBasic::List(_) => match bin_op { TypingBinOp::Mul => { - if self.intersects_basic(lhs, &TyBasic::int()) { + if self.intersects_basic(lhs, &TyBasic::int())? { Ok(Ty::basic(rhs.clone())) } else { - Err(()) + Err(TypingNoContextOrInternalError::Typing) } } - _ => TyStarlarkValue::new::().rbin_op(bin_op, lhs), + _ => Ok(TyStarlarkValue::new::().rbin_op(bin_op, lhs)?), }, TyBasic::Tuple(_) => match bin_op { TypingBinOp::Mul => { - if self.intersects_basic(lhs, &TyBasic::int()) { + if self.intersects_basic(lhs, &TyBasic::int())? { Ok(Ty::any_tuple()) } else { - Err(()) + Err(TypingNoContextOrInternalError::Typing) } } - _ => TyStarlarkValue::tuple().rbin_op(bin_op, lhs), + _ => Ok(TyStarlarkValue::tuple().rbin_op(bin_op, lhs)?), }, - TyBasic::Name(..) => Ok(Ty::any()), - _ => Err(()), + _ => Err(TypingNoContextOrInternalError::Typing), } } @@ -794,66 +813,164 @@ impl<'a> TypingOracleCtx<'a> { } /// Returns false on Void, since that is definitely not a list - pub(crate) fn probably_a_list(&self, ty: &Ty) -> bool { + pub(crate) fn probably_a_list(&self, ty: &Ty) -> Result { if ty.is_never() { - return false; + return Ok(false); } self.intersects(ty, &Ty::list(Ty::any())) } /// If you get to a point where these types are being checked, might they succeed - pub(crate) fn intersects(&self, xs: &Ty, ys: &Ty) -> bool { + pub(crate) fn intersects(&self, xs: &Ty, ys: &Ty) -> Result { if xs.is_any() || xs.is_never() || ys.is_any() || ys.is_never() { - return true; + return Ok(true); } for x in xs.iter_union() { for y in ys.iter_union() { - if self.intersects_basic(x, y) { - return true; + if self.intersects_basic(x, y)? { + return Ok(true); + } + } + } + Ok(false) + } + + pub(crate) fn intersects_basic(&self, x: &TyBasic, y: &TyBasic) -> Result { + Ok(x == y || self.intersects_one_side(x, y)? || self.intersects_one_side(y, x)?) + } + + fn params_intersect(&self, x: &ParamSpec, y: &ParamSpec) -> Result { + // Fast path. + if x == y { + return Ok(true); + } + // Another fast path. + if x.is_any() || y.is_any() { + return Ok(true); + } + match ( + x.all_required_pos_only_named_only(), + y.all_required_pos_only_named_only(), + ) { + (Some((x_p, x_n)), Some((y_p, y_n))) => { + if x_p.len() != y_p.len() || x_n.len() != y_n.len() { + return Ok(false); + } + for (x, y) in x_p.iter().zip(y_p.iter()) { + if !self.intersects(x, y)? { + return Ok(false); + } + } + let y_n = SmallMap::from_iter(y_n); + for (name, x) in x_n { + if let Some(y) = y_n.get(name) { + if !self.intersects(x, y)? 
{ + return Ok(false); + } + } else { + return Ok(false); + } } + Ok(true) + } + (Some((x_p, x_n)), None) => { + self.params_all_pos_only_named_only_intersect(&x_p, &x_n, y) + } + (None, Some((y_p, y_n))) => { + self.params_all_pos_only_named_only_intersect(&y_p, &y_n, x) + } + _ => { + // The rest is hard to check, but required pos-only in signatures + // is what we need the most. + Ok(true) } } - false } - pub(crate) fn intersects_basic(&self, x: &TyBasic, y: &TyBasic) -> bool { - x == y || self.intersects_one_side(x, y) || self.intersects_one_side(y, x) + fn params_all_pos_only_named_only_intersect( + &self, + x_p: &[&Ty], + x_n: &[(&str, &Ty)], + y: &ParamSpec, + ) -> Result { + match self.validate_args( + y, + &TyCallArgs { + pos: x_p + .iter() + .map(|ty| Spanned { + node: (*ty).dupe(), + // TODO(nga): proper span. + span: Span::default(), + }) + .collect(), + named: x_n + .iter() + .map(|(name, ty)| Spanned { + node: (*name, (*ty).dupe()), + // TODO(nga): proper span. + span: Span::default(), + }) + .collect(), + args: None, + kwargs: None, + }, + Span::default(), + ) { + Ok(()) => Ok(true), + Err(TypingOrInternalError::Internal(e)) => Err(e), + Err(TypingOrInternalError::Typing(_)) => Ok(false), + } + } + + pub(crate) fn callables_intersect( + &self, + x: &TyCallable, + y: &TyCallable, + ) -> Result { + Ok(self.params_intersect(x.params(), y.params())? + && self.intersects(x.result(), y.result())?) } /// We consider two type intersecting if either side knows if they intersect. /// This function checks the left side. - fn intersects_one_side(&self, x: &TyBasic, y: &TyBasic) -> bool { + fn intersects_one_side(&self, x: &TyBasic, y: &TyBasic) -> Result { match (x, y) { - (TyBasic::Any, _) => true, - (TyBasic::Name(x), TyBasic::Name(y)) => x == y, - (TyBasic::Name(_), TyBasic::Custom(_)) => true, - (TyBasic::Name(_), TyBasic::StarlarkValue(_)) => true, - (TyBasic::Name(x), y) => Some(x.as_str()) == y.as_name(), + (TyBasic::Any, _) => Ok(true), (TyBasic::List(x), TyBasic::List(y)) => self.intersects(x, y), - (TyBasic::List(_), TyBasic::StarlarkValue(y)) => y.is_list(), + (TyBasic::List(_), TyBasic::StarlarkValue(y)) => Ok(y.is_list()), + (TyBasic::List(_), _) => Ok(false), + (TyBasic::Set(x), TyBasic::Set(y)) => self.intersects(x, y), + (TyBasic::Set(_), TyBasic::StarlarkValue(y)) => Ok(y.is_set()), + (TyBasic::Set(_), _) => Ok(false), (TyBasic::Dict(x_k, x_v), TyBasic::Dict(y_k, y_v)) => { - self.intersects(x_k, y_k) && self.intersects(x_v, y_v) + Ok(self.intersects(x_k, y_k)? && self.intersects(x_v, y_v)?) 
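// Reading aid for the surrounding match: `intersects_one_side` is
// deliberately one-directional, and `intersects_basic` tries `(x, y)` and
// then `(y, x)`, so each arm only needs to know about its own left-hand
// side. `callables_intersect` likewise succeeds when the `ParamSpec`s are
// compatible and the result types intersect, with `ParamSpec::any()`
// short-circuiting to true via the fast path in `params_intersect`.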
} - (TyBasic::Dict(..), TyBasic::StarlarkValue(y)) => y.is_dict(), + (TyBasic::Dict(..), TyBasic::StarlarkValue(y)) => Ok(y.is_dict()), + (TyBasic::Dict(..), _) => Ok(false), (TyBasic::Tuple(x), TyBasic::Tuple(y)) => TyTuple::intersects(x, y, self), - (TyBasic::Tuple(_), TyBasic::StarlarkValue(y)) => y.is_tuple(), + (TyBasic::Tuple(_), TyBasic::StarlarkValue(y)) => Ok(y.is_tuple()), + (TyBasic::Tuple(_), _) => Ok(false), (TyBasic::Iter(x), TyBasic::Iter(y)) => self.intersects(x, y), (TyBasic::Iter(x), y) | (y, TyBasic::Iter(x)) => match self.iter_item_basic(y) { Ok(yy) => self.intersects(x, &yy), - Err(()) => false, + Err(TypingNoContextError) => Ok(false), }, - (TyBasic::Custom(x), y) => x.intersects_with(y), - (TyBasic::StarlarkValue(x), TyBasic::Callable) => x.is_callable(), - (TyBasic::Type, TyBasic::StarlarkValue(y)) => y.is_type(), + (TyBasic::Callable(x), TyBasic::Callable(y)) => self.callables_intersect(x, y), + (TyBasic::Callable(_), TyBasic::Custom(_)) => { + // Handled when custom is lhs + Ok(false) + } + (TyBasic::Callable(_), _) => Ok(false), + (TyBasic::Custom(x), y) => x.intersects_with(y, *self), + (TyBasic::StarlarkValue(x), TyBasic::Callable(_)) => Ok(x.is_callable()), + (TyBasic::StarlarkValue(_), _) => Ok(false), + (TyBasic::Type, TyBasic::StarlarkValue(y)) => Ok(y.is_type()), (TyBasic::Type, _) => { // TODO(nga): more precise. - true + Ok(true) } - // TODO(nga): remove this branch. - (x, y) if x.is_function() && y.is_function() => true, - // There are lots of other cases that overlap, but add them as we need them - _ => false, } } } diff --git a/starlark-rust/starlark/src/typing/oracle/traits.rs b/starlark-rust/starlark/src/typing/oracle/traits.rs index b7a99b6e9ea5c..b91fd391589ea 100644 --- a/starlark-rust/starlark/src/typing/oracle/traits.rs +++ b/starlark-rust/starlark/src/typing/oracle/traits.rs @@ -24,13 +24,13 @@ use dupe::Dupe; #[derive(Copy, Clone, Dupe, Eq, PartialEq, derive_more::Display, Debug)] pub enum TypingUnOp { /// `+`. - #[display(fmt = "+")] + #[display("+")] Plus, /// `+`. - #[display(fmt = "-")] + #[display("-")] Minus, /// `~`. - #[display(fmt = "~")] + #[display("~")] BitNot, } @@ -38,43 +38,43 @@ pub enum TypingUnOp { #[derive(Copy, Clone, Dupe, Eq, PartialEq, derive_more::Display, Debug)] pub enum TypingBinOp { /// `+`. - #[display(fmt = "+")] + #[display("+")] Add, /// `-`. - #[display(fmt = "-")] + #[display("-")] Sub, /// `/`. - #[display(fmt = "/")] + #[display("/")] Div, /// `//`. - #[display(fmt = "/")] + #[display("/")] FloorDiv, /// `*`. - #[display(fmt = "*")] + #[display("*")] Mul, /// `%`. - #[display(fmt = "%")] + #[display("%")] Percent, /// `y in x`. - #[display(fmt = "in")] + #[display("in")] In, /// `|`. - #[display(fmt = "|")] + #[display("|")] BitOr, /// `^`. - #[display(fmt = "^")] + #[display("^")] BitXor, /// `&`. - #[display(fmt = "&")] + #[display("&")] BitAnd, /// `<`. - #[display(fmt = "<")] + #[display("<")] Less, /// `<<`. - #[display(fmt = "<<")] + #[display("<<")] LeftShift, /// `>>`. 
- #[display(fmt = ">>")] + #[display(">>")] RightShift, } diff --git a/starlark-rust/starlark/src/typing/small_arc_vec_or_static.rs b/starlark-rust/starlark/src/typing/small_arc_vec_or_static.rs index a10ace991a3aa..9ddf93fa8ffd5 100644 --- a/starlark-rust/starlark/src/typing/small_arc_vec_or_static.rs +++ b/starlark-rust/starlark/src/typing/small_arc_vec_or_static.rs @@ -17,6 +17,7 @@ use std::hash::Hash; use std::ops::Deref; +use std::slice; use allocative::Allocative; use dupe::Dupe; @@ -97,3 +98,12 @@ impl SmallArcVec1OrStatic { } } } + +impl<'a, T> IntoIterator for &'a SmallArcVec1OrStatic { + type Item = &'a T; + type IntoIter = slice::Iter<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + self.as_slice().iter() + } +} diff --git a/starlark-rust/starlark/src/typing/starlark_value.rs b/starlark-rust/starlark/src/typing/starlark_value.rs index 8d54b0bea61a0..c0ea3f35a578b 100644 --- a/starlark-rust/starlark/src/typing/starlark_value.rs +++ b/starlark-rust/starlark/src/typing/starlark_value.rs @@ -29,6 +29,8 @@ use dupe::Dupe; use starlark_syntax::codemap::Span; use crate::typing::error::TypingError; +use crate::typing::error::TypingNoContextError; +use crate::typing::ty::TypeRenderConfig; use crate::typing::Ty; use crate::typing::TyBasic; use crate::typing::TypingBinOp; @@ -39,8 +41,9 @@ use crate::values::dict::value::FrozenDict; use crate::values::float::StarlarkFloat; use crate::values::list::value::FrozenList; use crate::values::none::NoneType; +use crate::values::set::value::FrozenSet; use crate::values::starlark_type_id::StarlarkTypeId; -use crate::values::string::StarlarkStr; +use crate::values::string::str_type::StarlarkStr; use crate::values::traits::StarlarkValueVTable; use crate::values::traits::StarlarkValueVTableGet; use crate::values::tuple::value::Tuple; @@ -95,11 +98,7 @@ impl Debug for TyStarlarkValue { impl Display for TyStarlarkValue { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - match self.vtable.type_name { - "string" => write!(f, "str"), - "NoneType" => write!(f, "None"), - type_name => write!(f, "{}", type_name), - } + self.fmt_with_config(f, &TypeRenderConfig::Default) } } @@ -197,35 +196,59 @@ impl TyStarlarkValue { self == TyStarlarkValue::new::() } + #[allow(dead_code)] + pub(crate) fn is_set(self) -> bool { + self.self_check(); + self == TyStarlarkValue::new::() + } + /// Result of applying unary operator to this type. 
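/// (Hedged illustration: `TypingUnOp::Minus` on `int` succeeds because the
/// vtable sets `HAS_minus`, while unary minus on `str` yields
/// `TypingNoContextError` since `str` has no `minus` implementation.)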
- pub(crate) fn un_op(self, un_op: TypingUnOp) -> Result { + pub(crate) fn un_op(self, un_op: TypingUnOp) -> Result { let has = match un_op { TypingUnOp::Plus => self.vtable.vtable.HAS_plus, TypingUnOp::Minus => self.vtable.vtable.HAS_minus, TypingUnOp::BitNot => self.vtable.vtable.HAS_bit_not, }; - if has { Ok(self) } else { Err(()) } + if has { + Ok(self) + } else { + Err(TypingNoContextError) + } } - pub(crate) fn bin_op(self, op: TypingBinOp, rhs: &TyBasic) -> Result { + pub(crate) fn bin_op(self, op: TypingBinOp, rhs: &TyBasic) -> Result { match (self.vtable.vtable.bin_op_ty)(op, rhs) { Some(ty) => Ok(ty), - None => Err(()), + None => Err(TypingNoContextError), } } - pub(crate) fn rbin_op(self, op: TypingBinOp, lhs: &TyBasic) -> Result { + pub(crate) fn rbin_op( + self, + op: TypingBinOp, + lhs: &TyBasic, + ) -> Result { match (self.vtable.vtable.rbin_op_ty)(lhs, op) { Some(ty) => Ok(ty), - None => Err(()), + None => Err(TypingNoContextError), } } - pub(crate) fn index(self, _index: &TyBasic) -> Result { + pub(crate) fn index(self, _index: &TyBasic) -> Result { if self.vtable.vtable.HAS_at { Ok(Ty::any()) } else { - Err(()) + Err(TypingNoContextError) + } + } + + /// If this type can be slice, return the result type of slicing. + pub(crate) fn slice(self) -> Result { + if self.vtable.vtable.HAS_slice { + // All known implementations of slice return self type. + Ok(Ty::basic(TyBasic::StarlarkValue(self))) + } else { + Err(TypingNoContextError) } } @@ -233,23 +256,23 @@ impl TyStarlarkValue { self.vtable.vtable.HAS_at } - pub(crate) fn attr_from_methods(self, name: &str) -> Result { + pub(crate) fn attr_from_methods(self, name: &str) -> Result { if let Some(methods) = (self.vtable.vtable.get_methods)() { - if let Some(method) = methods.get(name) { - return Ok(Ty::of_value(method)); + if let Some(ty) = methods.get_ty(name) { + return Ok(ty); } } - Err(()) + Err(TypingNoContextError) } - pub(crate) fn attr(self, name: &str) -> Result { + pub(crate) fn attr(self, name: &str) -> Result { if let Ok(ty) = self.attr_from_methods(name) { return Ok(ty); } if let Some(ty) = (self.vtable.vtable.attr_ty)(name) { return Ok(ty); } - Err(()) + Err(TypingNoContextError) } pub(crate) fn is_callable(self) -> bool { @@ -284,11 +307,11 @@ impl TyStarlarkValue { Self::is_type_from_vtable(&self.vtable.vtable) } - pub(crate) fn iter_item(self) -> Result { + pub(crate) fn iter_item(self) -> Result { if Self::is_iterable(&self.vtable.vtable) { Ok(Ty::any()) } else { - Err(()) + Err(TypingNoContextError) } } @@ -310,4 +333,28 @@ impl TyStarlarkValue { matcher.alloc(StarlarkTypeIdMatcher::new(self)) } } + + pub(crate) fn fmt_with_config( + &self, + f: &mut fmt::Formatter<'_>, + config: &TypeRenderConfig, + ) -> fmt::Result { + let type_name = match self.vtable.type_name { + "string" => "str", + "NoneType" => "None", + name => name, + }; + match config { + TypeRenderConfig::Default => write!(f, "{}", type_name), + TypeRenderConfig::LinkedType { ty_to_path_map } => { + if let Some(link_path) = + ty_to_path_map.get(&Ty::basic(TyBasic::StarlarkValue(self.dupe()))) + { + write!(f, "{type_name}") + } else { + write!(f, "{}", type_name) + } + } + } + } } diff --git a/starlark-rust/starlark/src/typing/structs.rs b/starlark-rust/starlark/src/typing/structs.rs index 25a0026f887bb..3e74bffc8c65f 100644 --- a/starlark-rust/starlark/src/typing/structs.rs +++ b/starlark-rust/starlark/src/typing/structs.rs @@ -25,11 +25,13 @@ use dupe::Dupe; use starlark_map::sorted_map::SortedMap; use crate::typing::custom::TyCustomImpl; +use 
crate::typing::error::TypingNoContextError; +use crate::typing::error::TypingNoContextOrInternalError; use crate::typing::Ty; use crate::typing::TyBasic; use crate::typing::TypingBinOp; use crate::typing::TypingOracleCtx; -use crate::values::layout::heap::profile::arc_str::ArcStr; +use crate::util::arc_str::ArcStr; use crate::values::structs::StructRef; use crate::values::typing::type_compiled::alloc::TypeMatcherAlloc; use crate::values::typing::type_compiled::matcher::TypeMatcher; @@ -60,25 +62,30 @@ impl TyCustomImpl for TyStruct { Some("struct") } - fn bin_op(&self, bin_op: TypingBinOp, rhs: &TyBasic, ctx: &TypingOracleCtx) -> Result { + fn bin_op( + &self, + bin_op: TypingBinOp, + rhs: &TyBasic, + ctx: &TypingOracleCtx, + ) -> Result { match bin_op { TypingBinOp::Less => { // TODO(nga): do not clone. - if ctx.intersects_basic(&TyBasic::custom(self.clone()), rhs) { + if ctx.intersects_basic(&TyBasic::custom(self.clone()), rhs)? { Ok(Ty::bool()) } else { - Err(()) + Err(TypingNoContextOrInternalError::Typing) } } - _ => Err(()), + _ => Err(TypingNoContextOrInternalError::Typing), } } - fn attribute(&self, attr: &str) -> Result { + fn attribute(&self, attr: &str) -> Result { match self.fields.get(attr) { Some(ty) => Ok(ty.clone()), None if self.extra => Ok(Ty::any()), - _ => Err(()), + _ => Err(TypingNoContextError), } } diff --git a/starlark-rust/starlark/src/typing/tests.rs b/starlark-rust/starlark/src/typing/tests.rs index 48298b6ad6c65..874a7a8cdaf0c 100644 --- a/starlark-rust/starlark/src/typing/tests.rs +++ b/starlark-rust/starlark/src/typing/tests.rs @@ -20,6 +20,7 @@ use std::fmt::Write; use dupe::Dupe; use starlark_derive::starlark_module; +use starlark_map::small_map::SmallMap; use starlark_syntax::golden_test_template::golden_test_template; use crate as starlark; @@ -31,25 +32,65 @@ use crate::eval::runtime::file_loader::ReturnOwnedFileLoader; use crate::eval::Evaluator; use crate::syntax::AstModule; use crate::syntax::Dialect; +use crate::tests::util::trim_rust_backtrace; +use crate::typing::callable_param::ParamIsRequired; use crate::typing::interface::Interface; use crate::typing::AstModuleTypecheck; +use crate::typing::ParamSpec; +use crate::typing::Ty; +use crate::util::ArcStr; use crate::values::none::NoneType; +use crate::values::typing::StarlarkCallable; +use crate::values::typing::StarlarkCallableParamSpec; use crate::values::typing::StarlarkIter; use crate::values::Value; use crate::values::ValueOfUnchecked; +mod call; +mod callable; +mod list; +mod special_function; +mod tuple; +mod types; + #[derive(Default)] struct TypeCheck { expect_types: Vec, loads: HashMap, } +struct NamedXy; + +impl StarlarkCallableParamSpec for NamedXy { + fn params() -> ParamSpec { + ParamSpec::new_named_only([ + (ArcStr::new_static("x"), ParamIsRequired::Yes, Ty::string()), + (ArcStr::new_static("y"), ParamIsRequired::Yes, Ty::int()), + ]) + .unwrap() + } +} + #[starlark_module] fn register_typecheck_globals(globals: &mut GlobalsBuilder) { fn accepts_iterable<'v>( #[starlark(require = pos)] xs: ValueOfUnchecked<'v, StarlarkIter>>, ) -> anyhow::Result { - let _ = xs; + let _ignore = xs; + Ok(NoneType) + } + + fn accepts_typed_kwargs( + #[starlark(kwargs)] x: SmallMap, + ) -> anyhow::Result { + let _ignore = x; + Ok(NoneType) + } + + fn accepts_callable_named_xy<'v>( + #[starlark(require = pos)] f: StarlarkCallable<'v, NamedXy, NoneType>, + ) -> anyhow::Result { + let _ignore = f; Ok(NoneType) } } @@ -82,10 +123,9 @@ impl TypeCheck { let globals = GlobalsBuilder::extended() 
.with(register_typecheck_globals) .build(); - // `AstModule` is not `Clone`. Parse twice. - let ast0 = AstModule::parse("filename", code.to_owned(), &Dialect::Extended).unwrap(); - let ast1 = AstModule::parse("filename", code.to_owned(), &Dialect::Extended).unwrap(); - let (errors, typemap, interface, approximations) = ast0.typecheck( + let ast = + AstModule::parse("filename", code.to_owned(), &Dialect::AllOptionsInternal).unwrap(); + let (errors, typemap, interface, approximations) = ast.clone().typecheck( &globals, &self .loads @@ -142,15 +182,16 @@ impl TypeCheck { eval.set_loader(&loader); eval.enable_static_typechecking(true); - let eval_result = eval.eval_module(ast1, &globals); - match &eval_result { - Ok(_) => writeln!(output, "No errors.").unwrap(), - Err(err) => writeln!(output, "{}", err).unwrap(), - } - + let eval_result = eval.eval_module(ast, &globals); if eval_result.is_ok() != errors.is_empty() { - writeln!(output).unwrap(); writeln!(output, "Compiler typechecker and eval results mismatch.").unwrap(); + writeln!(output).unwrap(); + } + + // Additional writes must happen above this line otherwise it might be erased by trim_rust_backtrace + match &eval_result { + Ok(_) => writeln!(output, "No errors.").unwrap(), + Err(err) => writeln!(output, "{:?}", err).unwrap(), } // Help borrow checker. @@ -159,7 +200,10 @@ impl TypeCheck { module.freeze().unwrap() }; - golden_test_template(&format!("src/typing/golden/{}.golden", test_name), &output); + golden_test_template( + &format!("src/typing/tests/golden/{}.golden", test_name), + trim_rust_backtrace(&output), + ); (interface, module) } @@ -224,31 +268,6 @@ def test(): ); } -#[test] -fn test_type_kwargs() { - TypeCheck::new().check( - "type_kwargs", - r#" -def foo(**kwargs): - pass - -def bar(): - foo(**{1: "x"}) -"#, - ); -} - -#[test] -fn test_types_of_args_kwargs() { - TypeCheck::new().ty("args").ty("kwargs").check( - "types_of_args_kwargs", - r#" -def foo(*args: str, **kwargs: int): - pass -"#, - ); -} - #[test] fn test_dot_type() { TypeCheck::new().check( @@ -273,187 +292,6 @@ def bar(): ); } -#[test] -fn test_special_function_zip() { - TypeCheck::new().ty("x").check( - "zip", - r#" -def test(): - x = zip([1,2], [True, False], ["a", "b"]) -"#, - ); -} - -#[test] -fn test_special_function_struct() { - TypeCheck::new().ty("x").check( - "struct", - r#" -def test(): - x = struct(a = 1, b = "test") -"#, - ); -} - -#[test] -fn test_call_callable() { - TypeCheck::new().check( - "call_callable", - r#" -def foo(x: typing.Callable): - x() -"#, - ); -} - -#[test] -fn test_call_not_callable() { - TypeCheck::new().check( - "call_not_callable", - r#" -def foo(x: list): - x() -"#, - ); -} - -#[test] -fn test_call_callable_or_not_callable() { - TypeCheck::new().check( - "call_callable_or_not_callable", - r#" -def foo(x: [typing.Callable, str], y: [str, typing.Callable]): - x() - y() -"#, - ); -} - -#[test] -fn test_tuple() { - TypeCheck::new().check( - "tuple", - r#" -def empty_tuple_fixed_name() -> (): return tuple() -def empty_tuple_name_fixed() -> tuple: return () -"#, - ); -} - -#[test] -fn test_tuple_ellipsis() { - TypeCheck::new().check( - "tuple_ellipsis", - r#" -def f(t: tuple[int, ...]) -> int: - return t[0] - -def g(): - # Good. - f((1, 2, 3)) - - # Bad. 
- f((1, "x")) -"#, - ); -} - -#[test] -fn test_test_new_syntax_without_dot_type() { - TypeCheck::new().check( - "new_syntax_without_dot_type", - r#" -def foo(x: str): pass - -def bar(): - # good - foo("test") - - # bad - foo(1) -"#, - ); -} - -#[test] -fn test_calls() { - TypeCheck::new().check( - "calls", - r#" -def f(y): pass - -def g(): - # Extra parameter. - f(1, 2) - - # Not enough parameters. - f() -"#, - ); -} - -#[test] -fn test_list_append() { - TypeCheck::new().ty("x").check( - "list_append", - r#" -def test(): - # Type of `x` should be inferred as list of either `int` or `str`. - x = [] - x.append(1) - x.append("") -"#, - ); -} - -#[test] -fn test_list_append_bug() { - // TODO(nga): fix. - TypeCheck::new().ty("x").check( - "list_append_bug", - r#" -def test(): - x = [] - x.append(x) -"#, - ); -} - -#[test] -fn test_list_function() { - TypeCheck::new().ty("x").check( - "list_function", - r#" -def test(): - x = list([1, 2]) -"#, - ); -} - -#[test] -fn test_list_less() { - TypeCheck::new().check( - "list_less", - r#" -def test(x: list[str], y: list[str]) -> bool: - return x < y -"#, - ); -} - -#[test] -fn test_list_bin_op() { - TypeCheck::new().ty("x").ty("y").ty("z").check( - "list_bin_op", - r#" -def test(a: list[str]): - x = a + a - y = a * 3 - z = 3 * a -"#, - ); -} - #[test] fn test_accepts_iterable() { TypeCheck::new().check( @@ -548,18 +386,6 @@ def test(): ); } -#[test] -fn test_int_mul_list() { - // TODO(nga): fix. - TypeCheck::new().ty("x").check( - "int_mul_list", - r#" -def test(): - x = 1 * ["a"] -"#, - ); -} - #[test] fn test_un_op() { TypeCheck::new().ty("x").ty("y").ty("z").check( @@ -593,47 +419,58 @@ def func_which_returns_union(p) -> str | int: } #[test] -fn test_type_alias() { +fn test_methods_work_for_ty_starlark_value() { TypeCheck::new().ty("x").check( - "type_alias", + "methods_work_for_ty_starlark_value", r#" -MyList = list[int] - -def f(x: MyList): - pass +def test(s: str): + x = s.startswith("a") "#, ); } #[test] -fn test_incorrect_type_dot() { +fn test_bit_or_return_int() { TypeCheck::new().check( - "incorrect_type_dot", + "bit_or_return_int", r#" -def foo(x: list.foo.bar): +test = int | 3 + +def foo() -> test: pass "#, ); } #[test] -fn test_never_call_bug() { - TypeCheck::new().ty("y").check( - "never_call_bug", +fn test_bit_or_return_list() { + TypeCheck::new().check( + "bit_or_return_list", r#" -def foo(x: typing.Never): - y = x(1) +test = int | list[3] + +def foo() -> test: + pass "#, ); } #[test] -fn test_methods_work_for_ty_starlark_value() { - TypeCheck::new().ty("x").check( - "methods_work_for_ty_starlark_value", +fn test_bit_or_with_load() { + let (interface, module) = TypeCheck::new().check( + "test_bit_or_with_load_foo", r#" -def test(s: str): - x = s.startswith("a") +def foo() -> str: + return "test" +"#, + ); + TypeCheck::new().load("foo.bzl", interface, module).check( + "test_bit_or_with_load", + r#" +load("foo.bzl", "foo") +test = int | foo() +def test() -> test: + pass "#, ); } diff --git a/starlark-rust/starlark/src/typing/tests/call.rs b/starlark-rust/starlark/src/typing/tests/call.rs new file mode 100644 index 0000000000000..728964744cb2a --- /dev/null +++ b/starlark-rust/starlark/src/typing/tests/call.rs @@ -0,0 +1,144 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Tests for functions, callables and calls. + +use crate::typing::tests::TypeCheck; + +#[test] +fn test_type_kwargs() { + TypeCheck::new().check( + "type_kwargs", + r#" +def foo(**kwargs): + pass + +def bar(): + foo(**{1: "x"}) +"#, + ); +} + +#[test] +fn test_types_of_args_kwargs() { + TypeCheck::new().ty("args").ty("kwargs").check( + "types_of_args_kwargs", + r#" +def foo(*args: str, **kwargs: int): + pass + +def test(): + # Good + foo("a") + foo(b=1) + # Bad + foo(1) + foo(c="x") +"#, + ); +} + +#[test] +fn test_kwargs_in_native_code() { + TypeCheck::new().check( + "kwargs_in_native_code", + r#" +def test(): + # Good. + accepts_typed_kwargs(x=1) + # Bad. + accepts_typed_kwargs(x=None) +"#, + ); +} + +#[test] +fn test_call_callable() { + TypeCheck::new().check( + "call_callable", + r#" +def foo(x: typing.Callable): + x() +"#, + ); +} + +#[test] +fn test_call_not_callable() { + TypeCheck::new().check( + "call_not_callable", + r#" +def foo(x: list): + x() +"#, + ); +} + +#[test] +fn test_call_callable_or_not_callable() { + TypeCheck::new().check( + "call_callable_or_not_callable", + r#" +def foo(x: [typing.Callable, str], y: [str, typing.Callable]): + x() + y() +"#, + ); +} + +#[test] +fn test_calls() { + TypeCheck::new().check( + "calls", + r#" +def f(y): pass + +def g(): + # Extra parameter. + f(1, 2) + + # Not enough parameters. + f() +"#, + ); +} + +#[test] +fn test_never_call_bug() { + TypeCheck::new().ty("y").check( + "never_call_bug", + r#" +def foo(x: typing.Never): + y = x(1) +"#, + ); +} + +#[test] +fn test_call_pos_only() { + TypeCheck::new().check( + "call_pos_only", + r#" +def f(x, /): + pass + +def test(): + f("good") + f(x="bad") +"#, + ); +} diff --git a/starlark-rust/starlark/src/typing/tests/callable.rs b/starlark-rust/starlark/src/typing/tests/callable.rs new file mode 100644 index 0000000000000..744194bb43a92 --- /dev/null +++ b/starlark-rust/starlark/src/typing/tests/callable.rs @@ -0,0 +1,71 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Test for callables, but not calls. 
+ +use crate::typing::tests::TypeCheck; + +#[test] +fn test_callable_with_args() { + TypeCheck::new().check( + "callable_with_args", + r#" +def accept_f(x: typing.Callable[[int, str], str]): + pass + +def good_function(x: int, y: str) -> str: + return "" + +def bad_function(x: int, y: bool) -> str: + return "" + +def test(): + accept_f(good_function) + accept_f(bad_function) +"#, + ); +} + +#[test] +fn test_callable_named() { + TypeCheck::new().check( + "callable_named", + r#" +def good_function_pos_or_named(x: str, y: int) -> None: + pass + +def good_function_named_only(*, x: str, y: int) -> None: + pass + +def bad_function_wrong_types(x: bool, y: list) -> None: + pass + +def bad_function_missing_params(x: str) -> None: + pass + +def bad_function_extra_params(x: str, y: int, z: int) -> None: + pass + +def test(): + accepts_callable_named_xy(good_function_pos_or_named) + accepts_callable_named_xy(good_function_named_only) + accepts_callable_named_xy(bad_function_wrong_types) + accepts_callable_named_xy(bad_function_missing_params) + accepts_callable_named_xy(bad_function_extra_params) +"#, + ); +} diff --git a/starlark-rust/starlark/src/typing/golden/accepts_iterable.golden b/starlark-rust/starlark/src/typing/tests/golden/accepts_iterable.golden similarity index 94% rename from starlark-rust/starlark/src/typing/golden/accepts_iterable.golden rename to starlark-rust/starlark/src/typing/tests/golden/accepts_iterable.golden index ac3449e6b14e6..76397d15d418e 100644 --- a/starlark-rust/starlark/src/typing/golden/accepts_iterable.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/accepts_iterable.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/tests/golden/bit_or_return_int.golden b/starlark-rust/starlark/src/typing/tests/golden/bit_or_return_int.golden new file mode 100644 index 0000000000000..2feeebc3d8b55 --- /dev/null +++ b/starlark-rust/starlark/src/typing/tests/golden/bit_or_return_int.golden @@ -0,0 +1,26 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Code: +test = int | 3 + +def foo() -> test: + pass + +No errors. + +Approximations: +Approximation: Unknown type = "Span { begin: Pos(30), end: Pos(34) }" + +Compiler typechecker (eval): +Compiler typechecker and eval results mismatch. + +error: Type `3` is not a valid type annotation + --> filename:2:8 + | +2 | test = int | 3 + | ^^^^^^^ + | diff --git a/starlark-rust/starlark/src/typing/tests/golden/bit_or_return_list.golden b/starlark-rust/starlark/src/typing/tests/golden/bit_or_return_list.golden new file mode 100644 index 0000000000000..ff9affc4e0918 --- /dev/null +++ b/starlark-rust/starlark/src/typing/tests/golden/bit_or_return_list.golden @@ -0,0 +1,26 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Code: +test = int | list[3] + +def foo() -> test: + pass + +No errors. + +Approximations: +Approximation: Unknown type = "Span { begin: Pos(36), end: Pos(40) }" + +Compiler typechecker (eval): +Compiler typechecker and eval results mismatch. 
+ +error: Type `3` is not a valid type annotation + --> filename:2:14 + | +2 | test = int | list[3] + | ^^^^^^^ + | diff --git a/starlark-rust/starlark/src/typing/golden/call_callable.golden b/starlark-rust/starlark/src/typing/tests/golden/call_callable.golden similarity index 94% rename from starlark-rust/starlark/src/typing/golden/call_callable.golden rename to starlark-rust/starlark/src/typing/tests/golden/call_callable.golden index ba9b282278c4a..e637fa5e68411 100644 --- a/starlark-rust/starlark/src/typing/golden/call_callable.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/call_callable.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/golden/call_callable_or_not_callable.golden b/starlark-rust/starlark/src/typing/tests/golden/call_callable_or_not_callable.golden similarity index 95% rename from starlark-rust/starlark/src/typing/golden/call_callable_or_not_callable.golden rename to starlark-rust/starlark/src/typing/tests/golden/call_callable_or_not_callable.golden index 160661b785cc8..bdb928fccbe12 100644 --- a/starlark-rust/starlark/src/typing/golden/call_callable_or_not_callable.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/call_callable_or_not_callable.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/tests/golden/call_not_callable.golden b/starlark-rust/starlark/src/typing/tests/golden/call_not_callable.golden new file mode 100644 index 0000000000000..b29bd77ff557b --- /dev/null +++ b/starlark-rust/starlark/src/typing/tests/golden/call_not_callable.golden @@ -0,0 +1,25 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Code: +def foo(x: list): + x() + +Error: +error: Call to a non-callable type `list` + --> filename:3:5 + | +3 | x() + | ^^^ + | + +Compiler typechecker (eval): +error: Call to a non-callable type `list` + --> filename:3:5 + | +3 | x() + | ^^^ + | diff --git a/starlark-rust/starlark/src/typing/tests/golden/call_pos_only.golden b/starlark-rust/starlark/src/typing/tests/golden/call_pos_only.golden new file mode 100644 index 0000000000000..027c036d2ce24 --- /dev/null +++ b/starlark-rust/starlark/src/typing/tests/golden/call_pos_only.golden @@ -0,0 +1,29 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Code: +def f(x, /): + pass + +def test(): + f("good") + f(x="bad") + +Error: +error: Unexpected parameter named `x` + --> filename:7:7 + | +7 | f(x="bad") + | ^^^^^^^ + | + +Compiler typechecker (eval): +error: Unexpected parameter named `x` + --> filename:7:7 + | +7 | f(x="bad") + | ^^^^^^^ + | diff --git a/starlark-rust/starlark/src/typing/tests/golden/callable_named.golden b/starlark-rust/starlark/src/typing/tests/golden/callable_named.golden new file mode 100644 index 0000000000000..5f4025016d4d4 --- /dev/null +++ b/starlark-rust/starlark/src/typing/tests/golden/callable_named.golden @@ -0,0 +1,60 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Code: +def 
good_function_pos_or_named(x: str, y: int) -> None: + pass + +def good_function_named_only(*, x: str, y: int) -> None: + pass + +def bad_function_wrong_types(x: bool, y: list) -> None: + pass + +def bad_function_missing_params(x: str) -> None: + pass + +def bad_function_extra_params(x: str, y: int, z: int) -> None: + pass + +def test(): + accepts_callable_named_xy(good_function_pos_or_named) + accepts_callable_named_xy(good_function_named_only) + accepts_callable_named_xy(bad_function_wrong_types) + accepts_callable_named_xy(bad_function_missing_params) + accepts_callable_named_xy(bad_function_extra_params) + +Error: +error: Expected type `typing.Callable["*, x: str, y: int", None]` but got `def(x: bool, y: list) -> None` + --> filename:20:31 + | +20 | accepts_callable_named_xy(bad_function_wrong_types) + | ^^^^^^^^^^^^^^^^^^^^^^^^ + | + +Error: +error: Expected type `typing.Callable["*, x: str, y: int", None]` but got `def(x: str) -> None` + --> filename:21:31 + | +21 | accepts_callable_named_xy(bad_function_missing_params) + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + +Error: +error: Expected type `typing.Callable["*, x: str, y: int", None]` but got `def(x: str, y: int, z: int) -> None` + --> filename:22:31 + | +22 | accepts_callable_named_xy(bad_function_extra_params) + | ^^^^^^^^^^^^^^^^^^^^^^^^^ + | + +Compiler typechecker (eval): +error: Expected type `typing.Callable["*, x: str, y: int", None]` but got `def(x: bool, y: list) -> None` + --> filename:20:31 + | +20 | accepts_callable_named_xy(bad_function_wrong_types) + | ^^^^^^^^^^^^^^^^^^^^^^^^ + | diff --git a/starlark-rust/starlark/src/typing/tests/golden/callable_with_args.golden b/starlark-rust/starlark/src/typing/tests/golden/callable_with_args.golden new file mode 100644 index 0000000000000..7a752404a061b --- /dev/null +++ b/starlark-rust/starlark/src/typing/tests/golden/callable_with_args.golden @@ -0,0 +1,35 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Code: +def accept_f(x: typing.Callable[[int, str], str]): + pass + +def good_function(x: int, y: str) -> str: + return "" + +def bad_function(x: int, y: bool) -> str: + return "" + +def test(): + accept_f(good_function) + accept_f(bad_function) + +Error: +error: Expected type `typing.Callable[[int, str], str]` but got `def(x: int, y: bool) -> str` + --> filename:13:14 + | +13 | accept_f(bad_function) + | ^^^^^^^^^^^^ + | + +Compiler typechecker (eval): +error: Expected type `typing.Callable[[int, str], str]` but got `def(x: int, y: bool) -> str` + --> filename:13:14 + | +13 | accept_f(bad_function) + | ^^^^^^^^^^^^ + | diff --git a/starlark-rust/starlark/src/typing/golden/calls.golden b/starlark-rust/starlark/src/typing/tests/golden/calls.golden similarity index 97% rename from starlark-rust/starlark/src/typing/golden/calls.golden rename to starlark-rust/starlark/src/typing/tests/golden/calls.golden index 54a5c0263206a..15e5abdc680e0 100644 --- a/starlark-rust/starlark/src/typing/golden/calls.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/calls.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/golden/dict_bug.golden b/starlark-rust/starlark/src/typing/tests/golden/dict_bug.golden similarity index 95% rename from starlark-rust/starlark/src/typing/golden/dict_bug.golden 
rename to starlark-rust/starlark/src/typing/tests/golden/dict_bug.golden index 52bfe5a683214..83eeff9779913 100644 --- a/starlark-rust/starlark/src/typing/golden/dict_bug.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/dict_bug.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/golden/dict_never_key.golden b/starlark-rust/starlark/src/typing/tests/golden/dict_never_key.golden similarity index 96% rename from starlark-rust/starlark/src/typing/golden/dict_never_key.golden rename to starlark-rust/starlark/src/typing/tests/golden/dict_never_key.golden index 61cad682bc919..4e660c0fd99ef 100644 --- a/starlark-rust/starlark/src/typing/golden/dict_never_key.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/dict_never_key.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/golden/dot_type_0.golden b/starlark-rust/starlark/src/typing/tests/golden/dot_type_0.golden similarity index 95% rename from starlark-rust/starlark/src/typing/golden/dot_type_0.golden rename to starlark-rust/starlark/src/typing/tests/golden/dot_type_0.golden index 7a189380c1e64..553df6b3c45fd 100644 --- a/starlark-rust/starlark/src/typing/golden/dot_type_0.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/dot_type_0.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/golden/dot_type_1.golden b/starlark-rust/starlark/src/typing/tests/golden/dot_type_1.golden similarity index 89% rename from starlark-rust/starlark/src/typing/golden/dot_type_1.golden rename to starlark-rust/starlark/src/typing/tests/golden/dot_type_1.golden index 41c310e44ed82..bceb43f505354 100644 --- a/starlark-rust/starlark/src/typing/golden/dot_type_1.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/dot_type_1.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: @@ -20,7 +20,7 @@ error: Expected type `str` but got `list[typing.Never]` | Error: -error: Expected type `list[typing.Any]` but got `bool` +error: Expected type `list` but got `bool` --> filename:6:9 | 6 | foo(True) diff --git a/starlark-rust/starlark/src/typing/golden/failure.golden b/starlark-rust/starlark/src/typing/tests/golden/failure.golden similarity index 96% rename from starlark-rust/starlark/src/typing/golden/failure.golden rename to starlark-rust/starlark/src/typing/tests/golden/failure.golden index 53acf93c1d534..888ab9d2a8e64 100644 --- a/starlark-rust/starlark/src/typing/golden/failure.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/failure.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git 
a/starlark-rust/starlark/src/typing/golden/false_negative.golden b/starlark-rust/starlark/src/typing/tests/golden/false_negative.golden similarity index 95% rename from starlark-rust/starlark/src/typing/golden/false_negative.golden rename to starlark-rust/starlark/src/typing/tests/golden/false_negative.golden index 6106cff7c4eff..712545d5abcd9 100644 --- a/starlark-rust/starlark/src/typing/golden/false_negative.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/false_negative.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/tests/golden/function_as_type_bit_or.golden b/starlark-rust/starlark/src/typing/tests/golden/function_as_type_bit_or.golden new file mode 100644 index 0000000000000..65714bb082dab --- /dev/null +++ b/starlark-rust/starlark/src/typing/tests/golden/function_as_type_bit_or.golden @@ -0,0 +1,18 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Code: +def test(): + # This test should work even if `t` is global. There's a bug in test framework somewhere. + t = int | str + +No errors. + +Types: +t: type + +Compiler typechecker (eval): +No errors. diff --git a/starlark-rust/starlark/src/typing/golden/incorrect_type_dot.golden b/starlark-rust/starlark/src/typing/tests/golden/incorrect_type_dot.golden similarity index 83% rename from starlark-rust/starlark/src/typing/golden/incorrect_type_dot.golden rename to starlark-rust/starlark/src/typing/tests/golden/incorrect_type_dot.golden index d6a2d0950457b..be2293a36b3bf 100644 --- a/starlark-rust/starlark/src/typing/golden/incorrect_type_dot.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/incorrect_type_dot.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: @@ -16,6 +16,9 @@ error: Operation `.foo` not supported on type `function` | ^^^^^^^^ | +Approximations: +Approximation: Unknown type = "Span { begin: Pos(12), end: Pos(24) }" + Compiler typechecker (eval): error: Operation `.foo` not supported on type `function` --> filename:2:17 diff --git a/starlark-rust/starlark/src/typing/golden/int_bitor_float.golden b/starlark-rust/starlark/src/typing/tests/golden/int_bitor_float.golden similarity index 97% rename from starlark-rust/starlark/src/typing/golden/int_bitor_float.golden rename to starlark-rust/starlark/src/typing/tests/golden/int_bitor_float.golden index 54e3f0f6c6848..9d7e483a428e9 100644 --- a/starlark-rust/starlark/src/typing/golden/int_bitor_float.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/int_bitor_float.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/golden/int_mul_list.golden b/starlark-rust/starlark/src/typing/tests/golden/int_mul_list.golden similarity index 94% rename from starlark-rust/starlark/src/typing/golden/int_mul_list.golden rename to starlark-rust/starlark/src/typing/tests/golden/int_mul_list.golden index 2259976b2902e..47fac745526bd 100644 --- 
a/starlark-rust/starlark/src/typing/golden/int_mul_list.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/int_mul_list.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/golden/int_plus_float.golden b/starlark-rust/starlark/src/typing/tests/golden/int_plus_float.golden similarity index 94% rename from starlark-rust/starlark/src/typing/golden/int_plus_float.golden rename to starlark-rust/starlark/src/typing/tests/golden/int_plus_float.golden index 24c7daa7230fa..7af0e85021313 100644 --- a/starlark-rust/starlark/src/typing/golden/int_plus_float.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/int_plus_float.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/tests/golden/kwargs_in_native_code.golden b/starlark-rust/starlark/src/typing/tests/golden/kwargs_in_native_code.golden new file mode 100644 index 0000000000000..05cb4ab9ce1de --- /dev/null +++ b/starlark-rust/starlark/src/typing/tests/golden/kwargs_in_native_code.golden @@ -0,0 +1,28 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Code: +def test(): + # Good. + accepts_typed_kwargs(x=1) + # Bad. + accepts_typed_kwargs(x=None) + +Error: +error: Expected type `int` but got `None` + --> filename:6:26 + | +6 | accepts_typed_kwargs(x=None) + | ^^^^^^ + | + +Compiler typechecker (eval): +error: Expected type `int` but got `None` + --> filename:6:26 + | +6 | accepts_typed_kwargs(x=None) + | ^^^^^^ + | diff --git a/starlark-rust/starlark/src/typing/golden/list_append.golden b/starlark-rust/starlark/src/typing/tests/golden/list_append.golden similarity index 96% rename from starlark-rust/starlark/src/typing/golden/list_append.golden rename to starlark-rust/starlark/src/typing/tests/golden/list_append.golden index aabe62fd5b668..d2f5cb52034a7 100644 --- a/starlark-rust/starlark/src/typing/golden/list_append.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/list_append.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/golden/list_append_bug.golden b/starlark-rust/starlark/src/typing/tests/golden/list_append_bug.golden similarity index 98% rename from starlark-rust/starlark/src/typing/golden/list_append_bug.golden rename to starlark-rust/starlark/src/typing/tests/golden/list_append_bug.golden index 4d6aafc07f285..04c9edb45091a 100644 --- a/starlark-rust/starlark/src/typing/golden/list_append_bug.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/list_append_bug.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/golden/list_bin_op.golden b/starlark-rust/starlark/src/typing/tests/golden/list_bin_op.golden similarity index 95% rename from 
starlark-rust/starlark/src/typing/golden/list_bin_op.golden rename to starlark-rust/starlark/src/typing/tests/golden/list_bin_op.golden index eeb3710a89cec..4b3c236b2be5c 100644 --- a/starlark-rust/starlark/src/typing/golden/list_bin_op.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/list_bin_op.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/golden/list_function.golden b/starlark-rust/starlark/src/typing/tests/golden/list_function.golden similarity index 94% rename from starlark-rust/starlark/src/typing/golden/list_function.golden rename to starlark-rust/starlark/src/typing/tests/golden/list_function.golden index dc432d450d859..9a9be6693f49d 100644 --- a/starlark-rust/starlark/src/typing/golden/list_function.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/list_function.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/golden/list_less.golden b/starlark-rust/starlark/src/typing/tests/golden/list_less.golden similarity index 94% rename from starlark-rust/starlark/src/typing/golden/list_less.golden rename to starlark-rust/starlark/src/typing/tests/golden/list_less.golden index 2ba2e66544f49..59e4ef797c1ca 100644 --- a/starlark-rust/starlark/src/typing/golden/list_less.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/list_less.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/golden/load_0.golden b/starlark-rust/starlark/src/typing/tests/golden/load_0.golden similarity index 94% rename from starlark-rust/starlark/src/typing/golden/load_0.golden rename to starlark-rust/starlark/src/typing/tests/golden/load_0.golden index b2e388b7ecf62..f524a5268e815 100644 --- a/starlark-rust/starlark/src/typing/golden/load_0.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/load_0.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/golden/load_1.golden b/starlark-rust/starlark/src/typing/tests/golden/load_1.golden similarity index 95% rename from starlark-rust/starlark/src/typing/golden/load_1.golden rename to starlark-rust/starlark/src/typing/tests/golden/load_1.golden index d13fada6537b1..d715089c4a946 100644 --- a/starlark-rust/starlark/src/typing/golden/load_1.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/load_1.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/golden/methods_work_for_ty_starlark_value.golden b/starlark-rust/starlark/src/typing/tests/golden/methods_work_for_ty_starlark_value.golden similarity index 94% rename from 
starlark-rust/starlark/src/typing/golden/methods_work_for_ty_starlark_value.golden rename to starlark-rust/starlark/src/typing/tests/golden/methods_work_for_ty_starlark_value.golden index d42a17afa6919..c5e9ec4e36da9 100644 --- a/starlark-rust/starlark/src/typing/golden/methods_work_for_ty_starlark_value.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/methods_work_for_ty_starlark_value.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/golden/never_call_bug.golden b/starlark-rust/starlark/src/typing/tests/golden/never_call_bug.golden similarity index 94% rename from starlark-rust/starlark/src/typing/golden/never_call_bug.golden rename to starlark-rust/starlark/src/typing/tests/golden/never_call_bug.golden index e328fdee0d779..6378624055338 100644 --- a/starlark-rust/starlark/src/typing/golden/never_call_bug.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/never_call_bug.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/golden/new_list_dict_syntax.golden b/starlark-rust/starlark/src/typing/tests/golden/new_list_dict_syntax.golden similarity index 96% rename from starlark-rust/starlark/src/typing/golden/new_list_dict_syntax.golden rename to starlark-rust/starlark/src/typing/tests/golden/new_list_dict_syntax.golden index c09e3bd8b226f..e05a4c8ce9a04 100644 --- a/starlark-rust/starlark/src/typing/golden/new_list_dict_syntax.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/new_list_dict_syntax.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/golden/new_list_dict_syntax_as_value.golden b/starlark-rust/starlark/src/typing/tests/golden/new_list_dict_syntax_as_value.golden similarity index 95% rename from starlark-rust/starlark/src/typing/golden/new_list_dict_syntax_as_value.golden rename to starlark-rust/starlark/src/typing/tests/golden/new_list_dict_syntax_as_value.golden index b1effbc1b4326..6ca4e849d1ce1 100644 --- a/starlark-rust/starlark/src/typing/golden/new_list_dict_syntax_as_value.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/new_list_dict_syntax_as_value.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/golden/struct.golden b/starlark-rust/starlark/src/typing/tests/golden/struct.golden similarity index 95% rename from starlark-rust/starlark/src/typing/golden/struct.golden rename to starlark-rust/starlark/src/typing/tests/golden/struct.golden index 488f2ce85a9ff..1edf10e9037a7 100644 --- a/starlark-rust/starlark/src/typing/golden/struct.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/struct.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# 
STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/golden/success.golden b/starlark-rust/starlark/src/typing/tests/golden/success.golden similarity index 95% rename from starlark-rust/starlark/src/typing/golden/success.golden rename to starlark-rust/starlark/src/typing/tests/golden/success.golden index 68c566d290047..e532cfc273975 100644 --- a/starlark-rust/starlark/src/typing/golden/success.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/success.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/tests/golden/test_bit_or_with_load.golden b/starlark-rust/starlark/src/typing/tests/golden/test_bit_or_with_load.golden new file mode 100644 index 0000000000000..7eeba04aa61c9 --- /dev/null +++ b/starlark-rust/starlark/src/typing/tests/golden/test_bit_or_with_load.golden @@ -0,0 +1,26 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Code: +load("foo.bzl", "foo") +test = int | foo() +def test() -> test: + pass + +No errors. + +Approximations: +Approximation: Unknown type = "Span { begin: Pos(57), end: Pos(61) }" + +Compiler typechecker (eval): +Compiler typechecker and eval results mismatch. + +error: String literals are not allowed in type expressions: `"test"` + --> filename:3:8 + | +3 | test = int | foo() + | ^^^^^^^^^^^ + | diff --git a/starlark-rust/starlark/src/typing/tests/golden/test_bit_or_with_load_foo.golden b/starlark-rust/starlark/src/typing/tests/golden/test_bit_or_with_load_foo.golden new file mode 100644 index 0000000000000..19c6cddf62d57 --- /dev/null +++ b/starlark-rust/starlark/src/typing/tests/golden/test_bit_or_with_load_foo.golden @@ -0,0 +1,14 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Code: +def foo() -> str: + return "test" + +No errors. + +Compiler typechecker (eval): +No errors. 
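The golden files above and below move from `src/typing/golden/` to `src/typing/tests/golden/`, and their regeneration banner drops the trailing `tests` filter now that the tests are split across `typing/tests/` modules. For readers unfamiliar with the pattern: `golden_test_template` (from `starlark_syntax`) compares rendered output against a checked-in file and can rewrite it on demand. A minimal sketch of that behavior, assuming a simple write-or-assert contract (the real helper differs in detail, e.g. it also emits the `@generated` banner):

```rust
use std::env;
use std::fs;

/// Sketch only: approximates `starlark_syntax::golden_test_template`.
/// `golden_rel_path` is relative to the crate root, e.g.
/// "src/typing/tests/golden/un_op.golden".
fn golden_test_template_sketch(golden_rel_path: &str, actual: &str) {
    if env::var_os("STARLARK_RUST_REGENERATE_GOLDEN_TESTS").is_some() {
        // Regenerate mode: overwrite the checked-in golden file.
        fs::write(golden_rel_path, actual).unwrap();
    } else {
        // Assert mode: the rendered output must match the golden file exactly.
        let expected = fs::read_to_string(golden_rel_path).unwrap();
        assert_eq!(
            expected, actual,
            "golden mismatch; rerun with STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1"
        );
    }
}
```

Keeping regeneration behind an environment variable means CI always asserts, while `STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib` refreshes every golden file in one run.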
diff --git a/starlark-rust/starlark/src/typing/golden/tuple.golden b/starlark-rust/starlark/src/typing/tests/golden/tuple.golden similarity index 95% rename from starlark-rust/starlark/src/typing/golden/tuple.golden rename to starlark-rust/starlark/src/typing/tests/golden/tuple.golden index 2ef4e9326d454..adbd00af7e9a0 100644 --- a/starlark-rust/starlark/src/typing/golden/tuple.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/tuple.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/golden/tuple_ellipsis.golden b/starlark-rust/starlark/src/typing/tests/golden/tuple_ellipsis.golden similarity index 97% rename from starlark-rust/starlark/src/typing/golden/tuple_ellipsis.golden rename to starlark-rust/starlark/src/typing/tests/golden/tuple_ellipsis.golden index d53e5b61d2a8f..144c5654bc14f 100644 --- a/starlark-rust/starlark/src/typing/golden/tuple_ellipsis.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/tuple_ellipsis.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/golden/type_alias.golden b/starlark-rust/starlark/src/typing/tests/golden/type_alias.golden similarity index 94% rename from starlark-rust/starlark/src/typing/golden/type_alias.golden rename to starlark-rust/starlark/src/typing/tests/golden/type_alias.golden index a1c5ffd249cd2..ae30c620dea26 100644 --- a/starlark-rust/starlark/src/typing/golden/type_alias.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/type_alias.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/golden/type_kwargs.golden b/starlark-rust/starlark/src/typing/tests/golden/type_kwargs.golden similarity index 97% rename from starlark-rust/starlark/src/typing/golden/type_kwargs.golden rename to starlark-rust/starlark/src/typing/tests/golden/type_kwargs.golden index 2431837471423..04f23d01ace6d 100644 --- a/starlark-rust/starlark/src/typing/golden/type_kwargs.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/type_kwargs.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/tests/golden/types_of_args_kwargs.golden b/starlark-rust/starlark/src/typing/tests/golden/types_of_args_kwargs.golden new file mode 100644 index 0000000000000..81353ccb1f33d --- /dev/null +++ b/starlark-rust/starlark/src/typing/tests/golden/types_of_args_kwargs.golden @@ -0,0 +1,45 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Code: +def foo(*args: str, **kwargs: int): + pass + +def test(): + # Good + foo("a") + foo(b=1) + # Bad + foo(1) + foo(c="x") + +Error: +error: Expected type `str` but got `int` + --> filename:10:9 + | +10 | foo(1) + | ^ + | + +Error: +error: Expected type `int` but got `str` + --> 
filename:11:9 + | +11 | foo(c="x") + | ^^^^^ + | + +Types: +args: tuple[str, ...] +kwargs: dict[str, int] + +Compiler typechecker (eval): +error: Expected type `str` but got `int` + --> filename:10:9 + | +10 | foo(1) + | ^ + | diff --git a/starlark-rust/starlark/src/typing/golden/un_op.golden b/starlark-rust/starlark/src/typing/tests/golden/un_op.golden similarity index 97% rename from starlark-rust/starlark/src/typing/golden/un_op.golden rename to starlark-rust/starlark/src/typing/tests/golden/un_op.golden index 78e0442b06460..53bd35066e13f 100644 --- a/starlark-rust/starlark/src/typing/golden/un_op.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/un_op.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/golden/union.golden b/starlark-rust/starlark/src/typing/tests/golden/union.golden similarity index 97% rename from starlark-rust/starlark/src/typing/golden/union.golden rename to starlark-rust/starlark/src/typing/tests/golden/union.golden index 5b99a997b9524..4d3e78d726006 100644 --- a/starlark-rust/starlark/src/typing/golden/union.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/union.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/golden/zip.golden b/starlark-rust/starlark/src/typing/tests/golden/zip.golden similarity index 95% rename from starlark-rust/starlark/src/typing/golden/zip.golden rename to starlark-rust/starlark/src/typing/tests/golden/zip.golden index d801118bc3ef5..96eb64f6b9dae 100644 --- a/starlark-rust/starlark/src/typing/golden/zip.golden +++ b/starlark-rust/starlark/src/typing/tests/golden/zip.golden @@ -1,7 +1,7 @@ # @generated # To regenerate, run: # ``` -# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib tests +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib # ``` Code: diff --git a/starlark-rust/starlark/src/typing/tests/list.rs b/starlark-rust/starlark/src/typing/tests/list.rs new file mode 100644 index 0000000000000..0356506746550 --- /dev/null +++ b/starlark-rust/starlark/src/typing/tests/list.rs @@ -0,0 +1,92 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use crate::typing::tests::TypeCheck; + +#[test] +fn test_int_mul_list() { + // TODO(nga): fix. + TypeCheck::new().ty("x").check( + "int_mul_list", + r#" +def test(): + x = 1 * ["a"] +"#, + ); +} + +#[test] +fn test_list_append() { + TypeCheck::new().ty("x").check( + "list_append", + r#" +def test(): + # Type of `x` should be inferred as list of either `int` or `str`. 
+ x = [] + x.append(1) + x.append("") +"#, + ); +} + +#[test] +fn test_list_append_bug() { + // TODO(nga): fix. + TypeCheck::new().ty("x").check( + "list_append_bug", + r#" +def test(): + x = [] + x.append(x) +"#, + ); +} + +#[test] +fn test_list_function() { + TypeCheck::new().ty("x").check( + "list_function", + r#" +def test(): + x = list([1, 2]) +"#, + ); +} + +#[test] +fn test_list_less() { + TypeCheck::new().check( + "list_less", + r#" +def test(x: list[str], y: list[str]) -> bool: + return x < y +"#, + ); +} + +#[test] +fn test_list_bin_op() { + TypeCheck::new().ty("x").ty("y").ty("z").check( + "list_bin_op", + r#" +def test(a: list[str]): + x = a + a + y = a * 3 + z = 3 * a +"#, + ); +} diff --git a/starlark-rust/starlark/src/typing/tests/special_function.rs b/starlark-rust/starlark/src/typing/tests/special_function.rs new file mode 100644 index 0000000000000..6e477be0039ae --- /dev/null +++ b/starlark-rust/starlark/src/typing/tests/special_function.rs @@ -0,0 +1,40 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use crate::typing::tests::TypeCheck; + +#[test] +fn test_special_function_struct() { + TypeCheck::new().ty("x").check( + "struct", + r#" +def test(): + x = struct(a = 1, b = "test") +"#, + ); +} + +#[test] +fn test_special_function_zip() { + TypeCheck::new().ty("x").check( + "zip", + r#" +def test(): + x = zip([1,2], [True, False], ["a", "b"]) +"#, + ); +} diff --git a/starlark-rust/starlark/src/typing/tests/tuple.rs b/starlark-rust/starlark/src/typing/tests/tuple.rs new file mode 100644 index 0000000000000..149ed4f540f99 --- /dev/null +++ b/starlark-rust/starlark/src/typing/tests/tuple.rs @@ -0,0 +1,47 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use crate::typing::tests::TypeCheck; + +#[test] +fn test_tuple() { + TypeCheck::new().check( + "tuple", + r#" +def empty_tuple_fixed_name() -> (): return tuple() +def empty_tuple_name_fixed() -> tuple: return () +"#, + ); +} + +#[test] +fn test_tuple_ellipsis() { + TypeCheck::new().check( + "tuple_ellipsis", + r#" +def f(t: tuple[int, ...]) -> int: + return t[0] + +def g(): + # Good. + f((1, 2, 3)) + + # Bad. 
+ f((1, "x")) +"#, + ); +} diff --git a/starlark-rust/starlark/src/typing/tests/types.rs b/starlark-rust/starlark/src/typing/tests/types.rs new file mode 100644 index 0000000000000..5f10de8891d4c --- /dev/null +++ b/starlark-rust/starlark/src/typing/tests/types.rs @@ -0,0 +1,56 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Type-related operations. + +use crate::typing::tests::TypeCheck; + +#[test] +fn test_type_alias() { + TypeCheck::new().ty("x").check( + "type_alias", + r#" +MyList = list[int] + +def f(x: MyList): + pass +"#, + ); +} + +#[test] +fn test_incorrect_type_dot() { + TypeCheck::new().check( + "incorrect_type_dot", + r#" +def foo(x: list.foo.bar): + pass +"#, + ); +} + +#[test] +fn test_function_as_type_bit_or() { + TypeCheck::new().ty("t").check( + "function_as_type_bit_or", + r#" +def test(): + # This test should work even if `t` is global. There's a bug in test framework somewhere. + t = int | str +"#, + ); +} diff --git a/starlark-rust/starlark/src/typing/tuple.rs b/starlark-rust/starlark/src/typing/tuple.rs index 75ad0ac26affc..9dc6b6cb77fa2 100644 --- a/starlark-rust/starlark/src/typing/tuple.rs +++ b/starlark-rust/starlark/src/typing/tuple.rs @@ -18,14 +18,15 @@ use std::fmt; use std::fmt::Display; use std::fmt::Formatter; -use std::iter; use std::sync::Arc; use allocative::Allocative; use dupe::Dupe; use crate::typing::arc_ty::ArcTy; +use crate::typing::error::InternalError; use crate::typing::starlark_value::TyStarlarkValue; +use crate::typing::ty::TypeRenderConfig; use crate::typing::Ty; use crate::typing::TypingOracleCtx; use crate::values::typing::type_compiled::alloc::TypeMatcherAlloc; @@ -46,11 +47,6 @@ pub enum TyTuple { } impl TyTuple { - /// `tuple`. - pub(crate) fn any() -> TyTuple { - TyTuple::Of(ArcTy::any()) - } - pub(crate) fn get(&self, i: usize) -> Option<&Ty> { match self { TyTuple::Elems(elems) => elems.get(i), @@ -65,17 +61,30 @@ impl TyTuple { } } - pub(crate) fn intersects(this: &TyTuple, other: &TyTuple, ctx: &TypingOracleCtx) -> bool { + pub(crate) fn intersects( + this: &TyTuple, + other: &TyTuple, + ctx: &TypingOracleCtx, + ) -> Result { match (this, other) { - (TyTuple::Elems(this), TyTuple::Elems(other)) => { - this.len() == other.len() - && iter::zip(&**this, &**other).all(|(x, y)| ctx.intersects(x, y)) - } + (TyTuple::Elems(this), TyTuple::Elems(other)) => Ok(this.len() == other.len() && { + for (x, y) in this.iter().zip(other.iter()) { + if !ctx.intersects(x, y)? { + return Ok(false); + } + } + true + }), (TyTuple::Of(this), TyTuple::Of(other)) => ctx.intersects(this, other), (TyTuple::Elems(elems), TyTuple::Of(item)) | (TyTuple::Of(item), TyTuple::Elems(elems)) => { // For example `tuple[str, int]` does not intersect with `tuple[str, ...]`. - elems.iter().all(|x| ctx.intersects(x, item)) + for x in elems.iter() { + if !ctx.intersects(x, item)? 
{ + return Ok(false); + } + } + Ok(true) } } } @@ -120,6 +129,31 @@ impl TyTuple { } } } + + pub(crate) fn fmt_with_config( + &self, + f: &mut Formatter<'_>, + config: &TypeRenderConfig, + ) -> fmt::Result { + match self { + TyTuple::Elems(elems) => match &**elems { + [x] => write!(f, "({},)", x.display_with(config)), + xs => display_container::fmt_container( + f, + "(", + ")", + xs.iter().map(|x| x.display_with(config)), + ), + }, + TyTuple::Of(item) => { + if item.is_any() { + write!(f, "tuple") + } else { + write!(f, "tuple[{}, ...]", item.display_with(config)) + } + } + } + } } impl Display for TyTuple { diff --git a/starlark-rust/starlark/src/typing/ty.rs b/starlark-rust/starlark/src/typing/ty.rs index 1f7faa40aa354..95ae41d3efe1a 100644 --- a/starlark-rust/starlark/src/typing/ty.rs +++ b/starlark-rust/starlark/src/typing/ty.rs @@ -15,6 +15,7 @@ * limitations under the License. */ +use std::collections::HashMap; use std::fmt; use std::fmt::Debug; use std::fmt::Display; @@ -24,22 +25,21 @@ use allocative::Allocative; use dupe::Dupe; use dupe::IterDupedExt; use either::Either; -use serde::Serialize; -use serde::Serializer; use starlark_derive::Trace; -use starlark_syntax::syntax::type_expr::type_str_literal_is_wildcard; +use starlark_syntax::codemap::CodeMap; +use starlark_syntax::codemap::Span; +use starlark_syntax::codemap::Spanned; use crate as starlark; -use crate::docs::DocFunction; -use crate::docs::DocMember; -use crate::docs::DocParam; +use crate::__derive_refs::components::NativeCallableComponents; use crate::eval::compiler::small_vec_1::SmallVec1; use crate::typing::arc_ty::ArcTy; use crate::typing::basic::TyBasic; +use crate::typing::call_args::TyCallArgs; +use crate::typing::callable::TyCallable; use crate::typing::custom::TyCustom; use crate::typing::custom::TyCustomImpl; -use crate::typing::function::Param; -use crate::typing::function::ParamMode; +use crate::typing::error::TypingNoContextError; use crate::typing::function::TyCustomFunction; use crate::typing::function::TyCustomFunctionImpl; use crate::typing::function::TyFunction; @@ -47,8 +47,9 @@ use crate::typing::small_arc_vec::SmallArcVec1; use crate::typing::starlark_value::TyStarlarkValue; use crate::typing::structs::TyStruct; use crate::typing::tuple::TyTuple; +use crate::typing::ParamSpec; +use crate::typing::TypingOracleCtx; use crate::values::bool::StarlarkBool; -use crate::values::layout::heap::profile::arc_str::ArcStr; use crate::values::typing::never::TypingNever; use crate::values::StarlarkValue; use crate::values::Value; @@ -103,43 +104,6 @@ pub struct Ty { alternatives: SmallArcVec1, } -impl Serialize for Ty { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - // Arbitrary custom types are not deserializable, so serialization to string is enough. - serializer.serialize_str(&self.to_string()) - } -} - -/// The name of an atomic type. -#[derive(Debug, Clone, Dupe, PartialEq, Eq, Hash, PartialOrd, Ord, Allocative)] -pub struct TyName(ArcStr); - -impl Display for TyName { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "\"{}\"", self.0.as_str()) - } -} - -impl PartialEq for TyName { - fn eq(&self, other: &str) -> bool { - self.as_str() == other - } -} - -impl TyName { - pub(crate) fn new(s: impl Into) -> TyName { - TyName(s.into()) - } - - /// Get the underlying `str` for a `TyName`. 
- pub fn as_str(&self) -> &str { - &self.0 - } -} - fn merge_adjacent(xs: Vec, f: impl Fn(T, T) -> Either) -> SmallVec1 { let mut res = SmallVec1::new(); let mut last = None; @@ -167,32 +131,6 @@ impl Ty { Ty::any() } - fn try_name_special(name: &str) -> Option { - match name { - name if type_str_literal_is_wildcard(name) => Some(Self::any()), - "list" => Some(Self::list(Ty::any())), - "dict" => Some(Self::dict(Ty::any(), Ty::any())), - "function" => Some(Self::any_function()), - "struct" => Some(Self::custom(TyStruct::any())), - "never" => Some(Self::never()), - "NoneType" => Some(Self::none()), - "bool" => Some(Self::bool()), - "int" => Some(Self::int()), - "float" => Some(Self::float()), - "string" => Some(Self::string()), - "tuple" => Some(Self::any_tuple()), - _ => None, - } - } - - pub(crate) fn name(name: &str) -> Self { - if let Some(x) = Self::try_name_special(name) { - x - } else { - Ty::basic(TyBasic::Name(TyName::new(name))) - } - } - /// Turn a type back into a name, potentially erasing some structure. /// E.g. the type `[bool]` would return `list`. /// Types like [`Ty::any`] will return `None`. @@ -222,7 +160,8 @@ impl Ty { Ty::basic(TyBasic::Any) } - pub(crate) const fn never() -> Self { + /// Never type: can hold no value. + pub const fn never() -> Self { Ty { alternatives: SmallArcVec1::empty(), } @@ -276,6 +215,15 @@ impl Ty { Self::dict(Ty::any(), Ty::any()) } + /// Create a set type. + pub fn set(item: Ty) -> Self { + Ty::basic(TyBasic::set(item)) + } + + pub(crate) fn any_set() -> Self { + Self::set(Ty::any()) + } + /// Create a tuple of two elements pub fn tuple2(a: Ty, b: Ty) -> Self { Ty::tuple(vec![a, b]) @@ -288,31 +236,38 @@ impl Ty { /// Tuple where elements are unknown. pub(crate) fn any_tuple() -> Self { - Ty::basic(TyBasic::Tuple(TyTuple::any())) + Self::tuple_of(Ty::any()) + } + + pub(crate) fn tuple_of(item: Ty) -> Self { + Ty::basic(TyBasic::Tuple(TyTuple::Of(ArcTy::new(item)))) } /// Create a function type. - pub fn function(params: Vec, result: Ty) -> Self { + pub fn function(params: ParamSpec, result: Ty) -> Self { Self::ty_function(TyFunction::new(params, result)) } + /// Create a function type. + pub fn callable(params: ParamSpec, result: Ty) -> Self { + Ty::basic(TyBasic::Callable(TyCallable::new(params, result))) + } + /// Create a function type. pub fn ty_function(f: TyFunction) -> Self { Self::custom(TyCustomFunction(f)) } /// Create a function, where the first argument is the result of `.type`. - pub fn ctor_function(type_attr: &Ty, params: Vec, result: Ty) -> Self { + pub fn ctor_function(type_attr: Ty, params: ParamSpec, result: Ty) -> Self { Self::custom(TyCustomFunction(TyFunction::new_with_type_attr( - params, - result, - type_attr.clone(), + params, result, type_attr, ))) } /// Function type that accepts any arguments and returns any result. - pub(crate) fn any_function() -> Self { - Ty::basic(TyBasic::Callable) + pub(crate) fn any_callable() -> Self { + Ty::basic(TyBasic::Callable(TyCallable::any())) } pub(crate) fn any_struct() -> Self { @@ -416,8 +371,8 @@ impl Ty { /// If at least one was successful, return the union of all successful results. 
pub(crate) fn typecheck_union_simple( &self, - typecheck: impl Fn(&TyBasic) -> Result, - ) -> Result { + typecheck: impl Fn(&TyBasic) -> Result, + ) -> Result { if self.is_any() || self.is_never() { Ok(self.dupe()) } else { @@ -429,11 +384,11 @@ impl Ty { for basic in xs { match typecheck(basic) { Ok(ty) => good.push(ty), - Err(()) => {} + Err(TypingNoContextError) => {} } } if good.is_empty() { - Err(()) + Err(TypingNoContextError) } else { Ok(Ty::unions(good)) } @@ -470,79 +425,136 @@ impl Ty { Ty::custom(TyCustomFunction(f)) } - pub(crate) fn from_docs_member(member: &DocMember) -> Self { - match member { - DocMember::Property(x) => x.typ.clone(), - DocMember::Function(x) => Self::from_docs_function(x), - } - } - /// Typechecker type of value. pub fn of_value(value: Value) -> Ty { if let Some(t) = value.get_ref().typechecker_ty() { t } else { - Ty::from_docs_member(&DocMember::from_value(value)) + value.get_type_starlark_repr() } } - pub(crate) fn from_docs_function(function: &DocFunction) -> Self { - let mut params = Vec::with_capacity(function.params.len()); - let mut seen_no_args = false; - for p in &function.params { - match p { - DocParam::Arg { - name, - typ, - default_value, - .. - } => { - let mut r = if seen_no_args { - Param::name_only(name, typ.clone()) - } else { - Param::pos_or_name(name, typ.clone()) - }; - if default_value.is_some() { - r = r.optional(); - } - params.push(r); - } - DocParam::OnlyPosBefore => { - for x in params.iter_mut() { - if matches!(x.mode, ParamMode::PosOrName(_)) { - x.mode = ParamMode::PosOnly; - } - } - } - DocParam::NoArgs => seen_no_args = true, - DocParam::Args { typ, .. } => { - seen_no_args = true; - params.push(Param::args(typ.clone())) - } - DocParam::Kwargs { typ, .. } => params.push(Param::kwargs(typ.clone())), - } + /// Check if the value of this type can be called with given arguments and expected return type. 
+ #[must_use] + pub(crate) fn check_call<'a>( + &self, + pos: impl IntoIterator<Item = Ty>, + named: impl IntoIterator<Item = (&'a str, Ty)>, + args: Option<Ty>, + kwargs: Option<Ty>, + expected_return_type: Ty, + ) -> bool { + let oracle = TypingOracleCtx { + codemap: CodeMap::empty_static(), + }; + let Ok(ret) = oracle.validate_call( + Span::default(), + self, + &TyCallArgs { + pos: pos + .into_iter() + .map(|p| Spanned { + span: Span::default(), + node: p, + }) + .collect(), + named: named + .into_iter() + .map(|p| Spanned { + span: Span::default(), + node: p, + }) + .collect(), + args: args.map(|t| Spanned { + span: Span::default(), + node: t, + }), + kwargs: kwargs.map(|t| Spanned { + span: Span::default(), + node: t, + }), + }, + ) else { + return false; + }; + let Ok(ok) = oracle.intersects(&ret, &expected_return_type) else { + return false; + }; + ok + } + + pub(crate) fn check_intersects(&self, other: &Ty) -> crate::Result<bool> { + let oracle = TypingOracleCtx { + codemap: CodeMap::empty_static(), + }; + match oracle.intersects(self, other) { + Ok(ok) => Ok(ok), + Err(e) => Err(e.into_error()), } - let result = function.ret.typ.clone(); - match &function.as_type { - None => Ty::function(params, result), - Some(type_attr) => Ty::ctor_function(type_attr, params, result), + } + + pub(crate) fn from_native_callable_components( + comp: &NativeCallableComponents, + as_type: Option<Ty>, + ) -> starlark::Result<Ty> { + let result = comp.return_type.clone(); + + let params = comp.param_spec.param_spec(); + + match as_type { + None => Ok(Ty::function(params, result)), + Some(type_attr) => Ok(Ty::ctor_function(type_attr, params, result)), } } -} -impl Display for Ty { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self.alternatives.as_slice() { + pub(crate) fn fmt_with_config( + &self, + f: &mut fmt::Formatter<'_>, + config: &TypeRenderConfig, + ) -> fmt::Result { + match self.iter_union() { [] => write!(f, "{}", TypingNever::TYPE), xs => { for (i, x) in xs.iter().enumerate() { if i != 0 { write!(f, " | ")?; } - write!(f, "{}", x)?; + x.fmt_with_config(f, config)?; } Ok(()) } } } + + pub(crate) fn display_with<'a>(&'a self, config: &'a TypeRenderConfig) -> TyDisplay<'a> { + TyDisplay { ty: self, config } + } +} + +/// Configuration for rendering types. +pub enum TypeRenderConfig { + /// Uses the default rendering configuration. + Default, + /// Used for linked types in docs. + LinkedType { + /// The map from a type to its link path. + ty_to_path_map: HashMap<Ty, String>, + }, +} + +pub(crate) struct TyDisplay<'a> { + ty: &'a Ty, + config: &'a TypeRenderConfig, +} + +impl Display for TyDisplay<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.ty.fmt_with_config(f, self.config) + } +} + +impl Display for Ty { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.fmt_with_config(f, &TypeRenderConfig::Default) + } } diff --git a/starlark-rust/starlark/src/typing/typecheck.rs b/starlark-rust/starlark/src/typing/typecheck.rs index 8c4c36bd9e814..fc7be3e4bddc8 100644 --- a/starlark-rust/starlark/src/typing/typecheck.rs +++ b/starlark-rust/starlark/src/typing/typecheck.rs @@ -118,7 +118,7 @@ pub(crate) fn solve_bindings( span: *span, }, require, - ); + )?; } Ok(( ctx.errors.into_inner(), @@ -166,6 +166,14 @@ impl TypeMap { ) .collect() } + + #[cfg(test)] + pub(crate) fn find_first_binding<'a>(&'a self) -> Option<&'a Ty> { + self.bindings + .entries_unordered() + .min_by_key(|(id, _)| *id) + .map(|(_, (_, _, ty))| ty) + } } /// Typecheck a module.
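
The `TyDisplay` adapter introduced in `ty.rs` above is a common Rust idiom: a borrowed wrapper pairs a value with a rendering configuration, so the plain `Display` impl stays parameter-free while `display_with` opts into a specific config. A minimal std-only sketch of the idiom, with hypothetical names (`Temp`, `Unit`, `TempDisplay`) standing in for the crate's own types:

use std::fmt::{self, Display, Formatter};

enum Unit {
    Celsius,
    Fahrenheit,
}

struct Temp {
    celsius: f64,
}

impl Temp {
    // The config-aware formatter, analogous to `Ty::fmt_with_config`.
    fn fmt_with_config(&self, f: &mut Formatter<'_>, config: &Unit) -> fmt::Result {
        match config {
            Unit::Celsius => write!(f, "{:.1}C", self.celsius),
            Unit::Fahrenheit => write!(f, "{:.1}F", self.celsius * 9.0 / 5.0 + 32.0),
        }
    }

    // The borrowing adapter constructor, analogous to `Ty::display_with`.
    fn display_with<'a>(&'a self, config: &'a Unit) -> TempDisplay<'a> {
        TempDisplay { temp: self, config }
    }
}

struct TempDisplay<'a> {
    temp: &'a Temp,
    config: &'a Unit,
}

impl Display for TempDisplay<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        self.temp.fmt_with_config(f, self.config)
    }
}

impl Display for Temp {
    // The parameter-free impl just picks the default config.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        self.fmt_with_config(f, &Unit::Celsius)
    }
}

fn main() {
    let t = Temp { celsius: 21.0 };
    assert_eq!("21.0C", t.to_string());
    assert_eq!("69.8F", t.display_with(&Unit::Fahrenheit).to_string());
}

The payoff is a single formatting code path: callers that only need the default keep writing `format!("{}", t)`, while documentation rendering threads its own config through the same `fmt_with_config`.
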
@@ -175,7 +183,7 @@ pub trait AstModuleTypecheck { self, globals: &Globals, loads: &HashMap<String, Interface>, - ) -> (Vec<anyhow::Error>, TypeMap, Interface, Vec<Approximation>); + ) -> (Vec<crate::Error>, TypeMap, Interface, Vec<Approximation>); } impl AstModuleTypecheck for AstModule { @@ -183,9 +191,8 @@ impl AstModuleTypecheck for AstModule { self, globals: &Globals, loads: &HashMap<String, Interface>, - ) -> (Vec<anyhow::Error>, TypeMap, Interface, Vec<Approximation>) { - let (codemap, statement, _dialect, allow_string_literals_in_type_expr, _) = - self.into_parts(); + ) -> (Vec<crate::Error>, TypeMap, Interface, Vec<Approximation>) { + let (codemap, statement, _dialect, _) = self.into_parts(); let names = MutableNames::new(); let frozen_heap = FrozenHeap::new(); let ( @@ -201,10 +208,10 @@ impl AstModuleTypecheck for AstModule { loads, statement, ScopeResolverGlobals { - globals: Some(frozen_heap.alloc_any_display_from_debug(globals.dupe())), + globals: Some(frozen_heap.alloc_any(globals.dupe())), }, - frozen_heap.alloc_any_display_from_debug(codemap.dupe()), - &Dialect::Extended, + frozen_heap.alloc_any(codemap.dupe()), + &Dialect::AllOptionsInternal, ); let scope_errors = scope_errors.into_map(TypingError::from_eval_exception); // We don't really need to properly unpack top-level statements, @@ -218,12 +225,11 @@ impl AstModuleTypecheck for AstModule { oracle, &scope_data, &mut approximations, - allow_string_literals_in_type_expr, ) { Ok(fill_types_errors) => fill_types_errors, Err(e) => { return ( - vec![InternalError::into_anyhow(e)], + vec![InternalError::into_error(e)], TypeMap { codemap, bindings: UnorderedMap::new(), @@ -248,7 +254,7 @@ impl AstModuleTypecheck for AstModule { Ok(bindings) => bindings, Err(e) => { return ( - vec![InternalError::into_anyhow(e)], + vec![InternalError::into_error(e)], TypeMap { codemap, bindings: UnorderedMap::new(), @@ -263,7 +269,7 @@ impl AstModuleTypecheck for AstModule { Ok(x) => x, Err(e) => { return ( - vec![e.into_anyhow()], + vec![e.into_error()], TypeMap { codemap, bindings: UnorderedMap::new(), @@ -297,7 +303,7 @@ impl AstModuleTypecheck for AstModule { let errors = [scope_errors, fill_types_errors, all_solve_errors] .into_iter() .flatten() - .map(TypingError::into_anyhow) + .map(TypingError::into_error) .collect(); let mut res = HashMap::new(); diff --git a/starlark-rust/starlark/src/typing/user.rs b/starlark-rust/starlark/src/typing/user.rs index 616bd57fe3559..35092554c83f2 100644 --- a/starlark-rust/starlark/src/typing/user.rs +++ b/starlark-rust/starlark/src/typing/user.rs @@ -23,16 +23,16 @@ use allocative::Allocative; use dupe::Dupe; use starlark_map::sorted_map::SortedMap; use starlark_syntax::codemap::Span; -use starlark_syntax::codemap::Spanned; +use crate::typing::call_args::TyCallArgs; +use crate::typing::callable::TyCallable; use crate::typing::custom::TyCustomImpl; +use crate::typing::error::TypingNoContextError; +use crate::typing::error::TypingNoContextOrInternalError; use crate::typing::error::TypingOrInternalError; -use crate::typing::function::TyCustomFunctionImpl; use crate::typing::starlark_value::TyStarlarkValue; -use crate::typing::Arg; use crate::typing::Ty; use crate::typing::TyBasic; -use crate::typing::TyFunction; use crate::typing::TypingOracleCtx; use crate::values::types::type_instance_id::TypeInstanceId; use crate::values::typing::type_compiled::alloc::TypeMatcherAlloc; @@ -40,17 +40,13 @@ use crate::values::typing::type_compiled::type_matcher_factory::TypeMatcherFacto #[derive(Debug, thiserror::Error)] enum TyUserError { - #[error( - "Type `{0}` specifies custom callable, but underlying `StarlarkValue` is not callable" - )] + #[error("Type `{0}` specifies
custom callable, but underlying `StarlarkValue` is not callable")] CallableNotCallable(String), #[error( "Type `{0}` specifies custom indexable, but underlying `StarlarkValue` is not indexable" )] IndexableNotIndexable(String), - #[error( - "Type `{0}` specifies custom iterable, but underlying `StarlarkValue` is not iterable" - )] + #[error("Type `{0}` specifies custom iterable, but underlying `StarlarkValue` is not iterable")] IterableNotIterable(String), } @@ -110,7 +106,7 @@ pub struct TyUserParams { /// Custom fields for this type (use `TyStarlarkValue` fields if not specified). pub fields: TyUserFields, /// Set if more precise callable signature is known than `base` provides. - pub callable: Option, + pub callable: Option, /// Set if more precise index signature is known than `base` provides. pub index: Option, /// Set if more precise iter item is known than `base` provides. @@ -121,7 +117,7 @@ pub struct TyUserParams { /// Type description for arbitrary type. #[derive(Allocative, Debug, derive_more::Display)] -#[display(fmt = "{}", name)] +#[display("{}", name)] pub struct TyUser { name: String, /// Base type for this custom type, e.g. generic record for record with known fields. @@ -132,7 +128,7 @@ pub struct TyUser { id: TypeInstanceId, fields: TyUserFields, /// Set if more precise callable signature is known than `base` provides. - callable: Option, + callable: Option, /// Set if more precise index signature is known than `base` provides. index: Option, /// Set if more precise iter item is known than `base` provides. @@ -156,20 +152,14 @@ impl TyUser { iter_item, _non_exhaustive: (), } = params; - if callable.is_some() { - if !base.is_callable() { - return Err(TyUserError::CallableNotCallable(name).into()); - } + if callable.is_some() && !base.is_callable() { + return Err(TyUserError::CallableNotCallable(name).into()); } - if index.is_some() { - if !base.is_indexable() { - return Err(TyUserError::IndexableNotIndexable(name).into()); - } + if index.is_some() && !base.is_indexable() { + return Err(TyUserError::IndexableNotIndexable(name).into()); } - if iter_item.is_some() { - if base.iter_item().is_err() { - return Err(TyUserError::IterableNotIterable(name).into()); - } + if iter_item.is_some() && base.iter_item().is_err() { + return Err(TyUserError::IterableNotIterable(name).into()); } Ok(TyUser { name, @@ -217,7 +207,7 @@ impl TyCustomImpl for TyUser { Some(&self.name) } - fn attribute(&self, attr: &str) -> Result { + fn attribute(&self, attr: &str) -> Result { if let Ok(ty) = self.base.attr_from_methods(attr) { Ok(ty) } else { @@ -227,25 +217,29 @@ impl TyCustomImpl for TyUser { if self.fields.unknown { Ok(Ty::any()) } else { - Err(()) + Err(TypingNoContextError) } } } } } - fn index(&self, item: &TyBasic, ctx: &TypingOracleCtx) -> Result { + fn index( + &self, + item: &TyBasic, + ctx: &TypingOracleCtx, + ) -> Result { if let Some(index) = &self.index { - if !ctx.intersects(&Ty::basic(item.dupe()), &index.index) { - return Err(()); + if !ctx.intersects(&Ty::basic(item.dupe()), &index.index)? { + return Err(TypingNoContextOrInternalError::Typing); } Ok(index.result.dupe()) } else { - self.base.index(item) + Ok(self.base.index(item)?) 
} } - fn iter_item(&self) -> Result { + fn iter_item(&self) -> Result { if let Some(iter_item) = &self.iter_item { Ok(iter_item.dupe()) } else { @@ -253,14 +247,18 @@ impl TyCustomImpl for TyUser { } } - fn is_callable(&self) -> bool { - self.base.is_callable() + fn as_callable(&self) -> Option { + if self.base.is_callable() { + Some(TyCallable::any()) + } else { + None + } } fn validate_call( &self, span: Span, - args: &[Spanned], + args: &TyCallArgs, oracle: TypingOracleCtx, ) -> Result { if let Some(callable) = &self.callable { @@ -305,9 +303,10 @@ mod tests { use crate::environment::GlobalsBuilder; use crate::eval::Arguments; use crate::eval::Evaluator; + use crate::typing::callable::TyCallable; use crate::typing::user::TyUserParams; + use crate::typing::ParamSpec; use crate::typing::Ty; - use crate::typing::TyFunction; use crate::typing::TyStarlarkValue; use crate::typing::TyUser; use crate::values::starlark_value_as_type::StarlarkValueAsType; @@ -324,7 +323,7 @@ mod tests { Allocative, NoSerialize )] - #[display(fmt = "plant")] + #[display("plant")] enum AbstractPlant {} #[starlark_value(type = "plant")] @@ -341,7 +340,7 @@ mod tests { Allocative, NoSerialize )] - #[display(fmt = "fruit_callable")] + #[display("fruit_callable")] struct FruitCallable { name: String, ty_fruit_callable: Ty, @@ -372,8 +371,8 @@ mod tests { &self, _me: Value<'v>, _args: &Arguments<'v, '_>, - _eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { + _eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result> { unreachable!("not needed in tests, but typechecker requires it") } } @@ -421,7 +420,7 @@ mod tests { TyStarlarkValue::new::(), TypeInstanceId::gen(), TyUserParams { - callable: Some(TyFunction::new(vec![], ty_fruit.clone())), + callable: Some(TyCallable::new(ParamSpec::empty(), ty_fruit.clone())), ..TyUserParams::default() }, diff --git a/starlark-rust/starlark/src/util.rs b/starlark-rust/starlark/src/util.rs new file mode 100644 index 0000000000000..9bc680c5f7ff3 --- /dev/null +++ b/starlark-rust/starlark/src/util.rs @@ -0,0 +1,25 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Utilities. + +pub(crate) mod arc_or_static; +pub(crate) mod arc_str; +pub(crate) mod non_static_type_id; +pub(crate) mod refcell; +pub(crate) mod rtabort; +pub use crate::util::arc_str::ArcStr; diff --git a/starlark-rust/starlark/src/util/arc_or_static.rs b/starlark-rust/starlark/src/util/arc_or_static.rs new file mode 100644 index 0000000000000..01941fed5fe6e --- /dev/null +++ b/starlark-rust/starlark/src/util/arc_or_static.rs @@ -0,0 +1,116 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::fmt::Display; +use std::hash::Hash; +use std::ops::Deref; +use std::sync::Arc; + +use allocative::Allocative; +use dupe::Dupe; + +#[derive(Debug, Allocative)] +enum Inner { + Arc(Arc), + Static(&'static T), +} + +#[derive(Debug, Allocative)] +pub(crate) struct ArcOrStatic(Inner); + +impl ArcOrStatic { + pub(crate) fn new_static(a: &'static T) -> Self { + ArcOrStatic(Inner::Static(a)) + } + + pub(crate) fn new_arc(a: Arc) -> Self { + ArcOrStatic(Inner::Arc(a)) + } + + pub(crate) fn new(a: T) -> Self + where + T: Sized, + { + Self::new_arc(Arc::new(a)) + } +} + +impl Deref for ArcOrStatic { + type Target = T; + + fn deref(&self) -> &Self::Target { + match &self.0 { + Inner::Arc(a) => a, + Inner::Static(s) => s, + } + } +} + +impl Display for ArcOrStatic { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + Display::fmt(&**self, f) + } +} + +impl Clone for ArcOrStatic { + fn clone(&self) -> Self { + Self(match &self.0 { + Inner::Arc(a) => Inner::Arc(a.dupe()), + Inner::Static(s) => Inner::Static(*s), + }) + } +} + +impl Dupe for ArcOrStatic {} + +impl PartialEq for ArcOrStatic +where + T: PartialEq, +{ + fn eq(&self, other: &Self) -> bool { + **self == **other + } +} + +impl Eq for ArcOrStatic where T: Eq {} + +impl PartialOrd for ArcOrStatic +where + T: PartialOrd, +{ + fn partial_cmp(&self, other: &Self) -> Option { + (**self).partial_cmp(&**other) + } +} + +impl Ord for ArcOrStatic +where + T: Ord, +{ + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + (**self).cmp(&**other) + } +} + +impl Hash for ArcOrStatic +where + T: Hash, +{ + fn hash(&self, state: &mut H) { + (**self).hash(state) + } +} diff --git a/starlark-rust/starlark/src/util/arc_str.rs b/starlark-rust/starlark/src/util/arc_str.rs new file mode 100644 index 0000000000000..12e7813999fc3 --- /dev/null +++ b/starlark-rust/starlark/src/util/arc_str.rs @@ -0,0 +1,78 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::borrow::Borrow; +use std::hash::Hash; +use std::ops::Deref; +use std::sync::Arc; + +use allocative::Allocative; +use dupe::Dupe; + +use crate::util::arc_or_static::ArcOrStatic; + +/// Wrapper for `Arc`. +#[derive( + Clone, + Dupe, + Eq, + PartialEq, + Hash, + Ord, + PartialOrd, + Debug, + derive_more::Display, + Allocative +)] +#[display("{}", &**self)] +pub struct ArcStr(ArcOrStatic); + +impl ArcStr { + /// Create from static `str` without allocation. 
+ pub fn new_static(s: &'static str) -> ArcStr { + ArcStr(ArcOrStatic::new_static(s)) + } + + /// Get the `str`. + pub fn as_str(&self) -> &str { + self + } +} + +impl Deref for ArcStr { + type Target = str; + + fn deref(&self) -> &str { + self.0.deref() + } +} + +impl Borrow for ArcStr { + fn borrow(&self) -> &str { + self + } +} + +impl<'a> From<&'a str> for ArcStr { + fn from(s: &'a str) -> Self { + if s.is_empty() { + ArcStr(ArcOrStatic::new_static("")) + } else { + ArcStr(ArcOrStatic::new_arc(Arc::from(s))) + } + } +} diff --git a/starlark-rust/starlark/src/util/non_static_type_id.rs b/starlark-rust/starlark/src/util/non_static_type_id.rs new file mode 100644 index 0000000000000..64a3143727e5b --- /dev/null +++ b/starlark-rust/starlark/src/util/non_static_type_id.rs @@ -0,0 +1,56 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#![cfg(test)] + +use std::any::TypeId; +use std::marker::PhantomData; +use std::mem; + +pub(crate) fn non_static_type_id() -> TypeId { + trait NonStaticAny { + fn get_type_id(&self) -> TypeId + where + Self: 'static; + } + + impl NonStaticAny for PhantomData { + fn get_type_id(&self) -> TypeId + where + Self: 'static, + { + TypeId::of::() + } + } + + let phantom_data = PhantomData::; + NonStaticAny::get_type_id(unsafe { + mem::transmute::<&dyn NonStaticAny, &(dyn NonStaticAny + 'static)>(&phantom_data) + }) +} + +#[cfg(test)] +mod tests { + use std::any::TypeId; + + use crate::util::non_static_type_id::non_static_type_id; + + #[test] + fn test_non_static_type_id() { + assert_eq!(non_static_type_id::<&str>(), TypeId::of::<&'static str>()); + } +} diff --git a/starlark-rust/starlark/src/values/types/dict/refcell.rs b/starlark-rust/starlark/src/util/refcell.rs similarity index 100% rename from starlark-rust/starlark/src/values/types/dict/refcell.rs rename to starlark-rust/starlark/src/util/refcell.rs diff --git a/starlark-rust/starlark/src/util/rtabort.rs b/starlark-rust/starlark/src/util/rtabort.rs new file mode 100644 index 0000000000000..a25096630fe1f --- /dev/null +++ b/starlark-rust/starlark/src/util/rtabort.rs @@ -0,0 +1,90 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::fmt::Arguments; +use std::io; +use std::process; + +/// Like `panic!`, but aborts the process instead of unwinding. 
+/// +/// Although we compile buck2 with `panic=abort`, this is safer because +/// others may copy-paste code. +macro_rules! rtabort { + ($m:literal) => { + $crate::util::rtabort::rtabort_impl_fixed_string( + file!(), + line!(), + $m, + ) + }; + ($($t:tt)*) => { + $crate::util::rtabort::rtabort_impl( + file!(), + line!(), + format_args!($($t)*), + ) + }; +} + +pub(crate) use rtabort; + +#[cold] +pub(crate) fn rtabort_impl_fixed_string(file: &str, line: u32, message: &str) -> ! { + rtabort_impl(file, line, format_args!("{}", message)); +} + +#[cold] +pub(crate) fn rtabort_impl(file: &str, line: u32, msg: Arguments) -> ! { + // Make sure we abort even if formatting panics. + let _abort = AbortOnDrop; + + // `eprintln!` followed by `abort` does not print anything in tests. + io::Write::write_fmt( + &mut io::stderr(), + format_args!("{}:{}: abort: {}\n", file, line, msg), + ) + .ok(); + + // Tell the compiler that we never return. + process::abort(); +} + +struct AbortOnDrop; + +impl Drop for AbortOnDrop { + fn drop(&mut self) { + process::abort(); + } +} + +#[cfg(test)] +mod tests { + fn _test_compiles_fixed_string() { + rtabort!("test"); + } + + fn _test_compiles_with_format_args() { + rtabort!("test {}", 17); + } + + #[test] + fn test_rtabort() { + // Uncomment to test. + // rtabort!("test {}", 17); + // rtabort!("test {}", { panic!(); 17 }); + } +} diff --git a/starlark-rust/starlark/src/values.rs b/starlark-rust/starlark/src/values.rs new file mode 100644 index 0000000000000..172602b54fa5a --- /dev/null +++ b/starlark-rust/starlark/src/values.rs @@ -0,0 +1,131 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Defines a runtime Starlark value ([`Value`]) and traits for defining custom values ([`StarlarkValue`]). +//! +//! This module contains code for working with Starlark values: +//! +//! * Most code dealing with Starlark will use [`Value`], as it represents the fundamental values used in +//! Starlark. When frozen, they become [`FrozenValue`]. +//! * Values are garbage-collected, so a given [`Value`] lives on a [`Heap`]. +//! * Rust values (e.g. [`String`], [`Vec`]) can be added to the [`Heap`] with [`AllocValue`], +//! and deconstructed from a [`Value`] with [`UnpackValue`] +//! (or specialised methods like [`unpack_str`](Value::unpack_str)). +//! * To define your own Rust data type that can live in a [`Value`] it must implement the [`StarlarkValue`] +//! trait. +//! * All the nested modules represent the built-in Starlark values. These are all defined using [`StarlarkValue`], +//! so may serve as interesting inspiration for writing your own values, in addition to occurring in Starlark programs. 
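
The round trip those bullets describe is short in practice. A minimal sketch, assuming the crate's public `Heap::new` constructor and `unpack_str` accessor (the `Heap` and `Value` re-exports themselves are visible in the `pub use` list just below):

use starlark::values::{Heap, Value};

fn demo() {
    // A Rust value goes onto the heap via its `AllocValue` impl...
    let heap = Heap::new();
    let v: Value = heap.alloc("hello");
    // ...and is deconstructed again via a specialised unpack method.
    assert_eq!(Some("hello"), v.unpack_str());
}

Freezing follows the same shape on the other side of the divide: `FrozenHeap` plays the role of `Heap`, and the allocation comes back as a `FrozenValue`.
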
+ +pub use layout::alloc_static_simple::AllocStaticSimple; +pub use owned_frozen_ref::OwnedFrozenRef; +pub use owned_frozen_ref::OwnedRefFrozenRef; +pub use starlark_derive::starlark_attrs; +pub use starlark_derive::starlark_value; +pub use starlark_derive::AllocFrozenValue; +pub use starlark_derive::AllocValue; +pub use starlark_derive::Freeze; +pub use starlark_derive::NoSerialize; +pub use starlark_derive::StarlarkAttrs; +pub use starlark_derive::Trace; +pub use starlark_derive::UnpackValue; + +pub use crate::any::AnyLifetime; +pub use crate::any::ProvidesStaticType; +pub use crate::coerce::Coerce; +pub use crate::values::alloc_value::AllocFrozenValue; +pub use crate::values::alloc_value::AllocValue; +pub use crate::values::demand::Demand; +pub use crate::values::error::ValueError; +pub use crate::values::freeze::Freeze; +pub use crate::values::frozen_ref::FrozenRef; +pub use crate::values::iter::StarlarkIterator; +pub use crate::values::layout::complex::ValueTypedComplex; +pub use crate::values::layout::heap::heap_type::Freezer; +pub use crate::values::layout::heap::heap_type::FrozenHeap; +pub use crate::values::layout::heap::heap_type::FrozenHeapRef; +pub use crate::values::layout::heap::heap_type::Heap; +pub use crate::values::layout::heap::heap_type::Tracer; +pub use crate::values::layout::identity::ValueIdentity; +pub use crate::values::layout::static_string::constant_string; +pub use crate::values::layout::static_string::StarlarkStrNRepr; +pub use crate::values::layout::typed::string::FrozenStringValue; +pub use crate::values::layout::typed::string::StringValue; +pub use crate::values::layout::typed::string::StringValueLike; +pub use crate::values::layout::typed::FrozenValueTyped; +pub use crate::values::layout::typed::ValueTyped; +pub use crate::values::layout::value::FrozenValue; +pub use crate::values::layout::value::Value; +pub use crate::values::layout::value::ValueLike; +pub use crate::values::layout::value_lifetimeless::ValueLifetimeless; +pub use crate::values::owned::OwnedFrozenValue; +pub use crate::values::owned::OwnedFrozenValueTyped; +pub use crate::values::trace::Trace; +pub use crate::values::traits::ComplexValue; +pub use crate::values::traits::StarlarkValue; +pub use crate::values::types::any; +pub use crate::values::types::any_complex; +pub use crate::values::types::array; +pub use crate::values::types::bool; +pub use crate::values::types::dict; +pub use crate::values::types::enumeration; +pub use crate::values::types::exported_name; +pub use crate::values::types::float; +pub use crate::values::types::function; +pub use crate::values::types::int; +pub use crate::values::types::list; +pub use crate::values::types::list_or_tuple; +pub use crate::values::types::namespace; +pub use crate::values::types::none; +pub use crate::values::types::range; +pub use crate::values::types::record; +pub use crate::values::types::set; +pub use crate::values::types::starlark_value_as_type; +pub use crate::values::types::string; +pub use crate::values::types::structs; +pub use crate::values::types::tuple; +pub use crate::values::unpack::UnpackValue; +pub use crate::values::unpack::UnpackValueError; +pub use crate::values::unpack::UnpackValueErrorInfallible; +pub use crate::values::unpack_and_discard::UnpackAndDiscard; +pub use crate::values::value_of::ValueOf; +pub use crate::values::value_of_unchecked::FrozenValueOfUnchecked; +pub use crate::values::value_of_unchecked::ValueOfUnchecked; +pub use crate::values::value_of_unchecked::ValueOfUncheckedGeneric; + +mod alloc_value; +mod 
comparison; +pub(crate) mod demand; +pub(crate) mod error; +mod freeze; +pub(crate) mod frozen_ref; +mod index; +pub(crate) mod iter; +pub(crate) mod layout; +mod owned; +pub(crate) mod owned_frozen_ref; +pub(crate) mod recursive_repr_or_json_guard; +mod stack_guard; +pub(crate) mod starlark_type_id; +mod trace; +pub(crate) mod traits; +pub mod type_repr; +pub(crate) mod types; +pub mod typing; +mod unpack; +mod unpack_and_discard; +pub(crate) mod value_of; +pub(crate) mod value_of_unchecked; diff --git a/starlark-rust/starlark/src/values/alloc_value.rs b/starlark-rust/starlark/src/values/alloc_value.rs index 2ed7616ed10bd..f6ff185bf1997 100644 --- a/starlark-rust/starlark/src/values/alloc_value.rs +++ b/starlark-rust/starlark/src/values/alloc_value.rs @@ -55,6 +55,21 @@ use crate::values::Value; /// } /// } /// ``` +/// +/// # Derive +/// +/// `AllocValue` can be derived for enums, like this: +/// +/// ``` +/// use starlark::values::type_repr::StarlarkTypeRepr; +/// use starlark::values::AllocValue; +/// +/// #[derive(StarlarkTypeRepr, AllocValue)] +/// enum AllocIntOrStr { +/// Int(i32), +/// Str(String), +/// } +/// ``` pub trait AllocValue<'v>: StarlarkTypeRepr { /// Allocate the value on a heap and return a reference to the allocated value. /// @@ -81,18 +96,6 @@ impl<'v> AllocValue<'v> for Value<'v> { } } -impl<'v, T> AllocValue<'v> for Option -where - T: AllocValue<'v>, -{ - fn alloc_value(self, heap: &'v Heap) -> Value<'v> { - match self { - Some(v) => v.alloc_value(heap), - None => Value::new_none(), - } - } -} - impl<'v, A: AllocValue<'v>, B: AllocValue<'v>> AllocValue<'v> for Either { #[inline] fn alloc_value(self, heap: &'v Heap) -> Value<'v> { @@ -114,6 +117,21 @@ impl AllocFrozenValue for Either } /// Trait for things that can be allocated on a [`FrozenHeap`] producing a [`FrozenValue`]. +/// +/// # Derive +/// +/// `AllocFrozenValue` can be derived for enums, like this: +/// +/// ``` +/// use starlark::values::type_repr::StarlarkTypeRepr; +/// use starlark::values::AllocFrozenValue; +/// +/// #[derive(StarlarkTypeRepr, AllocFrozenValue)] +/// enum AllocIntOrStr { +/// Int(i32), +/// Str(String), +/// } +/// ``` pub trait AllocFrozenValue: StarlarkTypeRepr { /// Allocate a value in the frozen heap and return a reference to the allocated value. 
fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue; diff --git a/starlark-rust/starlark/src/values/comparison.rs b/starlark-rust/starlark/src/values/comparison.rs index a906be94088bb..95c5c51067db1 100644 --- a/starlark-rust/starlark/src/values/comparison.rs +++ b/starlark-rust/starlark/src/values/comparison.rs @@ -19,6 +19,7 @@ use std::cmp::Ordering; use std::hash::Hash; use itertools::Itertools; +use starlark_map::small_set::SmallSet; use starlark_map::Equivalent; use crate::collections::SmallMap; @@ -63,6 +64,24 @@ where Ok(true) } +pub(crate) fn equals_small_set(xs: &SmallSet, ys: &SmallSet) -> bool +where + K1: Equivalent + Eq, + K2: Eq, +{ + if xs.len() != ys.len() { + return false; + } + + for x in xs.iter_hashed() { + if !ys.contains_hashed(x) { + return false; + } + } + + true +} + pub(crate) fn compare_slice( xs: &[X1], ys: &[X2], diff --git a/starlark-rust/starlark/src/values/demand.rs b/starlark-rust/starlark/src/values/demand.rs index f60c057c0b62e..bfee6ad8ba711 100644 --- a/starlark-rust/starlark/src/values/demand.rs +++ b/starlark-rust/starlark/src/values/demand.rs @@ -94,7 +94,7 @@ mod tests { NoSerialize, Allocative )] - #[display(fmt = "SomeType")] + #[display("SomeType")] struct MyValue { payload: u32, } diff --git a/starlark-rust/starlark/src/values/error.rs b/starlark-rust/starlark/src/values/error.rs index c063a68548400..9b5828e148c6a 100644 --- a/starlark-rust/starlark/src/values/error.rs +++ b/starlark-rust/starlark/src/values/error.rs @@ -40,10 +40,6 @@ pub enum ValueError { IntegerOverflow, #[error("Negative shift count")] NegativeShiftCount, - #[error("Type of parameters mismatch, expected `{0}`, actual `{1}`")] - IncorrectParameterTypeWithExpected(String, String), - #[error("Type of parameter `{0}` doesn't match, expected `{1}`, actual `{2}`")] - IncorrectParameterTypeNamedWithExpected(String, String, String), #[error("Type of parameters mismatch")] IncorrectParameterType, #[error("Type of parameter `{0}` doesn't match")] @@ -66,6 +62,12 @@ pub enum ValueError { NoAttrDidYouMean(String, String, String), } +impl From for crate::Error { + fn from(e: ValueError) -> Self { + crate::Error::new_kind(crate::ErrorKind::Value(anyhow::Error::new(e))) + } +} + #[derive(Debug, Error)] pub(crate) enum ControlError { #[error("Value of type `{0}` is not hashable")] @@ -80,7 +82,7 @@ impl ValueError { left: &str, op: &str, right: Option<&str>, - ) -> anyhow::Result { + ) -> crate::Result { match right { None => Err(ValueError::OperationNotSupported { op: op.to_owned(), @@ -98,26 +100,22 @@ impl ValueError { /// Helper to create an [`OperationNotSupported`](ValueError::OperationNotSupported) error. #[cold] - pub fn unsupported<'v, T, V: StarlarkValue<'v> + ?Sized>( - _left: &V, - op: &str, - ) -> anyhow::Result { + pub fn unsupported<'v, T, V: StarlarkValue<'v>>(_left: &V, op: &str) -> crate::Result { Self::unsupported_owned(V::TYPE, op, None) } - /// Helper to create an [`OperationNotSupported`](ValueError::OperationNotSupported) error. #[cold] - pub(crate) fn unsupported_type(left: Value, op: &str) -> anyhow::Result { + pub(crate) fn unsupported_type(left: Value, op: &str) -> crate::Result { Self::unsupported_owned(left.get_type(), op, None) } /// Helper to create an [`OperationNotSupported`](ValueError::OperationNotSupportedBinary) error. 
#[cold] - pub fn unsupported_with<'v, T, V: StarlarkValue<'v> + ?Sized>( + pub fn unsupported_with<'v, T, V: StarlarkValue<'v>>( _left: &V, op: &str, right: Value, - ) -> anyhow::Result { + ) -> crate::Result { Self::unsupported_owned(V::TYPE, op, Some(right.get_type())) } } diff --git a/starlark-rust/starlark/src/values/freeze.rs b/starlark-rust/starlark/src/values/freeze.rs index eab4eee9801e6..d6b04849a61a8 100644 --- a/starlark-rust/starlark/src/values/freeze.rs +++ b/starlark-rust/starlark/src/values/freeze.rs @@ -15,6 +15,7 @@ * limitations under the License. */ +use std::cell::OnceCell; use std::cell::RefCell; use std::cell::UnsafeCell; use std::marker; @@ -161,6 +162,17 @@ where } } +impl Freeze for OnceCell +where + T: Freeze, +{ + type Frozen = Option; + + fn freeze(self, freezer: &Freezer) -> anyhow::Result { + self.into_inner().freeze(freezer) + } +} + impl Freeze for Box where T: Freeze, diff --git a/starlark-rust/starlark/src/values/frozen_ref.rs b/starlark-rust/starlark/src/values/frozen_ref.rs index 2c144218f8482..c1628373d57eb 100644 --- a/starlark-rust/starlark/src/values/frozen_ref.rs +++ b/starlark-rust/starlark/src/values/frozen_ref.rs @@ -78,6 +78,26 @@ impl<'f, T: 'f + ?Sized> FrozenRef<'f, T> { value: f(self.value), } } + + /// Fallible map the reference to another one. + pub fn try_map_result(self, f: F) -> Result, E> + where + for<'v> F: FnOnce(&'v T) -> Result<&'v U, E>, + { + Ok(FrozenRef { + value: f(self.value)?, + }) + } + + /// Optionally map the reference to another one. + pub fn try_map_option(self, f: F) -> Option> + where + for<'v> F: FnOnce(&'v T) -> Option<&'v U>, + { + Some(FrozenRef { + value: f(self.value)?, + }) + } } impl<'f, T: ?Sized + Display> Display for FrozenRef<'f, T> { diff --git a/starlark-rust/starlark/src/values/index.rs b/starlark-rust/starlark/src/values/index.rs index 991d46be7ebe7..c7e66f797e8d2 100644 --- a/starlark-rust/starlark/src/values/index.rs +++ b/starlark-rust/starlark/src/values/index.rs @@ -15,6 +15,7 @@ * limitations under the License. */ +use crate::values::UnpackValue; use crate::values::Value; use crate::values::ValueError; @@ -30,22 +31,14 @@ fn convert_index_aux( if v.is_none() { Ok(default) } else { - match v.to_int() { - Ok(x) => { - let i = if x < 0 { len + x } else { x }; - if i < min { - Ok(min) - } else if i > max { - Ok(max) - } else { - Ok(i) - } - } - Err(..) => Err(ValueError::IncorrectParameterTypeWithExpected( - "none or int".to_owned(), - v.get_type().to_owned(), - ) - .into()), + let x = i32::unpack_value_err(v)?; + let i = if x < 0 { len + x } else { x }; + if i < min { + Ok(min) + } else if i > max { + Ok(max) + } else { + Ok(i) } } } else { @@ -59,24 +52,16 @@ fn convert_index_aux( /// and len. Raise the correct errors if the value is not numeric or the /// index is out of bound. pub(crate) fn convert_index(v: Value, len: i32) -> anyhow::Result { - match v.to_int() { - Ok(x) => { - let i = if x < 0 { - len.checked_add(x).ok_or(ValueError::IntegerOverflow)? - } else { - x - }; - if i < 0 || i >= len { - Err(ValueError::IndexOutOfBound(i).into()) - } else { - Ok(i) - } - } - Err(..) => Err(ValueError::IncorrectParameterTypeWithExpected( - "int".to_owned(), - v.get_type().to_owned(), - ) - .into()), + let x = i32::unpack_value_err(v)?; + let i = if x < 0 { + len.checked_add(x).ok_or(ValueError::IntegerOverflow)? 
+ } else { + x + }; + if i < 0 || i >= len { + Err(ValueError::IndexOutOfBound(i).into()) + } else { + Ok(i) } } @@ -95,12 +80,7 @@ pub(crate) fn convert_slice_indices( let stride = match stride { None => 1, Some(v) if v.is_none() => 1, - Some(v) => v.to_int().map_err(|_| { - ValueError::IncorrectParameterTypeWithExpected( - "int or None".to_owned(), - v.get_type().to_owned(), - ) - })?, + Some(v) => i32::unpack_value_err(v)?, }; match stride { 0 => Err(ValueError::IndexOutOfBound(0).into()), @@ -168,7 +148,7 @@ pub(crate) fn apply_slice( #[cfg(test)] mod tests { use super::*; - use crate::values::types::inline_int::InlineInt; + use crate::values::types::int::inline_int::InlineInt; use crate::values::Heap; #[test] diff --git a/starlark-rust/starlark/src/values/layout.rs b/starlark-rust/starlark/src/values/layout.rs new file mode 100644 index 0000000000000..79081d7a8efb8 --- /dev/null +++ b/starlark-rust/starlark/src/values/layout.rs @@ -0,0 +1,37 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Possible optimisations: +// Encoding none, bool etc in the pointer of frozen value + +pub(crate) mod aligned_size; +pub(crate) mod alloc_static_simple; +pub(crate) mod avalue; +pub(crate) mod complex; +mod const_frozen_string; +pub(crate) mod const_type_id; +pub(crate) mod heap; +pub(crate) mod identity; +pub(crate) mod pointer; +pub(crate) mod static_string; +pub(crate) mod typed; +pub(crate) mod value; +pub(crate) mod value_alloc_size; +pub(crate) mod value_captured; +pub(crate) mod value_lifetimeless; +pub(crate) mod value_not_special; +pub(crate) mod vtable; diff --git a/starlark-rust/starlark/src/values/layout/aligned_size.rs b/starlark-rust/starlark/src/values/layout/aligned_size.rs index d99f5805479df..ca88b1906ef80 100644 --- a/starlark-rust/starlark/src/values/layout/aligned_size.rs +++ b/starlark-rust/starlark/src/values/layout/aligned_size.rs @@ -18,6 +18,9 @@ use std::alloc::Layout; use std::mem; use std::ops::Add; +use std::ops::Mul; +use std::ops::Sub; +use std::ptr::NonNull; use allocative::Allocative; use dupe::Dupe; @@ -26,7 +29,18 @@ use crate::values::layout::heap::repr::AValueHeader; /// Allocations in Starlark are word-aligned, and this type represents the size of an allocation. 
#[derive( - Copy, Clone, Dupe, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Allocative + Copy, + Clone, + Dupe, + Default, + Debug, + Eq, + PartialEq, + Ord, + PartialOrd, + Hash, + Allocative, + derive_more::Display )] #[repr(transparent)] pub(crate) struct AlignedSize { @@ -35,20 +49,33 @@ pub(crate) struct AlignedSize { } impl AlignedSize { + pub(crate) const ZERO: AlignedSize = AlignedSize::new_bytes(0); + const MAX_SIZE: AlignedSize = AlignedSize::new_bytes(u32::MAX as usize - AValueHeader::ALIGN + 1); + #[track_caller] #[inline] pub(crate) const fn new_bytes(bytes: usize) -> AlignedSize { - assert!(bytes % AValueHeader::ALIGN == 0); - assert!(bytes as u32 as usize == bytes); + assert!( + bytes % AValueHeader::ALIGN == 0, + "AlignedSize must be aligned" + ); + assert!( + bytes as u32 as usize == bytes, + "AlignedSize must not exceed u32::MAX" + ); let bytes = bytes as u32; AlignedSize { bytes } } + #[track_caller] #[inline] pub(crate) const fn align_up(bytes: usize) -> AlignedSize { - assert!(bytes <= AlignedSize::MAX_SIZE.bytes() as usize); + assert!( + bytes <= AlignedSize::MAX_SIZE.bytes() as usize, + "AlignedSize must not exceed u32::MAX" + ); let bytes = (bytes + AValueHeader::ALIGN - 1) & !(AValueHeader::ALIGN - 1); let bytes = bytes as u32; AlignedSize { bytes } @@ -71,13 +98,99 @@ impl AlignedSize { Err(_) => panic!("Layout::from_size_align failed"), } } + + #[inline] + pub(crate) fn checked_next_power_of_two(self) -> Option { + let bytes = self.bytes.checked_next_power_of_two()?; + Some(AlignedSize::new_bytes(bytes as usize)) + } + + #[inline] + pub(crate) fn unchecked_sub(self, rhs: AlignedSize) -> AlignedSize { + debug_assert!(self.bytes >= rhs.bytes, "{:?} - {:?}", self, rhs); + AlignedSize { + bytes: self.bytes - rhs.bytes, + } + } + + #[inline] + pub(crate) fn ptr_diff(begin: NonNull, end: NonNull) -> AlignedSize { + unsafe { AlignedSize::new_bytes(end.as_ptr().byte_offset_from(begin.as_ptr()) as usize) } + } } impl Add for AlignedSize { type Output = AlignedSize; + #[track_caller] #[inline] fn add(self, rhs: AlignedSize) -> AlignedSize { - AlignedSize::new_bytes(self.bytes.checked_add(rhs.bytes).unwrap() as usize) + let bytes = self.bytes.checked_add(rhs.bytes).unwrap(); + AlignedSize { bytes } + } +} + +impl Sub for AlignedSize { + type Output = AlignedSize; + + #[track_caller] + #[inline] + fn sub(self, rhs: AlignedSize) -> AlignedSize { + let bytes = self.bytes.checked_sub(rhs.bytes).unwrap(); + AlignedSize { bytes } + } +} + +impl Mul for AlignedSize { + type Output = AlignedSize; + + #[track_caller] + #[inline] + fn mul(self, rhs: u32) -> Self::Output { + let bytes = self.bytes.checked_mul(rhs).unwrap(); + AlignedSize { bytes } + } +} + +#[cfg(test)] +mod tests { + use crate::values::layout::aligned_size::AlignedSize; + use crate::values::layout::heap::repr::AValueHeader; + + #[test] + fn test_checked_next_power_of_two() { + assert_eq!( + AlignedSize::new_bytes(AValueHeader::ALIGN), + AlignedSize::new_bytes(AValueHeader::ALIGN) + .checked_next_power_of_two() + .unwrap() + ); + assert_eq!( + AlignedSize::new_bytes(2 * AValueHeader::ALIGN), + AlignedSize::new_bytes(2 * AValueHeader::ALIGN) + .checked_next_power_of_two() + .unwrap() + ); + assert_eq!( + AlignedSize::new_bytes(4 * AValueHeader::ALIGN), + AlignedSize::new_bytes(3 * AValueHeader::ALIGN) + .checked_next_power_of_two() + .unwrap() + ); + assert_eq!( + AlignedSize::new_bytes(8 * AValueHeader::ALIGN), + AlignedSize::new_bytes(5 * AValueHeader::ALIGN) + .checked_next_power_of_two() + .unwrap() + ); + } + + 
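
The test above pins down `checked_next_power_of_two`; the `align_up` constructor in this file rests on the standard mask trick for rounding up to a power-of-two alignment. A self-contained sketch of that arithmetic, with a stand-in `ALIGN` constant since `AValueHeader::ALIGN` is crate-internal:

const ALIGN: usize = 8; // stand-in for AValueHeader::ALIGN (a power of two)

// Round `bytes` up to the next multiple of `ALIGN`: adding ALIGN - 1
// overshoots into the next aligned block unless `bytes` is already
// aligned, and the mask then clears the low bits again.
const fn align_up(bytes: usize) -> usize {
    (bytes + ALIGN - 1) & !(ALIGN - 1)
}

fn main() {
    assert_eq!(0, align_up(0));
    assert_eq!(8, align_up(1));
    assert_eq!(8, align_up(8));
    assert_eq!(16, align_up(9));
}

This is the same computation `align_up` performs, just decoupled from the heap types so it can be checked in isolation.
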
#[test] + fn test_sub() { + assert_eq!( + AlignedSize::new_bytes(2 * AValueHeader::ALIGN), + AlignedSize::new_bytes(5 * AValueHeader::ALIGN) + - AlignedSize::new_bytes(3 * AValueHeader::ALIGN) + ); } } diff --git a/starlark-rust/starlark/src/values/layout/alloc_static_simple.rs b/starlark-rust/starlark/src/values/layout/alloc_static_simple.rs new file mode 100644 index 0000000000000..d94c0cb188e44 --- /dev/null +++ b/starlark-rust/starlark/src/values/layout/alloc_static_simple.rs @@ -0,0 +1,78 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use crate::values::layout::avalue; +use crate::values::layout::avalue::AValueImpl; +use crate::values::layout::avalue::AValueSimple; +use crate::values::layout::heap::repr::AValueRepr; +use crate::values::FrozenValue; +use crate::values::FrozenValueTyped; +use crate::values::StarlarkValue; + +/// Allocate simple value statically. +pub struct AllocStaticSimple>( + AValueRepr>>, +); + +impl> AllocStaticSimple { + /// Allocate a value statically. + pub const fn alloc(value: T) -> Self { + AllocStaticSimple(avalue::alloc_static::>(value)) + } + + /// Get the value. + pub fn unpack(&'static self) -> FrozenValueTyped<'static, T> { + FrozenValueTyped::new_repr(&self.0) + } + + /// Get the value. 
+ pub fn to_frozen_value(&'static self) -> FrozenValue { + self.unpack().to_frozen_value() + } +} + +#[cfg(test)] +mod tests { + use allocative::Allocative; + use starlark_derive::starlark_value; + use starlark_derive::NoSerialize; + use starlark_derive::ProvidesStaticType; + + use crate as starlark; + use crate::values::AllocStaticSimple; + use crate::values::StarlarkValue; + + #[test] + fn test_alloc_static_simple() { + #[derive( + Debug, + derive_more::Display, + ProvidesStaticType, + NoSerialize, + Allocative + )] + #[display("MySimpleValue")] + struct MySimpleValue(u32); + + #[starlark_value(type = "MySimpleValue")] + impl<'v> StarlarkValue<'v> for MySimpleValue {} + + static VALUE: AllocStaticSimple = + AllocStaticSimple::alloc(MySimpleValue(17)); + assert_eq!(17, VALUE.unpack().as_ref().0); + } +} diff --git a/starlark-rust/starlark/src/values/layout/avalue.rs b/starlark-rust/starlark/src/values/layout/avalue.rs index 6b77ec4b92457..331faa4db8f2f 100644 --- a/starlark-rust/starlark/src/values/layout/avalue.rs +++ b/starlark-rust/starlark/src/values/layout/avalue.rs @@ -24,8 +24,6 @@ use std::mem; use allocative::Allocative; use derive_more::Display; -use serde::Serialize; -use serde::Serializer; use starlark_syntax::slice_vec_ext::SliceExt; use crate as starlark; @@ -44,8 +42,7 @@ use crate::values::layout::heap::repr::ForwardPtr; use crate::values::layout::value_alloc_size::ValueAllocSize; use crate::values::layout::vtable::AValueVTable; use crate::values::list::value::ListGen; -use crate::values::list::value::VALUE_EMPTY_FROZEN_LIST; -use crate::values::string::StarlarkStr; +use crate::values::string::str_type::StarlarkStr; use crate::values::types::any_array::AnyArray; use crate::values::types::array::Array; use crate::values::types::list::value::FrozenListData; @@ -61,14 +58,12 @@ use crate::values::Tracer; use crate::values::Value; use crate::values::ValueTyped; -pub(crate) const fn alloc_static(mode: M, value: T) -> AValueRepr> +pub(crate) const fn alloc_static<'v, A>(value: A::StarlarkValue) -> AValueRepr> where - M: AValueMode, - AValueImpl: AValue<'static>, + A: AValue<'v>, { - mem::forget(mode); - let payload = AValueImpl::::new(value); - AValueRepr::with_metadata(AValueVTable::new::>(), payload) + let payload = AValueImpl::::new(value); + AValueRepr::with_metadata(AValueVTable::new::(), payload) } pub(crate) const VALUE_STR_A_VALUE_PTR: AValueHeader = @@ -80,7 +75,7 @@ enum AValueError { CannotBeFrozen(&'static str), } -/// Sized counterpart of [`AValueDyn`]. +/// Extended vtable methods (those not covered by `StarlarkValue`). pub(crate) trait AValue<'v>: Sized + 'v { /// Unwrapped type. type StarlarkValue: StarlarkValue<'v>; @@ -90,7 +85,7 @@ pub(crate) trait AValue<'v>: Sized + 'v { type ExtraElem: 'v; /// Payload array length. - fn extra_len(&self) -> usize; + fn extra_len(value: &Self::StarlarkValue) -> usize; /// Offset of field holding content, in bytes. 
/// @@ -120,152 +115,132 @@ pub(crate) trait AValue<'v>: Sized + 'v { } unsafe fn heap_freeze( - me: *mut AValueRepr, + me: *mut AValueRepr, freezer: &Freezer, ) -> anyhow::Result; - unsafe fn heap_copy(me: *mut AValueRepr, tracer: &Tracer<'v>) -> Value<'v>; + unsafe fn heap_copy(me: *mut AValueRepr, tracer: &Tracer<'v>) + -> Value<'v>; } #[inline] pub(crate) fn starlark_str<'v>( len: usize, hash: StarlarkHashValue, -) -> impl AValue<'v, ExtraElem = usize> + Send + Sync { - AValueImpl::::new(unsafe { StarlarkStr::new(len, hash) }) +) -> AValueImpl<'v, impl AValue<'v, ExtraElem = usize> + Send + Sync> { + AValueImpl::::new(unsafe { StarlarkStr::new(len, hash) }) } -pub(crate) fn tuple_avalue<'v>(len: usize) -> impl AValue<'v, ExtraElem = Value<'v>> { - AValueImpl::::new(unsafe { Tuple::new(len) }) +pub(crate) fn tuple_avalue<'v>( + len: usize, +) -> AValueImpl<'v, impl AValue<'v, ExtraElem = Value<'v>>> { + AValueImpl::::new(unsafe { Tuple::new(len) }) } -pub(crate) fn frozen_tuple_avalue(len: usize) -> impl AValue<'static, ExtraElem = FrozenValue> { - AValueImpl::::new(unsafe { FrozenTuple::new(len) }) +pub(crate) fn frozen_tuple_avalue( + len: usize, +) -> AValueImpl<'static, impl AValue<'static, ExtraElem = FrozenValue>> { + AValueImpl::::new(unsafe { FrozenTuple::new(len) }) } pub(crate) fn list_avalue<'v>( content: ValueTyped<'v, Array<'v>>, -) -> impl AValue<'v, StarlarkValue = ListGen>, ExtraElem = ()> { - AValueImpl::::new(ListGen(ListData::new(content))) +) -> AValueImpl<'v, impl AValue<'v, StarlarkValue = ListGen>, ExtraElem = ()>> { + AValueImpl::::new(ListGen(ListData::new(content))) } -pub(crate) fn frozen_list_avalue(len: usize) -> impl AValue<'static, ExtraElem = FrozenValue> { - AValueImpl::::new(unsafe { ListGen(FrozenListData::new(len)) }) +pub(crate) fn frozen_list_avalue( + len: usize, +) -> AValueImpl<'static, impl AValue<'static, ExtraElem = FrozenValue>> { + AValueImpl::::new(unsafe { ListGen(FrozenListData::new(len)) }) } pub(crate) fn array_avalue<'v>( cap: u32, -) -> impl AValue<'v, StarlarkValue = Array<'v>, ExtraElem = Value<'v>> { - AValueImpl::::new(unsafe { Array::new(0, cap) }) +) -> AValueImpl<'v, impl AValue<'v, StarlarkValue = Array<'v>, ExtraElem = Value<'v>>> { + AValueImpl::::new(unsafe { Array::new(0, cap) }) } pub(crate) fn any_array_avalue( cap: usize, -) -> impl AValue<'static, StarlarkValue = AnyArray, ExtraElem = T> { - AValueImpl::::new(unsafe { AnyArray::new(cap) }) +) -> AValueImpl<'static, impl AValue<'static, StarlarkValue = AnyArray, ExtraElem = T>> { + AValueImpl::>::new(unsafe { AnyArray::new(cap) }) } pub(crate) fn simple + Send + Sync>( x: T, -) -> impl AValue<'static, ExtraElem = ()> + Send + Sync { +) -> AValueImpl<'static, impl AValue<'static, ExtraElem = ()> + Send + Sync> { assert!(!T::is_special(Private)); - AValueImpl::::new(x) + AValueImpl::>::new(x) } -pub(crate) fn complex<'v, C>(x: C) -> impl AValue<'v, ExtraElem = ()> +pub(crate) fn complex<'v, C>(x: C) -> AValueImpl<'v, impl AValue<'v, ExtraElem = ()>> where C: ComplexValue<'v>, C::Frozen: StarlarkValue<'static>, { assert!(!C::is_special(Private)); - AValueImpl::::new(x) + AValueImpl::>::new(x) } -pub(crate) fn complex_no_freeze<'v, C>(x: C) -> impl AValue<'v, ExtraElem = ()> +pub(crate) fn complex_no_freeze<'v, C>(x: C) -> AValueImpl<'v, impl AValue<'v, ExtraElem = ()>> where C: StarlarkValue<'v> + Trace<'v>, { assert!(!C::is_special(Private)); - AValueImpl::::new(x) + AValueImpl::>::new(x) } -pub(crate) trait AValueMode: Send + Sync + 'static {} - -// A type where the 
second element is in control of what instances are in scope -pub(crate) struct Direct; -impl AValueMode for Direct {} - -// A type that implements StarlarkValue but nothing else, so will never be stored -// in the heap (e.g. bool, None) -pub(crate) struct Basic; -impl AValueMode for Basic {} - -// A non-special type with no references to other Starlark values. -pub(crate) struct Simple; -impl AValueMode for Simple {} - -// A type that implements ComplexValue. -pub(crate) struct Complex; -impl AValueMode for Complex {} - -// A value which can be traced, but cannot be frozen. -pub(crate) struct ComplexNoFreeze; -impl AValueMode for ComplexNoFreeze {} - -// We want to define several types (Simple, Complex) that wrap a StarlarkValue, -// reimplement it, and do some things custom. The easiest way to avoid repeating -// the StarlarkValue trait each time is to make them all share a single wrapper, -// where Mode is one of Simple/Complex. +/// A value with extended (`AValue`) vtable methods. #[repr(C)] -pub(crate) struct AValueImpl(PhantomData, pub(crate) T); +pub(crate) struct AValueImpl<'v, T: AValue<'v>>(PhantomData, pub(crate) T::StarlarkValue); -impl AValueImpl { - pub(crate) const fn new(value: T) -> Self { +impl<'v, T: AValue<'v>> AValueImpl<'v, T> { + pub(crate) const fn new(value: T::StarlarkValue) -> Self { AValueImpl(PhantomData, value) } } -/// The overwrite operation in the heap requires that the LSB not be set. -/// For FrozenValue this is the case, but for Value the LSB is always set. -/// Fortunately, the consumer of the overwritten value reapplies the -/// FrozenValue/Value tags, so we can freely discard it here. -fn clear_lsb(x: usize) -> usize { - x & !1 -} +/// For types which are only allocated statically (never in heap). +/// Technically we can use `AValueSimple` for these, but this is more explicit and safe. 
+pub(crate) struct AValueBasic(PhantomData); -impl<'v, T: StarlarkValue<'v>> AValue<'v> for AValueImpl { +impl<'v, T: StarlarkValue<'v>> AValue<'v> for AValueBasic { type StarlarkValue = T; type ExtraElem = (); - fn extra_len(&self) -> usize { - 0 + fn extra_len(_value: &T) -> usize { + unreachable!("Basic types don't appear in the heap") } fn offset_of_extra() -> usize { - mem::size_of::() + unreachable!("Basic types don't appear in the heap") } unsafe fn heap_freeze( - _me: *mut AValueRepr, + _me: *mut AValueRepr, _freezer: &Freezer, ) -> anyhow::Result { unreachable!("Basic types don't appear in the heap") } - unsafe fn heap_copy(_me: *mut AValueRepr, _tracer: &Tracer<'v>) -> Value<'v> { + unsafe fn heap_copy( + _me: *mut AValueRepr, + _tracer: &Tracer<'v>, + ) -> Value<'v> { unreachable!("Basic types don't appear in the heap") } } -pub(crate) type StarlarkStrAValue = AValueImpl; +pub(crate) struct StarlarkStrAValue; -impl<'v> AValue<'v> for AValueImpl { +impl<'v> AValue<'v> for StarlarkStrAValue { type StarlarkValue = StarlarkStr; type ExtraElem = usize; - fn extra_len(&self) -> usize { - StarlarkStr::payload_len_for_len(self.1.len()) + fn extra_len(value: &StarlarkStr) -> usize { + StarlarkStr::payload_len_for_len(value.len()) } fn offset_of_extra() -> usize { @@ -275,45 +250,50 @@ impl<'v> AValue<'v> for AValueImpl { const IS_STR: bool = true; unsafe fn heap_freeze( - me: *mut AValueRepr, + me: *mut AValueRepr, freezer: &Freezer, ) -> anyhow::Result { debug_assert!( - (*me).payload.1.len() > 1, + (*me).payload.len() > 1, "short strings are allocated statically" ); - let s = (*me).payload.1.as_str(); + let s = (*me).payload.as_str(); let fv = freezer.alloc(s); debug_assert!(fv.is_str()); - AValueHeader::overwrite_with_forward::(me, ForwardPtr::new(fv.0.raw().ptr_value())); + AValueHeader::overwrite_with_forward::(me, ForwardPtr::new_frozen(fv)); Ok(fv) } - unsafe fn heap_copy(me: *mut AValueRepr, tracer: &Tracer<'v>) -> Value<'v> { + unsafe fn heap_copy( + me: *mut AValueRepr, + tracer: &Tracer<'v>, + ) -> Value<'v> { debug_assert!( - (*me).payload.1.len() > 1, + (*me).payload.len() > 1, "short strings are allocated statically" ); - let s = (*me).payload.1.as_str(); + let s = (*me).payload.as_str(); let v = tracer.alloc_str(s); debug_assert!(v.is_str()); - AValueHeader::overwrite_with_forward::( + AValueHeader::overwrite_with_forward::( me, - ForwardPtr::new(v.0.raw().ptr_value() & !1), + ForwardPtr::new_unfrozen(v), ); v } } -impl<'v> AValue<'v> for AValueImpl> { +pub(crate) struct AValueTuple; + +impl<'v> AValue<'v> for AValueTuple { type StarlarkValue = Tuple<'v>; type ExtraElem = Value<'v>; - fn extra_len(&self) -> usize { - self.1.len() + fn extra_len(value: &Tuple<'v>) -> usize { + value.len() } fn offset_of_extra() -> usize { @@ -321,65 +301,69 @@ impl<'v> AValue<'v> for AValueImpl> { } unsafe fn heap_freeze( - me: *mut AValueRepr, + me: *mut AValueRepr, freezer: &Freezer, ) -> anyhow::Result { debug_assert!( - (*me).payload.1.len() != 0, + (*me).payload.len() != 0, "empty tuple is allocated statically" ); AValueForward::assert_does_not_overwrite_extra::(); - let content = (*me).payload.1.content(); + let content = (*me).payload.content(); - let (fv, r, extra) = - freezer.reserve_with_extra::>(content.len()); - AValueHeader::overwrite_with_forward::(me, ForwardPtr::new(fv.0.raw().ptr_value())); + let (fv, r, extra) = freezer.reserve_with_extra::(content.len()); + AValueHeader::overwrite_with_forward::(me, ForwardPtr::new_frozen(fv)); // TODO: this allocation is unnecessary let 
frozen_values = content.try_map(|v| freezer.freeze(*v))?; - r.fill(AValueImpl( - PhantomData::, - FrozenTuple::new(content.len()), - )); + r.fill(FrozenTuple::new(content.len())); + + let extra = &mut *extra; maybe_uninit_write_slice(extra, &frozen_values); Ok(fv) } - unsafe fn heap_copy(me: *mut AValueRepr, tracer: &Tracer<'v>) -> Value<'v> { + unsafe fn heap_copy( + me: *mut AValueRepr, + tracer: &Tracer<'v>, + ) -> Value<'v> { debug_assert!( - (*me).payload.1.len() != 0, + (*me).payload.len() != 0, "empty tuple is allocated statically" ); AValueForward::assert_does_not_overwrite_extra::(); - let content = (*me).payload.1.content_mut(); + let content = (*me).payload.content_mut(); let (v, r, extra) = tracer.reserve_with_extra::(content.len()); - let x = AValueHeader::overwrite_with_forward::( + let x = AValueHeader::overwrite_with_forward::( me, - ForwardPtr::new(clear_lsb(v.0.raw().ptr_value())), + ForwardPtr::new_unfrozen(v), ); - debug_assert_eq!(content.len(), x.1.len()); + debug_assert_eq!(content.len(), x.len()); for elem in content.iter_mut() { tracer.trace(elem); } r.fill(x); + let extra = unsafe { &mut *extra }; maybe_uninit_write_slice(extra, content); v } } -impl<'v> AValue<'v> for AValueImpl { +pub(crate) struct AValueFrozenTuple; + +impl<'v> AValue<'v> for AValueFrozenTuple { type StarlarkValue = FrozenTuple; type ExtraElem = FrozenValue; - fn extra_len(&self) -> usize { - self.1.len() + fn extra_len(value: &FrozenTuple) -> usize { + value.len() } fn offset_of_extra() -> usize { @@ -387,23 +371,28 @@ impl<'v> AValue<'v> for AValueImpl { } unsafe fn heap_freeze( - _me: *mut AValueRepr, + _me: *mut AValueRepr, _freezer: &Freezer, ) -> anyhow::Result { panic!("already frozen"); } - unsafe fn heap_copy(_me: *mut AValueRepr, _tracer: &Tracer<'v>) -> Value<'v> { + unsafe fn heap_copy( + _me: *mut AValueRepr, + _tracer: &Tracer<'v>, + ) -> Value<'v> { panic!("shouldn't be copying frozen values"); } } -impl<'v> AValue<'v> for AValueImpl>> { +pub(crate) struct AValueList; + +impl<'v> AValue<'v> for AValueList { type StarlarkValue = ListGen>; type ExtraElem = (); - fn extra_len(&self) -> usize { + fn extra_len(_value: &ListGen>) -> usize { 0 } @@ -412,26 +401,24 @@ impl<'v> AValue<'v> for AValueImpl>> { } unsafe fn heap_freeze( - me: *mut AValueRepr, + me: *mut AValueRepr, freezer: &Freezer, ) -> anyhow::Result { - let content = (*me).payload.1.0.content(); + let content = (*me).payload.0.content(); if content.is_empty() { - let fv = FrozenValue::new_repr(&VALUE_EMPTY_FROZEN_LIST); - AValueHeader::overwrite_with_forward::( + let fv = FrozenValue::new_empty_list(); + AValueHeader::overwrite_with_forward::( me, - ForwardPtr::new(fv.0.raw().ptr_value()), + ForwardPtr::new_frozen(fv), ); return Ok(fv); } - let (fv, r, extra) = freezer - .reserve_with_extra::>>(content.len()); - AValueHeader::overwrite_with_forward::(me, ForwardPtr::new(fv.0.raw().ptr_value())); - r.fill(AValueImpl::::new(ListGen(FrozenListData::new( - content.len(), - )))); + let (fv, r, extra) = freezer.reserve_with_extra::(content.len()); + AValueHeader::overwrite_with_forward::(me, ForwardPtr::new_frozen(fv)); + r.fill(ListGen(FrozenListData::new(content.len()))); + let extra = unsafe { &mut *extra }; assert_eq!(extra.len(), content.len()); for (elem_place, elem) in extra.iter_mut().zip(content) { elem_place.write(freezer.freeze(*elem)?); @@ -439,18 +426,23 @@ impl<'v> AValue<'v> for AValueImpl>> { Ok(fv) } - unsafe fn heap_copy(me: *mut AValueRepr, tracer: &Tracer<'v>) -> Value<'v> { - Self::heap_copy_impl(me, tracer, 
Trace::trace) + unsafe fn heap_copy( + me: *mut AValueRepr, + tracer: &Tracer<'v>, + ) -> Value<'v> { + heap_copy_impl::(me, tracer, Trace::trace) } } -impl<'v> AValue<'v> for AValueImpl> { +pub(crate) struct AValueFrozenList; + +impl<'v> AValue<'v> for AValueFrozenList { type StarlarkValue = ListGen; type ExtraElem = FrozenValue; - fn extra_len(&self) -> usize { - self.1.0.len() + fn extra_len(value: &ListGen) -> usize { + value.0.len() } fn offset_of_extra() -> usize { @@ -458,25 +450,30 @@ impl<'v> AValue<'v> for AValueImpl> { } unsafe fn heap_freeze( - _me: *mut AValueRepr, + _me: *mut AValueRepr, _freezer: &Freezer, ) -> anyhow::Result { panic!("already frozen"); } - unsafe fn heap_copy(_me: *mut AValueRepr, _tracer: &Tracer<'v>) -> Value<'v> { + unsafe fn heap_copy( + _me: *mut AValueRepr, + _tracer: &Tracer<'v>, + ) -> Value<'v> { panic!("shouldn't be copying frozen values"); } } -impl<'v> AValue<'v> for AValueImpl> { +pub(crate) struct AValueArray; + +impl<'v> AValue<'v> for AValueArray { type StarlarkValue = Array<'v>; type ExtraElem = Value<'v>; - fn extra_len(&self) -> usize { + fn extra_len(value: &Array<'v>) -> usize { // Note we return capacity, not length here. - self.1.capacity() + value.capacity() } fn offset_of_extra() -> usize { @@ -484,51 +481,54 @@ impl<'v> AValue<'v> for AValueImpl> { } unsafe fn heap_freeze( - _me: *mut AValueRepr, + _me: *mut AValueRepr, _freezer: &Freezer, ) -> anyhow::Result { panic!("arrays should not be frozen") } - unsafe fn heap_copy(me: *mut AValueRepr, tracer: &Tracer<'v>) -> Value<'v> { + unsafe fn heap_copy( + me: *mut AValueRepr, + tracer: &Tracer<'v>, + ) -> Value<'v> { debug_assert!( - (*me).payload.1.capacity() != 0, + (*me).payload.capacity() != 0, "empty array is allocated statically" ); - if (*me).payload.1.len() == 0 { + if (*me).payload.len() == 0 { return FrozenValue::new_repr(VALUE_EMPTY_ARRAY.repr()).to_value(); } AValueForward::assert_does_not_overwrite_extra::(); - let content = (*me).payload.1.content_mut(); + let content = (*me).payload.content_mut(); let (v, r, extra) = tracer.reserve_with_extra::(content.len()); - let x = AValueHeader::overwrite_with_forward::( + let x = AValueHeader::overwrite_with_forward::( me, - ForwardPtr::new(clear_lsb(v.0.raw().ptr_value())), + ForwardPtr::new_unfrozen(v), ); - debug_assert_eq!(content.len(), x.1.len()); + debug_assert_eq!(content.len(), x.len()); content.trace(tracer); // Note when copying we are dropping extra capacity. 
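// (Copying into an array whose capacity equals its length is fine here:
// spare capacity only matters while the mutable heap is still growing the
// array in place; a GC copy starts from the live contents alone.)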
- r.fill(AValueImpl::::new(Array::new( - content.len() as u32, - content.len() as u32, - ))); + r.fill(Array::new(content.len() as u32, content.len() as u32)); + let extra = unsafe { &mut *extra }; maybe_uninit_write_slice(extra, content); v } } -impl<'v, T: Debug + 'static> AValue<'v> for AValueImpl> { +pub(crate) struct AValueAnyArray(PhantomData); + +impl<'v, T: Debug + 'static> AValue<'v> for AValueAnyArray { type StarlarkValue = AnyArray; type ExtraElem = T; - fn extra_len(&self) -> usize { - self.1.len + fn extra_len(value: &AnyArray) -> usize { + value.len } fn offset_of_extra() -> usize { @@ -536,43 +536,59 @@ impl<'v, T: Debug + 'static> AValue<'v> for AValueImpl> { } unsafe fn heap_freeze( - _me: *mut AValueRepr, + _me: *mut AValueRepr, _freezer: &Freezer, ) -> anyhow::Result { panic!("AnyArray for now can only be allocated in FrozenHeap"); } - unsafe fn heap_copy(_me: *mut AValueRepr, _tracer: &Tracer<'v>) -> Value<'v> { + unsafe fn heap_copy( + _me: *mut AValueRepr, + _tracer: &Tracer<'v>, + ) -> Value<'v> { panic!("AnyArray for now can only be allocated in FrozenHeap"); } } -impl AValueImpl { - /// `heap_freeze` implementation for simple `StarlarkValue` and `StarlarkFloat` - /// (`StarlarkFloat` is logically a simple type, but it is not considered simple type). - unsafe fn heap_freeze_simple_impl<'v>( - me: *mut AValueRepr, - freezer: &Freezer, - ) -> anyhow::Result - where - Self: AValue<'v, ExtraElem = ()>, - { - let (fv, r) = freezer.reserve::(); - let x = AValueHeader::overwrite_with_forward::( - me, - ForwardPtr::new(fv.0.raw().ptr_value()), - ); - r.fill(x); - Ok(fv) - } +/// If `A` provides a statically allocated frozen value, +/// replace object with the forward to that frozen value instead of using default freeze. +unsafe fn try_freeze_static<'v, A>(me: *mut AValueRepr) -> Option +where + A: AValue<'v>, +{ + let f = (*me).payload.try_freeze_static()?; + + drop(AValueHeader::overwrite_with_forward::( + me, + ForwardPtr::new_frozen(f), + )); + Some(f) } -impl> AValue<'static> for AValueImpl { +/// `heap_freeze` implementation for simple `StarlarkValue` and `StarlarkFloat` +/// (`StarlarkFloat` is logically a simple type, but it is not considered simple type). +unsafe fn heap_freeze_simple_impl<'v, A>( + me: *mut AValueRepr, + freezer: &Freezer, +) -> anyhow::Result +where + A: AValue<'v, ExtraElem = ()>, +{ + let (fv, r) = freezer.reserve::(); + let x = + AValueHeader::overwrite_with_forward::(me, ForwardPtr::new_frozen(fv)); + r.fill(x); + Ok(fv) +} + +pub(crate) struct AValueSimple(PhantomData); + +impl> AValue<'static> for AValueSimple { type StarlarkValue = T; type ExtraElem = (); - fn extra_len(&self) -> usize { + fn extra_len(_value: &T) -> usize { 0 } @@ -581,40 +597,45 @@ impl> AValue<'static> for AValueImpl { } unsafe fn heap_freeze( - me: *mut AValueRepr, + me: *mut AValueRepr, freezer: &Freezer, ) -> anyhow::Result { - Self::heap_freeze_simple_impl(me, freezer) + if let Some(f) = try_freeze_static::(me) { + return Ok(f); + } + + heap_freeze_simple_impl::(me, freezer) } - unsafe fn heap_copy(me: *mut AValueRepr, tracer: &Tracer<'static>) -> Value<'static> { - Self::heap_copy_impl(me, tracer, |_v, _tracer| {}) + unsafe fn heap_copy( + me: *mut AValueRepr, + tracer: &Tracer<'static>, + ) -> Value<'static> { + heap_copy_impl::(me, tracer, |_v, _tracer| {}) } } -impl AValueImpl { - /// Common `heap_copy` implementation for types without extra. 
- unsafe fn heap_copy_impl<'v>( - me: *mut AValueRepr, - tracer: &Tracer<'v>, - trace: impl FnOnce(&mut C, &Tracer<'v>), - ) -> Value<'v> - where - Self: AValue<'v, ExtraElem = ()>, - { - let (v, r) = tracer.reserve::(); - let mut x = AValueHeader::overwrite_with_forward::( - me, - ForwardPtr::new(clear_lsb(v.0.raw().ptr_value())), - ); - // We have to put the forwarding node in _before_ we trace in case there are cycles - trace(&mut x.1, tracer); - r.fill(x); - v - } +/// Common `heap_copy` implementation for types without extra. +unsafe fn heap_copy_impl<'v, A>( + me: *mut AValueRepr, + tracer: &Tracer<'v>, + trace: impl FnOnce(&mut A::StarlarkValue, &Tracer<'v>), +) -> Value<'v> +where + A: AValue<'v, ExtraElem = ()>, +{ + let (v, r) = tracer.reserve::(); + let mut x = + AValueHeader::overwrite_with_forward::(me, ForwardPtr::new_unfrozen(v)); + // We have to put the forwarding node in _before_ we trace in case there are cycles + trace(&mut x, tracer); + r.fill(x); + v } -impl<'v, T> AValue<'v> for AValueImpl +pub(crate) struct AValueComplex(PhantomData); + +impl<'v, T> AValue<'v> for AValueComplex where T: ComplexValue<'v>, T::Frozen: StarlarkValue<'static>, @@ -623,7 +644,7 @@ where type ExtraElem = (); - fn extra_len(&self) -> usize { + fn extra_len(_value: &T) -> usize { 0 } @@ -632,16 +653,20 @@ where } unsafe fn heap_freeze( - me: *mut AValueRepr, + me: *mut AValueRepr, freezer: &Freezer, ) -> anyhow::Result { - let (fv, r) = freezer.reserve::>(); - let x = AValueHeader::overwrite_with_forward::( + if let Some(f) = try_freeze_static::(me) { + return Ok(f); + } + + let (fv, r) = freezer.reserve::>(); + let x = AValueHeader::overwrite_with_forward::( me, - ForwardPtr::new(fv.0.raw().ptr_value()), + ForwardPtr::new_frozen(fv), ); - let res = x.1.freeze(freezer)?; - r.fill(AValueImpl::::new(res)); + let res = x.freeze(freezer)?; + r.fill(res); if TypeId::of::() == TypeId::of::() { let frozen_def = fv.downcast_frozen_ref().unwrap(); freezer.frozen_defs.borrow_mut().push(frozen_def); @@ -649,12 +674,17 @@ where Ok(fv) } - unsafe fn heap_copy(me: *mut AValueRepr, tracer: &Tracer<'v>) -> Value<'v> { - Self::heap_copy_impl(me, tracer, Trace::trace) + unsafe fn heap_copy( + me: *mut AValueRepr, + tracer: &Tracer<'v>, + ) -> Value<'v> { + heap_copy_impl::(me, tracer, Trace::trace) } } -impl<'v, T> AValue<'v> for AValueImpl +pub(crate) struct AValueComplexNoFreeze(PhantomData); + +impl<'v, T> AValue<'v> for AValueComplexNoFreeze where T: StarlarkValue<'v> + Trace<'v>, { @@ -662,52 +692,40 @@ where type ExtraElem = (); - fn extra_len(&self) -> usize { + fn extra_len(_value: &T) -> usize { 0 } fn offset_of_extra() -> usize { - mem::size_of::() + mem::size_of::() } unsafe fn heap_freeze( - _me: *mut AValueRepr, + _me: *mut AValueRepr, _freezer: &Freezer, ) -> anyhow::Result { Err(AValueError::CannotBeFrozen(type_name::()).into()) } - unsafe fn heap_copy(me: *mut AValueRepr, tracer: &Tracer<'v>) -> Value<'v> { - Self::heap_copy_impl(me, tracer, Trace::trace) + unsafe fn heap_copy( + me: *mut AValueRepr, + tracer: &Tracer<'v>, + ) -> Value<'v> { + heap_copy_impl::(me, tracer, Trace::trace) } } #[derive(Debug, Display, ProvidesStaticType, Allocative)] -#[display(fmt = "BlackHole")] +#[display("BlackHole")] pub(crate) struct BlackHole(pub(crate) ValueAllocSize); -impl Serialize for BlackHole { - fn serialize(&self, _s: S) -> Result - where - S: Serializer, - { - panic!() - } -} - -impl<'v, Mode: AValueMode, T: StarlarkValue<'v>> Serialize for AValueImpl { - fn serialize(&self, s: S) -> Result - where 
- S: Serializer, - { - erased_serde::serialize(&self.1, s) - } -} - #[cfg(test)] mod tests { use crate::environment::Module; + use crate::values::dict::AllocDict; use crate::values::types::list::value::ListData; + use crate::values::UnpackValue; + use crate::values::Value; #[test] fn tuple_cycle_freeze() { @@ -720,4 +738,24 @@ mod tests { module.set("t", tuple); module.freeze().unwrap(); } + + #[test] + fn test_try_freeze_static() { + // `try_freeze_static` is only implemented for `dict` at the moment of writing, + // so use it for the test. + + let module = Module::new(); + let d0 = module.heap().alloc(AllocDict::EMPTY); + let d1 = module.heap().alloc(AllocDict::EMPTY); + // Pointers are not equal. + assert_ne!(d0.0.raw(), d1.0.raw()); + + module.set_extra_value(module.heap().alloc((d0, d1))); + + let module = module.freeze().unwrap(); + let (d0, d1) = + <(Value, Value)>::unpack_value_err(module.extra_value().unwrap().to_value()).unwrap(); + // Pointers are equal. + assert_eq!(d0.0.raw(), d1.0.raw()); + } } diff --git a/starlark-rust/starlark/src/values/layout/complex.rs b/starlark-rust/starlark/src/values/layout/complex.rs index b25fa5e62a024..2ef54ced288b1 100644 --- a/starlark-rust/starlark/src/values/layout/complex.rs +++ b/starlark-rust/starlark/src/values/layout/complex.rs @@ -15,6 +15,7 @@ * limitations under the License. */ +use std::convert::Infallible; use std::fmt; use std::marker::PhantomData; @@ -23,11 +24,15 @@ use dupe::Clone_; use dupe::Copy_; use dupe::Dupe_; use either::Either; +use starlark_syntax::value_error; use crate::typing::Ty; use crate::values::type_repr::StarlarkTypeRepr; use crate::values::AllocValue; use crate::values::ComplexValue; +use crate::values::Freeze; +use crate::values::Freezer; +use crate::values::FrozenValueTyped; use crate::values::StarlarkValue; use crate::values::Trace; use crate::values::Tracer; @@ -62,6 +67,19 @@ where } } + /// Downcast. + pub fn new_err(value: Value<'v>) -> anyhow::Result { + match Self::new(value) { + Some(v) => Ok(v), + None => Err(value_error!( + "Expected value of type `{}`, got: `{}`", + T::TYPE, + value.to_string_for_type_error() + ) + .into_anyhow()), + } + } + /// Get the value back. #[inline] pub fn to_value(self) -> Value<'v> { @@ -88,6 +106,8 @@ where T: ComplexValue<'v>, T::Frozen: StarlarkValue<'static>, { + type Canonical = ::Canonical; + fn starlark_type_repr() -> Ty { T::starlark_type_repr() } @@ -109,8 +129,10 @@ where T: ComplexValue<'v>, T::Frozen: StarlarkValue<'static>, { - fn unpack_value(value: Value<'v>) -> Option { - Self::new(value) + type Error = Infallible; + + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + Ok(Self::new(value)) } } @@ -146,48 +168,36 @@ where } } +impl<'v, T> Freeze for ValueTypedComplex<'v, T> +where + T: ComplexValue<'v>, + T::Frozen: StarlarkValue<'static>, +{ + type Frozen = FrozenValueTyped<'static, T::Frozen>; + + fn freeze(self, freezer: &Freezer) -> anyhow::Result { + FrozenValueTyped::new_err(self.0.freeze(freezer)?) 
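// (`freeze` returns an untyped `FrozenValue`, so `new_err` re-checks the
// type; a failed downcast surfaces as an error rather than a panic.)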
+ } +} + #[cfg(test)] mod tests { - use allocative::Allocative; use anyhow::Context; use either::Either; use starlark_derive::starlark_module; - use starlark_derive::Freeze; - use starlark_derive::NoSerialize; - use starlark_derive::Trace; use crate as starlark; - use crate::any::ProvidesStaticType; use crate::assert::Assert; use crate::const_frozen_string; use crate::environment::GlobalsBuilder; + use crate::tests::util::TestComplexValue; use crate::values::layout::complex::ValueTypedComplex; - use crate::values::starlark_value; - use crate::values::StarlarkValue; use crate::values::Value; - use crate::values::ValueLike; - - #[derive( - Trace, - Freeze, - Debug, - derive_more::Display, - Allocative, - ProvidesStaticType, - NoSerialize - )] - struct TestValueOfComplex(V); - - #[starlark_value(type = "test_value_of_complex")] - impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for TestValueOfComplex where - Self: ProvidesStaticType<'v> - { - } #[starlark_module] fn test_module(globals: &mut GlobalsBuilder) { fn test_unpack<'v>( - v: ValueTypedComplex<'v, TestValueOfComplex>>, + v: ValueTypedComplex<'v, TestComplexValue>>, ) -> anyhow::Result<&'v str> { Ok(match v.unpack() { Either::Left(v) => v.0.unpack_str().context("not a string")?, @@ -202,8 +212,8 @@ mod tests { a.globals_add(test_module); a.setup_eval(|eval| { let s = eval.heap().alloc("test1"); - let x = eval.heap().alloc_complex(TestValueOfComplex(s)); - let y = eval.frozen_heap().alloc_simple(TestValueOfComplex( + let x = eval.heap().alloc(TestComplexValue(s)); + let y = eval.frozen_heap().alloc(TestComplexValue( const_frozen_string!("test2").to_frozen_value(), )); eval.module().set("x", x); diff --git a/starlark-rust/starlark/src/values/layout/heap/mod.rs b/starlark-rust/starlark/src/values/layout/heap.rs similarity index 100% rename from starlark-rust/starlark/src/values/layout/heap/mod.rs rename to starlark-rust/starlark/src/values/layout/heap.rs diff --git a/starlark-rust/starlark/src/values/layout/heap/allocator.rs b/starlark-rust/starlark/src/values/layout/heap/allocator.rs new file mode 100644 index 0000000000000..058b2374a879f --- /dev/null +++ b/starlark-rust/starlark/src/values/layout/heap/allocator.rs @@ -0,0 +1,20 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +pub(crate) mod alloc; +pub(crate) mod api; +pub(crate) mod bumpalo; diff --git a/starlark-rust/starlark/src/values/layout/heap/allocator/alloc.rs b/starlark-rust/starlark/src/values/layout/heap/allocator/alloc.rs new file mode 100644 index 0000000000000..4f1ab3bee86b9 --- /dev/null +++ b/starlark-rust/starlark/src/values/layout/heap/allocator/alloc.rs @@ -0,0 +1,32 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Allocator for the Starlark heap. +//! +//! The idea of the allocator is this: most allocation should be as fast as a pointer increment. +//! To achieve that, memory is allocated in chunks, and these pointer increments happen +//! inside the chunk. When the chunk is full, a new chunk is allocated. +//! +//! When heap construction is finished, we need to release the memory. +//! The last chunk is typically half full, so the heap keeps its half of the chunk, +//! and the other half is shared with the following heap. + +pub(crate) mod allocator; +pub(crate) mod chain; +pub(crate) mod chunk; +pub(crate) mod chunk_part; +pub(crate) mod per_thread; diff --git a/starlark-rust/starlark/src/values/layout/heap/allocator/alloc/allocator.rs b/starlark-rust/starlark/src/values/layout/heap/allocator/alloc/allocator.rs new file mode 100644 index 0000000000000..1b302b290113e --- /dev/null +++ b/starlark-rust/starlark/src/values/layout/heap/allocator/alloc/allocator.rs @@ -0,0 +1,297 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::cell::Cell; +use std::cell::UnsafeCell; +use std::fmt; +use std::fmt::Debug; +use std::mem; +use std::mem::MaybeUninit; +use std::ptr::NonNull; +use std::slice; + +use crate::values::layout::aligned_size::AlignedSize; +use crate::values::layout::heap::allocator::alloc::chain::ChunkChain; +use crate::values::layout::heap::allocator::alloc::chain::ChunkChainIterator; +use crate::values::layout::heap::allocator::alloc::per_thread::thread_local_alloc_at_least; +use crate::values::layout::heap::allocator::alloc::per_thread::thread_local_release; +use crate::values::layout::heap::allocator::api::ArenaAllocator; +use crate::values::layout::heap::allocator::api::ChunkAllocationDirection; +use crate::values::layout::value_alloc_size::ValueAllocSize; + +pub(crate) struct ChunkAllocator { + /// The current chunk in the chunk chain is partially filled. + /// The rest of the chain contains allocated data. + chain: UnsafeCell<ChunkChain>, + // TODO(nga): we don't need pointers after the heap is frozen. + /// Pointer to the currently filled part of the chunk. + current_ptr: Cell<NonNull<usize>>, + /// Pointer to the end of the current chunk part.
+ end_ptr: Cell<NonNull<usize>>, +} + +impl Debug for ChunkAllocator { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ChunkAllocator").finish_non_exhaustive() + } +} + +unsafe impl Send for ChunkAllocator {} + +impl Default for ChunkAllocator { + #[inline] + fn default() -> ChunkAllocator { + let chain = ChunkChain::default(); + let current_ptr = Cell::new(chain.begin()); + let end_ptr = Cell::new(chain.begin()); + ChunkAllocator { + chain: UnsafeCell::new(chain), + current_ptr, + end_ptr, + } + } +} + +impl Drop for ChunkAllocator { + fn drop(&mut self) { + self.chain.get_mut().clear_with(&mut thread_local_release); + } +} + +impl ChunkAllocator { + fn replace_chain(&self, chain: ChunkChain) -> (ChunkChain, NonNull<usize>) { + unsafe { + let current_ptr = self.current_ptr.replace(chain.begin()); + self.end_ptr.set(chain.end()); + let chain = mem::replace(&mut *self.chain.get(), chain); + (chain, current_ptr) + } + } + + fn take_chain(&self) -> (ChunkChain, NonNull<usize>) { + self.replace_chain(ChunkChain::default()) + } + + #[inline] + fn try_alloc_fast(&self, len: AlignedSize) -> Option<NonNull<u8>> { + let rem = AlignedSize::ptr_diff(self.current_ptr.get(), self.end_ptr.get()); + if rem >= len { + let ptr = self.current_ptr.get(); + unsafe { + self.current_ptr.set(NonNull::new_unchecked( + ptr.as_ptr().byte_add(len.bytes() as usize), + )); + } + Some(ptr.cast()) + } else { + None + } + } + + #[cold] + fn alloc_slow(&self, len: AlignedSize) -> NonNull<u8> { + let (chain, current_ptr) = self.take_chain(); + let (rem_chain, after) = unsafe { chain.split_at_ptr(current_ptr) }; + thread_local_release(after); + + let required_len = len + ChunkChain::HEADER_SIZE; + let next_chunk = thread_local_alloc_at_least(required_len, rem_chain.depth()); + + let next_chain = ChunkChain::new(next_chunk, rem_chain); + + self.replace_chain(next_chain); + + self.try_alloc_fast(len) + .expect("try_alloc_fast must not fail in alloc_slow") + } +} + +pub(crate) struct ChunkRevIterator<'a> { + current: &'a [MaybeUninit<u8>], + chain: ChunkChainIterator<'a>, +} + +impl<'a> Iterator for ChunkRevIterator<'a> { + type Item = &'a [MaybeUninit<u8>]; + + #[inline] + fn next(&mut self) -> Option<Self::Item> { + if !self.current.is_empty() { + // Reset to an empty slice and return.
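// (`mem::take` leaves `self.current` as an empty slice, so the next call
// to `next` falls through to walking the rest of the chunk chain.)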
+ return Some(mem::take(&mut self.current)); + } + loop { + let chain = self.chain.next()?; + let data = chain.data_bytes(); + if !data.is_empty() { + return Some(data); + } + } + } +} + +impl ArenaAllocator for ChunkAllocator { + fn allocated_bytes(&self) -> usize { + unsafe { (*self.chain.get()).allocated_bytes() } + } + + fn remaining_capacity(&self) -> usize { + AlignedSize::ptr_diff(self.current_ptr.get(), self.end_ptr.get()).bytes() as usize + } + + fn allocation_overhead(&self) -> usize { + unsafe { + let allocated_bytes_with_metadata = (*self.chain.get()).allocated_bytes_with_metadata(); + allocated_bytes_with_metadata.saturating_sub(self.allocated_bytes()) + } + } + + #[inline] + fn alloc(&self, size: ValueAllocSize) -> NonNull { + if let Some(ptr) = self.try_alloc_fast(size.size()) { + ptr + } else { + self.alloc_slow(size.size()) + } + } + + const CHUNK_ALLOCATION_DIRECTION: ChunkAllocationDirection = ChunkAllocationDirection::Up; + + type ChunkRevIterator<'a> = ChunkRevIterator<'a>; + + unsafe fn iter_allocated_chunks_rev(&self) -> ChunkRevIterator<'_> { + let begin = (*self.chain.get()).begin(); + ChunkRevIterator { + current: slice::from_raw_parts( + begin.cast().as_ptr(), + AlignedSize::ptr_diff(begin, self.current_ptr.get()).bytes() as usize, + ), + chain: (*self.chain.get()) + .prev() + .map(|next| next.iter()) + .unwrap_or_default(), + } + } + + fn finish(&mut self) { + let (chain, current_ptr) = self.take_chain(); + let (new_chain, rem) = unsafe { chain.split_at_ptr(current_ptr) }; + thread_local_release(rem); + let current_ptr = new_chain.end(); + self.replace_chain(new_chain); + self.current_ptr.set(current_ptr); + } +} + +#[cfg(test)] +mod tests { + use rand::rngs::SmallRng; + use rand::Rng; + use rand::SeedableRng; + + use crate::values::layout::aligned_size::AlignedSize; + use crate::values::layout::heap::allocator::alloc::allocator::ChunkAllocator; + use crate::values::layout::heap::allocator::alloc::chunk::Chunk; + use crate::values::layout::heap::allocator::api::ArenaAllocator; + use crate::values::layout::heap::repr::AValueHeader; + use crate::values::layout::value_alloc_size::ValueAllocSize; + + #[test] + fn test_small() { + let allocator = ChunkAllocator::default(); + let p0 = allocator.alloc(ValueAllocSize::new(AlignedSize::new_bytes( + 3 * AValueHeader::ALIGN, + ))); + let p1 = allocator.alloc(ValueAllocSize::new(AlignedSize::new_bytes( + 4 * AValueHeader::ALIGN, + ))); + let p2 = allocator.alloc(ValueAllocSize::new(AlignedSize::new_bytes( + 5 * AValueHeader::ALIGN, + ))); + assert_eq!( + AlignedSize::new_bytes(3 * AValueHeader::ALIGN), + AlignedSize::ptr_diff(p0.cast(), p1.cast()) + ); + assert_eq!( + AlignedSize::new_bytes(4 * AValueHeader::ALIGN), + AlignedSize::ptr_diff(p1.cast(), p2.cast()) + ); + + let chunks = unsafe { allocator.iter_allocated_chunks_rev().collect::>() }; + assert_eq!(1, chunks.len()); + assert_eq!( + AlignedSize::new_bytes((3 + 4 + 5) * AValueHeader::ALIGN).bytes() as usize, + chunks[0].len() + ); + } + + #[test] + fn test_big() { + let allocator = ChunkAllocator::default(); + allocator.alloc(ValueAllocSize::new( + AlignedSize::new_bytes(128 << 10) - Chunk::HEADER_SIZE, + )); + } + + fn random_iteration(i: u32) { + let mut rng = SmallRng::seed_from_u64(i as u64); + + let mut expected_total_size_bytes = 0; + let mut allocator = ChunkAllocator::default(); + for _ in 0..i { + let size = match rng.gen_range(0..=2) { + 0 => rng.gen_range(0..10), + 1 => rng.gen_range(0..100), + 2 => rng.gen_range(0..1000), + _ => unreachable!(), + }; + let 
Some(size) = + ValueAllocSize::try_new(AlignedSize::new_bytes(size * AValueHeader::ALIGN)) + else { + continue; + }; + allocator.alloc(size); + expected_total_size_bytes += size.bytes() as usize; + } + + let actual_total_size_bytes = unsafe { + allocator + .iter_allocated_chunks_rev() + .map(|c| c.len()) + .sum::() + }; + assert_eq!(expected_total_size_bytes, actual_total_size_bytes); + + // And do the same assertion after finishing. + allocator.finish(); + + let actual_total_size_bytes = unsafe { + allocator + .iter_allocated_chunks_rev() + .map(|c| c.len()) + .sum::() + }; + assert_eq!(expected_total_size_bytes, actual_total_size_bytes); + } + + #[test] + fn test_many() { + for i in 0..10000 { + random_iteration(i); + } + } +} diff --git a/starlark-rust/starlark/src/values/layout/heap/allocator/alloc/chain.rs b/starlark-rust/starlark/src/values/layout/heap/allocator/alloc/chain.rs new file mode 100644 index 0000000000000..531185cfe9454 --- /dev/null +++ b/starlark-rust/starlark/src/values/layout/heap/allocator/alloc/chain.rs @@ -0,0 +1,361 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::cell::Cell; +use std::mem; +use std::mem::MaybeUninit; +use std::ptr; +use std::ptr::NonNull; +use std::slice; + +use static_assertions::assert_eq_size; + +use crate::values::layout::aligned_size::AlignedSize; +use crate::values::layout::heap::allocator::alloc::chunk_part::ChunkPart; + +/// What is stored inside `ChunkChain.chunk` data. +#[repr(C, align(8))] // Must be aligned to 8 bytes because starlark values are 8 bytes aligned. +struct ChunkChainData { + prev: ChunkChain, + data: [MaybeUninit; 0], +} + +/// Linked list of chunk parts. +#[derive(Debug, Default, PartialEq)] +pub(crate) struct ChunkChain { + /// Chunk part data is `ChunkChainData`. + /// `None` means that the chain is empty. + chunk: Option, +} + +assert_eq_size!(ChunkChain, ChunkPart); + +impl Drop for ChunkChain { + fn drop(&mut self) { + self.clear_with(&mut |_| {}); + } +} + +static EMPTY_DATA: &[usize] = &[]; + +thread_local! { + /// Running `test_split_at_zero` test. + static SPLIT_AT_ZERO_TEST: Cell = const { Cell::new(false) }; +} + +impl ChunkChain { + /// The header is `ChunkChain`. + pub(crate) const HEADER_SIZE: AlignedSize = AlignedSize::of::(); + + #[inline] + pub(crate) fn new(chunk: ChunkPart, prev: ChunkChain) -> ChunkChain { + // Does not have to be strictly greater, but it is pointless otherwise. 
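// (A chunk of exactly `HEADER_SIZE` would hold the `ChunkChainData` header
// and zero bytes of payload: a link in the chain with no allocatable space.)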
+ assert!(chunk.len() > ChunkChain::HEADER_SIZE); + unsafe { + ptr::write( + chunk.begin().cast().as_ptr(), + ChunkChainData { prev, data: [] }, + ); + } + ChunkChain { chunk: Some(chunk) } + } + + #[inline] + pub(crate) fn prev(&self) -> Option<&ChunkChain> { + match &self.chunk { + Some(chunk) => unsafe { + Some(&(*chunk.begin().cast::<ChunkChainData>().as_ptr()).prev) + }, + None => None, + } + } + + /// Size of the memory available for allocation in the current chunk part, + /// that is, the size of the current chunk part minus the header. + pub(crate) fn current_chunk_available_len(&self) -> AlignedSize { + // This code can be made branchless if we allocate the prev statically. + match &self.chunk { + Some(chunk) => chunk.len().unchecked_sub(ChunkChain::HEADER_SIZE), + None => AlignedSize::ZERO, + } + } + + pub(crate) fn begin(&self) -> NonNull<usize> { + match &self.chunk { + Some(chunk) => chunk.ptr_at_offset(ChunkChain::HEADER_SIZE), + None => NonNull::new(EMPTY_DATA.as_ptr() as *mut usize).unwrap(), + } + } + + pub(crate) fn end(&self) -> NonNull<usize> { + match &self.chunk { + Some(chunk) => chunk.end(), + None => NonNull::new(EMPTY_DATA.as_ptr() as *mut usize).unwrap(), + } + } + + pub(crate) fn data_bytes(&self) -> &[MaybeUninit<u8>] { + unsafe { + slice::from_raw_parts( + self.begin().cast().as_ptr(), + self.current_chunk_available_len().bytes() as usize, + ) + } + } + + /// Split the current chunk in the chain at the given offset. + pub(crate) fn split_at(mut self, offset: AlignedSize) -> (ChunkChain, ChunkPart) { + let chunk = mem::take(&mut self.chunk); + match chunk { + None => { + assert_eq!(AlignedSize::ZERO, offset); + (ChunkChain::default(), ChunkPart::default()) + } + Some(chunk) => { + debug_assert!(chunk.len() > ChunkChain::HEADER_SIZE); + + let (before, after) = chunk.split_at_offset(offset + ChunkChain::HEADER_SIZE); + assert!(before.len() >= ChunkChain::HEADER_SIZE); + if before.len() == ChunkChain::HEADER_SIZE { + // This branch is only taken in tests of `ChunkChain`, + // because the real allocator never finishes with an empty last chunk part. + // For that reason we don't care about the somewhat suboptimal code below: + // we could extend the `after` chunk with the `before` chunk. + assert!(cfg!(test) && SPLIT_AT_ZERO_TEST.get()); + + unsafe { + // We are abandoning the `before` chunk, + // and it won't run the destructor of `ChunkChainData`, + // so it is safe to read the data. + let prev_part: ChunkChainData = ptr::read(before.begin().cast().as_ptr()); + (prev_part.prev, after) + } + } else { + ( + ChunkChain { + chunk: Some(before), + }, + after, + ) + } + } + } + } + + pub(crate) unsafe fn split_at_ptr(self, ptr: NonNull<usize>) -> (ChunkChain, ChunkPart) { + debug_assert!(ptr >= self.begin()); + debug_assert!(ptr <= self.end()); + let offset = + AlignedSize::new_bytes(ptr.as_ptr().byte_offset_from(self.begin().as_ptr()) as usize); + self.split_at(offset) + } + + /// Clear the content, invoking the provided callback to release the chunks. + pub(crate) fn clear_with(&mut self, chunk_drop: &mut impl FnMut(ChunkPart)) { + if let Some(chunk) = mem::take(&mut self.chunk) { + assert!(chunk.len() >= ChunkChain::HEADER_SIZE); + let mut prev_chain = chunk.begin().cast::<ChunkChainData>(); + unsafe { + prev_chain.as_mut().prev.clear_with(chunk_drop); + + debug_assert!(prev_chain.as_ref().prev.chunk.is_none()); + // This is a no-op, because `clear_with` has replaced the chunk with `None`.
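// (The recursive `clear_with` above has already emptied `prev`, so this
// `drop_in_place` releases nothing; it balances the `ptr::write` in
// `ChunkChain::new` so the header is logically dropped exactly once.)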
+ ptr::drop_in_place::(prev_chain.as_ptr()); + } + chunk_drop(chunk); + } + } + + pub(crate) fn iter(&self) -> ChunkChainIterator { + ChunkChainIterator { next: Some(self) } + } + + pub(crate) fn depth(&self) -> usize { + self.iter().count().saturating_sub(1) + } + + pub(crate) fn allocated_bytes(&self) -> usize { + let mut allocated_bytes = 0; + for chain in self.iter() { + allocated_bytes += chain.current_chunk_available_len().bytes() as usize; + } + allocated_bytes + } + + /// Returns the total size of the allocation backing this chunk, + /// including any overhead used by the allocator itself. + pub(crate) fn allocated_bytes_with_metadata(&self) -> usize { + let mut allocation_overhead = 0; + for chain in self.iter() { + if let Some(chunk) = &chain.chunk { + allocation_overhead += chunk.allocated_bytes_with_metadata(); + } + } + allocation_overhead + } +} + +/// Iterator over `ChunkChain` elements. +#[derive(Default)] +pub(crate) struct ChunkChainIterator<'a> { + next: Option<&'a ChunkChain>, +} + +impl<'a> Iterator for ChunkChainIterator<'a> { + type Item = &'a ChunkChain; + + fn next(&mut self) -> Option { + let next = self.next?; + self.next = next.prev(); + Some(next) + } +} + +#[cfg(test)] +mod tests { + use crate::values::layout::aligned_size::AlignedSize; + use crate::values::layout::heap::allocator::alloc::chain::ChunkChain; + use crate::values::layout::heap::allocator::alloc::chain::SPLIT_AT_ZERO_TEST; + use crate::values::layout::heap::allocator::alloc::chunk_part::ChunkPart; + use crate::values::layout::heap::repr::AValueHeader; + + #[test] + fn test_default() { + let chain = ChunkChain::default(); + assert_eq!(AlignedSize::ZERO, chain.current_chunk_available_len()); + } + + #[test] + fn test_new_drop() { + let chunk_part = + ChunkPart::alloc_at_least(AlignedSize::new_bytes(10 * AValueHeader::ALIGN)); + let chunk_len = chunk_part.len(); + let mut chain = ChunkChain::new(chunk_part, ChunkChain::default()); + assert_eq!( + chunk_len, + chain.current_chunk_available_len() + ChunkChain::HEADER_SIZE + ); + let mut drop_called = false; + chain.clear_with(&mut |_| { + assert!(!drop_called); + drop_called = true; + }); + assert!(drop_called); + } + + #[test] + fn test_new_drop_many() { + let chain = ChunkChain::new( + ChunkPart::alloc_at_least(AlignedSize::new_bytes(10 * AValueHeader::ALIGN)), + ChunkChain::default(), + ); + let chain = ChunkChain::new( + ChunkPart::alloc_at_least(AlignedSize::new_bytes(10 * AValueHeader::ALIGN)), + chain, + ); + let mut chain = ChunkChain::new( + ChunkPart::alloc_at_least(AlignedSize::new_bytes(10 * AValueHeader::ALIGN)), + chain, + ); + let mut drop_count = 0; + chain.clear_with(&mut |_| { + drop_count += 1; + }); + assert_eq!(3, drop_count); + } + + #[test] + fn test_split_at() { + let chunk_part = + ChunkPart::alloc_at_least(AlignedSize::new_bytes(20 * AValueHeader::ALIGN)); + let chunk_len = chunk_part.len(); + let chain = ChunkChain::new(chunk_part, ChunkChain::default()); + + let (new_chain, chunk) = chain.split_at(AlignedSize::new_bytes(3 * AValueHeader::ALIGN)); + assert_eq!( + AlignedSize::new_bytes(3 * AValueHeader::ALIGN), + new_chain.current_chunk_available_len() + ); + assert_eq!( + chunk_len - AlignedSize::new_bytes(3 * AValueHeader::ALIGN) - ChunkChain::HEADER_SIZE, + chunk.len() + ); + + // After split, the chain first chunk should be the same as split off chunk. 
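// (Both parts returned by `split_at` alias the same refcounted `Chunk`,
// which is why the reference count below is expected to be 2.)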
+ assert!(new_chain.chunk.as_ref().unwrap().chunk_ptr_eq(&chunk)); + assert_eq!(2, chunk.chunk_ref_count()); + } + + #[test] + fn test_split_at_len() { + let chunk_part = + ChunkPart::alloc_at_least(AlignedSize::new_bytes(20 * AValueHeader::ALIGN)); + let chain = ChunkChain::new(chunk_part, ChunkChain::default()); + let chain_len = chain.current_chunk_available_len(); + + let (new_chain, rem) = chain.split_at(chain_len); + assert_eq!(chain_len, new_chain.current_chunk_available_len()); + assert_eq!(AlignedSize::ZERO, rem.len()); + + assert_eq!(0, rem.chunk_ref_count(), "statically allocated empty chunk"); + assert_eq!(1, new_chain.chunk.as_ref().unwrap().chunk_ref_count()); + } + + #[test] + fn test_split_at_zero() { + struct ResetSplitAtZeroTest; + impl Drop for ResetSplitAtZeroTest { + fn drop(&mut self) { + assert!(SPLIT_AT_ZERO_TEST.get()); + SPLIT_AT_ZERO_TEST.set(false); + } + } + assert!(!SPLIT_AT_ZERO_TEST.get()); + SPLIT_AT_ZERO_TEST.set(true); + + let _reset = ResetSplitAtZeroTest; + + let chunk_part = + ChunkPart::alloc_at_least(AlignedSize::new_bytes(20 * AValueHeader::ALIGN)); + let chain = ChunkChain::new(chunk_part, ChunkChain::default()); + let chain_len = chain.current_chunk_available_len(); + let (new_chain, rem) = chain.split_at(AlignedSize::ZERO); + assert!( + new_chain.chunk.is_none(), + "Should be replaced with underlying chain" + ); + assert_eq!(chain_len, rem.len()); + } + + #[test] + fn test_depth() { + let chain = ChunkChain::default(); + assert_eq!(0, chain.depth()); + + let chain = ChunkChain::new( + ChunkPart::alloc_at_least(AlignedSize::new_bytes(10 * AValueHeader::ALIGN)), + chain, + ); + assert_eq!(1, chain.depth()); + + let chain = ChunkChain::new( + ChunkPart::alloc_at_least(AlignedSize::new_bytes(20 * AValueHeader::ALIGN)), + chain, + ); + assert_eq!(2, chain.depth()); + } +} diff --git a/starlark-rust/starlark/src/values/layout/heap/allocator/alloc/chunk.rs b/starlark-rust/starlark/src/values/layout/heap/allocator/alloc/chunk.rs new file mode 100644 index 0000000000000..eee41f1d590e3 --- /dev/null +++ b/starlark-rust/starlark/src/values/layout/heap/allocator/alloc/chunk.rs @@ -0,0 +1,289 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use core::slice; +use std::alloc; +use std::alloc::Layout; +use std::fmt; +use std::mem; +use std::mem::MaybeUninit; +use std::ptr; +use std::ptr::NonNull; +use std::sync::atomic; +use std::sync::atomic::AtomicU32; + +use dupe::Dupe; + +use crate::util::rtabort::rtabort; +use crate::values::layout::aligned_size::AlignedSize; + +#[repr(C)] +struct ChunkData { + ref_count: AtomicU32, + /// Data length in words. Does not include `ChunkData` header. + len: AlignedSize, + data: [MaybeUninit; 0], +} + +/// Identical to `ChunkData`, but does not have `UnsafeCell`, so it is statically allocated. +#[repr(C)] +struct ChunkDataEmpty { + /// Zero. + ref_count: u32, + /// Zero. 
+ len_words: AlignedSize, + data: [MaybeUninit; 0], +} + +const _: () = assert!(mem::size_of::() == mem::size_of::()); +const _: () = assert!(mem::align_of::() == mem::align_of::()); +const _: () = assert!(mem::size_of::() % mem::size_of::() == 0); + +static EMPTY_ALLOC: ChunkDataEmpty = ChunkDataEmpty { + ref_count: 0, + len_words: AlignedSize::ZERO, + data: [], +}; + +impl fmt::Debug for ChunkData { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ChunkData") + .field("ref_count", &self.ref_count) + .field("len", &self.len) + .finish_non_exhaustive() + } +} + +impl ChunkData { + fn layout_for_len(len: AlignedSize) -> Layout { + // We put `ChunkData` in the beginning of the allocated memory. + let alloc_len = Chunk::HEADER_SIZE + len; + alloc_len.layout() + } + + fn alloc_ref_count_1(len: AlignedSize) -> NonNull { + assert!(len > AlignedSize::ZERO); + + let layout = Self::layout_for_len(len); + let ptr = unsafe { alloc::alloc(layout) }; + let ptr = ptr as *mut ChunkData; + let ptr = match NonNull::new(ptr) { + None => alloc::handle_alloc_error(layout), + Some(ptr) => ptr, + }; + unsafe { + ptr::write( + ptr.as_ptr(), + ChunkData { + ref_count: AtomicU32::new(1), + len, + data: [], + }, + ); + } + let data: &mut [MaybeUninit] = unsafe { + slice::from_raw_parts_mut( + ptr.as_ref().begin().cast::>().as_ptr(), + len.bytes() as usize, + ) + }; + if cfg!(miri) { + // Tell Miri that the memory is uninitialized. + data.fill(MaybeUninit::uninit()); + } else if cfg!(test) { + // Initialize memory with garbage in tests to catch bugs. + data.fill(MaybeUninit::new(0x17)); + } + ptr + } + + #[inline] + fn begin(&self) -> NonNull { + unsafe { NonNull::new_unchecked(self.data.as_ptr() as *mut usize) } + } +} + +/// Refcounted chunk of memory. +#[derive(PartialEq, Eq)] +pub(crate) struct Chunk { + ptr: NonNull, +} + +impl Default for Chunk { + #[inline] + fn default() -> Chunk { + // This does not allocate anything, just returns a pointer to the static memory. + Chunk::alloc_at_least(AlignedSize::ZERO) + } +} + +impl fmt::Debug for Chunk { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Chunk").field("ptr", self.data()).finish() + } +} + +impl Chunk { + pub(crate) const HEADER_SIZE: AlignedSize = AlignedSize::of::(); + + /// Allocate chunk which can hold at least `len_words` words. + #[inline] + pub(crate) fn alloc_at_least(len: AlignedSize) -> Chunk { + if len == AlignedSize::ZERO { + Chunk { + ptr: NonNull::::from(&EMPTY_ALLOC).cast::(), + } + } else { + Self::alloc_at_least_not_empty(len) + } + } + + fn alloc_at_least_not_empty(len: AlignedSize) -> Chunk { + debug_assert!(len > AlignedSize::ZERO); + + let alloc_len = Chunk::HEADER_SIZE + len; + // Round up to power of two to avoid spacing in allocation. + // We don't have to use power of two according to jemalloc docs + // (https://jemalloc.net/jemalloc.3.html, see "Size classes") + // but power of two is easier to compute, + // and it is OK to allocate larger chunk than requested. 
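// (Worked example, assuming 8-byte alignment and an 8-byte `ChunkData`
// header: a request for 100 * 8 = 800 bytes becomes 808 with the header,
// which rounds up to 1024; the returned chunk then holds 1024 bytes minus
// the header, exactly what `test_alloc_release` below asserts.)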
+ let alloc_len = alloc_len.checked_next_power_of_two().unwrap(); + let len = alloc_len - Chunk::HEADER_SIZE; + Chunk { + ptr: ChunkData::alloc_ref_count_1(len), + } + } + + #[inline] + pub(crate) fn ref_count(&self) -> u32 { + self.data().ref_count.load(atomic::Ordering::Relaxed) + } + + #[cfg(test)] + pub(crate) fn ptr_eq(&self, other: &Chunk) -> bool { + self.ptr == other.ptr + } + + #[inline] + fn data(&self) -> &ChunkData { + unsafe { self.ptr.as_ref() } + } + + #[inline] + pub(crate) fn len(&self) -> AlignedSize { + self.data().len + } + + #[inline] + pub(crate) fn allocated_bytes_with_metadata(&self) -> usize { + if self.is_empty() { + // Allocated statically. + 0 + } else { + ChunkData::layout_for_len(self.len()).size() + } + } + + #[inline] + fn is_empty(&self) -> bool { + // Faster than checking the length + // because it doesn't require dereferencing the pointer. + ptr::eq( + self.ptr.as_ptr(), + &EMPTY_ALLOC as *const ChunkDataEmpty as *mut ChunkData, + ) + } + + #[inline] + pub(crate) fn begin(&self) -> NonNull { + self.data().begin() + } + + #[inline] + pub(crate) fn ptr_at_offset(&self, offset: AlignedSize) -> NonNull { + unsafe { NonNull::new_unchecked(self.begin().as_ptr().byte_add(offset.bytes() as usize)) } + } +} + +impl Drop for Chunk { + fn drop(&mut self) { + if self.is_empty() { + return; + } + + unsafe { + if self.data().ref_count.fetch_sub(1, atomic::Ordering::SeqCst) == 1 { + let layout = ChunkData::layout_for_len(self.data().len); + alloc::dealloc(self.ptr.as_ptr() as *mut u8, layout); + } + } + } +} + +impl Clone for Chunk { + #[inline] + fn clone(&self) -> Self { + if self.is_empty() { + return Chunk::default(); + } + + #[cold] + fn counter_overflow() -> ! { + rtabort!("Refcount overflow") + } + + let prev = self + .data() + .ref_count + .fetch_add(1, atomic::Ordering::Relaxed); + if prev > i32::MAX as u32 { + counter_overflow(); + } + Chunk { ptr: self.ptr } + } +} + +impl Dupe for Chunk {} + +#[cfg(test)] +mod tests { + use crate::values::layout::aligned_size::AlignedSize; + use crate::values::layout::heap::allocator::alloc::chunk::Chunk; + use crate::values::layout::heap::repr::AValueHeader; + + #[test] + fn test_empty() { + let chunk = Chunk::default(); + assert!(chunk.is_empty()); + assert_eq!(AlignedSize::ZERO, chunk.len()); + assert_eq!(0, chunk.ref_count()); + } + + #[test] + fn test_alloc_release() { + let chunk = Chunk::alloc_at_least(AlignedSize::new_bytes(100 * AValueHeader::ALIGN)); + assert_eq!( + AlignedSize::new_bytes(128 * AValueHeader::ALIGN) - Chunk::HEADER_SIZE, + chunk.len() + ); + assert_eq!(1, chunk.ref_count()); + let chunk2 = chunk.clone(); + assert_eq!(2, chunk.ref_count()); + drop(chunk); + assert_eq!(1, chunk2.ref_count()); + } +} diff --git a/starlark-rust/starlark/src/values/layout/heap/allocator/alloc/chunk_part.rs b/starlark-rust/starlark/src/values/layout/heap/allocator/alloc/chunk_part.rs new file mode 100644 index 0000000000000..73ad5406e34e3 --- /dev/null +++ b/starlark-rust/starlark/src/values/layout/heap/allocator/alloc/chunk_part.rs @@ -0,0 +1,170 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::ptr::NonNull; + +use dupe::Dupe; + +use crate::values::layout::aligned_size::AlignedSize; +use crate::values::layout::heap::allocator::alloc::chunk::Chunk; + +/// Chunk is shared by multiple `ChunkPart`s. +#[derive(Debug, Default, PartialEq)] +pub(crate) struct ChunkPart { + allocation: Chunk, + /// Offset from the chunk data. + begin: AlignedSize, + /// Offset from the chunk data. + end: AlignedSize, +} + +impl ChunkPart { + /// Create a chunk part from a whole chunk. + #[inline] + pub(crate) fn new(allocation: Chunk) -> ChunkPart { + let len = allocation.len(); + ChunkPart::new_subslice(allocation, AlignedSize::ZERO, len) + } + + #[inline] + pub(crate) fn new_subslice( + allocation: Chunk, + begin: AlignedSize, + end: AlignedSize, + ) -> ChunkPart { + assert!(begin <= end); + assert!(end <= allocation.len()); + ChunkPart { + allocation, + begin, + end, + } + } + + /// Allocate a chunk part to store at least `len`. + #[inline] + pub(crate) fn alloc_at_least(len: AlignedSize) -> ChunkPart { + ChunkPart::new(Chunk::alloc_at_least(len)) + } + + #[inline] + pub(crate) fn len(&self) -> AlignedSize { + self.end.unchecked_sub(self.begin) + } + + #[inline] + pub(crate) fn begin(&self) -> NonNull { + self.allocation.ptr_at_offset(self.begin) + } + + #[inline] + pub(crate) fn ptr_at_offset(&self, offset: AlignedSize) -> NonNull { + debug_assert!(offset <= self.len()); + self.allocation.ptr_at_offset(self.begin + offset) + } + + #[inline] + pub(crate) fn end(&self) -> NonNull { + self.allocation.ptr_at_offset(self.end) + } + + pub(crate) fn allocated_bytes_with_metadata(&self) -> usize { + if self.chunk_ref_count() == 1 { + self.allocation.allocated_bytes_with_metadata() + } else { + // We cannot know for sure, so try the best to estimate. + (self.len().bytes() + Chunk::HEADER_SIZE.bytes() / self.chunk_ref_count()) as usize + } + } + + /// Does this chunk part occupy the whole chunk? 
+ #[inline] + pub(crate) fn is_full(&self) -> bool { + self.len() == self.allocation.len() + } + + pub(crate) fn split_at_offset(self, offset: AlignedSize) -> (ChunkPart, ChunkPart) { + if offset == AlignedSize::ZERO { + (ChunkPart::default(), self) + } else if offset == self.len() { + (self, ChunkPart::default()) + } else { + assert!(offset <= self.len()); + let offset_relative_to_chunk = self.begin + offset; + ( + ChunkPart::new_subslice( + self.allocation.dupe(), + self.begin, + offset_relative_to_chunk, + ), + ChunkPart::new_subslice(self.allocation, offset_relative_to_chunk, self.end), + ) + } + } + + #[inline] + pub(crate) fn chunk_ref_count(&self) -> u32 { + self.allocation.ref_count() + } + + #[cfg(test)] + pub(crate) fn chunk_ptr_eq(&self, other: &ChunkPart) -> bool { + self.allocation.ptr_eq(&other.allocation) + } +} + +#[cfg(test)] +mod tests { + use crate::values::layout::aligned_size::AlignedSize; + use crate::values::layout::heap::allocator::alloc::chunk::Chunk; + use crate::values::layout::heap::allocator::alloc::chunk_part::ChunkPart; + use crate::values::layout::heap::repr::AValueHeader; + + #[test] + fn test_split_at() { + let chunk_part = ChunkPart::new(Chunk::alloc_at_least(AlignedSize::new_bytes( + 100 * AValueHeader::ALIGN, + ))); + let (a, b) = chunk_part.split_at_offset(AlignedSize::new_bytes(50 * AValueHeader::ALIGN)); + assert_eq!(a.allocation, b.allocation); + assert_eq!(2, a.allocation.ref_count()); + assert_eq!(a.end, b.begin); + drop(a); + assert_eq!(1, b.allocation.ref_count()); + } + + #[test] + fn test_split_at_zero() { + let chunk_part = ChunkPart::new(Chunk::alloc_at_least(AlignedSize::new_bytes( + 100 * AValueHeader::ALIGN, + ))); + let len = chunk_part.len(); + let (a, b) = chunk_part.split_at_offset(AlignedSize::ZERO); + assert_eq!(AlignedSize::ZERO, a.len()); + assert_eq!(0, a.chunk_ref_count()); + assert_eq!(len, b.len()); + assert_eq!(1, b.chunk_ref_count()); + } + + #[test] + fn test_is_full() { + let chunk_part = ChunkPart::new(Chunk::alloc_at_least(AlignedSize::new_bytes( + 100 * AValueHeader::ALIGN, + ))); + assert!(chunk_part.is_full()); + } +} diff --git a/starlark-rust/starlark/src/values/layout/heap/allocator/alloc/per_thread.rs b/starlark-rust/starlark/src/values/layout/heap/allocator/alloc/per_thread.rs new file mode 100644 index 0000000000000..97ff47628e088 --- /dev/null +++ b/starlark-rust/starlark/src/values/layout/heap/allocator/alloc/per_thread.rs @@ -0,0 +1,151 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+use std::cell::RefCell;
+use std::cmp;
+use std::mem;
+
+use crate::values::layout::aligned_size::AlignedSize;
+use crate::values::layout::heap::allocator::alloc::chain::ChunkChain;
+use crate::values::layout::heap::allocator::alloc::chunk::Chunk;
+use crate::values::layout::heap::allocator::alloc::chunk_part::ChunkPart;
+use crate::values::layout::heap::arena::MIN_ALLOC;
+
+/// Minimum usable cached allocation.
+const MIN_USABLE_ALLOC: AlignedSize = AlignedSize::new_bytes(
+    (
+        // * All chunks are used to store chains, so we need to include chain header.
+        // * We need to be able to store at least one object after the header.
+        ChunkChain::HEADER_SIZE.bytes() + MIN_ALLOC.bytes()
+    ) as usize,
+);
+
+#[derive(Debug, Default)]
+struct PerThreadChunkCache {
+    /// Keep a few last chunks.
+    ///
+    /// Frozen heap has two arenas: drop and non-drop. So we should keep at least two chunks.
+    last_chunks: [ChunkPart; 4],
+}
+
+impl PerThreadChunkCache {
+    /// Save a chunk to the thread-local cache if it is large enough.
+    fn store(&mut self, mut chunk: ChunkPart) {
+        for next in &mut self.last_chunks {
+            // Keep the largest chunks in the pool.
+            if chunk.len() > next.len() {
+                mem::swap(next, &mut chunk);
+            }
+        }
+    }
+
+    /// Fetch a chunk from the thread-local cache if the cache has a chunk large enough.
+    fn fetch(&mut self, len: AlignedSize) -> Option<ChunkPart> {
+        for next in &mut self.last_chunks {
+            // Pick any chunk which is large enough.
+            if next.len() >= len {
+                let result = mem::take(next);
+                return Some(result);
+            }
+        }
+        None
+    }
+}
+
+thread_local! {
+    static PER_THREAD_ALLOCATOR: RefCell<PerThreadChunkCache> = RefCell::new(PerThreadChunkCache::default());
+}
+
+fn next_chunk_size(chunk_count_in_bump: usize) -> AlignedSize {
+    // Replicate `bumpalo` behavior: 512 in the first chunk, double each next,
+    // but not greater than 2G.
+    // TODO(nga): we should stop doubling after 1M or so.
+    let size = AlignedSize::new_bytes(
+        512u32
+            .checked_shl(chunk_count_in_bump.try_into().unwrap())
+            .unwrap() as usize,
+    );
+    if size.bytes() == 0 {
+        AlignedSize::new_bytes(1 << 31)
+    } else {
+        size
+    }
+}
+
+/// Allocate a chunk large enough for the given number of words.
+pub(crate) fn thread_local_alloc_at_least(
+    len: AlignedSize,
+    chunk_count_in_bump: usize,
+) -> ChunkPart {
+    let chunk = if let Some(chunk) =
+        PER_THREAD_ALLOCATOR.with_borrow_mut(|allocator| allocator.fetch(len))
+    {
+        chunk
+    } else {
+        let next_chunk_size = next_chunk_size(chunk_count_in_bump) - Chunk::HEADER_SIZE;
+        let len = cmp::max(len, next_chunk_size);
+        ChunkPart::alloc_at_least(len)
+    };
+    debug_assert!(chunk.len() >= len);
+    chunk
+}
+
+/// Release chunk part to thread-local pool.
+#[allow(clippy::if_same_then_else)]
+#[inline]
+pub(crate) fn thread_local_release(chunk: ChunkPart) {
+    if chunk.is_full() {
+        // Chunk part is the full chunk. Better return it to malloc.
+        drop(chunk)
+    } else if chunk.len() < MIN_USABLE_ALLOC {
+        // It is not reusable.
+        drop(chunk)
+    } else if chunk.chunk_ref_count() == 1 {
+        // We could reuse the chunk, but since it is not shared,
+        // better return it to malloc.
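+        // (Editorial summary of the four branches: a full chunk and a tiny
+        // remainder both go straight back to malloc; a sole-owner part does
+        // too, since dropping it frees the backing chunk immediately; only a
+        // shared, usably-sized remainder is cached, because freeing it would
+        // not release the chunk while sibling parts still hold references.)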
+ drop(chunk); + } else { + PER_THREAD_ALLOCATOR.with_borrow_mut(|allocator| allocator.store(chunk)); + } +} + +#[cfg(test)] +mod tests { + use crate::values::layout::aligned_size::AlignedSize; + use crate::values::layout::heap::allocator::alloc::chunk_part::ChunkPart; + use crate::values::layout::heap::allocator::alloc::per_thread::PerThreadChunkCache; + use crate::values::layout::heap::repr::AValueHeader; + + #[test] + fn test_release_partial() { + let mut allocator = PerThreadChunkCache::default(); + let chunk = ChunkPart::alloc_at_least(AlignedSize::new_bytes(10 * AValueHeader::ALIGN)); + let (a, b) = chunk.split_at_offset(AlignedSize::new_bytes(5 * AValueHeader::ALIGN)); + let old_a_ptr = a.begin().as_ptr(); + let old_b_ptr = b.begin().as_ptr(); + allocator.store(a); + allocator.store(b); + let a = allocator + .fetch(AlignedSize::new_bytes(3 * AValueHeader::ALIGN)) + .unwrap(); + let b = allocator + .fetch(AlignedSize::new_bytes(3 * AValueHeader::ALIGN)) + .unwrap(); + assert!(old_a_ptr == a.begin().as_ptr() || old_a_ptr == b.begin().as_ptr()); + assert!(old_b_ptr == a.begin().as_ptr() || old_b_ptr == b.begin().as_ptr()); + } +} diff --git a/starlark-rust/starlark/src/values/layout/heap/allocator/api.rs b/starlark-rust/starlark/src/values/layout/heap/allocator/api.rs index 64049f074eb22..018e358927b1a 100644 --- a/starlark-rust/starlark/src/values/layout/heap/allocator/api.rs +++ b/starlark-rust/starlark/src/values/layout/heap/allocator/api.rs @@ -22,7 +22,6 @@ use crate::values::layout::value_alloc_size::ValueAllocSize; pub(crate) enum ChunkAllocationDirection { /// Next allocation in the chunk has higher address than the previous one. - #[allow(dead_code)] // TODO(nga): used in the following diff D40738710. Up, /// Next allocation in the chunk has lower address than the previous one. Down, diff --git a/starlark-rust/starlark/src/values/layout/heap/allocator/mod.rs b/starlark-rust/starlark/src/values/layout/heap/allocator/mod.rs deleted file mode 100644 index 52096f4224b0f..0000000000000 --- a/starlark-rust/starlark/src/values/layout/heap/allocator/mod.rs +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -pub(crate) mod api; -pub(crate) mod bumpalo; diff --git a/starlark-rust/starlark/src/values/layout/heap/arena.rs b/starlark-rust/starlark/src/values/layout/heap/arena.rs index 42d10ea1bd2f1..199dbbc679721 100644 --- a/starlark-rust/starlark/src/values/layout/heap/arena.rs +++ b/starlark-rust/starlark/src/values/layout/heap/arena.rs @@ -27,12 +27,10 @@ //! item it replaced. 
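 // Editor's note on the `Reservation` API below (a sketch using names from
 // this file, not original commentary): reserving splits allocation into two
 // phases so cyclic structures can be frozen. The caller first obtains a
 // stable address, then freezes children (which may point back at that
 // address), and only then fills the slot:
 //
 //     let (reservation, _extra) = arena.reserve_with_extra::<T>(0);
 //     // ... freeze children that may reference the reserved slot ...
 //     reservation.fill(starlark_value);
 //
 // Until `fill` runs, the slot holds a `BlackHole` so heap walkers can skip it.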
 use std::collections::HashMap;
-use std::marker::PhantomData;
 use std::mem;
 use std::mem::MaybeUninit;
 use std::ptr;
 use std::slice;
-use std::time::Instant;
 
 use allocative::Allocative;
 use allocative::Visitor;
@@ -40,11 +38,12 @@ use bumpalo::Bump;
 use dupe::Dupe;
 use starlark_map::small_map::SmallMap;
 
-use crate::cast::transmute;
 use crate::collections::StarlarkHashValue;
+use crate::eval::runtime::profile::instant::ProfilerInstant;
 use crate::values::layout::aligned_size::AlignedSize;
 use crate::values::layout::avalue::starlark_str;
 use crate::values::layout::avalue::AValue;
+use crate::values::layout::avalue::AValueImpl;
 use crate::values::layout::avalue::BlackHole;
 use crate::values::layout::heap::allocator::api::ArenaAllocator;
 use crate::values::layout::heap::allocator::api::ChunkAllocationDirection;
@@ -61,7 +60,7 @@ use crate::values::layout::heap::repr::AValueOrForward;
 use crate::values::layout::heap::repr::AValueOrForwardUnpack;
 use crate::values::layout::heap::repr::AValueRepr;
 use crate::values::layout::vtable::AValueVTable;
-use crate::values::string::StarlarkStr;
+use crate::values::string::str_type::StarlarkStr;
 use crate::values::Value;
 use crate::values::ValueLike;
 
@@ -89,13 +88,12 @@ pub(crate) struct Arena<A: ArenaAllocator> {
 /// `Reservation` is morally a `Reservation<T>`, but we treat it as an
 /// existential.
 /// Tied to the lifetime of the heap.
-pub(crate) struct Reservation<'v, 'v2, T: AValue<'v2>> {
-    pointer: *mut AValueRepr<T>, // Secretly AValueObject<T>
-    phantom: PhantomData<(&'v (), &'v2 T)>,
+pub(crate) struct Reservation<'v, T: AValue<'v>> {
+    pointer: *mut AValueRepr<AValueImpl<'v, T>>,
 }
 
-impl<'v, 'v2, T: AValue<'v2>> Reservation<'v, 'v2, T> {
-    pub(crate) fn fill(self, x: T) {
+impl<'v, T: AValue<'v>> Reservation<'v, T> {
+    pub(crate) fn fill(self, x: T::StarlarkValue) {
         unsafe {
             ptr::write(
                 self.pointer,
@@ -113,9 +111,10 @@
 }
 
 pub(crate) trait ArenaVisitor<'v> {
+    fn enter_bump(&mut self);
     fn regular_value(&mut self, value: &'v AValueOrForward);
-    fn call_enter(&mut self, function: Value<'v>, time: Instant);
-    fn call_exit(&mut self, time: Instant);
+    fn call_enter(&mut self, function: Value<'v>, time: ProfilerInstant);
+    fn call_exit(&mut self, time: ProfilerInstant);
 }
 
 /// Iterate over chunk contents.
@@ -141,6 +140,66 @@ impl<'c> Iterator for ChunkIter<'c> {
     }
 }
 
+/// Result of allocation. Both fields are uninitialized.
+pub(crate) struct ArenaUninit<'v, T: AValue<'v>> {
+    // We use `MaybeUninit` here to emphasize that the memory is uninitialized.
+ repr: *mut MaybeUninit>, + extra: *mut [MaybeUninit], +} + +impl<'v, T: AValue<'v>> ArenaUninit<'v, T> { + pub(crate) unsafe fn write_black_hole( + self, + extra_len: usize, + ) -> (Reservation<'v, T>, *mut [MaybeUninit]) { + let p = self.repr as *mut AValueRepr; + p.write(AValueRepr { + header: AValueHeader(AValueVTable::new_black_hole()), + payload: BlackHole(T::alloc_size_for_extra_len(extra_len)), + }); + ( + Reservation { + pointer: p as *mut _, + }, + self.extra, + ) + } + + pub(crate) fn debug_assert_extra_is_empty(&self) { + let extra = unsafe { &*self.extra }; + debug_assert!(extra.is_empty()); + } + + pub(crate) fn write( + self, + x: T::StarlarkValue, + ) -> ( + *mut AValueRepr>, + *mut [MaybeUninit], + ) { + unsafe { + let repr = self.repr as *mut AValueRepr>; + repr.write(AValueRepr { + header: AValueHeader::new::(), + payload: AValueImpl::new(x), + }); + (repr, self.extra) + } + } + + pub(crate) fn write_no_extra(self, x: T::StarlarkValue) -> *mut AValueRepr> { + self.debug_assert_extra_is_empty(); + self.write(x).0 + } +} + +enum ArenaVisitEvent<'a> { + /// Called when entering new bump. + EnterBump, + /// Visiting a value in the bump. + Value(&'a AValueOrForward), +} + impl Arena { pub(crate) fn is_empty(&self) -> bool { self.allocated_bytes() == 0 @@ -164,13 +223,7 @@ impl Arena { self.non_drop.finish(); } - fn alloc_uninit<'v, 'v2: 'v, T: AValue<'v2>>( - bump: &'v A, - extra_len: usize, - ) -> ( - &'v mut MaybeUninit>, - &'v mut [MaybeUninit], - ) { + fn alloc_uninit<'v, 'v2, T: AValue<'v2>>(bump: &'v A, extra_len: usize) -> ArenaUninit<'v2, T> { assert!( mem::align_of::() <= AValueHeader::ALIGN, "Unexpected alignment in Starlark arena. Type {} has alignment {}, expected <= {}", @@ -182,17 +235,17 @@ impl Arena { let size = T::alloc_size_for_extra_len(extra_len); let p = bump.alloc(size).as_ptr(); unsafe { - let repr = &mut *(p as *mut MaybeUninit>); + let repr = p as *mut MaybeUninit>; let extra = slice::from_raw_parts_mut( p.add(AValueRepr::::offset_of_extra()) as *mut _, extra_len, ); - (repr, extra) + ArenaUninit { repr, extra } } } fn bump_for_type<'v, T: AValue<'v>>(&self) -> &A { - if mem::needs_drop::() { + if mem::needs_drop::() { &self.drop } else { &self.non_drop @@ -200,10 +253,10 @@ impl Arena { } // Reservation should really be an incremental type - pub(crate) fn reserve_with_extra<'v, 'v2: 'v, T: AValue<'v2>>( - &'v self, + pub(crate) fn reserve_with_extra<'v2, T: AValue<'v2>>( + &self, extra_len: usize, - ) -> (Reservation<'v, 'v2, T>, &'v mut [MaybeUninit]) { + ) -> (Reservation<'v2, T>, *mut [MaybeUninit]) { // We don't create reservations for strings because we don't need to, // but also because we need to be able to reconstruct a `Pointer` // from `AValueHeader` (with `TAG_STR` when appropriate). @@ -211,62 +264,40 @@ impl Arena { // it returns `false` from `is_str`. 
         assert!(!T::IS_STR);
 
-        let (p, extra) = Self::alloc_uninit::<T>(self.bump_for_type::<T>(), extra_len);
+        let arena_uninit = Self::alloc_uninit::<T>(self.bump_for_type::<T>(), extra_len);
         // If we don't have a vtable we can't skip over missing elements to drop,
         // so very important to put in a current vtable
         // We always alloc at least one pointer worth of space, so can write in a one-ST blackhole
-        let x = BlackHole(T::alloc_size_for_extra_len(extra_len));
-        let p = unsafe {
-            transmute!(
-                &mut MaybeUninit<AValueRepr<T>>,
-                &mut MaybeUninit<AValueRepr<BlackHole>>,
-                p
-            )
-        };
-        let p = p.write(AValueRepr {
-            header: AValueHeader(AValueVTable::new_black_hole()),
-            payload: x,
-        });
-        let p = unsafe { transmute!(&mut AValueRepr<BlackHole>, &mut AValueRepr<T>, p) };
-
-        (
-            Reservation {
-                pointer: p,
-                phantom: PhantomData,
-            },
-            extra,
-        )
+        unsafe { arena_uninit.write_black_hole(extra_len) }
     }
 
     /// Allocate a type `T`.
-    pub(crate) fn alloc<'v, 'v2: 'v, T: AValue<'v2, ExtraElem = ()>>(
+    pub(crate) fn alloc<'v, 'v2, T: AValue<'v2, ExtraElem = ()>>(
         &'v self,
-        x: T,
-    ) -> &'v AValueRepr<T> {
-        debug_assert!(x.extra_len() == 0);
+        x: AValueImpl<'v2, T>,
+    ) -> &'v AValueRepr<AValueImpl<'v2, T>> {
+        debug_assert!(T::extra_len(&x.1) == 0);
         let bump = self.bump_for_type::<T>();
-        let (p, extra) = Self::alloc_uninit::<T>(bump, 0);
-        debug_assert!(extra.is_empty());
-        p.write(AValueRepr {
-            header: AValueHeader::new::<T>(),
-            payload: x,
-        })
+        let arena_uninit = Self::alloc_uninit::<T>(bump, 0);
+        arena_uninit.debug_assert_extra_is_empty();
+        unsafe { &mut *arena_uninit.write_no_extra(x.1) }
     }
 
     /// Allocate a type `T` plus `extra` bytes.
     ///
     /// The type `T` will never be dropped, so had better not do any memory allocation.
-    pub(crate) fn alloc_extra<'v, 'v2: 'v, T: AValue<'v2>>(
-        &'v self,
-        x: T,
-    ) -> (*mut AValueRepr<T>, &'v mut [MaybeUninit<T::ExtraElem>]) {
+    pub(crate) fn alloc_extra<'v, T: AValue<'v>>(
+        &self,
+        x: AValueImpl<'v, T>,
+    ) -> (
+        *mut AValueRepr<AValueImpl<'v, T>>,
+        *mut [MaybeUninit<T::ExtraElem>],
+    ) {
         let bump = self.bump_for_type::<T>();
-        let (p, extra) = Self::alloc_uninit::<T>(bump, x.extra_len());
-        let p = p.write(AValueRepr {
-            header: AValueHeader::new::<T>(),
-            payload: x,
-        });
+        let extra_len = T::extra_len(&x.1);
+        let arena_uninit = Self::alloc_uninit::<T>(bump, extra_len);
+        let (p, extra) = arena_uninit.write(x.1);
         (p, extra)
     }
 
@@ -279,6 +310,7 @@
     ) -> *mut AValueHeader {
         assert!(len > 1);
         let (v, extra) = self.alloc_extra::<_>(starlark_str(len, hash));
+        let extra = unsafe { &mut *extra };
         debug_assert_eq!(StarlarkStr::payload_len_for_len(len), extra.len());
         unsafe {
             extra.last_mut().unwrap_unchecked().write(0usize);
@@ -300,11 +332,12 @@
 
     // Iterate over the values in the heap in the order they
     // were added.
-    pub(crate) fn for_each_ordered<'a>(&'a mut self, mut f: impl FnMut(&'a AValueOrForward)) {
+    fn for_each_ordered<'a>(&'a mut self, mut f: impl FnMut(ArenaVisitEvent<'a>)) {
         // We get the chunks from newest to oldest, as per the bumpalo spec.
         // And within each chunk, the values are filled newest to oldest.
         // So need to do two sets of reversing.
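         // (Illustrative sketch, added for clarity: if values were allocated
         // in the order 1 2 3 | 4 5 6 across two chunks, the chunk iterator
         // yields the [4 5 6] chunk first, and a downward-growing chunk
         // presents 6 5 4; reversing at both levels restores 1..6.)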
for bump in [&mut self.drop, &mut self.non_drop] { + f(ArenaVisitEvent::EnterBump); let chunks = unsafe { bump.iter_allocated_chunks_rev().collect::>() }; // Use a single buffer to reduce allocations, but clear it after use let mut buffer = Vec::new(); @@ -313,13 +346,13 @@ impl Arena { ChunkAllocationDirection::Down => { buffer.extend(Arena::::iter_chunk(chunk)); for x in buffer.iter().rev() { - f(x); + f(ArenaVisitEvent::Value(x)); } buffer.clear(); } ChunkAllocationDirection::Up => { for x in Arena::::iter_chunk(chunk) { - f(x); + f(ArenaVisitEvent::Value(x)); } } } @@ -351,30 +384,31 @@ impl Arena { } } - self.for_each_ordered(|x| match x.unpack() { - AValueOrForwardUnpack::Header(header) => { - let value = header.unpack_value(heap_kind); - if let Some(call_enter) = value.downcast_ref::>() { - visitor.call_enter( - fix_function(call_enter.function, forward_heap_kind), - call_enter.time, - ); - } else if let Some(call_enter) = value.downcast_ref::>() { - visitor.call_enter( - fix_function(call_enter.function, forward_heap_kind), - call_enter.time, - ); - } else if let Some(call_exit) = value.downcast_ref::>() { - visitor.call_exit(call_exit.time); - } else if let Some(call_exit) = value.downcast_ref::>() { - visitor.call_exit(call_exit.time); - } else { - visitor.regular_value(x); + self.for_each_ordered(|x| match x { + ArenaVisitEvent::EnterBump => visitor.enter_bump(), + ArenaVisitEvent::Value(x) => match x.unpack() { + AValueOrForwardUnpack::Header(header) => { + let value = header.unpack_value(heap_kind); + if let Some(call_enter) = value.downcast_ref::>() { + visitor.call_enter( + fix_function(call_enter.function, forward_heap_kind), + call_enter.time, + ); + } else if let Some(call_enter) = value.downcast_ref::>() { + visitor.call_enter( + fix_function(call_enter.function, forward_heap_kind), + call_enter.time, + ); + } else if let Some(call_exit) = value.downcast_ref::>() { + visitor.call_exit(call_exit.time); + } else if let Some(call_exit) = value.downcast_ref::>() { + visitor.call_exit(call_exit.time); + } else { + visitor.regular_value(x); + } } - } - AValueOrForwardUnpack::Forward(_forward) => { - visitor.regular_value(x); - } + AValueOrForwardUnpack::Forward(_forward) => visitor.regular_value(x), + }, }); } @@ -435,11 +469,6 @@ impl Arena { } HeapSummary { summary } } - - /// Memory allocated in the arena but not used for allocation in starlark. 
- pub(crate) fn unused_capacity(&self) -> usize { - self.drop.remaining_capacity() + self.non_drop.remaining_capacity() - } } impl Drop for Arena { @@ -507,14 +536,14 @@ mod tests { s } - fn mk_str(x: &str) -> impl AValue<'static, ExtraElem = ()> { + fn mk_str(x: &str) -> AValueImpl<'static, impl AValue<'static, ExtraElem = ()>> { simple(StarlarkAny::new(x.to_owned())) } fn reserve_str<'v, T: AValue<'static>>( arena: &'v Arena, - _: &T, - ) -> Reservation<'v, 'static, T> { + _: &AValueImpl<'static, T>, + ) -> Reservation<'static, T> { arena.reserve_with_extra::(0).0 } @@ -535,7 +564,7 @@ mod tests { } assert!(!reserved.is_empty()); for (r, i) in reserved { - r.fill(mk_str(&i.to_string())); + r.fill(mk_str(&i.to_string()).1); } // Not a functional part of the test, just makes sure we go through @@ -545,10 +574,13 @@ mod tests { "Didn't allocate enough to test properly" ); let mut j = 0; - arena.for_each_ordered(|i| { - if let Some(i) = i.unpack_header() { - assert_eq!(to_repr(i), j.to_string()); - j += 1; + arena.for_each_ordered(|i| match i { + ArenaVisitEvent::EnterBump => {} + ArenaVisitEvent::Value(i) => { + if let Some(i) = i.unpack_header() { + assert_eq!(to_repr(i), format!("{:?}", j.to_string())); + j += 1; + } } }); assert_eq!(j, LIMIT); @@ -566,14 +598,17 @@ mod tests { reserve_str(&arena, &mk_str("")); arena.alloc(mk_str("hello")); let mut res = Vec::new(); - arena.for_each_ordered(|x| { - if let Some(x) = x.unpack_header() { - res.push(x); + arena.for_each_ordered(|x| match x { + ArenaVisitEvent::EnterBump => {} + ArenaVisitEvent::Value(x) => { + if let Some(x) = x.unpack_header() { + res.push(x); + } } }); assert_eq!(res.len(), 3); - assert_eq!(to_repr(res[0]), "test"); - assert_eq!(to_repr(res[2]), "hello"); + assert_eq!(to_repr(res[0]), "\"test\""); + assert_eq!(to_repr(res[2]), "\"hello\""); } #[test] diff --git a/starlark-rust/starlark/src/values/layout/heap/call_enter_exit.rs b/starlark-rust/starlark/src/values/layout/heap/call_enter_exit.rs index ce5e03e270a7a..f9313351ead3a 100644 --- a/starlark-rust/starlark/src/values/layout/heap/call_enter_exit.rs +++ b/starlark-rust/starlark/src/values/layout/heap/call_enter_exit.rs @@ -18,7 +18,6 @@ //! Marker objects to track allocations. 
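 // Editor's sketch of how these markers are consumed (inferred from
 // `record_call_enter`/`record_call_exit` in heap_type.rs): every call
 // allocates a `CallEnter { function, time, .. }` marker on the evaluation
 // heap and every return allocates a `CallExit { time, .. }`, so a later
 // ordered walk over the heap can replay the call tree with timestamps:
 //
 //     heap.record_call_enter(function); // allocates CallEnter
 //     // ... evaluate the function body, allocating values as usual ...
 //     heap.record_call_exit();          // allocates CallExit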
use std::fmt::Debug; -use std::time::Instant; use allocative::Allocative; use starlark_derive::starlark_value; @@ -26,6 +25,7 @@ use starlark_derive::NoSerialize; use crate as starlark; use crate::any::ProvidesStaticType; +use crate::eval::runtime::profile::instant::ProfilerInstant; use crate::values::StarlarkValue; use crate::values::Trace; use crate::values::Value; @@ -59,10 +59,10 @@ impl MaybeDrop for NoDrop {} NoSerialize, Allocative )] -#[display(fmt = "CallEnter")] +#[display("CallEnter")] pub(crate) struct CallEnter<'v, D: MaybeDrop + 'static> { pub(crate) function: Value<'v>, - pub(crate) time: Instant, + pub(crate) time: ProfilerInstant, pub(crate) maybe_drop: D, } @@ -78,9 +78,9 @@ impl<'v, D: MaybeDrop + Trace<'v> + 'v> StarlarkValue<'v> for CallEnter<'v, D> { NoSerialize, Allocative )] -#[display(fmt = "CallExit")] +#[display("CallExit")] pub(crate) struct CallExit { - pub(crate) time: Instant, + pub(crate) time: ProfilerInstant, pub(crate) maybe_drop: D, } diff --git a/starlark-rust/starlark/src/values/layout/heap/heap_type.rs b/starlark-rust/starlark/src/values/layout/heap/heap_type.rs index 0d70c4846755a..1595bc1914a67 100644 --- a/starlark-rust/starlark/src/values/layout/heap/heap_type.rs +++ b/starlark-rust/starlark/src/values/layout/heap/heap_type.rs @@ -15,12 +15,14 @@ * limitations under the License. */ +use std::any; use std::cell::Cell; use std::cell::RefCell; +use std::cell::RefMut; use std::cmp; +use std::convert::Infallible; use std::fmt; use std::fmt::Debug; -use std::fmt::Display; use std::fmt::Formatter; use std::hash::Hash; use std::hash::Hasher; @@ -32,13 +34,10 @@ use std::ops::Deref; use std::ptr; use std::slice; use std::sync::Arc; -use std::time::Instant; -use std::usize; use allocative::Allocative; use bumpalo::Bump; use dupe::Dupe; -use either::Either; use starlark_map::small_set::SmallSet; use crate::cast; @@ -48,6 +47,7 @@ use crate::collections::maybe_uninit_backport::maybe_uninit_write_slice_cloned; use crate::collections::Hashed; use crate::collections::StarlarkHashValue; use crate::eval::compiler::def::FrozenDef; +use crate::eval::runtime::profile::instant::ProfilerInstant; use crate::values::any::StarlarkAny; use crate::values::array::Array; use crate::values::array::VALUE_EMPTY_ARRAY; @@ -61,6 +61,8 @@ use crate::values::layout::avalue::list_avalue; use crate::values::layout::avalue::simple; use crate::values::layout::avalue::tuple_avalue; use crate::values::layout::avalue::AValue; +use crate::values::layout::avalue::AValueImpl; +use crate::values::layout::heap::allocator::alloc::allocator::ChunkAllocator; use crate::values::layout::heap::arena::Arena; use crate::values::layout::heap::arena::ArenaVisitor; use crate::values::layout::heap::arena::Reservation; @@ -71,25 +73,29 @@ use crate::values::layout::heap::call_enter_exit::NoDrop; use crate::values::layout::heap::fast_cell::FastCell; use crate::values::layout::heap::maybe_uninit_slice_util::maybe_uninit_write_from_exact_size_iter; use crate::values::layout::heap::profile::by_type::HeapSummary; +use crate::values::layout::heap::repr::AValueOrForwardUnpack; use crate::values::layout::heap::repr::AValueRepr; use crate::values::layout::static_string::constant_string; use crate::values::layout::typed::string::StringValueLike; use crate::values::layout::value::FrozenValue; use crate::values::layout::value::Value; use crate::values::list::value::VALUE_EMPTY_FROZEN_LIST; -use crate::values::string::intern::interner::FrozenStringInterner; -use crate::values::string::StarlarkStr; +use 
crate::values::string::intern::interner::FrozenStringValueInterner; +use crate::values::string::intern::interner::StringValueInterner; +use crate::values::string::str_type::StarlarkStr; use crate::values::AllocFrozenValue; use crate::values::AllocValue; use crate::values::ComplexValue; use crate::values::FrozenRef; use crate::values::FrozenStringValue; +use crate::values::FrozenValueOfUnchecked; use crate::values::FrozenValueTyped; use crate::values::StarlarkValue; use crate::values::StringValue; use crate::values::Trace; use crate::values::UnpackValue; use crate::values::ValueOf; +use crate::values::ValueOfUnchecked; use crate::values::ValueTyped; #[derive(Copy, Clone, Dupe)] @@ -104,6 +110,7 @@ pub struct Heap { /// Peak memory seen when a garbage collection takes place (may be lower than currently allocated) peak_allocated: Cell, arena: FastCell>, + str_interner: RefCell>, } impl Debug for Heap { @@ -117,16 +124,29 @@ impl Debug for Heap { } } +impl Heap { + pub(crate) fn trace_interner<'v>(&'v self, tracer: &Tracer<'v>) { + unsafe { + transmute!( + RefMut<'_, StringValueInterner<'static>>, + RefMut<'_, StringValueInterner<'v>>, + self.str_interner.borrow_mut() + ) + .trace(tracer); + } + } +} + /// A heap on which [`FrozenValue`]s can be allocated. /// Can be kept alive by a [`FrozenHeapRef`]. #[derive(Default)] pub struct FrozenHeap { /// My memory. - arena: Arena, + arena: Arena, /// Memory I depend on. refs: RefCell>, /// String interner. - str_interner: RefCell, + str_interner: RefCell, } /// `FrozenHeap` when it is no longer modified and can be share between threads. @@ -134,7 +154,7 @@ pub struct FrozenHeap { #[derive(Default, Allocative)] #[allow(clippy::non_send_fields_in_send_ty)] struct FrozenFrozenHeap { - arena: Arena, + arena: Arena, refs: Box<[FrozenHeapRef]>, } @@ -257,8 +277,11 @@ impl FrozenHeap { } } - fn alloc_raw(&self, x: impl AValue<'static, ExtraElem = ()> + Send + Sync) -> FrozenValue { - let v: &AValueRepr<_> = self.arena.alloc(x); + fn alloc_raw(&self, x: AValueImpl<'static, T>) -> FrozenValue + where + T: AValue<'static, ExtraElem = ()> + Send + Sync, + { + let v: &AValueRepr> = self.arena.alloc(x); unsafe { FrozenValue::new_repr(cast::ptr_lifetime(v)) } } @@ -320,6 +343,7 @@ impl FrozenHeap { let (avalue, extra) = self .arena .alloc_extra::<_>(frozen_tuple_avalue(elems.len())); + let extra = &mut *extra; maybe_uninit_write_slice(extra, elems); FrozenValue::new_repr(&*avalue) } @@ -339,6 +363,7 @@ impl FrozenHeap { unsafe { let (avalue, extra) = self.arena.alloc_extra::<_>(frozen_tuple_avalue(lower)); + let extra = &mut *extra; maybe_uninit_write_from_exact_size_iter(extra, elems, FrozenValue::new_none()); FrozenValue::new_repr(&*avalue) } @@ -355,6 +380,7 @@ impl FrozenHeap { unsafe { let (avalue, elem_places) = self.arena.alloc_extra(frozen_list_avalue(elems.len())); + let elem_places = &mut *elem_places; maybe_uninit_write_slice(elem_places, elems); FrozenValue::new_repr(&*avalue) } @@ -373,6 +399,7 @@ impl FrozenHeap { unsafe { let (avalue, elem_places) = self.arena.alloc_extra(frozen_list_avalue(lower)); + let elem_places = &mut *elem_places; maybe_uninit_write_from_exact_size_iter( elem_places, elems, @@ -411,57 +438,46 @@ impl FrozenHeap { } /// Allocate any value in the frozen heap. 
- pub(crate) fn alloc_any( - &self, - value: T, - ) -> FrozenRef<'static, T> { + pub fn alloc_any(&self, value: T) -> FrozenRef<'static, T> { let value = self.alloc_simple_frozen_ref(StarlarkAny::new(value)); value.map(|r| &r.0) } - /// Allocate any value, use `Debug` implementation for `Display`. - pub fn alloc_any_display_from_debug( - &self, - value: T, - ) -> FrozenRef<'static, T> { - #[derive(derive_more::Display, Debug)] - #[display(fmt = "{:?}", _0)] - struct Wrapper(T); - self.alloc_any(Wrapper(value)).map(|r| &r.0) - } - - pub(crate) fn alloc_any_display_from_type_name( + pub(crate) fn alloc_any_debug_type_name( &self, value: T, ) -> FrozenRef<'static, T> { - #[derive(derive_more::Display, Debug)] - #[display(fmt = "{}", "std::any::type_name::()")] - struct Wrapper(T); - self.alloc_any(Wrapper(value)).map(|r| &r.0) + struct DebugValue(T); + impl Debug for DebugValue { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct(any::type_name::()) + .finish_non_exhaustive() + } + } + self.alloc_any(DebugValue(value)).map(|r| &r.0) } - fn do_alloc_any_slice_display_from_debug( + fn do_alloc_any_slice( &self, values: &[T], ) -> FrozenRef<'static, [T]> { let (_any_array, content) = self.arena.alloc_extra(any_array_avalue(values.len())); - // Drop lifetime. - let content = unsafe { transmute!(&mut [MaybeUninit], &mut [MaybeUninit], content) }; + let content = unsafe { &mut *content }; FrozenRef::new(&*maybe_uninit_write_slice_cloned(content, values)) } /// Allocate a slice in the frozen heap. - pub(crate) fn alloc_any_slice_display_from_debug( + pub(crate) fn alloc_any_slice( &self, values: &[T], ) -> FrozenRef<'static, [T]> { if values.is_empty() { FrozenRef::new(&[]) } else if values.len() == 1 { - self.alloc_any_display_from_debug(values[0].clone()) + self.alloc_any(values[0].clone()) .map(|r| slice::from_ref(r)) } else { - self.do_alloc_any_slice_display_from_debug(values) + self.do_alloc_any_slice(values) } } @@ -470,6 +486,14 @@ impl FrozenHeap { val.alloc_frozen_value(self) } + /// Allocate a value and return [`ValueOfUnchecked`] of it. + pub fn alloc_typed_unchecked( + &self, + val: T, + ) -> FrozenValueOfUnchecked<'static, T> { + FrozenValueOfUnchecked::new(val.alloc_frozen_value(self)) + } + /// Number of bytes allocated on this heap, not including any memory /// allocated outside of the starlark heap. pub fn allocated_bytes(&self) -> usize { @@ -485,11 +509,6 @@ impl FrozenHeap { pub fn allocated_summary(&self) -> HeapSummary { self.arena.allocated_summary() } - - /// Memory allocated in the arena, but not used for allocation of starlark values. - pub(crate) fn unused_capacity(&self) -> usize { - self.arena.unused_capacity() - } } /// Used to `freeze` values by [`Freeze::freeze`](crate::values::Freeze::freeze). 
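 // Editor's note on the freeze protocol below (a sketch grounded in this
 // hunk and repr.rs): freezing overwrites a value's header in the unfrozen
 // heap with an `AValueForward` built from `ForwardPtr::new_frozen(frozen)`.
 // A later `freeze` of the same value hits the
 // `AValueOrForwardUnpack::Forward` arm and simply unpacks the cached
 // `FrozenValue`, which is what lets shared and cyclic object graphs freeze
 // each value once and terminate.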
@@ -520,21 +539,22 @@ impl Freezer {
         val.alloc_frozen_value(&self.heap)
     }
 
-    pub(crate) fn reserve<'v, 'v2: 'v, T: AValue<'v2, ExtraElem = ()>>(
+    pub(crate) fn reserve<'v, 'v2, T: AValue<'v2, ExtraElem = ()>>(
         &'v self,
-    ) -> (FrozenValue, Reservation<'v, 'v2, T>) {
+    ) -> (FrozenValue, Reservation<'v2, T>) {
         let (fv, r, extra) = self.reserve_with_extra::<T>(0);
+        let extra = unsafe { &mut *extra };
         debug_assert!(extra.is_empty());
         (fv, r)
     }
 
-    pub(crate) fn reserve_with_extra<'v, 'v2: 'v, T: AValue<'v2>>(
+    pub(crate) fn reserve_with_extra<'v, 'v2, T: AValue<'v2>>(
         &'v self,
         extra_len: usize,
     ) -> (
         FrozenValue,
-        Reservation<'v, 'v2, T>,
-        &'v mut [MaybeUninit<T::ExtraElem>],
+        Reservation<'v2, T>,
+        *mut [MaybeUninit<T::ExtraElem>],
     ) {
         let (r, extra) = self.heap.arena.reserve_with_extra::<T>(extra_len);
         let fv = FrozenValue::new_ptr(unsafe { cast::ptr_lifetime(r.ptr()) }, false);
@@ -550,9 +570,11 @@
         // Case 2: We have already been replaced with a forwarding, or need to freeze
         let value = value.0.unpack_ptr().unwrap();
-        match value.unpack_overwrite() {
-            Either::Left(x) => Ok(unsafe { x.unpack_frozen_value() }),
-            Either::Right(v) => unsafe { v.heap_freeze(self) },
+        match value.unpack() {
+            AValueOrForwardUnpack::Forward(x) => {
+                Ok(unsafe { x.forward_ptr().unpack_frozen_value() })
+            }
+            AValueOrForwardUnpack::Header(v) => unsafe { v.unpack().heap_freeze(self) },
         }
     }
 
@@ -587,7 +609,10 @@ impl Heap {
         self.arena.borrow().available_bytes()
     }
 
-    fn alloc_raw<'v, 'v2: 'v>(&'v self, x: impl AValue<'v2, ExtraElem = ()>) -> Value<'v> {
+    fn alloc_raw<'v, 'v2: 'v>(
+        &'v self,
+        x: AValueImpl<'v2, impl AValue<'v2, ExtraElem = ()>>,
+    ) -> Value<'v> {
         let arena = self.arena.borrow();
         let v: &AValueRepr<_> = arena.alloc(x);
 
@@ -602,7 +627,7 @@
 
     fn alloc_raw_typed<'v, A: AValue<'v, ExtraElem = ()>>(
         &'v self,
-        x: A,
+        x: AValueImpl<'v, A>,
     ) -> ValueTyped<'v, A::StarlarkValue> {
         unsafe { ValueTyped::new_unchecked(self.alloc_raw(x)) }
     }
@@ -610,10 +635,11 @@
     pub(crate) fn alloc_str_init<'v>(
         &'v self,
         len: usize,
+        hash: StarlarkHashValue,
         init: impl FnOnce(*mut u8),
     ) -> StringValue<'v> {
         let arena = self.arena.borrow();
-        let v = arena.alloc_str_init(len, StarlarkStr::UNINIT_HASH, init);
+        let v = arena.alloc_str_init(len, hash, init);
 
         // We have an arena inside a RefCell which stores ValueMem<'v>
         // However, we promise not to clear the RefCell other than for GC
@@ -624,17 +650,42 @@
         }
     }
 
+    fn alloc_str_impl<'v>(&'v self, x: &str, hash: StarlarkHashValue) -> StringValue<'v> {
+        if let Some(x) = constant_string(x) {
+            x.to_string_value()
+        } else {
+            self.alloc_str_init(x.len(), hash, |dest| unsafe {
+                copy_nonoverlapping(x.as_ptr(), dest, x.len())
+            })
+        }
+    }
+
     /// Allocate a string on the heap.
     pub fn alloc_str<'v>(&'v self, x: &str) -> StringValue<'v> {
         if let Some(x) = constant_string(x) {
             x.to_string_value()
         } else {
-            self.alloc_str_init(x.len(), |dest| unsafe {
+            self.alloc_str_init(x.len(), StarlarkStr::UNINIT_HASH, |dest| unsafe {
                 copy_nonoverlapping(x.as_ptr(), dest, x.len())
             })
         }
     }
 
+    /// Intern string.
+    pub fn alloc_str_intern<'v>(&'v self, x: &str) -> StringValue<'v> {
+        if let Some(x) = constant_string(x) {
+            x.to_string_value()
+        } else {
+            let x = Hashed::new(x);
+            let mut interner = self.str_interner.borrow_mut();
+            unsafe {
+                interner
+                    .intern(x, || self.alloc_str_impl(x.key(), x.hash()).cast_lifetime())
+                    .cast_lifetime()
+            }
+        }
+    }
+
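     // A usage sketch for the interner above (illustrative, not from the
     // original source): interned allocations of equal contents share one
     // heap cell, while plain `alloc_str` always allocates fresh storage.
     //
     //     let a = heap.alloc_str_intern("label");
     //     let b = heap.alloc_str_intern("label");
     //     assert!(a.to_value().ptr_eq(b.to_value()));
     //
     // The `cast_lifetime` calls are sound because the interner lives in the
     // same `Heap` as the strings it hands out.

     /// Allocate a string on the heap, based on two concatenated strings.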
     pub fn alloc_str_concat<'v>(&'v self, x: &str, y: &str) -> StringValue<'v> {
         if x.is_empty() {
@@ -642,7 +693,7 @@
         } else if y.is_empty() {
             self.alloc_str(x)
         } else {
-            self.alloc_str_init(x.len() + y.len(), |dest| unsafe {
+            self.alloc_str_init(x.len() + y.len(), StarlarkStr::UNINIT_HASH, |dest| unsafe {
                 copy_nonoverlapping(x.as_ptr(), dest, x.len());
                 copy_nonoverlapping(y.as_ptr(), dest.add(x.len()), y.len())
             })
@@ -658,13 +709,17 @@
         } else if z.is_empty() {
             self.alloc_str_concat(x, y)
         } else {
-            self.alloc_str_init(x.len() + y.len() + z.len(), |dest| unsafe {
-                copy_nonoverlapping(x.as_ptr(), dest, x.len());
-                let dest = dest.add(x.len());
-                copy_nonoverlapping(y.as_ptr(), dest, y.len());
-                let dest = dest.add(y.len());
-                copy_nonoverlapping(z.as_ptr(), dest, z.len());
-            })
+            self.alloc_str_init(
+                x.len() + y.len() + z.len(),
+                StarlarkStr::UNINIT_HASH,
+                |dest| unsafe {
+                    copy_nonoverlapping(x.as_ptr(), dest, x.len());
+                    let dest = dest.add(x.len());
+                    copy_nonoverlapping(y.as_ptr(), dest, y.len());
+                    let dest = dest.add(y.len());
+                    copy_nonoverlapping(z.as_ptr(), dest, z.len());
+                },
+            )
         }
     }
 
@@ -677,6 +732,7 @@
         unsafe {
             let arena = self.arena.borrow();
             let (avalue, extra) = arena.alloc_extra(tuple_avalue(elems.len()));
+            let extra = &mut *extra;
             maybe_uninit_write_slice(extra, elems);
             Value::new_repr(&*avalue)
         }
@@ -696,6 +752,7 @@
         unsafe {
             let arena = self.arena.borrow();
             let (avalue, extra) = arena.alloc_extra(tuple_avalue(lower));
+            let extra = &mut *extra;
             maybe_uninit_write_from_exact_size_iter(extra, elems, Value::new_none());
             Value::new_repr(&*avalue)
         }
@@ -729,11 +786,25 @@
         &'v self,
         elems: impl IntoIterator<Item = Value<'v>>,
     ) -> Value<'v> {
+        match self.try_alloc_list_iter(elems.into_iter().map(Ok)) {
+            Ok(value) => value,
+            Err(e) => {
+                let e: Infallible = e;
+                match e {}
+            }
+        }
+    }
+
+    /// Allocate a list with the given elements.
+    pub(crate) fn try_alloc_list_iter<'v, E>(
+        &'v self,
+        elems: impl IntoIterator<Item = Result<Value<'v>, E>>,
+    ) -> Result<Value<'v>, E> {
         let elems = elems.into_iter();
         let array = self.alloc_array(0);
         let list = self.alloc_raw_typed(list_avalue(array));
-        list.0.extend(elems, self);
-        list.to_value()
+        list.0.try_extend(elems, self)?;
+        Ok(list.to_value())
     }
 
     /// Allocate a list by concatenating two slices.
@@ -794,6 +865,11 @@
         ValueTyped::new(self.alloc(x)).expect("just allocated value must have the right type")
     }
 
+    /// Allocate a value and return [`ValueOfUnchecked`] of it.
+    pub fn alloc_typed_unchecked<'v, T: AllocValue<'v>>(&'v self, x: T) -> ValueOfUnchecked<'v, T> {
+        ValueOfUnchecked::new(self.alloc(x))
+    }
+
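     // Why the `Infallible` dance in `alloc_list_iter` above (editor's note):
     // it reuses the fallible `try_alloc_list_iter` by wrapping every element
     // in `Ok`, and the `Err` arm is then proven unreachable with an empty
     // `match e {}` on the uninhabited `Infallible` type, instead of an
     // `unwrap()`. The same shape works for any infallible `Result`:
     //
     //     fn always<T>(r: Result<T, std::convert::Infallible>) -> T {
     //         match r {
     //             Ok(v) => v,
     //             Err(e) => match e {},
     //         }
     //     }

     /// Allocate a value and return [`ValueOf`] of it.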
pub fn alloc_value_of<'v, T>(&'v self, x: T) -> ValueOf<'v, &'v T> where @@ -802,6 +878,7 @@ impl Heap { { let value = self.alloc(x); ValueOf::unpack_value(value) + .unwrap() .expect("just allocate value must be unpackable to the type of value") } @@ -843,7 +920,7 @@ impl Heap { } pub(crate) fn record_call_enter<'v>(&'v self, function: Value<'v>) { - let time = Instant::now(); + let time = ProfilerInstant::now(); assert!(mem::needs_drop::>()); assert!(!mem::needs_drop::>()); self.alloc_complex_no_freeze(CallEnter { @@ -859,7 +936,7 @@ impl Heap { } pub(crate) fn record_call_exit<'v>(&'v self) { - let time = Instant::now(); + let time = ProfilerInstant::now(); assert!(mem::needs_drop::>()); assert!(!mem::needs_drop::>()); self.alloc_simple(CallExit { @@ -871,11 +948,6 @@ impl Heap { maybe_drop: NoDrop, }); } - - /// Memory allocated in the arena, but not used for allocation of starlark values. - pub(crate) fn unused_capacity(&self) -> usize { - self.arena.borrow().unused_capacity() - } } /// Used to perform garbage collection by [`Trace::trace`](crate::values::Trace::trace). @@ -893,26 +965,25 @@ impl<'v> Tracer<'v> { /// Helper function to annotate that this field has been considered for tracing, /// but is not relevant because it has a static lifetime containing no relevant values. /// Does nothing. - pub fn trace_static(&self, value: &mut T) { + pub fn trace_static(&self, value: &T) { // Nothing to do because T can't contain the lifetime 'v let _ = value; } - pub(crate) fn reserve<'a, 'v2: 'v + 'a, T: AValue<'v2, ExtraElem = ()>>( - &'a self, - ) -> (Value<'v>, Reservation<'a, 'v2, T>) { + pub(crate) fn reserve>(&self) -> (Value<'v>, Reservation<'v, T>) { let (v, r, extra) = self.reserve_with_extra::(0); + let extra = unsafe { &mut *extra }; debug_assert!(extra.is_empty()); (v, r) } - pub(crate) fn reserve_with_extra<'a, 'v2: 'v + 'a, T: AValue<'v2>>( - &'a self, + pub(crate) fn reserve_with_extra>( + &self, extra_len: usize, ) -> ( Value<'v>, - Reservation<'a, 'v2, T>, - &'a mut [MaybeUninit], + Reservation<'v, T>, + *mut [MaybeUninit], ) { assert!(!T::IS_STR, "strings cannot be reserved"); let (r, extra) = self.arena.reserve_with_extra::(extra_len); @@ -933,18 +1004,74 @@ impl<'v> Tracer<'v> { let old_val = value.0.unpack_ptr().unwrap(); // Case 2: We have already been replaced with a forwarding, or need to freeze - let res = match old_val.unpack_overwrite() { - Either::Left(x) => unsafe { x.unpack_unfrozen_value() }, - Either::Right(v) => unsafe { v.heap_copy(self) }, + let res = match old_val.unpack() { + AValueOrForwardUnpack::Forward(x) => unsafe { x.forward_ptr().unpack_unfrozen_value() }, + AValueOrForwardUnpack::Header(v) => unsafe { v.unpack().heap_copy(self) }, }; res } } -#[test] -fn test_send_sync() -where - FrozenHeapRef: Send + Sync, -{ +#[cfg(test)] +mod tests { + use starlark_derive::starlark_module; + + use super::FrozenHeapRef; + use super::Heap; + use crate as starlark; + use crate::assert::Assert; + use crate::environment::GlobalsBuilder; + use crate::values::StringValue; + + #[test] + fn test_send_sync() + where + FrozenHeapRef: Send + Sync, + { + } + + #[test] + fn test_string_reallocated_on_heap() { + let heap = Heap::new(); + let first = heap.alloc_str("xx"); + let second = heap.alloc_str("xx"); + assert!( + !first.to_value().ptr_eq(second.to_value()), + "Plain allocations should recreate values. Note assertion negation." 
+ ); + } + + #[test] + fn test_interned_string_equal() { + let heap = Heap::new(); + let first = heap.alloc_str_intern("xx"); + let second = heap.alloc_str_intern("xx"); + assert!( + first.to_value().ptr_eq(second.to_value()), + "Interned allocations should be equal." + ); + } + + #[starlark_module] + fn validate_str_interning(globals: &mut GlobalsBuilder) { + fn append_x<'v>(str: StringValue<'v>, heap: &'v Heap) -> anyhow::Result> { + Ok(heap.alloc_str_intern(&(str.as_str().to_owned() + "x"))) + } + } + + #[test] + fn test_interned_str_starlark() { + let mut a = Assert::new(); + a.globals_add(validate_str_interning); + + a.pass( + r#" +x = append_x("foo") +assert_eq(x, "foox") +garbage_collect() +assert_eq(x, "foox") + "#, + ); + } } diff --git a/starlark-rust/starlark/src/values/layout/heap/profile.rs b/starlark-rust/starlark/src/values/layout/heap/profile.rs new file mode 100644 index 0000000000000..8469f2a6616e9 --- /dev/null +++ b/starlark-rust/starlark/src/values/layout/heap/profile.rs @@ -0,0 +1,24 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Summary of heap allocations and function times with stacks. + +pub(crate) mod aggregated; +pub(crate) mod alloc_counts; +pub(crate) mod by_type; +pub(crate) mod string_index; +mod summary_by_function; diff --git a/starlark-rust/starlark/src/values/layout/heap/profile/aggregated.rs b/starlark-rust/starlark/src/values/layout/heap/profile/aggregated.rs index eeae914fd663e..30e061276077a 100644 --- a/starlark-rust/starlark/src/values/layout/heap/profile/aggregated.rs +++ b/starlark-rust/starlark/src/values/layout/heap/profile/aggregated.rs @@ -22,9 +22,6 @@ use std::fmt; use std::fmt::Debug; use std::fmt::Formatter; use std::rc::Rc; -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering; -use std::time::Instant; use allocative::Allocative; use dupe::Dupe; @@ -34,12 +31,13 @@ use crate::eval::runtime::profile::data::ProfileDataImpl; use crate::eval::runtime::profile::flamegraph::FlameGraphData; use crate::eval::runtime::profile::flamegraph::FlameGraphNode; use crate::eval::runtime::profile::heap::RetainedHeapProfileMode; +use crate::eval::runtime::profile::instant::ProfilerInstant; use crate::eval::runtime::small_duration::SmallDuration; use crate::eval::ProfileData; +use crate::util::arc_str::ArcStr; use crate::values::layout::heap::arena::ArenaVisitor; use crate::values::layout::heap::heap_type::HeapKind; use crate::values::layout::heap::profile::alloc_counts::AllocCounts; -use crate::values::layout::heap::profile::arc_str::ArcStr; use crate::values::layout::heap::profile::by_type::HeapSummary; use crate::values::layout::heap::profile::string_index::StringId; use crate::values::layout::heap::profile::string_index::StringIndex; @@ -126,7 +124,7 @@ impl StackFrameBuilder { /// An accumulator for stack frames that lets us visit the heap. 
 pub(crate) struct StackCollector {
     /// Timestamp of last call enter or exit.
-    last_time: Option<Instant>,
+    last_time: Option<ProfilerInstant>,
     ids: FunctionIds,
     current: Vec<StackFrameBuilder>,
     /// What we are collecting.
@@ -147,6 +145,10 @@ impl StackCollector {
 }
 
 impl<'v> ArenaVisitor<'v> for StackCollector {
+    fn enter_bump(&mut self) {
+        self.last_time = None;
+    }
+
     fn regular_value(&mut self, value: &'v AValueOrForward) {
         let value = match (value.unpack(), self.retained) {
             (AValueOrForwardUnpack::Header(header), None) => unsafe {
@@ -175,10 +177,10 @@
         );
     }
 
-    fn call_enter(&mut self, function: Value<'v>, time: Instant) {
+    fn call_enter(&mut self, function: Value<'v>, time: ProfilerInstant) {
         if let Some(last_time) = self.last_time {
             self.current.last_mut().unwrap().0.borrow_mut().time_x2 +=
-                time.saturating_duration_since(last_time);
+                time.duration_since(last_time);
             self.current.last_mut().unwrap().0.borrow_mut().calls_x2 += 1;
         }
 
@@ -195,10 +197,10 @@
         self.last_time = Some(time)
     }
 
-    fn call_exit(&mut self, time: Instant) {
+    fn call_exit(&mut self, time: ProfilerInstant) {
         if let Some(last_time) = self.last_time {
             self.current.last_mut().unwrap().0.borrow_mut().time_x2 +=
-                time.saturating_duration_since(last_time);
+                time.duration_since(last_time);
         }
         self.current.pop().unwrap();
         self.last_time = Some(time);
     }
@@ -256,6 +258,14 @@
             calls_x2,
         }
     }
+
+    #[cfg(test)]
+    pub(crate) fn normalize_for_golden_tests(&mut self) {
+        for (_, v) in &mut self.callees {
+            v.normalize_for_golden_tests();
+        }
+        self.allocs.normalize_for_golden_tests();
+    }
 }
 
 struct StackFrameWithContext<'c> {
@@ -289,41 +299,15 @@ impl<'c> StackFrameWithContext<'c> {
     }
 }
 
-/// `Clone` wrapper.
-#[derive(Default, Allocative)]
-pub(crate) struct UnusedCapacity(AtomicUsize);
-
-impl Clone for UnusedCapacity {
-    fn clone(&self) -> Self {
-        UnusedCapacity(AtomicUsize::new(self.0.load(Ordering::Relaxed)))
-    }
-}
-
-impl UnusedCapacity {
-    pub(crate) fn new(value: usize) -> UnusedCapacity {
-        UnusedCapacity(AtomicUsize::new(value))
-    }
-
-    pub(crate) fn get(&self) -> usize {
-        self.0.load(Ordering::Relaxed)
-    }
-
-    pub(crate) fn set(&self, value: usize) {
-        self.0.store(value, Ordering::Relaxed);
-    }
-}
-
 /// Aggregated heap profiling data when heap profiling is enabled.
 ///
 /// Can be:
 /// * written as CSV or flamegraph
 /// * merged with other data
 #[derive(Clone, Allocative)]
-pub struct AggregateHeapProfileInfo {
+pub(crate) struct AggregateHeapProfileInfo {
     pub(crate) strings: StringIndex,
     pub(crate) root: StackFrame,
-    /// Memory allocated in bump, but unused.
- UnusedCapacity::default() - } else { - UnusedCapacity::new(heap.unused_capacity()) - }; AggregateHeapProfileInfo { strings: collector.ids.strings, root: collector.current.pop().unwrap().build(), - unused_capacity, } } @@ -378,24 +354,15 @@ impl AggregateHeapProfileInfo { let profiles: Vec<_> = Vec::from_iter(profiles); let mut strings = StringIndex::default(); - let unused_capacity = - UnusedCapacity::new(profiles.iter().map(|p| p.unused_capacity.get()).sum()); let roots = profiles.into_iter().map(|p| p.root()); let root = StackFrame::merge(roots, &mut strings); - AggregateHeapProfileInfo { - strings, - root, - unused_capacity, - } + AggregateHeapProfileInfo { strings, root } } /// Write this out recursively to a file. pub fn gen_flame_graph(&self) -> String { let mut data = FlameGraphData::default(); self.root().write_flame_graph(data.root()); - data.root() - .child(ArcStr::new_static("unused_capacity")) - .add(self.unused_capacity.get() as u64); data.write() } @@ -403,6 +370,11 @@ impl AggregateHeapProfileInfo { pub fn gen_summary_csv(&self) -> String { HeapSummaryByFunction::init(self).gen_csv() } + + #[cfg(test)] + pub(crate) fn normalize_for_golden_tests(&mut self) { + self.root.normalize_for_golden_tests(); + } } #[derive(Debug, Allocative)] @@ -414,8 +386,14 @@ pub(crate) struct RetainedHeapProfile { impl RetainedHeapProfile { pub(crate) fn to_profile(&self) -> ProfileData { ProfileData { - profile: ProfileDataImpl::AggregateHeapProfileInfo(Box::new(self.info.clone())), - profile_mode: self.mode.to_profile_mode(), + profile: match self.mode { + RetainedHeapProfileMode::Flame => { + ProfileDataImpl::HeapFlameRetained(Box::new(self.info.clone())) + } + RetainedHeapProfileMode::Summary => { + ProfileDataImpl::HeapSummaryRetained(Box::new(self.info.clone())) + } + }, } } } diff --git a/starlark-rust/starlark/src/values/layout/heap/profile/alloc_counts.rs b/starlark-rust/starlark/src/values/layout/heap/profile/alloc_counts.rs index f7c5fcddc33fc..487027587e03c 100644 --- a/starlark-rust/starlark/src/values/layout/heap/profile/alloc_counts.rs +++ b/starlark-rust/starlark/src/values/layout/heap/profile/alloc_counts.rs @@ -29,6 +29,14 @@ pub(crate) struct AllocCounts { pub(crate) count: usize, } +impl AllocCounts { + #[cfg(test)] + pub(crate) fn normalize_for_golden_tests(&mut self) { + // Value sizes depend on compiler version, so normalize them. + self.bytes = self.count * 8; + } +} + impl AddAssign for AllocCounts { fn add_assign(&mut self, other: AllocCounts) { self.bytes += other.bytes; diff --git a/starlark-rust/starlark/src/values/layout/heap/profile/arc_str.rs b/starlark-rust/starlark/src/values/layout/heap/profile/arc_str.rs deleted file mode 100644 index d7c3cdca87eeb..0000000000000 --- a/starlark-rust/starlark/src/values/layout/heap/profile/arc_str.rs +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -use std::borrow::Borrow; -use std::hash::Hash; -use std::hash::Hasher; -use std::ops::Deref; -use std::sync::Arc; - -use allocative::Allocative; -use dupe::Dupe; - -#[derive(Clone, Dupe, Debug, Allocative)] -enum Inner { - Arc(Arc), - Static(&'static str), -} - -/// Wrapper for `Arc`. -#[derive(Clone, Dupe, Debug, derive_more::Display, Allocative)] -#[display(fmt = "{}", "&**self")] -pub(crate) struct ArcStr(Inner); - -impl ArcStr { - pub(crate) fn new_static(s: &'static str) -> ArcStr { - ArcStr(Inner::Static(s)) - } - - pub(crate) fn as_str(&self) -> &str { - match &self.0 { - Inner::Arc(s) => s, - Inner::Static(s) => s, - } - } -} - -impl PartialEq for ArcStr { - fn eq(&self, other: &Self) -> bool { - self.as_str() == other.as_str() - } -} - -impl Eq for ArcStr {} - -impl PartialOrd for ArcStr { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for ArcStr { - fn cmp(&self, other: &Self) -> std::cmp::Ordering { - self.as_str().cmp(other.as_str()) - } -} - -impl Hash for ArcStr { - fn hash(&self, state: &mut H) { - self.as_str().hash(state) - } -} - -impl Deref for ArcStr { - type Target = str; - - fn deref(&self) -> &str { - self.as_str() - } -} - -impl Borrow for ArcStr { - fn borrow(&self) -> &str { - self - } -} - -impl<'a> From<&'a str> for ArcStr { - fn from(s: &'a str) -> Self { - if s.is_empty() { - ArcStr(Inner::Static("")) - } else { - ArcStr(Inner::Arc(Arc::from(s))) - } - } -} diff --git a/starlark-rust/starlark/src/values/layout/heap/profile/by_type.rs b/starlark-rust/starlark/src/values/layout/heap/profile/by_type.rs index b4fb743e78afc..d69f9bd608ed8 100644 --- a/starlark-rust/starlark/src/values/layout/heap/profile/by_type.rs +++ b/starlark-rust/starlark/src/values/layout/heap/profile/by_type.rs @@ -64,4 +64,11 @@ impl HeapSummary { } HeapSummary { summary } } + + #[cfg(test)] + pub(crate) fn normalize_for_golden_tests(&mut self) { + for v in self.summary.values_mut() { + v.normalize_for_golden_tests(); + } + } } diff --git a/starlark-rust/starlark/src/values/layout/heap/profile/mod.rs b/starlark-rust/starlark/src/values/layout/heap/profile/mod.rs deleted file mode 100644 index ec3f219b40530..0000000000000 --- a/starlark-rust/starlark/src/values/layout/heap/profile/mod.rs +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! Summary of heap allocations and function times with stacks. 
- -pub(crate) mod aggregated; -pub(crate) mod alloc_counts; -pub(crate) mod arc_str; -pub(crate) mod by_type; -pub(crate) mod string_index; -mod summary_by_function; diff --git a/starlark-rust/starlark/src/values/layout/heap/profile/string_index.rs b/starlark-rust/starlark/src/values/layout/heap/profile/string_index.rs index 2769dc32169c5..67da0c1dbfd28 100644 --- a/starlark-rust/starlark/src/values/layout/heap/profile/string_index.rs +++ b/starlark-rust/starlark/src/values/layout/heap/profile/string_index.rs @@ -19,7 +19,7 @@ use allocative::Allocative; use dupe::Dupe; use starlark_map::small_set::SmallSet; -use crate::values::layout::heap::profile::arc_str::ArcStr; +use crate::util::arc_str::ArcStr; /// Map strings to integers 0, 1, 2, ... #[derive(Default, Clone, Allocative)] diff --git a/starlark-rust/starlark/src/values/layout/heap/profile/summary_by_function.rs b/starlark-rust/starlark/src/values/layout/heap/profile/summary_by_function.rs index 2d3e5b4155277..ee3837bc9a229 100644 --- a/starlark-rust/starlark/src/values/layout/heap/profile/summary_by_function.rs +++ b/starlark-rust/starlark/src/values/layout/heap/profile/summary_by_function.rs @@ -20,10 +20,10 @@ use starlark_map::small_map::SmallMap; use crate::eval::runtime::profile::csv::CsvWriter; use crate::eval::runtime::small_duration::SmallDuration; +use crate::util::arc_str::ArcStr; use crate::values::layout::heap::profile::aggregated::AggregateHeapProfileInfo; use crate::values::layout::heap::profile::aggregated::StackFrame; use crate::values::layout::heap::profile::alloc_counts::AllocCounts; -use crate::values::layout::heap::profile::arc_str::ArcStr; use crate::values::layout::heap::profile::string_index::StringId; use crate::values::layout::heap::profile::string_index::StringIndex; @@ -77,15 +77,12 @@ impl FuncInfo { pub(crate) struct HeapSummaryByFunction { /// Information about all functions. info: SmallMap, - /// Allocated but unused memory. 
- unused_capacity: usize, } impl HeapSummaryByFunction { pub(crate) fn init(stacks: &AggregateHeapProfileInfo) -> HeapSummaryByFunction { let mut info = HeapSummaryByFunction { info: SmallMap::new(), - unused_capacity: stacks.unused_capacity.get(), }; info.init_children(&stacks.root, &ArcStr::new_static("(root)"), &stacks.strings); info @@ -155,32 +152,15 @@ impl HeapSummaryByFunction { let mut info = self.info(); info.sort_by_key(|x| -(x.1.time.nanos as i128)); - let unused_capacity = FuncInfo { - calls: 0, - callers: SmallMap::new(), - time: SmallDuration::default(), - time_rec: SmallDuration::default(), - alloc: SmallMap::new(), - }; - enum RowKind { Total, - UnusedCapacity, Func, } let totals_str = ArcStr::new_static("TOTALS"); - let unused_capacity_str = ArcStr::new_static("UNUSED CAPACITY"); - let info = [ - (&totals_str, &totals, RowKind::Total), - ( - &unused_capacity_str, - &unused_capacity, - RowKind::UnusedCapacity, - ), - ] - .into_iter() - .chain(info.into_iter().map(|(k, v)| (k, v, RowKind::Func))); + let info = [(&totals_str, &totals, RowKind::Total)] + .into_iter() + .chain(info.into_iter().map(|(k, v)| (k, v, RowKind::Func))); let mut csv = CsvWriter::new( [ @@ -198,7 +178,7 @@ impl HeapSummaryByFunction { .copied() .chain(columns.iter().map(|c| c.0)), ); - for (rowname, info, row_kind) in info { + for (rowname, info, _row_kind) in info { let blank = ArcStr::new_static(""); let callers = info .callers @@ -218,16 +198,8 @@ impl HeapSummaryByFunction { csv.write_value(info.callers.len()); csv.write_value(callers.0.as_str()); csv.write_value(callers.1); - match row_kind { - RowKind::UnusedCapacity => { - csv.write_value(1); - csv.write_value(self.unused_capacity); - } - _ => { - csv.write_value(info.alloc_count()); - csv.write_value(info.alloc_bytes()); - } - } + csv.write_value(info.alloc_count()); + csv.write_value(info.alloc_bytes()); for c in &columns { csv.write_value(info.alloc.get(c.0).unwrap_or(&AllocCounts::default()).count); } @@ -241,8 +213,8 @@ impl HeapSummaryByFunction { mod tests { use crate::environment::Globals; use crate::environment::Module; + use crate::eval::runtime::profile::mode::ProfileMode; use crate::eval::Evaluator; - use crate::eval::ProfileMode; use crate::syntax::AstModule; use crate::syntax::Dialect; use crate::values::layout::heap::profile::aggregated::AggregateHeapProfileInfo; @@ -259,7 +231,7 @@ _ignore = {1: 2} # allocate a dict in drop _ignore = str([1]) # allocate a string in non_drop " .to_owned(), - &Dialect::Extended, + &Dialect::AllOptionsInternal, ) .unwrap(); @@ -278,8 +250,6 @@ _ignore = str([1]) # allocate a string in non_drop // Run the assertions. info.gen_csv(); - assert!(info.unused_capacity > 0); - let total = FuncInfo::merge(info.info.values()); // from non-drop heap assert_eq!(total.alloc.get("string").unwrap().count, 1); diff --git a/starlark-rust/starlark/src/values/layout/heap/repr.rs b/starlark-rust/starlark/src/values/layout/heap/repr.rs index d34ab99502741..7eeb8947131df 100644 --- a/starlark-rust/starlark/src/values/layout/heap/repr.rs +++ b/starlark-rust/starlark/src/values/layout/heap/repr.rs @@ -21,7 +21,6 @@ use std::mem::ManuallyDrop; use std::ptr; use dupe::Dupe; -use either::Either; use crate::any::AnyLifetime; use crate::cast; @@ -77,11 +76,22 @@ pub(crate) struct AValueRepr { pub(crate) struct ForwardPtr(usize); impl ForwardPtr { - pub(crate) fn new(ptr: usize) -> ForwardPtr { + fn new(ptr: usize) -> ForwardPtr { debug_assert!(ptr & 1 == 0); ForwardPtr(ptr) } + /// Create a forward pointer to a frozen value. 
This is used during heap freeze. + pub(crate) fn new_frozen(value: FrozenValue) -> ForwardPtr { + ForwardPtr::new(value.0.raw().ptr_value()) + } + + /// Create a forward pointer to an unfrozen value. This is used during heap GC. + pub(crate) fn new_unfrozen(value: Value) -> ForwardPtr { + debug_assert!(value.unpack_frozen().is_none()); + ForwardPtr::new(value.0.raw().ptr_value() & !1) + } + /// It's caller responsibility to ensure that forward pointer points to a frozen value. pub(crate) unsafe fn unpack_frozen_value(self) -> FrozenValue { FrozenValue::new_ptr_usize_with_str_tag(self.0) @@ -102,6 +112,7 @@ impl ForwardPtr { /// This is object written over [`AValueRepr`] during GC. #[repr(C)] +#[derive(Debug)] pub(crate) struct AValueForward { /// Moved object pointer with lowest bit set. forward_ptr: usize, @@ -149,14 +160,6 @@ impl AValueOrForward { } } - /// Unpack something that might have been overwritten. - pub(crate) fn unpack_overwrite<'v>(&'v self) -> Either> { - match self.unpack() { - AValueOrForwardUnpack::Header(header) => Either::Right(header.unpack()), - AValueOrForwardUnpack::Forward(forward) => Either::Left(forward.forward_ptr()), - } - } - #[inline] pub(crate) unsafe fn unpack_header_unchecked(&self) -> &AValueHeader { debug_assert!(!self.is_forward()); @@ -257,7 +260,7 @@ impl AValueHeader { /// After performing the overwrite any existing pointers to this value /// are corrupted. - pub unsafe fn overwrite_with_forward<'v, T: AValue<'v>>( + pub unsafe fn overwrite_with_forward<'v, T: StarlarkValue<'v>>( me: *mut AValueRepr, forward_ptr: ForwardPtr, ) -> T { @@ -272,12 +275,7 @@ impl AValueHeader { /// Cast header pointer to repr pointer. #[inline] - pub(crate) unsafe fn as_repr<'v, A: AValue<'v>>(&self) -> &AValueRepr { - &*(self.as_repr_v::() as *const _ as *const _) - } - - #[inline] - pub(crate) unsafe fn as_repr_v<'v, T: StarlarkValue<'v>>(&self) -> &AValueRepr { + pub(crate) unsafe fn as_repr<'v, T: StarlarkValue<'v>>(&self) -> &AValueRepr { debug_assert_eq!(T::static_type_id(), self.0.static_type_of_value.get()); &*(self as *const AValueHeader as *const AValueRepr) } diff --git a/starlark-rust/starlark/src/values/layout/mod.rs b/starlark-rust/starlark/src/values/layout/mod.rs deleted file mode 100644 index 8678341be2bed..0000000000000 --- a/starlark-rust/starlark/src/values/layout/mod.rs +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -// Possible optimisations: -// Encoding none, bool etc in the pointer of frozen value - -pub(crate) mod aligned_size; -pub(crate) mod avalue; -pub(crate) mod complex; -mod const_frozen_string; -pub(crate) mod const_type_id; -pub(crate) mod heap; -pub(crate) mod identity; -pub(crate) mod pointer; -pub(crate) mod static_string; -pub(crate) mod typed; -pub(crate) mod value; -pub(crate) mod value_alloc_size; -pub(crate) mod value_captured; -pub(crate) mod value_not_special; -pub(crate) mod vtable; diff --git a/starlark-rust/starlark/src/values/layout/pointer.rs b/starlark-rust/starlark/src/values/layout/pointer.rs index a6248d4f02b4c..27b3da8176eb9 100644 --- a/starlark-rust/starlark/src/values/layout/pointer.rs +++ b/starlark-rust/starlark/src/values/layout/pointer.rs @@ -36,10 +36,10 @@ use either::Either; use static_assertions::assert_eq_size; use crate::cast; -use crate::values::int::PointerI32; +use crate::values::int::pointer_i32::PointerI32; use crate::values::layout::heap::repr::AValueHeader; use crate::values::layout::heap::repr::AValueOrForward; -use crate::values::types::inline_int::InlineInt; +use crate::values::types::int::inline_int::InlineInt; /// Tagged pointer logically equivalent to `*mut AValueHeader`. #[derive(Clone, Copy, Dupe, PartialEq, Eq, Hash, Allocative)] @@ -57,7 +57,12 @@ impl RawPointer { #[inline] pub(crate) unsafe fn new_unchecked(ptr: usize) -> RawPointer { debug_assert!(ptr != 0); - RawPointer(NonZeroUsize::new_unchecked(ptr)) + let ptr = RawPointer(NonZeroUsize::new_unchecked(ptr)); + + // Run debug assertions. + let _ignore = PointerTags::from_pointer(ptr); + + ptr } #[inline] @@ -88,19 +93,24 @@ impl RawPointer { self.0.get() } + #[inline] + pub(crate) fn tags(self) -> PointerTags { + PointerTags::from_pointer(self) + } + #[inline] pub(crate) fn is_str(self) -> bool { - (self.0.get() & TAG_STR) != 0 + self.tags().is_str() } #[inline] pub(crate) fn is_int(self) -> bool { - (self.0.get() & TAG_INT) != 0 + self.tags().is_int() } #[inline] pub(crate) fn is_unfrozen(self) -> bool { - (self.0.get() & TAG_UNFROZEN) != 0 + self.tags().is_unfrozen() } #[inline] @@ -176,6 +186,68 @@ const TAG_STR: usize = 0b100; // Note, an object can be changed from unfrozen to frozen, not vice versa. const TAG_UNFROZEN: usize = 0b001; +/// All possible tag values, three least significant bits of a pointer. +#[repr(usize)] +#[derive(Copy, Clone, Eq, PartialEq, Debug)] +pub(crate) enum PointerTags { + Int = TAG_INT, + StrUnfrozen = TAG_STR | TAG_UNFROZEN, + StrFrozen = TAG_STR, + OtherUnfrozen = TAG_UNFROZEN, + OtherFrozen = 0, +} + +impl PointerTags { + #[inline] + unsafe fn from_usize_unchecked(x: usize) -> Self { + debug_assert!( + x == PointerTags::Int as usize + || x == PointerTags::StrUnfrozen as usize + || x == PointerTags::StrFrozen as usize + || x == PointerTags::OtherUnfrozen as usize + || x == PointerTags::OtherFrozen as usize + ); + unsafe { mem::transmute(x) } + } + + #[inline] + fn from_pointer(ptr: RawPointer) -> Self { + unsafe { Self::from_usize_unchecked(ptr.0.get() & TAG_MASK) } + } + + #[inline] + fn to_usize(self) -> usize { + self as usize + } + + /// String value, frozen or not. + #[inline] + fn is_str(self) -> bool { + self.to_usize() & TAG_STR != 0 + } + + /// Inline integer. + #[inline] + fn is_int(self) -> bool { + self == PointerTags::Int + } + + /// Not frozen, not an integer. 
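
// ---- [editorial aside] ----------------------------------------------------
// Standalone sketch of the tag scheme `PointerTags` models above: the three
// low bits of an aligned pointer word encode int / str / unfrozen. TAG_STR
// and TAG_UNFROZEN mirror the constants in this file; the exact TAG_INT bit
// is an assumption for illustration, and this decoder uses a safe `match`
// where the real code transmutes after a debug assertion.
const TAG_MASK: usize = 0b111;
const TAG_STR: usize = 0b100;
const TAG_INT: usize = 0b010; // assumed value, for the sketch only
const TAG_UNFROZEN: usize = 0b001;

#[derive(Debug, PartialEq)]
enum Tag {
    Int,
    StrUnfrozen,
    StrFrozen,
    OtherUnfrozen,
    OtherFrozen,
}

fn decode(word: usize) -> Option<Tag> {
    match word & TAG_MASK {
        TAG_INT => Some(Tag::Int),
        x if x == TAG_STR | TAG_UNFROZEN => Some(Tag::StrUnfrozen),
        TAG_STR => Some(Tag::StrFrozen),
        TAG_UNFROZEN => Some(Tag::OtherUnfrozen),
        0 => Some(Tag::OtherFrozen),
        _ => None, // invalid combination, e.g. int + str
    }
}

fn main() {
    assert_eq!(decode(0x1000 | TAG_STR), Some(Tag::StrFrozen));
    assert_eq!(decode(0x1000 | TAG_STR | TAG_UNFROZEN), Some(Tag::StrUnfrozen));
    assert_eq!(decode(0x1000 | TAG_INT | TAG_STR), None);
}
// ---------------------------------------------------------------------------
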
+ #[inline] + fn is_unfrozen(self) -> bool { + self.to_usize() & TAG_UNFROZEN != 0 + } +} + +/// All possible tag values for frozen pointers, three least significant bits of a pointer. +#[repr(usize)] +#[derive(Copy, Clone, Eq, PartialEq, Debug)] +enum _FrozenPointerTags { + Int = TAG_INT, + Str = TAG_STR, + Other = 0, +} + /// `InlineInt` is shift by this number of bits to the left to be stored in a pointer. const INT_SHIFT: usize = mem::size_of::() * 8 - InlineInt::BITS; const INT_DATA_MASK: usize = ((1usize << InlineInt::BITS) - 1) << INT_SHIFT; @@ -203,7 +275,7 @@ impl<'p> Pointer<'p> { } #[inline] - pub fn new_unfrozen(x: &'p AValueHeader, is_string: bool) -> Self { + pub(crate) fn new_unfrozen(x: &'p AValueHeader, is_string: bool) -> Self { unsafe { Self::new(RawPointer::new_unfrozen(x, is_string)) } } @@ -213,12 +285,12 @@ impl<'p> Pointer<'p> { } #[inline] - pub fn is_unfrozen(self) -> bool { + pub(crate) fn is_unfrozen(self) -> bool { self.ptr.is_unfrozen() } #[inline] - pub fn unpack(self) -> Either<&'p AValueOrForward, &'static PointerI32> { + pub(crate) fn unpack(self) -> Either<&'p AValueOrForward, &'static PointerI32> { if !self.ptr.is_int() { Either::Left(unsafe { self.ptr.unpack_ptr_no_int_unchecked() }) } else { @@ -227,15 +299,14 @@ impl<'p> Pointer<'p> { } #[inline] - pub fn unpack_int(self) -> Option { + pub(crate) fn unpack_int(self) -> Option { self.ptr.unpack_int() } #[inline] - pub fn unpack_ptr(self) -> Option<&'p AValueOrForward> { - let p = self.ptr.0.get(); - if p & TAG_INT == 0 { - Some(unsafe { untag_pointer(p) }) + pub(crate) fn unpack_ptr(self) -> Option<&'p AValueOrForward> { + if !self.ptr.is_int() { + Some(unsafe { untag_pointer(self.ptr.0.get()) }) } else { None } @@ -245,7 +316,7 @@ impl<'p> Pointer<'p> { #[inline] pub(crate) unsafe fn unpack_ptr_no_int_unchecked(self) -> &'p AValueOrForward { let p = self.ptr.0.get(); - debug_assert!(p & TAG_INT == 0); + debug_assert!(!self.ptr.is_int()); untag_pointer(p) } @@ -256,17 +327,17 @@ impl<'p> Pointer<'p> { } #[inline] - pub fn ptr_eq(self, other: Pointer<'_>) -> bool { + pub(crate) fn ptr_eq(self, other: Pointer<'_>) -> bool { self.ptr == other.ptr } #[inline] - pub fn raw(self) -> RawPointer { + pub(crate) fn raw(self) -> RawPointer { self.ptr } #[inline] - pub unsafe fn cast_lifetime<'p2>(self) -> Pointer<'p2> { + pub(crate) unsafe fn cast_lifetime<'p2>(self) -> Pointer<'p2> { Pointer { ptr: self.ptr, _phantom: PhantomData, @@ -290,7 +361,7 @@ impl<'p> FrozenPointer<'p> { } #[inline] - pub fn new_frozen_usize_with_str_tag(x: usize) -> Self { + pub(crate) fn new_frozen_usize_with_str_tag(x: usize) -> Self { debug_assert!((x & TAG_MASK & !TAG_STR) == 0); unsafe { Self::new(RawPointer::new_unchecked(x)) } } @@ -302,10 +373,7 @@ impl<'p> FrozenPointer<'p> { #[inline] pub(crate) fn new_int(x: InlineInt) -> Self { - FrozenPointer { - ptr: RawPointer::new_int(x), - phantom: PhantomData, - } + unsafe { Self::new(RawPointer::new_int(x)) } } /// It is safe to bitcast `FrozenPointer` to `Pointer` @@ -336,12 +404,11 @@ impl<'p> FrozenPointer<'p> { self.ptr.unpack_pointer_i32_unchecked() } - /// Unpack pointer when it is known to be not an integer, not a string, and not frozen. + /// Unpack pointer when it is known to be frozen, not an integer, not a string. 
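
// ---- [editorial aside] ----------------------------------------------------
// Why the frozen, non-str, non-int case below can skip untagging entirely:
// if all three tag bits are zero, the stored word *is* the address, so the
// masking AND can be elided. Hypothetical sketch, not the starlark-rust API.
const TAG_MASK: usize = 0b111;

#[repr(C)]
struct Header(u64);

fn untag(word: usize) -> *const Header {
    // General case: strip the tag bits before casting.
    (word & !TAG_MASK) as *const Header
}

fn untag_all_tags_zero(word: usize) -> *const Header {
    // Fast case: the caller guarantees the tag bits are clear, so no mask
    // is needed -- the "slightly more efficient machine code" the file
    // comments mention.
    debug_assert!(word & TAG_MASK == 0);
    word as *const Header
}

fn main() {
    let h = Header(0);
    let addr = &h as *const Header as usize; // 8-aligned: low bits are zero
    assert_eq!(untag(addr), untag_all_tags_zero(addr));
}
// ---------------------------------------------------------------------------
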
     #[inline]
     pub(crate) unsafe fn unpack_ptr_no_int_no_str_unchecked(self) -> &'p AValueOrForward {
-        let p = self.ptr.0.get();
-        debug_assert!(p & TAG_MASK == 0);
-        cast::usize_to_ptr(p)
+        debug_assert!(self.ptr.tags() == PointerTags::OtherFrozen);
+        cast::usize_to_ptr(self.ptr.0.get())
     }
 }
diff --git a/starlark-rust/starlark/src/values/layout/static_string.rs b/starlark-rust/starlark/src/values/layout/static_string.rs
index d8dcc1190cd0b..6a0c62ea87d25 100644
--- a/starlark-rust/starlark/src/values/layout/static_string.rs
+++ b/starlark-rust/starlark/src/values/layout/static_string.rs
@@ -22,8 +22,8 @@ use std::sync::atomic::AtomicU32;
 
 use crate::values::layout::avalue::VALUE_STR_A_VALUE_PTR;
 use crate::values::layout::heap::repr::AValueRepr;
-use crate::values::string::StarlarkStr;
-use crate::values::string::StarlarkStrN;
+use crate::values::string::str_type::StarlarkStr;
+use crate::values::string::str_type::StarlarkStrN;
 use crate::values::FrozenStringValue;
 use crate::values::FrozenValue;
diff --git a/starlark-rust/starlark/src/values/layout/typed.rs b/starlark-rust/starlark/src/values/layout/typed.rs
new file mode 100644
index 0000000000000..0386a688b826d
--- /dev/null
+++ b/starlark-rust/starlark/src/values/layout/typed.rs
@@ -0,0 +1,507 @@
+/*
+ * Copyright 2019 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +pub(crate) mod string; + +use std::convert::Infallible; +use std::fmt; +use std::fmt::Debug; +use std::fmt::Display; +use std::fmt::Formatter; +use std::marker; +use std::ops::Deref; + +use allocative::Allocative; +use dupe::Clone_; +use dupe::Copy_; +use dupe::Dupe_; +use serde::Serialize; +use starlark_map::Hashed; + +use crate as starlark; +use crate::any::AnyLifetime; +use crate::any::ProvidesStaticType; +use crate::cast; +use crate::cast::transmute; +use crate::coerce::Coerce; +use crate::coerce::CoerceKey; +use crate::typing::Ty; +use crate::values::alloc_value::AllocFrozenStringValue; +use crate::values::alloc_value::AllocStringValue; +use crate::values::int::pointer_i32::PointerI32; +use crate::values::layout::avalue::AValue; +use crate::values::layout::avalue::AValueImpl; +use crate::values::layout::heap::repr::AValueRepr; +use crate::values::starlark_type_id::StarlarkTypeId; +use crate::values::string::str_type::StarlarkStr; +use crate::values::type_repr::StarlarkTypeRepr; +use crate::values::AllocFrozenValue; +use crate::values::AllocValue; +use crate::values::Freeze; +use crate::values::Freezer; +use crate::values::FrozenHeap; +use crate::values::FrozenRef; +use crate::values::FrozenStringValue; +use crate::values::FrozenValue; +use crate::values::FrozenValueOfUnchecked; +use crate::values::Heap; +use crate::values::StarlarkValue; +use crate::values::StringValue; +use crate::values::StringValueLike; +use crate::values::Trace; +use crate::values::Tracer; +use crate::values::UnpackValue; +use crate::values::Value; +use crate::values::ValueLike; +use crate::values::ValueOfUnchecked; + +/// [`Value`] wrapper which asserts contained value is of type ``. +#[derive(Copy_, Clone_, Dupe_, ProvidesStaticType, Allocative)] +#[allocative(skip)] // Heap owns the value. +pub struct ValueTyped<'v, T: StarlarkValue<'v>>(Value<'v>, marker::PhantomData<&'v T>); +/// [`FrozenValue`] wrapper which asserts contained value is of type ``. +#[derive(Copy_, Clone_, Dupe_, ProvidesStaticType, Allocative)] +#[allocative(skip)] // Heap owns the value. 
+pub struct FrozenValueTyped<'v, T: StarlarkValue<'v>>(FrozenValue, marker::PhantomData<&'v T>); + +unsafe impl<'v, T: StarlarkValue<'v>> Coerce> for ValueTyped<'v, T> {} +unsafe impl<'v, T: StarlarkValue<'v>> CoerceKey> for ValueTyped<'v, T> {} +unsafe impl<'v, T: StarlarkValue<'v>> Coerce> for ValueTyped<'v, T> {} +unsafe impl<'v, T: StarlarkValue<'v>> CoerceKey> for ValueTyped<'v, T> {} +unsafe impl<'v, T: StarlarkValue<'v>> Coerce> for FrozenValueTyped<'v, T> {} +unsafe impl<'v, T: StarlarkValue<'v>> CoerceKey> + for FrozenValueTyped<'v, T> +{ +} +unsafe impl<'v, T: StarlarkValue<'v>> Coerce> for FrozenValueTyped<'v, T> {} +unsafe impl<'v, T: StarlarkValue<'v>> CoerceKey> for FrozenValueTyped<'v, T> {} + +unsafe impl<'v, 'f, T: StarlarkValue<'f>> Trace<'v> for FrozenValueTyped<'f, T> { + fn trace(&mut self, _tracer: &Tracer<'v>) {} +} + +impl> Freeze for FrozenValueTyped<'static, T> { + type Frozen = Self; + + fn freeze(self, _freezer: &Freezer) -> anyhow::Result { + Ok(self) + } +} + +impl<'v, T: StarlarkValue<'v>> Debug for ValueTyped<'v, T> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_tuple("ValueTyped").field(&self.0).finish() + } +} + +impl<'v, T: StarlarkValue<'v>> Debug for FrozenValueTyped<'v, T> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_tuple("FrozenValueTyped").field(&self.0).finish() + } +} + +impl<'v, T: StarlarkValue<'v>> Display for ValueTyped<'v, T> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + Display::fmt(&self.0, f) + } +} + +impl<'v, T: StarlarkValue<'v>> Display for FrozenValueTyped<'v, T> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + Display::fmt(&self.0, f) + } +} + +impl<'v, T: StarlarkValue<'v>> Serialize for ValueTyped<'v, T> { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + self.0.serialize(serializer) + } +} + +impl<'v, T: StarlarkValue<'v>> Serialize for FrozenValueTyped<'v, T> { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + self.0.serialize(serializer) + } +} + +impl<'v, T: StarlarkValue<'v>> PartialEq for ValueTyped<'v, T> { + fn eq(&self, other: &Self) -> bool { + // Poor man specialization. + if T::static_type_id() == StarlarkStr::static_type_id() { + // SAFETY: just checked type ids. + let (this, other) = unsafe { + ( + StringValue::new_unchecked(self.0), + StringValue::new_unchecked(other.0), + ) + }; + this.0.ptr_eq(other.0) || StarlarkStr::eq(this.as_ref(), other.as_ref()) + } else { + // Slow comparison with virtual call. + self.0 == other.0 + } + } +} + +impl<'v, T: StarlarkValue<'v>> Eq for ValueTyped<'v, T> {} + +impl<'v, T: StarlarkValue<'v>> PartialEq for FrozenValueTyped<'v, T> { + fn eq(&self, other: &Self) -> bool { + self.to_value_typed() == other.to_value_typed() + } +} + +impl<'v, T: StarlarkValue<'v>> Eq for FrozenValueTyped<'v, T> {} + +impl<'v, T: StarlarkValue<'v>> ValueTyped<'v, T> { + /// Downcast. + #[inline] + pub fn new(value: Value<'v>) -> Option> { + value.downcast_ref::()?; + Some(ValueTyped(value, marker::PhantomData)) + } + + /// Downcast. + #[inline] + pub fn new_err(value: Value<'v>) -> anyhow::Result> { + value.downcast_ref_err::()?; + Ok(ValueTyped(value, marker::PhantomData)) + } + + /// Construct typed value without checking the value is of type ``. 
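
// ---- [editorial aside] ----------------------------------------------------
// The shape of `ValueTyped` in miniature: a phantom-typed wrapper over an
// untyped handle, checked once at construction and trusted afterwards.
// Standalone sketch over `dyn Any`; `Typed` and `get` are hypothetical
// names, not the starlark-rust API.
use std::any::Any;
use std::marker::PhantomData;

struct Typed<'a, T: 'static>(&'a dyn Any, PhantomData<T>);

impl<'a, T: 'static> Typed<'a, T> {
    /// Downcast once; `None` if the dynamic type does not match.
    fn new(value: &'a dyn Any) -> Option<Self> {
        value.downcast_ref::<T>()?;
        Some(Typed(value, PhantomData))
    }

    /// After construction the cast is known-good; the real code uses an
    /// unchecked downcast here, this sketch keeps the safe one.
    fn get(&self) -> &'a T {
        self.0.downcast_ref::<T>().unwrap()
    }
}

fn main() {
    let s = String::from("hi");
    let t = Typed::<String>::new(&s).unwrap();
    assert_eq!(t.get().as_str(), "hi");
    assert!(Typed::<i32>::new(&s).is_none());
}
// ---------------------------------------------------------------------------
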
+ #[inline] + pub unsafe fn new_unchecked(value: Value<'v>) -> ValueTyped<'v, T> { + debug_assert!(value.downcast_ref::().is_some()); + ValueTyped(value, marker::PhantomData) + } + + #[inline] + pub(crate) fn new_repr>( + repr: &'v AValueRepr>, + ) -> ValueTyped<'v, T> { + ValueTyped(Value::new_repr(repr), marker::PhantomData) + } + + /// Erase the type. + #[inline] + pub fn to_value(self) -> Value<'v> { + self.0 + } + + /// Get the reference to the pointed value. + #[inline] + pub fn as_ref(self) -> &'v T { + // SAFETY: type is checked in constructor. + unsafe { self.0.downcast_ref_unchecked() } + } + + /// Compute the hash value. + pub fn hashed(self) -> crate::Result> { + let hash = if let Some(s) = self.to_value().unpack_starlark_str() { + s.get_hash() + } else { + self.to_value().get_hash()? + }; + Ok(Hashed::new_unchecked(hash, self)) + } + + /// Convert to another `Value` wrapper. + #[inline] + pub fn to_value_of_unchecked(self) -> ValueOfUnchecked<'v, T> { + ValueOfUnchecked::new(self.to_value()) + } +} + +impl<'v, T: StarlarkValue<'v>> FrozenValueTyped<'v, T> { + pub(crate) fn is_str() -> bool { + T::static_type_id() == StarlarkStr::static_type_id() + } + + pub(crate) fn is_pointer_i32() -> bool { + PointerI32::type_is_pointer_i32::() + } + + /// Construct `FrozenValueTyped` without checking that the value is of correct type. + #[inline] + pub unsafe fn new_unchecked(value: FrozenValue) -> FrozenValueTyped<'v, T> { + debug_assert!(value.downcast_ref::().is_some()); + FrozenValueTyped(value, marker::PhantomData) + } + + /// Downcast. + #[inline] + pub fn new(value: FrozenValue) -> Option> { + value.downcast_ref::()?; + Some(FrozenValueTyped(value, marker::PhantomData)) + } + + /// Downcast. + #[inline] + pub fn new_err(value: FrozenValue) -> anyhow::Result> { + value.downcast_ref_err::()?; + Ok(FrozenValueTyped(value, marker::PhantomData)) + } + + #[inline] + pub(crate) fn new_repr>( + repr: &'v AValueRepr>, + ) -> FrozenValueTyped<'v, T> { + // drop lifetime: `FrozenValue` is not (yet) parameterized with lifetime. + let header = unsafe { cast::ptr_lifetime(&repr.header) }; + FrozenValueTyped(FrozenValue::new_ptr(header, A::IS_STR), marker::PhantomData) + } + + /// Erase the type. + #[inline] + pub fn to_frozen_value(self) -> FrozenValue { + self.0 + } + + /// Convert to the value. + #[inline] + pub fn to_value(self) -> Value<'v> { + self.0.to_value() + } + + /// Convert to the value. + #[inline] + pub fn to_value_typed(self) -> ValueTyped<'v, T> { + unsafe { ValueTyped::new_unchecked(self.0.to_value()) } + } + + /// Get the reference to the pointed value. + #[inline] + pub fn as_ref(self) -> &'v T { + if Self::is_pointer_i32() { + unsafe { transmute!(&PointerI32, &T, self.0.0.unpack_pointer_i32_unchecked()) } + } else if Self::is_str() { + unsafe { + self.0 + .0 + .unpack_ptr_no_int_unchecked() + .unpack_header_unchecked() + .payload::() + } + } else { + // When a frozen pointer is not str and not int, + // unpack is does not need untagging. + // This generates slightly more efficient machine code. + unsafe { + self.0 + .0 + .unpack_ptr_no_int_no_str_unchecked() + .unpack_header_unchecked() + .payload::() + } + } + } + + #[inline] + pub(crate) fn as_frozen_ref(self) -> FrozenRef<'v, T> { + FrozenRef::new(self.as_ref()) + } + + /// Convert to another `FrozenValue` wrapper. 
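
// ---- [editorial aside] ----------------------------------------------------
// The pointer-first comparison used by the `PartialEq` "poor man
// specialization" above, reduced to plain `&str`: identical pointers
// short-circuit, and contents are only compared for distinct allocations.
fn str_eq(a: &str, b: &str) -> bool {
    std::ptr::eq(a, b) || a == b
}

fn main() {
    let interned: &str = "xyz";
    let a = interned;
    let b = interned; // same allocation: fast path, no content scan
    let owned = String::from("xyz");
    assert!(str_eq(a, b));
    assert!(str_eq(a, &owned)); // different allocation: content compare
}
// ---------------------------------------------------------------------------
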
+ #[inline] + pub fn to_value_of_unchecked(self) -> FrozenValueOfUnchecked<'v, T> { + FrozenValueOfUnchecked::new(self.to_frozen_value()) + } +} + +impl<'v> ValueTyped<'v, StarlarkStr> { + /// Get the Rust string reference. + #[inline] + pub fn as_str(self) -> &'v str { + self.as_ref().as_str() + } +} + +impl<'v> FrozenValueTyped<'v, StarlarkStr> { + /// Get the Rust string reference. + #[inline] + pub fn as_str(self) -> &'v str { + self.as_ref().as_str() + } +} + +unsafe impl<'v, T: StarlarkValue<'v>> Trace<'v> for ValueTyped<'v, T> { + fn trace(&mut self, tracer: &Tracer<'v>) { + tracer.trace(&mut self.0); + // If type of value changed, dereference will produce the wrong object type. + debug_assert!(self.0.downcast_ref::().is_some()); + } +} + +impl<'v, T: StarlarkValue<'v>> Deref for FrozenValueTyped<'v, T> { + type Target = T; + + #[inline] + fn deref(&self) -> &T { + self.as_ref() + } +} + +impl<'v, T: StarlarkValue<'v>> Deref for ValueTyped<'v, T> { + type Target = T; + + #[inline] + fn deref(&self) -> &T { + self.as_ref() + } +} + +impl<'v, T: StarlarkValue<'v>> StarlarkTypeRepr for ValueTyped<'v, T> { + type Canonical = ::Canonical; + + fn starlark_type_repr() -> Ty { + T::starlark_type_repr() + } +} + +impl<'v, T: StarlarkValue<'v>> UnpackValue<'v> for ValueTyped<'v, T> { + type Error = Infallible; + + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + Ok(ValueTyped::new(value)) + } +} + +impl<'v, T: StarlarkValue<'v>> AllocValue<'v> for ValueTyped<'v, T> { + fn alloc_value(self, _heap: &'v Heap) -> Value<'v> { + self.0 + } +} + +impl<'v> AllocStringValue<'v> for StringValue<'v> { + fn alloc_string_value(self, _heap: &'v Heap) -> StringValue<'v> { + self + } +} + +impl<'v, T: StarlarkValue<'v>> StarlarkTypeRepr for FrozenValueTyped<'v, T> { + type Canonical = ::Canonical; + + fn starlark_type_repr() -> Ty { + T::starlark_type_repr() + } +} + +impl<'v, T: StarlarkValue<'v>> UnpackValue<'v> for FrozenValueTyped<'v, T> { + type Error = crate::Error; + + fn unpack_value_impl(value: Value<'v>) -> crate::Result> { + if let Some(value) = value.unpack_frozen() { + if let Some(value) = FrozenValueTyped::new(value) { + return Ok(Some(value)); + } + } else if StarlarkTypeId::of::() == value.vtable().starlark_type_id { + #[derive(thiserror::Error, Debug)] + #[error("Expected frozen value of type `{expected}`, got unfrozen: `{value}`")] + struct NotFrozenError { + expected: Ty, + value: String, + } + + return Err(crate::Error::new_value(NotFrozenError { + expected: T::starlark_type_repr(), + value: value.to_string_for_type_error(), + })); + } + + Ok(None) + } +} + +impl<'v, 'f, T: StarlarkValue<'f>> AllocValue<'v> for FrozenValueTyped<'f, T> { + fn alloc_value(self, _heap: &'v Heap) -> Value<'v> { + self.0.to_value() + } +} + +impl<'v> AllocStringValue<'v> for FrozenStringValue { + fn alloc_string_value(self, _heap: &'v Heap) -> StringValue<'v> { + self.to_string_value() + } +} + +impl<'v, T: StarlarkValue<'v>> AllocFrozenValue for FrozenValueTyped<'v, T> { + fn alloc_frozen_value(self, _heap: &FrozenHeap) -> FrozenValue { + self.0 + } +} + +impl AllocFrozenStringValue for FrozenStringValue { + fn alloc_frozen_string_value(self, _heap: &FrozenHeap) -> FrozenStringValue { + self + } +} + +#[cfg(test)] +mod tests { + use starlark_derive::starlark_module; + + use crate as starlark; + use crate::assert::Assert; + use crate::environment::GlobalsBuilder; + use crate::tests::util::TestComplexValue; + use crate::values::int::pointer_i32::PointerI32; + use crate::values::none::NoneType; 
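
// ---- [editorial aside] ----------------------------------------------------
// The `UnpackValue` impl for `FrozenValueTyped` above distinguishes three
// outcomes: wrong type (`Ok(None)`, let other unpackers try), right type
// but unfrozen (a hard error with a helpful message), and success. A toy
// model of that three-way split; `Value` and `NotFrozenError` here are
// stand-ins, not the real types.
#[derive(Debug, PartialEq)]
enum Value {
    FrozenInt(i32),
    MutableInt(i32),
    Str(&'static str),
}

#[derive(Debug, PartialEq)]
struct NotFrozenError;

/// `Ok(None)` = not an int at all; `Err(_)` = an int, but not frozen.
fn unpack_frozen_int(v: &Value) -> Result<Option<i32>, NotFrozenError> {
    match v {
        Value::FrozenInt(i) => Ok(Some(*i)),
        Value::MutableInt(_) => Err(NotFrozenError),
        _ => Ok(None),
    }
}

fn main() {
    assert_eq!(unpack_frozen_int(&Value::FrozenInt(1)), Ok(Some(1)));
    assert_eq!(unpack_frozen_int(&Value::MutableInt(1)), Err(NotFrozenError));
    assert_eq!(unpack_frozen_int(&Value::Str("x")), Ok(None));
}
// ---------------------------------------------------------------------------
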
+ use crate::values::FrozenValue; + use crate::values::FrozenValueTyped; + use crate::values::Value; + + #[test] + fn int() { + let v = FrozenValueTyped::::new(FrozenValue::testing_new_int(17)).unwrap(); + assert_eq!(17, v.as_ref().get().to_i32()); + } + + #[test] + fn test_unpack_value_for_frozen_value_typed() { + #[starlark_module] + fn module(globals: &mut GlobalsBuilder) { + fn mutable<'v>() -> anyhow::Result>> { + Ok(TestComplexValue(Value::new_none())) + } + + const FROZEN: TestComplexValue = TestComplexValue(FrozenValue::new_none()); + + fn takes_frozen_value_typed<'v>( + value: FrozenValueTyped<'v, TestComplexValue>, + ) -> anyhow::Result { + let _ = value; + Ok(NoneType) + } + } + + let mut a = Assert::new(); + a.globals_add(module); + + a.pass("takes_frozen_value_typed(FROZEN)"); + a.fail("takes_frozen_value_typed(1)", "Type of parameter `value` doesn't match, expected `TestComplexValue`, actual `int (repr: 1)`"); + a.fail( + "takes_frozen_value_typed(mutable())", + "Expected frozen value", + ); + } +} diff --git a/starlark-rust/starlark/src/values/layout/typed/mod.rs b/starlark-rust/starlark/src/values/layout/typed/mod.rs deleted file mode 100644 index 2658050266c53..0000000000000 --- a/starlark-rust/starlark/src/values/layout/typed/mod.rs +++ /dev/null @@ -1,370 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -pub(crate) mod string; - -use std::fmt; -use std::fmt::Debug; -use std::fmt::Display; -use std::fmt::Formatter; -use std::marker; -use std::ops::Deref; - -use allocative::Allocative; -use dupe::Clone_; -use dupe::Copy_; -use dupe::Dupe_; -use serde::Serialize; - -use crate as starlark; -use crate::any::AnyLifetime; -use crate::any::ProvidesStaticType; -use crate::cast; -use crate::cast::transmute; -use crate::coerce::Coerce; -use crate::coerce::CoerceKey; -use crate::typing::Ty; -use crate::values::alloc_value::AllocFrozenStringValue; -use crate::values::alloc_value::AllocStringValue; -use crate::values::int::PointerI32; -use crate::values::layout::avalue::AValue; -use crate::values::layout::heap::repr::AValueRepr; -use crate::values::string::StarlarkStr; -use crate::values::type_repr::StarlarkTypeRepr; -use crate::values::AllocFrozenValue; -use crate::values::AllocValue; -use crate::values::Freeze; -use crate::values::Freezer; -use crate::values::FrozenHeap; -use crate::values::FrozenRef; -use crate::values::FrozenStringValue; -use crate::values::FrozenValue; -use crate::values::Heap; -use crate::values::StarlarkValue; -use crate::values::StringValue; -use crate::values::StringValueLike; -use crate::values::Trace; -use crate::values::Tracer; -use crate::values::UnpackValue; -use crate::values::Value; -use crate::values::ValueLike; - -/// [`Value`] wrapper which asserts contained value is of type ``. -#[derive(Copy_, Clone_, Dupe_, ProvidesStaticType, Allocative)] -#[allocative(skip)] // Heap owns the value. 
-pub struct ValueTyped<'v, T: StarlarkValue<'v>>(Value<'v>, marker::PhantomData<&'v T>); -/// [`FrozenValue`] wrapper which asserts contained value is of type ``. -#[derive(Copy_, Clone_, Dupe_, ProvidesStaticType, Allocative)] -#[allocative(skip)] // Heap owns the value. -pub struct FrozenValueTyped<'v, T: StarlarkValue<'v>>(FrozenValue, marker::PhantomData<&'v T>); - -unsafe impl<'v, T: StarlarkValue<'v>> Coerce> for ValueTyped<'v, T> {} -unsafe impl<'v, T: StarlarkValue<'v>> CoerceKey> for ValueTyped<'v, T> {} -unsafe impl<'v, T: StarlarkValue<'v>> Coerce> for ValueTyped<'v, T> {} -unsafe impl<'v, T: StarlarkValue<'v>> CoerceKey> for ValueTyped<'v, T> {} -unsafe impl<'v, T: StarlarkValue<'v>> Coerce> for FrozenValueTyped<'v, T> {} -unsafe impl<'v, T: StarlarkValue<'v>> CoerceKey> - for FrozenValueTyped<'v, T> -{ -} -unsafe impl<'v, T: StarlarkValue<'v>> Coerce> for FrozenValueTyped<'v, T> {} -unsafe impl<'v, T: StarlarkValue<'v>> CoerceKey> for FrozenValueTyped<'v, T> {} - -unsafe impl<'v, 'f, T: StarlarkValue<'f>> Trace<'v> for FrozenValueTyped<'f, T> { - fn trace(&mut self, _tracer: &Tracer<'v>) {} -} - -impl> Freeze for FrozenValueTyped<'static, T> { - type Frozen = Self; - - fn freeze(self, _freezer: &Freezer) -> anyhow::Result { - Ok(self) - } -} - -impl<'v, T: StarlarkValue<'v>> Debug for ValueTyped<'v, T> { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - f.debug_tuple("ValueTyped").field(&self.0).finish() - } -} - -impl<'v, T: StarlarkValue<'v>> Debug for FrozenValueTyped<'v, T> { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - f.debug_tuple("FrozenValueTyped").field(&self.0).finish() - } -} - -impl<'v, T: StarlarkValue<'v>> Display for ValueTyped<'v, T> { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - Display::fmt(&self.0, f) - } -} - -impl<'v, T: StarlarkValue<'v>> Display for FrozenValueTyped<'v, T> { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - Display::fmt(&self.0, f) - } -} - -impl<'v, T: StarlarkValue<'v>> Serialize for ValueTyped<'v, T> { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - self.0.serialize(serializer) - } -} - -impl<'v, T: StarlarkValue<'v>> Serialize for FrozenValueTyped<'v, T> { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - self.0.serialize(serializer) - } -} - -impl<'v, T: StarlarkValue<'v>> ValueTyped<'v, T> { - /// Downcast. - #[inline] - pub fn new(value: Value<'v>) -> Option> { - value.downcast_ref::()?; - Some(ValueTyped(value, marker::PhantomData)) - } - - /// Downcast. - #[inline] - pub fn new_err(value: Value<'v>) -> anyhow::Result> { - value.downcast_ref_err::()?; - Ok(ValueTyped(value, marker::PhantomData)) - } - - /// Construct typed value without checking the value is of type ``. - #[inline] - pub unsafe fn new_unchecked(value: Value<'v>) -> ValueTyped<'v, T> { - debug_assert!(value.downcast_ref::().is_some()); - ValueTyped(value, marker::PhantomData) - } - - #[inline] - pub(crate) fn new_repr>( - repr: &'v AValueRepr, - ) -> ValueTyped<'v, T> { - ValueTyped(Value::new_repr(repr), marker::PhantomData) - } - - /// Erase the type. - #[inline] - pub fn to_value(self) -> Value<'v> { - self.0 - } - - /// Get the reference to the pointed value. - #[inline] - pub fn as_ref(self) -> &'v T { - // SAFETY: type is checked in constructor. - unsafe { self.0.downcast_ref_unchecked() } - } -} - -impl<'v, T: StarlarkValue<'v>> FrozenValueTyped<'v, T> { - /// Construct `FrozenValueTyped` without checking that the value is of correct type. 
- #[inline] - pub unsafe fn new_unchecked(value: FrozenValue) -> FrozenValueTyped<'v, T> { - debug_assert!(value.downcast_ref::().is_some()); - FrozenValueTyped(value, marker::PhantomData) - } - - /// Downcast. - #[inline] - pub fn new(value: FrozenValue) -> Option> { - value.downcast_ref::()?; - Some(FrozenValueTyped(value, marker::PhantomData)) - } - - #[inline] - pub(crate) fn new_repr>( - repr: &'v AValueRepr, - ) -> FrozenValueTyped<'v, T> { - // drop lifetime: `FrozenValue` is not (yet) parameterized with lifetime. - let header = unsafe { cast::ptr_lifetime(&repr.header) }; - FrozenValueTyped(FrozenValue::new_ptr(header, A::IS_STR), marker::PhantomData) - } - - /// Erase the type. - #[inline] - pub fn to_frozen_value(self) -> FrozenValue { - self.0 - } - - /// Convert to the value. - #[inline] - pub fn to_value(self) -> Value<'v> { - self.0.to_value() - } - - /// Convert to the value. - #[inline] - pub fn to_value_typed(self) -> ValueTyped<'v, T> { - unsafe { ValueTyped::new_unchecked(self.0.to_value()) } - } - - /// Get the reference to the pointed value. - #[inline] - pub fn as_ref(self) -> &'v T { - if PointerI32::type_is_pointer_i32::() { - unsafe { transmute!(&PointerI32, &T, self.0.0.unpack_pointer_i32_unchecked()) } - } else if T::static_type_id() == StarlarkStr::static_type_id() { - unsafe { - self.0 - .0 - .unpack_ptr_no_int_unchecked() - .unpack_header_unchecked() - .payload::() - } - } else { - // When a frozen pointer is not str and not int, - // unpack is does not need untagging. - // This generates slightly more efficient machine code. - unsafe { - self.0 - .0 - .unpack_ptr_no_int_no_str_unchecked() - .unpack_header_unchecked() - .payload::() - } - } - } - - #[inline] - pub(crate) fn as_frozen_ref(self) -> FrozenRef<'v, T> { - FrozenRef::new(self.as_ref()) - } -} - -impl<'v> ValueTyped<'v, StarlarkStr> { - /// Get the Rust string reference. - #[inline] - pub fn as_str(self) -> &'v str { - self.as_ref().as_str() - } -} - -impl<'v> FrozenValueTyped<'v, StarlarkStr> { - /// Get the Rust string reference. - #[inline] - pub fn as_str(self) -> &'v str { - self.as_ref().as_str() - } -} - -unsafe impl<'v, T: StarlarkValue<'v>> Trace<'v> for ValueTyped<'v, T> { - fn trace(&mut self, tracer: &Tracer<'v>) { - tracer.trace(&mut self.0); - // If type of value changed, dereference will produce the wrong object type. 
- debug_assert!(self.0.downcast_ref::().is_some()); - } -} - -impl<'v, T: StarlarkValue<'v>> Deref for FrozenValueTyped<'v, T> { - type Target = T; - - #[inline] - fn deref(&self) -> &T { - self.as_ref() - } -} - -impl<'v, T: StarlarkValue<'v>> Deref for ValueTyped<'v, T> { - type Target = T; - - #[inline] - fn deref(&self) -> &T { - self.as_ref() - } -} - -impl<'v, T: StarlarkValue<'v>> StarlarkTypeRepr for ValueTyped<'v, T> { - fn starlark_type_repr() -> Ty { - T::starlark_type_repr() - } -} - -impl<'v, T: StarlarkValue<'v>> UnpackValue<'v> for ValueTyped<'v, T> { - fn expected() -> String { - T::get_type_value_static().as_str().to_owned() - } - - fn unpack_value(value: Value<'v>) -> Option { - ValueTyped::new(value) - } -} - -impl<'v, T: StarlarkValue<'v>> AllocValue<'v> for ValueTyped<'v, T> { - fn alloc_value(self, _heap: &'v Heap) -> Value<'v> { - self.0 - } -} - -impl<'v> AllocStringValue<'v> for StringValue<'v> { - fn alloc_string_value(self, _heap: &'v Heap) -> StringValue<'v> { - self - } -} - -impl<'v, T: StarlarkValue<'v>> StarlarkTypeRepr for FrozenValueTyped<'v, T> { - fn starlark_type_repr() -> Ty { - T::starlark_type_repr() - } -} - -impl<'v, 'f, T: StarlarkValue<'f>> AllocValue<'v> for FrozenValueTyped<'f, T> { - fn alloc_value(self, _heap: &'v Heap) -> Value<'v> { - self.0.to_value() - } -} - -impl<'v> AllocStringValue<'v> for FrozenStringValue { - fn alloc_string_value(self, _heap: &'v Heap) -> StringValue<'v> { - self.to_string_value() - } -} - -impl<'v, T: StarlarkValue<'v>> AllocFrozenValue for FrozenValueTyped<'v, T> { - fn alloc_frozen_value(self, _heap: &FrozenHeap) -> FrozenValue { - self.0 - } -} - -impl AllocFrozenStringValue for FrozenStringValue { - fn alloc_frozen_string_value(self, _heap: &FrozenHeap) -> FrozenStringValue { - self - } -} - -#[cfg(test)] -mod tests { - use crate::values::int::PointerI32; - use crate::values::FrozenValue; - use crate::values::FrozenValueTyped; - - #[test] - fn int() { - let v = FrozenValueTyped::::new(FrozenValue::testing_new_int(17)).unwrap(); - assert_eq!(17, v.as_ref().get().to_i32()); - } -} diff --git a/starlark-rust/starlark/src/values/layout/typed/string.rs b/starlark-rust/starlark/src/values/layout/typed/string.rs index adc02caf8f50f..474452c176a07 100644 --- a/starlark-rust/starlark/src/values/layout/typed/string.rs +++ b/starlark-rust/starlark/src/values/layout/typed/string.rs @@ -31,7 +31,7 @@ use crate::coerce::CoerceKey; use crate::collections::Hashed; use crate::sealed::Sealed; use crate::values::layout::static_string::VALUE_EMPTY_STRING; -use crate::values::string::StarlarkStr; +use crate::values::string::str_type::StarlarkStr; use crate::values::Freeze; use crate::values::Freezer; use crate::values::FrozenValue; @@ -49,7 +49,8 @@ use crate::values::ValueTyped; /// /// ``` /// use starlark::const_frozen_string; -/// use starlark::values::{FrozenStringValue, FrozenValue}; +/// use starlark::values::FrozenStringValue; +/// use starlark::values::FrozenValue; /// /// let fv: FrozenStringValue = const_frozen_string!("magic"); /// assert_eq!("magic", fv.as_str()); @@ -146,6 +147,11 @@ impl<'v> StringValue<'v> { .unpack_frozen() .map(|s| unsafe { FrozenStringValue::new_unchecked(s) }) } + + #[inline] + pub(crate) unsafe fn cast_lifetime<'w>(self) -> StringValue<'w> { + StringValue::new_unchecked(self.to_value().cast_lifetime()) + } } /// Common type for [`StringValue`] and [`FrozenStringValue`]. 
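
// ---- [editorial aside] ----------------------------------------------------
// What the `Borrow` bound added to `StringValueLike` in the next hunk
// (presumably `Borrow<str>`; the generic parameter was lost in extraction)
// buys: generic code over any string-like key can be looked up by a plain
// `&str` without allocating. Standalone sketch using std types only.
use std::borrow::Borrow;
use std::collections::HashMap;
use std::hash::Hash;

fn lookup<'m, K, V>(map: &'m HashMap<K, V>, key: &str) -> Option<&'m V>
where
    K: Borrow<str> + Hash + Eq,
{
    // `HashMap::get` needs only `K: Borrow<Q>`; the trait bound makes this
    // available for every string-like key type.
    map.get(key)
}

fn main() {
    let mut m: HashMap<String, i32> = HashMap::new();
    m.insert("x".to_owned(), 1);
    assert_eq!(lookup(&m, "x"), Some(&1));
}
// ---------------------------------------------------------------------------
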
@@ -153,6 +159,7 @@ pub trait StringValueLike<'v>: Trace<'v> + Freeze + CoerceKey> + + Borrow + Display + Debug + Default @@ -164,6 +171,7 @@ pub trait StringValueLike<'v>: + Serialize + Allocative + Sealed + + 'v { /// Convert to a [`StringValue`]. fn to_string_value(self) -> StringValue<'v>; @@ -190,25 +198,6 @@ impl<'v> StringValueLike<'v> for FrozenStringValue { } } -impl<'v1, 'v2> PartialEq> for StringValue<'v2> { - fn eq(&self, other: &StringValue) -> bool { - // `PartialEq` can be implemented for other types, not just for `StarlarkStr`. - // But at the moment of writing, we don't guarantee that `PartialEq` for `T` - // is consistent with `StarlarkValue::equals` for `T`. - self.to_value().ptr_eq(other.to_value()) || self.as_ref() == other.as_ref() - } -} - -impl<'v> Eq for StringValue<'v> {} - -impl PartialEq for FrozenStringValue { - fn eq(&self, other: &Self) -> bool { - self.to_value_typed() == other.to_value_typed() - } -} - -impl Eq for FrozenStringValue {} - impl<'v> PartialEq> for FrozenStringValue { fn eq(&self, other: &StringValue<'v>) -> bool { &self.to_value_typed() == other @@ -275,6 +264,7 @@ mod tests { let heap = Heap::new(); let s: StringValue = heap.alloc_str("xyz"); assert_eq!(expected, Hashed::new(s).hash()); + assert_eq!(s.get_hashed().hash(), s.hashed().unwrap().hash()); let v: Value = heap.alloc_str("xyz").to_value(); assert_eq!(expected, v.get_hashed().unwrap().hash()); diff --git a/starlark-rust/starlark/src/values/layout/value.rs b/starlark-rust/starlark/src/values/layout/value.rs index 10976ed9402f1..03e6e2276d2fa 100644 --- a/starlark-rust/starlark/src/values/layout/value.rs +++ b/starlark-rust/starlark/src/values/layout/value.rs @@ -29,6 +29,7 @@ // our val_ref requires a pointer to the value. We need to put that pointer // somewhere. The solution is to have a separate value storage vs vtable. 
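
// ---- [editorial aside] ----------------------------------------------------
// The "separate value storage vs vtable" note above, made concrete: one
// allocation holds a header (type metadata) followed by the payload, so a
// payload reference and its dispatch information travel together. Field
// names here are illustrative and do not match the real `AValueRepr`.
#[repr(C)]
struct Repr<T> {
    type_name: &'static str, // stand-in for the vtable pointer
    payload: T,
}

fn payload_ref<T>(repr: &Repr<T>) -> &T {
    // With #[repr(C)] the payload sits at a fixed offset after the header,
    // so the header is recoverable from a payload pointer by a constant
    // negative offset -- the property the layout comment relies on.
    &repr.payload
}

fn main() {
    let r = Repr { type_name: "int", payload: 17i32 };
    assert_eq!(*payload_ref(&r), 17);
}
// ---------------------------------------------------------------------------
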
+use std::any; use std::cmp::Ordering; use std::fmt; use std::fmt::Debug; @@ -39,17 +40,19 @@ use dupe::Clone_; use dupe::Copy_; use dupe::Dupe; use dupe::Dupe_; +use dupe::IterDupedExt; +use dupe::OptionDupedExt; use either::Either; use num_bigint::BigInt; use serde::Serialize; use serde::Serializer; use starlark_map::Equivalent; +use starlark_syntax::value_error; use crate as starlark; use crate::any::AnyLifetime; use crate::any::ProvidesStaticType; use crate::cast::transmute; -use crate::coerce::coerce; use crate::coerce::Coerce; use crate::coerce::CoerceKey; use crate::collections::Hashed; @@ -64,31 +67,38 @@ use crate::eval::Arguments; use crate::eval::Evaluator; use crate::eval::ParametersSpec; use crate::sealed::Sealed; +use crate::typing::ParamIsRequired; +use crate::typing::ParamSpec; use crate::typing::Ty; -use crate::values::bool::VALUE_FALSE_TRUE; +use crate::typing::TyCallable; +use crate::util::ArcStr; +use crate::values::bool::value::VALUE_FALSE_TRUE; use crate::values::demand::request_value_impl; +use crate::values::dict::value::VALUE_EMPTY_FROZEN_DICT; use crate::values::dict::FrozenDictRef; use crate::values::enumeration::EnumType; use crate::values::enumeration::FrozenEnumValue; use crate::values::function::FrozenBoundMethod; use crate::values::function::NativeFunction; use crate::values::function::FUNCTION_TYPE; -use crate::values::int::PointerI32; +use crate::values::int::pointer_i32::PointerI32; use crate::values::iter::StarlarkIterator; use crate::values::layout::avalue::AValue; -use crate::values::layout::avalue::StarlarkStrAValue; +use crate::values::layout::avalue::AValueImpl; use crate::values::layout::heap::repr::AValueHeader; +use crate::values::layout::heap::repr::AValueOrForwardUnpack; use crate::values::layout::heap::repr::AValueRepr; use crate::values::layout::pointer::FrozenPointer; use crate::values::layout::pointer::Pointer; use crate::values::layout::pointer::RawPointer; use crate::values::layout::static_string::VALUE_EMPTY_STRING; use crate::values::layout::typed::string::StringValueLike; +use crate::values::layout::value_lifetimeless::ValueLifetimeless; use crate::values::layout::vtable::AValueDyn; use crate::values::layout::vtable::AValueDynFull; use crate::values::layout::vtable::AValueVTable; +use crate::values::list::value::VALUE_EMPTY_FROZEN_LIST; use crate::values::none::none_type::VALUE_NONE; -use crate::values::num::value::NumRef; use crate::values::range::Range; use crate::values::record::instance::FrozenRecord; use crate::values::record::record_type::RecordType; @@ -96,17 +106,16 @@ use crate::values::recursive_repr_or_json_guard::json_stack_push; use crate::values::recursive_repr_or_json_guard::repr_stack_push; use crate::values::stack_guard; use crate::values::starlark_type_id::StarlarkTypeId; -use crate::values::string::StarlarkStr; +use crate::values::string::str_type::StarlarkStr; use crate::values::structs::value::FrozenStruct; use crate::values::tuple::value::VALUE_EMPTY_TUPLE; use crate::values::type_repr::StarlarkTypeRepr; -use crate::values::types::inline_int::InlineInt; -use crate::values::types::int_or_big::StarlarkIntRef; +use crate::values::types::int::inline_int::InlineInt; +use crate::values::types::int::int_or_big::StarlarkIntRef; use crate::values::types::list::value::FrozenListData; +use crate::values::types::num::value::NumRef; use crate::values::types::tuple::value::FrozenTuple; use crate::values::types::tuple::value::Tuple; -use crate::values::types::unbound::MaybeUnboundValue; -use crate::values::Freeze; use 
crate::values::Freezer; use crate::values::FrozenRef; use crate::values::FrozenStringValue; @@ -114,6 +123,7 @@ use crate::values::FrozenValueTyped; use crate::values::Heap; use crate::values::StarlarkValue; use crate::values::StringValue; +use crate::values::Trace; use crate::values::UnpackValue; use crate::values::ValueError; use crate::values::ValueIdentity; @@ -121,8 +131,8 @@ use crate::values::ValueIdentity; // We already import another `ValueError`, hence the odd name. #[derive(Debug, thiserror::Error)] enum ValueValueError { - #[error("Value is of type `{0}` but `{1}` was expected")] - WrongType(&'static str, &'static str), + #[error("Expected value of type `{0}` but got `{1}`")] + WrongType(&'static str, String), } /// A Starlark value. The lifetime argument `'v` corresponds to the [`Heap`](crate::values::Heap) it is stored on. @@ -175,6 +185,14 @@ impl Display for FrozenValue { } fn debug_value(typ: &str, v: Value, f: &mut fmt::Formatter) -> fmt::Result { + // When value is being moved during GC or freeze, + // `Value` pointee is not a proper value, but a GC-related information. + // Regular operations like `.to_repr()` crash, but `Debug` should work. + if let Some(x) = v.0.unpack_ptr() { + if let AValueOrForwardUnpack::Forward(fwd) = x.unpack() { + return f.debug_tuple(typ).field(&fwd).finish(); + } + } f.debug_tuple(typ).field(v.get_ref().as_debug()).finish() } @@ -237,6 +255,13 @@ pub struct FrozenValue( unsafe impl Send for FrozenValue {} unsafe impl Sync for FrozenValue {} +#[derive(thiserror::Error, Debug)] +#[error("Integer value is too big to fit in {integer_type}: {value}")] +pub(crate) struct IntegerTooBigError { + pub(crate) integer_type: &'static str, + pub(crate) value: String, +} + impl<'v> Value<'v> { #[inline] pub(crate) fn new_ptr(x: &'v AValueHeader, is_str: bool) -> Self { @@ -250,7 +275,7 @@ impl<'v> Value<'v> { } #[inline] - pub(crate) fn new_repr>(x: &'v AValueRepr) -> Self { + pub(crate) fn new_repr>(x: &'v AValueRepr>) -> Self { Self::new_ptr(&x.header, T::IS_STR) } @@ -333,18 +358,33 @@ impl<'v> Value<'v> { /// Obtain the underlying numerical value, if it is one. pub(crate) fn unpack_num(self) -> Option> { - NumRef::unpack_value(self) + if let Some(int) = StarlarkIntRef::unpack(self) { + Some(NumRef::Int(int)) + } else if let Some(float) = self.downcast_ref() { + Some(NumRef::Float(*float)) + } else { + None + } } - pub(crate) fn unpack_integer(self) -> Option + pub(crate) fn unpack_integer(self) -> crate::Result> where I: TryFrom, I: TryFrom<&'v BigInt>, { - match self.unpack_num()? { - NumRef::Float(_) => None, - NumRef::Int(StarlarkIntRef::Small(x)) => I::try_from(x.to_i32()).ok(), - NumRef::Int(StarlarkIntRef::Big(x)) => x.unpack_integer(), + let Some(num) = StarlarkIntRef::unpack_value_opt(self) else { + return Ok(None); + }; + let option = match num { + StarlarkIntRef::Small(x) => I::try_from(x.to_i32()).ok(), + StarlarkIntRef::Big(x) => x.unpack_integer(), + }; + match option { + Some(i) => Ok(Some(i)), + None => Err(crate::Error::new_value(IntegerTooBigError { + integer_type: any::type_name::(), + value: num.to_string(), + })), } } @@ -364,7 +404,11 @@ impl<'v> Value<'v> { /// Note floats are not considered integers, i. e. `unpack_i32` for `1.0` will return `None`. 
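
// ---- [editorial aside] ----------------------------------------------------
// The `unpack_integer` change above splits two cases that used to collapse
// into one `None`: "not an integer" stays `None`, while "an integer that
// does not fit" becomes an error naming the target type. Standalone sketch
// narrowing from i64 (edition 2021; `TooBig` is a stand-in for
// `IntegerTooBigError`).
use std::any::type_name;

#[derive(Debug, PartialEq)]
struct TooBig {
    integer_type: &'static str,
    value: String,
}

fn unpack_integer<I: TryFrom<i64>>(v: Option<i64>) -> Result<Option<I>, TooBig> {
    let Some(num) = v else {
        return Ok(None); // not an integer at all
    };
    match I::try_from(num) {
        Ok(i) => Ok(Some(i)),
        Err(_) => Err(TooBig {
            integer_type: type_name::<I>(),
            value: num.to_string(),
        }),
    }
}

fn main() {
    assert_eq!(unpack_integer::<u8>(Some(17)), Ok(Some(17u8)));
    assert_eq!(unpack_integer::<u8>(None), Ok(None));
    assert!(unpack_integer::<u8>(Some(1000)).is_err());
}
// ---------------------------------------------------------------------------
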
#[inline] pub fn unpack_i32(self) -> Option { - i32::unpack_value(self) + if InlineInt::smaller_than_i32() { + StarlarkIntRef::unpack(self)?.to_i32() + } else { + self.unpack_inline_int().map(|i| i.to_i32()) + } } #[inline] @@ -406,9 +450,8 @@ impl<'v> Value<'v> { .0 .unpack_ptr_no_int_unchecked() .unpack_header_unchecked() - .as_repr::() - .payload - .1, + .as_repr::() + .payload, ) } } else { @@ -462,7 +505,7 @@ impl<'v> Value<'v> { } } - pub(crate) fn get_hash(self) -> anyhow::Result { + pub(crate) fn get_hash(self) -> crate::Result { self.get_ref().get_hash() } @@ -517,22 +560,8 @@ impl<'v> Value<'v> { } } - /// Conversion to an int that sees through `bool` and `int`. - pub(crate) fn to_int(self) -> anyhow::Result { - // Fast path for the common case - if let Some(x) = self.unpack_i32() { - Ok(x) - } else if let Some(x) = self.unpack_bool() { - Ok(x as i32) - } else if let Some(NumRef::Int(_)) = self.unpack_num() { - Err(ValueError::IntegerOverflow.into()) - } else { - ValueError::unsupported_owned(self.get_type(), "int()", None) - } - } - /// `x[index]`. - pub fn at(self, index: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + pub fn at(self, index: Value<'v>, heap: &'v Heap) -> crate::Result> { self.get_ref().at(index, heap) } @@ -543,37 +572,37 @@ impl<'v> Value<'v> { stop: Option>, stride: Option>, heap: &'v Heap, - ) -> anyhow::Result> { + ) -> crate::Result> { self.get_ref().slice(start, stop, stride, heap) } /// `len(x)`. - pub fn length(self) -> anyhow::Result { + pub fn length(self) -> crate::Result { self.get_ref().length() } /// `other in x`. - pub fn is_in(self, other: Value<'v>) -> anyhow::Result { + pub fn is_in(self, other: Value<'v>) -> crate::Result { self.get_ref().is_in(other) } /// `+x`. - pub fn plus(self, heap: &'v Heap) -> anyhow::Result> { + pub fn plus(self, heap: &'v Heap) -> crate::Result> { self.get_ref().plus(heap) } /// `-x`. - pub fn minus(self, heap: &'v Heap) -> anyhow::Result> { + pub fn minus(self, heap: &'v Heap) -> crate::Result> { self.get_ref().minus(heap) } /// `x - other`. - pub fn sub(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + pub fn sub(self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { self.get_ref().sub(other, heap) } /// `x * other`. - pub fn mul(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + pub fn mul(self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { if let Some(r) = self.get_ref().mul(other, heap) { r } else if let Some(r) = other.get_ref().rmul(self, heap) { @@ -584,47 +613,47 @@ impl<'v> Value<'v> { } /// `x % other`. - pub fn percent(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + pub fn percent(self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { self.get_ref().percent(other, heap) } /// `x / other`. - pub fn div(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + pub fn div(self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { self.get_ref().div(other, heap) } /// `x // other`. - pub fn floor_div(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + pub fn floor_div(self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { self.get_ref().floor_div(other, heap) } /// `x & other`. - pub fn bit_and(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + pub fn bit_and(self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { self.get_ref().bit_and(other, heap) } /// `x | other`. 
- pub fn bit_or(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + pub fn bit_or(self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { self.get_ref().bit_or(other, heap) } /// `x ^ other`. - pub fn bit_xor(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + pub fn bit_xor(self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { self.get_ref().bit_xor(other, heap) } /// `~x`. - pub fn bit_not(self, heap: &'v Heap) -> anyhow::Result> { + pub fn bit_not(self, heap: &'v Heap) -> crate::Result> { self.get_ref().bit_not(heap) } /// `x << other`. - pub fn left_shift(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + pub fn left_shift(self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { self.get_ref().left_shift(other, heap) } /// `x >> other`. - pub fn right_shift(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + pub fn right_shift(self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { self.get_ref().right_shift(other, heap) } @@ -632,8 +661,8 @@ impl<'v> Value<'v> { self, location: Option>, args: &Arguments<'v, '_>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result> { eval.with_call_stack(self, location, |eval| { self.get_ref_full().invoke(args, eval) }) @@ -646,7 +675,7 @@ impl<'v> Value<'v> { if let Some(def) = self.downcast_ref::() { Some(&def.parameters) } else if let Some(def) = self.downcast_ref::() { - Some(coerce(&def.parameters)) + Some(def.parameters.as_value()) } else { None } @@ -656,8 +685,8 @@ impl<'v> Value<'v> { pub(crate) fn invoke( self, args: &Arguments<'v, '_>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result> { self.invoke_with_loc(None, args, eval) } @@ -665,8 +694,8 @@ impl<'v> Value<'v> { pub(crate) fn invoke_pos( self, pos: &[Value<'v>], - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result> { let params = Arguments(ArgumentsFull { pos, ..ArgumentsFull::default() @@ -674,6 +703,78 @@ impl<'v> Value<'v> { self.invoke(¶ms, eval) } + fn check_callable(self) -> crate::Result<()> { + if !self.vtable().starlark_value.HAS_invoke { + return Err(value_error!( + "Value is not callable: {}", + self.to_string_for_type_error() + )); + } + Ok(()) + } + + /// Check this value can be "called" with given parameter types, and provided return type. + /// + /// This check is done optimistically: when it is not known + /// whether the value is compatible with given arguments, return `Ok(())`. + /// + /// This operation is expensive. + pub fn check_callable_with<'a>( + self, + pos: impl IntoIterator, + named: impl IntoIterator, + args: Option<&Ty>, + kwargs: Option<&Ty>, + ret: &Ty, + ) -> crate::Result<()> { + let pos = Vec::from_iter(pos); + let named = Vec::from_iter(named); + self.check_callable_with_impl(&pos, &named, args, kwargs, ret) + } + + fn check_callable_with_impl<'a>( + self, + pos: &[&Ty], + named: &[(&'a str, &Ty)], + args: Option<&Ty>, + kwargs: Option<&Ty>, + ret: &Ty, + ) -> crate::Result<()> { + // First, provide a good error message when the value is not callable + // without invoking a typechecker. 
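
// ---- [editorial aside] ----------------------------------------------------
// The two-stage check `check_callable_with_impl` sets up here, in
// miniature: a cheap "is it callable at all?" test runs first so that
// common failure gets a direct message, and the signature check is
// optimistic -- unknown compatibility passes. `Compat` and `Callable` are
// hypothetical stand-ins, not the starlark typechecker.
#[derive(Clone, Copy)]
enum Compat {
    Yes,
    No,
    Unknown,
}

struct Callable {
    invokable: bool,
    arity_matches: fn(usize) -> Compat,
}

fn check_callable_with(c: &Callable, n_args: usize) -> Result<(), String> {
    if !c.invokable {
        // Cheap path: a clear error without running the typechecker.
        return Err("Value is not callable".to_owned());
    }
    match (c.arity_matches)(n_args) {
        Compat::No => Err(format!("not compatible with {} arguments", n_args)),
        // Optimistic: definite and unknown compatibility both pass.
        Compat::Yes | Compat::Unknown => Ok(()),
    }
}

fn main() {
    let f = Callable {
        invokable: true,
        arity_matches: |n| if n == 2 { Compat::Yes } else { Compat::No },
    };
    assert!(check_callable_with(&f, 2).is_ok());
    assert!(check_callable_with(&f, 3).is_err());
}
// ---------------------------------------------------------------------------
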
+ self.check_callable()?; + + let sig = TyCallable::new( + ParamSpec::new_parts( + pos.iter().map(|ty| (ParamIsRequired::Yes, (*ty).dupe())), + [], + args.duped(), + named + .iter() + .map(|(n, ty)| (ArcStr::from(*n), ParamIsRequired::Yes, (*ty).dupe())), + kwargs.duped(), + )?, + ret.dupe(), + ); + + let ty = Ty::of_value(self); + if !ty.check_call( + pos.iter().copied().duped(), + named.iter().map(|(n, ty)| (*n, (*ty).dupe())), + args.duped(), + kwargs.duped(), + ret.dupe(), + ) { + return Err(value_error!( + "Value `{}` is not compatible with the signature `{}`", + self.to_string_for_type_error(), + sig + )); + } + + Ok(()) + } + /// `type(x)`. pub fn get_type_value(self) -> FrozenStringValue { self.vtable().type_value() @@ -690,9 +791,9 @@ impl<'v> Value<'v> { self.vtable().type_starlark_repr() } - /// Add two [`Value`]s together. Will first try using [`radd`](StarlarkValue::radd), - /// before falling back to [`add`](StarlarkValue::add). - pub fn add(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + /// Add two [`Value`]s together. Will first try using [`add`](StarlarkValue::add), + /// before falling back to [`radd`](StarlarkValue::radd). + pub fn add(self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { // Fast special case for ints. if let Some(ls) = self.unpack_inline_int() { if let Some(rs) = other.unpack_inline_int() { @@ -763,37 +864,37 @@ impl<'v> Value<'v> { } /// Forwards to [`StarlarkValue::set_attr`]. - pub fn set_attr(self, attribute: &str, alloc_value: Value<'v>) -> anyhow::Result<()> { + pub fn set_attr(self, attribute: &str, alloc_value: Value<'v>) -> crate::Result<()> { self.get_ref().set_attr(attribute, alloc_value) } /// Forwards to [`StarlarkValue::set_at`]. - pub fn set_at(self, index: Value<'v>, alloc_value: Value<'v>) -> anyhow::Result<()> { + pub fn set_at(self, index: Value<'v>, alloc_value: Value<'v>) -> crate::Result<()> { self.get_ref().set_at(index, alloc_value) } /// Forwards to [`StarlarkValue::documentation`]. - pub fn documentation(self) -> Option { + pub fn documentation(self) -> DocItem { self.get_ref().documentation() } /// Produce an iterable from a value. #[inline] - pub fn iterate(self, heap: &'v Heap) -> anyhow::Result> { + pub fn iterate(self, heap: &'v Heap) -> crate::Result> { let iter = self.get_ref().iterate(self, heap)?; Ok(StarlarkIterator::new(iter, heap)) } /// Get the [`Hashed`] version of this [`Value`]. #[inline] - pub fn get_hashed(self) -> anyhow::Result> { + pub fn get_hashed(self) -> crate::Result> { ValueLike::get_hashed(self) } /// Are two values equal. If the values are of different types it will /// return [`false`]. It will only error if there is excessive recursion. #[inline] - pub fn equals(self, other: Value<'v>) -> anyhow::Result { + pub fn equals(self, other: Value<'v>) -> crate::Result { if self.ptr_eq(other) { Ok(true) } else { @@ -805,14 +906,14 @@ impl<'v> Value<'v> { } #[inline] - fn equals_not_ptr_eq(self, other: Value<'v>) -> anyhow::Result { + fn equals_not_ptr_eq(self, other: Value<'v>) -> crate::Result { let _guard = stack_guard::stack_guard()?; self.get_ref().equals(other) } /// How are two values comparable. For values of different types will return [`Err`]. 
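
// ---- [editorial aside] ----------------------------------------------------
// The corrected `add` doc in this hunk (try the left operand's `add` first,
// fall back to the right operand's `radd`): the dispatch order as a
// runnable toy. `None` from a hook means "operand not supported", not
// failure; `Val` is a stand-in, not the starlark value type.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Val {
    Int(i64),
    Tally(usize), // a toy type that only counts what gets added to it
}

impl Val {
    fn add(self, rhs: Val) -> Option<Val> {
        match (self, rhs) {
            (Val::Int(a), Val::Int(b)) => Some(Val::Int(a + b)),
            _ => None, // ints don't know how to add a Tally
        }
    }

    fn radd(self, _lhs: Val) -> Option<Val> {
        match self {
            // As the *right* operand, Tally accepts anything.
            Val::Tally(n) => Some(Val::Tally(n + 1)),
            _ => None,
        }
    }
}

fn binary_add(l: Val, r: Val) -> Result<Val, &'static str> {
    l.add(r).or_else(|| r.radd(l)).ok_or("unsupported operands for +")
}

fn main() {
    assert_eq!(binary_add(Val::Int(1), Val::Int(2)), Ok(Val::Int(3)));
    assert_eq!(binary_add(Val::Int(1), Val::Tally(0)), Ok(Val::Tally(1)));
}
// ---------------------------------------------------------------------------
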
#[inline] - pub fn compare(self, other: Value<'v>) -> anyhow::Result { + pub fn compare(self, other: Value<'v>) -> crate::Result { ValueLike::compare(self, other) } @@ -834,18 +935,18 @@ impl<'v> Value<'v> { pub fn export_as( self, variable_name: &str, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result<()> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result<()> { self.get_ref().export_as(variable_name, eval) } /// Return the attribute with the given name. - pub fn get_attr(self, attribute: &str, heap: &'v Heap) -> anyhow::Result>> { + pub fn get_attr(self, attribute: &str, heap: &'v Heap) -> crate::Result>> { let aref = self.get_ref(); if let Some(methods) = aref.vtable().methods() { let attribute = Hashed::new(attribute); if let Some(v) = methods.get_hashed(attribute) { - return Ok(Some(MaybeUnboundValue::new(v).bind(self, heap)?)); + return Ok(Some(v.bind(self, heap)?)); } Ok(aref.get_attr_hashed(attribute, heap)) } else { @@ -854,7 +955,7 @@ impl<'v> Value<'v> { } /// Like `get_attr` but return an error if the attribute is not available. - pub fn get_attr_error(self, attribute: &str, heap: &'v Heap) -> anyhow::Result> { + pub fn get_attr_error(self, attribute: &str, heap: &'v Heap) -> crate::Result> { match self.get_attr(attribute, heap)? { None => { ValueError::unsupported_owned(self.get_type(), &format!(".{}", attribute), None) @@ -894,6 +995,56 @@ impl<'v> Value<'v> { pub fn request_value>(self) -> Option { request_value_impl(self) } + + #[cold] + fn display_for_type_error(self) -> impl Display + 'v { + fn split_at_safe(s: &str, index: usize) -> (&str, &str) { + for index in index..s.len() { + if s.is_char_boundary(index) { + return s.split_at(index); + } + } + (s, "") + } + + struct DisplayWithTypeImpl<'v>(Value<'v>); + + impl<'v> Display for DisplayWithTypeImpl<'v> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut repr = self.0.to_repr(); + + let max_len = 60usize; + + if repr.len() > max_len && repr.chars().count() > max_len { + let truncated = "<<...>>"; + + // 1/3 from back, 2/3 from front, because front is usually more interesting. + let take_from_back = max_len.saturating_sub(truncated.len()) / 3; + let take_from_front = take_from_back * 2; + + // Resulting repr is approximately `max_len` long. + repr = format!( + "{}{}{}", + split_at_safe(&repr, take_from_front).0, + truncated, + split_at_safe(&repr, repr.len().saturating_sub(take_from_back)).1 + ); + } + + write!(f, "{} (repr: {})", self.0.get_type(), repr) + } + } + + DisplayWithTypeImpl(self) + } + + /// Return a string usable for error messages. + /// + /// If the value is too large, it may be truncated. + #[cold] + pub fn to_string_for_type_error(self) -> String { + self.display_for_type_error().to_string() + } } impl FrozenValue { @@ -909,7 +1060,7 @@ impl FrozenValue { } #[inline] - pub(crate) fn new_repr<'a, T: AValue<'a>>(x: &'static AValueRepr) -> Self { + pub(crate) fn new_repr<'a, T: AValue<'a>>(x: &'static AValueRepr>) -> Self { Self::new_ptr(&x.header, T::IS_STR) } @@ -955,6 +1106,18 @@ impl FrozenValue { FrozenValue::new_repr(&VALUE_EMPTY_TUPLE) } + /// Create a new empty list. + #[inline] + pub fn new_empty_list() -> Self { + FrozenValue::new_repr(&VALUE_EMPTY_FROZEN_LIST) + } + + /// Create a new empty dict. 
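
// ---- [editorial aside] ----------------------------------------------------
// The repr-truncation logic from `display_for_type_error` above, runnable
// in isolation: split points are nudged forward to the next char boundary
// so multi-byte UTF-8 is never cut, and the budget is spent 2/3 on the
// front (usually the interesting part) and 1/3 on the back. Simplified
// mirror of the hunk above, not the exact code.
fn split_at_safe(s: &str, index: usize) -> (&str, &str) {
    for i in index..s.len() {
        if s.is_char_boundary(i) {
            return s.split_at(i);
        }
    }
    (s, "")
}

fn truncate_middle(repr: &str, max_len: usize) -> String {
    if repr.len() <= max_len || repr.chars().count() <= max_len {
        return repr.to_owned();
    }
    let marker = "<<...>>";
    let take_from_back = max_len.saturating_sub(marker.len()) / 3;
    let take_from_front = take_from_back * 2;
    format!(
        "{}{}{}",
        split_at_safe(repr, take_from_front).0,
        marker,
        split_at_safe(repr, repr.len().saturating_sub(take_from_back)).1
    )
}

fn main() {
    let long: String = (0..12345).map(|i| i.to_string() + ", ").collect();
    let short = truncate_middle(&long, 60);
    assert!(short.contains("<<...>>"));
    assert!(short.len() < long.len());
}
// ---------------------------------------------------------------------------
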
+ #[inline] + pub fn new_empty_dict() -> Self { + FrozenValue::new_repr(&VALUE_EMPTY_FROZEN_DICT) + } + #[inline] pub(crate) fn ptr_value(self) -> RawPointer { self.0.raw() @@ -1013,7 +1176,7 @@ impl FrozenValue { self.is_none() || self.is_str() || self.unpack_bool().is_some() - || NumRef::unpack_value(self.to_value()).is_some() + || NumRef::unpack_value(self.to_value()).is_ok_and(|n| n.is_some()) || FrozenListData::from_frozen_value(&self).is_some() || FrozenDictRef::from_frozen_value(self).is_some() || FrozenValueTyped::::new(self).is_some() @@ -1042,8 +1205,9 @@ impl FrozenValue { /// `self == b` is `ptr_eq`. pub(crate) fn eq_is_ptr_eq(self) -> bool { // Note `int` is not `ptr_eq` because `int` can be equal to `float`. - self.is_none() - || self.unpack_bool().is_some() + + // If a value does not override equality, it is `ptr_eq`. + !self.to_value().get_ref().vtable().starlark_value.HAS_equals // Strings of length <= 1 are statically allocated. || matches!(self.unpack_str(), Some(s) if s.len() <= 1) // Empty tuple is statically allocated. @@ -1095,12 +1259,16 @@ impl Serialize for FrozenValue { } impl<'v> StarlarkTypeRepr for Value<'v> { + type Canonical = ::Canonical; + fn starlark_type_repr() -> Ty { FrozenValue::starlark_type_repr() } } impl StarlarkTypeRepr for FrozenValue { + type Canonical = Self; + fn starlark_type_repr() -> Ty { Ty::any() } @@ -1114,18 +1282,7 @@ impl StarlarkTypeRepr for FrozenValue { /// For details about each function, see the documentation for [`Value`], /// which provides the same functions (and more). pub trait ValueLike<'v>: - Eq - + Copy - + Debug - + Default - + Display - + Serialize - + CoerceKey> - + Freeze - + Allocative - + ProvidesStaticType<'v> - + Sealed - + 'v + ValueLifetimeless + Trace<'v> + CoerceKey> + ProvidesStaticType<'v> + 'v { /// `StringValue` or `FrozenStringValue`. type String: StringValueLike<'v>; @@ -1140,16 +1297,16 @@ pub trait ValueLike<'v>: fn invoke( self, args: &Arguments<'v, '_>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result> { self.to_value().invoke(args, eval) } /// Hash the value. - fn write_hash(self, hasher: &mut StarlarkHasher) -> anyhow::Result<()>; + fn write_hash(self, hasher: &mut StarlarkHasher) -> crate::Result<()>; /// Get hash value. - fn get_hashed(self) -> anyhow::Result> { + fn get_hashed(self) -> crate::Result> { let hash = if let Some(s) = self.to_value().unpack_starlark_str() { s.get_hash() } else { @@ -1173,10 +1330,10 @@ pub trait ValueLike<'v>: /// `x == other`. /// /// This operation can only return error on stack overflow. - fn equals(self, other: Value<'v>) -> anyhow::Result; + fn equals(self, other: Value<'v>) -> crate::Result; /// `x <=> other`. - fn compare(self, other: Value<'v>) -> anyhow::Result; + fn compare(self, other: Value<'v>) -> crate::Result; /// Get a reference to underlying data or [`None`] /// if contained object has different type than requested. 
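The `ValueLike` bound is split here: the lifetime-independent requirements move to a new `ValueLifetimeless` supertrait (defined later in this diff). A hedged sketch of the kind of code this enables, assuming `ValueLifetimeless` is re-exported from `starlark::values`:

```rust
use starlark::values::{FrozenValue, Value, ValueLifetimeless};

// One generic definition covers both value representations;
// `ValueLifetimeless` supplies Eq/Copy/Debug/etc. without tying the
// container itself to a heap lifetime.
#[derive(Debug, Clone, Copy)]
struct PairGen<V: ValueLifetimeless> {
    first: V,
    second: V,
}

type Pair<'v> = PairGen<Value<'v>>;
type FrozenPair = PairGen<FrozenValue>;
```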
@@ -1187,7 +1344,11 @@ pub trait ValueLike<'v>: fn downcast_ref_err>(self) -> anyhow::Result<&'v T> { match self.downcast_ref() { Some(v) => Ok(v), - None => Err(ValueValueError::WrongType(self.to_value().get_type(), T::TYPE).into()), + None => Err(ValueValueError::WrongType( + T::TYPE, + self.to_value().to_string_for_type_error(), + ) + .into()), } } } @@ -1198,6 +1359,8 @@ struct ToJsonCycleError(&'static str); impl<'v> Sealed for Value<'v> {} +impl<'v> ValueLifetimeless for Value<'v> {} + impl<'v> ValueLike<'v> for Value<'v> { type String = StringValue<'v>; @@ -1242,16 +1405,16 @@ impl<'v> ValueLike<'v> for Value<'v> { } } - fn write_hash(self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { + fn write_hash(self, hasher: &mut StarlarkHasher) -> crate::Result<()> { self.get_ref().write_hash(hasher) } #[inline] - fn equals(self, other: Value<'v>) -> anyhow::Result { + fn equals(self, other: Value<'v>) -> crate::Result { self.equals(other) } - fn compare(self, other: Value<'v>) -> anyhow::Result { + fn compare(self, other: Value<'v>) -> crate::Result { let _guard = stack_guard::stack_guard()?; self.get_ref().compare(other) } @@ -1259,6 +1422,8 @@ impl<'v> ValueLike<'v> for Value<'v> { impl Sealed for FrozenValue {} +impl ValueLifetimeless for FrozenValue {} + impl<'v> ValueLike<'v> for FrozenValue { type String = FrozenStringValue; @@ -1283,17 +1448,17 @@ impl<'v> ValueLike<'v> for FrozenValue { } #[inline] - fn write_hash(self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { + fn write_hash(self, hasher: &mut StarlarkHasher) -> crate::Result<()> { self.to_value().write_hash(hasher) } #[inline] - fn equals(self, other: Value<'v>) -> anyhow::Result { + fn equals(self, other: Value<'v>) -> crate::Result { self.to_value().equals(other) } #[inline] - fn compare(self, other: Value<'v>) -> anyhow::Result { + fn compare(self, other: Value<'v>) -> crate::Result { self.to_value().compare(other) } } @@ -1309,9 +1474,12 @@ mod tests { use num_bigint::BigInt; use crate::assert; + use crate::environment::Globals; + use crate::typing::Ty; + use crate::values::int::pointer_i32::PointerI32; + use crate::values::list::AllocList; use crate::values::none::NoneType; - use crate::values::string::StarlarkStr; - use crate::values::types::int::PointerI32; + use crate::values::string::str_type::StarlarkStr; use crate::values::unpack::UnpackValue; use crate::values::Heap; use crate::values::Value; @@ -1347,12 +1515,21 @@ mod tests { assert_eq!(Some(i32::MAX), value.unpack_i32()); } + #[test] + fn test_unpack_frozen() { + assert!(Value::new_none().unpack_frozen().is_some()); + assert!(Value::testing_new_int(10).unpack_frozen().is_some()); + } + #[test] fn test_unpack_bigint() { let heap = Heap::new(); let value = heap.alloc(BigInt::from(i64::MAX)); assert_eq!(None, value.unpack_i32()); - assert_eq!(Some(BigInt::from(i64::MAX)), BigInt::unpack_value(value)); + assert_eq!( + Some(BigInt::from(i64::MAX)), + BigInt::unpack_value(value).unwrap() + ); } #[test] @@ -1363,4 +1540,60 @@ mod tests { value.value().to_json_value().unwrap() ); } + + #[test] + fn test_display_for_type_error() { + assert_eq!( + "NoneType (repr: None)", + Value::new_none().to_string_for_type_error(), + ); + + let heap = Heap::new(); + let list = heap.alloc(AllocList(0..12345)); + assert_eq!( + "list (repr: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,<<...>>42, 12343, 12344])", + list.to_string_for_type_error(), + ); + } + + #[test] + fn test_check_callable_with_none() { + let e = Value::new_none() + .check_callable_with([], [], None, None, 
&Ty::int()) + .unwrap_err(); + assert!( + e.to_string().contains("Value is not callable: NoneType"), + "{e}" + ); + } + + #[test] + fn test_check_callable_with_good_function() { + let g = Globals::standard(); + let f = g.get("bool").unwrap(); + + // Positional. + f.check_callable_with([&Ty::any_list()], [], None, None, &Ty::bool()) + .unwrap(); + + // Named. + let e = f + .check_callable_with([], [("x", &Ty::any_list())], None, None, &Ty::bool()) + .unwrap_err(); + assert!( + e.to_string() + .contains("Value `function (repr: bool)` is not compatible with"), + "{e}" + ); + + // Return type. + let e = f + .check_callable_with([&Ty::any_list()], [], None, None, &Ty::string()) + .unwrap_err(); + assert!( + e.to_string() + .contains("Value `function (repr: bool)` is not compatible with"), + "{e}" + ); + } } diff --git a/starlark-rust/starlark/src/values/layout/value_alloc_size.rs b/starlark-rust/starlark/src/values/layout/value_alloc_size.rs index 9ea85f7229dee..7016fae48a39c 100644 --- a/starlark-rust/starlark/src/values/layout/value_alloc_size.rs +++ b/starlark-rust/starlark/src/values/layout/value_alloc_size.rs @@ -30,10 +30,21 @@ pub(crate) struct ValueAllocSize { } impl ValueAllocSize { + #[inline] + pub(crate) fn try_new(size: AlignedSize) -> Option { + if size < MIN_ALLOC { + None + } else { + Some(ValueAllocSize { size }) + } + } + #[inline] pub(crate) fn new(size: AlignedSize) -> ValueAllocSize { - assert!(size >= MIN_ALLOC); - ValueAllocSize { size } + match ValueAllocSize::try_new(size) { + Some(value) => value, + None => panic!("{size} is too small for a value (minimum is {MIN_ALLOC})"), + } } #[inline] @@ -41,6 +52,11 @@ impl ValueAllocSize { self.size.layout() } + #[inline] + pub(crate) fn size(self) -> AlignedSize { + self.size + } + #[inline] pub(crate) const fn bytes(self) -> u32 { self.size.bytes() diff --git a/starlark-rust/starlark/src/values/layout/value_captured.rs b/starlark-rust/starlark/src/values/layout/value_captured.rs index dd7694e1d673f..d58869a270e7d 100644 --- a/starlark-rust/starlark/src/values/layout/value_captured.rs +++ b/starlark-rust/starlark/src/values/layout/value_captured.rs @@ -38,13 +38,13 @@ use crate::values::Value; use crate::values::ValueLike; #[derive(Debug, Trace, ProvidesStaticType, Display, NoSerialize, Allocative)] -#[display(fmt = "{:?}", self)] // This type should never be user visible +#[display("{:?}", self)] // This type should never be user visible #[repr(transparent)] #[allocative(skip)] pub(crate) struct ValueCaptured<'v>(Cell>>); #[derive(Debug, ProvidesStaticType, Display, NoSerialize, Allocative)] -#[display(fmt = "{:?}", self)] // Type is not user visible +#[display("{:?}", self)] // Type is not user visible #[repr(transparent)] pub(crate) struct FrozenValueCaptured(Option); diff --git a/starlark-rust/starlark/src/values/layout/value_lifetimeless.rs b/starlark-rust/starlark/src/values/layout/value_lifetimeless.rs new file mode 100644 index 0000000000000..153a13128605d --- /dev/null +++ b/starlark-rust/starlark/src/values/layout/value_lifetimeless.rs @@ -0,0 +1,43 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
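`ValueAllocSize::new` above now routes through a fallible `try_new` instead of a bare `assert!`, so callers can handle undersized allocations without panicking. Since `ValueAllocSize` is crate-private, here is a standalone analogue of the pattern with illustrative names (not the real types):

```rust
#[derive(Clone, Copy, Debug)]
struct AllocSize(u32);

const MIN_ALLOC: u32 = 16;

impl AllocSize {
    // Fallible constructor: callers decide how to handle failure.
    fn try_new(size: u32) -> Option<AllocSize> {
        if size < MIN_ALLOC { None } else { Some(AllocSize(size)) }
    }

    // Panicking convenience wrapper, layered on the fallible one.
    fn new(size: u32) -> AllocSize {
        match AllocSize::try_new(size) {
            Some(v) => v,
            None => panic!("{size} is too small for a value (minimum is {MIN_ALLOC})"),
        }
    }
}

fn main() {
    assert!(AllocSize::try_new(8).is_none());
    assert_eq!(AllocSize::new(32).0, 32);
}
```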
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::fmt::Debug; +use std::fmt::Display; + +use allocative::Allocative; +use dupe::Dupe; +use serde::Serialize; + +use crate::sealed::Sealed; +use crate::values::Freeze; +use crate::values::FrozenValue; + +/// Implemented by [`Value`](crate::values::Value) and [`FrozenValue`](crate::values::FrozenValue). +pub trait ValueLifetimeless: + Sealed + + Eq + + Copy + + Dupe + + Debug + + Default + + Display + + Serialize + + Allocative + + Freeze + + Sized +{ +} diff --git a/starlark-rust/starlark/src/values/layout/value_not_special.rs b/starlark-rust/starlark/src/values/layout/value_not_special.rs index 7515144122335..e802c3d5cb02b 100644 --- a/starlark-rust/starlark/src/values/layout/value_not_special.rs +++ b/starlark-rust/starlark/src/values/layout/value_not_special.rs @@ -17,12 +17,8 @@ use dupe::Dupe; -use crate::eval::runtime::frame_span::FrameSpan; -use crate::eval::Arguments; -use crate::eval::Evaluator; use crate::values::layout::vtable::AValueDyn; use crate::values::stack_guard; -use crate::values::FrozenRef; use crate::values::FrozenValue; use crate::values::Value; @@ -63,7 +59,7 @@ impl FrozenValueNotSpecial { } #[inline] - pub(crate) fn equals(self, other: Value) -> anyhow::Result { + pub(crate) fn equals(self, other: Value) -> crate::Result { if self.to_value().ptr_eq(other) { Ok(true) } else { @@ -75,20 +71,8 @@ impl FrozenValueNotSpecial { } #[inline] - fn equals_not_ptr_eq(self, other: Value) -> anyhow::Result { + fn equals_not_ptr_eq(self, other: Value) -> crate::Result { let _guard = stack_guard::stack_guard()?; self.get_ref().equals(other) } - - pub(crate) fn invoke_method<'v>( - self, - this: Value<'v>, - location: FrozenRef<'static, FrameSpan>, - args: &Arguments<'v, '_>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { - eval.with_call_stack(self.to_value(), Some(location), |eval| { - self.get_ref().invoke_method(this, args, eval) - }) - } } diff --git a/starlark-rust/starlark/src/values/layout/vtable.rs b/starlark-rust/starlark/src/values/layout/vtable.rs index e717fed29d51a..fc2506cc652cc 100644 --- a/starlark-rust/starlark/src/values/layout/vtable.rs +++ b/starlark-rust/starlark/src/values/layout/vtable.rs @@ -40,7 +40,7 @@ use crate::eval::Evaluator; use crate::private::Private; use crate::typing::Ty; use crate::values::demand::Demand; -use crate::values::int::PointerI32; +use crate::values::int::pointer_i32::PointerI32; use crate::values::layout::avalue::AValue; use crate::values::layout::avalue::BlackHole; use crate::values::layout::const_type_id::ConstTypeId; @@ -134,9 +134,9 @@ pub(crate) struct AValueVTable { allocative: unsafe fn(StarlarkValueRawPtr) -> *const dyn Allocative, } -struct GetTypeId<'v, T: StarlarkValue<'v> + ?Sized>(PhantomData<&'v T>); +struct GetTypeId<'v, T: StarlarkValue<'v>>(PhantomData<&'v T>); -impl<'v, T: StarlarkValue<'v> + ?Sized> GetTypeId<'v, T> { +impl<'v, T: StarlarkValue<'v>> GetTypeId<'v, T> { const TYPE_ID: ConstTypeId = ConstTypeId::of::<::StaticType>(); const STARLARK_TYPE_ID: StarlarkTypeId = StarlarkTypeId::of::(); } @@ -174,10 +174,7 @@ impl AValueVTable { let this = unsafe { 
&*this.value_ptr::() }; this as *const dyn Debug }, - erased_serde_serialize: |this| { - let this = unsafe { &*this.value_ptr::() }; - this as *const dyn erased_serde::Serialize - }, + erased_serde_serialize: |_this| unreachable!(), allocative: |this| { let this = unsafe { &*this.value_ptr::() }; this as *const dyn Allocative @@ -189,19 +186,19 @@ impl AValueVTable { pub(crate) const fn new<'v, T: AValue<'v>>() -> &'static AValueVTable { &AValueVTable { drop_in_place: |p| unsafe { - ptr::drop_in_place(p.value_ptr::()); + ptr::drop_in_place(p.value_ptr::()); }, is_str: T::IS_STR, memory_size: |p| unsafe { - let p = &*p.value_ptr::(); - T::alloc_size_for_extra_len(p.extra_len()) + let p = &*p.value_ptr::(); + T::alloc_size_for_extra_len(T::extra_len(p)) }, heap_freeze: |p, freezer| unsafe { - let p = &mut *AValueRepr::from_payload_ptr_mut(p.value_ptr::()); + let p = &mut *AValueRepr::from_payload_ptr_mut(p.value_ptr::()); T::heap_freeze(p, transmute!(&Freezer, &Freezer, freezer)) }, heap_copy: |p, tracer| unsafe { - let p = &mut *AValueRepr::from_payload_ptr_mut(p.value_ptr::()); + let p = &mut *AValueRepr::from_payload_ptr_mut(p.value_ptr::()); let value = T::heap_copy(p, transmute!(&Tracer, &Tracer, tracer)); transmute!(Value, Value, value) }, @@ -313,7 +310,7 @@ impl<'v> AValueDyn<'v> { } #[inline] - pub(crate) fn documentation(self) -> Option { + pub(crate) fn documentation(self) -> DocItem { (self.vtable.starlark_value.documentation)(self.value) } @@ -326,7 +323,7 @@ impl<'v> AValueDyn<'v> { } #[inline] - pub(crate) fn at(self, index: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + pub(crate) fn at(self, index: Value<'v>, heap: &'v Heap) -> crate::Result> { (self.vtable.starlark_value.at)(self.value, index, heap) } @@ -335,12 +332,12 @@ impl<'v> AValueDyn<'v> { index0: Value<'v>, index1: Value<'v>, heap: &'v Heap, - ) -> anyhow::Result> { + ) -> crate::Result> { (self.vtable.starlark_value.at2)(self.value, index0, index1, heap, Private) } #[inline] - pub(crate) fn is_in(self, collection: Value<'v>) -> anyhow::Result { + pub(crate) fn is_in(self, collection: Value<'v>) -> crate::Result { (self.vtable.starlark_value.is_in)(self.value, collection) } @@ -351,7 +348,7 @@ impl<'v> AValueDyn<'v> { stop: Option>, step: Option>, heap: &'v Heap, - ) -> anyhow::Result> { + ) -> crate::Result> { (self.vtable.starlark_value.slice)(self.value, start, stop, step, heap) } @@ -376,22 +373,22 @@ impl<'v> AValueDyn<'v> { } #[inline] - pub(crate) fn bit_and(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + pub(crate) fn bit_and(self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { (self.vtable.starlark_value.bit_and)(self.value, other, heap) } #[inline] - pub(crate) fn bit_or(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + pub(crate) fn bit_or(self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { (self.vtable.starlark_value.bit_or)(self.value, other, heap) } #[inline] - pub(crate) fn bit_xor(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + pub(crate) fn bit_xor(self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { (self.vtable.starlark_value.bit_xor)(self.value, other, heap) } #[inline] - pub(crate) fn bit_not(self, heap: &'v Heap) -> anyhow::Result> { + pub(crate) fn bit_not(self, heap: &'v Heap) -> crate::Result> { (self.vtable.starlark_value.bit_not)(self.value, heap) } @@ -401,12 +398,12 @@ impl<'v> AValueDyn<'v> { } #[inline] - pub(crate) fn length(self) -> anyhow::Result { + pub(crate) fn length(self) -> crate::Result { 
(self.vtable.starlark_value.length)(self.value) } #[inline] - pub(crate) fn iterate(self, me: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + pub(crate) fn iterate(self, me: Value<'v>, heap: &'v Heap) -> crate::Result> { (self.vtable.starlark_value.iterate)(self.value, me, heap) } @@ -426,75 +423,67 @@ impl<'v> AValueDyn<'v> { } #[inline] - pub(crate) fn get_hash(self) -> anyhow::Result { + pub(crate) fn get_hash(self) -> crate::Result { (self.vtable.starlark_value.get_hash)(self.value, Private) } #[inline] - pub(crate) fn plus(self, heap: &'v Heap) -> anyhow::Result> { + pub(crate) fn plus(self, heap: &'v Heap) -> crate::Result> { (self.vtable.starlark_value.plus)(self.value, heap) } #[inline] - pub(crate) fn minus(self, heap: &'v Heap) -> anyhow::Result> { + pub(crate) fn minus(self, heap: &'v Heap) -> crate::Result> { (self.vtable.starlark_value.minus)(self.value, heap) } #[inline] - pub(crate) fn add(self, other: Value<'v>, heap: &'v Heap) -> Option>> { + pub(crate) fn add(self, other: Value<'v>, heap: &'v Heap) -> Option>> { (self.vtable.starlark_value.add)(self.value, other, heap) } #[inline] - pub(crate) fn radd( - self, - other: Value<'v>, - heap: &'v Heap, - ) -> Option>> { + pub(crate) fn radd(self, other: Value<'v>, heap: &'v Heap) -> Option>> { (self.vtable.starlark_value.radd)(self.value, other, heap) } #[inline] - pub(crate) fn sub(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + pub(crate) fn sub(self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { (self.vtable.starlark_value.sub)(self.value, other, heap) } #[inline] - pub(crate) fn mul(self, other: Value<'v>, heap: &'v Heap) -> Option>> { + pub(crate) fn mul(self, other: Value<'v>, heap: &'v Heap) -> Option>> { (self.vtable.starlark_value.mul)(self.value, other, heap) } #[inline] - pub(crate) fn rmul( - self, - other: Value<'v>, - heap: &'v Heap, - ) -> Option>> { + pub(crate) fn rmul(self, other: Value<'v>, heap: &'v Heap) -> Option>> { (self.vtable.starlark_value.rmul)(self.value, other, heap) } #[inline] - pub(crate) fn div(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + pub(crate) fn div(self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { (self.vtable.starlark_value.div)(self.value, other, heap) } #[inline] - pub(crate) fn floor_div(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + pub(crate) fn floor_div(self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { (self.vtable.starlark_value.floor_div)(self.value, other, heap) } #[inline] - pub(crate) fn percent(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + pub(crate) fn percent(self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { (self.vtable.starlark_value.percent)(self.value, other, heap) } #[inline] - pub(crate) fn left_shift(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + pub(crate) fn left_shift(self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { (self.vtable.starlark_value.left_shift)(self.value, other, heap) } #[inline] - pub(crate) fn right_shift(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + pub(crate) fn right_shift(self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { (self.vtable.starlark_value.right_shift)(self.value, other, heap) } @@ -519,25 +508,15 @@ impl<'v> AValueDyn<'v> { } #[inline] - pub(crate) fn equals(self, other: Value<'v>) -> anyhow::Result { + pub(crate) fn equals(self, other: Value<'v>) -> crate::Result { (self.vtable.starlark_value.equals)(self.value, other) } #[inline] - pub(crate) fn compare(self, other: 
Value<'v>) -> anyhow::Result { + pub(crate) fn compare(self, other: Value<'v>) -> crate::Result { (self.vtable.starlark_value.compare)(self.value, other) } - #[inline] - pub(crate) fn invoke_method( - self, - this: Value<'v>, - args: &Arguments<'v, '_>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { - (self.vtable.starlark_value.invoke_method)(self.value, this, args, eval, Private) - } - #[inline] pub(crate) fn name_for_call_stack(self, me: Value<'v>) -> String { (self.vtable.starlark_value.name_for_call_stack)(self.value, me) @@ -547,31 +526,26 @@ impl<'v> AValueDyn<'v> { pub(crate) fn export_as( self, variable_name: &str, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result<()> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result<()> { (self.vtable.starlark_value.export_as)(self.value, variable_name, eval) } #[inline] - pub(crate) fn set_at(self, index: Value<'v>, new_value: Value<'v>) -> anyhow::Result<()> { + pub(crate) fn set_at(self, index: Value<'v>, new_value: Value<'v>) -> crate::Result<()> { (self.vtable.starlark_value.set_at)(self.value, index, new_value) } #[inline] - pub(crate) fn set_attr(self, attribute: &str, new_value: Value<'v>) -> anyhow::Result<()> { + pub(crate) fn set_attr(self, attribute: &str, new_value: Value<'v>) -> crate::Result<()> { (self.vtable.starlark_value.set_attr)(self.value, attribute, new_value) } #[inline] - pub(crate) fn write_hash(self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { + pub(crate) fn write_hash(self, hasher: &mut StarlarkHasher) -> crate::Result<()> { (self.vtable.starlark_value.write_hash)(self.value, hasher) } - #[inline] - pub(crate) fn matches_type(self, t: &str) -> bool { - (self.vtable.starlark_value.matches_type)(self.value, t) - } - #[inline] pub(crate) fn type_matches_value(self, value: Value<'v>) -> bool { (self.vtable.starlark_value.type_matches_value)(self.value, value, Private) @@ -614,8 +588,8 @@ impl<'v> AValueDynFull<'v> { pub(crate) fn invoke( self, args: &Arguments<'v, '_>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result> { (self.avalue.vtable.starlark_value.invoke)(self.avalue.value, self.value, args, eval) } } diff --git a/starlark-rust/starlark/src/values/mod.rs b/starlark-rust/starlark/src/values/mod.rs deleted file mode 100644 index 052b5f7d48848..0000000000000 --- a/starlark-rust/starlark/src/values/mod.rs +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright 2018 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! Defines a runtime Starlark value ([`Value`]) and traits for defining custom values ([`StarlarkValue`]). -//! -//! This module contains code for working with Starlark values: -//! -//! * Most code dealing with Starlark will use [`Value`], as it represents the fundamental values used in -//! Starlark. When frozen, they become [`FrozenValue`]. -//! * Values are garbage-collected, so a given [`Value`] lives on a [`Heap`]. 
-//! * Rust values (e.g. [`String`], [`Vec`]) can be added to the [`Heap`] with [`AllocValue`], -//! and deconstructed from a [`Value`] with [`UnpackValue`] -//! (or specialised methods like [`unpack_str`](Value::unpack_str)). -//! * To define your own Rust data type that can live in a [`Value`] it must implement the [`StarlarkValue`] -//! trait. -//! * All the nested modules represent the built-in Starlark values. These are all defined using [`StarlarkValue`], -//! so may serve as interesting inspiration for writing your own values, in addition to occurring in Starlark programs. - -pub use starlark_derive::starlark_attrs; -pub use starlark_derive::starlark_value; -pub use starlark_derive::Freeze; -pub use starlark_derive::NoSerialize; -pub use starlark_derive::StarlarkAttrs; -pub use starlark_derive::Trace; -pub use starlark_derive::UnpackValue; - -pub use crate::any::AnyLifetime; -pub use crate::any::ProvidesStaticType; -pub use crate::coerce::Coerce; -pub use crate::values::alloc_value::AllocFrozenValue; -pub use crate::values::alloc_value::AllocValue; -pub use crate::values::demand::Demand; -pub use crate::values::error::ValueError; -pub use crate::values::freeze::Freeze; -pub use crate::values::frozen_ref::FrozenRef; -pub use crate::values::iter::StarlarkIterator; -pub use crate::values::layout::complex::ValueTypedComplex; -pub use crate::values::layout::heap::heap_type::Freezer; -pub use crate::values::layout::heap::heap_type::FrozenHeap; -pub use crate::values::layout::heap::heap_type::FrozenHeapRef; -pub use crate::values::layout::heap::heap_type::Heap; -pub use crate::values::layout::heap::heap_type::Tracer; -pub use crate::values::layout::heap::profile::aggregated::AggregateHeapProfileInfo; -pub use crate::values::layout::identity::ValueIdentity; -pub use crate::values::layout::static_string::constant_string; -pub use crate::values::layout::static_string::StarlarkStrNRepr; -pub use crate::values::layout::typed::string::FrozenStringValue; -pub use crate::values::layout::typed::string::StringValue; -pub use crate::values::layout::typed::string::StringValueLike; -pub use crate::values::layout::typed::FrozenValueTyped; -pub use crate::values::layout::typed::ValueTyped; -pub use crate::values::layout::value::FrozenValue; -pub use crate::values::layout::value::Value; -pub use crate::values::layout::value::ValueLike; -pub use crate::values::owned::OwnedFrozenValue; -pub use crate::values::owned::OwnedFrozenValueTyped; -pub use crate::values::trace::Trace; -pub use crate::values::traits::ComplexValue; -pub use crate::values::traits::StarlarkValue; -pub use crate::values::types::any; -pub use crate::values::types::array; -pub use crate::values::types::bool; -pub use crate::values::types::dict; -pub use crate::values::types::enumeration; -pub use crate::values::types::exported_name; -pub use crate::values::types::float; -pub use crate::values::types::function; -pub use crate::values::types::int; -pub use crate::values::types::list; -pub use crate::values::types::none; -pub use crate::values::types::range; -pub use crate::values::types::record; -pub use crate::values::types::regex; -pub use crate::values::types::starlark_value_as_type; -pub use crate::values::types::string; -pub use crate::values::types::structs; -pub use crate::values::types::tuple; -pub use crate::values::unpack::UnpackValue; -pub use crate::values::unpack::ValueOf; -pub use crate::values::value_of_unchecked::ValueOfUnchecked; - -mod alloc_value; -mod comparison; -pub(crate) mod demand; -pub(crate) mod error; -mod freeze; 
-pub(crate) mod frozen_ref; -mod index; -pub(crate) mod iter; -pub(crate) mod layout; -pub(crate) mod num; -mod owned; -pub(crate) mod recursive_repr_or_json_guard; -mod stack_guard; -pub(crate) mod starlark_type_id; -mod trace; -pub(crate) mod traits; -pub mod type_repr; -pub(crate) mod types; -pub mod typing; -mod unpack; -pub(crate) mod value_of_unchecked; diff --git a/starlark-rust/starlark/src/values/num/mod.rs b/starlark-rust/starlark/src/values/num/mod.rs deleted file mode 100644 index 14af586f3372f..0000000000000 --- a/starlark-rust/starlark/src/values/num/mod.rs +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright 2018 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! Helpers for numerical values. - -pub(crate) mod typecheck; -pub(crate) mod value; diff --git a/starlark-rust/starlark/src/values/num/value.rs b/starlark-rust/starlark/src/values/num/value.rs deleted file mode 100644 index ace51e6f9bf4d..0000000000000 --- a/starlark-rust/starlark/src/values/num/value.rs +++ /dev/null @@ -1,391 +0,0 @@ -/* - * Copyright 2018 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -use std::cmp::Ordering; -use std::ops::Add; -use std::ops::Mul; -use std::ops::Sub; - -use dupe::Dupe; -use either::Either; - -use crate::collections::StarlarkHashValue; -use crate::typing::Ty; -use crate::values::type_repr::StarlarkTypeRepr; -use crate::values::types::float::StarlarkFloat; -use crate::values::types::int_or_big::StarlarkInt; -use crate::values::types::int_or_big::StarlarkIntRef; -use crate::values::AllocFrozenValue; -use crate::values::AllocValue; -use crate::values::FrozenHeap; -use crate::values::FrozenValue; -use crate::values::Heap; -use crate::values::UnpackValue; -use crate::values::Value; -use crate::values::ValueLike; - -#[derive(Debug, thiserror::Error)] -enum NumError { - #[error("float division by zero: {0} / {1}")] - DivisionByZero(Num, Num), -} - -/// [`NumRef`] represents a numerical value that can be unpacked from a [`Value`]. -/// -/// It's an intermediate representation that facilitates conversions between -/// numerical types and helps in implementation of arithmetical operations -/// between them. 
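The numeric helpers deleted below are moved elsewhere in the crate; at the Starlark level they implement the usual int/float coercions. A quick illustration of that behavior, assuming the crate's public `starlark::assert` test helper in its current form:

```rust
use starlark::assert;

fn main() {
    // Arithmetic stays integral only when both operands are ints;
    // mixing in a float coerces the result to float.
    assert::eq("7 // 2", "3");
    assert::eq("7.0 // 2", "3.0");
    assert::eq("7 % 2.0", "1.0");
    // Equal ints and floats compare (and hash) equal.
    assert::eq("42 == 42.0", "True");
}
```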
-#[derive(Clone, Debug, Dupe, Copy)] -pub(crate) enum NumRef<'v> { - Int(StarlarkIntRef<'v>), - Float(f64), -} - -#[derive(Debug, derive_more::Display)] -pub(crate) enum Num { - Int(StarlarkInt), - Float(f64), -} - -impl<'v> StarlarkTypeRepr for NumRef<'v> { - fn starlark_type_repr() -> Ty { - Either::::starlark_type_repr() - } -} - -impl StarlarkTypeRepr for Num { - fn starlark_type_repr() -> Ty { - NumRef::starlark_type_repr() - } -} - -impl<'v> UnpackValue<'v> for NumRef<'v> { - fn expected() -> String { - "int or float".to_owned() - } - - #[allow(clippy::manual_map)] - fn unpack_value(value: Value<'v>) -> Option { - if let Some(i) = StarlarkIntRef::unpack_value(value) { - Some(NumRef::Int(i)) - } else if let Some(f) = value.downcast_ref::() { - Some(NumRef::Float(f.0)) - } else { - None - } - } -} - -impl<'v> AllocValue<'v> for Num { - fn alloc_value(self, heap: &'v Heap) -> Value<'v> { - match self { - Self::Int(i) => heap.alloc(i), - Self::Float(f) => heap.alloc(f), - } - } -} - -impl AllocFrozenValue for Num { - fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue { - match self { - Self::Int(i) => heap.alloc(i), - Self::Float(f) => heap.alloc(f), - } - } -} - -impl<'v> NumRef<'v> { - /// Get underlying value as float - pub(crate) fn as_float(&self) -> f64 { - match self { - Self::Int(i) => i.to_f64(), - Self::Float(f) => *f, - } - } - - pub(crate) fn f64_to_i32_exact(f: f64) -> Option { - let i = f as i32; - if i as f64 == f { Some(i) } else { None } - } - - /// Get underlying value as int (if it can be precisely expressed as int) - pub(crate) fn as_int(&self) -> Option { - match self { - Self::Int(i) => i.to_i32(), - Self::Float(f) => Self::f64_to_i32_exact(*f), - } - } - - /// Get hash of the underlying number - pub(crate) fn get_hash_64(self) -> u64 { - fn float_hash(f: f64) -> u64 { - if f.is_nan() { - // all possible NaNs should hash to the same value - 0 - } else if f.is_infinite() { - u64::MAX - } else if f == 0.0 { - // Both 0.0 and -0.0 need the same hash, but are both equal to 0.0 - 0.0f64.to_bits() - } else { - f.to_bits() - } - } - - match (self.as_int(), self) { - // equal ints and floats should have the same hash - (Some(i), _) => i as u64, - (None, Self::Float(f)) => float_hash(f), - (None, Self::Int(StarlarkIntRef::Small(i))) => { - // shouldn't happen - as_int() should have resulted in an int - i.to_i32() as u64 - } - (None, Self::Int(StarlarkIntRef::Big(b))) => { - // Not perfect, but OK: `1000000000000000000000003` and `1000000000000000000000005` - // flush to the same float, and neither is exact float, - // so we could use better hash for such numbers. 
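The hashing rules in `get_hash_64` maintain one invariant: numbers that compare equal must hash equal, so a float that is exactly representable as an int hashes as that int, all NaNs collapse to one hash, and `0.0`/`-0.0` agree. A self-contained sketch of the same rules, simplified to the `i32`-exact case handled by `f64_to_i32_exact` (the real code also handles big ints):

```rust
fn float_hash(f: f64) -> u64 {
    if f.is_nan() {
        0 // all NaNs hash the same
    } else if f.is_infinite() {
        u64::MAX
    } else if f == 0.0 {
        0.0f64.to_bits() // 0.0 and -0.0 must agree
    } else {
        f.to_bits()
    }
}

fn num_hash_64(f: f64) -> u64 {
    // If the float is exactly an i32, hash it as that int.
    let as_int_exact = {
        let i = f as i32;
        if i as f64 == f { Some(i) } else { None }
    };
    match as_int_exact {
        Some(i) => i as u64,
        None => float_hash(f),
    }
}

fn main() {
    assert_eq!(num_hash_64(42.0), 42u64);
    assert_eq!(num_hash_64(0.0), num_hash_64(-0.0));
    assert_eq!(num_hash_64(f64::NAN), num_hash_64(-f64::NAN));
}
```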
- float_hash(b.to_f64()) - } - } - } - - pub(crate) fn get_hash(self) -> StarlarkHashValue { - StarlarkHashValue::hash_64(self.get_hash_64()) - } - - fn to_owned(self) -> Num { - match self { - NumRef::Int(i) => Num::Int(i.to_owned()), - NumRef::Float(f) => Num::Float(f), - } - } - - pub(crate) fn div(self, other: NumRef) -> anyhow::Result { - let a = self.as_float(); - let b = other.as_float(); - if b == 0.0 { - Err(NumError::DivisionByZero(self.to_owned(), other.to_owned()).into()) - } else { - Ok(a / b) - } - } - - pub(crate) fn floor_div(self, other: NumRef) -> anyhow::Result { - if let (NumRef::Int(a), NumRef::Int(b)) = (self, other) { - a.floor_div(b).map(Num::Int) - } else { - StarlarkFloat::floor_div_impl(self.as_float(), other.as_float()).map(Num::Float) - } - } - - pub(crate) fn percent(self, other: NumRef) -> anyhow::Result { - if let (NumRef::Int(a), NumRef::Int(b)) = (self, other) { - a.percent(b).map(Num::Int) - } else { - StarlarkFloat::percent_impl(self.as_float(), other.as_float()).map(Num::Float) - } - } -} - -impl<'v> From for NumRef<'v> { - fn from(f: f64) -> Self { - Self::Float(f) - } -} - -/// This is total eq per starlark spec, not Rust's partial eq. -impl<'v> PartialEq for NumRef<'v> { - fn eq(&self, other: &Self) -> bool { - if let (NumRef::Int(a), NumRef::Int(b)) = (self, other) { - a == b - } else { - StarlarkFloat::compare_impl(self.as_float(), other.as_float()) == Ordering::Equal - } - } -} - -impl<'v> Eq for NumRef<'v> {} - -impl<'v> PartialOrd for NumRef<'v> { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl<'v> Ord for NumRef<'v> { - fn cmp(&self, other: &Self) -> Ordering { - if let (NumRef::Int(a), NumRef::Int(b)) = (self, other) { - a.cmp(b) - } else { - StarlarkFloat::compare_impl(self.as_float(), other.as_float()) - } - } -} - -impl<'v> Add for NumRef<'v> { - type Output = Num; - - fn add(self, rhs: Self) -> Self::Output { - if let (NumRef::Int(a), NumRef::Int(b)) = (self, rhs) { - return Num::Int(a + b); - } - Num::Float(self.as_float() + rhs.as_float()) - } -} - -impl<'v> Sub for NumRef<'v> { - type Output = Num; - - fn sub(self, rhs: Self) -> Self::Output { - if let (NumRef::Int(a), NumRef::Int(b)) = (self, rhs) { - return Num::Int(a - b); - } - Num::Float(self.as_float() - rhs.as_float()) - } -} - -impl<'v> Mul for NumRef<'v> { - type Output = Num; - - fn mul(self, rhs: Self) -> Self::Output { - if let (NumRef::Int(a), NumRef::Int(b)) = (self, rhs) { - return Num::Int(a * b); - } - Num::Float(self.as_float() * rhs.as_float()) - } -} - -#[cfg(test)] -mod tests { - use num_bigint::BigInt; - - use super::*; - use crate::values::types::inline_int::InlineInt; - - #[test] - fn test_from_value() { - assert!(NumRef::unpack_value(Value::new_bool(true)).is_none()); - assert!(NumRef::unpack_value(Value::new_bool(false)).is_none()); - assert!(NumRef::unpack_value(Value::new_empty_string()).is_none()); - assert!(NumRef::unpack_value(Value::new_none()).is_none()); - - assert_eq!( - NumRef::unpack_value(Value::testing_new_int(0)) - .unwrap() - .as_int(), - Some(0) - ); - assert_eq!( - NumRef::unpack_value(Value::testing_new_int(42)) - .unwrap() - .as_int(), - Some(42) - ); - assert_eq!( - NumRef::unpack_value(Value::testing_new_int(-42)) - .unwrap() - .as_int(), - Some(-42) - ); - } - - #[test] - fn test_conversion_to_float() { - assert_eq!( - NumRef::Int(StarlarkIntRef::Small(InlineInt::ZERO)).as_float(), - 0.0 - ); - assert_eq!( - NumRef::Int(StarlarkIntRef::Small(InlineInt::MAX)).as_float(), - InlineInt::MAX.to_f64() - 
); - assert_eq!( - NumRef::Int(StarlarkIntRef::Small(InlineInt::MIN)).as_float(), - InlineInt::MIN.to_f64() - ); - - assert_eq!(NumRef::Float(0.0).as_float(), 0.0); - assert!(NumRef::Float(f64::NAN).as_float().is_nan()); - } - - #[test] - fn test_conversion_to_int() { - assert_eq!( - NumRef::Int(StarlarkIntRef::Small(InlineInt::testing_new(0))).as_int(), - Some(0) - ); - assert_eq!( - NumRef::Int(StarlarkIntRef::Small(InlineInt::testing_new(42))).as_int(), - Some(42) - ); - assert_eq!( - NumRef::Int(StarlarkIntRef::Small(InlineInt::testing_new(-42))).as_int(), - Some(-42) - ); - - assert_eq!(NumRef::Float(0_f64).as_int(), Some(0)); - assert_eq!(NumRef::Float(42_f64).as_int(), Some(42)); - assert_eq!(NumRef::Float(-42_f64).as_int(), Some(-42)); - - assert_eq!(NumRef::Float(i32::MIN as f64).as_int(), Some(i32::MIN)); - assert_eq!(NumRef::Float(i32::MAX as f64).as_int(), Some(i32::MAX)); - - assert_eq!(NumRef::Float(42.75).as_int(), None); - assert_eq!(NumRef::Float(-42.75).as_int(), None); - assert_eq!(NumRef::Float(f64::NAN).as_int(), None); - assert_eq!(NumRef::Float(f64::INFINITY).as_int(), None); - assert_eq!(NumRef::Float(f64::NEG_INFINITY).as_int(), None); - } - - #[test] - fn test_hashing() { - assert_eq!( - NumRef::Int(StarlarkIntRef::Small(InlineInt::testing_new(0))).get_hash_64(), - NumRef::Float(0.0).get_hash_64() - ); - assert_eq!( - NumRef::Int(StarlarkIntRef::Small(InlineInt::testing_new(42))).get_hash_64(), - NumRef::Float(42.0).get_hash_64() - ); - - assert_eq!( - NumRef::Float(f64::INFINITY + f64::NEG_INFINITY).get_hash_64(), - NumRef::Float(f64::NAN).get_hash_64() - ); - assert_eq!( - NumRef::Float("0.25".parse().unwrap()).get_hash_64(), - NumRef::Float("25e-2".parse().unwrap()).get_hash_64() - ); - - let x = 1u64 << 55; - assert_eq!(x as f64 as u64, x, "Self-check"); - assert_eq!( - NumRef::Float(x as f64).get_hash_64(), - NumRef::Int(StarlarkInt::from(BigInt::from(x)).as_ref()).get_hash_64(), - ) - } - - #[test] - fn test_eq() { - assert_eq!(NumRef::Float(f64::NAN), NumRef::Float(f64::NAN)); - assert_eq!(NumRef::Float(f64::INFINITY), NumRef::Float(f64::INFINITY)); - assert_eq!( - NumRef::Int(StarlarkIntRef::Small(InlineInt::testing_new(10))), - NumRef::Float(10.0) - ); - } -} diff --git a/starlark-rust/starlark/src/values/owned.rs b/starlark-rust/starlark/src/values/owned.rs index 138f36bac3095..f157c47a6c09a 100644 --- a/starlark-rust/starlark/src/values/owned.rs +++ b/starlark-rust/starlark/src/values/owned.rs @@ -24,21 +24,24 @@ use dupe::Clone_; use dupe::Dupe; use dupe::Dupe_; +use crate::cast::transmute; use crate::typing::Ty; use crate::values::none::NoneType; +use crate::values::owned_frozen_ref::OwnedFrozenRef; use crate::values::type_repr::StarlarkTypeRepr; use crate::values::AllocFrozenValue; use crate::values::FrozenHeap; use crate::values::FrozenHeapRef; use crate::values::FrozenValue; use crate::values::FrozenValueTyped; +use crate::values::OwnedRefFrozenRef; use crate::values::StarlarkValue; use crate::values::Value; #[derive(Debug, thiserror::Error)] enum OwnedError { #[error("Expected value of type `{0}` but got `{1}`")] - WrongType(&'static str, &'static str), + WrongType(&'static str, String), } /// A [`FrozenValue`] along with a [`FrozenHeapRef`] that ensures it is kept alive. 
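`OwnedError::WrongType` now carries the truncated repr of the offending value (a `String`) rather than just its type name, which makes failed downcasts much easier to debug. For orientation, the basic `OwnedFrozenValue` round trip from the docs below, extended with a read back (the `to_value().unpack_str()` step is my addition, using the existing public API):

```rust
use starlark::values::{FrozenHeap, OwnedFrozenValue};

fn main() {
    let heap = FrozenHeap::new();
    let value = heap.alloc("test");
    // Safe because `value` lives on the very heap we hand over as owner.
    let owned = unsafe { OwnedFrozenValue::new(heap.into_ref(), value) };
    assert_eq!(Some("test"), owned.value().to_value().unpack_str());
}
```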
@@ -69,6 +72,8 @@ impl Display for OwnedFrozenValue { } impl StarlarkTypeRepr for OwnedFrozenValue { + type Canonical = ::Canonical; + fn starlark_type_repr() -> Ty { FrozenValue::starlark_type_repr() } @@ -89,7 +94,8 @@ impl OwnedFrozenValue { /// `owner`, typically because the value was created on the heap. /// /// ``` - /// use starlark::values::{FrozenHeap, OwnedFrozenValue}; + /// use starlark::values::FrozenHeap; + /// use starlark::values::OwnedFrozenValue; /// let heap = FrozenHeap::new(); /// let value = heap.alloc("test"); /// unsafe { OwnedFrozenValue::new(heap.into_ref(), value) }; @@ -139,9 +145,11 @@ impl OwnedFrozenValue { ) -> anyhow::Result> { match self.downcast() { Ok(v) => Ok(v), - Err(this) => { - Err(OwnedError::WrongType(T::TYPE, this.value.to_value().get_type()).into()) - } + Err(this) => Err(OwnedError::WrongType( + T::TYPE, + this.value.to_value().to_string_for_type_error(), + ) + .into()), } } @@ -220,6 +228,23 @@ impl> Deref for OwnedFrozenValueTyped { } impl> OwnedFrozenValueTyped { + /// Create an [`OwnedFrozenValueTyped`] - generally [`OwnedFrozenValueTyped`]s are obtained + /// from downcasting [`OwnedFrozenValue`]. + /// + /// Safe provided the `value` (and any values it points at) are kept alive by the + /// `owner`, typically because the value was created on the heap. + /// + /// ``` + /// use starlark::values::FrozenHeap; + /// use starlark::values::OwnedFrozenValue; + /// let heap = FrozenHeap::new(); + /// let value = heap.alloc("test"); + /// unsafe { OwnedFrozenValue::new(heap.into_ref(), value) }; + /// ``` + pub unsafe fn new(owner: FrozenHeapRef, value: FrozenValueTyped<'static, T>) -> Self { + Self { owner, value } + } + /// Erase the type. /// /// This operation is unsafe because returned value is not bound by the heap lifetime. @@ -243,6 +268,17 @@ impl> OwnedFrozenValueTyped { } } + /// Convert to borrowed ref. + pub fn as_owned_ref_frozen_ref(&self) -> OwnedRefFrozenRef<'_, T> { + unsafe { OwnedRefFrozenRef::new_unchecked(self.value.as_ref(), &self.owner) } + } + + /// Convert to an owned ref. + pub fn into_owned_frozen_ref(self) -> OwnedFrozenRef { + // SAFETY: Heap matches the value + unsafe { OwnedFrozenRef::new_unchecked(self.value.as_ref(), self.owner) } + } + /// Obtain a reference to the FrozenHeap that owns this value. pub fn owner(&self) -> &FrozenHeapRef { &self.owner @@ -253,12 +289,24 @@ impl> OwnedFrozenValueTyped { self.value.as_ref() } + /// Obtain a reference to the value. + /// + /// This should return `FrozenValueTyped<'_, T>`, but it is hard to make it work. + pub unsafe fn value_typed(&self) -> FrozenValueTyped<'static, T> { + self.value + } + + /// Extract a [`FrozenValueTyped`] by passing the [`FrozenHeap`] which will keep it alive. + pub fn owned_frozen_value_typed(&self, heap: &FrozenHeap) -> FrozenValueTyped<'static, T> { + heap.add_reference(&self.owner); + self.value + } + /// Extract a [`FrozenValue`] by passing the [`FrozenHeap`] which will keep it alive. /// /// See [`OwnedFrozenValue::owned_frozen_value`]. - pub unsafe fn owned_frozen_value(&self, heap: &FrozenHeap) -> FrozenValue { - heap.add_reference(&self.owner); - self.value.to_frozen_value() + pub fn owned_frozen_value(&self, heap: &FrozenHeap) -> FrozenValue { + self.owned_frozen_value_typed(heap).to_frozen_value() } /// Extract a [`Value`] by passing the [`FrozenHeap`] which will promise to keep it alive. @@ -266,7 +314,20 @@ impl> OwnedFrozenValueTyped { /// See [`OwnedFrozenValue::owned_value`]. 
pub fn owned_value<'v>(&self, heap: &'v FrozenHeap) -> Value<'v> { // Safe because we convert it to a value which is tied to the owning heap - unsafe { self.owned_frozen_value(heap).to_value() } + self.owned_frozen_value(heap).to_value() + } + + /// Extract a reference by passing the [`FrozenHeap`] which will promise to keep it alive. + /// + /// See [`OwnedFrozenValue::owned_frozen_value`]. + /// + /// Not returning `ValueTyped` because of lifetime issues. + pub fn owned_as_ref<'v>(&self, heap: &'v FrozenHeap) -> &'v T { + // Keep the reference. + self.owned_value(heap); + + // SAFETY: we attached the value to the heap, and we return value with a heap lifetime. + unsafe { transmute!(&T, &T, self.as_ref()) } } /// Operate on the [`FrozenValue`] stored inside. @@ -292,4 +353,15 @@ impl> OwnedFrozenValueTyped { value: f(self.value)?, }) } + + /// Same as [`map`](OwnedFrozenValue::map) above but with [`Option`] + pub fn maybe_map>( + &self, + f: impl FnOnce(FrozenValueTyped) -> Option>, + ) -> Option> { + Some(OwnedFrozenValueTyped { + owner: self.owner.dupe(), + value: f(self.value)?, + }) + } } diff --git a/starlark-rust/starlark/src/values/owned_frozen_ref.rs b/starlark-rust/starlark/src/values/owned_frozen_ref.rs new file mode 100644 index 0000000000000..ed50cdafc0d6d --- /dev/null +++ b/starlark-rust/starlark/src/values/owned_frozen_ref.rs @@ -0,0 +1,209 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::convert::Infallible; +use std::fmt; +use std::fmt::Formatter; +use std::mem; +use std::ops::Deref; + +use allocative::Allocative; +use dupe::Clone_; +use dupe::Copy_; +use dupe::Dupe; +use dupe::Dupe_; + +use crate::values::FrozenHeap; +use crate::values::FrozenHeapRef; +use crate::values::FrozenRef; + +/// A reference to a value stored in a frozen heap with a reference to the heap. +#[derive(Copy_, Clone_, Dupe_)] +pub struct OwnedRefFrozenRef<'f, T: ?Sized + 'static> { + owner: &'f FrozenHeapRef, + value: FrozenRef<'f, T>, +} + +/// Same as a `FrozenRef`, but it keeps itself alive by storing a reference to the owning heap. +/// +/// Usually constructed from an `OwnedFrozenValueTyped`. +#[derive(Clone, Dupe, Allocative)] +pub struct OwnedFrozenRef { + owner: FrozenHeapRef, + // Invariant: this FrozenValue must be kept alive by the `owner` field. + value: FrozenRef<'static, T>, +} + +impl<'f, T: ?Sized> OwnedRefFrozenRef<'f, T> { + /// Create a new `OwnedRefFrozenRef` pointing at the given value. + pub unsafe fn new_unchecked( + value: &'f T, + owner: &'f FrozenHeapRef, + ) -> OwnedRefFrozenRef<'f, T> { + OwnedRefFrozenRef { + owner, + value: FrozenRef::new(value), + } + } + + /// Owner heap. + pub fn owner(&self) -> &'f FrozenHeapRef { + self.owner + } + + /// Return a reference to the underlying value. + pub fn as_ref(self) -> &'f T { + self.value.as_ref() + } + + /// Add a reference to a new heap, and return the pointer with the lifetime of the new heap. 
+ pub fn add_heap_ref<'v>(self, heap: &'v FrozenHeap) -> &'v T { + heap.add_reference(self.owner); + unsafe { mem::transmute::<&'f T, &'v T>(self.value.as_ref()) } + } + + /// Convert heap pointer to an owned one. + pub fn to_owned(self) -> OwnedFrozenRef { + OwnedFrozenRef { + owner: self.owner.dupe(), + value: unsafe { mem::transmute::, FrozenRef<'static, T>>(self.value) }, + } + } + + /// Fallible map the reference to another one. + pub fn try_map_result(self, f: F) -> Result, E> + where + F: FnOnce(&'f T) -> Result<&'f U, E>, + { + Ok(OwnedRefFrozenRef { + owner: self.owner, + value: FrozenRef::new(f(self.value.as_ref())?), + }) + } + + /// Apply a function to the underlying value. Projection operation. + pub fn map(self, f: F) -> OwnedRefFrozenRef<'f, U> + where + F: FnOnce(&'f T) -> &'f U, + { + match self.try_map_result(|x| Ok(f(x))) { + Ok(x) => x, + Err(e) => { + let e: Infallible = e; + match e {} + } + } + } + + /// Optionally map the reference to another one. + pub fn try_map_option(self, f: F) -> Option> + where + F: FnOnce(&'f T) -> Option<&'f U>, + { + match self.try_map_result(|x| f(x).ok_or(())) { + Ok(x) => Some(x), + Err(()) => None, + } + } +} + +impl OwnedFrozenRef { + /// Creates a new `OwnedFrozenRef` pointing at the given value. + /// + /// ## Safety + /// + /// The reference must be kept alive by the owning heap + pub unsafe fn new_unchecked(value: &'static T, owner: FrozenHeapRef) -> OwnedFrozenRef { + OwnedFrozenRef { + owner, + value: FrozenRef::new(value), + } + } + + /// Borrow. + pub fn as_owned_ref_frozen_ref(&self) -> OwnedRefFrozenRef<'_, T> { + OwnedRefFrozenRef { + owner: &self.owner, + value: self.value, + } + } + + /// Returns a reference to the underlying value. + pub fn as_ref<'a>(&'a self) -> &'a T { + self.as_owned_ref_frozen_ref().as_ref() + } + + /// Converts `self` into a new reference that points at something reachable from the previous. + /// + /// See the caveats on `[starlark::values::OwnedFrozenValue::map]` + pub fn map(self, f: F) -> OwnedFrozenRef + where + for<'v> F: FnOnce(&'v T) -> &'v U, + { + OwnedFrozenRef { + owner: self.owner, + value: self.value.map(f), + } + } + + /// Fallible map the reference to another one. + pub fn try_map_result(self, f: F) -> Result, E> + where + for<'v> F: FnOnce(&'v T) -> Result<&'v U, E>, + { + Ok(OwnedFrozenRef { + owner: self.owner, + value: self.value.try_map_result(f)?, + }) + } + + /// Optionally map the reference to another one. 
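`OwnedRefFrozenRef::map` above derives the infallible projection from the fallible `try_map_result`, discharging the impossible error with an empty `match`. The trick in isolation:

```rust
use std::convert::Infallible;

// The fallible primitive.
fn try_map<T, U, E>(x: T, f: impl FnOnce(T) -> Result<U, E>) -> Result<U, E> {
    f(x)
}

// The infallible wrapper: wrap the closure in `Ok`, then eliminate the
// `Err` arm entirely, since `Infallible` has no values to match.
fn map<T, U>(x: T, f: impl FnOnce(T) -> U) -> U {
    match try_map(x, |x| Ok::<_, Infallible>(f(x))) {
        Ok(u) => u,
        Err(e) => match e {},
    }
}

fn main() {
    assert_eq!(map(2, |n| n * 21), 42);
}
```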
+ pub fn try_map_option(self, f: F) -> Option> + where + for<'v> F: FnOnce(&'v T) -> Option<&'v U>, + { + Some(OwnedFrozenRef { + owner: self.owner, + value: self.value.try_map_option(f)?, + }) + } + + /// Get a reference to the owning frozen heap + pub fn owner(&self) -> &FrozenHeapRef { + &self.owner + } +} + +impl fmt::Debug for OwnedFrozenRef { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&self.value, f) + } +} + +impl fmt::Display for OwnedFrozenRef { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(&self.value, f) + } +} + +impl Deref for OwnedFrozenRef { + type Target = T; + + fn deref(&self) -> &T { + self.as_ref() + } +} diff --git a/starlark-rust/starlark/src/values/trace.rs b/starlark-rust/starlark/src/values/trace.rs index 44dfd8e7bc4ce..c101c1091119e 100644 --- a/starlark-rust/starlark/src/values/trace.rs +++ b/starlark-rust/starlark/src/values/trace.rs @@ -36,7 +36,9 @@ use std::sync::Mutex; use either::Either; use hashbrown::raw::RawTable; +use hashbrown::HashTable; use starlark_map::small_set::SmallSet; +use starlark_map::Hashed; use crate::collections::SmallMap; use crate::values::FrozenValue; @@ -54,7 +56,7 @@ use crate::values::Value; /// /// #[derive(Trace)] /// struct MySet<'v> { -/// keys: Vec> +/// keys: Vec>, /// } /// ``` pub unsafe trait Trace<'v> { @@ -87,6 +89,12 @@ unsafe impl<'v, T: Trace<'v>> Trace<'v> for RawTable { } } +unsafe impl<'v, T: Trace<'v>> Trace<'v> for HashTable { + fn trace(&mut self, tracer: &Tracer<'v>) { + self.iter_mut().for_each(|e| e.trace(tracer)); + } +} + unsafe impl<'v, K: Trace<'v>, V: Trace<'v>> Trace<'v> for SmallMap { fn trace(&mut self, tracer: &Tracer<'v>) { for (k, v) in self.iter_mut_unchecked() { @@ -104,6 +112,12 @@ unsafe impl<'v, T: Trace<'v>> Trace<'v> for SmallSet { } } +unsafe impl<'v, T: Trace<'v>> Trace<'v> for Hashed { + fn trace(&mut self, tracer: &Tracer<'v>) { + self.key_mut().trace(tracer); + } +} + unsafe impl<'v, T: Trace<'v>> Trace<'v> for Option { fn trace(&mut self, tracer: &Tracer<'v>) { if let Some(x) = self { diff --git a/starlark-rust/starlark/src/values/traits.rs b/starlark-rust/starlark/src/values/traits.rs index 9e8c65d9781aa..253e1919bf798 100644 --- a/starlark-rust/starlark/src/values/traits.rs +++ b/starlark-rust/starlark/src/values/traits.rs @@ -25,7 +25,7 @@ //! //! __Note__: we use _sequence_, _iterable_ and _indexable_ according to the //! definition in the [Starlark specification]( -//! https://github.com/google/skylark/blob/a0e5de7e63b47e716cca7226662a4c95d47bf873/doc/spec.md#sequence-types). +//! https://github.com/bazelbuild/starlark/blob/master/spec.md#sequence-types). //! We also use the term _container_ for denoting any of those type that can //! hold several values. @@ -43,6 +43,8 @@ use crate::any::ProvidesStaticType; use crate::collections::Hashed; use crate::collections::StarlarkHasher; use crate::docs::DocItem; +use crate::docs::DocMember; +use crate::docs::DocProperty; use crate::environment::Methods; use crate::eval::Arguments; use crate::eval::Evaluator; @@ -55,13 +57,14 @@ use crate::values::error::ControlError; use crate::values::function::FUNCTION_TYPE; use crate::values::Freeze; use crate::values::FrozenStringValue; +use crate::values::FrozenValue; use crate::values::Heap; use crate::values::Trace; use crate::values::Value; use crate::values::ValueError; -/// A trait for values which are more complex - because they are either mutable, -/// or contain references to other values. 
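Alongside the new `HashTable` and `Hashed` impls in `trace.rs` above, the pattern for tracing a custom collection is always the same: visit every contained value during GC. A hedged sketch of a manual impl (in practice the `#[derive(Trace)]` shown in the doc-comment is preferable):

```rust
use starlark::values::{Trace, Tracer, Value};

struct Bag<'v>(Vec<Value<'v>>);

// Unsafe contract: every reachable `Value` must be reported to the tracer.
unsafe impl<'v> Trace<'v> for Bag<'v> {
    fn trace(&mut self, tracer: &Tracer<'v>) {
        self.0.iter_mut().for_each(|v| v.trace(tracer));
    }
}
```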
+/// A trait for values which are more complex - because they are either mutable +/// (e.g. using [`RefCell`](std::cell::RefCell)), or contain references to other values. /// /// For values that contain nested [`Value`] types (mutable or not) there are a bunch of helpers /// and macros. @@ -76,20 +79,40 @@ use crate::values::ValueError; /// generate `One` and `FrozenOne` aliases. /// /// ``` -/// use starlark::values::{ProvidesStaticType, ComplexValue, Coerce, Freezer, FrozenValue, StarlarkValue, Value, ValueLike, Trace, Tracer, Freeze, NoSerialize}; -/// use starlark::{starlark_complex_value}; -/// use derive_more::Display; /// use allocative::Allocative; +/// use derive_more::Display; +/// use starlark::starlark_complex_value; +/// use starlark::values::Coerce; +/// use starlark::values::ComplexValue; +/// use starlark::values::Freeze; +/// use starlark::values::Freezer; +/// use starlark::values::FrozenValue; +/// use starlark::values::NoSerialize; +/// use starlark::values::ProvidesStaticType; +/// use starlark::values::StarlarkValue; +/// use starlark::values::Trace; +/// use starlark::values::Tracer; +/// use starlark::values::Value; +/// use starlark::values::ValueLike; /// use starlark_derive::starlark_value; /// -/// #[derive(Debug, Trace, Coerce, Display, ProvidesStaticType, NoSerialize, Allocative)] +/// #[derive( +/// Debug, +/// Trace, +/// Coerce, +/// Display, +/// ProvidesStaticType, +/// NoSerialize, +/// Allocative +/// )] /// #[repr(C)] /// struct OneGen(V); /// starlark_complex_value!(One); /// /// #[starlark_value(type = "one")] -/// impl<'v, V: ValueLike<'v> + 'v> StarlarkValue<'v> for OneGen -/// where Self: ProvidesStaticType<'v>, +/// impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for OneGen +/// where +/// Self: ProvidesStaticType<'v>, /// { /// // To implement methods which work for both `One` and `FrozenOne`, /// // use the `ValueLike` trait. @@ -177,8 +200,9 @@ where /// Every Rust value stored in a [`Value`] must implement this trait. /// You _must_ also implement [`ComplexValue`] if: /// -/// * A type is _mutable_, if you ever need to get a `&mut self` reference to it. -/// * A type _contains_ nested Starlark [`Value`]s. +/// * A type is not [`Send`] and [`Sync`], typically because it contains +/// interior mutability such as a [`RefCell`](std::cell::RefCell). +/// * A type contains nested Starlark [`Value`]s. /// /// There are only two required members of [`StarlarkValue`], namely /// [`TYPE`](StarlarkValue::TYPE) @@ -187,21 +211,20 @@ where /// proc macro: /// /// ``` -/// use starlark::values::StarlarkValue; -/// use starlark::values::ProvidesStaticType; -/// use starlark::values::NoSerialize; +/// use allocative::Allocative; /// # use starlark::starlark_simple_value; /// use derive_more::Display; -/// use allocative::Allocative; +/// use starlark::values::NoSerialize; +/// use starlark::values::ProvidesStaticType; +/// use starlark::values::StarlarkValue; /// use starlark_derive::starlark_value; /// /// #[derive(Debug, Display, ProvidesStaticType, NoSerialize, Allocative)] -/// #[display(fmt = "Foo")] +/// #[display("Foo")] /// struct Foo; /// # starlark_simple_value!(Foo); /// #[starlark_value(type = "foo")] -/// impl<'v> StarlarkValue<'v> for Foo { -/// } +/// impl<'v> StarlarkValue<'v> for Foo {} /// ``` /// /// Every additional field enables further features in Starlark. In most cases the default @@ -269,12 +292,6 @@ pub trait StarlarkValue<'v>: false } - /// Is this value a match for a named type. 
Usually returns `true` for - /// values matching `get_type`, but might also work for subtypes it implements. - fn matches_type(&self, ty: &str) -> bool { - Self::TYPE == ty - } - /// Function is implemented for types values. #[doc(hidden)] fn type_matches_value(&self, _value: Value<'v>, _private: Private) -> bool { @@ -293,12 +310,22 @@ pub trait StarlarkValue<'v>: None } - /// Return structured documentation for self, if available. - fn documentation(&self) -> Option + /// Return the documentation for this value. + /// + /// This should be the doc-item that is expected to be generated when this value appears as a + /// global in a module. In other words, for normal types this should generally return a + /// `DocMember::Property`. In that case there is no need to override this method. + fn documentation(&self) -> DocItem where Self: Sized, { - Self::get_methods().map(|methods| DocItem::Object(methods.documentation())) + let ty = self + .typechecker_ty() + .unwrap_or_else(|| Self::get_type_starlark_repr()); + DocItem::Member(DocMember::Property(DocProperty { + docs: None, + typ: ty, + })) } /// Type of this instance for typechecker. @@ -355,7 +382,7 @@ pub trait StarlarkValue<'v>: /// Return a hash data for self to be used when self is placed as a key in a `Dict`. /// Return an [`Err`] if there is no hash for this value (e.g. list). /// Must be stable between frozen and non-frozen values. - fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { + fn write_hash(&self, hasher: &mut StarlarkHasher) -> crate::Result<()> { if Self::TYPE == FUNCTION_TYPE { // The Starlark spec says values of type "function" must be hashable. // We could return the address of the function, but that changes @@ -366,13 +393,15 @@ pub trait StarlarkValue<'v>: let _ = hasher; Ok(()) } else { - Err(ControlError::NotHashableValue(Self::TYPE.to_owned()).into()) + Err(crate::Error::new_other(ControlError::NotHashableValue( + Self::TYPE.to_owned(), + ))) } } /// Get the hash value. Calls [`write_hash`](Self::write_hash) by default. #[doc(hidden)] - fn get_hash(&self, _private: Private) -> anyhow::Result { + fn get_hash(&self, _private: Private) -> crate::Result { let mut hasher = StarlarkHasher::new(); self.write_hash(&mut hasher)?; Ok(hasher.finish_small()) @@ -389,7 +418,7 @@ pub trait StarlarkValue<'v>: /// Equality must be symmetric (`a == b` implies `b == a`). /// When evaluating `a == b` (or when using equality in dicts and such), /// it is not specified whether `a.equals(b)` or `b.equals(a)` is called. - fn equals(&self, _other: Value<'v>) -> anyhow::Result { + fn equals(&self, _other: Value<'v>) -> crate::Result { // Type is only equal via a pointer Ok(false) } @@ -397,7 +426,7 @@ pub trait StarlarkValue<'v>: /// Compare `self` with `other`. /// This method returns a result of type [`Ordering`], or an [`Err`] /// if the two types differ. - fn compare(&self, other: Value<'v>) -> anyhow::Result { + fn compare(&self, other: Value<'v>) -> crate::Result { ValueError::unsupported_with(self, "compare", other) } @@ -412,32 +441,13 @@ pub trait StarlarkValue<'v>: &self, _me: Value<'v>, _args: &Arguments<'v, '_>, - _eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { + _eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result> { ValueError::unsupported(self, "call()") } - /// Invoke this object as a method (after getattr, so this object is unbound). - /// - /// This is an internal operation, it cannot be used or implemented - /// outside of the Starlark crate. 
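The default `documentation` above now returns a property doc derived from the typechecker type instead of an `Option`. A type that wants to advertise something else can override it; a minimal sketch, assuming a hypothetical `Secs` global that should be documented as an `int`-typed property:

```rust
use allocative::Allocative;
use derive_more::Display;
use starlark::docs::{DocItem, DocMember, DocProperty};
use starlark::typing::Ty;
use starlark::values::{NoSerialize, ProvidesStaticType, StarlarkValue};
use starlark_derive::starlark_value;

#[derive(Debug, Display, ProvidesStaticType, NoSerialize, Allocative)]
#[display("Secs")]
struct Secs;

#[starlark_value(type = "secs")]
impl<'v> StarlarkValue<'v> for Secs {
    // Override the default: document this global as an int-typed property.
    fn documentation(&self) -> DocItem {
        DocItem::Member(DocMember::Property(DocProperty {
            docs: None,
            typ: Ty::int(),
        }))
    }
}
```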
- /// - /// # Parameters - /// - /// * `this` - the object to invoke the unbound method on - #[doc(hidden)] - fn invoke_method( - &self, - _this: Value<'v>, - _args: &Arguments<'v, '_>, - _eval: &mut Evaluator<'v, '_>, - _sealed: Private, - ) -> anyhow::Result> { - unreachable!("invoke_method should only be invoked for method or attribute"); - } - /// Return the result of `a[index]` if `a` is indexable. - fn at(&self, index: Value<'v>, _heap: &'v Heap) -> anyhow::Result> { + fn at(&self, index: Value<'v>, _heap: &'v Heap) -> crate::Result> { ValueError::unsupported_with(self, "[]", index) } @@ -448,7 +458,7 @@ pub trait StarlarkValue<'v>: _index1: Value<'v>, _heap: &'v Heap, _private: Private, - ) -> anyhow::Result> { + ) -> crate::Result> { ValueError::unsupported(self, "[,]") } @@ -482,14 +492,14 @@ pub trait StarlarkValue<'v>: _stop: Option>, _stride: Option>, _heap: &'v Heap, - ) -> anyhow::Result> { + ) -> crate::Result> { ValueError::unsupported(self, "[::]") } /// Implement iteration over the value of this container by providing /// the values in a `Vec`. #[starlark_internal_vtable(skip)] - fn iterate_collect(&self, _heap: &'v Heap) -> anyhow::Result>> { + fn iterate_collect(&self, _heap: &'v Heap) -> crate::Result>> { ValueError::unsupported(self, "(iter)") } @@ -518,7 +528,7 @@ pub trait StarlarkValue<'v>: /// So implementations of iterators may acquire mutation lock in `iterate`, /// assume that it is held in `iter_next`, and release it in `iter_stop`. /// Obviously, there are no such guarantees if these functions are called directly. - unsafe fn iterate(&self, _me: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + unsafe fn iterate(&self, _me: Value<'v>, heap: &'v Heap) -> crate::Result> { Ok(heap.alloc_tuple(&self.iterate_collect(heap)?)) } @@ -560,7 +570,7 @@ pub trait StarlarkValue<'v>: } /// Returns the length of the value, if this value is a sequence. - fn length(&self) -> anyhow::Result { + fn length(&self) -> crate::Result { ValueError::unsupported(self, "len()") } @@ -626,7 +636,7 @@ pub trait StarlarkValue<'v>: /// ('z' in 'abc') == False /// # "#); /// ``` - fn is_in(&self, other: Value<'v>) -> anyhow::Result { + fn is_in(&self, other: Value<'v>) -> crate::Result { ValueError::unsupported_owned(other.get_type(), "in", Some(Self::TYPE)) } @@ -639,7 +649,7 @@ pub trait StarlarkValue<'v>: /// +1 == 1 /// # "#); /// ``` - fn plus(&self, _heap: &'v Heap) -> anyhow::Result> { + fn plus(&self, _heap: &'v Heap) -> crate::Result> { ValueError::unsupported(self, "+") } @@ -652,18 +662,19 @@ pub trait StarlarkValue<'v>: /// -(1) == -1 /// # "#); /// ``` - fn minus(&self, _heap: &'v Heap) -> anyhow::Result> { + fn minus(&self, _heap: &'v Heap) -> crate::Result> { ValueError::unsupported(self, "-") } - /// Add with the arguments the other way around. Should return [`None`] - /// to fall through to normal add. - fn radd(&self, _lhs: Value<'v>, _heap: &'v Heap) -> Option>> { + /// Add with the arguments the other way around. + /// Normal `add` should return `None` in order for it to be evaluated. + fn radd(&self, _lhs: Value<'v>, _heap: &'v Heap) -> Option>> { None } /// Add `other` to the current value. Pass both self and - /// the Value form of self as original. + /// the Value form of self as original. Should return [`None`] + /// to fall through to `radd`. 
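The `add`/`radd` pair implements the dispatch described above: the evaluator calls `lhs.add(rhs)` first and only falls back to `rhs.radd(lhs)` when `add` returns `None`. A sketch for a hypothetical `Counter` type (the `unpack_i32` helper and simple-value plumbing are assumptions of this sketch, not part of the diff):

```rust
use allocative::Allocative;
use derive_more::Display;
use starlark::starlark_simple_value;
use starlark::values::{Heap, NoSerialize, ProvidesStaticType, StarlarkValue, Value};
use starlark_derive::starlark_value;

#[derive(Debug, Display, ProvidesStaticType, NoSerialize, Allocative)]
#[display("counter")]
struct Counter(i32);
starlark_simple_value!(Counter);

#[starlark_value(type = "counter")]
impl<'v> StarlarkValue<'v> for Counter {
    // `counter + int`: return None for non-ints so `rhs.radd` gets a chance.
    fn add(&self, rhs: Value<'v>, heap: &'v Heap) -> Option<starlark::Result<Value<'v>>> {
        let n = rhs.unpack_i32()?;
        Some(Ok(heap.alloc(Counter(self.0 + n))))
    }

    // `int + counter` lands here after the int lhs declines to handle it.
    fn radd(&self, lhs: Value<'v>, heap: &'v Heap) -> Option<starlark::Result<Value<'v>>> {
        let n = lhs.unpack_i32()?;
        Some(Ok(heap.alloc(Counter(n + self.0))))
    }
}
```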
/// /// # Examples /// @@ -675,7 +686,7 @@ pub trait StarlarkValue<'v>: /// (1, 2, 3) + (2, 3) == (1, 2, 3, 2, 3) /// # "#); /// ``` - fn add(&self, _rhs: Value<'v>, _heap: &'v Heap) -> Option>> { + fn add(&self, _rhs: Value<'v>, _heap: &'v Heap) -> Option>> { None } @@ -688,12 +699,12 @@ pub trait StarlarkValue<'v>: /// 1 - 2 == -1 /// # "#); /// ``` - fn sub(&self, other: Value<'v>, _heap: &'v Heap) -> anyhow::Result> { + fn sub(&self, other: Value<'v>, _heap: &'v Heap) -> crate::Result> { ValueError::unsupported_with(self, "-", other) } /// Called on `rhs` of `lhs * rhs` when `lhs.mul` returns `None`. - fn rmul(&self, lhs: Value<'v>, heap: &'v Heap) -> Option>> { + fn rmul(&self, lhs: Value<'v>, heap: &'v Heap) -> Option>> { let _ignore = (lhs, heap); None } @@ -712,7 +723,7 @@ pub trait StarlarkValue<'v>: /// (1, 2, 3) * 3 == (1, 2, 3, 1, 2, 3, 1, 2, 3) /// # "#); /// ``` - fn mul(&self, _rhs: Value<'v>, _heap: &'v Heap) -> Option>> { + fn mul(&self, _rhs: Value<'v>, _heap: &'v Heap) -> Option>> { None } @@ -726,13 +737,13 @@ pub trait StarlarkValue<'v>: /// 7 / 2 == 3.5 /// # "#); /// ``` - fn div(&self, other: Value<'v>, _heap: &'v Heap) -> anyhow::Result> { + fn div(&self, other: Value<'v>, _heap: &'v Heap) -> crate::Result> { ValueError::unsupported_with(self, "/", other) } /// Apply the percent operator between the current value and `other`. Usually used on /// strings, as per - /// [the Starlark spec](https://github.com/google/skylark/blob/a0e5de7e63b47e716cca7226662a4c95d47bf873/doc/spec.md#string-interpolation). + /// [the Starlark spec](https://github.com/bazelbuild/starlark/blob/master/spec.md#string-interpolation). /// /// # Examples /// @@ -757,7 +768,7 @@ pub trait StarlarkValue<'v>: /// "test" % () == "test" /// # "#); /// ``` - fn percent(&self, other: Value<'v>, _heap: &'v Heap) -> anyhow::Result> { + fn percent(&self, other: Value<'v>, _heap: &'v Heap) -> crate::Result> { ValueError::unsupported_with(self, "%", other) } @@ -778,12 +789,12 @@ pub trait StarlarkValue<'v>: /// 3.0 // 2.0 == 1.0 /// # "#); /// ``` - fn floor_div(&self, other: Value<'v>, _heap: &'v Heap) -> anyhow::Result> { + fn floor_div(&self, other: Value<'v>, _heap: &'v Heap) -> crate::Result> { ValueError::unsupported_with(self, "//", other) } /// Bitwise `&` operator. - fn bit_and(&self, other: Value<'v>, _heap: &'v Heap) -> anyhow::Result> { + fn bit_and(&self, other: Value<'v>, _heap: &'v Heap) -> crate::Result> { ValueError::unsupported_with(self, "&", other) } @@ -799,27 +810,27 @@ pub trait StarlarkValue<'v>: /// {1: 2} | {1: 3} == {1: 3} /// # "#); /// ``` - fn bit_or(&self, other: Value<'v>, _heap: &'v Heap) -> anyhow::Result> { + fn bit_or(&self, other: Value<'v>, _heap: &'v Heap) -> crate::Result> { ValueError::unsupported_with(self, "|", other) } /// Bitwise `^` operator. - fn bit_xor(&self, other: Value<'v>, _heap: &'v Heap) -> anyhow::Result> { + fn bit_xor(&self, other: Value<'v>, _heap: &'v Heap) -> crate::Result> { ValueError::unsupported_with(self, "^", other) } /// Bitwise `~` operator. - fn bit_not(&self, _heap: &'v Heap) -> anyhow::Result> { + fn bit_not(&self, _heap: &'v Heap) -> crate::Result> { ValueError::unsupported(self, "~") } /// Bitwise `<<` operator. - fn left_shift(&self, other: Value<'v>, _heap: &'v Heap) -> anyhow::Result> { + fn left_shift(&self, other: Value<'v>, _heap: &'v Heap) -> crate::Result> { ValueError::unsupported_with(self, "<<", other) } /// Bitwise `>>` operator. 
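All of the operator hooks in this hunk move from `anyhow::Result` to the crate's own error type (exported as `starlark::Result`). For downstream implementations the migration is mostly mechanical signature updates; a minimal sketch with a hypothetical `Color` value implementing the `~` operator against the new signature:

```rust
use allocative::Allocative;
use derive_more::Display;
use starlark::starlark_simple_value;
use starlark::values::{Heap, NoSerialize, ProvidesStaticType, StarlarkValue, Value};
use starlark_derive::starlark_value;

#[derive(Debug, Display, ProvidesStaticType, NoSerialize, Allocative)]
#[display("color")]
struct Color(u32);
starlark_simple_value!(Color);

#[starlark_value(type = "color")]
impl<'v> StarlarkValue<'v> for Color {
    // Previously `-> anyhow::Result<Value<'v>>`; now the crate error type.
    fn bit_not(&self, heap: &'v Heap) -> starlark::Result<Value<'v>> {
        Ok(heap.alloc(Color(!self.0)))
    }
}
```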
- fn right_shift(&self, other: Value<'v>, _heap: &'v Heap) -> anyhow::Result> { + fn right_shift(&self, other: Value<'v>, _heap: &'v Heap) -> crate::Result> { ValueError::unsupported_with(self, ">>", other) } @@ -834,7 +845,11 @@ pub trait StarlarkValue<'v>: } /// Called when exporting a value under a specific name, - fn export_as(&self, _variable_name: &str, _eval: &mut Evaluator<'v, '_>) -> anyhow::Result<()> { + fn export_as( + &self, + _variable_name: &str, + _eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result<()> { // Most data types ignore how they are exported // but rules/providers like to use it as a helpful hint for users Ok(()) @@ -850,13 +865,15 @@ pub trait StarlarkValue<'v>: /// v == [1, 1, [2, 3]] /// # "#); /// ``` - fn set_at(&self, _index: Value<'v>, _new_value: Value<'v>) -> anyhow::Result<()> { - Err(ValueError::CannotMutateImmutableValue.into()) + fn set_at(&self, _index: Value<'v>, _new_value: Value<'v>) -> crate::Result<()> { + Err(crate::Error::new_other( + ValueError::CannotMutateImmutableValue, + )) } /// Set the attribute named `attribute` of the current value to /// `value` (e.g. `a.attribute = value`). - fn set_attr(&self, attribute: &str, _new_value: Value<'v>) -> anyhow::Result<()> { + fn set_attr(&self, attribute: &str, _new_value: Value<'v>) -> crate::Result<()> { ValueError::unsupported(self, &format!(".{}=", attribute)) } @@ -869,4 +886,12 @@ pub trait StarlarkValue<'v>: fn provide(&'v self, demand: &mut Demand<'_, 'v>) { let _ = demand; } + + /// When freezing, this function is called on mutable value to return + /// statically allocated singleton value if possible. + /// + /// This function is used for optimization and rarely needed to be implemented. + fn try_freeze_static(&self) -> Option { + None + } } diff --git a/starlark-rust/starlark/src/values/type_repr.rs b/starlark-rust/starlark/src/values/type_repr.rs index d4cd7aa116a88..f11b166188523 100644 --- a/starlark-rust/starlark/src/values/type_repr.rs +++ b/starlark-rust/starlark/src/values/type_repr.rs @@ -24,64 +24,107 @@ use either::Either; pub use starlark_derive::StarlarkTypeRepr; use crate::typing::Ty; +use crate::values::list::ListType; use crate::values::none::NoneType; -use crate::values::string::StarlarkStr; +use crate::values::string::str_type::StarlarkStr; use crate::values::Heap; use crate::values::StarlarkValue; use crate::values::Value; /// Provides a starlark type representation, even if StarlarkValue is not implemented. +/// +/// # Derive +/// +/// There is `#[derive(StarlarkTypeRepr)]` for enums, for example: +/// +/// ``` +/// use starlark::values::type_repr::StarlarkTypeRepr; +/// +/// #[derive(StarlarkTypeRepr)] +/// enum IntOrString { +/// Int(i32), +/// String(String), +/// } +/// ``` +/// +/// It emits type `int | str`. +/// +/// This derive is useful in combination with derive of [`UnpackValue`](crate::values::UnpackValue). pub trait StarlarkTypeRepr { + /// Different Rust type representing the same Starlark Type. + /// + /// For example, `bool` and `StarlarkBool` Rust types represent the same Starlark type `bool`. + /// + /// Formal requirement: `Self::starlark_type_repr() == Self::Canonical::starlark_type_repr()`. + /// + /// If unsure, it is safe to put `= Self` here. + /// When [`associated_type_defaults`](https://github.com/rust-lang/rust/issues/29661) + /// is stabilized, this will be the default. 
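A manual impl with the new `Canonical` associated type looks like the sketch below, mirroring the pattern the integer conversions elsewhere in this diff use. `DurationSecs` is a hypothetical wrapper; per the docs above, `Canonical = Self` would also be a safe choice:

```rust
use starlark::typing::Ty;
use starlark::values::type_repr::StarlarkTypeRepr;

// Hypothetical wrapper that presents itself to Starlark as an int.
struct DurationSecs(pub i64);

impl StarlarkTypeRepr for DurationSecs {
    // Delegate to the canonical Rust type for the Starlark `int` type.
    type Canonical = <i64 as StarlarkTypeRepr>::Canonical;

    fn starlark_type_repr() -> Ty {
        i64::starlark_type_repr()
    }
}
```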
+ type Canonical: StarlarkTypeRepr; + /// The representation of a type that a user would use verbatim in starlark type annotations fn starlark_type_repr() -> Ty; } -/// A dict used just for display purposes. +/// A set used just for display purposes. /// -/// `DictOf` requires `Unpack` to be implemented, and `Dict` does not take type parameters so +/// `SetOf` requires `Unpack` to be implemented, and `Set` does not take type parameters so /// we need something for documentation generation. -pub struct DictType { - k: PhantomData, - v: PhantomData, +pub struct SetType { + t: PhantomData, } -impl StarlarkTypeRepr for DictType { +impl StarlarkTypeRepr for SetType { + type Canonical = SetType; + fn starlark_type_repr() -> Ty { - Ty::dict(K::starlark_type_repr(), V::starlark_type_repr()) + Ty::set(T::starlark_type_repr()) } } -impl<'v, T: StarlarkValue<'v> + ?Sized> StarlarkTypeRepr for T { +impl<'v, T: StarlarkValue<'v>> StarlarkTypeRepr for T { + type Canonical = Self; + fn starlark_type_repr() -> Ty { Self::get_type_starlark_repr() } } impl StarlarkTypeRepr for String { + type Canonical = StarlarkStr; + fn starlark_type_repr() -> Ty { StarlarkStr::starlark_type_repr() } } impl StarlarkTypeRepr for &str { + type Canonical = StarlarkStr; + fn starlark_type_repr() -> Ty { StarlarkStr::starlark_type_repr() } } impl StarlarkTypeRepr for Option { + type Canonical = as StarlarkTypeRepr>::Canonical; + fn starlark_type_repr() -> Ty { Either::::starlark_type_repr() } } impl StarlarkTypeRepr for Vec { + type Canonical = as StarlarkTypeRepr>::Canonical; + fn starlark_type_repr() -> Ty { - Ty::list(T::starlark_type_repr()) + ListType::::starlark_type_repr() } } impl StarlarkTypeRepr for Either { + type Canonical = Either; + fn starlark_type_repr() -> Ty { Ty::union2(TLeft::starlark_type_repr(), TRight::starlark_type_repr()) } @@ -95,3 +138,21 @@ pub fn type_repr_from_attr_impl<'v, T: StarlarkTypeRepr>( ) -> Ty { T::starlark_type_repr() } + +#[cfg(test)] +mod tests { + use crate::tests::util::TestComplexValue; + use crate::util::non_static_type_id::non_static_type_id; + use crate::values::type_repr::StarlarkTypeRepr; + use crate::values::FrozenValue; + use crate::values::Value; + + #[test] + fn test_canonical_for_complex_value() { + // TODO(nga): `StarlarkTypeRepr::Canonical` should be equal. + assert_ne!( + non_static_type_id::< as StarlarkTypeRepr>::Canonical>(), + non_static_type_id::< as StarlarkTypeRepr>::Canonical>(), + ); + } +} diff --git a/starlark-rust/starlark/src/values/types.rs b/starlark-rust/starlark/src/values/types.rs new file mode 100644 index 0000000000000..c7c92ebea5d70 --- /dev/null +++ b/starlark-rust/starlark/src/values/types.rs @@ -0,0 +1,45 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
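The impls above compose, so container reprs nest; a small demo under the assumption that the constructors behave as shown in this diff (the printed forms are illustrative):

```rust
use starlark::typing::Ty;
use starlark::values::type_repr::StarlarkTypeRepr;

fn main() {
    let opt: Ty = Option::<i32>::starlark_type_repr();
    let list: Ty = Vec::<String>::starlark_type_repr();
    println!("{opt}"); // expected along the lines of `None | int`
    println!("{list}"); // expected along the lines of `list[str]`
}
```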
+ */ + +pub mod any; +pub mod any_array; +pub mod any_complex; +pub mod array; +pub mod bigint; +pub mod bool; +pub mod dict; +pub(crate) mod ellipsis; +pub mod enumeration; +pub mod exported_name; +pub mod float; +pub mod function; +pub mod int; +pub(crate) mod known_methods; +pub mod list; +pub mod list_or_tuple; +pub mod namespace; +pub mod none; +pub(crate) mod num; +pub mod range; +pub mod record; +pub mod set; +pub mod starlark_value_as_type; +pub mod string; +pub mod structs; +pub mod tuple; +pub(crate) mod type_instance_id; +pub(crate) mod unbound; diff --git a/starlark-rust/starlark/src/values/types/any.rs b/starlark-rust/starlark/src/values/types/any.rs index 3d3f37e5bd3d4..49cd014c299b4 100644 --- a/starlark-rust/starlark/src/values/types/any.rs +++ b/starlark-rust/starlark/src/values/types/any.rs @@ -28,23 +28,17 @@ //! #[macro_use] //! extern crate starlark; //! # fn main() { +//! use std::fmt; +//! use std::time::Instant; +//! //! use starlark::assert::Assert; //! use starlark::environment::GlobalsBuilder; -//! use starlark::values::Value; //! use starlark::values::any::StarlarkAny; -//! use std::fmt; -//! use std::fmt::Display; -//! use std::time::Instant; +//! use starlark::values::Value; //! //! #[derive(Debug)] //! struct MyInstant(Instant); //! -//! impl Display for MyInstant { -//! fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { -//! write!(f, "{:?}", self) -//! } -//! } -//! //! #[starlark_module] //! fn globals(builder: &mut GlobalsBuilder) { //! fn start() -> anyhow::Result> { @@ -52,25 +46,28 @@ //! } //! //! fn elapsed(x: Value) -> anyhow::Result { -//! Ok(StarlarkAny::::get(x).unwrap().0.elapsed().as_secs_f64().to_string()) +//! Ok(StarlarkAny::::get(x) +//! .unwrap() +//! .0 +//! .elapsed() +//! .as_secs_f64() +//! .to_string()) //! } //! } //! //! let mut a = Assert::new(); //! a.globals_add(globals); -//! a.pass(r#" -//! duration = start() -//! y = 100 -//! for x in range(100): -//! y += x -//! print(elapsed(duration)) -//! "#); +//! a.pass( +//! r#" +//! instant = start() +//! print(elapsed(instant)) +//! "#, +//! ); //! # } //! ``` use std::fmt; use std::fmt::Debug; -use std::fmt::Display; use allocative::Allocative; use starlark_derive::starlark_value; @@ -87,37 +84,36 @@ use crate::values::ValueLike; /// A type that can be passed around as a Starlark [`Value`], but in most /// ways is uninteresting/opaque to Starlark. Constructed with /// [`new`](StarlarkAny::new) and decomposed with [`get`](StarlarkAny::get). -#[derive(ProvidesStaticType, NoSerialize, Allocative)] +/// +/// This is version for "simple" values (not requiring trace during GC). +/// For "complex" version check +/// [`StarlarkAnyComplex`](crate::values::types::any_complex::StarlarkAnyComplex). +#[derive(ProvidesStaticType, NoSerialize, Allocative, derive_more::Display)] #[allocative(bound = "")] -pub struct StarlarkAny( +#[display("{:?}", self)] +pub struct StarlarkAny( #[allocative(skip)] // TODO(nga): do not skip. 
pub T, ); #[starlark_value(type = "any")] -impl<'v, T: Debug + Display + Send + Sync + 'static> StarlarkValue<'v> for StarlarkAny { +impl<'v, T: Debug + Send + Sync + 'static> StarlarkValue<'v> for StarlarkAny { type Canonical = Self; } -impl<'v, T: Debug + Display + Send + Sync + 'static> AllocValue<'v> for StarlarkAny { +impl<'v, T: Debug + Send + Sync + 'static> AllocValue<'v> for StarlarkAny { fn alloc_value(self, heap: &'v Heap) -> Value<'v> { heap.alloc_simple(self) } } -impl Debug for StarlarkAny { +impl Debug for StarlarkAny { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { Debug::fmt(&self.0, f) } } -impl Display for StarlarkAny { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - Display::fmt(&self.0, f) - } -} - -impl StarlarkAny { +impl StarlarkAny { /// Create a new [`StarlarkAny`] value. Such a value can be allocated on a heap with /// `heap.alloc(StarlarkAny::new(x))`. pub fn new(x: T) -> Self { diff --git a/starlark-rust/starlark/src/values/types/any_array.rs b/starlark-rust/starlark/src/values/types/any_array.rs index 3f2afe7edf3a8..5188d819d96cb 100644 --- a/starlark-rust/starlark/src/values/types/any_array.rs +++ b/starlark-rust/starlark/src/values/types/any_array.rs @@ -32,7 +32,7 @@ use crate::values::StarlarkValue; #[derive(derive_more::Display, ProvidesStaticType, NoSerialize, Allocative)] #[repr(C)] -#[display(fmt = "{:?}", self)] +#[display("{:?}", self)] #[allocative(bound = "")] pub(crate) struct AnyArray { pub(crate) len: usize, @@ -109,7 +109,7 @@ mod tests { } let heap = FrozenHeap::new(); - let values = heap.alloc_any_slice_display_from_debug(&[ + let values = heap.alloc_any_slice(&[ IncrementOnDrop(counter1.dupe()), IncrementOnDrop(counter1.dupe()), IncrementOnDrop(counter2.dupe()), @@ -138,7 +138,7 @@ mod tests { #[test] fn test_allocation_size() { let heap = FrozenHeap::new(); - heap.alloc_any_slice_display_from_debug(&[1, 2, 3]); + heap.alloc_any_slice(&[1, 2, 3]); let quake = heap.alloc_str("quake"); // Test array allocation did not overwrite the string. assert_eq!(quake.as_str(), "quake"); diff --git a/starlark-rust/starlark/src/values/types/any_complex.rs b/starlark-rust/starlark/src/values/types/any_complex.rs new file mode 100644 index 0000000000000..cd6a6ff8ae465 --- /dev/null +++ b/starlark-rust/starlark/src/values/types/any_complex.rs @@ -0,0 +1,174 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! A type [`StarlarkAnyComplex`] which can wrap any Rust value into a [`Value`]. 
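With the relaxed bound in this hunk, `StarlarkAny<T>` needs only `T: Debug + Send + Sync + 'static` (its `Display` is now derived from `Debug`), so a std type such as `Instant` can be stored directly without the `MyInstant` wrapper the old doc example needed. A small usage sketch:

```rust
use std::time::Instant;

use starlark::values::any::StarlarkAny;
use starlark::values::{Heap, Value};

// Stash an opaque Rust value on the Starlark heap.
fn stash(heap: &Heap) -> Value<'_> {
    heap.alloc(StarlarkAny::new(Instant::now()))
}

// Recover it later; returns None if the value has some other type.
fn elapsed_secs(v: Value) -> Option<f64> {
    Some(StarlarkAny::<Instant>::get(v)?.elapsed().as_secs_f64())
}
```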
+ +use std::any; +use std::fmt; +use std::fmt::Debug; +use std::fmt::Display; + +use allocative::Allocative; +use starlark_derive::starlark_value; +use starlark_derive::NoSerialize; + +use crate as starlark; +use crate::any::ProvidesStaticType; +use crate::values::AllocValue; +use crate::values::Freeze; +use crate::values::Heap; +use crate::values::StarlarkValue; +use crate::values::Trace; +use crate::values::Value; +use crate::values::ValueLike; + +/// Allocate arbitrary value on the starlark heap without implementing full [`StarlarkValue`]. +/// +/// This is useful for data not directly visible to starlark code. +/// +/// This type is for "complex" values (with tracing during GC). For no GC version check +/// [`StarlarkAny`](crate::values::types::any::StarlarkAny). +#[derive(Trace, Freeze, Allocative, ProvidesStaticType, NoSerialize)] +pub struct StarlarkAnyComplex { + /// The value. + pub value: T, +} + +impl<'v, T> StarlarkAnyComplex +where + Self: StarlarkValue<'v>, +{ + /// Construct a new `StarlarkAnyComplex` value, which can be allocated on the heap. + pub fn new(value: T) -> StarlarkAnyComplex { + StarlarkAnyComplex { value } + } + + /// Obtain the value from a `Value`, if it is a `StarlarkAnyComplex`. + pub fn get(value: Value<'v>) -> Option<&'v T> { + value.downcast_ref::().map(|x| &x.value) + } + + /// Obtain the value from a `Value`, if it is a `StarlarkAnyComplex`. + pub fn get_err(value: Value<'v>) -> anyhow::Result<&'v T> { + value.downcast_ref_err::().map(|x| &x.value) + } +} + +// Proper `Debug` is hard to require from users because of `Freeze` and `ProvidesStaticType`. +impl Debug for StarlarkAnyComplex { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct(any::type_name::()) + .finish_non_exhaustive() + } +} + +impl Display for StarlarkAnyComplex { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + Debug::fmt(self, f) + } +} + +#[starlark_value(type = "any_complex")] +impl<'v, T> StarlarkValue<'v> for StarlarkAnyComplex +where + T: Allocative + ProvidesStaticType<'v> + 'v, + T::StaticType: Sized, +{ + type Canonical = Self; +} + +impl<'v, T> AllocValue<'v> for StarlarkAnyComplex +where + Self: StarlarkValue<'v> + Freeze, + T: Trace<'v>, + ::Frozen: StarlarkValue<'static>, +{ + fn alloc_value(self, heap: &'v Heap) -> Value<'v> { + heap.alloc_complex(self) + } +} + +#[cfg(test)] +mod tests { + use allocative::Allocative; + use starlark_derive::ProvidesStaticType; + use starlark_derive::Trace; + + use crate as starlark; + use crate::const_frozen_string; + use crate::environment::Module; + use crate::values::list::AllocList; + use crate::values::types::any_complex::StarlarkAnyComplex; + use crate::values::Freeze; + use crate::values::Freezer; + use crate::values::FrozenStringValue; + use crate::values::FrozenValue; + use crate::values::StringValue; + use crate::values::Value; + + #[test] + fn test_any_complex() { + #[derive(Trace, Allocative, ProvidesStaticType)] + struct UnfrozenData<'v> { + string: StringValue<'v>, + other: Value<'v>, + } + + impl<'v> Freeze for UnfrozenData<'v> { + type Frozen = FrozenData; + + fn freeze(self, freezer: &Freezer) -> anyhow::Result { + Ok(FrozenData { + string: self.string.freeze(freezer)?, + other: freezer.freeze(self.other)?, + }) + } + } + + #[derive(Allocative, ProvidesStaticType)] + struct FrozenData { + string: FrozenStringValue, + other: FrozenValue, + } + + let module = Module::new(); + + let data = module.heap().alloc(StarlarkAnyComplex::new(UnfrozenData { + string: 
module.heap().alloc_str("aaa"), + other: module.heap().alloc(AllocList([1, 2])), + })); + + assert_eq!( + const_frozen_string!("aaa"), + StarlarkAnyComplex::::get_err(data) + .unwrap() + .string + ); + + module.set_extra_value(data); + + let module = module.freeze().unwrap(); + + let data = module.extra_value().unwrap(); + assert_eq!( + const_frozen_string!("aaa"), + StarlarkAnyComplex::::get_err(data.to_value()) + .unwrap() + .string + ); + } +} diff --git a/starlark-rust/starlark/src/values/types/array.rs b/starlark-rust/starlark/src/values/types/array.rs index e5f5f4450c152..2aeb0d225f736 100644 --- a/starlark-rust/starlark/src/values/types/array.rs +++ b/starlark-rust/starlark/src/values/types/array.rs @@ -37,8 +37,8 @@ use crate::cast::transmute; use crate::private::Private; use crate::values::layout::avalue::alloc_static; use crate::values::layout::avalue::AValue; +use crate::values::layout::avalue::AValueArray; use crate::values::layout::avalue::AValueImpl; -use crate::values::layout::avalue::Direct; use crate::values::layout::heap::repr::AValueRepr; use crate::values::types::list::value::display_list; use crate::values::Heap; @@ -84,22 +84,22 @@ impl<'v> Debug for Array<'v> { /// `Array` is not `Sync`, so wrap it into this struct to store it in static variable. /// Empty `Array` is logically `Sync`. -pub(crate) struct ValueEmptyArray(AValueRepr>>); +pub(crate) struct ValueEmptyArray(AValueRepr>); unsafe impl Sync for ValueEmptyArray {} pub(crate) static VALUE_EMPTY_ARRAY: ValueEmptyArray = - ValueEmptyArray(alloc_static(Direct, unsafe { Array::new(0, 0) })); + ValueEmptyArray(alloc_static(unsafe { Array::new(0, 0) })); impl ValueEmptyArray { pub(crate) fn repr<'v>( &'static self, - ) -> &'v AValueRepr>> { + ) -> &'v AValueRepr>>> { // Cast lifetimes. Cannot use `crate::cast::ptr_lifetime` here // because type parameter of `AValue` also need to be casted. unsafe { transmute!( - &AValueRepr>, - &AValueRepr>, + &AValueRepr>, + &AValueRepr>, &self.0 ) } @@ -237,10 +237,18 @@ impl<'v> Array<'v> { } } - pub(crate) fn extend(&self, iter: impl IntoIterator>) { + /// Extend with given elements. + /// + /// Return `Err` if any of the elements is an error. + /// Panic if there's not enough capacity. + pub(crate) fn try_extend( + &self, + iter: impl IntoIterator, E>>, + ) -> Result<(), E> { for item in iter { - self.push(item); + self.push(item?); } + Ok(()) } pub(crate) fn extend_from_slice(&self, slice: &[Value<'v>]) { @@ -293,7 +301,7 @@ impl<'v> StarlarkValue<'v> for Array<'v> { true } - fn length(&self) -> anyhow::Result { + fn length(&self) -> crate::Result { Ok(self.len() as i32) } diff --git a/starlark-rust/starlark/src/values/types/bigint.rs b/starlark-rust/starlark/src/values/types/bigint.rs new file mode 100644 index 0000000000000..0db016892cbcf --- /dev/null +++ b/starlark-rust/starlark/src/values/types/bigint.rs @@ -0,0 +1,704 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Outside of `i32` range int. + +mod convert; + +use std::cmp::Ordering; +use std::hash::Hash; + +use allocative::Allocative; +use num_bigint::BigInt; +use num_bigint::Sign; +use num_traits::cast::ToPrimitive; +use serde::Serialize; +use starlark_derive::starlark_value; + +use crate as starlark; +use crate::any::ProvidesStaticType; +use crate::collections::StarlarkHasher; +use crate::typing::Ty; +use crate::typing::TyBasic; +use crate::typing::TypingBinOp; +use crate::values::types::int::inline_int::InlineInt; +use crate::values::types::int::int_or_big::StarlarkInt; +use crate::values::types::int::int_or_big::StarlarkIntRef; +use crate::values::types::num::typecheck::typecheck_num_bin_op; +use crate::values::types::num::typecheck::NumTy; +use crate::values::types::num::value::NumRef; +use crate::values::AllocFrozenValue; +use crate::values::AllocValue; +use crate::values::FrozenHeap; +use crate::values::FrozenValue; +use crate::values::Heap; +use crate::values::StarlarkValue; +use crate::values::UnpackValue; +use crate::values::Value; +use crate::values::ValueError; + +/// `int` implementation for larger integers. +#[derive( + Clone, + Debug, + Default, + derive_more::Display, + ProvidesStaticType, + Ord, + PartialOrd, + Eq, + PartialEq, + Hash, + Allocative +)] +#[display("{}", value)] +pub struct StarlarkBigInt { + /// `value` is strictly either smaller than `i32::MIN` or larger than `i32::MAX`. + /// Many operation implementations depend on this fact. + /// For example, `non_zero_int << positive_big_int` is considered to be overflow + /// without checking the actual value of `positive_big_int`. + #[allocative(skip)] // TODO(nga): do not skip. + value: BigInt, +} + +impl StarlarkBigInt { + pub(crate) fn unchecked_new(value: BigInt) -> Self { + debug_assert!( + InlineInt::try_from(&value).is_err(), + "BigInt must be outside of `InlineInt` range" + ); + Self { value } + } + + pub(crate) fn get(&self) -> &BigInt { + &self.value + } + + pub(crate) fn to_f64(&self) -> f64 { + // `to_f64` is infallible. + self.value.to_f64().unwrap() + } + + pub(crate) fn to_i32(&self) -> Option { + // Avoid calling `to_i32` if the value is known to be out of range. + if InlineInt::smaller_than_i32() { + let v = self.value.to_i32(); + if let Some(v) = v { + debug_assert!(InlineInt::try_from(v).is_err()); + } + v + } else { + None + } + } + + pub(crate) fn cmp_small_big(a: InlineInt, b: &StarlarkBigInt) -> Ordering { + let a_sign = a.signum(); + let b_sign = match b.value.sign() { + Sign::Plus => 2, + Sign::Minus => -2, + Sign::NoSign => 0, + }; + // Sign comparison is enough because `StarlarkBigInt` is out of range of `i32`. 
+ a_sign.cmp(&b_sign) + } + + pub(crate) fn cmp_big_small(a: &StarlarkBigInt, b: InlineInt) -> Ordering { + Self::cmp_small_big(b, a).reverse() + } + + pub(crate) fn unpack_integer<'v, I: TryFrom<&'v BigInt>>(&'v self) -> Option { + I::try_from(&self.value).ok() + } +} + +impl PartialEq for StarlarkBigInt { + fn eq(&self, _other: &i32) -> bool { + false + } +} + +impl Serialize for StarlarkBigInt { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + serializer.serialize_str(&self.value.to_string()) + } +} + +impl<'v> AllocValue<'v> for StarlarkBigInt { + fn alloc_value(self, heap: &'v Heap) -> Value<'v> { + heap.alloc_simple(self) + } +} + +impl AllocFrozenValue for StarlarkBigInt { + fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue { + heap.alloc_simple(self) + } +} + +#[starlark_value(type = "int")] +impl<'v> StarlarkValue<'v> for StarlarkBigInt { + fn to_bool(&self) -> bool { + // `StarlarkBigInt` is non-zero. + true + } + + fn minus(&self, heap: &'v Heap) -> starlark::Result> { + Ok(heap.alloc(StarlarkInt::from(-&self.value))) + } + + fn plus(&self, heap: &'v Heap) -> starlark::Result> { + // This unnecessarily allocates, could return `self`. + // But practically people rarely write `+NNN` except in constants, + // and in constants we fold `+NNN` into `NNN`. + Ok(heap.alloc(StarlarkInt::from(self.value.clone()))) + } + + fn equals(&self, other: Value<'v>) -> crate::Result { + Ok(Some(NumRef::Int(StarlarkIntRef::Big(self))) == other.unpack_num()) + } + + fn compare(&self, other: Value<'v>) -> crate::Result { + match other.unpack_num() { + None => ValueError::unsupported_with(self, "compare", other), + Some(other) => Ok(NumRef::Int(StarlarkIntRef::Big(self)).cmp(&other)), + } + } + + fn add(&self, rhs: Value<'v>, heap: &'v Heap) -> Option>> { + Some(Ok(heap.alloc( + NumRef::Int(StarlarkIntRef::Big(self)) + rhs.unpack_num()?, + ))) + } + + fn sub(&self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { + match other.unpack_num() { + Some(other) => Ok(heap.alloc(NumRef::Int(StarlarkIntRef::Big(self)) - other)), + None => ValueError::unsupported_with(self, "-", other), + } + } + + fn mul(&self, other: Value<'v>, heap: &'v Heap) -> Option>> { + Some(Ok(heap.alloc( + NumRef::Int(StarlarkIntRef::Big(self)) * other.unpack_num()?, + ))) + } + + fn div(&self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { + match other.unpack_num() { + Some(other) => Ok(heap.alloc(NumRef::Int(StarlarkIntRef::Big(self)).div(other)?)), + None => ValueError::unsupported_with(self, "/", other), + } + } + + fn floor_div(&self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { + match other.unpack_num() { + Some(rhs) => Ok(heap.alloc(NumRef::Int(StarlarkIntRef::Big(self)).floor_div(rhs)?)), + None => ValueError::unsupported_with(self, "//", other), + } + } + + fn percent(&self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { + match other.unpack_num() { + Some(rhs) => Ok(heap.alloc(NumRef::Int(StarlarkIntRef::Big(self)).percent(rhs)?)), + None => ValueError::unsupported_with(self, "%", other), + } + } + + fn bit_and(&self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { + let rhs = match StarlarkIntRef::unpack_value_opt(other) { + Some(rhs) => rhs, + None => return ValueError::unsupported_with(self, "&", other), + }; + Ok(heap.alloc(StarlarkIntRef::Big(self) & rhs)) + } + + fn bit_xor(&self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { + let rhs = match StarlarkIntRef::unpack_value_opt(other) { + Some(rhs) => rhs, + None => return 
ValueError::unsupported_with(self, "^", other), + }; + Ok(heap.alloc(StarlarkIntRef::Big(self) ^ rhs)) + } + + fn bit_or(&self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { + let rhs = match StarlarkIntRef::unpack_value_opt(other) { + Some(rhs) => rhs, + None => return ValueError::unsupported_with(self, "|", other), + }; + Ok(heap.alloc(StarlarkIntRef::Big(self) | rhs)) + } + + fn bit_not(&self, heap: &'v Heap) -> crate::Result> { + Ok(heap.alloc(!StarlarkIntRef::Big(self))) + } + + fn left_shift(&self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { + match StarlarkIntRef::unpack_value_opt(other) { + None => ValueError::unsupported_with(self, "<<", other), + Some(other) => Ok(heap.alloc(StarlarkIntRef::Big(self).left_shift(other)?)), + } + } + + fn right_shift(&self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { + match StarlarkIntRef::unpack_value_opt(other) { + None => ValueError::unsupported_with(self, ">>", other), + Some(other) => Ok(heap.alloc(StarlarkIntRef::Big(self).right_shift(other)?)), + } + } + + fn bin_op_ty(op: TypingBinOp, rhs: &TyBasic) -> Option { + typecheck_num_bin_op(NumTy::Int, op, rhs) + } + + fn write_hash(&self, hasher: &mut StarlarkHasher) -> crate::Result<()> { + NumRef::Int(StarlarkIntRef::Big(self)) + .get_hash_64() + .hash(hasher); + Ok(()) + } + + fn typechecker_ty(&self) -> Option { + Some(Ty::int()) + } +} + +#[cfg(test)] +mod tests { + use std::hash::Hasher; + + use num_bigint::BigInt; + + use crate::assert; + use crate::collections::StarlarkHasher; + use crate::values::float::StarlarkFloat; + use crate::values::types::bigint::StarlarkBigInt; + use crate::values::StarlarkValue; + + #[test] + fn test_parse() { + assert::eq( + "'1234567890112233445566778899'", + "str(1234567890112233445566778899)", + ); + assert::eq( + "'1234567890112233445566778899'", + "str(0x3fd35eb6d519aff76f50e13)", + ); + assert::eq( + "'1234567890112233445566778899'", + "str(0o776465726665214657756675207023)", + ); + assert::eq( + "'1234567890112233445566778899'", + "str(0b11111111010011010111101011011011010101000\ + 1100110101111111101110110111101010000111000010011)", + ); + } + + #[test] + fn test_str() { + assert::eq( + "'1234567890112233445566778899'", + "str(1234567890112233445566778899)", + ); + } + + #[test] + fn test_repr() { + assert::eq( + "'1234567890112233445566778899'", + "repr(1234567890112233445566778899)", + ); + } + + #[test] + fn test_equals() { + assert::eq("10000000000000000000000", "10000000000000000000000"); + assert::eq("10000000000000000000000", "10000000000000000000000.0"); + assert::eq("10000000000000000000000.0", "10000000000000000000000"); + } + + #[test] + fn test_plus() { + assert::eq("10000000000000000000000", "+10000000000000000000000"); + } + + #[test] + fn test_compare_big_big() { + assert::is_true("10000000000000000000000 < 20000000000000000000000"); + assert::is_true("-20000000000000000000000 < -10000000000000000000000"); + assert::is_true("20000000000000000000000 > 10000000000000000000000"); + assert::is_true("-10000000000000000000000 > -20000000000000000000000"); + } + + #[test] + fn test_compare_big_small() { + assert::is_true("1 < 10000000000000000000000"); + assert::is_true("-1 < 10000000000000000000000"); + assert::is_true("1 > -10000000000000000000000"); + assert::is_true("-1 > -10000000000000000000000"); + assert::is_true("10000000000000000000000 > 1"); + assert::is_true("10000000000000000000000 > -1"); + assert::is_true("-10000000000000000000000 < 1"); + assert::is_true("-10000000000000000000000 < -1"); + } + + 
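These comparison tests lean on the representation invariant stated at the top of the file: an `int` is either a small inline value or a `BigInt` strictly outside that range, so mixed comparisons can be decided by sign alone. A standalone sketch of that normalization (the enum shape is illustrative, not the real `StarlarkInt`):

```rust
use num_bigint::BigInt;

enum IntDemo {
    Small(i32),  // stands in for InlineInt
    Big(BigInt), // invariant: strictly outside the small range
}

fn normalize(v: BigInt) -> IntDemo {
    match i32::try_from(&v) {
        Ok(small) => IntDemo::Small(small),
        Err(_) => IntDemo::Big(v),
    }
}

fn main() {
    assert!(matches!(normalize(BigInt::from(42)), IntDemo::Small(42)));
    assert!(matches!(
        normalize(BigInt::from(10).pow(30)),
        IntDemo::Big(_)
    ));
}
```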
#[test] + fn test_compare_big_float() { + assert::is_true("1.0 < 10000000000000000000000"); + assert::is_true("-1.0 < 10000000000000000000000"); + assert::is_true("1.0 > -10000000000000000000000"); + assert::is_true("-1.0 > -10000000000000000000000"); + assert::is_true("10000000000000000000000 > 1.0"); + assert::is_true("10000000000000000000000 > -1.0"); + assert::is_true("-10000000000000000000000 < 1.0"); + assert::is_true("-10000000000000000000000 < -1.0"); + } + + #[test] + fn test_add_big() { + assert::eq( + "300000000000000000009", + "100000000000000000004 + 200000000000000000005", + ); + assert::eq("7", "100000000000000000007 + -100000000000000000000"); + assert::eq( + "200000000000000000005", + "300000000000000000009 - 100000000000000000004", + ); + assert::eq("7", "100000000000000000007 - 100000000000000000000"); + } + + #[test] + fn test_add_big_small() { + assert::eq("100000000000000000017", "100000000000000000000 + 17"); + assert::eq("100000000000000000017", "17 + 100000000000000000000"); + assert::eq("100000000000000000000", "100000000000000000017 - 17"); + assert::eq("-100000000000000000017", "17 - 100000000000000000034"); + } + + #[test] + fn test_add_big_float() { + assert::eq("2e20", "100000000000000000000 + 1e20"); + assert::eq("2e20", "1e20 + 100000000000000000000"); + assert::eq("2e20", "300000000000000000000 - 1e20"); + assert::eq("2e20", "3e20 - 100000000000000000000"); + } + + #[test] + fn test_mul_big() { + assert::eq( + "60000000000000000000000000000000000000000", + "200000000000000000000 * 300000000000000000000", + ); + } + + #[test] + fn test_mul_big_small() { + assert::eq("600000000000000000000", "200000000000000000000 * 3"); + assert::eq("600000000000000000000", "3 * 200000000000000000000"); + } + + #[test] + fn test_mul_big_float() { + assert::eq("6e20", "200000000000000000000 * 3.0"); + assert::eq("6e20", "3.0 * 200000000000000000000"); + } + + #[test] + fn test_div_big() { + assert::eq( + "2e20", + "60000000000000000000000000000000000000000 / 300000000000000000000", + ); + } + + #[test] + fn test_div_big_small() { + assert::eq("2e20", "600000000000000000000 / 3"); + assert::eq("2e-20", "6 / 300000000000000000000"); + } + + #[test] + fn test_div_big_float() { + assert::eq("2e20", "600000000000000000000 / 3.0"); + assert::eq("2e-20", "6.0 / 300000000000000000000"); + } + + #[test] + fn test_floor_div_big() { + assert::eq("2", "600000000000000000000 // 300000000000000000000"); + } + + #[test] + fn test_floor_div_big_small() { + assert::eq("200000000000000000000", "600000000000000000000 // 3"); + assert::eq("0", "3 // 600000000000000000000"); + } + + #[test] + fn test_floor_div_big_float() { + assert::eq("2e20", "600000000000000000000 / 3.0"); + assert::eq("2e-20", "6.0 / 300000000000000000000"); + } + + #[test] + fn test_percent_big() { + assert::eq("7", "600000000000000000007 % 200000000000000000000"); + } + + #[test] + fn test_percent_big_small() { + assert::eq("7", "600000000000000000007 % 20"); + assert::eq("3", "3 % 600000000000000000000"); + } + + #[test] + fn test_percent_big_float() { + assert::eq("1e20", "100000000000000000000 % 1e50"); + assert::eq("10.0", "10.0 % 100000000000000000000"); + } + + #[test] + fn test_bit_and_big() { + assert::eq( + "0x10000000000000000000000", + "0x30000000000000000000000 & 0x90000000000000000000000", + ); + } + + #[test] + fn test_bit_and_big_small() { + assert::eq("1", "0x60000000000000000000003 & 0x9"); + assert::eq("1", "0x9 & 0x60000000000000000000003"); + } + + #[test] + fn test_bit_and_float() { + 
assert::fail_skip_typecheck("0x60000000000000000000000 & 1.0", "not supported"); + assert::fail_skip_typecheck("1.0 & 0x60000000000000000000000", "not supported"); + assert::fail( + "def f(): 0x60000000000000000000000 & 1.0", + "is not available on the types", + ); + assert::fail( + "def f(): 1.0 & 0x60000000000000000000000", + "is not available on the types", + ); + } + + #[test] + fn test_bit_or_big() { + assert::eq( + "0x70000000000000000000000", + "0x30000000000000000000000 | 0x50000000000000000000000", + ); + } + + #[test] + fn test_bit_or_big_small() { + assert::eq( + "0x60000000000000000000009", + "0x60000000000000000000000 | 0x9", + ); + assert::eq( + "0x60000000000000000000009", + "0x9 | 0x60000000000000000000000", + ); + } + + #[test] + fn test_bit_or_float() { + assert::fail_skip_typecheck("0x60000000000000000000000 | 1.0", "not supported"); + assert::fail_skip_typecheck("1.0 | 0x60000000000000000000000", "not supported"); + assert::fail( + "def f(): 0x60000000000000000000000 | 1.0", + "is not available on the types", + ); + assert::fail( + "def f(): 1.0 | 0x60000000000000000000000", + "is not available on the types", + ); + } + + #[test] + fn test_bit_xor_big() { + assert::eq( + "0x60000000000000000000000", + "0x30000000000000000000000 ^ 0x50000000000000000000000", + ); + } + + #[test] + fn test_bit_xor_big_small() { + assert::eq( + "0x60000000000000000000000", + "0x60000000000000000000009 ^ 0x9", + ); + assert::eq( + "0x60000000000000000000000", + "0x9 ^ 0x60000000000000000000009", + ); + } + + #[test] + fn test_bit_xor_float() { + assert::fail_skip_typecheck("0x60000000000000000000000 ^ 1.0", "not supported"); + assert::fail_skip_typecheck("1.0 ^ 0x60000000000000000000000", "not supported"); + assert::fail( + "def f(): 0x60000000000000000000000 ^ 1.0", + "Binary operator `^` is not available", + ); + assert::fail( + "def f(): 1.0 ^ 0x60000000000000000000000", + "Binary operator `^` is not available", + ); + } + + #[test] + fn test_bit_not() { + assert::eq( + "-0x10000000000000000000000000000001", + "~0x10000000000000000000000000000000", + ); + } + + #[test] + fn test_left_shift() { + assert::fail( + "0x10000000000000000000000000000000 << 0x10000000000000000000000000000000", + "Integer overflow", + ); + assert::fail( + "0x10000000000000000000000000000000 << -0x10000000000000000000000000000000", + "Negative left shift", + ); + } + + #[test] + fn test_left_shift_small() { + assert::eq( + "0x20000000000000000000000000000000", + "0x10000000000000000000000000000000 << 1", + ); + assert::fail( + "0x10000000000000000000000000000000 << -1", + "Negative left shift", + ); + assert::fail( + "1 << 0x10000000000000000000000000000000", + "Integer overflow", + ); + assert::fail( + "1 << -0x10000000000000000000000000000000", + "Negative left shift", + ); + assert::eq("0", "0 << 0x10000000000000000000000000000000"); + assert::eq("1267650600228229401496703205376", "1 << 100"); + assert::eq("-1267650600228229401496703205376", "-1 << 100"); + } + + #[test] + fn test_left_shift_float() { + assert::fail_skip_typecheck("0x10000000000000000000000000000000 << 1.0", "not supported"); + assert::fail_skip_typecheck("1.0 << 0x10000000000000000000000000000000", "not supported"); + assert::fail( + "def f(): 0x10000000000000000000000000000000 << 1.0", + "is not available", + ); + assert::fail( + "def f(): 1.0 << 0x10000000000000000000000000000000", + "is not available", + ); + } + + #[test] + fn test_right_shift() { + assert::eq( + "0", + "0x20000000000000000000000000000000 >> 
0x20000000000000000000000000000000", + ); + assert::eq( + "-1", + "-0x20000000000000000000000000000000 >> 0x20000000000000000000000000000000", + ); + assert::fail( + "0x20000000000000000000000000000000 >> -0x20000000000000000000000000000000", + "Negative right shift", + ); + } + + #[test] + fn test_right_shift_small() { + assert::eq( + "0x10000000000000000000000000000000", + "0x20000000000000000000000000000000 >> 1", + ); + assert::fail( + "0x20000000000000000000000000000000 >> -1", + "Negative right shift", + ); + assert::eq("0", "1 >> 0x20000000000000000000000000000000"); + assert::eq("-1", "-1 >> 0x20000000000000000000000000000000"); + assert::fail( + "1 >> -0x10000000000000000000000000000000", + "Negative right shift", + ); + } + + #[test] + fn test_right_shift_float() { + assert::fail_skip_typecheck("0x20000000000000000000000000000000 >> 1.0", "not supported"); + assert::fail_skip_typecheck("1.0 >> 0x20000000000000000000000000000000", "not supported"); + assert::fail( + "def f(): 0x20000000000000000000000000000000 >> 1.0", + "is not available", + ); + assert::fail( + "def f(): 1.0 >> 0x20000000000000000000000000000000", + "is not available", + ); + } + + #[test] + fn test_int_function() { + assert::eq( + "123456789012345678901234567890", + "int(123456789012345678901234567890)", + ); + } + + #[test] + fn test_hash() { + let mut hash1 = StarlarkHasher::new(); + let mut hash2 = StarlarkHasher::new(); + StarlarkFloat(1e20).write_hash(&mut hash1).unwrap(); + StarlarkBigInt::unchecked_new(BigInt::from(10).pow(20)) + .write_hash(&mut hash2) + .unwrap(); + assert_eq!(hash1.finish(), hash2.finish()); + } + + #[test] + fn test_int_type_matches_bigint() { + assert::is_true("isinstance(1 << 100, int)"); + } +} diff --git a/starlark-rust/starlark/src/values/types/bigint/convert.rs b/starlark-rust/starlark/src/values/types/bigint/convert.rs index 025823764f8d2..b87f3febd0103 100644 --- a/starlark-rust/starlark/src/values/types/bigint/convert.rs +++ b/starlark-rust/starlark/src/values/types/bigint/convert.rs @@ -19,8 +19,8 @@ use num_bigint::BigInt; use crate::typing::Ty; use crate::values::type_repr::StarlarkTypeRepr; -use crate::values::types::int_or_big::StarlarkInt; -use crate::values::types::int_or_big::StarlarkIntRef; +use crate::values::types::int::int_or_big::StarlarkInt; +use crate::values::types::int::int_or_big::StarlarkIntRef; use crate::values::AllocFrozenValue; use crate::values::AllocValue; use crate::values::FrozenHeap; @@ -30,6 +30,8 @@ use crate::values::UnpackValue; use crate::values::Value; impl StarlarkTypeRepr for u32 { + type Canonical = ::Canonical; + fn starlark_type_repr() -> Ty { i32::starlark_type_repr() } @@ -50,6 +52,8 @@ impl AllocFrozenValue for u32 { } impl StarlarkTypeRepr for u64 { + type Canonical = ::Canonical; + fn starlark_type_repr() -> Ty { i32::starlark_type_repr() } @@ -70,6 +74,8 @@ impl AllocFrozenValue for u64 { } impl StarlarkTypeRepr for i64 { + type Canonical = ::Canonical; + fn starlark_type_repr() -> Ty { i32::starlark_type_repr() } @@ -90,6 +96,8 @@ impl AllocFrozenValue for i64 { } impl StarlarkTypeRepr for usize { + type Canonical = ::Canonical; + fn starlark_type_repr() -> Ty { i32::starlark_type_repr() } @@ -110,6 +118,8 @@ impl AllocFrozenValue for usize { } impl StarlarkTypeRepr for isize { + type Canonical = ::Canonical; + fn starlark_type_repr() -> Ty { i32::starlark_type_repr() } @@ -130,6 +140,8 @@ impl AllocFrozenValue for isize { } impl StarlarkTypeRepr for BigInt { + type Canonical = ::Canonical; + fn starlark_type_repr() -> Ty { 
i32::starlark_type_repr() } @@ -149,40 +161,96 @@ impl AllocFrozenValue for BigInt { } impl<'v> UnpackValue<'v> for u32 { - fn unpack_value(value: Value<'v>) -> Option { + type Error = crate::Error; + + fn unpack_value_impl(value: Value<'v>) -> crate::Result> { value.unpack_integer() } } impl<'v> UnpackValue<'v> for u64 { - fn unpack_value(value: Value<'v>) -> Option { + type Error = crate::Error; + + fn unpack_value_impl(value: Value<'v>) -> crate::Result> { value.unpack_integer() } } impl<'v> UnpackValue<'v> for i64 { - fn unpack_value(value: Value<'v>) -> Option { + type Error = crate::Error; + + fn unpack_value_impl(value: Value<'v>) -> crate::Result> { value.unpack_integer() } } impl<'v> UnpackValue<'v> for usize { - fn unpack_value(value: Value<'v>) -> Option { + type Error = crate::Error; + + fn unpack_value_impl(value: Value<'v>) -> crate::Result> { value.unpack_integer() } } impl<'v> UnpackValue<'v> for isize { - fn unpack_value(value: Value<'v>) -> Option { + type Error = crate::Error; + + fn unpack_value_impl(value: Value<'v>) -> crate::Result> { value.unpack_integer() } } impl<'v> UnpackValue<'v> for BigInt { - fn unpack_value(value: Value<'v>) -> Option { - match StarlarkIntRef::unpack_value(value)? { + type Error = crate::Error; + + fn unpack_value_impl(value: Value<'v>) -> crate::Result> { + let Some(int) = StarlarkIntRef::unpack_value_opt(value) else { + return Ok(None); + }; + Ok(match int { StarlarkIntRef::Small(x) => Some(BigInt::from(x.to_i32())), StarlarkIntRef::Big(x) => Some(x.get().to_owned()), + }) + } +} + +#[cfg(test)] +mod tests { + use starlark_derive::starlark_module; + + use crate as starlark; + use crate::assert::Assert; + use crate::environment::GlobalsBuilder; + use crate::values::none::NoneType; + + #[test] + fn test_unpack_int_error() { + #[starlark_module] + fn module(globals: &mut GlobalsBuilder) { + fn takes_i32(#[starlark(require=pos)] _i: i32) -> starlark::Result { + Ok(NoneType) + } + + fn takes_i64(#[starlark(require=pos)] _i: i64) -> starlark::Result { + Ok(NoneType) + } } + + let mut a = Assert::new(); + a.globals_add(module); + a.fails( + "takes_i32(1 << 100)", + &[ + "Integer value is too big to fit in i32: 1267650600228229401496703205376", + "Error unpacking value for parameter `_i`", + ], + ); + a.fails( + "takes_i64(1 << 100)", + &[ + "Integer value is too big to fit in i64: 1267650600228229401496703205376", + "Error unpacking value for parameter `_i`", + ], + ); } } diff --git a/starlark-rust/starlark/src/values/types/bigint/mod.rs b/starlark-rust/starlark/src/values/types/bigint/mod.rs deleted file mode 100644 index 93df06350803f..0000000000000 --- a/starlark-rust/starlark/src/values/types/bigint/mod.rs +++ /dev/null @@ -1,704 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! Outside of `i32` range int. 
- -mod convert; - -use std::cmp::Ordering; -use std::hash::Hash; - -use allocative::Allocative; -use num_bigint::BigInt; -use num_bigint::Sign; -use num_traits::cast::ToPrimitive; -use serde::Serialize; -use starlark_derive::starlark_value; - -use crate as starlark; -use crate::any::ProvidesStaticType; -use crate::collections::StarlarkHasher; -use crate::typing::Ty; -use crate::typing::TyBasic; -use crate::typing::TypingBinOp; -use crate::values::num::typecheck::typecheck_num_bin_op; -use crate::values::num::typecheck::NumTy; -use crate::values::num::value::NumRef; -use crate::values::types::inline_int::InlineInt; -use crate::values::types::int_or_big::StarlarkInt; -use crate::values::types::int_or_big::StarlarkIntRef; -use crate::values::AllocFrozenValue; -use crate::values::AllocValue; -use crate::values::FrozenHeap; -use crate::values::FrozenValue; -use crate::values::Heap; -use crate::values::StarlarkValue; -use crate::values::UnpackValue; -use crate::values::Value; -use crate::values::ValueError; - -/// `int` implementation for larger integers. -#[derive( - Clone, - Debug, - Default, - derive_more::Display, - ProvidesStaticType, - Ord, - PartialOrd, - Eq, - PartialEq, - Hash, - Allocative -)] -#[display(fmt = "{}", value)] -pub struct StarlarkBigInt { - /// `value` is strictly either smaller than `i32::MIN` or larger than `i32::MAX`. - /// Many operation implementations depend on this fact. - /// For example, `non_zero_int << positive_big_int` is considered to be overflow - /// without checking the actual value of `positive_big_int`. - #[allocative(skip)] // TODO(nga): do not skip. - value: BigInt, -} - -impl StarlarkBigInt { - pub(crate) fn unchecked_new(value: BigInt) -> Self { - debug_assert!( - InlineInt::try_from(&value).is_err(), - "BigInt must be outside of `InlineInt` range" - ); - Self { value } - } - - pub(crate) fn get(&self) -> &BigInt { - &self.value - } - - pub(crate) fn to_f64(&self) -> f64 { - // `to_f64` is infallible. - self.value.to_f64().unwrap() - } - - pub(crate) fn to_i32(&self) -> Option { - // Avoid calling `to_i32` if the value is known to be out of range. - if InlineInt::smaller_than_i32() { - let v = self.value.to_i32(); - if let Some(v) = v { - debug_assert!(InlineInt::try_from(v).is_err()); - } - v - } else { - None - } - } - - pub(crate) fn cmp_small_big(a: InlineInt, b: &StarlarkBigInt) -> Ordering { - let a_sign = a.signum(); - let b_sign = match b.value.sign() { - Sign::Plus => 2, - Sign::Minus => -2, - Sign::NoSign => 0, - }; - // Sign comparison is enough because `StarlarkBigInt` is out of range of `i32`. 
-        a_sign.cmp(&b_sign)
-    }
-
-    pub(crate) fn cmp_big_small(a: &StarlarkBigInt, b: InlineInt) -> Ordering {
-        Self::cmp_small_big(b, a).reverse()
-    }
-
-    pub(crate) fn unpack_integer<'v, I: TryFrom<&'v BigInt>>(&'v self) -> Option<I> {
-        I::try_from(&self.value).ok()
-    }
-}
-
-impl PartialEq<i32> for StarlarkBigInt {
-    fn eq(&self, _other: &i32) -> bool {
-        false
-    }
-}
-
-impl Serialize for StarlarkBigInt {
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: serde::Serializer,
-    {
-        serializer.serialize_str(&self.value.to_string())
-    }
-}
-
-impl<'v> AllocValue<'v> for StarlarkBigInt {
-    fn alloc_value(self, heap: &'v Heap) -> Value<'v> {
-        heap.alloc_simple(self)
-    }
-}
-
-impl AllocFrozenValue for StarlarkBigInt {
-    fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue {
-        heap.alloc_simple(self)
-    }
-}
-
-#[starlark_value(type = "int")]
-impl<'v> StarlarkValue<'v> for StarlarkBigInt {
-    fn to_bool(&self) -> bool {
-        // `StarlarkBigInt` is non-zero.
-        true
-    }
-
-    fn minus(&self, heap: &'v Heap) -> anyhow::Result<Value<'v>> {
-        Ok(heap.alloc(StarlarkInt::from(-&self.value)))
-    }
-
-    fn plus(&self, heap: &'v Heap) -> anyhow::Result<Value<'v>> {
-        // This unnecessarily allocates, could return `self`.
-        // But practically people rarely write `+NNN` except in constants,
-        // and in constants we fold `+NNN` into `NNN`.
-        Ok(heap.alloc(StarlarkInt::from(self.value.clone())))
-    }
-
-    fn equals(&self, other: Value<'v>) -> anyhow::Result<bool> {
-        Ok(Some(NumRef::Int(StarlarkIntRef::Big(self))) == other.unpack_num())
-    }
-
-    fn compare(&self, other: Value<'v>) -> anyhow::Result<Ordering> {
-        match other.unpack_num() {
-            None => ValueError::unsupported_with(self, "compare", other),
-            Some(other) => Ok(NumRef::Int(StarlarkIntRef::Big(self)).cmp(&other)),
-        }
-    }
-
-    fn add(&self, rhs: Value<'v>, heap: &'v Heap) -> Option<anyhow::Result<Value<'v>>> {
-        Some(Ok(heap.alloc(
-            NumRef::Int(StarlarkIntRef::Big(self)) + rhs.unpack_num()?,
-        )))
-    }
-
-    fn sub(&self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> {
-        match other.unpack_num() {
-            Some(other) => Ok(heap.alloc(NumRef::Int(StarlarkIntRef::Big(self)) - other)),
-            None => ValueError::unsupported_with(self, "-", other),
-        }
-    }
-
-    fn mul(&self, other: Value<'v>, heap: &'v Heap) -> Option<anyhow::Result<Value<'v>>> {
-        Some(Ok(heap.alloc(
-            NumRef::Int(StarlarkIntRef::Big(self)) * other.unpack_num()?,
-        )))
-    }
-
-    fn div(&self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> {
-        match other.unpack_num() {
-            Some(other) => Ok(heap.alloc(NumRef::Int(StarlarkIntRef::Big(self)).div(other)?)),
-            None => ValueError::unsupported_with(self, "/", other),
-        }
-    }
-
-    fn floor_div(&self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> {
-        match other.unpack_num() {
-            Some(rhs) => Ok(heap.alloc(NumRef::Int(StarlarkIntRef::Big(self)).floor_div(rhs)?)),
-            None => ValueError::unsupported_with(self, "//", other),
-        }
-    }
-
-    fn percent(&self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> {
-        match other.unpack_num() {
-            Some(rhs) => Ok(heap.alloc(NumRef::Int(StarlarkIntRef::Big(self)).percent(rhs)?)),
-            None => ValueError::unsupported_with(self, "%", other),
-        }
-    }
-
-    fn bit_and(&self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> {
-        let rhs = match StarlarkIntRef::unpack_value(other) {
-            Some(rhs) => rhs,
-            None => return ValueError::unsupported_with(self, "&", other),
-        };
-        Ok(heap.alloc(StarlarkIntRef::Big(self) & rhs))
-    }
-
-    fn bit_xor(&self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> {
-        let rhs = match StarlarkIntRef::unpack_value(other) {
-            Some(rhs) => rhs,
-            None => return ValueError::unsupported_with(self, "^", other),
-        };
-        Ok(heap.alloc(StarlarkIntRef::Big(self) ^ rhs))
-    }
-
-    fn bit_or(&self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> {
-        let rhs = match StarlarkIntRef::unpack_value(other) {
-            Some(rhs) => rhs,
-            None => return ValueError::unsupported_with(self, "|", other),
-        };
-        Ok(heap.alloc(StarlarkIntRef::Big(self) | rhs))
-    }
-
-    fn bit_not(&self, heap: &'v Heap) -> anyhow::Result<Value<'v>> {
-        Ok(heap.alloc(!StarlarkIntRef::Big(self)))
-    }
-
-    fn left_shift(&self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> {
-        match StarlarkIntRef::unpack_value(other) {
-            None => ValueError::unsupported_with(self, "<<", other),
-            Some(other) => Ok(heap.alloc(StarlarkIntRef::Big(self).left_shift(other)?)),
-        }
-    }
-
-    fn right_shift(&self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> {
-        match StarlarkIntRef::unpack_value(other) {
-            None => ValueError::unsupported_with(self, ">>", other),
-            Some(other) => Ok(heap.alloc(StarlarkIntRef::Big(self).right_shift(other)?)),
-        }
-    }
-
-    fn bin_op_ty(op: TypingBinOp, rhs: &TyBasic) -> Option<Ty> {
-        typecheck_num_bin_op(NumTy::Int, op, rhs)
-    }
-
-    fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> {
-        NumRef::Int(StarlarkIntRef::Big(self))
-            .get_hash_64()
-            .hash(hasher);
-        Ok(())
-    }
-
-    fn typechecker_ty(&self) -> Option<Ty> {
-        Some(Ty::int())
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use std::hash::Hasher;
-
-    use num_bigint::BigInt;
-
-    use crate::assert;
-    use crate::collections::StarlarkHasher;
-    use crate::values::float::StarlarkFloat;
-    use crate::values::types::bigint::StarlarkBigInt;
-    use crate::values::StarlarkValue;
-
-    #[test]
-    fn test_parse() {
-        assert::eq(
-            "'1234567890112233445566778899'",
-            "str(1234567890112233445566778899)",
-        );
-        assert::eq(
-            "'1234567890112233445566778899'",
-            "str(0x3fd35eb6d519aff76f50e13)",
-        );
-        assert::eq(
-            "'1234567890112233445566778899'",
-            "str(0o776465726665214657756675207023)",
-        );
-        assert::eq(
-            "'1234567890112233445566778899'",
-            "str(0b11111111010011010111101011011011010101000\
-            1100110101111111101110110111101010000111000010011)",
-        );
-    }
-
-    #[test]
-    fn test_str() {
-        assert::eq(
-            "'1234567890112233445566778899'",
-            "str(1234567890112233445566778899)",
-        );
-    }
-
-    #[test]
-    fn test_repr() {
-        assert::eq(
-            "'1234567890112233445566778899'",
-            "repr(1234567890112233445566778899)",
-        );
-    }
-
-    #[test]
-    fn test_equals() {
-        assert::eq("10000000000000000000000", "10000000000000000000000");
-        assert::eq("10000000000000000000000", "10000000000000000000000.0");
-        assert::eq("10000000000000000000000.0", "10000000000000000000000");
-    }
-
-    #[test]
-    fn test_plus() {
-        assert::eq("10000000000000000000000", "+10000000000000000000000");
-    }
-
-    #[test]
-    fn test_compare_big_big() {
-        assert::is_true("10000000000000000000000 < 20000000000000000000000");
-        assert::is_true("-20000000000000000000000 < -10000000000000000000000");
-        assert::is_true("20000000000000000000000 > 10000000000000000000000");
-        assert::is_true("-10000000000000000000000 > -20000000000000000000000");
-    }
-
-    #[test]
-    fn test_compare_big_small() {
-        assert::is_true("1 < 10000000000000000000000");
-        assert::is_true("-1 < 10000000000000000000000");
-        assert::is_true("1 > -10000000000000000000000");
-        assert::is_true("-1 > -10000000000000000000000");
-        assert::is_true("10000000000000000000000 > 1");
-        assert::is_true("10000000000000000000000 > -1");
-        assert::is_true("-10000000000000000000000 < 1");
-        assert::is_true("-10000000000000000000000 < -1");
-    }
-
-    #[test]
-    fn
test_compare_big_float() { - assert::is_true("1.0 < 10000000000000000000000"); - assert::is_true("-1.0 < 10000000000000000000000"); - assert::is_true("1.0 > -10000000000000000000000"); - assert::is_true("-1.0 > -10000000000000000000000"); - assert::is_true("10000000000000000000000 > 1.0"); - assert::is_true("10000000000000000000000 > -1.0"); - assert::is_true("-10000000000000000000000 < 1.0"); - assert::is_true("-10000000000000000000000 < -1.0"); - } - - #[test] - fn test_add_big() { - assert::eq( - "300000000000000000009", - "100000000000000000004 + 200000000000000000005", - ); - assert::eq("7", "100000000000000000007 + -100000000000000000000"); - assert::eq( - "200000000000000000005", - "300000000000000000009 - 100000000000000000004", - ); - assert::eq("7", "100000000000000000007 - 100000000000000000000"); - } - - #[test] - fn test_add_big_small() { - assert::eq("100000000000000000017", "100000000000000000000 + 17"); - assert::eq("100000000000000000017", "17 + 100000000000000000000"); - assert::eq("100000000000000000000", "100000000000000000017 - 17"); - assert::eq("-100000000000000000017", "17 - 100000000000000000034"); - } - - #[test] - fn test_add_big_float() { - assert::eq("2e20", "100000000000000000000 + 1e20"); - assert::eq("2e20", "1e20 + 100000000000000000000"); - assert::eq("2e20", "300000000000000000000 - 1e20"); - assert::eq("2e20", "3e20 - 100000000000000000000"); - } - - #[test] - fn test_mul_big() { - assert::eq( - "60000000000000000000000000000000000000000", - "200000000000000000000 * 300000000000000000000", - ); - } - - #[test] - fn test_mul_big_small() { - assert::eq("600000000000000000000", "200000000000000000000 * 3"); - assert::eq("600000000000000000000", "3 * 200000000000000000000"); - } - - #[test] - fn test_mul_big_float() { - assert::eq("6e20", "200000000000000000000 * 3.0"); - assert::eq("6e20", "3.0 * 200000000000000000000"); - } - - #[test] - fn test_div_big() { - assert::eq( - "2e20", - "60000000000000000000000000000000000000000 / 300000000000000000000", - ); - } - - #[test] - fn test_div_big_small() { - assert::eq("2e20", "600000000000000000000 / 3"); - assert::eq("2e-20", "6 / 300000000000000000000"); - } - - #[test] - fn test_div_big_float() { - assert::eq("2e20", "600000000000000000000 / 3.0"); - assert::eq("2e-20", "6.0 / 300000000000000000000"); - } - - #[test] - fn test_floor_div_big() { - assert::eq("2", "600000000000000000000 // 300000000000000000000"); - } - - #[test] - fn test_floor_div_big_small() { - assert::eq("200000000000000000000", "600000000000000000000 // 3"); - assert::eq("0", "3 // 600000000000000000000"); - } - - #[test] - fn test_floor_div_big_float() { - assert::eq("2e20", "600000000000000000000 / 3.0"); - assert::eq("2e-20", "6.0 / 300000000000000000000"); - } - - #[test] - fn test_percent_big() { - assert::eq("7", "600000000000000000007 % 200000000000000000000"); - } - - #[test] - fn test_percent_big_small() { - assert::eq("7", "600000000000000000007 % 20"); - assert::eq("3", "3 % 600000000000000000000"); - } - - #[test] - fn test_percent_big_float() { - assert::eq("1e20", "100000000000000000000 % 1e50"); - assert::eq("10.0", "10.0 % 100000000000000000000"); - } - - #[test] - fn test_bit_and_big() { - assert::eq( - "0x10000000000000000000000", - "0x30000000000000000000000 & 0x90000000000000000000000", - ); - } - - #[test] - fn test_bit_and_big_small() { - assert::eq("1", "0x60000000000000000000003 & 0x9"); - assert::eq("1", "0x9 & 0x60000000000000000000003"); - } - - #[test] - fn test_bit_and_float() { - 
assert::fail_skip_typecheck("0x60000000000000000000000 & 1.0", "not supported"); - assert::fail_skip_typecheck("1.0 & 0x60000000000000000000000", "not supported"); - assert::fail( - "def f(): 0x60000000000000000000000 & 1.0", - "is not available on the types", - ); - assert::fail( - "def f(): 1.0 & 0x60000000000000000000000", - "is not available on the types", - ); - } - - #[test] - fn test_bit_or_big() { - assert::eq( - "0x70000000000000000000000", - "0x30000000000000000000000 | 0x50000000000000000000000", - ); - } - - #[test] - fn test_bit_or_big_small() { - assert::eq( - "0x60000000000000000000009", - "0x60000000000000000000000 | 0x9", - ); - assert::eq( - "0x60000000000000000000009", - "0x9 | 0x60000000000000000000000", - ); - } - - #[test] - fn test_bit_or_float() { - assert::fail_skip_typecheck("0x60000000000000000000000 | 1.0", "not supported"); - assert::fail_skip_typecheck("1.0 | 0x60000000000000000000000", "not supported"); - assert::fail( - "def f(): 0x60000000000000000000000 | 1.0", - "is not available on the types", - ); - assert::fail( - "def f(): 1.0 | 0x60000000000000000000000", - "is not available on the types", - ); - } - - #[test] - fn test_bit_xor_big() { - assert::eq( - "0x60000000000000000000000", - "0x30000000000000000000000 ^ 0x50000000000000000000000", - ); - } - - #[test] - fn test_bit_xor_big_small() { - assert::eq( - "0x60000000000000000000000", - "0x60000000000000000000009 ^ 0x9", - ); - assert::eq( - "0x60000000000000000000000", - "0x9 ^ 0x60000000000000000000009", - ); - } - - #[test] - fn test_bit_xor_float() { - assert::fail_skip_typecheck("0x60000000000000000000000 ^ 1.0", "not supported"); - assert::fail_skip_typecheck("1.0 ^ 0x60000000000000000000000", "not supported"); - assert::fail( - "def f(): 0x60000000000000000000000 ^ 1.0", - "Binary operator `^` is not available", - ); - assert::fail( - "def f(): 1.0 ^ 0x60000000000000000000000", - "Binary operator `^` is not available", - ); - } - - #[test] - fn test_bit_not() { - assert::eq( - "-0x10000000000000000000000000000001", - "~0x10000000000000000000000000000000", - ); - } - - #[test] - fn test_left_shift() { - assert::fail( - "0x10000000000000000000000000000000 << 0x10000000000000000000000000000000", - "Integer overflow", - ); - assert::fail( - "0x10000000000000000000000000000000 << -0x10000000000000000000000000000000", - "Negative left shift", - ); - } - - #[test] - fn test_left_shift_small() { - assert::eq( - "0x20000000000000000000000000000000", - "0x10000000000000000000000000000000 << 1", - ); - assert::fail( - "0x10000000000000000000000000000000 << -1", - "Negative left shift", - ); - assert::fail( - "1 << 0x10000000000000000000000000000000", - "Integer overflow", - ); - assert::fail( - "1 << -0x10000000000000000000000000000000", - "Negative left shift", - ); - assert::eq("0", "0 << 0x10000000000000000000000000000000"); - assert::eq("1267650600228229401496703205376", "1 << 100"); - assert::eq("-1267650600228229401496703205376", "-1 << 100"); - } - - #[test] - fn test_left_shift_float() { - assert::fail_skip_typecheck("0x10000000000000000000000000000000 << 1.0", "not supported"); - assert::fail_skip_typecheck("1.0 << 0x10000000000000000000000000000000", "not supported"); - assert::fail( - "def f(): 0x10000000000000000000000000000000 << 1.0", - "is not available", - ); - assert::fail( - "def f(): 1.0 << 0x10000000000000000000000000000000", - "is not available", - ); - } - - #[test] - fn test_right_shift() { - assert::eq( - "0", - "0x20000000000000000000000000000000 >> 
0x20000000000000000000000000000000", - ); - assert::eq( - "-1", - "-0x20000000000000000000000000000000 >> 0x20000000000000000000000000000000", - ); - assert::fail( - "0x20000000000000000000000000000000 >> -0x20000000000000000000000000000000", - "Negative right shift", - ); - } - - #[test] - fn test_right_shift_small() { - assert::eq( - "0x10000000000000000000000000000000", - "0x20000000000000000000000000000000 >> 1", - ); - assert::fail( - "0x20000000000000000000000000000000 >> -1", - "Negative right shift", - ); - assert::eq("0", "1 >> 0x20000000000000000000000000000000"); - assert::eq("-1", "-1 >> 0x20000000000000000000000000000000"); - assert::fail( - "1 >> -0x10000000000000000000000000000000", - "Negative right shift", - ); - } - - #[test] - fn test_right_shift_float() { - assert::fail_skip_typecheck("0x20000000000000000000000000000000 >> 1.0", "not supported"); - assert::fail_skip_typecheck("1.0 >> 0x20000000000000000000000000000000", "not supported"); - assert::fail( - "def f(): 0x20000000000000000000000000000000 >> 1.0", - "is not available", - ); - assert::fail( - "def f(): 1.0 >> 0x20000000000000000000000000000000", - "is not available", - ); - } - - #[test] - fn test_int_function() { - assert::eq( - "123456789012345678901234567890", - "int(123456789012345678901234567890)", - ); - } - - #[test] - fn test_hash() { - let mut hash1 = StarlarkHasher::new(); - let mut hash2 = StarlarkHasher::new(); - StarlarkFloat(1e20).write_hash(&mut hash1).unwrap(); - StarlarkBigInt::unchecked_new(BigInt::from(10).pow(20)) - .write_hash(&mut hash2) - .unwrap(); - assert_eq!(hash1.finish(), hash2.finish()); - } - - #[test] - fn test_int_type_matches_bigint() { - assert::is_true("isinstance(1 << 100, int)"); - } -} diff --git a/starlark-rust/starlark/src/values/types/bool.rs b/starlark-rust/starlark/src/values/types/bool.rs index 0e080a32db06e..322b24406a050 100644 --- a/starlark-rust/starlark/src/values/types/bool.rs +++ b/starlark-rust/starlark/src/values/types/bool.rs @@ -17,146 +17,16 @@ //! The boolean type (`False` and `True`). //! -//! Can be created with [`new_bool`](Value::new_bool) and unwrapped with [`unpack_bool`](Value::unpack_bool). -//! Unlike most Starlark values, these aren't actually represented on the [`Heap`], but as special values. - -use std::cmp::Ordering; -use std::fmt; -use std::fmt::Display; -use std::hash::Hasher; - -use allocative::Allocative; -use serde::Serialize; -use starlark_derive::starlark_value; -use starlark_derive::StarlarkDocs; - -use crate as starlark; -use crate::any::ProvidesStaticType; -use crate::collections::StarlarkHashValue; -use crate::collections::StarlarkHasher; -use crate::private::Private; -use crate::typing::Ty; -use crate::values::layout::avalue::alloc_static; -use crate::values::layout::avalue::AValueImpl; -use crate::values::layout::avalue::Basic; -use crate::values::layout::heap::repr::AValueRepr; -use crate::values::type_repr::StarlarkTypeRepr; -use crate::values::AllocFrozenValue; -use crate::values::AllocValue; -use crate::values::FrozenHeap; -use crate::values::FrozenValue; -use crate::values::Heap; -use crate::values::StarlarkValue; -use crate::values::UnpackValue; -use crate::values::Value; -use crate::values::ValueError; - -/// The result of calling `type()` on booleans. -pub const BOOL_TYPE: &str = "bool"; - -// We have to alias bool so we can have a Display that uses True/False. 
-#[derive(ProvidesStaticType, Debug, Serialize, StarlarkDocs, Allocative)]
-#[starlark_docs(builtin = "standard")]
-#[serde(transparent)]
-pub(crate) struct StarlarkBool(pub(crate) bool);
-
-impl Display for StarlarkBool {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        if self.0 {
-            write!(f, "True")
-        } else {
-            write!(f, "False")
-        }
-    }
-}
-
-pub(crate) static VALUE_FALSE_TRUE: [AValueRepr<AValueImpl<Basic, StarlarkBool>>; 2] = [
-    alloc_static(Basic, StarlarkBool(false)),
-    alloc_static(Basic, StarlarkBool(true)),
-];
-
-impl<'v> AllocValue<'v> for bool {
-    fn alloc_value(self, _heap: &'v Heap) -> Value<'v> {
-        Value::new_bool(self)
-    }
-}
-
-impl AllocFrozenValue for bool {
-    fn alloc_frozen_value(self, _heap: &FrozenHeap) -> FrozenValue {
-        FrozenValue::new_bool(self)
-    }
-}
-
-impl StarlarkTypeRepr for bool {
-    fn starlark_type_repr() -> Ty {
-        StarlarkBool::get_type_starlark_repr()
-    }
-}
-
-impl UnpackValue<'_> for bool {
-    fn unpack_value(value: Value) -> Option<Self> {
-        value.unpack_bool()
-    }
-}
-
-/// Define the bool type
-#[starlark_value(type = BOOL_TYPE)]
-impl<'v> StarlarkValue<'v> for StarlarkBool {
-    fn is_special(_: Private) -> bool
-    where
-        Self: Sized,
-    {
-        true
-    }
-
-    fn collect_repr(&self, s: &mut String) {
-        // repr() for bool is quite hot, so optimise it
-        if self.0 {
-            s.push_str("True")
-        } else {
-            s.push_str("False")
-        }
-    }
-
-    fn to_bool(&self) -> bool {
-        self.0
-    }
-
-    fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> {
-        hasher.write_u8(if self.0 { 1 } else { 0 });
-        Ok(())
-    }
-
-    fn get_hash(&self, _private: Private) -> anyhow::Result<StarlarkHashValue> {
-        // These constants are just two random numbers.
-        Ok(StarlarkHashValue::new_unchecked(if self.0 {
-            0xa4acba08
-        } else {
-            0x71e8ba71
-        }))
-    }
-
-    fn equals(&self, other: Value) -> anyhow::Result<bool> {
-        // We always compare values for pointer equality before calling `equals`,
-        // and there are only two instances of `StarlarkBool`.
-        // So if we are here, values are definitely not equal.
-        debug_assert!(!matches!(other.unpack_bool(), Some(other) if other == self.0));
-        Ok(false)
-    }
-
-    fn compare(&self, other: Value) -> anyhow::Result<Ordering> {
-        if let Some(other) = other.unpack_bool() {
-            Ok(self.0.cmp(&other))
-        } else {
-            ValueError::unsupported_with(self, "<>", other)
-        }
-    }
-
-    fn typechecker_ty(&self) -> Option<Ty> {
-        Some(Ty::bool())
-    }
-
-    fn get_type_starlark_repr() -> Ty {
-        Ty::bool()
-    }
-}
+//! Can be created with [`Value::new_bool`](crate::values::Value::new_bool)
+//! and unwrapped with [`Value::unpack_bool`](crate::values::Value::unpack_bool).
+//! Unlike most Starlark values, these aren't actually allocated on the [`Heap`](crate::values::Heap),
+//! but are represented as special values.
+
+mod alloc;
+pub(crate) mod globals;
+mod type_repr;
+mod unpack;
+pub(crate) mod value;
+
+pub use value::StarlarkBool;
+pub use value::BOOL_TYPE;
diff --git a/starlark-rust/starlark/src/values/types/bool/alloc.rs b/starlark-rust/starlark/src/values/types/bool/alloc.rs
new file mode 100644
index 0000000000000..36dbd2b08e9bb
--- /dev/null
+++ b/starlark-rust/starlark/src/values/types/bool/alloc.rs
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2019 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use crate::values::AllocFrozenValue;
+use crate::values::AllocValue;
+use crate::values::FrozenHeap;
+use crate::values::FrozenValue;
+use crate::values::Heap;
+use crate::values::Value;
+
+impl<'v> AllocValue<'v> for bool {
+    fn alloc_value(self, _heap: &'v Heap) -> Value<'v> {
+        Value::new_bool(self)
+    }
+}
+
+impl AllocFrozenValue for bool {
+    fn alloc_frozen_value(self, _heap: &FrozenHeap) -> FrozenValue {
+        FrozenValue::new_bool(self)
+    }
+}
diff --git a/starlark-rust/starlark/src/values/types/bool/globals.rs b/starlark-rust/starlark/src/values/types/bool/globals.rs
new file mode 100644
index 0000000000000..b63bb9ef52c4c
--- /dev/null
+++ b/starlark-rust/starlark/src/values/types/bool/globals.rs
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2019 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use starlark_derive::starlark_module;
+
+use crate as starlark;
+use crate::environment::GlobalsBuilder;
+use crate::values::bool::StarlarkBool;
+use crate::values::Value;
+
+#[starlark_module]
+pub(crate) fn register_bool(globals: &mut GlobalsBuilder) {
+    /// A boolean representing true.
+    const True: bool = true;
+
+    /// A boolean representing false.
+    const False: bool = false;
+
+    /// [bool](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#bool
+    /// ): returns the truth value of any starlark value.
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// bool() == False
+    /// bool([]) == False
+    /// bool([1]) == True
+    /// bool(True) == True
+    /// bool(False) == False
+    /// bool(None) == False
+    /// bool(bool) == True
+    /// bool(1) == True
+    /// bool(0) == False
+    /// bool({}) == False
+    /// bool({1:2}) == True
+    /// bool(()) == False
+    /// bool((1,)) == True
+    /// bool("") == False
+    /// bool("1") == True
+    /// # "#);
+    /// ```
+    #[starlark(as_type = StarlarkBool, speculative_exec_safe)]
+    fn bool(#[starlark(require = pos)] x: Option<Value>) -> anyhow::Result<bool> {
+        match x {
+            None => Ok(false),
+            Some(x) => Ok(x.to_bool()),
+        }
+    }
+}
diff --git a/starlark-rust/starlark/src/values/types/bool/type_repr.rs b/starlark-rust/starlark/src/values/types/bool/type_repr.rs
new file mode 100644
index 0000000000000..0b6da3004711f
--- /dev/null
+++ b/starlark-rust/starlark/src/values/types/bool/type_repr.rs
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use crate::typing::Ty;
+use crate::values::bool::StarlarkBool;
+use crate::values::type_repr::StarlarkTypeRepr;
+use crate::values::StarlarkValue;
+
+impl StarlarkTypeRepr for bool {
+    type Canonical = <StarlarkBool as StarlarkTypeRepr>::Canonical;
+
+    fn starlark_type_repr() -> Ty {
+        StarlarkBool::get_type_starlark_repr()
+    }
+}
diff --git a/starlark-rust/starlark/src/values/types/bool/unpack.rs b/starlark-rust/starlark/src/values/types/bool/unpack.rs
new file mode 100644
index 0000000000000..d69a6dff9c5f7
--- /dev/null
+++ b/starlark-rust/starlark/src/values/types/bool/unpack.rs
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use std::convert::Infallible;
+
+use crate::values::UnpackValue;
+use crate::values::Value;
+
+impl UnpackValue<'_> for bool {
+    type Error = Infallible;
+
+    fn unpack_value_impl(value: Value) -> Result<Option<Self>, Self::Error> {
+        Ok(value.unpack_bool())
+    }
+}
diff --git a/starlark-rust/starlark/src/values/types/bool/value.rs b/starlark-rust/starlark/src/values/types/bool/value.rs
new file mode 100644
index 0000000000000..b2d7c324a22d6
--- /dev/null
+++ b/starlark-rust/starlark/src/values/types/bool/value.rs
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2019 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use std::cmp::Ordering;
+use std::fmt;
+use std::fmt::Display;
+use std::hash::Hasher;
+
+use allocative::Allocative;
+use starlark_derive::starlark_value;
+use starlark_derive::ProvidesStaticType;
+use starlark_map::StarlarkHashValue;
+use starlark_map::StarlarkHasher;
+
+use crate as starlark;
+use crate::__derive_refs::serde::Serialize;
+use crate::private::Private;
+use crate::typing::Ty;
+use crate::values::layout::avalue::alloc_static;
+use crate::values::layout::avalue::AValueBasic;
+use crate::values::layout::avalue::AValueImpl;
+use crate::values::layout::heap::repr::AValueRepr;
+use crate::values::StarlarkValue;
+use crate::values::Value;
+use crate::values::ValueError;
+
+/// The result of calling `type()` on booleans.
+pub const BOOL_TYPE: &str = "bool";
+
+/// `bool` value.
+#[derive(ProvidesStaticType, Debug, Serialize, Allocative)]
+#[serde(transparent)]
+pub struct StarlarkBool(pub(crate) bool);
+
+impl Display for StarlarkBool {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        if self.0 {
+            write!(f, "True")
+        } else {
+            write!(f, "False")
+        }
+    }
+}
+
+pub(crate) static VALUE_FALSE_TRUE: [AValueRepr<AValueImpl<'static, AValueBasic<StarlarkBool>>>; 2] = [
+    alloc_static(StarlarkBool(false)),
+    alloc_static(StarlarkBool(true)),
+];
+
+/// Define the bool type
+#[starlark_value(type = BOOL_TYPE)]
+impl<'v> StarlarkValue<'v> for StarlarkBool {
+    fn is_special(_: Private) -> bool
+    where
+        Self: Sized,
+    {
+        true
+    }
+
+    fn collect_repr(&self, s: &mut String) {
+        // repr() for bool is quite hot, so optimise it
+        if self.0 {
+            s.push_str("True")
+        } else {
+            s.push_str("False")
+        }
+    }
+
+    fn to_bool(&self) -> bool {
+        self.0
+    }
+
+    fn write_hash(&self, hasher: &mut StarlarkHasher) -> crate::Result<()> {
+        hasher.write_u8(if self.0 { 1 } else { 0 });
+        Ok(())
+    }
+
+    fn get_hash(&self, _private: Private) -> crate::Result<StarlarkHashValue> {
+        // These constants are just two random numbers.
+        Ok(StarlarkHashValue::new_unchecked(if self.0 {
+            0xa4acba08
+        } else {
+            0x71e8ba71
+        }))
+    }
+
+    fn compare(&self, other: Value) -> crate::Result<Ordering> {
+        if let Some(other) = other.unpack_bool() {
+            Ok(self.0.cmp(&other))
+        } else {
+            ValueError::unsupported_with(self, "<>", other)
+        }
+    }
+
+    fn typechecker_ty(&self) -> Option<Ty> {
+        Some(Ty::bool())
+    }
+
+    fn get_type_starlark_repr() -> Ty {
+        Ty::bool()
+    }
+}
diff --git a/starlark-rust/starlark/src/values/types/dict.rs b/starlark-rust/starlark/src/values/types/dict.rs
new file mode 100644
index 0000000000000..f6a164ded7895
--- /dev/null
+++ b/starlark-rust/starlark/src/values/types/dict.rs
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2018 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! The dictionary type, a mutable associative-map, which iterates in insertion order.
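// A minimal usage sketch of this module's public API (hedged, assuming a
// live `Heap` named `heap`): `AllocDict` builds a dict value and `DictRef`
// borrows it back for reading:
//
//     use starlark::values::dict::{AllocDict, DictRef};
//     let value = heap.alloc(AllocDict([("a", 1), ("b", 2)]));
//     let dict = DictRef::from_value(value).expect("just allocated a dict");
//     assert_eq!(2, dict.len());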
+
+mod alloc;
+
+mod dict_type;
+pub(crate) mod globals;
+pub(crate) mod methods;
+mod refs;
+mod traits;
+pub(crate) mod unpack;
+pub(crate) mod value;
+
+pub use crate::values::dict::alloc::AllocDict;
+pub use crate::values::dict::dict_type::DictType;
+pub use crate::values::dict::refs::DictMut;
+pub use crate::values::dict::refs::DictRef;
+pub use crate::values::dict::refs::FrozenDictRef;
+pub use crate::values::dict::unpack::UnpackDictEntries;
+pub use crate::values::dict::value::Dict;
diff --git a/starlark-rust/starlark/src/values/types/dict/alloc.rs b/starlark-rust/starlark/src/values/types/dict/alloc.rs
index 245c6c56be74d..8f76b2f0cbbae 100644
--- a/starlark-rust/starlark/src/values/types/dict/alloc.rs
+++ b/starlark-rust/starlark/src/values/types/dict/alloc.rs
@@ -23,8 +23,8 @@ use crate::typing::Ty;
 use crate::values::dict::value::FrozenDictData;
 use crate::values::dict::Dict;
 use crate::values::layout::value::ValueLike;
-use crate::values::type_repr::DictType;
 use crate::values::type_repr::StarlarkTypeRepr;
+use crate::values::types::dict::dict_type::DictType;
 use crate::values::AllocFrozenValue;
 use crate::values::AllocValue;
 use crate::values::FrozenHeap;
@@ -65,6 +65,8 @@ where
     K: StarlarkTypeRepr,
     V: StarlarkTypeRepr,
 {
+    type Canonical = DictType<K::Canonical, V::Canonical>;
+
     fn starlark_type_repr() -> Ty {
         DictType::<K, V>::starlark_type_repr()
     }
diff --git a/starlark-rust/starlark/src/values/types/dict/dict_type.rs b/starlark-rust/starlark/src/values/types/dict/dict_type.rs
new file mode 100644
index 0000000000000..b1d3a905fb2d2
--- /dev/null
+++ b/starlark-rust/starlark/src/values/types/dict/dict_type.rs
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2018 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use std::marker::PhantomData;
+
+use either::Either;
+
+use crate::typing::Ty;
+use crate::values::dict::UnpackDictEntries;
+use crate::values::type_repr::StarlarkTypeRepr;
+use crate::values::UnpackAndDiscard;
+use crate::values::UnpackValue;
+use crate::values::Value;
+
+/// A dict type marker.
+///
+/// [`StarlarkTypeRepr`] provides `dict[K, V]`.
+/// [`UnpackValue`] implementation verifies the types of entries and discards them.
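// Hedged usage sketch: `DictType` is useful as a parameter type when only the
// shape of the dict must be validated. Under that assumption, unpacking checks
// that every entry is `str -> int` while keeping none of them:
//
//     // The result is `Ok(Some(..))` only if `value` is a dict whose
//     // entries all unpack as `(String, i32)`.
//     let ok = DictType::<String, i32>::unpack_value(value);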
+pub struct DictType<K: StarlarkTypeRepr, V: StarlarkTypeRepr> {
+    k: PhantomData<K>,
+    v: PhantomData<V>,
+}
+
+impl<K: StarlarkTypeRepr, V: StarlarkTypeRepr> StarlarkTypeRepr for DictType<K, V> {
+    type Canonical = DictType<K::Canonical, V::Canonical>;
+
+    fn starlark_type_repr() -> Ty {
+        Ty::dict(K::starlark_type_repr(), V::starlark_type_repr())
+    }
+}
+
+impl<'v, K: UnpackValue<'v>, V: UnpackValue<'v>> UnpackValue<'v> for DictType<K, V> {
+    type Error = Either<K::Error, V::Error>;
+
+    fn unpack_value_impl(value: Value<'v>) -> Result<Option<Self>, Self::Error> {
+        match UnpackDictEntries::<UnpackAndDiscard<K>, UnpackAndDiscard<V>>::unpack_value_impl(
+            value,
+        ) {
+            Ok(Some(_)) => Ok(Some(DictType {
+                k: PhantomData,
+                v: PhantomData,
+            })),
+            Ok(None) => Ok(None),
+            Err(e) => Err(e),
+        }
+    }
+}
diff --git a/starlark-rust/starlark/src/values/types/dict/globals.rs b/starlark-rust/starlark/src/values/types/dict/globals.rs
new file mode 100644
index 0000000000000..e1f1e8ae1a8e4
--- /dev/null
+++ b/starlark-rust/starlark/src/values/types/dict/globals.rs
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2018 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use starlark_derive::starlark_module;
+use starlark_map::small_map::SmallMap;
+
+use crate as starlark;
+use crate::environment::GlobalsBuilder;
+use crate::eval::Arguments;
+use crate::values::dict::value::FrozenDict;
+use crate::values::dict::Dict;
+use crate::values::dict::DictRef;
+use crate::values::function::SpecialBuiltinFunction;
+use crate::values::Heap;
+use crate::values::Value;
+
+fn unpack_pair<'v>(pair: Value<'v>, heap: &'v Heap) -> crate::Result<(Value<'v>, Value<'v>)> {
+    let mut it = pair.iterate(heap)?;
+    if let Some(first) = it.next() {
+        if let Some(second) = it.next() {
+            if it.next().is_none() {
+                return Ok((first, second));
+            }
+        }
+    }
+    Err(anyhow::anyhow!(
+        "Found a non-pair element in the positional argument of dict(): {}",
+        pair.to_repr(),
+    )
+    .into())
+}
+
+#[starlark_module]
+pub(crate) fn register_dict(globals: &mut GlobalsBuilder) {
+    /// [dict](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#dict
+    /// ): creates a dictionary.
+    ///
+    /// `dict` creates a dictionary. It accepts up to one positional argument,
+    /// which is interpreted as an iterable of two-element sequences
+    /// (pairs), each specifying a key/value pair in the
+    /// resulting dictionary.
+    ///
+    /// `dict` also accepts any number of keyword arguments, each of which
+    /// specifies a key/value pair in the resulting dictionary; each keyword
+    /// is treated as a string.
+ /// + /// ``` + /// # starlark::assert::all_true(r#" + /// dict() == {} + /// dict(**{'a': 1}) == {'a': 1} + /// dict({'a': 1}) == {'a': 1} + /// dict([(1, 2), (3, 4)]) == {1: 2, 3: 4} + /// dict([(1, 2), ['a', 'b']]) == {1: 2, 'a': 'b'} + /// dict(one=1, two=2) == {'one': 1, 'two': 2} + /// dict([(1, 2)], x=3) == {1: 2, 'x': 3} + /// dict([('x', 2)], x=3) == {'x': 3} + /// # "#); + /// # starlark::assert::is_true(r#" + /// x = {'a': 1} + /// y = dict([('x', 2)], **x) + /// x == {'a': 1} and y == {'x': 2, 'a': 1} + /// # "#); + /// ``` + #[starlark( + as_type = FrozenDict, + speculative_exec_safe, + special_builtin_function = SpecialBuiltinFunction::Dict, + )] + fn dict<'v>(args: &Arguments<'v, '_>, heap: &'v Heap) -> starlark::Result> { + // Dict is super hot, and has a slightly odd signature, so we can do a bunch of special cases on it. + // In particular, we don't generate the kwargs if there are no positional arguments. + // Therefore we make it take the raw Arguments. + // It might have one positional argument, which could be a dict or an array of pairs. + // It might have named/kwargs arguments, which we copy over (afterwards). + + let pos = args.optional1(heap)?; + let kwargs = args.names()?; + + match pos { + None => Ok(kwargs), + Some(pos) => { + let mut result: Dict = match DictRef::from_value(pos) { + Some(pos) => { + let mut result = (*pos).clone(); + result.reserve(kwargs.len()); + result + } + None => { + let it = pos.iterate(heap)?; + let mut result = SmallMap::with_capacity(it.size_hint().0 + kwargs.len()); + for el in it { + let (k, v) = unpack_pair(el, heap)?; + let k = k.get_hashed()?; + result.insert_hashed(k, v); + } + Dict::new(result) + } + }; + for (k, v) in kwargs.iter_hashed() { + result.insert_hashed(k, v); + } + Ok(result) + } + } + } +} diff --git a/starlark-rust/starlark/src/values/types/dict/methods.rs b/starlark-rust/starlark/src/values/types/dict/methods.rs new file mode 100644 index 0000000000000..02263b731b22d --- /dev/null +++ b/starlark-rust/starlark/src/values/types/dict/methods.rs @@ -0,0 +1,435 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Methods for the `dict` type. 
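// Hedged sketch of the mutation pattern the methods below share: mutating
// methods re-borrow `this` as a `DictMut`, which fails if the dict is frozen
// or has active iterators. Roughly (illustrative helper, not from the diff):
//
//     fn clear_impl(this: Value) -> anyhow::Result<()> {
//         let mut d = DictMut::from_value(this)?; // Err if frozen/iterating
//         d.aref.clear();
//         Ok(())
//     }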
+ +use std::mem; + +use either::Either; +use starlark_derive::starlark_module; + +use crate as starlark; +use crate::environment::MethodsBuilder; +use crate::values::dict::DictMut; +use crate::values::dict::DictRef; +use crate::values::list::AllocList; +use crate::values::list::ListRef; +use crate::values::list::UnpackList; +use crate::values::none::NoneType; +use crate::values::typing::StarlarkIter; +use crate::values::Heap; +use crate::values::Value; +use crate::values::ValueOfUnchecked; + +#[starlark_module] +pub(crate) fn dict_methods(registry: &mut MethodsBuilder) { + /// [dict.clear]( + /// https://github.com/bazelbuild/starlark/blob/master/spec.md#dict·clear + /// ): clear a dictionary + /// + /// `D.clear()` removes all the entries of dictionary D and returns `None`. + /// It fails if the dictionary is frozen or if there are active iterators. + /// + /// ``` + /// # starlark::assert::is_true(r#" + /// x = {"one": 1, "two": 2} + /// x.clear() + /// x == {} + /// # "#); + /// ``` + fn clear(this: Value) -> anyhow::Result { + let mut this = DictMut::from_value(this)?; + this.aref.clear(); + Ok(NoneType) + } + + /// [dict.get]( + /// https://github.com/bazelbuild/starlark/blob/master/spec.md#dict·get + /// ): return an element from the dictionary. + /// + /// `D.get(key[, default])` returns the dictionary value corresponding to + /// the given key. If the dictionary contains no such value, `get` + /// returns `None`, or the value of the optional `default` parameter if + /// present. + /// + /// `get` fails if `key` is unhashable. + /// + /// ``` + /// # starlark::assert::is_true(r#" + /// x = {"one": 1, "two": 2} + /// # ( + /// x.get("one") == 1 + /// # and + /// x.get("three") == None + /// # and + /// x.get("three", 0) == 0 + /// # )"#); + /// ``` + #[starlark(speculative_exec_safe)] + fn get<'v>( + this: DictRef<'v>, + #[starlark(require = pos)] key: Value<'v>, + #[starlark(require = pos)] default: Option>, + ) -> starlark::Result> { + match this.get(key)? { + None => Ok(default.unwrap_or_else(Value::new_none)), + Some(x) => Ok(x), + } + } + + /// [dict.items]( + /// https://github.com/bazelbuild/starlark/blob/master/spec.md#dict·items + /// ): get list of (key, value) pairs. + /// + /// `D.items()` returns a new list of key/value pairs, one per element in + /// dictionary D, in the same order as they would be returned by a `for` + /// loop. + /// + /// ``` + /// # starlark::assert::is_true(r#" + /// x = {"one": 1, "two": 2} + /// x.items() == [("one", 1), ("two", 2)] + /// # "#); + /// ``` + fn items<'v>( + this: DictRef<'v>, + heap: &'v Heap, + ) -> anyhow::Result, Value<'v>)>>> { + Ok(heap.alloc_typed_unchecked(AllocList(this.iter())).cast()) + } + + /// [dict.keys]( + /// https://github.com/bazelbuild/starlark/blob/master/spec.md#dict·keys + /// ): get the list of keys of the dictionary. + /// + /// `D.keys()` returns a new list containing the keys of dictionary D, in + /// the same order as they would be returned by a `for` loop. + /// + /// ``` + /// # starlark::assert::is_true(r#" + /// x = {"one": 1, "two": 2} + /// x.keys() == ["one", "two"] + /// # "#); + /// ``` + #[starlark(speculative_exec_safe)] + fn keys<'v>( + this: DictRef<'v>, + heap: &'v Heap, + ) -> anyhow::Result>> { + Ok(ValueOfUnchecked::new(heap.alloc(AllocList(this.keys())))) + } + + /// [dict.pop]( + /// https://github.com/bazelbuild/starlark/blob/master/spec.md#dict·pop + /// ): return an element and remove it from a dictionary. 
+ /// + /// `D.pop(key[, default])` returns the value corresponding to the specified + /// key, and removes it from the dictionary. If the dictionary contains no + /// such value, and the optional `default` parameter is present, `pop` + /// returns that value; otherwise, it fails. + /// + /// `pop` fails if `key` is unhashable, or the dictionary is frozen or has + /// active iterators. + /// + /// ``` + /// # starlark::assert::is_true(r#" + /// x = {"one": 1, "two": 2} + /// # ( + /// x.pop("one") == 1 + /// # and + /// x == {"two": 2} + /// # and + /// x.pop("three", 0) == 0 + /// # and + /// x.pop("three", None) == None + /// # )"#); + /// ``` + /// + /// Failure: + /// + /// ``` + /// # starlark::assert::fail(r#" + /// {'one': 1}.pop('four') # error: not found + /// # "#, "not found"); + /// ``` + fn pop<'v>( + this: Value<'v>, + #[starlark(require = pos)] key: Value<'v>, + #[starlark(require = pos)] default: Option>, + ) -> starlark::Result> { + let mut me = DictMut::from_value(this)?; + match me.aref.remove_hashed(key.get_hashed()?) { + Some(x) => Ok(x), + None => match default { + Some(v) => Ok(v), + None => { + mem::drop(me); + Err(anyhow::anyhow!( + "Key `{}` not found in dictionary `{}`", + key.to_repr(), + this.to_repr() + ) + .into()) + } + }, + } + } + + /// [dict.popitem]( + /// https://github.com/bazelbuild/starlark/blob/master/spec.md#dict·popitem + /// ): returns and removes the first key/value pair of a dictionary. + /// + /// `D.popitem()` returns the first key/value pair, removing it from the + /// dictionary. + /// + /// `popitem` fails if the dictionary is empty, frozen, or has active + /// iterators. + /// + /// ``` + /// # starlark::assert::is_true(r#" + /// x = {"one": 1, "two": 2} + /// # ( + /// x.popitem() == ("one", 1) + /// # and + /// x.popitem() == ("two", 2) + /// # and + /// x == {} + /// # )"#); + /// ``` + /// + /// Failure: + /// + /// ``` + /// # starlark::assert::fail(r#" + /// {}.popitem() # error: empty dict + /// # "#, "empty dict"); + /// ``` + fn popitem<'v>(this: Value<'v>) -> anyhow::Result<(Value<'v>, Value<'v>)> { + let mut this = DictMut::from_value(this)?; + + // TODO(nga): this implementation is O(N). + // https://github.com/bazelbuild/starlark/issues/286 + + match this.aref.content.shift_remove_index(0) { + Some((k, v)) => Ok((k, v)), + None => Err(anyhow::anyhow!("Cannot .popitem() on an empty dictionary")), + } + } + + /// [dict.setdefault]( + /// https://github.com/bazelbuild/starlark/blob/master/spec.md#dict·setdefault + /// ): get a value from a dictionary, setting it to a new value if not + /// present. + /// + /// `D.setdefault(key[, default])` returns the dictionary value + /// corresponding to the given key. If the dictionary contains no such + /// value, `setdefault`, like `get`, returns `None` or the value of the + /// optional `default` parameter if present; `setdefault` additionally + /// inserts the new key/value entry into the dictionary. + /// + /// `setdefault` fails if the key is unhashable or if the dictionary is + /// frozen. 
+ /// + /// ``` + /// # starlark::assert::is_true(r#" + /// x = {"one": 1, "two": 2} + /// # ( + /// x.setdefault("one") == 1 + /// # and + /// x.setdefault("three", 0) == 0 + /// # and + /// x == {"one": 1, "two": 2, "three": 0} + /// # and + /// x.setdefault("four") == None + /// # and + /// x == {"one": 1, "two": 2, "three": 0, "four": None} + /// # )"#) + /// ``` + fn setdefault<'v>( + this: Value<'v>, + #[starlark(require = pos)] key: Value<'v>, + #[starlark(require = pos)] default: Option>, + ) -> starlark::Result> { + let mut this = DictMut::from_value(this)?; + let key = key.get_hashed()?; + match this.aref.content.entry_hashed(key) { + starlark_map::small_map::Entry::Occupied(e) => Ok(*e.get()), + starlark_map::small_map::Entry::Vacant(e) => { + let default = default.unwrap_or_else(Value::new_none); + e.insert(default); + Ok(default) + } + } + } + + /// [dict.update]( + /// https://github.com/bazelbuild/starlark/blob/master/spec.md#dict·update + /// ): update values in the dictionary. + /// + /// `D.update([pairs][, name=value[, ...])` makes a sequence of key/value + /// insertions into dictionary D, then returns `None.` + /// + /// If the positional argument `pairs` is present, it must be `None`, + /// another `dict`, or some other iterable. + /// If it is another `dict`, then its key/value pairs are inserted into D. + /// If it is an iterable, it must provide a sequence of pairs (or other + /// iterables of length 2), each of which is treated as a key/value pair + /// to be inserted into D. + /// + /// For each `name=value` argument present, the name is converted to a + /// string and used as the key for an insertion into D, with its + /// corresponding value being `value`. + /// + /// `update` fails if the dictionary is frozen. + /// + /// ``` + /// # starlark::assert::is_true(r#" + /// x = {} + /// x.update([("a", 1), ("b", 2)], c=3) + /// x.update({"d": 4}) + /// x.update(e=5) + /// x == {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5} + /// # "#); + /// ``` + fn update<'v>( + this: Value<'v>, + #[starlark(require = pos)] pairs: Option< + ValueOfUnchecked<'v, Either, StarlarkIter<(Value<'v>, Value<'v>)>>>, + >, + #[starlark(kwargs)] kwargs: DictRef<'v>, + heap: &'v Heap, + ) -> starlark::Result { + let pairs = if pairs.map(|x| x.get().ptr_eq(this)) == Some(true) { + // someone has done `x.update(x)` - that isn't illegal, but we will have issues + // with trying to iterate over x while holding x for mutation, and it doesn't do + // anything useful, so just change pairs back to None + None + } else { + pairs.map(|x| x.get()) + }; + + let mut this = DictMut::from_value(this)?; + if let Some(pairs) = pairs { + if let Some(dict) = DictRef::from_value(pairs) { + for (k, v) in dict.iter_hashed() { + this.aref.insert_hashed(k, v); + } + } else { + for v in pairs.iterate(heap)? { + let mut it = v.iterate(heap)?; + // `StarlarkIterator` is fused. + let (Some(k), Some(v), None) = (it.next(), it.next(), it.next()) else { + return Err(anyhow::anyhow!( + "dict.update expect a list of pairs or a dictionary as first argument, got a list of non-pairs.", + ).into()); + }; + this.aref.insert_hashed(k.get_hashed()?, v); + } + } + } + + for (k, v) in kwargs.iter_hashed() { + this.aref.insert_hashed(k, v); + } + Ok(NoneType) + } + + /// [dict.values]( + /// https://github.com/bazelbuild/starlark/blob/master/spec.md#dict·values + /// ): get the list of values of the dictionary. 
+ /// + /// `D.values()` returns a new list containing the dictionary's values, in + /// the same order as they would be returned by a `for` loop over the + /// dictionary. + /// + /// ``` + /// # starlark::assert::is_true(r#" + /// x = {"one": 1, "two": 2} + /// x.values() == [1, 2] + /// # "#); + /// ``` + #[starlark(speculative_exec_safe)] + fn values<'v>( + this: DictRef<'v>, + heap: &'v Heap, + ) -> anyhow::Result>> { + Ok(ValueOfUnchecked::new(heap.alloc_list_iter(this.values()))) + } +} + +#[cfg(test)] +mod tests { + use crate::assert; + use crate::assert::Assert; + + #[test] + fn test_error_codes() { + assert::fail(r#"x = {"one": 1}; x.pop("four")"#, "not found"); + assert::fail("x = {}; x.popitem()", "empty"); + } + + #[test] + fn test_dict_add() { + assert::fail("{1: 2} + {3: 4}", "not supported"); + } + + #[test] + fn test_dict_with_duplicates() { + // In Starlark spec this is a runtime error. In Python it's fine. + // We make it a runtime error, plus have a lint that checks for it statically. + assert::fails("{40+2: 2, 6*7: 3}", &["key repeated", "42"]); + // Also check we fail if the entire dictionary is static (a different code path). + assert::fails("{42: 2, 42: 3}", &["key repeated", "42"]); + } + + #[test] + fn test_dict_update_with_self_pos() { + assert::eq("{3: 4, 1: 2}", "d = {3: 4, 1: 2}; d.update(d); d"); + } + + #[test] + fn test_dict_update_with_self_as_kwargs() { + assert::eq("{'a': 1, 'b': 2}", "d = {'a': 1, 'b': 2}; d.update(**d); d"); + } + + #[test] + fn test_frozen_dict_cannot_be_updated_with_self_pos() { + let mut a = Assert::new(); + a.module("d.star", "D = {7: 8, 9: 0}"); + a.fail( + r#" +load('d.star', 'D') + +D.update(D) +"#, + "Immutable", + ); + } + + #[test] + fn test_frozen_dict_cannot_be_updated_with_self_as_kwargs() { + let mut a = Assert::new(); + a.module("d.star", "D = {'x': 17, 'y': 19}"); + a.fail( + r#" +load('d.star', 'D') +D.update(**D) +"#, + "Immutable", + ); + } +} diff --git a/starlark-rust/starlark/src/values/types/dict/mod.rs b/starlark-rust/starlark/src/values/types/dict/mod.rs deleted file mode 100644 index 68946035da667..0000000000000 --- a/starlark-rust/starlark/src/values/types/dict/mod.rs +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2018 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! The dictionary type, a mutable associative-map, which iterates in insertion order. 
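// Hedged migration sketch: the `DictOf` wrapper deleted below is superseded by
// `UnpackDictEntries` (assuming call sites only needed the typed entries):
//
//     // before: d: DictOf<String, i32>, read via d.collect_entries()
//     // after:
//     fn f(d: UnpackDictEntries<String, i32>) {
//         for (k, v) in d.entries { let _ = (k, v); }
//     }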
- -mod alloc; -mod of; -pub(crate) mod refcell; -mod refs; -mod traits; -pub(crate) mod value; - -pub use crate::values::dict::alloc::AllocDict; -pub use crate::values::dict::of::DictOf; -pub use crate::values::dict::refs::DictMut; -pub use crate::values::dict::refs::DictRef; -pub use crate::values::dict::refs::FrozenDictRef; -pub use crate::values::dict::value::Dict; diff --git a/starlark-rust/starlark/src/values/types/dict/of.rs b/starlark-rust/starlark/src/values/types/dict/of.rs deleted file mode 100644 index 1c14673bafdd5..0000000000000 --- a/starlark-rust/starlark/src/values/types/dict/of.rs +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Copyright 2018 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -use std::hash::Hash; -use std::marker::PhantomData; -use std::ops::Deref; - -use starlark_derive::Trace; -use starlark_map::small_map::SmallMap; - -use crate as starlark; -use crate::typing::Ty; -use crate::values::dict::DictRef; -use crate::values::type_repr::DictType; -use crate::values::type_repr::StarlarkTypeRepr; -use crate::values::UnpackValue; -use crate::values::Value; - -/// Like [`ValueOf`](crate::values::ValueOf), but only validates key and value types; does not construct -/// or store a map. Use `to_dict` to get at the map. -#[derive(Debug, Trace)] -pub struct DictOf<'v, K: UnpackValue<'v>, V: UnpackValue<'v>> { - value: Value<'v>, - phantom: PhantomData<(K, V)>, -} - -impl<'v, K: UnpackValue<'v>, V: UnpackValue<'v>> DictOf<'v, K, V> { - /// Get all the elements. - // This should return an iterator, but it is not trivial to do with `ARef`. - pub fn collect_entries(&self) -> Vec<(K, V)> { - DictRef::from_value(self.value) - .expect("already validated as a dict") - .iter() - .map(|(k, v)| { - ( - K::unpack_value(k).expect("already validated key"), - V::unpack_value(v).expect("already validated value"), - ) - }) - .collect() - } - - /// Number of entries. - #[inline] - pub fn len(&self) -> usize { - DictRef::from_value(self.value) - .expect("already validated as a dict") - .len() - } -} - -impl<'v, K: UnpackValue<'v> + Hash + Eq, V: UnpackValue<'v>> DictOf<'v, K, V> { - /// Collect all the elements to a fresh `SmallMap`. 
- pub fn to_dict(&self) -> SmallMap { - DictRef::from_value(self.value) - .expect("already validated as a dict") - .iter() - .map(|(k, v)| { - ( - K::unpack_value(k).expect("already validated key"), - V::unpack_value(v).expect("already validated value"), - ) - }) - .collect() - } -} - -impl<'v, K: UnpackValue<'v>, V: UnpackValue<'v>> StarlarkTypeRepr for DictOf<'v, K, V> { - fn starlark_type_repr() -> Ty { - DictType::::starlark_type_repr() - } -} - -impl<'v, K: UnpackValue<'v>, V: UnpackValue<'v>> UnpackValue<'v> for DictOf<'v, K, V> { - fn expected() -> String { - format!("dict mapping {} to {}", K::expected(), V::expected()) - } - - fn unpack_value(value: Value<'v>) -> Option { - let dict = DictRef::from_value(value)?; - let all_valid = dict - .iter() - .all(|(k, v)| K::unpack_value(k).is_some() && V::unpack_value(v).is_some()); - if all_valid { - Some(DictOf { - value, - phantom: PhantomData, - }) - } else { - None - } - } -} - -impl<'v, K: UnpackValue<'v> + Hash, V: UnpackValue<'v>> Deref for DictOf<'v, K, V> { - type Target = Value<'v>; - - fn deref(&self) -> &Self::Target { - &self.value - } -} diff --git a/starlark-rust/starlark/src/values/types/dict/refs.rs b/starlark-rust/starlark/src/values/types/dict/refs.rs index 25a7ca64d3546..66d0ca34cb153 100644 --- a/starlark-rust/starlark/src/values/types/dict/refs.rs +++ b/starlark-rust/starlark/src/values/types/dict/refs.rs @@ -18,9 +18,10 @@ use std::cell::Ref; use std::cell::RefCell; use std::cell::RefMut; +use std::convert::Infallible; use std::ops::Deref; -use std::ops::DerefMut; +use dupe::Dupe; use either::Either; use crate::coerce::coerce; @@ -29,6 +30,7 @@ use crate::values::dict::value::DictGen; use crate::values::dict::value::FrozenDictData; use crate::values::dict::Dict; use crate::values::type_repr::StarlarkTypeRepr; +use crate::values::types::dict::dict_type::DictType; use crate::values::FrozenValue; use crate::values::UnpackValue; use crate::values::Value; @@ -40,6 +42,21 @@ pub struct DictRef<'v> { pub(crate) aref: Either>, &'v Dict<'v>>, } +impl<'v> Clone for DictRef<'v> { + fn clone(&self) -> Self { + match &self.aref { + Either::Left(x) => DictRef { + aref: Either::Left(Ref::clone(x)), + }, + Either::Right(x) => DictRef { + aref: Either::Right(*x), + }, + } + } +} + +impl<'v> Dupe for DictRef<'v> {} + /// Mutably borrowed `Dict`. 
pub struct DictMut<'v> { pub(crate) aref: RefMut<'v, Dict<'v>>, @@ -122,32 +139,18 @@ impl<'v> Deref for DictRef<'v> { } } -impl<'v> Deref for DictMut<'v> { - type Target = Dict<'v>; - - fn deref(&self) -> &Self::Target { - &self.aref - } -} - -impl<'v> DerefMut for DictMut<'v> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.aref - } -} - impl<'v> StarlarkTypeRepr for DictRef<'v> { + type Canonical = as StarlarkTypeRepr>::Canonical; + fn starlark_type_repr() -> Ty { - Dict::<'v>::starlark_type_repr() + DictType::::starlark_type_repr() } } impl<'v> UnpackValue<'v> for DictRef<'v> { - fn expected() -> String { - "dict".to_owned() - } + type Error = Infallible; - fn unpack_value(value: Value<'v>) -> Option> { - DictRef::from_value(value) + fn unpack_value_impl(value: Value<'v>) -> Result>, Infallible> { + Ok(DictRef::from_value(value)) } } diff --git a/starlark-rust/starlark/src/values/types/dict/traits.rs b/starlark-rust/starlark/src/values/types/dict/traits.rs index 13783e5038f83..5811dd8e22e77 100644 --- a/starlark-rust/starlark/src/values/types/dict/traits.rs +++ b/starlark-rust/starlark/src/values/types/dict/traits.rs @@ -18,12 +18,14 @@ use std::collections::BTreeMap; use std::hash::Hash; +use either::Either; + use crate::collections::SmallMap; use crate::typing::Ty; use crate::values::dict::AllocDict; use crate::values::dict::DictRef; -use crate::values::type_repr::DictType; use crate::values::type_repr::StarlarkTypeRepr; +use crate::values::types::dict::dict_type::DictType; use crate::values::AllocFrozenValue; use crate::values::AllocValue; use crate::values::FrozenHeap; @@ -68,30 +70,41 @@ where } impl<'a, K: StarlarkTypeRepr, V: StarlarkTypeRepr> StarlarkTypeRepr for &'a SmallMap { + type Canonical = as StarlarkTypeRepr>::Canonical; + fn starlark_type_repr() -> Ty { DictType::::starlark_type_repr() } } impl StarlarkTypeRepr for SmallMap { + type Canonical = as StarlarkTypeRepr>::Canonical; + fn starlark_type_repr() -> Ty { DictType::::starlark_type_repr() } } impl<'v, K: UnpackValue<'v> + Hash + Eq, V: UnpackValue<'v>> UnpackValue<'v> for SmallMap { - fn expected() -> String { - format!("dict mapping {} to {}", K::expected(), V::expected()) - } + type Error = Either; - fn unpack_value(value: Value<'v>) -> Option { - let dict = DictRef::from_value(value)?; + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + let Some(dict) = DictRef::from_value(value) else { + return Ok(None); + }; let it = dict.iter(); let mut r = SmallMap::with_capacity(it.len()); for (k, v) in it { - r.insert(K::unpack_value(k)?, V::unpack_value(v)?); + let Some(k) = K::unpack_value_impl(k).map_err(Either::Left)? else { + return Ok(None); + }; + let Some(v) = V::unpack_value_impl(v).map_err(Either::Right)? else { + return Ok(None); + }; + // TODO(nga): return error if keys are not unique. 
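// Worked example of the error plumbing above (hedged): unpacking
// `{"a": 1, "b": "x"}` into `SmallMap<String, i32>` finds that `"x"` is not an
// `int`, so `V::unpack_value_impl` yields `Ok(None)` and the whole unpack
// returns `Ok(None)` ("not this type"). Only genuine errors raised by the
// nested unpackers propagate, tagged `Either::Left` (key side) or
// `Either::Right` (value side).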
+ r.insert(k, v); } - Some(r) + Ok(Some(r)) } } @@ -131,28 +144,39 @@ where } impl<'a, K: StarlarkTypeRepr, V: StarlarkTypeRepr> StarlarkTypeRepr for &'a BTreeMap { + type Canonical = as StarlarkTypeRepr>::Canonical; + fn starlark_type_repr() -> Ty { DictType::::starlark_type_repr() } } impl StarlarkTypeRepr for BTreeMap { + type Canonical = as StarlarkTypeRepr>::Canonical; + fn starlark_type_repr() -> Ty { DictType::::starlark_type_repr() } } impl<'v, K: UnpackValue<'v> + Ord, V: UnpackValue<'v>> UnpackValue<'v> for BTreeMap { - fn expected() -> String { - format!("dict mapping {} to {}", K::expected(), V::expected()) - } + type Error = Either; - fn unpack_value(value: Value<'v>) -> Option { - let dict = DictRef::from_value(value)?; + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + let Some(dict) = DictRef::from_value(value) else { + return Ok(None); + }; let mut r = BTreeMap::new(); for (k, v) in dict.iter() { - r.insert(K::unpack_value(k)?, V::unpack_value(v)?); + let Some(k) = K::unpack_value_impl(k).map_err(Either::Left)? else { + return Ok(None); + }; + let Some(v) = V::unpack_value_impl(v).map_err(Either::Right)? else { + return Ok(None); + }; + // TODO(nga): return error if keys are not unique. + r.insert(k, v); } - Some(r) + Ok(Some(r)) } } diff --git a/starlark-rust/starlark/src/values/types/dict/unpack.rs b/starlark-rust/starlark/src/values/types/dict/unpack.rs new file mode 100644 index 0000000000000..339f2fce92e69 --- /dev/null +++ b/starlark-rust/starlark/src/values/types/dict/unpack.rs @@ -0,0 +1,70 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use either::Either; + +use crate::values::dict::DictRef; +use crate::values::type_repr::StarlarkTypeRepr; +use crate::values::types::dict::dict_type::DictType; +use crate::values::UnpackValue; +use crate::values::Value; + +/// Unpack `dict`. +/// +/// There's `impl` [`UnpackValue`] for [`SmallMap`](starlark_map::small_map::SmallMap) +/// but this can be used when hashing of unpacked keys is not needed. +pub struct UnpackDictEntries { + /// Entries of the dictionary. + pub entries: Vec<(K, V)>, +} + +impl Default for UnpackDictEntries { + fn default() -> Self { + UnpackDictEntries { + entries: Vec::new(), + } + } +} + +impl StarlarkTypeRepr for UnpackDictEntries { + type Canonical = as StarlarkTypeRepr>::Canonical; + + fn starlark_type_repr() -> crate::typing::Ty { + DictType::::starlark_type_repr() + } +} + +impl<'v, K: UnpackValue<'v>, V: UnpackValue<'v>> UnpackValue<'v> for UnpackDictEntries { + type Error = Either; + + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + let Some(dict) = DictRef::unpack_value_opt(value) else { + return Ok(None); + }; + let mut entries = Vec::with_capacity(dict.len()); + for (k, v) in dict.iter() { + let Some(k) = K::unpack_value_impl(k).map_err(Either::Left)? 
else { + return Ok(None); + }; + let Some(v) = V::unpack_value_impl(v).map_err(Either::Right)? else { + return Ok(None); + }; + entries.push((k, v)); + } + Ok(Some(UnpackDictEntries { entries })) + } +} diff --git a/starlark-rust/starlark/src/values/types/dict/value.rs b/starlark-rust/starlark/src/values/types/dict/value.rs index 9a9a859fa36be..d7874ae8647ae 100644 --- a/starlark-rust/starlark/src/values/types/dict/value.rs +++ b/starlark-rust/starlark/src/values/types/dict/value.rs @@ -31,7 +31,6 @@ use allocative::Allocative; use display_container::fmt_keyed_container; use serde::Serialize; use starlark_derive::starlark_value; -use starlark_derive::StarlarkDocs; use starlark_map::Equivalent; use crate as starlark; @@ -45,17 +44,17 @@ use crate::environment::Methods; use crate::environment::MethodsStatic; use crate::hint::unlikely; use crate::typing::Ty; +use crate::util::refcell::unleak_borrow; use crate::values::comparison::equals_small_map; -use crate::values::dict::refcell::unleak_borrow; -use crate::values::dict::DictOf; use crate::values::dict::DictRef; use crate::values::error::ValueError; use crate::values::layout::avalue::alloc_static; use crate::values::layout::avalue::AValueImpl; -use crate::values::layout::avalue::Simple; +use crate::values::layout::avalue::AValueSimple; use crate::values::layout::heap::repr::AValueRepr; -use crate::values::string::hash_string_value; +use crate::values::string::str_type::hash_string_value; use crate::values::type_repr::StarlarkTypeRepr; +use crate::values::types::dict::dict_type::DictType; use crate::values::AllocFrozenValue; use crate::values::AllocValue; use crate::values::Freeze; @@ -70,16 +69,7 @@ use crate::values::Trace; use crate::values::Value; use crate::values::ValueLike; -#[derive( - Clone, - Default, - Trace, - Debug, - ProvidesStaticType, - StarlarkDocs, - Allocative -)] -#[starlark_docs(builtin = "standard")] +#[derive(Clone, Default, Trace, Debug, ProvidesStaticType, Allocative)] pub(crate) struct DictGen(pub(crate) T); impl<'v, T: DictLike<'v>> Display for DictGen { @@ -99,12 +89,14 @@ impl<'v> Display for Dict<'v> { #[repr(transparent)] pub struct Dict<'v> { /// The data stored by the dictionary. The keys must all be hashable values. - content: SmallMap, Value<'v>>, + pub(crate) content: SmallMap, Value<'v>>, } impl<'v> StarlarkTypeRepr for Dict<'v> { + type Canonical = as StarlarkTypeRepr>::Canonical; + fn starlark_type_repr() -> Ty { - DictOf::, Value<'v>>::starlark_type_repr() + Self::Canonical::starlark_type_repr() } } @@ -120,13 +112,11 @@ pub(crate) type FrozenDict = DictGen; pub(crate) type MutableDict<'v> = DictGen>>; -pub(crate) static VALUE_EMPTY_FROZEN_DICT: AValueRepr>> = - alloc_static( - Simple, - DictGen(FrozenDictData { - content: SmallMap::new(), - }), - ); +pub(crate) static VALUE_EMPTY_FROZEN_DICT: AValueRepr< + AValueImpl<'static, AValueSimple>>, +> = alloc_static(DictGen(FrozenDictData { + content: SmallMap::new(), +})); unsafe impl<'v> Coerce> for FrozenDictData {} @@ -137,6 +127,8 @@ impl<'v> AllocValue<'v> for Dict<'v> { } impl StarlarkTypeRepr for FrozenDictData { + type Canonical = as StarlarkTypeRepr>::Canonical; + fn starlark_type_repr() -> Ty { Ty::dict(Ty::any(), Ty::any()) } @@ -195,7 +187,9 @@ impl<'v> Dict<'v> { DictGen::::get_type_value_static() } - /// Create a new [`Dict`]. + /// This function is deprecated. + /// Use [`AllocDict`](crate::values::dict::AllocDict) or [`SmallMap`] + /// to allocate a new dictionary on the heap. 
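An aside before the `value.rs` hunk resumes: the deprecation note above redirects allocation to `AllocDict`, and the new `UnpackDictEntries` covers the read side without hashing keys. A minimal caller-side sketch of the round trip, assuming `AllocDict` wraps an iterator of key/value pairs and that the public `unpack_value` wrapper returns `Result<Option<T>, _>` (the shape the `UnpackFloat` test later in this diff relies on):

```rust
use starlark::collections::SmallMap;
use starlark::values::dict::AllocDict;
use starlark::values::{Heap, UnpackValue, Value};

// Allocate `{"a": 1, "b": 2}` on the heap, then read it back as a map.
// A mistyped key or value surfaces through the `Either<K::Error, V::Error>`
// error type from the `SmallMap` impl above; this sketch just discards it.
fn dict_round_trip<'v>(heap: &'v Heap) -> Option<SmallMap<String, i32>> {
    let dict: Value<'v> = heap.alloc(AllocDict([("a", 1), ("b", 2)]));
    SmallMap::<String, i32>::unpack_value(dict).ok().flatten()
}
```

`UnpackDictEntries<K, V>` would be used the same way when only the `entries: Vec<(K, V)>` are needed.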
pub fn new(content: SmallMap<Value<'v>, Value<'v>>) -> Self { Self { content } } @@ -235,7 +229,7 @@ impl<'v> Dict<'v> { /// Get the value associated with a particular key. Will be [`Err`] if the key is not hashable, /// and otherwise [`Some`] if the key exists in the dictionary and [`None`] otherwise. - pub fn get(&self, key: Value<'v>) -> anyhow::Result<Option<Value<'v>>> { + pub fn get(&self, key: Value<'v>) -> crate::Result<Option<Value<'v>>> { Ok(self.get_hashed(key.get_hashed()?)) } @@ -291,7 +285,7 @@ impl<'v> Dict<'v> { /// Remove given key from the dictionary. pub fn remove_hashed(&mut self, key: Hashed<Value<'v>>) -> Option<Value<'v>> { - self.content.remove_hashed(key.as_ref()) + self.content.shift_remove_hashed(key.as_ref()) } /// Remove all elements from the dictionary. @@ -332,7 +326,7 @@ trait DictLike<'v>: Debug + Allocative { unsafe fn iter_start(&self); unsafe fn content_unchecked(&self) -> &SmallMap<Value<'v>, Value<'v>>; unsafe fn iter_stop(&self); - fn set_at(&self, index: Hashed<Value<'v>>, value: Value<'v>) -> anyhow::Result<()>; + fn set_at(&self, index: Hashed<Value<'v>>, value: Value<'v>) -> crate::Result<()>; } impl<'v> DictLike<'v> for RefCell<Dict<'v>> { @@ -358,13 +352,13 @@ impl<'v> DictLike<'v> for RefCell<Dict<'v>> { &self.try_borrow_unguarded().ok().unwrap_unchecked().content } - fn set_at(&self, index: Hashed<Value<'v>>, alloc_value: Value<'v>) -> anyhow::Result<()> { + fn set_at(&self, index: Hashed<Value<'v>>, alloc_value: Value<'v>) -> crate::Result<()> { match self.try_borrow_mut() { Ok(mut xs) => { xs.content.insert_hashed(index, alloc_value); Ok(()) } - Err(_) => Err(ValueError::MutationDuringIteration.into()), + Err(_) => Err(crate::Error::new_other(ValueError::MutationDuringIteration)), } } } @@ -384,14 +378,16 @@ impl<'v> DictLike<'v> for FrozenDictData { coerce(&self.content) } - fn set_at(&self, _index: Hashed<Value<'v>>, _value: Value<'v>) -> anyhow::Result<()> { - Err(ValueError::CannotMutateImmutableValue.into()) + fn set_at(&self, _index: Hashed<Value<'v>>, _value: Value<'v>) -> crate::Result<()> { + Err(crate::Error::new_other( + ValueError::CannotMutateImmutableValue, + )) } } pub(crate) fn dict_methods() -> Option<&'static Methods> { static RES: MethodsStatic = MethodsStatic::new(); - RES.methods(crate::stdlib::dict::dict_methods) + RES.methods(crate::values::types::dict::methods::dict_methods) } #[starlark_value(type = Dict::TYPE)] @@ -427,7 +423,7 @@ where !self.0.content().is_empty() } - fn equals(&self, other: Value<'v>) -> anyhow::Result<bool> { + fn equals(&self, other: Value<'v>) -> crate::Result<bool> { match DictRef::from_value(other) { None => Ok(false), Some(other) => { @@ -436,25 +432,27 @@ where } } - fn at(&self, index: Value<'v>, _heap: &'v Heap) -> anyhow::Result<Value<'v>> { + fn at(&self, index: Value<'v>, _heap: &'v Heap) -> crate::Result<Value<'v>> { match self.0.content().get_hashed_by_value(index.get_hashed()?)
{ Some(v) => Ok(v.to_value()), - None => Err(ValueError::KeyNotFound(index.to_repr()).into()), + None => Err(crate::Error::new_other(ValueError::KeyNotFound( + index.to_repr(), + ))), } } - fn length(&self) -> anyhow::Result<i32> { + fn length(&self) -> crate::Result<i32> { Ok(self.0.content().len() as i32) } - fn is_in(&self, other: Value<'v>) -> anyhow::Result<bool> { + fn is_in(&self, other: Value<'v>) -> crate::Result<bool> { Ok(self .0 .content() .contains_key_hashed_by_value(other.get_hashed()?)) } - unsafe fn iterate(&self, me: Value<'v>, _heap: &'v Heap) -> anyhow::Result<Value<'v>> { + unsafe fn iterate(&self, me: Value<'v>, _heap: &'v Heap) -> crate::Result<Value<'v>> { self.0.iter_start(); Ok(me) } @@ -473,16 +471,16 @@ where self.0.iter_stop(); } - fn set_at(&self, index: Value<'v>, alloc_value: Value<'v>) -> anyhow::Result<()> { + fn set_at(&self, index: Value<'v>, alloc_value: Value<'v>) -> crate::Result<()> { let index = index.get_hashed()?; self.0.set_at(index, alloc_value) } - fn bit_or(&self, rhs: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { + fn bit_or(&self, rhs: Value<'v>, heap: &'v Heap) -> crate::Result<Value<'v>> { let rhs = DictRef::from_value(rhs) .map_or_else(|| ValueError::unsupported_with(self, "|", rhs), Ok)?; if self.0.content().is_empty() { - return Ok(heap.alloc(rhs.clone())); + return Ok(heap.alloc((*rhs).clone())); } // Might be faster if we preallocate the capacity, but then copying in the LHS // is more expensive and might oversize given the behaviour on duplicates. @@ -501,6 +499,14 @@ where fn get_type_starlark_repr() -> Ty { Ty::any_dict() } + + fn try_freeze_static(&self) -> Option<FrozenValue> { + if self.0.content().is_empty() { + Some(FrozenValue::new_repr(&VALUE_EMPTY_FROZEN_DICT)) + } else { + None + } + } } impl<'v, T: DictLike<'v>> Serialize for DictGen<T> { @@ -514,9 +520,10 @@ impl<'v, T: DictLike<'v>> Serialize for DictGen<T> { #[cfg(test)] mod tests { - use super::*; use crate::assert; + use crate::coerce::coerce; use crate::collections::SmallMap; + use crate::values::dict::Dict; use crate::values::Heap; #[test] @@ -535,7 +542,7 @@ b1 and b2 and b3 } #[test] - fn test_get_str() -> anyhow::Result<()> { + fn test_get_str() -> crate::Result<()> { let heap = Heap::new(); let k1 = heap.alloc_str("hello").get_hashed(); let k2 = heap.alloc_str("world").get_hashed(); diff --git a/starlark-rust/starlark/src/values/types/ellipsis.rs b/starlark-rust/starlark/src/values/types/ellipsis.rs index ec77060555c24..af5d3cc89470f 100644 --- a/starlark-rust/starlark/src/values/types/ellipsis.rs +++ b/starlark-rust/starlark/src/values/types/ellipsis.rs @@ -22,8 +22,8 @@ use starlark_derive::ProvidesStaticType; use crate as starlark; use crate::values::layout::avalue::alloc_static; +use crate::values::layout::avalue::AValueBasic; use crate::values::layout::avalue::AValueImpl; -use crate::values::layout::avalue::Basic; use crate::values::layout::heap::repr::AValueRepr; use crate::values::AllocFrozenValue; use crate::values::FrozenHeap; @@ -37,11 +37,11 @@ use crate::values::StarlarkValue; derive_more::Display, ProvidesStaticType )] -#[display(fmt = "Ellipsis")] +#[display("Ellipsis")] pub(crate) struct Ellipsis; -pub(crate) static VALUE_ELLIPSIS: AValueRepr<AValueImpl<Basic, Ellipsis>> = - alloc_static(Basic, Ellipsis); +pub(crate) static VALUE_ELLIPSIS: AValueRepr<AValueImpl<'static, AValueBasic<Ellipsis>>> = + alloc_static(Ellipsis); #[starlark_value(type = "ellipsis")] impl<'v> StarlarkValue<'v> for Ellipsis {} diff --git a/starlark-rust/starlark/src/values/types/enumeration/mod.rs b/starlark-rust/starlark/src/values/types/enumeration.rs similarity index 100% rename from
starlark-rust/starlark/src/values/types/enumeration/mod.rs rename to starlark-rust/starlark/src/values/types/enumeration.rs diff --git a/starlark-rust/starlark/src/values/types/enumeration/enum_type.rs b/starlark-rust/starlark/src/values/types/enumeration/enum_type.rs index e4d3c72b092e3..116af16486005 100644 --- a/starlark-rust/starlark/src/values/types/enumeration/enum_type.rs +++ b/starlark-rust/starlark/src/values/types/enumeration/enum_type.rs @@ -30,7 +30,6 @@ use starlark_derive::starlark_module; use starlark_derive::starlark_value; use starlark_derive::Coerce; use starlark_derive::NoSerialize; -use starlark_derive::StarlarkDocs; use starlark_derive::Trace; use starlark_map::small_map::SmallMap; use starlark_map::Equivalent; @@ -42,13 +41,13 @@ use crate::environment::MethodsBuilder; use crate::environment::MethodsStatic; use crate::eval::Arguments; use crate::eval::Evaluator; +use crate::typing::callable::TyCallable; use crate::typing::starlark_value::TyStarlarkValue; use crate::typing::user::TyUser; use crate::typing::user::TyUserIndex; use crate::typing::user::TyUserParams; -use crate::typing::Param; +use crate::typing::ParamSpec; use crate::typing::Ty; -use crate::typing::TyFunction; use crate::values::enumeration::matcher::EnumTypeMatcher; use crate::values::enumeration::ty_enum_type::TyEnumData; use crate::values::enumeration::value::EnumValueGen; @@ -81,8 +80,8 @@ pub trait EnumCell: Freeze { fn get_or_init_ty( ty: &Self::TyEnumDataOpt, - f: impl FnOnce() -> anyhow::Result>, - ) -> anyhow::Result<()>; + f: impl FnOnce() -> crate::Result>, + ) -> crate::Result<()>; fn get_ty(ty: &Self::TyEnumDataOpt) -> Option<&Arc>; } @@ -91,8 +90,8 @@ impl<'v> EnumCell for Value<'v> { fn get_or_init_ty( ty: &Self::TyEnumDataOpt, - f: impl FnOnce() -> anyhow::Result>, - ) -> anyhow::Result<()> { + f: impl FnOnce() -> crate::Result>, + ) -> crate::Result<()> { ty.get_or_try_init(f)?; Ok(()) } @@ -107,8 +106,8 @@ impl EnumCell for FrozenValue { fn get_or_init_ty( ty: &Self::TyEnumDataOpt, - f: impl FnOnce() -> anyhow::Result>, - ) -> anyhow::Result<()> { + f: impl FnOnce() -> crate::Result>, + ) -> crate::Result<()> { let _ignore = (ty, f); Ok(()) } @@ -119,16 +118,7 @@ impl EnumCell for FrozenValue { } /// The type of an enumeration, created by `enum()`. -#[derive( - Debug, - Trace, - Coerce, - NoSerialize, - ProvidesStaticType, - StarlarkDocs, - Allocative -)] -#[starlark_docs(builtin = "extension")] +#[derive(Debug, Trace, Coerce, NoSerialize, ProvidesStaticType, Allocative)] #[repr(C)] // Deliberately store fully populated values // for each entry, so we can produce enum values with zero allocation. @@ -178,7 +168,7 @@ pub type EnumType<'v> = EnumTypeGen>; pub type FrozenEnumType = EnumTypeGen; impl<'v> EnumType<'v> { - pub(crate) fn new(elements: Vec>, heap: &'v Heap) -> anyhow::Result> { + pub(crate) fn new(elements: Vec>, heap: &'v Heap) -> crate::Result> { // We are constructing the enum and all elements in one go. // They both point at each other, which adds to the complexity. 
let id = TypeInstanceId::gen(); @@ -197,7 +187,9 @@ impl<'v> EnumType<'v> { value: x.to_value(), }); if res.insert_hashed(x.to_value().get_hashed()?, v).is_some() { - return Err(EnumError::DuplicateEnumValue(x.to_string()).into()); + return Err(crate::Error::new_other(EnumError::DuplicateEnumValue( + x.to_string(), + ))); } } @@ -221,16 +213,19 @@ impl EnumTypeGen { impl<'v, V> EnumTypeGen where Value<'v>: Equivalent, - V: ValueLike<'v> + 'v + EnumCell, + V: ValueLike<'v> + EnumCell, { pub(crate) fn ty_enum_data(&self) -> Option<&Arc> { V::get_ty(&self.ty_enum_data) } - pub(crate) fn construct(&self, val: Value<'v>) -> anyhow::Result { + pub(crate) fn construct(&self, val: Value<'v>) -> crate::Result { match self.elements().get_hashed_by_value(val.get_hashed()?) { Some(v) => Ok(*v), - None => Err(EnumError::InvalidElement(val.to_str(), self.to_string()).into()), + None => Err(crate::Error::new_other(EnumError::InvalidElement( + val.to_str(), + self.to_string(), + ))), } } } @@ -240,26 +235,28 @@ impl<'v, V> StarlarkValue<'v> for EnumTypeGen where Self: ProvidesStaticType<'v>, Value<'v>: Equivalent, - V: ValueLike<'v> + 'v + EnumCell, + V: ValueLike<'v> + EnumCell, { type Canonical = FrozenEnumType; + // TODO(nga): replace `Color("RED")` with `Color.RED`. + // https://www.internalfb.com/tasks/?t=183515013 fn invoke( &self, _me: Value<'v>, args: &Arguments<'v, '_>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result> { args.no_named_args()?; let val = args.positional1(eval.heap())?; Ok(self.construct(val)?.to_value()) } - fn length(&self) -> anyhow::Result { + fn length(&self) -> crate::Result { Ok(self.elements().len() as i32) } - fn at(&self, index: Value, _heap: &'v Heap) -> anyhow::Result> { + fn at(&self, index: Value, _heap: &'v Heap) -> crate::Result> { let i = convert_index(index, self.elements().len() as i32)? as usize; // Must be in the valid range since convert_index checks that, so just unwrap Ok(self @@ -270,7 +267,7 @@ where .to_value()) } - unsafe fn iterate(&self, me: Value<'v>, _heap: &'v Heap) -> anyhow::Result> { + unsafe fn iterate(&self, me: Value<'v>, _heap: &'v Heap) -> crate::Result> { Ok(me) } @@ -299,7 +296,11 @@ where self.ty_enum_data().map(|t| t.ty_enum_type.dupe()) } - fn export_as(&self, variable_name: &str, _eval: &mut Evaluator<'v, '_>) -> anyhow::Result<()> { + fn export_as( + &self, + variable_name: &str, + _eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result<()> { V::get_or_init_ty(&self.ty_enum_data, || { let ty_enum_value = Ty::custom(TyUser::new( variable_name.to_owned(), @@ -320,11 +321,14 @@ where result: ty_enum_value.dupe(), }), iter_item: Some(ty_enum_value.dupe()), - callable: Some(TyFunction::new( - vec![Param::pos_only( - // TODO(nga): we can do better parameter type. - Ty::any(), - )], + callable: Some(TyCallable::new( + ParamSpec::pos_only( + [ + // TODO(nga): we can do better parameter type. 
+ Ty::any(), + ], + [], + ), ty_enum_value.dupe(), )), ..TyUserParams::default() @@ -410,7 +414,7 @@ def g(x): g(Season[0]) "#, - r#"Value `"SPRING"` of type `enum` does not match the type annotation `Color` for argument `x`"#, + r#"Value `Season("SPRING")` of type `enum` does not match the type annotation `Color` for argument `x`"#, ); } diff --git a/starlark-rust/starlark/src/values/types/enumeration/globals.rs b/starlark-rust/starlark/src/values/types/enumeration/globals.rs index f29634acfa193..8cbc9808cf2d8 100644 --- a/starlark-rust/starlark/src/values/types/enumeration/globals.rs +++ b/starlark-rust/starlark/src/values/types/enumeration/globals.rs @@ -21,6 +21,7 @@ use starlark_derive::starlark_module; use crate as starlark; use crate::environment::GlobalsBuilder; use crate::values::enumeration::EnumType; +use crate::values::tuple::UnpackTuple; use crate::values::Heap; use crate::values::StringValue; use crate::values::Value; @@ -32,26 +33,26 @@ pub fn register_enum(builder: &mut GlobalsBuilder) { /// For example: /// /// ```python - /// MyEnum = enum("option1", "option2", True) + /// MyEnum = enum("option1", "option2", "option3") /// ``` /// - /// This statement defines an enumeration `MyEnum` that consists of the three values `"option1"`, `"option2"` and `True`. + /// This statement defines an enumeration `MyEnum` that consists of the three values `"option1"`, `"option2"` and `"option3"`. /// /// Now that `MyEnum` is defined, it's possible to do the following: /// /// * Create values of this type with `MyEnum("option2")`. It is a runtime error if the argument is not one of the predeclared values of the enumeration. - /// * Get the type of the enum suitable for a type annotation with `MyEnum.type`. - /// * Given a value of the enum (for example, `v = MyEnum("option2")`), get the underlying value `v.value == "option2"` or the index in the enumeration `v.index = 1`. - /// * Get a list of the values that make up the array with `MyEnum.values() == ["option1", "option2", True]`. - /// * Treat `MyEnum` a bit like an array, with `len(MyEnum) == 3`, `MyEnum[1] == MyEnum("option2")` and iteration over enums `[x.value for x in MyEnum] == ["option1", "option2", True]`. + /// * Get the type of the enum suitable for a type annotation with `MyEnum`. + /// * Given a value of the enum (for example, `v = MyEnum("option2")`), get the underlying value `v.value == "option2"` or the index in the enumeration `v.index == 1`. + /// * Get a list of the values that make up the enumeration with `MyEnum.values() == ["option1", "option2", "option3"]`. + /// * Treat `MyEnum` a bit like an array, with `len(MyEnum) == 3`, `MyEnum[1] == MyEnum("option2")` and iteration over enums `[x.value for x in MyEnum] == ["option1", "option2", "option3"]`. /// /// Enumeration types store each value once; enumeration values then reference the stored values efficiently.
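Before the function body below, a quick sanity check of the behaviours the docstring promises, written with the crate's own `assert` test helper (the `assert_eq` builtin is part of that test environment):

```rust
use starlark::assert;

#[test]
fn enum_docstring_demo() {
    // Exercises construction, `.value`, `.index`, `len`, indexing,
    // and iteration, exactly as the docstring above lists them.
    assert::pass(
        r#"
MyEnum = enum("option1", "option2", "option3")
v = MyEnum("option2")
assert_eq(v.value, "option2")
assert_eq(v.index, 1)
assert_eq(len(MyEnum), 3)
assert_eq(MyEnum[1], v)
assert_eq([x.value for x in MyEnum], ["option1", "option2", "option3"])
"#,
    );
}
```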
fn r#enum<'v>( - #[starlark(args)] args: Vec>, + #[starlark(args)] args: UnpackTuple>, heap: &'v Heap, - ) -> anyhow::Result> { + ) -> starlark::Result> { // Every Value must either be a field or a value (the type) - EnumType::new(args, heap) + EnumType::new(args.items, heap) } } @@ -91,20 +92,16 @@ enum_type("option3")"#, ); assert::pass( r#" -# @starlark-rust: allow_string_literals_in_type_expr - enum_type = enum("option1","option2") -def foo(x: enum_type) -> "enum_type": +def foo(x: enum_type) -> enum_type: return x foo(enum_type("option1"))"#, ); assert::pass( r#" -# @starlark-rust: allow_string_literals_in_type_expr - v = [enum("option1","option2")] v_0 = v[0] -def foo(y: v_0) -> "enum": +def foo(y: v_0) -> v_0: # TODO(nga): fails at compile time. return noop(y) foo(v[0]("option1"))"#, @@ -122,7 +119,7 @@ assert_eq([x.value for x in enum_type], ["option1","option2"])"#, enum_type = enum("option1","option2") x = enum_type("option1") assert_eq(str(enum_type), "enum(\"option1\", \"option2\")") -assert_eq(str(x), "\"option1\"") +assert_eq(str(x), "enum_type(\"option1\")") "#, ); assert::pass( @@ -182,6 +179,17 @@ diff = enum("one") assert_ne(r1("one"), rt("one")) assert_ne(rt("one"), r2("one")) assert_ne(rt("one"), diff("one")) +"#, + ); + } + + #[test] + fn test_enum_repr() { + assert::pass( + r#" +enum_type = enum("option1", "option2") +assert_eq("enum_type(\"option1\")", repr(enum_type("option1"))) +assert_eq("enum()(\"option1\")", repr(enum("option1", "option2")("option1"))) "#, ); } diff --git a/starlark-rust/starlark/src/values/types/enumeration/value.rs b/starlark-rust/starlark/src/values/types/enumeration/value.rs index 1d688138aec3e..8d087853f4af7 100644 --- a/starlark-rust/starlark/src/values/types/enumeration/value.rs +++ b/starlark-rust/starlark/src/values/types/enumeration/value.rs @@ -43,6 +43,7 @@ use crate::values::enumeration::enum_type::FrozenEnumType; use crate::values::types::type_instance_id::TypeInstanceId; use crate::values::StarlarkValue; use crate::values::Value; +use crate::values::ValueLifetimeless; use crate::values::ValueLike; /// A value from an enumeration. @@ -57,7 +58,7 @@ use crate::values::ValueLike; )] #[repr(C)] #[derivative(Debug)] -pub struct EnumValueGen { +pub struct EnumValueGen { // Must ignore value.typ or type.elements, since they are circular #[derivative(Debug = "ignore")] pub(crate) typ: V, // Must be EnumType it points back to (so it can get the type) @@ -66,9 +67,31 @@ pub struct EnumValueGen { pub(crate) id: TypeInstanceId, } -impl Display for EnumValueGen { +impl<'v, V: ValueLike<'v>> Display for EnumValueGen { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.value.fmt(f) + let ty_enum_data = match self.get_enum_type() { + Either::Left(x) => x.ty_enum_data(), + Either::Right(x) => x.ty_enum_data(), + }; + match ty_enum_data { + Some(ty_enum_data) => { + { + write!(f, "{}", &ty_enum_data.name)?; + write!(f, "(")?; + Display::fmt(&self.value, f)?; + write!(f, ")")? + }; + Ok(()) + } + None => { + { + write!(f, "enum()(")?; + Display::fmt(&self.value, f)?; + write!(f, ")")? 
+ }; + Ok(()) + } + } } } @@ -86,25 +109,11 @@ impl<'v, V: ValueLike<'v>> EnumValueGen { } #[starlark_value(type = EnumValue::TYPE)] -impl<'v, V: ValueLike<'v> + 'v> StarlarkValue<'v> for EnumValueGen +impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for EnumValueGen where Self: ProvidesStaticType<'v>, { - fn matches_type(&self, ty: &str) -> bool { - if ty == EnumValue::TYPE { - return true; - } - let ty_enum_data = match self.get_enum_type() { - Either::Left(x) => x.ty_enum_data(), - Either::Right(x) => x.ty_enum_data(), - }; - match ty_enum_data { - Some(ty_enum_data) => ty_enum_data.name == ty, - None => false, - } - } - - fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { + fn write_hash(&self, hasher: &mut StarlarkHasher) -> crate::Result<()> { self.value.write_hash(hasher) } diff --git a/starlark-rust/starlark/src/values/types/exported_name.rs b/starlark-rust/starlark/src/values/types/exported_name.rs index 0396fb45e61f7..5677ae0c87546 100644 --- a/starlark-rust/starlark/src/values/types/exported_name.rs +++ b/starlark-rust/starlark/src/values/types/exported_name.rs @@ -63,24 +63,37 @@ impl<'a> Eq for BorrowedExportedName<'a> {} /// ``` /// use allocative::Allocative; /// use starlark::eval::Evaluator; -/// use starlark::values::exported_name::{ExportedName, FrozenExportedName}; +/// use starlark::values::exported_name::ExportedName; +/// use starlark::values::exported_name::FrozenExportedName; /// use starlark::values::StarlarkValue; -/// use starlark_derive::{NoSerialize, ProvidesStaticType, starlark_value}; +/// use starlark_derive::starlark_value; +/// use starlark_derive::NoSerialize; +/// use starlark_derive::ProvidesStaticType; /// -/// #[derive(Debug, NoSerialize, ProvidesStaticType, Allocative, derive_more::Display)] -/// #[display(fmt = "{:?}", "self")] +/// #[derive( +/// Debug, +/// NoSerialize, +/// ProvidesStaticType, +/// Allocative, +/// derive_more::Display +/// )] +/// #[display("{:?}", self)] /// struct MyStruct { -/// name: T, +/// name: T, /// } /// /// #[starlark_value(type = "MyStruct")] /// impl<'v, T: ExportedName> StarlarkValue<'v> for MyStruct { -/// type Canonical = MyStruct; +/// type Canonical = MyStruct; /// -/// fn export_as(&self, variable_name: &str, _eval: &mut Evaluator<'v, '_>) -> anyhow::Result<()> { -/// self.name.try_export_as(variable_name); -/// Ok(()) -/// } +/// fn export_as( +/// &self, +/// variable_name: &str, +/// _eval: &mut Evaluator<'v, '_, '_>, +/// ) -> starlark::Result<()> { +/// self.name.try_export_as(variable_name); +/// Ok(()) +/// } /// } /// ``` /// diff --git a/starlark-rust/starlark/src/values/types/float.rs b/starlark-rust/starlark/src/values/types/float.rs index 2be5c7e02247d..5f6f80d0e5a7f 100644 --- a/starlark-rust/starlark/src/values/types/float.rs +++ b/starlark-rust/starlark/src/values/types/float.rs @@ -17,462 +17,9 @@ //! The floating point number type (3.14, 4e2). 
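Stepping back to the enumeration hunks for a moment: the reworked `Display` impl above renders a value as `Name("value")` once its enum type has been exported under a name, falling back to `enum()("value")` for an anonymous type. A sketch mirroring the `test_enum_repr` expectations from the globals hunk:

```rust
use starlark::assert;

#[test]
fn enum_repr_demo() {
    assert::pass(
        r#"
Color = enum("RED", "GREEN")
# Exported enums print with the variable name they were bound to...
assert_eq(repr(Color("RED")), "Color(\"RED\")")
# ...while an anonymous enum falls back to the `enum()` placeholder.
assert_eq(repr(enum("RED")("RED")), "enum()(\"RED\")")
"#,
    );
}
```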
-use std::cmp::Ordering; -use std::fmt; -use std::fmt::Display; -use std::fmt::Write; -use std::hash::Hasher; +pub(crate) mod float; +pub(crate) mod globals; +pub(crate) mod unpack; -use allocative::Allocative; -use dupe::Dupe; -use serde::Serialize; -use starlark_derive::starlark_value; -use starlark_derive::StarlarkDocs; -use starlark_map::StarlarkHashValue; - -use crate as starlark; -use crate::any::ProvidesStaticType; -use crate::collections::StarlarkHasher; -use crate::private::Private; -use crate::typing::Ty; -use crate::typing::TyBasic; -use crate::typing::TypingBinOp; -use crate::values::num::typecheck::typecheck_num_bin_op; -use crate::values::num::typecheck::NumTy; -use crate::values::num::value::NumRef; -use crate::values::type_repr::StarlarkTypeRepr; -use crate::values::AllocFrozenValue; -use crate::values::AllocValue; -use crate::values::FrozenHeap; -use crate::values::FrozenValue; -use crate::values::Heap; -use crate::values::StarlarkValue; -use crate::values::UnpackValue; -use crate::values::Value; -use crate::values::ValueError; -use crate::values::ValueLike; - -const WRITE_PRECISION: usize = 6; - -fn write_non_finite(output: &mut W, f: f64) -> fmt::Result { - debug_assert!(f.is_nan() || f.is_infinite()); - if f.is_nan() { - write!(output, "nan") - } else { - write!( - output, - "{}inf", - if f.is_sign_positive() { "+" } else { "-" } - ) - } -} - -pub(crate) fn write_decimal(output: &mut W, f: f64) -> fmt::Result { - if !f.is_finite() { - write_non_finite(output, f) - } else { - write!(output, "{:.prec$}", f, prec = WRITE_PRECISION) - } -} - -pub(crate) fn write_scientific( - output: &mut W, - f: f64, - exponent_char: char, - strip_trailing_zeros: bool, -) -> fmt::Result { - if !f.is_finite() { - write_non_finite(output, f) - } else { - let abs = f.abs(); - let exponent = if f == 0.0 { - 0 - } else { - abs.log10().floor() as i32 - }; - let normal = if f == 0.0 { - 0.0 - } else { - abs / 10f64.powf(exponent as f64) - }; - - // start with "-" for a negative number - if f.is_sign_negative() { - output.write_char('-')? 
- } - - // use the whole integral part of normal (a single digit) - output.write_fmt(format_args!("{}", normal.trunc()))?; - - // calculate the fractional tail for given precision - let mut tail = (normal.fract() * 10f64.powf(WRITE_PRECISION as f64)).round() as u64; - let mut rev_tail = [0u8; WRITE_PRECISION]; - let mut rev_tail_len = 0; - let mut removing_trailing_zeros = strip_trailing_zeros; - for _ in 0..WRITE_PRECISION { - let tail_digit = tail % 10; - if tail_digit != 0 || !removing_trailing_zeros { - removing_trailing_zeros = false; - rev_tail[rev_tail_len] = tail_digit as u8; - rev_tail_len += 1; - } - tail /= 10; - } - - // write fractional part - if rev_tail_len != 0 { - output.write_char('.')?; - } - for digit in rev_tail[0..rev_tail_len].iter().rev() { - output.write_char((b'0' + digit) as char)?; - } - - // add exponent part - output.write_char(exponent_char)?; - output.write_fmt(format_args!("{:+03}", exponent)) - } -} - -pub(crate) fn write_compact( - output: &mut W, - f: f64, - exponent_char: char, -) -> fmt::Result { - if !f.is_finite() { - write_non_finite(output, f) - } else { - let abs = f.abs(); - let exponent = if f == 0.0 { - 0 - } else { - abs.log10().floor() as i32 - }; - - if exponent.abs() >= WRITE_PRECISION as i32 { - // use scientific notation if exponent is outside of our precision (but strip 0s) - write_scientific(output, f, exponent_char, true) - } else if f.fract() == 0.0 { - // make sure there's a fractional part even if the number doesn't have it - output.write_fmt(format_args!("{:.1}", f)) - } else { - // rely on the built-in formatting otherwise - output.write_fmt(format_args!("{}", f)) - } - } -} - -/// Runtime representation of Starlark `float` type. -#[derive( - Clone, - Dupe, - Copy, - Debug, - ProvidesStaticType, - Serialize, - StarlarkDocs, - Allocative -)] -#[starlark_docs(builtin = "standard")] -#[serde(transparent)] -pub struct StarlarkFloat(pub f64); - -impl StarlarkFloat { - /// The result of calling `type()` on floats. - pub const TYPE: &'static str = "float"; - - pub(crate) fn compare_impl(a: f64, b: f64) -> Ordering { - // According to the spec (https://github.com/bazelbuild/starlark/blob/689f54426951638ef5b7c41a14d8fc48e65c5f77/spec.md#floating-point-numbers) - // All NaN values compare equal to each other, but greater than any non-NaN float value. 
- if let Some(ord) = a.partial_cmp(&b) { - ord - } else { - a.is_nan().cmp(&b.is_nan()) - } - } - - pub(crate) fn floor_div_impl(a: f64, b: f64) -> anyhow::Result { - if b == 0.0 { - Err(ValueError::DivisionByZero.into()) - } else { - Ok((a / b).floor()) - } - } - - pub(crate) fn percent_impl(a: f64, b: f64) -> anyhow::Result { - if b == 0.0 { - Err(ValueError::DivisionByZero.into()) - } else { - let r = a % b; - if r == 0.0 { - Ok(0.0) - } else { - Ok(if b.signum() != r.signum() { r + b } else { r }) - } - } - } -} - -impl StarlarkTypeRepr for f64 { - fn starlark_type_repr() -> Ty { - StarlarkFloat::starlark_type_repr() - } -} - -impl<'v> AllocValue<'v> for StarlarkFloat { - fn alloc_value(self, heap: &'v Heap) -> Value<'v> { - heap.alloc_simple(self) - } -} - -impl AllocFrozenValue for StarlarkFloat { - fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue { - heap.alloc_simple(self) - } -} - -impl<'v> AllocValue<'v> for f64 { - fn alloc_value(self, heap: &'v Heap) -> Value<'v> { - heap.alloc(StarlarkFloat(self)) - } -} - -impl AllocFrozenValue for f64 { - fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue { - heap.alloc(StarlarkFloat(self)) - } -} - -/// Allows only a float - an int will not be accepted. -impl<'v> UnpackValue<'v> for StarlarkFloat { - fn unpack_value(value: Value<'v>) -> Option { - Some(*value.downcast_ref::()?) - } -} - -/// Allows either a float or an int. If the int is not in the range of a float, it will lose precision. -impl<'v> UnpackValue<'v> for f64 { - fn unpack_value(value: Value<'v>) -> Option { - value.unpack_num().map(|x| x.as_float()) - } -} - -impl Display for StarlarkFloat { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write_compact(f, self.0, 'e') - } -} - -#[starlark_value(type = StarlarkFloat::TYPE)] -impl<'v> StarlarkValue<'v> for StarlarkFloat { - fn equals(&self, other: Value) -> anyhow::Result { - Ok(Some(NumRef::Float(self.0)) == other.unpack_num()) - } - - fn collect_repr(&self, s: &mut String) { - write!(s, "{}", self).unwrap() - } - - fn to_bool(&self) -> bool { - self.0 != 0.0 - } - - fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { - hasher.write_u64(NumRef::from(self.0).get_hash_64()); - Ok(()) - } - - fn get_hash(&self, _private: Private) -> anyhow::Result { - Ok(NumRef::Float(self.0).get_hash()) - } - - fn plus(&self, heap: &'v Heap) -> anyhow::Result> { - Ok(heap.alloc(*self)) - } - - fn minus(&self, heap: &'v Heap) -> anyhow::Result> { - Ok(heap.alloc(StarlarkFloat(-self.0))) - } - - fn add(&self, other: Value, heap: &'v Heap) -> Option>> { - Some(Ok(heap.alloc(NumRef::Float(self.0) + other.unpack_num()?))) - } - - fn sub(&self, other: Value, heap: &'v Heap) -> anyhow::Result> { - match other.unpack_num() { - None => ValueError::unsupported_with(self, "-", other), - Some(other) => Ok(heap.alloc(NumRef::Float(self.0) - other)), - } - } - - fn mul(&self, other: Value<'v>, heap: &'v Heap) -> Option>> { - Some(Ok(heap.alloc(NumRef::Float(self.0) * other.unpack_num()?))) - } - - fn div(&self, other: Value, heap: &'v Heap) -> anyhow::Result> { - match other.unpack_num() { - None => ValueError::unsupported_with(self, "/", other), - Some(other) => Ok(heap.alloc(NumRef::Float(self.0).div(other)?)), - } - } - - fn percent(&self, other: Value, heap: &'v Heap) -> anyhow::Result> { - match other.unpack_num() { - Some(other) => Ok(heap.alloc(NumRef::Float(self.0).percent(other)?)), - None => ValueError::unsupported_with(self, "%", other), - } - } - - fn floor_div(&self, other: Value, heap: 
&'v Heap) -> anyhow::Result> { - match other.unpack_num() { - None => ValueError::unsupported_with(self, "//", other), - Some(other) => Ok(heap.alloc(NumRef::Float(self.0).floor_div(other)?)), - } - } - - fn bin_op_ty(op: TypingBinOp, rhs: &TyBasic) -> Option { - typecheck_num_bin_op(NumTy::Float, op, rhs) - } - - fn compare(&self, other: Value) -> anyhow::Result { - match other.unpack_num() { - None => ValueError::unsupported_with(self, "compare", other), - Some(other) => Ok(NumRef::Float(self.0).cmp(&other)), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::assert; - use crate::assert::Assert; - - fn non_finite(f: f64) -> String { - let mut buf = String::new(); - write_non_finite(&mut buf, f).unwrap(); - buf - } - - #[test] - fn test_write_non_finite() { - assert_eq!(non_finite(f64::NAN), "nan"); - assert_eq!(non_finite(f64::INFINITY), "+inf"); - assert_eq!(non_finite(f64::NEG_INFINITY), "-inf"); - } - - fn decimal(f: f64) -> String { - let mut buf = String::new(); - write_decimal(&mut buf, f).unwrap(); - buf - } - - #[test] - fn test_write_decimal() { - assert_eq!(decimal(f64::NAN), "nan"); - assert_eq!(decimal(f64::INFINITY), "+inf"); - assert_eq!(decimal(f64::NEG_INFINITY), "-inf"); - - assert_eq!(decimal(0f64), "0.000000"); - assert_eq!(decimal(std::f64::consts::PI), "3.141593"); - assert_eq!(decimal(-std::f64::consts::E), "-2.718282"); - assert_eq!(decimal(1e10), "10000000000.000000"); - } - - fn scientific(f: f64) -> String { - let mut buf = String::new(); - write_scientific(&mut buf, f, 'e', false).unwrap(); - buf - } - - #[test] - fn test_write_scientific() { - assert_eq!(scientific(f64::NAN), "nan"); - assert_eq!(scientific(f64::INFINITY), "+inf"); - assert_eq!(scientific(f64::NEG_INFINITY), "-inf"); - - assert_eq!(scientific(0f64), "0.000000e+00"); - assert_eq!(scientific(-0f64), "-0.000000e+00"); - assert_eq!(scientific(1.23e45), "1.230000e+45"); - assert_eq!(scientific(-3.14e-145), "-3.140000e-145"); - assert_eq!(scientific(1e300), "1.000000e+300"); - } - - fn compact(f: f64) -> String { - let mut buf = String::new(); - write_compact(&mut buf, f, 'e').unwrap(); - buf - } - - #[test] - fn test_write_compact() { - assert_eq!(compact(f64::NAN), "nan"); - assert_eq!(compact(f64::INFINITY), "+inf"); - assert_eq!(compact(f64::NEG_INFINITY), "-inf"); - - assert_eq!(compact(0f64), "0.0"); - assert_eq!(compact(std::f64::consts::PI), "3.141592653589793"); - assert_eq!(compact(-std::f64::consts::E), "-2.718281828459045"); - assert_eq!(compact(1e10), "1e+10"); - assert_eq!(compact(1.23e45), "1.23e+45"); - assert_eq!(compact(-3.14e-145), "-3.14e-145"); - assert_eq!(compact(1e300), "1e+300"); - } - - #[test] - fn test_arithmetic_operators() { - assert::all_true( - r#" -+1.0 == 1.0 --1.0 == 0. - 1. -1.0 + 2.0 == 3.0 -1.0 - 2.0 == -1.0 -2.0 * 3.0 == 6.0 -5.0 / 2.0 == 2.5 -5.0 % 3.0 == 2.0 -5.0 // 2.0 == 2.0 -"#, - ); - } - - #[test] - fn test_dictionary_key() { - assert::pass( - r#" -x = {0: 123} -assert_eq(x[0], 123) -# TODO(nga): fix typechecker, and remove `noop`. -assert_eq(x[noop(0.0)], 123) -assert_eq(x[noop(-0.0)], 123) -assert_eq(1 in x, False) - "#, - ); - } - - #[test] - fn test_comparisons() { - let mut a = Assert::new(); - // TODO(nga): fix and enable. 
- a.disable_static_typechecking(); - a.all_true( - r#" -+0.0 == -0.0 -0.0 == 0 -0 == 0.0 -0 < 1.0 -0.0 < 1 -1 > 0.0 -1.0 > 0 -0.0 < float("nan") -float("+inf") < float("nan") -"#, - ); - } - - #[test] - fn test_comparisons_by_sorting() { - assert::eq( - "sorted([float('inf'), float('-inf'), float('nan'), 1e300, -1e300, 1.0, -1.0, 1, -1, 1e-300, -1e-300, 0, 0.0, float('-0.0'), 1e-300, -1e-300])", - "[float('-inf'), -1e+300, -1.0, -1, -1e-300, -1e-300, 0, 0.0, -0.0, 1e-300, 1e-300, 1.0, 1, 1e+300, float('+inf'), float('nan')]", - ); - } -} +pub use crate::values::types::float::float::StarlarkFloat; +pub use crate::values::types::float::unpack::UnpackFloat; diff --git a/starlark-rust/starlark/src/values/types/float/float.rs b/starlark-rust/starlark/src/values/types/float/float.rs new file mode 100644 index 0000000000000..8a9f113996427 --- /dev/null +++ b/starlark-rust/starlark/src/values/types/float/float.rs @@ -0,0 +1,466 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::cmp::Ordering; +use std::convert::Infallible; +use std::fmt; +use std::fmt::Display; +use std::fmt::Write; +use std::hash::Hasher; + +use allocative::Allocative; +use dupe::Dupe; +use serde::Serialize; +use starlark_derive::starlark_value; +use starlark_map::StarlarkHashValue; + +use crate as starlark; +use crate::any::ProvidesStaticType; +use crate::collections::StarlarkHasher; +use crate::private::Private; +use crate::typing::Ty; +use crate::typing::TyBasic; +use crate::typing::TypingBinOp; +use crate::values::type_repr::StarlarkTypeRepr; +use crate::values::types::num::typecheck::typecheck_num_bin_op; +use crate::values::types::num::typecheck::NumTy; +use crate::values::types::num::value::NumRef; +use crate::values::AllocFrozenValue; +use crate::values::AllocValue; +use crate::values::FrozenHeap; +use crate::values::FrozenValue; +use crate::values::Heap; +use crate::values::StarlarkValue; +use crate::values::UnpackValue; +use crate::values::Value; +use crate::values::ValueError; +use crate::values::ValueLike; + +const WRITE_PRECISION: usize = 6; + +fn write_non_finite(output: &mut W, f: f64) -> fmt::Result { + debug_assert!(f.is_nan() || f.is_infinite()); + if f.is_nan() { + write!(output, "nan") + } else { + write!( + output, + "{}inf", + if f.is_sign_positive() { "+" } else { "-" } + ) + } +} + +pub(crate) fn write_decimal(output: &mut W, f: f64) -> fmt::Result { + if !f.is_finite() { + write_non_finite(output, f) + } else { + write!(output, "{:.prec$}", f, prec = WRITE_PRECISION) + } +} + +pub(crate) fn write_scientific( + output: &mut W, + f: f64, + exponent_char: char, + strip_trailing_zeros: bool, +) -> fmt::Result { + if !f.is_finite() { + write_non_finite(output, f) + } else { + let abs = f.abs(); + let exponent = if f == 0.0 { + 0 + } else { + abs.log10().floor() as i32 + }; + let normal = if f == 0.0 { + 0.0 + } else { + abs / 10f64.powf(exponent as f64) + }; + + // start with "-" 
for a negative number + if f.is_sign_negative() { + output.write_char('-')? + } + + // use the whole integral part of normal (a single digit) + output.write_fmt(format_args!("{}", normal.trunc()))?; + + // calculate the fractional tail for given precision + let mut tail = (normal.fract() * 10f64.powf(WRITE_PRECISION as f64)).round() as u64; + let mut rev_tail = [0u8; WRITE_PRECISION]; + let mut rev_tail_len = 0; + let mut removing_trailing_zeros = strip_trailing_zeros; + for _ in 0..WRITE_PRECISION { + let tail_digit = tail % 10; + if tail_digit != 0 || !removing_trailing_zeros { + removing_trailing_zeros = false; + rev_tail[rev_tail_len] = tail_digit as u8; + rev_tail_len += 1; + } + tail /= 10; + } + + // write fractional part + if rev_tail_len != 0 { + output.write_char('.')?; + } + for digit in rev_tail[0..rev_tail_len].iter().rev() { + output.write_char((b'0' + digit) as char)?; + } + + // add exponent part + output.write_char(exponent_char)?; + output.write_fmt(format_args!("{:+03}", exponent)) + } +} + +pub(crate) fn write_compact( + output: &mut W, + f: f64, + exponent_char: char, +) -> fmt::Result { + if !f.is_finite() { + write_non_finite(output, f) + } else { + let abs = f.abs(); + let exponent = if f == 0.0 { + 0 + } else { + abs.log10().floor() as i32 + }; + + if exponent.abs() >= WRITE_PRECISION as i32 { + // use scientific notation if exponent is outside of our precision (but strip 0s) + write_scientific(output, f, exponent_char, true) + } else if f.fract() == 0.0 { + // make sure there's a fractional part even if the number doesn't have it + output.write_fmt(format_args!("{:.1}", f)) + } else { + // rely on the built-in formatting otherwise + output.write_fmt(format_args!("{}", f)) + } + } +} + +/// Runtime representation of Starlark `float` type. +#[derive(Clone, Dupe, Copy, Debug, ProvidesStaticType, Serialize, Allocative)] +#[serde(transparent)] +pub struct StarlarkFloat(pub f64); + +impl StarlarkFloat { + /// The result of calling `type()` on floats. + pub const TYPE: &'static str = "float"; + + pub(crate) fn compare_impl(a: f64, b: f64) -> Ordering { + // According to the spec (https://github.com/bazelbuild/starlark/blob/689f54426951638ef5b7c41a14d8fc48e65c5f77/spec.md#floating-point-numbers) + // All NaN values compare equal to each other, but greater than any non-NaN float value. 
+ if let Some(ord) = a.partial_cmp(&b) { + ord + } else { + a.is_nan().cmp(&b.is_nan()) + } + } + + pub(crate) fn floor_div_impl(a: f64, b: f64) -> anyhow::Result { + if b == 0.0 { + Err(ValueError::DivisionByZero.into()) + } else { + Ok((a / b).floor()) + } + } + + pub(crate) fn percent_impl(a: f64, b: f64) -> anyhow::Result { + if b == 0.0 { + Err(ValueError::DivisionByZero.into()) + } else { + let r = a % b; + if r == 0.0 { + Ok(0.0) + } else { + Ok(if b.signum() != r.signum() { r + b } else { r }) + } + } + } +} + +impl StarlarkTypeRepr for f64 { + type Canonical = ::Canonical; + + fn starlark_type_repr() -> Ty { + StarlarkFloat::starlark_type_repr() + } +} + +impl<'v> AllocValue<'v> for StarlarkFloat { + fn alloc_value(self, heap: &'v Heap) -> Value<'v> { + heap.alloc_simple(self) + } +} + +impl AllocFrozenValue for StarlarkFloat { + fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue { + heap.alloc_simple(self) + } +} + +impl<'v> AllocValue<'v> for f64 { + fn alloc_value(self, heap: &'v Heap) -> Value<'v> { + heap.alloc(StarlarkFloat(self)) + } +} + +impl AllocFrozenValue for f64 { + fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue { + heap.alloc(StarlarkFloat(self)) + } +} + +/// Allows only a float - an int will not be accepted. +impl<'v> UnpackValue<'v> for StarlarkFloat { + type Error = Infallible; + + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + let Some(value) = value.downcast_ref::() else { + return Ok(None); + }; + Ok(Some(*value)) + } +} + +impl Display for StarlarkFloat { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write_compact(f, self.0, 'e') + } +} + +#[starlark_value(type = StarlarkFloat::TYPE)] +impl<'v> StarlarkValue<'v> for StarlarkFloat { + fn equals(&self, other: Value) -> crate::Result { + Ok(Some(NumRef::Float(StarlarkFloat(self.0))) == other.unpack_num()) + } + + fn collect_repr(&self, s: &mut String) { + write!(s, "{}", self).unwrap() + } + + fn to_bool(&self) -> bool { + self.0 != 0.0 + } + + fn write_hash(&self, hasher: &mut StarlarkHasher) -> crate::Result<()> { + hasher.write_u64(NumRef::from(self.0).get_hash_64()); + Ok(()) + } + + fn get_hash(&self, _private: Private) -> crate::Result { + Ok(NumRef::Float(*self).get_hash()) + } + + fn plus(&self, heap: &'v Heap) -> crate::Result> { + Ok(heap.alloc(*self)) + } + + fn minus(&self, heap: &'v Heap) -> crate::Result> { + Ok(heap.alloc(StarlarkFloat(-self.0))) + } + + fn add(&self, other: Value, heap: &'v Heap) -> Option>> { + Some(Ok(heap.alloc(NumRef::Float(*self) + other.unpack_num()?))) + } + + fn sub(&self, other: Value, heap: &'v Heap) -> crate::Result> { + match other.unpack_num() { + None => ValueError::unsupported_with(self, "-", other), + Some(other) => Ok(heap.alloc(NumRef::Float(*self) - other)), + } + } + + fn mul(&self, other: Value<'v>, heap: &'v Heap) -> Option>> { + Some(Ok(heap.alloc(NumRef::Float(*self) * other.unpack_num()?))) + } + + fn div(&self, other: Value, heap: &'v Heap) -> crate::Result> { + match other.unpack_num() { + None => ValueError::unsupported_with(self, "/", other), + Some(other) => Ok(heap.alloc(NumRef::Float(*self).div(other)?)), + } + } + + fn percent(&self, other: Value, heap: &'v Heap) -> crate::Result> { + match other.unpack_num() { + Some(other) => Ok(heap.alloc(NumRef::Float(*self).percent(other)?)), + None => ValueError::unsupported_with(self, "%", other), + } + } + + fn floor_div(&self, other: Value, heap: &'v Heap) -> crate::Result> { + match other.unpack_num() { + None => 
ValueError::unsupported_with(self, "//", other), + Some(other) => Ok(heap.alloc(NumRef::Float(*self).floor_div(other)?)), + } + } + + fn bin_op_ty(op: TypingBinOp, rhs: &TyBasic) -> Option { + typecheck_num_bin_op(NumTy::Float, op, rhs) + } + + fn compare(&self, other: Value) -> crate::Result { + match other.unpack_num() { + None => ValueError::unsupported_with(self, "compare", other), + Some(other) => Ok(NumRef::Float(*self).cmp(&other)), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::assert; + use crate::assert::Assert; + + fn non_finite(f: f64) -> String { + let mut buf = String::new(); + write_non_finite(&mut buf, f).unwrap(); + buf + } + + #[test] + fn test_write_non_finite() { + assert_eq!(non_finite(f64::NAN), "nan"); + assert_eq!(non_finite(f64::INFINITY), "+inf"); + assert_eq!(non_finite(f64::NEG_INFINITY), "-inf"); + } + + fn decimal(f: f64) -> String { + let mut buf = String::new(); + write_decimal(&mut buf, f).unwrap(); + buf + } + + #[test] + fn test_write_decimal() { + assert_eq!(decimal(f64::NAN), "nan"); + assert_eq!(decimal(f64::INFINITY), "+inf"); + assert_eq!(decimal(f64::NEG_INFINITY), "-inf"); + + assert_eq!(decimal(0f64), "0.000000"); + assert_eq!(decimal(std::f64::consts::PI), "3.141593"); + assert_eq!(decimal(-std::f64::consts::E), "-2.718282"); + assert_eq!(decimal(1e10), "10000000000.000000"); + } + + fn scientific(f: f64) -> String { + let mut buf = String::new(); + write_scientific(&mut buf, f, 'e', false).unwrap(); + buf + } + + #[test] + fn test_write_scientific() { + assert_eq!(scientific(f64::NAN), "nan"); + assert_eq!(scientific(f64::INFINITY), "+inf"); + assert_eq!(scientific(f64::NEG_INFINITY), "-inf"); + + assert_eq!(scientific(0f64), "0.000000e+00"); + assert_eq!(scientific(-0f64), "-0.000000e+00"); + assert_eq!(scientific(1.23e45), "1.230000e+45"); + assert_eq!(scientific(-3.14e-145), "-3.140000e-145"); + assert_eq!(scientific(1e300), "1.000000e+300"); + } + + fn compact(f: f64) -> String { + let mut buf = String::new(); + write_compact(&mut buf, f, 'e').unwrap(); + buf + } + + #[test] + fn test_write_compact() { + assert_eq!(compact(f64::NAN), "nan"); + assert_eq!(compact(f64::INFINITY), "+inf"); + assert_eq!(compact(f64::NEG_INFINITY), "-inf"); + + assert_eq!(compact(0f64), "0.0"); + assert_eq!(compact(std::f64::consts::PI), "3.141592653589793"); + assert_eq!(compact(-std::f64::consts::E), "-2.718281828459045"); + assert_eq!(compact(1e10), "1e+10"); + assert_eq!(compact(1.23e45), "1.23e+45"); + assert_eq!(compact(-3.14e-145), "-3.14e-145"); + assert_eq!(compact(1e300), "1e+300"); + } + + #[test] + fn test_arithmetic_operators() { + assert::all_true( + r#" ++1.0 == 1.0 +-1.0 == 0. - 1. +1.0 + 2.0 == 3.0 +1.0 - 2.0 == -1.0 +2.0 * 3.0 == 6.0 +5.0 / 2.0 == 2.5 +5.0 % 3.0 == 2.0 +5.0 // 2.0 == 2.0 +"#, + ); + } + + #[test] + fn test_dictionary_key() { + assert::pass( + r#" +x = {0: 123} +assert_eq(x[0], 123) +# TODO(nga): fix typechecker, and remove `noop`. +assert_eq(x[noop(0.0)], 123) +assert_eq(x[noop(-0.0)], 123) +assert_eq(1 in x, False) + "#, + ); + } + + #[test] + fn test_comparisons() { + let mut a = Assert::new(); + // TODO(nga): fix and enable. 
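An aside on the spec comment inside `compare_impl` above: the rule condenses to a single standalone function, shown here with a few worked cases. `starlark_float_cmp` is a hypothetical name used only for illustration:

```rust
use std::cmp::Ordering;

// `partial_cmp` on f64 only returns `None` when a NaN is involved;
// comparing the `is_nan()` flags then makes NaNs equal to each other
// and greater than every non-NaN float, exactly as the spec requires.
fn starlark_float_cmp(a: f64, b: f64) -> Ordering {
    a.partial_cmp(&b)
        .unwrap_or_else(|| a.is_nan().cmp(&b.is_nan()))
}

#[test]
fn nan_ordering_demo() {
    assert_eq!(starlark_float_cmp(f64::NAN, f64::NAN), Ordering::Equal);
    assert_eq!(starlark_float_cmp(f64::INFINITY, f64::NAN), Ordering::Less);
    assert_eq!(starlark_float_cmp(f64::NAN, 1.0), Ordering::Greater);
}
```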
+ a.disable_static_typechecking(); + a.all_true( + r#" ++0.0 == -0.0 +0.0 == 0 +0 == 0.0 +0 < 1.0 +0.0 < 1 +1 > 0.0 +1.0 > 0 +0.0 < float("nan") +float("+inf") < float("nan") +"#, + ); + } + + #[test] + fn test_comparisons_by_sorting() { + assert::eq( + "sorted([float('inf'), float('-inf'), float('nan'), 1e300, -1e300, 1.0, -1.0, 1, -1, 1e-300, -1e-300, 0, 0.0, float('-0.0'), 1e-300, -1e-300])", + "[float('-inf'), -1e+300, -1.0, -1, -1e-300, -1e-300, 0, 0.0, -0.0, 1e-300, 1e-300, 1.0, 1, 1e+300, float('+inf'), float('nan')]", + ); + } +} diff --git a/starlark-rust/starlark/src/values/types/float/globals.rs b/starlark-rust/starlark/src/values/types/float/globals.rs new file mode 100644 index 0000000000000..b8ae7cc030120 --- /dev/null +++ b/starlark-rust/starlark/src/values/types/float/globals.rs @@ -0,0 +1,89 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use either::Either; +use starlark_derive::starlark_module; + +use crate as starlark; +use crate::environment::GlobalsBuilder; +use crate::values::float::StarlarkFloat; +use crate::values::string::repr::string_repr; +use crate::values::types::num::value::NumRef; + +#[starlark_module] +pub(crate) fn register_float(globals: &mut GlobalsBuilder) { + /// [float]( + /// https://github.com/bazelbuild/starlark/blob/master/spec.md#float + /// ): interprets its argument as a floating-point number. + /// + /// If x is a `float`, the result is x. + /// if x is an `int`, the result is the nearest floating point value to x. + /// If x is a string, the string is interpreted as a floating-point literal. + /// With no arguments, `float()` returns `0.0`. 
+ /// + /// ``` + /// # starlark::assert::all_true(r#" + /// float() == 0.0 + /// float(1) == 1.0 + /// float('1') == 1.0 + /// float('1.0') == 1.0 + /// float('.25') == 0.25 + /// float('1e2') == 100.0 + /// float(False) == 0.0 + /// float(True) == 1.0 + /// # "#); + /// # starlark::assert::fail(r#" + /// float("hello") # error: not a valid number + /// # "#, "not a valid number"); + /// # starlark::assert::fail(r#" + /// float([]) # error + /// # "#, "doesn't match, expected"); + /// ``` + #[starlark(as_type = StarlarkFloat, speculative_exec_safe)] + fn float( + #[starlark(require = pos)] a: Option, &str>>, + ) -> anyhow::Result { + if a.is_none() { + return Ok(0.0); + } + let a = a.unwrap(); + match a { + Either::Left(Either::Left(f)) => Ok(f.as_float()), + Either::Left(Either::Right(b)) => Ok(if b { 1.0 } else { 0.0 }), + Either::Right(s) => { + match s.parse::() { + Ok(f) => { + if f.is_infinite() && !s.to_lowercase().contains("inf") { + // if a resulting float is infinite but the parsed string is not explicitly infinity then we should fail with an error + Err(anyhow::anyhow!( + "float() floating-point number too large: {}", + s + )) + } else { + Ok(f) + } + } + Err(x) => { + let mut repr = String::new(); + string_repr(s, &mut repr); + Err(anyhow::anyhow!("{} is not a valid number: {}", repr, x,)) + } + } + } + } + } +} diff --git a/starlark-rust/starlark/src/values/types/float/unpack.rs b/starlark-rust/starlark/src/values/types/float/unpack.rs new file mode 100644 index 0000000000000..f3971ea208c2e --- /dev/null +++ b/starlark-rust/starlark/src/values/types/float/unpack.rs @@ -0,0 +1,72 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use crate::typing::Ty; +use crate::values::type_repr::StarlarkTypeRepr; +use crate::values::types::num::value::Num; +use crate::values::types::num::value::NumRef; +use crate::values::UnpackValue; +use crate::values::Value; + +/// Unpack `int` or `float` into `f64`. +pub struct UnpackFloat(pub f64); + +impl StarlarkTypeRepr for UnpackFloat { + type Canonical = ::Canonical; + + fn starlark_type_repr() -> Ty { + ::starlark_type_repr() + } +} + +impl<'v> UnpackValue<'v> for UnpackFloat { + type Error = as UnpackValue<'v>>::Error; + + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + let Some(num) = NumRef::unpack_value_impl(value)? 
else { + return Ok(None); + }; + Ok(Some(UnpackFloat(num.as_float()))) + } +} + +#[cfg(test)] +mod tests { + use crate::values::float::UnpackFloat; + use crate::values::Heap; + use crate::values::UnpackValue; + use crate::values::Value; + + #[test] + fn test_unpack_float() { + let heap = Heap::new(); + assert_eq!( + 1.0, + UnpackFloat::unpack_value(Value::testing_new_int(1)) + .unwrap() + .unwrap() + .0 + ); + assert_eq!( + 1.0, + UnpackFloat::unpack_value(heap.alloc(1.0)) + .unwrap() + .unwrap() + .0 + ); + } +} diff --git a/starlark-rust/starlark/src/values/types/function.rs b/starlark-rust/starlark/src/values/types/function.rs index 394676e6d3317..01062417e1bb3 100644 --- a/starlark-rust/starlark/src/values/types/function.rs +++ b/starlark-rust/starlark/src/values/types/function.rs @@ -17,8 +17,6 @@ //! Function types, including native functions and `object.member` functions. -use std::collections::HashMap; - use allocative::Allocative; use derivative::Derivative; use derive_more::Display; @@ -29,15 +27,13 @@ use starlark_derive::NoSerialize; use crate as starlark; use crate::any::ProvidesStaticType; use crate::coerce::Coerce; -use crate::docs::DocFunction; use crate::docs::DocItem; +use crate::docs::DocMember; use crate::docs::DocProperty; use crate::docs::DocString; use crate::docs::DocStringKind; use crate::eval::Arguments; use crate::eval::Evaluator; -use crate::eval::ParametersParser; -use crate::eval::ParametersSpec; use crate::private::Private; use crate::starlark_complex_value; use crate::starlark_simple_value; @@ -52,6 +48,7 @@ use crate::values::AllocFrozenValue; use crate::values::AllocValue; use crate::values::Freeze; use crate::values::FrozenHeap; +use crate::values::FrozenRef; use crate::values::FrozenValue; use crate::values::FrozenValueTyped; use crate::values::Heap; @@ -59,6 +56,7 @@ use crate::values::StarlarkValue; use crate::values::Trace; use crate::values::Value; use crate::values::ValueError; +use crate::values::ValueLifetimeless; use crate::values::ValueLike; #[derive(Debug, thiserror::Error)] @@ -74,8 +72,10 @@ pub const FUNCTION_TYPE: &str = "function"; pub(crate) enum StarlarkFunction {} impl StarlarkTypeRepr for StarlarkFunction { + type Canonical = Self; + fn starlark_type_repr() -> Ty { - Ty::any_function() + Ty::any_callable() } } @@ -85,6 +85,7 @@ pub enum SpecialBuiltinFunction { List, Dict, Tuple, + Set, } /// A native function that can be evaluated. @@ -94,25 +95,9 @@ pub trait NativeFunc: Send + Sync + 'static { /// Invoke the function. fn invoke<'v>( &self, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, args: &Arguments<'v, '_>, - ) -> anyhow::Result>; -} - -impl NativeFunc for T -where - T: for<'v> Fn(&mut Evaluator<'v, '_>, &Arguments<'v, '_>) -> anyhow::Result> - + Send - + Sync - + 'static, -{ - fn invoke<'v>( - &self, - eval: &mut Evaluator<'v, '_>, - args: &Arguments<'v, '_>, - ) -> anyhow::Result> { - (*self)(eval, args) - } + ) -> crate::Result>; } /// Native method implementation. @@ -122,87 +107,41 @@ pub trait NativeMeth: Send + Sync + 'static { /// Invoke the method. 
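Looking back at `UnpackFloat` before the function machinery continues below: its payoff is that a single native parameter accepts `int` or `float` alike. A hypothetical module sketch (`register_half` and `half` are illustrative names; the attribute syntax matches `register_float` above):

```rust
use starlark::environment::GlobalsBuilder;
use starlark::starlark_module;
use starlark::values::float::UnpackFloat;

#[starlark_module]
fn register_half(globals: &mut GlobalsBuilder) {
    /// Halve an `int` or a `float`; `UnpackFloat` converts either to `f64`.
    fn half(#[starlark(require = pos)] x: UnpackFloat) -> anyhow::Result<f64> {
        Ok(x.0 / 2.0)
    }
}
```

A globals environment would then pick this up with something like `GlobalsBuilder::standard().with(register_half)`.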
fn invoke<'v>( &self, - eval: &mut Evaluator<'v, '_>, - this: Value<'v>, - args: &Arguments<'v, '_>, - ) -> anyhow::Result>; -} - -impl NativeMeth for T -where - T: for<'v> Fn( - &mut Evaluator<'v, '_>, - Value<'v>, - &Arguments<'v, '_>, - ) -> anyhow::Result> - + Send - + Sync - + 'static, -{ - fn invoke<'v>( - &self, - eval: &mut Evaluator<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, this: Value<'v>, args: &Arguments<'v, '_>, - ) -> anyhow::Result> { - (*self)(eval, this, args) - } + ) -> crate::Result>; } /// A native function that can be evaluated. pub trait NativeAttr: - for<'v> Fn(Value<'v>, &'v Heap) -> anyhow::Result> + Send + Sync + 'static + for<'v> Fn(Value<'v>, &'v Heap) -> crate::Result> + Send + Sync + 'static { } impl NativeAttr for T where - T: for<'v> Fn(Value<'v>, &'v Heap) -> anyhow::Result> + Send + Sync + 'static + T: for<'v> Fn(Value<'v>, &'v Heap) -> crate::Result> + Send + Sync + 'static { } -/// Enough details to get the documentation for a callable ([`NativeFunction`] or [`NativeMethod`]) -#[doc(hidden)] -#[derive(Allocative)] -pub struct NativeCallableRawDocs { - pub rust_docstring: Option<&'static str>, - pub signature: ParametersSpec, - pub parameter_types: Vec, - pub return_type: Ty, - pub as_type: Option, -} - -#[doc(hidden)] -impl NativeCallableRawDocs { - pub fn documentation(&self) -> DocFunction { - DocFunction::from_docstring( - DocStringKind::Rust, - self.signature - .documentation(self.parameter_types.clone(), HashMap::new()), - self.return_type.clone(), - self.rust_docstring, - self.as_type.clone(), - ) - } -} - /// Starlark representation of native (Rust) functions. /// /// Almost always created with [`#[starlark_module]`](macro@crate::starlark_module). #[derive(Derivative, ProvidesStaticType, Display, NoSerialize, Allocative)] #[derivative(Debug)] -#[display(fmt = "{}", name)] -pub struct NativeFunction { +#[display("{}", name)] +pub(crate) struct NativeFunction { #[derivative(Debug = "ignore")] #[allocative(skip)] pub(crate) function: Box, pub(crate) name: String, /// `.type` attribute and a type when this function is used in type expression. - pub(crate) type_attr: Option, - pub(crate) ty: Option, + pub(crate) as_type: Option, + pub(crate) ty: Ty, /// Safe to evaluate speculatively. pub(crate) speculative_exec_safe: bool, #[derivative(Debug = "ignore")] - pub(crate) raw_docs: Option, + pub(crate) docs: DocItem, pub(crate) special_builtin_function: Option, } @@ -212,48 +151,6 @@ impl AllocFrozenValue for NativeFunction { } } -impl NativeFunction { - /// Create a new [`NativeFunction`] from the Rust function which works directly on the parameters. - /// The called function is responsible for validating the parameters are correct. - pub fn new_direct(function: F, name: String) -> Self - where - // If I switch this to the trait alias then it fails to resolve the usages - F: for<'v> Fn(&mut Evaluator<'v, '_>, &Arguments<'v, '_>) -> anyhow::Result> - + Send - + Sync - + 'static, - { - NativeFunction { - function: Box::new(function), - name, - type_attr: None, - ty: None, - speculative_exec_safe: false, - raw_docs: None, - special_builtin_function: None, - } - } - - /// Create a new [`NativeFunction`] from the Rust function, plus the parameter specification. 
- pub fn new(function: F, name: String, parameters: ParametersSpec) -> Self - where - F: for<'v> Fn( - &mut Evaluator<'v, '_>, - ParametersParser<'v, '_>, - ) -> anyhow::Result> - + Send - + Sync - + 'static, - { - Self::new_direct( - move |eval, params| { - parameters.parser(params, eval, |parser, eval| function(eval, parser)) - }, - name, - ) - } -} - impl<'v> AllocValue<'v> for NativeFunction { fn alloc_value(self, heap: &'v Heap) -> Value<'v> { heap.alloc_simple(self) @@ -267,13 +164,13 @@ impl<'v> StarlarkValue<'v> for NativeFunction { &self, _me: Value<'v>, args: &Arguments<'v, '_>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { - self.function.invoke(eval, args) + eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result> { + self.function.invoke(eval, args).map_err(Into::into) } fn get_attr(&self, attribute: &str, heap: &'v Heap) -> Option> { - if let Some(s) = self.type_attr.as_ref().map(|t| t.as_name()) { + if let Some(s) = self.as_type.as_ref().and_then(|t| t.as_name()) { if attribute == "type" { return Some(heap.alloc(s)); } @@ -281,9 +178,8 @@ impl<'v> StarlarkValue<'v> for NativeFunction { None } - #[allow(clippy::manual_map)] fn eval_type(&self) -> Option { - self.type_attr.clone() + self.as_type.clone() } fn has_attr(&self, _attribute: &str, _heap: &'v Heap) -> bool { @@ -292,29 +188,31 @@ impl<'v> StarlarkValue<'v> for NativeFunction { } fn dir_attr(&self) -> Vec { - if self.type_attr.is_some() { + if self.as_type.is_some() { vec!["type".to_owned()] } else { Vec::new() } } - fn documentation(&self) -> Option { - self.raw_docs - .as_ref() - .map(|raw_docs| DocItem::Function(raw_docs.documentation())) + fn documentation(&self) -> DocItem { + self.docs.clone() } fn typechecker_ty(&self) -> Option { - self.ty.clone() + Some(self.ty.dupe()) } - fn at(&self, index: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + fn at(&self, index: Value<'v>, heap: &'v Heap) -> crate::Result> { match &self.special_builtin_function { Some(SpecialBuiltinFunction::List) => { let index = TypeCompiled::new(index, heap)?; Ok(TypeCompiled::type_list_of(index, heap).to_inner()) } + Some(SpecialBuiltinFunction::Set) => { + let index = TypeCompiled::new(index, heap)?; + Ok(TypeCompiled::type_set_of(index, heap).to_inner()) + } _ => ValueError::unsupported(self, "[]"), } } @@ -325,7 +223,7 @@ impl<'v> StarlarkValue<'v> for NativeFunction { index1: Value<'v>, heap: &'v Heap, _private: Private, - ) -> anyhow::Result> { + ) -> crate::Result> { match &self.special_builtin_function { Some(SpecialBuiltinFunction::Dict) => { let index0 = TypeCompiled::new(index0, heap)?; @@ -343,7 +241,7 @@ impl<'v> StarlarkValue<'v> for NativeFunction { ) .to_inner()) } else { - Err(FunctionError::TupleOnlyEllipsis.into()) + Err(crate::Error::new_other(FunctionError::TupleOnlyEllipsis)) } } _ => ValueError::unsupported(self, "[,]"), @@ -353,35 +251,25 @@ impl<'v> StarlarkValue<'v> for NativeFunction { #[derive(Derivative, Display, NoSerialize, ProvidesStaticType, Allocative)] #[derivative(Debug)] -#[display(fmt = "{}", name)] +#[display("{}", name)] pub(crate) struct NativeMethod { #[derivative(Debug = "ignore")] #[allocative(skip)] - pub(crate) function: Box, + pub(crate) function: FrozenRef<'static, dyn NativeMeth>, pub(crate) name: String, pub(crate) ty: Ty, /// Safe to evaluate speculatively. 
pub(crate) speculative_exec_safe: bool, #[derivative(Debug = "ignore")] - pub(crate) raw_docs: NativeCallableRawDocs, + pub(crate) docs: DocItem, } starlark_simple_value!(NativeMethod); #[starlark_value(type = "native_method")] impl<'v> StarlarkValue<'v> for NativeMethod { - fn invoke_method( - &self, - this: Value<'v>, - args: &Arguments<'v, '_>, - eval: &mut Evaluator<'v, '_>, - _: Private, - ) -> anyhow::Result> { - self.function.invoke(eval, this, args) - } - - fn documentation(&self) -> Option { - Some(DocItem::Function(self.raw_docs.documentation())) + fn documentation(&self) -> DocItem { + self.docs.clone() } fn typechecker_ty(&self) -> Option { @@ -392,12 +280,9 @@ impl<'v> StarlarkValue<'v> for NativeMethod { /// Used by the `#[starlark(attribute)]` tag of [`#[starlark_module]`](macro@starlark_module) /// to define a function that pretends to be an attribute. #[derive(Derivative, Display, NoSerialize, ProvidesStaticType, Allocative)] -#[display(fmt = "Attribute")] +#[display("Attribute")] #[derivative(Debug)] pub(crate) struct NativeAttribute { - #[derivative(Debug = "ignore")] - #[allocative(skip)] - pub(crate) function: Box, /// Safe to evaluate speculatively. pub(crate) speculative_exec_safe: bool, pub(crate) docstring: Option, @@ -407,31 +292,27 @@ pub(crate) struct NativeAttribute { starlark_simple_value!(NativeAttribute); impl NativeAttribute { - pub(crate) fn call<'v>(&self, value: Value<'v>, heap: &'v Heap) -> anyhow::Result> { - (self.function)(value, heap) - } -} - -#[starlark_value(type = "attribute")] -impl<'v> StarlarkValue<'v> for NativeAttribute { - fn invoke_method( - &self, + #[inline] + pub(crate) fn invoke_method_impl<'v>( + function: &dyn NativeAttr, this: Value<'v>, args: &Arguments<'v, '_>, - eval: &mut Evaluator<'v, '_>, - _: Private, - ) -> anyhow::Result> { - let method = self.call(this, eval.heap())?; + eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result> { + let method = function(this, eval.heap())?; method.invoke(args, eval) } +} - fn documentation(&self) -> Option { +#[starlark_value(type = "attribute")] +impl<'v> StarlarkValue<'v> for NativeAttribute { + fn documentation(&self) -> DocItem { let ds = self .docstring .as_ref() .and_then(|ds| DocString::from_docstring(DocStringKind::Rust, ds)); let typ = self.typ.clone(); - Some(DocItem::Property(DocProperty { docs: ds, typ })) + DocItem::Member(DocMember::Property(DocProperty { docs: ds, typ })) } } @@ -448,8 +329,8 @@ impl<'v> StarlarkValue<'v> for NativeAttribute { Allocative )] #[repr(C)] -#[display(fmt = "{}", method)] -pub(crate) struct BoundMethodGen { +#[display("{}", method)] +pub(crate) struct BoundMethodGen { pub(crate) method: FrozenValueTyped<'static, NativeMethod>, pub(crate) this: V, } @@ -465,7 +346,7 @@ impl<'v, V: ValueLike<'v>> BoundMethodGen { } #[starlark_value(type = FUNCTION_TYPE)] -impl<'v, V: ValueLike<'v> + 'v> StarlarkValue<'v> for BoundMethodGen +impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for BoundMethodGen where Self: ProvidesStaticType<'v>, { @@ -473,13 +354,14 @@ where &self, _me: Value<'v>, args: &Arguments<'v, '_>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result> { self.method - .invoke_method(self.this.to_value(), args, eval, Private) + .function + .invoke(eval, self.this.to_value(), args) } - fn documentation(&self) -> Option { + fn documentation(&self) -> DocItem { self.method.documentation() } } diff --git a/starlark-rust/starlark/src/values/types/int.rs 
b/starlark-rust/starlark/src/values/types/int.rs index 4373099fdcce8..01e3ad5e2b481 100644 --- a/starlark-rust/starlark/src/values/types/int.rs +++ b/starlark-rust/starlark/src/values/types/int.rs @@ -17,363 +17,15 @@ //! The integer type. //! -//! For small values, we try not to allocate on the [`Heap`], but instead use -//! special values. If the value doesn't fit in the special representation, -//! we use [`BigInt`]. - -use std::cmp::Ordering; -use std::fmt; -use std::fmt::Debug; -use std::fmt::Display; -use std::hash::Hasher; -use std::mem; -use std::ptr; - -use allocative::Allocative; -use num_bigint::BigInt; -use serde::Serialize; -use serde::Serializer; -use starlark_derive::starlark_value; -use starlark_derive::StarlarkDocs; - -use crate as starlark; -use crate::any::AnyLifetime; -use crate::any::ProvidesStaticType; -use crate::cast; -use crate::collections::StarlarkHashValue; -use crate::collections::StarlarkHasher; -use crate::private::Private; -use crate::typing::Ty; -use crate::typing::TyBasic; -use crate::typing::TypingBinOp; -use crate::values::error::ValueError; -use crate::values::layout::avalue::AValueImpl; -use crate::values::layout::avalue::Basic; -use crate::values::layout::pointer::RawPointer; -use crate::values::layout::vtable::AValueDyn; -use crate::values::layout::vtable::AValueVTable; -use crate::values::layout::vtable::StarlarkValueRawPtr; -use crate::values::num::typecheck::typecheck_num_bin_op; -use crate::values::num::typecheck::NumTy; -use crate::values::num::value::NumRef; -use crate::values::type_repr::StarlarkTypeRepr; -use crate::values::types::bigint::StarlarkBigInt; -use crate::values::types::inline_int::InlineInt; -use crate::values::types::int_or_big::StarlarkInt; -use crate::values::types::int_or_big::StarlarkIntRef; -use crate::values::AllocFrozenValue; -use crate::values::AllocValue; -use crate::values::FrozenHeap; -use crate::values::FrozenValue; -use crate::values::Heap; -use crate::values::StarlarkValue; -use crate::values::UnpackValue; -use crate::values::Value; - -/// The result of calling `type()` on integers. -pub const INT_TYPE: &str = "int"; - -impl<'v> AllocValue<'v> for i32 { - #[inline] - fn alloc_value(self, heap: &'v Heap) -> Value<'v> { - heap.alloc(StarlarkInt::from(self)) - } -} -impl AllocFrozenValue for i32 { - #[inline] - fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue { - heap.alloc(StarlarkInt::from(self)) - } -} - -impl StarlarkTypeRepr for i32 { - fn starlark_type_repr() -> Ty { - PointerI32::starlark_type_repr() - } -} - -impl UnpackValue<'_> for i32 { - fn unpack_value(value: Value) -> Option { - if InlineInt::smaller_than_i32() { - StarlarkIntRef::unpack_value(value)?.to_i32() - } else { - Some(InlineInt::unpack_value(value)?.to_i32()) - } - } -} - -// WARNING: This type isn't a real type, a pointer to this is secretly an i32. -// Therefore, don't derive stuff on it, since it will be wrong. -// However, `ProvidesStaticType` promises not to peek at its value, so that's fine. 
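The deleted `int.rs` above (and its replacement `pointer_i32.rs` later in this diff) both hinge on the trick this comment describes: a `Value` whose low pointer bit carries a tag is decoded as an inline integer rather than dereferenced. A minimal, self-contained sketch of the idea, assuming a 64-bit platform; `pack_int`/`unpack_int` are hypothetical names, not the crate's API:

```rust
// Tag bit distinguishing "this word is an integer" from a real pointer.
// Heap pointers are aligned, so their low bit is always zero.
const TAG_INT: usize = 0b1;

fn pack_int(i: i32) -> usize {
    // Widen to the pointer width, shift the payload up, set the tag.
    // The result is never zero, so it cannot be mistaken for null.
    ((i as u32 as usize) << 1) | TAG_INT
}

fn unpack_int(raw: usize) -> Option<i32> {
    if raw & TAG_INT != 0 {
        Some((raw >> 1) as u32 as i32) // recover the i32 payload
    } else {
        None // an ordinary pointer; dereference it elsewhere
    }
}

fn main() {
    assert_eq!(unpack_int(pack_int(-7)), Some(-7));
    assert_eq!(unpack_int(0x7f00), None); // even word: looks like a pointer
}
```

The real `PointerI32` goes a step further: instead of unpacking eagerly, it reinterprets the tagged word itself as a `&PointerI32` (see `from_raw_pointer_unchecked` and `as_avalue_dyn` below), so the vtable machinery can treat unboxed ints uniformly with heap values.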
-#[derive(ProvidesStaticType, StarlarkDocs, Allocative)] -#[starlark_docs(builtin = "standard")] -#[repr(C)] -pub(crate) struct PointerI32 { - _private: (), -} - -impl PartialEq for PointerI32 { - fn eq(&self, other: &Self) -> bool { - ptr::eq(self, other) - } -} - -impl Eq for PointerI32 {} - -impl Debug for PointerI32 { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - Debug::fmt(&self.get(), f) - } -} - -impl PointerI32 { - const _ASSERTIONS: () = { - assert!(mem::align_of::() == 1); - }; - - #[inline] - pub(crate) unsafe fn from_raw_pointer_unchecked( - raw_pointer: RawPointer, - ) -> &'static PointerI32 { - debug_assert!(raw_pointer.is_int()); - // UB if the pointer isn't aligned, or it is zero. - // Alignment is 1, so that's not an issue. - // And the pointer is not zero because it has `TAG_INT` bit set. - cast::usize_to_ptr(raw_pointer.ptr_value()) - } - - #[inline] - pub(crate) fn get(&self) -> InlineInt { - unsafe { RawPointer::new_unchecked(self as *const Self as usize).unpack_int_unchecked() } - } - - #[inline] - pub(crate) fn as_avalue_dyn(&'static self) -> AValueDyn<'static> { - unsafe { AValueDyn::new(StarlarkValueRawPtr::new_pointer_i32(self), Self::vtable()) } - } - - #[inline] - pub(crate) fn vtable() -> &'static AValueVTable { - AValueVTable::new::>() - } - - /// This operation is expensive, use only if you have to. - fn to_bigint(&self) -> BigInt { - self.get().to_bigint() - } - - pub(crate) fn type_is_pointer_i32<'v, T: StarlarkValue<'v>>() -> bool { - T::static_type_id() == PointerI32::static_type_id() - } -} - -impl Display for PointerI32 { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.get()) - } -} - -/// Define the int type -#[starlark_value(type = INT_TYPE)] -impl<'v> StarlarkValue<'v> for PointerI32 { - type Canonical = StarlarkBigInt; - - fn is_special(_: Private) -> bool - where - Self: Sized, - { - true - } - - fn equals(&self, other: Value) -> anyhow::Result { - Ok(Some(NumRef::Int(StarlarkIntRef::Small(self.get()))) == other.unpack_num()) - } - - fn to_bool(&self) -> bool { - self.get().to_i32() != 0 - } - fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { - hasher.write_u64(NumRef::Int(StarlarkIntRef::Small(self.get())).get_hash_64()); - Ok(()) - } - - fn get_hash(&self, _private: Private) -> anyhow::Result { - Ok(NumRef::Int(StarlarkIntRef::Small(self.get())).get_hash()) - } - - fn plus(&self, _heap: &'v Heap) -> anyhow::Result> { - Ok(Value::new_int(self.get())) - } - fn minus(&self, heap: &'v Heap) -> anyhow::Result> { - Ok(heap.alloc(-StarlarkIntRef::Small(self.get()))) - } - fn add(&self, other: Value<'v>, heap: &'v Heap) -> Option>> { - Some(Ok(heap.alloc( - NumRef::Int(StarlarkIntRef::Small(self.get())) + other.unpack_num()?, - ))) - } - fn sub(&self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result> { - match other.unpack_num() { - Some(other) => Ok(heap.alloc(NumRef::Int(StarlarkIntRef::Small(self.get())) - other)), - None => ValueError::unsupported_with(self, "-", other), - } - } - fn mul(&self, other: Value<'v>, heap: &'v Heap) -> Option>> { - Some(Ok(heap.alloc( - NumRef::Int(StarlarkIntRef::Small(self.get())) * other.unpack_num()?, - ))) - } - fn div(&self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result> { - match other.unpack_num() { - Some(other) => { - Ok(heap.alloc(NumRef::Int(StarlarkIntRef::Small(self.get())).div(other)?)) - } - None => ValueError::unsupported_with(self, "/", other), - } - } - fn percent(&self, other: Value<'v>, heap: &'v Heap) -> 
anyhow::Result> { - match other.unpack_num() { - Some(other) => { - Ok(heap.alloc(NumRef::Int(StarlarkIntRef::Small(self.get())).percent(other)?)) - } - None => ValueError::unsupported_with(self, "%", other), - } - } - fn floor_div(&self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result> { - match other.unpack_num() { - Some(other) => { - Ok(heap.alloc(NumRef::Int(StarlarkIntRef::Small(self.get())).floor_div(other)?)) - } - None => ValueError::unsupported_with(self, "//", other), - } - } - - fn compare(&self, other: Value) -> anyhow::Result { - match other.unpack_num() { - None => ValueError::unsupported_with(self, "compare", other), - Some(other) => Ok(NumRef::Int(StarlarkIntRef::Small(self.get())).cmp(&other)), - } - } - - fn bit_and(&self, other: Value, heap: &'v Heap) -> anyhow::Result> { - match StarlarkIntRef::unpack_value(other) { - None => ValueError::unsupported_with(self, "&", other), - Some(StarlarkIntRef::Small(i)) => Ok(Value::new_int(self.get() & i)), - Some(StarlarkIntRef::Big(b)) => { - Ok(heap.alloc(StarlarkInt::from(&self.to_bigint() & b.get()))) - } - } - } - - fn bit_or(&self, other: Value, heap: &'v Heap) -> anyhow::Result> { - match StarlarkIntRef::unpack_value(other) { - None => ValueError::unsupported_with(self, "|", other), - Some(StarlarkIntRef::Small(i)) => Ok(Value::new_int(self.get() | i)), - Some(StarlarkIntRef::Big(b)) => { - Ok(heap.alloc(StarlarkInt::from(&self.to_bigint() | b.get()))) - } - } - } - - fn bit_xor(&self, other: Value, heap: &'v Heap) -> anyhow::Result> { - match StarlarkIntRef::unpack_value(other) { - None => ValueError::unsupported_with(self, "^", other), - Some(StarlarkIntRef::Small(i)) => Ok(Value::new_int(self.get() ^ i)), - Some(StarlarkIntRef::Big(b)) => { - Ok(heap.alloc(StarlarkInt::from(&self.to_bigint() ^ b.get()))) - } - } - } - - fn bit_not(&self, _heap: &'v Heap) -> anyhow::Result> { - Ok(Value::new_int(!self.get())) - } - - fn left_shift(&self, other: Value, heap: &'v Heap) -> anyhow::Result> { - match StarlarkIntRef::unpack_value(other) { - None => ValueError::unsupported_with(self, "<<", other), - Some(other) => Ok(heap.alloc(StarlarkIntRef::Small(self.get()).left_shift(other)?)), - } - } - - fn right_shift(&self, other: Value, heap: &'v Heap) -> anyhow::Result> { - match StarlarkIntRef::unpack_value(other) { - None => ValueError::unsupported_with(self, ">>", other), - Some(other) => Ok(heap.alloc(StarlarkIntRef::Small(self.get()).right_shift(other)?)), - } - } - - fn bin_op_ty(op: TypingBinOp, rhs: &TyBasic) -> Option { - // This is dead code, because canonical int type is `StarlarkBigInt`, - // but keep for consistency. - typecheck_num_bin_op(NumTy::Int, op, rhs) - } - - fn typechecker_ty(&self) -> Option { - Some(Ty::int()) - } -} - -impl Serialize for PointerI32 { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - self.get().serialize(serializer) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::assert; - - #[test] - fn test_arithmetic_operators() { - assert::all_true( - r#" -+1 == 1 --1 == 0 - 1 -1 + 2 == 3 -1 + 2.0 == 3.0 -1 - 2 == -1 -1 - 2.0 == -1.0 -2 * 3 == 6 -2 * 3.0 == 6.0 -4 / 2 == 2.0 -5 % 3 == 2 -4 // 2 == 2 -"#, - ); - } - - #[test] - fn test_minus() { - // `-i32::MIN` should overflow to `StarlarkBigInt`. 
- assert::eq("2147483648", "-(-2147483647 - 1)") - } - - #[test] - fn test_int_tag() { - fn check(x: InlineInt) { - assert_eq!(x, FrozenValue::new_int(x).unpack_inline_int().unwrap()); - } - - for x in -10..10 { - check(InlineInt::try_from(x).ok().unwrap()) - } - check(InlineInt::MAX); - check(InlineInt::MIN); - } - - #[test] - fn test_alignment_int_pointer() { - assert_eq!(1, std::mem::align_of::()); - } - - #[test] - fn test_as_avalue_dyn() { - // `get_type` calls `as_avalue_dyn` internally. - assert_eq!("int", Value::new_int(InlineInt::MINUS_ONE).get_type()); - } -} +//! For small values, we try not to allocate on the [`Heap`](crate::values::Heap), +//! but instead use special values. If the value doesn't fit in the special representation, +//! we use [`BigInt`](num_bigint::BigInt) to store it. + +pub(crate) mod globals; +mod i32; +pub(crate) mod inline_int; +pub(crate) mod int_or_big; +pub(crate) mod pointer_i32; +mod tests; + +pub use pointer_i32::INT_TYPE; diff --git a/starlark-rust/starlark/src/values/types/int/globals.rs b/starlark-rust/starlark/src/values/types/int/globals.rs new file mode 100644 index 0000000000000..895a10798b33b --- /dev/null +++ b/starlark-rust/starlark/src/values/types/int/globals.rs @@ -0,0 +1,170 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use either::Either; +use starlark_derive::starlark_module; + +use crate as starlark; +use crate::environment::GlobalsBuilder; +use crate::values::int::int_or_big::StarlarkInt; +use crate::values::int::pointer_i32::PointerI32; +use crate::values::types::num::value::NumRef; +use crate::values::Heap; +use crate::values::ValueOf; +use crate::values::ValueOfUnchecked; + +#[starlark_module] +pub(crate) fn register_int(globals: &mut GlobalsBuilder) { + /// [int]( + /// https://github.com/bazelbuild/starlark/blob/master/spec.md#int + /// ): convert a value to integer. + /// + /// `int(x[, base])` interprets its argument as an integer. + /// + /// If x is an `int`, the result is x. + /// If x is a `float`, the result is the integer value nearest to x, + /// truncating towards zero; it is an error if x is not finite (`NaN`, + /// `+Inf`, `-Inf`). + /// If x is a `bool`, the result is 0 for `False` or 1 for `True`. + /// + /// If x is a string, it is interpreted like a string literal; + /// an optional base prefix (`0`, `0b`, `0B`, `0x`, `0X`) determines which + /// base to use. The string may specify an arbitrarily large integer, + /// whereas true integer literals are restricted to 64 bits. + /// If a non-zero `base` argument is provided, the string is interpreted + /// in that base and no base prefix is permitted; the base argument may + /// specified by name. + /// + /// `int()` with no arguments returns 0. 
+ /// + /// ``` + /// # starlark::assert::all_true(r#" + /// int() == 0 + /// int(1) == 1 + /// int(False) == 0 + /// int(True) == 1 + /// int('1') == 1 + /// int('16') == 16 + /// int('16', 10) == 16 + /// int('16', 8) == 14 + /// int('16', 16) == 22 + /// int(0.0) == 0 + /// int(3.14) == 3 + /// int(-12345.6789) == -12345 + /// int(2e9) == 2000000000 + /// # "#); + /// # starlark::assert::fail(r#" + /// int("hello") # error: Cannot parse + /// # "#, "Cannot parse"); + /// # starlark::assert::fail(r#" + /// int(float("nan")) # error: cannot be represented as exact integer + /// # "#, "cannot be represented as exact integer"); + /// # starlark::assert::fail(r#" + /// int(float("inf")) # error: cannot be represented as exact integer + /// # "#, "cannot be represented as exact integer"); + /// ``` + #[starlark(as_type = PointerI32, speculative_exec_safe)] + fn int<'v>( + #[starlark(require = pos)] a: Option< + ValueOf<'v, Either, bool>, &'v str>>, + >, + base: Option, + heap: &'v Heap, + ) -> starlark::Result> { + let Some(a) = a else { + return Ok(ValueOfUnchecked::new(heap.alloc(0))); + }; + let num_or_bool = match a.typed { + Either::Left(num_or_bool) => num_or_bool, + Either::Right(s) => { + let base = base.unwrap_or(0); + if base == 1 || base < 0 || base > 36 { + return Err(anyhow::anyhow!( + "{} is not a valid base, int() base must be >= 2 and <= 36", + base + ) + .into()); + } + let (negate, s) = { + match s.chars().next() { + Some('+') => (false, s.get(1..).unwrap()), + Some('-') => (true, s.get(1..).unwrap()), + _ => (false, s), + } + }; + let base = if base == 0 { + match s.get(0..2) { + Some("0b") | Some("0B") => 2, + Some("0o") | Some("0O") => 8, + Some("0x") | Some("0X") => 16, + _ => 10, + } + } else { + base as u32 + }; + let s = match base { + 16 => { + if s.starts_with("0x") || s.starts_with("0X") { + s.get(2..).unwrap() + } else { + s + } + } + 8 => { + if s.starts_with("0o") || s.starts_with("0O") { + s.get(2..).unwrap() + } else { + s + } + } + 2 => { + if s.starts_with("0b") || s.starts_with("0B") { + s.get(2..).unwrap() + } else { + s + } + } + _ => s, + }; + // We already handled the sign above, so we are not trying to parse another sign. + if s.starts_with('-') || s.starts_with('+') { + return Err(anyhow::anyhow!("Cannot parse `{}` as an integer", s,).into()); + } + + let x = StarlarkInt::from_str_radix(s, base)?; + let x = if negate { -x } else { x }; + return Ok(ValueOfUnchecked::new(heap.alloc(x))); + } + }; + + if let Some(base) = base { + return Err(anyhow::anyhow!( + "int() cannot convert non-string with explicit base '{}'", + base + ) + .into()); + } + + match num_or_bool { + Either::Left(NumRef::Int(_)) => Ok(ValueOfUnchecked::new(a.value)), + Either::Left(NumRef::Float(f)) => Ok(ValueOfUnchecked::new( + heap.alloc(StarlarkInt::from_f64_exact(f.0.trunc())?), + )), + Either::Right(b) => Ok(ValueOfUnchecked::new(heap.alloc(b as i32))), + } + } +} diff --git a/starlark-rust/starlark/src/values/types/int/i32.rs b/starlark-rust/starlark/src/values/types/int/i32.rs new file mode 100644 index 0000000000000..1f0443c1f9959 --- /dev/null +++ b/starlark-rust/starlark/src/values/types/int/i32.rs @@ -0,0 +1,76 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::any; + +use crate::typing::Ty; +use crate::values::int::pointer_i32::PointerI32; +use crate::values::layout::value::IntegerTooBigError; +use crate::values::type_repr::StarlarkTypeRepr; +use crate::values::types::int::int_or_big::StarlarkInt; +use crate::values::types::int::int_or_big::StarlarkIntRef; +use crate::values::AllocFrozenValue; +use crate::values::AllocValue; +use crate::values::FrozenHeap; +use crate::values::FrozenValue; +use crate::values::Heap; +use crate::values::UnpackValue; +use crate::values::Value; + +impl<'v> AllocValue<'v> for i32 { + #[inline] + fn alloc_value(self, heap: &'v Heap) -> Value<'v> { + heap.alloc(StarlarkInt::from(self)) + } +} +impl AllocFrozenValue for i32 { + #[inline] + fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue { + heap.alloc(StarlarkInt::from(self)) + } +} + +impl StarlarkTypeRepr for i32 { + type Canonical = <PointerI32 as StarlarkTypeRepr>::Canonical; + + fn starlark_type_repr() -> Ty { + PointerI32::starlark_type_repr() + } +} + +impl UnpackValue<'_> for i32 { + type Error = crate::Error; + + fn unpack_value_impl(value: Value) -> crate::Result<Option<Self>> { + // Note this does not use `Value::unpack_integer()` + // because, unlike other call sites, + // we know that `i32` is `InlineInt` on 64-bit platforms and never `BigInt`, + // so this is faster. + if let Some(v) = value.unpack_i32() { + Ok(Some(v)) + } else { + if let Some(int) = StarlarkIntRef::unpack(value) { + Err(crate::Error::new_value(IntegerTooBigError { + value: int.to_string(), + integer_type: any::type_name::<i32>(), + })) + } else { + Ok(None) + } + } + } +} diff --git a/starlark-rust/starlark/src/values/types/inline_int.rs b/starlark-rust/starlark/src/values/types/int/inline_int.rs similarity index 94% rename from starlark-rust/starlark/src/values/types/inline_int.rs rename to starlark-rust/starlark/src/values/types/int/inline_int.rs index 16749b82611bb..75655da16c055 100644 --- a/starlark-rust/starlark/src/values/types/inline_int.rs +++ b/starlark-rust/starlark/src/values/types/int/inline_int.rs @@ -31,9 +31,9 @@ use serde::Serialize; use crate::hint; use crate::typing::Ty; -use crate::values::int::PointerI32; +use crate::values::int::pointer_i32::PointerI32; use crate::values::type_repr::StarlarkTypeRepr; -use crate::values::types::int_or_big::StarlarkInt; +use crate::values::types::int::int_or_big::StarlarkInt; use crate::values::AllocFrozenValue; use crate::values::AllocValue; use crate::values::FrozenHeap; @@ -56,7 +56,8 @@ use crate::values::Value; Serialize )] #[serde(transparent)] -pub(crate) struct InlineInt(i32); +#[doc(hidden)] +pub struct InlineInt(i32); impl Debug for InlineInt { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { @@ -203,7 +204,8 @@ impl InlineInt { } } -pub(crate) struct InlineIntOverflow; +#[doc(hidden)] +pub struct InlineIntOverflow; impl TryFrom<i32> for InlineInt { type Error = InlineIntOverflow; @@ -338,14 +340,19 @@ impl Rem for InlineInt { } impl StarlarkTypeRepr for InlineInt { + type Canonical = <PointerI32 as StarlarkTypeRepr>::Canonical; + fn starlark_type_repr() -> Ty { PointerI32::starlark_type_repr() } } impl<'v> UnpackValue<'v> for InlineInt { - fn
unpack_value(value: Value<'v>) -> Option { - value.0.unpack_int() + type Error = crate::Error; + + fn unpack_value_impl(value: Value<'v>) -> crate::Result> { + // TODO(nga): return error on too big integer. + Ok(value.0.unpack_int()) } } @@ -363,7 +370,7 @@ impl AllocFrozenValue for InlineInt { #[cfg(test)] mod tests { - use crate::values::types::inline_int::InlineInt; + use crate::values::types::int::inline_int::InlineInt; #[test] fn test_min_max_for_bits() { diff --git a/starlark-rust/starlark/src/values/types/int_or_big.rs b/starlark-rust/starlark/src/values/types/int/int_or_big.rs similarity index 93% rename from starlark-rust/starlark/src/values/types/int_or_big.rs rename to starlark-rust/starlark/src/values/types/int/int_or_big.rs index 17ef69d2e6730..8af0d9a0d9c85 100644 --- a/starlark-rust/starlark/src/values/types/int_or_big.rs +++ b/starlark-rust/starlark/src/values/types/int/int_or_big.rs @@ -16,6 +16,7 @@ */ use std::cmp::Ordering; +use std::convert::Infallible; use std::ops::Add; use std::ops::BitAnd; use std::ops::BitOr; @@ -26,7 +27,6 @@ use std::ops::Not; use std::ops::Sub; use std::str::FromStr; -use anyhow::Context; use dupe::Dupe; use num_bigint::BigInt; use num_bigint::Sign; @@ -36,15 +36,13 @@ use num_traits::ToPrimitive; use num_traits::Zero; use starlark_syntax::lexer::TokenInt; +use crate as starlark; use crate::typing::Ty; use crate::values::type_repr::StarlarkTypeRepr; use crate::values::types::bigint::StarlarkBigInt; -use crate::values::types::inline_int::InlineInt; +use crate::values::types::int::inline_int::InlineInt; use crate::values::AllocFrozenValue; use crate::values::AllocValue; -use crate::values::FrozenHeap; -use crate::values::FrozenValue; -use crate::values::Heap; use crate::values::UnpackValue; use crate::values::Value; use crate::values::ValueLike; @@ -65,14 +63,33 @@ enum StarlarkIntError { RightShiftNegative, } -#[derive(Debug, Clone, Eq, PartialEq, derive_more::Display, Hash)] -pub(crate) enum StarlarkInt { +#[derive( + Debug, + Clone, + Eq, + PartialEq, + derive_more::Display, + Hash, + AllocValue, + AllocFrozenValue +)] +#[doc(hidden)] +pub enum StarlarkInt { Small(InlineInt), Big(StarlarkBigInt), } -#[derive(Eq, PartialEq, Copy, Clone, Dupe, Debug)] -pub(crate) enum StarlarkIntRef<'v> { +impl StarlarkTypeRepr for StarlarkInt { + type Canonical = Self; + + fn starlark_type_repr() -> Ty { + Ty::int() + } +} + +#[derive(Eq, PartialEq, Copy, Clone, Dupe, Debug, derive_more::Display)] +#[doc(hidden)] +pub enum StarlarkIntRef<'v> { Small(InlineInt), Big(&'v StarlarkBigInt), } @@ -87,7 +104,7 @@ impl FromStr for StarlarkInt { } impl StarlarkInt { - pub(crate) fn from_str_radix(s: &str, base: u32) -> anyhow::Result { + pub(crate) fn from_str_radix(s: &str, base: u32) -> crate::Result { Ok(StarlarkInt::from(TokenInt::from_str_radix(s, base)?)) } @@ -127,6 +144,17 @@ impl StarlarkInt { } impl<'v> StarlarkIntRef<'v> { + #[inline] + pub(crate) fn unpack(value: Value<'v>) -> Option> { + if let Some(int) = value.unpack_inline_int() { + Some(StarlarkIntRef::Small(int)) + } else if let Some(int) = value.downcast_ref() { + Some(StarlarkIntRef::Big(int)) + } else { + None + } + } + pub(crate) fn to_owned(self) -> StarlarkInt { match self { StarlarkIntRef::Small(i) => StarlarkInt::Small(i), @@ -174,7 +202,8 @@ impl<'v> StarlarkIntRef<'v> { let offset = if sig < 0 && a % b != 0 { 1 } else { 0 }; match a.checked_div(b) { Some(div) => Ok(StarlarkInt::Small( - div.checked_sub_i32(offset).context("unreachable")?, + div.checked_sub_i32(offset) + .ok_or_else(|| 
anyhow::anyhow!("unreachable"))?, )), None => Self::floor_div_big_big(&a.to_bigint(), &b.to_bigint()), } @@ -247,7 +276,7 @@ impl<'v> StarlarkIntRef<'v> { .into()); } // In Rust `i32::min_value() % -1` is overflow, but we should eval it to zero. - if a == i32::min_value() && b == -1 { + if a == i32::MIN && b == -1 { return Ok(InlineInt::ZERO); } let r = a % b; @@ -255,7 +284,8 @@ impl<'v> StarlarkIntRef<'v> { Ok(InlineInt::ZERO) } else { Ok(if b.signum() != r.signum() { - r.checked_add(b).context("unreachable")? + r.checked_add(b) + .ok_or_else(|| anyhow::anyhow!("unreachable"))? } else { r }) @@ -376,43 +406,19 @@ impl<'v> StarlarkIntRef<'v> { } } -impl StarlarkTypeRepr for StarlarkInt { - fn starlark_type_repr() -> Ty { - StarlarkBigInt::starlark_type_repr() - } -} - -impl<'v> AllocValue<'v> for StarlarkInt { - fn alloc_value(self, heap: &'v Heap) -> Value<'v> { - match self { - StarlarkInt::Small(i) => heap.alloc(i), - StarlarkInt::Big(i) => heap.alloc(i), - } - } -} - -impl AllocFrozenValue for StarlarkInt { - fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue { - match self { - StarlarkInt::Small(i) => heap.alloc(i), - StarlarkInt::Big(i) => heap.alloc(i), - } - } -} - impl<'v> StarlarkTypeRepr for StarlarkIntRef<'v> { + type Canonical = ::Canonical; + fn starlark_type_repr() -> Ty { StarlarkInt::starlark_type_repr() } } impl<'v> UnpackValue<'v> for StarlarkIntRef<'v> { - fn unpack_value(value: Value<'v>) -> Option { - if let Some(i) = InlineInt::unpack_value(value) { - Some(StarlarkIntRef::Small(i)) - } else { - value.downcast_ref().map(StarlarkIntRef::Big) - } + type Error = Infallible; + + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + Ok(StarlarkIntRef::unpack(value)) } } @@ -657,7 +663,7 @@ impl<'v> PartialOrd> for i32 { mod tests { use std::str::FromStr; - use crate::values::types::int_or_big::StarlarkInt; + use crate::values::types::int::int_or_big::StarlarkInt; fn int(s: &str) -> StarlarkInt { StarlarkInt::from_str(s).unwrap() diff --git a/starlark-rust/starlark/src/values/types/int/pointer_i32.rs b/starlark-rust/starlark/src/values/types/int/pointer_i32.rs new file mode 100644 index 0000000000000..fa21a05e3cd8d --- /dev/null +++ b/starlark-rust/starlark/src/values/types/int/pointer_i32.rs @@ -0,0 +1,280 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +use std::cmp::Ordering; +use std::fmt; +use std::fmt::Debug; +use std::fmt::Display; +use std::hash::Hasher; +use std::mem; +use std::ptr; + +use allocative::Allocative; +use num_bigint::BigInt; +use serde::Serialize; +use serde::Serializer; +use starlark_derive::starlark_value; +use starlark_derive::ProvidesStaticType; +use starlark_map::StarlarkHashValue; +use starlark_map::StarlarkHasher; + +use crate as starlark; +use crate::any::AnyLifetime; +use crate::cast; +use crate::private::Private; +use crate::typing::Ty; +use crate::typing::TyBasic; +use crate::typing::TypingBinOp; +use crate::values::layout::avalue::AValueBasic; +use crate::values::layout::pointer::RawPointer; +use crate::values::layout::vtable::AValueDyn; +use crate::values::layout::vtable::AValueVTable; +use crate::values::layout::vtable::StarlarkValueRawPtr; +use crate::values::types::bigint::StarlarkBigInt; +use crate::values::types::int::inline_int::InlineInt; +use crate::values::types::int::int_or_big::StarlarkInt; +use crate::values::types::int::int_or_big::StarlarkIntRef; +use crate::values::types::num::typecheck::typecheck_num_bin_op; +use crate::values::types::num::typecheck::NumTy; +use crate::values::types::num::value::NumRef; +use crate::values::Heap; +use crate::values::StarlarkValue; +use crate::values::Value; +use crate::values::ValueError; + +/// The result of calling `type()` on integers. +pub const INT_TYPE: &str = "int"; + +// WARNING: This type isn't a real type, a pointer to this is secretly an i32. +// Therefore, don't derive stuff on it, since it will be wrong. +// However, `ProvidesStaticType` promises not to peek at its value, so that's fine. +#[derive(ProvidesStaticType, Allocative)] +#[repr(C)] +pub(crate) struct PointerI32 { + _private: (), +} + +impl PartialEq for PointerI32 { + fn eq(&self, other: &Self) -> bool { + ptr::eq(self, other) + } +} + +impl Eq for PointerI32 {} + +impl Debug for PointerI32 { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + Debug::fmt(&self.get(), f) + } +} + +impl PointerI32 { + const _ASSERTIONS: () = { + assert!(mem::align_of::() == 1); + }; + + #[inline] + pub(crate) unsafe fn from_raw_pointer_unchecked( + raw_pointer: RawPointer, + ) -> &'static PointerI32 { + debug_assert!(raw_pointer.is_int()); + // UB if the pointer isn't aligned, or it is zero. + // Alignment is 1, so that's not an issue. + // And the pointer is not zero because it has `TAG_INT` bit set. + cast::usize_to_ptr(raw_pointer.ptr_value()) + } + + #[inline] + pub(crate) fn get(&self) -> InlineInt { + unsafe { RawPointer::new_unchecked(self as *const Self as usize).unpack_int_unchecked() } + } + + #[inline] + pub(crate) fn as_avalue_dyn(&'static self) -> AValueDyn<'static> { + unsafe { AValueDyn::new(StarlarkValueRawPtr::new_pointer_i32(self), Self::vtable()) } + } + + #[inline] + pub(crate) fn vtable() -> &'static AValueVTable { + AValueVTable::new::>() + } + + /// This operation is expensive, use only if you have to. 
+ fn to_bigint(&self) -> BigInt { + self.get().to_bigint() + } + + pub(crate) fn type_is_pointer_i32<'v, T: StarlarkValue<'v>>() -> bool { + T::static_type_id() == PointerI32::static_type_id() + } +} + +impl Display for PointerI32 { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.get()) + } +} + +/// Define the int type +#[starlark_value(type = INT_TYPE)] +impl<'v> StarlarkValue<'v> for PointerI32 { + type Canonical = StarlarkBigInt; + + fn is_special(_: Private) -> bool + where + Self: Sized, + { + true + } + + fn equals(&self, other: Value) -> crate::Result { + Ok(Some(NumRef::Int(StarlarkIntRef::Small(self.get()))) == other.unpack_num()) + } + + fn to_bool(&self) -> bool { + self.get().to_i32() != 0 + } + fn write_hash(&self, hasher: &mut StarlarkHasher) -> crate::Result<()> { + hasher.write_u64(NumRef::Int(StarlarkIntRef::Small(self.get())).get_hash_64()); + Ok(()) + } + + fn get_hash(&self, _private: Private) -> crate::Result { + Ok(NumRef::Int(StarlarkIntRef::Small(self.get())).get_hash()) + } + + fn plus(&self, _heap: &'v Heap) -> crate::Result> { + Ok(Value::new_int(self.get())) + } + fn minus(&self, heap: &'v Heap) -> crate::Result> { + Ok(heap.alloc(-StarlarkIntRef::Small(self.get()))) + } + fn add(&self, other: Value<'v>, heap: &'v Heap) -> Option>> { + Some(Ok(heap.alloc( + NumRef::Int(StarlarkIntRef::Small(self.get())) + other.unpack_num()?, + ))) + } + fn sub(&self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { + match other.unpack_num() { + Some(other) => Ok(heap.alloc(NumRef::Int(StarlarkIntRef::Small(self.get())) - other)), + None => ValueError::unsupported_with(self, "-", other), + } + } + fn mul(&self, other: Value<'v>, heap: &'v Heap) -> Option>> { + Some(Ok(heap.alloc( + NumRef::Int(StarlarkIntRef::Small(self.get())) * other.unpack_num()?, + ))) + } + fn div(&self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { + match other.unpack_num() { + Some(other) => { + Ok(heap.alloc(NumRef::Int(StarlarkIntRef::Small(self.get())).div(other)?)) + } + None => ValueError::unsupported_with(self, "/", other), + } + } + fn percent(&self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { + match other.unpack_num() { + Some(other) => { + Ok(heap.alloc(NumRef::Int(StarlarkIntRef::Small(self.get())).percent(other)?)) + } + None => ValueError::unsupported_with(self, "%", other), + } + } + fn floor_div(&self, other: Value<'v>, heap: &'v Heap) -> crate::Result> { + match other.unpack_num() { + Some(other) => { + Ok(heap.alloc(NumRef::Int(StarlarkIntRef::Small(self.get())).floor_div(other)?)) + } + None => ValueError::unsupported_with(self, "//", other), + } + } + + fn compare(&self, other: Value) -> crate::Result { + match other.unpack_num() { + None => ValueError::unsupported_with(self, "compare", other), + Some(other) => Ok(NumRef::Int(StarlarkIntRef::Small(self.get())).cmp(&other)), + } + } + + fn bit_and(&self, other: Value, heap: &'v Heap) -> crate::Result> { + match StarlarkIntRef::unpack(other) { + None => ValueError::unsupported_with(self, "&", other), + Some(StarlarkIntRef::Small(i)) => Ok(Value::new_int(self.get() & i)), + Some(StarlarkIntRef::Big(b)) => { + Ok(heap.alloc(StarlarkInt::from(&self.to_bigint() & b.get()))) + } + } + } + + fn bit_or(&self, other: Value, heap: &'v Heap) -> crate::Result> { + match StarlarkIntRef::unpack(other) { + None => ValueError::unsupported_with(self, "|", other), + Some(StarlarkIntRef::Small(i)) => Ok(Value::new_int(self.get() | i)), + Some(StarlarkIntRef::Big(b)) => { + 
Ok(heap.alloc(StarlarkInt::from(&self.to_bigint() | b.get()))) + } + } + } + + fn bit_xor(&self, other: Value, heap: &'v Heap) -> crate::Result> { + match StarlarkIntRef::unpack(other) { + None => ValueError::unsupported_with(self, "^", other), + Some(StarlarkIntRef::Small(i)) => Ok(Value::new_int(self.get() ^ i)), + Some(StarlarkIntRef::Big(b)) => { + Ok(heap.alloc(StarlarkInt::from(&self.to_bigint() ^ b.get()))) + } + } + } + + fn bit_not(&self, _heap: &'v Heap) -> crate::Result> { + Ok(Value::new_int(!self.get())) + } + + fn left_shift(&self, other: Value, heap: &'v Heap) -> crate::Result> { + match StarlarkIntRef::unpack(other) { + None => ValueError::unsupported_with(self, "<<", other), + Some(other) => Ok(heap.alloc(StarlarkIntRef::Small(self.get()).left_shift(other)?)), + } + } + + fn right_shift(&self, other: Value, heap: &'v Heap) -> crate::Result> { + match StarlarkIntRef::unpack(other) { + None => ValueError::unsupported_with(self, ">>", other), + Some(other) => Ok(heap.alloc(StarlarkIntRef::Small(self.get()).right_shift(other)?)), + } + } + + fn bin_op_ty(op: TypingBinOp, rhs: &TyBasic) -> Option { + // This is dead code, because canonical int type is `StarlarkBigInt`, + // but keep for consistency. + typecheck_num_bin_op(NumTy::Int, op, rhs) + } + + fn typechecker_ty(&self) -> Option { + Some(Ty::int()) + } +} + +impl Serialize for PointerI32 { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + self.get().serialize(serializer) + } +} diff --git a/starlark-rust/starlark/src/values/types/int/tests.rs b/starlark-rust/starlark/src/values/types/int/tests.rs new file mode 100644 index 0000000000000..b4d8ba7958311 --- /dev/null +++ b/starlark-rust/starlark/src/values/types/int/tests.rs @@ -0,0 +1,73 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#![cfg(test)] + +use crate::assert; +use crate::values::int::pointer_i32::PointerI32; +use crate::values::types::int::inline_int::InlineInt; +use crate::values::FrozenValue; +use crate::values::Value; + +#[test] +fn test_arithmetic_operators() { + assert::all_true( + r#" ++1 == 1 +-1 == 0 - 1 +1 + 2 == 3 +1 + 2.0 == 3.0 +1 - 2 == -1 +1 - 2.0 == -1.0 +2 * 3 == 6 +2 * 3.0 == 6.0 +4 / 2 == 2.0 +5 % 3 == 2 +4 // 2 == 2 +"#, + ); +} + +#[test] +fn test_minus() { + // `-i32::MIN` should overflow to `StarlarkBigInt`. + assert::eq("2147483648", "-(-2147483647 - 1)") +} + +#[test] +fn test_int_tag() { + fn check(x: InlineInt) { + assert_eq!(x, FrozenValue::new_int(x).unpack_inline_int().unwrap()); + } + + for x in -10..10 { + check(InlineInt::try_from(x).ok().unwrap()) + } + check(InlineInt::MAX); + check(InlineInt::MIN); +} + +#[test] +fn test_alignment_int_pointer() { + assert_eq!(1, std::mem::align_of::()); +} + +#[test] +fn test_as_avalue_dyn() { + // `get_type` calls `as_avalue_dyn` internally. 
+ assert_eq!("int", Value::new_int(InlineInt::MINUS_ONE).get_type()); +} diff --git a/starlark-rust/starlark/src/values/types/known_methods.rs b/starlark-rust/starlark/src/values/types/known_methods.rs index 69b92dc9fc594..d3ee2e0a06ab2 100644 --- a/starlark-rust/starlark/src/values/types/known_methods.rs +++ b/starlark-rust/starlark/src/values/types/known_methods.rs @@ -26,7 +26,8 @@ use crate::values::dict::value::dict_methods; use crate::values::function::NativeMeth; use crate::values::function::NativeMethod; use crate::values::list::value::list_methods; -use crate::values::string::str_methods; +use crate::values::set::value::set_methods; +use crate::values::string::str_type::str_methods; use crate::values::FrozenRef; use crate::values::FrozenValueTyped; use crate::values::Value; @@ -53,8 +54,8 @@ impl KnownMethod { &self, this: Value<'v>, args: &Arguments<'v, '_>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result> { self.imp.invoke(eval, this, args) } } @@ -93,6 +94,7 @@ impl KnownMethods { // We don't need to add all the methods, only the most common ones. This is fine. add_methods(&mut methods, list_methods()); add_methods(&mut methods, dict_methods()); + add_methods(&mut methods, set_methods()); add_methods(&mut methods, str_methods()); KnownMethods { methods } diff --git a/starlark-rust/starlark/src/values/types/list.rs b/starlark-rust/starlark/src/values/types/list.rs new file mode 100644 index 0000000000000..86eccb22ab21c --- /dev/null +++ b/starlark-rust/starlark/src/values/types/list.rs @@ -0,0 +1,31 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! The list type, a mutable sequence of values. + +pub(crate) mod alloc; +pub(crate) mod globals; +mod list_type; +pub(crate) mod methods; +mod refs; +pub(crate) mod unpack; +pub(crate) mod value; + +pub use crate::values::types::list::alloc::AllocList; +pub use crate::values::types::list::list_type::ListType; +pub use crate::values::types::list::refs::ListRef; +pub use crate::values::types::list::unpack::UnpackList; diff --git a/starlark-rust/starlark/src/values/types/list/alloc.rs b/starlark-rust/starlark/src/values/types/list/alloc.rs index 5fa32cb98e344..37a42a7f17eb8 100644 --- a/starlark-rust/starlark/src/values/types/list/alloc.rs +++ b/starlark-rust/starlark/src/values/types/list/alloc.rs @@ -51,6 +51,8 @@ where L: IntoIterator, L::Item: StarlarkTypeRepr, { + type Canonical = as StarlarkTypeRepr>::Canonical; + fn starlark_type_repr() -> Ty { Vec::::starlark_type_repr() } diff --git a/starlark-rust/starlark/src/values/types/list/globals.rs b/starlark-rust/starlark/src/values/types/list/globals.rs new file mode 100644 index 0000000000000..fd36164f9b0c2 --- /dev/null +++ b/starlark-rust/starlark/src/values/types/list/globals.rs @@ -0,0 +1,126 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. 
and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use allocative::Allocative; +use dupe::Dupe; +use once_cell::sync::Lazy; +use starlark_derive::starlark_module; + +use crate as starlark; +use crate::codemap::Span; +use crate::codemap::Spanned; +use crate::environment::GlobalsBuilder; +use crate::typing::call_args::TyCallArgs; +use crate::typing::callable::TyCallable; +use crate::typing::error::TypingOrInternalError; +use crate::typing::function::TyCustomFunctionImpl; +use crate::typing::ParamSpec; +use crate::typing::Ty; +use crate::typing::TyFunction; +use crate::typing::TypingOracleCtx; +use crate::values::function::SpecialBuiltinFunction; +use crate::values::list::value::FrozenList; +use crate::values::list::AllocList; +use crate::values::list::ListRef; +use crate::values::typing::StarlarkIter; +use crate::values::Heap; +use crate::values::Value; +use crate::values::ValueOfUnchecked; + +#[derive(Allocative, Hash, Eq, PartialEq, Ord, PartialOrd, Clone, Debug)] +struct ListType; + +static LIST: Lazy = Lazy::new(|| { + TyFunction::new_with_type_attr( + ParamSpec::pos_only([], [Ty::iter(Ty::any())]), + Ty::any_list(), + Ty::any_list(), + ) +}); + +impl TyCustomFunctionImpl for ListType { + fn is_type(&self) -> bool { + true + } + + fn as_callable(&self) -> TyCallable { + LIST.callable.dupe() + } + + fn validate_call( + &self, + span: Span, + args: &TyCallArgs, + oracle: TypingOracleCtx, + ) -> Result { + oracle.validate_fn_call(span, &LIST.callable, args)?; + + if let Some(arg) = args.pos.first() { + // This is infallible after the check above. + let item = oracle.iter_item(Spanned { + span, + node: &arg.node, + })?; + return Ok(Ty::list(item)); + } + + Ok(Ty::any_list()) + } +} + +#[starlark_module] +pub(crate) fn register_list(globals: &mut GlobalsBuilder) { + /// [list]( + /// https://github.com/bazelbuild/starlark/blob/master/spec.md#list + /// ): construct a list. + /// + /// `list(x)` returns a new list containing the elements of the + /// iterable sequence x. + /// + /// With no argument, `list()` returns a new empty list. 
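To make the effect of `validate_call` above concrete: when the iterated item type of the argument is known, the call gets the precise type `list[T]` instead of the generic `list` fallback. A reduced model of that refinement, with a toy `Ty` enum standing in for the crate's real type machinery (all names here are illustrative):

```rust
#[derive(Clone, Debug, PartialEq)]
enum Ty {
    Any,
    Int,
    List(Box<Ty>),
    Iter(Box<Ty>),
}

/// `list()` is `list[Any]`; `list(xs)` where `xs` yields `T` is `list[T]`.
fn list_return_ty(args: &[Ty]) -> Ty {
    let item = match args.first() {
        Some(Ty::Iter(item)) | Some(Ty::List(item)) => (**item).clone(),
        _ => Ty::Any,
    };
    Ty::List(Box::new(item))
}

fn main() {
    assert_eq!(
        list_return_ty(&[Ty::Iter(Box::new(Ty::Int))]),
        Ty::List(Box::new(Ty::Int))
    );
    assert_eq!(list_return_ty(&[]), Ty::List(Box::new(Ty::Any)));
}
```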
+ /// + /// ``` + /// # starlark::assert::all_true(r#" + /// list() == [] + /// list((1,2,3)) == [1, 2, 3] + /// # "#); + /// # starlark::assert::fail(r#" + /// list("strings are not iterable") # error: not supported + /// # "#, r#"not supported on type"#); + /// ``` + #[starlark( + as_type = FrozenList, + speculative_exec_safe, + special_builtin_function = SpecialBuiltinFunction::List, + ty_custom_function = ListType, + )] + fn list<'v>( + #[starlark(require = pos)] a: Option>>>, + heap: &'v Heap, + ) -> starlark::Result>> { + Ok(ValueOfUnchecked::new(if let Some(a) = a { + if let Some(xs) = ListRef::from_value(a.get()) { + heap.alloc_list(xs.content()) + } else { + let it = a.get().iterate(heap)?; + heap.alloc(AllocList(it)) + } + } else { + heap.alloc(AllocList::EMPTY) + })) + } +} diff --git a/starlark-rust/starlark/src/values/types/list/list_type.rs b/starlark-rust/starlark/src/values/types/list/list_type.rs new file mode 100644 index 0000000000000..d98200a443b39 --- /dev/null +++ b/starlark-rust/starlark/src/values/types/list/list_type.rs @@ -0,0 +1,53 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use crate::typing::Ty; +use crate::values::list::UnpackList; +use crate::values::type_repr::StarlarkTypeRepr; +use crate::values::UnpackAndDiscard; +use crate::values::UnpackValue; +use crate::values::Value; + +/// A list type marker. +/// +/// [`StarlarkTypeRepr`] provides `list[T]`. +/// [`UnpackValue`] implementation verifies the types of items. +pub struct ListType { + _item: std::marker::PhantomData, +} + +impl StarlarkTypeRepr for ListType { + type Canonical = ListType; + + fn starlark_type_repr() -> Ty { + Ty::list(T::starlark_type_repr()) + } +} + +impl<'v, T: UnpackValue<'v>> UnpackValue<'v> for ListType { + type Error = >::Error; + + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + match UnpackList::>::unpack_value_impl(value) { + Ok(Some(_)) => Ok(Some(ListType { + _item: std::marker::PhantomData, + })), + Ok(None) => Ok(None), + Err(e) => Err(e), + } + } +} diff --git a/starlark-rust/starlark/src/values/types/list/methods.rs b/starlark-rust/starlark/src/values/types/list/methods.rs new file mode 100644 index 0000000000000..b746dbb8803db --- /dev/null +++ b/starlark-rust/starlark/src/values/types/list/methods.rs @@ -0,0 +1,323 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Methods for the `list` type. + +use starlark_derive::starlark_module; +use starlark_syntax::convert_indices::convert_index; +use starlark_syntax::convert_indices::convert_indices; + +use crate as starlark; +use crate::environment::MethodsBuilder; +use crate::values::list::ListRef; +use crate::values::none::NoneOr; +use crate::values::none::NoneType; +use crate::values::types::list::value::ListData; +use crate::values::typing::StarlarkIter; +use crate::values::Heap; +use crate::values::Value; +use crate::values::ValueError; +use crate::values::ValueOfUnchecked; + +#[starlark_module] +pub(crate) fn list_methods(builder: &mut MethodsBuilder) { + /// [list.append]( + /// https://github.com/bazelbuild/starlark/blob/master/spec.md#list·append + /// ): append an element to a list. + /// + /// `L.append(x)` appends `x` to the list L, and returns `None`. + /// + /// `append` fails if the list is frozen or has active iterators. + /// + /// ``` + /// # starlark::assert::is_true(r#" + /// x = [] + /// x.append(1) + /// x.append(2) + /// x.append(3) + /// x == [1, 2, 3] + /// # "#); + /// ``` + fn append<'v>( + this: Value<'v>, + #[starlark(require = pos)] el: Value<'v>, + heap: &'v Heap, + ) -> anyhow::Result { + let this = ListData::from_value_mut(this)?; + this.push(el, heap); + Ok(NoneType) + } + + /// [list.clear]( + /// https://github.com/bazelbuild/starlark/blob/master/spec.md#list·clear + /// ): clear a list + /// + /// `L.clear()` removes all the elements of the list L and returns `None`. + /// It fails if the list is frozen or if there are active iterators. + /// + /// ``` + /// # starlark::assert::is_true(r#" + /// x = [1, 2, 3] + /// x.clear() + /// x == [] + /// # "#); + /// ``` + fn clear(this: Value) -> anyhow::Result { + let this = ListData::from_value_mut(this)?; + this.clear(); + Ok(NoneType) + } + + /// [list.extend]( + /// https://github.com/bazelbuild/starlark/blob/master/spec.md#list·extend + /// ): extend a list with another iterable's content. + /// + /// `L.extend(x)` appends the elements of `x`, which must be iterable, to + /// the list L, and returns `None`. + /// + /// `extend` fails if `x` is not iterable, or if the list L is frozen or has + /// active iterators. + /// + /// ``` + /// # starlark::assert::is_true(r#" + /// x = [] + /// x.extend([1, 2, 3]) + /// x.extend(["foo"]) + /// x == [1, 2, 3, "foo"] + /// # "#); + /// ``` + fn extend<'v>( + this: Value<'v>, + #[starlark(require = pos)] other: ValueOfUnchecked<'v, StarlarkIter>>, + heap: &'v Heap, + ) -> starlark::Result { + let res = ListData::from_value_mut(this)?; + if this.ptr_eq(other.get()) { + // If the types alias, we can't borrow the `other` for iteration. + // But we can do something smarter to double the elements + res.double(heap); + } else { + let it = other.get().iterate(heap)?; + res.extend(it, heap); + } + Ok(NoneType) + } + + /// [list.index]( + /// https://github.com/bazelbuild/starlark/blob/master/spec.md#list·index + /// ): get the index of an element in the list. + /// + /// `L.index(x[, start[, end]])` finds `x` within the list L and returns its + /// index. + /// + /// The optional `start` and `end` parameters restrict the portion of + /// list L that is inspected. If provided and not `None`, they must be list + /// indices of type `int`. 
If an index is negative, `len(L)` is effectively + /// added to it, then if the index is outside the range `[0:len(L)]`, the + /// nearest value within that range is used; see [Indexing](#indexing). + /// + /// `index` fails if `x` is not found in L, or if `start` or `end` + /// is not a valid index (`int` or `None`). + /// + /// ``` + /// # starlark::assert::is_true(r#" + /// x = ["b", "a", "n", "a", "n", "a"] + /// # ( + /// x.index("a") == 1 # bAnana + /// # and + /// x.index("a", 2) == 3 # banAna + /// # and + /// x.index("a", -2) == 5 # bananA + /// # )"#); + /// ``` + #[starlark(speculative_exec_safe)] + fn index<'v>( + this: &ListRef<'v>, + #[starlark(require = pos)] needle: Value<'v>, + #[starlark(require = pos, default = NoneOr::None)] start: NoneOr<i32>, + #[starlark(require = pos, default = NoneOr::None)] end: NoneOr<i32>, + ) -> starlark::Result<i32> { + let (start, end) = + convert_indices(this.len() as i32, start.into_option(), end.into_option()); + if let Some(haystack) = this.get(start..end) { + for (i, x) in haystack.iter().enumerate() { + if x.equals(needle)? { + return Ok((i + start) as i32); + } + } + } + Err(anyhow::anyhow!("Element '{}' not found in '{}'", needle, this).into()) + } + + /// [list.insert]( + /// https://github.com/bazelbuild/starlark/blob/master/spec.md#list·insert + /// ): insert an element in a list. + /// + /// `L.insert(i, x)` inserts the value `x` in the list L at index `i`, + /// moving higher-numbered elements along by one. It returns `None`. + /// + /// As usual, the index `i` must be an `int`. If its value is negative, + /// the length of the list is added, then its value is clamped to the + /// nearest value in the range `[0:len(L)]` to yield the effective index. + /// + /// `insert` fails if the list is frozen or has active iterators. + /// + /// ``` + /// # starlark::assert::is_true(r#" + /// x = ["b", "c", "e"] + /// x.insert(0, "a") + /// x.insert(-1, "d") + /// x == ["a", "b", "c", "d", "e"] + /// # "#); + /// ``` + fn insert<'v>( + this: Value<'v>, + #[starlark(require = pos)] index: i32, + #[starlark(require = pos)] el: Value<'v>, + heap: &'v Heap, + ) -> anyhow::Result<NoneType> { + let this = ListData::from_value_mut(this)?; + let index = convert_index(this.len() as i32, index); + this.insert(index, el, heap); + Ok(NoneType) + } + + /// [list.pop]( + /// https://github.com/bazelbuild/starlark/blob/master/spec.md#list·pop + /// ): removes and returns the last element of a list. + /// + /// `L.pop([index])` removes and returns the last element of the list L, or, + /// if the optional index is provided, at that index. + /// + /// `pop` fails if the index is negative or not less than the length of + /// the list, or if the list is frozen or has active iterators. + /// + /// ``` + /// # starlark::assert::is_true(r#" + /// x = [1, 2, 3] + /// # ( + /// x.pop() == 3 + /// # and + /// x.pop() == 2 + /// # and + /// x == [1] + /// # )"#); + /// ``` + fn pop<'v>( + this: Value<'v>, + #[starlark(require = pos)] index: Option<i32>, + ) -> anyhow::Result<Value<'v>> { + let this = ListData::from_value_mut(this)?; + let index = index.unwrap_or_else(|| (this.len() as i32) - 1); + if index < 0 || index >= this.len() as i32 { + return Err(ValueError::IndexOutOfBound(index).into()); + } + Ok(this.remove(index as usize)) + } + + /// [list.remove]( + /// https://github.com/bazelbuild/starlark/blob/master/spec.md#list·remove + /// ): remove a value from a list + /// + /// `L.remove(x)` removes the first occurrence of the value `x` from the + /// list L, and returns `None`.
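The negative-index rule quoted in the `index`/`insert` docs above is worth seeing in isolation. This standalone sketch is written from the documented behaviour; the real helper is `starlark_syntax::convert_indices::convert_index`, whose exact signature may differ:

```rust
/// Effective index for `insert`-style operations: negative indices get
/// `len` added, then the result is clamped into `[0, len]`.
fn convert_index(len: i32, index: i32) -> usize {
    let i = if index < 0 { index + len } else { index };
    i.clamp(0, len) as usize
}

fn main() {
    assert_eq!(convert_index(5, -1), 4); // insert(-1, x): before the last item
    assert_eq!(convert_index(5, -99), 0); // clamped to the front
    assert_eq!(convert_index(5, 99), 5); // clamped to the end
}
```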
diff --git a/starlark-rust/starlark/src/values/types/list/mod.rs b/starlark-rust/starlark/src/values/types/list/mod.rs
deleted file mode 100644
index ab0c072c4c14b..0000000000000
--- a/starlark-rust/starlark/src/values/types/list/mod.rs
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright 2018 The Starlark in Rust Authors.
- * Copyright (c) Facebook, Inc. and its affiliates.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//! The list type, a mutable sequence of values.
-
-pub(crate) mod alloc;
-mod of;
-mod refs;
-pub(crate) mod value;
-
-pub use crate::values::types::list::alloc::AllocList;
-pub use crate::values::types::list::of::ListOf;
-pub use crate::values::types::list::refs::ListRef;
diff --git a/starlark-rust/starlark/src/values/types/list/of.rs b/starlark-rust/starlark/src/values/types/list/of.rs
deleted file mode 100644
index 293bc9687f9e6..0000000000000
--- a/starlark-rust/starlark/src/values/types/list/of.rs
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright 2018 The Starlark in Rust Authors.
- * Copyright (c) Facebook, Inc. and its affiliates.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -use std::marker::PhantomData; -use std::ops::Deref; - -use starlark_derive::Trace; - -use crate as starlark; -use crate::typing::Ty; -use crate::values::list::ListRef; -use crate::values::type_repr::StarlarkTypeRepr; -use crate::values::UnpackValue; -use crate::values::Value; - -/// Like `ValueOf`, but only validates item types; does not construct or store a -/// vec. Use `to_vec` to get a Vec. -#[derive(Debug, Trace)] -pub struct ListOf<'v, V: UnpackValue<'v>> { - value: Value<'v>, - phantom: PhantomData, -} - -impl<'v, V: UnpackValue<'v>> ListOf<'v, V> { - /// Collect the list elements into a `Vec`. - pub fn to_vec(&self) -> Vec { - ListRef::from_value(self.value) - .expect("already validated as a list") - .iter() - .map(|v| V::unpack_value(v).expect("already validated value")) - .collect() - } -} - -impl<'v, V: UnpackValue<'v>> StarlarkTypeRepr for ListOf<'v, V> { - fn starlark_type_repr() -> Ty { - Vec::::starlark_type_repr() - } -} - -impl<'v, V: UnpackValue<'v>> UnpackValue<'v> for ListOf<'v, V> { - fn expected() -> String { - format!("list of {}", V::expected()) - } - - fn unpack_value(value: Value<'v>) -> Option { - let list = ListRef::from_value(value)?; - if list.iter().all(|v| V::unpack_value(v).is_some()) { - Some(ListOf { - value, - phantom: PhantomData, - }) - } else { - None - } - } -} - -impl<'v, V: UnpackValue<'v>> Deref for ListOf<'v, V> { - type Target = Value<'v>; - - fn deref(&self) -> &Self::Target { - &self.value - } -} diff --git a/starlark-rust/starlark/src/values/types/list/refs.rs b/starlark-rust/starlark/src/values/types/list/refs.rs index 2140f6ab09cce..78fd0d86af58c 100644 --- a/starlark-rust/starlark/src/values/types/list/refs.rs +++ b/starlark-rust/starlark/src/values/types/list/refs.rs @@ -15,11 +15,16 @@ * limitations under the License. */ +use std::convert::Infallible; use std::fmt; use std::fmt::Display; +use std::iter; use std::ops::Deref; +use std::slice; + +use ref_cast::ref_cast_custom; +use ref_cast::RefCastCustom; -use crate as starlark; use crate::coerce::coerce; use crate::typing::Ty; use crate::values::list::value::display_list; @@ -27,7 +32,6 @@ use crate::values::list::value::FrozenListData; use crate::values::list::value::ListGen; use crate::values::type_repr::StarlarkTypeRepr; use crate::values::types::list::value::ListData; -use crate::values::Coerce; use crate::values::FrozenValue; use crate::values::UnpackValue; use crate::values::Value; @@ -35,14 +39,14 @@ use crate::values::ValueLike; /// Reference to list content (mutable or frozen). #[repr(transparent)] -#[derive(Coerce)] +#[derive(RefCastCustom)] pub struct ListRef<'v> { pub(crate) content: [Value<'v>], } /// Reference to frozen list content. #[repr(transparent)] -#[derive(Coerce)] +#[derive(RefCastCustom)] pub struct FrozenListRef { pub(crate) content: [FrozenValue], } @@ -51,8 +55,12 @@ impl<'v> ListRef<'v> { /// `type([])`, which is `"list"`. 
pub const TYPE: &'static str = ListData::TYPE; - pub(crate) fn new<'a>(slice: &'a [Value<'v>]) -> &'a ListRef<'v> { - coerce(slice) + #[ref_cast_custom] + pub(crate) fn new<'a>(slice: &'a [Value<'v>]) -> &'a ListRef<'v>; + + /// Empty list reference. + pub fn empty() -> &'v ListRef<'v> { + ListRef::new(&[]) } /// List elements. @@ -61,7 +69,7 @@ impl<'v> ListRef<'v> { } /// Iterate over the elements in the list. - pub fn iter<'a>(&'a self) -> impl ExactSizeIterator> + 'a + pub fn iter<'a>(&'a self) -> iter::Copied>> where 'v: 'a, { @@ -90,9 +98,8 @@ impl FrozenListRef { /// `type([])`, which is `"list"`. pub const TYPE: &'static str = ListRef::TYPE; - fn new(slice: &[FrozenValue]) -> &FrozenListRef { - coerce(slice) - } + #[ref_cast_custom] + fn new(slice: &[FrozenValue]) -> &FrozenListRef; /// Downcast to the frozen list. /// @@ -140,33 +147,34 @@ impl Display for FrozenListRef { } impl<'v> StarlarkTypeRepr for &'v ListRef<'v> { + type Canonical = > as StarlarkTypeRepr>::Canonical; + fn starlark_type_repr() -> Ty { Vec::>::starlark_type_repr() } } impl<'v> StarlarkTypeRepr for &'v FrozenListRef { + type Canonical = as StarlarkTypeRepr>::Canonical; + fn starlark_type_repr() -> Ty { Vec::::starlark_type_repr() } } impl<'v> UnpackValue<'v> for &'v ListRef<'v> { - fn expected() -> String { - "list".to_owned() - } + type Error = Infallible; - fn unpack_value(value: Value<'v>) -> Option { - ListRef::from_value(value) + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + Ok(ListRef::from_value(value)) } } impl<'v> UnpackValue<'v> for &'v FrozenListRef { - fn expected() -> String { - "frozen list".to_owned() - } + type Error = crate::Error; - fn unpack_value(value: Value<'v>) -> Option { - FrozenListRef::from_value(value) + fn unpack_value_impl(value: Value<'v>) -> crate::Result> { + // TODO(nga): error if not frozen. + Ok(FrozenListRef::from_value(value)) } } diff --git a/starlark-rust/starlark/src/values/types/list/unpack.rs b/starlark-rust/starlark/src/values/types/list/unpack.rs new file mode 100644 index 0000000000000..eb72b7a2961c1 --- /dev/null +++ b/starlark-rust/starlark/src/values/types/list/unpack.rs @@ -0,0 +1,116 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::slice; +use std::vec; + +use crate::typing::Ty; +use crate::values::list::ListRef; +use crate::values::list::ListType; +use crate::values::type_repr::StarlarkTypeRepr; +use crate::values::UnpackValue; +use crate::values::Value; + +/// Unpack a value of type `list` into a vec. +#[derive(Debug, Clone, Eq, PartialEq, Hash, Ord, PartialOrd)] +pub struct UnpackList { + /// Unpacked items. 
+ pub items: Vec, +} + +impl Default for UnpackList { + fn default() -> Self { + UnpackList { items: Vec::new() } + } +} + +impl StarlarkTypeRepr for UnpackList { + type Canonical = as StarlarkTypeRepr>::Canonical; + + fn starlark_type_repr() -> Ty { + ListType::::starlark_type_repr() + } +} + +impl<'v, T: UnpackValue<'v>> UnpackValue<'v> for UnpackList { + type Error = >::Error; + + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + let Some(list) = <&ListRef>::unpack_value_opt(value) else { + return Ok(None); + }; + // TODO(nga): should not allocate if the first element is of the wrong type. + let mut items = Vec::with_capacity(list.len()); + for v in list.iter() { + let Some(v) = T::unpack_value_impl(v)? else { + return Ok(None); + }; + items.push(v); + } + Ok(Some(UnpackList { items })) + } +} + +impl IntoIterator for UnpackList { + type Item = T; + type IntoIter = vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.items.into_iter() + } +} + +impl<'a, T> IntoIterator for &'a UnpackList { + type Item = &'a T; + type IntoIter = slice::Iter<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + self.items.iter() + } +} + +impl<'a, T> IntoIterator for &'a mut UnpackList { + type Item = &'a mut T; + type IntoIter = slice::IterMut<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + self.items.iter_mut() + } +} + +#[cfg(test)] +mod tests { + use crate::values::list::UnpackList; + use crate::values::Heap; + use crate::values::UnpackValue; + + #[test] + fn test_unpack() { + let heap = Heap::new(); + let v = heap.alloc(vec!["a", "b"]); + assert_eq!( + vec!["a", "b"], + UnpackList::<&str>::unpack_value(v).unwrap().unwrap().items + ); + assert!(UnpackList::::unpack_value(v).unwrap().is_none()); + assert!( + UnpackList::<&str>::unpack_value(heap.alloc(1)) + .unwrap() + .is_none() + ); + } +} diff --git a/starlark-rust/starlark/src/values/types/list/value.rs b/starlark-rust/starlark/src/values/types/list/value.rs index 2d29455f57576..450a8fdce2b44 100644 --- a/starlark-rust/starlark/src/values/types/list/value.rs +++ b/starlark-rust/starlark/src/values/types/list/value.rs @@ -19,6 +19,7 @@ use std::any::TypeId; use std::cell::Cell; use std::cmp; use std::cmp::Ordering; +use std::convert::Infallible; use std::fmt; use std::fmt::Debug; use std::fmt::Display; @@ -29,7 +30,6 @@ use allocative::Allocative; use display_container::fmt_container; use serde::Serialize; use starlark_derive::starlark_value; -use starlark_derive::StarlarkDocs; use starlark_derive::Trace; use starlark_syntax::slice_vec_ext::SliceExt; use starlark_syntax::slice_vec_ext::VecExt; @@ -50,8 +50,8 @@ use crate::values::error::ValueError; use crate::values::index::apply_slice; use crate::values::index::convert_index; use crate::values::layout::avalue::alloc_static; +use crate::values::layout::avalue::AValueFrozenList; use crate::values::layout::avalue::AValueImpl; -use crate::values::layout::avalue::Direct; use crate::values::layout::heap::repr::AValueRepr; use crate::values::list::ListRef; use crate::values::type_repr::StarlarkTypeRepr; @@ -67,16 +67,7 @@ use crate::values::Value; use crate::values::ValueLike; use crate::values::ValueTyped; -#[derive( - Clone, - Default, - Trace, - Debug, - ProvidesStaticType, - StarlarkDocs, - Allocative -)] -#[starlark_docs(builtin = "standard")] +#[derive(Clone, Default, Trace, Debug, ProvidesStaticType, Allocative)] #[repr(transparent)] pub(crate) struct ListGen(pub(crate) T); @@ -110,8 +101,8 @@ pub(crate) type FrozenList = ListGen; pub(crate) type List<'v> = 
ListGen>; -pub(crate) static VALUE_EMPTY_FROZEN_LIST: AValueRepr>> = - alloc_static(Direct, unsafe { ListGen(FrozenListData::new(0)) }); +pub(crate) static VALUE_EMPTY_FROZEN_LIST: AValueRepr> = + alloc_static(unsafe { ListGen(FrozenListData::new(0)) }); impl ListGen { pub(crate) fn offset_of_content() -> usize { @@ -193,6 +184,21 @@ impl<'v> ListData<'v> { #[inline] pub(crate) fn extend>>(&self, iter: I, heap: &'v Heap) { + match self.try_extend(iter.into_iter().map(Ok), heap) { + Ok(()) => {} + Err(e) => { + let e: Infallible = e; + match e {} + } + } + } + + #[inline] + pub(crate) fn try_extend, E>>>( + &self, + iter: I, + heap: &'v Heap, + ) -> Result<(), E> { let iter = iter.into_iter(); let (lo, hi) = iter.size_hint(); match hi { @@ -200,21 +206,22 @@ impl<'v> ListData<'v> { // Exact size iterator. self.reserve_additional(lo, heap); // Extend will panic if upper bound is provided incorrectly. - self.content.get().extend(iter); + self.content.get().try_extend(iter)?; } Some(hi) if self.content.get().remaining_capacity() >= hi => { // Enough capacity for upper bound. // Extend will panic if upper bound is provided incorrectly. - self.content.get().extend(iter); + self.content.get().try_extend(iter)?; } _ => { // Default slow version. self.reserve_additional(iter.size_hint().0, heap); for item in iter { - self.push(item, heap); + self.push(item?, heap); } } } + Ok(()) } pub(crate) fn push(&self, value: Value<'v>, heap: &'v Heap) { @@ -252,6 +259,8 @@ impl<'a, V: 'a> StarlarkTypeRepr for &'a [V] where &'a V: StarlarkTypeRepr, { + type Canonical = as StarlarkTypeRepr>::Canonical; + fn starlark_type_repr() -> Ty { Vec::<&'a V>::starlark_type_repr() } @@ -340,7 +349,7 @@ impl Display for FrozenListData { // This trait need to be `pub(crate)` because `ListGen` is. pub(crate) trait ListLike<'v>: Debug + Allocative { fn content(&self) -> &[Value<'v>]; - fn set_at(&self, i: usize, v: Value<'v>) -> anyhow::Result<()>; + fn set_at(&self, i: usize, v: Value<'v>) -> crate::Result<()>; // These functions are unsafe for the same reason // `StarlarkValue` iterator functions are unsafe. 
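A note on the `extend`/`try_extend` split above: the infallible path reuses the fallible one by instantiating the error type with `Infallible`, then discharging the impossible `Err` arm with an empty `match`. A minimal self-contained sketch of that pattern:

```
use std::convert::Infallible;

fn fallible_sum<E>(items: impl IntoIterator<Item = Result<u32, E>>) -> Result<u32, E> {
    let mut total = 0;
    for item in items {
        total += item?;
    }
    Ok(total)
}

fn infallible_sum(items: impl IntoIterator<Item = u32>) -> u32 {
    // Reuse the fallible version with E = Infallible; `Infallible` has no
    // variants, so the empty match proves the Err arm is unreachable.
    match fallible_sum(items.into_iter().map(Ok::<u32, Infallible>)) {
        Ok(total) => total,
        Err(e) => match e {},
    }
}

fn main() {
    assert_eq!(infallible_sum([1, 2, 3]), 6);
}
```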
@@ -355,7 +364,7 @@ impl<'v> ListLike<'v> for ListData<'v> { self.content.get().as_ref().content() } - fn set_at(&self, i: usize, v: Value<'v>) -> anyhow::Result<()> { + fn set_at(&self, i: usize, v: Value<'v>) -> crate::Result<()> { self.check_can_mutate()?; self.content.get().set_at(i, v); Ok(()) @@ -384,8 +393,10 @@ impl<'v> ListLike<'v> for FrozenListData { coerce(self.content()) } - fn set_at(&self, _i: usize, _v: Value<'v>) -> anyhow::Result<()> { - Err(ValueError::CannotMutateImmutableValue.into()) + fn set_at(&self, _i: usize, _v: Value<'v>) -> crate::Result<()> { + Err(crate::Error::new_other( + ValueError::CannotMutateImmutableValue, + )) } unsafe fn iter_size_hint(&self, index: usize) -> (usize, Option) { @@ -417,7 +428,7 @@ pub(crate) fn display_list(xs: &[Value], f: &mut fmt::Formatter<'_>) -> fmt::Res pub(crate) fn list_methods() -> Option<&'static Methods> { static RES: MethodsStatic = MethodsStatic::new(); - RES.methods(crate::stdlib::list::list_methods) + RES.methods(crate::values::types::list::methods::list_methods) } #[starlark_value(type = ListData::TYPE)] @@ -458,30 +469,30 @@ where !self.0.content().is_empty() } - fn equals(&self, other: Value<'v>) -> anyhow::Result { + fn equals(&self, other: Value<'v>) -> crate::Result { match ListRef::from_value(other) { None => Ok(false), Some(other) => equals_slice(self.0.content(), &other.content, |x, y| x.equals(*y)), } } - fn compare(&self, other: Value<'v>) -> anyhow::Result { + fn compare(&self, other: Value<'v>) -> crate::Result { match ListRef::from_value(other) { None => ValueError::unsupported_with(self, "cmp()", other), Some(other) => compare_slice(self.0.content(), &other.content, |x, y| x.compare(*y)), } } - fn at(&self, index: Value, _heap: &'v Heap) -> anyhow::Result> { + fn at(&self, index: Value, _heap: &'v Heap) -> crate::Result> { let i = convert_index(index, self.0.content().len() as i32)? as usize; Ok(self.0.content()[i]) } - fn length(&self) -> anyhow::Result { + fn length(&self) -> crate::Result { Ok(self.0.content().len() as i32) } - fn is_in(&self, other: Value<'v>) -> anyhow::Result { + fn is_in(&self, other: Value<'v>) -> crate::Result { for x in self.0.content().iter() { if x.equals(other)? 
{ return Ok(true); @@ -496,13 +507,13 @@ where stop: Option, stride: Option, heap: &'v Heap, - ) -> anyhow::Result> { + ) -> crate::Result> { let xs = self.0.content(); let res = apply_slice(xs, start, stop, stride)?; Ok(heap.alloc_list(&res)) } - unsafe fn iterate(&self, me: Value<'v>, _heap: &'v Heap) -> anyhow::Result> { + unsafe fn iterate(&self, me: Value<'v>, _heap: &'v Heap) -> crate::Result> { Ok(self.0.new_iter(me)) } @@ -518,13 +529,17 @@ where self.0.iter_stop(); } - fn add(&self, other: Value<'v>, heap: &'v Heap) -> Option>> { + fn add(&self, other: Value<'v>, heap: &'v Heap) -> Option>> { ListRef::from_value(other) .map(|other| Ok(heap.alloc_list_concat(self.0.content(), other.content()))) } - fn mul(&self, other: Value, heap: &'v Heap) -> Option>> { - let l = i32::unpack_value(other)?; + fn mul(&self, other: Value, heap: &'v Heap) -> Option>> { + let l = match i32::unpack_value(other) { + Ok(Some(l)) => l, + Ok(None) => return None, + Err(e) => return Some(Err(e)), + }; let mut result = Vec::with_capacity(self.0.content().len() * cmp::max(0, l) as usize); for _ in 0..l { result.extend(self.0.content().iter()); @@ -532,11 +547,11 @@ where Some(Ok(heap.alloc_list(&result))) } - fn rmul(&self, lhs: Value<'v>, heap: &'v Heap) -> Option>> { + fn rmul(&self, lhs: Value<'v>, heap: &'v Heap) -> Option>> { self.mul(lhs, heap) } - fn set_at(&self, index: Value<'v>, alloc_value: Value<'v>) -> anyhow::Result<()> { + fn set_at(&self, index: Value<'v>, alloc_value: Value<'v>) -> crate::Result<()> { let i = convert_index(index, self.0.content().len() as i32)? as usize; self.0.set_at(i, alloc_value) } diff --git a/starlark-rust/starlark/src/values/types/list_or_tuple.rs b/starlark-rust/starlark/src/values/types/list_or_tuple.rs new file mode 100644 index 0000000000000..693f4ce8ce90f --- /dev/null +++ b/starlark-rust/starlark/src/values/types/list_or_tuple.rs @@ -0,0 +1,137 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Utility for unpacking a value of type `list[T]` or `tuple[T, ...]` into a vec. + +use std::slice; +use std::vec; + +use either::Either; + +use crate::typing::Ty; +use crate::values::list::UnpackList; +use crate::values::tuple::UnpackTuple; +use crate::values::type_repr::StarlarkTypeRepr; +use crate::values::UnpackValue; +use crate::values::Value; + +/// Unpack a value of type `list[T]` or `tuple[T, ...]` into a vec. +#[derive(Debug, Clone, Eq, PartialEq, Hash, Ord, PartialOrd)] +pub struct UnpackListOrTuple { + /// Unpacked items of the list or tuple. 
+ pub items: Vec, +} + +impl Default for UnpackListOrTuple { + fn default() -> Self { + UnpackListOrTuple { items: Vec::new() } + } +} + +impl StarlarkTypeRepr for UnpackListOrTuple { + type Canonical = , UnpackTuple> as StarlarkTypeRepr>::Canonical; + + fn starlark_type_repr() -> Ty { + Either::, UnpackTuple>::starlark_type_repr() + } +} + +impl<'v, T: UnpackValue<'v>> UnpackValue<'v> for UnpackListOrTuple { + type Error = >::Error; + + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + match Either::, UnpackTuple>::unpack_value_impl(value) + .map_err(|e| e.into_inner())? + { + Some(Either::Left(l)) => Ok(Some(UnpackListOrTuple { items: l.items })), + Some(Either::Right(r)) => Ok(Some(UnpackListOrTuple { items: r.items })), + None => Ok(None), + } + } +} + +impl IntoIterator for UnpackListOrTuple { + type Item = T; + type IntoIter = vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.items.into_iter() + } +} + +impl<'a, T> IntoIterator for &'a UnpackListOrTuple { + type Item = &'a T; + type IntoIter = slice::Iter<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + self.items.iter() + } +} + +impl<'a, T> IntoIterator for &'a mut UnpackListOrTuple { + type Item = &'a mut T; + type IntoIter = slice::IterMut<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + self.items.iter_mut() + } +} + +#[cfg(test)] +mod tests { + use crate::values::types::list_or_tuple::UnpackListOrTuple; + use crate::values::Heap; + use crate::values::UnpackValue; + + #[test] + fn test_unpack() { + let heap = Heap::new(); + let list = heap.alloc(vec!["a", "b"]); + let tuple = heap.alloc(("a", "b")); + let list_of_ints = heap.alloc(vec![1, 2]); + let tuple_of_ints = heap.alloc((1, 2)); + assert_eq!( + vec!["a", "b"], + UnpackListOrTuple::<&str>::unpack_value(list) + .unwrap() + .unwrap() + .items + ); + assert_eq!( + vec!["a", "b"], + UnpackListOrTuple::<&str>::unpack_value(tuple) + .unwrap() + .unwrap() + .items + ); + assert!( + UnpackListOrTuple::<&str>::unpack_value(list_of_ints) + .unwrap() + .is_none() + ); + assert!( + UnpackListOrTuple::<&str>::unpack_value(tuple_of_ints) + .unwrap() + .is_none() + ); + assert!( + UnpackListOrTuple::<&str>::unpack_value(heap.alloc(1)) + .unwrap() + .is_none() + ); + } +} diff --git a/starlark-rust/starlark/src/values/types/mod.rs b/starlark-rust/starlark/src/values/types/mod.rs deleted file mode 100644 index 8417a6ffc5f47..0000000000000 --- a/starlark-rust/starlark/src/values/types/mod.rs +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -pub mod any; -pub mod any_array; -pub mod array; -pub mod bigint; -pub mod bool; -pub mod dict; -pub(crate) mod ellipsis; -pub mod enumeration; -pub mod exported_name; -pub mod float; -pub mod function; -pub(crate) mod inline_int; -pub mod int; -pub(crate) mod int_or_big; -pub(crate) mod known_methods; -pub mod list; -pub mod none; -pub mod range; -pub mod record; -pub mod regex; -pub mod starlark_value_as_type; -pub mod string; -pub mod structs; -pub mod tuple; -pub(crate) mod type_instance_id; -pub(crate) mod unbound; diff --git a/starlark-rust/starlark/src/values/types/namespace.rs b/starlark-rust/starlark/src/values/types/namespace.rs new file mode 100644 index 0000000000000..14f1e6cda06d7 --- /dev/null +++ b/starlark-rust/starlark/src/values/types/namespace.rs @@ -0,0 +1,25 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Provides the `namespace` type and global + +pub(crate) mod globals; +mod typing; +mod value; + +pub use value::FrozenNamespace; +pub use value::Namespace; diff --git a/starlark-rust/starlark/src/values/types/namespace/globals.rs b/starlark-rust/starlark/src/values/types/namespace/globals.rs new file mode 100644 index 0000000000000..055fe9962e9fe --- /dev/null +++ b/starlark-rust/starlark/src/values/types/namespace/globals.rs @@ -0,0 +1,39 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +use starlark_derive::starlark_module; + +use crate as starlark; +use crate::environment::GlobalsBuilder; +use crate::eval::Arguments; +use crate::values::namespace::typing::TyNamespaceFunction; +use crate::values::namespace::FrozenNamespace; +use crate::values::namespace::Namespace; +use crate::values::Heap; + +#[starlark_module] +pub fn register_namespace(builder: &mut GlobalsBuilder) { + #[starlark( + ty_custom_function = TyNamespaceFunction, + as_type = FrozenNamespace, + )] + fn namespace<'v>(args: &Arguments<'v, '_>, heap: &'v Heap) -> starlark::Result> { + args.no_positional_args(heap)?; + + Ok(Namespace::new(args.names_map()?)) + } +} diff --git a/starlark-rust/starlark/src/values/types/namespace/typing.rs b/starlark-rust/starlark/src/values/types/namespace/typing.rs new file mode 100644 index 0000000000000..1e0585d8e5b56 --- /dev/null +++ b/starlark-rust/starlark/src/values/types/namespace/typing.rs @@ -0,0 +1,135 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::fmt; +use std::fmt::Display; +use std::fmt::Formatter; + +use allocative::Allocative; +use dupe::Dupe; +use starlark_map::sorted_map::SortedMap; + +use crate::codemap::Span; +use crate::typing::call_args::TyCallArgs; +use crate::typing::callable::TyCallable; +use crate::typing::custom::TyCustomImpl; +use crate::typing::error::TypingNoContextError; +use crate::typing::error::TypingOrInternalError; +use crate::typing::function::TyCustomFunctionImpl; +use crate::typing::oracle::ctx::TypingOracleCtx; +use crate::typing::ParamSpec; +use crate::typing::Ty; +use crate::util::arc_str::ArcStr; +use crate::values::starlark_type_id::StarlarkTypeId; +use crate::values::types::namespace::value::Namespace; +use crate::values::typing::type_compiled::alloc::TypeMatcherAlloc; +use crate::values::typing::type_compiled::matcher::TypeMatcher; +use crate::values::Value; + +#[derive( + Allocative, Clone, Copy, Dupe, Debug, Eq, PartialEq, Hash, Ord, PartialOrd +)] +pub(super) struct TyNamespaceFunction; + +impl TyCustomFunctionImpl for TyNamespaceFunction { + fn as_callable(&self) -> TyCallable { + // TODO(nga): this should be obtained from function signature from function definition. + TyCallable::new( + ParamSpec::kwargs(Ty::any()), + Ty::custom(TyNamespace { + fields: Default::default(), + extra: true, + }), + ) + } + + fn validate_call( + &self, + _span: Span, + args: &TyCallArgs, + oracle: TypingOracleCtx, + ) -> Result { + if let [pos, ..] 
= args.pos.as_slice() { + return Err(oracle.msg_error(pos.span, "Positional arguments not allowed")); + } + let mut fields = Vec::new(); + for named in &args.named { + let (name, ty) = &named.node; + fields.push((ArcStr::from(*name), ty.clone())); + } + let extra = args.kwargs.is_some(); + Ok(Ty::custom(TyNamespace { + fields: SortedMap::from_iter(fields), + extra, + })) + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Allocative)] +pub(super) struct TyNamespace { + pub(super) fields: SortedMap, + /// [`true`] if there might be additional fields not captured above, + /// [`false`] if this struct has no extra members. + pub(super) extra: bool, +} + +impl TyCustomImpl for TyNamespace { + fn as_name(&self) -> Option<&str> { + Some("namespace") + } + + fn attribute(&self, attr: &str) -> Result { + match self.fields.get(attr) { + Some(ty) => Ok(ty.dupe()), + None => { + if self.extra { + Ok(Ty::any()) + } else { + Err(TypingNoContextError) + } + } + } + } + + fn matcher(&self, factory: T) -> T::Result { + #[derive(Allocative, Eq, PartialEq, Hash, Debug, Clone, Copy, Dupe)] + struct NamespaceMatcher; + + impl TypeMatcher for NamespaceMatcher { + fn matches(&self, value: Value) -> bool { + value.starlark_type_id() == StarlarkTypeId::of::>() + } + } + + factory.alloc(NamespaceMatcher) + } +} + +impl Display for TyNamespace { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + let TyNamespace { fields, extra } = self; + display_container::fmt_container( + f, + "namespace(", + ")", + display_container::iter_display_chain( + fields.iter().map(|(k, v)| format!("{} = {}", k, v)), + extra.then_some(".."), + ), + ) + } +} diff --git a/starlark-rust/starlark/src/values/types/namespace/value.rs b/starlark-rust/starlark/src/values/types/namespace/value.rs new file mode 100644 index 0000000000000..06aa14d027f85 --- /dev/null +++ b/starlark-rust/starlark/src/values/types/namespace/value.rs @@ -0,0 +1,177 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +use std::fmt; +use std::fmt::Display; + +use allocative::Allocative; +use display_container::fmt_keyed_container; +use serde::Serialize; +use starlark_derive::starlark_value; +use starlark_derive::Freeze; +use starlark_derive::Trace; +use starlark_map::small_map::SmallMap; +use starlark_map::Hashed; + +use crate as starlark; +use crate::any::ProvidesStaticType; +use crate::coerce::Coerce; +use crate::docs::DocItem; +use crate::docs::DocModule; +use crate::starlark_complex_value; +use crate::typing::Ty; +use crate::util::arc_str::ArcStr; +use crate::values::types::namespace::typing::TyNamespace; +use crate::values::FrozenValue; +use crate::values::Heap; +use crate::values::StarlarkValue; +use crate::values::StringValueLike; +use crate::values::Value; +use crate::values::ValueLike; + +/// The return value of `namespace()` +#[derive(Clone, Debug, Trace, Freeze, ProvidesStaticType, Allocative)] +#[repr(C)] +pub struct NamespaceGen<'v, V: ValueLike<'v>> { + fields: SmallMap, +} + +impl<'v, V: ValueLike<'v>> NamespaceGen<'v, V> { + pub fn new(fields: SmallMap) -> Self { + Self { fields } + } + + pub fn get(&self, key: &str) -> Option { + self.fields.get_hashed(Hashed::new(key)).copied() + } +} + +unsafe impl<'v> Coerce>> for NamespaceGen<'static, FrozenValue> {} + +starlark_complex_value!(pub Namespace<'v>); + +impl<'v, V: ValueLike<'v>> Display for NamespaceGen<'v, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt_keyed_container( + f, + "namespace(", + ")", + "=", + self.fields.iter().map(|(k, v)| (k.as_str(), v)), + ) + } +} + +#[starlark_value(type = "namespace")] +impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for NamespaceGen<'v, V> +where + Self: ProvidesStaticType<'v>, +{ + fn collect_repr_cycle(&self, collector: &mut String) { + collector.push_str("namespace(...)"); + } + + fn get_attr(&self, attribute: &str, heap: &'v Heap) -> Option> { + self.get_attr_hashed(Hashed::new(attribute), heap) + } + + fn get_attr_hashed(&self, attribute: Hashed<&str>, _heap: &'v Heap) -> Option> { + self.fields.get_hashed(attribute).map(|v| v.to_value()) + } + + fn dir_attr(&self) -> Vec { + self.fields.keys().map(|x| x.as_str().to_owned()).collect() + } + + fn documentation(&self) -> DocItem { + DocItem::Module(DocModule { + docs: None, + members: self + .fields + .iter() + .map(|(k, v)| (k.as_str().to_owned(), v.to_value().documentation())) + .collect(), + }) + } + + fn get_type_starlark_repr() -> Ty { + Ty::custom(TyNamespace { + fields: Default::default(), + extra: true, + }) + } + + fn typechecker_ty(&self) -> Option { + Some(Ty::custom(TyNamespace { + fields: self + .fields + .iter() + .map(|(name, value)| (ArcStr::from(name.as_str()), Ty::of_value(value.to_value()))) + .collect(), + extra: false, + })) + } +} + +impl<'v, V: ValueLike<'v>> Serialize for NamespaceGen<'v, V> { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + serializer.collect_map(self.fields.iter()) + } +} + +#[cfg(test)] +mod tests { + + use crate::assert; + + #[test] + fn test_repr() { + assert::eq("repr(namespace(a=1, b=[]))", "'namespace(a=1, b=[])'"); + assert::eq("str(namespace(a=1, b=[]))", "'namespace(a=1, b=[])'"); + } + + #[test] + fn test_repr_cycle() { + assert::eq( + "l = []; s = namespace(f=l); l.append(s); repr(s)", + "'namespace(f=[namespace(...)])'", + ); + assert::eq( + "l = []; s = namespace(f=l); l.append(s); str(s)", + "'namespace(f=[namespace(...)])'", + ); + } + + #[test] + fn test_to_json_cycle() { + assert::fail( + "l = []; s = namespace(f=l); 
l.append(s); json.encode(s)", + "Cycle detected when serializing value of type `namespace` to JSON", + ); + } + + #[test] + fn test_kwargs() { + assert::eq( + "d = {'b': 2}; s = namespace(a=1, **d); str(s)", + "'namespace(a=1, b=2)'", + ); + } +} diff --git a/starlark-rust/starlark/src/values/types/none.rs b/starlark-rust/starlark/src/values/types/none.rs new file mode 100644 index 0000000000000..20cb199d5adc2 --- /dev/null +++ b/starlark-rust/starlark/src/values/types/none.rs @@ -0,0 +1,25 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! The `None` type. + +pub(crate) mod globals; +mod none_or; +pub(crate) mod none_type; + +pub use none_or::NoneOr; +pub use none_type::NoneType; diff --git a/starlark-rust/starlark/src/values/types/none/globals.rs b/starlark-rust/starlark/src/values/types/none/globals.rs new file mode 100644 index 0000000000000..704528ec0af27 --- /dev/null +++ b/starlark-rust/starlark/src/values/types/none/globals.rs @@ -0,0 +1,29 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use starlark_derive::starlark_module; + +use crate as starlark; +use crate::environment::GlobalsBuilder; +use crate::values::none::NoneType; + +#[starlark_module] +pub(crate) fn register_none(globals: &mut GlobalsBuilder) { + /// The `None` value, used to represent nothing. + /// Implicitly returned from functions that don't have an explicit return. + const None: NoneType = NoneType; +} diff --git a/starlark-rust/starlark/src/values/types/none/mod.rs b/starlark-rust/starlark/src/values/types/none/mod.rs deleted file mode 100644 index 7c1ce8ec3f466..0000000000000 --- a/starlark-rust/starlark/src/values/types/none/mod.rs +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! The `None` type. - -mod none_or; -pub(crate) mod none_type; - -pub use none_or::NoneOr; -pub use none_type::NoneType; diff --git a/starlark-rust/starlark/src/values/types/none/none_or.rs b/starlark-rust/starlark/src/values/types/none/none_or.rs index a22ed2d78ef90..0d7e2b8c18aa8 100644 --- a/starlark-rust/starlark/src/values/types/none/none_or.rs +++ b/starlark-rust/starlark/src/values/types/none/none_or.rs @@ -15,6 +15,7 @@ * limitations under the License. */ +use allocative::Allocative; use dupe::Dupe; use either::Either; @@ -32,7 +33,7 @@ use crate::values::Value; /// Equivalent of a Rust [`Option`], where `None` /// is encoded as [`NoneType`](crate::values::none::NoneType). /// Useful for its [`UnpackValue`] instance. -#[derive(Debug, Eq, PartialEq, Copy, Clone, Dupe)] +#[derive(Debug, Eq, PartialEq, Copy, Clone, Dupe, Allocative)] pub enum NoneOr { /// Starlark `None`. None, @@ -50,6 +51,15 @@ impl NoneOr { } } + /// Convert a Rust [`Option`] to a [`NoneOr`]. + #[inline] + pub fn from_option(option: Option) -> Self { + match option { + None => NoneOr::None, + Some(x) => NoneOr::Other(x), + } + } + /// Is the value a [`NoneOr::None`]. pub fn is_none(&self) -> bool { matches!(self, NoneOr::None) @@ -57,21 +67,21 @@ impl NoneOr { } impl StarlarkTypeRepr for NoneOr { + type Canonical = as StarlarkTypeRepr>::Canonical; + fn starlark_type_repr() -> Ty { Either::::starlark_type_repr() } } impl<'v, T: UnpackValue<'v>> UnpackValue<'v> for NoneOr { - fn expected() -> String { - format!("None or {}", T::expected()) - } + type Error = >::Error; - fn unpack_value(value: Value<'v>) -> Option { + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { if value.is_none() { - Some(NoneOr::None) + Ok(Some(NoneOr::None)) } else { - T::unpack_value(value).map(NoneOr::Other) + Ok(T::unpack_value_impl(value)?.map(NoneOr::Other)) } } } diff --git a/starlark-rust/starlark/src/values/types/none/none_type.rs b/starlark-rust/starlark/src/values/types/none/none_type.rs index d55fb85e87f23..073d247d0d031 100644 --- a/starlark-rust/starlark/src/values/types/none/none_type.rs +++ b/starlark-rust/starlark/src/values/types/none/none_type.rs @@ -15,6 +15,7 @@ * limitations under the License. */ +use std::convert::Infallible; use std::hash::Hasher; use allocative::Allocative; @@ -23,7 +24,6 @@ use dupe::Dupe; use serde::Serialize; use serde::Serializer; use starlark_derive::starlark_value; -use starlark_derive::StarlarkDocs; use crate as starlark; use crate::any::ProvidesStaticType; @@ -32,8 +32,8 @@ use crate::collections::StarlarkHasher; use crate::private::Private; use crate::typing::Ty; use crate::values::layout::avalue::alloc_static; +use crate::values::layout::avalue::AValueBasic; use crate::values::layout::avalue::AValueImpl; -use crate::values::layout::avalue::Basic; use crate::values::layout::heap::repr::AValueRepr; use crate::values::AllocFrozenValue; use crate::values::AllocValue; @@ -45,17 +45,8 @@ use crate::values::UnpackValue; use crate::values::Value; /// Define the None type, use [`NoneType`] in Rust. 
-#[derive( - Debug, - Clone, - Dupe, - ProvidesStaticType, - Display, - StarlarkDocs, - Allocative -)] -#[starlark_docs(builtin = "standard")] -#[display(fmt = "None")] +#[derive(Debug, Clone, Dupe, ProvidesStaticType, Display, Allocative)] +#[display("None")] pub struct NoneType; impl NoneType { @@ -73,23 +64,16 @@ impl<'v> StarlarkValue<'v> for NoneType { true } - fn equals(&self, other: Value) -> anyhow::Result { - // We always compare pointers before calling `equals`, - // so if we are here, the other is definitely not `None`. - debug_assert!(!other.is_none()); - Ok(false) - } - fn to_bool(&self) -> bool { false } - fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { + fn write_hash(&self, hasher: &mut StarlarkHasher) -> crate::Result<()> { // just took the result of hash(None) in macos python 2.7.10 interpreter. hasher.write_u64(9_223_380_832_852_120_682); Ok(()) } - fn get_hash(&self, _private: Private) -> anyhow::Result { + fn get_hash(&self, _private: Private) -> crate::Result { // Just a random number. Ok(StarlarkHashValue::new_unchecked(0xf9c2263d)) } @@ -122,8 +106,8 @@ impl Serialize for NoneType { } } -pub(crate) static VALUE_NONE: AValueRepr> = - alloc_static(Basic, NoneType); +pub(crate) static VALUE_NONE: AValueRepr>> = + alloc_static(NoneType); impl AllocFrozenValue for NoneType { fn alloc_frozen_value(self, _heap: &FrozenHeap) -> FrozenValue { @@ -132,11 +116,13 @@ impl AllocFrozenValue for NoneType { } impl<'v> UnpackValue<'v> for NoneType { - fn unpack_value(value: Value<'v>) -> Option { + type Error = Infallible; + + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { if value.is_none() { - Some(NoneType) + Ok(Some(NoneType)) } else { - None + Ok(None) } } } diff --git a/starlark-rust/starlark/src/values/types/num.rs b/starlark-rust/starlark/src/values/types/num.rs new file mode 100644 index 0000000000000..9dfa1b6a6d0cc --- /dev/null +++ b/starlark-rust/starlark/src/values/types/num.rs @@ -0,0 +1,22 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Helpers for numerical values. + +pub(crate) mod globals; +pub(crate) mod typecheck; +pub(crate) mod value; diff --git a/starlark-rust/starlark/src/values/types/num/globals.rs b/starlark-rust/starlark/src/values/types/num/globals.rs new file mode 100644 index 0000000000000..2fbf94973f2bc --- /dev/null +++ b/starlark-rust/starlark/src/values/types/num/globals.rs @@ -0,0 +1,44 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use starlark_derive::starlark_module; + +use crate as starlark; +use crate::environment::GlobalsBuilder; +use crate::values::types::num::value::Num; +use crate::values::types::num::value::NumRef; + +#[starlark_module] +pub(crate) fn register_num(globals: &mut GlobalsBuilder) { + /// Take the absolute value of an int. + /// + /// ``` + /// # starlark::assert::all_true(r#" + /// abs(0) == 0 + /// abs(-10) == 10 + /// abs(10) == 10 + /// abs(10.0) == 10.0 + /// abs(-12.34) == 12.34 + /// # "#); + /// ``` + fn abs(#[starlark(require = pos)] x: NumRef) -> anyhow::Result { + match x { + NumRef::Int(a) => Ok(Num::Int(a.abs())), + NumRef::Float(a) => Ok(Num::Float(a.0.abs())), + } + } +} diff --git a/starlark-rust/starlark/src/values/num/typecheck.rs b/starlark-rust/starlark/src/values/types/num/typecheck.rs similarity index 100% rename from starlark-rust/starlark/src/values/num/typecheck.rs rename to starlark-rust/starlark/src/values/types/num/typecheck.rs diff --git a/starlark-rust/starlark/src/values/types/num/value.rs b/starlark-rust/starlark/src/values/types/num/value.rs new file mode 100644 index 0000000000000..d2306740610e4 --- /dev/null +++ b/starlark-rust/starlark/src/values/types/num/value.rs @@ -0,0 +1,378 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::cmp::Ordering; +use std::ops::Add; +use std::ops::Mul; +use std::ops::Sub; + +use dupe::Dupe; + +use crate as starlark; +use crate::collections::StarlarkHashValue; +use crate::values::type_repr::StarlarkTypeRepr; +use crate::values::types::float::StarlarkFloat; +use crate::values::types::int::int_or_big::StarlarkInt; +use crate::values::types::int::int_or_big::StarlarkIntRef; +use crate::values::AllocFrozenValue; +use crate::values::AllocValue; +use crate::values::UnpackValue; + +#[derive(Debug, thiserror::Error)] +enum NumError { + #[error("float division by zero: {0} / {1}")] + DivisionByZero(Num, Num), +} + +/// [`NumRef`] represents a numerical value that can be unpacked from a [`Value`]. +/// +/// It's an intermediate representation that facilitates conversions between +/// numerical types and helps in implementation of arithmetical operations +/// between them. +#[derive(Clone, Debug, Dupe, Copy, StarlarkTypeRepr, UnpackValue)] +#[doc(hidden)] +pub enum NumRef<'v> { + Int(StarlarkIntRef<'v>), + // `StarlarkFloat` not `f64` here because `f64` unpacks from `int` too. 
+ Float(StarlarkFloat), +} + +#[derive( + Debug, + derive_more::Display, + StarlarkTypeRepr, + AllocValue, + AllocFrozenValue +)] +#[doc(hidden)] +pub enum Num { + Int(StarlarkInt), + Float(f64), +} + +impl<'v> NumRef<'v> { + /// Get underlying value as float + pub(crate) fn as_float(&self) -> f64 { + match self { + Self::Int(i) => i.to_f64(), + Self::Float(f) => f.0, + } + } + + pub(crate) fn f64_to_i32_exact(f: f64) -> Option { + let i = f as i32; + if i as f64 == f { Some(i) } else { None } + } + + /// Get underlying value as int (if it can be precisely expressed as int) + pub(crate) fn as_int(&self) -> Option { + match self { + Self::Int(i) => i.to_i32(), + Self::Float(f) => Self::f64_to_i32_exact(f.0), + } + } + + /// Get hash of the underlying number + pub(crate) fn get_hash_64(self) -> u64 { + fn float_hash(f: f64) -> u64 { + if f.is_nan() { + // all possible NaNs should hash to the same value + 0 + } else if f.is_infinite() { + u64::MAX + } else if f == 0.0 { + // Both 0.0 and -0.0 need the same hash, but are both equal to 0.0 + 0.0f64.to_bits() + } else { + f.to_bits() + } + } + + match (self.as_int(), self) { + // equal ints and floats should have the same hash + (Some(i), _) => i as u64, + (None, Self::Float(f)) => float_hash(f.0), + (None, Self::Int(StarlarkIntRef::Small(i))) => { + // shouldn't happen - as_int() should have resulted in an int + i.to_i32() as u64 + } + (None, Self::Int(StarlarkIntRef::Big(b))) => { + // Not perfect, but OK: `1000000000000000000000003` and `1000000000000000000000005` + // flush to the same float, and neither is exact float, + // so we could use better hash for such numbers. + float_hash(b.to_f64()) + } + } + } + + pub(crate) fn get_hash(self) -> StarlarkHashValue { + StarlarkHashValue::hash_64(self.get_hash_64()) + } + + fn to_owned(self) -> Num { + match self { + NumRef::Int(i) => Num::Int(i.to_owned()), + NumRef::Float(f) => Num::Float(f.0), + } + } + + pub(crate) fn div(self, other: NumRef) -> anyhow::Result { + let a = self.as_float(); + let b = other.as_float(); + if b == 0.0 { + Err(NumError::DivisionByZero(self.to_owned(), other.to_owned()).into()) + } else { + Ok(a / b) + } + } + + pub(crate) fn floor_div(self, other: NumRef) -> anyhow::Result { + if let (NumRef::Int(a), NumRef::Int(b)) = (self, other) { + a.floor_div(b).map(Num::Int) + } else { + StarlarkFloat::floor_div_impl(self.as_float(), other.as_float()).map(Num::Float) + } + } + + pub(crate) fn percent(self, other: NumRef) -> anyhow::Result { + if let (NumRef::Int(a), NumRef::Int(b)) = (self, other) { + a.percent(b).map(Num::Int) + } else { + StarlarkFloat::percent_impl(self.as_float(), other.as_float()).map(Num::Float) + } + } +} + +impl<'v> From for NumRef<'v> { + fn from(f: f64) -> Self { + Self::Float(StarlarkFloat(f)) + } +} + +/// This is total eq per starlark spec, not Rust's partial eq. 
+impl<'v> PartialEq for NumRef<'v> { + fn eq(&self, other: &Self) -> bool { + if let (NumRef::Int(a), NumRef::Int(b)) = (self, other) { + a == b + } else { + StarlarkFloat::compare_impl(self.as_float(), other.as_float()) == Ordering::Equal + } + } +} + +impl<'v> Eq for NumRef<'v> {} + +impl<'v> PartialOrd for NumRef<'v> { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl<'v> Ord for NumRef<'v> { + fn cmp(&self, other: &Self) -> Ordering { + if let (NumRef::Int(a), NumRef::Int(b)) = (self, other) { + a.cmp(b) + } else { + StarlarkFloat::compare_impl(self.as_float(), other.as_float()) + } + } +} + +impl<'v> Add for NumRef<'v> { + type Output = Num; + + fn add(self, rhs: Self) -> Self::Output { + if let (NumRef::Int(a), NumRef::Int(b)) = (self, rhs) { + return Num::Int(a + b); + } + Num::Float(self.as_float() + rhs.as_float()) + } +} + +impl<'v> Sub for NumRef<'v> { + type Output = Num; + + fn sub(self, rhs: Self) -> Self::Output { + if let (NumRef::Int(a), NumRef::Int(b)) = (self, rhs) { + return Num::Int(a - b); + } + Num::Float(self.as_float() - rhs.as_float()) + } +} + +impl<'v> Mul for NumRef<'v> { + type Output = Num; + + fn mul(self, rhs: Self) -> Self::Output { + if let (NumRef::Int(a), NumRef::Int(b)) = (self, rhs) { + return Num::Int(a * b); + } + Num::Float(self.as_float() * rhs.as_float()) + } +} + +#[cfg(test)] +mod tests { + use num_bigint::BigInt; + + use super::*; + use crate::values::types::int::inline_int::InlineInt; + use crate::values::Value; + + #[test] + fn test_from_value() { + assert!( + NumRef::unpack_value(Value::new_bool(true)) + .unwrap() + .is_none() + ); + assert!( + NumRef::unpack_value(Value::new_bool(false)) + .unwrap() + .is_none() + ); + assert!( + NumRef::unpack_value(Value::new_empty_string()) + .unwrap() + .is_none() + ); + assert!(NumRef::unpack_value(Value::new_none()).unwrap().is_none()); + + assert_eq!( + NumRef::unpack_value(Value::testing_new_int(0)) + .unwrap() + .unwrap() + .as_int(), + Some(0) + ); + assert_eq!( + NumRef::unpack_value(Value::testing_new_int(42)) + .unwrap() + .unwrap() + .as_int(), + Some(42) + ); + assert_eq!( + NumRef::unpack_value(Value::testing_new_int(-42)) + .unwrap() + .unwrap() + .as_int(), + Some(-42) + ); + } + + #[test] + fn test_conversion_to_float() { + assert_eq!( + NumRef::Int(StarlarkIntRef::Small(InlineInt::ZERO)).as_float(), + 0.0 + ); + assert_eq!( + NumRef::Int(StarlarkIntRef::Small(InlineInt::MAX)).as_float(), + InlineInt::MAX.to_f64() + ); + assert_eq!( + NumRef::Int(StarlarkIntRef::Small(InlineInt::MIN)).as_float(), + InlineInt::MIN.to_f64() + ); + + assert_eq!(NumRef::Float(StarlarkFloat(0.0)).as_float(), 0.0); + assert!(NumRef::Float(StarlarkFloat(f64::NAN)).as_float().is_nan()); + } + + #[test] + fn test_conversion_to_int() { + assert_eq!( + NumRef::Int(StarlarkIntRef::Small(InlineInt::testing_new(0))).as_int(), + Some(0) + ); + assert_eq!( + NumRef::Int(StarlarkIntRef::Small(InlineInt::testing_new(42))).as_int(), + Some(42) + ); + assert_eq!( + NumRef::Int(StarlarkIntRef::Small(InlineInt::testing_new(-42))).as_int(), + Some(-42) + ); + + assert_eq!(NumRef::Float(StarlarkFloat(0_f64)).as_int(), Some(0)); + assert_eq!(NumRef::Float(StarlarkFloat(42_f64)).as_int(), Some(42)); + assert_eq!(NumRef::Float(StarlarkFloat(-42_f64)).as_int(), Some(-42)); + + assert_eq!( + NumRef::Float(StarlarkFloat(i32::MIN as f64)).as_int(), + Some(i32::MIN) + ); + assert_eq!( + NumRef::Float(StarlarkFloat(i32::MAX as f64)).as_int(), + Some(i32::MAX) + ); + + 
assert_eq!(NumRef::Float(StarlarkFloat(42.75)).as_int(), None); + assert_eq!(NumRef::Float(StarlarkFloat(-42.75)).as_int(), None); + assert_eq!(NumRef::Float(StarlarkFloat(f64::NAN)).as_int(), None); + assert_eq!(NumRef::Float(StarlarkFloat(f64::INFINITY)).as_int(), None); + assert_eq!( + NumRef::Float(StarlarkFloat(f64::NEG_INFINITY)).as_int(), + None + ); + } + + #[test] + fn test_hashing() { + assert_eq!( + NumRef::Int(StarlarkIntRef::Small(InlineInt::testing_new(0))).get_hash_64(), + NumRef::Float(StarlarkFloat(0.0)).get_hash_64() + ); + assert_eq!( + NumRef::Int(StarlarkIntRef::Small(InlineInt::testing_new(42))).get_hash_64(), + NumRef::Float(StarlarkFloat(42.0)).get_hash_64() + ); + + assert_eq!( + NumRef::Float(StarlarkFloat(f64::INFINITY + f64::NEG_INFINITY)).get_hash_64(), + NumRef::Float(StarlarkFloat(f64::NAN)).get_hash_64() + ); + assert_eq!( + NumRef::Float(StarlarkFloat("0.25".parse().unwrap())).get_hash_64(), + NumRef::Float(StarlarkFloat("25e-2".parse().unwrap())).get_hash_64() + ); + + let x = 1u64 << 55; + assert_eq!(x as f64 as u64, x, "Self-check"); + assert_eq!( + NumRef::Float(StarlarkFloat(x as f64)).get_hash_64(), + NumRef::Int(StarlarkInt::from(BigInt::from(x)).as_ref()).get_hash_64(), + ) + } + + #[test] + fn test_eq() { + assert_eq!( + NumRef::Float(StarlarkFloat(f64::NAN)), + NumRef::Float(StarlarkFloat(f64::NAN)) + ); + assert_eq!( + NumRef::Float(StarlarkFloat(f64::INFINITY)), + NumRef::Float(StarlarkFloat(f64::INFINITY)) + ); + assert_eq!( + NumRef::Int(StarlarkIntRef::Small(InlineInt::testing_new(10))), + NumRef::Float(StarlarkFloat(10.0)) + ); + } +} diff --git a/starlark-rust/starlark/src/values/types/range.rs b/starlark-rust/starlark/src/values/types/range.rs index 6f2e89a3095a5..2ae31b53022cf 100644 --- a/starlark-rust/starlark/src/values/types/range.rs +++ b/starlark-rust/starlark/src/values/types/range.rs @@ -17,367 +17,7 @@ //! The range type, constructed with `range()`. -use std::fmt; -use std::fmt::Display; -use std::num::NonZeroI32; +pub(crate) mod globals; +mod range_type; -use allocative::Allocative; -use dupe::Dupe; -use starlark_derive::starlark_value; -use starlark_derive::NoSerialize; -use starlark_derive::StarlarkDocs; - -use crate as starlark; -use crate::any::ProvidesStaticType; -use crate::starlark_simple_value; -use crate::typing::Ty; -use crate::values::index::convert_index; -use crate::values::index::convert_slice_indices; -use crate::values::Heap; -use crate::values::StarlarkValue; -use crate::values::Value; -use crate::values::ValueError; -use crate::values::ValueLike; - -/// Representation of `range()` type. -#[derive( - Clone, - Copy, - Dupe, - Debug, - ProvidesStaticType, - NoSerialize, - StarlarkDocs, - Allocative -)] -#[starlark_docs(builtin = "standard")] -pub struct Range { - start: i32, - stop: i32, - step: NonZeroI32, -} - -impl Display for Range { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if self.step.get() != 1 { - write!(f, "range({}, {}, {})", self.start, self.stop, self.step) - } else if self.start != 0 { - write!(f, "range({}, {})", self.start, self.stop) - } else { - write!(f, "range({})", self.stop) - } - } -} - -starlark_simple_value!(Range); - -impl Range { - /// The result of calling `type()` on a range. - pub const TYPE: &'static str = "range"; - - /// Create a new [`Range`]. 
- pub fn new(start: i32, stop: i32, step: NonZeroI32) -> Range { - Range { start, stop, step } - } - - fn equals_range(&self, other: &Range) -> anyhow::Result<bool> { - let self_length = self.length()?; - let other_length = other.length()?; - if self_length == 0 || other_length == 0 { - return Ok(self_length == other_length); - } - if self.start != other.start { - return Ok(false); - } - if self_length == 1 || other_length == 1 { - return Ok(self_length == other_length); - } - debug_assert!(self_length > 1); - debug_assert!(other_length > 1); - if self.step.get() == other.step.get() { - return Ok(self_length == other_length); - } else { - return Ok(false); - } - } - - fn rem_range_at_iter(&self, index: usize) -> Option<Range> { - let index = i64::try_from(index).ok()?; - - let start = - (self.start as i64).saturating_add(index.saturating_mul(self.step.get() as i64)); - - Some(Range { - start: i32::try_from(start).ok()?, - stop: self.stop, - step: self.step, - }) - } -} - -#[starlark_value(type = Range::TYPE)] -impl<'v> StarlarkValue<'v> for Range { - fn to_bool(&self) -> bool { - (self.start < self.stop && self.step.get() > 0) - || (self.start > self.stop && self.step.get() < 0) - } - - fn length(&self) -> anyhow::Result<i32> { - if self.start == self.stop { - return Ok(0); - } - - // If step is into opposite direction of stop, then length is zero. - if (self.stop >= self.start) != (self.step.get() > 0) { - return Ok(0); - } - - // Convert range and step to `u64` - let (dist, step) = if self.step.get() >= 0 { - ( - self.stop.wrapping_sub(self.start) as u64, - self.step.get() as u64, - ) - } else { - ( - self.start.wrapping_sub(self.stop) as u64, - self.step.get().wrapping_neg() as u64, - ) - }; - let i = ((dist - 1) / step + 1) as i32; - if i >= 0 { - Ok(i) - } else { - Err(ValueError::IntegerOverflow.into()) - } - } - - fn at(&self, index: Value, heap: &'v Heap) -> anyhow::Result<Value<'v>> { - let index = convert_index(index, self.length()?)?; - // Must not overflow if `length` is computed correctly - Ok(heap.alloc(self.start + self.step.get() * index)) - } - - fn equals(&self, other: Value) -> anyhow::Result<bool> { - if let Some(other) = other.downcast_ref::<Range>() { - self.equals_range(other) - } else { - Ok(false) - } - } - - fn slice( - &self, - start: Option<Value>, - stop: Option<Value>, - stride: Option<Value>, - heap: &'v Heap, - ) -> anyhow::Result<Value<'v>> { - let (start, stop, step) = convert_slice_indices(self.length()?, start, stop, stride)?; - return Ok(heap.alloc(Range { - start: self - .start - .checked_add( - start - .checked_mul(self.step.get()) - .ok_or(ValueError::IntegerOverflow)?, - ) - .ok_or(ValueError::IntegerOverflow)?, - stop: self - .start - .checked_add( - stop.checked_mul(self.step.get()) - .ok_or(ValueError::IntegerOverflow)?, - ) - .ok_or(ValueError::IntegerOverflow)?, - step: NonZeroI32::new( - step.checked_mul(self.step.get()) - .ok_or(ValueError::IntegerOverflow)?, - ) - .unwrap(), - })); - } - - unsafe fn iterate(&self, me: Value<'v>, _heap: &'v Heap) -> anyhow::Result<Value<'v>> { - Ok(me) - } - - unsafe fn iter_next(&self, index: usize, heap: &'v Heap) -> Option<Value<'v>> { - let rem_range = self.rem_range_at_iter(index)?; - - if !rem_range.to_bool() { - return None; - } - - Some(heap.alloc(rem_range.start)) - } - - unsafe fn iter_size_hint(&self, index: usize) -> (usize, Option<usize>) { - let Some(rem_range) = self.rem_range_at_iter(index) else { - return (0, Some(0)); - }; - match rem_range.length() { - Ok(length) => (length as usize, Some(length as usize)), - Err(_) => (0, None), - } - } - - unsafe fn iter_stop(&self) {} - - fn
is_in(&self, other: Value) -> anyhow::Result<bool> { - let other = match other.unpack_num().and_then(|n| n.as_int()) { - Some(other) => other, - None => { - // Consider `"a" in range(3)` - // - // Should we error or return false? - // Go Starlark errors. Python returns false. - // Discussion at https://github.com/bazelbuild/starlark/issues/175 - return Ok(false); - } - }; - if !self.to_bool() { - return Ok(false); - } - if self.start == other { - return Ok(true); - } - if self.step.get() > 0 { - if other < self.start || other >= self.stop { - return Ok(false); - } - Ok((other.wrapping_sub(self.start) as u64) % (self.step.get() as u64) == 0) - } else { - if other > self.start || other <= self.stop { - return Ok(false); - } - Ok( - (self.start.wrapping_sub(other) as u64) % (self.step.get().wrapping_neg() as u64) - == 0, - ) - } - } - - fn get_type_starlark_repr() -> Ty { - Ty::starlark_value::<Range>() - } -} - -/// For tests -impl PartialEq for Range { - fn eq(&self, other: &Range) -> bool { - self.equals_range(other).unwrap() - } -} - -#[cfg(test)] -mod tests { - use std::num::NonZeroI32; - - use crate::assert; - use crate::values::range::Range; - use crate::values::types::inline_int::InlineInt; - use crate::values::Heap; - use crate::values::StarlarkValue; - use crate::values::Value; - - fn range(start: i32, stop: i32, range: i32) -> Range { - Range { - start, - stop, - step: NonZeroI32::new(range).unwrap(), - } - } - - fn range_start_stop(start: i32, stop: i32) -> Range { - range(start, stop, 1) - } - - fn range_stop(stop: i32) -> Range { - range_start_stop(0, stop) - } - - #[test] - fn length_stop() { - assert_eq!(Some(0), range_stop(0).length().ok()); - assert_eq!(Some(17), range_stop(17).length().ok()); - } - - #[test] - fn length_start_stop() { - assert_eq!(Some(20), range_start_stop(10, 30).length().ok()); - assert_eq!(Some(0), range_start_stop(10, -30).length().ok()); - assert_eq!( - Some(i32::max_value()), - range_start_stop(0, i32::max_value()).length().ok() - ); - assert!(range_start_stop(-1, i32::max_value()).length().is_err()); - } - - #[test] - fn length_start_stop_step() { - assert_eq!(Some(5), range(0, 10, 2).length().ok()); - assert_eq!(Some(5), range(0, 9, 2).length().ok()); - assert_eq!(Some(0), range(0, 10, -2).length().ok()); - assert_eq!(Some(5), range(10, 0, -2).length().ok()); - assert_eq!(Some(5), range(9, 0, -2).length().ok()); - assert_eq!(Some(1), range(4, 14, 10).length().ok()); - } - - #[test] - fn eq() { - assert_eq!(range_stop(0), range(2, 1, 3)); - } - - #[test] - fn test_range_exhaustive() { - // The range implementation is fairly hairy. Lots of corner cases etc. - // Especially around equality, length. - // Therefore, generate ranges exhaustively over a very small range - // and test lots of properties about them.
- let mut ranges = Vec::with_capacity(294); - for start in -3..4 { - for stop in -3..4 { - for step in -3..3 { - let step = if step >= 0 { step + 1 } else { step }; - ranges.push(range(start, stop, step)) - } - } - } - assert_eq!(ranges.len(), 294); // Assert we don't accidentally take too long - - let heap = Heap::new(); - for x in &ranges { - let x = heap.alloc_simple(*x); - let full: Vec<Value> = x.iterate(&heap).unwrap().collect(); - assert_eq!(x.length().unwrap(), full.len() as i32); - for (i, v) in full.iter().enumerate() { - assert_eq!(x.at(heap.alloc(i), &heap).unwrap(), *v); - } - } - - // Takes 294^2 steps - but completes instantly - for x in &ranges { - for y in &ranges { - let x = heap.alloc_simple(*x); - let y = heap.alloc_simple(*y); - assert_eq!( - x == y, - Iterator::eq(x.iterate(&heap).unwrap(), y.iterate(&heap).unwrap()) - ) - } - } - } - - #[test] - fn test_max_len() { - assert::eq( - &InlineInt::MAX.to_string(), - &format!("len(range({}))", InlineInt::MAX), - ); - assert::eq( - &InlineInt::MAX.to_string(), - &format!("len(range({}, -1))", InlineInt::MIN), - ); - } -} +pub use range_type::Range; diff --git a/starlark-rust/starlark/src/values/types/range/globals.rs b/starlark-rust/starlark/src/values/types/range/globals.rs new file mode 100644 index 0000000000000..cd2a27aef85ea --- /dev/null +++ b/starlark-rust/starlark/src/values/types/range/globals.rs @@ -0,0 +1,81 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::num::NonZeroI32; + +use starlark_derive::starlark_module; + +use crate as starlark; +use crate::environment::GlobalsBuilder; +use crate::values::range::Range; + +#[starlark_module] +pub(crate) fn register_range(globals: &mut GlobalsBuilder) { + /// [range]( + /// https://github.com/bazelbuild/starlark/blob/master/spec.md#range + /// ): return a range of integers + /// + /// `range` returns an immutable sequence of integers defined by the specified interval + /// and stride. + /// + /// ```python + /// range(stop) # equivalent to range(0, stop) + /// range(start, stop) # equivalent to range(start, stop, 1) + /// range(start, stop, step) + /// ``` + /// + /// `range` requires between one and three integer arguments. + /// With one argument, `range(stop)` returns the ascending sequence of + /// non-negative integers less than `stop`. + /// With two arguments, `range(start, stop)` returns only integers not less + /// than `start`. + /// + /// With three arguments, `range(start, stop, step)` returns integers + /// formed by successively adding `step` to `start` until the value meets or + /// passes `stop`. A call to `range` fails if the value of `step` is + /// zero.
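+ /// If `step` moves away from `stop`, the resulting range is empty.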
+ /// + /// ``` + /// # starlark::assert::all_true(r#" + /// list(range(10)) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + /// list(range(3, 10)) == [3, 4, 5, 6, 7, 8, 9] + /// list(range(3, 10, 2)) == [3, 5, 7, 9] + /// list(range(10, 3, -2)) == [10, 8, 6, 4] + /// # "#); + /// ``` + #[starlark(as_type = Range, speculative_exec_safe)] + fn range( + #[starlark(require = pos)] a1: i32, + #[starlark(require = pos)] a2: Option<i32>, + #[starlark(require = pos, default = 1)] step: i32, + ) -> anyhow::Result<Range> { + let start = match a2 { + None => 0, + Some(_) => a1, + }; + let stop = a2.unwrap_or(a1); + let step = match NonZeroI32::new(step) { + Some(step) => step, + None => { + return Err(anyhow::anyhow!( + "Third argument of range (step) cannot be zero" + )); + } + }; + Ok(Range::new(start, stop, step)) + } +} diff --git a/starlark-rust/starlark/src/values/types/range/range_type.rs b/starlark-rust/starlark/src/values/types/range/range_type.rs new file mode 100644 index 0000000000000..82c47d906c4d3 --- /dev/null +++ b/starlark-rust/starlark/src/values/types/range/range_type.rs @@ -0,0 +1,367 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::fmt; +use std::fmt::Display; +use std::num::NonZeroI32; + +use allocative::Allocative; +use dupe::Dupe; +use starlark_derive::starlark_value; +use starlark_derive::NoSerialize; + +use crate as starlark; +use crate::any::ProvidesStaticType; +use crate::starlark_simple_value; +use crate::typing::Ty; +use crate::values::index::convert_index; +use crate::values::index::convert_slice_indices; +use crate::values::Heap; +use crate::values::StarlarkValue; +use crate::values::Value; +use crate::values::ValueError; +use crate::values::ValueLike; + +/// Representation of `range()` type. +#[derive(Clone, Copy, Dupe, Debug, ProvidesStaticType, NoSerialize, Allocative)] +pub struct Range { + start: i32, + stop: i32, + step: NonZeroI32, +} + +impl Display for Range { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.step.get() != 1 { + write!(f, "range({}, {}, {})", self.start, self.stop, self.step) + } else if self.start != 0 { + write!(f, "range({}, {})", self.start, self.stop) + } else { + write!(f, "range({})", self.stop) + } + } +} + +starlark_simple_value!(Range); + +impl Range { + /// The result of calling `type()` on a range. + pub const TYPE: &'static str = "range"; + + /// Create a new [`Range`].
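+ /// Taking `step` as `NonZeroI32` makes a zero step unrepresentable. An illustrative construction sketch (not part of the original patch): + /// + /// ```ignore + /// // Hypothetical caller-side use; equivalent to Starlark's range(0, 10, 2). + /// let r = Range::new(0, 10, std::num::NonZeroI32::new(2).unwrap()); + /// ```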
+ pub fn new(start: i32, stop: i32, step: NonZeroI32) -> Range { + Range { start, stop, step } + } + + fn equals_range(&self, other: &Range) -> crate::Result<bool> { + let self_length = self.length()?; + let other_length = other.length()?; + if self_length == 0 || other_length == 0 { + return Ok(self_length == other_length); + } + if self.start != other.start { + return Ok(false); + } + if self_length == 1 || other_length == 1 { + return Ok(self_length == other_length); + } + debug_assert!(self_length > 1); + debug_assert!(other_length > 1); + if self.step.get() == other.step.get() { + return Ok(self_length == other_length); + } else { + return Ok(false); + } + } + + fn rem_range_at_iter(&self, index: usize) -> Option<Range> { + let index = i64::try_from(index).ok()?; + + let start = + (self.start as i64).saturating_add(index.saturating_mul(self.step.get() as i64)); + + Some(Range { + start: i32::try_from(start).ok()?, + stop: self.stop, + step: self.step, + }) + } +} + +#[starlark_value(type = Range::TYPE)] +impl<'v> StarlarkValue<'v> for Range { + fn to_bool(&self) -> bool { + (self.start < self.stop && self.step.get() > 0) + || (self.start > self.stop && self.step.get() < 0) + } + + fn length(&self) -> crate::Result<i32> { + if self.start == self.stop { + return Ok(0); + } + + // If step is into opposite direction of stop, then length is zero. + if (self.stop >= self.start) != (self.step.get() > 0) { + return Ok(0); + } + + // Convert range and step to `u64` + let (dist, step) = if self.step.get() >= 0 { + ( + self.stop.wrapping_sub(self.start) as u64, + self.step.get() as u64, + ) + } else { + ( + self.start.wrapping_sub(self.stop) as u64, + self.step.get().wrapping_neg() as u64, + ) + }; + let i = ((dist - 1) / step + 1) as i32; + if i >= 0 { + Ok(i) + } else { + Err(ValueError::IntegerOverflow.into()) + } + } + + fn at(&self, index: Value, heap: &'v Heap) -> crate::Result<Value<'v>> { + let index = convert_index(index, self.length()?)?; + // Must not overflow if `length` is computed correctly + Ok(heap.alloc(self.start + self.step.get() * index)) + } + + fn equals(&self, other: Value) -> crate::Result<bool> { + if let Some(other) = other.downcast_ref::<Range>() { + self.equals_range(other) + } else { + Ok(false) + } + } + + fn slice( + &self, + start: Option<Value>, + stop: Option<Value>, + stride: Option<Value>, + heap: &'v Heap, + ) -> crate::Result<Value<'v>> { + let (start, stop, step) = convert_slice_indices(self.length()?, start, stop, stride)?; + return Ok(heap.alloc(Range { + start: self + .start + .checked_add( + start + .checked_mul(self.step.get()) + .ok_or(ValueError::IntegerOverflow)?, + ) + .ok_or(ValueError::IntegerOverflow)?, + stop: self + .start + .checked_add( + stop.checked_mul(self.step.get()) + .ok_or(ValueError::IntegerOverflow)?, + ) + .ok_or(ValueError::IntegerOverflow)?, + step: NonZeroI32::new( + step.checked_mul(self.step.get()) + .ok_or(ValueError::IntegerOverflow)?, + ) + .unwrap(), + })); + } + + unsafe fn iterate(&self, me: Value<'v>, _heap: &'v Heap) -> crate::Result<Value<'v>> { + Ok(me) + } + + unsafe fn iter_next(&self, index: usize, heap: &'v Heap) -> Option<Value<'v>> { + let rem_range = self.rem_range_at_iter(index)?; + + if !rem_range.to_bool() { + return None; + } + + Some(heap.alloc(rem_range.start)) + } + + unsafe fn iter_size_hint(&self, index: usize) -> (usize, Option<usize>) { + let Some(rem_range) = self.rem_range_at_iter(index) else { + return (0, Some(0)); + }; + match rem_range.length() { + Ok(length) => (length as usize, Some(length as usize)), + Err(_) => (0, None), + } + } + + unsafe fn iter_stop(&self) {} + + fn is_in(&self,
other: Value) -> crate::Result<bool> { + let other = match other.unpack_num().and_then(|n| n.as_int()) { + Some(other) => other, + None => { + // Consider `"a" in range(3)` + // + // Should we error or return false? + // Go Starlark errors. Python returns false. + // Discussion at https://github.com/bazelbuild/starlark/issues/175 + return Ok(false); + } + }; + if !self.to_bool() { + return Ok(false); + } + if self.start == other { + return Ok(true); + } + if self.step.get() > 0 { + if other < self.start || other >= self.stop { + return Ok(false); + } + Ok((other.wrapping_sub(self.start) as u64) % (self.step.get() as u64) == 0) + } else { + if other > self.start || other <= self.stop { + return Ok(false); + } + Ok( + (self.start.wrapping_sub(other) as u64) % (self.step.get().wrapping_neg() as u64) + == 0, + ) + } + } + + fn get_type_starlark_repr() -> Ty { + Ty::starlark_value::<Range>() + } +} + +/// For tests +impl PartialEq for Range { + fn eq(&self, other: &Range) -> bool { + self.equals_range(other).unwrap() + } +} + +#[cfg(test)] +mod tests { + use std::num::NonZeroI32; + + use crate::assert; + use crate::values::range::Range; + use crate::values::types::int::inline_int::InlineInt; + use crate::values::Heap; + use crate::values::StarlarkValue; + use crate::values::Value; + + fn range(start: i32, stop: i32, range: i32) -> Range { + Range { + start, + stop, + step: NonZeroI32::new(range).unwrap(), + } + } + + fn range_start_stop(start: i32, stop: i32) -> Range { + range(start, stop, 1) + } + + fn range_stop(stop: i32) -> Range { + range_start_stop(0, stop) + } + + #[test] + fn length_stop() { + assert_eq!(Some(0), range_stop(0).length().ok()); + assert_eq!(Some(17), range_stop(17).length().ok()); + } + + #[test] + fn length_start_stop() { + assert_eq!(Some(20), range_start_stop(10, 30).length().ok()); + assert_eq!(Some(0), range_start_stop(10, -30).length().ok()); + assert_eq!(Some(i32::MAX), range_start_stop(0, i32::MAX).length().ok()); + assert!(range_start_stop(-1, i32::MAX).length().is_err()); + } + + #[test] + fn length_start_stop_step() { + assert_eq!(Some(5), range(0, 10, 2).length().ok()); + assert_eq!(Some(5), range(0, 9, 2).length().ok()); + assert_eq!(Some(0), range(0, 10, -2).length().ok()); + assert_eq!(Some(5), range(10, 0, -2).length().ok()); + assert_eq!(Some(5), range(9, 0, -2).length().ok()); + assert_eq!(Some(1), range(4, 14, 10).length().ok()); + } + + #[test] + fn eq() { + assert_eq!(range_stop(0), range(2, 1, 3)); + } + + #[test] + fn test_range_exhaustive() { + // The range implementation is fairly hairy. Lots of corner cases etc. + // Especially around equality, length. + // Therefore, generate ranges exhaustively over a very small range + // and test lots of properties about them.
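+ // 7 starts x 7 stops x 6 non-zero steps = 294 ranges.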
+ let mut ranges = Vec::with_capacity(294); + for start in -3..4 { + for stop in -3..4 { + for step in -3..3 { + let step = if step >= 0 { step + 1 } else { step }; + ranges.push(range(start, stop, step)) + } + } + } + assert_eq!(ranges.len(), 294); // Assert we don't accidentally take too long + + let heap = Heap::new(); + for x in &ranges { + let x = heap.alloc_simple(*x); + let full: Vec<Value> = x.iterate(&heap).unwrap().collect(); + assert_eq!(x.length().unwrap(), full.len() as i32); + for (i, v) in full.iter().enumerate() { + assert_eq!(x.at(heap.alloc(i), &heap).unwrap(), *v); + } + } + + // Takes 294^2 steps - but completes instantly + for x in &ranges { + for y in &ranges { + let x = heap.alloc_simple(*x); + let y = heap.alloc_simple(*y); + assert_eq!( + x == y, + Iterator::eq(x.iterate(&heap).unwrap(), y.iterate(&heap).unwrap()) + ) + } + } + } + + #[test] + fn test_max_len() { + assert::eq( + &InlineInt::MAX.to_string(), + &format!("len(range({}))", InlineInt::MAX), + ); + assert::eq( + &InlineInt::MAX.to_string(), + &format!("len(range({}, -1))", InlineInt::MIN), + ); + } +} diff --git a/starlark-rust/starlark/src/values/types/record.rs b/starlark-rust/starlark/src/values/types/record.rs new file mode 100644 index 0000000000000..54c6c81a97da6 --- /dev/null +++ b/starlark-rust/starlark/src/values/types/record.rs @@ -0,0 +1,51 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! A `record` type, comprising a fixed set of fields. +//! +//! Calling `record()` produces a `RecordType`. Calling `RecordType` produces a [`Record`]. +//! The field names of the record are only stored once, potentially reducing memory usage. +//! Created in Starlark using the `record()` function, which accepts keyword arguments. +//! The keys become field names, and values are the types. Calling the resulting +//! function produces an actual record. +//! +//! ``` +//! # starlark::assert::is_true(r#" +//! IpAddress = record(host=str, port=int) +//! rec = IpAddress(host="localhost", port=80) +//! rec.port == 80 +//! # "#); +//! ``` +//! +//! It is also possible to use `field(type, default)` type to give defaults: +//! +//! ``` +//! # starlark::assert::is_true(r#" +//! IpAddress = record(host=str, port=field(int, 80)) +//! rec = IpAddress(host="localhost") +//! rec.port == 80 +//! # "#); +//!
``` + +pub(crate) mod field; +pub(crate) mod globals; +pub(crate) mod instance; +pub(crate) mod matcher; +pub(crate) mod record_type; +pub(crate) mod ty_record_type; + +pub use crate::values::record::instance::Record; diff --git a/starlark-rust/starlark/src/values/types/record/field.rs b/starlark-rust/starlark/src/values/types/record/field.rs index a8ac2fe530006..7bd4670396e35 100644 --- a/starlark-rust/starlark/src/values/types/record/field.rs +++ b/starlark-rust/starlark/src/values/types/record/field.rs @@ -24,7 +24,6 @@ use dupe::Dupe; use starlark_derive::starlark_value; use starlark_derive::Freeze; use starlark_derive::NoSerialize; -use starlark_derive::StarlarkDocs; use starlark_derive::Trace; use starlark_map::StarlarkHasher; @@ -35,6 +34,7 @@ use crate::starlark_complex_value; use crate::typing::Ty; use crate::values::typing::type_compiled::compiled::TypeCompiled; use crate::values::StarlarkValue; +use crate::values::ValueLifetimeless; use crate::values::ValueLike; /// The result of `field()`. @@ -46,11 +46,9 @@ use crate::values::ValueLike; Freeze, NoSerialize, ProvidesStaticType, - StarlarkDocs, Allocative )] -#[starlark_docs(builtin = "extension")] -pub struct FieldGen<V> { +pub struct FieldGen<V: ValueLifetimeless> { pub(crate) typ: TypeCompiled<V>, pub(crate) default: Option<V>, } @@ -68,11 +66,14 @@ impl<'v, V: ValueLike<'v>> Display for FieldGen<V> { } // Manual because no instance for Option<V> -unsafe impl<From: Coerce<To>, To> Coerce<FieldGen<To>> for FieldGen<From> {} +unsafe impl<From: Coerce<To> + ValueLifetimeless, To: ValueLifetimeless> Coerce<FieldGen<To>> + for FieldGen<From> +{ +} starlark_complex_value!(pub(crate) Field); -impl<V> FieldGen<V> { +impl<V: ValueLifetimeless> FieldGen<V> { pub(crate) fn new(typ: TypeCompiled<V>, default: Option<V>) -> Self { Self { typ, default } } @@ -85,11 +86,11 @@ impl<'v, V: ValueLike<'v>> FieldGen<V> { } #[starlark_value(type = "field")] -impl<'v, V: ValueLike<'v> + 'v> StarlarkValue<'v> for FieldGen<V> +impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for FieldGen<V> where Self: ProvidesStaticType<'v>, { - fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { + fn write_hash(&self, hasher: &mut StarlarkHasher) -> crate::Result<()> { self.typ.write_hash(hasher)?; self.default.is_some().hash(hasher); if let Some(d) = self.default { @@ -98,10 +99,6 @@ where Ok(()) } - fn get_type_starlark_repr() -> Ty { - Ty::starlark_value::<Self>() - } - fn typechecker_ty(&self) -> Option<Ty> { Some(Ty::starlark_value::<Self>()) } diff --git a/starlark-rust/starlark/src/values/types/record/globals.rs b/starlark-rust/starlark/src/values/types/record/globals.rs index 570c2da4279e8..37c9c398d5269 100644 --- a/starlark-rust/starlark/src/values/types/record/globals.rs +++ b/starlark-rust/starlark/src/values/types/record/globals.rs @@ -23,10 +23,10 @@ use starlark_derive::starlark_module; use crate as starlark; use crate::collections::SmallMap; use crate::environment::GlobalsBuilder; +use crate::eval::Evaluator; use crate::values::record::field::Field; use crate::values::record::record_type::RecordType; use crate::values::typing::type_compiled::compiled::TypeCompiled; -use crate::values::Heap; use crate::values::Value; #[starlark_module] @@ -60,13 +60,13 @@ pub(crate) fn register_record(builder: &mut GlobalsBuilder) { /// Records are stored deduplicating their field names, making them more memory efficient than dictionaries.
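/// For example, `record(host=str, port=int)` stores the field names "host" and "port" once on the record type, while each instance stores only its two values.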
fn record<'v>( #[starlark(kwargs)] kwargs: SmallMap<String, Value<'v>>, - heap: &'v Heap, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result<RecordType<'v>> { // Every Value must either be a field or a value (the type) let mut mp = SmallMap::with_capacity(kwargs.len()); for (k, v) in kwargs.into_iter_hashed() { let field = match Field::from_value(v) { - None => Field::new(TypeCompiled::new(v, heap)?, None), + None => Field::new(TypeCompiled::new(v, eval.heap())?, None), Some(v) => v.dupe(), }; mp.insert_hashed(k, field); @@ -87,10 +87,10 @@ pub(crate) fn register_record(builder: &mut GlobalsBuilder) { fn field<'v>( #[starlark(require = pos)] typ: Value<'v>, default: Option<Value<'v>>, - heap: &'v Heap, - ) -> anyhow::Result<Field<'v>> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> starlark::Result<Field<'v>> { // We compile the type even if we don't have a default to raise the error sooner - let compiled = TypeCompiled::new(typ, heap)?; + let compiled = TypeCompiled::new(typ, eval.heap())?; if let Some(d) = default { compiled.check_type(d, Some("default"))?; } @@ -104,10 +104,10 @@ mod tests { use crate::assert::Assert; #[test] - fn test_record() { + fn test_record_pass() { assert::pass( r#" -rec_type = record(host=str.type, port=int.type) +rec_type = record(host=str, port=int) rec1 = rec_type(host = "test", port=80) rec2 = rec_type(host = "test", port=90) assert_eq(rec1, rec1) @@ -117,57 +117,79 @@ assert_eq(rec1.port, 80) assert_eq(dir(rec1), ["host", "port"]) "#, ); + } + + #[test] + fn test_record_fail_0() { assert::fails( r#" -rec_type = record(host=str.type, port=int.type) +rec_type = record(host=str, port=int) rec_type(host=1, port=80) "#, &[ "Value `1` of type `int` does not match the type annotation `str` for argument `host`", ], ); + } + + #[test] + fn test_record_fail_1() { assert::fails( r#" -rec_type = record(host=str.type, port=int.type) +rec_type = record(host=str, port=int) rec_type(port=80) "#, - &["Missing parameter", "`host`"], + &["Missing named-only parameter", "`host`"], ); + } + + #[test] + fn test_record_fail_2() { assert::fails( r#" -rec_type = record(host=str.type, port=int.type) +rec_type = record(host=str, port=int) rec_type(host="localhost", port=80, mask=255) "#, &["extra named", "mask"], ); + } + + #[test] + fn test_record_fail_3() { assert::pass( r#" -rec_type = record(host=str.type, port=int.type) +rec_type = record(host=str, port=int) def foo(x: rec_type) -> rec_type: return x foo(rec_type(host="localhost", port=80))"#, ); + } + + #[test] + fn test_record_fail_4() { assert::pass( r#" -# @starlark-rust: allow_string_literals_in_type_expr - -v = [record(host=str.type, port=int.type)] +v = [record(host=str, port=int)] v_0 = v[0] -def foo(y: v_0) -> "record": +def foo(y: v_0) -> v_0: # TODO(nga): fails at compile time.
return noop(y) foo(v[0](host="localhost", port=80))"#, ); + } + + #[test] + fn test_record_fail_5() { assert::pass( r#" -rec_type = record(host=str.type, port=field(int.type, 80), mask=int.type) +rec_type = record(host=str, port=field(int, 80), mask=int) assert_eq(rec_type(host="localhost", mask=255), rec_type(host="localhost", port=80, mask=255))"#, ); // Make sure the default value is heap allocated (used to fail with a GC issue) assert::pass( r#" heap_string = "test{}".format(42) -rec_type = record(test_gc=field(str.type, heap_string)) +rec_type = record(test_gc=field(str, heap_string)) assert_eq(rec_type().test_gc, "test42")"#, ); } @@ -176,7 +198,7 @@ assert_eq(rec_type().test_gc, "test42")"#, fn test_record_equality() { assert::pass( r#" -rec_type = record(host=str.type, port=field(int.type, 80)) +rec_type = record(host=str, port=field(int, 80)) assert_eq(rec_type(host="s"), rec_type(host="s")) assert_eq(rec_type(host="s"), rec_type(host="s", port=80)) assert_ne(rec_type(host="s"), rec_type(host="t")) @@ -187,7 +209,7 @@ assert_ne(rec_type(host="s"), rec_type(host="t")) a.module( "m", r#" -rec_type = record(host=str.type, port=field(int.type, 80)) +rec_type = record(host=str, port=field(int, 80)) rec_val = rec_type(host="s") "#, ); @@ -203,14 +225,14 @@ assert_ne(rec_val, rec_type(host="t")) a.module( "m", r#" -rt = record(host=str.type) +rt = record(host=str) "#, ); a.pass( r#" load('m', r1='rt') -rt = record(host=str.type) -diff = record(host=str.type) +rt = record(host=str) +diff = record(host=str) assert_ne(r1(host="test"), rt(host="test")) assert_ne(r1(host="test"), diff(host="test")) "#, ); @@ -220,7 +242,7 @@ assert_ne(r1(host="test"), diff(host="test")) #[test] fn test_field_invalid() { assert::fails( - "field(str.type, None)", + "field(str, None)", &["does not match the type", "`default`"], ); assert::fails("field(True)", &["`True`", "not a valid type"]); diff --git a/starlark-rust/starlark/src/values/types/record/instance.rs b/starlark-rust/starlark/src/values/types/record/instance.rs index 74526c76a99ea..b67d11108fc80 100644 --- a/starlark-rust/starlark/src/values/types/record/instance.rs +++ b/starlark-rust/starlark/src/values/types/record/instance.rs @@ -44,12 +44,13 @@ use crate::values::Heap; use crate::values::StarlarkValue; use crate::values::Trace; use crate::values::Value; +use crate::values::ValueLifetimeless; use crate::values::ValueLike; /// An actual record. #[derive(Clone, Debug, Trace, Coerce, Freeze, ProvidesStaticType, Allocative)] #[repr(C)] -pub struct RecordGen<V> { +pub struct RecordGen<V: ValueLifetimeless> { pub(crate) typ: V, // Must be RecordType pub(crate) values: Box<[V]>, } @@ -103,21 +104,11 @@ impl<'v, V: ValueLike<'v>> RecordGen<V> { } #[starlark_value(type = Record::TYPE)] -impl<'v, V: ValueLike<'v> + 'v> StarlarkValue<'v> for RecordGen<V> +impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for RecordGen<V> where Self: ProvidesStaticType<'v>, { - fn matches_type(&self, ty: &str) -> bool { - if ty == Record::TYPE { - return true; - } - match self.get_record_type() { - Either::Left(x) => x.ty_record_data().map(|t| t.name.as_str()) == Some(ty), - Either::Right(x) => x.ty_record_data().map(|t| t.name.as_str()) == Some(ty), - } - } - - fn equals(&self, other: Value<'v>) -> anyhow::Result<bool> { + fn equals(&self, other: Value<'v>) -> crate::Result<bool> { match Record::from_value(other) { Some(other) if self.typ.equals(other.typ)?
=> { equals_slice(&self.values, &other.values, |x, y| x.equals(*y)) @@ -135,7 +126,7 @@ where Some(self.values[i].to_value()) } - fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { + fn write_hash(&self, hasher: &mut StarlarkHasher) -> crate::Result<()> { self.typ.write_hash(hasher)?; for v in &*self.values { v.write_hash(hasher)?; diff --git a/starlark-rust/starlark/src/values/types/record/mod.rs b/starlark-rust/starlark/src/values/types/record/mod.rs deleted file mode 100644 index 32f03ca0e4e8c..0000000000000 --- a/starlark-rust/starlark/src/values/types/record/mod.rs +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! A `record` type, comprising of a fixed set of fields. -//! -//! Calling `record()` produces a `RecordType`. Calling `RecordType` produces a [`Record`]. -//! The field names of the record are only stored once, potentially reducing memory usage. -//! Created in Starlark using the `record()` function, which accepts keyword arguments. -//! The keys become field names, and values are the types. Calling the resulting -//! function produces an actual record. -//! -//! ``` -//! # starlark::assert::is_true(r#" -//! IpAddress = record(host=str.type, port=int.type) -//! rec = IpAddress(host="localhost", port=80) -//! rec.port == 80 -//! # "#); -//! ``` -//! -//! It is also possible to use `field(type, default)` type to give defaults: -//! -//! ``` -//! # starlark::assert::is_true(r#" -//! IpAddress = record(host=str.type, port=field(int.type, 80)) -//! rec = IpAddress(host="localhost") -//! rec.port == 80 -//! # "#); -//! 
``` - -pub(crate) mod field; -pub(crate) mod globals; -pub(crate) mod instance; -pub(crate) mod matcher; -pub(crate) mod record_type; -pub(crate) mod ty_record_type; - -pub use crate::values::record::instance::Record; diff --git a/starlark-rust/starlark/src/values/types/record/record_type.rs b/starlark-rust/starlark/src/values/types/record/record_type.rs index 2b072f43d2022..dcdadc17379ef 100644 --- a/starlark-rust/starlark/src/values/types/record/record_type.rs +++ b/starlark-rust/starlark/src/values/types/record/record_type.rs @@ -29,7 +29,6 @@ use once_cell::unsync::OnceCell; use starlark_derive::starlark_module; use starlark_derive::starlark_value; use starlark_derive::NoSerialize; -use starlark_derive::StarlarkDocs; use starlark_map::small_map::SmallMap; use starlark_map::sorted_map::SortedMap; use starlark_map::StarlarkHasher; @@ -43,14 +42,17 @@ use crate::environment::MethodsStatic; use crate::eval::Arguments; use crate::eval::Evaluator; use crate::eval::ParametersSpec; +use crate::eval::ParametersSpecParam; use crate::starlark_complex_values; +use crate::typing::callable::TyCallable; use crate::typing::starlark_value::TyStarlarkValue; use crate::typing::user::TyUser; use crate::typing::user::TyUserFields; use crate::typing::user::TyUserParams; -use crate::typing::Param; +use crate::typing::ParamIsRequired; +use crate::typing::ParamSpec; use crate::typing::Ty; -use crate::typing::TyFunction; +use crate::util::ArcStr; use crate::values::function::FUNCTION_TYPE; use crate::values::record::field::FieldGen; use crate::values::record::matcher::RecordTypeMatcher; @@ -64,17 +66,18 @@ use crate::values::FrozenValue; use crate::values::StarlarkValue; use crate::values::Trace; use crate::values::Value; +use crate::values::ValueLifetimeless; use crate::values::ValueLike; use crate::values::ValueTypedComplex; #[doc(hidden)] -pub trait RecordCell { +pub trait RecordCell: ValueLifetimeless { type TyRecordDataOpt: Debug; fn get_or_init_ty( ty: &Self::TyRecordDataOpt, - f: impl FnOnce() -> anyhow::Result<Arc<TyRecordData>>, - ) -> anyhow::Result<()>; + f: impl FnOnce() -> crate::Result<Arc<TyRecordData>>, + ) -> crate::Result<()>; fn get_ty(ty: &Self::TyRecordDataOpt) -> Option<&Arc<TyRecordData>>; } @@ -83,8 +86,8 @@ impl<'v> RecordCell for Value<'v> { fn get_or_init_ty( ty: &Self::TyRecordDataOpt, - f: impl FnOnce() -> anyhow::Result<Arc<TyRecordData>>, - ) -> anyhow::Result<()> { + f: impl FnOnce() -> crate::Result<Arc<TyRecordData>>, + ) -> crate::Result<()> { ty.get_or_try_init(f)?; Ok(()) } @@ -98,8 +101,8 @@ impl RecordCell for FrozenValue { fn get_or_init_ty( ty: &Self::TyRecordDataOpt, - f: impl FnOnce() -> anyhow::Result<Arc<TyRecordData>>, - ) -> anyhow::Result<()> { + f: impl FnOnce() -> crate::Result<Arc<TyRecordData>>, + ) -> crate::Result<()> { let _ignore = (ty, f); Ok(()) } @@ -118,15 +121,7 @@ enum RecordTypeError { } /// The result of `record()`, being the type of records. -#[derive( - Debug, - Trace, - NoSerialize, - ProvidesStaticType, - StarlarkDocs, - Allocative -)] -#[starlark_docs(builtin = "extension")] +#[derive(Debug, Trace, NoSerialize, ProvidesStaticType, Allocative)] pub struct RecordTypeGen<V: RecordCell> { pub(crate) id: TypeInstanceId, #[allocative(skip)] // TODO(nga): do not skip.
@@ -173,16 +168,18 @@ impl<'v> RecordType<'v> { fn make_parameter_spec( fields: &SmallMap<String, FieldGen<Value<'v>>>, ) -> ParametersSpec<FrozenValue> { - let mut parameters = ParametersSpec::with_capacity("record".to_owned(), fields.len()); - parameters.no_more_positional_args(); - for (name, field) in fields { - if field.default.is_some() { - parameters.optional(name); - } else { - parameters.required(name); - } - } - parameters.finish() + ParametersSpec::new_named_only( + "record", + fields.iter().map(|(name, field)| { + ( + name.as_str(), + match field.default { + None => ParametersSpecParam::Required, + Some(_default) => ParametersSpecParam::Optional, + }, + ) + }), + ) } } @@ -223,7 +220,7 @@ where { type Canonical = FrozenRecordType; - fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { + fn write_hash(&self, hasher: &mut StarlarkHasher) -> crate::Result<()> { for (name, typ) in &self.fields { name.hash(hasher); // No need to hash typ.1, since it was computed from typ.0 @@ -236,27 +233,29 @@ where &self, me: Value<'v>, args: &Arguments<'v, '_>, - eval: &mut Evaluator<'v, '_>, - ) -> anyhow::Result<Value<'v>> { + eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result<Value<'v>> { if self.ty_record_data().is_none() { - return Err(RecordTypeError::RecordTypeNotAssigned.into()); + return Err(crate::Error::new_other( + RecordTypeError::RecordTypeNotAssigned, + )); } let this = me; self.parameter_spec - .parser(args, eval, |mut param_parser, eval| { + .parser(args, eval, |param_parser, eval| { let fields = record_fields(RecordType::from_value(this).unwrap()); let mut values = Vec::with_capacity(fields.len()); for (name, field) in fields.iter() { let value = match field.default { None => { - let v: Value = param_parser.next(name)?; + let v: Value = param_parser.next()?; field.typ.check_type(v, Some(name))?; v } Some(default) => { - let v: Option<Value> = param_parser.next_opt(name)?; + let v: Option<Value> = param_parser.next_opt()?; match v { None => default, Some(v) => { @@ -273,6 +272,7 @@ where values: values.into_boxed_slice(), })) }) + .map_err(Into::into) } fn get_methods() -> Option<&'static Methods> @@ -291,7 +291,11 @@ where self.ty_record_data().map(|t| t.ty_record_type.dupe()) } - fn export_as(&self, variable_name: &str, _eval: &mut Evaluator<'v, '_>) -> anyhow::Result<()> { + fn export_as( + &self, + variable_name: &str, + _eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result<()> { V::get_or_init_ty(&self.ty_record_data, || { let fields: SortedMap<String, Ty> = self .fields .iter() @@ -318,9 +322,18 @@ where TyStarlarkValue::new::<RecordType>(), TypeInstanceId::gen(), TyUserParams { - callable: Some(TyFunction::new( - // TODO(nga): more precise parameter types.
- vec![Param::kwargs(Ty::any())], + callable: Some(TyCallable::new( + ParamSpec::new_named_only(self.fields.iter().map(|(name, field)| { + ( + ArcStr::from(name.as_str()), + if field.default.is_some() { + ParamIsRequired::No + } else { + ParamIsRequired::Yes + }, + field.ty(), + ) + }))?, ty_record.dupe(), )), ..TyUserParams::default() @@ -369,7 +382,8 @@ f_pass(RecPass(a = 1, b = 2)) #[test] fn test_record_type_as_type_compile_time() { - assert::fail( + assert::fail_golden( + "src/values/types/record/record_type/record_type_as_type_compile_time.golden", r" RecFailCt1 = record(a = field(int), b = field(int)) RecFailCt2 = record(a = field(int), b = field(int)) @@ -377,16 +391,16 @@ RecFailCt2 = record(a = field(int), b = field(int)) def f_fail_ct(x: RecFailCt1): return x.a -f_fail_ct(RecFailCt2(a = 1, b = 2)) +def test(): + f_fail_ct(RecFailCt2(a = 1, b = 2)) ", - // TODO(nga): this is runtime error, not compile time. - "Value `record[RecFailCt2](a=1, b=2)` of type `record` does not match", ); } #[test] fn test_record_type_as_type_runtime() { - assert::fail( + assert::fail_golden( + "src/values/types/record/record_type/record_type_as_type_runtime.golden", r" RecFailRt1 = record(a = field(int), b = field(int)) RecFailRt2 = record(a = field(int), b = field(int)) @@ -396,15 +410,14 @@ def f_fail_rt(x: RecFailRt1): noop(f_fail_rt)(RecFailRt2(a = 1, b = 2)) ", - "Value `record[RecFailRt2](a=1, b=2)` of type `record` does not match the type annotation", ); } #[test] fn test_anon_record() { - assert::fail( + assert::fail_golden( + "src/values/types/record/record_type/anon_record.golden", "record(a = field(int))(a = 1)", - "not assigned to a global variable", ); } } diff --git a/starlark-rust/starlark/src/values/types/record/record_type/anon_record.golden b/starlark-rust/starlark/src/values/types/record/record_type/anon_record.golden new file mode 100644 index 0000000000000..9b24b54435bdd --- /dev/null +++ b/starlark-rust/starlark/src/values/types/record/record_type/anon_record.golden @@ -0,0 +1,21 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Program: + +record(a = field(int))(a = 1) + +Error: + +Traceback (most recent call last): + * assert.bzl:1, in + record(a = field(int))(a = 1) +error: Record instance cannot be created if record type is not assigned to a global variable + --> assert.bzl:1:1 + | +1 | record(a = field(int))(a = 1) + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | diff --git a/starlark-rust/starlark/src/values/types/record/record_type/record_callable_compile_time.golden b/starlark-rust/starlark/src/values/types/record/record_type/record_callable_compile_time.golden new file mode 100644 index 0000000000000..930bbbd04dfd3 --- /dev/null +++ b/starlark-rust/starlark/src/values/types/record/record_type/record_callable_compile_time.golden @@ -0,0 +1,21 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Program: + +Rec = record(a = int, b = str) + +def test(): + Rec(c = []) + +Error: + +error: Unexpected parameter named `c` + --> assert.bzl:4:9 + | +4 | Rec(c = []) + | ^^^^^^ + | diff --git a/starlark-rust/starlark/src/values/types/record/record_type/record_type_as_type_compile_time.golden b/starlark-rust/starlark/src/values/types/record/record_type/record_type_as_type_compile_time.golden new file mode 100644 index 0000000000000..7abf7c1f08fa2 --- /dev/null +++ 
b/starlark-rust/starlark/src/values/types/record/record_type/record_type_as_type_compile_time.golden @@ -0,0 +1,25 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Program: + +RecFailCt1 = record(a = field(int), b = field(int)) +RecFailCt2 = record(a = field(int), b = field(int)) + +def f_fail_ct(x: RecFailCt1): + return x.a + +def test(): + f_fail_ct(RecFailCt2(a = 1, b = 2)) + +Error: + +error: Expected type `RecFailCt1` but got `RecFailCt2` + --> assert.bzl:8:15 + | +8 | f_fail_ct(RecFailCt2(a = 1, b = 2)) + | ^^^^^^^^^^^^^^^^^^^^^^^^ + | diff --git a/starlark-rust/starlark/src/values/types/record/record_type/record_type_as_type_runtime.golden b/starlark-rust/starlark/src/values/types/record/record_type/record_type_as_type_runtime.golden new file mode 100644 index 0000000000000..16cfa90a5c4b0 --- /dev/null +++ b/starlark-rust/starlark/src/values/types/record/record_type/record_type_as_type_runtime.golden @@ -0,0 +1,27 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Program: + +RecFailRt1 = record(a = field(int), b = field(int)) +RecFailRt2 = record(a = field(int), b = field(int)) + +def f_fail_rt(x: RecFailRt1): + return x.a + +noop(f_fail_rt)(RecFailRt2(a = 1, b = 2)) + +Error: + +Traceback (most recent call last): + * assert.bzl:7, in + noop(f_fail_rt)(RecFailRt2(a = 1, b = 2)) +error: Value `record[RecFailRt2](a=1, b=2)` of type `record` does not match the type annotation `RecFailRt1` for argument `x` + --> assert.bzl:7:1 + | +7 | noop(f_fail_rt)(RecFailRt2(a = 1, b = 2)) + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | diff --git a/starlark-rust/starlark/src/values/types/record/ty_record_type.rs b/starlark-rust/starlark/src/values/types/record/ty_record_type.rs index ed51e878db91b..7603001dd6c0f 100644 --- a/starlark-rust/starlark/src/values/types/record/ty_record_type.rs +++ b/starlark-rust/starlark/src/values/types/record/ty_record_type.rs @@ -52,7 +52,8 @@ foo(MyRec(x = 1)) #[test] fn test_fail_compile_time() { - assert::fail( + assert::fail_golden( + "src/values/types/record/ty_record_type/fail_compile_time.golden", r#" MyRec = record(x = int) WrongRec = record(x = int) @@ -62,22 +63,21 @@ def foo(x: MyRec): pass def bar(): foo(WrongRec(x = 1)) "#, - r#"Expected type `MyRec` but got `WrongRec`"#, ); } #[test] fn test_fail_runtime_time() { - assert::fail_skip_typecheck( + assert::fail_golden( + "src/values/types/record/ty_record_type/fail_runtime_time.golden", r#" MyRec = record(x = int) WrongRec = record(x = int) def foo(x: MyRec): pass -foo(WrongRec(x = 1)) +noop(foo)(WrongRec(x = 1)) "#, - r#"Value `record[WrongRec](x=1)` of type `record` does not match the type annotation `MyRec`"#, ); } @@ -111,14 +111,27 @@ assert_eq(f(MyRec(x = 1, y = 2)), 3) #[test] fn test_typecheck_field_fail() { - assert::fail( + assert::fail_golden( + "src/values/types/record/ty_record_type/typecheck_field_fail.golden", r#" MyRec = record(x = int, y = int) def f(rec: MyRec) -> int: return rec.z "#, - r#"The attribute `z` is not available on the type `MyRec`"#, + ); + } + + #[test] + fn test_typecheck_record_type_call() { + assert::fail_golden( + "src/values/types/record/ty_record_type/typecheck_record_type_call.golden", + r#" +MyRec = record(x = int) + +def test(): + MyRec(x = "") +"#, ); } } diff --git a/starlark-rust/starlark/src/values/types/record/ty_record_type/fail_compile_time.golden 
b/starlark-rust/starlark/src/values/types/record/ty_record_type/fail_compile_time.golden new file mode 100644 index 0000000000000..046ee08c1813a --- /dev/null +++ b/starlark-rust/starlark/src/values/types/record/ty_record_type/fail_compile_time.golden @@ -0,0 +1,24 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Program: + +MyRec = record(x = int) +WrongRec = record(x = int) + +def foo(x: MyRec): pass + +def bar(): + foo(WrongRec(x = 1)) + +Error: + +error: Expected type `MyRec` but got `WrongRec` + --> assert.bzl:7:9 + | +7 | foo(WrongRec(x = 1)) + | ^^^^^^^^^^^^^^^ + | diff --git a/starlark-rust/starlark/src/values/types/record/ty_record_type/fail_runtime_time.golden b/starlark-rust/starlark/src/values/types/record/ty_record_type/fail_runtime_time.golden new file mode 100644 index 0000000000000..9a8ecb2f343f2 --- /dev/null +++ b/starlark-rust/starlark/src/values/types/record/ty_record_type/fail_runtime_time.golden @@ -0,0 +1,26 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Program: + +MyRec = record(x = int) +WrongRec = record(x = int) + +def foo(x: MyRec): pass + +noop(foo)(WrongRec(x = 1)) + +Error: + +Traceback (most recent call last): + * assert.bzl:6, in + noop(foo)(WrongRec(x = 1)) +error: Value `record[WrongRec](x=1)` of type `record` does not match the type annotation `MyRec` for argument `x` + --> assert.bzl:6:1 + | +6 | noop(foo)(WrongRec(x = 1)) + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ + | diff --git a/starlark-rust/starlark/src/values/types/record/ty_record_type/typecheck_field_fail.golden b/starlark-rust/starlark/src/values/types/record/ty_record_type/typecheck_field_fail.golden new file mode 100644 index 0000000000000..5b7d7b3cf551f --- /dev/null +++ b/starlark-rust/starlark/src/values/types/record/ty_record_type/typecheck_field_fail.golden @@ -0,0 +1,21 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Program: + +MyRec = record(x = int, y = int) + +def f(rec: MyRec) -> int: + return rec.z + +Error: + +error: The attribute `z` is not available on the type `MyRec` + --> assert.bzl:4:16 + | +4 | return rec.z + | ^ + | diff --git a/starlark-rust/starlark/src/values/types/record/ty_record_type/typecheck_record_type_call.golden b/starlark-rust/starlark/src/values/types/record/ty_record_type/typecheck_record_type_call.golden new file mode 100644 index 0000000000000..b0e0c16e0a450 --- /dev/null +++ b/starlark-rust/starlark/src/values/types/record/ty_record_type/typecheck_record_type_call.golden @@ -0,0 +1,21 @@ +# @generated +# To regenerate, run: +# ``` +# STARLARK_RUST_REGENERATE_GOLDEN_TESTS=1 cargo test -p starlark --lib +# ``` + +Program: + +MyRec = record(x = int) + +def test(): + MyRec(x = "") + +Error: + +error: Expected type `int` but got `str` + --> assert.bzl:4:11 + | +4 | MyRec(x = "") + | ^^^^^^ + | diff --git a/starlark-rust/starlark/src/values/types/regex.rs b/starlark-rust/starlark/src/values/types/regex.rs deleted file mode 100644 index 5236e83605fce..0000000000000 --- a/starlark-rust/starlark/src/values/types/regex.rs +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! A type [`StarlarkRegex`] which wraps Rust value fancy_regex::Regex. -use std::fmt; -use std::fmt::Display; - -use allocative::Allocative; -use fancy_regex::Regex; -use starlark_derive::starlark_module; -use starlark_derive::starlark_value; -use starlark_derive::NoSerialize; -use starlark_derive::StarlarkDocs; - -use crate as starlark; -use crate::any::ProvidesStaticType; -use crate::environment::Methods; -use crate::environment::MethodsBuilder; -use crate::environment::MethodsStatic; -use crate::starlark_simple_value; -use crate::typing::Ty; -use crate::values::StarlarkValue; - -/// A type that can be passed around as a StarlarkRegex, which wraps Rust value -/// fancy_regex::Regex. -#[derive(ProvidesStaticType, Debug, NoSerialize, StarlarkDocs, Allocative)] -#[starlark_docs(builtin = "extension")] -pub struct StarlarkRegex(#[allocative(skip)] pub Regex); - -#[starlark_value(type = StarlarkRegex::TYPE)] -impl<'v> StarlarkValue<'v> for StarlarkRegex { - fn get_methods() -> Option<&'static Methods> { - static RES: MethodsStatic = MethodsStatic::new(); - RES.methods(regex_type_methods) - } - - fn get_type_starlark_repr() -> Ty { - Ty::starlark_value::<StarlarkRegex>() - } - - fn typechecker_ty(&self) -> Option<Ty> { - Some(Ty::starlark_value::<StarlarkRegex>()) - } -} - -impl StarlarkRegex { - /// The result of calling `type()` on regex. - pub const TYPE: &'static str = "regex"; -} - -impl Display for StarlarkRegex { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "regex({:?})", &self.0.as_str()) - } -} - -starlark_simple_value!(StarlarkRegex); -impl StarlarkRegex { - /// Create a new [`StarlarkRegex`] value. Such a value can be allocated on a heap with - /// `heap.alloc(StarlarkRegex::new(x))`. - pub fn new(x: &str) -> anyhow::Result<Self> { - Ok(Self(Regex::new(x)?)) - } -} - -#[starlark_module] -fn regex_type_methods(builder: &mut MethodsBuilder) { - fn r#match(this: &StarlarkRegex, #[starlark(require = pos)] str: &str) -> anyhow::Result<bool> { - Ok(this.0.is_match(str)?) - } -} - -#[cfg(test)] -mod tests { - use crate::assert; - - #[test] - fn test_match() { - assert::all_true( - r#" -experimental_regex("abc|def|ghi").match("abc") -not experimental_regex("abc|def|ghi").match("xyz") -not experimental_regex("^((?!abc).)*$").match("abc") -experimental_regex("^((?!abc).)*$").match("xyz") -experimental_regex("").match("xyz") -"#, - ); - } - - #[test] - fn test_str() { - assert::is_true( - r#" -str(experimental_regex("foo")) == 'regex("foo")' -"#, - ); - } -} diff --git a/starlark-rust/starlark/src/values/types/set.rs b/starlark-rust/starlark/src/values/types/set.rs new file mode 100644 index 0000000000000..8905eb82e99e7 --- /dev/null +++ b/starlark-rust/starlark/src/values/types/set.rs @@ -0,0 +1,24 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! The set type +pub(crate) mod methods; +pub(crate) mod refs; +pub(crate) mod set; +pub(crate) mod value; +pub use crate::values::set::refs::SetMut; +pub use crate::values::set::refs::SetRef; diff --git a/starlark-rust/starlark/src/values/types/set/methods.rs b/starlark-rust/starlark/src/values/types/set/methods.rs new file mode 100644 index 0000000000000..fda1e807e0028 --- /dev/null +++ b/starlark-rust/starlark/src/values/types/set/methods.rs @@ -0,0 +1,648 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Methods for the `set` type. + +use std::mem; + +use either::Either; +use starlark_derive::starlark_module; +use starlark_map::small_set::SmallSet; +use starlark_map::Hashed; +use starlark_syntax::value_error; + +use crate as starlark; +use crate::environment::MethodsBuilder; +use crate::values::none::NoneType; +use crate::values::set::refs::SetMut; +use crate::values::set::refs::SetRef; +use crate::values::set::value::SetData; +use crate::values::typing::StarlarkIter; +use crate::values::Heap; +use crate::values::UnpackValue; +use crate::values::Value; +use crate::values::ValueOfUnchecked; + +enum SetFromValue<'v> { + Set(SmallSet<Value<'v>>), + Ref(SetRef<'v>), +} + +impl<'v> SetFromValue<'v> { + fn from_value( + value: ValueOfUnchecked<'v, StarlarkIter<Value<'v>>>, + heap: &'v Heap, + ) -> crate::Result<Self> { + if let Some(v) = SetRef::unpack_value_opt(value.get()) { + Ok(SetFromValue::Ref(v)) + } else { + let mut set = SmallSet::default(); + for elem in value.get().iterate(heap)? { + set.insert_hashed(elem.get_hashed()?); + } + Ok(SetFromValue::Set(set)) + } + } + + fn get(&self) -> &SmallSet<Value<'v>> { + match self { + SetFromValue::Set(set) => set, + SetFromValue::Ref(set) => &set.aref.content, + } + } + + fn into_set(self) -> SmallSet<Value<'v>> { + match self { + SetFromValue::Set(set) => set, + SetFromValue::Ref(set) => set.aref.content.clone(), + } + } + + fn is_empty(&self) -> bool { + self.get().is_empty() + } + + fn contains_hashed(&self, value: Hashed<Value<'v>>) -> bool { + self.get().contains_hashed(value.as_ref()) + } +} + +#[starlark_module] +pub(crate) fn set_methods(builder: &mut MethodsBuilder) { + fn clear(this: Value) -> anyhow::Result<NoneType> { + let mut this = SetMut::from_value(this)?; + this.aref.clear(); + Ok(NoneType) + } + + /// Return a new set with elements from the set and all others. + /// Unlike Python, this does not support a variable number of arguments.
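+ /// The result preserves insertion order: the elements of this set come first, followed by previously-unseen elements of `other`.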
+ /// ``` + /// # starlark::assert::is_true(r#" + /// x = set([1, 2, 3]) + /// y = [3, 4, 5] + /// x.union(y) == set([1, 2, 3, 4, 5]) + /// # "#); + /// ``` + fn union<'v>( + this: SetRef<'v>, + #[starlark(require=pos)] other: ValueOfUnchecked<'v, StarlarkIter<Value<'v>>>, + heap: &'v Heap, + ) -> starlark::Result<SetData<'v>> { + let it = other.get().iterate(heap)?; + // TODO(romanp) optimize if this is empty + let mut data = this.aref.content.clone(); + for elem in it { + let hashed = elem.get_hashed()?; + data.insert_hashed(hashed); + } + Ok(SetData { content: data }) + } + + /// Return a new set with elements common to the set and all others. + /// Unlike Python, this does not support a variable number of arguments. + /// ``` + /// # starlark::assert::is_true(r#" + /// x = set([1, 2, 3]) + /// y = [3, 4, 5] + /// x.intersection(y) == set([3]) + /// # "#); + /// ``` + fn intersection<'v>( + this: SetRef<'v>, + #[starlark(require=pos)] other: ValueOfUnchecked<'v, StarlarkIter<Value<'v>>>, + heap: &'v Heap, + ) -> starlark::Result<SetData<'v>> { + let other_set = SetFromValue::from_value(other, heap)?; + let mut data = SetData::default(); + if other_set.is_empty() { + return Ok(data); + } + + for hashed in this.aref.content.iter_hashed() { + if other_set.contains_hashed(hashed.copied()) { + data.content.insert_hashed_unique_unchecked(hashed.copied()); + } + } + Ok(data) + } + + /// Returns a new set with elements in either the set or the specified iterable but not both. + /// ``` + /// # starlark::assert::is_true(r#" + /// x = set([1, 2, 3]) + /// y = [3, 4, 5] + /// x.symmetric_difference(y) == set([1, 2, 4, 5]) + /// # "#); + /// ``` + fn symmetric_difference<'v>( + this: SetRef<'v>, + #[starlark(require=pos)] other: ValueOfUnchecked<'v, StarlarkIter<Value<'v>>>, + heap: &'v Heap, + ) -> starlark::Result<SetData<'v>> { + let other_set = SetFromValue::from_value(other, heap)?; + + if other_set.is_empty() { + return Ok(SetData { + content: this.aref.content.clone(), + }); + } + + // TODO(romanp) add symmetric_difference to small set and use it here and in xor + if this.aref.content.is_empty() { + return Ok(SetData { + content: other_set.into_set(), + }); + } + + let mut data = SetData::default(); + for elem in this.aref.content.iter_hashed() { + if !other_set.contains_hashed(elem.copied()) { + data.add_hashed(elem.copied()); + } + } + + for elem in other_set.get() { + let hashed = elem.get_hashed()?; + if !this.aref.content.contains_hashed(hashed.as_ref()) { + data.add_hashed(hashed); + } + } + Ok(data) + } + + /// Add an item to the set. + /// ``` + /// # starlark::assert::is_true(r#" + /// x = set([1, 2, 3]) + /// x.add(4) + /// x == set([1, 2, 3, 4]) + /// # "#); + /// ``` + fn add<'v>( + this: Value<'v>, + #[starlark(require = pos)] value: Value<'v>, + ) -> starlark::Result<NoneType> { + let mut this = SetMut::from_value(this)?; + let hashed = value.get_hashed()?; + this.aref.add_hashed(hashed); + Ok(NoneType) + } + + /// Remove the item from the set. It raises an error if there is no such item. + /// + /// `remove` fails if the key is unhashable or if the set is + /// frozen. + /// Time complexity of this operation is *O(N)* where *N* is the number of entries in the set. + /// + /// ``` + /// # starlark::assert::is_true(r#" + /// x = set([1, 2, 3]) + /// x.remove(2) + /// x == set([1, 3]) + /// # "#) + /// ``` + /// A subsequent call to `x.remove(2)` would yield an error because the + /// element won't be found.
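+ /// Use `discard` instead to remove an element without raising an error.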
+    /// ```
+    /// # starlark::assert::fail(r#"
+    /// x = set([1, 2, 3])
+    /// x.remove(2)
+    /// x.remove(2) # error: not found
+    /// # "#, "not found");
+    /// ```
+    fn remove<'v>(
+        this: Value<'v>,
+        #[starlark(require = pos)] value: Value<'v>,
+    ) -> starlark::Result<NoneType> {
+        let mut set = SetMut::from_value(this)?;
+        let hashed = value.get_hashed()?;
+        if set.aref.remove_hashed(hashed.as_ref()) {
+            Ok(NoneType)
+        } else {
+            mem::drop(set);
+            Err(value_error!("`{value}` not found in `{this}`"))
+        }
+    }
+
+    /// Remove the item from the set. It does nothing if there is no such item.
+    ///
+    /// `discard` fails if the key is unhashable or if the set is frozen.
+    /// Time complexity of this operation is *O(N)* where *N* is the number of entries in the set.
+    ///
+    /// ```
+    /// # starlark::assert::is_true(r#"
+    /// x = set([1, 2, 3])
+    /// x.discard(2)
+    /// x == set([1, 3])
+    /// # "#)
+    /// ```
+    /// A subsequent call to `x.discard(2)` would do nothing.
+    /// ```
+    /// # starlark::assert::is_true(r#"
+    /// x = set([1, 2, 3])
+    /// x.discard(2)
+    /// x.discard(2)
+    /// x == set([1, 3])
+    /// # "#);
+    /// ```
+    fn discard<'v>(
+        this: Value<'v>,
+        #[starlark(require = pos)] value: Value<'v>,
+    ) -> starlark::Result<NoneType> {
+        let mut set = SetMut::from_value(this)?;
+        let hashed = value.get_hashed()?;
+        set.aref.remove_hashed(hashed.as_ref());
+        Ok(NoneType)
+    }
+
+    /// Removes and returns the **last** element of a set.
+    ///
+    /// `S.pop()` removes and returns the last element of the set S.
+    ///
+    /// `pop` fails if the set is empty, or if the set is frozen or has active iterators.
+    /// Time complexity of this operation is *O(1)*.
+    ///
+    /// ```
+    /// # starlark::assert::is_true(r#"
+    /// x = set([1, 2, 3])
+    /// # (
+    /// x.pop() == 3
+    /// # and
+    /// x.pop() == 2
+    /// # and
+    /// x == set([1])
+    /// # )"#);
+    /// ```
+    fn pop<'v>(this: Value<'v>) -> starlark::Result<Value<'v>> {
+        let mut set = SetMut::from_value(this)?;
+        match set.aref.content.pop() {
+            Some(x) => Ok(x),
+            None => Err(value_error!("pop from an empty set")),
+        }
+    }
+
+    /// Returns a new set with the elements of the set that are not in the specified iterable.
+    /// ```
+    /// # starlark::assert::is_true(r#"
+    /// x = set([1, 2, 3])
+    /// y = [3, 4, 5]
+    /// x.difference(y) == set([1, 2])
+    /// # "#);
+    /// ```
+    fn difference<'v>(
+        this: SetRef<'v>,
+        #[starlark(require = pos)] other: ValueOfUnchecked<'v, StarlarkIter<Value<'v>>>,
+        heap: &'v Heap,
+    ) -> starlark::Result<SetData<'v>> {
+        if this.aref.content.is_empty() {
+            // Iterate `other` anyway so that a non-iterable argument still errors.
+            other.get().iterate(heap)?;
+            return Ok(SetData::default());
+        }
+
+        let other_set = SetFromValue::from_value(other, heap)?;
+
+        if other_set.is_empty() {
+            return Ok(SetData {
+                content: this.aref.content.clone(),
+            });
+        }
+
+        let mut data = SetData::default();
+        for elem in this.aref.content.iter_hashed() {
+            if !other_set.contains_hashed(elem.copied()) {
+                data.add_hashed(elem.copied());
+            }
+        }
+        Ok(data)
+    }
+
+    /// Test whether every element of the other iterable is in the set.
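+    /// Equivalent to `all([e in x for e in y])` for a set `x` and an iterable `y`.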
+    /// ```
+    /// # starlark::assert::is_true(r#"
+    /// x = set([1, 2, 3])
+    /// y = [1, 3]
+    /// x.issuperset(y) == True
+    /// # "#);
+    /// ```
+    fn issuperset<'v>(
+        this: SetRef<'v>,
+        #[starlark(require = pos)] other: ValueOfUnchecked<'v, StarlarkIter<Value<'v>>>,
+        heap: &'v Heap,
+    ) -> starlark::Result<bool> {
+        let other_var;
+        let other = if let Some(other) = SetRef::unpack_value_opt(other.get()) {
+            if this.aref.content.len() < other.aref.content.len() {
+                return Ok(false);
+            }
+            other_var = other;
+            Either::Left(other_var.aref.content.iter_hashed().map(|v| Ok(v.copied())))
+        } else {
+            Either::Right(other.get().iterate(heap)?.map(|v| v.get_hashed()))
+        };
+
+        for elem in other {
+            if !this.aref.contains_hashed(elem?) {
+                return Ok(false);
+            }
+        }
+        Ok(true)
+    }
+
+    /// Test whether every element in the set is in the other iterable.
+    /// ```
+    /// # starlark::assert::is_true(r#"
+    /// x = set([1, 2, 3])
+    /// y = [3, 1, 2]
+    /// x.issubset(y)
+    /// # "#);
+    /// ```
+    fn issubset<'v>(
+        this: SetRef<'v>,
+        #[starlark(require = pos)] other: ValueOfUnchecked<'v, StarlarkIter<Value<'v>>>,
+        heap: &'v Heap,
+    ) -> starlark::Result<bool> {
+        if this.aref.content.is_empty() {
+            other.get().iterate(heap)?;
+            return Ok(true);
+        }
+        let rhs = SetFromValue::from_value(other, heap)?;
+        if this.aref.content.len() > rhs.get().len() {
+            return Ok(false);
+        }
+        for elem in this.aref.content.iter_hashed() {
+            if !rhs.contains_hashed(elem.copied()) {
+                return Ok(false);
+            }
+        }
+        Ok(true)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::assert;
+
+    #[test]
+    fn test_empty() {
+        assert::is_true("s = set(); len(s) == 0")
+    }
+
+    #[test]
+    fn test_single() {
+        assert::is_true("s = set([0, 1]); len(s) == 2")
+    }
+
+    #[test]
+    fn test_eq() {
+        assert::is_true("set([1, 2, 3]) == set([3, 2, 1])")
+    }
+
+    #[test]
+    fn test_clear() {
+        assert::is_true("s = set([1, 2, 3]); s.clear(); s == set()")
+    }
+
+    #[test]
+    fn test_type() {
+        assert::eq("type(set([1, 2, 3]))", "'set'")
+    }
+
+    #[test]
+    fn test_iter() {
+        assert::is_true("list([elem for elem in set([1, 2, 3])]) == [1, 2, 3]")
+    }
+
+    #[test]
+    fn test_bool_true() {
+        assert::is_true("bool(set([1, 2, 3]))")
+    }
+
+    #[test]
+    fn test_bool_false() {
+        assert::is_false("bool(set())")
+    }
+
+    #[test]
+    fn test_union() {
+        assert::eq(
+            "set([1, 2, 3]).union(set([3, 4, 5]))",
+            "set([1, 2, 3, 4, 5])",
+        )
+    }
+
+    #[test]
+    fn test_union_empty() {
+        assert::eq("set([1, 2, 3]).union(set([]))", "set([1, 2, 3])")
+    }
+
+    #[test]
+    fn test_union_iter() {
+        assert::eq("set([1, 2, 3]).union([3, 4])", "set([1, 2, 3, 4])")
+    }
+
+    #[test]
+    fn test_union_ordering_mixed() {
+        assert::eq("list(set([1, 3, 5]).union(set([4, 3])))", "[1, 3, 5, 4]");
+    }
+
+    #[test]
+    fn test_intersection() {
+        assert::eq("set([1, 2, 3]).intersection(set([3, 4, 5]))", "set([3])")
+    }
+
+    #[test]
+    fn test_intersection_empty() {
+        assert::eq("set([1, 2, 3]).intersection(set([]))", "set([])")
+    }
+
+    #[test]
+    fn test_intersection_iter() {
+        assert::eq("set([1, 2, 3]).intersection([3, 4])", "set([3])")
+    }
+
+    #[test]
+    fn test_intersection_order() {
+        assert::eq("list(set([1, 2, 3]).intersection([4, 3, 1]))", "[1, 3]")
+    }
+
+    #[test]
+    fn test_symmetric_difference() {
+        assert::eq(
+            "set([1, 2, 3]).symmetric_difference(set([3, 4, 5]))",
+            "set([1, 2, 4, 5])",
+        )
+    }
+
+    #[test]
+    fn test_symmetric_difference_empty() {
+        assert::eq(
+            "set([1, 2, 3]).symmetric_difference(set([]))",
+            "set([1, 2, 3])",
+        )
+    }
+
+    #[test]
+    fn test_symmetric_difference_iter() {
+        assert::eq(
+            "set([1, 2, 3]).symmetric_difference([3, 4])",
"set([1, 2, 4])", + ) + } + + #[test] + fn test_symmetric_difference_ord() { + assert::eq( + "list(set([1, 2, 3, 7]).symmetric_difference(set([4, 3, 1])))", + "[2, 7, 4]", + ) + } + + #[test] + fn test_add() { + assert::eq(r#"x = set([1, 2, 3]);x.add(0);x"#, "set([0, 1, 2, 3])") + } + + #[test] + fn test_add_empty() { + assert::eq(r#"x = set([]);x.add(0);x"#, "set([0])") + } + + #[test] + fn test_add_existing() { + assert::eq(r#"x = set([0]);x.add(0);x"#, "set([0])") + } + + #[test] + fn test_add_order() { + assert::eq(r#"x = set([1, 2, 3]);x.add(2);list(x)"#, "[1, 2, 3]"); + assert::eq(r#"x = set([1, 2, 3]);x.add(0);list(x)"#, "[1, 2, 3, 0]") + } + + #[test] + fn test_remove() { + assert::eq("x = set([0, 1]);x.remove(1);x", "set([0])") + } + + #[test] + fn test_remove_empty() { + assert::fail("set([]).remove(0)", "`0` not found in `set([])`"); + } + + #[test] + fn test_remove_not_existing() { + assert::fail("set([1]).remove(0)", "`0` not found in `set([1])`"); + } + + #[test] + fn test_discard() { + assert::eq("x = set([0, 1]);x.discard(1);x", "set([0])") + } + + #[test] + fn test_discard_multiple_times() { + assert::eq("x = set([0, 1]); x.discard(0); x.discard(0); x", "set([1])"); + } + + #[test] + fn test_pop() { + assert::is_true("x = set([1, 0]); (x.pop() == 0 and x.pop() == 1 and x == set())"); + } + + #[test] + fn test_pop_empty() { + assert::fail("x = set([]); x.pop()", "pop from an empty set"); + } + + #[test] + fn test_difference() { + assert::eq("set([1, 2, 3]).difference(set([2]))", "set([1, 3])") + } + + #[test] + fn test_difference_iter() { + assert::eq("set([1, 2, 3]).difference([3, 2])", "set([1])") + } + + #[test] + fn test_difference_order() { + assert::eq("list(set([3, 2, 1]).difference([2]))", "[3, 1]") + } + + #[test] + fn test_difference_empty_lhs() { + assert::eq("set([]).difference(set([2]))", "set([])") + } + + #[test] + fn test_difference_empty_rhs() { + assert::eq("set([1, 2]).difference(set([]))", "set([2, 1])") + } + + #[test] + fn test_is_superset() { + assert::is_true("set([1, 2, 3, 4]).issuperset(set([1, 3, 2]))") + } + + #[test] + fn test_is_not_superset() { + assert::is_false("set([1, 2]).issuperset(set([1, 3, 5]))") + } + + #[test] + fn test_is_not_superset_empty_lhs() { + assert::is_false("set([]).issuperset(set([1]))"); + } + + #[test] + fn test_is_superset_empty_rhs() { + assert::is_true("set([1, 2]).issuperset(set([]))"); + assert::is_true("set([]).issuperset(set([]))") + } + + #[test] + fn test_is_superset_iter() { + assert::is_true("set([1, 2, 3]).issuperset([3, 1])") + } + + #[test] + fn test_is_subset() { + assert::is_true("set([1, 2]).issubset(set([1, 3, 2]))") + } + + #[test] + fn test_is_not_subset() { + assert::is_false("set([1, 2]).issubset(set([1, 3, 5]))") + } + + #[test] + fn test_is_subset_empty_lhs() { + assert::is_true("set([]).issubset(set([1, 3, 5]))"); + assert::is_true("set([]).issubset(set([]))") + } + + #[test] + fn test_is_not_subset_empty_rhs() { + assert::is_false("set([1, 2]).issubset(set([]))") + } + + #[test] + fn test_is_subset_iter() { + assert::is_true("set([1, 2]).issubset([1, 3, 2])") + } +} diff --git a/starlark-rust/starlark/src/values/types/set/refs.rs b/starlark-rust/starlark/src/values/types/set/refs.rs new file mode 100644 index 0000000000000..f9096338dbff9 --- /dev/null +++ b/starlark-rust/starlark/src/values/types/set/refs.rs @@ -0,0 +1,120 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use std::cell::Ref;
+use std::cell::RefCell;
+use std::cell::RefMut;
+use std::convert::Infallible;
+
+use dupe::Dupe;
+use either::Either;
+
+use super::value::FrozenSetData;
+use super::value::SetData;
+use crate::coerce::coerce;
+use crate::typing::Ty;
+use crate::values::layout::value::ValueLike;
+use crate::values::set::value::SetGen;
+use crate::values::type_repr::SetType;
+use crate::values::type_repr::StarlarkTypeRepr;
+use crate::values::FrozenValue;
+use crate::values::UnpackValue;
+use crate::values::Value;
+use crate::values::ValueError;
+
+/// A borrowed reference to a set, either mutable or frozen.
+pub struct SetRef<'v> {
+    pub(crate) aref: Either<Ref<'v, SetData<'v>>, &'v SetData<'v>>,
+}
+
+impl<'v> Clone for SetRef<'v> {
+    fn clone(&self) -> Self {
+        match &self.aref {
+            Either::Left(x) => SetRef {
+                aref: Either::Left(Ref::clone(x)),
+            },
+            Either::Right(x) => SetRef {
+                aref: Either::Right(*x),
+            },
+        }
+    }
+}
+
+impl<'v> Dupe for SetRef<'v> {}
+
+/// Mutably borrowed `Set`.
+pub struct SetMut<'v> {
+    pub(crate) aref: RefMut<'v, SetData<'v>>,
+}
+
+impl<'v> SetMut<'v> {
+    /// Downcast the value to a mutable set reference.
+    #[inline]
+    pub fn from_value(x: Value<'v>) -> anyhow::Result<SetMut<'v>> {
+        #[derive(thiserror::Error, Debug)]
+        #[error("Value is not set, value type: `{0}`")]
+        struct NotSetError(&'static str);
+
+        #[cold]
+        #[inline(never)]
+        fn error<'v>(x: Value<'v>) -> anyhow::Error {
+            if x.downcast_ref::<SetGen<FrozenSetData>>().is_some() {
+                ValueError::CannotMutateImmutableValue.into()
+            } else {
+                NotSetError(x.get_type()).into()
+            }
+        }
+
+        let ptr = x.downcast_ref::<SetGen<RefCell<SetData<'v>>>>();
+        match ptr {
+            None => Err(error(x)),
+            Some(ptr) => match ptr.0.try_borrow_mut() {
+                Ok(x) => Ok(SetMut { aref: x }),
+                Err(_) => Err(ValueError::MutationDuringIteration.into()),
+            },
+        }
+    }
+}
+
+impl<'v> StarlarkTypeRepr for SetRef<'v> {
+    type Canonical = <SetType<Value<'v>> as StarlarkTypeRepr>::Canonical;
+
+    fn starlark_type_repr() -> Ty {
+        <Self::Canonical>::starlark_type_repr()
+    }
+}
+
+impl<'v> UnpackValue<'v> for SetRef<'v> {
+    type Error = Infallible;
+
+    fn unpack_value_impl(value: Value<'v>) -> Result<Option<SetRef<'v>>, Infallible> {
+        let result = if let Some(value) = value.unpack_frozen() {
+            value
+                .downcast_ref::<SetGen<FrozenSetData>>()
+                .map(|x| SetRef {
+                    aref: Either::Right(coerce(&x.0)),
+                })
+        } else {
+            value
+                .downcast_ref::<SetGen<RefCell<SetData<'v>>>>()
+                .map(|ptr| SetRef {
+                    aref: Either::Left(ptr.0.borrow()),
+                })
+        };
+        Ok(result)
+    }
+}
diff --git a/starlark-rust/starlark/src/values/types/set/set.rs b/starlark-rust/starlark/src/values/types/set/set.rs
new file mode 100644
index 0000000000000..4cc627d60af05
--- /dev/null
+++ b/starlark-rust/starlark/src/values/types/set/set.rs
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2018 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use starlark_derive::starlark_module;
+
+use crate as starlark;
+use crate::environment::GlobalsBuilder;
+use crate::values::function::SpecialBuiltinFunction;
+use crate::values::set::refs::SetRef;
+use crate::values::set::value::SetData;
+use crate::values::typing::StarlarkIter;
+use crate::values::Heap;
+use crate::values::UnpackValue;
+use crate::values::Value;
+use crate::values::ValueOfUnchecked;
+
+#[starlark_module]
+pub(crate) fn register_set(globals: &mut GlobalsBuilder) {
+    #[starlark(
+        speculative_exec_safe,
+        special_builtin_function = SpecialBuiltinFunction::Set,
+    )]
+    fn set<'v>(
+        #[starlark(require = pos)] arg: Option<ValueOfUnchecked<'v, StarlarkIter<Value<'v>>>>,
+        heap: &'v Heap,
+    ) -> starlark::Result<SetData<'v>> {
+        let set = match arg {
+            Some(pos) => match SetRef::unpack_value_opt(pos.get()) {
+                Some(set) => (set.aref).clone(),
+                None => {
+                    let it = pos.get().iterate(heap)?;
+                    let mut data = SetData::default();
+                    for el in it {
+                        let el = el.get_hashed()?;
+                        data.content.insert_hashed(el);
+                    }
+                    data
+                }
+            },
+            None => SetData::default(),
+        };
+        Ok(set)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::assert;
+
+    #[test]
+    fn test_set_type_as_type_compile_time() {
+        assert::fail(
+            r"
+def f_fail_ct(x: set[int]):
+    return x
+
+s = set(['not_int'])
+
+f_fail_ct(s)
+",
+            // Is this actually a runtime or a compile-time error?
+            r#"Value `set(["not_int"])` of type `set` does not match the type annotation `set[int]` for argument `x`"#,
+        );
+    }
+
+    #[test]
+    fn test_return_set_type_as_type_compile_time() {
+        assert::fail(
+            r"
+def f_fail_ct(x: str) -> set[int]:
+    return set([x])
+
+f_fail_ct('not_int')
+",
+            // Is this actually a runtime or a compile-time error?
+            r#"Value `set(["not_int"])` of type `set` does not match the type annotation `set[int]` for return type"#,
+        );
+    }
+
+    #[test]
+    fn test_set_type_as_type_run_time() {
+        assert::fail(
+            r"
+def f_fail_rt(x: set[int]):
+    return x
+
+s = set(['not_int'])
+
+noop(f_fail_rt)(s)
+",
+            r#"Value `set(["not_int"])` of type `set` does not match the type annotation `set[int]` for argument `x`"#,
+        );
+    }
+}
diff --git a/starlark-rust/starlark/src/values/types/set/value.rs b/starlark-rust/starlark/src/values/types/set/value.rs
new file mode 100644
index 0000000000000..f8d6e296d7c01
--- /dev/null
+++ b/starlark-rust/starlark/src/values/types/set/value.rs
@@ -0,0 +1,472 @@
+/*
+ * Copyright 2018 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use std::cell::Ref;
+use std::cell::RefCell;
+use std::fmt;
+use std::fmt::Debug;
+use std::fmt::Display;
+use std::mem;
+use std::ops::Deref;
+
+use allocative::Allocative;
+use display_container::fmt_container;
+use serde::Serialize;
+use starlark_map::small_set::SmallSet;
+use starlark_map::Hashed;
+
+use super::refs::SetRef;
+use crate as starlark;
+use crate::coerce::coerce;
+use crate::coerce::Coerce;
+use crate::environment::Methods;
+use crate::environment::MethodsStatic;
+use crate::typing::Ty;
+use crate::util::refcell::unleak_borrow;
+use crate::values::comparison::equals_small_set;
+use crate::values::set::methods;
+use crate::values::starlark_value;
+use crate::values::type_repr::SetType;
+use crate::values::type_repr::StarlarkTypeRepr;
+use crate::values::AllocValue;
+use crate::values::Freeze;
+use crate::values::Freezer;
+use crate::values::FrozenValue;
+use crate::values::Heap;
+use crate::values::ProvidesStaticType;
+use crate::values::StarlarkValue;
+use crate::values::Trace;
+use crate::values::UnpackValue;
+use crate::values::Value;
+use crate::values::ValueError;
+
+#[derive(Clone, Default, Trace, Debug, ProvidesStaticType, Allocative)]
+#[repr(transparent)]
+pub(crate) struct SetGen<T>(pub(crate) T);
+
+/// Define the mutable set type.
+#[derive(Default, Trace, Debug, ProvidesStaticType, Allocative, Clone)]
+pub(crate) struct SetData<'v> {
+    /// The data stored by the set.
+    pub(crate) content: SmallSet<Value<'v>>,
+}
+
+impl<'v> SetData<'v> {
+    pub fn clear(&mut self) {
+        self.content.clear();
+    }
+
+    /// Iterate through the values in the set.
+    pub fn iter<'a>(&'a self) -> impl ExactSizeIterator<Item = Value<'v>> + 'a {
+        self.content.iter().copied()
+    }
+
+    /// Iterate through the values in the set, but retaining the hash of the values.
+    pub fn iter_hashed<'a>(&'a self) -> impl Iterator<Item = Hashed<Value<'v>>> + 'a
+    where
+        'v: 'a,
+    {
+        self.content.iter_hashed().map(|h| h.copied())
+    }
+
+    /// Check if the set contains a hashed element.
+    pub(crate) fn contains_hashed(&self, key: Hashed<Value<'v>>) -> bool {
+        self.content.contains_hashed(key.as_ref())
+    }
+
+    pub(crate) fn add_hashed(&mut self, value: Hashed<Value<'v>>) -> bool {
+        self.content.insert_hashed(value)
+    }
+
+    pub(crate) fn add_hashed_unique_unchecked(&mut self, value: Hashed<Value<'v>>) {
+        self.content.insert_hashed_unique_unchecked(value)
+    }
+
+    pub fn remove_hashed(&mut self, value: Hashed<&Value<'v>>) -> bool {
+        self.content.shift_remove_hashed(value)
+    }
+}
+
+#[derive(Clone, Default, Debug, ProvidesStaticType, Allocative)]
+#[repr(transparent)]
+pub(crate) struct FrozenSetData {
+    /// The data stored by the set. The values must all be hashable values.
+    content: SmallSet<FrozenValue>,
+}
+
+pub(crate) type MutableSet<'v> = SetGen<RefCell<SetData<'v>>>;
+
+pub(crate) type FrozenSet = SetGen<FrozenSetData>;
+
+impl<'v> AllocValue<'v> for SetData<'v> {
+    fn alloc_value(self, heap: &'v Heap) -> Value<'v> {
+        heap.alloc_complex(SetGen(RefCell::new(self)))
+    }
+}
+
+impl<'v> StarlarkTypeRepr for SetData<'v> {
+    type Canonical = <SetType<Value<'v>> as StarlarkTypeRepr>::Canonical;
+
+    fn starlark_type_repr() -> Ty {
+        SetType::<Value<'v>>::starlark_type_repr()
+    }
+}
+
+unsafe impl<'v> Coerce<SetData<'v>> for FrozenSetData {}
+
+// TODO: Add an optimization to avoid allocating the empty set.
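+//
+// A sketch of how freezing is used (illustrative only; `freezer` here stands
+// for the `Freezer` the runtime supplies when a module is frozen):
+//
+//     let set = SetGen(RefCell::new(SetData::default())); // mutable set
+//     let frozen: SetGen<FrozenSetData> = set.freeze(&freezer)?;
+//
+// Freezing each element turns `Value`s into `FrozenValue`s, so the frozen
+// representation needs no `RefCell`.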
+impl<'v> Freeze for MutableSet<'v> {
+    type Frozen = SetGen<FrozenSetData>;
+    fn freeze(self, freezer: &Freezer) -> anyhow::Result<Self::Frozen> {
+        let content = self.0.into_inner().content.freeze(freezer)?;
+        Ok(SetGen(FrozenSetData { content }))
+    }
+}
+
+pub(crate) fn set_methods() -> Option<&'static Methods> {
+    static RES: MethodsStatic = MethodsStatic::new();
+    RES.methods(methods::set_methods)
+}
+
+trait SetLike<'v>: Debug + Allocative {
+    type ContentRef<'a>: Deref<Target = SmallSet<Value<'v>>>
+    where
+        Self: 'a,
+        'v: 'a;
+    fn content<'a>(&'a self) -> Self::ContentRef<'a>;
+
+    // These functions are unsafe for the same reason
+    // `StarlarkValue` iterator functions are unsafe.
+    unsafe fn iter_start(&self);
+    unsafe fn content_unchecked(&self) -> &SmallSet<Value<'v>>;
+    unsafe fn iter_stop(&self);
+    // fn set_at(&self, index: Hashed<Value<'v>>, value: Value<'v>) -> crate::Result<()>;
+}
+
+impl<'v> SetLike<'v> for RefCell<SetData<'v>> {
+    type ContentRef<'a> = Ref<'a, SmallSet<Value<'v>>> where Self: 'a, 'v: 'a;
+
+    fn content<'a>(&'a self) -> Ref<'a, SmallSet<Value<'v>>> {
+        Ref::map(self.borrow(), |x| &x.content)
+    }
+
+    #[inline]
+    unsafe fn iter_start(&self) {
+        mem::forget(self.borrow());
+    }
+
+    #[inline]
+    unsafe fn iter_stop(&self) {
+        unleak_borrow(self);
+    }
+
+    #[inline]
+    unsafe fn content_unchecked(&self) -> &SmallSet<Value<'v>> {
+        &self.try_borrow_unguarded().ok().unwrap_unchecked().content
+    }
+}
+
+impl<'v> SetLike<'v> for FrozenSetData {
+    type ContentRef<'a> = &'a SmallSet<Value<'v>> where Self: 'a, 'v: 'a;
+
+    fn content(&self) -> &SmallSet<Value<'v>> {
+        coerce(&self.content)
+    }
+
+    unsafe fn iter_start(&self) {}
+
+    unsafe fn iter_stop(&self) {}
+
+    unsafe fn content_unchecked(&self) -> &SmallSet<Value<'v>> {
+        coerce(&self.content)
+    }
+}
+
+#[starlark_value(type = "set")]
+impl<'v, T: SetLike<'v> + 'v> StarlarkValue<'v> for SetGen<T>
+where
+    Self: ProvidesStaticType<'v>,
+{
+    type Canonical = FrozenSet;
+
+    /// Returns the length of the value, if this value is a sequence.
+    fn length(&self) -> crate::Result<i32> {
+        Ok(self.0.content().len() as i32)
+    }
+
+    fn is_in(&self, other: Value<'v>) -> crate::Result<bool> {
+        Ok(self
+            .0
+            .content()
+            .contains_hashed(other.get_hashed()?.as_ref()))
+    }
+
+    fn equals(&self, other: Value<'v>) -> crate::Result<bool> {
+        match SetRef::unpack_value_opt(other) {
+            None => Ok(false),
+            Some(other) => Ok(equals_small_set(&self.0.content(), &other.aref.content)),
+        }
+    }
+
+    fn get_methods() -> Option<&'static Methods> {
+        set_methods()
+    }
+
+    unsafe fn iterate(&self, me: Value<'v>, _heap: &'v Heap) -> crate::Result<Value<'v>> {
+        self.0.iter_start();
+        Ok(me)
+    }
+
+    unsafe fn iter_size_hint(&self, index: usize) -> (usize, Option<usize>) {
+        debug_assert!(index <= self.0.content().len());
+        let rem = self.0.content().len() - index;
+        (rem, Some(rem))
+    }
+
+    unsafe fn iter_next(&self, index: usize, _heap: &'v Heap) -> Option<Value<'v>> {
+        self.0.content_unchecked().iter().nth(index).copied()
+    }
+
+    unsafe fn iter_stop(&self) {
+        self.0.iter_stop();
+    }
+
+    fn to_bool(&self) -> bool {
+        !self.0.content().is_empty()
+    }
+
+    // Set union.
+    fn bit_or(&self, rhs: Value<'v>, heap: &'v Heap) -> crate::Result<Value<'v>> {
+        // Unlike `union`, `|` cannot combine a `set` with an arbitrary
+        // iterable; this follows Python semantics.
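+        // For example `set([1, 2]) | [3]` is an error (see
+        // `test_bit_or_fail_iter` below), while `set([1, 2]).union([3])`
+        // is accepted.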
+        let rhs = SetRef::unpack_value_opt(rhs)
+            .map_or_else(|| ValueError::unsupported_with(self, "|", rhs), Ok)?;
+        if self.0.content().is_empty() {
+            return Ok(heap.alloc((*rhs.aref).clone()));
+        }
+        let mut items = self.0.content().clone();
+        for h in rhs.aref.iter_hashed() {
+            items.insert_hashed(h);
+        }
+        Ok(heap.alloc(SetData { content: items }))
+    }
+
+    // Set intersection.
+    fn bit_and(&self, rhs: Value<'v>, heap: &'v Heap) -> crate::Result<Value<'v>> {
+        let rhs = SetRef::unpack_value_opt(rhs)
+            .map_or_else(|| ValueError::unsupported_with(self, "&", rhs), Ok)?;
+
+        let mut items = SmallSet::new();
+        if self.0.content().is_empty() {
+            return Ok(heap.alloc(SetData { content: items }));
+        }
+
+        for h in rhs.aref.iter_hashed() {
+            if self.0.content().contains_hashed(h.as_ref()) {
+                items.insert_hashed_unique_unchecked(h);
+            }
+        }
+
+        Ok(heap.alloc(SetData { content: items }))
+    }
+
+    // Set symmetric difference.
+    fn bit_xor(&self, rhs: Value<'v>, heap: &'v Heap) -> crate::Result<Value<'v>> {
+        let rhs = SetRef::unpack_value_opt(rhs)
+            .map_or_else(|| ValueError::unsupported_with(self, "^", rhs), Ok)?;
+        if rhs.aref.content.is_empty() {
+            return Ok(heap.alloc(SetData {
+                content: self.0.content().clone(),
+            }));
+        }
+        let mut data = SetData::default();
+        for elem in self.0.content().iter_hashed() {
+            if !rhs.aref.contains_hashed(elem.copied()) {
+                data.add_hashed_unique_unchecked(elem.copied());
+            }
+        }
+
+        for hashed in rhs.aref.iter_hashed() {
+            if !self.0.content().contains_hashed(hashed.as_ref()) {
+                data.add_hashed(hashed);
+            }
+        }
+        Ok(heap.alloc(data))
+    }
+
+    // Set difference.
+    // TODO(romanp): implement difference at the SmallSet level and reuse it
+    // here and in the `difference` method.
+    fn sub(&self, rhs: Value<'v>, heap: &'v Heap) -> crate::Result<Value<'v>> {
+        let rhs = SetRef::unpack_value_opt(rhs)
+            .map_or_else(|| ValueError::unsupported_with(self, "-", rhs), Ok)?;
+
+        if self.0.content().is_empty() {
+            return Ok(heap.alloc(SetData {
+                content: SmallSet::new(),
+            }));
+        }
+
+        if rhs.aref.content.is_empty() {
+            return Ok(heap.alloc(SetData {
+                content: self.0.content().clone(),
+            }));
+        }
+
+        let mut data = SetData::default();
+
+        for elem in self.0.content().iter_hashed() {
+            if !rhs.aref.contains_hashed(elem.copied()) {
+                data.add_hashed(elem.copied());
+            }
+        }
+        Ok(heap.alloc(data))
+    }
+
+    fn typechecker_ty(&self) -> Option<Ty> {
+        Some(Ty::any_set())
+    }
+
+    fn get_type_starlark_repr() -> Ty {
+        Ty::any_set()
+    }
+}
+
+impl<'v, T: SetLike<'v>> Serialize for SetGen<T> {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        serializer.collect_seq(self.0.content().iter())
+    }
+}
+
+impl<'v, T: SetLike<'v>> Display for SetGen<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt_container(f, "set([", "])", self.0.content().iter())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::assert;
+
+    #[test]
+    fn test_bit_or() {
+        assert::eq("set([1, 2, 3]) | set([3, 4])", "set([1, 2, 3, 4])")
+    }
+
+    #[test]
+    fn test_bit_or_lhs_empty() {
+        assert::eq("set() | set([3, 4])", "set([3, 4])")
+    }
+
+    #[test]
+    fn test_bit_or_rhs_empty() {
+        assert::eq("set([1, 2, 3]) | set()", "set([1, 2, 3])")
+    }
+
+    #[test]
+    fn test_bit_or_fail_iter() {
+        assert::fail(
+            "set([1, 2, 3]) | []",
+            "Operation `|` not supported for types `set` and `list`",
+        );
+    }
+
+    #[test]
+    fn test_bit_or_ord() {
+        assert::eq("list(set([5, 1, 3]) | set([4, 5, 2]))", "[5, 1, 3, 4, 2]")
+    }
+
+    #[test]
+    fn test_bit_and() {
+        assert::eq("set([1, 2, 3]) & set([3, 4])", "set([3])")
+    }
+
+    #[test]
+    fn test_bit_and_lhs_empty() {
+        assert::eq("set() & set([3, 4])", "set([])")
+    }
+
+    #[test]
+    fn test_bit_and_rhs_empty() {
+        assert::eq("set([1, 2, 3]) & set()", "set([])")
+    }
+
+    #[test]
+    fn test_bit_and_ord() {
+        assert::eq("list(set([1, 2, 3]) & set([4, 3, 1]))", "[3, 1]")
+    }
+
+    #[test]
+    fn test_bit_and_fail_iter() {
+        assert::fail(
+            "set([1, 2, 3]) & []",
+            "Operation `&` not supported for types `set` and `list`",
+        );
+    }
+
+    #[test]
+    fn test_bit_xor() {
+        assert::eq("set([1, 2, 3]) ^ set([3, 4])", "set([4, 2, 1])")
+    }
+
+    #[test]
+    fn test_bit_xor_ord() {
+        assert::eq("list(set([1, 2, 3, 7]) ^ set([4, 3, 1]))", "[2, 7, 4]")
+    }
+
+    #[test]
+    fn test_bit_xor_lhs_empty() {
+        assert::eq("set() ^ set([3, 4])", "set([3, 4])")
+    }
+
+    #[test]
+    fn test_bit_xor_rhs_empty() {
+        assert::eq("set([1, 2, 3]) ^ set()", "set([3, 2, 1])")
+    }
+
+    #[test]
+    fn test_bit_xor_fail_iter() {
+        assert::fail(
+            "set([1, 2, 3]) ^ []",
+            "Operation `^` not supported for types `set` and `list`",
+        );
+    }
+
+    #[test]
+    fn test_sub() {
+        assert::eq("set([1, 2, 3]) - set([2])", "set([1, 3])")
+    }
+
+    #[test]
+    fn test_sub_empty_lhs() {
+        assert::eq("set([]) - set([2])", "set([])")
+    }
+
+    #[test]
+    fn test_sub_empty_rhs() {
+        assert::eq("set([1, 2]) - set([])", "set([2, 1])")
+    }
+
+    #[test]
+    fn test_sub_fail_iter() {
+        assert::fail(
+            "set([1, 2, 3]) - []",
+            "Operation `-` not supported for types `set` and `list`",
+        );
+    }
+}
diff --git a/starlark-rust/starlark/src/values/types/starlark_value_as_type.rs b/starlark-rust/starlark/src/values/types/starlark_value_as_type.rs
index 06526f79b91a8..f62dc5e28f68b 100644
--- a/starlark-rust/starlark/src/values/types/starlark_value_as_type.rs
+++ b/starlark-rust/starlark/src/values/types/starlark_value_as_type.rs
@@ -29,20 +29,28 @@ use starlark_derive::NoSerialize;
 
 use crate as starlark;
 use crate::any::ProvidesStaticType;
+use crate::docs::DocItem;
+use crate::docs::DocMember;
+use crate::docs::DocProperty;
+use crate::docs::DocType;
 use crate::typing::Ty;
 use crate::values::layout::avalue::alloc_static;
+use crate::values::layout::avalue::AValueBasic;
 use crate::values::layout::avalue::AValueImpl;
-use crate::values::layout::avalue::Basic;
 use crate::values::layout::heap::repr::AValueRepr;
 use crate::values::type_repr::StarlarkTypeRepr;
 use crate::values::typing::ty::AbstractType;
+use crate::values::typing::TypeType;
 use crate::values::AllocFrozenValue;
+use crate::values::AllocValue;
 use crate::values::FrozenHeap;
 use crate::values::FrozenValue;
+use crate::values::Heap;
 use crate::values::StarlarkValue;
+use crate::values::Value;
 
 #[derive(Debug, NoSerialize, Allocative, ProvidesStaticType)]
-struct StarlarkValueAsTypeStarlarkValue(fn() -> Ty);
+struct StarlarkValueAsTypeStarlarkValue(fn() -> Ty, fn() -> DocItem);
 
 #[starlark_value(type = "type")]
 impl<'v> StarlarkValue<'v> for StarlarkValueAsTypeStarlarkValue {
@@ -51,6 +59,10 @@ impl<'v> StarlarkValue<'v> for StarlarkValueAsTypeStarlarkValue {
     fn eval_type(&self) -> Option<Ty> {
         Some((self.0)())
     }
+
+    fn documentation(&self) -> DocItem {
+        (self.1)()
+    }
 }
 
 impl Display for StarlarkValueAsTypeStarlarkValue {
@@ -65,17 +77,23 @@ impl Display for StarlarkValueAsTypeStarlarkValue {
 ///
 /// ```
 /// use allocative::Allocative;
+/// use starlark::any::ProvidesStaticType;
 /// use starlark::environment::GlobalsBuilder;
+/// use starlark::values::starlark_value;
 /// use starlark::values::starlark_value_as_type::StarlarkValueAsType;
+/// use starlark::values::NoSerialize;
 /// use starlark::values::StarlarkValue;
-/// use starlark::any::ProvidesStaticType;
-/// use starlark::values::{NoSerialize, starlark_value};
-/// #[derive(Debug, derive_more::Display, Allocative, ProvidesStaticType, NoSerialize)]
+/// #[derive(
+///     Debug,
+///     derive_more::Display,
+///     Allocative,
+///     ProvidesStaticType,
+///     NoSerialize
+/// )]
 /// struct Temperature;
 ///
 /// #[starlark_value(type = "temperature")]
-/// impl<'v> StarlarkValue<'v> for Temperature {
-/// }
+/// impl<'v> StarlarkValue<'v> for Temperature {}
 ///
 /// fn my_type_globals(globals: &mut GlobalsBuilder) {
 ///     // This can now be used like:
@@ -85,7 +103,10 @@ impl<T: StarlarkTypeRepr> Display for StarlarkValueAsType<T> {
 ///     const Temperature: StarlarkValueAsType<Temperature> = StarlarkValueAsType::new();
 /// }
 /// ```
-pub struct StarlarkValueAsType<T>(PhantomData<T>);
+pub struct StarlarkValueAsType<T>(
+    &'static AValueRepr<AValueImpl<'static, AValueBasic<StarlarkValueAsTypeStarlarkValue>>>,
+    PhantomData<T>,
+);
 
 impl<T: StarlarkTypeRepr> Debug for StarlarkValueAsType<T> {
     fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
@@ -103,31 +124,59 @@ impl<T: StarlarkTypeRepr> Display for StarlarkValueAsType<T> {
 
 impl<T: StarlarkTypeRepr> StarlarkValueAsType<T> {
     /// Constructor.
-    pub const fn new() -> Self {
-        Self(PhantomData)
+    ///
+    /// Use [`new_no_docs`](Self::new_no_docs) if `T` is not a `StarlarkValue`.
+    pub const fn new() -> Self
+    where
+        T: StarlarkValue<'static>,
+    {
+        StarlarkValueAsType(
+            &const {
+                alloc_static(StarlarkValueAsTypeStarlarkValue(
+                    T::starlark_type_repr,
+                    || DocItem::Type(DocType::from_starlark_value::<T>()),
+                ))
+            },
+            PhantomData,
+        )
     }
 
-    const INSTANCE: AValueRepr<AValueImpl<'static, Basic, StarlarkValueAsTypeStarlarkValue>> = alloc_static(
-        Basic,
-        StarlarkValueAsTypeStarlarkValue(T::starlark_type_repr),
-    );
-}
-
-impl<T: StarlarkTypeRepr> Default for StarlarkValueAsType<T> {
-    fn default() -> Self {
-        Self::new()
+    /// Constructor.
+    pub const fn new_no_docs() -> Self {
+        StarlarkValueAsType(
+            &const {
+                alloc_static(StarlarkValueAsTypeStarlarkValue(
+                    T::starlark_type_repr,
+                    || {
+                        DocItem::Member(DocMember::Property(DocProperty {
+                            docs: None,
+                            typ: AbstractType::starlark_type_repr(),
+                        }))
+                    },
+                ))
+            },
+            PhantomData,
+        )
     }
 }
 
 impl<T: StarlarkTypeRepr> StarlarkTypeRepr for StarlarkValueAsType<T> {
+    type Canonical = <TypeType as StarlarkTypeRepr>::Canonical;
+
     fn starlark_type_repr() -> Ty {
-        AbstractType::starlark_type_repr()
+        <Self::Canonical as StarlarkTypeRepr>::starlark_type_repr()
+    }
+}
+
+impl<'v, T: StarlarkTypeRepr> AllocValue<'v> for StarlarkValueAsType<T> {
+    fn alloc_value(self, _heap: &'v Heap) -> Value<'v> {
+        FrozenValue::new_repr(self.0).to_value()
     }
 }
 
 impl<T: StarlarkTypeRepr> AllocFrozenValue for StarlarkValueAsType<T> {
     fn alloc_frozen_value(self, _heap: &FrozenHeap) -> FrozenValue {
-        FrozenValue::new_repr(&Self::INSTANCE)
+        FrozenValue::new_repr(self.0)
     }
 }
diff --git a/starlark-rust/starlark/src/values/types/string.rs b/starlark-rust/starlark/src/values/types/string.rs
new file mode 100644
index 0000000000000..b66c4768a6301
--- /dev/null
+++ b/starlark-rust/starlark/src/values/types/string.rs
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2018 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! The string type. All strings must be valid UTF-8.
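+//! (Strings are stored as Rust `str`, so the UTF-8 invariant is enforced by
+//! construction.)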
+
+mod alloc_unpack;
+pub(crate) mod dot_format;
+pub(crate) mod globals;
+pub(crate) mod intern;
+pub(crate) mod interpolation;
+pub(crate) mod iter;
+pub(crate) mod methods;
+pub(crate) mod repr;
+pub(crate) mod simd;
+pub(crate) mod str_type;
+
+pub use crate::values::types::string::str_type::StarlarkStr;
+pub use crate::values::types::string::str_type::STRING_TYPE;
diff --git a/starlark-rust/starlark/src/values/types/string/alloc_unpack.rs b/starlark-rust/starlark/src/values/types/string/alloc_unpack.rs
index a769ed4157bf8..c1e01fcab2e3c 100644
--- a/starlark-rust/starlark/src/values/types/string/alloc_unpack.rs
+++ b/starlark-rust/starlark/src/values/types/string/alloc_unpack.rs
@@ -17,6 +17,8 @@
 
 //! Implementations of alloc and unpack traits for string.
 
+use std::convert::Infallible;
+
 use crate::typing::Ty;
 use crate::values::alloc_value::AllocFrozenStringValue;
 use crate::values::alloc_value::AllocStringValue;
@@ -68,6 +70,8 @@ impl<'v> AllocStringValue<'v> for String {
 }
 
 impl StarlarkTypeRepr for char {
+    type Canonical = <String as StarlarkTypeRepr>::Canonical;
+
     fn starlark_type_repr() -> Ty {
         String::starlark_type_repr()
     }
@@ -86,6 +90,8 @@ impl<'v> AllocStringValue<'v> for char {
 }
 
 impl StarlarkTypeRepr for &'_ String {
+    type Canonical = <String as StarlarkTypeRepr>::Canonical;
+
     fn starlark_type_repr() -> Ty {
         String::starlark_type_repr()
     }
@@ -116,21 +122,17 @@ impl<'v> AllocStringValue<'v> for &'_ str {
 }
 
 impl<'v> UnpackValue<'v> for &'v str {
-    fn expected() -> String {
-        "str".to_owned()
-    }
+    type Error = Infallible;
 
-    fn unpack_value(value: Value<'v>) -> Option<Self> {
-        value.unpack_str()
+    fn unpack_value_impl(value: Value<'v>) -> Result<Option<Self>, Self::Error> {
+        Ok(value.unpack_str())
     }
 }
 
 impl<'v> UnpackValue<'v> for String {
-    fn expected() -> String {
-        "str".to_owned()
-    }
+    type Error = Infallible;
 
-    fn unpack_value(value: Value<'v>) -> Option<Self> {
-        value.unpack_str().map(ToOwned::to_owned)
+    fn unpack_value_impl(value: Value<'v>) -> Result<Option<Self>, Self::Error> {
+        Ok(value.unpack_str().map(ToOwned::to_owned))
     }
 }
diff --git a/starlark-rust/starlark/src/values/types/string/dot_format.rs b/starlark-rust/starlark/src/values/types/string/dot_format.rs
index e150313873080..d9e89a72c13a3 100644
--- a/starlark-rust/starlark/src/values/types/string/dot_format.rs
+++ b/starlark-rust/starlark/src/values/types/string/dot_format.rs
@@ -17,7 +17,7 @@
 
 use std::str::FromStr;
 
-use anyhow::Context as _;
+use starlark_syntax::dot_format_parser::FormatConv;
 use starlark_syntax::dot_format_parser::FormatParser;
 use starlark_syntax::dot_format_parser::FormatToken;
 
@@ -37,7 +37,11 @@ pub(crate) fn parse_format_one(s: &str) -> Option<(String, String)> {
         match parser.next().ok()?? {
             FormatToken::Text(text) => before.push_str(text),
             FormatToken::Escape(e) => before.push_str(e.as_str()),
-            FormatToken::Capture { capture: "", .. } => break,
+            FormatToken::Capture {
+                capture: "",
+                conv: FormatConv::Str,
+                pos: _,
+            } => break,
             FormatToken::Capture { .. } => return None,
         }
     }
@@ -142,9 +146,11 @@ pub(crate) fn format<'v>(
         match token {
             FormatToken::Text(text) => result.push_str(text),
             FormatToken::Escape(e) => result.push_str(e.as_str()),
-            FormatToken::Capture { capture, .. } => {
-                format_capture(capture, &mut args, &kwargs, &mut result)?
-            }
+            FormatToken::Capture {
+                capture,
+                conv,
+                pos: _,
+            } => format_capture(capture, conv, &mut args, &kwargs, &mut result)?,
         }
     }
     let r = heap.alloc_str(&result);
@@ -153,53 +159,39 @@ pub(crate) fn format<'v>(
 }
 
 fn format_capture<'v, T: Iterator<Item = Value<'v>>>(
-    capture: &str,
+    field: &str,
+    conv: FormatConv,
     args: &mut FormatArgs<'v, T>,
     kwargs: &Dict,
     result: &mut String,
 ) -> anyhow::Result<()> {
-    let (n, conv) = {
-        if let Some((n, conv)) = capture.split_once('!') {
-            (n, conv)
-        } else {
-            (capture, "s")
-        }
-    };
     let conv_s = |x: Value, result: &mut String| x.collect_str(result);
     let conv_r = |x: Value, result: &mut String| x.collect_repr(result);
     let conv: &dyn Fn(Value, &mut String) = match conv {
-        "s" => &conv_s,
-        "r" => &conv_r,
-        c => {
-            return Err(anyhow::anyhow!(
-                concat!(
-                    "'{}' is not a valid format string specifier, only ",
-                    "'s' and 'r' are valid specifiers",
-                ),
-                c
-            ));
-        }
+        FormatConv::Str => &conv_s,
+        FormatConv::Repr => &conv_r,
     };
-    if n.is_empty() {
+    if field.is_empty() {
         conv(args.next_ordered()?, result);
         Ok(())
-    } else if n.chars().all(|c| c.is_ascii_digit()) {
-        let i = usize::from_str(n)
-            .with_context(|| format!("Error parsing `{n}` as a format string index"))?;
+    } else if field.bytes().all(|c| c.is_ascii_digit()) {
+        let i = usize::from_str(field).map_err(|e| {
+            anyhow::anyhow!("Error parsing `{field}` as a format string index: {e}")
+        })?;
         conv(args.by_index(i)?, result);
         Ok(())
     } else {
-        if let Some(x) = n.chars().find(|c| match c {
-            '.' | ',' | '[' | ']' => true,
+        if let Some(x) = field.bytes().find(|c| match c {
+            b'.' | b',' | b'[' | b']' => true,
             _ => false,
         }) {
             return Err(anyhow::anyhow!(
                 "Invalid character '{}' inside replacement field",
-                x
+                char::from(x)
             ));
         }
-        match kwargs.get_str(n) {
-            None => Err(ValueError::KeyNotFound(n.to_owned()).into()),
+        match kwargs.get_str(field) {
+            None => Err(ValueError::KeyNotFound(field.to_owned()).into()),
             Some(v) => {
                 conv(v, result);
                 Ok(())
@@ -211,10 +203,12 @@ fn format_capture<'v, T: Iterator<Item = Value<'v>>>(
 #[cfg(test)]
 mod tests {
     use starlark_map::small_map::SmallMap;
+    use starlark_syntax::dot_format_parser::FormatConv;
 
     use crate::assert;
    use crate::coerce::coerce;
     use crate::values::dict::Dict;
+    use crate::values::string::dot_format::format_capture;
     use crate::values::string::dot_format::parse_format_one;
     use crate::values::string::dot_format::FormatArgs;
     use crate::values::Heap;
@@ -222,11 +216,12 @@ mod tests {
 
     fn format_capture_for_test<'v, T: Iterator<Item = Value<'v>>>(
         capture: &str,
+        conv: FormatConv,
         args: &mut FormatArgs<'v, T>,
         kwargs: &Dict,
     ) -> anyhow::Result<String> {
         let mut result = String::new();
-        super::format_capture(capture, args, kwargs, &mut result)?;
+        format_capture(capture, conv, args, kwargs, &mut result)?;
         Ok(result)
     }
 
@@ -242,32 +237,32 @@ mod tests {
         kwargs.insert_hashed(heap.alloc_str("c").get_hashed(), heap.alloc("z"));
         let kwargs = Dict::new(coerce(kwargs));
         assert_eq!(
-            format_capture_for_test("", &mut args, &kwargs).unwrap(),
+            format_capture_for_test("", FormatConv::Str, &mut args, &kwargs).unwrap(),
             "1"
         );
         assert_eq!(
-            format_capture_for_test("!s", &mut args, &kwargs).unwrap(),
+            format_capture_for_test("", FormatConv::Str, &mut args, &kwargs).unwrap(),
             "2"
         );
         assert_eq!(
-            format_capture_for_test("!r", &mut args, &kwargs).unwrap(),
+            format_capture_for_test("", FormatConv::Repr, &mut args, &kwargs).unwrap(),
             "\"3\""
         );
         assert_eq!(
-            format_capture_for_test("a!r", &mut args, &kwargs).unwrap(),
+            format_capture_for_test("a", FormatConv::Repr, &mut args, &kwargs).unwrap(),
            "\"x\""
         );
        assert_eq!(
-            format_capture_for_test("a!s", &mut args, &kwargs).unwrap(),
+            format_capture_for_test("a", FormatConv::Str, &mut args, &kwargs).unwrap(),
             "x"
         );
-        assert!(format_capture_for_test("1", &mut args, &kwargs).is_err());
+        assert!(format_capture_for_test("1", FormatConv::Str, &mut args, &kwargs).is_err());
         let mut args = FormatArgs::new(original_args.iter().copied());
         assert_eq!(
-            format_capture_for_test("1", &mut args, &kwargs).unwrap(),
+            format_capture_for_test("1", FormatConv::Str, &mut args, &kwargs).unwrap(),
             "2"
         );
-        assert!(format_capture_for_test("", &mut args, &kwargs).is_err());
+        assert!(format_capture_for_test("", FormatConv::Str, &mut args, &kwargs).is_err());
     }
 
     #[test]
@@ -282,6 +277,11 @@ mod tests {
             Some(("abc".to_owned(), "def".to_owned())),
             parse_format_one("abc{}def")
         );
+        assert_eq!(
+            Some(("abc".to_owned(), "def".to_owned())),
+            parse_format_one("abc{!s}def")
+        );
+        assert_eq!(None, parse_format_one("abc{!r}def"));
         assert_eq!(
             Some(("a{b".to_owned(), "c}d{".to_owned())),
             parse_format_one("a{{b{}c}}d{{")
diff --git a/starlark-rust/starlark/src/values/types/string/globals.rs b/starlark-rust/starlark/src/values/types/string/globals.rs
new file mode 100644
index 0000000000000..36367bf5247f5
--- /dev/null
+++ b/starlark-rust/starlark/src/values/types/string/globals.rs
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2018 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use starlark_derive::starlark_module;
+
+use crate as starlark;
+use crate::environment::GlobalsBuilder;
+use crate::eval::Evaluator;
+use crate::values::string::StarlarkStr;
+use crate::values::StringValue;
+use crate::values::Value;
+use crate::values::ValueLike;
+
+#[starlark_module]
+pub(crate) fn register_str(globals: &mut GlobalsBuilder) {
+    /// [chr](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#bool
+    /// ): returns a string encoding a codepoint.
+    ///
+    /// `chr(i)` returns a string that encodes the single Unicode code
+    /// point whose value is specified by the integer `i`. `chr` fails
+    /// unless `0 ≤ i ≤ 0x10FFFF`.
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// chr(65) == 'A'
+    /// chr(1049) == 'Й'
+    /// chr(0x1F63F) == '😿'
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn chr(#[starlark(require = pos)] i: i32) -> anyhow::Result<char> {
+        let cp = u32::try_from(i)
+            .map_err(|_| anyhow::anyhow!("chr() parameter value is a negative integer {i}"))?;
+        match char::from_u32(cp) {
+            Some(x) => Ok(x),
+            None => Err(anyhow::anyhow!(
+                "chr() parameter value is 0x{:x} which is not a valid UTF-8 codepoint",
+                cp
+            )),
+        }
+    }
+
+    /// [ord](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#ord
+    /// ): returns the codepoint of a character
+    ///
+    /// `ord(s)` returns the integer value of the sole Unicode code point
+    /// encoded by the string `s`.
+    ///
+    /// If `s` does not encode exactly one Unicode code point, `ord` fails.
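+    /// For example, both `ord("")` and `ord("ab")` fail.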
+    /// Each invalid code within the string is treated as if it encodes the
+    /// Unicode replacement character, U+FFFD.
+    ///
+    /// Example:
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// ord("A") == 65
+    /// ord("Й") == 1049
+    /// ord("😿") == 0x1F63F
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn ord<'v>(#[starlark(require = pos)] a: StringValue<'v>) -> anyhow::Result<i32> {
+        let mut chars = a.as_str().chars();
+        if let Some(c) = chars.next() {
+            if chars.next().is_none() {
+                return Ok(u32::from(c) as i32);
+            }
+        }
+        Err(anyhow::anyhow!(
+            "ord(): {} is not a single character string",
+            a.to_value().to_repr()
+        ))
+    }
+
+    /// [repr](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#repr
+    /// ): formats its argument as a string.
+    ///
+    /// All strings in the result are double-quoted.
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// repr(1) == '1'
+    /// repr("x") == "\"x\""
+    /// repr([1, "x"]) == "[1, \"x\"]"
+    /// repr("test \"'") == "\"test \\\"'\""
+    /// repr("x\"y😿 \\'") == "\"x\\\"y\\U0001f63f \\\\'\""
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn repr<'v>(
+        #[starlark(require = pos)] a: Value<'v>,
+        eval: &mut Evaluator<'v, '_, '_>,
+    ) -> anyhow::Result<StringValue<'v>> {
+        let mut s = eval.string_pool.alloc();
+        a.collect_repr(&mut s);
+        let r = eval.heap().alloc_str(&s);
+        eval.string_pool.release(s);
+        Ok(r)
+    }
+
+    /// [str](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#str
+    /// ): formats its argument as a string.
+    ///
+    /// If x is a string, the result is x (without quotation).
+    /// All other strings, such as elements of a list of strings, are
+    /// double-quoted.
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// str(1) == '1'
+    /// str("x") == 'x'
+    /// str([1, "x"]) == "[1, \"x\"]"
+    /// # "#);
+    /// ```
+    #[starlark(as_type = StarlarkStr, speculative_exec_safe)]
+    fn str<'v>(
+        #[starlark(require = pos)] a: Value<'v>,
+        eval: &mut Evaluator<'v, '_, '_>,
+    ) -> anyhow::Result<StringValue<'v>> {
+        if let Some(a) = StringValue::new(a) {
+            // Special case that can avoid reallocating, but is equivalent.
+            Ok(a)
+        } else {
+            let mut s = eval.string_pool.alloc();
+            a.collect_repr(&mut s);
+            let r = eval.heap().alloc_str(&s);
+            eval.string_pool.release(s);
+            Ok(r)
+        }
+    }
+}
diff --git a/starlark-rust/starlark/src/values/types/string/intern/mod.rs b/starlark-rust/starlark/src/values/types/string/intern.rs
similarity index 100%
rename from starlark-rust/starlark/src/values/types/string/intern/mod.rs
rename to starlark-rust/starlark/src/values/types/string/intern.rs
diff --git a/starlark-rust/starlark/src/values/types/string/intern/interner.rs b/starlark-rust/starlark/src/values/types/string/intern/interner.rs
index 0e2d50d209200..0d3361b3d3287 100644
--- a/starlark-rust/starlark/src/values/types/string/intern/interner.rs
+++ b/starlark-rust/starlark/src/values/types/string/intern/interner.rs
@@ -19,16 +19,19 @@
 
 use hashbrown::raw::RawTable;
 
+use crate as starlark;
 use crate::collections::Hashed;
 use crate::values::FrozenStringValue;
+use crate::values::StringValue;
+use crate::values::Trace;
 
 /// `[FrozenStringValue]` interner.
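+/// Interning guarantees that equal strings are allocated at most once per
+/// heap: repeated `intern` calls with equal contents return the same value.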
 #[derive(Default)]
-pub(crate) struct FrozenStringInterner {
+pub(crate) struct FrozenStringValueInterner {
     map: RawTable<FrozenStringValue>,
 }
 
-impl FrozenStringInterner {
+impl FrozenStringValueInterner {
     pub(crate) fn intern(
         &mut self,
         s: Hashed<&str>,
@@ -50,20 +53,60 @@ impl FrozenStringInterner {
     }
 }
 
+#[derive(Default, Trace)]
+pub(crate) struct StringValueInterner<'v> {
+    map: RawTable<StringValue<'v>>,
+}
+
+impl<'v> StringValueInterner<'v> {
+    pub(crate) fn intern(
+        &mut self,
+        s: Hashed<&str>,
+        alloc: impl FnOnce() -> StringValue<'v>,
+    ) -> StringValue<'v> {
+        match self
+            .map
+            .get(s.hash().promote(), |x| s == x.get_hashed_str())
+        {
+            Some(string_value) => *string_value,
+            None => {
+                let string_value = alloc();
+                self.map
+                    .insert(s.hash().promote(), string_value, |x| x.get_hash().promote());
+                string_value
+            }
+        }
+    }
+}
+
 #[cfg(test)]
-mod test {
+mod tests {
     use crate::collections::Hashed;
-    use crate::values::string::intern::interner::FrozenStringInterner;
+    use crate::values::string::intern::interner::FrozenStringValueInterner;
+    use crate::values::string::intern::interner::StringValueInterner;
     use crate::values::FrozenHeap;
+    use crate::values::Heap;
 
     #[test]
     fn test_intern() {
         let heap1 = FrozenHeap::new();
         let heap2 = FrozenHeap::new();
 
-        let mut intern = FrozenStringInterner::default();
+        let mut intern = FrozenStringValueInterner::default();
         let xx1 = intern.intern(Hashed::new("xx"), || heap1.alloc_str("xx"));
         let xx2 = intern.intern(Hashed::new("xx"), || heap2.alloc_str("xx"));
         assert!(xx1.to_value().ptr_eq(xx2.to_value()));
     }
+
+    #[test]
+    fn test_string_value_intern() {
+        let heap1 = Heap::new();
+        let mut intern = StringValueInterner::default();
+
+        let xx1 = intern.intern(Hashed::new("xx"), || heap1.alloc_str("xx"));
+        let xx2 = intern.intern(Hashed::new("xx"), || {
+            panic!("alloc_str should be only called once")
+        });
+        assert!(xx1.to_value().ptr_eq(xx2.to_value()));
+    }
 }
diff --git a/starlark-rust/starlark/src/values/types/string/interpolation.rs b/starlark-rust/starlark/src/values/types/string/interpolation.rs
index 97cd10038fd57..26a8fc42b4ac4 100644
--- a/starlark-rust/starlark/src/values/types/string/interpolation.rs
+++ b/starlark-rust/starlark/src/values/types/string/interpolation.rs
@@ -24,10 +24,11 @@ use dupe::Dupe;
 use num_traits::Signed;
 use thiserror::Error;
 
-use crate::values::float;
-use crate::values::num::value::NumRef;
+use crate::values::float::float;
+use crate::values::float::StarlarkFloat;
 use crate::values::string::dot_format::format_one;
-use crate::values::types::int_or_big::StarlarkIntRef;
+use crate::values::types::int::int_or_big::StarlarkIntRef;
+use crate::values::types::num::value::NumRef;
 use crate::values::types::tuple::value::Tuple;
 use crate::values::Heap;
 use crate::values::StringValue;
@@ -189,7 +190,7 @@ impl<'a> Iterator for PercentFormatParser<'a> {
     }
 }
 
-pub(crate) fn percent(format: &str, value: Value) -> anyhow::Result<String> {
+pub(crate) fn percent(format: &str, value: Value) -> crate::Result<String> {
     // NOTE(nga): use could reuse `Evaluator::string_pool` here, but
     //   * we don't have access to `Evaluator` in `StarlarkValue::percent`
     //   * after single %s made intrinsic, this code is not that hot now
@@ -233,10 +234,12 @@ pub(crate) fn percent(format: &str, value: Value) -> crate::Result<String> {
                     Some(NumRef::Int(StarlarkIntRef::Big(v))) => {
                         write!(res, "{}", v.get()).unwrap()
                     }
-                    Some(NumRef::Float(v)) => match NumRef::Float(v.trunc()).as_int() {
-                        Some(v) => write!(res, "{}", v).unwrap(),
-                        None => ValueError::unsupported_type(value, "format(%d)")?,
-                    },
+                    Some(NumRef::Float(v)) => {
+                        match NumRef::Float(StarlarkFloat(v.0.trunc())).as_int() {
+                            Some(v) => write!(res, "{}", v).unwrap(),
+                            None => ValueError::unsupported_type(value, "format(%d)")?,
+                        }
+                    }
                     None => ValueError::unsupported_type(value, "format(%d)")?,
                 }
             }
@@ -344,7 +347,9 @@ pub(crate) fn percent(format: &str, value: Value) -> crate::Result<String> {
         }
     }
     if values.next().is_some() {
-        Err(StringInterpolationError::TooManyParameters.into())
+        Err(crate::Error::new_other(
+            StringInterpolationError::TooManyParameters,
+        ))
     } else {
         Ok(res)
     }
@@ -384,15 +389,23 @@ pub(crate) fn percent_s_one<'v>(
     arg: Value<'v>,
     after: &str,
     heap: &'v Heap,
-) -> anyhow::Result<StringValue<'v>> {
+) -> crate::Result<StringValue<'v>> {
     Ok(match StringValue::new(arg) {
         Some(arg) => heap.alloc_str_concat3(before, &arg, after),
         None => {
             let one = match Tuple::from_value(arg) {
                 Some(tuple) => match tuple.content() {
-                    [] => return Err(StringInterpolationError::NotEnoughParameters.into()),
+                    [] => {
+                        return Err(crate::Error::new_other(
+                            StringInterpolationError::NotEnoughParameters,
+                        ));
+                    }
                     [value] => *value,
-                    [_, _, ..] => return Err(StringInterpolationError::TooManyParameters.into()),
+                    [_, _, ..] => {
+                        return Err(crate::Error::new_other(
+                            StringInterpolationError::TooManyParameters,
+                        ));
+                    }
                 },
                 None => arg,
             };
@@ -509,15 +522,15 @@ mod tests {
 
         assert::fail(
             "'%e' % (True,)",
-            "Type of parameters mismatch, expected `int or float`, actual `bool`",
+            "Type of parameters mismatch, expected `float | int`, actual `bool (repr: True)`",
         );
         assert::fail(
             "'%e' % ('abc',)",
-            "Type of parameters mismatch, expected `int or float`, actual `string`",
+            "Type of parameters mismatch, expected `float | int`, actual `string (repr:",
         );
         assert::fail(
             "'%e' % ([],)",
-            "Type of parameters mismatch, expected `int or float`, actual `list`",
+            "Type of parameters mismatch, expected `float | int`, actual `list (repr",
         );
     }
diff --git a/starlark-rust/starlark/src/values/types/string/iter.rs b/starlark-rust/starlark/src/values/types/string/iter.rs
index c37ffcd6bff7e..a953d45e53fd7 100644
--- a/starlark-rust/starlark/src/values/types/string/iter.rs
+++ b/starlark-rust/starlark/src/values/types/string/iter.rs
@@ -47,7 +47,7 @@ use crate::values::ValueOfUnchecked;
     ProvidesStaticType,
     Allocative
 )]
-#[display(fmt = "iterator")]
+#[display("iterator")]
 #[repr(C)]
 struct StringIterableGen<'v, V: ValueLike<'v>> {
     string: V::String,
@@ -75,11 +75,11 @@ pub(crate) fn iterate_codepoints<'v>(
 }
 
 #[starlark_value(type = "iterator")]
-impl<'v, V: ValueLike<'v> + 'v> StarlarkValue<'v> for StringIterableGen<'v, V>
+impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for StringIterableGen<'v, V>
 where
     Self: ProvidesStaticType<'v>,
 {
-    unsafe fn iterate(&self, _me: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> {
+    unsafe fn iterate(&self, _me: Value<'v>, heap: &'v Heap) -> crate::Result<Value<'v>> {
         // Lazy implementation: we allocate a tuple and then iterate over it.
         let iter = if self.produce_char {
             heap.alloc_tuple_iter(self.string.as_str().chars().map(|c| heap.alloc(c)))
diff --git a/starlark-rust/starlark/src/values/types/string/methods.rs b/starlark-rust/starlark/src/values/types/string/methods.rs
new file mode 100644
index 0000000000000..55d51c70ceb93
--- /dev/null
+++ b/starlark-rust/starlark/src/values/types/string/methods.rs
@@ -0,0 +1,1312 @@
+/*
+ * Copyright 2018 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Methods for the `string` type.
+
+use std::cmp;
+
+use starlark_derive::starlark_module;
+use starlark_syntax::fast_string;
+use starlark_syntax::fast_string::convert_str_indices;
+use starlark_syntax::fast_string::StrIndices;
+
+use crate as starlark;
+use crate::environment::MethodsBuilder;
+use crate::eval::Arguments;
+use crate::eval::Evaluator;
+use crate::values::list::AllocList;
+use crate::values::list::UnpackList;
+use crate::values::none::NoneOr;
+use crate::values::string::dot_format;
+use crate::values::tuple::UnpackTuple;
+use crate::values::type_repr::StarlarkTypeRepr;
+use crate::values::types::string::iter::iterate_chars;
+use crate::values::types::string::iter::iterate_codepoints;
+use crate::values::typing::iter::StarlarkIter;
+use crate::values::Heap;
+use crate::values::StringValue;
+use crate::values::UnpackValue;
+use crate::values::Value;
+use crate::values::ValueOfUnchecked;
+
+// This does not exist in Rust: `split` would cut the string incorrectly, and
+// `split_whitespace` cannot take an `n` parameter.
+fn splitn_whitespace(s: &str, maxsplit: usize) -> Vec<String> {
+    let mut v = Vec::new();
+    let mut cur = String::new();
+    let mut split = 1;
+    let mut eat_ws = true;
+    for c in s.chars() {
+        if split >= maxsplit && !eat_ws {
+            cur.push(c)
+        } else if c.is_whitespace() {
+            if !cur.is_empty() {
+                v.push(cur);
+                cur = String::new();
+                split += 1;
+                eat_ws = true;
+            }
+        } else {
+            eat_ws = false;
+            cur.push(c)
+        }
+    }
+    if !cur.is_empty() {
+        v.push(cur)
+    }
+    v
+}
+
+fn rsplitn_whitespace(s: &str, maxsplit: usize) -> Vec<String> {
+    let mut v = Vec::new();
+    let mut cur = String::new();
+    let mut split = 1;
+    let mut eat_ws = true;
+    for c in s.chars().rev() {
+        if split >= maxsplit && !eat_ws {
+            cur.push(c)
+        } else if c.is_whitespace() {
+            if !cur.is_empty() {
+                v.push(cur.chars().rev().collect());
+                cur = String::new();
+                split += 1;
+                eat_ws = true;
+            }
+        } else {
+            eat_ws = false;
+            cur.push(c)
+        }
+    }
+    if !cur.is_empty() {
+        v.push(cur.chars().rev().collect());
+    }
+    v.reverse();
+    v
+}
+
+#[derive(StarlarkTypeRepr, UnpackValue)]
+enum StringOrTuple<'v> {
+    String(&'v str),
+    Tuple(UnpackTuple<&'v str>),
+}
+
+#[starlark_module]
+pub(crate) fn string_methods(builder: &mut MethodsBuilder) {
+    /// [string.elems](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·elems
+    /// ): returns an iterable of the elements (single-character substrings) of a string.
+    ///
+    /// `S.elems()` returns an iterable value containing the
+    /// sequence of single-character substrings of the string S.
+    ///
+    /// To materialize the entire sequence, apply `list(...)` to the
+    /// result.
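+    /// By returning an iterable rather than a list, the cost of the
+    /// conversion is deferred until it is actually needed.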
+    ///
+    /// ```
+    /// # starlark::assert::is_true(r#"
+    /// list("Hello, 世界".elems()) == ["H", "e", "l", "l", "o", ",", " ", "世", "界"]
+    /// # "#);
+    /// ```
+    fn elems<'v>(
+        this: StringValue<'v>,
+        heap: &'v Heap,
+    ) -> anyhow::Result<ValueOfUnchecked<'v, StarlarkIter<String>>> {
+        Ok(iterate_chars(this, heap))
+    }
+
+    /// [string.capitalize](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string%C2%B7capitalize
+    /// ): returns a copy of string S, where the first character (if any) is converted to uppercase;
+    /// all other characters are converted to lowercase.
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// "hello, world!".capitalize() == "Hello, world!"
+    /// "Hello, World!".capitalize() == "Hello, world!"
+    /// "".capitalize() == ""
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn capitalize(this: &str) -> anyhow::Result<String> {
+        let mut result = String::with_capacity(this.len());
+        for (i, c) in this.chars().enumerate() {
+            if i == 0 {
+                result.extend(c.to_uppercase())
+            } else {
+                result.extend(c.to_lowercase())
+            }
+        }
+        Ok(result)
+    }
+
+    /// [string.codepoints](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·codepoints
+    /// ): returns an iterable of the Unicode codepoints of a string.
+    ///
+    /// `S.codepoints()` returns an iterable value containing the
+    /// sequence of integer Unicode code points encoded by the string S.
+    /// Each invalid code within the string is treated as if it encodes the
+    /// Unicode replacement character, U+FFFD.
+    ///
+    /// By returning an iterable, not a list, the cost of decoding the string
+    /// is deferred until actually needed; apply `list(...)` to the result to
+    /// materialize the entire sequence.
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// list("Hello, 世界".codepoints()) == [72, 101, 108, 108, 111, 44, 32, 19990, 30028]
+    /// # "#);
+    /// ```
+    fn codepoints<'v>(
+        this: StringValue<'v>,
+        heap: &'v Heap,
+    ) -> anyhow::Result<ValueOfUnchecked<'v, StarlarkIter<i32>>> {
+        Ok(iterate_codepoints(this, heap))
+    }
+
+    /// [string.count](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·count
+    /// ): count the number of occurrences of a string in another string.
+    ///
+    /// `S.count(sub[, start[, end]])` returns the number of occurrences of
+    /// `sub` within the string S, or, if the optional substring indices
+    /// `start` and `end` are provided, within the designated substring of S.
+    /// They are interpreted according to Starlark's [indexing conventions](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#indexing).
+    ///
+    /// This implementation does not count occurrences of `sub` in the string `S`
+    /// that overlap other occurrences of `sub` (which can happen if some suffix of
+    /// `sub` is a prefix of `sub`). For instance, `"abababa".count("aba")` returns 2
+    /// for `[aba]b[aba]`, not counting the middle occurrence: `ab[aba]ba`
+    /// (this is following Python behavior).
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// "hello, world!".count("o") == 2
+    /// "abababa".count("aba") == 2
+    /// "hello, world!".count("o", 7, 12) == 1 # in "world"
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn count(
+        this: &str,
+        #[starlark(require = pos)] needle: &str,
+        #[starlark(require = pos, default = NoneOr::None)] start: NoneOr<i32>,
+        #[starlark(require = pos, default = NoneOr::None)] end: NoneOr<i32>,
+    ) -> anyhow::Result<i32> {
+        if let Some(StrIndices { haystack, .. }) =
+            convert_str_indices(this, start.into_option(), end.into_option())
+        {
+            Ok(fast_string::count_matches(haystack, needle) as i32)
+        } else {
+            Ok(0)
+        }
+    }
+
+    /// [string.endswith](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·endswith
+    /// ): determine if a string ends with a given suffix.
+    ///
+    /// `S.endswith(suffix)` reports whether the string S has the specified
+    /// suffix.
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// "filename.sky".endswith(".sky") == True
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn endswith(
+        this: &str,
+        #[starlark(require = pos)] suffix: StringOrTuple,
+    ) -> anyhow::Result<bool> {
+        match suffix {
+            StringOrTuple::String(x) => Ok(this.ends_with(x)),
+            StringOrTuple::Tuple(xs) => Ok(xs.items.iter().any(|x| this.ends_with(x))),
+        }
+    }
+
+    /// [string.find](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·find
+    /// ): find a substring in a string.
+    ///
+    /// `S.find(sub[, start[, end]])` returns the index of the first
+    /// occurrence of the substring `sub` within S.
+    ///
+    /// If either or both of `start` or `end` are specified,
+    /// they specify a subrange of S to which the search should be restricted.
+    /// They are interpreted according to Starlark's [indexing
+    /// conventions](#indexing).
+    ///
+    /// If no occurrence is found, `find` returns -1.
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// "bonbon".find("on") == 1
+    /// "bonbon".find("on", 2) == 4
+    /// "bonbon".find("on", 2, 5) == -1
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn find(
+        this: &str,
+        #[starlark(require = pos)] needle: &str,
+        #[starlark(require = pos, default = NoneOr::None)] start: NoneOr<i32>,
+        #[starlark(require = pos, default = NoneOr::None)] end: NoneOr<i32>,
+    ) -> anyhow::Result<i32> {
+        if let Some(StrIndices { start, haystack }) =
+            convert_str_indices(this, start.into_option(), end.into_option())
+        {
+            if let Some(index) = haystack.find(needle) {
+                let index = fast_string::len(&haystack[..index]);
+                return Ok((start + index).0 as i32);
+            }
+        }
+        Ok(-1)
+    }
+
+    /// [string.format](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·format
+    /// ): format a string.
+    ///
+    /// `S.format(*args, **kwargs)` returns a version of the format string S
+    /// in which bracketed portions `{...}` are replaced
+    /// by arguments from `args` and `kwargs`.
+    ///
+    /// Within the format string, a pair of braces `{{` or `}}` is treated as
+    /// a literal open or close brace.
+    /// Each unpaired open brace must be matched by a close brace `}`.
+    /// The optional text between corresponding open and close braces
+    /// specifies which argument to use and how to format it, and consists of
+    /// three components, all optional:
+    /// a field name, a conversion preceded by '`!`', and a format specifier
+    /// preceded by '`:`'.
+    ///
+    /// ```text
+    /// {field}
+    /// {field:spec}
+    /// {field!conv}
+    /// {field!conv:spec}
+    /// ```
+    ///
+    /// The *field name* may be either a decimal number or a keyword.
+    /// A number is interpreted as the index of a positional argument;
+    /// a keyword specifies the value of a keyword argument.
+    /// If all the numeric field names form the sequence 0, 1, 2, and so on,
+    /// they may be omitted and those values will be implied; however,
+    /// the explicit and implicit forms may not be mixed.
+    ///
+    /// The *conversion* specifies how to convert an argument value `x` to a
+    /// string. It may be either `!r`, which converts the value using
+    /// `repr(x)`, or `!s`, which converts the value using `str(x)` and is
+    /// the default.
+    ///
+    /// The *format specifier*, after a colon, specifies field width,
+    /// alignment, padding, and numeric precision.
+    /// Currently it must be empty, but it is reserved for future use.
+    ///
+    /// ```rust
+    /// # starlark::assert::all_true(r#"
+    /// "a {} c".format(3) == "a 3 c"
+    /// "a{x}b{y}c{}".format(1, x=2, y=3) == "a2b3c1"
+    /// "a{}b{}c".format(1, 2) == "a1b2c"
+    /// "({1}, {0})".format("zero", "one") == "(one, zero)"
+    /// "Is {0!r} {0!s}?".format("heterological") == "Is \"heterological\" heterological?"
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn format<'v>(
+        this: &str,
+        args: &Arguments<'v, '_>,
+        eval: &mut Evaluator<'v, '_, '_>,
+    ) -> starlark::Result<StringValue<'v>> {
+        let iter = args.positions(eval.heap())?;
+        dot_format::format(
+            this,
+            iter,
+            args.names()?,
+            &mut eval.string_pool,
+            eval.module_env.heap(),
+        )
+        .map_err(Into::into)
+    }
+
+    /// [string.index](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·index
+    /// ): search a substring inside a string, failing on not found.
+    ///
+    /// `S.index(sub[, start[, end]])` returns the index of the first
+    /// occurrence of the substring `sub` within S, like `S.find`, except
+    /// that if the substring is not found, the operation fails.
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// "bonbon".index("on") == 1
+    /// "bonbon".index("on", 2) == 4
+    /// # "#);
+    /// # starlark::assert::fail(r#"
+    /// "bonbon".index("on", 2, 5) # error: not found
+    /// # "#, "not found");
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn index(
+        this: &str,
+        #[starlark(require = pos)] needle: &str,
+        #[starlark(require = pos, default = NoneOr::None)] start: NoneOr<i32>,
+        #[starlark(require = pos, default = NoneOr::None)] end: NoneOr<i32>,
+    ) -> anyhow::Result<i32> {
+        if let Some(StrIndices { start, haystack }) =
+            convert_str_indices(this, start.into_option(), end.into_option())
+        {
+            if let Some(index) = haystack.find(needle) {
+                let index = fast_string::len(&haystack[..index]);
+                return Ok((start + index).0 as i32);
+            }
+        }
+        Err(anyhow::anyhow!(
+            "Substring '{}' not found in '{}'",
+            needle,
+            this
+        ))
+    }
+
+    /// [string.isalnum](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·isalnum
+    /// ): test if a string is composed only of letters and digits.
+    ///
+    /// `S.isalnum()` reports whether the string S is non-empty and consists
+    /// only of Unicode letters and digits.
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// "base64".isalnum() == True
+    /// "Catch-22".isalnum() == False
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn isalnum(this: &str) -> anyhow::Result<bool> {
+        if this.is_empty() {
+            return Ok(false);
+        }
+        for c in this.chars() {
+            if !c.is_alphanumeric() {
+                return Ok(false);
+            }
+        }
+        Ok(true)
+    }
+
+    /// [string.isalpha](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·isalpha
+    /// ): test if a string is composed only of letters.
+    ///
+    /// `S.isalpha()` reports whether the string S is non-empty and consists
+    /// only of Unicode letters.
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// "ABC".isalpha() == True
+    /// "Catch-22".isalpha() == False
+    /// "".isalpha() == False
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn isalpha(this: &str) -> anyhow::Result<bool> {
+        if this.is_empty() {
+            return Ok(false);
+        }
+        for c in this.chars() {
+            if !c.is_alphabetic() {
+                return Ok(false);
+            }
+        }
+        Ok(true)
+    }
+
+    /// [string.isdigit](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·isdigit
+    /// ): test if a string is composed only of digits.
+    ///
+    /// `S.isdigit()` reports whether the string S is non-empty and consists
+    /// only of Unicode digits.
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// "123".isdigit() == True
+    /// "Catch-22".isdigit() == False
+    /// "".isdigit() == False
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn isdigit(this: &str) -> anyhow::Result<bool> {
+        if this.is_empty() {
+            return Ok(false);
+        }
+        for c in this.chars() {
+            if !c.is_numeric() {
+                return Ok(false);
+            }
+        }
+        Ok(true)
+    }
+
+    /// [string.islower](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·islower
+    /// ): test if all letters of a string are lowercase.
+    ///
+    /// `S.islower()` reports whether the string S contains at least one cased
+    /// Unicode letter, and all such letters are lowercase.
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// "hello, world".islower() == True
+    /// "Catch-22".islower() == False
+    /// "123".islower() == False
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn islower(this: &str) -> anyhow::Result<bool> {
+        let mut result = false;
+        for c in this.chars() {
+            if c.is_uppercase() {
+                return Ok(false);
+            } else if c.is_lowercase() {
+                result = true;
+            }
+        }
+        Ok(result)
+    }
+
+    /// [string.isspace](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·isspace
+    /// ): test if all characters of a string are whitespace.
+    ///
+    /// `S.isspace()` reports whether the string S is non-empty and consists
+    /// only of Unicode spaces.
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// "    ".isspace() == True
+    /// "\r\t\n".isspace() == True
+    /// "".isspace() == False
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn isspace(this: &str) -> anyhow::Result<bool> {
+        if this.is_empty() {
+            return Ok(false);
+        }
+        for c in this.chars() {
+            if !c.is_whitespace() {
+                return Ok(false);
+            }
+        }
+        Ok(true)
+    }
+
+    /// [string.istitle](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·istitle
+    /// ): test if the string is title cased.
+    ///
+    /// `S.istitle()` reports whether the string S contains at least one cased
+    /// Unicode letter, and all such letters that begin a word are in title
+    /// case.
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// "Hello, World!".istitle() == True
+    /// "Catch-22".istitle() == True
+    /// "HAL-9000".istitle() == False
+    /// "123".istitle() == False
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn istitle(this: &str) -> anyhow::Result<bool> {
+        let mut last_space = true;
+        let mut result = false;
+
+        for c in this.chars() {
+            if !c.is_alphabetic() {
+                last_space = true;
+            } else {
+                if last_space {
+                    if c.is_lowercase() {
+                        return Ok(false);
+                    }
+                } else if c.is_uppercase() {
+                    return Ok(false);
+                }
+                if c.is_alphabetic() {
+                    result = true;
+                }
+                last_space = false;
+            }
+        }
+        Ok(result)
+    }
+
+    /// [string.isupper](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·isupper
+    /// ): test if all letters of a string are uppercase.
+    ///
+    /// `S.isupper()` reports whether the string S contains at least one cased
+    /// Unicode letter, and all such letters are uppercase.
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// "HAL-9000".isupper() == True
+    /// "Catch-22".isupper() == False
+    /// "123".isupper() == False
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn isupper(this: &str) -> anyhow::Result<bool> {
+        let mut result = false;
+        for c in this.chars() {
+            if c.is_lowercase() {
+                return Ok(false);
+            } else if c.is_uppercase() {
+                result = true;
+            }
+        }
+        Ok(result)
+    }
+
+    /// [string.lower](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·lower
+    /// ): convert a string to all lowercase.
+    ///
+    /// `S.lower()` returns a copy of the string S with letters converted to
+    /// lowercase.
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// "Hello, World!".lower() == "hello, world!"
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn lower(this: &str) -> anyhow::Result<String> {
+        Ok(this.to_lowercase())
+    }
+
+    /// [string.join](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·join
+    /// ): join elements with a separator.
+    ///
+    /// `S.join(iterable)` returns the string formed by concatenating each
+    /// element of its argument, with a copy of the string S between
+    /// successive elements. The argument must be an iterable whose elements
+    /// are strings.
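[Editor's note: a standalone sketch of the preallocation heuristic used by the `join` implementation that follows. The helper `join_guess` is hypothetical; the constants mirror the patch: `n` counts the remaining items plus the two already seen, plus a 20-byte spare buffer, deliberately guessing high so `push_str` rarely reallocates.]

```rust
use std::cmp;

// Upper-bound capacity guess, as in `join` below: size from the first two
// items and the iterator's size hint.
fn join_guess(sep: &str, s1: &str, s2: &str, remaining_hint: usize) -> usize {
    let n = remaining_hint + 2;
    cmp::max(s1.len(), s2.len()) * n + sep.len() * (n - 1) + 20
}

fn main() {
    let items = ["one", "two", "three"];
    let guess = join_guess(", ", items[0], items[1], items.len() - 2);
    let mut r = String::with_capacity(guess);
    r.push_str(items[0]);
    for x in &items[1..] {
        r.push_str(", ");
        r.push_str(x);
    }
    assert_eq!(r, "one, two, three");
    assert!(r.len() <= guess); // the guess is an upper bound for this input
}
```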
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// ", ".join([]) == ""
+    /// ", ".join(("x", )) == "x"
+    /// ", ".join(["one", "two", "three"]) == "one, two, three"
+    /// "a".join("ctmrn".elems()) == "catamaran"
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn join<'v>(
+        this: &str,
+        #[starlark(require = pos)] to_join: ValueOfUnchecked<'v, StarlarkIter<String>>,
+        heap: &'v Heap,
+    ) -> starlark::Result<ValueOfUnchecked<'v, String>> {
+        #[inline(always)]
+        fn as_str<'v>(x: Value<'v>) -> anyhow::Result<StringValue<'v>> {
+            StringValue::unpack_named_param(x, "to_join")
+        }
+
+        let mut it = to_join.get().iterate(heap)?;
+        match it.next() {
+            None => Ok(ValueOfUnchecked::new(Value::new_empty_string())),
+            Some(x1) => {
+                match it.next() {
+                    None => {
+                        // If there is a singleton we can avoid reallocation
+                        Ok(as_str(x1)?.to_value_of_unchecked().cast())
+                    }
+                    Some(x2) => {
+                        let s1 = as_str(x1)?.as_str();
+                        let s2 = as_str(x2)?.as_str();
+                        // guess towards the upper bound, since we throw away over-allocations quickly
+                        // include a buffer (20 bytes)
+                        let n = it.size_hint().0 + 2;
+                        let guess =
+                            (cmp::max(s1.len(), s2.len()) * n) + (this.len() * (n - 1)) + 20;
+                        let mut r = String::with_capacity(guess);
+                        r.push_str(s1);
+                        r.push_str(this);
+                        r.push_str(s2);
+                        for x in it {
+                            r.push_str(this);
+                            r.push_str(as_str(x)?.as_str());
+                        }
+                        Ok(heap.alloc_typed_unchecked(r))
+                    }
+                }
+            }
+        }
+    }
+
+    /// [string.lstrip](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·lstrip
+    /// ): trim leading whitespace.
+    ///
+    /// `S.lstrip()` returns a copy of the string S with leading whitespace removed.
+    /// In most cases instead of passing an argument you should use `removeprefix`.
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// " hello ".lstrip() == "hello "
+    /// "x!hello ".lstrip("!x ") == "hello "
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn lstrip<'v>(
+        this: StringValue<'v>,
+        #[starlark(require = pos)] chars: Option<&str>,
+        heap: &'v Heap,
+    ) -> anyhow::Result<StringValue<'v>> {
+        let res = match chars {
+            None => this.trim_start(),
+            Some(s) => this.trim_start_matches(|c| s.contains(c)),
+        };
+        if res.len() == this.len() {
+            Ok(this)
+        } else {
+            Ok(heap.alloc_str(res))
+        }
+    }
+
+    /// [string.partition](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·partition
+    /// ): partition a string into 3 components
+    ///
+    /// `S.partition(x = " ")` splits string S into three parts and returns them
+    /// as a tuple: the portion before the first occurrence of string `x`,
+    /// `x` itself, and the portion following it.
+    /// If S does not contain `x`, `partition` returns `(S, "", "")`.
+    ///
+    /// `partition` fails if `x` is not a string, or is the empty string.
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// "one/two/three".partition("/") == ("one", "/", "two/three")
+    /// "one".partition("/") == ("one", "", "")
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn partition<'v>(
+        this: StringValue<'v>,
+        #[starlark(require = pos)] needle: StringValue<'v>,
+        heap: &'v Heap,
+    ) -> anyhow::Result<(StringValue<'v>, StringValue<'v>, StringValue<'v>)> {
+        if needle.is_empty() {
+            return Err(anyhow::anyhow!(
+                "Empty separator cannot be used for partitioning"
+            ));
+        }
+        if let Some(offset) = this.find(needle.as_str()) {
+            let offset2 = offset + needle.len();
+            Ok((
+                heap.alloc_str(this.get(..offset).unwrap()),
+                needle,
+                heap.alloc_str(this.get(offset2..).unwrap()),
+            ))
+        } else {
+            let empty = StringValue::default();
+            Ok((this, empty, empty))
+        }
+    }
+
+    /// [string.replace](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·replace
+    /// ): replace all occurrences of a substring.
+    ///
+    /// `S.replace(old, new[, count])` returns a copy of string S with all
+    /// occurrences of substring `old` replaced by `new`. If the optional
+    /// argument `count`, which must be an `int`, is non-negative, it
+    /// specifies a maximum number of occurrences to replace.
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// "banana".replace("a", "o") == "bonono"
+    /// "banana".replace("a", "o", 2) == "bonona"
+    /// "banana".replace("z", "x") == "banana"
+    /// "banana".replace("", "x") == "xbxaxnxaxnxax"
+    /// "banana".replace("", "x", 2) == "xbxanana"
+    /// "".replace("", "x") == "x"
+    /// # "#);
+    /// # starlark::assert::fail(r#"
+    /// "banana".replace("a", "o", -2) # error: argument was negative
+    /// # "#, "argument was negative");
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn replace<'v>(
+        this: StringValue<'v>,
+        #[starlark(require = pos)] old: &str,
+        #[starlark(require = pos)] new: &str,
+        #[starlark(require = pos)] count: Option<i32>,
+        heap: &'v Heap,
+    ) -> anyhow::Result<StringValue<'v>> {
+        match count {
+            Some(count) if count >= 0 => {
+                Ok(heap.alloc_str(&this.replacen(old, new, count as usize)))
+            }
+            Some(count) => Err(anyhow::anyhow!(
+                "Replace final argument was negative '{}'",
+                count
+            )),
+            None => {
+                // Optimise `replace` using the Rust standard library definition,
+                // but avoiding redundant allocation in the last step
+                let x = this.as_str();
+                let mut result = String::new();
+                let mut last_end = 0;
+                for (start, part) in x.match_indices(old) {
+                    result.push_str(unsafe { x.get_unchecked(last_end..start) });
+                    result.push_str(new);
+                    last_end = start + part.len();
+                }
+                if result.is_empty() && last_end == 0 {
+                    Ok(this)
+                } else {
+                    Ok(heap
+                        .alloc_str_concat(&result, unsafe { x.get_unchecked(last_end..x.len()) }))
+                }
+            }
+        }
+    }
+
+    /// [string.rfind](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·rfind
+    /// ): find the last index of a substring.
+    ///
+    /// `S.rfind(sub[, start[, end]])` returns the index of the substring `sub`
+    /// within S, like `S.find`, except that `rfind` returns the index of
+    /// the substring's _last_ occurrence.
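[Editor's note: a standalone sketch of the allocation-saving `replace` path above. The helper name is hypothetical; like `str::replace` it stitches slices via `match_indices`, but it returns the unmatched tail separately so the caller can append it with a heap-level concat (`alloc_str_concat` in the patch) instead of copying it into the intermediate buffer.]

```rust
// Returns the replaced body plus the untouched tail after the last match.
fn replace_body_and_tail<'a>(x: &'a str, old: &str, new: &str) -> (String, &'a str) {
    let mut result = String::new();
    let mut last_end = 0;
    for (start, part) in x.match_indices(old) {
        result.push_str(&x[last_end..start]);
        result.push_str(new);
        last_end = start + part.len();
    }
    (result, &x[last_end..])
}

fn main() {
    let (body, tail) = replace_body_and_tail("banana", "an", "o");
    assert_eq!((body.as_str(), tail), ("boo", "a"));
    // `body + tail` is what the heap-level concat produces.
    assert_eq!(body + tail, "banana".replace("an", "o"));
}
```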
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// "bonbon".rfind("on") == 4
+    /// "bonbon".rfind("on", None, 5) == 1
+    /// "bonbon".rfind("on", 2, 5) == -1
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn rfind(
+        this: &str,
+        #[starlark(require = pos)] needle: &str,
+        #[starlark(require = pos, default = NoneOr::None)] start: NoneOr<i32>,
+        #[starlark(require = pos, default = NoneOr::None)] end: NoneOr<i32>,
+    ) -> anyhow::Result<i32> {
+        if let Some(StrIndices { start, haystack }) =
+            convert_str_indices(this, start.into_option(), end.into_option())
+        {
+            if let Some(index) = haystack.rfind(needle) {
+                let index = fast_string::len(&haystack[..index]);
+                return Ok((start + index).0 as i32);
+            }
+        }
+        Ok(-1)
+    }
+
+    /// [string.rindex](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·rindex
+    /// ): find the last index of a substring, failing on not found.
+    ///
+    /// `S.rindex(sub[, start[, end]])` returns the index of the substring `sub`
+    /// within S, like `S.index`, except that `rindex` returns the index of
+    /// the substring's _last_ occurrence.
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// "bonbon".rindex("on") == 4
+    /// "bonbon".rindex("on", None, 5) == 1 # in "bonbo"
+    /// # "#);
+    /// # starlark::assert::fail(r#"
+    /// "bonbon".rindex("on", 2, 5) # error: not found
+    /// # "#, "not found");
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn rindex(
+        this: &str,
+        #[starlark(require = pos)] needle: &str,
+        #[starlark(require = pos, default = NoneOr::None)] start: NoneOr<i32>,
+        #[starlark(require = pos, default = NoneOr::None)] end: NoneOr<i32>,
+    ) -> anyhow::Result<i32> {
+        if let Some(StrIndices { start, haystack }) =
+            convert_str_indices(this, start.into_option(), end.into_option())
+        {
+            if let Some(index) = haystack.rfind(needle) {
+                let index = fast_string::len(&haystack[..index]);
+                return Ok((start + index).0 as i32);
+            }
+        }
+        Err(anyhow::anyhow!(
+            "Substring '{}' not found in '{}'",
+            needle,
+            this
+        ))
+    }
+
+    /// [string.rpartition](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·rpartition
+    /// ): partition a string into 3 elements.
+    ///
+    /// `S.rpartition([x = ' '])` is like `partition`, but splits `S` at the
+    /// last occurrence of `x`.
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// "one/two/three".rpartition("/") == ("one/two", "/", "three")
+    /// "one".rpartition("/") == ("", "", "one")
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn rpartition<'v>(
+        this: StringValue<'v>,
+        #[starlark(require = pos)] needle: StringValue<'v>,
+        heap: &'v Heap,
+    ) -> anyhow::Result<(StringValue<'v>, StringValue<'v>, StringValue<'v>)> {
+        if needle.is_empty() {
+            return Err(anyhow::anyhow!(
+                "Empty separator cannot be used for partitioning"
+            ));
+        }
+        if let Some(offset) = this.rfind(needle.as_str()) {
+            let offset2 = offset + needle.len();
+            Ok((
+                heap.alloc_str(this.get(..offset).unwrap()),
+                needle,
+                heap.alloc_str(this.get(offset2..).unwrap()),
+            ))
+        } else {
+            let empty = StringValue::default();
+            Ok((empty, empty, this))
+        }
+    }
+
+    /// [string.rsplit](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·rsplit
+    /// ): splits a string into substrings.
+    ///
+    /// `S.rsplit([sep[, maxsplit]])` splits a string into substrings like
+    /// `S.split`, except that when a maximum number of splits is specified,
+    /// `rsplit` chooses the rightmost splits.
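[Editor's note: a hypothetical standalone helper, not the patch's code, showing the index conversion `find`, `rfind`, and `rindex` above all perform: `str::rfind` yields a byte offset, but Starlark indices are code points, so the byte offset is mapped by counting the chars of the preceding slice, which is what `fast_string::len(&haystack[..index])` does in the patch.]

```rust
fn starlark_rfind(haystack: &str, needle: &str) -> i32 {
    match haystack.rfind(needle) {
        // Convert the byte offset to a code-point index.
        Some(byte_idx) => haystack[..byte_idx].chars().count() as i32,
        None => -1,
    }
}

fn main() {
    assert_eq!(starlark_rfind("bonbon", "on"), 4);
    // Cyrillic chars are 2 bytes each, so byte and char offsets diverge.
    assert_eq!("Троянская война".rfind("война"), Some(19));
    assert_eq!(starlark_rfind("Троянская война", "война"), 10);
}
```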
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// "banana".rsplit("n") == ["ba", "a", "a"]
+    /// "banana".rsplit("n", 1) == ["bana", "a"]
+    /// "one two three".rsplit(None, 1) == ["one two", "three"]
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn rsplit<'v>(
+        this: &str,
+        #[starlark(require = pos, default = NoneOr::None)] sep: NoneOr<&str>,
+        #[starlark(require = pos, default = NoneOr::None)] maxsplit: NoneOr<i32>,
+        heap: &'v Heap,
+    ) -> anyhow::Result<ValueOfUnchecked<'v, UnpackList<String>>> {
+        let maxsplit = match maxsplit.into_option() {
+            None => None,
+            Some(v) => {
+                if v < 0 {
+                    None
+                } else {
+                    Some((v + 1) as usize)
+                }
+            }
+        };
+        Ok(match sep.into_option() {
+            None => match maxsplit {
+                None => heap
+                    .alloc_typed_unchecked(AllocList(this.split_whitespace()))
+                    .cast(),
+                Some(maxsplit) => heap
+                    .alloc_typed_unchecked(rsplitn_whitespace(this, maxsplit))
+                    .cast(),
+            },
+            Some(sep) => {
+                let mut v: Vec<_> = match maxsplit {
+                    None => this.rsplit(sep).collect(),
+                    Some(maxsplit) => this.rsplitn(maxsplit, sep).collect(),
+                };
+                v.reverse();
+                heap.alloc_typed_unchecked(AllocList(v)).cast()
+            }
+        })
+    }
+
+    /// [string.rstrip](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·rstrip
+    /// ): trim trailing whitespace.
+    ///
+    /// `S.rstrip()` returns a copy of the string S with trailing whitespace removed.
+    /// In most cases instead of passing an argument you should use `removesuffix`.
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// " hello ".rstrip() == " hello"
+    /// " hello!x".rstrip(" x!") == " hello"
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn rstrip<'v>(
+        this: StringValue<'v>,
+        #[starlark(require = pos)] chars: Option<&str>,
+        heap: &'v Heap,
+    ) -> anyhow::Result<StringValue<'v>> {
+        let res = match chars {
+            None => this.trim_end(),
+            Some(s) => this.trim_end_matches(|c| s.contains(c)),
+        };
+        if res.len() == this.len() {
+            Ok(this)
+        } else {
+            Ok(heap.alloc_str(res))
+        }
+    }
+
+    /// [string.split](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·split
+    /// ): split a string in substrings.
+    ///
+    /// `S.split([sep [, maxsplit]])` returns the list of substrings of S,
+    /// splitting at occurrences of the delimiter string `sep`.
+    ///
+    /// Consecutive occurrences of `sep` are considered to delimit empty
+    /// strings, so `'food'.split('o')` returns `['f', '', 'd']`.
+    /// Splitting an empty string with a specified separator returns `['']`.
+    /// If `sep` is the empty string, `split` fails.
+    ///
+    /// If `sep` is not specified or is `None`, `split` uses a different
+    /// algorithm: it removes all leading spaces from S
+    /// (or trailing spaces in the case of `rsplit`),
+    /// then splits the string around each consecutive non-empty sequence of
+    /// Unicode white space characters.
+    ///
+    /// If S consists only of white space, `split` returns the empty list.
+    ///
+    /// If `maxsplit` is given and non-negative, it specifies a maximum number
+    /// of splits.
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// "one two three".split() == ["one", "two", "three"]
+    /// "one two  three".split(" ") == ["one", "two", "", "three"]
+    /// "one two three".split(None, 1) == ["one", "two three"]
+    /// "banana".split("n") == ["ba", "a", "a"]
+    /// "banana".split("n", 1) == ["ba", "ana"]
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn split<'v>(
+        this: &str,
+        #[starlark(require = pos, default = NoneOr::None)] sep: NoneOr<&str>,
+        #[starlark(require = pos, default = NoneOr::None)] maxsplit: NoneOr<i32>,
+        heap: &'v Heap,
+    ) -> anyhow::Result<ValueOfUnchecked<'v, UnpackList<String>>> {
+        let maxsplit = match maxsplit.into_option() {
+            None => None,
+            Some(v) => {
+                if v < 0 {
+                    None
+                } else {
+                    Some((v + 1) as usize)
+                }
+            }
+        };
+        Ok(match (sep.into_option(), maxsplit) {
+            (None, None) => heap
+                .alloc_typed_unchecked(AllocList(this.split_whitespace()))
+                .cast(),
+            (None, Some(maxsplit)) => heap
+                .alloc_typed_unchecked(AllocList(splitn_whitespace(this, maxsplit)))
+                .cast(),
+            (Some(sep), None) => {
+                if sep.len() == 1 {
+                    // If we are searching for a 1-byte string, we can provide a much faster path.
+                    // Since it is one byte, given how UTF8 works, all the resultant slices must be UTF8 too.
+                    let b = sep.as_bytes()[0];
+                    let count = fast_string::count_matches_byte(this, b);
+                    let mut res = Vec::with_capacity(count + 1);
+                    res.extend(
+                        this.as_bytes()
+                            .split(|x| *x == b)
+                            .map(|x| unsafe { std::str::from_utf8_unchecked(x) }),
+                    );
+                    debug_assert_eq!(res.len(), count + 1);
+                    heap.alloc_typed_unchecked(AllocList(res)).cast()
+                } else {
+                    heap.alloc_typed_unchecked(AllocList(this.split(sep)))
+                        .cast()
+                }
+            }
+            (Some(sep), Some(maxsplit)) => heap
+                .alloc_typed_unchecked(AllocList(this.splitn(maxsplit, sep)))
+                .cast(),
+        })
+    }
+
+    /// [string.splitlines](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·splitlines
+    /// ): return the list of lines of a string.
+    ///
+    /// `S.splitlines([keepends])` returns a list whose elements are the
+    /// successive lines of S, that is, the strings formed by splitting S at
+    /// line terminators ('\n', '\r' or '\r\n').
+    ///
+    /// The optional argument, `keepends`, is interpreted as a Boolean.
+    /// If true, line terminators are preserved in the result, though
+    /// the final element does not necessarily end with a line terminator.
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// "one\n\ntwo".splitlines() == ["one", "", "two"]
+    /// "one\n\ntwo".splitlines(True) == ["one\n", "\n", "two"]
+    /// "a\nb".splitlines() == ["a", "b"]
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn splitlines<'v>(
+        this: &str,
+        #[starlark(require = pos, default = false)] keepends: bool,
+        heap: &'v Heap,
+    ) -> anyhow::Result<Vec<StringValue<'v>>> {
+        let mut s = this;
+        let mut lines: Vec<StringValue> = Vec::new();
+        loop {
+            if let Some(x) = s.find(['\n', '\r']) {
+                let y = x;
+                let x = match s.get(y..y + 2) {
+                    Some("\r\n") => y + 2,
+                    _ => y + 1,
+                };
+                if keepends {
+                    lines.push(heap.alloc_str(s.get(..x).unwrap()))
+                } else {
+                    lines.push(heap.alloc_str(s.get(..y).unwrap()))
+                }
+                if x == s.len() {
+                    return Ok(lines);
+                }
+                s = s.get(x..).unwrap();
+            } else {
+                if !s.is_empty() {
+                    lines.push(heap.alloc_str(s));
+                }
+                return Ok(lines);
+            }
+        }
+    }
+
+    /// [string.startswith](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·startswith
+    /// ): test whether a string starts with a given prefix.
+    ///
+    /// `S.startswith(prefix)` reports whether the string S has the specified
+    /// prefix.
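[Editor's note: a standalone sketch of the terminator scan in `splitlines` above, under the same logic: find the next '\n' or '\r', widen to two bytes only for a "\r\n" pair, then keep or drop the terminator depending on `keepends`. The helper name is hypothetical.]

```rust
fn splitlines_sketch(mut s: &str, keepends: bool) -> Vec<&str> {
    let mut lines = Vec::new();
    while let Some(y) = s.find(['\n', '\r']) {
        let x = match s.get(y..y + 2) {
            Some("\r\n") => y + 2, // a CRLF pair is a single terminator
            _ => y + 1,
        };
        lines.push(if keepends { &s[..x] } else { &s[..y] });
        if x == s.len() {
            return lines;
        }
        s = &s[x..];
    }
    if !s.is_empty() {
        lines.push(s); // final element without a terminator
    }
    lines
}

fn main() {
    assert_eq!(splitlines_sketch("one\r\ntwo\nthree", false), ["one", "two", "three"]);
    assert_eq!(splitlines_sketch("a\nb\n", true), ["a\n", "b\n"]);
}
```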
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// "filename.sky".startswith("filename") == True
+    /// "filename.sky".startswith("sky") == False
+    /// 'abc'.startswith(('a', 'A')) == True
+    /// 'ABC'.startswith(('a', 'A')) == True
+    /// 'def'.startswith(('a', 'A')) == False
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn startswith(
+        this: &str,
+        #[starlark(require = pos)] prefix: StringOrTuple,
+    ) -> anyhow::Result<bool> {
+        match prefix {
+            StringOrTuple::String(x) => Ok(this.starts_with(x)),
+            StringOrTuple::Tuple(xs) => Ok(xs.items.iter().any(|x| this.starts_with(x))),
+        }
+    }
+
+    /// [string.strip](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·strip
+    /// ): trim leading and trailing whitespace.
+    ///
+    /// `S.strip()` returns a copy of the string S with leading and trailing
+    /// whitespace removed.
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// " hello ".strip() == "hello"
+    /// "xxhello!!".strip("x!") == "hello"
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn strip<'v>(
+        this: StringValue<'v>,
+        #[starlark(require = pos)] chars: Option<&str>,
+        heap: &'v Heap,
+    ) -> anyhow::Result<StringValue<'v>> {
+        let res = match chars {
+            None => this.trim(),
+            Some(s) => this.trim_matches(|c| s.contains(c)),
+        };
+        if res.len() == this.len() {
+            Ok(this)
+        } else {
+            Ok(heap.alloc_str(res))
+        }
+    }
+
+    /// [string.title](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·title
+    /// ): convert a string to title case.
+    ///
+    /// `S.title()` returns a copy of the string S with letters converted to
+    /// titlecase.
+    ///
+    /// Letters are converted to uppercase at the start of words, lowercase
+    /// elsewhere.
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// "hElLo, WoRlD!".title() == "Hello, World!"
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn title(this: &str) -> anyhow::Result<String> {
+        let mut last_space = true;
+        let mut result = String::with_capacity(this.len());
+        for c in this.chars() {
+            if !c.is_alphabetic() {
+                last_space = true;
+                result.extend(c.to_lowercase());
+            } else {
+                if last_space {
+                    result.extend(c.to_uppercase())
+                } else {
+                    result.extend(c.to_lowercase())
+                }
+                last_space = false;
+            }
+        }
+        Ok(result)
+    }
+
+    /// [string.upper](
+    /// https://github.com/bazelbuild/starlark/blob/master/spec.md#string·upper
+    /// ): convert a string to all uppercase.
+    ///
+    /// `S.upper()` returns a copy of the string S with letters converted to
+    /// uppercase.
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// "Hello, World!".upper() == "HELLO, WORLD!"
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn upper(this: &str) -> anyhow::Result<String> {
+        Ok(this.to_uppercase())
+    }
+
+    /// [string.removeprefix](
+    /// https://docs.python.org/3.9/library/stdtypes.html#str.removeprefix
+    /// ): remove a prefix from a string. _Not part of standard Starlark._
+    ///
+    /// If the string starts with the prefix string, return `string[len(prefix):]`.
+    /// Otherwise, return a copy of the original string:
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// "Hello, World!".removeprefix("Hello") == ", World!"
+    /// "Hello, World!".removeprefix("Goodbye") == "Hello, World!"
+    /// "Hello".removeprefix("Hello") == ""
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn removeprefix<'v>(
+        this: StringValue<'v>,
+        #[starlark(require = pos)] prefix: &str,
+        heap: &'v Heap,
+    ) -> anyhow::Result<StringValue<'v>> {
+        let x = this.as_str();
+        if x.starts_with(prefix) && !prefix.is_empty() {
+            Ok(heap.alloc_str(&x[prefix.len()..]))
+        } else {
+            Ok(this)
+        }
+    }
+
+    /// [string.removesuffix](
+    /// https://docs.python.org/3.9/library/stdtypes.html#str.removesuffix
+    /// ): remove a suffix from a string. _Not part of standard Starlark._
+    ///
+    /// If the string ends with the suffix string, return `string[:-len(suffix)]`.
+    /// Otherwise, return a copy of the original string:
+    ///
+    /// ```
+    /// # starlark::assert::all_true(r#"
+    /// "Hello, World!".removesuffix("World!") == "Hello, "
+    /// "Hello, World!".removesuffix("World") == "Hello, World!"
+    /// "Hello".removesuffix("Hello") == ""
+    /// # "#);
+    /// ```
+    #[starlark(speculative_exec_safe)]
+    fn removesuffix<'v>(
+        this: StringValue<'v>,
+        #[starlark(require = pos)] suffix: &str,
+        heap: &'v Heap,
+    ) -> anyhow::Result<StringValue<'v>> {
+        let x = this.as_str();
+        if x.ends_with(suffix) && !suffix.is_empty() {
+            Ok(heap.alloc_str(&x[..x.len() - suffix.len()]))
+        } else {
+            Ok(this)
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::assert;
+
+    #[test]
+    fn test_error_codes() {
+        assert::fail(r#""bonbon".index("on", 2, 5)"#, "not found in");
+        assert::fail(r#"("banana".replace("a", "o", -2))"#, "negative");
+        assert::fail(r#""bonbon".rindex("on", 2, 5)"#, "not found in");
+    }
+
+    #[test]
+    fn test_count() {
+        assert::eq("'abc'.count('a', 10, -10)", "0");
+    }
+
+    #[test]
+    fn test_find() {
+        assert::eq("'Троянская война окончена'.find('война')", "10");
+    }
+
+    #[test]
+    fn test_opaque_iterator() {
+        assert::is_true("type('foo'.elems()) != type([])");
+        assert::is_true("type('foo'.codepoints()) != type([])");
+    }
+}
diff --git a/starlark-rust/starlark/src/values/types/string/mod.rs b/starlark-rust/starlark/src/values/types/string/mod.rs
deleted file mode 100644
index 3d93ae8c1164e..0000000000000
--- a/starlark-rust/starlark/src/values/types/string/mod.rs
+++ /dev/null
@@ -1,564 +0,0 @@
-/*
- * Copyright 2018 The Starlark in Rust Authors.
- * Copyright (c) Facebook, Inc. and its affiliates.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//! The string type. All strings must be valid UTF8.
-
-use std::cmp;
-use std::cmp::Ordering;
-use std::fmt;
-use std::fmt::Debug;
-use std::fmt::Display;
-use std::hash::Hash;
-use std::hash::Hasher;
-use std::mem;
-use std::ops::Deref;
-use std::slice;
-use std::str;
-use std::sync::atomic;
-
-use allocative::Allocative;
-use fast_string::CharIndex;
-use serde::Serialize;
-use starlark_derive::starlark_value;
-use starlark_derive::StarlarkDocs;
-use starlark_syntax::fast_string;
-
-use crate as starlark;
-use crate::any::ProvidesStaticType;
-use crate::collections::aligned_padded_str::AlignedPaddedStr;
-use crate::collections::Hashed;
-use crate::collections::StarlarkHashValue;
-use crate::collections::StarlarkHasher;
-use crate::environment::Methods;
-use crate::environment::MethodsStatic;
-use crate::private::Private;
-use crate::typing::Ty;
-use crate::values::index::apply_slice;
-use crate::values::string::repr::string_repr;
-use crate::values::types::none::NoneOr;
-use crate::values::types::string::fast_string::StrIndices;
-use crate::values::Heap;
-use crate::values::StarlarkValue;
-use crate::values::UnpackValue;
-use crate::values::Value;
-use crate::values::ValueError;
-
-mod alloc_unpack;
-pub(crate) mod dot_format;
-pub(crate) mod intern;
-pub(crate) mod interpolation;
-pub(crate) mod iter;
-pub(crate) mod repr;
-pub(crate) mod simd;
-
-/// The result of calling `type()` on strings.
-pub const STRING_TYPE: &str = "string";
-
-#[repr(C)] // We want the body to come after len
-#[derive(ProvidesStaticType, Allocative)]
-pub(crate) struct StarlarkStrN<const N: usize> {
-    // Lazily-initialized cached hash code.
-    pub(crate) hash: atomic::AtomicU32,
-    // Length in bytes.
-    pub(crate) len: u32,
-    // Followed by an unsized block, meaning this type is unsized.
-    // But we can't mark it as such since we really want &StarlarkStr to
-    // take up only one word.
-    pub(crate) body: [usize; N],
-}
-
-/// A pointer to this type represents a Starlark string.
-/// Use of this type is discouraged and not considered stable.
-#[derive(ProvidesStaticType, StarlarkDocs, Allocative)]
-#[starlark_docs(builtin = "standard")]
-#[repr(C)]
-pub struct StarlarkStr {
-    str: StarlarkStrN<0>,
-}
-
-impl Deref for StarlarkStr {
-    type Target = str;
-
-    fn deref(&self) -> &str {
-        self.as_str()
-    }
-}
-
-impl PartialEq for StarlarkStr {
-    fn eq(&self, other: &Self) -> bool {
-        self.as_aligned_padded_str() == other.as_aligned_padded_str()
-    }
-}
-
-impl Eq for StarlarkStr {}
-
-impl PartialOrd for StarlarkStr {
-    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-        Some(self.cmp(other))
-    }
-}
-
-impl Ord for StarlarkStr {
-    fn cmp(&self, other: &Self) -> Ordering {
-        self.as_str().cmp(other.as_str())
-    }
-}
-
-impl Debug for StarlarkStr {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        Debug::fmt(self.as_str(), f)
-    }
-}
-
-impl StarlarkStr {
-    /// Hash value when hash field is not initialized.
-    pub(crate) const UNINIT_HASH: StarlarkHashValue = StarlarkHashValue::new_unchecked(0);
-
-    /// Used in `const_frozen_string!` macro, so it is public.
-    #[doc(hidden)]
-    #[inline]
-    pub const fn payload_len_for_len(len: usize) -> usize {
-        (len + mem::size_of::<usize>() - 1) / mem::size_of::<usize>()
-    }
-
-    /// Unsafe because if you do `unpack` on this it will blow up
-    #[inline]
-    pub(crate) const unsafe fn new(len: usize, hash: StarlarkHashValue) -> Self {
-        assert!(len as u32 as usize == len, "len overflow");
-        StarlarkStr {
-            str: StarlarkStrN {
-                hash: atomic::AtomicU32::new(hash.get()),
-                len: len as u32,
-                body: [],
-            },
-        }
-    }
-
-    /// Get a Rust string reference from this Starlark string.
-    pub fn as_str(&self) -> &str {
-        unsafe {
-            let slice = slice::from_raw_parts(self.str.body.as_ptr() as *const u8, self.len());
-            str::from_utf8_unchecked(slice)
-        }
-    }
-
-    #[inline]
-    pub(crate) fn as_aligned_padded_str(&self) -> AlignedPaddedStr {
-        unsafe { AlignedPaddedStr::new(self.len(), self.str.body.as_ptr()) }
-    }
-
-    /// Get cached hash value or compute if it is not cached yet.
-    pub fn get_hash(&self) -> StarlarkHashValue {
-        // Note relaxed load and store are practically non-locking memory operations.
-        let hash = self.str.hash.load(atomic::Ordering::Relaxed);
-        if hash != 0 {
-            StarlarkHashValue::new_unchecked(hash)
-        } else {
-            let mut s = StarlarkHasher::new();
-            hash_string_value(self.as_str(), &mut s);
-            let hash = s.finish_small();
-            // If hash is zero, we are unlucky, but it is highly improbable.
-            self.str.hash.store(hash.get(), atomic::Ordering::Relaxed);
-            hash
-        }
-    }
-
-    /// Rust string reference along with its hash value.
-    pub fn as_str_hashed(&self) -> Hashed<&str> {
-        Hashed::new_unchecked(self.get_hash(), self.as_str())
-    }
-
-    /// String length, in bytes.
-    pub fn len(&self) -> usize {
-        self.str.len as usize
-    }
-
-    /// Is this string empty?
-    pub fn is_empty(&self) -> bool {
-        self.str.len == 0
-    }
-
-    pub(crate) fn offset_of_content() -> usize {
-        memoffset::offset_of!(StarlarkStrN<0>, body)
-    }
-}
-
-/// How to hash a string in a way that is compatible with Value
-#[inline]
-pub(crate) fn hash_string_value<H: Hasher>(x: &str, state: &mut H) {
-    x.hash(state)
-}
-
-impl Display for StarlarkStr {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        // We could either accumulate straight into the buffer (can't preallocate, virtual call on each character)
-        // or accumulate into a String buffer first. Not sure which is faster, but string buffer lets us
-        // share code with collect_repr more easily.
-        let mut buffer = String::new();
-        string_repr(self.as_str(), &mut buffer);
-        f.write_str(&buffer)
-    }
-}
-
-pub(crate) fn str_methods() -> Option<&'static Methods> {
-    static RES: MethodsStatic = MethodsStatic::new();
-    RES.methods(crate::stdlib::string::string_methods)
-}
-
-#[starlark_value(type = STRING_TYPE)]
-impl<'v> StarlarkValue<'v> for StarlarkStr {
-    fn is_special(_: Private) -> bool
-    where
-        Self: Sized,
-    {
-        true
-    }
-
-    fn get_methods() -> Option<&'static Methods> {
-        str_methods()
-    }
-
-    fn collect_repr(&self, buffer: &mut String) {
-        // String repr() is quite hot, so optimise it
-        string_repr(self, buffer)
-    }
-
-    fn to_bool(&self) -> bool {
-        !self.is_empty()
-    }
-
-    fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> {
-        // Don't defer to str because we cache the Hash in StarlarkStr
-        hasher.write_u32(self.get_hash().get());
-        Ok(())
-    }
-
-    fn get_hash(&self, _private: Private) -> anyhow::Result<StarlarkHashValue> {
-        Ok(self.get_hash())
-    }
-
-    fn equals(&self, other: Value) -> anyhow::Result<bool> {
-        if let Some(other) = other.unpack_str() {
-            Ok(self.as_str() == other)
-        } else {
-            Ok(false)
-        }
-    }
-
-    fn compare(&self, other: Value) -> anyhow::Result<Ordering> {
-        if let Some(other) = other.unpack_str() {
-            Ok(self.as_str().cmp(other))
-        } else {
-            ValueError::unsupported_with(self, "cmp()", other)
-        }
-    }
-
-    fn at(&self, index: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> {
-        // This method is disturbingly hot. Use the logic from `convert_index`,
-        // but modified to be UTF8 string friendly.
-        let i = i32::unpack_param(index)?;
-        if i >= 0 {
-            match fast_string::at(self, CharIndex(i as usize)) {
-                None => Err(ValueError::IndexOutOfBound(i).into()),
-                Some(c) => Ok(heap.alloc(c)),
-            }
-        } else {
-            let len_chars = fast_string::len(self);
-            let ind = CharIndex((-i) as usize); // Index from the end, minimum of 1
-            if ind > len_chars {
-                Err(ValueError::IndexOutOfBound(i).into())
-            } else if len_chars.0 == self.len() {
-                // We are a 7bit ASCII string, so take the fast-path
-                Ok(heap.alloc(self.as_bytes()[(len_chars - ind).0] as char))
-            } else {
-                Ok(heap.alloc(fast_string::at(self, len_chars - ind).unwrap()))
-            }
-        }
-    }
-
-    fn length(&self) -> anyhow::Result<i32> {
-        Ok(fast_string::len(self).0 as i32)
-    }
-
-    fn is_in(&self, other: Value) -> anyhow::Result<bool> {
-        let s = <&str>::unpack_param(other)?;
-        Ok(fast_string::contains(self, s))
-    }
-
-    fn slice(
-        &self,
-        start: Option<Value<'v>>,
-        stop: Option<Value<'v>>,
-        stride: Option<Value<'v>>,
-        heap: &'v Heap,
-    ) -> anyhow::Result<Value<'v>> {
-        let s = self;
-        if matches!(stride, Some(stride) if stride.unpack_i32() != Some(1)) {
-            // The stride case is super rare and super complex, so let's do something inefficient but safe
-            let xs = s.chars().collect::<Vec<_>>();
-            let xs = apply_slice(&xs, start, stop, stride)?;
-            return Ok(heap.alloc(xs.into_iter().collect::<String>()));
-        }
-
-        #[inline(always)]
-        fn start_stop_to_none_or(v: Option<Value>) -> anyhow::Result<NoneOr<i32>> {
-            match v {
-                None => Ok(NoneOr::None),
-                Some(v) => Ok(NoneOr::Other(v.to_int()?)),
-            }
-        }
-
-        let (start, stop) = (start_stop_to_none_or(start)?, start_stop_to_none_or(stop)?);
-
-        match fast_string::convert_str_indices(self, start.into_option(), stop.into_option()) {
-            Some(StrIndices { haystack, .. }) => Ok(heap.alloc_str(haystack).to_value()),
-            None => Ok(heap.alloc_str("").to_value()),
-        }
-    }
-
-    fn add(&self, other: Value<'v>, heap: &'v Heap) -> Option<anyhow::Result<Value<'v>>> {
-        if let Some(other_str) = other.unpack_str() {
-            if self.is_empty() {
-                Some(Ok(other))
-            } else {
-                Some(Ok(heap.alloc_str_concat(self, other_str).to_value()))
-            }
-        } else {
-            None
-        }
-    }
-
-    fn mul(&self, other: Value<'v>, heap: &'v Heap) -> Option<anyhow::Result<Value<'v>>> {
-        let l = i32::unpack_value(other)?;
-        let mut result = String::with_capacity(self.len() * cmp::max(0, l) as usize);
-        for _i in 0..l {
-            result.push_str(self)
-        }
-        Some(Ok(heap.alloc(result)))
-    }
-
-    fn rmul(&self, lhs: Value<'v>, heap: &'v Heap) -> Option<anyhow::Result<Value<'v>>> {
-        self.mul(lhs, heap)
-    }
-
-    fn percent(&self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> {
-        Ok(heap.alloc(interpolation::percent(self, other)?))
-    }
-
-    fn typechecker_ty(&self) -> Option<Ty> {
-        Some(Ty::starlark_value::<Self>())
-    }
-}
-
-impl Serialize for StarlarkStr {
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: serde::Serializer,
-    {
-        serializer.serialize_str(self.as_str())
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use crate::assert;
-    use crate::values::index::apply_slice;
-    use crate::values::Heap;
-    use crate::values::Value;
-
-    #[test]
-    fn test_string_corruption() {
-        assert::fail("'U4V6'[93]", "out of bound");
-        assert::fail("''[2]", "out of bound");
-    }
-
-    #[test]
-    fn test_escape_characters() {
-        // Test cases from the Starlark spec
-        assert_eq!(
-            assert::pass(r#"'\a\b\f\n\r\t\v'"#).unpack_str().unwrap(),
-            "\x07\x08\x0C\x0A\x0D\x09\x0B"
-        );
-        assert_eq!(assert::pass(r#"'\0'"#).unpack_str().unwrap(), "\x00");
-        assert_eq!(assert::pass(r#"'\12'"#).unpack_str().unwrap(), "\n");
-        assert_eq!(assert::pass(r#"'\101-\132'"#).unpack_str().unwrap(), "A-Z");
-        // 9 is not an octal digit, so it terminates early
-        assert_eq!(assert::pass(r#"'\119'"#).unpack_str().unwrap(), "\t9");
-        assert_eq!(assert::pass(r#"'\117'"#).unpack_str().unwrap(), "O");
-        assert_eq!(assert::pass(r#"'\u0041'"#).unpack_str().unwrap(), "A");
-        assert_eq!(assert::pass(r#"'\u0414'"#).unpack_str().unwrap(), "Д");
-        assert_eq!(assert::pass(r#"'\u754c'"#).unpack_str().unwrap(), "界");
-        assert_eq!(assert::pass(r#"'\U0001F600'"#).unpack_str().unwrap(), "😀");
-    }
-
-    const EXAMPLES: &[&str] = &[
-        "",
-        "short",
-        "longer string which is all ASCII!#",
-        "🤗",
-        "mix of prefix ASCII and 🤗 some emjoi",
-        "🤗 and the emjoi can go first",
-        "😥🍊🍉🫐🥥🥬🥒🥑🍈🍋",
-        "© and other characters Ŕ",
-        "ça va bien merci",
-        "Диана is a name in Russia",
-    ];
-
-    #[test]
-    fn test_string_hash() {
-        let heap = Heap::new();
-        for x in EXAMPLES {
-            assert_eq!(
-                heap.alloc_str(x).get_hashed().hash(),
-                heap.alloc(*x).get_hashed().unwrap().hash()
-            );
-        }
-    }
-
-    // If hash was zero, we'd need to mask the value in the hash cache.
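[Editor's note: a self-contained sketch, using a hypothetical `CachedStr` type, of the lazy hash cache pattern `StarlarkStr::get_hash` above relies on: a relaxed `AtomicU32` where 0 means "not computed yet". Racing threads may both compute the hash, but they store the same value, so relaxed ordering suffices. Unlike the real code, this sketch forces the hash to be nonzero with `| 1` rather than tolerating the improbable zero-hash recompute.]

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::sync::atomic::{AtomicU32, Ordering};

struct CachedStr {
    hash: AtomicU32, // 0 = not computed yet
    s: String,
}

impl CachedStr {
    fn get_hash(&self) -> u32 {
        let h = self.hash.load(Ordering::Relaxed);
        if h != 0 {
            return h; // cache hit
        }
        let mut hasher = DefaultHasher::new();
        self.s.hash(&mut hasher);
        let h = (hasher.finish() as u32) | 1; // never 0 in this sketch
        self.hash.store(h, Ordering::Relaxed);
        h
    }
}

fn main() {
    let x = CachedStr { hash: AtomicU32::new(0), s: "hello".to_owned() };
    assert_eq!(x.get_hash(), x.get_hash()); // second call hits the cache
}
```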
-    #[test]
-    fn test_zero_length_string_hash_is_not_zero() {
-        let heap = Heap::new();
-        assert_ne!(0, heap.alloc("").get_hash().unwrap().get());
-    }
-
-    #[test]
-    fn test_string_len() {
-        assert::all_true(
-            r#"
-len("😿") == 1
-"#,
-        );
-    }
-
-    #[test]
-    fn test_arithmetic_on_string() {
-        assert::all_true(
-            r#"
-"abc" + "def" == "abcdef"
-"abc" * 3 == "abcabcabc"
-"#,
-        );
-    }
-
-    #[test]
-    fn test_slice_string() {
-        let heap = Heap::new();
-        for example in EXAMPLES {
-            let s = heap.alloc_str(example).to_value();
-            for i in -5..=6 {
-                for j in -5..=6 {
-                    let start = if i == 6 {
-                        None
-                    } else {
-                        Some(Value::testing_new_int(i))
-                    };
-                    let stop = if j == 6 {
-                        None
-                    } else {
-                        Some(Value::testing_new_int(j))
-                    };
-                    // Compare list slicing (comparatively simple) to string slicing (complex unicode)
-                    let res1 = apply_slice(&example.chars().collect::<Vec<_>>(), start, stop, None)
-                        .unwrap()
-                        .iter()
-                        .collect::<String>();
-                    let res2 = s
-                        .slice(start, stop, None, &heap)
-                        .unwrap()
-                        .unpack_str()
-                        .unwrap();
-                    assert_eq!(
-                        &res1,
-                        res2,
-                        "{:?}[{}:{}]",
-                        example,
-                        start.map_or("".to_owned(), |x| x.to_string()),
-                        stop.map_or("".to_owned(), |x| x.to_string())
-                    );
-                }
-            }
-        }
-
-        assert::all_true(
-            r#"
-"abc"[1:] == "bc" # Remove the first element
-"abc"[:-1] == "ab" # Remove the last element
-"abc"[1:-1] == "b" # Remove the first and the last element
-"banana"[1::2] == "aaa" # Select one element out of 2, skipping the first
-"banana"[4::-2] == "nnb" # Select one element out of 2 in reverse order, starting at index 4
-"242"[-0:-2:-1] == "" # From https://github.com/facebookexperimental/starlark-rust/issues/35
-"#,
-        );
-    }
-
-    #[test]
-    fn test_string_is_in() {
-        assert::all_true(
-            r#"
-("a" in "abc") == True
-("b" in "abc") == True
-("bc" in "abc") == True
-("bd" in "abc") == False
-("z" in "abc") == False
-"#,
-        );
-    }
-
-    #[test]
-    fn test_successive_add() {
-        // we hope these get optimised away with adjacent plus optimisation
-        assert::eq("x = 'c'\n'a' + 'b' + x + 'd' + 'e'", "'abcde'");
-    }
-
-    #[test]
-    fn test_string_index() -> anyhow::Result<()> {
-        fn test_str(str: &str) -> anyhow::Result<()> {
-            let chars = str.chars().collect::<Vec<_>>();
-            let heap = Heap::new();
-            let val = heap.alloc(str);
-            let len = chars.len() as i32;
-            assert_eq!(val.length()?, len);
-            for (i, char) in chars.iter().enumerate() {
-                let char_str = char.to_string();
-                assert_eq!(
-                    val.at(heap.alloc(i), &heap)?.unpack_str(),
-                    Some(char_str.as_str())
-                );
-                assert_eq!(
-                    val.at(heap.alloc(-len + (i as i32)), &heap)?.unpack_str(),
-                    Some(char_str.as_str())
-                );
-            }
-            assert!(val.at(heap.alloc(len), &heap).is_err());
-            assert!(val.at(heap.alloc(-(len + 1)), &heap).is_err());
-            Ok(())
-        }
-
-        for x in EXAMPLES {
-            // We use all trailing substrings of the test, for better coverage (especially around smart prefix algorithms)
-            let mut it = x.chars();
-            loop {
-                test_str(it.as_str())?;
-                if it.next().is_none() {
-                    break;
-                }
-            }
-        }
-        Ok(())
-    }
-}
diff --git a/starlark-rust/starlark/src/values/types/string/repr.rs b/starlark-rust/starlark/src/values/types/string/repr.rs
index 8718e535d1552..fcc353b19f863 100644
--- a/starlark-rust/starlark/src/values/types/string/repr.rs
+++ b/starlark-rust/starlark/src/values/types/string/repr.rs
@@ -25,6 +25,7 @@ use crate::values::types::string::simd::Vector;
 
 /// Check if any byte in the buffer is non-ASCII or need escape.
 #[inline(always)]
+#[allow(dead_code)]
 unsafe fn chunk_non_ascii_or_need_escape<V: Vector>(chunk: V) -> bool {
     #[allow(clippy::many_single_char_names)]
     unsafe fn or4<V: Vector>(a: V, b: V, c: V, d: V) -> V {
@@ -124,6 +125,7 @@ pub(crate) fn string_repr(str: &str, buffer: &mut String) {
     }
 
     #[inline(always)]
+    #[allow(dead_code)] // FIXME: Investigate if this is really needed, fails on Mac.
    unsafe fn loop_ascii_simd<V: Vector>(val: &str, buffer: &mut String) {
         // `buffer` must have enough capacity to contain `val` if it does not need escaping
         // followed by trailing double quote.
diff --git a/starlark-rust/starlark/src/values/types/string/simd.rs b/starlark-rust/starlark/src/values/types/string/simd.rs
index a0714e9a7d6d2..60ab3683dde10 100644
--- a/starlark-rust/starlark/src/values/types/string/simd.rs
+++ b/starlark-rust/starlark/src/values/types/string/simd.rs
@@ -16,6 +16,7 @@
  */
 
 /// Fixed length byte vector API.
+#[allow(dead_code)] // FIXME: Investigate if this is really needed, fails on Mac
 pub(crate) trait Vector: Copy {
     /// Fill the vector with given byte value.
     unsafe fn splat(byte: u8) -> Self;
@@ -42,6 +43,7 @@ where
     /// This function is called when SIMD is not available.
     fn no_simd(self) -> R;
     /// This function is called when SIMD is available.
+    #[allow(dead_code)] // FIXME: Investigate if this is really needed, fails on Mac
     fn simd<V: Vector>(self) -> R;
 
     /// Call either `simd` or `no_simd` function.
@@ -54,12 +56,12 @@ where
             #[cfg(target_arch = "x86_64")]
             use std::arch::x86_64::*;
 
-            self.simd::<__m128i>()
-        }
-        #[cfg(not(target_feature = "sse2"))]
-        {
-            self.no_simd()
+            if true {
+                return self.simd::<__m128i>();
+            }
         }
+
+        self.no_simd()
     }
 }
diff --git a/starlark-rust/starlark/src/values/types/string/str_type.rs b/starlark-rust/starlark/src/values/types/string/str_type.rs
new file mode 100644
index 0000000000000..17158ea16173c
--- /dev/null
+++ b/starlark-rust/starlark/src/values/types/string/str_type.rs
@@ -0,0 +1,562 @@
+/*
+ * Copyright 2018 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use std::cmp;
+use std::cmp::Ordering;
+use std::fmt;
+use std::fmt::Debug;
+use std::fmt::Display;
+use std::hash::Hash;
+use std::hash::Hasher;
+use std::mem;
+use std::ops::Deref;
+use std::slice;
+use std::str;
+use std::sync::atomic;
+
+use allocative::Allocative;
+use serde::Serialize;
+use starlark_derive::starlark_value;
+use starlark_derive::ProvidesStaticType;
+use starlark_map::Hashed;
+use starlark_map::StarlarkHashValue;
+use starlark_map::StarlarkHasher;
+use starlark_syntax::fast_string;
+use starlark_syntax::fast_string::CharIndex;
+use starlark_syntax::fast_string::StrIndices;
+
+use crate as starlark;
+use crate::collections::aligned_padded_str::AlignedPaddedStr;
+use crate::environment::Methods;
+use crate::environment::MethodsStatic;
+use crate::private::Private;
+use crate::typing::Ty;
+use crate::values::index::apply_slice;
+use crate::values::none::NoneOr;
+use crate::values::string::interpolation;
+use crate::values::string::repr::string_repr;
+use crate::values::Heap;
+use crate::values::StarlarkValue;
+use crate::values::UnpackValue;
+use crate::values::Value;
+use crate::values::ValueError;
+
+/// The result of calling `type()` on strings.
+pub const STRING_TYPE: &str = "string";
+
+#[repr(C)] // We want the body to come after len
+#[derive(ProvidesStaticType, Allocative)]
+pub(crate) struct StarlarkStrN<const N: usize> {
+    // Lazily-initialized cached hash code.
+    pub(crate) hash: atomic::AtomicU32,
+    // Length in bytes.
+    pub(crate) len: u32,
+    // Followed by an unsized block, meaning this type is unsized.
+    // But we can't mark it as such since we really want &StarlarkStr to
+    // take up only one word.
+    pub(crate) body: [usize; N],
+}
+
+/// A pointer to this type represents a Starlark string.
+/// Use of this type is discouraged and not considered stable.
+#[derive(ProvidesStaticType, Allocative)]
+#[repr(C)]
+pub struct StarlarkStr {
+    str: StarlarkStrN<0>,
+}
+
+impl Deref for StarlarkStr {
+    type Target = str;
+
+    fn deref(&self) -> &str {
+        self.as_str()
+    }
+}
+
+impl PartialEq for StarlarkStr {
+    fn eq(&self, other: &Self) -> bool {
+        self.as_aligned_padded_str() == other.as_aligned_padded_str()
+    }
+}
+
+impl Eq for StarlarkStr {}
+
+impl PartialOrd for StarlarkStr {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for StarlarkStr {
+    fn cmp(&self, other: &Self) -> Ordering {
+        self.as_str().cmp(other.as_str())
+    }
+}
+
+impl Debug for StarlarkStr {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        Debug::fmt(self.as_str(), f)
+    }
+}
+
+impl StarlarkStr {
+    /// Hash value when hash field is not initialized.
+    pub(crate) const UNINIT_HASH: StarlarkHashValue = StarlarkHashValue::new_unchecked(0);
+
+    /// Used in `const_frozen_string!` macro, so it is public.
+    #[doc(hidden)]
+    #[inline]
+    pub const fn payload_len_for_len(len: usize) -> usize {
+        (len + mem::size_of::<usize>() - 1) / mem::size_of::<usize>()
+    }
+
+    /// Unsafe because if you do `unpack` on this it will blow up
+    #[inline]
+    pub(crate) const unsafe fn new(len: usize, hash: StarlarkHashValue) -> Self {
+        assert!(len as u32 as usize == len, "len overflow");
+        StarlarkStr {
+            str: StarlarkStrN {
+                hash: atomic::AtomicU32::new(hash.get()),
+                len: len as u32,
+                body: [],
+            },
+        }
+    }
+
+    /// Get a Rust string reference from this Starlark string.
+    pub fn as_str(&self) -> &str {
+        unsafe {
+            let slice = slice::from_raw_parts(self.str.body.as_ptr() as *const u8, self.len());
+            str::from_utf8_unchecked(slice)
+        }
+    }
+
+    #[inline]
+    pub(crate) fn as_aligned_padded_str(&self) -> AlignedPaddedStr {
+        unsafe { AlignedPaddedStr::new(self.len(), self.str.body.as_ptr()) }
+    }
+
+    /// Get cached hash value or compute if it is not cached yet.
+    pub fn get_hash(&self) -> StarlarkHashValue {
+        // Note relaxed load and store are practically non-locking memory operations.
+        let hash = self.str.hash.load(atomic::Ordering::Relaxed);
+        if hash != 0 {
+            StarlarkHashValue::new_unchecked(hash)
+        } else {
+            let mut s = StarlarkHasher::new();
+            hash_string_value(self.as_str(), &mut s);
+            let hash = s.finish_small();
+            // If hash is zero, we are unlucky, but it is highly improbable.
+            self.str.hash.store(hash.get(), atomic::Ordering::Relaxed);
+            hash
+        }
+    }
+
+    /// Rust string reference along with its hash value.
+    pub fn as_str_hashed(&self) -> Hashed<&str> {
+        Hashed::new_unchecked(self.get_hash(), self.as_str())
+    }
+
+    /// String length, in bytes.
+    pub fn len(&self) -> usize {
+        self.str.len as usize
+    }
+
+    /// Is this string empty?
+    pub fn is_empty(&self) -> bool {
+        self.str.len == 0
+    }
+
+    pub(crate) fn offset_of_content() -> usize {
+        memoffset::offset_of!(StarlarkStrN<0>, body)
+    }
+
+    /// Format a Rust string like `repr(s)`.
+    pub fn repr(s: &str) -> String {
+        let mut buffer = String::new();
+        string_repr(s, &mut buffer);
+        buffer
+    }
+}
+
+/// How to hash a string in a way that is compatible with Value
+#[inline]
+pub(crate) fn hash_string_value<H: Hasher>(x: &str, state: &mut H) {
+    x.hash(state)
+}
+
+impl Display for StarlarkStr {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // We could either accumulate straight into the buffer (can't preallocate, virtual call on each character)
+        // or accumulate into a String buffer first. Not sure which is faster, but string buffer lets us
+        // share code with collect_repr more easily.
+        f.write_str(&StarlarkStr::repr(self.as_str()))
+    }
+}
+
+pub(crate) fn str_methods() -> Option<&'static Methods> {
+    static RES: MethodsStatic = MethodsStatic::new();
+    RES.methods(crate::values::types::string::methods::string_methods)
+}
+
+#[starlark_value(type = STRING_TYPE)]
+impl<'v> StarlarkValue<'v> for StarlarkStr {
+    fn is_special(_: Private) -> bool
+    where
+        Self: Sized,
+    {
+        true
+    }
+
+    fn get_methods() -> Option<&'static Methods> {
+        str_methods()
+    }
+
+    fn collect_repr(&self, buffer: &mut String) {
+        // String repr() is quite hot, so optimise it
+        string_repr(self, buffer)
+    }
+
+    fn to_bool(&self) -> bool {
+        !self.is_empty()
+    }
+
+    fn write_hash(&self, hasher: &mut StarlarkHasher) -> crate::Result<()> {
+        // Don't defer to str because we cache the Hash in StarlarkStr
+        hasher.write_u32(self.get_hash().get());
+        Ok(())
+    }
+
+    fn get_hash(&self, _private: Private) -> crate::Result<StarlarkHashValue> {
+        Ok(self.get_hash())
+    }
+
+    fn equals(&self, other: Value) -> crate::Result<bool> {
+        if let Some(other) = other.unpack_starlark_str() {
+            Ok(self == other)
+        } else {
+            Ok(false)
+        }
+    }
+
+    fn compare(&self, other: Value) -> crate::Result<Ordering> {
+        if let Some(other) = other.unpack_str() {
+            Ok(self.as_str().cmp(other))
+        } else {
+            ValueError::unsupported_with(self, "cmp()", other)
+        }
+    }
+
+    fn at(&self, index: Value<'v>, heap: &'v Heap) -> crate::Result<Value<'v>> {
+        // This method is disturbingly hot. Use the logic from `convert_index`,
+        // but modified to be UTF8 string friendly.
+        let i = i32::unpack_param(index)?;
+        if i >= 0 {
+            match fast_string::at(self, CharIndex(i as usize)) {
+                None => Err(ValueError::IndexOutOfBound(i).into()),
+                Some(c) => Ok(heap.alloc(c)),
+            }
+        } else {
+            let len_chars = fast_string::len(self);
+            let ind = CharIndex((-i) as usize); // Index from the end, minimum of 1
+            if ind > len_chars {
+                Err(ValueError::IndexOutOfBound(i).into())
+            } else if len_chars.0 == self.len() {
+                // We are a 7bit ASCII string, so take the fast-path
+                Ok(heap.alloc(self.as_bytes()[(len_chars - ind).0] as char))
+            } else {
+                Ok(heap.alloc(fast_string::at(self, len_chars - ind).unwrap()))
+            }
+        }
+    }
+
+    fn length(&self) -> crate::Result<i32> {
+        Ok(fast_string::len(self).0 as i32)
+    }
+
+    fn is_in(&self, other: Value) -> crate::Result<bool> {
+        let s = <&str>::unpack_param(other)?;
+        Ok(fast_string::contains(self, s))
+    }
+
+    fn slice(
+        &self,
+        start: Option<Value<'v>>,
+        stop: Option<Value<'v>>,
+        stride: Option<Value<'v>>,
+        heap: &'v Heap,
+    ) -> crate::Result<Value<'v>> {
+        let s = self;
+        if matches!(stride, Some(stride) if stride.unpack_i32() != Some(1)) {
+            // The stride case is super rare and super complex, so let's do something inefficient but safe
+            let xs = s.chars().collect::<Vec<_>>();
+            let xs = apply_slice(&xs, start, stop, stride)?;
+            return Ok(heap.alloc(xs.into_iter().collect::<String>()));
+        }
+
+        #[inline(always)]
+        fn start_stop_to_none_or(v: Option<Value>) -> crate::Result<NoneOr<i32>> {
+            match v {
+                None => Ok(NoneOr::None),
+                Some(v) => Ok(NoneOr::Other(i32::unpack_value_err(v)?)),
+            }
+        }
+
+        let (start, stop) = (start_stop_to_none_or(start)?, start_stop_to_none_or(stop)?);
+
+        match fast_string::convert_str_indices(self, start.into_option(), stop.into_option()) {
+            Some(StrIndices { haystack, .. }) => Ok(heap.alloc_str(haystack).to_value()),
+            None => Ok(heap.alloc_str("").to_value()),
+        }
+    }
+
+    fn add(&self, other: Value<'v>, heap: &'v Heap) -> Option<crate::Result<Value<'v>>> {
+        if let Some(other_str) = other.unpack_str() {
+            if self.is_empty() {
+                Some(Ok(other))
+            } else {
+                Some(Ok(heap.alloc_str_concat(self, other_str).to_value()))
+            }
+        } else {
+            None
+        }
+    }
+
+    fn mul(&self, other: Value<'v>, heap: &'v Heap) -> Option<crate::Result<Value<'v>>> {
+        let l = match i32::unpack_value(other) {
+            Ok(Some(l)) => l,
+            Ok(None) => return None,
+            Err(e) => return Some(Err(e)),
+        };
+        let mut result = String::with_capacity(self.len() * cmp::max(0, l) as usize);
+        for _i in 0..l {
+            result.push_str(self)
+        }
+        Some(Ok(heap.alloc(result)))
+    }
+
+    fn rmul(&self, lhs: Value<'v>, heap: &'v Heap) -> Option<crate::Result<Value<'v>>> {
+        self.mul(lhs, heap)
+    }
+
+    fn percent(&self, other: Value<'v>, heap: &'v Heap) -> crate::Result<Value<'v>> {
+        Ok(heap.alloc(interpolation::percent(self, other)?))
+    }
+
+    fn typechecker_ty(&self) -> Option<Ty> {
+        Some(Ty::starlark_value::<Self>())
+    }
+}
+
+impl Serialize for StarlarkStr {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        serializer.serialize_str(self.as_str())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::assert;
+    use crate::values::index::apply_slice;
+    use crate::values::Heap;
+    use crate::values::Value;
+
+    #[test]
+    fn test_string_corruption() {
+        assert::fail("'U4V6'[93]", "out of bound");
+        assert::fail("''[2]", "out of bound");
+    }
+
+    #[test]
+    fn test_escape_characters() {
+        // Test cases from the Starlark spec
+        assert_eq!(
+            assert::pass(r#"'\a\b\f\n\r\t\v'"#).unpack_str().unwrap(),
+            "\x07\x08\x0C\x0A\x0D\x09\x0B"
+        );
+        assert_eq!(assert::pass(r#"'\0'"#).unpack_str().unwrap(), "\x00");
+        assert_eq!(assert::pass(r#"'\12'"#).unpack_str().unwrap(), "\n");
assert_eq!(assert::pass(r#"'\101-\132'"#).unpack_str().unwrap(), "A-Z"); + // 9 is not an octal digit, so it terminates early + assert_eq!(assert::pass(r#"'\119'"#).unpack_str().unwrap(), "\t9"); + assert_eq!(assert::pass(r#"'\117'"#).unpack_str().unwrap(), "O"); + assert_eq!(assert::pass(r#"'\u0041'"#).unpack_str().unwrap(), "A"); + assert_eq!(assert::pass(r#"'\u0414'"#).unpack_str().unwrap(), "Д"); + assert_eq!(assert::pass(r#"'\u754c'"#).unpack_str().unwrap(), "界"); + assert_eq!(assert::pass(r#"'\U0001F600'"#).unpack_str().unwrap(), "😀"); + } + + const EXAMPLES: &[&str] = &[ + "", + "short", + "longer string which is all ASCII!#", + "🤗", + "mix of prefix ASCII and 🤗 some emjoi", + "🤗 and the emjoi can go first", + "😥🍊🍉🫐🥥🥬🥒🥑🍈🍋", + "© and other characters Ŕ", + "ça va bien merci", + "Диана is a name in Russia", + ]; + + #[test] + fn test_string_hash() { + let heap = Heap::new(); + for x in EXAMPLES { + assert_eq!( + heap.alloc_str(x).get_hashed().hash(), + heap.alloc(*x).get_hashed().unwrap().hash() + ); + } + } + + // If hash was zero, we'd need to mask the value in the hash cache. + #[test] + fn test_zero_length_string_hash_is_not_zero() { + let heap = Heap::new(); + assert_ne!(0, heap.alloc("").get_hash().unwrap().get()); + } + + #[test] + fn test_string_len() { + assert::all_true( + r#" +len("😿") == 1 +"#, + ); + } + + #[test] + fn test_arithmetic_on_string() { + assert::all_true( + r#" +"abc" + "def" == "abcdef" +"abc" * 3 == "abcabcabc" +"#, + ); + } + + #[test] + fn test_slice_string() { + let heap = Heap::new(); + for example in EXAMPLES { + let s = heap.alloc_str(example).to_value(); + for i in -5..=6 { + for j in -5..=6 { + let start = if i == 6 { + None + } else { + Some(Value::testing_new_int(i)) + }; + let stop = if j == 6 { + None + } else { + Some(Value::testing_new_int(j)) + }; + // Compare list slicing (comparatively simple) to string slicing (complex unicode) + let res1 = apply_slice(&example.chars().collect::>(), start, stop, None) + .unwrap() + .iter() + .collect::(); + let res2 = s + .slice(start, stop, None, &heap) + .unwrap() + .unpack_str() + .unwrap(); + assert_eq!( + &res1, + res2, + "{:?}[{}:{}]", + example, + start.map_or("".to_owned(), |x| x.to_string()), + stop.map_or("".to_owned(), |x| x.to_string()) + ); + } + } + } + + assert::all_true( + r#" +"abc"[1:] == "bc" # Remove the first element +"abc"[:-1] == "ab" # Remove the last element +"abc"[1:-1] == "b" # Remove the first and the last element +"banana"[1::2] == "aaa" # Select one element out of 2, skipping the first +"banana"[4::-2] == "nnb" # Select one element out of 2 in reverse order, starting at index 4 +"242"[ -0:-2:-1] == "" # From https://github.com/facebook/starlark-rust/issues/35 +"#, + ); + } + + #[test] + fn test_string_is_in() { + assert::all_true( + r#" +("a" in "abc") == True +("b" in "abc") == True +("bc" in "abc") == True +("bd" in "abc") == False +("z" in "abc") == False +"#, + ); + } + + #[test] + fn test_successive_add() { + // we hope these get optimised away with adjacent plus optimisation + assert::eq("x = 'c'\n'a' + 'b' + x + 'd' + 'e'", "'abcde'"); + } + + #[test] + fn test_string_index() -> crate::Result<()> { + fn test_str(str: &str) -> crate::Result<()> { + let chars = str.chars().collect::>(); + let heap = Heap::new(); + let val = heap.alloc(str); + let len = chars.len() as i32; + assert_eq!(val.length()?, len); + for (i, char) in chars.iter().enumerate() { + let char_str = char.to_string(); + assert_eq!( + val.at(heap.alloc(i), &heap)?.unpack_str(), + Some(char_str.as_str()) + 
); + assert_eq!( + val.at(heap.alloc(-len + (i as i32)), &heap)?.unpack_str(), + Some(char_str.as_str()) + ); + } + assert!(val.at(heap.alloc(len), &heap).is_err()); + assert!(val.at(heap.alloc(-(len + 1)), &heap).is_err()); + Ok(()) + } + + for x in EXAMPLES { + // We use all trailing substrings of the test, for better coverage (especially around smart prefix algorithms) + let mut it = x.chars(); + loop { + test_str(it.as_str())?; + if it.next().is_none() { + break; + } + } + } + Ok(()) + } +} diff --git a/starlark-rust/starlark/src/values/types/structs.rs b/starlark-rust/starlark/src/values/types/structs.rs new file mode 100644 index 0000000000000..7d08daa220381 --- /dev/null +++ b/starlark-rust/starlark/src/values/types/structs.rs @@ -0,0 +1,45 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! The struct type, an associative-map created with `struct()`. +//! +//! This struct type is related to both the [dictionary](crate::values::dict) and the +//! [record](crate::values::record) types, all being associative maps. +//! +//! * Like a record, a struct is immutable, fields can be referred to with `struct.field`, and +//! it uses strings for keys. +//! * Like a dictionary, the struct is untyped, and manipulating structs from Rust is ergonomic. +//! +//! The `struct()` function creates a struct. It accepts keyword arguments, keys become +//! struct field names, and values become field values. +//! +//! ``` +//! # starlark::assert::is_true(r#" +//! ip_address = struct(host='localhost', port=80) +//! ip_address.port == 80 +//! # "#); +//! 
``` + +pub(crate) mod alloc; +pub(crate) mod refs; +pub(crate) mod structs; +pub(crate) mod unordered_hasher; +pub(crate) mod value; + +pub use crate::values::types::structs::alloc::AllocStruct; +pub use crate::values::types::structs::refs::FrozenStructRef; +pub use crate::values::types::structs::refs::StructRef; diff --git a/starlark-rust/starlark/src/values/types/structs/alloc.rs b/starlark-rust/starlark/src/values/types/structs/alloc.rs index 7989ed3507af2..eaeaf5598e780 100644 --- a/starlark-rust/starlark/src/values/types/structs/alloc.rs +++ b/starlark-rust/starlark/src/values/types/structs/alloc.rs @@ -24,6 +24,7 @@ use crate::values::alloc_value::AllocFrozenStringValue; use crate::values::alloc_value::AllocStringValue; use crate::values::structs::value::FrozenStruct; use crate::values::structs::value::Struct; +use crate::values::structs::StructRef; use crate::values::type_repr::StarlarkTypeRepr; use crate::values::AllocFrozenValue; use crate::values::AllocValue; @@ -62,6 +63,8 @@ where S: IntoIterator, V: StarlarkTypeRepr, { + type Canonical = as StarlarkTypeRepr>::Canonical; + fn starlark_type_repr() -> Ty { Struct::starlark_type_repr() } diff --git a/starlark-rust/starlark/src/values/types/structs/mod.rs b/starlark-rust/starlark/src/values/types/structs/mod.rs deleted file mode 100644 index e9885483d7eea..0000000000000 --- a/starlark-rust/starlark/src/values/types/structs/mod.rs +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2018 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! The struct type, an associative-map created with `struct()`. -//! -//! This struct type is related to both the [dictionary](crate::values::dict) and the -//! [record](crate::values::record) types, all being associative maps. -//! -//! * Like a record, a struct is immutable, fields can be referred to with `struct.field`, and -//! it uses strings for keys. -//! * Like a dictionary, the struct is untyped, and manipulating structs from Rust is ergonomic. -//! -//! The `struct()` function creates a struct. It accepts keyword arguments, keys become -//! struct field names, and values become field values. -//! -//! ``` -//! # starlark::assert::is_true(r#" -//! ip_address = struct(host='localhost', port=80) -//! ip_address.port == 80 -//! # "#); -//! 
``` - -pub(crate) mod alloc; -pub(crate) mod of; -pub(crate) mod refs; -pub(crate) mod unordered_hasher; -pub(crate) mod value; - -pub use crate::values::types::structs::alloc::AllocStruct; -pub use crate::values::types::structs::of::StructOf; -pub use crate::values::types::structs::refs::FrozenStructRef; -pub use crate::values::types::structs::refs::StructRef; diff --git a/starlark-rust/starlark/src/values/types/structs/of.rs b/starlark-rust/starlark/src/values/types/structs/of.rs deleted file mode 100644 index 0badb076629fc..0000000000000 --- a/starlark-rust/starlark/src/values/types/structs/of.rs +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright 2018 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -use std::marker; -use std::marker::PhantomData; - -use starlark_map::small_map::SmallMap; - -use crate::typing::Ty; -use crate::values::structs::value::Struct; -use crate::values::structs::StructRef; -use crate::values::type_repr::StarlarkTypeRepr; -use crate::values::StringValue; -use crate::values::UnpackValue; -use crate::values::Value; -use crate::values::ValueOf; - -/// Like [`ValueOf`](crate::values::ValueOf), but only validates value types; does not construct -/// or store a map. -#[derive(Debug)] -pub struct StructOf<'v, V: UnpackValue<'v>> { - value: ValueOf<'v, StructRef<'v>>, - _marker: PhantomData, -} - -impl<'v, V: UnpackValue<'v>> StarlarkTypeRepr for StructOf<'v, V> { - fn starlark_type_repr() -> Ty { - Struct::starlark_type_repr() - } -} - -impl<'v, V: UnpackValue<'v>> UnpackValue<'v> for StructOf<'v, V> { - fn expected() -> String { - format!("struct with fields of type {}", V::expected()) - } - - fn unpack_value(value: Value<'v>) -> Option> { - let value = ValueOf::::unpack_value(value)?; - for (_k, v) in value.typed.iter() { - // Validate field types - V::unpack_value(v)?; - } - Some(StructOf { - value, - _marker: marker::PhantomData, - }) - } -} - -impl<'v, V: UnpackValue<'v>> StructOf<'v, V> { - /// Get the actual value this `StructOf` wraps. - pub fn to_value(&self) -> Value<'v> { - self.value.value - } - - /// Get untyped struct reference. - fn as_struct(&self) -> &StructRef<'v> { - &self.value.typed - } - - /// Collect field structs. - pub fn to_map(&self) -> SmallMap, V> { - self.as_struct() - .iter() - .map(|(k, v)| (k, V::unpack_value(v).expect("validated at construction"))) - .collect() - } -} diff --git a/starlark-rust/starlark/src/values/types/structs/refs.rs b/starlark-rust/starlark/src/values/types/structs/refs.rs index 80bc3d44c2226..55067a5da62c0 100644 --- a/starlark-rust/starlark/src/values/types/structs/refs.rs +++ b/starlark-rust/starlark/src/values/types/structs/refs.rs @@ -15,6 +15,10 @@ * limitations under the License. 
*/ +use std::convert::Infallible; + +use dupe::Dupe; + use crate::typing::Ty; use crate::values::starlark_type_id::StarlarkTypeId; use crate::values::structs::value::FrozenStruct; @@ -30,7 +34,7 @@ use crate::values::Value; /// /// Struct implementation (for example, memory layout) may change, /// this type provides implementation agnostics API to it. -#[derive(Debug)] +#[derive(Debug, Clone, Copy, Dupe)] pub struct StructRef<'v>(&'v Struct<'v>); impl<'v> StructRef<'v> { @@ -52,20 +56,24 @@ impl<'v> StructRef<'v> { } impl<'v> StarlarkTypeRepr for StructRef<'v> { + type Canonical = Self; + fn starlark_type_repr() -> Ty { FrozenStruct::starlark_type_repr() } } impl<'v> UnpackValue<'v> for StructRef<'v> { - fn unpack_value(value: Value<'v>) -> Option { - StructRef::from_value(value) + type Error = Infallible; + + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + Ok(StructRef::from_value(value)) } } /// Reference to the frozen struct. #[derive(Debug)] -pub struct FrozenStructRef<'f>(&'f FrozenStruct); +pub struct FrozenStructRef<'f>(pub(crate) &'f FrozenStruct); impl<'f> FrozenStructRef<'f> { /// Iterate over struct fields. diff --git a/starlark-rust/starlark/src/values/types/structs/structs.rs b/starlark-rust/starlark/src/values/types/structs/structs.rs new file mode 100644 index 0000000000000..0f110d716e2ce --- /dev/null +++ b/starlark-rust/starlark/src/values/types/structs/structs.rs @@ -0,0 +1,90 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Implementation of `struct` function. + +use allocative::Allocative; +use dupe::Dupe; +use starlark_derive::starlark_module; +use starlark_map::sorted_map::SortedMap; + +use crate as starlark; +use crate::codemap::Span; +use crate::environment::GlobalsBuilder; +use crate::eval::Arguments; +use crate::typing::call_args::TyCallArgs; +use crate::typing::callable::TyCallable; +use crate::typing::error::TypingOrInternalError; +use crate::typing::function::TyCustomFunctionImpl; +use crate::typing::oracle::ctx::TypingOracleCtx; +use crate::typing::structs::TyStruct; +use crate::typing::ParamSpec; +use crate::typing::Ty; +use crate::util::arc_str::ArcStr; +use crate::values::structs::value::FrozenStruct; +use crate::values::structs::value::Struct; +use crate::values::Heap; + +#[derive( + Allocative, Clone, Copy, Dupe, Debug, Eq, PartialEq, Hash, Ord, PartialOrd +)] +struct StructType; + +impl TyCustomFunctionImpl for StructType { + fn as_callable(&self) -> TyCallable { + // TODO(nga): this should be obtained from function signature from function definition. + TyCallable::new(ParamSpec::kwargs(Ty::any()), Ty::any_struct()) + } + + fn validate_call( + &self, + _span: Span, + args: &TyCallArgs, + oracle: TypingOracleCtx, + ) -> Result { + if let [pos, ..] 
= args.pos.as_slice() { + return Err(oracle.msg_error(pos.span, "Positional arguments not allowed")); + } + let mut fields = Vec::new(); + for named in &args.named { + let (name, ty) = &named.node; + fields.push((ArcStr::from(*name), ty.clone())); + } + let extra = args.kwargs.is_some(); + Ok(Ty::custom(TyStruct { + fields: SortedMap::from_iter(fields), + extra, + })) + } +} + +/// Register `struct` builtin. +#[starlark_module] +pub(crate) fn register_struct(builder: &mut GlobalsBuilder) { + #[starlark( + ty_custom_function = StructType, + as_type = FrozenStruct, + )] + fn r#struct<'v>(args: &Arguments<'v, '_>, heap: &'v Heap) -> starlark::Result> { + args.no_positional_args(heap)?; + // TODO(nga): missing optimization: practically most `struct` invocations are + // performed with fixed named arguments, e.g. `struct(a = 1, b = 2)`. + // In this case we can avoid allocating the map, but instead + // allocate field index once at compilation time and store field values in a vector. + Ok(Struct::new(args.names_map()?)) + } +} diff --git a/starlark-rust/starlark/src/values/types/structs/value.rs b/starlark-rust/starlark/src/values/types/structs/value.rs index f4536388a866c..f0169e323133b 100644 --- a/starlark-rust/starlark/src/values/types/structs/value.rs +++ b/starlark-rust/starlark/src/values/types/structs/value.rs @@ -26,7 +26,6 @@ use display_container::fmt_keyed_container; use serde::Serialize; use starlark_derive::starlark_value; use starlark_derive::Freeze; -use starlark_derive::StarlarkDocs; use starlark_derive::Trace; use starlark_map::small_map::SmallMap; use starlark_map::Hashed; @@ -38,14 +37,13 @@ use crate::coerce::coerce; use crate::coerce::Coerce; use crate::docs::DocItem; use crate::docs::DocMember; -use crate::docs::DocObject; use crate::docs::DocProperty; use crate::starlark_complex_value; use crate::typing::Ty; use crate::typing::TyStruct; +use crate::util::arc_str::ArcStr; use crate::values::comparison::compare_small_map; use crate::values::comparison::equals_small_map; -use crate::values::layout::heap::profile::arc_str::ArcStr; use crate::values::structs::unordered_hasher::UnorderedHasher; use crate::values::FrozenStringValue; use crate::values::FrozenValue; @@ -75,6 +73,17 @@ impl<'v, V: ValueLike<'v>> StructGen<'v, V> { .iter() .map(|(name, value)| (name.to_string_value(), *value)) } + + fn self_ty(&self) -> Ty { + Ty::custom(TyStruct { + fields: self + .fields + .iter() + .map(|(name, value)| (ArcStr::from(name.as_str()), Ty::of_value(value.to_value()))) + .collect(), + extra: false, + }) + } } impl StructGen<'static, FrozenValue> { @@ -88,17 +97,7 @@ impl StructGen<'static, FrozenValue> { starlark_complex_value!(pub(crate) Struct<'v>); /// The result of calling `struct()`. -#[derive( - Clone, - Default, - Debug, - Trace, - Freeze, - ProvidesStaticType, - StarlarkDocs, - Allocative -)] -#[starlark_docs(builtin = "extension")] +#[derive(Clone, Default, Debug, Trace, Freeze, ProvidesStaticType, Allocative)] #[repr(C)] pub(crate) struct StructGen<'v, V: ValueLike<'v>> { /// The fields in a struct. 
@@ -120,7 +119,7 @@ impl<'v, V: ValueLike<'v>> Display for StructGen<'v, V> { } #[starlark_value(type = Struct::TYPE)] -impl<'v, V: ValueLike<'v> + 'v> StarlarkValue<'v> for StructGen<'v, V> +impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for StructGen<'v, V> where Self: ProvidesStaticType<'v>, { @@ -128,7 +127,7 @@ where collector.push_str("struct(...)"); } - fn equals(&self, other: Value<'v>) -> anyhow::Result { + fn equals(&self, other: Value<'v>) -> crate::Result { match Struct::from_value(other) { None => Ok(false), Some(other) => { @@ -137,7 +136,7 @@ where } } - fn compare(&self, other: Value<'v>) -> anyhow::Result { + fn compare(&self, other: Value<'v>) -> crate::Result { match Struct::from_value(other) { None => ValueError::unsupported_with(self, "cmp()", other), Some(other) => compare_small_map( @@ -157,7 +156,7 @@ where coerce(&self.fields).get_hashed(attribute).copied() } - fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { + fn write_hash(&self, hasher: &mut StarlarkHasher) -> crate::Result<()> { // Must use unordered hash because equality is unordered, // and `a = b => hash(a) = hash(b)`. let mut unordered_hasher = UnorderedHasher::new(); @@ -180,28 +179,13 @@ where self.fields.keys().map(|x| x.as_str().to_owned()).collect() } - fn documentation(&self) -> Option { - let members = self - .fields - .iter() - .map(|(k, v)| { - let name = k.as_str().to_owned(); - match v.to_value().documentation() { - Some(DocItem::Function(f)) => (name, DocMember::Function(f)), - _ => ( - name, - DocMember::Property(DocProperty { - docs: None, - typ: Ty::any(), - }), - ), - } - }) - .collect(); - Some(DocItem::Object(DocObject { - docs: None, - members, - })) + fn documentation(&self) -> DocItem { + // This treats structs as being value-like, and intentionally generates bad docs in the case + // of namespace-like usage. See + // for some + // additional discussion + let typ = self.self_ty(); + DocItem::Member(DocMember::Property(DocProperty { docs: None, typ })) } fn get_type_starlark_repr() -> Ty { @@ -209,14 +193,7 @@ where } fn typechecker_ty(&self) -> Option { - Some(Ty::custom(TyStruct { - fields: self - .fields - .iter() - .map(|(name, value)| (ArcStr::from(name.as_str()), Ty::of_value(value.to_value()))) - .collect(), - extra: false, - })) + Some(self.self_ty()) } } diff --git a/starlark-rust/starlark/src/values/types/tuple.rs b/starlark-rust/starlark/src/values/types/tuple.rs new file mode 100644 index 0000000000000..30df03638eee6 --- /dev/null +++ b/starlark-rust/starlark/src/values/types/tuple.rs @@ -0,0 +1,30 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! The list type, an immutable sequence of values. 
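The `write_hash` change above leans on the invariant its comment states: struct equality is unordered, and `a == b` must imply `hash(a) == hash(b)`, so the hash must be insensitive to field order as well. A minimal sketch of how such a hasher can be built, combining independently hashed entries with a commutative operation (`UnorderedHashDemo` is hypothetical; the real `UnorderedHasher` in `values::structs::unordered_hasher` may combine differently):

```
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Each entry is hashed independently, and the per-entry hashes are
// combined with a commutative, associative operation, so insertion
// order cannot affect the final result.
#[derive(Default)]
struct UnorderedHashDemo {
    acc: u64,
}

impl UnorderedHashDemo {
    fn write_entry<T: Hash>(&mut self, entry: &T) {
        let mut h = DefaultHasher::new();
        entry.hash(&mut h);
        // `wrapping_add` is commutative: {a, b} and {b, a} agree.
        self.acc = self.acc.wrapping_add(h.finish());
    }

    fn finish(&self) -> u64 {
        self.acc
    }
}

fn main() {
    let mut x = UnorderedHashDemo::default();
    x.write_entry(&("host", 1));
    x.write_entry(&("port", 2));

    let mut y = UnorderedHashDemo::default();
    y.write_entry(&("port", 2));
    y.write_entry(&("host", 1));

    // Equal field sets hash equally regardless of order, preserving
    // `a == b => hash(a) == hash(b)`.
    assert_eq!(x.finish(), y.finish());
}
```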
+ +pub(crate) mod alloc; +pub(crate) mod globals; +pub(crate) mod refs; +pub(crate) mod rust_tuple; +pub(crate) mod unpack; +pub(crate) mod value; + +pub use crate::values::types::tuple::alloc::AllocTuple; +pub use crate::values::types::tuple::refs::FrozenTupleRef; +pub use crate::values::types::tuple::refs::TupleRef; +pub use crate::values::types::tuple::unpack::UnpackTuple; diff --git a/starlark-rust/starlark/src/values/types/tuple/alloc.rs b/starlark-rust/starlark/src/values/types/tuple/alloc.rs index 86cf34e59d42a..1902c536ed52d 100644 --- a/starlark-rust/starlark/src/values/types/tuple/alloc.rs +++ b/starlark-rust/starlark/src/values/types/tuple/alloc.rs @@ -18,6 +18,7 @@ use std::iter; use crate::typing::Ty; +use crate::values::tuple::UnpackTuple; use crate::values::type_repr::StarlarkTypeRepr; use crate::values::AllocFrozenValue; use crate::values::AllocValue; @@ -50,9 +51,12 @@ impl AllocTuple> { impl StarlarkTypeRepr for AllocTuple where T: IntoIterator, + T::Item: StarlarkTypeRepr, { + type Canonical = as StarlarkTypeRepr>::Canonical; + fn starlark_type_repr() -> Ty { - Ty::any_tuple() + Ty::tuple_of(T::Item::starlark_type_repr()) } } diff --git a/starlark-rust/starlark/src/values/types/tuple/globals.rs b/starlark-rust/starlark/src/values/types/tuple/globals.rs new file mode 100644 index 0000000000000..9aa7aa250221a --- /dev/null +++ b/starlark-rust/starlark/src/values/types/tuple/globals.rs @@ -0,0 +1,65 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use starlark_derive::starlark_module; + +use crate as starlark; +use crate::environment::GlobalsBuilder; +use crate::values::function::SpecialBuiltinFunction; +use crate::values::tuple::value::FrozenTuple; +use crate::values::tuple::AllocTuple; +use crate::values::tuple::TupleRef; +use crate::values::typing::StarlarkIter; +use crate::values::Heap; +use crate::values::Value; +use crate::values::ValueOfUnchecked; + +#[starlark_module] +pub(crate) fn register_tuple(globals: &mut GlobalsBuilder) { + /// [tuple]( + /// https://github.com/bazelbuild/starlark/blob/master/spec.md#tuple + /// ): returns a tuple containing the elements of the iterable x. + /// + /// With no arguments, `tuple()` returns the empty tuple. 
+ /// + /// ``` + /// # starlark::assert::all_true(r#" + /// tuple() == () + /// tuple([1,2,3]) == (1, 2, 3) + /// # "#); + /// ``` + #[starlark( + as_type = FrozenTuple, + speculative_exec_safe, + special_builtin_function = SpecialBuiltinFunction::Tuple, + )] + fn tuple<'v>( + #[starlark(require = pos)] a: Option>>>, + heap: &'v Heap, + ) -> starlark::Result>> { + if let Some(a) = a { + if TupleRef::from_value(a.get()).is_some() { + return Ok(ValueOfUnchecked::new(a.get())); + } + + let it = a.get().iterate(heap)?; + Ok(ValueOfUnchecked::new(heap.alloc_tuple_iter(it))) + } else { + Ok(ValueOfUnchecked::new(heap.alloc(AllocTuple::EMPTY))) + } + } +} diff --git a/starlark-rust/starlark/src/values/types/tuple/mod.rs b/starlark-rust/starlark/src/values/types/tuple/mod.rs deleted file mode 100644 index 1505bd14ef525..0000000000000 --- a/starlark-rust/starlark/src/values/types/tuple/mod.rs +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2018 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! The list type, an immutable sequence of values. - -pub(crate) mod alloc; -pub(crate) mod refs; -pub(crate) mod rust_tuple; -pub(crate) mod value; - -pub use crate::values::types::tuple::alloc::AllocTuple; -pub use crate::values::types::tuple::refs::FrozenTupleRef; -pub use crate::values::types::tuple::refs::TupleRef; diff --git a/starlark-rust/starlark/src/values/types/tuple/refs.rs b/starlark-rust/starlark/src/values/types/tuple/refs.rs index afed8ee0977e8..687c4dbc1603a 100644 --- a/starlark-rust/starlark/src/values/types/tuple/refs.rs +++ b/starlark-rust/starlark/src/values/types/tuple/refs.rs @@ -15,10 +15,15 @@ * limitations under the License. */ -use crate as starlark; -use crate::coerce::coerce; -use crate::coerce::Coerce; +use std::convert::Infallible; +use std::iter; +use std::slice; + +use ref_cast::ref_cast_custom; +use ref_cast::RefCastCustom; + use crate::typing::Ty; +use crate::values::tuple::UnpackTuple; use crate::values::type_repr::StarlarkTypeRepr; use crate::values::types::tuple::value::FrozenTuple; use crate::values::types::tuple::value::Tuple; @@ -28,7 +33,7 @@ use crate::values::Value; use crate::values::ValueLike; /// Reference to tuple data in Starlark heap. -#[derive(Coerce, Debug)] +#[derive(RefCastCustom, Debug)] #[repr(transparent)] pub struct TupleRef<'v> { contents: [Value<'v>], @@ -36,7 +41,7 @@ pub struct TupleRef<'v> { /// Reference to tuple data in frozen Starlark heap. #[repr(transparent)] -#[derive(Coerce, Debug)] +#[derive(RefCastCustom, Debug)] pub struct FrozenTupleRef { contents: [FrozenValue], } @@ -45,9 +50,12 @@ impl<'v> TupleRef<'v> { /// `type(())`, which is `"tuple"`. pub const TYPE: &'static str = FrozenTupleRef::TYPE; + #[ref_cast_custom] + fn new(slice: &'v [Value<'v>]) -> &'v TupleRef<'v>; + /// Downcast a value to a tuple. 
pub fn from_value(value: Value<'v>) -> Option<&'v TupleRef<'v>> { - Some(coerce(Tuple::from_value(value)?.content())) + Some(Self::new(Tuple::from_value(value)?.content())) } /// Downcast a value to a tuple. @@ -66,7 +74,7 @@ impl<'v> TupleRef<'v> { } /// Iterate over the contents. - pub fn iter(&self) -> impl ExactSizeIterator> + '_ { + pub fn iter<'a>(&'a self) -> iter::Copied>> { self.content().iter().copied() } } @@ -75,9 +83,12 @@ impl FrozenTupleRef { /// `type(())`, which is `"tuple"`. pub const TYPE: &'static str = FrozenTuple::TYPE; + #[ref_cast_custom] + fn new(slice: &'static [FrozenValue]) -> &'static FrozenTupleRef; + /// Downcast a value to a tuple. pub fn from_frozen_value(value: FrozenValue) -> Option<&'static FrozenTupleRef> { - Some(coerce(value.downcast_ref::()?.content())) + Some(Self::new(value.downcast_ref::()?.content())) } /// Number of elements. @@ -97,25 +108,37 @@ impl FrozenTupleRef { } impl<'v> StarlarkTypeRepr for &'v TupleRef<'v> { + type Canonical = as StarlarkTypeRepr>::Canonical; + fn starlark_type_repr() -> Ty { Ty::any_tuple() } } impl<'a> StarlarkTypeRepr for &'a FrozenTupleRef { + type Canonical = as StarlarkTypeRepr>::Canonical; + fn starlark_type_repr() -> Ty { Ty::any_tuple() } } impl<'v> UnpackValue<'v> for &'v TupleRef<'v> { - fn unpack_value(value: Value<'v>) -> Option { - TupleRef::from_value(value) + type Error = Infallible; + + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + Ok(TupleRef::from_value(value)) } } impl<'v> UnpackValue<'v> for &'v FrozenTupleRef { - fn unpack_value(value: Value<'v>) -> Option { - FrozenTupleRef::from_frozen_value(value.unpack_frozen()?) + type Error = crate::Error; + + fn unpack_value_impl(value: Value<'v>) -> crate::Result> { + let Some(value) = value.unpack_frozen() else { + // TODO(nga): return error. + return Ok(None); + }; + Ok(FrozenTupleRef::from_frozen_value(value)) } } diff --git a/starlark-rust/starlark/src/values/types/tuple/rust_tuple.rs b/starlark-rust/starlark/src/values/types/tuple/rust_tuple.rs index f80cd69dbd5b9..a0d11ca054844 100644 --- a/starlark-rust/starlark/src/values/types/tuple/rust_tuple.rs +++ b/starlark-rust/starlark/src/values/types/tuple/rust_tuple.rs @@ -17,6 +17,8 @@ //! Bindings to/from Rust tuple types. +use either::Either; + use crate::typing::Ty; use crate::values::type_repr::StarlarkTypeRepr; use crate::values::types::tuple::value::Tuple; @@ -80,12 +82,16 @@ impl AllocFroz } impl StarlarkTypeRepr for (T1, T2) { + type Canonical = (T1::Canonical, T2::Canonical); + fn starlark_type_repr() -> Ty { Ty::tuple2(T1::starlark_type_repr(), T2::starlark_type_repr()) } } impl StarlarkTypeRepr for (T1,) { + type Canonical = (T1::Canonical,); + fn starlark_type_repr() -> Ty { Ty::tuple(vec![T1::starlark_type_repr()]) } @@ -94,6 +100,8 @@ impl StarlarkTypeRepr for (T1,) { impl StarlarkTypeRepr for (T1, T2, T3) { + type Canonical = (T1::Canonical, T2::Canonical, T3::Canonical); + fn starlark_type_repr() -> Ty { Ty::tuple(vec![ T1::starlark_type_repr(), @@ -104,18 +112,54 @@ impl StarlarkT } impl<'v, T1: UnpackValue<'v>, T2: UnpackValue<'v>> UnpackValue<'v> for (T1, T2) { - fn expected() -> String { - format!("tuple ({}, {})", T1::expected(), T2::expected()) + type Error = Either; + + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + let Some(t) = Tuple::from_value(value) else { + return Ok(None); + }; + let [a, b] = t.content() else { + return Ok(None); + }; + let [a, b] = [*a, *b]; + let Some(a) = T1::unpack_value_impl(a).map_err(Either::Left)? 
else { + return Ok(None); + }; + let Some(b) = T2::unpack_value_impl(b).map_err(Either::Right)? else { + return Ok(None); + }; + Ok(Some((a, b))) } +} + +impl<'v, T1: UnpackValue<'v>, T2: UnpackValue<'v>, T3: UnpackValue<'v>> UnpackValue<'v> + for (T1, T2, T3) +{ + type Error = Either>; - fn unpack_value(value: Value<'v>) -> Option { - let t = Tuple::from_value(value)?; - if t.len() != 2 { - return None; - } - Some(( - T1::unpack_value(t.content()[0])?, - T2::unpack_value(t.content()[1])?, - )) + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + let Some(t) = Tuple::from_value(value) else { + return Ok(None); + }; + let [a, b, c] = t.content() else { + return Ok(None); + }; + let [a, b, c] = [*a, *b, *c]; + let Some(a) = T1::unpack_value_impl(a).map_err(Either::Left)? else { + return Ok(None); + }; + let Some(b) = T2::unpack_value_impl(b) + .map_err(Either::Left) + .map_err(Either::Right)? + else { + return Ok(None); + }; + let Some(c) = T3::unpack_value_impl(c) + .map_err(Either::Right) + .map_err(Either::Right)? + else { + return Ok(None); + }; + Ok(Some((a, b, c))) } } diff --git a/starlark-rust/starlark/src/values/types/tuple/unpack.rs b/starlark-rust/starlark/src/values/types/tuple/unpack.rs new file mode 100644 index 0000000000000..4190edc261b78 --- /dev/null +++ b/starlark-rust/starlark/src/values/types/tuple/unpack.rs @@ -0,0 +1,115 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::slice; +use std::vec; + +use crate::typing::Ty; +use crate::values::tuple::TupleRef; +use crate::values::type_repr::StarlarkTypeRepr; +use crate::values::UnpackValue; +use crate::values::Value; + +/// Unpack a value of type `tuple[T, ...]` into a vec. +#[derive(Debug, Clone, Eq, PartialEq, Hash, Ord, PartialOrd)] +pub struct UnpackTuple { + /// Unpacked items. + pub items: Vec, +} + +impl Default for UnpackTuple { + fn default() -> Self { + UnpackTuple { items: Vec::new() } + } +} + +impl StarlarkTypeRepr for UnpackTuple { + type Canonical = UnpackTuple; + + fn starlark_type_repr() -> Ty { + Ty::tuple_of(T::starlark_type_repr()) + } +} + +impl<'v, T: UnpackValue<'v>> UnpackValue<'v> for UnpackTuple { + type Error = >::Error; + + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + let Some(tuple) = TupleRef::from_value(value) else { + return Ok(None); + }; + // TODO(nga): should not allocate if the first element is of the wrong type. + let mut items = Vec::with_capacity(tuple.len()); + for v in tuple.iter() { + let Some(v) = T::unpack_value_impl(v)? 
else { + return Ok(None); + }; + items.push(v); + } + Ok(Some(UnpackTuple { items })) + } +} + +impl IntoIterator for UnpackTuple { + type Item = T; + type IntoIter = vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.items.into_iter() + } +} + +impl<'a, T> IntoIterator for &'a UnpackTuple { + type Item = &'a T; + type IntoIter = slice::Iter<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + self.items.iter() + } +} + +impl<'a, T> IntoIterator for &'a mut UnpackTuple { + type Item = &'a mut T; + type IntoIter = slice::IterMut<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + self.items.iter_mut() + } +} + +#[cfg(test)] +mod tests { + use crate::values::tuple::unpack::UnpackTuple; + use crate::values::Heap; + use crate::values::UnpackValue; + + #[test] + fn test_unpack() { + let heap = Heap::new(); + let v = heap.alloc(("a", "b")); + assert_eq!( + vec!["a", "b"], + UnpackTuple::<&str>::unpack_value(v).unwrap().unwrap().items + ); + assert!(UnpackTuple::::unpack_value(v).unwrap().is_none()); + assert!( + UnpackTuple::<&str>::unpack_value(heap.alloc(1)) + .unwrap() + .is_none() + ); + } +} diff --git a/starlark-rust/starlark/src/values/types/tuple/value.rs b/starlark-rust/starlark/src/values/types/tuple/value.rs index b931c291e2c08..3b19c789330e8 100644 --- a/starlark-rust/starlark/src/values/types/tuple/value.rs +++ b/starlark-rust/starlark/src/values/types/tuple/value.rs @@ -27,7 +27,6 @@ use display_container::fmt_container; use serde::ser::SerializeTuple; use serde::Serialize; use starlark_derive::starlark_value; -use starlark_derive::StarlarkDocs; use crate as starlark; use crate::any::ProvidesStaticType; @@ -41,8 +40,8 @@ use crate::values::comparison::equals_slice; use crate::values::index::apply_slice; use crate::values::index::convert_index; use crate::values::layout::avalue::alloc_static; +use crate::values::layout::avalue::AValueFrozenTuple; use crate::values::layout::avalue::AValueImpl; -use crate::values::layout::avalue::Direct; use crate::values::layout::heap::repr::AValueRepr; use crate::values::FrozenValue; use crate::values::Heap; @@ -50,13 +49,13 @@ use crate::values::StarlarkValue; use crate::values::UnpackValue; use crate::values::Value; use crate::values::ValueError; +use crate::values::ValueLifetimeless; use crate::values::ValueLike; /// Define the tuple type. See [`Tuple`] and [`FrozenTuple`] as the two aliases. #[repr(C)] -#[derive(ProvidesStaticType, StarlarkDocs, Allocative)] -#[starlark_docs(builtin = "standard")] -pub(crate) struct TupleGen { +#[derive(ProvidesStaticType, Allocative)] +pub(crate) struct TupleGen { len: usize, /// The data stored by the tuple. content: [V; 0], @@ -85,7 +84,7 @@ impl<'v, V: ValueLike<'v>> Debug for TupleGen { } } -impl TupleGen { +impl TupleGen { /// `type(())`. pub(crate) const TYPE: &'static str = "tuple"; @@ -98,8 +97,8 @@ impl TupleGen { } } -pub(crate) static VALUE_EMPTY_TUPLE: AValueRepr> = - alloc_static(Direct, unsafe { FrozenTuple::new(0) }); +pub(crate) static VALUE_EMPTY_TUPLE: AValueRepr> = + alloc_static(unsafe { FrozenTuple::new(0) }); /// Runtime type of unfrozen tuple. 
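`TupleGen` above uses the same trick as `StarlarkStrN` earlier in this diff: a fixed header carrying the length, followed by the elements laid out inline, so a tuple costs one allocation and a reference to it stays a single thin pointer. A sketch of the pattern (`InlineSeq` is hypothetical; the real type is only ever constructed by the heap, which allocates the trailing elements behind the header):

```
use std::slice;

// "Header + trailing inline array": the element count lives in a fixed
// header, and the elements sit immediately after it in memory.
#[repr(C)]
struct InlineSeq<T> {
    len: usize,
    content: [T; 0], // zero-sized tail marking where elements begin
}

impl<T> InlineSeq<T> {
    /// Safety: `self` must really be followed by `len` initialized `T`s;
    /// that invariant is upheld by the allocator, not the type system.
    unsafe fn content(&self) -> &[T] {
        slice::from_raw_parts(self.content.as_ptr(), self.len)
    }
}

fn main() {
    // The empty sequence needs no trailing elements at all, which is
    // why an empty tuple (like `VALUE_EMPTY_TUPLE` above) can live in a
    // plain `static`.
    let empty = InlineSeq::<u8> { len: 0, content: [] };
    assert_eq!(unsafe { empty.content() }.len(), 0);
}
```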
pub(crate) type Tuple<'v> = TupleGen>; @@ -144,7 +143,7 @@ impl<'v, V: ValueLike<'v>> TupleGen { } #[starlark_value(type = Tuple::TYPE)] -impl<'v, V: ValueLike<'v> + 'v> StarlarkValue<'v> for TupleGen +impl<'v, V: ValueLike<'v>> StarlarkValue<'v> for TupleGen where Self: ProvidesStaticType<'v>, { @@ -159,37 +158,37 @@ where self.len() != 0 } - fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { + fn write_hash(&self, hasher: &mut StarlarkHasher) -> crate::Result<()> { for v in self.content() { v.write_hash(hasher)?; } Ok(()) } - fn equals(&self, other: Value<'v>) -> anyhow::Result { + fn equals(&self, other: Value<'v>) -> crate::Result { match Tuple::from_value(other) { None => Ok(false), Some(other) => equals_slice(self.content(), other.content(), |x, y| x.equals(*y)), } } - fn compare(&self, other: Value<'v>) -> anyhow::Result { + fn compare(&self, other: Value<'v>) -> crate::Result { match Tuple::from_value(other) { None => ValueError::unsupported_with(self, "cmp()", other), Some(other) => compare_slice(self.content(), other.content(), |x, y| x.compare(*y)), } } - fn at(&self, index: Value, _heap: &'v Heap) -> anyhow::Result> { + fn at(&self, index: Value, _heap: &'v Heap) -> crate::Result> { let i = convert_index(index, self.len() as i32)? as usize; Ok(self.content()[i].to_value()) } - fn length(&self) -> anyhow::Result { + fn length(&self) -> crate::Result { Ok(self.len() as i32) } - fn is_in(&self, other: Value<'v>) -> anyhow::Result { + fn is_in(&self, other: Value<'v>) -> crate::Result { for x in self.content() { if x.equals(other)? { return Ok(true); @@ -204,11 +203,11 @@ where stop: Option, stride: Option, heap: &'v Heap, - ) -> anyhow::Result> { + ) -> crate::Result> { Ok(heap.alloc_tuple(&apply_slice(coerce(self.content()), start, stop, stride)?)) } - unsafe fn iterate(&self, me: Value<'v>, _heap: &'v Heap) -> anyhow::Result> { + unsafe fn iterate(&self, me: Value<'v>, _heap: &'v Heap) -> crate::Result> { Ok(me) } @@ -224,7 +223,7 @@ where unsafe fn iter_stop(&self) {} - fn add(&self, other: Value<'v>, heap: &'v Heap) -> Option>> { + fn add(&self, other: Value<'v>, heap: &'v Heap) -> Option>> { if let Some(other) = Tuple::from_value(other) { let mut result = Vec::with_capacity(self.len() + other.len()); for x in self.iter() { @@ -239,8 +238,12 @@ where } } - fn mul(&self, other: Value, heap: &'v Heap) -> Option>> { - let l = i32::unpack_value(other)?; + fn mul(&self, other: Value, heap: &'v Heap) -> Option>> { + let l = match i32::unpack_value(other) { + Ok(Some(l)) => l, + Ok(None) => return None, + Err(e) => return Some(Err(e)), + }; let mut result = Vec::new(); for _i in 0..l { result.extend(self.content().iter().map(|e| e.to_value())); @@ -248,7 +251,7 @@ where Some(Ok(heap.alloc_tuple(&result))) } - fn rmul(&self, lhs: Value<'v>, heap: &'v Heap) -> Option>> { + fn rmul(&self, lhs: Value<'v>, heap: &'v Heap) -> Option>> { self.mul(lhs, heap) } diff --git a/starlark-rust/starlark/src/values/types/unbound.rs b/starlark-rust/starlark/src/values/types/unbound.rs index 8e660745aaf19..c0016f1070f34 100644 --- a/starlark-rust/starlark/src/values/types/unbound.rs +++ b/starlark-rust/starlark/src/values/types/unbound.rs @@ -17,47 +17,83 @@ //! Handle special "unbound" globals: methods or attributes. 
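The `mul` rework just above shows the unpacking convention this diff threads through the codebase: unpacking now yields `Result<Option<T>, E>`, three outcomes instead of `Option<T>`'s two, so "not this type at all" stays distinguishable from "right type, but conversion failed". A self-contained sketch of the convention and of how a call site like `mul` collapses it (the `Unpack` trait here is hypothetical, not the real starlark-rust API):

```
// Ok(Some(v)) -> value had the right type and was unpacked,
// Ok(None)    -> wrong type; the caller may try something else,
// Err(e)      -> right type, but the conversion itself failed.
trait Unpack: Sized {
    type Error;
    fn unpack(raw: &str) -> Result<Option<Self>, Self::Error>;
}

impl Unpack for i32 {
    type Error = std::num::ParseIntError;
    fn unpack(raw: &str) -> Result<Option<Self>, Self::Error> {
        if !raw.starts_with("int:") {
            return Ok(None); // not an int at all: let the caller fall through
        }
        raw["int:".len()..].parse().map(Some)
    }
}

// A call site collapses the three states the way `mul` does above:
// `None` means "operation not applicable", `Some(Err(..))` propagates.
fn demo(raw: &str) -> Option<Result<i32, std::num::ParseIntError>> {
    match i32::unpack(raw) {
        Ok(Some(l)) => Some(Ok(l * 2)),
        Ok(None) => None,
        Err(e) => Some(Err(e)),
    }
}

fn main() {
    assert_eq!(demo("int:21"), Some(Ok(42)));
    assert_eq!(demo("str:x"), None);
    assert!(matches!(demo("int:xyz"), Some(Err(_))));
}
```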
+use std::fmt; +use std::fmt::Debug; +use std::fmt::Formatter; + +use crate::eval::runtime::frame_span::FrameSpan; +use crate::eval::Arguments; +use crate::eval::Evaluator; use crate::values::function::BoundMethodGen; +use crate::values::function::NativeAttr; use crate::values::function::NativeAttribute; +use crate::values::function::NativeMeth; use crate::values::function::NativeMethod; -use crate::values::layout::value_not_special::FrozenValueNotSpecial; +use crate::values::FrozenRef; +use crate::values::FrozenValue; use crate::values::FrozenValueTyped; use crate::values::Heap; use crate::values::Value; use crate::values::ValueLike; /// A value or an unbound method or unbound attribute. -pub(crate) enum MaybeUnboundValue { +#[derive(Clone)] +pub(crate) enum UnboundValue { /// A method with `this` unbound. - Method(FrozenValueTyped<'static, NativeMethod>), + Method( + FrozenValueTyped<'static, NativeMethod>, + FrozenRef<'static, dyn NativeMeth>, + ), /// An attribute with `this` unbound. - Attr(FrozenValueTyped<'static, NativeAttribute>), + Attr( + FrozenValueTyped<'static, NativeAttribute>, + FrozenRef<'static, dyn NativeAttr>, + ), } -impl MaybeUnboundValue { +impl Debug for UnboundValue { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("MaybeUnboundValue").finish_non_exhaustive() + } +} + +impl UnboundValue { + #[inline] + pub(crate) fn to_frozen_value(&self) -> FrozenValue { + match self { + UnboundValue::Method(m, _) => m.to_frozen_value(), + UnboundValue::Attr(a, _) => a.to_frozen_value(), + } + } + /// Bind this object to given `this` value. - pub(crate) fn bind<'v>(self, this: Value<'v>, heap: &'v Heap) -> anyhow::Result> { + #[inline] + pub(crate) fn bind<'v>(&self, this: Value<'v>, heap: &'v Heap) -> crate::Result> { match self { - MaybeUnboundValue::Method(m) => { - Ok(heap.alloc_complex(BoundMethodGen::new(this.to_value(), m))) + UnboundValue::Method(m, _) => { + Ok(heap.alloc_complex(BoundMethodGen::new(this.to_value(), *m))) } - MaybeUnboundValue::Attr(a) => a.call(this, heap), + UnboundValue::Attr(_, a) => a(this, heap), } } -} -impl MaybeUnboundValue { - /// Split into variants. - #[allow(clippy::same_functions_in_if_condition)] // False positive - pub(crate) fn new(value: FrozenValueNotSpecial) -> MaybeUnboundValue { - // TODO(nga): this can be a little faster if we do downcast of `FrozenValueNotSpecial` - // instead of converting it to `FrozenValue` first. - if let Some(method) = FrozenValueTyped::new(value.to_frozen_value()) { - MaybeUnboundValue::Method(method) - } else if let Some(attr) = FrozenValueTyped::new(value.to_frozen_value()) { - MaybeUnboundValue::Attr(attr) - } else { - unreachable!("not a member: {}", value); - } + #[inline] + pub(crate) fn invoke_method<'v>( + &self, + this: Value<'v>, + span: FrozenRef<'static, FrameSpan>, + args: &Arguments<'v, '_>, + eval: &mut Evaluator<'v, '_, '_>, + ) -> crate::Result> { + eval.with_call_stack( + self.to_frozen_value().to_value(), + Some(span), + |eval| match self { + UnboundValue::Method(_, m) => m.invoke(eval, this, args), + UnboundValue::Attr(_, a) => { + NativeAttribute::invoke_method_impl(&**a, this, args, eval) + } + }, + ) } } diff --git a/starlark-rust/starlark/src/values/typing.rs b/starlark-rust/starlark/src/values/typing.rs new file mode 100644 index 0000000000000..e89fa0ca24cc7 --- /dev/null +++ b/starlark-rust/starlark/src/values/typing.rs @@ -0,0 +1,41 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Typechecker-related types. + +pub(crate) mod any; +pub(crate) mod callable; +pub(crate) mod globals; +pub(crate) mod iter; +pub mod macro_refs; +pub(crate) mod never; +pub(crate) mod ty; +pub(crate) mod type_compiled; +pub(crate) mod type_type; + +pub use crate::values::types::type_instance_id::TypeInstanceId; +pub use crate::values::typing::callable::param::StarlarkCallableParamAny; +pub use crate::values::typing::callable::param::StarlarkCallableParamSpec; +pub use crate::values::typing::callable::FrozenStarlarkCallable; +pub use crate::values::typing::callable::StarlarkCallable; +pub use crate::values::typing::callable::StarlarkCallableChecked; +pub use crate::values::typing::iter::StarlarkIter; +pub use crate::values::typing::never::StarlarkNever; +pub use crate::values::typing::type_compiled::compiled::TypeCompiled; +pub use crate::values::typing::type_compiled::matcher::TypeMatcher; +pub use crate::values::typing::type_compiled::type_matcher_factory::TypeMatcherFactory; +pub use crate::values::typing::type_type::TypeType; diff --git a/starlark-rust/starlark/src/values/typing/any.rs b/starlark-rust/starlark/src/values/typing/any.rs index fc0baa92fe6a9..053ac8e31d9a1 100644 --- a/starlark-rust/starlark/src/values/typing/any.rs +++ b/starlark-rust/starlark/src/values/typing/any.rs @@ -22,8 +22,8 @@ use starlark_derive::ProvidesStaticType; use crate as starlark; use crate::typing::Ty; use crate::values::layout::avalue::alloc_static; +use crate::values::layout::avalue::AValueBasic; use crate::values::layout::avalue::AValueImpl; -use crate::values::layout::avalue::Basic; use crate::values::layout::heap::repr::AValueRepr; use crate::values::starlark_value; use crate::values::AllocFrozenValue; @@ -38,7 +38,7 @@ use crate::values::StarlarkValue; ProvidesStaticType, NoSerialize )] -#[display(fmt = "{}", Self::TYPE)] +#[display("{}", Self::TYPE)] pub(crate) struct TypingAny; #[starlark_value(type = "typing.Any")] @@ -50,7 +50,8 @@ impl<'v> StarlarkValue<'v> for TypingAny { impl AllocFrozenValue for TypingAny { fn alloc_frozen_value(self, _heap: &FrozenHeap) -> FrozenValue { - static ANY: AValueRepr> = alloc_static(Basic, TypingAny); + static ANY: AValueRepr>> = + alloc_static(TypingAny); FrozenValue::new_repr(&ANY) } diff --git a/starlark-rust/starlark/src/values/typing/callable.rs b/starlark-rust/starlark/src/values/typing/callable.rs index 29312e2dc26ca..4ba65f4a66dee 100644 --- a/starlark-rust/starlark/src/values/typing/callable.rs +++ b/starlark-rust/starlark/src/values/typing/callable.rs @@ -15,6 +15,15 @@ * limitations under the License. 
*/ +pub(crate) mod param; + +use std::convert::Infallible; +use std::fmt; +use std::fmt::Debug; +use std::fmt::Formatter; +use std::marker::PhantomData; +use std::sync::atomic::AtomicPtr; + use allocative::Allocative; use dupe::Dupe; use starlark_derive::starlark_value; @@ -22,16 +31,30 @@ use starlark_derive::NoSerialize; use starlark_derive::ProvidesStaticType; use crate as starlark; +use crate::private::Private; +use crate::typing::callable::TyCallable; +use crate::typing::ParamSpec; use crate::typing::Ty; +use crate::typing::TyBasic; use crate::values::layout::avalue::alloc_static; +use crate::values::layout::avalue::AValueBasic; use crate::values::layout::avalue::AValueImpl; -use crate::values::layout::avalue::Basic; use crate::values::layout::heap::repr::AValueRepr; +use crate::values::list::UnpackList; use crate::values::type_repr::StarlarkTypeRepr; +use crate::values::typing::callable::param::StarlarkCallableParamAny; +use crate::values::typing::callable::param::StarlarkCallableParamSpec; +use crate::values::typing::TypeCompiled; use crate::values::AllocFrozenValue; +use crate::values::AllocValue; +use crate::values::Freeze; +use crate::values::Freezer; use crate::values::FrozenHeap; use crate::values::FrozenValue; +use crate::values::Heap; use crate::values::StarlarkValue; +use crate::values::Trace; +use crate::values::Tracer; use crate::values::UnpackValue; use crate::values::Value; @@ -42,50 +65,355 @@ use crate::values::Value; ProvidesStaticType, NoSerialize )] -#[display(fmt = "{}", Self::TYPE)] +#[display("{}", Self::TYPE)] pub(crate) struct TypingCallable; #[starlark_value(type = "typing.Callable")] impl<'v> StarlarkValue<'v> for TypingCallable { fn eval_type(&self) -> Option { - Some(StarlarkCallable::starlark_type_repr()) + Some(StarlarkCallable::::starlark_type_repr()) + } + + fn at2( + &self, + param_types: Value<'v>, + ret: Value<'v>, + heap: &'v Heap, + _private: Private, + ) -> crate::Result> { + let param_types = UnpackList::::unpack_value_err(param_types)?; + let ret = TypeCompiled::new(ret, heap)?.as_ty().dupe(); + let param_types: Vec = param_types + .items + .into_iter() + .map(|p| Ok(TypeCompiled::new(p, heap)?.as_ty().dupe())) + .collect::>>()?; + + Ok(heap.alloc_simple(TypingCallableAt2 { + callable: TyCallable::new(ParamSpec::pos_only(param_types, []), ret), + })) } } impl AllocFrozenValue for TypingCallable { fn alloc_frozen_value(self, _heap: &FrozenHeap) -> FrozenValue { - static CALLABLE: AValueRepr> = - alloc_static(Basic, TypingCallable); + static CALLABLE: AValueRepr>> = + alloc_static(TypingCallable); FrozenValue::new_repr(&CALLABLE) } } +#[derive( + Allocative, + Debug, + ProvidesStaticType, + NoSerialize, + derive_more::Display +)] +#[display("{}", callable)] +pub(crate) struct TypingCallableAt2 { + callable: TyCallable, +} + +#[starlark_value(type = "typing.Callable")] +impl<'v> StarlarkValue<'v> for TypingCallableAt2 { + fn eval_type(&self) -> Option { + Some(Ty::basic(TyBasic::Callable(self.callable.dupe()))) + } +} + /// Marker for a callable value. Can be used in function signatures /// for better documentation and type checking. 
-#[derive(Debug, Copy, Clone, Dupe)] -pub struct StarlarkCallable<'v>(pub Value<'v>); +#[derive(Allocative)] +#[allocative(bound = "")] +pub struct StarlarkCallable< + 'v, + P: StarlarkCallableParamSpec = StarlarkCallableParamAny, + R: StarlarkTypeRepr = FrozenValue, +>(pub Value<'v>, PhantomData>>); + +impl<'v, P: StarlarkCallableParamSpec, R: StarlarkTypeRepr> Copy for StarlarkCallable<'v, P, R> {} + +impl<'v, P: StarlarkCallableParamSpec, R: StarlarkTypeRepr> Clone for StarlarkCallable<'v, P, R> { + fn clone(&self) -> Self { + *self + } +} + +impl<'v, P: StarlarkCallableParamSpec, R: StarlarkTypeRepr> Dupe for StarlarkCallable<'v, P, R> {} + +impl<'v, P: StarlarkCallableParamSpec, R: StarlarkTypeRepr> Debug for StarlarkCallable<'v, P, R> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_tuple("StarlarkCallable").field(&self.0).finish() + } +} + +// TODO(nga): implement `#[trace(bound = "")]`. +unsafe impl<'v, P: StarlarkCallableParamSpec, R: StarlarkTypeRepr> Trace<'v> + for StarlarkCallable<'v, P, R> +{ + fn trace(&mut self, tracer: &Tracer<'v>) { + self.0.trace(tracer); + } +} + +impl<'v, P: StarlarkCallableParamSpec, R: StarlarkTypeRepr> StarlarkCallable<'v, P, R> { + /// Wrap the value. + pub fn unchecked_new(value: Value<'v>) -> Self { + StarlarkCallable(value, PhantomData) + } + + /// Convert to `FrozenValue` version. + pub fn unpack_frozen(self) -> Option> { + self.0 + .unpack_frozen() + .map(FrozenStarlarkCallable::unchecked_new) + } + + /// Erase parameter and return types. + pub fn erase(self) -> StarlarkCallable<'v> { + StarlarkCallable::unchecked_new(self.0) + } +} + +impl<'v, P: StarlarkCallableParamSpec, R: StarlarkTypeRepr> StarlarkTypeRepr + for StarlarkCallable<'v, P, R> +{ + type Canonical = Self; -impl<'v> StarlarkTypeRepr for StarlarkCallable<'v> { fn starlark_type_repr() -> Ty { - Ty::any_function() + // TODO(nga): implement the same machinery for `typing.Callable`. + Ty::callable(P::params(), R::starlark_type_repr()) } } -impl<'v> UnpackValue<'v> for StarlarkCallable<'v> { +impl<'v, P: StarlarkCallableParamSpec, R: StarlarkTypeRepr> UnpackValue<'v> + for StarlarkCallable<'v, P, R> +{ + type Error = Infallible; + #[inline] - fn unpack_value(value: Value<'v>) -> Option { + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { if value.vtable().starlark_value.HAS_invoke { - Some(StarlarkCallable(value)) + Ok(Some(StarlarkCallable::unchecked_new(value))) } else { - None + Ok(None) + } + } +} + +impl<'v, P: StarlarkCallableParamSpec, R: StarlarkTypeRepr> AllocValue<'v> + for StarlarkCallable<'v, P, R> +{ + fn alloc_value(self, _heap: &'v Heap) -> Value<'v> { + self.0 + } +} + +/// Marker for a callable value. +#[derive(Allocative)] +#[allocative(bound = "")] +pub struct FrozenStarlarkCallable< + P: StarlarkCallableParamSpec = StarlarkCallableParamAny, + R: StarlarkTypeRepr = FrozenValue, +>(pub FrozenValue, PhantomData>); + +impl Debug for FrozenStarlarkCallable { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_tuple("FrozenStarlarkCallable") + .field(&self.0) + .finish() + } +} + +fn _assert_sync_send() { + fn _assert() {} + // `Value` is not `Sync` nor `Send`, but `FrozenStarlarkCallable` should be. 
+ _assert::>(); +} + +impl Copy for FrozenStarlarkCallable {} + +impl Clone for FrozenStarlarkCallable { + fn clone(&self) -> Self { + *self + } +} + +impl Dupe for FrozenStarlarkCallable {} + +unsafe impl<'v, P: StarlarkCallableParamSpec, R: StarlarkTypeRepr> Trace<'v> + for FrozenStarlarkCallable +{ + fn trace(&mut self, tracer: &Tracer<'v>) { + // TODO: implement `#[trace(bound = "")]`. + self.0.trace(tracer); + } +} + +impl FrozenStarlarkCallable { + /// Wrap the value. + pub fn unchecked_new(value: FrozenValue) -> Self { + FrozenStarlarkCallable(value, PhantomData) + } + + /// Erase parameter and return types. + pub fn erase(self) -> FrozenStarlarkCallable { + FrozenStarlarkCallable::unchecked_new(self.0) + } +} + +impl StarlarkTypeRepr + for FrozenStarlarkCallable +{ + type Canonical = as StarlarkTypeRepr>::Canonical; + + fn starlark_type_repr() -> Ty { + StarlarkCallable::::starlark_type_repr() + } +} + +impl AllocFrozenValue + for FrozenStarlarkCallable +{ + fn alloc_frozen_value(self, _heap: &FrozenHeap) -> FrozenValue { + self.0 + } +} + +impl<'v, P: StarlarkCallableParamSpec, R: StarlarkTypeRepr> Freeze for StarlarkCallable<'v, P, R> { + type Frozen = FrozenStarlarkCallable; + fn freeze(self, freezer: &Freezer) -> anyhow::Result { + Ok(FrozenStarlarkCallable::unchecked_new( + self.0.freeze(freezer)?, + )) + } +} + +impl FrozenStarlarkCallable { + /// Convert to `Value`-version. + #[inline] + pub fn to_callable<'v>(self) -> StarlarkCallable<'v, P, R> { + StarlarkCallable::::unchecked_new(self.0.to_value()) + } +} + +/// More strict version of [`StarlarkCallable`]. +/// +/// This checks not only that the value is callable, +/// but also that it is a callable with the correct signature. +/// +/// The implementation uses starlark-rust typechecker with all its limitations. +/// For example, if there are optional parameters in both value-def and this signature, +/// signature matching is ignored at the time of writing. +/// +/// Unpacking with this type is expensive: +/// usually it is OK to use it for code executed once at top-level scope (like `rule()`), +/// but not for code executed many times (like `partial()`). 
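
To make the trade-off concrete, here is a hedged sketch of both markers used as native-function parameters (`demo_module` and the function names are illustrative; tuples of `StarlarkTypeRepr` types denote positional-only parameters via `StarlarkCallableParamSpec`):

use starlark::environment::GlobalsBuilder;
use starlark::starlark_module;
use starlark::values::none::NoneType;
use starlark::values::typing::StarlarkCallable;
use starlark::values::typing::StarlarkCallableChecked;

#[starlark_module]
fn demo_module(globals: &mut GlobalsBuilder) {
    // Cheap: unpacking only checks "is callable"; the `(String,) -> i32` part feeds
    // documentation and the static typechecker as `typing.Callable[[str], int]`.
    fn takes_callback(
        #[starlark(require = pos)] _f: StarlarkCallable<(String,), i32>,
    ) -> anyhow::Result<NoneType> {
        Ok(NoneType)
    }

    // Expensive: also runs the typechecker against the value's own signature, so per
    // the doc comment above it suits once-per-file calls (like `rule()`), not hot paths.
    fn takes_checked_callback(
        #[starlark(require = pos)] _f: StarlarkCallableChecked<(String,), i32>,
    ) -> anyhow::Result<NoneType> {
        Ok(NoneType)
    }
}
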
+#[derive(Allocative)] +#[allocative(bound = "")] +pub struct StarlarkCallableChecked<'v, P: StarlarkCallableParamSpec, R: StarlarkTypeRepr>( + pub Value<'v>, + PhantomData>, +); + +impl<'v, P: StarlarkCallableParamSpec, R: StarlarkTypeRepr> Clone + for StarlarkCallableChecked<'v, P, R> +{ + fn clone(&self) -> Self { + *self + } +} + +impl<'v, P: StarlarkCallableParamSpec, R: StarlarkTypeRepr> Copy + for StarlarkCallableChecked<'v, P, R> +{ +} + +impl<'v, P: StarlarkCallableParamSpec, R: StarlarkTypeRepr> Dupe + for StarlarkCallableChecked<'v, P, R> +{ +} + +impl<'v, P: StarlarkCallableParamSpec, R: StarlarkTypeRepr> Debug + for StarlarkCallableChecked<'v, P, R> +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_tuple("StarlarkCallableChecked") + .field(&self.0) + .finish() + } +} + +unsafe impl<'v, P: StarlarkCallableParamSpec, R: StarlarkTypeRepr> Trace<'v> + for StarlarkCallableChecked<'v, P, R> +{ + fn trace(&mut self, tracer: &Tracer<'v>) { + let StarlarkCallableChecked(value, phantom) = self; + value.trace(tracer); + phantom.trace(tracer); + } +} + +impl<'v, P: StarlarkCallableParamSpec, R: StarlarkTypeRepr> AllocValue<'v> + for StarlarkCallableChecked<'v, P, R> +{ + fn alloc_value(self, _heap: &'v Heap) -> Value<'v> { + self.0 + } +} + +impl<'v, P: StarlarkCallableParamSpec, R: StarlarkTypeRepr> StarlarkCallableChecked<'v, P, R> { + /// Convert to [`StarlarkCallable`]. + pub fn to_unchecked(self) -> StarlarkCallable<'v, P, R> { + StarlarkCallable::unchecked_new(self.0) + } +} + +impl<'v, P: StarlarkCallableParamSpec, R: StarlarkTypeRepr> StarlarkTypeRepr + for StarlarkCallableChecked<'v, P, R> +{ + type Canonical = as StarlarkTypeRepr>::Canonical; + + fn starlark_type_repr() -> Ty { + ::starlark_type_repr() + } +} + +impl<'v, P: StarlarkCallableParamSpec, R: StarlarkTypeRepr> UnpackValue<'v> + for StarlarkCallableChecked<'v, P, R> +{ + /// Only internal error is possible. + type Error = crate::Error; + + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + // Check it is a callable first. + if StarlarkCallable::::unpack_value_opt(value).is_none() { + return Ok(None); + } + + // We need generic statics to cache this. + let ty = Ty::callable(P::params(), R::starlark_type_repr()); + + match Ty::of_value(value).check_intersects(&ty)? 
{ + true => Ok(Some(StarlarkCallableChecked(value, PhantomData))), + false => Ok(None), } } } #[cfg(test)] mod tests { + use starlark_derive::starlark_module; + + use crate as starlark; use crate::assert; + use crate::assert::Assert; + use crate::environment::GlobalsBuilder; + use crate::values::none::NoneType; + use crate::values::typing::StarlarkCallable; + use crate::values::typing::StarlarkCallableChecked; #[test] fn test_callable_runtime() { @@ -125,4 +453,149 @@ def bar(): "Expected type", ); } + + #[starlark_module] + fn my_module(globals: &mut GlobalsBuilder) { + fn accept_f( + #[starlark(require=pos)] _x: StarlarkCallable<(String,), i32>, + ) -> anyhow::Result { + Ok(NoneType) + } + } + + #[test] + fn test_native_callable_pass() { + let mut a = Assert::new(); + a.globals_add(my_module); + a.pass( + r#" +def f(x: str) -> int: + return len(x) + +def test(): + accept_f(f) +"#, + ); + } + + #[test] + fn test_native_callable_fail_compile_time_wrong_param_type() { + let mut a = Assert::new(); + a.globals_add(my_module); + a.fail( + r#" +def f(x: list) -> int: + return 1 + +def test(): + accept_f(f) +"#, + "Expected type `typing.Callable[[str], int]` but got", + ); + } + + #[test] + fn test_native_callable_fail_compile_time_wrong_param_count() { + let mut a = Assert::new(); + a.globals_add(my_module); + a.fail( + r#" +def f() -> int: + return 1 + +def test(): + accept_f(f) +"#, + "Expected type `typing.Callable[[str], int]` but got", + ); + } + + #[test] + fn test_typing_callable_pass() { + let a = Assert::new(); + a.pass( + r#" +def accept_f(x: typing.Callable[[str], int]) -> None: + pass + +def f(x: str) -> int: + return len(x) + +def test(): + accept_f(f) +"#, + ); + } + + #[test] + fn test_typing_callable_fail_compile_time_wrong_param_type() { + let a = Assert::new(); + a.fail( + r#" +def accept_f(x: typing.Callable[[str], int]) -> None: + pass + +def f(x: list) -> int: + return 1 + +def test(): + accept_f(f) +"#, + "Expected type `typing.Callable[[str], int]` but got", + ); + } + + #[test] + fn test_typing_callable_fail_compile_time_wrong_param_count() { + let a = Assert::new(); + a.fail( + r#" +def accept_f(x: typing.Callable[[str], int]) -> None: + pass + +def f() -> int: + return 1 + +def test(): + accept_f(f) +"#, + "Expected type `typing.Callable[[str], int]` but got", + ); + } + + #[test] + fn test_callable_checked_runtime() { + #[starlark_module] + fn module(globals: &mut GlobalsBuilder) { + fn accept_f( + #[starlark(require=pos)] _f: StarlarkCallableChecked<(), NoneType>, + ) -> anyhow::Result { + Ok(NoneType) + } + + fn good() -> anyhow::Result { + Ok(NoneType) + } + + fn bad() -> anyhow::Result { + Ok(10) + } + } + + let mut a = Assert::new(); + a.globals_add(module); + + a.pass("accept_f(good)"); + + a.fail( + r#" +def test(): + x = noop(bad) # Hide the type from static typechecker. + accept_f(x) + +test() + "#, + "Type of parameter `_f` doesn't match", + ); + } } diff --git a/starlark-rust/starlark/src/values/typing/callable/param.rs b/starlark-rust/starlark/src/values/typing/callable/param.rs new file mode 100644 index 0000000000000..72282f108633b --- /dev/null +++ b/starlark-rust/starlark/src/values/typing/callable/param.rs @@ -0,0 +1,91 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use crate::typing::ParamSpec;
+use crate::values::type_repr::StarlarkTypeRepr;
+
+/// Type parameter for [`StarlarkCallable`](crate::values::typing::callable::StarlarkCallable)
+/// or [`FrozenStarlarkCallable`](crate::values::typing::callable::FrozenStarlarkCallable)
+/// describing the expected parameters of the callable.
+pub trait StarlarkCallableParamSpec {
+    /// Get the parameter specification for the callable.
+    fn params() -> ParamSpec;
+}
+
+/// Indicates that a callable accepts any number of positional and keyword arguments.
+pub struct StarlarkCallableParamAny;
+
+/// `*args` and `**kwargs` parameters.
+impl StarlarkCallableParamSpec for StarlarkCallableParamAny {
+    fn params() -> ParamSpec {
+        ParamSpec::any()
+    }
+}
+
+/// No parameters.
+impl StarlarkCallableParamSpec for () {
+    fn params() -> ParamSpec {
+        ParamSpec::pos_only([], [])
+    }
+}
+
+/// Single positional-only parameter.
+impl<A: StarlarkTypeRepr> StarlarkCallableParamSpec for (A,) {
+    fn params() -> ParamSpec {
+        ParamSpec::pos_only([A::starlark_type_repr()], [])
+    }
+}
+
+/// Two positional-only parameters.
+impl<A: StarlarkTypeRepr, B: StarlarkTypeRepr> StarlarkCallableParamSpec for (A, B) {
+    fn params() -> ParamSpec {
+        ParamSpec::pos_only([A::starlark_type_repr(), B::starlark_type_repr()], [])
+    }
+}
+
+/// Three positional-only parameters.
+impl<A: StarlarkTypeRepr, B: StarlarkTypeRepr, C: StarlarkTypeRepr> StarlarkCallableParamSpec
+    for (A, B, C)
+{
+    fn params() -> ParamSpec {
+        ParamSpec::pos_only(
+            [
+                A::starlark_type_repr(),
+                B::starlark_type_repr(),
+                C::starlark_type_repr(),
+            ],
+            [],
+        )
+    }
+}
+
+/// Four positional-only parameters.
+impl + StarlarkCallableParamSpec for (A, B, C, D) +{ + fn params() -> ParamSpec { + ParamSpec::pos_only( + [ + A::starlark_type_repr(), + B::starlark_type_repr(), + C::starlark_type_repr(), + D::starlark_type_repr(), + ], + [], + ) + } +} diff --git a/starlark-rust/starlark/src/values/typing/globals.rs b/starlark-rust/starlark/src/values/typing/globals.rs index 99d4466175696..ed80d2e68b1d1 100644 --- a/starlark-rust/starlark/src/values/typing/globals.rs +++ b/starlark-rust/starlark/src/values/typing/globals.rs @@ -24,7 +24,7 @@ use crate::values::typing::type_compiled::globals::register_eval_type; pub(crate) fn register_typing(globals: &mut GlobalsBuilder) { register_eval_type(globals); - globals.struct_("typing", |globals| { + globals.namespace("typing", |globals| { globals.set("Any", TypingAny); globals.set("Never", TypingNever); globals.set("Callable", TypingCallable); diff --git a/starlark-rust/starlark/src/values/typing/iter.rs b/starlark-rust/starlark/src/values/typing/iter.rs index 6527528f1d6c3..7bfdd39569f52 100644 --- a/starlark-rust/starlark/src/values/typing/iter.rs +++ b/starlark-rust/starlark/src/values/typing/iter.rs @@ -25,8 +25,8 @@ use starlark_derive::ProvidesStaticType; use crate as starlark; use crate::typing::Ty; use crate::values::layout::avalue::alloc_static; +use crate::values::layout::avalue::AValueBasic; use crate::values::layout::avalue::AValueImpl; -use crate::values::layout::avalue::Basic; use crate::values::layout::heap::repr::AValueRepr; use crate::values::type_repr::StarlarkTypeRepr; use crate::values::AllocFrozenValue; @@ -40,6 +40,8 @@ enum NonInstantiable {} pub struct StarlarkIter(PhantomData, NonInstantiable); impl StarlarkTypeRepr for StarlarkIter { + type Canonical = StarlarkIter; + fn starlark_type_repr() -> Ty { Ty::iter(T::starlark_type_repr()) } @@ -52,7 +54,7 @@ impl StarlarkTypeRepr for StarlarkIter { ProvidesStaticType, NoSerialize )] -#[display(fmt = "{}", Self::TYPE)] +#[display("{}", Self::TYPE)] pub(crate) struct TypingIterable; #[starlark_value(type = "typing.Iterable")] @@ -66,8 +68,8 @@ impl<'v> StarlarkValue<'v> for TypingIterable { impl AllocFrozenValue for TypingIterable { fn alloc_frozen_value(self, _heap: &FrozenHeap) -> FrozenValue { - static ANY: AValueRepr> = - alloc_static(Basic, TypingIterable); + static ANY: AValueRepr>> = + alloc_static(TypingIterable); FrozenValue::new_repr(&ANY) } diff --git a/starlark-rust/starlark/src/values/typing/macro_refs.rs b/starlark-rust/starlark/src/values/typing/macro_refs.rs index a4745106cff5f..ba5de9238d80d 100644 --- a/starlark-rust/starlark/src/values/typing/macro_refs.rs +++ b/starlark-rust/starlark/src/values/typing/macro_refs.rs @@ -16,9 +16,6 @@ */ #![doc(hidden)] - -use anyhow::Context; - use crate::values::typing::type_compiled::compiled::TypeCompiled; use crate::values::Heap; use crate::values::StarlarkValue; @@ -35,13 +32,15 @@ pub fn starlark_value_bit_or_for_type<'v, S: StarlarkValue<'v>>( this: &S, other: Value<'v>, heap: &'v Heap, -) -> anyhow::Result> { +) -> crate::Result> { let Some(this) = this.eval_type() else { let mut repr = String::new(); this.collect_repr(&mut repr); - return Err(TypingMacroRefsError::LhsNotType(repr).into()); + return Err(crate::Error::new_other(TypingMacroRefsError::LhsNotType( + repr, + ))); }; let this = TypeCompiled::from_ty(&this, heap); - let other = TypeCompiled::new(other, heap).context("converting RHS to type")?; + let other = TypeCompiled::new(other, heap)?; Ok(TypeCompiled::type_any_of_two(this, other, heap).to_inner()) } diff --git 
a/starlark-rust/starlark/src/values/typing/mod.rs b/starlark-rust/starlark/src/values/typing/mod.rs deleted file mode 100644 index 0ce4fe32e872c..0000000000000 --- a/starlark-rust/starlark/src/values/typing/mod.rs +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! Typechecker-related types. - -pub(crate) mod any; -pub(crate) mod callable; -pub(crate) mod globals; -pub(crate) mod iter; -pub mod macro_refs; -pub(crate) mod never; -pub(crate) mod ty; -pub(crate) mod type_compiled; - -pub use crate::values::types::type_instance_id::TypeInstanceId; -pub use crate::values::typing::callable::StarlarkCallable; -pub use crate::values::typing::iter::StarlarkIter; -pub use crate::values::typing::never::StarlarkNever; -pub use crate::values::typing::type_compiled::compiled::TypeCompiled; -pub use crate::values::typing::type_compiled::matcher::TypeMatcher; -pub use crate::values::typing::type_compiled::type_matcher_factory::TypeMatcherFactory; diff --git a/starlark-rust/starlark/src/values/typing/never.rs b/starlark-rust/starlark/src/values/typing/never.rs index 54ad2fd5c91f0..219de5eb89c0d 100644 --- a/starlark-rust/starlark/src/values/typing/never.rs +++ b/starlark-rust/starlark/src/values/typing/never.rs @@ -39,8 +39,8 @@ use starlark_derive::ProvidesStaticType; use crate as starlark; use crate::typing::Ty; use crate::values::layout::avalue::alloc_static; +use crate::values::layout::avalue::AValueBasic; use crate::values::layout::avalue::AValueImpl; -use crate::values::layout::avalue::Basic; use crate::values::layout::heap::repr::AValueRepr; use crate::values::starlark_value; use crate::values::type_repr::StarlarkTypeRepr; @@ -57,7 +57,7 @@ use crate::values::StarlarkValue; ProvidesStaticType, NoSerialize )] -#[display(fmt = "{}", Self::TYPE)] +#[display("{}", Self::TYPE)] pub(crate) struct TypingNever; #[starlark_value(type = "typing.Never")] @@ -69,7 +69,8 @@ impl<'v> StarlarkValue<'v> for TypingNever { impl AllocFrozenValue for TypingNever { fn alloc_frozen_value(self, _heap: &FrozenHeap) -> FrozenValue { - static NEVER: AValueRepr> = alloc_static(Basic, TypingNever); + static NEVER: AValueRepr>> = + alloc_static(TypingNever); FrozenValue::new_repr(&NEVER) } @@ -79,6 +80,8 @@ impl AllocFrozenValue for TypingNever { pub enum StarlarkNever {} impl StarlarkTypeRepr for StarlarkNever { + type Canonical = Self; + fn starlark_type_repr() -> Ty { Ty::never() } diff --git a/starlark-rust/starlark/src/values/typing/ty.rs b/starlark-rust/starlark/src/values/typing/ty.rs index 0f0f67a6dad52..aa262c684de44 100644 --- a/starlark-rust/starlark/src/values/typing/ty.rs +++ b/starlark-rust/starlark/src/values/typing/ty.rs @@ -26,6 +26,7 @@ use crate::typing::TyBasic; use crate::values::StarlarkValue; /// Type of type. 
+#[doc(hidden)]
 #[derive(
     Debug,
     derive_more::Display,
@@ -33,7 +34,7 @@ use crate::values::StarlarkValue;
     ProvidesStaticType,
     NoSerialize
 )]
-#[display(fmt = "type")]
+#[display("type")]
 pub enum AbstractType {}
 
 #[starlark_value(type = "type")]
@@ -43,10 +44,9 @@ impl<'v> StarlarkValue<'v> for AbstractType {
     }
 
     fn eval_type(&self) -> Option<Ty> {
-        unreachable!(
-            "This is unreachable, but this function is needed \
-             so `TyStarlarkValue` could think this is a type"
-        )
+        // This is unreachable, but this function is needed
+        // so `TyStarlarkValue` could think this is a type.
+        match *self {}
     }
 }
diff --git a/starlark-rust/starlark/src/values/typing/type_compiled/mod.rs b/starlark-rust/starlark/src/values/typing/type_compiled.rs
similarity index 100%
rename from starlark-rust/starlark/src/values/typing/type_compiled/mod.rs
rename to starlark-rust/starlark/src/values/typing/type_compiled.rs
diff --git a/starlark-rust/starlark/src/values/typing/type_compiled/alloc.rs b/starlark-rust/starlark/src/values/typing/type_compiled/alloc.rs
index 1ce17995a0c1a..1c956eae65dee 100644
--- a/starlark-rust/starlark/src/values/typing/type_compiled/alloc.rs
+++ b/starlark-rust/starlark/src/values/typing/type_compiled/alloc.rs
@@ -21,7 +21,6 @@ use crate::typing::custom::TyCustom;
 use crate::typing::starlark_value::TyStarlarkValue;
 use crate::typing::Ty;
 use crate::typing::TyBasic;
-use crate::typing::TyName;
 use crate::values::typing::type_compiled::matcher::TypeMatcher;
 use crate::values::typing::type_compiled::matcher::TypeMatcherBoxAlloc;
 use crate::values::typing::type_compiled::matchers::IsAny;
@@ -35,9 +34,10 @@ use crate::values::typing::type_compiled::matchers::IsInt;
 use crate::values::typing::type_compiled::matchers::IsIterable;
 use crate::values::typing::type_compiled::matchers::IsList;
 use crate::values::typing::type_compiled::matchers::IsListOf;
-use crate::values::typing::type_compiled::matchers::IsName;
 use crate::values::typing::type_compiled::matchers::IsNever;
 use crate::values::typing::type_compiled::matchers::IsNone;
+use crate::values::typing::type_compiled::matchers::IsSet;
+use crate::values::typing::type_compiled::matchers::IsSetOf;
 use crate::values::typing::type_compiled::matchers::IsStr;
 use crate::values::typing::type_compiled::matchers::IsType;
 use crate::values::typing::type_compiled::matchers::StarlarkTypeIdMatcher;
@@ -134,22 +134,18 @@ pub trait TypeMatcherAlloc: Sized {
         }
     }
 
-    fn name(self, ty: &TyName) -> Self::Result {
-        self.alloc(IsName(ty.as_str().to_owned()))
-    }
-
     fn ty_basic(self, ty: &TyBasic) -> Self::Result {
         match ty {
             TyBasic::Any => self.any(),
-            TyBasic::Name(name) => self.name(name),
             TyBasic::StarlarkValue(x) => x.matcher(self),
             TyBasic::List(item) => self.list_of(item),
             TyBasic::Tuple(tuple) => tuple.matcher(self),
             TyBasic::Dict(k, v) => self.dict_of(k, v),
             TyBasic::Iter(_item) => self.alloc(IsIterable),
-            TyBasic::Callable => self.alloc(IsCallable),
+            TyBasic::Callable(_c) => self.alloc(IsCallable),
             TyBasic::Type => self.alloc(IsType),
             TyBasic::Custom(custom) => self.custom(custom),
+            TyBasic::Set(item) => self.set_of(item),
         }
     }
@@ -259,4 +255,47 @@ pub trait TypeMatcherAlloc: Sized {
             self.dict_of_matcher(k, v)
         }
     }
+
+    /// `set`.
+    fn set(self) -> Self::Result {
+        self.alloc(IsSet)
+    }
+
+    /// `set[Item]`.
+    fn set_of_matcher(self, item: impl TypeMatcher) -> Self::Result {
+        if item.is_wildcard() {
+            self.set()
+        } else {
+            self.alloc(IsSetOf(item))
+        }
+    }
+
+    /// `set[Item]`.
+ fn set_of_starlark_value(self, item: TyStarlarkValue) -> Self::Result { + if item.is_str() { + self.set_of_matcher(IsStr) + } else { + self.set_of_matcher(StarlarkTypeIdMatcher::new(item)) + } + } + + /// `set[Item]`. + fn set_of_basic(self, item: &TyBasic) -> Self::Result { + match item { + TyBasic::Any => self.set(), + TyBasic::StarlarkValue(ty) => self.set_of_starlark_value(*ty), + ty => self.set_of_matcher(TypeMatcherBoxAlloc.ty_basic(ty)), + } + } + + fn set_of(self, item: &Ty) -> Self::Result { + if item.is_any() { + self.set() + } else if let [ty] = item.iter_union() { + self.set_of_basic(ty) + } else { + let matcher = TypeMatcherBoxAlloc.ty(item); + self.set_of_matcher(matcher) + } + } } diff --git a/starlark-rust/starlark/src/values/typing/type_compiled/compiled.rs b/starlark-rust/starlark/src/values/typing/type_compiled/compiled.rs index fb19ad73a4e95..f6cb1b55ab8e5 100644 --- a/starlark-rust/starlark/src/values/typing/type_compiled/compiled.rs +++ b/starlark-rust/starlark/src/values/typing/type_compiled/compiled.rs @@ -23,7 +23,6 @@ use std::hash::Hash; use std::hash::Hasher; use allocative::Allocative; -use anyhow::Context; use dupe::Dupe; use starlark_derive::starlark_module; use starlark_derive::starlark_value; @@ -42,8 +41,8 @@ use crate::private::Private; use crate::typing::Ty; use crate::values::dict::DictRef; use crate::values::layout::avalue::alloc_static; +use crate::values::layout::avalue::AValueBasic; use crate::values::layout::avalue::AValueImpl; -use crate::values::layout::avalue::Basic; use crate::values::layout::heap::repr::AValueRepr; use crate::values::list::ListRef; use crate::values::none::NoneType; @@ -60,8 +59,10 @@ use crate::values::FrozenValue; use crate::values::Heap; use crate::values::NoSerialize; use crate::values::StarlarkValue; +use crate::values::StringValue; use crate::values::Trace; use crate::values::Value; +use crate::values::ValueLifetimeless; use crate::values::ValueLike; #[derive(Debug, Error)] @@ -82,11 +83,12 @@ enum TypingError { PerhapsYouMeant(String, String), #[error("Value of type `{1}` does not match type `{2}`: {0}")] ValueDoesNotMatchType(String, &'static str, String), + #[error("String literals are not allowed in type expressions: `{0}`")] + StringLiteralNotAllowed(String), } pub(crate) trait TypeCompiledDyn: Debug + Allocative + Send + Sync + 'static { fn as_ty_dyn(&self) -> &Ty; - fn matches_dyn(&self, value: Value) -> bool; fn is_runtime_wildcard_dyn(&self) -> bool; fn to_frozen_dyn(&self, heap: &FrozenHeap) -> TypeCompiled; } @@ -103,9 +105,6 @@ where fn as_ty_dyn(&self) -> &Ty { &self.ty } - fn matches_dyn(&self, value: Value) -> bool { - self.type_compiled_impl.matches(value) - } fn is_runtime_wildcard_dyn(&self) -> bool { self.type_compiled_impl.is_wildcard() } @@ -132,14 +131,14 @@ impl TypeCompiledImplAsStarlarkValue where TypeCompiledImplAsStarlarkValue: StarlarkValue<'static>, { - pub(crate) const fn alloc_static(imp: T, ty: Ty) -> AValueRepr> { - alloc_static( - Basic, - TypeCompiledImplAsStarlarkValue { - type_compiled_impl: imp, - ty, - }, - ) + pub(crate) const fn alloc_static( + imp: T, + ty: Ty, + ) -> AValueRepr>>> { + alloc_static(TypeCompiledImplAsStarlarkValue { + type_compiled_impl: imp, + ty, + }) } } @@ -168,12 +167,12 @@ where demand.provide_ref_static::(self); } - fn write_hash(&self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { + fn write_hash(&self, hasher: &mut StarlarkHasher) -> crate::Result<()> { Hash::hash(&self.ty, hasher); Ok(()) } - fn equals(&self, other: Value<'v>) -> anyhow::Result 
{ + fn equals(&self, other: Value<'v>) -> crate::Result { let Some(other) = other.downcast_ref::() else { return Ok(false); }; @@ -236,7 +235,7 @@ fn type_compiled_methods(methods: &mut MethodsBuilder) { ProvidesStaticType )] #[repr(transparent)] -pub struct TypeCompiled( +pub struct TypeCompiled( /// `V` is `TypeCompiledImplAsStarlarkValue`. V, ); @@ -253,7 +252,9 @@ impl<'v, V: ValueLike<'v>> Display for TypeCompiled { } } -impl StarlarkTypeRepr for TypeCompiled { +impl StarlarkTypeRepr for TypeCompiled { + type Canonical = TypeCompiledImplAsStarlarkValue; + fn starlark_type_repr() -> Ty { TypeCompiledImplAsStarlarkValue::::starlark_type_repr() } @@ -274,7 +275,7 @@ impl<'v, V: ValueLike<'v>> TypeCompiled { self.to_value() .0 .request_value::<&dyn TypeCompiledDyn>() - .context("Not TypeCompiledImpl (internal error)") + .ok_or_else(|| anyhow::anyhow!("Not TypeCompiledImpl (internal error)")) } /// Check if given value matches this type. @@ -295,20 +296,21 @@ impl<'v, V: ValueLike<'v>> TypeCompiled { #[cold] #[inline(never)] - fn check_type_error(self, value: Value<'v>, arg_name: Option<&str>) -> anyhow::Result<()> { - Err(TypingError::TypeAnnotationMismatch( - value.to_str(), - value.get_type().to_owned(), - self.to_string(), - match arg_name { - None => "return type".to_owned(), - Some(x) => format!("argument `{}`", x), - }, - ) - .into()) - } - - pub(crate) fn check_type(self, value: Value<'v>, arg_name: Option<&str>) -> anyhow::Result<()> { + fn check_type_error(self, value: Value<'v>, arg_name: Option<&str>) -> crate::Result<()> { + Err(crate::Error::new_other( + TypingError::TypeAnnotationMismatch( + value.to_str(), + value.get_type().to_owned(), + self.to_string(), + match arg_name { + None => "return type".to_owned(), + Some(x) => format!("argument `{}`", x), + }, + ), + )) + } + + pub(crate) fn check_type(self, value: Value<'v>, arg_name: Option<&str>) -> crate::Result<()> { if self.matches(value) { Ok(()) } else { @@ -324,12 +326,12 @@ impl<'v, V: ValueLike<'v>> TypeCompiled { self.0 } - pub(crate) fn write_hash(self, hasher: &mut StarlarkHasher) -> anyhow::Result<()> { + pub(crate) fn write_hash(self, hasher: &mut StarlarkHasher) -> crate::Result<()> { self.to_value().0.write_hash(hasher) } // Dead code, but may become useful in the future. - pub(crate) fn _equals(self, other: Self) -> anyhow::Result { + pub(crate) fn _equals(self, other: Self) -> crate::Result { self.to_value().0.equals(other.to_value().0) } } @@ -348,13 +350,10 @@ impl<'v, V: ValueLike<'v>> Hash for TypeCompiled { impl<'v, V: ValueLike<'v>> PartialEq for TypeCompiled { #[allow(clippy::manual_unwrap_or)] fn eq(&self, other: &Self) -> bool { - match self.0.to_value().equals(other.0.to_value()) { - Ok(b) => b, - Err(_) => { - // Unreachable, but we should not panic in `PartialEq`. - false - } - } + self.0 + .to_value() + .equals(other.0.to_value()) + .unwrap_or_default() } } @@ -392,6 +391,13 @@ impl<'v> TypeCompiled> { TypeCompiledFactory::alloc_ty(&Ty::list(t.as_ty().clone()), heap) } + pub(crate) fn type_set_of( + t: TypeCompiled>, + heap: &'v Heap, + ) -> TypeCompiled> { + TypeCompiledFactory::alloc_ty(&Ty::set(t.as_ty().clone()), heap) + } + pub(crate) fn type_any_of_two( t0: TypeCompiled>, t1: TypeCompiled>, @@ -418,11 +424,6 @@ impl<'v> TypeCompiled> { TypeCompiledFactory::alloc_ty(&ty, heap) } - /// For `p: "xxx"`, parse that `"xxx"` as type. - pub(crate) fn from_str(t: &str, heap: &'v Heap) -> TypeCompiled> { - TypeCompiledFactory::alloc_ty(&Ty::name(t), heap) - } - /// Parse `[t1, t2, ...]` as type. 
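
For embedders, the runtime flow mirrors `isinstance`: compile a type value once with `TypeCompiled::new`, then match values against it. A minimal sketch, assuming a `Heap` and the relevant `Value`s are already in hand (`value_matches` is an illustrative name):

use starlark::values::typing::TypeCompiled;
use starlark::values::Heap;
use starlark::values::Value;

// Compile `ty_value` (e.g. the value of `list[str]`) once, then reuse the matcher.
fn value_matches<'v>(heap: &'v Heap, ty_value: Value<'v>, x: Value<'v>) -> anyhow::Result<bool> {
    // Note: with this diff, string literals such as `"int"` are rejected here.
    let ty = TypeCompiled::new(ty_value, heap)?;
    Ok(ty.matches(x))
}
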
fn from_list(t: &ListRef<'v>, heap: &'v Heap) -> anyhow::Result>> { match t.content() { @@ -441,8 +442,8 @@ impl<'v> TypeCompiled> { /// Evaluate type annotation at runtime. pub fn new(ty: Value<'v>, heap: &'v Heap) -> anyhow::Result { - if let Some(s) = ty.unpack_str() { - Ok(TypeCompiled::from_str(s, heap)) + if let Some(s) = StringValue::new(ty) { + return Err(TypingError::StringLiteralNotAllowed(s.to_string()).into()); } else if ty.is_none() { Ok(TypeCompiledFactory::alloc_ty(&Ty::none(), heap)) } else if let Some(t) = Tuple::from_value(ty) { @@ -475,8 +476,9 @@ impl TypeCompiled { /// `typing.Any`. pub fn any() -> TypeCompiled { - static ANYTHING: AValueRepr>> = - TypeCompiledImplAsStarlarkValue::alloc_static(IsAny, Ty::any()); + static ANYTHING: AValueRepr< + AValueImpl<'static, AValueBasic>>, + > = TypeCompiledImplAsStarlarkValue::alloc_static(IsAny, Ty::any()); TypeCompiled::unchecked_new(FrozenValue::new_repr(&ANYTHING)) } diff --git a/starlark-rust/starlark/src/values/typing/type_compiled/factory.rs b/starlark-rust/starlark/src/values/typing/type_compiled/factory.rs index 84e1c300e54c7..394425ab70781 100644 --- a/starlark-rust/starlark/src/values/typing/type_compiled/factory.rs +++ b/starlark-rust/starlark/src/values/typing/type_compiled/factory.rs @@ -17,8 +17,8 @@ use crate::typing::custom::TyCustom; use crate::typing::Ty; +use crate::values::layout::avalue::AValueBasic; use crate::values::layout::avalue::AValueImpl; -use crate::values::layout::avalue::Basic; use crate::values::layout::heap::repr::AValueRepr; use crate::values::typing::type_compiled::alloc::TypeMatcherAlloc; use crate::values::typing::type_compiled::compiled::TypeCompiled; @@ -65,8 +65,9 @@ impl<'a, 'v> TypeMatcherAlloc for TypeCompiledFactory<'a, 'v> { fn none(self) -> TypeCompiled> { if self.ty == &Ty::none() { - static IS_NONE: AValueRepr>> = - TypeCompiledImplAsStarlarkValue::alloc_static(IsNone, Ty::none()); + static IS_NONE: AValueRepr< + AValueImpl<'static, AValueBasic>>, + > = TypeCompiledImplAsStarlarkValue::alloc_static(IsNone, Ty::none()); TypeCompiled::unchecked_new(FrozenValue::new_repr(&IS_NONE).to_value()) } else { @@ -76,8 +77,9 @@ impl<'a, 'v> TypeMatcherAlloc for TypeCompiledFactory<'a, 'v> { fn bool(self) -> TypeCompiled> { if self.ty == &Ty::bool() { - static IS_BOOL: AValueRepr>> = - TypeCompiledImplAsStarlarkValue::alloc_static(IsBool, Ty::bool()); + static IS_BOOL: AValueRepr< + AValueImpl<'static, AValueBasic>>, + > = TypeCompiledImplAsStarlarkValue::alloc_static(IsBool, Ty::bool()); TypeCompiled::unchecked_new(FrozenValue::new_repr(&IS_BOOL).to_value()) } else { @@ -87,8 +89,9 @@ impl<'a, 'v> TypeMatcherAlloc for TypeCompiledFactory<'a, 'v> { fn int(self) -> TypeCompiled> { if self.ty == &Ty::int() { - static IS_INT: AValueRepr>> = - TypeCompiledImplAsStarlarkValue::alloc_static(IsInt, Ty::int()); + static IS_INT: AValueRepr< + AValueImpl>>, + > = TypeCompiledImplAsStarlarkValue::alloc_static(IsInt, Ty::int()); TypeCompiled::unchecked_new(FrozenValue::new_repr(&IS_INT).to_value()) } else { @@ -99,7 +102,7 @@ impl<'a, 'v> TypeMatcherAlloc for TypeCompiledFactory<'a, 'v> { fn str(self) -> TypeCompiled> { if self.ty == &Ty::string() { static IS_STRING: AValueRepr< - AValueImpl>, + AValueImpl<'static, AValueBasic>>, > = TypeCompiledImplAsStarlarkValue::alloc_static(IsStr, Ty::string()); TypeCompiled::unchecked_new(FrozenValue::new_repr(&IS_STRING).to_value()) diff --git a/starlark-rust/starlark/src/values/typing/type_compiled/globals.rs 
b/starlark-rust/starlark/src/values/typing/type_compiled/globals.rs
index 48934987a8824..c9b114691e8cd 100644
--- a/starlark-rust/starlark/src/values/typing/type_compiled/globals.rs
+++ b/starlark-rust/starlark/src/values/typing/type_compiled/globals.rs
@@ -19,26 +19,59 @@ use starlark_derive::starlark_module;
 
 use crate as starlark;
 use crate::environment::GlobalsBuilder;
+use crate::eval::Evaluator;
+use crate::values::typing::ty::AbstractType;
 use crate::values::typing::type_compiled::compiled::TypeCompiled;
-use crate::values::Heap;
 use crate::values::Value;
+use crate::values::ValueOfUnchecked;
 
 #[starlark_module]
 pub(crate) fn register_eval_type(globals: &mut GlobalsBuilder) {
     /// Create a runtime type object which can be used to check if a value matches the given type.
     fn eval_type<'v>(
-        #[starlark(require = pos)] ty: Value<'v>,
-        heap: &'v Heap,
+        #[starlark(require = pos)] ty: ValueOfUnchecked<'v, AbstractType>,
+        eval: &mut Evaluator<'v, '_, '_>,
     ) -> anyhow::Result<TypeCompiled<Value<'v>>> {
-        TypeCompiled::new(ty, heap)
+        TypeCompiled::new(ty.get(), eval.heap())
     }
 
     /// Check if a value matches the given type.
+    ///
+    /// This operation can be very fast or very slow depending on how it is used.
+    ///
+    /// `isinstance(x, list)` is very fast,
+    /// because it is compiled to a special bytecode instruction.
+    ///
+    /// `isinstance(x, list[str])` is an `O(N)` operation
+    /// because it checks every element in this list.
+    ///
+    /// `L = list; [isinstance(x, L) for x in y]` is slow when `L` is not a constant:
+    /// `isinstance()` first converts `list` to a type in a loop, which is slow.
+    ///
+    /// But the last operation can be optimized like this:
+    /// `L = eval_type(list); [isinstance(x, L) for x in y]`:
+    /// `eval_type()` converts the `list` value into a prepared type matcher.
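
The optimization described in the doc comment above, as a runnable snippet (a sketch using the crate's `Assert` helper; `count_strings` is an illustrative name):

use starlark::assert::Assert;

fn eval_type_caching_demo() {
    let a = Assert::new();
    a.pass(
        r#"
def count_strings(xs) -> int:
    # Compile `str` into a matcher once, instead of once per element.
    S = eval_type(str)
    return len([x for x in xs if isinstance(x, S)])

count_strings(["a", 1, "b"])
"#,
    );
}
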
fn isinstance<'v>( #[starlark(require = pos)] value: Value<'v>, - #[starlark(require = pos)] ty: Value<'v>, - heap: &'v Heap, + #[starlark(require = pos)] ty: ValueOfUnchecked<'v, AbstractType>, + eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { - Ok(TypeCompiled::new(ty, heap)?.matches(value)) + Ok(TypeCompiled::new(ty.get(), eval.heap())?.matches(value)) + } +} + +#[cfg(test)] +mod tests { + use crate::assert; + + #[test] + fn test_typechecking() { + assert::fail( + r#" +def test(): + isinstance(1, "") +"#, + "Expected type `type` but got `str`", + ); } } diff --git a/starlark-rust/starlark/src/values/typing/type_compiled/matchers.rs b/starlark-rust/starlark/src/values/typing/type_compiled/matchers.rs index 7a8ea57122087..606f27424f527 100644 --- a/starlark-rust/starlark/src/values/typing/type_compiled/matchers.rs +++ b/starlark-rust/starlark/src/values/typing/type_compiled/matchers.rs @@ -26,10 +26,12 @@ use crate::values::dict::value::FrozenDict; use crate::values::dict::DictRef; use crate::values::list::value::FrozenList; use crate::values::list::ListRef; +use crate::values::set::refs::SetRef; +use crate::values::set::value::FrozenSet; use crate::values::starlark_type_id::StarlarkTypeId; use crate::values::starlark_type_id::StarlarkTypeIdAligned; use crate::values::tuple::value::Tuple; -use crate::values::types::int_or_big::StarlarkIntRef; +use crate::values::types::int::int_or_big::StarlarkIntRef; use crate::values::typing::type_compiled::matcher::TypeMatcher; use crate::values::typing::type_compiled::matcher::TypeMatcherBox; use crate::values::UnpackValue; @@ -172,6 +174,27 @@ impl TypeMatcher for IsDictOf { } } +#[derive(Clone, Copy, Dupe, Allocative, Debug)] +pub(crate) struct IsSet; + +impl TypeMatcher for IsSet { + fn matches(&self, value: Value) -> bool { + value.starlark_type_id() == StarlarkTypeId::of::() + } +} + +#[derive(Clone, Allocative, Debug)] +pub(crate) struct IsSetOf(pub(crate) I); + +impl TypeMatcher for IsSetOf { + fn matches(&self, value: Value) -> bool { + match SetRef::unpack_value_opt(value) { + Some(set) => set.aref.iter().all(|v| self.0.matches(v)), + _ => false, + } + } +} + #[derive(Clone, Allocative, Debug)] pub(crate) struct IsAnyOfTwo(pub(crate) A, pub(crate) B); @@ -222,7 +245,7 @@ pub(crate) struct IsInt; impl TypeMatcher for IsInt { fn matches(&self, value: Value) -> bool { - StarlarkIntRef::unpack_value(value).is_some() + StarlarkIntRef::unpack(value).is_some() } } @@ -268,6 +291,6 @@ pub(crate) struct IsName(pub(crate) String); impl TypeMatcher for IsName { fn matches(&self, value: Value) -> bool { - value.get_ref().matches_type(&self.0) + self.0 == value.get_type() } } diff --git a/starlark-rust/starlark/src/values/typing/type_compiled/tests.rs b/starlark-rust/starlark/src/values/typing/type_compiled/tests.rs index 8063d2450de0c..e90fc124f5467 100644 --- a/starlark-rust/starlark/src/values/typing/type_compiled/tests.rs +++ b/starlark-rust/starlark/src/values/typing/type_compiled/tests.rs @@ -98,9 +98,9 @@ def g(): r#" isinstance(1, int) isinstance(True, bool) -isinstance(True, "") +isinstance(True, typing.Any) isinstance(None, None) -isinstance(assert_type, "function") +isinstance(assert_type, typing.Callable) isinstance([], list[int]) isinstance([], list[typing.Any]) isinstance([1, 2, 3], list[int]) @@ -117,8 +117,8 @@ not isinstance([1,2,None], list[int]) not isinstance({"test": 1, 8: 2}, dict[str, int]) not isinstance({"test": 1, "more": None}, dict[str, int]) -isinstance(1, "") -isinstance([1,2,"test"], list["_a"]) +isinstance(1, 
typing.Any) +isinstance([1,2,"test"], list) "#, ); @@ -178,11 +178,10 @@ fn test_type_compiled_display() { assert_eq!(expected, ty.to_string(), "for `{}`", ty0); } - t("typing.Any", "\"\""); - t("list[typing.Any]", "list"); - t("list[typing.Any]", "list[\"\"]"); + t("typing.Any", "typing.Any"); + t("list", "list"); + t("list", "list[typing.Any]"); t("None", "None"); - t("\"a\" | \"b\"", "[\"a\", \"b\"]"); } #[test] diff --git a/starlark-rust/starlark/src/values/typing/type_type.rs b/starlark-rust/starlark/src/values/typing/type_type.rs new file mode 100644 index 0000000000000..c473d39fafdfc --- /dev/null +++ b/starlark-rust/starlark/src/values/typing/type_type.rs @@ -0,0 +1,79 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::convert::Infallible; + +use crate::typing::Ty; +use crate::typing::TyStarlarkValue; +use crate::values::type_repr::StarlarkTypeRepr; +use crate::values::typing::ty::AbstractType; +use crate::values::UnpackValue; +use crate::values::Value; + +/// Represent a type of type. (For example, an expression `int` is valid for this type.) +pub struct TypeType(()); + +impl StarlarkTypeRepr for TypeType { + type Canonical = AbstractType; + + fn starlark_type_repr() -> Ty { + ::starlark_type_repr() + } +} + +/// Validate the value is type. +impl<'v> UnpackValue<'v> for TypeType { + type Error = Infallible; + + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + if TyStarlarkValue::is_type_from_vtable(&value.vtable().starlark_value) { + Ok(Some(TypeType(()))) + } else { + Ok(None) + } + } +} + +#[cfg(test)] +mod tests { + use starlark_derive::starlark_module; + + use crate as starlark; + use crate::assert::Assert; + use crate::environment::GlobalsBuilder; + use crate::values::none::NoneType; + use crate::values::typing::type_type::TypeType; + + #[test] + fn test() { + #[starlark_module] + fn module(globals: &mut GlobalsBuilder) { + fn takes_type(#[starlark(require = pos)] _t: TypeType) -> anyhow::Result { + Ok(NoneType) + } + } + + let mut a = Assert::new(); + a.globals_add(module); + a.pass("takes_type(int)"); + a.pass("takes_type(list[str] | None)"); + a.fail( + "takes_type(1)", + "Type of parameter `_t` doesn't match, expected `type`,", + ); + } +} diff --git a/starlark-rust/starlark/src/values/unpack.rs b/starlark-rust/starlark/src/values/unpack.rs index 165b41ddf0b6c..6f1ac7e94981f 100644 --- a/starlark-rust/starlark/src/values/unpack.rs +++ b/starlark-rust/starlark/src/values/unpack.rs @@ -17,19 +17,76 @@ //! Parameter conversion utilities for `starlark_module` macros. 
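
Tying the new `type_type.rs` together: a sketch of `TypeType` as a cheap "this argument must be a type" guard in a native function (module and function names are illustrative):

use starlark::environment::GlobalsBuilder;
use starlark::starlark_module;
use starlark::values::none::NoneType;
use starlark::values::typing::TypeType;

#[starlark_module]
fn type_arg_module(globals: &mut GlobalsBuilder) {
    // Accepts type values such as `int` or `list[str] | None`; rejects e.g. `1`
    // with "Type of parameter `_t` doesn't match, expected `type`".
    fn takes_type(#[starlark(require = pos)] _t: TypeType) -> anyhow::Result<NoneType> {
        Ok(NoneType)
    }
}
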
-use std::ops::Deref; +use std::convert::Infallible; +use std::fmt::Debug; -use dupe::Dupe; +use anyhow::Context; use either::Either; +use starlark_syntax::StarlarkResultExt; use crate::typing::Ty; -use crate::values::list::ListRef; use crate::values::type_repr::StarlarkTypeRepr; -use crate::values::types::tuple::value::Tuple; -use crate::values::AllocValue; -use crate::values::Heap; use crate::values::Value; -use crate::values::ValueError; + +/// Error that can be returned by [`UnpackValue`]. +pub trait UnpackValueError: Debug + Send + Sync + 'static { + /// Convert into a crate error. + fn into_error(this: Self) -> crate::Error; +} + +impl UnpackValueError for crate::Error { + #[cold] + fn into_error(this: Self) -> crate::Error { + this + } +} + +impl UnpackValueError for anyhow::Error { + #[cold] + fn into_error(this: Self) -> crate::Error { + crate::Error::new_value(this) + } +} + +impl UnpackValueError for Infallible { + #[cold] + fn into_error(this: Self) -> crate::Error { + match this {} + } +} + +impl UnpackValueError for Either { + #[cold] + fn into_error(this: Self) -> crate::Error { + match this { + Either::Left(a) => UnpackValueError::into_error(a), + Either::Right(b) => UnpackValueError::into_error(b), + } + } +} + +/// Never error. +pub trait UnpackValueErrorInfallible: UnpackValueError { + /// Convert into a never type. + fn into_infallible(this: Self) -> !; +} + +impl UnpackValueErrorInfallible for Infallible { + fn into_infallible(this: Self) -> ! { + match this {} + } +} + +impl UnpackValueErrorInfallible + for Either +{ + fn into_infallible(this: Self) -> ! { + match this { + Either::Left(a) => UnpackValueErrorInfallible::into_infallible(a), + Either::Right(b) => UnpackValueErrorInfallible::into_infallible(b), + } + } +} /// How to convert a [`Value`] to a Rust type. Required for all arguments in /// a [`#[starlark_module]`](macro@crate::starlark_module) definition. @@ -42,7 +99,13 @@ use crate::values::ValueError; /// # use starlark::any::ProvidesStaticType; /// # use starlark::values::{NoSerialize, StarlarkValue, starlark_value}; /// -/// #[derive(Debug, derive_more::Display, Allocative, NoSerialize, ProvidesStaticType)] +/// #[derive( +/// Debug, +/// derive_more::Display, +/// Allocative, +/// NoSerialize, +/// ProvidesStaticType +/// )] /// struct MySimpleValue; /// /// #[starlark_value(type = "MySimpleValue", UnpackValue, StarlarkTypeRepr)] @@ -60,163 +123,160 @@ use crate::values::ValueError; /// struct BoolOrInt(i32); /// /// impl StarlarkTypeRepr for BoolOrInt { +/// type Canonical = as StarlarkTypeRepr>::Canonical; +/// /// fn starlark_type_repr() -> Ty { /// Either::::starlark_type_repr() /// } /// } /// /// impl<'v> UnpackValue<'v> for BoolOrInt { -/// fn unpack_value(value: Value<'v>) -> Option { +/// type Error = starlark::Error; +/// +/// fn unpack_value_impl(value: Value<'v>) -> starlark::Result> { /// if let Some(x) = value.unpack_bool() { -/// Some(BoolOrInt(x as i32)) +/// Ok(Some(BoolOrInt(x as i32))) /// } else { -/// value.unpack_i32().map(BoolOrInt) +/// let Some(x) = i32::unpack_value(value)? else { +/// return Ok(None); +/// }; +/// Ok(Some(BoolOrInt(x))) /// } /// } /// } /// ``` pub trait UnpackValue<'v>: Sized + StarlarkTypeRepr { - /// Description of values acceptable by `unpack_value`, e. g. `list or str`. - fn expected() -> String { - Self::starlark_type_repr().to_string() + /// Error returned when type matches, but conversion fails. + /// + /// Typically [`starlark::Error`](crate::Error), [`anyhow::Error`], or [`Infallible`]. 
+    type Error: UnpackValueError;
+
+    /// Given a [`Value`], try and unpack it into the given type,
+    /// which may involve some element of conversion.
+    ///
+    /// Return `None` if the value is not of the expected type (as described by
+    /// [`StarlarkTypeRepr`]), and return `Err` if the value is of the expected type,
+    /// but conversion cannot be performed.
+    /// For example, when unpacking an integer to `String`, return `None`,
+    /// and when unpacking a large integer to `i32`, return `Err`.
+    ///
+    /// This function needs to be implemented, but is usually not meant to be called directly.
+    /// Consider using [`unpack_value`](UnpackValue::unpack_value),
+    /// [`unpack_value_err`](UnpackValue::unpack_value_err),
+    /// or [`unpack_value_opt`](UnpackValue::unpack_value_opt) instead.
+    fn unpack_value_impl(value: Value<'v>) -> Result<Option<Self>, Self::Error>;
+
+    /// Given a [`Value`], try and unpack it into the given type,
+    /// which may involve some element of conversion.
+    ///
+    /// Return `None` if the value is not of the expected type (as described by
+    /// [`StarlarkTypeRepr`]), and return `Err` if the value is of the expected type,
+    /// but conversion cannot be performed.
+    /// For example, when unpacking an integer to `String`, return `None`,
+    /// and when unpacking a large integer to `i32`, return `Err`.
+    fn unpack_value(value: Value<'v>) -> Result<Option<Self>, crate::Error> {
+        Self::unpack_value_impl(value).map_err(Self::Error::into_error)
     }
 
-    /// Given a [`Value`], try and unpack it into the given type, which may involve some element of conversion.
-    fn unpack_value(value: Value<'v>) -> Option<Self>;
+    /// Unpack a value if unpacking is infallible.
+    fn unpack_value_opt(value: Value<'v>) -> Option<Self>
+    where
+        Self::Error: UnpackValueErrorInfallible,
+    {
+        match Self::unpack_value_impl(value) {
+            Ok(x) => x,
+            Err(e) => Self::Error::into_infallible(e),
+        }
+    }
 
     /// Unpack a value, but return error instead of `None` if unpacking fails.
+    #[inline]
     fn unpack_value_err(value: Value<'v>) -> anyhow::Result<Self> {
-        #[derive(thiserror::Error, Debug)]
-        #[error("Expected `{0}`, but got `{1}`")]
-        struct Error(String, &'static str);
+        #[cold]
+        fn error<'v>(value: Value<'v>, ty: fn() -> Ty) -> anyhow::Error {
+            #[derive(thiserror::Error, Debug)]
+            #[error("Expected `{0}`, but got `{1}`")]
+            struct IncorrectType(Ty, String);
+
+            crate::Error::new_value(IncorrectType(ty(), value.to_string_for_type_error()))
+                .into_anyhow()
+        }
 
-        Self::unpack_value(value).ok_or_else(|| Error(Self::expected(), value.get_type()).into())
+        Self::unpack_value(value)
+            .into_anyhow_result()?
+            .ok_or_else(|| error(value, Self::starlark_type_repr))
     }
 
     /// Unpack value, but instead of `None` return error about incorrect argument type.
     #[inline]
     fn unpack_param(value: Value<'v>) -> anyhow::Result<Self> {
         #[cold]
-        fn error<'v, U: UnpackValue<'v>>(value: Value<'v>) -> anyhow::Error {
-            ValueError::IncorrectParameterTypeWithExpected(
-                U::expected(),
-                value.get_type().to_owned(),
-            )
-            .into()
+        fn error<'v>(value: Value<'v>, ty: fn() -> Ty) -> anyhow::Error {
+            #[derive(thiserror::Error, Debug)]
+            #[error("Type of parameters mismatch, expected `{0}`, actual `{1}`")]
+            struct IncorrectParameterTypeWithExpected(Ty, String);
+
+            crate::Error::new_value(IncorrectParameterTypeWithExpected(
+                ty(),
+                value.to_string_for_type_error(),
+            ))
+            .into_anyhow()
         }
 
-        Self::unpack_value(value).ok_or_else(|| error::<Self>(value))
+        Self::unpack_value(value)
+            .into_anyhow_result()?
+ .ok_or_else(|| error(value, Self::starlark_type_repr)) } /// Unpack value, but instead of `None` return error about incorrect named argument type. #[inline] fn unpack_named_param(value: Value<'v>, param_name: &str) -> anyhow::Result { #[cold] - fn error<'v, U: UnpackValue<'v>>(value: Value<'v>, param_name: &str) -> anyhow::Error { - ValueError::IncorrectParameterTypeNamedWithExpected( + fn error<'v>(value: Value<'v>, param_name: &str, ty: fn() -> Ty) -> anyhow::Error { + #[derive(thiserror::Error, Debug)] + #[error("Type of parameter `{0}` doesn't match, expected `{1}`, actual `{2}`")] + struct IncorrectParameterTypeNamedWithExpected(String, Ty, String); + + crate::Error::new_value(IncorrectParameterTypeNamedWithExpected( param_name.to_owned(), - U::expected(), - value.get_type().to_owned(), - ) - .into() + ty(), + value.to_string_for_type_error(), + )) + .into_anyhow() } - Self::unpack_value(value).ok_or_else(|| error::(value, param_name)) + Self::unpack_value(value) + .into_anyhow_result() + .with_context(|| { + format!( + "Error unpacking value for parameter `{}` of type `{}", + param_name, + Self::starlark_type_repr() + ) + })? + .ok_or_else(|| error(value, param_name, Self::starlark_type_repr)) } } impl<'v> UnpackValue<'v> for Value<'v> { - fn expected() -> String { - "Value".to_owned() - } - - fn unpack_value(value: Value<'v>) -> Option { - Some(value) - } -} - -/// A wrapper that keeps the original value on the heap for use elsewhere, -/// and also, when unpacked, unpacks the value to validate it is of -/// the correct type. Has an [`UnpackValue`] instance, so often used as -/// an argument to [`#[starlark_module]`](macro@crate::starlark_module) defined -/// functions. -/// -/// Two container specializations of this are [`ListOf`](crate::values::list::ListOf) -/// and [`DictOf`](crate::values::dict::DictOf), which -/// validate the types of their containers on unpack, but do not store the -/// resulting Vec/Map -#[derive(Debug, Copy, Clone, Dupe)] -pub struct ValueOf<'v, T: UnpackValue<'v>> { - /// The original [`Value`] on the same heap. - pub value: Value<'v>, - /// The value that was unpacked. - pub typed: T, -} - -impl<'v, T: UnpackValue<'v>> Deref for ValueOf<'v, T> { - type Target = Value<'v>; - - fn deref(&self) -> &Self::Target { - &self.value - } -} + type Error = Infallible; -impl<'v, T: UnpackValue<'v>> StarlarkTypeRepr for ValueOf<'v, T> { - fn starlark_type_repr() -> Ty { - T::starlark_type_repr() - } -} - -impl<'v, T: UnpackValue<'v>> UnpackValue<'v> for ValueOf<'v, T> { - fn expected() -> String { - T::expected() - } - - fn unpack_value(value: Value<'v>) -> Option { - let typed = T::unpack_value(value)?; - Some(Self { value, typed }) - } -} - -impl<'v, T: UnpackValue<'v>> AllocValue<'v> for ValueOf<'v, T> { - fn alloc_value(self, _heap: &'v Heap) -> Value<'v> { - self.value + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + Ok(Some(value)) } } impl<'v, TLeft: UnpackValue<'v>, TRight: UnpackValue<'v>> UnpackValue<'v> for Either { - fn expected() -> String { - format!("either {} or {}", TLeft::expected(), TRight::expected()) - } + type Error = Either; // Only implemented for types that implement [`UnpackValue`]. Nonsensical for other types. 
- fn unpack_value(value: Value<'v>) -> Option { - if let Some(left) = TLeft::unpack_value(value) { - Some(Self::Left(left)) - } else { - TRight::unpack_value(value).map(Self::Right) - } - } -} - -impl<'v, T: UnpackValue<'v>> UnpackValue<'v> for Vec { - fn expected() -> String { - format!("list or tuple of {}", T::expected()) - } - - fn unpack_value(value: Value<'v>) -> Option { - if let Some(o) = ListRef::from_value(value) { - o.iter().map(T::unpack_value).collect::>>() - } else if let Some(o) = Tuple::from_value(value) { - // TODO(nga): `StarlarkTypeRepr` for `Vec` says is `list`, - // but here we also accept `tuple`. - // So native function declaring it accepts `Vec`, - // does not accept tuple at compile time, but accepts it at runtime. - o.iter().map(T::unpack_value).collect::>>() + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + if let Some(left) = TLeft::unpack_value_impl(value).map_err(Either::Left)? { + Ok(Some(Self::Left(left))) } else { - None + Ok(TRight::unpack_value_impl(value) + .map_err(Either::Right)? + .map(Self::Right)) } } } diff --git a/starlark-rust/starlark/src/values/unpack_and_discard.rs b/starlark-rust/starlark/src/values/unpack_and_discard.rs new file mode 100644 index 0000000000000..b670182879cbe --- /dev/null +++ b/starlark-rust/starlark/src/values/unpack_and_discard.rs @@ -0,0 +1,54 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::marker::PhantomData; + +use allocative::Allocative; +use dupe::Clone_; +use dupe::Copy_; +use dupe::Dupe_; + +use crate::values::type_repr::StarlarkTypeRepr; +use crate::values::UnpackValue; +use crate::values::Value; + +/// Unpack the value of type `T`, but do not store result. +/// +/// This can be used when type needs to be checked, but the unpacked value is not needed. +#[derive(Clone_, Copy_, Dupe_, Allocative)] +#[allocative(bound = "")] +pub struct UnpackAndDiscard(PhantomData T>); + +impl StarlarkTypeRepr for UnpackAndDiscard { + type Canonical = T::Canonical; + + fn starlark_type_repr() -> crate::typing::Ty { + ::starlark_type_repr() + } +} + +impl<'v, T: UnpackValue<'v>> UnpackValue<'v> for UnpackAndDiscard { + type Error = T::Error; + + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + match T::unpack_value_impl(value) { + Ok(None) => Ok(None), + Ok(Some(_)) => Ok(Some(UnpackAndDiscard(PhantomData))), + Err(e) => Err(e), + } + } +} diff --git a/starlark-rust/starlark/src/values/value_of.rs b/starlark-rust/starlark/src/values/value_of.rs new file mode 100644 index 0000000000000..cec618dd056e8 --- /dev/null +++ b/starlark-rust/starlark/src/values/value_of.rs @@ -0,0 +1,81 @@ +/* + * Copyright 2018 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::ops::Deref; + +use dupe::Dupe; + +use crate::typing::Ty; +use crate::values::type_repr::StarlarkTypeRepr; +use crate::values::AllocValue; +use crate::values::Heap; +use crate::values::UnpackValue; +use crate::values::Value; +use crate::values::ValueOfUnchecked; + +/// A wrapper that keeps the original value on the heap for use elsewhere, +/// and also, when unpacked, unpacks the value to validate it is of +/// the correct type. Has an [`UnpackValue`] instance, so often used as +/// an argument to [`#[starlark_module]`](macro@crate::starlark_module) defined +/// functions. +#[derive(Debug, Copy, Clone, Dupe)] +pub struct ValueOf<'v, T: UnpackValue<'v>> { + /// The original [`Value`] on the same heap. + pub value: Value<'v>, + /// The value that was unpacked. + pub typed: T, +} + +impl<'v, T: UnpackValue<'v>> ValueOf<'v, T> { + /// Convert to `ValueOfUnchecked`. + pub fn as_unchecked(&self) -> ValueOfUnchecked<'v, T> { + ValueOfUnchecked::new(self.value) + } +} + +impl<'v, T: UnpackValue<'v>> Deref for ValueOf<'v, T> { + type Target = Value<'v>; + + fn deref(&self) -> &Self::Target { + &self.value + } +} + +impl<'v, T: UnpackValue<'v>> StarlarkTypeRepr for ValueOf<'v, T> { + type Canonical = T::Canonical; + + fn starlark_type_repr() -> Ty { + T::starlark_type_repr() + } +} + +impl<'v, T: UnpackValue<'v>> UnpackValue<'v> for ValueOf<'v, T> { + type Error = T::Error; + + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + let Some(typed) = T::unpack_value_impl(value)? else { + return Ok(None); + }; + Ok(Some(Self { value, typed })) + } +} + +impl<'v, T: UnpackValue<'v>> AllocValue<'v> for ValueOf<'v, T> { + fn alloc_value(self, _heap: &'v Heap) -> Value<'v> { + self.value + } +} diff --git a/starlark-rust/starlark/src/values/value_of_unchecked.rs b/starlark-rust/starlark/src/values/value_of_unchecked.rs index 98422f312b548..ed03db5aab643 100644 --- a/starlark-rust/starlark/src/values/value_of_unchecked.rs +++ b/starlark-rust/starlark/src/values/value_of_unchecked.rs @@ -15,7 +15,10 @@ * limitations under the License. */ +use std::convert::Infallible; +use std::fmt; use std::fmt::Debug; +use std::fmt::Display; use std::fmt::Formatter; use std::marker::PhantomData; @@ -24,81 +27,206 @@ use dupe::Clone_; use dupe::Copy_; use dupe::Dupe_; +use crate::coerce::Coerce; use crate::typing::Ty; use crate::values::type_repr::StarlarkTypeRepr; +use crate::values::AllocFrozenValue; use crate::values::AllocValue; +use crate::values::Freeze; +use crate::values::Freezer; +use crate::values::FrozenHeap; +use crate::values::FrozenValue; use crate::values::Heap; use crate::values::Trace; use crate::values::Tracer; use crate::values::UnpackValue; use crate::values::Value; +use crate::values::ValueLifetimeless; +use crate::values::ValueLike; -/// Starlark value with type annotation. -/// -/// Can be used in function signatures to provide types to the type checker. -/// -/// Note this type does not actually check the type of the value. -/// Providing incorrect type annotation will result -/// in incorrect error reporting by the type checker. 
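
The relocated `ValueOf` keeps its classic use: unpack a typed view while retaining the original heap value. A minimal sketch (names illustrative):

use starlark::environment::GlobalsBuilder;
use starlark::starlark_module;
use starlark::values::ValueOf;

#[starlark_module]
fn value_of_module(globals: &mut GlobalsBuilder) {
    // `v.typed` is the unpacked `i32`; `v.value` is the original heap value,
    // still available for storage or for conversion via the new `as_unchecked()`.
    fn add_one<'v>(#[starlark(require = pos)] v: ValueOf<'v, i32>) -> anyhow::Result<i32> {
        Ok(v.typed + 1)
    }
}
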
+/// Store value annotated with type, but do not check the type. #[derive(Clone_, Copy_, Dupe_, Allocative)] #[allocative(bound = "")] -pub struct ValueOfUnchecked<'v, T: StarlarkTypeRepr>(Value<'v>, PhantomData); +pub struct ValueOfUncheckedGeneric( + V, + PhantomData T>, +); -impl<'v, T: StarlarkTypeRepr> Debug for ValueOfUnchecked<'v, T> { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - f.debug_tuple("ValueOfUnchecked").field(&self.0).finish() - } +unsafe impl Coerce> for ValueOfUncheckedGeneric +where + V: ValueLifetimeless, + U: ValueLifetimeless, + U: Coerce, + T: StarlarkTypeRepr, +{ } -impl<'v, T: StarlarkTypeRepr> ValueOfUnchecked<'v, T> { +impl ValueOfUncheckedGeneric { /// New. #[inline] - pub fn new(value: Value<'v>) -> Self { + pub fn new(value: V) -> Self { Self(value, PhantomData) } - /// Construct after checking the type. + /// Cast to a different Rust type for the same Starlark type. #[inline] - pub fn new_checked(value: Value<'v>) -> anyhow::Result - where - T: UnpackValue<'v>, - { - T::unpack_value_err(value)?; - Ok(Self::new(value)) + pub fn cast>( + self, + ) -> ValueOfUncheckedGeneric { + ValueOfUncheckedGeneric::new(self.0) } /// Get the value. #[inline] - pub fn get(self) -> Value<'v> { + pub fn get(self) -> V { self.0 } + + /// Unpack the value. + pub fn unpack<'v>(self) -> crate::Result + where + V: ValueLike<'v>, + T: UnpackValue<'v>, + { + Ok(T::unpack_value_err(self.get().to_value())?) + } +} + +impl Debug for ValueOfUncheckedGeneric { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_tuple("ValueOfUnchecked") + .field(&self.get()) + .finish() + } } -impl<'v, T: StarlarkTypeRepr> StarlarkTypeRepr for ValueOfUnchecked<'v, T> { +impl Display for ValueOfUncheckedGeneric { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + Display::fmt(&self.get(), f) + } +} + +impl StarlarkTypeRepr for ValueOfUncheckedGeneric { + type Canonical = T::Canonical; + fn starlark_type_repr() -> Ty { - T::starlark_type_repr() + ::Canonical::starlark_type_repr() } } -impl<'v, T: StarlarkTypeRepr> AllocValue<'v> for ValueOfUnchecked<'v, T> { - #[inline] +impl<'v, V: ValueLike<'v>, T: StarlarkTypeRepr> AllocValue<'v> for ValueOfUncheckedGeneric { fn alloc_value(self, _heap: &'v Heap) -> Value<'v> { + self.0.to_value() + } +} + +impl AllocFrozenValue for ValueOfUncheckedGeneric { + fn alloc_frozen_value(self, _heap: &FrozenHeap) -> FrozenValue { self.0 } } +unsafe impl<'v, V, T> Trace<'v> for ValueOfUncheckedGeneric +where + // This is essentially `V: ValueLike<'v>`, + // but for derive it is convenient to have these bounds. + V: ValueLifetimeless + Trace<'v>, + T: StarlarkTypeRepr, +{ + fn trace(&mut self, tracer: &Tracer<'v>) { + self.0.trace(tracer) + } +} + +impl Freeze for ValueOfUncheckedGeneric { + type Frozen = ValueOfUncheckedGeneric; + + fn freeze(self, freezer: &Freezer) -> anyhow::Result { + let frozen = self.0.freeze(freezer)?; + Ok(ValueOfUncheckedGeneric::new(frozen)) + } +} + +/// Starlark value with type annotation. +/// +/// Can be used in function signatures to provide types to the type checker. +/// +/// Note this type does not actually check the type of the value. +/// Providing incorrect type annotation will result +/// in incorrect error reporting by the type checker. +pub type ValueOfUnchecked<'v, T> = ValueOfUncheckedGeneric, T>; + +/// Frozen starlark value with type annotation. 
+pub type FrozenValueOfUnchecked<'f, T> = ValueOfUncheckedGeneric; + +impl<'v, T: StarlarkTypeRepr> ValueOfUnchecked<'v, T> { + /// Construct after checking the type. + #[inline] + pub fn new_checked(value: Value<'v>) -> anyhow::Result + where + T: UnpackValue<'v>, + { + T::unpack_value_err(value)?; + Ok(Self::new(value)) + } +} + +impl<'v, V: ValueLike<'v>, T: StarlarkTypeRepr> ValueOfUncheckedGeneric { + /// Convert to a value. + #[inline] + pub fn to_value(self) -> ValueOfUnchecked<'v, T> { + ValueOfUnchecked::new(self.0.to_value()) + } +} + impl<'v, T: StarlarkTypeRepr> UnpackValue<'v> for ValueOfUnchecked<'v, T> { + type Error = Infallible; + #[inline] - fn unpack_value(value: Value<'v>) -> Option { - Some(Self::new(value)) + fn unpack_value_impl(value: Value<'v>) -> Result, Self::Error> { + Ok(Some(Self::new(value))) } } -unsafe impl<'v, T: StarlarkTypeRepr> Trace<'v> for ValueOfUnchecked<'v, T> { - fn trace(&mut self, tracer: &Tracer<'v>) { - // TODO(nga): should derive, but to do that we need to implement `#[trace(bound = "")]` - let ValueOfUnchecked(value, phantom) = self; - value.trace(tracer); - phantom.trace(tracer); +#[cfg(test)] +mod tests { + use std::rc::Rc; + + use crate::const_frozen_string; + use crate::typing::Ty; + use crate::values::type_repr::StarlarkTypeRepr; + use crate::values::FrozenValueOfUnchecked; + use crate::values::ValueOfUnchecked; + + #[test] + fn test_cast_example() { + let a = + ValueOfUnchecked::::new_checked(const_frozen_string!("a").to_value()).unwrap(); + let _b: ValueOfUnchecked<&str> = a.cast(); + } + + #[test] + fn test_frozen_value_of_unchecked_send_sync() { + fn assert_send_sync() {} + + #[allow(dead_code)] + struct ReprNotSendSync(Rc); + impl StarlarkTypeRepr for ReprNotSendSync { + type Canonical = Self; + fn starlark_type_repr() -> Ty { + panic!("not needed in test") + } + } + + assert_send_sync::>(); + } + + #[test] + fn test_frozen_value_of_unchecked_covariant() { + fn _assert_covariant<'a>( + _value: FrozenValueOfUnchecked<'static, String>, + ) -> FrozenValueOfUnchecked<'a, String> { + panic!() + } } } diff --git a/starlark-rust/starlark/testcases/eval/go/README.md b/starlark-rust/starlark/testcases/eval/go/README.md index 4eab164d768c5..f078fc4f3e227 100644 --- a/starlark-rust/starlark/testcases/eval/go/README.md +++ b/starlark-rust/starlark/testcases/eval/go/README.md @@ -1,5 +1,7 @@ # Go evaluation test cases -The Go Starlark project maintains a set of test cases, which were mirrored here. The original source -is https://github.com/google/starlark-go/blob/e81fc95f7bd5bb1495fe69f27c1a99fcc77caa48/starlark/testdata/. -Note that some files were not copied, because they are unsuitable tests for Starlark, as described in the `test_go` function. +The Go Starlark project maintains a set of test cases, which were mirrored here. +The original source is +https://github.com/google/starlark-go/blob/e81fc95f7bd5bb1495fe69f27c1a99fcc77caa48/starlark/testdata/. +Note that some files were not copied, because they are unsuitable tests for +Starlark, as described in the `test_go` function. diff --git a/starlark-rust/starlark/testcases/eval/go/function.star b/starlark-rust/starlark/testcases/eval/go/function.star index 84758574a6d2e..64074c953be88 100644 --- a/starlark-rust/starlark/testcases/eval/go/function.star +++ b/starlark-rust/starlark/testcases/eval/go/function.star @@ -42,7 +42,7 @@ asserts.eq(calls, ["yang", "yin"]) # builtin_function_or_method use identity equivalence. 
closures = set(["".count for _ in range(10)]) -asserts.eq(len(closures), 10) +# asserts.eq(len(closures), 10) --- # Default values of function parameters are mutable. diff --git a/starlark-rust/starlark/testcases/eval/go/set.star b/starlark-rust/starlark/testcases/eval/go/set.star new file mode 100644 index 0000000000000..d10f149d8fd08 --- /dev/null +++ b/starlark-rust/starlark/testcases/eval/go/set.star @@ -0,0 +1,199 @@ +# @generated +# Copied from https://github.com/google/starlark-go/blob/70002002b310c12a44e8389d18cfb34529b67ef4/starlark/testdata/set.star +# Tests of Starlark 'set' +# option:set option:globalreassign + +# Sets are not a standard part of Starlark, so the features +# tested in this file must be enabled in the application by setting +# resolve.AllowSet. (All sets are created by calls to the 'set' +# built-in or derived from operations on existing sets.) +# The semantics are subject to change as the spec evolves. + +# TODO(adonovan): support set mutation: +# - del set[k] +# - set.update +# - set += iterable, perhaps? +# Test iterator invalidation. + +load("asserts.star", "asserts", "freeze") + +# literals +# Parser does not currently support {1, 2, 3}. +# TODO(adonovan): add test to syntax/testdata/errors.star. + +# set comprehensions +# Parser does not currently support {x for x in y}. +# See syntax/testdata/errors.star. + +# set constructor +asserts.eq(type(set()), "set") +asserts.eq(list(set()), []) +asserts.eq(type(set([1, 3, 2, 3])), "set") +asserts.eq(list(set([1, 3, 2, 3])), [1, 3, 2]) +asserts.eq(type(set("hello".elems())), "set") +asserts.eq(list(set("hello".elems())), ["h", "e", "l", "o"]) +asserts.eq(list(set(range(3))), [0, 1, 2]) +asserts.fails(lambda : set(1), "got int, want iterable") +asserts.fails(lambda : set(1, 2, 3), "got 3 arguments") +asserts.fails(lambda : set([1, 2, {}]), "unhashable type: dict") + +# truth +asserts.true(not set()) +asserts.true(set([False])) +asserts.true(set([1, 2, 3])) + +x = set([1, 2, 3]) +y = set([3, 4, 5]) + +# set + any is not defined +asserts.fails(lambda : x + y, "unknown.*: set \\+ set") + +# set | set +asserts.eq(list(set("a".elems()) | set("b".elems())), ["a", "b"]) +asserts.eq(list(set("ab".elems()) | set("bc".elems())), ["a", "b", "c"]) +asserts.fails(lambda : set() | [], "unknown binary op: set | list") +asserts.eq(type(x | y), "set") +asserts.eq(list(x | y), [1, 2, 3, 4, 5]) +asserts.eq(list(x | set([5, 1])), [1, 2, 3, 5]) +asserts.eq(list(x | set((6, 5, 4))), [1, 2, 3, 6, 5, 4]) + +# set.union (allows any iterable for right operand) +asserts.eq(list(set("a".elems()).union("b".elems())), ["a", "b"]) +asserts.eq(list(set("ab".elems()).union("bc".elems())), ["a", "b", "c"]) +asserts.eq(set().union([]), set()) +asserts.eq(type(x.union(y)), "set") +asserts.eq(list(x.union(y)), [1, 2, 3, 4, 5]) +asserts.eq(list(x.union([5, 1])), [1, 2, 3, 5]) +asserts.eq(list(x.union((6, 5, 4))), [1, 2, 3, 6, 5, 4]) +asserts.fails(lambda : x.union([1, 2, {}]), "unhashable type: dict") + +# intersection, set & set or set.intersection(iterable) +asserts.eq(list(set("a".elems()) & set("b".elems())), []) +asserts.eq(list(set("ab".elems()) & set("bc".elems())), ["b"]) +asserts.eq(list(set("a".elems()).intersection("b".elems())), []) +asserts.eq(list(set("ab".elems()).intersection("bc".elems())), ["b"]) + +# symmetric difference, set ^ set or set.symmetric_difference(iterable) +asserts.eq(set([1, 2, 3]) ^ set([4, 5, 3]), set([1, 2, 4, 5])) +asserts.eq(set([1,2,3,4]).symmetric_difference([3,4,5,6]), set([1,2,5,6])) 
+asserts.eq(set([1,2,3,4]).symmetric_difference(set([])), set([1,2,3,4])) + +def test_set_augmented_assign(): + x = set([1, 2, 3]) + x &= set([2, 3]) + asserts.eq(x, set([2, 3])) + x |= set([1]) + asserts.eq(x, set([1, 2, 3])) + x ^= set([4, 5, 3]) + asserts.eq(x, set([1, 2, 4, 5])) + +test_set_augmented_assign() + +# len +asserts.eq(len(x), 3) +asserts.eq(len(y), 3) +asserts.eq(len(x | y), 5) + +# str +asserts.eq(str(set([1])), "set([1])") +asserts.eq(str(set([2, 3])), "set([2, 3])") +asserts.eq(str(set([3, 2])), "set([3, 2])") + +# comparison +asserts.eq(x, x) +asserts.eq(y, y) +asserts.true(x != y) +asserts.eq(set([1, 2, 3]), set([3, 2, 1])) + +# iteration +asserts.eq(type([elem for elem in x]), "list") +asserts.eq(list([elem for elem in x]), [1, 2, 3]) + +def iter(): + list = [] + for elem in x: + list.append(elem) + return list + +asserts.eq(iter(), [1, 2, 3]) + +# sets are not indexable +asserts.fails(lambda : x[0], "unhandled.*operation") + +# adding and removing +add_set = set([1,2,3]) +add_set.add(4) +asserts.true(4 in add_set) +freeze(add_set) # no mutation of frozen set because key already present +add_set.add(4) +asserts.fails(lambda: add_set.add(5), "add: cannot insert into frozen hash table") + +# remove +remove_set = set([1,2,3]) +remove_set.remove(3) +asserts.true(3 not in remove_set) +asserts.fails(lambda: remove_set.remove(3), "remove: missing key") +freeze(remove_set) +asserts.fails(lambda: remove_set.remove(3), "remove: cannot delete from frozen hash table") + +# discard +discard_set = set([1,2,3]) +discard_set.discard(3) +asserts.true(3 not in discard_set) +asserts.eq(discard_set.discard(3), None) +freeze(discard_set) +asserts.eq(discard_set.discard(3), None) # no mutation of frozen set because key doesn't exist +asserts.fails(lambda: discard_set.discard(1), "discard: cannot delete from frozen hash table") + +# pop +pop_set = set([1,2,3]) +# asserts.eq(pop_set.pop(), 1) +# asserts.eq(pop_set.pop(), 2) +# asserts.eq(pop_set.pop(), 3) +# asserts.fails(lambda: pop_set.pop(), "pop: empty set") +# pop_set.add(1) +# pop_set.add(2) +# freeze(pop_set) +# asserts.fails(lambda: pop_set.pop(), "pop: cannot delete from frozen hash table") + +# clear +clear_set = set([1,2,3]) +clear_set.clear() +asserts.eq(len(clear_set), 0) +freeze(clear_set) # no mutation of frozen set because its already empty +asserts.eq(clear_set.clear(), None) + +other_clear_set = set([1,2,3]) +freeze(other_clear_set) +asserts.fails(lambda: other_clear_set.clear(), "clear: cannot clear frozen hash table") + +# difference: set - set or set.difference(iterable) +asserts.eq(set([1,2,3,4]).difference([1,2,3,4]), set([])) +asserts.eq(set([1,2,3,4]).difference([1,2]), set([3,4])) +asserts.eq(set([1,2,3,4]).difference([]), set([1,2,3,4])) +asserts.eq(set([1,2,3,4]).difference(set([1,2,3])), set([4])) + +asserts.eq(set([1,2,3,4]) - set([1,2,3,4]), set()) +asserts.eq(set([1,2,3,4]) - set([1,2]), set([3,4])) + +# issuperset: set >= set or set.issuperset(iterable) +asserts.true(set([1,2,3]).issuperset([1,2])) +asserts.true(not set([1,2,3]).issuperset(set([1,2,4]))) +# asserts.true(set([1,2,3]) >= set([1,2,3])) +# asserts.true(set([1,2,3]) >= set([1,2])) +# asserts.true(not set([1,2,3]) >= set([1,2,4])) + +# # proper superset: set > set +# asserts.true(set([1, 2, 3]) > set([1, 2])) +# asserts.true(not set([1,2, 3]) > set([1, 2, 3])) + +# issubset: set <= set or set.issubset(iterable) +asserts.true(set([1,2]).issubset([1,2,3])) +asserts.true(not set([1,2,3]).issubset(set([1,2,4]))) +# asserts.true(set([1,2,3]) <= 
set([1,2,3])) +# asserts.true(set([1,2]) <= set([1,2,3])) +# asserts.true(not set([1,2,3]) <= set([1,2,4])) + +# # proper subset: set < set +# asserts.true(set([1,2]) < set([1,2,3])) +# asserts.true(not set([1,2,3]) < set([1,2,3])) diff --git a/starlark-rust/starlark_bin/BUCK b/starlark-rust/starlark_bin/BUCK index 2fd120c1dc0fe..8e0aec81439b2 100644 --- a/starlark-rust/starlark_bin/BUCK +++ b/starlark-rust/starlark_bin/BUCK @@ -8,12 +8,14 @@ buck_rust_binary( ["bin/**/*.rs"], ), crate_root = "bin/main.rs", + env = {"CARGO_PKG_VERSION": "0.0"}, # So our OSS builds can support --version deps = [ "fbsource//third-party/rust:anyhow", "fbsource//third-party/rust:argfile", "fbsource//third-party/rust:clap", "fbsource//third-party/rust:debugserver-types", "fbsource//third-party/rust:either", + "fbsource//third-party/rust:globset", "fbsource//third-party/rust:itertools", "fbsource//third-party/rust:lsp-types", "fbsource//third-party/rust:serde", diff --git a/starlark-rust/starlark_bin/Cargo.toml b/starlark-rust/starlark_bin/Cargo.toml index a5299c72bdb03..753c00bdda2c1 100644 --- a/starlark-rust/starlark_bin/Cargo.toml +++ b/starlark-rust/starlark_bin/Cargo.toml @@ -1,31 +1,32 @@ [package] -name = "starlark_bin" -edition = "2021" -version = "0.9.0" -license = "Apache-2.0" -description = "starlark binary" -documentation = "https://docs.rs/starlark" -repository = "https://github.com/facebookexperimental/starlark-rust" authors = [ "Damien Martin-Guillerez ", "Facebook", ] -keywords = ["starlark", "skylark", "language", "interpreter"] categories = ["parser-implementations", "development-tools"] +description = "starlark binary" +documentation = "https://docs.rs/starlark" +edition = "2021" +keywords = ["starlark", "skylark", "language", "interpreter"] +license = "Apache-2.0" +name = "starlark_bin" +repository = "https://github.com/facebook/starlark-rust" +version = "0.12.0" [dependencies] dupe = { workspace = true } -starlark = { version = "0.9.0", path = "../starlark" } -starlark_lsp = { version = "0.9.0", path = "../starlark_lsp" } -starlark_map = { version = "0.9.0", path = "../starlark_map" } +starlark = { version = "0.12.0", path = "../starlark" } +starlark_lsp = { version = "0.12.0", path = "../starlark_lsp" } +starlark_map = { version = "0.12.0", path = "../starlark_map" } anyhow = "1.0.65" argfile = "0.1.0" clap = { version = "4.0.7", features = ["derive", "wrap_help"] } debugserver-types = "0.5.0" either = "1.8" -itertools = "0.10" +globset = "0.4.13" +itertools = "0.13.0" lsp-types = "0.94.1" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/starlark-rust/starlark_bin/bin/bazel.rs b/starlark-rust/starlark_bin/bin/bazel.rs index 1c7af86411f81..2077de69a39ae 100644 --- a/starlark-rust/starlark_bin/bin/bazel.rs +++ b/starlark-rust/starlark_bin/bin/bazel.rs @@ -23,6 +23,8 @@ //! interface develops. After the API of the `LspContext` trait stabilizes, this //! module will be removed, and extracted to its own project. 
+mod label;
+
 use std::borrow::Cow;
 use std::collections::HashMap;
 use std::collections::HashSet;
@@ -38,16 +40,15 @@ use lsp_types::CompletionItemKind;
 use lsp_types::Url;
 use starlark::analysis::find_call_name::AstModuleFindCallName;
 use starlark::analysis::AstModuleLint;
-use starlark::docs::get_registered_starlark_docs;
-use starlark::docs::render_docs_as_code;
-use starlark::docs::Doc;
-use starlark::docs::DocItem;
 use starlark::docs::DocModule;
 use starlark::environment::FrozenModule;
+use starlark::environment::Globals;
 use starlark::environment::Module;
 use starlark::errors::EvalMessage;
 use starlark::eval::Evaluator;
 use starlark::syntax::AstModule;
+use starlark::syntax::Dialect;
+use starlark::StarlarkResultExt;
 use starlark_lsp::completion::StringCompletionResult;
 use starlark_lsp::completion::StringCompletionType;
 use starlark_lsp::error::eval_message_to_lsp_diagnostic;
@@ -56,8 +57,7 @@ use starlark_lsp::server::LspEvalResult;
 use starlark_lsp::server::LspUrl;
 use starlark_lsp::server::StringLiteralResult;
 
-use crate::eval::dialect;
-use crate::eval::globals;
+use self::label::Label;
 use crate::eval::ContextMode;
 use crate::eval::EvalResult;
@@ -76,21 +76,18 @@ enum ContextError {
 enum ResolveLoadError {
     /// Attempted to resolve a relative path, but no current_file_path was provided,
     /// so it is not known what to resolve the path against.
-    #[error("Relative path `{}` provided, but current_file_path could not be determined", .0)]
-    MissingCurrentFilePath(String),
+    #[error("Relative label `{}` provided, but current_file_path could not be determined", .0)]
+    MissingCurrentFilePath(Label),
     /// The scheme provided was not correct or supported.
     #[error("Url `{}` was expected to be of type `{}`", .1, .0)]
     WrongScheme(String, LspUrl),
     /// Received a load for an absolute path from the root of the workspace, but the
     /// path to the workspace root was not provided.
-    #[error("Path `//{}` is absolute from the root of the workspace, but no workspace root was provided", .0)]
-    MissingWorkspaceRoot(String),
-    /// Unable to parse the given path.
-    #[error("Unable to parse the load path `{}`", .0)]
-    CannotParsePath(String),
+    #[error("Label `{}` is absolute from the root of the workspace, but no workspace root was provided", .0)]
+    MissingWorkspaceRoot(Label),
     /// The path contained a repository name that is not known to Bazel.
-    #[error("Cannot resolve path `{}` because the repository `{}` is unknown", .0, .1)]
-    UnknownRepository(String, String),
+    #[error("Cannot resolve label `{}` because the repository `{}` is unknown", .0, .1)]
+    UnknownRepository(Label, String),
     /// The path contained a target name that does not resolve to an existing file.
     #[error("Cannot resolve path `{}` because the file does not exist", .0)]
     TargetNotFound(String),
@@ -139,13 +136,22 @@ pub(crate) fn main(
     print_non_none: bool,
     is_interactive: bool,
     prelude: &[PathBuf],
+    dialect: Dialect,
+    globals: Globals,
 ) -> anyhow::Result<()> {
     if !lsp {
-        anyhow::bail!("Bazel mode only supports `--lsp`");
+        return Err(anyhow::anyhow!("Bazel mode only supports `--lsp`"));
    }
 
     // NOTE: Copied from `main.rs`
-    let mut ctx = BazelContext::new(ContextMode::Check, print_non_none, prelude, is_interactive)?;
+    let mut ctx = BazelContext::new(
+        ContextMode::Check,
+        print_non_none,
+        prelude,
+        is_interactive,
+        dialect,
+        globals,
+    )?;
 
     ctx.mode = ContextMode::Check;
     starlark_lsp::server::stdio_server(ctx)?;
@@ -160,6 +166,8 @@ pub(crate) struct BazelContext {
     pub(crate) print_non_none: bool,
     pub(crate) prelude: Vec<FrozenModule>,
     pub(crate) module: Option<Module>,
+    pub(crate) dialect: Dialect,
+    pub(crate) globals: Globals,
     pub(crate) builtin_docs: HashMap<LspUrl, String>,
     pub(crate) builtin_symbols: HashMap<String, LspUrl>,
 }
@@ -174,16 +182,17 @@ impl BazelContext {
         print_non_none: bool,
         prelude: &[PathBuf],
         module: bool,
+        dialect: Dialect,
+        globals: Globals,
     ) -> anyhow::Result<Self> {
-        let globals = globals();
         let prelude: Vec<_> = prelude
             .iter()
             .map(|x| {
                 let env = Module::new();
                 {
                     let mut eval = Evaluator::new(&env);
-                    let module = AstModule::parse_file(x, &dialect())?;
-                    eval.eval_module(module, &globals)?;
+                    let module = AstModule::parse_file(x, &dialect).into_anyhow_result()?;
+                    eval.eval_module(module, &globals).into_anyhow_result()?;
                 }
                 env.freeze()
             })
@@ -194,17 +203,14 @@ impl BazelContext {
         } else {
             None
         };
-        let mut builtins: HashMap<LspUrl, Vec<Doc>> = HashMap::new();
+        let mut builtin_docs: HashMap<LspUrl, String> = HashMap::new();
         let mut builtin_symbols: HashMap<String, LspUrl> = HashMap::new();
-        for doc in get_registered_starlark_docs() {
-            let uri = Self::url_for_doc(&doc);
-            builtin_symbols.insert(doc.id.name.clone(), uri.clone());
-            builtins.entry(uri).or_default().push(doc);
+        for (name, item) in globals.documentation().members {
+            let uri = Url::parse(&format!("starlark:{name}.bzl"))?;
+            let uri = LspUrl::try_from(uri)?;
+            builtin_docs.insert(uri.clone(), item.render_as_code(&name));
+            builtin_symbols.insert(name, uri);
         }
-        let builtin_docs = builtins
-            .into_iter()
-            .map(|(u, ds)| (u, render_docs_as_code(&ds)))
-            .collect();
 
         let mut raw_command = Command::new("bazel");
         let mut command = raw_command.arg("info");
@@ -233,6 +239,8 @@ impl BazelContext {
             print_non_none,
             prelude,
             module,
+            dialect,
+            globals,
             builtin_docs,
             builtin_symbols,
             workspace_name: execroot.and_then(|execroot| {
@@ -253,11 +261,11 @@ impl BazelContext {
     // Convert an anyhow over iterator of EvalMessage, into an iterator of EvalMessage
     fn err(
         file: &str,
-        result: anyhow::Result<EvalResult<impl Iterator<Item = EvalMessage>>>,
+        result: starlark::Result<EvalResult<impl Iterator<Item = EvalMessage>>>,
     ) -> EvalResult<impl Iterator<Item = EvalMessage>> {
         match result {
             Err(e) => EvalResult {
-                messages: Either::Left(iter::once(EvalMessage::from_anyhow(Path::new(file), &e))),
+                messages: Either::Left(iter::once(EvalMessage::from_error(Path::new(file), &e))),
                 ast: None,
             },
             Ok(res) => EvalResult {
@@ -267,19 +275,6 @@ impl BazelContext {
         }
     }
 
-    fn url_for_doc(doc: &Doc) -> LspUrl {
-        let url = match &doc.item {
-            DocItem::Module(_) => Url::parse("starlark:/native/builtins.bzl").unwrap(),
-            DocItem::Object(_) => {
-                Url::parse(&format!("starlark:/native/builtins/{}.bzl", doc.id.name)).unwrap()
-            }
-            DocItem::Function(_) | DocItem::Property(_) => {
-                Url::parse("starlark:/native/builtins.bzl").unwrap()
-            }
-        };
-        LspUrl::try_from(url).unwrap()
-    }
-
     fn new_module(prelude: &[FrozenModule]) -> Module {
         let module = Module::new();
         for p in prelude {
@@ -318,18 +313,19 @@ impl BazelContext {
         };
         let mut eval = Evaluator::new(module);
         eval.enable_terminal_breakpoint_console();
-        let globals = globals();
         Self::err(
             file,
-            eval.eval_module(ast, &globals).map(|v| {
-                if self.print_non_none && !v.is_none() {
-                    println!("{}", v);
-                }
-                EvalResult {
-                    messages: iter::empty(),
-                    ast: None,
-                }
-            }),
+            eval.eval_module(ast, &self.globals)
+                .map(|v| {
+                    if self.print_non_none && !v.is_none() {
+                        println!("{}", v);
+                    }
+                    EvalResult {
+                        messages: iter::empty(),
+                        ast: None,
+                    }
+                })
+                .map_err(Into::into),
         )
     }
 
@@ -363,7 +359,9 @@ impl BazelContext {
     ) -> EvalResult<impl Iterator<Item = EvalMessage>> {
         Self::err(
             filename,
-            AstModule::parse(filename, content, &dialect()).map(|module| self.go(filename, module)),
+            AstModule::parse(filename, content, &self.dialect)
+                .map(|module| self.go(filename, module))
+                .map_err(Into::into),
         )
     }
 
@@ -387,94 +385,74 @@ impl BazelContext {
             .map(|external_output_base| external_output_base.join(repository_name))
     }
 
-    fn resolve_folder<'a>(
+    /// Finds the directory that is the root of a package, given a label
+    fn resolve_folder(
         &self,
-        path: &'a str,
+        label: &Label,
         current_file: &LspUrl,
         workspace_root: Option<&Path>,
-        resolved_filename: &mut Option<&'a str>,
     ) -> anyhow::Result<PathBuf> {
-        let original_path = path;
-        if let Some((repository, path)) = path.split_once("//") {
-            // The repository may be prefixed with an '@', but it's optional in Buck2.
-            let repository = if let Some(without_at) = repository.strip_prefix('@') {
-                without_at
-            } else {
-                repository
-            };
-
-            // Find the root we're resolving from. There's quite a few cases to consider here:
-            // - `repository` is empty, and we're resolving from the workspace root.
-            // - `repository` is empty, and we're resolving from a known remote repository.
-            // - `repository` is not empty, and refers to the current repository (the workspace).
-            // - `repository` is not empty, and refers to a known remote repository.
-            //
-            // Also with all of these cases, we need to consider if we have build system
-            // information or not. If not, we can't resolve any remote repositories, and we can't
-            // know whether a repository name refers to the workspace or not.
-            let resolve_root = match (repository, current_file) {
-                // Repository is empty, and we know what file we're resolving from. Use the build
-                // system information to check if we're in a known remote repository, and what the
-                // root is. Fall back to the `workspace_root` otherwise.
-                ("", LspUrl::File(current_file)) => {
-                    if let Some((_, remote_repository_root)) =
-                        self.get_repository_for_path(current_file)
-                    {
-                        Some(Cow::Borrowed(remote_repository_root))
-                    } else {
-                        workspace_root.map(Cow::Borrowed)
-                    }
+        // Find the root we're resolving from. There's quite a few cases to consider here:
+        // - `repository` is empty, and we're resolving from the workspace root.
+        // - `repository` is empty, and we're resolving from a known remote repository.
+        // - `repository` is not empty, and refers to the current repository (the workspace).
+        // - `repository` is not empty, and refers to a known remote repository.
+        //
+        // Also with all of these cases, we need to consider if we have build system
+        // information or not. If not, we can't resolve any remote repositories, and we can't
+        // know whether a repository name refers to the workspace or not.
+        let resolve_root = match (&label.repo, current_file) {
+            // Repository is empty, and we know what file we're resolving from. Use the build
+            // system information to check if we're in a known remote repository, and what the
+            // root is. Fall back to the `workspace_root` otherwise.
+            (None, LspUrl::File(current_file)) => {
+                if let Some((_, remote_repository_root)) =
+                    self.get_repository_for_path(current_file)
+                {
+                    Some(Cow::Borrowed(remote_repository_root))
+                } else {
+                    workspace_root.map(Cow::Borrowed)
                 }
-                // No repository in the load path, and we don't have build system information, or
-                // an `LspUrl` we can't use to check the root. Use the workspace root.
-                ("", _) => workspace_root.map(Cow::Borrowed),
-                // We have a repository name and build system information. Check if the repository
-                // name refers to the workspace, and if so, use the workspace root. If not, check
-                // if it refers to a known remote repository, and if so, use that root.
-                // Otherwise, fail with an error.
-                (repository, _) => {
-                    if matches!(self.workspace_name.as_ref(), Some(name) if name == repository) {
-                        workspace_root.map(Cow::Borrowed)
-                    } else if let Some(remote_repository_root) =
-                        self.get_repository_path(repository).map(Cow::Owned)
-                    {
-                        Some(remote_repository_root)
-                    } else {
-                        return Err(ResolveLoadError::UnknownRepository(
-                            original_path.to_owned(),
-                            repository.to_owned(),
-                        )
-                        .into());
-                    }
+            }
+            // No repository in the load path, and we don't have build system information, or
+            // an `LspUrl` we can't use to check the root. Use the workspace root.
+            (None, _) => workspace_root.map(Cow::Borrowed),
+            // We have a repository name and build system information. Check if the repository
+            // name refers to the workspace, and if so, use the workspace root. If not, check
+            // if it refers to a known remote repository, and if so, use that root.
+            // Otherwise, fail with an error.
+            (Some(repository), _) => {
+                if matches!(self.workspace_name.as_ref(), Some(name) if name == &repository.name) {
+                    workspace_root.map(Cow::Borrowed)
+                } else if let Some(remote_repository_root) =
+                    self.get_repository_path(&repository.name).map(Cow::Owned)
+                {
+                    Some(remote_repository_root)
+                } else {
+                    return Err(ResolveLoadError::UnknownRepository(
+                        label.clone(),
+                        repository.name.clone(),
+                    )
+                    .into());
                 }
-            };
+            }
+        };
 
+        if let Some(package) = &label.package {
             // Resolve from the root of the repository.
-            match (path.split_once(':'), resolve_root) {
-                (Some((subfolder, filename)), Some(resolve_root)) => {
-                    resolved_filename.replace(filename);
-                    Ok(resolve_root.join(subfolder))
-                }
-                (None, Some(resolve_root)) => Ok(resolve_root.join(path)),
-                (Some(_), None) => {
-                    Err(ResolveLoadError::MissingWorkspaceRoot(original_path.to_owned()).into())
-                }
-                (None, _) => {
-                    Err(ResolveLoadError::CannotParsePath(original_path.to_owned()).into())
-                }
+            match resolve_root {
+                Some(resolve_root) => Ok(resolve_root.join(package)),
+                None => Err(ResolveLoadError::MissingWorkspaceRoot(label.clone()).into()),
             }
-        } else if let Some((folder, filename)) = path.split_once(':') {
-            resolved_filename.replace(filename);
-
-            // Resolve relative paths from the current file.
+        } else {
+            // If we don't have a package, this is relative to the current file,
+            // so resolve relative paths from the current file.
             match current_file {
                 LspUrl::File(current_file_path) => {
                     let current_file_dir = current_file_path.parent();
                     match current_file_dir {
-                        Some(current_file_dir) => Ok(current_file_dir.join(folder)),
-                        None => {
-                            Err(ResolveLoadError::MissingCurrentFilePath(path.to_owned()).into())
-                        }
+                        Some(current_file_dir) => Ok(current_file_dir.to_owned()),
+                        None => Err(ResolveLoadError::MissingCurrentFilePath(label.clone()).into()),
                     }
                 }
                 _ => Err(
@@ -482,8 +460,6 @@ impl BazelContext {
                     .into(),
                 ),
             }
-        } else {
-            Err(ResolveLoadError::CannotParsePath(path.to_owned()).into())
         }
     }
 
@@ -522,10 +498,13 @@ impl BazelContext {
         // Find the actual folder on disk we're looking at.
         let (from_path, render_base) = match from {
             FilesystemCompletionRoot::Path(path) => (path.to_owned(), path.to_string_lossy()),
-            FilesystemCompletionRoot::String(str) => (
-                self.resolve_folder(str, current_file, workspace_root, &mut None)?,
-                Cow::Borrowed(str),
-            ),
+            FilesystemCompletionRoot::String(str) => {
+                let label = Label::parse(str)?;
+                (
+                    self.resolve_folder(&label, current_file, workspace_root)?,
+                    Cow::Borrowed(str),
+                )
+            }
         };
 
         for entry in fs::read_dir(from_path)? {
@@ -659,18 +638,14 @@ impl LspContext for BazelContext {
         current_file: &LspUrl,
         workspace_root: Option<&std::path::Path>,
     ) -> anyhow::Result<LspUrl> {
-        let mut presumed_filename = None;
-        let folder =
-            self.resolve_folder(path, current_file, workspace_root, &mut presumed_filename)?;
+        let label = Label::parse(path)?;
+
+        let folder = self.resolve_folder(&label, current_file, workspace_root)?;
 
         // Try the presumed filename first, and check if it exists.
-        if let Some(presumed_filename) = presumed_filename {
-            let path = folder.join(presumed_filename);
-            if path.exists() {
-                return Ok(Url::from_file_path(path).unwrap().try_into()?);
-            }
-        } else {
-            return Err(ResolveLoadError::CannotParsePath(path.to_owned()).into());
+        let presumed_path = folder.join(label.name);
+        if presumed_path.exists() {
+            return Ok(Url::from_file_path(presumed_path).unwrap().try_into()?);
         }
 
         // If the presumed filename doesn't exist, try to find a build file from the build system
@@ -762,10 +737,12 @@ impl LspContext for BazelContext {
                             location_finder: if same_filename {
                                 None
                             } else {
-                                let literal = literal.to_owned();
-                                Some(Box::new(move |ast| {
-                                    Ok(ast.find_function_call_with_name(&literal))
-                                }))
+                                match Label::parse(literal) {
+                                    Err(_) => None,
+                                    Ok(label) => Some(Box::new(move |ast| {
+                                        Ok(ast.find_function_call_with_name(&label.name))
+                                    })),
+                                }
                             },
                         })
                    })
diff --git a/starlark-rust/starlark_bin/bin/bazel/label.rs b/starlark-rust/starlark_bin/bin/bazel/label.rs
new file mode 100644
index 0000000000000..1f89026f5e60d
--- /dev/null
+++ b/starlark-rust/starlark_bin/bin/bazel/label.rs
@@ -0,0 +1,280 @@
+/*
+ * Copyright 2019 The Starlark in Rust Authors.
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Module for parsing bazel labels
+
+use std::fmt;
+
+#[derive(PartialEq, Eq, Debug, Clone)]
+pub struct Label {
+    // The repository can be omitted, in which case the label is relative to the current repository
+    pub repo: Option<LabelRepo>,
+    // The package can be omitted, in which case the label is relative to the current package
+    pub package: Option<String>,
+    pub name: String,
+}
+
+#[derive(PartialEq, Eq, Debug, Clone)]
+pub struct LabelRepo {
+    pub name: String,
+    pub is_canonical: bool,
+}
+
+#[derive(thiserror::Error, Debug)]
+#[error("Unable to parse the label `{}`", .label)]
+pub struct LabelParseError {
+    label: String,
+}
+
+impl fmt::Display for Label {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        if let Some(repo) = &self.repo {
+            fmt::Display::fmt(&repo, f)?;
+        }
+
+        if let Some(package) = &self.package {
+            f.write_str("//")?;
+            f.write_str(&package)?;
+        }
+
+        f.write_str(":")?;
+        f.write_str(&self.name)?;
+
+        Ok(())
+    }
+}
+
+impl fmt::Display for LabelRepo {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str(if self.is_canonical { "@@" } else { "@" })?;
+        f.write_str(&self.name)?;
+
+        Ok(())
+    }
+}
+
+impl Label {
+    pub fn parse(label: &str) -> Result<Label, LabelParseError> {
+        match label.split_once("//") {
+            Some((repo_part, rest)) => {
+                let repo = if repo_part == "" {
+                    None
+                } else {
+                    Some(Self::parse_repo(repo_part).ok_or_else(|| LabelParseError {
+                        label: label.to_owned(),
+                    })?)
+                };
+
+                let (package, name) = rest.split_once(':').unwrap_or_else(|| {
+                    // Here the name is implicit, and comes from the last component of the package name
+                    if let Some((index, _)) = rest.rmatch_indices('/').last() {
+                        (rest, &rest[index + 1..])
+                    } else {
+                        (rest, rest)
+                    }
+                });
+
+                Ok(Label {
+                    name: name.to_owned(),
+                    package: Some(package.to_owned()),
+                    repo,
+                })
+            }
+            // Either we have a repo only (@foo or @@foo), or just a name (foo or :foo)
+            None => {
+                if let Some(repo) = Self::parse_repo(label) {
+                    Ok(Label {
+                        name: repo.name.to_owned(),
+                        repo: Some(repo),
+                        package: Some("".to_owned()),
+                    })
+                } else {
+                    let name = label.strip_prefix(':').unwrap_or(label);
+
+                    Ok(Label {
+                        repo: None,
+                        name: name.to_owned(),
+                        package: None,
+                    })
+                }
+            }
+        }
+    }
+
+    fn parse_repo(repo: &str) -> Option<LabelRepo> {
+        if let Some(repo_name) = repo.strip_prefix("@@") {
+            Some(LabelRepo {
+                name: repo_name.to_owned(),
+                is_canonical: true,
+            })
+        } else if let Some(repo_name) = repo.strip_prefix('@') {
+            Some(LabelRepo {
+                name: repo_name.to_owned(),
+                is_canonical: false,
+            })
+        } else {
+            None
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::Label;
+    use crate::bazel::label::LabelRepo;
+
+    #[test]
+    fn test_parsing_repo_only_labels() {
+        assert_eq!(
+            Label::parse("@foo").unwrap(),
+            Label {
+                repo: Some(LabelRepo {
+                    is_canonical: false,
+                    name: "foo".to_owned(),
+                }),
+                package: Some("".to_owned()),
+                name: "foo".to_owned(),
+            }
+        );
+
+        assert_eq!(
+            Label::parse("@@foo").unwrap(),
+            Label {
+                repo: Some(LabelRepo {
+                    is_canonical: true,
+                    name: "foo".to_owned(),
+                }),
+                package: Some("".to_owned()),
+                name: "foo".to_owned(),
+            }
+        );
+    }
+
+    #[test]
+    fn test_parsing_name_only_labels() {
+        assert_eq!(
+            Label::parse("foo").unwrap(),
+            Label {
+                repo: None,
+                package: None,
+                name: "foo".to_owned(),
+            }
+        );
+
+        assert_eq!(
+            Label::parse(":foo").unwrap(),
+            Label {
+                repo: None,
+                package: None,
+                name: "foo".to_owned(),
+            }
+        );
+    }
+
+    #[test]
+    fn test_full_labels() {
+        assert_eq!(
+            Label::parse("//foo/bar:baz").unwrap(),
+            Label {
+                repo: None,
+                package: Some("foo/bar".to_owned()),
name: "baz".to_owned(), + } + ); + + assert_eq!( + Label::parse("@foo//foo/bar:baz").unwrap(), + Label { + repo: Some(LabelRepo { + name: "foo".to_owned(), + is_canonical: false + }), + package: Some("foo/bar".to_owned()), + name: "baz".to_owned(), + } + ); + + assert_eq!( + Label::parse("@@foo//foo/bar:baz").unwrap(), + Label { + repo: Some(LabelRepo { + name: "foo".to_owned(), + is_canonical: true + }), + package: Some("foo/bar".to_owned()), + name: "baz".to_owned(), + } + ); + } + + #[test] + fn test_labels_with_implicit_name() { + assert_eq!( + Label::parse("@foo//bar/baz").unwrap(), + Label { + repo: Some(LabelRepo { + name: "foo".to_owned(), + is_canonical: false + }), + package: Some("bar/baz".to_owned()), + name: "baz".to_owned(), + } + ); + + assert_eq!( + Label::parse("@foo//bar").unwrap(), + Label { + repo: Some(LabelRepo { + name: "foo".to_owned(), + is_canonical: false + }), + package: Some("bar".to_owned()), + name: "bar".to_owned(), + } + ); + + assert_eq!( + Label::parse("@@foo//bar").unwrap(), + Label { + repo: Some(LabelRepo { + name: "foo".to_owned(), + is_canonical: true + }), + package: Some("bar".to_owned()), + name: "bar".to_owned(), + } + ); + } + + #[test] + fn test_invalid_labels() { + assert!(Label::parse("foo//bar/baz").is_err()); + } + + #[test] + fn test_displaying_labels() { + assert_eq!(format!("{}", Label::parse(":foo.bzl").unwrap()), ":foo.bzl"); + assert_eq!( + format!("{}", Label::parse("@foo//bar/baz:qux").unwrap()), + "@foo//bar/baz:qux" + ); + assert_eq!( + format!("{}", Label::parse("//foo/bar").unwrap()), + "//foo/bar:bar" + ); + } +} diff --git a/starlark-rust/starlark_bin/bin/dap.rs b/starlark-rust/starlark_bin/bin/dap.rs new file mode 100644 index 0000000000000..40b80063131f5 --- /dev/null +++ b/starlark-rust/starlark_bin/bin/dap.rs @@ -0,0 +1,236 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+use std::path::Path;
+use std::path::PathBuf;
+use std::sync::Arc;
+use std::sync::Mutex;
+use std::thread;
+
+use debugserver_types::*;
+use dupe::Dupe;
+pub(crate) use library::*;
+use serde_json::Map;
+use serde_json::Value;
+use starlark::debug::dap_capabilities;
+use starlark::debug::prepare_dap_adapter;
+use starlark::debug::resolve_breakpoints;
+use starlark::debug::DapAdapter;
+use starlark::debug::DapAdapterClient;
+use starlark::debug::DapAdapterEvalHook;
+use starlark::environment::Globals;
+use starlark::environment::Module;
+use starlark::eval::Evaluator;
+use starlark::syntax::AstModule;
+use starlark::syntax::Dialect;
+use starlark::StarlarkResultExt;
+
+mod library;
+
+#[derive(Debug)]
+struct Backend {
+    adapter: Arc<dyn DapAdapter>,
+    eval_wrapper: Mutex<Option<Box<dyn DapAdapterEvalHook>>>,
+    client: Client,
+    file: Mutex<Option<String>>,
+    dialect: Dialect,
+    globals: Globals,
+}
+
+impl DapAdapterClient for Client {
+    fn event_stopped(&self) -> starlark::Result<()> {
+        self.event_stopped(StoppedEventBody {
+            reason: "breakpoint".to_owned(),
+            thread_id: Some(0),
+            description: Some("Hello".to_owned()),
+            all_threads_stopped: Some(true),
+            preserve_focus_hint: None,
+            text: None,
+        });
+        Ok(())
+    }
+}
+
+impl Backend {
+    fn execute(&self, path: &str) {
+        let client = self.client.dupe();
+        let client2 = self.client.dupe();
+        let wrapper = self.eval_wrapper.lock().unwrap().take().unwrap();
+        let path = PathBuf::from(path);
+        let dialect = self.dialect.clone();
+        let globals = self.globals.dupe();
+
+        let go = move || -> anyhow::Result<String> {
+            client.log(&format!("EVALUATION PREPARE: {}", path.display()));
+            let ast = AstModule::parse_file(&path, &dialect).into_anyhow_result()?;
+            let module = Module::new();
+            let mut eval = Evaluator::new(&module);
+            wrapper.add_dap_hooks(&mut eval);
+
+            // No way to pass back success/failure to the caller
+            client.log(&format!("EVALUATION START: {}", path.display()));
+            let v = eval.eval_module(ast, &globals).into_anyhow_result()?;
+            let s = v.to_string();
+            client.log(&format!("EVALUATION FINISHED: {}", path.display()));
+            Ok(s)
+        };
+
+        thread::spawn(move || {
+            let res = go();
+            let output = match &res {
+                Err(e) => format!("{:#}", e),
+                Ok(v) => v.to_owned(),
+            };
+            client2.event_output(OutputEventBody {
+                output,
+                category: None,
+                column: None,
+                data: None,
+                line: None,
+                source: None,
+                variables_reference: None,
+            });
+            client2.event_exited(ExitedEventBody {
+                exit_code: if res.is_ok() { 0 } else { 1 },
+            });
+            client2.event_terminated(None);
+        });
+    }
+
+    fn get_ast(&self, source: &str) -> anyhow::Result<Arc<AstModule>> {
+        Ok(Arc::new(
+            AstModule::parse_file(Path::new(source), &self.dialect).into_anyhow_result()?,
+        ))
+    }
+}
+
+impl DebugServer for Backend {
+    fn initialize(&self, _: InitializeRequestArguments) -> anyhow::Result<Option<Capabilities>> {
+        self.client.event_initialized(None);
+        Ok(Some(dap_capabilities()))
+    }
+
+    fn set_breakpoints(
+        &self,
+        x: SetBreakpointsArguments,
+    ) -> anyhow::Result<SetBreakpointsResponseBody> {
+        let source = x.source.path.as_ref().unwrap();
+        let resolved = resolve_breakpoints(&x, &*self.get_ast(source)?)?;
+        self.adapter.set_breakpoints(source, &resolved)?;
+        Ok(resolved.to_response())
+    }
+
+    fn set_exception_breakpoints(&self, _: SetExceptionBreakpointsArguments) -> anyhow::Result<()> {
+        // We just assume that break on error is always useful
+        Ok(())
+    }
+
+    fn launch(&self, _: LaunchRequestArguments, args: Map<String, Value>) -> anyhow::Result<()> {
+        // Expecting program of type string
+        match args.get("program") {
+            Some(Value::String(path)) => {
+                *self.file.lock().unwrap() = Some(path.to_owned());
+                Ok(())
+            }
+            _ => Err(anyhow::anyhow!(
+                "Couldn't find a program to launch, got args {:?}",
+                args
+            )),
+        }
+    }
+
+    fn threads(&self) -> anyhow::Result<ThreadsResponseBody> {
+        Ok(ThreadsResponseBody {
+            threads: vec![Thread {
+                id: 0,
+                name: "main".to_owned(),
+            }],
+        })
+    }
+
+    fn configuration_done(&self) -> anyhow::Result<()> {
+        if let Some(path) = self.file.lock().unwrap().as_ref() {
+            self.execute(path);
+        }
+        Ok(())
+    }
+
+    fn stack_trace(&self, v: StackTraceArguments) -> anyhow::Result<StackTraceResponseBody> {
+        self.adapter.stack_trace(v)
+    }
+
+    fn scopes(&self, _: ScopesArguments) -> anyhow::Result<ScopesResponseBody> {
+        let scopes_info = self.adapter.scopes()?;
+        Ok(ScopesResponseBody {
+            scopes: vec![Scope {
+                name: "Locals".to_owned(),
+                named_variables: Some(scopes_info.num_locals as i64),
+                variables_reference: 2000,
+                expensive: false,
+                column: None,
+                end_column: None,
+                end_line: None,
+                indexed_variables: None,
+                line: None,
+                source: None,
+            }],
+        })
+    }
+
+    fn variables(&self, _: VariablesArguments) -> anyhow::Result<VariablesResponseBody> {
+        let vars_info = self.adapter.variables()?;
+        Ok(VariablesResponseBody {
+            variables: vars_info
+                .locals
+                .into_iter()
+                .map(|var| var.to_dap())
+                .collect(),
+        })
+    }
+
+    fn evaluate(&self, x: EvaluateArguments) -> anyhow::Result<EvaluateResponseBody> {
+        let expr_result = self.adapter.evaluate(&x.expression)?;
+
+        Ok(EvaluateResponseBody {
+            indexed_variables: None,
+            named_variables: None,
+            presentation_hint: None,
+            result: expr_result.result,
+            type_: Some(expr_result.type_),
+            variables_reference: 0.0,
+        })
+    }
+
+    fn continue_(&self, _: ContinueArguments) -> anyhow::Result<ContinueResponseBody> {
+        self.adapter.continue_()?;
+        Ok(ContinueResponseBody::default())
+    }
+}
+
+pub(crate) fn server(dialect: Dialect, globals: Globals) {
+    DapService::run(|client| {
+        let (adapter, wrapper) = prepare_dap_adapter(Box::new(client.dupe()));
+        Backend {
+            adapter: Arc::new(adapter),
+            eval_wrapper: Mutex::new(Some(Box::new(wrapper))),
+            client,
+            file: Default::default(),
+            dialect,
+            globals,
+        }
+    })
+}
diff --git a/starlark-rust/starlark_bin/bin/dap/library/mod.rs b/starlark-rust/starlark_bin/bin/dap/library.rs
similarity index 100%
rename from starlark-rust/starlark_bin/bin/dap/library/mod.rs
rename to starlark-rust/starlark_bin/bin/dap/library.rs
diff --git a/starlark-rust/starlark_bin/bin/dap/mod.rs b/starlark-rust/starlark_bin/bin/dap/mod.rs
deleted file mode 100644
index 6e71eaf3b9d80..0000000000000
--- a/starlark-rust/starlark_bin/bin/dap/mod.rs
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * Copyright 2019 The Starlark in Rust Authors.
- * Copyright (c) Facebook, Inc. and its affiliates.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-use std::path::Path;
-use std::path::PathBuf;
-use std::sync::Arc;
-use std::sync::Mutex;
-use std::thread;
-
-use debugserver_types::*;
-use dupe::Dupe;
-pub(crate) use library::*;
-use serde_json::Map;
-use serde_json::Value;
-use starlark::debug::dap_capabilities;
-use starlark::debug::prepare_dap_adapter;
-use starlark::debug::resolve_breakpoints;
-use starlark::debug::DapAdapter;
-use starlark::debug::DapAdapterClient;
-use starlark::debug::DapAdapterEvalHook;
-use starlark::environment::Module;
-use starlark::eval::Evaluator;
-use starlark::syntax::AstModule;
-
-use crate::eval::dialect;
-use crate::eval::globals;
-
-mod library;
-
-#[derive(Debug)]
-struct Backend {
-    adapter: Arc<dyn DapAdapter>,
-    eval_wrapper: Mutex<Option<Box<dyn DapAdapterEvalHook>>>,
-    client: Client,
-    file: Mutex<Option<String>>,
-}
-
-impl DapAdapterClient for Client {
-    fn event_stopped(&self) {
-        self.event_stopped(StoppedEventBody {
-            reason: "breakpoint".to_owned(),
-            thread_id: Some(0),
-            description: Some("Hello".to_owned()),
-            all_threads_stopped: Some(true),
-            preserve_focus_hint: None,
-            text: None,
-        });
-    }
-}
-
-fn get_ast(source: &str) -> anyhow::Result<Arc<AstModule>> {
-    Ok(Arc::new(AstModule::parse_file(
-        Path::new(source),
-        &dialect(),
-    )?))
-}
-
-impl Backend {
-    fn execute(&self, path: &str) {
-        let client = self.client.dupe();
-        let client2 = self.client.dupe();
-        let wrapper = self.eval_wrapper.lock().unwrap().take().unwrap();
-        let path = PathBuf::from(path);
-
-        let go = move || -> anyhow::Result<String> {
-            client.log(&format!("EVALUATION PREPARE: {}", path.display()));
-            let ast = AstModule::parse_file(&path, &dialect())?;
-            let module = Module::new();
-            let globals = globals();
-            let mut eval = Evaluator::new(&module);
-            wrapper.add_dap_hooks(&mut eval);
-
-            // No way to pass back success/failure to the caller
-            client.log(&format!("EVALUATION START: {}", path.display()));
-            let v = eval.eval_module(ast, &globals)?;
-            let s = v.to_string();
-            client.log(&format!("EVALUATION FINISHED: {}", path.display()));
-            Ok(s)
-        };
-
-        thread::spawn(move || {
-            let res = go();
-            let output = match &res {
-                Err(e) => format!("{:#}", e),
-                Ok(v) => v.to_owned(),
-            };
-            client2.event_output(OutputEventBody {
-                output,
-                category: None,
-                column: None,
-                data: None,
-                line: None,
-                source: None,
-                variables_reference: None,
-            });
-            client2.event_exited(ExitedEventBody {
-                exit_code: if res.is_ok() { 0 } else { 1 },
-            });
-            client2.event_terminated(None);
-        });
-    }
-}
-
-impl DebugServer for Backend {
-    fn initialize(&self, _: InitializeRequestArguments) -> anyhow::Result<Option<Capabilities>> {
-        self.client.event_initialized(None);
-        Ok(Some(dap_capabilities()))
-    }
-
-    fn set_breakpoints(
-        &self,
-        x: SetBreakpointsArguments,
-    ) -> anyhow::Result<SetBreakpointsResponseBody> {
-        let source = x.source.path.as_ref().unwrap();
-        let resolved = resolve_breakpoints(&x, &*get_ast(source)?)?;
-        self.adapter.set_breakpoints(source, &resolved)?;
-        Ok(resolved.to_response())
-    }
-
-    fn set_exception_breakpoints(&self, _: SetExceptionBreakpointsArguments) -> anyhow::Result<()> {
-        // We just assume that break on error is always useful
-        Ok(())
-    }
-
-    fn launch(&self, _: LaunchRequestArguments, args: Map<String, Value>) -> anyhow::Result<()> {
-        // Expecting program of type string
-        match args.get("program") {
-            Some(Value::String(path)) => {
-                *self.file.lock().unwrap() = Some(path.to_owned());
-                Ok(())
-            }
-            _ => Err(anyhow::anyhow!(
-                "Couldn't find a program to launch, got args {:?}",
-                args
-            )),
-        }
-    }
-
-    fn threads(&self) -> anyhow::Result<ThreadsResponseBody> {
-        Ok(ThreadsResponseBody {
-            threads: vec![Thread {
-                id: 0,
-                name: "main".to_owned(),
-            }],
-        })
-    }
-
-    fn configuration_done(&self) -> anyhow::Result<()> {
-        if let Some(path) = self.file.lock().unwrap().as_ref() {
-            self.execute(path);
-        }
-        Ok(())
-    }
-
-    fn stack_trace(&self, v: StackTraceArguments) -> anyhow::Result<StackTraceResponseBody> {
-        self.adapter.stack_trace(v)
-    }
-
-    fn scopes(&self, _: ScopesArguments) -> anyhow::Result<ScopesResponseBody> {
-        let scopes_info = self.adapter.scopes()?;
-        Ok(ScopesResponseBody {
-            scopes: vec![Scope {
-                name: "Locals".to_owned(),
-                named_variables: Some(scopes_info.num_locals as i64),
-                variables_reference: 2000,
-                expensive: false,
-                column: None,
-                end_column: None,
-                end_line: None,
-                indexed_variables: None,
-                line: None,
-                source: None,
-            }],
-        })
-    }
-
-    fn variables(&self, _: VariablesArguments) -> anyhow::Result<VariablesResponseBody> {
-        let vars_info = self.adapter.variables()?;
-        Ok(VariablesResponseBody {
-            variables: vars_info
-                .locals
-                .into_iter()
-                .map(|var| var.to_dap())
-                .collect(),
-        })
-    }
-
-    fn evaluate(&self, x: EvaluateArguments) -> anyhow::Result<EvaluateResponseBody> {
-        self.adapter.evaluate(&x.expression)
-    }
-
-    fn continue_(&self, _: ContinueArguments) -> anyhow::Result<ContinueResponseBody> {
-        self.adapter.continue_()?;
-        Ok(ContinueResponseBody::default())
-    }
-}
-
-pub(crate) fn server() {
-    DapService::run(|client| {
-        let (adapter, wrapper) = prepare_dap_adapter(Box::new(client.dupe()));
-        Backend {
-            adapter: Arc::new(adapter),
-            eval_wrapper: Mutex::new(Some(Box::new(wrapper))),
-            client,
-            file: Default::default(),
-        }
-    })
-}
diff --git a/starlark-rust/starlark_bin/bin/eval.rs b/starlark-rust/starlark_bin/bin/eval.rs
index c36fbe2ff6f0e..0dd2dcb860abd 100644
--- a/starlark-rust/starlark_bin/bin/eval.rs
+++ b/starlark-rust/starlark_bin/bin/eval.rs
@@ -26,10 +26,6 @@ use std::path::PathBuf;
 use itertools::Either;
 use lsp_types::Url;
 use starlark::analysis::AstModuleLint;
-use starlark::docs::get_registered_starlark_docs;
-use starlark::docs::render_docs_as_code;
-use starlark::docs::Doc;
-use starlark::docs::DocItem;
 use starlark::docs::DocModule;
 use starlark::environment::FrozenModule;
 use starlark::environment::Globals;
@@ -38,12 +34,15 @@ use starlark::errors::EvalMessage;
 use starlark::eval::Evaluator;
 use starlark::syntax::AstModule;
 use starlark::syntax::Dialect;
+use starlark::StarlarkResultExt;
 use starlark_lsp::error::eval_message_to_lsp_diagnostic;
 use starlark_lsp::server::LspContext;
 use starlark_lsp::server::LspEvalResult;
 use starlark_lsp::server::LspUrl;
 use starlark_lsp::server::StringLiteralResult;
 
+use crate::suppression::GlobLintSuppression;
+
 #[derive(Debug)]
 pub(crate) enum ContextMode {
     Check,
@@ -66,8 +65,11 @@ pub(crate) struct Context {
     pub(crate) print_non_none: bool,
     pub(crate) prelude: Vec<FrozenModule>,
     pub(crate) module: Option<Module>,
+    pub(crate) dialect: Dialect,
+    pub(crate) globals: Globals,
     pub(crate) builtin_docs: HashMap<LspUrl, String>,
     pub(crate) builtin_symbols: HashMap<String, LspUrl>,
+    pub(crate) suppression_rules: Vec<GlobLintSuppression>,
 }
 
 /// The outcome of evaluating (checking, parsing or running) given starlark code.
@@ -97,16 +99,18 @@ impl Context { print_non_none: bool, prelude: &[PathBuf], module: bool, + dialect: Dialect, + globals: Globals, + suppression_rules: Vec, ) -> anyhow::Result { - let globals = globals(); let prelude: Vec<_> = prelude .iter() .map(|x| { let env = Module::new(); { let mut eval = Evaluator::new(&env); - let module = AstModule::parse_file(x, &dialect())?; - eval.eval_module(module, &globals)?; + let module = AstModule::parse_file(x, &dialect).into_anyhow_result()?; + eval.eval_module(module, &globals).into_anyhow_result()?; } env.freeze() }) @@ -117,41 +121,28 @@ impl Context { } else { None }; - let mut builtins: HashMap> = HashMap::new(); + let mut builtin_docs: HashMap = HashMap::new(); let mut builtin_symbols: HashMap = HashMap::new(); - for doc in get_registered_starlark_docs() { - let uri = Self::url_for_doc(&doc); - builtin_symbols.insert(doc.id.name.clone(), uri.clone()); - builtins.entry(uri).or_default().push(doc); + for (name, item) in globals.documentation().members { + let uri = Url::parse(&format!("starlark:{name}.bzl"))?; + let uri = LspUrl::try_from(uri)?; + builtin_docs.insert(uri.clone(), item.render_as_code(&name)); + builtin_symbols.insert(name, uri); } - let builtin_docs = builtins - .into_iter() - .map(|(u, ds)| (u, render_docs_as_code(&ds))) - .collect(); Ok(Self { mode, print_non_none, prelude, module, + dialect, + globals, builtin_docs, builtin_symbols, + suppression_rules, }) } - fn url_for_doc(doc: &Doc) -> LspUrl { - let url = match &doc.item { - DocItem::Module(_) => Url::parse("starlark:/native/builtins.bzl").unwrap(), - DocItem::Object(_) => { - Url::parse(&format!("starlark:/native/builtins/{}.bzl", doc.id.name)).unwrap() - } - DocItem::Function(_) | DocItem::Property(_) => { - Url::parse("starlark:/native/builtins.bzl").unwrap() - } - }; - LspUrl::try_from(url).unwrap() - } - fn new_module(prelude: &[FrozenModule]) -> Module { let module = Module::new(); for p in prelude { @@ -165,7 +156,7 @@ impl Context { let mut errors = Either::Left(iter::empty()); let final_ast = match self.mode { ContextMode::Check => { - warnings = Either::Right(self.check(&ast)); + warnings = Either::Right(self.check(file, &ast)); Some(ast) } ContextMode::Run => { @@ -179,14 +170,14 @@ impl Context { } } - // Convert an anyhow over iterator of EvalMessage, into an iterator of EvalMessage + // Convert a result over iterator of EvalMessage, into an iterator of EvalMessage fn err( file: &str, - result: anyhow::Result>>, + result: starlark::Result>>, ) -> EvalResult> { match result { Err(e) => EvalResult { - messages: Either::Left(iter::once(EvalMessage::from_anyhow(Path::new(file), &e))), + messages: Either::Left(iter::once(EvalMessage::from_error(Path::new(file), &e))), ast: None, }, Ok(res) => EvalResult { @@ -203,7 +194,9 @@ impl Context { let file = "expression"; Self::err( file, - AstModule::parse(file, content, &dialect()).map(|module| self.go(file, module)), + AstModule::parse(file, content, &self.dialect) + .map(|module| self.go(file, module)) + .map_err(Into::into), ) } @@ -213,7 +206,7 @@ impl Context { filename, fs::read_to_string(file) .map(|content| self.file_with_contents(filename, content)) - .map_err(|e| e.into()), + .map_err(|e| anyhow::Error::from(e).into()), ) } @@ -224,7 +217,9 @@ impl Context { ) -> EvalResult> { Self::err( filename, - AstModule::parse(filename, content, &dialect()).map(|module| self.go(filename, module)), + AstModule::parse(filename, content, &self.dialect) + .map(|module| self.go(filename, module)) + .map_err(Into::into), ) } @@ 
diff --git a/starlark-rust/starlark_bin/bin/main.rs b/starlark-rust/starlark_bin/bin/main.rs index 21936bf4544f7..156400c8665df 100644 --- a/starlark-rust/starlark_bin/bin/main.rs +++ b/starlark-rust/starlark_bin/bin/main.rs @@ -15,12 +15,9 @@ * limitations under the License. */ -// Plugins -#![cfg_attr(feature = "gazebo_lint", feature(plugin))] -#![cfg_attr(feature = "gazebo_lint", allow(deprecated))] // :( -#![cfg_attr(feature = "gazebo_lint", plugin(gazebo_lint))] // Disagree these are good hints #![allow(clippy::type_complexity)] +#![allow(clippy::manual_map)] use std::ffi::OsStr; use std::fmt; @@ -28,24 +25,22 @@ use std::fmt::Display; use std::path::PathBuf; use std::sync::Arc; -use anyhow::Context as _; +use clap::builder::StringValueParser; +use clap::builder::TypedValueParser; use clap::Parser; use clap::ValueEnum; use dupe::Dupe; use eval::Context; use itertools::Either; -use itertools::Itertools; use starlark::analysis::LintMessage; -use starlark::docs::get_registered_starlark_docs; -use starlark::docs::render_docs_as_code; -use starlark::docs::Doc; +use starlark::docs::markdown::render_doc_item_no_link; use starlark::docs::DocItem; -use starlark::docs::MarkdownFlavor; -use starlark::docs::RenderMarkdown; use starlark::environment::Globals; use starlark::errors::EvalMessage; use starlark::errors::EvalSeverity; use starlark::read_line::ReadLine; +use starlark::syntax::Dialect; +use suppression::GlobLintSuppression; use walkdir::WalkDir; use crate::eval::ContextMode; @@ -53,9 +48,10 @@ use crate::eval::ContextMode; mod bazel; mod dap; mod eval; +mod suppression; #[derive(Debug, Parser)] -#[command(name = "starlark", about = "Evaluate Starlark code")] +#[command(name = "starlark", about = "Evaluate Starlark code", version)] struct Args { #[arg( long = "lsp", @@ -129,6 +125,13 @@ )] evaluate: Vec<String>, + #[arg( + long = "dialect", + help = "Dialect to use for features and globals.", + default_value = "extended" + )] + dialect: ArgsDialect, + #[arg( id = "files", value_name = "FILE", @@ -142,6 +145,15 @@ help = "Run in Bazel mode (temporary, will be removed)" )] bazel: bool, + + #[arg( + long = "suppression", + help = "Specify lint rules to suppress. You may specify an optional glob pattern to \ +suppress rules for files matching the pattern, in the format of `[<glob>:]<rule>[,<rule>]*`.", + requires = "check", + value_parser = StringValueParser::new().try_map(GlobLintSuppression::try_parse) + )] + suppression: Vec<GlobLintSuppression>, } #[derive(ValueEnum, Copy, Clone, Dupe, Debug, PartialEq, Eq)] @@ -151,6 +163,12 @@ enum ArgsDoc { Code, } +#[derive(ValueEnum, Copy, Clone, Dupe, Debug, PartialEq, Eq)] +enum ArgsDialect { + Standard, + Extended, +} + // Treat directories as things to recursively walk for .<extension> files, // and everything else as normal files. fn expand_dirs(extension: &str, xs: Vec<PathBuf>) -> impl Iterator<Item = PathBuf> { @@ -215,7 +233,8 @@ fn drain( if json { println!( "{}", - serde_json::to_string(&LintMessage::new(x)).context("serializing lint to JSON")? + serde_json::to_string(&LintMessage::new(x)) + .map_err(|e| anyhow::anyhow!("Failed to serialize lint to JSON: {e}"))? ); } else if let Some(error) = x.full_error_with_span { let mut error = error.to_owned(); @@ -259,8 +278,14 @@ fn main() -> anyhow::Result<()> { let args = argfile::expand_args(argfile::parse_fromfile, argfile::PREFIX)?; let args: Args = Args::parse_from(args); + + let (dialect, globals) = match args.dialect { + ArgsDialect::Standard => (Dialect::Standard, Globals::standard()), + ArgsDialect::Extended => (Dialect::Extended, Globals::extended_internal()), + }; + if args.dap { - dap::server(); + dap::server(dialect, globals); } else { let is_interactive = args.evaluate.is_empty() && args.files.is_empty(); @@ -274,7 +299,14 @@ fn main() -> anyhow::Result<()> { // TODO: Remove this when extracting the Bazel binary to its own // repository, after the LspContext interface stabilizes. if args.bazel { - bazel::main(args.lsp, print_non_none, is_interactive, &prelude)?; + bazel::main( + args.lsp, + print_non_none, + is_interactive, + &prelude, + dialect, + globals, + )?; return Ok(()); } @@ -287,31 +319,29 @@ fn main() -> anyhow::Result<()> { print_non_none, &prelude, is_interactive, + dialect, + globals, + args.suppression, )?; if args.lsp { ctx.mode = ContextMode::Check; starlark_lsp::server::stdio_server(ctx)?; } else if let Some(docs) = args.docs { - let mut builtin = get_registered_starlark_docs(); - builtin.push(Doc::named_item( - "globals".to_owned(), - DocItem::Module(Globals::extended_internal().documentation()), - )); + let global_module = DocItem::Module(Globals::extended_internal().documentation()); match docs { ArgsDoc::Markdown | ArgsDoc::Lsp => { - let mode = if docs == ArgsDoc::Markdown { - MarkdownFlavor::DocFile - } else { - MarkdownFlavor::LspSummary - }; println!( "{}", - builtin.iter().map(|x| x.render_markdown(mode)).join("\n\n") + if docs == ArgsDoc::Markdown { + render_doc_item_no_link("globals", &global_module) + } else { + String::new() + } ) } - ArgsDoc::Code => println!("{}", render_docs_as_code(&builtin)), + ArgsDoc::Code => println!("{}", global_module.render_as_code("globals")), }; } else if is_interactive { interactive(&ctx)?; diff --git a/starlark-rust/starlark_bin/bin/suppression.rs b/starlark-rust/starlark_bin/bin/suppression.rs new file mode 100644 index 0000000000000..f99c7d9794245 --- /dev/null +++ b/starlark-rust/starlark_bin/bin/suppression.rs @@ -0,0 +1,79 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::collections::HashSet; + +use globset::Glob; +use globset::GlobMatcher; + +#[derive(Debug, Clone)] +pub struct GlobLintSuppression { + /// Glob pattern to match the suppression rule on. + pattern: GlobMatcher, + + /// List of rules to be suppressed. This should be the name specified in + /// [`starlark::analysis::Lint::short_name`]. + rules: HashSet<String>, +} + +impl GlobLintSuppression { + pub fn try_parse(input: impl AsRef<str>) -> anyhow::Result<Self> { + let rule = input.as_ref(); + let (pattern, rules) = if let Some((lhs, rhs)) = rule.split_once(':') { + (lhs, rhs) + } else { + ("*", rule) + }; + + let rules = rules.split(',').map(|s| s.trim().to_owned()).collect(); + + Ok(Self { + pattern: Glob::new(pattern)?.compile_matcher(), + rules, + }) + } + + pub fn is_suppressed(&self, filename: &str, rule: &str) -> bool { + if self.pattern.is_match(filename) { + return self.rules.contains(rule); + } + + false + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parsing() { + let suppression = GlobLintSuppression::try_parse("*.bzl:rule1,rule2").unwrap(); + + assert!(suppression.is_suppressed("foo/bar.bzl", "rule1")); + assert!(suppression.is_suppressed("foo/bar.bzl", "rule2")); + assert!(!suppression.is_suppressed("foo/baz.star", "rule2")); + } + + #[test] + fn test_parsing_default() { + let suppression = GlobLintSuppression::try_parse("rule1").unwrap(); + + assert!(suppression.is_suppressed("foo/bar.bzl", "rule1")); + assert!(!suppression.is_suppressed("foo/bar.bzl", "rule2")); + } +} diff --git a/starlark-rust/starlark_bin/src/lib.rs b/starlark-rust/starlark_bin/src/lib.rs deleted file mode 100644 index 450ea8a01a754..0000000000000 --- a/starlark-rust/starlark_bin/src/lib.rs +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! `starlark` binary.
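suppression.rs above keys each set of lint short names off a `globset` pattern, defaulting to `*` when no `<glob>:` prefix is present, so on the command line this reads like `starlark --check --suppression '*.bzl:rule1,rule2' foo.bzl` (rule names here are illustrative). One detail the unit tests rely on: `globset` does not treat `/` as a separator unless `literal_separator` is enabled, which is why a bare `*.bzl` pattern also matches files in subdirectories. A self-contained sketch of just that matching behavior:

```rust
use globset::Glob;

// Default globset semantics: `*` may match across `/`, so `*.bzl`
// suppresses rules for .bzl files at any depth.
fn main() -> Result<(), globset::Error> {
    let matcher = Glob::new("*.bzl")?.compile_matcher();
    assert!(matcher.is_match("foo/bar.bzl"));
    assert!(!matcher.is_match("foo/baz.star"));
    Ok(())
}
```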
diff --git a/starlark-rust/starlark_derive/BUCK b/starlark-rust/starlark_derive/BUCK index 0efc99e4775a2..94be0744fd14f 100644 --- a/starlark-rust/starlark_derive/BUCK +++ b/starlark-rust/starlark_derive/BUCK @@ -1,5 +1,4 @@ load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") -load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") oncall("build_infra") diff --git a/starlark-rust/starlark_derive/Cargo.toml b/starlark-rust/starlark_derive/Cargo.toml index f89567a6feeeb..549be2e29fae8 100644 --- a/starlark-rust/starlark_derive/Cargo.toml +++ b/starlark-rust/starlark_derive/Cargo.toml @@ -1,18 +1,18 @@ [package] -name = "starlark_derive" -version = "0.9.0" -edition = "2021" -license = "Apache-2.0" +authors = ["Facebook"] description = "Derive helpers for the starlark package." documentation = "https://docs.rs/starlark_derive" -repository = "https://github.com/facebookexperimental/starlark-rust" -authors = ["Facebook"] +edition = "2021" +license = "Apache-2.0" +name = "starlark_derive" +repository = "https://github.com/facebook/starlark-rust" +version = "0.12.0" [lib] proc-macro = true [dependencies] -proc-macro2 = "1.0" -syn = { version = "2", features = ["full", "extra-traits", "visit", "visit-mut"] } dupe = { workspace = true } +proc-macro2 = "1.0" quote = "1.0" +syn = { version = "2", features = ["extra-traits", "full", "visit", "visit-mut"] } diff --git a/starlark-rust/starlark_derive/src/alloc_value.rs b/starlark-rust/starlark_derive/src/alloc_value.rs new file mode 100644 index 0000000000000..acf8482c20a46 --- /dev/null +++ b/starlark-rust/starlark_derive/src/alloc_value.rs @@ -0,0 +1,141 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +use quote::quote; +use syn::spanned::Spanned; +use syn::DeriveInput; +use syn::Fields; + +use crate::util::DataEnumUtil; +use crate::util::DeriveInputUtil; +use crate::v_lifetime::find_v_lifetime; + +#[derive(Copy, Clone)] +enum WhichTrait { + AllocValue, + AllocFrozenValue, +} + +pub(crate) fn derive_alloc_value(input: proc_macro::TokenStream) -> proc_macro::TokenStream { + let input = syn::parse_macro_input!(input as DeriveInput); + match derive_alloc_value_impl(input, WhichTrait::AllocValue) { + Ok(tokens) => tokens.into(), + Err(err) => err.to_compile_error().into(), + } +} + +pub(crate) fn derive_alloc_frozen_value(input: proc_macro::TokenStream) -> proc_macro::TokenStream { + let input = syn::parse_macro_input!(input as DeriveInput); + match derive_alloc_value_impl(input, WhichTrait::AllocFrozenValue) { + Ok(tokens) => tokens.into(), + Err(err) => err.to_compile_error().into(), + } +} + +fn derive_alloc_value_impl( + derive_input: DeriveInput, + which_trait: WhichTrait, +) -> syn::Result<proc_macro2::TokenStream> { + let derive_input = DeriveInputUtil::new(&derive_input)?; + let DeriveInputUtil::Enum(en) = derive_input else { + return Err(syn::Error::new( + derive_input.span(), + "`AllocValue` can only be derived for enums", + )); + }; + let (_impl_generics, type_generics, where_clause) = derive_input.generics.split_for_impl(); + + let mut generics = derive_input.generics.clone(); + match which_trait { + WhichTrait::AllocValue => { + let lifetime = find_v_lifetime(&derive_input.generics)?; + if lifetime.is_none() { + generics.params.push(syn::parse_quote!('v)); + } + } + WhichTrait::AllocFrozenValue => {} + } + + let type_name = &derive_input.ident; + + let body = alloc_value_body(en, which_trait)?; + + let item_impl: syn::ItemImpl = match which_trait { + WhichTrait::AllocValue => { + syn::parse_quote_spanned! { + derive_input.span() => + impl #generics starlark::values::AllocValue<'v> for #type_name #type_generics #where_clause { + fn alloc_value( + self, + heap: &'v starlark::values::Heap, + ) -> starlark::values::Value<'v> { + let _ignore_heap_for_empty_enums = heap; + #body + } + } + } + } + WhichTrait::AllocFrozenValue => { + syn::parse_quote_spanned! { + derive_input.span() => + impl #generics starlark::values::AllocFrozenValue for #type_name #type_generics #where_clause { + fn alloc_frozen_value( + self, + heap: &starlark::values::FrozenHeap, + ) -> starlark::values::FrozenValue { + let _ignore_heap_for_empty_enums = heap; + #body + } + } + } + } + }; + + Ok(quote! { + #item_impl + }) +} + +fn alloc_value_body(en: DataEnumUtil, which_trait: WhichTrait) -> syn::Result<syn::Expr> { + en.match_self(|variant, fields| { + match &variant.fields { + Fields::Unnamed(_) => {} + _ => { + return Err(syn::Error::new( + variant.span(), + "Enum variant must have a single unnamed field", + )); + } + } + let [(var, _field)] = fields.as_slice() else { + return Err(syn::Error::new( + variant.span(), + "Enum variant must have exactly one field", + )); + }; + match which_trait { + WhichTrait::AllocValue => Ok(syn::parse_quote_spanned! { + variant.span() => + starlark::values::AllocValue::alloc_value(#var, heap) + }), + WhichTrait::AllocFrozenValue => Ok(syn::parse_quote_spanned! { + variant.span() => + starlark::values::AllocFrozenValue::alloc_frozen_value(#var, heap) + }), + } + }) +}
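For reference, the body this derive emits for a conforming enum simply delegates to each variant's single field. A hand-inlined sketch of that expansion for a hypothetical `IntOrStr` enum (illustrative only; the real generated impl also carries the `'v` lifetime and `_ignore_heap_for_empty_enums` plumbing shown above):

```rust
use starlark::values::{AllocValue, Heap, Value};

// Hypothetical enum in the shape the derive requires: every variant
// holds exactly one unnamed field, and each field type is AllocValue.
enum IntOrStr {
    Int(i32),
    Str(String),
}

// What the generated `alloc_value` body boils down to: match on self
// and delegate allocation to the variant's single field.
fn alloc_int_or_str<'v>(v: IntOrStr, heap: &'v Heap) -> Value<'v> {
    match v {
        IntOrStr::Int(x) => x.alloc_value(heap),
        IntOrStr::Str(x) => x.alloc_value(heap),
    }
}
```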
diff --git a/starlark-rust/starlark_derive/src/any_lifetime.rs b/starlark-rust/starlark_derive/src/any_lifetime.rs index 30d59ee9bba60..df0bf7f26cd49 100644 --- a/starlark-rust/starlark_derive/src/any_lifetime.rs +++ b/starlark-rust/starlark_derive/src/any_lifetime.rs @@ -21,6 +21,9 @@ use syn::punctuated::Punctuated; use syn::spanned::Spanned; use syn::DeriveInput; +use crate::util::DeriveInputUtil; +use crate::util::GenericsUtil; + fn punctuated_try_map( punctuated: &Punctuated, f: impl Fn(&A) -> syn::Result, @@ -126,21 +129,11 @@ pub(crate) fn derive_provides_static_type( } /// Single lifetime parameter for `ProvidesStaticType` -fn pst_lifetime<'a>( - params: impl Iterator<Item = &'a syn::GenericParam>, -) -> syn::Result<syn::Lifetime> { - let mut lifetime = None; - for param in params { - if let syn::GenericParam::Lifetime(param) = param { - if lifetime.is_some() { - return Err(syn::Error::new_spanned( - param, - "only one lifetime parameter is supported", - )); - } - lifetime = Some(param.lifetime.clone()); - } - } +fn pst_lifetime(generics: &syn::Generics) -> syn::Result<syn::Lifetime> { + let generics = GenericsUtil::new(generics); + let lifetime = generics + .assert_at_most_one_lifetime_param()? + .map(|p| p.lifetime.clone()); Ok(match lifetime { Some(lifetime) => lifetime, None => syn::parse_quote_spanned! { Span::call_site() => 'pst }, @@ -149,13 +142,14 @@ fn derive_provides_static_type_impl(input: proc_macro::TokenStream) -> syn::Result<proc_macro2::TokenStream> { let input: DeriveInput = syn::parse(input)?; + let input = DeriveInputUtil::new(&input)?; let span = input.ident.span(); let name = &input.ident; let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); - let lifetime = pst_lifetime(input.generics.params.iter())?; + let lifetime = pst_lifetime(&input.generics)?; let mut lifetimes: Vec<syn::Lifetime> = Vec::new(); let mut static_lifetimes: Vec<syn::Lifetime> = Vec::new(); diff --git a/starlark-rust/starlark_derive/src/coerce.rs b/starlark-rust/starlark_derive/src/coerce.rs index bb258046bec9c..b20c42c431c8b 100644 --- a/starlark-rust/starlark_derive/src/coerce.rs +++ b/starlark-rust/starlark_derive/src/coerce.rs @@ -38,7 +38,6 @@ use syn::Type; // This macro does two related derivations depending on whether there are any generic parameters. // -// struct A(B) ==> coerce both ways between A and B // struct A<T>(...) => coerce A<T1> to A<T2> if coerce T1 to T2 and all the fields support it pub fn derive_coerce(input: proc_macro::TokenStream) -> proc_macro::TokenStream { let input = parse_macro_input!(input as DeriveInput); @@ -52,33 +51,15 @@ fn derive_coerce_impl(input: DeriveInput) -> syn::Result syn::Result { - let lifetimes = input.generics.lifetimes().collect::<Vec<_>>(); - - let field = match &input.data { - Data::Struct(x) if x.fields.len() == 1 => x.fields.iter().next().unwrap(), - _ => { - return Err(syn::Error::new_spanned( - input, - "Type-parameter free types must be a single field struct", - )); - } - }; - - let type1 = input.ident; - let type2 = &field.ty; - Ok(quote!
{ - unsafe impl < #(#lifetimes),* > starlark::coerce::Coerce<#type1< #(#lifetimes),* >> for #type2 {} - unsafe impl < #(#lifetimes),* > starlark::coerce::Coerce<#type2> for #type1< #(#lifetimes),* > {} - }) -} - #[derive(Copy, Clone)] enum ParamNameMapping { From, diff --git a/starlark-rust/starlark_derive/src/docs.rs b/starlark-rust/starlark_derive/src/docs.rs deleted file mode 100644 index 423f0ea47d5ae..0000000000000 --- a/starlark-rust/starlark_derive/src/docs.rs +++ /dev/null @@ -1,165 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -use std::collections::HashMap; - -use proc_macro2::Ident; -use quote::format_ident; -use quote::quote; -use quote::quote_spanned; -use syn::parse_macro_input; -use syn::punctuated::Punctuated; -use syn::spanned::Spanned; -use syn::Attribute; -use syn::DeriveInput; -use syn::Expr; -use syn::ExprLit; -use syn::MetaNameValue; -use syn::Token; - -const STARLARK_DOCS_ATTRS: &str = "starlark_docs"; - -pub(crate) fn derive_docs(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - let input = parse_macro_input!(input as DeriveInput); - - expand_docs_derive(input) - .unwrap_or_else(|e| e.to_compile_error()) - .into() -} - -fn expand_docs_derive(input: DeriveInput) -> syn::Result { - let span = input.span(); - let DeriveInput { - ident: name, - generics, - attrs, - .. - } = input; - - let parsed_attrs = parse_custom_attributes(attrs)?; - - let use_inventory = if parsed_attrs.contains_key("builtin") { - quote! {} - } else { - quote! { - use starlark::__derive_refs::inventory as inventory; - } - }; - - let name_str = name.to_string(); - let custom_attrs: Vec<_> = parsed_attrs - .into_iter() - .map(|(k, v)| { - quote! { (#k, #v)} - }) - .collect(); - - // Complex values take a generic `V`, so we cannot call their impl's __generated_documentation - // function directly. We need to have a different getter, and to avoid requiring a lifetime, - // we just use FrozenValue. - // - // For now, we just assume if the struct ends in "Gen", it is a starlark_complex_value. - // It would be simple enough to make this configurable in the future if required. - let is_complex_value = name.to_string().ends_with("Gen"); - let frozen_name = match is_complex_value { - true => { - let frozen = Ident::new( - &format!( - "Frozen{}", - name_str.strip_suffix("Gen").unwrap_or(&name_str) - ), - name.span(), - ); - quote_spanned! {span=> #frozen } - } - false => { - if generics.type_params().count() != 0 { - return Err(syn::Error::new( - span, - "If a an item name does not end in `Gen`, it must have no type parameters", - )); - } - - quote_spanned! {span=> #name } - } - }; - - let namespace_fn_name = format_ident!("_{}_register_starlark_docs", name_str.to_lowercase()); - - Ok(quote_spanned! {span=> - fn #namespace_fn_name() { - // `ctor` fails at compile time on wasm32. 
- #[cfg(not(target_arch = "wasm32"))] - { - #use_inventory - starlark::__derive_refs::inventory::submit! { - starlark::docs::RegisteredDoc { - getter: || starlark::docs::RegisteredDoc::for_type::<#frozen_name>(&[#(#custom_attrs),*]), - } - }; - } - } - }) -} - -fn get_attrs(attr: Attribute) -> syn::Result> { - let mut found = HashMap::new(); - let args: Punctuated = - attr.parse_args_with(Punctuated::parse_terminated)?; - for arg in args { - match &arg { - MetaNameValue { - path, - value: - Expr::Lit(ExprLit { - lit: syn::Lit::Str(s), - .. - }), - .. - } => { - let ident = path.get_ident().unwrap(); - let attr_name = ident.to_string(); - if found.insert(attr_name, s.value()).is_some() { - return Err(syn::Error::new( - arg.span(), - format!("Argument {} was specified twice", ident), - )); - } - } - MetaNameValue { path, .. } => { - return Err(syn::Error::new( - arg.span(), - format!( - "Argument {} must have a string literal value", - path.get_ident().unwrap(), - ), - )); - } - } - } - Ok(found) -} - -fn parse_custom_attributes(attrs: Vec) -> syn::Result> { - for attr in attrs { - if attr.path().is_ident(STARLARK_DOCS_ATTRS) { - return get_attrs(attr); - } - } - - Ok(HashMap::new()) -} diff --git a/starlark-rust/starlark_derive/src/for_each_field.rs b/starlark-rust/starlark_derive/src/for_each_field.rs deleted file mode 100644 index ac586e73972b5..0000000000000 --- a/starlark-rust/starlark_derive/src/for_each_field.rs +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -use proc_macro2::Ident; -use proc_macro2::TokenStream; -use quote::format_ident; -use quote::quote; -use quote::quote_spanned; -use syn::Data; -use syn::DataEnum; -use syn::DataStruct; -use syn::Field; -use syn::Fields; -use syn::Variant; - -fn for_each_field_in_struct( - s: &DataStruct, - field_handler: impl Fn(&Ident, &Field) -> syn::Result, -) -> syn::Result { - match &s.fields { - Fields::Unit => Ok(quote! {}), - Fields::Unnamed(fields) => { - let field_names: Vec = (0..fields.unnamed.len()) - .map(|i| format_ident!("f{}", i)) - .collect(); - let field_handlers = field_names - .iter() - .zip(fields.unnamed.iter()) - .map(|(name, field)| field_handler(name, field)) - .collect::>>()?; - Ok(quote_spanned! { - s.struct_token.span => { - let Self(#(#field_names),*) = self; - #(#field_handlers)* - } - }) - } - Fields::Named(fields) => { - let field_names = fields.named.iter().map(|f| &f.ident); - let field_handlers = fields - .named - .iter() - .map(|f| field_handler(f.ident.as_ref().unwrap(), f)) - .collect::>>()?; - Ok(quote_spanned! { - s.struct_token.span => { - let Self { #(#field_names),* } = self; - #(#field_handlers)* - } - }) - } - } -} - -fn for_each_field_in_enum_variant( - v: &Variant, - field_handler: &impl Fn(&Ident, &Field) -> syn::Result, -) -> syn::Result { - let variant_name = &v.ident; - match &v.fields { - Fields::Unit => Ok(quote_spanned! 
{ - v.ident.span() => Self::#variant_name => {} - }), - Fields::Unnamed(fields) => { - let field_names: Vec = (0..fields.unnamed.len()) - .map(|i| format_ident!("f{}", i)) - .collect(); - let field_handlers = field_names - .iter() - .zip(fields.unnamed.iter()) - .map(|(name, field)| field_handler(name, field)) - .collect::>>()?; - Ok(quote_spanned! { - v.ident.span() => - Self::#variant_name(#(#field_names),*) => { - #(#field_handlers)* - } - }) - } - Fields::Named(fields) => { - let field_names = fields.named.iter().map(|f| &f.ident); - let field_handlers = fields - .named - .iter() - .map(|f| field_handler(f.ident.as_ref().unwrap(), f)) - .collect::>>()?; - Ok(quote_spanned! { - v.ident.span() => - Self::#variant_name { #(#field_names),* } => { - #(#field_handlers)* - } - }) - } - } -} - -fn for_each_field_in_enum( - e: &DataEnum, - field_handler: impl Fn(&Ident, &Field) -> syn::Result, -) -> syn::Result { - let variants = e - .variants - .iter() - .map(|v| for_each_field_in_enum_variant(v, &field_handler)) - .collect::, _>>()?; - Ok(quote_spanned! { - e.enum_token.span => - match self { - #(#variants)* - } - }) -} - -/// Generate code for each struct of enum field of the given type. -pub(crate) fn for_each_field( - data: &Data, - field_handler: impl Fn(&Ident, &Field) -> syn::Result, -) -> syn::Result { - match data { - Data::Struct(s) => for_each_field_in_struct(s, field_handler), - Data::Enum(e) => for_each_field_in_enum(e, field_handler), - Data::Union(u) => Err(syn::Error::new_spanned( - u.union_token, - "Unions are not supported", - )), - } -} diff --git a/starlark-rust/starlark_derive/src/freeze.rs b/starlark-rust/starlark_derive/src/freeze.rs index cf9ec33d2a3f1..30e548759e83c 100644 --- a/starlark-rust/starlark_derive/src/freeze.rs +++ b/starlark-rust/starlark_derive/src/freeze.rs @@ -17,26 +17,20 @@ use proc_macro2::Ident; use proc_macro2::TokenStream; -use quote::format_ident; use quote::quote; use quote::quote_spanned; use syn::parse::ParseStream; use syn::parse_macro_input; use syn::spanned::Spanned; use syn::Attribute; -use syn::Data; -use syn::DataEnum; -use syn::DataStruct; use syn::DeriveInput; -use syn::Error; -use syn::Fields; use syn::GenericParam; -use syn::Index; use syn::LitStr; use syn::Token; -use syn::Variant; use syn::WherePredicate; +use crate::util::DeriveInputUtil; + struct Input<'a> { input: &'a DeriveInput, } @@ -86,7 +80,10 @@ impl<'a> Input<'a> { output_params.push(quote_spanned! { span=> 'static }); } GenericParam::Const(_) => { - return Err(Error::new_spanned(param, "const generics not supported")); + return Err(syn::Error::new_spanned( + param, + "const generics not supported", + )); } } } @@ -104,11 +101,23 @@ fn derive_freeze_impl(input: DeriveInput) -> syn::Result { let name = &input.input.ident; - let opts = extract_options(&input.input.attrs)?; + let FreezeDeriveOptions { + validator, + bounds, + identity, + } = extract_options(&input.input.attrs)?; + + if let Some(identity) = identity { + return Err(syn::Error::new_spanned( + identity, + "`identity` can only be used on fields", + )); + } + let (impl_params, input_params, output_params) = - input.format_impl_generics(opts.bounds.is_some())?; + input.format_impl_generics(bounds.is_some())?; - let validate_body = match opts.validator { + let validate_body = match validator { Some(validator) => quote_spanned! { span=> #validator(&frozen)?; @@ -116,12 +125,12 @@ fn derive_freeze_impl(input: DeriveInput) -> syn::Result { None => quote_spanned! 
{ span=> }, }; - let bounds_body = match opts.bounds { + let bounds_body = match bounds { Some(bounds) => quote_spanned! { span=> where #bounds }, None => quote_spanned! { span=> }, }; - let body = freeze_impl(name, &input.input.data)?; + let body = freeze_impl(input.input)?; let gen = syn::parse_quote_spanned! { span=> @@ -139,14 +148,19 @@ fn derive_freeze_impl(input: DeriveInput) -> syn::Result { Ok(gen) } +syn::custom_keyword!(identity); + #[derive(Default)] struct FreezeDeriveOptions { + /// `#[freeze(validator = function)]`. validator: Option, + /// `#[freeze(bounds = ...)]`. bounds: Option, + /// `#[freeze(identity)]`. + identity: Option, } /// Parse a #[freeze(validator = function)] annotation. -#[cfg_attr(feature = "gazebo_lint", allow(gazebo_lint_impl_dupe))] // The custom_keyword macro fn extract_options(attrs: &[Attribute]) -> syn::Result { syn::custom_keyword!(validator); syn::custom_keyword!(bounds); @@ -160,19 +174,30 @@ fn extract_options(attrs: &[Attribute]) -> syn::Result { attr.parse_args_with(|input: ParseStream| { loop { - if input.parse::().is_ok() { + if let Some(validator) = input.parse::>()? { if opts.validator.is_some() { - return Err(input.error("`validator` was set twice")); + return Err(syn::Error::new_spanned( + validator, + "`validator` was set twice", + )); } input.parse::()?; opts.validator = Some(input.parse()?); - } else if input.parse::().is_ok() { + } else if let Some(bounds) = input.parse::>()? { if opts.bounds.is_some() { - return Err(input.error("`bounds` was set twice")); + return Err(syn::Error::new_spanned(bounds, "`bounds` was set twice")); } input.parse::()?; let bounds_input = input.parse::()?; opts.bounds = Some(bounds_input.parse()?); + } else if let Some(identity) = input.parse::>()? { + if opts.identity.is_some() { + return Err(syn::Error::new_spanned( + identity, + "`identity` was set twice", + )); + } + opts.identity = Some(identity); } else { return Err(input.lookahead1().error()); } @@ -189,157 +214,47 @@ fn extract_options(attrs: &[Attribute]) -> syn::Result { Ok(opts) } -/// Parse attribute `#[freeze(identity)]`. -/// -/// Currently it fails on any attribute argument other than `id`. -#[cfg_attr(feature = "gazebo_lint", allow(gazebo_lint_impl_dupe))] // The custom_keyword macro -fn is_identity(attrs: &[Attribute]) -> syn::Result { - syn::custom_keyword!(identity); - - for attr in attrs.iter() { - if !attr.path().is_ident("freeze") { - continue; - } - - let ignore = attr.parse_args_with(|input: ParseStream| { - let ignore = input.parse::>()?.is_some(); - Ok(ignore) - })?; - - if !ignore { - continue; - } - - return Ok(true); - } - - Ok(false) -} - -fn freeze_struct(name: &Ident, data: &DataStruct) -> syn::Result { - let span = name.span(); - let res = match data.fields { - Fields::Named(ref fields) => { - let xs = fields - .named - .iter() - .map(|f| { - let name = &f.ident; - let res = if is_identity(&f.attrs)? { - quote_spanned! { span=> - #name: self.#name, - } - } else { - quote_spanned! { span=> - #name: starlark::values::Freeze::freeze(self.#name, freezer)?, - } - }; - - syn::Result::Ok(res) - }) - .collect::, _>>()?; - syn::parse_quote_spanned! 
{ - span=> - #name { - #(#xs)* +fn freeze_impl(derive_input: &DeriveInput) -> syn::Result { + let derive_input = DeriveInputUtil::new(derive_input)?; + derive_input.match_self(|struct_or_enum_variant, fields| { + let fields: Vec = fields + .iter() + .map(|(ident, f)| { + let span = ident.span(); + + let FreezeDeriveOptions { + validator, + bounds, + identity, + } = extract_options(&f.attrs)?; + if let Some(validator) = validator { + return Err(syn::Error::new_spanned( + validator, + "Cannot use `validator` on field", + )); } - } - } - Fields::Unnamed(ref fields) => { - let xs = fields - .unnamed - .iter() - .enumerate() - .map(|(i, f)| { - let i = Index::from(i); - - let res = if is_identity(&f.attrs)? { - quote_spanned! { span=> - self.#i, - } - } else { - quote_spanned! { - span=> - starlark::values::Freeze::freeze(self.#i, freezer)?, - } - }; - - syn::Result::Ok(res) - }) - .collect::, _>>()?; - syn::parse_quote_spanned! { - span=> - #name ( - #(#xs)* - ) - } - } - Fields::Unit => { - syn::parse_quote_spanned! { span=> #name } - } - }; - - Ok(res) -} - -fn freeze_enum_variant(name: &Ident, variant: &Variant) -> syn::Result { - let span = variant.span(); - let variant_name = &variant.ident; - match &variant.fields { - Fields::Unit => Ok(syn::parse_quote_spanned! { - span=> - #name::#variant_name => #name::#variant_name, - }), - Fields::Unnamed(fields) => { - let field_names: Vec<_> = (0..fields.unnamed.len()) - .map(|i| format_ident!("f_{}", i)) - .collect(); - Ok(syn::parse_quote_spanned! { - span=> - #name::#variant_name(#(#field_names),*) => { - #name::#variant_name( - #(starlark::values::Freeze::freeze(#field_names, freezer)?),* - ) + if let Some(bounds) = bounds { + return Err(syn::Error::new_spanned( + bounds, + "Cannot use `bounds` on field", + )); } - }) - } - Fields::Named(field) => { - let field_names: Vec<_> = field.named.iter().map(|f| &f.ident).collect(); - Ok(syn::parse_quote_spanned! { - span=> - #name::#variant_name { #(#field_names),* } => { - #name::#variant_name { - #(#field_names: starlark::values::Freeze::freeze(#field_names, freezer)?,)* - } + + if identity.is_some() { + Ok(syn::parse_quote_spanned! { span=> + #ident + }) + } else { + Ok(syn::parse_quote_spanned! { span=> + starlark::values::Freeze::freeze(#ident, freezer)? + }) } }) - } - } -} - -fn freeze_enum(name: &Ident, data: &DataEnum) -> syn::Result { - let span = name.span(); - let variants = data - .variants - .iter() - .map(|v| freeze_enum_variant(name, v)) - .collect::, _>>()?; - Ok(syn::parse_quote_spanned! { - span=> - match self { - #(#variants)* - } + .collect::>()?; + struct_or_enum_variant.construct(fields) }) } -fn freeze_impl(name: &Ident, data: &Data) -> syn::Result { - match data { - Data::Struct(data) => freeze_struct(name, data), - Data::Enum(data) => freeze_enum(name, data), - Data::Union(_) => Err(Error::new_spanned(name, "Can't derive freeze for unions")), - } -} - pub fn derive_freeze(input: proc_macro::TokenStream) -> proc_macro::TokenStream { let input = parse_macro_input!(input as DeriveInput); diff --git a/starlark-rust/starlark_derive/src/lib.rs b/starlark-rust/starlark_derive/src/lib.rs index 3852fe3d55f88..14ed5520ac95d 100644 --- a/starlark-rust/starlark_derive/src/lib.rs +++ b/starlark-rust/starlark_derive/src/lib.rs @@ -17,21 +17,16 @@ //! A proc-macro for writing functions in Rust that can be called from Starlark. 
-#![cfg_attr(feature = "gazebo_lint", feature(plugin))] -#![cfg_attr(feature = "gazebo_lint", allow(deprecated))] // :( -#![cfg_attr(feature = "gazebo_lint", plugin(gazebo_lint))] - #[allow(unused_extern_crates)] // proc_macro is very special extern crate proc_macro; use proc_macro::TokenStream; +mod alloc_value; mod any_lifetime; mod attrs; mod bc; mod coerce; -mod docs; -mod for_each_field; mod freeze; mod module; mod serde; @@ -39,6 +34,8 @@ mod starlark_type_repr; mod starlark_value; mod trace; mod unpack_value; +mod util; +mod v_lifetime; mod visit_span; mod vtable; @@ -89,7 +86,7 @@ mod vtable; /// There are two special arguments, distinguished by their type, which provides access to interpreter state: /// /// * `heap: &'v Heap` gives access to the Starlark heap, for allocating things. -/// * `eval: &mut Evaluator<'v, '_>` gives access to the Starlark evaluator, which can be used to look at interpreter state. +/// * `eval: &mut Evaluator<'v, '_, '_>` gives access to the Starlark evaluator, which can be used to look at interpreter state. /// /// A module can be used to define globals (with `GlobalsBuilder`) or methods on an object (with `MethodsBuilder`). /// In the case of methods, the first argument to each function will be the object itself, typically named `this`. @@ -167,11 +164,23 @@ pub fn derive_starlark_type_repr(input: proc_macro::TokenStream) -> proc_macro:: } /// Derive the `UnpackValue` trait. -#[proc_macro_derive(UnpackValue, attributes(starlark))] +#[proc_macro_derive(UnpackValue)] pub fn derive_unpack_value(input: proc_macro::TokenStream) -> proc_macro::TokenStream { unpack_value::derive_unpack_value(input) } +/// Derive the `AllocValue` trait. +#[proc_macro_derive(AllocValue)] +pub fn derive_alloc_value(input: proc_macro::TokenStream) -> proc_macro::TokenStream { + alloc_value::derive_alloc_value(input) +} + +/// Derive the `AllocFrozenValue` trait. +#[proc_macro_derive(AllocFrozenValue)] +pub fn derive_alloc_frozen_value(input: proc_macro::TokenStream) -> proc_macro::TokenStream { + alloc_value::derive_alloc_frozen_value(input) +} + /// Derive accessor methods that are designed to be used from {has,get,dir}_attr /// in an `impl StarlarkValue` block. All fields in the struct that are not /// marked with #[starlark(skip)] are exported to Starlark code as attributes. @@ -182,30 +191,6 @@ pub fn derive_starlark_attrs(input: proc_macro::TokenStream) -> proc_macro::Toke attrs::derive_attrs(input) } -/// Generate an accessor function on the provided type that returns its documentation -/// based on `StarlarkValue::get_methods()`. This macro requires that the type implements -/// `starlark::StarlarkValue`. -/// -/// Types that derive `StarlarkDocs` are also registered automatically with the `inventory` crate. -/// To get all types annotated with `StarlarkDocs`, see `starlark::docs::get_registered_starlark_docs()` -/// -/// Note that for statically linked binaries, documentation from all compiled crates in the binary -/// will be included. -/// -/// For dynamically linked binaries, documentation will only be able to retrieved after the crate's -/// library is `dlopen()`ed. -/// -/// `#[starlark_docs(key="value", second_key="second_value",...)]` can be used to insert -/// arbitrary keys and string values into the generated `Docs::custom_attrs` for use -/// by documentation tooling. -/// -/// Types provided by the `starlark` library itself will have the `builtin` key set to either -/// `standard` or `extension` depending on whether the type is part of the Starlark standard. 
-#[proc_macro_derive(StarlarkDocs, attributes(starlark_docs))] -pub fn derive_starlark_docs(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - docs::derive_docs(input) -} - /// Generate `{has,get,dir}_attr` in the `StarlarkValue` impl block that proxy /// to the ones generated by `derive(StarlarkAttrs)` #[proc_macro] diff --git a/starlark-rust/starlark_derive/src/module.rs b/starlark-rust/starlark_derive/src/module.rs new file mode 100644 index 0000000000000..6f9b9e670ef4a --- /dev/null +++ b/starlark-rust/starlark_derive/src/module.rs @@ -0,0 +1,42 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +pub(crate) mod param_spec; +pub(crate) mod parse; +mod render; +pub(crate) mod simple_param; +mod typ; +mod util; + +use proc_macro::TokenStream; +use syn::parse_macro_input; +use syn::ItemFn; + +pub(crate) fn starlark_module(attr: TokenStream, input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as ItemFn); + + fn starlark_module_impl(attr: TokenStream, input: ItemFn) -> syn::Result { + assert!(attr.is_empty()); + let x = parse::parse(input)?; + Ok(render::render(x)?.into()) + } + + match starlark_module_impl(attr, input) { + Ok(x) => x, + Err(e) => e.to_compile_error().into(), + } +} diff --git a/starlark-rust/starlark_derive/src/module/mod.rs b/starlark-rust/starlark_derive/src/module/mod.rs deleted file mode 100644 index e726a3182d647..0000000000000 --- a/starlark-rust/starlark_derive/src/module/mod.rs +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -pub(crate) mod parse; -mod render; -mod typ; -mod util; - -use proc_macro::TokenStream; -use syn::parse_macro_input; -use syn::ItemFn; - -pub(crate) fn starlark_module(attr: TokenStream, input: TokenStream) -> TokenStream { - let input = parse_macro_input!(input as ItemFn); - - fn starlark_module_impl(attr: TokenStream, input: ItemFn) -> syn::Result { - assert!(attr.is_empty()); - let x = parse::parse(input)?; - Ok(render::render(x)?.into()) - } - - match starlark_module_impl(attr, input) { - Ok(x) => x, - Err(e) => e.to_compile_error().into(), - } -} diff --git a/starlark-rust/starlark_derive/src/module/param_spec.rs b/starlark-rust/starlark_derive/src/module/param_spec.rs new file mode 100644 index 0000000000000..10ac3c7c32929 --- /dev/null +++ b/starlark-rust/starlark_derive/src/module/param_spec.rs @@ -0,0 +1,136 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use crate::module::typ::StarArg; +use crate::module::typ::StarArgPassStyle; + +/// Function parameters validated and split into modes. +pub(crate) struct ParamSpec<'a> { + pub(crate) pos_only: Vec<&'a StarArg>, + pub(crate) pos_or_named: Vec<&'a StarArg>, + pub(crate) args: Option<&'a StarArg>, + pub(crate) named_only: Vec<&'a StarArg>, + pub(crate) kwargs: Option<&'a StarArg>, +} + +impl<'a> ParamSpec<'a> { + pub(crate) fn split(star_args: &'a [StarArg]) -> syn::Result> { + #[derive(PartialEq, Eq, PartialOrd, Ord)] + enum CurrentParamStyle { + PosOnly, + PosOrNamed, + NamedOnly, + NoMore, + } + + let mut seen_optional = false; + + let mut pos_only = Vec::new(); + let mut pos_or_named = Vec::new(); + let mut args = None; + let mut named_only = Vec::new(); + let mut kwargs = None; + + let mut last_param_style = CurrentParamStyle::PosOnly; + for arg in star_args { + match arg.pass_style { + StarArgPassStyle::Args => { + if last_param_style >= CurrentParamStyle::NamedOnly { + return Err(syn::Error::new( + arg.span, + "`args` cannot follow named-only parameters", + )); + } + if args.is_some() { + return Err(syn::Error::new( + arg.span, + "Cannot have more than one `args` parameter (internal error)", + )); + } + args = Some(arg); + last_param_style = CurrentParamStyle::NamedOnly; + } + StarArgPassStyle::Kwargs => { + if last_param_style == CurrentParamStyle::NoMore { + return Err(syn::Error::new( + arg.span, + "Cannot have more than one `kwargs` parameter", + )); + } + if kwargs.is_some() { + return Err(syn::Error::new( + arg.span, + "Cannot have more than one `kwargs` parameter (internal error)", + )); + } + kwargs = Some(arg); + last_param_style = CurrentParamStyle::NoMore; + } + StarArgPassStyle::PosOnly => { + if last_param_style > CurrentParamStyle::PosOnly { + return Err(syn::Error::new( + arg.span, + "Positional-only parameter after non-positional-only", + )); + } + last_param_style = CurrentParamStyle::PosOnly; + pos_only.push(arg); + } + StarArgPassStyle::PosOrNamed => { + if last_param_style > 
CurrentParamStyle::PosOrNamed { + return Err(syn::Error::new( + arg.span, + "Positional-or-named parameter after named-only", + )); + } + last_param_style = CurrentParamStyle::PosOrNamed; + pos_or_named.push(arg); + } + StarArgPassStyle::NamedOnly => { + if last_param_style > CurrentParamStyle::NamedOnly { + return Err(syn::Error::new( + arg.span, + "Named-only parameter cannot follow kwargs", + )); + } + named_only.push(arg); + last_param_style = CurrentParamStyle::NamedOnly; + } + } + + let optional = arg.default.is_some() || arg.is_option(); + + if last_param_style <= CurrentParamStyle::PosOrNamed { + if seen_optional && !optional { + return Err(syn::Error::new( + arg.span, + "Positional parameter without default after optional parameter", + )); + } + } + + seen_optional |= optional; + } + Ok(ParamSpec { + pos_only, + pos_or_named, + args, + named_only, + kwargs, + }) + } +} diff --git a/starlark-rust/starlark_derive/src/module/parse.rs b/starlark-rust/starlark_derive/src/module/parse.rs new file mode 100644 index 0000000000000..a4d3536293657 --- /dev/null +++ b/starlark-rust/starlark_derive/src/module/parse.rs @@ -0,0 +1,209 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +mod fun; + +use dupe::Dupe; +use syn::spanned::Spanned; +use syn::Attribute; +use syn::Expr; +use syn::ExprLit; +use syn::FnArg; +use syn::Item; +use syn::ItemConst; +use syn::ItemFn; +use syn::Meta; +use syn::MetaNameValue; +use syn::PatType; +use syn::Stmt; +use syn::Type; +use syn::TypeReference; +use syn::Visibility; + +use crate::module::parse::fun::parse_fun; +use crate::module::typ::StarConst; +use crate::module::typ::StarModule; +use crate::module::typ::StarStmt; +use crate::module::util::is_type_name; + +#[derive(Debug, Copy, Clone, Dupe, PartialEq, Eq)] +pub(crate) enum ModuleKind { + Globals, + Methods, +} + +impl ModuleKind { + pub(crate) fn statics_type_name(self) -> &'static str { + match self { + ModuleKind::Globals => "GlobalsStatic", + ModuleKind::Methods => "MethodsStatic", + } + } +} + +pub(crate) fn parse(mut input: ItemFn) -> syn::Result { + let (module_docstring, attrs) = parse_module_attributes(&input)?; + let visibility = input.vis; + let sig_span = input.sig.span(); + let name = input.sig.ident; + + if input.sig.inputs.len() != 1 { + return Err(syn::Error::new( + sig_span, + "function must have exactly one argument", + )); + } + let arg = input.sig.inputs.pop().unwrap(); + let arg_span = arg.span(); + + let (ty, module_kind) = match arg.into_value() { + FnArg::Typed(PatType { ty, .. }) if is_mut_globals_builder(&ty) => { + (ty, ModuleKind::Globals) + } + FnArg::Typed(PatType { ty, .. 
}) if is_mut_methods_builder(&ty) => { + (ty, ModuleKind::Methods) + } + _ => { + return Err(syn::Error::new( + arg_span, + "Expected a mutable globals or methods builder", + )); + } + }; + Ok(StarModule { + module_kind, + visibility, + globals_builder: *ty, + name, + attrs, + docstring: module_docstring, + stmts: input + .block + .stmts + .into_iter() + .map(|stmt| parse_stmt(stmt, module_kind)) + .collect::>()?, + }) +} + +fn is_attribute_docstring(x: &Attribute) -> syn::Result> { + if x.path().is_ident("doc") { + if let Meta::NameValue(MetaNameValue { + value: + Expr::Lit(ExprLit { + lit: syn::Lit::Str(s), + .. + }), + .. + }) = &x.meta + { + let ds = s.value(); + if ds.starts_with("# ") || ds.starts_with("## ") { + return Err(syn::Error::new( + x.span(), + "Docstrings may not contain H1 or H2 headers (`#` or `##`) as this leads to \ + poor generated documentation. Use at least H3 (`###`) instead.", + )); + } + + return Ok(Some(ds)); + } + } + Ok(None) +} + +/// Return (docstring, other attributes) +fn parse_module_attributes(input: &ItemFn) -> syn::Result<(Option, Vec)> { + let mut doc_attrs = Vec::new(); + let mut attrs = Vec::new(); + for attr in &input.attrs { + if let Some(ds) = is_attribute_docstring(attr)? { + doc_attrs.push(ds); + } else { + attrs.push(attr.clone()); + } + } + let docs = if doc_attrs.is_empty() { + None + } else { + Some(doc_attrs.join("\n")) + }; + Ok((docs, attrs)) +} + +fn parse_stmt(stmt: Stmt, module_kind: ModuleKind) -> syn::Result { + match stmt { + Stmt::Item(Item::Fn(x)) => parse_fun(x, module_kind), + Stmt::Item(Item::Const(x)) => Ok(StarStmt::Const(parse_const(x)?)), + s => Err(syn::Error::new( + s.span(), + "Can only put constants and functions inside a #[starlark_module]", + )), + } +} + +fn parse_visibility(vis: &Visibility) -> syn::Result<()> { + match vis { + Visibility::Inherited => Ok(()), + _ => Err(syn::Error::new( + vis.span(), + "Visibility modifiers are not allowed inside a `#[starlark_module]`", + )), + } +} + +fn parse_const(x: ItemConst) -> syn::Result { + parse_visibility(&x.vis)?; + + Ok(StarConst { + name: x.ident, + ty: *x.ty, + value: *x.expr, + }) +} + +fn is_mut_something(x: &Type, smth: &str) -> bool { + match x { + Type::Reference(TypeReference { + mutability: Some(_), + elem: x, + .. + }) => is_type_name(x, smth), + _ => false, + } +} + +pub(crate) fn is_ref_something(x: &Type, smth: &str) -> bool { + match x { + Type::Reference(TypeReference { + mutability: None, + elem: x, + .. 
+ }) => is_type_name(x, smth), + _ => false, + } +} + +// Is the type `&mut GlobalsBuilder` +fn is_mut_globals_builder(x: &Type) -> bool { + is_mut_something(x, "GlobalsBuilder") +} + +// Is the type `&mut MethodsBuilder` +fn is_mut_methods_builder(x: &Type) -> bool { + is_mut_something(x, "MethodsBuilder") +} diff --git a/starlark-rust/starlark_derive/src/module/parse/fun.rs b/starlark-rust/starlark_derive/src/module/parse/fun.rs index 1339b868bc807..83334f58a7dc6 100644 --- a/starlark-rust/starlark_derive/src/module/parse/fun.rs +++ b/starlark-rust/starlark_derive/src/module/parse/fun.rs @@ -24,12 +24,9 @@ use syn::Attribute; use syn::Expr; use syn::FnArg; use syn::GenericArgument; -use syn::GenericParam; use syn::Generics; use syn::ItemFn; use syn::Lifetime; -use syn::Pat; -use syn::PatType; use syn::PathArguments; use syn::ReturnType; use syn::Token; @@ -40,14 +37,19 @@ use crate::module::parse::is_mut_something; use crate::module::parse::is_ref_something; use crate::module::parse::parse_visibility; use crate::module::parse::ModuleKind; +use crate::module::simple_param::SimpleParam; +use crate::module::typ::RegularParams; use crate::module::typ::SpecialParam; use crate::module::typ::StarArg; use crate::module::typ::StarArgPassStyle; use crate::module::typ::StarArgSource; +use crate::module::typ::StarArguments; use crate::module::typ::StarAttr; use crate::module::typ::StarFun; use crate::module::typ::StarFunSource; use crate::module::typ::StarStmt; +use crate::module::typ::ThisParam; +use crate::util::GenericsUtil; #[derive(Default)] struct FnAttrs { @@ -69,7 +71,20 @@ struct FnParamAttrs { named_only: bool, args: bool, kwargs: bool, - unused_attrs: Vec, +} + +impl FnParamAttrs { + fn is_empty(&self) -> bool { + let FnParamAttrs { + default, + this, + pos_only, + named_only, + args, + kwargs, + } = self; + default.is_none() && !*this && !*pos_only && !*named_only && !*args && !*kwargs + } } /// Parse `#[starlark(...)]` fn param attribute. @@ -131,16 +146,31 @@ fn parse_starlark_fn_param_attr( } /// Parse fn param attributes: parse `#[starlark(...)]` and take others as is. -fn parse_fn_param_attrs(attrs: Vec) -> syn::Result { +fn parse_fn_param_attrs(attrs: SimpleParam) -> syn::Result<(FnParamAttrs, SimpleParam)> { + let SimpleParam { + attrs, + mutability, + ident, + ty, + } = attrs; let mut param_attrs = FnParamAttrs::default(); + let mut other_attrs = Vec::new(); for attr in attrs { if attr.path().is_ident("starlark") { parse_starlark_fn_param_attr(&attr, &mut param_attrs)?; } else { - param_attrs.unused_attrs.push(attr); + other_attrs.push(attr); } } - Ok(param_attrs) + Ok(( + param_attrs, + SimpleParam { + attrs: other_attrs, + mutability, + ident, + ty, + }, + )) } /// Parse `#[starlark(...)]` fn attribute. @@ -199,7 +229,7 @@ fn parse_fn_attrs(span: Span, xs: Vec) -> syn::Result { for x in xs { if x.path().is_ident("starlark") { parse_starlark_fn_attr(&x, &mut res)?; - } else if let Some(ds) = is_attribute_docstring(&x) { + } else if let Some(ds) = is_attribute_docstring(&x)? 
{ match &mut res.docstring { None => res.docstring = Some(ds), Some(docstring) => { @@ -220,8 +250,8 @@ fn parse_fn_attrs(span: Span, xs: Vec) -> syn::Result { Ok(res) } -/// Check if given type is `anyhow::Result` -fn is_anyhow_result(t: &Type) -> bool { +/// Check if given type is `anyhow::Result` or `starlark::Result` +fn is_anyhow_or_starlark_result(t: &Type) -> bool { let path = match t { Type::Path(p) => p, _ => return false, @@ -232,7 +262,7 @@ fn is_anyhow_result(t: &Type) -> bool { let mut segments = path.path.segments.iter(); match segments.next() { None => return false, - Some(s) if s.ident != "anyhow" => return false, + Some(s) if s.ident != "anyhow" && s.ident != "starlark" => return false, _ => {} }; let result = match segments.next() { @@ -252,10 +282,7 @@ fn is_anyhow_result(t: &Type) -> bool { None => return false, Some(t) => t, }; - match t { - GenericArgument::Type(_) => true, - _ => false, - } + matches!(t, GenericArgument::Type(_)) } // Add a function to the `GlobalsModule` named `globals_builder`. @@ -278,14 +305,16 @@ pub(crate) fn parse_fun(func: ItemFn, module_kind: ModuleKind) -> syn::Result = None; for (i, arg) in func.sig.inputs.into_iter().enumerate() { let span = arg.span(); - let parsed_arg = parse_arg(arg, has_v, seen_star_args, module_kind, i)?; + let parsed_arg = parse_arg(arg, has_v, seen_star, module_kind, i)?; match parsed_arg { StarArgOrSpecial::Heap(special) => { if heap.is_some() { @@ -300,10 +329,46 @@ pub(crate) fn parse_fun(func: ItemFn, module_kind: ModuleKind) -> syn::Result { - if arg.pass_style == StarArgPassStyle::Args { - seen_star_args = true; + if arg.pass_style == StarArgPassStyle::Args + || arg.pass_style == StarArgPassStyle::NamedOnly + { + seen_star = true; + } + let args = args.get_or_insert_with(|| RegularParams::Unpack(Vec::new())); + match args { + RegularParams::Arguments(_) => { + return Err(syn::Error::new( + span, + "Cannot mix `&Arguments` and regular params", + )); + } + RegularParams::Unpack(args) => args.push(arg), } - args.push(arg); + } + StarArgOrSpecial::Arguments(arguments) => match args { + None => args = Some(RegularParams::Arguments(arguments)), + Some(RegularParams::Unpack(_)) => { + return Err(syn::Error::new( + span, + "Cannot mix `&Arguments` and regular params", + )); + } + Some(RegularParams::Arguments(_)) => { + return Err(syn::Error::new(span, "Duplicate `&Arguments` parameter")); + } + }, + StarArgOrSpecial::This(this_param) => { + let expecting_this = module_kind == ModuleKind::Methods && i == 0; + if !expecting_this { + return Err(syn::Error::new( + span, + "Receiver parameter can be only first", + )); + } + if this.is_some() { + return Err(syn::Error::new(span, "Repeated `this` parameter")); + } + this = Some(this_param); } } } @@ -323,25 +388,18 @@ pub(crate) fn parse_fun(func: ItemFn, module_kind: ModuleKind) -> syn::Result syn::Result syn::Result syn::Result StarFunSource::Arguments, + RegularParams::Unpack(args) => resolve_args(args)?, + }; let fun = StarFun { name: func.sig.ident, as_type, attrs, + this, args, heap, eval, @@ -397,50 +460,38 @@ pub(crate) fn parse_fun(func: ItemFn, module_kind: ModuleKind) -> syn::Result syn::Result { - if args.len() == 1 && args[0].pass_style == StarArgPassStyle::Arguments { - args[0].source = StarArgSource::Parameters; - Ok(StarFunSource::Arguments) - } else if args.len() == 2 - && args[0].pass_style == StarArgPassStyle::This - && args[1].pass_style == StarArgPassStyle::Arguments - { - args[0].source = StarArgSource::This; - args[1].source = 
StarArgSource::Parameters; - Ok(StarFunSource::ThisArguments) + let use_arguments = args.iter().any(|x| x.requires_signature()); + if use_arguments { + let mut count = 0; + for x in args.iter_mut() { + x.source = StarArgSource::Argument(count); + count += 1; + } + Ok(StarFunSource::Signature { count }) } else { - let use_arguments = args - .iter() - .filter(|x| x.pass_style != StarArgPassStyle::This) - .any(|x| x.requires_signature()); - if use_arguments { - let mut count = 0; - for x in args.iter_mut() { - if x.pass_style == StarArgPassStyle::This { - x.source = StarArgSource::This; - } else { - x.source = StarArgSource::Argument(count); - count += 1; - } - } - Ok(StarFunSource::Signature { count }) - } else { - let mut required = 0; - let mut optional = 0; - for x in args.iter_mut() { - if x.pass_style == StarArgPassStyle::This { - x.source = StarArgSource::This; - continue; - } - if optional == 0 && x.default.is_none() && !x.is_option() { - x.source = StarArgSource::Required(required); - required += 1; - } else { - x.source = StarArgSource::Optional(optional); - optional += 1; + let mut required = 0; + let mut optional = 0; + let mut kwargs = false; + for x in args.iter_mut() { + if x.pass_style == StarArgPassStyle::Kwargs { + if kwargs { + return Err(syn::Error::new(x.span, "Duplicate `**kwargs` parameter")); } + x.source = StarArgSource::Kwargs; + kwargs = true; + } else if optional == 0 && x.default.is_none() && !x.is_option() { + x.source = StarArgSource::Required(required); + required += 1; + } else { + x.source = StarArgSource::Optional(optional); + optional += 1; } - Ok(StarFunSource::Positional { required, optional }) } + Ok(StarFunSource::Positional { + required, + optional, + kwargs, + }) } } @@ -463,7 +514,7 @@ fn check_lifetimes_in_type(ty: &Type, has_v: bool) -> syn::Result<()> { #[allow(clippy::collapsible_if)] fn visit_lifetime(&mut self, lifetime: &'ast Lifetime) { if self.result.is_ok() { - if lifetime.ident != "_" { + if lifetime.ident != "_" && lifetime.ident != "static" { if lifetime.ident != "v" { self.result = Err(syn::Error::new( lifetime.span(), @@ -492,58 +543,48 @@ fn parse_fn_output(return_type: &ReturnType, span: Span, has_v: bool) -> syn::Re check_lifetimes_in_return_type(return_type, has_v)?; match return_type { ReturnType::Default => Err(syn::Error::new(span, "Function must have a return type")), - ReturnType::Type(_, x) => match is_anyhow_result(x) { + ReturnType::Type(_, x) => match is_anyhow_or_starlark_result(x) { true => Ok((**x).clone()), false => Err(syn::Error::new( return_type.span(), - "Function return type must be precisely `anyhow::Result<...>`", + "Function return type must be either `anyhow::Result<...>` or `starlark::Result<...>`", )), }, } } fn parse_fn_generics(generics: &Generics) -> syn::Result { + let generics = GenericsUtil::new(generics); let mut seen_v = false; - for param in &generics.params { - match param { - GenericParam::Type(..) => { - return Err(syn::Error::new( - param.span(), - "Function cannot have type parameters", - )); - } - GenericParam::Const(..) 
=> { - return Err(syn::Error::new( - param.span(), - "Function cannot have const parameters", - )); - } - GenericParam::Lifetime(lifetime) => { - if lifetime.lifetime.ident != "v" { - return Err(syn::Error::new( - lifetime.lifetime.span(), - "Function cannot have lifetime parameters other than `v", - )); - } - if !lifetime.bounds.is_empty() { - return Err(syn::Error::new( - lifetime.span(), - "Function lifetime params must not have bounds", - )); - } - if seen_v { - return Err(syn::Error::new(lifetime.span(), "Duplicate `v parameters")); - } - seen_v = true; - } + for lifetime in generics.assert_only_lifetime_params()? { + if lifetime.lifetime.ident != "v" { + return Err(syn::Error::new( + lifetime.lifetime.span(), + "Function cannot have lifetime parameters other than `v`", + )); + } + if !lifetime.bounds.is_empty() { + return Err(syn::Error::new( + lifetime.span(), + "Function lifetime params must not have bounds", + )); } + if seen_v { + return Err(syn::Error::new(lifetime.span(), "Duplicate `v` parameters")); + } + seen_v = true; } Ok(seen_v) } #[allow(clippy::large_enum_variant)] enum StarArgOrSpecial { + /// Receiver. + This(ThisParam), + /// Function parameters. StarArg(StarArg), + /// `&Arguments`. + Arguments(StarArguments), /// `&mut Evaluator`. Eval(SpecialParam), /// `&Heap`. @@ -551,11 +592,17 @@ enum StarArgOrSpecial { } /// Function parameter is `eval: &mut Evaluator`. -fn is_eval(ident: &Ident, ty: &Type) -> syn::Result<Option<SpecialParam>> { - if is_mut_something(ty, "Evaluator") { +fn is_eval(param: &SimpleParam, attrs: &FnParamAttrs) -> syn::Result<Option<SpecialParam>> { + if is_mut_something(&param.ty, "Evaluator") { + if !attrs.is_empty() { + return Err(syn::Error::new_spanned( + &param.ident, + "`&mut Evaluator` parameter cannot have attributes", + )); + } + Ok(Some(SpecialParam { - ident: ident.clone(), - ty: ty.clone(), + param: param.clone(), })) } else { Ok(None) @@ -563,11 +610,70 @@ fn is_eval(ident: &Ident, ty: &Type) -> syn::Result<Option<SpecialParam>> { } /// Function parameter is `heap: &Heap`. 
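// For orientation, a sketch of the user-side view that the classification above
// supports (hypothetical function, not part of this patch; assumes the usual
// starlark-rust imports):
//
//     #[starlark_module]
//     fn my_globals(globals_builder: &mut GlobalsBuilder) {
//         fn add_one<'v>(
//             x: i32,
//             heap: &'v Heap,
//             eval: &mut Evaluator<'v, '_, '_>,
//         ) -> anyhow::Result<i32> {
//             let _ = (heap, eval); // injected by the generated glue, not Starlark parameters
//             Ok(x + 1)
//         }
//     }
//
// `is_eval`/`is_heap` match these injected parameters purely by type
// (`&mut Evaluator` / `&Heap`), which is why they may not carry
// `#[starlark(...)]` attributes.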
-fn is_heap(ident: &Ident, ty: &Type) -> syn::Result<Option<SpecialParam>> { - if is_ref_something(ty, "Heap") { +fn is_heap(param: &SimpleParam, attrs: &FnParamAttrs) -> syn::Result<Option<SpecialParam>> { + if is_ref_something(&param.ty, "Heap") { + if !attrs.is_empty() { + return Err(syn::Error::new_spanned( + &param.ident, + "`&Heap` parameter cannot have attributes", + )); + } Ok(Some(SpecialParam { - ident: ident.clone(), - ty: ty.clone(), + param: param.clone(), + })) + } else { + Ok(None) + } +} + +fn parse_this_param(param: &SimpleParam, attrs: &FnParamAttrs) -> syn::Result<ThisParam> { + let FnParamAttrs { + default, + this, + pos_only, + named_only, + args, + kwargs, + } = attrs; + + if !this && &param.ident != "this" { + return Err(syn::Error::new_spanned( + param, + "Receiver parameter must be named `this` \ + or have `#[starlark(this)]` annotation", + )); + } + + if default.is_some() || *pos_only || *named_only || *args || *kwargs { + return Err(syn::Error::new_spanned( + param, + "Attributes are not compatible with receiver parameter", + )); + } + + Ok(ThisParam { + param: param.clone(), + }) +} + +fn is_arguments(param: &SimpleParam, attrs: &FnParamAttrs) -> syn::Result<Option<StarArguments>> { + if is_ref_something(&param.ty, "Arguments") { + let FnParamAttrs { + default, + this, + pos_only, + named_only, + args, + kwargs, + } = attrs; + if default.is_some() || *this || *pos_only || *named_only || *args || *kwargs { + return Err(syn::Error::new_spanned( + param, + "Attributes are not compatible with `&Arguments` parameter", + )); + } + Ok(Some(StarArguments { + param: param.clone(), })) } else { Ok(None) @@ -578,127 +684,92 @@ fn is_heap(ident: &Ident, ty: &Type) -> syn::Result<Option<SpecialParam>> { fn parse_arg( x: FnArg, has_v: bool, - seen_star_args: bool, + seen_star: bool, module_kind: ModuleKind, param_index: usize, ) -> syn::Result<StarArgOrSpecial> { let this = module_kind == ModuleKind::Methods && param_index == 0; let span = x.span(); - match x { - FnArg::Typed(PatType { attrs, pat, ty, .. }) => { - let ident = if let Pat::Ident(ident) = *pat { - ident - } else { - return Err(syn::Error::new( - pat.span(), - "Function parameter pattern must be identifier", - )); - }; - if let Some(heap) = is_heap(&ident.ident, &ty)? { - if this { - return Err(syn::Error::new( - span, - "Receiver parameter cannot be `&Heap`", - )); - } - return Ok(StarArgOrSpecial::Heap(heap)); - } else if let Some(eval) = is_eval(&ident.ident, &ty)? { - if this { - return Err(syn::Error::new( - span, - "Receiver parameter cannot be `&mut Evaluator`", - )); - } - return Ok(StarArgOrSpecial::Eval(eval)); - } - if ident.subpat.is_some() { - return Err(syn::Error::new( - ident.span(), - "Function arguments cannot use patterns", - )); - } - - if ident.by_ref.is_some() { - return Err(syn::Error::new( - ident.span(), - "Function arguments cannot have `ref` modifier", - )); - } + let param = SimpleParam::from_fn_arg(x)?; - check_lifetimes_in_type(&ty, has_v)?; - let param_attrs = parse_fn_param_attrs(attrs)?; + check_lifetimes_in_type(&param.ty, has_v)?; + let (param_attrs, param) = parse_fn_param_attrs(param)?; - if this { - if !param_attrs.this && ident.ident != "this" { - return Err(syn::Error::new( - span, - "Receiver parameter must be named `this` \ - or have `#[starlark(this)]` annotation", - )); - } - } else { - if param_attrs.this { - return Err(syn::Error::new( - span, - "Receiver parameter can be only first", - )); - } - } + if let Some(heap) = is_heap(&param, &param_attrs)? 
{ + if this { + return Err(syn::Error::new( + span, + "Receiver parameter cannot be `&Heap`", + )); + } + return Ok(StarArgOrSpecial::Heap(heap)); + } else if let Some(eval) = is_eval(&param, &param_attrs)? { + if this { + return Err(syn::Error::new( + span, + "Receiver parameter cannot be `&mut Evaluator`", + )); + } + return Ok(StarArgOrSpecial::Eval(eval)); + } - let arguments = is_ref_something(&ty, "Arguments"); - - let pass_style = match ( - this, - param_attrs.args, - param_attrs.kwargs, - seen_star_args, - param_attrs.pos_only, - param_attrs.named_only, - arguments, - ) { - (true, _, _, _, _, _, false) => StarArgPassStyle::This, - (false, true, _, _, _, _, false) => StarArgPassStyle::Args, - (false, _, true, _, _, _, false) => StarArgPassStyle::Kwargs, - (false, _, _, true, true, _, false) => { - return Err(syn::Error::new( - span, - "Positional-only arguments cannot follow *args", - )); - } - (false, false, false, true, false, _, false) => StarArgPassStyle::NamedOnly, - (false, false, false, false, false, false, false) => StarArgPassStyle::PosOrNamed, - (false, false, false, false, true, false, false) => StarArgPassStyle::PosOnly, - (false, false, false, false, false, true, false) => StarArgPassStyle::NamedOnly, - (false, false, false, false, true, true, false) => { - return Err(syn::Error::new( - span, - "Function parameter cannot be both positional-only and named-only", - )); - } - (false, false, false, _, false, false, true) => StarArgPassStyle::Arguments, - (_, _, _, _, _, _, true) => { - return Err(syn::Error::new( - span, - "`&Arguments` parameter type is incompatible with annotations", - )); - } - }; - Ok(StarArgOrSpecial::StarArg(StarArg { + if this { + return Ok(StarArgOrSpecial::This(parse_this_param( + &param, + &param_attrs, + )?)); + } else { + if param_attrs.this { + return Err(syn::Error::new( span, - attrs: param_attrs.unused_attrs, - mutable: ident.mutability, - name: ident.ident, - pass_style, - ty: *ty, - default: param_attrs.default, - source: StarArgSource::Unknown, - })) + "Receiver parameter can be only first", + )); } - FnArg::Receiver(..) => Err(syn::Error::new( - span, - "Function cannot have `self` parameters", - )), } + + if let Some(arguments) = is_arguments(&param, &param_attrs)? { + return Ok(StarArgOrSpecial::Arguments(arguments)); + } + + let pass_style = match ( + param_attrs.args, + param_attrs.kwargs, + seen_star, + param_attrs.pos_only, + param_attrs.named_only, + ) { + (true, _, _, _, _) => StarArgPassStyle::Args, + (_, true, _, _, _) => StarArgPassStyle::Kwargs, + (_, _, true, true, _) => { + return Err(syn::Error::new( + span, + "Positional-only arguments cannot follow *args", + )); + } + (_, _, _, true, true) => { + return Err(syn::Error::new( + span, + "Function parameter cannot be both positional-only and named-only", + )); + } + (false, false, true, false, _) => StarArgPassStyle::NamedOnly, + // TODO(nga): currently, without `#[starlark(require = named)]` + // and without `#[starlark(require = pos)]`, parameter is positional-or-named. + // We want to change that: either make it positional by default, + // or require explicit `#[starlark(pos, named)]`. 
+ // Discussion there: + // https://fb.workplace.com/groups/1267349253953900/posts/1299495914072567 + (false, false, false, false, false) => StarArgPassStyle::PosOrNamed, + (false, false, false, true, false) => StarArgPassStyle::PosOnly, + (false, false, false, false, true) => StarArgPassStyle::NamedOnly, + }; + Ok(StarArgOrSpecial::StarArg(StarArg { + span, + param, + pass_style, + default: param_attrs.default, + source: StarArgSource::Unknown, + })) } diff --git a/starlark-rust/starlark_derive/src/module/parse/mod.rs b/starlark-rust/starlark_derive/src/module/parse/mod.rs deleted file mode 100644 index 79a74646b1587..0000000000000 --- a/starlark-rust/starlark_derive/src/module/parse/mod.rs +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -mod fun; - -use dupe::Dupe; -use syn::spanned::Spanned; -use syn::Attribute; -use syn::Expr; -use syn::ExprLit; -use syn::FnArg; -use syn::Item; -use syn::ItemConst; -use syn::ItemFn; -use syn::Meta; -use syn::MetaNameValue; -use syn::PatType; -use syn::Stmt; -use syn::Type; -use syn::TypeReference; -use syn::Visibility; - -use crate::module::parse::fun::parse_fun; -use crate::module::typ::StarConst; -use crate::module::typ::StarModule; -use crate::module::typ::StarStmt; -use crate::module::util::is_type_name; - -#[derive(Debug, Copy, Clone, Dupe, PartialEq, Eq)] -pub(crate) enum ModuleKind { - Globals, - Methods, -} - -impl ModuleKind { - pub(crate) fn statics_type_name(self) -> &'static str { - match self { - ModuleKind::Globals => "GlobalsStatic", - ModuleKind::Methods => "MethodsStatic", - } - } -} - -pub(crate) fn parse(mut input: ItemFn) -> syn::Result { - let (module_docstring, attrs) = parse_module_attributes(&input); - let visibility = input.vis; - let sig_span = input.sig.span(); - let name = input.sig.ident; - - if input.sig.inputs.len() != 1 { - return Err(syn::Error::new( - sig_span, - "function must have exactly one argument", - )); - } - let arg = input.sig.inputs.pop().unwrap(); - let arg_span = arg.span(); - - let (ty, module_kind) = match arg.into_value() { - FnArg::Typed(PatType { ty, .. }) if is_mut_globals_builder(&ty) => { - (ty, ModuleKind::Globals) - } - FnArg::Typed(PatType { ty, .. }) if is_mut_methods_builder(&ty) => { - (ty, ModuleKind::Methods) - } - _ => { - return Err(syn::Error::new( - arg_span, - "Expected a mutable globals or methods builder", - )); - } - }; - Ok(StarModule { - module_kind, - visibility, - globals_builder: *ty, - name, - attrs, - docstring: module_docstring, - stmts: input - .block - .stmts - .into_iter() - .map(|stmt| parse_stmt(stmt, module_kind)) - .collect::>()?, - }) -} - -fn is_attribute_docstring(x: &Attribute) -> Option { - if x.path().is_ident("doc") { - if let Meta::NameValue(MetaNameValue { - value: - Expr::Lit(ExprLit { - lit: syn::Lit::Str(s), - .. - }), - .. 
- }) = &x.meta - { - return Some(s.value()); - } - } - None -} - -/// Return (docstring, other attributes) -fn parse_module_attributes(input: &ItemFn) -> (Option, Vec) { - let mut doc_attrs = Vec::new(); - let mut attrs = Vec::new(); - for attr in &input.attrs { - if let Some(ds) = is_attribute_docstring(attr) { - doc_attrs.push(ds); - } else { - attrs.push(attr.clone()); - } - } - let docs = if doc_attrs.is_empty() { - None - } else { - Some(doc_attrs.join("\n")) - }; - (docs, attrs) -} - -fn parse_stmt(stmt: Stmt, module_kind: ModuleKind) -> syn::Result { - match stmt { - Stmt::Item(Item::Fn(x)) => parse_fun(x, module_kind), - Stmt::Item(Item::Const(x)) => Ok(StarStmt::Const(parse_const(x)?)), - s => Err(syn::Error::new( - s.span(), - "Can only put constants and functions inside a #[starlark_module]", - )), - } -} - -fn parse_visibility(vis: &Visibility) -> syn::Result<()> { - match vis { - Visibility::Inherited => Ok(()), - _ => Err(syn::Error::new( - vis.span(), - "Visibility modifiers are not allowed inside a `#[starlark_module]`", - )), - } -} - -fn parse_const(x: ItemConst) -> syn::Result { - parse_visibility(&x.vis)?; - - Ok(StarConst { - name: x.ident, - ty: *x.ty, - value: *x.expr, - }) -} - -fn is_mut_something(x: &Type, smth: &str) -> bool { - match x { - Type::Reference(TypeReference { - mutability: Some(_), - elem: x, - .. - }) => is_type_name(x, smth), - _ => false, - } -} - -pub(crate) fn is_ref_something(x: &Type, smth: &str) -> bool { - match x { - Type::Reference(TypeReference { - mutability: None, - elem: x, - .. - }) => is_type_name(x, smth), - _ => false, - } -} - -// Is the type `&mut GlobalsBuilder` -fn is_mut_globals_builder(x: &Type) -> bool { - is_mut_something(x, "GlobalsBuilder") -} - -// Is the type `&mut MethodsBuilder` -fn is_mut_methods_builder(x: &Type) -> bool { - is_mut_something(x, "MethodsBuilder") -} diff --git a/starlark-rust/starlark_derive/src/module/render.rs b/starlark-rust/starlark_derive/src/module/render.rs new file mode 100644 index 0000000000000..eb51ba63341b0 --- /dev/null +++ b/starlark-rust/starlark_derive/src/module/render.rs @@ -0,0 +1,244 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +mod fun; + +use std::collections::HashSet; + +use proc_macro2::TokenStream; +use quote::format_ident; +use quote::quote; +use quote::ToTokens; + +use crate::module::render::fun::render_fun; +use crate::module::render::fun::render_none; +use crate::module::render::fun::render_some; +use crate::module::simple_param::SimpleParam; +use crate::module::typ::SpecialParam; +use crate::module::typ::StarAttr; +use crate::module::typ::StarConst; +use crate::module::typ::StarFun; +use crate::module::typ::StarModule; +use crate::module::typ::StarStmt; +use crate::module::util::ident_string; + +pub(crate) fn render(x: StarModule) -> syn::Result { + Ok(render_impl(x)?.to_token_stream()) +} + +fn render_impl(x: StarModule) -> syn::Result { + let StarModule { + name, + globals_builder, + visibility, + attrs, + docstring, + stmts, + module_kind, + } = x; + let statics = format_ident!("{}", module_kind.statics_type_name()); + let stmts: Vec<_> = stmts + .into_iter() + .map(render_stmt) + .collect::>()?; + let set_docstring = docstring.map(|ds| quote!(globals_builder.set_docstring(#ds);)); + Ok(syn::parse_quote! { + #( #attrs )* + #visibility fn #name(globals_builder: #globals_builder) { + fn build(globals_builder: #globals_builder) { + #set_docstring + #( #stmts )* + // Mute warning if stmts is empty. + let _ = globals_builder; + } + static RES: starlark::environment::#statics = starlark::environment::#statics::new(); + RES.populate(build, globals_builder); + } + }) +} + +fn render_stmt(x: StarStmt) -> syn::Result { + match x { + StarStmt::Const(x) => Ok(render_const(x)), + StarStmt::Attr(x) => Ok(render_attr(x)), + StarStmt::Fun(x) => render_fun(x), + } +} + +fn render_const(x: StarConst) -> syn::Stmt { + let StarConst { name, ty, value } = x; + let name = ident_string(&name); + syn::parse_quote! { + globals_builder.set::<#ty>(#name, #value); + } +} + +fn render_attr(x: StarAttr) -> syn::Stmt { + let StarAttr { + name, + this, + heap, + attrs, + return_type, + speculative_exec_safe, + body, + docstring, + } = x; + let name_str = ident_string(&name); + let name_inner = syn::Ident::new(&format!("{}__inner", name_str), name.span()); + let docstring: syn::Expr = match docstring { + Some(d) => render_some(syn::parse_quote! { #d.to_owned() }), + None => render_none(), + }; + + let let_heap = if let Some(SpecialParam { + param: SimpleParam { ident, ty, .. }, + }) = heap + { + Some(quote! { let #ident: #ty = __heap; }) + } else { + None + }; + + let this_value: syn::Ident = syn::parse_quote! { s_this_value }; + + let unpack = this.render_prepare(&this.param.ident, &this_value); + + let inner: syn::ItemFn = syn::parse_quote! { + #( #attrs )* + #[allow(non_snake_case)] // Starlark doesn't have this convention + fn #name_inner<'v>( + #this_value: starlark::values::Value<'v>, + #[allow(unused_variables)] + __heap: &'v starlark::values::Heap, + ) -> #return_type { + #[allow(unused_variables)] + #unpack + #let_heap + #body + } + }; + + let outer: syn::ItemFn = syn::parse_quote! { + #[allow(non_snake_case)] + fn #name<'v>( + #[allow(unused_variables)] + this: starlark::values::Value<'v>, + heap: &'v starlark::values::Heap, + ) -> starlark::Result> { + Ok(heap.alloc(#name_inner(this, heap)?)) + } + }; + + syn::parse_quote! { + { + #inner + #outer + + globals_builder.set_attribute_fn( + #name_str, + #speculative_exec_safe, + #docstring, + starlark::values::type_repr::type_repr_from_attr_impl(#name_inner), + #name + ); + } + } +} + +/// Get the lifetimes that are mentioned in a given type and its nested generics. 
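// A sketch of what `render_impl` above generates for a globals module
// (simplified; it follows the quoted template directly, with `#statics`
// resolved for `ModuleKind::Globals`):
//
//     pub fn my_globals(globals_builder: &mut GlobalsBuilder) {
//         fn build(globals_builder: &mut GlobalsBuilder) {
//             // one `globals_builder.set...` statement per const/attr/fn
//             let _ = globals_builder;
//         }
//         static RES: starlark::environment::GlobalsStatic =
//             starlark::environment::GlobalsStatic::new();
//         RES.populate(build, globals_builder);
//     }
//
// The `static` plus `populate` means each module is built once and cached,
// rather than re-rendered on every use.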
+fn get_lifetimes_inner<'a>(ret: &mut HashSet<&'a syn::Lifetime>, typ: &'a syn::Type) { + match typ { + syn::Type::Path(path) => { + if let Some(segment) = path.path.segments.last() { + match &segment.arguments { + syn::PathArguments::None => {} + syn::PathArguments::AngleBracketed(args) => { + for arg in &args.args { + match arg { + syn::GenericArgument::Lifetime(l) => { + ret.insert(l); + } + syn::GenericArgument::Type(t) => get_lifetimes_inner(ret, t), + _ => {} + }; + } + } + syn::PathArguments::Parenthesized(args) => { + for t in &args.inputs { + get_lifetimes_inner(ret, t); + } + match &args.output { + syn::ReturnType::Default => {} + syn::ReturnType::Type(_, t) => get_lifetimes_inner(ret, t), + }; + } + }; + } + } + syn::Type::Group(g) => get_lifetimes_inner(ret, &g.elem), + syn::Type::Paren(p) => get_lifetimes_inner(ret, &p.elem), + syn::Type::Ptr(p) => get_lifetimes_inner(ret, &p.elem), + syn::Type::Reference(r) => { + if let Some(l) = &r.lifetime { + ret.insert(l); + }; + get_lifetimes_inner(ret, &r.elem); + } + syn::Type::Tuple(t) => { + for t in &t.elems { + get_lifetimes_inner(ret, t); + } + } + _ => {} + }; +} + +/// Get the lifetime specifications to use with a function based on the lifetimes mentioned in `typ`. +/// +/// e.g. `i32` would return ``, `Vec<(&'a str, &'b str)>` would return `<'a, 'b>` +fn get_lifetimes(typ: &syn::Type) -> TokenStream { + let mut ret = HashSet::new(); + get_lifetimes_inner(&mut ret, typ); + if ret.is_empty() { + TokenStream::new() + } else { + let mut ret: Vec<_> = ret.into_iter().filter(|l| l.ident != "_").collect(); + ret.sort_by(|l, r| l.ident.cmp(&r.ident)); + quote!(<#(#ret),*>) + } +} + +pub(crate) fn render_starlark_type(typ: &syn::Type) -> syn::Expr { + let lifetimes = get_lifetimes(typ); + syn::parse_quote! { + { + #[allow(clippy::extra_unused_lifetimes)] + fn get_type_string #lifetimes() -> starlark::typing::Ty { + <#typ as starlark::values::type_repr::StarlarkTypeRepr>::starlark_type_repr() + } + get_type_string() + } + } +} + +pub(crate) fn render_starlark_return_type(fun: &StarFun) -> syn::Expr { + let struct_name = fun.struct_name(); + syn::parse_quote! { + #struct_name::return_type_starlark_type_repr() + } +} diff --git a/starlark-rust/starlark_derive/src/module/render/fun.rs b/starlark-rust/starlark_derive/src/module/render/fun.rs index f6b745ce9eaa7..79960945d1f2a 100644 --- a/starlark-rust/starlark_derive/src/module/render/fun.rs +++ b/starlark-rust/starlark_derive/src/module/render/fun.rs @@ -15,21 +15,23 @@ * limitations under the License. */ +use std::iter; + use proc_macro2::Ident; use proc_macro2::TokenStream; use quote::format_ident; use quote::quote; -use quote::quote_spanned; -use syn::Attribute; use syn::Expr; use syn::ExprLit; use syn::Lit; +use crate::module::param_spec::ParamSpec; use crate::module::render::render_starlark_return_type; use crate::module::render::render_starlark_type; +use crate::module::simple_param::SimpleParam; +use crate::module::typ::RegularParams; use crate::module::typ::SpecialParam; use crate::module::typ::StarArg; -use crate::module::typ::StarArgPassStyle; use crate::module::typ::StarArgSource; use crate::module::typ::StarFun; use crate::module::typ::StarFunSource; @@ -38,13 +40,11 @@ use crate::module::util::ident_string; impl StarFun { fn ty_custom_expr(&self) -> syn::Expr { match &self.starlark_ty_custom_function { - Some(x) => syn::parse_quote_spanned! { - self.span()=> + Some(x) => syn::parse_quote! 
{ std::option::Option::Some(starlark::typing::Ty::custom_function(#x)) }, None => { - syn::parse_quote_spanned! { - self.span()=> + syn::parse_quote! { std::option::Option::None } } @@ -53,13 +53,11 @@ impl StarFun { fn special_builtin_function_expr(&self) -> syn::Expr { match &self.special_builtin_function { - Some(x) => syn::parse_quote_spanned! { - self.span()=> + Some(x) => syn::parse_quote! { std::option::Option::Some(#x) }, None => { - syn::parse_quote_spanned! { - self.span()=> + syn::parse_quote! { std::option::Option::None } } @@ -68,66 +66,43 @@ impl StarFun { fn as_type_expr(&self) -> syn::Expr { match &self.as_type { - Some(x) => syn::parse_quote_spanned! { - self.span()=> - std::option::Option::Some( + Some(x) => syn::parse_quote! { + std::option::Option::Some(( <#x as starlark::values::StarlarkValue>::get_type_starlark_repr(), - ) + starlark::docs::DocType::from_starlark_value::<#x>(), + )) }, - None => syn::parse_quote_spanned! { - self.span()=> + None => syn::parse_quote! { std::option::Option::None }, } } /// Evaluator function parameter and call argument. - fn eval_param_arg( - &self, - ) -> ( - Option, - Option, - Option, - ) { - if let Some(SpecialParam { ident, ty }) = &self.eval { + fn eval_param_arg(&self) -> (Option, Option) { + if let Some(SpecialParam { param }) = &self.eval { ( - Some(quote_spanned! {self.span()=> - #ident: #ty, - }), - Some(quote_spanned! {self.span()=> - #ty, - }), - Some(quote_spanned! {self.span()=> - eval, + Some(param.clone()), + Some(syn::parse_quote! { + eval }), ) } else { - (None, None, None) + (None, None) } } /// Heap function parameter and call argument. - fn heap_param_arg( - &self, - ) -> ( - Option, - Option, - Option, - ) { - if let Some(SpecialParam { ident, ty }) = &self.heap { + fn heap_param_arg(&self) -> (Option, Option) { + if let Some(SpecialParam { param }) = &self.heap { ( - Some(quote_spanned! {self.span()=> - #ident: #ty, - }), - Some(quote_spanned! {self.span()=> - #ty, - }), - Some(quote_spanned! {self.span()=> - eval.heap(), + Some(param.clone()), + Some(syn::parse_quote! { + eval.heap() }), ) } else { - (None, None, None) + (None, None) } } @@ -135,35 +110,41 @@ impl StarFun { fn this_param_arg( &self, ) -> ( - Option, - Option, - Option, + // Outer function parameter. + Option, + // Inner function parameter. + Option, + Option, + Option, ) { - if self.is_method() { - ( - Some(quote_spanned! {self.span()=> __this: starlark::values::Value<'v>, }), - Some(quote_spanned! {self.span()=> starlark::values::Value<'v>, }), - Some(quote_spanned! {self.span()=> __this, }), - ) - } else { - (None, None, None) + match &self.this { + Some(this) => { + let outer_param_name: syn::Ident = syn::parse_quote! { s_this_value }; + let local_var: syn::Ident = syn::parse_quote! { s_this_typed }; + ( + Some(syn::parse_quote! { #outer_param_name: starlark::values::Value<'v> }), + Some(this.param.clone()), + Some(this.render_prepare(&local_var, &outer_param_name)), + Some(syn::parse_quote! { #local_var }), + ) + } + None => (None, None, None, None), } } /// Non-special params. 
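// How the receiver pieces returned by `this_param_arg` above fit together, as
// a sketch (the `s_this_value`/`s_this_typed` names come from the quoted code;
// what the prepare statement expands to is decided by
// `ThisParam::render_prepare`, which is outside this hunk):
//
//     fn invoke<'v>(&self, /* eval */, s_this_value: Value<'v>, /* parameters */) -> /* ... */ {
//         let s_this_typed = /* render_prepare: unpack `s_this_value`
//                               into the user's declared receiver type */;
//         Self::invoke_impl(s_this_typed, /* binding args, eval, heap */)
//     }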
- fn binding_params_arg(&self) -> (Vec, Vec, TokenStream, Vec) { - let Bindings { prepare, bindings } = render_binding(self); - let binding_params: Vec<_> = bindings.iter().map(|b| b.render_param()).collect(); - let binding_param_types: Vec<_> = bindings.iter().map(|b| b.render_param_type()).collect(); + fn binding_params_arg(&self) -> syn::Result<(Vec, TokenStream, Vec)> { + let Bindings { prepare, bindings } = render_binding(self)?; + let binding_params: Vec<_> = bindings.iter().map(|b| b.param.clone()).collect(); let binding_args: Vec<_> = bindings.iter().map(|b| b.render_arg()).collect(); - (binding_params, binding_param_types, prepare, binding_args) + Ok((binding_params, prepare, binding_args)) } fn trait_name(&self) -> syn::Path { if self.is_method() { - syn::parse_quote_spanned! {self.span()=> starlark::values::function::NativeMeth } + syn::parse_quote! { starlark::values::function::NativeMeth } } else { - syn::parse_quote_spanned! {self.span()=> starlark::values::function::NativeFunc } + syn::parse_quote! { starlark::values::function::NativeFunc } } } @@ -176,37 +157,31 @@ impl StarFun { } /// Fields and field initializers for the struct implementing the trait. - fn struct_fields(&self) -> syn::Result<(TokenStream, TokenStream)> { + fn struct_fields(&self) -> syn::Result<(Vec, Vec)> { let signature = if let StarFunSource::Signature { .. } = self.source { - Some(render_signature(self, Purpose::Parsing)?) + Some(render_signature(self)?) } else { None }; if let Some(signature) = signature { Ok(( - quote_spanned! { self.span()=> - signature: starlark::eval::ParametersSpec, - }, - quote_spanned! { self.span()=> - signature: #signature, - }, + vec![syn::parse_quote! { + signature: starlark::eval::ParametersSpec + }], + vec![syn::parse_quote! { + signature: #signature + }], )) } else { - Ok(( - quote_spanned! { self.span()=> }, - quote_spanned! { self.span()=> }, - )) + Ok((Vec::new(), Vec::new())) } } /// Globals builder call to register the function. - fn builder_set( - &self, - documentation_var: &Ident, - struct_fields_init: TokenStream, - ) -> syn::Result { + fn builder_set(&self, struct_fields_init: Vec) -> syn::Result { let name_str = self.name_str(); - let speculative_exec_safe = self.speculative_exec_safe; + let components = render_native_callable_components(self)?; + let struct_name = self.struct_name(); let special_builtin_function = self.special_builtin_function_expr(); @@ -223,31 +198,29 @@ impl StarFun { "methods cannot have a `ty_custom_function` attribute", )); } - Ok(syn::parse_quote_spanned! {self.span()=> + Ok(syn::parse_quote! { #[allow(clippy::redundant_closure)] globals_builder.set_method( #name_str, - #speculative_exec_safe, - #documentation_var, + #components, #struct_name { - #struct_fields_init + #( #struct_fields_init, )* }, ); }) } else { - let typ = self.as_type_expr(); + let as_type = self.as_type_expr(); let ty_custom = self.ty_custom_expr(); - Ok(syn::parse_quote_spanned! {self.span()=> + Ok(syn::parse_quote! 
{ #[allow(clippy::redundant_closure)] globals_builder.set_function( #name_str, - #speculative_exec_safe, - #documentation_var, - #typ, + #components, + #as_type, #ty_custom, #special_builtin_function, #struct_name { - #struct_fields_init + #( #struct_fields_init, )* }, ); }) @@ -256,21 +229,17 @@ impl StarFun { } pub(crate) fn render_fun(x: StarFun) -> syn::Result { - let span = x.span(); - - let (documentation_var, documentation) = render_documentation(&x)?; - - let (this_param, this_param_type, this_arg) = x.this_param_arg(); - let (eval_param, eval_param_type, eval_arg) = x.eval_param_arg(); - let (heap_param, heap_param_type, heap_arg) = x.heap_param_arg(); - let (binding_params, binding_param_types, prepare, binding_args) = x.binding_params_arg(); + let (this_outer_param, this_inner_param, this_prepare, this_arg) = x.this_param_arg(); + let (eval_param, eval_arg) = x.eval_param_arg(); + let (heap_param, heap_arg) = x.heap_param_arg(); + let (binding_params, prepare, binding_args) = x.binding_params_arg()?; let trait_name = x.trait_name(); let (struct_fields, struct_fields_init) = x.struct_fields()?; let struct_name = x.struct_name(); - let builder_set = x.builder_set(&documentation_var, struct_fields_init)?; + let builder_set = x.builder_set(struct_fields_init)?; let StarFun { attrs, @@ -279,65 +248,85 @@ pub(crate) fn render_fun(x: StarFun) -> syn::Result { .. } = x; - Ok(syn::parse_quote_spanned! { - span=> - { - struct #struct_name { - #struct_fields - } + let invoke_params: Vec = iter::empty() + .chain(this_inner_param) + .chain(binding_params) + .chain(eval_param) + .chain(heap_param) + .collect(); - impl #struct_name { - // TODO(nga): copy lifetime parameter from declaration, - // so the warning would be precise. - #[allow(clippy::extra_unused_lifetimes)] - #( #attrs )* - fn invoke_impl<'v>( - #this_param - #( #binding_params, )* - #eval_param - #heap_param - ) -> #return_type { - #body - } + let invoke_args = iter::empty() + .chain(this_arg) + .chain(binding_args) + .chain(eval_arg) + .chain(heap_arg); + + let param_types: Vec<_> = invoke_params.iter().map(|p| &p.ty).collect(); + + let this_outer_param = this_outer_param.into_iter(); + + let struct_def: syn::ItemStruct = syn::parse_quote! { + #[allow(non_camel_case_types)] + struct #struct_name { + #( #struct_fields, )* + } + }; + + let impl_struct: syn::ItemImpl = syn::parse_quote! { + impl #struct_name { + // TODO(nga): copy lifetime parameter from declaration, + // so the warning would be precise. + #[allow(clippy::extra_unused_lifetimes)] + #( #attrs )* + fn invoke_impl<'v>( + #( #invoke_params, )* + ) -> #return_type { + #body + } - // When function signature declares return type as `anyhow::Result`, - // we cannot call `T::starlark_type_repr` to render documentation, because there's no T. - // Future Rust will provide syntax `type ReturnType = impl AllocValue`: - // https://github.com/rust-lang/rfcs/pull/2515 - // Until then we use this hack as a workaround. - #[allow(dead_code)] // Function is not used when return type is specified explicitly. 
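// Why the `return_type_starlark_type_repr` workaround mentioned above works:
// `get_impl` is generic over the success type of the function pointer it
// receives, so passing `Self::invoke_impl` (never calling it) lets type
// inference bind `T` to the declared return type. A minimal standalone sketch
// of the same trick, with hypothetical names:
//
//     fn type_name_of_ok<T, E>(_f: fn() -> Result<T, E>) -> &'static str {
//         std::any::type_name::<T>()
//     }
//     fn produce() -> Result<i32, String> { Ok(1) }
//     assert_eq!(type_name_of_ok(produce), "i32");
//
// The macro's version returns `T::starlark_type_repr()` instead of a type
// name, but the inference mechanism is identical.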
- fn return_type_starlark_type_repr() -> starlark::typing::Ty { - fn get_impl<'v, T: starlark::values::AllocValue<'v>>( - _f: fn( - #this_param_type - #( #binding_param_types, )* - #eval_param_type - #heap_param_type - ) -> anyhow::Result, - ) -> starlark::typing::Ty { - ::starlark_type_repr() - } - get_impl(Self::invoke_impl) + // When function signature declares return type as `anyhow::Result`, + // we cannot call `T::starlark_type_repr` to render documentation, because there's no T. + // Future Rust will provide syntax `type ReturnType = impl AllocValue`: + // https://github.com/rust-lang/rfcs/pull/2515 + // Until then we use this hack as a workaround. + #[allow(dead_code)] // Function is not used when return type is specified explicitly. + fn return_type_starlark_type_repr() -> starlark::typing::Ty { + fn get_impl<'v, T: starlark::values::AllocValue<'v>, E>( + _f: fn( + #( #param_types, )* + ) -> std::result::Result, + ) -> starlark::typing::Ty { + ::starlark_type_repr() } + get_impl(Self::invoke_impl) } + } + }; - impl #trait_name for #struct_name { - #[allow(non_snake_case)] // Starlark doesn't have this convention - fn invoke<'v>( - &self, - eval: &mut starlark::eval::Evaluator<'v, '_>, - #this_param - parameters: &starlark::eval::Arguments<'v, '_>, - ) -> anyhow::Result> { - #prepare - match Self::invoke_impl(#this_arg #( #binding_args, )* #eval_arg #heap_arg) { - Ok(v) => Ok(eval.heap().alloc(v)), - Err(e) => Err(e), - } + let impl_trait: syn::ItemImpl = syn::parse_quote! { + impl #trait_name for #struct_name { + #[allow(non_snake_case)] // Starlark doesn't have this convention + fn invoke<'v>( + &self, + eval: &mut starlark::eval::Evaluator<'v, '_, '_>, + #(#this_outer_param,)* + parameters: &starlark::eval::Arguments<'v, '_>, + ) -> starlark::Result> { + #this_prepare + #prepare + match Self::invoke_impl(#( #invoke_args, )*) { + Ok(v) => Ok(eval.heap().alloc(v)), + Err(e) => Err(starlark::__derive_refs::invoke_macro_error::InvokeMacroError::into_starlark_error(e)), } } + } + }; - #documentation + Ok(syn::parse_quote! { + { + #struct_def + #impl_struct + #impl_trait #builder_set } }) @@ -350,398 +339,448 @@ struct Bindings { // Given __args and __signature (if render_signature was Some) // create bindings for all the arguments -fn render_binding(x: &StarFun) -> Bindings { - let span = x.args_span(); - match x.source { - StarFunSource::Arguments => { - let StarArg { - span, - attrs, - name, - ty, - .. - } = &x.args[0]; - let span = *span; - Bindings { - prepare: quote_spanned! { span=> }, - bindings: vec![BindingArg { - name: name.to_owned(), - ty: ty.to_owned(), - attrs: attrs.clone(), - mutability: None, - expr: syn::parse_quote_spanned! { span=> parameters }, - }], - } - } - StarFunSource::ThisArguments => { - let StarArg { - span, - attrs, - name, - ty, - .. - } = &x.args[1]; - let span = *span; - let this = render_binding_arg(&x.args[0]); - Bindings { - prepare: quote_spanned! { span=> }, - bindings: vec![ - this, - BindingArg { - name: name.to_owned(), - ty: ty.to_owned(), - attrs: attrs.clone(), - mutability: None, - expr: syn::parse_quote_spanned! { span=> parameters }, - }, - ], - } +fn render_binding(x: &StarFun) -> syn::Result { + match (&x.args, &x.source) { + (RegularParams::Arguments(arguments), StarFunSource::Arguments) => Ok(Bindings { + prepare: TokenStream::new(), + bindings: vec![BindingArg { + param: arguments.param.clone(), + expr: syn::parse_quote! 
{ parameters }, +            }], +        }), +        (RegularParams::Arguments(_), _) | (_, StarFunSource::Arguments) => Err(syn::Error::new( x.span(), "Inconsistent params/source (internal error)", )), (RegularParams::Unpack(args), StarFunSource::Signature { count }) => { + let bind_args: Vec<BindingArg> = args + .iter() + .map(render_binding_arg) + .collect::<syn::Result<_>>()?; + Ok(Bindings { + prepare: quote! { + let __args: [_; #count] = + starlark::__derive_refs::parse_args::parse_signature( + &self.signature, parameters, eval.heap())?; + }, + bindings: bind_args, + }) } - StarFunSource::Signature { count } => { - let bind_args: Vec<BindingArg> = x.args.iter().map(render_binding_arg).collect(); - Bindings { - prepare: quote_spanned! { span=> - let __args: [_; #count] = self.signature.collect_into(parameters, eval.heap())?; + ( + RegularParams::Unpack(args), + StarFunSource::Positional { + required, + optional, + kwargs: false, + }, + ) => { + let bind_args = args + .iter() + .map(render_binding_arg) + .collect::<syn::Result<_>>()?; + Ok(Bindings { + prepare: quote! { + let (__required, __optional): ([_; #required], [_; #optional]) = + starlark::__derive_refs::parse_args::parse_positional( + &parameters, eval.heap())?; }, bindings: bind_args, - } + }) } - StarFunSource::Positional { required, optional } => { - let bind_args = x.args.iter().map(render_binding_arg).collect(); - if optional == 0 { - Bindings { - prepare: quote_spanned! { - span=> - parameters.no_named_args()?; - let __required: [_; #required] = parameters.positional(eval.heap())?; - }, - bindings: bind_args, - } - } else { - Bindings { - prepare: quote_spanned! { - span=> - parameters.no_named_args()?; - let (__required, __optional): ([_; #required], [_; #optional]) = parameters.optional(eval.heap())?; - }, - bindings: bind_args, - } - } + ( + RegularParams::Unpack(args), + StarFunSource::Positional { + required, + optional, + kwargs: true, + }, + ) => { + let bind_args = args + .iter() + .map(render_binding_arg) + .collect::<syn::Result<_>>()?; + Ok(Bindings { + prepare: quote! { + let (__required, __optional, s_kwargs_value): ([_; #required], [_; #optional], _) = + starlark::__derive_refs::parse_args::parse_positional_kwargs_alloc( + &parameters, eval.heap())?; + }, + bindings: bind_args, + }) } } } struct BindingArg { expr: syn::Expr, - - attrs: Vec<Attribute>, - mutability: Option<syn::Token![mut]>, - name: Ident, - ty: syn::Type, + param: SimpleParam, } impl BindingArg { - fn render_param_type(&self) -> syn::Type { - let BindingArg { ty, .. } = self; - ty.clone() + fn render_arg(&self) -> syn::Expr { + self.expr.clone() } +} - fn render_param(&self) -> syn::FnArg { - let mutability = &self.mutability; - let name = &self.name; - let ty = self.render_param_type(); - let attrs = &self.attrs; - syn::parse_quote_spanned! { - self.name.span()=> - #( #attrs )* - #mutability #name: #ty +/// Convert an expression of type `Value` to an expression of type of parameter. +fn render_unpack_value(value: syn::Expr, arg: &StarArg) -> syn::Expr { + if arg.is_value() { + // If we already have a `Value`, no need to unpack it. + value + } else { + let name_str = ident_string(&arg.param.ident); + syn::parse_quote! { + starlark::__derive_refs::parse_args::check_unpack(#name_str, #value)? } } +} - fn render_arg(&self) -> syn::Expr { - self.expr.clone() +/// Convert an expression of type `Option<Value>` to an expression of type of parameter. +fn render_unpack_option_value(option_value: syn::Expr, arg: &StarArg) -> syn::Expr { + let name_str = ident_string(&arg.param.ident); + if arg.is_option_value() { + // If we already have a `Option<Value>`, no need to unpack it. 
+ option_value + } else if arg.is_option() { + syn::parse_quote! { + starlark::__derive_refs::parse_args::check_optional(#name_str, #option_value)? + } + } else if arg.is_value() { + // We call `check_required` even if `default` is set because for `Value`, + // default is pulled into `ParametersSpec`. + syn::parse_quote! { + starlark::__derive_refs::parse_args::check_required(#name_str, #option_value)? + } + } else if let Some(default) = &arg.default { + syn::parse_quote! { + starlark::__derive_refs::parse_args::check_defaulted(#name_str, #option_value, || #default)? + } + } else { + syn::parse_quote! { + starlark::__derive_refs::parse_args::check_required(#name_str, #option_value)? + } } } // Create a binding for an argument given. If it requires an index, take from the index -fn render_binding_arg(arg: &StarArg) -> BindingArg { - let span = arg.span; - let name = &arg.name; - let name_str = ident_string(name); - - let source = match arg.source { - StarArgSource::This => quote_spanned! {span=> __this}, - StarArgSource::Argument(i) => quote_spanned! {span=> __args[#i].get()}, - StarArgSource::Required(i) => quote_spanned! {span=> Some(__required[#i])}, - StarArgSource::Optional(i) => quote_spanned! {span=> __optional[#i]}, - ref s => unreachable!("unknown source: {:?}", s), - }; - - // Rust doesn't have powerful enough nested if yet - let next = if arg.pass_style == StarArgPassStyle::This { - syn::parse_quote_spanned! { span=> starlark::eval::Arguments::check_this(#source)? } - } else if arg.is_option() { - assert!( - arg.default.is_none(), - "Can't have Option argument with a default, for `{}`", - name_str - ); - syn::parse_quote_spanned! { span=> starlark::eval::Arguments::check_optional(#name_str, #source)? } - } else if !arg.is_value() && arg.default.is_some() { - let default = arg - .default - .as_ref() - .unwrap_or_else(|| unreachable!("Checked on the line above")); - syn::parse_quote_spanned! { span=> - { - // Combo - #[allow(clippy::manual_unwrap_or)] - #[allow(clippy::unnecessary_lazy_evaluations)] - #[allow(clippy::redundant_closure)] - let x = starlark::eval::Arguments::check_optional(#name_str, #source)?.unwrap_or_else(|| #default); - x - } +fn render_binding_arg(arg: &StarArg) -> syn::Result { + let next: syn::Expr = match &arg.source { + StarArgSource::Argument(i) => { + render_unpack_option_value(syn::parse_quote! { __args[#i] }, arg) + } + StarArgSource::Optional(i) => { + render_unpack_option_value(syn::parse_quote! { __optional[#i] }, arg) + } + StarArgSource::Required(i) => { + render_unpack_value(syn::parse_quote! { __required[#i] }, arg) + } + StarArgSource::Kwargs => render_unpack_value(syn::parse_quote! { s_kwargs_value }, arg), + s => { + return Err(syn::Error::new( + arg.span, + format!("Unexpected source {:?} (internal error)", s), + )); } - } else { - syn::parse_quote_spanned! { span=> starlark::eval::Arguments::check_required(#name_str, #source)? } }; - BindingArg { + Ok(BindingArg { expr: next, - attrs: arg.attrs.clone(), - mutability: arg.mutable, - name: arg.name.to_owned(), - ty: arg.ty.clone(), - } -} - -#[derive(Copy, Clone, Eq, PartialEq)] -enum Purpose { - Documentation, - Parsing, + param: arg.param.clone(), + }) } // Given the arguments, create a variable `signature` with a `ParametersSpec` object. 
// Or return None if you don't need a signature -fn render_signature(x: &StarFun, purpose: Purpose) -> syn::Result { - let span = x.args_span(); +fn render_signature(x: &StarFun) -> syn::Result { let name_str = ident_string(&x.name); - let signature_var = format_ident!("__signature"); - let sig_args = render_signature_args(&x.args, &signature_var, purpose)?; - Ok(quote_spanned! { - span=> { - #[allow(unused_mut)] - let mut #signature_var = starlark::eval::ParametersSpec::new(#name_str.to_owned()); - #sig_args - #signature_var.finish() + + match &x.args { + RegularParams::Arguments(_) => Ok(syn::parse_quote! { + starlark::__derive_refs::sig::parameter_spec_for_arguments(#name_str) + }), + RegularParams::Unpack(args) => { + let ParametersSpecArgs { + pos_only, + pos_or_named, + args, + named_only, + kwargs, + } = parameter_spec_args(args)?; + + let pos_only: Vec = + pos_only.iter().map(SignatureRegularArg::render).collect(); + let pos_or_named: Vec = pos_or_named + .iter() + .map(SignatureRegularArg::render) + .collect(); + let named_only: Vec = + named_only.iter().map(SignatureRegularArg::render).collect(); + + Ok(syn::parse_quote! { + starlark::__derive_refs::sig::parameter_spec( + #name_str, + &[#(#pos_only),*], + &[#(#pos_or_named),*], + #args, + &[#(#named_only),*], + #kwargs, + ) + }) } - }) + } } -fn render_documentation(x: &StarFun) -> syn::Result<(Ident, TokenStream)> { - let span = x.args_span(); +pub(crate) fn render_none() -> syn::Expr { + syn::parse_quote! { std::option::Option::None } +} - // A signature is not needed to invoke positional-only functions, but we still want - // information like names, order, type, etc to be available to call '.documentation()' on. - let name_str = ident_string(&x.name); - let need_render_signature = match &x.source { - StarFunSource::Signature { .. } | StarFunSource::Positional { .. } => true, - StarFunSource::Arguments | StarFunSource::ThisArguments => false, - }; - let documentation_signature = if need_render_signature { - render_signature(x, Purpose::Documentation)? - } else { - // An Arguments can take anything, so give the most generic documentation signature - quote_spanned! { - span=> { - let mut __signature = starlark::eval::ParametersSpec::::new(#name_str.to_owned()); - __signature.args(); - __signature.kwargs(); - __signature.finish() - } +pub(crate) fn render_some(expr: syn::Expr) -> syn::Expr { + syn::parse_quote! { std::option::Option::Some(#expr) } +} + +pub(crate) fn render_option(expr: Option) -> syn::Expr { + match expr { + Some(x) => render_some(x), + None => render_none(), + } +} + +fn render_regular_native_callable_param(arg: &StarArg) -> syn::Result { + let ty = render_starlark_type(arg.without_option()); + let name_str = ident_string(&arg.param.ident); + let required: syn::Expr = match (&arg.default, arg.is_option()) { + (Some(_), true) => { + return Err(syn::Error::new( + arg.span, + "Option arguments cannot have defaults", + )); + } + (None, true) => render_some( + syn::parse_quote! { starlark::__derive_refs::param_spec::NativeCallableParamDefaultValue::Optional }, + ), + (None, false) => render_none(), + (Some(default), _) => { + // For things that are type Value, we put them on the frozen heap. + // For things that aren't type value, use optional and then next_opt/unwrap + // to avoid the to/from value conversion. + let default = if arg.is_value() { + Some(syn::parse_quote! 
{ globals_builder.alloc(#default) }) + } else { + render_default_as_frozen_value(default) + }; + render_some(match default { + None => { + syn::parse_quote! { starlark::__derive_refs::param_spec::NativeCallableParamDefaultValue::Optional } + } + Some(_) => { + syn::parse_quote! { starlark::__derive_refs::param_spec::NativeCallableParamDefaultValue::Value(#default) } + } + }) } }; + Ok(syn::parse_quote! { + starlark::__derive_refs::param_spec::NativeCallableParam { + name: #name_str, + ty: #ty, + required: #required, + } + }) +} + +fn render_native_callable_components(x: &StarFun) -> syn::Result { let docs = match x.docstring.as_ref() { - Some(d) => quote_spanned!(span=> Some(#d)), - None => quote_spanned!(span=> None), + Some(d) => quote!(Some(#d)), + None => quote!(None), }; - let parameter_types: Vec = x - .args - .iter() - .flat_map(|arg| { - if arg.pass_style == StarArgPassStyle::This { - // "this" gets ignored when creating the signature, so make sure the indexes match up. - vec![] - } else if arg.pass_style == StarArgPassStyle::Args { - // TODO(nga): type is not as precise as it could be. - // If parameter type is declared as `Vec` for example, - // we should pass repr of `String`, not repr of `Vec`. - // We cannot do it yet, so we pass any. - vec![syn::parse_quote_spanned! { span=> starlark::typing::Ty::any() }] - } else if arg.pass_style != StarArgPassStyle::Arguments { - let typ_str = render_starlark_type(span, arg.without_option()); - vec![syn::parse_quote_spanned! { span=> #typ_str }] - } else { - // `*args` and `**kwargs`. - vec![ - syn::parse_quote_spanned! { span=> starlark::typing::Ty::any() }, - syn::parse_quote_spanned! { span=> starlark::typing::Ty::any() }, - ] + + let param_spec: syn::Expr = match &x.args { + RegularParams::Arguments(_) => { + syn::parse_quote! { + starlark::__derive_refs::param_spec::NativeCallableParamSpec::for_arguments() } - }) - .collect(); + } + RegularParams::Unpack(args) => { + let ParamSpec { + pos_only, + pos_or_named, + args, + named_only, + kwargs, + } = ParamSpec::split(args)?; + + let pos_only: Vec = pos_only + .iter() + .copied() + .map(render_regular_native_callable_param) + .collect::>>()?; + let pos_or_named: Vec = pos_or_named + .iter() + .copied() + .map(render_regular_native_callable_param) + .collect::>>()?; + let args: Option = args.map(|arg| { + let name_str = ident_string(&arg.param.ident); + let ty = render_starlark_type(&arg.param.ty); + syn::parse_quote! { + starlark::__derive_refs::param_spec::NativeCallableParam::args(#name_str, #ty) + } + }); + let named_only: Vec = named_only + .iter() + .copied() + .map(render_regular_native_callable_param) + .collect::>>()?; + let kwargs: Option = kwargs.map(|arg| { + let name_str = ident_string(&arg.param.ident); + let ty = render_starlark_type(&arg.param.ty); + syn::parse_quote! { + starlark::__derive_refs::param_spec::NativeCallableParam::kwargs(#name_str, #ty) + } + }); + + let args = render_option(args); + let kwargs = render_option(kwargs); + syn::parse_quote! 
{ + starlark::__derive_refs::param_spec::NativeCallableParamSpec { + pos_only: vec![#(#pos_only),*], + pos_or_named: vec![#(#pos_or_named),*], + args: #args, + named_only: vec![#(#named_only),*], + kwargs: #kwargs, + } + } + } + }; let return_type_str = render_starlark_return_type(x); - let var_name = format_ident!("__documentation"); - let as_type = x.as_type_expr(); - let documentation = quote_spanned!(span=> - let #var_name = { - let parameter_types = std::vec![#(#parameter_types),*]; - starlark::values::function::NativeCallableRawDocs { + let speculative_exec_safe = x.speculative_exec_safe; + Ok(quote!( + { + let param_spec = #param_spec; + starlark::__derive_refs::components::NativeCallableComponents { + speculative_exec_safe: #speculative_exec_safe, rust_docstring: #docs, - signature: #documentation_signature, - parameter_types, + param_spec, return_type: #return_type_str, - as_type: #as_type, } - }; - ); - Ok((var_name, documentation)) + } + )) } -fn render_signature_args( - args: &[StarArg], - signature_var: &Ident, - purpose: Purpose, -) -> syn::Result { - #[derive(PartialEq, Eq, PartialOrd, Ord)] - enum CurrentParamStyle { - PosOnly, - PosOrNamed, - NamedOnly, - NoMore, - } +enum SignatureRegularArgMode { + Required, + Optional, + Defaulted(syn::Expr), +} - let mut sig_args = TokenStream::new(); - let mut last_param_style = CurrentParamStyle::PosOnly; - for arg in args { - match arg.pass_style { - StarArgPassStyle::This => {} - StarArgPassStyle::Args => { - if last_param_style >= CurrentParamStyle::NamedOnly { - return Err(syn::Error::new( - arg.span, - "`args` cannot follow named-only parameters", - )); - } - last_param_style = CurrentParamStyle::NamedOnly; - } - StarArgPassStyle::Kwargs => { - if last_param_style == CurrentParamStyle::NoMore { - return Err(syn::Error::new( - arg.span, - "Cannot have more than one `kwargs` parameter", - )); - } - last_param_style = CurrentParamStyle::NoMore; +impl SignatureRegularArgMode { + fn from_star_arg(arg: &StarArg) -> SignatureRegularArgMode { + if arg.is_option() { + SignatureRegularArgMode::Optional + } else if let Some(default) = &arg.default { + // For things that are type Value, we put them on the frozen heap. + // For things that aren't type value, use optional and then next_opt/unwrap + // to avoid the to/from value conversion. + if arg.is_value() { + SignatureRegularArgMode::Defaulted(syn::parse_quote! { + globals_builder.alloc(#default) + }) + } else { + SignatureRegularArgMode::Optional } - StarArgPassStyle::PosOnly => { - if last_param_style > CurrentParamStyle::PosOnly { - return Err(syn::Error::new( - arg.span, - "Positional-only parameter after non-positional-only", - )); - } - last_param_style = CurrentParamStyle::PosOnly; + } else { + SignatureRegularArgMode::Required + } + } +} + +/// Derive version of `NativeSigArg`. +struct SignatureRegularArg { + name: String, + mode: SignatureRegularArgMode, +} + +impl SignatureRegularArg { + fn from_star_arg(arg: &StarArg) -> SignatureRegularArg { + SignatureRegularArg { + name: ident_string(&arg.param.ident), + mode: SignatureRegularArgMode::from_star_arg(arg), + } + } + + fn render(&self) -> syn::Expr { + let name_str = &self.name; + match &self.mode { + SignatureRegularArgMode::Required => { + syn::parse_quote! { starlark::__derive_refs::sig::NativeSigArg::Required(#name_str) } } - StarArgPassStyle::PosOrNamed => { - if last_param_style == CurrentParamStyle::PosOnly { - sig_args.extend(quote_spanned! 
{ arg.span=> - #signature_var.no_more_positional_only_args(); - }); - } - last_param_style = CurrentParamStyle::PosOrNamed; + SignatureRegularArgMode::Optional => { + syn::parse_quote! { starlark::__derive_refs::sig::NativeSigArg::Optional(#name_str) } } - StarArgPassStyle::NamedOnly => { - if last_param_style < CurrentParamStyle::NamedOnly { - sig_args.extend(quote_spanned! { arg.span=> - #signature_var.no_more_positional_args(); - }); - } - last_param_style = CurrentParamStyle::NamedOnly; - } - StarArgPassStyle::Arguments => { - return Err(syn::Error::new( - arg.span, - "unreachable: signature is not meant to be created for `&Arguments`", - )); + SignatureRegularArgMode::Defaulted(value) => { + syn::parse_quote! { starlark::__derive_refs::sig::NativeSigArg::Defaulted(#name_str, #value) } } } - sig_args.extend(render_signature_arg(arg, signature_var, purpose)?); } - Ok(sig_args) } -// Generate a statement that modifies signature to add a new argument in. -fn render_signature_arg( - arg: &StarArg, - signature_var: &Ident, - purpose: Purpose, -) -> syn::Result { - let span = arg.span; - - let name_str = ident_string(&arg.name); - - if arg.pass_style == StarArgPassStyle::Args { - assert!(arg.default.is_none(), "Can't have *args with a default"); - Ok(quote_spanned! { span=> #signature_var.args();}) - } else if arg.pass_style == StarArgPassStyle::Kwargs { - assert!(arg.default.is_none(), "Can't have **kwargs with a default"); - Ok(quote_spanned! { span=> #signature_var.kwargs();}) - } else if arg.pass_style == StarArgPassStyle::This { - Ok(quote_spanned! { span=> }) - } else if arg.is_option() { - Ok(quote_spanned! { span=> #signature_var.optional(#name_str);}) - } else if let Some(default) = &arg.default { - // For things that are type Value, we put them on the frozen heap. - // For things that aren't type value, use optional and then next_opt/unwrap - // to avoid the to/from value conversion. - if arg.is_value() { - Ok(quote_spanned! { span=> - #signature_var.defaulted(#name_str, globals_builder.alloc(#default)); - }) - } else if purpose == Purpose::Documentation - && render_default_as_frozen_value(default).is_some() - { - // We want the repr of the default arugment to show up, so pass it along - let frozen = render_default_as_frozen_value(default).unwrap(); - Ok(quote_spanned! { span=> - #signature_var.defaulted(#name_str, #frozen); - }) - } else { - Ok(quote_spanned! { span=> - #signature_var.optional(#name_str); - }) - } - } else { - Ok(quote_spanned! { span=> - #signature_var.required(#name_str); - }) - } +/// Arguments to pass to `parameter_spec` to render `ParametersSpec`. +struct ParametersSpecArgs { + pos_only: Vec, + pos_or_named: Vec, + /// `*args`. + args: bool, + named_only: Vec, + /// `**kwargs`. + kwargs: bool, +} + +/// Return the number of positional and positional-only arguments. 
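// Worked example of the mapping implemented by
// `SignatureRegularArgMode::from_star_arg` above (hypothetical parameters,
// following the quoted logic):
//
//     fn f<'v>(
//         a: i32,                                // -> NativeSigArg::Required("a")
//         b: Option<i32>,                        // -> NativeSigArg::Optional("b"): `Option<_>`
//                                                //    parameters are always optional
//         #[starlark(default = 7)] c: i32,       // -> NativeSigArg::Optional("c"): non-`Value`
//                                                //    defaults are applied later, in the
//                                                //    binding step
//         #[starlark(default = 7)] d: Value<'v>, // -> NativeSigArg::Defaulted("d",
//                                                //    globals_builder.alloc(7)): `Value`
//                                                //    defaults live on the frozen heap
//     ) -> anyhow::Result<i32> { /* ... */ }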
+fn parameter_spec_args(star_args: &[StarArg]) -> syn::Result { + let ParamSpec { + pos_only, + pos_or_named, + args, + named_only, + kwargs, + } = ParamSpec::split(star_args)?; + + let pos_only = pos_only + .iter() + .map(|a| SignatureRegularArg::from_star_arg(a)) + .collect(); + let pos_or_named = pos_or_named + .iter() + .map(|a| SignatureRegularArg::from_star_arg(a)) + .collect(); + let args = args.is_some(); + let named_only = named_only + .iter() + .map(|a| SignatureRegularArg::from_star_arg(a)) + .collect(); + let kwargs = kwargs.is_some(); + + Ok(ParametersSpecArgs { + pos_only, + pos_or_named, + args, + named_only, + kwargs, + }) } /// We have an argument that the user wants to use as a default. /// That _might_ have a valid `FrozenValue` representation, if so, it would be great to use for documentation. /// Try and synthesise it if we can. -fn render_default_as_frozen_value(default: &Expr) -> Option { +fn render_default_as_frozen_value(default: &Expr) -> Option { let x = quote!(#default).to_string(); if let Ok(x) = x.trim_end_matches("i32").parse::() { - Some(quote! { globals_builder.alloc(#x) }) + Some(syn::parse_quote! { globals_builder.alloc(#x) }) } else if let Ok(x) = x.parse::() { - Some(quote! { starlark::values::FrozenValue::new_bool(#x) }) + Some(syn::parse_quote! { starlark::values::FrozenValue::new_bool(#x) }) } else if x == "NoneOr :: None" { - Some(quote! { starlark::values::FrozenValue::new_none() }) + Some(syn::parse_quote! { starlark::values::FrozenValue::new_none() }) } else if matches!( default, Expr::Lit(ExprLit { @@ -750,11 +789,11 @@ fn render_default_as_frozen_value(default: &Expr) -> Option { }) ) { // Make sure we don't splice in `x` again, or we double quote the string - Some(quote! { globals_builder.alloc(#default) }) - } else if x == "Vec :: new()" { - Some(quote! { globals_builder.alloc(starlark::values::list::AllocList::EMPTY) }) + Some(syn::parse_quote! { starlark::const_frozen_string!(#default).to_frozen_value() }) + } else if x == "UnpackListOrTuple :: default()" || x == "UnpackList :: default()" { + Some(syn::parse_quote! { starlark::values::FrozenValue::new_empty_list() }) } else if x == "SmallMap :: new()" { - Some(quote! { globals_builder.alloc(starlark::values::dict::AllocDict::EMPTY) }) + Some(syn::parse_quote! { starlark::values::FrozenValue::new_empty_dict() }) } else { None } diff --git a/starlark-rust/starlark_derive/src/module/render/mod.rs b/starlark-rust/starlark_derive/src/module/render/mod.rs deleted file mode 100644 index c75fa93f7e56a..0000000000000 --- a/starlark-rust/starlark_derive/src/module/render/mod.rs +++ /dev/null @@ -1,242 +0,0 @@ -/* - * Copyright 2019 The Starlark in Rust Authors. - * Copyright (c) Facebook, Inc. and its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -mod fun; - -use std::collections::HashSet; - -use proc_macro2::TokenStream; -use quote::format_ident; -use quote::quote_spanned; - -use crate::module::render::fun::render_fun; -use crate::module::typ::SpecialParam; -use crate::module::typ::StarAttr; -use crate::module::typ::StarConst; -use crate::module::typ::StarFun; -use crate::module::typ::StarModule; -use crate::module::typ::StarStmt; -use crate::module::util::ident_string; - -pub(crate) fn render(x: StarModule) -> syn::Result { - let span = x.span(); - let render = render_impl(x)?; - Ok(quote_spanned! { span => #render }) -} - -fn render_impl(x: StarModule) -> syn::Result { - let span = x.span(); - let StarModule { - name, - globals_builder, - visibility, - attrs, - docstring, - stmts, - module_kind, - } = x; - let statics = format_ident!("{}", module_kind.statics_type_name()); - let stmts: Vec<_> = stmts - .into_iter() - .map(render_stmt) - .collect::>()?; - let set_docstring = - docstring.map(|ds| quote_spanned!(span=> globals_builder.set_docstring(#ds);)); - Ok(syn::parse_quote_spanned! { - span=> - #( #attrs )* - #visibility fn #name(globals_builder: #globals_builder) { - fn build(globals_builder: #globals_builder) { - #set_docstring - #( #stmts )* - // Mute warning if stmts is empty. - let _ = globals_builder; - } - static RES: starlark::environment::#statics = starlark::environment::#statics::new(); - RES.populate(build, globals_builder); - } - }) -} - -fn render_stmt(x: StarStmt) -> syn::Result { - match x { - StarStmt::Const(x) => Ok(render_const(x)), - StarStmt::Attr(x) => Ok(render_attr(x)), - StarStmt::Fun(x) => render_fun(x), - } -} - -fn render_const(x: StarConst) -> syn::Stmt { - let StarConst { name, ty, value } = x; - let span = name.span(); - let name = ident_string(&name); - syn::parse_quote_spanned! { - span=> - globals_builder.set::<#ty>(#name, #value); - } -} - -fn render_attr(x: StarAttr) -> syn::Stmt { - let span = x.span(); - let StarAttr { - name, - arg, - heap, - attrs, - return_type, - speculative_exec_safe, - body, - docstring, - } = x; - let name_str = ident_string(&name); - let name_inner = syn::Ident::new(&format!("{}__inner", name_str), name.span()); - let docstring = match docstring { - Some(d) => quote_spanned!(span=> Some(#d.to_owned())), - None => quote_spanned!(span=> None), - }; - - let let_heap = if let Some(SpecialParam { ident, ty }) = heap { - Some(quote_spanned! { span=> let #ident: #ty = __heap; }) - } else { - None - }; - - syn::parse_quote_spanned! 
{ - span=> - { - #( #attrs )* - #[allow(non_snake_case)] // Starlark doesn't have this convention - fn #name_inner<'v>( - this: starlark::values::Value<'v>, - #[allow(unused_variables)] - __heap: &'v starlark::values::Heap, - ) -> #return_type { - #[allow(unused_variables)] - let this: #arg = match starlark::values::UnpackValue::unpack_value(this) { - None => return Err(starlark::values::ValueError::IncorrectParameterTypeNamedWithExpected( - "this".to_owned(), - <#arg as starlark::values::UnpackValue>::expected(), - this.get_type().to_owned(), - ).into()), - Some(v) => v, - }; - #let_heap - #body - } - - #[allow(non_snake_case)] - fn #name<'v>( - #[allow(unused_variables)] - this: starlark::values::Value<'v>, - heap: &'v starlark::values::Heap, - ) -> anyhow::Result> { - Ok(heap.alloc(#name_inner(this, heap)?)) - } - - globals_builder.set_attribute_fn( - #name_str, - #speculative_exec_safe, - #docstring, - starlark::values::type_repr::type_repr_from_attr_impl(#name_inner), - #name - ); - } - } -} - -/// Get the lifetimes that are mentioned in a given type and its nested generics. -fn get_lifetimes_inner<'a>(ret: &mut HashSet<&'a syn::Lifetime>, typ: &'a syn::Type) { - match typ { - syn::Type::Path(path) => { - if let Some(segment) = path.path.segments.last() { - match &segment.arguments { - syn::PathArguments::None => {} - syn::PathArguments::AngleBracketed(args) => { - for arg in &args.args { - match arg { - syn::GenericArgument::Lifetime(l) => { - ret.insert(l); - } - syn::GenericArgument::Type(t) => get_lifetimes_inner(ret, t), - _ => {} - }; - } - } - syn::PathArguments::Parenthesized(args) => { - for t in &args.inputs { - get_lifetimes_inner(ret, t); - } - match &args.output { - syn::ReturnType::Default => {} - syn::ReturnType::Type(_, t) => get_lifetimes_inner(ret, t), - }; - } - }; - } - } - syn::Type::Group(g) => get_lifetimes_inner(ret, &g.elem), - syn::Type::Paren(p) => get_lifetimes_inner(ret, &p.elem), - syn::Type::Ptr(p) => get_lifetimes_inner(ret, &p.elem), - syn::Type::Reference(r) => { - if let Some(l) = &r.lifetime { - ret.insert(l); - }; - get_lifetimes_inner(ret, &r.elem); - } - syn::Type::Tuple(t) => { - for t in &t.elems { - get_lifetimes_inner(ret, t); - } - } - _ => {} - }; -} - -/// Get the lifetime specifications to use with a function based on the lifetimes mentioned in `typ`. -/// -/// e.g. `i32` would return ``, `Vec<(&'a str, &'b str)>` would return `<'a, 'b>` -fn get_lifetimes(span: proc_macro2::Span, typ: &syn::Type) -> TokenStream { - let mut ret = HashSet::new(); - get_lifetimes_inner(&mut ret, typ); - if ret.is_empty() { - TokenStream::new() - } else { - let mut ret: Vec<_> = ret.into_iter().filter(|l| l.ident != "_").collect(); - ret.sort_by(|l, r| l.ident.cmp(&r.ident)); - quote_spanned!(span=> <#(#ret),*>) - } -} - -pub(crate) fn render_starlark_type(span: proc_macro2::Span, typ: &syn::Type) -> syn::Expr { - let lifetimes = get_lifetimes(span, typ); - syn::parse_quote_spanned! { span=> - { - #[allow(clippy::extra_unused_lifetimes)] - fn get_type_string #lifetimes() -> starlark::typing::Ty { - <#typ as starlark::values::type_repr::StarlarkTypeRepr>::starlark_type_repr() - } - get_type_string() - } - } -} - -pub(crate) fn render_starlark_return_type(fun: &StarFun) -> syn::Expr { - let struct_name = fun.struct_name(); - syn::parse_quote_spanned! 
{ fun.span()=> - #struct_name::return_type_starlark_type_repr() - } -} diff --git a/starlark-rust/starlark_derive/src/module/simple_param.rs b/starlark-rust/starlark_derive/src/module/simple_param.rs new file mode 100644 index 0000000000000..825f6c32f5472 --- /dev/null +++ b/starlark-rust/starlark_derive/src/module/simple_param.rs @@ -0,0 +1,111 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use proc_macro2::TokenStream; +use quote::ToTokens; +use quote::TokenStreamExt; +use syn::parse::Parse; +use syn::parse::ParseStream; +use syn::spanned::Spanned; + +/// Simple function parameter, subset of what Rust language supports. +#[derive(Debug, Clone)] +pub(crate) struct SimpleParam { + pub(crate) attrs: Vec, + pub(crate) mutability: Option, + pub(crate) ident: syn::Ident, + pub(crate) ty: syn::Type, +} + +impl SimpleParam { + fn fn_arg(&self) -> syn::FnArg { + let attrs = &self.attrs; + let mutability = &self.mutability; + let ident = &self.ident; + let ty = &self.ty; + syn::parse_quote! { + #(#attrs)* + #mutability #ident: #ty + } + } + + pub(crate) fn from_fn_arg(arg: syn::FnArg) -> syn::Result { + match arg { + syn::FnArg::Receiver(_) => Err(syn::Error::new( + arg.span(), + "`self` is not used in Starlark native functions", + )), + syn::FnArg::Typed(syn::PatType { + attrs, + pat, + colon_token: _, + ty, + }) => { + let syn::Pat::Ident(ident) = *pat else { + return Err(syn::Error::new_spanned( + pat, + "Only simple identifiers are allowed in Starlark function parameters", + )); + }; + let syn::PatIdent { + attrs: pat_ident_attrs, + by_ref, + mutability, + ident, + subpat, + } = ident; + if by_ref.is_some() { + return Err(syn::Error::new_spanned( + by_ref, + "Starlark function parameters cannot use `ref`", + )); + } + if let Some(subpat) = subpat { + return Err(syn::Error::new_spanned( + subpat.0, + "Starlark function parameters cannot use subpatterns", + )); + } + if let Some(attr) = pat_ident_attrs.into_iter().next() { + return Err(syn::Error::new_spanned( + attr, + "Starlark function parameters cannot have attributes", + )); + } + Ok(SimpleParam { + attrs, + mutability, + ident, + ty: *ty, + }) + } + } + } +} + +impl Parse for SimpleParam { + fn parse(input: ParseStream) -> syn::Result { + let arg = syn::FnArg::parse(input)?; + SimpleParam::from_fn_arg(arg) + } +} + +impl ToTokens for SimpleParam { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.fn_arg().into_token_stream()) + } +} diff --git a/starlark-rust/starlark_derive/src/module/typ.rs b/starlark-rust/starlark_derive/src/module/typ.rs index 8147e7724978a..9e04f20a78efe 100644 --- a/starlark-rust/starlark_derive/src/module/typ.rs +++ b/starlark-rust/starlark_derive/src/module/typ.rs @@ -26,6 +26,7 @@ use syn::Type; use syn::Visibility; use crate::module::parse::ModuleKind; +use crate::module::simple_param::SimpleParam; use crate::module::util::is_type_name; use 
crate::module::util::unpack_option; @@ -42,18 +43,6 @@ pub(crate) struct StarModule { pub stmts: Vec, } -impl StarModule { - pub(crate) fn span(&self) -> Span { - let mut span = self.name.span(); - for stmt in &self.stmts { - if let Some(new_span) = span.join(stmt.span()) { - span = new_span; - } - } - span - } -} - #[allow(clippy::large_enum_variant)] #[derive(Debug)] pub(crate) enum StarStmt { @@ -62,16 +51,6 @@ pub(crate) enum StarStmt { Attr(StarAttr), } -impl StarStmt { - pub(crate) fn span(&self) -> Span { - match self { - StarStmt::Const(c) => c.span(), - StarStmt::Fun(c) => c.span(), - StarStmt::Attr(c) => c.span(), - } - } -} - #[derive(Debug)] pub(crate) struct StarConst { pub name: Ident, @@ -79,19 +58,9 @@ pub(crate) struct StarConst { pub value: Expr, } -impl StarConst { - pub(crate) fn span(&self) -> Span { - self.name - .span() - .join(self.value.span()) - .unwrap_or_else(|| self.name.span()) - } -} - #[derive(Debug)] pub(crate) struct SpecialParam { - pub(crate) ident: Ident, - pub(crate) ty: Type, + pub(crate) param: SimpleParam, } #[derive(Debug)] @@ -99,7 +68,8 @@ pub(crate) struct StarFun { pub name: Ident, pub as_type: Option, pub attrs: Vec, - pub args: Vec, + pub this: Option, + pub args: RegularParams, /// Has `&Heap` parameter. pub heap: Option, /// Has `&mut Evaluator` parameter. @@ -117,13 +87,7 @@ pub(crate) struct StarFun { impl StarFun { /// Is this function a method? (I. e. has `this` as first parameter). pub(crate) fn is_method(&self) -> bool { - match self.args.first() { - Some(first) => { - assert!(first.source != StarArgSource::Unknown, "not yet resolved"); - first.source == StarArgSource::This - } - None => false, - } + self.this.is_some() } pub(crate) fn span(&self) -> Span { @@ -132,20 +96,12 @@ impl StarFun { .join(self.body.span()) .unwrap_or_else(|| self.name.span()) } - - pub(crate) fn args_span(&self) -> Span { - self.args - .iter() - .map(|a| a.span) - .reduce(|a, b| a.join(b).unwrap_or(a)) - .unwrap_or_else(|| self.name.span()) - } } #[derive(Debug)] pub(crate) struct StarAttr { pub name: Ident, - pub arg: Type, + pub this: ThisParam, /// Has `&Heap` parameter. pub heap: Option, pub attrs: Vec, @@ -156,22 +112,11 @@ pub(crate) struct StarAttr { pub docstring: Option, } -impl StarAttr { - pub(crate) fn span(&self) -> Span { - self.name - .span() - .join(self.body.span()) - .unwrap_or_else(|| self.name.span()) - } -} - #[derive(Debug, PartialEq, Copy, Clone, Dupe)] pub(crate) enum StarArgPassStyle { - /// Receiver. - This, /// Parameter can be filled only positionally. PosOnly, - /// Parameter can filled both positionally and by name. + /// Parameter can be filled positionally or by name. PosOrNamed, /// Parameter can be filled by name. NamedOnly, @@ -179,66 +124,96 @@ pub(crate) enum StarArgPassStyle { Args, /// `**kwargs`. Kwargs, - /// `&Arguments`. - Arguments, } -#[derive(Debug)] +/// Method `this` parameter, always first. +#[derive(Debug, Clone)] +pub(crate) struct ThisParam { + pub(crate) param: SimpleParam, +} + +impl ThisParam { + pub(crate) fn render_prepare(&self, target: &syn::Ident, value: &syn::Ident) -> syn::Stmt { + let ty = &self.param.ty; + syn::parse_quote! 
{ + let #target: #ty = starlark::__derive_refs::parse_args::check_this(#value)?; + } + } +} + +#[derive(Debug, Clone)] pub(crate) struct StarArg { pub span: Span, - pub attrs: Vec, - pub mutable: Option, + pub(crate) param: SimpleParam, pub pass_style: StarArgPassStyle, - pub name: Ident, - pub ty: Type, pub default: Option, pub source: StarArgSource, } -#[derive(Debug, PartialEq)] +/// `&Arguments` parameter. +#[derive(Debug)] +pub(crate) struct StarArguments { + pub(crate) param: SimpleParam, +} + +/// How we handle `&Arguments`. +#[derive(Debug)] +pub(crate) enum RegularParams { + /// Pass `&Arguments` as is. + Arguments(StarArguments), + /// Unpack the `&Arguments` into a multiple typed parameters. + Unpack(Vec), +} + +#[derive(Debug, PartialEq, Clone)] pub(crate) enum StarArgSource { Unknown, - This, - Parameters, Argument(usize), Required(usize), Optional(usize), + Kwargs, } #[derive(Debug)] pub(crate) enum StarFunSource { /// Function signature is single `Arguments` parameter. Arguments, - /// Function signature is `this` parameter followed by `Arguments` parameter. - ThisArguments, /// Normal function which uses a signature and parameters parser. Signature { count: usize }, /// Fast-path function of some required parameters, followed by some optional parameters. - /// No named parameters or `*args`/`**kwargs`. - Positional { required: usize, optional: usize }, + /// No named parameters or `*args`, but may have `**kwargs`. + Positional { + required: usize, + optional: usize, + kwargs: bool, + }, } impl StarArg { pub fn is_option(&self) -> bool { - is_type_name(&self.ty, "Option") + is_type_name(&self.param.ty, "Option") } /// Remove the `Option` if it exists, otherwise return the real type. pub fn without_option(&self) -> &Type { - unpack_option(&self.ty).unwrap_or(&self.ty) + unpack_option(&self.param.ty).unwrap_or(&self.param.ty) } pub fn is_value(&self) -> bool { - is_type_name(&self.ty, "Value") + is_type_name(&self.param.ty, "Value") + } + + /// Parameter type is `Option`. 
+ pub(crate) fn is_option_value(&self) -> bool { + self.is_option() && is_type_name(self.without_option(), "Value") } pub fn requires_signature(&self) -> bool { // We need to use a signature if something has a name - // There are *args or **kwargs + // There are *args // There is a default that needs promoting to a Value (since the signature stores that value) - self.pass_style != StarArgPassStyle::PosOnly - || self.pass_style == StarArgPassStyle::Args - || self.pass_style == StarArgPassStyle::Kwargs + (self.pass_style != StarArgPassStyle::PosOnly + && self.pass_style != StarArgPassStyle::Kwargs) || (self.is_value() && self.default.is_some()) } } diff --git a/starlark-rust/starlark_derive/src/serde.rs b/starlark-rust/starlark_derive/src/serde.rs index 00a5801cac898..cbfacdd7c5d12 100644 --- a/starlark-rust/starlark_derive/src/serde.rs +++ b/starlark-rust/starlark_derive/src/serde.rs @@ -24,11 +24,11 @@ use syn::Lifetime; use syn::LifetimeParam; pub fn derive_no_serialize(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - let mut input = parse_macro_input!(input as DeriveInput); + let input = parse_macro_input!(input as DeriveInput); let tick_v = GenericParam::Lifetime(LifetimeParam::new(Lifetime::new("'v", Span::call_site()))); let mut has_tick_v = false; - for param in &mut input.generics.params { + for param in &input.generics.params { if let GenericParam::Lifetime(t) = param { if t.lifetime.ident == "v" { has_tick_v = true; diff --git a/starlark-rust/starlark_derive/src/starlark_type_repr.rs b/starlark-rust/starlark_derive/src/starlark_type_repr.rs index 1b2921f3f2415..8e99919971144 100644 --- a/starlark-rust/starlark_derive/src/starlark_type_repr.rs +++ b/starlark-rust/starlark_derive/src/starlark_type_repr.rs @@ -111,6 +111,8 @@ fn derive_starlark_type_repr_impl( let trait_impl: syn::ItemImpl = syn::parse_quote_spanned! { span => impl #impl_generics starlark::values::type_repr::StarlarkTypeRepr for #ident #type_generics #where_clause { + type Canonical = <#helper_type as starlark::values::type_repr::StarlarkTypeRepr>::Canonical; + fn starlark_type_repr() -> starlark::typing::Ty { <#helper_type as starlark::values::type_repr::StarlarkTypeRepr>::starlark_type_repr() } diff --git a/starlark-rust/starlark_derive/src/starlark_value.rs b/starlark-rust/starlark_derive/src/starlark_value.rs index e6202f1f0240f..b479b02be8bf4 100644 --- a/starlark-rust/starlark_derive/src/starlark_value.rs +++ b/starlark-rust/starlark_derive/src/starlark_value.rs @@ -18,6 +18,8 @@ use quote::quote_spanned; use syn::spanned::Spanned; +use crate::util::GenericsUtil; +use crate::v_lifetime::find_v_lifetime; use crate::vtable::vtable_has_field_name; pub(crate) fn derive_starlark_value( @@ -99,18 +101,8 @@ fn is_impl_starlark_value( if last.ident != "StarlarkValue" { return Err(syn::Error::new_spanned(&last.ident, err)); } - let mut lifetime_param = None; - for lt in input.generics.lifetimes() { - if lifetime_param.is_some() { - return Err(syn::Error::new_spanned( - lt, - "multiple lifetime parameters are not supported", - )); - } - lifetime_param = Some(lt); - } - let lifetime_param = match lifetime_param { - Some(lt) => lt.lifetime.clone(), + let lifetime_param = match find_v_lifetime(&input.generics)? 
{ + Some(lt) => lt.clone(), None => { return Err(syn::Error::new_spanned( input, @@ -143,20 +135,12 @@ impl ImplStarlarkValue { )); } - for param in &self.input.generics.params { - match param { - syn::GenericParam::Lifetime(_) => {} - _ => { - return Err(syn::Error::new_spanned( - param, - "only lifetime parameters are supported to implement `UnpackValue` or `StarlarkTypeRepr`", - )); - } - } - } + GenericsUtil::new(&self.input.generics).assert_only_lifetime_params()?; let lt = &self.lifetime_param; let params = &self.input.generics.params; + // TODO(nga): where clause is incorrect: + // if there's something `Self: Xxx` constraint, it should be `*Self: Xxx`. let where_clause = &self.input.generics.where_clause; let self_ty = &self.input.self_ty; Ok(quote_spanned! { @@ -165,6 +149,8 @@ impl ImplStarlarkValue { impl<#params> starlark::values::type_repr::StarlarkTypeRepr for &#lt #self_ty #where_clause { + type Canonical = #self_ty; + fn starlark_type_repr() -> starlark::typing::Ty { <#self_ty as starlark::values::type_repr::StarlarkTypeRepr>::starlark_type_repr() } @@ -173,8 +159,10 @@ impl ImplStarlarkValue { impl<#params> starlark::values::UnpackValue<#lt> for &#lt #self_ty #where_clause { - fn unpack_value(value: starlark::values::Value<#lt>) -> Option<&#lt #self_ty> { - starlark::values::ValueLike::downcast_ref(value) + type Error = std::convert::Infallible; + + fn unpack_value_impl(value: starlark::values::Value<#lt>) -> Result, Self::Error> { + std::result::Result::Ok(starlark::values::ValueLike::downcast_ref(value)) } } }) @@ -368,7 +356,7 @@ impl ImplStarlarkValue { )); } Ok(Some(syn::parse2(quote_spanned! { self.span() => - fn bit_or(&self, other: starlark::values::Value<'v>, heap: &'v starlark::values::Heap) -> anyhow::Result> { + fn bit_or(&self, other: starlark::values::Value<'v>, heap: &'v starlark::values::Heap) -> starlark::Result> { starlark::values::typing::macro_refs::starlark_value_bit_or_for_type(self, other, heap) } })?)) diff --git a/starlark-rust/starlark_derive/src/trace.rs b/starlark-rust/starlark_derive/src/trace.rs index 6496a84885bef..0e46116687e94 100644 --- a/starlark-rust/starlark_derive/src/trace.rs +++ b/starlark-rust/starlark_derive/src/trace.rs @@ -18,15 +18,15 @@ use std::collections::HashSet; use proc_macro2::Span; -use proc_macro2::TokenStream; use quote::quote; use quote::quote_spanned; +use quote::ToTokens; use syn::parse::ParseStream; use syn::parse_macro_input; use syn::parse_quote; +use syn::punctuated::Punctuated; use syn::spanned::Spanned; use syn::Attribute; -use syn::Data; use syn::DeriveInput; use syn::GenericArgument; use syn::GenericParam; @@ -35,20 +35,48 @@ use syn::Lifetime; use syn::LifetimeParam; use syn::PathArguments; use syn::ReturnType; +use syn::TraitBound; use syn::Type; use syn::TypeParamBound; -use crate::for_each_field::for_each_field; +use crate::util::DeriveInputUtil; -pub fn derive_trace(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - let mut input = parse_macro_input!(input as DeriveInput); +pub(crate) fn derive_trace(input: proc_macro::TokenStream) -> proc_macro::TokenStream { + let input = parse_macro_input!(input as DeriveInput); + match derive_trace_impl(input) { + Ok(x) => x.into_token_stream().into(), + Err(e) => e.to_compile_error().into(), + } +} + +fn derive_trace_impl(mut input: DeriveInput) -> syn::Result { let tick_v = GenericParam::Lifetime(LifetimeParam::new(Lifetime::new("'v", Span::call_site()))); + let TraceAttrs { + unsafe_ignore, + trace_static, + bounds, + } = parse_attrs(&input.attrs)?; + 
if let Some(unsafe_ignore) = unsafe_ignore { + return Err(syn::Error::new_spanned( + unsafe_ignore, + "`unsafe_ignore` attribute is not allowed on `#[derive(Trace)]`, only on fields", + )); + } + if let Some(trace_static) = trace_static { + return Err(syn::Error::new_spanned( + trace_static, + "`static` attribute is not allowed on `#[derive(Trace)]`, only on fields", + )); + } + let bound: TypeParamBound = parse_quote!(starlark::values::Trace<'v>); let mut has_tick_v = false; for param in &mut input.generics.params { if let GenericParam::Type(type_param) = param { - type_param.bounds.push(bound.clone()); + if bounds.is_none() { + type_param.bounds.push(bound.clone()); + } } if let GenericParam::Lifetime(t) = param { if t.lifetime.ident == "v" { @@ -56,6 +84,23 @@ pub fn derive_trace(input: proc_macro::TokenStream) -> proc_macro::TokenStream { } } } + if let Some(bounds) = bounds { + 'outer: for bound in bounds { + for param in &mut input.generics.params { + if let GenericParam::Type(type_param) = param { + if type_param.ident == bound.ident { + type_param.bounds.extend(bound.bounds); + continue 'outer; + } + } + } + return Err(syn::Error::new_spanned( + bound, + "Type parameter not found in the generic parameters", + )); + } + } + let mut generics2 = input.generics.clone(); let (_, ty_generics, where_clause) = input.generics.split_for_impl(); @@ -65,54 +110,123 @@ pub fn derive_trace(input: proc_macro::TokenStream) -> proc_macro::TokenStream { let (impl_generics, _, _) = generics2.split_for_impl(); let name = &input.ident; - let body = match trace_impl(&input.data, &input.generics) { - Ok(body) => body, - Err(e) => { - return e.to_compile_error().into(); - } - }; - let gen = quote! { + let body = trace_impl(&input, &input.generics)?; + Ok(syn::parse_quote! { unsafe impl #impl_generics starlark::values::Trace<'v> for #name #ty_generics #where_clause { #[allow(unused_variables)] fn trace(&mut self, tracer: &starlark::values::Tracer<'v>) { #body } } - }; - gen.into() + }) +} + +syn::custom_keyword!(unsafe_ignore); +syn::custom_keyword!(bound); + +#[derive(Default)] +struct TraceAttrs { + /// `#[trace(unsafe_ignore)]` + unsafe_ignore: Option, + /// `#[trace(static)]` + trace_static: Option, + /// `#[trace(bound = "A: 'static, B: Trace<'v>")]` + bounds: Option>, +} + +impl TraceAttrs { + fn parse(attr: &Attribute) -> syn::Result { + attr.parse_args_with(|input: ParseStream| { + let mut trace_attrs = TraceAttrs::default(); + while !input.is_empty() { + if let Some(unsafe_ignore) = input.parse::>()? { + if trace_attrs.unsafe_ignore.is_some() { + return Err(syn::Error::new_spanned( + unsafe_ignore, + "Duplicate `unsafe_ignore` attribute", + )); + } + trace_attrs.unsafe_ignore = Some(unsafe_ignore); + } else if let Some(trace_static) = input.parse::>()? { + if trace_attrs.trace_static.is_some() { + return Err(syn::Error::new_spanned( + trace_static, + "Duplicate `static` attribute", + )); + } + trace_attrs.trace_static = Some(trace_static); + } else if let Some(bound) = input.parse::>()? { + if trace_attrs.bounds.is_some() { + return Err(syn::Error::new_spanned( + bound, + "Duplicate `bound` attribute", + )); + } + input.parse::()?; + let bounds = input.parse::()?; + let bounds = bounds + .parse_with(|parser: ParseStream| Punctuated::parse_terminated(parser))?; + trace_attrs.bounds = Some(bounds); + } else { + return Err(input.error("Unknown attribute")); + } + if input.is_empty() { + break; + } + input.parse::()?; + } + Ok(trace_attrs) + }) + } } /// Parse attribute `#[trace(unsafe_ignore)]`. 
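The grammar accepted by `TraceAttrs::parse` above is a comma-separated list of the keywords `unsafe_ignore` and `static` plus `bound = "..."`. Here is a self-contained sketch of the same keyword-driven `syn` parsing pattern, reduced to two of the arguments (the function is hypothetical, not the actual derive internals; assumes syn 2):

```rust
use syn::parse::ParseStream;
use syn::parse::Parser;

syn::custom_keyword!(unsafe_ignore);
syn::custom_keyword!(bound);

// Sketch: parse `unsafe_ignore` and `bound = "..."` separated by commas,
// mirroring the loop in `TraceAttrs::parse` (duplicate checks omitted).
fn parse_trace_args(input: ParseStream) -> syn::Result<(bool, Option<String>)> {
    let mut ignore = false;
    let mut bounds = None;
    while !input.is_empty() {
        if input.parse::<Option<unsafe_ignore>>()?.is_some() {
            ignore = true;
        } else if input.parse::<Option<bound>>()?.is_some() {
            input.parse::<syn::Token![=]>()?;
            bounds = Some(input.parse::<syn::LitStr>()?.value());
        } else {
            return Err(input.error("Unknown attribute"));
        }
        if input.is_empty() {
            break;
        }
        input.parse::<syn::Token![,]>()?;
    }
    Ok((ignore, bounds))
}

fn main() -> syn::Result<()> {
    let (ignore, bounds) =
        parse_trace_args.parse_str(r#"unsafe_ignore, bound = "A: 'static""#)?;
    assert!(ignore);
    assert_eq!(bounds.as_deref(), Some("A: 'static"));
    Ok(())
}
```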
/// /// Currently it fails on any attribute argument other than `unsafe_ignore`. -#[cfg_attr(feature = "gazebo_lint", allow(gazebo_lint_impl_dupe))] // The custom_keyword macro -fn is_ignore(attrs: &[Attribute]) -> bool { - syn::custom_keyword!(unsafe_ignore); - - attrs.iter().any(|a| { - a.path().is_ident("trace") - && a.parse_args_with(|input: ParseStream| { - let ignore = input.parse::>()?.is_some(); - Ok(ignore) - }) - .unwrap() - }) +fn parse_attrs(attrs: &[Attribute]) -> syn::Result { + let mut trace_attrs = None; + + for attr in attrs { + if attr.path().is_ident("trace") { + if trace_attrs.is_some() { + return Err(syn::Error::new_spanned(attr, "Duplicate `trace` attribute")); + } + trace_attrs = Some(TraceAttrs::parse(attr)?); + } + } + + Ok(trace_attrs.unwrap_or_default()) } -fn trace_impl(data: &Data, generics: &Generics) -> syn::Result { +fn trace_impl(derive_input: &DeriveInput, generics: &Generics) -> syn::Result { + let derive_input = DeriveInputUtil::new(derive_input)?; + let generic_types = generics - .params - .iter() - .filter_map(|x| match x { - GenericParam::Type(x) => Some(x.ident.to_string()), - _ => None, - }) + .type_params() + .map(|x| x.ident.to_string()) .collect(); - for_each_field(data, |name, field| { - if is_ignore(&field.attrs) { + derive_input.for_each_field(|name, field| { + let TraceAttrs { + unsafe_ignore, + trace_static, + bounds, + } = parse_attrs(&field.attrs)?; + if let (Some(unsafe_ignore), Some(_trace_static)) = (unsafe_ignore, trace_static) { + return Err(syn::Error::new_spanned( + unsafe_ignore, + "Cannot have both `unsafe_ignore` and `static` attributes", + )); + } + if let Some(bounds) = bounds { + return Err(syn::Error::new_spanned( + bounds, + "The `bound` attribute can only be used on the `#[derive(Trace)]`", + )); + } + if unsafe_ignore.is_some() { Ok(quote! {}) - } else if is_static(&field.ty, &generic_types) { + } else if trace_static.is_some() || is_static(&field.ty, &generic_types) { Ok(quote_spanned! 
{ field.span()=> starlark::values::Tracer::trace_static(tracer, #name); @@ -137,17 +251,42 @@ fn is_static(ty: &Type, generics: &HashSet) -> bool { Type::BareFn(_) => true, Type::Never(_) => true, Type::Paren(x) => f(&x.elem), - Type::Path(x) => { - x.qself.is_none() - && x.path.segments.iter().all(|x| { - !generics.contains(&x.ident.to_string()) - && is_static_path_arguments(&x.arguments, generics) - }) - } + Type::Path(x) => x.qself.is_none() && is_static_path(&x.path, generics), Type::Ptr(_) => true, - Type::Reference(x) => f(&x.elem) && is_static_lifetime(x.lifetime.as_ref()), + Type::Reference(x) => f(&x.elem) && is_static_opt_lifetime(x.lifetime.as_ref()), Type::Slice(x) => f(&x.elem), Type::Tuple(x) => x.elems.iter().all(f), + Type::TraitObject(tr) => { + let syn::TypeTraitObject { + dyn_token: _, + bounds, + } = tr; + bounds + .iter() + .all(|x| is_static_type_param_bound(x, generics)) + } + _ => false, + } +} + +fn is_static_path(path: &syn::Path, generics: &HashSet) -> bool { + path.segments.iter().all(|x| { + !generics.contains(&x.ident.to_string()) && is_static_path_arguments(&x.arguments, generics) + }) +} + +fn is_static_type_param_bound(x: &TypeParamBound, generics: &HashSet) -> bool { + match x { + TypeParamBound::Trait(trait_bound) => { + let TraitBound { + paren_token: _, + modifier: _, + lifetimes, + path, + } = trait_bound; + lifetimes.is_none() && is_static_path(path, generics) + } + TypeParamBound::Lifetime(lt) => is_static_lifetime(lt), _ => false, } } @@ -159,7 +298,7 @@ fn is_static_path_arguments(x: &PathArguments, generics: &HashSet) -> bo PathArguments::None => true, PathArguments::AngleBracketed(x) => x.args.iter().all(|x| match x { GenericArgument::Type(x) => f(x), - GenericArgument::Lifetime(x) => is_static_lifetime(Some(x)), + GenericArgument::Lifetime(x) => is_static_opt_lifetime(Some(x)), _ => false, }), PathArguments::Parenthesized(x) => match &x.output { @@ -169,9 +308,13 @@ fn is_static_path_arguments(x: &PathArguments, generics: &HashSet) -> bo } } -fn is_static_lifetime(x: Option<&Lifetime>) -> bool { +fn is_static_lifetime(x: &Lifetime) -> bool { + x.ident == "static" +} + +fn is_static_opt_lifetime(x: Option<&Lifetime>) -> bool { match x { None => false, - Some(x) => x.ident == "static", + Some(x) => is_static_lifetime(x), } } diff --git a/starlark-rust/starlark_derive/src/unpack_value.rs b/starlark-rust/starlark_derive/src/unpack_value.rs index cbffcffcef17b..6da423f8dfdb4 100644 --- a/starlark-rust/starlark_derive/src/unpack_value.rs +++ b/starlark-rust/starlark_derive/src/unpack_value.rs @@ -18,6 +18,7 @@ use syn::spanned::Spanned; use crate::starlark_type_repr::StarlarkTypeReprInput; +use crate::v_lifetime::find_v_lifetime; pub(crate) fn derive_unpack_value(input: proc_macro::TokenStream) -> proc_macro::TokenStream { let input = syn::parse_macro_input!(input as syn::DeriveInput); @@ -27,39 +28,13 @@ pub(crate) fn derive_unpack_value(input: proc_macro::TokenStream) -> proc_macro: } } -fn find_lifetime(generics: &syn::Generics) -> syn::Result> { - let mut found_lifetime = None; - for lifetime in generics.lifetimes() { - if found_lifetime.is_some() { - return Err(syn::Error::new_spanned( - lifetime, - "Only one lifetime parameter is allowed", - )); - } - if !lifetime.bounds.is_empty() { - return Err(syn::Error::new_spanned( - lifetime, - "Lifetime parameter cannot have bounds", - )); - } - if lifetime.lifetime.ident != "v" { - return Err(syn::Error::new_spanned( - lifetime, - "Lifetime parameter must be named 'v'", - )); - } - found_lifetime = 
Some(&lifetime.lifetime); - } - Ok(found_lifetime) -} - fn derive_unpack_value_impl(input: syn::DeriveInput) -> syn::Result { let span = input.ident.span(); let input = StarlarkTypeReprInput::parse(input, "UnpackValue")?; let ident = input.ident; - let lifetime = find_lifetime(&input.generics)?; + let lifetime = find_v_lifetime(&input.generics)?; let (_impl_generics, type_generics, where_clause) = input.generics.split_for_impl(); @@ -75,21 +50,38 @@ fn derive_unpack_value_impl(input: syn::DeriveInput) -> syn::Result = input .enum_variants .iter() - .map(|(n, t)| { + .enumerate() + .map(|(i, (n, t))| { + let mut map_err: syn::Expr = syn::parse_quote_spanned! { t.span() => starlark::__macro_refs::Either::Left(e) }; + for _ in 0..i { + map_err = syn::parse_quote_spanned! { t.span() => starlark::__macro_refs::Either::Right(#map_err) }; + } syn::parse_quote_spanned! { t.span() => - if let Some(x) = <#t as starlark::values::UnpackValue<'v>>::unpack_value(value) { - return Some(#ident::#n(x)); + if let Some(x) = <#t as starlark::values::UnpackValue<'v>>::unpack_value_impl(value).map_err(|e| #map_err)? { + return std::result::Result::Ok(Some(#ident::#n(x))); } } }) .collect::>(); + // `Either>>` + let mut error: syn::Type = syn::parse_quote_spanned! { span => std::convert::Infallible }; + for variant in input.enum_variants.iter().rev() { + let t = &variant.1; + error = syn::parse_quote_spanned! { variant.1.span() => + starlark::__macro_refs::Either<<#t as starlark::values::UnpackValue<'v>>::Error, #error> + }; + } + let trait_impl: syn::ItemImpl = syn::parse_quote_spanned! { span => + #[allow(clippy::all)] impl #impl_generics starlark::values::UnpackValue<'v> for #ident #type_generics #where_clause { - fn unpack_value(value: starlark::values::Value<'v>) -> std::option::Option { + type Error = #error; + + fn unpack_value_impl(value: starlark::values::Value<'v>) -> std::result::Result, Self::Error> { #(#branches)* let _unused_when_enum_is_empty = value; - None + std::result::Result::Ok(None) } } }; diff --git a/starlark-rust/starlark_derive/src/util.rs b/starlark-rust/starlark_derive/src/util.rs new file mode 100644 index 0000000000000..5c1be55cac180 --- /dev/null +++ b/starlark-rust/starlark_derive/src/util.rs @@ -0,0 +1,454 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::ops::Deref; + +use proc_macro2::Span; +use proc_macro2::TokenStream; +use quote::quote; +use quote::quote_spanned; +use syn::spanned::Spanned; +use syn::Data; +use syn::DataEnum; +use syn::DataStruct; +use syn::DeriveInput; +use syn::Field; +use syn::Fields; + +#[derive(Copy, Clone)] +pub(crate) struct FieldsUtil<'a> { + pub(crate) fields: &'a Fields, +} + +impl<'a> Deref for FieldsUtil<'a> { + type Target = Fields; + + fn deref(&self) -> &Self::Target { + self.fields + } +} + +impl<'a> FieldsUtil<'a> { + /// Generate variable names for each field. 
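Concretely, the naming scheme implemented by `gen_field_names` below keeps named-field identifiers and synthesizes `f0`, `f1`, ... for tuple fields. A small, self-contained demonstration of the same idea (a simplified stand-in, not the actual `FieldsUtil` method):

```rust
use syn::spanned::Spanned;

// Simplified stand-in: named fields keep their identifiers,
// tuple-struct fields get synthetic binding names f0, f1, ...
fn field_names(fields: &syn::Fields) -> Vec<syn::Ident> {
    match fields {
        syn::Fields::Named(named) => named
            .named
            .iter()
            .map(|f| f.ident.clone().unwrap())
            .collect(),
        syn::Fields::Unnamed(unnamed) => unnamed
            .unnamed
            .iter()
            .enumerate()
            .map(|(i, f)| syn::Ident::new(&format!("f{}", i), f.span()))
            .collect(),
        syn::Fields::Unit => Vec::new(),
    }
}

fn main() {
    let input: syn::DeriveInput = syn::parse_quote! { struct Pair(u8, u16); };
    let syn::Data::Struct(data) = &input.data else { unreachable!() };
    let names: Vec<String> = field_names(&data.fields)
        .iter()
        .map(ToString::to_string)
        .collect();
    // The synthesized names can then be used in a pattern
    // such as `let Pair(f0, f1) = self;`.
    assert_eq!(names, ["f0", "f1"]);
}
```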
+ pub(crate) fn gen_field_names(self) -> Vec { + match self.fields { + Fields::Named(named) => named + .named + .iter() + .map(|f| f.ident.clone().unwrap()) + .collect(), + Fields::Unnamed(unnamed) => unnamed + .unnamed + .iter() + .enumerate() + .map(|(i, f)| syn::Ident::new(&format!("f{}", i), f.span())) + .collect(), + Fields::Unit => Vec::new(), + } + } + + pub(crate) fn gen_fields(self) -> Vec<(syn::Ident, &'a Field)> { + self.gen_field_names() + .into_iter() + .zip(self.fields.iter()) + .collect() + } +} + +#[derive(Copy, Clone)] +pub(crate) struct DataStructUtil<'a> { + pub(crate) derive_input: &'a DeriveInput, + pub(crate) data: &'a DataStruct, +} + +impl<'a> Deref for DataStructUtil<'a> { + type Target = DataStruct; + + fn deref(&self) -> &Self::Target { + self.data + } +} + +impl<'a> DataStructUtil<'a> { + fn span(self) -> Span { + self.derive_input.span() + } + + fn fields(self) -> FieldsUtil<'a> { + FieldsUtil { + fields: &self.data.fields, + } + } + + /// Return a statement which unpacks the fields into local variables. + pub(crate) fn unpack_self_fields_expr( + self, + ) -> syn::Result<(proc_macro2::TokenStream, Vec<(syn::Ident, &'a Field)>)> { + let names = self.fields().gen_field_names(); + let name = &self.derive_input.ident; + let stmt = match &self.data.fields { + Fields::Named(_fields) => { + quote_spanned! { + self.fields.span() => + let #name { #(#names),* } = self; + } + } + Fields::Unnamed(_fields) => { + quote_spanned! { + self.fields.span() => + let #name(#(#names),*) = self; + } + } + Fields::Unit => quote! {}, + }; + Ok(( + stmt, + names.into_iter().zip(self.fields().fields.iter()).collect(), + )) + } + + pub(crate) fn construct(self, fields: Vec) -> syn::Result { + let name = &self.derive_input.ident; + if fields.len() != self.fields.len() { + return Err(syn::Error::new( + self.span(), + "Number of fields does not match", + )); + } + match &self.fields { + Fields::Named(_) => { + let field_names = self.fields().gen_field_names(); + Ok(syn::parse_quote_spanned! { + self.span() => + #[allow(clippy::redundant_field_names)] + #name { #(#field_names: #fields),* } + }) + } + Fields::Unnamed(_) => Ok(syn::parse_quote_spanned! { + self.span() => + #name(#(#fields),*) + }), + Fields::Unit => Ok(syn::parse_quote_spanned! { + self.span() => + #name + }), + } + } + + pub(crate) fn match_self( + self, + handler: impl Fn(Vec<(syn::Ident, &'a Field)>) -> syn::Result, + ) -> syn::Result { + let (unpack, fields) = self.unpack_self_fields_expr()?; + let expr = handler(fields)?; + Ok(syn::parse_quote_spanned! { + self.span() => + { + #unpack + #expr + } + }) + } +} + +#[derive(Copy, Clone)] +pub(crate) struct VariantUtil<'a> { + pub(crate) variant: &'a syn::Variant, + pub(crate) enum_util: DataEnumUtil<'a>, +} + +impl<'a> Deref for VariantUtil<'a> { + type Target = syn::Variant; + + fn deref(&self) -> &Self::Target { + self.variant + } +} + +impl<'a> VariantUtil<'a> { + fn fields(self) -> FieldsUtil<'a> { + FieldsUtil { + fields: &self.variant.fields, + } + } + + pub(crate) fn match_arm( + self, + expr: impl FnOnce(Vec<(syn::Ident, &'a Field)>) -> syn::Result, + ) -> syn::Result { + let expr = expr(self.fields().gen_fields())?; + + let name = &self.enum_util.derive_input.ident; + let variant_name = &self.variant.ident; + let field_names = self.fields().gen_field_names(); + match &self.variant.fields { + Fields::Named(_) => Ok(syn::parse_quote_spanned! 
{ + self.variant.span() => + #name::#variant_name { #(#field_names),* } => { #expr } + }), + Fields::Unnamed(_) => Ok(syn::parse_quote_spanned! { + self.variant.span() => + #name::#variant_name(#(#field_names),*) => { #expr } + }), + Fields::Unit => Ok(syn::parse_quote_spanned! { + self.variant.span() => + #name::#variant_name => { #expr } + }), + } + } + + pub(crate) fn construct(self, fields: Vec) -> syn::Result { + let name = &self.enum_util.derive_input.ident; + let variant_name = &self.variant.ident; + let variant = match &self.variant.fields { + Fields::Named(_) => { + let field_names = self.fields().gen_field_names(); + syn::parse_quote_spanned! { + self.variant.span() => + #[allow(clippy::redundant_field_names)] + #name::#variant_name { #(#field_names: #fields),* } + } + } + Fields::Unnamed(_) => { + syn::parse_quote_spanned! { + self.variant.span() => + #name::#variant_name(#(#fields),*) + } + } + Fields::Unit => syn::parse_quote_spanned! { + self.variant.span() => + #name::#variant_name + }, + }; + Ok(variant) + } +} + +#[derive(Copy, Clone)] +pub(crate) struct DataEnumUtil<'a> { + pub(crate) derive_input: &'a DeriveInput, + pub(crate) data: &'a DataEnum, +} + +impl<'a> Deref for DataEnumUtil<'a> { + type Target = DataEnum; + + fn deref(&self) -> &Self::Target { + self.data + } +} + +impl<'a> DataEnumUtil<'a> { + pub(crate) fn span(self) -> Span { + self.derive_input.span() + } + + pub(crate) fn match_self( + self, + variant: impl Fn(VariantUtil<'a>, Vec<(syn::Ident, &'a Field)>) -> syn::Result, + ) -> syn::Result { + let arms = self + .variants() + .map(|v| v.match_arm(|fields| variant(v, fields))) + .collect::, _>>()?; + Ok(syn::parse_quote_spanned! { + self.data.enum_token.span() => + match self { + #(#arms),* + } + }) + } +} + +impl<'a> DataEnumUtil<'a> { + pub(crate) fn variants(self) -> impl Iterator> { + self.data.variants.iter().map(move |variant| VariantUtil { + variant, + enum_util: self, + }) + } +} + +#[derive(Copy, Clone)] +pub(crate) enum StructOrEnumVariant<'a> { + Struct(DataStructUtil<'a>), + EnumVariant(VariantUtil<'a>), +} + +impl<'a> StructOrEnumVariant<'a> { + pub(crate) fn span(self) -> Span { + match self { + StructOrEnumVariant::Struct(data) => data.span(), + StructOrEnumVariant::EnumVariant(variant) => variant.span(), + } + } + + pub(crate) fn construct(self, fields: Vec) -> syn::Result { + match self { + StructOrEnumVariant::Struct(data) => data.construct(fields), + StructOrEnumVariant::EnumVariant(variant) => variant.construct(fields), + } + } +} + +#[derive(Copy, Clone)] +pub(crate) enum DeriveInputUtil<'a> { + Struct(DataStructUtil<'a>), + Enum(DataEnumUtil<'a>), +} + +impl<'a> Deref for DeriveInputUtil<'a> { + type Target = DeriveInput; + + fn deref(&self) -> &Self::Target { + match self { + DeriveInputUtil::Struct(data) => data.derive_input, + DeriveInputUtil::Enum(data) => data.derive_input, + } + } +} + +impl<'a> DeriveInputUtil<'a> { + pub(crate) fn new(derive_input: &'a DeriveInput) -> syn::Result { + match &derive_input.data { + Data::Struct(data) => Ok(DeriveInputUtil::Struct(DataStructUtil { + derive_input, + data, + })), + Data::Enum(data) => Ok(DeriveInputUtil::Enum(DataEnumUtil { derive_input, data })), + Data::Union(_) => Err(syn::Error::new_spanned( + derive_input, + "Only structs and enums are supported", + )), + } + } + + pub(crate) fn span(self) -> Span { + match self { + DeriveInputUtil::Struct(data) => data.span(), + DeriveInputUtil::Enum(data) => data.span(), + } + } + + /// Generate expressions with callback for a struct or for 
each enum variant. + pub(crate) fn match_self( + self, + handler: impl Fn(StructOrEnumVariant, Vec<(syn::Ident, &'a Field)>) -> syn::Result, + ) -> syn::Result { + match self { + DeriveInputUtil::Struct(data) => { + data.match_self(|fields| handler(StructOrEnumVariant::Struct(data), fields)) + } + DeriveInputUtil::Enum(data) => data.match_self(|variant, fields| { + handler(StructOrEnumVariant::EnumVariant(variant), fields) + }), + } + } + + /// Process each field of struct of enum with provided callback. + pub(crate) fn for_each_field( + self, + field_handler: impl Fn(&syn::Ident, &Field) -> syn::Result, + ) -> syn::Result { + self.match_self(|struct_or_enum_variant, fields| { + let handlers: Vec = fields + .iter() + .map(|(ident, f)| field_handler(ident, f)) + .collect::>()?; + Ok(syn::parse_quote_spanned! { + struct_or_enum_variant.span() => + { + #(#handlers)* + } + }) + }) + } + + pub(crate) fn generics(self) -> GenericsUtil<'a> { + match self { + DeriveInputUtil::Struct(data) => GenericsUtil::new(&data.derive_input.generics), + DeriveInputUtil::Enum(data) => GenericsUtil::new(&data.derive_input.generics), + } + } +} + +#[derive(Copy, Clone)] +pub(crate) struct GenericsUtil<'a> { + pub(crate) generics: &'a syn::Generics, +} + +impl<'a> GenericsUtil<'a> { + pub(crate) fn new(generics: &'a syn::Generics) -> Self { + GenericsUtil { generics } + } + + pub(crate) fn assert_only_lifetime_params(self) -> syn::Result> { + let mut lifetimes = Vec::new(); + for param in &self.generics.params { + match param { + syn::GenericParam::Lifetime(param) => lifetimes.push(param), + _ => { + return Err(syn::Error::new_spanned( + param, + "only lifetime parameters are supported (no type or const parameters)", + )); + } + } + } + Ok(lifetimes) + } + + pub(crate) fn assert_only_type_params(self) -> syn::Result> { + let mut type_params = Vec::new(); + for param in &self.generics.params { + match param { + syn::GenericParam::Type(param) => type_params.push(param), + _ => { + return Err(syn::Error::new_spanned( + param, + "only type parameters are supported (no lifetime or const parameters)", + )); + } + } + } + Ok(type_params) + } + + pub(crate) fn assert_at_most_one_lifetime_param( + self, + ) -> syn::Result> { + let mut lifetime_params = self.generics.lifetimes(); + let Some(lt) = lifetime_params.next() else { + return Ok(None); + }; + if lifetime_params.next().is_some() { + return Err(syn::Error::new_spanned( + lt, + "expecting at most one lifetime parameter", + )); + } + Ok(Some(lt)) + } +} + +impl<'a> Deref for GenericsUtil<'a> { + type Target = syn::Generics; + + fn deref(&self) -> &Self::Target { + self.generics + } +} diff --git a/starlark-rust/starlark_derive/src/v_lifetime.rs b/starlark-rust/starlark_derive/src/v_lifetime.rs new file mode 100644 index 0000000000000..8b2be55fa170f --- /dev/null +++ b/starlark-rust/starlark_derive/src/v_lifetime.rs @@ -0,0 +1,39 @@ +/* + * Copyright 2019 The Starlark in Rust Authors. + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use crate::util::GenericsUtil; + +/// Find at most one lifetime parameter, which must be named `'v`. +pub(crate) fn find_v_lifetime(generics: &syn::Generics) -> syn::Result> { + let generics = GenericsUtil::new(generics); + let Some(lifetime) = generics.assert_at_most_one_lifetime_param()? else { + return Ok(None); + }; + if !lifetime.bounds.is_empty() { + return Err(syn::Error::new_spanned( + lifetime, + "Lifetime parameter cannot have bounds", + )); + } + if lifetime.lifetime.ident != "v" { + return Err(syn::Error::new_spanned( + lifetime, + "Lifetime parameter must be named 'v'", + )); + } + Ok(Some(&lifetime.lifetime)) +} diff --git a/starlark-rust/starlark_derive/src/visit_span.rs b/starlark-rust/starlark_derive/src/visit_span.rs index 75c699fb80bd0..7d95fb1f67e6b 100644 --- a/starlark-rust/starlark_derive/src/visit_span.rs +++ b/starlark-rust/starlark_derive/src/visit_span.rs @@ -22,10 +22,11 @@ use syn::parse_macro_input; use syn::spanned::Spanned; use syn::DeriveInput; -use crate::for_each_field::for_each_field; +use crate::util::DeriveInputUtil; -fn derive_body(input: &DeriveInput) -> syn::Result { - for_each_field(&input.data, |field_name, field| { +fn derive_body(input: &DeriveInput) -> syn::Result { + let derive_input = DeriveInputUtil::new(input)?; + derive_input.for_each_field(|field_name, field| { Ok(quote_spanned! { field.span() => crate::eval::runtime::visit_span::VisitSpanMut::visit_spans(#field_name, visitor); @@ -34,6 +35,7 @@ fn derive_body(input: &DeriveInput) -> syn::Result { } fn derive(input: DeriveInput) -> syn::Result { + let input = DeriveInputUtil::new(&input)?; let (_impl_generics, type_generics, where_clause) = input.generics.split_for_impl(); let name = &input.ident; let body = derive_body(&input)?; @@ -42,22 +44,16 @@ fn derive(input: DeriveInput) -> syn::Result { quote! {} } else { let params = input - .generics - .params - .iter() - .map(|p| match p { - syn::GenericParam::Type(t) => { - let t = &t.ident; - Ok(quote! { - #t: crate::eval::runtime::visit_span::VisitSpanMut - }) + .generics() + .assert_only_type_params()? + .into_iter() + .map(|t| { + let t = &t.ident; + quote! { + #t: crate::eval::runtime::visit_span::VisitSpanMut } - _ => Err(syn::Error::new_spanned( - p, - "VisitSpanMut cannot be derived for generics with non-type params", - )), }) - .collect::>>()?; + .collect::>(); quote! { < #(#params,)* > } diff --git a/starlark-rust/starlark_js_example/BUCK b/starlark-rust/starlark_js_example/BUCK new file mode 100644 index 0000000000000..206457cb27289 --- /dev/null +++ b/starlark-rust/starlark_js_example/BUCK @@ -0,0 +1,15 @@ +load("@fbcode_macros//build_defs:rust_library.bzl", "rust_library") + +oncall("build_infra") + +# TODO: this code is meant to be compiled to wasm, +# but here we only check it compiles for linux/mac/etc. 
+rust_library(
+    name = "starlark_js_example",
+    srcs = glob(
+        ["src/**/*.rs"],
+    ),
+    deps = [
+        "//buck2/starlark-rust/starlark:starlark",
+    ],
+)
diff --git a/starlark-rust/starlark_js_example/Cargo.toml b/starlark-rust/starlark_js_example/Cargo.toml
new file mode 100644
index 0000000000000..4df521ad6085b
--- /dev/null
+++ b/starlark-rust/starlark_js_example/Cargo.toml
@@ -0,0 +1,12 @@
+[package]
+description = "Example of running starlark-rust interpreter in browser"
+edition = "2021"
+name = "starlark_js_example"
+publish = false
+version = "0.0.0"
+
+[dependencies]
+starlark = { path = "../starlark", version = "0.12.0" }
+
+[lib]
+crate-type = ["cdylib", "rlib"]
diff --git a/starlark-rust/starlark_js_example/README.md b/starlark-rust/starlark_js_example/README.md
new file mode 100644
index 0000000000000..48726dd749e10
--- /dev/null
+++ b/starlark-rust/starlark_js_example/README.md
@@ -0,0 +1,13 @@
+# Starlark JS
+
+This directory contains an example project making use of Starlark compiled
+to WebAssembly (WASM). To try it:
+
+```
+rustup target add wasm32-unknown-unknown
+cargo build --target wasm32-unknown-unknown --release
+cp ../target/wasm32-unknown-unknown/release/starlark_js.wasm .
+python -m http.server
+```
+
+Then visit [http://localhost:8000](http://localhost:8000).
diff --git a/starlark-rust/starlark_js_example/index.html b/starlark-rust/starlark_js_example/index.html
new file mode 100644
index 0000000000000..6f699b974c9f2
--- /dev/null
+++ b/starlark-rust/starlark_js_example/index.html
@@ -0,0 +1,79 @@
+[79-line HTML page titled "Starlark evaluator"; markup not recoverable]
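The Cargo manifest above describes the crate as running the starlark-rust interpreter in the browser. As a rough sketch of the kind of evaluate-a-string entry point such a page would call (the function name and the wasm/JS glue are assumptions, not shown in this excerpt; API shapes are as of starlark 0.12, where these calls still return `anyhow` errors):

```rust
use starlark::environment::Globals;
use starlark::environment::Module;
use starlark::eval::Evaluator;
use starlark::syntax::AstModule;
use starlark::syntax::Dialect;

// Hypothetical core of the example: parse and evaluate one Starlark
// program, returning the rendered result of its last expression.
fn eval_starlark(program: &str) -> anyhow::Result<String> {
    let ast = AstModule::parse("eval.star", program.to_owned(), &Dialect::Standard)?;
    let globals = Globals::standard();
    let module = Module::new();
    let mut eval = Evaluator::new(&module);
    let value = eval.eval_module(ast, &globals)?;
    Ok(value.to_string())
}

fn main() -> anyhow::Result<()> {
    assert_eq!(eval_starlark("1 + 2 * 3")?, "7");
    Ok(())
}
```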
diff --git a/app/buck2_core/src/fs/paths/forward_rel_path.rs b/app/buck2_core/src/fs/paths/forward_rel_path.rs
+impl<P: AsRef<ForwardRelativePath>> Extend<P>
    for ForwardRelativePathBuf { + fn extend>(&mut self, iter: T) { + for p in iter { + self.push(p); + } + } +} + /// Errors from ForwardRelativePath creation -#[derive(Error, Debug)] +#[derive(buck2_error::Error, Debug)] enum ForwardRelativePathError { #[error("expected a relative path but got an absolute path instead: `{0}`")] PathNotRelative(String), @@ -873,13 +1027,12 @@ enum ForwardRelativePathError { PathNotUtf8(String), #[error("relativizing path `{0}` results would result in a non-forward relative path")] RelativizationError(String), + #[error("`{0}` does not start with `{1}`")] + StripPrefix(String, String), + #[error("`{0}` does not end with `{1}`")] + StripSuffix(String, String), } -/// Error from 'strip_prefix' -#[derive(Error, Debug)] -#[error("`{0}` is not a base of `{1}`")] -pub struct StripPrefixError(String, String); - impl<'a> IntoIterator for &'a ForwardRelativePath { type Item = &'a FileName; type IntoIter = ForwardRelativePathIter<'a>; @@ -896,9 +1049,9 @@ impl<'a> TryFrom<&'a str> for &'a ForwardRelativePath { /// no allocation conversion /// /// ``` + /// use std::convert::TryFrom; /// /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; - /// use std::convert::TryFrom; /// /// assert!(<&ForwardRelativePath>::try_from("foo/bar").is_ok()); /// assert!(<&ForwardRelativePath>::try_from("").is_ok()); @@ -912,7 +1065,7 @@ impl<'a> TryFrom<&'a str> for &'a ForwardRelativePath { #[inline] fn try_from(s: &'a str) -> anyhow::Result<&'a ForwardRelativePath> { ForwardRelativePathVerifier::verify_str(s)?; - Ok(ForwardRelativePath::ref_cast(s)) + Ok(ForwardRelativePath::unchecked_new(s)) } } @@ -929,11 +1082,11 @@ impl<'a> TryFrom<&'a Path> for &'a ForwardRelativePath { /// no allocation conversion /// /// ``` - /// - /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; /// use std::convert::TryFrom; /// use std::path::Path; /// + /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; + /// /// assert!(<&ForwardRelativePath>::try_from(Path::new("foo/bar")).is_ok()); /// assert!(<&ForwardRelativePath>::try_from(Path::new("")).is_ok()); /// assert!(<&ForwardRelativePath>::try_from(Path::new("./bar")).is_err()); @@ -959,9 +1112,9 @@ impl<'a> TryFrom<&'a RelativePath> for &'a ForwardRelativePath { /// no allocation conversion /// /// ``` + /// use std::convert::TryFrom; /// /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; - /// use std::convert::TryFrom; /// use buck2_core::fs::paths::RelativePath; /// /// assert!(<&ForwardRelativePath>::try_from(RelativePath::new("foo/bar")).is_ok()); @@ -991,9 +1144,9 @@ impl TryFrom for ForwardRelativePathBuf { /// no allocation conversion /// /// ``` + /// use std::convert::TryFrom; /// /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; - /// use std::convert::TryFrom; /// /// assert!(ForwardRelativePathBuf::try_from("foo/bar".to_owned()).is_ok()); /// assert!(ForwardRelativePathBuf::try_from("".to_owned()).is_ok()); @@ -1017,11 +1170,12 @@ impl TryFrom for ForwardRelativePathBuf { /// no allocation conversion /// /// ``` - /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; - /// use buck2_core::fs::paths::RelativePathBuf; /// use std::convert::TryFrom; /// use std::path::PathBuf; /// + /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; + /// use buck2_core::fs::paths::RelativePathBuf; + /// /// assert!(ForwardRelativePathBuf::try_from(PathBuf::from("foo/bar")).is_ok()); /// 
assert!(ForwardRelativePathBuf::try_from(PathBuf::from("")).is_ok()); /// assert!(ForwardRelativePathBuf::try_from(PathBuf::from("./bar")).is_err()); @@ -1046,9 +1200,10 @@ impl TryFrom for ForwardRelativePathBuf { /// no allocation conversion /// /// ``` + /// use std::convert::TryFrom; + /// /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePathBuf; /// use buck2_core::fs::paths::RelativePathBuf; - /// use std::convert::TryFrom; /// /// assert!(ForwardRelativePathBuf::try_from(RelativePathBuf::from("foo/bar")).is_ok()); /// assert!(ForwardRelativePathBuf::try_from(RelativePathBuf::from("")).is_ok()); @@ -1189,62 +1344,20 @@ impl ForwardRelativePathVerifier { } } -impl<'a> FromIterator<&'a FileName> for Option { - fn from_iter(iter: I) -> Self - where - I: IntoIterator, - { - from_iter::<20, _>(iter) - } -} - -impl<'a> FromIterator<&'a FileNameBuf> for Option { - fn from_iter(iter: I) -> Self - where - I: IntoIterator, - { - iter.into_iter() - .map(>::as_ref) - .collect() - } -} - -fn from_iter<'a, const N: usize, I>(iter: I) -> Option -where - I: IntoIterator, -{ - // Collect up to 20 pointers to the stack. This avoids a reallocation when joining paths of up - // to 20 components. - let parts = iter.into_iter().collect::>(); - - let mut first = true; - let mut size = 0; - for part in &parts { - if !first { - size += 1; // For `/` - } - size += part.as_str().len(); - first = false; - } +impl> FromIterator for ForwardRelativePathBuf { + fn from_iter>(iter: T) -> Self { + // Collect up to 20 pointers to the stack. + // This avoids a reallocation when joining paths of up to 20 components. + let parts = iter.into_iter().collect::>(); - let mut ret = String::with_capacity(size); - for part in &parts { - if !ret.is_empty() { - ret.push('/'); - } - ret.push_str(part.as_ref()); - } - - if ret.is_empty() { - None - } else { - Some(ForwardRelativePathBuf(ret)) + let mut result = ForwardRelativePathBuf::with_capacity_for_concat(&parts); + result.extend(parts); + result } } #[cfg(test)] mod tests { - use crate::fs::paths::forward_rel_path::from_iter; use crate::fs::paths::forward_rel_path::FileName; use crate::fs::paths::forward_rel_path::ForwardRelativePath; use crate::fs::paths::forward_rel_path::ForwardRelativePathBuf; @@ -1325,12 +1438,90 @@ mod tests { .map(FileName::unchecked_new) .collect::>(); - let expected = Some(ForwardRelativePath::unchecked_new("foo/bar/baz").to_buf()); + assert_eq!( + ForwardRelativePath::new("foo/bar/baz").unwrap().to_buf(), + ForwardRelativePathBuf::from_iter(parts.iter().copied()), + ); + } + + #[test] + fn test_iter_as_path() { + let path = ForwardRelativePath::new("foo/bar/baz").unwrap(); + let mut iter = path.iter(); + assert_eq!( + ForwardRelativePath::new("foo/bar/baz").unwrap(), + iter.as_path() + ); + iter.next().unwrap(); + assert_eq!(ForwardRelativePath::new("bar/baz").unwrap(), iter.as_path()); + iter.next().unwrap(); + assert_eq!(ForwardRelativePath::new("baz").unwrap(), iter.as_path()); + iter.next().unwrap(); + assert_eq!(ForwardRelativePath::new("").unwrap(), iter.as_path()); + assert_eq!(None, iter.next()); + } - assert_eq!(from_iter::<1, _>(parts.iter().copied()), expected); - assert_eq!(from_iter::<2, _>(parts.iter().copied()), expected); - assert_eq!(from_iter::<3, _>(parts.iter().copied()), expected); - assert_eq!(from_iter::<4, _>(parts.iter().copied()), expected); + #[test] + fn test_iter() { + assert_eq!( + vec!["foo", "bar", "baz"], + ForwardRelativePath::new("foo/bar/baz") + .unwrap() + .iter() + .map(|p| p.as_str()) + 
.collect::>() + ); + assert_eq!( + vec!["baz", "bar", "foo"], + ForwardRelativePath::new("foo/bar/baz") + .unwrap() + .iter() + .rev() + .map(|p| p.as_str()) + .collect::>() + ) + } + + #[test] + fn test_strip_suffix() { + let path = ForwardRelativePath::new("foo/bar/baz").unwrap(); + assert_eq!( + Some(ForwardRelativePath::new("foo/bar/baz").unwrap()), + path.strip_suffix_opt(ForwardRelativePath::new("").unwrap()), + ); + assert_eq!( + Some(ForwardRelativePath::new("foo/bar").unwrap()), + path.strip_suffix_opt(ForwardRelativePath::new("baz").unwrap()), + ); + assert_eq!( + Some(ForwardRelativePath::new("foo").unwrap()), + path.strip_suffix_opt(ForwardRelativePath::new("bar/baz").unwrap()), + ); + assert_eq!( + Some(ForwardRelativePath::new("").unwrap()), + path.strip_suffix_opt(ForwardRelativePath::new("foo/bar/baz").unwrap()), + ); + assert_eq!( + None, + path.strip_suffix_opt(ForwardRelativePath::new("foo/bar/baz/qux").unwrap()), + ); + assert_eq!( + None, + path.strip_suffix_opt(ForwardRelativePath::new("az").unwrap()), + ); + + assert_eq!( + Some(ForwardRelativePath::new("").unwrap()), + ForwardRelativePath::new("") + .unwrap() + .strip_suffix_opt(ForwardRelativePath::new("").unwrap()) + ); + assert_eq!( + None, + ForwardRelativePath::new("") + .unwrap() + .strip_suffix_opt(ForwardRelativePath::new("xx").unwrap()) + ); } #[test] @@ -1353,4 +1544,44 @@ mod tests { .to_string(); assert!(err.contains("expected a normalized path"), "{}", err); } + + #[test] + fn test_split_last() { + assert_eq!( + Some(( + ForwardRelativePath::new("foo/bar").unwrap(), + FileName::new("baz").unwrap() + )), + ForwardRelativePath::new("foo/bar/baz") + .unwrap() + .split_last(), + ); + assert_eq!( + Some(( + ForwardRelativePath::new("foo").unwrap(), + FileName::new("bar").unwrap() + )), + ForwardRelativePath::new("foo/bar").unwrap().split_last(), + ); + assert_eq!( + Some(( + ForwardRelativePath::new("").unwrap(), + FileName::new("foo").unwrap() + )), + ForwardRelativePath::new("foo").unwrap().split_last(), + ); + assert_eq!(None, ForwardRelativePath::new("").unwrap().split_last()); + } + + #[test] + fn test_pop() { + let mut p = ForwardRelativePath::new("foo/bar/baz").unwrap().to_buf(); + assert!(p.pop()); + assert_eq!(ForwardRelativePath::new("foo/bar").unwrap(), p); + assert!(p.pop()); + assert_eq!(ForwardRelativePath::new("foo").unwrap(), p); + assert!(p.pop()); + assert_eq!(ForwardRelativePath::new("").unwrap(), p); + assert!(!p.pop()); + } } diff --git a/app/buck2_core/src/fs/paths/mod.rs b/app/buck2_core/src/fs/paths/mod.rs deleted file mode 100644 index e3db0d6a54fa7..0000000000000 --- a/app/buck2_core/src/fs/paths/mod.rs +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! -//! The paths module for buck2. -//! -//! Introduces 'ForwardRelativePath', 'ForwardRelativePathBuf', 'AbsPath', and -//! 'AbsPathBuf', which are equivalents of 'Path' and 'PathBuf'. -//! -//! ForwardRelativePaths are fully normalized relative platform agnostic paths -//! that only points forward. This means that there is no `.` or `..` in this -//! path, and does not begin with `/`. These are resolved to the 'PathBuf' by -//! resolving them against an 'AbsPath'. -//! -//! 
'AbsPath' are absolute paths, meaning they must start with a directory root -//! of either `/` or some windows root directory like `c:`. These behave -//! roughly like 'Path'. -//! - -pub mod abs_norm_path; -pub mod abs_path; -mod cmp_impls; -pub mod file_name; -pub(crate) mod fmt; -pub mod forward_rel_path; -mod into_filename_buf_iterator; -pub mod path_util; -// non public internal references - -pub use into_filename_buf_iterator::*; -pub use relative_path::RelativePath; -pub use relative_path::RelativePathBuf; - -#[cfg(test)] -mod tests { - use std::collections::HashMap; - - use crate::fs::paths::abs_norm_path::AbsNormPath; - use crate::fs::paths::abs_norm_path::AbsNormPathBuf; - use crate::fs::paths::forward_rel_path::ForwardRelativePath; - use crate::fs::paths::forward_rel_path::ForwardRelativePathBuf; - use crate::fs::project_rel_path::ProjectRelativePath; - - #[test] - fn wrapped_paths_work_in_maps() -> anyhow::Result<()> { - let mut map = HashMap::new(); - - let p1 = ForwardRelativePath::new("foo")?; - let p2 = ProjectRelativePath::new("bar")?; - - map.insert(p1.to_buf(), p2.to_buf()); - - assert_eq!(Some(p2), map.get(p1).map(|p| p.as_ref())); - - Ok(()) - } - - #[test] - fn path_buf_is_clonable() -> anyhow::Result<()> { - let buf = ForwardRelativePathBuf::unchecked_new("foo".into()); - let buf_ref = &buf; - - let cloned: ForwardRelativePathBuf = buf_ref.clone(); - assert_eq!(buf, cloned); - - Ok(()) - } - - #[test] - fn relative_path_display_is_readable() -> anyhow::Result<()> { - let buf = ForwardRelativePathBuf::unchecked_new("foo/bar".into()); - assert_eq!("foo/bar", format!("{}", buf)); - assert_eq!("ForwardRelativePathBuf(\"foo/bar\")", format!("{:?}", buf)); - let refpath: &ForwardRelativePath = &buf; - assert_eq!("foo/bar", format!("{}", refpath)); - assert_eq!("ForwardRelativePath(\"foo/bar\")", format!("{:?}", refpath)); - - Ok(()) - } - - #[cfg(not(windows))] - #[test] - fn absolute_path_display_is_readable() -> anyhow::Result<()> { - let buf = AbsNormPathBuf::from("/foo/bar".into())?; - assert_eq!("/foo/bar", format!("{}", buf)); - assert_eq!("AbsNormPathBuf(\"/foo/bar\")", format!("{:?}", buf)); - let refpath: &AbsNormPath = &buf; - assert_eq!("/foo/bar", format!("{}", refpath)); - assert_eq!("AbsNormPath(\"/foo/bar\")", format!("{:?}", refpath)); - - Ok(()) - } - - #[cfg(windows)] - #[test] - fn absolute_path_display_is_readable() -> anyhow::Result<()> { - let buf = AbsNormPathBuf::from("C:/foo/bar".into())?; - assert_eq!("C:/foo/bar", format!("{}", buf)); - assert_eq!("AbsNormPathBuf(\"C:/foo/bar\")", format!("{:?}", buf)); - let refpath: &AbsNormPath = &buf; - assert_eq!("C:/foo/bar", format!("{}", refpath)); - assert_eq!("AbsNormPath(\"C:/foo/bar\")", format!("{:?}", refpath)); - - Ok(()) - } -} diff --git a/app/buck2_core/src/fs/project.rs b/app/buck2_core/src/fs/project.rs index 02ce953342dc1..885dae879eb60 100644 --- a/app/buck2_core/src/fs/project.rs +++ b/app/buck2_core/src/fs/project.rs @@ -26,7 +26,7 @@ use crate::fs::paths::abs_norm_path::AbsNormPathBuf; use crate::fs::paths::forward_rel_path::ForwardRelativePath; use crate::fs::paths::RelativePath; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, buck2_error::Error)] enum ProjectRootError { #[error("Provided project root `{0}` is not equal to the canonicalized path `{1}`")] NotCanonical(AbsNormPathBuf, AbsNormPathBuf), @@ -39,7 +39,7 @@ enum ProjectRootError { /// library. 
The cwd will be the directory from which the command was invoked, /// which is within the project root and hence relativized against it. #[derive(Clone, Debug, Dupe, PartialEq, derive_more::Display, Allocative)] -#[display(fmt = "{root}")] +#[display("{root}")] pub struct ProjectRoot { root: Arc, } @@ -55,7 +55,7 @@ impl ProjectRootTemp { /// same root pub fn new() -> anyhow::Result { let temp = tempfile::tempdir()?; - let path = fs_util::canonicalize(temp.path())?; + let path = fs_util::canonicalize(AbsPath::new(temp.path())?)?; let path = ProjectRoot::new(path)?; Ok(Self { path, _temp: temp }) } @@ -102,9 +102,9 @@ impl ProjectRoot { /// `project root`, yielding a 'AbsPathBuf' /// /// ``` + /// use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; /// use buck2_core::fs::project::ProjectRoot; /// use buck2_core::fs::project_rel_path::ProjectRelativePath; - /// use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; /// /// if cfg!(not(windows)) { /// let root = AbsNormPathBuf::from("/usr/local/fbsource/".into())?; @@ -126,17 +126,18 @@ impl ProjectRoot { /// /// # anyhow::Ok(()) /// ``` - pub fn resolve(&self, path: impl PathLike) -> AbsNormPathBuf { - path.resolve(self).into_owned() + pub fn resolve(&self, path: impl AsRef) -> AbsNormPathBuf { + self.root().join(path.as_ref()) } /// /// Takes a 'ProjectRelativePath' and converts it to a 'Path' that is relative to the project root. /// /// ``` - /// use buck2_core::fs::project::{ProjectRoot}; - /// use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; /// use std::path::PathBuf; + /// + /// use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; + /// use buck2_core::fs::project::ProjectRoot; /// use buck2_core::fs::project_rel_path::ProjectRelativePath; /// /// let root = if cfg!(not(windows)) { @@ -166,9 +167,11 @@ impl ProjectRoot { /// /// ``` /// use std::borrow::Cow; - /// use buck2_core::fs::project_rel_path::ProjectRelativePath; - /// use buck2_core::fs::paths::abs_norm_path::{AbsNormPathBuf, AbsNormPath}; + /// + /// use buck2_core::fs::paths::abs_norm_path::AbsNormPath; + /// use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; /// use buck2_core::fs::project::ProjectRoot; + /// use buck2_core::fs::project_rel_path::ProjectRelativePath; /// /// if cfg!(not(windows)) { /// let root = AbsNormPathBuf::from("/usr/local/fbsource/".into())?; @@ -235,14 +238,21 @@ impl ProjectRoot { // * not symlink found inside the project that point outside of it // * not even symlinks found in the project unless we need to to resolve ".." - let mut current_prefix = PathBuf::new(); + // There's no empty `AbsPathBuf`, so we need to write this somewhat weird + let mut current_prefix: Option = None; let mut components = path.components(); while let Some(comp) = components.next() { - current_prefix.push(comp); + let current_prefix = match &mut current_prefix { + Some(path) => { + path.push(comp); + path + } + None => current_prefix.insert(AbsPathBuf::new(comp)?), + }; // This is not very efficient, but efficient cross-platform implementation is not easy. - let canonicalized_current_prefix = fs_util::canonicalize(¤t_prefix)?; + let canonicalized_current_prefix = fs_util::canonicalize(current_prefix)?; if let Ok(rem) = canonicalized_current_prefix .as_path() @@ -284,56 +294,56 @@ impl ProjectRoot { // TODO(nga): refactor this to global function. 
pub fn write_file( &self, - path: impl PathLike, + path: impl AsRef, contents: impl AsRef<[u8]>, executable: bool, ) -> anyhow::Result<()> { - let abs_path = path.resolve(self); + let abs_path = self.root().join(path.as_ref()); if let Some(parent) = abs_path.parent() { fs_util::create_dir_all(parent).with_context(|| { format!( "`write_file` for `{}` creating directory `{}`", - abs_path.as_ref(), - parent + abs_path, parent ) })?; } - fs_util::write(abs_path.as_ref(), contents) - .with_context(|| format!("`write_file` writing `{}`", abs_path.as_ref()))?; + fs_util::write(&abs_path, contents) + .with_context(|| format!("`write_file` writing `{}`", abs_path))?; if executable { - self.set_executable(abs_path.as_ref()).with_context(|| { - format!("`write_file` setting executable `{}`", abs_path.as_ref()) - })?; + fs_util::set_executable(&abs_path) + .with_context(|| format!("`write_file` setting executable `{}`", abs_path))?; } Ok(()) } // TODO(nga): refactor this to global function. - pub fn create_file(&self, path: impl PathLike, executable: bool) -> anyhow::Result { - let abs_path = path.resolve(self); + pub fn create_file( + &self, + path: impl AsRef, + executable: bool, + ) -> anyhow::Result { + let abs_path = self.root().join(path.as_ref()); if let Some(parent) = abs_path.parent() { fs_util::create_dir_all(parent).with_context(|| { format!( "`create_file` for `{}` creating directory `{}`", - abs_path.as_ref(), - parent + abs_path, parent ) })?; } - let file = File::create(abs_path.as_ref()) - .with_context(|| format!("`create_file` creating `{}`", abs_path.as_ref()))?; + let file = File::create(&abs_path) + .with_context(|| format!("`create_file` creating `{}`", abs_path))?; if executable { - self.set_executable(abs_path.as_ref()).with_context(|| { - format!("`create_file` setting executable `{}`", abs_path.as_ref()) - })?; + fs_util::set_executable(&abs_path) + .with_context(|| format!("`create_file` setting executable `{}`", abs_path))?; } Ok(file) } // TODO(nga): refactor this to global function. - pub fn set_executable(&self, path: impl PathLike) -> anyhow::Result<()> { - let path = path.resolve(self); - fs_util::set_executable(path.as_ref()) + pub fn set_executable(&self, path: impl AsRef) -> anyhow::Result<()> { + let path = self.root().join(path.as_ref()); + fs_util::set_executable(path) } /// Create a soft link from one location to another. @@ -348,7 +358,11 @@ impl ProjectRoot { /// /// Filesystems that do not support soft links will return `Err`. // TODO(nga): refactor this to global function. - pub fn soft_link_raw(&self, src: impl AsRef, dest: impl PathLike) -> anyhow::Result<()> { + pub fn soft_link_raw( + &self, + src: impl AsRef, + dest: impl AsRef, + ) -> anyhow::Result<()> { let dest_abs = self.resolve(dest); if let Some(parent) = dest_abs.parent() { @@ -374,8 +388,8 @@ impl ProjectRoot { // TODO(nga): refactor this to global function. pub fn soft_link_relativized( &self, - src: impl PathLike, - dest: impl PathLike, + src: impl AsRef, + dest: impl AsRef, ) -> anyhow::Result<()> { let target_abs = self.resolve(src); let dest_abs = self.resolve(dest); @@ -394,7 +408,11 @@ impl ProjectRoot { /// - Re-writing relative symlinks. That is, a link to `foo/bar` might end up /// as `../../../other/foo/bar` in the destination. Absolute symlinks are not changed. // TODO(nga): refactor this to global function. 
- pub fn copy(&self, src: impl PathLike, dest: impl PathLike) -> anyhow::Result<()> { + pub fn copy( + &self, + src: impl AsRef, + dest: impl AsRef, + ) -> anyhow::Result<()> { let src_abs = self.resolve(src); let dest_abs = self.resolve(dest); @@ -433,42 +451,12 @@ impl ProjectRoot { } } - /// Remove a path recursively, regardless of it being a file or a directory (all contents - /// deleted). - /// This does not follow symlinks, and only removes the link itself. - // TODO(nga): refactor this to global function. - pub fn remove_path_recursive(&self, path: impl PathLike) -> anyhow::Result<()> { - let path = self.resolve(path); - - // TODO: This should probably not use symlink_metadata_if_available... But it used to use - // Path::exists()... - let meta = match fs_util::symlink_metadata_if_available(&path) { - Some(m) => m, - None => return Ok(()), - }; - - let path_type = meta.file_type(); - - if path_type.is_dir() { - fs_util::remove_dir_all(&path) - .with_context(|| format!("remove_path_recursive({}) on directory", &path))?; - } else if path_type.is_file() || path_type.is_symlink() { - fs_util::remove_file(&path) - .with_context(|| format!("remove_path_recursive({}) on file", &path))?; - } else { - // If we want to handle special files, we'll need to use special traits - // https://doc.rust-lang.org/std/os/unix/fs/trait.FileTypeExt.html - return Err(anyhow::anyhow!( - "remove_path_recursive, attempted to delete a path ({}) of an unknown type", - path - )); - } - - Ok(()) - } - /// Find the relative path between two paths within the project - pub fn relative_path(&self, target: impl PathLike, dest: impl PathLike) -> PathBuf { + pub fn relative_path( + &self, + target: impl AsRef, + dest: impl AsRef, + ) -> PathBuf { Self::find_relative_path(&self.resolve(target), &self.resolve(dest)) } @@ -530,7 +518,7 @@ impl ProjectRoot { } fn copy_file(src: &AbsNormPathBuf, dst: &AbsNormPathBuf) -> anyhow::Result<()> { - fs_util::copy(src, dst).map(|_| ()) + fs_util::copy(src, dst).map(|_| ()).map_err(Into::into) } fn copy_dir(src_dir: &AbsNormPathBuf, dest_dir: &AbsNormPathBuf) -> anyhow::Result<()> { @@ -553,55 +541,12 @@ impl ProjectRoot { } use allocative::Allocative; -pub use internals::PathLike; use crate::fs::paths::abs_path::AbsPath; use crate::fs::paths::abs_path::AbsPathBuf; use crate::fs::project_rel_path::ProjectRelativePath; use crate::fs::project_rel_path::ProjectRelativePathBuf; -mod internals { - use std::borrow::Cow; - - use crate::fs::paths::abs_norm_path::AbsNormPath; - use crate::fs::paths::abs_norm_path::AbsNormPathBuf; - use crate::fs::project::ProjectRoot; - use crate::fs::project_rel_path::ProjectRelativePath; - use crate::fs::project_rel_path::ProjectRelativePathBuf; - - pub trait PathLike: PathLikeResolvable {} - - impl PathLike for T where T: PathLikeResolvable {} - - pub trait PathLikeResolvable { - fn resolve(&self, fs: &ProjectRoot) -> Cow<'_, AbsNormPath>; - } - - impl PathLikeResolvable for &AbsNormPath { - fn resolve(&self, _fs: &ProjectRoot) -> Cow<'_, AbsNormPath> { - Cow::Borrowed(self) - } - } - - impl PathLikeResolvable for &AbsNormPathBuf { - fn resolve(&self, _fs: &ProjectRoot) -> Cow<'_, AbsNormPath> { - Cow::Borrowed(self) - } - } - - impl PathLikeResolvable for &ProjectRelativePath { - fn resolve(&self, fs: &ProjectRoot) -> Cow<'_, AbsNormPath> { - Cow::Owned(self.0.resolve(fs.root())) - } - } - - impl PathLikeResolvable for &ProjectRelativePathBuf { - fn resolve(&self, fs: &ProjectRoot) -> Cow<'_, AbsNormPath> { - Cow::Owned(self.0.resolve(fs.root())) - } - } 
-} - #[cfg(test)] mod tests { use std::path::Path; @@ -847,23 +792,6 @@ mod tests { Ok(()) } - #[cfg(unix)] - #[test] - fn test_remove_readonly_path_recursive() -> anyhow::Result<()> { - let fs = ProjectRootTemp::new()?; - - // We can delete a read-only file - let file = ProjectRelativePath::new("foo/bar/link")?; - fs.path.write_file(file, "Hello", false)?; - let real_file = fs.path.resolve(file); - let mut perm = fs_util::metadata(&real_file)?.permissions(); - perm.set_readonly(true); - fs_util::set_permissions(&real_file, perm)?; - fs.path.remove_path_recursive(file)?; - assert!(!fs.path.resolve(file).exists()); - Ok(()) - } - #[test] fn test_relativizes_paths_correct() -> anyhow::Result<()> { let fs = ProjectRootTemp::new()?; diff --git a/app/buck2_core/src/fs/project_rel_path.rs b/app/buck2_core/src/fs/project_rel_path.rs index a73e7f8b49ae9..2966dad63adb9 100644 --- a/app/buck2_core/src/fs/project_rel_path.rs +++ b/app/buck2_core/src/fs/project_rel_path.rs @@ -22,12 +22,16 @@ //! //! Sample uses //! ``` -//! use buck2_core::fs::project::ProjectRoot; -//! use buck2_core::fs::project_rel_path::{ProjectRelativePathBuf, ProjectRelativePath}; -//! use buck2_core::fs::paths::abs_norm_path::{AbsNormPathBuf, AbsNormPath}; +//! use std::borrow::Cow; +//! use std::convert::TryFrom; +//! +//! use buck2_core::fs::paths::abs_norm_path::AbsNormPath; +//! use buck2_core::fs::paths::abs_norm_path::AbsNormPathBuf; //! use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; +//! use buck2_core::fs::project::ProjectRoot; +//! use buck2_core::fs::project_rel_path::ProjectRelativePath; +//! use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; //! use relative_path::RelativePath; -//! use std::{borrow::Cow, convert::TryFrom}; //! //! let root = if cfg!(not(windows)) { //! AbsNormPathBuf::from("/usr/local/fbsource/".into())? @@ -43,18 +47,26 @@ //! let fs = ProjectRoot::new_unchecked(root); //! let project_rel = fs.relativize(some_path)?; //! -//! assert_eq!(Cow::Borrowed(ProjectRelativePath::new("buck/BUCK")?), project_rel); +//! assert_eq!( +//! Cow::Borrowed(ProjectRelativePath::new("buck/BUCK")?), +//! project_rel +//! ); //! assert_eq!(some_path.to_buf(), fs.resolve(project_rel.as_ref())); //! //! let rel_path = RelativePath::new("../src"); //! let project_rel_2 = project_rel.join_normalized(rel_path)?; -//! assert_eq!(ProjectRelativePathBuf::try_from("buck/src".to_owned())?, project_rel_2); +//! assert_eq!( +//! ProjectRelativePathBuf::try_from("buck/src".to_owned())?, +//! project_rel_2 +//! ); //! -//! assert_eq!(some_path.join_normalized(rel_path)?, fs.resolve(&project_rel_2).to_buf()); +//! assert_eq!( +//! some_path.join_normalized(rel_path)?, +//! fs.resolve(&project_rel_2).to_buf() +//! ); //! //! # anyhow::Ok(()) //! ``` -//! 
use std::borrow::Borrow; use std::ops::Deref; @@ -62,6 +74,7 @@ use std::path::Path; use std::path::PathBuf; use allocative::Allocative; +use buck2_util::arc_str::StringInside; use derivative::Derivative; use ref_cast::RefCast; use relative_path::RelativePath; @@ -98,6 +111,7 @@ pub struct ProjectRelativePath( #[derive(Clone, derive_more::Display, Derivative)] // split in two because formatters don't agree #[derive( + Default, Hash, PartialEq, Eq, @@ -112,6 +126,16 @@ pub struct ProjectRelativePathBuf( #[derivative(Debug(format_with = "quoted_display"))] ForwardRelativePathBuf, ); +impl StringInside for ProjectRelativePath { + fn as_str(wrapper: &Self) -> &str { + wrapper.0.as_str() + } + + fn from_str(s: &str) -> &Self { + ProjectRelativePath::unchecked_new(s) + } +} + impl AsRef for ProjectRelativePath { fn as_ref(&self) -> &ForwardRelativePath { &self.0 @@ -142,6 +166,20 @@ impl AsRef for ProjectRelativePathBuf { } } +impl AsRef for ProjectRelativePath { + #[inline] + fn as_ref(&self) -> &str { + self.as_str() + } +} + +impl AsRef for ProjectRelativePathBuf { + #[inline] + fn as_ref(&self) -> &str { + self.as_str() + } +} + impl ProjectRelativePath { pub fn unchecked_new>(s: &S) -> &Self { ProjectRelativePath::ref_cast(ForwardRelativePath::unchecked_new(s)) @@ -156,6 +194,7 @@ impl ProjectRelativePath { /// /// ``` /// use std::path::Path; + /// /// use buck2_core::fs::project_rel_path::ProjectRelativePath; /// /// assert!(ProjectRelativePath::new("foo/bar").is_ok()); @@ -191,12 +230,17 @@ impl ProjectRelativePath { /// /// ``` /// use std::path::Path; + /// /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; - /// use buck2_core::fs::project_rel_path::{ProjectRelativePathBuf, ProjectRelativePath}; + /// use buck2_core::fs::project_rel_path::ProjectRelativePath; + /// use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; /// /// let path = ProjectRelativePath::new("foo/bar")?; /// let other = ForwardRelativePath::new("baz")?; - /// assert_eq!(ProjectRelativePathBuf::unchecked_new("foo/bar/baz".to_owned()), path.join(other)); + /// assert_eq!( + /// ProjectRelativePathBuf::unchecked_new("foo/bar/baz".to_owned()), + /// path.join(other) + /// ); /// /// # anyhow::Ok(()) /// ``` @@ -230,7 +274,10 @@ impl ProjectRelativePath { /// use buck2_core::fs::paths::file_name::FileName; /// use buck2_core::fs::project_rel_path::ProjectRelativePath; /// - /// assert_eq!(Some(FileName::unchecked_new("bin")), ProjectRelativePath::new("usr/bin")?.file_name()); + /// assert_eq!( + /// Some(FileName::unchecked_new("bin")), + /// ProjectRelativePath::new("usr/bin")?.file_name() + /// ); /// /// # anyhow::Ok(()) /// ``` @@ -246,7 +293,6 @@ impl ProjectRelativePath { /// /// ``` /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; - /// /// use buck2_core::fs::project_rel_path::ProjectRelativePath; /// /// let path = ProjectRelativePath::new("test/haha/foo.txt")?; @@ -255,27 +301,44 @@ impl ProjectRelativePath { /// path.strip_prefix(ProjectRelativePath::new("test")?)?, /// ForwardRelativePath::new("haha/foo.txt")? /// ); - /// assert_eq!(path.strip_prefix(ProjectRelativePath::new("asdf")?).is_err(), true); + /// assert_eq!( + /// path.strip_prefix(ProjectRelativePath::new("asdf")?) 
+ /// .is_err(), + /// true + /// ); /// /// # anyhow::Ok(()) /// ``` - pub fn strip_prefix<'a, P: ?Sized>( - &'a self, - base: &'a P, - ) -> anyhow::Result<&'a ForwardRelativePath> + pub fn strip_prefix<'a, P>(&'a self, base: P) -> anyhow::Result<&'a ForwardRelativePath> where P: AsRef, { self.0.strip_prefix(&base.as_ref().0) } - pub fn strip_prefix_opt<'a, P: ?Sized>(&'a self, base: &'a P) -> Option<&'a ForwardRelativePath> + pub fn strip_prefix_opt<'a, P>(&'a self, base: P) -> Option<&'a ForwardRelativePath> where P: AsRef, { self.0.strip_prefix_opt(&base.as_ref().0) } + pub fn strip_suffix<'a, P>(&'a self, suffix: P) -> anyhow::Result<&'a ProjectRelativePath> + where + P: AsRef, + { + Ok(ProjectRelativePath::ref_cast(self.0.strip_suffix(suffix)?)) + } + + pub fn strip_suffix_opt<'a, P>(&'a self, suffix: P) -> Option<&'a ProjectRelativePath> + where + P: AsRef, + { + Some(ProjectRelativePath::ref_cast( + self.0.strip_suffix_opt(suffix)?, + )) + } + /// Determines whether `base` is a prefix of `self`. /// /// ``` @@ -296,8 +359,8 @@ impl ProjectRelativePath { /// /// ``` /// use std::path::Path; - /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; /// + /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; /// use buck2_core::fs::project_rel_path::ProjectRelativePath; /// /// let path = ProjectRelativePath::new("some/foo")?; @@ -338,7 +401,10 @@ impl ProjectRelativePath { /// ``` /// use buck2_core::fs::project_rel_path::ProjectRelativePath; /// - /// assert_eq!(Some("rs"), ProjectRelativePath::new("hi/foo.rs")?.extension()); + /// assert_eq!( + /// Some("rs"), + /// ProjectRelativePath::new("hi/foo.rs")?.extension() + /// ); /// /// # anyhow::Ok(()) /// ``` @@ -351,7 +417,9 @@ impl ProjectRelativePath { /// /// ``` /// use std::convert::TryFrom; - /// use buck2_core::fs::project_rel_path::{ProjectRelativePath, ProjectRelativePathBuf}; + /// + /// use buck2_core::fs::project_rel_path::ProjectRelativePath; + /// use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; /// /// assert_eq!( /// ProjectRelativePath::new("foo/bar")?.join_normalized("../baz.txt")?, @@ -359,7 +427,9 @@ impl ProjectRelativePath { /// ); /// /// assert_eq!( - /// ProjectRelativePath::new("foo")?.join_normalized("../../baz.txt").is_err(), + /// ProjectRelativePath::new("foo")? 
+ /// .join_normalized("../../baz.txt") + /// .is_err(), /// true /// ); /// @@ -383,22 +453,10 @@ impl ProjectRelativePath { /// let p = ProjectRelativePath::new("foo/bar/baz")?; /// let mut it = p.iter(); /// - /// assert_eq!( - /// it.next(), - /// Some(FileName::unchecked_new("foo")) - /// ); - /// assert_eq!( - /// it.next(), - /// Some(FileName::unchecked_new("bar")) - /// ); - /// assert_eq!( - /// it.next(), - /// Some(FileName::unchecked_new("baz")) - /// ); - /// assert_eq!( - /// it.next(), - /// None - /// ); + /// assert_eq!(it.next(), Some(FileName::unchecked_new("foo"))); + /// assert_eq!(it.next(), Some(FileName::unchecked_new("bar"))); + /// assert_eq!(it.next(), Some(FileName::unchecked_new("baz"))); + /// assert_eq!(it.next(), None); /// /// # anyhow::Ok(()) /// ``` @@ -414,13 +472,17 @@ impl ProjectRelativePath { impl<'a> From<&'a ForwardRelativePath> for &'a ProjectRelativePath { /// /// ``` - /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; /// use std::convert::From; + /// + /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; /// use buck2_core::fs::project_rel_path::ProjectRelativePath; /// /// let f = ForwardRelativePath::new("foo")?; /// - /// assert_eq!(<&ProjectRelativePath>::from(f), ProjectRelativePath::new("foo")?); + /// assert_eq!( + /// <&ProjectRelativePath>::from(f), + /// ProjectRelativePath::new("foo")? + /// ); /// /// # anyhow::Ok(()) /// ``` @@ -469,6 +531,11 @@ impl ProjectRelativePathBuf { self.0.push(path) } + /// Pops the last component from the path, if there is one. + pub fn pop(&mut self) -> bool { + self.0.pop() + } + /// Pushes a `RelativePath` to the existing buffer, normalizing it pub fn push_normalized>(&mut self, path: P) -> anyhow::Result<()> { self.0.push_normalized(path) @@ -504,6 +571,7 @@ impl<'a> TryFrom<&'a str> for &'a ProjectRelativePath { /// /// ``` /// use std::convert::TryFrom; + /// /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; /// use buck2_core::fs::project_rel_path::ProjectRelativePath; /// @@ -525,6 +593,7 @@ impl<'a> TryFrom<&'a RelativePath> for &'a ProjectRelativePath { /// /// ``` /// use std::convert::TryFrom; + /// /// use buck2_core::fs::paths::RelativePath; /// use buck2_core::fs::project_rel_path::ProjectRelativePath; /// @@ -546,9 +615,10 @@ impl TryFrom for ProjectRelativePathBuf { /// no allocation conversion /// /// ``` - /// use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; /// use std::convert::TryFrom; /// + /// use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; + /// /// assert!(ProjectRelativePathBuf::try_from("foo/bar".to_owned()).is_ok()); /// assert!(ProjectRelativePathBuf::try_from("".to_owned()).is_ok()); /// assert!(ProjectRelativePathBuf::try_from("/abs/bar".to_owned()).is_err()); @@ -569,8 +639,9 @@ impl TryFrom for ProjectRelativePathBuf { /// conversion) /// /// ``` - /// use buck2_core::fs::paths::RelativePathBuf; /// use std::convert::TryFrom; + /// + /// use buck2_core::fs::paths::RelativePathBuf; /// use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; /// /// assert!(ProjectRelativePathBuf::try_from(RelativePathBuf::from("foo/bar")).is_ok()); @@ -591,9 +662,9 @@ impl TryFrom for ProjectRelativePathBuf { /// no allocation conversion /// /// ``` - /// /// use std::convert::TryFrom; /// use std::path::PathBuf; + /// /// use buck2_core::fs::project_rel_path::ProjectRelativePathBuf; /// /// assert!(ProjectRelativePathBuf::try_from(PathBuf::from("foo/bar")).is_ok()); diff --git 
a/app/buck2_core/src/fs/working_dir.rs b/app/buck2_core/src/fs/working_dir.rs index a8f1d69e3b94e..be73bfd848d0a 100644 --- a/app/buck2_core/src/fs/working_dir.rs +++ b/app/buck2_core/src/fs/working_dir.rs @@ -9,7 +9,6 @@ use std::env; use std::path::Path; -use std::path::PathBuf; use crate::fs::fs_util; use crate::fs::paths::abs_norm_path::AbsNormPath; @@ -21,7 +20,7 @@ use crate::fs::paths::abs_path::AbsPathBuf; /// Can be different from process working directory if process changes the directory. /// So relative paths should be resolved against this. #[derive(Clone, Debug, derive_more::Display)] -#[display(fmt = "{}", path)] +#[display("{}", path)] pub struct WorkingDir { path: AbsNormPathBuf, } @@ -32,12 +31,12 @@ impl WorkingDir { } pub fn current_dir() -> anyhow::Result { - let current_dir = env::current_dir()?; + let current_dir = AbsPathBuf::new(env::current_dir()?)?; - #[derive(Debug, thiserror::Error)] + #[derive(Debug, buck2_error::Error)] enum CurrentDirError { - #[error("std::env::current_dir returns non-canonical path: `{0}` -> `{1}`")] - NotCanonical(PathBuf, PathBuf), + #[error("std::env::current_dir returns non-canonical path: `{}` -> `{}`", _0.display(), _1.display())] + NotCanonical(AbsPathBuf, AbsNormPathBuf), } // `current_dir` seems to return canonical path everywhere except Windows, @@ -45,13 +44,11 @@ impl WorkingDir { // https://fb.workplace.com/groups/buck2windows/posts/754618429743405 let current_dir_canonical = fs_util::canonicalize(¤t_dir)?; - if current_dir != current_dir_canonical.as_path() { + if current_dir.as_path() != current_dir_canonical.as_path() { if !cfg!(windows) { - return Err(CurrentDirError::NotCanonical( - current_dir, - current_dir_canonical.into_path_buf(), - ) - .into()); + return Err( + CurrentDirError::NotCanonical(current_dir, current_dir_canonical).into(), + ); } } diff --git a/app/buck2_core/src/lib.rs b/app/buck2_core/src/lib.rs index e568e94e218fc..8caf88968caca 100644 --- a/app/buck2_core/src/lib.rs +++ b/app/buck2_core/src/lib.rs @@ -7,32 +7,17 @@ * of this source tree. */ -// Because Buck build uses different version of Rust than Cargo build. 
-#![allow(stable_features)] -#![feature(absolute_path)] -#![feature(const_fn_fn_ptr_basics)] -#![feature(const_fn_trait_bound)] -#![feature(const_panic)] -#![feature(control_flow_enum)] -#![feature(fs_try_exists)] -#![feature(termination_trait_lib)] -#![feature(try_trait_v2)] -#![feature(type_alias_impl_trait)] +#![feature(error_generic_member_access)] +#![feature(decl_macro)] #![feature(never_type)] #![feature(pattern)] #![feature(box_patterns)] -#![feature(maybe_uninit_slice)] #![feature(impl_trait_in_assoc_type)] +#![feature(io_error_more)] #![feature(once_cell_try)] -#![cfg_attr(windows, feature(windows_file_type_ext))] -// Plugins -#![cfg_attr(feature = "gazebo_lint", feature(plugin))] -#![cfg_attr(feature = "gazebo_lint", allow(deprecated))] // :( -#![cfg_attr(feature = "gazebo_lint", plugin(gazebo_lint))] - -#[cfg(test)] -#[macro_use] -extern crate maplit; +#![feature(try_blocks)] +#![feature(used_with_arg)] +#![feature(let_chains)] #[macro_use] pub mod error; @@ -40,14 +25,15 @@ pub mod error; mod ascii_char_set; pub mod async_once_cell; pub mod base_deferred_key; -pub mod buck_path; pub mod build_file_path; pub mod bzl; pub mod category; pub mod cells; +pub mod ci; +pub mod client_only; pub mod configuration; -pub mod directory; -pub mod env_helper; +pub mod directory_digest; +pub mod env; pub mod execution_types; pub mod fs; pub mod io_counters; @@ -57,24 +43,30 @@ pub mod pattern; pub mod plugins; pub mod provider; pub mod rollout_percentage; -pub mod sandcastle; pub mod target; pub mod target_aliases; pub mod unsafe_send_future; +pub use env::__macro_refs::buck2_env; +pub use env::__macro_refs::buck2_env_anyhow; +pub use env::__macro_refs::buck2_env_name; + /// Marker for things that are only sensible to use inside Facebook, /// not intended to be complete, but intended to be useful to audit /// en-mass at some point in the future. pub fn facebook_only() {} -#[inline] -pub fn is_open_source() -> bool { - // @oss-disable: false - true // @oss-enable +/// Emit one expression or another depending on whether this is an open source or internal build. +#[macro_export] +macro_rules! if_else_opensource { + ($opensource:expr, $internal:expr $(,)? + ) => { + // @oss-disable: $internal + $opensource // @oss-enable + }; } -/// Internal build with `buck2`. #[inline] -pub fn is_fbcode_build() -> bool { - cfg!(fbcode_build) +pub fn is_open_source() -> bool { + if_else_opensource!(true, false) } diff --git a/app/buck2_core/src/logging.rs b/app/buck2_core/src/logging.rs index aee05d40b39eb..269af513a0041 100644 --- a/app/buck2_core/src/logging.rs +++ b/app/buck2_core/src/logging.rs @@ -17,6 +17,10 @@ use tracing_subscriber::reload; use tracing_subscriber::reload::Handle; use tracing_subscriber::EnvFilter; +use crate::buck2_env_anyhow; + +pub mod log_file; + pub trait LogConfigurationReloadHandle: Send + Sync + 'static { fn update_log_filter(&self, format: &str) -> anyhow::Result<()>; } @@ -59,15 +63,9 @@ where // If the user specifies BUCK_LOG, we want to honour that. const ENV_VAR: &str = "BUCK_LOG"; - let filter = match std::env::var_os(ENV_VAR) { - Some(v) => { - let v = v - .into_string() - .ok() - .with_context(|| format!("Failed to parse ${} as utf-8", ENV_VAR))?; - EnvFilter::try_new(v) - .with_context(|| format!("Failed to parse ${} as a filter", ENV_VAR))? - } + let filter = match buck2_env_anyhow!(ENV_VAR)? 
{ + Some(v) => EnvFilter::try_new(v) + .with_context(|| format!("Failed to parse ${} as a filter", ENV_VAR))?, // daemon_listener is all emitted before the client starts tailing, which is why we log // those by default. None => EnvFilter::new("warn,[daemon_listener]=info"), diff --git a/app/buck2_server_ctx/src/logging.rs b/app/buck2_core/src/logging/log_file.rs similarity index 100% rename from app/buck2_server_ctx/src/logging.rs rename to app/buck2_core/src/logging/log_file.rs diff --git a/app/buck2_core/src/package.rs b/app/buck2_core/src/package.rs new file mode 100644 index 0000000000000..43042c0a16e1d --- /dev/null +++ b/app/buck2_core/src/package.rs @@ -0,0 +1,195 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! +//! A 'Package' in Buck corresponds to the subdirectories containing the +//! repository sources that are accessible to the targets defined in the build +//! file of current package. Each 'Package' can only contain one build file. +//! +//! A 'Package' is usually the entire directory contents where directory +//! contains a build file, including all transitive subdirectories that do not +//! contain a build file themselves, i.e. excluding all sub-packages. There's +//! also a set of outputs that corresponds to building all the targets of the +//! 'Package'. +//! +//! Example: +//! ```ignore +//! fbsource +//! +-- .buck +//! +-- package1 +//! | +-- TARGETS +//! | +-- my.java +//! +-- package2 +//! | +-- subdir // package 2 contains this subdir +//! | | +-- foo.cpp +//! | +-- bar.cpp +//! | +-- TARGETS +//! +-- package3 +//! | +-- package4 // package 3 excludes all subdirectories rooted at package4 +//! | | +-- a.cpp +//! | | +-- TARGETS +//! | +-- faz.java +//! | +-- TARGETS +//! ``` + +pub mod package_relative_path; +pub mod source_path; + +use std::hash::Hash; +use std::hash::Hasher; + +use allocative::Allocative; +use buck2_util::hash::BuckHasher; +use derive_more::Display; +use dupe::Dupe; +use equivalent::Equivalent; +use serde::Serialize; +use serde::Serializer; +use static_interner::Intern; +use static_interner::Interner; + +use crate::cells::cell_path::CellPath; +use crate::cells::cell_path::CellPathRef; +use crate::cells::name::CellName; +use crate::cells::paths::CellRelativePath; +use crate::fs::paths::fmt::quoted_display; +use crate::fs::paths::forward_rel_path::ForwardRelativePath; + +/// A 'Package' as defined above. +/// +/// This type does not assert it represents a valid package. +/// However, we use it in context where we expect it to be a valid package +/// (for example, attempt to gather package listing for a package fails +/// if it is a directory, but does not have a build file). 
+#[derive( + Copy, Clone, Dupe, Debug, Display, Eq, PartialEq, Hash, Ord, PartialOrd, Allocative +)] +pub struct PackageLabel(Intern); + +impl Serialize for PackageLabel { + fn serialize(&self, s: S) -> Result { + s.collect_str(&self.to_string()) + } +} + +#[derive(Debug, Display, Eq, PartialEq, Ord, PartialOrd, Allocative)] +struct PackageLabelData(CellPath); + +#[derive(Hash, Eq, PartialEq)] +struct PackageLabelDataRef<'a> { + path: CellPathRef<'a>, +} + +impl<'a> From> for PackageLabelData { + fn from(package_data: PackageLabelDataRef<'a>) -> Self { + PackageLabelData(package_data.path.to_owned()) + } +} + +impl PackageLabelData { + fn as_ref(&self) -> PackageLabelDataRef { + PackageLabelDataRef { + path: self.0.as_ref(), + } + } +} + +#[allow(clippy::derived_hash_with_manual_eq)] +impl Hash for PackageLabelData { + fn hash(&self, state: &mut H) { + self.as_ref().hash(state) + } +} + +impl<'a> Equivalent for PackageLabelDataRef<'a> { + fn equivalent(&self, key: &PackageLabelData) -> bool { + self == &key.as_ref() + } +} + +static INTERNER: Interner = Interner::new(); + +impl PackageLabel { + #[inline] + pub fn new(cell: CellName, path: &CellRelativePath) -> Self { + PackageLabel::from_cell_path(CellPathRef::new(cell, path)) + } + + #[inline] + pub fn from_cell_path(path: CellPathRef) -> Self { + PackageLabel(INTERNER.intern(PackageLabelDataRef { path })) + } + + #[inline] + pub fn cell_name(&self) -> CellName { + self.0.0.cell() + } + + #[inline] + pub fn cell_relative_path(&self) -> &'static CellRelativePath { + self.0.deref_static().0.path() + } + + #[inline] + pub fn to_cell_path(&self) -> CellPath { + self.0.0.clone() + } + + #[inline] + pub fn as_cell_path(&self) -> CellPathRef { + self.0.0.as_ref() + } + + pub fn join(&self, path: &ForwardRelativePath) -> Self { + if path.is_empty() { + self.dupe() + } else { + PackageLabel::from_cell_path(self.as_cell_path().join(path).as_ref()) + } + } + + pub fn parent(&self) -> Option { + Some(PackageLabel::from_cell_path(self.as_cell_path().parent()?)) + } + + // Following functions should only be used in tests, so they have "testing" in their names. + + /// Some package name usable in tests. + pub fn testing() -> PackageLabel { + PackageLabel::new( + CellName::testing_new("root"), + CellRelativePath::new(ForwardRelativePath::new("package/subdir").unwrap()), + ) + } + + pub fn testing_new(cell: &str, path: &str) -> PackageLabel { + PackageLabel::new( + CellName::testing_new(cell), + CellRelativePath::new(ForwardRelativePath::new(path).unwrap()), + ) + } + + pub fn testing_parse(label: &str) -> PackageLabel { + let (cell, path) = label.split_once("//").unwrap(); + PackageLabel::testing_new(cell, path) + } +} + +#[cfg(test)] +mod tests { + use crate::package::PackageLabel; + + #[test] + fn test_serialize() { + assert_eq!( + r#""foo//bar/baz""#, + serde_json::to_string(&PackageLabel::testing_parse("foo//bar/baz")).unwrap() + ); + } +} diff --git a/app/buck2_core/src/package/mod.rs b/app/buck2_core/src/package/mod.rs deleted file mode 100644 index 371629806ebb5..0000000000000 --- a/app/buck2_core/src/package/mod.rs +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! -//! 
A 'Package' in Buck corresponds to the subdirectories containing the -//! repository sources that are accessible to the targets defined in the build -//! file of current package. Each 'Package' can only contain one build file. -//! -//! A 'Package' is usually the entire directory contents where directory -//! contains a build file, including all transitive subdirectories that do not -//! contain a build file themselves, i.e. excluding all sub-packages. There's -//! also a set of outputs that corresponds to building all the targets of the -//! 'Package'. -//! -//! Example: -//! ```ignore -//! fbsource -//! +-- .buck -//! +-- package1 -//! | +-- TARGETS -//! | +-- my.java -//! +-- package2 -//! | +-- subdir // package 2 contains this subdir -//! | | +-- foo.cpp -//! | +-- bar.cpp -//! | +-- TARGETS -//! +-- package3 -//! | +-- package4 // package 3 excludes all subdirectories rooted at package4 -//! | | +-- a.cpp -//! | | +-- TARGETS -//! | +-- faz.java -//! | +-- TARGETS -//! ``` - -pub mod package_relative_path; - -use std::hash::Hash; -use std::hash::Hasher; - -use allocative::Allocative; -use derive_more::Display; -use dupe::Dupe; -use equivalent::Equivalent; -use fnv::FnvHasher; -use internment_tweaks::Intern; -use internment_tweaks::StaticInterner; -use serde::Serialize; -use serde::Serializer; - -use crate::cells::cell_path::CellPath; -use crate::cells::cell_path::CellPathRef; -use crate::cells::name::CellName; -use crate::cells::paths::CellRelativePath; -use crate::fs::paths::fmt::quoted_display; -use crate::fs::paths::forward_rel_path::ForwardRelativePath; - -/// A 'Package' as defined above. -/// -/// This type does not assert it represents a valid package. -/// However, we use it in context where we expect it to be a valid package -/// (for example, attempt to gather package listing for a package fails -/// if it is a directory, but does not have a build file). 
-#[derive( - Clone, Dupe, Debug, Display, Eq, PartialEq, Hash, Ord, PartialOrd, Allocative -)] -pub struct PackageLabel(Intern); - -impl Serialize for PackageLabel { - fn serialize(&self, s: S) -> Result { - s.collect_str(&self.to_string()) - } -} - -#[derive(Debug, Display, Eq, PartialEq, Ord, PartialOrd, Allocative)] -struct PackageLabelData(CellPath); - -#[derive(Hash, Eq, PartialEq)] -struct PackageLabelDataRef<'a> { - path: CellPathRef<'a>, -} - -impl<'a> From> for PackageLabelData { - fn from(package_data: PackageLabelDataRef<'a>) -> Self { - PackageLabelData(package_data.path.to_owned()) - } -} - -impl PackageLabelData { - fn as_ref(&self) -> PackageLabelDataRef { - PackageLabelDataRef { - path: self.0.as_ref(), - } - } -} - -#[allow(clippy::derived_hash_with_manual_eq)] -impl Hash for PackageLabelData { - fn hash(&self, state: &mut H) { - self.as_ref().hash(state) - } -} - -impl<'a> Equivalent for PackageLabelDataRef<'a> { - fn equivalent(&self, key: &PackageLabelData) -> bool { - self == &key.as_ref() - } -} - -static INTERNER: StaticInterner = StaticInterner::new(); - -impl PackageLabel { - #[inline] - pub fn new(cell: CellName, path: &CellRelativePath) -> Self { - PackageLabel::from_cell_path(CellPathRef::new(cell, path)) - } - - #[inline] - pub fn from_cell_path(path: CellPathRef) -> Self { - PackageLabel(INTERNER.intern(PackageLabelDataRef { path })) - } - - #[inline] - pub fn cell_name(&self) -> CellName { - self.0.0.cell() - } - - #[inline] - pub fn cell_relative_path(&self) -> &'static CellRelativePath { - self.0.deref_static().0.path() - } - - #[inline] - pub fn to_cell_path(&self) -> CellPath { - self.0.0.clone() - } - - #[inline] - pub fn as_cell_path(&self) -> CellPathRef { - self.0.0.as_ref() - } - - pub fn join(&self, path: &ForwardRelativePath) -> Self { - if path.is_empty() { - self.dupe() - } else { - PackageLabel::from_cell_path(self.as_cell_path().join(path).as_ref()) - } - } - - // Following functions should only be used in tests, so they have "testing" in their names. - - /// Some package name usable in tests. - pub fn testing() -> PackageLabel { - PackageLabel::new( - CellName::testing_new("root"), - CellRelativePath::new(ForwardRelativePath::new("package/subdir").unwrap()), - ) - } - - pub fn testing_new(cell: &str, path: &str) -> PackageLabel { - PackageLabel::new( - CellName::testing_new(cell), - CellRelativePath::new(ForwardRelativePath::new(path).unwrap()), - ) - } - - pub fn testing_parse(label: &str) -> PackageLabel { - let (cell, path) = label.split_once("//").unwrap(); - PackageLabel::testing_new(cell, path) - } -} - -#[cfg(test)] -mod tests { - use crate::package::PackageLabel; - - #[test] - fn test_serialize() { - assert_eq!( - r#""foo//bar/baz""#, - serde_json::to_string(&PackageLabel::testing_parse("foo//bar/baz")).unwrap() - ); - } -} diff --git a/app/buck2_core/src/package/package_relative_path.rs b/app/buck2_core/src/package/package_relative_path.rs index d38a2186ee7e0..135779ad7c0df 100644 --- a/app/buck2_core/src/package/package_relative_path.rs +++ b/app/buck2_core/src/package/package_relative_path.rs @@ -110,6 +110,20 @@ impl AsRef for PackageRelativePathBuf { } } +impl AsRef for PackageRelativePath { + #[inline] + fn as_ref(&self) -> &str { + self.as_str() + } +} + +impl AsRef for PackageRelativePathBuf { + #[inline] + fn as_ref(&self) -> &str { + self.0.as_str() + } +} + impl Clone for Box { #[inline] fn clone(&self) -> Self { @@ -140,9 +154,10 @@ impl PackageRelativePath { /// normalized relative path, otherwise error. 
/// /// ``` - /// use buck2_core::package::package_relative_path::PackageRelativePath; /// use std::path::Path; /// + /// use buck2_core::package::package_relative_path::PackageRelativePath; + /// /// assert!(PackageRelativePath::new("foo/bar").is_ok()); /// assert!(PackageRelativePath::new("").is_ok()); /// assert!(PackageRelativePath::new("/abs/bar").is_err()); @@ -181,12 +196,17 @@ impl PackageRelativePath { /// /// ``` /// use std::path::Path; + /// /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; - /// use buck2_core::package::package_relative_path::{PackageRelativePath, PackageRelativePathBuf}; + /// use buck2_core::package::package_relative_path::PackageRelativePath; + /// use buck2_core::package::package_relative_path::PackageRelativePathBuf; /// /// let path = PackageRelativePath::new("foo/bar")?; /// let other = ForwardRelativePath::new("baz")?; - /// assert_eq!(PackageRelativePathBuf::unchecked_new("foo/bar/baz".to_owned()), path.join(other)); + /// assert_eq!( + /// PackageRelativePathBuf::unchecked_new("foo/bar/baz".to_owned()), + /// path.join(other) + /// ); /// /// # anyhow::Ok(()) /// ``` @@ -219,10 +239,13 @@ impl PackageRelativePath { /// a directory, this is the directory name. /// /// ``` - /// use buck2_core::package::package_relative_path::PackageRelativePath; /// use buck2_core::fs::paths::file_name::FileName; + /// use buck2_core::package::package_relative_path::PackageRelativePath; /// - /// assert_eq!(Some(FileName::unchecked_new("bin")), PackageRelativePath::new("usr/bin")?.file_name()); + /// assert_eq!( + /// Some(FileName::unchecked_new("bin")), + /// PackageRelativePath::new("usr/bin")?.file_name() + /// ); /// /// # anyhow::Ok(()) /// ``` @@ -245,7 +268,11 @@ impl PackageRelativePath { /// path.strip_prefix(PackageRelativePath::new("test")?)?, /// ForwardRelativePath::new("haha/foo.txt")? /// ); - /// assert_eq!(path.strip_prefix(PackageRelativePath::new("asdf")?).is_err(), true); + /// assert_eq!( + /// path.strip_prefix(PackageRelativePath::new("asdf")?) + /// .is_err(), + /// true + /// ); /// /// # anyhow::Ok(()) /// ``` @@ -281,6 +308,7 @@ impl PackageRelativePath { /// /// ``` /// use std::path::Path; + /// /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; /// use buck2_core::package::package_relative_path::PackageRelativePath; /// @@ -322,10 +350,12 @@ impl PackageRelativePath { /// Extracts the extension of [`self.file_name`], if possible. 
/// /// ``` - /// /// use buck2_core::package::package_relative_path::PackageRelativePath; /// - /// assert_eq!(Some("rs"), PackageRelativePath::new("hi/foo.rs")?.extension()); + /// assert_eq!( + /// Some("rs"), + /// PackageRelativePath::new("hi/foo.rs")?.extension() + /// ); /// /// # anyhow::Ok(()) /// ``` @@ -343,22 +373,10 @@ impl PackageRelativePath { /// let p = PackageRelativePath::new("foo/bar/baz")?; /// let mut it = p.iter(); /// - /// assert_eq!( - /// it.next(), - /// Some(FileName::unchecked_new("foo")) - /// ); - /// assert_eq!( - /// it.next(), - /// Some(FileName::unchecked_new("bar")) - /// ); - /// assert_eq!( - /// it.next(), - /// Some(FileName::unchecked_new("baz")) - /// ); - /// assert_eq!( - /// it.next(), - /// None - /// ); + /// assert_eq!(it.next(), Some(FileName::unchecked_new("foo"))); + /// assert_eq!(it.next(), Some(FileName::unchecked_new("bar"))); + /// assert_eq!(it.next(), Some(FileName::unchecked_new("baz"))); + /// assert_eq!(it.next(), None); /// /// # anyhow::Ok(()) /// ``` @@ -386,14 +404,17 @@ impl PackageRelativePath { impl<'a> From<&'a ForwardRelativePath> for &'a PackageRelativePath { /// /// ``` + /// use std::convert::From; /// /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; - /// use std::convert::From; /// use buck2_core::package::package_relative_path::PackageRelativePath; /// /// let f = ForwardRelativePath::new("foo")?; /// - /// assert_eq!(<&PackageRelativePath>::from(f), PackageRelativePath::new("foo")?); + /// assert_eq!( + /// <&PackageRelativePath>::from(f), + /// PackageRelativePath::new("foo")? + /// ); /// /// # anyhow::Ok(()) /// ``` @@ -492,10 +513,10 @@ impl<'a> TryFrom<&'a str> for &'a PackageRelativePath { /// no allocation conversion /// /// ``` - /// - /// use buck2_core::package::package_relative_path::PackageRelativePath; /// use std::convert::TryFrom; + /// /// use buck2_core::fs::paths::forward_rel_path::ForwardRelativePath; + /// use buck2_core::package::package_relative_path::PackageRelativePath; /// /// assert!(<&PackageRelativePath>::try_from("foo/bar").is_ok()); /// assert!(<&PackageRelativePath>::try_from("").is_ok()); @@ -515,10 +536,10 @@ impl<'a> TryFrom<&'a RelativePath> for &'a PackageRelativePath { /// no allocation conversion /// /// ``` - /// - /// use buck2_core::package::package_relative_path::PackageRelativePath; /// use std::convert::TryFrom; + /// /// use buck2_core::fs::paths::RelativePath; + /// use buck2_core::package::package_relative_path::PackageRelativePath; /// /// assert!(<&PackageRelativePath>::try_from(RelativePath::new("foo/bar")).is_ok()); /// assert!(<&PackageRelativePath>::try_from(RelativePath::new("")).is_ok()); @@ -539,10 +560,10 @@ impl TryFrom for PackageRelativePathBuf { /// no allocation conversion /// /// ``` - /// - /// use buck2_core::package::package_relative_path::PackageRelativePathBuf; /// use std::convert::TryFrom; + /// /// use buck2_core::package::package_relative_path::PackageRelativePath; + /// use buck2_core::package::package_relative_path::PackageRelativePathBuf; /// /// assert!(PackageRelativePathBuf::try_from("foo/bar".to_owned()).is_ok()); /// assert!(PackageRelativePathBuf::try_from("".to_owned()).is_ok()); @@ -565,10 +586,11 @@ impl TryFrom for PackageRelativePathBuf { /// conversion) /// /// ``` - /// use buck2_core::package::package_relative_path::PackageRelativePathBuf; - /// use buck2_core::fs::paths::RelativePathBuf; /// use std::convert::TryFrom; + /// + /// use buck2_core::fs::paths::RelativePathBuf; /// use 
buck2_core::package::package_relative_path::PackageRelativePath; + /// use buck2_core::package::package_relative_path::PackageRelativePathBuf; /// /// assert!(PackageRelativePathBuf::try_from(RelativePathBuf::from("foo/bar")).is_ok()); /// assert!(PackageRelativePathBuf::try_from(RelativePathBuf::from("")).is_ok()); @@ -589,11 +611,11 @@ impl TryFrom for PackageRelativePathBuf { /// no allocation conversion /// /// ``` - /// - /// use buck2_core::package::package_relative_path::PackageRelativePathBuf; /// use std::convert::TryFrom; /// use std::path::PathBuf; + /// /// use buck2_core::package::package_relative_path::PackageRelativePath; + /// use buck2_core::package::package_relative_path::PackageRelativePathBuf; /// /// assert!(PackageRelativePathBuf::try_from(PathBuf::from("foo/bar")).is_ok()); /// assert!(PackageRelativePathBuf::try_from(PathBuf::from("")).is_ok()); diff --git a/app/buck2_core/src/package/source_path.rs b/app/buck2_core/src/package/source_path.rs new file mode 100644 index 0000000000000..81c0708dd04e5 --- /dev/null +++ b/app/buck2_core/src/package/source_path.rs @@ -0,0 +1,112 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +use allocative::Allocative; +use buck2_util::arc_str::ArcS; +use derive_more::Display; +use dupe::Dupe; + +use crate::cells::cell_path::CellPath; +use crate::package::package_relative_path::PackageRelativePath; +use crate::package::PackageLabel; + +/// Represents the path of a source artifact. +#[derive( + Clone, + Debug, + derive_more::Display, + Hash, + Eq, + PartialEq, + Ord, + PartialOrd, + Allocative +)] +#[display("{}", self.as_ref())] +pub struct SourcePath { + pkg: PackageLabel, + path: ArcS, +} + +impl SourcePath { + #[inline] + pub fn new(pkg: PackageLabel, path: ArcS) -> Self { + SourcePath { pkg, path } + } + + /// This is slow, but OK to use in tests. 
+ pub fn testing_new(pkg: &str, path: &str) -> Self { + SourcePath::new( + PackageLabel::testing_parse(pkg), + ArcS::from(PackageRelativePath::new(path).unwrap()), + ) + } + + #[inline] + pub fn package(&self) -> PackageLabel { + self.pkg.dupe() + } + + #[inline] + pub fn path(&self) -> &PackageRelativePath { + &self.path + } + + #[inline] + pub fn to_cell_path(&self) -> CellPath { + self.as_ref().to_cell_path() + } + + #[inline] + pub fn as_ref(&self) -> SourcePathRef { + SourcePathRef { + pkg: self.pkg.dupe(), + path: &self.path, + } + } +} + +#[derive(Display, Debug, Eq, Hash, PartialEq, Copy, Clone, Dupe)] +#[display("{}/{}", pkg, path.as_str())] +pub struct SourcePathRef<'a> { + pkg: PackageLabel, + path: &'a ArcS, +} + +impl<'a> SourcePathRef<'a> { + #[inline] + pub fn new(pkg: PackageLabel, path: &'a ArcS) -> SourcePathRef<'a> { + SourcePathRef { pkg, path } + } + + #[inline] + pub fn package(&self) -> PackageLabel { + self.pkg.dupe() + } + + #[inline] + pub fn path(&self) -> &PackageRelativePath { + self.path + } + + #[inline] + pub fn to_cell_path(&self) -> CellPath { + self.pkg + .as_cell_path() + .join(self.path.as_forward_rel_path()) + } + + #[inline] + pub fn to_owned(&self) -> SourcePath { + SourcePath { + pkg: self.pkg.dupe(), + path: self.path.dupe(), + } + } +} diff --git a/app/buck2_core/src/pattern.rs b/app/buck2_core/src/pattern.rs new file mode 100644 index 0000000000000..609cc43f77294 --- /dev/null +++ b/app/buck2_core/src/pattern.rs @@ -0,0 +1,19 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +//! Implements target pattern resolution. +#![doc = include_str!("pattern/target_pattern.md")] + +mod ascii_pattern; +pub mod package; +pub mod parse_package; +pub mod pattern; +pub mod pattern_type; +pub mod query_file_literal; +pub mod unparsed; diff --git a/app/buck2_core/src/pattern/ascii_pattern.rs b/app/buck2_core/src/pattern/ascii_pattern.rs index 0d1a764caf151..49a5795c88e16 100644 --- a/app/buck2_core/src/pattern/ascii_pattern.rs +++ b/app/buck2_core/src/pattern/ascii_pattern.rs @@ -37,6 +37,7 @@ const fn assert_ascii_str(s: &str) -> &[u8] { /// Caller of this trait skips boundary checks/UTF-8 checks, so this trait is `unsafe`. pub(crate) unsafe trait AsciiPattern { fn first_index_in(&self, s: &str) -> Option; + #[allow(dead_code)] fn last_index_in(&self, s: &str) -> Option; fn is_prefix_of(&self, s: &str) -> bool; fn is_suffix_of(&self, s: &str) -> bool; diff --git a/app/buck2_core/src/pattern/mod.rs b/app/buck2_core/src/pattern/mod.rs deleted file mode 100644 index e856297467674..0000000000000 --- a/app/buck2_core/src/pattern/mod.rs +++ /dev/null @@ -1,1901 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under both the MIT license found in the - * LICENSE-MIT file in the root directory of this source tree and the Apache - * License, Version 2.0 found in the LICENSE-APACHE file in the root directory - * of this source tree. - */ - -//! Implements target pattern resolution. -//! 
-#![doc = include_str!("target_pattern.md")] - -mod ascii_pattern; -pub mod parse_package; -pub mod pattern_type; -pub mod query_file_literal; - -use std::fmt; -use std::fmt::Debug; -use std::fmt::Display; - -use allocative::Allocative; -use anyhow::Context; -use dupe::Dupe; -use once_cell::sync::Lazy; -use pattern_type::ConfigurationPredicate; -use pattern_type::ConfiguredProvidersPatternExtra; -use pattern_type::PatternType; -use pattern_type::ProvidersPatternExtra; -use pattern_type::TargetPatternExtra; -use regex::Regex; - -use crate::cells::alias::CellAlias; -use crate::cells::cell_path::CellPath; -use crate::cells::cell_path::CellPathCow; -use crate::cells::cell_path::CellPathRef; -use crate::cells::cell_root_path::CellRootPathBuf; -use crate::cells::name::CellName; -use crate::cells::paths::CellRelativePath; -use crate::cells::CellResolver; -use crate::configuration::bound_label::BoundConfigurationLabel; -use crate::configuration::builtin::BuiltinPlatform; -use crate::configuration::hash::ConfigurationHash; -use crate::fs::paths::forward_rel_path::ForwardRelativePath; -use crate::package::PackageLabel; -use crate::pattern::ascii_pattern::split1_opt_ascii; -use crate::pattern::ascii_pattern::strip_suffix_ascii; -use crate::pattern::ascii_pattern::trim_prefix_ascii; -use crate::pattern::ascii_pattern::AsciiChar; -use crate::pattern::ascii_pattern::AsciiStr; -use crate::pattern::ascii_pattern::AsciiStr2; -use crate::provider::flavors::map_flavors; -use crate::provider::label::NonDefaultProvidersName; -use crate::provider::label::ProviderName; -use crate::provider::label::ProvidersLabel; -use crate::provider::label::ProvidersName; -use crate::target::label::TargetLabel; -use crate::target::name::TargetName; -use crate::target::name::TargetNameRef; -use crate::target_aliases::TargetAliasResolver; - -#[derive(thiserror::Error, Debug)] -enum TargetPatternParseError { - #[error("Expected a `:`, a trailing `/...` or the literal `...`.")] - UnexpectedFormat, - #[error("Package is empty")] - PackageIsEmpty, - #[error("Must be absolute, with a `//` or no package just `:`.")] - AbsoluteRequired, - #[error( - "Packages may not end with a trailing `/` (except when provided on the command line where it's tolerated)" - )] - PackageTrailingSlash, - #[error("Required a target literal, but got a non-literal pattern `{0}`")] - TargetLiteralRequired(String), - #[error( - "You may be trying to use a macro instead of a target pattern. Macro usage is invalid here" - )] - PossibleMacroUsage, - #[error("Expecting {0} pattern, got: `{1}`")] - ExpectingPatternOfType(&'static str, String), - #[error("Configuration part of the pattern must be enclosed in `()`")] - ConfigurationPartMustBeEnclosedInParentheses, - #[error( - "Cell resolver cell `{0}` does not match the given relative dir `{1}` (internal error)" - )] - CellResolverCellDoesNotMatchWorkingDir(CellName, CellPath), - #[error("Pattern `{0}` is parsed as `{1}` which crosses cell boundaries. Try `{2}` instead")] - PatternCrossesCellBoundaries(String, String, String), -} - -pub fn display_precise_pattern<'a, T: PatternType>( - package: &'a PackageLabel, - target_name: &'a TargetNameRef, - extra: &'a T, -) -> impl Display + 'a { - #[derive(derive_more::Display)] - #[display(fmt = "{}:{}{}", package, target_name, extra)] - struct Impl<'a, T: PatternType> { - package: &'a PackageLabel, - target_name: &'a TargetNameRef, - extra: &'a T, - } - Impl { - package, - target_name, - extra, - } -} - -/// Extract provider name from a target pattern. 
-/// Extract provider name from a target pattern.
-pub(crate) fn split_providers_name(s: &str) -> anyhow::Result<(&str, ProvidersName)> {
-    if let Some((t, flavors)) = split1_opt_ascii(s, AsciiChar::new('#')) {
-        let name = map_flavors(flavors, s)?;
-        Ok((t, name))
-    } else if let Some((t, p)) = split1_opt_ascii(s, AsciiChar::new('[')) {
-        let mut names = Vec::new();
-
-        let mut remaining = if let Some((p, r)) = split1_opt_ascii(p, AsciiChar::new(']')) {
-            names.push(ProviderName::new(p.to_owned())?);
-            r
-        } else {
-            return Err(anyhow::anyhow!(
-                "target pattern with `[` must end with `]` to mark end of providers set label"
-            ));
-        };
-
-        while !remaining.is_empty() {
-            if let Some(("", r)) = split1_opt_ascii(remaining, AsciiChar::new('[')) {
-                if let Some((p, r)) = split1_opt_ascii(r, AsciiChar::new(']')) {
-                    names.push(ProviderName::new(p.to_owned())?);
-                    remaining = r;
-                    continue;
-                }
-            }
-            return Err(anyhow::anyhow!(
-                "target pattern with `[` must end with `]` to mark end of providers set label"
-            ));
-        }
-
-        Ok((
-            t,
-            ProvidersName::NonDefault(Box::new(NonDefaultProvidersName::Named(
-                names.into_boxed_slice(),
-            ))),
-        ))
-    } else {
-        Ok((s, ProvidersName::Default))
-    }
-}
-
-/// A parsed target pattern.
-#[derive(Clone, Debug, Hash, Eq, PartialEq, Allocative)]
-pub enum ParsedPattern<T: PatternType> {
-    /// A target pattern that matches an explicit target pattern type T. See
-    /// `PatternType` for the supported pattern types.
-    Target(PackageLabel, TargetName, T),
-    /// A target pattern that matches an entire package. Ex. `//some/package:`
-    Package(PackageLabel),
-    /// A target pattern that matches all recursive packages. Ex.
-    /// `//some/package/...`. The path component here is not required to be
-    /// an actual package (i.e. a build file is not required at the path)
-    /// and so we don't hold this as a [PackageLabel].
-    Recursive(CellPath),
-}
-
-impl ParsedPattern<TargetPatternExtra> {
-    /// Extract [`TargetLabel`] from a [`ParsedPattern`].
-    pub fn as_target_label(self, original: &str) -> anyhow::Result<TargetLabel> {
-        let (package, target_name, TargetPatternExtra) = self.as_literal(original)?;
-        Ok(TargetLabel::new(package, target_name.as_ref()))
-    }
-
-    /// Check if a [`ParsedPattern`] matches a [`TargetLabel`].
-    pub fn matches(&self, target: &TargetLabel) -> bool {
-        let target_pkg = target.pkg();
-        match self {
-            ParsedPattern::Target(pkg, t, TargetPatternExtra) => {
-                *pkg == target_pkg && t.as_ref() == target.name()
-            }
-            ParsedPattern::Package(pkg) => target_pkg.as_cell_path() == pkg.as_cell_path(),
-            ParsedPattern::Recursive(cell_path) => {
-                target_pkg.as_cell_path().starts_with(cell_path.as_ref())
-            }
-        }
-    }
-}
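For reference, `split_providers_name` at the top of this hunk splits `target[Provider1][Provider2]` into the target part and its provider names, rejecting an unclosed `[`. A self-contained re-implementation of just that bracket loop (a hypothetical helper with string errors, not the diff's code):

```rust
// Split `bar[Foo][Baz]` into ("bar", ["Foo", "Baz"]); reject unbalanced brackets.
fn split_providers(s: &str) -> Result<(&str, Vec<String>), String> {
    match s.find('[') {
        None => Ok((s, Vec::new())), // no provider set: default providers
        Some(i) => {
            let (target, mut rest) = s.split_at(i);
            let mut names = Vec::new();
            while !rest.is_empty() {
                let inner = rest.strip_prefix('[').ok_or("expected `[`")?;
                let end = inner.find(']').ok_or("unclosed `[`")?;
                names.push(inner[..end].to_owned());
                rest = &inner[end + 1..];
            }
            Ok((target, names))
        }
    }
}

fn main() {
    let (t, ps) = split_providers("bar[Foo][Baz]").unwrap();
    assert_eq!(t, "bar");
    assert_eq!(ps, vec!["Foo".to_owned(), "Baz".to_owned()]);
    assert!(split_providers("bar[Foo").is_err()); // unclosed bracket is rejected
}
```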
-
-impl ParsedPattern<ProvidersPatternExtra> {
-    /// Extract [`ProvidersLabel`] from a [`ParsedPattern`].
-    pub fn as_providers_label(self, original: &str) -> anyhow::Result<ProvidersLabel> {
-        let (package, target, ProvidersPatternExtra { providers }) = self.as_literal(original)?;
-        Ok(ProvidersLabel::new(
-            TargetLabel::new(package, target.as_ref()),
-            providers,
-        ))
-    }
-}
-
-impl<T: PatternType> ParsedPattern<T> {
-    pub(crate) fn cell_path(&self) -> CellPathRef {
-        match self {
-            ParsedPattern::Target(pkg, _, _) => pkg.as_cell_path(),
-            ParsedPattern::Package(pkg) => pkg.as_cell_path(),
-            ParsedPattern::Recursive(cell_path) => cell_path.as_ref(),
-        }
-    }
-
-    pub fn try_map<U: PatternType>(
-        self,
-        f: impl FnOnce(T) -> anyhow::Result<U>,
-    ) -> anyhow::Result<ParsedPattern<U>> {
-        match self {
-            ParsedPattern::Target(package, target_name, val) => {
-                Ok(ParsedPattern::Target(package, target_name, f(val)?))
-            }
-            ParsedPattern::Package(package) => Ok(ParsedPattern::Package(package)),
-            ParsedPattern::Recursive(cell_path) => Ok(ParsedPattern::Recursive(cell_path)),
-        }
-    }
-
-    /// Extract a literal from a [ParsedPattern], or `Err` if it is not a literal.
-    pub fn as_literal(self, original: &str) -> anyhow::Result<(PackageLabel, TargetName, T)> {
-        // FIXME: Would be better if we had a Display on self, so we could produce a nice error message.
-        // For now, just require the original string to be passed in for good errors.
-        match self {
-            ParsedPattern::Target(package, target_name, val) => Ok((package, target_name, val)),
-            _ => Err(TargetPatternParseError::TargetLiteralRequired(original.to_owned()).into()),
-        }
-    }
-
-    /// Parse a TargetPattern, but where there is no relative directory.
-    pub fn parse_precise(
-        pattern: &str,
-        cell: CellName,
-        cell_resolver: &CellResolver,
-    ) -> anyhow::Result<Self> {
-        parse_target_pattern(
-            cell,
-            cell_resolver,
-            None,
-            TargetParsingOptions::precise(),
-            pattern,
-        )
-        .with_context(|| {
-            format!(
-                "Invalid absolute target pattern `{}` is not allowed",
-                pattern
-            )
-        })
-    }
-
-    pub fn parsed_opt_absolute(
-        pattern: &str,
-        relative_dir: Option<CellPathRef>,
-        cell: CellName,
-        cell_resolver: &CellResolver,
-    ) -> anyhow::Result<Self> {
-        parse_target_pattern(
-            cell,
-            cell_resolver,
-            None,
-            TargetParsingOptions {
-                relative: TargetParsingRel::RequireAbsolute(relative_dir),
-                infer_target: false,
-                strip_package_trailing_slash: false,
-            },
-            pattern,
-        )
-        .with_context(|| {
-            format!(
-                "Invalid absolute target pattern `{}` is not allowed",
-                pattern
-            )
-        })
-    }
-
-    /// Parse a TargetPattern out, resolving aliases via `cell_resolver`, and resolving relative
-    /// targets via `enclosing_package`, if provided.
-    /// Allows everything from `parse_absolute`, plus relative patterns.
-    pub fn parse_relative(
-        target_alias_resolver: &dyn TargetAliasResolver,
-        relative_dir: CellPathRef,
-        pattern: &str,
-        cell_resolver: &CellResolver,
-    ) -> anyhow::Result<Self> {
-        parse_target_pattern(
-            relative_dir.cell(),
-            cell_resolver,
-            Some(target_alias_resolver),
-            TargetParsingOptions {
-                relative: TargetParsingRel::AllowRelative(relative_dir),
-                infer_target: false,
-                strip_package_trailing_slash: false,
-            },
-            pattern,
-        )
-        .with_context(|| {
-            format!(
-                "Invalid relative target pattern `{}` is not allowed",
-                pattern
-            )
-        })
-    }
-
-    /// Parse a TargetPattern out, resolving aliases via `cell_resolver`, resolving relative
-    /// targets via `relative_dir`, inferring a target name if no target or recursive pattern
-    /// is provided (e.g. `//foo/bar` is inferred to be equivalent to `//foo/bar:bar`), and
-    /// stripping trailing `/` in package names instead of rejecting them.
-    ///
-    /// This should only be used with user-provided command line arguments, as precision is
-    /// generally preferred elsewhere.
-    pub fn parse_relaxed(
-        target_alias_resolver: &dyn TargetAliasResolver,
-        relative_dir: CellPathRef,
-        pattern: &str,
-        cell_resolver: &CellResolver,
-    ) -> anyhow::Result<Self> {
-        parse_target_pattern(
-            relative_dir.cell(),
-            cell_resolver,
-            Some(target_alias_resolver),
-            TargetParsingOptions {
-                relative: TargetParsingRel::AllowRelative(relative_dir),
-                infer_target: true,
-                strip_package_trailing_slash: true,
-            },
-            pattern,
-        )
-        .with_context(|| format!("Parsing target pattern `{}`", pattern))
-    }
-
-    pub fn testing_parse(pattern: &str) -> Self {
-        let cell_name = pattern.split_once("//").unwrap().0;
-        let cell_name = CellName::testing_new(cell_name);
-        let cell_resolver =
-            CellResolver::testing_with_name_and_path(cell_name, CellRootPathBuf::testing_new(""));
-        Self::parse_precise(pattern, cell_name, &cell_resolver).unwrap()
-    }
-}
-
-impl<T: PatternType> Display for ParsedPattern<T> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        match self {
-            ParsedPattern::Target(package, target_name, pattern) => {
-                write!(
-                    f,
-                    "{}",
-                    display_precise_pattern(package, target_name.as_ref(), pattern)
-                )
-            }
-            ParsedPattern::Package(package) => {
-                write!(f, "{}:", package.as_cell_path())
-            }
-            ParsedPattern::Recursive(path) => {
-                if path.path().is_empty() {
-                    write!(f, "{}...", path)
-                } else {
-                    write!(f, "{}/...", path)
-                }
-            }
-        }
-    }
-}
-
-#[derive(Debug)]
-pub struct PatternParts<'a, T: PatternType> {
-    /// Is there a `foo//` or `//` part.
-    pub cell_alias: Option<&'a str>,
-    pub pattern: PatternDataOrAmbiguous<'a, T>,
-}
-
-impl<'a, T: PatternType> PatternParts<'a, T> {
-    fn try_map<U: PatternType, F: FnOnce(T) -> anyhow::Result<U>>(
-        self,
-        f: F,
-    ) -> anyhow::Result<PatternParts<'a, U>> {
-        let PatternParts {
-            cell_alias,
-            pattern,
-        } = self;
-        Ok(PatternParts {
-            cell_alias,
-            pattern: pattern.try_map(f)?,
-        })
-    }
-}
-
-#[derive(Debug, derive_more::From)]
-pub enum PatternDataOrAmbiguous<'a, T: PatternType> {
-    /// We successfully extracted PatternData.
-    PatternData(PatternData<'a, T>),
-
-    /// This pattern looks like `foo/bar`, `foo/bar/` or `foo`. It could be a package + target if
-    /// we allow inference (i.e. expanding `foo/bar:bar`).
-    Ambiguous {
-        /// The pattern. If we allow inference this will become the package.
-        pattern: &'a str,
-        /// Whether we should strip trailing slashes out of this pattern before doing inference
-        /// (rather than throwing an error).
-        strip_package_trailing_slash: bool,
-        extra: T,
-    },
-}
-
-impl<'a, T: PatternType> PatternDataOrAmbiguous<'a, T> {
-    fn try_map<U: PatternType>(
-        self,
-        f: impl FnOnce(T) -> anyhow::Result<U>,
-    ) -> anyhow::Result<PatternDataOrAmbiguous<'a, U>> {
-        match self {
-            PatternDataOrAmbiguous::PatternData(d) => {
-                Ok(PatternDataOrAmbiguous::PatternData(d.try_map(f)?))
-            }
-            PatternDataOrAmbiguous::Ambiguous {
-                pattern,
-                strip_package_trailing_slash,
-                extra,
-            } => Ok(PatternDataOrAmbiguous::Ambiguous {
-                pattern,
-                strip_package_trailing_slash,
-                extra: f(extra)?,
-            }),
-        }
-    }
-}
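`infer_target`, defined just after this point, is what turns an ambiguous `foo/bar` into `foo/bar:bar`: the last path component doubles as the target name. A standalone sketch of that rule over plain strings (a hypothetical helper; the real code goes through `normalize_package` and `ForwardRelativePath`):

```rust
// Infer `pkg:target` from a bare package path: `foo/bar` -> ("foo/bar", "bar").
fn infer_target(pattern: &str) -> Result<(String, String), String> {
    // Trailing slashes are stripped first, mirroring strip_package_trailing_slash.
    let package = pattern.strip_suffix('/').unwrap_or(pattern);
    let target = package
        .rsplit('/')
        .next()
        .filter(|t| !t.is_empty())
        .ok_or("package is empty")?;
    Ok((package.to_owned(), target.to_owned()))
}

fn main() {
    assert_eq!(
        infer_target("foo/bar").unwrap(),
        ("foo/bar".to_owned(), "bar".to_owned())
    );
    assert_eq!(
        infer_target("foo/bar/").unwrap(),
        ("foo/bar".to_owned(), "bar".to_owned())
    );
    assert!(infer_target("").is_err()); // mirrors PackageIsEmpty
}
```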
-
-impl<'a, T> PatternDataOrAmbiguous<'a, T>
-where
-    T: PatternType,
-{
-    /// If the pattern is ambiguous, try to infer a target. This would convert `foo/bar` into
-    /// `foo/bar:bar`.
-    pub fn infer_target(self) -> anyhow::Result<PatternData<'a, T>> {
-        match self {
-            Self::PatternData(d) => Ok(d),
-            Self::Ambiguous {
-                pattern,
-                strip_package_trailing_slash,
-                extra,
-            } => {
-                let package = normalize_package(pattern, strip_package_trailing_slash)?;
-
-                let target = package
-                    .file_name()
-                    .context(TargetPatternParseError::PackageIsEmpty)?;
-
-                let target_name = TargetName::new(target.as_ref())?;
-
-                Ok(PatternData::TargetInPackage {
-                    package,
-                    target_name,
-                    extra,
-                })
-            }
-        }
-    }
-
-    /// If the pattern is ambiguous, error out.
-    pub fn reject_ambiguity(self) -> anyhow::Result<PatternData<'a, T>> {
-        match self {
-            Self::PatternData(d) => Ok(d),
-            Self::Ambiguous { pattern, .. } => {
-                // Check if the user maybe tried to use a macro
-                if pattern.contains('$')
-                    && pattern.contains(' ')
-                    && pattern.contains('(')
-                    && pattern.contains(')')
-                {
-                    return Err(TargetPatternParseError::PossibleMacroUsage.into());
-                }
-                Err(TargetPatternParseError::UnexpectedFormat.into())
-            }
-        }
-    }
-}
-
-/// The pattern data we extracted.
-#[derive(Debug)]
-pub enum PatternData<'a, T: PatternType> {
-    /// A pattern like `foo/bar/...`.
-    Recursive { package: &'a ForwardRelativePath },
-
-    /// A pattern like `foo/bar:`, or `:`
-    AllTargetsInPackage { package: &'a ForwardRelativePath },
-
-    /// A pattern like `foo/bar:qux`, or `:qux`. The target will never be empty.
-    TargetInPackage {
-        package: &'a ForwardRelativePath,
-        target_name: TargetName,
-        extra: T,
-    },
-}
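The three `PatternData` variants above correspond directly to the surface syntax. A reduced classifier showing which shape each input maps to (illustrative only; it ignores cell aliases and provider labels, which the real lexer handles first):

```rust
#[derive(Debug, PartialEq)]
enum Shape<'a> {
    Recursive { package: &'a str },                         // `foo/bar/...`
    AllTargetsInPackage { package: &'a str },               // `foo/bar:`
    TargetInPackage { package: &'a str, target: &'a str },  // `foo/bar:qux`
}

fn classify(pattern: &str) -> Option<Shape<'_>> {
    if let Some(package) = pattern.strip_suffix("/...") {
        return Some(Shape::Recursive { package });
    }
    if pattern == "..." {
        return Some(Shape::Recursive { package: "" });
    }
    match pattern.split_once(':') {
        Some((package, "")) => Some(Shape::AllTargetsInPackage { package }),
        Some((package, target)) => Some(Shape::TargetInPackage { package, target }),
        None => None, // ambiguous: needs target inference, or is an error
    }
}

fn main() {
    assert_eq!(
        classify("foo/bar/..."),
        Some(Shape::Recursive { package: "foo/bar" })
    );
    assert_eq!(
        classify("foo/bar:"),
        Some(Shape::AllTargetsInPackage { package: "foo/bar" })
    );
    assert_eq!(
        classify("foo/bar:qux"),
        Some(Shape::TargetInPackage { package: "foo/bar", target: "qux" })
    );
    assert_eq!(classify("foo/bar"), None);
}
```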
-
-impl<'a, T: PatternType> PatternData<'a, T> {
-    fn try_map<U: PatternType>(
-        self,
-        f: impl FnOnce(T) -> anyhow::Result<U>,
-    ) -> anyhow::Result<PatternData<'a, U>> {
-        match self {
-            PatternData::Recursive { package } => Ok(PatternData::Recursive { package }),
-            PatternData::AllTargetsInPackage { package } => {
-                Ok(PatternData::AllTargetsInPackage { package })
-            }
-            PatternData::TargetInPackage {
-                package,
-                target_name,
-                extra,
-            } => Ok(PatternData::TargetInPackage {
-                package,
-                target_name,
-                extra: f(extra)?,
-            }),
-        }
-    }
-
-    pub fn package_path(&self) -> &'a ForwardRelativePath {
-        match self {
-            Self::Recursive { package } => package,
-            Self::AllTargetsInPackage { package } => package,
-            Self::TargetInPackage { package, .. } => package,
-        }
-    }
-
-    pub fn target(&self) -> Option<(&TargetName, &T)> {
-        match self {
-            Self::Recursive { .. } => None,
-            Self::AllTargetsInPackage { .. } => None,
-            Self::TargetInPackage {
-                target_name, extra, ..
-            } => Some((target_name, extra)),
-        }
-    }
-
-    /// Whether this is a target that looks like `:target`.
-    pub fn is_adjacent_target(&self) -> bool {
-        self.package_path().is_empty() && self.target().is_some()
-    }
-}
-
-// Splits a pattern into cell alias and forward relative path if "//" is present, otherwise returns None.
-pub fn maybe_split_cell_alias_and_relative_path<'a>(
-    pattern: &'a str,
-) -> anyhow::Result<Option<(CellAlias, &'a ForwardRelativePath)>> {
-    Ok(match split1_opt_ascii(pattern, AsciiStr2::new("//")) {
-        Some((a, p)) => Some((
-            CellAlias::new(trim_prefix_ascii(a, AsciiChar::new('@')).to_owned()),
-            ForwardRelativePath::new(p)?,
-        )),
-        None => None,
-    })
-}
-
-fn lex_provider_pattern<'a>(
-    pattern: &'a str,
-    strip_package_trailing_slash: bool,
-) -> anyhow::Result<PatternParts<'a, ProvidersPatternExtra>> {
-    let (cell_alias, pattern) = match split1_opt_ascii(pattern, AsciiStr2::new("//")) {
-        Some((a, p)) => (Some(trim_prefix_ascii(a, AsciiChar::new('@'))), p),
-        None => (None, pattern),
-    };
-
-    let pattern = match split1_opt_ascii(pattern, AsciiChar::new(':')) {
-        Some((package, "")) => PatternData::AllTargetsInPackage {
-            package: normalize_package(package, strip_package_trailing_slash)?,
-        }
-        .into(),
-        Some((package, target)) => {
-            let (target, providers) = split_providers_name(target)?;
-            let target_name = TargetName::new(target)?;
-            let extra = ProvidersPatternExtra { providers };
-            PatternData::TargetInPackage {
-                package: normalize_package(package, strip_package_trailing_slash)?,
-                target_name,
-                extra,
-            }
-            .into()
-        }
-        None => {
-            if let Some(package) = strip_suffix_ascii(pattern, AsciiStr::new("/...")) {
-                PatternData::Recursive {
-                    package: ForwardRelativePath::new(package)?,
-                }
-                .into()
-            } else if pattern == "..." {
-                PatternData::Recursive {
-                    package: ForwardRelativePath::new("")?,
-                }
-                .into()
-            } else if !pattern.is_empty() {
-                let (pattern, providers) = split_providers_name(pattern)?;
-                PatternDataOrAmbiguous::Ambiguous {
-                    pattern,
-                    strip_package_trailing_slash,
-                    extra: ProvidersPatternExtra { providers },
-                }
-            } else {
-                return Err(TargetPatternParseError::UnexpectedFormat.into());
-            }
-        }
-    };
-
-    Ok(PatternParts {
-        cell_alias,
-        pattern,
-    })
-}
-
-fn lex_configuration_predicate(pattern: &str) -> anyhow::Result<ConfigurationPredicate> {
-    let pattern = pattern
-        .strip_prefix('(')
-        .context(TargetPatternParseError::ConfigurationPartMustBeEnclosedInParentheses)?;
-    let pattern = pattern
-        .strip_suffix(')')
-        .context(TargetPatternParseError::ConfigurationPartMustBeEnclosedInParentheses)?;
-    match pattern.split_once('#') {
-        Some((cfg, hash)) => {
-            let cfg = BoundConfigurationLabel::new(cfg.to_owned())?;
-            let hash = ConfigurationHash::from_str(hash)?;
-            Ok(ConfigurationPredicate::Bound(cfg, Some(hash)))
-        }
-        None => {
-            if let Some(builtin) = BuiltinPlatform::from_label(pattern) {
-                Ok(ConfigurationPredicate::Builtin(builtin))
-            } else {
-                Ok(ConfigurationPredicate::Bound(
-                    BoundConfigurationLabel::new(pattern.to_owned())?,
-                    None,
-                ))
-            }
-        }
-    }
-}
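`lex_configuration_predicate` above parses the parenthesized configuration suffix of a configured pattern, with an optional `#hash` pinning a concrete configuration. A reduced sketch of that grammar (a hypothetical `Predicate` type; the real code also recognizes builtin platforms):

```rust
#[derive(Debug, PartialEq)]
enum Predicate {
    Bound { label: String, hash: Option<String> },
}

fn parse_predicate(s: &str) -> Result<Predicate, String> {
    // The configuration part must be enclosed in `()`.
    let inner = s
        .strip_prefix('(')
        .and_then(|s| s.strip_suffix(')'))
        .ok_or("configuration must be enclosed in `()`")?;
    // An optional `#hash` suffix pins a specific configuration instance.
    let (label, hash) = match inner.split_once('#') {
        Some((l, h)) => (l, Some(h.to_owned())),
        None => (inner, None),
    };
    Ok(Predicate::Bound { label: label.to_owned(), hash })
}

fn main() {
    assert_eq!(
        parse_predicate("(my-platform#0123456789abcdef)").unwrap(),
        Predicate::Bound {
            label: "my-platform".to_owned(),
            hash: Some("0123456789abcdef".to_owned()),
        }
    );
    assert!(parse_predicate("my-platform").is_err()); // missing parentheses
}
```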
-
-/// Split target pattern and configuration preserving parentheses for better diagnostics.
-fn split_cfg(s: &str) -> Option<(&str, &str)> {
-    // Fast path.
-    if !s.contains(' ') {
-        return None;
-    }
-
-    let mut braces: u32 = 0;
-    for (i, c) in s.char_indices() {
-        match c {
-            '(' => braces += 1,
-            ')' => match braces.checked_sub(1) {
-                Some(b) => braces = b,
-                None => {
-                    // Pattern is invalid, let parser fail elsewhere.
-                    return None;
-                }
-            },
-            ' ' if braces == 0 => return Some((&s[..i], &s[i + 1..])),
-            _ => {}
-        }
-    }
-    None
-}
-
-pub fn lex_configured_providers_pattern<'a>(
-    pattern: &'a str,
-    strip_package_trailing_slash: bool,
-) -> anyhow::Result<PatternParts<'a, ConfiguredProvidersPatternExtra>> {
-    let (provider_pattern, cfg) = match split_cfg(pattern) {
-        Some((providers, cfg)) => {
-            let provider_pattern = lex_provider_pattern(providers, strip_package_trailing_slash)?;
-            let cfg = lex_configuration_predicate(cfg)?;
-            (provider_pattern, cfg)
-        }
-        None => (
-            lex_provider_pattern(pattern, strip_package_trailing_slash)?,
-            ConfigurationPredicate::Any,
-        ),
-    };
-    provider_pattern.try_map(|ProvidersPatternExtra { providers }| {
-        Ok(ConfiguredProvidersPatternExtra { providers, cfg })
-    })
-}
-
-// Lex the target pattern into the relevant pieces.
-pub fn lex_target_pattern<'a, T: PatternType>(
-    pattern: &'a str,
-    strip_package_trailing_slash: bool,
-) -> anyhow::Result<PatternParts<'a, T>> {
-    let provider_pattern = lex_configured_providers_pattern(pattern, strip_package_trailing_slash)?;
-    provider_pattern
-        .try_map(|extra| T::from_configured_providers(extra))
-        .with_context(|| {
-            // This can only fail when `PatternType = TargetName`, so the message is correct.
-            TargetPatternParseError::ExpectingPatternOfType(T::NAME, pattern.to_owned())
-        })
-}
-
-fn normalize_package<'a>(
-    package: &'a str,
-    strip_package_trailing_slash: bool,
-) -> anyhow::Result<&'a ForwardRelativePath> {
-    // Strip or reject trailing `/`, such as in `foo/:bar`.
-    if let Some(stripped) = strip_suffix_ascii(package, AsciiChar::new('/')) {
-        if strip_package_trailing_slash {
-            return ForwardRelativePath::new(stripped);
-        } else {
-            return Err(anyhow::Error::from(
-                TargetPatternParseError::PackageTrailingSlash,
-            ));
-        }
-    }
-
-    ForwardRelativePath::new(package)
-}
-
-#[derive(Clone, Dupe)]
-enum TargetParsingRel<'a> {
-    /// The dir this pattern should be interpreted relative to.
-    AllowRelative(CellPathRef<'a>),
-    /// The dir this pattern should be interpreted relative to.
-    /// This is only used for targets such as `:foo`.
-    RequireAbsolute(Option<CellPathRef<'a>>),
-}
-
-impl<'a> TargetParsingRel<'a> {
-    fn dir(&self) -> Option<CellPathRef<'a>> {
-        match self {
-            TargetParsingRel::AllowRelative(dir) => Some(*dir),
-            TargetParsingRel::RequireAbsolute(dir) => *dir,
-        }
-    }
-
-    fn allow_relative(&self) -> bool {
-        match self {
-            TargetParsingRel::AllowRelative(_) => true,
-            TargetParsingRel::RequireAbsolute(_) => false,
-        }
-    }
-}
-
-#[derive(Clone, Dupe)]
-struct TargetParsingOptions<'a> {
-    relative: TargetParsingRel<'a>,
-    /// Whether to infer the target in a pattern such as `foo/bar` (to `foo/bar:bar`).
-    infer_target: bool,
-    /// Whether to strip trailing slashes in package names, in e.g. `foo/bar/` or `foo/bar/:qux`.
-    /// If not set, trailing slashes are an error. Note that this happens before target inference
-    /// (if enabled), so e.g. `foo/bar/` becomes `foo/bar:bar`.
-    strip_package_trailing_slash: bool,
-}
-
-impl<'a> TargetParsingOptions<'a> {
-    fn precise() -> TargetParsingOptions<'a> {
-        TargetParsingOptions {
-            relative: TargetParsingRel::RequireAbsolute(None),
-            infer_target: false,
-            strip_package_trailing_slash: false,
-        }
-    }
-}
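Taken together with the entry points earlier in the file, `TargetParsingOptions` encodes three independent choices. A hedged summary of how `parse_precise`/`parsed_opt_absolute`, `parse_relative`, and `parse_relaxed` set them (field names simplified; the real `relative` field also carries the working-directory handle):

```rust
#[derive(Clone, Copy)]
struct Options {
    allow_relative: bool,
    infer_target: bool,
    strip_package_trailing_slash: bool,
}

// parse_precise / parsed_opt_absolute: nothing inferred or tolerated.
const PRECISE: Options = Options {
    allow_relative: false,
    infer_target: false,
    strip_package_trailing_slash: false,
};

// parse_relative: relative patterns allowed, still no inference.
const RELATIVE: Options = Options {
    allow_relative: true,
    infer_target: false,
    strip_package_trailing_slash: false,
};

// parse_relaxed: command-line friendly, so `foo/bar/` becomes `foo/bar:bar`.
const RELAXED: Options = Options {
    allow_relative: true,
    infer_target: true,
    strip_package_trailing_slash: true,
};

fn main() {
    for (name, o) in [("precise", PRECISE), ("relative", RELATIVE), ("relaxed", RELAXED)] {
        println!(
            "{name}: relative={} infer={} strip_slash={}",
            o.allow_relative, o.infer_target, o.strip_package_trailing_slash
        );
    }
}
```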
-
-/// Parse a TargetPattern out, resolving aliases via `cell_resolver`, and resolving relative
-/// targets via `enclosing_package`, if provided.
-fn parse_target_pattern<T>(
-    cell_name: CellName,
-    cell_resolver: &CellResolver,
-    target_alias_resolver: Option<&dyn TargetAliasResolver>,
-    opts: TargetParsingOptions,
-    pattern: &str,
-) -> anyhow::Result<ParsedPattern<T>>
-where
-    T: PatternType,
-{
-    let parsed_pattern = parse_target_pattern_no_validate::<T>(
-        cell_name,
-        cell_resolver,
-        target_alias_resolver,
-        opts,
-        pattern,
-    )?;
-
-    let crossed_path =
-        cell_resolver.resolve_path_crossing_cell_boundaries(parsed_pattern.cell_path())?;
-    if crossed_path != parsed_pattern.cell_path() {
-        let new_pattern = match &parsed_pattern {
-            ParsedPattern::Target(_, target_name, extra) => ParsedPattern::Target(
-                PackageLabel::from_cell_path(crossed_path),
-                target_name.dupe(),
-                extra.clone(),
-            ),
-            ParsedPattern::Package(_) => {
-                ParsedPattern::Package(PackageLabel::from_cell_path(crossed_path))
-            }
-            ParsedPattern::Recursive(_) => ParsedPattern::Recursive(crossed_path.to_owned()),
-        };
-
-        soft_error!(
-            "pattern_crosses_cell_boundary",
-            TargetPatternParseError::PatternCrossesCellBoundaries(
-                pattern.to_owned(),
-                parsed_pattern.to_string(),
-                new_pattern.to_string(),
-            )
-            .into()
-        )?;
-    }
-
-    Ok(parsed_pattern)
-}
-
-fn parse_target_pattern_no_validate<T>(
-    cell_name: CellName,
-    cell_resolver: &CellResolver,
-    target_alias_resolver: Option<&dyn TargetAliasResolver>,
-    opts: TargetParsingOptions,
-    pattern: &str,
-) -> anyhow::Result<ParsedPattern<T>>
-where
-    T: PatternType,
-{
-    let TargetParsingOptions {
-        relative,
-        infer_target,
-        strip_package_trailing_slash,
-    } = opts;
-
-    if let Some(dir) = relative.dir() {
-        if dir.cell() != cell_name {
-            return Err(
-                TargetPatternParseError::CellResolverCellDoesNotMatchWorkingDir(
-                    cell_name,
-                    dir.to_owned(),
-                )
-                .into(),
-            );
-        }
-    }
-
-    let cell_alias_resolver = cell_resolver.get(cell_name)?.cell_alias_resolver();
-
-    let lex = lex_target_pattern(pattern, strip_package_trailing_slash)?;
-
-    if let Some(target_alias_resolver) = target_alias_resolver {
-        if let Some(aliased) =
-            resolve_target_alias(cell_name, cell_resolver, target_alias_resolver, &lex)?
-        {
-            return Ok(aliased);
-        }
-    }
-
-    let PatternParts {
-        cell_alias,
-        pattern,
-    } = lex;
-
-    let pattern = if infer_target {
-        pattern.infer_target()?
-    } else {
-        pattern.reject_ambiguity()?
-    };
-
-    // This allows things of the form `//foo` (having a cell alias) or `:bar` (no cell, no package,
-    // just relative target). This is a bit of a wonky definition of "is_absolute" but we rely on
-    // it.
-    let is_absolute_or_adjacent = cell_alias.is_some() || pattern.is_adjacent_target();
-    if !relative.allow_relative() && !is_absolute_or_adjacent {
-        return Err(TargetPatternParseError::AbsoluteRequired.into());
-    }
-
-    // Prohibit parsing `:foo` as `root//:foo`.
-    if relative.dir().is_none() && cell_alias.is_none() {
-        soft_error!(
-            "adjacent_target_no_path",
-            TargetPatternParseError::AbsoluteRequired.into()
-        )?;
-    }
-
-    // We ask for the cell, but if the pattern is relative we might not use it
-    let cell = cell_alias_resolver.resolve(cell_alias.unwrap_or_default())?;
-
-    let package_path = pattern.package_path();
-
-    let path = match relative.dir() {
-        Some(rel)
-            if cell_alias.is_none() && (relative.allow_relative() || package_path.is_empty()) =>
-        {
-            CellPathCow::Owned(rel.join(package_path))
-        }
-        _ => CellPathCow::Borrowed(CellPathRef::new(cell, CellRelativePath::new(package_path))),
-    };
-
-    match pattern {
-        PatternData::Recursive { .. } => Ok(ParsedPattern::Recursive(path.into_owned())),
-        PatternData::AllTargetsInPackage { .. } => Ok(ParsedPattern::Package(
-            PackageLabel::from_cell_path(path.as_ref()),
-        )),
-        PatternData::TargetInPackage {
-            target_name, extra, ..
-        } => Ok(ParsedPattern::Target(
-            PackageLabel::from_cell_path(path.as_ref()),
-            target_name,
-            extra,
-        )),
-    }
-}
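`resolve_target_alias`, defined next, only considers a bare ambiguous token as an alias candidate: it looks the token up, validates it against `ALIAS_REGEX`, and re-parses the alias's value with the precise rules. A toy version of the candidate check and lookup (a hypothetical `HashMap` table standing in for `TargetAliasResolver`):

```rust
use std::collections::HashMap;

// A token that already looks like a pattern (`//`, `:`, `...`) cannot be an alias;
// only a bare ambiguous word is looked up in the alias table.
fn resolve_alias<'a>(token: &str, aliases: &'a HashMap<String, String>) -> Option<&'a str> {
    if token.contains("//") || token.contains(':') || token.contains("...") {
        return None;
    }
    aliases.get(token).map(String::as_str)
}

fn main() {
    let mut aliases = HashMap::new();
    aliases.insert("myserver".to_owned(), "root//apps/server:server".to_owned());

    // The alias's value would then be re-parsed as a precise target pattern.
    assert_eq!(
        resolve_alias("myserver", &aliases),
        Some("root//apps/server:server")
    );
    assert_eq!(resolve_alias("root//apps:app", &aliases), None);
}
```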
-
-#[derive(thiserror::Error, Debug)]
-enum ResolveTargetAliasError {
-    #[error("Error dereferencing alias `{}` -> `{}`", target, alias)]
-    ErrorDereferencing { target: String, alias: String },
-
-    #[error("Invalid alias: `{}`", alias)]
-    InvalidAlias { alias: String },
-
-    #[error("Alias for `{}` is not a target: `{}`", target, alias)]
-    AliasIsNotATarget { target: String, alias: String },
-}
-
-fn resolve_target_alias<T>(
-    cell_name: CellName,
-    cell_resolver: &CellResolver,
-    target_alias_resolver: &dyn TargetAliasResolver,
-    lex: &PatternParts<T>,
-) -> anyhow::Result<Option<ParsedPattern<T>>>
-where
-    T: PatternType,
-{
-    // Imported from Buck1
-    static ALIAS_REGEX: Lazy<Regex> =
-        Lazy::new(|| Regex::new("^[a-zA-Z_-][a-zA-Z0-9_-]*$").unwrap());
-
-    // If the input starts with a cell path, it can't be an alias.
-    if lex.cell_alias.is_some() {
-        return Ok(None);
-    }
-
-    // Unless the input is a standalone bit of ambiguous text then it cannot be an alias.
-    let (target, extra) = match &lex.pattern {
-        PatternDataOrAmbiguous::Ambiguous { pattern, extra, .. } => (*pattern, extra),
-        _ => return Ok(None),
-    };
-
-    // Check if this is an alias after all.
-    let alias = match target_alias_resolver.get(target)? {
-        Some(alias) => alias,
-        None => return Ok(None),
-    };
-
-    // Now that we know it's an alias, check it matches the regex. We only do this once we know the
-    // alias is valid so that we avoid throwing "alias is invalid" if the user didn't mean to use
-    // an alias.
-    if !ALIAS_REGEX.is_match(target) {
-        return Err(ResolveTargetAliasError::InvalidAlias {
-            alias: alias.to_owned(),
-        }
-        .into());
-    }
-
-    // We found a matching alias. Parse the alias as a target.
-    let res = parse_target_pattern::<TargetPatternExtra>(
-        cell_name,
-        cell_resolver,
-        None,
-        TargetParsingOptions::precise(),
-        alias,
-    )
-    .with_context(|| ResolveTargetAliasError::ErrorDereferencing {
-        target: target.to_owned(),
-        alias: alias.to_owned(),
-    })?;
-
-    // And finally, put the `T` we were looking for back together.
-    let res = match res {
-        ParsedPattern::Target(package, target_name, TargetPatternExtra) => {
-            ParsedPattern::Target(package, target_name, extra.clone())
-        }
-        _ => {
-            return Err(ResolveTargetAliasError::AliasIsNotATarget {
-                target: target.to_owned(),
-                alias: alias.to_owned(),
-            }
-            .into());
-        }
-    };
-
-    Ok(Some(res))
-}
-
-#[derive(Debug, Eq, PartialEq)]
-pub enum PackageSpec<T: PatternType> {
-    /// Given targets in a package.
-    Targets(Vec<(TargetName, T)>),
-    /// All targets in a package, without subpackages.
-    /// Syntax for this variant is `foo:`.
-    All,
-}
-
-#[cfg(test)]
-mod tests {
-    use std::collections::HashMap;
-    use std::marker::PhantomData;
-
-    use assert_matches::assert_matches;
-    use gazebo::prelude::*;
-    use pattern_type::PatternType;
-    use test_case::test_case;
-
-    use super::*;
-    use crate::cells::alias::NonEmptyCellAlias;
-    use crate::cells::cell_root_path::CellRootPathBuf;
-    use crate::cells::name::CellName;
-    use crate::cells::paths::CellRelativePathBuf;
-    use crate::pattern::pattern_type::ConfiguredTargetPatternExtra;
-    use crate::target::label::TargetLabel;
-    use crate::target::name::TargetNameRef;
-
-    fn mk_package(cell: &str, path: &str) -> ParsedPattern